author    libo <bo.li@amlogic.com>  2022-06-02 20:42:03 +0800
committer Hongguang <hgchen@google.com>  2022-06-03 10:18:18 -0700
commit    5aba84a5bf552ef31c81b4c4fa7619774c6608f9 (patch)
tree      120eae15137b08f806f8f9e0b491826a4090fe8b
parent    b18840db80ef8621a0c6dfdd7c21388939f5ad42 (diff)
download  dhd-driver-android-arm64-deadpool-4.9-android13-tv.tar.gz

PD#SWPL-81801
BUG=232494797

Problem: update ap6356 driver to bcmdhd.101.10.361.x
Solution: update ap6356 driver to bcmdhd.101.10.361.x
Verify: adt3

Signed-off-by: libo <bo.li@amlogic.com>
Change-Id: I61f0851c85fc9bd8ae5802d2811cfb0a5d4bce05
-rwxr-xr-x  bcmdhd.101.10.361.x/Kconfig  61
-rwxr-xr-x  bcmdhd.101.10.361.x/Makefile  391
-rwxr-xr-x  bcmdhd.101.10.361.x/aiutils.c  2604
-rwxr-xr-x  bcmdhd.101.10.361.x/bcm_app_utils.c  1276
-rwxr-xr-x  bcmdhd.101.10.361.x/bcm_l2_filter.c  766
-rwxr-xr-x  bcmdhd.101.10.361.x/bcmbloom.c  233
-rwxr-xr-x  bcmdhd.101.10.361.x/bcmevent.c  445
-rwxr-xr-x  bcmdhd.101.10.361.x/bcminternal-android.mk  88
-rwxr-xr-x  bcmdhd.101.10.361.x/bcminternal.mk  60
-rwxr-xr-x  bcmdhd.101.10.361.x/bcmsdh.c  953
-rwxr-xr-x  bcmdhd.101.10.361.x/bcmsdh_linux.c  594
-rwxr-xr-x  bcmdhd.101.10.361.x/bcmsdh_sdmmc.c  2004
-rwxr-xr-x  bcmdhd.101.10.361.x/bcmsdh_sdmmc_linux.c  388
-rwxr-xr-x  bcmdhd.101.10.361.x/bcmsdspi.h  147
-rwxr-xr-x  bcmdhd.101.10.361.x/bcmsdspi_linux.c  433
-rwxr-xr-x  bcmdhd.101.10.361.x/bcmsdstd.c  5406
-rwxr-xr-x  bcmdhd.101.10.361.x/bcmsdstd.h  301
-rwxr-xr-x  bcmdhd.101.10.361.x/bcmsdstd_linux.c  690
-rwxr-xr-x  bcmdhd.101.10.361.x/bcmspibrcm.c  1922
-rwxr-xr-x  bcmdhd.101.10.361.x/bcmsrom.c  6365
-rwxr-xr-x  bcmdhd.101.10.361.x/bcmstdlib.c  1251
-rwxr-xr-x  bcmdhd.101.10.361.x/bcmstdlib_s.c  298
-rwxr-xr-x  bcmdhd.101.10.361.x/bcmutils.c  6097
-rwxr-xr-x  bcmdhd.101.10.361.x/bcmwifi_channels.c  3000
-rwxr-xr-x  bcmdhd.101.10.361.x/bcmwifi_monitor.c  1071
-rwxr-xr-x  bcmdhd.101.10.361.x/bcmwifi_radiotap.c  1035
-rwxr-xr-x  bcmdhd.101.10.361.x/bcmwifi_rates.c  607
-rwxr-xr-x  bcmdhd.101.10.361.x/bcmwifi_rspec.c  274
-rwxr-xr-x  bcmdhd.101.10.361.x/bcmwpa.c  2648
-rwxr-xr-x  bcmdhd.101.10.361.x/bcmxtlv.c  647
-rwxr-xr-x  bcmdhd.101.10.361.x/dbus.c  2928
-rwxr-xr-x  bcmdhd.101.10.361.x/dbus_usb.c  1173
-rwxr-xr-x  bcmdhd.101.10.361.x/dbus_usb_linux.c  3405
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd.h  4655
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_bitpack.c  228
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_bitpack.h  33
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_bus.h  424
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_buzzz.h  224
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_ccode.c  274
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_cdc.c  1035
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_cfg80211.c  597
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_cfg80211.h  49
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_common.c  11596
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_config.c  5175
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_config.h  441
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_csi.c  219
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_csi.h  76
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_custom_cis.c  2010
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_custom_exynos.c  333
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_custom_gpio.c  437
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_custom_hikey.c  290
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_custom_memprealloc.c  500
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_custom_msm.c  283
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_custom_sec.c  1040
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_dbg.h  637
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_dbg_ring.c  473
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_dbg_ring.h  146
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_debug.c  2853
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_debug.h  891
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_debug_linux.c  528
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_event_log_filter.c  3236
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_event_log_filter.h  56
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_flowring.c  1466
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_flowring.h  350
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_fwtrace.c  563
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_fwtrace.h  55
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_gpio.c  497
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_ip.c  1425
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_ip.h  96
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_linux.c  29878
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_linux.h  523
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_linux_exportfs.c  2994
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_linux_lb.c  1402
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_linux_pktdump.c  1578
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_linux_pktdump.h  132
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_linux_platdev.c  1108
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_linux_priv.h  518
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_linux_sched.c  47
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_linux_sock_qos.c  1034
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_linux_sock_qos.h  118
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_linux_wq.c  413
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_linux_wq.h  89
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_macdbg.c  746
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_macdbg.h  34
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_mschdbg.c  796
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_mschdbg.h  36
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_msgbuf.c  15512
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_pcie.c  17674
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_pcie.h  1048
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_pcie_linux.c  3379
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_pktlog.c  1684
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_pktlog.h  311
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_plat.h  58
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_pno.c  4871
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_pno.h  586
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_proto.h  302
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_qos_algo.h  90
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_rtt.c  4855
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_rtt.h  555
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_sdio.c  11777
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_sec_feature.h  226
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_static_buf.c  657
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_statlog.c  1081
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_statlog.h  221
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_timesync.c  1239
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_timesync.h  68
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_wet.c  1187
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_wet.h  60
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_wlfc.c  4988
-rwxr-xr-x  bcmdhd.101.10.361.x/dhd_wlfc.h  596
-rwxr-xr-x  bcmdhd.101.10.361.x/frag.c  108
-rwxr-xr-x  bcmdhd.101.10.361.x/frag.h  32
-rwxr-xr-x  bcmdhd.101.10.361.x/ftdi_sio_external.h  39
-rwxr-xr-x  bcmdhd.101.10.361.x/hnd_pktpool.c  2130
-rwxr-xr-x  bcmdhd.101.10.361.x/hnd_pktq.c  1548
-rwxr-xr-x  bcmdhd.101.10.361.x/hndlhl.c  1241
-rwxr-xr-x  bcmdhd.101.10.361.x/hndmem.c  423
-rwxr-xr-x  bcmdhd.101.10.361.x/hndpmu.c  9929
-rwxr-xr-x  bcmdhd.101.10.361.x/include/802.11.h  5920
-rwxr-xr-x  bcmdhd.101.10.361.x/include/802.11ah.h  281
-rwxr-xr-x  bcmdhd.101.10.361.x/include/802.11ax.h  1180
-rwxr-xr-x  bcmdhd.101.10.361.x/include/802.11e.h  133
-rwxr-xr-x  bcmdhd.101.10.361.x/include/802.11r.h  55
-rwxr-xr-x  bcmdhd.101.10.361.x/include/802.11s.h  337
-rwxr-xr-x  bcmdhd.101.10.361.x/include/802.1d.h  47
-rwxr-xr-x  bcmdhd.101.10.361.x/include/802.3.h  49
-rwxr-xr-x  bcmdhd.101.10.361.x/include/aidmp.h  438
-rwxr-xr-x  bcmdhd.101.10.361.x/include/bcm_fwtrace.h  111
-rwxr-xr-x  bcmdhd.101.10.361.x/include/bcm_l2_filter.h  99
-rwxr-xr-x  bcmdhd.101.10.361.x/include/bcm_mpool_pub.h  344
-rwxr-xr-x  bcmdhd.101.10.361.x/include/bcm_ring.h  585
-rwxr-xr-x  bcmdhd.101.10.361.x/include/bcmarp.h  84
-rwxr-xr-x  bcmdhd.101.10.361.x/include/bcmbloom.h  73
-rwxr-xr-x  bcmdhd.101.10.361.x/include/bcmcdc.h  115
-rwxr-xr-x  bcmdhd.101.10.361.x/include/bcmdefs.h  909
-rwxr-xr-x  bcmdhd.101.10.361.x/include/bcmdevs.h  626
-rwxr-xr-x  bcmdhd.101.10.361.x/include/bcmdevs_legacy.h  188
-rwxr-xr-x  bcmdhd.101.10.361.x/include/bcmdhcp.h  86
-rwxr-xr-x  bcmdhd.101.10.361.x/include/bcmendian.h  451
-rwxr-xr-x  bcmdhd.101.10.361.x/include/bcmerror.h  573
-rwxr-xr-x  bcmdhd.101.10.361.x/include/bcmeth.h  109
-rwxr-xr-x  bcmdhd.101.10.361.x/include/bcmevent.h  1617
-rwxr-xr-x  bcmdhd.101.10.361.x/include/bcmicmp.h  83
-rwxr-xr-x  bcmdhd.101.10.361.x/include/bcmiov.h  353
-rwxr-xr-x  bcmdhd.101.10.361.x/include/bcmip.h  286
-rwxr-xr-x  bcmdhd.101.10.361.x/include/bcmipv6.h  160
-rwxr-xr-x  bcmdhd.101.10.361.x/include/bcmmsgbuf.h  1706
-rwxr-xr-x  bcmdhd.101.10.361.x/include/bcmnvram.h  162
-rwxr-xr-x  bcmdhd.101.10.361.x/include/bcmpcie.h  559
-rwxr-xr-x  bcmdhd.101.10.361.x/include/bcmpcispi.h  204
-rwxr-xr-x  bcmdhd.101.10.361.x/include/bcmperf.h  33
-rwxr-xr-x  bcmdhd.101.10.361.x/include/bcmproto.h  275
-rwxr-xr-x  bcmdhd.101.10.361.x/include/bcmrand.h  65
-rwxr-xr-x  bcmdhd.101.10.361.x/include/bcmsdbus.h  187
-rwxr-xr-x  bcmdhd.101.10.361.x/include/bcmsdh.h  290
-rwxr-xr-x  bcmdhd.101.10.361.x/include/bcmsdh_sdmmc.h  142
-rwxr-xr-x  bcmdhd.101.10.361.x/include/bcmsdpcm.h  304
-rwxr-xr-x  bcmdhd.101.10.361.x/include/bcmspi.h  37
-rwxr-xr-x  bcmdhd.101.10.361.x/include/bcmspibrcm.h  165
-rwxr-xr-x  bcmdhd.101.10.361.x/include/bcmsrom.h  72
-rwxr-xr-x  bcmdhd.101.10.361.x/include/bcmsrom_fmt.h  1028
-rwxr-xr-x  bcmdhd.101.10.361.x/include/bcmsrom_tbl.h  1303
-rwxr-xr-x  bcmdhd.101.10.361.x/include/bcmstdlib_s.h  54
-rwxr-xr-x  bcmdhd.101.10.361.x/include/bcmtcp.h  86
-rwxr-xr-x  bcmdhd.101.10.361.x/include/bcmtlv.h  375
-rwxr-xr-x  bcmdhd.101.10.361.x/include/bcmudp.h  54
-rwxr-xr-x  bcmdhd.101.10.361.x/include/bcmutils.h  1639
-rwxr-xr-x  bcmdhd.101.10.361.x/include/bcmwifi_channels.h  888
-rwxr-xr-x  bcmdhd.101.10.361.x/include/bcmwifi_monitor.h  98
-rwxr-xr-x  bcmdhd.101.10.361.x/include/bcmwifi_radiotap.h  382
-rwxr-xr-x  bcmdhd.101.10.361.x/include/bcmwifi_rates.h  1262
-rwxr-xr-x  bcmdhd.101.10.361.x/include/bcmwifi_rspec.h  286
-rwxr-xr-x  bcmdhd.101.10.361.x/include/bcmwpa.h  634
-rwxr-xr-x  bcmdhd.101.10.361.x/include/brcm_nl80211.h  77
-rwxr-xr-x  bcmdhd.101.10.361.x/include/d11.h  6055
-rwxr-xr-x  bcmdhd.101.10.361.x/include/d11_cfg.h  115
-rwxr-xr-x  bcmdhd.101.10.361.x/include/d11reglist_proto.h  66
-rwxr-xr-x  bcmdhd.101.10.361.x/include/d11regs.h  180
-rwxr-xr-x  bcmdhd.101.10.361.x/include/dbus.h  627
-rwxr-xr-x  bcmdhd.101.10.361.x/include/dhd_daemon.h  55
-rwxr-xr-x  bcmdhd.101.10.361.x/include/dhdioctl.h  478
-rwxr-xr-x  bcmdhd.101.10.361.x/include/dngl_rtlv.h  66
-rwxr-xr-x  bcmdhd.101.10.361.x/include/dngl_stats.h  388
-rwxr-xr-x  bcmdhd.101.10.361.x/include/dngl_wlhdr.h  39
-rwxr-xr-x  bcmdhd.101.10.361.x/include/dnglevent.h  174
-rwxr-xr-x  bcmdhd.101.10.361.x/include/dnglioctl.h  177
-rwxr-xr-x  bcmdhd.101.10.361.x/include/eap.h  121
-rwxr-xr-x  bcmdhd.101.10.361.x/include/eapol.h  292
-rwxr-xr-x  bcmdhd.101.10.361.x/include/epivers.h  51
-rwxr-xr-x  bcmdhd.101.10.361.x/include/etd.h  636
-rwxr-xr-x  bcmdhd.101.10.361.x/include/ethernet.h  252
-rwxr-xr-x  bcmdhd.101.10.361.x/include/event_log.h  666
-rwxr-xr-x  bcmdhd.101.10.361.x/include/event_log_payload.h  1775
-rwxr-xr-x  bcmdhd.101.10.361.x/include/event_log_set.h  142
-rwxr-xr-x  bcmdhd.101.10.361.x/include/event_log_tag.h  617
-rwxr-xr-x  bcmdhd.101.10.361.x/include/event_trace.h  187
-rwxr-xr-x  bcmdhd.101.10.361.x/include/fils.h  424
-rwxr-xr-x  bcmdhd.101.10.361.x/include/hnd_armtrap.h  86
-rwxr-xr-x  bcmdhd.101.10.361.x/include/hnd_cons.h  98
-rwxr-xr-x  bcmdhd.101.10.361.x/include/hnd_debug.h  250
-rwxr-xr-x  bcmdhd.101.10.361.x/include/hnd_pktpool.h  288
-rwxr-xr-x  bcmdhd.101.10.361.x/include/hnd_pktq.h  330
-rwxr-xr-x  bcmdhd.101.10.361.x/include/hnd_trap.h  33
-rwxr-xr-x  bcmdhd.101.10.361.x/include/hndchipc.h  47
-rwxr-xr-x  bcmdhd.101.10.361.x/include/hndd11.h  121
-rwxr-xr-x  bcmdhd.101.10.361.x/include/hnddma.h  338
-rwxr-xr-x  bcmdhd.101.10.361.x/include/hndlhl.h  94
-rwxr-xr-x  bcmdhd.101.10.361.x/include/hndmem.h  74
-rwxr-xr-x  bcmdhd.101.10.361.x/include/hndoobr.h  93
-rwxr-xr-x  bcmdhd.101.10.361.x/include/hndpmu.h  348
-rwxr-xr-x  bcmdhd.101.10.361.x/include/hndsoc.h  353
-rwxr-xr-x  bcmdhd.101.10.361.x/include/ieee80211_radiotap.h  400
-rwxr-xr-x  bcmdhd.101.10.361.x/include/linux_osl.h  868
-rwxr-xr-x  bcmdhd.101.10.361.x/include/linux_pkt.h  421
-rwxr-xr-x  bcmdhd.101.10.361.x/include/linuxver.h  945
-rwxr-xr-x  bcmdhd.101.10.361.x/include/lpflags.h  39
-rwxr-xr-x  bcmdhd.101.10.361.x/include/mbo.h  279
-rwxr-xr-x  bcmdhd.101.10.361.x/include/miniopt.h  73
-rwxr-xr-x  bcmdhd.101.10.361.x/include/monitor.h  230
-rwxr-xr-x  bcmdhd.101.10.361.x/include/msf.h  60
-rwxr-xr-x  bcmdhd.101.10.361.x/include/msgtrace.h  56
-rwxr-xr-x  bcmdhd.101.10.361.x/include/nan.h  1562
-rwxr-xr-x  bcmdhd.101.10.361.x/include/nci.h  96
-rwxr-xr-x  bcmdhd.101.10.361.x/include/osl.h  482
-rwxr-xr-x  bcmdhd.101.10.361.x/include/osl_decl.h  31
-rwxr-xr-x  bcmdhd.101.10.361.x/include/osl_ext.h  759
-rwxr-xr-x  bcmdhd.101.10.361.x/include/p2p.h  695
-rwxr-xr-x  bcmdhd.101.10.361.x/include/packed_section_end.h  62
-rwxr-xr-x  bcmdhd.101.10.361.x/include/packed_section_start.h  117
-rwxr-xr-x  bcmdhd.101.10.361.x/include/pcicfg.h  730
-rwxr-xr-x  bcmdhd.101.10.361.x/include/pcie_core.h  1485
-rwxr-xr-x  bcmdhd.101.10.361.x/include/sbchipc.h  5282
-rwxr-xr-x  bcmdhd.101.10.361.x/include/sbconfig.h  279
-rwxr-xr-x  bcmdhd.101.10.361.x/include/sbgci.h  424
-rwxr-xr-x  bcmdhd.101.10.361.x/include/sbhndarm.h  414
-rwxr-xr-x  bcmdhd.101.10.361.x/include/sbhnddma.h  481
-rwxr-xr-x  bcmdhd.101.10.361.x/include/sbhndpio.h  60
-rwxr-xr-x  bcmdhd.101.10.361.x/include/sbpcmcia.h  415
-rwxr-xr-x  bcmdhd.101.10.361.x/include/sbsdio.h  185
-rwxr-xr-x  bcmdhd.101.10.361.x/include/sbsdpcmdev.h  307
-rwxr-xr-x  bcmdhd.101.10.361.x/include/sbsocram.h  198
-rwxr-xr-x  bcmdhd.101.10.361.x/include/sbsprom.h  236
-rwxr-xr-x  bcmdhd.101.10.361.x/include/sbsysmem.h  191
-rwxr-xr-x  bcmdhd.101.10.361.x/include/sdio.h  644
-rwxr-xr-x  bcmdhd.101.10.361.x/include/sdioh.h  459
-rwxr-xr-x  bcmdhd.101.10.361.x/include/sdiovar.h  124
-rwxr-xr-x  bcmdhd.101.10.361.x/include/sdspi.h  72
-rwxr-xr-x  bcmdhd.101.10.361.x/include/siutils.h  1057
-rwxr-xr-x  bcmdhd.101.10.361.x/include/spid.h  164
-rwxr-xr-x  bcmdhd.101.10.361.x/include/trxhdr.h  93
-rwxr-xr-x  bcmdhd.101.10.361.x/include/typedefs.h  408
-rwxr-xr-x  bcmdhd.101.10.361.x/include/usbrdl.h  134
-rwxr-xr-x  bcmdhd.101.10.361.x/include/vlan.h  91
-rwxr-xr-x  bcmdhd.101.10.361.x/include/wl_bam.h  74
-rwxr-xr-x  bcmdhd.101.10.361.x/include/wl_bigdata.h  81
-rwxr-xr-x  bcmdhd.101.10.361.x/include/wldev_common.h  135
-rwxr-xr-x  bcmdhd.101.10.361.x/include/wlfc_proto.h  496
-rwxr-xr-x  bcmdhd.101.10.361.x/include/wlioctl.h  25850
-rwxr-xr-x  bcmdhd.101.10.361.x/include/wlioctl_defs.h  2514
-rwxr-xr-x  bcmdhd.101.10.361.x/include/wlioctl_utils.h  60
-rwxr-xr-x  bcmdhd.101.10.361.x/include/wpa.h  306
-rwxr-xr-x  bcmdhd.101.10.361.x/include/wps.h  379
-rwxr-xr-x  bcmdhd.101.10.361.x/linux_osl.c  2197
-rwxr-xr-x  bcmdhd.101.10.361.x/linux_osl_priv.h  188
-rwxr-xr-x  bcmdhd.101.10.361.x/linux_pkt.c  897
-rwxr-xr-x  bcmdhd.101.10.361.x/nciutils.c  3095
-rwxr-xr-x  bcmdhd.101.10.361.x/pcie_core.c  227
-rwxr-xr-x  bcmdhd.101.10.361.x/pom.h  70
-rwxr-xr-x  bcmdhd.101.10.361.x/sbutils.c  1111
-rwxr-xr-x  bcmdhd.101.10.361.x/siutils.c  10249
-rwxr-xr-x  bcmdhd.101.10.361.x/siutils_priv.h  513
-rwxr-xr-x  bcmdhd.101.10.361.x/wb_regon_coordinator.c  444
-rwxr-xr-x  bcmdhd.101.10.361.x/wifi_stats.h  377
-rwxr-xr-x  bcmdhd.101.10.361.x/wl_android.c  14244
-rwxr-xr-x  bcmdhd.101.10.361.x/wl_android.h  252
-rwxr-xr-x  bcmdhd.101.10.361.x/wl_android_ext.c  4043
-rwxr-xr-x  bcmdhd.101.10.361.x/wl_android_ext.h  175
-rwxr-xr-x  bcmdhd.101.10.361.x/wl_bam.c  643
-rwxr-xr-x  bcmdhd.101.10.361.x/wl_bigdata.c  575
-rwxr-xr-x  bcmdhd.101.10.361.x/wl_cfg80211.c  22880
-rwxr-xr-x  bcmdhd.101.10.361.x/wl_cfg80211.h  3087
-rwxr-xr-x  bcmdhd.101.10.361.x/wl_cfg_btcoex.c  601
-rwxr-xr-x  bcmdhd.101.10.361.x/wl_cfgnan.c  9473
-rwxr-xr-x  bcmdhd.101.10.361.x/wl_cfgnan.h  959
-rwxr-xr-x  bcmdhd.101.10.361.x/wl_cfgp2p.c  2811
-rwxr-xr-x  bcmdhd.101.10.361.x/wl_cfgp2p.h  488
-rwxr-xr-x  bcmdhd.101.10.361.x/wl_cfgscan.c  5637
-rwxr-xr-x  bcmdhd.101.10.361.x/wl_cfgscan.h  178
-rwxr-xr-x  bcmdhd.101.10.361.x/wl_cfgvendor.c  10061
-rwxr-xr-x  bcmdhd.101.10.361.x/wl_cfgvendor.h  855
-rwxr-xr-x  bcmdhd.101.10.361.x/wl_cfgvif.c  6601
-rwxr-xr-x  bcmdhd.101.10.361.x/wl_cfgvif.h  251
-rwxr-xr-x  bcmdhd.101.10.361.x/wl_dbg.h  1544
-rwxr-xr-x  bcmdhd.101.10.361.x/wl_escan.c  1767
-rwxr-xr-x  bcmdhd.101.10.361.x/wl_escan.h  89
-rwxr-xr-x  bcmdhd.101.10.361.x/wl_event.c  556
-rwxr-xr-x  bcmdhd.101.10.361.x/wl_event.h  18
-rwxr-xr-x  bcmdhd.101.10.361.x/wl_export.h  285
-rwxr-xr-x  bcmdhd.101.10.361.x/wl_ext_genl.c  568
-rwxr-xr-x  bcmdhd.101.10.361.x/wl_iapsta.c  5748
-rwxr-xr-x  bcmdhd.101.10.361.x/wl_iapsta.h  85
-rwxr-xr-x  bcmdhd.101.10.361.x/wl_iw.c  4302
-rwxr-xr-x  bcmdhd.101.10.361.x/wl_iw.h  171
-rwxr-xr-x  bcmdhd.101.10.361.x/wl_linux_mon.c  412
-rwxr-xr-x  bcmdhd.101.10.361.x/wl_roam.c  548
-rwxr-xr-x  bcmdhd.101.10.361.x/wlc_types.h  714
-rwxr-xr-x  bcmdhd.101.10.361.x/wldev_common.c  537
307 files changed, 437997 insertions, 0 deletions
diff --git a/bcmdhd.101.10.361.x/Kconfig b/bcmdhd.101.10.361.x/Kconfig
new file mode 100755
index 0000000..f49ae76
--- /dev/null
+++ b/bcmdhd.101.10.361.x/Kconfig
@@ -0,0 +1,61 @@
+config BCMDHD
+ tristate "Broadcom FullMAC wireless cards support"
+ ---help---
+ This module adds support for wireless adapters based on
+ Broadcom FullMAC chipset.
+
+config BCMDHD_FW_PATH
+ depends on BCMDHD
+ string "Firmware path"
+ default "/system/etc/firmware/fw_bcmdhd.bin"
+ ---help---
+ Path to the firmware file.
+
+config BCMDHD_NVRAM_PATH
+ depends on BCMDHD
+ string "NVRAM path"
+ default "/system/etc/firmware/nvram.txt"
+ ---help---
+ Path to the calibration file.
+
+config BCMDHD_WEXT
+ bool "Enable WEXT support"
+ depends on BCMDHD && CFG80211 = n
+ select WIRELESS_EXT
+ select WEXT_PRIV
+ help
+ Enables WEXT support
+
+choice
+ prompt "Enable Chip Interface"
+ depends on BCMDHD
+ ---help---
+ Enable Chip Interface.
+config BCMDHD_SDIO
+ bool "SDIO bus interface support"
+ depends on BCMDHD && MMC
+config BCMDHD_PCIE
+ bool "PCIe bus interface support"
+ depends on BCMDHD && PCI
+config BCMDHD_USB
+ bool "USB bus interface support"
+ depends on BCMDHD && USB
+endchoice
+
+choice
+ depends on BCMDHD && BCMDHD_SDIO
+ prompt "Interrupt type"
+ ---help---
+ Interrupt type
+config BCMDHD_OOB
+ depends on BCMDHD && BCMDHD_SDIO
+ bool "Out-of-Band Interrupt"
+ default y
+ ---help---
+ Interrupt from WL_HOST_WAKE.
+config BCMDHD_SDIO_IRQ
+ depends on BCMDHD && BCMDHD_SDIO
+ bool "In-Band Interrupt"
+ ---help---
+ Interrupt from SDIO DAT[1]
+endchoice
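
The Kconfig entries above only define build-time symbols; the driver sees them as preprocessor macros, and Kconfig string options such as BCMDHD_FW_PATH expand to quoted string literals. A minimal sketch of how the firmware-path default could be consumed, assuming a hypothetical helper name (the real lookup lives elsewhere in the driver sources, not in this excerpt):

/* CONFIG_BCMDHD_FW_PATH comes from the Kconfig string option above;
 * dhd_fw_path_get() is a hypothetical helper, not a symbol from this patch.
 */
#ifdef CONFIG_BCMDHD_FW_PATH
#define DHD_FW_PATH_DEFAULT CONFIG_BCMDHD_FW_PATH
#else
#define DHD_FW_PATH_DEFAULT "/system/etc/firmware/fw_bcmdhd.bin"
#endif

static const char *
dhd_fw_path_get(const char *module_param)
{
	/* an insmod-time firmware path parameter wins over the build-time default */
	return (module_param && module_param[0] != '\0') ? module_param : DHD_FW_PATH_DEFAULT;
}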
diff --git a/bcmdhd.101.10.361.x/Makefile b/bcmdhd.101.10.361.x/Makefile
new file mode 100755
index 0000000..faf1fa5
--- /dev/null
+++ b/bcmdhd.101.10.361.x/Makefile
@@ -0,0 +1,391 @@
+# bcmdhd
+
+# if PCIe mode is not configured, use SDIO mode as the default
+ifeq ($(CONFIG_BCMDHD_PCIE),)
+$(info bcm SDIO driver configured)
+CONFIG_DHD_USE_STATIC_BUF := y
+endif
+
+ifeq ($(CONFIG_BCMDHD_SDIO),y)
+MODULE_NAME := dhd
+else
+ifeq ($(CONFIG_BCMDHD_USB),y)
+MODULE_NAME := bcmdhd
+else
+MODULE_NAME := dhdpci
+endif
+endif
+
+CONFIG_BCMDHD_ANDROID_VERSION := 13
+
+CONFIG_BCMDHD ?= m
+
+#CONFIG_BCMDHD_SDIO := y
+#CONFIG_BCMDHD_PCIE := y
+#CONFIG_BCMDHD_USB := y
+
+CONFIG_BCMDHD_OOB := y
+#CONFIG_BCMDHD_CUSB := y
+#CONFIG_BCMDHD_NO_POWER_OFF := y
+CONFIG_BCMDHD_PROPTXSTATUS := y
+CONFIG_DHD_USE_STATIC_BUF := y
+#CONFIG_BCMDHD_STATIC_BUF_IN_DHD := y
+CONFIG_BCMDHD_ANDROID_VERSION := 11
+CONFIG_BCMDHD_AUTO_SELECT := y
+CONFIG_BCMDHD_DEBUG := y
+#CONFIG_BCMDHD_TIMESTAMP := y
+#CONFIG_BCMDHD_WAPI := y
+#CONFIG_BCMDHD_RANDOM_MAC := y
+#CONFIG_BCMDHD_MULTIPLE_DRIVER := y
+CONFIG_BCMDHD_TPUT := y
+
+CONFIG_MACH_PLATFORM := y
+#CONFIG_BCMDHD_DTS := y
+
+ifndef CONFIG_KASAN
+ KBUILD_CFLAGS_MODULE += -Wframe-larger-than=3000
+endif
+
+DHDCFLAGS = -Wall -Wstrict-prototypes -Wno-date-time \
+ -Dlinux -DLINUX -DBCMDRIVER \
+ -Wno-unknown-warning-option \
+ -Wno-maybe-uninitialized -Wno-error -Wno-format-security \
+ -Wno-implicit-fallthrough \
+ -DBCMDONGLEHOST -DBCMDMA32 -DBCMFILEIMAGE \
+ -DDHDTHREAD -DDHD_DEBUG -DSHOW_EVENTS -DGET_OTP_MAC_ENABLE \
+ -DWIFI_ACT_FRAME -DARP_OFFLOAD_SUPPORT -DSUPPORT_PM2_ONLY \
+ -DKEEP_ALIVE -DPKT_FILTER_SUPPORT -DDHDTCPACK_SUPPRESS \
+ -DDHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT -DOEM_ANDROID \
+ -DMULTIPLE_SUPPLICANT -DTSQ_MULTIPLIER -DMFP -DDHD_8021X_DUMP \
+ -DPOWERUP_MAX_RETRY=1 -DIFACE_HANG_FORCE_DEV_CLOSE -DWAIT_DEQUEUE \
+ -DUSE_NEW_RSPEC_DEFS \
+ -DWL_EXT_IAPSTA -DWL_ESCAN -DCCODE_LIST -DSUSPEND_EVENT \
+ -DEAPOL_RESEND -DEAPOL_DYNAMATIC_RESEND \
+ -DENABLE_INSMOD_NO_FW_LOAD
+
+DHDOFILES = aiutils.o siutils.o sbutils.o bcmutils.o bcmwifi_channels.o \
+ dhd_linux.o dhd_linux_platdev.o dhd_linux_sched.o dhd_pno.o \
+ dhd_common.o dhd_ip.o dhd_linux_wq.o dhd_custom_gpio.o \
+ bcmevent.o hndpmu.o linux_osl.o wldev_common.o wl_android.o \
+ dhd_debug_linux.o dhd_debug.o dhd_mschdbg.o dhd_dbg_ring.o \
+ hnd_pktq.o hnd_pktpool.o bcmxtlv.o linux_pkt.o bcmstdlib_s.o frag.o \
+ dhd_linux_exportfs.o dhd_linux_pktdump.o dhd_mschdbg.o \
+ dhd_config.o dhd_ccode.o wl_event.o wl_android_ext.o \
+ wl_iapsta.o wl_escan.o
+
+ifneq ($(CONFIG_WIRELESS_EXT),)
+ DHDOFILES += wl_iw.o
+ DHDCFLAGS += -DSOFTAP -DWL_WIRELESS_EXT -DUSE_IW
+endif
+ifneq ($(CONFIG_CFG80211),)
+ DHDOFILES += wl_cfg80211.o wl_cfgscan.o wl_cfgp2p.o
+ DHDOFILES += wl_linux_mon.o wl_cfg_btcoex.o wl_cfgvendor.o
+ DHDOFILES += dhd_cfg80211.o wl_cfgvif.o
+ DHDCFLAGS += -DWL_CFG80211 -DWLP2P -DWL_CFG80211_STA_EVENT
+ DHDCFLAGS += -DWL_IFACE_COMB_NUM_CHANNELS
+ DHDCFLAGS += -DCUSTOM_PNO_EVENT_LOCK_xTIME=10
+ DHDCFLAGS += -DWL_SUPPORT_AUTO_CHANNEL
+ DHDCFLAGS += -DWL_SUPPORT_BACKPORTED_KPATCHES
+ DHDCFLAGS += -DESCAN_RESULT_PATCH -DESCAN_BUF_OVERFLOW_MGMT
+ DHDCFLAGS += -DVSDB -DWL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+ DHDCFLAGS += -DWLTDLS -DMIRACAST_AMPDU_SIZE=8
+ DHDCFLAGS += -DWL_VIRTUAL_APSTA
+ DHDCFLAGS += -DPNO_SUPPORT -DEXPLICIT_DISCIF_CLEANUP
+ DHDCFLAGS += -DDHD_USE_SCAN_WAKELOCK
+ DHDCFLAGS += -DSPECIFIC_MAC_GEN_SCHEME
+ DHDCFLAGS += -DWL_IFACE_MGMT
+ DHDCFLAGS += -DWLFBT
+ DHDCFLAGS += -DWL_EXT_RECONNECT
+ DHDCFLAGS += -DDHD_LOSSLESS_ROAMING
+ DHDCFLAGS += -DGTK_OFFLOAD_SUPPORT
+ DHDCFLAGS += -DWL_STATIC_IF
+ DHDCFLAGS += -DWL_CLIENT_SAE -DWL_OWE
+endif
+
+#BCMDHD_SDIO
+ifneq ($(CONFIG_BCMDHD_SDIO),)
+BUS_TYPE := "sdio"
+DHDCFLAGS += -DBCMSDIO -DMMC_SDIO_ABORT -DMMC_SW_RESET -DBCMLXSDMMC \
+ -DUSE_SDIOFIFO_IOVAR -DSDTEST \
+ -DBDC -DDHD_USE_IDLECOUNT -DCUSTOM_SDIO_F2_BLKSIZE=256 \
+ -DBCMSDIOH_TXGLOM -DBCMSDIOH_TXGLOM_EXT -DRXFRAME_THREAD \
+ -DDHDENABLE_TAILPAD -DSUPPORT_P2P_GO_PS \
+ -DBCMSDIO_RXLIM_POST -DBCMSDIO_TXSEQ_SYNC -DCONSOLE_DPC \
+ -DBCMSDIO_INTSTATUS_WAR
+ifeq ($(CONFIG_BCMDHD_OOB),y)
+ DHDCFLAGS += -DOOB_INTR_ONLY -DCUSTOMER_OOB -DHW_OOB
+ifeq ($(CONFIG_BCMDHD_DISABLE_WOWLAN),y)
+ DHDCFLAGS += -DDISABLE_WOWLAN
+endif
+else
+ DHDCFLAGS += -DSDIO_ISR_THREAD
+endif
+DHDOFILES += bcmsdh.o bcmsdh_linux.o bcmsdh_sdmmc.o bcmsdh_sdmmc_linux.o \
+ dhd_sdio.o dhd_cdc.o dhd_wlfc.o
+endif
+
+#BCMDHD_PCIE
+ifneq ($(CONFIG_BCMDHD_PCIE),)
+BUS_TYPE := "pcie"
+DHDCFLAGS += -DPCIE_FULL_DONGLE -DBCMPCIE -DCUSTOM_DPC_PRIO_SETTING=-1 \
+ -DDONGLE_ENABLE_ISOLATION
+DHDCFLAGS += -DDHD_LB -DDHD_LB_RXP -DDHD_LB_STATS -DDHD_LB_TXP
+DHDCFLAGS += -DDHD_PKTID_AUDIT_ENABLED
+DHDCFLAGS += -DINSMOD_FW_LOAD
+DHDCFLAGS += -DCONFIG_HAS_WAKELOCK
+#DHDCFLAGS += -DDHD_PCIE_RUNTIMEPM -DMAX_IDLE_COUNT=11 -DCUSTOM_DHD_RUNTIME_MS=100
+ifeq ($(CONFIG_BCMDHD_OOB),y)
+ DHDCFLAGS += -DCUSTOMER_OOB -DBCMPCIE_OOB_HOST_WAKE
+endif
+ifneq ($(CONFIG_PCI_MSI),)
+ DHDCFLAGS += -DDHD_MSI_SUPPORT
+endif
+CONFIG_BCMDHD_NO_POWER_OFF := y
+DHDCFLAGS += -DDHD_DISABLE_ASPM
+#DHDCFLAGS += -DUSE_AML_PCIE_TEE_MEM
+DHDOFILES += dhd_pcie.o dhd_pcie_linux.o pcie_core.o dhd_flowring.o \
+ dhd_msgbuf.o dhd_linux_lb.o
+endif
+
+#BCMDHD_USB
+ifneq ($(CONFIG_BCMDHD_USB),)
+BUS_TYPE := "usb"
+DHDCFLAGS += -DUSBOS_TX_THREAD -DBCMDBUS -DBCMTRXV2 -DDBUS_USB_LOOPBACK \
+ -DBDC
+DHDCFLAGS += -DBCM_REQUEST_FW -DEXTERNAL_FW_PATH
+CONFIG_BCMDHD_NO_POWER_OFF := y
+ifneq ($(CONFIG_BCMDHD_CUSB),)
+ DHDCFLAGS += -DBCMUSBDEV_COMPOSITE
+ CONFIG_BCMDHD_NO_POWER_OFF := y
+endif
+DHDOFILES += dbus.o dbus_usb.o dbus_usb_linux.o dhd_cdc.o dhd_wlfc.o
+endif
+
+ifeq ($(CONFIG_BCMDHD_NO_POWER_OFF),y)
+ DHDCFLAGS += -DENABLE_INSMOD_NO_FW_LOAD
+ DHDCFLAGS += -DENABLE_INSMOD_NO_POWER_OFF -DNO_POWER_OFF_AFTER_OPEN
+endif
+
+ifeq ($(CONFIG_BCMDHD_MULTIPLE_DRIVER),y)
+ DHDCFLAGS += -DBCMDHD_MDRIVER
+ DHDCFLAGS += -DBUS_TYPE=\"-$(BUS_TYPE)\"
+ DHDCFLAGS += -DDHD_LOG_PREFIX=\"[dhd-$(BUS_TYPE)]\"
+ MODULE_NAME := dhd$(BUS_TYPE)
+else
+ DHDCFLAGS += -DBUS_TYPE=\"\"
+endif
+
+ifeq ($(CONFIG_BCMDHD_TIMESTAMP),y)
+ DHDCFLAGS += -DKERNEL_TIMESTAMP
+ DHDCFLAGS += -DSYSTEM_TIMESTAMP
+endif
+
+#PROPTXSTATUS
+ifeq ($(CONFIG_BCMDHD_PROPTXSTATUS),y)
+ifneq ($(CONFIG_BCMDHD_USB),)
+ DHDCFLAGS += -DPROP_TXSTATUS
+endif
+ifneq ($(CONFIG_BCMDHD_SDIO),)
+ DHDCFLAGS += -DPROP_TXSTATUS -DPROPTX_MAXCOUNT
+endif
+ifneq ($(CONFIG_CFG80211),)
+ DHDCFLAGS += -DPROP_TXSTATUS_VSDB
+endif
+endif
+
+ifeq ($(CONFIG_64BIT),y)
+ DHDCFLAGS := $(filter-out -DBCMDMA32,$(DHDCFLAGS))
+ DHDCFLAGS += -DBCMDMA64OSL
+endif
+
+# For Android VTS
+ifneq ($(CONFIG_BCMDHD_ANDROID_VERSION),)
+ DHDCFLAGS += -DANDROID_VERSION=$(CONFIG_BCMDHD_ANDROID_VERSION)
+ DHDCFLAGS += -DDHD_NOTIFY_MAC_CHANGED
+ifneq ($(CONFIG_CFG80211),)
+ DHDCFLAGS += -DGSCAN_SUPPORT -DRTT_SUPPORT -DLINKSTAT_SUPPORT
+ DHDCFLAGS += -DCUSTOM_COUNTRY_CODE -DDHD_GET_VALID_CHANNELS
+ DHDCFLAGS += -DDEBUGABILITY -DDBG_PKT_MON
+ DHDCFLAGS += -DDHD_LOG_DUMP -DDHD_FW_COREDUMP
+ DHDCFLAGS += -DAPF -DNDO_CONFIG_SUPPORT -DRSSI_MONITOR_SUPPORT
+ DHDCFLAGS += -DDHD_WAKE_STATUS
+ DHDOFILES += dhd_rtt.o bcm_app_utils.o
+endif
+else
+ DHDCFLAGS += -DANDROID_VERSION=0
+endif
+
+# For Debug
+ifeq ($(CONFIG_BCMDHD_DEBUG),y)
+ DHDCFLAGS += -DDHD_ARP_DUMP -DDHD_DHCP_DUMP -DDHD_ICMP_DUMP
+ DHDCFLAGS += -DDHD_DNS_DUMP -DDHD_TRX_DUMP
+ DHDCFLAGS += -DTPUT_MONITOR
+# DHDCFLAGS += -DSCAN_SUPPRESS -DBSSCACHE
+ DHDCFLAGS += -DCHECK_DOWNLOAD_FW
+ DHDCFLAGS += -DPKT_STATICS
+ DHDCFLAGS += -DKSO_DEBUG
+# DHDCFLAGS += -DDHD_PKTDUMP_TOFW
+endif
+
+# For Debug2
+ifeq ($(CONFIG_BCMDHD_DEBUG2),y)
+ DHDCFLAGS += -DDEBUGFS_CFG80211
+ DHDCFLAGS += -DSHOW_LOGTRACE -DDHD_LOG_DUMP -DDHD_FW_COREDUMP
+ DHDCFLAGS += -DBCMASSERT_LOG -DSI_ERROR_ENFORCE
+ifneq ($(CONFIG_BCMDHD_PCIE),)
+ DHDCFLAGS += -DEWP_EDL
+ DHDCFLAGS += -DDNGL_EVENT_SUPPORT
+ DHDCFLAGS += -DDHD_SSSR_DUMP
+endif
+endif
+
+# MESH support for kernel 3.10 and later
+ifeq ($(CONFIG_WL_MESH),y)
+ DHDCFLAGS += -DWLMESH
+ifneq ($(CONFIG_CFG80211),)
+ DHDCFLAGS += -DWLMESH_CFG80211
+endif
+ifneq ($(CONFIG_BCMDHD_PCIE),)
+ DHDCFLAGS += -DBCM_HOST_BUF -DDMA_HOST_BUFFER_LEN=0x80000
+endif
+ DHDCFLAGS += -DDHD_UPDATE_INTF_MAC
+ DHDCFLAGS :=$(filter-out -DDHD_FW_COREDUMP,$(DHDCFLAGS))
+ DHDCFLAGS :=$(filter-out -DWL_STATIC_IF,$(DHDCFLAGS))
+endif
+
+ifeq ($(CONFIG_BCMDHD_EASYMESH),y)
+ DHDCFLAGS :=$(filter-out -DDHD_FW_COREDUMP,$(DHDCFLAGS))
+ DHDCFLAGS :=$(filter-out -DDHD_LOG_DUMP,$(DHDCFLAGS))
+ DHDCFLAGS += -DWLEASYMESH -DWL_STATIC_IF -DWLDWDS -DFOURADDR_AUTO_BRG
+endif
+
+#CSI_SUPPORT
+ifeq ($(CONFIG_CSI_SUPPORT),y)
+ DHDCFLAGS += -DCSI_SUPPORT
+ DHDOFILES += dhd_csi.o
+endif
+
+# For TPUT_IMPROVE
+ifeq ($(CONFIG_BCMDHD_TPUT),y)
+ DHDCFLAGS += -DDHD_TPUT_PATCH
+ DHDCFLAGS += -DTCPACK_INFO_MAXNUM=10 -DTCPDATA_INFO_MAXNUM=10
+ifneq ($(CONFIG_BCMDHD_SDIO),)
+ DHDCFLAGS += -DDYNAMIC_MAX_HDR_READ
+ DHDCFLAGS :=$(filter-out -DSDTEST,$(DHDCFLAGS))
+endif
+ifneq ($(CONFIG_BCMDHD_PCIE),)
+ DHDCFLAGS += -DDHD_LB_TXP_DEFAULT_ENAB
+ DHDCFLAGS += -DSET_RPS_CPUS -DSET_XPS_CPUS
+ DHDCFLAGS += -DDHD_LB_PRIMARY_CPUS=0xF0 -DDHD_LB_SECONDARY_CPUS=0x0E
+endif
+endif
+
+# For Zero configure
+ifeq ($(CONFIG_BCMDHD_ZEROCONFIG),y)
+ DHDCFLAGS += -DWL_EXT_GENL -DSENDPROB
+ DHDOFILES += wl_ext_genl.o
+endif
+
+# For WAPI
+ifeq ($(CONFIG_BCMDHD_WAPI),y)
+ DHDCFLAGS += -DBCMWAPI_WPI -DBCMWAPI_WAI
+endif
+
+# For scan random mac
+ifneq ($(CONFIG_BCMDHD_RANDOM_MAC),)
+ifneq ($(CONFIG_CFG80211),)
+ DHDCFLAGS += -DSUPPORT_RANDOM_MAC_SCAN -DWL_USE_RANDOMIZED_SCAN
+endif
+endif
+
+# For NAN
+ifneq ($(CONFIG_BCMDHD_NAN),)
+ DHDCFLAGS += -DWL_NAN -DWL_NAN_DISC_CACHE
+ DHDOFILES += wl_cfgnan.o bcmbloom.o
+endif
+
+# For Module auto-selection
+ifeq ($(CONFIG_BCMDHD_AUTO_SELECT),y)
+ DHDCFLAGS += -DUPDATE_MODULE_NAME
+ifneq ($(CONFIG_BCMDHD_SDIO),)
+ DHDCFLAGS += -DGET_OTP_MODULE_NAME -DCOMPAT_OLD_MODULE
+endif
+endif
+
+ifeq ($(CONFIG_BCMDHD),m)
+ DHDCFLAGS += -DBCMDHD_MODULAR
+endif
+
+ifeq ($(CONFIG_MACH_PLATFORM),y)
+ DHDOFILES += dhd_gpio.o
+ifeq ($(CONFIG_BCMDHD_DTS),y)
+ DHDCFLAGS += -DBCMDHD_DTS
+endif
+ DHDCFLAGS += -DCUSTOMER_HW -DDHD_OF_SUPPORT
+ DHDCFLAGS += -DCUSTOMER_HW_AMLOGIC
+
+# for config custom MAC
+# DHDCFLAGS += -DGET_CUSTOM_MAC_ENABLE -DCUSTOM_MULTI_MAC
+# if also need config AP MAC
+# DHDCFLAGS += -DCUSTOM_AP_MAC
+#
+endif
+
+ifeq ($(CONFIG_BCMDHD_AG),y)
+ DHDCFLAGS += -DBAND_AG
+endif
+
+ifeq ($(CONFIG_DHD_USE_STATIC_BUF),y)
+ifeq ($(CONFIG_BCMDHD_STATIC_BUF_IN_DHD),y)
+ DHDOFILES += dhd_static_buf.o
+ DHDCFLAGS += -DDHD_STATIC_IN_DRIVER
+else
+# obj-m += dhd_static_buf.o
+endif
+ DHDCFLAGS += -DSTATIC_WL_PRIV_STRUCT -DENHANCED_STATIC_BUF
+ DHDCFLAGS += -DCONFIG_DHD_USE_STATIC_BUF
+ DHDCFLAGS += -DDHD_USE_STATIC_MEMDUMP
+ifneq ($(CONFIG_BCMDHD_PCIE),)
+ DHDCFLAGS += -DDHD_USE_STATIC_CTRLBUF
+endif
+endif
+
+ARCH ?= arm64
+CROSS_COMPILE ?=aarch64-linux-gnu-
+KDIR ?=../../../../../../common
+
+BCMDHD_ROOT = $(src)
+#$(warning "BCMDHD_ROOT=$(BCMDHD_ROOT)")
+EXTRA_CFLAGS = $(DHDCFLAGS)
+EXTRA_CFLAGS += -DDHD_COMPILED=\"$(BCMDHD_ROOT)\"
+EXTRA_CFLAGS += -I$(BCMDHD_ROOT)/include/ -I$(BCMDHD_ROOT)/
+ifeq ($(CONFIG_BCMDHD),m)
+EXTRA_LDFLAGS += --strip-debug
+endif
+
+obj-$(CONFIG_BCMDHD) += $(MODULE_NAME).o
+$(MODULE_NAME)-objs += $(DHDOFILES)
+ccflags-y := $(EXTRA_CFLAGS)
+
+#all: bcmdhd_sdio bcmdhd_usb
+all: bcmdhd_sdio
+
+EXTRA_CFLAGS += -I$(KERNEL_SRC)/$(M)/include -I$(KERNEL_SRC)/$(M)/
+modules_install:
+ @$(MAKE) INSTALL_MOD_STRIP=1 M=$(M) -C $(KERNEL_SRC) modules_install
+ mkdir -p ${OUT_DIR}/../private/modules
+ cd ${OUT_DIR}/$(M)/; find -name "*.ko" -exec cp {} ${OUT_DIR}/../private/modules/ \;
+
+bcmdhd_sdio:
+ $(warning "building BCMDHD_SDIO..........")
+ $(MAKE) -C $(KERNEL_SRC) M=$(M) CONFIG_DHD_USE_STATIC_BUF=y CONFIG_BCMDHD_SDIO=y modules
+
+bcmdhd_usb:
+ $(warning "building BCMDHD_USB..........")
+ $(MAKE) -C $(KDIR) M=$(PWD) ARCH=$(ARCH) CROSS_COMPILE=$(CROSS_COMPILE) modules CONFIG_BCMDHD_USB=y
+ mv dhd.ko dhd_usb.ko
+
+clean:
+ $(MAKE) -C $(KDIR) M=$(PWD) ARCH=$(ARCH) clean
+ $(RM) Module.markers
+ $(RM) modules.order
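
Every DHDCFLAGS entry above reaches the C sources as a plain -D define. As a hedged sketch of what the CONFIG_BCMDHD_OOB=y branch (which adds -DOOB_INTR_ONLY) selects between, mirroring the Kconfig interrupt-type choice; the function names here are illustrative, not symbols from this patch:

/* Sketch only: OOB_INTR_ONLY is the define added when CONFIG_BCMDHD_OOB=y.
 * Both helpers and the wrapper are hypothetical stand-ins.
 */
static int request_oob_gpio_irq_example(void)    { return 0; /* arm WL_HOST_WAKE GPIO IRQ */ }
static int request_sdio_inband_irq_example(void) { return 0; /* claim SDIO DAT[1] IRQ */ }

static int
dhd_request_irq_example(void)
{
#ifdef OOB_INTR_ONLY
	return request_oob_gpio_irq_example();    /* out-of-band interrupt path */
#else
	return request_sdio_inband_irq_example(); /* in-band interrupt path */
#endif
}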
diff --git a/bcmdhd.101.10.361.x/aiutils.c b/bcmdhd.101.10.361.x/aiutils.c
new file mode 100755
index 0000000..b8b9555
--- /dev/null
+++ b/bcmdhd.101.10.361.x/aiutils.c
@@ -0,0 +1,2604 @@
+/*
+ * Misc utility routines for accessing chip-specific features
+ * of the SiliconBackplane-based Broadcom chips.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <siutils.h>
+#include <hndsoc.h>
+#include <sbchipc.h>
+#include <pcicfg.h>
+#include <pcie_core.h>
+
+#include "siutils_priv.h"
+#include <bcmdevs.h>
+
+#if defined(ETD)
+#include <etd.h>
+#endif
+
+#if !defined(BCMDONGLEHOST)
+#define PMU_DMP() (cores_info->coreid[sii->curidx] == PMU_CORE_ID)
+#define GCI_DMP() (cores_info->coreid[sii->curidx] == GCI_CORE_ID)
+#else
+#define PMU_DMP() (0)
+#define GCI_DMP() (0)
+#endif /* !defined(BCMDONGLEHOST) */
+
+#if defined(AXI_TIMEOUTS_NIC)
+static bool ai_get_apb_bridge(const si_t *sih, uint32 coreidx, uint32 *apb_id,
+ uint32 *apb_coreunit);
+#endif /* AXI_TIMEOUTS_NIC */
+
+#if defined (AXI_TIMEOUTS) || defined (AXI_TIMEOUTS_NIC)
+static void ai_reset_axi_to(const si_info_t *sii, aidmp_t *ai);
+#endif /* defined (AXI_TIMEOUTS) || defined (AXI_TIMEOUTS_NIC) */
+
+#ifdef DONGLEBUILD
+static uint32 ai_get_sizeof_wrapper_offsets_to_dump(void);
+static uint32 ai_get_wrapper_base_addr(uint32 **offset);
+#endif /* DONGLEBUILD */
+
+/* AXI ID to CoreID + unit mappings */
+typedef struct axi_to_coreidx {
+ uint coreid;
+ uint coreunit;
+} axi_to_coreidx_t;
+
+static const axi_to_coreidx_t axi2coreidx_4369[] = {
+ {CC_CORE_ID, 0}, /* 00 Chipcommon */
+ {PCIE2_CORE_ID, 0}, /* 01 PCIe */
+ {D11_CORE_ID, 0}, /* 02 D11 Main */
+ {ARMCR4_CORE_ID, 0}, /* 03 ARM */
+ {BT_CORE_ID, 0}, /* 04 BT AHB */
+ {D11_CORE_ID, 1}, /* 05 D11 Aux */
+ {D11_CORE_ID, 0}, /* 06 D11 Main l1 */
+ {D11_CORE_ID, 1}, /* 07 D11 Aux l1 */
+ {D11_CORE_ID, 0}, /* 08 D11 Main l2 */
+ {D11_CORE_ID, 1}, /* 09 D11 Aux l2 */
+ {NODEV_CORE_ID, 0}, /* 10 M2M DMA */
+ {NODEV_CORE_ID, 0}, /* 11 unused */
+ {NODEV_CORE_ID, 0}, /* 12 unused */
+ {NODEV_CORE_ID, 0}, /* 13 unused */
+ {NODEV_CORE_ID, 0}, /* 14 unused */
+ {NODEV_CORE_ID, 0} /* 15 unused */
+};
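/*
 * Editor's illustration, not part of the patch: the table above maps an AXI
 * ID (e.g. from an AXI error report) back to a core/unit pair. The function
 * name is hypothetical; ARRAYSIZE() is the usual helper from bcmutils.h.
 */
static bool
axi_id_to_core_example(uint axi_id, uint *coreid, uint *coreunit)
{
	if (axi_id >= ARRAYSIZE(axi2coreidx_4369))
		return FALSE;
	*coreid = axi2coreidx_4369[axi_id].coreid;
	*coreunit = axi2coreidx_4369[axi_id].coreunit;
	return (*coreid != NODEV_CORE_ID);
}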
+
+/* EROM parsing */
+
+static uint32
+get_erom_ent(const si_t *sih, uint32 **eromptr, uint32 mask, uint32 match)
+{
+ uint32 ent;
+ uint inv = 0, nom = 0;
+ uint32 size = 0;
+
+ while (TRUE) {
+ ent = R_REG(SI_INFO(sih)->osh, *eromptr);
+ (*eromptr)++;
+
+ if (mask == 0)
+ break;
+
+ if ((ent & ER_VALID) == 0) {
+ inv++;
+ continue;
+ }
+
+ if (ent == (ER_END | ER_VALID))
+ break;
+
+ if ((ent & mask) == match)
+ break;
+
+ /* escape condition: bound the scan by EROM size in case of invalid entries */
+ size += sizeof(*eromptr);
+ if (size >= ER_SZ_MAX) {
+ SI_ERROR(("Failed to find end of EROM marker\n"));
+ break;
+ }
+
+ nom++;
+ }
+
+ SI_VMSG(("get_erom_ent: Returning ent 0x%08x\n", ent));
+ if (inv + nom) {
+ SI_VMSG((" after %d invalid and %d non-matching entries\n", inv, nom));
+ }
+ return ent;
+}
+
+static uint32
+get_asd(const si_t *sih, uint32 **eromptr, uint sp, uint ad, uint st, uint32 *addrl, uint32 *addrh,
+ uint32 *sizel, uint32 *sizeh)
+{
+ uint32 asd, sz, szd;
+
+ BCM_REFERENCE(ad);
+
+ asd = get_erom_ent(sih, eromptr, ER_VALID, ER_VALID);
+ if (((asd & ER_TAG1) != ER_ADD) ||
+ (((asd & AD_SP_MASK) >> AD_SP_SHIFT) != sp) ||
+ ((asd & AD_ST_MASK) != st)) {
+ /* This is not what we want, "push" it back */
+ (*eromptr)--;
+ return 0;
+ }
+ *addrl = asd & AD_ADDR_MASK;
+ if (asd & AD_AG32)
+ *addrh = get_erom_ent(sih, eromptr, 0, 0);
+ else
+ *addrh = 0;
+ *sizeh = 0;
+ sz = asd & AD_SZ_MASK;
+ if (sz == AD_SZ_SZD) {
+ szd = get_erom_ent(sih, eromptr, 0, 0);
+ *sizel = szd & SD_SZ_MASK;
+ if (szd & SD_SG32)
+ *sizeh = get_erom_ent(sih, eromptr, 0, 0);
+ } else
+ *sizel = AD_SZ_BASE << (sz >> AD_SZ_SHIFT);
+
+ SI_VMSG((" SP %d, ad %d: st = %d, 0x%08x_0x%08x @ 0x%08x_0x%08x\n",
+ sp, ad, st, *sizeh, *sizel, *addrh, *addrl));
+
+ return asd;
+}
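/*
 * Editor's illustration, not part of the patch: the simple size encoding
 * decoded at the end of get_asd(). Assuming AD_SZ_BASE is 4KB (aidmp.h),
 * a size field of n yields 4KB << n, i.e. 0 -> 4KB, 1 -> 8KB, 2 -> 16KB, ...
 * The AD_SZ_SZD case instead reads a separate size descriptor, as above.
 */
static uint32
asd_size_decode_example(uint32 asd)
{
	uint32 sz = asd & AD_SZ_MASK;
	return AD_SZ_BASE << (sz >> AD_SZ_SHIFT);	/* mirrors the non-SZD branch */
}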
+
+/* Parse the enumeration rom to identify all cores */
+void
+BCMATTACHFN(ai_scan)(si_t *sih, void *regs, uint devid)
+{
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+ chipcregs_t *cc = (chipcregs_t *)regs;
+ uint32 erombase, *eromptr, *eromlim;
+ axi_wrapper_t * axi_wrapper = sii->axi_wrapper;
+
+ BCM_REFERENCE(devid);
+
+ erombase = R_REG(sii->osh, &cc->eromptr);
+
+ switch (BUSTYPE(sih->bustype)) {
+ case SI_BUS:
+ eromptr = (uint32 *)REG_MAP(erombase, SI_CORE_SIZE);
+ break;
+
+ case PCI_BUS:
+ /* Set wrappers address */
+ sii->curwrap = (void *)((uintptr)regs + SI_CORE_SIZE);
+
+ /* Now point the window at the erom */
+ OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, erombase);
+ eromptr = regs;
+ break;
+
+#ifdef BCMSDIO
+ case SPI_BUS:
+ case SDIO_BUS:
+ eromptr = (uint32 *)(uintptr)erombase;
+ break;
+#endif /* BCMSDIO */
+
+ default:
+ SI_ERROR(("Don't know how to do AXI enumeration on bus %d\n", sih->bustype));
+ ASSERT(0);
+ return;
+ }
+ eromlim = eromptr + (ER_REMAPCONTROL / sizeof(uint32));
+ sii->axi_num_wrappers = 0;
+
+ SI_VMSG(("ai_scan: regs = 0x%p, erombase = 0x%08x, eromptr = 0x%p, eromlim = 0x%p\n",
+ OSL_OBFUSCATE_BUF(regs), erombase,
+ OSL_OBFUSCATE_BUF(eromptr), OSL_OBFUSCATE_BUF(eromlim)));
+ while (eromptr < eromlim) {
+ uint32 cia, cib, cid, mfg, crev, nmw, nsw, nmp, nsp;
+ uint32 mpd, asd, addrl, addrh, sizel, sizeh;
+ uint i, j, idx;
+ bool br;
+
+ br = FALSE;
+
+ /* Grok a component */
+ cia = get_erom_ent(sih, &eromptr, ER_TAG, ER_CI);
+ if (cia == (ER_END | ER_VALID)) {
+ SI_VMSG(("Found END of erom after %d cores\n", sii->numcores));
+ return;
+ }
+
+ cib = get_erom_ent(sih, &eromptr, 0, 0);
+
+ if ((cib & ER_TAG) != ER_CI) {
+ SI_ERROR(("CIA not followed by CIB\n"));
+ goto error;
+ }
+
+ cid = (cia & CIA_CID_MASK) >> CIA_CID_SHIFT;
+ mfg = (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
+ crev = (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
+ nmw = (cib & CIB_NMW_MASK) >> CIB_NMW_SHIFT;
+ nsw = (cib & CIB_NSW_MASK) >> CIB_NSW_SHIFT;
+ nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
+ nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;
+
+#ifdef BCMDBG_SI
+ SI_VMSG(("Found component 0x%04x/0x%04x rev %d at erom addr 0x%p, with nmw = %d, "
+ "nsw = %d, nmp = %d & nsp = %d\n",
+ mfg, cid, crev, OSL_OBFUSCATE_BUF(eromptr - 1), nmw, nsw, nmp, nsp));
+#else
+ BCM_REFERENCE(crev);
+#endif
+
+ /* Include Default slave wrapper for timeout monitoring */
+ if ((nsp == 0 && nsw == 0) ||
+#if !defined(AXI_TIMEOUTS) && !defined(AXI_TIMEOUTS_NIC)
+ ((mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) ||
+#else
+ ((CHIPTYPE(sii->pub.socitype) == SOCI_NAI) &&
+ (mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) ||
+#endif /* !defined(AXI_TIMEOUTS) && !defined(AXI_TIMEOUTS_NIC) */
+ FALSE) {
+ continue;
+ }
+
+ if ((nmw + nsw == 0)) {
+ /* A component which is not a core */
+ /* Should record some info */
+ if (cid == OOB_ROUTER_CORE_ID) {
+ asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE,
+ &addrl, &addrh, &sizel, &sizeh);
+ if (asd != 0) {
+ if ((sii->oob_router != 0) && (sii->oob_router != addrl)) {
+ sii->oob_router1 = addrl;
+ } else {
+ sii->oob_router = addrl;
+ }
+ }
+ }
+ if ((cid != NS_CCB_CORE_ID) && (cid != PMU_CORE_ID) &&
+ (cid != GCI_CORE_ID) && (cid != SR_CORE_ID) &&
+ (cid != HUB_CORE_ID) && (cid != HND_OOBR_CORE_ID) &&
+ (cid != CCI400_CORE_ID) && (cid != SPMI_SLAVE_CORE_ID)) {
+ continue;
+ }
+ }
+
+ idx = sii->numcores;
+
+ cores_info->cia[idx] = cia;
+ cores_info->cib[idx] = cib;
+ cores_info->coreid[idx] = cid;
+
+ /* work around the fact that the variable buscoretype is used in _ai_setcoreidx()
+ * when checking PCIE_GEN2() for the PCI_BUS case before it is set up later;
+ * both the use and the setup happen in si_buscore_setup().
+ */
+ if (BUSTYPE(sih->bustype) == PCI_BUS &&
+ (cid == PCI_CORE_ID || cid == PCIE_CORE_ID || cid == PCIE2_CORE_ID)) {
+ sii->pub.buscoretype = (uint16)cid;
+ }
+
+ for (i = 0; i < nmp; i++) {
+ mpd = get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
+ if ((mpd & ER_TAG) != ER_MP) {
+ SI_ERROR(("Not enough MP entries for component 0x%x\n", cid));
+ goto error;
+ }
+ /* Record something? */
+ SI_VMSG((" Master port %d, mp: %d id: %d\n", i,
+ (mpd & MPD_MP_MASK) >> MPD_MP_SHIFT,
+ (mpd & MPD_MUI_MASK) >> MPD_MUI_SHIFT));
+ }
+
+ /* First Slave Address Descriptor should be port 0:
+ * the main register space for the core
+ */
+ asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel, &sizeh);
+ if (asd == 0) {
+ do {
+ /* Try again to see if it is a bridge */
+ asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, &addrh,
+ &sizel, &sizeh);
+ if (asd != 0)
+ br = TRUE;
+ else {
+ break;
+ }
+ } while (1);
+ } else {
+ if (addrl == 0 || sizel == 0) {
+ SI_ERROR((" Invalid ASD %x for slave port \n", asd));
+ goto error;
+ }
+ cores_info->coresba[idx] = addrl;
+ cores_info->coresba_size[idx] = sizel;
+ }
+
+ /* Get any more ASDs in first port */
+ j = 1;
+ do {
+ asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, &addrh,
+ &sizel, &sizeh);
+ /* Support ARM debug core ASD with address space > 4K */
+ if ((asd != 0) && (j == 1)) {
+ SI_VMSG(("Warning: sizel > 0x1000\n"));
+ cores_info->coresba2[idx] = addrl;
+ cores_info->coresba2_size[idx] = sizel;
+ }
+ j++;
+ } while (asd != 0);
+
+ /* Go through the ASDs for other slave ports */
+ for (i = 1; i < nsp; i++) {
+ j = 0;
+ do {
+ asd = get_asd(sih, &eromptr, i, j, AD_ST_SLAVE, &addrl, &addrh,
+ &sizel, &sizeh);
+ /* To get the first base address of second slave port */
+ if ((asd != 0) && (i == 1) && (j == 0)) {
+ cores_info->csp2ba[idx] = addrl;
+ cores_info->csp2ba_size[idx] = sizel;
+ }
+ if (asd == 0)
+ break;
+ j++;
+ } while (1);
+ if (j == 0) {
+ SI_ERROR((" SP %d has no address descriptors\n", i));
+ goto error;
+ }
+ }
+
+ /* Now get master wrappers */
+ for (i = 0; i < nmw; i++) {
+ asd = get_asd(sih, &eromptr, i, 0, AD_ST_MWRAP, &addrl, &addrh,
+ &sizel, &sizeh);
+ if (asd == 0) {
+ SI_ERROR(("Missing descriptor for MW %d\n", i));
+ goto error;
+ }
+ if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
+ SI_ERROR(("Master wrapper %d is not 4KB\n", i));
+ goto error;
+ }
+ if (i == 0) {
+ cores_info->wrapba[idx] = addrl;
+ } else if (i == 1) {
+ cores_info->wrapba2[idx] = addrl;
+ } else if (i == 2) {
+ cores_info->wrapba3[idx] = addrl;
+ }
+
+ if (axi_wrapper && (sii->axi_num_wrappers < SI_MAX_AXI_WRAPPERS)) {
+ axi_wrapper[sii->axi_num_wrappers].mfg = mfg;
+ axi_wrapper[sii->axi_num_wrappers].cid = cid;
+ axi_wrapper[sii->axi_num_wrappers].rev = crev;
+ axi_wrapper[sii->axi_num_wrappers].wrapper_type = AI_MASTER_WRAPPER;
+ axi_wrapper[sii->axi_num_wrappers].wrapper_addr = addrl;
+ sii->axi_num_wrappers++;
+ SI_VMSG(("MASTER WRAPPER: %d, mfg:%x, cid:%x,"
+ "rev:%x, addr:%x, size:%x\n",
+ sii->axi_num_wrappers, mfg, cid, crev, addrl, sizel));
+ }
+ }
+
+ /* And finally slave wrappers */
+ for (i = 0; i < nsw; i++) {
+ uint fwp = (nsp <= 1) ? 0 : 1;
+ asd = get_asd(sih, &eromptr, fwp + i, 0, AD_ST_SWRAP, &addrl, &addrh,
+ &sizel, &sizeh);
+ if (asd == 0) {
+ SI_ERROR(("Missing descriptor for SW %d cid %x eromp %p fwp %d \n",
+ i, cid, eromptr, fwp));
+ goto error;
+ }
+
+ if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
+ SI_ERROR(("Slave wrapper %d is not 4KB\n", i));
+ goto error;
+ }
+
+ /* cache APB bridge wrapper address for set/clear timeout */
+ if ((mfg == MFGID_ARM) && (cid == APB_BRIDGE_ID)) {
+ ASSERT(sii->num_br < SI_MAXBR);
+ sii->br_wrapba[sii->num_br++] = addrl;
+ }
+
+ if ((mfg == MFGID_ARM) && (cid == ADB_BRIDGE_ID)) {
+ br = TRUE;
+ }
+
+ BCM_REFERENCE(br);
+
+ if ((nmw == 0) && (i == 0)) {
+ cores_info->wrapba[idx] = addrl;
+ } else if ((nmw == 0) && (i == 1)) {
+ cores_info->wrapba2[idx] = addrl;
+ } else if ((nmw == 0) && (i == 2)) {
+ cores_info->wrapba3[idx] = addrl;
+ }
+
+ /* Include all slave wrappers to the list to
+ * enable and monitor watchdog timeouts
+ */
+
+ if (axi_wrapper && (sii->axi_num_wrappers < SI_MAX_AXI_WRAPPERS)) {
+ axi_wrapper[sii->axi_num_wrappers].mfg = mfg;
+ axi_wrapper[sii->axi_num_wrappers].cid = cid;
+ axi_wrapper[sii->axi_num_wrappers].rev = crev;
+ axi_wrapper[sii->axi_num_wrappers].wrapper_type = AI_SLAVE_WRAPPER;
+ axi_wrapper[sii->axi_num_wrappers].wrapper_addr = addrl;
+
+ sii->axi_num_wrappers++;
+
+ SI_VMSG(("SLAVE WRAPPER: %d, mfg:%x, cid:%x,"
+ "rev:%x, addr:%x, size:%x\n",
+ sii->axi_num_wrappers, mfg, cid, crev, addrl, sizel));
+ }
+ }
+
+#ifndef AXI_TIMEOUTS_NIC
+ /* Don't record bridges and core with 0 slave ports */
+ if (br || (nsp == 0)) {
+ continue;
+ }
+#endif
+
+ /* Done with core */
+ sii->numcores++;
+ }
+
+ SI_ERROR(("Reached end of erom without finding END\n"));
+
+error:
+ sii->numcores = 0;
+ return;
+}
+
+#define AI_SETCOREIDX_MAPSIZE(coreid) \
+ (((coreid) == NS_CCB_CORE_ID) ? 15 * SI_CORE_SIZE : SI_CORE_SIZE)
+
+/* This function changes the logical "focus" to the indicated core.
+ * Return the current core's virtual address.
+ */
+static volatile void *
+BCMPOSTTRAPFN(_ai_setcoreidx)(si_t *sih, uint coreidx, uint use_wrapn)
+{
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+ uint32 addr, wrap, wrap2, wrap3;
+ volatile void *regs;
+
+ if (coreidx >= MIN(sii->numcores, SI_MAXCORES))
+ return (NULL);
+
+ addr = cores_info->coresba[coreidx];
+ wrap = cores_info->wrapba[coreidx];
+ wrap2 = cores_info->wrapba2[coreidx];
+ wrap3 = cores_info->wrapba3[coreidx];
+
+#ifdef AXI_TIMEOUTS_NIC
+ /* No need to disable interrupts while entering/exiting APB bridge core */
+ if ((cores_info->coreid[coreidx] != APB_BRIDGE_CORE_ID) &&
+ (cores_info->coreid[sii->curidx] != APB_BRIDGE_CORE_ID))
+#endif /* AXI_TIMEOUTS_NIC */
+ {
+ /*
+ * If the user has provided an interrupt mask enabled function,
+ * then assert interrupts are disabled before switching the core.
+ */
+ ASSERT((sii->intrsenabled_fn == NULL) ||
+ !(*(sii)->intrsenabled_fn)((sii)->intr_arg));
+ }
+
+ switch (BUSTYPE(sih->bustype)) {
+ case SI_BUS:
+ /* map new one */
+ if (!cores_info->regs[coreidx]) {
+ cores_info->regs[coreidx] = REG_MAP(addr,
+ AI_SETCOREIDX_MAPSIZE(cores_info->coreid[coreidx]));
+ ASSERT(GOODREGS(cores_info->regs[coreidx]));
+ }
+ sii->curmap = regs = cores_info->regs[coreidx];
+ if (!cores_info->wrappers[coreidx] && (wrap != 0)) {
+ cores_info->wrappers[coreidx] = REG_MAP(wrap, SI_CORE_SIZE);
+ ASSERT(GOODREGS(cores_info->wrappers[coreidx]));
+ }
+ if (!cores_info->wrappers2[coreidx] && (wrap2 != 0)) {
+ cores_info->wrappers2[coreidx] = REG_MAP(wrap2, SI_CORE_SIZE);
+ ASSERT(GOODREGS(cores_info->wrappers2[coreidx]));
+ }
+ if (!cores_info->wrappers3[coreidx] && (wrap3 != 0)) {
+ cores_info->wrappers3[coreidx] = REG_MAP(wrap3, SI_CORE_SIZE);
+ ASSERT(GOODREGS(cores_info->wrappers3[coreidx]));
+ }
+
+ if (use_wrapn == 2) {
+ sii->curwrap = cores_info->wrappers3[coreidx];
+ } else if (use_wrapn == 1) {
+ sii->curwrap = cores_info->wrappers2[coreidx];
+ } else {
+ sii->curwrap = cores_info->wrappers[coreidx];
+ }
+ break;
+
+ case PCI_BUS:
+ regs = sii->curmap;
+
+ /* point bar0 2nd 4KB window to the primary wrapper */
+ if (use_wrapn == 2) {
+ wrap = wrap3;
+ } else if (use_wrapn == 1) {
+ wrap = wrap2;
+ }
+
+ /* Use BAR0 Window to support dual mac chips... */
+
+ /* TODO: the other mac unit can't be supported by the current BAR0 window.
+ * need to find other ways to access these cores.
+ */
+
+ switch (sii->slice) {
+ case 0: /* main/first slice */
+#ifdef AXI_TIMEOUTS_NIC
+ /* No need to set the BAR0 if core is APB Bridge.
+ * This is to reduce 2 PCI writes while checking for errlog
+ */
+ if (cores_info->coreid[coreidx] != APB_BRIDGE_CORE_ID)
+#endif /* AXI_TIMEOUTS_NIC */
+ {
+ /* point bar0 window */
+ OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, addr);
+ }
+
+ if (PCIE_GEN2(sii))
+ OSL_PCI_WRITE_CONFIG(sii->osh, PCIE2_BAR0_WIN2, 4, wrap);
+ else
+ OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN2, 4, wrap);
+
+ break;
+
+ case 1: /* aux/second slice */
+ /* PCIE GEN2 only for other slices */
+ if (!PCIE_GEN2(sii)) {
+ /* other slices not supported */
+ SI_ERROR(("PCI GEN not supported for slice %d\n", sii->slice));
+ ASSERT(0);
+ break;
+ }
+
+ /* 0x4000 - 0x4fff: enum space 0x5000 - 0x5fff: wrapper space */
+ regs = (volatile uint8 *)regs + PCI_SEC_BAR0_WIN_OFFSET;
+ sii->curwrap = (void *)((uintptr)regs + SI_CORE_SIZE);
+
+ /* point bar0 window */
+ OSL_PCI_WRITE_CONFIG(sii->osh, PCIE2_BAR0_CORE2_WIN, 4, addr);
+ OSL_PCI_WRITE_CONFIG(sii->osh, PCIE2_BAR0_CORE2_WIN2, 4, wrap);
+ break;
+
+ case 2: /* scan/third slice */
+ /* PCIE GEN2 only for other slices */
+ if (!PCIE_GEN2(sii)) {
+ /* other slices not supported */
+ SI_ERROR(("PCI GEN not supported for slice %d\n", sii->slice));
+ ASSERT(0);
+ break;
+ }
+
+ /* 0x9000 - 0x9fff: enum space 0xa000 - 0xafff: wrapper space */
+ regs = (volatile uint8 *)regs + PCI_TER_BAR0_WIN_OFFSET;
+ sii->curwrap = (void *)((uintptr)regs + SI_CORE_SIZE);
+
+ /* point bar0 window */
+ ai_corereg(sih, sih->buscoreidx, PCIE_TER_BAR0_WIN, ~0, addr);
+ ai_corereg(sih, sih->buscoreidx, PCIE_TER_BAR0_WRAPPER, ~0, wrap);
+ break;
+
+ default: /* other slices */
+ SI_ERROR(("BAR0 Window not supported for slice %d\n", sii->slice));
+ ASSERT(0);
+ break;
+ }
+
+ break;
+
+#ifdef BCMSDIO
+ case SPI_BUS:
+ case SDIO_BUS:
+ sii->curmap = regs = (void *)((uintptr)addr);
+ if (use_wrapn)
+ sii->curwrap = (void *)((uintptr)wrap2);
+ else
+ sii->curwrap = (void *)((uintptr)wrap);
+ break;
+#endif /* BCMSDIO */
+
+ default:
+ ASSERT(0);
+ sii->curmap = regs = NULL;
+ break;
+ }
+
+ sii->curidx = coreidx;
+
+ return regs;
+}
+
+volatile void *
+BCMPOSTTRAPFN(ai_setcoreidx)(si_t *sih, uint coreidx)
+{
+ return _ai_setcoreidx(sih, coreidx, 0);
+}
+
+volatile void *
+BCMPOSTTRAPFN(ai_setcoreidx_2ndwrap)(si_t *sih, uint coreidx)
+{
+ return _ai_setcoreidx(sih, coreidx, 1);
+}
+
+volatile void *
+BCMPOSTTRAPFN(ai_setcoreidx_3rdwrap)(si_t *sih, uint coreidx)
+{
+ return _ai_setcoreidx(sih, coreidx, 2);
+}
+
+void
+ai_coreaddrspaceX(const si_t *sih, uint asidx, uint32 *addr, uint32 *size)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
+ chipcregs_t *cc = NULL;
+ uint32 erombase, *eromptr, *eromlim;
+ uint i, j, cidx;
+ uint32 cia, cib, nmp, nsp;
+ uint32 asd, addrl, addrh, sizel, sizeh;
+
+ for (i = 0; i < sii->numcores; i++) {
+ if (cores_info->coreid[i] == CC_CORE_ID) {
+ cc = (chipcregs_t *)cores_info->regs[i];
+ break;
+ }
+ }
+ if (cc == NULL)
+ goto error;
+
+ BCM_REFERENCE(erombase);
+ erombase = R_REG(sii->osh, &cc->eromptr);
+ eromptr = (uint32 *)REG_MAP(erombase, SI_CORE_SIZE);
+ eromlim = eromptr + (ER_REMAPCONTROL / sizeof(uint32));
+
+ cidx = sii->curidx;
+ cia = cores_info->cia[cidx];
+ cib = cores_info->cib[cidx];
+
+ nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
+ nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;
+
+ /* scan for cores */
+ while (eromptr < eromlim) {
+ if ((get_erom_ent(sih, &eromptr, ER_TAG, ER_CI) == cia) &&
+ (get_erom_ent(sih, &eromptr, 0, 0) == cib)) {
+ break;
+ }
+ }
+
+ /* skip master ports */
+ for (i = 0; i < nmp; i++)
+ get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
+
+ /* Skip ASDs in port 0 */
+ asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel, &sizeh);
+ if (asd == 0) {
+ /* Try again to see if it is a bridge */
+ asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, &addrh,
+ &sizel, &sizeh);
+ }
+
+ j = 1;
+ do {
+ asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, &addrh,
+ &sizel, &sizeh);
+ j++;
+ } while (asd != 0);
+
+ /* Go through the ASDs for other slave ports */
+ for (i = 1; i < nsp; i++) {
+ j = 0;
+ do {
+ asd = get_asd(sih, &eromptr, i, j, AD_ST_SLAVE, &addrl, &addrh,
+ &sizel, &sizeh);
+ if (asd == 0)
+ break;
+
+ if (!asidx--) {
+ *addr = addrl;
+ *size = sizel;
+ return;
+ }
+ j++;
+ } while (1);
+
+ if (j == 0) {
+ SI_ERROR((" SP %d has no address descriptors\n", i));
+ break;
+ }
+ }
+
+error:
+ *size = 0;
+ return;
+}
+
+/* Return the number of address spaces in current core */
+int
+ai_numaddrspaces(const si_t *sih)
+{
+ /* TODO: Either save it or parse the EROM on demand; currently hardcoded to 2 */
+ BCM_REFERENCE(sih);
+
+ return 2;
+}
+
+/* Return the address of the nth address space in the current core
+ * Arguments:
+ * sih : Pointer to struct si_t
+ * spidx : slave port index
+ * baidx : base address index
+ */
+uint32
+ai_addrspace(const si_t *sih, uint spidx, uint baidx)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
+ uint cidx;
+
+ cidx = sii->curidx;
+
+ if (spidx == CORE_SLAVE_PORT_0) {
+ if (baidx == CORE_BASE_ADDR_0)
+ return cores_info->coresba[cidx];
+ else if (baidx == CORE_BASE_ADDR_1)
+ return cores_info->coresba2[cidx];
+ }
+ else if (spidx == CORE_SLAVE_PORT_1) {
+ if (baidx == CORE_BASE_ADDR_0)
+ return cores_info->csp2ba[cidx];
+ }
+
+ SI_ERROR(("ai_addrspace: Need to parse the erom again to find %d base addr"
+ " in %d slave port\n",
+ baidx, spidx));
+
+ return 0;
+
+}
+
+/* Return the size of the nth address space in the current core
+* Arguments:
+* sih : Pointer to struct si_t
+* spidx : slave port index
+* baidx : base address index
+*/
+uint32
+ai_addrspacesize(const si_t *sih, uint spidx, uint baidx)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
+ uint cidx;
+
+ cidx = sii->curidx;
+ if (spidx == CORE_SLAVE_PORT_0) {
+ if (baidx == CORE_BASE_ADDR_0)
+ return cores_info->coresba_size[cidx];
+ else if (baidx == CORE_BASE_ADDR_1)
+ return cores_info->coresba2_size[cidx];
+ }
+ else if (spidx == CORE_SLAVE_PORT_1) {
+ if (baidx == CORE_BASE_ADDR_0)
+ return cores_info->csp2ba_size[cidx];
+ }
+
+ SI_ERROR(("ai_addrspacesize: Need to parse the erom again to find %d"
+ " base addr in %d slave port\n",
+ baidx, spidx));
+
+ return 0;
+}
+
+uint
+ai_flag(si_t *sih)
+{
+ const si_info_t *sii = SI_INFO(sih);
+#if !defined(BCMDONGLEHOST)
+ const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
+#endif
+ aidmp_t *ai;
+
+ if (PMU_DMP()) {
+ uint idx, flag;
+ idx = sii->curidx;
+ ai_setcoreidx(sih, SI_CC_IDX);
+ flag = ai_flag_alt(sih);
+ ai_setcoreidx(sih, idx);
+ return flag;
+ }
+
+ ai = sii->curwrap;
+ ASSERT(ai != NULL);
+
+ return (R_REG(sii->osh, &ai->oobselouta30) & 0x1f);
+}
+
+uint
+ai_flag_alt(const si_t *sih)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ aidmp_t *ai = sii->curwrap;
+
+ return ((R_REG(sii->osh, &ai->oobselouta30) >> AI_OOBSEL_1_SHIFT) & AI_OOBSEL_MASK);
+}
+
+void
+ai_setint(const si_t *sih, int siflag)
+{
+ BCM_REFERENCE(sih);
+ BCM_REFERENCE(siflag);
+
+ /* TODO: Figure out how to set interrupt mask in ai */
+}
+
+uint
+BCMPOSTTRAPFN(ai_wrap_reg)(const si_t *sih, uint32 offset, uint32 mask, uint32 val)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ uint32 *addr = (uint32 *) ((uchar *)(sii->curwrap) + offset);
+
+ if (mask || val) {
+ uint32 w = R_REG(sii->osh, addr);
+ w &= ~mask;
+ w |= val;
+ W_REG(sii->osh, addr, w);
+ }
+ return (R_REG(sii->osh, addr));
+}
+
+uint
+ai_corevendor(const si_t *sih)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
+ uint32 cia;
+
+ cia = cores_info->cia[sii->curidx];
+ return ((cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT);
+}
+
+uint
+BCMPOSTTRAPFN(ai_corerev)(const si_t *sih)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
+ uint32 cib;
+
+ cib = cores_info->cib[sii->curidx];
+ return ((cib & CIB_REV_MASK) >> CIB_REV_SHIFT);
+}
+
+uint
+ai_corerev_minor(const si_t *sih)
+{
+ return (ai_core_sflags(sih, 0, 0) >> SISF_MINORREV_D11_SHIFT) &
+ SISF_MINORREV_D11_MASK;
+}
+
+bool
+BCMPOSTTRAPFN(ai_iscoreup)(const si_t *sih)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ aidmp_t *ai = sii->curwrap;
+
+ return (((R_REG(sii->osh, &ai->ioctrl) & (SICF_FGC | SICF_CLOCK_EN)) == SICF_CLOCK_EN) &&
+ ((R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) == 0));
+}
+
+/*
+ * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
+ * switch back to the original core, and return the new value.
+ *
+ * When using the silicon backplane, no fiddling with interrupts or core switches is needed.
+ *
+ * Also, when using pci/pcie, we can optimize away the core switching for pci registers
+ * and (on newer pci cores) chipcommon registers.
+ */
+uint
+BCMPOSTTRAPFN(ai_corereg)(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
+{
+ uint origidx = 0;
+ volatile uint32 *r = NULL;
+ uint w;
+ bcm_int_bitmask_t intr_val;
+ bool fast = FALSE;
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+
+ ASSERT(GOODIDX(coreidx, sii->numcores));
+ ASSERT(regoff < SI_CORE_SIZE);
+ ASSERT((val & ~mask) == 0);
+
+ if (coreidx >= SI_MAXCORES)
+ return 0;
+
+ if (BUSTYPE(sih->bustype) == SI_BUS) {
+ /* If internal bus, we can always get at everything */
+ fast = TRUE;
+ /* map if does not exist */
+ if (!cores_info->regs[coreidx]) {
+ cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
+ SI_CORE_SIZE);
+ ASSERT(GOODREGS(cores_info->regs[coreidx]));
+ }
+ r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] + regoff);
+ } else if (BUSTYPE(sih->bustype) == PCI_BUS) {
+ /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
+
+ if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
+ /* Chipc registers are mapped at 12KB */
+
+ fast = TRUE;
+ r = (volatile uint32 *)((volatile char *)sii->curmap +
+ PCI_16KB0_CCREGS_OFFSET + regoff);
+ } else if (sii->pub.buscoreidx == coreidx) {
+ /* pci registers are at either in the last 2KB of an 8KB window
+ * or, in pcie and pci rev 13 at 8KB
+ */
+ fast = TRUE;
+ if (SI_FAST(sii))
+ r = (volatile uint32 *)((volatile char *)sii->curmap +
+ PCI_16KB0_PCIREGS_OFFSET + regoff);
+ else
+ r = (volatile uint32 *)((volatile char *)sii->curmap +
+ ((regoff >= SBCONFIGOFF) ?
+ PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
+ regoff);
+ }
+ }
+
+ if (!fast) {
+ INTR_OFF(sii, &intr_val);
+
+ /* save current core index */
+ origidx = si_coreidx(&sii->pub);
+
+ /* switch core */
+ r = (volatile uint32*) ((volatile uchar*) ai_setcoreidx(&sii->pub, coreidx) +
+ regoff);
+ }
+ ASSERT(r != NULL);
+
+ /* mask and set */
+ if (mask || val) {
+ w = (R_REG(sii->osh, r) & ~mask) | val;
+ W_REG(sii->osh, r, w);
+ }
+
+ /* readback */
+ w = R_REG(sii->osh, r);
+
+ if (!fast) {
+ /* restore core index */
+ if (origidx != coreidx)
+ ai_setcoreidx(&sii->pub, origidx);
+
+ INTR_RESTORE(sii, &intr_val);
+ }
+
+ return (w);
+}
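+
+/*
+ * Usage sketch (illustrative only, not part of the driver): a
+ * read-modify-write of a chipcommon register via ai_corereg(). Passing
+ * mask == 0 and val == 0 performs a pure read; 'regoff' stands for a
+ * hypothetical register offset within the core.
+ *
+ * uint cc_idx = si_findcoreidx(sih, CC_CORE_ID, 0);
+ * uint32 cur = ai_corereg(sih, cc_idx, regoff, 0, 0);
+ * (void)ai_corereg(sih, cc_idx, regoff, 0x1u, 0x1u);
+ */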
+
+/*
+ * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
+ * switch back to the original core, and return the value written (this
+ * variant does not read the register back).
+ *
+ * When using the silicon backplane, no fiddling with interrupts or core switches is needed.
+ *
+ * Also, when using pci/pcie, we can optimize away the core switching for pci registers
+ * and (on newer pci cores) chipcommon registers.
+ */
+uint
+ai_corereg_writeonly(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
+{
+ uint origidx = 0;
+ volatile uint32 *r = NULL;
+ uint w = 0;
+ bcm_int_bitmask_t intr_val;
+ bool fast = FALSE;
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+
+ ASSERT(GOODIDX(coreidx, sii->numcores));
+ ASSERT(regoff < SI_CORE_SIZE);
+ ASSERT((val & ~mask) == 0);
+
+ if (coreidx >= SI_MAXCORES)
+ return 0;
+
+ if (BUSTYPE(sih->bustype) == SI_BUS) {
+ /* If internal bus, we can always get at everything */
+ fast = TRUE;
+ /* map if it does not exist */
+ if (!cores_info->regs[coreidx]) {
+ cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
+ SI_CORE_SIZE);
+ ASSERT(GOODREGS(cores_info->regs[coreidx]));
+ }
+ r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] + regoff);
+ } else if (BUSTYPE(sih->bustype) == PCI_BUS) {
+ /* If pci/pcie, we can get at pci/pcie regs and, on newer cores, at chipc */
+
+ if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
+ /* Chipc registers are mapped at 12KB */
+
+ fast = TRUE;
+ r = (volatile uint32 *)((volatile char *)sii->curmap +
+ PCI_16KB0_CCREGS_OFFSET + regoff);
+ } else if (sii->pub.buscoreidx == coreidx) {
+ /* pci registers are either in the last 2KB of an 8KB window
+ * or, in pcie and pci rev 13, at 8KB
+ */
+ fast = TRUE;
+ if (SI_FAST(sii))
+ r = (volatile uint32 *)((volatile char *)sii->curmap +
+ PCI_16KB0_PCIREGS_OFFSET + regoff);
+ else
+ r = (volatile uint32 *)((volatile char *)sii->curmap +
+ ((regoff >= SBCONFIGOFF) ?
+ PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
+ regoff);
+ }
+ }
+
+ if (!fast) {
+ INTR_OFF(sii, &intr_val);
+
+ /* save current core index */
+ origidx = si_coreidx(&sii->pub);
+
+ /* switch core */
+ r = (volatile uint32*) ((volatile uchar*) ai_setcoreidx(&sii->pub, coreidx) +
+ regoff);
+ }
+ ASSERT(r != NULL);
+
+ /* mask and set */
+ if (mask || val) {
+ w = (R_REG(sii->osh, r) & ~mask) | val;
+ W_REG(sii->osh, r, w);
+ }
+
+ if (!fast) {
+ /* restore core index */
+ if (origidx != coreidx)
+ ai_setcoreidx(&sii->pub, origidx);
+
+ INTR_RESTORE(sii, &intr_val);
+ }
+
+ return (w);
+}
+
+/*
+ * If there is no need for fiddling with interrupts or core switches (typically silicon
+ * back plane registers, pci registers and chipcommon registers), this function
+ * returns the register offset on this core to a mapped address. This address can
+ * be used for W_REG/R_REG directly.
+ *
+ * For accessing registers that would need a core switch, this function will return
+ * NULL.
+ */
+volatile uint32 *
+BCMPOSTTRAPFN(ai_corereg_addr)(si_t *sih, uint coreidx, uint regoff)
+{
+ volatile uint32 *r = NULL;
+ bool fast = FALSE;
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+
+ ASSERT(GOODIDX(coreidx, sii->numcores));
+ ASSERT(regoff < SI_CORE_SIZE);
+
+ if (coreidx >= SI_MAXCORES)
+ return 0;
+
+ if (BUSTYPE(sih->bustype) == SI_BUS) {
+ /* If internal bus, we can always get at everything */
+ fast = TRUE;
+ /* map if it does not exist */
+ if (!cores_info->regs[coreidx]) {
+ cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
+ SI_CORE_SIZE);
+ ASSERT(GOODREGS(cores_info->regs[coreidx]));
+ }
+ r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] + regoff);
+ } else if (BUSTYPE(sih->bustype) == PCI_BUS) {
+ /* If pci/pcie, we can get at pci/pcie regs and, on newer cores, at chipc */
+
+ if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
+ /* Chipc registers are mapped at 12KB */
+
+ fast = TRUE;
+ r = (volatile uint32 *)((volatile char *)sii->curmap +
+ PCI_16KB0_CCREGS_OFFSET + regoff);
+ } else if (sii->pub.buscoreidx == coreidx) {
+ /* pci registers are either in the last 2KB of an 8KB window
+ * or, in pcie and pci rev 13, at 8KB
+ */
+ fast = TRUE;
+ if (SI_FAST(sii))
+ r = (volatile uint32 *)((volatile char *)sii->curmap +
+ PCI_16KB0_PCIREGS_OFFSET + regoff);
+ else
+ r = (volatile uint32 *)((volatile char *)sii->curmap +
+ ((regoff >= SBCONFIGOFF) ?
+ PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
+ regoff);
+ }
+ }
+
+ if (!fast) {
+ ASSERT(sii->curidx == coreidx);
+ r = (volatile uint32*) ((volatile uchar*)sii->curmap + regoff);
+ }
+
+ return (r);
+}
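+
+/*
+ * Usage sketch (illustrative only, not part of the driver): the returned
+ * pointer can be used with R_REG/W_REG directly, but the NULL case, where
+ * a core switch would have been required, must be handled. 'osh',
+ * 'coreidx' and 'regoff' are assumed to exist in the caller.
+ *
+ * volatile uint32 *r = ai_corereg_addr(sih, coreidx, regoff);
+ * if (r != NULL)
+ * val = R_REG(osh, r);
+ */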
+
+void
+ai_core_disable(const si_t *sih, uint32 bits)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ volatile uint32 dummy;
+ uint32 status;
+ aidmp_t *ai;
+
+ ASSERT(GOODREGS(sii->curwrap));
+ ai = sii->curwrap;
+
+ /* if core is already in reset, just return */
+ if (R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) {
+ return;
+ }
+
+ /* ensure there are no pending backplane operations */
+ SPINWAIT(((status = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);
+
+ /* if pending backplane ops still, try waiting longer */
+ if (status != 0) {
+ /* 300usecs was sufficient to allow backplane ops to clear for big hammer */
+ /* during driver load we may need more time */
+ SPINWAIT(((status = R_REG(sii->osh, &ai->resetstatus)) != 0), 10000);
+ /* if still pending ops, continue on and try disable anyway */
+ /* this is in big hammer path, so don't call wl_reinit in this case... */
+#ifdef BCMDBG
+ if (status != 0) {
+ SI_ERROR(("ai_core_disable: WARN: resetstatus=%0x on core disable\n",
+ status));
+ }
+#endif
+ }
+
+ W_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
+ dummy = R_REG(sii->osh, &ai->resetctrl);
+ BCM_REFERENCE(dummy);
+ OSL_DELAY(1);
+
+ W_REG(sii->osh, &ai->ioctrl, bits);
+ dummy = R_REG(sii->osh, &ai->ioctrl);
+ BCM_REFERENCE(dummy);
+ OSL_DELAY(10);
+}
+
+/* reset and re-enable a core
+ * inputs:
+ * bits - core specific bits that are set during and after reset sequence
+ * resetbits - core specific bits that are set only during reset sequence
+ */
+static void
+BCMPOSTTRAPFN(_ai_core_reset)(const si_t *sih, uint32 bits, uint32 resetbits)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ aidmp_t *ai;
+ volatile uint32 dummy;
+ uint loop_counter = 10;
+
+ ASSERT(GOODREGS(sii->curwrap));
+ ai = sii->curwrap;
+
+ /* ensure there are no pending backplane operations */
+ SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);
+
+#ifdef BCMDBG_ERR
+ if (dummy != 0) {
+ SI_ERROR(("_ai_core_reset: WARN1: resetstatus=0x%0x\n", dummy));
+ }
+#endif /* BCMDBG_ERR */
+
+ /* put core into reset state */
+ W_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
+ OSL_DELAY(10);
+
+ /* ensure there are no pending backplane operations */
+ SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300);
+
+ W_REG(sii->osh, &ai->ioctrl, (bits | resetbits | SICF_FGC | SICF_CLOCK_EN));
+ dummy = R_REG(sii->osh, &ai->ioctrl);
+ BCM_REFERENCE(dummy);
+#ifdef UCM_CORRUPTION_WAR
+ if (si_coreid(sih) == D11_CORE_ID) {
+ /* Reset FGC */
+ OSL_DELAY(1);
+ W_REG(sii->osh, &ai->ioctrl, (dummy & (~SICF_FGC)));
+ }
+#endif /* UCM_CORRUPTION_WAR */
+ /* ensure there are no pending backplane operations */
+ SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);
+
+#ifdef BCMDBG_ERR
+ if (dummy != 0)
+ SI_ERROR(("_ai_core_reset: WARN2: resetstatus=0x%0x\n", dummy));
+#endif
+
+ while (R_REG(sii->osh, &ai->resetctrl) != 0 && --loop_counter != 0) {
+ /* ensure there are no pending backplane operations */
+ SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);
+
+#ifdef BCMDBG_ERR
+ if (dummy != 0)
+ SI_ERROR(("_ai_core_reset: WARN3 resetstatus=0x%0x\n", dummy));
+#endif
+
+ /* take core out of reset */
+ W_REG(sii->osh, &ai->resetctrl, 0);
+
+ /* ensure there are no pending backplane operations */
+ SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300);
+ }
+
+#ifdef BCMDBG_ERR
+ if (loop_counter == 0) {
+ SI_ERROR(("_ai_core_reset: Failed to take core 0x%x out of reset\n",
+ si_coreid(sih)));
+ }
+#endif
+
+#ifdef UCM_CORRUPTION_WAR
+ /* Pulse FGC after lifting Reset */
+ W_REG(sii->osh, &ai->ioctrl, (bits | SICF_FGC | SICF_CLOCK_EN));
+#else
+ W_REG(sii->osh, &ai->ioctrl, (bits | SICF_CLOCK_EN));
+#endif /* UCM_CORRUPTION_WAR */
+ dummy = R_REG(sii->osh, &ai->ioctrl);
+ BCM_REFERENCE(dummy);
+#ifdef UCM_CORRUPTION_WAR
+ if (si_coreid(sih) == D11_CORE_ID) {
+ /* Reset FGC */
+ OSL_DELAY(1);
+ W_REG(sii->osh, &ai->ioctrl, (dummy & (~SICF_FGC)));
+ }
+#endif /* UCM_CORRUPTION_WAR */
+ OSL_DELAY(1);
+}
+
+void
+BCMPOSTTRAPFN(ai_core_reset)(si_t *sih, uint32 bits, uint32 resetbits)
+{
+ si_info_t *sii = SI_INFO(sih);
+ const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
+ uint idx = sii->curidx;
+
+ if (cores_info->wrapba3[idx] != 0) {
+ ai_setcoreidx_3rdwrap(sih, idx);
+ _ai_core_reset(sih, bits, resetbits);
+ ai_setcoreidx(sih, idx);
+ }
+
+ if (cores_info->wrapba2[idx] != 0) {
+ ai_setcoreidx_2ndwrap(sih, idx);
+ _ai_core_reset(sih, bits, resetbits);
+ ai_setcoreidx(sih, idx);
+ }
+
+ _ai_core_reset(sih, bits, resetbits);
+}
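+
+/*
+ * Usage sketch (illustrative only, not part of the driver): a typical
+ * caller selects the core and resets it with no core-specific flags, so
+ * only the generic clock/reset sequencing in _ai_core_reset() applies:
+ *
+ * ai_setcoreidx(sih, coreidx);
+ * ai_core_reset(sih, 0, 0);
+ */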
+
+#ifdef BOOKER_NIC400_INF
+void
+BCMPOSTTRAPFN(ai_core_reset_ext)(const si_t *sih, uint32 bits, uint32 resetbits)
+{
+ _ai_core_reset(sih, bits, resetbits);
+}
+#endif /* BOOKER_NIC400_INF */
+
+void
+ai_core_cflags_wo(const si_t *sih, uint32 mask, uint32 val)
+{
+ const si_info_t *sii = SI_INFO(sih);
+#if !defined(BCMDONGLEHOST)
+ const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
+#endif
+ aidmp_t *ai;
+ uint32 w;
+
+ if (PMU_DMP()) {
+ SI_ERROR(("ai_core_cflags_wo: Accessing PMU DMP register (ioctrl)\n"));
+ return;
+ }
+
+ ASSERT(GOODREGS(sii->curwrap));
+ ai = sii->curwrap;
+
+ ASSERT((val & ~mask) == 0);
+
+ if (mask || val) {
+ w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
+ W_REG(sii->osh, &ai->ioctrl, w);
+ }
+}
+
+uint32
+BCMPOSTTRAPFN(ai_core_cflags)(const si_t *sih, uint32 mask, uint32 val)
+{
+ const si_info_t *sii = SI_INFO(sih);
+#if !defined(BCMDONGLEHOST)
+ const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
+#endif
+ aidmp_t *ai;
+ uint32 w;
+
+ if (PMU_DMP()) {
+ SI_ERROR(("ai_core_cflags: Accessing PMU DMP register (ioctrl)\n"));
+ return 0;
+ }
+ ASSERT(GOODREGS(sii->curwrap));
+ ai = sii->curwrap;
+
+ ASSERT((val & ~mask) == 0);
+
+ if (mask || val) {
+ w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
+ W_REG(sii->osh, &ai->ioctrl, w);
+ }
+
+ return R_REG(sii->osh, &ai->ioctrl);
+}
+
+uint32
+ai_core_sflags(const si_t *sih, uint32 mask, uint32 val)
+{
+ const si_info_t *sii = SI_INFO(sih);
+#if !defined(BCMDONGLEHOST)
+ const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
+#endif
+ aidmp_t *ai;
+ uint32 w;
+
+ if (PMU_DMP()) {
+ SI_ERROR(("ai_core_sflags: Accessing PMU DMP register (ioctrl)\n"));
+ return 0;
+ }
+
+ ASSERT(GOODREGS(sii->curwrap));
+ ai = sii->curwrap;
+
+ ASSERT((val & ~mask) == 0);
+ ASSERT((mask & ~SISF_CORE_BITS) == 0);
+
+ if (mask || val) {
+ w = ((R_REG(sii->osh, &ai->iostatus) & ~mask) | val);
+ W_REG(sii->osh, &ai->iostatus, w);
+ }
+
+ return R_REG(sii->osh, &ai->iostatus);
+}
+
+#if defined(BCMDBG) || defined(BCMDBG_DUMP) || defined(BCMDBG_PHYDUMP)
+/* print interesting aidmp registers */
+void
+ai_dumpregs(const si_t *sih, struct bcmstrbuf *b)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ osl_t *osh;
+ aidmp_t *ai;
+ uint i;
+ uint32 prev_value = 0;
+ const axi_wrapper_t * axi_wrapper = sii->axi_wrapper;
+ uint32 cfg_reg = 0;
+ uint bar0_win_offset = 0;
+
+ osh = sii->osh;
+
+ /* Save and restore wrapper access window */
+ if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
+ if (PCIE_GEN2(sii)) {
+ cfg_reg = PCIE2_BAR0_CORE2_WIN2;
+ bar0_win_offset = PCIE2_BAR0_CORE2_WIN2_OFFSET;
+ } else {
+ cfg_reg = PCI_BAR0_WIN2;
+ bar0_win_offset = PCI_BAR0_WIN2_OFFSET;
+ }
+
+ prev_value = OSL_PCI_READ_CONFIG(osh, cfg_reg, 4);
+
+ if (prev_value == ID32_INVALID) {
+ SI_PRINT(("ai_dumpregs, PCI_BAR0_WIN2 - %x\n", prev_value));
+ return;
+ }
+ }
+
+ bcm_bprintf(b, "ChipNum:%x, ChipRev:%x, BusType:%x, BoardType:%x, BoardVendor:%x\n\n",
+ sih->chip, sih->chiprev, sih->bustype, sih->boardtype, sih->boardvendor);
+
+ for (i = 0; i < sii->axi_num_wrappers; i++) {
+
+ if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
+ /* Set BAR0 window to bridge wrapper base address */
+ OSL_PCI_WRITE_CONFIG(osh,
+ cfg_reg, 4, axi_wrapper[i].wrapper_addr);
+
+ ai = (aidmp_t *) ((volatile uint8*)sii->curmap + bar0_win_offset);
+ } else {
+ ai = (aidmp_t *)(uintptr) axi_wrapper[i].wrapper_addr;
+ }
+
+ bcm_bprintf(b, "core 0x%x: core_rev:%d, %s_WR ADDR:%x \n", axi_wrapper[i].cid,
+ axi_wrapper[i].rev,
+ axi_wrapper[i].wrapper_type == AI_SLAVE_WRAPPER ? "SLAVE" : "MASTER",
+ axi_wrapper[i].wrapper_addr);
+
+ bcm_bprintf(b, "ioctrlset 0x%x ioctrlclear 0x%x ioctrl 0x%x iostatus 0x%x "
+ "ioctrlwidth 0x%x iostatuswidth 0x%x\n"
+ "resetctrl 0x%x resetstatus 0x%x resetreadid 0x%x resetwriteid 0x%x\n"
+ "errlogctrl 0x%x errlogdone 0x%x errlogstatus 0x%x "
+ "errlogaddrlo 0x%x errlogaddrhi 0x%x\n"
+ "errlogid 0x%x errloguser 0x%x errlogflags 0x%x\n"
+ "intstatus 0x%x config 0x%x itcr 0x%x\n\n",
+ R_REG(osh, &ai->ioctrlset),
+ R_REG(osh, &ai->ioctrlclear),
+ R_REG(osh, &ai->ioctrl),
+ R_REG(osh, &ai->iostatus),
+ R_REG(osh, &ai->ioctrlwidth),
+ R_REG(osh, &ai->iostatuswidth),
+ R_REG(osh, &ai->resetctrl),
+ R_REG(osh, &ai->resetstatus),
+ R_REG(osh, &ai->resetreadid),
+ R_REG(osh, &ai->resetwriteid),
+ R_REG(osh, &ai->errlogctrl),
+ R_REG(osh, &ai->errlogdone),
+ R_REG(osh, &ai->errlogstatus),
+ R_REG(osh, &ai->errlogaddrlo),
+ R_REG(osh, &ai->errlogaddrhi),
+ R_REG(osh, &ai->errlogid),
+ R_REG(osh, &ai->errloguser),
+ R_REG(osh, &ai->errlogflags),
+ R_REG(osh, &ai->intstatus),
+ R_REG(osh, &ai->config),
+ R_REG(osh, &ai->itcr));
+ }
+
+ /* Restore the initial wrapper space */
+ if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
+ if (prev_value && cfg_reg) {
+ OSL_PCI_WRITE_CONFIG(osh, cfg_reg, 4, prev_value);
+ }
+ }
+}
+#endif /* BCMDBG || BCMDBG_DUMP || BCMDBG_PHYDUMP */
+
+#ifdef BCMDBG
+static void
+_ai_view(osl_t *osh, aidmp_t *ai, uint32 cid, uint32 addr, bool verbose)
+{
+ uint32 config;
+
+ config = R_REG(osh, &ai->config);
+ SI_PRINT(("\nCore ID: 0x%x, addr 0x%x, config 0x%x\n", cid, addr, config));
+
+ if (config & AICFG_RST)
+ SI_PRINT(("resetctrl 0x%x, resetstatus 0x%x, resetreadid 0x%x, resetwriteid 0x%x\n",
+ R_REG(osh, &ai->resetctrl), R_REG(osh, &ai->resetstatus),
+ R_REG(osh, &ai->resetreadid), R_REG(osh, &ai->resetwriteid)));
+
+ if (config & AICFG_IOC)
+ SI_PRINT(("ioctrl 0x%x, width %d\n", R_REG(osh, &ai->ioctrl),
+ R_REG(osh, &ai->ioctrlwidth)));
+
+ if (config & AICFG_IOS)
+ SI_PRINT(("iostatus 0x%x, width %d\n", R_REG(osh, &ai->iostatus),
+ R_REG(osh, &ai->iostatuswidth)));
+
+ if (config & AICFG_ERRL) {
+ SI_PRINT(("errlogctrl 0x%x, errlogdone 0x%x, errlogstatus 0x%x, intstatus 0x%x\n",
+ R_REG(osh, &ai->errlogctrl), R_REG(osh, &ai->errlogdone),
+ R_REG(osh, &ai->errlogstatus), R_REG(osh, &ai->intstatus)));
+ SI_PRINT(("errlogid 0x%x, errloguser 0x%x, errlogflags 0x%x, errlogaddr "
+ "0x%x/0x%x\n",
+ R_REG(osh, &ai->errlogid), R_REG(osh, &ai->errloguser),
+ R_REG(osh, &ai->errlogflags), R_REG(osh, &ai->errlogaddrhi),
+ R_REG(osh, &ai->errlogaddrlo)));
+ }
+
+ if (verbose && (config & AICFG_OOB)) {
+ SI_PRINT(("oobselina30 0x%x, oobselina74 0x%x\n",
+ R_REG(osh, &ai->oobselina30), R_REG(osh, &ai->oobselina74)));
+ SI_PRINT(("oobselinb30 0x%x, oobselinb74 0x%x\n",
+ R_REG(osh, &ai->oobselinb30), R_REG(osh, &ai->oobselinb74)));
+ SI_PRINT(("oobselinc30 0x%x, oobselinc74 0x%x\n",
+ R_REG(osh, &ai->oobselinc30), R_REG(osh, &ai->oobselinc74)));
+ SI_PRINT(("oobselind30 0x%x, oobselind74 0x%x\n",
+ R_REG(osh, &ai->oobselind30), R_REG(osh, &ai->oobselind74)));
+ SI_PRINT(("oobselouta30 0x%x, oobselouta74 0x%x\n",
+ R_REG(osh, &ai->oobselouta30), R_REG(osh, &ai->oobselouta74)));
+ SI_PRINT(("oobseloutb30 0x%x, oobseloutb74 0x%x\n",
+ R_REG(osh, &ai->oobseloutb30), R_REG(osh, &ai->oobseloutb74)));
+ SI_PRINT(("oobseloutc30 0x%x, oobseloutc74 0x%x\n",
+ R_REG(osh, &ai->oobseloutc30), R_REG(osh, &ai->oobseloutc74)));
+ SI_PRINT(("oobseloutd30 0x%x, oobseloutd74 0x%x\n",
+ R_REG(osh, &ai->oobseloutd30), R_REG(osh, &ai->oobseloutd74)));
+ SI_PRINT(("oobsynca 0x%x, oobseloutaen 0x%x\n",
+ R_REG(osh, &ai->oobsynca), R_REG(osh, &ai->oobseloutaen)));
+ SI_PRINT(("oobsyncb 0x%x, oobseloutben 0x%x\n",
+ R_REG(osh, &ai->oobsyncb), R_REG(osh, &ai->oobseloutben)));
+ SI_PRINT(("oobsyncc 0x%x, oobseloutcen 0x%x\n",
+ R_REG(osh, &ai->oobsyncc), R_REG(osh, &ai->oobseloutcen)));
+ SI_PRINT(("oobsyncd 0x%x, oobseloutden 0x%x\n",
+ R_REG(osh, &ai->oobsyncd), R_REG(osh, &ai->oobseloutden)));
+ SI_PRINT(("oobaextwidth 0x%x, oobainwidth 0x%x, oobaoutwidth 0x%x\n",
+ R_REG(osh, &ai->oobaextwidth), R_REG(osh, &ai->oobainwidth),
+ R_REG(osh, &ai->oobaoutwidth)));
+ SI_PRINT(("oobbextwidth 0x%x, oobbinwidth 0x%x, oobboutwidth 0x%x\n",
+ R_REG(osh, &ai->oobbextwidth), R_REG(osh, &ai->oobbinwidth),
+ R_REG(osh, &ai->oobboutwidth)));
+ SI_PRINT(("oobcextwidth 0x%x, oobcinwidth 0x%x, oobcoutwidth 0x%x\n",
+ R_REG(osh, &ai->oobcextwidth), R_REG(osh, &ai->oobcinwidth),
+ R_REG(osh, &ai->oobcoutwidth)));
+ SI_PRINT(("oobdextwidth 0x%x, oobdinwidth 0x%x, oobdoutwidth 0x%x\n",
+ R_REG(osh, &ai->oobdextwidth), R_REG(osh, &ai->oobdinwidth),
+ R_REG(osh, &ai->oobdoutwidth)));
+ }
+}
+
+void
+ai_view(const si_t *sih, bool verbose)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
+ osl_t *osh;
+ aidmp_t *ai;
+ uint32 cid, addr;
+
+ ai = sii->curwrap;
+ osh = sii->osh;
+
+ if (PMU_DMP()) {
+ SI_ERROR(("Cannot access pmu DMP\n"));
+ return;
+ }
+ cid = cores_info->coreid[sii->curidx];
+ addr = cores_info->wrapba[sii->curidx];
+ _ai_view(osh, ai, cid, addr, verbose);
+}
+
+void
+ai_viewall(si_t *sih, bool verbose)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
+ osl_t *osh;
+ aidmp_t *ai;
+ uint32 cid, addr;
+ uint i;
+
+ osh = sii->osh;
+ for (i = 0; i < sii->numcores; i++) {
+ si_setcoreidx(sih, i);
+
+ if (PMU_DMP()) {
+ SI_ERROR(("Skipping pmu DMP\n"));
+ continue;
+ }
+ ai = sii->curwrap;
+ cid = cores_info->coreid[sii->curidx];
+ addr = cores_info->wrapba[sii->curidx];
+ _ai_view(osh, ai, cid, addr, verbose);
+ }
+}
+#endif /* BCMDBG */
+
+void
+ai_update_backplane_timeouts(const si_t *sih, bool enable, uint32 timeout_exp, uint32 cid)
+{
+#if defined(AXI_TIMEOUTS) || defined(AXI_TIMEOUTS_NIC)
+ const si_info_t *sii = SI_INFO(sih);
+ aidmp_t *ai;
+ uint32 i;
+ axi_wrapper_t * axi_wrapper = sii->axi_wrapper;
+ uint32 errlogctrl = (enable << AIELC_TO_ENAB_SHIFT) |
+ ((timeout_exp << AIELC_TO_EXP_SHIFT) & AIELC_TO_EXP_MASK);
+
+#ifdef AXI_TIMEOUTS_NIC
+ uint32 prev_value = 0;
+ osl_t *osh = sii->osh;
+ uint32 cfg_reg = 0;
+ uint32 offset = 0;
+#endif /* AXI_TIMEOUTS_NIC */
+
+ if ((sii->axi_num_wrappers == 0) ||
+#ifdef AXI_TIMEOUTS_NIC
+ (!PCIE(sii)) ||
+#endif /* AXI_TIMEOUTS_NIC */
+ FALSE) {
+ SI_VMSG(("ai_update_backplane_timeouts, axi_num_wrappers:%d, Is_PCIE:%d,"
+ " BUS_TYPE:%d, ID:%x\n",
+ sii->axi_num_wrappers, PCIE(sii),
+ BUSTYPE(sii->pub.bustype), sii->pub.buscoretype));
+ return;
+ }
+
+#ifdef AXI_TIMEOUTS_NIC
+ /* Save and restore the wrapper access window */
+ if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
+ if (PCIE_GEN1(sii)) {
+ cfg_reg = PCI_BAR0_WIN2;
+ offset = PCI_BAR0_WIN2_OFFSET;
+ } else if (PCIE_GEN2(sii)) {
+ cfg_reg = PCIE2_BAR0_CORE2_WIN2;
+ offset = PCIE2_BAR0_CORE2_WIN2_OFFSET;
+ }
+ else {
+ ASSERT(!"!PCIE_GEN1 && !PCIE_GEN2");
+ }
+
+ prev_value = OSL_PCI_READ_CONFIG(osh, cfg_reg, 4);
+ if (prev_value == ID32_INVALID) {
+ SI_PRINT(("ai_update_backplane_timeouts, PCI_BAR0_WIN2 - %x\n",
+ prev_value));
+ return;
+ }
+ }
+#endif /* AXI_TIMEOUTS_NIC */
+
+ for (i = 0; i < sii->axi_num_wrappers; ++i) {
+ /* WAR for wrong EROM entries w.r.t. slave and master wrappers
+ * for the ADB bridge core, so check the actual wrapper config to determine the type.
+ * http://jira.broadcom.com/browse/HW4388-905
+ */
+ if ((cid == 0 || cid == ADB_BRIDGE_ID) &&
+ (axi_wrapper[i].cid == ADB_BRIDGE_ID)) {
+ /* WAR is applicable only to 89B0 and 89C0 */
+ if (CCREV(sih->ccrev) == 70) {
+ ai = (aidmp_t *)(uintptr)axi_wrapper[i].wrapper_addr;
+ if (R_REG(sii->osh, &ai->config) & WRAPPER_TIMEOUT_CONFIG) {
+ axi_wrapper[i].wrapper_type = AI_SLAVE_WRAPPER;
+ } else {
+ axi_wrapper[i].wrapper_type = AI_MASTER_WRAPPER;
+ }
+ }
+ }
+ if (axi_wrapper[i].wrapper_type != AI_SLAVE_WRAPPER || ((BCM4389_CHIP(sih->chip) ||
+ BCM4388_CHIP(sih->chip)) &&
+ (axi_wrapper[i].wrapper_addr == WL_BRIDGE1_S ||
+ axi_wrapper[i].wrapper_addr == WL_BRIDGE2_S))) {
+ SI_VMSG(("SKIP ENABLE BPT: MFG:%x, CID:%x, ADDR:%x\n",
+ axi_wrapper[i].mfg,
+ axi_wrapper[i].cid,
+ axi_wrapper[i].wrapper_addr));
+ continue;
+ }
+
+ /* Update only given core if requested */
+ if ((cid != 0) && (axi_wrapper[i].cid != cid)) {
+ continue;
+ }
+
+#ifdef AXI_TIMEOUTS_NIC
+ if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
+ /* Set BAR0_CORE2_WIN2 to bridge wrapper base address */
+ OSL_PCI_WRITE_CONFIG(osh,
+ cfg_reg, 4, axi_wrapper[i].wrapper_addr);
+
+ /* set AI to BAR0 + offset corresponding to Gen1 or Gen2 */
+ ai = (aidmp_t *) (DISCARD_QUAL(sii->curmap, uint8) + offset);
+ }
+ else
+#endif /* AXI_TIMEOUTS_NIC */
+ {
+ ai = (aidmp_t *)(uintptr) axi_wrapper[i].wrapper_addr;
+ }
+
+ W_REG(sii->osh, &ai->errlogctrl, errlogctrl);
+
+ SI_VMSG(("ENABLED BPT: MFG:%x, CID:%x, ADDR:%x, ERR_CTRL:%x\n",
+ axi_wrapper[i].mfg,
+ axi_wrapper[i].cid,
+ axi_wrapper[i].wrapper_addr,
+ R_REG(sii->osh, &ai->errlogctrl)));
+ }
+
+#ifdef AXI_TIMEOUTS_NIC
+ /* Restore the initial wrapper space */
+ if (prev_value) {
+ OSL_PCI_WRITE_CONFIG(osh, cfg_reg, 4, prev_value);
+ }
+#endif /* AXI_TIMEOUTS_NIC */
+
+#endif /* AXI_TIMEOUTS || AXI_TIMEOUTS_NIC */
+}
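+
+/*
+ * Usage sketch (illustrative only, not part of the driver): enabling
+ * backplane timeouts on all slave wrappers; cid == 0 means "all cores",
+ * and AXI_TO_VAL stands for a hypothetical timeout exponent chosen by
+ * the caller.
+ *
+ * ai_update_backplane_timeouts(sih, TRUE, AXI_TO_VAL, 0);
+ */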
+
+#if defined (AXI_TIMEOUTS) || defined (AXI_TIMEOUTS_NIC)
+
+/* some slave errors are ignored, so count those cases */
+static uint32 si_ignore_errlog_cnt = 0;
+
+static bool
+BCMPOSTTRAPFN(ai_ignore_errlog)(const si_info_t *sii, const aidmp_t *ai,
+ uint32 lo_addr, uint32 hi_addr, uint32 err_axi_id, uint32 errsts)
+{
+ uint32 ignore_errsts = AIELS_SLAVE_ERR;
+ uint32 ignore_errsts_2 = 0;
+ uint32 ignore_hi = BT_CC_SPROM_BADREG_HI;
+ uint32 ignore_lo = BT_CC_SPROM_BADREG_LO;
+ uint32 ignore_size = BT_CC_SPROM_BADREG_SIZE;
+ bool address_check = TRUE;
+ uint32 axi_id = 0;
+ uint32 axi_id2 = 0;
+ bool extd_axi_id_mask = FALSE;
+ uint32 axi_id_mask;
+
+ SI_PRINT(("err check: core %p, error %d, axi id 0x%04x, addr(0x%08x:%08x)\n",
+ ai, errsts, err_axi_id, hi_addr, lo_addr));
+
+ /* ignore the BT slave errors if the errlog is to chipcommon addr 0x190 */
+ switch (CHIPID(sii->pub.chip)) {
+#if defined(BT_WLAN_REG_ON_WAR)
+ /*
+ * 4389B0/C0 - WL and BT turn on WAR, ignore AXI error originating from
+ * AHB-AXI bridge i.e, any slave error or timeout from BT access
+ */
+ case BCM4389_CHIP_GRPID:
+ axi_id = BCM4389_BT_AXI_ID;
+ ignore_errsts = AIELS_SLAVE_ERR;
+ axi_id2 = BCM4389_BT_AXI_ID;
+ ignore_errsts_2 = AIELS_TIMEOUT;
+ address_check = FALSE;
+ extd_axi_id_mask = TRUE;
+ break;
+#endif /* BT_WLAN_REG_ON_WAR */
+#ifdef BTOVERPCIE
+ case BCM4388_CHIP_GRPID:
+ axi_id = BCM4388_BT_AXI_ID;
+ /* For BT over PCIE, ignore any slave error from BT. */
+ /* No need to check any address range */
+ address_check = FALSE;
+ ignore_errsts_2 = AIELS_DECODE;
+ break;
+ case BCM4369_CHIP_GRPID:
+ axi_id = BCM4369_BT_AXI_ID;
+ /* For BT over PCIE, ignore any slave error from BT. */
+ /* No need to check any address range */
+ address_check = FALSE;
+ ignore_errsts_2 = AIELS_DECODE;
+ break;
+#endif /* BTOVERPCIE */
+ case BCM4376_CHIP_GRPID:
+ case BCM4378_CHIP_GRPID:
+ case BCM4385_CHIP_GRPID:
+ case BCM4387_CHIP_GRPID:
+#ifdef BTOVERPCIE
+ axi_id = BCM4378_BT_AXI_ID;
+ /* For BT over PCIE, ignore any slave error from BT. */
+ /* No need to check any address range */
+ address_check = FALSE;
+#endif /* BTOVERPCIE */
+ axi_id2 = BCM4378_ARM_PREFETCH_AXI_ID;
+ extd_axi_id_mask = TRUE;
+ ignore_errsts_2 = AIELS_DECODE;
+ break;
+#ifdef USE_HOSTMEM
+ case BCM43602_CHIP_ID:
+ axi_id = BCM43602_BT_AXI_ID;
+ address_check = FALSE;
+ break;
+#endif /* USE_HOSTMEM */
+ default:
+ return FALSE;
+ }
+
+ axi_id_mask = extd_axi_id_mask ? AI_ERRLOGID_AXI_ID_MASK_EXTD : AI_ERRLOGID_AXI_ID_MASK;
+
+ /* AXI ID check */
+ err_axi_id &= axi_id_mask;
+ errsts &= AIELS_ERROR_MASK;
+
+ /* check the ignore error cases. 2 checks */
+ if (!(((err_axi_id == axi_id) && (errsts == ignore_errsts)) ||
+ ((err_axi_id == axi_id2) && (errsts == ignore_errsts_2)))) {
+ /* not the error ignore cases */
+ return FALSE;
+
+ }
+
+ /* check the specific address checks now, if specified */
+ if (address_check) {
+ /* address range check */
+ if ((hi_addr != ignore_hi) ||
+ (lo_addr < ignore_lo) || (lo_addr >= (ignore_lo + ignore_size))) {
+ return FALSE;
+ }
+ }
+
+ SI_PRINT(("err check: ignored\n"));
+ return TRUE;
+}
+#endif /* defined (AXI_TIMEOUTS) || defined (AXI_TIMEOUTS_NIC) */
+
+#ifdef AXI_TIMEOUTS_NIC
+
+/* Function to return the APB bridge details corresponding to the core */
+static bool
+ai_get_apb_bridge(const si_t * sih, uint32 coreidx, uint32 *apb_id, uint32 * apb_coreunit)
+{
+ uint i;
+ uint32 core_base, core_end;
+ const si_info_t *sii = SI_INFO(sih);
+ /* coreidx_cached starts at (uint32)-1 so core index 0 does not
+ * spuriously match the empty cache on the first call
+ */
+ static uint32 coreidx_cached = (uint32)-1, apb_id_cached = 0, apb_coreunit_cached = 0;
+ uint32 tmp_coreunit = 0;
+ const si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+
+ if (coreidx >= MIN(sii->numcores, SI_MAXCORES))
+ return FALSE;
+
+ /* Most of the time apb bridge query will be for d11 core.
+ * Maintain the last cache and return if found rather than iterating the table
+ */
+ if (coreidx_cached == coreidx) {
+ *apb_id = apb_id_cached;
+ *apb_coreunit = apb_coreunit_cached;
+ return TRUE;
+ }
+
+ core_base = cores_info->coresba[coreidx];
+ core_end = core_base + cores_info->coresba_size[coreidx];
+
+ for (i = 0; i < sii->numcores; i++) {
+ if (cores_info->coreid[i] == APB_BRIDGE_ID) {
+ uint32 apb_base;
+ uint32 apb_end;
+
+ apb_base = cores_info->coresba[i];
+ apb_end = apb_base + cores_info->coresba_size[i];
+
+ if ((core_base >= apb_base) &&
+ (core_end <= apb_end)) {
+ /* Current core is attached to this APB bridge */
+ *apb_id = apb_id_cached = APB_BRIDGE_ID;
+ *apb_coreunit = apb_coreunit_cached = tmp_coreunit;
+ coreidx_cached = coreidx;
+ return TRUE;
+ }
+ /* Increment the coreunit */
+ tmp_coreunit++;
+ }
+ }
+
+ return FALSE;
+}
+
+uint32
+ai_clear_backplane_to_fast(si_t *sih, void *addr)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ volatile const void *curmap = sii->curmap;
+ bool core_reg = FALSE;
+
+ /* Use fast path only for core register access */
+ if (((uintptr)addr >= (uintptr)curmap) &&
+ ((uintptr)addr < ((uintptr)curmap + SI_CORE_SIZE))) {
+ /* address being accessed is within current core reg map */
+ core_reg = TRUE;
+ }
+
+ if (core_reg) {
+ uint32 apb_id, apb_coreunit;
+
+ if (ai_get_apb_bridge(sih, si_coreidx(&sii->pub),
+ &apb_id, &apb_coreunit) == TRUE) {
+ /* Found the APB bridge corresponding to current core,
+ * Check for bus errors in APB wrapper
+ */
+ return ai_clear_backplane_to_per_core(sih,
+ apb_id, apb_coreunit, NULL);
+ }
+ }
+
+ /* Default is to poll for errors on all slave wrappers */
+ return si_clear_backplane_to(sih);
+}
+#endif /* AXI_TIMEOUTS_NIC */
+
+#if defined (AXI_TIMEOUTS) || defined (AXI_TIMEOUTS_NIC)
+static bool g_disable_backplane_logs = FALSE;
+
+static uint32 last_axi_error = AXI_WRAP_STS_NONE;
+static uint32 last_axi_error_log_status = 0;
+static uint32 last_axi_error_core = 0;
+static uint32 last_axi_error_wrap = 0;
+static uint32 last_axi_errlog_lo = 0;
+static uint32 last_axi_errlog_hi = 0;
+static uint32 last_axi_errlog_id = 0;
+
+/*
+ * API to clear the backplane timeout per core.
+ * The caller may pass an optional wrapper address. If present, it is used
+ * as the wrapper base address, and the caller must then also provide the
+ * coreid.
+ * If both coreid and wrapper are zero, the error status of the current
+ * bridge is verified.
+ */
+uint32
+BCMPOSTTRAPFN(ai_clear_backplane_to_per_core)(si_t *sih, uint coreid, uint coreunit, void *wrap)
+{
+ int ret = AXI_WRAP_STS_NONE;
+ aidmp_t *ai = NULL;
+ uint32 errlog_status = 0;
+ const si_info_t *sii = SI_INFO(sih);
+ uint32 errlog_lo = 0, errlog_hi = 0, errlog_id = 0, errlog_flags = 0;
+ uint32 current_coreidx = si_coreidx(sih);
+ uint32 target_coreidx = si_findcoreidx(sih, coreid, coreunit);
+
+#if defined(AXI_TIMEOUTS_NIC)
+ si_axi_error_t * axi_error = sih->err_info ?
+ &sih->err_info->axi_error[sih->err_info->count] : NULL;
+#endif /* AXI_TIMEOUTS_NIC */
+ bool restore_core = FALSE;
+
+ if ((sii->axi_num_wrappers == 0) ||
+#ifdef AXI_TIMEOUTS_NIC
+ (!PCIE(sii)) ||
+#endif /* AXI_TIMEOUTS_NIC */
+ FALSE) {
+ SI_VMSG(("ai_clear_backplane_to_per_core, axi_num_wrappers:%d, Is_PCIE:%d,"
+ " BUS_TYPE:%d, ID:%x\n",
+ sii->axi_num_wrappers, PCIE(sii),
+ BUSTYPE(sii->pub.bustype), sii->pub.buscoretype));
+ return AXI_WRAP_STS_NONE;
+ }
+
+ if (wrap != NULL) {
+ ai = (aidmp_t *)wrap;
+ } else if (coreid && (target_coreidx != current_coreidx)) {
+
+ if (ai_setcoreidx(sih, target_coreidx) == NULL) {
+ /* Unable to set the core */
+ SI_PRINT(("Set Core Failed: coreid:%x, unit:%d, target_coreidx:%d\n",
+ coreid, coreunit, target_coreidx));
+ errlog_lo = target_coreidx;
+ ret = AXI_WRAP_STS_SET_CORE_FAIL;
+ goto end;
+ }
+
+ restore_core = TRUE;
+ ai = (aidmp_t *)si_wrapperregs(sih);
+ } else {
+ /* Read error status of current wrapper */
+ ai = (aidmp_t *)si_wrapperregs(sih);
+
+ /* Update coreid to the current core ID */
+ coreid = si_coreid(sih);
+ }
+
+ /* read error log status */
+ errlog_status = R_REG(sii->osh, &ai->errlogstatus);
+
+ if (errlog_status == ID32_INVALID) {
+ /* Do not try to peek further */
+ SI_PRINT(("ai_clear_backplane_to_per_core, errlogstatus:%x - Slave Wrapper:%x\n",
+ errlog_status, coreid));
+ ret = AXI_WRAP_STS_WRAP_RD_ERR;
+ errlog_lo = (uint32)(uintptr)&ai->errlogstatus;
+ goto end;
+ }
+
+ if ((errlog_status & AIELS_ERROR_MASK) != 0) {
+ uint32 tmp;
+ uint32 count = 0;
+ /* set ErrDone to clear the condition */
+ W_REG(sii->osh, &ai->errlogdone, AIELD_ERRDONE_MASK);
+
+ /* SPINWAIT on errlogstatus timeout status bits */
+ while ((tmp = R_REG(sii->osh, &ai->errlogstatus)) & AIELS_ERROR_MASK) {
+
+ if (tmp == ID32_INVALID) {
+ SI_PRINT(("ai_clear_backplane_to_per_core: prev errlogstatus:%x,"
+ " errlogstatus:%x\n",
+ errlog_status, tmp));
+ ret = AXI_WRAP_STS_WRAP_RD_ERR;
+ errlog_lo = (uint32)(uintptr)&ai->errlogstatus;
+ goto end;
+ }
+ /*
+ * Clear again, to avoid getting stuck in the loop, if a new error
+ * is logged after we cleared the first timeout
+ */
+ W_REG(sii->osh, &ai->errlogdone, AIELD_ERRDONE_MASK);
+
+ count++;
+ OSL_DELAY(10);
+ if ((10 * count) > AI_REG_READ_TIMEOUT) {
+ errlog_status = tmp;
+ break;
+ }
+ }
+
+ errlog_lo = R_REG(sii->osh, &ai->errlogaddrlo);
+ errlog_hi = R_REG(sii->osh, &ai->errlogaddrhi);
+ errlog_id = R_REG(sii->osh, &ai->errlogid);
+ errlog_flags = R_REG(sii->osh, &ai->errlogflags);
+
+ /* we are already in the error path, so OK to check for the slave error */
+ if (ai_ignore_errlog(sii, ai, errlog_lo, errlog_hi, errlog_id,
+ errlog_status)) {
+ si_ignore_errlog_cnt++;
+ goto end;
+ }
+
+ /* only reset APB Bridge on timeout (not slave error, or dec error) */
+ switch (errlog_status & AIELS_ERROR_MASK) {
+ case AIELS_SLAVE_ERR:
+ SI_PRINT(("AXI slave error\n"));
+ ret |= AXI_WRAP_STS_SLAVE_ERR;
+ break;
+
+ case AIELS_TIMEOUT:
+ ai_reset_axi_to(sii, ai);
+ ret |= AXI_WRAP_STS_TIMEOUT;
+ break;
+
+ case AIELS_DECODE:
+ SI_PRINT(("AXI decode error\n"));
+#ifdef USE_HOSTMEM
+ /* Ignore known cases of CR4 prefetch abort bugs */
+ if ((errlog_id & (BCM_AXI_ID_MASK | BCM_AXI_ACCESS_TYPE_MASK)) !=
+ (BCM43xx_AXI_ACCESS_TYPE_PREFETCH | BCM43xx_CR4_AXI_ID))
+#endif
+ {
+ ret |= AXI_WRAP_STS_DECODE_ERR;
+ }
+ break;
+ default:
+ ASSERT(0); /* should be impossible */
+ }
+
+ if (errlog_status & AIELS_MULTIPLE_ERRORS) {
+ SI_PRINT(("Multiple AXI Errors\n"));
+ /* Set multiple errors bit only if actual error is not ignored */
+ if (ret) {
+ ret |= AXI_WRAP_STS_MULTIPLE_ERRORS;
+ }
+ }
+
+ SI_PRINT(("\tCoreID: %x\n", coreid));
+ SI_PRINT(("\t errlog: lo 0x%08x, hi 0x%08x, id 0x%08x, flags 0x%08x"
+ ", status 0x%08x\n",
+ errlog_lo, errlog_hi, errlog_id, errlog_flags,
+ errlog_status));
+ }
+
+end:
+ if (ret != AXI_WRAP_STS_NONE) {
+ last_axi_error = ret;
+ last_axi_error_log_status = errlog_status;
+ last_axi_error_core = coreid;
+ last_axi_error_wrap = (uint32)(uintptr)ai;
+ last_axi_errlog_lo = errlog_lo;
+ last_axi_errlog_hi = errlog_hi;
+ last_axi_errlog_id = errlog_id;
+ }
+
+#if defined(AXI_TIMEOUTS_NIC)
+ if (axi_error && (ret != AXI_WRAP_STS_NONE)) {
+ axi_error->error = ret;
+ axi_error->coreid = coreid;
+ axi_error->errlog_lo = errlog_lo;
+ axi_error->errlog_hi = errlog_hi;
+ axi_error->errlog_id = errlog_id;
+ axi_error->errlog_flags = errlog_flags;
+ axi_error->errlog_status = errlog_status;
+ sih->err_info->count++;
+
+ if (sih->err_info->count == SI_MAX_ERRLOG_SIZE) {
+ sih->err_info->count = SI_MAX_ERRLOG_SIZE - 1;
+ SI_PRINT(("AXI Error log overflow\n"));
+ }
+ }
+#endif /* AXI_TIMEOUTS_NIC */
+
+ if (restore_core) {
+ if (ai_setcoreidx(sih, current_coreidx) == NULL) {
+ /* Unable to set the core */
+ return ID32_INVALID;
+ }
+ }
+
+ return ret;
+}
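+
+/*
+ * Usage sketch (illustrative only, not part of the driver): checking the
+ * current core's wrapper by passing coreid == 0 and wrap == NULL, then
+ * testing the returned status bitmap:
+ *
+ * uint32 sts = ai_clear_backplane_to_per_core(sih, 0, 0, NULL);
+ * if (sts & AXI_WRAP_STS_TIMEOUT)
+ * ... the APB bridge was reset to unblock the bus ...
+ */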
+
+/* reset AXI timeout */
+static void
+BCMPOSTTRAPFN(ai_reset_axi_to)(const si_info_t *sii, aidmp_t *ai)
+{
+ /* reset APB Bridge */
+ OR_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
+ /* sync write */
+ (void)R_REG(sii->osh, &ai->resetctrl);
+ /* clear Reset bit */
+ AND_REG(sii->osh, &ai->resetctrl, ~(AIRC_RESET));
+ /* sync write */
+ (void)R_REG(sii->osh, &ai->resetctrl);
+ SI_PRINT(("AXI timeout\n"));
+ if (R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) {
+ SI_PRINT(("reset failed on wrapper %p\n", ai));
+ g_disable_backplane_logs = TRUE;
+ }
+}
+
+void
+BCMPOSTTRAPFN(ai_wrapper_get_last_error)(const si_t *sih, uint32 *error_status, uint32 *core,
+ uint32 *lo, uint32 *hi, uint32 *id)
+{
+ *error_status = last_axi_error_log_status;
+ *core = last_axi_error_core;
+ *lo = last_axi_errlog_lo;
+ *hi = last_axi_errlog_hi;
+ *id = last_axi_errlog_id;
+}
+
+/* Function to check whether AXI timeout has been registered on a core */
+uint32
+ai_get_axi_timeout_reg(void)
+{
+ return (GOODREGS(last_axi_errlog_lo) ? last_axi_errlog_lo : 0);
+}
+#endif /* AXI_TIMEOUTS || AXI_TIMEOUTS_NIC */
+
+uint32
+BCMPOSTTRAPFN(ai_findcoreidx_by_axiid)(const si_t *sih, uint32 axiid)
+{
+ uint coreid = 0;
+ uint coreunit = 0;
+ const axi_to_coreidx_t *axi2coreidx = NULL;
+ switch (CHIPID(sih->chip)) {
+ case BCM4369_CHIP_GRPID:
+ axi2coreidx = axi2coreidx_4369;
+ break;
+ default:
+ SI_PRINT(("Chipid mapping not found\n"));
+ break;
+ }
+
+ if (!axi2coreidx)
+ return (BADIDX);
+
+ coreid = axi2coreidx[axiid].coreid;
+ coreunit = axi2coreidx[axiid].coreunit;
+
+ return si_findcoreidx(sih, coreid, coreunit);
+
+}
+
+/*
+ * This API polls all slave wrappers for errors and returns a bit map of
+ * all reported errors.
+ * return - bit map of
+ * AXI_WRAP_STS_NONE
+ * AXI_WRAP_STS_TIMEOUT
+ * AXI_WRAP_STS_SLAVE_ERR
+ * AXI_WRAP_STS_DECODE_ERR
+ * AXI_WRAP_STS_PCI_RD_ERR
+ * AXI_WRAP_STS_WRAP_RD_ERR
+ * AXI_WRAP_STS_SET_CORE_FAIL
+ * On timeout detection, the corresponding bridge is reset to
+ * unblock the bus.
+ * The error reported by each wrapper can be retrieved using the API
+ * si_get_axi_errlog_info()
+ */
+uint32
+BCMPOSTTRAPFN(ai_clear_backplane_to)(si_t *sih)
+{
+ uint32 ret = 0;
+#if defined (AXI_TIMEOUTS) || defined (AXI_TIMEOUTS_NIC)
+ const si_info_t *sii = SI_INFO(sih);
+ aidmp_t *ai;
+ uint32 i;
+ axi_wrapper_t * axi_wrapper = sii->axi_wrapper;
+
+#ifdef AXI_TIMEOUTS_NIC
+ uint32 prev_value = 0;
+ osl_t *osh = sii->osh;
+ uint32 cfg_reg = 0;
+ uint32 offset = 0;
+
+ if ((sii->axi_num_wrappers == 0) || (!PCIE(sii)))
+#else
+ if (sii->axi_num_wrappers == 0)
+#endif
+ {
+ SI_VMSG(("ai_clear_backplane_to, axi_num_wrappers:%d, Is_PCIE:%d, BUS_TYPE:%d,"
+ " ID:%x\n",
+ sii->axi_num_wrappers, PCIE(sii),
+ BUSTYPE(sii->pub.bustype), sii->pub.buscoretype));
+ return AXI_WRAP_STS_NONE;
+ }
+
+#ifdef AXI_TIMEOUTS_NIC
+ /* Save and restore wrapper access window */
+ if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
+ if (PCIE_GEN1(sii)) {
+ cfg_reg = PCI_BAR0_WIN2;
+ offset = PCI_BAR0_WIN2_OFFSET;
+ } else if (PCIE_GEN2(sii)) {
+ cfg_reg = PCIE2_BAR0_CORE2_WIN2;
+ offset = PCIE2_BAR0_CORE2_WIN2_OFFSET;
+ }
+ else {
+ ASSERT(!"!PCIE_GEN1 && !PCIE_GEN2");
+ }
+
+ prev_value = OSL_PCI_READ_CONFIG(osh, cfg_reg, 4);
+
+ if (prev_value == ID32_INVALID) {
+ si_axi_error_t * axi_error =
+ sih->err_info ?
+ &sih->err_info->axi_error[sih->err_info->count] :
+ NULL;
+
+ SI_PRINT(("ai_clear_backplane_to, PCI_BAR0_WIN2 - %x\n", prev_value));
+ if (axi_error) {
+ axi_error->error = ret = AXI_WRAP_STS_PCI_RD_ERR;
+ axi_error->errlog_lo = cfg_reg;
+ sih->err_info->count++;
+
+ if (sih->err_info->count == SI_MAX_ERRLOG_SIZE) {
+ sih->err_info->count = SI_MAX_ERRLOG_SIZE - 1;
+ SI_PRINT(("AXI Error log overflow\n"));
+ }
+ }
+
+ return ret;
+ }
+ }
+#endif /* AXI_TIMEOUTS_NIC */
+
+ for (i = 0; i < sii->axi_num_wrappers; ++i) {
+ uint32 tmp;
+
+ if (axi_wrapper[i].wrapper_type != AI_SLAVE_WRAPPER) {
+ continue;
+ }
+
+#ifdef AXI_TIMEOUTS_NIC
+ if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
+ /* Set BAR0_CORE2_WIN2 to bridge wrapper base address */
+ OSL_PCI_WRITE_CONFIG(osh,
+ cfg_reg, 4, axi_wrapper[i].wrapper_addr);
+
+ /* set AI to BAR0 + offset corresponding to Gen1 or Gen2 */
+ ai = (aidmp_t *) (DISCARD_QUAL(sii->curmap, uint8) + offset);
+ }
+ else
+#endif /* AXI_TIMEOUTS_NIC */
+ {
+ ai = (aidmp_t *)(uintptr) axi_wrapper[i].wrapper_addr;
+ }
+
+ tmp = ai_clear_backplane_to_per_core(sih, axi_wrapper[i].cid, 0,
+ DISCARD_QUAL(ai, void));
+
+ ret |= tmp;
+ }
+
+#ifdef AXI_TIMEOUTS_NIC
+ /* Restore the initial wrapper space */
+ if (prev_value) {
+ OSL_PCI_WRITE_CONFIG(osh, cfg_reg, 4, prev_value);
+ }
+#endif /* AXI_TIMEOUTS_NIC */
+
+#endif /* AXI_TIMEOUTS || AXI_TIMEOUTS_NIC */
+
+ return ret;
+}
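+
+/*
+ * Usage sketch (illustrative only, not part of the driver): the return
+ * value is a bitmap accumulated across all slave wrappers, so several
+ * error kinds can be reported by a single poll:
+ *
+ * uint32 sts = ai_clear_backplane_to(sih);
+ * if (sts & (AXI_WRAP_STS_TIMEOUT | AXI_WRAP_STS_SLAVE_ERR))
+ * ... at least one wrapper logged a timeout or slave error ...
+ */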
+
+uint
+ai_num_slaveports(const si_t *sih, uint coreidx)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
+ uint32 cib;
+
+ cib = cores_info->cib[coreidx];
+ return ((cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT);
+}
+
+#ifdef UART_TRAP_DBG
+void
+ai_dump_APB_Bridge_registers(const si_t *sih)
+{
+ aidmp_t *ai;
+ const si_info_t *sii = SI_INFO(sih);
+
+ ai = (aidmp_t *)sii->br_wrapba[0];
+ printf("APB Bridge 0\n");
+ printf("lo 0x%08x, hi 0x%08x, id 0x%08x, flags 0x%08x",
+ R_REG(sii->osh, &ai->errlogaddrlo),
+ R_REG(sii->osh, &ai->errlogaddrhi),
+ R_REG(sii->osh, &ai->errlogid),
+ R_REG(sii->osh, &ai->errlogflags));
+ printf("\n status 0x%08x\n", R_REG(sii->osh, &ai->errlogstatus));
+}
+#endif /* UART_TRAP_DBG */
+
+void
+ai_force_clocks(const si_t *sih, uint clock_state)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ aidmp_t *ai, *ai_sec = NULL;
+ volatile uint32 dummy;
+ uint32 ioctrl;
+ const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
+
+ ASSERT(GOODREGS(sii->curwrap));
+ ai = sii->curwrap;
+ if (cores_info->wrapba2[sii->curidx])
+ ai_sec = REG_MAP(cores_info->wrapba2[sii->curidx], SI_CORE_SIZE);
+
+ /* ensure there are no pending backplane operations */
+ SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300);
+
+ if (clock_state == FORCE_CLK_ON) {
+ ioctrl = R_REG(sii->osh, &ai->ioctrl);
+ W_REG(sii->osh, &ai->ioctrl, (ioctrl | SICF_FGC));
+ dummy = R_REG(sii->osh, &ai->ioctrl);
+ BCM_REFERENCE(dummy);
+ if (ai_sec) {
+ ioctrl = R_REG(sii->osh, &ai_sec->ioctrl);
+ W_REG(sii->osh, &ai_sec->ioctrl, (ioctrl | SICF_FGC));
+ dummy = R_REG(sii->osh, &ai_sec->ioctrl);
+ BCM_REFERENCE(dummy);
+ }
+ } else {
+ ioctrl = R_REG(sii->osh, &ai->ioctrl);
+ W_REG(sii->osh, &ai->ioctrl, (ioctrl & (~SICF_FGC)));
+ dummy = R_REG(sii->osh, &ai->ioctrl);
+ BCM_REFERENCE(dummy);
+ if (ai_sec) {
+ ioctrl = R_REG(sii->osh, &ai_sec->ioctrl);
+ W_REG(sii->osh, &ai_sec->ioctrl, (ioctrl & (~SICF_FGC)));
+ dummy = R_REG(sii->osh, &ai_sec->ioctrl);
+ BCM_REFERENCE(dummy);
+ }
+ }
+ /* ensure there are no pending backplane operations */
+ SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300);
+}
+
+#ifdef DONGLEBUILD
+/*
+ * This is not declared as static const, although that would be the right
+ * thing to do: if it were static const, the compile/link process would place
+ * it in a read-only section.
+ * Currently this array identifies the registers that are dumped during trap
+ * processing, and since the .rodata region is usually reused for the trap
+ * buffer, it is left as plain static for now.
+*/
+static uint32 BCMPOST_TRAP_RODATA(wrapper_offsets_to_dump)[] = {
+ OFFSETOF(aidmp_t, ioctrlset),
+ OFFSETOF(aidmp_t, ioctrlclear),
+ OFFSETOF(aidmp_t, ioctrl),
+ OFFSETOF(aidmp_t, iostatus),
+ OFFSETOF(aidmp_t, ioctrlwidth),
+ OFFSETOF(aidmp_t, iostatuswidth),
+ OFFSETOF(aidmp_t, resetctrl),
+ OFFSETOF(aidmp_t, resetstatus),
+ OFFSETOF(aidmp_t, resetreadid),
+ OFFSETOF(aidmp_t, resetwriteid),
+ OFFSETOF(aidmp_t, errlogctrl),
+ OFFSETOF(aidmp_t, errlogdone),
+ OFFSETOF(aidmp_t, errlogstatus),
+ OFFSETOF(aidmp_t, errlogaddrlo),
+ OFFSETOF(aidmp_t, errlogaddrhi),
+ OFFSETOF(aidmp_t, errlogid),
+ OFFSETOF(aidmp_t, errloguser),
+ OFFSETOF(aidmp_t, errlogflags),
+ OFFSETOF(aidmp_t, intstatus),
+ OFFSETOF(aidmp_t, config),
+ OFFSETOF(aidmp_t, itipoobaout),
+ OFFSETOF(aidmp_t, itipoobbout),
+ OFFSETOF(aidmp_t, itipoobcout),
+ OFFSETOF(aidmp_t, itipoobdout)};
+
+#ifdef ETD
+
+/* This is used for dumping wrapper registers for etd when axierror happens.
+ * This should match with the structure hnd_ext_trap_bp_err_t
+ */
+static uint32 BCMPOST_TRAP_RODATA(etd_wrapper_offsets_axierr)[] = {
+ OFFSETOF(aidmp_t, ioctrl),
+ OFFSETOF(aidmp_t, iostatus),
+ OFFSETOF(aidmp_t, resetctrl),
+ OFFSETOF(aidmp_t, resetstatus),
+ OFFSETOF(aidmp_t, resetreadid),
+ OFFSETOF(aidmp_t, resetwriteid),
+ OFFSETOF(aidmp_t, errlogctrl),
+ OFFSETOF(aidmp_t, errlogdone),
+ OFFSETOF(aidmp_t, errlogstatus),
+ OFFSETOF(aidmp_t, errlogaddrlo),
+ OFFSETOF(aidmp_t, errlogaddrhi),
+ OFFSETOF(aidmp_t, errlogid),
+ OFFSETOF(aidmp_t, errloguser),
+ OFFSETOF(aidmp_t, errlogflags),
+ OFFSETOF(aidmp_t, itipoobaout),
+ OFFSETOF(aidmp_t, itipoobbout),
+ OFFSETOF(aidmp_t, itipoobcout),
+ OFFSETOF(aidmp_t, itipoobdout)};
+#endif /* ETD */
+
+/* wrapper function to access the global array wrapper_offsets_to_dump */
+static uint32
+BCMRAMFN(ai_get_sizeof_wrapper_offsets_to_dump)(void)
+{
+ return (sizeof(wrapper_offsets_to_dump));
+}
+
+static uint32
+BCMPOSTTRAPRAMFN(ai_get_wrapper_base_addr)(uint32 **offset)
+{
+ uint32 arr_size = ARRAYSIZE(wrapper_offsets_to_dump);
+
+ *offset = &wrapper_offsets_to_dump[0];
+ return arr_size;
+}
+
+uint32
+BCMATTACHFN(ai_wrapper_dump_buf_size)(const si_t *sih)
+{
+ uint32 buf_size = 0;
+ uint32 wrapper_count = 0;
+ const si_info_t *sii = SI_INFO(sih);
+
+ wrapper_count = sii->axi_num_wrappers;
+ if (wrapper_count == 0)
+ return 0;
+
+ /* each register is dumped as an address/value pair of uint32s;
+ * tag_id 0 indicates address/value pairs
+ */
+ buf_size += 2 * (ai_get_sizeof_wrapper_offsets_to_dump() * wrapper_count);
+
+ return buf_size;
+}
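+
+/*
+ * Worked example (illustrative): wrapper_offsets_to_dump holds 24 offsets
+ * (24 * sizeof(uint32) = 96 bytes), so with N wrappers the dump buffer is
+ * 2 * 96 * N bytes, each register contributing an address/value pair.
+ */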
+
+static uint32*
+BCMPOSTTRAPFN(ai_wrapper_dump_binary_one)(const si_info_t *sii, uint32 *p32, uint32 wrap_ba)
+{
+ uint i;
+ uint32 *addr;
+ uint32 arr_size;
+ uint32 *offset_base;
+
+ arr_size = ai_get_wrapper_base_addr(&offset_base);
+
+ for (i = 0; i < arr_size; i++) {
+ addr = (uint32 *)(wrap_ba + *(offset_base + i));
+ *p32++ = (uint32)addr;
+ *p32++ = R_REG(sii->osh, addr);
+ }
+ return p32;
+}
+
+#if defined(ETD)
+static uint32
+BCMPOSTTRAPRAMFN(ai_get_wrapper_base_addr_etd_axierr)(uint32 **offset)
+{
+ uint32 arr_size = ARRAYSIZE(etd_wrapper_offsets_axierr);
+
+ *offset = &etd_wrapper_offsets_axierr[0];
+ return arr_size;
+}
+
+uint32
+BCMPOSTTRAPFN(ai_wrapper_dump_last_timeout)(const si_t *sih, uint32 *error, uint32 *core,
+ uint32 *ba, uchar *p)
+{
+#if defined (AXI_TIMEOUTS) || defined (AXI_TIMEOUTS_NIC)
+ uint32 *p32;
+ uint32 wrap_ba = last_axi_error_wrap;
+ uint i;
+ uint32 *addr;
+
+ const si_info_t *sii = SI_INFO(sih);
+
+ if (last_axi_error != AXI_WRAP_STS_NONE)
+ {
+ if (wrap_ba)
+ {
+ uint32 arr_size;
+ uint32 *offset_base;
+
+ p32 = (uint32 *)p;
+ arr_size = ai_get_wrapper_base_addr_etd_axierr(&offset_base);
+ for (i = 0; i < arr_size; i++) {
+ addr = (uint32 *)(wrap_ba + *(offset_base + i));
+ *p32++ = R_REG(sii->osh, addr);
+ }
+ }
+ *error = last_axi_error;
+ *core = last_axi_error_core;
+ *ba = wrap_ba;
+ }
+#else
+ *error = 0;
+ *core = 0;
+ *ba = 0;
+#endif /* AXI_TIMEOUTS || AXI_TIMEOUTS_NIC */
+ return 0;
+}
+#endif /* ETD */
+
+uint32
+BCMPOSTTRAPFN(ai_wrapper_dump_binary)(const si_t *sih, uchar *p)
+{
+ uint32 *p32 = (uint32 *)p;
+ uint32 i;
+ const si_info_t *sii = SI_INFO(sih);
+
+ for (i = 0; i < sii->axi_num_wrappers; i++) {
+ p32 = ai_wrapper_dump_binary_one(sii, p32, sii->axi_wrapper[i].wrapper_addr);
+ }
+ return 0;
+}
+
+bool
+BCMPOSTTRAPFN(ai_check_enable_backplane_log)(const si_t *sih)
+{
+#if defined (AXI_TIMEOUTS) || defined (AXI_TIMEOUTS_NIC)
+ if (g_disable_backplane_logs) {
+ return FALSE;
+ }
+ else {
+ return TRUE;
+ }
+#else /* (AXI_TIMEOUTS) || defined (AXI_TIMEOUTS_NIC) */
+ return FALSE;
+#endif /* (AXI_TIMEOUTS) || defined (AXI_TIMEOUTS_NIC) */
+}
+#endif /* DONGLEBUILD */
diff --git a/bcmdhd.101.10.361.x/bcm_app_utils.c b/bcmdhd.101.10.361.x/bcm_app_utils.c
new file mode 100755
index 0000000..62d0507
--- /dev/null
+++ b/bcmdhd.101.10.361.x/bcm_app_utils.c
@@ -0,0 +1,1276 @@
+/*
+ * Misc utility routines used by kernel or app-level.
+ * Contents are wifi-specific, used by any kernel or app-level
+ * software that might want wifi things as it grows.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#include <typedefs.h>
+
+#ifdef BCMDRIVER
+#include <osl.h>
+#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
+#define tolower(c) (bcm_isupper((c)) ? ((c) + 'a' - 'A') : (c))
+#else /* BCMDRIVER */
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <ctype.h>
+#ifndef ASSERT
+#define ASSERT(exp)
+#endif
+#endif /* BCMDRIVER */
+#include <bcmwifi_channels.h>
+
+#if defined(WIN32) && (defined(BCMDLL) || defined(WLMDLL))
+#include <bcmstdlib.h> /* For wlexe/Makefile.wlm_dll */
+#endif
+
+#include <bcmutils.h>
+#include <wlioctl.h>
+#include <wlioctl_utils.h>
+
+#ifndef BCMDRIVER
+/* Take an array of measurements representing a single channel over time and return
+ a summary. Currently implemented as a simple average but could easily evolve
+ into more complex algorithms.
+*/
+cca_congest_channel_req_t *
+cca_per_chan_summary(cca_congest_channel_req_t *input, cca_congest_channel_req_t *avg, bool percent)
+{
+ int sec;
+ cca_congest_t totals;
+
+ totals.duration = 0;
+ totals.congest_ibss = 0;
+ totals.congest_obss = 0;
+ totals.interference = 0;
+ avg->num_secs = 0;
+
+ for (sec = 0; sec < input->num_secs; sec++) {
+ if (input->secs[sec].duration) {
+ totals.duration += input->secs[sec].duration;
+ totals.congest_ibss += input->secs[sec].congest_ibss;
+ totals.congest_obss += input->secs[sec].congest_obss;
+ totals.interference += input->secs[sec].interference;
+ avg->num_secs++;
+ }
+ }
+ avg->chanspec = input->chanspec;
+
+ if (!avg->num_secs || !totals.duration)
+ return (avg);
+
+ if (percent) {
+ avg->secs[0].duration = totals.duration / avg->num_secs;
+ avg->secs[0].congest_ibss = totals.congest_ibss * 100/totals.duration;
+ avg->secs[0].congest_obss = totals.congest_obss * 100/totals.duration;
+ avg->secs[0].interference = totals.interference * 100/totals.duration;
+ } else {
+ avg->secs[0].duration = totals.duration / avg->num_secs;
+ avg->secs[0].congest_ibss = totals.congest_ibss / avg->num_secs;
+ avg->secs[0].congest_obss = totals.congest_obss / avg->num_secs;
+ avg->secs[0].interference = totals.interference / avg->num_secs;
+ }
+
+ return (avg);
+}
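+
+/*
+ * Usage sketch (illustrative only): averaging one channel's samples into
+ * 'avg' and reading back the percentage view; 'samples' is a hypothetical
+ * caller-owned measurement buffer.
+ *
+ * cca_congest_channel_req_t avg;
+ * (void)cca_per_chan_summary(&samples, &avg, TRUE);
+ * ... avg.secs[0].congest_obss is other-BSS airtime as a percentage ...
+ */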
+
+static void
+cca_info(uint8 *bitmap, int num_bits, int *left, int *bit_pos)
+{
+ int i;
+ for (*left = 0, i = 0; i < num_bits; i++) {
+ if (isset(bitmap, i)) {
+ (*left)++;
+ *bit_pos = i;
+ }
+ }
+}
+
+static uint8
+spec_to_chan(chanspec_t chspec)
+{
+ uint8 center_ch, edge, primary, sb;
+
+ center_ch = CHSPEC_CHANNEL(chspec);
+
+ if (CHSPEC_IS20(chspec)) {
+ return center_ch;
+ } else {
+ /* the lower edge of the wide channel is half the bw from
+ * the center channel.
+ */
+ if (CHSPEC_IS40(chspec)) {
+ edge = center_ch - CH_20MHZ_APART;
+ } else {
+ /* must be 80MHz (until we support more) */
+ ASSERT(CHSPEC_IS80(chspec));
+ edge = center_ch - CH_40MHZ_APART;
+ }
+
+ /* find the channel number of the lowest 20MHz primary channel */
+ primary = edge + CH_10MHZ_APART;
+
+ /* select the actual subband */
+ sb = (chspec & WL_CHANSPEC_CTL_SB_MASK) >> WL_CHANSPEC_CTL_SB_SHIFT;
+ primary = primary + sb * CH_20MHZ_APART;
+
+ return primary;
+ }
+}
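+
+/*
+ * Worked example (illustrative): for an 80 MHz chanspec centered on
+ * channel 42 with subband index 2, edge = 42 - CH_40MHZ_APART = 34, the
+ * lowest primary is 34 + CH_10MHZ_APART = 36, and the returned primary
+ * channel is 36 + 2 * CH_20MHZ_APART = 44.
+ */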
+
+/*
+ Take an array of measurements representing summaries of different channels.
+ Return a recommended channel.
+ Interference is evil, get rid of that first.
+ Then hunt for the lowest other-BSS traffic.
+ Don't forget that channels with low duration times may not have accurate readings.
+ For the moment, do not overwrite the input array.
+*/
+int
+cca_analyze(cca_congest_channel_req_t *input[], int num_chans, uint flags, chanspec_t *answer)
+{
+ uint8 *bitmap = NULL; /* 38 max channels need 5 bytes (= 40 bits) */
+ int i, left, winner, ret_val = 0;
+ uint32 min_obss = 1 << 30;
+ uint bitmap_sz;
+
+ bitmap_sz = CEIL(num_chans, NBBY);
+ bitmap = (uint8 *)malloc(bitmap_sz);
+ if (bitmap == NULL) {
+ printf("unable to allocate memory\n");
+ return BCME_NOMEM;
+ }
+
+ memset(bitmap, 0, bitmap_sz);
+ /* Initially, all channels are up for consideration */
+ for (i = 0; i < num_chans; i++) {
+ if (input[i]->chanspec)
+ setbit(bitmap, i);
+ }
+ cca_info(bitmap, num_chans, &left, &i);
+ if (!left) {
+ ret_val = CCA_ERRNO_TOO_FEW;
+ goto f_exit;
+ }
+
+ /* Filter for 2.4 GHz Band */
+ if (flags & CCA_FLAG_2G_ONLY) {
+ for (i = 0; i < num_chans; i++) {
+ if (!CHSPEC_IS2G(input[i]->chanspec))
+ clrbit(bitmap, i);
+ }
+ }
+ cca_info(bitmap, num_chans, &left, &i);
+ if (!left) {
+ ret_val = CCA_ERRNO_BAND;
+ goto f_exit;
+ }
+
+ /* Filter for 5 GHz Band */
+ if (flags & CCA_FLAG_5G_ONLY) {
+ for (i = 0; i < num_chans; i++) {
+ if (!CHSPEC_IS5G(input[i]->chanspec))
+ clrbit(bitmap, i);
+ }
+ }
+ cca_info(bitmap, num_chans, &left, &i);
+ if (!left) {
+ ret_val = CCA_ERRNO_BAND;
+ goto f_exit;
+ }
+
+ /* Filter for Duration */
+ if (!(flags & CCA_FLAG_IGNORE_DURATION)) {
+ for (i = 0; i < num_chans; i++) {
+ if (input[i]->secs[0].duration < CCA_THRESH_MILLI)
+ clrbit(bitmap, i);
+ }
+ }
+ cca_info(bitmap, num_chans, &left, &i);
+ if (!left) {
+ ret_val = CCA_ERRNO_DURATION;
+ goto f_exit;
+ }
+
+ /* Filter for channels 1, 6 and 11 on the 2.4 GHz band */
+ if (flags & CCA_FLAGS_PREFER_1_6_11) {
+ for (i = 0; i < num_chans; i++) {
+ /* evaluate each channel's primary channel and band */
+ int tmp_channel = spec_to_chan(input[i]->chanspec);
+ int is2g = CHSPEC_IS2G(input[i]->chanspec);
+ if (is2g && tmp_channel != 1 && tmp_channel != 6 && tmp_channel != 11)
+ clrbit(bitmap, i);
+ }
+ }
+ cca_info(bitmap, num_chans, &left, &i);
+ if (!left) {
+ ret_val = CCA_ERRNO_PREF_CHAN;
+ goto f_exit;
+ }
+
+ /* Toss high-interference channels */
+ if (!(flags & CCA_FLAG_IGNORE_INTERFER)) {
+ for (i = 0; i < num_chans; i++) {
+ if (input[i]->secs[0].interference > CCA_THRESH_INTERFERE)
+ clrbit(bitmap, i);
+ }
+ cca_info(bitmap, num_chans, &left, &i);
+ if (!left) {
+ ret_val = CCA_ERRNO_INTERFER;
+ goto f_exit;
+ }
+ }
+
+ /* Now find lowest obss */
+ winner = 0;
+ for (i = 0; i < num_chans; i++) {
+ if (isset(bitmap, i) && input[i]->secs[0].congest_obss < min_obss) {
+ winner = i;
+ min_obss = input[i]->secs[0].congest_obss;
+ }
+ }
+ *answer = input[winner]->chanspec;
+ f_exit:
+ free(bitmap); /* free the allocated memory for bitmap */
+ return ret_val;
+}
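+
+/*
+ * Usage sketch (illustrative only): feeding per-channel summaries (e.g.
+ * from cca_per_chan_summary()) into the analyzer, restricted to the
+ * 2.4 GHz band; 'summaries' and 'nchans' are hypothetical caller values.
+ *
+ * chanspec_t best;
+ * if (cca_analyze(summaries, nchans, CCA_FLAG_2G_ONLY, &best) == 0)
+ * ... 'best' holds the recommended chanspec ...
+ */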
+#endif /* !BCMDRIVER */
+
+/* offset of cntmember, in units of sizeof(uint32), from the first cnt variable, txframe. */
+#define IDX_IN_WL_CNT_VER_6_T(cntmember) \
+ ((OFFSETOF(wl_cnt_ver_6_t, cntmember) - OFFSETOF(wl_cnt_ver_6_t, txframe)) / sizeof(uint32))
+
+#define IDX_IN_WL_CNT_VER_7_T(cntmember) \
+ ((OFFSETOF(wl_cnt_ver_7_t, cntmember) - OFFSETOF(wl_cnt_ver_7_t, txframe)) / sizeof(uint32))
+
+#define IDX_IN_WL_CNT_VER_11_T(cntmember) \
+ ((OFFSETOF(wl_cnt_ver_11_t, cntmember) - OFFSETOF(wl_cnt_ver_11_t, txframe)) \
+ / sizeof(uint32))
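+
+/*
+ * Worked example (illustrative): the counters are consecutive uint32
+ * fields starting at txframe, so a member's index is its uint32 distance
+ * from txframe; e.g. if txbyte immediately follows txframe, then
+ * IDX_IN_WL_CNT_VER_6_T(txbyte) == 1.
+ */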
+
+/* Exclude version and length fields */
+#define NUM_OF_CNT_IN_WL_CNT_VER_6_T \
+ ((sizeof(wl_cnt_ver_6_t) - 2 * sizeof(uint16)) / sizeof(uint32))
+/* Exclude macstat cnt variables. wl_cnt_ver_6_t only has 62 macstat cnt variables. */
+#define NUM_OF_WLCCNT_IN_WL_CNT_VER_6_T \
+ (NUM_OF_CNT_IN_WL_CNT_VER_6_T - (WL_CNT_MCST_VAR_NUM - 2))
+
+/* Exclude version and length fields */
+#define NUM_OF_CNT_IN_WL_CNT_VER_7_T \
+ ((sizeof(wl_cnt_ver_7_t) - 2 * sizeof(uint16)) / sizeof(uint32))
+
+/* Exclude version and length fields */
+#define NUM_OF_CNT_IN_WL_CNT_VER_11_T \
+ ((sizeof(wl_cnt_ver_11_t) - 2 * sizeof(uint16)) / sizeof(uint32))
+/* Exclude 64 macstat cnt variables. */
+#define NUM_OF_WLCCNT_IN_WL_CNT_VER_11_T \
+ ((sizeof(wl_cnt_wlc_t)) / sizeof(uint32))
+
+/* Index conversion table from wl_cnt_ver_6_t to wl_cnt_wlc_t */
+static const uint8 wlcntver6t_to_wlcntwlct[NUM_OF_WLCCNT_IN_WL_CNT_VER_6_T] = {
+ IDX_IN_WL_CNT_VER_6_T(txframe),
+ IDX_IN_WL_CNT_VER_6_T(txbyte),
+ IDX_IN_WL_CNT_VER_6_T(txretrans),
+ IDX_IN_WL_CNT_VER_6_T(txerror),
+ IDX_IN_WL_CNT_VER_6_T(txctl),
+ IDX_IN_WL_CNT_VER_6_T(txprshort),
+ IDX_IN_WL_CNT_VER_6_T(txserr),
+ IDX_IN_WL_CNT_VER_6_T(txnobuf),
+ IDX_IN_WL_CNT_VER_6_T(txnoassoc),
+ IDX_IN_WL_CNT_VER_6_T(txrunt),
+ IDX_IN_WL_CNT_VER_6_T(txchit),
+ IDX_IN_WL_CNT_VER_6_T(txcmiss),
+ IDX_IN_WL_CNT_VER_6_T(txuflo),
+ IDX_IN_WL_CNT_VER_6_T(txphyerr),
+ IDX_IN_WL_CNT_VER_6_T(txphycrs),
+ IDX_IN_WL_CNT_VER_6_T(rxframe),
+ IDX_IN_WL_CNT_VER_6_T(rxbyte),
+ IDX_IN_WL_CNT_VER_6_T(rxerror),
+ IDX_IN_WL_CNT_VER_6_T(rxctl),
+ IDX_IN_WL_CNT_VER_6_T(rxnobuf),
+ IDX_IN_WL_CNT_VER_6_T(rxnondata),
+ IDX_IN_WL_CNT_VER_6_T(rxbadds),
+ IDX_IN_WL_CNT_VER_6_T(rxbadcm),
+ IDX_IN_WL_CNT_VER_6_T(rxfragerr),
+ IDX_IN_WL_CNT_VER_6_T(rxrunt),
+ IDX_IN_WL_CNT_VER_6_T(rxgiant),
+ IDX_IN_WL_CNT_VER_6_T(rxnoscb),
+ IDX_IN_WL_CNT_VER_6_T(rxbadproto),
+ IDX_IN_WL_CNT_VER_6_T(rxbadsrcmac),
+ IDX_IN_WL_CNT_VER_6_T(rxbadda),
+ IDX_IN_WL_CNT_VER_6_T(rxfilter),
+ IDX_IN_WL_CNT_VER_6_T(rxoflo),
+ IDX_IN_WL_CNT_VER_6_T(rxuflo),
+ IDX_IN_WL_CNT_VER_6_T(rxuflo) + 1,
+ IDX_IN_WL_CNT_VER_6_T(rxuflo) + 2,
+ IDX_IN_WL_CNT_VER_6_T(rxuflo) + 3,
+ IDX_IN_WL_CNT_VER_6_T(rxuflo) + 4,
+ IDX_IN_WL_CNT_VER_6_T(rxuflo) + 5,
+ IDX_IN_WL_CNT_VER_6_T(d11cnt_txrts_off),
+ IDX_IN_WL_CNT_VER_6_T(d11cnt_rxcrc_off),
+ IDX_IN_WL_CNT_VER_6_T(d11cnt_txnocts_off),
+ IDX_IN_WL_CNT_VER_6_T(dmade),
+ IDX_IN_WL_CNT_VER_6_T(dmada),
+ IDX_IN_WL_CNT_VER_6_T(dmape),
+ IDX_IN_WL_CNT_VER_6_T(reset),
+ IDX_IN_WL_CNT_VER_6_T(tbtt),
+ IDX_IN_WL_CNT_VER_6_T(txdmawar),
+ IDX_IN_WL_CNT_VER_6_T(pkt_callback_reg_fail),
+ IDX_IN_WL_CNT_VER_6_T(txfrag),
+ IDX_IN_WL_CNT_VER_6_T(txmulti),
+ IDX_IN_WL_CNT_VER_6_T(txfail),
+ IDX_IN_WL_CNT_VER_6_T(txretry),
+ IDX_IN_WL_CNT_VER_6_T(txretrie),
+ IDX_IN_WL_CNT_VER_6_T(rxdup),
+ IDX_IN_WL_CNT_VER_6_T(txrts),
+ IDX_IN_WL_CNT_VER_6_T(txnocts),
+ IDX_IN_WL_CNT_VER_6_T(txnoack),
+ IDX_IN_WL_CNT_VER_6_T(rxfrag),
+ IDX_IN_WL_CNT_VER_6_T(rxmulti),
+ IDX_IN_WL_CNT_VER_6_T(rxcrc),
+ IDX_IN_WL_CNT_VER_6_T(txfrmsnt),
+ IDX_IN_WL_CNT_VER_6_T(rxundec),
+ IDX_IN_WL_CNT_VER_6_T(tkipmicfaill),
+ IDX_IN_WL_CNT_VER_6_T(tkipcntrmsr),
+ IDX_IN_WL_CNT_VER_6_T(tkipreplay),
+ IDX_IN_WL_CNT_VER_6_T(ccmpfmterr),
+ IDX_IN_WL_CNT_VER_6_T(ccmpreplay),
+ IDX_IN_WL_CNT_VER_6_T(ccmpundec),
+ IDX_IN_WL_CNT_VER_6_T(fourwayfail),
+ IDX_IN_WL_CNT_VER_6_T(wepundec),
+ IDX_IN_WL_CNT_VER_6_T(wepicverr),
+ IDX_IN_WL_CNT_VER_6_T(decsuccess),
+ IDX_IN_WL_CNT_VER_6_T(tkipicverr),
+ IDX_IN_WL_CNT_VER_6_T(wepexcluded),
+ IDX_IN_WL_CNT_VER_6_T(txchanrej),
+ IDX_IN_WL_CNT_VER_6_T(psmwds),
+ IDX_IN_WL_CNT_VER_6_T(phywatchdog),
+ IDX_IN_WL_CNT_VER_6_T(prq_entries_handled),
+ IDX_IN_WL_CNT_VER_6_T(prq_undirected_entries),
+ IDX_IN_WL_CNT_VER_6_T(prq_bad_entries),
+ IDX_IN_WL_CNT_VER_6_T(atim_suppress_count),
+ IDX_IN_WL_CNT_VER_6_T(bcn_template_not_ready),
+ IDX_IN_WL_CNT_VER_6_T(bcn_template_not_ready_done),
+ IDX_IN_WL_CNT_VER_6_T(late_tbtt_dpc),
+ IDX_IN_WL_CNT_VER_6_T(rx1mbps),
+ IDX_IN_WL_CNT_VER_6_T(rx2mbps),
+ IDX_IN_WL_CNT_VER_6_T(rx5mbps5),
+ IDX_IN_WL_CNT_VER_6_T(rx6mbps),
+ IDX_IN_WL_CNT_VER_6_T(rx9mbps),
+ IDX_IN_WL_CNT_VER_6_T(rx11mbps),
+ IDX_IN_WL_CNT_VER_6_T(rx12mbps),
+ IDX_IN_WL_CNT_VER_6_T(rx18mbps),
+ IDX_IN_WL_CNT_VER_6_T(rx24mbps),
+ IDX_IN_WL_CNT_VER_6_T(rx36mbps),
+ IDX_IN_WL_CNT_VER_6_T(rx48mbps),
+ IDX_IN_WL_CNT_VER_6_T(rx54mbps),
+ IDX_IN_WL_CNT_VER_6_T(rx108mbps),
+ IDX_IN_WL_CNT_VER_6_T(rx162mbps),
+ IDX_IN_WL_CNT_VER_6_T(rx216mbps),
+ IDX_IN_WL_CNT_VER_6_T(rx270mbps),
+ IDX_IN_WL_CNT_VER_6_T(rx324mbps),
+ IDX_IN_WL_CNT_VER_6_T(rx378mbps),
+ IDX_IN_WL_CNT_VER_6_T(rx432mbps),
+ IDX_IN_WL_CNT_VER_6_T(rx486mbps),
+ IDX_IN_WL_CNT_VER_6_T(rx540mbps),
+ IDX_IN_WL_CNT_VER_6_T(rfdisable),
+ IDX_IN_WL_CNT_VER_6_T(txexptime),
+ IDX_IN_WL_CNT_VER_6_T(txmpdu_sgi),
+ IDX_IN_WL_CNT_VER_6_T(rxmpdu_sgi),
+ IDX_IN_WL_CNT_VER_6_T(txmpdu_stbc),
+ IDX_IN_WL_CNT_VER_6_T(rxmpdu_stbc),
+ IDX_IN_WL_CNT_VER_6_T(rxundec_mcst),
+ IDX_IN_WL_CNT_VER_6_T(tkipmicfaill_mcst),
+ IDX_IN_WL_CNT_VER_6_T(tkipcntrmsr_mcst),
+ IDX_IN_WL_CNT_VER_6_T(tkipreplay_mcst),
+ IDX_IN_WL_CNT_VER_6_T(ccmpfmterr_mcst),
+ IDX_IN_WL_CNT_VER_6_T(ccmpreplay_mcst),
+ IDX_IN_WL_CNT_VER_6_T(ccmpundec_mcst),
+ IDX_IN_WL_CNT_VER_6_T(fourwayfail_mcst),
+ IDX_IN_WL_CNT_VER_6_T(wepundec_mcst),
+ IDX_IN_WL_CNT_VER_6_T(wepicverr_mcst),
+ IDX_IN_WL_CNT_VER_6_T(decsuccess_mcst),
+ IDX_IN_WL_CNT_VER_6_T(tkipicverr_mcst),
+ IDX_IN_WL_CNT_VER_6_T(wepexcluded_mcst)
+};
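+
+/* Note on the "+ 1" .. "+ 5" entries above: counters such as rxuflo (and
+ * txfunfl in the macstat tables) are per-FIFO arrays of six entries in the
+ * source structs, so consecutive uint32 slots are addressed by adding an
+ * offset to the index of the first element.
+ */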
+
+#define INVALID_IDX ((uint8)(-1))
+
+/* Index conversion table from wl_cnt_ver_7_t to wl_cnt_wlc_t */
+static const uint8 wlcntver7t_to_wlcntwlct[] = {
+ IDX_IN_WL_CNT_VER_7_T(txframe),
+ IDX_IN_WL_CNT_VER_7_T(txbyte),
+ IDX_IN_WL_CNT_VER_7_T(txretrans),
+ IDX_IN_WL_CNT_VER_7_T(txerror),
+ IDX_IN_WL_CNT_VER_7_T(txctl),
+ IDX_IN_WL_CNT_VER_7_T(txprshort),
+ IDX_IN_WL_CNT_VER_7_T(txserr),
+ IDX_IN_WL_CNT_VER_7_T(txnobuf),
+ IDX_IN_WL_CNT_VER_7_T(txnoassoc),
+ IDX_IN_WL_CNT_VER_7_T(txrunt),
+ IDX_IN_WL_CNT_VER_7_T(txchit),
+ IDX_IN_WL_CNT_VER_7_T(txcmiss),
+ IDX_IN_WL_CNT_VER_7_T(txuflo),
+ IDX_IN_WL_CNT_VER_7_T(txphyerr),
+ IDX_IN_WL_CNT_VER_7_T(txphycrs),
+ IDX_IN_WL_CNT_VER_7_T(rxframe),
+ IDX_IN_WL_CNT_VER_7_T(rxbyte),
+ IDX_IN_WL_CNT_VER_7_T(rxerror),
+ IDX_IN_WL_CNT_VER_7_T(rxctl),
+ IDX_IN_WL_CNT_VER_7_T(rxnobuf),
+ IDX_IN_WL_CNT_VER_7_T(rxnondata),
+ IDX_IN_WL_CNT_VER_7_T(rxbadds),
+ IDX_IN_WL_CNT_VER_7_T(rxbadcm),
+ IDX_IN_WL_CNT_VER_7_T(rxfragerr),
+ IDX_IN_WL_CNT_VER_7_T(rxrunt),
+ IDX_IN_WL_CNT_VER_7_T(rxgiant),
+ IDX_IN_WL_CNT_VER_7_T(rxnoscb),
+ IDX_IN_WL_CNT_VER_7_T(rxbadproto),
+ IDX_IN_WL_CNT_VER_7_T(rxbadsrcmac),
+ IDX_IN_WL_CNT_VER_7_T(rxbadda),
+ IDX_IN_WL_CNT_VER_7_T(rxfilter),
+ IDX_IN_WL_CNT_VER_7_T(rxoflo),
+ IDX_IN_WL_CNT_VER_7_T(rxuflo),
+ IDX_IN_WL_CNT_VER_7_T(rxuflo) + 1,
+ IDX_IN_WL_CNT_VER_7_T(rxuflo) + 2,
+ IDX_IN_WL_CNT_VER_7_T(rxuflo) + 3,
+ IDX_IN_WL_CNT_VER_7_T(rxuflo) + 4,
+ IDX_IN_WL_CNT_VER_7_T(rxuflo) + 5,
+ IDX_IN_WL_CNT_VER_7_T(d11cnt_txrts_off),
+ IDX_IN_WL_CNT_VER_7_T(d11cnt_rxcrc_off),
+ IDX_IN_WL_CNT_VER_7_T(d11cnt_txnocts_off),
+ IDX_IN_WL_CNT_VER_7_T(dmade),
+ IDX_IN_WL_CNT_VER_7_T(dmada),
+ IDX_IN_WL_CNT_VER_7_T(dmape),
+ IDX_IN_WL_CNT_VER_7_T(reset),
+ IDX_IN_WL_CNT_VER_7_T(tbtt),
+ IDX_IN_WL_CNT_VER_7_T(txdmawar),
+ IDX_IN_WL_CNT_VER_7_T(pkt_callback_reg_fail),
+ IDX_IN_WL_CNT_VER_7_T(txfrag),
+ IDX_IN_WL_CNT_VER_7_T(txmulti),
+ IDX_IN_WL_CNT_VER_7_T(txfail),
+ IDX_IN_WL_CNT_VER_7_T(txretry),
+ IDX_IN_WL_CNT_VER_7_T(txretrie),
+ IDX_IN_WL_CNT_VER_7_T(rxdup),
+ IDX_IN_WL_CNT_VER_7_T(txrts),
+ IDX_IN_WL_CNT_VER_7_T(txnocts),
+ IDX_IN_WL_CNT_VER_7_T(txnoack),
+ IDX_IN_WL_CNT_VER_7_T(rxfrag),
+ IDX_IN_WL_CNT_VER_7_T(rxmulti),
+ IDX_IN_WL_CNT_VER_7_T(rxcrc),
+ IDX_IN_WL_CNT_VER_7_T(txfrmsnt),
+ IDX_IN_WL_CNT_VER_7_T(rxundec),
+ IDX_IN_WL_CNT_VER_7_T(tkipmicfaill),
+ IDX_IN_WL_CNT_VER_7_T(tkipcntrmsr),
+ IDX_IN_WL_CNT_VER_7_T(tkipreplay),
+ IDX_IN_WL_CNT_VER_7_T(ccmpfmterr),
+ IDX_IN_WL_CNT_VER_7_T(ccmpreplay),
+ IDX_IN_WL_CNT_VER_7_T(ccmpundec),
+ IDX_IN_WL_CNT_VER_7_T(fourwayfail),
+ IDX_IN_WL_CNT_VER_7_T(wepundec),
+ IDX_IN_WL_CNT_VER_7_T(wepicverr),
+ IDX_IN_WL_CNT_VER_7_T(decsuccess),
+ IDX_IN_WL_CNT_VER_7_T(tkipicverr),
+ IDX_IN_WL_CNT_VER_7_T(wepexcluded),
+ IDX_IN_WL_CNT_VER_7_T(txchanrej),
+ IDX_IN_WL_CNT_VER_7_T(psmwds),
+ IDX_IN_WL_CNT_VER_7_T(phywatchdog),
+ IDX_IN_WL_CNT_VER_7_T(prq_entries_handled),
+ IDX_IN_WL_CNT_VER_7_T(prq_undirected_entries),
+ IDX_IN_WL_CNT_VER_7_T(prq_bad_entries),
+ IDX_IN_WL_CNT_VER_7_T(atim_suppress_count),
+ IDX_IN_WL_CNT_VER_7_T(bcn_template_not_ready),
+ IDX_IN_WL_CNT_VER_7_T(bcn_template_not_ready_done),
+ IDX_IN_WL_CNT_VER_7_T(late_tbtt_dpc),
+ IDX_IN_WL_CNT_VER_7_T(rx1mbps),
+ IDX_IN_WL_CNT_VER_7_T(rx2mbps),
+ IDX_IN_WL_CNT_VER_7_T(rx5mbps5),
+ IDX_IN_WL_CNT_VER_7_T(rx6mbps),
+ IDX_IN_WL_CNT_VER_7_T(rx9mbps),
+ IDX_IN_WL_CNT_VER_7_T(rx11mbps),
+ IDX_IN_WL_CNT_VER_7_T(rx12mbps),
+ IDX_IN_WL_CNT_VER_7_T(rx18mbps),
+ IDX_IN_WL_CNT_VER_7_T(rx24mbps),
+ IDX_IN_WL_CNT_VER_7_T(rx36mbps),
+ IDX_IN_WL_CNT_VER_7_T(rx48mbps),
+ IDX_IN_WL_CNT_VER_7_T(rx54mbps),
+ IDX_IN_WL_CNT_VER_7_T(rx108mbps),
+ IDX_IN_WL_CNT_VER_7_T(rx162mbps),
+ IDX_IN_WL_CNT_VER_7_T(rx216mbps),
+ IDX_IN_WL_CNT_VER_7_T(rx270mbps),
+ IDX_IN_WL_CNT_VER_7_T(rx324mbps),
+ IDX_IN_WL_CNT_VER_7_T(rx378mbps),
+ IDX_IN_WL_CNT_VER_7_T(rx432mbps),
+ IDX_IN_WL_CNT_VER_7_T(rx486mbps),
+ IDX_IN_WL_CNT_VER_7_T(rx540mbps),
+ IDX_IN_WL_CNT_VER_7_T(rfdisable),
+ IDX_IN_WL_CNT_VER_7_T(txexptime),
+ IDX_IN_WL_CNT_VER_7_T(txmpdu_sgi),
+ IDX_IN_WL_CNT_VER_7_T(rxmpdu_sgi),
+ IDX_IN_WL_CNT_VER_7_T(txmpdu_stbc),
+ IDX_IN_WL_CNT_VER_7_T(rxmpdu_stbc),
+ IDX_IN_WL_CNT_VER_7_T(rxundec_mcst),
+ IDX_IN_WL_CNT_VER_7_T(tkipmicfaill_mcst),
+ IDX_IN_WL_CNT_VER_7_T(tkipcntrmsr_mcst),
+ IDX_IN_WL_CNT_VER_7_T(tkipreplay_mcst),
+ IDX_IN_WL_CNT_VER_7_T(ccmpfmterr_mcst),
+ IDX_IN_WL_CNT_VER_7_T(ccmpreplay_mcst),
+ IDX_IN_WL_CNT_VER_7_T(ccmpundec_mcst),
+ IDX_IN_WL_CNT_VER_7_T(fourwayfail_mcst),
+ IDX_IN_WL_CNT_VER_7_T(wepundec_mcst),
+ IDX_IN_WL_CNT_VER_7_T(wepicverr_mcst),
+ IDX_IN_WL_CNT_VER_7_T(decsuccess_mcst),
+ IDX_IN_WL_CNT_VER_7_T(tkipicverr_mcst),
+ IDX_IN_WL_CNT_VER_7_T(wepexcluded_mcst),
+ IDX_IN_WL_CNT_VER_7_T(dma_hang),
+ INVALID_IDX,
+ INVALID_IDX,
+ INVALID_IDX,
+ INVALID_IDX,
+ INVALID_IDX,
+ INVALID_IDX,
+ INVALID_IDX,
+ INVALID_IDX,
+ INVALID_IDX,
+ INVALID_IDX,
+ INVALID_IDX,
+ INVALID_IDX,
+ INVALID_IDX,
+ INVALID_IDX,
+ INVALID_IDX,
+ INVALID_IDX,
+ INVALID_IDX,
+ INVALID_IDX,
+ INVALID_IDX,
+ INVALID_IDX,
+ INVALID_IDX,
+ INVALID_IDX,
+ INVALID_IDX,
+ IDX_IN_WL_CNT_VER_7_T(rxrtry)
+};
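+
+/* The INVALID_IDX run above covers the wl_cnt_wlc_t members after dma_hang
+ * that wl_cnt_ver_7_t never carried; rxrtry is the only later counter that
+ * exists in ver 7, so it is appended as the final entry.
+ */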
+
+/* Number of wl_cnt_wlc_t counters covered by the ver-7 table, including rxrtry */
+#define NUM_OF_WLCCNT_IN_WL_CNT_VER_7_T \
+ (sizeof(wlcntver7t_to_wlcntwlct) / sizeof(uint8))
+
+/* Index conversion table from wl_cnt_ver_11_t to wl_cnt_wlc_t */
+static const uint8 wlcntver11t_to_wlcntwlct[NUM_OF_WLCCNT_IN_WL_CNT_VER_11_T] = {
+ IDX_IN_WL_CNT_VER_11_T(txframe),
+ IDX_IN_WL_CNT_VER_11_T(txbyte),
+ IDX_IN_WL_CNT_VER_11_T(txretrans),
+ IDX_IN_WL_CNT_VER_11_T(txerror),
+ IDX_IN_WL_CNT_VER_11_T(txctl),
+ IDX_IN_WL_CNT_VER_11_T(txprshort),
+ IDX_IN_WL_CNT_VER_11_T(txserr),
+ IDX_IN_WL_CNT_VER_11_T(txnobuf),
+ IDX_IN_WL_CNT_VER_11_T(txnoassoc),
+ IDX_IN_WL_CNT_VER_11_T(txrunt),
+ IDX_IN_WL_CNT_VER_11_T(txchit),
+ IDX_IN_WL_CNT_VER_11_T(txcmiss),
+ IDX_IN_WL_CNT_VER_11_T(txuflo),
+ IDX_IN_WL_CNT_VER_11_T(txphyerr),
+ IDX_IN_WL_CNT_VER_11_T(txphycrs),
+ IDX_IN_WL_CNT_VER_11_T(rxframe),
+ IDX_IN_WL_CNT_VER_11_T(rxbyte),
+ IDX_IN_WL_CNT_VER_11_T(rxerror),
+ IDX_IN_WL_CNT_VER_11_T(rxctl),
+ IDX_IN_WL_CNT_VER_11_T(rxnobuf),
+ IDX_IN_WL_CNT_VER_11_T(rxnondata),
+ IDX_IN_WL_CNT_VER_11_T(rxbadds),
+ IDX_IN_WL_CNT_VER_11_T(rxbadcm),
+ IDX_IN_WL_CNT_VER_11_T(rxfragerr),
+ IDX_IN_WL_CNT_VER_11_T(rxrunt),
+ IDX_IN_WL_CNT_VER_11_T(rxgiant),
+ IDX_IN_WL_CNT_VER_11_T(rxnoscb),
+ IDX_IN_WL_CNT_VER_11_T(rxbadproto),
+ IDX_IN_WL_CNT_VER_11_T(rxbadsrcmac),
+ IDX_IN_WL_CNT_VER_11_T(rxbadda),
+ IDX_IN_WL_CNT_VER_11_T(rxfilter),
+ IDX_IN_WL_CNT_VER_11_T(rxoflo),
+ IDX_IN_WL_CNT_VER_11_T(rxuflo),
+ IDX_IN_WL_CNT_VER_11_T(rxuflo) + 1,
+ IDX_IN_WL_CNT_VER_11_T(rxuflo) + 2,
+ IDX_IN_WL_CNT_VER_11_T(rxuflo) + 3,
+ IDX_IN_WL_CNT_VER_11_T(rxuflo) + 4,
+ IDX_IN_WL_CNT_VER_11_T(rxuflo) + 5,
+ IDX_IN_WL_CNT_VER_11_T(d11cnt_txrts_off),
+ IDX_IN_WL_CNT_VER_11_T(d11cnt_rxcrc_off),
+ IDX_IN_WL_CNT_VER_11_T(d11cnt_txnocts_off),
+ IDX_IN_WL_CNT_VER_11_T(dmade),
+ IDX_IN_WL_CNT_VER_11_T(dmada),
+ IDX_IN_WL_CNT_VER_11_T(dmape),
+ IDX_IN_WL_CNT_VER_11_T(reset),
+ IDX_IN_WL_CNT_VER_11_T(tbtt),
+ IDX_IN_WL_CNT_VER_11_T(txdmawar),
+ IDX_IN_WL_CNT_VER_11_T(pkt_callback_reg_fail),
+ IDX_IN_WL_CNT_VER_11_T(txfrag),
+ IDX_IN_WL_CNT_VER_11_T(txmulti),
+ IDX_IN_WL_CNT_VER_11_T(txfail),
+ IDX_IN_WL_CNT_VER_11_T(txretry),
+ IDX_IN_WL_CNT_VER_11_T(txretrie),
+ IDX_IN_WL_CNT_VER_11_T(rxdup),
+ IDX_IN_WL_CNT_VER_11_T(txrts),
+ IDX_IN_WL_CNT_VER_11_T(txnocts),
+ IDX_IN_WL_CNT_VER_11_T(txnoack),
+ IDX_IN_WL_CNT_VER_11_T(rxfrag),
+ IDX_IN_WL_CNT_VER_11_T(rxmulti),
+ IDX_IN_WL_CNT_VER_11_T(rxcrc),
+ IDX_IN_WL_CNT_VER_11_T(txfrmsnt),
+ IDX_IN_WL_CNT_VER_11_T(rxundec),
+ IDX_IN_WL_CNT_VER_11_T(tkipmicfaill),
+ IDX_IN_WL_CNT_VER_11_T(tkipcntrmsr),
+ IDX_IN_WL_CNT_VER_11_T(tkipreplay),
+ IDX_IN_WL_CNT_VER_11_T(ccmpfmterr),
+ IDX_IN_WL_CNT_VER_11_T(ccmpreplay),
+ IDX_IN_WL_CNT_VER_11_T(ccmpundec),
+ IDX_IN_WL_CNT_VER_11_T(fourwayfail),
+ IDX_IN_WL_CNT_VER_11_T(wepundec),
+ IDX_IN_WL_CNT_VER_11_T(wepicverr),
+ IDX_IN_WL_CNT_VER_11_T(decsuccess),
+ IDX_IN_WL_CNT_VER_11_T(tkipicverr),
+ IDX_IN_WL_CNT_VER_11_T(wepexcluded),
+ IDX_IN_WL_CNT_VER_11_T(txchanrej),
+ IDX_IN_WL_CNT_VER_11_T(psmwds),
+ IDX_IN_WL_CNT_VER_11_T(phywatchdog),
+ IDX_IN_WL_CNT_VER_11_T(prq_entries_handled),
+ IDX_IN_WL_CNT_VER_11_T(prq_undirected_entries),
+ IDX_IN_WL_CNT_VER_11_T(prq_bad_entries),
+ IDX_IN_WL_CNT_VER_11_T(atim_suppress_count),
+ IDX_IN_WL_CNT_VER_11_T(bcn_template_not_ready),
+ IDX_IN_WL_CNT_VER_11_T(bcn_template_not_ready_done),
+ IDX_IN_WL_CNT_VER_11_T(late_tbtt_dpc),
+ IDX_IN_WL_CNT_VER_11_T(rx1mbps),
+ IDX_IN_WL_CNT_VER_11_T(rx2mbps),
+ IDX_IN_WL_CNT_VER_11_T(rx5mbps5),
+ IDX_IN_WL_CNT_VER_11_T(rx6mbps),
+ IDX_IN_WL_CNT_VER_11_T(rx9mbps),
+ IDX_IN_WL_CNT_VER_11_T(rx11mbps),
+ IDX_IN_WL_CNT_VER_11_T(rx12mbps),
+ IDX_IN_WL_CNT_VER_11_T(rx18mbps),
+ IDX_IN_WL_CNT_VER_11_T(rx24mbps),
+ IDX_IN_WL_CNT_VER_11_T(rx36mbps),
+ IDX_IN_WL_CNT_VER_11_T(rx48mbps),
+ IDX_IN_WL_CNT_VER_11_T(rx54mbps),
+ IDX_IN_WL_CNT_VER_11_T(rx108mbps),
+ IDX_IN_WL_CNT_VER_11_T(rx162mbps),
+ IDX_IN_WL_CNT_VER_11_T(rx216mbps),
+ IDX_IN_WL_CNT_VER_11_T(rx270mbps),
+ IDX_IN_WL_CNT_VER_11_T(rx324mbps),
+ IDX_IN_WL_CNT_VER_11_T(rx378mbps),
+ IDX_IN_WL_CNT_VER_11_T(rx432mbps),
+ IDX_IN_WL_CNT_VER_11_T(rx486mbps),
+ IDX_IN_WL_CNT_VER_11_T(rx540mbps),
+ IDX_IN_WL_CNT_VER_11_T(rfdisable),
+ IDX_IN_WL_CNT_VER_11_T(txexptime),
+ IDX_IN_WL_CNT_VER_11_T(txmpdu_sgi),
+ IDX_IN_WL_CNT_VER_11_T(rxmpdu_sgi),
+ IDX_IN_WL_CNT_VER_11_T(txmpdu_stbc),
+ IDX_IN_WL_CNT_VER_11_T(rxmpdu_stbc),
+ IDX_IN_WL_CNT_VER_11_T(rxundec_mcst),
+ IDX_IN_WL_CNT_VER_11_T(tkipmicfaill_mcst),
+ IDX_IN_WL_CNT_VER_11_T(tkipcntrmsr_mcst),
+ IDX_IN_WL_CNT_VER_11_T(tkipreplay_mcst),
+ IDX_IN_WL_CNT_VER_11_T(ccmpfmterr_mcst),
+ IDX_IN_WL_CNT_VER_11_T(ccmpreplay_mcst),
+ IDX_IN_WL_CNT_VER_11_T(ccmpundec_mcst),
+ IDX_IN_WL_CNT_VER_11_T(fourwayfail_mcst),
+ IDX_IN_WL_CNT_VER_11_T(wepundec_mcst),
+ IDX_IN_WL_CNT_VER_11_T(wepicverr_mcst),
+ IDX_IN_WL_CNT_VER_11_T(decsuccess_mcst),
+ IDX_IN_WL_CNT_VER_11_T(tkipicverr_mcst),
+ IDX_IN_WL_CNT_VER_11_T(wepexcluded_mcst),
+ IDX_IN_WL_CNT_VER_11_T(dma_hang),
+ IDX_IN_WL_CNT_VER_11_T(reinit),
+ IDX_IN_WL_CNT_VER_11_T(pstatxucast),
+ IDX_IN_WL_CNT_VER_11_T(pstatxnoassoc),
+ IDX_IN_WL_CNT_VER_11_T(pstarxucast),
+ IDX_IN_WL_CNT_VER_11_T(pstarxbcmc),
+ IDX_IN_WL_CNT_VER_11_T(pstatxbcmc),
+ IDX_IN_WL_CNT_VER_11_T(cso_passthrough),
+ IDX_IN_WL_CNT_VER_11_T(cso_normal),
+ IDX_IN_WL_CNT_VER_11_T(chained),
+ IDX_IN_WL_CNT_VER_11_T(chainedsz1),
+ IDX_IN_WL_CNT_VER_11_T(unchained),
+ IDX_IN_WL_CNT_VER_11_T(maxchainsz),
+ IDX_IN_WL_CNT_VER_11_T(currchainsz),
+ IDX_IN_WL_CNT_VER_11_T(pciereset),
+ IDX_IN_WL_CNT_VER_11_T(cfgrestore),
+ IDX_IN_WL_CNT_VER_11_T(reinitreason),
+ IDX_IN_WL_CNT_VER_11_T(reinitreason) + 1,
+ IDX_IN_WL_CNT_VER_11_T(reinitreason) + 2,
+ IDX_IN_WL_CNT_VER_11_T(reinitreason) + 3,
+ IDX_IN_WL_CNT_VER_11_T(reinitreason) + 4,
+ IDX_IN_WL_CNT_VER_11_T(reinitreason) + 5,
+ IDX_IN_WL_CNT_VER_11_T(reinitreason) + 6,
+ IDX_IN_WL_CNT_VER_11_T(reinitreason) + 7,
+ IDX_IN_WL_CNT_VER_11_T(rxrtry),
+ IDX_IN_WL_CNT_VER_11_T(rxmpdu_mu),
+ IDX_IN_WL_CNT_VER_11_T(txbar),
+ IDX_IN_WL_CNT_VER_11_T(rxbar),
+ IDX_IN_WL_CNT_VER_11_T(txpspoll),
+ IDX_IN_WL_CNT_VER_11_T(rxpspoll),
+ IDX_IN_WL_CNT_VER_11_T(txnull),
+ IDX_IN_WL_CNT_VER_11_T(rxnull),
+ IDX_IN_WL_CNT_VER_11_T(txqosnull),
+ IDX_IN_WL_CNT_VER_11_T(rxqosnull),
+ IDX_IN_WL_CNT_VER_11_T(txassocreq),
+ IDX_IN_WL_CNT_VER_11_T(rxassocreq),
+ IDX_IN_WL_CNT_VER_11_T(txreassocreq),
+ IDX_IN_WL_CNT_VER_11_T(rxreassocreq),
+ IDX_IN_WL_CNT_VER_11_T(txdisassoc),
+ IDX_IN_WL_CNT_VER_11_T(rxdisassoc),
+ IDX_IN_WL_CNT_VER_11_T(txassocrsp),
+ IDX_IN_WL_CNT_VER_11_T(rxassocrsp),
+ IDX_IN_WL_CNT_VER_11_T(txreassocrsp),
+ IDX_IN_WL_CNT_VER_11_T(rxreassocrsp),
+ IDX_IN_WL_CNT_VER_11_T(txauth),
+ IDX_IN_WL_CNT_VER_11_T(rxauth),
+ IDX_IN_WL_CNT_VER_11_T(txdeauth),
+ IDX_IN_WL_CNT_VER_11_T(rxdeauth),
+ IDX_IN_WL_CNT_VER_11_T(txprobereq),
+ IDX_IN_WL_CNT_VER_11_T(rxprobereq),
+ IDX_IN_WL_CNT_VER_11_T(txprobersp),
+ IDX_IN_WL_CNT_VER_11_T(rxprobersp),
+ IDX_IN_WL_CNT_VER_11_T(txaction),
+ IDX_IN_WL_CNT_VER_11_T(rxaction),
+ IDX_IN_WL_CNT_VER_11_T(ampdu_wds),
+ IDX_IN_WL_CNT_VER_11_T(txlost),
+ IDX_IN_WL_CNT_VER_11_T(txdatamcast),
+ IDX_IN_WL_CNT_VER_11_T(txdatabcast),
+ INVALID_IDX,
+ IDX_IN_WL_CNT_VER_11_T(rxback),
+ IDX_IN_WL_CNT_VER_11_T(txback),
+ INVALID_IDX,
+ INVALID_IDX,
+ INVALID_IDX,
+ INVALID_IDX,
+ IDX_IN_WL_CNT_VER_11_T(txbcast),
+ IDX_IN_WL_CNT_VER_11_T(txdropped),
+ IDX_IN_WL_CNT_VER_11_T(rxbcast),
+ IDX_IN_WL_CNT_VER_11_T(rxdropped)
+};
+
+/* Index conversion table from wl_cnt_ver_11_t to
+ * either wl_cnt_ge40mcst_v1_t or wl_cnt_lt40mcst_v1_t
+ */
+static const uint8 wlcntver11t_to_wlcntXX40mcstv1t[WL_CNT_MCST_VAR_NUM] = {
+ IDX_IN_WL_CNT_VER_11_T(txallfrm),
+ IDX_IN_WL_CNT_VER_11_T(txrtsfrm),
+ IDX_IN_WL_CNT_VER_11_T(txctsfrm),
+ IDX_IN_WL_CNT_VER_11_T(txackfrm),
+ IDX_IN_WL_CNT_VER_11_T(txdnlfrm),
+ IDX_IN_WL_CNT_VER_11_T(txbcnfrm),
+ IDX_IN_WL_CNT_VER_11_T(txfunfl),
+ IDX_IN_WL_CNT_VER_11_T(txfunfl) + 1,
+ IDX_IN_WL_CNT_VER_11_T(txfunfl) + 2,
+ IDX_IN_WL_CNT_VER_11_T(txfunfl) + 3,
+ IDX_IN_WL_CNT_VER_11_T(txfunfl) + 4,
+ IDX_IN_WL_CNT_VER_11_T(txfunfl) + 5,
+ IDX_IN_WL_CNT_VER_11_T(txfbw),
+ IDX_IN_WL_CNT_VER_11_T(txmpdu),
+ IDX_IN_WL_CNT_VER_11_T(txtplunfl),
+ IDX_IN_WL_CNT_VER_11_T(txphyerror),
+ IDX_IN_WL_CNT_VER_11_T(pktengrxducast),
+ IDX_IN_WL_CNT_VER_11_T(pktengrxdmcast),
+ IDX_IN_WL_CNT_VER_11_T(rxfrmtoolong),
+ IDX_IN_WL_CNT_VER_11_T(rxfrmtooshrt),
+ IDX_IN_WL_CNT_VER_11_T(rxinvmachdr),
+ IDX_IN_WL_CNT_VER_11_T(rxbadfcs),
+ IDX_IN_WL_CNT_VER_11_T(rxbadplcp),
+ IDX_IN_WL_CNT_VER_11_T(rxcrsglitch),
+ IDX_IN_WL_CNT_VER_11_T(rxstrt),
+ IDX_IN_WL_CNT_VER_11_T(rxdfrmucastmbss),
+ IDX_IN_WL_CNT_VER_11_T(rxmfrmucastmbss),
+ IDX_IN_WL_CNT_VER_11_T(rxcfrmucast),
+ IDX_IN_WL_CNT_VER_11_T(rxrtsucast),
+ IDX_IN_WL_CNT_VER_11_T(rxctsucast),
+ IDX_IN_WL_CNT_VER_11_T(rxackucast),
+ IDX_IN_WL_CNT_VER_11_T(rxdfrmocast),
+ IDX_IN_WL_CNT_VER_11_T(rxmfrmocast),
+ IDX_IN_WL_CNT_VER_11_T(rxcfrmocast),
+ IDX_IN_WL_CNT_VER_11_T(rxrtsocast),
+ IDX_IN_WL_CNT_VER_11_T(rxctsocast),
+ IDX_IN_WL_CNT_VER_11_T(rxdfrmmcast),
+ IDX_IN_WL_CNT_VER_11_T(rxmfrmmcast),
+ IDX_IN_WL_CNT_VER_11_T(rxcfrmmcast),
+ IDX_IN_WL_CNT_VER_11_T(rxbeaconmbss),
+ IDX_IN_WL_CNT_VER_11_T(rxdfrmucastobss),
+ IDX_IN_WL_CNT_VER_11_T(rxbeaconobss),
+ IDX_IN_WL_CNT_VER_11_T(rxrsptmout),
+ IDX_IN_WL_CNT_VER_11_T(bcntxcancl),
+ IDX_IN_WL_CNT_VER_11_T(rxnodelim),
+ IDX_IN_WL_CNT_VER_11_T(rxf0ovfl),
+ IDX_IN_WL_CNT_VER_11_T(rxf1ovfl),
+ IDX_IN_WL_CNT_VER_11_T(rxf2ovfl),
+ IDX_IN_WL_CNT_VER_11_T(txsfovfl),
+ IDX_IN_WL_CNT_VER_11_T(pmqovfl),
+ IDX_IN_WL_CNT_VER_11_T(rxcgprqfrm),
+ IDX_IN_WL_CNT_VER_11_T(rxcgprsqovfl),
+ IDX_IN_WL_CNT_VER_11_T(txcgprsfail),
+ IDX_IN_WL_CNT_VER_11_T(txcgprssuc),
+ IDX_IN_WL_CNT_VER_11_T(prs_timeout),
+ IDX_IN_WL_CNT_VER_11_T(rxnack),
+ IDX_IN_WL_CNT_VER_11_T(frmscons),
+ IDX_IN_WL_CNT_VER_11_T(txnack),
+ IDX_IN_WL_CNT_VER_11_T(rxback),
+ IDX_IN_WL_CNT_VER_11_T(txback),
+ IDX_IN_WL_CNT_VER_11_T(bphy_rxcrsglitch),
+ IDX_IN_WL_CNT_VER_11_T(rxdrop20s),
+ IDX_IN_WL_CNT_VER_11_T(rxtoolate),
+ IDX_IN_WL_CNT_VER_11_T(bphy_badplcp)
+};
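+
+/* A single table serves both destination layouts because wl_cnt_ge40mcst_v1_t
+ * and wl_cnt_lt40mcst_v1_t keep these counters in the same member order; only
+ * the xtlv id (GE40 vs LT40) chosen by the caller differs.
+ */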
+
+/* For mcst offsets that are not used (2 pad slots). */
+#define INVALID_MCST_IDX ((uint8)(-1))
+/* Index conversion table from wl_cnt_ver_11_t to wl_cnt_v_le10_mcst_t */
+static const uint8 wlcntver11t_to_wlcntvle10mcstt[WL_CNT_MCST_VAR_NUM] = {
+ IDX_IN_WL_CNT_VER_11_T(txallfrm),
+ IDX_IN_WL_CNT_VER_11_T(txrtsfrm),
+ IDX_IN_WL_CNT_VER_11_T(txctsfrm),
+ IDX_IN_WL_CNT_VER_11_T(txackfrm),
+ IDX_IN_WL_CNT_VER_11_T(txdnlfrm),
+ IDX_IN_WL_CNT_VER_11_T(txbcnfrm),
+ IDX_IN_WL_CNT_VER_11_T(txfunfl),
+ IDX_IN_WL_CNT_VER_11_T(txfunfl) + 1,
+ IDX_IN_WL_CNT_VER_11_T(txfunfl) + 2,
+ IDX_IN_WL_CNT_VER_11_T(txfunfl) + 3,
+ IDX_IN_WL_CNT_VER_11_T(txfunfl) + 4,
+ IDX_IN_WL_CNT_VER_11_T(txfunfl) + 5,
+ IDX_IN_WL_CNT_VER_11_T(txfbw),
+ INVALID_MCST_IDX,
+ IDX_IN_WL_CNT_VER_11_T(txtplunfl),
+ IDX_IN_WL_CNT_VER_11_T(txphyerror),
+ IDX_IN_WL_CNT_VER_11_T(pktengrxducast),
+ IDX_IN_WL_CNT_VER_11_T(pktengrxdmcast),
+ IDX_IN_WL_CNT_VER_11_T(rxfrmtoolong),
+ IDX_IN_WL_CNT_VER_11_T(rxfrmtooshrt),
+ IDX_IN_WL_CNT_VER_11_T(rxinvmachdr),
+ IDX_IN_WL_CNT_VER_11_T(rxbadfcs),
+ IDX_IN_WL_CNT_VER_11_T(rxbadplcp),
+ IDX_IN_WL_CNT_VER_11_T(rxcrsglitch),
+ IDX_IN_WL_CNT_VER_11_T(rxstrt),
+ IDX_IN_WL_CNT_VER_11_T(rxdfrmucastmbss),
+ IDX_IN_WL_CNT_VER_11_T(rxmfrmucastmbss),
+ IDX_IN_WL_CNT_VER_11_T(rxcfrmucast),
+ IDX_IN_WL_CNT_VER_11_T(rxrtsucast),
+ IDX_IN_WL_CNT_VER_11_T(rxctsucast),
+ IDX_IN_WL_CNT_VER_11_T(rxackucast),
+ IDX_IN_WL_CNT_VER_11_T(rxdfrmocast),
+ IDX_IN_WL_CNT_VER_11_T(rxmfrmocast),
+ IDX_IN_WL_CNT_VER_11_T(rxcfrmocast),
+ IDX_IN_WL_CNT_VER_11_T(rxrtsocast),
+ IDX_IN_WL_CNT_VER_11_T(rxctsocast),
+ IDX_IN_WL_CNT_VER_11_T(rxdfrmmcast),
+ IDX_IN_WL_CNT_VER_11_T(rxmfrmmcast),
+ IDX_IN_WL_CNT_VER_11_T(rxcfrmmcast),
+ IDX_IN_WL_CNT_VER_11_T(rxbeaconmbss),
+ IDX_IN_WL_CNT_VER_11_T(rxdfrmucastobss),
+ IDX_IN_WL_CNT_VER_11_T(rxbeaconobss),
+ IDX_IN_WL_CNT_VER_11_T(rxrsptmout),
+ IDX_IN_WL_CNT_VER_11_T(bcntxcancl),
+ INVALID_MCST_IDX,
+ IDX_IN_WL_CNT_VER_11_T(rxf0ovfl),
+ IDX_IN_WL_CNT_VER_11_T(rxf1ovfl),
+ IDX_IN_WL_CNT_VER_11_T(rxf2ovfl),
+ IDX_IN_WL_CNT_VER_11_T(txsfovfl),
+ IDX_IN_WL_CNT_VER_11_T(pmqovfl),
+ IDX_IN_WL_CNT_VER_11_T(rxcgprqfrm),
+ IDX_IN_WL_CNT_VER_11_T(rxcgprsqovfl),
+ IDX_IN_WL_CNT_VER_11_T(txcgprsfail),
+ IDX_IN_WL_CNT_VER_11_T(txcgprssuc),
+ IDX_IN_WL_CNT_VER_11_T(prs_timeout),
+ IDX_IN_WL_CNT_VER_11_T(rxnack),
+ IDX_IN_WL_CNT_VER_11_T(frmscons),
+ IDX_IN_WL_CNT_VER_11_T(txnack),
+ IDX_IN_WL_CNT_VER_11_T(rxback),
+ IDX_IN_WL_CNT_VER_11_T(txback),
+ IDX_IN_WL_CNT_VER_11_T(bphy_rxcrsglitch),
+ IDX_IN_WL_CNT_VER_11_T(rxdrop20s),
+ IDX_IN_WL_CNT_VER_11_T(rxtoolate),
+ IDX_IN_WL_CNT_VER_11_T(bphy_badplcp)
+};
+
+/* Index conversion table from wl_cnt_ver_6_t to wl_cnt_v_le10_mcst_t */
+static const uint8 wlcntver6t_to_wlcntvle10mcstt[WL_CNT_MCST_VAR_NUM] = {
+ IDX_IN_WL_CNT_VER_6_T(txallfrm),
+ IDX_IN_WL_CNT_VER_6_T(txrtsfrm),
+ IDX_IN_WL_CNT_VER_6_T(txctsfrm),
+ IDX_IN_WL_CNT_VER_6_T(txackfrm),
+ IDX_IN_WL_CNT_VER_6_T(txdnlfrm),
+ IDX_IN_WL_CNT_VER_6_T(txbcnfrm),
+ IDX_IN_WL_CNT_VER_6_T(txfunfl),
+ IDX_IN_WL_CNT_VER_6_T(txfunfl) + 1,
+ IDX_IN_WL_CNT_VER_6_T(txfunfl) + 2,
+ IDX_IN_WL_CNT_VER_6_T(txfunfl) + 3,
+ IDX_IN_WL_CNT_VER_6_T(txfunfl) + 4,
+ IDX_IN_WL_CNT_VER_6_T(txfunfl) + 5,
+ IDX_IN_WL_CNT_VER_6_T(txfbw),
+ INVALID_MCST_IDX,
+ IDX_IN_WL_CNT_VER_6_T(txtplunfl),
+ IDX_IN_WL_CNT_VER_6_T(txphyerror),
+ IDX_IN_WL_CNT_VER_6_T(pktengrxducast),
+ IDX_IN_WL_CNT_VER_6_T(pktengrxdmcast),
+ IDX_IN_WL_CNT_VER_6_T(rxfrmtoolong),
+ IDX_IN_WL_CNT_VER_6_T(rxfrmtooshrt),
+ IDX_IN_WL_CNT_VER_6_T(rxinvmachdr),
+ IDX_IN_WL_CNT_VER_6_T(rxbadfcs),
+ IDX_IN_WL_CNT_VER_6_T(rxbadplcp),
+ IDX_IN_WL_CNT_VER_6_T(rxcrsglitch),
+ IDX_IN_WL_CNT_VER_6_T(rxstrt),
+ IDX_IN_WL_CNT_VER_6_T(rxdfrmucastmbss),
+ IDX_IN_WL_CNT_VER_6_T(rxmfrmucastmbss),
+ IDX_IN_WL_CNT_VER_6_T(rxcfrmucast),
+ IDX_IN_WL_CNT_VER_6_T(rxrtsucast),
+ IDX_IN_WL_CNT_VER_6_T(rxctsucast),
+ IDX_IN_WL_CNT_VER_6_T(rxackucast),
+ IDX_IN_WL_CNT_VER_6_T(rxdfrmocast),
+ IDX_IN_WL_CNT_VER_6_T(rxmfrmocast),
+ IDX_IN_WL_CNT_VER_6_T(rxcfrmocast),
+ IDX_IN_WL_CNT_VER_6_T(rxrtsocast),
+ IDX_IN_WL_CNT_VER_6_T(rxctsocast),
+ IDX_IN_WL_CNT_VER_6_T(rxdfrmmcast),
+ IDX_IN_WL_CNT_VER_6_T(rxmfrmmcast),
+ IDX_IN_WL_CNT_VER_6_T(rxcfrmmcast),
+ IDX_IN_WL_CNT_VER_6_T(rxbeaconmbss),
+ IDX_IN_WL_CNT_VER_6_T(rxdfrmucastobss),
+ IDX_IN_WL_CNT_VER_6_T(rxbeaconobss),
+ IDX_IN_WL_CNT_VER_6_T(rxrsptmout),
+ IDX_IN_WL_CNT_VER_6_T(bcntxcancl),
+ INVALID_MCST_IDX,
+ IDX_IN_WL_CNT_VER_6_T(rxf0ovfl),
+ IDX_IN_WL_CNT_VER_6_T(rxf1ovfl),
+ IDX_IN_WL_CNT_VER_6_T(rxf2ovfl),
+ IDX_IN_WL_CNT_VER_6_T(txsfovfl),
+ IDX_IN_WL_CNT_VER_6_T(pmqovfl),
+ IDX_IN_WL_CNT_VER_6_T(rxcgprqfrm),
+ IDX_IN_WL_CNT_VER_6_T(rxcgprsqovfl),
+ IDX_IN_WL_CNT_VER_6_T(txcgprsfail),
+ IDX_IN_WL_CNT_VER_6_T(txcgprssuc),
+ IDX_IN_WL_CNT_VER_6_T(prs_timeout),
+ IDX_IN_WL_CNT_VER_6_T(rxnack),
+ IDX_IN_WL_CNT_VER_6_T(frmscons),
+ IDX_IN_WL_CNT_VER_6_T(txnack),
+ IDX_IN_WL_CNT_VER_6_T(rxback),
+ IDX_IN_WL_CNT_VER_6_T(txback),
+ IDX_IN_WL_CNT_VER_6_T(bphy_rxcrsglitch),
+ IDX_IN_WL_CNT_VER_6_T(rxdrop20s),
+ IDX_IN_WL_CNT_VER_6_T(rxtoolate),
+ IDX_IN_WL_CNT_VER_6_T(bphy_badplcp)
+};
+
+/* Index conversion table from wl_cnt_ver_7_t to wl_cnt_v_le10_mcst_t */
+static const uint8 wlcntver7t_to_wlcntvle10mcstt[WL_CNT_MCST_VAR_NUM] = {
+ IDX_IN_WL_CNT_VER_7_T(txallfrm),
+ IDX_IN_WL_CNT_VER_7_T(txrtsfrm),
+ IDX_IN_WL_CNT_VER_7_T(txctsfrm),
+ IDX_IN_WL_CNT_VER_7_T(txackfrm),
+ IDX_IN_WL_CNT_VER_7_T(txdnlfrm),
+ IDX_IN_WL_CNT_VER_7_T(txbcnfrm),
+ IDX_IN_WL_CNT_VER_7_T(txfunfl),
+ IDX_IN_WL_CNT_VER_7_T(txfunfl) + 1,
+ IDX_IN_WL_CNT_VER_7_T(txfunfl) + 2,
+ IDX_IN_WL_CNT_VER_7_T(txfunfl) + 3,
+ IDX_IN_WL_CNT_VER_7_T(txfunfl) + 4,
+ IDX_IN_WL_CNT_VER_7_T(txfunfl) + 5,
+ INVALID_MCST_IDX,
+ INVALID_MCST_IDX,
+ IDX_IN_WL_CNT_VER_7_T(txtplunfl),
+ IDX_IN_WL_CNT_VER_7_T(txphyerror),
+ IDX_IN_WL_CNT_VER_7_T(pktengrxducast),
+ IDX_IN_WL_CNT_VER_7_T(pktengrxdmcast),
+ IDX_IN_WL_CNT_VER_7_T(rxfrmtoolong),
+ IDX_IN_WL_CNT_VER_7_T(rxfrmtooshrt),
+ IDX_IN_WL_CNT_VER_7_T(rxinvmachdr),
+ IDX_IN_WL_CNT_VER_7_T(rxbadfcs),
+ IDX_IN_WL_CNT_VER_7_T(rxbadplcp),
+ IDX_IN_WL_CNT_VER_7_T(rxcrsglitch),
+ IDX_IN_WL_CNT_VER_7_T(rxstrt),
+ IDX_IN_WL_CNT_VER_7_T(rxdfrmucastmbss),
+ IDX_IN_WL_CNT_VER_7_T(rxmfrmucastmbss),
+ IDX_IN_WL_CNT_VER_7_T(rxcfrmucast),
+ IDX_IN_WL_CNT_VER_7_T(rxrtsucast),
+ IDX_IN_WL_CNT_VER_7_T(rxctsucast),
+ IDX_IN_WL_CNT_VER_7_T(rxackucast),
+ IDX_IN_WL_CNT_VER_7_T(rxdfrmocast),
+ IDX_IN_WL_CNT_VER_7_T(rxmfrmocast),
+ IDX_IN_WL_CNT_VER_7_T(rxcfrmocast),
+ IDX_IN_WL_CNT_VER_7_T(rxrtsocast),
+ IDX_IN_WL_CNT_VER_7_T(rxctsocast),
+ IDX_IN_WL_CNT_VER_7_T(rxdfrmmcast),
+ IDX_IN_WL_CNT_VER_7_T(rxmfrmmcast),
+ IDX_IN_WL_CNT_VER_7_T(rxcfrmmcast),
+ IDX_IN_WL_CNT_VER_7_T(rxbeaconmbss),
+ IDX_IN_WL_CNT_VER_7_T(rxdfrmucastobss),
+ IDX_IN_WL_CNT_VER_7_T(rxbeaconobss),
+ IDX_IN_WL_CNT_VER_7_T(rxrsptmout),
+ IDX_IN_WL_CNT_VER_7_T(bcntxcancl),
+ INVALID_MCST_IDX,
+ IDX_IN_WL_CNT_VER_7_T(rxf0ovfl),
+ IDX_IN_WL_CNT_VER_7_T(rxf1ovfl),
+ IDX_IN_WL_CNT_VER_7_T(rxf2ovfl),
+ IDX_IN_WL_CNT_VER_7_T(txsfovfl),
+ IDX_IN_WL_CNT_VER_7_T(pmqovfl),
+ IDX_IN_WL_CNT_VER_7_T(rxcgprqfrm),
+ IDX_IN_WL_CNT_VER_7_T(rxcgprsqovfl),
+ IDX_IN_WL_CNT_VER_7_T(txcgprsfail),
+ IDX_IN_WL_CNT_VER_7_T(txcgprssuc),
+ IDX_IN_WL_CNT_VER_7_T(prs_timeout),
+ IDX_IN_WL_CNT_VER_7_T(rxnack),
+ IDX_IN_WL_CNT_VER_7_T(frmscons),
+ IDX_IN_WL_CNT_VER_7_T(txnack),
+ INVALID_MCST_IDX,
+ INVALID_MCST_IDX,
+ IDX_IN_WL_CNT_VER_7_T(bphy_rxcrsglitch),
+ INVALID_MCST_IDX,
+ INVALID_MCST_IDX,
+ INVALID_MCST_IDX
+};
+
+/* Copy wlc-layer counters from an old-format cntbuf into a wl_cnt_wlc_t. */
+static int
+wl_copy_wlccnt(uint16 cntver, uint32 *dst, uint32 *src, uint8 src_max_idx)
+{
+ uint i;
+ if (dst == NULL || src == NULL) {
+ return BCME_ERROR;
+ }
+
+ /* Init wlccnt with invalid values; entries left unchanged will not be printed out */
+ for (i = 0; i < (sizeof(wl_cnt_wlc_t) / sizeof(uint32)); i++) {
+ dst[i] = INVALID_CNT_VAL;
+ }
+
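+ /* The ver 6 and ver 11 tables are ordered by source index, so an entry at
+ * or beyond src_max_idx means the source buffer ends there (break), while
+ * INVALID_IDX slots mark counters absent from that version and are skipped.
+ * The ver 7 table is not ordered, so its out-of-range entries are only skipped.
+ */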
+ if (cntver == WL_CNT_VERSION_6) {
+ for (i = 0; i < NUM_OF_WLCCNT_IN_WL_CNT_VER_6_T; i++) {
+ if (wlcntver6t_to_wlcntwlct[i] >= src_max_idx) {
+ /* src buffer does not have counters from here */
+ break;
+ }
+ dst[i] = src[wlcntver6t_to_wlcntwlct[i]];
+ }
+ } else if (cntver == WL_CNT_VERSION_7) {
+ for (i = 0; i < NUM_OF_WLCCNT_IN_WL_CNT_VER_7_T; i++) {
+ if (wlcntver7t_to_wlcntwlct[i] >= src_max_idx ||
+ wlcntver7t_to_wlcntwlct[i] == INVALID_IDX) {
+ continue;
+ }
+ dst[i] = src[wlcntver7t_to_wlcntwlct[i]];
+ }
+ } else {
+ for (i = 0; i < NUM_OF_WLCCNT_IN_WL_CNT_VER_11_T; i++) {
+ if (wlcntver11t_to_wlcntwlct[i] >= src_max_idx) {
+ if (wlcntver11t_to_wlcntwlct[i] == INVALID_IDX) {
+ continue;
+ } else {
+ /* src buffer does not have counters from here */
+ break;
+ }
+ }
+ dst[i] = src[wlcntver11t_to_wlcntwlct[i]];
+ }
+ }
+ return BCME_OK;
+}
+
+/* Copy macstat counters from an old-format cntbuf into a wl_cnt_v_le10_mcst_t. */
+static int
+wl_copy_macstat_upto_ver10(uint16 cntver, uint32 *dst, uint32 *src)
+{
+ uint i;
+
+ if (dst == NULL || src == NULL) {
+ return BCME_ERROR;
+ }
+
+ if (cntver == WL_CNT_VERSION_6) {
+ for (i = 0; i < WL_CNT_MCST_VAR_NUM; i++) {
+ if (wlcntver6t_to_wlcntvle10mcstt[i] == INVALID_MCST_IDX) {
+ /* This mcst counter does not exist in wl_cnt_ver_6_t */
+ dst[i] = INVALID_CNT_VAL;
+ } else {
+ dst[i] = src[wlcntver6t_to_wlcntvle10mcstt[i]];
+ }
+ }
+ } else if (cntver == WL_CNT_VERSION_7) {
+ for (i = 0; i < WL_CNT_MCST_VAR_NUM; i++) {
+ if (wlcntver7t_to_wlcntvle10mcstt[i] == INVALID_MCST_IDX) {
+ /* This mcst counter does not exist in wl_cnt_ver_7_t */
+ dst[i] = INVALID_CNT_VAL;
+ } else {
+ dst[i] = src[wlcntver7t_to_wlcntvle10mcstt[i]];
+ }
+ }
+ } else {
+ for (i = 0; i < WL_CNT_MCST_VAR_NUM; i++) {
+ if (wlcntver11t_to_wlcntvle10mcstt[i] == INVALID_MCST_IDX) {
+ /* This mcst counter does not exist in wl_cnt_ver_11_t */
+ dst[i] = INVALID_CNT_VAL;
+ } else {
+ dst[i] = src[wlcntver11t_to_wlcntvle10mcstt[i]];
+ }
+ }
+ }
+ return BCME_OK;
+}
+
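+/* Copy ver-11 macstat counters. Every slot in wlcntver11t_to_wlcntXX40mcstv1t
+ * is valid, so no INVALID_MCST_IDX check is needed here.
+ */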
+static int
+wl_copy_macstat_ver11(uint32 *dst, uint32 *src)
+{
+ uint i;
+
+ if (dst == NULL || src == NULL) {
+ return BCME_ERROR;
+ }
+
+ for (i = 0; i < WL_CNT_MCST_VAR_NUM; i++) {
+ dst[i] = src[wlcntver11t_to_wlcntXX40mcstv1t[i]];
+ }
+ return BCME_OK;
+}
+
+/**
+ * Translate a non-xtlv 'wl counters' IOVar buffer received from an old driver/FW
+ * into xtlv format.
+ * Parameters:
+ * ctx: when compiled with BCMDRIVER, the osl_t handle used for MALLOC/MFREE;
+ * otherwise unused.
+ * cntbuf: pointer to the non-xtlv 'wl counters' IOVar buffer received from the
+ * old driver/FW. The newly translated xtlv format is written back to this buffer.
+ * buflen: length of "cntbuf" without any padding.
+ * corerev: chip core revision of the driver/FW.
+ */
+int
+wl_cntbuf_to_xtlv_format(void *ctx, void *cntbuf, int buflen, uint32 corerev)
+{
+ wl_cnt_wlc_t *wlccnt = NULL;
+ uint32 *macstat = NULL;
+ xtlv_desc_t xtlv_desc[3];
+ uint16 mcst_xtlv_id;
+ int res = BCME_OK;
+ wl_cnt_info_t *cntinfo = cntbuf;
+ uint8 *xtlvbuf_p = cntinfo->data;
+ uint16 ver = cntinfo->version;
+ uint16 xtlvbuflen = (uint16)buflen;
+ uint16 src_max_idx;
+#ifdef BCMDRIVER
+ osl_t *osh = ctx;
+#else
+ BCM_REFERENCE(ctx);
+#endif
+
+ if (ver >= WL_CNT_VERSION_XTLV) {
+ /* Already in xtlv format. */
+ goto exit;
+ }
+
+#ifdef BCMDRIVER
+ wlccnt = MALLOC(osh, sizeof(*wlccnt));
+ macstat = MALLOC(osh, WL_CNT_MCST_STRUCT_SZ);
+#else
+ wlccnt = (wl_cnt_wlc_t *)malloc(sizeof(*wlccnt));
+ macstat = (uint32 *)malloc(WL_CNT_MCST_STRUCT_SZ);
+#endif
+ if (!wlccnt || !macstat) {
+ printf("wl_cntbuf_to_xtlv_format: malloc fail!\n");
+ res = BCME_NOMEM;
+ goto exit;
+ }
+
+ /* Check if the max idx in the struct exceeds the boundary of uint8 */
+ if (NUM_OF_CNT_IN_WL_CNT_VER_6_T > ((uint8)(-1) + 1) ||
+ NUM_OF_CNT_IN_WL_CNT_VER_7_T > ((uint8)(-1) + 1) ||
+ NUM_OF_CNT_IN_WL_CNT_VER_11_T > ((uint8)(-1) + 1)) {
+ printf("wlcntverXXt_to_wlcntwlct and src_max_idx need"
+ " to be of uint16 instead of uint8\n");
+ res = BCME_ERROR;
+ goto exit;
+ }
+
+ /* Exclude version and length fields in either wl_cnt_ver_6_t or wl_cnt_ver_11_t */
+ src_max_idx = (cntinfo->datalen - OFFSETOF(wl_cnt_info_t, data)) / sizeof(uint32);
+ if (src_max_idx > (uint8)(-1)) {
+ printf("wlcntverXXt_to_wlcntwlct and src_max_idx need"
+ " to be of uint16 instead of uint8\n"
+ "Try updating wl utility to the latest.\n");
+ src_max_idx = (uint8)(-1);
+ }
+
+ /* Copy wlc layer counters to wl_cnt_wlc_t */
+ res = wl_copy_wlccnt(ver, (uint32 *)wlccnt, (uint32 *)cntinfo->data, (uint8)src_max_idx);
+ if (res != BCME_OK) {
+ printf("wl_copy_wlccnt fail!\n");
+ goto exit;
+ }
+
+ /* Copy macstat counters into the macstat buffer */
+ if (ver == WL_CNT_VERSION_11) {
+ res = wl_copy_macstat_ver11(macstat, (uint32 *)cntinfo->data);
+ if (res != BCME_OK) {
+ printf("wl_copy_macstat_ver11 fail!\n");
+ goto exit;
+ }
+ if (corerev >= 40) {
+ mcst_xtlv_id = WL_CNT_XTLV_GE40_UCODE_V1;
+ } else {
+ mcst_xtlv_id = WL_CNT_XTLV_LT40_UCODE_V1;
+ }
+ } else {
+ res = wl_copy_macstat_upto_ver10(ver, macstat, (uint32 *)cntinfo->data);
+ if (res != BCME_OK) {
+ printf("wl_copy_macstat_upto_ver10 fail!\n");
+ goto exit;
+ }
+ mcst_xtlv_id = WL_CNT_XTLV_CNTV_LE10_UCODE;
+ }
+
+ xtlv_desc[0].type = WL_CNT_XTLV_WLC;
+ xtlv_desc[0].len = sizeof(*wlccnt);
+ xtlv_desc[0].ptr = wlccnt;
+
+ xtlv_desc[1].type = mcst_xtlv_id;
+ xtlv_desc[1].len = WL_CNT_MCST_STRUCT_SZ;
+ xtlv_desc[1].ptr = macstat;
+
+ xtlv_desc[2].type = 0;
+ xtlv_desc[2].len = 0;
+ xtlv_desc[2].ptr = NULL;
+
+ memset(cntbuf, 0, buflen);
+
+ res = bcm_pack_xtlv_buf_from_mem(&xtlvbuf_p, &xtlvbuflen,
+ xtlv_desc, BCM_XTLV_OPTION_ALIGN32);
+ cntinfo->datalen = (buflen - xtlvbuflen);
+exit:
+#ifdef BCMDRIVER
+ if (wlccnt) {
+ MFREE(osh, wlccnt, sizeof(*wlccnt));
+ }
+ if (macstat) {
+ MFREE(osh, macstat, WL_CNT_MCST_STRUCT_SZ);
+ }
+#else
+ if (wlccnt) {
+ free(wlccnt);
+ }
+ if (macstat) {
+ free(macstat);
+ }
+#endif
+ return res;
+}
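+
+#if 0
+/* Illustrative usage sketch only (not compiled): a caller that has fetched
+ * the "counters" IOVar into iovbuf/iovlen would translate the buffer in
+ * place before walking the xtlvs. The names iovbuf, iovlen, osh and corerev
+ * are assumptions for the example, not part of this file.
+ */
+static void wl_counters_example(osl_t *osh, void *iovbuf, int iovlen, uint32 corerev)
+{
+	wl_cnt_info_t *ci = (wl_cnt_info_t *)iovbuf;
+
+	if (ci->version < WL_CNT_VERSION_XTLV) {
+		/* old-format buffer: rewrite it into xtlv form */
+		if (wl_cntbuf_to_xtlv_format(osh, iovbuf, iovlen, corerev) != BCME_OK) {
+			printf("counters translation failed\n");
+		}
+	}
+	/* ci->data now holds xtlvs of length ci->datalen */
+}
+#endif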
diff --git a/bcmdhd.101.10.361.x/bcm_l2_filter.c b/bcmdhd.101.10.361.x/bcm_l2_filter.c
new file mode 100755
index 0000000..5a5ca2c
--- /dev/null
+++ b/bcmdhd.101.10.361.x/bcm_l2_filter.c
@@ -0,0 +1,766 @@
+/*
+ * L2 Filter handling functions
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ *
+ */
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <bcmdefs.h>
+#include <bcmdevs.h>
+#include <ethernet.h>
+#include <bcmip.h>
+#include <bcmipv6.h>
+#include <bcmudp.h>
+#include <bcmarp.h>
+#include <bcmicmp.h>
+#include <bcmproto.h>
+#include <bcmdhcp.h>
+#include <802.11.h>
+#include <bcm_l2_filter.h>
+
+#ifdef BCMDBG_ERR
+#define L2_FILTER_ERROR(args) printf args
+#else
+#define L2_FILTER_ERROR(args)
+#endif /* BCMDBG_ERR */
+
+#ifdef BCMDBG_MSG
+#define L2_FILTER_MSG(args) printf args
+#else
+#define L2_FILTER_MSG(args)
+#endif /* BCMDBG_MSG */
+
+struct arp_table {
+ parp_entry_t *parp_table[BCM_PARP_TABLE_SIZE]; /* proxyarp entries in cache table */
+ parp_entry_t *parp_candidate_list; /* proxyarp entries in candidate list */
+ uint8 parp_smac[ETHER_ADDR_LEN]; /* L2 SMAC from DHCP Req */
+ uint8 parp_cmac[ETHER_ADDR_LEN]; /* Bootp Client MAC from DHCP Req */
+};
+#ifdef DHD_DUMP_ARPTABLE
+void bcm_l2_parp_dump_table(arp_table_t* arp_tbl);
+
+void
+bcm_l2_parp_dump_table(arp_table_t* arp_tbl)
+{
+ parp_entry_t *entry;
+ uint16 idx, ip_len;
+ arp_table_t *ptable;
+ ip_len = IPV4_ADDR_LEN;
+ ptable = arp_tbl;
+ for (idx = 0; idx < BCM_PARP_TABLE_SIZE; idx++) {
+ entry = ptable->parp_table[idx];
+ while (entry) {
+ printf("Cached entries..\n");
+ printf("%d: %d.%d.%d.%d", idx, entry->ip.data[0], entry->ip.data[1],
+ entry->ip.data[2], entry->ip.data[3]);
+ printf("%02x:%02x:%02x:%02x:%02x:%02x", entry->ea.octet[0],
+ entry->ea.octet[1], entry->ea.octet[2], entry->ea.octet[3],
+ entry->ea.octet[4], entry->ea.octet[5]);
+ printf("\n");
+ entry = entry->next;
+ }
+ }
+ entry = ptable->parp_candidate_list;
+ while (entry) {
+ printf("Candidate entries..\n");
+ printf("%d.%d.%d.%d", entry->ip.data[0], entry->ip.data[1],
+ entry->ip.data[2], entry->ip.data[3]);
+ printf("%02x:%02x:%02x:%02x:%02x:%02x", entry->ea.octet[0],
+ entry->ea.octet[1], entry->ea.octet[2], entry->ea.octet[3],
+ entry->ea.octet[4], entry->ea.octet[5]);
+
+ printf("\n");
+ entry = entry->next;
+ }
+}
+#endif /* DHD_DUMP_ARPTABLE */
+
+arp_table_t* init_l2_filter_arp_table(osl_t* osh)
+{
+ return ((arp_table_t*)MALLOCZ(osh, sizeof(arp_table_t)));
+}
+
+void deinit_l2_filter_arp_table(osl_t* osh, arp_table_t* ptable)
+{
+ MFREE(osh, ptable, sizeof(arp_table_t));
+}
+/* Returns BCME_OK (0) for a gratuitous ARP or an unsolicited neighbour advertisement */
+int
+bcm_l2_filter_gratuitous_arp(osl_t *osh, void *pktbuf)
+{
+ uint8 *frame = PKTDATA(osh, pktbuf);
+ uint16 ethertype;
+ int send_ip_offset, target_ip_offset;
+ int iplen;
+ int minlen;
+ uint8 *data;
+ int datalen;
+ bool snap;
+
+ if (get_pkt_ether_type(osh, pktbuf, &data, &datalen, &ethertype, &snap) != BCME_OK)
+ return BCME_ERROR;
+
+ if (!ETHER_ISBCAST(frame + ETHER_DEST_OFFSET) &&
+ bcmp(&ether_ipv6_mcast, frame + ETHER_DEST_OFFSET, sizeof(ether_ipv6_mcast))) {
+ return BCME_ERROR;
+ }
+
+ if (ethertype == ETHER_TYPE_ARP) {
+ L2_FILTER_MSG(("bcm_l2_filter_gratuitous_arp: ARP RX data : %p: datalen : %d\n",
+ data, datalen));
+ send_ip_offset = ARP_SRC_IP_OFFSET;
+ target_ip_offset = ARP_TGT_IP_OFFSET;
+ iplen = IPV4_ADDR_LEN;
+ minlen = ARP_DATA_LEN;
+ } else if (ethertype == ETHER_TYPE_IPV6) {
+ send_ip_offset = NEIGHBOR_ADVERTISE_SRC_IPV6_OFFSET;
+ target_ip_offset = NEIGHBOR_ADVERTISE_TGT_IPV6_OFFSET;
+ iplen = IPV6_ADDR_LEN;
+ minlen = target_ip_offset + iplen;
+
+ /* check for neighbour advertisement */
+ if (datalen >= minlen && (data[IPV6_NEXT_HDR_OFFSET] != IP_PROT_ICMP6 ||
+ data[NEIGHBOR_ADVERTISE_TYPE_OFFSET] != NEIGHBOR_ADVERTISE_TYPE))
+ return BCME_ERROR;
+
+ /* Don't drop an unsolicited NA from the AP with the all-node mcast dest addr (HS2-4.5.E) */
+ if (datalen >= minlen &&
+ (data[IPV6_NEXT_HDR_OFFSET] == IP_PROT_ICMP6) &&
+ (data[NEIGHBOR_ADVERTISE_TYPE_OFFSET] == NEIGHBOR_ADVERTISE_TYPE) &&
+ (data[NEIGHBOR_ADVERTISE_OPTION_OFFSET] == OPT_TYPE_TGT_LINK_ADDR)) {
+ L2_FILTER_MSG(("Unsolicited Neighbour Advertisement from AP "
+ "with all-node mcast dest addr tx'ed (%d)\n", datalen));
+ return BCME_ERROR;
+ }
+
+ } else {
+ return BCME_ERROR;
+ }
+
+ if (datalen < minlen) {
+ L2_FILTER_MSG(("BCM: dhd_gratuitous_arp: truncated packet (%d)\n", datalen));
+ return BCME_ERROR;
+ }
+
+ if (bcmp(data + send_ip_offset, data + target_ip_offset, iplen) == 0) {
+ L2_FILTER_MSG((" returning BCME_OK in bcm_l2_filter_gratuitous_arp\n"));
+ return BCME_OK;
+ }
+
+ return BCME_ERROR;
+}
+int
+get_pkt_ether_type(osl_t *osh, void *pktbuf,
+ uint8 **data_ptr, int *len_ptr, uint16 *et_ptr, bool *snap_ptr)
+{
+ uint8 *frame = PKTDATA(osh, pktbuf);
+ int length = PKTLEN(osh, pktbuf);
+ uint8 *pt; /* Pointer to type field */
+ uint16 ethertype;
+ bool snap = FALSE;
+ /* Process Ethernet II or SNAP-encapsulated 802.3 frames */
+ if (length < ETHER_HDR_LEN) {
+ L2_FILTER_MSG(("BCM: get_pkt_ether_type: short eth frame (%d)\n",
+ length));
+ return BCME_ERROR;
+ } else if (ntoh16_ua(frame + ETHER_TYPE_OFFSET) >= ETHER_TYPE_MIN) {
+ /* Frame is Ethernet II */
+ pt = frame + ETHER_TYPE_OFFSET;
+ } else if (length >= ETHER_HDR_LEN + SNAP_HDR_LEN + ETHER_TYPE_LEN &&
+ !bcmp(llc_snap_hdr, frame + ETHER_HDR_LEN, SNAP_HDR_LEN)) {
+ pt = frame + ETHER_HDR_LEN + SNAP_HDR_LEN;
+ snap = TRUE;
+ } else {
+ L2_FILTER_MSG((" get_pkt_ether_type: non-SNAP 802.3 frame\n"));
+ return BCME_ERROR;
+ }
+
+ ethertype = ntoh16_ua(pt);
+
+ /* Skip VLAN tag, if any */
+ if (ethertype == ETHER_TYPE_8021Q) {
+ pt += VLAN_TAG_LEN;
+
+ if ((pt + ETHER_TYPE_LEN) > (frame + length)) {
+ L2_FILTER_MSG(("BCM: get_pkt_ether_type: short VLAN frame (%d)\n",
+ length));
+ return BCME_ERROR;
+ }
+ ethertype = ntoh16_ua(pt);
+ }
+ *data_ptr = pt + ETHER_TYPE_LEN;
+ *len_ptr = length - (int32)(pt + ETHER_TYPE_LEN - frame);
+ *et_ptr = ethertype;
+ *snap_ptr = snap;
+ return BCME_OK;
+}
+
+int
+get_pkt_ip_type(osl_t *osh, void *pktbuf,
+ uint8 **data_ptr, int *len_ptr, uint8 *prot_ptr)
+{
+ struct ipv4_hdr *iph; /* IP frame pointer */
+ int iplen; /* IP frame length */
+ uint16 ethertype, iphdrlen, ippktlen;
+ uint16 iph_frag;
+ uint8 prot;
+ bool snap;
+
+ if (get_pkt_ether_type(osh, pktbuf, (uint8 **)&iph,
+ &iplen, &ethertype, &snap) != 0)
+ return BCME_ERROR;
+
+ if (ethertype != ETHER_TYPE_IP) {
+ return BCME_ERROR;
+ }
+
+ /* We support IPv4 only */
+ if (iplen < IPV4_OPTIONS_OFFSET || (IP_VER(iph) != IP_VER_4)) {
+ return BCME_ERROR;
+ }
+
+ /* Header length sanity */
+ iphdrlen = IPV4_HLEN(iph);
+
+ /*
+ * Packet length sanity; sometimes we receive eth-frame size bigger
+ * than the IP content, which results in a bad tcp chksum
+ */
+ ippktlen = ntoh16(iph->tot_len);
+ if (ippktlen < iplen) {
+ L2_FILTER_MSG(("get_pkt_ip_type: extra frame length ignored\n"));
+ iplen = ippktlen;
+ } else if (ippktlen > iplen) {
+ L2_FILTER_MSG(("get_pkt_ip_type: truncated IP packet (%d)\n",
+ ippktlen - iplen));
+ return BCME_ERROR;
+ }
+
+ if (iphdrlen < IPV4_OPTIONS_OFFSET || iphdrlen > iplen) {
+ L2_FILTER_ERROR((" get_pkt_ip_type: IP-header-len (%d) out of range (%d-%d)\n",
+ iphdrlen, IPV4_OPTIONS_OFFSET, iplen));
+ return BCME_ERROR;
+ }
+
+ /*
+ * We don't handle fragmented IP packets. A first frag is indicated by the MF
+ * (more frag) bit and a subsequent frag is indicated by a non-zero frag offset.
+ */
+ iph_frag = ntoh16(iph->frag);
+
+ if ((iph_frag & IPV4_FRAG_MORE) || (iph_frag & IPV4_FRAG_OFFSET_MASK) != 0) {
+ L2_FILTER_ERROR(("get_pkt_ip_type: IP fragment not handled\n"));
+ return BCME_ERROR;
+ }
+ prot = IPV4_PROT(iph);
+ *data_ptr = (((uint8 *)iph) + iphdrlen);
+ *len_ptr = iplen - iphdrlen;
+ *prot_ptr = prot;
+ return BCME_OK;
+}
+
+/* Returns BCME_OK if the packet is an ICMP echo request (ping) */
+int bcm_l2_filter_block_ping(osl_t *osh, void *pktbuf)
+{
+ struct bcmicmp_hdr *icmph;
+ int udpl;
+ uint8 prot;
+
+ if (get_pkt_ip_type(osh, pktbuf, (uint8 **)&icmph, &udpl, &prot) != 0)
+ return BCME_ERROR;
+ if (prot == IP_PROT_ICMP) {
+ if (icmph->type == ICMP_TYPE_ECHO_REQUEST)
+ return BCME_OK;
+ }
+ return BCME_ERROR;
+}
+
+int bcm_l2_filter_get_mac_addr_dhcp_pkt(osl_t *osh, void *pktbuf,
+ int ifidx, uint8** mac_addr)
+{
+ uint8 *eh = PKTDATA(osh, pktbuf);
+ uint8 *udph;
+ uint8 *dhcp;
+ int udpl;
+ int dhcpl;
+ uint16 port;
+ uint8 prot;
+
+ if (!ETHER_ISMULTI(eh + ETHER_DEST_OFFSET))
+ return BCME_ERROR;
+ if (get_pkt_ip_type(osh, pktbuf, &udph, &udpl, &prot) != 0)
+ return BCME_ERROR;
+ if (prot != IP_PROT_UDP)
+ return BCME_ERROR;
+ /* check frame length, at least UDP_HDR_LEN */
+ if (udpl < UDP_HDR_LEN) {
+ L2_FILTER_MSG(("BCM: bcm_l2_filter_get_mac_addr_dhcp_pkt: short UDP frame,"
+ " ignored\n"));
+ return BCME_ERROR;
+ }
+ port = ntoh16_ua(udph + UDP_DEST_PORT_OFFSET);
+ /* only process DHCP packets from server to client */
+ if (port != DHCP_PORT_CLIENT)
+ return BCME_ERROR;
+
+ dhcp = udph + UDP_HDR_LEN;
+ dhcpl = udpl - UDP_HDR_LEN;
+
+ if (dhcpl < DHCP_CHADDR_OFFSET + ETHER_ADDR_LEN) {
+ L2_FILTER_MSG(("BCM: bcm_l2_filter_get_mac_addr_dhcp_pkt: short DHCP frame,"
+ " ignored\n"));
+ return BCME_ERROR;
+ }
+ /* only process DHCP reply(offer/ack) packets */
+ if (*(dhcp + DHCP_TYPE_OFFSET) != DHCP_TYPE_REPLY)
+ return BCME_ERROR;
+ *mac_addr = dhcp + DHCP_CHADDR_OFFSET;
+ return BCME_OK;
+}
+/* Modify the MAC address for a given IP entry in the ARP table */
+int
+bcm_l2_filter_parp_modifyentry(arp_table_t* arp_tbl, struct ether_addr *ea,
+ uint8 *ip, uint8 ip_ver, bool cached, unsigned int entry_tickcnt)
+{
+ parp_entry_t *entry;
+ uint8 idx, ip_len;
+ arp_table_t *ptable;
+
+ if (ip_ver == IP_VER_4 && !IPV4_ADDR_NULL(ip) && !IPV4_ADDR_BCAST(ip)) {
+ idx = BCM_PARP_TABLE_INDEX(ip[IPV4_ADDR_LEN - 1]);
+ ip_len = IPV4_ADDR_LEN;
+ } else if (ip_ver == IP_VER_6 && !IPV6_ADDR_NULL(ip)) {
+ idx = BCM_PARP_TABLE_INDEX(ip[IPV6_ADDR_LEN - 1]);
+ ip_len = IPV6_ADDR_LEN;
+ } else {
+ return BCME_ERROR;
+ }
+
+ ptable = arp_tbl;
+ if (cached) {
+ entry = ptable->parp_table[idx];
+ } else {
+ entry = ptable->parp_candidate_list;
+ }
+ while (entry) {
+ if (bcmp(entry->ip.data, ip, ip_len) == 0) {
+ /* entry matches, overwrite mac content and return */
+ bcopy((void *)ea, (void *)&entry->ea, ETHER_ADDR_LEN);
+ entry->used = entry_tickcnt;
+#ifdef DHD_DUMP_ARPTABLE
+ bcm_l2_parp_dump_table(arp_tbl);
+#endif
+ return BCME_OK;
+ }
+ entry = entry->next;
+ }
+#ifdef DHD_DUMP_ARPTABLE
+ bcm_l2_parp_dump_table(arp_tbl);
+#endif
+ return BCME_ERROR;
+}
+
+/* Add an IP entry to the ARP table. If "cached" is TRUE the entry is added
+ * to parp_table, otherwise to parp_candidate_list.
+ */
+int
+bcm_l2_filter_parp_addentry(osl_t *osh, arp_table_t* arp_tbl, struct ether_addr *ea,
+ uint8 *ip, uint8 ip_ver, bool cached, unsigned int entry_tickcnt)
+{
+ parp_entry_t *entry;
+ uint8 idx, ip_len;
+ arp_table_t *ptable;
+
+ if (ip_ver == IP_VER_4 && !IPV4_ADDR_NULL(ip) && !IPV4_ADDR_BCAST(ip)) {
+ idx = BCM_PARP_TABLE_INDEX(ip[IPV4_ADDR_LEN - 1]);
+ ip_len = IPV4_ADDR_LEN;
+ } else if (ip_ver == IP_VER_6 && !IPV6_ADDR_NULL(ip)) {
+ idx = BCM_PARP_TABLE_INDEX(ip[IPV6_ADDR_LEN - 1]);
+ ip_len = IPV6_ADDR_LEN;
+ } else {
+ return BCME_ERROR;
+ }
+
+ if ((entry = MALLOCZ(osh, sizeof(parp_entry_t) + ip_len)) == NULL) {
+ L2_FILTER_MSG(("Allocating new parp_entry for IPv%d failed!!\n", ip_ver));
+ return BCME_NOMEM;
+ }
+
+ bcopy((void *)ea, (void *)&entry->ea, ETHER_ADDR_LEN);
+ entry->used = entry_tickcnt;
+ entry->ip.id = ip_ver;
+ entry->ip.len = ip_len;
+ bcopy(ip, entry->ip.data, ip_len);
+ ptable = arp_tbl;
+ if (cached) {
+ entry->next = ptable->parp_table[idx];
+ ptable->parp_table[idx] = entry;
+ } else {
+ entry->next = ptable->parp_candidate_list;
+ ptable->parp_candidate_list = entry;
+ }
+#ifdef DHD_DUMP_ARPTABLE
+ bcm_l2_parp_dump_table(arp_tbl);
+#endif
+ return BCME_OK;
+}
+
+/* Delete an IP entry from the ARP table. If "cached" is TRUE the entry is
+ * removed from parp_table, otherwise from parp_candidate_list.
+ */
+int
+bcm_l2_filter_parp_delentry(osl_t* osh, arp_table_t *arp_tbl, struct ether_addr *ea,
+ uint8 *ip, uint8 ip_ver, bool cached)
+{
+ parp_entry_t *entry, *prev = NULL;
+ uint8 idx, ip_len;
+ arp_table_t *ptable;
+
+ if (ip_ver == IP_VER_4) {
+ idx = BCM_PARP_TABLE_INDEX(ip[IPV4_ADDR_LEN - 1]);
+ ip_len = IPV4_ADDR_LEN;
+ } else if (ip_ver == IP_VER_6) {
+ idx = BCM_PARP_TABLE_INDEX(ip[IPV6_ADDR_LEN - 1]);
+ ip_len = IPV6_ADDR_LEN;
+ } else {
+ return BCME_ERROR;
+ }
+ ptable = arp_tbl;
+ if (cached) {
+ entry = ptable->parp_table[idx];
+ } else {
+ entry = ptable->parp_candidate_list;
+ }
+ while (entry) {
+ if (entry->ip.id == ip_ver &&
+ bcmp(entry->ip.data, ip, ip_len) == 0 &&
+ bcmp(&entry->ea, ea, ETHER_ADDR_LEN) == 0) {
+ if (prev == NULL) {
+ if (cached) {
+ ptable->parp_table[idx] = entry->next;
+ } else {
+ ptable->parp_candidate_list = entry->next;
+ }
+ } else {
+ prev->next = entry->next;
+ }
+ break;
+ }
+ prev = entry;
+ entry = entry->next;
+ }
+ if (entry != NULL)
+ MFREE(osh, entry, sizeof(parp_entry_t) + ip_len);
+#ifdef DHD_DUMP_ARPTABLE
+ bcm_l2_parp_dump_table(arp_tbl);
+#endif
+ return BCME_OK;
+}
+
+/* Search for an IP entry in the ARP table. If "cached" is TRUE the search is
+ * done in parp_table, otherwise in parp_candidate_list.
+ */
+parp_entry_t *
+bcm_l2_filter_parp_findentry(arp_table_t* arp_tbl, uint8 *ip, uint8 ip_ver, bool cached,
+ unsigned int entry_tickcnt)
+{
+ parp_entry_t *entry;
+ uint8 idx, ip_len;
+ arp_table_t *ptable;
+
+ if (ip_ver == IP_VER_4) {
+ idx = BCM_PARP_TABLE_INDEX(ip[IPV4_ADDR_LEN - 1]);
+ ip_len = IPV4_ADDR_LEN;
+ } else if (ip_ver == IP_VER_6) {
+ idx = BCM_PARP_TABLE_INDEX(ip[IPV6_ADDR_LEN - 1]);
+ ip_len = IPV6_ADDR_LEN;
+ } else {
+ return NULL;
+ }
+ ptable = arp_tbl;
+ if (cached) {
+ entry = ptable->parp_table[idx];
+ } else {
+ entry = ptable->parp_candidate_list;
+ }
+ while (entry) {
+ if (entry->ip.id == ip_ver && bcmp(entry->ip.data, ip, ip_len) == 0) {
+ /* time stamp of adding the station entry to arp table for ifp */
+ entry->used = entry_tickcnt;
+ break;
+ }
+ entry = entry->next;
+ }
+ return entry;
+}
+
+/* Update ARP table entries for every proxy-ARP-enabled interface */
+void
+bcm_l2_filter_arp_table_update(osl_t *osh, arp_table_t* arp_tbl, bool all, uint8 *del_ea,
+ bool periodic, unsigned int tickcnt)
+{
+ parp_entry_t *prev, *entry, *delentry;
+ uint8 idx, ip_ver;
+ struct ether_addr ea;
+ uint8 ip[IPV6_ADDR_LEN];
+ arp_table_t *ptable;
+
+ ptable = arp_tbl;
+ for (idx = 0; idx < BCM_PARP_TABLE_SIZE; idx++) {
+ entry = ptable->parp_table[idx];
+ while (entry) {
+ /* check if the entry need to be removed */
+ if (all || (periodic && BCM_PARP_IS_TIMEOUT(tickcnt, entry)) ||
+ (del_ea != NULL && !bcmp(del_ea, &entry->ea, ETHER_ADDR_LEN))) {
+ /* copy frame here */
+ ip_ver = entry->ip.id;
+ bcopy(entry->ip.data, ip, entry->ip.len);
+ bcopy(&entry->ea, &ea, ETHER_ADDR_LEN);
+ entry = entry->next;
+ bcm_l2_filter_parp_delentry(osh, ptable, &ea, ip, ip_ver, TRUE);
+ } else {
+ entry = entry->next;
+ }
+ }
+ }
+
+ /* remove candidate or promote to real entry */
+ prev = delentry = NULL;
+ entry = ptable->parp_candidate_list;
+ while (entry) {
+ /* remove candidate */
+ if (all || (periodic && BCM_PARP_ANNOUNCE_WAIT_REACH(tickcnt, entry)) ||
+ (del_ea != NULL && !bcmp(del_ea, (uint8 *)&entry->ea, ETHER_ADDR_LEN))) {
+ bool promote = (periodic && BCM_PARP_ANNOUNCE_WAIT_REACH(tickcnt, entry)) ?
+ TRUE: FALSE;
+ parp_entry_t *node = NULL;
+
+ ip_ver = entry->ip.id;
+
+ if (prev == NULL)
+ ptable->parp_candidate_list = entry->next;
+ else
+ prev->next = entry->next;
+
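+ /* candidate entries are created by the IPv6 ND path, hence the
+ * IP_VER_6 lookup below
+ */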
+ node = bcm_l2_filter_parp_findentry(ptable,
+ entry->ip.data, IP_VER_6, TRUE, tickcnt);
+ if (promote && node == NULL) {
+ bcm_l2_filter_parp_addentry(osh, ptable, &entry->ea,
+ entry->ip.data, entry->ip.id, TRUE, tickcnt);
+ }
+ MFREE(osh, entry, sizeof(parp_entry_t) + entry->ip.len);
+ if (prev == NULL) {
+ entry = ptable->parp_candidate_list;
+ } else {
+ entry = prev->next;
+ }
+ } else {
+ prev = entry;
+ entry = entry->next;
+ }
+ }
+}
+/* Create a 42-byte ARP reply packet, with the buffer alignment adjusted */
+void *
+bcm_l2_filter_proxyarp_alloc_reply(osl_t* osh, uint16 pktlen, struct ether_addr *src_ea,
+ struct ether_addr *dst_ea, uint16 ea_type, bool snap, void **p)
+{
+ void *pkt;
+ uint8 *frame;
+
+ /* adjust pktlen since skb->data is aligned to 2 */
+ pktlen += ALIGN_ADJ_BUFLEN;
+
+ if ((pkt = PKTGET(osh, pktlen, FALSE)) == NULL) {
+ L2_FILTER_ERROR(("bcm_l2_filter_proxyarp_alloc_reply: PKTGET failed\n"));
+ return NULL;
+ }
+ /* adjust for pkt->data aligned */
+ PKTPULL(osh, pkt, ALIGN_ADJ_BUFLEN);
+ frame = PKTDATA(osh, pkt);
+
+ /* Create 14-byte eth header, plus snap header if applicable */
+ bcopy(src_ea, frame + ETHER_SRC_OFFSET, ETHER_ADDR_LEN);
+ bcopy(dst_ea, frame + ETHER_DEST_OFFSET, ETHER_ADDR_LEN);
+ if (snap) {
+ hton16_ua_store(pktlen, frame + ETHER_TYPE_OFFSET);
+ bcopy(llc_snap_hdr, frame + ETHER_HDR_LEN, SNAP_HDR_LEN);
+ hton16_ua_store(ea_type, frame + ETHER_HDR_LEN + SNAP_HDR_LEN);
+ } else
+ hton16_ua_store(ea_type, frame + ETHER_TYPE_OFFSET);
+
+ *p = (void *)(frame + ETHER_HDR_LEN + (snap ? SNAP_HDR_LEN + ETHER_TYPE_LEN : 0));
+ return pkt;
+}
+/* copy the smac entry from parp_table */
+void bcm_l2_filter_parp_get_smac(arp_table_t* ptable, void* smac)
+{
+ bcopy(ptable->parp_smac, smac, ETHER_ADDR_LEN);
+}
+/* copy the cmac entry from parp_table */
+void bcm_l2_filter_parp_get_cmac(arp_table_t* ptable, void* cmac)
+{
+ bcopy(ptable->parp_cmac, cmac, ETHER_ADDR_LEN);
+}
+/* copy the smac entry to smac entry in parp_table */
+void bcm_l2_filter_parp_set_smac(arp_table_t* ptable, void* smac)
+{
+ bcopy(smac, ptable->parp_smac, ETHER_ADDR_LEN);
+}
+/* copy the cmac entry to cmac entry in parp_table */
+void bcm_l2_filter_parp_set_cmac(arp_table_t* ptable, void* cmac)
+{
+ bcopy(cmac, ptable->parp_cmac, ETHER_ADDR_LEN);
+}
+
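+/* Compute the 16-bit one's-complement Internet checksum (RFC 1071) over
+ * ul_data. When src_ipa is non-NULL, an IPv6 pseudo-header (source and
+ * destination address, upper-layer length and protocol) is folded in first,
+ * as required for ICMPv6 and UDP checksums.
+ */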
+uint16
+calc_checksum(uint8 *src_ipa, uint8 *dst_ipa, uint32 ul_len, uint8 prot, uint8 *ul_data)
+{
+ uint16 *startpos;
+ uint32 sum = 0;
+ int i;
+ uint16 answer = 0;
+
+ if (src_ipa) {
+ uint8 ph[8] = {0, };
+ for (i = 0; i < (IPV6_ADDR_LEN / 2); i++) {
+ sum += *((uint16 *)src_ipa);
+ src_ipa += 2;
+ }
+
+ for (i = 0; i < (IPV6_ADDR_LEN / 2); i++) {
+ sum += *((uint16 *)dst_ipa);
+ dst_ipa += 2;
+ }
+
+ *((uint32 *)ph) = hton32(ul_len);
+ *((uint32 *)(ph+4)) = 0;
+ ph[7] = prot;
+ startpos = (uint16 *)ph;
+ for (i = 0; i < 4; i++) {
+ sum += *startpos++;
+ }
+ }
+
+ startpos = (uint16 *)ul_data;
+ while (ul_len > 1) {
+ sum += *startpos++;
+ ul_len -= 2;
+ }
+
+ if (ul_len == 1) {
+ *((uint8 *)(&answer)) = *((uint8 *)startpos);
+ sum += answer;
+ }
+
+ sum = (sum >> 16) + (sum & 0xffff);
+ sum += (sum >> 16);
+ answer = ~sum;
+
+ return answer;
+}
+/*
+ * Walk the IPv6 Neighbour Discovery options looking for the option type "key".
+ * Each option's length field covers the type and length fields and is expressed
+ * in units of 8 octets, so elt->len == 1 denotes an 8-byte option.
+ */
+bcm_tlv_t *
+parse_nd_options(void *buf, int buflen, uint key)
+{
+ bcm_tlv_t *elt;
+ int totlen;
+
+ elt = (bcm_tlv_t*)buf;
+ totlen = buflen;
+
+ /* find tagged parameter */
+ while (totlen >= TLV_HDR_LEN) {
+ int len = elt->len * 8;
+
+ /* a zero-length option is malformed; bail out to avoid looping forever */
+ if (len == 0)
+ break;
+
+ /* validate remaining totlen */
+ if ((elt->id == key) &&
+ (totlen >= len))
+ return (elt);
+
+ elt = (bcm_tlv_t*)((uint8*)elt + len);
+ totlen -= len;
+ }
+
+ return NULL;
+}
+
+/* Returns BCME_OK (0) for a TDLS setup request or a TDLS discovery request */
+int
+bcm_l2_filter_block_tdls(osl_t *osh, void *pktbuf)
+{
+ uint16 ethertype;
+ uint8 *data;
+ int datalen;
+ bool snap;
+ uint8 action_field;
+
+ if (get_pkt_ether_type(osh, pktbuf, &data, &datalen, &ethertype, &snap) != BCME_OK)
+ return BCME_ERROR;
+
+ if (ethertype != ETHER_TYPE_89_0D)
+ return BCME_ERROR;
+
+ /* validate minimum length: payload type plus category and action fields */
+ if (datalen < TDLS_PAYLOAD_TYPE_LEN + 2) {
+ L2_FILTER_ERROR(("bcm_l2_filter_block_tdls: wrong length for 89-0d eth frame %d\n",
+ datalen));
+ return BCME_ERROR;
+ }
+
+ /* validate payload type */
+ if (*data != TDLS_PAYLOAD_TYPE) {
+ L2_FILTER_ERROR(("bcm_l2_filter_block_tdls: wrong payload type for 89-0d"
+ " eth frame %d\n",
+ *data));
+ return BCME_ERROR;
+ }
+ data += TDLS_PAYLOAD_TYPE_LEN;
+
+ /* validate TDLS action category */
+ if (*data != TDLS_ACTION_CATEGORY_CODE) {
+ L2_FILTER_ERROR(("bcm_l2_filter_block_tdls: wrong TDLS Category %d\n", *data));
+ return BCME_ERROR;
+ }
+ data++;
+
+ action_field = *data;
+
+ if ((action_field == TDLS_SETUP_REQ) || (action_field == TDLS_DISCOVERY_REQ))
+ return BCME_OK;
+
+ return BCME_ERROR;
+}
diff --git a/bcmdhd.101.10.361.x/bcmbloom.c b/bcmdhd.101.10.361.x/bcmbloom.c
new file mode 100755
index 0000000..7660c88
--- /dev/null
+++ b/bcmdhd.101.10.361.x/bcmbloom.c
@@ -0,0 +1,233 @@
+/*
+ * Bloom filter support
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#include <typedefs.h>
+#include <bcmdefs.h>
+
+#include <stdarg.h>
+
+#ifdef BCMDRIVER
+#include <osl.h>
+#include <bcmutils.h>
+#else /* !BCMDRIVER */
+#include <stdio.h>
+#include <string.h>
+#ifndef ASSERT
+#define ASSERT(exp)
+#endif
+#endif /* !BCMDRIVER */
+#include <bcmutils.h>
+
+#include <bcmbloom.h>
+
+#define BLOOM_BIT_LEN(_x) ((_x) << 3)
+
+struct bcm_bloom_filter {
+ void *cb_ctx;
+ uint max_hash;
+ bcm_bloom_hash_t *hash; /* array of hash functions */
+ uint filter_size; /* in bytes */
+ uint8 *filter; /* can be NULL for validate only */
+};
+
+/* public interface */
+int
+bcm_bloom_create(bcm_bloom_alloc_t alloc_cb,
+ bcm_bloom_free_t free_cb, void *cb_ctx, uint max_hash,
+ uint filter_size, bcm_bloom_filter_t **bloom)
+{
+ int err = BCME_OK;
+ bcm_bloom_filter_t *bp = NULL;
+
+ if (!bloom || !alloc_cb || (max_hash == 0)) {
+ err = BCME_BADARG;
+ goto done;
+ }
+
+ bp = (*alloc_cb)(cb_ctx, sizeof(*bp));
+ if (!bp) {
+ err = BCME_NOMEM;
+ goto done;
+ }
+ memset(bp, 0, sizeof(*bp));
+
+ bp->cb_ctx = cb_ctx;
+ bp->max_hash = max_hash;
+ bp->hash = (*alloc_cb)(cb_ctx, sizeof(*bp->hash) * max_hash);
+ if (!bp->hash) {
+ err = BCME_NOMEM;
+ goto done;
+ }
+ memset(bp->hash, 0, sizeof(*bp->hash) * max_hash);
+
+ if (filter_size > 0) {
+ bp->filter = (*alloc_cb)(cb_ctx, filter_size);
+ if (!bp->filter) {
+ err = BCME_NOMEM;
+ goto done;
+ }
+ bp->filter_size = filter_size;
+ memset(bp->filter, 0, filter_size);
+ }
+
+ *bloom = bp;
+
+done:
+ if (err != BCME_OK)
+ bcm_bloom_destroy(&bp, free_cb);
+
+ return err;
+}
+
+int
+bcm_bloom_destroy(bcm_bloom_filter_t **bloom, bcm_bloom_free_t free_cb)
+{
+ int err = BCME_OK;
+ bcm_bloom_filter_t *bp;
+
+ if (!bloom || !*bloom || !free_cb)
+ goto done;
+
+ bp = *bloom;
+ *bloom = NULL;
+
+ if (bp->filter)
+ (*free_cb)(bp->cb_ctx, bp->filter, bp->filter_size);
+ if (bp->hash)
+ (*free_cb)(bp->cb_ctx, bp->hash,
+ sizeof(*bp->hash) * bp->max_hash);
+ (*free_cb)(bp->cb_ctx, bp, sizeof(*bp));
+
+done:
+ return err;
+}
+
+int
+bcm_bloom_add_hash(bcm_bloom_filter_t *bp, bcm_bloom_hash_t hash, uint *idx)
+{
+ uint i;
+
+ if (!bp || !hash || !idx)
+ return BCME_BADARG;
+
+ for (i = 0; i < bp->max_hash; ++i) {
+ if (bp->hash[i] == NULL)
+ break;
+ }
+
+ if (i >= bp->max_hash)
+ return BCME_NORESOURCE;
+
+ bp->hash[i] = hash;
+ *idx = i;
+ return BCME_OK;
+}
+
+int
+bcm_bloom_remove_hash(bcm_bloom_filter_t *bp, uint idx)
+{
+ if (!bp)
+ return BCME_BADARG;
+
+ if (idx >= bp->max_hash)
+ return BCME_NOTFOUND;
+
+ bp->hash[idx] = NULL;
+ return BCME_OK;
+}
+
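+/* Membership test. Despite the bool return type this actually returns a
+ * BCME_ status: BCME_OK (0) when all configured hash bits for the tag are
+ * set, BCME_NOTFOUND otherwise. Callers should compare against BCME_OK
+ * rather than treating the result as a plain boolean.
+ */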
+bool
+bcm_bloom_is_member(bcm_bloom_filter_t *bp,
+ const uint8 *tag, uint tag_len, const uint8 *buf, uint buf_len)
+{
+ uint i;
+ int err = BCME_OK;
+
+ if (!tag || (tag_len == 0)) /* empty tag is always a member */
+ goto done;
+
+ /* use internal buffer if none was specified */
+ if (!buf || (buf_len == 0)) {
+ if (!bp->filter) /* everyone is a member of an empty filter */
+ goto done;
+
+ buf = bp->filter;
+ buf_len = bp->filter_size;
+ }
+
+ for (i = 0; i < bp->max_hash; ++i) {
+ uint pos;
+ if (!bp->hash[i])
+ continue;
+ pos = (*bp->hash[i])(bp->cb_ctx, i, tag, tag_len);
+
+ /* all bits must be set for a match */
+ if (isclr(buf, pos % BLOOM_BIT_LEN(buf_len))) {
+ err = BCME_NOTFOUND;
+ break;
+ }
+ }
+
+done:
+ return err;
+}
+
+int
+bcm_bloom_add_member(bcm_bloom_filter_t *bp, const uint8 *tag, uint tag_len)
+{
+ uint i;
+
+ if (!bp || !tag || (tag_len == 0))
+ return BCME_BADARG;
+
+ if (!bp->filter) /* validate only */
+ return BCME_UNSUPPORTED;
+
+ for (i = 0; i < bp->max_hash; ++i) {
+ uint pos;
+ if (!bp->hash[i])
+ continue;
+ pos = (*bp->hash[i])(bp->cb_ctx, i, tag, tag_len);
+ setbit(bp->filter, pos % BLOOM_BIT_LEN(bp->filter_size));
+ }
+
+ return BCME_OK;
+}
+
+int bcm_bloom_get_filter_data(bcm_bloom_filter_t *bp,
+ uint buf_size, uint8 *buf, uint *buf_len)
+{
+ if (!bp)
+ return BCME_BADARG;
+
+ if (buf_len)
+ *buf_len = bp->filter_size;
+
+ if (buf_size < bp->filter_size)
+ return BCME_BUFTOOSHORT;
+
+ if (bp->filter && bp->filter_size)
+ memcpy(buf, bp->filter, bp->filter_size);
+
+ return BCME_OK;
+}
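+
+/* Illustrative usage sketch (not part of the build): create a filter,
+ * register one hash callback, insert a tag and test membership. The
+ * my_alloc/my_free/my_hash callbacks below are hypothetical examples that
+ * follow the callback signatures used above; a return of BCME_OK (0) from
+ * bcm_bloom_is_member() means "possibly a member".
+ *
+ *	static void *my_alloc(void *ctx, uint size) { return malloc(size); }
+ *	static void my_free(void *ctx, void *buf, uint size) { free(buf); }
+ *	static uint my_hash(void *ctx, uint idx, const uint8 *tag, uint len)
+ *	{
+ *		uint i, h = idx;
+ *		for (i = 0; i < len; i++)
+ *			h = h * 31 + tag[i];
+ *		return h;	// used modulo the filter bit length
+ *	}
+ *
+ *	bcm_bloom_filter_t *bloom = NULL;
+ *	uint idx;
+ *	if (bcm_bloom_create(my_alloc, my_free, NULL, 4, 64, &bloom) == BCME_OK) {
+ *		bcm_bloom_add_hash(bloom, my_hash, &idx);
+ *		bcm_bloom_add_member(bloom, (const uint8 *)"tag", 3);
+ *		if (bcm_bloom_is_member(bloom, (const uint8 *)"tag", 3, NULL, 0) == BCME_OK) {
+ *			// possibly present; a bloom filter can false-positive
+ *		}
+ *		bcm_bloom_destroy(&bloom, my_free);
+ *	}
+ */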
diff --git a/bcmdhd.101.10.361.x/bcmevent.c b/bcmdhd.101.10.361.x/bcmevent.c
new file mode 100755
index 0000000..a8cafcb
--- /dev/null
+++ b/bcmdhd.101.10.361.x/bcmevent.c
@@ -0,0 +1,445 @@
+/*
+ * bcmevent read-only data shared by kernel or app layers
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#include <typedefs.h>
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <ethernet.h>
+#include <bcmeth.h>
+#include <bcmevent.h>
+#include <802.11.h>
+
+/* Table of event name strings for UIs and debugging dumps */
+typedef struct {
+ uint event;
+ const char *name;
+} bcmevent_name_str_t;
+
+/* Use the actual name for event tracing */
+#define BCMEVENT_NAME(_event) {(_event), #_event}
+
+/* this becomes static data when all code is changed to use
+ * the bcmevent_get_name() API
+ */
+static const bcmevent_name_str_t bcmevent_names[] = {
+ BCMEVENT_NAME(WLC_E_SET_SSID),
+ BCMEVENT_NAME(WLC_E_JOIN),
+ BCMEVENT_NAME(WLC_E_START),
+ BCMEVENT_NAME(WLC_E_AUTH),
+ BCMEVENT_NAME(WLC_E_AUTH_IND),
+ BCMEVENT_NAME(WLC_E_DEAUTH),
+ BCMEVENT_NAME(WLC_E_DEAUTH_IND),
+ BCMEVENT_NAME(WLC_E_ASSOC),
+ BCMEVENT_NAME(WLC_E_ASSOC_IND),
+ BCMEVENT_NAME(WLC_E_REASSOC),
+ BCMEVENT_NAME(WLC_E_REASSOC_IND),
+ BCMEVENT_NAME(WLC_E_DISASSOC),
+ BCMEVENT_NAME(WLC_E_DISASSOC_IND),
+ BCMEVENT_NAME(WLC_E_QUIET_START),
+ BCMEVENT_NAME(WLC_E_QUIET_END),
+ BCMEVENT_NAME(WLC_E_BEACON_RX),
+ BCMEVENT_NAME(WLC_E_LINK),
+ BCMEVENT_NAME(WLC_E_MIC_ERROR),
+ BCMEVENT_NAME(WLC_E_NDIS_LINK),
+ BCMEVENT_NAME(WLC_E_ROAM),
+ BCMEVENT_NAME(WLC_E_TXFAIL),
+ BCMEVENT_NAME(WLC_E_PMKID_CACHE),
+ BCMEVENT_NAME(WLC_E_RETROGRADE_TSF),
+ BCMEVENT_NAME(WLC_E_PRUNE),
+ BCMEVENT_NAME(WLC_E_AUTOAUTH),
+ BCMEVENT_NAME(WLC_E_EAPOL_MSG),
+ BCMEVENT_NAME(WLC_E_SCAN_COMPLETE),
+ BCMEVENT_NAME(WLC_E_ADDTS_IND),
+ BCMEVENT_NAME(WLC_E_DELTS_IND),
+ BCMEVENT_NAME(WLC_E_BCNSENT_IND),
+ BCMEVENT_NAME(WLC_E_BCNRX_MSG),
+ BCMEVENT_NAME(WLC_E_BCNLOST_MSG),
+ BCMEVENT_NAME(WLC_E_ROAM_PREP),
+ BCMEVENT_NAME(WLC_E_PFN_NET_FOUND),
+ BCMEVENT_NAME(WLC_E_PFN_SCAN_ALLGONE),
+ BCMEVENT_NAME(WLC_E_PFN_NET_LOST),
+ BCMEVENT_NAME(WLC_E_JOIN_START),
+ BCMEVENT_NAME(WLC_E_ROAM_START),
+ BCMEVENT_NAME(WLC_E_ASSOC_START),
+#ifdef EXT_STA
+ BCMEVENT_NAME(WLC_E_RESET_COMPLETE),
+ BCMEVENT_NAME(WLC_E_JOIN_START),
+ BCMEVENT_NAME(WLC_E_ROAM_START),
+ BCMEVENT_NAME(WLC_E_ASSOC_START),
+ BCMEVENT_NAME(WLC_E_ASSOC_RECREATED),
+ BCMEVENT_NAME(WLC_E_SPEEDY_RECREATE_FAIL),
+#endif /* EXT_STA */
+#if defined(IBSS_PEER_DISCOVERY_EVENT)
+ BCMEVENT_NAME(WLC_E_IBSS_ASSOC),
+#endif /* defined(IBSS_PEER_DISCOVERY_EVENT) */
+ BCMEVENT_NAME(WLC_E_RADIO),
+ BCMEVENT_NAME(WLC_E_PSM_WATCHDOG),
+ BCMEVENT_NAME(WLC_E_PROBREQ_MSG),
+ BCMEVENT_NAME(WLC_E_SCAN_CONFIRM_IND),
+ BCMEVENT_NAME(WLC_E_PSK_SUP),
+ BCMEVENT_NAME(WLC_E_COUNTRY_CODE_CHANGED),
+ BCMEVENT_NAME(WLC_E_EXCEEDED_MEDIUM_TIME),
+ BCMEVENT_NAME(WLC_E_ICV_ERROR),
+ BCMEVENT_NAME(WLC_E_UNICAST_DECODE_ERROR),
+ BCMEVENT_NAME(WLC_E_MULTICAST_DECODE_ERROR),
+ BCMEVENT_NAME(WLC_E_TRACE),
+ BCMEVENT_NAME(WLC_E_IF),
+#ifdef WLP2P
+ BCMEVENT_NAME(WLC_E_P2P_DISC_LISTEN_COMPLETE),
+#endif
+ BCMEVENT_NAME(WLC_E_RSSI),
+ BCMEVENT_NAME(WLC_E_PFN_SCAN_COMPLETE),
+ BCMEVENT_NAME(WLC_E_ACTION_FRAME),
+ BCMEVENT_NAME(WLC_E_ACTION_FRAME_RX),
+ BCMEVENT_NAME(WLC_E_ACTION_FRAME_COMPLETE),
+#if defined(NDIS)
+ BCMEVENT_NAME(WLC_E_PRE_ASSOC_IND),
+ BCMEVENT_NAME(WLC_E_PRE_REASSOC_IND),
+ BCMEVENT_NAME(WLC_E_CHANNEL_ADOPTED),
+ BCMEVENT_NAME(WLC_E_AP_STARTED),
+ BCMEVENT_NAME(WLC_E_DFS_AP_STOP),
+ BCMEVENT_NAME(WLC_E_DFS_AP_RESUME),
+ BCMEVENT_NAME(WLC_E_ASSOC_IND_NDIS),
+ BCMEVENT_NAME(WLC_E_REASSOC_IND_NDIS),
+ BCMEVENT_NAME(WLC_E_ACTION_FRAME_RX_NDIS),
+ BCMEVENT_NAME(WLC_E_AUTH_REQ),
+ BCMEVENT_NAME(WLC_E_IBSS_COALESCE),
+#endif /* #if defined(NDIS) */
+
+#ifdef BCMWAPI_WAI
+ BCMEVENT_NAME(WLC_E_WAI_STA_EVENT),
+ BCMEVENT_NAME(WLC_E_WAI_MSG),
+#endif /* BCMWAPI_WAI */
+
+ BCMEVENT_NAME(WLC_E_ESCAN_RESULT),
+ BCMEVENT_NAME(WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE),
+#ifdef WLP2P
+ BCMEVENT_NAME(WLC_E_PROBRESP_MSG),
+ BCMEVENT_NAME(WLC_E_P2P_PROBREQ_MSG),
+#endif
+#ifdef PROP_TXSTATUS
+ BCMEVENT_NAME(WLC_E_FIFO_CREDIT_MAP),
+#endif
+ BCMEVENT_NAME(WLC_E_WAKE_EVENT),
+ BCMEVENT_NAME(WLC_E_DCS_REQUEST),
+ BCMEVENT_NAME(WLC_E_RM_COMPLETE),
+ BCMEVENT_NAME(WLC_E_OVERLAY_REQ),
+ BCMEVENT_NAME(WLC_E_CSA_COMPLETE_IND),
+ BCMEVENT_NAME(WLC_E_EXCESS_PM_WAKE_EVENT),
+ BCMEVENT_NAME(WLC_E_PFN_SCAN_NONE),
+ BCMEVENT_NAME(WLC_E_PFN_SCAN_ALLGONE),
+#ifdef SOFTAP
+ BCMEVENT_NAME(WLC_E_GTK_PLUMBED),
+#endif
+ BCMEVENT_NAME(WLC_E_ASSOC_REQ_IE),
+ BCMEVENT_NAME(WLC_E_ASSOC_RESP_IE),
+ BCMEVENT_NAME(WLC_E_BEACON_FRAME_RX),
+#ifdef WLTDLS
+ BCMEVENT_NAME(WLC_E_TDLS_PEER_EVENT),
+#endif /* WLTDLS */
+ BCMEVENT_NAME(WLC_E_NATIVE),
+#ifdef WLPKTDLYSTAT
+ BCMEVENT_NAME(WLC_E_PKTDELAY_IND),
+#endif /* WLPKTDLYSTAT */
+ BCMEVENT_NAME(WLC_E_SERVICE_FOUND),
+ BCMEVENT_NAME(WLC_E_GAS_FRAGMENT_RX),
+ BCMEVENT_NAME(WLC_E_GAS_COMPLETE),
+ BCMEVENT_NAME(WLC_E_P2PO_ADD_DEVICE),
+ BCMEVENT_NAME(WLC_E_P2PO_DEL_DEVICE),
+#ifdef WLWNM
+ BCMEVENT_NAME(WLC_E_WNM_STA_SLEEP),
+#endif /* WLWNM */
+#if defined(WL_PROXDETECT) || defined(RTT_SUPPORT)
+ BCMEVENT_NAME(WLC_E_PROXD),
+#endif
+ BCMEVENT_NAME(WLC_E_CCA_CHAN_QUAL),
+ BCMEVENT_NAME(WLC_E_BSSID),
+#ifdef PROP_TXSTATUS
+ BCMEVENT_NAME(WLC_E_BCMC_CREDIT_SUPPORT),
+#endif
+ BCMEVENT_NAME(WLC_E_PSTA_PRIMARY_INTF_IND),
+ BCMEVENT_NAME(WLC_E_TXFAIL_THRESH),
+#ifdef WLAIBSS
+ BCMEVENT_NAME(WLC_E_AIBSS_TXFAIL),
+#endif /* WLAIBSS */
+#ifdef GSCAN_SUPPORT
+ BCMEVENT_NAME(WLC_E_PFN_GSCAN_FULL_RESULT),
+ BCMEVENT_NAME(WLC_E_PFN_SSID_EXT),
+#endif /* GSCAN_SUPPORT */
+#ifdef WLBSSLOAD_REPORT
+ BCMEVENT_NAME(WLC_E_BSS_LOAD),
+#endif
+#if defined(BT_WIFI_HANDOVER) || defined(WL_TBOW)
+ BCMEVENT_NAME(WLC_E_BT_WIFI_HANDOVER_REQ),
+#endif
+#ifdef WLFBT
+ BCMEVENT_NAME(WLC_E_FBT),
+#endif /* WLFBT */
+ BCMEVENT_NAME(WLC_E_AUTHORIZED),
+ BCMEVENT_NAME(WLC_E_PROBREQ_MSG_RX),
+
+#ifdef WLAWDL
+ BCMEVENT_NAME(WLC_E_AWDL_AW),
+ BCMEVENT_NAME(WLC_E_AWDL_ROLE),
+ BCMEVENT_NAME(WLC_E_AWDL_EVENT),
+#endif /* WLAWDL */
+
+ BCMEVENT_NAME(WLC_E_CSA_START_IND),
+ BCMEVENT_NAME(WLC_E_CSA_DONE_IND),
+ BCMEVENT_NAME(WLC_E_CSA_FAILURE_IND),
+ BCMEVENT_NAME(WLC_E_RMC_EVENT),
+ BCMEVENT_NAME(WLC_E_DPSTA_INTF_IND),
+ BCMEVENT_NAME(WLC_E_ALLOW_CREDIT_BORROW),
+ BCMEVENT_NAME(WLC_E_MSCH),
+ BCMEVENT_NAME(WLC_E_ULP),
+ BCMEVENT_NAME(WLC_E_NAN),
+ BCMEVENT_NAME(WLC_E_PKT_FILTER),
+ BCMEVENT_NAME(WLC_E_DMA_TXFLUSH_COMPLETE),
+ BCMEVENT_NAME(WLC_E_PSK_AUTH),
+ BCMEVENT_NAME(WLC_E_SDB_TRANSITION),
+ BCMEVENT_NAME(WLC_E_PFN_SCAN_BACKOFF),
+ BCMEVENT_NAME(WLC_E_PFN_BSSID_SCAN_BACKOFF),
+ BCMEVENT_NAME(WLC_E_AGGR_EVENT),
+ BCMEVENT_NAME(WLC_E_TVPM_MITIGATION),
+ BCMEVENT_NAME(WLC_E_SCAN),
+ BCMEVENT_NAME(WLC_E_SLOTTED_BSS_PEER_OP),
+ BCMEVENT_NAME(WLC_E_PHY_CAL),
+#ifdef WL_NAN
+ BCMEVENT_NAME(WLC_E_NAN_CRITICAL),
+ BCMEVENT_NAME(WLC_E_NAN_NON_CRITICAL),
+ BCMEVENT_NAME(WLC_E_NAN),
+#endif /* WL_NAN */
+ BCMEVENT_NAME(WLC_E_RPSNOA),
+ BCMEVENT_NAME(WLC_E_WA_LQM),
+ BCMEVENT_NAME(WLC_E_OBSS_DETECTION),
+ BCMEVENT_NAME(WLC_E_SC_CHAN_QUAL),
+ BCMEVENT_NAME(WLC_E_DYNSAR),
+ BCMEVENT_NAME(WLC_E_ROAM_CACHE_UPDATE),
+ BCMEVENT_NAME(WLC_E_AP_BCN_DRIFT),
+ BCMEVENT_NAME(WLC_E_PFN_SCAN_ALLGONE_EXT),
+#ifdef WL_CLIENT_SAE
+ BCMEVENT_NAME(WLC_E_AUTH_START),
+#endif /* WL_CLIENT_SAE */
+#ifdef WL_TWT
+ BCMEVENT_NAME(WLC_E_TWT_SETUP),
+ BCMEVENT_NAME(WLC_E_TWT_TEARDOWN),
+ BCMEVENT_NAME(WLC_E_TWT_INFO_FRM)
+#endif /* WL_TWT */
+};
+
+const char *bcmevent_get_name(uint event_type)
+{
+	/* note: this was first coded as a static const, but some ROMs
+	 * already have a symbol called event_name, so it was changed to
+	 * avoid a variable for the 'unknown' string.
+	 */
+ const char *event_name = NULL;
+
+ uint idx;
+ for (idx = 0; idx < (uint)ARRAYSIZE(bcmevent_names); idx++) {
+
+ if (bcmevent_names[idx].event == event_type) {
+ event_name = bcmevent_names[idx].name;
+ break;
+ }
+ }
+
+	/* if an event name was found in the array, return it;
+	 * otherwise return the unknown string.
+	 */
+ return ((event_name) ? event_name : "Unknown Event");
+}
+
+void
+wl_event_to_host_order(wl_event_msg_t * evt)
+{
+ /* Event struct members passed from dongle to host are stored in network
+ * byte order. Convert all members to host-order.
+ */
+ evt->event_type = ntoh32(evt->event_type);
+ evt->flags = ntoh16(evt->flags);
+ evt->status = ntoh32(evt->status);
+ evt->reason = ntoh32(evt->reason);
+ evt->auth_type = ntoh32(evt->auth_type);
+ evt->datalen = ntoh32(evt->datalen);
+ evt->version = ntoh16(evt->version);
+}
+
+void
+wl_event_to_network_order(wl_event_msg_t * evt)
+{
+	/* Event struct members are sent to the dongle in network byte
+	 * order. Convert all members from host order to network order.
+	 */
+ evt->event_type = hton32(evt->event_type);
+ evt->flags = hton16(evt->flags);
+ evt->status = hton32(evt->status);
+ evt->reason = hton32(evt->reason);
+ evt->auth_type = hton32(evt->auth_type);
+ evt->datalen = hton32(evt->datalen);
+ evt->version = hton16(evt->version);
+}
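+
+/* Usage sketch (illustrative only): events arrive from the dongle in
+ * network byte order and must be converted before their fields are read;
+ * the reverse helper restores network order before sending back out.
+ *
+ *	wl_event_msg_t evt;	// filled from a received event frame
+ *	wl_event_to_host_order(&evt);
+ *	if (evt.event_type == WLC_E_LINK) {
+ *		// fields such as status/reason are now usable in host order
+ *	}
+ */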
+
+/*
+ * Validate a received event frame and, if it is valid, copy the event
+ * header to out_event. To validate only, pass NULL for out_event.
+ *
+ * Return values:
+ * BCME_OK - It is a BRCM event or BRCM dongle event
+ * BCME_NOTFOUND - Not a BRCM event; may be an ordinary frame
+ * BCME_BADLEN - Bad length; do not process, just drop
+ */
+int
+is_wlc_event_frame(void *pktdata, uint pktlen, uint16 exp_usr_subtype,
+ bcm_event_msg_u_t *out_event)
+{
+ uint16 evlen = 0; /* length in bcmeth_hdr */
+ uint16 subtype;
+ uint16 usr_subtype;
+ bcm_event_t *bcm_event;
+ uint8 *pktend;
+ uint8 *evend;
+ int err = BCME_OK;
+ uint32 data_len = 0; /* data length in bcm_event */
+
+ pktend = (uint8 *)pktdata + pktlen;
+ bcm_event = (bcm_event_t *)pktdata;
+
+ /* only care about 16-bit subtype / length versions */
+ if ((uint8 *)&bcm_event->bcm_hdr < pktend) {
+ uint8 short_subtype = *(uint8 *)&bcm_event->bcm_hdr;
+ if (!(short_subtype & 0x80)) {
+ err = BCME_NOTFOUND;
+ goto done;
+ }
+ }
+
+ /* must have both ether_header and bcmeth_hdr */
+ if (pktlen < OFFSETOF(bcm_event_t, event)) {
+ err = BCME_BADLEN;
+ goto done;
+ }
+
+ /* check length in bcmeth_hdr */
+
+#ifdef BCMDONGLEHOST
+	/* temporary: the header length is not always set properly, so derive
+	 * it from the end of the packet. Once the !BCMDONGLEHOST path below is
+	 * in all branches that use trunk DHD, this BCMDONGLEHOST code can be
+	 * removed.
+	 */
+ evlen = (uint16)(pktend - (uint8 *)&bcm_event->bcm_hdr.version);
+#else
+ evlen = ntoh16_ua((void *)&bcm_event->bcm_hdr.length);
+#endif /* BCMDONGLEHOST */
+ evend = (uint8 *)&bcm_event->bcm_hdr.version + evlen;
+ if (evend != pktend) {
+ err = BCME_BADLEN;
+ goto done;
+ }
+
+ /* match on subtype, oui and usr subtype for BRCM events */
+ subtype = ntoh16_ua((void *)&bcm_event->bcm_hdr.subtype);
+ if (subtype != BCMILCP_SUBTYPE_VENDOR_LONG) {
+ err = BCME_NOTFOUND;
+ goto done;
+ }
+
+ if (bcmp(BRCM_OUI, &bcm_event->bcm_hdr.oui[0], DOT11_OUI_LEN)) {
+ err = BCME_NOTFOUND;
+ goto done;
+ }
+
+ /* if it is a bcm_event or bcm_dngl_event_t, validate it */
+ usr_subtype = ntoh16_ua((void *)&bcm_event->bcm_hdr.usr_subtype);
+ switch (usr_subtype) {
+ case BCMILCP_BCM_SUBTYPE_EVENT:
+ /* check that header length and pkt length are sufficient */
+ if ((pktlen < sizeof(bcm_event_t)) ||
+ (evend < ((uint8 *)bcm_event + sizeof(bcm_event_t)))) {
+ err = BCME_BADLEN;
+ goto done;
+ }
+
+ /* ensure data length in event is not beyond the packet. */
+ data_len = ntoh32_ua((void *)&bcm_event->event.datalen);
+ if ((sizeof(bcm_event_t) + data_len +
+ BCMILCP_BCM_SUBTYPE_EVENT_DATA_PAD) != pktlen) {
+ err = BCME_BADLEN;
+ goto done;
+ }
+
+ if (exp_usr_subtype && (exp_usr_subtype != usr_subtype)) {
+ err = BCME_NOTFOUND;
+ goto done;
+ }
+
+ if (out_event) {
+ /* ensure BRCM event pkt aligned */
+ memcpy(&out_event->event, &bcm_event->event, sizeof(wl_event_msg_t));
+ }
+
+ break;
+
+ case BCMILCP_BCM_SUBTYPE_DNGLEVENT:
+#if defined(HEALTH_CHECK) || defined(DNGL_EVENT_SUPPORT)
+ if ((pktlen < sizeof(bcm_dngl_event_t)) ||
+ (evend < ((uint8 *)bcm_event + sizeof(bcm_dngl_event_t)))) {
+ err = BCME_BADLEN;
+ goto done;
+ }
+
+ /* ensure data length in event is not beyond the packet. */
+ data_len = ntoh16_ua((void *)&((bcm_dngl_event_t *)pktdata)->dngl_event.datalen);
+ if ((sizeof(bcm_dngl_event_t) + data_len +
+ BCMILCP_BCM_SUBTYPE_EVENT_DATA_PAD) != pktlen) {
+ err = BCME_BADLEN;
+ goto done;
+ }
+
+ if (exp_usr_subtype && (exp_usr_subtype != usr_subtype)) {
+ err = BCME_NOTFOUND;
+ goto done;
+ }
+
+ if (out_event) {
+ /* ensure BRCM dngl event pkt aligned */
+ memcpy(&out_event->dngl_event, &((bcm_dngl_event_t *)pktdata)->dngl_event,
+ sizeof(bcm_dngl_event_msg_t));
+ }
+
+ break;
+#else
+ err = BCME_UNSUPPORTED;
+ break;
+#endif /* HEALTH_CHECK || DNGL_EVENT_SUPPORT */
+
+ default:
+ err = BCME_NOTFOUND;
+ goto done;
+ }
+
+ BCM_REFERENCE(data_len);
+done:
+ return err;
+}
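+
+/* Illustrative receive-path sketch (not part of the build): validate an
+ * incoming frame, then convert and name the event. pktdata/pktlen come
+ * from a hypothetical receive path, and printf stands in for the driver's
+ * logging.
+ *
+ *	bcm_event_msg_u_t evu;
+ *	if (is_wlc_event_frame(pktdata, pktlen, 0, &evu) == BCME_OK) {
+ *		wl_event_to_host_order(&evu.event);
+ *		printf("event: %s\n", bcmevent_get_name(evu.event.event_type));
+ *	}
+ */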
diff --git a/bcmdhd.101.10.361.x/bcminternal-android.mk b/bcmdhd.101.10.361.x/bcminternal-android.mk
new file mode 100755
index 0000000..4fecfad
--- /dev/null
+++ b/bcmdhd.101.10.361.x/bcminternal-android.mk
@@ -0,0 +1,88 @@
+#
+# Broadcom Proprietary and Confidential. Copyright (C) 2020,
+# All Rights Reserved.
+#
+# This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom;
+# the contents of this file may not be disclosed to third parties,
+# copied or duplicated in any form, in whole or in part, without
+# the prior written permission of Broadcom.
+#
+#
+# <<Broadcom-WL-IPTag/Secret:>>
+
+# This file should be seen only by internal builds because it will
+# be mentioned only in internal filelists like brcm.flist.
+# See the extended comment in bcminternal.mk for details.
+
+BCMINTERNAL := 1
+
+BCMINTERNAL_DFLAGS += -DDHD_NO_MOG
+
+ifneq ($(CONFIG_BCMDHD_PCIE),)
+ # Enable Register access via dhd IOVAR
+ BCMINTERNAL_DFLAGS += -DDHD_PCIE_REG_ACCESS
+ # latency timestamping
+ BCMINTERNAL_DFLAGS += -DDHD_PKTTS
+ # Traffic Pattern Analysis on Socket Flow
+ BCMINTERNAL_DFLAGS += -DDHD_QOS_ON_SOCK_FLOW
+ # QoS unit testing support
+ BCMINTERNAL_DFLAGS += -DDHD_QOS_ON_SOCK_FLOW_UT
+ # Auto QOS
+ BCMINTERNAL_DFLAGS += -DWL_AUTO_QOS
+
+ ifneq ($(filter -DCUSTOMER_HW4, $(DHDCFLAGS)),)
+ # These will be moved to hw4 Makefile for 4389b0
+ BCMINTERNAL_DFLAGS += -DWBRC
+ BCMINTERNAL_DFLAGS += -DWLAN_ACCEL_BOOT
+ BCMINTERNAL_DFLAGS += -DDHD_HTPUT_TUNABLES
+ # BCMINTERNAL_DFLAGS += -DDHD_FIS_DUMP
+	# Scan types: kernels < 4.17 need the cfg80211 scan-type backport
+ ifneq ($(CONFIG_CFG80211_SCANTYPE_BKPORT),)
+ DHDCFLAGS += -DWL_SCAN_TYPE
+ endif
+ # Jig builds
+ # No reset during dhd attach
+ BCMINTERNAL_DFLAGS += -DDHD_SKIP_DONGLE_RESET_IN_ATTACH
+	# Dongle isolation ensures there are no resets across devreset ON/OFF
+ BCMINTERNAL_DFLAGS += -DDONGLE_ENABLE_ISOLATION
+ # Quiesce dongle using DB7 trap
+ BCMINTERNAL_DFLAGS += -DDHD_DONGLE_TRAP_IN_DETACH
+	# Collect socram during dongle init failures for internal builds
+ BCMINTERNAL_DFLAGS += -DDEBUG_DNGL_INIT_FAIL
+	# Reset the dongle during WiFi ON to keep it in a sane state
+ BCMINTERNAL_DFLAGS += -DFORCE_DONGLE_RESET_IN_DEVRESET_ON
+	# Perform a backplane reset, else an FLR will happen
+ # BCMINTERNAL_DFLAGS += -DDHD_USE_BP_RESET_SS_CTRL
+ BCMINTERNAL_DFLAGS += -DWIFI_TURNOFF_DELAY=10
+
+ endif
+
+ # NCI_BUS support
+ BCMINTERNAL_DFLAGS += -DSOCI_NCI_BUS
+endif
+
+BCMINTERNAL_DFLAGS += -DDHD_BUS_MEM_ACCESS
+
+# Support multiple chips
+BCMINTERNAL_DFLAGS += -DSUPPORT_MULTIPLE_CHIPS
+
+# Support unreleased chips
+BCMINTERNAL_DFLAGS += -DUNRELEASEDCHIP
+
+# Collect socram if readshared fails
+BCMINTERNAL_DFLAGS += -DDEBUG_DNGL_INIT_FAIL
+
+# Force the memdump value to DUMP_MEMFILE if it is disabled
+BCMINTERNAL_DFLAGS += -DDHD_INIT_DEFAULT_MEMDUMP
+
+ifneq ($(filter -DDHD_QOS_ON_SOCK_FLOW,$(BCMINTERNAL_DFLAGS)),)
+BCMINTERNAL_DHDOFILES += dhd_linux_sock_qos.o
+endif
+ifneq ($(filter -DSOCI_NCI_BUS,$(BCMINTERNAL_DFLAGS)),)
+BCMINTERNAL_DHDOFILES += nciutils.o
+endif
+ifneq ($(filter -DWBRC,$(BCMINTERNAL_DFLAGS)),)
+BCMINTERNAL_DHDOFILES += wb_regon_coordinator.o
+endif
+# vim: filetype=make shiftwidth=2
diff --git a/bcmdhd.101.10.361.x/bcminternal.mk b/bcmdhd.101.10.361.x/bcminternal.mk
new file mode 100755
index 0000000..eb94021
--- /dev/null
+++ b/bcmdhd.101.10.361.x/bcminternal.mk
@@ -0,0 +1,60 @@
+#
+# Broadcom Proprietary and Confidential. Copyright (C) 2020,
+# All Rights Reserved.
+#
+# This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom;
+# the contents of this file may not be disclosed to third parties,
+# copied or duplicated in any form, in whole or in part, without
+# the prior written permission of Broadcom.
+#
+#
+# <<Broadcom-WL-IPTag/Secret:>>
+
+# This file should be seen only by internal builds because it will
+# be mentioned only in internal filelists like brcm.flist. The idea
+# is that it will be conditionally included by makefiles using the
+# "-include" syntax, with the result that internal builds will see
+# this file and set BCMINTERNAL which will eventually result in a
+# -DBCMINTERNAL option passed to the compiler along with possible
+# other effects. External builds will never see it and it will be
+# silently ignored.
+#
+# Any settings which should not be exposed to customers may be
+# placed here. For instance, if we were working on a super-secret
+# new feature in supersecret.c we could set a variable here like
+# BCMINTERNAL_OBJECTS := supersecret.o
+# and later say
+# OBJECTS += $(BCMINTERNAL_OBJECTS)
+# within the main makefile.
+#
+# The key point is that this file is never shipped to customers
+# because it's present only in internal filelists so anything
+# here is private.
+
+BCMINTERNAL := 1
+
+BCMINTERNAL_DFLAGS += -DBCMINTERNAL
+BCMINTERNAL_DFLAGS += -DDHD_NO_MOG
+
+# Support unreleased chips
+BCMINTERNAL_DFLAGS += -DUNRELEASEDCHIP
+
+ifneq ($(findstring -fwtrace,-$(TARGET)-),)
+ BCMINTERNAL_DFLAGS += -DDHD_FWTRACE
+ BCMINTERNAL_CFILES += dhd_fwtrace.c
+endif
+
+# support only for SDIO MFG Fedora builds
+ifneq ($(findstring -sdstd-,-$(TARGET)-),)
+ ifneq ($(findstring -mfgtest-,-$(TARGET)-),)
+ BCMINTERNAL_DFLAGS += -DDHD_SPROM
+ BCMINTERNAL_CFILES += bcmsrom.c bcmotp.c
+ endif
+endif
+
+ifneq ($(findstring -pciefd-,-$(TARGET)-),)
+# NCI_BUS support
+BCMINTERNAL_DFLAGS += -DSOCI_NCI_BUS -DBOOKER_NIC400_INF
+BCMINTERNAL_CFILES += nciutils.c
+endif
+# vim: filetype=make shiftwidth=2
diff --git a/bcmdhd.101.10.361.x/bcmsdh.c b/bcmdhd.101.10.361.x/bcmsdh.c
new file mode 100755
index 0000000..538f056
--- /dev/null
+++ b/bcmdhd.101.10.361.x/bcmsdh.c
@@ -0,0 +1,953 @@
+/*
+ * BCMSDH interface glue
+ * implement bcmsdh API for SDIOH driver
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+/**
+ * @file bcmsdh.c
+ */
+
+/* ****************** BCMSDH Interface Functions *************************** */
+
+#include <typedefs.h>
+#include <bcmdevs.h>
+#include <bcmendian.h>
+#include <bcmutils.h>
+#include <hndsoc.h>
+#include <siutils.h>
+#if !defined(BCMDONGLEHOST)
+#include <bcmsrom.h>
+#endif /* !defined(BCMDONGLEHOST) */
+#include <osl.h>
+
+#include <bcmsdh.h> /* BRCM API for SDIO clients (such as wl, dhd) */
+#include <bcmsdbus.h> /* common SDIO/controller interface */
+#include <sbsdio.h> /* SDIO device core hardware definitions. */
+#include <sdio.h> /* SDIO Device and Protocol Specs */
+
+#if defined (BT_OVER_SDIO)
+#include <dhd_bt_interface.h>
+#endif /* defined (BT_OVER_SDIO) */
+
+#define SDIOH_API_ACCESS_RETRY_LIMIT 2
+const uint bcmsdh_msglevel = BCMSDH_ERROR_VAL;
+
+/* local copy of bcm sd handler */
+bcmsdh_info_t * l_bcmsdh = NULL;
+
+#if defined (BT_OVER_SDIO)
+struct sdio_func *func_f3 = NULL;
+static f3intr_handler processf3intr = NULL;
+static dhd_hang_notification process_dhd_hang_notification = NULL;
+static dhd_hang_state_t g_dhd_hang_state = NO_HANG_STATE;
+#endif /* defined (BT_OVER_SDIO) */
+
+#if defined(NDIS) && (NDISVER < 0x0630)
+extern SDIOH_API_RC sdioh_detach(osl_t *osh, sdioh_info_t *sd);
+#endif
+
+#if defined(OOB_INTR_ONLY) && defined(HW_OOB) || defined(FORCE_WOWLAN)
+extern int
+sdioh_enable_hw_oob_intr(void *sdioh, bool enable);
+
+void
+bcmsdh_enable_hw_oob_intr(bcmsdh_info_t *sdh, bool enable)
+{
+ sdioh_enable_hw_oob_intr(sdh->sdioh, enable);
+}
+#endif
+
+#if defined (BT_OVER_SDIO)
+void bcmsdh_btsdio_process_hang_state(dhd_hang_state_t new_state)
+{
+ bool state_change = false;
+
+ BCMSDH_ERROR(("%s: DHD hang state changed - [%d] -> [%d]\n",
+ __FUNCTION__, g_dhd_hang_state, new_state));
+
+ if (g_dhd_hang_state == new_state)
+ return;
+
+ switch (g_dhd_hang_state) {
+ case NO_HANG_STATE:
+ if (HANG_START_STATE == new_state)
+ state_change = true;
+ break;
+
+ case HANG_START_STATE:
+ if (HANG_RECOVERY_STATE == new_state ||
+ NO_HANG_STATE == new_state)
+ state_change = true;
+
+ break;
+
+ case HANG_RECOVERY_STATE:
+ if (NO_HANG_STATE == new_state)
+ state_change = true;
+ break;
+
+ default:
+ BCMSDH_ERROR(("%s: Unhandled Hang state\n", __FUNCTION__));
+ break;
+ }
+
+ if (!state_change) {
+ BCMSDH_ERROR(("%s: Hang state cannot be changed\n", __FUNCTION__));
+ return;
+ }
+
+ g_dhd_hang_state = new_state;
+}
+
+void bcmsdh_btsdio_process_f3_intr(void)
+{
+ if (processf3intr && (g_dhd_hang_state == NO_HANG_STATE))
+ processf3intr(func_f3);
+}
+
+void bcmsdh_btsdio_process_dhd_hang_notification(bool wifi_recovery_completed)
+{
+ bcmsdh_btsdio_process_hang_state(HANG_START_STATE);
+
+ if (process_dhd_hang_notification)
+ process_dhd_hang_notification(func_f3, wifi_recovery_completed);
+
+ /* WiFi was off, so HANG_RECOVERY_STATE is not needed */
+ if (wifi_recovery_completed)
+ bcmsdh_btsdio_process_hang_state(NO_HANG_STATE);
+ else {
+ bcmsdh_btsdio_process_hang_state(HANG_RECOVERY_STATE);
+ }
+}
+
+void bcmsdh_btsdio_interface_init(struct sdio_func *func,
+ f3intr_handler f3intr_fun, dhd_hang_notification hang_notification)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)l_bcmsdh;
+ BCMSDH_INFO(("%s: func %p \n", __FUNCTION__, func));
+ func_f3 = func;
+ processf3intr = f3intr_fun;
+ sdioh_sdmmc_card_enable_func_f3(bcmsdh->sdioh, func);
+	process_dhd_hang_notification = hang_notification;
+}
+EXPORT_SYMBOL(bcmsdh_btsdio_interface_init);
+#endif /* defined (BT_OVER_SDIO) */
+
+/* Attach BCMSDH layer to SDIO Host Controller Driver
+ *
+ * @param osh OSL Handle.
+ * @param sdioh Handle to the lower-level SDIO host controller (sdioh) layer.
+ * @param regsva Out parameter; set to the device enumeration base address.
+ *
+ * @return bcmsdh_info_t Handle to BCMSDH context.
+ */
+bcmsdh_info_t *
+bcmsdh_attach(osl_t *osh, void *sdioh, ulong *regsva)
+{
+ bcmsdh_info_t *bcmsdh;
+
+ if ((bcmsdh = (bcmsdh_info_t *)MALLOC(osh, sizeof(bcmsdh_info_t))) == NULL) {
+ BCMSDH_ERROR(("bcmsdh_attach: out of memory, malloced %d bytes\n", MALLOCED(osh)));
+ return NULL;
+ }
+ bzero((char *)bcmsdh, sizeof(bcmsdh_info_t));
+ bcmsdh->sdioh = sdioh;
+ bcmsdh->osh = osh;
+ bcmsdh->init_success = TRUE;
+ *regsva = si_enum_base(0);
+
+ bcmsdh_force_sbwad_calc(bcmsdh, FALSE);
+
+	/* Seed the window base (BAR) with the enumeration base; fixed up later if needed */
+ bcmsdh->sbwad = si_enum_base(0);
+
+ /* save the handler locally */
+ l_bcmsdh = bcmsdh;
+
+ return bcmsdh;
+}
+
+int
+bcmsdh_detach(osl_t *osh, void *sdh)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+ if (bcmsdh != NULL) {
+#if defined(NDIS) && (NDISVER < 0x0630)
+ if (bcmsdh->sdioh)
+ sdioh_detach(osh, bcmsdh->sdioh);
+#endif
+ MFREE(osh, bcmsdh, sizeof(bcmsdh_info_t));
+ }
+
+ l_bcmsdh = NULL;
+
+ return 0;
+}
+
+int
+bcmsdh_iovar_op(void *sdh, const char *name,
+ void *params, uint plen, void *arg, uint len, bool set)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ return sdioh_iovar_op(bcmsdh->sdioh, name, params, plen, arg, len, set);
+}
+
+bool
+bcmsdh_intr_query(void *sdh)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+ bool on;
+
+ ASSERT(bcmsdh);
+	status = sdioh_interrupt_query(bcmsdh->sdioh, &on);
+	if (!SDIOH_API_SUCCESS(status))
+		return FALSE; /* query failed: report no interrupt pending */
+	return on;
+}
+
+int
+bcmsdh_intr_enable(void *sdh)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+#ifdef BCMSPI_ANDROID
+ uint32 data;
+#endif /* BCMSPI_ANDROID */
+ ASSERT(bcmsdh);
+
+ status = sdioh_interrupt_set(bcmsdh->sdioh, TRUE);
+#ifdef BCMSPI_ANDROID
+ data = bcmsdh_cfg_read_word(sdh, 0, 4, NULL);
+ data |= 0xE0E70000;
+ bcmsdh_cfg_write_word(sdh, 0, 4, data, NULL);
+#endif /* BCMSPI_ANDROID */
+ return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+int
+bcmsdh_intr_disable(void *sdh)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+#ifdef BCMSPI_ANDROID
+ uint32 data;
+#endif /* BCMSPI_ANDROID */
+ ASSERT(bcmsdh);
+
+ status = sdioh_interrupt_set(bcmsdh->sdioh, FALSE);
+#ifdef BCMSPI_ANDROID
+ data = bcmsdh_cfg_read_word(sdh, 0, 4, NULL);
+ data &= ~0xE0E70000;
+ bcmsdh_cfg_write_word(sdh, 0, 4, data, NULL);
+#endif /* BCMSPI_ANDROID */
+ return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+int
+bcmsdh_intr_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+
+ if (!bcmsdh)
+ bcmsdh = l_bcmsdh;
+
+ ASSERT(bcmsdh);
+
+ status = sdioh_interrupt_register(bcmsdh->sdioh, fn, argh);
+ return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+int
+bcmsdh_intr_dereg(void *sdh)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+
+ if (!bcmsdh)
+ bcmsdh = l_bcmsdh;
+
+ ASSERT(bcmsdh);
+
+ status = sdioh_interrupt_deregister(bcmsdh->sdioh);
+ return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+#if defined(DHD_DEBUG) || defined(BCMDBG)
+bool
+bcmsdh_intr_pending(void *sdh)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+ ASSERT(sdh);
+ return sdioh_interrupt_pending(bcmsdh->sdioh);
+}
+#endif
+
+int
+bcmsdh_devremove_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh)
+{
+ ASSERT(sdh);
+
+	/* not supported yet */
+ return BCME_UNSUPPORTED;
+}
+
+/**
+ * Read from SDIO Configuration Space
+ * @param sdh SDIO Host context.
+ * @param func_num Function number to read from.
+ * @param addr Address to read from.
+ * @param err Error return.
+ * @return value read from SDIO configuration space.
+ */
+uint8
+bcmsdh_cfg_read(void *sdh, uint fnc_num, uint32 addr, int *err)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+ int32 retry = 0;
+#endif
+ uint8 data = 0;
+
+ if (!bcmsdh)
+ bcmsdh = l_bcmsdh;
+
+ ASSERT(bcmsdh->init_success);
+
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+ do {
+		if (retry) /* wait 1 ms for the bus to settle */
+ OSL_DELAY(1000);
+#endif
+ status = sdioh_cfg_read(bcmsdh->sdioh, fnc_num, addr, (uint8 *)&data);
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+ } while (!SDIOH_API_SUCCESS(status) && (retry++ < SDIOH_API_ACCESS_RETRY_LIMIT));
+#endif
+ if (err)
+ *err = (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR);
+
+ BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint8data = 0x%x\n", __FUNCTION__,
+ fnc_num, addr, data));
+
+ return data;
+}
+#ifdef BCMSDH_MODULE
+EXPORT_SYMBOL(bcmsdh_cfg_read);
+#endif
+
+void
+bcmsdh_cfg_write(void *sdh, uint fnc_num, uint32 addr, uint8 data, int *err)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+ int32 retry = 0;
+#endif
+
+ if (!bcmsdh)
+ bcmsdh = l_bcmsdh;
+
+ ASSERT(bcmsdh->init_success);
+
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+ do {
+		if (retry) /* wait 1 ms for the bus to settle */
+ OSL_DELAY(1000);
+#endif
+ status = sdioh_cfg_write(bcmsdh->sdioh, fnc_num, addr, (uint8 *)&data);
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+ } while (!SDIOH_API_SUCCESS(status) && (retry++ < SDIOH_API_ACCESS_RETRY_LIMIT));
+#endif
+ if (err)
+ *err = SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR;
+
+ BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint8data = 0x%x\n", __FUNCTION__,
+ fnc_num, addr, data));
+}
+#ifdef BCMSDH_MODULE
+EXPORT_SYMBOL(bcmsdh_cfg_write);
+#endif
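+
+/* Usage sketch (illustrative only): read-modify-write of a function-1
+ * config-space register. The 0x1000 offset and bit 0 are hypothetical.
+ *
+ *	int err = 0;
+ *	uint8 v = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, 0x1000, &err);
+ *	if (!err)
+ *		bcmsdh_cfg_write(sdh, SDIO_FUNC_1, 0x1000, v | 0x1, &err);
+ */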
+
+uint32
+bcmsdh_cfg_read_word(void *sdh, uint fnc_num, uint32 addr, int *err)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+ uint32 data = 0;
+
+ if (!bcmsdh)
+ bcmsdh = l_bcmsdh;
+
+ ASSERT(bcmsdh->init_success);
+
+ status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, SDIOH_READ, fnc_num,
+ addr, &data, 4);
+
+ if (err)
+ *err = (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR);
+
+ BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint32data = 0x%x\n", __FUNCTION__,
+ fnc_num, addr, data));
+
+ return data;
+}
+
+void
+bcmsdh_cfg_write_word(void *sdh, uint fnc_num, uint32 addr, uint32 data, int *err)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+
+ if (!bcmsdh)
+ bcmsdh = l_bcmsdh;
+
+ ASSERT(bcmsdh->init_success);
+
+ status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, SDIOH_WRITE, fnc_num,
+ addr, &data, 4);
+
+ if (err)
+ *err = (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR);
+
+ BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint32data = 0x%x\n", __FUNCTION__, fnc_num,
+ addr, data));
+}
+
+int
+bcmsdh_cis_read(void *sdh, uint func, uint8 *cis, uint length)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+
+ uint8 *tmp_buf, *tmp_ptr;
+ uint8 *ptr;
+ bool ascii = func & ~0xf;
+ func &= 0x7;
+
+ if (!bcmsdh)
+ bcmsdh = l_bcmsdh;
+
+ ASSERT(bcmsdh->init_success);
+ ASSERT(cis);
+ ASSERT(length <= SBSDIO_CIS_SIZE_LIMIT);
+
+ status = sdioh_cis_read(bcmsdh->sdioh, func, cis, length);
+
+ if (ascii) {
+ /* Move binary bits to tmp and format them into the provided buffer. */
+ if ((tmp_buf = (uint8 *)MALLOC(bcmsdh->osh, length)) == NULL) {
+ BCMSDH_ERROR(("%s: out of memory\n", __FUNCTION__));
+ return BCME_NOMEM;
+ }
+ bcopy(cis, tmp_buf, length);
+ for (tmp_ptr = tmp_buf, ptr = cis; ptr < (cis + length - 4); tmp_ptr++) {
+ ptr += snprintf((char*)ptr, (cis + length - ptr - 4),
+ "%.2x ", *tmp_ptr & 0xff);
+ if ((((tmp_ptr - tmp_buf) + 1) & 0xf) == 0)
+				ptr += snprintf((char *)ptr, (cis + length - ptr - 4), "\n");
+ }
+ MFREE(bcmsdh->osh, tmp_buf, length);
+ }
+
+ return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+int
+bcmsdh_cisaddr_read(void *sdh, uint func, uint8 *cisd, uint32 offset)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+
+ func &= 0x7;
+
+ if (!bcmsdh)
+ bcmsdh = l_bcmsdh;
+
+ ASSERT(bcmsdh->init_success);
+ ASSERT(cisd);
+
+ status = sdioh_cisaddr_read(bcmsdh->sdioh, func, cisd, offset);
+
+ return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+int
+bcmsdhsdio_set_sbaddr_window(void *sdh, uint32 address, bool force_set)
+{
+ int err = 0;
+ uint bar0 = address & ~SBSDIO_SB_OFT_ADDR_MASK;
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+ if (bar0 != bcmsdh->sbwad || force_set) {
+ bcmsdh_cfg_write(bcmsdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW,
+ (address >> 8) & SBSDIO_SBADDRLOW_MASK, &err);
+ if (!err)
+ bcmsdh_cfg_write(bcmsdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRMID,
+ (address >> 16) & SBSDIO_SBADDRMID_MASK, &err);
+ if (!err)
+ bcmsdh_cfg_write(bcmsdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRHIGH,
+ (address >> 24) & SBSDIO_SBADDRHIGH_MASK, &err);
+
+ if (!err)
+ bcmsdh->sbwad = bar0;
+ else
+ /* invalidate cached window var */
+ bcmsdh->sbwad = 0;
+
+#ifdef BCMDBG
+ if (err)
+ BCMSDH_ERROR(("%s: error setting address window %08x\n",
+ __FUNCTION__, address));
+#endif /* BCMDBG */
+ }
+
+ return err;
+}
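+
+/* Worked example (a sketch assuming the usual 32 KB window, i.e.
+ * SBSDIO_SB_OFT_ADDR_MASK == 0x7FFF): for address 0x18001234 the window
+ * base bar0 is 0x18000000. SBADDRLOW is programmed from address bits
+ * [15:8], SBADDRMID from bits [23:16] and SBADDRHIGH from bits [31:24]
+ * (each masked as above); the access itself then uses only the in-window
+ * offset, addr & SBSDIO_SB_OFT_ADDR_MASK == 0x1234.
+ */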
+
+uint32
+bcmsdh_reg_read(void *sdh, uintptr addr, uint size)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+ uint32 word = 0;
+
+ BCMSDH_INFO(("%s:fun = 1, addr = 0x%x\n", __FUNCTION__, (unsigned int)addr));
+
+ if (!bcmsdh)
+ bcmsdh = l_bcmsdh;
+
+ ASSERT(bcmsdh->init_success);
+
+ if (bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, bcmsdh->force_sbwad_calc)) {
+		bcmsdh->regfail = TRUE; // terence 20130621: prevent dhd_dpc deadlock
+ return 0xFFFFFFFF;
+ }
+
+ addr &= SBSDIO_SB_OFT_ADDR_MASK;
+ if (size == 4)
+ addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+ status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL,
+ SDIOH_READ, SDIO_FUNC_1, addr, &word, size);
+
+ bcmsdh->regfail = !(SDIOH_API_SUCCESS(status));
+
+ BCMSDH_INFO(("uint32data = 0x%x\n", word));
+
+ /* if ok, return appropriately masked word */
+ /* XXX Masking was put in for NDIS port, remove if not needed */
+ if (SDIOH_API_SUCCESS(status)) {
+ switch (size) {
+ case sizeof(uint8):
+ return (word & 0xff);
+ case sizeof(uint16):
+ return (word & 0xffff);
+ case sizeof(uint32):
+ return word;
+ default:
+ bcmsdh->regfail = TRUE;
+
+ }
+ }
+
+ /* otherwise, bad sdio access or invalid size */
+ BCMSDH_ERROR(("%s: error reading addr 0x%x size %d\n",
+ __FUNCTION__, (unsigned int)addr, size));
+ return 0xFFFFFFFF;
+}
+
+uint32
+bcmsdh_reg_write(void *sdh, uintptr addr, uint size, uint32 data)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+ int err = 0;
+
+ BCMSDH_INFO(("%s:fun = 1, addr = 0x%x, uint%ddata = 0x%x\n",
+ __FUNCTION__, (unsigned int)addr, size*8, data));
+
+ if (!bcmsdh)
+ bcmsdh = l_bcmsdh;
+
+ ASSERT(bcmsdh->init_success);
+
+ if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, bcmsdh->force_sbwad_calc))) {
+		bcmsdh->regfail = TRUE; // terence 20130621: prevent dhd_dpc deadlock
+ return err;
+ }
+
+ addr &= SBSDIO_SB_OFT_ADDR_MASK;
+ if (size == 4)
+ addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+ status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, SDIOH_WRITE, SDIO_FUNC_1,
+ addr, &data, size);
+ bcmsdh->regfail = !(SDIOH_API_SUCCESS(status));
+
+ if (SDIOH_API_SUCCESS(status))
+ return 0;
+
+ BCMSDH_ERROR(("%s: error writing 0x%08x to addr 0x%04x size %d\n",
+ __FUNCTION__, data, (unsigned int)addr, size));
+ return 0xFFFFFFFF;
+}
+
+bool
+bcmsdh_regfail(void *sdh)
+{
+ return ((bcmsdh_info_t *)sdh)->regfail;
+}
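+
+/* Usage sketch (illustrative only): a 4-byte backplane register access.
+ * Reads return 0xFFFFFFFF on error, so regfail is the reliable error
+ * check; the address below is a hypothetical core register.
+ *
+ *	uint32 val = bcmsdh_reg_read(sdh, 0x18000000, 4);
+ *	if (!bcmsdh_regfail(sdh))
+ *		bcmsdh_reg_write(sdh, 0x18000000, 4, val | 0x1);
+ */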
+
+int
+bcmsdh_recv_buf(void *sdh, uint32 addr, uint fn, uint flags,
+ uint8 *buf, uint nbytes, void *pkt,
+ bcmsdh_cmplt_fn_t complete_fn, void *handle)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+ uint incr_fix;
+ uint width;
+ int err = 0;
+
+ ASSERT(bcmsdh);
+ ASSERT(bcmsdh->init_success);
+
+ BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, size = %d\n",
+ __FUNCTION__, fn, addr, nbytes));
+
+ /* Async not implemented yet */
+ ASSERT(!(flags & SDIO_REQ_ASYNC));
+ if (flags & SDIO_REQ_ASYNC)
+ return BCME_UNSUPPORTED;
+
+ if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, FALSE)))
+ return err;
+
+ addr &= SBSDIO_SB_OFT_ADDR_MASK;
+
+ incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
+ width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
+ if (width == 4)
+ addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+ status = sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, incr_fix,
+ SDIOH_READ, fn, addr, width, nbytes, buf, pkt);
+
+ return (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR);
+}
+
+int
+bcmsdh_send_buf(void *sdh, uint32 addr, uint fn, uint flags,
+ uint8 *buf, uint nbytes, void *pkt,
+ bcmsdh_cmplt_fn_t complete_fn, void *handle)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+ uint incr_fix;
+ uint width;
+ int err = 0;
+
+ ASSERT(bcmsdh);
+ ASSERT(bcmsdh->init_success);
+
+ BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, size = %d\n",
+ __FUNCTION__, fn, addr, nbytes));
+
+ /* Async not implemented yet */
+ ASSERT(!(flags & SDIO_REQ_ASYNC));
+ if (flags & SDIO_REQ_ASYNC)
+ return BCME_UNSUPPORTED;
+
+ if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, FALSE)))
+ return err;
+
+ addr &= SBSDIO_SB_OFT_ADDR_MASK;
+
+ incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
+ width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
+ if (width == 4)
+ addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+ status = sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, incr_fix,
+ SDIOH_WRITE, fn, addr, width, nbytes, buf, pkt);
+
+ return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
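+
+/* Usage sketch (illustrative only): a synchronous, incrementing-address,
+ * 4-byte-wide buffer read from function 2 (SDIO_FUNC_2 is assumed to be
+ * defined alongside SDIO_FUNC_1). Async is not implemented, so the
+ * completion callback and handle must be NULL; addr/buf/nbytes are given.
+ *
+ *	err = bcmsdh_recv_buf(sdh, addr, SDIO_FUNC_2, SDIO_REQ_4BYTE,
+ *		buf, nbytes, NULL, NULL, NULL);
+ */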
+
+int
+bcmsdh_rwdata(void *sdh, uint rw, uint32 addr, uint8 *buf, uint nbytes)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+
+ ASSERT(bcmsdh);
+ ASSERT(bcmsdh->init_success);
+ ASSERT((addr & SBSDIO_SBWINDOW_MASK) == 0);
+
+ addr &= SBSDIO_SB_OFT_ADDR_MASK;
+ addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+ status = sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, SDIOH_DATA_INC,
+ (rw ? SDIOH_WRITE : SDIOH_READ), SDIO_FUNC_1,
+ addr, 4, nbytes, buf, NULL);
+
+ return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+int
+bcmsdh_abort(void *sdh, uint fn)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+ return sdioh_abort(bcmsdh->sdioh, fn);
+}
+
+int
+bcmsdh_start(void *sdh, int stage)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+ return sdioh_start(bcmsdh->sdioh, stage);
+}
+
+int
+bcmsdh_stop(void *sdh)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+ return sdioh_stop(bcmsdh->sdioh);
+}
+
+int
+bcmsdh_waitlockfree(void *sdh)
+{
+#ifdef LINUX
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+ return sdioh_waitlockfree(bcmsdh->sdioh);
+#else
+ return 0;
+#endif
+}
+
+int
+bcmsdh_query_device(void *sdh)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+#if defined(BCMDONGLEHOST)
+ bcmsdh->vendevid = (VENDOR_BROADCOM << 16) | 0;
+#else
+ uint8 *fn0cis[1];
+ int err;
+ char *vars;
+ uint varsz;
+ osl_t *osh = bcmsdh->osh;
+
+ bcmsdh->vendevid = ~(0);
+
+ if (!(fn0cis[0] = MALLOC(osh, SBSDIO_CIS_SIZE_LIMIT))) {
+ BCMSDH_ERROR(("%s: CIS malloc failed\n", __FUNCTION__));
+ return (bcmsdh->vendevid);
+ }
+
+ bzero(fn0cis[0], SBSDIO_CIS_SIZE_LIMIT);
+
+ if ((err = bcmsdh_cis_read(sdh, 0, fn0cis[0], SBSDIO_CIS_SIZE_LIMIT))) {
+ BCMSDH_ERROR(("%s: CIS read err %d, report unknown BRCM device\n",
+ __FUNCTION__, err));
+ bcmsdh->vendevid = (VENDOR_BROADCOM << 16) | 0;
+ MFREE(osh, fn0cis[0], SBSDIO_CIS_SIZE_LIMIT);
+ return (bcmsdh->vendevid);
+ }
+
+ if (!err) {
+ if ((err = srom_parsecis(NULL, osh, fn0cis, 1, &vars, &varsz))) {
+ BCMSDH_ERROR(("%s: Error parsing CIS = %d\n", __FUNCTION__, err));
+ } else {
+ bcmsdh->vendevid = (getintvar(vars, "vendid") << 16) |
+ getintvar(vars, "devid");
+ MFREE(osh, vars, varsz);
+ }
+ }
+
+ MFREE(osh, fn0cis[0], SBSDIO_CIS_SIZE_LIMIT);
+#endif /* BCMDONGLEHOST */
+ return (bcmsdh->vendevid);
+}
+
+uint
+bcmsdh_query_iofnum(void *sdh)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+ if (!bcmsdh)
+ bcmsdh = l_bcmsdh;
+
+ return (sdioh_query_iofnum(bcmsdh->sdioh));
+}
+
+int
+bcmsdh_reset(bcmsdh_info_t *sdh)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+ return sdioh_sdio_reset(bcmsdh->sdioh);
+}
+
+/* XXX For use by NDIS port, remove if not needed. */
+void *bcmsdh_get_sdioh(bcmsdh_info_t *sdh)
+{
+ ASSERT(sdh);
+ return sdh->sdioh;
+}
+
+/* Function to pass device-status bits to DHD. */
+uint32
+bcmsdh_get_dstatus(void *sdh)
+{
+#ifdef BCMSPI
+ bcmsdh_info_t *p = (bcmsdh_info_t *)sdh;
+ sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh);
+ return sdioh_get_dstatus(sd);
+#else
+ return 0;
+#endif /* BCMSPI */
+}
+uint32
+bcmsdh_cur_sbwad(void *sdh)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+ if (!bcmsdh)
+ bcmsdh = l_bcmsdh;
+
+ return (bcmsdh->sbwad);
+}
+
+/* If force is TRUE, bcmsdhsdio_set_sbaddr_window() always recalculates the
+ * sbwad instead of using the cached value.
+ */
+void
+bcmsdh_force_sbwad_calc(void *sdh, bool force)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+ if (!bcmsdh)
+ bcmsdh = l_bcmsdh;
+ bcmsdh->force_sbwad_calc = force;
+}
+
+void
+bcmsdh_chipinfo(void *sdh, uint32 chip, uint32 chiprev)
+{
+#ifdef BCMSPI
+ bcmsdh_info_t *p = (bcmsdh_info_t *)sdh;
+ sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh);
+ sdioh_chipinfo(sd, chip, chiprev);
+#else
+ return;
+#endif /* BCMSPI */
+}
+
+#ifdef BCMSPI
+void
+bcmsdh_dwordmode(void *sdh, bool set)
+{
+ bcmsdh_info_t *p = (bcmsdh_info_t *)sdh;
+ sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh);
+ sdioh_dwordmode(sd, set);
+ return;
+}
+#endif /* BCMSPI */
+
+int
+bcmsdh_sleep(void *sdh, bool enab)
+{
+#ifdef SDIOH_SLEEP_ENABLED
+ bcmsdh_info_t *p = (bcmsdh_info_t *)sdh;
+ sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh);
+
+ return sdioh_sleep(sd, enab);
+#else
+ return BCME_UNSUPPORTED;
+#endif
+}
+
+int
+bcmsdh_gpio_init(void *sdh)
+{
+ bcmsdh_info_t *p = (bcmsdh_info_t *)sdh;
+ sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh);
+
+ return sdioh_gpio_init(sd);
+}
+
+bool
+bcmsdh_gpioin(void *sdh, uint32 gpio)
+{
+ bcmsdh_info_t *p = (bcmsdh_info_t *)sdh;
+ sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh);
+
+ return sdioh_gpioin(sd, gpio);
+}
+
+int
+bcmsdh_gpioouten(void *sdh, uint32 gpio)
+{
+ bcmsdh_info_t *p = (bcmsdh_info_t *)sdh;
+ sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh);
+
+ return sdioh_gpioouten(sd, gpio);
+}
+
+int
+bcmsdh_gpioout(void *sdh, uint32 gpio, bool enab)
+{
+ bcmsdh_info_t *p = (bcmsdh_info_t *)sdh;
+ sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh);
+
+ return sdioh_gpioout(sd, gpio, enab);
+}
+
+uint
+bcmsdh_set_mode(void *sdh, uint mode)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ return (sdioh_set_mode(bcmsdh->sdioh, mode));
+}
+
+#ifdef PKT_STATICS
+uint32
+bcmsdh_get_spend_time(void *sdh)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ return (sdioh_get_spend_time(bcmsdh->sdioh));
+}
+#endif
diff --git a/bcmdhd.101.10.361.x/bcmsdh_linux.c b/bcmdhd.101.10.361.x/bcmsdh_linux.c
new file mode 100755
index 0000000..d297118
--- /dev/null
+++ b/bcmdhd.101.10.361.x/bcmsdh_linux.c
@@ -0,0 +1,594 @@
+/*
+ * SDIO access interface for drivers - linux specific (pci only)
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+/**
+ * @file bcmsdh_linux.c
+ */
+
+#define __UNDEF_NO_VERSION__
+
+#include <typedefs.h>
+#include <linuxver.h>
+#include <linux/pci.h>
+#include <linux/completion.h>
+
+#include <osl.h>
+#include <pcicfg.h>
+#include <bcmdefs.h>
+#include <bcmdevs.h>
+#include <linux/irq.h>
+extern void dhdsdio_isr(void * args);
+#include <bcmutils.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#if defined(CONFIG_ARCH_ODIN)
+#include <linux/platform_data/gpio-odin.h>
+#endif /* defined(CONFIG_ARCH_ODIN) */
+#include <dhd_linux.h>
+
+/* driver info, initialized when bcmsdh_register is called */
+static bcmsdh_driver_t drvinfo = {NULL, NULL, NULL, NULL};
+
+typedef enum {
+ DHD_INTR_INVALID = 0,
+ DHD_INTR_INBAND,
+ DHD_INTR_HWOOB,
+ DHD_INTR_SWOOB
+} DHD_HOST_INTR_TYPE;
+
+/* the BCMSDH module comprises the generic part (bcmsdh.c) and OS specific layer (e.g.
+ * bcmsdh_linux.c). Put all OS specific variables (e.g. irq number and flags) here rather
+ * than in the common structure bcmsdh_info. bcmsdh_info only keeps a handle (os_ctx) to this
+ * structure.
+ */
+typedef struct bcmsdh_os_info {
+ DHD_HOST_INTR_TYPE intr_type;
+ int oob_irq_num; /* valid when hardware or software oob in use */
+ unsigned long oob_irq_flags; /* valid when hardware or software oob in use */
+ bool oob_irq_registered;
+ bool oob_irq_enabled;
+ bool oob_irq_wake_enabled;
+ spinlock_t oob_irq_spinlock;
+ bcmsdh_cb_fn_t oob_irq_handler;
+ void *oob_irq_handler_context;
+ void *context; /* context returned from upper layer */
+ void *sdioh; /* handle to lower layer (sdioh) */
+ void *dev; /* handle to the underlying device */
+ bool dev_wake_enabled;
+} bcmsdh_os_info_t;
+
+/* debugging macros */
+#ifdef BCMDBG_ERR
+#define SDLX_ERR(x) printf x
+#define SDLX_MSG(x) printf x
+#else
+#define SDLX_ERR(x) printf x
+#define SDLX_MSG(x) printf x
+#endif /* BCMDBG_ERR */
+
+/**
+ * Checks to see if vendor and device IDs match a supported SDIO Host Controller.
+ */
+bool
+bcmsdh_chipmatch(uint16 vendor, uint16 device)
+{
+ /* Add other vendors and devices as required */
+#ifdef BCMINTERNAL
+#ifdef BCMSDIOH_BCM
+ if (device == SDIOH_FPGA_ID && vendor == VENDOR_BROADCOM) {
+ return (TRUE);
+ }
+ if (device == BCM_SDIOH_ID && vendor == VENDOR_BROADCOM) {
+ return (TRUE);
+ }
+ if (device == BCM4710_DEVICE_ID && vendor == VENDOR_BROADCOM) {
+ return (TRUE);
+ }
+ /* For now still accept the old devid */
+ if (device == 0x4380 && vendor == VENDOR_BROADCOM) {
+ return (TRUE);
+ }
+#endif /* BCMSDIOH_BCM */
+#endif /* BCMINTERNAL */
+
+#ifdef BCMSDIOH_STD
+ /* Check for Arasan host controller */
+ if (vendor == VENDOR_SI_IMAGE) {
+ return (TRUE);
+ }
+ /* Check for BRCM 27XX Standard host controller */
+ if (device == BCM27XX_SDIOH_ID && vendor == VENDOR_BROADCOM) {
+ return (TRUE);
+ }
+ /* Check for BRCM Standard host controller */
+ if (device == SDIOH_FPGA_ID && vendor == VENDOR_BROADCOM) {
+ return (TRUE);
+ }
+ /* Check for TI PCIxx21 Standard host controller */
+ if (device == PCIXX21_SDIOH_ID && vendor == VENDOR_TI) {
+ return (TRUE);
+ }
+ if (device == PCIXX21_SDIOH0_ID && vendor == VENDOR_TI) {
+ return (TRUE);
+ }
+ /* Ricoh R5C822 Standard SDIO Host */
+ if (device == R5C822_SDIOH_ID && vendor == VENDOR_RICOH) {
+ return (TRUE);
+ }
+ /* JMicron Standard SDIO Host */
+ if (device == JMICRON_SDIOH_ID && vendor == VENDOR_JMICRON) {
+ return (TRUE);
+ }
+
+#ifdef BCMINTERNAL
+ /* Check for Jinvani (C-Guys) host controller */
+ if (device == JINVANI_SDIOH_ID && vendor == VENDOR_JINVANI) {
+ return (TRUE);
+ }
+#endif /* BCMINTERNAL */
+#endif /* BCMSDIOH_STD */
+#ifdef BCMSDIOH_SPI
+ /* This is the PciSpiHost. */
+ if (device == SPIH_FPGA_ID && vendor == VENDOR_BROADCOM) {
+ printf("Found PCI SPI Host Controller\n");
+ return (TRUE);
+ }
+
+#ifdef BCMINTERNAL
+ /* This is the SPI Host for QT. */
+ if (device == BCM_SPIH_ID && vendor == VENDOR_BROADCOM) {
+ printf("Found SPI Host Controller\n");
+ return (TRUE);
+ }
+#endif /* BCMINTERNAL */
+#endif /* BCMSDIOH_SPI */
+
+#ifdef BCMINTERNAL
+ /*
+ * XXX - This is a hack to get the GPL SdioLinux driver to load on Arasan/x86
+ * This is accomplished by installing a PciSpiHost into the system alongside the
+ * Arasan controller. The PciSpiHost is just used to get BCMSDH loaded.
+ */
+#ifdef BCMSDH_FD
+ if (device == SPIH_FPGA_ID && vendor == VENDOR_BROADCOM) {
+ printf("Found SdioLinux Host Controller\n");
+ return (TRUE);
+ }
+#endif /* BCMSDH_FD */
+#endif /* BCMINTERNAL */
+ return (FALSE);
+}
+
+void* bcmsdh_probe(osl_t *osh, void *dev, void *sdioh, void *adapter_info, uint bus_type,
+ uint bus_num, uint slot_num)
+{
+ ulong regs;
+ bcmsdh_info_t *bcmsdh;
+ uint32 vendevid;
+ bcmsdh_os_info_t *bcmsdh_osinfo = NULL;
+
+ bcmsdh = bcmsdh_attach(osh, sdioh, &regs);
+ if (bcmsdh == NULL) {
+ SDLX_ERR(("%s: bcmsdh_attach failed\n", __FUNCTION__));
+ goto err;
+ }
+ bcmsdh_osinfo = MALLOC(osh, sizeof(bcmsdh_os_info_t));
+ if (bcmsdh_osinfo == NULL) {
+ SDLX_ERR(("%s: failed to allocate bcmsdh_os_info_t\n", __FUNCTION__));
+ goto err;
+ }
+ bzero((char *)bcmsdh_osinfo, sizeof(bcmsdh_os_info_t));
+ bcmsdh->os_cxt = bcmsdh_osinfo;
+ bcmsdh_osinfo->sdioh = sdioh;
+ bcmsdh_osinfo->dev = dev;
+ osl_set_bus_handle(osh, bcmsdh);
+
+#if !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+ if (dev && device_init_wakeup(dev, true) == 0)
+ bcmsdh_osinfo->dev_wake_enabled = TRUE;
+#endif /* !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */
+
+#if defined(OOB_INTR_ONLY)
+ spin_lock_init(&bcmsdh_osinfo->oob_irq_spinlock);
+	/* Get customer-specific OOB IRQ parameters: IRQ number and IRQ flags */
+ bcmsdh_osinfo->oob_irq_num = wifi_platform_get_irq_number(adapter_info,
+ &bcmsdh_osinfo->oob_irq_flags);
+ if (bcmsdh_osinfo->oob_irq_num < 0) {
+ SDLX_ERR(("%s: Host OOB irq is not defined\n", __FUNCTION__));
+ goto err;
+ }
+#endif /* defined(OOB_INTR_ONLY) */
+
+ /* Read the vendor/device ID from the CIS */
+ vendevid = bcmsdh_query_device(bcmsdh);
+ /* try to attach to the target device */
+ bcmsdh_osinfo->context = drvinfo.probe((vendevid >> 16), (vendevid & 0xFFFF), bus_num,
+ slot_num, 0, bus_type, (void *)regs, osh, bcmsdh);
+ if (bcmsdh_osinfo->context == NULL) {
+ SDLX_ERR(("%s: device attach failed\n", __FUNCTION__));
+ goto err;
+ }
+
+ return bcmsdh;
+
+ /* error handling */
+err:
+ if (bcmsdh != NULL)
+ bcmsdh_detach(osh, bcmsdh);
+ if (bcmsdh_osinfo != NULL)
+ MFREE(osh, bcmsdh_osinfo, sizeof(bcmsdh_os_info_t));
+ return NULL;
+}
+
+int bcmsdh_remove(bcmsdh_info_t *bcmsdh)
+{
+ bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+
+#if !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+ if (bcmsdh_osinfo->dev)
+ device_init_wakeup(bcmsdh_osinfo->dev, false);
+ bcmsdh_osinfo->dev_wake_enabled = FALSE;
+#endif /* !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */
+
+ drvinfo.remove(bcmsdh_osinfo->context);
+ MFREE(bcmsdh->osh, bcmsdh->os_cxt, sizeof(bcmsdh_os_info_t));
+ bcmsdh_detach(bcmsdh->osh, bcmsdh);
+
+ return 0;
+}
+
+#ifdef DHD_WAKE_STATUS
+int bcmsdh_get_total_wake(bcmsdh_info_t *bcmsdh)
+{
+ return bcmsdh->total_wake_count;
+}
+
+int bcmsdh_set_get_wake(bcmsdh_info_t *bcmsdh, int flag)
+{
+#if defined(OOB_INTR_ONLY)
+ bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+ unsigned long flags;
+#endif
+ int ret = 0;
+
+#if defined(OOB_INTR_ONLY)
+ spin_lock_irqsave(&bcmsdh_osinfo->oob_irq_spinlock, flags);
+
+ ret = bcmsdh->pkt_wake;
+ bcmsdh->total_wake_count += flag;
+ bcmsdh->pkt_wake = flag;
+
+ spin_unlock_irqrestore(&bcmsdh_osinfo->oob_irq_spinlock, flags);
+#endif
+ return ret;
+}
+#endif /* DHD_WAKE_STATUS */
+
+int bcmsdh_suspend(bcmsdh_info_t *bcmsdh)
+{
+ bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+
+ if (drvinfo.suspend && drvinfo.suspend(bcmsdh_osinfo->context))
+ return -EBUSY;
+ return 0;
+}
+
+int bcmsdh_resume(bcmsdh_info_t *bcmsdh)
+{
+ bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+
+ if (drvinfo.resume)
+ return drvinfo.resume(bcmsdh_osinfo->context);
+ return 0;
+}
+
+extern int bcmsdh_register_client_driver(void);
+extern void bcmsdh_unregister_client_driver(void);
+extern int sdio_func_reg_notify(void* semaphore);
+extern void sdio_func_unreg_notify(void);
+
+#if defined(BCMLXSDMMC)
+int bcmsdh_reg_sdio_notify(void* semaphore)
+{
+ return sdio_func_reg_notify(semaphore);
+}
+
+void bcmsdh_unreg_sdio_notify(void)
+{
+ sdio_func_unreg_notify();
+}
+#endif /* defined(BCMLXSDMMC) */
+
+int
+bcmsdh_register(bcmsdh_driver_t *driver)
+{
+ int error = 0;
+
+ drvinfo = *driver;
+ SDLX_MSG(("%s: register client driver\n", __FUNCTION__));
+ error = bcmsdh_register_client_driver();
+ if (error)
+ SDLX_ERR(("%s: failed %d\n", __FUNCTION__, error));
+
+ return error;
+}
+
+void
+bcmsdh_unregister(void)
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+ if (bcmsdh_pci_driver.node.next == NULL)
+ return;
+#endif
+
+ bcmsdh_unregister_client_driver();
+}
+
+void bcmsdh_dev_pm_stay_awake(bcmsdh_info_t *bcmsdh)
+{
+#if !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+ bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+ pm_stay_awake(bcmsdh_osinfo->dev);
+#endif /* !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */
+}
+
+void bcmsdh_dev_relax(bcmsdh_info_t *bcmsdh)
+{
+#if !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+ bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+ pm_relax(bcmsdh_osinfo->dev);
+#endif /* !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */
+}
+
+bool bcmsdh_dev_pm_enabled(bcmsdh_info_t *bcmsdh)
+{
+ bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+
+ return bcmsdh_osinfo->dev_wake_enabled;
+}
+
+#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID)
+int bcmsdh_get_oob_intr_num(bcmsdh_info_t *bcmsdh)
+{
+ bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+
+ return bcmsdh_osinfo->oob_irq_num;
+}
+
+void bcmsdh_oob_intr_set(bcmsdh_info_t *bcmsdh, bool enable)
+{
+ unsigned long flags;
+ bcmsdh_os_info_t *bcmsdh_osinfo;
+
+ if (!bcmsdh)
+ return;
+
+ bcmsdh_osinfo = bcmsdh->os_cxt;
+ spin_lock_irqsave(&bcmsdh_osinfo->oob_irq_spinlock, flags);
+ if (bcmsdh_osinfo->oob_irq_enabled != enable) {
+ if (enable)
+ enable_irq(bcmsdh_osinfo->oob_irq_num);
+ else
+ disable_irq_nosync(bcmsdh_osinfo->oob_irq_num);
+ bcmsdh_osinfo->oob_irq_enabled = enable;
+ }
+ spin_unlock_irqrestore(&bcmsdh_osinfo->oob_irq_spinlock, flags);
+}
+
+#ifdef ENABLE_WAKEUP_PKT_DUMP
+extern volatile bool dhd_mmc_suspend;
+extern volatile bool dhd_mmc_wake;
+#endif /* ENABLE_WAKEUP_PKT_DUMP */
+
+static irqreturn_t wlan_oob_irq(int irq, void *dev_id)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)dev_id;
+ bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+
+#ifndef BCMSPI_ANDROID
+ bcmsdh_oob_intr_set(bcmsdh, FALSE);
+#endif /* !BCMSPI_ANDROID */
+ bcmsdh_osinfo->oob_irq_handler(bcmsdh_osinfo->oob_irq_handler_context);
+
+#ifdef ENABLE_WAKEUP_PKT_DUMP
+ if (dhd_mmc_suspend) {
+ dhd_mmc_wake = TRUE;
+ }
+#endif /* ENABLE_WAKEUP_PKT_DUMP */
+
+ return IRQ_HANDLED;
+}
+
+int bcmsdh_oob_intr_register(bcmsdh_info_t *bcmsdh, bcmsdh_cb_fn_t oob_irq_handler,
+ void* oob_irq_handler_context)
+{
+ int err = 0;
+ bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+
+ if (bcmsdh_osinfo->oob_irq_registered) {
+ SDLX_ERR(("%s: irq is already registered\n", __FUNCTION__));
+ return -EBUSY;
+ }
+#ifdef HW_OOB
+ SDLX_MSG(("%s: HW_OOB irq=%d flags=0x%X\n", __FUNCTION__,
+ (int)bcmsdh_osinfo->oob_irq_num, (int)bcmsdh_osinfo->oob_irq_flags));
+#else
+ SDLX_MSG(("%s: SW_OOB irq=%d flags=0x%X\n", __FUNCTION__,
+ (int)bcmsdh_osinfo->oob_irq_num, (int)bcmsdh_osinfo->oob_irq_flags));
+#endif
+ bcmsdh_osinfo->oob_irq_handler = oob_irq_handler;
+ bcmsdh_osinfo->oob_irq_handler_context = oob_irq_handler_context;
+ bcmsdh_osinfo->oob_irq_enabled = TRUE;
+ bcmsdh_osinfo->oob_irq_registered = TRUE;
+#if defined(CONFIG_ARCH_ODIN)
+ err = odin_gpio_sms_request_irq(bcmsdh_osinfo->oob_irq_num, wlan_oob_irq,
+ bcmsdh_osinfo->oob_irq_flags, "bcmsdh_sdmmc", bcmsdh);
+#else
+ err = request_irq(bcmsdh_osinfo->oob_irq_num, wlan_oob_irq,
+ bcmsdh_osinfo->oob_irq_flags, "bcmsdh_sdmmc", bcmsdh);
+#endif /* defined(CONFIG_ARCH_ODIN) */
+ if (err) {
+ SDLX_ERR(("%s: request_irq failed with %d\n", __FUNCTION__, err));
+ bcmsdh_osinfo->oob_irq_enabled = FALSE;
+ bcmsdh_osinfo->oob_irq_registered = FALSE;
+ return err;
+ }
+
+#if defined(DISABLE_WOWLAN)
+ SDLX_MSG(("%s: disable_irq_wake\n", __FUNCTION__));
+ bcmsdh_osinfo->oob_irq_wake_enabled = FALSE;
+#else
+#if defined(CONFIG_ARCH_RHEA) || defined(CONFIG_ARCH_CAPRI)
+ if (device_may_wakeup(bcmsdh_osinfo->dev)) {
+#endif /* CONFIG_ARCH_RHEA || CONFIG_ARCH_CAPRI */
+ err = enable_irq_wake(bcmsdh_osinfo->oob_irq_num);
+ if (err)
+ SDLX_ERR(("%s: enable_irq_wake failed with %d\n", __FUNCTION__, err));
+ else
+ bcmsdh_osinfo->oob_irq_wake_enabled = TRUE;
+#if defined(CONFIG_ARCH_RHEA) || defined(CONFIG_ARCH_CAPRI)
+ }
+#endif /* CONFIG_ARCH_RHEA || CONFIG_ARCH_CAPRI */
+#endif
+
+ return 0;
+}
+
+void bcmsdh_oob_intr_unregister(bcmsdh_info_t *bcmsdh)
+{
+ int err = 0;
+ bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+
+ SDLX_MSG(("%s: Enter\n", __FUNCTION__));
+ if (!bcmsdh_osinfo->oob_irq_registered) {
+ SDLX_MSG(("%s: irq is not registered\n", __FUNCTION__));
+ return;
+ }
+ if (bcmsdh_osinfo->oob_irq_wake_enabled) {
+#if defined(CONFIG_ARCH_RHEA) || defined(CONFIG_ARCH_CAPRI)
+ if (device_may_wakeup(bcmsdh_osinfo->dev)) {
+#endif /* CONFIG_ARCH_RHEA || CONFIG_ARCH_CAPRI */
+ err = disable_irq_wake(bcmsdh_osinfo->oob_irq_num);
+ if (!err)
+ bcmsdh_osinfo->oob_irq_wake_enabled = FALSE;
+#if defined(CONFIG_ARCH_RHEA) || defined(CONFIG_ARCH_CAPRI)
+ }
+#endif /* CONFIG_ARCH_RHEA || CONFIG_ARCH_CAPRI */
+ }
+ if (bcmsdh_osinfo->oob_irq_enabled) {
+ disable_irq(bcmsdh_osinfo->oob_irq_num);
+ bcmsdh_osinfo->oob_irq_enabled = FALSE;
+ }
+ free_irq(bcmsdh_osinfo->oob_irq_num, bcmsdh);
+ bcmsdh_osinfo->oob_irq_registered = FALSE;
+}
+#endif /* defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) */
+
+/* Module parameters specific to each host-controller driver */
+/* XXX Need to move these to where they really belong! */
+
+extern uint sd_msglevel; /* Debug message level */
+module_param(sd_msglevel, uint, 0);
+
+extern uint sd_power; /* 0 = SD Power OFF, 1 = SD Power ON. */
+module_param(sd_power, uint, 0);
+
+extern uint sd_clock; /* SD Clock Control, 0 = SD Clock OFF, 1 = SD Clock ON */
+module_param(sd_clock, uint, 0);
+
+extern uint sd_divisor; /* Divisor (-1 means external clock) */
+module_param(sd_divisor, uint, 0);
+
+extern uint sd_sdmode; /* Default is SD4, 0=SPI, 1=SD1, 2=SD4 */
+module_param(sd_sdmode, uint, 0);
+
+extern uint sd_hiok; /* Ok to use hi-speed mode */
+module_param(sd_hiok, uint, 0);
+
+extern uint sd_f2_blocksize;
+module_param(sd_f2_blocksize, uint, 0);
+
+extern uint sd_f1_blocksize;
+module_param(sd_f1_blocksize, uint, 0);
+
+#ifdef BCMSDIOH_STD
+extern int sd_uhsimode;
+module_param(sd_uhsimode, int, 0);
+extern uint sd_tuning_period;
+module_param(sd_tuning_period, uint, 0);
+extern int sd_delay_value;
+module_param(sd_delay_value, int, 0);
+
+/* SDIO Drive Strength for UHSI mode specific to SDIO3.0 */
+extern char dhd_sdiod_uhsi_ds_override[2];
+module_param_string(dhd_sdiod_uhsi_ds_override, dhd_sdiod_uhsi_ds_override, 2, 0);
+
+#endif
+
+#ifdef BCMSDH_MODULE
+EXPORT_SYMBOL(bcmsdh_attach);
+EXPORT_SYMBOL(bcmsdh_detach);
+EXPORT_SYMBOL(bcmsdh_intr_query);
+EXPORT_SYMBOL(bcmsdh_intr_enable);
+EXPORT_SYMBOL(bcmsdh_intr_disable);
+EXPORT_SYMBOL(bcmsdh_intr_reg);
+EXPORT_SYMBOL(bcmsdh_intr_dereg);
+
+#if defined(DHD_DEBUG) || defined(BCMDBG)
+EXPORT_SYMBOL(bcmsdh_intr_pending);
+#endif
+
+#if defined (BT_OVER_SDIO)
+EXPORT_SYMBOL(bcmsdh_btsdio_interface_init);
+#endif /* defined (BT_OVER_SDIO) */
+
+EXPORT_SYMBOL(bcmsdh_devremove_reg);
+EXPORT_SYMBOL(bcmsdh_cfg_read);
+EXPORT_SYMBOL(bcmsdh_cfg_write);
+EXPORT_SYMBOL(bcmsdh_cis_read);
+EXPORT_SYMBOL(bcmsdh_reg_read);
+EXPORT_SYMBOL(bcmsdh_reg_write);
+EXPORT_SYMBOL(bcmsdh_regfail);
+EXPORT_SYMBOL(bcmsdh_send_buf);
+EXPORT_SYMBOL(bcmsdh_recv_buf);
+
+EXPORT_SYMBOL(bcmsdh_rwdata);
+EXPORT_SYMBOL(bcmsdh_abort);
+EXPORT_SYMBOL(bcmsdh_query_device);
+EXPORT_SYMBOL(bcmsdh_query_iofnum);
+EXPORT_SYMBOL(bcmsdh_iovar_op);
+EXPORT_SYMBOL(bcmsdh_register);
+EXPORT_SYMBOL(bcmsdh_unregister);
+EXPORT_SYMBOL(bcmsdh_chipmatch);
+EXPORT_SYMBOL(bcmsdh_reset);
+EXPORT_SYMBOL(bcmsdh_waitlockfree);
+
+EXPORT_SYMBOL(bcmsdh_get_dstatus);
+EXPORT_SYMBOL(bcmsdh_cfg_read_word);
+EXPORT_SYMBOL(bcmsdh_cfg_write_word);
+EXPORT_SYMBOL(bcmsdh_cur_sbwad);
+EXPORT_SYMBOL(bcmsdh_chipinfo);
+
+#endif /* BCMSDH_MODULE */
diff --git a/bcmdhd.101.10.361.x/bcmsdh_sdmmc.c b/bcmdhd.101.10.361.x/bcmsdh_sdmmc.c
new file mode 100755
index 0000000..596c02f
--- /dev/null
+++ b/bcmdhd.101.10.361.x/bcmsdh_sdmmc.c
@@ -0,0 +1,2004 @@
+/*
+ * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+#include <typedefs.h>
+
+#include <bcmdevs.h>
+#include <bcmendian.h>
+#include <bcmutils.h>
+#include <osl.h>
+#include <sdio.h> /* SDIO Device and Protocol Specs */
+#include <sdioh.h> /* Standard SDIO Host Controller Specification */
+#include <bcmsdbus.h> /* bcmsdh to/from specific controller APIs */
+#include <sdiovar.h> /* ioctl/iovars */
+
+#include <linux/mmc/core.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_dbg.h>
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined (CONFIG_PM_SLEEP)
+#include <linux/suspend.h>
+extern volatile bool dhd_mmc_suspend;
+#endif
+#include "bcmsdh_sdmmc.h"
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0)) || \
+ (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
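+/* Kernels outside the [3.3, 4.4) range do not provide mmc_host_clk_hold(),
+ * mmc_host_clk_release() or mmc_host_clk_rate(); supply no-op shims that
+ * report the current rate straight from host->ios.
+ */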
+static inline void
+mmc_host_clk_hold(struct mmc_host *host)
+{
+ BCM_REFERENCE(host);
+ return;
+}
+
+static inline void
+mmc_host_clk_release(struct mmc_host *host)
+{
+ BCM_REFERENCE(host);
+ return;
+}
+
+static inline unsigned int
+mmc_host_clk_rate(struct mmc_host *host)
+{
+ return host->ios.clock;
+}
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0) || >= KERNEL_VERSION(4, 4, 0) */
+
+#ifndef BCMSDH_MODULE
+extern int sdio_function_init(void);
+extern void sdio_function_cleanup(void);
+#endif /* BCMSDH_MODULE */
+
+#if !defined(OOB_INTR_ONLY)
+static void IRQHandler(struct sdio_func *func);
+static void IRQHandlerF2(struct sdio_func *func);
+#endif /* !defined(OOB_INTR_ONLY) */
+static int sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr);
+#if defined(ENABLE_INSMOD_NO_FW_LOAD) && !defined(BUS_POWER_RESTORE)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0) && defined(MMC_SW_RESET)
+extern int mmc_sw_reset(struct mmc_host *host);
+#else
+extern int sdio_reset_comm(struct mmc_card *card);
+#endif
+#endif
+#ifdef GLOBAL_SDMMC_INSTANCE
+extern PBCMSDH_SDMMC_INSTANCE gInstance;
+#endif
+
+#define DEFAULT_SDIO_F2_BLKSIZE 512
+#ifndef CUSTOM_SDIO_F2_BLKSIZE
+#define CUSTOM_SDIO_F2_BLKSIZE DEFAULT_SDIO_F2_BLKSIZE
+#endif
+
+#define DEFAULT_SDIO_F1_BLKSIZE 64
+#ifndef CUSTOM_SDIO_F1_BLKSIZE
+#define CUSTOM_SDIO_F1_BLKSIZE DEFAULT_SDIO_F1_BLKSIZE
+#endif
+
+#define MAX_IO_RW_EXTENDED_BLK 511
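+/* CMD53 (IO_RW_EXTENDED) carries a 9-bit block count, so a single request
+ * can transfer at most 511 blocks.
+ */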
+
+uint sd_sdmode = SDIOH_MODE_SD4; /* Use SD4 mode by default */
+uint sd_f2_blocksize = CUSTOM_SDIO_F2_BLKSIZE;
+uint sd_f1_blocksize = CUSTOM_SDIO_F1_BLKSIZE;
+
+#if defined (BT_OVER_SDIO)
+uint sd_f3_blocksize = 64;
+#endif /* defined (BT_OVER_SDIO) */
+
+uint sd_divisor = 2; /* Default 48MHz/2 = 24MHz */
+
+uint sd_power = 1; /* Default to SD Slot powered ON */
+uint sd_clock = 1; /* Default to SD Clock turned ON */
+uint sd_hiok = FALSE; /* Don't use hi-speed mode by default */
+uint sd_msglevel = SDH_ERROR_VAL;
+uint sd_use_dma = TRUE;
+
+#ifndef CUSTOM_RXCHAIN
+#define CUSTOM_RXCHAIN 0
+#endif
+
+DHD_PM_RESUME_WAIT_INIT(sdioh_request_byte_wait);
+DHD_PM_RESUME_WAIT_INIT(sdioh_request_word_wait);
+DHD_PM_RESUME_WAIT_INIT(sdioh_request_packet_wait);
+DHD_PM_RESUME_WAIT_INIT(sdioh_request_buffer_wait);
+
+#define DMA_ALIGN_MASK 0x03
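+/* Buffers handed to the host controller must be 4-byte aligned; anything
+ * else goes through the bounce-buffer path in sdioh_request_buffer().
+ */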
+#define MMC_SDIO_ABORT_RETRY_LIMIT 5
+
+int sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data);
+#ifdef NOTYET
+static int
+sdioh_sdmmc_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 data);
+#endif /* NOTYET */
+
+#if defined (BT_OVER_SDIO)
+extern
+void sdioh_sdmmc_card_enable_func_f3(sdioh_info_t *sd, struct sdio_func *func)
+{
+ sd->func[3] = func;
+ sd_info(("%s sd->func[3] %p\n", __FUNCTION__, sd->func[3]));
+}
+#endif /* defined (BT_OVER_SDIO) */
+
+void sdmmc_set_clock_rate(sdioh_info_t *sd, uint hz);
+uint sdmmc_get_clock_rate(sdioh_info_t *sd);
+void sdmmc_set_clock_divisor(sdioh_info_t *sd, uint sd_div);
+
+static int
+sdioh_sdmmc_card_enablefuncs(sdioh_info_t *sd)
+{
+ int err_ret;
+ uint32 fbraddr;
+ uint8 func;
+
+ sd_trace(("%s\n", __FUNCTION__));
+
+ /* Get the Card's common CIS address */
+ sd->com_cis_ptr = sdioh_sdmmc_get_cisaddr(sd, SDIOD_CCCR_CISPTR_0);
+ sd->func_cis_ptr[0] = sd->com_cis_ptr;
+ sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr));
+
+ /* Get the Card's function CIS (for each function) */
+ for (fbraddr = SDIOD_FBR_STARTADDR, func = 1;
+ func <= sd->num_funcs; func++, fbraddr += SDIOD_FBR_SIZE) {
+ sd->func_cis_ptr[func] = sdioh_sdmmc_get_cisaddr(sd, SDIOD_FBR_CISPTR_0 + fbraddr);
+ sd_info(("%s: Function %d CIS Ptr = 0x%x\n",
+ __FUNCTION__, func, sd->func_cis_ptr[func]));
+ }
+
+ sd->func_cis_ptr[0] = sd->com_cis_ptr;
+ sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr));
+
+ /* Enable Function 1 */
+ sdio_claim_host(sd->func[1]);
+ err_ret = sdio_enable_func(sd->func[1]);
+ sdio_release_host(sd->func[1]);
+ if (err_ret) {
+ sd_err(("bcmsdh_sdmmc: Failed to enable F1 Err: 0x%08x\n", err_ret));
+ }
+
+ return FALSE;
+}
+
+/*
+ * Public entry points & extern's
+ */
+extern sdioh_info_t *
+sdioh_attach(osl_t *osh, struct sdio_func *func)
+{
+ sdioh_info_t *sd = NULL;
+ int err_ret;
+
+ sd_trace(("%s\n", __FUNCTION__));
+
+ if (func == NULL) {
+ sd_err(("%s: sdio function device is NULL\n", __FUNCTION__));
+ return NULL;
+ }
+
+ if ((sd = (sdioh_info_t *)MALLOC(osh, sizeof(sdioh_info_t))) == NULL) {
+ sd_err(("sdioh_attach: out of memory, malloced %d bytes\n", MALLOCED(osh)));
+ return NULL;
+ }
+ bzero((char *)sd, sizeof(sdioh_info_t));
+ sd->osh = osh;
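+	/* The Linux MMC core does not expose SDIO function 0 as a struct
+	 * sdio_func, so fake_func0 provides a minimal stand-in for F0 (CCCR)
+	 * register access.
+	 */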
+ sd->fake_func0.num = 0;
+ sd->fake_func0.card = func->card;
+ sd->func[0] = &sd->fake_func0;
+#ifdef GLOBAL_SDMMC_INSTANCE
+ if (func->num == 2)
+ sd->func[1] = gInstance->func[1];
+#else
+ sd->func[1] = func->card->sdio_func[0];
+#endif
+ sd->func[2] = func->card->sdio_func[1];
+#ifdef GLOBAL_SDMMC_INSTANCE
+ sd->func[func->num] = func;
+#endif
+
+#if defined (BT_OVER_SDIO)
+ sd->func[3] = NULL;
+#endif /* defined (BT_OVER_SDIO) */
+
+ sd->num_funcs = 2;
+ sd->sd_blockmode = TRUE;
+ sd->use_client_ints = TRUE;
+ sd->client_block_size[0] = 64;
+ sd->use_rxchain = CUSTOM_RXCHAIN;
+ if (sd->func[1] == NULL || sd->func[2] == NULL) {
+		sd_err(("%s: func 1 or 2 is NULL\n", __FUNCTION__));
+ goto fail;
+ }
+ sdio_set_drvdata(sd->func[1], sd);
+
+ sdio_claim_host(sd->func[1]);
+ sd->client_block_size[1] = sd_f1_blocksize;
+ err_ret = sdio_set_block_size(sd->func[1], sd_f1_blocksize);
+ sdio_release_host(sd->func[1]);
+ if (err_ret) {
+ sd_err(("bcmsdh_sdmmc: Failed to set F1 blocksize(%d)\n", err_ret));
+ goto fail;
+ }
+
+ sdio_claim_host(sd->func[2]);
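+	/* BCM43362 and BCM4330 are limited to 128-byte F2 blocks, so cap the
+	 * configured block size for those chips.
+	 */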
+ if ((func->device == BCM43362_CHIP_ID || func->device == BCM4330_CHIP_ID) &&
+ sd_f2_blocksize > 128)
+ sd_f2_blocksize = 128;
+ sd->client_block_size[2] = sd_f2_blocksize;
+ printf("%s: set sd_f2_blocksize %d\n", __FUNCTION__, sd_f2_blocksize);
+ err_ret = sdio_set_block_size(sd->func[2], sd_f2_blocksize);
+ sdio_release_host(sd->func[2]);
+ if (err_ret) {
+ sd_err(("bcmsdh_sdmmc: Failed to set F2 blocksize to %d(%d)\n",
+ sd_f2_blocksize, err_ret));
+ goto fail;
+ }
+
+ sd->sd_clk_rate = sdmmc_get_clock_rate(sd);
+ printf("%s: sd clock rate = %u\n", __FUNCTION__, sd->sd_clk_rate);
+ sdioh_sdmmc_card_enablefuncs(sd);
+#if !defined(OOB_INTR_ONLY)
+ mutex_init(&sd->claim_host_mutex); // terence 20140926: fix for claim host issue
+#endif
+
+ sd_trace(("%s: Done\n", __FUNCTION__));
+ return sd;
+
+fail:
+ MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+ return NULL;
+}
+
+extern SDIOH_API_RC
+sdioh_detach(osl_t *osh, sdioh_info_t *sd)
+{
+ sd_trace(("%s\n", __FUNCTION__));
+
+ if (sd) {
+
+ /* Disable Function 2 */
+ if (sd->func[2]) {
+ sdio_claim_host(sd->func[2]);
+ sdio_disable_func(sd->func[2]);
+ sdio_release_host(sd->func[2]);
+ }
+
+ /* Disable Function 1 */
+ if (sd->func[1]) {
+ sdio_claim_host(sd->func[1]);
+ sdio_disable_func(sd->func[1]);
+ sdio_release_host(sd->func[1]);
+ }
+
+ sd->func[1] = NULL;
+ sd->func[2] = NULL;
+
+ MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+ }
+ return SDIOH_API_RC_SUCCESS;
+}
+
+#if defined(OOB_INTR_ONLY) && defined(HW_OOB)
+
+extern SDIOH_API_RC
+sdioh_enable_func_intr(sdioh_info_t *sd)
+{
+ uint8 reg;
+ int err;
+
+ if (sd->func[0] == NULL) {
+ sd_err(("%s: function 0 pointer is NULL\n", __FUNCTION__));
+ return SDIOH_API_RC_FAIL;
+ }
+
+ sdio_claim_host(sd->func[0]);
+ reg = sdio_readb(sd->func[0], SDIOD_CCCR_INTEN, &err);
+ if (err) {
+ sd_err(("%s: error for read SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
+ sdio_release_host(sd->func[0]);
+ return SDIOH_API_RC_FAIL;
+ }
+ /* Enable F1 and F2 interrupts, clear master enable */
+ reg &= ~INTR_CTL_MASTER_EN;
+ reg |= (INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN);
+#if defined (BT_OVER_SDIO)
+ reg |= (INTR_CTL_FUNC3_EN);
+#endif /* defined (BT_OVER_SDIO) */
+ sdio_writeb(sd->func[0], reg, SDIOD_CCCR_INTEN, &err);
+ sdio_release_host(sd->func[0]);
+
+ if (err) {
+ sd_err(("%s: error for write SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
+ return SDIOH_API_RC_FAIL;
+ }
+
+ return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_disable_func_intr(sdioh_info_t *sd)
+{
+ uint8 reg;
+ int err;
+
+ if (sd->func[0] == NULL) {
+ sd_err(("%s: function 0 pointer is NULL\n", __FUNCTION__));
+ return SDIOH_API_RC_FAIL;
+ }
+
+ sdio_claim_host(sd->func[0]);
+ reg = sdio_readb(sd->func[0], SDIOD_CCCR_INTEN, &err);
+ if (err) {
+ sd_err(("%s: error for read SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
+ sdio_release_host(sd->func[0]);
+ return SDIOH_API_RC_FAIL;
+ }
+ reg &= ~(INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN);
+#if defined(BT_OVER_SDIO)
+ reg &= ~INTR_CTL_FUNC3_EN;
+#endif
+ /* Disable master interrupt with the last function interrupt */
+ if (!(reg & 0xFE))
+ reg = 0;
+ sdio_writeb(sd->func[0], reg, SDIOD_CCCR_INTEN, &err);
+ sdio_release_host(sd->func[0]);
+
+ if (err) {
+ sd_err(("%s: error for write SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
+ return SDIOH_API_RC_FAIL;
+ }
+
+ return SDIOH_API_RC_SUCCESS;
+}
+#endif /* defined(OOB_INTR_ONLY) && defined(HW_OOB) */
+
+/* Configure callback to client when we receive client interrupt */
+extern SDIOH_API_RC
+sdioh_interrupt_register(sdioh_info_t *sd, sdioh_cb_fn_t fn, void *argh)
+{
+ sd_trace(("%s: Entering\n", __FUNCTION__));
+ if (fn == NULL) {
+ sd_err(("%s: interrupt handler is NULL, not registering\n", __FUNCTION__));
+ return SDIOH_API_RC_FAIL;
+ }
+#if !defined(OOB_INTR_ONLY)
+ sd->intr_handler = fn;
+ sd->intr_handler_arg = argh;
+ sd->intr_handler_valid = TRUE;
+
+ /* register and unmask irq */
+ if (sd->func[2]) {
+ sdio_claim_host(sd->func[2]);
+ sdio_claim_irq(sd->func[2], IRQHandlerF2);
+ sdio_release_host(sd->func[2]);
+ }
+
+ if (sd->func[1]) {
+ sdio_claim_host(sd->func[1]);
+ sdio_claim_irq(sd->func[1], IRQHandler);
+ sdio_release_host(sd->func[1]);
+ }
+#elif defined(HW_OOB)
+ sdioh_enable_func_intr(sd);
+#endif /* !defined(OOB_INTR_ONLY) */
+
+ return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_interrupt_deregister(sdioh_info_t *sd)
+{
+ sd_trace(("%s: Entering\n", __FUNCTION__));
+
+#if !defined(OOB_INTR_ONLY)
+ if (sd->func[1]) {
+ /* register and unmask irq */
+ sdio_claim_host(sd->func[1]);
+ sdio_release_irq(sd->func[1]);
+ sdio_release_host(sd->func[1]);
+ }
+
+ if (sd->func[2]) {
+ /* Claim host controller F2 */
+ sdio_claim_host(sd->func[2]);
+ sdio_release_irq(sd->func[2]);
+ /* Release host controller F2 */
+ sdio_release_host(sd->func[2]);
+ }
+
+ sd->intr_handler_valid = FALSE;
+ sd->intr_handler = NULL;
+ sd->intr_handler_arg = NULL;
+#elif defined(HW_OOB)
+ if (dhd_download_fw_on_driverload)
+ sdioh_disable_func_intr(sd);
+#endif /* !defined(OOB_INTR_ONLY) */
+ return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_interrupt_query(sdioh_info_t *sd, bool *onoff)
+{
+ sd_trace(("%s: Entering\n", __FUNCTION__));
+ *onoff = sd->client_intr_enabled;
+ return SDIOH_API_RC_SUCCESS;
+}
+
+#if defined(DHD_DEBUG) || defined(BCMDBG)
+extern bool
+sdioh_interrupt_pending(sdioh_info_t *sd)
+{
+ return (0);
+}
+#endif
+
+uint
+sdioh_query_iofnum(sdioh_info_t *sd)
+{
+ return sd->num_funcs;
+}
+
+/* IOVar table */
+enum {
+ IOV_MSGLEVEL = 1,
+ IOV_BLOCKMODE,
+ IOV_BLOCKSIZE,
+ IOV_DMA,
+ IOV_USEINTS,
+ IOV_NUMINTS,
+ IOV_NUMLOCALINTS,
+ IOV_HOSTREG,
+ IOV_DEVREG,
+ IOV_DIVISOR,
+ IOV_SDMODE,
+ IOV_HISPEED,
+ IOV_HCIREGS,
+ IOV_POWER,
+ IOV_CLOCK,
+ IOV_RXCHAIN
+};
+
+const bcm_iovar_t sdioh_iovars[] = {
+ {"sd_msglevel", IOV_MSGLEVEL, 0, 0, IOVT_UINT32, 0 },
+ {"sd_blockmode", IOV_BLOCKMODE, 0, 0, IOVT_BOOL, 0 },
+ {"sd_blocksize", IOV_BLOCKSIZE, 0, 0, IOVT_UINT32, 0 }, /* ((fn << 16) | size) */
+ {"sd_dma", IOV_DMA, 0, 0, IOVT_BOOL, 0 },
+ {"sd_ints", IOV_USEINTS, 0, 0, IOVT_BOOL, 0 },
+ {"sd_numints", IOV_NUMINTS, 0, 0, IOVT_UINT32, 0 },
+ {"sd_numlocalints", IOV_NUMLOCALINTS, 0, 0, IOVT_UINT32, 0 },
+#ifdef BCMINTERNAL
+ {"sd_hostreg", IOV_HOSTREG, 0, 0, IOVT_BUFFER, sizeof(sdreg_t) },
+ {"sd_devreg", IOV_DEVREG, 0, 0, IOVT_BUFFER, sizeof(sdreg_t) },
+#endif /* BCMINTERNAL */
+ {"sd_divisor", IOV_DIVISOR, 0, 0, IOVT_UINT32, 0 },
+ {"sd_power", IOV_POWER, 0, 0, IOVT_UINT32, 0 },
+ {"sd_clock", IOV_CLOCK, 0, 0, IOVT_UINT32, 0 },
+ {"sd_mode", IOV_SDMODE, 0, 0, IOVT_UINT32, 100},
+ {"sd_highspeed", IOV_HISPEED, 0, 0, IOVT_UINT32, 0 },
+ {"sd_rxchain", IOV_RXCHAIN, 0, 0, IOVT_BOOL, 0 },
+#ifdef BCMDBG
+ {"sd_hciregs", IOV_HCIREGS, 0, 0, IOVT_BUFFER, 0 },
+#endif
+ {NULL, 0, 0, 0, 0, 0 }
+};
+
+int
+sdioh_iovar_op(sdioh_info_t *si, const char *name,
+ void *params, int plen, void *arg, uint len, bool set)
+{
+ const bcm_iovar_t *vi = NULL;
+ int bcmerror = 0;
+ uint val_size;
+ int32 int_val = 0;
+ bool bool_val;
+ uint32 actionid;
+
+ ASSERT(name);
+
+ /* Get must have return space; Set does not take qualifiers */
+ ASSERT(set || (arg && len));
+ ASSERT(!set || (!params && !plen));
+
+ sd_trace(("%s: Enter (%s %s)\n", __FUNCTION__, (set ? "set" : "get"), name));
+
+ if ((vi = bcm_iovar_lookup(sdioh_iovars, name)) == NULL) {
+ bcmerror = BCME_UNSUPPORTED;
+ goto exit;
+ }
+
+ if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, set)) != 0)
+ goto exit;
+
+ /* XXX Copied from dhd, copied from wl; certainly overkill here? */
+ /* Set up params so get and set can share the convenience variables */
+ if (params == NULL) {
+ params = arg;
+ plen = len;
+ }
+
+ if (vi->type == IOVT_VOID)
+ val_size = 0;
+ else if (vi->type == IOVT_BUFFER)
+ val_size = len;
+ else
+ val_size = sizeof(int);
+
+ if (plen >= (int)sizeof(int_val))
+ bcopy(params, &int_val, sizeof(int_val));
+
+ bool_val = (int_val != 0) ? TRUE : FALSE;
+ BCM_REFERENCE(bool_val);
+
+ actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
+ switch (actionid) {
+ case IOV_GVAL(IOV_MSGLEVEL):
+ int_val = (int32)sd_msglevel;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_MSGLEVEL):
+ sd_msglevel = int_val;
+ break;
+
+ case IOV_GVAL(IOV_BLOCKMODE):
+ int_val = (int32)si->sd_blockmode;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_BLOCKMODE):
+ si->sd_blockmode = (bool)int_val;
+ /* Haven't figured out how to make non-block mode with DMA */
+ break;
+
+ case IOV_GVAL(IOV_BLOCKSIZE):
+ if ((uint32)int_val > si->num_funcs) {
+ bcmerror = BCME_BADARG;
+ break;
+ }
+ int_val = (int32)si->client_block_size[int_val];
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_BLOCKSIZE):
+ {
+ uint func = ((uint32)int_val >> 16);
+ uint blksize = (uint16)int_val;
+ uint maxsize;
+
+ if (func > si->num_funcs) {
+ bcmerror = BCME_BADARG;
+ break;
+ }
+
+ /* XXX These hardcoded sizes are a hack, remove after proper CIS parsing. */
+ switch (func) {
+ case 0: maxsize = 32; break;
+ case 1: maxsize = BLOCK_SIZE_4318; break;
+ case 2: maxsize = BLOCK_SIZE_4328; break;
+ default: maxsize = 0;
+ }
+ if (blksize > maxsize) {
+ bcmerror = BCME_BADARG;
+ break;
+ }
+ if (!blksize) {
+ blksize = maxsize;
+ }
+
+ /* Now set it */
+ si->client_block_size[func] = blksize;
+
+#ifdef USE_DYNAMIC_F2_BLKSIZE
+ if (si->func[func] == NULL) {
+ sd_err(("%s: SDIO Device not present\n", __FUNCTION__));
+ bcmerror = BCME_NORESOURCE;
+ break;
+ }
+ sdio_claim_host(si->func[func]);
+ bcmerror = sdio_set_block_size(si->func[func], blksize);
+ if (bcmerror)
+ sd_err(("%s: Failed to set F%d blocksize to %d(%d)\n",
+ __FUNCTION__, func, blksize, bcmerror));
+ sdio_release_host(si->func[func]);
+#endif /* USE_DYNAMIC_F2_BLKSIZE */
+ break;
+ }
+
+ case IOV_GVAL(IOV_RXCHAIN):
+ int_val = (int32)si->use_rxchain;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_GVAL(IOV_DMA):
+ int_val = (int32)si->sd_use_dma;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_DMA):
+ si->sd_use_dma = (bool)int_val;
+ break;
+
+ case IOV_GVAL(IOV_USEINTS):
+ int_val = (int32)si->use_client_ints;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_USEINTS):
+ si->use_client_ints = (bool)int_val;
+ if (si->use_client_ints)
+ si->intmask |= CLIENT_INTR;
+ else
+ si->intmask &= ~CLIENT_INTR;
+
+ break;
+
+ case IOV_GVAL(IOV_DIVISOR):
+ int_val = (uint32)sd_divisor;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_DIVISOR):
+ /* set the clock to divisor, if value is non-zero & power of 2 */
+ if (int_val && !(int_val & (int_val - 1))) {
+ sd_divisor = int_val;
+ sdmmc_set_clock_divisor(si, sd_divisor);
+ } else {
+ DHD_ERROR(("%s: Invalid sd_divisor value, should be power of 2!\n",
+ __FUNCTION__));
+ }
+ break;
+
+ case IOV_GVAL(IOV_POWER):
+ int_val = (uint32)sd_power;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_POWER):
+ sd_power = int_val;
+ break;
+
+ case IOV_GVAL(IOV_CLOCK):
+ int_val = (uint32)sd_clock;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_CLOCK):
+ sd_clock = int_val;
+ break;
+
+ case IOV_GVAL(IOV_SDMODE):
+ int_val = (uint32)sd_sdmode;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_SDMODE):
+ sd_sdmode = int_val;
+ break;
+
+ case IOV_GVAL(IOV_HISPEED):
+ int_val = (uint32)sd_hiok;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_HISPEED):
+ sd_hiok = int_val;
+ break;
+
+ case IOV_GVAL(IOV_NUMINTS):
+ int_val = (int32)si->intrcount;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_GVAL(IOV_NUMLOCALINTS):
+ int_val = (int32)0;
+ bcopy(&int_val, arg, val_size);
+ break;
+#ifdef BCMINTERNAL
+ case IOV_GVAL(IOV_HOSTREG):
+ {
+ /* XXX Should copy for alignment reasons */
+ sdreg_t *sd_ptr = (sdreg_t *)params;
+
+ if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD_MaxCurCap) {
+ sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+
+ sd_trace(("%s: rreg%d at offset %d\n", __FUNCTION__,
+ (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 16 : 32),
+ sd_ptr->offset));
+ if (sd_ptr->offset & 1)
+ int_val = 8; /* sdioh_sdmmc_rreg8(si, sd_ptr->offset); */
+ else if (sd_ptr->offset & 2)
+ int_val = 16; /* sdioh_sdmmc_rreg16(si, sd_ptr->offset); */
+ else
+ int_val = 32; /* sdioh_sdmmc_rreg(si, sd_ptr->offset); */
+
+ bcopy(&int_val, arg, sizeof(int_val));
+ break;
+ }
+
+ case IOV_SVAL(IOV_HOSTREG):
+ {
+ /* XXX Should copy for alignment reasons */
+ sdreg_t *sd_ptr = (sdreg_t *)params;
+
+ if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD_MaxCurCap) {
+ sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+
+ sd_trace(("%s: wreg%d value 0x%08x at offset %d\n", __FUNCTION__, sd_ptr->value,
+ (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 16 : 32),
+ sd_ptr->offset));
+ break;
+ }
+
+ case IOV_GVAL(IOV_DEVREG):
+ {
+ /* XXX Should copy for alignment reasons */
+ sdreg_t *sd_ptr = (sdreg_t *)params;
+ uint8 data = 0;
+
+ if ((uint)sd_ptr->func > si->num_funcs) {
+ bcmerror = BCME_BADARG;
+ break;
+ }
+
+ if (sdioh_cfg_read(si, sd_ptr->func, sd_ptr->offset, &data)) {
+ bcmerror = BCME_SDIO_ERROR;
+ break;
+ }
+
+ int_val = (int)data;
+ bcopy(&int_val, arg, sizeof(int_val));
+ break;
+ }
+
+ case IOV_SVAL(IOV_DEVREG):
+ {
+ /* XXX Should copy for alignment reasons */
+ sdreg_t *sd_ptr = (sdreg_t *)params;
+ uint8 data = (uint8)sd_ptr->value;
+
+ if ((uint)sd_ptr->func > si->num_funcs) {
+ bcmerror = BCME_BADARG;
+ break;
+ }
+
+ if (sdioh_cfg_write(si, sd_ptr->func, sd_ptr->offset, &data)) {
+ bcmerror = BCME_SDIO_ERROR;
+ break;
+ }
+ break;
+ }
+#endif /* BCMINTERNAL */
+ default:
+ bcmerror = BCME_UNSUPPORTED;
+ break;
+ }
+exit:
+
+ /* XXX Remove protective lock after clients all clean... */
+ return bcmerror;
+}
+
+#if (defined(OOB_INTR_ONLY) && defined(HW_OOB)) || defined(FORCE_WOWLAN)
+/*
+ * XXX dhd -i eth0 sd_devreg 0 0xf2 0x3
+ */
+
+#ifdef CUSTOMER_HW_AMLOGIC
+#include <linux/amlogic/aml_gpio_consumer.h>
+extern int wifi_irq_trigger_level(void);
+#endif
+SDIOH_API_RC
+sdioh_enable_hw_oob_intr(sdioh_info_t *sd, bool enable)
+{
+ SDIOH_API_RC status;
+ uint8 data;
+
+ if (enable) {
+ if (wifi_irq_trigger_level() == GPIO_IRQ_LOW)
+ data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE;
+ else
+ data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE | SDIO_SEPINT_ACT_HI;
+ }
+ else
+ data = SDIO_SEPINT_ACT_HI; /* disable hw oob interrupt */
+
+ status = sdioh_request_byte(sd, SDIOH_WRITE, 0, SDIOD_CCCR_BRCM_SEPINT, &data);
+ return status;
+}
+#endif /* defined(OOB_INTR_ONLY) && defined(HW_OOB) */
+
+extern SDIOH_API_RC
+sdioh_cfg_read(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
+{
+ SDIOH_API_RC status;
+ /* No lock needed since sdioh_request_byte does locking */
+ status = sdioh_request_byte(sd, SDIOH_READ, fnc_num, addr, data);
+ return status;
+}
+
+extern SDIOH_API_RC
+sdioh_cfg_write(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
+{
+ /* No lock needed since sdioh_request_byte does locking */
+ SDIOH_API_RC status;
+ status = sdioh_request_byte(sd, SDIOH_WRITE, fnc_num, addr, data);
+ return status;
+}
+
+static int
+sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr)
+{
+ /* read 24 bits and return valid 17 bit addr */
+ int i;
+ uint32 scratch, regdata;
+ uint8 *ptr = (uint8 *)&scratch;
+ for (i = 0; i < 3; i++) {
+ if ((sdioh_sdmmc_card_regread (sd, 0, regaddr, 1, &regdata)) != SUCCESS)
+ sd_err(("%s: Can't read!\n", __FUNCTION__));
+
+ *ptr++ = (uint8) regdata;
+ regaddr++;
+ }
+
+ /* Only the lower 17-bits are valid */
+ scratch = ltoh32(scratch);
+ scratch &= 0x0001FFFF;
+ return (scratch);
+}
+
+extern SDIOH_API_RC
+sdioh_cis_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 length)
+{
+ uint32 count;
+ int offset;
+ uint32 foo;
+ uint8 *cis = cisd;
+
+ sd_trace(("%s: Func = %d\n", __FUNCTION__, func));
+
+ if (!sd->func_cis_ptr[func]) {
+ bzero(cis, length);
+ sd_err(("%s: no func_cis_ptr[%d]\n", __FUNCTION__, func));
+ return SDIOH_API_RC_FAIL;
+ }
+
+ sd_err(("%s: func_cis_ptr[%d]=0x%04x\n", __FUNCTION__, func, sd->func_cis_ptr[func]));
+
+ for (count = 0; count < length; count++) {
+ offset = sd->func_cis_ptr[func] + count;
+ if (sdioh_sdmmc_card_regread (sd, 0, offset, 1, &foo) < 0) {
+ sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__));
+ return SDIOH_API_RC_FAIL;
+ }
+
+ *cis = (uint8)(foo & 0xff);
+ cis++;
+ }
+
+ return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_cisaddr_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 offset)
+{
+ uint32 foo;
+
+ sd_trace(("%s: Func = %d\n", __FUNCTION__, func));
+
+ if (!sd->func_cis_ptr[func]) {
+ sd_err(("%s: no func_cis_ptr[%d]\n", __FUNCTION__, func));
+ return SDIOH_API_RC_FAIL;
+ }
+
+ if (sdioh_sdmmc_card_regread (sd, 0, sd->func_cis_ptr[func]+offset, 1, &foo) < 0) {
+ sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__));
+ return SDIOH_API_RC_FAIL;
+ }
+
+ *cisd = (uint8)(foo & 0xff);
+
+ return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *byte)
+{
+ int err_ret = 0;
+#if defined(MMC_SDIO_ABORT)
+ int sdio_abort_retry = MMC_SDIO_ABORT_RETRY_LIMIT;
+#endif
+ struct osl_timespec now, before;
+
+ if (sd_msglevel & SDH_COST_VAL)
+ osl_do_gettimeofday(&before);
+
+ sd_info(("%s: rw=%d, func=%d, addr=0x%05x\n", __FUNCTION__, rw, func, regaddr));
+
+ DHD_PM_RESUME_WAIT(sdioh_request_byte_wait);
+ DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
+ if(rw) { /* CMD52 Write */
+ if (func == 0) {
+ /* Can only directly write to some F0 registers. Handle F2 enable
+ * as a special case.
+ */
+ if (regaddr == SDIOD_CCCR_IOEN) {
+#if defined (BT_OVER_SDIO)
+ do {
+ if (sd->func[3]) {
+ sd_info(("bcmsdh_sdmmc F3: *byte 0x%x\n", *byte));
+
+ if (*byte & SDIO_FUNC_ENABLE_3) {
+ sdio_claim_host(sd->func[3]);
+
+ /* Set Function 3 Block Size */
+ err_ret = sdio_set_block_size(sd->func[3],
+ sd_f3_blocksize);
+ if (err_ret) {
+ sd_err(("F3 blocksize set err%d\n",
+ err_ret));
+ }
+
+ /* Enable Function 3 */
+ sd_info(("bcmsdh_sdmmc F3: enable F3 fn %p\n",
+ sd->func[3]));
+ err_ret = sdio_enable_func(sd->func[3]);
+ if (err_ret) {
+ sd_err(("bcmsdh_sdmmc: enable F3 err:%d\n",
+ err_ret));
+ }
+
+ sdio_release_host(sd->func[3]);
+
+ break;
+ } else if (*byte & SDIO_FUNC_DISABLE_3) {
+ sdio_claim_host(sd->func[3]);
+
+ /* Disable Function 3 */
+ sd_info(("bcmsdh_sdmmc F3: disable F3 fn %p\n",
+ sd->func[3]));
+ err_ret = sdio_disable_func(sd->func[3]);
+ if (err_ret) {
+ sd_err(("bcmsdh_sdmmc: Disable F3 err:%d\n",
+ err_ret));
+ }
+ sdio_release_host(sd->func[3]);
+ sd->func[3] = NULL;
+
+ break;
+ }
+ }
+#endif /* defined (BT_OVER_SDIO) */
+ if (sd->func[2]) {
+ sdio_claim_host(sd->func[2]);
+ if (*byte & SDIO_FUNC_ENABLE_2) {
+ /* Enable Function 2 */
+ err_ret = sdio_enable_func(sd->func[2]);
+ if (err_ret) {
+ sd_err(("bcmsdh_sdmmc: enable F2 failed:%d\n",
+ err_ret));
+ }
+ } else {
+ /* Disable Function 2 */
+ err_ret = sdio_disable_func(sd->func[2]);
+ if (err_ret) {
+ sd_err(("bcmsdh_sdmmc: Disab F2 failed:%d\n",
+ err_ret));
+ }
+ }
+ sdio_release_host(sd->func[2]);
+ }
+#if defined (BT_OVER_SDIO)
+ } while (0);
+#endif /* defined (BT_OVER_SDIO) */
+ }
+#if defined(MMC_SDIO_ABORT)
+ /* to allow abort command through F1 */
+ else if (regaddr == SDIOD_CCCR_IOABORT) {
+ /* XXX Because of SDIO3.0 host issue on Manta,
+ * sometimes the abort fails.
+ * Retrying again will fix this issue.
+ */
+ while (sdio_abort_retry--) {
+ if (sd->func[func]) {
+ sdio_claim_host(sd->func[func]);
+					/*
+					 * This sdio_writeb() may be replaced with
+					 * another API depending on MMC driver changes.
+					 * For now, this is a temporary implementation.
+					 */
+ sdio_writeb(sd->func[func],
+ *byte, regaddr, &err_ret);
+ sdio_release_host(sd->func[func]);
+ }
+ if (!err_ret)
+ break;
+ }
+ }
+#endif /* MMC_SDIO_ABORT */
+#if defined(SDIO_ISR_THREAD)
+ else if (regaddr == SDIOD_CCCR_INTR_EXTN) {
+ while (sdio_abort_retry--) {
+ if (sd->func[func]) {
+ sdio_claim_host(sd->func[func]);
+					/*
+					 * This sdio_writeb() may be replaced with
+					 * another API depending on MMC driver changes.
+					 * For now, this is a temporary implementation.
+					 */
+ sdio_writeb(sd->func[func],
+ *byte, regaddr, &err_ret);
+ sdio_release_host(sd->func[func]);
+ }
+ if (!err_ret)
+ break;
+ }
+ }
+#endif
+ else if (regaddr < 0xF0) {
+ sd_err(("bcmsdh_sdmmc: F0 Wr:0x%02x: write disallowed\n", regaddr));
+ } else {
+ /* Claim host controller, perform F0 write, and release */
+ if (sd->func[func]) {
+ sdio_claim_host(sd->func[func]);
+ sdio_f0_writeb(sd->func[func],
+ *byte, regaddr, &err_ret);
+ sdio_release_host(sd->func[func]);
+ }
+ }
+ } else {
+ /* Claim host controller, perform Fn write, and release */
+ if (sd->func[func]) {
+ sdio_claim_host(sd->func[func]);
+ sdio_writeb(sd->func[func], *byte, regaddr, &err_ret);
+ sdio_release_host(sd->func[func]);
+ }
+ }
+ } else { /* CMD52 Read */
+ /* Claim host controller, perform Fn read, and release */
+ if (sd->func[func]) {
+ sdio_claim_host(sd->func[func]);
+ if (func == 0) {
+ *byte = sdio_f0_readb(sd->func[func], regaddr, &err_ret);
+ } else {
+ *byte = sdio_readb(sd->func[func], regaddr, &err_ret);
+ }
+ sdio_release_host(sd->func[func]);
+ }
+ }
+
+ if (err_ret) {
+ if ((regaddr == 0x1001F) && ((err_ret == -ETIMEDOUT) || (err_ret == -EILSEQ)
+ || (err_ret == -EIO))) {
+		/* XXX: Reads/writes to SBSDIO_FUNC1_SLEEPCSR can return -110 (timeout)
+		 * or -84 (CRC) errors while the host tries to wake the device up.
+		 * Skip the error log for those codes when accessing
+		 * SBSDIO_FUNC1_SLEEPCSR to avoid QA misunderstanding; DHD should
+		 * still print an error if the retry count exceeds MAX_KSO_ATTEMPTS.
+		 */
+ } else {
+ sd_err(("bcmsdh_sdmmc: Failed to %s byte F%d:@0x%05x=%02x, Err: %d\n",
+ rw ? "Write" : "Read", func, regaddr, *byte, err_ret));
+ }
+ }
+
+ if (sd_msglevel & SDH_COST_VAL) {
+ uint32 diff_us;
+ osl_do_gettimeofday(&now);
+ diff_us = osl_do_gettimediff(&now, &before);
+ sd_cost(("%s: rw=%d len=1 cost = %3dms %3dus\n", __FUNCTION__,
+ rw, diff_us/1000, diff_us%1000));
+ }
+
+ return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
+}
+
+uint
+sdioh_set_mode(sdioh_info_t *sd, uint mode)
+{
+ if (mode == SDPCM_TXGLOM_CPY)
+ sd->txglom_mode = mode;
+ else if (mode == SDPCM_TXGLOM_MDESC)
+ sd->txglom_mode = mode;
+
+ return (sd->txglom_mode);
+}
+
+#ifdef PKT_STATICS
+uint32
+sdioh_get_spend_time(sdioh_info_t *sd)
+{
+ return (sd->sdio_spent_time_us);
+}
+#endif
+
+extern SDIOH_API_RC
+sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint addr,
+ uint32 *word, uint nbytes)
+{
+ int err_ret = SDIOH_API_RC_FAIL;
+	int err_ret2 = SDIOH_API_RC_SUCCESS; // terence 20130621: prevent dhd_dpc from deadlocking
+#if defined(MMC_SDIO_ABORT)
+ int sdio_abort_retry = MMC_SDIO_ABORT_RETRY_LIMIT;
+#endif
+ struct osl_timespec now, before;
+
+ if (sd_msglevel & SDH_COST_VAL)
+ osl_do_gettimeofday(&before);
+
+ if (func == 0) {
+ sd_err(("%s: Only CMD52 allowed to F0.\n", __FUNCTION__));
+ return SDIOH_API_RC_FAIL;
+ }
+
+ sd_info(("%s: cmd_type=%d, rw=%d, func=%d, addr=0x%05x, nbytes=%d\n",
+ __FUNCTION__, cmd_type, rw, func, addr, nbytes));
+
+ DHD_PM_RESUME_WAIT(sdioh_request_word_wait);
+ DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
+ /* Claim host controller */
+ sdio_claim_host(sd->func[func]);
+
+ if(rw) { /* CMD52 Write */
+ if (nbytes == 4) {
+ sdio_writel(sd->func[func], *word, addr, &err_ret);
+ } else if (nbytes == 2) {
+ sdio_writew(sd->func[func], (*word & 0xFFFF), addr, &err_ret);
+ } else {
+ sd_err(("%s: Invalid nbytes: %d\n", __FUNCTION__, nbytes));
+ }
+ } else { /* CMD52 Read */
+ if (nbytes == 4) {
+ *word = sdio_readl(sd->func[func], addr, &err_ret);
+ } else if (nbytes == 2) {
+ *word = sdio_readw(sd->func[func], addr, &err_ret) & 0xFFFF;
+ } else {
+ sd_err(("%s: Invalid nbytes: %d\n", __FUNCTION__, nbytes));
+ }
+ }
+
+ /* Release host controller */
+ sdio_release_host(sd->func[func]);
+
+ if (err_ret) {
+#if defined(MMC_SDIO_ABORT)
+ /* Any error on CMD53 transaction should abort that function using function 0. */
+ while (sdio_abort_retry--) {
+ if (sd->func[0]) {
+ sdio_claim_host(sd->func[0]);
+				/*
+				 * This sdio_writeb() may be replaced with another API
+				 * depending on MMC driver changes.
+				 * For now, this is a temporary implementation.
+				 */
+ sdio_writeb(sd->func[0],
+ func, SDIOD_CCCR_IOABORT, &err_ret2);
+ sdio_release_host(sd->func[0]);
+ }
+ if (!err_ret2)
+ break;
+ }
+ if (err_ret)
+#endif /* MMC_SDIO_ABORT */
+ {
+ sd_err(("bcmsdh_sdmmc: Failed to %s word F%d:@0x%05x=%02x, Err: 0x%08x\n",
+ rw ? "Write" : "Read", func, addr, *word, err_ret));
+ }
+ }
+
+ if (sd_msglevel & SDH_COST_VAL) {
+ uint32 diff_us;
+ osl_do_gettimeofday(&now);
+ diff_us = osl_do_gettimediff(&now, &before);
+ sd_cost(("%s: rw=%d, len=%d cost = %3dms %3dus\n", __FUNCTION__,
+ rw, nbytes, diff_us/1000, diff_us%1000));
+ }
+
+ return (((err_ret == 0)&&(err_ret2 == 0)) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
+}
+
+#ifdef BCMSDIOH_TXGLOM
+static SDIOH_API_RC
+sdioh_request_packet_chain(sdioh_info_t *sd, uint fix_inc, uint write, uint func,
+ uint addr, void *pkt)
+{
+ bool fifo = (fix_inc == SDIOH_DATA_FIX);
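+	/* "fifo" selects fixed-address access: every byte targets the same
+	 * register address instead of an incrementing window (CMD53 OP code bit).
+	 */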
+ int err_ret = 0;
+ void *pnext;
+ uint ttl_len, pkt_offset;
+ uint blk_num;
+ uint blk_size;
+ uint max_blk_count;
+ uint max_req_size;
+ struct mmc_request mmc_req;
+ struct mmc_command mmc_cmd;
+ struct mmc_data mmc_dat;
+ uint32 sg_count;
+ struct sdio_func *sdio_func = sd->func[func];
+ struct mmc_host *host = sdio_func->card->host;
+ uint8 *localbuf = NULL;
+ uint local_plen = 0;
+ uint pkt_len = 0;
+ struct osl_timespec now, before;
+
+ sd_trace(("%s: Enter\n", __FUNCTION__));
+ ASSERT(pkt);
+ DHD_PM_RESUME_WAIT(sdioh_request_packet_wait);
+ DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
+
+#ifndef PKT_STATICS
+ if (sd_msglevel & SDH_COST_VAL)
+#endif
+ osl_do_gettimeofday(&before);
+
+ blk_size = sd->client_block_size[func];
+ max_blk_count = min(host->max_blk_count, (uint)MAX_IO_RW_EXTENDED_BLK);
+ max_req_size = min(max_blk_count * blk_size, host->max_req_size);
+
+ pkt_offset = 0;
+ pnext = pkt;
+
+ ttl_len = 0;
+ sg_count = 0;
+ if(sd->txglom_mode == SDPCM_TXGLOM_MDESC) {
+ while (pnext != NULL) {
+ ttl_len = 0;
+ sg_count = 0;
+ memset(&mmc_req, 0, sizeof(struct mmc_request));
+ memset(&mmc_cmd, 0, sizeof(struct mmc_command));
+ memset(&mmc_dat, 0, sizeof(struct mmc_data));
+ sg_init_table(sd->sg_list, ARRAYSIZE(sd->sg_list));
+
+			/* Set up scatter-gather DMA descriptors. This loop finds the maximum
+			 * data we can transfer with one CMD53. Blocks per command are limited by
+			 * the host's max_req_size and the 9-bit block count. When the total length
+			 * of the packet chain exceeds max_req_size, multiple SD_IO_RW_EXTENDED
+			 * commands are issued (each transfer is still block aligned).
+			 */
+ while (pnext != NULL && ttl_len < max_req_size) {
+ int pkt_len;
+ int sg_data_size;
+ uint8 *pdata = (uint8*)PKTDATA(sd->osh, pnext);
+
+ ASSERT(pdata != NULL);
+ pkt_len = PKTLEN(sd->osh, pnext);
+ sd_trace(("%s[%d] data=%p, len=%d\n", __FUNCTION__, write, pdata, pkt_len));
+ /* sg_count is unlikely larger than the array size, and this is
+ * NOT something we can handle here, but in case it happens, PLEASE put
+ * a restriction on max tx/glom count (based on host->max_segs).
+ */
+ if (sg_count >= ARRAYSIZE(sd->sg_list)) {
+ sd_err(("%s: sg list entries(%u) exceed limit(%zu),"
+ " sd blk_size=%u\n",
+ __FUNCTION__, sg_count, (size_t)ARRAYSIZE(sd->sg_list), blk_size));
+ return (SDIOH_API_RC_FAIL);
+ }
+ pdata += pkt_offset;
+
+ sg_data_size = pkt_len - pkt_offset;
+ if (sg_data_size > max_req_size - ttl_len)
+ sg_data_size = max_req_size - ttl_len;
+ /* some platforms put a restriction on the data size of each scatter-gather
+ * DMA descriptor, use multiple sg buffers when xfer_size is bigger than
+ * max_seg_size
+ */
+ if (sg_data_size > host->max_seg_size) {
+ sg_data_size = host->max_seg_size;
+ }
+ sg_set_buf(&sd->sg_list[sg_count++], pdata, sg_data_size);
+
+ ttl_len += sg_data_size;
+ pkt_offset += sg_data_size;
+ if (pkt_offset == pkt_len) {
+ pnext = PKTNEXT(sd->osh, pnext);
+ pkt_offset = 0;
+ }
+ }
+
+ if (ttl_len % blk_size != 0) {
+ sd_err(("%s, data length %d not aligned to block size %d\n",
+ __FUNCTION__, ttl_len, blk_size));
+ return SDIOH_API_RC_FAIL;
+ }
+ blk_num = ttl_len / blk_size;
+ mmc_dat.sg = sd->sg_list;
+ mmc_dat.sg_len = sg_count;
+ mmc_dat.blksz = blk_size;
+ mmc_dat.blocks = blk_num;
+ mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
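+			/* Build the CMD53 argument per the SDIO spec: bit 31 = R/W flag,
+			 * bits 30:28 = function number, bit 27 = block mode, bit 26 = OP code
+			 * (incrementing address), bits 25:9 = register address,
+			 * bits 8:0 = block count.
+			 */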
+ mmc_cmd.opcode = 53; /* SD_IO_RW_EXTENDED */
+ mmc_cmd.arg = write ? 1<<31 : 0;
+ mmc_cmd.arg |= (func & 0x7) << 28;
+ mmc_cmd.arg |= 1<<27;
+ mmc_cmd.arg |= fifo ? 0 : 1<<26;
+ mmc_cmd.arg |= (addr & 0x1FFFF) << 9;
+ mmc_cmd.arg |= blk_num & 0x1FF;
+ mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
+ mmc_req.cmd = &mmc_cmd;
+ mmc_req.data = &mmc_dat;
+ if (!fifo)
+ addr += ttl_len;
+
+ sdio_claim_host(sdio_func);
+ mmc_set_data_timeout(&mmc_dat, sdio_func->card);
+ mmc_wait_for_req(host, &mmc_req);
+ sdio_release_host(sdio_func);
+
+ err_ret = mmc_cmd.error? mmc_cmd.error : mmc_dat.error;
+ if (0 != err_ret) {
+ sd_err(("%s:CMD53 %s failed with code %d\n",
+ __FUNCTION__, write ? "write" : "read", err_ret));
+ return SDIOH_API_RC_FAIL;
+ }
+ }
+ } else if(sd->txglom_mode == SDPCM_TXGLOM_CPY) {
+ for (pnext = pkt; pnext; pnext = PKTNEXT(sd->osh, pnext)) {
+ ttl_len += PKTLEN(sd->osh, pnext);
+ }
+ /* Claim host controller */
+ sdio_claim_host(sd->func[func]);
+ for (pnext = pkt; pnext; pnext = PKTNEXT(sd->osh, pnext)) {
+ uint8 *buf = (uint8*)PKTDATA(sd->osh, pnext);
+ pkt_len = PKTLEN(sd->osh, pnext);
+
+ if (!localbuf) {
+ localbuf = (uint8 *)MALLOC(sd->osh, ttl_len);
+ if (localbuf == NULL) {
+ sd_err(("%s: %s TXGLOM: localbuf malloc FAILED\n",
+ __FUNCTION__, (write) ? "TX" : "RX"));
+ goto txglomfail;
+ }
+ }
+
+ bcopy(buf, (localbuf + local_plen), pkt_len);
+ local_plen += pkt_len;
+ if (PKTNEXT(sd->osh, pnext))
+ continue;
+
+ buf = localbuf;
+ pkt_len = local_plen;
+txglomfail:
+ /* Align Patch */
+ if (!write || pkt_len < 32)
+ pkt_len = (pkt_len + 3) & 0xFFFFFFFC;
+ else if (pkt_len % blk_size)
+ pkt_len += blk_size - (pkt_len % blk_size);
+
+			if (write)
+				err_ret = sdio_memcpy_toio(sd->func[func], addr, buf, pkt_len);
+ else if (fifo)
+ err_ret = sdio_readsb(sd->func[func], buf, addr, pkt_len);
+ else
+ err_ret = sdio_memcpy_fromio(sd->func[func], buf, addr, pkt_len);
+
+ if (err_ret)
+ sd_err(("%s: %s FAILED %p[%d], addr=0x%05x, pkt_len=%d, ERR=%d\n",
+ __FUNCTION__,
+ (write) ? "TX" : "RX",
+ pnext, sg_count, addr, pkt_len, err_ret));
+ else
+ sd_trace(("%s: %s xfr'd %p[%d], addr=0x%05x, len=%d\n",
+ __FUNCTION__,
+ (write) ? "TX" : "RX",
+ pnext, sg_count, addr, pkt_len));
+
+ if (!fifo)
+ addr += pkt_len;
+ sg_count ++;
+ }
+ sdio_release_host(sd->func[func]);
+ } else {
+ sd_err(("%s: set to wrong glom mode %d\n", __FUNCTION__, sd->txglom_mode));
+ return SDIOH_API_RC_FAIL;
+ }
+
+ if (localbuf)
+ MFREE(sd->osh, localbuf, ttl_len);
+
+#ifndef PKT_STATICS
+ if (sd_msglevel & SDH_COST_VAL)
+#endif
+ {
+ uint32 diff_us;
+ osl_do_gettimeofday(&now);
+ diff_us = osl_do_gettimediff(&now, &before);
+ sd_cost(("%s: rw=%d, ttl_len=%4d cost = %3dms %3dus\n", __FUNCTION__,
+ write, ttl_len, diff_us/1000, diff_us%1000));
+#ifdef PKT_STATICS
+ if (write && (func == 2))
+ sd->sdio_spent_time_us = diff_us;
+#endif
+ }
+
+ sd_trace(("%s: Exit\n", __FUNCTION__));
+ return SDIOH_API_RC_SUCCESS;
+}
+#endif /* BCMSDIOH_TXGLOM */
+
+static SDIOH_API_RC
+sdioh_buffer_tofrom_bus(sdioh_info_t *sd, uint fix_inc, uint write, uint func,
+ uint addr, uint8 *buf, uint len)
+{
+ bool fifo = (fix_inc == SDIOH_DATA_FIX);
+ int err_ret = 0;
+ struct osl_timespec now, before;
+
+ sd_trace(("%s: Enter\n", __FUNCTION__));
+ ASSERT(buf);
+
+ if (sd_msglevel & SDH_COST_VAL)
+ osl_do_gettimeofday(&before);
+
+	/* NOTE:
+	 * For all writes, each packet length is aligned to 32 (or 4)
+	 * bytes in dhdsdio_txpkt_preprocess, and for glom the last packet length
+	 * is aligned to the block boundary. If you want to align each packet to
+	 * a custom size, do it in dhdsdio_txpkt_preprocess, NOT here.
+	 *
+	 * For reads, the alignment is done in sdioh_request_buffer.
+	 */
+ sdio_claim_host(sd->func[func]);
+
+	if (write)
+		err_ret = sdio_memcpy_toio(sd->func[func], addr, buf, len);
+ else if (fifo)
+ err_ret = sdio_readsb(sd->func[func], buf, addr, len);
+ else
+ err_ret = sdio_memcpy_fromio(sd->func[func], buf, addr, len);
+
+ sdio_release_host(sd->func[func]);
+
+ if (err_ret)
+ sd_err(("%s: %s FAILED %p, addr=0x%05x, pkt_len=%d, ERR=%d\n", __FUNCTION__,
+ (write) ? "TX" : "RX", buf, addr, len, err_ret));
+ else
+ sd_trace(("%s: %s xfr'd %p, addr=0x%05x, len=%d\n", __FUNCTION__,
+ (write) ? "TX" : "RX", buf, addr, len));
+
+ sd_trace(("%s: Exit\n", __FUNCTION__));
+
+ if (sd_msglevel & SDH_COST_VAL) {
+ uint32 diff_us;
+ osl_do_gettimeofday(&now);
+ diff_us = osl_do_gettimediff(&now, &before);
+ sd_cost(("%s: rw=%d, len=%4d cost = %3dms %3dus\n", __FUNCTION__,
+ write, len, diff_us/1000, diff_us%1000));
+ }
+
+ return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
+}
+
+/*
+ * This function takes a buffer or packet, and fixes everything up so that in the
+ * end, a DMA-able packet is created.
+ *
+ * A buffer does not have an associated packet pointer, and may or may not be aligned.
+ * A packet may consist of a single packet, or a packet chain. If it is a packet chain,
+ * then all the packets in the chain must be properly aligned. If the packet data is not
+ * aligned, then there may only be one packet, and in this case, it is copied to a new
+ * aligned packet.
+ *
+ */
+extern SDIOH_API_RC
+sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint write, uint func,
+ uint addr, uint reg_width, uint buf_len, uint8 *buffer, void *pkt)
+{
+ SDIOH_API_RC status;
+ void *tmppkt;
+ int is_vmalloc = FALSE;
+ struct osl_timespec now, before;
+
+ sd_trace(("%s: Enter\n", __FUNCTION__));
+ DHD_PM_RESUME_WAIT(sdioh_request_buffer_wait);
+ DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
+
+ if (sd_msglevel & SDH_COST_VAL)
+ osl_do_gettimeofday(&before);
+
+ if (pkt) {
+#ifdef BCMSDIOH_TXGLOM
+ /* packet chain, only used for tx/rx glom, all packets length
+ * are aligned, total length is a block multiple
+ */
+ if (PKTNEXT(sd->osh, pkt))
+ return sdioh_request_packet_chain(sd, fix_inc, write, func, addr, pkt);
+#endif /* BCMSDIOH_TXGLOM */
+ /* non-glom mode, ignore the buffer parameter and use the packet pointer
+ * (this shouldn't happen)
+ */
+ buffer = PKTDATA(sd->osh, pkt);
+ buf_len = PKTLEN(sd->osh, pkt);
+ }
+
+ ASSERT(buffer);
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 24)
+ is_vmalloc = is_vmalloc_addr(buffer);
+#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 24) */
+
+ /* buffer and length are aligned, use it directly so we can avoid memory copy */
+ if ((((ulong)buffer & DMA_ALIGN_MASK) == 0) && ((buf_len & DMA_ALIGN_MASK) == 0) &&
+ (!is_vmalloc)) {
+ return sdioh_buffer_tofrom_bus(sd, fix_inc, write, func, addr, buffer, buf_len);
+ }
+
+ if (is_vmalloc) {
+		sd_trace(("%s: Need a memory copy due to vmalloc'ed buffer address.\n",
+ __FUNCTION__));
+ }
+
+ sd_trace(("%s: [%d] doing memory copy buf=%p, len=%d\n",
+ __FUNCTION__, write, buffer, buf_len));
+
+ /* otherwise, a memory copy is needed as the input buffer is not aligned */
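+	/* The bounce packet is over-allocated by one F2 block of headroom so the
+	 * rounded-up transfer length always fits.
+	 */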
+ tmppkt = PKTGET_STATIC(sd->osh, buf_len + DEFAULT_SDIO_F2_BLKSIZE, write ? TRUE : FALSE);
+ if (tmppkt == NULL) {
+ sd_err(("%s: PKTGET failed: len %d\n", __FUNCTION__, buf_len));
+ return SDIOH_API_RC_FAIL;
+ }
+
+ if (write)
+ bcopy(buffer, PKTDATA(sd->osh, tmppkt), buf_len);
+
+ status = sdioh_buffer_tofrom_bus(sd, fix_inc, write, func, addr,
+ PKTDATA(sd->osh, tmppkt), ROUNDUP(buf_len, (DMA_ALIGN_MASK+1)));
+
+ if (!write)
+ bcopy(PKTDATA(sd->osh, tmppkt), buffer, buf_len);
+
+ PKTFREE_STATIC(sd->osh, tmppkt, write ? TRUE : FALSE);
+
+ if (sd_msglevel & SDH_COST_VAL) {
+ uint32 diff_us;
+ osl_do_gettimeofday(&now);
+ diff_us = osl_do_gettimediff(&now, &before);
+ sd_cost(("%s: rw=%d, len=%d cost = %3dms %3dus\n", __FUNCTION__,
+ write, buf_len, diff_us/1000, diff_us%1000));
+ }
+
+ return status;
+}
+
+/* this function performs "abort" for both of host & device */
+extern int
+sdioh_abort(sdioh_info_t *sd, uint func)
+{
+#if defined(MMC_SDIO_ABORT)
+ char t_func = (char) func;
+#endif /* defined(MMC_SDIO_ABORT) */
+ sd_trace(("%s: Enter\n", __FUNCTION__));
+
+ /* XXX Standard Linux SDIO Stack cannot perform an abort. */
+#if defined(MMC_SDIO_ABORT)
+ /* issue abort cmd52 command through F1 */
+ sdioh_request_byte(sd, SD_IO_OP_WRITE, SDIO_FUNC_0, SDIOD_CCCR_IOABORT, &t_func);
+#endif /* defined(MMC_SDIO_ABORT) */
+
+ sd_trace(("%s: Exit\n", __FUNCTION__));
+ return SDIOH_API_RC_SUCCESS;
+}
+
+/* Reset and re-initialize the device */
+int sdioh_sdio_reset(sdioh_info_t *si)
+{
+ sd_trace(("%s: Enter\n", __FUNCTION__));
+ sd_trace(("%s: Exit\n", __FUNCTION__));
+ return SDIOH_API_RC_SUCCESS;
+}
+
+/* Disable device interrupt */
+void
+sdioh_sdmmc_devintr_off(sdioh_info_t *sd)
+{
+ sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
+ sd->intmask &= ~CLIENT_INTR;
+}
+
+/* Enable device interrupt */
+void
+sdioh_sdmmc_devintr_on(sdioh_info_t *sd)
+{
+ sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
+ sd->intmask |= CLIENT_INTR;
+}
+
+/* Read client card reg */
+int
+sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data)
+{
+
+ if ((func == 0) || (regsize == 1)) {
+ uint8 temp = 0;
+
+ sdioh_request_byte(sd, SDIOH_READ, func, regaddr, &temp);
+ *data = temp;
+ *data &= 0xff;
+ sd_data(("%s: byte read data=0x%02x\n",
+ __FUNCTION__, *data));
+ } else {
+ if (sdioh_request_word(sd, 0, SDIOH_READ, func, regaddr, data, regsize)) {
+ return BCME_SDIO_ERROR;
+ }
+ if (regsize == 2)
+ *data &= 0xffff;
+
+ sd_data(("%s: word read data=0x%08x\n",
+ __FUNCTION__, *data));
+ }
+
+ return SUCCESS;
+}
+
+#if !defined(OOB_INTR_ONLY)
+void sdio_claim_host_lock_local(sdioh_info_t *sd) // terence 20140926: fix for claim host issue
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+ if (sd)
+ mutex_lock(&sd->claim_host_mutex);
+#endif
+}
+
+void sdio_claim_host_unlock_local(sdioh_info_t *sd) // terence 20140926: fix for claim host issue
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+ if (sd)
+ mutex_unlock(&sd->claim_host_mutex);
+#endif
+}
+
+/* bcmsdh_sdmmc interrupt handler */
+static void IRQHandler(struct sdio_func *func)
+{
+ sdioh_info_t *sd;
+
+ sd = sdio_get_drvdata(func);
+
+ ASSERT(sd != NULL);
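+	/* Best-effort guard: skip servicing while another context holds the local
+	 * claim-host mutex (see sdio_claim_host_lock_local()); note that
+	 * mutex_is_locked() is inherently racy.
+	 */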
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+ if (mutex_is_locked(&sd->claim_host_mutex)) {
+		printf("%s: mutex is locked, returning\n", __FUNCTION__);
+ return;
+ }
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
+
+ sdio_claim_host_lock_local(sd);
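+	/* The MMC core calls SDIO IRQ handlers with the host already claimed;
+	 * release it here so the DHD thread can claim it while servicing, then
+	 * re-claim it before returning to the core.
+	 */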
+ sdio_release_host(sd->func[0]);
+
+ if (sd->use_client_ints) {
+ sd->intrcount++;
+ ASSERT(sd->intr_handler);
+ ASSERT(sd->intr_handler_arg);
+ (sd->intr_handler)(sd->intr_handler_arg);
+ } else { /* XXX - Do not remove these sd_err messages. Need to figure
+ out how to keep interrupts disabled until DHD registers
+ a handler.
+ */
+ sd_err(("bcmsdh_sdmmc: ***IRQHandler\n"));
+
+ sd_err(("%s: Not ready for intr: enabled %d, handler %p\n",
+ __FUNCTION__, sd->client_intr_enabled, sd->intr_handler));
+ }
+
+ sdio_claim_host(sd->func[0]);
+ sdio_claim_host_unlock_local(sd);
+}
+
+/* bcmsdh_sdmmc interrupt handler for F2 (dummy handler) */
+static void IRQHandlerF2(struct sdio_func *func)
+{
+ sd_trace(("bcmsdh_sdmmc: ***IRQHandlerF2\n"));
+}
+#endif /* !defined(OOB_INTR_ONLY) */
+
+#ifdef NOTUSED
+/* Write client card reg */
+static int
+sdioh_sdmmc_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 data)
+{
+
+ if ((func == 0) || (regsize == 1)) {
+ uint8 temp;
+
+ temp = data & 0xff;
+		sdioh_request_byte(sd, SDIOH_WRITE, func, regaddr, &temp);
+ sd_data(("%s: byte write data=0x%02x\n",
+ __FUNCTION__, data));
+ } else {
+ if (regsize == 2)
+ data &= 0xffff;
+
+		sdioh_request_word(sd, 0, SDIOH_WRITE, func, regaddr, &data, regsize);
+
+ sd_data(("%s: word write data=0x%08x\n",
+ __FUNCTION__, data));
+ }
+
+ return SUCCESS;
+}
+#endif /* NOTUSED */
+
+#if defined(ENABLE_INSMOD_NO_FW_LOAD) && !defined(BUS_POWER_RESTORE)
+static int sdio_sw_reset(sdioh_info_t *sd)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0) && defined(MMC_SW_RESET)
+ struct mmc_host *host = sd->func[0]->card->host;
+#endif
+ int err = 0;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0) && defined(MMC_SW_RESET)
+ printf("%s: Enter\n", __FUNCTION__);
+ sdio_claim_host(sd->func[0]);
+ err = mmc_sw_reset(host);
+ sdio_release_host(sd->func[0]);
+#else
+ err = sdio_reset_comm(sd->func[0]->card);
+#endif
+
+ if (err)
+ sd_err(("%s Failed, error = %d\n", __FUNCTION__, err));
+
+ return err;
+}
+#endif
+
+int
+sdioh_start(sdioh_info_t *sd, int stage)
+{
+#if defined(OEM_ANDROID)
+ int ret;
+
+ if (!sd) {
+ sd_err(("%s Failed, sd is NULL\n", __FUNCTION__));
+ return (0);
+ }
+
+	/* Need to do this in stages, as we can't enable the interrupt until
+	   firmware download is complete; otherwise polled
+	   sdio access will get in the way
+	*/
+ if (sd->func[0]) {
+ if (stage == 0) {
+			/* Since the power to the chip was killed, we have to
+			   re-enumerate the device. Set the block size
+			   and enable function 1 in preparation for
+			   downloading the firmware
+			*/
+ /* sdio_reset_comm() - has been fixed in latest kernel/msm.git for Linux
+ 2.6.27. The implementation prior to that is buggy, and needs broadcom's
+ patch for it
+ */
+#if defined(ENABLE_INSMOD_NO_FW_LOAD) && !defined(BUS_POWER_RESTORE)
+ if ((ret = sdio_sw_reset(sd))) {
+ sd_err(("%s Failed, error = %d\n", __FUNCTION__, ret));
+ return ret;
+ } else
+#endif
+ {
+ sd->num_funcs = 2;
+ sd->sd_blockmode = TRUE;
+ sd->use_client_ints = TRUE;
+ sd->client_block_size[0] = 64;
+
+ if (sd->func[1]) {
+ /* Claim host controller */
+ sdio_claim_host(sd->func[1]);
+
+ sd->client_block_size[1] = 64;
+ ret = sdio_set_block_size(sd->func[1], 64);
+ if (ret) {
+ sd_err(("bcmsdh_sdmmc: Failed to set F1 "
+ "blocksize(%d)\n", ret));
+ }
+
+ /* Release host controller F1 */
+ sdio_release_host(sd->func[1]);
+ }
+
+ if (sd->func[2]) {
+ /* Claim host controller F2 */
+ sdio_claim_host(sd->func[2]);
+
+ sd->client_block_size[2] = sd_f2_blocksize;
+ printf("%s: set sd_f2_blocksize %d\n", __FUNCTION__, sd_f2_blocksize);
+ ret = sdio_set_block_size(sd->func[2], sd_f2_blocksize);
+ if (ret) {
+ sd_err(("bcmsdh_sdmmc: Failed to set F2 "
+ "blocksize to %d(%d)\n", sd_f2_blocksize, ret));
+ }
+
+ /* Release host controller F2 */
+ sdio_release_host(sd->func[2]);
+ }
+
+ sdioh_sdmmc_card_enablefuncs(sd);
+ }
+ } else {
+#if !defined(OOB_INTR_ONLY)
+ sdio_claim_host(sd->func[0]);
+ if (sd->func[2])
+ sdio_claim_irq(sd->func[2], IRQHandlerF2);
+ if (sd->func[1])
+ sdio_claim_irq(sd->func[1], IRQHandler);
+ sdio_release_host(sd->func[0]);
+#else /* defined(OOB_INTR_ONLY) */
+#if defined(HW_OOB)
+ sdioh_enable_func_intr(sd);
+#endif
+ bcmsdh_oob_intr_set(sd->bcmsdh, TRUE);
+#endif /* !defined(OOB_INTR_ONLY) */
+ }
+ }
+ else
+ sd_err(("%s Failed\n", __FUNCTION__));
+#endif /* defined(OEM_ANDROID) */
+
+ return (0);
+}
+
+int
+sdioh_stop(sdioh_info_t *sd)
+{
+#if defined(OEM_ANDROID)
+	/* The MSM7201A Android sdio stack has a bug with interrupts:
+	   internally the SDIO stack polls, which causes issues
+	   when the device is turned off. So
+	   unregister the interrupt with the SDIO stack to stop
+	   the polling
+	*/
+#if !defined(OOB_INTR_ONLY)
+ sdio_claim_host_lock_local(sd);
+#endif
+ if (sd->func[0]) {
+#if !defined(OOB_INTR_ONLY)
+ sdio_claim_host(sd->func[0]);
+ if (sd->func[1])
+ sdio_release_irq(sd->func[1]);
+ if (sd->func[2])
+ sdio_release_irq(sd->func[2]);
+ sdio_release_host(sd->func[0]);
+#else /* defined(OOB_INTR_ONLY) */
+#if defined(HW_OOB)
+ sdioh_disable_func_intr(sd);
+#endif
+ bcmsdh_oob_intr_set(sd->bcmsdh, FALSE);
+#endif /* !defined(OOB_INTR_ONLY) */
+ }
+ else
+ sd_err(("%s Failed\n", __FUNCTION__));
+#endif /* defined(OEM_ANDROID) */
+#if !defined(OOB_INTR_ONLY)
+ sdio_claim_host_unlock_local(sd);
+#endif
+ return (0);
+}
+
+int
+sdioh_waitlockfree(sdioh_info_t *sd)
+{
+ return (1);
+}
+
+#ifdef BCMINTERNAL
+extern SDIOH_API_RC
+sdioh_test_diag(sdioh_info_t *sd)
+{
+ sd_trace(("%s: Enter\n", __FUNCTION__));
+ sd_trace(("%s: Exit\n", __FUNCTION__));
+ return (0);
+}
+#endif /* BCMINTERNAL */
+
+SDIOH_API_RC
+sdioh_gpioouten(sdioh_info_t *sd, uint32 gpio)
+{
+ return SDIOH_API_RC_FAIL;
+}
+
+SDIOH_API_RC
+sdioh_gpioout(sdioh_info_t *sd, uint32 gpio, bool enab)
+{
+ return SDIOH_API_RC_FAIL;
+}
+
+bool
+sdioh_gpioin(sdioh_info_t *sd, uint32 gpio)
+{
+ return FALSE;
+}
+
+SDIOH_API_RC
+sdioh_gpio_init(sdioh_info_t *sd)
+{
+ return SDIOH_API_RC_FAIL;
+}
+
+uint
+sdmmc_get_clock_rate(sdioh_info_t *sd)
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0)) || (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
+ struct sdio_func *sdio_func = sd->func[0];
+ struct mmc_host *host = sdio_func->card->host;
+ return mmc_host_clk_rate(host);
+#else
+ return 0;
+#endif
+}
+
+void
+sdmmc_set_clock_rate(sdioh_info_t *sd, uint hz)
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0)) || (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
+ struct sdio_func *sdio_func = sd->func[0];
+ struct mmc_host *host = sdio_func->card->host;
+ struct mmc_ios *ios = &host->ios;
+
+ mmc_host_clk_hold(host);
+ DHD_INFO(("%s: Before change: sd clock rate is %u\n", __FUNCTION__, ios->clock));
+ if (hz < host->f_min) {
+ DHD_ERROR(("%s: Intended rate is below min rate, setting to min\n", __FUNCTION__));
+ hz = host->f_min;
+ }
+
+ if (hz > host->f_max) {
+ DHD_ERROR(("%s: Intended rate exceeds max rate, setting to max\n", __FUNCTION__));
+ hz = host->f_max;
+ }
+ ios->clock = hz;
+ host->ops->set_ios(host, ios);
+ DHD_ERROR(("%s: After change: sd clock rate is %u\n", __FUNCTION__, ios->clock));
+ mmc_host_clk_release(host);
+#else
+ return;
+#endif
+}
+
+void
+sdmmc_set_clock_divisor(sdioh_info_t *sd, uint sd_div)
+{
+	uint hz;
+	/* XXX the original early-return compared the current rate (in Hz)
+	 * against the divisor; compare the computed target rate instead.
+	 */
+	uint old_rate = sdmmc_get_clock_rate(sd);
+
+	hz = sd->sd_clk_rate / sd_div;
+	if (old_rate == hz) {
+		return;
+	}
+
+	sdmmc_set_clock_rate(sd, hz);
+}
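+
+/* Worked example (illustrative): with sd->sd_clk_rate = 50000000 and
+ * sd_div = 2, the target becomes 50000000 / 2 = 25000000 Hz, which
+ * sdmmc_set_clock_rate() clamps to [host->f_min, host->f_max] before
+ * programming it through host->ops->set_ios().
+ */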
diff --git a/bcmdhd.101.10.361.x/bcmsdh_sdmmc_linux.c b/bcmdhd.101.10.361.x/bcmsdh_sdmmc_linux.c
new file mode 100755
index 0000000..1dfb408
--- /dev/null
+++ b/bcmdhd.101.10.361.x/bcmsdh_sdmmc_linux.c
@@ -0,0 +1,388 @@
+/*
+ * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+#include <typedefs.h>
+#include <bcmutils.h>
+#include <sdio.h> /* SDIO Device and Protocol Specs */
+#include <bcmsdbus.h> /* bcmsdh to/from specific controller APIs */
+#include <sdiovar.h> /* to get msglevel bit values */
+
+#include <linux/sched.h> /* request_irq() */
+
+#include <linux/mmc/core.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
+#include <dhd_linux.h>
+#include <bcmsdh_sdmmc.h>
+#include <dhd_dbg.h>
+#include <bcmdevs.h>
+
+#if !defined(SDIO_VENDOR_ID_BROADCOM)
+#define SDIO_VENDOR_ID_BROADCOM 0x02d0
+#endif /* !defined(SDIO_VENDOR_ID_BROADCOM) */
+
+#define SDIO_DEVICE_ID_BROADCOM_DEFAULT 0x0000
+
+extern void wl_cfg80211_set_parent_dev(void *dev);
+extern void sdioh_sdmmc_devintr_off(sdioh_info_t *sd);
+extern void sdioh_sdmmc_devintr_on(sdioh_info_t *sd);
+extern void* bcmsdh_probe(osl_t *osh, void *dev, void *sdioh, void *adapter_info, uint bus_type,
+ uint bus_num, uint slot_num);
+extern int bcmsdh_remove(bcmsdh_info_t *bcmsdh);
+
+int sdio_function_init(void);
+void sdio_function_cleanup(void);
+
+#define DESCRIPTION "bcmsdh_sdmmc Driver"
+#define AUTHOR "Broadcom Corporation"
+
+/* module param defaults */
+static int clockoverride = 0;
+
+module_param(clockoverride, int, 0644);
+MODULE_PARM_DESC(clockoverride, "SDIO card clock override");
+
+#ifdef GLOBAL_SDMMC_INSTANCE
+PBCMSDH_SDMMC_INSTANCE gInstance;
+#endif
+
+/* Maximum number of bcmsdh_sdmmc devices supported by driver */
+#define BCMSDH_SDMMC_MAX_DEVICES 1
+
+extern volatile bool dhd_mmc_suspend;
+
+static int sdioh_probe(struct sdio_func *func)
+{
+ int host_idx = func->card->host->index;
+ uint32 rca = func->card->rca;
+ wifi_adapter_info_t *adapter;
+ osl_t *osh = NULL;
+ sdioh_info_t *sdioh = NULL;
+
+ sd_err(("bus num (host idx)=%d, slot num (rca)=%d\n", host_idx, rca));
+ adapter = dhd_wifi_platform_get_adapter(SDIO_BUS, host_idx, rca);
+ if (adapter != NULL) {
+ sd_err(("found adapter info '%s'\n", adapter->name));
+ adapter->bus_type = SDIO_BUS;
+ adapter->bus_num = host_idx;
+ adapter->slot_num = rca;
+ adapter->sdio_func = func;
+ } else
+ sd_err(("can't find adapter info for this chip\n"));
+
+#ifdef WL_CFG80211
+ wl_cfg80211_set_parent_dev(&func->dev);
+#endif
+
+ /* allocate SDIO Host Controller state info */
+ osh = osl_attach(&func->dev, SDIO_BUS, TRUE);
+ if (osh == NULL) {
+ sd_err(("%s: osl_attach failed\n", __FUNCTION__));
+ goto fail;
+ }
+ osl_static_mem_init(osh, adapter);
+ sdioh = sdioh_attach(osh, func);
+ if (sdioh == NULL) {
+ sd_err(("%s: sdioh_attach failed\n", __FUNCTION__));
+ goto fail;
+ }
+ sdioh->bcmsdh = bcmsdh_probe(osh, &func->dev, sdioh, adapter, SDIO_BUS, host_idx, rca);
+ if (sdioh->bcmsdh == NULL) {
+ sd_err(("%s: bcmsdh_probe failed\n", __FUNCTION__));
+ goto fail;
+ }
+
+ sdio_set_drvdata(func, sdioh);
+ return 0;
+
+fail:
+ if (sdioh != NULL)
+ sdioh_detach(osh, sdioh);
+ if (osh != NULL)
+ osl_detach(osh);
+ return -ENOMEM;
+}
+
+static void sdioh_remove(struct sdio_func *func)
+{
+ sdioh_info_t *sdioh;
+ osl_t *osh;
+
+ sdioh = sdio_get_drvdata(func);
+ if (sdioh == NULL) {
+ sd_err(("%s: error, no sdioh handler found\n", __FUNCTION__));
+ return;
+ }
+ sd_err(("%s: Enter\n", __FUNCTION__));
+
+ osh = sdioh->osh;
+ bcmsdh_remove(sdioh->bcmsdh);
+ sdioh_detach(osh, sdioh);
+ osl_detach(osh);
+}
+
+static int bcmsdh_sdmmc_probe(struct sdio_func *func,
+ const struct sdio_device_id *id)
+{
+ int ret = 0;
+
+ if (func == NULL)
+ return -EINVAL;
+
+ sd_err(("%s: Enter num=%d\n", __FUNCTION__, func->num));
+ sd_info(("sdio_bcmsdh: func->class=%x\n", func->class));
+ sd_info(("sdio_vendor: 0x%04x\n", func->vendor));
+ sd_info(("sdio_device: 0x%04x\n", func->device));
+ sd_info(("Function#: 0x%04x\n", func->num));
+
+#ifdef GLOBAL_SDMMC_INSTANCE
+ gInstance->func[func->num] = func;
+#endif
+
+ /* 4318 doesn't have function 2 */
+ if ((func->num == 2) || (func->num == 1 && func->device == 0x4))
+ ret = sdioh_probe(func);
+
+ return ret;
+}
+
+static void bcmsdh_sdmmc_remove(struct sdio_func *func)
+{
+ if (func == NULL) {
+ sd_err(("%s is called with NULL SDIO function pointer\n", __FUNCTION__));
+ return;
+ }
+
+ sd_trace(("bcmsdh_sdmmc: %s Enter\n", __FUNCTION__));
+ sd_info(("sdio_bcmsdh: func->class=%x\n", func->class));
+ sd_info(("sdio_vendor: 0x%04x\n", func->vendor));
+ sd_info(("sdio_device: 0x%04x\n", func->device));
+ sd_info(("Function#: 0x%04x\n", func->num));
+
+ if ((func->num == 2) || (func->num == 1 && func->device == 0x4))
+ sdioh_remove(func);
+}
+
+/* devices we support, null terminated */
+static const struct sdio_device_id bcmsdh_sdmmc_ids[] = {
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_DEFAULT) },
+ /* XXX This should not be in the external release, as it will attach to any SDIO
+ * device, even non-WLAN devices.
+ * Need to add IDs for the FALCON-based chips and put this under BCMINTERNAL
+ { SDIO_DEVICE_CLASS(SDIO_CLASS_NONE) },
+ */
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM4362_CHIP_ID) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM43751_CHIP_ID) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM43752_CHIP_ID) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM43012_CHIP_ID) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM43014_CHIP_ID) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM43014_D11N_ID) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM43014_D11N2G_ID) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM43014_D11N5G_ID) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM43013_CHIP_ID) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM43013_D11N_ID) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM43013_D11N2G_ID) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM43013_D11N5G_ID) },
+ { SDIO_DEVICE_CLASS(SDIO_CLASS_NONE) },
+ { 0, 0, 0, 0 /* end: all zeroes */
+ },
+};
+
+MODULE_DEVICE_TABLE(sdio, bcmsdh_sdmmc_ids);
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM)
+static int bcmsdh_sdmmc_suspend(struct device *pdev)
+{
+ int err;
+ sdioh_info_t *sdioh;
+ struct sdio_func *func = dev_to_sdio_func(pdev);
+ mmc_pm_flag_t sdio_flags;
+
+ printf("%s Enter func->num=%d\n", __FUNCTION__, func->num);
+ if (func->num != 2)
+ return 0;
+
+ dhd_mmc_suspend = TRUE;
+ sdioh = sdio_get_drvdata(func);
+ err = bcmsdh_suspend(sdioh->bcmsdh);
+ if (err) {
+ printf("%s bcmsdh_suspend err=%d\n", __FUNCTION__, err);
+ dhd_mmc_suspend = FALSE;
+ return err;
+ }
+
+ sdio_flags = sdio_get_host_pm_caps(func);
+ if (!(sdio_flags & MMC_PM_KEEP_POWER)) {
+ sd_err(("%s: can't keep power while host is suspended\n", __FUNCTION__));
+ dhd_mmc_suspend = FALSE;
+ return -EINVAL;
+ }
+
+ /* keep power while host suspended */
+ err = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
+ if (err) {
+ sd_err(("%s: error while trying to keep power\n", __FUNCTION__));
+ dhd_mmc_suspend = FALSE;
+ return err;
+ }
+ smp_mb();
+
+ printf("%s Exit\n", __FUNCTION__);
+ return 0;
+}
+
+static int bcmsdh_sdmmc_resume(struct device *pdev)
+{
+ sdioh_info_t *sdioh;
+ struct sdio_func *func = dev_to_sdio_func(pdev);
+
+ printf("%s Enter func->num=%d\n", __FUNCTION__, func->num);
+ if (func->num != 2)
+ return 0;
+
+ dhd_mmc_suspend = FALSE;
+ sdioh = sdio_get_drvdata(func);
+ bcmsdh_resume(sdioh->bcmsdh);
+
+ smp_mb();
+ printf("%s Exit\n", __FUNCTION__);
+ return 0;
+}
+
+static const struct dev_pm_ops bcmsdh_sdmmc_pm_ops = {
+ .suspend = bcmsdh_sdmmc_suspend,
+ .resume = bcmsdh_sdmmc_resume,
+};
+#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM) */
+
+#if defined(BCMLXSDMMC)
+static struct semaphore *notify_semaphore = NULL;
+
+static int dummy_probe(struct sdio_func *func,
+ const struct sdio_device_id *id)
+{
+ sd_err(("%s: enter\n", __FUNCTION__));
+ if (func && (func->num != 2)) {
+ return 0;
+ }
+
+ if (notify_semaphore)
+ up(notify_semaphore);
+ return 0;
+}
+
+static void dummy_remove(struct sdio_func *func)
+{
+}
+
+static struct sdio_driver dummy_sdmmc_driver = {
+ .probe = dummy_probe,
+ .remove = dummy_remove,
+ .name = "dummy_sdmmc",
+ .id_table = bcmsdh_sdmmc_ids,
+ };
+
+int sdio_func_reg_notify(void* semaphore)
+{
+ notify_semaphore = semaphore;
+ return sdio_register_driver(&dummy_sdmmc_driver);
+}
+
+void sdio_func_unreg_notify(void)
+{
+ OSL_SLEEP(15);
+ sdio_unregister_driver(&dummy_sdmmc_driver);
+}
+
+#endif /* defined(BCMLXSDMMC) */
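+
+/*
+ * Sketch of the intended notify flow (assumed caller, not part of this
+ * file): hand in a semaphore, power the card, and block until
+ * dummy_probe() sees function 2 enumerate:
+ *
+ *	struct semaphore sem;
+ *	sema_init(&sem, 0);
+ *	sdio_func_reg_notify(&sem);
+ *	// power on / rescan the card here
+ *	if (down_timeout(&sem, msecs_to_jiffies(2000)))
+ *		; // timed out: the card did not enumerate
+ *	sdio_func_unreg_notify();
+ */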
+
+static struct sdio_driver bcmsdh_sdmmc_driver = {
+ .probe = bcmsdh_sdmmc_probe,
+ .remove = bcmsdh_sdmmc_remove,
+ .name = "bcmsdh_sdmmc",
+ .id_table = bcmsdh_sdmmc_ids,
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM)
+ .drv = {
+ .pm = &bcmsdh_sdmmc_pm_ops,
+ },
+#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM) */
+ };
+
+struct sdos_info {
+ sdioh_info_t *sd;
+ spinlock_t lock;
+};
+
+/* Interrupt enable/disable */
+SDIOH_API_RC
+sdioh_interrupt_set(sdioh_info_t *sd, bool enable)
+{
+ if (!sd)
+ return BCME_BADARG;
+
+ sd_trace(("%s: %s\n", __FUNCTION__, enable ? "Enabling" : "Disabling"));
+ return SDIOH_API_RC_SUCCESS;
+}
+
+#ifdef BCMSDH_MODULE
+static int __init
+bcmsdh_module_init(void)
+{
+ int error = 0;
+ error = sdio_function_init();
+ return error;
+}
+
+static void __exit
+bcmsdh_module_cleanup(void)
+{
+ sdio_function_cleanup();
+}
+
+module_init(bcmsdh_module_init);
+module_exit(bcmsdh_module_cleanup);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION(DESCRIPTION);
+MODULE_AUTHOR(AUTHOR);
+
+#endif /* BCMSDH_MODULE */
+/*
+ * module init
+*/
+int bcmsdh_register_client_driver(void)
+{
+ return sdio_register_driver(&bcmsdh_sdmmc_driver);
+}
+
+/*
+ * module cleanup
+*/
+void bcmsdh_unregister_client_driver(void)
+{
+ sdio_unregister_driver(&bcmsdh_sdmmc_driver);
+}
diff --git a/bcmdhd.101.10.361.x/bcmsdspi.h b/bcmdhd.101.10.361.x/bcmsdspi.h
new file mode 100755
index 0000000..9a29370
--- /dev/null
+++ b/bcmdhd.101.10.361.x/bcmsdspi.h
@@ -0,0 +1,147 @@
+/*
+ * SD-SPI Protocol Conversion - BCMSDH->SPI Translation Layer
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmsdspi.h 833013 2019-08-02 16:26:31Z jl904071 $
+ */
+#ifndef _BCM_SD_SPI_H
+#define _BCM_SD_SPI_H
+
+/* global msglevel for debug messages - bitvals come from sdiovar.h */
+
+#ifdef BCMDBG
+#define sd_err(x) do { if (sd_msglevel & SDH_ERROR_VAL) printf x; } while (0)
+#define sd_trace(x) do { if (sd_msglevel & SDH_TRACE_VAL) printf x; } while (0)
+#define sd_info(x) do { if (sd_msglevel & SDH_INFO_VAL) printf x; } while (0)
+#define sd_debug(x) do { if (sd_msglevel & SDH_DEBUG_VAL) printf x; } while (0)
+#define sd_data(x) do { if (sd_msglevel & SDH_DATA_VAL) printf x; } while (0)
+#define sd_ctrl(x) do { if (sd_msglevel & SDH_CTRL_VAL) printf x; } while (0)
+#else
+#define sd_err(x)
+#define sd_trace(x)
+#define sd_info(x)
+#define sd_debug(x)
+#define sd_data(x)
+#define sd_ctrl(x)
+#endif
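+
+/* Example (illustrative): with BCMDBG defined, setting
+ *	sd_msglevel = SDH_ERROR_VAL | SDH_TRACE_VAL;
+ * enables sd_err() and sd_trace() output while leaving sd_info(),
+ * sd_debug(), sd_data() and sd_ctrl() compiled in but silent.
+ */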
+
+#ifdef BCMPERFSTATS
+#define sd_log(x) do { if (sd_msglevel & SDH_LOG_VAL) bcmlog x; } while (0)
+#else
+#define sd_log(x)
+#endif
+
+#define SDIOH_ASSERT(exp) \
+ do { if (!(exp)) \
+ printf("!!!ASSERT fail: file %s lines %d", __FILE__, __LINE__); \
+ } while (0)
+
+#define BLOCK_SIZE_4318 64
+#define BLOCK_SIZE_4328 512
+
+/* internal return code */
+#define SUCCESS 0
+#undef ERROR
+#define ERROR 1
+
+/* private bus modes */
+#define SDIOH_MODE_SPI 0
+
+#define USE_BLOCKMODE 0x2 /* Block mode can be single block or multi */
+#define USE_MULTIBLOCK 0x4
+
+struct sdioh_info {
+ uint cfg_bar; /* pci cfg address for bar */
+ uint32 caps; /* cached value of capabilities reg */
+ uint bar0; /* BAR0 for PCI Device */
+ osl_t *osh; /* osh handler */
+ void *controller; /* Pointer to SPI Controller's private data struct */
+
+ uint lockcount; /* nest count of sdspi_lock() calls */
+	bool		client_intr_enabled;	/* interrupt connected flag */
+ bool intr_handler_valid; /* client driver interrupt handler valid */
+ sdioh_cb_fn_t intr_handler; /* registered interrupt handler */
+ void *intr_handler_arg; /* argument to call interrupt handler */
+ bool initialized; /* card initialized */
+ uint32 target_dev; /* Target device ID */
+ uint32 intmask; /* Current active interrupts */
+ void *sdos_info; /* Pointer to per-OS private data */
+
+ uint32 controller_type; /* Host controller type */
+ uint8 version; /* Host Controller Spec Compliance Version */
+ uint irq; /* Client irq */
+ uint32 intrcount; /* Client interrupts */
+ uint32 local_intrcount; /* Controller interrupts */
+ bool host_init_done; /* Controller initted */
+ bool card_init_done; /* Client SDIO interface initted */
+ bool polled_mode; /* polling for command completion */
+
+ bool sd_use_dma; /* DMA on CMD53 */
+ bool sd_blockmode; /* sd_blockmode == FALSE => 64 Byte Cmd 53s. */
+ /* Must be on for sd_multiblock to be effective */
+	bool		use_client_ints;	/* If this is false, make sure to restore the
+						 * polling hack in wl_linux.c:wl_timer()
+						 */
+	bool		got_hcint;		/* Host Controller interrupt. */
+ int adapter_slot; /* Maybe dealing with multiple slots/controllers */
+ int sd_mode; /* SD1/SD4/SPI */
+ int client_block_size[SDIOD_MAX_IOFUNCS]; /* Blocksize */
+ uint32 data_xfer_count; /* Current register transfer size */
+ uint32 cmd53_wr_data; /* Used to pass CMD53 write data */
+ uint32 card_response; /* Used to pass back response status byte */
+ uint32 card_rsp_data; /* Used to pass back response data word */
+ uint16 card_rca; /* Current Address */
+ uint8 num_funcs; /* Supported funcs on client */
+ uint32 com_cis_ptr;
+ uint32 func_cis_ptr[SDIOD_MAX_IOFUNCS];
+ void *dma_buf;
+ ulong dma_phys;
+ int r_cnt; /* rx count */
+ int t_cnt; /* tx_count */
+};
+
+/************************************************************
+ * Internal interfaces: per-port references into bcmsdspi.c
+ */
+
+/* Global message bits */
+extern uint sd_msglevel;
+
+/**************************************************************
+ * Internal interfaces: bcmsdspi.c references to per-port code
+ */
+
+/* Register mapping routines */
+extern uint32 *spi_reg_map(osl_t *osh, uintptr addr, int size);
+extern void spi_reg_unmap(osl_t *osh, uintptr addr, int size);
+
+/* Interrupt (de)registration routines */
+extern int spi_register_irq(sdioh_info_t *sd, uint irq);
+extern void spi_free_irq(uint irq, sdioh_info_t *sd);
+
+/* OS-specific interrupt wrappers (atomic interrupt enable/disable) */
+extern void spi_lock(sdioh_info_t *sd);
+extern void spi_unlock(sdioh_info_t *sd);
+
+/* Allocate/init/free per-OS private data */
+extern int spi_osinit(sdioh_info_t *sd);
+extern void spi_osfree(sdioh_info_t *sd);
+
+#endif /* _BCM_SD_SPI_H */
diff --git a/bcmdhd.101.10.361.x/bcmsdspi_linux.c b/bcmdhd.101.10.361.x/bcmsdspi_linux.c
new file mode 100755
index 0000000..b771682
--- /dev/null
+++ b/bcmdhd.101.10.361.x/bcmsdspi_linux.c
@@ -0,0 +1,433 @@
+/*
+ * Broadcom SPI Host Controller Driver - Linux Per-port
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+#include <typedefs.h>
+#include <bcmutils.h>
+
+#include <bcmsdbus.h> /* bcmsdh to/from specific controller APIs */
+#include <sdiovar.h> /* to get msglevel bit values */
+
+#ifdef BCMSPI_ANDROID
+#include <bcmsdh.h>
+#include <bcmspibrcm.h>
+#include <linux/spi/spi.h>
+#else
+#include <pcicfg.h>
+#include <sdio.h> /* SDIO Device and Protocol Specs */
+#include <linux/sched.h> /* request_irq(), free_irq() */
+#include <bcmsdspi.h>
+#include <bcmspi.h>
+#endif /* BCMSPI_ANDROID */
+
+#ifndef BCMSPI_ANDROID
+extern uint sd_crc;
+module_param(sd_crc, uint, 0);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#define KERNEL26
+#endif
+#endif /* !BCMSPI_ANDROID */
+
+struct sdos_info {
+ sdioh_info_t *sd;
+ spinlock_t lock;
+#ifndef BCMSPI_ANDROID
+ wait_queue_head_t intr_wait_queue;
+#endif /* !BCMSPI_ANDROID */
+};
+
+#ifndef BCMSPI_ANDROID
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#define BLOCKABLE() (!in_atomic())
+#else
+#define BLOCKABLE() (!in_interrupt()) /* XXX Doesn't handle CONFIG_PREEMPT? */
+#endif
+
+/* Interrupt handler */
+static irqreturn_t
+sdspi_isr(int irq, void *dev_id
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
+, struct pt_regs *ptregs
+#endif
+)
+{
+ sdioh_info_t *sd;
+ struct sdos_info *sdos;
+ bool ours;
+
+ sd = (sdioh_info_t *)dev_id;
+ sd->local_intrcount++;
+
+ if (!sd->card_init_done) {
+ sd_err(("%s: Hey Bogus intr...not even initted: irq %d\n", __FUNCTION__, irq));
+ return IRQ_RETVAL(FALSE);
+ } else {
+ ours = spi_check_client_intr(sd, NULL);
+
+ /* For local interrupts, wake the waiting process */
+ if (ours && sd->got_hcint) {
+ sdos = (struct sdos_info *)sd->sdos_info;
+ wake_up_interruptible(&sdos->intr_wait_queue);
+ }
+
+ return IRQ_RETVAL(ours);
+ }
+}
+#endif /* !BCMSPI_ANDROID */
+
+#ifdef BCMSPI_ANDROID
+static struct spi_device *gBCMSPI = NULL;
+
+extern int bcmsdh_probe(struct device *dev);
+extern int bcmsdh_remove(struct device *dev);
+
+static int bcmsdh_spi_probe(struct spi_device *spi_dev)
+{
+ int ret = 0;
+
+ gBCMSPI = spi_dev;
+
+#ifdef SPI_PIO_32BIT_RW
+ spi_dev->bits_per_word = 32;
+#else
+ spi_dev->bits_per_word = 8;
+#endif /* SPI_PIO_32BIT_RW */
+ ret = spi_setup(spi_dev);
+
+ if (ret) {
+ sd_err(("bcmsdh_spi_probe: spi_setup fail with %d\n", ret));
+ }
+ sd_err(("bcmsdh_spi_probe: spi_setup with %d, bits_per_word=%d\n",
+ ret, spi_dev->bits_per_word));
+ ret = bcmsdh_probe(&spi_dev->dev);
+
+ return ret;
+}
+
+static int bcmsdh_spi_remove(struct spi_device *spi_dev)
+{
+ int ret = 0;
+
+ ret = bcmsdh_remove(&spi_dev->dev);
+ gBCMSPI = NULL;
+
+ return ret;
+}
+
+static struct spi_driver bcmsdh_spi_driver = {
+ .probe = bcmsdh_spi_probe,
+ .remove = bcmsdh_spi_remove,
+ .driver = {
+ .name = "wlan_spi",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+};
+
+/*
+ * module init
+*/
+int bcmsdh_register_client_driver(void)
+{
+ int error = 0;
+ sd_trace(("bcmsdh_gspi: %s Enter\n", __FUNCTION__));
+
+ error = spi_register_driver(&bcmsdh_spi_driver);
+
+ return error;
+}
+
+/*
+ * module cleanup
+*/
+void bcmsdh_unregister_client_driver(void)
+{
+ sd_trace(("%s Enter\n", __FUNCTION__));
+ spi_unregister_driver(&bcmsdh_spi_driver);
+}
+#endif /* BCMSPI_ANDROID */
+
+/* Register with Linux for interrupts */
+int
+spi_register_irq(sdioh_info_t *sd, uint irq)
+{
+#ifndef BCMSPI_ANDROID
+ sd_trace(("Entering %s: irq == %d\n", __FUNCTION__, irq));
+ if (request_irq(irq, sdspi_isr, IRQF_SHARED, "bcmsdspi", sd) < 0) {
+ sd_err(("%s: request_irq() failed\n", __FUNCTION__));
+ return ERROR;
+ }
+#endif /* !BCMSPI_ANDROID */
+ return SUCCESS;
+}
+
+/* Free Linux irq */
+void
+spi_free_irq(uint irq, sdioh_info_t *sd)
+{
+#ifndef BCMSPI_ANDROID
+ free_irq(irq, sd);
+#endif /* !BCMSPI_ANDROID */
+}
+
+/* Map Host controller registers */
+#ifndef BCMSPI_ANDROID
+uint32 *
+spi_reg_map(osl_t *osh, uintptr addr, int size)
+{
+ return (uint32 *)REG_MAP(addr, size);
+}
+
+void
+spi_reg_unmap(osl_t *osh, uintptr addr, int size)
+{
+ REG_UNMAP((void*)(uintptr)addr);
+}
+#endif /* !BCMSPI_ANDROID */
+
+int
+spi_osinit(sdioh_info_t *sd)
+{
+ struct sdos_info *sdos;
+
+ sdos = (struct sdos_info*)MALLOC(sd->osh, sizeof(struct sdos_info));
+ sd->sdos_info = (void*)sdos;
+ if (sdos == NULL)
+ return BCME_NOMEM;
+
+ sdos->sd = sd;
+ spin_lock_init(&sdos->lock);
+#ifndef BCMSPI_ANDROID
+ init_waitqueue_head(&sdos->intr_wait_queue);
+#endif /* !BCMSPI_ANDROID */
+ return BCME_OK;
+}
+
+void
+spi_osfree(sdioh_info_t *sd)
+{
+ struct sdos_info *sdos;
+ ASSERT(sd && sd->sdos_info);
+
+ sdos = (struct sdos_info *)sd->sdos_info;
+ MFREE(sd->osh, sdos, sizeof(struct sdos_info));
+}
+
+/* Interrupt enable/disable */
+SDIOH_API_RC
+sdioh_interrupt_set(sdioh_info_t *sd, bool enable)
+{
+ ulong flags;
+ struct sdos_info *sdos;
+
+ sd_trace(("%s: %s\n", __FUNCTION__, enable ? "Enabling" : "Disabling"));
+
+ sdos = (struct sdos_info *)sd->sdos_info;
+ ASSERT(sdos);
+
+ if (!(sd->host_init_done && sd->card_init_done)) {
+ sd_err(("%s: Card & Host are not initted - bailing\n", __FUNCTION__));
+ return SDIOH_API_RC_FAIL;
+ }
+
+#ifndef BCMSPI_ANDROID
+ if (enable && !(sd->intr_handler && sd->intr_handler_arg)) {
+ sd_err(("%s: no handler registered, will not enable\n", __FUNCTION__));
+ return SDIOH_API_RC_FAIL;
+ }
+#endif /* !BCMSPI_ANDROID */
+
+ /* Ensure atomicity for enable/disable calls */
+ spin_lock_irqsave(&sdos->lock, flags);
+
+ sd->client_intr_enabled = enable;
+#ifndef BCMSPI_ANDROID
+ if (enable && !sd->lockcount)
+ spi_devintr_on(sd);
+ else
+ spi_devintr_off(sd);
+#endif /* !BCMSPI_ANDROID */
+
+ spin_unlock_irqrestore(&sdos->lock, flags);
+
+ return SDIOH_API_RC_SUCCESS;
+}
+
+/* Protect against reentrancy (disable device interrupts while executing) */
+void
+spi_lock(sdioh_info_t *sd)
+{
+ ulong flags;
+ struct sdos_info *sdos;
+
+ sdos = (struct sdos_info *)sd->sdos_info;
+ ASSERT(sdos);
+
+ sd_trace(("%s: %d\n", __FUNCTION__, sd->lockcount));
+
+ spin_lock_irqsave(&sdos->lock, flags);
+ if (sd->lockcount) {
+ sd_err(("%s: Already locked!\n", __FUNCTION__));
+ ASSERT(sd->lockcount == 0);
+ }
+#ifdef BCMSPI_ANDROID
+ if (sd->client_intr_enabled)
+ bcmsdh_oob_intr_set(0);
+#else
+ spi_devintr_off(sd);
+#endif /* BCMSPI_ANDROID */
+ sd->lockcount++;
+ spin_unlock_irqrestore(&sdos->lock, flags);
+}
+
+/* Enable client interrupt */
+void
+spi_unlock(sdioh_info_t *sd)
+{
+ ulong flags;
+ struct sdos_info *sdos;
+
+ sd_trace(("%s: %d, %d\n", __FUNCTION__, sd->lockcount, sd->client_intr_enabled));
+ ASSERT(sd->lockcount > 0);
+
+ sdos = (struct sdos_info *)sd->sdos_info;
+ ASSERT(sdos);
+
+ spin_lock_irqsave(&sdos->lock, flags);
+ if (--sd->lockcount == 0 && sd->client_intr_enabled) {
+#ifdef BCMSPI_ANDROID
+ bcmsdh_oob_intr_set(1);
+#else
+ spi_devintr_on(sd);
+#endif /* BCMSPI_ANDROID */
+ }
+ spin_unlock_irqrestore(&sdos->lock, flags);
+}
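+
+/*
+ * Usage sketch (not from this file): spi_lock()/spi_unlock() bracket any
+ * sequence that must not race the client interrupt; nesting is caught by
+ * the lockcount checks above:
+ *
+ *	spi_lock(sd);	// masks the device interrupt
+ *	// ... issue SPI command / data transfer ...
+ *	spi_unlock(sd);	// re-enables it if client_intr_enabled is set
+ */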
+
+#ifndef BCMSPI_ANDROID
+void spi_waitbits(sdioh_info_t *sd, bool yield)
+{
+#ifndef BCMSDYIELD
+ ASSERT(!yield);
+#endif
+ sd_trace(("%s: yield %d canblock %d\n",
+ __FUNCTION__, yield, BLOCKABLE()));
+
+ /* Clear the "interrupt happened" flag and last intrstatus */
+ sd->got_hcint = FALSE;
+
+#ifdef BCMSDYIELD
+ if (yield && BLOCKABLE()) {
+ struct sdos_info *sdos;
+ sdos = (struct sdos_info *)sd->sdos_info;
+ /* Wait for the indication, the interrupt will be masked when the ISR fires. */
+ wait_event_interruptible(sdos->intr_wait_queue, (sd->got_hcint));
+ } else
+#endif /* BCMSDYIELD */
+ {
+ spi_spinbits(sd);
+ }
+
+}
+#else /* !BCMSPI_ANDROID */
+int bcmgspi_dump = 0; /* Set to dump complete trace of all SPI bus transactions */
+
+static void
+hexdump(char *pfx, unsigned char *msg, int msglen)
+{
+ int i, col;
+ char buf[80];
+
+ ASSERT(strlen(pfx) + 49 <= sizeof(buf));
+
+ col = 0;
+
+ for (i = 0; i < msglen; i++, col++) {
+ if (col % 16 == 0)
+ strcpy(buf, pfx);
+ sprintf(buf + strlen(buf), "%02x", msg[i]);
+ if ((col + 1) % 16 == 0)
+ printf("%s\n", buf);
+ else
+ sprintf(buf + strlen(buf), " ");
+ }
+
+ if (col % 16 != 0)
+ printf("%s\n", buf);
+}
+
+/* Send/Receive an SPI Packet */
+void
+spi_sendrecv(sdioh_info_t *sd, uint8 *msg_out, uint8 *msg_in, int msglen)
+{
+ int write = 0;
+ int tx_len = 0;
+ struct spi_message msg;
+ struct spi_transfer t[2];
+
+ spi_message_init(&msg);
+ memset(t, 0, 2*sizeof(struct spi_transfer));
+
+ if (sd->wordlen == 2)
+#if !(defined(SPI_PIO_RW_BIGENDIAN) && defined(SPI_PIO_32BIT_RW))
+ write = msg_out[2] & 0x80; /* XXX bit 7: read:0, write :1 */
+#else
+ write = msg_out[1] & 0x80; /* XXX bit 7: read:0, write :1 */
+#endif /* !(defined(SPI_PIO_RW_BIGENDIAN) && defined(SPI_PIO_32BIT_RW)) */
+ if (sd->wordlen == 4)
+#if !(defined(SPI_PIO_RW_BIGENDIAN) && defined(SPI_PIO_32BIT_RW))
+ write = msg_out[0] & 0x80; /* XXX bit 7: read:0, write :1 */
+#else
+ write = msg_out[3] & 0x80; /* XXX bit 7: read:0, write :1 */
+#endif /* !(defined(SPI_PIO_RW_BIGENDIAN) && defined(SPI_PIO_32BIT_RW)) */
+
+ if (bcmgspi_dump) {
+ hexdump(" OUT: ", msg_out, msglen);
+ }
+
+ tx_len = write ? msglen-4 : 4;
+
+ sd_trace(("spi_sendrecv: %s, wordlen %d, cmd : 0x%02x 0x%02x 0x%02x 0x%02x\n",
+ write ? "WR" : "RD", sd->wordlen,
+ msg_out[0], msg_out[1], msg_out[2], msg_out[3]));
+
+ t[0].tx_buf = (char *)&msg_out[0];
+ t[0].rx_buf = 0;
+ t[0].len = tx_len;
+
+ spi_message_add_tail(&t[0], &msg);
+
+ t[1].rx_buf = (char *)&msg_in[tx_len];
+ t[1].tx_buf = 0;
+ t[1].len = msglen-tx_len;
+
+ spi_message_add_tail(&t[1], &msg);
+ spi_sync(gBCMSPI, &msg);
+
+ if (bcmgspi_dump) {
+ hexdump(" IN : ", msg_in, msglen);
+ }
+}
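+
+/* Note on the transfer layout above (illustrative): for a write, t[0]
+ * clocks out everything except the trailing 4 bytes and t[1] clocks in the
+ * 4 response bytes; for a read, only the 4 command bytes go out and the
+ * rest of the frame is clocked in through t[1].
+ */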
+#endif /* !BCMSPI_ANDROID */
diff --git a/bcmdhd.101.10.361.x/bcmsdstd.c b/bcmdhd.101.10.361.x/bcmsdstd.c
new file mode 100755
index 0000000..b58de62
--- /dev/null
+++ b/bcmdhd.101.10.361.x/bcmsdstd.c
@@ -0,0 +1,5406 @@
+/*
+ * 'Standard' SDIO HOST CONTROLLER driver
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+#include <typedefs.h>
+
+#include <bcmdevs.h>
+#include <bcmendian.h>
+#include <bcmutils.h>
+#include <osl.h>
+#include <siutils.h>
+#include <sdio.h> /* SDIO Device and Protocol Specs */
+#include <sdioh.h> /* Standard SDIO Host Controller Specification */
+#include <bcmsdbus.h> /* bcmsdh to/from specific controller APIs */
+#include <sdiovar.h> /* ioctl/iovars */
+#include <pcicfg.h>
+#include <bcmsdstd.h>
+/* XXX Quick NDIS hack */
+#ifdef NDIS
+#define inline __inline
+#define PCI_CFG_VID 0
+#define PCI_CFG_BAR0 0x10
+#endif
+
+#define SD_PAGE_BITS 12
+#define SD_PAGE (1 << SD_PAGE_BITS)
+#define SDSTD_MAX_TUNING_PHASE 5
+
+/*
+ * Upper GPIO 16 - 31 are available on J22
+ * J22.pin3 == gpio16, J22.pin5 == gpio17, etc.
+ * Lower GPIO 0 - 15 are available on J15 (WL_GPIO)
+ */
+#define SDH_GPIO16 16
+#define SDH_GPIO_ENABLE 0xffff
+
+#include <bcmsdstd.h>
+#include <sbsdio.h> /* SDIOH (host controller) core hardware definitions */
+
+/* Globals */
+uint sd_msglevel = SDH_ERROR_VAL;
+
+uint sd_hiok = TRUE; /* Use hi-speed mode if available? */
+uint sd_sdmode = SDIOH_MODE_SD4; /* Use SD4 mode by default */
+uint sd_f2_blocksize = 64; /* Default blocksize */
+uint sd_f1_blocksize = BLOCK_SIZE_4318; /* Default blocksize */
+
+#define sd3_trace(x)
+
+/* sd3ClkMode: 0-SDR12 [25MHz]
+ * 1-SDR25 [50MHz]+SHS=1
+ * 2-SDR50 [100MHz]+SSDR50=1
+ * 3-SDR104 [208MHz]+SSDR104=1
+ * 4-DDR50 [50MHz]+SDDR50=1
+ */
+#define SD3CLKMODE_0_SDR12 (0)
+#define SD3CLKMODE_1_SDR25 (1)
+#define SD3CLKMODE_2_SDR50 (2)
+#define SD3CLKMODE_3_SDR104 (3)
+#define SD3CLKMODE_4_DDR50 (4)
+#define SD3CLKMODE_DISABLED (-1)
+#define SD3CLKMODE_AUTO (99)
+
+/* values for global_UHSI_Supp : Means host and card caps match. */
+#define HOST_SDR_UNSUPP (0)
+#define HOST_SDR_12_25 (1)
+#define HOST_SDR_50_104_DDR (2)
+
+/* Depends on / affects sd3_autoselect_uhsi_max; see its definition below. */
+int sd_uhsimode = SD3CLKMODE_DISABLED;
+uint sd_tuning_period = CAP3_RETUNING_TC_OTHER;
+uint sd_delay_value = 500000;
+/* Enables host-to-dongle glomming. Also increases the DMA buffer size,
+ * which improves rx throughput since fewer CMD53 transactions are needed.
+ */
+#ifdef BCMSDIOH_TXGLOM
+uint sd_txglom;
+#ifdef LINUX
+module_param(sd_txglom, uint, 0);
+#endif
+#endif /* BCMSDIOH_TXGLOM */
+
+char dhd_sdiod_uhsi_ds_override[2] = {' '};
+
+#define MAX_DTS_INDEX (3)
+#define DRVSTRN_MAX_CHAR ('D')
+#define DRVSTRN_IGNORE_CHAR (' ')
+
+char DTS_vals[MAX_DTS_INDEX + 1] = {
+ 0x1, /* Driver Strength Type-A */
+ 0x0, /* Driver Strength Type-B */
+ 0x2, /* Driver Strength Type-C */
+ 0x3, /* Driver Strength Type-D */
+ };
+
+/* Depends on / affects sd_uhsimode.
+   Selects the MAX speed automatically based on the capabilities of host
+   and card. If this is 1, sd_uhsimode is ignored. If sd_uhsimode is set
+   explicitly by the user, this variable becomes 0. Default value: 0.
+   [XXX:TBD: for future]
+ */
+uint32 sd3_autoselect_uhsi_max = 0;
+
+#define MAX_TUNING_ITERS (40)
+/* (150+10) ms total tuning budget, divided across the per-loop delay */
+#define PER_TRY_TUNING_DELAY_MS (160/MAX_TUNING_ITERS)
+#define CLKTUNING_MAX_BRR_RETRIES (1000) /* 1 ms: 1000 retries with 1 us delay per loop */
+
+/* Table analogous to the preset value register, provided because the
+* current HC doesn't support the preset value register.
+* All entries have DrvStr 'B' [val:0] and CLKGEN 0.
+*/
+static unsigned short presetval_sw_table[] = {
+ 0x0520, /* initialization: DrvStr:'B' [0]; CLKGen:0;
+ * SDCLKFreqSel: 520 [division: 320*2 = 640: ~400 KHz]
+ */
+ 0x0008, /* default speed:DrvStr:'B' [0]; CLKGen:0;
+ * SDCLKFreqSel: 8 [division: 6*2 = 12: ~25 MHz]
+ */
+ 0x0004, /* High speed: DrvStr:'B' [0]; CLKGen:0;
+ * SDCLKFreqSel: 4 [division: 3*2 = 6: ~50 MHz]
+ */
+ 0x0008, /* SDR12: DrvStr:'B' [0]; CLKGen:0;
+ * SDCLKFreqSel: 8 [division: 6*2 = 12: ~25 MHz]
+ */
+ 0x0004, /* SDR25: DrvStr:'B' [0]; CLKGen:0;
+ * SDCLKFreqSel: 4 [division: 3*2 = 6: ~50 MHz]
+ */
+ 0x0001, /* SDR50: DrvStr:'B' [0]; CLKGen:0;
+ * SDCLKFreqSel: 2 [division: 1*2 = 2: ~100 MHz]
+ */
+ 0x0001, /* SDR104: DrvStr:'B' [0]; CLKGen:0;
+ SDCLKFreqSel: 1 [no division: ~255/~208 MHz]
+ */
+ 0x0002 /* DDR50: DrvStr:'B' [0]; CLKGen:0;
+ SDCLKFreqSel: 4 [division: 3*2 = 6: ~50 MHz]
+ */
+};
+
+/* This is to have software overrides to the hardware. Info follows:
+ For override [1]: Preset registers: not supported
+ Voltage switch: not supported
+ Clock Tuning: not supported
+*/
+bool sd3_sw_override1 = FALSE;
+bool sd3_sw_read_magic_bytes = FALSE;
+
+#define SD3_TUNING_REQD(sd, sd_uhsimode) ((sd_uhsimode != SD3CLKMODE_DISABLED) && \
+ (sd->version == HOST_CONTR_VER_3) && \
+ ((sd_uhsimode == SD3CLKMODE_3_SDR104) || \
+ ((sd_uhsimode == SD3CLKMODE_2_SDR50) && \
+ (GFIELD(sd->caps3, CAP3_TUNING_SDR50)))))
+
+/* find next power of 2 */
+#define NEXT_POW2(n) {n--; n |= n>>1; n |= n>>2; n |= n>>4; n++;}
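+
+/* Worked example (illustrative): NEXT_POW2(600) -> n-- gives 599 (0x257);
+ * the or-shift cascade saturates the bits below the MSB, giving 1023
+ * (0x3ff); n++ then yields 1024. For values whose significant bits span
+ * more than 8 positions, extra n |= n>>8 (and n>>16) steps would be
+ * needed to guarantee saturation.
+ */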
+
+#ifdef BCMSDYIELD
+bool sd_yieldcpu = TRUE; /* Allow CPU yielding for buffer requests */
+uint sd_minyield = 0; /* Minimum xfer size to allow CPU yield */
+bool sd_forcerb = FALSE; /* Force sync readback in intrs_on/off */
+#endif
+
+/* XXX: Issues with CMD14 enter/exit sleep
+ * XXX: Temp fix for special CMD14 handling
+ */
+#define F1_SLEEPCSR_ADDR 0x1001F
+
+uint sd_divisor = 2;	/* Default 48MHz/2 = 24MHz;
+			 * may be changed at runtime for 208 MHz (SDR104) operation
+			 */
+
+uint sd_power = 1; /* Default to SD Slot powered ON */
+uint sd_3_power_save = 1; /* Default to SDIO 3.0 power save */
+uint sd_clock = 1; /* Default to SD Clock turned ON */
+uint sd_pci_slot = 0xFFFFffff; /* Used to force selection of a particular PCI slot */
+uint8 sd_dma_mode = DMA_MODE_AUTO; /* Default to AUTO & program based on capability */
+
+/* XXX Base timeout counter value on 48MHz (2^20 @ 48MHz => 21845us)
+ * Could adjust by adding sd_divisor (to maintain bit count) but really
+ * need something more elaborate to do that right. Still allows xfer
+ * of about 1000 bytes at 400KHz, so constant is ok.
+ * Timeout control N produces 2^(13+N) counter.
+ */
+uint sd_toctl = 7;
+static bool trap_errs = FALSE;
+
+static const char *dma_mode_description[] = { "PIO", "SDMA", "ADMA1", "32b ADMA2", "64b ADMA2" };
+
+/* Prototypes */
+static bool sdstd_start_clock(sdioh_info_t *sd, uint16 divisor);
+static uint16 sdstd_start_power(sdioh_info_t *sd, int volts_req);
+static bool sdstd_bus_width(sdioh_info_t *sd, int width);
+static int sdstd_set_highspeed_mode(sdioh_info_t *sd, bool HSMode);
+static int sdstd_set_dma_mode(sdioh_info_t *sd, int8 dma_mode);
+static int sdstd_card_enablefuncs(sdioh_info_t *sd);
+static void sdstd_cmd_getrsp(sdioh_info_t *sd, uint32 *rsp_buffer, int count);
+static int sdstd_cmd_issue(sdioh_info_t *sd, bool use_dma, uint32 cmd, uint32 arg);
+static int sdstd_card_regread(sdioh_info_t *sd, int func, uint32 regaddr,
+ int regsize, uint32 *data);
+static int sdstd_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr,
+ int regsize, uint32 data);
+static int sdstd_driver_init(sdioh_info_t *sd);
+static bool sdstd_reset(sdioh_info_t *sd, bool host_reset, bool client_reset);
+static int sdstd_card_buf(sdioh_info_t *sd, int rw, int func, bool fifo,
+ uint32 addr, int nbytes, uint32 *data);
+static int sdstd_abort(sdioh_info_t *sd, uint func);
+static int sdstd_check_errs(sdioh_info_t *sdioh_info, uint32 cmd, uint32 arg);
+static int set_client_block_size(sdioh_info_t *sd, int func, int blocksize);
+static void sd_map_dma(sdioh_info_t * sd);
+static void sd_unmap_dma(sdioh_info_t * sd);
+static void sd_clear_adma_dscr_buf(sdioh_info_t *sd);
+static void sd_fill_dma_data_buf(sdioh_info_t *sd, uint8 data);
+static void sd_create_adma_descriptor(sdioh_info_t *sd,
+ uint32 index, uint32 addr_phys,
+ uint16 length, uint16 flags);
+static void sd_dump_adma_dscr(sdioh_info_t *sd);
+static void sdstd_dumpregs(sdioh_info_t *sd);
+
+static int sdstd_3_set_highspeed_uhsi_mode(sdioh_info_t *sd, int sd3ClkMode);
+static int sdstd_3_sigvoltswitch_proc(sdioh_info_t *sd);
+static int sdstd_3_get_matching_uhsi_clkmode(sdioh_info_t *sd,
+ int sd3_requested_clkmode);
+static bool sdstd_3_get_matching_drvstrn(sdioh_info_t *sd,
+ int sd3_requested_clkmode, uint32 *drvstrn, uint16 *presetval);
+static int sdstd_3_clock_wrapper(sdioh_info_t *sd);
+static int sdstd_clock_wrapper(sdioh_info_t *sd);
+
+#ifdef BCMINTERNAL
+#ifdef NOTUSED
+static int parse_caps(uint32 caps_reg, char *buf, int len);
+static int parse_state(uint32 state_reg, char *buf, int len);
+static void cis_fetch(sdioh_info_t *sd, int func, char *data, int len);
+#endif /* NOTUSED */
+#endif /* BCMINTERNAL */
+
+#ifdef BCMDBG
+static void print_regs(sdioh_info_t *sd);
+#endif
+
+/*
+ * Private register access routines.
+ */
+
+/* 16 bit PCI regs */
+
+/* XXX This is a hack to satisfy the -Wmissing-prototypes warning */
+extern uint16 sdstd_rreg16(sdioh_info_t *sd, uint reg);
+uint16
+sdstd_rreg16(sdioh_info_t *sd, uint reg)
+{
+
+ volatile uint16 data = *(volatile uint16 *)(sd->mem_space + reg);
+ sd_ctrl(("16: R Reg 0x%02x, Data 0x%x\n", reg, data));
+ return data;
+}
+
+/* XXX This is a hack to satisfy the -Wmissing-prototypes warning */
+extern void sdstd_wreg16(sdioh_info_t *sd, uint reg, uint16 data);
+void
+sdstd_wreg16(sdioh_info_t *sd, uint reg, uint16 data)
+{
+ *(volatile uint16 *)(sd->mem_space + reg) = (uint16) data;
+ sd_ctrl(("16: W Reg 0x%02x, Data 0x%x\n", reg, data));
+}
+
+static void
+sdstd_or_reg16(sdioh_info_t *sd, uint reg, uint16 val)
+{
+ volatile uint16 data = *(volatile uint16 *)(sd->mem_space + reg);
+ sd_ctrl(("16: OR Reg 0x%02x, Val 0x%x\n", reg, val));
+ data |= val;
+ *(volatile uint16 *)(sd->mem_space + reg) = (uint16)data;
+
+}
+static void
+sdstd_mod_reg16(sdioh_info_t *sd, uint reg, int16 mask, uint16 val)
+{
+
+ volatile uint16 data = *(volatile uint16 *)(sd->mem_space + reg);
+ sd_ctrl(("16: MOD Reg 0x%02x, Mask 0x%x, Val 0x%x\n", reg, mask, val));
+ data &= ~mask;
+ data |= (val & mask);
+ *(volatile uint16 *)(sd->mem_space + reg) = (uint16)data;
+}
+
+/* 32 bit PCI regs */
+static uint32
+sdstd_rreg(sdioh_info_t *sd, uint reg)
+{
+ volatile uint32 data = *(volatile uint32 *)(sd->mem_space + reg);
+ sd_ctrl(("32: R Reg 0x%02x, Data 0x%x\n", reg, data));
+ return data;
+}
+static inline void
+sdstd_wreg(sdioh_info_t *sd, uint reg, uint32 data)
+{
+ *(volatile uint32 *)(sd->mem_space + reg) = (uint32)data;
+ sd_ctrl(("32: W Reg 0x%02x, Data 0x%x\n", reg, data));
+
+}
+#ifdef BCMINTERNAL
+#ifdef NOTUSED
+static void
+sdstd_or_reg(sdioh_info_t *sd, uint reg, uint32 val)
+{
+ volatile uint32 data = *(volatile uint32 *)(sd->mem_space + reg);
+ data |= val;
+ *(volatile uint32 *)(sd->mem_space + reg) = (volatile uint32)data;
+}
+static void
+sdstd_mod_reg(sdioh_info_t *sd, uint reg, uint32 mask, uint32 val)
+{
+ volatile uint32 data = *(volatile uint32 *)(sd->mem_space + reg);
+ data &= ~mask;
+ data |= (val & mask);
+ *(volatile uint32 *)(sd->mem_space + reg) = (volatile uint32)data;
+}
+#endif /* NOTUSED */
+#endif /* BCMINTERNAL */
+
+/* 8 bit PCI regs */
+static inline void
+sdstd_wreg8(sdioh_info_t *sd, uint reg, uint8 data)
+{
+ *(volatile uint8 *)(sd->mem_space + reg) = (uint8)data;
+ sd_ctrl(("08: W Reg 0x%02x, Data 0x%x\n", reg, data));
+}
+static uint8
+sdstd_rreg8(sdioh_info_t *sd, uint reg)
+{
+ volatile uint8 data = *(volatile uint8 *)(sd->mem_space + reg);
+ sd_ctrl(("08: R Reg 0x%02x, Data 0x%x\n", reg, data));
+ return data;
+}
+
+/*
+ * Private work routines
+ */
+
+sdioh_info_t *glob_sd;
+
+/*
+ * Public entry points & extern's
+ */
+extern sdioh_info_t *
+sdioh_attach(osl_t *osh, void *bar0, uint irq)
+{
+ sdioh_info_t *sd;
+
+ sd_trace(("%s\n", __FUNCTION__));
+ if ((sd = (sdioh_info_t *)MALLOC(osh, sizeof(sdioh_info_t))) == NULL) {
+ sd_err(("sdioh_attach: out of memory, malloced %d bytes\n", MALLOCED(osh)));
+ return NULL;
+ }
+ bzero((char *)sd, sizeof(sdioh_info_t));
+ glob_sd = sd;
+ sd->osh = osh;
+ if (sdstd_osinit(sd) != 0) {
+ sd_err(("%s:sdstd_osinit() failed\n", __FUNCTION__));
+ MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+ return NULL;
+ }
+ sd->mem_space = (volatile char *)sdstd_reg_map(osh, (ulong)bar0, SDIOH_REG_WINSZ);
+ sd_init_dma(sd);
+ sd->irq = irq;
+ if (sd->mem_space == NULL) {
+ sd_err(("%s:ioremap() failed\n", __FUNCTION__));
+ sdstd_osfree(sd);
+ MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+ return NULL;
+ }
+ sd_info(("%s:sd->mem_space = %p\n", __FUNCTION__, sd->mem_space));
+ sd->intr_handler = NULL;
+ sd->intr_handler_arg = NULL;
+ sd->intr_handler_valid = FALSE;
+
+ /* Set defaults */
+ sd->sd_blockmode = TRUE;
+ sd->use_client_ints = TRUE;
+ sd->sd_dma_mode = sd_dma_mode;
+
+ /* XXX Haven't figured out how to make bytemode work with dma */
+ if (!sd->sd_blockmode)
+ sd->sd_dma_mode = DMA_MODE_NONE;
+
+ if (sdstd_driver_init(sd) != SUCCESS) {
+		/* If the host CPU was reset without resetting the SD bus or
+		   SD device, the device will still have its RCA but the
+		   driver no longer knows what it is (since the driver has
+		   been restarted). Go through once to clear the RCA and
+		   reassign it.
+		*/
+ sd_info(("driver_init failed - Reset RCA and try again\n"));
+ if (sdstd_driver_init(sd) != SUCCESS) {
+ sd_err(("%s:driver_init() failed()\n", __FUNCTION__));
+ if (sd->mem_space) {
+ sdstd_reg_unmap(osh, (ulong)sd->mem_space, SDIOH_REG_WINSZ);
+ sd->mem_space = NULL;
+ }
+ sdstd_osfree(sd);
+ MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+ return (NULL);
+ }
+ }
+
+ /* XXX Needed for NDIS as its OSL checks for correct dma address width
+ * This value is normally set by wlc_attach() which has yet to run
+ */
+ OSL_DMADDRWIDTH(osh, 32);
+
+ /* Always map DMA buffers, so we can switch between DMA modes. */
+ sd_map_dma(sd);
+
+ if (sdstd_register_irq(sd, irq) != SUCCESS) {
+ sd_err(("%s: sdstd_register_irq() failed for irq = %d\n", __FUNCTION__, irq));
+ sdstd_free_irq(sd->irq, sd);
+ if (sd->mem_space) {
+ sdstd_reg_unmap(osh, (ulong)sd->mem_space, SDIOH_REG_WINSZ);
+ sd->mem_space = NULL;
+ }
+
+ sdstd_osfree(sd);
+ MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+ return (NULL);
+ }
+
+ sd_trace(("%s: Done\n", __FUNCTION__));
+ return sd;
+}
+
+extern SDIOH_API_RC
+sdioh_detach(osl_t *osh, sdioh_info_t *sd)
+{
+ sd_trace(("%s\n", __FUNCTION__));
+ if (sd) {
+ sd_unmap_dma(sd);
+ sdstd_wreg16(sd, SD_IntrSignalEnable, 0);
+ if (sd->sd3_tuning_reqd == TRUE) {
+ sdstd_3_osclean_tuning(sd);
+ sd->sd3_tuning_reqd = FALSE;
+ }
+ sd->sd3_tuning_disable = FALSE;
+ sd_trace(("%s: freeing irq %d\n", __FUNCTION__, sd->irq));
+ sdstd_free_irq(sd->irq, sd);
+ if (sd->card_init_done)
+ sdstd_reset(sd, 1, 1);
+ if (sd->mem_space) {
+ sdstd_reg_unmap(osh, (ulong)sd->mem_space, SDIOH_REG_WINSZ);
+ sd->mem_space = NULL;
+ }
+
+ sdstd_osfree(sd);
+ MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+ }
+ return SDIOH_API_RC_SUCCESS;
+}
+
+/* Configure callback to client when we receive client interrupt */
+extern SDIOH_API_RC
+sdioh_interrupt_register(sdioh_info_t *sd, sdioh_cb_fn_t fn, void *argh)
+{
+ sd_trace(("%s: Entering\n", __FUNCTION__));
+ sd->intr_handler = fn;
+ sd->intr_handler_arg = argh;
+ sd->intr_handler_valid = TRUE;
+ return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_interrupt_deregister(sdioh_info_t *sd)
+{
+ sd_trace(("%s: Entering\n", __FUNCTION__));
+ sd->intr_handler_valid = FALSE;
+ sd->intr_handler = NULL;
+ sd->intr_handler_arg = NULL;
+ return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_interrupt_query(sdioh_info_t *sd, bool *onoff)
+{
+ sd_trace(("%s: Entering\n", __FUNCTION__));
+ *onoff = sd->client_intr_enabled;
+ return SDIOH_API_RC_SUCCESS;
+}
+
+#if defined(DHD_DEBUG) || defined(BCMDBG)
+extern bool
+sdioh_interrupt_pending(sdioh_info_t *sd)
+{
+ uint16 intrstatus;
+ intrstatus = sdstd_rreg16(sd, SD_IntrStatus);
+ return !!(intrstatus & CLIENT_INTR);
+}
+#endif
+
+uint
+sdioh_query_iofnum(sdioh_info_t *sd)
+{
+ return sd->num_funcs;
+}
+
+/* IOVar table */
+enum {
+ IOV_MSGLEVEL = 1,
+ IOV_BLOCKMODE,
+ IOV_BLOCKSIZE,
+ IOV_DMA,
+ IOV_USEINTS,
+ IOV_NUMINTS,
+ IOV_NUMLOCALINTS,
+ IOV_HOSTREG,
+ IOV_DEVREG,
+ IOV_DIVISOR,
+ IOV_SDMODE,
+ IOV_HISPEED,
+ IOV_HCIREGS,
+ IOV_POWER,
+ IOV_POWER_SAVE,
+ IOV_YIELDCPU,
+ IOV_MINYIELD,
+ IOV_FORCERB,
+ IOV_CLOCK,
+ IOV_UHSIMOD,
+ IOV_TUNEMOD,
+ IOV_TUNEDIS
+};
+
+const bcm_iovar_t sdioh_iovars[] = {
+ {"sd_msglevel", IOV_MSGLEVEL, 0, 0, IOVT_UINT32, 0 },
+ {"sd_blockmode", IOV_BLOCKMODE, 0, 0, IOVT_BOOL, 0 },
+ {"sd_blocksize", IOV_BLOCKSIZE, 0, 0, IOVT_UINT32, 0 }, /* ((fn << 16) | size) */
+ {"sd_dma", IOV_DMA, 0, 0, IOVT_UINT32, 0 },
+#ifdef BCMSDYIELD
+ {"sd_yieldcpu", IOV_YIELDCPU, 0, 0, IOVT_BOOL, 0 },
+ {"sd_minyield", IOV_MINYIELD, 0, 0, IOVT_UINT32, 0 },
+ {"sd_forcerb", IOV_FORCERB, 0, 0, IOVT_BOOL, 0 },
+#endif
+ {"sd_ints", IOV_USEINTS, 0, 0, IOVT_BOOL, 0 },
+ {"sd_numints", IOV_NUMINTS, 0, 0, IOVT_UINT32, 0 },
+ {"sd_numlocalints", IOV_NUMLOCALINTS, 0, 0, IOVT_UINT32, 0 },
+ {"sd_hostreg", IOV_HOSTREG, 0, 0, IOVT_BUFFER, sizeof(sdreg_t) },
+ {"sd_devreg", IOV_DEVREG, 0, 0, IOVT_BUFFER, sizeof(sdreg_t) },
+ {"sd_divisor", IOV_DIVISOR, 0, 0, IOVT_UINT32, 0 },
+ {"sd_power", IOV_POWER, 0, 0, IOVT_UINT32, 0 },
+ {"sd_power_save", IOV_POWER_SAVE, 0, 0, IOVT_UINT32, 0 },
+ {"sd_clock", IOV_CLOCK, 0, 0, IOVT_UINT32, 0 },
+ {"sd_mode", IOV_SDMODE, 0, 0, IOVT_UINT32, 100},
+ {"sd_highspeed", IOV_HISPEED, 0, 0, IOVT_UINT32, 0},
+ {"sd_uhsimode", IOV_UHSIMOD, 0, 0, IOVT_UINT32, 0},
+#ifdef BCMDBG
+ {"sd_hciregs", IOV_HCIREGS, 0, 0, IOVT_BUFFER, 0 },
+#endif
+ {"tuning_mode", IOV_TUNEMOD, 0, 0, IOVT_UINT32, 0},
+ {"sd3_tuning_disable", IOV_TUNEDIS, 0, 0, IOVT_BOOL, 0},
+
+ {NULL, 0, 0, 0, 0, 0 }
+};
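+
+/* Example (illustrative): "sd_blocksize" packs the function number into the
+ * upper 16 bits and the size into the lower 16 bits, so setting function 2
+ * to a 512-byte block uses int_val = (2 << 16) | 512; see the
+ * IOV_SVAL(IOV_BLOCKSIZE) handler below.
+ */
+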
+uint8 sdstd_turn_on_clock(sdioh_info_t *sd)
+{
+ sdstd_or_reg16(sd, SD_ClockCntrl, 0x4);
+ return 0;
+}
+
+uint8 sdstd_turn_off_clock(sdioh_info_t *sd)
+{
+ sdstd_wreg16(sd, SD_ClockCntrl, sdstd_rreg16(sd, SD_ClockCntrl) & ~((uint16)0x4));
+ return 0;
+}
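+
+/* Both helpers above toggle bit 2 (0x4) of the Clock Control register -
+ * the SD Clock Enable bit in the standard host controller spec - leaving
+ * the internal-clock and divisor fields untouched; the sd_clock iovar
+ * handler below clears the same bit.
+ */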
+
+int
+sdioh_iovar_op(sdioh_info_t *si, const char *name,
+ void *params, int plen, void *arg, uint len, bool set)
+{
+ const bcm_iovar_t *vi = NULL;
+ int bcmerror = 0;
+ uint val_size;
+ int32 int_val = 0;
+ bool bool_val;
+ uint32 actionid;
+
+ ASSERT(name);
+
+ /* Get must have return space; Set does not take qualifiers */
+ ASSERT(set || (arg && len));
+ ASSERT(!set || (!params && !plen));
+
+ sd_trace(("%s: Enter (%s %s)\n", __FUNCTION__, (set ? "set" : "get"), name));
+
+ if ((vi = bcm_iovar_lookup(sdioh_iovars, name)) == NULL) {
+ bcmerror = BCME_UNSUPPORTED;
+ goto exit;
+ }
+
+ if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, set)) != 0)
+ goto exit;
+
+ /* XXX Copied from dhd, copied from wl; certainly overkill here? */
+ /* Set up params so get and set can share the convenience variables */
+ if (params == NULL) {
+ params = arg;
+ plen = len;
+ }
+
+ if (vi->type == IOVT_VOID)
+ val_size = 0;
+ else if (vi->type == IOVT_BUFFER)
+ val_size = len;
+ else
+ val_size = sizeof(int);
+
+ if (plen >= (int)sizeof(int_val))
+ bcopy(params, &int_val, sizeof(int_val));
+
+ bool_val = (int_val != 0) ? TRUE : FALSE;
+ BCM_REFERENCE(bool_val);
+
+ actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
+ switch (actionid) {
+ case IOV_GVAL(IOV_MSGLEVEL):
+ int_val = (int32)sd_msglevel;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_MSGLEVEL):
+ sd_msglevel = int_val;
+ break;
+
+ case IOV_GVAL(IOV_BLOCKMODE):
+ int_val = (int32)si->sd_blockmode;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_BLOCKMODE):
+ si->sd_blockmode = (bool)int_val;
+ /* Haven't figured out how to make non-block mode with DMA */
+ if (!si->sd_blockmode)
+ si->sd_dma_mode = DMA_MODE_NONE;
+ break;
+
+#ifdef BCMSDYIELD
+ case IOV_GVAL(IOV_YIELDCPU):
+ int_val = sd_yieldcpu;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_YIELDCPU):
+ sd_yieldcpu = (bool)int_val;
+ break;
+
+ case IOV_GVAL(IOV_MINYIELD):
+ int_val = sd_minyield;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_MINYIELD):
+ sd_minyield = (bool)int_val;
+ break;
+
+ case IOV_GVAL(IOV_FORCERB):
+ int_val = sd_forcerb;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_FORCERB):
+ sd_forcerb = (bool)int_val;
+ break;
+#endif /* BCMSDYIELD */
+
+ case IOV_GVAL(IOV_BLOCKSIZE):
+ if ((uint32)int_val > si->num_funcs) {
+ bcmerror = BCME_BADARG;
+ break;
+ }
+ int_val = (int32)si->client_block_size[int_val];
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_BLOCKSIZE):
+ {
+ uint func = ((uint32)int_val >> 16);
+ uint blksize = (uint16)int_val;
+ uint maxsize;
+
+ if (func > si->num_funcs) {
+ bcmerror = BCME_BADARG;
+ break;
+ }
+
+ /* XXX These hardcoded sizes are a hack, remove after proper CIS parsing. */
+ switch (func) {
+ case 0: maxsize = 32; break;
+ case 1: maxsize = BLOCK_SIZE_4318; break;
+ case 2: maxsize = BLOCK_SIZE_4328; break;
+ default: maxsize = 0;
+ }
+ if (blksize > maxsize) {
+ bcmerror = BCME_BADARG;
+ break;
+ }
+ if (!blksize) {
+ blksize = maxsize;
+ }
+
+ /* Now set it */
+ sdstd_lock(si);
+ bcmerror = set_client_block_size(si, func, blksize);
+ sdstd_unlock(si);
+ break;
+ }
+
+ case IOV_GVAL(IOV_DMA):
+ int_val = (int32)si->sd_dma_mode;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_DMA):
+ si->sd_dma_mode = (char)int_val;
+ sdstd_set_dma_mode(si, si->sd_dma_mode);
+ break;
+
+ case IOV_GVAL(IOV_USEINTS):
+ int_val = (int32)si->use_client_ints;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_USEINTS):
+ si->use_client_ints = (bool)int_val;
+ if (si->use_client_ints)
+ si->intmask |= CLIENT_INTR;
+ else
+ si->intmask &= ~CLIENT_INTR;
+ break;
+
+ case IOV_GVAL(IOV_DIVISOR):
+ int_val = (uint32)sd_divisor;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_DIVISOR):
+ sd_divisor = int_val;
+ if (!sdstd_start_clock(si, (uint16)sd_divisor)) {
+ sd_err(("set clock failed!\n"));
+ bcmerror = BCME_ERROR;
+ }
+ break;
+
+ case IOV_GVAL(IOV_POWER):
+ int_val = (uint32)sd_power;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_GVAL(IOV_POWER_SAVE):
+ int_val = (uint32)sd_3_power_save;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_POWER):
+ sd_power = int_val;
+ if (sd_power == 1) {
+ if (sdstd_driver_init(si) != SUCCESS) {
+ sd_err(("set SD Slot power failed!\n"));
+ bcmerror = BCME_ERROR;
+ } else {
+ sd_err(("SD Slot Powered ON.\n"));
+ }
+ } else {
+ uint8 pwr = 0;
+
+ pwr = SFIELD(pwr, PWR_BUS_EN, 0);
+ sdstd_wreg8(si, SD_PwrCntrl, pwr); /* Set Voltage level */
+ sd_err(("SD Slot Powered OFF.\n"));
+ }
+ break;
+
+ case IOV_SVAL(IOV_POWER_SAVE):
+ sd_3_power_save = int_val;
+ break;
+
+ case IOV_GVAL(IOV_CLOCK):
+ int_val = (uint32)sd_clock;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_CLOCK):
+ sd_clock = int_val;
+ if (sd_clock == 1) {
+ sd_info(("SD Clock turned ON.\n"));
+ if (!sdstd_start_clock(si, (uint16)sd_divisor)) {
+ sd_err(("sdstd_start_clock failed\n"));
+ bcmerror = BCME_ERROR;
+ }
+ } else {
+ /* turn off HC clock */
+ sdstd_wreg16(si, SD_ClockCntrl,
+ sdstd_rreg16(si, SD_ClockCntrl) & ~((uint16)0x4));
+
+ sd_info(("SD Clock turned OFF.\n"));
+ }
+ break;
+
+ case IOV_GVAL(IOV_SDMODE):
+ int_val = (uint32)sd_sdmode;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_SDMODE):
+ sd_sdmode = int_val;
+
+ if (!sdstd_bus_width(si, sd_sdmode)) {
+ sd_err(("sdstd_bus_width failed\n"));
+ bcmerror = BCME_ERROR;
+ }
+ break;
+
+ case IOV_GVAL(IOV_HISPEED):
+ int_val = (uint32)sd_hiok;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_HISPEED):
+ sd_hiok = int_val;
+ bcmerror = sdstd_set_highspeed_mode(si, (bool)sd_hiok);
+ break;
+
+ case IOV_GVAL(IOV_UHSIMOD):
+ sd3_trace(("%s: Get UHSI: \n", __FUNCTION__));
+ int_val = (int)sd_uhsimode;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_UHSIMOD):
+ {
+ int oldval = sd_uhsimode; /* save old, working value */
+ sd3_trace(("%s: Set UHSI: \n", __FUNCTION__));
+ /* check if UHSI is supported by card/host */
+ if (!(si->card_UHSI_voltage_Supported && si->host_UHSISupported)) {
+ sd_err(("%s:UHSI not suppoted!\n", __FUNCTION__));
+ bcmerror = BCME_UNSUPPORTED;
+ break;
+ }
+ /* check for valid values */
+ if (!((int_val == SD3CLKMODE_AUTO) ||
+ (int_val == SD3CLKMODE_DISABLED) ||
+ ((int_val >= SD3CLKMODE_0_SDR12) &&
+ (int_val <= SD3CLKMODE_4_DDR50)))) {
+ sd_err(("%s:CLK: bad arg!\n", __FUNCTION__));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+
+ sd_uhsimode = int_val;
+ if (SUCCESS != sdstd_3_clock_wrapper(si)) {
+ sd_err(("%s:Error in setting uhsi clkmode:%d,"
+ "restoring back to %d\n", __FUNCTION__,
+ sd_uhsimode, oldval));
+ /* try to set back the old one */
+ sd_uhsimode = oldval;
+ if (SUCCESS != sdstd_3_clock_wrapper(si)) {
+ sd_err(("%s:Error in setting uhsi to old mode;"
+ "ignoring:\n", __FUNCTION__));
+ }
+ }
+ break;
+ }
+#ifdef DHD_DEBUG
+ case IOV_SVAL(IOV_TUNEMOD):
+ {
+
+		if (int_val == SD_DHD_DISABLE_PERIODIC_TUNING) { /* do tuning a single time */
+ sd3_trace(("Start tuning from Iovar\n"));
+ si->sd3_tuning_reqd = TRUE;
+ sdstd_enable_disable_periodic_timer(si, int_val);
+ sdstd_lock(si);
+ sdstd_3_clk_tuning(si, sdstd_3_get_uhsi_clkmode(si));
+ sdstd_unlock(si);
+ si->sd3_tuning_reqd = FALSE;
+ }
+ if (int_val == SD_DHD_ENABLE_PERIODIC_TUNING) {
+ sd3_trace(("Enabling automatic tuning\n"));
+ si->sd3_tuning_reqd = TRUE;
+ sdstd_enable_disable_periodic_timer(si, int_val);
+ }
+ break;
+ }
+#endif /* DHD_DEBUG */
+ case IOV_GVAL(IOV_NUMINTS):
+ int_val = (int32)si->intrcount;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_GVAL(IOV_NUMLOCALINTS):
+ int_val = (int32)si->local_intrcount;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_GVAL(IOV_HOSTREG):
+ {
+ /* XXX Should copy for alignment reasons */
+ sdreg_t *sd_ptr = (sdreg_t *)params;
+
+ if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD3_WL_BT_reset_register) {
+ sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+
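+		/* Note: the access width is inferred from the offset's low bits:
+		 * odd offsets are read 8-bit, offsets of the form 4n+2 are read
+		 * 16-bit, and 4-byte-aligned offsets are read 32-bit.
+		 */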
+ sd_trace(("%s: rreg%d at offset %d\n", __FUNCTION__,
+ (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 16 : 32),
+ sd_ptr->offset));
+ if (sd_ptr->offset & 1)
+ int_val = sdstd_rreg8(si, sd_ptr->offset);
+ else if (sd_ptr->offset & 2)
+ int_val = sdstd_rreg16(si, sd_ptr->offset);
+ else
+ int_val = sdstd_rreg(si, sd_ptr->offset);
+
+ bcopy(&int_val, arg, sizeof(int_val));
+ break;
+ }
+
+ case IOV_SVAL(IOV_HOSTREG):
+ {
+ /* XXX Should copy for alignment reasons */
+ sdreg_t *sd_ptr = (sdreg_t *)params;
+
+ if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD3_WL_BT_reset_register) {
+ sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+
+ sd_trace(("%s: wreg%d value 0x%08x at offset %d\n", __FUNCTION__, sd_ptr->value,
+ (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 16 : 32),
+ sd_ptr->offset));
+ if (sd_ptr->offset & 1)
+ sdstd_wreg8(si, sd_ptr->offset, (uint8)sd_ptr->value);
+ else if (sd_ptr->offset & 2)
+ sdstd_wreg16(si, sd_ptr->offset, (uint16)sd_ptr->value);
+ else
+ sdstd_wreg(si, sd_ptr->offset, (uint32)sd_ptr->value);
+
+ break;
+ }
+
+ case IOV_GVAL(IOV_DEVREG):
+ {
+ /* XXX Should copy for alignment reasons */
+ sdreg_t *sd_ptr = (sdreg_t *)params;
+ uint8 data;
+
+ if (sdioh_cfg_read(si, sd_ptr->func, sd_ptr->offset, &data)) {
+ bcmerror = BCME_SDIO_ERROR;
+ break;
+ }
+
+ int_val = (int)data;
+ bcopy(&int_val, arg, sizeof(int_val));
+ break;
+ }
+
+ case IOV_SVAL(IOV_DEVREG):
+ {
+ /* XXX Should copy for alignment reasons */
+ sdreg_t *sd_ptr = (sdreg_t *)params;
+ uint8 data = (uint8)sd_ptr->value;
+
+ if (sdioh_cfg_write(si, sd_ptr->func, sd_ptr->offset, &data)) {
+ bcmerror = BCME_SDIO_ERROR;
+ break;
+ }
+ break;
+ }
+
+#ifdef BCMDBG
+ case IOV_GVAL(IOV_HCIREGS):
+ {
+ struct bcmstrbuf b;
+ bcm_binit(&b, arg, len);
+
+ sdstd_lock(si);
+ bcm_bprintf(&b, "IntrStatus: 0x%04x ErrorIntrStatus 0x%04x\n",
+ sdstd_rreg16(si, SD_IntrStatus),
+ sdstd_rreg16(si, SD_ErrorIntrStatus));
+ bcm_bprintf(&b, "IntrStatusEnable: 0x%04x ErrorIntrStatusEnable 0x%04x\n",
+ sdstd_rreg16(si, SD_IntrStatusEnable),
+ sdstd_rreg16(si, SD_ErrorIntrStatusEnable));
+ bcm_bprintf(&b, "IntrSignalEnable: 0x%04x ErrorIntrSignalEnable 0x%04x\n",
+ sdstd_rreg16(si, SD_IntrSignalEnable),
+ sdstd_rreg16(si, SD_ErrorIntrSignalEnable));
+ print_regs(si);
+
+ sdstd_unlock(si);
+
+ if (!b.size)
+ bcmerror = BCME_BUFTOOSHORT;
+ break;
+ }
+#endif /* BCMDBG */
+
+ case IOV_SVAL(IOV_TUNEDIS):
+ si->sd3_tuning_disable = (bool)int_val;
+ break;
+
+ default:
+ bcmerror = BCME_UNSUPPORTED;
+ break;
+ }
+exit:
+
+ /* XXX Remove protective lock after clients all clean... */
+ return bcmerror;
+}
+
+extern SDIOH_API_RC
+sdioh_cfg_read(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
+{
+ SDIOH_API_RC status;
+ /* No lock needed since sdioh_request_byte does locking */
+ status = sdioh_request_byte(sd, SDIOH_READ, fnc_num, addr, data);
+ return status;
+}
+
+extern SDIOH_API_RC
+sdioh_cfg_write(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
+{
+ /* No lock needed since sdioh_request_byte does locking */
+ SDIOH_API_RC status;
+ status = sdioh_request_byte(sd, SDIOH_WRITE, fnc_num, addr, data);
+ return status;
+}
+
+extern SDIOH_API_RC
+sdioh_cis_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 length)
+{
+ uint32 count;
+ int offset;
+ uint32 foo;
+ uint8 *cis = cisd;
+
+ sd_trace(("%s: Func = %d\n", __FUNCTION__, func));
+
+ if (!sd->func_cis_ptr[func]) {
+ bzero(cis, length);
+ return SDIOH_API_RC_FAIL;
+ }
+
+ sdstd_lock(sd);
+ *cis = 0;
+ for (count = 0; count < length; count++) {
+ offset = sd->func_cis_ptr[func] + count;
+ if (sdstd_card_regread(sd, 0, offset, 1, &foo)) {
+ sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__));
+ sdstd_unlock(sd);
+ return SDIOH_API_RC_FAIL;
+ }
+ *cis = (uint8)(foo & 0xff);
+ cis++;
+ }
+ sdstd_unlock(sd);
+ return SDIOH_API_RC_SUCCESS;
+}
+
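+/* Single-byte register access below is implemented with SDIO CMD52. As a
+ * rough sketch of the argument built from the CMD52 SFIELD macros: it packs
+ * the function number, register address, R/W flag, RAW bit, and (for writes)
+ * the data byte; the card answers with an R5 response carrying status flags
+ * and, for reads, the data byte. E.g. a read of F1 register 0x1000 would be
+ * built as cmd_arg = SFIELD(0, CMD52_FUNCTION, 1) followed by
+ * SFIELD(cmd_arg, CMD52_REG_ADDR, 0x1000) with CMD52_RW_FLAG left 0.
+ */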
+extern SDIOH_API_RC
+sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *byte)
+{
+ int status = SDIOH_API_RC_SUCCESS;
+ uint32 cmd_arg;
+ uint32 rsp5;
+
+ sdstd_lock(sd);
+ if (rw == SDIOH_READ)
+ sdstd_3_check_and_do_tuning(sd, CHECK_TUNING_PRE_DATA);
+
+ /* Change to DATA_TRANSFER_ONGOING , protection against tuning tasklet */
+ sdstd_3_set_data_state(sd, DATA_TRANSFER_ONGOING);
+
+#ifdef BCMDBG
+ if (sdstd_rreg16 (sd, SD_ErrorIntrStatus) != 0) {
+		sd_err(("%s: Entering: ErrorIntrStatus 0x%x, intstat = 0x%x\n",
+ __FUNCTION__, sdstd_rreg16(sd, SD_ErrorIntrStatus),
+ sdstd_rreg16(sd, SD_IntrStatus)));
+ }
+#endif
+ cmd_arg = 0;
+ cmd_arg = SFIELD(cmd_arg, CMD52_FUNCTION, func);
+ cmd_arg = SFIELD(cmd_arg, CMD52_REG_ADDR, regaddr);
+ cmd_arg = SFIELD(cmd_arg, CMD52_RW_FLAG, rw == SDIOH_READ ? 0 : 1);
+ cmd_arg = SFIELD(cmd_arg, CMD52_RAW, 0);
+ cmd_arg = SFIELD(cmd_arg, CMD52_DATA, rw == SDIOH_READ ? 0 : *byte);
+
+ if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_52, cmd_arg)) != SUCCESS) {
+ /* Change to DATA_TRANSFER_IDLE */
+ sdstd_3_set_data_state(sd, DATA_TRANSFER_IDLE);
+ sdstd_unlock(sd);
+ return status;
+ }
+
+ sdstd_cmd_getrsp(sd, &rsp5, 1);
+ if (sdstd_rreg16 (sd, SD_ErrorIntrStatus) != 0) {
+ sd_err(("%s: 1: ErrorintrStatus 0x%x\n",
+ __FUNCTION__, sdstd_rreg16(sd, SD_ErrorIntrStatus)));
+ status = SDIOH_API_RC_FAIL;
+ }
+ if (GFIELD(rsp5, RSP5_FLAGS) != 0x10) {
+ /* PR 101351: sdiod_aos sleep followed by immediate wakeup
+ * before sdiod_aos takes over has a problem.
+ * While exiting sleep with CMD14, device returning 0x00
+ * Don't flag as error for now for 0x1001f.
+ */
+ if (GFIELD(cmd_arg, CMD52_REG_ADDR) != F1_SLEEPCSR_ADDR) {
+ sd_err(("%s: rsp5 flags is 0x%x\t %d \n",
+ __FUNCTION__, GFIELD(rsp5, RSP5_FLAGS), func));
+ }
+ status = SDIOH_API_RC_FAIL;
+ }
+
+ if (GFIELD(rsp5, RSP5_STUFF)) {
+ sd_err(("%s: rsp5 stuff is 0x%x: should be 0\n",
+ __FUNCTION__, GFIELD(rsp5, RSP5_STUFF)));
+ status = SDIOH_API_RC_FAIL;
+ }
+
+ if (rw == SDIOH_READ)
+ *byte = GFIELD(rsp5, RSP5_DATA);
+
+ /* Change to DATA_TRANSFER_IDLE */
+ sdstd_3_set_data_state(sd, DATA_TRANSFER_IDLE);
+
+ /* check if we have to do tuning; if so, start */
+ sdstd_3_check_and_do_tuning(sd, CHECK_TUNING_POST_DATA);
+
+ sdstd_unlock(sd);
+ return status;
+}
+
+extern SDIOH_API_RC
+sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint addr,
+ uint32 *word, uint nbytes)
+{
+ int status;
+
+ sdstd_lock(sd);
+
+ sdstd_3_check_and_do_tuning(sd, CHECK_TUNING_PRE_DATA);
+
+ /* Change to DATA_TRANSFER_ONGOING , protection against tuning tasklet */
+ sdstd_3_set_data_state(sd, DATA_TRANSFER_ONGOING);
+
+ if (rw == SDIOH_READ) {
+ status = sdstd_card_regread(sd, func, addr, nbytes, word);
+ } else {
+ status = sdstd_card_regwrite(sd, func, addr, nbytes, *word);
+ }
+
+ /* Change to DATA_TRANSFER_IDLE */
+ sdstd_3_set_data_state(sd, DATA_TRANSFER_IDLE);
+
+ /* check if we have to do tuning; if so, start */
+ sdstd_3_check_and_do_tuning(sd, CHECK_TUNING_POST_DATA);
+
+ sdstd_unlock(sd);
+ return (status == SUCCESS ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
+}
+
+#ifdef BCMSDIOH_TXGLOM
+void
+sdioh_glom_post(sdioh_info_t *sd, uint8 *frame, void *pkt, uint len)
+{
+ BCM_REFERENCE(pkt);
+ sd->glom_info.dma_buf_arr[sd->glom_info.count] = frame;
+ sd->glom_info.nbytes[sd->glom_info.count] = len;
+	/* Convert the frame addr to phy addr for DMA in case of host controller version 3 */
+ if (sd->txglom_mode == SDPCM_TXGLOM_MDESC) {
+ sd->glom_info.dma_phys_arr[sd->glom_info.count] = DMA_MAP(sd->osh,
+ frame,
+ len,
+ DMA_TX, 0, 0);
+ }
+ sd->glom_info.count++;
+}
+
+void
+sdioh_glom_clear(sdioh_info_t *sd)
+{
+ int i;
+ /* DMA_MAP is done per frame only if host controller version is 3 */
+ if (sd->txglom_mode == SDPCM_TXGLOM_MDESC) {
+ for (i = 0; i < sd->glom_info.count; i++) {
+ DMA_UNMAP(sd->osh,
+ sd->glom_info.dma_phys_arr[i],
+ sd->glom_info.nbytes[i],
+ DMA_TX, 0, 0);
+ }
+ }
+ sd->glom_info.count = 0;
+}
+
+uint
+sdioh_set_mode(sdioh_info_t *sd, uint mode)
+{
+ if (mode == SDPCM_TXGLOM_CPY)
+ sd->txglom_mode = mode;
+ else if ((mode == SDPCM_TXGLOM_MDESC) && (sd->version == HOST_CONTR_VER_3))
+ sd->txglom_mode = mode;
+
+ return (sd->txglom_mode);
+}
+
+bool
+sdioh_glom_enabled(void)
+{
+ return sd_txglom;
+}
+#endif /* BCMSDIOH_TXGLOM */
+
+extern SDIOH_API_RC
+sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint rw, uint func,
+ uint addr, uint reg_width, uint buflen_u, uint8 *buffer, void *pkt)
+{
+ uint8 is_ddr50 = FALSE;
+ int len;
+ int buflen = (int)buflen_u;
+ bool fifo = (fix_inc == SDIOH_DATA_FIX);
+ uint8 *localbuf = NULL, *tmpbuf = NULL;
+ bool local_blockmode = sd->sd_blockmode;
+ SDIOH_API_RC status = SDIOH_API_RC_SUCCESS;
+
+ sdstd_lock(sd);
+
+ is_ddr50 = (sd_uhsimode == SD3CLKMODE_4_DDR50) ? TRUE : FALSE;
+
+ sdstd_3_check_and_do_tuning(sd, CHECK_TUNING_PRE_DATA);
+
+ /* Change to DATA_TRANSFER_ONGOING , protection against tuning tasklet */
+ sdstd_3_set_data_state(sd, DATA_TRANSFER_ONGOING);
+
+ ASSERT(reg_width == 4);
+ ASSERT(buflen_u < (1 << 30));
+ ASSERT(sd->client_block_size[func]);
+
+#ifdef BCMSDIOH_TXGLOM
+ if (sd_txglom) {
+ while (pkt) {
+ sdioh_glom_post(sd, PKTDATA(sd->osh, pkt), pkt, PKTLEN(sd->osh, pkt));
+ pkt = PKTNEXT(sd->osh, pkt);
+ }
+ }
+#endif
+ sd_data(("%s: %c len %d r_cnt %d t_cnt %d, pkt @0x%p\n",
+ __FUNCTION__, rw == SDIOH_READ ? 'R' : 'W',
+ buflen_u, sd->r_cnt, sd->t_cnt, pkt));
+
+ /* Break buffer down into blocksize chunks:
+ * Bytemode: 1 block at a time.
+ * Blockmode: Multiples of blocksizes at a time w/ max of SD_PAGE.
+ * Both: leftovers are handled last (will be sent via bytemode).
+ */
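+	/* Worked example (assuming SD_PAGE covers the full length): with a
+	 * 512-byte block size, a 1537-byte request goes out as one 1536-byte
+	 * (3-block) transfer followed by a 1-byte transfer.
+	 */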
+ while (buflen > 0) {
+ if (local_blockmode) {
+ int max_tran_size = SD_PAGE;
+#ifdef BCMSDIOH_TXGLOM
+ /* There is no alignment requirement for HC3 */
+ if ((sd->version == HOST_CONTR_VER_3) && sd_txglom)
+ max_tran_size = SD_PAGE * 4;
+#endif
+ /* Max xfer is Page size */
+ len = MIN(max_tran_size, buflen);
+
+			/* Round down to a block boundary */
+ if (buflen > sd->client_block_size[func])
+ len = (len/sd->client_block_size[func]) *
+ sd->client_block_size[func];
+ /* XXX Arasan trashes 3-byte transfers, WAR to add one byte extra. */
+			/* XXX In case of SDIO3.0 DDR50 mode, if the number of bytes to be
+			 * transferred is odd, append one more byte to make it even.
+			 * Check if odd byte counts can occur for SDIO_FUNC_2 as well.
+			 */
+ if ((func == SDIO_FUNC_1) && (((len % 4) == 3) || (((len % 2) == 1) &&
+ (is_ddr50))) && ((rw == SDIOH_WRITE) || (rw == SDIOH_READ))) {
+ sd_err(("%s: Rounding up buffer to mod4 length.\n", __FUNCTION__));
+ len++;
+ tmpbuf = buffer;
+ if ((localbuf = (uint8 *)MALLOC(sd->osh, len)) == NULL) {
+ sd_err(("out of memory, malloced %d bytes\n",
+ MALLOCED(sd->osh)));
+ status = SDIOH_API_RC_FAIL;
+ goto done;
+ }
+ bcopy(buffer, localbuf, len);
+ buffer = localbuf;
+ }
+ } else {
+ /* Byte mode: One block at a time */
+ len = MIN(sd->client_block_size[func], buflen);
+ }
+
+ if (sdstd_card_buf(sd, rw, func, fifo, addr, len, (uint32 *)buffer) != SUCCESS) {
+ status = SDIOH_API_RC_FAIL;
+ }
+
+		/* XXX Restore len and buffer pointer WAR'ed for Arasan 3-byte transfer problem */
+		/* XXX WAR for SDIO3.0 DDR50 mode. */
+		if (local_blockmode && localbuf) {
+			MFREE(sd->osh, localbuf, len);
+			/* defensively clear, in case MFREE() does not NULL the pointer,
+			 * so a later iteration cannot free it again
+			 */
+			localbuf = NULL;
+			len--;
+			buffer = tmpbuf;
+			sd_err(("%s: Restoring back buffer ptr and len.\n", __FUNCTION__));
+		}
+
+ if (status == SDIOH_API_RC_FAIL) {
+ goto done;
+ }
+
+ buffer += len;
+ buflen -= len;
+ if (!fifo)
+ addr += len;
+#ifdef BCMSDIOH_TXGLOM
+		/* This loop should not be entered for glommed pkts, as they are sent
+		 * in multiples of blocks or with a total pkt size of less than a block
+		 */
+ if (sd->glom_info.count != 0)
+ buflen = 0;
+#endif
+ }
+done:
+
+ /* Change to DATA_TRANSFER_IDLE */
+ sdstd_3_set_data_state(sd, DATA_TRANSFER_IDLE);
+
+ /* check if we have to do tuning; if so, start */
+ sdstd_3_check_and_do_tuning(sd, CHECK_TUNING_POST_DATA);
+
+ sdstd_unlock(sd);
+
+#ifdef BCMSDIOH_TXGLOM
+ if (sd_txglom)
+ sdioh_glom_clear(sd);
+#endif
+
+ return status;
+}
+
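+/* The GPIO registers are 16 bits wide, so pins are split into two banks:
+ * assuming SDH_GPIO16 is 16, pins 16..31 map to bit (gpio - 16) of the
+ * register at offset + 2. E.g. gpio 20 is bit 4 of SD_GPIO_OE + 2.
+ */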
+extern SDIOH_API_RC
+sdioh_gpioouten(sdioh_info_t *sd, uint32 gpio)
+{
+ uint offset = 0;
+ uint16 val;
+
+ /* check if upper bank */
+ if (gpio >= SDH_GPIO16) {
+ gpio -= SDH_GPIO16;
+ offset = 2;
+ }
+
+ val = sdstd_rreg16(sd, SD_GPIO_OE + offset);
+ val |= (1 << gpio);
+ sdstd_wreg16(sd, SD_GPIO_OE + offset, val);
+
+ return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_gpioout(sdioh_info_t *sd, uint32 gpio, bool enab)
+{
+ uint offset = 0;
+ uint16 val;
+
+ /* check if upper bank */
+ if (gpio >= SDH_GPIO16) {
+ gpio -= SDH_GPIO16;
+ offset = 2;
+ }
+
+ val = sdstd_rreg16(sd, SD_GPIO_Reg + offset);
+ if (enab == TRUE)
+ val |= (1 << gpio);
+ else
+ val &= ~(1 << gpio);
+ sdstd_wreg16(sd, SD_GPIO_Reg + offset, val);
+
+ return SDIOH_API_RC_SUCCESS;
+}
+
+extern bool
+sdioh_gpioin(sdioh_info_t *sd, uint32 gpio)
+{
+ uint offset = 0;
+ uint16 val;
+
+ /* check if upper bank */
+ if (gpio >= SDH_GPIO16) {
+ gpio -= SDH_GPIO16;
+ offset = 2;
+ }
+
+ val = sdstd_rreg16(sd, SD_GPIO_Reg + offset);
+ val = (val >> gpio) & 1;
+
+ return (val == 1);
+}
+
+extern SDIOH_API_RC
+sdioh_gpio_init(sdioh_info_t *sd)
+{
+ uint rev;
+
+ rev = sdstd_rreg16(sd, SD_HostControllerVersion) >> 8;
+
+ /* Only P206 (fpga rev >= 16) supports gpio */
+ if (rev < 16) {
+ sd_err(("%s: gpio not supported in rev %d \n", __FUNCTION__, rev));
+ return SDIOH_API_RC_FAIL;
+ }
+
+ sdstd_wreg16(sd, SD_GPIO_Enable, SDH_GPIO_ENABLE);
+ sdstd_wreg16(sd, SD_GPIO_Enable + 2, SDH_GPIO_ENABLE);
+
+ /* Default to input */
+ sdstd_wreg16(sd, SD_GPIO_OE, 0);
+ sdstd_wreg16(sd, SD_GPIO_OE + 2, 0);
+
+ return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_sleep(sdioh_info_t *sd, bool enab)
+{
+ SDIOH_API_RC status;
+ uint32 cmd_arg = 0, rsp1 = 0;
+ int retry = 100;
+
+ sdstd_lock(sd);
+
+ cmd_arg = SFIELD(cmd_arg, CMD14_RCA, sd->card_rca);
+ cmd_arg = SFIELD(cmd_arg, CMD14_SLEEP, enab);
+
+ /*
+ * For ExitSleep:
+ * 1) Repeat CMD14 until R1 is received
+ * 2) Send CMD7
+ */
+ status = SDIOH_API_RC_FAIL;
+ while (retry-- > 0) {
+ if ((sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_14, cmd_arg)) == SUCCESS) {
+ status = SDIOH_API_RC_SUCCESS;
+ break;
+ }
+ OSL_DELAY(1400);
+ }
+
+ if (status == SDIOH_API_RC_FAIL) {
+ sd_err(("%s: CMD14: failed! enable:%d\n", __FUNCTION__, enab));
+ goto exit;
+ }
+
+ sdstd_cmd_getrsp(sd, &rsp1, 1);
+ sd_info(("%s: CMD14 OK: cmd_resp:0x%x\n", __FUNCTION__, rsp1));
+
+ /* ExitSleep: Send CMD7 After R1 */
+ if (enab == FALSE) {
+ /* Select the card */
+ cmd_arg = SFIELD(0, CMD7_RCA, sd->card_rca);
+ if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_7, cmd_arg)) != SUCCESS) {
+ sd_err(("%s: CMD14 send CMD7 failed!\n", __FUNCTION__));
+ status = SDIOH_API_RC_FAIL;
+ goto exit;
+ }
+
+ sdstd_cmd_getrsp(sd, &rsp1, 1);
+ if (rsp1 != SDIOH_CMD7_EXP_STATUS) {
+ sd_err(("%s: CMD7 response error. Response = 0x%x!\n",
+ __FUNCTION__, rsp1));
+ status = SDIOH_API_RC_FAIL;
+ goto exit;
+ }
+ }
+
+exit:
+ sdstd_unlock(sd);
+
+ return status;
+}
+
+/* XXX Copied guts of request_byte and cmd_issue. Might make sense to fold this into
+ * those by passing another parameter indicating command type (abort). [But maybe
+ * keeping it separate is better -- if called internally on command failure it's less
+ * recursion to wrap your head around?]
+ */
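+/* Rough flow of the abort below: build a CMD52 write of the function number
+ * to the CCCR IOAbort register, wait out CMD_INHIBIT, issue the command,
+ * then (in polled mode) wait for completion, reset the CMD/DAT lines on
+ * error, and sanity-check the R5 response.
+ */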
+static int
+sdstd_abort(sdioh_info_t *sd, uint func)
+{
+ int err = 0;
+ int retries;
+
+ uint16 cmd_reg;
+ uint32 cmd_arg;
+ uint32 rsp5;
+ uint8 rflags;
+
+ uint16 int_reg = 0;
+ uint16 plain_intstatus;
+
+ /* Argument is write to F0 (CCCR) IOAbort with function number */
+ cmd_arg = 0;
+ cmd_arg = SFIELD(cmd_arg, CMD52_FUNCTION, SDIO_FUNC_0);
+ cmd_arg = SFIELD(cmd_arg, CMD52_REG_ADDR, SDIOD_CCCR_IOABORT);
+ cmd_arg = SFIELD(cmd_arg, CMD52_RW_FLAG, SD_IO_OP_WRITE);
+ cmd_arg = SFIELD(cmd_arg, CMD52_RAW, 0);
+ cmd_arg = SFIELD(cmd_arg, CMD52_DATA, func);
+
+ /* Command is CMD52 write */
+ cmd_reg = 0;
+ cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48_BUSY);
+ cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 1);
+ cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 1);
+ cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
+ cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_ABORT);
+ cmd_reg = SFIELD(cmd_reg, CMD_INDEX, SDIOH_CMD_52);
+
+ /* XXX Copied from cmd_issue(), but no SPI response handling! */
+ if (sd->sd_mode == SDIOH_MODE_SPI) {
+ cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0);
+ cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0);
+ }
+
+ /* Wait for CMD_INHIBIT to go away as per spec section 3.6.1.1 */
+ /* XXX For a single-threaded driver, what circumstances would result
+ * in cmd_inhibit being on but going off in a short time? Experiment
+ * shows a HW command timeout doesn't leave inhibit on, so maybe a SW
+ * timeout? Then that command should be responsible for clearing...
+ */
+ retries = RETRIES_SMALL;
+ while (GFIELD(sdstd_rreg(sd, SD_PresentState), PRES_CMD_INHIBIT)) {
+ if (retries == RETRIES_SMALL)
+ sd_err(("%s: Waiting for Command Inhibit, state 0x%08x\n",
+ __FUNCTION__, sdstd_rreg(sd, SD_PresentState)));
+ if (!--retries) {
+ sd_err(("%s: Command Inhibit timeout, state 0x%08x\n",
+ __FUNCTION__, sdstd_rreg(sd, SD_PresentState)));
+ if (trap_errs)
+ ASSERT(0);
+ err = BCME_SDIO_ERROR;
+ goto done;
+ }
+ }
+
+ /* Clear errors from any previous commands */
+ if ((plain_intstatus = sdstd_rreg16(sd, SD_ErrorIntrStatus)) != 0) {
+ sd_err(("abort: clearing errstat 0x%04x\n", plain_intstatus));
+ sdstd_wreg16(sd, SD_ErrorIntrStatus, plain_intstatus);
+ }
+ plain_intstatus = sdstd_rreg16(sd, SD_IntrStatus);
+ if (plain_intstatus & ~(SFIELD(0, INTSTAT_CARD_INT, 1))) {
+ sd_err(("abort: intstatus 0x%04x\n", plain_intstatus));
+ if (GFIELD(plain_intstatus, INTSTAT_CMD_COMPLETE)) {
+ sd_err(("SDSTD_ABORT: CMD COMPLETE SET BEFORE COMMAND GIVEN!!!\n"));
+ }
+ if (GFIELD(plain_intstatus, INTSTAT_CARD_REMOVAL)) {
+ sd_err(("SDSTD_ABORT: INTSTAT_CARD_REMOVAL\n"));
+ err = BCME_NODEVICE;
+ goto done;
+ }
+ }
+
+ /* Issue the command */
+ sdstd_wreg(sd, SD_Arg0, cmd_arg);
+ sdstd_wreg16(sd, SD_Command, cmd_reg);
+
+ /* In interrupt mode return, expect later CMD_COMPLETE interrupt */
+ if (!sd->polled_mode)
+ return err;
+
+ /* Otherwise, wait for the command to complete */
+ retries = RETRIES_LARGE;
+ do {
+ int_reg = sdstd_rreg16(sd, SD_IntrStatus);
+ } while (--retries &&
+ (GFIELD(int_reg, INTSTAT_ERROR_INT) == 0) &&
+ (GFIELD(int_reg, INTSTAT_CMD_COMPLETE) == 0));
+
+ /* If command completion fails, do a cmd reset and note the error */
+ if (!retries) {
+ sd_err(("%s: CMD_COMPLETE timeout: intr 0x%04x err 0x%04x state 0x%08x\n",
+ __FUNCTION__, int_reg,
+ sdstd_rreg16(sd, SD_ErrorIntrStatus),
+ sdstd_rreg(sd, SD_PresentState)));
+
+ sdstd_wreg8(sd, SD_SoftwareReset, SFIELD(0, SW_RESET_CMD, 1));
+ retries = RETRIES_LARGE;
+ do {
+ sd_trace(("%s: waiting for CMD line reset\n", __FUNCTION__));
+ } while ((GFIELD(sdstd_rreg8(sd, SD_SoftwareReset),
+ SW_RESET_CMD)) && retries--);
+
+ if (!retries) {
+ sd_err(("%s: Timeout waiting for CMD line reset\n", __FUNCTION__));
+ }
+
+ if (trap_errs)
+ ASSERT(0);
+
+ err = BCME_SDIO_ERROR;
+ }
+
+ /* Clear Command Complete interrupt */
+ int_reg = SFIELD(0, INTSTAT_CMD_COMPLETE, 1);
+ sdstd_wreg16(sd, SD_IntrStatus, int_reg);
+
+ /* Check for Errors */
+ if ((plain_intstatus = sdstd_rreg16 (sd, SD_ErrorIntrStatus)) != 0) {
+ sd_err(("%s: ErrorintrStatus: 0x%x, "
+ "(intrstatus = 0x%x, present state 0x%x) clearing\n",
+ __FUNCTION__, plain_intstatus,
+ sdstd_rreg16(sd, SD_IntrStatus),
+ sdstd_rreg(sd, SD_PresentState)));
+
+ sdstd_wreg16(sd, SD_ErrorIntrStatus, plain_intstatus);
+
+ sdstd_wreg8(sd, SD_SoftwareReset, SFIELD(0, SW_RESET_DAT, 1));
+ retries = RETRIES_LARGE;
+ do {
+ sd_trace(("%s: waiting for DAT line reset\n", __FUNCTION__));
+ } while ((GFIELD(sdstd_rreg8(sd, SD_SoftwareReset),
+ SW_RESET_DAT)) && retries--);
+
+ if (!retries) {
+ sd_err(("%s: Timeout waiting for DAT line reset\n", __FUNCTION__));
+ }
+
+ if (trap_errs)
+ ASSERT(0);
+
+ /* ABORT is dataless, only cmd errs count */
+ /* XXX But what about busy timeout? Response valid? */
+ if (plain_intstatus & ERRINT_CMD_ERRS)
+ err = BCME_SDIO_ERROR;
+ }
+
+ /* If command failed don't bother looking at response */
+ if (err)
+ goto done;
+
+ /* Otherwise, check the response */
+ sdstd_cmd_getrsp(sd, &rsp5, 1);
+ rflags = GFIELD(rsp5, RSP5_FLAGS);
+
+ if (rflags & SD_RSP_R5_ERRBITS) {
+ sd_err(("%s: R5 flags include errbits: 0x%02x\n", __FUNCTION__, rflags));
+
+ /* The CRC error flag applies to the previous command */
+ if (rflags & (SD_RSP_R5_ERRBITS & ~SD_RSP_R5_COM_CRC_ERROR)) {
+ err = BCME_SDIO_ERROR;
+ goto done;
+ }
+ }
+
+ if (((rflags & (SD_RSP_R5_IO_CURRENTSTATE0 | SD_RSP_R5_IO_CURRENTSTATE1)) != 0x10) &&
+ ((rflags & (SD_RSP_R5_IO_CURRENTSTATE0 | SD_RSP_R5_IO_CURRENTSTATE1)) != 0x20)) {
+ sd_err(("%s: R5 flags has bad state: 0x%02x\n", __FUNCTION__, rflags));
+ err = BCME_SDIO_ERROR;
+ goto done;
+ }
+
+ if (GFIELD(rsp5, RSP5_STUFF)) {
+ sd_err(("%s: rsp5 stuff is 0x%x: should be 0\n",
+ __FUNCTION__, GFIELD(rsp5, RSP5_STUFF)));
+ err = BCME_SDIO_ERROR;
+ goto done;
+ }
+
+done:
+ if (err == BCME_NODEVICE)
+ return err;
+
+ /* XXX As per spec 3.7.1 (and to be safe) do the resets here */
+ sdstd_wreg8(sd, SD_SoftwareReset,
+ SFIELD(SFIELD(0, SW_RESET_DAT, 1), SW_RESET_CMD, 1));
+
+ retries = RETRIES_LARGE;
+ do {
+ rflags = sdstd_rreg8(sd, SD_SoftwareReset);
+ if (!GFIELD(rflags, SW_RESET_DAT) && !GFIELD(rflags, SW_RESET_CMD))
+ break;
+ } while (--retries);
+
+ if (!retries) {
+ sd_err(("%s: Timeout waiting for DAT/CMD reset: 0x%02x\n",
+ __FUNCTION__, rflags));
+ err = BCME_SDIO_ERROR;
+ }
+
+ return err;
+}
+
+extern int
+sdioh_abort(sdioh_info_t *sd, uint fnum)
+{
+ int ret;
+
+ sdstd_lock(sd);
+ ret = sdstd_abort(sd, fnum);
+ sdstd_unlock(sd);
+
+ return ret;
+}
+
+int
+sdioh_start(sdioh_info_t *sd, int stage)
+{
+ return SUCCESS;
+}
+
+int
+sdioh_stop(sdioh_info_t *sd)
+{
+ return SUCCESS;
+}
+
+int
+sdioh_waitlockfree(sdioh_info_t *sd)
+{
+ sdstd_waitlockfree(sd);
+ return SUCCESS;
+}
+
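+/* Error recovery summary (per the code below): clear SD_ErrorIntrStatus,
+ * issue a CMD line reset for command errors and a DAT line reset for data
+ * errors, then request an abort of the affected function for failed
+ * CMD52/CMD53 transfers.
+ */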
+static int
+sdstd_check_errs(sdioh_info_t *sdioh_info, uint32 cmd, uint32 arg)
+{
+ uint16 regval;
+ uint retries;
+ uint function = 0;
+
+ /* If no errors, we're done */
+ if ((regval = sdstd_rreg16(sdioh_info, SD_ErrorIntrStatus)) == 0)
+ return SUCCESS;
+
+#ifdef BCMQT
+ if (regval == 0xFFFF) {
+ /* XXX - Getting bogus errors under QT
+ * XXX - Not sure why; Just ignore for now
+ */
+ sd_err(("%s: Bogus SD_ErrorIntrStatus: 0x%x????\n", __FUNCTION__, regval));
+ sdstd_wreg16(sdioh_info, SD_ErrorIntrStatus, regval);
+ return SUCCESS;
+ }
+#endif
+
+ sd_info(("%s: ErrorIntrStatus 0x%04x (clearing), IntrStatus 0x%04x PresentState 0x%08x\n",
+ __FUNCTION__, regval, sdstd_rreg16(sdioh_info, SD_IntrStatus),
+ sdstd_rreg(sdioh_info, SD_PresentState)));
+ sdstd_wreg16(sdioh_info, SD_ErrorIntrStatus, regval);
+
+ if (cmd == SDIOH_CMD_14) {
+ if (regval & ERRINT_CMD_TIMEOUT_BIT) {
+ /* PR 101351: sdiod_aos sleep followed by immediate wakeup
+ * before sdiod_aos takes over has a problem.
+ * Getting command timeouts while exiting sleep
+ * with CMD14. Ignore this error due to this PR.
+ */
+ regval &= ~ERRINT_CMD_TIMEOUT_BIT;
+ }
+ }
+
+ /* On command error, issue CMD reset */
+ if (regval & ERRINT_CMD_ERRS) {
+ sd_trace(("%s: issuing CMD reset\n", __FUNCTION__));
+ sdstd_wreg8(sdioh_info, SD_SoftwareReset, SFIELD(0, SW_RESET_CMD, 1));
+ for (retries = RETRIES_LARGE; retries; retries--)
+ if (!(GFIELD(sdstd_rreg8(sdioh_info, SD_SoftwareReset), SW_RESET_CMD)))
+ break;
+ if (!retries) {
+ sd_err(("%s: Timeout waiting for CMD line reset\n", __FUNCTION__));
+ }
+ }
+
+ /* On data error, issue DAT reset */
+ if (regval & ERRINT_DATA_ERRS) {
+ if (regval & ERRINT_ADMA_BIT)
+ sd_err(("%s:ADMAError: status:0x%x\n",
+ __FUNCTION__, sdstd_rreg(sdioh_info, SD_ADMA_ErrStatus)));
+ sd_trace(("%s: issuing DAT reset\n", __FUNCTION__));
+ sdstd_wreg8(sdioh_info, SD_SoftwareReset, SFIELD(0, SW_RESET_DAT, 1));
+ for (retries = RETRIES_LARGE; retries; retries--)
+ if (!(GFIELD(sdstd_rreg8(sdioh_info, SD_SoftwareReset), SW_RESET_DAT)))
+ break;
+ if (!retries) {
+ sd_err(("%s: Timeout waiting for DAT line reset\n", __FUNCTION__));
+ }
+ }
+
+ /* For an IO command (CMD52 or CMD53) issue an abort to the appropriate function */
+ if (cmd == SDIOH_CMD_53)
+ function = GFIELD(arg, CMD53_FUNCTION);
+ else if (cmd == SDIOH_CMD_52) {
+ /* PR 101351: sdiod_aos sleep followed by immediate wakeup
+ * before sdiod_aos takes over has a problem.
+ */
+ if (GFIELD(arg, CMD52_REG_ADDR) != F1_SLEEPCSR_ADDR)
+ function = GFIELD(arg, CMD52_FUNCTION);
+ }
+ if (function) {
+ sd_trace(("%s: requesting abort for function %d after cmd %d\n",
+ __FUNCTION__, function, cmd));
+ sdstd_abort(sdioh_info, function);
+ }
+
+ if (trap_errs)
+ ASSERT(0);
+
+ return ERROR;
+}
+
+#ifdef BCMINTERNAL
+extern SDIOH_API_RC
+sdioh_test_diag(sdioh_info_t *sd)
+{
+ sd_err(("%s: Implement me\n", __FUNCTION__));
+ return (0);
+}
+#endif /* BCMINTERNAL */
+
+/*
+ * Private/Static work routines
+ */
+static bool
+sdstd_reset(sdioh_info_t *sd, bool host_reset, bool client_reset)
+{
+ int retries = RETRIES_LARGE;
+ uchar regval;
+
+ if (!sd)
+ return TRUE;
+
+ sdstd_lock(sd);
+ /* Reset client card */
+ if (client_reset && (sd->adapter_slot != -1)) {
+ if (sdstd_card_regwrite(sd, 0, SDIOD_CCCR_IOABORT, 1, 0x8) != SUCCESS)
+ sd_err(("%s: Cannot write to card reg 0x%x\n",
+ __FUNCTION__, SDIOD_CCCR_IOABORT));
+ else
+ sd->card_rca = 0;
+ }
+
+ /* Reset host controller */
+ if (host_reset) {
+ regval = SFIELD(0, SW_RESET_ALL, 1);
+ sdstd_wreg8(sd, SD_SoftwareReset, regval);
+ do {
+ sd_trace(("%s: waiting for reset\n", __FUNCTION__));
+ } while ((sdstd_rreg8(sd, SD_SoftwareReset) & regval) && retries--);
+
+ if (!retries) {
+ sd_err(("%s: Timeout waiting for host reset\n", __FUNCTION__));
+ sdstd_unlock(sd);
+ return (FALSE);
+ }
+
+ /* A reset should reset bus back to 1 bit mode */
+ sd->sd_mode = SDIOH_MODE_SD1;
+ sdstd_set_dma_mode(sd, sd->sd_dma_mode);
+ }
+ sdstd_unlock(sd);
+ return TRUE;
+}
+
+/* Disable device interrupt */
+void
+sdstd_devintr_off(sdioh_info_t *sd)
+{
+ sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
+ if (sd->use_client_ints) {
+ sd->intmask &= ~CLIENT_INTR;
+ sdstd_wreg16(sd, SD_IntrSignalEnable, sd->intmask);
+ sdstd_rreg16(sd, SD_IntrSignalEnable); /* Sync readback */
+ }
+}
+
+/* Enable device interrupt */
+void
+sdstd_devintr_on(sdioh_info_t *sd)
+{
+ ASSERT(sd->lockcount == 0);
+ sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
+ if (sd->use_client_ints) {
+ if (sd->version < HOST_CONTR_VER_3) {
+ uint16 status = sdstd_rreg16(sd, SD_IntrStatusEnable);
+ sdstd_wreg16(sd, SD_IntrStatusEnable, SFIELD(status, INTSTAT_CARD_INT, 0));
+ sdstd_wreg16(sd, SD_IntrStatusEnable, status);
+ }
+
+ sd->intmask |= CLIENT_INTR;
+ sdstd_wreg16(sd, SD_IntrSignalEnable, sd->intmask);
+ sdstd_rreg16(sd, SD_IntrSignalEnable); /* Sync readback */
+ }
+}
+
+#ifdef BCMSDYIELD
+/* Enable/disable other interrupts */
+void
+sdstd_intrs_on(sdioh_info_t *sd, uint16 norm, uint16 err)
+{
+ if (err) {
+ norm = SFIELD(norm, INTSTAT_ERROR_INT, 1);
+ sdstd_wreg16(sd, SD_ErrorIntrSignalEnable, err);
+ }
+
+ sd->intmask |= norm;
+ sdstd_wreg16(sd, SD_IntrSignalEnable, sd->intmask);
+ if (sd_forcerb)
+ sdstd_rreg16(sd, SD_IntrSignalEnable); /* Sync readback */
+}
+
+void
+sdstd_intrs_off(sdioh_info_t *sd, uint16 norm, uint16 err)
+{
+ if (err) {
+ norm = SFIELD(norm, INTSTAT_ERROR_INT, 1);
+ sdstd_wreg16(sd, SD_ErrorIntrSignalEnable, 0);
+ }
+
+ sd->intmask &= ~norm;
+ sdstd_wreg16(sd, SD_IntrSignalEnable, sd->intmask);
+ if (sd_forcerb)
+ sdstd_rreg16(sd, SD_IntrSignalEnable); /* Sync readback */
+}
+#endif /* BCMSDYIELD */
+
+static int
+sdstd_host_init(sdioh_info_t *sd)
+{
+ int num_slots, full_slot;
+ uint8 reg8;
+ uint32 card_ins;
+ int slot, first_bar = 0;
+ bool detect_slots = FALSE;
+#ifdef _WIN32
+ NDIS_PHYSICAL_ADDRESS bar;
+#else
+ uint bar;
+#endif
+
+ /* Check for Arasan ID */
+ if ((OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) & 0xFFFF) == VENDOR_SI_IMAGE) {
+ sd_info(("%s: Found Arasan Standard SDIO Host Controller\n", __FUNCTION__));
+ sd->controller_type = SDIOH_TYPE_ARASAN_HDK;
+ detect_slots = TRUE;
+ /* Controller supports SDMA, so turn it on here. */
+ sd->sd_dma_mode = DMA_MODE_SDMA;
+ } else if ((OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) & 0xFFFF) == VENDOR_BROADCOM) {
+ sd_info(("%s: Found Broadcom 27xx Standard SDIO Host Controller\n", __FUNCTION__));
+ sd->controller_type = SDIOH_TYPE_BCM27XX;
+ detect_slots = FALSE;
+ } else if ((OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) & 0xFFFF) == VENDOR_TI) {
+ sd_info(("%s: Found TI PCIxx21 Standard SDIO Host Controller\n", __FUNCTION__));
+ sd->controller_type = SDIOH_TYPE_TI_PCIXX21;
+ detect_slots = TRUE;
+ } else if ((OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) & 0xFFFF) == VENDOR_RICOH) {
+ sd_info(("%s: Ricoh Co Ltd R5C822 SD/SDIO/MMC/MS/MSPro Host Adapter\n",
+ __FUNCTION__));
+ sd->controller_type = SDIOH_TYPE_RICOH_R5C822;
+ detect_slots = TRUE;
+ } else if ((OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) & 0xFFFF) == VENDOR_JMICRON) {
+ sd_info(("%s: JMicron Standard SDIO Host Controller\n",
+ __FUNCTION__));
+ sd->controller_type = SDIOH_TYPE_JMICRON;
+ detect_slots = TRUE;
+#ifdef BCMINTERNAL
+ } else if ((OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) & 0xFFFF) == VENDOR_JINVANI) {
+ sd_info(("%s: Found Jinvani Standard SDIO Host Controller\n", __FUNCTION__));
+ detect_slots = FALSE;
+ sd->controller_type = SDIOH_TYPE_JINVANI_GOLD;
+#endif /* BCMINTERNAL */
+ } else {
+ return ERROR;
+ }
+
+ /*
+ * Determine num of slots
+ * Search each slot
+ */
+
+ first_bar = OSL_PCI_READ_CONFIG(sd->osh, SD_SlotInfo, 4) & 0x7;
+ num_slots = (OSL_PCI_READ_CONFIG(sd->osh, SD_SlotInfo, 4) & 0xff) >> 4;
+ num_slots &= 7;
+ num_slots++; /* map bits to num slots according to spec */
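+	/* Example decode: an SD_SlotInfo value of 0x10 means first BAR 0 and
+	 * ((0x10 >> 4) & 7) + 1 = 2 slots.
+	 */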
+
+ /* XXX Since the sdio20h core does not present the proper SD_SlotInfo
+ * register at PCI config space offset 0x40, fake it here. Also,
+ * set the BAR0 window to point to the sdio20h core.
+ */
+ if (OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) ==
+ ((SDIOH_FPGA_ID << 16) | VENDOR_BROADCOM)) {
+ sd_err(("%s: Found Broadcom Standard SDIO Host Controller FPGA\n", __FUNCTION__));
+ /* Set BAR0 Window to SDIOSTH core */
+ OSL_PCI_WRITE_CONFIG(sd->osh, PCI_BAR0_WIN, 4, 0x18001000);
+
+ /* Set defaults particular to this controller. */
+ detect_slots = TRUE;
+ num_slots = 1;
+ first_bar = 0;
+
+ /* Controller supports ADMA2, so turn it on here. */
+ sd->sd_dma_mode = DMA_MODE_ADMA2;
+ }
+
+ /* Map in each slot on the board and query it to see if a
+ * card is inserted. Use the first populated slot found.
+ */
+ if (sd->mem_space) {
+ sdstd_reg_unmap(sd->osh, (ulong)sd->mem_space, SDIOH_REG_WINSZ);
+ sd->mem_space = NULL;
+ }
+
+ full_slot = -1;
+
+ for (slot = 0; slot < num_slots; slot++) {
+/* XXX: Ugly define, is there a better way? */
+#ifdef _WIN32
+ bar.HighPart = 0;
+ bar.LowPart = OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_BAR0
+ + (4*(slot + first_bar)), 4);
+ sd->mem_space = (volatile char *)sdstd_reg_map(sd->osh,
+ (int32)&bar, SDIOH_REG_WINSZ);
+#else
+ bar = OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_BAR0 + (4*(slot + first_bar)), 4);
+ sd->mem_space = (volatile char *)sdstd_reg_map(sd->osh,
+ (uintptr)bar, SDIOH_REG_WINSZ);
+#endif
+
+ sd->adapter_slot = -1;
+
+ if (detect_slots) {
+ card_ins = GFIELD(sdstd_rreg(sd, SD_PresentState), PRES_CARD_PRESENT);
+ } else {
+ card_ins = TRUE;
+ }
+
+ if (card_ins) {
+ sd_info(("%s: SDIO slot %d: Full\n", __FUNCTION__, slot));
+ if (full_slot < 0)
+ full_slot = slot;
+ } else {
+ sd_info(("%s: SDIO slot %d: Empty\n", __FUNCTION__, slot));
+ }
+
+ if (sd->mem_space) {
+ sdstd_reg_unmap(sd->osh, (ulong)sd->mem_space, SDIOH_REG_WINSZ);
+ sd->mem_space = NULL;
+ }
+ }
+
+ if (full_slot < 0) {
+ sd_err(("No slots on SDIO controller are populated\n"));
+ return -1;
+ }
+
+/* XXX: Ugly define, is there a better way? */
+#ifdef _WIN32
+ bar.HighPart = 0;
+ bar.LowPart = OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_BAR0 + (4*(full_slot + first_bar)), 4);
+ sd->mem_space = (volatile char *)sdstd_reg_map(sd->osh, (int32)&bar, SDIOH_REG_WINSZ);
+#else
+ bar = OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_BAR0 + (4*(full_slot + first_bar)), 4);
+ sd->mem_space = (volatile char *)sdstd_reg_map(sd->osh, (uintptr)bar, SDIOH_REG_WINSZ);
+#endif
+
+ sd_err(("Using slot %d at BAR%d [0x%08x] mem_space 0x%p\n",
+ full_slot,
+ (full_slot + first_bar),
+ OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_BAR0 + (4*(full_slot + first_bar)), 4),
+ sd->mem_space));
+
+ sd->adapter_slot = full_slot;
+
+ sd->version = sdstd_rreg16(sd, SD_HostControllerVersion) & 0xFF;
+ switch (sd->version) {
+ case 0:
+ sd_err(("Host Controller version 1.0, Vendor Revision: 0x%02x\n",
+ sdstd_rreg16(sd, SD_HostControllerVersion) >> 8));
+ break;
+ case 1:
+ sd_err(("Host Controller version 2.0, Vendor Revision: 0x%02x\n",
+ sdstd_rreg16(sd, SD_HostControllerVersion) >> 8));
+ break;
+ case 2:
+ sd_err(("Host Controller version 3.0, Vendor Revision: 0x%02x\n",
+ sdstd_rreg16(sd, SD_HostControllerVersion) >> 8));
+ break;
+ default:
+ sd_err(("%s: Host Controller version 0x%02x not supported.\n",
+ __FUNCTION__, sd->version));
+ break;
+ }
+
+ sd->caps = sdstd_rreg(sd, SD_Capabilities); /* Cache this for later use */
+ /* MSB 32 bits of caps supported in sdio 3.0 */
+ sd->caps3 = sdstd_rreg(sd, SD_Capabilities3); /* Cache this for later use */
+	/* read curr_caps before tracing so the trace shows the fresh value */
+	sd->curr_caps = sdstd_rreg(sd, SD_MaxCurCap);
+	sd3_trace(("sd3: %s: caps: 0x%x; MCCap:0x%x\n", __FUNCTION__, sd->caps, sd->curr_caps));
+	sd3_trace(("sd3: %s: caps3: 0x%x\n", __FUNCTION__, sd->caps3));
+
+ sd_info(("%s: caps: 0x%x; MCCap:0x%x\n", __FUNCTION__, sd->caps, sd->curr_caps));
+
+ sdstd_set_dma_mode(sd, sd->sd_dma_mode);
+
+#if defined(BCMINTERNAL)
+ if (OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) ==
+ ((SDIOH_FPGA_ID << 16) | VENDOR_BROADCOM)) {
+ sd_err(("* * * SDIO20H FPGA Build Date: 0x%04x\n", sdstd_rreg(sd, 0x110)));
+ }
+
+ if (GFIELD(sd->caps, CAP_MAXBLOCK) == 0x3) {
+ sd_info(("SD HOST CAPS: Max block size is INVALID\n"));
+ } else {
+ sd_info(("SD HOST CAPS: Max block size is %d bytes\n",
+ 512 << GFIELD(sd->caps, CAP_MAXBLOCK)));
+ }
+
+ sd_info(("SD HOST CAPS: 64-bit DMA is %ssupported.\n",
+ GFIELD(sd->caps, CAP_64BIT_HOST) ? "" : "not "));
+ sd_info(("SD HOST CAPS: Suspend/Resume is %ssupported.\n",
+ GFIELD(sd->caps, CAP_SUSPEND) ? "" : "not "));
+
+ sd_err(("SD HOST CAPS: SD Host supports "));
+ if (GFIELD(sd->caps, CAP_VOLT_3_3)) {
+ sd_err(("3.3V"));
+ if (GFIELD(sd->curr_caps, CAP_CURR_3_3)) {
+ sd_err(("@%dmA\n", 4*GFIELD(sd->curr_caps, CAP_CURR_3_3)));
+ }
+ }
+ if (GFIELD(sd->caps, CAP_VOLT_3_0)) {
+ sd_err((", 3.0V"));
+ if (GFIELD(sd->curr_caps, CAP_CURR_3_0)) {
+ sd_err(("@%dmA\n", 4*GFIELD(sd->curr_caps, CAP_CURR_3_0)));
+ }
+ }
+ if (GFIELD(sd->caps, CAP_VOLT_1_8)) {
+ sd_err((", 1.8V"));
+ if (GFIELD(sd->curr_caps, CAP_CURR_1_8)) {
+ sd_err(("@%dmA\n", 4*GFIELD(sd->curr_caps, CAP_CURR_1_8)));
+ }
+ }
+ sd_err(("\n"));
+#endif /* defined(BCMINTERNAL) */
+
+ sdstd_reset(sd, 1, 0);
+
+ /* Read SD4/SD1 mode */
+ if ((reg8 = sdstd_rreg8(sd, SD_HostCntrl))) {
+ if (reg8 & SD4_MODE) {
+ sd_err(("%s: Host cntrlr already in 4 bit mode: 0x%x\n",
+ __FUNCTION__, reg8));
+ }
+ }
+
+ /* Default power on mode is SD1 */
+ sd->sd_mode = SDIOH_MODE_SD1;
+ sd->polled_mode = TRUE;
+ sd->host_init_done = TRUE;
+ sd->card_init_done = FALSE;
+ sd->adapter_slot = full_slot;
+
+	/* XXX: If sd_uhsimode is disabled, use the HC in SDIO 2.0 mode. */
+ if (sd_uhsimode == SD3CLKMODE_DISABLED) {
+ sd->version = HOST_CONTR_VER_2;
+ sd3_trace(("%s:forcing to SDIO HC 2.0\n", __FUNCTION__));
+ }
+
+ if (sd->version == HOST_CONTR_VER_3) {
+ /* read host ctrl 2 */
+ uint16 reg16 = 0;
+ sd3_trace(("sd3: %s: HC3: reading additional regs\n", __FUNCTION__));
+
+ reg16 = sdstd_rreg16(sd, SD3_HostCntrl2);
+
+ sd_info(("%s: HCtrl: 0x%x; HCtrl2:0x%x\n", __FUNCTION__, reg8, reg16));
+ BCM_REFERENCE(reg16);
+
+ /* if HC supports 1.8V and one of the SDR/DDR modes, hc uhci support is PRESENT */
+ if ((GFIELD(sd->caps, CAP_VOLT_1_8)) &&
+ (GFIELD(sd->caps3, CAP3_SDR50_SUP) ||
+ GFIELD(sd->caps3, CAP3_SDR104_SUP) ||
+ GFIELD(sd->caps3, CAP3_DDR50_SUP)))
+ sd->host_UHSISupported = 1;
+ }
+
+#ifdef BCMQT
+ {
+ uint32 intmask;
+
+ /* FIX: force interrupts with QT sdio20 host */
+ /* pci cw [expr $def(configbase) +0x95] 1 2 */
+ intmask = OSL_PCI_READ_CONFIG(sd->osh, PCI_INT_MASK, 4);
+ intmask |= 0x0200;
+ OSL_PCI_WRITE_CONFIG(sd->osh, PCI_INT_MASK, 4, intmask);
+ }
+#endif
+ return (SUCCESS);
+}
+#define CMD5_RETRIES 200
+static int
+get_ocr(sdioh_info_t *sd, uint32 *cmd_arg, uint32 *cmd_rsp)
+{
+ int retries, status;
+
+ /* Get the Card's Operation Condition. Occasionally the board
+ * takes a while to become ready
+ */
+ retries = CMD5_RETRIES;
+ do {
+ *cmd_rsp = 0;
+ if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_5, *cmd_arg))
+ != SUCCESS) {
+ sd_err(("%s: CMD5 failed\n", __FUNCTION__));
+ return status;
+ }
+ sdstd_cmd_getrsp(sd, cmd_rsp, 1);
+ if (!GFIELD(*cmd_rsp, RSP4_CARD_READY))
+ sd_trace(("%s: Waiting for card to become ready\n", __FUNCTION__));
+ } while ((!GFIELD(*cmd_rsp, RSP4_CARD_READY)) && --retries);
+ if (!retries)
+ return ERROR;
+
+ return (SUCCESS);
+}
+
+static int
+sdstd_client_init(sdioh_info_t *sd)
+{
+ uint32 cmd_arg, cmd_rsp;
+ int status;
+ uint8 fn_ints;
+ uint32 regdata;
+ uint16 powerstat = 0;
+
+#ifdef BCMINTERNAL
+#ifdef NOTUSED
+ /* Handy routine to dump capabilities. */
+ static char caps_buf[500];
+ parse_caps(sd->caps, caps_buf, 500);
+ sd_err((caps_buf));
+#endif /* NOTUSED */
+#endif /* BCMINTERNAL */
+
+ sd_trace(("%s: Powering up slot %d\n", __FUNCTION__, sd->adapter_slot));
+
+ /* Clear any pending ints */
+ sdstd_wreg16(sd, SD_IntrStatus, 0x1fff);
+ sdstd_wreg16(sd, SD_ErrorIntrStatus, 0x0fff);
+
+ /* Enable both Normal and Error Status. This does not enable
+ * interrupts, it only enables the status bits to
+ * become 'live'
+ */
+
+ if (!sd->host_UHSISupported)
+ sdstd_wreg16(sd, SD_IntrStatusEnable, 0x1ff);
+ else
+ {
+		/* INT_x interrupts, but DO NOT enable signalling [enabling retuning
+ * will happen later]
+ */
+ sdstd_wreg16(sd, SD_IntrStatusEnable, 0x0fff);
+ }
+ sdstd_wreg16(sd, SD_ErrorIntrStatusEnable, 0xffff);
+
+ sdstd_wreg16(sd, SD_IntrSignalEnable, 0); /* Disable ints for now. */
+
+ if (sd->host_UHSISupported) {
+ /* when HC is started for SDIO 3.0 mode, start in lowest voltage mode first. */
+ powerstat = sdstd_start_power(sd, 1);
+ if (SDIO_OCR_READ_FAIL == powerstat) {
+			/* This could be because the device is 3.3V, and possibly does
+			 * not have sdio3.0 support. So, try the highest voltage
+			 */
+ sd_err(("sdstd_start_power: legacy device: trying highest voltage\n"));
+ sd_err(("%s failed\n", __FUNCTION__));
+ return ERROR;
+ } else if (TRUE != powerstat) {
+ sd_err(("sdstd_start_power failed\n"));
+ return ERROR;
+ }
+ } else
+ /* XXX legacy driver: start in highest voltage mode first.
+	 * CAUTION: trying to start a legacy dhd with an sdio3.0 HC and sdio3.0 device could
+	 * burn the sdio3.0 device if the device has started in 1.8V.
+ */
+ if (TRUE != sdstd_start_power(sd, 0)) {
+ sd_err(("sdstd_start_power failed\n"));
+ return ERROR;
+ }
+
+ if (sd->num_funcs == 0) {
+ sd_err(("%s: No IO funcs!\n", __FUNCTION__));
+ return ERROR;
+ }
+
+ /* In SPI mode, issue CMD0 first */
+ if (sd->sd_mode == SDIOH_MODE_SPI) {
+ cmd_arg = 0;
+ if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_0, cmd_arg))
+ != SUCCESS) {
+ sd_err(("BCMSDIOH: cardinit: CMD0 failed!\n"));
+ return status;
+ }
+ }
+
+ if (sd->sd_mode != SDIOH_MODE_SPI) {
+ uint16 rsp6_status;
+
+ /* Card is operational. Ask it to send an RCA */
+ cmd_arg = 0;
+ if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_3, cmd_arg))
+ != SUCCESS) {
+ sd_err(("%s: CMD3 failed!\n", __FUNCTION__));
+ return status;
+ }
+
+ /* Verify the card status returned with the cmd response */
+ sdstd_cmd_getrsp(sd, &cmd_rsp, 1);
+ rsp6_status = GFIELD(cmd_rsp, RSP6_STATUS);
+ if (GFIELD(rsp6_status, RSP6STAT_COM_CRC_ERROR) ||
+ GFIELD(rsp6_status, RSP6STAT_ILLEGAL_CMD) ||
+ GFIELD(rsp6_status, RSP6STAT_ERROR)) {
+ sd_err(("%s: CMD3 response error. Response = 0x%x!\n",
+ __FUNCTION__, rsp6_status));
+ return ERROR;
+ }
+
+ /* Save the Card's RCA */
+ sd->card_rca = GFIELD(cmd_rsp, RSP6_IO_RCA);
+ sd_info(("RCA is 0x%x\n", sd->card_rca));
+
+ if (rsp6_status)
+ sd_err(("raw status is 0x%x\n", rsp6_status));
+
+ /* Select the card */
+ cmd_arg = SFIELD(0, CMD7_RCA, sd->card_rca);
+ if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_7, cmd_arg))
+ != SUCCESS) {
+ sd_err(("%s: CMD7 failed!\n", __FUNCTION__));
+ return status;
+ }
+ sdstd_cmd_getrsp(sd, &cmd_rsp, 1);
+ if (cmd_rsp != SDIOH_CMD7_EXP_STATUS) {
+ sd_err(("%s: CMD7 response error. Response = 0x%x!\n",
+ __FUNCTION__, cmd_rsp));
+ return ERROR;
+ }
+ }
+
+ /* Disable default/power-up device Card Detect (CD) pull up resistor on DAT3
+ * via CCCR bus interface control register. Set CD disable bit while leaving
+ * others alone.
+ */
+ if (sdstd_card_regread (sd, 0, SDIOD_CCCR_BICTRL, 1, &regdata) != SUCCESS) {
+ sd_err(("Disabling card detect: read of device CCCR BICTRL register failed\n"));
+ return ERROR;
+ }
+ regdata |= BUS_CARD_DETECT_DIS;
+
+ if (sdstd_card_regwrite (sd, 0, SDIOD_CCCR_BICTRL, 1, regdata) != SUCCESS) {
+ sd_err(("Disabling card detect: write of device CCCR BICTRL register failed\n"));
+ return ERROR;
+ }
+
+ sdstd_card_enablefuncs(sd);
+
+ if (!sdstd_bus_width(sd, sd_sdmode)) {
+ sd_err(("sdstd_bus_width failed\n"));
+ return ERROR;
+ }
+
+ set_client_block_size(sd, 1, sd_f1_blocksize);
+ fn_ints = INTR_CTL_FUNC1_EN;
+
+ if (sd->num_funcs >= 2) {
+ /* XXX Device side can't handle 512 yet */
+ set_client_block_size(sd, 2, sd_f2_blocksize /* BLOCK_SIZE_4328 */);
+ fn_ints |= INTR_CTL_FUNC2_EN;
+ }
+
+ /* Enable/Disable Client interrupts */
+ /* Turn on here but disable at host controller? */
+ if (sdstd_card_regwrite(sd, 0, SDIOD_CCCR_INTEN, 1,
+ (fn_ints | INTR_CTL_MASTER_EN)) != SUCCESS) {
+ sd_err(("%s: Could not enable ints in CCCR\n", __FUNCTION__));
+ return ERROR;
+ }
+
+ if (sd_uhsimode != SD3CLKMODE_DISABLED) {
+ /* Switch to High-speed clocking mode if both host and device support it */
+ if (sdstd_3_clock_wrapper(sd) != SUCCESS) {
+ sd_err(("sdstd_3_clock_wrapper failed\n"));
+ return ERROR;
+ }
+ } else
+ {
+ if (sdstd_clock_wrapper(sd)) {
+			sd_err(("sdstd_clock_wrapper failed\n"));
+ return ERROR;
+ }
+ }
+ sd->card_init_done = TRUE;
+
+ return SUCCESS;
+}
+
+static int
+sdstd_clock_wrapper(sdioh_info_t *sd)
+{
+ sd_trace(("%s:Enter\n", __FUNCTION__));
+ /* After configuring for High-Speed mode, set the desired clock rate. */
+ sdstd_set_highspeed_mode(sd, (bool)sd_hiok);
+
+ if (FALSE == sdstd_start_clock(sd, (uint16)sd_divisor)) {
+ sd_err(("sdstd_start_clock failed\n"));
+ return ERROR;
+ }
+ return SUCCESS;
+}
+
+static int
+sdstd_3_clock_wrapper(sdioh_info_t *sd)
+{
+ int retclk = 0;
+ sd_info(("%s: Enter\n", __FUNCTION__));
+ if (sd->card_UHSI_voltage_Supported) {
+ /* check if clk config requested is supported by both host and target. */
+ retclk = sdstd_3_get_matching_uhsi_clkmode(sd, sd_uhsimode);
+
+ /* if no match for requested caps, try to get the max match possible */
+ if (retclk == -1) {
+ /* if auto enabled */
+ if (sd3_autoselect_uhsi_max == 1) {
+ retclk = sdstd_3_get_matching_uhsi_clkmode(sd, SD3CLKMODE_AUTO);
+ /* still NO match */
+ if (retclk == -1) {
+ /* NO match with HC and card capabilities. Now try the
+ * High speed/legacy mode if possible.
+ */
+
+ sd_err(("%s: Not able to set requested clock\n",
+ __FUNCTION__));
+ return ERROR;
+ }
+ } else {
+ /* means user doesn't want auto clock. So return ERROR */
+				sd_err(("%s: Not able to set requested clock; try "
+					"auto mode\n", __FUNCTION__));
+ return ERROR;
+ }
+ }
+
+ if (retclk != -1) {
+ /* set the current clk to be selected clock */
+ sd_uhsimode = retclk;
+
+ if (BCME_OK != sdstd_3_set_highspeed_uhsi_mode(sd, sd_uhsimode)) {
+ sd_err(("%s: Not able to set requested clock\n", __FUNCTION__));
+ return ERROR;
+ }
+ } else {
+ /* try legacy mode */
+ if (SUCCESS != sdstd_clock_wrapper(sd)) {
+				sd_err(("sdstd_clock_wrapper failed\n"));
+ return ERROR;
+ }
+ }
+ } else {
+ sd_info(("%s: Legacy Mode Clock\n", __FUNCTION__));
+ /* try legacy mode */
+ if (SUCCESS != sdstd_clock_wrapper(sd)) {
+ sd_err(("%s sdstd_clock_wrapper failed\n", __FUNCTION__));
+ return ERROR;
+ }
+ }
+ return SUCCESS;
+}
+
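+/* Tuning loop sketch (as implemented below): set HOSTCtrl2_EXEC_TUNING, then
+ * repeatedly issue CMD19 and drain the 64-byte tuning block until the
+ * controller clears EXEC_TUNING or MAX_TUNING_ITERS is exceeded; success
+ * additionally requires HOSTCtrl2_SAMPCLK_SEL to end up set.
+ */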
+int
+sdstd_3_clk_tuning(sdioh_info_t *sd, uint32 sd3ClkMode)
+{
+ int status, lcount = 0, brr_count = 0;
+ uint16 val1 = 0, bufready = 0;
+ uint32 val2 = 0;
+ uint8 phase_info_local = 0;
+
+ sd3_trace(("sd3: %s: Enter\n", __FUNCTION__));
+ /* if (NOT SDR104) OR
+ * (SDR_50 AND sdr50_tuning_reqd is NOT enabled)
+ * return success, as tuning not reqd.
+ */
+ if (!sd->sd3_tuning_reqd) {
+ sd_info(("%s: Tuning NOT reqd!\n", __FUNCTION__));
+ return SUCCESS;
+ }
+
+ /* execute tuning procedure */
+
+ /* enable Buffer ready status. [donot enable the interrupt right now] */
+ /* Execute tuning */
+ sd_trace(("sd3: %s: Execute tuning\n", __FUNCTION__));
+ val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
+ val1 = SFIELD(val1, HOSTCtrl2_EXEC_TUNING, 1);
+ sdstd_wreg16(sd, SD3_HostCntrl2, val1);
+
+ do {
+ sd3_trace(("sd3: %s: cmd19 issue\n", __FUNCTION__));
+ /* Issue cmd19 */
+ if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_19, 0))
+ != SUCCESS) {
+ sd_err(("%s: CMD19 failed\n", __FUNCTION__));
+ val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
+ val1 = SFIELD(val1, HOSTCtrl2_EXEC_TUNING, 0);
+ val1 = SFIELD(val1, HOSTCtrl2_SAMPCLK_SEL, 0);
+ sdstd_wreg16(sd, SD3_HostCntrl2, val1);
+ return status;
+ }
+
+ /* wait for buffer read ready */
+ brr_count = 0;
+ do {
+ bufready = sdstd_rreg16(sd, SD_IntrStatus);
+
+ if (GFIELD(bufready, INTSTAT_BUF_READ_READY))
+ break;
+
+			/* delay after checking bufready because INTSTAT_BUF_READ_READY
+			 * has most likely been set already by the first check
+			 */
+ OSL_DELAY(1);
+ } while (++brr_count < CLKTUNING_MAX_BRR_RETRIES);
+
+ /* buffer read ready timedout */
+ if (brr_count == CLKTUNING_MAX_BRR_RETRIES) {
+ sd_err(("%s: TUNINGFAILED: BRR response timedout!\n",
+ __FUNCTION__));
+ val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
+ val1 = SFIELD(val1, HOSTCtrl2_EXEC_TUNING, 0);
+ val1 = SFIELD(val1, HOSTCtrl2_SAMPCLK_SEL, 0);
+ sdstd_wreg16(sd, SD3_HostCntrl2, val1);
+ return ERROR;
+ }
+
+ /* In response to CMD19 card will send 64 magic bytes.
+ * Current Aizyc HC h/w doesn't auto clear those bytes.
+		 * So read the 64 bytes sent by the card.
+		 * Aizyc needs to implement an auto clear in hw.
+ */
+ if (sd3_sw_read_magic_bytes == TRUE)
+ {
+ uint8 l_cnt_1 = 0;
+ uint32 l_val_1 = 0;
+ for (l_cnt_1 = 0; l_cnt_1 < 16; l_cnt_1++) {
+ l_val_1 = sdstd_rreg(sd, SD_BufferDataPort0);
+ sd_trace(("%s:l_val_1 = 0x%x", __FUNCTION__, l_val_1));
+ }
+ BCM_REFERENCE(l_val_1);
+ }
+
+ /* clear BuffReadReady int */
+ bufready = SFIELD(bufready, INTSTAT_BUF_READ_READY, 1);
+ sdstd_wreg16(sd, SD_IntrStatus, bufready);
+
+ /* wait before continuing */
+ /* OSL_DELAY(PER_TRY_TUNING_DELAY_MS * 1000); */ /* Not required */
+
+ /* check execute tuning bit */
+ val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
+ if (!GFIELD(val1, HOSTCtrl2_EXEC_TUNING)) {
+ /* done tuning, break from loop */
+ break;
+ }
+
+ /* max tuning iterations exceeded */
+ if (lcount++ > MAX_TUNING_ITERS) {
+			sd_err(("%s: TUNINGFAILED: Max tuning iterations "
+				"exceeded!\n", __FUNCTION__));
+ val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
+ val1 = SFIELD(val1, HOSTCtrl2_EXEC_TUNING, 0);
+ val1 = SFIELD(val1, HOSTCtrl2_SAMPCLK_SEL, 0);
+ sdstd_wreg16(sd, SD3_HostCntrl2, val1);
+ return ERROR;
+ }
+ } while (1);
+
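+	/* SD3_Tuning_Info_Register layout as decoded below: bits [13:8] hold
+	 * the phases-passed bitmap, bits [17:15] the phase selected by tuning.
+	 */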
+ val2 = sdstd_rreg(sd, SD3_Tuning_Info_Register);
+ phase_info_local = ((val2>>15)& 0x7);
+ sd_info(("Phase passed info: 0x%x\n", (val2>>8)& 0x3F));
+ sd_info(("Phase selected post tune: 0x%x\n", phase_info_local));
+
+ if (phase_info_local > SDSTD_MAX_TUNING_PHASE) {
+ sd_err(("!!Phase selected:%x\n", phase_info_local));
+ }
+
+ /* check sampling clk select */
+ val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
+ if (!GFIELD(val1, HOSTCtrl2_SAMPCLK_SEL)) {
+ /* error in selecting clk */
+ sd_err(("%s: TUNINGFAILED: SamplClkSel failed!\n", __FUNCTION__));
+ val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
+ val1 = SFIELD(val1, HOSTCtrl2_EXEC_TUNING, 0);
+ val1 = SFIELD(val1, HOSTCtrl2_SAMPCLK_SEL, 0);
+ sdstd_wreg16(sd, SD3_HostCntrl2, val1);
+ return ERROR;
+ }
+/* done: */
+ sd_info(("%s: TUNING Success!\n", __FUNCTION__));
+ return SUCCESS;
+}
+
+void
+sdstd_3_enable_retuning_int(sdioh_info_t *sd)
+{
+ uint16 raw_int;
+ unsigned long flags;
+
+ sdstd_os_lock_irqsave(sd, &flags);
+ raw_int = sdstd_rreg16(sd, SD_IntrSignalEnable);
+ sdstd_wreg16(sd, SD_IntrSignalEnable, (raw_int | HC_INTR_RETUNING));
+ /* Enable retuning status */
+ raw_int = sdstd_rreg16(sd, SD_IntrStatusEnable);
+ sdstd_wreg16(sd, SD_IntrStatusEnable, (raw_int | HC_INTR_RETUNING));
+ sdstd_os_unlock_irqrestore(sd, &flags);
+}
+
+void
+sdstd_3_disable_retuning_int(sdioh_info_t *sd)
+{
+ uint16 raw_int;
+ unsigned long flags;
+
+ sdstd_os_lock_irqsave(sd, &flags);
+ sd->intmask &= ~HC_INTR_RETUNING;
+ raw_int = sdstd_rreg16(sd, SD_IntrSignalEnable);
+ sdstd_wreg16(sd, SD_IntrSignalEnable, (raw_int & (~HC_INTR_RETUNING)));
+ /* Disable retuning status */
+ raw_int = sdstd_rreg16(sd, SD_IntrStatusEnable);
+ sdstd_wreg16(sd, SD_IntrStatusEnable, (raw_int & (~HC_INTR_RETUNING)));
+ sdstd_os_unlock_irqrestore(sd, &flags);
+}
+
+bool
+sdstd_3_is_retuning_int_set(sdioh_info_t *sd)
+{
+ uint16 raw_int;
+
+ raw_int = sdstd_rreg16(sd, SD_IntrStatus);
+
+ if (GFIELD(raw_int, INTSTAT_RETUNING_INT))
+ return TRUE;
+
+ return FALSE;
+}
+
+/*
+	Assumption: sd3ClkMode has already been checked to be present in both
+	host/card capabilities before entering this function. VALID values for
+	sd3ClkMode here: SD3CLKMODE_2, 3, 4 [0 and 1 are NOT supported, as they
+	are legacy]. For that check, call sdstd_3_get_matching_uhsi_clkmode().
+*/
+static int
+sdstd_3_set_highspeed_uhsi_mode(sdioh_info_t *sd, int sd3ClkMode)
+{
+ uint32 drvstrn;
+ int status;
+ uint8 hc_reg8;
+ uint16 val1 = 0, presetval = 0;
+ uint32 regdata;
+
+ sd3_trace(("sd3: %s:enter:clkmode:%d\n", __FUNCTION__, sd3ClkMode));
+
+ hc_reg8 = sdstd_rreg8(sd, SD_HostCntrl);
+
+ if (HOST_SDR_UNSUPP == sd->global_UHSI_Supp) {
+ sd_err(("%s:Trying to set clk with unsupported global support\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ /* get [double check, as this is already done in
+ sdstd_3_get_matching_uhsi_clkmode] drvstrn
+ */
+ if (!sdstd_3_get_matching_drvstrn(sd, sd3ClkMode, &drvstrn, &presetval)) {
+		sd_err(("%s:DRVStrn mismatch!: card strn:0x%x; HC preset "
+			"val:0x%x\n", __FUNCTION__, drvstrn, presetval));
+ return BCME_SDIO_ERROR;
+ }
+
+ /* also set driver type select in CCCR */
+ if ((status = sdstd_card_regwrite(sd, 0, SDIOD_CCCR_DRIVER_STRENGTH,
+ 1, drvstrn)) != BCME_OK) {
+ sd_err(("%s:Setting SDIOD_CCCR_DRIVER_STRENGTH in card Failed!\n", __FUNCTION__));
+ return BCME_SDIO_ERROR;
+ }
+
+ /* ********** change Bus speed select in device */
+ if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
+ 1, &regdata)) != SUCCESS) {
+ sd_err(("%s:FAILED 1\n", __FUNCTION__));
+ return BCME_SDIO_ERROR;
+ }
+	sd_info(("Attempting to change BSS. Current val:0x%x\n", regdata));
+
+ if (regdata & SDIO_SPEED_SHS) {
+ sd_info(("Device supports High-Speed mode.\n"));
+ /* clear existing BSS */
+ regdata &= ~0xE;
+
+ regdata |= (sd3ClkMode << 1);
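+		/* BSS occupies bits [3:1] of the CCCR speed control register;
+		 * e.g., assuming SD3CLKMODE_2_SDR50 is 2 (per its name), SDR50
+		 * writes 010b there, i.e. regdata |= 0x4.
+		 */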
+
+ sd_info(("Writing %08x to Card at %08x\n",
+ regdata, SDIOD_CCCR_SPEED_CONTROL));
+ if ((status = sdstd_card_regwrite(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
+ 1, regdata)) != BCME_OK) {
+ sd_err(("%s:FAILED 2\n", __FUNCTION__));
+ return BCME_SDIO_ERROR;
+ }
+
+ if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
+ 1, &regdata)) != BCME_OK) {
+ sd_err(("%s:FAILED 3\n", __FUNCTION__));
+ return BCME_SDIO_ERROR;
+ }
+
+ sd_info(("Read %08x from Card at %08x\n", regdata, SDIOD_CCCR_SPEED_CONTROL));
+ }
+ else {
+ sd_err(("Device does not support High-Speed Mode.\n"));
+ }
+
+ /* SD Clock Enable = 0 */
+ sdstd_wreg16(sd, SD_ClockCntrl,
+ sdstd_rreg16(sd, SD_ClockCntrl) & ~((uint16)0x4));
+
+ /* set to HighSpeed mode */
+ /* TBD: is these to change SD_HostCntrl reqd for UHSI? */
+ hc_reg8 = SFIELD(hc_reg8, HOST_HI_SPEED_EN, 1);
+ sdstd_wreg8(sd, SD_HostCntrl, hc_reg8);
+
+ /* set UHS Mode select in HC2 and also set preset */
+ val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
+ val1 = SFIELD(val1, HOSTCtrl2_UHSMODE_SEL, sd3ClkMode);
+ if (TRUE != sd3_sw_override1) {
+ val1 = SFIELD(val1, HOSTCtrl2_PRESVAL_EN, 1);
+ } else {
+		/* set HC registers manually using the retrieved values */
+ /* *set drvstrn */
+ val1 = SFIELD(val1, HOSTCtrl2_DRIVSTRENGTH_SEL,
+ GFIELD(presetval, PRESET_DRIVR_SELECT));
+ val1 = SFIELD(val1, HOSTCtrl2_PRESVAL_EN, 0);
+ }
+
+ /* finally write Hcontrol2 */
+ sdstd_wreg16(sd, SD3_HostCntrl2, val1);
+
+ sd_err(("%s:HostCtrl2 final value:0x%x\n", __FUNCTION__, val1));
+
+ /* start clock : clk will be enabled inside. */
+ if (FALSE == sdstd_start_clock(sd, GFIELD(presetval, PRESET_CLK_DIV))) {
+ sd_err(("sdstd_start_clock failed\n"));
+ return ERROR;
+ }
+
+ /* execute first tuning procedure */
+ if (!sd3_sw_override1) {
+ if (SD3_TUNING_REQD(sd, sd3ClkMode)) {
+ sd_err(("%s: Tuning start..\n", __FUNCTION__));
+ sd->sd3_tuning_reqd = TRUE;
+ /* TBD: first time: enabling INT's could be problem? */
+ sdstd_3_start_tuning(sd);
+ }
+ else
+ sd->sd3_tuning_reqd = FALSE;
+ }
+
+ return BCME_OK;
+}
+
+/* Check & do tuning if required */
+void sdstd_3_check_and_do_tuning(sdioh_info_t *sd, int tuning_param)
+{
+ int retries = 0;
+
+ if (!sd->sd3_tuning_disable && sd->sd3_tuning_reqd) {
+ sd3_trace(("sd3: %s: tuning reqd\n", __FUNCTION__));
+ if (tuning_param == CHECK_TUNING_PRE_DATA) {
+ if (sd->sd3_tun_state == TUNING_ONGOING) {
+ retries = RETRIES_SMALL;
+ /* check if tuning is already going on */
+ while ((GFIELD(sdstd_rreg(sd, SD3_HostCntrl2),
+ HOSTCtrl2_EXEC_TUNING)) && retries--) {
+ if (retries == RETRIES_SMALL)
+ sd_err(("%s: Waiting for Tuning to complete\n",
+ __FUNCTION__));
+ }
+
+ if (!retries) {
+ sd_err(("%s: Tuning wait timeout\n", __FUNCTION__));
+ if (trap_errs)
+ ASSERT(0);
+ }
+ } else if (sd->sd3_tun_state == TUNING_START) {
+ /* check and start tuning if required. */
+ sd3_trace(("sd3 : %s : Doing Tuning before Data Transfer\n",
+ __FUNCTION__));
+ sdstd_3_start_tuning(sd);
+ }
+ } else if (tuning_param == CHECK_TUNING_POST_DATA) {
+ if (sd->sd3_tun_state == TUNING_START_AFTER_DAT) {
+ sd3_trace(("sd3: %s: tuning start\n", __FUNCTION__));
+ /* check and start tuning if required. */
+ sdstd_3_start_tuning(sd);
+ }
+ }
+ }
+}
+/* Need to run this function in interrupt-disabled context */
+bool sdstd_3_check_and_set_retuning(sdioh_info_t *sd)
+{
+ sd3_trace(("sd3: %s:\n", __FUNCTION__));
+
+ /* if already initiated, just return without anything */
+ if ((sd->sd3_tun_state == TUNING_START) ||
+ (sd->sd3_tun_state == TUNING_ONGOING) ||
+ (sd->sd3_tun_state == TUNING_START_AFTER_DAT)) {
+ /* do nothing */
+ return FALSE;
+ }
+
+ if (sd->sd3_dat_state == DATA_TRANSFER_IDLE) {
+ sd->sd3_tun_state = TUNING_START; /* tuning to be started by the tasklet */
+ return TRUE;
+ } else {
+ /* tuning to be started after finishing the existing data transfer */
+ sd->sd3_tun_state = TUNING_START_AFTER_DAT;
+ }
+ return FALSE;
+}
+
+int sdstd_3_get_data_state(sdioh_info_t *sd)
+{
+ return sd->sd3_dat_state;
+}
+
+void sdstd_3_set_data_state(sdioh_info_t *sd, int state)
+{
+ sd->sd3_dat_state = state;
+}
+
+int sdstd_3_get_tune_state(sdioh_info_t *sd)
+{
+ return sd->sd3_tun_state;
+}
+
+void sdstd_3_set_tune_state(sdioh_info_t *sd, int state)
+{
+ sd->sd3_tun_state = state;
+}
+
+uint8 sdstd_3_get_tuning_exp(sdioh_info_t *sd)
+{
+ if (sd_tuning_period == CAP3_RETUNING_TC_OTHER) {
+ return GFIELD(sd->caps3, CAP3_RETUNING_TC);
+ } else {
+ return (uint8)sd_tuning_period;
+ }
+}
+
+uint32 sdstd_3_get_uhsi_clkmode(sdioh_info_t *sd)
+{
+ return sd_uhsimode;
+}
+
+/* check, to see if the card supports driver_type corr to the driver_type
+ in preset value, which will be selected by requested UHSI mode
+ input:
+ clk mode: valid values: SD3CLKMODE_2_SDR50, SD3CLKMODE_3_SDR104,
+ SD3CLKMODE_4_DDR50, SD3CLKMODE_AUTO
+ outputs:
+ return_val: TRUE; if a matching drvstrn for the given clkmode is
+ found in both HC and card. otherwise, FALSE.
+ [other outputs below valid ONLY if return_val is TRUE]
+ drvstrn : driver strength read from CCCR.
+ presetval: value of preset reg, corr to the clkmode.
+ */
+static bool
+sdstd_3_get_matching_drvstrn(sdioh_info_t *sd, int sd3_requested_clkmode,
+ uint32 *drvstrn, uint16 *presetval)
+{
+ int status;
+ uint8 presetreg;
+ uint8 cccr_reqd_dtype_mask = 1;
+
+ sd3_trace(("sd3: %s:\n", __FUNCTION__));
+
+ if (sd3_requested_clkmode != SD3CLKMODE_AUTO) {
+ /* CARD: get the card driver strength from cccr */
+ if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_DRIVER_STRENGTH,
+ 1, drvstrn)) != BCME_OK) {
+ sd_err(("%s:Reading SDIOD_CCCR_DRIVER_STRENGTH from card"
+ "Failed!\n", __FUNCTION__));
+ return FALSE;
+ }
+ if (TRUE != sd3_sw_override1) {
+ /* HOSTC: get the addr of preset register indexed by the clkmode */
+ presetreg = SD3_PresetValStart +
+ (2*sd3_requested_clkmode + 6);
+ *presetval = sdstd_rreg16(sd, presetreg);
+ } else {
+ /* Note: +3 for mapping between SD3CLKMODE_xxx and presetval_sw_table */
+ *presetval = presetval_sw_table[sd3_requested_clkmode + 3];
+ }
+ sd_err(("%s:reqCLK: %d, presetval: 0x%x\n",
+ __FUNCTION__, sd3_requested_clkmode, *presetval));
+
+ cccr_reqd_dtype_mask <<= GFIELD(*presetval, PRESET_DRIVR_SELECT);
+
+ /* compare/match */
+ if (!(cccr_reqd_dtype_mask & GFIELD(*drvstrn, SDIO_BUS_DRVR_TYPE_CAP))) {
+ sd_err(("%s:cccr_reqd_dtype_mask and SDIO_BUS_DRVR_TYPE_CAP"
+ "not matching!:reqd:0x%x, cap:0x%x\n", __FUNCTION__,
+ cccr_reqd_dtype_mask, GFIELD(*drvstrn, SDIO_BUS_DRVR_TYPE_CAP)));
+ return FALSE;
+ } else {
+ /* check if drive strength override is required. If so, first set it */
+ if (*dhd_sdiod_uhsi_ds_override != DRVSTRN_IGNORE_CHAR) {
+ int ds_offset = 0;
+ uint32 temp = 0;
+
+ /* drvstrn to reflect the preset val: this is default */
+ *drvstrn = GFIELD(*presetval, PRESET_DRIVR_SELECT);
+
+ /* now check override */
+ ds_offset = (((int)DRVSTRN_MAX_CHAR -
+ (int)(*dhd_sdiod_uhsi_ds_override)));
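+ /* Illustration (assumed values, not from this change:
+ * DRVSTRN_MAX_CHAR being 'D' and MAX_DTS_INDEX being 3):
+ * an override of 'B' gives ds_offset = 'D' - 'B' = 2,
+ * remapped below to 3 - 2 = 1, so DTS_vals[1] is written.
+ */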
+ if ((ds_offset >= 0) && (ds_offset <= MAX_DTS_INDEX)) {
+ ds_offset = MAX_DTS_INDEX - ds_offset;
+ sd_err(("%s:Drive strength override: %c, offset: "
+ "%d, val: %d\n", __FUNCTION__,
+ *dhd_sdiod_uhsi_ds_override,
+ ds_offset, DTS_vals[ds_offset]));
+ temp = SFIELD(*drvstrn, SDIO_BUS_DRVR_TYPE_SEL,
+ DTS_vals[ds_offset]);
+ sd_err(("%s:DrvStrn orig: 0x%x, modif: 0x%x\n",
+ __FUNCTION__, *drvstrn, temp));
+ *drvstrn = temp;
+ } else {
+ /* else case is default: use preset val */
+ sd_err(("%s:override invalid: DrvStrn is from "
+ "preset: 0x%x\n",
+ __FUNCTION__, *drvstrn));
+ }
+ } else {
+ sd_err(("%s:DrvStrn is from preset: 0x%x\n",
+ __FUNCTION__, *drvstrn));
+ }
+ }
+ } else {
+ /* TBD check for sd3_requested_clkmode : -1 also. */
+ sd_err(("%s: Automode not supported!\n", __FUNCTION__));
+ return FALSE;
+ }
+ return TRUE;
+}
+
+/* Returns the matching UHSI clk mode if one is found; otherwise returns -1.
+ Also, if sd3_requested_clkmode is -1, finds and returns the closest max matching clk.
+ */
+static int
+sdstd_3_get_matching_uhsi_clkmode(sdioh_info_t *sd, int sd3_requested_clkmode)
+{
+ uint32 card_val_uhsisupp;
+ uint8 speedmask = 1;
+ uint32 drvstrn;
+ uint16 presetval;
+ int status;
+
+ sd3_trace(("sd3: %s:\n", __FUNCTION__));
+
+ sd->global_UHSI_Supp = HOST_SDR_UNSUPP;
+
+ /* for legacy/25MHz/50MHz bus speeds, no checks done here */
+ if ((sd3_requested_clkmode == SD3CLKMODE_0_SDR12) ||
+ (sd3_requested_clkmode == SD3CLKMODE_1_SDR25)) {
+ sd->global_UHSI_Supp = HOST_SDR_12_25;
+ return sd3_requested_clkmode;
+ }
+ /* get cap of card */
+ if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_UHSI_SUPPORT,
+ 1, &card_val_uhsisupp)) != BCME_OK) {
+ sd_err(("%s:SDIOD_CCCR_UHSI_SUPPORT query failed!\n", __FUNCTION__));
+ return -1;
+ }
+ sd_info(("%s:Read %08x from Card at %08x\n", __FUNCTION__,
+ card_val_uhsisupp, SDIOD_CCCR_UHSI_SUPPORT));
+
+ if (sd3_requested_clkmode != SD3CLKMODE_AUTO) {
+ /* Note: it is assumed that the following executes only when (sd3_requested_clkmode >= 2) */
+ speedmask <<= (sd3_requested_clkmode - SD3CLKMODE_2_SDR50);
+
+ /* check first about 3.0 HS CLK modes */
+ if (!(GFIELD(sd->caps3, CAP3_30CLKCAP) & speedmask)) {
+ sd_err(("%s:HC does not support req 3.0 UHSI mode."
+ "requested:%d; capable:0x%x\n", __FUNCTION__,
+ sd3_requested_clkmode, GFIELD(sd->caps3, CAP3_30CLKCAP)));
+ return -1;
+ }
+
+ /* check first about 3.0 CARD CLK modes */
+ if (!(GFIELD(card_val_uhsisupp, SDIO_BUS_SPEED_UHSICAP) & speedmask)) {
+ sd_err(("%s:Card does not support req 3.0 UHSI mode. requested:%d;"
+ "capable:0x%x\n", __FUNCTION__, sd3_requested_clkmode,
+ GFIELD(card_val_uhsisupp, SDIO_BUS_SPEED_UHSICAP)));
+ return -1;
+ }
+
+ /* check whether the card supports the driver_type corresponding
+ to the driver_type in the preset value selected by the
+ requested UHSI mode
+ */
+ if (!sdstd_3_get_matching_drvstrn(sd, sd3_requested_clkmode,
+ &drvstrn, &presetval)) {
+ sd_err(("%s:DRVStrn mismatch!: card strn:0x%x; HC preset"
+ "val:0x%x\n", __FUNCTION__, drvstrn, presetval));
+ return -1;
+ }
+ /* success path. change the support variable accordingly */
+ sd->global_UHSI_Supp = HOST_SDR_50_104_DDR;
+ return sd3_requested_clkmode;
+ } else {
+ /* auto clk selection: get the highest clock capable by both card and HC */
+/* TBD: to be done */
+/* sd->global_UHSI_Supp = TRUE; on success */
+ return -1;
+ }
+}
+
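+/* CMD11 signal-voltage switch sequence, summarizing the steps coded below:
+ issue CMD11 and validate the R1 response; gate the SD clock; verify
+ DAT[3:0] reads 0; enable 1.8V signalling in HostCntrl2; wait 5ms; verify
+ the 1.8V enable bit stuck; re-enable the SD clock; wait 1ms; then verify
+ DAT[3:0] reads 0b1111.
+ */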
+static int
+sdstd_3_sigvoltswitch_proc(sdioh_info_t *sd)
+{
+ int status;
+ uint32 cmd_rsp = 0, presst;
+ uint16 val1 = 0;
+
+ sd3_trace(("sd3: %s:\n", __FUNCTION__));
+
+ /* Issue cmd11 */
+ if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_11, 0))
+ != SUCCESS) {
+ sd_err(("%s: CMD11 failed\n", __FUNCTION__));
+ return status;
+ }
+
+ /* check response */
+ sdstd_cmd_getrsp(sd, &cmd_rsp, 1);
+ if (
+ GFIELD(cmd_rsp, RSP1_ERROR) || /* bit 19 */
+ GFIELD(cmd_rsp, RSP1_ILLEGAL_CMD) || /* bit 22 */
+ GFIELD(cmd_rsp, RSP1_COM_CRC_ERROR) || /* bit 23 */
+ GFIELD(cmd_rsp, RSP1_CARD_LOCKED) /* bit 25 */ ) {
+ sd_err(("%s: FAIL:CMD11: cmd_resp:0x%x\n", __FUNCTION__, cmd_rsp));
+ return ERROR;
+ }
+
+ /* SD Clock Enable = 0 */
+ sdstd_wreg16(sd, SD_ClockCntrl,
+ sdstd_rreg16(sd, SD_ClockCntrl) & ~((uint16)0x4));
+
+ /* check DAT[3..0] using Present State Reg. If not 0, error */
+ presst = sdstd_rreg(sd, SD_PresentState);
+ if (0 != GFIELD(presst, PRES_DAT_SIGNAL)) {
+ sd_err(("%s: FAIL: PRESTT:0x%x\n", __FUNCTION__, presst));
+ return ERROR;
+ }
+
+ /* turn 1.8V sig enable in HC2 */
+ val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
+ val1 = SFIELD(val1, HOSTCtrl2_1_8SIG_EN, 1);
+ sdstd_wreg16(sd, SD3_HostCntrl2, val1);
+
+ /* wait 5ms */
+ OSL_DELAY(5000);
+
+ /* check 1.8V sig enable in HC2. if cleared, error */
+ val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
+
+ if (!GFIELD(val1, HOSTCtrl2_1_8SIG_EN)) {
+ sd_err(("%s: FAIL: HC2:1.8V_En:0x%x\n", __FUNCTION__, val1));
+ return ERROR;
+ }
+
+ /* SD Clock Enable = 1 */
+ val1 = sdstd_rreg16(sd, SD_ClockCntrl);
+ sdstd_wreg16(sd, SD_ClockCntrl, val1 | 0x4);
+
+ /* wait 1ms */
+ OSL_DELAY(1000);
+
+ /* check DAT[3..0] using Present State Reg. If not 0b1111, error */
+ presst = sdstd_rreg(sd, SD_PresentState);
+ if (0xf != GFIELD(presst, PRES_DAT_SIGNAL)) {
+ sd_err(("%s: FAIL: PRESTT_FINAL:0x%x\n", __FUNCTION__, presst));
+ return ERROR;
+ }
+
+ return (SUCCESS);
+}
+
+static int
+sdstd_set_highspeed_mode(sdioh_info_t *sd, bool HSMode)
+{
+ uint32 regdata;
+ int status;
+ uint8 reg8;
+
+ uint32 drvstrn;
+
+ reg8 = sdstd_rreg8(sd, SD_HostCntrl);
+
+#ifdef BCMINTERNAL
+ /* The Jinvani SD Gold Host forces the highest clock rate in high-speed mode */
+ /* Only enable high-speed mode if the SD clock divisor is 1. */
+ if (sd->controller_type == SDIOH_TYPE_JINVANI_GOLD) {
+ if (sd_divisor != 1) {
+ HSMode = FALSE;
+ }
+ }
+#endif /* BCMINTERNAL */
+
+ if (HSMode == TRUE) {
+ if (sd_hiok && (GFIELD(sd->caps, CAP_HIGHSPEED)) == 0) {
+ sd_err(("Host Controller does not support hi-speed mode.\n"));
+ return BCME_ERROR;
+ }
+
+ sd_info(("Attempting to enable High-Speed mode.\n"));
+
+ if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
+ 1, &regdata)) != SUCCESS) {
+ return BCME_SDIO_ERROR;
+ }
+ if (regdata & SDIO_SPEED_SHS) {
+ sd_info(("Device supports High-Speed mode.\n"));
+
+ regdata |= SDIO_SPEED_EHS;
+
+ sd_info(("Writing %08x to Card at %08x\n",
+ regdata, SDIOD_CCCR_SPEED_CONTROL));
+ if ((status = sdstd_card_regwrite(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
+ 1, regdata)) != BCME_OK) {
+ return BCME_SDIO_ERROR;
+ }
+
+ if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
+ 1, &regdata)) != BCME_OK) {
+ return BCME_SDIO_ERROR;
+ }
+
+ sd_info(("Read %08x to Card at %08x\n", regdata, SDIOD_CCCR_SPEED_CONTROL));
+
+ reg8 = SFIELD(reg8, HOST_HI_SPEED_EN, 1);
+
+ sd_err(("High-speed clocking mode enabled.\n"));
+ }
+ else {
+ sd_err(("Device does not support High-Speed Mode.\n"));
+ reg8 = SFIELD(reg8, HOST_HI_SPEED_EN, 0);
+ }
+ } else {
+ /* Force off device bit */
+ if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
+ 1, &regdata)) != BCME_OK) {
+ return status;
+ }
+ if (regdata & SDIO_SPEED_EHS) {
+ regdata &= ~SDIO_SPEED_EHS;
+ if ((status = sdstd_card_regwrite(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
+ 1, regdata)) != BCME_OK) {
+ return status;
+ }
+ }
+
+ sd_err(("High-speed clocking mode disabled.\n"));
+ reg8 = SFIELD(reg8, HOST_HI_SPEED_EN, 0);
+ }
+
+ if ((sd->host_UHSISupported) && (sd->card_UHSI_voltage_Supported)) {
+ /* also set the default driver strength in the card/HC [this is reqd because,
+ if earlier we selected any other drv_strn, we need to reset it]
+ */
+ /* get the card driver strength from cccr */
+ if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_DRIVER_STRENGTH,
+ 1, &drvstrn)) != BCME_OK) {
+ sd_err(("%s:Reading SDIOD_CCCR_DRIVER_STRENGTH from card"
+ "Failed!\n", __FUNCTION__));
+ return BCME_SDIO_ERROR;
+ }
+
+ /* reset card drv strn */
+ drvstrn = SFIELD(drvstrn, SDIO_BUS_DRVR_TYPE_SEL, 0);
+
+ /* set card drv strn */
+ if ((status = sdstd_card_regwrite(sd, 0, SDIOD_CCCR_DRIVER_STRENGTH,
+ 1, drvstrn)) != BCME_OK) {
+ sd_err(("%s:Setting SDIOD_CCCR_DRIVER_STRENGTH in"
+ "card Failed!\n", __FUNCTION__));
+ return BCME_SDIO_ERROR;
+ }
+ }
+
+ sdstd_wreg8(sd, SD_HostCntrl, reg8);
+
+ return BCME_OK;
+}
+
+/* Select DMA Mode:
+ * If dma_mode == DMA_MODE_AUTO, pick the "best" mode.
+ * Otherwise, pick the selected mode if supported.
+ * If not supported, use PIO mode.
+ */
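+/* Preference order for DMA_MODE_AUTO, as coded in the switch below:
+ ADMA2 if CAP_ADMA2, else ADMA1 if CAP_ADMA1, else SDMA if CAP_DMA,
+ else PIO (DMA_MODE_NONE).
+ */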
+static int
+sdstd_set_dma_mode(sdioh_info_t *sd, int8 dma_mode)
+{
+ uint8 reg8, dma_sel_bits = SDIOH_SDMA_MODE;
+ int8 prev_dma_mode = sd->sd_dma_mode;
+
+ switch (prev_dma_mode) {
+ case DMA_MODE_AUTO:
+ sd_dma(("%s: Selecting best DMA mode supported by controller.\n",
+ __FUNCTION__));
+ if (GFIELD(sd->caps, CAP_ADMA2)) {
+ sd->sd_dma_mode = DMA_MODE_ADMA2;
+ dma_sel_bits = SDIOH_ADMA2_MODE;
+ } else if (GFIELD(sd->caps, CAP_ADMA1)) {
+ sd->sd_dma_mode = DMA_MODE_ADMA1;
+ dma_sel_bits = SDIOH_ADMA1_MODE;
+ } else if (GFIELD(sd->caps, CAP_DMA)) {
+ sd->sd_dma_mode = DMA_MODE_SDMA;
+ } else {
+ sd->sd_dma_mode = DMA_MODE_NONE;
+ }
+ break;
+ case DMA_MODE_NONE:
+ sd->sd_dma_mode = DMA_MODE_NONE;
+ break;
+ case DMA_MODE_SDMA:
+ if (GFIELD(sd->caps, CAP_DMA)) {
+ sd->sd_dma_mode = DMA_MODE_SDMA;
+ } else {
+ sd_err(("%s: SDMA not supported by controller.\n", __FUNCTION__));
+ sd->sd_dma_mode = DMA_MODE_NONE;
+ }
+ break;
+ case DMA_MODE_ADMA1:
+ if (GFIELD(sd->caps, CAP_ADMA1)) {
+ sd->sd_dma_mode = DMA_MODE_ADMA1;
+ dma_sel_bits = SDIOH_ADMA1_MODE;
+ } else {
+ sd_err(("%s: ADMA1 not supported by controller.\n", __FUNCTION__));
+ sd->sd_dma_mode = DMA_MODE_NONE;
+ }
+ break;
+ case DMA_MODE_ADMA2:
+ if (GFIELD(sd->caps, CAP_ADMA2)) {
+ sd->sd_dma_mode = DMA_MODE_ADMA2;
+ dma_sel_bits = SDIOH_ADMA2_MODE;
+ } else {
+ sd_err(("%s: ADMA2 not supported by controller.\n", __FUNCTION__));
+ sd->sd_dma_mode = DMA_MODE_NONE;
+ }
+ break;
+ case DMA_MODE_ADMA2_64:
+ sd_err(("%s: 64b ADMA2 not supported by driver.\n", __FUNCTION__));
+ sd->sd_dma_mode = DMA_MODE_NONE;
+ break;
+ default:
+ sd_err(("%s: Unsupported DMA Mode %d requested.\n", __FUNCTION__,
+ prev_dma_mode));
+ sd->sd_dma_mode = DMA_MODE_NONE;
+ break;
+ }
+
+ /* clear SysAddr, only used for SDMA */
+ sdstd_wreg(sd, SD_SysAddr, 0);
+
+ sd_err(("%s: %s mode selected.\n", __FUNCTION__, dma_mode_description[sd->sd_dma_mode]));
+
+ reg8 = sdstd_rreg8(sd, SD_HostCntrl);
+ reg8 = SFIELD(reg8, HOST_DMA_SEL, dma_sel_bits);
+ sdstd_wreg8(sd, SD_HostCntrl, reg8);
+ sd_dma(("%s: SD_HostCntrl=0x%02x\n", __FUNCTION__, reg8));
+
+ return BCME_OK;
+}
+
+#ifdef BCMDBG
+void
+print_regs(sdioh_info_t *sd)
+{
+ uint8 reg8 = 0;
+ uint16 reg16 = 0;
+ uint32 reg32 = 0;
+ uint8 presetreg;
+ int i;
+
+ reg8 = sdstd_rreg8(sd, SD_BlockSize);
+ printf("REGS: SD_BlockSize [004h]:0x%x\n", reg8);
+
+ reg8 = sdstd_rreg8(sd, SD_BlockCount);
+ printf("REGS: SD_BlockCount [006h]:0x%x\n", reg8);
+
+ reg8 = sdstd_rreg8(sd, SD_TransferMode);
+ printf("REGS: SD_TransferMode [00Ch]:0x%x\n", reg8);
+
+ reg8 = sdstd_rreg8(sd, SD_HostCntrl);
+ printf("REGS: SD_HostCntrl [028h]:0x%x\n", reg8);
+
+ reg32 = sdstd_rreg(sd, SD_PresentState);
+ printf("REGS: SD_PresentState [024h]:0x%x\n", reg32);
+
+ reg8 = sdstd_rreg8(sd, SD_PwrCntrl);
+ printf("REGS: SD_PwrCntrl [029h]:0x%x\n", reg8);
+
+ reg8 = sdstd_rreg8(sd, SD_BlockGapCntrl);
+ printf("REGS: SD_BlockGapCntrl [02Ah]:0x%x\n", reg8);
+
+ reg8 = sdstd_rreg8(sd, SD_WakeupCntrl);
+ printf("REGS: SD_WakeupCntrl [02Bh]:0x%x\n", reg8);
+
+ reg16 = sdstd_rreg16(sd, SD_ClockCntrl);
+ printf("REGS: SD_ClockCntrl [02Ch]:0x%x\n", reg16);
+
+ reg8 = sdstd_rreg8(sd, SD_TimeoutCntrl);
+ printf("REGS: SD_TimeoutCntrl [02Eh]:0x%x\n", reg8);
+
+ reg8 = sdstd_rreg8(sd, SD_SoftwareReset);
+ printf("REGS: SD_SoftwareReset [02Fh]:0x%x\n", reg8);
+
+ reg16 = sdstd_rreg16(sd, SD_IntrStatus);
+ printf("REGS: SD_IntrStatus [030h]:0x%x\n", reg16);
+
+ reg16 = sdstd_rreg16(sd, SD_ErrorIntrStatus);
+ printf("REGS: SD_ErrorIntrStatus [032h]:0x%x\n", reg16);
+
+ reg16 = sdstd_rreg16(sd, SD_IntrStatusEnable);
+ printf("REGS: SD_IntrStatusEnable [034h]:0x%x\n", reg16);
+
+ reg16 = sdstd_rreg16(sd, SD_ErrorIntrStatusEnable);
+ printf("REGS: SD_ErrorIntrStatusEnable [036h]:0x%x\n", reg16);
+
+ reg16 = sdstd_rreg16(sd, SD_IntrSignalEnable);
+ printf("REGS: SD_IntrSignalEnable [038h]:0x%x\n", reg16);
+
+ reg16 = sdstd_rreg16(sd, SD_ErrorIntrSignalEnable);
+ printf("REGS: SD_ErrorIntrSignalEnable [03Ah]:0x%x\n", reg16);
+
+ reg32 = sdstd_rreg(sd, SD_Capabilities);
+ printf("REGS: SD_Capabilities [040h]:0x%x\n", reg32);
+
+ reg32 = sdstd_rreg(sd, SD_MaxCurCap);
+ printf("REGS: SD_MaxCurCap [04Ah]:0x%x\n", reg32);
+
+ reg32 = sdstd_rreg(sd, SD_Capabilities3);
+ printf("REGS: SD_Capabilities3 [044h]:0x%x\n", reg32);
+
+ reg16 = sdstd_rreg16(sd, SD3_HostCntrl2);
+ printf("REGS: SD3_HostCntrl2 [03Eh]:0x%x\n", reg16);
+
+ for (i = 0; i < 8; i++) {
+ presetreg = SD3_PresetValStart + i*2;
+ printf("REGS: Presetvalreg:ix[%d]:0x%x, val=0x%x\n", i,
+ presetreg, sdstd_rreg16(sd, presetreg));
+ }
+
+ reg16 = sdstd_rreg16(sd, SD_SlotInterruptStatus);
+ printf("REGS: SD_SlotInterruptStatus [0FCh]:0x%x\n", reg16);
+
+ reg16 = sdstd_rreg16(sd, SD_HostControllerVersion);
+ printf("REGS: SD_HostControllerVersion [0FEh]:0x%x\n", reg16);
+}
+#endif /* BCMDBG */
+
+#ifdef BCMINTERNAL
+#ifdef NOTUSED
+static int
+parse_state(uint32 state, char *buf, int len)
+{
+ char *data = buf;
+
+ sd_err(("Parsing state 0x%x\n", state));
+ if (!len) {
+ return (0);
+ }
+
+ data += sprintf(data, "cmd_inhibit %d\n", GFIELD(state, PRES_CMD_INHIBIT));
+ data += sprintf(data, "dat_inhibit %d\n", GFIELD(state, PRES_DAT_INHIBIT));
+ data += sprintf(data, "dat_busy %d\n", GFIELD(state, PRES_DAT_BUSY));
+ data += sprintf(data, "write_active %d\n", GFIELD(state, PRES_WRITE_ACTIVE));
+ data += sprintf(data, "read_active %d\n", GFIELD(state, PRES_READ_ACTIVE));
+ data += sprintf(data, "write_data_rdy %d\n", GFIELD(state, PRES_WRITE_DATA_RDY));
+ data += sprintf(data, "read_data_rdy %d\n", GFIELD(state, PRES_READ_DATA_RDY));
+ data += sprintf(data, "card_present %d\n", GFIELD(state, PRES_CARD_PRESENT));
+ data += sprintf(data, "card_stable %d\n", GFIELD(state, PRES_CARD_STABLE));
+ data += sprintf(data, "card_present_raw %d\n", GFIELD(state, PRES_CARD_PRESENT_RAW));
+ data += sprintf(data, "write_enabled %d\n", GFIELD(state, PRES_WRITE_ENABLED));
+ data += sprintf(data, "cmd_signal %d\n", GFIELD(state, PRES_CMD_SIGNAL));
+
+ return (data - buf);
+}
+
+static int
+parse_caps(uint32 cap, char *buf, int len)
+{
+ int block = 0xbeef;
+ char *data = buf;
+
+ data += sprintf(data, "TimeOut Clock Freq:\t%d\n", GFIELD(cap, CAP_TO_CLKFREQ));
+ data += sprintf(data, "TimeOut Clock Unit:\t%d\n", GFIELD(cap, CAP_TO_CLKUNIT));
+ data += sprintf(data, "Base Clock:\t\t%d\n", GFIELD(cap, CAP_BASECLK));
+ switch (GFIELD(cap, CAP_MAXBLOCK)) {
+ case 0: block = 512; break;
+ case 1: block = 1024; break;
+ case 2: block = 2048; break;
+ case 3: block = 0; break;
+ }
+ data += sprintf(data, "Max Block Size:\t\t%d\n", block);
+ data += sprintf(data, "Support High Speed:\t%d\n", GFIELD(cap, CAP_HIGHSPEED));
+ data += sprintf(data, "Support DMA:\t\t%d\n", GFIELD(cap, CAP_DMA));
+ data += sprintf(data, "Support Suspend:\t%d\n", GFIELD(cap, CAP_SUSPEND));
+ data += sprintf(data, "Support 3.3 Volts:\t%d\n", GFIELD(cap, CAP_VOLT_3_3));
+ data += sprintf(data, "Support 3.0 Volts:\t%d\n", GFIELD(cap, CAP_VOLT_3_0));
+ data += sprintf(data, "Support 1.8 Volts:\t%d\n", GFIELD(cap, CAP_VOLT_1_8));
+ return (data - buf);
+}
+#endif /* NOTUSED */
+#endif /* BCMINTERNAL */
+
+/* XXX Per SDIO Host Controller Spec section 3.2.1
+ Note: for 2.x HC, new_sd_divisor should be a power of 2, but for 3.0
+ HC, new_sd_divisor should be a multiple of 2.
+*/
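+/* Worked example (assuming a 50 MHz base clock, which this change does not
+ guarantee): a 2.x HC accepts divisors 1, 2, 4, ... 256, so divisor 4 gives
+ 12.5 MHz; a 3.0 HC accepts 1 and any multiple of 2 up to 0x3ff, so divisor
+ 6 (not a power of 2), giving ~8.3 MHz, is legal only on a 3.0 HC.
+ */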
+bool
+sdstd_start_clock(sdioh_info_t *sd, uint16 new_sd_divisor)
+{
+ uint rc, count;
+ uint16 divisor;
+ uint16 regdata;
+ uint16 val1;
+
+ sd3_trace(("%s: starting clk\n", __FUNCTION__));
+ /* turn off HC clock */
+ sdstd_wreg16(sd, SD_ClockCntrl,
+ sdstd_rreg16(sd, SD_ClockCntrl) & ~((uint16)0x4)); /* Disable the HC clock */
+
+ /* Set divisor */
+ if (sd->host_UHSISupported) {
+#ifdef BCMDBG
+ if ((new_sd_divisor != 1) && /* 1 is a valid value */
+ ((new_sd_divisor & (0x1)) || /* check for multiple of 2 */
+ (new_sd_divisor == 0) ||
+ (new_sd_divisor > 0x3ff))) {
+ sd_err(("3.0: Invalid clock divisor target: %d\n", new_sd_divisor));
+ return FALSE;
+ }
+#endif
+ divisor = (new_sd_divisor >> 1);
+ } else
+ {
+#ifdef BCMDBG
+ if ((new_sd_divisor & (new_sd_divisor-1)) ||
+ (new_sd_divisor == 0)) {
+ sd_err(("Invalid clock divisor target: %d\n", new_sd_divisor));
+ return FALSE;
+ }
+#endif
+ /* new logic: if divisor > 256, restrict to 256 */
+ if (new_sd_divisor > 256)
+ new_sd_divisor = 256;
+ divisor = (new_sd_divisor >> 1) << 8;
+ }
+#ifdef BCMINTERNAL
+ if (sd->controller_type == SDIOH_TYPE_JINVANI_GOLD) {
+ divisor = (new_sd_divisor >> 2) << 8;
+ }
+#endif /* BCMINTERNAL */
+
+ sd_info(("Clock control is 0x%x\n", sdstd_rreg16(sd, SD_ClockCntrl)));
+ if (sd->host_UHSISupported) {
+ /* Get the divisor and shift it so that divisor bits 0-7 land in
+ * clkctrl bits 15-8 and divisor bits 9-8 land in bits 7-6.
+ */
+ val1 = divisor << 2;
+ val1 &= 0x0ffc;
+ val1 |= divisor >> 8;
+ val1 <<= 6;
+ printf("divisor:%x;val1:%x\n", divisor, val1);
+ sdstd_mod_reg16(sd, SD_ClockCntrl, 0xffC0, val1);
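+ /* Worked example (illustrative): divisor 0x12C packs as
+ * (0x12C << 2) & 0x0ffc = 0x4b0, OR'ed with 0x12C >> 8 = 0x4b1,
+ * shifted left by 6 and truncated to 16 bits = 0x2c40: low byte
+ * 0x2c in bits 15:8 and upper bits 0x1 in bits 7:6, matching the
+ * SDHC 3.0 10-bit divisor layout.
+ */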
+ } else
+ {
+ sdstd_mod_reg16(sd, SD_ClockCntrl, 0xff00, divisor);
+ }
+
+ sd_err(("%s: Using clock divisor of %d (regval 0x%04x)\n", __FUNCTION__,
+ new_sd_divisor, divisor));
+ if (new_sd_divisor > 0)
+ sd_err(("%s:now, divided clk is: %d Hz\n",
+ __FUNCTION__, GFIELD(sd->caps, CAP_BASECLK)*1000000/new_sd_divisor));
+ else
+ sd_err(("Using Primary Clock Freq of %d MHz\n", GFIELD(sd->caps, CAP_BASECLK)));
+ sd_info(("Primary Clock Freq = %d MHz\n", GFIELD(sd->caps, CAP_BASECLK)));
+ if (GFIELD(sd->caps, CAP_TO_CLKFREQ) == 50) {
+ sd_info(("%s: Resulting SDIO clock is %d %s\n", __FUNCTION__,
+ ((50 % new_sd_divisor) ? (50000 / new_sd_divisor) : (50 / new_sd_divisor)),
+ ((50 % new_sd_divisor) ? "KHz" : "MHz")));
+ } else if (GFIELD(sd->caps, CAP_TO_CLKFREQ) == 48) {
+ sd_info(("%s: Resulting SDIO clock is %d %s\n", __FUNCTION__,
+ ((48 % new_sd_divisor) ? (48000 / new_sd_divisor) : (48 / new_sd_divisor)),
+ ((48 % new_sd_divisor) ? "KHz" : "MHz")));
+ } else if (GFIELD(sd->caps, CAP_TO_CLKFREQ) == 33) {
+ sd_info(("%s: Resulting SDIO clock is %d %s\n", __FUNCTION__,
+ ((33 % new_sd_divisor) ? (33000 / new_sd_divisor) : (33 / new_sd_divisor)),
+ ((33 % new_sd_divisor) ? "KHz" : "MHz")));
+ } else if (GFIELD(sd->caps, CAP_TO_CLKFREQ) == 31) {
+ sd_info(("%s: Resulting SDIO clock is %d %s\n", __FUNCTION__,
+ ((31 % new_sd_divisor) ? (31000 / new_sd_divisor) : (31 / new_sd_divisor)),
+ ((31 % new_sd_divisor) ? "KHz" : "MHz")));
+ } else if (GFIELD(sd->caps, CAP_TO_CLKFREQ) == 8) {
+ sd_info(("%s: Resulting SDIO clock is %d %s\n", __FUNCTION__,
+ ((8 % new_sd_divisor) ? (8000 / new_sd_divisor) : (8 / new_sd_divisor)),
+ ((8 % new_sd_divisor) ? "KHz" : "MHz")));
+ } else if (sd->controller_type == SDIOH_TYPE_BCM27XX) {
+ /* XXX - BCM 27XX Standard Host Controller returns 0 for CLKFREQ */
+ } else {
+ sd_err(("Need to determine divisor for %d MHz clocks\n",
+ GFIELD(sd->caps, CAP_BASECLK)));
+ sd_err(("Consult SD Host Controller Spec: Clock Control Register\n"));
+ return (FALSE);
+ }
+
+ sdstd_or_reg16(sd, SD_ClockCntrl, 0x1); /* Enable the clock */
+
+ /* Wait for clock to stabilize */
+ rc = (sdstd_rreg16(sd, SD_ClockCntrl) & 2);
+ count = 0;
+ while (!rc) {
+ OSL_DELAY(1);
+ sd_info(("Waiting for clock to become stable 0x%x\n", rc));
+ rc = (sdstd_rreg16(sd, SD_ClockCntrl) & 2);
+ count++;
+ if (count > 10000) {
+ sd_err(("%s:Clocks failed to stabilize after %u attempts\n",
+ __FUNCTION__, count));
+ return (FALSE);
+ }
+ }
+ /* Turn on clock */
+ sdstd_or_reg16(sd, SD_ClockCntrl, 0x4);
+
+ OSL_DELAY(20);
+
+ /* Set timeout control (adjust default value based on divisor).
+ * Disabling timeout interrupts during setting is advised by host spec.
+ */
+#ifdef BCMQT
+ if (GFIELD(sd->caps, CAP_BASECLK) < 50)
+#endif
+ {
+ uint toval;
+
+ toval = sd_toctl;
+ divisor = new_sd_divisor;
+
+ while (toval && !(divisor & 1)) {
+ toval -= 1;
+ divisor >>= 1;
+ }
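+ /* Worked example (illustrative): sd_toctl = 7 with divisor 8
+ * halves the divisor three times, leaving toval = 4, i.e. the
+ * timeout exponent drops by log2(divisor).
+ */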
+
+ regdata = sdstd_rreg16(sd, SD_ErrorIntrStatusEnable);
+ sdstd_wreg16(sd, SD_ErrorIntrStatusEnable, (regdata & ~ERRINT_DATA_TIMEOUT_BIT));
+ sdstd_wreg8(sd, SD_TimeoutCntrl, (uint8)toval);
+ sdstd_wreg16(sd, SD_ErrorIntrStatusEnable, regdata);
+ }
+#ifdef BCMQT
+ else {
+ sd_info(("%s: REsetting err int control\n", __FUNCTION__));
+ /* XXX: turn off timeout INT, it resets clk ctrl bit */
+ regdata = sdstd_rreg16(sd, SD_ErrorIntrStatusEnable);
+ sdstd_wreg16(sd, SD_ErrorIntrStatusEnable, (regdata & ~ERRINT_DATA_TIMEOUT_BIT));
+ }
+#endif
+ OSL_DELAY(2);
+
+ sd_info(("Final Clock control is 0x%x\n", sdstd_rreg16(sd, SD_ClockCntrl)));
+
+ return TRUE;
+}
+
+/* XXX Per SDIO Host Controller Spec 3.3
+ * volts_req:
+ * 0 means default: select highest voltage.
+ * 1 means 1.8V
+ * 2 means 3.0V
+ * 3 means 3.3V
+ * returns
+ * TRUE: no error
+ * FALSE: general error
+ * SDIO_OCR_READ_FAIL: ocr reading failure. Now the HC has to try in other available voltages.
+*/
+uint16
+sdstd_start_power(sdioh_info_t *sd, int volts_req)
+{
+ char *s;
+ uint32 cmd_arg;
+ uint32 cmd_rsp;
+ uint8 pwr = 0;
+ int volts = 0;
+ uint16 val1;
+ uint16 init_divider = 0;
+ uint8 baseclk = 0;
+ bool selhighest = (volts_req == 0) ? TRUE : FALSE;
+
+ /* reset the card uhsi volt support to false */
+ sd->card_UHSI_voltage_Supported = FALSE;
+
+ /* Ensure a power on reset by turning off bus power in case it happened to
+ * be on already. (This might happen if driver doesn't unload/clean up correctly,
+ * crash, etc.) Leave off for 100ms to make sure the power off isn't
+ * ignored/filtered by the device. Note we can't skip this step if the power is
+ * off already since we don't know how long it has been off before starting
+ * the driver.
+ */
+ sdstd_wreg8(sd, SD_PwrCntrl, 0);
+ sd_info(("Turning off VDD/bus power briefly (100ms) to ensure reset\n"));
+ OSL_DELAY(100000);
+
+ /* For selecting highest available voltage, start from lowest and iterate */
+ if (!volts_req)
+ volts_req = 1;
+
+ s = NULL;
+
+ if (volts_req == 1) {
+ if (GFIELD(sd->caps, CAP_VOLT_1_8)) {
+ volts = 5;
+ s = "1.8";
+ if (FALSE == selhighest)
+ goto voltsel;
+ else
+ volts_req++;
+ } else {
+ sd_err(("HC doesn't support voltage! trying higher voltage: %d\n", volts));
+ volts_req++;
+ }
+ }
+
+ if (volts_req == 2) {
+ if (GFIELD(sd->caps, CAP_VOLT_3_0)) {
+ volts = 6;
+ s = "3.0";
+ if (FALSE == selhighest)
+ goto voltsel;
+ else volts_req++;
+ } else {
+ sd_err(("HC doesn't support voltage! trying higher voltage: %d\n", volts));
+ volts_req++;
+ }
+ }
+
+ if (volts_req == 3) {
+ if (GFIELD(sd->caps, CAP_VOLT_3_3)) {
+ volts = 7;
+ s = "3.3";
+ } else {
+ if ((FALSE == selhighest) || (volts == 0)) {
+ sd_err(("HC doesn't support any voltage! error!\n"));
+ return FALSE;
+ }
+ }
+ }
+
+ /* XXX
+ * If UHSI is NOT supported, check the other voltages as well. This is
+ * also a safety measure for embedded devices, so that the HC starts at
+ * lower power first. If this function fails, the caller may disable
+ * UHSISupported and call start power again to check support at higher voltages.
+ */
+
+voltsel:
+ pwr = SFIELD(pwr, PWR_VOLTS, volts);
+ pwr = SFIELD(pwr, PWR_BUS_EN, 1);
+ sdstd_wreg8(sd, SD_PwrCntrl, pwr); /* Set Voltage level */
+ sd_info(("Setting Bus Power to %s Volts\n", s));
+ BCM_REFERENCE(s);
+
+ /*
+ * PR101766 : BRCM SDIO3.0 card is an embedded SD device. It is not a SD card.
+ * VDDIO signalling will be tied to 1.8v level on all SDIO3.0 based boards.
+ * So program the HC to drive VDDIO at 1.8v level.
+ */
+ if ((sd->version == HOST_CONTR_VER_3) && (volts == 5)) {
+ val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
+ val1 = SFIELD(val1, HOSTCtrl2_1_8SIG_EN, 1);
+ sdstd_wreg16(sd, SD3_HostCntrl2, val1);
+ }
+
+ /* Wait for 500ms for power to stabilize. Some designs have reset IC's
+ * which can hold reset low for close to 300ms. In addition there can
+ * be ramp time for VDD and/or VDDIO which might be provided from a LDO.
+ * For these reasons we need a pretty conservative delay here to have
+ * predictable reset behavior in the face of an unknown design.
+ */
+ OSL_DELAY(500000);
+
+ baseclk = GFIELD(sd->caps, CAP_BASECLK);
+ sd_info(("%s:baseclk: %d MHz\n", __FUNCTION__, baseclk));
+ /* for 3.0, find divisor */
+ if (sd->host_UHSISupported) {
+ /* ToDo : Dynamic modification of preset value table based on base clk */
+ sd3_trace(("sd3: %s: checking divisor\n", __FUNCTION__));
+ if (GFIELD(sd->caps3, CAP3_CLK_MULT) != 0) {
+ sd_err(("%s:Possible error: CLK Mul 1 CLOCKING NOT supported!\n",
+ __FUNCTION__));
+ return FALSE;
+ } else {
+ /* calculate divisor that leads to ~400KHz. */
+ init_divider = baseclk*10/4; /* baseclk*1000000/(400000); */
+ /* make it a multiple of 2. */
+ init_divider += (init_divider & 0x1);
+ sd_err(("%s:divider used for init:%d\n",
+ __FUNCTION__, init_divider));
+ }
+ } else {
+ /* Note: sd_divisor assumes that SDIO Base CLK is 50MHz. */
+ int final_freq_based_on_div = 50/sd_divisor;
+ if (baseclk > 50)
+ sd_divisor = baseclk/final_freq_based_on_div;
+ /* TBD: merge both SDIO 2.0 and 3.0 to share same divider logic */
+ init_divider = baseclk*10/4; /* baseclk*1000000/(400000); */
+ /* find next power of 2 */
+ NEXT_POW2(init_divider);
+ sd_err(("%s:NONUHSI: divider used for init:%d\n",
+ __FUNCTION__, init_divider));
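+ /* Worked example (illustrative): baseclk = 50 gives 50*10/4 = 125,
+ * which NEXT_POW2 rounds up to 128, i.e. 50 MHz / 128 = ~390 kHz,
+ * close to the 400 kHz init target.
+ */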
+ }
+
+ /* Start at ~400KHz clock rate for initialization */
+ if (!sdstd_start_clock(sd, init_divider)) {
+ sd_err(("%s: sdstd_start_clock failed\n", __FUNCTION__));
+ return FALSE;
+ }
+
+ /* Get the Card's Operation Condition. Occasionally the board
+ * takes a while to become ready
+ */
+ cmd_arg = 0;
+ cmd_rsp = 0;
+ if (get_ocr(sd, &cmd_arg, &cmd_rsp) != SUCCESS) {
+ sd_err(("%s: Failed to get OCR bailing\n", __FUNCTION__));
+ /* No need to reset as not sure in what state the card is. */
+ return SDIO_OCR_READ_FAIL;
+ }
+
+ sd_info(("cmd_rsp = 0x%x\n", cmd_rsp));
+ sd_info(("mem_present = %d\n", GFIELD(cmd_rsp, RSP4_MEM_PRESENT)));
+ sd_info(("num_funcs = %d\n", GFIELD(cmd_rsp, RSP4_NUM_FUNCS)));
+ sd_info(("card_ready = %d\n", GFIELD(cmd_rsp, RSP4_CARD_READY)));
+ sd_info(("OCR = 0x%x\n", GFIELD(cmd_rsp, RSP4_IO_OCR)));
+
+ /* Verify that the card supports I/O mode */
+ if (GFIELD(cmd_rsp, RSP4_NUM_FUNCS) == 0) {
+ sd_err(("%s: Card does not support I/O\n", __FUNCTION__));
+ return ERROR;
+ }
+ sd->num_funcs = GFIELD(cmd_rsp, RSP4_NUM_FUNCS);
+
+ /* Examine voltage: Arasan only supports 3.3 volts,
+ * so look for 3.2-3.3 Volts and also 3.3-3.4 volts.
+ */
+
+ /* XXX Pg 10 SDIO spec v1.10 */
+ if ((GFIELD(cmd_rsp, RSP4_IO_OCR) & (0x3 << 20)) == 0) {
+ sd_err(("This client does not support 3.3 volts!\n"));
+ return ERROR;
+ }
+ sd_info(("Leaving bus power at 3.3 Volts\n"));
+
+ cmd_arg = SFIELD(0, CMD5_OCR, 0xfff000);
+ /* if HC uhsi supported and card voltage set is 3.3V then switch to 1.8V */
+ if ((sd->host_UHSISupported) && (volts == 5)) {
+ /* set S18R also */
+ cmd_arg = SFIELD(cmd_arg, CMD5_S18R, 1);
+ }
+ cmd_rsp = 0;
+ get_ocr(sd, &cmd_arg, &cmd_rsp);
+ sd_info(("OCR = 0x%x\n", GFIELD(cmd_rsp, RSP4_IO_OCR)));
+
+ if ((sd->host_UHSISupported)) {
+ /* card responded with s18A => card supports sdio3.0,do tuning proc */
+ if (GFIELD(cmd_rsp, RSP4_S18A) == 1) {
+ if (sdstd_3_sigvoltswitch_proc(sd)) {
+ /* continue with legacy way of working */
+ sd_err(("%s: voltage switch not done. error, stopping\n",
+ __FUNCTION__));
+ /* How to gracefully proceed here? */
+ return FALSE;
+ } else {
+ sd->card_UHSI_voltage_Supported = TRUE;
+ sd_err(("%s: voltage switch SUCCESS!\n", __FUNCTION__));
+ }
+ } else {
+ /* This could happen in two cases:
+ * 1) The card is NOT sdio3.0. Note that
+ * card_UHSI_voltage_Supported is already false.
+ * 2) The card is sdio3.0 but is already running at 1.8V.
+ * In that case the host controller's voltage must be
+ * switched too; we would need to do the following:
+ * sd->card_UHSI_voltage_Supported = TRUE;
+ * then turn on the 1.8V sig enable in HC2:
+ * val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
+ * val1 = SFIELD(val1, HOSTCtrl2_1_8SIG_EN, 1);
+ * sdstd_wreg16(sd, SD3_HostCntrl2, val1);
+ */
+ sd_info(("%s: Not sdio3.0: host_UHSISupported: %d; HC volts=%d\n",
+ __FUNCTION__, sd->host_UHSISupported, volts));
+ }
+ } else {
+ sd_info(("%s: Legacy [non sdio3.0] HC\n", __FUNCTION__));
+ }
+
+ return TRUE;
+}
+
+bool
+sdstd_bus_width(sdioh_info_t *sd, int new_mode)
+{
+ uint32 regdata;
+ int status;
+ uint8 reg8;
+
+ sd_trace(("%s\n", __FUNCTION__));
+ if (sd->sd_mode == new_mode) {
+ sd_info(("%s: Already at width %d\n", __FUNCTION__, new_mode));
+ /* Could exit, but continue just in case... */
+ }
+
+ /* Set client side via reg 0x7 in CCCR */
+ if ((status = sdstd_card_regread (sd, 0, SDIOD_CCCR_BICTRL, 1, &regdata)) != SUCCESS)
+ return (bool)status;
+ regdata &= ~BUS_SD_DATA_WIDTH_MASK;
+ if (new_mode == SDIOH_MODE_SD4) {
+ sd_info(("Changing to SD4 Mode\n"));
+ regdata |= SD4_MODE;
+ } else if (new_mode == SDIOH_MODE_SD1) {
+ sd_info(("Changing to SD1 Mode\n"));
+ } else {
+ sd_err(("SPI Mode not supported by Standard Host Controller\n"));
+ }
+
+ if ((status = sdstd_card_regwrite (sd, 0, SDIOD_CCCR_BICTRL, 1, regdata)) != SUCCESS)
+ return (bool)status;
+
+ if (sd->host_UHSISupported) {
+ uint32 card_asyncint = 0;
+ uint16 host_asyncint = 0;
+
+ if ((status = sdstd_card_regread (sd, 0, SDIOD_CCCR_INTR_EXTN, 1,
+ &card_asyncint)) != SUCCESS) {
+ sd_err(("%s:INTR EXT getting failed!, ignoring\n", __FUNCTION__));
+ } else {
+ host_asyncint = sdstd_rreg16(sd, SD3_HostCntrl2);
+
+ /* check if supported by host and card */
+ if ((regdata & SD4_MODE) &&
+ (GFIELD(card_asyncint, SDIO_BUS_ASYNCINT_CAP)) &&
+ (GFIELD(sd->caps, CAP_ASYNCINT_SUP))) {
+ /* set enable async int in card */
+ card_asyncint = SFIELD(card_asyncint, SDIO_BUS_ASYNCINT_SEL, 1);
+
+ if ((status = sdstd_card_regwrite (sd, 0,
+ SDIOD_CCCR_INTR_EXTN, 1, card_asyncint)) != SUCCESS)
+ sd_err(("%s:INTR EXT setting failed!, ignoring\n",
+ __FUNCTION__));
+ else {
+ /* set enable async int in host */
+ host_asyncint = SFIELD(host_asyncint,
+ HOSTCtrl2_ASYINT_EN, 1);
+ sdstd_wreg16(sd, SD3_HostCntrl2, host_asyncint);
+ }
+ } else {
+ sd_err(("%s:INTR EXT NOT supported by either host or"
+ "card!, ignoring\n", __FUNCTION__));
+ }
+ }
+ }
+
+ /* Set host side via Host reg */
+ reg8 = sdstd_rreg8(sd, SD_HostCntrl) & ~SD4_MODE;
+ if (new_mode == SDIOH_MODE_SD4)
+ reg8 |= SD4_MODE;
+ sdstd_wreg8(sd, SD_HostCntrl, reg8);
+
+ sd->sd_mode = new_mode;
+
+ return TRUE;
+}
+
+static int
+sdstd_driver_init(sdioh_info_t *sd)
+{
+ sd_trace(("%s\n", __FUNCTION__));
+ sd->sd3_tuning_reqd = FALSE;
+ sd->sd3_tuning_disable = FALSE;
+ if ((sdstd_host_init(sd)) != SUCCESS) {
+ return ERROR;
+ }
+
+ /* Give WL_reset before sending CMD5 to dongle for Revx SDIO3 HC's */
+ if ((sd->controller_type == SDIOH_TYPE_RICOH_R5C822) && (sd->version == HOST_CONTR_VER_3))
+ {
+ sdstd_wreg16(sd, SD3_WL_BT_reset_register, 0x8);
+ OSL_DELAY(sd_delay_value);
+ sdstd_wreg16(sd, SD3_WL_BT_reset_register, 0x0);
+ OSL_DELAY(500000);
+ }
+
+ if (sdstd_client_init(sd) != SUCCESS) {
+ return ERROR;
+ }
+
+ /* If the global cap matched and the mode is SDR104/SDR50 [for SDR50, only if reqd], enable tuning. */
+ if ((TRUE != sd3_sw_override1) && SD3_TUNING_REQD(sd, sd_uhsimode)) {
+ sd->sd3_tuning_reqd = TRUE;
+
+ /* init OS structs for tuning */
+ sdstd_3_osinit_tuning(sd);
+
+ /* enable HC tuning interrupt OR timer based on tuning method */
+ if (GFIELD(sd->caps3, CAP3_RETUNING_MODES)) {
+ /* enable both RTReq and timer */
+ sd->intmask |= HC_INTR_RETUNING;
+ sdstd_wreg16(sd, SD_IntrSignalEnable, sd->intmask);
+#ifdef BCMSDYIELD
+ if (sd_forcerb)
+ sdstd_rreg16(sd, SD_IntrSignalEnable); /* Sync readback */
+#endif /* BCMSDYIELD */
+ }
+ }
+
+ return SUCCESS;
+}
+
+static int
+sdstd_get_cisaddr(sdioh_info_t *sd, uint32 regaddr)
+{
+ /* read 24 bits and return valid 17 bit addr */
+ int i;
+ uint32 scratch, regdata;
+ uint8 *ptr = (uint8 *)&scratch;
+ for (i = 0; i < 3; i++) {
+ if ((sdstd_card_regread (sd, 0, regaddr, 1, &regdata)) != SUCCESS)
+ sd_err(("%s: Can't read!\n", __FUNCTION__));
+
+ *ptr++ = (uint8) regdata;
+ regaddr++;
+ }
+ /* Only the lower 17-bits are valid */
+ scratch = ltoh32(scratch);
+ scratch &= 0x0001FFFF;
+ return (scratch);
+}
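+/* Illustration (made-up bytes): reading 0x34, 0x12, 0x01 from three
+ consecutive CIS pointer registers assembles little-endian to 0x00011234;
+ any garbage in the fourth byte is discarded by the 17-bit mask, leaving
+ 0x011234 as the CIS address.
+ */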
+
+static int
+sdstd_card_enablefuncs(sdioh_info_t *sd)
+{
+ int status;
+ uint32 regdata;
+ uint32 fbraddr;
+ uint8 func;
+
+ sd_trace(("%s\n", __FUNCTION__));
+
+ /* Get the Card's common CIS address */
+ sd->com_cis_ptr = sdstd_get_cisaddr(sd, SDIOD_CCCR_CISPTR_0);
+ sd->func_cis_ptr[0] = sd->com_cis_ptr;
+ sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr));
+
+ /* Get the Card's function CIS (for each function) */
+ for (fbraddr = SDIOD_FBR_STARTADDR, func = 1;
+ func <= sd->num_funcs; func++, fbraddr += SDIOD_FBR_SIZE) {
+ sd->func_cis_ptr[func] = sdstd_get_cisaddr(sd, SDIOD_FBR_CISPTR_0 + fbraddr);
+ sd_info(("%s: Function %d CIS Ptr = 0x%x\n",
+ __FUNCTION__, func, sd->func_cis_ptr[func]));
+ }
+
+ /* Enable function 1 on the card */
+ regdata = SDIO_FUNC_ENABLE_1;
+ if ((status = sdstd_card_regwrite(sd, 0, SDIOD_CCCR_IOEN, 1, regdata)) != SUCCESS)
+ return status;
+
+ return SUCCESS;
+}
+
+/* Read client card reg */
+static int
+sdstd_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data)
+{
+ int status;
+ uint32 cmd_arg;
+ uint32 rsp5;
+
+#ifdef BCMDBG
+ if (sdstd_rreg16 (sd, SD_ErrorIntrStatus) != 0) {
+ sd_err(("%s: Entering: ErrorintrStatus 0x%x, intstat = 0x%x\n",
+ __FUNCTION__, sdstd_rreg16(sd, SD_ErrorIntrStatus),
+ sdstd_rreg16(sd, SD_IntrStatus)));
+ }
+#endif
+
+ cmd_arg = 0;
+
+ if ((func == 0) || (regsize == 1)) {
+ cmd_arg = SFIELD(cmd_arg, CMD52_FUNCTION, func);
+ cmd_arg = SFIELD(cmd_arg, CMD52_REG_ADDR, regaddr);
+ cmd_arg = SFIELD(cmd_arg, CMD52_RW_FLAG, SDIOH_XFER_TYPE_READ);
+ cmd_arg = SFIELD(cmd_arg, CMD52_RAW, 0);
+ cmd_arg = SFIELD(cmd_arg, CMD52_DATA, 0);
+
+ if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_52, cmd_arg))
+ != SUCCESS)
+ return status;
+
+ sdstd_cmd_getrsp(sd, &rsp5, 1);
+ if (sdstd_rreg16(sd, SD_ErrorIntrStatus) != 0) {
+ sd_err(("%s: 1: ErrorintrStatus 0x%x\n",
+ __FUNCTION__, sdstd_rreg16(sd, SD_ErrorIntrStatus)));
+ }
+
+ if (GFIELD(rsp5, RSP5_FLAGS) != 0x10)
+ sd_err(("%s: rsp5 flags is 0x%x\t %d\n",
+ __FUNCTION__, GFIELD(rsp5, RSP5_FLAGS), func));
+
+ if (GFIELD(rsp5, RSP5_STUFF))
+ sd_err(("%s: rsp5 stuff is 0x%x: should be 0\n",
+ __FUNCTION__, GFIELD(rsp5, RSP5_STUFF)));
+ *data = GFIELD(rsp5, RSP5_DATA);
+
+ sd_data(("%s: Resp data(0x%x)\n", __FUNCTION__, *data));
+ } else {
+ cmd_arg = SFIELD(cmd_arg, CMD53_BYTE_BLK_CNT, regsize);
+ cmd_arg = SFIELD(cmd_arg, CMD53_OP_CODE, 1); /* XXX SDIO spec v 1.10, Sec 5.3 */
+ cmd_arg = SFIELD(cmd_arg, CMD53_BLK_MODE, 0);
+ cmd_arg = SFIELD(cmd_arg, CMD53_FUNCTION, func);
+ cmd_arg = SFIELD(cmd_arg, CMD53_REG_ADDR, regaddr);
+ cmd_arg = SFIELD(cmd_arg, CMD53_RW_FLAG, SDIOH_XFER_TYPE_READ);
+
+ sd->data_xfer_count = regsize;
+
+ /* sdstd_cmd_issue() returns with the command complete bit
+ * in the ISR already cleared
+ */
+ if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_53, cmd_arg))
+ != SUCCESS)
+ return status;
+
+ sdstd_cmd_getrsp(sd, &rsp5, 1);
+
+ if (GFIELD(rsp5, RSP5_FLAGS) != 0x10)
+ sd_err(("%s: rsp5 flags is 0x%x\t %d\n",
+ __FUNCTION__, GFIELD(rsp5, RSP5_FLAGS), func));
+
+ if (GFIELD(rsp5, RSP5_STUFF))
+ sd_err(("%s: rsp5 stuff is 0x%x: should be 0\n",
+ __FUNCTION__, GFIELD(rsp5, RSP5_STUFF)));
+
+ if (sd->polled_mode) {
+ volatile uint16 int_reg;
+ int retries = RETRIES_LARGE;
+
+ /* Wait for Read Buffer to become ready */
+ do {
+ sdstd_os_yield(sd);
+ int_reg = sdstd_rreg16(sd, SD_IntrStatus);
+ } while (--retries && (GFIELD(int_reg, INTSTAT_BUF_READ_READY) == 0));
+
+ if (!retries) {
+ sd_err(("%s: Timeout on Buf_Read_Ready: "
+ "intStat: 0x%x errint: 0x%x PresentState 0x%x\n",
+ __FUNCTION__, int_reg,
+ sdstd_rreg16(sd, SD_ErrorIntrStatus),
+ sdstd_rreg(sd, SD_PresentState)));
+ sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg);
+ return (ERROR);
+ }
+
+ /* Have Buffer Ready, so clear it and read the data */
+ sdstd_wreg16(sd, SD_IntrStatus, SFIELD(0, INTSTAT_BUF_READ_READY, 1));
+ if (regsize == 2)
+ *data = sdstd_rreg16(sd, SD_BufferDataPort0);
+ else
+ *data = sdstd_rreg(sd, SD_BufferDataPort0);
+
+ sd_data(("%s: Resp data(0x%x)\n", __FUNCTION__, *data));
+ /* Check Status.
+ * After the data is read, the Transfer Complete bit should be on
+ */
+ retries = RETRIES_LARGE;
+ do {
+ int_reg = sdstd_rreg16(sd, SD_IntrStatus);
+ } while (--retries && (GFIELD(int_reg, INTSTAT_XFER_COMPLETE) == 0));
+
+ /* Check for any errors from the data phase */
+ if (sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg))
+ return ERROR;
+
+ if (!retries) {
+ sd_err(("%s: Timeout on xfer complete: "
+ "intr 0x%04x err 0x%04x state 0x%08x\n",
+ __FUNCTION__, int_reg,
+ sdstd_rreg16(sd, SD_ErrorIntrStatus),
+ sdstd_rreg(sd, SD_PresentState)));
+ return (ERROR);
+ }
+
+ sdstd_wreg16(sd, SD_IntrStatus, SFIELD(0, INTSTAT_XFER_COMPLETE, 1));
+ }
+ }
+ if (sd->polled_mode) {
+ if (regsize == 2)
+ *data &= 0xffff;
+ }
+ return SUCCESS;
+}
+
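+/* Returns TRUE if an interrupt enabled in sd->intmask fired. For a card
+ interrupt, INTSTAT_CARD_INT is disabled around the client handler call and
+ re-enabled afterwards; local HC interrupts instead disable signalling, set
+ got_hcint and latch the status into last_intrstatus for the waiter.
+ */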
+bool
+check_client_intr(sdioh_info_t *sd)
+{
+ uint16 raw_int, cur_int, old_int;
+
+ raw_int = sdstd_rreg16(sd, SD_IntrStatus);
+ cur_int = raw_int & sd->intmask;
+
+ if (!cur_int) {
+ /* Not an error -- might share interrupts... */
+ return FALSE;
+ }
+
+ if (GFIELD(cur_int, INTSTAT_CARD_INT)) {
+ unsigned long flags;
+
+ sdstd_os_lock_irqsave(sd, &flags);
+ old_int = sdstd_rreg16(sd, SD_IntrStatusEnable);
+ sdstd_wreg16(sd, SD_IntrStatusEnable, SFIELD(old_int, INTSTAT_CARD_INT, 0));
+ sdstd_os_unlock_irqrestore(sd, &flags);
+
+ if (sd->client_intr_enabled && sd->use_client_ints) {
+ sd->intrcount++;
+ ASSERT(sd->intr_handler);
+ ASSERT(sd->intr_handler_arg);
+ (sd->intr_handler)(sd->intr_handler_arg);
+ } else {
+ sd_err(("%s: Not ready for intr: enabled %d, handler %p\n",
+ __FUNCTION__, sd->client_intr_enabled, sd->intr_handler));
+ }
+ sdstd_os_lock_irqsave(sd, &flags);
+ old_int = sdstd_rreg16(sd, SD_IntrStatusEnable);
+ sdstd_wreg16(sd, SD_IntrStatusEnable, SFIELD(old_int, INTSTAT_CARD_INT, 1));
+ sdstd_os_unlock_irqrestore(sd, &flags);
+ } else {
+ /* Local interrupt: disable, set flag, and save intrstatus */
+ sdstd_wreg16(sd, SD_IntrSignalEnable, 0);
+ sdstd_wreg16(sd, SD_ErrorIntrSignalEnable, 0);
+ sd->local_intrcount++;
+ sd->got_hcint = TRUE;
+ sd->last_intrstatus = cur_int;
+ }
+
+ return TRUE;
+}
+
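+/* Spin until one of the requested normal (norm) or error (err) interrupt
+ bits asserts, or RETRIES_LARGE polls elapse; the observed status, widened
+ with sd->intmask plus an error flag if an error bit matched, is latched
+ into last_intrstatus for the caller.
+ */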
+void
+sdstd_spinbits(sdioh_info_t *sd, uint16 norm, uint16 err)
+{
+ uint16 int_reg, err_reg;
+ int retries = RETRIES_LARGE;
+
+ do {
+ int_reg = sdstd_rreg16(sd, SD_IntrStatus);
+ err_reg = sdstd_rreg16(sd, SD_ErrorIntrStatus);
+ } while (--retries && !(int_reg & norm) && !(err_reg & err));
+
+ norm |= sd->intmask;
+ if (err_reg & err)
+ norm = SFIELD(norm, INTSTAT_ERROR_INT, 1);
+ sd->last_intrstatus = int_reg & norm;
+}
+
+/* write a client register */
+static int
+sdstd_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 data)
+{
+ int status;
+ uint32 cmd_arg, rsp5, flags;
+
+ cmd_arg = 0;
+
+ if ((func == 0) || (regsize == 1)) {
+ cmd_arg = SFIELD(cmd_arg, CMD52_FUNCTION, func);
+ cmd_arg = SFIELD(cmd_arg, CMD52_REG_ADDR, regaddr);
+ cmd_arg = SFIELD(cmd_arg, CMD52_RW_FLAG, SDIOH_XFER_TYPE_WRITE);
+ cmd_arg = SFIELD(cmd_arg, CMD52_RAW, 0);
+ cmd_arg = SFIELD(cmd_arg, CMD52_DATA, data & 0xff);
+ if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_52, cmd_arg))
+ != SUCCESS)
+ return status;
+
+ sdstd_cmd_getrsp(sd, &rsp5, 1);
+ flags = GFIELD(rsp5, RSP5_FLAGS);
+ if (flags && (flags != 0x10))
+ sd_err(("%s: rsp5.rsp5.flags = 0x%x, expecting 0x10\n",
+ __FUNCTION__, flags));
+ }
+ else {
+ cmd_arg = SFIELD(cmd_arg, CMD53_BYTE_BLK_CNT, regsize);
+ /* XXX SDIO spec v 1.10, Sec 5.3 Not FIFO */
+ cmd_arg = SFIELD(cmd_arg, CMD53_OP_CODE, 1);
+ cmd_arg = SFIELD(cmd_arg, CMD53_BLK_MODE, 0);
+ cmd_arg = SFIELD(cmd_arg, CMD53_FUNCTION, func);
+ cmd_arg = SFIELD(cmd_arg, CMD53_REG_ADDR, regaddr);
+ cmd_arg = SFIELD(cmd_arg, CMD53_RW_FLAG, SDIOH_XFER_TYPE_WRITE);
+
+ sd->data_xfer_count = regsize;
+
+ /* sdstd_cmd_issue() returns with the command complete bit
+ * in the ISR already cleared
+ */
+ if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_53, cmd_arg))
+ != SUCCESS)
+ return status;
+
+ sdstd_cmd_getrsp(sd, &rsp5, 1);
+
+ if (GFIELD(rsp5, RSP5_FLAGS) != 0x10)
+ sd_err(("%s: rsp5 flags = 0x%x, expecting 0x10\n",
+ __FUNCTION__, GFIELD(rsp5, RSP5_FLAGS)));
+ if (GFIELD(rsp5, RSP5_STUFF))
+ sd_err(("%s: rsp5 stuff is 0x%x: expecting 0\n",
+ __FUNCTION__, GFIELD(rsp5, RSP5_STUFF)));
+
+ if (sd->polled_mode) {
+ uint16 int_reg;
+ int retries = RETRIES_LARGE;
+
+ /* Wait for Write Buffer to become ready */
+ do {
+ int_reg = sdstd_rreg16(sd, SD_IntrStatus);
+ } while (--retries && (GFIELD(int_reg, INTSTAT_BUF_WRITE_READY) == 0));
+
+ if (!retries) {
+ sd_err(("%s: Timeout on Buf_Write_Ready: intStat: 0x%x "
+ "errint: 0x%x PresentState 0x%x\n",
+ __FUNCTION__, int_reg,
+ sdstd_rreg16(sd, SD_ErrorIntrStatus),
+ sdstd_rreg(sd, SD_PresentState)));
+ sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg);
+ return (ERROR);
+ }
+ /* Clear Write Buf Ready bit */
+ int_reg = 0;
+ int_reg = SFIELD(int_reg, INTSTAT_BUF_WRITE_READY, 1);
+ sdstd_wreg16(sd, SD_IntrStatus, int_reg);
+
+ /* At this point we have Buffer Ready, so write the data */
+ if (regsize == 2)
+ sdstd_wreg16(sd, SD_BufferDataPort0, (uint16) data);
+ else
+ sdstd_wreg(sd, SD_BufferDataPort0, data);
+
+ /* Wait for Transfer Complete */
+ retries = RETRIES_LARGE;
+ do {
+ int_reg = sdstd_rreg16(sd, SD_IntrStatus);
+ } while (--retries && (GFIELD(int_reg, INTSTAT_XFER_COMPLETE) == 0));
+
+ /* Check for any errors from the data phase */
+ if (sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg))
+ return ERROR;
+
+ if (retries == 0) {
+ sd_err(("%s: Timeout for xfer complete; State = 0x%x, "
+ "intr state=0x%x, Errintstatus 0x%x rcnt %d, tcnt %d\n",
+ __FUNCTION__, sdstd_rreg(sd, SD_PresentState),
+ int_reg, sdstd_rreg16(sd, SD_ErrorIntrStatus),
+ sd->r_cnt, sd->t_cnt));
+ }
+ /* Clear the status bits */
+ sdstd_wreg16(sd, SD_IntrStatus, SFIELD(int_reg, INTSTAT_CARD_INT, 0));
+ }
+ }
+ return SUCCESS;
+}
+
+void
+sdstd_cmd_getrsp(sdioh_info_t *sd, uint32 *rsp_buffer, int count /* num 32 bit words */)
+{
+ int rsp_count;
+ int respaddr = SD_Response0;
+
+ if (count > 4)
+ count = 4;
+
+ for (rsp_count = 0; rsp_count < count; rsp_count++) {
+ *rsp_buffer++ = sdstd_rreg(sd, respaddr);
+ respaddr += 4;
+ }
+}
+
+/*
+ Note: options: 0 - default
+ 1 - tuning option: this cmd is issued as part of tuning,
+ so there is no need to check the start-tuning function.
+*/
+static int
+sdstd_cmd_issue(sdioh_info_t *sdioh_info, bool use_dma, uint32 cmd, uint32 arg)
+{
+ uint16 cmd_reg;
+ int retries;
+ uint32 cmd_arg;
+ uint16 xfer_reg = 0;
+
+#ifdef BCMDBG
+ if (sdstd_rreg16(sdioh_info, SD_ErrorIntrStatus) != 0) {
+ sd_err(("%s: Entering: ErrorIntrStatus 0x%x, Expecting 0\n",
+ __FUNCTION__, sdstd_rreg16(sdioh_info, SD_ErrorIntrStatus)));
+ }
+#endif
+
+ if ((sdioh_info->sd_mode == SDIOH_MODE_SPI) &&
+ ((cmd == SDIOH_CMD_3) || (cmd == SDIOH_CMD_7) || (cmd == SDIOH_CMD_15))) {
+ sd_err(("%s: Cmd %d is not for SPI\n", __FUNCTION__, cmd));
+ return ERROR;
+ }
+
+ retries = RETRIES_SMALL;
+ while ((GFIELD(sdstd_rreg(sdioh_info, SD_PresentState), PRES_CMD_INHIBIT)) && --retries) {
+ if (retries == RETRIES_SMALL)
+ sd_err(("%s: Waiting for Command Inhibit cmd = %d 0x%x\n",
+ __FUNCTION__, cmd, sdstd_rreg(sdioh_info, SD_PresentState)));
+ }
+ if (!retries) {
+ sd_err(("%s: Command Inhibit timeout\n", __FUNCTION__));
+ if (trap_errs)
+ ASSERT(0);
+ return ERROR;
+ }
+
+ cmd_reg = 0;
+ switch (cmd) {
+ case SDIOH_CMD_0: /* Set Card to Idle State - No Response */
+ sd_data(("%s: CMD%d\n", __FUNCTION__, cmd));
+ cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_NONE);
+ cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0);
+ cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0);
+ cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
+ cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
+ cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
+ break;
+
+ case SDIOH_CMD_3: /* Ask card to send RCA - Response R6 */
+ sd_data(("%s: CMD%d\n", __FUNCTION__, cmd));
+ cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48);
+ cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0);
+ cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0);
+ cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
+ cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
+ cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
+ break;
+
+ case SDIOH_CMD_5: /* Send Operation condition - Response R4 */
+ sd_data(("%s: CMD%d\n", __FUNCTION__, cmd));
+ cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48);
+ cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0);
+ cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0);
+ cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
+ cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
+ cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
+ break;
+
+ case SDIOH_CMD_7: /* Select card - Response R1 */
+ sd_data(("%s: CMD%d\n", __FUNCTION__, cmd));
+ cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48);
+ cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 1);
+ cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 1);
+ cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
+ cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
+ cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
+ break;
+
+ case SDIOH_CMD_14: /* eSD Sleep - Response R1 */
+ case SDIOH_CMD_11: /* Signal voltage switch - Response R1 */
+ sd_data(("%s: CMD%d\n", __FUNCTION__, cmd));
+ cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48);
+ cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 1);
+ cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 1);
+ cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
+ cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
+ cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
+ break;
+
+ case SDIOH_CMD_15: /* Set card to inactive state - Response None */
+ sd_data(("%s: CMD%d\n", __FUNCTION__, cmd));
+ cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_NONE);
+ cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0);
+ cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0);
+ cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
+ cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
+ cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
+ break;
+
+ case SDIOH_CMD_19: /* clock tuning - Response R1 */
+ sd_data(("%s: CMD%d\n", __FUNCTION__, cmd));
+ cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48);
+ cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 1);
+ cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 1);
+ cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 1);
+ cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
+ cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
+ /* Host controller reads 64 byte magic pattern from card
+ * Hence Direction = 1 ( READ )
+ */
+ xfer_reg = SFIELD(xfer_reg, XFER_DATA_DIRECTION, 1);
+ break;
+
+ case SDIOH_CMD_52: /* IO R/W Direct (single byte) - Response R5 */
+
+ sd_data(("%s: CMD52 func(%d) addr(0x%x) %s data(0x%x)\n",
+ __FUNCTION__,
+ GFIELD(arg, CMD52_FUNCTION),
+ GFIELD(arg, CMD52_REG_ADDR),
+ GFIELD(arg, CMD52_RW_FLAG) ? "W" : "R",
+ GFIELD(arg, CMD52_DATA)));
+
+ cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48);
+ cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 1);
+ cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 1);
+ cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
+ cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
+ cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
+ break;
+
+ case SDIOH_CMD_53: /* IO R/W Extended (multiple bytes/blocks) */
+
+ sd_data(("%s: CMD53 func(%d) addr(0x%x) %s mode(%s) cnt(%d), %s\n",
+ __FUNCTION__,
+ GFIELD(arg, CMD53_FUNCTION),
+ GFIELD(arg, CMD53_REG_ADDR),
+ GFIELD(arg, CMD53_RW_FLAG) ? "W" : "R",
+ GFIELD(arg, CMD53_BLK_MODE) ? "Block" : "Byte",
+ GFIELD(arg, CMD53_BYTE_BLK_CNT),
+ GFIELD(arg, CMD53_OP_CODE) ? "Incrementing addr" : "Single addr"));
+
+ cmd_arg = arg;
+ xfer_reg = 0;
+
+ cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48);
+ cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 1);
+ cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 1);
+ cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 1);
+ cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
+ cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
+
+ use_dma = USE_DMA(sdioh_info) && GFIELD(cmd_arg, CMD53_BLK_MODE);
+
+ if (GFIELD(cmd_arg, CMD53_BLK_MODE)) {
+ uint16 blocksize;
+ uint16 blockcount;
+ int func;
+
+ ASSERT(sdioh_info->sd_blockmode);
+
+ func = GFIELD(cmd_arg, CMD53_FUNCTION);
+ blocksize = MIN((int)sdioh_info->data_xfer_count,
+ sdioh_info->client_block_size[func]);
+ blockcount = GFIELD(cmd_arg, CMD53_BYTE_BLK_CNT);
+
+ /* data_xfer_cnt is already setup so that for multiblock mode,
+ * it is the entire buffer length. For non-block or single block,
+ * it is < 64 bytes
+ */
+ if (use_dma) {
+ switch (sdioh_info->sd_dma_mode) {
+ case DMA_MODE_SDMA:
+ sd_dma(("%s: SDMA: SysAddr reg was 0x%x now 0x%x\n",
+ __FUNCTION__, sdstd_rreg(sdioh_info, SD_SysAddr),
+ (uint32)sdioh_info->dma_phys));
+ sdstd_wreg(sdioh_info, SD_SysAddr, sdioh_info->dma_phys);
+ break;
+ case DMA_MODE_ADMA1:
+ case DMA_MODE_ADMA2:
+ sd_dma(("%s: ADMA: Using ADMA\n", __FUNCTION__));
+#ifdef BCMSDIOH_TXGLOM
+ /* multi-descriptor is currently used only for hc3 */
+ if ((sdioh_info->glom_info.count != 0) &&
+ (sdioh_info->txglom_mode == SDPCM_TXGLOM_MDESC)) {
+ uint32 i = 0;
+ for (i = 0;
+ i < sdioh_info->glom_info.count-1;
+ i++) {
+ glom_buf_t *glom_info;
+ glom_info = &(sdioh_info->glom_info);
+ sd_create_adma_descriptor(sdioh_info,
+ i,
+ glom_info->dma_phys_arr[i],
+ glom_info->nbytes[i],
+ ADMA2_ATTRIBUTE_VALID |
+ ADMA2_ATTRIBUTE_ACT_TRAN);
+ }
+
+ sd_create_adma_descriptor(sdioh_info,
+ i,
+ sdioh_info->glom_info.dma_phys_arr[i],
+ sdioh_info->glom_info.nbytes[i],
+ ADMA2_ATTRIBUTE_VALID |
+ ADMA2_ATTRIBUTE_END |
+ ADMA2_ATTRIBUTE_INT |
+ ADMA2_ATTRIBUTE_ACT_TRAN);
+ } else
+#endif /* BCMSDIOH_TXGLOM */
+ {
+ sd_create_adma_descriptor(sdioh_info, 0,
+ sdioh_info->dma_phys, blockcount*blocksize,
+ ADMA2_ATTRIBUTE_VALID | ADMA2_ATTRIBUTE_END |
+ ADMA2_ATTRIBUTE_INT | ADMA2_ATTRIBUTE_ACT_TRAN);
+ }
+ /* Dump descriptor if DMA debugging is enabled. */
+ if (sd_msglevel & SDH_DMA_VAL) {
+ sd_dump_adma_dscr(sdioh_info);
+ }
+
+ sdstd_wreg(sdioh_info, SD_ADMA_SysAddr,
+ sdioh_info->adma2_dscr_phys);
+ break;
+ default:
+ sd_err(("%s: unsupported DMA mode %d.\n",
+ __FUNCTION__, sdioh_info->sd_dma_mode));
+ break;
+ }
+ }
+
+ sd_trace(("%s: Setting block count %d, block size %d bytes\n",
+ __FUNCTION__, blockcount, blocksize));
+ sdstd_wreg16(sdioh_info, SD_BlockSize, blocksize);
+ sdstd_wreg16(sdioh_info, SD_BlockCount, blockcount);
+
+ xfer_reg = SFIELD(xfer_reg, XFER_DMA_ENABLE, use_dma);
+
+ if (sdioh_info->client_block_size[func] != blocksize)
+ set_client_block_size(sdioh_info, 1, blocksize);
+
+ if (blockcount > 1) {
+ xfer_reg = SFIELD(xfer_reg, XFER_MULTI_BLOCK, 1);
+ xfer_reg = SFIELD(xfer_reg, XFER_BLK_COUNT_EN, 1);
+ xfer_reg = SFIELD(xfer_reg, XFER_CMD_12_EN, 0);
+ } else {
+ xfer_reg = SFIELD(xfer_reg, XFER_MULTI_BLOCK, 0);
+ xfer_reg = SFIELD(xfer_reg, XFER_BLK_COUNT_EN, 0);
+ xfer_reg = SFIELD(xfer_reg, XFER_CMD_12_EN, 0);
+ }
+
+ if (GFIELD(cmd_arg, CMD53_RW_FLAG) == SDIOH_XFER_TYPE_READ)
+ xfer_reg = SFIELD(xfer_reg, XFER_DATA_DIRECTION, 1);
+ else
+ xfer_reg = SFIELD(xfer_reg, XFER_DATA_DIRECTION, 0);
+
+ retries = RETRIES_SMALL;
+ while (GFIELD(sdstd_rreg(sdioh_info, SD_PresentState),
+ PRES_DAT_INHIBIT) && --retries)
+ sd_err(("%s: Waiting for Data Inhibit cmd = %d\n",
+ __FUNCTION__, cmd));
+ if (!retries) {
+ sd_err(("%s: Data Inhibit timeout\n", __FUNCTION__));
+ if (trap_errs)
+ ASSERT(0);
+ return ERROR;
+ }
+
+ /* Consider deferring this write to the comment below "Deferred Write" */
+ sdstd_wreg16(sdioh_info, SD_TransferMode, xfer_reg);
+
+ } else { /* Non block mode */
+ uint16 bytes = GFIELD(cmd_arg, CMD53_BYTE_BLK_CNT);
+ /* The byte/block count field only has 9 bits,
+ * so, to do a 512-byte bytemode transfer, this
+ * field will contain 0, but we need to tell the
+ * controller we're transferring 512 bytes.
+ */
+ if (bytes == 0) bytes = 512;
+
+ if (use_dma)
+ sdstd_wreg(sdioh_info, SD_SysAddr, sdioh_info->dma_phys);
+
+ /* PCI: Transfer Mode register 0x0c */
+ xfer_reg = SFIELD(xfer_reg, XFER_DMA_ENABLE, bytes <= 4 ? 0 : use_dma);
+ xfer_reg = SFIELD(xfer_reg, XFER_CMD_12_EN, 0);
+ if (GFIELD(cmd_arg, CMD53_RW_FLAG) == SDIOH_XFER_TYPE_READ)
+ xfer_reg = SFIELD(xfer_reg, XFER_DATA_DIRECTION, 1);
+ else
+ xfer_reg = SFIELD(xfer_reg, XFER_DATA_DIRECTION, 0);
+ /* See table 2-8 Host Controller spec ver 1.00 */
+ xfer_reg = SFIELD(xfer_reg, XFER_BLK_COUNT_EN, 0); /* Don't care */
+ xfer_reg = SFIELD(xfer_reg, XFER_MULTI_BLOCK, 0);
+
+ sdstd_wreg16(sdioh_info, SD_BlockSize, bytes);
+
+ /* XXX This should be a don't care but Arasan needs it
+ * to be one. It's fixed in later versions (but they
+ * don't have version numbers, sigh).
+ */
+ sdstd_wreg16(sdioh_info, SD_BlockCount, 1);
+
+ retries = RETRIES_SMALL;
+ while (GFIELD(sdstd_rreg(sdioh_info, SD_PresentState),
+ PRES_DAT_INHIBIT) && --retries)
+ sd_err(("%s: Waiting for Data Inhibit cmd = %d\n",
+ __FUNCTION__, cmd));
+ if (!retries) {
+ sd_err(("%s: Data Inhibit timeout\n", __FUNCTION__));
+ if (trap_errs)
+ ASSERT(0);
+ return ERROR;
+ }
+
+ /* Consider deferring this write to the comment below "Deferred Write" */
+ sdstd_wreg16(sdioh_info, SD_TransferMode, xfer_reg);
+ }
+ break;
+
+ default:
+ sd_err(("%s: Unknown command\n", __FUNCTION__));
+ return ERROR;
+ }
+
+ if (sdioh_info->sd_mode == SDIOH_MODE_SPI) {
+ cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0);
+ cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0);
+ }
+
+ /* Setup and issue the SDIO command */
+ sdstd_wreg(sdioh_info, SD_Arg0, arg);
+
+ /* Deferred Write
+ * Consider deferring the two writes above until this point in the code.
+ * The following would do one 32 bit write.
+ *
+ * {
+ * uint32 tmp32 = cmd_reg << 16;
+ * tmp32 |= xfer_reg;
+ * sdstd_wreg(sdioh_info, SD_TransferMode, tmp32);
+ * }
+ */
+
+ /* Alternate to Deferred Write START */
+
+ /* In response to CMD19 card sends 64 byte magic pattern.
+ * So SD_BlockSize = 64 & SD_BlockCount = 1
+ */
+ if (GFIELD(cmd_reg, CMD_INDEX) == SDIOH_CMD_19) {
+ sdstd_wreg16(sdioh_info, SD_TransferMode, xfer_reg);
+ sdstd_wreg16(sdioh_info, SD_BlockSize, 64);
+ sdstd_wreg16(sdioh_info, SD_BlockCount, 1);
+ }
+ sdstd_wreg16(sdioh_info, SD_Command, cmd_reg);
+
+ /* Alternate to Deferred Write END */
+
+ /* If we are in polled mode, wait for the command to complete.
+ * In interrupt mode, return immediately. The calling function will
+ * know that the command has completed when the CMDATDONE interrupt
+ * is asserted
+ */
+ if (sdioh_info->polled_mode) {
+ uint16 int_reg = 0;
+ retries = RETRIES_LARGE;
+
+ /* For CMD19 no need to wait for cmd completion */
+ if (GFIELD(cmd_reg, CMD_INDEX) == SDIOH_CMD_19)
+ return SUCCESS;
+
+ do {
+ int_reg = sdstd_rreg16(sdioh_info, SD_IntrStatus);
+ sdstd_os_yield(sdioh_info);
+ } while (--retries &&
+ (GFIELD(int_reg, INTSTAT_ERROR_INT) == 0) &&
+ (GFIELD(int_reg, INTSTAT_CMD_COMPLETE) == 0));
+
+ if (!retries) {
+ sd_err(("%s: CMD_COMPLETE timeout: intrStatus: 0x%x "
+ "error stat 0x%x state 0x%x\n",
+ __FUNCTION__, int_reg,
+ sdstd_rreg16(sdioh_info, SD_ErrorIntrStatus),
+ sdstd_rreg(sdioh_info, SD_PresentState)));
+
+ /* Attempt to reset CMD line when we get a CMD timeout */
+ sdstd_wreg8(sdioh_info, SD_SoftwareReset, SFIELD(0, SW_RESET_CMD, 1));
+ retries = RETRIES_LARGE;
+ do {
+ sd_trace(("%s: waiting for CMD line reset\n", __FUNCTION__));
+ } while ((GFIELD(sdstd_rreg8(sdioh_info, SD_SoftwareReset),
+ SW_RESET_CMD)) && retries--);
+
+ if (!retries) {
+ sd_err(("%s: Timeout waiting for CMD line reset\n", __FUNCTION__));
+ }
+
+ if (trap_errs)
+ ASSERT(0);
+ return (ERROR);
+ }
+
+ /* Clear Command Complete interrupt */
+ int_reg = SFIELD(0, INTSTAT_CMD_COMPLETE, 1);
+ sdstd_wreg16(sdioh_info, SD_IntrStatus, int_reg);
+
+ /* Check for Errors */
+ if (sdstd_check_errs(sdioh_info, cmd, arg)) {
+ if (trap_errs)
+ ASSERT(0);
+ return ERROR;
+ }
+ }
+ return SUCCESS;
+}
+
+/*
+ * XXX On entry: If single block or non-block, buffersize <= blocksize.
+ * If multiblock, buffersize is unlimited.
+ * The question is how to handle the leftovers in either single or multiblock.
+ * The caller should break the buffer up so this routine will always
+ * use blocksize == buffersize to handle the end piece of the buffer.
+ */
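+/* Illustrative sketch only (not part of this driver): a caller honoring the
+ * contract above could split a transfer so the tail piece is issued with
+ * blocksize == buffersize, e.g.:
+ *
+ *     whole = (nbytes / blocksize) * blocksize;
+ *     if (whole)
+ *         sdstd_card_buf(sd, rw, func, fifo, addr, whole, data);
+ *     if (nbytes - whole)
+ *         sdstd_card_buf(sd, rw, func, fifo, addr, nbytes - whole,
+ *                        data + whole / 4);
+ */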
+
+static int
+sdstd_card_buf(sdioh_info_t *sd, int rw, int func, bool fifo, uint32 addr, int nbytes, uint32 *data)
+{
+ int retval = SUCCESS;
+ int status;
+ uint32 cmd_arg;
+ uint32 rsp5;
+ uint16 int_reg, int_bit;
+ uint flags;
+ int num_blocks, blocksize;
+ bool local_blockmode, local_dma;
+ bool read = rw == SDIOH_READ ? 1 : 0;
+ bool local_yield = FALSE;
+#ifdef BCMSDIOH_TXGLOM
+ uint32 i;
+ uint8 *localbuf = NULL;
+#endif
+
+ ASSERT(nbytes);
+
+ cmd_arg = 0;
+
+ sd_data(("%s: %s 53 addr 0x%x, len %d bytes, r_cnt %d t_cnt %d\n",
+ __FUNCTION__, read ? "Rd" : "Wr", addr, nbytes, sd->r_cnt, sd->t_cnt));
+
+ if (read) sd->r_cnt++; else sd->t_cnt++;
+
+ local_blockmode = sd->sd_blockmode;
+ local_dma = USE_DMA(sd);
+
+#ifdef BCMSDIOH_TXGLOM
+ /* If multiple buffers are there, then calculate the nbytes from that */
+ if (!read && (func == SDIO_FUNC_2) && (sd->glom_info.count != 0)) {
+ uint32 ii;
+ nbytes = 0;
+ for (ii = 0; ii < sd->glom_info.count; ii++) {
+ nbytes += sd->glom_info.nbytes[ii];
+ }
+ ASSERT(nbytes <= sd->alloced_dma_size);
+ }
+#endif
+
+ /* Don't bother with block mode on small xfers */
+ if (nbytes < sd->client_block_size[func]) {
+ sd_data(("setting local blockmode to false: nbytes (%d) != block_size (%d)\n",
+ nbytes, sd->client_block_size[func]));
+ local_blockmode = FALSE;
+ local_dma = FALSE;
+#ifdef BCMSDIOH_TXGLOM
+ /* In glommed case, create a single pkt from multiple pkts */
+ if (!read && (func == SDIO_FUNC_2) && (sd->glom_info.count != 0)) {
+ uint32 offset = 0;
+ localbuf = (uint8 *)MALLOC(sd->osh, nbytes);
+ if (localbuf == NULL) {
+ sd_err(("%s: MALLOC of %d bytes failed\n", __FUNCTION__, nbytes));
+ return ERROR;
+ }
+ data = (uint32 *)localbuf;
+ for (i = 0; i < sd->glom_info.count; i++) {
+ bcopy(sd->glom_info.dma_buf_arr[i],
+ ((uint8 *)data + offset),
+ sd->glom_info.nbytes[i]);
+ offset += sd->glom_info.nbytes[i];
+ }
+ }
+#endif
+ }
+
+ if (local_blockmode) {
+ blocksize = MIN(sd->client_block_size[func], nbytes);
+ num_blocks = nbytes/blocksize;
+ cmd_arg = SFIELD(cmd_arg, CMD53_BYTE_BLK_CNT, num_blocks);
+ cmd_arg = SFIELD(cmd_arg, CMD53_BLK_MODE, 1);
+ } else {
+ num_blocks = 1;
+ blocksize = nbytes;
+ cmd_arg = SFIELD(cmd_arg, CMD53_BYTE_BLK_CNT, nbytes);
+ cmd_arg = SFIELD(cmd_arg, CMD53_BLK_MODE, 0);
+ }
+
+ if (local_dma && !read) {
+#ifdef BCMSDIOH_TXGLOM
+ if ((func == SDIO_FUNC_2) && (sd->glom_info.count != 0)) {
+ /* In case of hc ver 2 DMA_MAP may not work properly due to 4K alignment
+ * requirements. So copying pkt to 4K aligned pre-allocated pkt.
+ * Total length should not cross the pre-alloced memory size
+ */
+ if (sd->txglom_mode == SDPCM_TXGLOM_CPY) {
+ uint32 total_bytes = 0;
+ for (i = 0; i < sd->glom_info.count; i++) {
+ bcopy(sd->glom_info.dma_buf_arr[i],
+ (uint8 *)sd->dma_buf + total_bytes,
+ sd->glom_info.nbytes[i]);
+ total_bytes += sd->glom_info.nbytes[i];
+ }
+ sd_sync_dma(sd, read, total_bytes);
+ }
+ } else
+#endif /* BCMSDIOH_TXGLOM */
+ {
+ bcopy(data, sd->dma_buf, nbytes);
+ sd_sync_dma(sd, read, nbytes);
+ }
+ }
+
+ if (fifo)
+ cmd_arg = SFIELD(cmd_arg, CMD53_OP_CODE, 0); /* XXX SDIO spec v 1.10, Sec 5.3 */
+ else
+ cmd_arg = SFIELD(cmd_arg, CMD53_OP_CODE, 1); /* XXX SDIO spec v 1.10, Sec 5.3 */
+
+ cmd_arg = SFIELD(cmd_arg, CMD53_FUNCTION, func);
+ cmd_arg = SFIELD(cmd_arg, CMD53_REG_ADDR, addr);
+ if (read)
+ cmd_arg = SFIELD(cmd_arg, CMD53_RW_FLAG, SDIOH_XFER_TYPE_READ);
+ else
+ cmd_arg = SFIELD(cmd_arg, CMD53_RW_FLAG, SDIOH_XFER_TYPE_WRITE);
+
+ sd->data_xfer_count = nbytes;
+
+ /* sdstd_cmd_issue() returns with the command complete bit
+ * in the ISR already cleared
+ */
+ if ((status = sdstd_cmd_issue(sd, local_dma, SDIOH_CMD_53, cmd_arg)) != SUCCESS) {
+ sd_err(("%s: cmd_issue failed for %s\n", __FUNCTION__, (read ? "read" : "write")));
+ retval = status;
+ goto done;
+ }
+
+ sdstd_cmd_getrsp(sd, &rsp5, 1);
+
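+ /* Per the SDIO spec, R5 flags of 0x10 mean IO_CURRENT_STATE == CMD with
+ * all error bits (CRC, illegal command, function number, out-of-range)
+ * clear; anything else is treated as a failed CMD53 below.
+ */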
+ if ((flags = GFIELD(rsp5, RSP5_FLAGS)) != 0x10) {
+ sd_err(("%s: Rsp5: nbytes %d, dma %d blockmode %d, read %d "
+ "numblocks %d, blocksize %d\n",
+ __FUNCTION__, nbytes, local_dma, local_blockmode, read, num_blocks, blocksize));
+
+ if (flags & 1)
+ sd_err(("%s: rsp5: Command not accepted: arg out of range 0x%x, "
+ "bytes %d dma %d\n",
+ __FUNCTION__, flags, GFIELD(cmd_arg, CMD53_BYTE_BLK_CNT),
+ GFIELD(cmd_arg, CMD53_BLK_MODE)));
+ if (flags & 0x8)
+ sd_err(("%s: Rsp5: General Error\n", __FUNCTION__));
+
+ sd_err(("%s: rsp5 flags = 0x%x, expecting 0x10 returning error\n",
+ __FUNCTION__, flags));
+ if (trap_errs)
+ ASSERT(0);
+ retval = ERROR;
+ goto done;
+ }
+
+ if (GFIELD(rsp5, RSP5_STUFF))
+ sd_err(("%s: rsp5 stuff is 0x%x: expecting 0\n",
+ __FUNCTION__, GFIELD(rsp5, RSP5_STUFF)));
+
+#ifdef BCMSDYIELD
+ local_yield = sd_yieldcpu && ((uint)nbytes >= sd_minyield);
+#endif
+
+ if (!local_dma) {
+ int bytes, ii;
+ uint32 tmp;
+
+ for (ii = 0; ii < num_blocks; ii++) {
+ int words;
+
+ /* Decide which status bit we're waiting for */
+ if (read)
+ int_bit = SFIELD(0, INTSTAT_BUF_READ_READY, 1);
+ else
+ int_bit = SFIELD(0, INTSTAT_BUF_WRITE_READY, 1);
+
+ /* If not on, wait for it (or for xfer error) */
+ int_reg = sdstd_rreg16(sd, SD_IntrStatus);
+ if (!(int_reg & int_bit)) {
+ status = sdstd_waitbits(sd, int_bit, ERRINT_TRANSFER_ERRS,
+ local_yield, &int_reg);
+ switch (status) {
+ case -1:
+ sd_err(("%s: pio interrupted\n", __FUNCTION__));
+ retval = ERROR;
+ goto done;
+ case -2:
+ sd_err(("%s: pio timeout waiting for interrupt\n",
+ __FUNCTION__));
+ retval = ERROR;
+ goto done;
+ }
+ }
+#ifdef BCMSLTGT
+ /* int_reg = sdstd_rreg16(sd, SD_IntrStatus); */
+#endif
+ /* Confirm we got the bit w/o error */
+ if (!(int_reg & int_bit) || GFIELD(int_reg, INTSTAT_ERROR_INT)) {
+ sd_err(("%s: Error or timeout for Buf_%s_Ready: intStat: 0x%x "
+ "errint: 0x%x PresentState 0x%x\n",
+ __FUNCTION__, read ? "Read" : "Write", int_reg,
+ sdstd_rreg16(sd, SD_ErrorIntrStatus),
+ sdstd_rreg(sd, SD_PresentState)));
+ sdstd_dumpregs(sd);
+ sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg);
+ retval = ERROR;
+ goto done;
+ }
+
+ /* Clear Buf Ready bit */
+ sdstd_wreg16(sd, SD_IntrStatus, int_bit);
+
+ /* At this point we have Buffer Ready, write the data 4 bytes at a time */
+ for (words = blocksize/4; words; words--) {
+ if (read)
+ *data = sdstd_rreg(sd, SD_BufferDataPort0);
+ else
+ sdstd_wreg(sd, SD_BufferDataPort0, *data);
+ data++;
+ }
+
+ /* XXX
+ * Handle < 4 bytes. wlc_pio.c currently (as of 12/20/05) truncates buflen
+ * to be evenly divisible by 4. However dongle passes arbitrary lengths,
+ * so handle it here
+ */
+ bytes = blocksize % 4;
+
+ /* If no leftover bytes, go to next block */
+ if (!bytes)
+ continue;
+
+ switch (bytes) {
+ case 1:
+ /* R/W 8 bits */
+ if (read)
+ *(data++) = (uint32)(sdstd_rreg8(sd, SD_BufferDataPort0));
+ else
+ sdstd_wreg8(sd, SD_BufferDataPort0,
+ (uint8)(*(data++) & 0xff));
+ break;
+ case 2:
+ /* R/W 16 bits */
+ if (read)
+ *(data++) = (uint32)sdstd_rreg16(sd, SD_BufferDataPort0);
+ else
+ sdstd_wreg16(sd, SD_BufferDataPort0, (uint16)(*(data++)));
+ break;
+ case 3:
+ /* R/W 24 bits:
+ * SD_BufferDataPort0[0-15] | SD_BufferDataPort1[16-23]
+ */
+ if (read) {
+ tmp = (uint32)sdstd_rreg16(sd, SD_BufferDataPort0);
+ tmp |= ((uint32)(sdstd_rreg8(sd,
+ SD_BufferDataPort1)) << 16);
+ *(data++) = tmp;
+ } else {
+ tmp = *(data++);
+ sdstd_wreg16(sd, SD_BufferDataPort0, (uint16)tmp & 0xffff);
+ sdstd_wreg8(sd, SD_BufferDataPort1,
+ (uint8)((tmp >> 16) & 0xff));
+ }
+ break;
+ default:
+ sd_err(("%s: Unexpected bytes leftover %d\n",
+ __FUNCTION__, bytes));
+ ASSERT(0);
+ break;
+ }
+ }
+ } /* End PIO processing */
+
+ /* Wait for Transfer Complete or Transfer Error */
+ int_bit = SFIELD(0, INTSTAT_XFER_COMPLETE, 1);
+
+ /* If not on, wait for it (or for xfer error) */
+ int_reg = sdstd_rreg16(sd, SD_IntrStatus);
+ if (!(int_reg & int_bit)) {
+ status = sdstd_waitbits(sd, int_bit, ERRINT_TRANSFER_ERRS, local_yield, &int_reg);
+ switch (status) {
+ case -1:
+ sd_err(("%s: interrupted\n", __FUNCTION__));
+ retval = ERROR;
+ goto done;
+ case -2:
+ sd_err(("%s: timeout waiting for interrupt\n", __FUNCTION__));
+ retval = ERROR;
+ goto done;
+ }
+ }
+
+ /* Check for any errors from the data phase */
+ if (sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg)) {
+ retval = ERROR;
+ goto done;
+ }
+
+ /* May have gotten a software timeout if not blocking? */
+ int_reg = sdstd_rreg16(sd, SD_IntrStatus);
+ if (!(int_reg & int_bit)) {
+ sd_err(("%s: Error or Timeout for xfer complete; %s, dma %d, State 0x%08x, "
+ "intr 0x%04x, Err 0x%04x, len = %d, rcnt %d, tcnt %d\n",
+ __FUNCTION__, read ? "R" : "W", local_dma,
+ sdstd_rreg(sd, SD_PresentState), int_reg,
+ sdstd_rreg16(sd, SD_ErrorIntrStatus), nbytes,
+ sd->r_cnt, sd->t_cnt));
+ sdstd_dumpregs(sd);
+ retval = ERROR;
+ goto done;
+ }
+
+ /* Clear the status bits */
+ int_reg = int_bit;
+ if (local_dma) {
+ /* DMA Complete */
+ /* XXX Step 14, Section 3.6.2.2 Stnd Cntrlr Spec */
+ /* Reads in particular don't have DMA_COMPLETE set */
+ int_reg = SFIELD(int_reg, INTSTAT_DMA_INT, 1);
+ }
+ sdstd_wreg16(sd, SD_IntrStatus, int_reg);
+
+ /* Fetch data */
+ if (local_dma && read) {
+ sd_sync_dma(sd, read, nbytes);
+ bcopy(sd->dma_buf, data, nbytes);
+ }
+
+done:
+#ifdef BCMSDIOH_TXGLOM
+ if (localbuf)
+ MFREE(sd->osh, localbuf, nbytes);
+#endif
+ return retval;
+}
+
+static int
+set_client_block_size(sdioh_info_t *sd, int func, int block_size)
+{
+ int base;
+ int err = 0;
+
+ sd_err(("%s: Setting block size %d, func %d\n", __FUNCTION__, block_size, func));
+ sd->client_block_size[func] = block_size;
+
+ /* Set the block size in the SDIO Card register */
+ base = func * SDIOD_FBR_SIZE;
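+ /* Per the SDIO spec, each function's FBR occupies SDIOD_FBR_SIZE (0x100)
+ * bytes of CCCR address space; e.g. func 2 would give base 0x200, putting
+ * the block-size registers at 0x210/0x211 (illustrative, assuming the
+ * standard 0x10/0x11 block-size offsets).
+ */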
+ err = sdstd_card_regwrite(sd, 0, base+SDIOD_CCCR_BLKSIZE_0, 1, block_size & 0xff);
+ if (!err) {
+ err = sdstd_card_regwrite(sd, 0, base+SDIOD_CCCR_BLKSIZE_1, 1,
+ (block_size >> 8) & 0xff);
+ }
+
+ /* Do not set the block size in the SDIO Host register, that
+ * is func dependent and will get done on an individual
+ * transaction basis
+ */
+
+ return (err ? BCME_SDIO_ERROR : 0);
+}
+
+/* Reset and re-initialize the device */
+int
+sdioh_sdio_reset(sdioh_info_t *si)
+{
+ uint8 hreg;
+
+ /* Reset the attached device (use slower clock for safety) */
+ if (!sdstd_start_clock(si, 128)) {
+ sd_err(("set clock failed!\n"));
+ return ERROR;
+ }
+ sdstd_reset(si, 0, 1);
+
+ /* Reset portions of the host state accordingly */
+ hreg = sdstd_rreg8(si, SD_HostCntrl);
+ hreg = SFIELD(hreg, HOST_HI_SPEED_EN, 0);
+ hreg = SFIELD(hreg, HOST_DATA_WIDTH, 0);
+ si->sd_mode = SDIOH_MODE_SD1;
+
+ /* Reinitialize the card */
+ si->card_init_done = FALSE;
+ return sdstd_client_init(si);
+}
+
+#ifdef BCMINTERNAL
+#ifdef NOTUSED
+static void
+cis_fetch(sdioh_info_t *sd, int func, char *data, int len)
+{
+ int count;
+ int offset;
+ char *end = data + len;
+ uint32 foo;
+
+ for (count = 0; count < 512 && data < end; count++) {
+ offset = sd->func_cis_ptr[func] + count;
+ if (sdstd_card_regread (sd, func, offset, 1, &foo) < 0) {
+ sd_err(("%s: regread failed\n", __FUNCTION__));
+ return;
+ }
+ data += sprintf(data, "%.2x ", foo & 0xff);
+ if (((count+1) % 16) == 0)
+ data += sprintf(data, "\n");
+ }
+}
+#endif /* NOTUSED */
+#endif /* BCMINTERNAL */
+
+static void
+sd_map_dma(sdioh_info_t *sd)
+{
+ int alloced;
+ void *va;
+ uint dma_buf_size = SD_PAGE;
+
+#ifdef BCMSDIOH_TXGLOM
+ /* There is no alignment requirement for HC3 */
+ if ((sd->version == HOST_CONTR_VER_3) && sd_txglom) {
+ /* Max glom packet length is 64KB */
+ dma_buf_size = SD_PAGE * 16;
+ }
+#endif
+
+ alloced = 0;
+ if ((va = DMA_ALLOC_CONSISTENT(sd->osh, dma_buf_size, SD_PAGE_BITS, &alloced,
+ &sd->dma_start_phys, 0x12)) == NULL) {
+ sd->sd_dma_mode = DMA_MODE_NONE;
+ sd->dma_start_buf = 0;
+ sd->dma_buf = (void *)0;
+ sd->dma_phys = 0;
+ sd->alloced_dma_size = 0;
+ sd_err(("%s: DMA_ALLOC failed. Disabling DMA support.\n", __FUNCTION__));
+ } else {
+ sd->dma_start_buf = va;
+ sd->dma_buf = (void *)ROUNDUP((uintptr)va, SD_PAGE);
+ sd->dma_phys = ROUNDUP((sd->dma_start_phys), SD_PAGE);
+ sd->alloced_dma_size = alloced;
+ sd_err(("%s: Mapped DMA Buffer %dbytes @virt/phys: %p/0x%x-0x%x\n",
+ __FUNCTION__, sd->alloced_dma_size, sd->dma_buf,
+ (uint)PHYSADDRHI(sd->dma_phys), (uint)PHYSADDRLO(sd->dma_phys)));
+ sd_fill_dma_data_buf(sd, 0xA5);
+ }
+
+ if ((va = DMA_ALLOC_CONSISTENT(sd->osh, SD_PAGE, SD_PAGE_BITS, &alloced,
+ &sd->adma2_dscr_start_phys, 0x12)) == NULL) {
+ sd->sd_dma_mode = DMA_MODE_NONE;
+ sd->adma2_dscr_start_buf = 0;
+ sd->adma2_dscr_buf = (void *)0;
+ sd->adma2_dscr_phys = 0;
+ sd->alloced_adma2_dscr_size = 0;
+ sd_err(("%s: DMA_ALLOC failed for descriptor buffer. "
+ "Disabling DMA support.\n", __FUNCTION__));
+ } else {
+ sd->adma2_dscr_start_buf = va;
+ sd->adma2_dscr_buf = (void *)ROUNDUP((uintptr)va, SD_PAGE);
+ sd->adma2_dscr_phys = ROUNDUP((sd->adma2_dscr_start_phys), SD_PAGE);
+ sd->alloced_adma2_dscr_size = alloced;
+ sd_err(("%s: Mapped ADMA2 Descriptor Buffer %dbytes @virt/phys: %p/0x%x-0x%x\n",
+ __FUNCTION__, sd->alloced_adma2_dscr_size, sd->adma2_dscr_buf,
+ (uint)PHYSADDRHI(sd->adma2_dscr_phys),
+ (uint)PHYSADDRLO(sd->adma2_dscr_phys)));
+ sd_clear_adma_dscr_buf(sd);
+ }
+}
+
+static void
+sd_unmap_dma(sdioh_info_t *sd)
+{
+ if (sd->dma_start_buf) {
+ DMA_FREE_CONSISTENT(sd->osh, sd->dma_start_buf, sd->alloced_dma_size,
+ sd->dma_start_phys, 0x12);
+ }
+
+ if (sd->adma2_dscr_start_buf) {
+ DMA_FREE_CONSISTENT(sd->osh, sd->adma2_dscr_start_buf, sd->alloced_adma2_dscr_size,
+ sd->adma2_dscr_start_phys, 0x12);
+ }
+}
+
+static void
+sd_clear_adma_dscr_buf(sdioh_info_t *sd)
+{
+ bzero((char *)sd->adma2_dscr_buf, SD_PAGE);
+ sd_dump_adma_dscr(sd);
+}
+
+static void
+sd_fill_dma_data_buf(sdioh_info_t *sd, uint8 data)
+{
+ memset((char *)sd->dma_buf, data, SD_PAGE);
+}
+
+static void
+sd_create_adma_descriptor(sdioh_info_t *sd, uint32 index,
+ uint32 addr_phys, uint16 length, uint16 flags)
+{
+ adma2_dscr_32b_t *adma2_dscr_table;
+ adma1_dscr_t *adma1_dscr_table;
+
+ adma2_dscr_table = sd->adma2_dscr_buf;
+ adma1_dscr_table = sd->adma2_dscr_buf;
+
+ switch (sd->sd_dma_mode) {
+ case DMA_MODE_ADMA2:
+ sd_dma(("%s: creating ADMA2 descriptor for index %d\n",
+ __FUNCTION__, index));
+
+ adma2_dscr_table[index].phys_addr = addr_phys;
+ adma2_dscr_table[index].len_attr = length << 16;
+ adma2_dscr_table[index].len_attr |= flags;
+ break;
+ case DMA_MODE_ADMA1:
+ /* ADMA1 requires two descriptors, one for len
+ * and the other for data transfer
+ */
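+ /* So logical index i maps to table entries 2*i (the SET-length
+ * descriptor) and 2*i + 1 (the address/transfer descriptor).
+ */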
+ index <<= 1;
+
+ sd_dma(("%s: creating ADMA1 descriptor for index %d\n",
+ __FUNCTION__, index));
+
+ adma1_dscr_table[index].phys_addr_attr = length << 12;
+ adma1_dscr_table[index].phys_addr_attr |= (ADMA1_ATTRIBUTE_ACT_SET |
+ ADMA2_ATTRIBUTE_VALID);
+ adma1_dscr_table[index+1].phys_addr_attr = addr_phys & 0xFFFFF000;
+ adma1_dscr_table[index+1].phys_addr_attr |= (flags & 0x3f);
+ break;
+ default:
+ sd_err(("%s: cannot create ADMA descriptor for DMA mode %d\n",
+ __FUNCTION__, sd->sd_dma_mode));
+ break;
+ }
+}
+
+static void
+sd_dump_adma_dscr(sdioh_info_t *sd)
+{
+ adma2_dscr_32b_t *adma2_dscr_table;
+ adma1_dscr_t *adma1_dscr_table;
+ uint32 i = 0;
+ uint16 flags;
+ char flags_str[32];
+
+ ASSERT(sd->adma2_dscr_buf != NULL);
+
+ adma2_dscr_table = sd->adma2_dscr_buf;
+ adma1_dscr_table = sd->adma2_dscr_buf;
+
+ switch (sd->sd_dma_mode) {
+ case DMA_MODE_ADMA2:
+ sd_err(("ADMA2 Descriptor Table (%dbytes) @virt/phys: %p/0x%x-0x%x\n",
+ SD_PAGE, sd->adma2_dscr_buf,
+ (uint)PHYSADDRHI(sd->adma2_dscr_phys),
+ (uint)PHYSADDRLO(sd->adma2_dscr_phys)));
+ sd_err((" #[Descr VA ] Buffer PA | Len | Flags (5:4 2 1 0)"
+ " |\n"));
+ while (adma2_dscr_table->len_attr & ADMA2_ATTRIBUTE_VALID) {
+ flags = adma2_dscr_table->len_attr & 0xFFFF;
+ sprintf(flags_str, "%s%s%s%s",
+ ((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
+ ADMA2_ATTRIBUTE_ACT_LINK) ? "LINK " :
+ ((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
+ ADMA2_ATTRIBUTE_ACT_TRAN) ? "TRAN " :
+ ((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
+ ADMA2_ATTRIBUTE_ACT_NOP) ? "NOP " : "RSV ",
+ (flags & ADMA2_ATTRIBUTE_INT ? "INT " : " "),
+ (flags & ADMA2_ATTRIBUTE_END ? "END " : " "),
+ (flags & ADMA2_ATTRIBUTE_VALID ? "VALID" : ""));
+ sd_err(("%2d[0x%p]: 0x%08x | 0x%04x | 0x%04x (%s) |\n",
+ i, adma2_dscr_table, adma2_dscr_table->phys_addr,
+ adma2_dscr_table->len_attr >> 16, flags, flags_str));
+ i++;
+
+#ifdef linux
+ /* Follow LINK descriptors or skip to next. */
+ if ((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
+ ADMA2_ATTRIBUTE_ACT_LINK) {
+ adma2_dscr_table = phys_to_virt(
+ adma2_dscr_table->phys_addr);
+ } else {
+ adma2_dscr_table++;
+ }
+#else
+ adma2_dscr_table++;
+#endif /* linux */
+
+ }
+ break;
+ case DMA_MODE_ADMA1:
+ sd_err(("ADMA1 Descriptor Table (%dbytes) @virt/phys: %p/0x%x-0x%x\n",
+ SD_PAGE, sd->adma2_dscr_buf,
+ (uint)PHYSADDRHI(sd->adma2_dscr_phys),
+ (uint)PHYSADDRLO(sd->adma2_dscr_phys)));
+ sd_err((" #[Descr VA ] Buffer PA | Flags (5:4 2 1 0) |\n"));
+
+ for (i = 0; adma1_dscr_table->phys_addr_attr & ADMA2_ATTRIBUTE_VALID; i++) {
+ flags = adma1_dscr_table->phys_addr_attr & 0x3F;
+ sprintf(flags_str, "%s%s%s%s",
+ ((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
+ ADMA2_ATTRIBUTE_ACT_LINK) ? "LINK " :
+ ((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
+ ADMA2_ATTRIBUTE_ACT_TRAN) ? "TRAN " :
+ ((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
+ ADMA2_ATTRIBUTE_ACT_NOP) ? "NOP " : "SET ",
+ (flags & ADMA2_ATTRIBUTE_INT ? "INT " : " "),
+ (flags & ADMA2_ATTRIBUTE_END ? "END " : " "),
+ (flags & ADMA2_ATTRIBUTE_VALID ? "VALID" : ""));
+ sd_err(("%2d[0x%p]: 0x%08x | 0x%04x | (%s) |\n",
+ i, adma1_dscr_table,
+ adma1_dscr_table->phys_addr_attr & 0xFFFFF000,
+ flags, flags_str));
+
+#ifdef linux
+ /* Follow LINK descriptors or skip to next. */
+ if ((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
+ ADMA2_ATTRIBUTE_ACT_LINK) {
+ adma1_dscr_table = phys_to_virt(
+ adma1_dscr_table->phys_addr_attr & 0xFFFFF000);
+ } else {
+ adma1_dscr_table++;
+ }
+#else
+ adma1_dscr_table++;
+#endif /* linux */
+ }
+ break;
+ default:
+ sd_err(("Unknown DMA Descriptor Table Format.\n"));
+ break;
+ }
+}
+
+static void
+sdstd_dumpregs(sdioh_info_t *sd)
+{
+ sd_err(("IntrStatus: 0x%04x ErrorIntrStatus 0x%04x\n",
+ sdstd_rreg16(sd, SD_IntrStatus),
+ sdstd_rreg16(sd, SD_ErrorIntrStatus)));
+ sd_err(("IntrStatusEnable: 0x%04x ErrorIntrStatusEnable 0x%04x\n",
+ sdstd_rreg16(sd, SD_IntrStatusEnable),
+ sdstd_rreg16(sd, SD_ErrorIntrStatusEnable)));
+ sd_err(("IntrSignalEnable: 0x%04x ErrorIntrSignalEnable 0x%04x\n",
+ sdstd_rreg16(sd, SD_IntrSignalEnable),
+ sdstd_rreg16(sd, SD_ErrorIntrSignalEnable)));
+}
diff --git a/bcmdhd.101.10.361.x/bcmsdstd.h b/bcmdhd.101.10.361.x/bcmsdstd.h
new file mode 100755
index 0000000..e0d19e2
--- /dev/null
+++ b/bcmdhd.101.10.361.x/bcmsdstd.h
@@ -0,0 +1,301 @@
+/*
+ * 'Standard' SDIO HOST CONTROLLER driver
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmsdstd.h 833030 2019-08-02 17:22:42Z jl904071 $
+ */
+#ifndef _BCM_SD_STD_H
+#define _BCM_SD_STD_H
+
+/* global msglevel for debug messages - bitvals come from sdiovar.h */
+#ifdef BCMDBG
+#define sd_err(x) do { if (sd_msglevel & SDH_ERROR_VAL) printf x; } while (0)
+#define sd_trace(x) do { if (sd_msglevel & SDH_TRACE_VAL) printf x; } while (0)
+#define sd_info(x) do { if (sd_msglevel & SDH_INFO_VAL) printf x; } while (0)
+#define sd_debug(x) do { if (sd_msglevel & SDH_DEBUG_VAL) printf x; } while (0)
+#define sd_data(x) do { if (sd_msglevel & SDH_DATA_VAL) printf x; } while (0)
+#define sd_ctrl(x) do { if (sd_msglevel & SDH_CTRL_VAL) printf x; } while (0)
+#define sd_dma(x) do { if (sd_msglevel & SDH_DMA_VAL) printf x; } while (0)
+#else
+#define sd_err(x) do { if (sd_msglevel & SDH_ERROR_VAL) printf x; } while (0)
+#define sd_trace(x)
+#define sd_info(x)
+#define sd_debug(x)
+#define sd_data(x)
+#define sd_ctrl(x)
+#define sd_dma(x)
+#endif /* BCMDBG */
+
+#define sd_sync_dma(sd, read, nbytes)
+#define sd_init_dma(sd)
+#define sd_ack_intr(sd)
+#define sd_wakeup(sd)
+/* Allocate/init/free per-OS private data */
+extern int sdstd_osinit(sdioh_info_t *sd);
+extern void sdstd_osfree(sdioh_info_t *sd);
+
+#ifdef BCMPERFSTATS
+#define sd_log(x) do { if (sd_msglevel & SDH_LOG_VAL) bcmlog x; } while (0)
+#else
+#define sd_log(x)
+#endif
+
+#define SDIOH_ASSERT(exp) \
+ do { if (!(exp)) \
+ printf("!!!ASSERT fail: file %s lines %d", __FILE__, __LINE__); \
+ } while (0)
+
+#define BLOCK_SIZE_4318 64
+#define BLOCK_SIZE_4328 512
+
+/* internal return code */
+#define SUCCESS 0
+#define ERROR 1
+
+/* private bus modes */
+#define SDIOH_MODE_SPI 0
+#define SDIOH_MODE_SD1 1
+#define SDIOH_MODE_SD4 2
+
+#define MAX_SLOTS 6 /* For PCI: Only 6 BAR entries => 6 slots */
+#define SDIOH_REG_WINSZ 0x100 /* Number of registers in Standard Host Controller */
+
+#define SDIOH_TYPE_ARASAN_HDK 1
+#define SDIOH_TYPE_BCM27XX 2
+#ifdef BCMINTERNAL
+#define SDIOH_TYPE_JINVANI_GOLD 3
+#endif
+#define SDIOH_TYPE_TI_PCIXX21 4 /* TI PCIxx21 Standard Host Controller */
+#define SDIOH_TYPE_RICOH_R5C822 5 /* Ricoh Co Ltd R5C822 SD/SDIO/MMC/MS/MSPro Host Adapter */
+#define SDIOH_TYPE_JMICRON 6 /* JMicron Standard SDIO Host Controller */
+
+/* For linux, allow yielding for dongle */
+#if defined(linux) && defined(BCMDONGLEHOST)
+#define BCMSDYIELD
+#endif
+
+/* Expected card status value for CMD7 */
+#define SDIOH_CMD7_EXP_STATUS 0x00001E00
+
+#define RETRIES_LARGE 100000
+#ifdef BCMQT
+extern void sdstd_os_yield(sdioh_info_t *sd);
+#define RETRIES_SMALL 10000
+#else
+#define sdstd_os_yield(sd) do {} while (0)
+#define RETRIES_SMALL 100
+#endif
+
+#define USE_BLOCKMODE 0x2 /* Block mode can be single block or multi */
+#define USE_MULTIBLOCK 0x4
+
+#define USE_FIFO 0x8 /* Fifo vs non-fifo */
+
+#define CLIENT_INTR 0x100 /* Get rid of this! */
+
+#define HC_INTR_RETUNING 0x1000
+
+#ifdef BCMSDIOH_TXGLOM
+/* Total glom pkt cannot exceed 64KB;
+ * one more slot is needed for the glom padding packet
+ */
+#define SDIOH_MAXGLOM_SIZE (40+1)
+
+typedef struct glom_buf {
+ uint32 count; /* Total number of pkts queued */
+ void *dma_buf_arr[SDIOH_MAXGLOM_SIZE]; /* Frame address */
+ dmaaddr_t dma_phys_arr[SDIOH_MAXGLOM_SIZE]; /* DMA_MAPed address of frames */
+ uint16 nbytes[SDIOH_MAXGLOM_SIZE]; /* Size of each frame */
+} glom_buf_t;
+#endif
+
+struct sdioh_info {
+ uint cfg_bar; /* pci cfg address for bar */
+ uint32 caps; /* cached value of capabilities reg */
+ uint32 curr_caps; /* max current capabilities reg */
+
+ osl_t *osh; /* osh handle */
+ volatile char *mem_space; /* pci device memory va */
+ uint lockcount; /* nest count of sdstd_lock() calls */
+ bool client_intr_enabled; /* interrupt connected flag */
+ bool intr_handler_valid; /* client driver interrupt handler valid */
+ sdioh_cb_fn_t intr_handler; /* registered interrupt handler */
+ void *intr_handler_arg; /* argument to call interrupt handler */
+ bool initialized; /* card initialized */
+ uint target_dev; /* Target device ID */
+ uint16 intmask; /* Current active interrupts */
+ void *sdos_info; /* Pointer to per-OS private data */
+ void *bcmsdh; /* handle to upper layer stack (bcmsdh) */
+
+ uint32 controller_type; /* Host controller type */
+ uint8 version; /* Host Controller Spec Compliance Version */
+ uint irq; /* Client irq */
+ int intrcount; /* Client interrupts */
+ int local_intrcount; /* Controller interrupts */
+ bool host_init_done; /* Controller initted */
+ bool card_init_done; /* Client SDIO interface initted */
+ bool polled_mode; /* polling for command completion */
+
+ bool sd_blockmode; /* sd_blockmode == FALSE => 64 Byte Cmd 53s. */
+ /* Must be on for sd_multiblock to be effective */
+ bool use_client_ints; /* If this is false, make sure to restore */
+ /* polling hack in wl_linux.c:wl_timer() */
+ int adapter_slot; /* Maybe dealing with multiple slots/controllers */
+ int sd_mode; /* SD1/SD4/SPI */
+ int client_block_size[SDIOD_MAX_IOFUNCS]; /* Blocksize */
+ uint32 data_xfer_count; /* Current transfer */
+ uint16 card_rca; /* Current Address */
+ int8 sd_dma_mode; /* DMA Mode (PIO, SDMA, ... ADMA2) on CMD53 */
+ uint8 num_funcs; /* Supported funcs on client */
+ uint32 com_cis_ptr;
+ uint32 func_cis_ptr[SDIOD_MAX_IOFUNCS];
+ void *dma_buf; /* DMA Buffer virtual address */
+ dmaaddr_t dma_phys; /* DMA Buffer physical address */
+ void *adma2_dscr_buf; /* ADMA2 Descriptor Buffer virtual address */
+ dmaaddr_t adma2_dscr_phys; /* ADMA2 Descriptor Buffer physical address */
+
+ /* adjustments needed to make the dma align properly */
+ void *dma_start_buf;
+ dmaaddr_t dma_start_phys;
+ uint alloced_dma_size;
+ void *adma2_dscr_start_buf;
+ dmaaddr_t adma2_dscr_start_phys;
+ uint alloced_adma2_dscr_size;
+
+ int r_cnt; /* rx count */
+ int t_cnt; /* tx_count */
+ bool got_hcint; /* local interrupt flag */
+ uint16 last_intrstatus; /* to cache intrstatus */
+ int host_UHSISupported; /* whether UHSI is supported for HC. */
+ int card_UHSI_voltage_Supported; /* whether UHSI is supported for
+ * Card in terms of Voltage [1.8 or 3.3].
+ */
+ int global_UHSI_Supp; /* type of UHSI support in both host and card.
+ * HOST_SDR_UNSUPP: capabilities not supported/matched
+ * HOST_SDR_12_25: SDR12 and SDR25 supported
+ * HOST_SDR_50_104_DDR: one of SDR50/SDR104 or DDR50 supptd
+ */
+ volatile int sd3_dat_state; /* data transfer state used for retuning check */
+ volatile int sd3_tun_state; /* tuning state used for retuning check */
+ bool sd3_tuning_reqd; /* tuning requirement parameter */
+ bool sd3_tuning_disable; /* tuning disable due to bus sleeping */
+ uint32 caps3; /* cached value of 32 MSbits capabilities reg (SDIO 3.0) */
+#ifdef BCMSDIOH_TXGLOM
+ glom_buf_t glom_info; /* pkt information used for glomming */
+ uint txglom_mode; /* Txglom mode: 0 - copy, 1 - multi-descriptor */
+#endif
+};
+
+#define DMA_MODE_NONE 0
+#define DMA_MODE_SDMA 1
+#define DMA_MODE_ADMA1 2
+#define DMA_MODE_ADMA2 3
+#define DMA_MODE_ADMA2_64 4
+#define DMA_MODE_AUTO -1
+
+#define USE_DMA(sd) ((bool)((sd->sd_dma_mode > 0) ? TRUE : FALSE))
+
+/* States for Tuning and corr data */
+#define TUNING_IDLE 0
+#define TUNING_START 1
+#define TUNING_START_AFTER_DAT 2
+#define TUNING_ONGOING 3
+
+#define DATA_TRANSFER_IDLE 0
+#define DATA_TRANSFER_ONGOING 1
+
+#define CHECK_TUNING_PRE_DATA 1
+#define CHECK_TUNING_POST_DATA 2
+
+#ifdef DHD_DEBUG
+#define SD_DHD_DISABLE_PERIODIC_TUNING 0x01
+#define SD_DHD_ENABLE_PERIODIC_TUNING 0x00
+#endif
+
+/************************************************************
+ * Internal interfaces: per-port references into bcmsdstd.c
+ */
+
+/* Global message bits */
+extern uint sd_msglevel;
+
+/* OS-independent interrupt handler */
+extern bool check_client_intr(sdioh_info_t *sd);
+
+/* Core interrupt enable/disable of device interrupts */
+extern void sdstd_devintr_on(sdioh_info_t *sd);
+extern void sdstd_devintr_off(sdioh_info_t *sd);
+
+/* Enable/disable interrupts for local controller events */
+extern void sdstd_intrs_on(sdioh_info_t *sd, uint16 norm, uint16 err);
+extern void sdstd_intrs_off(sdioh_info_t *sd, uint16 norm, uint16 err);
+
+/* Wait for specified interrupt and error bits to be set */
+extern void sdstd_spinbits(sdioh_info_t *sd, uint16 norm, uint16 err);
+
+/**************************************************************
+ * Internal interfaces: bcmsdstd.c references to per-port code
+ */
+
+/* Register mapping routines */
+extern uint32 *sdstd_reg_map(osl_t *osh, dmaaddr_t addr, int size);
+extern void sdstd_reg_unmap(osl_t *osh, dmaaddr_t addr, int size);
+
+/* Interrupt (de)registration routines */
+extern int sdstd_register_irq(sdioh_info_t *sd, uint irq);
+extern void sdstd_free_irq(uint irq, sdioh_info_t *sd);
+
+/* OS-specific interrupt wrappers (atomic interrupt enable/disable) */
+extern void sdstd_lock(sdioh_info_t *sd);
+extern void sdstd_unlock(sdioh_info_t *sd);
+extern void sdstd_waitlockfree(sdioh_info_t *sd);
+
+/* OS-specific wrappers for safe concurrent register access */
+extern void sdstd_os_lock_irqsave(sdioh_info_t *sd, ulong* flags);
+extern void sdstd_os_unlock_irqrestore(sdioh_info_t *sd, ulong* flags);
+
+/* OS-specific wait-for-interrupt-or-status */
+extern int sdstd_waitbits(sdioh_info_t *sd, uint16 norm, uint16 err, bool yield, uint16 *bits);
+
+/* used by bcmsdstd_linux [implemented in sdstd] */
+extern void sdstd_3_enable_retuning_int(sdioh_info_t *sd);
+extern void sdstd_3_disable_retuning_int(sdioh_info_t *sd);
+extern bool sdstd_3_is_retuning_int_set(sdioh_info_t *sd);
+extern void sdstd_3_check_and_do_tuning(sdioh_info_t *sd, int tuning_param);
+extern bool sdstd_3_check_and_set_retuning(sdioh_info_t *sd);
+extern int sdstd_3_get_tune_state(sdioh_info_t *sd);
+extern int sdstd_3_get_data_state(sdioh_info_t *sd);
+extern void sdstd_3_set_tune_state(sdioh_info_t *sd, int state);
+extern void sdstd_3_set_data_state(sdioh_info_t *sd, int state);
+extern uint8 sdstd_3_get_tuning_exp(sdioh_info_t *sd);
+extern uint32 sdstd_3_get_uhsi_clkmode(sdioh_info_t *sd);
+extern int sdstd_3_clk_tuning(sdioh_info_t *sd, uint32 sd3ClkMode);
+
+/* used by sdstd [implemented in bcmsdstd_linux/ndis] */
+extern void sdstd_3_start_tuning(sdioh_info_t *sd);
+extern void sdstd_3_osinit_tuning(sdioh_info_t *sd);
+extern void sdstd_3_osclean_tuning(sdioh_info_t *sd);
+
+extern void sdstd_enable_disable_periodic_timer(sdioh_info_t * sd, uint val);
+
+extern sdioh_info_t *sdioh_attach(osl_t *osh, void *bar0, uint irq);
+extern SDIOH_API_RC sdioh_detach(osl_t *osh, sdioh_info_t *sd);
+#endif /* _BCM_SD_STD_H */
diff --git a/bcmdhd.101.10.361.x/bcmsdstd_linux.c b/bcmdhd.101.10.361.x/bcmsdstd_linux.c
new file mode 100755
index 0000000..be635ae
--- /dev/null
+++ b/bcmdhd.101.10.361.x/bcmsdstd_linux.c
@@ -0,0 +1,690 @@
+/*
+ * 'Standard' SDIO HOST CONTROLLER driver - linux portion
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+#include <linux/sched.h> /* request_irq() */
+#include <typedefs.h>
+#include <pcicfg.h>
+#include <bcmutils.h>
+#include <sdio.h> /* SDIO Device and Protocol Specs */
+#include <sdioh.h> /* SDIO Host Controller Spec header file */
+#include <bcmsdbus.h> /* bcmsdh to/from specific controller APIs */
+#include <sdiovar.h> /* to get msglevel bit values */
+#include <bcmsdstd.h>
+#include <bcmdevs.h>
+
+extern void* bcmsdh_probe(osl_t *osh, void *dev, void *sdioh, void *adapter_info, uint bus_type,
+ uint bus_num, uint slot_num);
+extern int bcmsdh_remove(bcmsdh_info_t *bcmsdh);
+
+/* Extern functions for sdio power save */
+extern uint8 sdstd_turn_on_clock(sdioh_info_t *sd);
+extern uint8 sdstd_turn_off_clock(sdioh_info_t *sd);
+/* Extern variable for sdio power save. This is enabled or disabled using the IOCTL call */
+extern uint sd_3_power_save;
+
+struct sdos_info {
+ sdioh_info_t *sd;
+ spinlock_t lock;
+ wait_queue_head_t intr_wait_queue;
+ timer_list_compat_t tuning_timer;
+ int tuning_timer_exp;
+ atomic_t timer_enab;
+ struct tasklet_struct tuning_tasklet;
+};
+
+#define SDSTD_WAITBITS_TIMEOUT (5 * HZ) /* seconds * HZ */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#define BLOCKABLE() (!in_atomic())
+#else
+#define BLOCKABLE() (!in_interrupt()) /* XXX Doesn't handle CONFIG_PREEMPT? */
+#endif
+
+static void
+sdstd_3_ostasklet(ulong data);
+static void
+sdstd_3_tuning_timer(ulong data);
+
+/* Interrupt handler */
+static irqreturn_t
+sdstd_isr(int irq, void *dev_id
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
+, struct pt_regs *ptregs
+#endif
+)
+{
+ sdioh_info_t *sd;
+ struct sdos_info *sdos;
+ bool ours;
+
+ unsigned long flags;
+ sd = (sdioh_info_t *)dev_id;
+ sdos = (struct sdos_info *)sd->sdos_info;
+
+ if (!sd->card_init_done) {
+ sd_err(("%s: Hey Bogus intr...not even initted: irq %d\n", __FUNCTION__, irq));
+ return IRQ_RETVAL(FALSE);
+ } else {
+ if (sdstd_3_is_retuning_int_set(sd)) {
+ /* for 3.0 host, retuning request might come in this path */
+ /* * disable ISR's */
+ local_irq_save(flags);
+
+ if (sdstd_3_check_and_set_retuning(sd))
+ tasklet_schedule(&sdos->tuning_tasklet);
+
+ /* * enable back ISR's */
+ local_irq_restore(flags);
+
+ /* * disable tuning isr signaling */
+ sdstd_3_disable_retuning_int(sd);
+ /* Note: check_client_intr() also checks intmask for
+ * wakeup, so be careful when using sd->intmask to
+ * disable the re-tuning ISR.
+ */
+ }
+ ours = check_client_intr(sd);
+
+ /* For local interrupts, wake the waiting process */
+ if (ours && sd->got_hcint) {
+ sd_trace(("INTR->WAKE\n"));
+/* sdos = (struct sdos_info *)sd->sdos_info; */
+ wake_up_interruptible(&sdos->intr_wait_queue);
+ }
+ return IRQ_RETVAL(ours);
+ }
+}
+
+/* Register with Linux for interrupts */
+int
+sdstd_register_irq(sdioh_info_t *sd, uint irq)
+{
+ sd_trace(("Entering %s: irq == %d\n", __FUNCTION__, irq));
+ if (request_irq(irq, sdstd_isr, IRQF_SHARED, "bcmsdstd", sd) < 0) {
+ sd_err(("%s: request_irq() failed\n", __FUNCTION__));
+ return ERROR;
+ }
+ return SUCCESS;
+}
+
+/* Free Linux irq */
+void
+sdstd_free_irq(uint irq, sdioh_info_t *sd)
+{
+ free_irq(irq, sd);
+}
+
+/* Map Host controller registers */
+
+uint32 *
+sdstd_reg_map(osl_t *osh, dmaaddr_t addr, int size)
+{
+ return (uint32 *)REG_MAP(addr, size);
+}
+
+void
+sdstd_reg_unmap(osl_t *osh, dmaaddr_t addr, int size)
+{
+ REG_UNMAP((void*)(uintptr)addr);
+}
+
+int
+sdstd_osinit(sdioh_info_t *sd)
+{
+ struct sdos_info *sdos;
+
+ sdos = (struct sdos_info*)MALLOC(sd->osh, sizeof(struct sdos_info));
+ if (sdos == NULL)
+ return BCME_NOMEM;
+ sd->sdos_info = (void*)sdos;
+
+ sdos->sd = sd;
+ spin_lock_init(&sdos->lock);
+ atomic_set(&sdos->timer_enab, FALSE);
+ init_waitqueue_head(&sdos->intr_wait_queue);
+ return BCME_OK;
+}
+
+/* initialize tuning-related OS structures */
+void
+sdstd_3_osinit_tuning(sdioh_info_t *sd)
+{
+ struct sdos_info *sdos = (struct sdos_info *)sd->sdos_info;
+ uint8 timer_count = sdstd_3_get_tuning_exp(sdos->sd);
+
+ sd_trace(("%s Enter\n", __FUNCTION__));
+
+ init_timer_compat(&sdos->tuning_timer, sdstd_3_tuning_timer, sdos);
+ if (timer_count == CAP3_RETUNING_TC_DISABLED || timer_count > CAP3_RETUNING_TC_1024S) {
+ sdos->tuning_timer_exp = 0;
+ } else {
+ sdos->tuning_timer_exp = 1 << (timer_count - 1);
+ }
+ tasklet_init(&sdos->tuning_tasklet, sdstd_3_ostasklet, (ulong)sdos);
+ if (sdos->tuning_timer_exp) {
+ timer_expires(&sdos->tuning_timer) = jiffies + sdos->tuning_timer_exp * HZ;
+ add_timer(&sdos->tuning_timer);
+ atomic_set(&sdos->timer_enab, TRUE);
+ }
+}
+
+/* finalize tuning-related OS structures */
+void
+sdstd_3_osclean_tuning(sdioh_info_t *sd)
+{
+ struct sdos_info *sdos = (struct sdos_info *)sd->sdos_info;
+ if (atomic_read(&sdos->timer_enab) == TRUE) {
+ /* disable timer if it was running */
+ del_timer_sync(&sdos->tuning_timer);
+ atomic_set(&sdos->timer_enab, FALSE);
+ }
+ tasklet_kill(&sdos->tuning_tasklet);
+}
+
+static void
+sdstd_3_ostasklet(ulong data)
+{
+ struct sdos_info *sdos = (struct sdos_info *)data;
+ int tune_state = sdstd_3_get_tune_state(sdos->sd);
+ int data_state = sdstd_3_get_data_state(sdos->sd);
+ if ((tune_state == TUNING_START) || (tune_state == TUNING_ONGOING) ||
+ (tune_state == TUNING_START_AFTER_DAT)) {
+ return;
+ }
+ else if (data_state == DATA_TRANSFER_IDLE)
+ sdstd_3_set_tune_state(sdos->sd, TUNING_START);
+ else if (data_state == DATA_TRANSFER_ONGOING)
+ sdstd_3_set_tune_state(sdos->sd, TUNING_START_AFTER_DAT);
+}
+
+static void
+sdstd_3_tuning_timer(ulong data)
+{
+ struct sdos_info *sdos = (struct sdos_info *)data;
+/* uint8 timeout = 0; */
+ unsigned long int_flags;
+
+ sd_trace(("%s: enter\n", __FUNCTION__));
+ /* schedule tasklet */
+ /* * disable ISR's */
+ local_irq_save(int_flags);
+ if (sdstd_3_check_and_set_retuning(sdos->sd))
+ tasklet_schedule(&sdos->tuning_tasklet);
+
+ /* * enable back ISR's */
+ local_irq_restore(int_flags);
+}
+
+void sdstd_3_start_tuning(sdioh_info_t *sd)
+{
+ int tune_state;
+ unsigned long int_flags = 0;
+ unsigned int timer_enab;
+ struct sdos_info *sdos = (struct sdos_info *)sd->sdos_info;
+ sd_trace(("%s: enter\n", __FUNCTION__));
+ /* * disable ISR's */
+ local_irq_save(int_flags);
+ timer_enab = atomic_read(&sdos->timer_enab);
+
+ tune_state = sdstd_3_get_tune_state(sd);
+
+ if (tune_state == TUNING_ONGOING) {
+ /* do nothing */
+ local_irq_restore(int_flags);
+ goto exit;
+ }
+ /* change state */
+ sdstd_3_set_tune_state(sd, TUNING_ONGOING);
+ /* * enable ISR's */
+ local_irq_restore(int_flags);
+ sdstd_3_clk_tuning(sd, sdstd_3_get_uhsi_clkmode(sd));
+#ifdef BCMSDIOH_STD_TUNING_WAR
+ /*
+ * Observed intermittent SDIO command error after re-tuning done
+ * successfully. Re-tuning twice is giving much reliable results.
+ */
+ sdstd_3_clk_tuning(sd, sdstd_3_get_uhsi_clkmode(sd));
+#endif /* BCMSDIOH_STD_TUNING_WAR */
+ /* * disable ISR's */
+ local_irq_save(int_flags);
+ sdstd_3_set_tune_state(sd, TUNING_IDLE);
+ /* * enable ISR's */
+ local_irq_restore(int_flags);
+
+ /* enable retuning interrupt */
+ sdstd_3_enable_retuning_int(sd);
+
+ /* start retuning timer if enabled */
+ if ((sdos->tuning_timer_exp) && (timer_enab)) {
+ if (sd->sd3_tuning_reqd) {
+ timer_expires(&sdos->tuning_timer) = jiffies + sdos->tuning_timer_exp * HZ;
+ mod_timer(&sdos->tuning_timer, timer_expires(&sdos->tuning_timer));
+ }
+ }
+exit:
+ return;
+}
+
+void
+sdstd_osfree(sdioh_info_t *sd)
+{
+ struct sdos_info *sdos;
+ ASSERT(sd && sd->sdos_info);
+
+ sdos = (struct sdos_info *)sd->sdos_info;
+ MFREE(sd->osh, sdos, sizeof(struct sdos_info));
+}
+
+/* Interrupt enable/disable */
+SDIOH_API_RC
+sdioh_interrupt_set(sdioh_info_t *sd, bool enable)
+{
+ ulong flags;
+ struct sdos_info *sdos;
+
+ sd_trace(("%s: %s\n", __FUNCTION__, enable ? "Enabling" : "Disabling"));
+
+ sdos = (struct sdos_info *)sd->sdos_info;
+ ASSERT(sdos);
+
+ if (!(sd->host_init_done && sd->card_init_done)) {
+ sd_err(("%s: Card & Host are not initted - bailing\n", __FUNCTION__));
+ return SDIOH_API_RC_FAIL;
+ }
+
+ if (enable && !(sd->intr_handler && sd->intr_handler_arg)) {
+ sd_err(("%s: no handler registered, will not enable\n", __FUNCTION__));
+ return SDIOH_API_RC_FAIL;
+ }
+
+ /* Ensure atomicity for enable/disable calls */
+ spin_lock_irqsave(&sdos->lock, flags);
+
+ sd->client_intr_enabled = enable;
+ if (enable && !sd->lockcount)
+ sdstd_devintr_on(sd);
+ else
+ sdstd_devintr_off(sd);
+
+ spin_unlock_irqrestore(&sdos->lock, flags);
+
+ return SDIOH_API_RC_SUCCESS;
+}
+
+/* Protect against reentrancy (disable device interrupts while executing) */
+void
+sdstd_lock(sdioh_info_t *sd)
+{
+ ulong flags;
+ struct sdos_info *sdos;
+ int wait_count = 0;
+
+ sdos = (struct sdos_info *)sd->sdos_info;
+ ASSERT(sdos);
+
+ sd_trace(("%s: %d\n", __FUNCTION__, sd->lockcount));
+
+ spin_lock_irqsave(&sdos->lock, flags);
+ while (sd->lockcount)
+ {
+ spin_unlock_irqrestore(&sdos->lock, flags);
+ yield();
+ spin_lock_irqsave(&sdos->lock, flags);
+ if (++wait_count == 25000) {
+ if (sd->lockcount != 0) {
+ sd_err(("%s: ERROR: sd->lockcount != 0\n", __FUNCTION__));
+ }
+ }
+ }
+ /* PR86684: Add temporary debugging print */
+ if (wait_count)
+ printk("sdstd_lock: wait count = %d\n", wait_count);
+ sdstd_devintr_off(sd);
+ sd->lockcount++;
+ spin_unlock_irqrestore(&sdos->lock, flags);
+ if ((sd->controller_type == SDIOH_TYPE_RICOH_R5C822) && (sd->version == HOST_CONTR_VER_3))
+ sdstd_turn_on_clock(sd);
+}
+
+/* Enable client interrupt */
+void
+sdstd_unlock(sdioh_info_t *sd)
+{
+ ulong flags;
+ struct sdos_info *sdos;
+
+ sd_trace(("%s: %d, %d\n", __FUNCTION__, sd->lockcount, sd->client_intr_enabled));
+ ASSERT(sd->lockcount > 0);
+
+ sdos = (struct sdos_info *)sd->sdos_info;
+ ASSERT(sdos);
+
+ spin_lock_irqsave(&sdos->lock, flags);
+ if (--sd->lockcount == 0 && sd->client_intr_enabled) {
+ sdstd_devintr_on(sd);
+ }
+ spin_unlock_irqrestore(&sdos->lock, flags);
+ if (sd_3_power_save)
+ {
+ if ((sd->controller_type == SDIOH_TYPE_RICOH_R5C822) &&
+ (sd->version == HOST_CONTR_VER_3))
+ sdstd_turn_off_clock(sd);
+ }
+}
+
+void
+sdstd_os_lock_irqsave(sdioh_info_t *sd, ulong* flags)
+{
+ struct sdos_info *sdos = (struct sdos_info *)sd->sdos_info;
+ spin_lock_irqsave(&sdos->lock, *flags);
+}
+void
+sdstd_os_unlock_irqrestore(sdioh_info_t *sd, ulong* flags)
+{
+ struct sdos_info *sdos = (struct sdos_info *)sd->sdos_info;
+ spin_unlock_irqrestore(&sdos->lock, *flags);
+}
+
+void
+sdstd_waitlockfree(sdioh_info_t *sd)
+{
+ if (sd->lockcount) {
+ printk("wait lock free\n");
+ while (sd->lockcount)
+ {
+ yield();
+ }
+ }
+}
+
+#ifdef BCMQT
+void
+sdstd_os_yield(sdioh_info_t *sd)
+{
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 29))
+/*
+ * FC4/11 issue on QT if driver hogs > 10s of CPU causing:
+ * BUG: soft lockup detected on CPU#0!
+ *
+ * XXX Hack: For now, interleave yielding of CPU when we're spinning waiting for
+ * XXX register status
+ */
+ yield();
+#endif
+}
+#endif /* BCMQT */
+
+/* Returns 0 for success, -1 for interrupted, -2 for timeout */
+int
+sdstd_waitbits(sdioh_info_t *sd, uint16 norm, uint16 err, bool local_yield, uint16 *bits)
+{
+ struct sdos_info *sdos;
+ int rc = 0;
+
+ sdos = (struct sdos_info *)sd->sdos_info;
+
+#ifndef BCMSDYIELD
+ ASSERT(!local_yield);
+#endif
+ sd_trace(("%s: int 0x%02x err 0x%02x yield %d canblock %d\n",
+ __FUNCTION__, norm, err, local_yield, BLOCKABLE()));
+
+ /* Clear the "interrupt happened" flag and last intrstatus */
+ sd->got_hcint = FALSE;
+ sd->last_intrstatus = 0;
+
+#ifdef BCMSDYIELD
+ if (local_yield && BLOCKABLE()) {
+ /* Enable interrupts, wait for the indication, then disable */
+ sdstd_intrs_on(sd, norm, err);
+ rc = wait_event_interruptible_timeout(sdos->intr_wait_queue,
+ (sd->got_hcint),
+ SDSTD_WAITBITS_TIMEOUT);
+ if (rc < 0)
+ rc = -1; /* interrupted */
+ else if (rc == 0)
+ rc = -2; /* timeout */
+ else
+ rc = 0; /* success */
+ sdstd_intrs_off(sd, norm, err);
+ } else
+#endif /* BCMSDYIELD */
+ {
+ sdstd_spinbits(sd, norm, err);
+ }
+
+ sd_trace(("%s: last_intrstatus 0x%04x\n", __FUNCTION__, sd->last_intrstatus));
+
+ *bits = sd->last_intrstatus;
+
+ return rc;
+}
+
+#ifdef DHD_DEBUG
+void sdstd_enable_disable_periodic_timer(sdioh_info_t *sd, uint val)
+{
+ struct sdos_info *sdos = (struct sdos_info *)sd->sdos_info;
+
+ if (val == SD_DHD_ENABLE_PERIODIC_TUNING) {
+ /* start of tuning timer */
+ timer_expires(&sdos->tuning_timer) = jiffies + sdos->tuning_timer_exp * HZ;
+ mod_timer(&sdos->tuning_timer, timer_expires(&sdos->tuning_timer));
+ }
+ if (val == SD_DHD_DISABLE_PERIODIC_TUNING) {
+ /* stop periodic timer */
+ del_timer_sync(&sdos->tuning_timer);
+ }
+}
+#endif /* DHD_DEBUG */
+
+/* forward declarations for PCI probe and remove functions. */
+static int __devinit bcmsdh_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+static void __devexit bcmsdh_pci_remove(struct pci_dev *pdev);
+
+/**
+ * pci id table
+ */
+static struct pci_device_id bcmsdh_pci_devid[] __devinitdata = {
+ { vendor: PCI_ANY_ID,
+ device: PCI_ANY_ID,
+ subvendor: PCI_ANY_ID,
+ subdevice: PCI_ANY_ID,
+ class: 0,
+ class_mask: 0,
+ driver_data: 0,
+ },
+ { 0, 0, 0, 0, 0, 0, 0}
+};
+MODULE_DEVICE_TABLE(pci, bcmsdh_pci_devid);
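+/* The single wildcard entry above matches every PCI device, so
+ * bcmsdh_pci_probe() must filter candidates itself (see the
+ * bcmsdh_chipmatch() checks below).
+ */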
+
+/**
+ * SDIO Host Controller pci driver info
+ */
+static struct pci_driver bcmsdh_pci_driver = {
+ node: {&(bcmsdh_pci_driver.node), &(bcmsdh_pci_driver.node)},
+ name: "bcmsdh",
+ id_table: bcmsdh_pci_devid,
+ probe: bcmsdh_pci_probe,
+ remove: bcmsdh_pci_remove,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+ save_state: NULL,
+#endif
+ suspend: NULL,
+ resume: NULL,
+ };
+
+extern uint sd_pci_slot; /* Force detection to a particular PCI slot only.
+ * Allows for having multiple WL devices at once in
+ * a PC; only one instance of dhd will be usable at
+ * a time. Upper word is bus number, lower word is
+ * slot number. The default value of 0xffffffff
+ * turns this off.
+ */
+module_param(sd_pci_slot, uint, 0);
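+/* Hypothetical example: loading the module with sd_pci_slot=0x00010002 would
+ * bind only the controller on PCI bus 1, slot 2, per the encoding above.
+ */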
+
+/**
+ * Detect supported SDIO Host Controller and attach if found.
+ *
+ * Determine if the device described by pdev is a supported SDIO Host
+ * Controller. If so, attach to it and attach to the target device.
+ */
+static int __devinit
+bcmsdh_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ osl_t *osh = NULL;
+ sdioh_info_t *sdioh = NULL;
+ int rc;
+
+ if (sd_pci_slot != 0xFFFFffff) {
+ if (pdev->bus->number != (sd_pci_slot>>16) ||
+ PCI_SLOT(pdev->devfn) != (sd_pci_slot&0xffff)) {
+ sd_err(("%s: %s: bus %X, slot %X, vend %X, dev %X\n",
+ __FUNCTION__,
+ bcmsdh_chipmatch(pdev->vendor, pdev->device)
+ ?"Found compatible SDIOHC"
+ :"Probing unknown device",
+ pdev->bus->number, PCI_SLOT(pdev->devfn), pdev->vendor,
+ pdev->device));
+ return -ENODEV;
+ }
+ sd_err(("%s: %s: bus %X, slot %X, vendor %X, device %X (good PCI location)\n",
+ __FUNCTION__,
+ bcmsdh_chipmatch(pdev->vendor, pdev->device)
+ ?"Using compatible SDIOHC"
+ :"WARNING, forced use of unkown device",
+ pdev->bus->number, PCI_SLOT(pdev->devfn), pdev->vendor, pdev->device));
+ }
+
+ if ((pdev->vendor == VENDOR_TI) && ((pdev->device == PCIXX21_FLASHMEDIA_ID) ||
+ (pdev->device == PCIXX21_FLASHMEDIA0_ID))) {
+ uint32 config_reg;
+
+ sd_err(("%s: Disabling TI FlashMedia Controller.\n", __FUNCTION__));
+ if (!(osh = osl_attach(pdev, SDIO_BUS, TRUE))) {
+ sd_err(("%s: osl_attach failed\n", __FUNCTION__));
+ goto err;
+ }
+
+ config_reg = OSL_PCI_READ_CONFIG(osh, 0x4c, 4);
+
+ /*
+ * Set MMC_SD_DIS bit in FlashMedia Controller.
+ * Disabling the SD/MMC Controller in the FlashMedia Controller
+ * allows the Standard SD Host Controller to take over control
+ * of the SD Slot.
+ */
+ config_reg |= 0x02;
+ OSL_PCI_WRITE_CONFIG(osh, 0x4c, 4, config_reg);
+ osl_detach(osh);
+ }
+ /* match this pci device with what we support */
+ /* we can't solely rely on this to believe it is our SDIO Host Controller! */
+ if (!bcmsdh_chipmatch(pdev->vendor, pdev->device)) {
+ if (pdev->vendor == VENDOR_BROADCOM) {
+ sd_err(("%s: Unknown Broadcom device (vendor: %#x, device: %#x).\n",
+ __FUNCTION__, pdev->vendor, pdev->device));
+ }
+ return -ENODEV;
+ }
+
+ /* this is a pci device we might support */
+ sd_err(("%s: Found possible SDIO Host Controller: bus %d slot %d func %d irq %d\n",
+ __FUNCTION__,
+ pdev->bus->number, PCI_SLOT(pdev->devfn),
+ PCI_FUNC(pdev->devfn), pdev->irq));
+
+ /* use bcmsdh_query_device() to get the vendor ID of the target device so
+ * it will eventually appear in the Broadcom string on the console
+ */
+
+ /* allocate SDIO Host Controller state info */
+ if (!(osh = osl_attach(pdev, SDIO_BUS, TRUE))) {
+ sd_err(("%s: osl_attach failed\n", __FUNCTION__));
+ goto err;
+ }
+
+ /* map to address where host can access */
+ pci_set_master(pdev);
+ rc = pci_enable_device(pdev);
+ if (rc) {
+ sd_err(("%s: Cannot enable PCI device\n", __FUNCTION__));
+ goto err;
+ }
+
+ sdioh = sdioh_attach(osh, (void *)(ulong)pci_resource_start(pdev, 0), pdev->irq);
+ if (sdioh == NULL) {
+ sd_err(("%s: sdioh_attach failed\n", __FUNCTION__));
+ goto err;
+ }
+ sdioh->bcmsdh = bcmsdh_probe(osh, &pdev->dev, sdioh, NULL, PCI_BUS, -1, -1);
+ if (sdioh->bcmsdh == NULL) {
+ sd_err(("%s: bcmsdh_probe failed\n", __FUNCTION__));
+ goto err;
+ }
+
+ pci_set_drvdata(pdev, sdioh);
+ return 0;
+
+err:
+ if (sdioh != NULL)
+ sdioh_detach(osh, sdioh);
+ if (osh != NULL)
+ osl_detach(osh);
+ return -ENOMEM;
+}
+
+/**
+ * Detach from target devices and SDIO Host Controller
+ */
+static void __devexit
+bcmsdh_pci_remove(struct pci_dev *pdev)
+{
+ sdioh_info_t *sdioh;
+ osl_t *osh;
+
+ sdioh = pci_get_drvdata(pdev);
+ if (sdioh == NULL) {
+ sd_err(("%s: error, no sdioh handler found\n", __FUNCTION__));
+ return;
+ }
+
+ osh = sdioh->osh;
+ bcmsdh_remove(sdioh->bcmsdh);
+ sdioh_detach(osh, sdioh);
+ osl_detach(osh);
+}
+
+int bcmsdh_register_client_driver(void)
+{
+ return pci_module_init(&bcmsdh_pci_driver);
+}
+
+void bcmsdh_unregister_client_driver(void)
+{
+ pci_unregister_driver(&bcmsdh_pci_driver);
+}
diff --git a/bcmdhd.101.10.361.x/bcmspibrcm.c b/bcmdhd.101.10.361.x/bcmspibrcm.c
new file mode 100755
index 0000000..e58d0cf
--- /dev/null
+++ b/bcmdhd.101.10.361.x/bcmspibrcm.c
@@ -0,0 +1,1922 @@
+/*
+ * Broadcom BCMSDH to gSPI Protocol Conversion Layer
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+#ifdef BCMDONGLEHOST
+#define HSMODE
+#else
+#endif /* BCMDONGLEHOST */
+
+#include <typedefs.h>
+
+#include <bcmdevs.h>
+#include <bcmendian.h>
+#include <bcmutils.h>
+#include <osl.h>
+#include <hndsoc.h>
+#include <siutils.h>
+#include <sbchipc.h>
+#include <sbsdio.h> /* SDIO device core hardware definitions. */
+#include <spid.h>
+
+#include <bcmsdbus.h> /* bcmsdh to/from specific controller APIs */
+#include <sdiovar.h> /* ioctl/iovars */
+#include <sdio.h> /* SDIO Device and Protocol Specs */
+
+#if defined(linux)
+#include <pcicfg.h>
+#endif
+
+/* XXX Quick NDIS hack */
+#ifdef NDIS
+#define inline __inline
+#define PCI_CFG_VID 0
+#define PCI_CFG_BAR0 0x10
+#endif
+
+#include <bcmspibrcm.h>
+#ifdef BCMSPI_ANDROID
+extern void spi_sendrecv(sdioh_info_t *sd, uint8 *msg_out, uint8 *msg_in, int msglen);
+#else
+#include <bcmspi.h>
+#endif /* BCMSPI_ANDROID */
+
+/* these are for the older cores... for newer cores we have control for each of them */
+#define F0_RESPONSE_DELAY 16
+#define F1_RESPONSE_DELAY 16
+#define F2_RESPONSE_DELAY F0_RESPONSE_DELAY
+
+#define GSPI_F0_RESP_DELAY 0
+#define GSPI_F1_RESP_DELAY F1_RESPONSE_DELAY
+#define GSPI_F2_RESP_DELAY 0
+#define GSPI_F3_RESP_DELAY 0
+
+#define CMDLEN 4
+
+/* Globals */
+#if defined(BCMDBG) || defined(DHD_DEBUG)
+uint sd_msglevel = SDH_ERROR_VAL;
+#else
+uint sd_msglevel = 0;
+#endif /* BCMDBG || DHD_DEBUG */
+
+uint sd_hiok = FALSE; /* Use hi-speed mode if available? */
+uint sd_sdmode = SDIOH_MODE_SPI; /* Use SPI mode by default */
+uint sd_f2_blocksize = 64; /* Default blocksize */
+
+uint sd_divisor = 2;
+uint sd_power = 1; /* Default to SD Slot powered ON */
+uint sd_clock = 1; /* Default to SD Clock turned ON */
+uint sd_crc = 0; /* Default to SPI CRC Check turned OFF */
+uint sd_pci_slot = 0xFFFFffff; /* Used to force selection of a particular PCI slot */
+
+uint8 spi_outbuf[SPI_MAX_PKT_LEN];
+uint8 spi_inbuf[SPI_MAX_PKT_LEN];
+
+/* A 128-byte buffer is enough to clear data-not-available and program the
+ * response-delay F0 bits, assuming the F0 response delay will not exceed
+ * 100 bytes at 48MHz.
+ */
+#define BUF2_PKT_LEN 128
+uint8 spi_outbuf2[BUF2_PKT_LEN];
+uint8 spi_inbuf2[BUF2_PKT_LEN];
+#ifdef BCMSPI_ANDROID
+uint *dhd_spi_lockcount = NULL;
+#endif /* BCMSPI_ANDROID */
+
+#if !(defined(SPI_PIO_RW_BIGENDIAN) && defined(SPI_PIO_32BIT_RW))
+#define SPISWAP_WD4(x) bcmswap32(x);
+#define SPISWAP_WD2(x) (bcmswap16(x & 0xffff)) | \
+ (bcmswap16((x & 0xffff0000) >> 16) << 16);
+#else
+/* XXX Some SPI host controller changes endianness when writing/reading
+* to/from SPI device TX/RX register in case the bits_per_word is more than 1 byte.
+*/
+#define SPISWAP_WD4(x) x;
+#define SPISWAP_WD2(x) bcmswap32by16(x);
+#endif
+
+/* Prototypes */
+static bool bcmspi_test_card(sdioh_info_t *sd);
+static bool bcmspi_host_device_init_adapt(sdioh_info_t *sd);
+static int bcmspi_set_highspeed_mode(sdioh_info_t *sd, bool hsmode);
+static int bcmspi_cmd_issue(sdioh_info_t *sd, bool use_dma, uint32 cmd_arg,
+ uint32 *data, uint32 datalen);
+static int bcmspi_card_regread(sdioh_info_t *sd, int func, uint32 regaddr,
+ int regsize, uint32 *data);
+static int bcmspi_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr,
+ int regsize, uint32 data);
+static int bcmspi_card_bytewrite(sdioh_info_t *sd, int func, uint32 regaddr,
+ uint8 *data);
+static int bcmspi_driver_init(sdioh_info_t *sd);
+static int bcmspi_card_buf(sdioh_info_t *sd, int rw, int func, bool fifo,
+ uint32 addr, int nbytes, uint32 *data);
+static int bcmspi_card_regread_fixedaddr(sdioh_info_t *sd, int func, uint32 regaddr, int regsize,
+ uint32 *data);
+static void bcmspi_cmd_getdstatus(sdioh_info_t *sd, uint32 *dstatus_buffer);
+static int bcmspi_update_stats(sdioh_info_t *sd, uint32 cmd_arg);
+
+/*
+ * Public entry points & extern's
+ */
+extern sdioh_info_t *
+sdioh_attach(osl_t *osh, void *bar0, uint irq)
+{
+ sdioh_info_t *sd;
+
+ sd_trace(("%s\n", __FUNCTION__));
+ if ((sd = (sdioh_info_t *)MALLOC(osh, sizeof(sdioh_info_t))) == NULL) {
+ sd_err(("%s: out of memory, malloced %d bytes\n", __FUNCTION__, MALLOCED(osh)));
+ return NULL;
+ }
+ bzero((char *)sd, sizeof(sdioh_info_t));
+ sd->osh = osh;
+ if (spi_osinit(sd) != 0) {
+ sd_err(("%s: spi_osinit() failed\n", __FUNCTION__));
+ MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+ return NULL;
+ }
+
+#ifndef BCMSPI_ANDROID
+ sd->bar0 = bar0;
+#endif /* !BCMSPI_ANDROID */
+ sd->irq = irq;
+#ifndef BCMSPI_ANDROID
+ sd->intr_handler = NULL;
+ sd->intr_handler_arg = NULL;
+ sd->intr_handler_valid = FALSE;
+#endif /* !BCMSPI_ANDROID */
+
+ /* Set defaults */
+ sd->use_client_ints = TRUE;
+ sd->sd_use_dma = FALSE; /* DMA Not supported */
+
+ /* The SPI device defaults to 16-bit mode (wordlen = 2); change wordlen to
+ * 4 when the device is switched to 32-bit mode
+ */
+ sd->wordlen = 2;
+
+#ifdef BCMSPI_ANDROID
+ dhd_spi_lockcount = &sd->lockcount;
+#endif /* BCMSPI_ANDROID */
+
+#ifndef BCMSPI_ANDROID
+ if (!spi_hw_attach(sd)) {
+ sd_err(("%s: spi_hw_attach() failed\n", __FUNCTION__));
+ spi_osfree(sd);
+ MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+ return (NULL);
+ }
+#endif /* !BCMSPI_ANDROID */
+
+ if (bcmspi_driver_init(sd) != SUCCESS) {
+ sd_err(("%s: bcmspi_driver_init() failed()\n", __FUNCTION__));
+#ifndef BCMSPI_ANDROID
+ spi_hw_detach(sd);
+#endif /* !BCMSPI_ANDROID */
+ spi_osfree(sd);
+ MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+ return (NULL);
+ }
+
+ if (spi_register_irq(sd, irq) != SUCCESS) {
+ sd_err(("%s: spi_register_irq() failed for irq = %d\n", __FUNCTION__, irq));
+#ifndef BCMSPI_ANDROID
+ spi_hw_detach(sd);
+#endif /* !BCMSPI_ANDROID */
+ spi_osfree(sd);
+ MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+ return (NULL);
+ }
+
+ sd_trace(("%s: Done\n", __FUNCTION__));
+
+ return sd;
+}
+
+extern SDIOH_API_RC
+sdioh_detach(osl_t *osh, sdioh_info_t *sd)
+{
+ sd_trace(("%s\n", __FUNCTION__));
+ if (sd) {
+ sd_err(("%s: detaching from hardware\n", __FUNCTION__));
+ spi_free_irq(sd->irq, sd);
+#ifndef BCMSPI_ANDROID
+ spi_hw_detach(sd);
+#endif /* !BCMSPI_ANDROID */
+ spi_osfree(sd);
+#ifdef BCMSPI_ANDROID
+ dhd_spi_lockcount = NULL;
+#endif /* BCMSPI_ANDROID */
+ MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+ }
+ return SDIOH_API_RC_SUCCESS;
+}
+
+/* Configure callback to client when we receive a client interrupt */
+extern SDIOH_API_RC
+sdioh_interrupt_register(sdioh_info_t *sd, sdioh_cb_fn_t fn, void *argh)
+{
+ sd_trace(("%s: Entering\n", __FUNCTION__));
+#if !defined(OOB_INTR_ONLY)
+ sd->intr_handler = fn;
+ sd->intr_handler_arg = argh;
+ sd->intr_handler_valid = TRUE;
+#endif /* !defined(OOB_INTR_ONLY) */
+ return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_interrupt_deregister(sdioh_info_t *sd)
+{
+ sd_trace(("%s: Entering\n", __FUNCTION__));
+#if !defined(OOB_INTR_ONLY)
+ sd->intr_handler_valid = FALSE;
+ sd->intr_handler = NULL;
+ sd->intr_handler_arg = NULL;
+#endif /* !defined(OOB_INTR_ONLY) */
+ return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_interrupt_query(sdioh_info_t *sd, bool *onoff)
+{
+#ifndef BCMSPI_ANDROID
+ sd_trace(("%s: Entering\n", __FUNCTION__));
+ *onoff = sd->client_intr_enabled;
+#endif /* !BCMSPI_ANDROID */
+ return SDIOH_API_RC_SUCCESS;
+}
+
+#if defined(DHD_DEBUG) || defined(BCMDBG)
+extern bool
+sdioh_interrupt_pending(sdioh_info_t *sd)
+{
+ return 0;
+}
+#endif
+
+/* Provide dstatus bits of spi-transaction for dhd layers. */
+extern uint32
+sdioh_get_dstatus(sdioh_info_t *sd)
+{
+ return sd->card_dstatus;
+}
+
+extern void
+sdioh_chipinfo(sdioh_info_t *sd, uint32 chip, uint32 chiprev)
+{
+ sd->chip = chip;
+ sd->chiprev = chiprev;
+}
+
+extern void
+sdioh_dwordmode(sdioh_info_t *sd, bool set)
+{
+ uint8 reg = 0;
+ int status;
+
+ if ((status = sdioh_request_byte(sd, SDIOH_READ, SPI_FUNC_0, SPID_STATUS_ENABLE, &reg)) !=
+ SUCCESS) {
+ sd_err(("%s: Failed to set dwordmode in gSPI\n", __FUNCTION__));
+ return;
+ }
+
+ if (set) {
+ reg |= DWORD_PKT_LEN_EN;
+ sd->dwordmode = TRUE;
+ sd->client_block_size[SPI_FUNC_2] = 4096; /* h2spi's limit is 4KB, we support 8KB */
+ } else {
+ reg &= ~DWORD_PKT_LEN_EN;
+ sd->dwordmode = FALSE;
+ sd->client_block_size[SPI_FUNC_2] = 2048;
+ }
+
+ if ((status = sdioh_request_byte(sd, SDIOH_WRITE, SPI_FUNC_0, SPID_STATUS_ENABLE, &reg)) !=
+ SUCCESS) {
+ sd_err(("%s: Failed to set dwordmode in gSPI\n", __FUNCTION__));
+ return;
+ }
+}
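+/* Note: with DWORD_PKT_LEN_EN set, the SPI_LEN field of an F2 command is
+ * interpreted as a count of 4-byte words rather than bytes (see the dwordmode
+ * handling in bcmspi_card_buf), which is what permits the larger 4KB F2 frames.
+ */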
+
+uint
+sdioh_query_iofnum(sdioh_info_t *sd)
+{
+ return sd->num_funcs;
+}
+
+/* IOVar table */
+enum {
+ IOV_MSGLEVEL = 1,
+ IOV_BLOCKMODE,
+ IOV_BLOCKSIZE,
+ IOV_DMA,
+ IOV_USEINTS,
+ IOV_NUMINTS,
+ IOV_NUMLOCALINTS,
+ IOV_HOSTREG,
+ IOV_DEVREG,
+ IOV_DIVISOR,
+ IOV_SDMODE,
+ IOV_HISPEED,
+ IOV_HCIREGS,
+ IOV_POWER,
+ IOV_CLOCK,
+ IOV_SPIERRSTATS,
+ IOV_RESP_DELAY_ALL
+};
+
+const bcm_iovar_t sdioh_iovars[] = {
+ {"sd_msglevel", IOV_MSGLEVEL, 0, IOVT_UINT32, 0 },
+ {"sd_blocksize", IOV_BLOCKSIZE, 0, IOVT_UINT32, 0 }, /* ((fn << 16) | size) */
+ {"sd_dma", IOV_DMA, 0, IOVT_BOOL, 0 },
+ {"sd_ints", IOV_USEINTS, 0, IOVT_BOOL, 0 },
+ {"sd_numints", IOV_NUMINTS, 0, IOVT_UINT32, 0 },
+ {"sd_numlocalints", IOV_NUMLOCALINTS, 0, IOVT_UINT32, 0 },
+ {"sd_hostreg", IOV_HOSTREG, 0, IOVT_BUFFER, sizeof(sdreg_t) },
+ {"sd_devreg", IOV_DEVREG, 0, IOVT_BUFFER, sizeof(sdreg_t) },
+ {"sd_divisor", IOV_DIVISOR, 0, IOVT_UINT32, 0 },
+ {"sd_power", IOV_POWER, 0, IOVT_UINT32, 0 },
+ {"sd_clock", IOV_CLOCK, 0, IOVT_UINT32, 0 },
+ {"sd_mode", IOV_SDMODE, 0, IOVT_UINT32, 100},
+ {"sd_highspeed", IOV_HISPEED, 0, IOVT_UINT32, 0},
+#ifdef BCMDBG
+ {"sd_hciregs", IOV_HCIREGS, 0, IOVT_BUFFER, 0 },
+#endif
+ {"spi_errstats", IOV_SPIERRSTATS, 0, IOVT_BUFFER, sizeof(struct spierrstats_t) },
+ {"spi_respdelay", IOV_RESP_DELAY_ALL, 0, IOVT_BOOL, 0 },
+ {NULL, 0, 0, 0, 0 }
+};
+
+int
+sdioh_iovar_op(sdioh_info_t *si, const char *name,
+ void *params, int plen, void *arg, uint len, bool set)
+{
+ const bcm_iovar_t *vi = NULL;
+ int bcmerror = 0;
+ uint val_size;
+ int32 int_val = 0;
+ bool bool_val;
+ uint32 actionid;
+/*
+ sdioh_regs_t *regs;
+*/
+
+ ASSERT(name);
+ ASSERT(len >= 0);
+
+ /* Get must have return space; Set does not take qualifiers */
+ ASSERT(set || (arg && len));
+ ASSERT(!set || (!params && !plen));
+
+ sd_trace(("%s: Enter (%s %s)\n", __FUNCTION__, (set ? "set" : "get"), name));
+
+ if ((vi = bcm_iovar_lookup(sdioh_iovars, name)) == NULL) {
+ bcmerror = BCME_UNSUPPORTED;
+ goto exit;
+ }
+
+ if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, set)) != 0)
+ goto exit;
+
+ /* XXX Copied from dhd, copied from wl; certainly overkill here? */
+ /* Set up params so get and set can share the convenience variables */
+ if (params == NULL) {
+ params = arg;
+ plen = len;
+ }
+
+ if (vi->type == IOVT_VOID)
+ val_size = 0;
+ else if (vi->type == IOVT_BUFFER)
+ val_size = len;
+ else
+ val_size = sizeof(int);
+
+ if (plen >= (int)sizeof(int_val))
+ bcopy(params, &int_val, sizeof(int_val));
+
+ bool_val = (int_val != 0) ? TRUE : FALSE;
+
+ actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
+ switch (actionid) {
+ case IOV_GVAL(IOV_MSGLEVEL):
+ int_val = (int32)sd_msglevel;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_MSGLEVEL):
+ sd_msglevel = int_val;
+ break;
+
+ case IOV_GVAL(IOV_BLOCKSIZE):
+ if ((uint32)int_val > si->num_funcs) {
+ bcmerror = BCME_BADARG;
+ break;
+ }
+ int_val = (int32)si->client_block_size[int_val];
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_GVAL(IOV_DMA):
+ int_val = (int32)si->sd_use_dma;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_DMA):
+ si->sd_use_dma = (bool)int_val;
+ break;
+
+ case IOV_GVAL(IOV_USEINTS):
+ int_val = (int32)si->use_client_ints;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_USEINTS):
+ break;
+
+ case IOV_GVAL(IOV_DIVISOR):
+ int_val = (uint32)sd_divisor;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+#ifndef BCMSPI_ANDROID
+ case IOV_SVAL(IOV_DIVISOR):
+ sd_divisor = int_val;
+ if (!spi_start_clock(si, (uint16)sd_divisor)) {
+ sd_err(("%s: set clock failed\n", __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ }
+ break;
+#endif /* !BCMSPI_ANDROID */
+
+ case IOV_GVAL(IOV_POWER):
+ int_val = (uint32)sd_power;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_POWER):
+ sd_power = int_val;
+ break;
+
+ case IOV_GVAL(IOV_CLOCK):
+ int_val = (uint32)sd_clock;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_CLOCK):
+ sd_clock = int_val;
+ break;
+
+ case IOV_GVAL(IOV_SDMODE):
+ int_val = (uint32)sd_sdmode;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_SDMODE):
+ sd_sdmode = int_val;
+ break;
+
+ case IOV_GVAL(IOV_HISPEED):
+ int_val = (uint32)sd_hiok;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_HISPEED):
+ sd_hiok = int_val;
+
+ if (!bcmspi_set_highspeed_mode(si, (bool)sd_hiok)) {
+ sd_err(("%s: Failed changing highspeed mode to %d.\n",
+ __FUNCTION__, sd_hiok));
+ bcmerror = BCME_ERROR;
+ return ERROR;
+ }
+ break;
+
+ case IOV_GVAL(IOV_NUMINTS):
+ int_val = (int32)si->intrcount;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_GVAL(IOV_NUMLOCALINTS):
+ int_val = (int32)si->local_intrcount;
+ bcopy(&int_val, arg, val_size);
+ break;
+ case IOV_GVAL(IOV_DEVREG):
+ {
+ /* XXX Should copy for alignment reasons */
+ sdreg_t *sd_ptr = (sdreg_t *)params;
+ uint8 data;
+
+ if (sdioh_cfg_read(si, sd_ptr->func, sd_ptr->offset, &data)) {
+ bcmerror = BCME_SDIO_ERROR;
+ break;
+ }
+
+ int_val = (int)data;
+ bcopy(&int_val, arg, sizeof(int_val));
+ break;
+ }
+
+ case IOV_SVAL(IOV_DEVREG):
+ {
+ /* XXX Should copy for alignment reasons */
+ sdreg_t *sd_ptr = (sdreg_t *)params;
+ uint8 data = (uint8)sd_ptr->value;
+
+ if (sdioh_cfg_write(si, sd_ptr->func, sd_ptr->offset, &data)) {
+ bcmerror = BCME_SDIO_ERROR;
+ break;
+ }
+ break;
+ }
+
+#ifdef BCMDBG
+ case IOV_GVAL(IOV_HCIREGS):
+ {
+ struct bcmstrbuf b;
+ bcm_binit(&b, arg, len);
+
+ spi_lock(si);
+ bcm_bprintf(&b, "Unsupported\n");
+ spi_unlock(si);
+
+ if (!b.size)
+ bcmerror = BCME_BUFTOOSHORT;
+ break;
+ }
+#endif /* BCMDBG */
+
+ case IOV_GVAL(IOV_SPIERRSTATS):
+ {
+ bcopy(&si->spierrstats, arg, sizeof(struct spierrstats_t));
+ break;
+ }
+
+ case IOV_SVAL(IOV_SPIERRSTATS):
+ {
+ bzero(&si->spierrstats, sizeof(struct spierrstats_t));
+ break;
+ }
+
+ case IOV_GVAL(IOV_RESP_DELAY_ALL):
+ int_val = (int32)si->resp_delay_all;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_RESP_DELAY_ALL):
+ si->resp_delay_all = (bool)int_val;
+ int_val = STATUS_ENABLE|INTR_WITH_STATUS;
+ if (si->resp_delay_all)
+ int_val |= RESP_DELAY_ALL;
+ else {
+ if (bcmspi_card_regwrite(si, SPI_FUNC_0, SPID_RESPONSE_DELAY, 1,
+ F1_RESPONSE_DELAY) != SUCCESS) {
+ sd_err(("%s: Unable to set response delay.\n", __FUNCTION__));
+ bcmerror = BCME_SDIO_ERROR;
+ break;
+ }
+ }
+
+ if (bcmspi_card_regwrite(si, SPI_FUNC_0, SPID_STATUS_ENABLE, 1, int_val)
+ != SUCCESS) {
+ sd_err(("%s: Unable to set response delay.\n", __FUNCTION__));
+ bcmerror = BCME_SDIO_ERROR;
+ break;
+ }
+ break;
+
+ default:
+ bcmerror = BCME_UNSUPPORTED;
+ break;
+ }
+exit:
+
+ /* XXX Remove protective lock after clients all clean... */
+ return bcmerror;
+}
+
+extern SDIOH_API_RC
+sdioh_cfg_read(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
+{
+ SDIOH_API_RC status;
+ /* No lock needed since sdioh_request_byte does locking */
+ status = sdioh_request_byte(sd, SDIOH_READ, fnc_num, addr, data);
+ return status;
+}
+
+extern SDIOH_API_RC
+sdioh_cfg_write(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
+{
+ /* No lock needed since sdioh_request_byte does locking */
+ SDIOH_API_RC status;
+
+ /* WAR for gSPI for PR55208: Read SFC_WF_TERM before write for write to be
+ * successful on address SBSDIO_FUNC1_FRAMECTRL.
+ */
+ if ((fnc_num == SPI_FUNC_1) && (addr == SBSDIO_FUNC1_FRAMECTRL)) {
+ uint8 dummy_data;
+ status = sdioh_cfg_read(sd, fnc_num, addr, &dummy_data);
+ if (status) {
+ sd_err(("sdioh_cfg_read() failed.\n"));
+ return status;
+ }
+ }
+
+ status = sdioh_request_byte(sd, SDIOH_WRITE, fnc_num, addr, data);
+ return status;
+}
+
+extern SDIOH_API_RC
+sdioh_cis_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 length)
+{
+ uint32 count;
+ int offset;
+ uint32 cis_byte;
+ uint16 *cis = (uint16 *)cisd;
+ uint bar0 = SI_ENUM_BASE(sd->sih);
+ int status;
+ uint8 data;
+
+ sd_trace(("%s: Func %d\n", __FUNCTION__, func));
+
+ spi_lock(sd);
+
+ /* Set sb window address to 0x18000000 */
+ data = (bar0 >> 8) & SBSDIO_SBADDRLOW_MASK;
+ status = bcmspi_card_bytewrite(sd, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW, &data);
+ if (status == SUCCESS) {
+ data = (bar0 >> 16) & SBSDIO_SBADDRMID_MASK;
+ status = bcmspi_card_bytewrite(sd, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRMID, &data);
+ } else {
+ sd_err(("%s: Unable to set sb-addr-windows\n", __FUNCTION__));
+ spi_unlock(sd);
+ return (BCME_ERROR);
+ }
+ if (status == SUCCESS) {
+ data = (bar0 >> 24) & SBSDIO_SBADDRHIGH_MASK;
+ status = bcmspi_card_bytewrite(sd, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRHIGH, &data);
+ } else {
+ sd_err(("%s: Unable to set sb-addr-windows\n", __FUNCTION__));
+ spi_unlock(sd);
+ return (BCME_ERROR);
+ }
+
+ offset = CC_SROM_OTP; /* OTP offset in chipcommon. */
+ for (count = 0; count < length/2; count++) {
+ if (bcmspi_card_regread (sd, SDIO_FUNC_1, offset, 2, &cis_byte) < 0) {
+ sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__));
+ spi_unlock(sd);
+ return (BCME_ERROR);
+ }
+
+ *cis = (uint16)cis_byte;
+ cis++;
+ offset += 2;
+ }
+
+ spi_unlock(sd);
+
+ return (BCME_OK);
+}
+
+extern SDIOH_API_RC
+sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *byte)
+{
+ int status;
+ uint32 cmd_arg;
+ uint32 dstatus;
+ uint32 data = (uint32)(*byte);
+
+ spi_lock(sd);
+
+ cmd_arg = 0;
+ cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func);
+ cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1); /* Incremental access */
+ cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr);
+ cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, rw == SDIOH_READ ? 0 : 1);
+ cmd_arg = SFIELD(cmd_arg, SPI_LEN, 1);
+
+ if (rw == SDIOH_READ) {
+ sd_trace(("%s: RD cmd_arg=0x%x func=%d regaddr=0x%x\n",
+ __FUNCTION__, cmd_arg, func, regaddr));
+ } else {
+ sd_trace(("%s: WR cmd_arg=0x%x func=%d regaddr=0x%x data=0x%x\n",
+ __FUNCTION__, cmd_arg, func, regaddr, data));
+ }
+
+ if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, &data, 1)) != SUCCESS) {
+ spi_unlock(sd);
+ return status;
+ }
+
+ if (rw == SDIOH_READ) {
+ *byte = (uint8)data;
+ sd_trace(("%s: RD result=0x%x\n", __FUNCTION__, *byte));
+ }
+
+ bcmspi_cmd_getdstatus(sd, &dstatus);
+ if (dstatus)
+ sd_trace(("dstatus=0x%x\n", dstatus));
+
+ spi_unlock(sd);
+ return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint addr,
+ uint32 *word, uint nbytes)
+{
+ int status;
+
+ spi_lock(sd);
+
+ if (rw == SDIOH_READ)
+ status = bcmspi_card_regread(sd, func, addr, nbytes, word);
+ else
+ status = bcmspi_card_regwrite(sd, func, addr, nbytes, *word);
+
+ spi_unlock(sd);
+ return (status == SUCCESS ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
+}
+
+extern SDIOH_API_RC
+sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint rw, uint func,
+ uint addr, uint reg_width, uint buflen_u, uint8 *buffer, void *pkt)
+{
+ int len;
+ int buflen = (int)buflen_u;
+ bool fifo = (fix_inc == SDIOH_DATA_FIX);
+
+ spi_lock(sd);
+
+ ASSERT(reg_width == 4);
+ ASSERT(buflen_u < (1 << 30));
+ ASSERT(sd->client_block_size[func]);
+
+ sd_data(("%s: %c len %d r_cnt %d t_cnt %d, pkt @0x%p\n",
+ __FUNCTION__, rw == SDIOH_READ ? 'R' : 'W',
+ buflen_u, sd->r_cnt, sd->t_cnt, pkt));
+
+ /* Break buffer down into blocksize chunks. */
+ while (buflen > 0) {
+ len = MIN(sd->client_block_size[func], buflen);
+ if (bcmspi_card_buf(sd, rw, func, fifo, addr, len, (uint32 *)buffer) != SUCCESS) {
+ sd_err(("%s: bcmspi_card_buf %s failed\n",
+ __FUNCTION__, rw == SDIOH_READ ? "Read" : "Write"));
+ spi_unlock(sd);
+ return SDIOH_API_RC_FAIL;
+ }
+ buffer += len;
+ buflen -= len;
+ if (!fifo)
+ addr += len;
+ }
+ spi_unlock(sd);
+ return SDIOH_API_RC_SUCCESS;
+}
+
+/* This function allows a write to the gspi bus while another rd/wr function is
+ * deep down the call stack. Its main aim is to keep such SPI writes simple rather
+ * than recursive: e.g. when the response delay must be programmed on the fly after
+ * detecting the SPI function, this call programs it directly.
+ */
+static int
+bcmspi_card_byterewrite(sdioh_info_t *sd, int func, uint32 regaddr, uint8 byte)
+{
+ uint32 cmd_arg;
+ uint32 datalen = 1;
+ uint32 hostlen;
+
+ cmd_arg = 0;
+
+ cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 1);
+ cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1); /* Incremental access */
+ cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func);
+ cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr);
+ cmd_arg = SFIELD(cmd_arg, SPI_LEN, datalen);
+
+ sd_trace(("%s cmd_arg = 0x%x\n", __FUNCTION__, cmd_arg));
+
+#ifdef BCMDBG
+ /* Fill up buffers with a value that generates known dutycycle on MOSI/MISO lines. */
+ memset(spi_outbuf2, 0xee, BUF2_PKT_LEN);
+ memset(spi_inbuf2, 0xee, BUF2_PKT_LEN);
+#endif /* BCMDBG */
+
+ /* Set up and issue the SPI command. MSByte goes out on bus first. Increase datalen
+ * according to the wordlen mode(16/32bit) the device is in.
+ */
+ ASSERT(sd->wordlen == 4 || sd->wordlen == 2);
+ datalen = ROUNDUP(datalen, sd->wordlen);
+
+ /* Start by copying command in the spi-outbuffer */
+ if (sd->wordlen == 4) { /* 32bit spid */
+ *(uint32 *)spi_outbuf2 = SPISWAP_WD4(cmd_arg);
+ if (datalen & 0x3)
+ datalen += (4 - (datalen & 0x3));
+ } else if (sd->wordlen == 2) { /* 16bit spid */
+ *(uint32 *)spi_outbuf2 = SPISWAP_WD2(cmd_arg);
+ if (datalen & 0x1)
+ datalen++;
+ } else {
+ sd_err(("%s: Host is %d bit spid, could not create SPI command.\n",
+ __FUNCTION__, 8 * sd->wordlen));
+ return ERROR;
+ }
+
+ /* for Write, put the data into the output buffer */
+ if (datalen != 0) {
+ if (sd->wordlen == 4) { /* 32bit spid */
+ *(uint32 *)&spi_outbuf2[CMDLEN] = SPISWAP_WD4(byte);
+ } else if (sd->wordlen == 2) { /* 16bit spid */
+ *(uint32 *)&spi_outbuf2[CMDLEN] = SPISWAP_WD2(byte);
+ }
+ }
+
+ /* +4 for cmd, +4 for dstatus */
+ hostlen = datalen + 8;
+ hostlen += (4 - (hostlen & 0x3));
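+ /* e.g. a 1-byte rewrite in 32-bit mode: datalen rounds up to 4, hostlen
+ * becomes 4 + 8 = 12, and the adjustment above pads it to 16 bytes on the
+ * wire (note it adds 4 even when hostlen is already word-aligned).
+ */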
+ spi_sendrecv(sd, spi_outbuf2, spi_inbuf2, hostlen);
+
+ /* Last 4bytes are dstatus. Device is configured to return status bits. */
+ if (sd->wordlen == 4) { /* 32bit spid */
+ sd->card_dstatus = SPISWAP_WD4(*(uint32 *)&spi_inbuf2[datalen + CMDLEN ]);
+ } else if (sd->wordlen == 2) { /* 16bit spid */
+ sd->card_dstatus = SPISWAP_WD2(*(uint32 *)&spi_inbuf2[datalen + CMDLEN ]);
+ } else {
+ sd_err(("%s: Host is %d bit machine, could not read SPI dstatus.\n",
+ __FUNCTION__, 8 * sd->wordlen));
+ return ERROR;
+ }
+
+ if (sd->card_dstatus)
+ sd_trace(("dstatus after byte rewrite = 0x%x\n", sd->card_dstatus));
+
+ return (BCME_OK);
+}
+
+/* Program the response delay corresponding to the spi function */
+static int
+bcmspi_prog_resp_delay(sdioh_info_t *sd, int func, uint8 resp_delay)
+{
+ if (sd->resp_delay_all == FALSE)
+ return (BCME_OK);
+
+ if (sd->prev_fun == func)
+ return (BCME_OK);
+
+ if (F0_RESPONSE_DELAY == F1_RESPONSE_DELAY)
+ return (BCME_OK);
+
+ bcmspi_card_byterewrite(sd, SPI_FUNC_0, SPID_RESPONSE_DELAY, resp_delay);
+
+ /* Remember function for which to avoid reprogramming resp-delay in next iteration */
+ sd->prev_fun = func;
+
+ return (BCME_OK);
+
+}
+
+#define GSPI_RESYNC_PATTERN 0x0
+
+/* A resync pattern is a 32bit all-zeros word on the MOSI line. It's a special
+ * command in gSPI: it resets the spi-bkplane logic so that all F1-related
+ * ping-pong buffer logic is synchronised and all queued requests are cancelled.
+ */
+static int
+bcmspi_resync_f1(sdioh_info_t *sd)
+{
+ uint32 cmd_arg = GSPI_RESYNC_PATTERN, data = 0, datalen = 0;
+
+#ifdef BCMDBG
+ /* Fill up buffers with a value that generates known dutycycle on MOSI/MISO lines. */
+ memset(spi_outbuf2, 0xee, BUF2_PKT_LEN);
+ memset(spi_inbuf2, 0xee, BUF2_PKT_LEN);
+#endif /* BCMDBG */
+
+ /* Set up and issue the SPI command. MSByte goes out on bus first. Increase datalen
+ * according to the wordlen mode(16/32bit) the device is in.
+ */
+ ASSERT(sd->wordlen == 4 || sd->wordlen == 2);
+ datalen = ROUNDUP(datalen, sd->wordlen);
+
+ /* Start by copying command in the spi-outbuffer */
+ *(uint32 *)spi_outbuf2 = cmd_arg;
+
+ /* for Write, put the data into the output buffer */
+ *(uint32 *)&spi_outbuf2[CMDLEN] = data;
+
+ /* +4 for cmd, +4 for dstatus */
+ spi_sendrecv(sd, spi_outbuf2, spi_inbuf2, datalen + 8);
+
+ /* Last 4bytes are dstatus. Device is configured to return status bits. */
+ if (sd->wordlen == 4) { /* 32bit spid */
+ sd->card_dstatus = SPISWAP_WD4(*(uint32 *)&spi_inbuf2[datalen + CMDLEN ]);
+ } else if (sd->wordlen == 2) { /* 16bit spid */
+ sd->card_dstatus = SPISWAP_WD2(*(uint32 *)&spi_inbuf2[datalen + CMDLEN ]);
+ } else {
+ sd_err(("%s: Host is %d bit machine, could not read SPI dstatus.\n",
+ __FUNCTION__, 8 * sd->wordlen));
+ return ERROR;
+ }
+
+ if (sd->card_dstatus)
+ sd_trace(("dstatus after resync pattern write = 0x%x\n", sd->card_dstatus));
+
+ return (BCME_OK);
+}
+
+uint32 dstatus_count = 0;
+
+static int
+bcmspi_update_stats(sdioh_info_t *sd, uint32 cmd_arg)
+{
+ uint32 dstatus = sd->card_dstatus;
+ struct spierrstats_t *spierrstats = &sd->spierrstats;
+ int err = SUCCESS;
+
+ sd_trace(("cmd = 0x%x, dstatus = 0x%x\n", cmd_arg, dstatus));
+
+ /* Store dstatus of last few gSPI transactions */
+ spierrstats->dstatus[dstatus_count % NUM_PREV_TRANSACTIONS] = dstatus;
+ spierrstats->spicmd[dstatus_count % NUM_PREV_TRANSACTIONS] = cmd_arg;
+ dstatus_count++;
+
+ if (sd->card_init_done == FALSE)
+ return err;
+
+ if (dstatus & STATUS_DATA_NOT_AVAILABLE) {
+ spierrstats->dna++;
+ sd_trace(("Read data not available on F1 addr = 0x%x\n",
+ GFIELD(cmd_arg, SPI_REG_ADDR)));
+ /* Clear dna bit */
+ bcmspi_card_byterewrite(sd, SPI_FUNC_0, SPID_INTR_REG, DATA_UNAVAILABLE);
+ }
+
+ if (dstatus & STATUS_UNDERFLOW) {
+ spierrstats->rdunderflow++;
+ sd_err(("FIFO underflow happened due to current F2 read command.\n"));
+ }
+
+ if (dstatus & STATUS_OVERFLOW) {
+ spierrstats->wroverflow++;
+ sd_err(("FIFO overflow happened due to current (F1/F2) write command.\n"));
+ bcmspi_card_byterewrite(sd, SPI_FUNC_0, SPID_INTR_REG, F1_OVERFLOW);
+ bcmspi_resync_f1(sd);
+ sd_err(("Recovering from F1 FIFO overflow.\n"));
+ }
+
+ if (dstatus & STATUS_F2_INTR) {
+ spierrstats->f2interrupt++;
+ sd_trace(("Interrupt from F2. SW should clear corresponding IntStatus bits\n"));
+ }
+
+ if (dstatus & STATUS_F3_INTR) {
+ spierrstats->f3interrupt++;
+ sd_err(("Interrupt from F3. SW should clear corresponding IntStatus bits\n"));
+ }
+
+ if (dstatus & STATUS_HOST_CMD_DATA_ERR) {
+ spierrstats->hostcmddataerr++;
+ sd_err(("Error in CMD or Host data, detected by CRC/Checksum (optional)\n"));
+ }
+
+ if (dstatus & STATUS_F2_PKT_AVAILABLE) {
+ spierrstats->f2pktavailable++;
+ sd_trace(("Packet is available/ready in F2 TX FIFO\n"));
+ sd_trace(("Packet length = %d\n", sd->dwordmode ?
+ ((dstatus & STATUS_F2_PKT_LEN_MASK) >> (STATUS_F2_PKT_LEN_SHIFT - 2)) :
+ ((dstatus & STATUS_F2_PKT_LEN_MASK) >> STATUS_F2_PKT_LEN_SHIFT)));
+ }
+
+ if (dstatus & STATUS_F3_PKT_AVAILABLE) {
+ spierrstats->f3pktavailable++;
+ sd_err(("Packet is available/ready in F3 TX FIFO\n"));
+ sd_err(("Packet length = %d\n",
+ (dstatus & STATUS_F3_PKT_LEN_MASK) >> STATUS_F3_PKT_LEN_SHIFT));
+ }
+
+ return err;
+}
+
+extern int
+sdioh_abort(sdioh_info_t *sd, uint func)
+{
+ return 0;
+}
+
+int
+sdioh_start(sdioh_info_t *sd, int stage)
+{
+ return SUCCESS;
+}
+
+int
+sdioh_stop(sdioh_info_t *sd)
+{
+ return SUCCESS;
+}
+
+int
+sdioh_waitlockfree(sdioh_info_t *sd)
+{
+ return SUCCESS;
+}
+
+#ifdef BCMINTERNAL
+extern SDIOH_API_RC
+sdioh_test_diag(sdioh_info_t *sd)
+{
+ sd_err(("%s: Implement me\n", __FUNCTION__));
+ return (0);
+}
+#endif /* BCMINTERNAL */
+
+/*
+ * Private/Static work routines
+ */
+static int
+bcmspi_host_init(sdioh_info_t *sd)
+{
+
+ /* Default power on mode */
+ sd->sd_mode = SDIOH_MODE_SPI;
+ sd->polled_mode = TRUE;
+ sd->host_init_done = TRUE;
+ sd->card_init_done = FALSE;
+ sd->adapter_slot = 1;
+
+ return (SUCCESS);
+}
+
+static int
+get_client_blocksize(sdioh_info_t *sd)
+{
+ uint32 regdata[2];
+ int status;
+
+ /* Find F1/F2/F3 max packet size */
+ if ((status = bcmspi_card_regread(sd, 0, SPID_F1_INFO_REG,
+ 8, regdata)) != SUCCESS) {
+ return status;
+ }
+
+ sd_trace(("pkt_size regdata[0] = 0x%x, regdata[1] = 0x%x\n",
+ regdata[0], regdata[1]));
+
+ sd->client_block_size[1] = (regdata[0] & F1_MAX_PKT_SIZE) >> 2;
+ sd_trace(("Func1 blocksize = %d\n", sd->client_block_size[1]));
+ ASSERT(sd->client_block_size[1] == BLOCK_SIZE_F1);
+
+ sd->client_block_size[2] = ((regdata[0] >> 16) & F2_MAX_PKT_SIZE) >> 2;
+ sd_trace(("Func2 blocksize = %d\n", sd->client_block_size[2]));
+ ASSERT(sd->client_block_size[2] == BLOCK_SIZE_F2);
+
+ sd->client_block_size[3] = (regdata[1] & F3_MAX_PKT_SIZE) >> 2;
+ sd_trace(("Func3 blocksize = %d\n", sd->client_block_size[3]));
+ ASSERT(sd->client_block_size[3] == BLOCK_SIZE_F3);
+
+ return 0;
+}
+
+static int
+bcmspi_client_init(sdioh_info_t *sd)
+{
+ uint32 status_en_reg = 0;
+ sd_trace(("%s: Powering up slot %d\n", __FUNCTION__, sd->adapter_slot));
+
+#ifndef BCMSPI_ANDROID
+#ifdef HSMODE
+ if (!spi_start_clock(sd, (uint16)sd_divisor)) {
+ sd_err(("spi_start_clock failed\n"));
+ return ERROR;
+ }
+#else
+ /* Start at ~400KHz clock rate for initialization */
+ if (!spi_start_clock(sd, 128)) {
+ sd_err(("spi_start_clock failed\n"));
+ return ERROR;
+ }
+#endif /* HSMODE */
+#endif /* !BCMSPI_ANDROID */
+
+ if (!bcmspi_host_device_init_adapt(sd)) {
+ sd_err(("bcmspi_host_device_init_adapt failed\n"));
+ return ERROR;
+ }
+
+ if (!bcmspi_test_card(sd)) {
+ sd_err(("bcmspi_test_card failed\n"));
+ return ERROR;
+ }
+
+ sd->num_funcs = SPI_MAX_IOFUNCS;
+
+ get_client_blocksize(sd);
+
+ /* Apply resync pattern cmd with all zeros to reset spi-bkplane F1 logic */
+ bcmspi_resync_f1(sd);
+
+ sd->dwordmode = FALSE;
+
+ bcmspi_card_regread(sd, 0, SPID_STATUS_ENABLE, 1, &status_en_reg);
+
+ sd_trace(("%s: Enabling interrupt with dstatus \n", __FUNCTION__));
+ status_en_reg |= INTR_WITH_STATUS;
+
+ if (bcmspi_card_regwrite(sd, SPI_FUNC_0, SPID_STATUS_ENABLE, 1,
+ status_en_reg & 0xff) != SUCCESS) {
+ sd_err(("%s: Unable to set response delay for all fun's.\n", __FUNCTION__));
+ return ERROR;
+ }
+
+#ifndef HSMODE
+#ifndef BCMSPI_ANDROID
+ /* After configuring for High-Speed mode, set the desired clock rate. */
+ if (!spi_start_clock(sd, 4)) {
+ sd_err(("spi_start_clock failed\n"));
+ return ERROR;
+ }
+#endif /* !BCMSPI_ANDROID */
+#endif /* HSMODE */
+
+ /* check to see if the response delay needs to be programmed properly */
+ {
+ uint32 f1_respdelay = 0;
+ bcmspi_card_regread(sd, 0, SPID_RESP_DELAY_F1, 1, &f1_respdelay);
+ if ((f1_respdelay == 0) || (f1_respdelay == 0xFF)) {
+ /* older sdiodevice core: no separate resp delay for each function */
+ sd_err(("older corerev < 4 so use the same resp delay for all funcs\n"));
+ sd->resp_delay_new = FALSE;
+ }
+ else {
+ /* newer sdiodevice core: separate resp delay for each function */
+ int ret_val;
+ sd->resp_delay_new = TRUE;
+ sd_err(("new corerev >= 4 so set the resp delay for each of the funcs\n"));
+ sd_trace(("resp delay for funcs f0(%d), f1(%d), f2(%d), f3(%d)\n",
+ GSPI_F0_RESP_DELAY, GSPI_F1_RESP_DELAY,
+ GSPI_F2_RESP_DELAY, GSPI_F3_RESP_DELAY));
+ ret_val = bcmspi_card_regwrite(sd, SPI_FUNC_0, SPID_RESP_DELAY_F0, 1,
+ GSPI_F0_RESP_DELAY);
+ if (ret_val != SUCCESS) {
+ sd_err(("%s: Unable to set response delay for F0\n", __FUNCTION__));
+ return ERROR;
+ }
+ ret_val = bcmspi_card_regwrite(sd, SPI_FUNC_0, SPID_RESP_DELAY_F1, 1,
+ GSPI_F1_RESP_DELAY);
+ if (ret_val != SUCCESS) {
+ sd_err(("%s: Unable to set response delay for F1\n", __FUNCTION__));
+ return ERROR;
+ }
+ ret_val = bcmspi_card_regwrite(sd, SPI_FUNC_0, SPID_RESP_DELAY_F2, 1,
+ GSPI_F2_RESP_DELAY);
+ if (ret_val != SUCCESS) {
+ sd_err(("%s: Unable to set response delay for F2\n", __FUNCTION__));
+ return ERROR;
+ }
+ ret_val = bcmspi_card_regwrite(sd, SPI_FUNC_0, SPID_RESP_DELAY_F3, 1,
+ GSPI_F3_RESP_DELAY);
+ if (ret_val != SUCCESS) {
+ sd_err(("%s: Unable to set response delay for F2\n", __FUNCTION__));
+ return ERROR;
+ }
+ }
+ }
+
+/* XXX:Cleanup after finding a common place in dhd or bcmsdh layer to do this */
+#ifndef BCMDONGLEHOST
+ if ((status = bcmspi_card_regwrite(sd, 1, SBSDIO_FUNC1_SBADDRLOW, 4,
+ SB_ENUM_BASE >> 8)) != SUCCESS)
+ return FALSE;
+#endif
+ sd->card_init_done = TRUE;
+
+#ifdef BCMDBG
+ {
+ uint8 regbuf[32];
+ int j;
+ bzero(regbuf, 32);
+ /* Read default F0 registers */
+ sd_trace(("Reading default values of first 32(8bit) F0 spid regs again before"
+ " quitting init.\n"));
+ bcmspi_card_regread(sd, 0, SPID_CONFIG, 32, (uint32 *)regbuf);
+ for (j = 0; j < 32; j++)
+ sd_trace(("regbuf[%d]=0x%x \n", j, regbuf[j]));
+ sd_trace(("\n"));
+ }
+#endif /* BCMDBG */
+ /* get the device rev to program the prop respdelays */
+
+ return SUCCESS;
+}
+
+/* XXX What is the clock rate at high and low speeds? */
+static int
+bcmspi_set_highspeed_mode(sdioh_info_t *sd, bool hsmode)
+{
+ uint32 regdata;
+ int status;
+
+ if ((status = bcmspi_card_regread(sd, 0, SPID_CONFIG,
+ 4, &regdata)) != SUCCESS)
+ return status;
+
+ sd_trace(("In %s spih-ctrl = 0x%x \n", __FUNCTION__, regdata));
+
+ if (hsmode == TRUE) {
+ sd_trace(("Attempting to enable High-Speed mode.\n"));
+
+ if (regdata & HIGH_SPEED_MODE) {
+ sd_trace(("Device is already in High-Speed mode.\n"));
+ return status;
+ } else {
+ regdata |= HIGH_SPEED_MODE;
+ sd_trace(("Writing %08x to device at %08x\n", regdata, SPID_CONFIG));
+ if ((status = bcmspi_card_regwrite(sd, 0, SPID_CONFIG,
+ 4, regdata)) != SUCCESS) {
+ return status;
+ }
+ }
+ } else {
+ sd_trace(("Attempting to disable High-Speed mode.\n"));
+
+ if (regdata & HIGH_SPEED_MODE) {
+ regdata &= ~HIGH_SPEED_MODE;
+ sd_trace(("Writing %08x to device at %08x\n", regdata, SPID_CONFIG));
+ if ((status = bcmspi_card_regwrite(sd, 0, SPID_CONFIG,
+ 4, regdata)) != SUCCESS)
+ return status;
+ }
+ else {
+ sd_trace(("Device is already in Low-Speed mode.\n"));
+ return status;
+ }
+ }
+#ifndef BCMSPI_ANDROID
+ spi_controller_highspeed_mode(sd, hsmode);
+#endif /* !BCMSPI_ANDROID */
+
+ return TRUE;
+}
+
+#define bcmspi_find_curr_mode(sd) { \
+ sd->wordlen = 2; \
+ status = bcmspi_card_regread_fixedaddr(sd, 0, SPID_TEST_READ, 4, &regdata); \
+ regdata &= 0xff; \
+ if ((regdata == 0xad) || (regdata == 0x5b) || \
+ (regdata == 0x5d) || (regdata == 0x5a)) \
+ break; \
+ sd->wordlen = 4; \
+ status = bcmspi_card_regread_fixedaddr(sd, 0, SPID_TEST_READ, 4, &regdata); \
+ regdata &= 0xff; \
+ if ((regdata == 0xad) || (regdata == 0x5b) || \
+ (regdata == 0x5d) || (regdata == 0x5a)) \
+ break; \
+ sd_trace(("Silicon testability issue: regdata = 0x%x." \
+ " Expected 0xad, 0x5a, 0x5b or 0x5d.\n", regdata)); \
+ OSL_DELAY(100000); \
+}
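+/* The bytes checked above are the low byte of the read-only test register:
+ * 0xad matches the expected pattern 0xfeedbead, while 0x5a/0x5b/0x5d appear to
+ * correspond to that pattern shifted left by one bit (0xfdda7d5a/b, see below),
+ * the corruption produced by the silicon testability issue described below.
+ */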
+
+#define INIT_ADAPT_LOOP 100
+
+/* Adapt clock-phase-speed-bitwidth between host and device */
+static bool
+bcmspi_host_device_init_adapt(sdioh_info_t *sd)
+{
+ uint32 wrregdata, regdata = 0;
+ int status;
+ int i;
+#ifdef BCMDBG
+ int j;
+ uint8 regbuf[32];
+ bzero(regbuf, 32);
+#endif /* BCMDBG */
+
+ /* Due to a silicon testability issue, the first command from the Host
+ * to the device will get corrupted (the first bit will be lost). So the
+ * Host should poll the device with a safe read request, i.e. try to read
+ * F0 addr 0x14 using the Fixed address mode (this prevents an unintended
+ * write command from being detected by the device).
+ */
+ for (i = 0; i < INIT_ADAPT_LOOP; i++) {
+ /* If the device was not power-cycled it will stay in 32bit mode with
+ * the response-delay-all bit set. Alternate the settings across
+ * iterations so that the F0 read succeeds either with or without
+ * response-delay.
+ */
+ bcmspi_find_curr_mode(sd);
+ sd->resp_delay_all = (i & 0x1) ? TRUE : FALSE;
+
+ bcmspi_find_curr_mode(sd);
+ sd->dwordmode = TRUE;
+
+ bcmspi_find_curr_mode(sd);
+ sd->dwordmode = FALSE;
+ }
+
+ /* Bail out, device not detected */
+ if (i == INIT_ADAPT_LOOP)
+ return FALSE;
+
+ /* Softreset the spid logic */
+ if ((sd->dwordmode) || (sd->wordlen == 4)) {
+ bcmspi_card_regwrite(sd, 0, SPID_RESET_BP, 1, RESET_ON_WLAN_BP_RESET|RESET_SPI);
+ bcmspi_card_regread(sd, 0, SPID_RESET_BP, 1, &regdata);
+ sd_trace(("reset reg read = 0x%x\n", regdata));
+ sd_trace(("dwordmode = %d, wordlen = %d, resp_delay_all = %d\n", sd->dwordmode,
+ sd->wordlen, sd->resp_delay_all));
+ /* Restore default state after softreset */
+ sd->wordlen = 2;
+ sd->dwordmode = FALSE;
+ }
+
+ if (sd->wordlen == 4) {
+ if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4, &regdata)) !=
+ SUCCESS)
+ return FALSE;
+ if (regdata == TEST_RO_DATA_32BIT_LE) {
+ sd_trace(("Spid is already in 32bit LE mode. Value read = 0x%x\n",
+ regdata));
+ sd_trace(("Spid power was left on.\n"));
+ } else {
+ sd_err(("Spid power was left on but signature read failed."
+ " Value read = 0x%x\n", regdata));
+ return FALSE;
+ }
+ } else {
+ sd->wordlen = 2;
+
+#define CTRL_REG_DEFAULT 0x00010430 /* according to the host machine */
+
+ wrregdata = (CTRL_REG_DEFAULT);
+
+ if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4, &regdata)) != SUCCESS)
+ return FALSE;
+ sd_trace(("(we are still in 16bit mode) 32bit READ LE regdata = 0x%x\n", regdata));
+
+#ifndef HSMODE
+ wrregdata |= (CLOCK_PHASE | CLOCK_POLARITY);
+ wrregdata &= ~HIGH_SPEED_MODE;
+ bcmspi_card_regwrite(sd, 0, SPID_CONFIG, 4, wrregdata);
+#endif /* HSMODE */
+
+ for (i = 0; i < INIT_ADAPT_LOOP; i++) {
+ if ((regdata == 0xfdda7d5b) || (regdata == 0xfdda7d5a)) {
+ sd_trace(("0xfeedbead was leftshifted by 1-bit.\n"));
+ if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4,
+ &regdata)) != SUCCESS)
+ return FALSE;
+ }
+ OSL_DELAY(1000);
+ }
+
+#if defined(CHANGE_SPI_INTR_POLARITY_ACTIVE_HIGH)
+ /* Change to host controller intr-polarity of active-high */
+ /* XXX With intr-polarity active-high, host platform does not go into suspend mode
+ * since the pin is asserted high.
+ */
+ wrregdata |= INTR_POLARITY;
+#else
+ /* Change to host controller intr-polarity of active-low */
+ wrregdata &= ~INTR_POLARITY;
+#endif /* CHANGE_SPI_INTR_POLARITY_ACTIVE_HIGH */
+
+ sd_trace(("(we are still in 16bit mode) 32bit Write LE reg-ctrl-data = 0x%x\n",
+ wrregdata));
+ /* Change to 32bit mode */
+ wrregdata |= WORD_LENGTH_32;
+ bcmspi_card_regwrite(sd, 0, SPID_CONFIG, 4, wrregdata);
+
+ /* Change command/data packaging in 32bit LE mode */
+ sd->wordlen = 4;
+
+ if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4, &regdata)) != SUCCESS)
+ return FALSE;
+
+ if (regdata == TEST_RO_DATA_32BIT_LE) {
+ sd_trace(("Read spid passed. Value read = 0x%x\n", regdata));
+ sd_trace(("Spid had power-on cycle OR spi was soft-resetted \n"));
+ } else {
+ sd_err(("Stale spid reg values read as it was kept powered. Value read ="
+ "0x%x\n", regdata));
+ return FALSE;
+ }
+ }
+
+#ifdef BCMDBG
+ /* Read default F0 registers */
+ sd_trace(("Reading default values of first 32(8bit) F0 spid regs\n"));
+ bcmspi_card_regread(sd, 0, SPID_CONFIG, 32, (uint32 *)regbuf);
+ for (j = 0; j < 32; j++)
+ sd_trace(("regbuf[%d]=0x%x \n", j, regbuf[j]));
+ sd_trace(("\n"));
+#endif /* BCMDBG */
+
+ return TRUE;
+}
+
+static bool
+bcmspi_test_card(sdioh_info_t *sd)
+{
+ uint32 regdata;
+ int status;
+#ifdef BCMDBG
+ uint8 regbuf[32];
+ bzero(regbuf, 32);
+#endif /* BCMDBG */
+
+ if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4, &regdata)) != SUCCESS)
+ return FALSE;
+
+ if (regdata == (TEST_RO_DATA_32BIT_LE))
+ sd_trace(("32bit LE regdata = 0x%x\n", regdata));
+ else {
+ sd_trace(("Incorrect 32bit LE regdata = 0x%x\n", regdata));
+ return FALSE;
+ }
+
+#define RW_PATTERN1 0xA0A1A2A3
+#define RW_PATTERN2 0x4B5B6B7B
+
+ regdata = RW_PATTERN1;
+ if ((status = bcmspi_card_regwrite(sd, 0, SPID_TEST_RW, 4, regdata)) != SUCCESS)
+ return FALSE;
+ regdata = 0;
+ if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_RW, 4, &regdata)) != SUCCESS)
+ return FALSE;
+ if (regdata != RW_PATTERN1) {
+ sd_err(("Write-Read spid failed. Value wrote = 0x%x, Value read = 0x%x\n",
+ RW_PATTERN1, regdata));
+ return FALSE;
+ } else
+ sd_trace(("R/W spid passed. Value read = 0x%x\n", regdata));
+
+ regdata = RW_PATTERN2;
+ if ((status = bcmspi_card_regwrite(sd, 0, SPID_TEST_RW, 4, regdata)) != SUCCESS)
+ return FALSE;
+ regdata = 0;
+ if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_RW, 4, &regdata)) != SUCCESS)
+ return FALSE;
+ if (regdata != RW_PATTERN2) {
+ sd_err(("Write-Read spid failed. Value wrote = 0x%x, Value read = 0x%x\n",
+ RW_PATTERN2, regdata));
+ return FALSE;
+ } else
+ sd_trace(("R/W spid passed. Value read = 0x%x\n", regdata));
+
+ return TRUE;
+}
+
+static int
+bcmspi_driver_init(sdioh_info_t *sd)
+{
+ sd_trace(("%s\n", __FUNCTION__));
+ if ((bcmspi_host_init(sd)) != SUCCESS) {
+ return ERROR;
+ }
+
+ if (bcmspi_client_init(sd) != SUCCESS) {
+ return ERROR;
+ }
+
+ return SUCCESS;
+}
+
+/* Read device reg */
+static int
+bcmspi_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data)
+{
+ int status;
+ uint32 cmd_arg, dstatus;
+
+ ASSERT(regsize);
+
+ if (func == 2)
+ sd_trace(("Reg access on F2 will generate error indication in dstatus bits.\n"));
+
+ cmd_arg = 0;
+ cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 0);
+ cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1); /* Incremental access */
+ cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func);
+ cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr);
+ cmd_arg = SFIELD(cmd_arg, SPI_LEN, regsize == BLOCK_SIZE_F2 ? 0 : regsize);
+
+ sd_trace(("%s: RD cmd_arg=0x%x func=%d regaddr=0x%x regsize=%d\n",
+ __FUNCTION__, cmd_arg, func, regaddr, regsize));
+
+ if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, data, regsize)) != SUCCESS)
+ return status;
+
+ bcmspi_cmd_getdstatus(sd, &dstatus);
+ if (dstatus)
+ sd_trace(("dstatus =0x%x\n", dstatus));
+
+ return SUCCESS;
+}
+
+static int
+bcmspi_card_regread_fixedaddr(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data)
+{
+
+ int status;
+ uint32 cmd_arg;
+ uint32 dstatus;
+
+ ASSERT(regsize);
+
+ if (func == 2)
+ sd_trace(("Reg access on F2 will generate error indication in dstatus bits.\n"));
+
+ cmd_arg = 0;
+ cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 0);
+ cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 0); /* Fixed access */
+ cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func);
+ cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr);
+ cmd_arg = SFIELD(cmd_arg, SPI_LEN, regsize);
+
+ sd_trace(("%s: RD cmd_arg=0x%x func=%d regaddr=0x%x regsize=%d\n",
+ __FUNCTION__, cmd_arg, func, regaddr, regsize));
+
+ if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, data, regsize)) != SUCCESS)
+ return status;
+
+ sd_trace(("%s: RD result=0x%x\n", __FUNCTION__, *data));
+
+ bcmspi_cmd_getdstatus(sd, &dstatus);
+ sd_trace(("dstatus =0x%x\n", dstatus));
+ return SUCCESS;
+}
+
+/* write a device register */
+static int
+bcmspi_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 data)
+{
+ int status;
+ uint32 cmd_arg, dstatus;
+
+ ASSERT(regsize);
+
+ cmd_arg = 0;
+
+ cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 1);
+ cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1); /* Incremental access */
+ cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func);
+ cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr);
+ cmd_arg = SFIELD(cmd_arg, SPI_LEN, regsize == BLOCK_SIZE_F2 ? 0 : regsize);
+
+ sd_trace(("%s: WR cmd_arg=0x%x func=%d regaddr=0x%x regsize=%d data=0x%x\n",
+ __FUNCTION__, cmd_arg, func, regaddr, regsize, data));
+
+ if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, &data, regsize)) != SUCCESS)
+ return status;
+
+ bcmspi_cmd_getdstatus(sd, &dstatus);
+ if (dstatus)
+ sd_trace(("dstatus=0x%x\n", dstatus));
+
+ return SUCCESS;
+}
+
+/* write a device register - 1 byte */
+static int
+bcmspi_card_bytewrite(sdioh_info_t *sd, int func, uint32 regaddr, uint8 *byte)
+{
+ int status;
+ uint32 cmd_arg;
+ uint32 dstatus;
+ uint32 data = (uint32)(*byte);
+
+ cmd_arg = 0;
+ cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func);
+ cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1); /* Incremental access */
+ cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr);
+ cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 1);
+ cmd_arg = SFIELD(cmd_arg, SPI_LEN, 1);
+
+ sd_trace(("%s: WR cmd_arg=0x%x func=%d regaddr=0x%x data=0x%x\n",
+ __FUNCTION__, cmd_arg, func, regaddr, data));
+
+ if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, &data, 1)) != SUCCESS)
+ return status;
+
+ bcmspi_cmd_getdstatus(sd, &dstatus);
+ if (dstatus)
+ sd_trace(("dstatus =0x%x\n", dstatus));
+
+ return SUCCESS;
+}
+
+void
+bcmspi_cmd_getdstatus(sdioh_info_t *sd, uint32 *dstatus_buffer)
+{
+ *dstatus_buffer = sd->card_dstatus;
+}
+
+/* 'data' is of type uint32 whereas other buffers are of type uint8 */
+static int
+bcmspi_cmd_issue(sdioh_info_t *sd, bool use_dma, uint32 cmd_arg,
+ uint32 *data, uint32 datalen)
+{
+ uint32 i, j;
+ uint8 resp_delay = 0;
+ int err = SUCCESS;
+ uint32 hostlen;
+ uint32 spilen = 0;
+ uint32 dstatus_idx = 0;
+ uint16 templen, buslen, len, *ptr = NULL;
+
+ sd_trace(("spi cmd = 0x%x\n", cmd_arg));
+#ifdef BCMDBG
+ /* Fill up buffer with known pattern */
+ memset(spi_outbuf, 0xee, SPI_MAX_PKT_LEN);
+ memset(spi_inbuf, 0xee, SPI_MAX_PKT_LEN);
+#endif /* BCMDBG */
+
+ /* Set up and issue the SPI command. MSByte goes out on bus first. Increase datalen
+ * according to the wordlen mode(16/32bit) the device is in.
+ */
+ if (sd->wordlen == 4) { /* 32bit spid */
+ *(uint32 *)spi_outbuf = SPISWAP_WD4(cmd_arg);
+ if (datalen & 0x3)
+ datalen += (4 - (datalen & 0x3));
+ } else if (sd->wordlen == 2) { /* 16bit spid */
+ *(uint32 *)spi_outbuf = SPISWAP_WD2(cmd_arg);
+ if (datalen & 0x1)
+ datalen++;
+ if (datalen < 4)
+ datalen = ROUNDUP(datalen, 4);
+ } else {
+ sd_err(("Host is %d bit spid, could not create SPI command.\n",
+ 8 * sd->wordlen));
+ return ERROR;
+ }
+
+ /* for Write, put the data into the output buffer */
+ if (GFIELD(cmd_arg, SPI_RW_FLAG) == 1) {
+ /* The len field of the hw-header is always sent as a mod-16 size, both from host and dongle */
+ if (datalen != 0) {
+ for (i = 0; i < datalen/4; i++) {
+ if (sd->wordlen == 4) { /* 32bit spid */
+ *(uint32 *)&spi_outbuf[i * 4 + CMDLEN] =
+ SPISWAP_WD4(data[i]);
+ } else if (sd->wordlen == 2) { /* 16bit spid */
+ *(uint32 *)&spi_outbuf[i * 4 + CMDLEN] =
+ SPISWAP_WD2(data[i]);
+ }
+ }
+ }
+ }
+
+ /* Append resp-delay number of bytes and clock them out for F0/1/2 reads. */
+ if ((GFIELD(cmd_arg, SPI_RW_FLAG) == 0)) {
+ int func = GFIELD(cmd_arg, SPI_FUNCTION);
+ switch (func) {
+ case 0:
+ if (sd->resp_delay_new)
+ resp_delay = GSPI_F0_RESP_DELAY;
+ else
+ resp_delay = sd->resp_delay_all ? F0_RESPONSE_DELAY : 0;
+ break;
+ case 1:
+ if (sd->resp_delay_new)
+ resp_delay = GSPI_F1_RESP_DELAY;
+ else
+ resp_delay = F1_RESPONSE_DELAY;
+ break;
+ case 2:
+ if (sd->resp_delay_new)
+ resp_delay = GSPI_F2_RESP_DELAY;
+ else
+ resp_delay = sd->resp_delay_all ? F2_RESPONSE_DELAY : 0;
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+ /* Program response delay */
+ if (sd->resp_delay_new == FALSE)
+ bcmspi_prog_resp_delay(sd, func, resp_delay);
+ }
+
+ /* +4 for cmd and +4 for dstatus */
+ hostlen = datalen + 8 + resp_delay;
+ hostlen += dstatus_idx;
+#ifdef BCMSPI_ANDROID
+ if (hostlen%4) {
+ sd_err(("Unaligned data len %d, hostlen %d\n",
+ datalen, hostlen));
+#endif /* BCMSPI_ANDROID */
+ hostlen += (4 - (hostlen & 0x3));
+#ifdef BCMSPI_ANDROID
+ }
+#endif /* BCMSPI_ANDROID */
+#ifdef BCMDBG
+ if ((GFIELD(cmd_arg, SPI_RW_FLAG) == 1) &&
+ (sd->dwordmode) &&
+ (GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_2)) {
+ sd_trace(("len/~len/spilen/hostlen=0x%x/0x%x/0x%x/0x%x\n",
+ *ptr, ~*(ptr+1), spilen, hostlen));
+ }
+#endif /* BCMDBG */
+ spi_sendrecv(sd, spi_outbuf, spi_inbuf, hostlen);
+
+ /* for Read, get the data into the input buffer */
+ if (datalen != 0) {
+ if (GFIELD(cmd_arg, SPI_RW_FLAG) == 0) { /* if read cmd */
+ for (j = 0; j < datalen/4; j++) {
+ if (sd->wordlen == 4) { /* 32bit spid */
+ data[j] = SPISWAP_WD4(*(uint32 *)&spi_inbuf[j * 4 +
+ CMDLEN + resp_delay]);
+ } else if (sd->wordlen == 2) { /* 16bit spid */
+ data[j] = SPISWAP_WD2(*(uint32 *)&spi_inbuf[j * 4 +
+ CMDLEN + resp_delay]);
+ }
+ }
+ }
+ }
+
+ dstatus_idx += (datalen + CMDLEN + resp_delay);
+ /* Last 4bytes are dstatus. Device is configured to return status bits. */
+ if (sd->wordlen == 4) { /* 32bit spid */
+ sd->card_dstatus = SPISWAP_WD4(*(uint32 *)&spi_inbuf[dstatus_idx]);
+ } else if (sd->wordlen == 2) { /* 16bit spid */
+ sd->card_dstatus = SPISWAP_WD2(*(uint32 *)&spi_inbuf[dstatus_idx]);
+ } else {
+ sd_err(("Host is %d bit machine, could not read SPI dstatus.\n",
+ 8 * sd->wordlen));
+ return ERROR;
+ }
+ if (sd->card_dstatus == 0xffffffff) {
+ sd_err(("looks like not a GSPI device or device is not powered.\n"));
+ }
+
+ err = bcmspi_update_stats(sd, cmd_arg);
+#ifdef BCMDBG
+ if (err)
+ prhex("Overflowing frame", (uint8 *)data, datalen);
+#endif /* BCMDBG */
+
+ return err;
+
+}
+
+static int
+bcmspi_card_buf(sdioh_info_t *sd, int rw, int func, bool fifo,
+ uint32 addr, int nbytes, uint32 *data)
+{
+ int status;
+ uint32 cmd_arg;
+ bool write = rw == SDIOH_READ ? 0 : 1;
+ uint retries = 0;
+
+ bool enable;
+ uint32 spilen;
+
+ cmd_arg = 0;
+
+ ASSERT(nbytes);
+ ASSERT(nbytes <= sd->client_block_size[func]);
+
+ if (write) sd->t_cnt++; else sd->r_cnt++;
+
+ if (func == 2) {
+ /* Frame len check limited by gSPI. */
+ if ((nbytes > 2000) && write) {
+ sd_trace((">2KB write: F2 wr of %d bytes\n", nbytes));
+#ifdef BCMDBG
+ prhex("Host for gSPI", (uint8 *)data, 32);
+#endif /* BCMDBG */
+ }
+ /* ASSERT(nbytes <= 2048); Fix bigger len gspi issue and uncomment. */
+ /* If F2 fifo on device is not ready to receive data, don't do F2 transfer */
+ if (write) {
+ uint32 dstatus;
+ /* check F2 ready with cached one */
+ bcmspi_cmd_getdstatus(sd, &dstatus);
+ if ((dstatus & STATUS_F2_RX_READY) == 0) {
+ retries = WAIT_F2RXFIFORDY;
+ enable = 0;
+ while (retries-- && !enable) {
+ OSL_DELAY(WAIT_F2RXFIFORDY_DELAY * 1000);
+ bcmspi_card_regread(sd, SPI_FUNC_0, SPID_STATUS_REG, 4,
+ &dstatus);
+ if (dstatus & STATUS_F2_RX_READY)
+ enable = TRUE;
+ }
+ if (!enable) {
+ struct spierrstats_t *spierrstats = &sd->spierrstats;
+ spierrstats->f2rxnotready++;
+ sd_err(("F2 FIFO is not ready to receive data.\n"));
+ return ERROR;
+ }
+ sd_trace(("No of retries on F2 ready %d\n",
+ (WAIT_F2RXFIFORDY - retries)));
+ }
+ }
+ }
+
+ /* F2 transfers happen on 0 addr */
+ addr = (func == 2) ? 0 : addr;
+
+ /* In PIO mode the buffer is read using the fixed-address FIFO on func 1 */
+ if ((func == 1) && (fifo))
+ cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 0);
+ else
+ cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1);
+
+ cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func);
+ cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, addr);
+ cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, write);
+ spilen = sd->data_xfer_count = MIN(sd->client_block_size[func], nbytes);
+ if ((sd->dwordmode == TRUE) && (GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_2)) {
+ /* convert len to mod4 size */
+ spilen = spilen + ((spilen & 0x3) ? (4 - (spilen & 0x3)): 0);
+ cmd_arg = SFIELD(cmd_arg, SPI_LEN, (spilen >> 2));
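+ /* e.g. an F2 write of 2045 bytes rounds up to 2048, and SPI_LEN then
+ * carries 512, the 4-byte-word count.
+ */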
+ } else
+ cmd_arg = SFIELD(cmd_arg, SPI_LEN, spilen);
+
+ if ((func == 2) && (fifo == 1)) {
+ sd_data(("%s: %s func %d, %s, addr 0x%x, len %d bytes, r_cnt %d t_cnt %d\n",
+ __FUNCTION__, write ? "Wr" : "Rd", func, "INCR",
+ addr, nbytes, sd->r_cnt, sd->t_cnt));
+ }
+
+ sd_trace(("%s cmd_arg = 0x%x\n", __FUNCTION__, cmd_arg));
+ sd_data(("%s: %s func %d, %s, addr 0x%x, len %d bytes, r_cnt %d t_cnt %d\n",
+ __FUNCTION__, write ? "Wd" : "Rd", func, "INCR",
+ addr, nbytes, sd->r_cnt, sd->t_cnt));
+
+ if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, data, nbytes)) != SUCCESS) {
+ sd_err(("%s: cmd_issue failed for %s\n", __FUNCTION__,
+ (write ? "write" : "read")));
+ return status;
+ }
+
+ /* gSPI expects that hw-header-len is equal to spi-command-len */
+ if ((func == 2) && (rw == SDIOH_WRITE) && (sd->dwordmode == FALSE)) {
+ ASSERT((uint16)sd->data_xfer_count == (uint16)(*data & 0xffff));
+ ASSERT((uint16)sd->data_xfer_count == (uint16)(~((*data & 0xffff0000) >> 16)));
+ }
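+ /* The first hw-header word of an F2 write is (len | (~len << 16)); e.g. a
+ * 64-byte frame starts with 0xffbf0040, which the asserts above check
+ * against data_xfer_count.
+ */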
+
+ if ((nbytes > 2000) && !write) {
+ sd_trace((">2KB read: F2 rd of %d bytes\n", nbytes));
+#ifdef BCMDBG
+ prhex("Host for gSPI", (uint8 *)data, 32);
+#endif /* BCMDBG */
+ }
+
+ return SUCCESS;
+}
+
+/* Reset and re-initialize the device */
+int
+sdioh_sdio_reset(sdioh_info_t *si)
+{
+ si->card_init_done = FALSE;
+ return bcmspi_client_init(si);
+}
+
+SDIOH_API_RC
+sdioh_gpioouten(sdioh_info_t *sd, uint32 gpio)
+{
+ return SDIOH_API_RC_FAIL;
+}
+
+SDIOH_API_RC
+sdioh_gpioout(sdioh_info_t *sd, uint32 gpio, bool enab)
+{
+ return SDIOH_API_RC_FAIL;
+}
+
+bool
+sdioh_gpioin(sdioh_info_t *sd, uint32 gpio)
+{
+ return FALSE;
+}
+
+SDIOH_API_RC
+sdioh_gpio_init(sdioh_info_t *sd)
+{
+ return SDIOH_API_RC_FAIL;
+}
diff --git a/bcmdhd.101.10.361.x/bcmsrom.c b/bcmdhd.101.10.361.x/bcmsrom.c
new file mode 100755
index 0000000..c17a2dc
--- /dev/null
+++ b/bcmdhd.101.10.361.x/bcmsrom.c
@@ -0,0 +1,6365 @@
+/*
+ * Routines to access SPROM and to parse SROM/CIS variables.
+ *
+ * Despite its file name, OTP contents are also parsed in this file.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+/*
+ * List of non-obvious preprocessor defines used in this file and their meaning:
+ * DONGLEBUILD : building firmware that runs on the dongle's CPU
+ * BCM_DONGLEVARS : NVRAM variables can be read from OTP/S(P)ROM.
+ * When host may supply nvram vars in addition to the ones in OTP/SROM:
+ * BCMHOSTVARS : full nic / full dongle
+ * BCMDONGLEHOST : defined when building DHD, code executes on the host in a dongle environment.
+ * DHD_SPROM : defined when building a DHD that supports reading/writing to SPROM
+ */
+
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <stdarg.h>
+#include <bcmutils.h>
+#include <hndsoc.h>
+#include <sbchipc.h>
+#include <bcmdevs.h>
+#include <bcmendian.h>
+#include <sbpcmcia.h>
+#include <pcicfg.h>
+#include <siutils.h>
+#include <bcmsrom.h>
+#include <bcmsrom_tbl.h>
+#ifdef BCMSDIO
+#include <bcmsdh.h>
+#include <sdio.h>
+#endif
+#ifdef BCMSPI
+#include <spid.h>
+#endif
+
+#include <bcmnvram.h>
+#include <bcmotp.h>
+#ifndef BCMUSBDEV_COMPOSITE
+#define BCMUSBDEV_COMPOSITE
+#endif
+#if defined(BCMUSBDEV) || defined(BCMSDIO) || defined(BCMSDIODEV)
+#include <sbsdio.h>
+#include <sbhnddma.h>
+#include <sbsdpcmdev.h>
+#endif
+
+#if defined(WLTEST) || defined (DHD_SPROM) || defined (BCMDBG)
+#include <sbsprom.h>
+#endif
+#include <ethernet.h> /* for sprom content groking */
+
+#include <sbgci.h>
+#ifdef EVENT_LOG_COMPILE
+#include <event_log.h>
+#endif
+
+#if defined(EVENT_LOG_COMPILE) && defined(BCMDBG_ERR) && defined(ERR_USE_EVENT_LOG)
+#if defined(ERR_USE_EVENT_LOG_RA)
+#define BS_ERROR(args) EVENT_LOG_RA(EVENT_LOG_TAG_BSROM_ERROR, args)
+#else
+#define BS_ERROR(args) EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_BSROM_ERROR, args)
+#endif /* ERR_USE_EVENT_LOG_RA */
+#elif defined(BCMDBG_ERR) || defined(WLTEST)
+#define BS_ERROR(args) printf args
+#else
+#define BS_ERROR(args)
+#endif /* defined(BCMDBG_ERR) && defined(ERR_USE_EVENT_LOG) */
+
+#if defined(BCMPCIEDEV_SROM_FORMAT) && defined(WLC_TXCAL)
+static bool BCMATTACHDATA(is_caldata_prsnt) = FALSE;
+static uint16 BCMATTACHDATA(caldata_array)[SROM_MAX / 2];
+static uint8 BCMATTACHDATA(srom_sromrev);
+#endif
+
+static const char BCMATTACHDATA(rstr_uuidstr)[] =
+ "%02X%02X%02X%02X-%02X%02X-%02X%02X-%02X%02X-%02X%02X%02X%02X%02X%02X";
+static const char BCMATTACHDATA(rstr_paddr)[] = "pa%d=0x%%x";
+static const char BCMATTACHDATA(rstr_pdata)[] = "pd%d=0x%%x";
+static const char BCMATTACHDATA(rstr_pdatah)[] = "pdh%d=0x%%x";
+static const char BCMATTACHDATA(rstr_pdatal)[] = "pdl%d=0x%%x";
+static const char BCMATTACHDATA(rstr_gci_ccreg_entry)[] = "gcr%d=0x%%x";
+static const char BCMATTACHDATA(rstr_hex)[] = "0x%x";
+
+/** curmap: contains host start address of PCI BAR0 window */
+static volatile uint8* srom_offset(si_t *sih, volatile void *curmap)
+{
+ if (sih->ccrev <= 31)
+ return (volatile uint8*)curmap + PCI_BAR0_SPROM_OFFSET;
+ if ((sih->cccaps & CC_CAP_SROM) == 0)
+ return NULL;
+
+ if (BUSTYPE(sih->bustype) == SI_BUS)
+ return (uint8 *)((uintptr)SI_ENUM_BASE(sih) + CC_SROM_OTP);
+
+ return (volatile uint8*)curmap + PCI_16KB0_CCREGS_OFFSET + CC_SROM_OTP;
+}
+
+#if defined(WLTEST) || defined (DHD_SPROM) || defined (BCMDBG)
+#define WRITE_ENABLE_DELAY 500 /* 500 ms after write enable/disable toggle */
+#define WRITE_WORD_DELAY 20 /* 20 ms between each word write */
+#endif
+
+srom_info_t *sromh = NULL;
+
+extern char *_vars;
+extern uint _varsz;
+#ifdef DONGLEBUILD
+char * BCMATTACHDATA(_vars_otp) = NULL;
+#define DONGLE_STORE_VARS_OTP_PTR(v) (_vars_otp = (v))
+#else
+#define DONGLE_STORE_VARS_OTP_PTR(v)
+#endif
+
+#define SROM_CIS_SINGLE 1
+
+#if !defined(BCMDONGLEHOST)
+static int initvars_srom_si(si_t *sih, osl_t *osh, volatile void *curmap, char **vars, uint *count);
+static void _initvars_srom_pci(uint8 sromrev, uint16 *srom, uint off, varbuf_t *b);
+static int initvars_srom_pci(si_t *sih, volatile void *curmap, char **vars, uint *count);
+static int initvars_cis_pci(si_t *sih, osl_t *osh, volatile void *curmap, char **vars, uint *count);
+#endif /* !defined(BCMDONGLEHOST) */
+#if !defined(BCMUSBDEV_ENABLED) && !defined(BCMSDIODEV_ENABLED) &&\
+ !defined(BCMDONGLEHOST) && !defined(BCMPCIEDEV_ENABLED)
+static int initvars_flash_si(si_t *sih, char **vars, uint *count);
+#endif /* !defined(BCMUSBDEV) && !defined(BCMSDIODEV) && !defined(BCMDONGLEHOST) */
+#ifdef BCMSDIO
+#if !defined(BCMDONGLEHOST)
+static int initvars_cis_sdio(si_t *sih, osl_t *osh, char **vars, uint *count);
+#endif /* !defined(BCMDONGLEHOST) */
+static int sprom_cmd_sdio(osl_t *osh, uint8 cmd);
+static int sprom_read_sdio(osl_t *osh, uint16 addr, uint16 *data);
+#if defined(WLTEST) || defined (DHD_SPROM) || defined (BCMDBG)
+static int sprom_write_sdio(osl_t *osh, uint16 addr, uint16 data);
+#endif /* defined(WLTEST) || defined (DHD_SPROM) || defined (BCMDBG) */
+#endif /* BCMSDIO */
+#if !defined(BCMDONGLEHOST)
+#ifdef BCMSPI
+static int initvars_cis_spi(si_t *sih, osl_t *osh, char **vars, uint *count);
+#endif /* BCMSPI */
+#endif /* !defined(BCMDONGLEHOST) */
+static int sprom_read_pci(osl_t *osh, si_t *sih, volatile uint16 *sprom, uint wordoff, uint16 *buf,
+ uint nwords, bool check_crc);
+#if !defined(BCMDONGLEHOST)
+#if defined(BCMNVRAMW) || defined(BCMNVRAMR)
+static int otp_read_pci(osl_t *osh, si_t *sih, uint16 *buf, uint bufsz);
+#endif /* defined(BCMNVRAMW) || defined(BCMNVRAMR) */
+#endif /* !defined(BCMDONGLEHOST) */
+static uint16 srom_cc_cmd(si_t *sih, osl_t *osh, volatile void *ccregs, uint32 cmd, uint wordoff,
+ uint16 data);
+
+#if !defined(BCMDONGLEHOST)
+static int initvars_flash(si_t *sih, osl_t *osh, char **vp, uint len);
+int dbushost_initvars_flash(si_t *sih, osl_t *osh, char **base, uint len);
+static uint get_max_cis_size(si_t *sih);
+#endif /* !defined(BCMDONGLEHOST) */
+
+#if defined (BCMHOSTVARS)
+/* Also used by wl_readconfigdata for vars download */
+char BCMATTACHDATA(mfgsromvars)[VARS_MAX];
+int BCMATTACHDATA(defvarslen) = 0;
+#endif /* defined(BCMHOSTVARS) */
+
+#if !defined(BCMDONGLEHOST)
+#if defined (BCMHOSTVARS)
+/* FIXME: Fake 4331 SROM to boot 4331 driver on QT w/o SPROM/OTP */
+static char BCMATTACHDATA(defaultsromvars_4331)[] =
+ "sromrev=9\0"
+ "boardrev=0x1104\0"
+ "boardflags=0x200\0"
+ "boardflags2=0x0\0"
+ "boardtype=0x524\0"
+ "boardvendor=0x14e4\0"
+ "boardnum=0x2064\0"
+ "macaddr=00:90:4c:1a:20:64\0"
+ "ccode=0x0\0"
+ "regrev=0x0\0"
+ "opo=0x0\0"
+ "aa2g=0x7\0"
+ "aa5g=0x7\0"
+ "ag0=0x2\0"
+ "ag1=0x2\0"
+ "ag2=0x2\0"
+ "ag3=0xff\0"
+ "pa0b0=0xfe7f\0"
+ "pa0b1=0x15d9\0"
+ "pa0b2=0xfac6\0"
+ "pa0itssit=0x20\0"
+ "pa0maxpwr=0x48\0"
+ "pa1b0=0xfe89\0"
+ "pa1b1=0x14b1\0"
+ "pa1b2=0xfada\0"
+ "pa1lob0=0xffff\0"
+ "pa1lob1=0xffff\0"
+ "pa1lob2=0xffff\0"
+ "pa1hib0=0xfe8f\0"
+ "pa1hib1=0x13df\0"
+ "pa1hib2=0xfafa\0"
+ "pa1itssit=0x3e\0"
+ "pa1maxpwr=0x3c\0"
+ "pa1lomaxpwr=0x3c\0"
+ "pa1himaxpwr=0x3c\0"
+ "bxa2g=0x3\0"
+ "rssisav2g=0x7\0"
+ "rssismc2g=0xf\0"
+ "rssismf2g=0xf\0"
+ "bxa5g=0x3\0"
+ "rssisav5g=0x7\0"
+ "rssismc5g=0xf\0"
+ "rssismf5g=0xf\0"
+ "tri2g=0xff\0"
+ "tri5g=0xff\0"
+ "tri5gl=0xff\0"
+ "tri5gh=0xff\0"
+ "rxpo2g=0xff\0"
+ "rxpo5g=0xff\0"
+ "txchain=0x7\0"
+ "rxchain=0x7\0"
+ "antswitch=0x0\0"
+ "tssipos2g=0x1\0"
+ "extpagain2g=0x2\0"
+ "pdetrange2g=0x4\0"
+ "triso2g=0x3\0"
+ "antswctl2g=0x0\0"
+ "tssipos5g=0x1\0"
+ "elna2g=0xff\0"
+ "extpagain5g=0x2\0"
+ "pdetrange5g=0x4\0"
+ "triso5g=0x3\0"
+ "antswctl5g=0x0\0"
+ "elna5g=0xff\0"
+ "cckbw202gpo=0x0\0"
+ "cckbw20ul2gpo=0x0\0"
+ "legofdmbw202gpo=0x0\0"
+ "legofdmbw20ul2gpo=0x0\0"
+ "legofdmbw205glpo=0x0\0"
+ "legofdmbw20ul5glpo=0x0\0"
+ "legofdmbw205gmpo=0x0\0"
+ "legofdmbw20ul5gmpo=0x0\0"
+ "legofdmbw205ghpo=0x0\0"
+ "legofdmbw20ul5ghpo=0x0\0"
+ "mcsbw202gpo=0x0\0"
+ "mcsbw20ul2gpo=0x0\0"
+ "mcsbw402gpo=0x0\0"
+ "mcsbw205glpo=0x0\0"
+ "mcsbw20ul5glpo=0x0\0"
+ "mcsbw405glpo=0x0\0"
+ "mcsbw205gmpo=0x0\0"
+ "mcsbw20ul5gmpo=0x0\0"
+ "mcsbw405gmpo=0x0\0"
+ "mcsbw205ghpo=0x0\0"
+ "mcsbw20ul5ghpo=0x0\0"
+ "mcsbw405ghpo=0x0\0"
+ "mcs32po=0x0\0"
+ "legofdm40duppo=0x0\0"
+ "maxp2ga0=0x48\0"
+ "itt2ga0=0x20\0"
+ "itt5ga0=0x3e\0"
+ "pa2gw0a0=0xfe7f\0"
+ "pa2gw1a0=0x15d9\0"
+ "pa2gw2a0=0xfac6\0"
+ "maxp5ga0=0x3c\0"
+ "maxp5gha0=0x3c\0"
+ "maxp5gla0=0x3c\0"
+ "pa5gw0a0=0xfe89\0"
+ "pa5gw1a0=0x14b1\0"
+ "pa5gw2a0=0xfada\0"
+ "pa5glw0a0=0xffff\0"
+ "pa5glw1a0=0xffff\0"
+ "pa5glw2a0=0xffff\0"
+ "pa5ghw0a0=0xfe8f\0"
+ "pa5ghw1a0=0x13df\0"
+ "pa5ghw2a0=0xfafa\0"
+ "maxp2ga1=0x48\0"
+ "itt2ga1=0x20\0"
+ "itt5ga1=0x3e\0"
+ "pa2gw0a1=0xfe54\0"
+ "pa2gw1a1=0x1563\0"
+ "pa2gw2a1=0xfa7f\0"
+ "maxp5ga1=0x3c\0"
+ "maxp5gha1=0x3c\0"
+ "maxp5gla1=0x3c\0"
+ "pa5gw0a1=0xfe53\0"
+ "pa5gw1a1=0x14fe\0"
+ "pa5gw2a1=0xfa94\0"
+ "pa5glw0a1=0xffff\0"
+ "pa5glw1a1=0xffff\0"
+ "pa5glw2a1=0xffff\0"
+ "pa5ghw0a1=0xfe6e\0"
+ "pa5ghw1a1=0x1457\0"
+ "pa5ghw2a1=0xfab9\0"
+ "END\0";
+
+static char BCMATTACHDATA(defaultsromvars_4360)[] =
+ "sromrev=11\0"
+ "boardrev=0x1421\0"
+ "boardflags=0x10401001\0"
+ "boardflags2=0x0\0"
+ "boardtype=0x61b\0"
+ "subvid=0x14e4\0"
+ "boardflags3=0x1\0"
+ "boardnum=62526\0"
+ "macaddr=00:90:4c:0d:f4:3e\0"
+ "ccode=X0\0"
+ "regrev=15\0"
+ "aa2g=7\0"
+ "aa5g=7\0"
+ "agbg0=71\0"
+ "agbg1=71\0"
+ "agbg2=133\0"
+ "aga0=71\0"
+ "aga1=133\0"
+ "aga2=133\0"
+ "antswitch=0\0"
+ "tssiposslope2g=1\0"
+ "epagain2g=0\0"
+ "pdgain2g=9\0"
+ "tworangetssi2g=0\0"
+ "papdcap2g=0\0"
+ "femctrl=2\0"
+ "tssiposslope5g=1\0"
+ "epagain5g=0\0"
+ "pdgain5g=9\0"
+ "tworangetssi5g=0\0"
+ "papdcap5g=0\0"
+ "gainctrlsph=0\0"
+ "tempthresh=255\0"
+ "tempoffset=255\0"
+ "rawtempsense=0x1ff\0"
+ "measpower=0x7f\0"
+ "tempsense_slope=0xff\0"
+ "tempcorrx=0x3f\0"
+ "tempsense_option=0x3\0"
+ "xtalfreq=65535\0"
+ "phycal_tempdelta=255\0"
+ "temps_period=15\0"
+ "temps_hysteresis=15\0"
+ "measpower1=0x7f\0"
+ "measpower2=0x7f\0"
+ "pdoffset2g40ma0=15\0"
+ "pdoffset2g40ma1=15\0"
+ "pdoffset2g40ma2=15\0"
+ "pdoffset2g40mvalid=1\0"
+ "pdoffset40ma0=9010\0"
+ "pdoffset40ma1=12834\0"
+ "pdoffset40ma2=8994\0"
+ "pdoffset80ma0=16\0"
+ "pdoffset80ma1=4096\0"
+ "pdoffset80ma2=0\0"
+ "subband5gver=0x4\0"
+ "cckbw202gpo=0\0"
+ "cckbw20ul2gpo=0\0"
+ "mcsbw202gpo=2571386880\0"
+ "mcsbw402gpo=2571386880\0"
+ "dot11agofdmhrbw202gpo=17408\0"
+ "ofdmlrbw202gpo=0\0"
+ "mcsbw205glpo=4001923072\0"
+ "mcsbw405glpo=4001923072\0"
+ "mcsbw805glpo=4001923072\0"
+ "mcsbw1605glpo=0\0"
+ "mcsbw205gmpo=3431497728\0"
+ "mcsbw405gmpo=3431497728\0"
+ "mcsbw805gmpo=3431497728\0"
+ "mcsbw1605gmpo=0\0"
+ "mcsbw205ghpo=3431497728\0"
+ "mcsbw405ghpo=3431497728\0"
+ "mcsbw805ghpo=3431497728\0"
+ "mcsbw1605ghpo=0\0"
+ "mcslr5glpo=0\0"
+ "mcslr5gmpo=0\0"
+ "mcslr5ghpo=0\0"
+ "sb20in40hrpo=0\0"
+ "sb20in80and160hr5glpo=0\0"
+ "sb40and80hr5glpo=0\0"
+ "sb20in80and160hr5gmpo=0\0"
+ "sb40and80hr5gmpo=0\0"
+ "sb20in80and160hr5ghpo=0\0"
+ "sb40and80hr5ghpo=0\0"
+ "sb20in40lrpo=0\0"
+ "sb20in80and160lr5glpo=0\0"
+ "sb40and80lr5glpo=0\0"
+ "sb20in80and160lr5gmpo=0\0"
+ "sb40and80lr5gmpo=0\0"
+ "sb20in80and160lr5ghpo=0\0"
+ "sb40and80lr5ghpo=0\0"
+ "dot11agduphrpo=0\0"
+ "dot11agduplrpo=0\0"
+ "pcieingress_war=15\0"
+ "sar2g=18\0"
+ "sar5g=15\0"
+ "noiselvl2ga0=31\0"
+ "noiselvl2ga1=31\0"
+ "noiselvl2ga2=31\0"
+ "noiselvl5ga0=31,31,31,31\0"
+ "noiselvl5ga1=31,31,31,31\0"
+ "noiselvl5ga2=31,31,31,31\0"
+ "rxgainerr2ga0=63\0"
+ "rxgainerr2ga1=31\0"
+ "rxgainerr2ga2=31\0"
+ "rxgainerr5ga0=63,63,63,63\0"
+ "rxgainerr5ga1=31,31,31,31\0"
+ "rxgainerr5ga2=31,31,31,31\0"
+ "maxp2ga0=76\0"
+ "pa2ga0=0xff3c,0x172c,0xfd20\0"
+ "rxgains5gmelnagaina0=7\0"
+ "rxgains5gmtrisoa0=15\0"
+ "rxgains5gmtrelnabypa0=1\0"
+ "rxgains5ghelnagaina0=7\0"
+ "rxgains5ghtrisoa0=15\0"
+ "rxgains5ghtrelnabypa0=1\0"
+ "rxgains2gelnagaina0=4\0"
+ "rxgains2gtrisoa0=7\0"
+ "rxgains2gtrelnabypa0=1\0"
+ "rxgains5gelnagaina0=3\0"
+ "rxgains5gtrisoa0=7\0"
+ "rxgains5gtrelnabypa0=1\0"
+ "maxp5ga0=76,76,76,76\0"
+"pa5ga0=0xff3a,0x14d4,0xfd5f,0xff36,0x1626,0xfd2e,0xff42,0x15bd,0xfd47,0xff39,0x15a3,0xfd3d\0"
+ "maxp2ga1=76\0"
+ "pa2ga1=0xff2a,0x16b2,0xfd28\0"
+ "rxgains5gmelnagaina1=7\0"
+ "rxgains5gmtrisoa1=15\0"
+ "rxgains5gmtrelnabypa1=1\0"
+ "rxgains5ghelnagaina1=7\0"
+ "rxgains5ghtrisoa1=15\0"
+ "rxgains5ghtrelnabypa1=1\0"
+ "rxgains2gelnagaina1=3\0"
+ "rxgains2gtrisoa1=6\0"
+ "rxgains2gtrelnabypa1=1\0"
+ "rxgains5gelnagaina1=3\0"
+ "rxgains5gtrisoa1=6\0"
+ "rxgains5gtrelnabypa1=1\0"
+ "maxp5ga1=76,76,76,76\0"
+"pa5ga1=0xff4e,0x1530,0xfd53,0xff58,0x15b4,0xfd4d,0xff58,0x1671,0xfd2f,0xff55,0x15e2,0xfd46\0"
+ "maxp2ga2=76\0"
+ "pa2ga2=0xff3c,0x1736,0xfd1f\0"
+ "rxgains5gmelnagaina2=7\0"
+ "rxgains5gmtrisoa2=15\0"
+ "rxgains5gmtrelnabypa2=1\0"
+ "rxgains5ghelnagaina2=7\0"
+ "rxgains5ghtrisoa2=15\0"
+ "rxgains5ghtrelnabypa2=1\0"
+ "rxgains2gelnagaina2=4\0"
+ "rxgains2gtrisoa2=7\0"
+ "rxgains2gtrelnabypa2=1\0"
+ "rxgains5gelnagaina2=3\0"
+ "rxgains5gtrisoa2=7\0"
+ "rxgains5gtrelnabypa2=1\0"
+ "maxp5ga2=76,76,76,76\0"
+"pa5ga2=0xff2d,0x144a,0xfd63,0xff35,0x15d7,0xfd3b,0xff35,0x1668,0xfd2f,0xff31,0x1664,0xfd27\0"
+ "END\0";
+
+#endif /* defined(BCMHOSTVARS) */
+#endif /* !defined(BCMDONGLEHOST) */
+
+#if !defined(BCMDONGLEHOST)
+#if defined(BCMHOSTVARS)
+static char BCMATTACHDATA(defaultsromvars_wltest)[] =
+ "macaddr=00:90:4c:f8:00:01\0"
+ "et0macaddr=00:11:22:33:44:52\0"
+ "et0phyaddr=30\0"
+ "et0mdcport=0\0"
+ "gpio2=robo_reset\0"
+ "boardvendor=0x14e4\0"
+ "boardflags=0x210\0"
+ "boardflags2=0\0"
+ "boardtype=0x04c3\0"
+ "boardrev=0x1100\0"
+ "sromrev=8\0"
+ "devid=0x432c\0"
+ "ccode=0\0"
+ "regrev=0\0"
+ "aa2g=3\0"
+ "ag0=2\0"
+ "ag1=2\0"
+ "aa5g=3\0"
+ "aa0=2\0"
+ "aa1=2\0"
+ "txchain=3\0"
+ "rxchain=3\0"
+ "antswitch=0\0"
+ "itt2ga0=0x20\0"
+ "maxp2ga0=0x48\0"
+ "pa2gw0a0=0xfe9e\0"
+ "pa2gw1a0=0x15d5\0"
+ "pa2gw2a0=0xfae9\0"
+ "itt2ga1=0x20\0"
+ "maxp2ga1=0x48\0"
+ "pa2gw0a1=0xfeb3\0"
+ "pa2gw1a1=0x15c9\0"
+ "pa2gw2a1=0xfaf7\0"
+ "tssipos2g=1\0"
+ "extpagain2g=0\0"
+ "pdetrange2g=0\0"
+ "triso2g=3\0"
+ "antswctl2g=0\0"
+ "tssipos5g=1\0"
+ "extpagain5g=0\0"
+ "pdetrange5g=0\0"
+ "triso5g=3\0"
+ "antswctl5g=0\0"
+ "cck2gpo=0\0"
+ "ofdm2gpo=0\0"
+ "mcs2gpo0=0\0"
+ "mcs2gpo1=0\0"
+ "mcs2gpo2=0\0"
+ "mcs2gpo3=0\0"
+ "mcs2gpo4=0\0"
+ "mcs2gpo5=0\0"
+ "mcs2gpo6=0\0"
+ "mcs2gpo7=0\0"
+ "cddpo=0\0"
+ "stbcpo=0\0"
+ "bw40po=4\0"
+ "bwduppo=0\0"
+ "END\0";
+
+/**
+ * The contents of this array are a first attempt; they are likely incorrect for 43602 and
+ * need to be edited at a later stage.
+ */
+static char BCMATTACHDATA(defaultsromvars_43602)[] =
+ "sromrev=11\0"
+ "boardrev=0x1421\0"
+ "boardflags=0x10401001\0"
+ "boardflags2=0x00000002\0"
+ "boardflags3=0x00000003\0"
+ "boardtype=0x61b\0"
+ "subvid=0x14e4\0"
+ "boardnum=62526\0"
+ "macaddr=00:90:4c:0d:f4:3e\0"
+ "ccode=X0\0"
+ "regrev=15\0"
+ "aa2g=7\0"
+ "aa5g=7\0"
+ "agbg0=71\0"
+ "agbg1=71\0"
+ "agbg2=133\0"
+ "aga0=71\0"
+ "aga1=133\0"
+ "aga2=133\0"
+ "antswitch=0\0"
+ "tssiposslope2g=1\0"
+ "epagain2g=0\0"
+ "pdgain2g=9\0"
+ "tworangetssi2g=0\0"
+ "papdcap2g=0\0"
+ "femctrl=2\0"
+ "tssiposslope5g=1\0"
+ "epagain5g=0\0"
+ "pdgain5g=9\0"
+ "tworangetssi5g=0\0"
+ "papdcap5g=0\0"
+ "gainctrlsph=0\0"
+ "tempthresh=255\0"
+ "tempoffset=255\0"
+ "rawtempsense=0x1ff\0"
+ "measpower=0x7f\0"
+ "tempsense_slope=0xff\0"
+ "tempcorrx=0x3f\0"
+ "tempsense_option=0x3\0"
+ "xtalfreq=40000\0"
+ "phycal_tempdelta=255\0"
+ "temps_period=15\0"
+ "temps_hysteresis=15\0"
+ "measpower1=0x7f\0"
+ "measpower2=0x7f\0"
+ "pdoffset2g40ma0=15\0"
+ "pdoffset2g40ma1=15\0"
+ "pdoffset2g40ma2=15\0"
+ "pdoffset2g40mvalid=1\0"
+ "pdoffset40ma0=9010\0"
+ "pdoffset40ma1=12834\0"
+ "pdoffset40ma2=8994\0"
+ "pdoffset80ma0=16\0"
+ "pdoffset80ma1=4096\0"
+ "pdoffset80ma2=0\0"
+ "subband5gver=0x4\0"
+ "cckbw202gpo=0\0"
+ "cckbw20ul2gpo=0\0"
+ "mcsbw202gpo=2571386880\0"
+ "mcsbw402gpo=2571386880\0"
+ "dot11agofdmhrbw202gpo=17408\0"
+ "ofdmlrbw202gpo=0\0"
+ "mcsbw205glpo=4001923072\0"
+ "mcsbw405glpo=4001923072\0"
+ "mcsbw805glpo=4001923072\0"
+ "mcsbw1605glpo=0\0"
+ "mcsbw205gmpo=3431497728\0"
+ "mcsbw405gmpo=3431497728\0"
+ "mcsbw805gmpo=3431497728\0"
+ "mcsbw1605gmpo=0\0"
+ "mcsbw205ghpo=3431497728\0"
+ "mcsbw405ghpo=3431497728\0"
+ "mcsbw805ghpo=3431497728\0"
+ "mcsbw1605ghpo=0\0"
+ "mcslr5glpo=0\0"
+ "mcslr5gmpo=0\0"
+ "mcslr5ghpo=0\0"
+ "sb20in40hrpo=0\0"
+ "sb20in80and160hr5glpo=0\0"
+ "sb40and80hr5glpo=0\0"
+ "sb20in80and160hr5gmpo=0\0"
+ "sb40and80hr5gmpo=0\0"
+ "sb20in80and160hr5ghpo=0\0"
+ "sb40and80hr5ghpo=0\0"
+ "sb20in40lrpo=0\0"
+ "sb20in80and160lr5glpo=0\0"
+ "sb40and80lr5glpo=0\0"
+ "sb20in80and160lr5gmpo=0\0"
+ "sb40and80lr5gmpo=0\0"
+ "sb20in80and160lr5ghpo=0\0"
+ "sb40and80lr5ghpo=0\0"
+ "dot11agduphrpo=0\0"
+ "dot11agduplrpo=0\0"
+ "pcieingress_war=15\0"
+ "sar2g=18\0"
+ "sar5g=15\0"
+ "noiselvl2ga0=31\0"
+ "noiselvl2ga1=31\0"
+ "noiselvl2ga2=31\0"
+ "noiselvl5ga0=31,31,31,31\0"
+ "noiselvl5ga1=31,31,31,31\0"
+ "noiselvl5ga2=31,31,31,31\0"
+ "rxgainerr2ga0=63\0"
+ "rxgainerr2ga1=31\0"
+ "rxgainerr2ga2=31\0"
+ "rxgainerr5ga0=63,63,63,63\0"
+ "rxgainerr5ga1=31,31,31,31\0"
+ "rxgainerr5ga2=31,31,31,31\0"
+ "maxp2ga0=76\0"
+ "pa2ga0=0xff3c,0x172c,0xfd20\0"
+ "rxgains5gmelnagaina0=7\0"
+ "rxgains5gmtrisoa0=15\0"
+ "rxgains5gmtrelnabypa0=1\0"
+ "rxgains5ghelnagaina0=7\0"
+ "rxgains5ghtrisoa0=15\0"
+ "rxgains5ghtrelnabypa0=1\0"
+ "rxgains2gelnagaina0=4\0"
+ "rxgains2gtrisoa0=7\0"
+ "rxgains2gtrelnabypa0=1\0"
+ "rxgains5gelnagaina0=3\0"
+ "rxgains5gtrisoa0=7\0"
+ "rxgains5gtrelnabypa0=1\0"
+ "maxp5ga0=76,76,76,76\0"
+"pa5ga0=0xff3a,0x14d4,0xfd5f,0xff36,0x1626,0xfd2e,0xff42,0x15bd,0xfd47,0xff39,0x15a3,0xfd3d\0"
+ "maxp2ga1=76\0"
+ "pa2ga1=0xff2a,0x16b2,0xfd28\0"
+ "rxgains5gmelnagaina1=7\0"
+ "rxgains5gmtrisoa1=15\0"
+ "rxgains5gmtrelnabypa1=1\0"
+ "rxgains5ghelnagaina1=7\0"
+ "rxgains5ghtrisoa1=15\0"
+ "rxgains5ghtrelnabypa1=1\0"
+ "rxgains2gelnagaina1=3\0"
+ "rxgains2gtrisoa1=6\0"
+ "rxgains2gtrelnabypa1=1\0"
+ "rxgains5gelnagaina1=3\0"
+ "rxgains5gtrisoa1=6\0"
+ "rxgains5gtrelnabypa1=1\0"
+ "maxp5ga1=76,76,76,76\0"
+"pa5ga1=0xff4e,0x1530,0xfd53,0xff58,0x15b4,0xfd4d,0xff58,0x1671,0xfd2f,0xff55,0x15e2,0xfd46\0"
+ "maxp2ga2=76\0"
+ "pa2ga2=0xff3c,0x1736,0xfd1f\0"
+ "rxgains5gmelnagaina2=7\0"
+ "rxgains5gmtrisoa2=15\0"
+ "rxgains5gmtrelnabypa2=1\0"
+ "rxgains5ghelnagaina2=7\0"
+ "rxgains5ghtrisoa2=15\0"
+ "rxgains5ghtrelnabypa2=1\0"
+ "rxgains2gelnagaina2=4\0"
+ "rxgains2gtrisoa2=7\0"
+ "rxgains2gtrelnabypa2=1\0"
+ "rxgains5gelnagaina2=3\0"
+ "rxgains5gtrisoa2=7\0"
+ "rxgains5gtrelnabypa2=1\0"
+ "maxp5ga2=76,76,76,76\0"
+"pa5ga2=0xff2d,0x144a,0xfd63,0xff35,0x15d7,0xfd3b,0xff35,0x1668,0xfd2f,0xff31,0x1664,0xfd27\0"
+ "END\0";
+
+/**
+ * The contents of this array are a first attempt; they were copied from 4378 and need to be
+ * edited at a later stage.
+ */
+static char BCMATTACHDATA(defaultsromvars_4378)[] =
+ "cckdigfilttype=4\0"
+ "sromrev=11\0"
+ "boardrev=0x1102\0"
+ "boardtype=0x0771\0"
+ "boardflags=0x10481201\0"
+ "boardflags2=0x00000000\0"
+ "boardflags3=0x04000080\0"
+ "macaddr=00:90:4c:12:43:47\0"
+ "ccode=0\0"
+ "regrev=0\0"
+ "antswitch=0\0"
+ "pdgain5g=0\0"
+ "pdgain2g=0\0"
+ "tworangetssi2g=0\0"
+ "tworangetssi5g=0\0"
+ "femctrl=16\0"
+ "vendid=0x14e4\0"
+ "devid=0x4425\0"
+ "manfid=0x2d0\0"
+ "nocrc=1\0"
+ "btc_params82=0x1a0\0"
+ "otpimagesize=502\0"
+ "xtalfreq=37400\0"
+ "rxgains2gelnagaina0=3\0"
+ "rxgains2gtrisoa0=7\0"
+ "rxgains2gtrelnabypa0=1\0"
+ "rxgains5gelnagaina0=3\0"
+ "rxgains5gtrisoa0=6\0"
+ "rxgains5gtrelnabypa0=1\0"
+ "rxgains5gmelnagaina0=3\0"
+ "rxgains5gmtrisoa0=6\0"
+ "rxgains5gmtrelnabypa0=1\0"
+ "rxgains5ghelnagaina0=3\0"
+ "rxgains5ghtrisoa0=6\0"
+ "rxgains5ghtrelnabypa0=1\0"
+ "rxgains2gelnagaina1=3\0"
+ "rxgains2gtrisoa1=7\0"
+ "rxgains2gtrelnabypa1=1\0"
+ "rxgains5gelnagaina1=3\0"
+ "rxgains5gtrisoa1=6\0"
+ "rxgains5gtrelnabypa1=1\0"
+ "rxgains5gmelnagaina1=3\0"
+ "rxgains5gmtrisoa1=6\0"
+ "rxgains5gmtrelnabypa1=1\0"
+ "rxgains5ghelnagaina1=3\0"
+ "rxgains5ghtrisoa1=6\0"
+ "rxgains5ghtrelnabypa1=1\0"
+ "rxchain=3\0"
+ "txchain=3\0"
+ "aa2g=3\0"
+ "aa5g=3\0"
+ "agbg0=2\0"
+ "agbg1=2\0"
+ "aga0=2\0"
+ "aga1=2\0"
+ "tssipos2g=1\0"
+ "tssipos5g=1\0"
+ "tempthresh=255\0"
+ "tempoffset=255\0"
+ "rawtempsense=0x1ff\0"
+ "pa2gccka0=-200,7392,-897\0"
+ "pa2gccka1=-198,7522,-907\0"
+ "pa2ga0=-174,7035,-838\0"
+ "pa2ga1=-185,6772,-811\0"
+ "pa5ga0=-175,7296,-887,-164,7553,-910,-155,7801,-936,-149,7908,-951\0"
+ "pa5ga1=-155,7675,-925,-148,7851,-940,-152,7930,-954,-143,8121,-969\0"
+ "pa5gbw4080a0=-178,7872,-959,-173,8107,-986,-165,8398,-1019,-150,8809,-1063\0"
+ "pa5gbw4080a1=-166,8179,-993,-161,8378,-1015,-165,8402,-1019,-155,8757,-1057\0"
+ "maxp2ga0=66\0"
+ "maxp2ga1=66\0"
+ "maxp5ga0=66,66,66,66\0"
+ "maxp5ga1=66,66,66,66\0"
+ "subband5gver=0x4\0"
+ "paparambwver=3\0"
+ "cckpwroffset0=0\0"
+ "cckpwroffset1=0\0"
+ "pdoffset40ma0=0x0000\0"
+ "pdoffset80ma0=0xeeee\0"
+ "pdoffset40ma1=0x0000\0"
+ "pdoffset80ma1=0xeeee\0"
+ "cckbw202gpo=0\0"
+ "cckbw20ul2gpo=0\0"
+ "mcsbw202gpo=0xEC888222\0"
+ "mcsbw402gpo=0xEC888222\0"
+ "dot11agofdmhrbw202gpo=0x6622\0"
+ "ofdmlrbw202gpo=0x0000\0"
+ "mcsbw205glpo=0xCA666000\0"
+ "mcsbw405glpo=0xCA666000\0"
+ "mcsbw805glpo=0xEA666000\0"
+ "mcsbw1605glpo=0\0"
+ "mcsbw205gmpo=0xCA666000\0"
+ "mcsbw405gmpo=0xCA666000\0"
+ "mcsbw805gmpo=0xEA666000\0"
+ "mcsbw1605gmpo=0\0"
+ "mcsbw205ghpo=0xCA666000\0"
+ "mcsbw405ghpo=0xCA666000\0"
+ "mcsbw805ghpo=0xEA666000\0"
+ "mcsbw1605ghpo=0\0"
+ "mcslr5glpo=0x0000\0"
+ "mcslr5gmpo=0x0000\0"
+ "mcslr5ghpo=0x0000\0"
+ "sb20in40hrpo=0x0\0"
+ "sb20in80and160hr5glpo=0x0\0"
+ "sb40and80hr5glpo=0x0\0"
+ "sb20in80and160hr5gmpo=0x0\0"
+ "sb40and80hr5gmpo=0x0\0"
+ "sb20in80and160hr5ghpo=0x0\0"
+ "sb40and80hr5ghpo=0x0\0"
+ "sb20in40lrpo=0x0\0"
+ "sb20in80and160lr5glpo=0x0\0"
+ "sb40and80lr5glpo=0x0\0"
+ "sb20in80and160lr5gmpo=0x0\0"
+ "sb40and80lr5gmpo=0x0\0"
+ "sb20in80and160lr5ghpo=0x0\0"
+ "sb40and80lr5ghpo=0x0\0"
+ "dot11agduphrpo=0x0\0"
+ "dot11agduplrpo=0x0\0"
+ "phycal_tempdelta=15\0"
+ "temps_period=15\0"
+ "temps_hysteresis=15\0"
+ "swctrlmap_2g=0x00000404,0x0a0a0000,0x02020000,0x010a02,0x1fe\0"
+ "swctrlmapext_2g=0x00000000,0x00000000,0x00000000,0x000000,0x000\0"
+ "swctrlmap_5g=0x00001010,0x60600000,0x40400000,0x000000,0x0f0\0"
+ "swctrlmapext_5g=0x00000000,0x00000000,0x00000000,0x000000,0x000\0"
+ "powoffs2gtna0=1,3,3,1,0,0,1,2,2,2,1,1,0,0\0"
+ "powoffs2gtna1=-1,1,1,1,0,0,1,2,3,2,2,0,0,0\0"
+ "END\0";
+
+/**
+ * The contents of this array are a first attempt; they were copied from 4387 and need to be
+ * edited at a later stage.
+ */
+static char BCMATTACHDATA(defaultsromvars_4387)[] =
+ "cckdigfilttype=4\0"
+ "sromrev=11\0"
+ "boardrev=0x1102\0"
+ "boardtype=0x0771\0"
+ "boardflags=0x10481201\0"
+ "boardflags2=0x00000000\0"
+ "boardflags3=0x04000080\0"
+ "macaddr=00:90:4c:12:43:47\0"
+ "ccode=0\0"
+ "regrev=0\0"
+ "antswitch=0\0"
+ "pdgain5g=0\0"
+ "pdgain2g=0\0"
+ "tworangetssi2g=0\0"
+ "tworangetssi5g=0\0"
+ "femctrl=16\0"
+ "vendid=0x14e4\0"
+ "devid=0x4425\0"
+ "manfid=0x2d0\0"
+ "nocrc=1\0"
+ "btc_params82=0x1a0\0"
+ "otpimagesize=502\0"
+ "xtalfreq=37400\0"
+ "rxgains2gelnagaina0=3\0"
+ "rxgains2gtrisoa0=7\0"
+ "rxgains2gtrelnabypa0=1\0"
+ "rxgains5gelnagaina0=3\0"
+ "rxgains5gtrisoa0=6\0"
+ "rxgains5gtrelnabypa0=1\0"
+ "rxgains5gmelnagaina0=3\0"
+ "rxgains5gmtrisoa0=6\0"
+ "rxgains5gmtrelnabypa0=1\0"
+ "rxgains5ghelnagaina0=3\0"
+ "rxgains5ghtrisoa0=6\0"
+ "rxgains5ghtrelnabypa0=1\0"
+ "rxgains2gelnagaina1=3\0"
+ "rxgains2gtrisoa1=7\0"
+ "rxgains2gtrelnabypa1=1\0"
+ "rxgains5gelnagaina1=3\0"
+ "rxgains5gtrisoa1=6\0"
+ "rxgains5gtrelnabypa1=1\0"
+ "rxgains5gmelnagaina1=3\0"
+ "rxgains5gmtrisoa1=6\0"
+ "rxgains5gmtrelnabypa1=1\0"
+ "rxgains5ghelnagaina1=3\0"
+ "rxgains5ghtrisoa1=6\0"
+ "rxgains5ghtrelnabypa1=1\0"
+ "rxchain=3\0"
+ "txchain=3\0"
+ "aa2g=3\0"
+ "aa5g=3\0"
+ "agbg0=2\0"
+ "agbg1=2\0"
+ "aga0=2\0"
+ "aga1=2\0"
+ "tssipos2g=1\0"
+ "tssipos5g=1\0"
+ "tempthresh=255\0"
+ "tempoffset=255\0"
+ "rawtempsense=0x1ff\0"
+ "pa2gccka0=-200,7392,-897\0"
+ "pa2gccka1=-198,7522,-907\0"
+ "pa2ga0=-174,7035,-838\0"
+ "pa2ga1=-185,6772,-811\0"
+ "pa5ga0=-175,7296,-887,-164,7553,-910,-155,7801,-936,-149,7908,-951\0"
+ "pa5ga1=-155,7675,-925,-148,7851,-940,-152,7930,-954,-143,8121,-969\0"
+ "pa5gbw4080a0=-178,7872,-959,-173,8107,-986,-165,8398,-1019,-150,8809,-1063\0"
+ "pa5gbw4080a1=-166,8179,-993,-161,8378,-1015,-165,8402,-1019,-155,8757,-1057\0"
+ "maxp2ga0=66\0"
+ "maxp2ga1=66\0"
+ "maxp5ga0=66,66,66,66\0"
+ "maxp5ga1=66,66,66,66\0"
+ "subband5gver=0x4\0"
+ "paparambwver=3\0"
+ "cckpwroffset0=0\0"
+ "cckpwroffset1=0\0"
+ "pdoffset40ma0=0x0000\0"
+ "pdoffset80ma0=0xeeee\0"
+ "pdoffset40ma1=0x0000\0"
+ "pdoffset80ma1=0xeeee\0"
+ "cckbw202gpo=0\0"
+ "cckbw20ul2gpo=0\0"
+ "mcsbw202gpo=0xEC888222\0"
+ "mcsbw402gpo=0xEC888222\0"
+ "dot11agofdmhrbw202gpo=0x6622\0"
+ "ofdmlrbw202gpo=0x0000\0"
+ "mcsbw205glpo=0xCA666000\0"
+ "mcsbw405glpo=0xCA666000\0"
+ "mcsbw805glpo=0xEA666000\0"
+ "mcsbw1605glpo=0\0"
+ "mcsbw205gmpo=0xCA666000\0"
+ "mcsbw405gmpo=0xCA666000\0"
+ "mcsbw805gmpo=0xEA666000\0"
+ "mcsbw1605gmpo=0\0"
+ "mcsbw205ghpo=0xCA666000\0"
+ "mcsbw405ghpo=0xCA666000\0"
+ "mcsbw805ghpo=0xEA666000\0"
+ "mcsbw1605ghpo=0\0"
+ "mcslr5glpo=0x0000\0"
+ "mcslr5gmpo=0x0000\0"
+ "mcslr5ghpo=0x0000\0"
+ "sb20in40hrpo=0x0\0"
+ "sb20in80and160hr5glpo=0x0\0"
+ "sb40and80hr5glpo=0x0\0"
+ "sb20in80and160hr5gmpo=0x0\0"
+ "sb40and80hr5gmpo=0x0\0"
+ "sb20in80and160hr5ghpo=0x0\0"
+ "sb40and80hr5ghpo=0x0\0"
+ "sb20in40lrpo=0x0\0"
+ "sb20in80and160lr5glpo=0x0\0"
+ "sb40and80lr5glpo=0x0\0"
+ "sb20in80and160lr5gmpo=0x0\0"
+ "sb40and80lr5gmpo=0x0\0"
+ "sb20in80and160lr5ghpo=0x0\0"
+ "sb40and80lr5ghpo=0x0\0"
+ "dot11agduphrpo=0x0\0"
+ "dot11agduplrpo=0x0\0"
+ "phycal_tempdelta=15\0"
+ "temps_period=15\0"
+ "temps_hysteresis=15\0"
+ "swctrlmap_2g=0x00000404,0x0a0a0000,0x02020000,0x010a02,0x1fe\0"
+ "swctrlmapext_2g=0x00000000,0x00000000,0x00000000,0x000000,0x000\0"
+ "swctrlmap_5g=0x00001010,0x60600000,0x40400000,0x000000,0x0f0\0"
+ "swctrlmapext_5g=0x00000000,0x00000000,0x00000000,0x000000,0x000\0"
+ "powoffs2gtna0=1,3,3,1,0,0,1,2,2,2,1,1,0,0\0"
+ "powoffs2gtna1=-1,1,1,1,0,0,1,2,3,2,2,0,0,0\0"
+ "END\0";
+
+#endif /* defined(BCMHOSTVARS) */
+#endif /* !defined(BCMDONGLEHOST) */
+
+static bool srvars_inited = FALSE; /* TRUE once the global OTP/SROM vars have been initialized */
+
+#if (!defined(BCMDONGLEHOST) && defined(BCMHOSTVARS))
+/* The vars list must end with the pattern "END" */
+static uint
+BCMATTACHFN(srom_vars_len)(char *vars)
+{
+ uint pos = 0;
+ uint len;
+ char *s;
+ char *emark = "END";
+ uint emark_len = strlen(emark) + 1;
+
+ for (s = vars; s && *s;) {
+
+ if (strcmp(s, emark) == 0)
+ break;
+
+ len = strlen(s);
+ s += strlen(s) + 1;
+ pos += len + 1;
+ /* BS_ERROR(("len %d vars[pos] %s\n", pos, s)); */
+ if (pos >= (VARS_MAX - emark_len)) {
+ return 0;
+ }
+ }
+
+ return pos + emark_len; /* include the "END\0" */
+}
+#endif /* BCMHOSTVARS */
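+
+/*
+ * Illustrative sketch (disabled; not part of the build): the defaultsromvars_*
+ * blobs above are NUL-separated "name=value" strings terminated by "END\0",
+ * which is exactly the layout srom_vars_len() above measures. A hypothetical
+ * two-entry list:
+ */
+#if 0
+static char example_vars[] =
+	"sromrev=8\0"
+	"boardtype=0x04c3\0"
+	"END\0";
+/* srom_vars_len(example_vars) returns the byte count up to and including "END\0" */
+#endif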
+
+#if !defined(BCMDONGLEHOST)
+#ifdef BCMNVRAMVARS
+static int
+BCMATTACHFN(initvars_nvram_vars)(si_t *sih, osl_t *osh, char **vars, uint *vars_sz)
+{
+ int ret;
+
+ ASSERT(vars != NULL && vars_sz != NULL);
+
+	/* allocate the maximum buffer as we don't know how big it should be */
+ *vars = MALLOC(osh, MAXSZ_NVRAM_VARS);
+ if (*vars == NULL) {
+ ret = BCME_NOMEM;
+ goto fail;
+ }
+ *vars_sz = MAXSZ_NVRAM_VARS;
+
+ /* query the name=value pairs */
+ if ((ret = nvram_getall(*vars, *vars_sz)) != BCME_OK) {
+ goto fail;
+ }
+
+	/* treat an empty name=value list as an error so that we can indicate
+	 * the condition up through the error code return...
+	 */
+ if (*vars_sz == 0) {
+ ret = BCME_ERROR;
+ goto fail;
+ }
+
+ return BCME_OK;
+
+fail:
+ if (*vars != NULL) {
+ MFREE(osh, *vars, MAXSZ_NVRAM_VARS);
+ }
+ *vars = NULL;
+ *vars_sz = 0;
+ return ret;
+}
+#endif /* BCMNVRAMVARS */
+
+/**
+ * Initialize local vars from the right source for this platform. Called from siutils.c.
+ *
+ * vars - pointer to a to-be-created pointer area for "environment" variables. Some callers of
+ * this function set 'vars' to NULL; in that case this function returns early.
+ *
+ * Return 0 on success, nonzero on error.
+ */
+int
+BCMATTACHFN(srom_var_init)(si_t *sih, uint bustype, volatile void *curmap, osl_t *osh,
+ char **vars, uint *count)
+{
+ ASSERT(bustype == BUSTYPE(bustype));
+ if (vars == NULL || count == NULL)
+ return (0);
+
+ *vars = NULL;
+ *count = 0;
+
+ switch (BUSTYPE(bustype)) {
+ case SI_BUS:
+#ifdef BCMPCIEDEV
+ if (BCMPCIEDEV_ENAB()) {
+ int ret;
+
+ ret = initvars_cis_pci(sih, osh, curmap, vars, count);
+
+#ifdef BCMPCIEDEV_SROM_FORMAT
+ if (ret)
+ ret = initvars_srom_pci(sih, curmap, vars, count);
+#endif
+ if (ret)
+ ret = initvars_srom_si(sih, osh, curmap, vars, count);
+ return ret;
+ } else
+#endif /* BCMPCIEDEV */
+ {
+ return initvars_srom_si(sih, osh, curmap, vars, count);
+ }
+ case PCI_BUS: {
+ int ret;
+
+#ifdef BCMNVRAMVARS
+ if ((ret = initvars_nvram_vars(sih, osh, vars, count)) == BCME_OK) {
+ return ret;
+ } else
+#endif
+ {
+ ASSERT(curmap != NULL);
+ if (curmap == NULL)
+ return (-1);
+
+			/* First check for CIS format; if not CIS, try SROM format */
+ if ((ret = initvars_cis_pci(sih, osh, curmap, vars, count)))
+ return initvars_srom_pci(sih, curmap, vars, count);
+ return ret;
+ }
+ }
+
+#ifdef BCMSDIO
+ case SDIO_BUS:
+ return initvars_cis_sdio(sih, osh, vars, count);
+#endif /* BCMSDIO */
+
+#ifdef BCMSPI
+ case SPI_BUS:
+ return initvars_cis_spi(sih, osh, vars, count);
+#endif /* BCMSPI */
+
+ default:
+ ASSERT(0);
+ }
+ return (-1);
+}
+#endif /* !defined(BCMDONGLEHOST) */
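+
+/*
+ * Illustrative usage sketch (disabled): a caller on the attach path is expected
+ * to pass writable 'vars'/'count' pointers and to free the returned buffer
+ * later; 'sih', 'curmap' and 'osh' stand in for a hypothetical caller context.
+ */
+#if 0
+{
+	char *vars = NULL;
+	uint count = 0;
+
+	if (srom_var_init(sih, BUSTYPE(sih->bustype), curmap, osh, &vars, &count) == 0) {
+		/* 'vars' now holds NUL-separated name=value pairs; 'count' is its length */
+	}
+}
+#endif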
+
+/** supports only 16-bit word reads from srom */
+int
+srom_read(si_t *sih, uint bustype, volatile void *curmap, osl_t *osh,
+ uint byteoff, uint nbytes, uint16 *buf, bool check_crc)
+{
+ uint i, off, nw;
+
+ BCM_REFERENCE(i);
+
+ ASSERT(bustype == BUSTYPE(bustype));
+
+ /* check input - 16-bit access only */
+ if (byteoff & 1 || nbytes & 1 || (byteoff + nbytes) > SROM_MAX)
+ return 1;
+
+ off = byteoff / 2;
+ nw = nbytes / 2;
+
+#ifdef BCMPCIEDEV
+ if ((BUSTYPE(bustype) == SI_BUS) &&
+ (BCM43602_CHIP(sih->chip) ||
+ (BCM4369_CHIP(sih->chip)) ||
+ (BCM4378_CHIP(sih->chip)) ||
+ (BCM4387_CHIP(sih->chip)) ||
+ (BCM4388_CHIP(sih->chip)) ||
+ (BCM4362_CHIP(sih->chip)) ||
+ (BCM4385_CHIP(sih->chip)) ||
+ (BCM4389_CHIP(sih->chip)) ||
+ (BCM4397_CHIP(sih->chip)) ||
+
+#ifdef UNRELEASEDCHIP
+#endif
+
+ FALSE)) { /* building firmware for chips with a PCIe interface and internal SI bus */
+#else
+ if (BUSTYPE(bustype) == PCI_BUS) {
+#endif /* BCMPCIEDEV */
+ if (!curmap)
+ return 1;
+
+ if (si_is_sprom_available(sih)) {
+ volatile uint16 *srom;
+
+ srom = (volatile uint16 *)srom_offset(sih, curmap);
+ if (srom == NULL)
+ return 1;
+
+ if (sprom_read_pci(osh, sih, srom, off, buf, nw, check_crc))
+ return 1;
+ }
+#if !defined(BCMDONGLEHOST) && (defined(BCMNVRAMW) || defined(BCMNVRAMR))
+ else if (!((BUSTYPE(bustype) == SI_BUS) &&
+ (BCM43602_CHIP(sih->chip) ||
+ (BCM4369_CHIP(sih->chip)) ||
+ (BCM4362_CHIP(sih->chip)) ||
+ (BCM4378_CHIP(sih->chip)) ||
+ (BCM4385_CHIP(sih->chip)) ||
+ (BCM4389_CHIP(sih->chip)) ||
+ (BCM4387_CHIP(sih->chip)) ||
+ (BCM4388_CHIP(sih->chip)) ||
+ (BCM4397_CHIP(sih->chip)) ||
+ 0))) {
+ if (otp_read_pci(osh, sih, buf, nbytes))
+ return 1;
+ }
+#endif /* !BCMDONGLEHOST && (BCMNVRAMW||BCMNVRAMR) */
+#ifdef BCMSDIO
+ } else if (BUSTYPE(bustype) == SDIO_BUS) {
+ off = byteoff / 2;
+ nw = nbytes / 2;
+ for (i = 0; i < nw; i++) {
+ if (sprom_read_sdio(osh, (uint16)(off + i), (uint16 *)(buf + i)))
+ return 1;
+ }
+#endif /* BCMSDIO */
+#ifdef BCMSPI
+ } else if (BUSTYPE(bustype) == SPI_BUS) {
+ if (bcmsdh_cis_read(NULL, SDIO_FUNC_1, (uint8 *)buf, byteoff + nbytes) != 0)
+ return 1;
+#endif /* BCMSPI */
+ } else if (BUSTYPE(bustype) == SI_BUS) {
+ return 1;
+ } else {
+ return 1;
+ }
+
+ return 0;
+}
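+
+/*
+ * Illustrative usage sketch (disabled): srom_read() accepts only even byte
+ * offsets and even lengths, so reading the first four 16-bit words of the
+ * image looks like this; the surrounding context is hypothetical.
+ */
+#if 0
+{
+	uint16 words[4];
+
+	if (srom_read(sih, BUSTYPE(sih->bustype), curmap, osh,
+		0 /* even byteoff */, sizeof(words), words, FALSE) == 0) {
+		/* words[] now holds the first 8 bytes of the SROM image */
+	}
+}
+#endif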
+
+#if defined(WLTEST) || defined (DHD_SPROM) || defined (BCMDBG)
+/** supports only 16-bit word writes into srom */
+int
+srom_write(si_t *sih, uint bustype, volatile void *curmap, osl_t *osh,
+ uint byteoff, uint nbytes, uint16 *buf)
+{
+ uint i, nw, crc_range;
+ uint16 *old, *new;
+ uint8 crc;
+ volatile uint32 val32;
+ int rc = 1;
+
+ ASSERT(bustype == BUSTYPE(bustype));
+
+ /* freed in same function */
+ old = MALLOC_NOPERSIST(osh, SROM_MAXW * sizeof(uint16));
+ new = MALLOC_NOPERSIST(osh, SROM_MAXW * sizeof(uint16));
+
+ if (old == NULL || new == NULL)
+ goto done;
+
+	/* check input - 16-bit access only; byteoff 0x55aa indicates an
+	 * srclear (erase) request
+	 */
+ if ((byteoff != 0x55aa) && ((byteoff & 1) || (nbytes & 1)))
+ goto done;
+
+ if ((byteoff != 0x55aa) && ((byteoff + nbytes) > SROM_MAX))
+ goto done;
+
+ if (FALSE) {
+ }
+#ifdef BCMSDIO
+ else if (BUSTYPE(bustype) == SDIO_BUS) {
+ crc_range = SROM_MAX;
+ }
+#endif
+ else {
+ crc_range = srom_size(sih, osh);
+ }
+
+ nw = crc_range / 2;
+	/* read the first few words from srom, then adjust the length and read the rest */
+ if (srom_read(sih, bustype, curmap, osh, 0, crc_range, old, FALSE))
+ goto done;
+
+ BS_ERROR(("srom_write: old[SROM4_SIGN] 0x%x, old[SROM8_SIGN] 0x%x\n",
+ old[SROM4_SIGN], old[SROM8_SIGN]));
+ /* Deal with blank srom */
+ if (old[0] == 0xffff) {
+ /* Do nothing to blank srom when it's srclear */
+ if (byteoff == 0x55aa) {
+ rc = 0;
+ goto done;
+ }
+
+ /* see if the input buffer is valid SROM image or not */
+ if (buf[SROM11_SIGN] == SROM11_SIGNATURE) {
+ BS_ERROR(("srom_write: buf[SROM11_SIGN] 0x%x\n",
+ buf[SROM11_SIGN]));
+
+ /* block invalid buffer size */
+ if (nbytes < SROM11_WORDS * 2) {
+ rc = BCME_BUFTOOSHORT;
+ goto done;
+ } else if (nbytes > SROM11_WORDS * 2) {
+ rc = BCME_BUFTOOLONG;
+ goto done;
+ }
+
+ nw = SROM11_WORDS;
+
+ } else if (buf[SROM12_SIGN] == SROM12_SIGNATURE) {
+ BS_ERROR(("srom_write: buf[SROM12_SIGN] 0x%x\n",
+ buf[SROM12_SIGN]));
+
+ /* block invalid buffer size */
+ if (nbytes < SROM12_WORDS * 2) {
+ rc = BCME_BUFTOOSHORT;
+ goto done;
+ } else if (nbytes > SROM12_WORDS * 2) {
+ rc = BCME_BUFTOOLONG;
+ goto done;
+ }
+
+ nw = SROM12_WORDS;
+
+ } else if (buf[SROM13_SIGN] == SROM13_SIGNATURE) {
+ BS_ERROR(("srom_write: buf[SROM13_SIGN] 0x%x\n",
+ buf[SROM13_SIGN]));
+
+ /* block invalid buffer size */
+ if (nbytes < SROM13_WORDS * 2) {
+ rc = BCME_BUFTOOSHORT;
+ goto done;
+ } else if (nbytes > SROM13_WORDS * 2) {
+ rc = BCME_BUFTOOLONG;
+ goto done;
+ }
+
+ nw = SROM13_WORDS;
+
+ } else if (buf[SROM16_SIGN] == SROM16_SIGNATURE) {
+ BS_ERROR(("srom_write: buf[SROM16_SIGN] 0x%x\n",
+ buf[SROM16_SIGN]));
+
+ /* block invalid buffer size */
+ if (nbytes < SROM16_WORDS * 2) {
+ rc = BCME_BUFTOOSHORT;
+ goto done;
+ } else if (nbytes > SROM16_WORDS * 2) {
+ rc = BCME_BUFTOOLONG;
+ goto done;
+ }
+
+ nw = SROM16_WORDS;
+
+ } else if (buf[SROM17_SIGN] == SROM17_SIGNATURE) {
+ BS_ERROR(("srom_write: buf[SROM17_SIGN] 0x%x\n",
+ buf[SROM17_SIGN]));
+
+ /* block invalid buffer size */
+ if (nbytes < SROM17_WORDS * 2) {
+ rc = BCME_BUFTOOSHORT;
+ goto done;
+ } else if (nbytes > SROM17_WORDS * 2) {
+ rc = BCME_BUFTOOLONG;
+ goto done;
+ }
+
+ nw = SROM17_WORDS;
+ } else if (buf[SROM18_SIGN] == SROM18_SIGNATURE) {
+ BS_ERROR(("srom_write: buf[SROM18_SIGN] 0x%x\n",
+ buf[SROM18_SIGN]));
+
+ /* block invalid buffer size */
+ /* nbytes can be < SROM18 bytes since host limits transfer chunk size
+ * to 1500 Bytes
+ */
+ if (nbytes > SROM18_WORDS * 2) {
+ rc = BCME_BUFTOOLONG;
+ goto done;
+ }
+
+ nw = SROM18_WORDS;
+
+ } else if (buf[SROM11_SIGN] == SROM15_SIGNATURE) {
+ BS_ERROR(("srom_write: buf[SROM15_SIGN] 0x%x\n",
+ buf[SROM11_SIGN]));
+			/* nbytes can be < SROM15 bytes since host limits transfer chunk size
+ * to 1518 Bytes
+ */
+ if (nbytes > SROM15_WORDS * 2) {
+ rc = BCME_BUFTOOLONG;
+ goto done;
+ }
+ nw = SROM15_WORDS;
+ } else if ((buf[SROM4_SIGN] == SROM4_SIGNATURE) ||
+ (buf[SROM8_SIGN] == SROM4_SIGNATURE)) {
+ BS_ERROR(("srom_write: buf[SROM4_SIGN] 0x%x, buf[SROM8_SIGN] 0x%x\n",
+ buf[SROM4_SIGN], buf[SROM8_SIGN]));
+
+ /* block invalid buffer size */
+ if (nbytes < SROM4_WORDS * 2) {
+ rc = BCME_BUFTOOSHORT;
+ goto done;
+ } else if (nbytes > SROM4_WORDS * 2) {
+ rc = BCME_BUFTOOLONG;
+ goto done;
+ }
+
+ nw = SROM4_WORDS;
+	} else if (nbytes == SROM_WORDS * 2) { /* the other possible SROM format */
+ BS_ERROR(("srom_write: Not SROM4 or SROM8.\n"));
+
+ nw = SROM_WORDS;
+ } else {
+ BS_ERROR(("srom_write: Invalid input file signature\n"));
+ rc = BCME_BADARG;
+ goto done;
+ }
+ crc_range = nw * 2;
+ if (srom_read(sih, bustype, curmap, osh, 0, crc_range, old, FALSE)) {
+ goto done;
+ }
+ } else if (old[SROM18_SIGN] == SROM18_SIGNATURE) {
+ nw = SROM18_WORDS;
+ crc_range = nw * 2;
+ if (srom_read(sih, bustype, curmap, osh, 0, crc_range, old, FALSE)) {
+ goto done;
+ }
+ } else if (old[SROM17_SIGN] == SROM17_SIGNATURE) {
+ nw = SROM17_WORDS;
+ crc_range = nw * 2;
+ if (srom_read(sih, bustype, curmap, osh, 0, crc_range, old, FALSE)) {
+ goto done;
+ }
+ } else if (old[SROM16_SIGN] == SROM16_SIGNATURE) {
+ nw = SROM16_WORDS;
+ crc_range = nw * 2;
+ if (srom_read(sih, bustype, curmap, osh, 0, crc_range, old, FALSE)) {
+ goto done;
+ }
+ } else if (old[SROM15_SIGN] == SROM15_SIGNATURE) {
+ nw = SROM15_WORDS;
+ crc_range = nw * 2;
+ if (srom_read(sih, bustype, curmap, osh, 0, crc_range, old, FALSE)) {
+ goto done;
+ }
+ } else if (old[SROM13_SIGN] == SROM13_SIGNATURE) {
+ nw = SROM13_WORDS;
+ crc_range = nw * 2;
+ if (srom_read(sih, bustype, curmap, osh, 0, crc_range, old, FALSE)) {
+ goto done;
+ }
+ } else if (old[SROM12_SIGN] == SROM12_SIGNATURE) {
+ nw = SROM12_WORDS;
+ crc_range = nw * 2;
+ if (srom_read(sih, bustype, curmap, osh, 0, crc_range, old, FALSE)) {
+ goto done;
+ }
+ } else if (old[SROM11_SIGN] == SROM11_SIGNATURE) {
+ nw = SROM11_WORDS;
+ crc_range = nw * 2;
+ if (srom_read(sih, bustype, curmap, osh, 0, crc_range, old, FALSE)) {
+ goto done;
+ }
+ } else if ((old[SROM4_SIGN] == SROM4_SIGNATURE) ||
+ (old[SROM8_SIGN] == SROM4_SIGNATURE)) {
+ nw = SROM4_WORDS;
+ crc_range = nw * 2;
+ if (srom_read(sih, bustype, curmap, osh, 0, crc_range, old, FALSE)) {
+ goto done;
+ }
+ } else {
+ /* Assert that we have already read enough for sromrev 2 */
+ ASSERT(crc_range >= SROM_WORDS * 2);
+ nw = SROM_WORDS;
+ crc_range = nw * 2;
+ }
+
+ if (byteoff == 0x55aa) {
+ /* Erase request */
+ crc_range = 0;
+ memset((void *)new, 0xff, nw * 2);
+ } else {
+ /* Copy old contents */
+ bcopy((void *)old, (void *)new, nw * 2);
+ /* make changes */
+ bcopy((void *)buf, (void *)&new[byteoff / 2], nbytes);
+ }
+
+ if (crc_range) {
+ /* calculate crc */
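+		/* The stored CRC is a complemented CRC-8 over every byte of the image
+		 * except the final CRC byte itself, computed on the little-endian byte
+		 * layout and kept in the high byte of the last 16-bit word.
+		 */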
+ htol16_buf(new, crc_range);
+ crc = ~hndcrc8((uint8 *)new, crc_range - 1, CRC8_INIT_VALUE);
+ ltoh16_buf(new, crc_range);
+ new[nw - 1] = (crc << 8) | (new[nw - 1] & 0xff);
+ }
+
+#ifdef BCMPCIEDEV
+ if ((BUSTYPE(bustype) == SI_BUS) &&
+ (BCM43602_CHIP(sih->chip) ||
+ (BCM4369_CHIP(sih->chip)) ||
+ (BCM4362_CHIP(sih->chip)) ||
+ (BCM4378_CHIP(sih->chip)) ||
+ (BCM4387_CHIP(sih->chip)) ||
+ (BCM4388_CHIP(sih->chip)) ||
+ (BCM4385_CHIP(sih->chip)) ||
+ (BCM4389_CHIP(sih->chip)) ||
+ (BCM4397_CHIP(sih->chip)) ||
+
+#ifdef UNRELEASEDCHIP
+#endif /* UNRELEASEDCHIP */
+
+ FALSE)) {
+#else
+ if (BUSTYPE(bustype) == PCI_BUS) {
+#endif /* BCMPCIEDEV */
+ volatile uint16 *srom = NULL;
+ volatile void *ccregs = NULL;
+ uint32 ccval = 0;
+
+ if ((CHIPID(sih->chip) == BCM4360_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM43460_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM43526_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM4352_CHIP_ID) ||
+ BCM43602_CHIP(sih->chip)) {
+ /* save current control setting */
+ ccval = si_chipcontrl_read(sih);
+ }
+
+ if (BCM43602_CHIP(sih->chip) ||
+ (((CHIPID(sih->chip) == BCM4360_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM43460_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM4352_CHIP_ID)) &&
+ (CHIPREV(sih->chiprev) <= 2))) {
+ si_chipcontrl_srom4360(sih, TRUE);
+ }
+
+ if (FALSE) {
+ si_srom_clk_set(sih); /* corrects srom clock frequency */
+ }
+
+ /* enable writes to the SPROM */
+ if (sih->ccrev > 31) {
+ if (BUSTYPE(sih->bustype) == SI_BUS)
+ ccregs = (void *)(uintptr)SI_ENUM_BASE(sih);
+ else
+ ccregs = ((volatile uint8 *)curmap + PCI_16KB0_CCREGS_OFFSET);
+ srom = (volatile uint16 *)((volatile uint8 *)ccregs + CC_SROM_OTP);
+ (void)srom_cc_cmd(sih, osh, ccregs, SRC_OP_WREN, 0, 0);
+ } else {
+ srom = (volatile uint16 *)
+ ((volatile uint8 *)curmap + PCI_BAR0_SPROM_OFFSET);
+ val32 = OSL_PCI_READ_CONFIG(osh, PCI_SPROM_CONTROL, sizeof(uint32));
+ val32 |= SPROM_WRITEEN;
+ OSL_PCI_WRITE_CONFIG(osh, PCI_SPROM_CONTROL, sizeof(uint32), val32);
+ }
+ bcm_mdelay(WRITE_ENABLE_DELAY);
+ /* write srom */
+ for (i = 0; i < nw; i++) {
+ if (old[i] != new[i]) {
+ if (sih->ccrev > 31) {
+ if ((sih->cccaps & CC_CAP_SROM) == 0) {
+ /* No srom support in this chip */
+ BS_ERROR(("srom_write, invalid srom, skip\n"));
+ } else
+ (void)srom_cc_cmd(sih, osh, ccregs, SRC_OP_WRITE,
+ i, new[i]);
+ } else {
+ W_REG(osh, &srom[i], new[i]);
+ }
+ bcm_mdelay(WRITE_WORD_DELAY);
+ }
+ }
+ /* disable writes to the SPROM */
+ if (sih->ccrev > 31) {
+ (void)srom_cc_cmd(sih, osh, ccregs, SRC_OP_WRDIS, 0, 0);
+ } else {
+ OSL_PCI_WRITE_CONFIG(osh, PCI_SPROM_CONTROL, sizeof(uint32), val32 &
+ ~SPROM_WRITEEN);
+ }
+
+ if (BCM43602_CHIP(sih->chip) ||
+ (CHIPID(sih->chip) == BCM4360_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM43460_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM4352_CHIP_ID)) {
+			/* Restore config after writing SROM */
+ si_chipcontrl_restore(sih, ccval);
+ }
+#ifdef BCMSDIO
+ } else if (BUSTYPE(bustype) == SDIO_BUS) {
+ /* enable writes to the SPROM */
+ if (sprom_cmd_sdio(osh, SBSDIO_SPROM_WEN))
+ goto done;
+ bcm_mdelay(WRITE_ENABLE_DELAY);
+ /* write srom */
+ for (i = 0; i < nw; i++) {
+ if (old[i] != new[i]) {
+ sprom_write_sdio(osh, (uint16)(i), new[i]);
+ bcm_mdelay(WRITE_WORD_DELAY);
+ }
+ }
+ /* disable writes to the SPROM */
+ if (sprom_cmd_sdio(osh, SBSDIO_SPROM_WDS))
+ goto done;
+#endif /* BCMSDIO */
+ } else if (BUSTYPE(bustype) == SI_BUS) {
+ goto done;
+ } else {
+ goto done;
+ }
+
+ bcm_mdelay(WRITE_ENABLE_DELAY);
+ rc = 0;
+
+done:
+ if (old != NULL)
+ MFREE(osh, old, SROM_MAXW * sizeof(uint16));
+ if (new != NULL)
+ MFREE(osh, new, SROM_MAXW * sizeof(uint16));
+
+ return rc;
+}
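+
+/*
+ * Illustrative usage note (disabled): per the 0x55aa convention checked above,
+ * an erase ('srclear') request ignores the data buffer, so a sketch of an
+ * erase call looks like this; it is not a tested call sequence.
+ */
+#if 0
+	(void)srom_write(sih, BUSTYPE(sih->bustype), curmap, osh,
+		0x55aa /* srclear magic byteoff */, 0, NULL);
+#endif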
+
+/** writes a single 16-bit word into srom */
+int
+srom_write_short(si_t *sih, uint bustype, volatile void *curmap, osl_t *osh,
+ uint byteoff, uint16 value)
+{
+ volatile uint32 val32;
+ int rc = 1;
+
+ ASSERT(bustype == BUSTYPE(bustype));
+
+ if (byteoff & 1)
+ goto done;
+
+#ifdef BCMPCIEDEV
+ if ((BUSTYPE(bustype) == SI_BUS) &&
+ (BCM43602_CHIP(sih->chip) ||
+ FALSE)) {
+#else
+ if (BUSTYPE(bustype) == PCI_BUS) {
+#endif /* BCMPCIEDEV */
+ volatile uint16 *srom = NULL;
+ volatile void *ccregs = NULL;
+ uint32 ccval = 0;
+
+ if (BCM43602_CHIP(sih->chip) ||
+ (CHIPID(sih->chip) == BCM4360_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM43460_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM43526_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM4352_CHIP_ID)) {
+ /* save current control setting */
+ ccval = si_chipcontrl_read(sih);
+ }
+
+ if (BCM43602_CHIP(sih->chip) ||
+ (((CHIPID(sih->chip) == BCM4360_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM43460_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM4352_CHIP_ID)) &&
+ (CHIPREV(sih->chiprev) <= 2))) {
+ si_chipcontrl_srom4360(sih, TRUE);
+ }
+
+ if (FALSE) {
+ si_srom_clk_set(sih); /* corrects srom clock frequency */
+ }
+
+ /* enable writes to the SPROM */
+ if (sih->ccrev > 31) {
+ if (BUSTYPE(sih->bustype) == SI_BUS)
+ ccregs = (void *)(uintptr)SI_ENUM_BASE(sih);
+ else
+ ccregs = ((volatile uint8 *)curmap + PCI_16KB0_CCREGS_OFFSET);
+ srom = (volatile uint16 *)((volatile uint8 *)ccregs + CC_SROM_OTP);
+ (void)srom_cc_cmd(sih, osh, ccregs, SRC_OP_WREN, 0, 0);
+ } else {
+ srom = (volatile uint16 *)
+ ((volatile uint8 *)curmap + PCI_BAR0_SPROM_OFFSET);
+ val32 = OSL_PCI_READ_CONFIG(osh, PCI_SPROM_CONTROL, sizeof(uint32));
+ val32 |= SPROM_WRITEEN;
+ OSL_PCI_WRITE_CONFIG(osh, PCI_SPROM_CONTROL, sizeof(uint32), val32);
+ }
+ bcm_mdelay(WRITE_ENABLE_DELAY);
+ /* write srom */
+ if (sih->ccrev > 31) {
+ if ((sih->cccaps & CC_CAP_SROM) == 0) {
+ /* No srom support in this chip */
+ BS_ERROR(("srom_write, invalid srom, skip\n"));
+ } else
+ (void)srom_cc_cmd(sih, osh, ccregs, SRC_OP_WRITE,
+ byteoff/2, value);
+ } else {
+ W_REG(osh, &srom[byteoff/2], value);
+ }
+ bcm_mdelay(WRITE_WORD_DELAY);
+
+ /* disable writes to the SPROM */
+ if (sih->ccrev > 31) {
+ (void)srom_cc_cmd(sih, osh, ccregs, SRC_OP_WRDIS, 0, 0);
+ } else {
+ OSL_PCI_WRITE_CONFIG(osh, PCI_SPROM_CONTROL, sizeof(uint32), val32 &
+ ~SPROM_WRITEEN);
+ }
+
+ if (BCM43602_CHIP(sih->chip) ||
+ (CHIPID(sih->chip) == BCM4360_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM43460_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM43526_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM4352_CHIP_ID)) {
+			/* Restore config after writing SROM */
+ si_chipcontrl_restore(sih, ccval);
+ }
+#ifdef BCMSDIO
+ } else if (BUSTYPE(bustype) == SDIO_BUS) {
+ /* enable writes to the SPROM */
+ if (sprom_cmd_sdio(osh, SBSDIO_SPROM_WEN))
+ goto done;
+ bcm_mdelay(WRITE_ENABLE_DELAY);
+ /* write srom */
+ sprom_write_sdio(osh, (uint16)(byteoff/2), value);
+ bcm_mdelay(WRITE_WORD_DELAY);
+
+ /* disable writes to the SPROM */
+ if (sprom_cmd_sdio(osh, SBSDIO_SPROM_WDS))
+ goto done;
+#endif /* BCMSDIO */
+ } else if (BUSTYPE(bustype) == SI_BUS) {
+ goto done;
+ } else {
+ goto done;
+ }
+
+ bcm_mdelay(WRITE_ENABLE_DELAY);
+ rc = 0;
+
+done:
+ return rc;
+}
+#endif /* defined(WLTEST) || defined (DHD_SPROM) || defined (BCMDBG) */
+
+/**
+ * These 'vstr_*' definitions are used to convert from CIS format to an 'NVRAM var=val' format;
+ * the NVRAM format is used throughout the rest of the firmware.
+ */
+#if !defined(BCMDONGLEHOST)
+static const char BCMATTACHDATA(vstr_manf)[] = "manf=%s";
+static const char BCMATTACHDATA(vstr_productname)[] = "productname=%s";
+static const char BCMATTACHDATA(vstr_manfid)[] = "manfid=0x%x";
+static const char BCMATTACHDATA(vstr_prodid)[] = "prodid=0x%x";
+#ifdef BCMSDIO
+static const char BCMATTACHDATA(vstr_sdmaxspeed)[] = "sdmaxspeed=%d";
+static const char BCMATTACHDATA(vstr_sdmaxblk)[][13] =
+ { "sdmaxblk0=%d", "sdmaxblk1=%d", "sdmaxblk2=%d" };
+#endif
+static const char BCMATTACHDATA(vstr_regwindowsz)[] = "regwindowsz=%d";
+static const char BCMATTACHDATA(vstr_sromrev)[] = "sromrev=%d";
+static const char BCMATTACHDATA(vstr_chiprev)[] = "chiprev=%d";
+static const char BCMATTACHDATA(vstr_subvendid)[] = "subvendid=0x%x";
+static const char BCMATTACHDATA(vstr_subdevid)[] = "subdevid=0x%x";
+static const char BCMATTACHDATA(vstr_boardrev)[] = "boardrev=0x%x";
+static const char BCMATTACHDATA(vstr_aa2g)[] = "aa2g=0x%x";
+static const char BCMATTACHDATA(vstr_aa5g)[] = "aa5g=0x%x";
+static const char BCMATTACHDATA(vstr_ag)[] = "ag%d=0x%x";
+static const char BCMATTACHDATA(vstr_cc)[] = "cc=%d";
+static const char BCMATTACHDATA(vstr_opo)[] = "opo=%d";
+static const char BCMATTACHDATA(vstr_pa0b)[][9] = { "pa0b0=%d", "pa0b1=%d", "pa0b2=%d" };
+static const char BCMATTACHDATA(vstr_pa0b_lo)[][12] =
+ { "pa0b0_lo=%d", "pa0b1_lo=%d", "pa0b2_lo=%d" };
+static const char BCMATTACHDATA(vstr_pa0itssit)[] = "pa0itssit=%d";
+static const char BCMATTACHDATA(vstr_pa0maxpwr)[] = "pa0maxpwr=%d";
+static const char BCMATTACHDATA(vstr_pa1b)[][9] = { "pa1b0=%d", "pa1b1=%d", "pa1b2=%d" };
+static const char BCMATTACHDATA(vstr_pa1lob)[][11] =
+ { "pa1lob0=%d", "pa1lob1=%d", "pa1lob2=%d" };
+static const char BCMATTACHDATA(vstr_pa1hib)[][11] =
+ { "pa1hib0=%d", "pa1hib1=%d", "pa1hib2=%d" };
+static const char BCMATTACHDATA(vstr_pa1itssit)[] = "pa1itssit=%d";
+static const char BCMATTACHDATA(vstr_pa1maxpwr)[] = "pa1maxpwr=%d";
+static const char BCMATTACHDATA(vstr_pa1lomaxpwr)[] = "pa1lomaxpwr=%d";
+static const char BCMATTACHDATA(vstr_pa1himaxpwr)[] = "pa1himaxpwr=%d";
+static const char BCMATTACHDATA(vstr_oem)[] = "oem=%02x%02x%02x%02x%02x%02x%02x%02x";
+static const char BCMATTACHDATA(vstr_boardflags)[] = "boardflags=0x%x";
+static const char BCMATTACHDATA(vstr_boardflags2)[] = "boardflags2=0x%x";
+static const char BCMATTACHDATA(vstr_boardflags3)[] = "boardflags3=0x%x";
+static const char BCMATTACHDATA(vstr_boardflags4)[] = "boardflags4=0x%x";
+static const char BCMATTACHDATA(vstr_boardflags5)[] = "boardflags5=0x%x";
+static const char BCMATTACHDATA(vstr_noccode)[] = "ccode=0x0";
+static const char BCMATTACHDATA(vstr_ccode)[] = "ccode=%c%c";
+static const char BCMATTACHDATA(vstr_cctl)[] = "cctl=0x%x";
+static const char BCMATTACHDATA(vstr_cckpo)[] = "cckpo=0x%x";
+static const char BCMATTACHDATA(vstr_ofdmpo)[] = "ofdmpo=0x%x";
+static const char BCMATTACHDATA(vstr_rdlid)[] = "rdlid=0x%x";
+#ifdef BCM_BOOTLOADER
+static const char BCMATTACHDATA(vstr_rdlrndis)[] = "rdlrndis=%d";
+static const char BCMATTACHDATA(vstr_rdlrwu)[] = "rdlrwu=%d";
+static const char BCMATTACHDATA(vstr_rdlsn)[] = "rdlsn=%d";
+#endif /* BCM_BOOTLOADER */
+static const char BCMATTACHDATA(vstr_usbfs)[] = "usbfs=%d";
+static const char BCMATTACHDATA(vstr_wpsgpio)[] = "wpsgpio=%d";
+static const char BCMATTACHDATA(vstr_wpsled)[] = "wpsled=%d";
+static const char BCMATTACHDATA(vstr_rssismf2g)[] = "rssismf2g=%d";
+static const char BCMATTACHDATA(vstr_rssismc2g)[] = "rssismc2g=%d";
+static const char BCMATTACHDATA(vstr_rssisav2g)[] = "rssisav2g=%d";
+static const char BCMATTACHDATA(vstr_bxa2g)[] = "bxa2g=%d";
+static const char BCMATTACHDATA(vstr_rssismf5g)[] = "rssismf5g=%d";
+static const char BCMATTACHDATA(vstr_rssismc5g)[] = "rssismc5g=%d";
+static const char BCMATTACHDATA(vstr_rssisav5g)[] = "rssisav5g=%d";
+static const char BCMATTACHDATA(vstr_bxa5g)[] = "bxa5g=%d";
+static const char BCMATTACHDATA(vstr_tri2g)[] = "tri2g=%d";
+static const char BCMATTACHDATA(vstr_tri5gl)[] = "tri5gl=%d";
+static const char BCMATTACHDATA(vstr_tri5g)[] = "tri5g=%d";
+static const char BCMATTACHDATA(vstr_tri5gh)[] = "tri5gh=%d";
+static const char BCMATTACHDATA(vstr_rxpo2g)[] = "rxpo2g=%d";
+static const char BCMATTACHDATA(vstr_rxpo5g)[] = "rxpo5g=%d";
+static const char BCMATTACHDATA(vstr_boardtype)[] = "boardtype=0x%x";
+static const char BCMATTACHDATA(vstr_vendid)[] = "vendid=0x%x";
+static const char BCMATTACHDATA(vstr_devid)[] = "devid=0x%x";
+static const char BCMATTACHDATA(vstr_xtalfreq)[] = "xtalfreq=%d";
+static const char BCMATTACHDATA(vstr_txchain)[] = "txchain=0x%x";
+static const char BCMATTACHDATA(vstr_rxchain)[] = "rxchain=0x%x";
+static const char BCMATTACHDATA(vstr_elna2g)[] = "elna2g=0x%x";
+static const char BCMATTACHDATA(vstr_elna5g)[] = "elna5g=0x%x";
+static const char BCMATTACHDATA(vstr_antswitch)[] = "antswitch=0x%x";
+static const char BCMATTACHDATA(vstr_regrev)[] = "regrev=0x%x";
+static const char BCMATTACHDATA(vstr_antswctl2g)[] = "antswctl2g=0x%x";
+static const char BCMATTACHDATA(vstr_triso2g)[] = "triso2g=0x%x";
+static const char BCMATTACHDATA(vstr_pdetrange2g)[] = "pdetrange2g=0x%x";
+static const char BCMATTACHDATA(vstr_extpagain2g)[] = "extpagain2g=0x%x";
+static const char BCMATTACHDATA(vstr_tssipos2g)[] = "tssipos2g=0x%x";
+static const char BCMATTACHDATA(vstr_antswctl5g)[] = "antswctl5g=0x%x";
+static const char BCMATTACHDATA(vstr_triso5g)[] = "triso5g=0x%x";
+static const char BCMATTACHDATA(vstr_pdetrange5g)[] = "pdetrange5g=0x%x";
+static const char BCMATTACHDATA(vstr_extpagain5g)[] = "extpagain5g=0x%x";
+static const char BCMATTACHDATA(vstr_tssipos5g)[] = "tssipos5g=0x%x";
+static const char BCMATTACHDATA(vstr_maxp2ga)[] = "maxp2ga%d=0x%x";
+static const char BCMATTACHDATA(vstr_itt2ga0)[] = "itt2ga0=0x%x";
+static const char BCMATTACHDATA(vstr_pa)[] = "pa%dgw%da%d=0x%x";
+static const char BCMATTACHDATA(vstr_pahl)[] = "pa%dg%cw%da%d=0x%x";
+static const char BCMATTACHDATA(vstr_maxp5ga0)[] = "maxp5ga0=0x%x";
+static const char BCMATTACHDATA(vstr_itt5ga0)[] = "itt5ga0=0x%x";
+static const char BCMATTACHDATA(vstr_maxp5gha0)[] = "maxp5gha0=0x%x";
+static const char BCMATTACHDATA(vstr_maxp5gla0)[] = "maxp5gla0=0x%x";
+static const char BCMATTACHDATA(vstr_itt2ga1)[] = "itt2ga1=0x%x";
+static const char BCMATTACHDATA(vstr_maxp5ga1)[] = "maxp5ga1=0x%x";
+static const char BCMATTACHDATA(vstr_itt5ga1)[] = "itt5ga1=0x%x";
+static const char BCMATTACHDATA(vstr_maxp5gha1)[] = "maxp5gha1=0x%x";
+static const char BCMATTACHDATA(vstr_maxp5gla1)[] = "maxp5gla1=0x%x";
+static const char BCMATTACHDATA(vstr_cck2gpo)[] = "cck2gpo=0x%x";
+static const char BCMATTACHDATA(vstr_ofdm2gpo)[] = "ofdm2gpo=0x%x";
+static const char BCMATTACHDATA(vstr_ofdm5gpo)[] = "ofdm5gpo=0x%x";
+static const char BCMATTACHDATA(vstr_ofdm5glpo)[] = "ofdm5glpo=0x%x";
+static const char BCMATTACHDATA(vstr_ofdm5ghpo)[] = "ofdm5ghpo=0x%x";
+static const char BCMATTACHDATA(vstr_cddpo)[] = "cddpo=0x%x";
+static const char BCMATTACHDATA(vstr_stbcpo)[] = "stbcpo=0x%x";
+static const char BCMATTACHDATA(vstr_bw40po)[] = "bw40po=0x%x";
+static const char BCMATTACHDATA(vstr_bwduppo)[] = "bwduppo=0x%x";
+static const char BCMATTACHDATA(vstr_mcspo)[] = "mcs%dgpo%d=0x%x";
+static const char BCMATTACHDATA(vstr_mcspohl)[] = "mcs%dg%cpo%d=0x%x";
+static const char BCMATTACHDATA(vstr_custom)[] = "customvar%d=0x%x";
+static const char BCMATTACHDATA(vstr_cckdigfilttype)[] = "cckdigfilttype=%d";
+static const char BCMATTACHDATA(vstr_usbflags)[] = "usbflags=0x%x";
+#ifdef BCM_BOOTLOADER
+static const char BCMATTACHDATA(vstr_mdio)[] = "mdio%d=0x%%x";
+static const char BCMATTACHDATA(vstr_mdioex)[] = "mdioex%d=0x%%x";
+static const char BCMATTACHDATA(vstr_brmin)[] = "brmin=0x%x";
+static const char BCMATTACHDATA(vstr_brmax)[] = "brmax=0x%x";
+static const char BCMATTACHDATA(vstr_pllreg)[] = "pll%d=0x%x";
+static const char BCMATTACHDATA(vstr_ccreg)[] = "chipc%d=0x%x";
+static const char BCMATTACHDATA(vstr_regctrl)[] = "reg%d=0x%x";
+static const char BCMATTACHDATA(vstr_time)[] = "r%dt=0x%x";
+static const char BCMATTACHDATA(vstr_depreg)[] = "r%dd=0x%x";
+static const char BCMATTACHDATA(vstr_usbpredly)[] = "usbpredly=0x%x";
+static const char BCMATTACHDATA(vstr_usbpostdly)[] = "usbpostdly=0x%x";
+static const char BCMATTACHDATA(vstr_usbrdy)[] = "usbrdy=0x%x";
+static const char BCMATTACHDATA(vstr_hsicphyctrl1)[] = "hsicphyctrl1=0x%x";
+static const char BCMATTACHDATA(vstr_hsicphyctrl2)[] = "hsicphyctrl2=0x%x";
+static const char BCMATTACHDATA(vstr_usbdevctrl)[] = "usbdevctrl=0x%x";
+static const char BCMATTACHDATA(vstr_bldr_reset_timeout)[] = "bldr_to=0x%x";
+static const char BCMATTACHDATA(vstr_muxenab)[] = "muxenab=0x%x";
+static const char BCMATTACHDATA(vstr_pubkey)[] = "pubkey=%s";
+#endif /* BCM_BOOTLOADER */
+static const char BCMATTACHDATA(vstr_boardnum)[] = "boardnum=%d";
+static const char BCMATTACHDATA(vstr_macaddr)[] = "macaddr=%s";
+static const char BCMATTACHDATA(vstr_macaddr2)[] = "macaddr2=%s";
+static const char BCMATTACHDATA(vstr_usbepnum)[] = "usbepnum=0x%x";
+#ifdef BCMUSBDEV_COMPOSITE
+static const char BCMATTACHDATA(vstr_usbdesc_composite)[] = "usbdesc_composite=0x%x";
+#endif /* BCMUSBDEV_COMPOSITE */
+static const char BCMATTACHDATA(vstr_usbutmi_ctl)[] = "usbutmi_ctl=0x%x";
+static const char BCMATTACHDATA(vstr_usbssphy_utmi_ctl0)[] = "usbssphy_utmi_ctl0=0x%x";
+static const char BCMATTACHDATA(vstr_usbssphy_utmi_ctl1)[] = "usbssphy_utmi_ctl1=0x%x";
+static const char BCMATTACHDATA(vstr_usbssphy_utmi_ctl2)[] = "usbssphy_utmi_ctl2=0x%x";
+static const char BCMATTACHDATA(vstr_usbssphy_sleep0)[] = "usbssphy_sleep0=0x%x";
+static const char BCMATTACHDATA(vstr_usbssphy_sleep1)[] = "usbssphy_sleep1=0x%x";
+static const char BCMATTACHDATA(vstr_usbssphy_sleep2)[] = "usbssphy_sleep2=0x%x";
+static const char BCMATTACHDATA(vstr_usbssphy_sleep3)[] = "usbssphy_sleep3=0x%x";
+static const char BCMATTACHDATA(vstr_usbssphy_mdio)[] = "usbssmdio%d=0x%x,0x%x,0x%x,0x%x";
+static const char BCMATTACHDATA(vstr_usb30phy_noss)[] = "usbnoss=0x%x";
+static const char BCMATTACHDATA(vstr_usb30phy_u1u2)[] = "usb30u1u2=0x%x";
+static const char BCMATTACHDATA(vstr_usb30phy_regs)[] = "usb30regs%d=0x%x,0x%x,0x%x,0x%x";
+
+/* Power per rate for SROM V9 */
+static const char BCMATTACHDATA(vstr_cckbw202gpo)[][21] =
+ { "cckbw202gpo=0x%x", "cckbw20ul2gpo=0x%x", "cckbw20in802gpo=0x%x" };
+static const char BCMATTACHDATA(vstr_legofdmbw202gpo)[][23] =
+ { "legofdmbw202gpo=0x%x", "legofdmbw20ul2gpo=0x%x" };
+static const char BCMATTACHDATA(vstr_legofdmbw205gpo)[][24] =
+ { "legofdmbw205glpo=0x%x", "legofdmbw20ul5glpo=0x%x",
+ "legofdmbw205gmpo=0x%x", "legofdmbw20ul5gmpo=0x%x",
+ "legofdmbw205ghpo=0x%x", "legofdmbw20ul5ghpo=0x%x" };
+
+static const char BCMATTACHDATA(vstr_mcs2gpo)[][19] =
+{ "mcsbw202gpo=0x%x", "mcsbw20ul2gpo=0x%x", "mcsbw402gpo=0x%x", "mcsbw802gpo=0x%x" };
+
+static const char BCMATTACHDATA(vstr_mcs5glpo)[][20] =
+ { "mcsbw205glpo=0x%x", "mcsbw20ul5glpo=0x%x", "mcsbw405glpo=0x%x" };
+
+static const char BCMATTACHDATA(vstr_mcs5gmpo)[][20] =
+ { "mcsbw205gmpo=0x%x", "mcsbw20ul5gmpo=0x%x", "mcsbw405gmpo=0x%x" };
+
+static const char BCMATTACHDATA(vstr_mcs5ghpo)[][20] =
+ { "mcsbw205ghpo=0x%x", "mcsbw20ul5ghpo=0x%x", "mcsbw405ghpo=0x%x" };
+
+static const char BCMATTACHDATA(vstr_mcs32po)[] = "mcs32po=0x%x";
+static const char BCMATTACHDATA(vstr_legofdm40duppo)[] = "legofdm40duppo=0x%x";
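+
+/*
+ * Illustrative note: each vstr_* template above is expanded printf-style into
+ * the var buffer while parsing CIS, conceptually (the varbuf_append() usage
+ * shown here is a sketch):
+ *
+ *	varbuf_append(&b, vstr_mcs2gpo[0], w32);  ->  "mcsbw202gpo=0x<w32>"
+ *	varbuf_append(&b, vstr_mcs32po, w32);     ->  "mcs32po=0x<w32>"
+ */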
+
+/* SROM V11 */
+static const char BCMATTACHDATA(vstr_tempthresh)[] = "tempthresh=%d"; /* HNBU_TEMPTHRESH */
+static const char BCMATTACHDATA(vstr_temps_period)[] = "temps_period=%d";
+static const char BCMATTACHDATA(vstr_temps_hysteresis)[] = "temps_hysteresis=%d";
+static const char BCMATTACHDATA(vstr_tempoffset)[] = "tempoffset=%d";
+static const char BCMATTACHDATA(vstr_tempsense_slope)[] = "tempsense_slope=%d";
+static const char BCMATTACHDATA(vstr_temp_corrx)[] = "tempcorrx=%d";
+static const char BCMATTACHDATA(vstr_tempsense_option)[] = "tempsense_option=%d";
+static const char BCMATTACHDATA(vstr_phycal_tempdelta)[] = "phycal_tempdelta=%d";
+static const char BCMATTACHDATA(vstr_tssiposslopeg)[] = "tssiposslope%dg=%d"; /* HNBU_FEM_CFG */
+static const char BCMATTACHDATA(vstr_epagaing)[] = "epagain%dg=%d";
+static const char BCMATTACHDATA(vstr_pdgaing)[] = "pdgain%dg=%d";
+static const char BCMATTACHDATA(vstr_tworangetssi)[] = "tworangetssi%dg=%d";
+static const char BCMATTACHDATA(vstr_papdcap)[] = "papdcap%dg=%d";
+static const char BCMATTACHDATA(vstr_femctrl)[] = "femctrl=%d";
+static const char BCMATTACHDATA(vstr_gainctrlsph)[] = "gainctrlsph=%d";
+static const char BCMATTACHDATA(vstr_subband5gver)[] = "subband5gver=%d"; /* HNBU_ACPA_CX */
+static const char BCMATTACHDATA(vstr_pa2ga)[] = "pa2ga%d=0x%x,0x%x,0x%x";
+static const char BCMATTACHDATA(vstr_maxp5ga)[] = "maxp5ga%d=0x%x,0x%x,0x%x,0x%x";
+static const char BCMATTACHDATA(vstr_pa5ga)[] = "pa5ga%d=0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,"
+ "0x%x,0x%x,0x%x,0x%x,0x%x,0x%x";
+static const char BCMATTACHDATA(vstr_subband6gver)[] = "subband6gver=%d"; /* HNBU_ACPA_CX */
+static const char BCMATTACHDATA(vstr_maxp6ga)[] = "maxp6ga%d=0x%x,0x%x,0x%x,0x%x,0x%x,0x%x";
+static const char BCMATTACHDATA(vstr_pa6ga)[] = "pa6ga%d=0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,"
+ "0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x";
+static const char BCMATTACHDATA(vstr_pa2gccka)[] = "pa2gccka%d=0x%x,0x%x,0x%x";
+static const char BCMATTACHDATA(vstr_pa5gbw40a)[] = "pa5gbw40a%d=0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,"
+ "0x%x,0x%x,0x%x,0x%x,0x%x,0x%x";
+static const char BCMATTACHDATA(vstr_pa5gbw80a)[] = "pa5gbw80a%d=0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,"
+ "0x%x,0x%x,0x%x,0x%x,0x%x,0x%x";
+static const char BCMATTACHDATA(vstr_pa5gbw4080a)[] = "pa5gbw4080a%d=0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,"
+ "0x%x,0x%x,0x%x,0x%x,0x%x,0x%x";
+static const char BCMATTACHDATA(vstr_rxgainsgelnagaina)[] = "rxgains%dgelnagaina%d=%d";
+static const char BCMATTACHDATA(vstr_rxgainsgtrisoa)[] = "rxgains%dgtrisoa%d=%d";
+static const char BCMATTACHDATA(vstr_rxgainsgtrelnabypa)[] = "rxgains%dgtrelnabypa%d=%d";
+static const char BCMATTACHDATA(vstr_rxgainsgxelnagaina)[] = "rxgains%dg%celnagaina%d=%d";
+static const char BCMATTACHDATA(vstr_rxgainsgxtrisoa)[] = "rxgains%dg%ctrisoa%d=%d";
+static const char BCMATTACHDATA(vstr_rxgainsgxtrelnabypa)[] = "rxgains%dg%ctrelnabypa%d=%d";
+static const char BCMATTACHDATA(vstr_measpower)[] = "measpower=0x%x"; /* HNBU_MEAS_PWR */
+static const char BCMATTACHDATA(vstr_measpowerX)[] = "measpower%d=0x%x";
+static const char BCMATTACHDATA(vstr_pdoffsetma)[] = "pdoffset%dma%d=0x%x"; /* HNBU_PDOFF */
+static const char BCMATTACHDATA(vstr_pdoffset2gma)[] = "pdoffset2g%dma%d=0x%x"; /* HNBU_PDOFF_2G */
+static const char BCMATTACHDATA(vstr_pdoffset2gmvalid)[] = "pdoffset2g%dmvalid=0x%x";
+static const char BCMATTACHDATA(vstr_rawtempsense)[] = "rawtempsense=0x%x";
+/* HNBU_ACPPR_2GPO */
+static const char BCMATTACHDATA(vstr_dot11agofdmhrbw202gpo)[] = "dot11agofdmhrbw202gpo=0x%x";
+static const char BCMATTACHDATA(vstr_ofdmlrbw202gpo)[] = "ofdmlrbw202gpo=0x%x";
+static const char BCMATTACHDATA(vstr_mcsbw805gpo)[] = "mcsbw805g%cpo=0x%x"; /* HNBU_ACPPR_5GPO */
+static const char BCMATTACHDATA(vstr_mcsbw1605gpo)[] = "mcsbw1605g%cpo=0x%x";
+static const char BCMATTACHDATA(vstr_mcsbw80p805gpo)[] = "mcsbw80p805g%cpo=0x%x";
+static const char BCMATTACHDATA(vstr_mcsbw80p805g1po)[] = "mcsbw80p805g%c1po=0x%x";
+static const char BCMATTACHDATA(vstr_mcsbw1605g1po)[] = "mcsbw1605g%c1po=0x%x";
+static const char BCMATTACHDATA(vstr_mcsbw805g1po)[] = "mcsbw805g%c1po=0x%x";
+static const char BCMATTACHDATA(vstr_mcsbw405g1po)[] = "mcsbw405g%c1po=0x%x";
+static const char BCMATTACHDATA(vstr_mcsbw205g1po)[] = "mcsbw205g%c1po=0x%x";
+static const char BCMATTACHDATA(vstr_mcslr5gpo)[] = "mcslr5g%cpo=0x%x";
+static const char BCMATTACHDATA(vstr_mcslr5g1po)[] = "mcslr5g%c1po=0x%x";
+static const char BCMATTACHDATA(vstr_mcslr5g80p80po)[] = "mcslr5g80p80po=0x%x";
+/* HNBU_ACPPR_SBPO */
+static const char BCMATTACHDATA(vstr_sb20in40rpo)[] = "sb20in40%crpo=0x%x";
+/* HNBU_ACPPR_SBPO */
+static const char BCMATTACHDATA(vstr_sb20in40and80rpo)[] = "sb20in40and80%crpo=0x%x";
+static const char BCMATTACHDATA(vstr_sb20in80and160r5gpo)[] = "sb20in80and160%cr5g%cpo=0x%x";
+static const char BCMATTACHDATA(vstr_sb20in80and160r5g1po)[] = "sb20in80and160%cr5g%c1po=0x%x";
+static const char BCMATTACHDATA(vstr_sb2040and80in80p80r5gpo)[] =
+ "sb2040and80in80p80%cr5g%cpo=0x%x";
+static const char BCMATTACHDATA(vstr_sb2040and80in80p80r5g1po)[] =
+ "sb2040and80in80p80%cr5g%c1po=0x%x";
+static const char BCMATTACHDATA(vstr_sb20in40dot11agofdm2gpo)[] = "sb20in40dot11agofdm2gpo=0x%x";
+static const char BCMATTACHDATA(vstr_sb20in80dot11agofdm2gpo)[] = "sb20in80dot11agofdm2gpo=0x%x";
+static const char BCMATTACHDATA(vstr_sb20in40ofdmlrbw202gpo)[] = "sb20in40ofdmlrbw202gpo=0x%x";
+static const char BCMATTACHDATA(vstr_sb20in80ofdmlrbw202gpo)[] = "sb20in80ofdmlrbw202gpo=0x%x";
+static const char BCMATTACHDATA(vstr_sb20in80p80r5gpo)[] = "sb20in80p80%cr5gpo=0x%x";
+static const char BCMATTACHDATA(vstr_sb40and80r5gpo)[] = "sb40and80%cr5g%cpo=0x%x";
+static const char BCMATTACHDATA(vstr_sb40and80r5g1po)[] = "sb40and80%cr5g%c1po=0x%x";
+static const char BCMATTACHDATA(vstr_dot11agduprpo)[] = "dot11agdup%crpo=0x%x";
+static const char BCMATTACHDATA(vstr_dot11agduppo)[] = "dot11agduppo=0x%x";
+static const char BCMATTACHDATA(vstr_noiselvl2ga)[] = "noiselvl2ga%d=%d"; /* HNBU_NOISELVL */
+static const char BCMATTACHDATA(vstr_noiselvl5ga)[] = "noiselvl5ga%d=%d,%d,%d,%d";
+/* HNBU_RXGAIN_ERR */
+static const char BCMATTACHDATA(vstr_rxgainerr2ga)[] = "rxgainerr2ga%d=0x%x";
+static const char BCMATTACHDATA(vstr_rxgainerr5ga)[] = "rxgainerr5ga%d=0x%x,0x%x,0x%x,0x%x";
+static const char BCMATTACHDATA(vstr_agbg)[] = "agbg%d=0x%x"; /* HNBU_AGBGA */
+static const char BCMATTACHDATA(vstr_aga)[] = "aga%d=0x%x";
+static const char BCMATTACHDATA(vstr_txduty_ofdm)[] = "tx_duty_cycle_ofdm_%d_5g=%d";
+static const char BCMATTACHDATA(vstr_txduty_thresh)[] = "tx_duty_cycle_thresh_%d_5g=%d";
+static const char BCMATTACHDATA(vstr_paparambwver)[] = "paparambwver=%d";
+
+static const char BCMATTACHDATA(vstr_uuid)[] = "uuid=%s";
+
+static const char BCMATTACHDATA(vstr_wowlgpio)[] = "wowl_gpio=%d";
+static const char BCMATTACHDATA(vstr_wowlgpiopol)[] = "wowl_gpiopol=%d";
+
+static const char BCMATTACHDATA(rstr_ag0)[] = "ag0";
+static const char BCMATTACHDATA(rstr_sromrev)[] = "sromrev";
+
+static const char BCMATTACHDATA(vstr_paparamrpcalvars)[][20] =
+ {"rpcal2g=0x%x", "rpcal5gb0=0x%x", "rpcal5gb1=0x%x",
+ "rpcal5gb2=0x%x", "rpcal5gb3=0x%x"};
+
+static const char BCMATTACHDATA(vstr_gpdn)[] = "gpdn=0x%x";
+
+/* SROM V13 PA */
+static const char BCMATTACHDATA(vstr_sr13pa2ga)[] = "pa2ga%d=0x%x,0x%x,0x%x,0x%x";
+static const char BCMATTACHDATA(vstr_maxp5gba)[] = "maxp5gb%da%d=0x%x";
+static const char BCMATTACHDATA(vstr_sr13pa5ga)[] = "pa5ga%d=%s";
+static const char BCMATTACHDATA(vstr_sr13pa5gbwa)[] = "pa5g%da%d=%s";
+static const char BCMATTACHDATA(vstr_pa2g40a)[] = "pa2g40a%d=0x%x,0x%x,0x%x,0x%x";
+
+/* RSSI Cal parameters */
+static const char BCMATTACHDATA(vstr_rssicalfrqg)[] =
+ "rssi_cal_freq_grp_2g=0x%x0x%x0x%x0x%x0x%x0x%x0x%x";
+static const char BCMATTACHDATA(vstr_rssidelta2g)[] =
+ "rssi_delta_2gb%d=%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d";
+static const char BCMATTACHDATA(vstr_rssidelta5g)[] =
+ "rssi_delta_5g%s=%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d";
+
+uint8 patch_pair = 0;
+
+/* For dongle HW, accept partial calibration parameters */
+#if defined(BCMSDIODEV) || defined(BCMUSBDEV) || defined(BCMDONGLEHOST)
+#define BCMDONGLECASE(n) case n:
+#else
+#define BCMDONGLECASE(n)
+#endif
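+
+/* A minimal sketch (example only, not part of the driver) of how
+ * BCMDONGLECASE() is used further below: on dongle builds it expands to a
+ * real "case n:" label, so truncated tuple lengths are also accepted, while
+ * on other builds it expands to nothing and those lengths hit the default
+ * ASSERT instead:
+ *
+ *	switch (tlen) {
+ *	case 9:			// full-length tuple, always parsed
+ *	BCMDONGLECASE(8)	// short tuple, parsed on dongle builds only
+ *		... parse fields ...
+ *		break;
+ *	}
+ */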
+
+#ifdef BCM_BOOTLOADER
+/* The format of the PMUREGS OTP tuple:
+ * 1 byte  -> the lower 5 bits hold the address of the register;
+ *            the higher 3 bits hold the mode of the register
+ *            (PLL, ChipCtrl, RegCtrl, UpDwn or Dependency mask)
+ * 4 bytes -> the value of the register to be updated.
+ */
+#define PMUREGS_MODE_MASK 0xE0
+#define PMUREGS_MODE_SHIFT 5
+#define PMUREGS_ADDR_MASK 0x1F
+#define PMUREGS_TPL_SIZE 5
+
+enum {
+ PMU_PLLREG_MODE,
+ PMU_CCREG_MODE,
+ PMU_VOLTREG_MODE,
+ PMU_RES_TIME_MODE,
+ PMU_RESDEPEND_MODE
+};
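+
+/* Worked example (illustrative): a mode/address byte of 0x25 decodes as
+ * mode = (0x25 & PMUREGS_MODE_MASK) >> PMUREGS_MODE_SHIFT = 1, i.e.
+ * PMU_CCREG_MODE, and addr = 0x25 & PMUREGS_ADDR_MASK = 0x05, so the four
+ * bytes that follow carry the value for chip-control register 5.
+ */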
+
+#define USBREGS_TPL_SIZE 5
+enum {
+ USB_DEV_CTRL_REG,
+ HSIC_PHY_CTRL1_REG,
+ HSIC_PHY_CTRL2_REG
+};
+
+#define USBRDY_DLY_TYPE		0x8000	/* Bit indicating whether the word is a pre- or post-delay value */
+#define USBRDY_DLY_MASK 0x7FFF /* Bits indicating the amount of delay */
+#define USBRDY_MAXOTP_SIZE 5 /* Max size of the OTP parameter */
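+
+/* Worked example (illustrative): a 16-bit delay word of 0x8064 has
+ * USBRDY_DLY_TYPE (bit 15) set and therefore encodes a post-delay of
+ * 0x0064 = 100, while 0x0032 has bit 15 clear and encodes a pre-delay of 50.
+ */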
+
+#endif /* BCM_BOOTLOADER */
+
+static uint
+BCMATTACHFN(get_max_cis_size)(si_t *sih)
+{
+ uint max_cis_size;
+ void *oh;
+
+ max_cis_size = (sih && sih->ccrev >= 49) ? CIS_SIZE_12K : CIS_SIZE;
+ if (sih && (oh = otp_init(sih)) != NULL) {
+ max_cis_size -= otp_avsbitslen(oh);
+ }
+ return max_cis_size;
+}
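+
+/* Illustrative note: the CIS budget starts at CIS_SIZE_12K for chipcommon
+ * rev >= 49 and at CIS_SIZE otherwise; any OTP bits reserved for AVS
+ * (otp_avsbitslen()) are subtracted since they cannot hold CIS tuples.
+ */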
+
+#ifndef BCM_BOOTLOADER
+static uint32
+BCMATTACHFN(srom_data2value)(uint8 *p, uint8 len)
+{
+ uint8 pos = 0;
+ uint32 value = 0;
+
+ ASSERT(len <= 4);
+
+ while (pos < len) {
+ value += (p[pos] << (pos * 8));
+ pos++;
+ }
+
+ return value;
+}
+#endif /* BCM_BOOTLOADER */
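+
+/* Worked example (illustrative): srom_data2value() assembles bytes in
+ * little-endian order, so p = {0x78, 0x56, 0x34, 0x12} with len = 4 yields
+ * 0x12345678.
+ */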
+
+/**
+ * Both SROM and OTP contain variables in 'CIS' format, whereas the rest of the firmware works with
+ * 'variable/value' string pairs.
+ */
+int
+BCMATTACHFN(srom_parsecis)(si_t *sih, osl_t *osh, uint8 *pcis[], uint ciscnt, char **vars,
+ uint *count)
+{
+ char eabuf[32];
+ char eabuf2[32];
+ char *base;
+ varbuf_t b;
+ uint8 *cis, tup, tlen, sromrev = 1;
+ uint i;
+ uint16 j;
+#ifndef BCM_BOOTLOADER
+ bool ag_init = FALSE;
+#endif
+ uint32 w32;
+ uint funcid;
+ uint cisnum;
+ int32 boardnum;
+ int err;
+ bool standard_cis;
+ uint max_cis_size;
+ uint var_cis_size = 0;
+
+ ASSERT(count != NULL);
+
+ if (vars == NULL) {
+ ASSERT(0); /* crash debug images for investigation */
+ return BCME_BADARG;
+ }
+
+ boardnum = -1;
+
+ /* freed in same function */
+ max_cis_size = get_max_cis_size(sih);
+ var_cis_size = *count + ((max_cis_size + 2u) * ciscnt);
+
+ ASSERT(var_cis_size <= MAXSZ_NVRAM_VARS);
+
+ base = MALLOC_NOPERSIST(osh, var_cis_size);
+ ASSERT(base != NULL);
+ if (!base)
+ return -2;
+
+ varbuf_init(&b, base, var_cis_size);
+ bzero(base, var_cis_size);
+ /* Append from vars if there's already something inside */
+ if (*vars && **vars && (*count >= 3)) {
+ /* back off \0 at the end, leaving only one \0 for the last param */
+ while (((*vars)[(*count)-1] == '\0') && ((*vars)[(*count)-2] == '\0'))
+ (*count)--;
+
+ bcopy(*vars, base, *count);
+ b.buf += *count;
+ }
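+	/* Worked example (illustrative): with an existing buffer "a=1\0b=2\0\0"
+	 * and *count == 9, the loop above stops at *count == 8, so the new
+	 * variables are appended directly after the final "b=2\0" entry,
+	 * leaving a single NUL terminator.
+	 */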
+ eabuf[0] = '\0';
+ eabuf2[0] = '\0';
+ for (cisnum = 0; cisnum < ciscnt; cisnum++) {
+ cis = *pcis++;
+ i = 0;
+ funcid = 0;
+ standard_cis = TRUE;
+ do {
+ if (standard_cis) {
+ tup = cis[i++];
+ if (tup == CISTPL_NULL || tup == CISTPL_END)
+ tlen = 0;
+ else
+ tlen = cis[i++];
+ } else {
+ if (cis[i] == CISTPL_NULL || cis[i] == CISTPL_END) {
+ tlen = 0;
+ tup = cis[i];
+ } else {
+ tlen = cis[i];
+ tup = CISTPL_BRCM_HNBU;
+ }
+ ++i;
+ }
+ if ((i + tlen) >= max_cis_size)
+ break;
+
+ switch (tup) {
+ case CISTPL_VERS_1:
+ /* assume the strings are good if the version field checks out */
+ if (((cis[i + 1] << 8) + cis[i]) >= 0x0008) {
+ varbuf_append(&b, vstr_manf, &cis[i + 2]);
+ varbuf_append(&b, vstr_productname,
+ &cis[i + 3 + strlen((char *)&cis[i + 2])]);
+ break;
+ }
+
+ case CISTPL_MANFID:
+ varbuf_append(&b, vstr_manfid, (cis[i + 1] << 8) + cis[i]);
+ varbuf_append(&b, vstr_prodid, (cis[i + 3] << 8) + cis[i + 2]);
+ break;
+
+ case CISTPL_FUNCID:
+ funcid = cis[i];
+ break;
+
+ case CISTPL_FUNCE:
+ switch (funcid) {
+ case CISTPL_FID_SDIO:
+#ifdef BCMSDIO
+ if (cis[i] == 0) {
+ uint8 spd = cis[i + 3];
+ static int lbase[] = {
+ -1, 10, 12, 13, 15, 20, 25, 30,
+ 35, 40, 45, 50, 55, 60, 70, 80
+ };
+ static int mult[] = {
+ 10, 100, 1000, 10000,
+ -1, -1, -1, -1
+ };
+ ASSERT((mult[spd & 0x7] != -1) &&
+ (lbase[(spd >> 3) & 0x0f]));
+ varbuf_append(&b, vstr_sdmaxblk[0],
+ (cis[i + 2] << 8) + cis[i + 1]);
+ varbuf_append(&b, vstr_sdmaxspeed,
+ (mult[spd & 0x7] *
+ lbase[(spd >> 3) & 0x0f]));
+ } else if (cis[i] == 1) {
+ varbuf_append(&b, vstr_sdmaxblk[cisnum],
+ (cis[i + 13] << 8) | cis[i + 12]);
+ }
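+					/* Worked example (illustrative): a speed byte of
+					 * 0x32 gives mult[0x32 & 0x7] = mult[2] = 1000 and
+					 * lbase[(0x32 >> 3) & 0xf] = lbase[6] = 25, so
+					 * sdmaxspeed = 25000, i.e. 25 Mbit/s under the
+					 * standard SDIO speed encoding.
+					 */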
+#endif /* BCMSDIO */
+ funcid = 0;
+ break;
+ default:
+ /* set macaddr if HNBU_MACADDR not seen yet */
+ if (eabuf[0] == '\0' && cis[i] == LAN_NID &&
+ !(ETHER_ISNULLADDR(&cis[i + 2])) &&
+ !(ETHER_ISMULTI(&cis[i + 2]))) {
+ ASSERT(cis[i + 1] == ETHER_ADDR_LEN);
+ bcm_ether_ntoa((struct ether_addr *)&cis[i + 2],
+ eabuf);
+
+ /* set boardnum if HNBU_BOARDNUM not seen yet */
+ if (boardnum == -1)
+ boardnum = (cis[i + 6] << 8) + cis[i + 7];
+ }
+ break;
+ }
+ break;
+
+ case CISTPL_CFTABLE:
+ varbuf_append(&b, vstr_regwindowsz, (cis[i + 7] << 8) | cis[i + 6]);
+ break;
+
+ case CISTPL_BRCM_HNBU:
+ switch (cis[i]) {
+ case HNBU_SROMREV:
+ sromrev = cis[i + 1];
+ varbuf_append(&b, vstr_sromrev, sromrev);
+ break;
+
+ case HNBU_XTALFREQ:
+ varbuf_append(&b, vstr_xtalfreq,
+ (cis[i + 4] << 24) |
+ (cis[i + 3] << 16) |
+ (cis[i + 2] << 8) |
+ cis[i + 1]);
+ break;
+
+ case HNBU_CHIPID:
+ varbuf_append(&b, vstr_vendid, (cis[i + 2] << 8) +
+ cis[i + 1]);
+ varbuf_append(&b, vstr_devid, (cis[i + 4] << 8) +
+ cis[i + 3]);
+ if (tlen >= 7) {
+ varbuf_append(&b, vstr_chiprev,
+ (cis[i + 6] << 8) + cis[i + 5]);
+ }
+ if (tlen >= 9) {
+ varbuf_append(&b, vstr_subvendid,
+ (cis[i + 8] << 8) + cis[i + 7]);
+ }
+ if (tlen >= 11) {
+ varbuf_append(&b, vstr_subdevid,
+ (cis[i + 10] << 8) + cis[i + 9]);
+ /* subdevid doubles for boardtype */
+ varbuf_append(&b, vstr_boardtype,
+ (cis[i + 10] << 8) + cis[i + 9]);
+ }
+ break;
+
+ case HNBU_BOARDNUM:
+ boardnum = (cis[i + 2] << 8) + cis[i + 1];
+ break;
+
+ case HNBU_PATCH: {
+ char vstr_paddr[16];
+ char vstr_pdata[16];
+
+ /* retrieve the patch pairs
+ * from tlen/6; where 6 is
+ * sizeof(patch addr(2)) +
+ * sizeof(patch data(4)).
+ */
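+				/* Worked example (illustrative): tlen == 13
+				 * (1 subtype byte + 2 * 6 payload bytes) gives
+				 * patch_pair = 13/6 = 2; each pair is decoded
+				 * below as a 16-bit little-endian address plus a
+				 * 32-bit little-endian data word.
+				 */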
+ patch_pair = tlen/6;
+
+ for (j = 0; j < patch_pair; j++) {
+ snprintf(vstr_paddr, sizeof(vstr_paddr),
+ rstr_paddr, j);
+ snprintf(vstr_pdata, sizeof(vstr_pdata),
+ rstr_pdata, j);
+
+ varbuf_append(&b, vstr_paddr,
+ (cis[i + (j*6) + 2] << 8) |
+ cis[i + (j*6) + 1]);
+
+ varbuf_append(&b, vstr_pdata,
+ (cis[i + (j*6) + 6] << 24) |
+ (cis[i + (j*6) + 5] << 16) |
+ (cis[i + (j*6) + 4] << 8) |
+ cis[i + (j*6) + 3]);
+ }
+ break;
+ }
+
+ case HNBU_BOARDREV:
+ if (tlen == 2)
+ varbuf_append(&b, vstr_boardrev, cis[i + 1]);
+ else
+ varbuf_append(&b, vstr_boardrev,
+ (cis[i + 2] << 8) + cis[i + 1]);
+ break;
+
+ case HNBU_BOARDFLAGS:
+ w32 = (cis[i + 2] << 8) + cis[i + 1];
+ if (tlen >= 5)
+ w32 |= ((cis[i + 4] << 24) + (cis[i + 3] << 16));
+ varbuf_append(&b, vstr_boardflags, w32);
+
+ if (tlen >= 7) {
+ w32 = (cis[i + 6] << 8) + cis[i + 5];
+ if (tlen >= 9)
+ w32 |= ((cis[i + 8] << 24) +
+ (cis[i + 7] << 16));
+ varbuf_append(&b, vstr_boardflags2, w32);
+ }
+ if (tlen >= 11) {
+ w32 = (cis[i + 10] << 8) + cis[i + 9];
+ if (tlen >= 13)
+ w32 |= ((cis[i + 12] << 24) +
+ (cis[i + 11] << 16));
+ varbuf_append(&b, vstr_boardflags3, w32);
+ }
+ if (tlen >= 15) {
+ w32 = (cis[i + 14] << 8) + cis[i + 13];
+ if (tlen >= 17)
+ w32 |= ((cis[i + 16] << 24) +
+ (cis[i + 15] << 16));
+ varbuf_append(&b, vstr_boardflags4, w32);
+ }
+ if (tlen >= 19) {
+ w32 = (cis[i + 18] << 8) + cis[i + 17];
+ if (tlen >= 21)
+ w32 |= ((cis[i + 20] << 24) +
+ (cis[i + 19] << 16));
+ varbuf_append(&b, vstr_boardflags5, w32);
+ }
+ break;
+
+ case HNBU_USBFS:
+ varbuf_append(&b, vstr_usbfs, cis[i + 1]);
+ break;
+
+ case HNBU_BOARDTYPE:
+ varbuf_append(&b, vstr_boardtype,
+ (cis[i + 2] << 8) + cis[i + 1]);
+ break;
+
+ case HNBU_HNBUCIS:
+ /*
+ * what follows is a nonstandard HNBU CIS
+ * that lacks CISTPL_BRCM_HNBU tags
+ *
+ * skip 0xff (end of standard CIS)
+ * after this tuple
+ */
+ tlen++;
+ standard_cis = FALSE;
+ break;
+
+ case HNBU_USBEPNUM:
+ varbuf_append(&b, vstr_usbepnum,
+ (cis[i + 2] << 8) | cis[i + 1]);
+ break;
+
+ case HNBU_PATCH_AUTOINC: {
+ char vstr_paddr[16];
+ char vstr_pdata[16];
+ uint32 addr_inc;
+ uint8 pcnt;
+
+ addr_inc = (cis[i + 4] << 24) |
+ (cis[i + 3] << 16) |
+ (cis[i + 2] << 8) |
+ (cis[i + 1]);
+
+ pcnt = (tlen - 5)/4;
+ for (j = 0; j < pcnt; j++) {
+ snprintf(vstr_paddr, sizeof(vstr_paddr),
+ rstr_paddr, j + patch_pair);
+ snprintf(vstr_pdata, sizeof(vstr_pdata),
+ rstr_pdata, j + patch_pair);
+
+ varbuf_append(&b, vstr_paddr, addr_inc);
+ varbuf_append(&b, vstr_pdata,
+ (cis[i + (j*4) + 8] << 24) |
+ (cis[i + (j*4) + 7] << 16) |
+ (cis[i + (j*4) + 6] << 8) |
+ cis[i + (j*4) + 5]);
+ addr_inc += 4;
+ }
+ patch_pair += pcnt;
+ break;
+ }
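+				/* Illustrative note: HNBU_PATCH_AUTOINC above carries
+				 * one 32-bit base address followed by 32-bit data
+				 * words; the words are assigned consecutive addresses
+				 * (base, base + 4, base + 8, ...) so only the base
+				 * address has to be stored in OTP.
+				 */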
+ case HNBU_PATCH2: {
+ char vstr_paddr[16];
+ char vstr_pdata[16];
+
+ /* retrieve the patch pairs
+ * from tlen/8; where 8 is
+ * sizeof(patch addr(4)) +
+ * sizeof(patch data(4)).
+ */
+ patch_pair = tlen/8;
+
+ for (j = 0; j < patch_pair; j++) {
+ snprintf(vstr_paddr, sizeof(vstr_paddr),
+ rstr_paddr, j);
+ snprintf(vstr_pdata, sizeof(vstr_pdata),
+ rstr_pdata, j);
+
+ varbuf_append(&b, vstr_paddr,
+ (cis[i + (j*8) + 4] << 24) |
+ (cis[i + (j*8) + 3] << 16) |
+ (cis[i + (j*8) + 2] << 8) |
+ cis[i + (j*8) + 1]);
+
+ varbuf_append(&b, vstr_pdata,
+ (cis[i + (j*8) + 8] << 24) |
+ (cis[i + (j*8) + 7] << 16) |
+ (cis[i + (j*8) + 6] << 8) |
+ cis[i + (j*8) + 5]);
+ }
+ break;
+ }
+ case HNBU_PATCH_AUTOINC8: {
+ char vstr_paddr[16];
+ char vstr_pdatah[16];
+ char vstr_pdatal[16];
+ uint32 addr_inc;
+ uint8 pcnt;
+
+ addr_inc = (cis[i + 4] << 24) |
+ (cis[i + 3] << 16) |
+ (cis[i + 2] << 8) |
+ (cis[i + 1]);
+
+ pcnt = (tlen - 5)/8;
+ for (j = 0; j < pcnt; j++) {
+ snprintf(vstr_paddr, sizeof(vstr_paddr),
+ rstr_paddr, j + patch_pair);
+ snprintf(vstr_pdatah, sizeof(vstr_pdatah),
+ rstr_pdatah, j + patch_pair);
+ snprintf(vstr_pdatal, sizeof(vstr_pdatal),
+ rstr_pdatal, j + patch_pair);
+
+ varbuf_append(&b, vstr_paddr, addr_inc);
+ varbuf_append(&b, vstr_pdatal,
+ (cis[i + (j*8) + 8] << 24) |
+ (cis[i + (j*8) + 7] << 16) |
+ (cis[i + (j*8) + 6] << 8) |
+ cis[i + (j*8) + 5]);
+ varbuf_append(&b, vstr_pdatah,
+ (cis[i + (j*8) + 12] << 24) |
+ (cis[i + (j*8) + 11] << 16) |
+ (cis[i + (j*8) + 10] << 8) |
+ cis[i + (j*8) + 9]);
+ addr_inc += 8;
+ }
+ patch_pair += pcnt;
+ break;
+ }
+ case HNBU_PATCH8: {
+ char vstr_paddr[16];
+ char vstr_pdatah[16];
+ char vstr_pdatal[16];
+
+				/* retrieve the patch pairs
+				 * from tlen/12; where 12 is
+				 * sizeof(patch addr(4)) +
+				 * sizeof(patch data(8)).
+				 */
+ patch_pair = tlen/12;
+
+ for (j = 0; j < patch_pair; j++) {
+ snprintf(vstr_paddr, sizeof(vstr_paddr),
+ rstr_paddr, j);
+ snprintf(vstr_pdatah, sizeof(vstr_pdatah),
+ rstr_pdatah, j);
+ snprintf(vstr_pdatal, sizeof(vstr_pdatal),
+ rstr_pdatal, j);
+
+ varbuf_append(&b, vstr_paddr,
+ (cis[i + (j*12) + 4] << 24) |
+ (cis[i + (j*12) + 3] << 16) |
+ (cis[i + (j*12) + 2] << 8) |
+ cis[i + (j*12) + 1]);
+
+ varbuf_append(&b, vstr_pdatal,
+ (cis[i + (j*12) + 8] << 24) |
+ (cis[i + (j*12) + 7] << 16) |
+ (cis[i + (j*12) + 6] << 8) |
+ cis[i + (j*12) + 5]);
+
+ varbuf_append(&b, vstr_pdatah,
+ (cis[i + (j*12) + 12] << 24) |
+ (cis[i + (j*12) + 11] << 16) |
+ (cis[i + (j*12) + 10] << 8) |
+ cis[i + (j*12) + 9]);
+ }
+ break;
+ }
+ case HNBU_USBFLAGS:
+ varbuf_append(&b, vstr_usbflags,
+ (cis[i + 4] << 24) |
+ (cis[i + 3] << 16) |
+ (cis[i + 2] << 8) |
+ cis[i + 1]);
+ break;
+#ifdef BCM_BOOTLOADER
+ case HNBU_MDIOEX_REGLIST:
+ case HNBU_MDIO_REGLIST: {
+ /* Format: addr (8 bits) | val (16 bits) */
+ const uint8 msize = 3;
+ char mdiostr[24];
+ const char *mdiodesc;
+ uint8 *st;
+
+ mdiodesc = (cis[i] == HNBU_MDIO_REGLIST) ?
+ vstr_mdio : vstr_mdioex;
+
+ ASSERT(((tlen - 1) % msize) == 0);
+
+ st = &cis[i + 1]; /* start of reg list */
+ for (j = 0; j < (tlen - 1); j += msize, st += msize) {
+ snprintf(mdiostr, sizeof(mdiostr),
+ mdiodesc, st[0]);
+ varbuf_append(&b, mdiostr, (st[2] << 8) | st[1]);
+ }
+ break;
+ }
+ case HNBU_BRMIN:
+ varbuf_append(&b, vstr_brmin,
+ (cis[i + 4] << 24) |
+ (cis[i + 3] << 16) |
+ (cis[i + 2] << 8) |
+ cis[i + 1]);
+ break;
+
+ case HNBU_BRMAX:
+ varbuf_append(&b, vstr_brmax,
+ (cis[i + 4] << 24) |
+ (cis[i + 3] << 16) |
+ (cis[i + 2] << 8) |
+ cis[i + 1]);
+ break;
+#endif /* BCM_BOOTLOADER */
+
+ case HNBU_RDLID:
+ varbuf_append(&b, vstr_rdlid,
+ (cis[i + 2] << 8) | cis[i + 1]);
+ break;
+
+ case HNBU_GCI_CCR: {
+				/* format:
+				 * |0x80| <== brcm
+				 * |len| <== variable, multiple of 5
+				 * |tup| <== tuple type
+				 * |ccreg_ix0| <== index of ccreg [1 byte]
+				 * |ccreg_val0| <== corresponding value [4 bytes]
+				 * ---
+				 * Multiple registers are possible; e.g. we can
+				 * specify reg_ix3val3 and reg_ix5val5, etc.
+				 */
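+				/* Worked example (illustrative): tlen == 11
+				 * (1 tuple-type byte + 2 * 5 payload bytes) gives
+				 * num_entries = 11/5 = 2 index/value pairs.
+				 */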
+ char vstr_gci_ccreg_entry[16];
+ uint8 num_entries = 0;
+
+ /* retrieve the index-value pairs
+ * from tlen/5; where 5 is
+ * sizeof(ccreg_ix(1)) +
+ * sizeof(ccreg_val(4)).
+ */
+ num_entries = tlen/5;
+
+ for (j = 0; j < num_entries; j++) {
+ snprintf(vstr_gci_ccreg_entry,
+ sizeof(vstr_gci_ccreg_entry),
+ rstr_gci_ccreg_entry,
+ cis[i + (j*5) + 1]);
+
+ varbuf_append(&b, vstr_gci_ccreg_entry,
+ (cis[i + (j*5) + 5] << 24) |
+ (cis[i + (j*5) + 4] << 16) |
+ (cis[i + (j*5) + 3] << 8) |
+ cis[i + (j*5) + 2]);
+ }
+ break;
+ }
+
+#ifdef BCM_BOOTLOADER
+ case HNBU_RDLRNDIS:
+ varbuf_append(&b, vstr_rdlrndis, cis[i + 1]);
+ break;
+
+ case HNBU_RDLRWU:
+ varbuf_append(&b, vstr_rdlrwu, cis[i + 1]);
+ break;
+
+ case HNBU_RDLSN:
+ if (tlen >= 5)
+ varbuf_append(&b, vstr_rdlsn,
+ (cis[i + 4] << 24) |
+ (cis[i + 3] << 16) |
+ (cis[i + 2] << 8) |
+ cis[i + 1]);
+ else
+ varbuf_append(&b, vstr_rdlsn,
+ (cis[i + 2] << 8) |
+ cis[i + 1]);
+ break;
+
+ case HNBU_PMUREGS: {
+ uint8 offset = 1, mode_addr, mode, addr;
+ const char *fmt;
+
+ do {
+ mode_addr = cis[i+offset];
+
+ mode = (mode_addr & PMUREGS_MODE_MASK)
+ >> PMUREGS_MODE_SHIFT;
+ addr = mode_addr & PMUREGS_ADDR_MASK;
+
+ switch (mode) {
+ case PMU_PLLREG_MODE:
+ fmt = vstr_pllreg;
+ break;
+ case PMU_CCREG_MODE:
+ fmt = vstr_ccreg;
+ break;
+ case PMU_VOLTREG_MODE:
+ fmt = vstr_regctrl;
+ break;
+ case PMU_RES_TIME_MODE:
+ fmt = vstr_time;
+ break;
+ case PMU_RESDEPEND_MODE:
+ fmt = vstr_depreg;
+ break;
+ default:
+ fmt = NULL;
+ break;
+ }
+
+ if (fmt != NULL) {
+ varbuf_append(&b, fmt, addr,
+ (cis[i + offset + 4] << 24) |
+ (cis[i + offset + 3] << 16) |
+ (cis[i + offset + 2] << 8) |
+ cis[i + offset + 1]);
+ }
+
+ offset += PMUREGS_TPL_SIZE;
+ } while (offset < tlen);
+ break;
+ }
+
+ case HNBU_USBREGS: {
+ uint8 offset = 1, usb_reg;
+ const char *fmt;
+
+ do {
+ usb_reg = cis[i+offset];
+
+ switch (usb_reg) {
+ case USB_DEV_CTRL_REG:
+ fmt = vstr_usbdevctrl;
+ break;
+ case HSIC_PHY_CTRL1_REG:
+ fmt = vstr_hsicphyctrl1;
+ break;
+ case HSIC_PHY_CTRL2_REG:
+ fmt = vstr_hsicphyctrl2;
+ break;
+ default:
+ fmt = NULL;
+ break;
+ }
+
+ if (fmt != NULL) {
+ varbuf_append(&b, fmt,
+ (cis[i + offset + 4] << 24) |
+ (cis[i + offset + 3] << 16) |
+ (cis[i + offset + 2] << 8) |
+ cis[i + offset + 1]);
+ }
+
+ offset += USBREGS_TPL_SIZE;
+ } while (offset < tlen);
+ break;
+ }
+
+ case HNBU_USBRDY:
+				/* The first byte of this tuple indicates whether the
+				 * host needs to be informed about the readiness of
+				 * the HSIC/USB for enumeration, and on which GPIO the
+				 * device should assert this event.
+				 */
+ varbuf_append(&b, vstr_usbrdy, cis[i + 1]);
+
+				/* The following fields in this OTP tuple are optional.
+				 * The remaining bytes indicate the delay required
+				 * before and/or after ch_init(). Each delay is a
+				 * 16-bit value whose MSB (bit 15 of 15:0) indicates
+				 * whether it is a pre- or post-delay.
+				 */
+ for (j = 2; j < USBRDY_MAXOTP_SIZE && j < tlen;
+ j += 2) {
+ uint16 usb_delay;
+
+ usb_delay = cis[i + j] | (cis[i + j + 1] << 8);
+
+					/* Bit 15 of the delay field indicates the
+					 * type of delay (pre or post).
+					 */
+ if (usb_delay & USBRDY_DLY_TYPE) {
+ varbuf_append(&b, vstr_usbpostdly,
+ (usb_delay & USBRDY_DLY_MASK));
+ } else {
+ varbuf_append(&b, vstr_usbpredly,
+ (usb_delay & USBRDY_DLY_MASK));
+ }
+ }
+ break;
+
+ case HNBU_BLDR_TIMEOUT:
+				/* Delay after USB connect before timing out while
+				 * waiting for the dongle to receive the
+				 * get_descriptor request.
+				 */
+ varbuf_append(&b, vstr_bldr_reset_timeout,
+ (cis[i + 1] | (cis[i + 2] << 8)));
+ break;
+
+ case HNBU_MUXENAB:
+ varbuf_append(&b, vstr_muxenab, cis[i + 1]);
+ break;
+ case HNBU_PUBKEY: {
+					/* The public key is stored in binary form in
+					 * OTP; convert it to a hex string before
+					 * appending it to the buffer.
+					 * public key (128 bytes) + crc (1 byte) = 129
+					 */
+ unsigned char a[300];
+ int k;
+
+ for (k = 1, j = 0; k < 129; k++)
+ j += snprintf((char *)(a + j),
+ sizeof(a) - j,
+ "%02x", cis[i + k]);
+
+ a[256] = 0;
+
+ varbuf_append(&b, vstr_pubkey, a);
+ break;
+ }
+#else
+ case HNBU_AA:
+ varbuf_append(&b, vstr_aa2g, cis[i + 1]);
+ if (tlen >= 3)
+ varbuf_append(&b, vstr_aa5g, cis[i + 2]);
+ break;
+
+ case HNBU_AG:
+ varbuf_append(&b, vstr_ag, 0, cis[i + 1]);
+ if (tlen >= 3)
+ varbuf_append(&b, vstr_ag, 1, cis[i + 2]);
+ if (tlen >= 4)
+ varbuf_append(&b, vstr_ag, 2, cis[i + 3]);
+ if (tlen >= 5)
+ varbuf_append(&b, vstr_ag, 3, cis[i + 4]);
+ ag_init = TRUE;
+ break;
+
+ case HNBU_ANT5G:
+ varbuf_append(&b, vstr_aa5g, cis[i + 1]);
+ varbuf_append(&b, vstr_ag, 1, cis[i + 2]);
+ break;
+
+ case HNBU_CC:
+ ASSERT(sromrev == 1);
+ varbuf_append(&b, vstr_cc, cis[i + 1]);
+ break;
+
+ case HNBU_PAPARMS: {
+ uint8 pa0_lo_offset = 0;
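+				/* Illustrative note: the switch below relies on
+				 * deliberate fallthrough, so longer tuples also parse
+				 * all fields of the shorter layouts: e.g. tlen == 10
+				 * parses opo and then falls through to pa0maxpwr,
+				 * pa0itssit/maxp2ga0 and pa0b[0..2].
+				 */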
+ switch (tlen) {
+ case 2:
+ ASSERT(sromrev == 1);
+ varbuf_append(&b, vstr_pa0maxpwr, cis[i + 1]);
+ break;
+ /* case 16:
+ ASSERT(sromrev >= 11);
+ for (j = 0; j < 3; j++) {
+ varbuf_append(&b, vstr_pa0b_lo[j],
+ (cis[i + (j * 2) + 11] << 8) +
+ cis[i + (j * 2) + 10]);
+ }
+ FALLTHROUGH
+ */
+ case 10:
+ case 16:
+ ASSERT(sromrev >= 2);
+ varbuf_append(&b, vstr_opo, cis[i + 9]);
+ if (tlen >= 13 && pa0_lo_offset == 0)
+ pa0_lo_offset = 9;
+ /* FALLTHROUGH */
+ case 9:
+ case 15:
+ varbuf_append(&b, vstr_pa0maxpwr, cis[i + 8]);
+ if (tlen >= 13 && pa0_lo_offset == 0)
+ pa0_lo_offset = 8;
+ /* FALLTHROUGH */
+ BCMDONGLECASE(8)
+ BCMDONGLECASE(14)
+ varbuf_append(&b, vstr_pa0itssit, cis[i + 7]);
+ varbuf_append(&b, vstr_maxp2ga, 0, cis[i + 7]);
+ if (tlen >= 13 && pa0_lo_offset == 0)
+ pa0_lo_offset = 7;
+ /* FALLTHROUGH */
+ BCMDONGLECASE(7)
+ BCMDONGLECASE(13)
+ for (j = 0; j < 3; j++) {
+ varbuf_append(&b, vstr_pa0b[j],
+ (cis[i + (j * 2) + 2] << 8) +
+ cis[i + (j * 2) + 1]);
+ }
+ if (tlen >= 13 && pa0_lo_offset == 0)
+ pa0_lo_offset = 6;
+
+ if (tlen >= 13 && pa0_lo_offset != 0) {
+ for (j = 0; j < 3; j++) {
+ varbuf_append(&b, vstr_pa0b_lo[j],
+ (cis[pa0_lo_offset+i+(j*2)+2]<<8)+
+ cis[pa0_lo_offset+i+(j*2)+1]);
+ }
+ }
+ break;
+ default:
+ ASSERT((tlen == 2) || (tlen == 9) || (tlen == 10) ||
+ (tlen == 15) || (tlen == 16));
+ break;
+ }
+ break;
+ }
+ case HNBU_PAPARMS5G:
+ ASSERT((sromrev == 2) || (sromrev == 3));
+ switch (tlen) {
+ case 23:
+ varbuf_append(&b, vstr_pa1himaxpwr, cis[i + 22]);
+ varbuf_append(&b, vstr_pa1lomaxpwr, cis[i + 21]);
+ varbuf_append(&b, vstr_pa1maxpwr, cis[i + 20]);
+ /* FALLTHROUGH */
+ case 20:
+ varbuf_append(&b, vstr_pa1itssit, cis[i + 19]);
+ /* FALLTHROUGH */
+ case 19:
+ for (j = 0; j < 3; j++) {
+ varbuf_append(&b, vstr_pa1b[j],
+ (cis[i + (j * 2) + 2] << 8) +
+ cis[i + (j * 2) + 1]);
+ }
+ for (j = 3; j < 6; j++) {
+ varbuf_append(&b, vstr_pa1lob[j - 3],
+ (cis[i + (j * 2) + 2] << 8) +
+ cis[i + (j * 2) + 1]);
+ }
+ for (j = 6; j < 9; j++) {
+ varbuf_append(&b, vstr_pa1hib[j - 6],
+ (cis[i + (j * 2) + 2] << 8) +
+ cis[i + (j * 2) + 1]);
+ }
+ break;
+ default:
+ ASSERT((tlen == 19) ||
+ (tlen == 20) || (tlen == 23));
+ break;
+ }
+ break;
+
+ case HNBU_OEM:
+ ASSERT(sromrev == 1);
+ varbuf_append(&b, vstr_oem,
+ cis[i + 1], cis[i + 2],
+ cis[i + 3], cis[i + 4],
+ cis[i + 5], cis[i + 6],
+ cis[i + 7], cis[i + 8]);
+ break;
+
+ case HNBU_CCODE:
+ ASSERT(sromrev > 1);
+ if ((cis[i + 1] == 0) || (cis[i + 2] == 0))
+ varbuf_append(&b, vstr_noccode);
+ else
+ varbuf_append(&b, vstr_ccode,
+ cis[i + 1], cis[i + 2]);
+ varbuf_append(&b, vstr_cctl, cis[i + 3]);
+ break;
+
+ case HNBU_CCKPO:
+ ASSERT(sromrev > 2);
+ varbuf_append(&b, vstr_cckpo,
+ (cis[i + 2] << 8) | cis[i + 1]);
+ break;
+
+ case HNBU_OFDMPO:
+ ASSERT(sromrev > 2);
+ varbuf_append(&b, vstr_ofdmpo,
+ (cis[i + 4] << 24) |
+ (cis[i + 3] << 16) |
+ (cis[i + 2] << 8) |
+ cis[i + 1]);
+ break;
+
+ case HNBU_WPS:
+ varbuf_append(&b, vstr_wpsgpio, cis[i + 1]);
+ if (tlen >= 3)
+ varbuf_append(&b, vstr_wpsled, cis[i + 2]);
+ break;
+
+ case HNBU_RSSISMBXA2G:
+ ASSERT(sromrev == 3);
+ varbuf_append(&b, vstr_rssismf2g, cis[i + 1] & 0xf);
+ varbuf_append(&b, vstr_rssismc2g, (cis[i + 1] >> 4) & 0xf);
+ varbuf_append(&b, vstr_rssisav2g, cis[i + 2] & 0x7);
+ varbuf_append(&b, vstr_bxa2g, (cis[i + 2] >> 3) & 0x3);
+ break;
+
+ case HNBU_RSSISMBXA5G:
+ ASSERT(sromrev == 3);
+ varbuf_append(&b, vstr_rssismf5g, cis[i + 1] & 0xf);
+ varbuf_append(&b, vstr_rssismc5g, (cis[i + 1] >> 4) & 0xf);
+ varbuf_append(&b, vstr_rssisav5g, cis[i + 2] & 0x7);
+ varbuf_append(&b, vstr_bxa5g, (cis[i + 2] >> 3) & 0x3);
+ break;
+
+ case HNBU_TRI2G:
+ ASSERT(sromrev == 3);
+ varbuf_append(&b, vstr_tri2g, cis[i + 1]);
+ break;
+
+ case HNBU_TRI5G:
+ ASSERT(sromrev == 3);
+ varbuf_append(&b, vstr_tri5gl, cis[i + 1]);
+ varbuf_append(&b, vstr_tri5g, cis[i + 2]);
+ varbuf_append(&b, vstr_tri5gh, cis[i + 3]);
+ break;
+
+ case HNBU_RXPO2G:
+ ASSERT(sromrev == 3);
+ varbuf_append(&b, vstr_rxpo2g, cis[i + 1]);
+ break;
+
+ case HNBU_RXPO5G:
+ ASSERT(sromrev == 3);
+ varbuf_append(&b, vstr_rxpo5g, cis[i + 1]);
+ break;
+
+ case HNBU_MACADDR:
+ if (!(ETHER_ISNULLADDR(&cis[i+1])) &&
+ !(ETHER_ISMULTI(&cis[i+1]))) {
+ bcm_ether_ntoa((struct ether_addr *)&cis[i + 1],
+ eabuf);
+
+ /* set boardnum if HNBU_BOARDNUM not seen yet */
+ if (boardnum == -1)
+ boardnum = (cis[i + 5] << 8) + cis[i + 6];
+ }
+ break;
+
+ case HNBU_CHAINSWITCH:
+ varbuf_append(&b, vstr_txchain, cis[i + 1]);
+ varbuf_append(&b, vstr_rxchain, cis[i + 2]);
+ varbuf_append(&b, vstr_antswitch,
+ (cis[i + 4] << 8) + cis[i + 3]);
+ break;
+
+ case HNBU_ELNA2G:
+ varbuf_append(&b, vstr_elna2g, cis[i + 1]);
+ break;
+
+ case HNBU_ELNA5G:
+ varbuf_append(&b, vstr_elna5g, cis[i + 1]);
+ break;
+
+ case HNBU_REGREV:
+ varbuf_append(&b, vstr_regrev,
+ srom_data2value(&cis[i + 1], tlen - 1));
+ break;
+
+ case HNBU_FEM: {
+ uint16 fem = (cis[i + 2] << 8) + cis[i + 1];
+ varbuf_append(&b, vstr_antswctl2g, (fem &
+ SROM8_FEM_ANTSWLUT_MASK) >>
+ SROM8_FEM_ANTSWLUT_SHIFT);
+ varbuf_append(&b, vstr_triso2g, (fem &
+ SROM8_FEM_TR_ISO_MASK) >>
+ SROM8_FEM_TR_ISO_SHIFT);
+ varbuf_append(&b, vstr_pdetrange2g, (fem &
+ SROM8_FEM_PDET_RANGE_MASK) >>
+ SROM8_FEM_PDET_RANGE_SHIFT);
+ varbuf_append(&b, vstr_extpagain2g, (fem &
+ SROM8_FEM_EXTPA_GAIN_MASK) >>
+ SROM8_FEM_EXTPA_GAIN_SHIFT);
+ varbuf_append(&b, vstr_tssipos2g, (fem &
+ SROM8_FEM_TSSIPOS_MASK) >>
+ SROM8_FEM_TSSIPOS_SHIFT);
+ if (tlen < 5) break;
+
+ fem = (cis[i + 4] << 8) + cis[i + 3];
+ varbuf_append(&b, vstr_antswctl5g, (fem &
+ SROM8_FEM_ANTSWLUT_MASK) >>
+ SROM8_FEM_ANTSWLUT_SHIFT);
+ varbuf_append(&b, vstr_triso5g, (fem &
+ SROM8_FEM_TR_ISO_MASK) >>
+ SROM8_FEM_TR_ISO_SHIFT);
+ varbuf_append(&b, vstr_pdetrange5g, (fem &
+ SROM8_FEM_PDET_RANGE_MASK) >>
+ SROM8_FEM_PDET_RANGE_SHIFT);
+ varbuf_append(&b, vstr_extpagain5g, (fem &
+ SROM8_FEM_EXTPA_GAIN_MASK) >>
+ SROM8_FEM_EXTPA_GAIN_SHIFT);
+ varbuf_append(&b, vstr_tssipos5g, (fem &
+ SROM8_FEM_TSSIPOS_MASK) >>
+ SROM8_FEM_TSSIPOS_SHIFT);
+ break;
+ }
+
+ case HNBU_PAPARMS_C0:
+ varbuf_append(&b, vstr_maxp2ga, 0, cis[i + 1]);
+ varbuf_append(&b, vstr_itt2ga0, cis[i + 2]);
+ varbuf_append(&b, vstr_pa, 2, 0, 0,
+ (cis[i + 4] << 8) + cis[i + 3]);
+ varbuf_append(&b, vstr_pa, 2, 1, 0,
+ (cis[i + 6] << 8) + cis[i + 5]);
+ varbuf_append(&b, vstr_pa, 2, 2, 0,
+ (cis[i + 8] << 8) + cis[i + 7]);
+ if (tlen < 31) break;
+
+ varbuf_append(&b, vstr_maxp5ga0, cis[i + 9]);
+ varbuf_append(&b, vstr_itt5ga0, cis[i + 10]);
+ varbuf_append(&b, vstr_maxp5gha0, cis[i + 11]);
+ varbuf_append(&b, vstr_maxp5gla0, cis[i + 12]);
+ varbuf_append(&b, vstr_pa, 5, 0, 0,
+ (cis[i + 14] << 8) + cis[i + 13]);
+ varbuf_append(&b, vstr_pa, 5, 1, 0,
+ (cis[i + 16] << 8) + cis[i + 15]);
+ varbuf_append(&b, vstr_pa, 5, 2, 0,
+ (cis[i + 18] << 8) + cis[i + 17]);
+ varbuf_append(&b, vstr_pahl, 5, 'l', 0, 0,
+ (cis[i + 20] << 8) + cis[i + 19]);
+ varbuf_append(&b, vstr_pahl, 5, 'l', 1, 0,
+ (cis[i + 22] << 8) + cis[i + 21]);
+ varbuf_append(&b, vstr_pahl, 5, 'l', 2, 0,
+ (cis[i + 24] << 8) + cis[i + 23]);
+ varbuf_append(&b, vstr_pahl, 5, 'h', 0, 0,
+ (cis[i + 26] << 8) + cis[i + 25]);
+ varbuf_append(&b, vstr_pahl, 5, 'h', 1, 0,
+ (cis[i + 28] << 8) + cis[i + 27]);
+ varbuf_append(&b, vstr_pahl, 5, 'h', 2, 0,
+ (cis[i + 30] << 8) + cis[i + 29]);
+ break;
+
+ case HNBU_PAPARMS_C1:
+ varbuf_append(&b, vstr_maxp2ga, 1, cis[i + 1]);
+ varbuf_append(&b, vstr_itt2ga1, cis[i + 2]);
+ varbuf_append(&b, vstr_pa, 2, 0, 1,
+ (cis[i + 4] << 8) + cis[i + 3]);
+ varbuf_append(&b, vstr_pa, 2, 1, 1,
+ (cis[i + 6] << 8) + cis[i + 5]);
+ varbuf_append(&b, vstr_pa, 2, 2, 1,
+ (cis[i + 8] << 8) + cis[i + 7]);
+ if (tlen < 31) break;
+
+ varbuf_append(&b, vstr_maxp5ga1, cis[i + 9]);
+ varbuf_append(&b, vstr_itt5ga1, cis[i + 10]);
+ varbuf_append(&b, vstr_maxp5gha1, cis[i + 11]);
+ varbuf_append(&b, vstr_maxp5gla1, cis[i + 12]);
+ varbuf_append(&b, vstr_pa, 5, 0, 1,
+ (cis[i + 14] << 8) + cis[i + 13]);
+ varbuf_append(&b, vstr_pa, 5, 1, 1,
+ (cis[i + 16] << 8) + cis[i + 15]);
+ varbuf_append(&b, vstr_pa, 5, 2, 1,
+ (cis[i + 18] << 8) + cis[i + 17]);
+ varbuf_append(&b, vstr_pahl, 5, 'l', 0, 1,
+ (cis[i + 20] << 8) + cis[i + 19]);
+ varbuf_append(&b, vstr_pahl, 5, 'l', 1, 1,
+ (cis[i + 22] << 8) + cis[i + 21]);
+ varbuf_append(&b, vstr_pahl, 5, 'l', 2, 1,
+ (cis[i + 24] << 8) + cis[i + 23]);
+ varbuf_append(&b, vstr_pahl, 5, 'h', 0, 1,
+ (cis[i + 26] << 8) + cis[i + 25]);
+ varbuf_append(&b, vstr_pahl, 5, 'h', 1, 1,
+ (cis[i + 28] << 8) + cis[i + 27]);
+ varbuf_append(&b, vstr_pahl, 5, 'h', 2, 1,
+ (cis[i + 30] << 8) + cis[i + 29]);
+ break;
+
+ case HNBU_PO_CCKOFDM:
+ varbuf_append(&b, vstr_cck2gpo,
+ (cis[i + 2] << 8) + cis[i + 1]);
+ varbuf_append(&b, vstr_ofdm2gpo,
+ (cis[i + 6] << 24) + (cis[i + 5] << 16) +
+ (cis[i + 4] << 8) + cis[i + 3]);
+ if (tlen < 19) break;
+
+ varbuf_append(&b, vstr_ofdm5gpo,
+ (cis[i + 10] << 24) + (cis[i + 9] << 16) +
+ (cis[i + 8] << 8) + cis[i + 7]);
+ varbuf_append(&b, vstr_ofdm5glpo,
+ (cis[i + 14] << 24) + (cis[i + 13] << 16) +
+ (cis[i + 12] << 8) + cis[i + 11]);
+ varbuf_append(&b, vstr_ofdm5ghpo,
+ (cis[i + 18] << 24) + (cis[i + 17] << 16) +
+ (cis[i + 16] << 8) + cis[i + 15]);
+ break;
+
+ case HNBU_PO_MCS2G:
+ for (j = 0; j <= (tlen/2); j++) {
+ varbuf_append(&b, vstr_mcspo, 2, j,
+ (cis[i + 2 + 2*j] << 8) + cis[i + 1 + 2*j]);
+ }
+ break;
+
+ case HNBU_PO_MCS5GM:
+ for (j = 0; j <= (tlen/2); j++) {
+ varbuf_append(&b, vstr_mcspo, 5, j,
+ (cis[i + 2 + 2*j] << 8) + cis[i + 1 + 2*j]);
+ }
+ break;
+
+ case HNBU_PO_MCS5GLH:
+ for (j = 0; j <= (tlen/4); j++) {
+ varbuf_append(&b, vstr_mcspohl, 5, 'l', j,
+ (cis[i + 2 + 2*j] << 8) + cis[i + 1 + 2*j]);
+ }
+
+ for (j = 0; j <= (tlen/4); j++) {
+ varbuf_append(&b, vstr_mcspohl, 5, 'h', j,
+ (cis[i + ((tlen/2)+2) + 2*j] << 8) +
+ cis[i + ((tlen/2)+1) + 2*j]);
+ }
+
+ break;
+
+ case HNBU_PO_CDD:
+ varbuf_append(&b, vstr_cddpo,
+ (cis[i + 2] << 8) + cis[i + 1]);
+ break;
+
+ case HNBU_PO_STBC:
+ varbuf_append(&b, vstr_stbcpo,
+ (cis[i + 2] << 8) + cis[i + 1]);
+ break;
+
+ case HNBU_PO_40M:
+ varbuf_append(&b, vstr_bw40po,
+ (cis[i + 2] << 8) + cis[i + 1]);
+ break;
+
+ case HNBU_PO_40MDUP:
+ varbuf_append(&b, vstr_bwduppo,
+ (cis[i + 2] << 8) + cis[i + 1]);
+ break;
+
+ case HNBU_OFDMPO5G:
+ varbuf_append(&b, vstr_ofdm5gpo,
+ (cis[i + 4] << 24) + (cis[i + 3] << 16) +
+ (cis[i + 2] << 8) + cis[i + 1]);
+ varbuf_append(&b, vstr_ofdm5glpo,
+ (cis[i + 8] << 24) + (cis[i + 7] << 16) +
+ (cis[i + 6] << 8) + cis[i + 5]);
+ varbuf_append(&b, vstr_ofdm5ghpo,
+ (cis[i + 12] << 24) + (cis[i + 11] << 16) +
+ (cis[i + 10] << 8) + cis[i + 9]);
+ break;
+ /* Power per rate for SROM V9 */
+ case HNBU_CCKBW202GPO:
+ varbuf_append(&b, vstr_cckbw202gpo[0],
+ ((cis[i + 2] << 8) + cis[i + 1]));
+ if (tlen > 4)
+ varbuf_append(&b, vstr_cckbw202gpo[1],
+ ((cis[i + 4] << 8) + cis[i + 3]));
+ if (tlen > 6)
+ varbuf_append(&b, vstr_cckbw202gpo[2],
+ ((cis[i + 6] << 8) + cis[i + 5]));
+ break;
+
+ case HNBU_LEGOFDMBW202GPO:
+ varbuf_append(&b, vstr_legofdmbw202gpo[0],
+ ((cis[i + 4] << 24) + (cis[i + 3] << 16) +
+ (cis[i + 2] << 8) + cis[i + 1]));
+ if (tlen > 6) {
+ varbuf_append(&b, vstr_legofdmbw202gpo[1],
+ ((cis[i + 8] << 24) + (cis[i + 7] << 16) +
+ (cis[i + 6] << 8) + cis[i + 5]));
+ }
+ break;
+
+ case HNBU_LEGOFDMBW205GPO:
+ for (j = 0; j < 6; j++) {
+ if (tlen < (2 + 4 * j))
+ break;
+ varbuf_append(&b, vstr_legofdmbw205gpo[j],
+ ((cis[4 * j + i + 4] << 24)
+ + (cis[4 * j + i + 3] << 16)
+ + (cis[4 * j + i + 2] << 8)
+ + cis[4 * j + i + 1]));
+ }
+ break;
+
+ case HNBU_MCS2GPO:
+ for (j = 0; j < 4; j++) {
+ if (tlen < (2 + 4 * j))
+ break;
+ varbuf_append(&b, vstr_mcs2gpo[j],
+ ((cis[4 * j + i + 4] << 24)
+ + (cis[4 * j + i + 3] << 16)
+ + (cis[4 * j + i + 2] << 8)
+ + cis[4 * j + i + 1]));
+ }
+ break;
+
+ case HNBU_MCS5GLPO:
+ for (j = 0; j < 3; j++) {
+ if (tlen < (2 + 4 * j))
+ break;
+ varbuf_append(&b, vstr_mcs5glpo[j],
+ ((cis[4 * j + i + 4] << 24)
+ + (cis[4 * j + i + 3] << 16)
+ + (cis[4 * j + i + 2] << 8)
+ + cis[4 * j + i + 1]));
+ }
+ break;
+
+ case HNBU_MCS5GMPO:
+ for (j = 0; j < 3; j++) {
+ if (tlen < (2 + 4 * j))
+ break;
+ varbuf_append(&b, vstr_mcs5gmpo[j],
+ ((cis[4 * j + i + 4] << 24)
+ + (cis[4 * j + i + 3] << 16)
+ + (cis[4 * j + i + 2] << 8)
+ + cis[4 * j + i + 1]));
+ }
+ break;
+
+ case HNBU_MCS5GHPO:
+ for (j = 0; j < 3; j++) {
+ if (tlen < (2 + 4 * j))
+ break;
+ varbuf_append(&b, vstr_mcs5ghpo[j],
+ ((cis[4 * j + i + 4] << 24)
+ + (cis[4 * j + i + 3] << 16)
+ + (cis[4 * j + i + 2] << 8)
+ + cis[4 * j + i + 1]));
+ }
+ break;
+
+ case HNBU_MCS32PO:
+ varbuf_append(&b, vstr_mcs32po,
+ (cis[i + 2] << 8) + cis[i + 1]);
+ break;
+
+ case HNBU_LEG40DUPPO:
+ varbuf_append(&b, vstr_legofdm40duppo,
+ (cis[i + 2] << 8) + cis[i + 1]);
+ break;
+
+ case HNBU_CUSTOM1:
+ varbuf_append(&b, vstr_custom, 1, ((cis[i + 4] << 24) +
+ (cis[i + 3] << 16) + (cis[i + 2] << 8) +
+ cis[i + 1]));
+ break;
+
+#if defined(BCMSDIO) || defined(BCMCCISSR3)
+ case HNBU_SROM3SWRGN:
+ if (tlen >= 73) {
+ uint16 srom[35];
+ uint8 srev = cis[i + 1 + 70];
+ ASSERT(srev == 3);
+ /* make tuple value 16-bit aligned and parse it */
+ bcopy(&cis[i + 1], srom, sizeof(srom));
+ _initvars_srom_pci(srev, srom, SROM3_SWRGN_OFF, &b);
+ /* 2.4G antenna gain is included in SROM */
+ ag_init = TRUE;
+ /* Ethernet MAC address is included in SROM */
+ eabuf[0] = 0;
+				/* why is boardnum not -1? */
+ boardnum = -1;
+ }
+ /* create extra variables */
+ if (tlen >= 75)
+ varbuf_append(&b, vstr_vendid,
+ (cis[i + 1 + 73] << 8) +
+ cis[i + 1 + 72]);
+ if (tlen >= 77)
+ varbuf_append(&b, vstr_devid,
+ (cis[i + 1 + 75] << 8) +
+ cis[i + 1 + 74]);
+ if (tlen >= 79)
+ varbuf_append(&b, vstr_xtalfreq,
+ (cis[i + 1 + 77] << 8) +
+ cis[i + 1 + 76]);
+ break;
+#endif /* BCMSDIO || BCMCCISSR3 */
+
+ case HNBU_CCKFILTTYPE:
+ varbuf_append(&b, vstr_cckdigfilttype,
+ (cis[i + 1]));
+ break;
+
+ case HNBU_TEMPTHRESH:
+ varbuf_append(&b, vstr_tempthresh,
+ (cis[i + 1]));
+ /* period in msb nibble */
+ varbuf_append(&b, vstr_temps_period,
+ (cis[i + 2] & SROM11_TEMPS_PERIOD_MASK) >>
+ SROM11_TEMPS_PERIOD_SHIFT);
+				/* hysteresis in lsb nibble */
+ varbuf_append(&b, vstr_temps_hysteresis,
+ (cis[i + 2] & SROM11_TEMPS_HYSTERESIS_MASK) >>
+ SROM11_TEMPS_HYSTERESIS_SHIFT);
+ if (tlen >= 4) {
+ varbuf_append(&b, vstr_tempoffset,
+ (cis[i + 3]));
+ varbuf_append(&b, vstr_tempsense_slope,
+ (cis[i + 4]));
+ varbuf_append(&b, vstr_temp_corrx,
+ (cis[i + 5] & SROM11_TEMPCORRX_MASK) >>
+ SROM11_TEMPCORRX_SHIFT);
+ varbuf_append(&b, vstr_tempsense_option,
+ (cis[i + 5] & SROM11_TEMPSENSE_OPTION_MASK) >>
+ SROM11_TEMPSENSE_OPTION_SHIFT);
+ varbuf_append(&b, vstr_phycal_tempdelta,
+ (cis[i + 6]));
+ }
+ break;
+
+ case HNBU_FEM_CFG: {
+ /* fem_cfg1 */
+ uint16 fem_cfg = (cis[i + 2] << 8) + cis[i + 1];
+ varbuf_append(&b, vstr_femctrl,
+ (fem_cfg & SROM11_FEMCTRL_MASK) >>
+ SROM11_FEMCTRL_SHIFT);
+ varbuf_append(&b, vstr_papdcap, 2,
+ (fem_cfg & SROM11_PAPDCAP_MASK) >>
+ SROM11_PAPDCAP_SHIFT);
+ varbuf_append(&b, vstr_tworangetssi, 2,
+ (fem_cfg & SROM11_TWORANGETSSI_MASK) >>
+ SROM11_TWORANGETSSI_SHIFT);
+ varbuf_append(&b, vstr_pdgaing, 2,
+ (fem_cfg & SROM11_PDGAIN_MASK) >>
+ SROM11_PDGAIN_SHIFT);
+ varbuf_append(&b, vstr_epagaing, 2,
+ (fem_cfg & SROM11_EPAGAIN_MASK) >>
+ SROM11_EPAGAIN_SHIFT);
+ varbuf_append(&b, vstr_tssiposslopeg, 2,
+ (fem_cfg & SROM11_TSSIPOSSLOPE_MASK) >>
+ SROM11_TSSIPOSSLOPE_SHIFT);
+ /* fem_cfg2 */
+ fem_cfg = (cis[i + 4] << 8) + cis[i + 3];
+ varbuf_append(&b, vstr_gainctrlsph,
+ (fem_cfg & SROM11_GAINCTRLSPH_MASK) >>
+ SROM11_GAINCTRLSPH_SHIFT);
+ varbuf_append(&b, vstr_papdcap, 5,
+ (fem_cfg & SROM11_PAPDCAP_MASK) >>
+ SROM11_PAPDCAP_SHIFT);
+ varbuf_append(&b, vstr_tworangetssi, 5,
+ (fem_cfg & SROM11_TWORANGETSSI_MASK) >>
+ SROM11_TWORANGETSSI_SHIFT);
+ varbuf_append(&b, vstr_pdgaing, 5,
+ (fem_cfg & SROM11_PDGAIN_MASK) >>
+ SROM11_PDGAIN_SHIFT);
+ varbuf_append(&b, vstr_epagaing, 5,
+ (fem_cfg & SROM11_EPAGAIN_MASK) >>
+ SROM11_EPAGAIN_SHIFT);
+ varbuf_append(&b, vstr_tssiposslopeg, 5,
+ (fem_cfg & SROM11_TSSIPOSSLOPE_MASK) >>
+ SROM11_TSSIPOSSLOPE_SHIFT);
+ break;
+ }
+
+ case HNBU_ACPA_C0: {
+ const int a = 0;
+
+#ifndef OTP_SKIP_MAXP_PAPARAMS
+ varbuf_append(&b, vstr_subband5gver,
+ (cis[i + 2] << 8) + cis[i + 1]);
+ /* maxp2g */
+					/* Decoupling this tuple so it can be programmed from NVRAM */
+ varbuf_append(&b, vstr_maxp2ga, a,
+ (cis[i + 4] << 8) + cis[i + 3]);
+#endif /* OTP_SKIP_MAXP_PAPARAMS */
+ /* pa2g */
+ varbuf_append(&b, vstr_pa2ga, a,
+ (cis[i + 6] << 8) + cis[i + 5],
+ (cis[i + 8] << 8) + cis[i + 7],
+ (cis[i + 10] << 8) + cis[i + 9]);
+#ifndef OTP_SKIP_MAXP_PAPARAMS
+ /* maxp5g */
+ varbuf_append(&b, vstr_maxp5ga, a,
+ cis[i + 11],
+ cis[i + 12],
+ cis[i + 13],
+ cis[i + 14]);
+#endif /* OTP_SKIP_MAXP_PAPARAMS */
+ /* pa5g */
+ varbuf_append(&b, vstr_pa5ga, a,
+ (cis[i + 16] << 8) + cis[i + 15],
+ (cis[i + 18] << 8) + cis[i + 17],
+ (cis[i + 20] << 8) + cis[i + 19],
+ (cis[i + 22] << 8) + cis[i + 21],
+ (cis[i + 24] << 8) + cis[i + 23],
+ (cis[i + 26] << 8) + cis[i + 25],
+ (cis[i + 28] << 8) + cis[i + 27],
+ (cis[i + 30] << 8) + cis[i + 29],
+ (cis[i + 32] << 8) + cis[i + 31],
+ (cis[i + 34] << 8) + cis[i + 33],
+ (cis[i + 36] << 8) + cis[i + 35],
+ (cis[i + 38] << 8) + cis[i + 37]);
+ break;
+ }
+
+ case HNBU_ACPA_C1: {
+ const int a = 1;
+
+#ifndef OTP_SKIP_MAXP_PAPARAMS
+ /* maxp2g */
+					/* Decoupling this tuple so it can be programmed from NVRAM */
+ varbuf_append(&b, vstr_maxp2ga, a,
+ (cis[i + 2] << 8) + cis[i + 1]);
+#endif /* OTP_SKIP_MAXP_PAPARAMS */
+ /* pa2g */
+ varbuf_append(&b, vstr_pa2ga, a,
+ (cis[i + 4] << 8) + cis[i + 3],
+ (cis[i + 6] << 8) + cis[i + 5],
+ (cis[i + 8] << 8) + cis[i + 7]);
+#ifndef OTP_SKIP_MAXP_PAPARAMS
+ /* maxp5g */
+ varbuf_append(&b, vstr_maxp5ga, a,
+ cis[i + 9],
+ cis[i + 10],
+ cis[i + 11],
+ cis[i + 12]);
+#endif /* OTP_SKIP_MAXP_PAPARAMS */
+ /* pa5g */
+ varbuf_append(&b, vstr_pa5ga, a,
+ (cis[i + 14] << 8) + cis[i + 13],
+ (cis[i + 16] << 8) + cis[i + 15],
+ (cis[i + 18] << 8) + cis[i + 17],
+ (cis[i + 20] << 8) + cis[i + 19],
+ (cis[i + 22] << 8) + cis[i + 21],
+ (cis[i + 24] << 8) + cis[i + 23],
+ (cis[i + 26] << 8) + cis[i + 25],
+ (cis[i + 28] << 8) + cis[i + 27],
+ (cis[i + 30] << 8) + cis[i + 29],
+ (cis[i + 32] << 8) + cis[i + 31],
+ (cis[i + 34] << 8) + cis[i + 33],
+ (cis[i + 36] << 8) + cis[i + 35]);
+ break;
+ }
+
+ case HNBU_ACPA_C2: {
+ const int a = 2;
+
+#ifndef OTP_SKIP_MAXP_PAPARAMS
+ /* maxp2g */
+					/* Decoupling this tuple so it can be programmed from NVRAM */
+ varbuf_append(&b, vstr_maxp2ga, a,
+ (cis[i + 2] << 8) + cis[i + 1]);
+#endif /* OTP_SKIP_MAXP_PAPARAMS */
+ /* pa2g */
+ varbuf_append(&b, vstr_pa2ga, a,
+ (cis[i + 4] << 8) + cis[i + 3],
+ (cis[i + 6] << 8) + cis[i + 5],
+ (cis[i + 8] << 8) + cis[i + 7]);
+#ifndef OTP_SKIP_MAXP_PAPARAMS
+ /* maxp5g */
+ varbuf_append(&b, vstr_maxp5ga, a,
+ cis[i + 9],
+ cis[i + 10],
+ cis[i + 11],
+ cis[i + 12]);
+#endif /* OTP_SKIP_MAXP_PAPARAMS */
+ /* pa5g */
+ varbuf_append(&b, vstr_pa5ga, a,
+ (cis[i + 14] << 8) + cis[i + 13],
+ (cis[i + 16] << 8) + cis[i + 15],
+ (cis[i + 18] << 8) + cis[i + 17],
+ (cis[i + 20] << 8) + cis[i + 19],
+ (cis[i + 22] << 8) + cis[i + 21],
+ (cis[i + 24] << 8) + cis[i + 23],
+ (cis[i + 26] << 8) + cis[i + 25],
+ (cis[i + 28] << 8) + cis[i + 27],
+ (cis[i + 30] << 8) + cis[i + 29],
+ (cis[i + 32] << 8) + cis[i + 31],
+ (cis[i + 34] << 8) + cis[i + 33],
+ (cis[i + 36] << 8) + cis[i + 35]);
+ break;
+ }
+
+ case HNBU_MEAS_PWR:
+ varbuf_append(&b, vstr_measpower, cis[i + 1]);
+ varbuf_append(&b, vstr_measpowerX, 1, (cis[i + 2]));
+ varbuf_append(&b, vstr_measpowerX, 2, (cis[i + 3]));
+ varbuf_append(&b, vstr_rawtempsense,
+ ((cis[i + 5] & 0x1) << 8) + cis[i + 4]);
+ break;
+
+ case HNBU_PDOFF:
+ varbuf_append(&b, vstr_pdoffsetma, 40, 0,
+ (cis[i + 2] << 8) + cis[i + 1]);
+ varbuf_append(&b, vstr_pdoffsetma, 40, 1,
+ (cis[i + 4] << 8) + cis[i + 3]);
+ varbuf_append(&b, vstr_pdoffsetma, 40, 2,
+ (cis[i + 6] << 8) + cis[i + 5]);
+ varbuf_append(&b, vstr_pdoffsetma, 80, 0,
+ (cis[i + 8] << 8) + cis[i + 7]);
+ varbuf_append(&b, vstr_pdoffsetma, 80, 1,
+ (cis[i + 10] << 8) + cis[i + 9]);
+ varbuf_append(&b, vstr_pdoffsetma, 80, 2,
+ (cis[i + 12] << 8) + cis[i + 11]);
+ break;
+
+ case HNBU_ACPPR_2GPO:
+ varbuf_append(&b, vstr_dot11agofdmhrbw202gpo,
+ (cis[i + 2] << 8) + cis[i + 1]);
+ varbuf_append(&b, vstr_ofdmlrbw202gpo,
+ (cis[i + 4] << 8) + cis[i + 3]);
+
+ if (tlen < 13) break;
+ varbuf_append(&b, vstr_sb20in40dot11agofdm2gpo,
+ (cis[i + 6] << 8) + cis[i + 5]);
+ varbuf_append(&b, vstr_sb20in80dot11agofdm2gpo,
+ (cis[i + 8] << 8) + cis[i + 7]);
+ varbuf_append(&b, vstr_sb20in40ofdmlrbw202gpo,
+ (cis[i + 10] << 8) + cis[i + 9]);
+ varbuf_append(&b, vstr_sb20in80ofdmlrbw202gpo,
+ (cis[i + 12] << 8) + cis[i + 11]);
+ break;
+
+ case HNBU_ACPPR_5GPO:
+ varbuf_append(&b, vstr_mcsbw805gpo, 'l',
+ (cis[i + 4] << 24) + (cis[i + 3] << 16) +
+ (cis[i + 2] << 8) + cis[i + 1]);
+ varbuf_append(&b, vstr_mcsbw1605gpo, 'l',
+ (cis[i + 8] << 24) + (cis[i + 7] << 16) +
+ (cis[i + 6] << 8) + cis[i + 5]);
+ varbuf_append(&b, vstr_mcsbw805gpo, 'm',
+ (cis[i + 12] << 24) + (cis[i + 11] << 16) +
+ (cis[i + 10] << 8) + cis[i + 9]);
+ varbuf_append(&b, vstr_mcsbw1605gpo, 'm',
+ (cis[i + 16] << 24) + (cis[i + 15] << 16) +
+ (cis[i + 14] << 8) + cis[i + 13]);
+ varbuf_append(&b, vstr_mcsbw805gpo, 'h',
+ (cis[i + 20] << 24) + (cis[i + 19] << 16) +
+ (cis[i + 18] << 8) + cis[i + 17]);
+ varbuf_append(&b, vstr_mcsbw1605gpo, 'h',
+ (cis[i + 24] << 24) + (cis[i + 23] << 16) +
+ (cis[i + 22] << 8) + cis[i + 21]);
+ varbuf_append(&b, vstr_mcslr5gpo, 'l',
+ (cis[i + 26] << 8) + cis[i + 25]);
+ varbuf_append(&b, vstr_mcslr5gpo, 'm',
+ (cis[i + 28] << 8) + cis[i + 27]);
+ varbuf_append(&b, vstr_mcslr5gpo, 'h',
+ (cis[i + 30] << 8) + cis[i + 29]);
+
+ if (tlen < 51) break;
+ varbuf_append(&b, vstr_mcsbw80p805gpo, 'l',
+ (cis[i + 34] << 24) + (cis[i + 33] << 16) +
+ (cis[i + 32] << 8) + cis[i + 31]);
+ varbuf_append(&b, vstr_mcsbw80p805gpo, 'm',
+ (cis[i + 38] << 24) + (cis[i + 37] << 16) +
+ (cis[i + 36] << 8) + cis[i + 35]);
+ varbuf_append(&b, vstr_mcsbw80p805gpo, 'h',
+ (cis[i + 42] << 24) + (cis[i + 41] << 16) +
+ (cis[i + 40] << 8) + cis[i + 39]);
+ varbuf_append(&b, vstr_mcsbw80p805g1po, 'x',
+ (cis[i + 46] << 24) + (cis[i + 45] << 16) +
+ (cis[i + 44] << 8) + cis[i + 43]);
+ varbuf_append(&b, vstr_mcslr5g1po, 'x',
+ (cis[i + 48] << 8) + cis[i + 47]);
+ varbuf_append(&b, vstr_mcslr5g80p80po,
+ (cis[i + 50] << 8) + cis[i + 49]);
+ varbuf_append(&b, vstr_mcsbw805g1po, 'x',
+ (cis[i + 54] << 24) + (cis[i + 53] << 16) +
+ (cis[i + 52] << 8) + cis[i + 51]);
+ varbuf_append(&b, vstr_mcsbw1605g1po, 'x',
+ (cis[i + 58] << 24) + (cis[i + 57] << 16) +
+ (cis[i + 56] << 8) + cis[i + 55]);
+
+ break;
+
+ case HNBU_MCS5Gx1PO:
+ varbuf_append(&b, vstr_mcsbw205g1po, 'x',
+ (cis[i + 4] << 24) + (cis[i + 3] << 16) +
+ (cis[i + 2] << 8) + cis[i + 1]);
+ varbuf_append(&b, vstr_mcsbw405g1po, 'x',
+ (cis[i + 8] << 24) + (cis[i + 7] << 16) +
+ (cis[i + 6] << 8) + cis[i + 5]);
+ break;
+
+ case HNBU_ACPPR_SBPO:
+ varbuf_append(&b, vstr_sb20in40rpo, 'h',
+ (cis[i + 2] << 8) + cis[i + 1]);
+ varbuf_append(&b, vstr_sb20in80and160r5gpo, 'h', 'l',
+ (cis[i + 4] << 8) + cis[i + 3]);
+ varbuf_append(&b, vstr_sb40and80r5gpo, 'h', 'l',
+ (cis[i + 6] << 8) + cis[i + 5]);
+ varbuf_append(&b, vstr_sb20in80and160r5gpo, 'h', 'm',
+ (cis[i + 8] << 8) + cis[i + 7]);
+ varbuf_append(&b, vstr_sb40and80r5gpo, 'h', 'm',
+ (cis[i + 10] << 8) + cis[i + 9]);
+ varbuf_append(&b, vstr_sb20in80and160r5gpo, 'h', 'h',
+ (cis[i + 12] << 8) + cis[i + 11]);
+ varbuf_append(&b, vstr_sb40and80r5gpo, 'h', 'h',
+ (cis[i + 14] << 8) + cis[i + 13]);
+ varbuf_append(&b, vstr_sb20in40rpo, 'l',
+ (cis[i + 16] << 8) + cis[i + 15]);
+ varbuf_append(&b, vstr_sb20in80and160r5gpo, 'l', 'l',
+ (cis[i + 18] << 8) + cis[i + 17]);
+ varbuf_append(&b, vstr_sb40and80r5gpo, 'l', 'l',
+ (cis[i + 20] << 8) + cis[i + 19]);
+ varbuf_append(&b, vstr_sb20in80and160r5gpo, 'l', 'm',
+ (cis[i + 22] << 8) + cis[i + 21]);
+ varbuf_append(&b, vstr_sb40and80r5gpo, 'l', 'm',
+ (cis[i + 24] << 8) + cis[i + 23]);
+ varbuf_append(&b, vstr_sb20in80and160r5gpo, 'l', 'h',
+ (cis[i + 26] << 8) + cis[i + 25]);
+ varbuf_append(&b, vstr_sb40and80r5gpo, 'l', 'h',
+ (cis[i + 28] << 8) + cis[i + 27]);
+ varbuf_append(&b, vstr_dot11agduprpo, 'h',
+ (cis[i + 32] << 24) + (cis[i + 31] << 16) +
+ (cis[i + 30] << 8) + cis[i + 29]);
+ varbuf_append(&b, vstr_dot11agduprpo, 'l',
+ (cis[i + 36] << 24) + (cis[i + 35] << 16) +
+ (cis[i + 34] << 8) + cis[i + 33]);
+
+ if (tlen < 49) break;
+ varbuf_append(&b, vstr_sb20in40and80rpo, 'h',
+ (cis[i + 38] << 8) + cis[i + 37]);
+ varbuf_append(&b, vstr_sb20in40and80rpo, 'l',
+ (cis[i + 40] << 8) + cis[i + 39]);
+ varbuf_append(&b, vstr_sb20in80and160r5g1po, 'h', 'x',
+ (cis[i + 42] << 8) + cis[i + 41]);
+ varbuf_append(&b, vstr_sb20in80and160r5g1po, 'l', 'x',
+ (cis[i + 44] << 8) + cis[i + 43]);
+ varbuf_append(&b, vstr_sb40and80r5g1po, 'h', 'x',
+ (cis[i + 46] << 8) + cis[i + 45]);
+ varbuf_append(&b, vstr_sb40and80r5g1po, 'l', 'x',
+ (cis[i + 48] << 8) + cis[i + 47]);
+ break;
+
+ case HNBU_ACPPR_SB8080_PO:
+ varbuf_append(&b, vstr_sb2040and80in80p80r5gpo, 'h', 'l',
+ (cis[i + 2] << 8) + cis[i + 1]);
+ varbuf_append(&b, vstr_sb2040and80in80p80r5gpo, 'l', 'l',
+ (cis[i + 4] << 8) + cis[i + 3]);
+ varbuf_append(&b, vstr_sb2040and80in80p80r5gpo, 'h', 'm',
+ (cis[i + 6] << 8) + cis[i + 5]);
+ varbuf_append(&b, vstr_sb2040and80in80p80r5gpo, 'l', 'm',
+ (cis[i + 8] << 8) + cis[i + 7]);
+ varbuf_append(&b, vstr_sb2040and80in80p80r5gpo, 'h', 'h',
+ (cis[i + 10] << 8) + cis[i + 9]);
+ varbuf_append(&b, vstr_sb2040and80in80p80r5gpo, 'l', 'h',
+ (cis[i + 12] << 8) + cis[i + 11]);
+ varbuf_append(&b, vstr_sb2040and80in80p80r5g1po, 'h', 'x',
+ (cis[i + 14] << 8) + cis[i + 13]);
+ varbuf_append(&b, vstr_sb2040and80in80p80r5g1po, 'l', 'x',
+ (cis[i + 16] << 8) + cis[i + 15]);
+ varbuf_append(&b, vstr_sb20in80p80r5gpo, 'h',
+ (cis[i + 18] << 8) + cis[i + 17]);
+ varbuf_append(&b, vstr_sb20in80p80r5gpo, 'l',
+ (cis[i + 20] << 8) + cis[i + 19]);
+ varbuf_append(&b, vstr_dot11agduppo,
+ (cis[i + 22] << 8) + cis[i + 21]);
+ break;
+
+ case HNBU_NOISELVL:
+ /* noiselvl2g */
+ varbuf_append(&b, vstr_noiselvl2ga, 0,
+ (cis[i + 1] & 0x1f));
+ varbuf_append(&b, vstr_noiselvl2ga, 1,
+ (cis[i + 2] & 0x1f));
+ varbuf_append(&b, vstr_noiselvl2ga, 2,
+ (cis[i + 3] & 0x1f));
+ /* noiselvl5g */
+ varbuf_append(&b, vstr_noiselvl5ga, 0,
+ (cis[i + 4] & 0x1f),
+ (cis[i + 5] & 0x1f),
+ (cis[i + 6] & 0x1f),
+ (cis[i + 7] & 0x1f));
+ varbuf_append(&b, vstr_noiselvl5ga, 1,
+ (cis[i + 8] & 0x1f),
+ (cis[i + 9] & 0x1f),
+ (cis[i + 10] & 0x1f),
+ (cis[i + 11] & 0x1f));
+ varbuf_append(&b, vstr_noiselvl5ga, 2,
+ (cis[i + 12] & 0x1f),
+ (cis[i + 13] & 0x1f),
+ (cis[i + 14] & 0x1f),
+ (cis[i + 15] & 0x1f));
+ break;
+
+ case HNBU_RXGAIN_ERR:
+ varbuf_append(&b, vstr_rxgainerr2ga, 0,
+ (cis[i + 1] & 0x3f));
+ varbuf_append(&b, vstr_rxgainerr2ga, 1,
+ (cis[i + 2] & 0x1f));
+ varbuf_append(&b, vstr_rxgainerr2ga, 2,
+ (cis[i + 3] & 0x1f));
+ varbuf_append(&b, vstr_rxgainerr5ga, 0,
+ (cis[i + 4] & 0x3f),
+ (cis[i + 5] & 0x3f),
+ (cis[i + 6] & 0x3f),
+ (cis[i + 7] & 0x3f));
+ varbuf_append(&b, vstr_rxgainerr5ga, 1,
+ (cis[i + 8] & 0x1f),
+ (cis[i + 9] & 0x1f),
+ (cis[i + 10] & 0x1f),
+ (cis[i + 11] & 0x1f));
+ varbuf_append(&b, vstr_rxgainerr5ga, 2,
+ (cis[i + 12] & 0x1f),
+ (cis[i + 13] & 0x1f),
+ (cis[i + 14] & 0x1f),
+ (cis[i + 15] & 0x1f));
+ break;
+
+ case HNBU_AGBGA:
+ varbuf_append(&b, vstr_agbg, 0, cis[i + 1]);
+ varbuf_append(&b, vstr_agbg, 1, cis[i + 2]);
+ varbuf_append(&b, vstr_agbg, 2, cis[i + 3]);
+ varbuf_append(&b, vstr_aga, 0, cis[i + 4]);
+ varbuf_append(&b, vstr_aga, 1, cis[i + 5]);
+ varbuf_append(&b, vstr_aga, 2, cis[i + 6]);
+ break;
+
+ case HNBU_ACRXGAINS_C0: {
+ int a = 0;
+
+ /* rxgains */
+ uint16 rxgains = (cis[i + 2] << 8) + cis[i + 1];
+ varbuf_append(&b, vstr_rxgainsgtrelnabypa, 5, a,
+ (rxgains & SROM11_RXGAINS5GTRELNABYPA_MASK) >>
+ SROM11_RXGAINS5GTRELNABYPA_SHIFT);
+ varbuf_append(&b, vstr_rxgainsgtrisoa, 5, a,
+ (rxgains & SROM11_RXGAINS5GTRISOA_MASK) >>
+ SROM11_RXGAINS5GTRISOA_SHIFT);
+ varbuf_append(&b, vstr_rxgainsgelnagaina, 5, a,
+ (rxgains & SROM11_RXGAINS5GELNAGAINA_MASK) >>
+ SROM11_RXGAINS5GELNAGAINA_SHIFT);
+ varbuf_append(&b, vstr_rxgainsgtrelnabypa, 2, a,
+ (rxgains & SROM11_RXGAINS2GTRELNABYPA_MASK) >>
+ SROM11_RXGAINS2GTRELNABYPA_SHIFT);
+ varbuf_append(&b, vstr_rxgainsgtrisoa, 2, a,
+ (rxgains & SROM11_RXGAINS2GTRISOA_MASK) >>
+ SROM11_RXGAINS2GTRISOA_SHIFT);
+ varbuf_append(&b, vstr_rxgainsgelnagaina, 2, a,
+ (rxgains & SROM11_RXGAINS2GELNAGAINA_MASK) >>
+ SROM11_RXGAINS2GELNAGAINA_SHIFT);
+ /* rxgains1 */
+ rxgains = (cis[i + 4] << 8) + cis[i + 3];
+ varbuf_append(&b, vstr_rxgainsgxtrelnabypa, 5, 'h', a,
+ (rxgains & SROM11_RXGAINS5GTRELNABYPA_MASK) >>
+ SROM11_RXGAINS5GTRELNABYPA_SHIFT);
+ varbuf_append(&b, vstr_rxgainsgxtrisoa, 5, 'h', a,
+ (rxgains & SROM11_RXGAINS5GTRISOA_MASK) >>
+ SROM11_RXGAINS5GTRISOA_SHIFT);
+ varbuf_append(&b, vstr_rxgainsgxelnagaina, 5, 'h', a,
+ (rxgains & SROM11_RXGAINS5GELNAGAINA_MASK) >>
+ SROM11_RXGAINS5GELNAGAINA_SHIFT);
+ varbuf_append(&b, vstr_rxgainsgxtrelnabypa, 5, 'm', a,
+ (rxgains & SROM11_RXGAINS5GTRELNABYPA_MASK) >>
+ SROM11_RXGAINS5GTRELNABYPA_SHIFT);
+ varbuf_append(&b, vstr_rxgainsgxtrisoa, 5, 'm', a,
+ (rxgains & SROM11_RXGAINS5GTRISOA_MASK) >>
+ SROM11_RXGAINS5GTRISOA_SHIFT);
+ varbuf_append(&b, vstr_rxgainsgxelnagaina, 5, 'm', a,
+ (rxgains & SROM11_RXGAINS5GELNAGAINA_MASK) >>
+ SROM11_RXGAINS5GELNAGAINA_SHIFT);
+ break;
+ }
+
+ case HNBU_ACRXGAINS_C1: {
+ int a = 1;
+
+ /* rxgains */
+ uint16 rxgains = (cis[i + 2] << 8) + cis[i + 1];
+ varbuf_append(&b, vstr_rxgainsgtrelnabypa, 5, a,
+ (rxgains & SROM11_RXGAINS5GTRELNABYPA_MASK) >>
+ SROM11_RXGAINS5GTRELNABYPA_SHIFT);
+ varbuf_append(&b, vstr_rxgainsgtrisoa, 5, a,
+ (rxgains & SROM11_RXGAINS5GTRISOA_MASK) >>
+ SROM11_RXGAINS5GTRISOA_SHIFT);
+ varbuf_append(&b, vstr_rxgainsgelnagaina, 5, a,
+ (rxgains & SROM11_RXGAINS5GELNAGAINA_MASK) >>
+ SROM11_RXGAINS5GELNAGAINA_SHIFT);
+ varbuf_append(&b, vstr_rxgainsgtrelnabypa, 2, a,
+ (rxgains & SROM11_RXGAINS2GTRELNABYPA_MASK) >>
+ SROM11_RXGAINS2GTRELNABYPA_SHIFT);
+ varbuf_append(&b, vstr_rxgainsgtrisoa, 2, a,
+ (rxgains & SROM11_RXGAINS2GTRISOA_MASK) >>
+ SROM11_RXGAINS2GTRISOA_SHIFT);
+ varbuf_append(&b, vstr_rxgainsgelnagaina, 2, a,
+ (rxgains & SROM11_RXGAINS2GELNAGAINA_MASK) >>
+ SROM11_RXGAINS2GELNAGAINA_SHIFT);
+ /* rxgains1 */
+ rxgains = (cis[i + 4] << 8) + cis[i + 3];
+ varbuf_append(&b, vstr_rxgainsgxtrelnabypa, 5, 'h', a,
+ (rxgains & SROM11_RXGAINS5GTRELNABYPA_MASK) >>
+ SROM11_RXGAINS5GTRELNABYPA_SHIFT);
+ varbuf_append(&b, vstr_rxgainsgxtrisoa, 5, 'h', a,
+ (rxgains & SROM11_RXGAINS5GTRISOA_MASK) >>
+ SROM11_RXGAINS5GTRISOA_SHIFT);
+ varbuf_append(&b, vstr_rxgainsgxelnagaina, 5, 'h', a,
+ (rxgains & SROM11_RXGAINS5GELNAGAINA_MASK) >>
+ SROM11_RXGAINS5GELNAGAINA_SHIFT);
+ varbuf_append(&b, vstr_rxgainsgxtrelnabypa, 5, 'm', a,
+ (rxgains & SROM11_RXGAINS5GTRELNABYPA_MASK) >>
+ SROM11_RXGAINS5GTRELNABYPA_SHIFT);
+ varbuf_append(&b, vstr_rxgainsgxtrisoa, 5, 'm', a,
+ (rxgains & SROM11_RXGAINS5GTRISOA_MASK) >>
+ SROM11_RXGAINS5GTRISOA_SHIFT);
+ varbuf_append(&b, vstr_rxgainsgxelnagaina, 5, 'm', a,
+ (rxgains & SROM11_RXGAINS5GELNAGAINA_MASK) >>
+ SROM11_RXGAINS5GELNAGAINA_SHIFT);
+ break;
+ }
+
+ case HNBU_ACRXGAINS_C2: {
+ int a = 2;
+
+ /* rxgains */
+ uint16 rxgains = (cis[i + 2] << 8) + cis[i + 1];
+ varbuf_append(&b, vstr_rxgainsgtrelnabypa, 5, a,
+ (rxgains & SROM11_RXGAINS5GTRELNABYPA_MASK) >>
+ SROM11_RXGAINS5GTRELNABYPA_SHIFT);
+ varbuf_append(&b, vstr_rxgainsgtrisoa, 5, a,
+ (rxgains & SROM11_RXGAINS5GTRISOA_MASK) >>
+ SROM11_RXGAINS5GTRISOA_SHIFT);
+ varbuf_append(&b, vstr_rxgainsgelnagaina, 5, a,
+ (rxgains & SROM11_RXGAINS5GELNAGAINA_MASK) >>
+ SROM11_RXGAINS5GELNAGAINA_SHIFT);
+ varbuf_append(&b, vstr_rxgainsgtrelnabypa, 2, a,
+ (rxgains & SROM11_RXGAINS2GTRELNABYPA_MASK) >>
+ SROM11_RXGAINS2GTRELNABYPA_SHIFT);
+ varbuf_append(&b, vstr_rxgainsgtrisoa, 2, a,
+ (rxgains & SROM11_RXGAINS2GTRISOA_MASK) >>
+ SROM11_RXGAINS2GTRISOA_SHIFT);
+ varbuf_append(&b, vstr_rxgainsgelnagaina, 2, a,
+ (rxgains & SROM11_RXGAINS2GELNAGAINA_MASK) >>
+ SROM11_RXGAINS2GELNAGAINA_SHIFT);
+ /* rxgains1 */
+ rxgains = (cis[i + 4] << 8) + cis[i + 3];
+ varbuf_append(&b, vstr_rxgainsgxtrelnabypa, 5, 'h', a,
+ (rxgains & SROM11_RXGAINS5GTRELNABYPA_MASK) >>
+ SROM11_RXGAINS5GTRELNABYPA_SHIFT);
+ varbuf_append(&b, vstr_rxgainsgxtrisoa, 5, 'h', a,
+ (rxgains & SROM11_RXGAINS5GTRISOA_MASK) >>
+ SROM11_RXGAINS5GTRISOA_SHIFT);
+ varbuf_append(&b, vstr_rxgainsgxelnagaina, 5, 'h', a,
+ (rxgains & SROM11_RXGAINS5GELNAGAINA_MASK) >>
+ SROM11_RXGAINS5GELNAGAINA_SHIFT);
+ varbuf_append(&b, vstr_rxgainsgxtrelnabypa, 5, 'm', a,
+ (rxgains & SROM11_RXGAINS5GTRELNABYPA_MASK) >>
+ SROM11_RXGAINS5GTRELNABYPA_SHIFT);
+ varbuf_append(&b, vstr_rxgainsgxtrisoa, 5, 'm', a,
+ (rxgains & SROM11_RXGAINS5GTRISOA_MASK) >>
+ SROM11_RXGAINS5GTRISOA_SHIFT);
+ varbuf_append(&b, vstr_rxgainsgxelnagaina, 5, 'm', a,
+ (rxgains & SROM11_RXGAINS5GELNAGAINA_MASK) >>
+ SROM11_RXGAINS5GELNAGAINA_SHIFT);
+ break;
+ }
+
+ case HNBU_TXDUTY: {
+ varbuf_append(&b, vstr_txduty_ofdm, 40,
+ (cis[i + 2] << 8) + cis[i + 1]);
+ varbuf_append(&b, vstr_txduty_thresh, 40,
+ (cis[i + 4] << 8) + cis[i + 3]);
+ varbuf_append(&b, vstr_txduty_ofdm, 80,
+ (cis[i + 6] << 8) + cis[i + 5]);
+ varbuf_append(&b, vstr_txduty_thresh, 80,
+ (cis[i + 8] << 8) + cis[i + 7]);
+ break;
+ }
+
+ case HNBU_UUID: {
+ /* uuid format 12345678-1234-5678-1234-567812345678 */
+
+				char uuidstr[37]; /* 32 hex digits, 4 '-', 1 NUL */
+
+ snprintf(uuidstr, sizeof(uuidstr),
+ rstr_uuidstr,
+ cis[i + 1], cis[i + 2], cis[i + 3], cis[i + 4],
+ cis[i + 5], cis[i + 6], cis[i + 7], cis[i + 8],
+ cis[i + 9], cis[i + 10], cis[i + 11], cis[i + 12],
+ cis[i + 13], cis[i + 14], cis[i + 15], cis[i + 16]);
+
+ varbuf_append(&b, vstr_uuid, uuidstr);
+ break;
+ }
+
+ case HNBU_WOWLGPIO:
+ varbuf_append(&b, vstr_wowlgpio, ((cis[i + 1]) & 0x7F));
+ varbuf_append(&b, vstr_wowlgpiopol,
+ (((cis[i + 1]) >> 7) & 0x1));
+ break;
+
+#endif /* !BCM_BOOTLOADER */
+#ifdef BCMUSBDEV_COMPOSITE
+ case HNBU_USBDESC_COMPOSITE:
+ varbuf_append(&b, vstr_usbdesc_composite,
+ (cis[i + 2] << 8) | cis[i + 1]);
+ break;
+#endif /* BCMUSBDEV_COMPOSITE */
+ case HNBU_USBUTMI_CTL:
+ varbuf_append(&b, vstr_usbutmi_ctl,
+ (cis[i + 2] << 8) | cis[i + 1]);
+ break;
+
+ case HNBU_USBSSPHY_UTMI_CTL0:
+ varbuf_append(&b, vstr_usbssphy_utmi_ctl0,
+ (cis[i + 4] << 24) | (cis[i + 3] << 16) |
+ (cis[i + 2] << 8) | cis[i + 1]);
+ break;
+
+ case HNBU_USBSSPHY_UTMI_CTL1:
+ varbuf_append(&b, vstr_usbssphy_utmi_ctl1,
+ (cis[i + 4] << 24) | (cis[i + 3] << 16) |
+ (cis[i + 2] << 8) | cis[i + 1]);
+ break;
+
+ case HNBU_USBSSPHY_UTMI_CTL2:
+ varbuf_append(&b, vstr_usbssphy_utmi_ctl2,
+ (cis[i + 4] << 24) | (cis[i + 3] << 16) |
+ (cis[i + 2] << 8) | cis[i + 1]);
+ break;
+
+ case HNBU_USBSSPHY_SLEEP0:
+ varbuf_append(&b, vstr_usbssphy_sleep0,
+ (cis[i + 2] << 8) | cis[i + 1]);
+ break;
+
+ case HNBU_USBSSPHY_SLEEP1:
+ varbuf_append(&b, vstr_usbssphy_sleep1,
+ (cis[i + 2] << 8) | cis[i + 1]);
+ break;
+
+ case HNBU_USBSSPHY_SLEEP2:
+ varbuf_append(&b, vstr_usbssphy_sleep2,
+ (cis[i + 2] << 8) | cis[i + 1]);
+ break;
+
+ case HNBU_USBSSPHY_SLEEP3:
+ varbuf_append(&b, vstr_usbssphy_sleep3,
+ (cis[i + 2] << 8) | cis[i + 1]);
+ break;
+ case HNBU_USBSSPHY_MDIO: {
+ uint8 setnum;
+ uint16 k;
+
+ setnum = (cis[i + 1])/4;
+ if (setnum == 0)
+ break;
+ for (j = 0; j < setnum; j++) {
+ k = j*12;
+ varbuf_append(&b, vstr_usbssphy_mdio, j,
+ (cis[i+4+k]<<16) | (cis[i+3+k]<<8) | cis[i+2+k],
+ (cis[i+7+k]<<16) | (cis[i+6+k]<<8) | cis[i+5+k],
+ (cis[i+10+k]<<16) | (cis[i+9+k]<<8) | cis[i+8+k],
+ (cis[i+13+k]<<16) | (cis[i+12+k]<<8) | cis[i+11+k]);
+ }
+ break;
+ }
+ case HNBU_USB30PHY_NOSS:
+ varbuf_append(&b, vstr_usb30phy_noss, cis[i + 1]);
+ break;
+ case HNBU_USB30PHY_U1U2:
+ varbuf_append(&b, vstr_usb30phy_u1u2, cis[i + 1]);
+ break;
+ case HNBU_USB30PHY_REGS:
+ varbuf_append(&b, vstr_usb30phy_regs, 0,
+ cis[i+4]|cis[i+3]|cis[i+2]|cis[i+1],
+ cis[i+8]|cis[i+7]|cis[i+6]|cis[i+5],
+ cis[i+12]|cis[i+11]|cis[i+10]|cis[i+9],
+ cis[i+16]|cis[i+15]|cis[i+14]|cis[i+13]);
+ varbuf_append(&b, vstr_usb30phy_regs, 1,
+ cis[i+20]|cis[i+19]|cis[i+18]|cis[i+17],
+ cis[i+24]|cis[i+23]|cis[i+22]|cis[i+21],
+ cis[i+28]|cis[i+27]|cis[i+26]|cis[i+25],
+ cis[i+32]|cis[i+31]|cis[i+30]|cis[i+29]);
+
+ break;
+
+ case HNBU_PDOFF_2G: {
+ uint16 pdoff_2g = (cis[i + 2] << 8) + cis[i + 1];
+ varbuf_append(&b, vstr_pdoffset2gma, 40, 0,
+ (pdoff_2g & SROM11_PDOFF_2G_40M_A0_MASK) >>
+ SROM11_PDOFF_2G_40M_A0_SHIFT);
+ varbuf_append(&b, vstr_pdoffset2gma, 40, 1,
+ (pdoff_2g & SROM11_PDOFF_2G_40M_A1_MASK) >>
+ SROM11_PDOFF_2G_40M_A1_SHIFT);
+ varbuf_append(&b, vstr_pdoffset2gma, 40, 2,
+ (pdoff_2g & SROM11_PDOFF_2G_40M_A2_MASK) >>
+ SROM11_PDOFF_2G_40M_A2_SHIFT);
+ varbuf_append(&b, vstr_pdoffset2gmvalid, 40,
+ (pdoff_2g & SROM11_PDOFF_2G_40M_VALID_MASK) >>
+ SROM11_PDOFF_2G_40M_VALID_SHIFT);
+ break;
+ }
+
+ case HNBU_ACPA_CCK_C0:
+ varbuf_append(&b, vstr_pa2gccka, 0,
+ (cis[i + 2] << 8) + cis[i + 1],
+ (cis[i + 4] << 8) + cis[i + 3],
+ (cis[i + 6] << 8) + cis[i + 5]);
+ break;
+
+ case HNBU_ACPA_CCK_C1:
+ varbuf_append(&b, vstr_pa2gccka, 1,
+ (cis[i + 2] << 8) + cis[i + 1],
+ (cis[i + 4] << 8) + cis[i + 3],
+ (cis[i + 6] << 8) + cis[i + 5]);
+ break;
+
+ case HNBU_ACPA_40:
+ varbuf_append(&b, vstr_pa5gbw40a, 0,
+ (cis[i + 2] << 8) + cis[i + 1],
+ (cis[i + 4] << 8) + cis[i + 3],
+ (cis[i + 6] << 8) + cis[i + 5],
+ (cis[i + 8] << 8) + cis[i + 7],
+ (cis[i + 10] << 8) + cis[i + 9],
+ (cis[i + 12] << 8) + cis[i + 11],
+ (cis[i + 14] << 8) + cis[i + 13],
+ (cis[i + 16] << 8) + cis[i + 15],
+ (cis[i + 18] << 8) + cis[i + 17],
+ (cis[i + 20] << 8) + cis[i + 19],
+ (cis[i + 22] << 8) + cis[i + 21],
+ (cis[i + 24] << 8) + cis[i + 23]);
+ break;
+
+ case HNBU_ACPA_80:
+ varbuf_append(&b, vstr_pa5gbw80a, 0,
+ (cis[i + 2] << 8) + cis[i + 1],
+ (cis[i + 4] << 8) + cis[i + 3],
+ (cis[i + 6] << 8) + cis[i + 5],
+ (cis[i + 8] << 8) + cis[i + 7],
+ (cis[i + 10] << 8) + cis[i + 9],
+ (cis[i + 12] << 8) + cis[i + 11],
+ (cis[i + 14] << 8) + cis[i + 13],
+ (cis[i + 16] << 8) + cis[i + 15],
+ (cis[i + 18] << 8) + cis[i + 17],
+ (cis[i + 20] << 8) + cis[i + 19],
+ (cis[i + 22] << 8) + cis[i + 21],
+ (cis[i + 24] << 8) + cis[i + 23]);
+ break;
+
+ case HNBU_ACPA_4080:
+ varbuf_append(&b, vstr_pa5gbw4080a, 0,
+ (cis[i + 2] << 8) + cis[i + 1],
+ (cis[i + 4] << 8) + cis[i + 3],
+ (cis[i + 6] << 8) + cis[i + 5],
+ (cis[i + 8] << 8) + cis[i + 7],
+ (cis[i + 10] << 8) + cis[i + 9],
+ (cis[i + 12] << 8) + cis[i + 11],
+ (cis[i + 14] << 8) + cis[i + 13],
+ (cis[i + 16] << 8) + cis[i + 15],
+ (cis[i + 18] << 8) + cis[i + 17],
+ (cis[i + 20] << 8) + cis[i + 19],
+ (cis[i + 22] << 8) + cis[i + 21],
+ (cis[i + 24] << 8) + cis[i + 23]);
+ varbuf_append(&b, vstr_pa5gbw4080a, 1,
+ (cis[i + 26] << 8) + cis[i + 25],
+ (cis[i + 28] << 8) + cis[i + 27],
+ (cis[i + 30] << 8) + cis[i + 29],
+ (cis[i + 32] << 8) + cis[i + 31],
+ (cis[i + 34] << 8) + cis[i + 33],
+ (cis[i + 36] << 8) + cis[i + 35],
+ (cis[i + 38] << 8) + cis[i + 37],
+ (cis[i + 40] << 8) + cis[i + 39],
+ (cis[i + 42] << 8) + cis[i + 41],
+ (cis[i + 44] << 8) + cis[i + 43],
+ (cis[i + 46] << 8) + cis[i + 45],
+ (cis[i + 48] << 8) + cis[i + 47]);
+ break;
+
+ case HNBU_ACPA_4X4C0:
+ case HNBU_ACPA_4X4C1:
+ case HNBU_ACPA_4X4C2:
+ case HNBU_ACPA_4X4C3: {
+ int core_num = 0;
+ uint8 tuple = cis[i];
+
+ if (tuple == HNBU_ACPA_4X4C1) {
+ core_num = 1;
+ } else if (tuple == HNBU_ACPA_4X4C2) {
+ core_num = 2;
+ } else if (tuple == HNBU_ACPA_4X4C3) {
+ core_num = 3;
+ }
+
+ varbuf_append(&b, vstr_maxp2ga, core_num, cis[i + 1]);
+ /* pa2g */
+ varbuf_append(&b, vstr_sr13pa2ga, core_num,
+ (cis[i + 3] << 8) + cis[i + 2],
+ (cis[i + 5] << 8) + cis[i + 4],
+ (cis[i + 7] << 8) + cis[i + 6],
+ (cis[i + 9] << 8) + cis[i + 8]);
+ /* pa2g40 */
+ varbuf_append(&b, vstr_pa2g40a, core_num,
+ (cis[i + 11] << 8) + cis[i + 10],
+ (cis[i + 13] << 8) + cis[i + 12],
+ (cis[i + 15] << 8) + cis[i + 14],
+ (cis[i + 17] << 8) + cis[i + 16]);
+ for (j = 0; j < 5; j++) {
+ varbuf_append(&b, vstr_maxp5gba, j, core_num,
+ cis[i + j + 18]);
+ }
+ break;
+ }
+
+ case HNBU_ACPA_BW20_4X4C0:
+ case HNBU_ACPA_BW40_4X4C0:
+ case HNBU_ACPA_BW80_4X4C0:
+ case HNBU_ACPA_BW20_4X4C1:
+ case HNBU_ACPA_BW40_4X4C1:
+ case HNBU_ACPA_BW80_4X4C1:
+ case HNBU_ACPA_BW20_4X4C2:
+ case HNBU_ACPA_BW40_4X4C2:
+ case HNBU_ACPA_BW80_4X4C2:
+ case HNBU_ACPA_BW20_4X4C3:
+ case HNBU_ACPA_BW40_4X4C3:
+ case HNBU_ACPA_BW80_4X4C3: {
+ int k = 0;
+ char pabuf[140]; /* max: 20 '0x????'s + 19 ','s + 1 Null */
+ int core_num = 0, buflen = 0;
+ uint8 tuple = cis[i];
+
+ if (tuple == HNBU_ACPA_BW20_4X4C1 ||
+ tuple == HNBU_ACPA_BW40_4X4C1 ||
+ tuple == HNBU_ACPA_BW80_4X4C1) {
+ core_num = 1;
+ } else if (tuple == HNBU_ACPA_BW20_4X4C2 ||
+ tuple == HNBU_ACPA_BW40_4X4C2 ||
+ tuple == HNBU_ACPA_BW80_4X4C2) {
+ core_num = 2;
+ } else if (tuple == HNBU_ACPA_BW20_4X4C3 ||
+ tuple == HNBU_ACPA_BW40_4X4C3 ||
+ tuple == HNBU_ACPA_BW80_4X4C3) {
+ core_num = 3;
+ }
+
+ buflen = sizeof(pabuf);
+ for (j = 0; j < 20; j++) { /* cis[i+1] - cis[i+40] */
+ k += snprintf(pabuf+k, buflen-k, rstr_hex,
+ ((cis[i + (2*j) + 2] << 8) +
+ cis[i + (2*j) + 1]));
+ if (j < 19) {
+ k += snprintf(pabuf+k, buflen-k,
+ ",");
+ }
+ }
+
+ if (tuple == HNBU_ACPA_BW20_4X4C0 ||
+ tuple == HNBU_ACPA_BW20_4X4C1 ||
+ tuple == HNBU_ACPA_BW20_4X4C2 ||
+ tuple == HNBU_ACPA_BW20_4X4C3) {
+ varbuf_append(&b, vstr_sr13pa5ga, core_num, pabuf);
+ } else {
+ int bw = 40;
+
+ if (tuple == HNBU_ACPA_BW80_4X4C0 ||
+ tuple == HNBU_ACPA_BW80_4X4C1 ||
+ tuple == HNBU_ACPA_BW80_4X4C2 ||
+ tuple == HNBU_ACPA_BW80_4X4C3) {
+ bw = 80;
+ }
+ varbuf_append(&b, vstr_sr13pa5gbwa, bw,
+ core_num, pabuf);
+ }
+ break;
+ }
+
+ case HNBU_RSSI_DELTA_2G_B0:
+ case HNBU_RSSI_DELTA_2G_B1:
+ case HNBU_RSSI_DELTA_2G_B2:
+ case HNBU_RSSI_DELTA_2G_B3:
+ case HNBU_RSSI_DELTA_2G_B4: {
+ uint8 tuple = cis[i];
+ uint8 grp;
+ if (tuple == HNBU_RSSI_DELTA_2G_B0) {
+ grp = 0;
+ } else if (tuple == HNBU_RSSI_DELTA_2G_B1) {
+ grp = 1;
+ } else if (tuple == HNBU_RSSI_DELTA_2G_B2) {
+ grp = 2;
+ } else if (tuple == HNBU_RSSI_DELTA_2G_B3) {
+ grp = 3;
+ } else {
+ grp = 4;
+ }
+ /* 2G Band Group = grp */
+ varbuf_append(&b, vstr_rssidelta2g, grp,
+ cis[i + 1], cis[i + 2],
+ cis[i + 3], cis[i + 4],
+ cis[i + 5], cis[i + 6],
+ cis[i + 7], cis[i + 8],
+ cis[i + 9], cis[i + 10],
+ cis[i + 11], cis[i + 12],
+ cis[i + 13], cis[i + 14],
+ cis[i + 15], cis[i + 16]);
+ break;
+ }
+
+ case HNBU_RSSI_CAL_FREQ_GRP_2G:
+ /* 2G Band Group Definition */
+ varbuf_append(&b, vstr_rssicalfrqg,
+ cis[i + 1], cis[i + 2],
+ cis[i + 3], cis[i + 4],
+ cis[i + 5], cis[i + 6],
+ cis[i + 7]);
+ break;
+
+ case HNBU_RSSI_DELTA_5GL:
+ case HNBU_RSSI_DELTA_5GML:
+ case HNBU_RSSI_DELTA_5GMU:
+ case HNBU_RSSI_DELTA_5GH: {
+ uint8 tuple = cis[i];
+ char *band[] = {"l", "ml", "mu", "h"};
+ char *pband;
+ if (tuple == HNBU_RSSI_DELTA_5GL) {
+ pband = band[0];
+ } else if (tuple == HNBU_RSSI_DELTA_5GML) {
+ pband = band[1];
+ } else if (tuple == HNBU_RSSI_DELTA_5GMU) {
+ pband = band[2];
+ } else {
+ pband = band[3];
+ }
+ /* 5G Band = band */
+ varbuf_append(&b, vstr_rssidelta5g, pband,
+ cis[i + 1], cis[i + 2],
+ cis[i + 3], cis[i + 4],
+ cis[i + 5], cis[i + 6],
+ cis[i + 7], cis[i + 8],
+ cis[i + 9], cis[i + 10],
+ cis[i + 11], cis[i + 12],
+ cis[i + 13], cis[i + 14],
+ cis[i + 15], cis[i + 16],
+ cis[i + 17], cis[i + 18],
+ cis[i + 19], cis[i + 20],
+ cis[i + 21], cis[i + 22],
+ cis[i + 23], cis[i + 24]);
+ break;
+ }
+
+ case HNBU_ACPA_6G_C0: {
+ const int a = 0;
+#ifndef OTP_SKIP_MAXP_PAPARAMS
+ varbuf_append(&b, vstr_subband6gver,
+ (cis[i + 2] << 8) + cis[i + 1]);
+ /* maxp6g */
+ varbuf_append(&b, vstr_maxp6ga, a,
+ cis[i + 3],
+ cis[i + 4],
+ cis[i + 5],
+ cis[i + 6],
+ cis[i + 7],
+ cis[i + 8]);
+#endif /* OTP_SKIP_MAXP_PAPARAMS */
+ /* pa6g */
+ varbuf_append(&b, vstr_pa6ga, a,
+ (cis[i + 10] << 8) + cis[i + 9],
+ (cis[i + 12] << 8) + cis[i + 11],
+ (cis[i + 14] << 8) + cis[i + 13],
+ (cis[i + 16] << 8) + cis[i + 15],
+ (cis[i + 18] << 8) + cis[i + 17],
+ (cis[i + 20] << 8) + cis[i + 19],
+ (cis[i + 22] << 8) + cis[i + 21],
+ (cis[i + 24] << 8) + cis[i + 23],
+ (cis[i + 26] << 8) + cis[i + 25],
+ (cis[i + 28] << 8) + cis[i + 27],
+ (cis[i + 30] << 8) + cis[i + 29],
+ (cis[i + 32] << 8) + cis[i + 31],
+ (cis[i + 34] << 8) + cis[i + 33],
+ (cis[i + 36] << 8) + cis[i + 35],
+ (cis[i + 38] << 8) + cis[i + 37],
+ (cis[i + 40] << 8) + cis[i + 39],
+ (cis[i + 42] << 8) + cis[i + 41],
+ (cis[i + 44] << 8) + cis[i + 43]);
+ break;
+ }
+
+ case HNBU_ACPA_6G_C1: {
+ const int a = 1;
+#ifndef OTP_SKIP_MAXP_PAPARAMS
+ /* maxp6g */
+ varbuf_append(&b, vstr_maxp6ga, a,
+ cis[i + 1],
+ cis[i + 2],
+ cis[i + 3],
+ cis[i + 4],
+ cis[i + 5],
+ cis[i + 6]);
+#endif /* OTP_SKIP_MAXP_PAPARAMS */
+ /* pa6g */
+ varbuf_append(&b, vstr_pa6ga, a,
+ (cis[i + 8] << 8) + cis[i + 7],
+ (cis[i + 10] << 8) + cis[i + 9],
+ (cis[i + 12] << 8) + cis[i + 11],
+ (cis[i + 14] << 8) + cis[i + 13],
+ (cis[i + 16] << 8) + cis[i + 15],
+ (cis[i + 18] << 8) + cis[i + 17],
+ (cis[i + 20] << 8) + cis[i + 19],
+ (cis[i + 22] << 8) + cis[i + 21],
+ (cis[i + 24] << 8) + cis[i + 23],
+ (cis[i + 26] << 8) + cis[i + 25],
+ (cis[i + 28] << 8) + cis[i + 27],
+ (cis[i + 30] << 8) + cis[i + 29],
+ (cis[i + 32] << 8) + cis[i + 31],
+ (cis[i + 34] << 8) + cis[i + 33],
+ (cis[i + 36] << 8) + cis[i + 35],
+ (cis[i + 38] << 8) + cis[i + 37],
+ (cis[i + 40] << 8) + cis[i + 39],
+ (cis[i + 42] << 8) + cis[i + 41]);
+ break;
+ }
+
+ case HNBU_ACPA_6G_C2: {
+ const int a = 2;
+#ifndef OTP_SKIP_MAXP_PAPARAMS
+ /* maxp6g */
+ varbuf_append(&b, vstr_maxp6ga, a,
+ cis[i + 1],
+ cis[i + 2],
+ cis[i + 3],
+ cis[i + 4],
+ cis[i + 5],
+ cis[i + 6]);
+#endif /* OTP_SKIP_MAXP_PAPARAMS */
+ /* pa6g */
+ varbuf_append(&b, vstr_pa6ga, a,
+ (cis[i + 8] << 8) + cis[i + 7],
+ (cis[i + 10] << 8) + cis[i + 9],
+ (cis[i + 12] << 8) + cis[i + 11],
+ (cis[i + 14] << 8) + cis[i + 13],
+ (cis[i + 16] << 8) + cis[i + 15],
+ (cis[i + 18] << 8) + cis[i + 17],
+ (cis[i + 20] << 8) + cis[i + 19],
+ (cis[i + 22] << 8) + cis[i + 21],
+ (cis[i + 24] << 8) + cis[i + 23],
+ (cis[i + 26] << 8) + cis[i + 25],
+ (cis[i + 28] << 8) + cis[i + 27],
+ (cis[i + 30] << 8) + cis[i + 29],
+ (cis[i + 32] << 8) + cis[i + 31],
+ (cis[i + 34] << 8) + cis[i + 33],
+ (cis[i + 36] << 8) + cis[i + 35],
+ (cis[i + 38] << 8) + cis[i + 37],
+ (cis[i + 40] << 8) + cis[i + 39],
+ (cis[i + 42] << 8) + cis[i + 41]);
+ break;
+ }
+
+ case HNBU_SUBBAND5GVER:
+ varbuf_append(&b, vstr_subband5gver,
+ (cis[i + 2] << 8) + cis[i + 1]);
+ break;
+
+ case HNBU_PAPARAMBWVER:
+ varbuf_append(&b, vstr_paparambwver, cis[i + 1]);
+ break;
+
+ case HNBU_TXBFRPCALS:
+ /* note: all 5 rpcal parameters are expected to be */
+ /* inside one tuple record, i.e. written with one */
+ /* wl wrvar cmd as follows: */
+ /* wl wrvar rpcal2g=0x1211 ... rpcal5gb3=0x0 */
+ if (tlen != 11) { /* sanity check */
+ BS_ERROR(("srom_parsecis:incorrect length:%d for"
+ " HNBU_TXBFRPCALS tuple\n",
+ tlen));
+ break;
+ }
+
+ varbuf_append(&b, vstr_paparamrpcalvars[0],
+ (cis[i + 1] + (cis[i + 2] << 8)));
+ varbuf_append(&b, vstr_paparamrpcalvars[1],
+ (cis[i + 3] + (cis[i + 4] << 8)));
+ varbuf_append(&b, vstr_paparamrpcalvars[2],
+ (cis[i + 5] + (cis[i + 6] << 8)));
+ varbuf_append(&b, vstr_paparamrpcalvars[3],
+ (cis[i + 7] + (cis[i + 8] << 8)));
+ varbuf_append(&b, vstr_paparamrpcalvars[4],
+ (cis[i + 9] + (cis[i + 10] << 8)));
+ break;
+
+ case HNBU_GPIO_PULL_DOWN:
+ varbuf_append(&b, vstr_gpdn,
+ (cis[i + 4] << 24) |
+ (cis[i + 3] << 16) |
+ (cis[i + 2] << 8) |
+ cis[i + 1]);
+ break;
+
+ case HNBU_MACADDR2:
+ if (!(ETHER_ISNULLADDR(&cis[i+1])) &&
+ !(ETHER_ISMULTI(&cis[i+1]))) {
+ bcm_ether_ntoa((struct ether_addr *)&cis[i + 1],
+ eabuf2);
+ }
+ break;
+ } /* CISTPL_BRCM_HNBU */
+ break;
+ } /* switch (tup) */
+
+ i += tlen;
+ } while (tup != CISTPL_END);
+ }
+
+ if (boardnum != -1) {
+ varbuf_append(&b, vstr_boardnum, boardnum);
+ }
+
+ if (eabuf[0]) {
+ varbuf_append(&b, vstr_macaddr, eabuf);
+ }
+
+ if (eabuf2[0]) {
+ varbuf_append(&b, vstr_macaddr2, eabuf2);
+ }
+
+#ifndef BCM_BOOTLOADER
+ /* if there is no antenna gain field, set default */
+ sromrev = (sromrev == 1u) ? (uint8)getintvar(NULL, rstr_sromrev) : sromrev;
+ if (sromrev <= 10u && getvar(NULL, rstr_ag0) == NULL && ag_init == FALSE) {
+ varbuf_append(&b, vstr_ag, 0, 0xff);
+ }
+#endif
+
+ /* final nullbyte terminator */
+ ASSERT(b.size >= 1u);
+ *b.buf++ = '\0';
+
+ ASSERT((uint)(b.buf - base) <= var_cis_size);
+
+ /* initvars_table() MALLOCs, copies and assigns the MALLOCed buffer to '*vars' */
+ err = initvars_table(osh, base /* start */, b.buf /* end */, vars, count);
+
+ MFREE(osh, base, var_cis_size);
+ return err;
+}
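+
+/*
+ * Worked example (illustrative): a HNBU_MACADDR2 tuple carrying the bytes
+ * 00:90:4c:aa:bb:cc is formatted into eabuf2 by bcm_ether_ntoa() inside the
+ * tuple walk above, and once CISTPL_END is reached it is emitted as
+ * "macaddr2=00:90:4c:aa:bb:cc" by the trailing varbuf_append() calls.
+ */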
+#endif /* !defined(BCMDONGLEHOST) */
+
+/**
+ * In chips with chipcommon rev 32 and later, the srom is in chipcommon,
+ * not in the bus cores.
+ */
+static uint16
+srom_cc_cmd(si_t *sih, osl_t *osh, volatile void *ccregs, uint32 cmd, uint wordoff, uint16 data)
+{
+ chipcregs_t *cc = ccregs;
+ uint wait_cnt = 1000;
+ uint32 byteoff = 0, sprom_size = 0;
+
+ BCM_REFERENCE(sih);
+ byteoff = wordoff * 2;
+
+ sprom_size = R_REG(osh, &cc->sromcontrol);
+ sprom_size = (sprom_size & SROM_SIZE_MASK) >> SROM_SIZE_SHFT_MASK;
+ if (sprom_size == SROM_SIZE_2K)
+ sprom_size = 2048;
+ else if (sprom_size == SROM_SIZE_512)
+ sprom_size = 512;
+ else if (sprom_size == SROM_SIZE_128)
+ sprom_size = 128;
+ if (byteoff >= sprom_size)
+ return 0xffff;
+
+ if ((cmd == SRC_OP_READ) || (cmd == SRC_OP_WRITE)) {
+ if (sih->ccrev >= 59)
+ W_REG(osh, &cc->chipcontrol, (byteoff & SROM16K_BANK_SEL_MASK) >>
+ SROM16K_BANK_SHFT_MASK);
+ W_REG(osh, &cc->sromaddress, (byteoff & SROM16K_ADDR_SEL_MASK));
+ if (cmd == SRC_OP_WRITE)
+ W_REG(osh, &cc->sromdata, data);
+ }
+
+ W_REG(osh, &cc->sromcontrol, SRC_START | cmd);
+
+ while (wait_cnt--) {
+ if ((R_REG(osh, &cc->sromcontrol) & SRC_BUSY) == 0)
+ break;
+ }
+
+ /* wait_cnt wraps to (uint)-1 when the loop exhausts, so re-test SRC_BUSY
+ * instead of wait_cnt to detect a timeout reliably
+ */
+ if (R_REG(osh, &cc->sromcontrol) & SRC_BUSY) {
+ BS_ERROR(("srom_cc_cmd: Command 0x%x timed out\n", cmd));
+ return 0xffff;
+ }
+ if (cmd == SRC_OP_READ)
+ return (uint16)R_REG(osh, &cc->sromdata);
+ else
+ return 0xffff;
+}
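+
+/*
+ * Illustrative sketch (not part of the driver): how srom_cc_cmd() is driven
+ * for a single indirect read. BCMSROM_EXAMPLES is a hypothetical guard that
+ * keeps the sketch compiled out.
+ */
+#ifdef BCMSROM_EXAMPLES
+static uint16
+example_srom_cc_read(si_t *sih, osl_t *osh, volatile void *ccregs, uint wordoff)
+{
+ /* latches the word address, issues SRC_OP_READ and, once SRC_BUSY
+ * clears, returns the word from the sromdata register
+ */
+ return srom_cc_cmd(sih, osh, ccregs, SRC_OP_READ, wordoff, 0);
+}
+#endif /* BCMSROM_EXAMPLES */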
+
+#define CC_SROM_SHADOW_WSIZE 512 /* 0x800 - 0xC00 */
+
+/**
+ * Read in and validate sprom.
+ * Return 0 on success, nonzero on error.
+ * Note: an SPROM containing only ones is reported as success when CRC checking
+ * is bypassed; it is unclear whether this is intended.
+ */
+static int
+sprom_read_pci(osl_t *osh, si_t *sih, volatile uint16 *sprom, uint wordoff,
+ uint16 *buf, uint nwords, bool check_crc)
+{
+ int err = 0;
+ uint i;
+ volatile void *ccregs = NULL;
+ chipcregs_t *cc = NULL;
+ uint32 ccval = 0, sprom_size = 0;
+ uint32 sprom_num_words;
+
+ if (BCM43602_CHIP(sih->chip) ||
+ (CHIPID(sih->chip) == BCM4360_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM43460_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM4352_CHIP_ID)) {
+ /* save current control setting */
+ ccval = si_chipcontrl_read(sih);
+ }
+
+ if (BCM43602_CHIP(sih->chip) ||
+ (((CHIPID(sih->chip) == BCM4360_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM43460_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM4352_CHIP_ID)) &&
+ (CHIPREV(sih->chiprev) <= 2))) {
+ si_chipcontrl_srom4360(sih, TRUE);
+ }
+
+ if (FALSE) {
+ si_srom_clk_set(sih); /* corrects srom clock frequency */
+ }
+
+ ccregs = ((volatile uint8 *)sprom - CC_SROM_OTP);
+ cc = ccregs;
+ sprom_size = R_REG(osh, &cc->sromcontrol);
+ sprom_size = (sprom_size & SROM_SIZE_MASK) >> SROM_SIZE_SHFT_MASK;
+ if (sprom_size == SROM_SIZE_2K)
+ sprom_size = 2048;
+ else if (sprom_size == SROM_SIZE_512)
+ sprom_size = 512;
+ else if (sprom_size == SROM_SIZE_128)
+ sprom_size = 128;
+ sprom_num_words = sprom_size/2;
+
+ /* read the sprom */
+ for (i = 0; i < nwords; i++) {
+ if (sih->ccrev > 31 && ISSIM_ENAB(sih)) {
+ /* use indirect since direct is too slow on QT */
+ if ((sih->cccaps & CC_CAP_SROM) == 0) {
+ err = 1;
+ goto error;
+ }
+
+ /* hack to get ccregs */
+ ccregs = (volatile void *)((volatile uint8 *)sprom - CC_SROM_OTP);
+ buf[i] = srom_cc_cmd(sih, osh, ccregs, SRC_OP_READ, wordoff + i, 0);
+
+ } else {
+ /* Because of the slow emulation we need to read twice in QT */
+ if (ISSIM_ENAB(sih)) {
+ buf[i] = R_REG(osh, &sprom[wordoff + i]);
+ }
+
+ if ((wordoff + i) >= sprom_num_words) {
+ buf[i] = 0xffff;
+ } else if ((wordoff + i) >= CC_SROM_SHADOW_WSIZE) {
+ /* Srom shadow region in chipcommon is only 512 words
+ * use indirect access for Srom beyond 512 words
+ */
+ buf[i] = srom_cc_cmd(sih, osh, ccregs, SRC_OP_READ, wordoff + i, 0);
+ } else {
+ buf[i] = R_REG(osh, &sprom[wordoff + i]);
+ }
+ }
+ if (i == SROM13_SIGN) {
+ if ((buf[SROM13_SIGN] != SROM13_SIGNATURE) && (nwords == SROM13_WORDS)) {
+ err = 1;
+ goto error;
+ }
+ }
+ }
+
+ /* bypass crc checking for simulation to allow srom hack */
+ if (ISSIM_ENAB(sih)) {
+ goto error;
+ }
+
+ if (check_crc) {
+
+ if (buf[0] == 0xffff) {
+ /* The hardware thinks that an srom that starts with 0xffff
+ * is blank, regardless of the rest of the content, so declare
+ * it bad.
+ */
+ BS_ERROR(("sprom_read_pci: buf[0] = 0x%x, returning bad-crc\n", buf[0]));
+ err = 1;
+ goto error;
+ }
+
+ /* fixup the endianness so crc8 will pass */
+ htol16_buf(buf, nwords * 2);
+ if (hndcrc8((uint8 *)buf, nwords * 2, CRC8_INIT_VALUE) != CRC8_GOOD_VALUE) {
+ /* DBG only pci always read srom4 first, then srom8/9 */
+ /* BS_ERROR(("sprom_read_pci: bad crc\n")); */
+ err = 1;
+ }
+ /* now correct the endianness of the byte array */
+ ltoh16_buf(buf, nwords * 2);
+ }
+
+error:
+ if ((CHIPID(sih->chip) == BCM4360_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM43460_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM4352_CHIP_ID) ||
+ BCM43602_CHIP(sih->chip)) {
+ /* Restore config after reading SROM */
+ si_chipcontrl_restore(sih, ccval);
+ }
+
+ return err;
+}
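+
+/*
+ * Illustrative sketch (not part of the driver): probing only the SROM
+ * signature words with CRC checking deferred, the same way
+ * initvars_srom_pci() starts out before it knows the image size. The
+ * BCMSROM_EXAMPLES guard is hypothetical; 'buf' must hold at least
+ * SROM_SIGN_MINWORDS + 1 words.
+ */
+#ifdef BCMSROM_EXAMPLES
+static int
+example_sprom_probe(osl_t *osh, si_t *sih, volatile uint16 *sromwindow, uint16 *buf)
+{
+ /* check_crc is FALSE: only a raw read, validation comes later */
+ return sprom_read_pci(osh, sih, sromwindow, 0, buf, SROM_SIGN_MINWORDS + 1, FALSE);
+}
+#endif /* BCMSROM_EXAMPLES */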
+
+#if !defined(BCMDONGLEHOST)
+#if defined(BCMNVRAMW) || defined(BCMNVRAMR)
+static int
+BCMSROMATTACHFN(otp_read_pci)(osl_t *osh, si_t *sih, uint16 *buf, uint bufsz)
+{
+ uint8 *otp;
+ uint sz = OTP_SZ_MAX/2; /* size in words */
+ int err = 0;
+
+ if (bufsz > OTP_SZ_MAX) {
+ return BCME_ERROR;
+ }
+
+ /* freed in same function */
+ if ((otp = MALLOC_NOPERSIST(osh, OTP_SZ_MAX)) == NULL) {
+ return BCME_ERROR;
+ }
+
+ bzero(otp, OTP_SZ_MAX);
+
+ err = otp_read_region(sih, OTP_HW_RGN, (uint16 *)otp, &sz);
+
+ if (err) {
+ MFREE(osh, otp, OTP_SZ_MAX);
+ return err;
+ }
+
+ bcopy(otp, buf, bufsz);
+
+ /* Check CRC */
+ if (((uint16 *)otp)[0] == 0xffff) {
+ /* The hardware thinks that an srom that starts with 0xffff
+ * is blank, regardless of the rest of the content, so declare
+ * it bad.
+ */
+ BS_ERROR(("otp_read_pci: otp[0] = 0x%x, returning bad-crc\n",
+ ((uint16 *)otp)[0]));
+ MFREE(osh, otp, OTP_SZ_MAX);
+ return 1;
+ }
+
+ /* fixup the endianness so crc8 will pass */
+ htol16_buf(otp, OTP_SZ_MAX);
+ if (hndcrc8(otp, SROM4_WORDS * 2, CRC8_INIT_VALUE) != CRC8_GOOD_VALUE &&
+ hndcrc8(otp, SROM10_WORDS * 2, CRC8_INIT_VALUE) != CRC8_GOOD_VALUE &&
+ hndcrc8(otp, SROM11_WORDS * 2, CRC8_INIT_VALUE) != CRC8_GOOD_VALUE &&
+ hndcrc8(otp, SROM12_WORDS * 2, CRC8_INIT_VALUE) != CRC8_GOOD_VALUE &&
+ hndcrc8(otp, SROM13_WORDS * 2, CRC8_INIT_VALUE) != CRC8_GOOD_VALUE) {
+ BS_ERROR(("otp_read_pci: bad crc\n"));
+ err = 1;
+ }
+
+ MFREE(osh, otp, OTP_SZ_MAX);
+
+ return err;
+}
+#endif /* defined(BCMNVRAMW) || defined(BCMNVRAMR) */
+#endif /* !defined(BCMDONGLEHOST) */
+
+int
+srom_otp_write_region_crc(si_t *sih, uint nbytes, uint16* buf16, bool write)
+{
+#if defined(WLTEST) || defined(BCMDBG)
+ int err = 0, crc = 0;
+#if !defined(BCMDONGLEHOST)
+ uint8 *buf8;
+
+ /* Check nbytes is not odd or too big */
+ if ((nbytes & 1) || (nbytes > SROM_MAX))
+ return 1;
+
+ /* block invalid buffer size */
+ if (nbytes < SROM4_WORDS * 2)
+ return BCME_BUFTOOSHORT;
+ else if (nbytes > SROM13_WORDS * 2)
+ return BCME_BUFTOOLONG;
+
+ /* Verify signatures */
+ if (!((buf16[SROM4_SIGN] == SROM4_SIGNATURE) ||
+ (buf16[SROM8_SIGN] == SROM4_SIGNATURE) ||
+ (buf16[SROM10_SIGN] == SROM4_SIGNATURE) ||
+ (buf16[SROM11_SIGN] == SROM11_SIGNATURE)||
+ (buf16[SROM12_SIGN] == SROM12_SIGNATURE)||
+ (buf16[SROM13_SIGN] == SROM13_SIGNATURE))) {
+ BS_ERROR(("srom_otp_write_region_crc: wrong signature SROM4_SIGN %x SROM8_SIGN %x"
+ " SROM10_SIGN %x\n",
+ buf16[SROM4_SIGN], buf16[SROM8_SIGN], buf16[SROM10_SIGN]));
+ return BCME_ERROR;
+ }
+
+ /* Check CRC */
+ if (buf16[0] == 0xffff) {
+ /* The hardware thinks that an srom that starts with 0xffff
+ * is blank, regardless of the rest of the content, so declare
+ * it bad.
+ */
+ BS_ERROR(("srom_otp_write_region_crc: invalid buf16[0] = 0x%x\n", buf16[0]));
+ goto out;
+ }
+
+ buf8 = (uint8*)buf16;
+ /* fixup the endianness and then calculate crc */
+ htol16_buf(buf8, nbytes);
+ crc = ~hndcrc8(buf8, nbytes - 1, CRC8_INIT_VALUE);
+ /* now correct the endianness of the byte array */
+ ltoh16_buf(buf8, nbytes);
+
+ if (nbytes == SROM11_WORDS * 2)
+ buf16[SROM11_CRCREV] = (crc << 8) | (buf16[SROM11_CRCREV] & 0xff);
+ else if (nbytes == SROM12_WORDS * 2)
+ buf16[SROM12_CRCREV] = (crc << 8) | (buf16[SROM12_CRCREV] & 0xff);
+ else if (nbytes == SROM13_WORDS * 2)
+ buf16[SROM13_CRCREV] = (crc << 8) | (buf16[SROM13_CRCREV] & 0xff);
+ else if (nbytes == SROM10_WORDS * 2)
+ buf16[SROM10_CRCREV] = (crc << 8) | (buf16[SROM10_CRCREV] & 0xff);
+ else
+ buf16[SROM4_CRCREV] = (crc << 8) | (buf16[SROM4_CRCREV] & 0xff);
+
+#ifdef BCMNVRAMW
+ /* Write the CRC back */
+ if (write)
+ err = otp_write_region(sih, OTP_HW_RGN, buf16, nbytes/2, 0);
+#endif /* BCMNVRAMW */
+
+out:
+#endif /* !defined(BCMDONGLEHOST) */
+ return write ? err : crc;
+#else
+ BCM_REFERENCE(sih);
+ BCM_REFERENCE(nbytes);
+ BCM_REFERENCE(buf16);
+ BCM_REFERENCE(write);
+ return 0;
+#endif /* WLTEST || BCMDBG */
+
+}
+
+#if !defined(BCMDONGLEHOST)
+int
+BCMATTACHFN(dbushost_initvars_flash)(si_t *sih, osl_t *osh, char **base, uint len)
+{
+ return initvars_flash(sih, osh, base, len);
+}
+
+/**
+ * Find variables with <devpath> from flash. 'base' points to the beginning
+ * of the table upon enter and to the end of the table upon exit when success.
+ * Return 0 on success, nonzero on error.
+ */
+static int
+BCMATTACHFN(initvars_flash)(si_t *sih, osl_t *osh, char **base, uint len)
+{
+ char *vp = *base;
+ char *flash;
+ int err;
+ char *s;
+ uint l, dl, copy_len;
+ char devpath[SI_DEVPATH_BUFSZ], devpath_pcie[SI_DEVPATH_BUFSZ];
+ char coded_name[SI_DEVPATH_BUFSZ] = {0};
+ int path_len, coded_len, devid_len, pcie_path_len;
+
+ /* allocate memory and read in flash */
+ /* freed in same function */
+ if (!(flash = MALLOC_NOPERSIST(osh, MAX_NVRAM_SPACE)))
+ return BCME_NOMEM;
+ if ((err = nvram_getall(flash, MAX_NVRAM_SPACE)))
+ goto exit;
+
+ /* create legacy devpath prefix */
+ si_devpath(sih, devpath, sizeof(devpath));
+ path_len = strlen(devpath);
+
+ if (BUSTYPE(sih->bustype) == PCI_BUS) {
+ si_devpath_pcie(sih, devpath_pcie, sizeof(devpath_pcie));
+ pcie_path_len = strlen(devpath_pcie);
+ } else
+ pcie_path_len = 0;
+
+ /* create coded devpath prefix */
+ si_coded_devpathvar(sih, coded_name, sizeof(coded_name), "devid");
+
+ /* coded_name is now 'xx:devid'; eat the trailing 'devid' */
+ /* so that it becomes 'xx:' */
+ devid_len = strlen("devid");
+ coded_len = strlen(coded_name);
+ if (coded_len > devid_len) {
+ coded_name[coded_len - devid_len] = '\0';
+ coded_len -= devid_len;
+ }
+ else
+ coded_len = 0;
+
+ /* grab vars with the <devpath> prefix or <coded_name> prefix in name */
+ for (s = flash; s && *s; s += l + 1) {
+ l = strlen(s);
+
+ /* skip non-matching variable */
+ if (strncmp(s, devpath, path_len) == 0)
+ dl = path_len;
+ else if (pcie_path_len && strncmp(s, devpath_pcie, pcie_path_len) == 0)
+ dl = pcie_path_len;
+ else if (coded_len && strncmp(s, coded_name, coded_len) == 0)
+ dl = coded_len;
+ else
+ continue;
+
+ /* is there enough room to copy? */
+ copy_len = l - dl + 1;
+ if (len < copy_len) {
+ err = BCME_BUFTOOSHORT;
+ goto exit;
+ }
+
+ /* no prefix, just the name=value */
+ strlcpy(vp, &s[dl], copy_len);
+ vp += copy_len;
+ len -= copy_len;
+ }
+
+ /* add null string as terminator */
+ if (len < 1) {
+ err = BCME_BUFTOOSHORT;
+ goto exit;
+ }
+ *vp++ = '\0';
+
+ *base = vp;
+
+exit:
+ MFREE(osh, flash, MAX_NVRAM_SPACE);
+ return err;
+}
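+
+/*
+ * Worked example (illustrative): with a legacy devpath of, say, "pci/1/1/",
+ * the flash variable "pci/1/1/macaddr=00:90:4c:aa:bb:cc" matches the prefix
+ * and is copied into the caller's table with the prefix stripped, i.e. as
+ * "macaddr=00:90:4c:aa:bb:cc".
+ */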
+#endif /* !defined(BCMDONGLEHOST) */
+
+#if !defined(BCMUSBDEV_ENABLED) && !defined(BCMSDIODEV_ENABLED) && \
+ !defined(BCMPCIEDEV_ENABLED)
+#if !defined(BCMDONGLEHOST)
+/**
+ * Initialize nonvolatile variable table from flash.
+ * Return 0 on success, nonzero on error.
+ */
+/* There is no need to load the nvram variables from flash for dongles.
+ * These variables are mainly for supporting SROM-less devices, although
+ * we can use the same mechanism to support configuration of multiple
+ * cores of the same type.
+ */
+static int
+BCMATTACHFN(initvars_flash_si)(si_t *sih, char **vars, uint *count)
+{
+ osl_t *osh = si_osh(sih);
+ char *vp, *base;
+ int err;
+
+ ASSERT(vars != NULL);
+ ASSERT(count != NULL);
+
+ /* freed in same function */
+ base = vp = MALLOC_NOPERSIST(osh, MAXSZ_NVRAM_VARS);
+ ASSERT(vp != NULL);
+ if (!vp)
+ return BCME_NOMEM;
+
+ if ((err = initvars_flash(sih, osh, &vp, MAXSZ_NVRAM_VARS)) == 0)
+ err = initvars_table(osh, base, vp, vars, count);
+
+ MFREE(osh, base, MAXSZ_NVRAM_VARS);
+
+ return err;
+}
+#endif /* !defined(BCMDONGLEHOST) */
+#endif /* !BCMUSBDEV_ENABLED && !BCMSDIODEV_ENABLED && !BCMPCIEDEV_ENABLED */
+
+#if !defined(BCMDONGLEHOST)
+
+/** returns position of rightmost bit that was set in caller supplied mask */
+static uint
+mask_shift(uint16 mask)
+{
+ uint i;
+ for (i = 0; i < (sizeof(mask) << 3); i ++) {
+ if (mask & (1 << i))
+ return i;
+ }
+ ASSERT(mask);
+ return 0;
+}
+
+static uint
+mask_width(uint16 mask)
+{
+ int i;
+ for (i = (sizeof(mask) << 3) - 1; i >= 0; i --) {
+ if (mask & (1 << i))
+ return (uint)(i - mask_shift(mask) + 1);
+ }
+ ASSERT(mask);
+ return 0;
+}
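+
+/*
+ * Worked example (illustrative only, compiled out): for mask 0x01e0 the set
+ * bits are 5..8, so mask_shift() returns 5 and mask_width() returns 4; a
+ * field is then extracted as (w & mask) >> mask_shift(mask).
+ */
+#ifdef BCMSROM_EXAMPLES
+static uint16
+example_extract_field(uint16 w)
+{
+ const uint16 mask = 0x01e0; /* bits 5..8 */
+ return (w & mask) >> mask_shift(mask); /* 4-bit field value */
+}
+#endif /* BCMSROM_EXAMPLES */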
+
+#ifdef BCMASSERT_SUPPORT
+static bool
+mask_valid(uint16 mask)
+{
+ uint shift = mask_shift(mask);
+ uint width = mask_width(mask);
+ return mask == ((~0 << shift) & ~(~0 << (shift + width)));
+}
+#endif
+#ifdef NVSRCX
+void
+srom_set_sromvars(char *vars)
+{
+ if (sromh)
+ sromh->_srom_vars = vars;
+}
+char *
+srom_get_sromvars()
+{
+ if (sromh)
+ return sromh->_srom_vars;
+ else
+ return NULL;
+}
+
+srom_info_t *
+srom_info_init(osl_t *osh)
+{
+ sromh = (srom_info_t *) MALLOC_NOPERSIST(osh, sizeof(srom_info_t));
+ if (!sromh)
+ return NULL;
+ sromh->_srom_vars = NULL;
+ sromh->is_caldata_prsnt = FALSE;
+ return sromh;
+}
+#endif /* NVSRCX */
+/**
+ * Parses caller supplied SROM contents into name=value pairs. Global array pci_sromvars[] contains
+ * the link between a word offset in SROM and the corresponding NVRAM variable name.'srom' points to
+ * the SROM word array. 'off' specifies the offset of the first word 'srom' points to, which should
+ * be either 0 or SROM3_SWRG_OFF (full SROM or software region).
+ */
+static void
+BCMATTACHFN(_initvars_srom_pci)(uint8 sromrev, uint16 *srom, uint off, varbuf_t *b)
+{
+ uint16 w;
+ uint32 val;
+ const sromvar_t *srv;
+ uint width;
+ uint flags;
+ uint32 sr = (1 << sromrev);
+ bool in_array = FALSE;
+ static char array_temp[256];
+ uint array_curr = 0;
+ const char* array_name = NULL;
+
+ varbuf_append(b, "sromrev=%d", sromrev);
+#if !defined(SROM15_MEMOPT) && !defined(SROM17_MEMOPT)
+ if (sromrev == 15) {
+ srv = pci_srom15vars;
+ } else if (sromrev == 16) {
+ srv = pci_srom16vars;
+ } else if (sromrev == 17) {
+ srv = pci_srom17vars;
+ } else if (sromrev == 18) {
+ srv = pci_srom18vars;
+ } else {
+ srv = pci_sromvars;
+ }
+#else
+#if defined(SROM15_MEMOPT)
+ srv = pci_srom15vars;
+#endif /* defined(SROM15_MEMOPT) */
+#if defined(SROM17_MEMOPT)
+ srv = pci_srom17vars;
+#endif /* defined(SROM17_MEMOPT) */
+#endif /* !defined(SROM15_MEMOPT) && !defined(SROM17_MEMOPT) */
+
+ for (; srv->name != NULL; srv ++) {
+ const char *name;
+ static bool in_array2 = FALSE;
+ static char array_temp2[256];
+ static uint array_curr2 = 0;
+ static const char* array_name2 = NULL;
+
+ if ((srv->revmask & sr) == 0)
+ continue;
+
+ if (srv->off < off)
+ continue;
+
+ flags = srv->flags;
+ name = srv->name;
+
+ /* This entry is for mfgc only. Don't generate a param for it. */
+ if (flags & SRFL_NOVAR)
+ continue;
+
+ if (flags & SRFL_ETHADDR) {
+ char eabuf[ETHER_ADDR_STR_LEN];
+ struct ether_addr ea;
+
+ ea.octet[0] = (srom[srv->off - off] >> 8) & 0xff;
+ ea.octet[1] = srom[srv->off - off] & 0xff;
+ ea.octet[2] = (srom[srv->off + 1 - off] >> 8) & 0xff;
+ ea.octet[3] = srom[srv->off + 1 - off] & 0xff;
+ ea.octet[4] = (srom[srv->off + 2 - off] >> 8) & 0xff;
+ ea.octet[5] = srom[srv->off + 2 - off] & 0xff;
+ bcm_ether_ntoa(&ea, eabuf);
+
+ varbuf_append(b, "%s=%s", name, eabuf);
+ } else {
+ ASSERT(mask_valid(srv->mask));
+ ASSERT(mask_width(srv->mask));
+
+ /* Start of an array */
+ if (sromrev >= 10 && (srv->flags & SRFL_ARRAY) && !in_array2) {
+ array_curr2 = 0;
+ array_name2 = (const char*)srv->name;
+ bzero((void*)array_temp2, sizeof(array_temp2));
+ in_array2 = TRUE;
+ }
+
+ w = srom[srv->off - off];
+ val = (w & srv->mask) >> mask_shift(srv->mask);
+ width = mask_width(srv->mask);
+
+ while (srv->flags & SRFL_MORE) {
+ srv ++;
+ ASSERT(srv->name != NULL);
+
+ if (srv->off == 0 || srv->off < off)
+ continue;
+
+ ASSERT(mask_valid(srv->mask));
+ ASSERT(mask_width(srv->mask));
+
+ w = srom[srv->off - off];
+ val += ((w & srv->mask) >> mask_shift(srv->mask)) << width;
+ width += mask_width(srv->mask);
+ }
+
+ if ((flags & SRFL_NOFFS) && ((int)val == (1 << width) - 1))
+ continue;
+
+ /* Array support starts in sromrev 10. Skip arrays for sromrev <= 9 */
+ if (sromrev <= 9 && srv->flags & SRFL_ARRAY) {
+ while (srv->flags & SRFL_ARRAY)
+ srv ++;
+ srv ++;
+ }
+
+ if (in_array2) {
+ int ret;
+
+ if (flags & SRFL_PRHEX) {
+ ret = snprintf(array_temp2 + array_curr2,
+ sizeof(array_temp2) - array_curr2, "0x%x,", val);
+ } else if ((flags & SRFL_PRSIGN) &&
+ (val & (1 << (width - 1)))) {
+ ret = snprintf(array_temp2 + array_curr2,
+ sizeof(array_temp2) - array_curr2, "%d,",
+ (int)(val | (~0 << width)));
+ } else {
+ ret = snprintf(array_temp2 + array_curr2,
+ sizeof(array_temp2) - array_curr2, "%u,", val);
+ }
+
+ if (ret > 0) {
+ array_curr2 += ret;
+ } else {
+ BS_ERROR(("_initvars_srom_pci: array %s parsing error."
+ " buffer too short.\n",
+ array_name2));
+ ASSERT(0);
+
+ /* buffer too small, skip this param */
+ while (srv->flags & SRFL_ARRAY)
+ srv ++;
+ srv ++;
+ in_array2 = FALSE;
+ continue;
+ }
+
+ if (!(srv->flags & SRFL_ARRAY)) { /* Array ends */
+ /* Remove the last ',' */
+ array_temp2[array_curr2-1] = '\0';
+ in_array2 = FALSE;
+ varbuf_append(b, "%s=%s", array_name2, array_temp2);
+ }
+ } else if (flags & SRFL_CCODE) {
+ if (val == 0)
+ varbuf_append(b, "ccode=");
+ else
+ varbuf_append(b, "ccode=%c%c", (val >> 8), (val & 0xff));
+ } else if (flags & SRFL_PRHEX) {
+ varbuf_append(b, "%s=0x%x", name, val);
+ } else if ((flags & SRFL_PRSIGN) && (val & (1 << (width - 1)))) {
+ varbuf_append(b, "%s=%d", name, (int)(val | (~0 << width)));
+ } else {
+ varbuf_append(b, "%s=%u", name, val);
+ }
+ }
+ }
+
+ if ((sromrev >= 4) && (sromrev != 16) && (sromrev != 18)) {
+ /* Do per-path variables */
+ uint p, pb, psz, path_num;
+
+ if ((sromrev == 17) || (sromrev == 15)) {
+ pb = psz = 0;
+ path_num = 0;
+ if (sromh)
+ sromh->is_caldata_prsnt = TRUE;
+ } else if (sromrev >= 13) {
+ pb = SROM13_PATH0;
+ psz = SROM13_PATH1 - SROM13_PATH0;
+ path_num = MAX_PATH_SROM_13;
+ } else if (sromrev >= 12) {
+ pb = SROM12_PATH0;
+ psz = SROM12_PATH1 - SROM12_PATH0;
+ path_num = MAX_PATH_SROM_12;
+ } else if (sromrev >= 11) {
+ pb = SROM11_PATH0;
+ psz = SROM11_PATH1 - SROM11_PATH0;
+ path_num = MAX_PATH_SROM_11;
+ } else if (sromrev >= 8) {
+ pb = SROM8_PATH0;
+ psz = SROM8_PATH1 - SROM8_PATH0;
+ path_num = MAX_PATH_SROM;
+ } else {
+ pb = SROM4_PATH0;
+ psz = SROM4_PATH1 - SROM4_PATH0;
+ path_num = MAX_PATH_SROM;
+ }
+
+ for (p = 0; p < path_num; p++) {
+ for (srv = perpath_pci_sromvars; srv->name != NULL; srv ++) {
+
+ if ((srv->revmask & sr) == 0)
+ continue;
+
+ if (pb + srv->off < off)
+ continue;
+
+ /* This entry is for mfgc only. Don't generate a param for it. */
+ if (srv->flags & SRFL_NOVAR)
+ continue;
+
+ /* Start of an array */
+ if (sromrev >= 10 && (srv->flags & SRFL_ARRAY) && !in_array) {
+ array_curr = 0;
+ array_name = (const char*)srv->name;
+ bzero((void*)array_temp, sizeof(array_temp));
+ in_array = TRUE;
+ }
+
+ w = srom[pb + srv->off - off];
+
+ ASSERT(mask_valid(srv->mask));
+ val = (w & srv->mask) >> mask_shift(srv->mask);
+ width = mask_width(srv->mask);
+
+ flags = srv->flags;
+
+ /* Cheating: no per-path var is more than 1 word */
+
+ if ((srv->flags & SRFL_NOFFS) && ((int)val == (1 << width) - 1))
+ continue;
+
+ if (in_array) {
+ int ret;
+
+ if (flags & SRFL_PRHEX) {
+ ret = snprintf(array_temp + array_curr,
+ sizeof(array_temp) - array_curr, "0x%x,", val);
+ } else if ((flags & SRFL_PRSIGN) &&
+ (val & (1 << (width - 1)))) {
+ ret = snprintf(array_temp + array_curr,
+ sizeof(array_temp) - array_curr, "%d,",
+ (int)(val | (~0 << width)));
+ } else {
+ ret = snprintf(array_temp + array_curr,
+ sizeof(array_temp) - array_curr, "%u,", val);
+ }
+
+ if (ret > 0) {
+ array_curr += ret;
+ } else {
+ BS_ERROR(
+ ("_initvars_srom_pci: array %s parsing error."
+ " buffer too short.\n",
+ array_name));
+ ASSERT(0);
+
+ /* buffer too small, skip this param */
+ while (srv->flags & SRFL_ARRAY)
+ srv ++;
+ srv ++;
+ in_array = FALSE;
+ continue;
+ }
+
+ if (!(srv->flags & SRFL_ARRAY)) { /* Array ends */
+ /* Remove the last ',' */
+ array_temp[array_curr-1] = '\0';
+ in_array = FALSE;
+ varbuf_append(b, "%s%d=%s",
+ array_name, p, array_temp);
+ }
+ } else if (srv->flags & SRFL_PRHEX)
+ varbuf_append(b, "%s%d=0x%x", srv->name, p, val);
+ else
+ varbuf_append(b, "%s%d=%d", srv->name, p, val);
+ }
+ if (sromrev >= 13 && (p == (MAX_PATH_SROM_13 - 2))) {
+ psz = SROM13_PATH3 - SROM13_PATH2;
+ }
+ pb += psz;
+ }
+ } /* per path variables */
+} /* _initvars_srom_pci */
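+
+/*
+ * Worked example (illustrative only, compiled out): how a single-word table
+ * entry is turned into a name=value pair by the loops above. The offset, mask
+ * and variable name below are made up; BCMSROM_EXAMPLES is a hypothetical
+ * guard that keeps this sketch out of real builds.
+ */
+#ifdef BCMSROM_EXAMPLES
+static void
+example_emit_one_var(varbuf_t *b, uint16 *srom)
+{
+ const uint16 mask = 0x00ff; /* hypothetical entry: word offset 4, 8-bit field */
+ uint32 val = (srom[4] & mask) >> mask_shift(mask);
+ varbuf_append(b, "%s=0x%x", "examplevar", val); /* SRFL_PRHEX formatting */
+}
+#endif /* BCMSROM_EXAMPLES */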
+
+int
+BCMATTACHFN(get_srom_pci_caldata_size)(uint32 sromrev)
+{
+ uint32 caldata_size;
+
+ switch (sromrev) {
+ case 15:
+ caldata_size = (SROM15_CALDATA_WORDS * 2);
+ break;
+ case 17:
+ caldata_size = (SROM17_CALDATA_WORDS * 2);
+ break;
+ default:
+ caldata_size = 0;
+ break;
+ }
+ return caldata_size;
+}
+
+uint32
+BCMATTACHFN(get_srom_size)(uint32 sromrev)
+{
+ uint32 size;
+
+ switch (sromrev) {
+ case 15:
+ size = (SROM15_WORDS * 2);
+ break;
+ case 17:
+ size = (SROM17_WORDS * 2);
+ break;
+ default:
+ size = 0;
+ break;
+ }
+ return size;
+}
+#if defined(BCMPCIEDEV_SROM_FORMAT) && defined(WLC_TXCAL)
+
+int
+BCMATTACHFN(_initvars_srom_pci_caldata)(si_t *sih, uint16 *srom, uint32 sromrev)
+{
+ int err = BCME_ERROR;
+
+ if (sromh && (!sromh->is_caldata_prsnt)) {
+ return err;
+ }
+
+ if (si_is_sprom_available(sih)) {
+ uint32 caldata_size;
+
+ caldata_size = get_srom_pci_caldata_size(sromrev);
+ memcpy(srom, caldata_array, caldata_size);
+ err = BCME_OK;
+ }
+ return err;
+}
+#endif /* defined(BCMPCIEDEV_SROM_FORMAT) && defined(WLC_TXCAL) */
+/**
+ * Initialize nonvolatile variable table from sprom, or OTP when SPROM is not available, or
+ * optionally a set of 'defaultsromvars' (compiled-in) variables when both OTP and SPROM bear no
+ * contents.
+ *
+ * On success, a buffer containing var/val pairs is allocated and returned in params vars and count.
+ *
+ * Return 0 on success, nonzero on error.
+ */
+static int
+BCMATTACHFN(initvars_srom_pci)(si_t *sih, volatile void *curmap, char **vars, uint *count)
+{
+ uint16 *srom;
+ volatile uint16 *sromwindow;
+ uint8 sromrev = 0;
+ uint32 sr;
+ varbuf_t b;
+ char *vp, *base = NULL;
+ osl_t *osh = si_osh(sih);
+ bool flash = FALSE;
+ int err = 0;
+#if defined(BCMPCIEDEV_SROM_FORMAT) && defined(WLC_TXCAL)
+ uint16 cal_wordoffset;
+#endif
+
+ /*
+ * Apply CRC over the SROM content regardless of whether an SROM is present, and use the
+ * existence of the variable <devpath>sromrev in flash to decide whether to return an
+ * error when the CRC fails or to read SROM variables from flash.
+ */
+
+ /* freed in same function */
+ srom = MALLOC_NOPERSIST(osh, SROM_MAX);
+ ASSERT(srom != NULL);
+ if (!srom)
+ return -2;
+
+ sromwindow = (volatile uint16 *)srom_offset(sih, curmap);
+ if (si_is_sprom_available(sih)) {
+ err = sprom_read_pci(osh, sih, sromwindow, 0, srom, SROM_SIGN_MINWORDS + 1, FALSE);
+ if (err == 0) {
+ if (srom[SROM18_SIGN] == SROM18_SIGNATURE) {
+ err = sprom_read_pci(osh, sih, sromwindow,
+ 0, srom, SROM18_WORDS, TRUE);
+ sromrev = srom[SROM18_CRCREV] & 0xff;
+ } else if (srom[SROM17_SIGN] == SROM17_SIGNATURE) {
+ err = sprom_read_pci(osh, sih, sromwindow,
+ 0, srom, SROM17_WORDS, TRUE);
+ sromrev = srom[SROM17_CRCREV] & 0xff;
+ } else if (srom[SROM16_SIGN] == SROM16_SIGNATURE) {
+ err = sprom_read_pci(osh, sih, sromwindow,
+ 0, srom, SROM16_WORDS, TRUE);
+ sromrev = srom[SROM16_CRCREV] & 0xff;
+ } else if (srom[SROM15_SIGN] == SROM15_SIGNATURE) { /* srom 15 */
+ err = sprom_read_pci(osh, sih, sromwindow,
+ 0, srom, SROM15_WORDS, TRUE);
+ sromrev = srom[SROM15_CRCREV] & 0xff;
+ } else if (srom[SROM11_SIGN] == SROM13_SIGNATURE) {
+ err = sprom_read_pci(osh, sih, sromwindow,
+ 0, srom, SROM13_WORDS, TRUE);
+ sromrev = srom[SROM13_CRCREV] & 0xff;
+ } else if (srom[SROM11_SIGN] == SROM12_SIGNATURE) {
+ err = sprom_read_pci(osh, sih, sromwindow,
+ 0, srom, SROM12_WORDS, TRUE);
+ sromrev = srom[SROM12_CRCREV] & 0xff;
+ } else if (srom[SROM11_SIGN] == SROM11_SIGNATURE) {
+ err = sprom_read_pci(osh, sih, sromwindow,
+ 0, srom, SROM11_WORDS, TRUE);
+ sromrev = srom[SROM11_CRCREV] & 0xff;
+ } else if ((srom[SROM4_SIGN] == SROM4_SIGNATURE) || /* srom 4 */
+ (srom[SROM8_SIGN] == SROM4_SIGNATURE)) { /* srom 8,9 */
+ err = sprom_read_pci(osh, sih, sromwindow,
+ 0, srom, SROM4_WORDS, TRUE);
+ sromrev = srom[SROM4_CRCREV] & 0xff;
+ } else {
+ err = sprom_read_pci(osh, sih, sromwindow, 0,
+ srom, SROM_WORDS, TRUE);
+ if (err == 0) {
+ /* srom is good and is rev < 4 */
+ /* top word of sprom contains version and crc8 */
+ sromrev = srom[SROM_CRCREV] & 0xff;
+ /* bcm4401 sroms misprogrammed */
+ if (sromrev == 0x10)
+ sromrev = 1;
+ }
+ }
+ if (err)
+ BS_ERROR(("srom read failed\n"));
+ }
+ else
+ BS_ERROR(("srom read failed\n"));
+ }
+
+#if defined(BCMNVRAMW) || defined(BCMNVRAMR)
+ /* Use OTP if SPROM not available */
+ else if ((err = otp_read_pci(osh, sih, srom, SROM_MAX)) == 0) {
+ /* OTP only contains SROM rev8/rev9/rev10/rev11 for now */
+
+ if (srom[SROM13_SIGN] == SROM13_SIGNATURE)
+ sromrev = srom[SROM13_CRCREV] & 0xff;
+ else if (srom[SROM12_SIGN] == SROM12_SIGNATURE)
+ sromrev = srom[SROM12_CRCREV] & 0xff;
+ else if (srom[SROM11_SIGN] == SROM11_SIGNATURE)
+ sromrev = srom[SROM11_CRCREV] & 0xff;
+ else if (srom[SROM10_SIGN] == SROM10_SIGNATURE)
+ sromrev = srom[SROM10_CRCREV] & 0xff;
+ else
+ sromrev = srom[SROM4_CRCREV] & 0xff;
+ }
+#endif /* defined(BCMNVRAMW) || defined(BCMNVRAMR) */
+ else {
+ err = 1;
+ BS_ERROR(("Neither SPROM nor OTP has valid image\n"));
+ }
+
+ BS_ERROR(("srom rev:%d\n", sromrev));
+
+ /* We want internal/wltest driver to come up with default sromvars so we can
+ * program a blank SPROM/OTP.
+ */
+ if (err || sromrev == 0) {
+ char *value;
+#if defined(BCMHOSTVARS)
+ uint32 val;
+#endif
+
+ if ((value = si_getdevpathvar(sih, "sromrev"))) {
+ sromrev = (uint8)bcm_strtoul(value, NULL, 0);
+ flash = TRUE;
+ goto varscont;
+ }
+
+ BS_ERROR(("initvars_srom_pci, SROM CRC Error\n"));
+
+#if !defined(DONGLEBUILD) || defined(BCMPCIEDEV_SROM_FORMAT)
+ /* NIC build or PCIe FD using SROM format shouldn't load driver
+ * default when external nvram exists.
+ */
+ if ((value = getvar(NULL, "sromrev"))) {
+ BS_ERROR(("initvars_srom_pci, Using external nvram\n"));
+ err = 0;
+ goto errout;
+ }
+#endif /* !DONGLEBUILD || BCMPCIEDEV_SROM_FORMAT */
+
+#if defined(BCMHOSTVARS)
+ /*
+ * CRC failed on the srom, so if the device is using OTP
+ * and OTP is not programmed, use the default variables.
+ * For 4311 A1 there is no signature to indicate that OTP is
+ * programmed, so we cannot really tell an unprogrammed OTP
+ * from a bad one.
+ */
+ val = OSL_PCI_READ_CONFIG(osh, PCI_SPROM_CONTROL, sizeof(uint32));
+ if ((si_is_sprom_available(sih) && srom[0] == 0xffff) ||
+#ifdef BCMQT
+ (si_is_sprom_available(sih) && sromrev == 0) ||
+#endif
+ (val & SPROM_OTPIN_USE)) {
+ vp = base = mfgsromvars;
+
+ /* For the Windows internal/wltest driver, a .nvm file with default
+ * nvram parameters is downloaded from the file system (in src/wl/sys:
+ * wl_readconfigdata()).
+ * Only when we cannot download default vars from the file system do we
+ * use defaultsromvars_wltest as the default.
+ */
+ if (defvarslen == 0) {
+ BS_ERROR(("No nvm file, use generic default (for programming"
+ " SPROM/OTP only)\n"));
+
+ if (BCM43602_CHIP(sih->chip)) {
+ defvarslen = srom_vars_len(defaultsromvars_43602);
+ bcopy(defaultsromvars_43602, vp, defvarslen);
+ } else if ((CHIPID(sih->chip) == BCM4360_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM4352_CHIP_ID)) {
+ defvarslen = srom_vars_len(defaultsromvars_4360);
+ bcopy(defaultsromvars_4360, vp, defvarslen);
+ } else if (BCM4378_CHIP(sih->chip)) {
+ defvarslen = srom_vars_len(defaultsromvars_4378);
+ bcopy(defaultsromvars_4378, vp, defvarslen);
+ } else if (BCM4387_CHIP(sih->chip)) {
+ defvarslen = srom_vars_len(defaultsromvars_4387);
+ bcopy(defaultsromvars_4387, vp, defvarslen);
+ } else {
+ defvarslen = srom_vars_len(defaultsromvars_wltest);
+ bcopy(defaultsromvars_wltest, vp, defvarslen);
+ }
+ } else {
+ BS_ERROR(("Use nvm file as default\n"));
+ }
+
+ vp += defvarslen;
+ /* add final null terminator */
+ *vp++ = '\0';
+
+ BS_ERROR(("Used %d bytes of defaultsromvars\n", defvarslen));
+ goto varsdone;
+
+ } else if ((CHIPID(sih->chip) == BCM4360_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM43460_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM4352_CHIP_ID) ||
+ BCM43602_CHIP(sih->chip)) {
+
+ base = vp = mfgsromvars;
+
+ if ((CHIPID(sih->chip) == BCM4360_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM43460_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM4352_CHIP_ID) ||
+ BCM43602_CHIP(sih->chip))
+ BS_ERROR(("4360 BOOT w/o SPROM or OTP\n"));
+ else
+ BS_ERROR(("BOOT w/o SPROM or OTP\n"));
+
+ if (defvarslen == 0) {
+ if (BCM43602_CHIP(sih->chip)) {
+ defvarslen = srom_vars_len(defaultsromvars_43602);
+ bcopy(defaultsromvars_43602, vp, defvarslen);
+ } else if ((sih->chip == BCM4360_CHIP_ID) ||
+ (sih->chip == BCM4352_CHIP_ID)) {
+ defvarslen = srom_vars_len(defaultsromvars_4360);
+ bcopy(defaultsromvars_4360, vp, defvarslen);
+ } else {
+ defvarslen = srom_vars_len(defaultsromvars_4331);
+ bcopy(defaultsromvars_4331, vp, defvarslen);
+ }
+ }
+ vp += defvarslen;
+ *vp++ = '\0';
+ goto varsdone;
+ } else
+#endif /* defined(BCMHOSTVARS) */
+ {
+ err = -1;
+ goto errout;
+ }
+ }
+#if defined(BCM_ONE_NVRAM_SRC)
+ /* Discard hostvars if SROM parsing is successful, so only one nvram source
+ * will be used.
+ * Routers use combined srom/host nvram so shouldn't define BCM_ONE_NVRAM_SRC.
+ */
+ else {
+ nvram_exit((void *)sih); /* free up global vars */
+ }
+#endif /* BCM_ONE_NVRAM_SRC */
+
+varscont:
+ /* Bitmask for the sromrev */
+ sr = 1 << sromrev;
+
+ /* srom version check: current valid versions are
+ * 1-5, 8-13 and 15-18 (SROM_MAXREV).
+ * 0x7bf3e below is the bit mask of all valid SROM versions.
+ */
+ if ((sr & 0x7bf3e) == 0) {
+ BS_ERROR(("Invalid SROM rev %d\n", sromrev));
+ err = -2;
+ goto errout;
+ }
+
+ ASSERT(vars != NULL);
+ ASSERT(count != NULL);
+
+#if defined(BCMPCIEDEV_SROM_FORMAT) && defined(WLC_TXCAL)
+ srom_sromrev = sromrev;
+#endif /* defined(BCMPCIEDEV_SROM_FORMAT) && defined(WLC_TXCAL) */
+
+ /* freed in same function */
+ base = vp = MALLOC_NOPERSIST(osh, MAXSZ_NVRAM_VARS);
+ ASSERT(vp != NULL);
+ if (!vp) {
+ err = -2;
+ goto errout;
+ }
+
+ /* read variables from flash */
+ if (flash) {
+ if ((err = initvars_flash(sih, osh, &vp, MAXSZ_NVRAM_VARS)))
+ goto errout;
+ goto varsdone;
+ }
+
+ varbuf_init(&b, base, MAXSZ_NVRAM_VARS);
+
+ /* parse SROM into name=value pairs. */
+ _initvars_srom_pci(sromrev, srom, 0, &b);
+
+ /* final nullbyte terminator */
+ ASSERT(b.size >= 1);
+ vp = b.buf;
+ *vp++ = '\0';
+
+ ASSERT((vp - base) <= MAXSZ_NVRAM_VARS);
+
+varsdone:
+ err = initvars_table(osh, base, vp, vars, count); /* allocates buffer in 'vars' */
+
+#if defined(BCMPCIEDEV_SROM_FORMAT) && defined(WLC_TXCAL)
+ if (sromrev == 18) {
+ int caldata_wordoffset = srom[SROM18_CALDATA_OFFSET_LOC] / 2;
+
+ if ((caldata_wordoffset != 0) &&
+ (caldata_wordoffset + SROM_CALDATA_WORDS < SROM18_WORDS)) {
+ memcpy(caldata_array, srom + caldata_wordoffset, SROM18_CALDATA_WORDS * 2);
+ is_caldata_prsnt = TRUE;
+ }
+ } else if (sromrev == 16) {
+ int caldata_wordoffset = srom[SROM16_CALDATA_OFFSET_LOC] / 2;
+
+ if ((caldata_wordoffset != 0) &&
+ (caldata_wordoffset + SROM_CALDATA_WORDS < SROM16_WORDS)) {
+ memcpy(caldata_array, srom + caldata_wordoffset, SROM_CALDATA_WORDS * 2);
+ is_caldata_prsnt = TRUE;
+ }
+ }
+#endif /* defined(BCMPCIEDEV_SROM_FORMAT) && defined(WLC_TXCAL) */
+
+#ifdef NVSRCX
+ if (sromrev != 0)
+ nvram_append((void *)sih, *vars, *count, VARBUF_PRIO_SROM);
+#endif
+#if defined(BCMPCIEDEV_SROM_FORMAT) && defined(WLC_TXCAL)
+ if ((sromrev == 15) || (sromrev == 17)) {
+ uint32 caldata_size = get_srom_pci_caldata_size(sromrev);
+
+ cal_wordoffset = getintvar(NULL, "caldata_offset")/2;
+ memcpy(caldata_array, srom + cal_wordoffset, caldata_size);
+ }
+#endif
+errout:
+#if defined(BCMHOSTVARS)
+ if (base && (base != mfgsromvars))
+#else
+ if (base)
+#endif /* defined(BCMHOSTVARS) */
+ MFREE(osh, base, MAXSZ_NVRAM_VARS);
+
+ MFREE(osh, srom, SROM_MAX);
+ return err;
+}
+
+/**
+ * initvars_cis_pci() parses OTP CIS. This is specifically for PCIe full dongle that has SROM
+ * header plus CIS tuples programmed in OTP.
+ * Return error if the content is not in CIS format or OTP is not present.
+ */
+static int
+BCMATTACHFN(initvars_cis_pci)(si_t *sih, osl_t *osh, volatile void *curmap,
+ char **vars, uint *count)
+{
+ uint wsz = 0, sz = 0, base_len = 0;
+ void *oh = NULL;
+ int rc = BCME_OK;
+ uint16 *cisbuf = NULL;
+ uint8 *cis = NULL;
+#if defined (BCMHOSTVARS)
+ char *vp = NULL;
+#endif /* BCMHOSTVARS */
+ char *base = NULL;
+ bool wasup;
+ uint32 min_res_mask = 0;
+ BCM_REFERENCE(curmap);
+
+ /* Bail out if we've dealt with OTP/SPROM before! */
+ if (srvars_inited)
+ goto exit;
+
+ /* Turn on OTP if it's not already on */
+ if (!(wasup = si_is_otp_powered(sih)))
+ si_otp_power(sih, TRUE, &min_res_mask);
+
+ if (si_cis_source(sih) != CIS_OTP)
+ rc = BCME_NOTFOUND;
+ else if ((oh = otp_init(sih)) == NULL)
+ rc = BCME_ERROR;
+ else if (!(((BUSCORETYPE(sih->buscoretype) == PCIE2_CORE_ID) || otp_newcis(oh)) &&
+ (otp_status(oh) & OTPS_GUP_HW))) {
+ /* OTP bit CIS format (507) not used by pcie core - only needed for sdio core */
+ rc = BCME_NOTFOUND;
+ } else if ((sz = otp_size(oh)) != 0) {
+ if ((cisbuf = (uint16*)MALLOC_NOPERSIST(osh, sz))) {
+ /* otp_size() returns bytes, not words. */
+ wsz = sz >> 1;
+ /* for 4389b0 (CCREV-70) sw region is before the hw region */
+ if (CCREV(sih->ccrev) == 70) {
+ rc = otp_read_region(sih, OTP_SW_RGN, cisbuf, &wsz);
+ cis = (uint8*)cisbuf;
+ } else {
+ rc = otp_read_region(sih, OTP_HW_RGN, cisbuf, &wsz);
+ /* Bypass the HW header and signature */
+ cis = (uint8*)(cisbuf + (otp_pcie_hwhdr_sz(sih) / 2));
+ }
+ BS_ERROR(("initvars_cis_pci: Parsing CIS in OTP.\n"));
+ } else
+ rc = BCME_NOMEM;
+ }
+
+ /* Restore original OTP state */
+ if (!wasup)
+ si_otp_power(sih, FALSE, &min_res_mask);
+
+ if (rc != BCME_OK) {
+ BS_ERROR(("initvars_cis_pci: Not CIS format\n"));
+ goto exit;
+ }
+
+#if defined (BCMHOSTVARS)
+ if (defvarslen) {
+ vp = mfgsromvars;
+ vp += defvarslen;
+
+ /* allocates buffer in 'vars' */
+ rc = initvars_table(osh, mfgsromvars, vp, &base, &base_len);
+ if (rc)
+ goto exit;
+
+ *vars = base;
+ *count = base_len;
+
+ BS_ERROR(("initvars_cis_pci external nvram %d bytes\n", defvarslen));
+ }
+
+#endif /* BCMHOSTVARS */
+
+ /* Parse the CIS and allocate a(nother) buffer in 'vars' */
+ rc = srom_parsecis(sih, osh, &cis, SROM_CIS_SINGLE, vars, count);
+
+ srvars_inited = TRUE;
+exit:
+ /* Clean up */
+ if (base)
+ MFREE(osh, base, base_len);
+ if (cisbuf)
+ MFREE(osh, cisbuf, sz);
+
+ /* return OK so the driver will load & use defaults if bad srom/otp */
+ return rc;
+}
+#endif /* !defined(BCMDONGLEHOST) */
+
+#ifdef BCMSDIO
+#if !defined(BCMDONGLEHOST)
+/**
+ * Read the SDIO cis and call parsecis to allocate and initialize the NVRAM vars buffer.
+ * Return 0 on success, nonzero on error.
+ */
+static int
+BCMATTACHFN(initvars_cis_sdio)(si_t *sih, osl_t *osh, char **vars, uint *count)
+{
+ uint8 *cis[SBSDIO_NUM_FUNCTION + 1];
+ uint fn, numfn;
+ int rc = 0;
+
+ /* Using MALLOC here causes the Windows driver to crash; needs investigating */
+#ifdef NDIS
+ uint8 cisd[SBSDIO_NUM_FUNCTION + 1][SBSDIO_CIS_SIZE_LIMIT];
+#endif
+
+ numfn = bcmsdh_query_iofnum(NULL);
+ ASSERT(numfn <= SDIOD_MAX_IOFUNCS);
+
+ for (fn = 0; fn <= numfn; fn++) {
+#ifdef NDIS
+ cis[fn] = (uint8*)cisd[fn];
+#else
+ /* freed in same function */
+ if ((cis[fn] = MALLOC_NOPERSIST(osh, SBSDIO_CIS_SIZE_LIMIT)) == NULL) {
+ rc = -1;
+ break;
+ }
+#endif /* NDIS */
+
+ bzero(cis[fn], SBSDIO_CIS_SIZE_LIMIT);
+
+ if (bcmsdh_cis_read(NULL, fn, cis[fn], SBSDIO_CIS_SIZE_LIMIT) != 0) {
+#ifdef NDIS
+ /* nothing to do */
+#else
+ MFREE(osh, cis[fn], SBSDIO_CIS_SIZE_LIMIT);
+#endif
+ rc = -2;
+ break;
+ }
+ }
+
+ if (!rc)
+ rc = srom_parsecis(sih, osh, cis, fn, vars, count);
+
+#ifdef NDIS
+ /* nothing to do here */
+#else
+ while (fn-- > 0)
+ MFREE(osh, cis[fn], SBSDIO_CIS_SIZE_LIMIT);
+#endif
+
+ return (rc);
+}
+#endif /* !defined(BCMDONGLEHOST) */
+
+/** set SDIO sprom command register */
+static int
+BCMATTACHFN(sprom_cmd_sdio)(osl_t *osh, uint8 cmd)
+{
+ uint8 status = 0;
+ uint wait_cnt = 1000;
+
+ /* write sprom command register */
+ bcmsdh_cfg_write(NULL, SDIO_FUNC_1, SBSDIO_SPROM_CS, cmd, NULL);
+
+ /* wait status */
+ while (wait_cnt--) {
+ status = bcmsdh_cfg_read(NULL, SDIO_FUNC_1, SBSDIO_SPROM_CS, NULL);
+ if (status & SBSDIO_SPROM_DONE)
+ return 0;
+ }
+
+ return 1;
+}
+
+/** read a word from the SDIO srom */
+static int
+sprom_read_sdio(osl_t *osh, uint16 addr, uint16 *data)
+{
+ uint8 addr_l, addr_h, data_l, data_h;
+
+ addr_l = (uint8)((addr * 2) & 0xff);
+ addr_h = (uint8)(((addr * 2) >> 8) & 0xff);
+
+ /* set address */
+ bcmsdh_cfg_write(NULL, SDIO_FUNC_1, SBSDIO_SPROM_ADDR_HIGH, addr_h, NULL);
+ bcmsdh_cfg_write(NULL, SDIO_FUNC_1, SBSDIO_SPROM_ADDR_LOW, addr_l, NULL);
+
+ /* do read */
+ if (sprom_cmd_sdio(osh, SBSDIO_SPROM_READ))
+ return 1;
+
+ /* read data */
+ data_h = bcmsdh_cfg_read(NULL, SDIO_FUNC_1, SBSDIO_SPROM_DATA_HIGH, NULL);
+ data_l = bcmsdh_cfg_read(NULL, SDIO_FUNC_1, SBSDIO_SPROM_DATA_LOW, NULL);
+
+ *data = (data_h << 8) | data_l;
+ return 0;
+}
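+
+/*
+ * Illustrative sketch (not part of the driver): reading a run of SROM words
+ * over SDIO by looping sprom_read_sdio(). BCMSROM_EXAMPLES is a hypothetical
+ * guard that keeps the sketch compiled out.
+ */
+#ifdef BCMSROM_EXAMPLES
+static int
+example_sprom_read_sdio_block(osl_t *osh, uint16 wordoff, uint16 *buf, uint nwords)
+{
+ uint i;
+ for (i = 0; i < nwords; i++) {
+ if (sprom_read_sdio(osh, (uint16)(wordoff + i), &buf[i]))
+ return 1; /* propagate the command timeout */
+ }
+ return 0;
+}
+#endif /* BCMSROM_EXAMPLES */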
+
+#if defined(WLTEST) || defined (DHD_SPROM) || defined (BCMDBG)
+/** write a word to the SDIO srom */
+static int
+sprom_write_sdio(osl_t *osh, uint16 addr, uint16 data)
+{
+ uint8 addr_l, addr_h, data_l, data_h;
+
+ addr_l = (uint8)((addr * 2) & 0xff);
+ addr_h = (uint8)(((addr * 2) >> 8) & 0xff);
+ data_l = (uint8)(data & 0xff);
+ data_h = (uint8)((data >> 8) & 0xff);
+
+ /* set address */
+ bcmsdh_cfg_write(NULL, SDIO_FUNC_1, SBSDIO_SPROM_ADDR_HIGH, addr_h, NULL);
+ bcmsdh_cfg_write(NULL, SDIO_FUNC_1, SBSDIO_SPROM_ADDR_LOW, addr_l, NULL);
+
+ /* write data */
+ bcmsdh_cfg_write(NULL, SDIO_FUNC_1, SBSDIO_SPROM_DATA_HIGH, data_h, NULL);
+ bcmsdh_cfg_write(NULL, SDIO_FUNC_1, SBSDIO_SPROM_DATA_LOW, data_l, NULL);
+
+ /* do write */
+ return sprom_cmd_sdio(osh, SBSDIO_SPROM_WRITE);
+}
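+
+/*
+ * Illustrative sketch (not part of the driver): read-modify-write of one SDIO
+ * SROM word using the two helpers above; kept inside the same WLTEST/DHD_SPROM
+ * guard since sprom_write_sdio() only exists there. BCMSROM_EXAMPLES is a
+ * hypothetical guard that keeps the sketch compiled out.
+ */
+#ifdef BCMSROM_EXAMPLES
+static int
+example_sprom_update_word(osl_t *osh, uint16 addr, uint16 setbits)
+{
+ uint16 w;
+ if (sprom_read_sdio(osh, addr, &w))
+ return 1;
+ return sprom_write_sdio(osh, addr, (uint16)(w | setbits));
+}
+#endif /* BCMSROM_EXAMPLES */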
+#endif /* defined(WLTEST) || defined (DHD_SPROM) || defined (BCMDBG) */
+#endif /* BCMSDIO */
+
+#if !defined(BCMDONGLEHOST)
+#ifdef BCMSPI
+/**
+ * Read the SPI cis and call parsecis to allocate and initialize the NVRAM vars buffer.
+ * Return 0 on success, nonzero on error.
+ */
+static int
+BCMATTACHFN(initvars_cis_spi)(si_t *sih, osl_t *osh, char **vars, uint *count)
+{
+ uint8 *cis;
+ int rc;
+
+ /* Using MALLOC here causes the Windows driver to crash; needs investigating */
+#ifdef NDIS
+ uint8 cisd[SBSDIO_CIS_SIZE_LIMIT];
+ cis = (uint8*)cisd;
+#else
+ /* freed in same function */
+ if ((cis = MALLOC_NOPERSIST(osh, SBSDIO_CIS_SIZE_LIMIT)) == NULL) {
+ return -1;
+ }
+#endif /* NDIS */
+
+ bzero(cis, SBSDIO_CIS_SIZE_LIMIT);
+
+ if (bcmsdh_cis_read(NULL, SDIO_FUNC_1, cis, SBSDIO_CIS_SIZE_LIMIT) != 0) {
+#ifdef NDIS
+ /* nothing to do */
+#else
+ MFREE(osh, cis, SBSDIO_CIS_SIZE_LIMIT);
+#endif /* NDIS */
+ return -2;
+ }
+
+ rc = srom_parsecis(sih, osh, &cis, SDIO_FUNC_1, vars, count);
+
+#ifdef NDIS
+ /* nothing to do here */
+#else
+ MFREE(osh, cis, SBSDIO_CIS_SIZE_LIMIT);
+#endif
+
+ return (rc);
+}
+#endif /* BCMSPI */
+#endif /* !defined(BCMDONGLEHOST) */
+
+/** Return sprom size in 16-bit words */
+uint
+srom_size(si_t *sih, osl_t *osh)
+{
+ uint size = (SROM16_SIGN + 1) * 2; /* must be big enough for SROM16 */
+ return size;
+}
+
+/**
+ * initvars are different for BCMUSBDEV and BCMSDIODEV. This is OK when supporting both at
+ * the same time, but only because all of the code is in attach functions and not in ROM.
+ */
+
+#if defined(BCMUSBDEV_ENABLED)
+#ifdef BCM_DONGLEVARS
+/** Reads a CIS structure (so not an SROM-MAP structure) from either OTP or SROM */
+static int
+BCMATTACHFN(initvars_srom_si_bl)(si_t *sih, osl_t *osh, volatile void *curmap,
+ char **vars, uint *varsz)
+{
+ int sel = 0; /* where to read srom/cis: 0 - none, 1 - otp, 2 - sprom */
+ uint sz = 0; /* srom size in bytes */
+ void *oh = NULL;
+ int rc = BCME_OK;
+ uint16 prio = VARBUF_PRIO_INVALID;
+
+ if ((oh = otp_init(sih)) != NULL && (otp_status(oh) & OTPS_GUP_SW)) {
+ /* Access OTP if it is present, powered on, and programmed */
+ sz = otp_size(oh);
+ sel = 1;
+ } else if ((sz = srom_size(sih, osh)) != 0) {
+ /* Access the SPROM if it is present */
+ sz <<= 1;
+ sel = 2;
+ }
+
+ /* Read CIS in OTP/SPROM */
+ if (sel != 0) {
+ uint16 *srom;
+ uint8 *body = NULL;
+ uint otpsz = sz;
+
+ ASSERT(sz);
+
+ /* Allocate memory */
+ if ((srom = (uint16 *)MALLOC(osh, sz)) == NULL)
+ return BCME_NOMEM;
+
+ /* Read CIS */
+ switch (sel) {
+ case 1:
+ rc = otp_read_region(sih, OTP_SW_RGN, srom, &otpsz);
+ sz = otpsz;
+ body = (uint8 *)srom;
+ prio = VARBUF_PRIO_OTP;
+ break;
+ case 2:
+ rc = srom_read(sih, SI_BUS, curmap, osh, 0, sz, srom, TRUE);
+ /* sprom has 8 byte h/w header */
+ body = (uint8 *)srom + SBSDIO_SPROM_CIS_OFFSET;
+ prio = VARBUF_PRIO_SROM;
+ break;
+ default:
+ /* impossible to come here */
+ ASSERT(0);
+ break;
+ }
+
+ /* Parse CIS */
+ if (rc == BCME_OK) {
+ /* each word is in host endian */
+ htol16_buf((uint8 *)srom, sz);
+ ASSERT(body);
+ rc = srom_parsecis(sih, osh, &body, SROM_CIS_SINGLE, vars, varsz);
+ }
+
+ MFREE(osh, srom, sz); /* Clean up */
+
+ /* Make SROM variables global */
+ if (rc == BCME_OK) {
+ nvram_append((void *)sih, *vars, *varsz, prio);
+ DONGLE_STORE_VARS_OTP_PTR(*vars);
+ }
+ }
+
+ return BCME_OK;
+}
+#endif /* #ifdef BCM_DONGLEVARS */
+
+/**
+ * initvars_srom_si() is defined multiple times in this file. This is the 1st variant for chips with
+ * an active USB interface. It is called only for bus types SI_BUS, and only for CIS
+ * format in SPROM and/or OTP. Reads OTP or SPROM (bootloader only) and appends parsed contents to
+ * caller supplied var/value pairs.
+ */
+static int
+BCMATTACHFN(initvars_srom_si)(si_t *sih, osl_t *osh, volatile void *curmap,
+ char **vars, uint *varsz)
+{
+
+#if defined(BCM_DONGLEVARS)
+ BCM_REFERENCE(osh);
+ BCM_REFERENCE(sih);
+ BCM_REFERENCE(curmap);
+#endif
+
+ /* Bail out if we've dealt with OTP/SPROM before! */
+ if (srvars_inited)
+ goto exit;
+
+#ifdef BCM_DONGLEVARS /* this flag should be defined for usb bootloader, to read OTP or SROM */
+ if (BCME_OK != initvars_srom_si_bl(sih, osh, curmap, vars, varsz)) /* CIS format only */
+ return BCME_ERROR;
+#endif
+
+ /* update static local var to skip for next call */
+ srvars_inited = TRUE;
+
+exit:
+ /* Tell the caller there is no individual SROM variables */
+ *vars = NULL;
+ *varsz = 0;
+
+ /* return OK so the driver will load & use defaults if bad srom/otp */
+ return BCME_OK;
+}
+
+#elif defined(BCMSDIODEV_ENABLED)
+
+#ifdef BCM_DONGLEVARS
+static uint8 BCMATTACHDATA(defcis4369)[] = { 0x20, 0x4, 0xd0, 0x2, 0x64, 0x43, 0xff, 0xff };
+static uint8 BCMATTACHDATA(defcis43012)[] = { 0x20, 0x4, 0xd0, 0x2, 0x04, 0xA8, 0xff, 0xff };
+static uint8 BCMATTACHDATA(defcis43013)[] = { 0x20, 0x4, 0xd0, 0x2, 0x05, 0xA8, 0xff, 0xff };
+static uint8 BCMATTACHDATA(defcis43014)[] = { 0x20, 0x4, 0xd0, 0x2, 0x06, 0xA8, 0xff, 0xff };
+static uint8 BCMATTACHDATA(defcis4362)[] = { 0x20, 0x4, 0xd0, 0x2, 0x62, 0x43, 0xff, 0xff };
+static uint8 BCMATTACHDATA(defcis4378)[] = { 0x20, 0x4, 0xd0, 0x2, 0x78, 0x43, 0xff, 0xff };
+static uint8 BCMATTACHDATA(defcis4385)[] = { 0x20, 0x4, 0xd0, 0x2, 0x85, 0x43, 0xff, 0xff };
+static uint8 BCMATTACHDATA(defcis4387)[] = { 0x20, 0x4, 0xd0, 0x2, 0x78, 0x43, 0xff, 0xff };
+static uint8 BCMATTACHDATA(defcis4388)[] = { 0x20, 0x4, 0xd0, 0x2, 0x88, 0x43, 0xff, 0xff };
+static uint8 BCMATTACHDATA(defcis4389)[] = { 0x20, 0x4, 0xd0, 0x2, 0x89, 0x43, 0xff, 0xff };
+static uint8 BCMATTACHDATA(defcis4397)[] = { 0x20, 0x4, 0xd0, 0x2, 0x97, 0x43, 0xff, 0xff };
+
+/**
+ * initvars_srom_si() is defined multiple times in this file. This is the 2nd variant for chips with
+ * an active SDIOd interface using DONGLEVARS
+ */
+static int
+BCMATTACHFN(initvars_srom_si)(si_t *sih, osl_t *osh, volatile void *curmap,
+ char **vars, uint *varsz)
+{
+ int cis_src;
+ uint msz = 0;
+ uint sz = 0;
+ void *oh = NULL;
+ int rc = BCME_OK;
+ bool new_cisformat = FALSE;
+
+ uint16 *cisbuf = NULL;
+
+ /* # sdiod fns + common + extra */
+ uint8 *cis[SBSDIO_NUM_FUNCTION + 2] = { 0 };
+
+ uint ciss = 0;
+ uint8 *defcis;
+ uint hdrsz;
+ uint16 prio = VARBUF_PRIO_INVALID;
+
+#if defined(BCMSDIODEV_ENABLED) && defined(ATE_BUILD)
+ if (si_chipcap_sdio_ate_only(sih)) {
+ BS_ERROR(("ATE BUILD: skip cis based var init\n"));
+ goto exit;
+ }
+#endif /* BCMSDIODEV_ENABLED && ATE_BUILD */
+
+ /* Bail out if we've dealt with OTP/SPROM before! */
+ if (srvars_inited)
+ goto exit;
+
+ /* Initialize default and cis format count */
+ switch (CHIPID(sih->chip)) {
+ case BCM4369_CHIP_GRPID: ciss = 1; defcis = defcis4369; hdrsz = 4; break;
+ case BCM4378_CHIP_GRPID: ciss = 1; defcis = defcis4378; hdrsz = 4; break;
+ case BCM4385_CHIP_GRPID: ciss = 1; defcis = defcis4385; hdrsz = 4; break;
+ case BCM4387_CHIP_GRPID: ciss = 1; defcis = defcis4387; hdrsz = 4; break;
+ case BCM4388_CHIP_GRPID: ciss = 1; defcis = defcis4388; hdrsz = 4; break;
+ case BCM4389_CHIP_GRPID: ciss = 1; defcis = defcis4389; hdrsz = 4; break;
+ case BCM4397_CHIP_GRPID: ciss = 1; defcis = defcis4397; hdrsz = 4; break;
+ case BCM43012_CHIP_ID: ciss = 1; defcis = defcis43012; hdrsz = 4; break;
+ case BCM43013_CHIP_ID: ciss = 1; defcis = defcis43013; hdrsz = 4; break;
+ case BCM43014_CHIP_ID: ciss = 1; defcis = defcis43014; hdrsz = 4; break;
+ case BCM4362_CHIP_GRPID: ciss = 1; defcis = defcis4362; hdrsz = 4; break;
+ default:
+ BS_ERROR(("initvars_srom_si: Unknown chip 0x%04x\n", CHIPID(sih->chip)));
+ return BCME_ERROR;
+ }
+ if (sih->ccrev >= 36) {
+ uint32 otplayout;
+ if (AOB_ENAB(sih)) {
+ otplayout = si_corereg(sih, si_findcoreidx(sih, GCI_CORE_ID, 0),
+ OFFSETOF(gciregs_t, otplayout), 0, 0);
+ } else {
+ otplayout = si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, otplayout),
+ 0, 0);
+ }
+ if (otplayout & OTP_CISFORMAT_NEW) {
+ ciss = 1;
+ hdrsz = 2;
+ new_cisformat = TRUE;
+ }
+ else {
+ ciss = 3;
+ hdrsz = 12;
+ }
+ }
+
+ cis_src = si_cis_source(sih);
+ switch (cis_src) {
+ case CIS_SROM:
+ sz = srom_size(sih, osh) << 1;
+ prio = VARBUF_PRIO_SROM;
+ break;
+ case CIS_OTP:
+ /* Note that for *this* type of OTP -- which otp_read_region()
+ * can operate on -- otp_size() returns bytes, not words.
+ */
+ if (((oh = otp_init(sih)) != NULL) && (otp_status(oh) & OTPS_GUP_HW))
+ sz = otp_size(oh);
+ prio = VARBUF_PRIO_OTP;
+ break;
+ }
+
+ if (sz != 0) {
+ /* freed in same function */
+ if ((cisbuf = (uint16*)MALLOC_NOPERSIST(osh, sz)) == NULL)
+ return BCME_NOMEM;
+ msz = sz;
+
+ switch (cis_src) {
+ case CIS_SROM:
+ rc = srom_read(sih, SI_BUS, curmap, osh, 0, sz, cisbuf, FALSE);
+ break;
+ case CIS_OTP:
+ sz >>= 1;
+ rc = otp_read_region(sih, OTP_HW_RGN, cisbuf, &sz);
+ sz <<= 1;
+ break;
+ }
+
+ ASSERT(sz > hdrsz);
+ if (rc == BCME_OK) {
+ if ((cisbuf[0] == 0xffff) || (cisbuf[0] == 0)) {
+ MFREE(osh, cisbuf, msz);
+ } else if (new_cisformat) {
+ cis[0] = (uint8*)(cisbuf + hdrsz);
+ } else {
+ cis[0] = (uint8*)cisbuf + hdrsz;
+ cis[1] = (uint8*)cisbuf + hdrsz +
+ (cisbuf[1] >> 8) + ((cisbuf[2] & 0x00ff) << 8) -
+ SBSDIO_CIS_BASE_COMMON;
+ cis[2] = (uint8*)cisbuf + hdrsz +
+ cisbuf[3] - SBSDIO_CIS_BASE_COMMON;
+ cis[3] = (uint8*)cisbuf + hdrsz +
+ cisbuf[4] - SBSDIO_CIS_BASE_COMMON;
+ ASSERT((cis[1] >= cis[0]) && (cis[1] < (uint8*)cisbuf + sz));
+ ASSERT((cis[2] >= cis[0]) && (cis[2] < (uint8*)cisbuf + sz));
+ ASSERT(((cis[3] >= cis[0]) && (cis[3] < (uint8*)cisbuf + sz)) ||
+ (ciss <= 3));
+ }
+ }
+ }
+
+	/* Use the default CIS if strapped to do so, or if the strapped source is empty */
+ if (cisbuf == NULL) {
+ ciss = 1;
+ cis[0] = defcis;
+ }
+
+ /* Parse the CIS */
+ if (rc == BCME_OK) {
+ if ((rc = srom_parsecis(sih, osh, cis, ciss, vars, varsz)) == BCME_OK) {
+ nvram_append((void *)sih, *vars, *varsz, prio);
+ DONGLE_STORE_VARS_OTP_PTR(*vars);
+ }
+ }
+
+ /* Clean up */
+ if (cisbuf != NULL)
+ MFREE(osh, cisbuf, msz);
+
+ srvars_inited = TRUE;
+exit:
+	/* Tell the caller there are no individual SROM variables */
+ *vars = NULL;
+ *varsz = 0;
+
+ /* return OK so the driver will load & use defaults if bad srom/otp */
+ return BCME_OK;
+} /* initvars_srom_si */
+#else /* BCM_DONGLEVARS */
+
+/**
+ * initvars_srom_si() is defined multiple times in this file. This is the variant for chips with an
+ * active SDIOd interface but without BCM_DONGLEVARS
+ */
+static int
+BCMATTACHFN(initvars_srom_si)(si_t *sih, osl_t *osh, volatile void *curmap,
+ char **vars, uint *varsz)
+{
+ *vars = NULL;
+ *varsz = 0;
+ return BCME_OK;
+}
+#endif /* BCM_DONGLEVARS */
+
+#elif defined(BCMPCIEDEV_ENABLED)
+
+/**
+ * initvars_srom_si() is defined multiple times in this file. This is the variant for chips with an
+ * active PCIe interface *and* that use OTP for NVRAM storage.
+ *
+ * On success, a buffer containing var/val values has been allocated in parameter 'vars'.
+ * TODO: add an ifdef so the host can choose whether the dongle parses the SPROM or not.
+ */
+static int
+BCMATTACHFN(initvars_srom_si)(si_t *sih, osl_t *osh, volatile void *curmap,
+ char **vars, uint *varsz)
+{
+#ifdef BCM_DONGLEVARS
+ void *oh = NULL;
+ uint8 *cis;
+ uint sz = 0;
+ int rc;
+
+ if (si_cis_source(sih) != CIS_OTP)
+ return BCME_OK;
+
+ if (((oh = otp_init(sih)) != NULL) && (otp_status(oh) & OTPS_GUP_HW))
+ sz = otp_size(oh);
+ if (sz == 0)
+ return BCME_OK;
+
+ if ((cis = MALLOC(osh, sz)) == NULL)
+ return BCME_NOMEM;
+ sz >>= 1;
+ rc = otp_read_region(sih, OTP_HW_RGN, (uint16 *)cis, &sz);
+ sz <<= 1;
+
+	/* if OTP holds only the 128-byte hardware header, there is nothing to parse */
+ if (sz == 128)
+ return BCME_OK;
+
+ cis += 128;
+
+	/* TODO: find a better way to identify SPROM-format content and skip parsing it */
+ if (*(uint16 *)cis == SROM11_SIGNATURE) {
+ return BCME_OK;
+ }
+
+ if ((rc = srom_parsecis(sih, osh, &cis, SROM_CIS_SINGLE, vars, varsz)) == BCME_OK)
+ nvram_append((void *)sih, *vars, *varsz, VARBUF_PRIO_OTP);
+
+ return rc;
+#else /* BCM_DONGLEVARS */
+ *vars = NULL;
+ *varsz = 0;
+ return BCME_OK;
+#endif /* BCM_DONGLEVARS */
+}
+#else /* !BCMUSBDEV && !BCMSDIODEV && !BCMPCIEDEV */
+
+#ifndef BCMDONGLEHOST
+
+/**
+ * initvars_srom_si() is defined multiple times in this file. This is the variant for:
+ * !BCMDONGLEHOST && !BCMUSBDEV && !BCMSDIODEV && !BCMPCIEDEV
+ * So this function is defined for PCI (not PCIe) builds that are also non-DHD builds.
+ * On success, a buffer containing var/val values has been allocated in parameter 'vars'.
+ */
+static int
+BCMATTACHFN(initvars_srom_si)(si_t *sih, osl_t *osh, volatile void *curmap,
+ char **vars, uint *varsz)
+{
+ /* Search flash nvram section for srom variables */
+ BCM_REFERENCE(osh);
+ BCM_REFERENCE(curmap);
+ return initvars_flash_si(sih, vars, varsz);
+} /* initvars_srom_si */
+#endif /* !BCMDONGLEHOST */
+#endif /* !BCMUSBDEV && !BCMSDIODEV && !BCMPCIEDEV */
+
+void
+BCMATTACHFN(srom_var_deinit)(si_t *sih)
+{
+ BCM_REFERENCE(sih);
+
+ srvars_inited = FALSE;
+}
+
+#if defined(BCMPCIEDEV_SROM_FORMAT) && defined(WLC_TXCAL)
+bool
+BCMATTACHFN(srom_caldata_prsnt)(si_t *sih)
+{
+ return is_caldata_prsnt;
+}
+
+int
+BCMATTACHFN(srom_get_caldata)(si_t *sih, uint16 *srom)
+{
+ if (!is_caldata_prsnt) {
+ return BCME_ERROR;
+ }
+ if (srom_sromrev == 18) {
+ memcpy(srom, caldata_array, SROM18_CALDATA_WORDS * 2);
+ } else {
+ memcpy(srom, caldata_array, SROM_CALDATA_WORDS * 2);
+ }
+ return BCME_OK;
+}
+#endif /* defined(BCMPCIEDEV_SROM_FORMAT) && defined(WLC_TXCAL) */
diff --git a/bcmdhd.101.10.361.x/bcmstdlib.c b/bcmdhd.101.10.361.x/bcmstdlib.c
new file mode 100755
index 0000000..9e4f3a6
--- /dev/null
+++ b/bcmdhd.101.10.361.x/bcmstdlib.c
@@ -0,0 +1,1251 @@
+/*
+ * stdlib support routines for self-contained images.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+/*
+ * bcmstdlib.c should be used only to construct an OSL, or standalone without any OSL.
+ * It should not be combined with an arbitrary OSL, as some of the routines defined
+ * here could conflict with that OSL's own definitions.
+ */
+
+/*
+ * Define BCMSTDLIB_WIN32_APP if this is a Win32 Application compile
+ */
+#if defined(_WIN32) && !defined(NDIS) && !defined(EFI)
+#define BCMSTDLIB_WIN32_APP 1
+#endif /* _WIN32 && !NDIS && !EFI */
+
+/*
+ * Define BCMSTDLIB_SNPRINTF_ONLY if we only want snprintf & vsnprintf implementations
+ */
+#if defined(_WIN32) && !defined(EFI)
+#define BCMSTDLIB_SNPRINTF_ONLY 1
+#endif /* _WIN32 && !EFI */
+
+#include <typedefs.h>
+#ifdef BCMSTDLIB_WIN32_APP
+/* for size_t definition */
+#include <stddef.h>
+#endif
+#include <stdarg.h>
+#ifndef BCMSTDLIB_WIN32_APP
+#include <bcmutils.h>
+#endif
+#include <bcmstdlib.h>
+
+/* Don't use compiler builtins for stdlib APIs within the implementation of the stdlib itself. */
+#if defined(BCM_FORTIFY_SOURCE) || defined(BCM_STDLIB_USE_BUILTINS)
+#undef memcpy
+#undef memmove
+#undef memset
+#undef strncpy
+#undef snprintf
+#undef vsnprintf
+#endif /* BCM_FORTIFY_SOURCE || BCM_STDLIB_USE_BUILTINS */
+
+#ifdef HND_PRINTF_THREAD_SAFE
+#include <osl.h>
+#include <osl_ext.h>
+#include <bcmstdlib_ext.h>
+
+/* mutex macros for thread safety */
+#define HND_PRINTF_MUTEX_DECL(mutex) static OSL_EXT_MUTEX_DECL(mutex)
+#define HND_PRINTF_MUTEX_CREATE(name, mutex) osl_ext_mutex_create(name, mutex)
+#define HND_PRINTF_MUTEX_DELETE(mutex) osl_ext_mutex_delete(mutex)
+#define HND_PRINTF_MUTEX_ACQUIRE(mutex, msec) osl_ext_mutex_acquire(mutex, msec)
+#define HND_PRINTF_MUTEX_RELEASE(mutex) osl_ext_mutex_release(mutex)
+
+HND_PRINTF_MUTEX_DECL(printf_mutex);
+int in_isr_handler = 0, in_trap_handler = 0, in_fiq_handler = 0;
+
+bool
+printf_lock_init(void)
+{
+ /* create mutex for critical section locking */
+ if (HND_PRINTF_MUTEX_CREATE("printf_mutex", &printf_mutex) != OSL_EXT_SUCCESS)
+ return FALSE;
+ return TRUE;
+}
+
+bool
+printf_lock_cleanup(void)
+{
+	/* delete the mutex used for critical section locking */
+ if (HND_PRINTF_MUTEX_DELETE(&printf_mutex) != OSL_EXT_SUCCESS)
+ return FALSE;
+ return TRUE;
+}
+
+/* Returns TRUE if allowed to proceed, FALSE to discard.
+ * printf from an ISR hook or FIQ hook is not allowed due to the IRQ_MODE and
+ * FIQ_MODE stack size limitation.
+ */
+static bool
+printf_lock(void)
+{
+
+ /* discard for irq or fiq context, we need to keep irq/fiq stack small. */
+ if (in_isr_handler || in_fiq_handler)
+ return FALSE;
+
+ /* allow printf in trap handler, proceed without mutex. */
+ if (in_trap_handler)
+ return TRUE;
+
+ /* if not in isr or trap, then go thread-protection with mutex. */
+ if (HND_PRINTF_MUTEX_ACQUIRE(&printf_mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return FALSE;
+ else
+ return TRUE;
+}
+
+static void
+printf_unlock(void)
+{
+ if (in_isr_handler || in_fiq_handler)
+ return;
+
+ if (in_trap_handler)
+ return;
+
+ if (HND_PRINTF_MUTEX_RELEASE(&printf_mutex) != OSL_EXT_SUCCESS)
+ return;
+}
+
+#else
+#define printf_lock() (TRUE)
+#define printf_unlock()
+#endif /* HND_PRINTF_THREAD_SAFE */
+
+#ifdef BCMSTDLIB_WIN32_APP
+
+/* for a WIN32 application, use _vsnprintf as basis of vsnprintf/snprintf to
+ * support full set of format specifications.
+ */
+
+int
+BCMPOSTTRAPFN(vsnprintf)(char *buf, size_t bufsize, const char *fmt, va_list ap)
+{
+ int r;
+
+ r = _vsnprintf(buf, bufsize, fmt, ap);
+
+ /* Microsoft _vsnprintf() will not null terminate on overflow,
+ * so null terminate at buffer end on error
+ */
+ if (r < 0 && bufsize > 0)
+ buf[bufsize - 1] = '\0';
+
+ return r;
+}
+
+int
+BCMPOSTTRAPFN(snprintf)(char *buf, size_t bufsize, const char *fmt, ...)
+{
+ va_list ap;
+ int r;
+
+ va_start(ap, fmt);
+ r = vsnprintf(buf, bufsize, fmt, ap);
+ va_end(ap);
+
+ return r;
+}
+
+#else /* BCMSTDLIB_WIN32_APP */
+
+#if !defined(BCMROMOFFLOAD_EXCLUDE_STDLIB_FUNCS)
+
+static const char hex_upper[17] = "0123456789ABCDEF";
+static const char hex_lower[17] = "0123456789abcdef";
+
+static int
+BCMPOSTTRAPFN(__atolx)(char *buf, char * end, unsigned long num, unsigned long radix, int width,
+ const char *digits, int zero_pad)
+{
+ char buffer[32];
+ char *op;
+ int retval;
+
+ op = &buffer[0];
+ retval = 0;
+
+ do {
+ *op++ = digits[num % radix];
+ retval++;
+ num /= radix;
+ } while (num != 0);
+
+ if (width && (width > retval) && zero_pad) {
+ width = width - retval;
+ while (width) {
+ *op++ = '0';
+ retval++;
+ width--;
+ }
+ }
+
+ while (op != buffer) {
+ op--;
+ if (buf <= end)
+ *buf = *op;
+ buf++;
+ }
+
+ return retval;
+}
+
+static int
+BCMPOSTTRAPFN(__atox)(char *buf, char * end, unsigned int num, unsigned int radix, int width,
+ const char *digits, int zero_pad)
+{
+ char buffer[16];
+ char *op;
+ int retval;
+
+ op = &buffer[0];
+ retval = 0;
+
+ do {
+ *op++ = digits[num % radix];
+ retval++;
+ num /= radix;
+ } while (num != 0);
+
+ if (width && (width > retval) && zero_pad) {
+ width = width - retval;
+ while (width) {
+ *op++ = '0';
+ retval++;
+ width--;
+ }
+ }
+
+ while (op != buffer) {
+ op--;
+ if (buf <= end)
+ *buf = *op;
+ buf++;
+ }
+
+ return retval;
+}
+
+int
+BCMPOSTTRAPFN(vsnprintf)(char *buf, size_t size, const char *fmt, va_list ap)
+{
+ char *optr;
+ char *end;
+ const char *iptr, *tmpptr;
+ unsigned int x;
+ int i;
+ int islong;
+ int width;
+ int width2 = 0;
+ int hashash = 0;
+ int zero_pad;
+ unsigned long ul = 0;
+ long int li = 0;
+
+ optr = buf;
+ end = buf + size - 1;
+ iptr = fmt;
+
+ if (FWSIGN_ENAB()) {
+ return 0;
+ }
+
+ if (end < buf - 1) {
+ end = ((void *) -1);
+ size = end - buf + 1;
+ }
+
+ while (*iptr) {
+		zero_pad = 0;
+		hashash = 0;
+ if (*iptr != '%') {
+ if (optr <= end)
+ *optr = *iptr;
+ ++optr;
+ ++iptr;
+ continue;
+ }
+
+ iptr++;
+
+ if (*iptr == '#') {
+ hashash = 1;
+ iptr++;
+ }
+ if (*iptr == '-') {
+ iptr++;
+ }
+
+ if (*iptr == '0') {
+ zero_pad = 1;
+ iptr++;
+ }
+
+ width = 0;
+ while (*iptr && bcm_isdigit(*iptr)) {
+ width += (*iptr - '0');
+ iptr++;
+ if (bcm_isdigit(*iptr))
+ width *= 10;
+ }
+ if (*iptr == '.') {
+ iptr++;
+ width2 = 0;
+ while (*iptr && bcm_isdigit(*iptr)) {
+ width2 += (*iptr - '0');
+ iptr++;
+ if (bcm_isdigit(*iptr)) width2 *= 10;
+ }
+ }
+
+ islong = 0;
+ if (*iptr == 'l') {
+ islong++;
+ iptr++;
+ if (*iptr == 'l') {
+ ++islong;
+ ++iptr;
+ }
+ }
+
+ switch (*iptr) {
+ case 's':
+ tmpptr = va_arg(ap, const char *);
+ if (!tmpptr)
+ tmpptr = "(null)";
+			if ((width == 0) && (width2 == 0)) {
+ while (*tmpptr) {
+ if (optr <= end)
+ *optr = *tmpptr;
+ ++optr;
+ ++tmpptr;
+ }
+ break;
+ }
+ while (width && *tmpptr) {
+ if (optr <= end)
+ *optr = *tmpptr;
+ ++optr;
+ ++tmpptr;
+ width--;
+ }
+ while (width) {
+ if (optr <= end)
+ *optr = ' ';
+ ++optr;
+ width--;
+ }
+ break;
+ case 'd':
+ case 'i':
+ if (!islong) {
+ i = va_arg(ap, int);
+ if (i < 0) {
+ if (optr <= end)
+ *optr = '-';
+ ++optr;
+ i = -i;
+ }
+ optr += __atox(optr, end, i, 10, width, hex_upper, zero_pad);
+ } else {
+ li = va_arg(ap, long int);
+ if (li < 0) {
+ if (optr <= end)
+ *optr = '-';
+ ++optr;
+ li = -li;
+ }
+ optr += __atolx(optr, end, li, 10, width, hex_upper, zero_pad);
+ }
+ break;
+ case 'u':
+ if (!islong) {
+ x = va_arg(ap, unsigned int);
+ optr += __atox(optr, end, x, 10, width, hex_upper, zero_pad);
+ } else {
+ ul = va_arg(ap, unsigned long);
+ optr += __atolx(optr, end, ul, 10, width, hex_upper, zero_pad);
+ }
+ break;
+ case 'X':
+ case 'x':
+			if (hashash) {
+				if (optr <= end)
+					*optr = '0';
+				optr++;
+				if (optr <= end)
+					*optr = *iptr;
+				optr++;
+			}
+ if (!islong) {
+ x = va_arg(ap, unsigned int);
+ optr += __atox(optr, end, x, 16, width,
+ (*iptr == 'X') ? hex_upper : hex_lower, zero_pad);
+ } else {
+ ul = va_arg(ap, unsigned long);
+ optr += __atolx(optr, end, ul, 16, width,
+ (*iptr == 'X') ? hex_upper : hex_lower, zero_pad);
+ }
+ break;
+ case 'p':
+ case 'P':
+ x = va_arg(ap, unsigned int);
+ optr += __atox(optr, end, x, 16, 8,
+ (*iptr == 'P') ? hex_upper : hex_lower, zero_pad);
+ break;
+ case 'c':
+ x = va_arg(ap, int);
+ if (optr <= end)
+ *optr = x & 0xff;
+ optr++;
+ break;
+
+ default:
+ if (optr <= end)
+ *optr = *iptr;
+ optr++;
+ break;
+ }
+ iptr++;
+ }
+
+ if (optr <= end) {
+ *optr = '\0';
+ return (int)(optr - buf);
+ } else {
+ *end = '\0';
+ return (int)(end - buf);
+ }
+}
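+
+/* Return-value note (illustrative): unlike C99 vsnprintf(), which returns the
+ * length the formatted string would have had, this implementation returns the
+ * number of characters actually stored when output is truncated. A sketch:
+ *
+ *	char small[8];
+ *	int n = snprintf(small, sizeof(small), "%s", "0123456789");
+ *
+ * C99 snprintf() would yield n == 10; the implementation above yields n == 7
+ * and 'small' holds "0123456" plus the terminating NUL.
+ */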
+
+int
+BCMPOSTTRAPFN(snprintf)(char *buf, size_t bufsize, const char *fmt, ...)
+{
+ va_list ap;
+ int r;
+
+ if (FWSIGN_ENAB()) {
+ return 0;
+ }
+
+ va_start(ap, fmt);
+ r = vsnprintf(buf, bufsize, fmt, ap);
+ va_end(ap);
+
+ return r;
+}
+#endif /* !BCMROMOFFLOAD_EXCLUDE_STDLIB_FUNCS */
+
+#endif /* BCMSTDLIB_WIN32_APP */
+
+#ifndef BCMSTDLIB_SNPRINTF_ONLY
+int
+BCMPOSTTRAPFN(vsprintf)(char *buf, const char *fmt, va_list ap)
+{
+ if (FWSIGN_ENAB()) {
+ return 0;
+ }
+ return (vsnprintf(buf, INT_MAX, fmt, ap));
+}
+
+int
+BCMPOSTTRAPFN(sprintf)(char *buf, const char *fmt, ...)
+{
+ va_list ap;
+ int count;
+
+ if (FWSIGN_ENAB()) {
+ return 0;
+ }
+
+ va_start(ap, fmt);
+ count = vsprintf(buf, fmt, ap);
+ va_end(ap);
+
+ return count;
+}
+
+#if !defined(EFI) || !defined(COMPILER_INTRINSICS_LIB)
+void *
+memmove(void *dest, const void *src, size_t n)
+{
+ /* only use memcpy if there is no overlap. otherwise copy each byte in a safe sequence */
+ if (((const char *)src >= (char *)dest + n) || ((const char *)src + n <= (char *)dest)) {
+ return memcpy(dest, src, n);
+ }
+
+ /* Overlapping copy forward or backward */
+ if (src < dest) {
+ unsigned char *d = (unsigned char *)dest + (n - 1);
+ const unsigned char *s = (const unsigned char *)src + (n - 1);
+ while (n) {
+ *d-- = *s--;
+ n--;
+ }
+ } else if (src > dest) {
+ unsigned char *d = (unsigned char *)dest;
+ const unsigned char *s = (const unsigned char *)src;
+ while (n) {
+ *d++ = *s++;
+ n--;
+ }
+ }
+
+ return dest;
+}
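+
+/* Illustrative overlap case: shifting a buffer left, where src > dest, takes
+ * the forward-copy branch above:
+ *
+ *	char b[8] = "abcdef";
+ *	memmove(b, b + 2, 4);
+ *
+ * The regions overlap, so memcpy() is skipped and b ends up as "cdefef".
+ */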
+#endif /* !EFI || !COMPILER_INTRINSICS_LIB */
+
+#ifndef EFI
+int
+memcmp(const void *s1, const void *s2, size_t n)
+{
+ const unsigned char *ss1;
+ const unsigned char *ss2;
+
+ ss1 = (const unsigned char *)s1;
+ ss2 = (const unsigned char *)s2;
+
+ while (n) {
+ if (*ss1 < *ss2)
+ return -1;
+ if (*ss1 > *ss2)
+ return 1;
+ ss1++;
+ ss2++;
+ n--;
+ }
+
+ return 0;
+}
+
+/* Skip over functions that are being used from DriverLibrary to save space */
+char *
+strncpy(char *dest, const char *src, size_t n)
+{
+ char *endp;
+ char *p;
+
+ p = dest;
+ endp = p + n;
+
+ while (p != endp && (*p++ = *src++) != '\0')
+ ;
+
+ /* zero fill remainder */
+ bzero(p, (endp - p));
+
+ return dest;
+}
+
+size_t
+BCMPOSTTRAPFN(strlen)(const char *s)
+{
+ size_t n = 0;
+
+ while (*s) {
+ s++;
+ n++;
+ }
+
+ return n;
+}
+
+int
+BCMPOSTTRAPFN(strcmp)(const char *s1, const char *s2)
+{
+ while (*s2 && *s1) {
+ if (*s1 < *s2)
+ return -1;
+ if (*s1 > *s2)
+ return 1;
+ s1++;
+ s2++;
+ }
+
+ if (*s1 && !*s2)
+ return 1;
+ if (!*s1 && *s2)
+ return -1;
+ return 0;
+}
+#endif /* EFI */
+
+int
+strncmp(const char *s1, const char *s2, size_t n)
+{
+ while (*s2 && *s1 && n) {
+ if (*s1 < *s2)
+ return -1;
+ if (*s1 > *s2)
+ return 1;
+ s1++;
+ s2++;
+ n--;
+ }
+
+ if (!n)
+ return 0;
+ if (*s1 && !*s2)
+ return 1;
+ if (!*s1 && *s2)
+ return -1;
+ return 0;
+}
+
+char *
+strchr(const char *str, int c)
+{
+ const char *x = str;
+
+ while (*x != (char)c) {
+ if (*x++ == '\0')
+ return (NULL);
+ }
+
+ return DISCARD_QUAL(x, char);
+}
+
+char *
+strrchr(const char *str, int c)
+{
+ const char *save = NULL;
+
+ do {
+ if (*str == (char)c)
+ save = str;
+ } while (*str++ != '\0');
+
+ return DISCARD_QUAL(save, char);
+}
+
+/* Skip over functions that are being used from DriverLibrary to save space */
+#ifndef EFI
+char *
+strstr(const char *s, const char *substr)
+{
+ int substr_len = strlen(substr);
+
+ for (; *s; s++)
+ if (strncmp(s, substr, substr_len) == 0)
+ return DISCARD_QUAL(s, char);
+
+ return NULL;
+}
+#endif /* EFI */
+
+size_t
+strspn(const char *s, const char *accept)
+{
+ uint count = 0;
+
+ while (s[count] && strchr(accept, s[count]))
+ count++;
+
+ return count;
+}
+
+size_t
+strcspn(const char *s, const char *reject)
+{
+ uint count = 0;
+
+ while (s[count] && !strchr(reject, s[count]))
+ count++;
+
+ return count;
+}
+
+void *
+memchr(const void *s, int c, size_t n)
+{
+ if (n != 0) {
+ const unsigned char *ptr = s;
+
+ do {
+ if (*ptr == (unsigned char)c)
+ return DISCARD_QUAL(ptr, void);
+ ptr++;
+ n--;
+ } while (n != 0);
+ }
+ return NULL;
+}
+
+unsigned long
+strtoul(const char *cp, char **endp, int base)
+{
+ ulong result, value;
+ bool minus;
+
+ minus = FALSE;
+
+ while (bcm_isspace(*cp))
+ cp++;
+
+ if (cp[0] == '+')
+ cp++;
+ else if (cp[0] == '-') {
+ minus = TRUE;
+ cp++;
+ }
+
+ if (base == 0) {
+ if (cp[0] == '0') {
+ if ((cp[1] == 'x') || (cp[1] == 'X')) {
+ base = 16;
+ cp = &cp[2];
+ } else {
+ base = 8;
+ cp = &cp[1];
+ }
+ } else
+ base = 10;
+ } else if (base == 16 && (cp[0] == '0') && ((cp[1] == 'x') || (cp[1] == 'X'))) {
+ cp = &cp[2];
+ }
+
+ result = 0;
+
+ while (bcm_isxdigit(*cp) &&
+ (value = bcm_isdigit(*cp) ? *cp - '0' : bcm_toupper(*cp) - 'A' + 10) <
+ (ulong) base) {
+ result = result * base + value;
+ cp++;
+ }
+
+ if (minus)
+ result = (ulong)(result * -1);
+
+ if (endp)
+ *endp = DISCARD_QUAL(cp, char);
+
+ return (result);
+}
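+
+/* Illustrative base handling, as implemented above: with base 0 the prefix
+ * selects the radix, so strtoul("0x1f", NULL, 0) yields 31 and
+ * strtoul("017", NULL, 0) yields 15 (octal); with base 16 an optional "0x"
+ * prefix is skipped, so strtoul("0x1f", NULL, 16) also yields 31.
+ */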
+
+#ifdef EFI
+int
+putchar(int c)
+{
+ return putc(c, stdout);
+}
+
+int
+puts(const char *s)
+{
+ char c;
+
+ while ((c = *s++))
+ putchar(c);
+
+ putchar('\n');
+
+ return 0;
+}
+
+#else /* !EFI */
+
+/* memset is not in ROM offload because it is used directly by the compiler in
+ * structure assignments/character array initialization with "".
+ */
+void *
+BCMPOSTTRAPFN(memset)(void *dest, int c, size_t n)
+{
+ uint32 w, *dw;
+ unsigned char *d;
+
+ dw = (uint32 *)dest;
+
+	/* need at least 8 bytes, since building the fill word w has setup cost */
+ if ((n >= 8) && (((uintptr)dest & 3) == 0)) {
+ if (c == 0)
+ w = 0;
+ else {
+ unsigned char ch;
+
+ ch = (unsigned char)(c & 0xff);
+ w = (ch << 8) | ch;
+ w |= w << 16;
+ }
+ while (n >= 4) {
+ *dw++ = w;
+ n -= 4;
+ }
+ }
+ d = (unsigned char *)dw;
+
+ while (n) {
+ *d++ = (unsigned char)c;
+ n--;
+ }
+
+ return dest;
+}
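+
+/* Illustrative word fill: for c == 0xAB the code above builds
+ * w = (0xAB << 8) | 0xAB = 0xABAB, then w |= w << 16 gives 0xABABABAB,
+ * which is stored four bytes at a time while n >= 4; any 1-3 byte
+ * remainder is filled byte-wise.
+ */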
+
+/* memcpy is not in ROM offload because it is used directly by the compiler in
+ * structure assignments.
+ */
+#if defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7A__)
+void *
+BCMPOSTTRAPFN(memcpy)(void *dest, const void *src, size_t n)
+{
+ uint32 *dw;
+ const uint32 *sw;
+ unsigned char *d;
+ const unsigned char *s;
+
+ sw = (const uint32 *)src;
+ dw = (uint32 *)dest;
+
+ if (n >= 4 && ((uintptr)src & 3) == ((uintptr)dest & 3)) {
+ uint32 t1, t2, t3, t4, t5, t6, t7, t8;
+ int i = (4 - ((uintptr)src & 3)) % 4;
+
+ n -= i;
+
+ d = (unsigned char *)dw;
+ s = (const unsigned char *)sw;
+ while (i--) {
+ *d++ = *s++;
+ }
+ sw = (const uint32 *)s;
+ dw = (uint32 *)d;
+
+ if (n >= 32) {
+ const uint32 *sfinal = (const uint32 *)((const uint8 *)sw + (n & ~31));
+
+ asm volatile("\n1:\t"
+ "ldmia.w\t%0!, {%3, %4, %5, %6, %7, %8, %9, %10}\n\t"
+ "stmia.w\t%1!, {%3, %4, %5, %6, %7, %8, %9, %10}\n\t"
+ "cmp\t%2, %0\n\t"
+ "bhi.n\t1b\n\t"
+ : "=r" (sw), "=r" (dw), "=r" (sfinal), "=r" (t1), "=r" (t2),
+ "=r" (t3), "=r" (t4), "=r" (t5), "=r" (t6), "=r" (t7),
+ "=r" (t8)
+ : "0" (sw), "1" (dw), "2" (sfinal));
+
+ n %= 32;
+ }
+
+ /* Copy the remaining words */
+ switch (n / 4) {
+ case 0:
+ break;
+ case 1:
+ asm volatile("ldr\t%2, [%0]\n\t"
+ "str\t%2, [%1]\n\t"
+ "adds\t%0, #4\n\t"
+ "adds\t%1, #4\n\t"
+ : "=r" (sw), "=r" (dw), "=r" (t1)
+ : "0" (sw), "1" (dw));
+ break;
+ case 2:
+ asm volatile("ldmia.w\t%0!, {%2, %3}\n\t"
+ "stmia.w\t%1!, {%2, %3}\n\t"
+ : "=r" (sw), "=r" (dw), "=r" (t1), "=r" (t2)
+ : "0" (sw), "1" (dw));
+ break;
+ case 3:
+ asm volatile("ldmia.w\t%0!, {%2, %3, %4}\n\t"
+ "stmia.w\t%1!, {%2, %3, %4}\n\t"
+ : "=r" (sw), "=r" (dw), "=r" (t1), "=r" (t2),
+ "=r" (t3)
+ : "0" (sw), "1" (dw));
+ break;
+ case 4:
+ asm volatile("ldmia.w\t%0!, {%2, %3, %4, %5}\n\t"
+ "stmia.w\t%1!, {%2, %3, %4, %5}\n\t"
+ : "=r" (sw), "=r" (dw), "=r" (t1), "=r" (t2),
+ "=r" (t3), "=r" (t4)
+ : "0" (sw), "1" (dw));
+ break;
+ case 5:
+ asm volatile(
+ "ldmia.w\t%0!, {%2, %3, %4, %5, %6}\n\t"
+ "stmia.w\t%1!, {%2, %3, %4, %5, %6}\n\t"
+ : "=r" (sw), "=r" (dw), "=r" (t1), "=r" (t2),
+ "=r" (t3), "=r" (t4), "=r" (t5)
+ : "0" (sw), "1" (dw));
+ break;
+ case 6:
+ asm volatile(
+ "ldmia.w\t%0!, {%2, %3, %4, %5, %6, %7}\n\t"
+ "stmia.w\t%1!, {%2, %3, %4, %5, %6, %7}\n\t"
+ : "=r" (sw), "=r" (dw), "=r" (t1), "=r" (t2),
+ "=r" (t3), "=r" (t4), "=r" (t5), "=r" (t6)
+ : "0" (sw), "1" (dw));
+ break;
+ case 7:
+ asm volatile(
+ "ldmia.w\t%0!, {%2, %3, %4, %5, %6, %8, %7}\n\t"
+ "stmia.w\t%1!, {%2, %3, %4, %5, %6, %8, %7}\n\t"
+ : "=r" (sw), "=r" (dw), "=r" (t1), "=r" (t2),
+ "=r" (t3), "=r" (t4), "=r" (t5), "=r" (t6),
+ "=r" (t7)
+ : "0" (sw), "1" (dw));
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+ n = n % 4;
+ }
+
+ /* Copy the remaining bytes */
+ d = (unsigned char *)dw;
+ s = (const unsigned char *)sw;
+ while (n--) {
+ *d++ = *s++;
+ }
+
+ return dest;
+}
+
+#ifdef __clang__
+/* TODO: remove once toolchain builtin libraries are available */
+/* simulate compiler builtins */
+
+/* not aligned */
+void *__aeabi_memcpy(void *dest, const void *src, size_t n);
+void *
+__aeabi_memcpy(void *dest, const void *src, size_t n)
+{
+ return memcpy(dest, src, n);
+}
+
+/* 4 byte aligned */
+void *__aeabi_memcpy4(void *dest, const void *src, size_t n);
+void *
+__aeabi_memcpy4(void *dest, const void *src, size_t n)
+{
+ return memcpy(dest, src, n);
+}
+
+/* 8 byte aligned */
+void *__aeabi_memcpy8(void *dest, const void *src, size_t n);
+void *
+__aeabi_memcpy8(void *dest, const void *src, size_t n)
+{
+ return memcpy(dest, src, n);
+}
+
+/* 8 byte aligned */
+void *__aeabi_memclr8(void *dest, size_t n);
+void *
+__aeabi_memclr8(void *dest, size_t n)
+{
+ return memset(dest, 0, n);
+}
+#endif /* __clang__ */
+#else
+void *
+memcpy(void *dest, const void *src, size_t n)
+{
+ uint32 *dw;
+ const uint32 *sw;
+ unsigned char *d;
+ const unsigned char *s;
+
+ sw = (const uint32 *)src;
+ dw = (uint32 *)dest;
+
+ if ((n >= 4) && (((uintptr)src & 3) == ((uintptr)dest & 3))) {
+ int i = (4 - ((uintptr)src & 3)) % 4;
+ n -= i;
+ d = (unsigned char *)dw;
+ s = (const unsigned char *)sw;
+ while (i--) {
+ *d++ = *s++;
+ }
+
+ sw = (const uint32 *)s;
+ dw = (uint32 *)d;
+ while (n >= 4) {
+ *dw++ = *sw++;
+ n -= 4;
+ }
+ }
+ d = (unsigned char *)dw;
+ s = (const unsigned char *)sw;
+ while (n--) {
+ *d++ = *s++;
+ }
+
+ return dest;
+}
+#endif /* defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7A__) */
+#endif /* EFI */
+
+/* a hook to send printf output to the host */
+static printf_sendup_output_fn_t g_printf_sendup_output_fn = NULL;
+static void *g_printf_sendup_output_ctx = NULL;
+
+#ifdef DONGLEBUILD
+static bool _rodata_overwritten = FALSE;
+
+/* Ensure this string is not const. */
+CONST char BCMPOST_TRAP_RODATA(warn_str)[] = "RO overwritten %p\n";
+CONST char BCMPOST_TRAP_RODATA(time_str)[] = "%06u.%03u ";
+#endif /* DONGLEBUILD */
+
+void
+printf_set_sendup_output_fn(printf_sendup_output_fn_t fn, void *ctx)
+{
+ g_printf_sendup_output_fn = fn;
+ g_printf_sendup_output_ctx = ctx;
+}
+
+#ifdef DONGLEBUILD
+void
+BCMPOSTTRAPFN(printf_set_rodata_invalid)(void)
+{
+ _rodata_overwritten = TRUE;
+}
+
+bool
+printf_get_rodata_invalid(void)
+{
+ return (_rodata_overwritten);
+}
+#endif /* DONGLEBUILD */
+
+/* Include printf if it has already not been defined as NULL */
+#ifndef printf
+static bool last_nl = FALSE;
+int
+BCMPOSTTRAPFN(printf)(const char *fmt, ...)
+{
+ va_list ap;
+ int count = 0, i;
+ char buffer[PRINTF_BUFLEN + 1];
+
+ if (FWSIGN_ENAB()) {
+ return 0;
+ }
+
+ if (!printf_lock())
+ return 0;
+
+#ifdef DONGLEBUILD
+ if (_rodata_overwritten == TRUE) {
+ /* Regular printf will be garbage if ROdata is overwritten. In that case,
+ * print the caller address.
+ */
+ _rodata_overwritten = FALSE;
+ count = printf(warn_str, CALL_SITE);
+ _rodata_overwritten = TRUE;
+ return count;
+ }
+
+ if (last_nl) {
+ /* add the dongle ref time */
+ uint32 dongle_time_ms = hnd_get_reftime_ms();
+ count = sprintf(buffer, time_str, dongle_time_ms / 1000, dongle_time_ms % 1000);
+ }
+#endif /* DONGLEBUILD */
+
+ va_start(ap, fmt);
+ count += vsnprintf(buffer + count, sizeof(buffer) - count, fmt, ap);
+ va_end(ap);
+
+ for (i = 0; i < count; i++) {
+ putchar(buffer[i]);
+
+		/* The EFI environment requires CR/LF line endings in printf output,
+		 * so unless the string already contains \r\n the CR is never emitted.
+		 * Force it here.
+		 */
+#ifdef EFI
+ if (buffer[i] == '\n')
+ putchar('\r');
+#endif
+ }
+
+ /* send the output up to the host */
+ if (g_printf_sendup_output_fn != NULL) {
+ g_printf_sendup_output_fn(g_printf_sendup_output_ctx, buffer, count);
+ }
+
+	if (count > 0 && buffer[count - 1] == '\n')
+ last_nl = TRUE;
+ else
+ last_nl = FALSE;
+
+ printf_unlock();
+
+ return count;
+}
+#endif /* printf */
+
+#if !defined(_WIN32) && !defined(EFI)
+int
+fputs(const char *s, FILE *stream /* UNUSED */)
+{
+ char c;
+
+ UNUSED_PARAMETER(stream);
+ while ((c = *s++))
+ putchar(c);
+ return 0;
+}
+
+int
+puts(const char *s)
+{
+ fputs(s, stdout);
+ putchar('\n');
+ return 0;
+}
+
+int
+fputc(int c, FILE *stream)
+{
+ return putc(c, stream);
+}
+
+unsigned long
+rand(void)
+{
+ static unsigned long seed = 1;
+ long x, hi, lo, t;
+
+ x = seed;
+ hi = x / 127773;
+ lo = x % 127773;
+ t = 16807 * lo - 2836 * hi;
+ if (t <= 0) t += 0x7fffffff;
+ seed = t;
+ return t;
+}
+#endif /* !_WIN32 && !EFI */
+
+#endif /* BCMSTDLIB_SNPRINTF_ONLY */
+
+#if !defined(_WIN32) || defined(EFI)
+size_t
+strnlen(const char *s, size_t maxlen)
+{
+ const char *b = s;
+ const char *e = s + maxlen;
+
+ while (s < e && *s) {
+ s++;
+ }
+
+ return s - b;
+}
+#endif /* !_WIN32 || EFI */
+
+/* FORTIFY_SOURCE: Implementation of compiler built-in functions for C standard library functions
+ * that provide run-time buffer overflow detection.
+ */
+#if defined(BCM_FORTIFY_SOURCE)
+
+void*
+__memcpy_chk(void *dest, const void *src, size_t n, size_t destsz)
+{
+ if (memcpy_s(dest, destsz, src, n) != 0) {
+ OSL_SYS_HALT();
+ }
+
+ return (dest);
+}
+
+void *
+__memmove_chk(void *dest, const void *src, size_t n, size_t destsz)
+{
+ if (memmove_s(dest, destsz, src, n) != 0) {
+ OSL_SYS_HALT();
+ }
+
+ return (dest);
+}
+
+void *
+__memset_chk(void *dest, int c, size_t n, size_t destsz)
+{
+ if (memset_s(dest, destsz, c, n) != 0) {
+ OSL_SYS_HALT();
+ }
+
+ return (dest);
+}
+
+int
+__snprintf_chk(char *str, size_t n, int flag, size_t destsz, const char *fmt, ...)
+{
+ va_list arg;
+ int rc;
+
+ if (n > destsz) {
+ OSL_SYS_HALT();
+ }
+
+ va_start(arg, fmt);
+ rc = vsnprintf(str, n, fmt, arg);
+ va_end(arg);
+
+ return (rc);
+}
+
+int
+__vsnprintf_chk(char *str, size_t n, int flags, size_t destsz, const char *fmt, va_list ap)
+{
+ if (n > destsz) {
+ OSL_SYS_HALT();
+ }
+
+ return (vsnprintf(str, n, fmt, ap));
+}
+
+char *
+__strncpy_chk(char *dest, const char *src, size_t n, size_t destsz)
+{
+ if (n > destsz) {
+ OSL_SYS_HALT();
+ }
+
+ return (strncpy(dest, src, n));
+}
+#endif /* BCM_FORTIFY_SOURCE */
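+
+/* Illustrative expansion (standard GCC/Clang fortify behavior, not specific
+ * to this driver): with fortification enabled, a call such as
+ *
+ *	char dst[16];
+ *	memcpy(dst, src, len);
+ *
+ * is rewritten by the compiler into
+ *
+ *	__memcpy_chk(dst, src, len, __builtin_object_size(dst, 0));
+ *
+ * so an oversized 'len' fails the memcpy_s() check in __memcpy_chk() above
+ * and halts, rather than silently overflowing 'dst'.
+ */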
+
+/* Provide stub implementations for xxx_s() APIs that are remapped to compiler builtins.
+ * This allows the target to link.
+ *
+ * This is only intended as a compile-time test, and should be used by compile-only targets.
+ */
+#if defined(BCM_STDLIB_S_BUILTINS_TEST)
+#undef strcpy
+char* strcpy(char *dest, const char *src);
+char*
+strcpy(char *dest, const char *src)
+{
+ return (NULL);
+}
+
+#undef strcat
+char* strcat(char *dest, const char *src);
+char*
+strcat(char *dest, const char *src)
+{
+ return (NULL);
+}
+#endif /* BCM_STDLIB_S_BUILTINS_TEST */
diff --git a/bcmdhd.101.10.361.x/bcmstdlib_s.c b/bcmdhd.101.10.361.x/bcmstdlib_s.c
new file mode 100755
index 0000000..fd7e83e
--- /dev/null
+++ b/bcmdhd.101.10.361.x/bcmstdlib_s.c
@@ -0,0 +1,298 @@
+/*
+ * Broadcom Secure Standard Library.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#include <typedefs.h>
+#include <bcmdefs.h>
+#ifdef BCMDRIVER
+#include <osl.h>
+#else /* BCMDRIVER */
+#include <stddef.h>
+#include <string.h>
+#endif /* else BCMDRIVER */
+
+#include <bcmstdlib_s.h>
+#include <bcmutils.h>
+
+/* Don't use compiler builtins for stdlib APIs within the implementation of the stdlib itself. */
+#if defined(BCM_STDLIB_S_BUILTINS_TEST)
+ #undef memmove_s
+ #undef memcpy_s
+ #undef memset_s
+ #undef strlcpy
+ #undef strlcat_s
+#endif /* BCM_STDLIB_S_BUILTINS_TEST */
+
+/*
+ * The __SIZE_MAX__ value depends on the platform:
+ * Dongle firmware: RAMSIZE (dongle-specific limit).
+ * LINUX NIC/Windows/MACOSX/Application: OS Native or
+ * 0xFFFFFFFFu if not defined.
+ */
+#ifndef SIZE_MAX
+#ifndef __SIZE_MAX__
+#ifdef DONGLEBUILD
+#define __SIZE_MAX__ RAMSIZE
+#else
+#define __SIZE_MAX__ 0xFFFFFFFFu
+#endif /* DONGLEBUILD */
+#endif /* __SIZE_MAX__ */
+#define SIZE_MAX __SIZE_MAX__
+#endif /* SIZE_MAX */
+#define RSIZE_MAX (SIZE_MAX >> 1u)
+
+#if !defined(__STDC_WANT_SECURE_LIB__) && \
+ !(defined(__STDC_LIB_EXT1__) && defined(__STDC_WANT_LIB_EXT1__))
+/*
+ * memmove_s - secure memmove
+ * dest : pointer to the object to copy to
+ * destsz : size of the destination buffer
+ * src : pointer to the object to copy from
+ * n : number of bytes to copy
+ * Return Value : zero on success and non-zero on error
+ * Also on error, if dest is not a null pointer and destsz not greater
+ * than RSIZE_MAX, writes destsz zero bytes into the dest object.
+ */
+int
+memmove_s(void *dest, size_t destsz, const void *src, size_t n)
+{
+ int err = BCME_OK;
+
+ if ((!dest) || (((char *)dest + destsz) < (char *)dest)) {
+ err = BCME_BADARG;
+ goto exit;
+ }
+
+ if (destsz > RSIZE_MAX) {
+ err = BCME_BADLEN;
+ goto exit;
+ }
+
+ if (destsz < n) {
+ memset(dest, 0, destsz);
+ err = BCME_BADLEN;
+ goto exit;
+ }
+
+ if ((!src) || (((const char *)src + n) < (const char *)src)) {
+ memset(dest, 0, destsz);
+ err = BCME_BADARG;
+ goto exit;
+ }
+
+ memmove(dest, src, n);
+exit:
+ return err;
+}
+
+/*
+ * memcpy_s - secure memcpy
+ * dest : pointer to the object to copy to
+ * destsz : size of the destination buffer
+ * src : pointer to the object to copy from
+ * n : number of bytes to copy
+ * Return Value : zero on success and non-zero on error
+ * Also on error, if dest is not a null pointer and destsz not greater
+ * than RSIZE_MAX, writes destsz zero bytes into the dest object.
+ */
+int
+BCMPOSTTRAPFN(memcpy_s)(void *dest, size_t destsz, const void *src, size_t n)
+{
+ int err = BCME_OK;
+ char *d = dest;
+ const char *s = src;
+
+ if ((!d) || ((d + destsz) < d)) {
+ err = BCME_BADARG;
+ goto exit;
+ }
+
+ if (destsz > RSIZE_MAX) {
+ err = BCME_BADLEN;
+ goto exit;
+ }
+
+ if (destsz < n) {
+ memset(dest, 0, destsz);
+ err = BCME_BADLEN;
+ goto exit;
+ }
+
+ if ((!s) || ((s + n) < s)) {
+ memset(dest, 0, destsz);
+ err = BCME_BADARG;
+ goto exit;
+ }
+
+ /* overlap checking between dest and src */
+ if (!(((d + destsz) <= s) || (d >= (s + n)))) {
+ memset(dest, 0, destsz);
+ err = BCME_BADARG;
+ goto exit;
+ }
+
+ (void)memcpy(dest, src, n);
+exit:
+ return err;
+}
+
+/*
+ * memset_s - secure memset
+ * dest : pointer to the object to be set
+ * destsz : size of the destination buffer
+ * c : byte value
+ * n : number of bytes to be set
+ * Return Value : zero on success and non-zero on error
+ * Also on error, if dest is not a null pointer and destsz not greater
+ * than RSIZE_MAX, writes destsz bytes with value c into the dest object.
+ */
+int
+BCMPOSTTRAPFN(memset_s)(void *dest, size_t destsz, int c, size_t n)
+{
+ int err = BCME_OK;
+ if ((!dest) || (((char *)dest + destsz) < (char *)dest)) {
+ err = BCME_BADARG;
+ goto exit;
+ }
+
+ if (destsz > RSIZE_MAX) {
+ err = BCME_BADLEN;
+ goto exit;
+ }
+
+ if (destsz < n) {
+ (void)memset(dest, c, destsz);
+ err = BCME_BADLEN;
+ goto exit;
+ }
+
+ (void)memset(dest, c, n);
+exit:
+ return err;
+}
+#endif /* !__STDC_WANT_SECURE_LIB__ && !(__STDC_LIB_EXT1__ && __STDC_WANT_LIB_EXT1__) */
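+
+/* Usage sketch (illustrative, with hypothetical buffers): unlike plain
+ * memcpy(), memcpy_s() rejects oversized or overlapping copies and zeroes
+ * the destination on such errors:
+ *
+ *	uint8 dst[16];
+ *	if (memcpy_s(dst, sizeof(dst), src, src_len) != BCME_OK) {
+ *		... dst has been zeroed; handle the error ...
+ *	}
+ */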
+
+#if !defined(FREEBSD) && !defined(MACOSX) && !defined(BCM_USE_PLATFORM_STRLCPY)
+/**
+ * strlcpy - Copy a %NUL terminated string into a sized buffer
+ * @dest: Where to copy the string to
+ * @src: Where to copy the string from
+ * @size: size of destination buffer
+ * return: string length of src (always < size) on success, 0 if the input
+ * parameters are invalid, or size on failure (truncation)
+ *
+ * Compatible with *BSD: the result is always a valid
+ * NUL-terminated string that fits in the buffer (unless,
+ * of course, the buffer size is zero). It does not pad
+ * out the result like strncpy() does.
+ */
+size_t strlcpy(char *dest, const char *src, size_t size)
+{
+ size_t i;
+
+ if (dest == NULL || size == 0) {
+ return 0;
+ }
+
+ if (src == NULL) {
+ *dest = '\0';
+ return 0;
+ }
+
+ for (i = 0; i < size; i++) {
+ dest[i] = src[i];
+ if (dest[i] == '\0') {
+ /* success - src string copied */
+ return i;
+ }
+ }
+
+	/* NUL-terminate since no terminator was found within size bytes of src */
+ dest[size - 1u] = '\0';
+
+ /* fail - src string truncated */
+ return size;
+}
+#endif /* !defined(FREEBSD) && !defined(MACOSX) && !defined(BCM_USE_PLATFORM_STRLCPY) */
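+
+/* Usage sketch (illustrative): with this implementation, truncation is
+ * detected by comparing the return value against the buffer size:
+ *
+ *	char name[8];
+ *	if (strlcpy(name, src, sizeof(name)) == sizeof(name)) {
+ *		... src did not fit; name holds a NUL-terminated prefix ...
+ *	}
+ */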
+
+/**
+ * strlcat_s - Concatenate a %NUL terminated string with a sized buffer
+ * @dest: Where to concatenate the string to
+ * @src: Where to copy the string from
+ * @size: size of destination buffer
+ * return: string length of created string (i.e. the initial length of dest plus the length of src)
+ * not including the NUL char, up until size
+ *
+ * Unlike strncat(), strlcat_s() takes the full size of the buffer (not just the number of bytes to
+ * copy) and guarantees to NUL-terminate the result (even when there's nothing to concat).
+ * If the length of dest concatenated with src is >= size, truncation occurs.
+ *
+ * Compatible with *BSD: the result is always a valid NUL-terminated string that fits in the buffer
+ * (unless, of course, the buffer size is zero).
+ *
+ * If either src or dest is not NUL-terminated, dest[size-1] will be set to NUL.
+ * If size < strlen(dest) + strlen(src), dest[size-1] will be set to NUL.
+ * If size == 0, dest[0] will be set to NUL.
+ */
+size_t
+strlcat_s(char *dest, const char *src, size_t size)
+{
+ char *d = dest;
+ const char *s = src; /* point to the start of the src string */
+ size_t n = size;
+ size_t dlen;
+ size_t bytes_to_copy = 0;
+
+ if (dest == NULL) {
+ return 0;
+ }
+
+ /* set d to point to the end of dest string (up to size) */
+ while (n != 0 && *d != '\0') {
+ d++;
+ n--;
+ }
+ dlen = (size_t)(d - dest);
+
+ if (s != NULL) {
+ size_t slen = 0;
+
+ /* calculate src len in case it's not null-terminated */
+ n = size;
+ while (n-- != 0 && *(s + slen) != '\0') {
+ ++slen;
+ }
+
+ n = size - dlen; /* maximum num of chars to copy */
+ if (n != 0) {
+ /* copy relevant chars (until end of src buf or given size is reached) */
+ bytes_to_copy = MIN(slen - (size_t)(s - src), n - 1);
+ (void)memcpy(d, s, bytes_to_copy);
+ d += bytes_to_copy;
+ }
+ }
+ if (n == 0 && dlen != 0) {
+ --d; /* nothing to copy, but NUL-terminate dest anyway */
+ }
+ *d = '\0'; /* NUL-terminate dest */
+
+ return (dlen + bytes_to_copy);
+}
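+
+/* Usage sketch (illustrative, with hypothetical strings): repeated
+ * concatenation into a fixed buffer stays NUL-terminated and never
+ * overflows, truncating once the buffer is full:
+ *
+ *	char path[8] = "wl";
+ *	strlcat_s(path, "/cnt", sizeof(path));      path becomes "wl/cnt"
+ *	strlcat_s(path, "/status", sizeof(path));   path becomes "wl/cnt/", truncated
+ */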
diff --git a/bcmdhd.101.10.361.x/bcmutils.c b/bcmdhd.101.10.361.x/bcmutils.c
new file mode 100755
index 0000000..929056f
--- /dev/null
+++ b/bcmdhd.101.10.361.x/bcmutils.c
@@ -0,0 +1,6097 @@
+/*
+ * Driver O/S-independent utility routines
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <stdarg.h>
+#ifdef BCMDRIVER
+#include <osl.h>
+#include <bcmutils.h>
+#if !defined(BCMDONGLEHOST) || defined(BCMNVRAM)
+#include <bcmnvram.h>
+#endif
+
+#else /* !BCMDRIVER */
+
+#include <stdio.h>
+#include <string.h>
+#include <bcmutils.h>
+
+#if defined(BCMEXTSUP)
+#include <bcm_osl.h>
+#endif
+
+#ifndef ASSERT
+#define ASSERT(exp)
+#endif
+
+#endif /* !BCMDRIVER */
+
+#ifdef WL_UNITTEST
+/*
+ * Definitions and includes needed during software unit test compilation and execution.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#ifdef ASSERT
+#undef ASSERT
+#endif /* ASSERT */
+#define ASSERT(exp)
+#endif /* WL_UNITTEST */
+
+#if defined(_WIN32) || defined(NDIS)
+/* Debatable */
+#include <bcmstdlib.h>
+#endif
+#include <bcmstdlib_s.h>
+#include <bcmendian.h>
+#include <bcmdevs.h>
+#include <ethernet.h>
+#include <vlan.h>
+#include <bcmip.h>
+#include <802.1d.h>
+#include <802.11.h>
+#include <bcmip.h>
+#include <bcmipv6.h>
+#include <bcmtcp.h>
+#ifdef BCMPERFSTATS
+#include <bcmperf.h>
+#endif
+#include <wl_android.h>
+
+#define NUMBER_OF_BITS_BYTE 8u
+
+#ifdef CUSTOM_DSCP_TO_PRIO_MAPPING
+#define CUST_IPV4_TOS_PREC_MASK 0x3F
+#define DCSP_MAX_VALUE 64
+extern uint dhd_dscpmap_enable;
+/* 0:BE, 1:BK, 2:RESV(BK), 3:EE, 4:CL, 5:VI, 6:VO, 7:NC */
+int dscp2priomap[DCSP_MAX_VALUE] =
+{
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, /* BK->BE */
+ 2, 0, 0, 0, 0, 0, 0, 0,
+ 3, 0, 0, 0, 0, 0, 0, 0,
+ 4, 0, 0, 0, 0, 0, 0, 0,
+ 5, 0, 0, 0, 0, 0, 0, 0,
+ 6, 0, 0, 0, 0, 0, 0, 0,
+ 7, 0, 0, 0, 0, 0, 0, 0
+};
+#endif /* CUSTOM_DSCP_TO_PRIO_MAPPING */
+
+#ifdef PRIVACY_MASK
+struct ether_addr privacy_addrmask;
+
+/* RAM accessor function to avoid 'privacy_addrmask' in ROM/RAM shared data section. */
+static struct ether_addr *
+BCMRAMFN(privacy_addrmask_get)(void)
+{
+ return &privacy_addrmask;
+}
+#endif /* PRIVACY_MASK */
+
+#ifdef BCMDRIVER
+
+#ifndef BCM_ARM_BACKTRACE
+/* function pointers for firmware stack backtrace utility */
+void (*const print_btrace_int_fn)(int depth, uint32 pc, uint32 lr, uint32 sp) = NULL;
+void (*const print_btrace_fn)(int depth) = NULL;
+#else
+void print_backtrace(int depth);
+void (*const print_btrace_fn)(int depth) = print_backtrace;
+void print_backtrace_int(int depth, uint32 pc, uint32 lr, uint32 sp);
+void (*const print_btrace_int_fn)(int depth, uint32 pc, uint32 lr, uint32 sp) = print_backtrace_int;
+#endif
+
+#if !defined(BCMDONGLEHOST)
+/* Forward declarations */
+char * getvar_internal(char *vars, const char *name);
+int getintvar_internal(char *vars, const char *name);
+int getintvararray_internal(char *vars, const char *name, int index);
+int getintvararraysize_internal(char *vars, const char *name);
+
+#ifndef WL_FWSIGN
+/*
+ * Search the name=value vars for a specific one and return its value.
+ * Returns NULL if not found.
+ */
+char *
+getvar(char *vars, const char *name)
+{
+ NVRAM_RECLAIM_CHECK(name);
+ return getvar_internal(vars, name);
+}
+
+char *
+getvar_internal(char *vars, const char *name)
+{
+ char *s;
+ uint len;
+
+ if (!name)
+ return NULL;
+
+ len = strlen(name);
+ if (len == 0u) {
+ return NULL;
+ }
+
+ /* first look in vars[] */
+ for (s = vars; s && *s;) {
+ if ((bcmp(s, name, len) == 0) && (s[len] == '=')) {
+ return (&s[len+1u]);
+ }
+ while (*s++)
+ ;
+ }
+
+ /* then query nvram */
+ return (nvram_get(name));
+}
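+
+/* Illustrative vars layout: the buffer scanned above is a sequence of
+ * NUL-terminated "name=value" pairs ending with an empty string, e.g.
+ *
+ *	"boardtype=0x0598\0boardrev=0x1102\0\0"
+ *
+ * Here getvar(vars, "boardrev") returns a pointer to "0x1102"; names not
+ * found in the buffer fall through to nvram_get().
+ */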
+
+/*
+ * Search the vars for a specific one and return its value as
+ * an integer. Returns 0 if not found.
+ */
+int
+getintvar(char *vars, const char *name)
+{
+ NVRAM_RECLAIM_CHECK(name);
+ return getintvar_internal(vars, name);
+}
+
+int
+getintvar_internal(char *vars, const char *name)
+{
+ char *val;
+
+ if ((val = getvar_internal(vars, name)) == NULL)
+ return (0);
+
+ return (bcm_strtoul(val, NULL, 0));
+}
+
+int
+getintvararray(char *vars, const char *name, int index)
+{
+ NVRAM_RECLAIM_CHECK(name);
+ return getintvararray_internal(vars, name, index);
+}
+
+int
+getintvararray_internal(char *vars, const char *name, int index)
+{
+ char *buf, *endp;
+ int i = 0;
+ int val = 0;
+
+ if ((buf = getvar_internal(vars, name)) == NULL) {
+ return (0);
+ }
+
+ /* table values are always separated by "," or " " */
+ while (*buf != '\0') {
+ val = bcm_strtoul(buf, &endp, 0);
+ if (i == index) {
+ return val;
+ }
+ buf = endp;
+ /* delimiter is ',' */
+ if (*buf == ',')
+ buf++;
+ i++;
+ }
+ return (0);
+}
+
+int
+getintvararraysize(char *vars, const char *name)
+{
+ NVRAM_RECLAIM_CHECK(name);
+ return getintvararraysize_internal(vars, name);
+}
+
+int
+getintvararraysize_internal(char *vars, const char *name)
+{
+ char *buf, *endp;
+ int count = 0;
+ int val = 0;
+
+ if ((buf = getvar_internal(vars, name)) == NULL) {
+ return (0);
+ }
+
+ /* table values are always separated by "," or " " */
+ while (*buf != '\0') {
+ val = bcm_strtoul(buf, &endp, 0);
+ buf = endp;
+ /* delimiter is ',' */
+ if (*buf == ',')
+ buf++;
+ count++;
+ }
+ BCM_REFERENCE(val);
+ return count;
+}
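+
+/* Illustrative array var (hypothetical name): for a vars entry such as
+ * "mytable=1,0x10,7", getintvararraysize(vars, "mytable") returns 3 and
+ * getintvararray(vars, "mytable", 1) returns 0x10; values are parsed with
+ * bcm_strtoul() and separated by ','.
+ */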
+
+/* Read an array of values from a possibly slice-specific nvram string
+ * Store the values in either the uint8 dest_array1 or in the int16 dest_array2.
+ * Pass in NULL for the dest_array[12] that is not to be used.
+ */
+static int
+BCMATTACHFN(getintvararray_slicespecific)(osl_t *osh, char *vars, char *vars_table_accessor,
+ const char* name, uint8* dest_array1, int16* dest_array2, uint dest_size)
+{
+ uint i;
+ uint array_size = 0;
+ int err = BCME_OK;
+ uint prefixed_name_sz;
+ char *prefixed_name = NULL;
+ const char *new_name;
+ int val;
+
+ prefixed_name_sz = get_slicespecific_var_name(osh, vars_table_accessor,
+ name, &prefixed_name);
+ if (prefixed_name_sz == 0) {
+ return BCME_NOMEM;
+ }
+
+ new_name = prefixed_name;
+ (void) new_name;
+ if (getvar(vars, new_name) == NULL) {
+ /* Try again without the slice prefix in the name */
+ new_name = name;
+ if (getvar(vars, name) == NULL) {
+ err = BCME_NOTFOUND;
+ goto done;
+ }
+ }
+
+ array_size = (uint)getintvararraysize(vars, new_name);
+ if (array_size > dest_size) {
+ err = BCME_BUFTOOSHORT;
+ ASSERT(array_size <= dest_size);
+ goto done;
+ }
+
+ /* limit the initialization to the size of the nvram array */
+ array_size = MIN(array_size, dest_size);
+
+ /* load the destination array with the nvram array values */
+ for (i = 0; i < array_size; i++) {
+ val = getintvararray(vars, new_name, i);
+ if (dest_array1) {
+ dest_array1[i] = (uint8)val;
+ } else if (dest_array2) {
+ dest_array2[i] = (int16)val;
+ }
+ }
+done:
+ MFREE(osh, prefixed_name, prefixed_name_sz);
+ return (err < 0) ? err : (int)array_size;
+}
+
+int
+BCMATTACHFN(get_uint8_vararray_slicespecific)(osl_t *osh, char *vars, char *vars_table_accessor,
+ const char* name, uint8* dest_array, uint dest_size)
+{
+ int ret;
+
+ ret = getintvararray_slicespecific(osh, vars, vars_table_accessor,
+ name, dest_array, NULL, dest_size);
+ return ret;
+}
+
+int
+BCMATTACHFN(get_int16_vararray_slicespecific)(osl_t *osh, char *vars, char *vars_table_accessor,
+ const char* name, int16* dest_array, uint dest_size)
+{
+ return getintvararray_slicespecific(osh, vars, vars_table_accessor,
+ name, NULL, dest_array, dest_size);
+}
+
+/* Prepend a slice-specific accessor to an nvram string name.
+ * Sets name_out to the allocated string. Returns the allocated size of the name string.
+ * Caller is responsible for freeing the resulting name string with MFREE.
+ */
+uint
+BCMATTACHFN(get_slicespecific_var_name)(osl_t *osh, char *vars_table_accessor, const char *name,
+ char **name_out)
+{
+ char *name_with_prefix = NULL;
+ uint sz;
+ uint max_copy_size;
+
+ sz = strlen(name) + strlen(vars_table_accessor) + 1;
+ name_with_prefix = (char *) MALLOC_NOPERSIST(osh, sz);
+ if (name_with_prefix == NULL) {
+ sz = 0;
+ goto end;
+ }
+ name_with_prefix[0] = 0;
+ name_with_prefix[sz - 1] = 0;
+ max_copy_size = sz - 1;
+
+ /* if accessor contains a "slice/N/" string */
+ if (vars_table_accessor[0] != 0) {
+ /* prepend accessor to the vars-name */
+ bcmstrncat(name_with_prefix, vars_table_accessor, max_copy_size);
+ max_copy_size -= strlen(name_with_prefix);
+ }
+
+ /* Append vars-name */
+ bcmstrncat(name_with_prefix, name, max_copy_size);
+end:
+ *name_out = name_with_prefix;
+ return sz;
+}
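+
+/* Illustrative prefixing (hypothetical var name): with vars_table_accessor
+ * set to "slice/1/" and name "txpwr", the allocated result is
+ * "slice/1/txpwr"; with an empty accessor it is just "txpwr". The caller
+ * frees the returned buffer with MFREE using the returned size.
+ */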
+#endif /* WL_FWSIGN */
+
+/* Search for token in comma separated token-string */
+static int
+findmatch(const char *string, const char *name)
+{
+ uint len;
+ char *c;
+
+ len = strlen(name);
+ while ((c = strchr(string, ',')) != NULL) {
+ if (len == (uint)(c - string) && !strncmp(string, name, len))
+ return 1;
+ string = c + 1;
+ }
+
+ return (!strcmp(string, name));
+}
+
+/* Return gpio pin number assigned to the named pin
+ *
+ * Variable should be in format:
+ *
+ * gpio<N>=pin_name,pin_name
+ *
+ * This format allows multiple features to share the gpio with mutual
+ * understanding.
+ *
+ * 'def_pin' is returned if a specific gpio is not defined for the requested functionality
+ * and if def_pin is not used by others.
+ */
+uint
+getgpiopin(char *vars, char *pin_name, uint def_pin)
+{
+ char name[] = "gpioXXXX";
+ char *val;
+ uint pin;
+
+ /* Go thru all possibilities till a match in pin name */
+ for (pin = 0; pin < GPIO_NUMPINS; pin ++) {
+ snprintf(name, sizeof(name), "gpio%d", pin);
+ val = getvar(vars, name);
+ if (val && findmatch(val, pin_name))
+ return pin;
+ }
+
+ if (def_pin != GPIO_PIN_NOTDEFINED) {
+ /* make sure the default pin is not used by someone else */
+ snprintf(name, sizeof(name), "gpio%d", def_pin);
+ if (getvar(vars, name)) {
+ def_pin = GPIO_PIN_NOTDEFINED;
+ }
+ }
+ return def_pin;
+}
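+
+/* Usage sketch (hypothetical pin name): with a vars entry
+ * "gpio13=wlan_led", getgpiopin(vars, "wlan_led", GPIO_PIN_NOTDEFINED)
+ * returns 13; if no gpio<N> var names the pin, the default is returned only
+ * when no gpio<N> var has claimed that default pin for something else.
+ */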
+#endif /* !BCMDONGLEHOST */
+
+/* Return the total length of a buffer chain. With CSO, a sub-MSDU may carry an extra
+ * TSO header; if the total length should not include that header, use the API
+ * pkttotlen_no_sfhtoe_hdr() instead.
+ */
+uint
+BCMFASTPATH(pkttotlen)(osl_t *osh, void *p)
+{
+ uint total = 0;
+
+ for (; p; p = PKTNEXT(osh, p)) {
+ total += PKTLEN(osh, p);
+
+ if (BCMLFRAG_ENAB() && PKTISFRAG(osh, p)) {
+ total += PKTFRAGTOTLEN(osh, p);
+ }
+ }
+
+ return (total);
+}
+
+#ifdef WLCSO
+/* return total length of buffer chain, but exclude the tso hdr of a submsdu if it's added */
+uint
+BCMFASTPATH(pkttotlen_no_sfhtoe_hdr)(osl_t *osh, void *p, uint toe_hdr_len)
+{
+ uint total = 0;
+
+ for (; p; p = PKTNEXT(osh, p)) {
+ total += PKTLEN(osh, p);
+
+		/* exclude toe_hdr_len if it's part of PKTLEN() */
+ if (PKTISSUBMSDUTOEHDR(osh, p)) {
+ total -= toe_hdr_len;
+ }
+
+ if (BCMLFRAG_ENAB() && PKTISFRAG(osh, p)) {
+ total += PKTFRAGTOTLEN(osh, p);
+ }
+ }
+
+ return (total);
+}
+#endif /* WLCSO */
+
+/* return total number of buffers in the chain */
+uint
+BCMFASTPATH(pkttotcnt)(osl_t *osh, void *p)
+{
+ uint cnt = 0;
+
+ for (; p; p = PKTNEXT(osh, p)) {
+ cnt++;
+ }
+
+ return (cnt);
+}
+
+/* return the last buffer of chained pkt */
+void *
+BCMFASTPATH(pktlast)(osl_t *osh, void *p)
+{
+ for (; PKTNEXT(osh, p); p = PKTNEXT(osh, p))
+ ;
+
+ return (p);
+}
+
+/* count segments of a chained packet */
+uint
+BCMFASTPATH(pktsegcnt)(osl_t *osh, void *p)
+{
+ uint cnt;
+
+ for (cnt = 0; p; p = PKTNEXT(osh, p)) {
+ if (PKTLEN(osh, p)) {
+ cnt++;
+ }
+#ifdef BCMLFRAG
+ if (BCMLFRAG_ENAB() && PKTISFRAG(osh, p)) {
+ cnt += PKTFRAGTOTNUM(osh, p);
+ }
+#endif /* BCMLFRAG */
+ }
+
+ return cnt;
+}
+
+#ifdef DONGLEBUILD
+/**
+ * Takes an lbuf/lfrag and the number of bytes to be trimmed from the tail.
+ * The bytes to trim may be laid out in one of three ways:
+ * 1. entirely in the dongle
+ * 2. entirely in the host
+ * 3. split between host and dongle
+ */
+void
+BCMFASTPATH(pktfrag_trim_tailbytes)(osl_t * osh, void* p, uint16 trim_len, uint8 type)
+{
+ uint16 tcmseg_len = PKTLEN(osh, p); /* TCM segment length */
+ uint16 hostseg_len = PKTFRAGUSEDLEN(osh, p); /* HOST segment length */
+
+ /* return if zero trim length- Nothing to do */
+ if (trim_len == 0)
+ return;
+
+ /* if header conv is on, there is no fcs at the end */
+ /* JIRA:SW4349-318 */
+ if (PKTISHDRCONVTD(osh, p))
+ return;
+
+ /* if pktfetched, then its already trimmed */
+ if (PKTISPKTFETCHED(osh, p))
+ return;
+
+ if (PKTFRAGUSEDLEN(osh, p) >= trim_len) {
+ /* TRIM bytes entirely in host */
+ ASSERT_FP(PKTISRXFRAG(osh, p));
+
+ PKTSETFRAGUSEDLEN(osh, p, (hostseg_len - trim_len));
+ } else {
+ /* trim bytes either in dongle or split between dongle-host */
+ PKTSETLEN(osh, p, (tcmseg_len - (trim_len - hostseg_len)));
+
+ /* No more contents in host; reset length to zero */
+ if (PKTFRAGUSEDLEN(osh, p))
+ PKTSETFRAGUSEDLEN(osh, p, 0);
+ }
+}
+#endif /* DONGLEBUILD */
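+
+/* Worked example for the split case above (illustrative numbers): with
+ * PKTLEN == 64 bytes in TCM, PKTFRAGUSEDLEN == 10 bytes in host memory and
+ * trim_len == 14, the host segment (10) is smaller than the trim, so the
+ * TCM segment is shortened to 64 - (14 - 10) = 60 bytes and the host
+ * length is reset to zero.
+ */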
+
+/* copy a pkt buffer chain into a buffer */
+uint
+pktcopy(osl_t *osh, void *p, uint offset, uint len, uchar *buf)
+{
+ uint n, ret = 0;
+
+ /* skip 'offset' bytes */
+ for (; p && offset; p = PKTNEXT(osh, p)) {
+ if (offset < PKTLEN(osh, p))
+ break;
+ offset -= PKTLEN(osh, p);
+ }
+
+ if (!p)
+ return 0;
+
+ /* copy the data */
+ for (; p && len; p = PKTNEXT(osh, p)) {
+ n = MIN(PKTLEN(osh, p) - offset, len);
+ bcopy(PKTDATA(osh, p) + offset, buf, n);
+ buf += n;
+ len -= n;
+ ret += n;
+ offset = 0;
+ }
+
+ return ret;
+}
+
+/* copy a buffer into a pkt buffer chain */
+uint
+pktfrombuf(osl_t *osh, void *p, uint offset, uint len, uchar *buf)
+{
+ uint n, ret = 0;
+
+ /* skip 'offset' bytes */
+ for (; p && offset; p = PKTNEXT(osh, p)) {
+ if (offset < PKTLEN(osh, p))
+ break;
+ offset -= PKTLEN(osh, p);
+ }
+
+ if (!p)
+ return 0;
+
+ /* copy the data */
+ for (; p && len; p = PKTNEXT(osh, p)) {
+ n = MIN(PKTLEN(osh, p) - offset, len);
+ bcopy(buf, PKTDATA(osh, p) + offset, n);
+ buf += n;
+ len -= n;
+ ret += n;
+ offset = 0;
+ }
+
+ return ret;
+}
+
+#ifdef NOT_YET
+/* copy data from one pkt buffer (chain) to another */
+uint
+pkt2pktcopy(osl_t *osh, void *p1, uint offs1, void *p2, uint offs2, uint maxlen)
+{
+ uint8 *dp1, *dp2;
+ uint len1, len2, copylen, totallen;
+
+	for (; p1 && offs1; p1 = PKTNEXT(osh, p1)) {
+ if (offs1 < (uint)PKTLEN(osh, p1))
+ break;
+ offs1 -= PKTLEN(osh, p1);
+ }
+	for (; p2 && offs2; p2 = PKTNEXT(osh, p2)) {
+ if (offs2 < (uint)PKTLEN(osh, p2))
+ break;
+ offs2 -= PKTLEN(osh, p2);
+ }
+
+ /* Heck w/it, only need the above for now */
+}
+#endif /* NOT_YET */
+
+uint8 *
+BCMFASTPATH(pktdataoffset)(osl_t *osh, void *p, uint offset)
+{
+ uint total = pkttotlen(osh, p);
+ uint pkt_off = 0, len = 0;
+ uint8 *pdata = (uint8 *) PKTDATA(osh, p);
+
+ if (offset > total)
+ return NULL;
+
+ for (; p; p = PKTNEXT(osh, p)) {
+ pdata = (uint8 *) PKTDATA(osh, p);
+ pkt_off = offset - len;
+ len += PKTLEN(osh, p);
+ if (len > offset)
+ break;
+ }
+ return (uint8*) (pdata+pkt_off);
+}
+
+/* given an offset in pdata, find the pkt seg hdr */
+void *
+pktoffset(osl_t *osh, void *p, uint offset)
+{
+ uint total = pkttotlen(osh, p);
+ uint len = 0;
+
+ if (offset > total)
+ return NULL;
+
+ for (; p; p = PKTNEXT(osh, p)) {
+ len += PKTLEN(osh, p);
+ if (len > offset)
+ break;
+ }
+ return p;
+}
+
+void
+bcm_mdelay(uint ms)
+{
+ uint i;
+
+ for (i = 0; i < ms; i++) {
+ OSL_DELAY(1000);
+ }
+}
+
+#if defined(BCMPERFSTATS) || defined(BCMTSTAMPEDLOGS)
+
+#if defined(__ARM_ARCH_7R__)
+#define BCMLOG_CYCLE_OVERHEAD 54 /* Number of CPU cycle overhead due to bcmlog().
+ * This is to compensate CPU cycle incurred by
+ * added bcmlog() function call for profiling.
+ */
+#else
+#define BCMLOG_CYCLE_OVERHEAD 0
+#endif
+
+#define LOGSIZE 256 /* should be power of 2 to avoid div below */
+static struct {
+ uint cycles;
+ const char *fmt;
+ uint a1;
+ uint a2;
+ uchar indent; /* track indent level for nice printing */
+} logtab[LOGSIZE];
+
+/* last entry logged */
+static uint logi = 0;
+/* next entry to read */
+static uint volatile readi = 0;
+#endif /* defined(BCMPERFSTATS) || defined(BCMTSTAMPEDLOGS) */
+
+#ifdef BCMPERFSTATS
+/* TODO: make the utility configurable (choose between icache, dcache, hits, misses ...) */
+void
+bcm_perf_enable()
+{
+ BCMPERF_ENABLE_INSTRCOUNT();
+ BCMPERF_ENABLE_ICACHE_MISS();
+ BCMPERF_ENABLE_ICACHE_HIT();
+}
+
+/* WARNING: This routine uses OSL_GETCYCLES(), which can give unexpected results on
+ * modern speed stepping CPUs. Use bcmtslog() instead in combination with TSF counter.
+ */
+void
+bcmlog(char *fmt, uint a1, uint a2)
+{
+ static uint last = 0;
+ uint cycles, i, elapsed;
+ OSL_GETCYCLES(cycles);
+
+ i = logi;
+
+ elapsed = cycles - last;
+ if (elapsed > BCMLOG_CYCLE_OVERHEAD)
+ logtab[i].cycles = elapsed - BCMLOG_CYCLE_OVERHEAD;
+ else
+ logtab[i].cycles = 0;
+ logtab[i].fmt = fmt;
+ logtab[i].a1 = a1;
+ logtab[i].a2 = a2;
+
+ logi = (i + 1) % LOGSIZE;
+ last = cycles;
+
+ /* if log buffer is overflowing, readi should be advanced.
+ * Otherwise logi and readi will become out of sync.
+ */
+ if (logi == readi) {
+ readi = (readi + 1) % LOGSIZE;
+ } else {
+		/* This redundant else keeps the CPU cycle count of bcmlog() uniform,
+ * so that the cycle compensation with BCMLOG_CYCLE_OVERHEAD is more accurate.
+ */
+ readi = readi % LOGSIZE;
+ }
+}
+
+/* Same as bcmlog but specializes the use of a1 and a2 to
+ * store icache misses and instruction count.
+ * TODO : make this use a configuration array to decide what counter to read.
+ * We are limited to 2 numbers but it seems it is the most we can get anyway
+ * since dcache and icache cannot be enabled at the same time. Recording
+ * both the hits and misses at the same time for a given cache is not that useful either.
+*/
+
+void
+bcmstats(char *fmt)
+{
+ static uint last = 0;
+ static uint32 ic_miss = 0;
+ static uint32 instr_count = 0;
+ uint32 ic_miss_cur;
+ uint32 instr_count_cur;
+ uint cycles, i;
+
+ OSL_GETCYCLES(cycles);
+ BCMPERF_GETICACHE_MISS(ic_miss_cur);
+ BCMPERF_GETINSTRCOUNT(instr_count_cur);
+
+ i = logi;
+
+ logtab[i].cycles = cycles - last;
+ logtab[i].a1 = ic_miss_cur - ic_miss;
+ logtab[i].a2 = instr_count_cur - instr_count;
+ logtab[i].fmt = fmt;
+
+ logi = (i + 1) % LOGSIZE;
+
+ last = cycles;
+ instr_count = instr_count_cur;
+ ic_miss = ic_miss_cur;
+
+ /* if log buffer is overflowing, readi should be advanced.
+ * Otherwise logi and readi will become out of sync.
+ */
+ if (logi == readi) {
+ readi = (readi + 1) % LOGSIZE;
+ } else {
+		/* This redundant else keeps the CPU cycle cost of bcmstats() uniform
+		 * across both paths.
+		 */
+ readi = readi % LOGSIZE;
+ }
+}
+
+/*
+ * TODO (linux version): a "proc" version where the log would be dumped
+ * on the proc file directly.
+ */
+
+void
+bcmdumplog(char *buf, int size)
+{
+ char *limit;
+ int j = 0;
+ int num;
+
+ limit = buf + size - 80;
+ *buf = '\0';
+
+ num = logi - readi;
+
+ if (num < 0)
+ num += LOGSIZE;
+
+ /* print in chronological order */
+
+ for (j = 0; j < num && (buf < limit); readi = (readi + 1) % LOGSIZE, j++) {
+ if (logtab[readi].fmt == NULL)
+ continue;
+ buf += snprintf(buf, (limit - buf), "%d\t", logtab[readi].cycles);
+ buf += snprintf(buf, (limit - buf), logtab[readi].fmt, logtab[readi].a1,
+ logtab[readi].a2);
+ buf += snprintf(buf, (limit - buf), "\n");
+ }
+
+}
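+
+/* Illustrative sketch (assumes BCMPERFSTATS): bracket a code path with
+ * bcmlog() calls, then dump the recorded cycle deltas. Only the fmt
+ * pointer is stored, so format strings must be literals or otherwise
+ * outlive the log buffer; dump_buf is caller-provided and must be larger
+ * than 80 bytes (bcmdumplog() reserves that much headroom per line).
+ *
+ *	bcmlog("isr enter", 0, 0);
+ *	... code under test ...
+ *	bcmlog("isr exit", 0, 0);
+ *	bcmdumplog(dump_buf, sizeof(dump_buf));
+ */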
+
+/*
+ * Dump one log entry at a time.
+ * Returns the index of the next entry, or -1 when there are no more.
+ */
+int
+bcmdumplogent(char *buf, uint i)
+{
+ bool hit;
+
+ /*
+ * If buf is NULL, return the starting index,
+ * interpreting i as the indicator of last 'i' entries to dump.
+ */
+ if (buf == NULL) {
+ i = ((i > 0) && (i < (LOGSIZE - 1))) ? i : (LOGSIZE - 1);
+ return ((logi - i) % LOGSIZE);
+ }
+
+ *buf = '\0';
+
+ ASSERT(i < LOGSIZE);
+
+ if (i == logi)
+ return (-1);
+
+ hit = FALSE;
+ for (; (i != logi) && !hit; i = (i + 1) % LOGSIZE) {
+ if (logtab[i].fmt == NULL)
+ continue;
+ buf += snprintf(buf, LOGSIZE, "%d: %d\t", i, logtab[i].cycles);
+ buf += snprintf(buf, LOGSIZE, logtab[i].fmt, logtab[i].a1, logtab[i].a2);
+ buf += snprintf(buf, LOGSIZE, "\n");
+ hit = TRUE;
+ }
+
+ return (i);
+}
+
+#endif /* BCMPERFSTATS */
+
+#if defined(BCMTSTAMPEDLOGS)
+/* Store a TSF timestamp and a log line in the log buffer */
+/*
+ a1 is used to signify entering/exiting a routine. When entering
+ the indent level is increased. When exiting, the delta since entering
+ is printed and the indent level is bumped back out.
+ Nesting can go up to level MAX_TS_INDENTS deep.
+*/
+#define MAX_TS_INDENTS 20
+void
+bcmtslog(uint32 tstamp, const char *fmt, uint a1, uint a2)
+{
+ uint i = logi;
+ bool use_delta = TRUE;
+ static uint32 last = 0; /* used only when use_delta is true */
+ static uchar indent = 0;
+ static uint32 indents[MAX_TS_INDENTS];
+
+ logtab[i].cycles = tstamp;
+ if (use_delta)
+ logtab[i].cycles -= last;
+
+ logtab[i].a2 = a2;
+
+ if (a1 == TS_EXIT && indent) {
+ indent--;
+ logtab[i].a2 = tstamp - indents[indent];
+ }
+
+ logtab[i].fmt = fmt;
+ logtab[i].a1 = a1;
+ logtab[i].indent = indent;
+
+ if (a1 == TS_ENTER) {
+ indents[indent] = tstamp;
+ if (indent < MAX_TS_INDENTS - 1)
+ indent++;
+ }
+
+ if (use_delta)
+ last = tstamp;
+ logi = (i + 1) % LOGSIZE;
+}
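+
+/* Illustrative sketch: nest TS_ENTER/TS_EXIT pairs so that bcmdumptslog()
+ * can print per-routine deltas with indentation. 'tsf_us' stands in for
+ * whatever microsecond TSF read the platform provides.
+ *
+ *	bcmtslog(tsf_us, "wl_isr", TS_ENTER, 0);
+ *	... routine body ...
+ *	bcmtslog(tsf_us, "wl_isr", TS_EXIT, 0);	-- a2 is replaced by the delta
+ */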
+
+/* Print out a microsecond timestamp as "sec.ms.us " */
+void
+bcmprinttstamp(uint32 ticks)
+{
+ uint us, ms, sec;
+
+ us = (ticks % TSF_TICKS_PER_MS) * 1000 / TSF_TICKS_PER_MS;
+ ms = ticks / TSF_TICKS_PER_MS;
+ sec = ms / 1000;
+ ms -= sec * 1000;
+ printf("%04u.%03u.%03u ", sec, ms, us);
+}
+
+/* Print out the log buffer with timestamps */
+void
+bcmprinttslogs(void)
+{
+ int j = 0;
+ int num;
+
+ num = logi - readi;
+ if (num < 0)
+ num += LOGSIZE;
+
+ /* Format and print the log entries directly in chronological order */
+ for (j = 0; j < num; readi = (readi + 1) % LOGSIZE, j++) {
+ if (logtab[readi].fmt == NULL)
+ continue;
+ bcmprinttstamp(logtab[readi].cycles);
+ printf(logtab[readi].fmt, logtab[readi].a1, logtab[readi].a2);
+ printf("\n");
+ }
+}
+
+/*
+ Identical to bcmdumplog, but output is based on tsf instead of cycles.
+
+ a1 is used to signify entering/exiting a routine. When entering
+ the indent level is increased. When exiting, the delta since entering
+ is printed and the indent level is bumped back out.
+*/
+void
+bcmdumptslog(struct bcmstrbuf *b)
+{
+ char *limit;
+ int j = 0;
+ int num;
+ uint us, ms, sec;
+ int skip;
+ char *lines = "| | | | | | | | | | | | | | | | | | | |";
+
+ limit = BCMSTRBUF_BUF(b) + BCMSTRBUF_LEN(b) - 80;
+
+ num = logi - readi;
+
+ if (num < 0)
+ num += LOGSIZE;
+
+ /* print in chronological order */
+ for (j = 0; j < num && (BCMSTRBUF_BUF(b) < limit); readi = (readi + 1) % LOGSIZE, j++) {
+ char *last_buf = BCMSTRBUF_BUF(b);
+ if (logtab[readi].fmt == NULL)
+ continue;
+
+ us = (logtab[readi].cycles % TSF_TICKS_PER_MS) * 1000 / TSF_TICKS_PER_MS;
+ ms = logtab[readi].cycles / TSF_TICKS_PER_MS;
+ sec = ms / 1000;
+ ms -= sec * 1000;
+
+ bcm_bprintf(b, "%04u.%03u.%03u ", sec, ms, us);
+
+ /* 2 spaces for each indent level */
+ bcm_bprintf(b, "%.*s", logtab[readi].indent * 2, lines);
+
+ /*
+ * The following call to snprintf generates a compiler warning
+ * due to -Wformat-security. However, the format string is coming
+ * from internal callers rather than external data input, and is a
+ * useful debugging tool serving a variety of diagnostics. Rather
+ * than expand code size by replicating multiple functions with different
+ * argument lists, or disabling the warning globally, let's consider
+ * if we can just disable the warning for this one instance.
+ */
+ bcm_bprintf(b, logtab[readi].fmt);
+
+ /* If a1 is ENTER or EXIT, print the + or - */
+ skip = 0;
+ if (logtab[readi].a1 == TS_ENTER) {
+ bcm_bprintf(b, " +");
+ skip++;
+ }
+ if (logtab[readi].a1 == TS_EXIT) {
+ bcm_bprintf(b, " -");
+ skip++;
+ }
+
+ /* else print the real a1 */
+ if (logtab[readi].a1 && !skip)
+ bcm_bprintf(b, " %d", logtab[readi].a1);
+
+ /*
+ If exiting routine, print a nicely formatted delta since entering.
+ Otherwise, just print a2 normally.
+ */
+ if (logtab[readi].a2) {
+ if (logtab[readi].a1 == TS_EXIT) {
+ int num_space = 75 - (BCMSTRBUF_BUF(b) - last_buf);
+ bcm_bprintf(b, "%*.s", num_space, "");
+ bcm_bprintf(b, "%5d usecs", logtab[readi].a2);
+ } else
+ bcm_bprintf(b, " %d", logtab[readi].a2);
+ }
+ bcm_bprintf(b, "\n");
+ last_buf = BCMSTRBUF_BUF(b);
+ }
+}
+
+#endif /* BCMTSTAMPEDLOGS */
+
+#if defined(BCMDBG) || defined(DHD_DEBUG)
+/* pretty hex print a pkt buffer chain */
+void
+prpkt(const char *msg, osl_t *osh, void *p0)
+{
+ void *p;
+
+ if (msg && (msg[0] != '\0'))
+ printf("%s:\n", msg);
+
+ for (p = p0; p; p = PKTNEXT(osh, p))
+ prhex(NULL, PKTDATA(osh, p), PKTLEN(osh, p));
+}
+#endif /* BCMDBG || DHD_DEBUG */
+
+/* Takes an Ethernet frame and sets out-of-bound PKTPRIO.
+ * Also updates the inplace vlan tag if requested.
+ * For debugging, it returns an indication of what it did.
+ */
+uint
+BCMFASTPATH(pktsetprio)(void *pkt, bool update_vtag)
+{
+ struct ether_header *eh;
+ struct ethervlan_header *evh;
+ uint8 *pktdata;
+ int priority = 0;
+ int rc = 0;
+
+ pktdata = (uint8 *)PKTDATA(OSH_NULL, pkt);
+ ASSERT_FP(ISALIGNED((uintptr)pktdata, sizeof(uint16)));
+
+ eh = (struct ether_header *) pktdata;
+
+ if (eh->ether_type == hton16(ETHER_TYPE_8021Q)) {
+ uint16 vlan_tag;
+ int vlan_prio, dscp_prio = 0;
+
+ evh = (struct ethervlan_header *)eh;
+
+ vlan_tag = ntoh16(evh->vlan_tag);
+ vlan_prio = (int) (vlan_tag >> VLAN_PRI_SHIFT) & VLAN_PRI_MASK;
+
+ if ((evh->ether_type == hton16(ETHER_TYPE_IP)) ||
+ (evh->ether_type == hton16(ETHER_TYPE_IPV6))) {
+ uint8 *ip_body = pktdata + sizeof(struct ethervlan_header);
+ uint8 tos_tc = IP_TOS46(ip_body);
+ dscp_prio = (int)(tos_tc >> IPV4_TOS_PREC_SHIFT);
+ }
+
+ /* DSCP priority gets precedence over 802.1P (vlan tag) */
+ if (dscp_prio != 0) {
+ priority = dscp_prio;
+ rc |= PKTPRIO_VDSCP;
+ } else {
+ priority = vlan_prio;
+ rc |= PKTPRIO_VLAN;
+ }
+ /*
+ * If the DSCP priority is not the same as the VLAN priority,
+ * then overwrite the priority field in the vlan tag, with the
+ * DSCP priority value. This is required for Linux APs because
+ * the VLAN driver on Linux, overwrites the skb->priority field
+ * with the priority value in the vlan tag
+ */
+ if (update_vtag && (priority != vlan_prio)) {
+ vlan_tag &= ~(VLAN_PRI_MASK << VLAN_PRI_SHIFT);
+ vlan_tag |= (uint16)priority << VLAN_PRI_SHIFT;
+ evh->vlan_tag = hton16(vlan_tag);
+ rc |= PKTPRIO_UPD;
+ }
+#if defined(EAPOL_PKT_PRIO) || defined(DHD_LOSSLESS_ROAMING)
+ } else if (eh->ether_type == hton16(ETHER_TYPE_802_1X)) {
+ priority = PRIO_8021D_NC;
+ rc = PKTPRIO_DSCP;
+#endif /* EAPOL_PKT_PRIO || DHD_LOSSLESS_ROAMING */
+#if defined(WLTDLS)
+ } else if (eh->ether_type == hton16(ETHER_TYPE_89_0D)) {
+ /* Bump up the priority for TDLS frames */
+ priority = PRIO_8021D_VI;
+ rc = PKTPRIO_DSCP;
+#endif /* WLTDLS */
+ } else if ((eh->ether_type == hton16(ETHER_TYPE_IP)) ||
+ (eh->ether_type == hton16(ETHER_TYPE_IPV6))) {
+ uint8 *ip_body = pktdata + sizeof(struct ether_header);
+ uint8 tos_tc = IP_TOS46(ip_body);
+ uint8 dscp = tos_tc >> IPV4_TOS_DSCP_SHIFT;
+ switch (dscp) {
+ case DSCP_EF:
+ case DSCP_VA:
+ priority = PRIO_8021D_VO;
+ break;
+ case DSCP_AF31:
+ case DSCP_AF32:
+ case DSCP_AF33:
+ case DSCP_CS3:
+ priority = PRIO_8021D_CL;
+ break;
+ case DSCP_AF21:
+ case DSCP_AF22:
+ case DSCP_AF23:
+ priority = PRIO_8021D_EE;
+ break;
+ case DSCP_AF11:
+ case DSCP_AF12:
+ case DSCP_AF13:
+ case DSCP_CS2:
+ priority = PRIO_8021D_BE;
+ break;
+ case DSCP_CS6:
+ case DSCP_CS7:
+ priority = PRIO_8021D_NC;
+ break;
+ default:
+#ifndef CUSTOM_DSCP_TO_PRIO_MAPPING
+ priority = (int)(tos_tc >> IPV4_TOS_PREC_SHIFT);
+#else
+ if (dhd_dscpmap_enable) {
+ priority = (int)dscp2priomap[((tos_tc >> IPV4_TOS_DSCP_SHIFT)
+ & CUST_IPV4_TOS_PREC_MASK)];
+ }
+ else {
+ priority = (int)(tos_tc >> IPV4_TOS_PREC_SHIFT);
+ }
+#endif /* CUSTOM_DSCP_TO_PRIO_MAPPING */
+ break;
+ }
+
+ rc |= PKTPRIO_DSCP;
+ }
+
+ ASSERT_FP(priority >= 0 && priority <= MAXPRIO);
+ PKTSETPRIO(pkt, priority);
+ return (rc | priority);
+}
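+
+/* Illustrative sketch: classify a frame before enqueueing it. The low bits
+ * of the return value carry the 802.1D priority (0..MAXPRIO) and the high
+ * bits carry a PKTPRIO_* flag describing which header field it came from.
+ *
+ *	uint r = pktsetprio(pkt, TRUE);
+ *	uint prio = r & 0x7;
+ *	if (r & PKTPRIO_VDSCP)
+ *		... priority was derived from DSCP in a VLAN-tagged frame ...
+ */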
+
+/* lookup user priority for specified DSCP */
+static uint8
+dscp2up(uint8 *up_table, uint8 dscp)
+{
+ uint8 user_priority = 255;
+
+ /* lookup up from table if parameters valid */
+ if (up_table != NULL && dscp < UP_TABLE_MAX) {
+ user_priority = up_table[dscp];
+ }
+
+	/* 255 is an unused value, so derive the UP from the DSCP precedence bits */
+ if (user_priority == 255) {
+ user_priority = dscp >> (IPV4_TOS_PREC_SHIFT - IPV4_TOS_DSCP_SHIFT);
+ }
+
+ return user_priority;
+}
+
+/* set user priority by QoS Map Set table (UP table), table size is UP_TABLE_MAX */
+uint
+BCMFASTPATH(pktsetprio_qms)(void *pkt, uint8* up_table, bool update_vtag)
+{
+ if (up_table) {
+ uint8 *pktdata;
+ uint pktlen;
+ uint8 dscp;
+ uint user_priority = 0;
+ uint rc = 0;
+
+ pktdata = (uint8 *)PKTDATA(OSH_NULL, pkt);
+ pktlen = PKTLEN(OSH_NULL, pkt);
+ if (pktgetdscp(pktdata, pktlen, &dscp)) {
+ rc = PKTPRIO_DSCP;
+ user_priority = dscp2up(up_table, dscp);
+ PKTSETPRIO(pkt, user_priority);
+ }
+
+ return (rc | user_priority);
+ } else {
+ return pktsetprio(pkt, update_vtag);
+ }
+}
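+
+/* Illustrative sketch: when a QoS Map Set table is available the UP comes
+ * from the table, otherwise pktsetprio_qms() falls back to pktsetprio().
+ *
+ *	uint8 up_table[UP_TABLE_MAX];
+ *	if (wl_set_up_table(up_table, qos_map_ie) == BCME_OK)
+ *		pktsetprio_qms(pkt, up_table, TRUE);
+ *	else
+ *		pktsetprio_qms(pkt, NULL, TRUE);	-- falls back to pktsetprio()
+ */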
+
+/* Returns TRUE and DSCP if IP header found, FALSE otherwise.
+ */
+bool
+BCMFASTPATH(pktgetdscp)(uint8 *pktdata, uint pktlen, uint8 *dscp)
+{
+ struct ether_header *eh;
+ struct ethervlan_header *evh;
+ uint8 *ip_body;
+ bool rc = FALSE;
+
+ /* minimum length is ether header and IP header */
+ if (pktlen < (sizeof(struct ether_header) + IPV4_MIN_HEADER_LEN)) {
+ return FALSE;
+ }
+
+ eh = (struct ether_header *) pktdata;
+
+ if ((eh->ether_type == HTON16(ETHER_TYPE_IP)) ||
+ (eh->ether_type == HTON16(ETHER_TYPE_IPV6))) {
+ ip_body = pktdata + sizeof(struct ether_header);
+ *dscp = IP_DSCP46(ip_body);
+ rc = TRUE;
+ }
+ else if (eh->ether_type == HTON16(ETHER_TYPE_8021Q)) {
+ evh = (struct ethervlan_header *)eh;
+
+ /* minimum length is ethervlan header and IP header */
+ if (pktlen >= sizeof(struct ethervlan_header) + IPV4_MIN_HEADER_LEN &&
+ evh->ether_type == HTON16(ETHER_TYPE_IP)) {
+ ip_body = pktdata + sizeof(struct ethervlan_header);
+ *dscp = IP_DSCP46(ip_body);
+ rc = TRUE;
+ }
+ }
+
+ return rc;
+}
+
+/* set UP table entries over the DSCP range [low..high] to usr_prio */
+static bool
+up_table_set(uint8 *up_table, uint8 usr_prio, uint8 low, uint8 high)
+{
+ int i;
+
+ if (usr_prio > 7 || low > high || low >= UP_TABLE_MAX || high >= UP_TABLE_MAX) {
+ return FALSE;
+ }
+
+ for (i = low; i <= high; i++) {
+ up_table[i] = usr_prio;
+ }
+
+ return TRUE;
+}
+
+/* set user priority table */
+int
+BCMFASTPATH(wl_set_up_table)(uint8 *up_table, bcm_tlv_t *qos_map_ie)
+{
+ uint8 len;
+
+ if (up_table == NULL || qos_map_ie == NULL) {
+ return BCME_ERROR;
+ }
+
+ /* clear table to check table was set or not */
+ memset(up_table, 0xff, UP_TABLE_MAX);
+
+ /* length of QoS Map IE must be 16+n*2, n is number of exceptions */
+ if (qos_map_ie != NULL && qos_map_ie->id == DOT11_MNG_QOS_MAP_ID &&
+ (len = qos_map_ie->len) >= QOS_MAP_FIXED_LENGTH &&
+ (len % 2) == 0) {
+ uint8 *except_ptr = (uint8 *)qos_map_ie->data;
+ uint8 except_len = len - QOS_MAP_FIXED_LENGTH;
+ uint8 *range_ptr = except_ptr + except_len;
+ int i;
+
+ /* fill in ranges */
+ for (i = 0; i < QOS_MAP_FIXED_LENGTH; i += 2) {
+ uint8 low = range_ptr[i];
+ uint8 high = range_ptr[i + 1];
+ if (low == 255 && high == 255) {
+ continue;
+ }
+
+ if (!up_table_set(up_table, i / 2, low, high)) {
+ /* clear the table on failure */
+ memset(up_table, 0xff, UP_TABLE_MAX);
+ return BCME_ERROR;
+ }
+ }
+
+ /* update exceptions */
+ for (i = 0; i < except_len; i += 2) {
+ uint8 dscp = except_ptr[i];
+ uint8 usr_prio = except_ptr[i+1];
+
+ /* exceptions with invalid dscp/usr_prio are ignored */
+ up_table_set(up_table, usr_prio, dscp, dscp);
+ }
+ }
+
+ return BCME_OK;
+}
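+
+/* Illustrative layout of the QoS Map IE consumed above (values made up):
+ * the first 'len - 16' data bytes are (dscp, up) exception pairs, and the
+ * remaining 16 bytes are 8 fixed (low, high) DSCP range pairs, one per
+ * UP 0..7; a (255, 255) pair marks an unused UP.
+ *
+ *	exceptions: (8, 1)                        DSCP 8 maps to UP 1
+ *	ranges:     (0, 7) (8, 15) ... (56, 63)   UP 0, UP 1, ... UP 7
+ */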
+
+#ifndef BCM_BOOTLOADER
+/* The 0.5KB string table below is not removed by the compiler even when it is unused */
+
+static char bcm_undeferrstr[32];
+static const char *bcmerrorstrtable[] = BCMERRSTRINGTABLE;
+
+/* Convert the error codes into related error strings */
+/* BCMRAMFN for BCME_LAST usage */
+const char *
+BCMRAMFN(bcmerrorstr)(int bcmerror)
+{
+ /* check if someone added a bcmerror code but forgot to add errorstring */
+ ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(bcmerrorstrtable) - 1));
+
+ if (bcmerror > 0 || bcmerror < BCME_LAST) {
+ snprintf(bcm_undeferrstr, sizeof(bcm_undeferrstr), "Undefined error %d", bcmerror);
+ return bcm_undeferrstr;
+ }
+
+ ASSERT(strlen(bcmerrorstrtable[-bcmerror]) < BCME_STRLEN);
+
+ return bcmerrorstrtable[-bcmerror];
+}
+
+#endif /* !BCM_BOOTLOADER */
+
+#ifdef BCMDBG_PKT /* pkt logging for debugging */
+/* Add a packet to the pktlist */
+static void
+_pktlist_add(pktlist_info_t *pktlist, void *pkt, int line, char *file)
+{
+ uint16 i;
+ char *basename;
+#ifdef BCMDBG_PTRACE
+ uint16 *idx = PKTLIST_IDX(pkt);
+#endif /* BCMDBG_PTRACE */
+
+ ASSERT(pktlist->count < PKTLIST_SIZE);
+
+ /* Verify the packet is not already part of the list */
+ for (i = 0; i < pktlist->count; i++) {
+ if (pktlist->list[i].pkt == pkt)
+ ASSERT(0);
+ }
+ pktlist->list[pktlist->count].pkt = pkt;
+ pktlist->list[pktlist->count].line = line;
+
+ basename = strrchr(file, '/');
+ if (basename)
+ basename++;
+ else
+ basename = file;
+ pktlist->list[pktlist->count].file = basename;
+#ifdef BCMDBG_PTRACE
+ *idx = pktlist->count;
+ bzero(pktlist->list[pktlist->count].pkt_trace, PKTTRACE_MAX_BYTES);
+#endif /* BCMDBG_PTRACE */
+ pktlist->count++;
+
+ return;
+}
+
+void
+pktlist_add(pktlist_info_t *pktlist, void *pkt, int line, char *file)
+{
+ void *p;
+ for (p = pkt; p != NULL; p = PKTCLINK(p))
+ _pktlist_add(pktlist, p, line, file);
+}
+
+/* Remove a packet from the pktlist */
+static void
+_pktlist_remove(pktlist_info_t *pktlist, void *pkt)
+{
+ uint16 i;
+ uint16 num = pktlist->count;
+#ifdef BCMDBG_PTRACE
+ uint16 *idx = PKTLIST_IDX(pkt);
+
+ ASSERT((*idx) < pktlist->count);
+#endif /* BCMDBG_PTRACE */
+
+ /* find the index where pkt exists */
+ for (i = 0; i < num; i++) {
+ /* check for the existence of pkt in the list */
+ if (pktlist->list[i].pkt == pkt) {
+#ifdef BCMDBG_PTRACE
+ ASSERT((*idx) == i);
+#endif /* BCMDBG_PTRACE */
+ /* replace with the last element */
+ pktlist->list[i].pkt = pktlist->list[num-1].pkt;
+ pktlist->list[i].line = pktlist->list[num-1].line;
+ pktlist->list[i].file = pktlist->list[num-1].file;
+#ifdef BCMDBG_PTRACE
+ memcpy(pktlist->list[i].pkt_trace, pktlist->list[num-1].pkt_trace,
+ PKTTRACE_MAX_BYTES);
+ idx = PKTLIST_IDX(pktlist->list[i].pkt);
+ *idx = i;
+#endif /* BCMDBG_PTRACE */
+ pktlist->count--;
+ return;
+ }
+ }
+ ASSERT(0);
+}
+
+void
+pktlist_remove(pktlist_info_t *pktlist, void *pkt)
+{
+ void *p;
+ for (p = pkt; p != NULL; p = PKTCLINK(p))
+ _pktlist_remove(pktlist, p);
+}
+
+#ifdef BCMDBG_PTRACE
+static void
+_pktlist_trace(pktlist_info_t *pktlist, void *pkt, uint16 bit)
+{
+ uint16 *idx = PKTLIST_IDX(pkt);
+
+ ASSERT(((*idx) < pktlist->count) && (bit < PKTTRACE_MAX_BITS));
+ ASSERT(pktlist->list[(*idx)].pkt == pkt);
+
+ pktlist->list[(*idx)].pkt_trace[bit/NBBY] |= (1 << ((bit)%NBBY));
+
+}
+void
+pktlist_trace(pktlist_info_t *pktlist, void *pkt, uint16 bit)
+{
+ void *p;
+ for (p = pkt; p != NULL; p = PKTCLINK(p))
+ _pktlist_trace(pktlist, p, bit);
+}
+#endif /* BCMDBG_PTRACE */
+
+/* Dump the pktlist, including the pkttag and packet data contents of each
+ * packet. When 'buf' is NULL the dump is printed to the console; otherwise
+ * 'buf' must be large enough to hold the full dump.
+ */
+
+char *
+pktlist_dump(pktlist_info_t *pktlist, char *buf)
+{
+ char *obuf = buf;
+ uint16 i;
+
+ if (buf != NULL)
+ buf += sprintf(buf, "Packet list dump:\n");
+ else
+ printf("Packet list dump:\n");
+
+ for (i = 0; i < (pktlist->count); i++) {
+ if (buf != NULL)
+ buf += sprintf(buf, "Pkt_addr: 0x%p Line: %d File: %s\t",
+ OSL_OBFUSCATE_BUF(pktlist->list[i].pkt), pktlist->list[i].line,
+ pktlist->list[i].file);
+ else
+ printf("Pkt_addr: 0x%p Line: %d File: %s\t",
+ OSL_OBFUSCATE_BUF(pktlist->list[i].pkt),
+ pktlist->list[i].line, pktlist->list[i].file);
+
+/* #ifdef NOTDEF Remove this ifdef to print pkttag and pktdata */
+ if (buf != NULL) {
+ if (PKTTAG(pktlist->list[i].pkt)) {
+ /* Print pkttag */
+ buf += sprintf(buf, "Pkttag(in hex): ");
+ buf += bcm_format_hex(buf, PKTTAG(pktlist->list[i].pkt),
+ OSL_PKTTAG_SZ);
+ }
+ buf += sprintf(buf, "Pktdata(in hex): ");
+ buf += bcm_format_hex(buf, PKTDATA(OSH_NULL, pktlist->list[i].pkt),
+ PKTLEN(OSH_NULL, pktlist->list[i].pkt));
+ } else {
+ void *pkt = pktlist->list[i].pkt, *npkt;
+
+ printf("Pkt[%d] Dump:\n", i);
+ while (pkt) {
+ int hroom;
+ uint pktlen;
+ uchar *src;
+#ifdef BCMDBG_PTRACE
+ uint16 *idx = PKTLIST_IDX(pkt);
+
+ ASSERT((*idx) < pktlist->count);
+ prhex("Pkt Trace (in hex):", pktlist->list[(*idx)].pkt_trace,
+ PKTTRACE_MAX_BYTES);
+#endif /* BCMDBG_PTRACE */
+ npkt = (void *)PKTNEXT(OSH_NULL, pkt);
+ PKTSETNEXT(OSH_NULL, pkt, NULL);
+
+ src = (uchar *)(PKTTAG(pkt));
+ pktlen = PKTLEN(OSH_NULL, pkt);
+ hroom = PKTHEADROOM(OSH_NULL, pkt);
+
+ printf("Pkttag_addr: %p\n", OSL_OBFUSCATE_BUF(src));
+ if (src)
+ prhex("Pkttag(in hex): ", src, OSL_PKTTAG_SZ);
+ src = (uchar *) (PKTDATA(OSH_NULL, pkt));
+ printf("Pkthead_addr: %p len: %d\n",
+ OSL_OBFUSCATE_BUF(src - hroom), hroom);
+ prhex("Pkt headroom content(in hex): ", src - hroom, hroom);
+ printf("Pktdata_addr: %p len: %d\n",
+ OSL_OBFUSCATE_BUF(src), pktlen);
+ prhex("Pktdata(in hex): ", src, pktlen);
+
+ pkt = npkt;
+ }
+ }
+/* #endif NOTDEF */
+
+ if (buf != NULL)
+ buf += sprintf(buf, "\n");
+ else
+ printf("\n");
+ }
+ return obuf;
+}
+#endif /* BCMDBG_PKT */
+
+/* iovar table lookup */
+/* could mandate sorted tables and do a binary search */
+const bcm_iovar_t*
+bcm_iovar_lookup(const bcm_iovar_t *table, const char *name)
+{
+ const bcm_iovar_t *vi;
+ const char *lookup_name;
+
+ /* skip any ':' delimited option prefixes */
+ lookup_name = strrchr(name, ':');
+ if (lookup_name != NULL)
+ lookup_name++;
+ else
+ lookup_name = name;
+
+ ASSERT(table != NULL);
+
+ for (vi = table; vi->name; vi++) {
+ if (!strcmp(vi->name, lookup_name))
+ return vi;
+ }
+ /* ran to end of table */
+
+ return NULL; /* var name not found */
+}
+
+int
+bcm_iovar_lencheck(const bcm_iovar_t *vi, void *arg, uint len, bool set)
+{
+ int bcmerror = 0;
+ BCM_REFERENCE(arg);
+
+ /* length check on io buf */
+ switch (vi->type) {
+ case IOVT_BOOL:
+ case IOVT_INT8:
+ case IOVT_INT16:
+ case IOVT_INT32:
+ case IOVT_UINT8:
+ case IOVT_UINT16:
+ case IOVT_UINT32:
+ /* all integers are int32 sized args at the ioctl interface */
+ if (len < sizeof(int)) {
+ bcmerror = BCME_BUFTOOSHORT;
+ }
+ break;
+
+ case IOVT_BUFFER:
+ /* buffer must meet minimum length requirement */
+ if (len < vi->minlen) {
+ bcmerror = BCME_BUFTOOSHORT;
+ }
+ break;
+
+ case IOVT_VOID:
+ if (!set) {
+ /* Cannot return nil... */
+ bcmerror = BCME_UNSUPPORTED;
+ }
+ break;
+
+ default:
+ /* unknown type for length check in iovar info */
+ ASSERT(0);
+ bcmerror = BCME_UNSUPPORTED;
+ }
+
+ return bcmerror;
+}
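+
+/* Illustrative sketch: dispatch an iovar by name, then validate the buffer
+ * length before acting on it. 'iovar_table' is a hypothetical per-module
+ * table terminated by an entry with a NULL name; 'set' indicates a set
+ * operation.
+ *
+ *	const bcm_iovar_t *vi = bcm_iovar_lookup(iovar_table, "txchain");
+ *	if (vi != NULL && bcm_iovar_lencheck(vi, arg, len, set) == BCME_OK)
+ *		... handle the get/set ...
+ */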
+
+/*
+ * Hierarchical Multiword bitmap based small id allocator.
+ *
+ * Multilevel hierarchy bitmap. (maximum 2 levels)
+ * First hierarchy uses a multiword bitmap to identify 32bit words in the
+ * second hierarchy that have at least a single bit set. Each bit in a word of
+ * the second hierarchy represents a unique ID that may be allocated.
+ *
+ * BCM_MWBMAP_ITEMS_MAX: Maximum number of IDs managed.
+ * BCM_MWBMAP_BITS_WORD: Number of bits in a bitmap word
+ * BCM_MWBMAP_WORDS_MAX: Maximum number of bitmap words needed for free IDs.
+ * BCM_MWBMAP_WDMAP_MAX: Maximum number of bitmap words identifying the first
+ *                       non-zero bitmap word carrying at least one free ID.
+ * BCM_MWBMAP_SHIFT_OP: Used in MOD, DIV and MUL operations.
+ * BCM_MWBMAP_INVALID_IDX: Value ~0U is treated as an invalid ID
+ *
+ * Design Notes:
+ * BCM_MWBMAP_USE_CNTSETBITS trades CPU for memory. When defined, the number
+ * of set bits is recomputed on every allocation and deallocation, requiring
+ * 4 array-indexed accesses and 3 arithmetic operations. When not defined, a
+ * running count of set bits is maintained instead, needing up to 32 bytes
+ * per 1024 IDs; a 4K max ID allocator hence uses up to 128 bytes per
+ * instantiation. In memory-limited systems, e.g. dongle builds, CPU may be
+ * traded for memory by defining BCM_MWBMAP_USE_CNTSETBITS.
+ *
+ * Note: wd_bitmap[] is statically declared and is not ROM friendly: the array
+ * size is fixed. There is no intention to support allocation of more than 4K
+ * indices. ID allocators for ranges smaller than 4K waste only 12 bytes,
+ * while saving the indirect access that a dynamically allocated bitmap would
+ * require.
+ */
+#if defined(DONGLEBUILD)
+#define BCM_MWBMAP_USE_CNTSETBITS /* runtime count set bits */
+#if defined(PCIEDEV_HOST_PKTID_AUDIT_ENABLED)
+#define BCM_MWBMAP_ITEMS_MAX (38 * 1024)
+#else /* ! PCIEDEV_HOST_PKTID_AUDIT_ENABLED */
+#define BCM_MWBMAP_ITEMS_MAX (7 * 1024)
+#endif /* PCIEDEV_HOST_PKTID_AUDIT_ENABLED */
+#else /* ! DONGLEBUILD */
+#define BCM_MWBMAP_ITEMS_MAX (64 * 1024) /* May increase to 64K */
+#endif /* DONGLEBUILD */
+
+#define BCM_MWBMAP_BITS_WORD (NBITS(uint32))
+#define BCM_MWBMAP_WORDS_MAX (BCM_MWBMAP_ITEMS_MAX / BCM_MWBMAP_BITS_WORD)
+#define BCM_MWBMAP_WDMAP_MAX (BCM_MWBMAP_WORDS_MAX / BCM_MWBMAP_BITS_WORD)
+#define BCM_MWBMAP_SHIFT_OP (5)
+#define BCM_MWBMAP_MODOP(ix) ((ix) & (BCM_MWBMAP_BITS_WORD - 1))
+#define BCM_MWBMAP_DIVOP(ix) ((ix) >> BCM_MWBMAP_SHIFT_OP)
+#define BCM_MWBMAP_MULOP(ix) ((ix) << BCM_MWBMAP_SHIFT_OP)
+
+/* Redefine PTR() and/or HDL() conversion to invoke audit for debugging */
+#define BCM_MWBMAP_PTR(hdl) ((struct bcm_mwbmap *)(hdl))
+#define BCM_MWBMAP_HDL(ptr) ((void *)(ptr))
+
+#if defined(BCM_MWBMAP_DEBUG)
+#define BCM_MWBMAP_AUDIT(mwb) \
+ do { \
+ ASSERT((mwb != NULL) && \
+ (((struct bcm_mwbmap *)(mwb))->magic == (void *)(mwb))); \
+ bcm_mwbmap_audit(mwb); \
+ } while (0)
+#define MWBMAP_ASSERT(exp) ASSERT(exp)
+#define MWBMAP_DBG(x) printf x
+#else /* !BCM_MWBMAP_DEBUG */
+#define BCM_MWBMAP_AUDIT(mwb) do {} while (0)
+#define MWBMAP_ASSERT(exp) do {} while (0)
+#define MWBMAP_DBG(x)
+#endif /* !BCM_MWBMAP_DEBUG */
+
+typedef struct bcm_mwbmap { /* Hierarchical multiword bitmap allocator */
+ uint16 wmaps; /* Total number of words in free wd bitmap */
+ uint16 imaps; /* Total number of words in free id bitmap */
+ int32 ifree; /* Count of free indices. Used only in audits */
+ uint16 total; /* Total indices managed by multiword bitmap */
+
+ void * magic; /* Audit handle parameter from user */
+
+	uint32 wd_bitmap[BCM_MWBMAP_WDMAP_MAX]; /* 1st level bitmap of non-empty 2nd level words */
+#if !defined(BCM_MWBMAP_USE_CNTSETBITS)
+ int8 wd_count[BCM_MWBMAP_WORDS_MAX]; /* free id running count, 1st lvl */
+#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
+
+ uint32 id_bitmap[0]; /* Second level bitmap */
+} bcm_mwbmap_t;
+
+/* Incarnate a hierarchical multiword bitmap based small index allocator. */
+struct bcm_mwbmap *
+BCMATTACHFN(bcm_mwbmap_init)(osl_t *osh, uint32 items_max)
+{
+ struct bcm_mwbmap * mwbmap_p;
+ uint32 wordix, size, words, extra;
+
+ /* Implementation Constraint: Uses 32bit word bitmap */
+ MWBMAP_ASSERT(BCM_MWBMAP_BITS_WORD == 32U);
+ MWBMAP_ASSERT(BCM_MWBMAP_SHIFT_OP == 5U);
+ MWBMAP_ASSERT(ISPOWEROF2(BCM_MWBMAP_ITEMS_MAX));
+ MWBMAP_ASSERT((BCM_MWBMAP_ITEMS_MAX % BCM_MWBMAP_BITS_WORD) == 0U);
+
+ ASSERT(items_max <= BCM_MWBMAP_ITEMS_MAX);
+
+ /* Determine the number of words needed in the multiword bitmap */
+ extra = BCM_MWBMAP_MODOP(items_max);
+ words = BCM_MWBMAP_DIVOP(items_max) + ((extra != 0U) ? 1U : 0U);
+
+ /* Allocate runtime state of multiword bitmap */
+ /* Note: wd_count[] or wd_bitmap[] are not dynamically allocated */
+ size = sizeof(bcm_mwbmap_t) + (sizeof(uint32) * words);
+ mwbmap_p = (bcm_mwbmap_t *)MALLOC(osh, size);
+ if (mwbmap_p == (bcm_mwbmap_t *)NULL) {
+ ASSERT(0);
+ goto error1;
+ }
+ memset(mwbmap_p, 0, size);
+
+ /* Initialize runtime multiword bitmap state */
+ mwbmap_p->imaps = (uint16)words;
+ mwbmap_p->ifree = (int32)items_max;
+ mwbmap_p->total = (uint16)items_max;
+
+ /* Setup magic, for use in audit of handle */
+ mwbmap_p->magic = BCM_MWBMAP_HDL(mwbmap_p);
+
+ /* Setup the second level bitmap of free indices */
+ /* Mark all indices as available */
+ for (wordix = 0U; wordix < mwbmap_p->imaps; wordix++) {
+ mwbmap_p->id_bitmap[wordix] = (uint32)(~0U);
+#if !defined(BCM_MWBMAP_USE_CNTSETBITS)
+ mwbmap_p->wd_count[wordix] = BCM_MWBMAP_BITS_WORD;
+#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
+ }
+
+ /* Ensure that extra indices are tagged as un-available */
+ if (extra) { /* fixup the free ids in last bitmap and wd_count */
+ uint32 * bmap_p = &mwbmap_p->id_bitmap[mwbmap_p->imaps - 1];
+ *bmap_p ^= (uint32)(~0U << extra); /* fixup bitmap */
+#if !defined(BCM_MWBMAP_USE_CNTSETBITS)
+ mwbmap_p->wd_count[mwbmap_p->imaps - 1] = (int8)extra; /* fixup count */
+#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
+ }
+
+ /* Setup the first level bitmap hierarchy */
+ extra = BCM_MWBMAP_MODOP(mwbmap_p->imaps);
+ words = BCM_MWBMAP_DIVOP(mwbmap_p->imaps) + ((extra != 0U) ? 1U : 0U);
+
+ mwbmap_p->wmaps = (uint16)words;
+
+ for (wordix = 0U; wordix < mwbmap_p->wmaps; wordix++)
+ mwbmap_p->wd_bitmap[wordix] = (uint32)(~0U);
+ if (extra) {
+ uint32 * bmap_p = &mwbmap_p->wd_bitmap[mwbmap_p->wmaps - 1];
+ *bmap_p ^= (uint32)(~0U << extra); /* fixup bitmap */
+ }
+
+ return mwbmap_p;
+
+error1:
+ return BCM_MWBMAP_INVALID_HDL;
+}
+
+/* Release resources used by multiword bitmap based small index allocator. */
+void
+BCMATTACHFN(bcm_mwbmap_fini)(osl_t * osh, struct bcm_mwbmap * mwbmap_hdl)
+{
+ bcm_mwbmap_t * mwbmap_p;
+
+ BCM_MWBMAP_AUDIT(mwbmap_hdl);
+ mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+
+ MFREE(osh, mwbmap_p, sizeof(struct bcm_mwbmap)
+ + (sizeof(uint32) * mwbmap_p->imaps));
+ return;
+}
+
+/* Allocate a unique small index using a multiword bitmap index allocator. */
+uint32
+BCMFASTPATH(bcm_mwbmap_alloc)(struct bcm_mwbmap * mwbmap_hdl)
+{
+ bcm_mwbmap_t * mwbmap_p;
+ uint32 wordix, bitmap;
+
+ BCM_MWBMAP_AUDIT(mwbmap_hdl);
+ mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+
+ /* Start with the first hierarchy */
+ for (wordix = 0; wordix < mwbmap_p->wmaps; ++wordix) {
+
+ bitmap = mwbmap_p->wd_bitmap[wordix]; /* get the word bitmap */
+
+ if (bitmap != 0U) {
+
+ uint32 count, bitix, *bitmap_p;
+
+ bitmap_p = &mwbmap_p->wd_bitmap[wordix];
+
+ /* clear all except trailing 1 */
+ if (bitmap != (1u << 31u)) {
+ bitmap = (uint32)(((int)(bitmap)) & (-((int)(bitmap))));
+ }
+ MWBMAP_ASSERT(C_bcm_count_leading_zeros(bitmap) ==
+ bcm_count_leading_zeros(bitmap));
+ bitix = (BCM_MWBMAP_BITS_WORD - 1)
+ - bcm_count_leading_zeros(bitmap); /* use asm clz */
+ wordix = BCM_MWBMAP_MULOP(wordix) + bitix;
+
+ /* Clear bit if wd count is 0, without conditional branch */
+#if defined(BCM_MWBMAP_USE_CNTSETBITS)
+ count = bcm_cntsetbits(mwbmap_p->id_bitmap[wordix]) - 1;
+#else /* ! BCM_MWBMAP_USE_CNTSETBITS */
+ mwbmap_p->wd_count[wordix]--;
+ count = mwbmap_p->wd_count[wordix];
+ MWBMAP_ASSERT(count ==
+ (bcm_cntsetbits(mwbmap_p->id_bitmap[wordix]) - 1));
+#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
+ MWBMAP_ASSERT(count >= 0);
+
+ /* clear wd_bitmap bit if id_map count is 0 */
+ bitmap = (count == 0) << bitix;
+
+ MWBMAP_DBG((
+ "Lvl1: bitix<%02u> wordix<%02u>: %08x ^ %08x = %08x wfree %d",
+ bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) ^ bitmap, count));
+
+ *bitmap_p ^= bitmap;
+
+ /* Use bitix in the second hierarchy */
+ bitmap_p = &mwbmap_p->id_bitmap[wordix];
+
+ bitmap = mwbmap_p->id_bitmap[wordix]; /* get the id bitmap */
+ MWBMAP_ASSERT(bitmap != 0U);
+
+ /* clear all except trailing 1 */
+ if (bitmap != (1u << 31u)) {
+ bitmap = (uint32)(((int)(bitmap)) & (-((int)(bitmap))));
+ }
+ MWBMAP_ASSERT(C_bcm_count_leading_zeros(bitmap) ==
+ bcm_count_leading_zeros(bitmap));
+ bitix = BCM_MWBMAP_MULOP(wordix)
+ + (BCM_MWBMAP_BITS_WORD - 1)
+ - bcm_count_leading_zeros(bitmap); /* use asm clz */
+
+ mwbmap_p->ifree--; /* decrement system wide free count */
+ MWBMAP_ASSERT(mwbmap_p->ifree >= 0);
+
+ MWBMAP_DBG((
+ "Lvl2: bitix<%02u> wordix<%02u>: %08x ^ %08x = %08x ifree %d",
+ bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) ^ bitmap,
+ mwbmap_p->ifree));
+
+ *bitmap_p ^= bitmap; /* mark as allocated = 1b0 */
+
+ return bitix;
+ }
+ }
+
+ ASSERT(mwbmap_p->ifree == 0);
+
+ return BCM_MWBMAP_INVALID_IDX;
+}
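+
+/* Illustrative lifecycle sketch ('osh' comes from the caller's OSL attach):
+ *
+ *	struct bcm_mwbmap *map = bcm_mwbmap_init(osh, 1024);
+ *	if (map != BCM_MWBMAP_INVALID_HDL) {
+ *		uint32 id = bcm_mwbmap_alloc(map);	-- two clz-based lookups
+ *		if (id != BCM_MWBMAP_INVALID_IDX) {
+ *			... use id ...
+ *			bcm_mwbmap_free(map, id);
+ *		}
+ *		bcm_mwbmap_fini(osh, map);
+ *	}
+ */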
+
+/* Force an index at a specified position to be in use */
+void
+bcm_mwbmap_force(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix)
+{
+ bcm_mwbmap_t * mwbmap_p;
+ uint32 count, wordix, bitmap, *bitmap_p;
+
+ BCM_MWBMAP_AUDIT(mwbmap_hdl);
+ mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+
+ ASSERT(bitix < mwbmap_p->total);
+
+ /* Start with second hierarchy */
+ wordix = BCM_MWBMAP_DIVOP(bitix);
+ bitmap = (uint32)(1U << BCM_MWBMAP_MODOP(bitix));
+ bitmap_p = &mwbmap_p->id_bitmap[wordix];
+
+ ASSERT((*bitmap_p & bitmap) == bitmap);
+
+ mwbmap_p->ifree--; /* update free count */
+ ASSERT(mwbmap_p->ifree >= 0);
+
+ MWBMAP_DBG(("Lvl2: bitix<%u> wordix<%u>: %08x ^ %08x = %08x ifree %d",
+ bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) ^ bitmap,
+ mwbmap_p->ifree));
+
+ *bitmap_p ^= bitmap; /* mark as in use */
+
+ /* Update first hierarchy */
+ bitix = wordix;
+
+ wordix = BCM_MWBMAP_DIVOP(bitix);
+ bitmap_p = &mwbmap_p->wd_bitmap[wordix];
+
+#if defined(BCM_MWBMAP_USE_CNTSETBITS)
+ count = bcm_cntsetbits(mwbmap_p->id_bitmap[bitix]);
+#else /* ! BCM_MWBMAP_USE_CNTSETBITS */
+ mwbmap_p->wd_count[bitix]--;
+ count = mwbmap_p->wd_count[bitix];
+ MWBMAP_ASSERT(count == bcm_cntsetbits(mwbmap_p->id_bitmap[bitix]));
+#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
+ MWBMAP_ASSERT(count >= 0);
+
+ bitmap = (count == 0) << BCM_MWBMAP_MODOP(bitix);
+
+ MWBMAP_DBG(("Lvl1: bitix<%02lu> wordix<%02u>: %08x ^ %08x = %08x wfree %d",
+ BCM_MWBMAP_MODOP(bitix), wordix, *bitmap_p, bitmap,
+ (*bitmap_p) ^ bitmap, count));
+
+ *bitmap_p ^= bitmap; /* mark as in use */
+
+ return;
+}
+
+/* Free a previously allocated index back into the multiword bitmap allocator */
+void
+BCMPOSTTRAPFASTPATH(bcm_mwbmap_free)(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix)
+{
+ bcm_mwbmap_t * mwbmap_p;
+ uint32 wordix, bitmap, *bitmap_p;
+
+ BCM_MWBMAP_AUDIT(mwbmap_hdl);
+ mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+
+ ASSERT_FP(bitix < mwbmap_p->total);
+
+ /* Start with second level hierarchy */
+ wordix = BCM_MWBMAP_DIVOP(bitix);
+ bitmap = (1U << BCM_MWBMAP_MODOP(bitix));
+ bitmap_p = &mwbmap_p->id_bitmap[wordix];
+
+ ASSERT_FP((*bitmap_p & bitmap) == 0U); /* ASSERT not a double free */
+
+ mwbmap_p->ifree++; /* update free count */
+ ASSERT_FP(mwbmap_p->ifree <= mwbmap_p->total);
+
+ MWBMAP_DBG(("Lvl2: bitix<%02u> wordix<%02u>: %08x | %08x = %08x ifree %d",
+ bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) | bitmap,
+ mwbmap_p->ifree));
+
+ *bitmap_p |= bitmap; /* mark as available */
+
+ /* Now update first level hierarchy */
+
+ bitix = wordix;
+
+ wordix = BCM_MWBMAP_DIVOP(bitix); /* first level's word index */
+ bitmap = (1U << BCM_MWBMAP_MODOP(bitix));
+ bitmap_p = &mwbmap_p->wd_bitmap[wordix];
+
+#if !defined(BCM_MWBMAP_USE_CNTSETBITS)
+ mwbmap_p->wd_count[bitix]++;
+#endif
+
+#if defined(BCM_MWBMAP_DEBUG)
+ {
+ uint32 count;
+#if defined(BCM_MWBMAP_USE_CNTSETBITS)
+ count = bcm_cntsetbits(mwbmap_p->id_bitmap[bitix]);
+#else /* ! BCM_MWBMAP_USE_CNTSETBITS */
+ count = mwbmap_p->wd_count[bitix];
+ MWBMAP_ASSERT(count == bcm_cntsetbits(mwbmap_p->id_bitmap[bitix]));
+#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
+
+ MWBMAP_ASSERT(count <= BCM_MWBMAP_BITS_WORD);
+
+ MWBMAP_DBG(("Lvl1: bitix<%02u> wordix<%02u>: %08x | %08x = %08x wfree %d",
+ bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) | bitmap, count));
+ }
+#endif /* BCM_MWBMAP_DEBUG */
+
+ *bitmap_p |= bitmap;
+
+ return;
+}
+
+/* Fetch the total number of free indices in the multiword bitmap allocator */
+uint32
+bcm_mwbmap_free_cnt(struct bcm_mwbmap * mwbmap_hdl)
+{
+ bcm_mwbmap_t * mwbmap_p;
+
+ BCM_MWBMAP_AUDIT(mwbmap_hdl);
+ mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+
+ ASSERT(mwbmap_p->ifree >= 0);
+
+ return mwbmap_p->ifree;
+}
+
+/* Determine whether an index is in use or free */
+bool
+bcm_mwbmap_isfree(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix)
+{
+ bcm_mwbmap_t * mwbmap_p;
+ uint32 wordix, bitmap;
+
+ BCM_MWBMAP_AUDIT(mwbmap_hdl);
+ mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+
+ ASSERT(bitix < mwbmap_p->total);
+
+ wordix = BCM_MWBMAP_DIVOP(bitix);
+ bitmap = (1U << BCM_MWBMAP_MODOP(bitix));
+
+ return ((mwbmap_p->id_bitmap[wordix] & bitmap) != 0U);
+}
+
+/* Debug dump a multiword bitmap allocator */
+void
+bcm_mwbmap_show(struct bcm_mwbmap * mwbmap_hdl)
+{
+ uint32 ix, count;
+ bcm_mwbmap_t * mwbmap_p;
+
+ BCM_MWBMAP_AUDIT(mwbmap_hdl);
+ mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+
+ printf("mwbmap_p %p wmaps %u imaps %u ifree %d total %u\n",
+ OSL_OBFUSCATE_BUF((void *)mwbmap_p),
+ mwbmap_p->wmaps, mwbmap_p->imaps, mwbmap_p->ifree, mwbmap_p->total);
+ for (ix = 0U; ix < mwbmap_p->wmaps; ix++) {
+ printf("\tWDMAP:%2u. 0x%08x\t", ix, mwbmap_p->wd_bitmap[ix]);
+ bcm_bitprint32(mwbmap_p->wd_bitmap[ix]);
+ printf("\n");
+ }
+ for (ix = 0U; ix < mwbmap_p->imaps; ix++) {
+#if defined(BCM_MWBMAP_USE_CNTSETBITS)
+ count = bcm_cntsetbits(mwbmap_p->id_bitmap[ix]);
+#else /* ! BCM_MWBMAP_USE_CNTSETBITS */
+ count = mwbmap_p->wd_count[ix];
+ MWBMAP_ASSERT(count == bcm_cntsetbits(mwbmap_p->id_bitmap[ix]));
+#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
+ printf("\tIDMAP:%2u. 0x%08x %02u\t", ix, mwbmap_p->id_bitmap[ix], count);
+ bcm_bitprint32(mwbmap_p->id_bitmap[ix]);
+ printf("\n");
+ }
+
+ return;
+}
+
+/* Audit a hierarchical multiword bitmap */
+void
+bcm_mwbmap_audit(struct bcm_mwbmap * mwbmap_hdl)
+{
+ bcm_mwbmap_t * mwbmap_p;
+ uint32 count, free_cnt = 0U, wordix, idmap_ix, bitix, *bitmap_p;
+
+ mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+
+ for (wordix = 0U; wordix < mwbmap_p->wmaps; ++wordix) {
+
+ bitmap_p = &mwbmap_p->wd_bitmap[wordix];
+
+ for (bitix = 0U; bitix < BCM_MWBMAP_BITS_WORD; bitix++) {
+ if ((*bitmap_p) & (1 << bitix)) {
+ idmap_ix = BCM_MWBMAP_MULOP(wordix) + bitix;
+#if defined(BCM_MWBMAP_USE_CNTSETBITS)
+ count = bcm_cntsetbits(mwbmap_p->id_bitmap[idmap_ix]);
+#else /* ! BCM_MWBMAP_USE_CNTSETBITS */
+ count = mwbmap_p->wd_count[idmap_ix];
+ ASSERT(count == bcm_cntsetbits(mwbmap_p->id_bitmap[idmap_ix]));
+#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
+ ASSERT(count != 0U);
+ free_cnt += count;
+ }
+ }
+ }
+
+ ASSERT((int)free_cnt == mwbmap_p->ifree);
+}
+/* END : Hierarchical multiword bitmap based small id allocator. */
+
+/* Simple 16bit Id allocator using a stack implementation. */
+typedef struct id16_map {
+ uint32 failures; /* count of failures */
+ void *dbg; /* debug placeholder */
+ uint16 total; /* total number of ids managed by allocator */
+ uint16 start; /* start value of 16bit ids to be managed */
+ int stack_idx; /* index into stack of available ids */
+ uint16 stack[0]; /* stack of 16 bit ids */
+} id16_map_t;
+
+#define ID16_MAP_SZ(items) (sizeof(id16_map_t) + \
+ (sizeof(uint16) * (items)))
+
+#if defined(BCM_DBG)
+
+/* Uncomment BCM_DBG_ID16 to debug double free */
+/* #define BCM_DBG_ID16 */
+
+typedef struct id16_map_dbg {
+ uint16 total;
+ bool avail[0];
+} id16_map_dbg_t;
+#define ID16_MAP_DBG_SZ(items) (sizeof(id16_map_dbg_t) + \
+ (sizeof(bool) * (items)))
+#define ID16_MAP_MSG(x)      printf x
+#else
+#define ID16_MAP_MSG(x)
+#endif /* BCM_DBG */
+
+void * /* Construct an id16 allocator: [start_val16 .. start_val16+total_ids) */
+id16_map_init(osl_t *osh, uint16 total_ids, uint16 start_val16)
+{
+ uint16 idx, val16;
+ id16_map_t * id16_map;
+
+ ASSERT(total_ids > 0);
+
+	/* A start_val16 of ID16_UNDEFINED allows the caller to fill the id16 map
+ * with random values.
+ */
+ ASSERT((start_val16 == ID16_UNDEFINED) ||
+ (start_val16 + total_ids) < ID16_INVALID);
+
+ id16_map = (id16_map_t *) MALLOC(osh, ID16_MAP_SZ(total_ids));
+ if (id16_map == NULL) {
+ return NULL;
+ }
+
+ id16_map->total = total_ids;
+ id16_map->start = start_val16;
+ id16_map->failures = 0;
+ id16_map->dbg = NULL;
+
+ /*
+ * Populate stack with 16bit id values, commencing with start_val16.
+	 * If start_val16 is ID16_UNDEFINED, the id16 map is not populated.
+ */
+ id16_map->stack_idx = -1;
+
+ if (id16_map->start != ID16_UNDEFINED) {
+ val16 = start_val16;
+
+ for (idx = 0; idx < total_ids; idx++, val16++) {
+ id16_map->stack_idx = idx;
+ id16_map->stack[id16_map->stack_idx] = val16;
+ }
+ }
+
+#if defined(BCM_DBG) && defined(BCM_DBG_ID16)
+ if (id16_map->start != ID16_UNDEFINED) {
+ id16_map->dbg = MALLOC(osh, ID16_MAP_DBG_SZ(total_ids));
+
+ if (id16_map->dbg) {
+ id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)id16_map->dbg;
+
+ id16_map_dbg->total = total_ids;
+ for (idx = 0; idx < total_ids; idx++) {
+ id16_map_dbg->avail[idx] = TRUE;
+ }
+ }
+ }
+#endif /* BCM_DBG && BCM_DBG_ID16 */
+
+ return (void *)id16_map;
+}
+
+void * /* Destruct an id16 allocator instance */
+id16_map_fini(osl_t *osh, void * id16_map_hndl)
+{
+ uint16 total_ids;
+ id16_map_t * id16_map;
+
+ if (id16_map_hndl == NULL)
+ return NULL;
+
+ id16_map = (id16_map_t *)id16_map_hndl;
+
+ total_ids = id16_map->total;
+ ASSERT(total_ids > 0);
+
+#if defined(BCM_DBG) && defined(BCM_DBG_ID16)
+ if (id16_map->dbg) {
+ MFREE(osh, id16_map->dbg, ID16_MAP_DBG_SZ(total_ids));
+ }
+#endif /* BCM_DBG && BCM_DBG_ID16 */
+
+ id16_map->total = 0;
+ MFREE(osh, id16_map, ID16_MAP_SZ(total_ids));
+
+ return NULL;
+}
+
+void
+id16_map_clear(void * id16_map_hndl, uint16 total_ids, uint16 start_val16)
+{
+ uint16 idx, val16;
+ id16_map_t * id16_map;
+
+ ASSERT(total_ids > 0);
+	/* A start_val16 of ID16_UNDEFINED allows the caller to fill the id16 map
+ * with random values.
+ */
+ ASSERT((start_val16 == ID16_UNDEFINED) ||
+ (start_val16 + total_ids) < ID16_INVALID);
+
+ id16_map = (id16_map_t *)id16_map_hndl;
+ if (id16_map == NULL) {
+ return;
+ }
+
+ id16_map->total = total_ids;
+ id16_map->start = start_val16;
+ id16_map->failures = 0;
+
+ /* Populate stack with 16bit id values, commencing with start_val16 */
+ id16_map->stack_idx = -1;
+
+ if (id16_map->start != ID16_UNDEFINED) {
+ val16 = start_val16;
+
+ for (idx = 0; idx < total_ids; idx++, val16++) {
+ id16_map->stack_idx = idx;
+ id16_map->stack[id16_map->stack_idx] = val16;
+ }
+ }
+
+#if defined(BCM_DBG) && defined(BCM_DBG_ID16)
+ if (id16_map->start != ID16_UNDEFINED) {
+ if (id16_map->dbg) {
+ id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)id16_map->dbg;
+
+ id16_map_dbg->total = total_ids;
+ for (idx = 0; idx < total_ids; idx++) {
+ id16_map_dbg->avail[idx] = TRUE;
+ }
+ }
+ }
+#endif /* BCM_DBG && BCM_DBG_ID16 */
+}
+
+uint16 /* Allocate a unique 16bit id */
+BCMFASTPATH(id16_map_alloc)(void * id16_map_hndl)
+{
+ uint16 val16;
+ id16_map_t * id16_map;
+
+ ASSERT_FP(id16_map_hndl != NULL);
+
+ id16_map = (id16_map_t *)id16_map_hndl;
+
+ ASSERT_FP(id16_map->total > 0);
+
+ if (id16_map->stack_idx < 0) {
+ id16_map->failures++;
+ return ID16_INVALID;
+ }
+
+ val16 = id16_map->stack[id16_map->stack_idx];
+ id16_map->stack_idx--;
+
+#if defined(BCM_DBG) && defined(BCM_DBG_ID16)
+ ASSERT_FP((id16_map->start == ID16_UNDEFINED) ||
+ (val16 < (id16_map->start + id16_map->total)));
+
+ if (id16_map->dbg) { /* Validate val16 */
+ id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)id16_map->dbg;
+
+ ASSERT_FP(id16_map_dbg->avail[val16 - id16_map->start] == TRUE);
+ id16_map_dbg->avail[val16 - id16_map->start] = FALSE;
+ }
+#endif /* BCM_DBG && BCM_DBG_ID16 */
+
+ return val16;
+}
+
+void /* Free a 16bit id value into the id16 allocator */
+BCMFASTPATH(id16_map_free)(void * id16_map_hndl, uint16 val16)
+{
+ id16_map_t * id16_map;
+
+ ASSERT_FP(id16_map_hndl != NULL);
+
+ id16_map = (id16_map_t *)id16_map_hndl;
+
+#if defined(BCM_DBG) && defined(BCM_DBG_ID16)
+ ASSERT_FP((id16_map->start == ID16_UNDEFINED) ||
+ (val16 < (id16_map->start + id16_map->total)));
+
+ if (id16_map->dbg) { /* Validate val16 */
+ id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)id16_map->dbg;
+
+ ASSERT_FP(id16_map_dbg->avail[val16 - id16_map->start] == FALSE);
+ id16_map_dbg->avail[val16 - id16_map->start] = TRUE;
+ }
+#endif /* BCM_DBG && BCM_DBG_ID16 */
+
+ id16_map->stack_idx++;
+ id16_map->stack[id16_map->stack_idx] = val16;
+}
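+
+/* Illustrative sketch: a stack-based allocator handing out ids from
+ * [0x100 .. 0x100 + 32) in LIFO order, so the first alloc returns 0x11f.
+ *
+ *	void *h = id16_map_init(osh, 32, 0x100);
+ *	if (h != NULL) {
+ *		uint16 id = id16_map_alloc(h);
+ *		if (id != ID16_INVALID)
+ *			id16_map_free(h, id);
+ *		h = id16_map_fini(osh, h);	-- returns NULL
+ *	}
+ */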
+
+uint32 /* Returns number of failures to allocate a unique id16 */
+id16_map_failures(void * id16_map_hndl)
+{
+ ASSERT(id16_map_hndl != NULL);
+ return ((id16_map_t *)id16_map_hndl)->failures;
+}
+
+bool
+id16_map_audit(void * id16_map_hndl)
+{
+ int idx;
+ int insane = 0;
+ id16_map_t * id16_map;
+
+ ASSERT(id16_map_hndl != NULL);
+
+ id16_map = (id16_map_t *)id16_map_hndl;
+
+ ASSERT(id16_map->stack_idx >= -1);
+ ASSERT(id16_map->stack_idx < (int)id16_map->total);
+
+ if (id16_map->start == ID16_UNDEFINED)
+ goto done;
+
+ for (idx = 0; idx <= id16_map->stack_idx; idx++) {
+ ASSERT(id16_map->stack[idx] >= id16_map->start);
+ ASSERT(id16_map->stack[idx] < (id16_map->start + id16_map->total));
+
+#if defined(BCM_DBG) && defined(BCM_DBG_ID16)
+ if (id16_map->dbg) {
+ uint16 val16 = id16_map->stack[idx];
+			/* index avail[] relative to 'start', matching alloc/free */
+			if (((id16_map_dbg_t *)(id16_map->dbg))->avail[val16 - id16_map->start] != TRUE) {
+				insane |= 1;
+				ID16_MAP_MSG(("id16_map<%p>: stack_idx %u invalid val16 %u\n",
+					OSL_OBFUSCATE_BUF(id16_map_hndl), idx, val16));
+ }
+ }
+#endif /* BCM_DBG && BCM_DBG_ID16 */
+ }
+
+#if defined(BCM_DBG) && defined(BCM_DBG_ID16)
+	if (id16_map->dbg) {
+		id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)id16_map->dbg;
+		uint16 avail = 0; /* Audit the count of available ids */
+		for (idx = 0; idx < id16_map_dbg->total; idx++) {
+			if (id16_map_dbg->avail[idx] == TRUE)
+				avail++;
+		}
+ if (avail && (avail != (id16_map->stack_idx + 1))) {
+ insane |= 1;
+ ID16_MAP_MSG(("id16_map<%p>: avail %u stack_idx %u\n",
+ OSL_OBFUSCATE_BUF(id16_map_hndl),
+ avail, id16_map->stack_idx));
+ }
+ }
+#endif /* BCM_DBG && BCM_DBG_ID16 */
+
+done:
+ /* invoke any other system audits */
+ return (!!insane);
+}
+/* END: Simple id16 allocator */
+
+void
+BCMATTACHFN(dll_pool_detach)(void * osh, dll_pool_t * pool, uint16 elems_max, uint16 elem_size)
+{
+ uint32 mem_size;
+ mem_size = sizeof(dll_pool_t) + (elems_max * elem_size);
+ if (pool)
+ MFREE(osh, pool, mem_size);
+}
+dll_pool_t *
+BCMATTACHFN(dll_pool_init)(void * osh, uint16 elems_max, uint16 elem_size)
+{
+ uint32 mem_size, i;
+ dll_pool_t * dll_pool_p;
+ dll_t * elem_p;
+
+ ASSERT(elem_size > sizeof(dll_t));
+
+ mem_size = sizeof(dll_pool_t) + (elems_max * elem_size);
+
+ if ((dll_pool_p = (dll_pool_t *)MALLOCZ(osh, mem_size)) == NULL) {
+ ASSERT(0);
+ return dll_pool_p;
+ }
+
+ dll_init(&dll_pool_p->free_list);
+ dll_pool_p->elems_max = elems_max;
+ dll_pool_p->elem_size = elem_size;
+
+ elem_p = dll_pool_p->elements;
+ for (i = 0; i < elems_max; i++) {
+ dll_append(&dll_pool_p->free_list, elem_p);
+ elem_p = (dll_t *)((uintptr)elem_p + elem_size);
+ }
+
+ dll_pool_p->free_count = elems_max;
+
+ return dll_pool_p;
+}
+
+void *
+dll_pool_alloc(dll_pool_t * dll_pool_p)
+{
+ dll_t * elem_p;
+
+ if (dll_pool_p->free_count == 0) {
+ ASSERT(dll_empty(&dll_pool_p->free_list));
+ return NULL;
+ }
+
+ elem_p = dll_head_p(&dll_pool_p->free_list);
+ dll_delete(elem_p);
+ dll_pool_p->free_count -= 1;
+
+ return (void *)elem_p;
+}
+
+void
+BCMPOSTTRAPFN(dll_pool_free)(dll_pool_t * dll_pool_p, void * elem_p)
+{
+ dll_t * node_p = (dll_t *)elem_p;
+ dll_prepend(&dll_pool_p->free_list, node_p);
+ dll_pool_p->free_count += 1;
+}
+
+void
+dll_pool_free_tail(dll_pool_t * dll_pool_p, void * elem_p)
+{
+ dll_t * node_p = (dll_t *)elem_p;
+ dll_append(&dll_pool_p->free_list, node_p);
+ dll_pool_p->free_count += 1;
+}
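+
+/* Illustrative sketch: a pool of fixed-size elements whose first member
+ * must be a dll_t node (elem_size > sizeof(dll_t) is asserted). 'elem_t'
+ * is a hypothetical element type.
+ *
+ *	typedef struct { dll_t node; int data; } elem_t;
+ *	dll_pool_t *pool = dll_pool_init(osh, 16, sizeof(elem_t));
+ *	elem_t *e = (elem_t *)dll_pool_alloc(pool);
+ *	if (e != NULL)
+ *		dll_pool_free(pool, e);		-- back to head of free list
+ *	dll_pool_detach(osh, pool, 16, sizeof(elem_t));
+ */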
+
+#ifdef BCMDBG
+void
+dll_pool_dump(dll_pool_t * dll_pool_p, dll_elem_dump elem_dump)
+{
+ dll_t * elem_p;
+ dll_t * next_p;
+ printf("dll_pool<%p> free_count<%u> elems_max<%u> elem_size<%u>\n",
+ OSL_OBFUSCATE_BUF(dll_pool_p), dll_pool_p->free_count,
+ dll_pool_p->elems_max, dll_pool_p->elem_size);
+
+ for (elem_p = dll_head_p(&dll_pool_p->free_list);
+ !dll_end(&dll_pool_p->free_list, elem_p); elem_p = next_p) {
+
+ next_p = dll_next_p(elem_p);
+ printf("\telem<%p>\n", OSL_OBFUSCATE_BUF(elem_p));
+ if (elem_dump != NULL)
+ elem_dump((void *)elem_p);
+ }
+}
+#endif /* BCMDBG */
+
+#endif /* BCMDRIVER */
+
+#if defined(BCMDRIVER) || defined(WL_UNITTEST)
+
+/* When TRUE, bcm_bprintf() output is sent directly to the kernel log */
+bool bcm_bprintf_bypass = FALSE;
+
+/* Initialization of bcmstrbuf structure */
+void
+BCMPOSTTRAPFN(bcm_binit)(struct bcmstrbuf *b, char *buf, uint size)
+{
+ b->origsize = b->size = size;
+ b->origbuf = b->buf = buf;
+ if (size > 0) {
+ buf[0] = '\0';
+ }
+}
+
+/* Buffer sprintf wrapper to guard against buffer overflow */
+int
+BCMPOSTTRAPFN(bcm_bprintf)(struct bcmstrbuf *b, const char *fmt, ...)
+{
+ va_list ap;
+ int r;
+
+ va_start(ap, fmt);
+
+ r = vsnprintf(b->buf, b->size, fmt, ap);
+ if (bcm_bprintf_bypass == TRUE) {
+ printf("%s", b->buf);
+ goto exit;
+ }
+
+	/* A non-ANSI-compliant vsnprintf returns -1 on truncation, an ANSI C99
+	 * compliant one returns r >= b->size, and bcmstdlib returns 0; handle all
+	 * three cases.
+	 */
+	/* r == 0 also occurs when strlen(fmt) is zero, typically when "" is
+	 * passed as the format argument.
+	 */
+ if ((r == -1) || (r >= (int)b->size)) {
+ b->size = 0;
+ } else {
+ b->size -= r;
+ b->buf += r;
+ }
+
+exit:
+ va_end(ap);
+
+ return r;
+}
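+
+/* Illustrative sketch: bcm_binit()/bcm_bprintf() provide an
+ * overflow-guarded sprintf; once the buffer fills, b->size drops to 0 and
+ * subsequent prints become no-ops rather than overruns.
+ *
+ *	char out[128];
+ *	struct bcmstrbuf b;
+ *	bcm_binit(&b, out, sizeof(out));
+ *	bcm_bprintf(&b, "free %d\n", free_cnt);	-- free_cnt is hypothetical
+ *	printf("%s", out);
+ */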
+
+void
+bcm_bprhex(struct bcmstrbuf *b, const char *msg, bool newline, const uint8 *buf, uint len)
+{
+ uint i;
+
+ if (msg != NULL && msg[0] != '\0')
+ bcm_bprintf(b, "%s", msg);
+ for (i = 0u; i < len; i ++)
+ bcm_bprintf(b, "%02X", buf[i]);
+ if (newline)
+ bcm_bprintf(b, "\n");
+}
+
+void
+bcm_inc_bytes(uchar *num, int num_bytes, uint8 amount)
+{
+ int i;
+
+ for (i = 0; i < num_bytes; i++) {
+ num[i] += amount;
+ if (num[i] >= amount)
+ break;
+ amount = 1;
+ }
+}
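+
+/* Illustrative sketch: bcm_inc_bytes() treats num[] as a little-endian
+ * multi-byte counter (e.g. a nonce or replay counter) and propagates the
+ * carry.
+ *
+ *	uchar ctr[4] = {0xff, 0x00, 0x00, 0x00};
+ *	bcm_inc_bytes(ctr, 4, 1);	-- ctr becomes {0x00, 0x01, 0x00, 0x00}
+ */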
+
+int
+bcm_cmp_bytes(const uchar *arg1, const uchar *arg2, uint8 nbytes)
+{
+ int i;
+
+ for (i = nbytes - 1; i >= 0; i--) {
+ if (arg1[i] != arg2[i])
+ return (arg1[i] - arg2[i]);
+ }
+ return 0;
+}
+
+void
+bcm_print_bytes(const char *name, const uchar *data, uint len)
+{
+ uint i;
+ int per_line = 0;
+
+ printf("%s: %d \n", name ? name : "", len);
+ for (i = 0u; i < len; i++) {
+ printf("%02x ", *data++);
+ per_line++;
+ if (per_line == 16) {
+ per_line = 0;
+ printf("\n");
+ }
+ }
+ printf("\n");
+}
+
+/* Search for an IE having a specific tag and an OUI type from a buffer.
+ * tlvs: buffer to search for IE
+ * tlvs_len: buffer length
+ * tag: IE tag
+ * oui_len: length of the OUI
+ * oui: specific OUI to match
+ * type: OUI type
+ * Returns the matched IE, or NULL if no match is found.
+*/
+bcm_tlv_t *
+bcm_find_ie(const uint8* tlvs, uint tlvs_len, uint8 tag, uint8 oui_len,
+ const char *oui, uint8 type)
+{
+ const bcm_tlv_t *ie;
+
+ COV_TAINTED_DATA_SINK(tlvs_len);
+ COV_NEG_SINK(tlvs_len);
+
+ /* Walk through the IEs looking for an OUI match */
+ while ((ie = bcm_parse_tlvs_advance(&tlvs, &tlvs_len, tag,
+ BCM_TLV_ADVANCE_TO))) {
+ if ((ie->len > oui_len) &&
+ !bcmp(ie->data, oui, oui_len) &&
+ ie->data[oui_len] == type) {
+
+ COV_TAINTED_DATA_ARG(ie);
+
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ return (bcm_tlv_t *)(ie); /* a match */
+ GCC_DIAGNOSTIC_POP();
+ }
+ /* Point to the next IE */
+ bcm_tlv_buffer_advance_past(ie, &tlvs, &tlvs_len);
+ }
+
+ return NULL;
+}
+
+/* Look for vendor-specific IE with specified OUI and optional type */
+bcm_tlv_t *
+bcm_find_vendor_ie(const void *tlvs, uint tlvs_len, const char *voui, uint8 *type, uint type_len)
+{
+ const bcm_tlv_t *ie;
+ uint8 ie_len;
+
+ COV_TAINTED_DATA_SINK(tlvs_len);
+ COV_NEG_SINK(tlvs_len);
+
+ ie = (const bcm_tlv_t*)tlvs;
+
+ /* make sure we are looking at a valid IE */
+ if (ie == NULL || !bcm_valid_tlv(ie, tlvs_len)) {
+ return NULL;
+ }
+
+ /* Walk through the IEs looking for an OUI match */
+ do {
+ ie_len = ie->len;
+ if ((ie->id == DOT11_MNG_VS_ID) &&
+ (ie_len >= (DOT11_OUI_LEN + type_len)) &&
+ !bcmp(ie->data, voui, DOT11_OUI_LEN))
+ {
+ /* compare optional type */
+ if (type_len == 0 ||
+ !bcmp(&ie->data[DOT11_OUI_LEN], type, type_len)) {
+
+ COV_TAINTED_DATA_ARG(ie);
+
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ return (bcm_tlv_t *)(ie); /* a match */
+ GCC_DIAGNOSTIC_POP();
+ }
+ }
+ } while ((ie = bcm_next_tlv(ie, &tlvs_len)) != NULL);
+
+ return NULL;
+}
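+
+/* Illustrative sketch: locate a vendor-specific WPA IE (OUI 00:50:f2,
+ * type 1) in a frame body. 'body' and 'body_len' are assumed to point past
+ * the fixed parameters of the management frame.
+ *
+ *	uint8 wpa_oui[] = {0x00, 0x50, 0xf2};
+ *	uint8 wpa_type = 1;
+ *	bcm_tlv_t *ie = bcm_find_vendor_ie(body, body_len,
+ *		(const char *)wpa_oui, &wpa_type, sizeof(wpa_type));
+ */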
+
+#if defined(WLTINYDUMP) || defined(BCMDBG) || defined(WLMSG_INFORM) || \
+ defined(WLMSG_ASSOC) || defined(WLMSG_PRPKT) || defined(WLMSG_WSEC)
+#define SSID_FMT_BUF_LEN ((4 * DOT11_MAX_SSID_LEN) + 1)
+
+int
+bcm_format_ssid(char* buf, const uchar ssid[], uint ssid_len)
+{
+ uint i, c;
+ char *p = buf;
+ char *endp = buf + SSID_FMT_BUF_LEN;
+
+ if (ssid_len > DOT11_MAX_SSID_LEN) ssid_len = DOT11_MAX_SSID_LEN;
+
+ for (i = 0; i < ssid_len; i++) {
+ c = (uint)ssid[i];
+ if (c == '\\') {
+ *p++ = '\\';
+ *p++ = '\\';
+ } else if (bcm_isprint((uchar)c)) {
+ *p++ = (char)c;
+ } else {
+ p += snprintf(p, (endp - p), "\\x%02X", c);
+ }
+ }
+ *p = '\0';
+ ASSERT(p < endp);
+
+ return (int)(p - buf);
+}
+#endif /* WLTINYDUMP || BCMDBG || WLMSG_INFORM || WLMSG_ASSOC || WLMSG_PRPKT */
+
+#endif /* BCMDRIVER || WL_UNITTEST */
+
+/* Masking few bytes of MAC address per customer in all prints/eventlogs. */
+int
+BCMRAMFN(bcm_addrmask_set)(int enable)
+{
+#ifdef PRIVACY_MASK
+ struct ether_addr *privacy = privacy_addrmask_get();
+ if (enable) {
+ /* apply mask as (For SS)
+ * orig : 12:34:56:78:90:ab
+ * masked : 12:xx:xx:xx:x0:ab
+ */
+ privacy->octet[1] = privacy->octet[2] =
+ privacy->octet[3] = 0;
+ privacy->octet[0] = privacy->octet[5] = 0xff;
+ privacy->octet[4] = 0x0f;
+ } else
+ {
+ /* No masking. All are 0xff. */
+ memcpy(privacy, &ether_bcast, sizeof(struct ether_addr));
+ }
+
+ return BCME_OK;
+#else
+ BCM_REFERENCE(enable);
+ return BCME_UNSUPPORTED;
+#endif /* PRIVACY_MASK */
+
+}
+
+int
+bcm_addrmask_get(int *val)
+{
+#ifdef PRIVACY_MASK
+ struct ether_addr *privacy = privacy_addrmask_get();
+ if (!eacmp(&ether_bcast, privacy)) {
+ *val = FALSE;
+ } else {
+ *val = TRUE;
+ }
+
+ return BCME_OK;
+#else
+ BCM_REFERENCE(val);
+ return BCME_UNSUPPORTED;
+#endif
+}
+
+uint64
+BCMRAMFN(bcm_ether_ntou64)(const struct ether_addr *ea)
+{
+ uint64 mac;
+ struct ether_addr addr;
+
+ memcpy(&addr, ea, sizeof(struct ether_addr));
+
+#ifdef PRIVACY_MASK
+ struct ether_addr *privacy = privacy_addrmask_get();
+ if (!ETHER_ISMULTI(ea)) {
+ *(uint32*)(&addr.octet[0]) &= *((uint32*)&privacy->octet[0]);
+ *(uint16*)(&addr.octet[4]) &= *((uint16*)&privacy->octet[4]);
+ }
+#endif /* PRIVACY_MASK */
+
+ mac = ((uint64)HTON16(*((const uint16*)&addr.octet[4]))) << 32 |
+ HTON32(*((const uint32*)&addr.octet[0]));
+ return (mac);
+}
+
+char *
+bcm_ether_ntoa(const struct ether_addr *ea, char *buf)
+{
+ static const char hex[] =
+ {
+ '0', '1', '2', '3', '4', '5', '6', '7',
+ '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'
+ };
+ const uint8 *octet = ea->octet;
+ char *p = buf;
+ int i;
+
+ for (i = 0; i < 6; i++, octet++) {
+ *p++ = hex[(*octet >> 4) & 0xf];
+ *p++ = hex[*octet & 0xf];
+ *p++ = ':';
+ }
+
+ *(p-1) = '\0';
+
+ return (buf);
+}
+
+/* Find the 1-based position of the first (least significant) set bit
+ * in the given number; returns 0 when no bit is set.
+ */
+int
+bcm_find_fsb(uint32 num)
+{
+ uint8 pos = 0;
+ if (!num)
+ return pos;
+ while (!(num & 1)) {
+ num >>= 1;
+ pos++;
+ }
+ return (pos+1);
+}
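+
+/* Illustrative examples (worked by hand against the loop above):
+ *	bcm_find_fsb(0x0) returns 0	-- no bit set
+ *	bcm_find_fsb(0x8) returns 4	-- bit 3, 1-based position 4
+ */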
+
+/* TODO: need to pass in the buffer length for validation check */
+char *
+bcm_ip_ntoa(struct ipv4_addr *ia, char *buf)
+{
+ snprintf(buf, 16, "%d.%d.%d.%d",
+ ia->addr[0], ia->addr[1], ia->addr[2], ia->addr[3]);
+ return (buf);
+}
+
+/* TODO: need to pass in the buffer length for validation check */
+char *
+bcm_ipv6_ntoa(void *ipv6, char *buf)
+{
+ /* Implementing RFC 5952 Sections 4 + 5 */
+ /* Not thoroughly tested */
+ uint16 tmp[8];
+ uint16 *a = &tmp[0];
+ char *p = buf;
+ int i, i_max = -1, cnt = 0, cnt_max = 1;
+ uint8 *a4 = NULL;
+ memcpy((uint8 *)&tmp[0], (uint8 *)ipv6, IPV6_ADDR_LEN);
+
+ for (i = 0; i < IPV6_ADDR_LEN/2; i++) {
+ if (a[i]) {
+ if (cnt > cnt_max) {
+ cnt_max = cnt;
+ i_max = i - cnt;
+ }
+ cnt = 0;
+ } else
+ cnt++;
+ }
+ if (cnt > cnt_max) {
+ cnt_max = cnt;
+ i_max = i - cnt;
+ }
+ if (i_max == 0 &&
+ /* IPv4-translated: ::ffff:0:a.b.c.d */
+ ((cnt_max == 4 && a[4] == 0xffff && a[5] == 0) ||
+ /* IPv4-mapped: ::ffff:a.b.c.d */
+ (cnt_max == 5 && a[5] == 0xffff)))
+ a4 = (uint8*) (a + 6);
+
+ for (i = 0; i < IPV6_ADDR_LEN/2; i++) {
+ if ((uint8*) (a + i) == a4) {
+ snprintf(p, 17, ":%u.%u.%u.%u", a4[0], a4[1], a4[2], a4[3]);
+ break;
+ } else if (i == i_max) {
+ *p++ = ':';
+ i += cnt_max - 1;
+ p[0] = ':';
+ p[1] = '\0';
+ } else {
+ if (i)
+ *p++ = ':';
+ p += snprintf(p, 8, "%x", ntoh16(a[i]));
+ }
+ }
+
+ return buf;
+}
+
+#if !defined(BCMROMOFFLOAD_EXCLUDE_BCMUTILS_FUNCS)
+const unsigned char bcm_ctype[256] = {
+
+ _BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C, /* 0-7 */
+ _BCM_C, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C,
+ _BCM_C, /* 8-15 */
+ _BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C, /* 16-23 */
+ _BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C, /* 24-31 */
+ _BCM_S|_BCM_SP,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P, /* 32-39 */
+ _BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P, /* 40-47 */
+ _BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D, /* 48-55 */
+ _BCM_D,_BCM_D,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P, /* 56-63 */
+ _BCM_P, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X,
+ _BCM_U|_BCM_X, _BCM_U, /* 64-71 */
+ _BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U, /* 72-79 */
+ _BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U, /* 80-87 */
+ _BCM_U,_BCM_U,_BCM_U,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P, /* 88-95 */
+ _BCM_P, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X,
+ _BCM_L|_BCM_X, _BCM_L, /* 96-103 */
+ _BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L, /* 104-111 */
+ _BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L, /* 112-119 */
+ _BCM_L,_BCM_L,_BCM_L,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_C, /* 120-127 */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 128-143 */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 144-159 */
+ _BCM_S|_BCM_SP, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P,
+ _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, /* 160-175 */
+ _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P,
+ _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, /* 176-191 */
+ _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U,
+ _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, /* 192-207 */
+ _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_P, _BCM_U, _BCM_U, _BCM_U,
+ _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_L, /* 208-223 */
+ _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L,
+ _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, /* 224-239 */
+ _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_P, _BCM_L, _BCM_L, _BCM_L,
+ _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L /* 240-255 */
+};
+
+uint64
+bcm_strtoull(const char *cp, char **endp, uint base)
+{
+ uint64 result, last_result = 0, value;
+ bool minus;
+
+ minus = FALSE;
+
+ while (bcm_isspace(*cp))
+ cp++;
+
+ if (cp[0] == '+')
+ cp++;
+ else if (cp[0] == '-') {
+ minus = TRUE;
+ cp++;
+ }
+
+ if (base == 0) {
+ if (cp[0] == '0') {
+ if ((cp[1] == 'x') || (cp[1] == 'X')) {
+ base = 16;
+ cp = &cp[2];
+ } else {
+ base = 8;
+ cp = &cp[1];
+ }
+ } else
+ base = 10;
+ } else if (base == 16 && (cp[0] == '0') && ((cp[1] == 'x') || (cp[1] == 'X'))) {
+ cp = &cp[2];
+ }
+
+ result = 0;
+
+ while (bcm_isxdigit(*cp) &&
+ (value = bcm_isdigit(*cp) ? *cp-'0' : bcm_toupper(*cp)-'A'+10) < base) {
+ result = result*base + value;
+ /* Detected overflow */
+ if (result < last_result && !minus) {
+ if (endp) {
+ /* Go to the end of current number */
+ while (bcm_isxdigit(*cp)) {
+ cp++;
+ }
+ *endp = DISCARD_QUAL(cp, char);
+ }
+ return (uint64)-1;
+ }
+ last_result = result;
+ cp++;
+ }
+
+ if (minus)
+ result = (uint64)(-(int64)result);
+
+ if (endp)
+ *endp = DISCARD_QUAL(cp, char);
+
+ return (result);
+}
+
+ulong
+bcm_strtoul(const char *cp, char **endp, uint base)
+{
+ return (ulong) bcm_strtoull(cp, endp, base);
+}
+
+int
+bcm_atoi(const char *s)
+{
+ return (int)bcm_strtoul(s, NULL, 10);
+}
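+
+/* Illustrative usage sketch (not compiled): with base 0 the radix is
+ * inferred from the prefix, mirroring ANSI strtoul(). The literals below
+ * follow directly from the parsing rules above.
+ */
+#if 0
+static void
+bcm_strtoul_example(void)
+{
+ char *end;
+
+ (void)bcm_strtoul("0x1F", &end, 0); /* 31: "0x" prefix selects base 16 */
+ (void)bcm_strtoul("017", &end, 0); /* 15: leading "0" selects base 8 */
+ (void)bcm_strtoul("42", &end, 0); /* 42: no prefix, base 10 */
+ (void)bcm_atoi("-7"); /* -7: thin wrapper over base 10 */
+}
+#endif /* example */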
+
+/* return pointer to location of substring 'needle' in 'haystack' */
+char *
+bcmstrstr(const char *haystack, const char *needle)
+{
+ uint len, nlen;
+ uint i;
+
+ if ((haystack == NULL) || (needle == NULL))
+ return DISCARD_QUAL(haystack, char);
+
+ nlen = (uint)strlen(needle);
+ if (strlen(haystack) < nlen) {
+ return NULL;
+ }
+ len = (uint)strlen(haystack) - nlen + 1u;
+
+ for (i = 0u; i < len; i++)
+ if (memcmp(needle, &haystack[i], nlen) == 0)
+ return DISCARD_QUAL(&haystack[i], char);
+ return (NULL);
+}
+
+char *
+bcmstrnstr(const char *s, uint s_len, const char *substr, uint substr_len)
+{
+ for (; s_len >= substr_len; s++, s_len--)
+ if (strncmp(s, substr, substr_len) == 0)
+ return DISCARD_QUAL(s, char);
+
+ return NULL;
+}
+
+char *
+bcmstrcat(char *dest, const char *src)
+{
+ char *p;
+
+ p = dest + strlen(dest);
+
+ while ((*p++ = *src++) != '\0')
+ ;
+
+ return (dest);
+}
+
+char *
+bcmstrncat(char *dest, const char *src, uint size)
+{
+ char *endp;
+ char *p;
+
+ p = dest + strlen(dest);
+ endp = p + size;
+
+ while (p != endp && (*p++ = *src++) != '\0')
+ ;
+
+ return (dest);
+}
+
+/****************************************************************************
+* Function: bcmstrtok
+*
+* Purpose:
+* Tokenizes a string. This function is conceptually similar to ANSI C strtok(),
+* but keeps no hidden state, so it can be used on different strings or by
+* different callers at the same time. Each call modifies '*string' by
+* substituting a NUL character for the first delimiter that is encountered, and
+* updates 'string' to point to the char after the delimiter. Leading delimiters
+* are skipped.
+*
+* Parameters:
+* string (mod) Ptr to string ptr, updated by token.
+* delimiters (in) Set of delimiter characters.
+* tokdelim (out) Character that delimits the returned token. (May
+* be set to NULL if token delimiter is not required).
+*
+* Returns: Pointer to the next token found. NULL when no more tokens are found.
+*****************************************************************************
+*/
+char *
+bcmstrtok(char **string, const char *delimiters, char *tokdelim)
+{
+ unsigned char *str;
+ unsigned long map[8];
+ int count;
+ char *nextoken;
+
+ if (tokdelim != NULL) {
+ /* Prime the token delimiter */
+ *tokdelim = '\0';
+ }
+
+ /* Clear control map */
+ for (count = 0; count < 8; count++) {
+ map[count] = 0;
+ }
+
+ /* Set bits in delimiter table */
+ do {
+ map[*delimiters >> 5] |= (1 << (*delimiters & 31));
+ }
+ while (*delimiters++);
+
+ str = (unsigned char*)*string;
+
+ /* Find beginning of token (skip over leading delimiters). Note that
+ * there is no token iff this loop sets str to point to the terminal
+ * null (*str == '\0')
+ */
+ while (((map[*str >> 5] & (1 << (*str & 31))) && *str) || (*str == ' ')) {
+ str++;
+ }
+
+ nextoken = (char*)str;
+
+ /* Find the end of the token. If it is not the end of the string,
+ * put a null there.
+ */
+ for (; *str; str++) {
+ if (map[*str >> 5] & (1 << (*str & 31))) {
+ if (tokdelim != NULL) {
+ *tokdelim = *str;
+ }
+
+ *str++ = '\0';
+ break;
+ }
+ }
+
+ *string = (char*)str;
+
+ /* Determine if a token has been found. */
+ if (nextoken == (char *) str) {
+ return NULL;
+ }
+ else {
+ return nextoken;
+ }
+}
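+
+/* Illustrative usage sketch (not compiled): unlike strtok(), the caller's
+ * pointer is advanced explicitly, so no hidden state is kept between calls.
+ */
+#if 0
+static void
+bcmstrtok_example(void)
+{
+ char line[] = "key=value,next";
+ char *cursor = line;
+ char delim;
+ char *tok;
+
+ tok = bcmstrtok(&cursor, "=,", &delim); /* tok = "key", delim = '=' */
+ tok = bcmstrtok(&cursor, "=,", &delim); /* tok = "value", delim = ',' */
+ tok = bcmstrtok(&cursor, "=,", NULL); /* tok = "next" */
+ tok = bcmstrtok(&cursor, "=,", NULL); /* tok = NULL, input exhausted */
+ BCM_REFERENCE(tok);
+}
+#endif /* example */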
+
+#define xToLower(C) \
+ ((C >= 'A' && C <= 'Z') ? (char)((int)C - (int)'A' + (int)'a') : C)
+
+/****************************************************************************
+* Function: bcmstricmp
+*
+* Purpose: Compare two strings case-insensitively.
+*
+* Parameters: s1 (in) First string to compare.
+* s2 (in) Second string to compare.
+*
+* Returns: 0 if the two strings are equal, -1 if s1 < s2, and 1 if
+* s1 > s2, ignoring case.
+*****************************************************************************
+*/
+int
+bcmstricmp(const char *s1, const char *s2)
+{
+ char dc, sc;
+
+ while (*s2 && *s1) {
+ dc = xToLower(*s1);
+ sc = xToLower(*s2);
+ if (dc < sc) return -1;
+ if (dc > sc) return 1;
+ s1++;
+ s2++;
+ }
+
+ if (*s1 && !*s2) return 1;
+ if (!*s1 && *s2) return -1;
+ return 0;
+}
+
+/****************************************************************************
+* Function: bcmstrnicmp
+*
+* Purpose: Compare two strings case-insensitively, up to a max of 'cnt'
+* characters.
+*
+* Parameters: s1 (in) First string to compare.
+* s2 (in) Second string to compare.
+* cnt (in) Max characters to compare.
+*
+* Returns: 0 if the two strings are equal, -1 if s1 < s2, and 1 if
+* s1 > s2, ignoring case.
+*****************************************************************************
+*/
+int
+bcmstrnicmp(const char* s1, const char* s2, int cnt)
+{
+ char dc, sc;
+
+ while (*s2 && *s1 && cnt) {
+ dc = xToLower(*s1);
+ sc = xToLower(*s2);
+ if (dc < sc) return -1;
+ if (dc > sc) return 1;
+ s1++;
+ s2++;
+ cnt--;
+ }
+
+ if (!cnt) return 0;
+ if (*s1 && !*s2) return 1;
+ if (!*s1 && *s2) return -1;
+ return 0;
+}
+
+/* parse a xx:xx:xx:xx:xx:xx format ethernet address */
+int
+bcm_ether_atoe(const char *p, struct ether_addr *ea)
+{
+ int i = 0;
+ char *ep;
+
+ for (;;) {
+ ea->octet[i++] = (char) bcm_strtoul(p, &ep, 16);
+ p = ep;
+ if (!*p++ || i == 6)
+ break;
+ }
+
+ return (i == 6);
+}
+
+/* parse a nnn.nnn.nnn.nnn format IPV4 address */
+int
+bcm_atoipv4(const char *p, struct ipv4_addr *ip)
+{
+
+ int i = 0;
+ char *c;
+ for (;;) {
+ ip->addr[i++] = (uint8)bcm_strtoul(p, &c, 0);
+ if (*c++ != '.' || i == IPV4_ADDR_LEN)
+ break;
+ p = c;
+ }
+ return (i == IPV4_ADDR_LEN);
+}
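+
+/* Illustrative usage sketch (not compiled): both parsers return nonzero
+ * only when a full address was consumed. The addresses are arbitrary.
+ */
+#if 0
+static void
+bcm_addr_parse_example(void)
+{
+ struct ether_addr ea;
+ struct ipv4_addr ip;
+
+ if (bcm_ether_atoe("00:11:22:33:44:55", &ea)) {
+ /* ea.octet[] now holds {0x00, 0x11, 0x22, 0x33, 0x44, 0x55} */
+ }
+ if (bcm_atoipv4("192.168.1.1", &ip)) {
+ /* ip.addr[] now holds {192, 168, 1, 1} */
+ }
+}
+#endif /* example */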
+#endif /* !BCMROMOFFLOAD_EXCLUDE_BCMUTILS_FUNCS */
+
+const struct ether_addr ether_bcast = {{255, 255, 255, 255, 255, 255}};
+const struct ether_addr ether_null = {{0, 0, 0, 0, 0, 0}};
+const struct ether_addr ether_ipv6_mcast = {{0x33, 0x33, 0x00, 0x00, 0x00, 0x01}};
+
+int
+ether_isbcast(const void *ea)
+{
+ return (memcmp(ea, &ether_bcast, sizeof(struct ether_addr)) == 0);
+}
+
+int
+BCMPOSTTRAPFN(ether_isnulladdr)(const void *ea)
+{
+ const uint8 *ea8 = (const uint8 *)ea;
+ return !(ea8[5] || ea8[4] || ea8[3] || ea8[2] || ea8[1] || ea8[0]);
+}
+
+#if defined(CONFIG_USBRNDIS_RETAIL) || defined(NDIS_MINIPORT_DRIVER)
+/* registry routine buffer preparation utility functions:
+ * parameter order is like strlcpy, but returns count
+ * of bytes copied. Minimum bytes copied is null char(1)/wchar(2)
+ */
+ulong
+wchar2ascii(char *abuf, ushort *wbuf, ushort wbuflen, ulong abuflen)
+{
+ ulong copyct = 1;
+ ushort i;
+
+ if (abuflen == 0)
+ return 0;
+
+ /* wbuflen is in bytes */
+ wbuflen /= sizeof(ushort);
+
+ for (i = 0; i < wbuflen; ++i) {
+ if (--abuflen == 0)
+ break;
+ *abuf++ = (char) *wbuf++;
+ ++copyct;
+ }
+ *abuf = '\0';
+
+ return copyct;
+}
+#endif /* CONFIG_USBRNDIS_RETAIL || NDIS_MINIPORT_DRIVER */
+
+#ifdef BCM_OBJECT_TRACE
+
+#define BCM_OBJECT_MERGE_SAME_OBJ 0
+
+/* On Linux, objects may be added to / removed from the trace list in: */
+/* add: osl_alloc_skb dev_alloc_skb skb_realloc_headroom dhd_start_xmit */
+/* remove: osl_pktfree dev_kfree_skb netif_rx */
+
+#if defined(__linux__)
+#define BCM_OBJDBG_COUNT (1024 * 100)
+static spinlock_t dbgobj_lock;
+#define BCM_OBJDBG_LOCK_INIT() spin_lock_init(&dbgobj_lock)
+#define BCM_OBJDBG_LOCK_DESTROY()
+#define BCM_OBJDBG_LOCK spin_lock_irqsave
+#define BCM_OBJDBG_UNLOCK spin_unlock_irqrestore
+#else
+#define BCM_OBJDBG_COUNT (256)
+#define BCM_OBJDBG_LOCK_INIT()
+#define BCM_OBJDBG_LOCK_DESTROY()
+#define BCM_OBJDBG_LOCK(x, y)
+#define BCM_OBJDBG_UNLOCK(x, y)
+#endif /* else OS */
+
+#define BCM_OBJDBG_ADDTOHEAD 0
+#define BCM_OBJDBG_ADDTOTAIL 1
+
+#define BCM_OBJDBG_CALLER_LEN 32
+struct bcm_dbgobj {
+ struct bcm_dbgobj *prior;
+ struct bcm_dbgobj *next;
+ uint32 flag;
+ void *obj;
+ uint32 obj_sn;
+ uint32 obj_state;
+ uint32 line;
+ char caller[BCM_OBJDBG_CALLER_LEN];
+};
+
+static struct bcm_dbgobj *dbgobj_freehead = NULL;
+static struct bcm_dbgobj *dbgobj_freetail = NULL;
+static struct bcm_dbgobj *dbgobj_objhead = NULL;
+static struct bcm_dbgobj *dbgobj_objtail = NULL;
+
+static uint32 dbgobj_sn = 0;
+static int dbgobj_count = 0;
+static struct bcm_dbgobj bcm_dbg_objs[BCM_OBJDBG_COUNT];
+
+void
+bcm_object_trace_init(void)
+{
+ int i = 0;
+ BCM_OBJDBG_LOCK_INIT();
+ memset(&bcm_dbg_objs, 0x00, sizeof(struct bcm_dbgobj) * BCM_OBJDBG_COUNT);
+ dbgobj_freehead = &bcm_dbg_objs[0];
+ dbgobj_freetail = &bcm_dbg_objs[BCM_OBJDBG_COUNT - 1];
+
+ for (i = 0; i < BCM_OBJDBG_COUNT; ++i) {
+ bcm_dbg_objs[i].next = (i == (BCM_OBJDBG_COUNT - 1)) ?
+ dbgobj_freehead : &bcm_dbg_objs[i + 1];
+ bcm_dbg_objs[i].prior = (i == 0) ?
+ dbgobj_freetail : &bcm_dbg_objs[i - 1];
+ }
+}
+
+void
+bcm_object_trace_deinit(void)
+{
+ if (dbgobj_objhead || dbgobj_objtail) {
+ printf("bcm_object_trace_deinit: not all objects are released\n");
+ ASSERT(0);
+ }
+ BCM_OBJDBG_LOCK_DESTROY();
+}
+
+static void
+bcm_object_rm_list(struct bcm_dbgobj **head, struct bcm_dbgobj **tail,
+ struct bcm_dbgobj *dbgobj)
+{
+ if ((dbgobj == *head) && (dbgobj == *tail)) {
+ *head = NULL;
+ *tail = NULL;
+ } else if (dbgobj == *head) {
+ *head = (*head)->next;
+ } else if (dbgobj == *tail) {
+ *tail = (*tail)->prior;
+ }
+ dbgobj->next->prior = dbgobj->prior;
+ dbgobj->prior->next = dbgobj->next;
+}
+
+static void
+bcm_object_add_list(struct bcm_dbgobj **head, struct bcm_dbgobj **tail,
+ struct bcm_dbgobj *dbgobj, int addtotail)
+{
+ if (!(*head) && !(*tail)) {
+ *head = dbgobj;
+ *tail = dbgobj;
+ dbgobj->next = dbgobj;
+ dbgobj->prior = dbgobj;
+ } else if ((*head) && (*tail)) {
+ (*tail)->next = dbgobj;
+ (*head)->prior = dbgobj;
+ dbgobj->next = *head;
+ dbgobj->prior = *tail;
+ if (addtotail == BCM_OBJDBG_ADDTOTAIL)
+ *tail = dbgobj;
+ else
+ *head = dbgobj;
+ } else {
+ ASSERT(0); /* can't be this case */
+ }
+}
+
+static INLINE void
+bcm_object_movetoend(struct bcm_dbgobj **head, struct bcm_dbgobj **tail,
+ struct bcm_dbgobj *dbgobj, int movetotail)
+{
+ if ((*head) && (*tail)) {
+ if (movetotail == BCM_OBJDBG_ADDTOTAIL) {
+ if (dbgobj != (*tail)) {
+ bcm_object_rm_list(head, tail, dbgobj);
+ bcm_object_add_list(head, tail, dbgobj, movetotail);
+ }
+ } else {
+ if (dbgobj != (*head)) {
+ bcm_object_rm_list(head, tail, dbgobj);
+ bcm_object_add_list(head, tail, dbgobj, movetotail);
+ }
+ }
+ } else {
+ ASSERT(0); /* can't be this case */
+ }
+}
+
+void
+bcm_object_trace_opr(void *obj, uint32 opt, const char *caller, int line)
+{
+ struct bcm_dbgobj *dbgobj;
+ unsigned long flags;
+
+ BCM_REFERENCE(flags);
+ BCM_OBJDBG_LOCK(&dbgobj_lock, flags);
+
+ if (opt == BCM_OBJDBG_ADD_PKT ||
+ opt == BCM_OBJDBG_ADD) {
+ dbgobj = dbgobj_objtail;
+ while (dbgobj) {
+ if (dbgobj->obj == obj) {
+ printf("bcm_object_trace_opr: obj %p allocated from %s(%d),"
+ " allocate again from %s(%d)\n",
+ dbgobj->obj,
+ dbgobj->caller, dbgobj->line,
+ caller, line);
+ ASSERT(0);
+ goto EXIT;
+ }
+ dbgobj = dbgobj->prior;
+ if (dbgobj == dbgobj_objtail)
+ break;
+ }
+
+#if BCM_OBJECT_MERGE_SAME_OBJ
+ dbgobj = dbgobj_freetail;
+ while (dbgobj) {
+ if (dbgobj->obj == obj) {
+ goto FREED_ENTRY_FOUND;
+ }
+ dbgobj = dbgobj->prior;
+ if (dbgobj == dbgobj_freetail)
+ break;
+ }
+#endif /* BCM_OBJECT_MERGE_SAME_OBJ */
+
+ dbgobj = dbgobj_freehead;
+#if BCM_OBJECT_MERGE_SAME_OBJ
+FREED_ENTRY_FOUND:
+#endif /* BCM_OBJECT_MERGE_SAME_OBJ */
+ if (!dbgobj) {
+ printf("bcm_object_trace_opr: already got %d objects ?????????????????\n",
+ BCM_OBJDBG_COUNT);
+ ASSERT(0);
+ goto EXIT;
+ }
+
+ bcm_object_rm_list(&dbgobj_freehead, &dbgobj_freetail, dbgobj);
+ dbgobj->obj = obj;
+ strlcpy(dbgobj->caller, caller, sizeof(dbgobj->caller));
+ dbgobj->line = line;
+ dbgobj->flag = 0;
+ if (opt == BCM_OBJDBG_ADD_PKT) {
+ dbgobj->obj_sn = dbgobj_sn++;
+ dbgobj->obj_state = 0;
+ /* first 4 bytes is pkt sn */
+ if (((unsigned long)PKTTAG(obj)) & 0x3)
+ printf("pkt tag address not aligned by 4: %p\n", PKTTAG(obj));
+ *(uint32*)PKTTAG(obj) = dbgobj->obj_sn;
+ }
+ bcm_object_add_list(&dbgobj_objhead, &dbgobj_objtail, dbgobj,
+ BCM_OBJDBG_ADDTOTAIL);
+
+ dbgobj_count++;
+
+ } else if (opt == BCM_OBJDBG_REMOVE) {
+ dbgobj = dbgobj_objtail;
+ while (dbgobj) {
+ if (dbgobj->obj == obj) {
+ if (dbgobj->flag) {
+ printf("bcm_object_trace_opr: rm flagged obj %p"
+ " flag 0x%08x from %s(%d)\n",
+ obj, dbgobj->flag, caller, line);
+ }
+ bcm_object_rm_list(&dbgobj_objhead, &dbgobj_objtail, dbgobj);
+ bzero(dbgobj->caller, sizeof(dbgobj->caller));
+ strlcpy(dbgobj->caller, caller, sizeof(dbgobj->caller));
+ dbgobj->line = line;
+ bcm_object_add_list(&dbgobj_freehead, &dbgobj_freetail, dbgobj,
+ BCM_OBJDBG_ADDTOTAIL);
+ dbgobj_count--;
+ goto EXIT;
+ }
+ dbgobj = dbgobj->prior;
+ if (dbgobj == dbgobj_objtail)
+ break;
+ }
+
+ dbgobj = dbgobj_freetail;
+ while (dbgobj && dbgobj->obj) {
+ if (dbgobj->obj == obj) {
+ printf("bcm_object_trace_opr: obj %p already freed"
+ " from from %s(%d),"
+ " try free again from %s(%d)\n",
+ obj,
+ dbgobj->caller, dbgobj->line,
+ caller, line);
+ //ASSERT(0); /* release same obj more than one time? */
+ goto EXIT;
+ }
+ dbgobj = dbgobj->prior;
+ if (dbgobj == dbgobj_freetail)
+ break;
+ }
+
+ printf("bcm_object_trace_opr: ################### release none-existing"
+ " obj %p from %s(%d)\n",
+ obj, caller, line);
+ //ASSERT(0); /* release same obj more than one time? */
+
+ }
+
+EXIT:
+ BCM_OBJDBG_UNLOCK(&dbgobj_lock, flags);
+ return;
+}
+
+void
+bcm_object_trace_upd(void *obj, void *obj_new)
+{
+ struct bcm_dbgobj *dbgobj;
+ unsigned long flags;
+
+ BCM_REFERENCE(flags);
+ BCM_OBJDBG_LOCK(&dbgobj_lock, flags);
+
+ dbgobj = dbgobj_objtail;
+ while (dbgobj) {
+ if (dbgobj->obj == obj) {
+ dbgobj->obj = obj_new;
+ if (dbgobj != dbgobj_objtail) {
+ bcm_object_movetoend(&dbgobj_objhead, &dbgobj_objtail,
+ dbgobj, BCM_OBJDBG_ADDTOTAIL);
+ }
+ goto EXIT;
+ }
+ dbgobj = dbgobj->prior;
+ if (dbgobj == dbgobj_objtail)
+ break;
+ }
+
+EXIT:
+ BCM_OBJDBG_UNLOCK(&dbgobj_lock, flags);
+ return;
+}
+
+void
+bcm_object_trace_chk(void *obj, uint32 chksn, uint32 sn,
+ const char *caller, int line)
+{
+ struct bcm_dbgobj *dbgobj;
+ unsigned long flags;
+
+ BCM_REFERENCE(flags);
+ BCM_OBJDBG_LOCK(&dbgobj_lock, flags);
+
+ dbgobj = dbgobj_objtail;
+ while (dbgobj) {
+ if ((dbgobj->obj == obj) &&
+ ((!chksn) || (dbgobj->obj_sn == sn))) {
+#if 0
+ printf("bcm_object_trace_chk: (%s:%d) obj %p was allocated from %s(%d)\n",
+ caller, line,
+ dbgobj->obj, dbgobj->caller, dbgobj->line);
+#endif /* #if 0 */
+ if (dbgobj != dbgobj_objtail) {
+ bcm_object_movetoend(&dbgobj_objhead, &dbgobj_objtail,
+ dbgobj, BCM_OBJDBG_ADDTOTAIL);
+ }
+ goto EXIT;
+ }
+ dbgobj = dbgobj->prior;
+ if (dbgobj == dbgobj_objtail)
+ break;
+ }
+
+ dbgobj = dbgobj_freetail;
+ while (dbgobj) {
+ if ((dbgobj->obj == obj) &&
+ ((!chksn) || (dbgobj->obj_sn == sn))) {
+ printf("bcm_object_trace_chk: (%s:%d) obj %p (sn %d state %d)"
+ " was freed from %s(%d)\n",
+ caller, line,
+ dbgobj->obj, dbgobj->obj_sn, dbgobj->obj_state,
+ dbgobj->caller, dbgobj->line);
+ goto EXIT;
+ }
+ else if (dbgobj->obj == NULL) {
+ break;
+ }
+ dbgobj = dbgobj->prior;
+ if (dbgobj == dbgobj_freetail)
+ break;
+ }
+
+ printf("bcm_object_trace_chk: obj %p not found, check from %s(%d), chksn %s, sn %d\n",
+ obj, caller, line, chksn ? "yes" : "no", sn);
+ dbgobj = dbgobj_objtail;
+ while (dbgobj) {
+ printf("bcm_object_trace_chk: (%s:%d) obj %p sn %d was allocated from %s(%d)\n",
+ caller, line,
+ dbgobj->obj, dbgobj->obj_sn, dbgobj->caller, dbgobj->line);
+ dbgobj = dbgobj->prior;
+ if (dbgobj == dbgobj_objtail)
+ break;
+ }
+
+EXIT:
+ BCM_OBJDBG_UNLOCK(&dbgobj_lock, flags);
+ return;
+}
+
+void
+bcm_object_feature_set(void *obj, uint32 type, uint32 value)
+{
+ struct bcm_dbgobj *dbgobj;
+ unsigned long flags;
+
+ BCM_REFERENCE(flags);
+ BCM_OBJDBG_LOCK(&dbgobj_lock, flags);
+
+ dbgobj = dbgobj_objtail;
+ while (dbgobj) {
+ if (dbgobj->obj == obj) {
+ if (type == BCM_OBJECT_FEATURE_FLAG) {
+ if (value & BCM_OBJECT_FEATURE_CLEAR)
+ dbgobj->flag &= ~(value);
+ else
+ dbgobj->flag |= (value);
+ } else if (type == BCM_OBJECT_FEATURE_PKT_STATE) {
+ dbgobj->obj_state = value;
+ }
+ if (dbgobj != dbgobj_objtail) {
+ bcm_object_movetoend(&dbgobj_objhead, &dbgobj_objtail,
+ dbgobj, BCM_OBJDBG_ADDTOTAIL);
+ }
+ goto EXIT;
+ }
+ dbgobj = dbgobj->prior;
+ if (dbgobj == dbgobj_objtail)
+ break;
+ }
+
+ printf("bcm_object_feature_set: obj %p not found in active list\n", obj);
+ ASSERT(0);
+
+EXIT:
+ BCM_OBJDBG_UNLOCK(&dbgobj_lock, flags);
+ return;
+}
+
+int
+bcm_object_feature_get(void *obj, uint32 type, uint32 value)
+{
+ int rtn = 0;
+ struct bcm_dbgobj *dbgobj;
+ unsigned long flags;
+
+ BCM_REFERENCE(flags);
+ BCM_OBJDBG_LOCK(&dbgobj_lock, flags);
+
+ dbgobj = dbgobj_objtail;
+ while (dbgobj) {
+ if (dbgobj->obj == obj) {
+ if (type == BCM_OBJECT_FEATURE_FLAG) {
+ rtn = (dbgobj->flag & value) & (~BCM_OBJECT_FEATURE_CLEAR);
+ }
+ if (dbgobj != dbgobj_objtail) {
+ bcm_object_movetoend(&dbgobj_objhead, &dbgobj_objtail,
+ dbgobj, BCM_OBJDBG_ADDTOTAIL);
+ }
+ goto EXIT;
+ }
+ dbgobj = dbgobj->prior;
+ if (dbgobj == dbgobj_objtail)
+ break;
+ }
+
+ printf("bcm_object_feature_get: obj %p not found in active list\n", obj);
+ ASSERT(0);
+
+EXIT:
+ BCM_OBJDBG_UNLOCK(&dbgobj_lock, flags);
+ return rtn;
+}
+
+#endif /* BCM_OBJECT_TRACE */
+
+uint8 *
+BCMPOSTTRAPFN(bcm_write_tlv)(int type, const void *data, uint datalen, uint8 *dst)
+{
+ uint8 *new_dst = dst;
+ bcm_tlv_t *dst_tlv = (bcm_tlv_t *)dst;
+
+ /* dst buffer should always be valid */
+ ASSERT(dst);
+
+ /* data len must be within valid range */
+ ASSERT((datalen <= BCM_TLV_MAX_DATA_SIZE));
+
+ /* source data buffer pointer should be valid, unless datalen is 0
+ * meaning no data with this TLV
+ */
+ ASSERT((data != NULL) || (datalen == 0));
+
+ /* only do work if the inputs are valid
+ * - must have a dst to write to AND
+ * - datalen must be within range AND
+ * - the source data pointer must be non-NULL if datalen is non-zero
+ * (this last condition detects datalen > 0 with a NULL data pointer)
+ */
+ if ((dst != NULL) &&
+ ((datalen <= BCM_TLV_MAX_DATA_SIZE)) &&
+ ((data != NULL) || (datalen == 0u))) {
+
+ /* write type, len fields */
+ dst_tlv->id = (uint8)type;
+ dst_tlv->len = (uint8)datalen;
+
+ /* if data is present, copy to the output buffer and update
+ * pointer to output buffer
+ */
+ if (datalen > 0u) {
+
+ memcpy(dst_tlv->data, data, datalen);
+ }
+
+ /* update the output destination pointer to point past
+ * the TLV written
+ */
+ new_dst = dst + BCM_TLV_HDR_SIZE + datalen;
+ }
+
+ return (new_dst);
+}
+
+uint8 *
+bcm_write_tlv_ext(uint8 type, uint8 ext, const void *data, uint8 datalen, uint8 *dst)
+{
+ uint8 *new_dst = dst;
+ bcm_tlv_ext_t *dst_tlv = (bcm_tlv_ext_t *)dst;
+
+ /* dst buffer should always be valid */
+ ASSERT(dst);
+
+ /* data len must be within valid range */
+ ASSERT(datalen <= BCM_TLV_EXT_MAX_DATA_SIZE);
+
+ /* source data buffer pointer should be valid, unless datalen is 0
+ * meaning no data with this TLV
+ */
+ ASSERT((data != NULL) || (datalen == 0));
+
+ /* only do work if the inputs are valid
+ * - must have a dst to write to AND
+ * - datalen must be within range AND
+ * - the source data pointer must be non-NULL if datalen is non-zero
+ * (this last condition detects datalen > 0 with a NULL data pointer)
+ */
+ if ((dst != NULL) &&
+ (datalen <= BCM_TLV_EXT_MAX_DATA_SIZE) &&
+ ((data != NULL) || (datalen == 0))) {
+
+ /* write type, len fields */
+ dst_tlv->id = (uint8)type;
+ dst_tlv->ext = ext;
+ dst_tlv->len = 1 + (uint8)datalen;
+
+ /* if data is present, copy to the output buffer and update
+ * pointer to output buffer
+ */
+ if (datalen > 0) {
+ memcpy(dst_tlv->data, data, datalen);
+ }
+
+ /* update the output destination pointer to point past
+ * the TLV written
+ */
+ new_dst = dst + BCM_TLV_EXT_HDR_SIZE + datalen;
+ }
+
+ return (new_dst);
+}
+
+uint8 *
+BCMPOSTTRAPFN(bcm_write_tlv_safe)(int type, const void *data, uint datalen, uint8 *dst,
+ uint dst_maxlen)
+{
+ uint8 *new_dst = dst;
+
+ if ((datalen <= BCM_TLV_MAX_DATA_SIZE)) {
+
+ /* if len + tlv hdr len is more than destlen, don't do anything
+ * just return the buffer untouched
+ */
+ if ((datalen + BCM_TLV_HDR_SIZE) <= dst_maxlen) {
+
+ new_dst = bcm_write_tlv(type, data, datalen, dst);
+ }
+ }
+
+ return (new_dst);
+}
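+
+/* Illustrative usage sketch (not compiled): the _safe variant leaves the
+ * buffer untouched and returns the original pointer when the TLV does not
+ * fit, so the "did dst advance?" test doubles as the error check. The
+ * element id 0 is used purely for illustration.
+ */
+#if 0
+static void
+bcm_write_tlv_example(void)
+{
+ uint8 buf[32];
+ uint8 *p = buf;
+ const char data[] = "example";
+
+ p = bcm_write_tlv_safe(0, data, sizeof(data) - 1, p, sizeof(buf));
+ if (p == buf) {
+ /* nothing written: datalen + header did not fit in dst_maxlen */
+ }
+ /* buf now holds 00 07 'e' 'x' 'a' 'm' 'p' 'l' 'e'; p points past it */
+}
+#endif /* example */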
+
+uint8 *
+bcm_copy_tlv(const void *src, uint8 *dst)
+{
+ uint8 *new_dst = dst;
+ const bcm_tlv_t *src_tlv = (const bcm_tlv_t *)src;
+ uint totlen;
+
+ ASSERT(dst && src);
+ if (dst && src) {
+
+ totlen = BCM_TLV_HDR_SIZE + src_tlv->len;
+ memcpy(dst, src_tlv, totlen);
+ new_dst = dst + totlen;
+ }
+
+ return (new_dst);
+}
+
+uint8 *
+bcm_copy_tlv_safe(const void *src, uint8 *dst, uint dst_maxlen)
+{
+ uint8 *new_dst = dst;
+ const bcm_tlv_t *src_tlv = (const bcm_tlv_t *)src;
+
+ ASSERT(src);
+ if (bcm_valid_tlv(src_tlv, dst_maxlen)) {
+ new_dst = bcm_copy_tlv(src, dst);
+ }
+
+ return (new_dst);
+}
+
+#if !defined(BCMROMOFFLOAD_EXCLUDE_BCMUTILS_FUNCS)
+/*******************************************************************************
+ * crc8
+ *
+ * Computes a crc8 over the input data using the polynomial:
+ *
+ * x^8 + x^7 + x^6 + x^4 + x^2 + 1
+ *
+ * The caller provides the initial value (either CRC8_INIT_VALUE
+ * or the previous returned value) to allow for processing of
+ * discontiguous blocks of data. When generating the CRC the
+ * caller is responsible for complementing the final return value
+ * and inserting it into the byte stream. When checking, a final
+ * return value of CRC8_GOOD_VALUE indicates a valid CRC.
+ *
+ * Reference: Dallas Semiconductor Application Note 27
+ * Williams, Ross N., "A Painless Guide to CRC Error Detection Algorithms",
+ * ver 3, Aug 1993, ross@guest.adelaide.edu.au, Rocksoft Pty Ltd.,
+ * ftp://ftp.rocksoft.com/clients/rocksoft/papers/crc_v3.txt
+ *
+ * ****************************************************************************
+ */
+
+static const uint8 crc8_table[256] = {
+ 0x00, 0xF7, 0xB9, 0x4E, 0x25, 0xD2, 0x9C, 0x6B,
+ 0x4A, 0xBD, 0xF3, 0x04, 0x6F, 0x98, 0xD6, 0x21,
+ 0x94, 0x63, 0x2D, 0xDA, 0xB1, 0x46, 0x08, 0xFF,
+ 0xDE, 0x29, 0x67, 0x90, 0xFB, 0x0C, 0x42, 0xB5,
+ 0x7F, 0x88, 0xC6, 0x31, 0x5A, 0xAD, 0xE3, 0x14,
+ 0x35, 0xC2, 0x8C, 0x7B, 0x10, 0xE7, 0xA9, 0x5E,
+ 0xEB, 0x1C, 0x52, 0xA5, 0xCE, 0x39, 0x77, 0x80,
+ 0xA1, 0x56, 0x18, 0xEF, 0x84, 0x73, 0x3D, 0xCA,
+ 0xFE, 0x09, 0x47, 0xB0, 0xDB, 0x2C, 0x62, 0x95,
+ 0xB4, 0x43, 0x0D, 0xFA, 0x91, 0x66, 0x28, 0xDF,
+ 0x6A, 0x9D, 0xD3, 0x24, 0x4F, 0xB8, 0xF6, 0x01,
+ 0x20, 0xD7, 0x99, 0x6E, 0x05, 0xF2, 0xBC, 0x4B,
+ 0x81, 0x76, 0x38, 0xCF, 0xA4, 0x53, 0x1D, 0xEA,
+ 0xCB, 0x3C, 0x72, 0x85, 0xEE, 0x19, 0x57, 0xA0,
+ 0x15, 0xE2, 0xAC, 0x5B, 0x30, 0xC7, 0x89, 0x7E,
+ 0x5F, 0xA8, 0xE6, 0x11, 0x7A, 0x8D, 0xC3, 0x34,
+ 0xAB, 0x5C, 0x12, 0xE5, 0x8E, 0x79, 0x37, 0xC0,
+ 0xE1, 0x16, 0x58, 0xAF, 0xC4, 0x33, 0x7D, 0x8A,
+ 0x3F, 0xC8, 0x86, 0x71, 0x1A, 0xED, 0xA3, 0x54,
+ 0x75, 0x82, 0xCC, 0x3B, 0x50, 0xA7, 0xE9, 0x1E,
+ 0xD4, 0x23, 0x6D, 0x9A, 0xF1, 0x06, 0x48, 0xBF,
+ 0x9E, 0x69, 0x27, 0xD0, 0xBB, 0x4C, 0x02, 0xF5,
+ 0x40, 0xB7, 0xF9, 0x0E, 0x65, 0x92, 0xDC, 0x2B,
+ 0x0A, 0xFD, 0xB3, 0x44, 0x2F, 0xD8, 0x96, 0x61,
+ 0x55, 0xA2, 0xEC, 0x1B, 0x70, 0x87, 0xC9, 0x3E,
+ 0x1F, 0xE8, 0xA6, 0x51, 0x3A, 0xCD, 0x83, 0x74,
+ 0xC1, 0x36, 0x78, 0x8F, 0xE4, 0x13, 0x5D, 0xAA,
+ 0x8B, 0x7C, 0x32, 0xC5, 0xAE, 0x59, 0x17, 0xE0,
+ 0x2A, 0xDD, 0x93, 0x64, 0x0F, 0xF8, 0xB6, 0x41,
+ 0x60, 0x97, 0xD9, 0x2E, 0x45, 0xB2, 0xFC, 0x0B,
+ 0xBE, 0x49, 0x07, 0xF0, 0x9B, 0x6C, 0x22, 0xD5,
+ 0xF4, 0x03, 0x4D, 0xBA, 0xD1, 0x26, 0x68, 0x9F
+};
+
+#define CRC_INNER_LOOP(n, c, x) \
+ (c) = ((c) >> 8) ^ crc##n##_table[((c) ^ (x)) & 0xff]
+
+uint8
+hndcrc8(
+ const uint8 *pdata, /* pointer to array of data to process */
+ uint nbytes, /* number of input data bytes to process */
+ uint8 crc /* either CRC8_INIT_VALUE or previous return value */
+)
+{
+ /* hard code the crc loop instead of using CRC_INNER_LOOP macro
+ * to avoid the undefined and unnecessary (uint8 >> 8) operation.
+ */
+ while (nbytes-- > 0)
+ crc = crc8_table[(crc ^ *pdata++) & 0xff];
+
+ return crc;
+}
+
+/*******************************************************************************
+ * crc16
+ *
+ * Computes a crc16 over the input data using the polynomial:
+ *
+ * x^16 + x^12 + x^5 + 1
+ *
+ * The caller provides the initial value (either CRC16_INIT_VALUE
+ * or the previous returned value) to allow for processing of
+ * discontiguous blocks of data. When generating the CRC the
+ * caller is responsible for complementing the final return value
+ * and inserting it into the byte stream. When checking, a final
+ * return value of CRC16_GOOD_VALUE indicates a valid CRC.
+ *
+ * Reference: Dallas Semiconductor Application Note 27
+ * Williams, Ross N., "A Painless Guide to CRC Error Detection Algorithms",
+ * ver 3, Aug 1993, ross@guest.adelaide.edu.au, Rocksoft Pty Ltd.,
+ * ftp://ftp.rocksoft.com/clients/rocksoft/papers/crc_v3.txt
+ *
+ * ****************************************************************************
+ */
+
+static const uint16 crc16_table[256] = {
+ 0x0000, 0x1189, 0x2312, 0x329B, 0x4624, 0x57AD, 0x6536, 0x74BF,
+ 0x8C48, 0x9DC1, 0xAF5A, 0xBED3, 0xCA6C, 0xDBE5, 0xE97E, 0xF8F7,
+ 0x1081, 0x0108, 0x3393, 0x221A, 0x56A5, 0x472C, 0x75B7, 0x643E,
+ 0x9CC9, 0x8D40, 0xBFDB, 0xAE52, 0xDAED, 0xCB64, 0xF9FF, 0xE876,
+ 0x2102, 0x308B, 0x0210, 0x1399, 0x6726, 0x76AF, 0x4434, 0x55BD,
+ 0xAD4A, 0xBCC3, 0x8E58, 0x9FD1, 0xEB6E, 0xFAE7, 0xC87C, 0xD9F5,
+ 0x3183, 0x200A, 0x1291, 0x0318, 0x77A7, 0x662E, 0x54B5, 0x453C,
+ 0xBDCB, 0xAC42, 0x9ED9, 0x8F50, 0xFBEF, 0xEA66, 0xD8FD, 0xC974,
+ 0x4204, 0x538D, 0x6116, 0x709F, 0x0420, 0x15A9, 0x2732, 0x36BB,
+ 0xCE4C, 0xDFC5, 0xED5E, 0xFCD7, 0x8868, 0x99E1, 0xAB7A, 0xBAF3,
+ 0x5285, 0x430C, 0x7197, 0x601E, 0x14A1, 0x0528, 0x37B3, 0x263A,
+ 0xDECD, 0xCF44, 0xFDDF, 0xEC56, 0x98E9, 0x8960, 0xBBFB, 0xAA72,
+ 0x6306, 0x728F, 0x4014, 0x519D, 0x2522, 0x34AB, 0x0630, 0x17B9,
+ 0xEF4E, 0xFEC7, 0xCC5C, 0xDDD5, 0xA96A, 0xB8E3, 0x8A78, 0x9BF1,
+ 0x7387, 0x620E, 0x5095, 0x411C, 0x35A3, 0x242A, 0x16B1, 0x0738,
+ 0xFFCF, 0xEE46, 0xDCDD, 0xCD54, 0xB9EB, 0xA862, 0x9AF9, 0x8B70,
+ 0x8408, 0x9581, 0xA71A, 0xB693, 0xC22C, 0xD3A5, 0xE13E, 0xF0B7,
+ 0x0840, 0x19C9, 0x2B52, 0x3ADB, 0x4E64, 0x5FED, 0x6D76, 0x7CFF,
+ 0x9489, 0x8500, 0xB79B, 0xA612, 0xD2AD, 0xC324, 0xF1BF, 0xE036,
+ 0x18C1, 0x0948, 0x3BD3, 0x2A5A, 0x5EE5, 0x4F6C, 0x7DF7, 0x6C7E,
+ 0xA50A, 0xB483, 0x8618, 0x9791, 0xE32E, 0xF2A7, 0xC03C, 0xD1B5,
+ 0x2942, 0x38CB, 0x0A50, 0x1BD9, 0x6F66, 0x7EEF, 0x4C74, 0x5DFD,
+ 0xB58B, 0xA402, 0x9699, 0x8710, 0xF3AF, 0xE226, 0xD0BD, 0xC134,
+ 0x39C3, 0x284A, 0x1AD1, 0x0B58, 0x7FE7, 0x6E6E, 0x5CF5, 0x4D7C,
+ 0xC60C, 0xD785, 0xE51E, 0xF497, 0x8028, 0x91A1, 0xA33A, 0xB2B3,
+ 0x4A44, 0x5BCD, 0x6956, 0x78DF, 0x0C60, 0x1DE9, 0x2F72, 0x3EFB,
+ 0xD68D, 0xC704, 0xF59F, 0xE416, 0x90A9, 0x8120, 0xB3BB, 0xA232,
+ 0x5AC5, 0x4B4C, 0x79D7, 0x685E, 0x1CE1, 0x0D68, 0x3FF3, 0x2E7A,
+ 0xE70E, 0xF687, 0xC41C, 0xD595, 0xA12A, 0xB0A3, 0x8238, 0x93B1,
+ 0x6B46, 0x7ACF, 0x4854, 0x59DD, 0x2D62, 0x3CEB, 0x0E70, 0x1FF9,
+ 0xF78F, 0xE606, 0xD49D, 0xC514, 0xB1AB, 0xA022, 0x92B9, 0x8330,
+ 0x7BC7, 0x6A4E, 0x58D5, 0x495C, 0x3DE3, 0x2C6A, 0x1EF1, 0x0F78
+};
+
+uint16
+hndcrc16(
+ const uint8 *pdata, /* pointer to array of data to process */
+ uint nbytes, /* number of input data bytes to process */
+ uint16 crc /* either CRC16_INIT_VALUE or previous return value */
+)
+{
+ while (nbytes-- > 0)
+ CRC_INNER_LOOP(16, crc, *pdata++);
+ return crc;
+}
+
+static const uint32 crc32_table[256] = {
+ 0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA,
+ 0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3,
+ 0x0EDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988,
+ 0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91,
+ 0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE,
+ 0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7,
+ 0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC,
+ 0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5,
+ 0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172,
+ 0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B,
+ 0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940,
+ 0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59,
+ 0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116,
+ 0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F,
+ 0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924,
+ 0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D,
+ 0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A,
+ 0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433,
+ 0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818,
+ 0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01,
+ 0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E,
+ 0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457,
+ 0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA, 0xFCB9887C,
+ 0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65,
+ 0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2,
+ 0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB,
+ 0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0,
+ 0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9,
+ 0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086,
+ 0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F,
+ 0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4,
+ 0x59B33D17, 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD,
+ 0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A,
+ 0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683,
+ 0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8,
+ 0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1,
+ 0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE,
+ 0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7,
+ 0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC,
+ 0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5,
+ 0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252,
+ 0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B,
+ 0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60,
+ 0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79,
+ 0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236,
+ 0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F,
+ 0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04,
+ 0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D,
+ 0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A,
+ 0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713,
+ 0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38,
+ 0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21,
+ 0x86D3D2D4, 0xF1D4E242, 0x68DDB3F8, 0x1FDA836E,
+ 0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777,
+ 0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C,
+ 0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45,
+ 0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2,
+ 0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB,
+ 0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0,
+ 0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9,
+ 0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6,
+ 0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF,
+ 0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94,
+ 0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D
+};
+
+/*
+ * crc input is CRC32_INIT_VALUE for a fresh start, or previous return value if
+ * accumulating over multiple pieces.
+ */
+uint32
+hndcrc32(const uint8 *pdata, uint nbytes, uint32 crc)
+{
+ const uint8 *pend;
+ pend = pdata + nbytes;
+ while (pdata < pend)
+ CRC_INNER_LOOP(32, crc, *pdata++);
+
+ return crc;
+}
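+
+/* Illustrative usage sketch (not compiled): per the protocol described
+ * above, the CRC can be accumulated over discontiguous pieces, and the
+ * generator complements the final value before inserting it in the stream.
+ */
+#if 0
+static uint32
+hndcrc32_example(const uint8 *hdr, uint hdr_len, const uint8 *body, uint body_len)
+{
+ uint32 crc = CRC32_INIT_VALUE;
+
+ crc = hndcrc32(hdr, hdr_len, crc); /* first piece */
+ crc = hndcrc32(body, body_len, crc); /* continue where we left off */
+ return ~crc; /* complement before insertion */
+}
+#endif /* example */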
+
+#ifdef NOT_YET
+#define CLEN 1499 /* CRC Length */
+#define CBUFSIZ (CLEN+4)
+#define CNBUFS 5 /* # of bufs */
+
+void
+testcrc32(void)
+{
+ uint j, k, l;
+ uint8 *buf;
+ uint len[CNBUFS];
+ uint32 crcr;
+ uint32 crc32tv[CNBUFS] =
+ {0xd2cb1faa, 0xd385c8fa, 0xf5b4f3f3, 0x55789e20, 0x00343110};
+
+ ASSERT((buf = MALLOC(CBUFSIZ*CNBUFS)) != NULL);
+
+ /* step through all possible alignments */
+ for (l = 0; l <= 4; l++) {
+ for (j = 0; j < CNBUFS; j++) {
+ len[j] = CLEN;
+ for (k = 0; k < len[j]; k++)
+ *(buf + j*CBUFSIZ + (k+l)) = (j+k) & 0xff;
+ }
+
+ for (j = 0; j < CNBUFS; j++) {
+ crcr = crc32(buf + j*CBUFSIZ + l, len[j], CRC32_INIT_VALUE);
+ ASSERT(crcr == crc32tv[j]);
+ }
+ }
+
+ MFREE(buf, CBUFSIZ*CNBUFS);
+ return;
+}
+#endif /* NOT_YET */
+
+/*
+ * Advance from the current 1-byte tag/1-byte length/variable-length value
+ * triple, to the next, returning a pointer to the next.
+ * If the current or next TLV is invalid (does not fit in given buffer length),
+ * NULL is returned.
+ * *buflen is not modified if the TLV elt parameter is invalid, or is decremented
+ * by the TLV parameter's length if it is valid.
+ */
+bcm_tlv_t *
+bcm_next_tlv(const bcm_tlv_t *elt, uint *buflen)
+{
+ uint len;
+
+ COV_TAINTED_DATA_SINK(buflen);
+ COV_NEG_SINK(buflen);
+
+ /* validate current elt */
+ if (!bcm_valid_tlv(elt, *buflen)) {
+ return NULL;
+ }
+
+ /* advance to next elt */
+ len = TLV_HDR_LEN + elt->len;
+ elt = (const bcm_tlv_t*)((const uint8 *)elt + len);
+
+#if defined(__COVERITY__)
+ /* The 'len' value is tainted in Coverity because it is read from the tainted data pointed
+ * to by 'elt'. However, bcm_valid_tlv() verifies that the elt pointer is a valid element,
+ * so its length, len = (TLV_HDR_LEN + elt->len), is in the bounds of the buffer.
+ * Clearing the tainted attribute of 'len' for Coverity.
+ */
+ __coverity_tainted_data_sanitize__(len);
+ if (len > *buflen) {
+ return NULL;
+ }
+#endif /* __COVERITY__ */
+
+ *buflen -= len;
+
+ /* validate next elt */
+ if (!bcm_valid_tlv(elt, *buflen)) {
+ return NULL;
+ }
+
+ COV_TAINTED_DATA_ARG(elt);
+
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ return (bcm_tlv_t *)(elt);
+ GCC_DIAGNOSTIC_POP();
+}
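+
+/* Illustrative walk sketch (not compiled): validate the first element,
+ * then let bcm_next_tlv() reject the remainder when it runs out of bounds.
+ */
+#if 0
+static void
+bcm_tlv_walk_example(uint8 *buf, uint buflen)
+{
+ bcm_tlv_t *elt = (bcm_tlv_t *)buf;
+
+ if (!bcm_valid_tlv(elt, buflen))
+ return;
+ while (elt != NULL) {
+ /* elt->id, elt->len, elt->data are all in bounds here */
+ elt = bcm_next_tlv(elt, &buflen);
+ }
+}
+#endif /* example */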
+
+/**
+ * Advance a const tlv buffer pointer and length up to the given tlv element pointer
+ * 'elt'. The function checks that elt is a valid tlv; the elt pointer and data
+ * are all in the range of the buffer/length.
+ *
+ * @param elt pointer to a valid bcm_tlv_t in the buffer
+ * @param buffer pointer to a tlv buffer
+ * @param buflen length of the buffer in bytes
+ *
+ * On return, if elt is not a tlv in the buffer bounds, the *buffer parameter
+ * will be set to NULL and *buflen parameter will be set to zero. Otherwise,
+ * *buffer will point to elt, and *buflen will have been adjusted by the
+ * difference between *buffer and elt.
+ */
+void
+bcm_tlv_buffer_advance_to(const bcm_tlv_t *elt, const uint8 **buffer, uint *buflen)
+{
+ uint new_buflen;
+ const uint8 *new_buffer;
+
+ /* model the input length value as a tainted and negative sink so
+ * Coverity will complain about unvalidated or possibly negative length values
+ */
+ COV_TAINTED_DATA_SINK(*buflen);
+ COV_NEG_SINK(*buflen);
+
+ new_buffer = (const uint8*)elt;
+
+ /* make sure the input buffer pointer is non-null, that (buffer + buflen) does not wrap,
+ * and that the elt pointer is in the range of [buffer, buffer + buflen]
+ */
+ if ((*buffer != NULL) &&
+ ((uintptr)*buffer < ((uintptr)*buffer + *buflen)) &&
+ (new_buffer >= *buffer) &&
+ (new_buffer < (*buffer + *buflen))) {
+ /* delta between buffer and new_buffer is <= *buflen, so truncating cast to uint
+ * from ptrdiff is ok
+ */
+ uint delta = (uint)(new_buffer - *buffer);
+
+ /* New buffer length is old len minus the delta from the buffer start to elt.
+ * The check just above guarantees that the subtraction does not underflow.
+ */
+ new_buflen = *buflen - delta;
+
+ /* validate current elt */
+ if (bcm_valid_tlv(elt, new_buflen)) {
+ /* All good, so update the input/output parameters */
+ *buffer = new_buffer;
+ *buflen = new_buflen;
+ return;
+ }
+ }
+
+ /* something did not check out, clear out the buffer info */
+ *buffer = NULL;
+ *buflen = 0;
+
+ return;
+}
+
+/**
+ * Advance a const tlv buffer pointer and length past the given tlv element pointer
+ * 'elt'. The function checks that elt is a valid tlv; the elt pointer and data
+ * are all in the range of the buffer/length. The function also checks that the
+ * remaining buffer starts with a valid tlv.
+ *
+ * @param elt pointer to a valid bcm_tlv_t in the buffer
+ * @param buffer pointer to a tlv buffer
+ * @param buflen length of the buffer in bytes
+ *
+ * On return, if elt is not a tlv in the buffer bounds, or the remaining buffer
+ * following the elt does not begin with a tlv in the buffer bounds, the *buffer
+ * parameter will be set to NULL and *buflen parameter will be set to zero.
+ * Otherwise, *buffer will point to the first byte past elt, and *buflen will
+ * have the remaining buffer length.
+ */
+void
+bcm_tlv_buffer_advance_past(const bcm_tlv_t *elt, const uint8 **buffer, uint *buflen)
+{
+ /* Start by advancing the buffer up to the given elt */
+ bcm_tlv_buffer_advance_to(elt, buffer, buflen);
+
+ /* if that did not work, bail out */
+ if (*buflen == 0) {
+ return;
+ }
+
+#if defined(__COVERITY__)
+ /* The elt has been verified by bcm_tlv_buffer_advance_to() to be a valid element,
+ * so its elt->len is in the bounds of the buffer. The following check prevents
+ * Coverity from flagging the (elt->data + elt->len) statement below as using a
+ * tainted elt->len to index into array 'elt->data'.
+ */
+ if (elt->len > *buflen) {
+ return;
+ }
+#endif /* __COVERITY__ */
+
+ /* We know we are advanced up to a good tlv.
+ * Now just advance to the following tlv.
+ */
+ elt = (const bcm_tlv_t*)(elt->data + elt->len);
+
+ bcm_tlv_buffer_advance_to(elt, buffer, buflen);
+
+ return;
+}
+
+/*
+ * Traverse a string of 1-byte tag/1-byte length/variable-length value
+ * triples, returning a pointer to the substring whose first element
+ * matches tag
+ */
+bcm_tlv_t *
+bcm_parse_tlvs(const void *buf, uint buflen, uint key)
+{
+ const bcm_tlv_t *elt;
+ uint totlen;
+
+ COV_TAINTED_DATA_SINK(buflen);
+ COV_NEG_SINK(buflen);
+
+ if ((elt = (const bcm_tlv_t*)buf) == NULL) {
+ return NULL;
+ }
+ totlen = buflen;
+
+ /* find tagged parameter */
+ while (totlen >= TLV_HDR_LEN) {
+ uint len = elt->len;
+
+ /* check if elt overruns buffer */
+ if (totlen < (len + TLV_HDR_LEN)) {
+ break;
+ }
+ /* did we find the ID? */
+ if ((elt->id == key)) {
+ COV_TAINTED_DATA_ARG(elt);
+
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ return (bcm_tlv_t *)(elt);
+ GCC_DIAGNOSTIC_POP();
+ }
+ elt = (const bcm_tlv_t*)((const uint8*)elt + (len + TLV_HDR_LEN));
+ totlen -= (len + TLV_HDR_LEN);
+ }
+
+ return NULL;
+}
+
+/*
+ * Traverse a string of 1-byte tag/1-byte length/variable-length value
+ * triples, returning a pointer to the substring whose first element
+ * matches tag.
+ * The 'advance' parameter specifies what to do to the parse buf/buflen values if a
+ * matching tlv is found:
+ * BCM_TLV_ADVANCE_NONE - do nothing
+ * BCM_TLV_ADVANCE_TO - move the buf up to the discovered tlv, and adjust buflen.
+ * BCM_TLV_ADVANCE_PAST - move the buf past the discovered tlv, and adjust buflen.
+ * If a tlv is not found, no changes are made to buf/buflen
+ *
+ */
+const bcm_tlv_t *
+bcm_parse_tlvs_advance(const uint8 **buf, uint *buflen, uint key, bcm_tlv_advance_mode_t advance)
+{
+ const bcm_tlv_t *elt;
+
+ elt = bcm_parse_tlvs(*buf, *buflen, key);
+
+ if (elt == NULL) {
+ return elt;
+ }
+
+ if (advance == BCM_TLV_ADVANCE_TO) {
+ bcm_tlv_buffer_advance_to(elt, buf, buflen);
+ } else if (advance == BCM_TLV_ADVANCE_PAST) {
+ bcm_tlv_buffer_advance_past(elt, buf, buflen);
+ } else if (advance == BCM_TLV_ADVANCE_NONE) {
+ /* nothing to do */
+ } else {
+ /* there are only 3 modes, but just in case, zero the parse buffer pointer and
+ * length to prevent infinite loops in callers that expect progress.
+ */
+ ASSERT(0);
+ *buf = NULL;
+ *buflen = 0;
+ }
+
+ return elt;
+}
+
+bcm_tlv_t *
+bcm_parse_tlvs_dot11(const void *buf, uint buflen, uint key, bool id_ext)
+{
+ bcm_tlv_t *elt;
+ uint totlen;
+
+ COV_TAINTED_DATA_SINK(buflen);
+ COV_NEG_SINK(buflen);
+
+ /*
+ ideally, we don't want to do that, but returning a const pointer
+ from these parse functions spreads casting everywhere in the code
+ */
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ elt = (bcm_tlv_t*)buf;
+ GCC_DIAGNOSTIC_POP();
+
+ totlen = buflen;
+
+ /* find tagged parameter */
+ while (totlen >= TLV_HDR_LEN) {
+ uint len = elt->len;
+
+ /* validate remaining totlen */
+ if (totlen < (len + TLV_HDR_LEN)) {
+ break;
+ }
+
+ do {
+ if (id_ext) {
+ if (!DOT11_MNG_IE_ID_EXT_MATCH(elt, key))
+ break;
+ } else if (elt->id != key) {
+ break;
+ }
+
+ COV_TAINTED_DATA_ARG(elt);
+
+ return (bcm_tlv_t *)(elt); /* a match */
+ } while (0);
+
+ elt = (bcm_tlv_t*)((uint8*)elt + (len + TLV_HDR_LEN));
+ totlen -= (len + TLV_HDR_LEN);
+ }
+
+ return NULL;
+}
+
+/*
+ * Traverse a string of 1-byte tag/1-byte length/variable-length value
+ * triples, returning a pointer to the substring whose first element
+ * matches tag
+ * return NULL if not found or length field < min_varlen
+ */
+bcm_tlv_t *
+bcm_parse_tlvs_min_bodylen(const void *buf, uint buflen, uint key, uint min_bodylen)
+{
+ bcm_tlv_t * ret;
+ ret = bcm_parse_tlvs(buf, buflen, key);
+ if (ret == NULL || ret->len < min_bodylen) {
+ return NULL;
+ }
+ return ret;
+}
+
+/*
+ * Traverse a string of 1-byte tag/1-byte length/variable-length value
+ * triples, returning a pointer to the substring whose first element
+ * matches tag
+ * return NULL if not found or tlv size > max_len or < min_len
+ */
+bcm_tlv_t *
+bcm_parse_tlvs_minmax_len(const void *buf, uint buflen, uint key,
+ uint min_len, uint max_len)
+{
+ bcm_tlv_t * ret;
+ ret = bcm_parse_tlvs(buf, buflen, key);
+ if (ret == NULL ||
+ (BCM_TLV_SIZE(ret) > max_len) ||
+ (BCM_TLV_SIZE(ret) < min_len)) {
+ return NULL;
+ }
+ return ret;
+}
+
+/*
+ * Traverse a string of 1-byte tag/1-byte length/variable-length value
+ * triples, returning a pointer to the substring whose first element
+ * matches tag. Stop parsing when we see an element whose ID is greater
+ * than the target key.
+ */
+const bcm_tlv_t *
+bcm_parse_ordered_tlvs(const void *buf, uint buflen, uint key)
+{
+ const bcm_tlv_t *elt;
+ uint totlen;
+
+ COV_TAINTED_DATA_SINK(buflen);
+ COV_NEG_SINK(buflen);
+
+ elt = (const bcm_tlv_t*)buf;
+ totlen = buflen;
+
+ /* find tagged parameter */
+ while (totlen >= TLV_HDR_LEN) {
+ uint id = elt->id;
+ uint len = elt->len;
+
+ /* Punt if we start seeing IDs greater than the target key */
+ if (id > key) {
+ return (NULL);
+ }
+
+ /* validate remaining totlen */
+ if (totlen < (len + TLV_HDR_LEN)) {
+ break;
+ }
+ if (id == key) {
+ COV_TAINTED_DATA_ARG(elt);
+ return (elt);
+ }
+
+ elt = (const bcm_tlv_t*)((const uint8*)elt + (len + TLV_HDR_LEN));
+ totlen -= (len + TLV_HDR_LEN);
+ }
+ return NULL;
+}
+#endif /* !BCMROMOFFLOAD_EXCLUDE_BCMUTILS_FUNCS */
+
+uint
+bcm_format_field(const bcm_bit_desc_ex_t *bd, uint32 flags, char* buf, uint len)
+{
+ uint i, slen = 0;
+ uint32 bit, mask;
+ const char *name;
+ mask = bd->mask;
+ if (len < 2 || !buf)
+ return 0;
+
+ buf[0] = '\0';
+
+ for (i = 0; (name = bd->bitfield[i].name) != NULL; i++) {
+ bit = bd->bitfield[i].bit;
+ if ((flags & mask) == bit) {
+ slen = (int)strlen(name);
+ if (memcpy_s(buf, len, name, slen + 1) != BCME_OK) {
+ slen = 0;
+ }
+ break;
+ }
+ }
+ return slen;
+}
+
+int
+bcm_format_flags(const bcm_bit_desc_t *bd, uint32 flags, char* buf, uint len)
+{
+ uint i;
+ char *p = buf;
+ char *end = (buf + len);
+ char hexstr[16];
+ uint32 bit;
+ const char* name;
+ bool err = FALSE;
+
+ if (len < 2 || !buf)
+ return 0;
+
+ buf[0] = '\0';
+
+ for (i = 0; flags != 0; i++) {
+ bit = bd[i].bit;
+ name = bd[i].name;
+ if (bit == 0 && flags != 0) {
+ /* print any unnamed bits */
+ snprintf(hexstr, sizeof(hexstr), "0x%X", flags);
+ name = hexstr;
+ flags = 0; /* exit loop */
+ } else if ((flags & bit) == 0) {
+ continue;
+ }
+ flags &= ~bit;
+
+ /* Print named bit. */
+ p += strlcpy(p, name, (end - p));
+ if (p == end) {
+ /* Truncation error. */
+ err = TRUE;
+ break;
+ }
+
+ /* Add space delimiter if there are more bits. */
+ if (flags != 0) {
+ p += strlcpy(p, " ", (end - p));
+ if (p == end) {
+ /* Truncation error. */
+ err = TRUE;
+ break;
+ }
+ }
+ }
+
+ /* indicate the str was too short */
+ if (err) {
+ ASSERT(len >= 2u);
+ buf[len - 2u] = '>';
+ }
+
+ return (int)(p - buf);
+}
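+
+/* Illustrative usage sketch (not compiled): the descriptor table is scanned
+ * in order, a zero .bit entry terminates it, and any bits left unnamed are
+ * printed as a single hex word. The flag names here are made up.
+ */
+#if 0
+static void
+bcm_format_flags_example(void)
+{
+ static const bcm_bit_desc_t fd[] = {
+ {0x1, "UP"}, {0x2, "PROMISC"}, {0x4, "DEBUG"}, {0, NULL}
+ };
+ char out[32];
+
+ bcm_format_flags(fd, 0x1 | 0x4 | 0x10, out, sizeof(out));
+ /* out = "UP DEBUG 0x10" */
+}
+#endif /* example */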
+
+/* print out which bits in octet array 'addr' are set. bcm_bit_desc_t:bit is a bit offset. */
+int
+bcm_format_octets(const bcm_bit_desc_t *bd, uint bdsz,
+ const uint8 *addr, uint size, char *buf, uint len)
+{
+ uint i;
+ char *p = buf;
+ uint slen = 0, nlen = 0;
+ uint32 bit;
+ const char* name;
+ bool more = FALSE;
+
+ BCM_REFERENCE(size);
+
+ if (len < 2 || !buf)
+ return 0;
+
+ buf[0] = '\0';
+
+ for (i = 0; i < bdsz; i++) {
+ bit = bd[i].bit;
+ name = bd[i].name;
+ if (isset(addr, bit)) {
+ nlen = (int)strlen(name);
+ slen += nlen;
+ /* need SPACE - for simplicity */
+ slen += 1;
+ /* need NULL as well */
+ if (len < slen + 1) {
+ more = TRUE;
+ break;
+ }
+ memcpy(p, name, nlen);
+ p += nlen;
+ p[0] = ' ';
+ p += 1;
+ p[0] = '\0';
+ }
+ }
+
+ if (more) {
+ p[0] = '>';
+ p += 1;
+ p[0] = '\0';
+ }
+
+ return (int)(p - buf);
+}
+
+/* Transform an hexadecimal string into binary.
+ * Output is limited to 64K.
+ * hex : string
+ * hex_len : string length
+ * buf : allocated output buffer
+ * buf_len : allocated size
+ * return : copied length if successful, 0 on error.
+ */
+uint16
+bcmhex2bin(const uint8* hex, uint hex_len, uint8 *buf, uint buf_len)
+{
+ uint i = 0;
+ uint16 out_len;
+ char tmp[] = "XX";
+ if (hex_len % 2) {
+ /* hex_len not even */
+ return 0;
+ }
+ /* check for hex radix */
+ if ((hex[0] == '0') && ((hex[1] == 'x') || (hex[1] == 'X'))) {
+ hex += 2;
+ hex_len -= 2;
+ }
+ if (hex_len/2 > 0xFFFF) {
+ /* exceed 64K buffer capacity */
+ return 0;
+ }
+ if ((out_len = hex_len/2) > buf_len) {
+ /* buf too short */
+ return 0;
+ }
+ do {
+ tmp[0] = *hex++;
+ tmp[1] = *hex++;
+ if (!bcm_isxdigit(tmp[0]) || !bcm_isxdigit(tmp[1])) {
+ /* char is not a hex digit */
+ return 0;
+ }
+ /* okay so far; make this piece a number */
+ buf[i] = (uint8) bcm_strtoul(tmp, NULL, 16);
+ } while (++i < out_len);
+ return out_len;
+}
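+
+/* Illustrative usage sketch (not compiled): an optional "0x" radix prefix
+ * is skipped, and the remaining digits must come in whole pairs.
+ */
+#if 0
+static void
+bcmhex2bin_example(void)
+{
+ uint8 bin[4];
+ uint16 n;
+
+ n = bcmhex2bin((const uint8 *)"0xDEADBEEF", 10, bin, sizeof(bin));
+ /* n = 4, bin = {0xDE, 0xAD, 0xBE, 0xEF} */
+ n = bcmhex2bin((const uint8 *)"ABC", 3, bin, sizeof(bin));
+ /* n = 0: an odd number of digits is rejected */
+ BCM_REFERENCE(n);
+}
+#endif /* example */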
+
+/* print bytes formatted as hex to a string. return the resulting string length */
+int
+bcm_format_hex(char *str, const void *bytes, uint len)
+{
+ uint i;
+ char *p = str;
+ const uint8 *src = (const uint8*)bytes;
+
+ for (i = 0; i < len; i++) {
+ p += snprintf(p, 3, "%02X", *src);
+ src++;
+ }
+ return (int)(p - str);
+}
+
+/* pretty hex print a contiguous buffer */
+void
+prhex(const char *msg, const uchar *buf, uint nbytes)
+{
+ char line[128], *p;
+ uint len = sizeof(line);
+ int nchar;
+ uint i;
+
+ if (msg && (msg[0] != '\0'))
+ printf("%s:\n", msg);
+
+ p = line;
+ for (i = 0; i < nbytes; i++) {
+ if (i % 16 == 0) {
+ nchar = snprintf(p, len, " %04x: ", i); /* line prefix */
+ p += nchar;
+ len -= nchar;
+ }
+ if (len > 0) {
+ nchar = snprintf(p, len, "%02x ", buf[i]);
+ p += nchar;
+ len -= nchar;
+ }
+
+ if (i % 16 == 15) {
+ printf("%s\n", line); /* flush line */
+ p = line;
+ len = sizeof(line);
+ }
+ }
+
+ /* flush last partial line */
+ if (p != line)
+ printf("%s\n", line);
+}
+
+static const char *crypto_algo_names[] = {
+ "NONE",
+ "WEP1",
+ "TKIP",
+ "WEP128",
+ "AES_CCM",
+ "AES_OCB_MSDU",
+ "AES_OCB_MPDU",
+ "NALG",
+ "UNDEF",
+ "UNDEF",
+ "UNDEF",
+
+#ifdef BCMWAPI_WAI
+ "WAPI",
+#endif /* BCMWAPI_WAI */
+
+#ifndef BCMWAPI_WAI
+ "UNDEF",
+#endif
+ "PMK",
+ "BIP",
+ "AES_GCM",
+ "AES_CCM256",
+ "AES_GCM256",
+ "BIP_CMAC256",
+ "BIP_GMAC",
+ "BIP_GMAC256",
+ "UNDEF"
+};
+
+const char *
+bcm_crypto_algo_name(uint algo)
+{
+ return (algo < ARRAYSIZE(crypto_algo_names)) ? crypto_algo_names[algo] : "ERR";
+}
+
+#ifdef BCMDBG
+void
+deadbeef(void *p, uint len)
+{
+ static uint8 meat[] = { 0xde, 0xad, 0xbe, 0xef };
+
+ while (len-- > 0) {
+ *(uint8*)p = meat[((uintptr)p) & 3];
+ p = (uint8*)p + 1;
+ }
+}
+#endif /* BCMDBG */
+
+char *
+bcm_chipname(uint chipid, char *buf, uint len)
+{
+ const char *fmt;
+
+ fmt = ((chipid > 0xa000) || (chipid < 0x4000)) ? "%d" : "%x";
+ snprintf(buf, len, fmt, chipid);
+ return buf;
+}
+
+/* Produce a human-readable string for boardrev */
+char *
+bcm_brev_str(uint32 brev, char *buf)
+{
+ if (brev < 0x100)
+ snprintf(buf, 8, "%d.%d", (brev & 0xf0) >> 4, brev & 0xf);
+ else
+ snprintf(buf, 8, "%c%03x", ((brev & 0xf000) == 0x1000) ? 'P' : 'A', brev & 0xfff);
+
+ return (buf);
+}
+
+#define BUFSIZE_TODUMP_ATONCE 128 /* Buffer size */
+
+/* dump large strings to console */
+void
+printbig(char *buf)
+{
+ uint len, max_len;
+ char c;
+
+ len = (uint)strlen(buf);
+
+ max_len = BUFSIZE_TODUMP_ATONCE;
+
+ while (len > max_len) {
+ c = buf[max_len];
+ buf[max_len] = '\0';
+ printf("%s", buf);
+ buf[max_len] = c;
+
+ buf += max_len;
+ len -= max_len;
+ }
+ /* print the remaining string */
+ printf("%s\n", buf);
+ return;
+}
+
+/* routine to dump fields in a fielddesc structure */
+uint
+bcmdumpfields(bcmutl_rdreg_rtn read_rtn, void *arg0, uint arg1, struct fielddesc *fielddesc_array,
+ char *buf, uint32 bufsize)
+{
+ uint filled_len;
+ int len;
+ struct fielddesc *cur_ptr;
+
+ filled_len = 0;
+ cur_ptr = fielddesc_array;
+
+ while (bufsize > 1) {
+ if (cur_ptr->nameandfmt == NULL)
+ break;
+ len = snprintf(buf, bufsize, cur_ptr->nameandfmt,
+ read_rtn(arg0, arg1, cur_ptr->offset));
+ /* check for snprintf overflow or error */
+ if (len < 0 || (uint32)len >= bufsize)
+ len = bufsize - 1;
+ buf += len;
+ bufsize -= len;
+ filled_len += len;
+ cur_ptr++;
+ }
+ return filled_len;
+}
+
+uint
+bcm_mkiovar(const char *name, const char *data, uint datalen, char *buf, uint buflen)
+{
+ uint len;
+
+ len = (uint)strlen(name) + 1;
+
+ if ((len + datalen) > buflen)
+ return 0;
+
+ strlcpy(buf, name, buflen);
+
+ /* append data onto the end of the name string */
+ if (data && datalen != 0) {
+ memcpy(&buf[len], data, datalen);
+ len += datalen;
+ }
+
+ return len;
+}
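+
+/* Illustrative usage sketch (not compiled): the iovar buffer layout is the
+ * NUL-terminated name followed immediately by the payload; the returned
+ * length covers both, and 0 means the buffer was too small. The iovar name
+ * "mpc" is just an example.
+ */
+#if 0
+static void
+bcm_mkiovar_example(void)
+{
+ char buf[64];
+ uint32 val = 1;
+ uint len;
+
+ len = bcm_mkiovar("mpc", (char *)&val, sizeof(val), buf, sizeof(buf));
+ /* len = 8: "mpc\0" (4 bytes) followed by the 4-byte payload */
+ BCM_REFERENCE(len);
+}
+#endif /* example */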
+
+/* Quarter dBm units to mW
+ * Table starts at QDBM_OFFSET, so the first entry is mW for qdBm=153
+ * Table is offset so the last entry is largest mW value that fits in
+ * a uint16.
+ */
+
+#define QDBM_OFFSET 153 /* Offset for first entry */
+#define QDBM_TABLE_LEN 40 /* Table size */
+
+/* Smallest mW value that will round up to the first table entry, QDBM_OFFSET.
+ * Value is ( mW(QDBM_OFFSET - 1) + mW(QDBM_OFFSET) ) / 2
+ */
+#define QDBM_TABLE_LOW_BOUND 6493 /* Low bound */
+
+/* Largest mW value that will round down to the last table entry,
+ * QDBM_OFFSET + QDBM_TABLE_LEN-1.
+ * Value is ( mW(QDBM_OFFSET + QDBM_TABLE_LEN - 1) + mW(QDBM_OFFSET + QDBM_TABLE_LEN) ) / 2.
+ */
+#define QDBM_TABLE_HIGH_BOUND 64938 /* High bound */
+
+static const uint16 nqdBm_to_mW_map[QDBM_TABLE_LEN] = {
+/* qdBm: +0 +1 +2 +3 +4 +5 +6 +7 */
+/* 153: */ 6683, 7079, 7499, 7943, 8414, 8913, 9441, 10000,
+/* 161: */ 10593, 11220, 11885, 12589, 13335, 14125, 14962, 15849,
+/* 169: */ 16788, 17783, 18836, 19953, 21135, 22387, 23714, 25119,
+/* 177: */ 26607, 28184, 29854, 31623, 33497, 35481, 37584, 39811,
+/* 185: */ 42170, 44668, 47315, 50119, 53088, 56234, 59566, 63096
+};
+
+uint16
+bcm_qdbm_to_mw(uint8 qdbm)
+{
+ uint factor = 1;
+ int idx = qdbm - QDBM_OFFSET;
+
+ if (idx >= QDBM_TABLE_LEN) {
+ /* clamp to max uint16 mW value */
+ return 0xFFFF;
+ }
+
+ /* scale the qdBm index up to the range of the table 0-40
+ * where an offset of 40 qdBm equals a factor of 10 mW.
+ */
+ while (idx < 0) {
+ idx += 40;
+ factor *= 10;
+ }
+
+ /* return the mW value scaled down to the correct factor of 10,
+ * adding in factor/2 to get proper rounding.
+ */
+ return ((nqdBm_to_mW_map[idx] + factor/2) / factor);
+}
+
+uint8
+bcm_mw_to_qdbm(uint16 mw)
+{
+ uint8 qdbm;
+ int offset;
+ uint mw_uint = mw;
+ uint boundary;
+
+ /* handle boundary case */
+ if (mw_uint <= 1)
+ return 0;
+
+ offset = QDBM_OFFSET;
+
+ /* move mw into the range of the table */
+ while (mw_uint < QDBM_TABLE_LOW_BOUND) {
+ mw_uint *= 10;
+ offset -= 40;
+ }
+
+ for (qdbm = 0; qdbm < QDBM_TABLE_LEN-1; qdbm++) {
+ boundary = nqdBm_to_mW_map[qdbm] + (nqdBm_to_mW_map[qdbm+1] -
+ nqdBm_to_mW_map[qdbm])/2;
+ if (mw_uint < boundary) break;
+ }
+
+ qdbm += (uint8)offset;
+
+ return (qdbm);
+}
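+
+/* Illustrative worked example (not compiled): 64 qdBm (16 dBm) sits three
+ * decades below the table, so idx = 64 - 153 + 3*40 = 31 and factor = 1000;
+ * (nqdBm_to_mW_map[31] + 500) / 1000 = (39811 + 500) / 1000 = 40 mW,
+ * matching 10^(16/10) = 39.8 mW. The reverse conversion round-trips.
+ */
+#if 0
+static void
+bcm_qdbm_example(void)
+{
+ uint16 mw = bcm_qdbm_to_mw(64); /* 40 */
+ uint8 qdbm = bcm_mw_to_qdbm(40); /* 64 */
+ BCM_REFERENCE(mw);
+ BCM_REFERENCE(qdbm);
+}
+#endif /* example */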
+
+uint
+BCMPOSTTRAPFN(bcm_bitcount)(const uint8 *bitmap, uint length)
+{
+ uint bitcount = 0, i;
+ uint8 tmp;
+ for (i = 0; i < length; i++) {
+ tmp = bitmap[i];
+ while (tmp) {
+ bitcount++;
+ tmp &= (tmp - 1);
+ }
+ }
+ return bitcount;
+}
+
+void
+dump_nvram(char *varbuf, int column, unsigned int n, unsigned int len)
+{
+ unsigned int m;
+ char vars[128];
+
+ if (((n==0) && (varbuf[0]=='#')) ||
+ ((column==0) && (android_msg_level & ANDROID_INFO_LEVEL))) {
+ memset(vars, 0x00, sizeof(vars));
+ for (m=n; m<len && (m-n)<(sizeof(vars)-1); m++) {
+ if (varbuf[m] == '\n')
+ break;
+ vars[m-n] = varbuf[m];
+ }
+ printf("%s\n", vars);
+ }
+}
+
+/*
+ * process_nvram_vars: Takes a buffer of "<var>=<value>\n" lines read from a file, ending in a NUL.
+ * Also accepts nvram files which are already in the format <var1>=<value1>\0<var2>=<value2>\0.
+ * Removes carriage returns, empty lines, comment lines, and converts newlines to NULs.
+ * Shortens buffer as needed and pads with NULs. End of buffer is marked by two NULs.
+*/
+
+unsigned int
+process_nvram_vars(char *varbuf, unsigned int len)
+{
+ char *dp;
+ bool findNewline;
+ int column;
+ unsigned int buf_len, n;
+ unsigned int pad = 0;
+
+ dp = varbuf;
+
+ findNewline = FALSE;
+ column = 0;
+
+ dump_nvram(varbuf, 0, 0, len);
+ for (n = 0; n < len; n++) {
+ if (varbuf[n] == '\r')
+ continue;
+ if (findNewline && varbuf[n] != '\n')
+ continue;
+ findNewline = FALSE;
+ if (varbuf[n] == '#') {
+ findNewline = TRUE;
+ continue;
+ }
+ if (varbuf[n] == '\n') {
+ if (column == 0)
+ continue;
+ *dp++ = 0;
+ column = 0;
+ continue;
+ }
+ dump_nvram(varbuf, column, n, len);
+ *dp++ = varbuf[n];
+ column++;
+ }
+ buf_len = (unsigned int)(dp - varbuf);
+ if (buf_len % 4) {
+ pad = 4 - buf_len % 4;
+ if (pad && (buf_len + pad <= len)) {
+ buf_len += pad;
+ }
+ }
+
+ while (dp < varbuf + n)
+ *dp++ = 0;
+
+ return buf_len;
+}
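+
+/* Illustrative usage sketch (not compiled): comment lines, carriage returns
+ * and blank lines are dropped, and each surviving line is NUL-terminated in
+ * place. The variable names are arbitrary.
+ */
+#if 0
+static void
+process_nvram_vars_example(void)
+{
+ char vars[] = "# comment\r\nboardtype=0x4343\n\nmacaddr=00:90:4c:11:22:33\n";
+ unsigned int n = process_nvram_vars(vars, sizeof(vars));
+ /* vars now holds "boardtype=0x4343\0macaddr=00:90:4c:11:22:33\0..." */
+ BCM_REFERENCE(n);
+}
+#endif /* example */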
+
+#ifndef setbit /* As in the header file */
+#ifdef BCMUTILS_BIT_MACROS_USE_FUNCS
+/* Set bit in byte array. */
+void
+setbit(void *array, uint bit)
+{
+ ((uint8 *)array)[bit / NBBY] |= 1 << (bit % NBBY);
+}
+
+/* Clear bit in byte array. */
+void
+clrbit(void *array, uint bit)
+{
+ ((uint8 *)array)[bit / NBBY] &= ~(1 << (bit % NBBY));
+}
+
+/* Test if bit is set in byte array. */
+bool
+isset(const void *array, uint bit)
+{
+ return (((const uint8 *)array)[bit / NBBY] & (1 << (bit % NBBY)));
+}
+
+/* Test if bit is clear in byte array. */
+bool
+isclr(const void *array, uint bit)
+{
+ return ((((const uint8 *)array)[bit / NBBY] & (1 << (bit % NBBY))) == 0);
+}
+#endif /* BCMUTILS_BIT_MACROS_USE_FUNCS */
+#endif /* setbit */
+
+void
+BCMPOSTTRAPFN(set_bitrange)(void *array, uint start, uint end, uint maxbit)
+{
+ uint startbyte = start/NBBY;
+ uint endbyte = end/NBBY;
+ uint i, startbytelastbit, endbytestartbit;
+
+ if (end >= start) {
+ if (endbyte - startbyte > 1) {
+ startbytelastbit = ((startbyte + 1) * NBBY) - 1;
+ endbytestartbit = endbyte * NBBY;
+ for (i = startbyte + 1; i < endbyte; i++)
+ ((uint8 *)array)[i] = 0xFF;
+ for (i = start; i <= startbytelastbit; i++)
+ setbit(array, i);
+ for (i = endbytestartbit; i <= end; i++)
+ setbit(array, i);
+ } else {
+ for (i = start; i <= end; i++)
+ setbit(array, i);
+ }
+ } else {
+ set_bitrange(array, start, maxbit, maxbit);
+ set_bitrange(array, 0, end, maxbit);
+ }
+}
+
+void
+clr_bitrange(void *array, uint start, uint end, uint maxbit)
+{
+ uint startbyte = start/NBBY;
+ uint endbyte = end/NBBY;
+ uint i, startbytelastbit, endbytestartbit;
+
+ if (end >= start) {
+ if (endbyte - startbyte > 1) {
+ startbytelastbit = ((startbyte + 1) * NBBY) - 1;
+ endbytestartbit = endbyte * NBBY;
+ for (i = startbyte + 1; i < endbyte; i++)
+ ((uint8 *)array)[i] = 0x0;
+ for (i = start; i <= startbytelastbit; i++)
+ clrbit(array, i);
+ for (i = endbytestartbit; i <= end; i++)
+ clrbit(array, i);
+ } else {
+ for (i = start; i <= end; i++)
+ clrbit(array, i);
+ }
+ } else {
+ clr_bitrange(array, start, maxbit, maxbit);
+ clr_bitrange(array, 0, end, maxbit);
+ }
+}
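+
+/* Illustrative usage sketch (not compiled): when start > end the range
+ * wraps through maxbit, which is how circular sequence-number windows are
+ * marked in a bitmap.
+ */
+#if 0
+static void
+bitrange_example(void)
+{
+ uint8 map[4]; /* bits 0..31 */
+
+ memset(map, 0, sizeof(map));
+ set_bitrange(map, 4, 11, 31); /* sets bits 4..11 */
+ set_bitrange(map, 30, 1, 31); /* wraps: sets bits 30, 31, 0, 1 */
+ clr_bitrange(map, 4, 11, 31); /* clears bits 4..11 again */
+}
+#endif /* example */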
+
+/*
+ * This API (set_bitrange_u32) is the same as set_bitrange but uses 32-bit operations.
+ * It can be used in place of set_bitrange, but the array must be word (32-bit) aligned.
+ * It must be used when the memory being accessed cannot be accessed in
+ * 8-bit (byte) mode and requires 32-bit (word) mode.
+ */
+void
+set_bitrange_u32(void *array, uint start, uint end, uint maxbit)
+{
+ uint startword = start/SIZE_BITS32(uint32);
+ uint endword = end/SIZE_BITS32(uint32);
+ uint startwordstartbit = start % SIZE_BITS32(uint32);
+ uint endwordlastbit = end % SIZE_BITS32(uint32);
+ /* Used to calculate bit number from MSB */
+ uint u32msbnum = SIZE_BITS32(uint32) - 1U;
+ uint i;
+ uint32 setbitsword;
+ uint32 u32max = ~0U;
+
+ ASSERT(ISALIGNED(array, sizeof(uint32))); /* array must be aligned for this API */
+
+ if (start > end) {
+ set_bitrange_u32(array, start, maxbit, maxbit);
+ set_bitrange_u32(array, 0U, end, maxbit);
+ return;
+ }
+
+ if (endword - startword) {
+ /* Setting MSB bits including startwordstartbit */
+ setbitsword = u32max << startwordstartbit;
+ ((uint32 *)array)[startword] |= setbitsword;
+
+ /* Setting all bits in 'startword + 1' to 'endword - 1' */
+ for (i = startword + 1U; i <= endword - 1U; i++) {
+ ((uint32 *)array)[i] = u32max;
+ }
+
+ /* Setting LSB bits including endwordlastbit */
+ setbitsword = u32max >> (u32msbnum - endwordlastbit);
+ ((uint32 *)array)[endword] |= setbitsword;
+ } else { /* start and end are in same word */
+ /* Setting start bit to end bit including start and end bits */
+ setbitsword =
+ (u32max << startwordstartbit) & (u32max >> (u32msbnum - endwordlastbit));
+ ((uint32 *)array)[startword] |= setbitsword;
+ }
+}
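+/* Example (illustrative sketch; a zeroed, word-aligned array is assumed):
+ *
+ *   uint32 map[2] = {0, 0};
+ *   set_bitrange_u32(map, 4, 35, 63);
+ *
+ * Bits 4..35 are set using only 32-bit accesses:
+ * map[0] == 0xFFFFFFF0 and map[1] == 0x0000000F.
+ */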
+
+/*
+ * This api (clr_bitrange_u32) is the same as clr_bitrange but uses 32-bit (word) operations.
+ * It can be used in place of clr_bitrange, but 'array' must be word (32-bit) aligned.
+ * Use it when the memory being accessed cannot be accessed in 8-bit (byte) mode
+ * and requires 32-bit (word) mode.
+ */
+void
+clr_bitrange_u32(void *array, uint start, uint end, uint maxbit)
+{
+ uint startword = start/SIZE_BITS32(uint32);
+ uint endword = end/SIZE_BITS32(uint32);
+ uint startwordstartbit = start % SIZE_BITS32(uint32);
+ uint endwordlastbit = end % SIZE_BITS32(uint32);
+ /* Used to calculate bit number from MSB */
+ uint u32msbnum = SIZE_BITS32(uint32) - 1U;
+ uint i;
+ uint32 clrbitsword;
+ uint32 u32max = ~0U;
+
+ ASSERT(ISALIGNED(array, sizeof(uint32))); /* array must be aligned for this API */
+
+ if (start > end) {
+ clr_bitrange_u32(array, start, maxbit, maxbit);
+ clr_bitrange_u32(array, 0U, end, maxbit);
+ return;
+ }
+
+ if (endword - startword) {
+ /* Clearing MSB bits including startwordstartbit */
+ clrbitsword = ~(u32max << startwordstartbit);
+ ((uint32 *)array)[startword] &= clrbitsword;
+
+ /* Clearing all bits in 'startword + 1' to 'endword - 1' */
+ for (i = startword + 1U; i <= endword - 1U; i++) {
+ ((uint32 *)array)[i] = 0U;
+ }
+
+ /* Clearing LSB bits including endwordlastbit */
+ clrbitsword = ~(u32max >> (u32msbnum - endwordlastbit));
+ ((uint32 *)array)[endword] &= clrbitsword;
+ } else { /* start and end are in same word */
+ /* Clearing start bit to end bit including start and end bits */
+ clrbitsword =
+ ~(u32max << startwordstartbit) | ~(u32max >> (u32msbnum - endwordlastbit));
+ ((uint32 *)array)[startword] &= clrbitsword;
+ }
+}
+
+void
+bcm_bitprint32(const uint32 u32arg)
+{
+ int i;
+ for (i = NBITS(uint32) - 1; i >= 0; i--) {
+ if (isbitset(u32arg, i)) {
+ printf("1");
+ } else {
+ printf("0");
+ }
+
+ if ((i % NBBY) == 0) printf(" ");
+ }
+ printf("\n");
+}
+
+/* calculate checksum for ip header, tcp / udp header / data */
+uint16
+bcm_ip_cksum(uint8 *buf, uint32 len, uint32 sum)
+{
+ while (len > 1) {
+ sum += (buf[0] << 8) | buf[1];
+ buf += 2;
+ len -= 2;
+ }
+
+ if (len > 0) {
+ sum += (*buf) << 8;
+ }
+
+ while (sum >> 16) {
+ sum = (sum & 0xffff) + (sum >> 16);
+ }
+
+ return ((uint16)~sum);
+}
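+/* Example (illustrative; a 4-byte fragment and no prior partial sum):
+ *
+ *   uint8 hdr[] = {0x45, 0x00, 0x00, 0x3c};
+ *   uint16 cksum = bcm_ip_cksum(hdr, sizeof(hdr), 0);
+ *
+ * The 16-bit words 0x4500 and 0x003c sum to 0x453c; no carry folding is
+ * needed, so the one's-complement result is cksum == 0xbac3.
+ */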
+
+/* calculate a + b where a is a 64 bit number and b is a 32 bit number */
+void
+bcm_add_64(uint32* r_hi, uint32* r_lo, uint32 offset)
+{
+ uint32 r1_lo = *r_lo;
+ (*r_lo) += offset;
+ if (*r_lo < r1_lo)
+ (*r_hi) ++;
+}
+
+/* calculate a - b where a is a 64 bit number and b is a 32 bit number */
+void
+bcm_sub_64(uint32* r_hi, uint32* r_lo, uint32 offset)
+{
+ uint32 r1_lo = *r_lo;
+ (*r_lo) -= offset;
+ if (*r_lo > r1_lo)
+ (*r_hi) --;
+}
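+/* Example (illustrative): carry/borrow propagation between the two words.
+ *
+ *   uint32 hi = 0, lo = 0xFFFFFFFF;
+ *   bcm_add_64(&hi, &lo, 2);  // 64-bit value is now 0x1_00000001
+ *   bcm_sub_64(&hi, &lo, 2);  // back to 0x0_FFFFFFFF
+ *
+ * The unsigned wrap test (*r_lo < r1_lo after adding, *r_lo > r1_lo after
+ * subtracting) detects the carry/borrow without needing a 64-bit type.
+ */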
+
+int
+BCMRAMFN(valid_bcmerror)(int e)
+{
+ return ((e <= 0) && (e >= BCME_LAST));
+}
+
+#ifdef DEBUG_COUNTER
+#if (OSL_SYSUPTIME_SUPPORT == TRUE)
+void counter_printlog(counter_tbl_t *ctr_tbl)
+{
+ uint32 now;
+
+ if (!ctr_tbl->enabled)
+ return;
+
+ now = OSL_SYSUPTIME();
+
+ if (now - ctr_tbl->prev_log_print > ctr_tbl->log_print_interval) {
+ uint8 i = 0;
+ printf("counter_print(%s %d):", ctr_tbl->name, now - ctr_tbl->prev_log_print);
+
+ for (i = 0; i < ctr_tbl->needed_cnt; i++) {
+ printf(" %u", ctr_tbl->cnt[i]);
+ }
+ printf("\n");
+
+ ctr_tbl->prev_log_print = now;
+ bzero(ctr_tbl->cnt, CNTR_TBL_MAX * sizeof(uint));
+ }
+}
+#else
+/* OSL_SYSUPTIME is not supported so no way to get time */
+#define counter_printlog(a) do {} while (0)
+#endif /* OSL_SYSUPTIME_SUPPORT == TRUE */
+#endif /* DEBUG_COUNTER */
+
+/* calculate partial checksum */
+static uint32
+ip_cksum_partial(uint32 sum, uint8 *val8, uint32 count)
+{
+ uint32 i;
+ uint16 *val16 = (uint16 *)val8;
+
+ ASSERT(val8 != NULL);
+ /* partial chksum calculated on 16-bit values */
+ ASSERT((count % 2) == 0);
+
+ count /= 2;
+
+ for (i = 0; i < count; i++) {
+ sum += *val16++;
+ }
+ return sum;
+}
+
+/* calculate IP checksum */
+static uint16
+ip_cksum(uint32 sum, uint8 *val8, uint32 count)
+{
+ uint16 *val16 = (uint16 *)val8;
+
+ ASSERT(val8 != NULL);
+
+ while (count > 1) {
+ sum += *val16++;
+ count -= 2;
+ }
+ /* add left-over byte, if any */
+ if (count > 0) {
+ sum += (*(uint8 *)val16);
+ }
+
+ /* fold 32-bit sum to 16 bits */
+ sum = (sum >> 16) + (sum & 0xffff);
+ sum += (sum >> 16);
+ return ((uint16)~sum);
+}
+
+/* calculate IPv4 header checksum
+ * - input ip points to IP header in network order
+ * - output cksum is in network order
+ */
+uint16
+ipv4_hdr_cksum(uint8 *ip, uint ip_len)
+{
+ uint32 sum = 0;
+ uint8 *ptr = ip;
+
+ ASSERT(ip != NULL);
+ ASSERT(ip_len >= IPV4_MIN_HEADER_LEN);
+
+ if (ip_len < IPV4_MIN_HEADER_LEN) {
+ return 0;
+ }
+
+ /* partial cksum skipping the hdr_chksum field */
+ sum = ip_cksum_partial(sum, ptr, OFFSETOF(struct ipv4_hdr, hdr_chksum));
+ ptr += OFFSETOF(struct ipv4_hdr, hdr_chksum) + 2;
+
+ /* return calculated chksum */
+ return ip_cksum(sum, ptr, ip_len - OFFSETOF(struct ipv4_hdr, src_ip));
+}
+
+/* calculate TCP header checksum using partial sum */
+static uint16
+tcp_hdr_chksum(uint32 sum, uint8 *tcp_hdr, uint16 tcp_len)
+{
+ uint8 *ptr = tcp_hdr;
+
+ ASSERT(tcp_hdr != NULL);
+ ASSERT(tcp_len >= TCP_MIN_HEADER_LEN);
+
+ /* partial TCP cksum skipping the chksum field */
+ sum = ip_cksum_partial(sum, ptr, OFFSETOF(struct bcmtcp_hdr, chksum));
+ ptr += OFFSETOF(struct bcmtcp_hdr, chksum) + 2;
+
+ /* return calculated chksum */
+ return ip_cksum(sum, ptr, tcp_len - OFFSETOF(struct bcmtcp_hdr, urg_ptr));
+}
+
+struct tcp_pseudo_hdr {
+ uint8 src_ip[IPV4_ADDR_LEN]; /* Source IP Address */
+ uint8 dst_ip[IPV4_ADDR_LEN]; /* Destination IP Address */
+ uint8 zero;
+ uint8 prot;
+ uint16 tcp_size;
+};
+
+/* calculate IPv4 TCP header checksum
+ * - input ip and tcp point to the IP and TCP headers in network order
+ * - output cksum is in network order
+ */
+uint16
+ipv4_tcp_hdr_cksum(uint8 *ip, uint8 *tcp, uint16 tcp_len)
+{
+ struct ipv4_hdr *ip_hdr = (struct ipv4_hdr *)ip;
+ struct tcp_pseudo_hdr tcp_ps;
+ uint32 sum = 0;
+
+ ASSERT(ip != NULL);
+ ASSERT(tcp != NULL);
+ ASSERT(tcp_len >= TCP_MIN_HEADER_LEN);
+
+ /* pseudo header cksum */
+ memset(&tcp_ps, 0, sizeof(tcp_ps));
+ memcpy(&tcp_ps.dst_ip, ip_hdr->dst_ip, IPV4_ADDR_LEN);
+ memcpy(&tcp_ps.src_ip, ip_hdr->src_ip, IPV4_ADDR_LEN);
+ tcp_ps.zero = 0;
+ tcp_ps.prot = ip_hdr->prot;
+ tcp_ps.tcp_size = hton16(tcp_len);
+ sum = ip_cksum_partial(sum, (uint8 *)&tcp_ps, sizeof(tcp_ps));
+
+ /* return calculated TCP header chksum */
+ return tcp_hdr_chksum(sum, tcp, tcp_len);
+}
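+/* Usage sketch (illustrative; 'ip' and 'tcp' are assumed to point into a
+ * frame in network byte order, with tcp_len covering header plus payload):
+ *
+ *   struct bcmtcp_hdr *tcp_hdr = (struct bcmtcp_hdr *)tcp;
+ *   tcp_hdr->chksum = ipv4_tcp_hdr_cksum(ip, tcp, tcp_len);
+ *
+ * The chksum field itself is skipped by tcp_hdr_chksum(), so it need not
+ * be zeroed before the call.
+ */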
+
+struct ipv6_pseudo_hdr {
+ uint8 saddr[IPV6_ADDR_LEN];
+ uint8 daddr[IPV6_ADDR_LEN];
+ uint16 payload_len;
+ uint8 zero;
+ uint8 next_hdr;
+};
+
+/* calculate IPv6 TCP header checksum
+ * - input ipv6 and tcp point to the IPv6 and TCP headers in network order
+ * - output cksum is in network order
+ */
+uint16
+ipv6_tcp_hdr_cksum(uint8 *ipv6, uint8 *tcp, uint16 tcp_len)
+{
+ struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)ipv6;
+ struct ipv6_pseudo_hdr ipv6_pseudo;
+ uint32 sum = 0;
+
+ ASSERT(ipv6 != NULL);
+ ASSERT(tcp != NULL);
+ ASSERT(tcp_len >= TCP_MIN_HEADER_LEN);
+
+ /* pseudo header cksum */
+ memset((char *)&ipv6_pseudo, 0, sizeof(ipv6_pseudo));
+ memcpy((char *)ipv6_pseudo.saddr, (char *)ipv6_hdr->saddr.addr,
+ sizeof(ipv6_pseudo.saddr));
+ memcpy((char *)ipv6_pseudo.daddr, (char *)ipv6_hdr->daddr.addr,
+ sizeof(ipv6_pseudo.daddr));
+ ipv6_pseudo.payload_len = ipv6_hdr->payload_len;
+ ipv6_pseudo.next_hdr = ipv6_hdr->nexthdr;
+ sum = ip_cksum_partial(sum, (uint8 *)&ipv6_pseudo, sizeof(ipv6_pseudo));
+
+ /* return calculated TCP header chksum */
+ return tcp_hdr_chksum(sum, tcp, tcp_len);
+}
+
+void *_bcmutils_dummy_fn = NULL;
+
+/* GROUP 1 --- start
+ * The functions under GROUP 1 are general purpose functions for complex number
+ * calculations and square root calculation.
+ */
+
+uint32 sqrt_int(uint32 value)
+{
+ uint32 root = 0, shift = 0;
+
+ /* Compute integer nearest to square root of input integer value */
+ for (shift = 0; shift < 32; shift += 2) {
+ if (((0x40000000 >> shift) + root) <= value) {
+ value -= ((0x40000000 >> shift) + root);
+ root = (root >> 1) | (0x40000000 >> shift);
+ }
+ else {
+ root = root >> 1;
+ }
+ }
+
+ /* round to the nearest integer */
+ if (root < value) ++root;
+
+ return root;
+}
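+/* Example (illustrative): the loop computes the integer square root and the
+ * final test rounds to nearest using the remainder left in 'value':
+ *
+ *   sqrt_int(10) == 3   (sqrt(10) ~= 3.16)
+ *   sqrt_int(13) == 4   (sqrt(13) ~= 3.61; remainder 4 > root 3, round up)
+ *   sqrt_int(16) == 4
+ */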
+/* GROUP 1 --- end */
+
+/* read/write a field of consecutive bits in an octet array.
+ * 'addr' is the octet array's start byte address
+ * 'size' is the octet array's byte size
+ * 'stbit' is the value's start bit offset
+ * 'nbits' is the value's bit size
+ * This set of utilities is provided for convenience. Don't use these
+ * functions in time-critical/data-path code as they carry significant overhead.
+ */
+void
+setbits(uint8 *addr, uint size, uint stbit, uint nbits, uint32 val)
+{
+ uint fbyte = stbit >> 3; /* first byte */
+ uint lbyte = (stbit + nbits - 1) >> 3; /* last byte */
+ uint fbit = stbit & 7; /* first bit in the first byte */
+ uint rbits = (nbits > 8 - fbit ?
+ nbits - (8 - fbit) :
+ 0) & 7; /* remaining bits of the last byte when not 0 */
+ uint8 mask;
+ uint byte;
+
+ BCM_REFERENCE(size);
+
+ ASSERT(fbyte < size);
+ ASSERT(lbyte < size);
+ ASSERT(nbits <= (sizeof(val) << 3));
+
+ /* all bits are in the same byte */
+ if (fbyte == lbyte) {
+ mask = ((1 << nbits) - 1) << fbit;
+ addr[fbyte] &= ~mask;
+ addr[fbyte] |= (uint8)(val << fbit);
+ return;
+ }
+
+ /* first partial byte */
+ if (fbit > 0) {
+ mask = (0xff << fbit);
+ addr[fbyte] &= ~mask;
+ addr[fbyte] |= (uint8)(val << fbit);
+ val >>= (8 - fbit);
+ nbits -= (8 - fbit);
+ fbyte ++; /* first full byte */
+ }
+
+ /* last partial byte */
+ if (rbits > 0) {
+ mask = (1 << rbits) - 1;
+ addr[lbyte] &= ~mask;
+ addr[lbyte] |= (uint8)(val >> (nbits - rbits));
+ lbyte --; /* last full byte */
+ }
+
+ /* remaining full byte(s) */
+ for (byte = fbyte; byte <= lbyte; byte ++) {
+ addr[byte] = (uint8)val;
+ val >>= 8;
+ }
+}
+
+uint32
+getbits(const uint8 *addr, uint size, uint stbit, uint nbits)
+{
+ uint fbyte = stbit >> 3; /* first byte */
+ uint lbyte = (stbit + nbits - 1) >> 3; /* last byte */
+ uint fbit = stbit & 7; /* first bit in the first byte */
+ uint rbits = (nbits > 8 - fbit ?
+ nbits - (8 - fbit) :
+ 0) & 7; /* remaining bits of the last byte when not 0 */
+ uint32 val = 0;
+ uint bits = 0; /* bits in first partial byte */
+ uint8 mask;
+ uint byte;
+
+ BCM_REFERENCE(size);
+
+ ASSERT(fbyte < size);
+ ASSERT(lbyte < size);
+ ASSERT(nbits <= (sizeof(val) << 3));
+
+ /* all bits are in the same byte */
+ if (fbyte == lbyte) {
+ mask = ((1 << nbits) - 1) << fbit;
+ val = (addr[fbyte] & mask) >> fbit;
+ return val;
+ }
+
+ /* first partial byte */
+ if (fbit > 0) {
+ bits = 8 - fbit;
+ mask = (0xff << fbit);
+ val |= (addr[fbyte] & mask) >> fbit;
+ fbyte ++; /* first full byte */
+ }
+
+ /* last partial byte */
+ if (rbits > 0) {
+ mask = (1 << rbits) - 1;
+ val |= (addr[lbyte] & mask) << (nbits - rbits);
+ lbyte --; /* last full byte */
+ }
+
+ /* remaining full byte(s) */
+ for (byte = fbyte; byte <= lbyte; byte ++) {
+ val |= (addr[byte] << (((byte - fbyte) << 3) + bits));
+ }
+
+ return val;
+}
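+/* Example (illustrative sketch): an 8-bit field written at bit offset 4
+ * spans two bytes, least significant bits first:
+ *
+ *   uint8 buf[2] = {0, 0};
+ *   setbits(buf, sizeof(buf), 4, 8, 0xAB);   // buf[0] == 0xB0, buf[1] == 0x0A
+ *   uint32 v = getbits(buf, sizeof(buf), 4, 8);   // v == 0xAB
+ */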
+
+#if defined(BCMDBG) || defined(WLMSG_ASSOC)
+/* support for getting 802.11 frame type/name based on frame kind */
+#define FK_NAME_DECL(x) {FC_##x, #x}
+static const struct {
+ uint fk;
+ const char *name;
+} bcm_80211_fk_names[] = {
+ FK_NAME_DECL(ASSOC_REQ),
+ FK_NAME_DECL(ASSOC_RESP),
+ FK_NAME_DECL(REASSOC_REQ),
+ FK_NAME_DECL(REASSOC_RESP),
+ FK_NAME_DECL(PROBE_REQ),
+ FK_NAME_DECL(PROBE_RESP),
+ FK_NAME_DECL(BEACON),
+ FK_NAME_DECL(ATIM),
+ FK_NAME_DECL(DISASSOC),
+ FK_NAME_DECL(AUTH),
+ FK_NAME_DECL(DEAUTH),
+ FK_NAME_DECL(ACTION),
+ FK_NAME_DECL(ACTION_NOACK),
+ FK_NAME_DECL(CTL_TRIGGER),
+ FK_NAME_DECL(CTL_WRAPPER),
+ FK_NAME_DECL(BLOCKACK_REQ),
+ FK_NAME_DECL(BLOCKACK),
+ FK_NAME_DECL(PS_POLL),
+ FK_NAME_DECL(RTS),
+ FK_NAME_DECL(CTS),
+ FK_NAME_DECL(ACK),
+ FK_NAME_DECL(CF_END),
+ FK_NAME_DECL(CF_END_ACK),
+ FK_NAME_DECL(DATA),
+ FK_NAME_DECL(NULL_DATA),
+ FK_NAME_DECL(DATA_CF_ACK),
+ FK_NAME_DECL(QOS_DATA),
+ FK_NAME_DECL(QOS_NULL)
+};
+static const uint n_bcm_80211_fk_names = ARRAYSIZE(bcm_80211_fk_names);
+
+const char *bcm_80211_fk_name(uint fk)
+{
+ uint i;
+ for (i = 0; i < n_bcm_80211_fk_names; ++i) {
+ if (bcm_80211_fk_names[i].fk == fk) {
+ return bcm_80211_fk_names[i].name;
+ }
+ }
+ return "unknown";
+}
+#endif /* BCMDBG || WLMSG_ASSOC */
+
+#ifdef BCMDRIVER
+
+/** allocate variable sized data with 'size' bytes. note: vld should NOT be null.
+ */
+int
+bcm_vdata_alloc(osl_t *osh, var_len_data_t *vld, uint32 size)
+{
+ int ret = BCME_ERROR;
+ uint8 *dat = NULL;
+
+ if (vld == NULL) {
+ ASSERT(0);
+ goto done;
+ }
+
+ /* trying to allocate twice? */
+ if (vld->vdata != NULL) {
+ ASSERT(0);
+ goto done;
+ }
+
+ /* trying to allocate 0 size? */
+ if (size == 0) {
+ ASSERT(0);
+ ret = BCME_BADARG;
+ goto done;
+ }
+
+ dat = MALLOCZ(osh, size);
+ if (dat == NULL) {
+ ret = BCME_NOMEM;
+ goto done;
+ }
+ vld->vlen = size;
+ vld->vdata = dat;
+ ret = BCME_OK;
+done:
+ return ret;
+}
+
+/** free memory associated with variable sized data. note: vld should NOT be null.
+ */
+int
+bcm_vdata_free(osl_t *osh, var_len_data_t *vld)
+{
+ int ret = BCME_ERROR;
+
+ if (vld == NULL) {
+ ASSERT(0);
+ goto done;
+ }
+
+ if (vld->vdata) {
+ MFREE(osh, vld->vdata, vld->vlen);
+ vld->vlen = 0;
+ ret = BCME_OK;
+ }
+done:
+ return ret;
+}
+
+/* Return TRUE if:
+ * - both buffers are of length 0, OR
+ * - both buffers are NULL, OR
+ * - lengths and contents are the same.
+ */
+bool
+bcm_match_buffers(const uint8 *b1, uint b1_len, const uint8 *b2, uint b2_len)
+
+{
+ if (b1_len == 0 && b2_len == 0) {
+ return TRUE;
+ }
+
+ if (b1 == NULL && b2 == NULL) {
+ return TRUE;
+ }
+
+ /* If they are not both NULL, neither can be */
+ if (b1 == NULL || b2 == NULL) {
+ return FALSE;
+ }
+
+ if ((b1_len == b2_len) && !memcmp(b1, b2, b1_len)) {
+ return TRUE;
+ }
+ return FALSE;
+}
+
+#ifdef PRIVACY_MASK
+/* applies privacy mask on the input address itself */
+void
+BCMRAMFN(bcm_ether_privacy_mask)(struct ether_addr *addr)
+{
+ struct ether_addr *privacy = privacy_addrmask_get();
+ if (addr && !ETHER_ISMULTI(addr)) {
+ *(uint32*)(&(addr->octet[0])) &= *((uint32*)&privacy->octet[0]);
+ *(uint16*)(&(addr->octet[4])) &= *((uint16*)&privacy->octet[4]);
+ }
+}
+#endif /* PRIVACY_MASK */
+#endif /* BCMDRIVER */
+
+/* Count the number of elements not matching a given value in a null terminated array */
+int
+BCMATTACHFN(array_value_mismatch_count)(uint8 value, uint8 *array, int array_size)
+{
+ int i;
+ int count = 0;
+
+ for (i = 0; i < array_size; i++) {
+ /* exit if a null terminator is found */
+ if (array[i] == 0) {
+ break;
+ }
+ if (array[i] != value) {
+ count++;
+ }
+ }
+ return count;
+}
+
+/* Count the number of non-zero elements in a uint8 array */
+int
+BCMATTACHFN(array_nonzero_count)(uint8 *array, int array_size)
+{
+ return array_value_mismatch_count(0, array, array_size);
+}
+
+/* Count the number of non-zero elements in an int16 array */
+int
+BCMATTACHFN(array_nonzero_count_int16)(int16 *array, int array_size)
+{
+ int i;
+ int count = 0;
+
+ for (i = 0; i < array_size; i++) {
+ if (array[i] != 0) {
+ count++;
+ }
+ }
+ return count;
+}
+
+/* Count the number of zero elements in a uint8 array */
+int
+BCMATTACHFN(array_zero_count)(uint8 *array, int array_size)
+{
+ int i;
+ int count = 0;
+
+ for (i = 0; i < array_size; i++) {
+ if (array[i] == 0) {
+ count++;
+ }
+ }
+ return count;
+}
+
+/* Validate an array that can be 1 of 2 data types.
+ * One of array1 or array2 should be non-NULL. The other should be NULL.
+ */
+static int
+BCMATTACHFN(verify_ordered_array)(uint8 *array1, int16 *array2, int array_size,
+ int range_lo, int range_hi, bool err_if_no_zero_term, bool is_ordered)
+{
+ int ret;
+ int i;
+ int val = 0;
+ int prev_val = 0;
+
+ ret = err_if_no_zero_term ? BCME_NOTFOUND : BCME_OK;
+
+ /* Check that:
+ * - values are in descending order.
+ * - values are within the valid range.
+ */
+ for (i = 0; i < array_size; i++) {
+ if (array1) {
+ val = (int)array1[i];
+ } else if (array2) {
+ val = (int)array2[i];
+ } else {
+ /* both array parameters are NULL */
+ return BCME_NOTFOUND;
+ }
+ if (val == 0) {
+ /* array is zero-terminated */
+ ret = BCME_OK;
+ break;
+ }
+
+ if (is_ordered && i > 0 && val > prev_val) {
+ /* array is not in descending order */
+ ret = BCME_BADOPTION;
+ break;
+ }
+ prev_val = val;
+
+ if (val < range_lo || val > range_hi) {
+ /* array value out of range */
+ ret = BCME_RANGE;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+/* Validate an ordered uint8 configuration array */
+int
+BCMATTACHFN(verify_ordered_array_uint8)(uint8 *array, int array_size,
+ uint8 range_lo, uint8 range_hi)
+{
+ return verify_ordered_array(array, NULL, array_size, (int)range_lo, (int)range_hi,
+ TRUE, TRUE);
+}
+
+/* Validate an ordered int16 non-zero-terminated configuration array */
+int
+BCMATTACHFN(verify_ordered_array_int16)(int16 *array, int array_size,
+ int16 range_lo, int16 range_hi)
+{
+ return verify_ordered_array(NULL, array, array_size, (int)range_lo, (int)range_hi,
+ FALSE, TRUE);
+}
+
+/* Validate all values in an array are in range */
+int
+BCMATTACHFN(verify_array_values)(uint8 *array, int array_size,
+ int range_lo, int range_hi, bool zero_terminated)
+{
+ int ret = BCME_OK;
+ int i;
+ int val = 0;
+
+ /* Check that:
+ * - values are within the valid range.
+ * Scanning stops at a zero terminator when 'zero_terminated' is set.
+ */
+ for (i = 0; i < array_size; i++) {
+ val = (int)array[i];
+ if (val == 0 && zero_terminated) {
+ ret = BCME_OK;
+ break;
+ }
+ if (val < range_lo || val > range_hi) {
+ /* array value out of range */
+ ret = BCME_RANGE;
+ break;
+ }
+ }
+ return ret;
+}
+
+/* Adds/replaces NVRAM variable with given value
+ * varbuf[in,out] - Buffer with NVRAM variables (sequence of zero-terminated 'name=value' records,
+ * terminated with additional zero)
+ * buflen[in] - Length of buffer (may, even should, have some unused space)
+ * variable[in] - Variable to add/replace in 'name=value' form
+ * datalen[out,opt] - Optional output parameter - resulting length of data in buffer
+ * Returns TRUE on success, FALSE if buffer too short or variable specified incorrectly
+ */
+bool
+replace_nvram_variable(char *varbuf, unsigned int buflen, const char *variable,
+ unsigned int *datalen)
+{
+ char *p;
+ int variable_heading_len, record_len, variable_record_len = (int)strlen(variable) + 1;
+ char *buf_end = varbuf + buflen;
+ p = strchr(variable, '=');
+ if (!p) {
+ return FALSE;
+ }
+ /* Length of given variable name, followed by '=' */
+ variable_heading_len = (int)((const char *)(p + 1) - variable);
+ /* Scanning NVRAM, record by record up to trailing 0 */
+ for (p = varbuf; *p; p += strlen(p) + 1) {
+ /* If given variable found - remove it */
+ if (!strncmp(p, variable, variable_heading_len)) {
+ record_len = (int)strlen(p) + 1;
+ memmove_s(p, buf_end - p, p + record_len, buf_end - (p + record_len));
+ }
+ }
+ /* If buffer does not have space for given variable - return FALSE */
+ if ((p + variable_record_len + 1) > buf_end) {
+ return FALSE;
+ }
+ /* Copy given variable to end of buffer */
+ memmove_s(p, buf_end - p, variable, variable_record_len);
+ /* Adding trailing 0 */
+ p[variable_record_len] = 0;
+ /* Setting optional output parameter - length of data in buffer */
+ if (datalen) {
+ *datalen = (unsigned int)(p + variable_record_len + 1 - varbuf);
+ }
+ return TRUE;
+}
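+/* Example (illustrative sketch; the buffer contents are hypothetical):
+ *
+ *   char nv[32] = "aa=1\0bb=2\0";   // records, double-NUL terminated
+ *   unsigned int n;
+ *   replace_nvram_variable(nv, sizeof(nv), "bb=7", &n);
+ *
+ * The old "bb=2" record is removed, "bb=7" is appended after "aa=1",
+ * and n == 11 for the resulting "aa=1\0bb=7\0\0" image.
+ */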
+
+/*
+ * Returns the bit position of the nearest power of 2 that is >= val,
+ * i.e. ceil(log2(val)).
+ * val[in] - value for which the nearest power-of-2 bit position is returned
+ */
+uint8
+bcm_get_ceil_pow_2(uint val)
+{
+ uint8 bitpos = 0;
+ ASSERT(val);
+ if (val & (val-1)) {
+ /* val is not a power of 2;
+ * round up so that allocation will be aligned to
+ * the next power of 2.
+ */
+ bitpos = 1;
+ }
+ while (val >>= 1) {
+ bitpos ++;
+ }
+ return (bitpos);
+}
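+/* Example (illustrative):
+ *
+ *   bcm_get_ceil_pow_2(5) == 3   // next power of 2 is 8 == 1 << 3
+ *   bcm_get_ceil_pow_2(8) == 3   // 8 is already a power of 2
+ *   bcm_get_ceil_pow_2(9) == 4   // next power of 2 is 16 == 1 << 4
+ */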
+
+#if !defined(BCMDONGLEHOST)
+/** Initialization of varbuf structure */
+void
+BCMATTACHFN(varbuf_init)(varbuf_t *b, char *buf, uint size)
+{
+ b->size = size;
+ b->base = b->buf = buf;
+}
+
+/** append a null terminated var=value string */
+int
+BCMATTACHFN(varbuf_append)(varbuf_t *b, const char *fmt, ...)
+{
+ va_list ap;
+ int r;
+ size_t len;
+ char *s;
+
+ if (b->size < 2)
+ return 0;
+
+ va_start(ap, fmt);
+ r = vsnprintf(b->buf, b->size, fmt, ap);
+ va_end(ap);
+
+ /* C99 snprintf behavior returns r >= size on overflow,
+ * others return -1 on overflow.
+ * All return -1 on format error.
+ * We need to leave room for 2 null terminations, one for the current var
+ * string, and one for final null of the var table. So check that the
+ * strlen written, r, leaves room for 2 chars.
+ */
+ if ((r == -1) || (r > (int)(b->size - 2))) {
+ b->size = 0;
+ return 0;
+ }
+
+ /* Remove any earlier occurrence of the same variable */
+ if ((s = strchr(b->buf, '=')) != NULL) {
+ len = (size_t)(s - b->buf);
+ for (s = b->base; s < b->buf;) {
+ if ((memcmp(s, b->buf, len) == 0) && s[len] == '=') {
+ len = strlen(s) + 1;
+ memmove(s, (s + len), ((b->buf + r + 1) - (s + len)));
+ b->buf -= len;
+ b->size += (unsigned int)len;
+ break;
+ }
+
+ while (*s++)
+ ;
+ }
+ }
+
+ /* skip over this string's null termination */
+ r++;
+ b->size -= r;
+ b->buf += r;
+
+ return r;
+}
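+/* Usage sketch (illustrative; the variable name and values are hypothetical):
+ *
+ *   varbuf_t b;
+ *   char buf[64];
+ *   varbuf_init(&b, buf, sizeof(buf));
+ *   varbuf_append(&b, "boardtype=0x%04x", 0x62b);
+ *   varbuf_append(&b, "boardtype=0x%04x", 0x62c);   // replaces the earlier record
+ *
+ * Each call appends one NUL-terminated "var=value" record; appending a
+ * variable that already exists removes the earlier occurrence first.
+ */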
+
+#if defined(BCMDRIVER)
+/**
+ * Create variable table from memory.
+ * Return 0 on success, nonzero on error.
+ */
+int
+BCMATTACHFN(initvars_table)(osl_t *osh, char *start, char *end, char **vars,
+ uint *count)
+{
+ int c = (int)(end - start);
+
+ /* do it only when there is more than just the null string */
+ if (c > 1) {
+ char *vp = MALLOC(osh, c);
+ ASSERT(vp != NULL);
+ if (!vp)
+ return BCME_NOMEM;
+ bcopy(start, vp, c);
+ *vars = vp;
+ *count = c;
+ }
+ else {
+ *vars = NULL;
+ *count = 0;
+ }
+
+ return 0;
+}
+#endif /* BCMDRIVER */
+
+#endif /* !BCMDONGLEHOST */
+
+/* Shift the contents of 'buf' right by 'bits' bits (0 < bits < 8),
+ * treating the buffer as a contiguous big-endian bit string.
+ */
+int buf_shift_right(uint8 *buf, uint16 len, uint8 bits)
+{
+ uint16 i;
+
+ if (len == 0 || (bits == 0) || (bits >= NBBY)) {
+ return BCME_BADARG;
+ }
+
+ for (i = len - 1u; i > 0; i--) {
+ buf[i] = (buf[i - 1u] << (NBBY - bits)) | (buf[i] >> bits);
+ }
+ buf[0] >>= bits;
+
+ return BCME_OK;
+}
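+/* Example (illustrative): the buffer is treated as a big-endian bit string.
+ *
+ *   uint8 buf[2] = {0x12, 0x34};
+ *   buf_shift_right(buf, sizeof(buf), 4);   // buf is now {0x01, 0x23}
+ *
+ * i.e. the 16-bit pattern 0x1234 shifted right by 4 yields 0x0123.
+ */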
+
+/* print the content of the 'buf' in hex string format */
+void
+prhexstr(const char *prefix, const uint8 *buf, uint len, bool newline)
+{
+ if (len > 0) {
+ uint i;
+
+ if (prefix != NULL) {
+ printf("%s", prefix);
+ }
+ for (i = 0; i < len; i ++) {
+ printf("%02X", buf[i]);
+ }
+ if (newline) {
+ printf("\n");
+ }
+ }
+}
+
+/* Adjust the priority of an 802.1x packet */
+void
+pktset8021xprio(void *pkt, int prio)
+{
+ struct ether_header *eh;
+ uint8 *pktdata;
+ if (prio == PKTPRIO(pkt))
+ return;
+ pktdata = (uint8 *)PKTDATA(OSH_NULL, pkt);
+ ASSERT(ISALIGNED((uintptr)pktdata, sizeof(uint16)));
+ eh = (struct ether_header *) pktdata;
+ if (eh->ether_type == hton16(ETHER_TYPE_802_1X)) {
+ ASSERT(prio >= 0 && prio <= MAXPRIO);
+ PKTSETPRIO(pkt, prio);
+ }
+}
diff --git a/bcmdhd.101.10.361.x/bcmwifi_channels.c b/bcmdhd.101.10.361.x/bcmwifi_channels.c
new file mode 100755
index 0000000..9387207
--- /dev/null
+++ b/bcmdhd.101.10.361.x/bcmwifi_channels.c
@@ -0,0 +1,3000 @@
+/*
+ * Misc utility routines used by kernel or app-level.
+ * Contents are wifi-specific, used by any kernel or app-level
+ * software that needs wifi functionality.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#include <typedefs.h>
+#include <bcmutils.h>
+
+#ifdef BCMDRIVER
+#include <osl.h>
+#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
+#define tolower(c) (bcm_isupper((c)) ? ((c) + 'a' - 'A') : (c))
+#else
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <ctype.h>
+#ifndef ASSERT
+#define ASSERT(exp)
+#endif
+#endif /* BCMDRIVER */
+
+#include <bcmwifi_channels.h>
+
+#if defined(WIN32) && (defined(BCMDLL) || defined(WLMDLL))
+#include <bcmstdlib.h> /* For wlexe/Makefile.wlm_dll */
+#endif
+
+#include <802.11.h>
+
+/* Definitions for D11AC capable (80MHz+) Chanspec type */
+
+/* Chanspec ASCII representation:
+ *
+ * [<band>'g']<channel>['/'<bandwidth>[<primary-sideband>]
+ * ['/'<1st-channel-segment>'-'<2nd-channel-segment>]]
+ *
+ * <band>:
+ * (optional) 2, 4, 5, 6 for 2.4GHz, 4GHz, 5GHz, and 6GHz respectively.
+ * Default value is 2g if channel <= 14, otherwise 5g.
+ * <channel>:
+ * channel number of the 20MHz channel,
+ * or primary 20 MHz channel of 40MHz, 80MHz, 160MHz, 80+80MHz,
+ * 240MHz, 320MHz, or 160+160MHz channels.
+ * <bandwidth>:
+ * (optional) 20, 40, 80, 160, 80+80, 240, 320, or 160+160. Default value is 20.
+ * <primary-sideband>:
+ * 'u' or 'l' (only for 2.4GHz band 40MHz)
+ *
+ * For 2.4GHz band 40MHz channels, the same primary channel may be the
+ * upper sideband for one 40MHz channel, and the lower sideband for an
+ * overlapping 40MHz channel. The {u: upper, l: lower} primary sideband
+ * indication disambiguates which 40MHz channel is being specified.
+ *
+ * For 40MHz in the 5GHz or 6GHz band and all channel bandwidths greater than
+ * 40MHz, the U/L specification is not necessary or allowed since the channels are
+ * non-overlapping and the primary 20MHz channel position is derived from its
+ * position in the wide bandwidth channel.
+ * <1st-channel-segment>
+ * <2nd-channel-segment>:
+ * Required for 80+80 or 160+160, otherwise not allowed.
+ * These fields specify the center channel of the first and the second 80MHz
+ * or 160MHz channels.
+ *
+ * In its simplest form, it is a 20MHz channel number, with the implied band
+ * of 2.4GHz if channel number <= 14, and 5GHz otherwise.
+ *
+ * To allow for backward compatibility with scripts, the old form for
+ * 40MHz channels is also allowed: <channel><primary-sideband>
+ *
+ * <channel>:
+ * primary channel of 40MHz, channel <= 14 is 2GHz, otherwise 5GHz
+ * <primary-sideband>:
+ * "U" for upper, "L" for lower (or lower case "u" "l")
+ *
+ * 5 GHz Examples:
+ * Chanspec BW Center Ch Channel Range Primary Ch
+ * 5g8 20MHz 8 - -
+ * 52 20MHz 52 - -
+ * 52/40 40MHz 54 52-56 52
+ * 56/40 40MHz 54 52-56 56
+ * 52/80 80MHz 58 52-64 52
+ * 56/80 80MHz 58 52-64 56
+ * 60/80 80MHz 58 52-64 60
+ * 64/80 80MHz 58 52-64 64
+ * 52/160 160MHz 50 36-64 52
+ * 36/160 160MHz 50 36-64 36
+ * 36/80+80/42-106 80+80MHz 42,106 36-48,100-112 36
+ *
+ * 2 GHz Examples:
+ * Chanspec BW Center Ch Channel Range Primary Ch
+ * 2g8 20MHz 8 - -
+ * 8 20MHz 8 - -
+ * 6 20MHz 6 - -
+ * 6/40l 40MHz 8 6-10 6
+ * 6l 40MHz 8 6-10 6
+ * 6/40u 40MHz 4 2-6 6
+ * 6u 40MHz 4 2-6 6
+ */
+
+/* bandwidth ASCII string */
+static const char *wf_chspec_bw_str[] =
+{
+ "320",
+ "160+160",
+ "20",
+ "40",
+ "80",
+ "160",
+ "80+80",
+ "240"
+};
+
+static const uint16 wf_chspec_bw_mhz[] = {
+ 320, 320, 20, 40, 80, 160, 160, 240
+};
+#define WF_NUM_BW ARRAYSIZE(wf_chspec_bw_mhz)
+
+/* 40MHz channels in 2.4GHz band */
+static const uint8 wf_2g_40m_chans[] = {
+ 3, 4, 5, 6, 7, 8, 9, 10, 11
+};
+#define WF_NUM_2G_40M_CHANS ARRAYSIZE(wf_2g_40m_chans)
+
+/* 40MHz channels in 5GHz band */
+static const uint8 wf_5g_40m_chans[] = {
+ 38, 46, 54, 62, 102, 110, 118, 126, 134, 142, 151, 159, 167, 175
+};
+#define WF_NUM_5G_40M_CHANS ARRAYSIZE(wf_5g_40m_chans)
+
+/* 80MHz channels in 5GHz band */
+static const uint8 wf_5g_80m_chans[] = {
+ 42, 58, 106, 122, 138, 155, 171
+};
+#define WF_NUM_5G_80M_CHANS ARRAYSIZE(wf_5g_80m_chans)
+
+/* 160MHz channels in 5GHz band */
+static const uint8 wf_5g_160m_chans[] = {
+ 50, 114, 163
+};
+#define WF_NUM_5G_160M_CHANS ARRAYSIZE(wf_5g_160m_chans)
+
+/** 80MHz channels in 6GHz band */
+#define WF_NUM_6G_80M_CHANS 14
+
+/** 160MHz channels in 6GHz band */
+#define WF_NUM_6G_160M_CHANS 7 /* TBD */
+
+/** 240MHz channels in 6GHz band */
+#define WF_NUM_6G_240M_CHANS 4 /* TBD */
+
+/** 320MHz channels in 6GHz band */
+#define WF_NUM_6G_320M_CHANS 3 /* TBD */
+
+/* Conditional macro that helps reduce code size bloat in other branches
+ * and in trunk targets that don't need 11BE features.
+ */
+#define WFC_2VALS_EQ(var, val) ((var) == (val))
+
+/* compare bandwidth unconditionally for 11be related stuff */
+#ifdef WL11BE
+#define WFC_BW_EQ(bw, val) WFC_2VALS_EQ(bw, val)
+#else
+#define WFC_BW_EQ(bw, val) (FALSE)
+#endif
+
+static void wf_chanspec_iter_firstchan(wf_chanspec_iter_t *iter);
+static chanspec_bw_t wf_iter_next_bw(chanspec_bw_t bw);
+static bool wf_chanspec_iter_next_2g(wf_chanspec_iter_t *iter);
+static bool wf_chanspec_iter_next_5g(wf_chanspec_iter_t *iter);
+static int wf_chanspec_iter_next_5g_range(wf_chanspec_iter_t *iter, chanspec_bw_t bw);
+static void wf_chanspec_iter_6g_range_init(wf_chanspec_iter_t *iter, chanspec_bw_t bw);
+static bool wf_chanspec_iter_next_6g(wf_chanspec_iter_t *iter);
+
+/**
+ * Return the chanspec bandwidth in MHz
+ * Bandwidth of 160 MHz will be returned for 80+80MHz chanspecs.
+ *
+ * @param chspec chanspec_t
+ *
+ * @return bandwidth of chspec in MHz units
+ */
+uint
+wf_bw_chspec_to_mhz(chanspec_t chspec)
+{
+ uint bw;
+
+ bw = (chspec & WL_CHANSPEC_BW_MASK) >> WL_CHANSPEC_BW_SHIFT;
+ return (bw >= WF_NUM_BW ? 0 : wf_chspec_bw_mhz[bw]);
+}
+
+/* bw in MHz, return the channel count from the center channel to
+ * the channel at the edge of the band
+ */
+static uint
+center_chan_to_edge(chanspec_bw_t bw)
+{
+ uint delta = 0;
+
+ /* The edge 20MHz channels sit 10MHz in from each side of the band,
+ * so their centers are (BW - 20MHz) apart; the delta from the center
+ * frequency to an edge channel is half of that.
+ */
+ if (bw == WL_CHANSPEC_BW_40) {
+ /* 10 MHz */
+ delta = 2;
+ } else if (bw == WL_CHANSPEC_BW_80) {
+ /* 30 MHz */
+ delta = 6;
+ } else if (bw == WL_CHANSPEC_BW_160) {
+ /* 70 MHz */
+ delta = 14;
+ } else if (WFC_BW_EQ(bw, WL_CHANSPEC_BW_240)) {
+ /* 110 MHz */
+ delta = 22;
+ } else if (WFC_BW_EQ(bw, WL_CHANSPEC_BW_320)) {
+ /* 150 MHz */
+ delta = 30;
+ }
+ return delta;
+}
+
+/* return channel number of the low edge of the band
+ * given the center channel and BW
+ */
+static uint
+channel_low_edge(uint center_ch, chanspec_bw_t bw)
+{
+ return (center_ch - center_chan_to_edge(bw));
+}
+
+/* return side band number given center channel and primary20 channel
+ * return -1 on error
+ */
+static int
+channel_to_sb(uint center_ch, uint primary_ch, chanspec_bw_t bw)
+{
+ uint lowest = channel_low_edge(center_ch, bw);
+ uint sb;
+
+ if (primary_ch < lowest ||
+ (primary_ch - lowest) % 4) {
+ /* bad primary channel: below the low edge of the channel,
+ * or not a multiple of 4 above it.
+ */
+ return -1;
+ }
+
+ sb = ((primary_ch - lowest) / 4);
+
+ /* sb must be an index to a 20MHz channel in range */
+ if ((bw == WL_CHANSPEC_BW_20 && sb >= 1) ||
+ (bw == WL_CHANSPEC_BW_40 && sb >= 2) ||
+ (bw == WL_CHANSPEC_BW_80 && sb >= 4) ||
+ (bw == WL_CHANSPEC_BW_160 && sb >= 8) ||
+ (WFC_BW_EQ(bw, WL_CHANSPEC_BW_240) && sb >= 12) ||
+ (WFC_BW_EQ(bw, WL_CHANSPEC_BW_320) && sb >= 16)) {
+ /* primary_ch must have been too high for the center_ch */
+ return -1;
+ }
+
+ return sb;
+}
+
+/* return primary20 channel given center channel and side band */
+static uint
+channel_to_primary20_chan(uint center_ch, chanspec_bw_t bw, uint sb)
+{
+ return (channel_low_edge(center_ch, bw) + sb * 4);
+}
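+/* Worked example (illustrative): for the 5GHz 80MHz channel centered at 58,
+ * center_chan_to_edge(WL_CHANSPEC_BW_80) == 6, so the low edge is
+ * channel_low_edge(58, WL_CHANSPEC_BW_80) == 52. Primary20 channel 60 then
+ * maps to side band channel_to_sb(58, 60, WL_CHANSPEC_BW_80) == 2, and
+ * channel_to_primary20_chan(58, WL_CHANSPEC_BW_80, 2) == 52 + 2 * 4 == 60
+ * recovers it.
+ */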
+
+/* return index of 80MHz channel from channel number
+ * return -1 on error
+ */
+static int
+channel_80mhz_to_id(uint ch)
+{
+ uint i;
+ for (i = 0; i < WF_NUM_5G_80M_CHANS; i ++) {
+ if (ch == wf_5g_80m_chans[i])
+ return i;
+ }
+
+ return -1;
+}
+
+/* return index of the 6G 80MHz channel from channel number
+ * return -1 on error
+ */
+static int
+channel_6g_80mhz_to_id(uint ch)
+{
+ /* The 6GHz center channels start at 7, and have a spacing of 16 */
+ if (ch >= CH_MIN_6G_80M_CHANNEL &&
+ ch <= CH_MAX_6G_80M_CHANNEL &&
+ ((ch - CH_MIN_6G_80M_CHANNEL) % 16) == 0) { /* multiple of 16 */
+ return (ch - CH_MIN_6G_80M_CHANNEL) / 16;
+ }
+
+ return -1;
+}
+
+/* return index of the 5G 160MHz channel from channel number
+ * return -1 on error
+ */
+static int
+channel_5g_160mhz_to_id(uint ch)
+{
+ uint i;
+ for (i = 0; i < WF_NUM_5G_160M_CHANS; i ++) {
+ if (ch == wf_5g_160m_chans[i]) {
+ return i;
+ }
+ }
+
+ return -1;
+}
+
+/* return index of the 6G 160MHz channel from channel number
+ * return -1 on error
+ */
+static int
+channel_6g_160mhz_to_id(uint ch)
+{
+ /* The 6GHz center channels start at 15, and have a spacing of 32 */
+ if (ch >= CH_MIN_6G_160M_CHANNEL &&
+ ch <= CH_MAX_6G_160M_CHANNEL &&
+ ((ch - CH_MIN_6G_160M_CHANNEL) % 32) == 0) {
+ return (ch - CH_MIN_6G_160M_CHANNEL) / 32;
+ }
+
+ return -1;
+}
+
+/* return index of the 6G 240MHz channel from channel number
+ * return -1 on error
+ */
+static int
+channel_6g_240mhz_to_id(uint ch)
+{
+ /* The 6GHz center channels start at 23, and have a spacing of 48 */
+ if (ch >= CH_MIN_6G_240M_CHANNEL &&
+ ch <= CH_MAX_6G_240M_CHANNEL &&
+ ((ch - CH_MIN_6G_240M_CHANNEL) % 48) == 0) {
+ return (ch - CH_MIN_6G_240M_CHANNEL) / 48;
+ }
+
+ return -1;
+}
+
+/* return index of the 6G 320MHz channel from channel number
+ * return -1 on error
+ */
+static int
+channel_6g_320mhz_to_id(uint ch)
+{
+ /* The 6GHz center channels start at 31, and have a spacing of 64 */
+ if (ch >= CH_MIN_6G_320M_CHANNEL &&
+ ch <= CH_MAX_6G_320M_CHANNEL &&
+ ((ch - CH_MIN_6G_320M_CHANNEL) % 64) == 0) {
+ return (ch - CH_MIN_6G_320M_CHANNEL) / 64;
+ }
+
+ return -1;
+}
+
+/**
+ * This function returns the 6GHz 240MHz center channel for the given chanspec 240MHz ID
+ *
+ * @param chan_240MHz_id 240MHz chanspec ID
+ *
+ * @return Return the center channel number, or 0 on error.
+ *
+ */
+static uint8
+wf_chspec_6G_id240_to_ch(uint8 chan_240MHz_id)
+{
+ uint8 ch = 0;
+
+ if (chan_240MHz_id < WF_NUM_6G_240M_CHANS) {
+ /* The 6GHz center channels have a spacing of 48
+ * starting from the first 240MHz center
+ */
+ ch = CH_MIN_6G_240M_CHANNEL + (chan_240MHz_id * 48);
+ }
+
+ return ch;
+}
+
+/* Retrieve the chan_id and convert it to the center channel */
+uint8
+wf_chspec_240_id2cch(chanspec_t chanspec)
+{
+ if (CHSPEC_BAND(chanspec) == WL_CHANSPEC_BAND_6G &&
+ CHSPEC_BW(chanspec) == WL_CHANSPEC_BW_240) {
+ uint8 ch_id = CHSPEC_GE240_CHAN(chanspec);
+
+ return wf_chspec_6G_id240_to_ch(ch_id);
+ }
+ return 0;
+}
+
+/**
+ * This function returns the 6GHz 320MHz center channel for the given chanspec 320MHz ID
+ *
+ * @param chan_320MHz_id 320MHz chanspec ID
+ *
+ * @return Return the center channel number, or 0 on error.
+ *
+ */
+static uint8
+wf_chspec_6G_id320_to_ch(uint8 chan_320MHz_id)
+{
+ uint8 ch = 0;
+
+ if (chan_320MHz_id < WF_NUM_6G_320M_CHANS) {
+ /* The 6GHz center channels have a spacing of 64
+ * starting from the first 320MHz center
+ */
+ ch = CH_MIN_6G_320M_CHANNEL + (chan_320MHz_id * 64);
+ }
+
+ return ch;
+}
+
+/* Retrieve the chan_id and convert it to the center channel */
+uint8
+wf_chspec_320_id2cch(chanspec_t chanspec)
+{
+ if (CHSPEC_BAND(chanspec) == WL_CHANSPEC_BAND_6G &&
+ CHSPEC_BW(chanspec) == WL_CHANSPEC_BW_320) {
+ uint8 ch_id = CHSPEC_GE240_CHAN(chanspec);
+
+ return wf_chspec_6G_id320_to_ch(ch_id);
+ }
+ return 0;
+}
+
+/**
+ * Convert chanspec to an ascii string, or format the hex value of an invalid chanspec.
+ *
+ * @param chspec chanspec to format
+ * @param buf pointer to buf with room for at least CHANSPEC_STR_LEN bytes
+ *
+ * @return Returns pointer to passed in buf. The buffer will have the ascii
+ * representation of the given chspec, or "invalid 0xHHHH" where
+ * 0xHHHH is the hex representation of the invalid chanspec.
+ *
+ * @see CHANSPEC_STR_LEN
+ *
+ * Wrapper function for wf_chspec_ntoa. In case of an error it puts
+ * the original chanspec in the output buffer, prepended with "invalid".
+ * Can be used directly in print routines as it handles the NULL error return.
+ */
+char *
+wf_chspec_ntoa_ex(chanspec_t chspec, char *buf)
+{
+ if (wf_chspec_ntoa(chspec, buf) == NULL)
+ snprintf(buf, CHANSPEC_STR_LEN, "invalid 0x%04x", chspec);
+ return buf;
+}
+
+/**
+ * Convert chanspec to ascii string, or return NULL on error.
+ *
+ * @param chspec chanspec to format
+ * @param buf pointer to buf with room for at least CHANSPEC_STR_LEN bytes
+ *
+ * @return Returns pointer to passed in buf or NULL on error. On success, the buffer
+ * will have the ascii representation of the given chspec.
+ *
+ * @see CHANSPEC_STR_LEN
+ *
+ * Given a chanspec and a string buffer, format the chanspec as a
+ * string, and return the original pointer buf.
+ * Min buffer length must be CHANSPEC_STR_LEN.
+ * On error return NULL.
+ */
+char *
+wf_chspec_ntoa(chanspec_t chspec, char *buf)
+{
+ const char *band;
+ uint pri_chan;
+
+ if (wf_chspec_malformed(chspec))
+ return NULL;
+
+ band = "";
+
+ /* check for non-default band spec */
+ if (CHSPEC_IS2G(chspec) && CHSPEC_CHANNEL(chspec) > CH_MAX_2G_CHANNEL) {
+ band = "2g";
+ } else if (CHSPEC_IS5G(chspec) && CHSPEC_CHANNEL(chspec) <= CH_MAX_2G_CHANNEL) {
+ band = "5g";
+ } else if (CHSPEC_IS6G(chspec)) {
+ band = "6g";
+ }
+
+ /* primary20 channel */
+ pri_chan = wf_chspec_primary20_chan(chspec);
+
+ /* bandwidth and primary20 sideband */
+ if (CHSPEC_IS20(chspec)) {
+ snprintf(buf, CHANSPEC_STR_LEN, "%s%d", band, pri_chan);
+ } else if (CHSPEC_IS240(chspec)) {
+ /* 240 */
+ const char *bw;
+
+ bw = wf_chspec_to_bw_str(chspec);
+
+ snprintf(buf, CHANSPEC_STR_LEN, "%s%d/%s", band, pri_chan, bw);
+ } else if (CHSPEC_IS320(chspec)) {
+ /* 320 */
+ const char *bw;
+
+ bw = wf_chspec_to_bw_str(chspec);
+
+ snprintf(buf, CHANSPEC_STR_LEN, "%s%d/%s", band, pri_chan, bw);
+ } else {
+ const char *bw;
+ const char *sb = "";
+
+ bw = wf_chspec_to_bw_str(chspec);
+
+#ifdef CHANSPEC_NEW_40MHZ_FORMAT
+ /* primary20 sideband string if needed for 2g 40MHz */
+ if (CHSPEC_IS40(chspec) && CHSPEC_IS2G(chspec)) {
+ sb = CHSPEC_SB_UPPER(chspec) ? "u" : "l";
+ }
+
+ snprintf(buf, CHANSPEC_STR_LEN, "%s%d/%s%s", band, pri_chan, bw, sb);
+#else
+ /* primary20 sideband string instead of BW for 40MHz */
+ if (CHSPEC_IS40(chspec) && !CHSPEC_IS6G(chspec)) {
+ sb = CHSPEC_SB_UPPER(chspec) ? "u" : "l";
+ snprintf(buf, CHANSPEC_STR_LEN, "%s%d%s", band, pri_chan, sb);
+ } else {
+ snprintf(buf, CHANSPEC_STR_LEN, "%s%d/%s", band, pri_chan, bw);
+ }
+#endif /* CHANSPEC_NEW_40MHZ_FORMAT */
+ }
+
+ return (buf);
+}
+
+static int
+read_uint(const char **p, unsigned int *num)
+{
+ unsigned long val;
+ char *endp = NULL;
+
+ val = strtoul(*p, &endp, 10);
+ /* if endp is the initial pointer value, then a number was not read */
+ if (endp == *p)
+ return 0;
+
+ /* advance the buffer pointer to the end of the integer string */
+ *p = endp;
+ /* return the parsed integer */
+ *num = (unsigned int)val;
+
+ return 1;
+}
+
+/**
+ * Convert ascii string to chanspec
+ *
+ * @param a pointer to input string
+ *
+ * @return Return > 0 if successful or 0 otherwise
+ */
+chanspec_t
+wf_chspec_aton(const char *a)
+{
+ chanspec_t chspec;
+ chanspec_band_t chspec_band;
+ chanspec_subband_t chspec_sb;
+ chanspec_bw_t chspec_bw;
+ uint bw;
+ uint num, pri_ch;
+ char c, sb_ul = '\0';
+
+ bw = 20;
+ chspec_sb = 0;
+
+ /* parse channel num or band */
+ if (!read_uint(&a, &num))
+ return 0;
+ /* if we are looking at a 'g', then the first number was a band */
+ c = tolower((int)a[0]);
+ if (c == 'g') {
+ a++; /* consume the char */
+
+ /* band must be "2", "5", or "6" */
+ if (num == 2)
+ chspec_band = WL_CHANSPEC_BAND_2G;
+ else if (num == 5)
+ chspec_band = WL_CHANSPEC_BAND_5G;
+ else if (num == 6)
+ chspec_band = WL_CHANSPEC_BAND_6G;
+ else
+ return 0;
+
+ /* read the channel number */
+ if (!read_uint(&a, &pri_ch))
+ return 0;
+
+ c = tolower((int)a[0]);
+ } else {
+ /* first number is channel, use default for band */
+ pri_ch = num;
+ chspec_band = ((pri_ch <= CH_MAX_2G_CHANNEL) ?
+ WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G);
+ }
+
+ if (c == '\0') {
+ /* default BW of 20MHz */
+ chspec_bw = WL_CHANSPEC_BW_20;
+ goto done_read;
+ }
+
+ a ++; /* consume the 'u','l', or '/' */
+
+ /* check 'u'/'l' */
+ if (c == 'u' || c == 'l') {
+ sb_ul = c;
+ chspec_bw = WL_CHANSPEC_BW_40;
+ goto done_read;
+ }
+
+ /* next letter must be '/' */
+ if (c != '/')
+ return 0;
+
+ /* read bandwidth */
+ if (!read_uint(&a, &bw))
+ return 0;
+
+ /* convert to chspec value */
+ if (bw == 20) {
+ chspec_bw = WL_CHANSPEC_BW_20;
+ } else if (bw == 40) {
+ chspec_bw = WL_CHANSPEC_BW_40;
+ } else if (bw == 80) {
+ chspec_bw = WL_CHANSPEC_BW_80;
+ } else if (bw == 160) {
+ chspec_bw = WL_CHANSPEC_BW_160;
+ } else if (WFC_BW_EQ(bw, 240)) {
+ chspec_bw = WL_CHANSPEC_BW_240;
+ } else if (WFC_BW_EQ(bw, 320)) {
+ chspec_bw = WL_CHANSPEC_BW_320;
+ } else {
+ return 0;
+ }
+
+ /* So far we have <band>g<chan>/<bw>
+ * Can now be followed by u/l if bw = 40,
+ */
+
+ c = tolower((int)a[0]);
+
+ /* if we have a 2g/40 channel, we should have a l/u spec now */
+ if (chspec_band == WL_CHANSPEC_BAND_2G && bw == 40) {
+ if (c == 'u' || c == 'l') {
+ a ++; /* consume the u/l char */
+ sb_ul = c;
+ goto done_read;
+ }
+ }
+
+ /* 80+80 and 160+160 ('+' forms) are not supported here; reject */
+ if (c == '+') {
+ return 0;
+ }
+
+done_read:
+ /* skip trailing white space */
+ while (a[0] == ' ') {
+ a ++;
+ }
+
+ /* must be end of string */
+ if (a[0] != '\0')
+ return 0;
+
+ /* Now have all the chanspec string parts read;
+ * chspec_band, pri_ch, chspec_bw, sb_ul.
+ * chspec_band and chspec_bw are chanspec values.
+ * Need to convert pri_ch, and sb_ul into
+ * a center channel (or two) and sideband.
+ */
+
+ /* if a sb u/l string was given, just use that,
+ * guaranteed to be bw = 40 by string parse.
+ */
+ if (sb_ul != '\0') {
+ if (sb_ul == 'l') {
+ chspec_sb = WL_CHANSPEC_CTL_SB_LLL;
+ } else if (sb_ul == 'u') {
+ chspec_sb = WL_CHANSPEC_CTL_SB_LLU;
+ }
+ chspec = wf_create_40MHz_chspec_primary_sb(pri_ch, chspec_sb, chspec_band);
+ } else if (chspec_bw == WL_CHANSPEC_BW_20) {
+ /* if the bw is 20, only need the primary channel and band */
+ chspec = wf_create_20MHz_chspec(pri_ch, chspec_band);
+ } else {
+ /* If the bw is 40/80/160/240/320 (and not 40MHz 2G), the channels are
+ * non-overlapping in 5G or 6G bands. Each primary channel is contained
+ * in only one higher bandwidth channel. The wf_create_chspec_from_primary()
+ * will create the chanspec. 2G 40MHz is handled just above, assuming a {u,l}
+ * sub-band spec was given.
+ */
+ chspec = wf_create_chspec_from_primary(pri_ch, chspec_bw, chspec_band);
+ }
+
+ if (wf_chspec_malformed(chspec))
+ return 0;
+
+ return chspec;
+}
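+/* Example inputs (illustrative, per the chanspec grammar described at the
+ * top of this file):
+ *
+ *   wf_chspec_aton("36/80");     // 5G 80MHz channel with primary20 36
+ *   wf_chspec_aton("6u");        // 2.4G 40MHz, primary 6, upper side band
+ *   wf_chspec_aton("6g37/160");  // 6G 160MHz channel with primary20 37
+ *
+ * Each call returns the encoded chanspec_t, or 0 on a parse error.
+ */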
+
+/**
+ * Verify the chanspec is using a legal set of parameters, i.e. that the
+ * chanspec specified a band, bw, pri_sb and channel and that the
+ * combination could be legal given any set of circumstances.
+ *
+ * @param chanspec the chanspec to check
+ *
+ * @return Returns TRUE if the chanspec is malformed, FALSE if it looks good.
+ */
+bool
+#ifdef BCMPOSTTRAPFN
+BCMPOSTTRAPFN(wf_chspec_malformed)(chanspec_t chanspec)
+#else
+wf_chspec_malformed(chanspec_t chanspec)
+#endif
+{
+ uint chspec_bw = CHSPEC_BW(chanspec);
+ uint chspec_sb;
+
+ if (CHSPEC_IS2G(chanspec)) {
+ /* must be valid bandwidth for 2G */
+ if (!BW_LE40(chspec_bw)) {
+ return TRUE;
+ }
+
+ /* check for invalid channel number */
+ if (CHSPEC_CHANNEL(chanspec) == INVCHANNEL) {
+ return TRUE;
+ }
+ } else if (CHSPEC_IS5G(chanspec) || CHSPEC_IS6G(chanspec)) {
+ if (WFC_BW_EQ(chspec_bw, WL_CHANSPEC_BW_240)) {
+ uint ch_id;
+
+ ch_id = CHSPEC_GE240_CHAN(chanspec);
+
+ /* channel IDs in 240 must be in range */
+ if (CHSPEC_IS6G(chanspec)) {
+ if (ch_id >= WF_NUM_6G_240M_CHANS) {
+ /* bad 240MHz channel ID for the band */
+ return TRUE;
+ }
+ } else {
+ return TRUE;
+ }
+ } else if (WFC_BW_EQ(chspec_bw, WL_CHANSPEC_BW_320)) {
+ uint ch_id;
+
+ ch_id = CHSPEC_GE240_CHAN(chanspec);
+
+ /* channel IDs in 320 must be in range */
+ if (CHSPEC_IS6G(chanspec)) {
+ if (ch_id >= WF_NUM_6G_320M_CHANS) {
+ /* bad 320MHz channel ID for the band */
+ return TRUE;
+ }
+ } else {
+ return TRUE;
+ }
+ } else if (chspec_bw == WL_CHANSPEC_BW_20 || chspec_bw == WL_CHANSPEC_BW_40 ||
+ chspec_bw == WL_CHANSPEC_BW_80 || chspec_bw == WL_CHANSPEC_BW_160) {
+
+ /* check for invalid channel number */
+ if (CHSPEC_CHANNEL(chanspec) == INVCHANNEL) {
+ return TRUE;
+ }
+ } else {
+ /* invalid bandwidth */
+ return TRUE;
+ }
+ } else {
+ /* must be a valid band */
+ return TRUE;
+ }
+
+ /* retrieve sideband */
+ if ((WFC_BW_EQ(chspec_bw, WL_CHANSPEC_BW_240)) ||
+ (WFC_BW_EQ(chspec_bw, WL_CHANSPEC_BW_320))) {
+ chspec_sb = CHSPEC_GE240_SB(chanspec);
+ } else {
+ chspec_sb = CHSPEC_CTL_SB(chanspec);
+ }
+
+ /* side band needs to be consistent with bandwidth */
+ if (chspec_bw == WL_CHANSPEC_BW_20) {
+ if (chspec_sb != WL_CHANSPEC_CTL_SB_LLL)
+ return TRUE;
+ } else if (chspec_bw == WL_CHANSPEC_BW_40) {
+ if (chspec_sb > WL_CHANSPEC_CTL_SB_LLU)
+ return TRUE;
+ } else if (chspec_bw == WL_CHANSPEC_BW_80) {
+ /* both 80MHz and 80+80MHz use 80MHz side bands.
+ * 80+80 SB info is relative to the primary 80MHz sub-band.
+ */
+ if (chspec_sb > WL_CHANSPEC_CTL_SB_LUU)
+ return TRUE;
+ } else if (chspec_bw == WL_CHANSPEC_BW_160) {
+ ASSERT(chspec_sb <= WL_CHANSPEC_CTL_SB_UUU);
+ } else if (WFC_BW_EQ(chspec_bw, WL_CHANSPEC_BW_240)) {
+ /* FIXME: define the max sideband index */
+ ASSERT((chspec_sb >> WL_CHANSPEC_GE240_SB_SHIFT) <= 11);
+ } else if (WFC_BW_EQ(chspec_bw, WL_CHANSPEC_BW_320)) {
+ /* FIXME: define the max sideband index */
+ ASSERT((chspec_sb >> WL_CHANSPEC_GE240_SB_SHIFT) <= 15);
+ }
+
+ return FALSE;
+}
+
+/**
+ * Verify the chanspec specifies a valid channel according to 802.11.
+ *
+ * @param chanspec the chanspec to check
+ *
+ * @return Returns TRUE if the chanspec is a valid 802.11 channel
+ */
+bool
+wf_chspec_valid(chanspec_t chanspec)
+{
+ chanspec_band_t chspec_band = CHSPEC_BAND(chanspec);
+ chanspec_bw_t chspec_bw = CHSPEC_BW(chanspec);
+ uint chspec_ch = -1;
+
+ if (wf_chspec_malformed(chanspec)) {
+ return FALSE;
+ }
+
+ if (WFC_BW_EQ(chspec_bw, WL_CHANSPEC_BW_240)) {
+ if (CHSPEC_IS6G(chanspec)) {
+ chspec_ch = wf_chspec_6G_id240_to_ch(CHSPEC_GE240_CHAN(chanspec));
+ } else {
+ return FALSE;
+ }
+ } else if (WFC_BW_EQ(chspec_bw, WL_CHANSPEC_BW_320)) {
+ if (CHSPEC_IS6G(chanspec)) {
+ chspec_ch = wf_chspec_6G_id320_to_ch(CHSPEC_GE240_CHAN(chanspec));
+ } else {
+ return FALSE;
+ }
+ } else {
+ chspec_ch = CHSPEC_CHANNEL(chanspec);
+ }
+
+ /* After the malformed check, we know that we have
+ * a valid band field,
+ * a valid bandwidth for the band,
+ * and a valid sub-band value for the bandwidth.
+ *
+ * Since all sub-band specs are valid for any channel, the only thing remaining to
+ * check is that
+ * the 20MHz channel,
+ * or the center channel for higher BW,
+ * or both center channels for an 80+80MHz channel,
+ * are valid for the specified band.
+ * Also, 80+80MHz channels need to be non-contiguous.
+ */
+
+ if (chspec_bw == WL_CHANSPEC_BW_20) {
+
+ return wf_valid_20MHz_chan(chspec_ch, chspec_band);
+
+ } else if (chspec_bw == WL_CHANSPEC_BW_40) {
+
+ return wf_valid_40MHz_center_chan(chspec_ch, chspec_band);
+
+ } else if (chspec_bw == WL_CHANSPEC_BW_80) {
+
+ return wf_valid_80MHz_center_chan(chspec_ch, chspec_band);
+
+ } else if (chspec_bw == WL_CHANSPEC_BW_160) {
+
+ return wf_valid_160MHz_center_chan(chspec_ch, chspec_band);
+
+ } else if (WFC_BW_EQ(chspec_bw, WL_CHANSPEC_BW_240)) {
+
+ return wf_valid_240MHz_center_chan(chspec_ch, chspec_band);
+
+ } else if (WFC_BW_EQ(chspec_bw, WL_CHANSPEC_BW_320)) {
+
+ return wf_valid_320MHz_center_chan(chspec_ch, chspec_band);
+
+ }
+
+ return FALSE;
+}
+
+/* 5G band 20MHz channel ranges with even (+4) channel spacing */
+static const struct wf_iter_range wf_5g_iter_ranges[] = {
+ {36, 64},
+ {100, 144},
+ {149, 165}
+};
+
+#define RANGE_ID_INVAL 0xFFu
+enum wf_iter_state {
+ WF_ITER_INIT = 0,
+ WF_ITER_RUN = 1,
+ WF_ITER_DONE = 2
+};
+
+/**
+ * @brief Initialize a chanspec iteration structure.
+ */
+bool
+wf_chanspec_iter_init(wf_chanspec_iter_t *iter, chanspec_band_t band, chanspec_bw_t bw)
+{
+ if (iter == NULL) {
+ return FALSE;
+ }
+
+ /* Initialize the iter structure to the "DONE" state
+ * in case the parameter validation fails.
+ * If the validation fails then the iterator will return INVCHANSPEC as the current
+ * chanspec, and wf_chanspec_iter_next() will return FALSE.
+ */
+ memset(iter, 0, sizeof(*iter));
+ iter->state = WF_ITER_DONE;
+ iter->chanspec = INVCHANSPEC;
+
+ if (band != WL_CHANSPEC_BAND_2G &&
+ band != WL_CHANSPEC_BAND_5G &&
+ band != WL_CHANSPEC_BAND_6G) {
+ ASSERT(0);
+ return FALSE;
+ }
+
+ /* make sure the BW is unspecified (INVCHANSPEC), 20/40,
+ * or (not 2g and 80/160, plus 240/320 when WL11BE is defined)
+ */
+ if (!(bw == INVCHANSPEC ||
+ bw == WL_CHANSPEC_BW_20 ||
+ bw == WL_CHANSPEC_BW_40 ||
+ (band != WL_CHANSPEC_BAND_2G &&
+ (bw == WL_CHANSPEC_BW_80 ||
+ bw == WL_CHANSPEC_BW_160 ||
+ WFC_BW_EQ(bw, WL_CHANSPEC_BW_240) ||
+ WFC_BW_EQ(bw, WL_CHANSPEC_BW_320))))) {
+
+ ASSERT(0);
+ return FALSE;
+ }
+
+ /* Validation of the params is successful so move to the "INIT" state to
+ * allow the first wf_chanspec_iter_next() move the iteration to the first
+ * chanspec in the set.
+ */
+ iter->state = WF_ITER_INIT;
+ iter->band = band;
+ iter->bw = bw;
+ iter->range_id = RANGE_ID_INVAL;
+
+ return TRUE;
+}
+
+/**
+ * Start the iterator off from the 'init' state.
+ * The internal state is set up and advanced to the first chanspec.
+ */
+static void
+wf_chanspec_iter_firstchan(wf_chanspec_iter_t *iter)
+{
+ chanspec_band_t band = iter->band;
+ chanspec_bw_t bw = iter->bw;
+ chanspec_t chspec;
+
+ /* if BW unspecified (INVCHANSPEC), start with 20 MHz */
+ if (bw == INVCHANSPEC) {
+ bw = WL_CHANSPEC_BW_20;
+ }
+
+ /* calc the initial channel based on band */
+ if (band == WL_CHANSPEC_BAND_2G) {
+ /* 2g has overlapping 40MHz channels, so cannot just use the
+ * wf_create_chspec_from_primary() fn.
+ */
+ if (bw == WL_CHANSPEC_BW_20) {
+ chspec = wf_create_20MHz_chspec(CH_MIN_2G_CHANNEL, band);
+ } else {
+ chspec = (WL_CHANSPEC_BAND_2G | bw | WL_CHANSPEC_CTL_SB_L |
+ CH_MIN_2G_40M_CHANNEL);
+ }
+ } else {
+ if (band == WL_CHANSPEC_BAND_5G) {
+ wf_chanspec_iter_next_5g_range(iter, bw);
+ } else {
+ wf_chanspec_iter_6g_range_init(iter, bw);
+ }
+ chspec = wf_create_chspec_from_primary(iter->range.start, bw, band);
+ }
+
+ iter->chanspec = chspec;
+}
+
+/**
+ * @brief Return the current chanspec of the iteration.
+ */
+chanspec_t
+wf_chanspec_iter_current(wf_chanspec_iter_t *iter)
+{
+ return iter->chanspec;
+}
+
+/**
+ * @brief Advance the iteration to the next chanspec in the set.
+ */
+bool
+wf_chanspec_iter_next(wf_chanspec_iter_t *iter, chanspec_t *chspec)
+{
+ bool ok = FALSE;
+ chanspec_band_t band = iter->band;
+
+ /* Handle the INIT and DONE states. Otherwise, we are in the RUN state
+ * and will dispatch to the 'next' function for the appropriate band.
+ */
+ if (iter->state == WF_ITER_INIT) {
+ iter->state = WF_ITER_RUN;
+ wf_chanspec_iter_firstchan(iter);
+ ok = TRUE;
+ } else if (iter->state == WF_ITER_DONE) {
+ ok = FALSE;
+ } else if (band == WL_CHANSPEC_BAND_2G) {
+ ok = wf_chanspec_iter_next_2g(iter);
+ } else if (band == WL_CHANSPEC_BAND_5G) {
+ ok = wf_chanspec_iter_next_5g(iter);
+ } else if (band == WL_CHANSPEC_BAND_6G) {
+ ok = wf_chanspec_iter_next_6g(iter);
+ }
+
+ /* Return the new chanspec if a pointer was provided.
+ * In case the iteration is done, the return will be INVCHANSPEC.
+ */
+ if (chspec != NULL) {
+ *chspec = iter->chanspec;
+ }
+
+ return ok;
+}
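+/* Usage sketch (illustrative): walk every 80MHz chanspec in the 5G band.
+ *
+ *   wf_chanspec_iter_t iter;
+ *   chanspec_t chspec;
+ *
+ *   if (wf_chanspec_iter_init(&iter, WL_CHANSPEC_BAND_5G, WL_CHANSPEC_BW_80)) {
+ *           while (wf_chanspec_iter_next(&iter, &chspec)) {
+ *                   // visit chspec
+ *           }
+ *   }
+ *
+ * Passing INVCHANSPEC as the bw iterates all bandwidths for the band.
+ */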
+
+/**
+ * When the iterator completes a particular bandwidth, this function
+ * returns the next BW, or INVCHANSPEC when done.
+ *
+ * Internal iterator helper.
+ */
+static chanspec_bw_t
+wf_iter_next_bw(chanspec_bw_t bw)
+{
+ switch (bw) {
+ case WL_CHANSPEC_BW_20:
+ bw = WL_CHANSPEC_BW_40;
+ break;
+ case WL_CHANSPEC_BW_40:
+ bw = WL_CHANSPEC_BW_80;
+ break;
+ case WL_CHANSPEC_BW_80:
+ bw = WL_CHANSPEC_BW_160;
+ break;
+#ifdef WL11BE
+ case WL_CHANSPEC_BW_160:
+ bw = WL_CHANSPEC_BW_240;
+ break;
+ case WL_CHANSPEC_BW_240:
+ bw = WL_CHANSPEC_BW_320;
+ break;
+#endif
+ default:
+ bw = INVCHANSPEC;
+ break;
+ }
+ return bw;
+}
+
+/**
+ * This is the _iter_next() helper for 2g band chanspec iteration.
+ */
+static bool
+wf_chanspec_iter_next_2g(wf_chanspec_iter_t *iter)
+{
+ chanspec_t chspec = iter->chanspec;
+ uint8 ch = CHSPEC_CHANNEL(chspec);
+
+ if (CHSPEC_IS20(chspec)) {
+ if (ch < CH_MAX_2G_CHANNEL) {
+ ch++;
+ chspec = wf_create_20MHz_chspec(ch, WL_CHANSPEC_BAND_2G);
+ } else if (iter->bw == INVCHANSPEC) {
+ /* hit the end of 20M channels, go to 40M if bw was unspecified */
+ ch = CH_MIN_2G_40M_CHANNEL;
+ chspec = wf_create_40MHz_chspec(LOWER_20_SB(ch), ch, WL_CHANSPEC_BAND_2G);
+ } else {
+ /* done */
+ iter->state = WF_ITER_DONE;
+ chspec = INVCHANSPEC;
+ }
+ } else {
+ /* step through low then high primary sideband, then next 40 center channel */
+ if (CHSPEC_SB_LOWER(iter->chanspec)) {
+ /* move from lower primary 20 to upper */
+ chspec = wf_create_40MHz_chspec(UPPER_20_SB(ch),
+ ch, WL_CHANSPEC_BAND_2G);
+ } else if (ch < CH_MAX_2G_40M_CHANNEL) {
+ /* move to next 40M center and lower primary 20 */
+ ch++;
+ chspec = wf_create_40MHz_chspec(LOWER_20_SB(ch),
+ ch, WL_CHANSPEC_BAND_2G);
+ } else {
+ /* done */
+ iter->state = WF_ITER_DONE;
+ chspec = INVCHANSPEC;
+ }
+ }
+
+ iter->chanspec = chspec;
+
+ return (chspec != INVCHANSPEC);
+}
+
+/**
+ * This is the _iter_next() helper for 5g band chanspec iteration.
+ * The 5g iterator uses ranges of primary 20MHz channels, and the current BW, to create
+ * each chanspec in the set.
+ * When a 5g range is exhausted, wf_chanspec_iter_next_5g_range() is called to get the next
+ * range appropriate to the current BW.
+ */
+static bool
+wf_chanspec_iter_next_5g(wf_chanspec_iter_t *iter)
+{
+ chanspec_t chspec = iter->chanspec;
+ chanspec_bw_t bw = CHSPEC_BW(chspec);
+ uint8 ch = wf_chspec_primary20_chan(chspec);
+ uint8 end = iter->range.end;
+
+ if (ch < end) {
+ /* not at the end of the current range, so
+ * step to the next 20MHz channel and create the current BW
+ * channel with that new primary 20MHz.
+ */
+ ch += CH_20MHZ_APART;
+ } else if (wf_chanspec_iter_next_5g_range(iter, bw)) {
+ /* there was a new range in the current BW, so start at the beginning */
+ ch = iter->range.start;
+ } else if (iter->bw == INVCHANSPEC) {
+ /* hit the end of current bw, so move to the next bw */
+ bw = wf_iter_next_bw(bw);
+ if (bw != INVCHANSPEC) {
+ /* initialize the first range */
+ iter->range_id = RANGE_ID_INVAL;
+ wf_chanspec_iter_next_5g_range(iter, bw);
+ ch = iter->range.start;
+ } else {
+ /* no more BWs */
+ chspec = INVCHANSPEC;
+ }
+ } else {
+ /* no more channels, ranges, or BWs */
+ chspec = INVCHANSPEC;
+ }
+
+ /* if we are not at the end of the iteration, calc the next chanspec from components */
+ if (chspec != INVCHANSPEC) {
+ chspec = wf_create_chspec_from_primary(ch, bw, WL_CHANSPEC_BAND_5G);
+ }
+
+ iter->chanspec = chspec;
+ if (chspec != INVCHANSPEC) {
+ return TRUE;
+ } else {
+ iter->state = WF_ITER_DONE;
+ return FALSE;
+ }
+}
+
+/**
+ * Helper function to set up the next range of primary 20MHz channels to
+ * iterate over for the current BW. This will advance
+ * iter->range_id
+ * and set up
+ * iter->range.start
+ * iter->range.end
+ * for the new range.
+ * Returns FALSE if there are no more ranges in the current BW.
+ */
+static int
+wf_chanspec_iter_next_5g_range(wf_chanspec_iter_t *iter, chanspec_bw_t bw)
+{
+ uint8 range_id = iter->range_id;
+ const uint8 *channels;
+ uint count;
+
+ if (bw == WL_CHANSPEC_BW_20) {
+ if (range_id == RANGE_ID_INVAL) {
+ range_id = 0;
+ } else {
+ range_id++;
+ }
+
+ if (range_id < ARRAYSIZE(wf_5g_iter_ranges)) {
+ iter->range_id = range_id;
+ iter->range = wf_5g_iter_ranges[range_id];
+ return TRUE;
+ }
+
+ return FALSE;
+ }
+
+ if (bw == WL_CHANSPEC_BW_40) {
+ channels = wf_5g_40m_chans;
+ count = WF_NUM_5G_40M_CHANS;
+ } else if (bw == WL_CHANSPEC_BW_80) {
+ channels = wf_5g_80m_chans;
+ count = WF_NUM_5G_80M_CHANS;
+ } else if (bw == WL_CHANSPEC_BW_160) {
+ channels = wf_5g_160m_chans;
+ count = WF_NUM_5G_160M_CHANS;
+ } else {
+ return FALSE;
+ }
+
+ if (range_id == RANGE_ID_INVAL) {
+ range_id = 0;
+ } else {
+ range_id++;
+ }
+ if (range_id < count) {
+ uint8 ch = channels[range_id];
+ uint offset = center_chan_to_edge(bw);
+
+ iter->range_id = range_id;
+ iter->range.start = ch - offset;
+ iter->range.end = ch + offset;
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+/**
+ * This is the _iter_next() helper for 6g band chanspec iteration.
+ * The 6g iterator uses ranges of primary 20MHz channels, and the current BW, to create
+ * each chanspec in the set.
+ * Each BW in 6g has one contiguous range of primary 20MHz channels. When a range is
+ * exhausted, the iterator moves to the next BW.
+ */
+static bool
+wf_chanspec_iter_next_6g(wf_chanspec_iter_t *iter)
+{
+ chanspec_t chspec = iter->chanspec;
+ chanspec_bw_t bw = CHSPEC_BW(chspec);
+ uint8 ch = wf_chspec_primary20_chan(chspec);
+ uint8 end = iter->range.end;
+
+ if (ch < end) {
+ /* not at the end of the current range, so
+ * step to the next 20MHz channel and create the current BW
+ * channel with that new primary 20MHz.
+ */
+ ch += CH_20MHZ_APART;
+
+ /* try to create a valid channel of the current BW
+ * with a primary20 'ch'
+ */
+ chspec = wf_create_chspec_from_primary(ch, bw, WL_CHANSPEC_BAND_6G);
+
+ /* if chspec is INVCHANSPEC, then we hit the end
+ * of the valid channels in the range.
+ */
+ } else {
+ /* hit the end of the current range */
+ chspec = INVCHANSPEC;
+ }
+
+ /* if we are at the end of the current channel range
+ * check if there is another BW to iterate
+ * Note: (iter->bw == INVCHANSPEC) indicates an unspecified BW for the iteration,
+ * so it will iterate over all BWs.
+ */
+ if (chspec == INVCHANSPEC &&
+ iter->bw == INVCHANSPEC &&
+ (bw = wf_iter_next_bw(bw)) != INVCHANSPEC) {
+ /* start the new bw with the first primary20 */
+ ch = iter->range.start;
+ chspec = wf_create_chspec_from_primary(ch, bw, WL_CHANSPEC_BAND_6G);
+ }
+
+ iter->chanspec = chspec;
+ if (chspec != INVCHANSPEC) {
+ return TRUE;
+ } else {
+ iter->state = WF_ITER_DONE;
+ return FALSE;
+ }
+}
+
+/**
+ * Helper used by wf_chanspec_iter_firstchan() to set up the first range of
+ * primary channels for the 6g band and for the BW being iterated.
+ */
+static void
+wf_chanspec_iter_6g_range_init(wf_chanspec_iter_t *iter, chanspec_bw_t bw)
+{
+ switch (bw) {
+ case WL_CHANSPEC_BW_20:
+ case WL_CHANSPEC_BW_40:
+ case WL_CHANSPEC_BW_80:
+ case WL_CHANSPEC_BW_160:
+#ifdef WL11BE
+ case WL_CHANSPEC_BW_240:
+ case WL_CHANSPEC_BW_320:
+#endif
+ iter->range.start = CH_MIN_6G_CHANNEL;
+ iter->range.end = CH_MAX_6G_CHANNEL;
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+}
+
+/**
+ * Verify that the channel is a valid 20MHz channel according to 802.11.
+ *
+ * @param channel 20MHz channel number to validate
+ * @param band chanspec band
+ *
+ * @return Return TRUE if valid
+ */
+bool
+wf_valid_20MHz_chan(uint channel, chanspec_band_t band)
+{
+ if (band == WL_CHANSPEC_BAND_2G) {
+ /* simple range check for 2GHz */
+ return (channel >= CH_MIN_2G_CHANNEL &&
+ channel <= CH_MAX_2G_CHANNEL);
+ } else if (band == WL_CHANSPEC_BAND_5G) {
+ const uint8 *center_ch = wf_5g_40m_chans;
+ uint num_ch = WF_NUM_5G_40M_CHANS;
+ uint i;
+
+ /* We don't have an array of legal 20MHz 5G channels, but they are
+ * each side of the legal 40MHz channels. Check the chanspec
+ * channel against either side of the 40MHz channels.
+ */
+ for (i = 0; i < num_ch; i ++) {
+ if (channel == (uint)LOWER_20_SB(center_ch[i]) ||
+ channel == (uint)UPPER_20_SB(center_ch[i])) {
+ break; /* match found */
+ }
+ }
+
+ if (i == num_ch) {
+ /* check for channel 165 which is not the side band
+ * of 40MHz 5G channel
+ */
+ if (channel == 165) {
+ i = 0;
+ }
+
+ /* check for legacy JP channels on failure */
+ if (channel == 34 || channel == 38 ||
+ channel == 42 || channel == 46) {
+ i = 0;
+ }
+ }
+
+ if (i < num_ch) {
+ /* match found */
+ return TRUE;
+ }
+ } else if (band == WL_CHANSPEC_BAND_6G) {
+ /* Use the simple pattern of 6GHz 20MHz channels for validity check */
+ if ((channel >= CH_MIN_6G_CHANNEL &&
+ channel <= CH_MAX_6G_CHANNEL) &&
+ ((((channel - CH_MIN_6G_CHANNEL) % 4) == 0) || /* a multiple of 4 */
+ channel == 2)) { /* or the oddball channel 2 */
+ return TRUE;
+ }
+ }
+
+ return FALSE;
+}
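+
+/*
+ * Illustrative usage of the checks above (a sketch; results follow directly
+ * from the function):
+ *
+ *   wf_valid_20MHz_chan(36, WL_CHANSPEC_BAND_5G);   -> TRUE, sideband of 40MHz center 38
+ *   wf_valid_20MHz_chan(165, WL_CHANSPEC_BAND_5G);  -> TRUE, special-cased above
+ *   wf_valid_20MHz_chan(35, WL_CHANSPEC_BAND_5G);   -> FALSE
+ */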
+
+/**
+ * Verify that the center channel is a valid 40MHz center channel according to 802.11.
+ *
+ * @param center_channel 40MHz center channel to validate
+ * @param band chanspec band
+ *
+ * @return Return TRUE if valid
+ */
+bool
+wf_valid_40MHz_center_chan(uint center_channel, chanspec_band_t band)
+{
+ if (band == WL_CHANSPEC_BAND_2G) {
+ /* simple range check for 2GHz */
+ return (center_channel >= CH_MIN_2G_40M_CHANNEL &&
+ center_channel <= CH_MAX_2G_40M_CHANNEL);
+ } else if (band == WL_CHANSPEC_BAND_5G) {
+ uint i;
+
+ /* use the 5GHz lookup of 40MHz channels */
+ for (i = 0; i < WF_NUM_5G_40M_CHANS; i++) {
+ if (center_channel == wf_5g_40m_chans[i]) {
+ return TRUE;
+ }
+ }
+ }
+ else if (band == WL_CHANSPEC_BAND_6G) {
+ /* Use the simple pattern of 6GHz center channels */
+ if ((center_channel >= CH_MIN_6G_40M_CHANNEL &&
+ center_channel <= CH_MAX_6G_40M_CHANNEL) &&
+ ((center_channel - CH_MIN_6G_40M_CHANNEL) % 8) == 0) { // even multiple of 8
+ return TRUE;
+ }
+ }
+
+ return FALSE;
+}
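+
+/*
+ * Example for the 6G stride check above (illustrative; assumes
+ * CH_MIN_6G_40M_CHANNEL == 3): valid 40MHz centers are 3, 11, 19, ...,
+ * i.e. every 8th channel number, and anything in between is rejected.
+ */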
+
+/**
+ * Verify that the center channel is a valid 80MHz center channel according to 802.11.
+ *
+ * @param center_channel 80MHz center channel to validate
+ * @param band chanspec band
+ *
+ * @return Return TRUE if valid
+ */
+bool
+wf_valid_80MHz_center_chan(uint center_channel, chanspec_band_t band)
+{
+ if (band == WL_CHANSPEC_BAND_5G) {
+ /* use the 80MHz ID lookup to validate the center channel */
+ if (channel_80mhz_to_id(center_channel) >= 0) {
+ return TRUE;
+ }
+ } else if (band == WL_CHANSPEC_BAND_6G) {
+ /* use the 80MHz ID lookup to validate the center channel */
+ if (channel_6g_80mhz_to_id(center_channel) >= 0) {
+ return TRUE;
+ }
+ }
+
+ return FALSE;
+}
+
+/**
+ * Verify that the center channel is a valid 160MHz center channel according to 802.11.
+ *
+ * @param center_channel 160MHz center channel to validate
+ * @param band chanspec band
+ *
+ * @return Return TRUE if valid
+ */
+bool
+wf_valid_160MHz_center_chan(uint center_channel, chanspec_band_t band)
+{
+ if (band == WL_CHANSPEC_BAND_5G) {
+ uint i;
+
+ /* use the 5GHz lookup of 40MHz channels */
+ for (i = 0; i < WF_NUM_5G_160M_CHANS; i++) {
+ if (center_channel == wf_5g_160m_chans[i]) {
+ return TRUE;
+ }
+ }
+ } else if (band == WL_CHANSPEC_BAND_6G) {
+ /* Use the simple pattern of 6GHz center channels */
+ if ((center_channel >= CH_MIN_6G_160M_CHANNEL &&
+ center_channel <= CH_MAX_6G_160M_CHANNEL) &&
+ ((center_channel - CH_MIN_6G_160M_CHANNEL) % 32) == 0) { // even multiple of 32
+ return TRUE;
+ }
+ }
+
+ return FALSE;
+}
+
+/**
+ * Verify that the center channel is a valid 240MHz center channel according to 802.11.
+ *
+ * @param center_channel 240MHz center channel to validate
+ * @param band chanspec band
+ *
+ * @return Return TRUE if valid
+ */
+bool
+wf_valid_240MHz_center_chan(uint center_channel, chanspec_band_t band)
+{
+ if (band == WL_CHANSPEC_BAND_6G) {
+ /* Use the simple pattern of 6GHz center channels */
+ if ((center_channel >= CH_MIN_6G_240M_CHANNEL &&
+ center_channel <= CH_MAX_6G_240M_CHANNEL) &&
+ ((center_channel - CH_MIN_6G_240M_CHANNEL) % 48) == 0) { // even multiple of 48
+ return TRUE;
+ }
+ }
+
+ return FALSE;
+}
+
+/**
+ * Verify that the center channel is a valid 320MHz center channel according to 802.11.
+ *
+ * @param center_channel 320MHz center channel to validate
+ * @param band chanspec band
+ *
+ * @return Return TRUE if valid
+ */
+bool
+wf_valid_320MHz_center_chan(uint center_channel, chanspec_band_t band)
+{
+ if (band == WL_CHANSPEC_BAND_6G) {
+ /* Use the simple pattern of 6GHz center channels */
+ if ((center_channel >= CH_MIN_6G_320M_CHANNEL &&
+ center_channel <= CH_MAX_6G_320M_CHANNEL) &&
+ ((center_channel - CH_MIN_6G_320M_CHANNEL) % 64) == 0) { // even multiple of 64
+ return TRUE;
+ }
+ }
+
+ return FALSE;
+}
+
+/*
+ * This function returns TRUE if both chanspecs can co-exist in the PHY.
+ * In addition to the primary20 channel, the function checks the sideband for 2g 40MHz channels
+ */
+bool
+wf_chspec_coexist(chanspec_t chspec1, chanspec_t chspec2)
+{
+ bool same_primary;
+
+ same_primary = (wf_chspec_primary20_chan(chspec1) == wf_chspec_primary20_chan(chspec2));
+
+ if (same_primary && CHSPEC_IS2G(chspec1)) {
+ if (CHSPEC_IS40(chspec1) && CHSPEC_IS40(chspec2)) {
+ return (CHSPEC_CTL_SB(chspec1) == CHSPEC_CTL_SB(chspec2));
+ }
+ }
+ return same_primary;
+}
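+
+/*
+ * Example (illustrative): two 2g 40MHz chanspecs that share primary channel 6
+ * co-exist only if their sidebands match, so 6l vs 6l -> TRUE but
+ * 6l vs 6u -> FALSE, while a 20MHz chanspec on channel 6 co-exists with both
+ * since only the primary20 channels are compared in that case.
+ */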
+
+/**
+ * Create a 20MHz chanspec for the given band.
+ *
+ * This function returns a 20MHz chanspec in the given band.
+ *
+ * @param channel 20MHz channel number
+ * @param band a chanspec band (e.g. WL_CHANSPEC_BAND_2G)
+ *
+ * @return Returns a 20MHz chanspec, or INVCHANSPEC in case of error.
+ */
+chanspec_t
+wf_create_20MHz_chspec(uint channel, chanspec_band_t band)
+{
+ chanspec_t chspec;
+
+ if (channel <= WL_CHANSPEC_CHAN_MASK &&
+ (band == WL_CHANSPEC_BAND_2G ||
+ band == WL_CHANSPEC_BAND_5G ||
+ band == WL_CHANSPEC_BAND_6G)) {
+ chspec = band | WL_CHANSPEC_BW_20 | WL_CHANSPEC_CTL_SB_NONE | channel;
+ if (!wf_chspec_valid(chspec)) {
+ chspec = INVCHANSPEC;
+ }
+ } else {
+ chspec = INVCHANSPEC;
+ }
+
+ return chspec;
+}
+
+/**
+ * Returns the chanspec for a 40MHz channel given the primary 20MHz channel number,
+ * the center channel number, and the band.
+ *
+ * @param primary_channel primary 20Mhz channel
+ * @param center_channel center channel of the 40MHz channel
+ * @param band band of the 40MHz channel (chanspec_band_t value)
+ *
+ * The center_channel can be one of the 802.11 spec valid 40MHz center channels
+ * in the given band.
+ *
+ * @return returns a 40MHz chanspec, or INVCHANSPEC in case of error
+ */
+chanspec_t
+wf_create_40MHz_chspec(uint primary_channel, uint center_channel,
+ chanspec_band_t band)
+{
+ int sb;
+
+ /* Calculate the sideband value for the center and primary channel.
+ * Will return -1 if not a valid pair for 40MHz
+ */
+ sb = channel_to_sb(center_channel, primary_channel, WL_CHANSPEC_BW_40);
+
+ /* return err if the sideband was bad or the center channel is not
+ * valid for the given band.
+ */
+ if (sb < 0 || !wf_valid_40MHz_center_chan(center_channel, band)) {
+ return INVCHANSPEC;
+ }
+
+ /* otherwise construct and return the valid 40MHz chanspec */
+ return (chanspec_t)(center_channel | WL_CHANSPEC_BW_40 | band |
+ ((uint)sb << WL_CHANSPEC_CTL_SB_SHIFT));
+}
+
+/**
+ * Returns the chanspec for a 40MHz channel given the primary 20MHz channel number,
+ * the sub-band for the primary 20MHz channel, and the band.
+ *
+ * @param primary_channel primary 20Mhz channel
+ * @param primary_subband sub-band of the 20MHz primary channel (chanspec_subband_t value)
+ * @param band band of the 40MHz channel (chanspec_band_t value)
+ *
+ * The primary channel and sub-band should describe one of the 802.11 spec valid
+ * 40MHz channels in the given band.
+ *
+ * @return returns a 40MHz chanspec, or INVCHANSPEC in case of error
+ */
+chanspec_t
+wf_create_40MHz_chspec_primary_sb(uint primary_channel, chanspec_subband_t primary_subband,
+ chanspec_band_t band)
+{
+ uint center_channel;
+
+ /* find the center channel */
+ if (primary_subband == WL_CHANSPEC_CTL_SB_L) {
+ center_channel = primary_channel + CH_10MHZ_APART;
+ } else if (primary_subband == WL_CHANSPEC_CTL_SB_U) {
+ center_channel = primary_channel - CH_10MHZ_APART;
+ } else {
+ return INVCHANSPEC;
+ }
+
+ return wf_create_40MHz_chspec(primary_channel, center_channel, band);
+}
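+
+/*
+ * Worked example of the center-channel arithmetic above (illustrative):
+ *
+ *   wf_create_40MHz_chspec_primary_sb(36, WL_CHANSPEC_CTL_SB_L, WL_CHANSPEC_BAND_5G);
+ *       -> center 36 + 2 = 38, a valid 5G 40MHz center channel
+ *   wf_create_40MHz_chspec_primary_sb(36, WL_CHANSPEC_CTL_SB_U, WL_CHANSPEC_BAND_5G);
+ *       -> center 36 - 2 = 34, not a valid 40MHz center, so INVCHANSPEC
+ */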
+
+/**
+ * Returns the chanspec for an 80MHz channel given the primary 20MHz channel number,
+ * the center channel number, and the band.
+ *
+ * @param primary_channel primary 20Mhz channel
+ * @param center_channel center channel of the 80MHz channel
+ * @param band band of the 80MHz channel (chanspec_band_t value)
+ *
+ * The center_channel can be one of {42, 58, 106, 122, 138, 155} for 5G,
+ * or {7 + 16*X for 0 <= X <= 13} for 6G.
+ *
+ * @return returns an 80MHz chanspec, or INVCHANSPEC in case of error
+ */
+chanspec_t
+wf_create_80MHz_chspec(uint primary_channel, uint center_channel,
+ chanspec_band_t band)
+{
+ int sb;
+
+ /* Calculate the sideband value for the center and primary channel.
+ * Will return -1 if not a valid pair for 80MHz
+ */
+ sb = channel_to_sb(center_channel, primary_channel, WL_CHANSPEC_BW_80);
+
+ /* return err if the sideband was bad or the center channel is not
+ * valid for the given band.
+ */
+ if (sb < 0 || !wf_valid_80MHz_center_chan(center_channel, band)) {
+ return INVCHANSPEC;
+ }
+
+ /* otherwise construct and return the valid 80MHz chanspec */
+ return (chanspec_t)(center_channel | WL_CHANSPEC_BW_80 | band |
+ ((uint)sb << WL_CHANSPEC_CTL_SB_SHIFT));
+}
+
+/**
+ * Returns the chanspec for a 160MHz channel given the primary 20MHz channel number,
+ * the center channel number, and the band.
+ *
+ * @param primary_channel primary 20Mhz channel
+ * @param center_channel center channel of the 160MHz channel
+ * @param band band of the 160MHz channel (chanspec_band_t value)
+ *
+ * The center_channel can be one of {50, 114} for 5G,
+ * or {15 + 32*X for 0 <= X <= 7} for 6G.
+ *
+ * @return returns a 160MHz chanspec, or INVCHANSPEC in case of error
+ */
+chanspec_t
+wf_create_160MHz_chspec(uint primary_channel, uint center_channel, chanspec_band_t band)
+{
+ int sb;
+
+ /* Calculate the sideband value for the center and primary channel.
+ * Will return -1 if not a valid pair for 160MHz
+ */
+ sb = channel_to_sb(center_channel, primary_channel, WL_CHANSPEC_BW_160);
+
+ /* return err if the sideband was bad or the center channel is not
+ * valid for the given band.
+ */
+ if (sb < 0 || !wf_valid_160MHz_center_chan(center_channel, band)) {
+ return INVCHANSPEC;
+ }
+
+ /* otherwise construct and return the valid 160MHz chanspec */
+ return (chanspec_t)(center_channel | WL_CHANSPEC_BW_160 | band |
+ ((uint)sb << WL_CHANSPEC_CTL_SB_SHIFT));
+}
+
+/**
+ * Returns the chanspec for an 80+80MHz channel given the primary 20MHz channel number,
+ * the center channel numbers for each frequency segment, and the band.
+ *
+ * @param primary_channel primary 20 Mhz channel
+ * @param chan0 center channel number of one frequency segment
+ * @param chan1 center channel number of the other frequency segment
+ * @param band band of the 80+80 MHz channel (chanspec_band_t value)
+ *
+ * Parameters chan0 and chan1 are valid 80 MHz center channel numbers for the given band.
+ * The primary channel must be contained in one of the 80 MHz channels. This routine
+ * will determine which frequency segment is the primary 80 MHz segment.
+ *
+ * @return returns an 80+80 MHz chanspec, or INVCHANSPEC in case of error
+ *
+ * Refer to 802.11-2016 section 21.3.14 "Channelization".
+ */
+chanspec_t
+wf_create_8080MHz_chspec(uint primary_channel, uint chan0, uint chan1,
+ chanspec_band_t band)
+{
+ int sb = 0;
+ chanspec_t chanspec = 0;
+ int chan0_id = -1, chan1_id = -1;
+ int seg0, seg1;
+
+ /* frequency segments need to be non-contiguous, so the channel separation needs
+ * to be greater than 80MHz
+ */
+ if ((uint)ABS((int)(chan0 - chan1)) <= CH_80MHZ_APART) {
+ return INVCHANSPEC;
+ }
+
+ if (band == WL_CHANSPEC_BAND_5G) {
+ chan0_id = channel_80mhz_to_id(chan0);
+ chan1_id = channel_80mhz_to_id(chan1);
+ } else if (band == WL_CHANSPEC_BAND_6G) {
+ chan0_id = channel_6g_80mhz_to_id(chan0);
+ chan1_id = channel_6g_80mhz_to_id(chan1);
+ }
+
+ /* make sure the channel numbers were valid */
+ if (chan0_id == -1 || chan1_id == -1) {
+ return INVCHANSPEC;
+ }
+
+ /* does the primary channel fit with the 1st 80MHz channel ? */
+ sb = channel_to_sb(chan0, primary_channel, WL_CHANSPEC_BW_80);
+ if (sb >= 0) {
+ /* yes, so chan0 is frequency segment 0, and chan1 is seg 1 */
+ seg0 = chan0_id;
+ seg1 = chan1_id;
+ } else {
+ /* no, so does the primary channel fit with the 2nd 80MHz channel ? */
+ sb = channel_to_sb(chan1, primary_channel, WL_CHANSPEC_BW_80);
+ if (sb < 0) {
+ /* no match for pri_ch to either 80MHz center channel */
+ return INVCHANSPEC;
+ }
+ /* swapped, so chan1 is frequency segment 0, and chan0 is seg 1 */
+ seg0 = chan1_id;
+ seg1 = chan0_id;
+ }
+
+ chanspec = ((seg0 << WL_CHANSPEC_CHAN0_SHIFT) |
+ (seg1 << WL_CHANSPEC_CHAN1_SHIFT) |
+ (sb << WL_CHANSPEC_CTL_SB_SHIFT) |
+ WL_CHANSPEC_BW_8080 |
+ band);
+
+ return chanspec;
+}
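+
+/*
+ * Illustrative example of the segment selection above: with
+ * primary_channel 36, chan0 42 and chan1 155, the primary fits the 80MHz
+ * channel centered at 42, so 42 becomes frequency segment 0 and 155 segment 1.
+ * Calling with chan0/chan1 swapped produces the same chanspec because the
+ * routine re-orders the segments around the primary.
+ */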
+
+/**
+ * Returns the chanspec for a 160+160MHz channel given the primary 20MHz channel number,
+ * the center channel numbers for each frequency segment, and the band.
+ *
+ * @param primary_channel primary 20 Mhz channel
+ * @param chan0 center channel number of one frequency segment
+ * @param chan1 center channel number of the other frequency segment
+ * @param band band of the 160+160 MHz channel (chanspec_band_t value)
+ *
+ * Parameters chan0 and chan1 are valid 160 MHz center channel numbers for the given band.
+ * The primary channel must be contained in one of the 160 MHz channels. This routine
+ * will determine which frequency segment is the primary 160 MHz segment.
+ *
+ * @return returns a 160+160 MHz chanspec, or INVCHANSPEC in case of error
+ *
+ * Refer to <TBD> "Channelization".
+ */
+chanspec_t
+wf_create_160160MHz_chspec(uint primary_channel, uint chan0, uint chan1,
+ chanspec_band_t band)
+{
+ int sb = 0;
+ chanspec_t chanspec = 0;
+ int chan0_id = -1, chan1_id = -1;
+ int seg0, seg1;
+
+ /* frequency segments need to be non-contiguous, so the channel separation needs
+ * to be greater than 160MHz
+ */
+ if ((uint)ABS((int)(chan0 - chan1)) <= CH_160MHZ_APART) {
+ return INVCHANSPEC;
+ }
+
+ if (band == WL_CHANSPEC_BAND_5G) {
+ chan0_id = channel_5g_160mhz_to_id(chan0);
+ chan1_id = channel_5g_160mhz_to_id(chan1);
+ } else if (band == WL_CHANSPEC_BAND_6G) {
+ chan0_id = channel_6g_160mhz_to_id(chan0);
+ chan1_id = channel_6g_160mhz_to_id(chan1);
+ }
+
+ /* make sure the channel numbers were valid */
+ if (chan0_id == -1 || chan1_id == -1) {
+ return INVCHANSPEC;
+ }
+
+ /* does the primary channel fit with the 1st 160MHz channel ? */
+ sb = channel_to_sb(chan0, primary_channel, WL_CHANSPEC_BW_160);
+ if (sb >= 0) {
+ /* yes, so chan0 is frequency segment 0, and chan1 is seg 1 */
+ seg0 = chan0_id;
+ seg1 = chan1_id;
+ } else {
+ /* no, so does the primary channel fit with the 2nd 160MHz channel ? */
+ sb = channel_to_sb(chan1, primary_channel, WL_CHANSPEC_BW_160);
+ if (sb < 0) {
+ /* no match for pri_ch to either 160MHz center channel */
+ return INVCHANSPEC;
+ }
+ /* swapped, so chan1 is frequency segment 0, and chan0 is seg 1 */
+ seg0 = chan1_id;
+ seg1 = chan0_id;
+ }
+
+ chanspec = ((seg0 << WL_CHANSPEC_CHAN0_SHIFT) |
+ (seg1 << WL_CHANSPEC_CHAN1_SHIFT) |
+ (sb << WL_CHANSPEC_CTL_SB_SHIFT) |
+ WL_CHANSPEC_BW_160160 |
+ band);
+
+ return chanspec;
+}
+
+/**
+ * Returns the chanspec for a 240MHz channel given the primary 20MHz channel number,
+ * the center channel number, and the band.
+ *
+ * @param primary_channel primary 20 MHz channel
+ * @param center_channel center channel number
+ * @param band band of the 240 MHz channel (chanspec_band_t value)
+ *
+ * @return returns a 240 MHz chanspec, or INVCHANSPEC in case of error
+ *
+ * Refer to <TBD> "Channelization".
+ */
+chanspec_t
+wf_create_240MHz_chspec(uint primary_channel, uint center_channel, chanspec_band_t band)
+{
+ int sb = 0;
+ chanspec_t chanspec = 0;
+ int chan_id = -1;
+
+ if (band == WL_CHANSPEC_BAND_6G) {
+ chan_id = channel_6g_240mhz_to_id(center_channel);
+ }
+
+ /* make sure the channel number was valid */
+ if (chan_id == -1) {
+ return INVCHANSPEC;
+ }
+
+ /* Calculate the sideband value for the center and primary channel.
+ * Will return -1 if not a valid pair for 240MHz
+ */
+ sb = channel_to_sb(center_channel, primary_channel, WL_CHANSPEC_BW_240);
+
+ /* return err if the sideband was bad or the center channel is not
+ * valid for the given band.
+ */
+ if (sb < 0 || !wf_valid_240MHz_center_chan(center_channel, band)) {
+ return INVCHANSPEC;
+ }
+
+ chanspec = ((chan_id << WL_CHANSPEC_GE240_CHAN_SHIFT) |
+ (sb << WL_CHANSPEC_GE240_SB_SHIFT) |
+ WL_CHANSPEC_BW_240 |
+ band);
+
+ return chanspec;
+}
+
+/**
+ * Returns the chanspec for a 320MHz channel given the primary 20MHz channel number,
+ * the center channel number, and the band.
+ *
+ * @param primary_channel primary 20 MHz channel
+ * @param center_channel center channel number
+ * @param band band of the 320 MHz channel (chanspec_band_t value)
+ *
+ * Parameter center_channel must be a valid 320 MHz center channel number for the
+ * given band. The primary channel must be contained in the 320 MHz channel.
+ *
+ * @return returns a 320 MHz chanspec, or INVCHANSPEC in case of error
+ *
+ * Refer to <TBD> "Channelization".
+ */
+chanspec_t
+wf_create_320MHz_chspec(uint primary_channel, uint center_channel, chanspec_band_t band)
+{
+ int sb = 0;
+ chanspec_t chanspec = 0;
+ int chan_id = -1;
+
+ if (band == WL_CHANSPEC_BAND_6G) {
+ chan_id = channel_6g_320mhz_to_id(center_channel);
+ }
+
+ /* make sure the channel number was valid */
+ if (chan_id == -1) {
+ return INVCHANSPEC;
+ }
+
+ /* Calculate the sideband value for the center and primary channel.
+ * Will return -1 if not a valid pair for 320MHz
+ */
+ sb = channel_to_sb(center_channel, primary_channel, WL_CHANSPEC_BW_320);
+
+ /* return err if the sideband was bad or the center channel is not
+ * valid for the given band.
+ */
+ if (sb < 0 || !wf_valid_320MHz_center_chan(center_channel, band)) {
+ return INVCHANSPEC;
+ }
+
+ chanspec = ((chan_id << WL_CHANSPEC_GE240_CHAN_SHIFT) |
+ (sb << WL_CHANSPEC_GE240_SB_SHIFT) |
+ WL_CHANSPEC_BW_320 |
+ band);
+
+ return chanspec;
+}
+
+/**
+ * Returns the chanspec given the primary 20MHz channel number,
+ * the center channel number, channel width, and the band. The channel width
+ * must be 20, 40, 80, 160, 240 or 320 MHz.
+ * 80+80 or 160+160 MHz chanspec creation is not handled by this function,
+ * use wf_create_8080MHz_chspec() or wf_create_160160MHz_chspec() instead.
+ *
+ * @param primary_channel primary 20Mhz channel
+ * @param center_channel center channel of the channel
+ * @param bw width of the channel (chanspec_bw_t)
+ * @param band chanspec band of channel (chanspec_band_t)
+ *
+ * The center_channel can be one of the 802.11 spec valid center channels
+ * for the given bandwidth in the given band.
+ *
+ * @return returns a chanspec, or INVCHANSPEC in case of error
+ */
+chanspec_t
+wf_create_chspec(uint primary_channel, uint center_channel,
+ chanspec_bw_t bw, chanspec_band_t band)
+{
+ chanspec_t chspec = INVCHANSPEC;
+ int sb = -1;
+ uint sb_shift;
+
+ /* 20MHz channels have matching center and primary channels */
+ if (bw == WL_CHANSPEC_BW_20 && primary_channel == center_channel) {
+
+ sb = 0;
+
+ } else if (bw == WL_CHANSPEC_BW_40 ||
+ bw == WL_CHANSPEC_BW_80 ||
+ bw == WL_CHANSPEC_BW_160 ||
+ WFC_BW_EQ(bw, WL_CHANSPEC_BW_240) ||
+ WFC_BW_EQ(bw, WL_CHANSPEC_BW_320)) {
+
+ /* calculate the sub-band index */
+ sb = channel_to_sb(center_channel, primary_channel, bw);
+ }
+
+ /* if we have a good sub-band, assemble the chanspec, and use wf_chspec_valid()
+ * to check it for correctness
+ */
+ if (sb >= 0) {
+ if (WFC_BW_EQ(bw, WL_CHANSPEC_BW_240)) {
+ if (band == WL_CHANSPEC_BAND_6G) {
+ center_channel = channel_6g_240mhz_to_id(center_channel);
+ sb_shift = WL_CHANSPEC_GE240_SB_SHIFT;
+ } else {
+ return INVCHANSPEC;
+ }
+ } else if (WFC_BW_EQ(bw, WL_CHANSPEC_BW_320)) {
+ if (band == WL_CHANSPEC_BAND_6G) {
+ center_channel = channel_6g_320mhz_to_id(center_channel);
+ sb_shift = WL_CHANSPEC_GE240_SB_SHIFT;
+ } else {
+ return INVCHANSPEC;
+ }
+ } else {
+ sb_shift = WL_CHANSPEC_CTL_SB_SHIFT;
+ }
+ chspec = center_channel | band | bw |
+ ((uint)sb << sb_shift);
+ if (!wf_chspec_valid(chspec)) {
+ chspec = INVCHANSPEC;
+ }
+ }
+
+ return chspec;
+}
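+
+/*
+ * Illustrative usage of the constructor above (a sketch):
+ *
+ *   wf_create_chspec(36, 42, WL_CHANSPEC_BW_80, WL_CHANSPEC_BAND_5G);
+ *       -> 80MHz chanspec, center 42, primary 36 (sub-band index 0)
+ *   wf_create_chspec(36, 58, WL_CHANSPEC_BW_80, WL_CHANSPEC_BAND_5G);
+ *       -> INVCHANSPEC, since 36 is not a 20MHz sub-channel of center 58
+ */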
+
+/**
+ * Returns the chanspec given the primary 20MHz channel number,
+ * channel width, and the band.
+ *
+ * @param primary_channel primary 20Mhz channel
+ * @param bw width of the channel (chanspec_bw_t)
+ * @param band chanspec band of channel (chanspec_band_t)
+ *
+ * @return returns a chanspec, or INVCHANSPEC in case of error
+ *
+ * This function is similar to wf_create_chspec() but does not require the
+ * center_channel parameter.
+ *
+ * This function supports creating 20MHz bandwidth chanspecs on any band.
+ *
+ * 5GHz and 6GHz bands have non-overlapping 40/80/160 MHz channels, so a
+ * 20MHz primary channel uniquely specifies a wider channel in a given band.
+ *
+ * For the 2GHz band, 40MHz channels overlap, so two 40MHz channels may share
+ * the same primary 20MHz channel and the primary channel alone does not
+ * uniquely specify the channel. For a 40MHz bandwidth on the 2GHz band this
+ * function returns the first compatible center channel from the 2G 40MHz
+ * lookup table.
+ *
+ * 80+80MHz channels also cannot be uniquely defined. This function will return
+ * INVCHANSPEC whenever it is called with a bandwidth of WL_CHANSPEC_BW_8080.
+ */
+chanspec_t
+wf_create_chspec_from_primary(uint primary_channel, chanspec_bw_t bw, chanspec_band_t band)
+{
+ chanspec_t chspec = INVCHANSPEC;
+
+ if (bw == WL_CHANSPEC_BW_20) {
+ chspec = wf_create_20MHz_chspec(primary_channel, band);
+ } else if (band == WL_CHANSPEC_BAND_2G || band == WL_CHANSPEC_BAND_5G) {
+ /* For 2GHz and 5GHz, use the lookup tables of valid 40/80/160MHz center
+ * channels and search for a center channel compatible with the given
+ * primary channel.
+ */
+ const uint8 *center_ch = NULL;
+ uint num_ch, i;
+
+ if (band == WL_CHANSPEC_BAND_2G && bw == WL_CHANSPEC_BW_40) {
+ center_ch = wf_2g_40m_chans;
+ num_ch = WF_NUM_2G_40M_CHANS;
+ } else if (bw == WL_CHANSPEC_BW_40) {
+ center_ch = wf_5g_40m_chans;
+ num_ch = WF_NUM_5G_40M_CHANS;
+ } else if (bw == WL_CHANSPEC_BW_80) {
+ center_ch = wf_5g_80m_chans;
+ num_ch = WF_NUM_5G_80M_CHANS;
+ } else if (bw == WL_CHANSPEC_BW_160) {
+ center_ch = wf_5g_160m_chans;
+ num_ch = WF_NUM_5G_160M_CHANS;
+ } else {
+ num_ch = 0;
+ }
+
+ for (i = 0; i < num_ch; i ++) {
+ chspec = wf_create_chspec(primary_channel, center_ch[i], bw, band);
+ if (chspec != INVCHANSPEC) {
+ break;
+ }
+ }
+ } else if (band == WL_CHANSPEC_BAND_6G) {
+ /* For 6GHz, use a formula to calculate the valid 40/80/160MHz (and, with
+ * WL11BE, 240/320MHz) center channel from the primary channel.
+ */
+ uint ch_per_block;
+ uint mask;
+ uint base, center;
+
+ if (bw == WL_CHANSPEC_BW_40) {
+ ch_per_block = 8;
+ } else if (bw == WL_CHANSPEC_BW_80) {
+ ch_per_block = 16;
+ } else if (bw == WL_CHANSPEC_BW_160) {
+ ch_per_block = 32;
+ } else if (WFC_BW_EQ(bw, WL_CHANSPEC_BW_240)) {
+ ch_per_block = 48;
+ } else if (WFC_BW_EQ(bw, WL_CHANSPEC_BW_320)) {
+ ch_per_block = 64;
+ } else {
+ ch_per_block = 0;
+ }
+
+ if (ch_per_block) {
+ /* calculate the base of the block of channel numbers
+ * covered by the given bw
+ */
+ mask = ~(ch_per_block - 1);
+ base = 1 + ((primary_channel - 1) & mask);
+
+ /* calculate the center channel from the base channel */
+ center = base + center_chan_to_edge(bw);
+
+ chspec = wf_create_chspec(primary_channel, center, bw, band);
+ }
+ }
+
+ return chspec;
+}
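+
+/*
+ * Worked example of the 6G block arithmetic above (illustrative): for
+ * primary_channel 37 at 80MHz, ch_per_block = 16 and mask = ~15, so
+ * base = 1 + ((37 - 1) & ~15) = 33, and with center_chan_to_edge() yielding
+ * 6 channel numbers (30MHz) for 80MHz, center = 33 + 6 = 39, one of the
+ * 6G 80MHz centers of the form 7 + 16*X.
+ */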
+
+/**
+ * Return the primary 20MHz channel.
+ *
+ * This function returns the channel number of the primary 20MHz channel. For
+ * 20MHz channels this is just the channel number. For 40MHz or wider channels
+ * it is the primary 20MHz channel specified by the chanspec.
+ *
+ * @param chspec input chanspec
+ *
+ * @return Returns the channel number of the primary 20MHz channel
+ */
+uint8
+wf_chspec_primary20_chan(chanspec_t chspec)
+{
+ uint center_chan = INVCHANNEL;
+ chanspec_bw_t bw;
+ uint sb;
+
+ ASSERT(!wf_chspec_malformed(chspec));
+
+ /* Is there a sideband ? */
+ if (CHSPEC_IS20(chspec)) {
+ return CHSPEC_CHANNEL(chspec);
+ } else {
+ if ((CHSPEC_IS240(chspec)) || (CHSPEC_IS320(chspec))) {
+ sb = CHSPEC_GE240_SB(chspec) >> WL_CHANSPEC_GE240_SB_SHIFT;
+ } else {
+ sb = CHSPEC_CTL_SB(chspec) >> WL_CHANSPEC_CTL_SB_SHIFT;
+ }
+
+ if (CHSPEC_IS240(chspec)) {
+ /* use bw 240MHz for the primary channel lookup */
+ bw = WL_CHANSPEC_BW_240;
+
+ /* convert from channel index to channel number */
+ if (CHSPEC_IS6G(chspec)) {
+ center_chan = wf_chspec_6G_id240_to_ch(CHSPEC_GE240_CHAN(chspec));
+ }
+ } else if (CHSPEC_IS320(chspec)) {
+ /* use bw 320MHz for the primary channel lookup */
+ bw = WL_CHANSPEC_BW_320;
+
+ /* convert from channel index to channel number */
+ if (CHSPEC_IS6G(chspec)) {
+ center_chan = wf_chspec_6G_id320_to_ch(CHSPEC_GE240_CHAN(chspec));
+ }
+ /* What to return otherwise? */
+ }
+ else {
+ bw = CHSPEC_BW(chspec);
+ center_chan = CHSPEC_CHANNEL(chspec) >> WL_CHANSPEC_CHAN_SHIFT;
+ }
+
+ return (uint8)(channel_to_primary20_chan((uint8)center_chan, bw, sb));
+ }
+}
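+
+/*
+ * Example (illustrative, assuming channel_to_primary20_chan() maps sub-band
+ * index N to the Nth 20MHz sub-channel from the low edge): an 80MHz chanspec
+ * with center 106 and sub-band index 2 resolves to primary20 channel
+ * 106 - 6 + 2 * 4 = 108.
+ */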
+
+/**
+ * Return the bandwidth string for a given chanspec
+ *
+ * This function returns the bandwidth string for the passed chanspec.
+ *
+ * @param chspec input chanspec
+ *
+ * @return Returns the bandwidth string:
+ * "320", "160+160", "20", "40", "80", "160", "80+80", "240"
+ */
+const char *
+BCMRAMFN(wf_chspec_to_bw_str)(chanspec_t chspec)
+{
+ return wf_chspec_bw_str[(CHSPEC_BW(chspec) >> WL_CHANSPEC_BW_SHIFT)];
+}
+
+/**
+ * Return the primary 20MHz chanspec of a given chanspec
+ *
+ * This function returns the chanspec of the primary 20MHz channel. For 20MHz
+ * channels this is just the chanspec. For 40MHz or wider channels it is the
+ * chanspec of the primary 20MHz channel specified by the chanspec.
+ *
+ * @param chspec input chanspec
+ *
+ * @return Returns the chanspec of the primary 20MHz channel
+ */
+chanspec_t
+wf_chspec_primary20_chspec(chanspec_t chspec)
+{
+ chanspec_t pri_chspec = chspec;
+ uint8 pri_chan;
+
+ ASSERT(!wf_chspec_malformed(chspec));
+
+ /* Is there a sideband ? */
+ if (!CHSPEC_IS20(chspec)) {
+ pri_chan = wf_chspec_primary20_chan(chspec);
+ pri_chspec = pri_chan | WL_CHANSPEC_BW_20;
+ pri_chspec |= CHSPEC_BAND(chspec);
+ }
+ return pri_chspec;
+}
+
+/* return chanspec given primary 20MHz channel and bandwidth
+ * return 0 on error
+ * does not support 6G
+ */
+uint16
+wf_channel2chspec(uint pri_ch, uint bw)
+{
+ uint16 chspec;
+ const uint8 *center_ch = NULL;
+ int num_ch = 0;
+ int sb = -1;
+ int i = 0;
+
+ chspec = ((pri_ch <= CH_MAX_2G_CHANNEL) ? WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G);
+
+ chspec |= bw;
+
+ if (bw == WL_CHANSPEC_BW_40) {
+ if (pri_ch <= CH_MAX_2G_CHANNEL) {
+ center_ch = wf_2g_40m_chans;
+ num_ch = WF_NUM_2G_40M_CHANS;
+ } else {
+ center_ch = wf_5g_40m_chans;
+ num_ch = WF_NUM_5G_40M_CHANS;
+ }
+ } else if (bw == WL_CHANSPEC_BW_80) {
+ center_ch = wf_5g_80m_chans;
+ num_ch = WF_NUM_5G_80M_CHANS;
+ } else if (bw == WL_CHANSPEC_BW_160) {
+ center_ch = wf_5g_160m_chans;
+ num_ch = WF_NUM_5G_160M_CHANS;
+ } else if (bw == WL_CHANSPEC_BW_20) {
+ chspec |= pri_ch;
+ return chspec;
+ } else {
+ return 0;
+ }
+
+ for (i = 0; i < num_ch; i ++) {
+ sb = channel_to_sb(center_ch[i], pri_ch, (chanspec_bw_t)bw);
+ if (sb >= 0) {
+ chspec |= center_ch[i];
+ chspec |= (sb << WL_CHANSPEC_CTL_SB_SHIFT);
+ break;
+ }
+ }
+
+ /* check for no matching sb/center */
+ if (sb < 0) {
+ return 0;
+ }
+
+ return chspec;
+}
+
+/**
+ * Return the primary 40MHz chanspec of a 40MHz or wider channel
+ *
+ * This function returns the chanspec for the primary 40MHz of an 80MHz or wider channel.
+ * The primary 40MHz channel is the 40MHz sub-band that contains the primary 20MHz channel.
+ * The primary 20MHz channel of the returned 40MHz chanspec is the same as the primary 20MHz
+ * channel of the input chanspec.
+ *
+ * @param chspec input chanspec
+ *
+ * @return Returns the chanspec of the primary 40MHz channel
+ */
+chanspec_t
+wf_chspec_primary40_chspec(chanspec_t chspec)
+{
+ chanspec_t chspec40 = chspec;
+ uint center_chan;
+ uint sb;
+
+ ASSERT(!wf_chspec_malformed(chspec));
+
+ /* if the chanspec is > 80MHz, use the helper routine to find the primary 80 MHz channel */
+ if (CHSPEC_IS160(chspec)) {
+ chspec = wf_chspec_primary80_chspec(chspec);
+ }
+
+ /* determine primary 40 MHz sub-channel of an 80 MHz chanspec */
+ if (CHSPEC_IS80(chspec)) {
+ center_chan = CHSPEC_CHANNEL(chspec);
+ sb = CHSPEC_CTL_SB(chspec);
+
+ if (sb < WL_CHANSPEC_CTL_SB_UL) {
+ /* Primary 40MHz is on lower side */
+ center_chan -= CH_20MHZ_APART;
+ /* sideband bits are the same for LL/LU and L/U */
+ } else {
+ /* Primary 40MHz is on upper side */
+ center_chan += CH_20MHZ_APART;
+ /* sideband bits need to be adjusted by UL offset */
+ sb -= WL_CHANSPEC_CTL_SB_UL;
+ }
+
+ /* Create primary 40MHz chanspec */
+ chspec40 = (CHSPEC_BAND(chspec) | WL_CHANSPEC_BW_40 |
+ sb | center_chan);
+ }
+
+ return chspec40;
+}
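+
+/*
+ * Worked example for the 80MHz case above (illustrative): center 42 with
+ * sideband LU (primary20 = 40) is below WL_CHANSPEC_CTL_SB_UL, so the primary
+ * 40MHz center is 42 - 4 = 38 and the LU bits are reused as U, giving the
+ * 40MHz chanspec centered at 38 with primary20 channel 40.
+ */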
+
+/**
+ * Return the channel number for a given frequency and base frequency.
+ *
+ * @param freq frequency in MHz of the channel center
+ * @param start_factor starting base frequency in 500 KHz units
+ *
+ * @return Returns a channel number > 0, or -1 on error
+ *
+ * The returned channel number is relative to the given base frequency.
+ *
+ * The base frequency is specified as (start_factor * 500 kHz).
+ * Constants WF_CHAN_FACTOR_2_4_G, WF_CHAN_FACTOR_5_G, and WF_CHAN_FACTOR_6_G are
+ * defined for 2.4 GHz, 5 GHz, and 6 GHz bands.
+ *
+ * If the given base frequency is zero these base frequencies are assumed:
+ *
+ * freq (GHz) -> assumed base freq (GHz)
+ * 2G band 2.4 - 2.5 2.407
+ * 5G band 5.0 - 5.940 5.000
+ * 6G band 5.940 - 7.205 5.940
+ *
+ * It is an error if the start_factor is zero and the freq is not in one of
+ * these ranges.
+ *
+ * The returned channel will be in the range [1, 14] in the 2.4 GHz band,
+ * [1, 253] for 6 GHz band, or [1, 200] otherwise.
+ *
+ * It is an error if the start_factor is WF_CHAN_FACTOR_2_4_G and the
+ * frequency is not a 2.4 GHz channel. For any other start factor the frequency
+ * must be a multiple of 5 MHz above the base frequency.
+ *
+ * For a start_factor of WF_CHAN_FACTOR_6_G, the frequency may be up to 7.205 GHz
+ * (channel 253). For any other start_factor, the frequency can be up to
+ * 1 GHz above the base frequency (channel 200).
+ *
+ * Reference 802.11-2016, section 17.3.8.3 and section 16.3.6.3
+ */
+int
+wf_mhz2channel(uint freq, uint start_factor)
+{
+ int ch = -1;
+ uint base;
+ int offset;
+
+ /* take the default channel start frequency */
+ if (start_factor == 0) {
+ if (freq >= 2400 && freq <= 2500) {
+ start_factor = WF_CHAN_FACTOR_2_4_G;
+ } else if (freq >= 5000 && freq < 5935) {
+ start_factor = WF_CHAN_FACTOR_5_G;
+ } else if (freq >= 5935 && freq <= 7205) {
+ start_factor = WF_CHAN_FACTOR_6_G;
+ }
+ }
+
+ if (freq == 2484 && start_factor == WF_CHAN_FACTOR_2_4_G) {
+ return 14;
+ } else if (freq == 5935 && start_factor == WF_CHAN_FACTOR_6_G) {
+ /* channel #2 is an oddball, 10MHz below chan #1 */
+ return 2;
+ } else if (freq == 5960 && start_factor == WF_CHAN_FACTOR_6_G) {
+ /* do not return ch #2 for the conventional location where #2 would otherwise appear */
+ return -1;
+ }
+
+ base = start_factor / 2;
+
+ if (freq < base) {
+ return -1;
+ }
+
+ offset = freq - base;
+ ch = offset / 5;
+
+ /* check that frequency is a 5MHz multiple from the base */
+ if (offset != (ch * 5))
+ return -1;
+
+ /* channel range checks */
+ if (start_factor == WF_CHAN_FACTOR_2_4_G) {
+ /* 2G channels computed here can only go up to 13; 14 is
+ * handled above since it is not a 5MHz offset from the base
+ */
+ if (ch > 13) {
+ ch = -1;
+ }
+ }
+ else if (start_factor == WF_CHAN_FACTOR_6_G) {
+ /* 6G has a higher channel range; other bands' channelization specifies [1, 200] */
+ if ((uint)ch > CH_MAX_6G_CHANNEL) {
+ ch = -1;
+ }
+ } else if (ch > 200) {
+ ch = -1;
+ }
+
+ return ch;
+}
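+
+/*
+ * Worked examples for the conversion above (illustrative):
+ *
+ *   wf_mhz2channel(2412, 0);  -> base 2407, (2412 - 2407) / 5 = channel 1
+ *   wf_mhz2channel(5180, 0);  -> base 5000, (5180 - 5000) / 5 = channel 36
+ *   wf_mhz2channel(5183, 0);  -> -1, not a 5MHz multiple above the base
+ */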
+
+/**
+ * Return the center frequency in MHz of the given channel and base frequency.
+ *
+ * The channel number is interpreted relative to the given base frequency.
+ *
+ * The valid channel range is [1, 14] in the 2.4 GHz band, [1,253] in the 6 GHz
+ * band, and [1, 200] otherwise.
+ * The base frequency is specified as (start_factor * 500 kHz).
+ * Constants WF_CHAN_FACTOR_2_4_G, WF_CHAN_FACTOR_5_G, and WF_CHAN_FACTOR_6_G are
+ * defined for 2.4 GHz, 5 GHz, and 6 GHz bands.
+ * The channel range of [1, 14] is only checked for a start_factor of
+ * WF_CHAN_FACTOR_2_4_G (4814).
+ * Odd start_factors produce channels on .5 MHz boundaries, in which case
+ * the answer is rounded down to an integral MHz.
+ * -1 is returned for an out of range channel.
+ *
+ * Reference 802.11-2016, section 17.3.8.3 and section 16.3.6.3
+ *
+ * @param channel input channel number
+ * @param start_factor base frequency in 500 kHz units, e.g. 10000 for 5 GHz
+ *
+ * @return Returns a frequency in MHz
+ *
+ * @see WF_CHAN_FACTOR_2_4_G
+ * @see WF_CHAN_FACTOR_5_G
+ * @see WF_CHAN_FACTOR_6_G
+ */
+int
+wf_channel2mhz(uint ch, uint start_factor)
+{
+ int freq;
+
+ if ((start_factor == WF_CHAN_FACTOR_2_4_G && (ch < 1 || ch > 14)) ||
+ (start_factor == WF_CHAN_FACTOR_6_G && (ch < 1 || ch > 253)) ||
+ (start_factor != WF_CHAN_FACTOR_6_G && (ch < 1 || ch > 200))) {
+ freq = -1;
+ } else if ((start_factor == WF_CHAN_FACTOR_2_4_G) && (ch == 14)) {
+ freq = 2484;
+ } else if ((start_factor == WF_CHAN_FACTOR_6_G) && (ch == 2)) {
+ freq = 5935;
+ } else {
+ freq = ch * 5 + start_factor / 2;
+ }
+
+ return freq;
+}
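+
+/*
+ * Worked examples for the reverse conversion above (illustrative):
+ *
+ *   wf_channel2mhz(36, WF_CHAN_FACTOR_5_G);    -> 36 * 5 + 5000 = 5180
+ *   wf_channel2mhz(14, WF_CHAN_FACTOR_2_4_G);  -> 2484 (special case)
+ *   wf_channel2mhz(201, WF_CHAN_FACTOR_5_G);   -> -1 (out of range)
+ */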
+
+static const uint16 sidebands[] = {
+ WL_CHANSPEC_CTL_SB_LLL, WL_CHANSPEC_CTL_SB_LLU,
+ WL_CHANSPEC_CTL_SB_LUL, WL_CHANSPEC_CTL_SB_LUU,
+ WL_CHANSPEC_CTL_SB_ULL, WL_CHANSPEC_CTL_SB_ULU,
+ WL_CHANSPEC_CTL_SB_UUL, WL_CHANSPEC_CTL_SB_UUU
+};
+
+/*
+ * Returns the 80MHz chanspec corresponding to the following input
+ * parameters
+ *
+ * primary_channel - primary 20MHz channel
+ * center_channel - center channel of the 80MHz channel
+ *
+ * The center_channel can be one of {42, 58, 106, 122, 138, 155}
+ *
+ * returns INVCHANSPEC in case of error
+ *
+ * does not support 6G
+ */
+chanspec_t
+wf_chspec_80(uint8 center_channel, uint8 primary_channel)
+{
+
+ chanspec_t chanspec = INVCHANSPEC;
+ chanspec_t chanspec_cur;
+ uint i;
+
+ for (i = 0; i < WF_NUM_SIDEBANDS_80MHZ; i++) {
+ chanspec_cur = CH80MHZ_CHSPEC(center_channel, sidebands[i]);
+ if (primary_channel == wf_chspec_primary20_chan(chanspec_cur)) {
+ chanspec = chanspec_cur;
+ break;
+ }
+ }
+ /* If the loop ended early, we are good; otherwise we did not
+ * find an 80MHz chanspec with the given center_channel that had a primary
+ * channel matching the given primary_channel.
+ */
+ return chanspec;
+}
+
+/*
+ * Returns the 80+80 chanspec corresponding to the following input parameters
+ *
+ * primary_20mhz - Primary 20 MHz channel
+ * chan0 - center channel number of one frequency segment
+ * chan1 - center channel number of the other frequency segment
+ *
+ * Parameters chan0 and chan1 are channel numbers in {42, 58, 106, 122, 138, 155}.
+ * The primary channel must be contained in one of the 80MHz channels. This routine
+ * will determine which frequency segment is the primary 80 MHz segment.
+ *
+ * Returns INVCHANSPEC in case of error.
+ *
+ * Refer to 802.11-2016 section 21.3.14 "Channelization".
+ *
+ * does not support 6G
+ */
+chanspec_t
+wf_chspec_get8080_chspec(uint8 primary_20mhz, uint8 chan0, uint8 chan1)
+{
+ int sb = 0;
+ uint16 chanspec = 0;
+ int chan0_id = 0, chan1_id = 0;
+ int seg0, seg1;
+
+ chan0_id = channel_80mhz_to_id(chan0);
+ chan1_id = channel_80mhz_to_id(chan1);
+
+ /* make sure the channel numbers were valid */
+ if (chan0_id == -1 || chan1_id == -1)
+ return INVCHANSPEC;
+
+ /* does the primary channel fit with the 1st 80MHz channel ? */
+ sb = channel_to_sb(chan0, primary_20mhz, WL_CHANSPEC_BW_80);
+ if (sb >= 0) {
+ /* yes, so chan0 is frequency segment 0, and chan1 is seg 1 */
+ seg0 = chan0_id;
+ seg1 = chan1_id;
+ } else {
+ /* no, so does the primary channel fit with the 2nd 80MHz channel ? */
+ sb = channel_to_sb(chan1, primary_20mhz, WL_CHANSPEC_BW_80);
+ if (sb < 0) {
+ /* no match for pri_ch to either 80MHz center channel */
+ return INVCHANSPEC;
+ }
+ /* swapped, so chan1 is frequency segment 0, and chan0 is seg 1 */
+ seg0 = chan1_id;
+ seg1 = chan0_id;
+ }
+
+ chanspec = ((seg0 << WL_CHANSPEC_CHAN0_SHIFT) |
+ (seg1 << WL_CHANSPEC_CHAN1_SHIFT) |
+ (sb << WL_CHANSPEC_CTL_SB_SHIFT) |
+ WL_CHANSPEC_BW_8080 |
+ WL_CHANSPEC_BAND_5G);
+
+ return chanspec;
+}
+
+/*
+ * Returns the center channel of the primary 80 MHz sub-band of the provided chanspec
+ */
+uint8
+wf_chspec_primary80_channel(chanspec_t chanspec)
+{
+ chanspec_t primary80_chspec;
+ uint8 primary80_chan;
+
+ primary80_chspec = wf_chspec_primary80_chspec(chanspec);
+
+ if (primary80_chspec == INVCHANSPEC) {
+ primary80_chan = INVCHANNEL;
+ } else {
+ primary80_chan = CHSPEC_CHANNEL(primary80_chspec);
+ }
+
+ return primary80_chan;
+}
+
+/*
+ * Returns the center channel of the secondary 80 MHz sub-band of the provided chanspec
+ */
+uint8
+wf_chspec_secondary80_channel(chanspec_t chanspec)
+{
+ chanspec_t secondary80_chspec;
+ uint8 secondary80_chan;
+
+ secondary80_chspec = wf_chspec_secondary80_chspec(chanspec);
+
+ if (secondary80_chspec == INVCHANSPEC) {
+ secondary80_chan = INVCHANNEL;
+ } else {
+ secondary80_chan = CHSPEC_CHANNEL(secondary80_chspec);
+ }
+
+ return secondary80_chan;
+}
+
+/*
+ * Returns the chanspec for the primary 80MHz sub-band of a 160MHz or 80+80 channel
+ */
+chanspec_t
+wf_chspec_primary80_chspec(chanspec_t chspec)
+{
+ chanspec_t chspec80;
+ uint center_chan;
+ uint sb;
+
+ ASSERT(!wf_chspec_malformed(chspec));
+
+ if (CHSPEC_IS80(chspec)) {
+ chspec80 = chspec;
+ } else if (CHSPEC_IS160(chspec)) {
+ center_chan = CHSPEC_CHANNEL(chspec);
+ sb = CHSPEC_CTL_SB(chspec);
+
+ if (sb < WL_CHANSPEC_CTL_SB_ULL) {
+ /* Primary 80MHz is on lower side */
+ center_chan -= CH_40MHZ_APART;
+ }
+ else {
+ /* Primary 80MHz is on upper side */
+ center_chan += CH_40MHZ_APART;
+ sb -= WL_CHANSPEC_CTL_SB_ULL;
+ }
+
+ /* Create primary 80MHz chanspec */
+ chspec80 = (CHSPEC_BAND(chspec) | WL_CHANSPEC_BW_80 | sb | center_chan);
+ }
+ else {
+ chspec80 = INVCHANSPEC;
+ }
+
+ return chspec80;
+}
+
+/*
+ * Returns the chanspec for the secondary 80MHz sub-band of a 160MHz or 80+80 channel
+ */
+chanspec_t
+wf_chspec_secondary80_chspec(chanspec_t chspec)
+{
+ chanspec_t chspec80;
+ uint center_chan;
+
+ ASSERT(!wf_chspec_malformed(chspec));
+
+ if (CHSPEC_IS160(chspec)) {
+ center_chan = CHSPEC_CHANNEL(chspec);
+
+ if (CHSPEC_CTL_SB(chspec) < WL_CHANSPEC_CTL_SB_ULL) {
+ /* Primary 80MHz is on lower side, so the secondary is on
+ * the upper side
+ */
+ center_chan += CH_40MHZ_APART;
+ } else {
+ /* Primary 80MHz is on upper side, so the secondary is on
+ * the lower side
+ */
+ center_chan -= CH_40MHZ_APART;
+ }
+
+ /* Create secondary 80MHz chanspec */
+ chspec80 = (CHSPEC_BAND(chspec) |
+ WL_CHANSPEC_BW_80 |
+ WL_CHANSPEC_CTL_SB_LL |
+ center_chan);
+ }
+ else {
+ chspec80 = INVCHANSPEC;
+ }
+
+ return chspec80;
+}
+
+/*
+ * For a 160MHz or 80+80MHz chanspec, set ch[0]/ch[1] to the low/high 80MHz center channels
+ *
+ * For a 20/40/80MHz chanspec, set ch[0] to the center channel and ch[1] to -1
+ */
+void
+wf_chspec_get_80p80_channels(chanspec_t chspec, uint8 *ch)
+{
+
+ if (CHSPEC_IS160(chspec)) {
+ uint8 center_chan = CHSPEC_CHANNEL(chspec);
+ ch[0] = center_chan - CH_40MHZ_APART;
+ ch[1] = center_chan + CH_40MHZ_APART;
+ }
+ else {
+ /* for 20, 40, and 80 Mhz */
+ ch[0] = CHSPEC_CHANNEL(chspec);
+ ch[1] = -1;
+ }
+}
+
+/*
+ * Returns the center channel of the primary 160MHz sub-band of the provided chanspec
+ */
+uint8
+wf_chspec_primary160_channel(chanspec_t chanspec)
+{
+ chanspec_t primary160_chspec;
+ uint8 primary160_chan;
+
+ primary160_chspec = wf_chspec_primary160_chspec(chanspec);
+
+ if (primary160_chspec == INVCHANSPEC) {
+ primary160_chan = INVCHANNEL;
+ } else {
+ primary160_chan = CHSPEC_CHANNEL(primary160_chspec);
+ }
+
+ return primary160_chan;
+}
+
+/*
+ * Returns the chanspec for the primary 160MHz sub-band of a 240/320MHz or 160+160MHz channel
+ */
+chanspec_t
+wf_chspec_primary160_chspec(chanspec_t chspec)
+{
+ chanspec_t chspec160;
+ uint center_chan;
+ uint sb;
+
+ ASSERT(!wf_chspec_malformed(chspec));
+
+ if (CHSPEC_IS160(chspec)) {
+ chspec160 = chspec;
+ }
+ else if (CHSPEC_IS240(chspec)) {
+ uint8 ch_id = CHSPEC_GE240_CHAN(chspec);
+ center_chan = wf_chspec_240_id2cch(chspec);
+ sb = CHSPEC_GE240_SB(chspec) >> WL_CHANSPEC_GE240_SB_SHIFT;
+ /*
+ * Identify whether the chanspec is of the form 160+80 or 80+160 from the
+ * channel ID: even for 160+80, odd for 80+160.
+ */
+ if ((!(ch_id & 0x1u)) && (sb < 8u)) {
+ /* Primary 160MHz is on lower side */
+ center_chan -= CH_40MHZ_APART;
+ } else if ((ch_id & 0x1u) && (sb >= 4u)) {
+ /* Primary 160MHz is on upper side */
+ center_chan += CH_40MHZ_APART;
+ sb -= 4u;
+ } else {
+ chspec160 = INVCHANSPEC;
+ goto done;
+ }
+
+ /* Create primary 160MHz chanspec */
+ chspec160 = (CHSPEC_BAND(chspec) |
+ WL_CHANSPEC_BW_160 |
+ (sb << WL_CHANSPEC_CTL_SB_SHIFT) |
+ center_chan);
+ } else if (CHSPEC_IS320(chspec)) {
+ center_chan = wf_chspec_320_id2cch(chspec);
+ sb = CHSPEC_GE240_SB(chspec) >> WL_CHANSPEC_GE240_SB_SHIFT;
+
+ if (sb < 8u) {
+ /* Primary 160MHz is on lower side */
+ center_chan -= CH_80MHZ_APART;
+ }
+ else {
+ /* Primary 160MHz is on upper side */
+ center_chan += CH_80MHZ_APART;
+ sb -= 8u;
+ }
+
+ /* Create primary 160MHz chanspec */
+ chspec160 = (CHSPEC_BAND(chspec) |
+ WL_CHANSPEC_BW_160 |
+ (sb << WL_CHANSPEC_CTL_SB_SHIFT) |
+ center_chan);
+ }
+ else {
+ chspec160 = INVCHANSPEC;
+ }
+done:
+ return chspec160;
+}
+
+/* Populates array with all 20MHz side bands of a given chanspec_t in the following order:
+ * primary20, secondary20, two secondary40s, four secondary80s.
+ * 'chspec' is the chanspec of interest
+ * 'pext' must point to a uint8 array long enough to hold all side bands of the given chspec
+ *
+ * Works with 20, 40, 80, and 160MHz chspecs
+ */
+void
+wf_get_all_ext(chanspec_t chspec, uint8 *pext)
+{
+ chanspec_t t = (CHSPEC_IS160(chspec)) ? /* if bw > 80MHz */
+ wf_chspec_primary80_chspec(chspec) : (chspec); /* extract primary 80 */
+ /* primary20 channel as first element */
+ uint8 pri_ch = (pext)[0] = wf_chspec_primary20_chan(t);
+
+ if (CHSPEC_IS20(chspec)) {
+ return; /* nothing more to do since 20MHz chspec */
+ }
+ /* 20MHz EXT */
+ (pext)[1] = (IS_CTL_IN_L20(t) ? pri_ch + CH_20MHZ_APART : pri_ch - CH_20MHZ_APART);
+
+ if (CHSPEC_IS40(chspec)) {
+ return; /* nothing more to do since 40MHz chspec */
+ }
+ /* center 40MHz EXT */
+ t = wf_channel2chspec((IS_CTL_IN_L40(chspec) ?
+ pri_ch + CH_40MHZ_APART : pri_ch - CH_40MHZ_APART), WL_CHANSPEC_BW_40);
+ GET_ALL_SB(t, &((pext)[2])); /* get the 20MHz side bands in 40MHz EXT */
+
+ if (CHSPEC_IS80(chspec)) {
+ return; /* nothing more to do since 80MHz chspec */
+ }
+ t = CH80MHZ_CHSPEC(wf_chspec_secondary80_channel(chspec), WL_CHANSPEC_CTL_SB_LLL);
+ /* get the 20MHz side bands in 80MHz EXT (secondary) */
+ GET_ALL_SB(t, &((pext)[4]));
+}
+
+/*
+ * Given two chanspecs, returns true if they overlap.
+ * (Overlap: At least one 20MHz subband is common between the two chanspecs provided)
+ */
+bool wf_chspec_overlap(chanspec_t chspec0, chanspec_t chspec1)
+{
+ uint8 ch0, ch1;
+
+ if (CHSPEC_BAND(chspec0) != CHSPEC_BAND(chspec1)) {
+ return FALSE;
+ }
+
+ FOREACH_20_SB(chspec0, ch0) {
+ FOREACH_20_SB(chspec1, ch1) {
+ if ((uint)ABS(ch0 - ch1) < CH_20MHZ_APART) {
+ return TRUE;
+ }
+ }
+ }
+
+ return FALSE;
+}
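+
+/*
+ * Example (illustrative): a 5G 80MHz chanspec centered at 42 (20MHz sub-bands
+ * 36/40/44/48) overlaps a 40MHz chanspec centered at 38 (36/40), but not one
+ * centered at 54 (52/56); chanspecs on different bands never overlap.
+ */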
+
+uint8
+channel_bw_to_width(chanspec_t chspec)
+{
+ uint8 channel_width;
+
+ if (CHSPEC_IS80(chspec))
+ channel_width = VHT_OP_CHAN_WIDTH_80;
+ else if (CHSPEC_IS160(chspec))
+ channel_width = VHT_OP_CHAN_WIDTH_160;
+ else
+ channel_width = VHT_OP_CHAN_WIDTH_20_40;
+
+ return channel_width;
+}
+
+uint wf_chspec_first_20_sb(chanspec_t chspec)
+{
+#if defined(WL_BW160MHZ)
+ if (CHSPEC_IS160(chspec)) {
+ return LLL_20_SB_160(CHSPEC_CHANNEL(chspec));
+ } else
+#endif
+ if (CHSPEC_IS80(chspec)) {
+ return LL_20_SB(CHSPEC_CHANNEL(chspec));
+ } else if (CHSPEC_IS40(chspec)) {
+ return LOWER_20_SB(CHSPEC_CHANNEL(chspec));
+ } else {
+ return CHSPEC_CHANNEL(chspec);
+ }
+}
+
+chanspec_t
+wf_create_chspec_sb(uint sb, uint center_channel, chanspec_bw_t bw, chanspec_band_t band)
+{
+ chanspec_t chspec;
+ if (sb > (WL_CHANSPEC_CTL_SB_MASK >> WL_CHANSPEC_CTL_SB_SHIFT)) {
+ return INVCHANSPEC;
+ }
+ chspec = center_channel | band | bw | ((uint)sb << WL_CHANSPEC_CTL_SB_SHIFT);
+ return wf_chspec_valid(chspec) ? chspec : INVCHANSPEC;
+}
+
+chanspec_t
+wf_create_160160MHz_chspec_sb(uint sb, uint chan0, uint chan1, chanspec_band_t band)
+{
+ int chan0_id, chan1_id, seg0, seg1;
+ chanspec_t chspec;
+
+ if (sb > (WL_CHANSPEC_CTL_SB_UUU >> WL_CHANSPEC_CTL_SB_SHIFT)) {
+ return INVCHANSPEC;
+ }
+ /* From here on sb is not an index, but value for SB field */
+ sb <<= WL_CHANSPEC_CTL_SB_SHIFT;
+
+ /* frequency segments need to be non-contiguous, so the channel
+ * separation needs to be greater than 160MHz
+ */
+ if ((uint)ABS((int)(chan0 - chan1)) <= CH_160MHZ_APART) {
+ return INVCHANSPEC;
+ }
+
+ if (band == WL_CHANSPEC_BAND_5G) {
+ chan0_id = channel_5g_160mhz_to_id(chan0);
+ chan1_id = channel_5g_160mhz_to_id(chan1);
+ } else if (band == WL_CHANSPEC_BAND_6G) {
+ chan0_id = channel_6g_160mhz_to_id(chan0);
+ chan1_id = channel_6g_160mhz_to_id(chan1);
+ } else {
+ return INVCHANSPEC;
+ }
+
+ /* make sure the channel numbers were valid */
+ if ((chan0_id == -1) || (chan1_id == -1)) {
+ return INVCHANSPEC;
+ }
+ /* Optionally swapping channel IDs to make sure that control subchannel
+ * is in chan0
+ */
+ if (sb < WL_CHANSPEC_CTL_SB_ULL) {
+ seg0 = chan0_id;
+ seg1 = chan1_id;
+ } else {
+ seg0 = chan1_id;
+ seg1 = chan0_id;
+ sb -= WL_CHANSPEC_CTL_SB_ULL;
+ }
+ chspec = ((seg0 << WL_CHANSPEC_CHAN0_SHIFT) |
+ (seg1 << WL_CHANSPEC_CHAN1_SHIFT) |
+ sb | WL_CHANSPEC_BW_160160 | band);
+ return wf_chspec_valid(chspec) ? chspec : INVCHANSPEC;
+}
diff --git a/bcmdhd.101.10.361.x/bcmwifi_monitor.c b/bcmdhd.101.10.361.x/bcmwifi_monitor.c
new file mode 100755
index 0000000..1b606eb
--- /dev/null
+++ b/bcmdhd.101.10.361.x/bcmwifi_monitor.c
@@ -0,0 +1,1071 @@
+/*
+ * Monitor Mode routines.
+ * This file houses the Monitor Mode routines implementation.
+ *
+ * Broadcom Proprietary and Confidential. Copyright (C) 2020,
+ * All Rights Reserved.
+ *
+ * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom;
+ * the contents of this file may not be disclosed to third parties,
+ * copied or duplicated in any form, in whole or in part, without
+ * the prior written permission of Broadcom.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Proprietary:>>
+ */
+
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <hndd11.h>
+#include <bcmwifi_channels.h>
+#include <bcmwifi_radiotap.h>
+#include <bcmwifi_monitor.h>
+#include <bcmwifi_rates.h>
+#include <monitor.h>
+#include <d11_cfg.h>
+
+struct monitor_info {
+ ratespec_t ampdu_rspec; /* spec value for AMPDU sniffing */
+ uint16 ampdu_counter;
+ uint16 amsdu_len;
+ uint8* amsdu_pkt;
+ int8 headroom;
+ d11_info_t *d11_info;
+ uint8 ampdu_plcp[D11_PHY_HDR_LEN];
+};
+
+struct he_ltf_gi_info {
+ uint8 gi;
+ uint8 ltf_size;
+ uint8 num_ltf;
+};
+
+struct he_mu_ltf_mp_info {
+ uint8 num_ltf;
+ uint8 mid_per;
+};
+
+/*
+ * su ppdu - mapping of ltf and gi values from plcp to rtap data format
+ * https://www.radiotap.org/fields/HE.html
+ */
+static const struct he_ltf_gi_info he_plcp2ltf_gi[4] = {
+ {3, 0, 7}, /* reserved, reserved, reserved */
+ {0, 2, 1}, /* 0.8us, 2x, 2x */
+ {1, 2, 1}, /* 1.6us, 2x, 2x */
+ {2, 3, 2} /* 3.2us, 4x, 4x */
+};
+
+/*
+ * mu ppdu - mapping of ru type value from phy rxstatus to rtap data format
+ * https://www.radiotap.org/fields/HE.html
+ */
+static const uint8 he_mu_phyrxs2ru_type[7] = {
+ 4, /* 26-tone RU */
+ 5, /* 52-tone RU */
+ 6, /* 106-tone RU */
+ 7, /* 242-tone RU */
+ 8, /* 484-tone RU */
+ 9, /* 996-tone RU */
+ 10 /* 2x996-tone RU */
+};
+
+/*
+ * mu ppdu - doppler:1, mapping of ltf and midamble periodicity values from plcp to rtap data format
+ * https://www.radiotap.org/fields/HE.html
+ */
+static const struct he_mu_ltf_mp_info he_mu_plcp2ltf_mp[8] = {
+ {0, 0}, /* 1x, 10 */
+ {1, 0}, /* 2x, 10 */
+ {2, 0}, /* 4x, 10 */
+ {7, 0}, /* reserved, reserved */
+ {0, 1}, /* 1x, 20 */
+ {1, 1}, /* 2x, 20 */
+ {2, 1}, /* 4x, 20 */
+ {7, 0} /* reserved, reserved */
+};
+
+/*
+ * mu ppdu - doppler:0, mapping of ltf value from plcp to rtap data format
+ * https://www.radiotap.org/fields/HE.html
+ */
+static const uint8 he_mu_plcp2ltf[8] = {
+ 0, /* 1x */
+ 1, /* 2x */
+ 2, /* 4x */
+ 3, /* 6x */
+ 4, /* 8x */
+ 7, /* reserved */
+ 7, /* reserved */
+ 7 /* reserved */
+};
+
+/** Calculate the rate of a received frame and return it as a ratespec (monitor mode) */
+static ratespec_t
+BCMFASTPATH(wlc_recv_mon_compute_rspec)(monitor_info_t* info, wlc_d11rxhdr_t *wrxh, uint8 *plcp)
+{
+ d11rxhdr_t *rxh = &wrxh->rxhdr;
+ ratespec_t rspec = 0;
+ uint16 phy_ft;
+ uint corerev = info->d11_info->major_revid;
+ uint corerev_minor = info->d11_info->minor_revid;
+ BCM_REFERENCE(corerev_minor);
+
+ phy_ft = D11PPDU_FT(rxh, corerev);
+ switch (phy_ft) {
+ case FT_CCK:
+ rspec = CCK_RSPEC(CCK_PHY2MAC_RATE(((cck_phy_hdr_t *)plcp)->signal));
+ rspec |= WL_RSPEC_BW_20MHZ;
+ break;
+ case FT_OFDM:
+ rspec = OFDM_RSPEC(OFDM_PHY2MAC_RATE(((ofdm_phy_hdr_t *)plcp)->rlpt[0]));
+ rspec |= WL_RSPEC_BW_20MHZ;
+ break;
+ case FT_HT: {
+ uint ht_sig1, ht_sig2;
+ uint8 stbc;
+
+ ht_sig1 = plcp[0]; /* only interested in low 8 bits */
+ ht_sig2 = plcp[3] | (plcp[4] << 8); /* only interested in low 10 bits */
+
+ rspec = HT_RSPEC((ht_sig1 & HT_SIG1_MCS_MASK));
+ if (ht_sig1 & HT_SIG1_CBW) {
+ /* indicate rspec is for 40 MHz mode */
+ rspec |= WL_RSPEC_BW_40MHZ;
+ } else {
+ /* indicate rspec is for 20 MHz mode */
+ rspec |= WL_RSPEC_BW_20MHZ;
+ }
+ if (ht_sig2 & HT_SIG2_SHORT_GI)
+ rspec |= WL_RSPEC_SGI;
+ if (ht_sig2 & HT_SIG2_FEC_CODING)
+ rspec |= WL_RSPEC_LDPC;
+ stbc = ((ht_sig2 & HT_SIG2_STBC_MASK) >> HT_SIG2_STBC_SHIFT);
+ if (stbc != 0) {
+ rspec |= WL_RSPEC_STBC;
+ }
+ break;
+ }
+ case FT_VHT:
+ rspec = wf_vht_plcp_to_rspec(plcp);
+ break;
+#ifdef WL11AX
+ case FT_HE:
+ rspec = wf_he_plcp_to_rspec(plcp);
+ break;
+#endif /* WL11AX */
+#ifdef WL11BE
+ case FT_EHT:
+ rspec = wf_eht_plcp_to_rspec(plcp);
+ break;
+#endif
+ default:
+ /* return a valid rspec if not a debug/assert build */
+ rspec = OFDM_RSPEC(6) | WL_RSPEC_BW_20MHZ;
+ break;
+ }
+
+ return rspec;
+} /* wlc_recv_mon_compute_rspec */
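+
+/*
+ * Sketch of the dispatch above (illustrative): an FT_HT frame whose HT-SIG
+ * has HT_SIG1_CBW and HT_SIG2_SHORT_GI set yields
+ * HT_RSPEC(mcs) | WL_RSPEC_BW_40MHZ | WL_RSPEC_SGI.
+ */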
+
+static void
+wlc_he_su_fill_rtap_data(struct wl_rxsts *sts, uint8 *plcp)
+{
+ ASSERT(plcp);
+
+ /* he ppdu format */
+ sts->data1 |= WL_RXS_HEF_SIGA_PPDU_SU;
+
+ /* bss color */
+ sts->data1 |= WL_RXS_HEF_SIGA_BSS_COLOR;
+ sts->data3 |= HE_PACK_RTAP_FROM_PLCP(plcp, SU, BSS_COLOR);
+
+ /* beam change */
+ sts->data1 |= WL_RXS_HEF_SIGA_BEAM_CHANGE;
+ sts->data3 |= HE_PACK_RTAP_FROM_PLCP(plcp, SU, BEAM_CHANGE);
+
+ /* ul/dl */
+ sts->data1 |= WL_RXS_HEF_SIGA_DL_UL;
+ sts->data3 |= HE_PACK_RTAP_FROM_PLCP(plcp, SU, DL_UL);
+
+ /* data mcs */
+ sts->data1 |= WL_RXS_HEF_SIGA_MCS;
+ sts->data3 |= HE_PACK_RTAP_FROM_PLCP(plcp, SU, MCS);
+
+ /* data dcm */
+ sts->data1 |= WL_RXS_HEF_SIGA_DCM;
+ sts->data3 |= HE_PACK_RTAP_FROM_PLCP(plcp, SU, DCM);
+
+ /* coding */
+ sts->data1 |= WL_RXS_HEF_SIGA_CODING;
+ sts->data3 |= HE_PACK_RTAP_FROM_PLCP(plcp, SU, CODING);
+
+ /* ldpc extra symbol segment */
+ sts->data1 |= WL_RXS_HEF_SIGA_LDPC;
+ sts->data3 |= HE_PACK_RTAP_FROM_PLCP(plcp, SU, LDPC);
+
+ /* stbc */
+ sts->data1 |= WL_RXS_HEF_SIGA_STBC;
+ sts->data3 |= HE_PACK_RTAP_FROM_PLCP(plcp, SU, STBC);
+
+ /* spatial reuse */
+ sts->data1 |= WL_RXS_HEF_SIGA_SPATIAL_REUSE;
+ sts->data4 |= HE_PACK_RTAP_FROM_PLCP(plcp, SU, SR);
+
+ /* data bw */
+ sts->data1 |= WL_RXS_HEF_SIGA_BW;
+ sts->data5 |= HE_PACK_RTAP_FROM_PLCP(plcp, SU, BW);
+
+ /* gi */
+ sts->data2 |= WL_RXS_HEF_SIGA_GI;
+ sts->data5 |= HE_PACK_RTAP_GI_LTF_FROM_PLCP(plcp, SU, GI, gi);
+
+ /* ltf symbol size */
+ sts->data2 |= WL_RXS_HEF_SIGA_LTF_SIZE;
+ sts->data5 |= HE_PACK_RTAP_GI_LTF_FROM_PLCP(plcp, SU, LTF_SIZE, ltf_size);
+
+ /* number of ltf symbols */
+ sts->data2 |= WL_RXS_HEF_SIGA_NUM_LTF;
+ sts->data5 |= HE_PACK_RTAP_GI_LTF_FROM_PLCP(plcp, SU, NUM_LTF, num_ltf);
+
+ /* pre-fec padding factor */
+ sts->data2 |= WL_RXS_HEF_SIGA_PADDING;
+ sts->data5 |= HE_PACK_RTAP_FROM_PLCP(plcp, SU, PADDING);
+
+ /* txbf */
+ sts->data2 |= WL_RXS_HEF_SIGA_TXBF;
+ sts->data5 |= HE_PACK_RTAP_FROM_PLCP(plcp, SU, TXBF);
+
+ /* pe disambiguity */
+ sts->data2 |= WL_RXS_HEF_SIGA_PE;
+ sts->data5 |= HE_PACK_RTAP_FROM_PLCP(plcp, SU, PE);
+
+ /*
+ * if doppler (bit:41) is set in plcp to 1 then,
+ * - bit:25 indicates 'midamble periodicity'
+ * - bit:23-24 indicate 'nsts'
+ *
+ * if doppler (bit:41) is set to 0 then,
+ * - bit:23-25 indicate 'nsts'
+ */
+ if (HE_EXTRACT_FROM_PLCP(plcp, SU, DOPPLER)) {
+ /* doppler */
+ sts->data1 |= WL_RXS_HEF_SIGA_DOPPLER;
+ sts->data6 |= HE_PACK_RTAP_FROM_PLCP(plcp, SU, DOPPLER);
+
+ /* midamble periodicity */
+ sts->data2 |= WL_RXS_HEF_SIGA_MIDAMBLE;
+ sts->data6 |= HE_PACK_RTAP_FROM_PLCP(plcp, SU, MIDAMBLE);
+
+ /* nsts */
+ sts->data6 |= HE_PACK_RTAP_FROM_PLCP(plcp, SU, DOPPLER_SET_NSTS);
+ } else {
+ /* nsts */
+ sts->data6 |= HE_PACK_RTAP_FROM_PLCP(plcp, SU, DOPPLER_NOTSET_NSTS);
+ }
+
+ /* txop */
+ sts->data2 |= WL_RXS_HEF_SIGA_TXOP;
+ sts->data6 |= HE_PACK_RTAP_FROM_PLCP(plcp, SU, TXOP);
+}
+
+static void
+wlc_he_dl_ofdma_fill_rtap_data(struct wl_rxsts *sts, d11rxhdr_t *rxh,
+ uint8 *plcp, uint32 corerev, uint32 corerev_minor)
+{
+ uint8 doppler, midamble, val;
+ ASSERT(rxh);
+ ASSERT(plcp);
+
+ /* he ppdu format */
+ sts->data1 |= WL_RXS_HEF_SIGA_PPDU_MU;
+
+ /* bss color */
+ sts->data1 |= WL_RXS_HEF_SIGA_BSS_COLOR;
+ sts->data3 |= HE_PACK_RTAP_FROM_PLCP(plcp, MU, BSS_COLOR);
+
+ /* beam change (doesn't apply to mu ppdu) */
+ sts->data1 &= ~WL_RXS_HEF_SIGA_BEAM_CHANGE;
+
+ /* ul/dl */
+ sts->data1 |= WL_RXS_HEF_SIGA_DL_UL;
+ sts->data3 |= HE_PACK_RTAP_FROM_PLCP(plcp, MU, DL_UL);
+
+ /* data mcs */
+ sts->data1 |= WL_RXS_HEF_SIGA_MCS;
+ sts->data3 |= HE_PACK_RTAP_FROM_PRXS(rxh, corerev, corerev_minor, MCS);
+
+ /* data dcm */
+ sts->data1 |= WL_RXS_HEF_SIGA_DCM;
+ sts->data3 |= HE_PACK_RTAP_FROM_PRXS(rxh, corerev, corerev_minor, DCM);
+
+ /* coding */
+ sts->data1 |= WL_RXS_HEF_SIGA_CODING;
+ sts->data3 |= HE_PACK_RTAP_FROM_PRXS(rxh, corerev, corerev_minor, CODING);
+
+ /* ldpc extra symbol segment */
+ sts->data1 |= WL_RXS_HEF_SIGA_LDPC;
+ sts->data3 |= HE_PACK_RTAP_FROM_PLCP(plcp, MU, LDPC);
+
+ /* stbc */
+ sts->data1 |= WL_RXS_HEF_SIGA_STBC;
+ sts->data3 |= HE_PACK_RTAP_FROM_PLCP(plcp, MU, STBC);
+
+ /* spatial reuse */
+ sts->data1 |= WL_RXS_HEF_SIGA_SPATIAL_REUSE;
+ sts->data4 |= HE_PACK_RTAP_FROM_PLCP(plcp, MU, SR);
+
+ /* sta-id */
+ sts->data1 |= WL_RXS_HEF_SIGA_STA_ID;
+ sts->data4 |= HE_PACK_RTAP_FROM_PRXS(rxh, corerev, corerev_minor, STAID);
+
+ /* ru allocation */
+ val = he_mu_phyrxs2ru_type[D11PPDU_RU_TYPE(rxh, corerev, corerev_minor)];
+ sts->data1 |= WL_RXS_HEF_SIGA_RU_ALLOC;
+ sts->data5 |= HE_PACK_RTAP_FROM_VAL(val, RU_ALLOC);
+
+ /* doppler */
+ sts->data1 |= WL_RXS_HEF_SIGA_DOPPLER;
+ sts->data6 |= HE_PACK_RTAP_FROM_PLCP(plcp, MU, DOPPLER);
+
+ doppler = HE_EXTRACT_FROM_PLCP(plcp, MU, DOPPLER);
+ midamble = HE_EXTRACT_FROM_PLCP(plcp, MU, MIDAMBLE);
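+ /*
+ * The PLCP bits extracted as 'midamble' are doppler-dependent: with doppler
+ * set they select both the LTF count and the midamble periodicity (via
+ * he_mu_plcp2ltf_mp); with doppler clear they are the raw LTF-size code
+ * mapped through he_mu_plcp2ltf.
+ */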
+ if (doppler) {
+ /* number of ltf symbols */
+ val = he_mu_plcp2ltf_mp[midamble].num_ltf;
+ sts->data2 |= WL_RXS_HEF_SIGA_NUM_LTF;
+ sts->data5 |= HE_PACK_RTAP_FROM_VAL(val, NUM_LTF);
+
+ /* midamble periodicity */
+ val = he_mu_plcp2ltf_mp[midamble].mid_per;
+ sts->data2 |= WL_RXS_HEF_SIGA_MIDAMBLE;
+ sts->data6 |= HE_PACK_RTAP_FROM_VAL(val, MIDAMBLE);
+ } else {
+ /* number of ltf symbols */
+ val = he_mu_plcp2ltf[midamble];
+ sts->data2 |= WL_RXS_HEF_SIGA_NUM_LTF;
+ sts->data5 |= HE_PACK_RTAP_FROM_VAL(val, NUM_LTF);
+ }
+
+ /* nsts */
+ sts->data6 |= HE_PACK_RTAP_FROM_PRXS(rxh, corerev, corerev_minor, NSTS);
+
+ /* gi */
+ sts->data2 |= WL_RXS_HEF_SIGA_GI;
+ sts->data5 |= HE_PACK_RTAP_GI_LTF_FROM_PLCP(plcp, MU, GI, gi);
+
+ /* ltf symbol size */
+ sts->data2 |= WL_RXS_HEF_SIGA_LTF_SIZE;
+ sts->data5 |= HE_PACK_RTAP_GI_LTF_FROM_PLCP(plcp, MU, LTF_SIZE, ltf_size);
+
+ /* pre-fec padding factor */
+ sts->data2 |= WL_RXS_HEF_SIGA_PADDING;
+ sts->data5 |= HE_PACK_RTAP_FROM_PLCP(plcp, MU, PADDING);
+
+ /* txbf */
+ sts->data2 |= WL_RXS_HEF_SIGA_TXBF;
+ sts->data5 |= HE_PACK_RTAP_FROM_PRXS(rxh, corerev, corerev_minor, TXBF);
+
+ /* pe disambiguity */
+ sts->data2 |= WL_RXS_HEF_SIGA_PE;
+ sts->data5 |= HE_PACK_RTAP_FROM_PLCP(plcp, MU, PE);
+
+ /* txop */
+ sts->data2 |= WL_RXS_HEF_SIGA_TXOP;
+ sts->data6 |= HE_PACK_RTAP_FROM_PLCP(plcp, MU, TXOP);
+}
+
+static void
+wlc_he_dl_ofdma_fill_rtap_flag(struct wl_rxsts *sts, uint8 *plcp, uint32 corerev)
+{
+ ASSERT(plcp);
+
+ /* sig-b mcs */
+ sts->flag1 |= WL_RXS_HEF_SIGB_MCS_KNOWN;
+ sts->flag1 |= HE_PACK_RTAP_FROM_PLCP(plcp, MU, SIGB_MCS);
+
+ /* sig-b dcm */
+ sts->flag1 |= WL_RXS_HEF_SIGB_DCM_KNOWN;
+ sts->flag1 |= HE_PACK_RTAP_FROM_PLCP(plcp, MU, SIGB_DCM);
+
+ /* sig-b compression */
+ sts->flag1 |= WL_RXS_HEF_SIGB_COMP_KNOWN;
+ sts->flag2 |= HE_PACK_RTAP_FROM_PLCP(plcp, MU, SIGB_COMP);
+
+ /* # of he-sig-b symbols/mu-mimo users */
+ sts->flag1 |= WL_RXS_HEF_NUM_SIGB_SYMB_KNOWN;
+ sts->flag2 |= HE_PACK_RTAP_FROM_PLCP(plcp, MU, SIGB_SYM_MU_MIMO_USER);
+
+ /* bandwidth from bandwidth field in he-sig-a */
+ sts->flag2 |= WL_RXS_HEF_BW_SIGA_KNOWN;
+ sts->flag2 |= HE_PACK_RTAP_FROM_PLCP(plcp, MU, BW_SIGA);
+
+ /* preamble puncturing from bandwidth field in he-sig-a */
+ sts->flag2 |= WL_RXS_HEF_PREPUNCR_SIGA_KNOWN;
+ sts->flag2 |= HE_PACK_RTAP_FROM_PLCP(plcp, MU, PRE_PUNCR_SIGA);
+}
+
+static void
+wlc_he_ul_ofdma_fill_rtap_data(struct wl_rxsts *sts, d11rxhdr_t *rxh, uint8 *plcp,
+ uint32 corerev)
+{
+ ASSERT(rxh);
+ ASSERT(plcp);
+
+ BCM_REFERENCE(rxh);
+
+ /* he ppdu format */
+ sts->data1 |= WL_RXS_HEF_SIGA_PPDU_TRIG;
+
+ /* bss color */
+ sts->data1 |= WL_RXS_HEF_SIGA_BSS_COLOR;
+ sts->data3 |= HE_PACK_RTAP_FROM_PLCP(plcp, TRIG, BSS_COLOR);
+
+ /* beam change (doesn't apply to mu ppdu) */
+ sts->data1 &= ~WL_RXS_HEF_SIGA_BEAM_CHANGE;
+
+ /* ul/dl */
+ sts->data1 |= WL_RXS_HEF_SIGA_DL_UL;
+ sts->data3 |= HE_PACK_RTAP_FROM_VAL(1, DL_UL);
+
+ /* txop */
+ sts->data2 |= WL_RXS_HEF_SIGA_TXOP;
+ sts->data6 |= HE_PACK_RTAP_FROM_PLCP(plcp, TRIG, TXOP);
+}
+
+/* recover 32bit TSF value from the 16bit TSF value */
+/* assumption is time in rxh is within 65ms of the current tsf */
+/* local TSF inserted in the rxh is at RxStart which is before 802.11 header */
+static uint32
+wlc_recover_tsf32(uint16 rxh_tsf, uint32 ts_tsf)
+{
+ uint16 rfdly;
+
+ /* adjust rx dly added in RxTSFTime */
+ /* comment in d11.h:
+ * BWL_PRE_PACKED_STRUCT struct d11rxhdr {
+ * ...
+ * uint16 RxTSFTime; RxTSFTime time of first MAC symbol + M_PHY_PLCPRX_DLY
+ * ...
+ * }
+ */
+
+ /* TODO: add PHY type specific value here... */
+ rfdly = M_BPHY_PLCPRX_DLY;
+
+ rxh_tsf -= rfdly;
+
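+ /*
+ * Example (illustrative): ts_tsf = 0x12340010, adjusted rxh_tsf = 0xFFF0:
+ * 0x12340010 - 0xFFF0 = 0x12330020; keeping the high 16 bits and OR-ing
+ * rxh_tsf back in yields 0x1233FFF0, the frame's full 32-bit TSF.
+ */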
+ return (((ts_tsf - rxh_tsf) & 0xFFFF0000) | rxh_tsf);
+}
+
+static uint8
+wlc_vht_get_gid(uint8 *plcp)
+{
+ uint32 plcp0 = plcp[0] | (plcp[1] << 8);
+ return (plcp0 & VHT_SIGA1_GID_MASK) >> VHT_SIGA1_GID_SHIFT;
+}
+
+static uint16
+wlc_vht_get_aid(uint8 *plcp)
+{
+ uint32 plcp0 = plcp[0] | (plcp[1] << 8) | (plcp[2] << 16);
+ return (plcp0 & VHT_SIGA1_PARTIAL_AID_MASK) >> VHT_SIGA1_PARTIAL_AID_SHIFT;
+}
+
+static bool
+wlc_vht_get_txop_ps_not_allowed(uint8 *plcp)
+{
+ return !!(plcp[2] & (VHT_SIGA1_TXOP_PS_NOT_ALLOWED >> 16));
+}
+
+static bool
+wlc_vht_get_sgi_nsym_da(uint8 *plcp)
+{
+ return !!(plcp[3] & VHT_SIGA2_GI_W_MOD10);
+}
+
+static bool
+wlc_vht_get_ldpc_extra_symbol(uint8 *plcp)
+{
+ return !!(plcp[3] & VHT_SIGA2_LDPC_EXTRA_OFDM_SYM);
+}
+
+static bool
+wlc_vht_get_beamformed(uint8 *plcp)
+{
+ return !!(plcp[4] & (VHT_SIGA2_BEAMFORM_ENABLE >> 8));
+}
+/* Convert htflags and mcs values to
+ * rate in units of 500 kbps
+ */
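+/* Example (illustrative): HT MCS 7, 40 MHz, SGI -> 150000 kbps -> 300 units of 500 kbps */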
+static uint16
+wlc_ht_phy_get_rate(uint8 htflags, uint8 mcs)
+{
+ ratespec_t rspec = HT_RSPEC(mcs);
+
+ if (htflags & WL_RXS_HTF_40)
+ rspec |= WL_RSPEC_BW_40MHZ;
+
+ if (htflags & WL_RXS_HTF_SGI)
+ rspec |= WL_RSPEC_SGI;
+
+ return RSPEC2KBPS(rspec)/500;
+}
+
+static void
+bcmwifi_update_rxpwr_per_ant(monitor_pkt_rxsts_t *pkt_rxsts, wlc_d11rxhdr_t *wrxh)
+{
+ int i = 0;
+ wlc_d11rxhdr_ext_t *wrxh_ext = (wlc_d11rxhdr_ext_t *)((uint8 *)wrxh - WLC_SWRXHDR_EXT_LEN);
+
+ BCM_REFERENCE(wrxh_ext);
+
+ pkt_rxsts->corenum = 0;
+
+ for (i = 0; i < WL_RSSI_ANT_MAX; i++) {
+#ifdef BCM_MON_QDBM_RSSI
+ pkt_rxsts->rxpwr[i].dBm = wrxh_ext->rxpwr[i].dBm;
+ pkt_rxsts->rxpwr[i].decidBm = wrxh_ext->rxpwr[i].decidBm;
+#else
+ pkt_rxsts->rxpwr[i].dBm = wrxh->rxpwr[i];
+ pkt_rxsts->rxpwr[i].decidBm = 0;
+#endif /* BCM_MON_QDBM_RSSI */
+ if (pkt_rxsts->rxpwr[i].dBm == 0) {
+ break;
+ }
+ pkt_rxsts->corenum++;
+ }
+}
+
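+/*
+ * A-MPDU handling: only the first MPDU of an aggregate carries a valid PLCP
+ * header, so its rspec and PLCP are cached in 'info' and reused for the
+ * subframes that follow (including the last MPDU, which has a valid Phy
+ * RxStatus but no valid PLCP).
+ */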
+static void
+bcmwifi_parse_ampdu(monitor_info_t *info, d11rxhdr_t *rxh, uint16 subtype, ratespec_t rspec,
+ uint8 *plcp, struct wl_rxsts *sts)
+{
+ uint32 corerev = info->d11_info->major_revid;
+ uint32 corerev_minor = info->d11_info->minor_revid;
+ uint32 ft = D11PPDU_FT(rxh, corerev);
+ uint8 plcp_len = D11_PHY_RXPLCP_LEN(corerev);
+ BCM_REFERENCE(corerev_minor);
+ if ((subtype == FC_SUBTYPE_QOS_DATA) || (subtype == FC_SUBTYPE_QOS_NULL)) {
+ /* A-MPDU parsing */
+ switch (ft) {
+ case FT_HT:
+ if (WLC_IS_MIMO_PLCP_AMPDU(plcp)) {
+ sts->nfrmtype |= WL_RXS_NFRM_AMPDU_FIRST;
+ /* Save the rspec & plcp for later */
+ info->ampdu_rspec = rspec;
+ /* src & dst len are same */
+ (void)memcpy_s(info->ampdu_plcp, plcp_len, plcp, plcp_len);
+ } else if (!PLCP_VALID(plcp)) {
+ sts->nfrmtype |= WL_RXS_NFRM_AMPDU_SUB;
+ /* Use the saved rspec & plcp */
+ rspec = info->ampdu_rspec;
+ /* src & dst len are same */
+ (void)memcpy_s(plcp, plcp_len, info->ampdu_plcp, plcp_len);
+ }
+ break;
+
+ case FT_VHT:
+ case FT_HE:
+ case FT_EHT:
+ if (PLCP_VALID(plcp) &&
+ !IS_PHYRXHDR_VALID(rxh, corerev, corerev_minor)) {
+ /* First MPDU:
+ * PLCP header is valid, Phy RxStatus is not valid
+ */
+ sts->nfrmtype |= WL_RXS_NFRM_AMPDU_FIRST;
+ /* Save the rspec & plcp for later */
+ info->ampdu_rspec = rspec;
+ /* src & dst len are same */
+ (void)memcpy_s(info->ampdu_plcp, plcp_len, plcp, plcp_len);
+ info->ampdu_counter++;
+ } else if (!PLCP_VALID(plcp) &&
+ !IS_PHYRXHDR_VALID(rxh, corerev, corerev_minor)) {
+ /* Sub MPDU:
+ * PLCP header is not valid,
+ * Phy RxStatus is not valid
+ */
+ sts->nfrmtype |= WL_RXS_NFRM_AMPDU_SUB;
+ /* Use the saved rspec & plcp */
+ rspec = info->ampdu_rspec;
+ /* src & dst len are same */
+ (void)memcpy_s(plcp, plcp_len, info->ampdu_plcp, plcp_len);
+ } else if (PLCP_VALID(plcp) &&
+ IS_PHYRXHDR_VALID(rxh, corerev, corerev_minor)) {
+ /* MPDU is not a part of A-MPDU:
+ * PLCP header is valid and Phy RxStatus is valid
+ */
+ info->ampdu_counter++;
+ } else {
+ /* Last MPDU:
+ * PLCP header is not valid, Phy RxStatus is valid.
+ * VHT packets are considered part of an A-MPDU;
+ * use the saved rspec & plcp.
+ */
+ rspec = info->ampdu_rspec;
+ /* src & dst len are same */
+ (void)memcpy_s(plcp, plcp_len, info->ampdu_plcp, plcp_len);
+ }
+
+ sts->ampdu_counter = info->ampdu_counter;
+ break;
+
+ case FT_OFDM:
+ break;
+ default:
+ printf("invalid frame type: %d\n", ft);
+ break;
+ }
+ }
+}
+
+static void
+bcmwifi_update_rate_modulation_info(monitor_info_t *info, d11rxhdr_t *rxh, d11rxhdr_t *rxh_last,
+ ratespec_t rspec, uint8* plcp, struct wl_rxsts *sts)
+{
+ uint32 corerev = info->d11_info->major_revid;
+ uint32 corerev_minor = info->d11_info->minor_revid;
+
+ /* prepare rate/modulation info */
+ if (RSPEC_ISVHT(rspec)) {
+ uint32 bw = RSPEC_BW(rspec);
+ /* prepare VHT rate/modulation info */
+ sts->nss = (rspec & WL_RSPEC_VHT_NSS_MASK) >> WL_RSPEC_VHT_NSS_SHIFT;
+ sts->mcs = (rspec & WL_RSPEC_VHT_MCS_MASK);
+
+ if (CHSPEC_IS80(sts->chanspec)) {
+ if (bw == WL_RSPEC_BW_20MHZ) {
+ switch (CHSPEC_CTL_SB(sts->chanspec)) {
+ default:
+ case WL_CHANSPEC_CTL_SB_LL:
+ sts->bw = WL_RXS_VHT_BW_20LL;
+ break;
+ case WL_CHANSPEC_CTL_SB_LU:
+ sts->bw = WL_RXS_VHT_BW_20LU;
+ break;
+ case WL_CHANSPEC_CTL_SB_UL:
+ sts->bw = WL_RXS_VHT_BW_20UL;
+ break;
+ case WL_CHANSPEC_CTL_SB_UU:
+ sts->bw = WL_RXS_VHT_BW_20UU;
+ break;
+ }
+ } else if (bw == WL_RSPEC_BW_40MHZ) {
+ switch (CHSPEC_CTL_SB(sts->chanspec)) {
+ default:
+ case WL_CHANSPEC_CTL_SB_L:
+ sts->bw = WL_RXS_VHT_BW_40L;
+ break;
+ case WL_CHANSPEC_CTL_SB_U:
+ sts->bw = WL_RXS_VHT_BW_40U;
+ break;
+ }
+ } else {
+ sts->bw = WL_RXS_VHT_BW_80;
+ }
+ } else if (CHSPEC_IS40(sts->chanspec)) {
+ if (bw == WL_RSPEC_BW_20MHZ) {
+ switch (CHSPEC_CTL_SB(sts->chanspec)) {
+ default:
+ case WL_CHANSPEC_CTL_SB_L:
+ sts->bw = WL_RXS_VHT_BW_20L;
+ break;
+ case WL_CHANSPEC_CTL_SB_U:
+ sts->bw = WL_RXS_VHT_BW_20U;
+ break;
+ }
+ } else if (bw == WL_RSPEC_BW_40MHZ) {
+ sts->bw = WL_RXS_VHT_BW_40;
+ }
+ } else {
+ sts->bw = WL_RXS_VHT_BW_20;
+ }
+
+ if (RSPEC_ISSTBC(rspec))
+ sts->vhtflags |= WL_RXS_VHTF_STBC;
+ if (wlc_vht_get_txop_ps_not_allowed(plcp))
+ sts->vhtflags |= WL_RXS_VHTF_TXOP_PS;
+ if (RSPEC_ISSGI(rspec)) {
+ sts->vhtflags |= WL_RXS_VHTF_SGI;
+ if (wlc_vht_get_sgi_nsym_da(plcp))
+ sts->vhtflags |= WL_RXS_VHTF_SGI_NSYM_DA;
+ }
+ if (RSPEC_ISLDPC(rspec)) {
+ sts->coding = WL_RXS_VHTF_CODING_LDCP;
+ if (wlc_vht_get_ldpc_extra_symbol(plcp)) {
+ /* need to un-set for MU-MIMO */
+ sts->vhtflags |= WL_RXS_VHTF_LDPC_EXTRA;
+ }
+ }
+ if (wlc_vht_get_beamformed(plcp))
+ sts->vhtflags |= WL_RXS_VHTF_BF;
+
+ sts->gid = wlc_vht_get_gid(plcp);
+ sts->aid = wlc_vht_get_aid(plcp);
+ sts->datarate = RSPEC2KBPS(rspec)/500;
+ } else if (RSPEC_ISHT(rspec)) {
+ /* prepare HT rate/modulation info */
+ sts->mcs = (rspec & WL_RSPEC_HT_MCS_MASK);
+
+ if (CHSPEC_IS40(sts->chanspec) || CHSPEC_IS80(sts->chanspec)) {
+ uint32 bw = RSPEC_BW(rspec);
+
+ if (bw == WL_RSPEC_BW_20MHZ) {
+ if (CHSPEC_CTL_SB(sts->chanspec) == WL_CHANSPEC_CTL_SB_L) {
+ sts->htflags = WL_RXS_HTF_20L;
+ } else {
+ sts->htflags = WL_RXS_HTF_20U;
+ }
+ } else if (bw == WL_RSPEC_BW_40MHZ) {
+ sts->htflags = WL_RXS_HTF_40;
+ }
+ }
+
+ if (RSPEC_ISSGI(rspec))
+ sts->htflags |= WL_RXS_HTF_SGI;
+ if (RSPEC_ISLDPC(rspec))
+ sts->htflags |= WL_RXS_HTF_LDPC;
+ if (RSPEC_ISSTBC(rspec))
+ sts->htflags |= (1 << WL_RXS_HTF_STBC_SHIFT);
+
+ sts->datarate = wlc_ht_phy_get_rate(sts->htflags, sts->mcs);
+ } else if (FALSE ||
+#ifdef WL11BE
+ RSPEC_ISHEEXT(rspec) ||
+#else
+ RSPEC_ISHE(rspec) ||
+#endif
+ FALSE) {
+ sts->nss = (rspec & WL_RSPEC_NSS_MASK) >> WL_RSPEC_NSS_SHIFT;
+ sts->mcs = (rspec & WL_RSPEC_MCS_MASK);
+
+ if (D11PPDU_ISMU_REV80(rxh_last, corerev, corerev_minor)) {
+ if (IS_PHYRXHDR_VALID(rxh_last, corerev, corerev_minor)) {
+ uint16 ff_type = D11PPDU_FF_TYPE(rxh_last,
+ corerev, corerev_minor);
+
+ switch (ff_type) {
+ case HE_MU_PPDU:
+ wlc_he_dl_ofdma_fill_rtap_data(sts, rxh_last,
+ plcp, corerev, corerev_minor);
+ wlc_he_dl_ofdma_fill_rtap_flag(sts, plcp, corerev);
+ break;
+ case HE_TRIG_PPDU:
+ wlc_he_ul_ofdma_fill_rtap_data(sts, rxh_last,
+ plcp, corerev);
+ break;
+ default:
+ /* should not have come here */
+ ASSERT(0);
+ break;
+ }
+ }
+ } else {
+ /* frame format is either SU or SU_RE (assumption: only SU is supported) */
+ wlc_he_su_fill_rtap_data(sts, plcp);
+ }
+ } else {
+ /* express non-HT data rate in 500 kbps units */
+ sts->datarate = RSPEC2KBPS(rspec)/500;
+ }
+}
+
+/* Convert RX hardware status to standard format and send to wl_monitor
+ * the PLCP header is located at 'pkt' + hwrxoff (see the plcp assignment below)
+ */
+static uint16
+wl_d11rx_to_rxsts(monitor_info_t* info, monitor_pkt_info_t* pkt_info, wlc_d11rxhdr_t *wrxh,
+ wlc_d11rxhdr_t *wrxh_last, void *pkt, uint16 len, void* pout, uint16 pad_req)
+{
+ struct wl_rxsts sts;
+ monitor_pkt_rxsts_t pkt_rxsts;
+ ratespec_t rspec;
+ uint16 chan_num;
+ uint8 *plcp;
+ uint8 *p = (uint8*)pkt;
+ uint8 hwrxoff = 0;
+ uint32 corerev = 0;
+ uint32 corerev_minor = 0;
+ struct dot11_header *h;
+ uint16 subtype;
+ d11rxhdr_t *rxh = &(wrxh->rxhdr);
+ d11rxhdr_t *rxh_last = &(wrxh_last->rxhdr);
+ d11_info_t* d11i = info->d11_info;
+ uint8 plcp_len = 0;
+
+ BCM_REFERENCE(chan_num);
+
+ ASSERT(p);
+ ASSERT(info);
+ pkt_rxsts.rxsts = &sts;
+
+ hwrxoff = (pkt_info->marker >> 16) & 0xff;
+ corerev = d11i->major_revid;
+ corerev_minor = d11i->minor_revid;
+ BCM_REFERENCE(corerev_minor);
+
+ plcp = (uint8*)p + hwrxoff;
+ plcp_len = D11_PHY_RXPLCP_LEN(corerev);
+
+ /* only non short rxstatus is expected */
+ if (IS_D11RXHDRSHORT(rxh, corerev, corerev_minor)) {
+ printf("short rxstatus is not expected here!\n");
+ ASSERT(0);
+ return 0;
+ }
+
+ if (RXHDR_GET_PAD_PRES(rxh, corerev, corerev_minor)) {
+ plcp += 2;
+ }
+
+ bzero((void *)&sts, sizeof(wl_rxsts_t));
+
+ sts.mactime = wlc_recover_tsf32(pkt_info->ts.ts_high, pkt_info->ts.ts_low);
+
+ /* update rxpwr per antenna */
+ bcmwifi_update_rxpwr_per_ant(&pkt_rxsts, wrxh);
+
+ /* calculate rspec based on ppdu frame type */
+ rspec = wlc_recv_mon_compute_rspec(info, wrxh, plcp);
+
+ h = (struct dot11_header *)(plcp + plcp_len);
+ subtype = (ltoh16(h->fc) & FC_SUBTYPE_MASK) >> FC_SUBTYPE_SHIFT;
+
+ /* parse & cache respec for ampdu */
+ bcmwifi_parse_ampdu(info, rxh, subtype, rspec, plcp, &sts);
+
+ /* A-MSDU parsing */
+ if (RXHDR_GET_AMSDU(rxh, corerev, corerev_minor)) {
+ /* it's chained buffer, break it if necessary */
+ sts.nfrmtype |= WL_RXS_NFRM_AMSDU_FIRST | WL_RXS_NFRM_AMSDU_SUB;
+ }
+
+ sts.signal = (pkt_info->marker >> 8) & 0xff;
+ sts.noise = (int8)pkt_info->marker;
+ sts.chanspec = D11RXHDR_ACCESS_VAL(rxh, corerev, corerev_minor, RxChan);
+
+ if (wf_chspec_malformed(sts.chanspec)) {
+ printf("Malformed chspec, %x\n", sts.chanspec);
+ return 0;
+ }
+
+ /* 4360: is chan_num supposed to be primary or CF channel? */
+ chan_num = CHSPEC_CHANNEL(sts.chanspec);
+
+ if (PRXS5_ACPHY_DYNBWINNONHT(rxh))
+ sts.vhtflags |= WL_RXS_VHTF_DYN_BW_NONHT;
+ else
+ sts.vhtflags &= ~WL_RXS_VHTF_DYN_BW_NONHT;
+
+ switch (PRXS5_ACPHY_CHBWINNONHT(rxh)) {
+ default: case PRXS5_ACPHY_CHBWINNONHT_20MHZ:
+ sts.bw_nonht = WLC_20_MHZ;
+ break;
+ case PRXS5_ACPHY_CHBWINNONHT_40MHZ:
+ sts.bw_nonht = WLC_40_MHZ;
+ break;
+ case PRXS5_ACPHY_CHBWINNONHT_80MHZ:
+ sts.bw_nonht = WLC_80_MHZ;
+ break;
+ case PRXS5_ACPHY_CHBWINNONHT_160MHZ:
+ sts.bw_nonht = WLC_160_MHZ;
+ break;
+ }
+
+ /* update rate and modulation info */
+ bcmwifi_update_rate_modulation_info(info, rxh, rxh_last, rspec, plcp, &sts);
+
+ sts.pktlength = FRAMELEN(corerev, corerev_minor, rxh) - plcp_len;
+
+ sts.phytype = WL_RXS_PHY_N;
+
+ if (RSPEC_ISCCK(rspec)) {
+ sts.encoding = WL_RXS_ENCODING_DSSS_CCK;
+ sts.preamble = (PRXS_SHORTH(rxh, corerev, corerev_minor) ?
+ WL_RXS_PREAMBLE_SHORT : WL_RXS_PREAMBLE_LONG);
+ } else if (RSPEC_ISOFDM(rspec)) {
+ sts.encoding = WL_RXS_ENCODING_OFDM;
+ sts.preamble = WL_RXS_PREAMBLE_SHORT;
+ } else if (RSPEC_ISVHT(rspec)) {
+ sts.encoding = WL_RXS_ENCODING_VHT;
+ } else if (RSPEC_ISHE(rspec)) {
+ sts.encoding = WL_RXS_ENCODING_HE;
+ } else if (RSPEC_ISEHT(rspec)) {
+ sts.encoding = WL_RXS_ENCODING_EHT;
+ } else { /* MCS rate */
+ sts.encoding = WL_RXS_ENCODING_HT;
+ sts.preamble = (uint32)((D11HT_MMPLCPLen(rxh) != 0) ?
+ WL_RXS_PREAMBLE_HT_MM : WL_RXS_PREAMBLE_HT_GF);
+ }
+
+ /* translate error code */
+ if (D11RXHDR_ACCESS_VAL(rxh, corerev, corerev_minor, RxStatus1) & RXS_DECERR)
+ sts.pkterror |= WL_RXS_DECRYPT_ERR;
+ if (D11RXHDR_ACCESS_VAL(rxh, corerev, corerev_minor, RxStatus1) & RXS_FCSERR)
+ sts.pkterror |= WL_RXS_CRC_ERROR;
+
+ if (RXHDR_GET_PAD_PRES(rxh, corerev, corerev_minor)) {
+ p += 2; len -= 2;
+ }
+
+ p += (hwrxoff + D11_PHY_RXPLCP_LEN(corerev));
+ len -= (hwrxoff + D11_PHY_RXPLCP_LEN(corerev));
+ return (wl_rxsts_to_rtap(&pkt_rxsts, p, len, pout, pad_req));
+}
+
+#ifndef MONITOR_DNGL_CONV
+/* Collect AMSDU subframe packets */
+static uint16
+wl_monitor_amsdu(monitor_info_t* info, monitor_pkt_info_t* pkt_info, wlc_d11rxhdr_t *wrxh,
+ wlc_d11rxhdr_t *wrxh_last, void *pkt, uint16 len, void* pout, uint16* offset)
+{
+ uint8 *p = pkt;
+ uint8 hwrxoff = (pkt_info->marker >> 16) & 0xff;
+ uint32 corerev = info->d11_info->major_revid; /* needed by D11_PHY_RXPLCP_LEN() below */
+ uint16 frame_len = 0;
+ uint16 aggtype = (wrxh->rxhdr.lt80.RxStatus2 & RXS_AGGTYPE_MASK) >> RXS_AGGTYPE_SHIFT;
+
+ switch (aggtype) {
+ case RXS_AMSDU_FIRST:
+ case RXS_AMSDU_N_ONE:
+ /* Flush any previously collected */
+ if (info->amsdu_len) {
+ info->amsdu_len = 0;
+ }
+
+ info->headroom = MAX_RADIOTAP_SIZE - D11_PHY_RXPLCP_LEN(corerev) - hwrxoff;
+ info->headroom -= (wrxh->rxhdr.lt80.RxStatus1 & RXS_PBPRES) ? 2 : 0;
+
+ /* Save the new starting AMSDU subframe */
+ info->amsdu_len = len;
+ info->amsdu_pkt = (uint8*)pout + (info->headroom > 0 ?
+ info->headroom : 0);
+
+ memcpy(info->amsdu_pkt, p, len);
+
+ if (aggtype == RXS_AMSDU_N_ONE) {
+ /* all-in-one AMSDU subframe */
+ frame_len = wl_d11rx_to_rxsts(info, pkt_info, wrxh, wrxh, p,
+ len, info->amsdu_pkt - info->headroom, 0);
+
+ *offset = ABS(info->headroom);
+ frame_len += *offset;
+
+ info->amsdu_len = 0;
+ }
+ break;
+
+ case RXS_AMSDU_INTERMEDIATE:
+ case RXS_AMSDU_LAST:
+ default:
+ /* Check for previously collected */
+ if (info->amsdu_len) {
+ /* Append next AMSDU subframe */
+ p += hwrxoff; len -= hwrxoff;
+
+ if (wrxh->rxhdr.lt80.RxStatus1 & RXS_PBPRES) {
+ p += 2; len -= 2;
+ }
+
+ memcpy(info->amsdu_pkt + info->amsdu_len, p, len);
+ info->amsdu_len += len;
+
+ /* complete AMSDU frame */
+ if (aggtype == RXS_AMSDU_LAST) {
+ frame_len = wl_d11rx_to_rxsts(info, pkt_info, wrxh, wrxh,
+ info->amsdu_pkt, info->amsdu_len,
+ info->amsdu_pkt - info->headroom, 0);
+
+ *offset = ABS(info->headroom);
+ frame_len += *offset;
+
+ info->amsdu_len = 0;
+ }
+ }
+ break;
+ }
+
+ return frame_len;
+}
+#endif /* MONITOR_DNGL_CONV */
+
+uint16 bcmwifi_monitor_create(monitor_info_t** info)
+{
+ *info = MALLOCZ(NULL, sizeof(struct monitor_info));
+ if ((*info) == NULL) {
+ return FALSE;
+ }
+
+ (*info)->d11_info = MALLOCZ(NULL, sizeof(struct d11_info));
+ if ((*info)->d11_info == NULL) {
+ goto fail;
+ }
+
+ return TRUE;
+
+fail:
+ bcmwifi_monitor_delete(*info);
+
+ return FALSE;
+}
+
+void
+bcmwifi_set_corerev_major(monitor_info_t* info, int8 corerev)
+{
+ d11_info_t* d11i = info->d11_info;
+ d11i->major_revid = corerev;
+}
+
+void
+bcmwifi_set_corerev_minor(monitor_info_t* info, int8 corerev)
+{
+ d11_info_t* d11i = info->d11_info;
+ d11i->minor_revid = corerev;
+}
+
+void
+bcmwifi_monitor_delete(monitor_info_t* info)
+{
+ if (info == NULL) {
+ return;
+ }
+
+ if (info->d11_info != NULL) {
+ MFREE(NULL, info->d11_info, sizeof(struct d11_info));
+ }
+
+ MFREE(NULL, info, sizeof(struct monitor_info));
+}
+
+uint16
+bcmwifi_monitor(monitor_info_t* info, monitor_pkt_info_t* pkt_info, void *pkt, uint16 len,
+ void* pout, uint16* offset, uint16 pad_req, void *wrxh_in, void *wrxh_last)
+{
+ wlc_d11rxhdr_t *wrxh;
+ int hdr_ext_offset = 0;
+
+#ifdef MONITOR_DNGL_CONV
+ wrxh = (wlc_d11rxhdr_t *)wrxh_in;
+ if (info == NULL) {
+ return 0;
+ }
+#else
+
+#ifdef BCM_MON_QDBM_RSSI
+ hdr_ext_offset = WLC_SWRXHDR_EXT_LEN;
+#endif
+ /* move beyond the extension, if any */
+ pkt = (void *)((uint8 *)pkt + hdr_ext_offset);
+ wrxh = (wlc_d11rxhdr_t *)pkt;
+
+ if ((wrxh->rxhdr.lt80.RxStatus2 & htol16(RXS_AMSDU_MASK))) {
+ /* A-MSDU frame: collect subframes before conversion */
+ return wl_monitor_amsdu(info, pkt_info, wrxh, wrxh_last, pkt, len, pout, offset);
+ } else
+#endif /* !MONITOR_DNGL_CONV */
+ {
+ info->amsdu_len = 0; /* reset amsdu */
+ *offset = 0;
+ return wl_d11rx_to_rxsts(info, pkt_info, wrxh, wrxh_last,
+ pkt, len - hdr_ext_offset, pout, pad_req);
+ }
+}
diff --git a/bcmdhd.101.10.361.x/bcmwifi_radiotap.c b/bcmdhd.101.10.361.x/bcmwifi_radiotap.c
new file mode 100755
index 0000000..7832981
--- /dev/null
+++ b/bcmdhd.101.10.361.x/bcmwifi_radiotap.c
@@ -0,0 +1,1035 @@
+/*
+ * RadioTap utility routines for WL
+ * This file houses the functions used by the
+ * wl driver.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <bcmwifi_channels.h>
+#include <hndd11.h>
+#include <bcmwifi_radiotap.h>
+
+const struct rtap_field rtap_parse_info[] = {
+ {8, 8}, /* 0: IEEE80211_RADIOTAP_TSFT */
+ {1, 1}, /* 1: IEEE80211_RADIOTAP_FLAGS */
+ {1, 1}, /* 2: IEEE80211_RADIOTAP_RATE */
+ {4, 2}, /* 3: IEEE80211_RADIOTAP_CHANNEL */
+ {2, 2}, /* 4: IEEE80211_RADIOTAP_FHSS */
+ {1, 1}, /* 5: IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
+ {1, 1}, /* 6: IEEE80211_RADIOTAP_DBM_ANTNOISE */
+ {2, 2}, /* 7: IEEE80211_RADIOTAP_LOCK_QUALITY */
+ {2, 2}, /* 8: IEEE80211_RADIOTAP_TX_ATTENUATION */
+ {2, 2}, /* 9: IEEE80211_RADIOTAP_DB_TX_ATTENUATION */
+ {1, 1}, /* 10: IEEE80211_RADIOTAP_DBM_TX_POWER */
+ {1, 1}, /* 11: IEEE80211_RADIOTAP_ANTENNA */
+ {1, 1}, /* 12: IEEE80211_RADIOTAP_DB_ANTSIGNAL */
+ {1, 1}, /* 13: IEEE80211_RADIOTAP_DB_ANTNOISE */
+ {0, 0}, /* 14: netbsd */
+ {2, 2}, /* 15: IEEE80211_RADIOTAP_TXFLAGS */
+ {0, 0}, /* 16: missing */
+ {1, 1}, /* 17: IEEE80211_RADIOTAP_RETRIES */
+ {8, 4}, /* 18: IEEE80211_RADIOTAP_XCHANNEL */
+ {3, 1}, /* 19: IEEE80211_RADIOTAP_MCS */
+ {8, 4}, /* 20: IEEE80211_RADIOTAP_AMPDU_STATUS */
+ {12, 2}, /* 21: IEEE80211_RADIOTAP_VHT */
+ {0, 0}, /* 22: */
+ {0, 0}, /* 23: */
+ {0, 0}, /* 24: */
+ {0, 0}, /* 25: */
+ {0, 0}, /* 26: */
+ {0, 0}, /* 27: */
+ {0, 0}, /* 28: */
+ {0, 0}, /* 29: IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE */
+ {6, 2}, /* 30: IEEE80211_RADIOTAP_VENDOR_NAMESPACE */
+ {0, 0} /* 31: IEEE80211_RADIOTAP_EXT */
+};
+
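+/* Index of the it_present word currently being extended; reset for every
+ * packet in wl_rxsts_to_rtap().
+ */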
+static int bitmap = 0;
+
+void
+radiotap_add_vendor_ns(ieee80211_radiotap_header_t *hdr);
+
+void
+radiotap_encode_multi_rssi(monitor_pkt_rxsts_t* rxsts, ieee80211_radiotap_header_t *hdr);
+void
+radiotap_encode_bw_signaling(uint16 mask, struct wl_rxsts* rxsts, ieee80211_radiotap_header_t *hdr);
+#ifdef MONITOR_DNGL_CONV
+void radiotap_encode_alignpad(ieee80211_radiotap_header_t *hdr, uint16 pad_req);
+#endif
+
+static const uint8 brcm_oui[] = {0x00, 0x10, 0x18};
+
+static void
+wl_rtapParseReset(radiotap_parse_t *rtap)
+{
+ rtap->idx = 0; /* reset parse index */
+ rtap->offset = 0; /* reset current field pointer */
+}
+
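+/*
+ * Walk the presence bitmap up to 'search_idx', advancing the running offset
+ * past each present field after aligning it. Example (illustrative): with a
+ * current offset of 5 and a field alignment of 4, the field starts at 8.
+ */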
+static void*
+wl_rtapParseFindField(radiotap_parse_t *rtap, uint search_idx)
+{
+ uint idx; /* first bit index to parse */
+ uint32 btmap; /* presence bitmap */
+ uint offset, field_offset;
+ uint align, len;
+ void *ptr = NULL;
+
+ if (search_idx > IEEE80211_RADIOTAP_EXT)
+ return ptr;
+
+ if (search_idx < rtap->idx)
+ wl_rtapParseReset(rtap);
+
+ btmap = rtap->hdr->it_present;
+ idx = rtap->idx;
+ offset = rtap->offset;
+
+ /* loop through each field index until we get to the target idx */
+ while (idx <= search_idx) {
+ /* if field 'idx' is present, update the offset and check for a match */
+ if ((1 << idx) & btmap) {
+ /* if we hit a field for which we have no parse info
+ * we need to just bail out
+ */
+ if (rtap_parse_info[idx].align == 0)
+ break;
+
+ /* step past any alignment padding */
+ align = rtap_parse_info[idx].align;
+ len = rtap_parse_info[idx].len;
+
+ /* ROUNDUP */
+ field_offset = ((offset + (align - 1)) / align) * align;
+
+ /* if this field is not in the bounds of the header
+ * just bail out
+ */
+ if (field_offset + len > rtap->fields_len)
+ break;
+
+ /* did we find the field? */
+ if (idx == search_idx)
+ ptr = (uint8*)rtap->fields + field_offset;
+
+ /* step past this field */
+ offset = field_offset + len;
+ }
+
+ idx++;
+ }
+
+ rtap->idx = idx;
+ rtap->offset = offset;
+
+ return ptr;
+}
+
+ratespec_t
+wl_calcRspecFromRTap(uint8 *rtap_header)
+{
+ ratespec_t rspec = 0;
+ radiotap_parse_t rtap;
+ uint8 rate = 0;
+ uint8 flags = 0;
+ int flags_present = FALSE;
+ uint8 mcs = 0;
+ uint8 mcs_flags = 0;
+ uint8 mcs_known = 0;
+ int mcs_present = FALSE;
+ void *p;
+
+ wl_rtapParseInit(&rtap, rtap_header);
+
+ p = wl_rtapParseFindField(&rtap, IEEE80211_RADIOTAP_FLAGS);
+ if (p != NULL) {
+ flags_present = TRUE;
+ flags = ((uint8*)p)[0];
+ }
+
+ p = wl_rtapParseFindField(&rtap, IEEE80211_RADIOTAP_RATE);
+ if (p != NULL)
+ rate = ((uint8*)p)[0];
+
+ p = wl_rtapParseFindField(&rtap, IEEE80211_RADIOTAP_MCS);
+ if (p != NULL) {
+ mcs_present = TRUE;
+ mcs_known = ((uint8*)p)[0];
+ mcs_flags = ((uint8*)p)[1];
+ mcs = ((uint8*)p)[2];
+ }
+
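+ /* the radiotap rate field is in 500 kbps units; 2/4/11/22 are the
+ * 1/2/5.5/11 Mbps DSSS rates validated below
+ */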
+ if (rate != 0) {
+ /* validate the DSSS rates 1,2,5.5,11 */
+ if (rate == 2 || rate == 4 || rate == 11 || rate == 22) {
+ rspec = LEGACY_RSPEC(rate) | WL_RSPEC_OVERRIDE_RATE;
+ if (flags_present && (flags & IEEE80211_RADIOTAP_F_SHORTPRE)) {
+ rspec |= WL_RSPEC_OVERRIDE_MODE | WL_RSPEC_SHORT_PREAMBLE;
+ }
+ }
+ } else if (mcs_present) {
+ /* validate the MCS value */
+ if (mcs <= 23 || mcs == 32) {
+ uint32 override = 0;
+ if (mcs_known &
+ (IEEE80211_RADIOTAP_MCS_HAVE_GI |
+ IEEE80211_RADIOTAP_MCS_HAVE_FMT |
+ IEEE80211_RADIOTAP_MCS_HAVE_FEC)) {
+ override = WL_RSPEC_OVERRIDE_MODE;
+ }
+
+ rspec = HT_RSPEC(mcs) | WL_RSPEC_OVERRIDE_RATE;
+
+ if ((mcs_known & IEEE80211_RADIOTAP_MCS_HAVE_GI) &&
+ (mcs_flags & IEEE80211_RADIOTAP_MCS_SGI))
+ rspec |= WL_RSPEC_SGI;
+ if ((mcs_known & IEEE80211_RADIOTAP_MCS_HAVE_FMT) &&
+ (mcs_flags & IEEE80211_RADIOTAP_MCS_FMT_GF))
+ rspec |= WL_RSPEC_SHORT_PREAMBLE;
+ if ((mcs_known & IEEE80211_RADIOTAP_MCS_HAVE_FEC) &&
+ (mcs_flags & IEEE80211_RADIOTAP_MCS_FEC_LDPC))
+ rspec |= WL_RSPEC_LDPC;
+
+ rspec |= override;
+ }
+ }
+
+ return rspec;
+}
+
+bool
+wl_rtapFlags(uint8 *rtap_header, uint8* flags)
+{
+ radiotap_parse_t rtap;
+ void *p;
+
+ wl_rtapParseInit(&rtap, rtap_header);
+
+ p = wl_rtapParseFindField(&rtap, IEEE80211_RADIOTAP_FLAGS);
+ if (p != NULL) {
+ *flags = ((uint8*)p)[0];
+ }
+
+ return (p != NULL);
+}
+
+void
+wl_rtapParseInit(radiotap_parse_t *rtap, uint8 *rtap_header)
+{
+ uint rlen;
+ uint32 *present_word;
+ struct ieee80211_radiotap_header *hdr = (struct ieee80211_radiotap_header*)rtap_header;
+
+ bzero(rtap, sizeof(radiotap_parse_t));
+
+ rlen = hdr->it_len; /* total space in rtap_header */
+
+ /* If a presence word has the IEEE80211_RADIOTAP_EXT bit set it indicates
+ * that there is another presence word.
+ * Step over the presence words until we find the end of the list
+ */
+ present_word = &hdr->it_present;
+ /* remaining length in header past it_present */
+ rlen -= sizeof(struct ieee80211_radiotap_header);
+
+ while ((*present_word & (1<<IEEE80211_RADIOTAP_EXT)) && rlen >= 4) {
+ present_word++;
+ rlen -= 4; /* account for 4 bytes of present_word */
+ }
+
+ rtap->hdr = hdr;
+ rtap->fields = (uint8*)(present_word + 1);
+ rtap->fields_len = rlen;
+ wl_rtapParseReset(rtap);
+}
+
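+/*
+ * Build a BSD-style radiotap header. The DBM_ANTSIGNAL/DBM_ANTNOISE fields
+ * are optional: a zero signal or noise value drops the field from both the
+ * presence bitmap and the header length, which is why pad_len is tracked
+ * dynamically below.
+ */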
+uint
+wl_radiotap_rx(struct dot11_header *mac_header, wl_rxsts_t *rxsts, bsd_header_rx_t *bsd_header)
+{
+ int channel_frequency;
+ uint32 channel_flags;
+ uint8 flags;
+ uint8 *cp;
+ uint pad_len;
+ uint32 field_map;
+ uint16 fc;
+ uint bsd_header_len;
+ uint16 ampdu_flags = 0;
+
+ fc = LTOH16(mac_header->fc);
+ pad_len = 3;
+ field_map = WL_RADIOTAP_PRESENT_BASIC;
+
+ if (CHSPEC_IS2G(rxsts->chanspec)) {
+ channel_flags = IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_DYN;
+ channel_frequency = wf_channel2mhz(wf_chspec_ctlchan(rxsts->chanspec),
+ WF_CHAN_FACTOR_2_4_G);
+ } else if (CHSPEC_IS5G(rxsts->chanspec)) {
+ channel_flags = IEEE80211_CHAN_5GHZ | IEEE80211_CHAN_OFDM;
+ channel_frequency = wf_channel2mhz(wf_chspec_ctlchan(rxsts->chanspec),
+ WF_CHAN_FACTOR_5_G);
+ } else {
+ channel_flags = IEEE80211_CHAN_OFDM;
+ channel_frequency = wf_channel2mhz(wf_chspec_ctlchan(rxsts->chanspec),
+ WF_CHAN_FACTOR_6_G);
+ }
+
+ if ((rxsts->nfrmtype & WL_RXS_NFRM_AMPDU_FIRST) ||
+ (rxsts->nfrmtype & WL_RXS_NFRM_AMPDU_SUB)) {
+
+ ampdu_flags = IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN;
+ }
+
+ flags = IEEE80211_RADIOTAP_F_FCS;
+
+ if (rxsts->preamble == WL_RXS_PREAMBLE_SHORT)
+ flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
+
+ if ((fc & FC_WEP) == FC_WEP)
+ flags |= IEEE80211_RADIOTAP_F_WEP;
+
+ if ((fc & FC_MOREFRAG) == FC_MOREFRAG)
+ flags |= IEEE80211_RADIOTAP_F_FRAG;
+
+ if (rxsts->pkterror & WL_RXS_CRC_ERROR)
+ flags |= IEEE80211_RADIOTAP_F_BADFCS;
+
+ if (rxsts->encoding == WL_RXS_ENCODING_HT)
+ field_map = WL_RADIOTAP_PRESENT_HT;
+ else if (rxsts->encoding == WL_RXS_ENCODING_VHT)
+ field_map = WL_RADIOTAP_PRESENT_VHT;
+
+ bsd_header_len = sizeof(struct wl_radiotap_sna); /* start with sna size */
+ /* Test for signal/noise values and update length and field bitmap */
+ if (rxsts->signal == 0) {
+ field_map &= ~(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
+ pad_len = (pad_len - 1);
+ bsd_header_len--;
+ }
+
+ if (rxsts->noise == 0) {
+ field_map &= ~(1 << IEEE80211_RADIOTAP_DBM_ANTNOISE);
+ pad_len = (pad_len - 1);
+ bsd_header_len--;
+ }
+
+ if (rxsts->encoding == WL_RXS_ENCODING_HT ||
+ rxsts->encoding == WL_RXS_ENCODING_VHT) {
+ struct wl_radiotap_hdr *rtht = &bsd_header->hdr;
+ struct wl_radiotap_ht_tail *tail;
+
+ /*
+ * Header length is complicated due to dynamic
+ * presence of signal and noise fields
+ * and padding for xchannel following
+ * signal/noise/ant.
+ * Start with length of wl_radiotap_hdr plus
+ * signal/noise/ant
+ */
+ bsd_header_len += sizeof(struct wl_radiotap_hdr) + pad_len;
+ bsd_header_len += sizeof(struct wl_radiotap_xchan);
+ if ((rxsts->nfrmtype & WL_RXS_NFRM_AMPDU_FIRST) ||
+ (rxsts->nfrmtype & WL_RXS_NFRM_AMPDU_SUB)) {
+ bsd_header_len += sizeof(struct wl_radiotap_ampdu);
+ }
+ /* add the length of the tail end of the structure */
+ if (rxsts->encoding == WL_RXS_ENCODING_HT)
+ bsd_header_len += sizeof(struct wl_htmcs);
+ else if (rxsts->encoding == WL_RXS_ENCODING_VHT)
+ bsd_header_len += sizeof(struct wl_vhtmcs);
+ bzero((uint8 *)rtht, sizeof(*rtht));
+
+ rtht->ieee_radiotap.it_version = 0;
+ rtht->ieee_radiotap.it_pad = 0;
+ rtht->ieee_radiotap.it_len = (uint16)HTOL16(bsd_header_len);
+ rtht->ieee_radiotap.it_present = HTOL32(field_map);
+
+ rtht->tsft = HTOL64((uint64)rxsts->mactime);
+ rtht->flags = flags;
+ rtht->channel_freq = (uint16)HTOL16(channel_frequency);
+ rtht->channel_flags = (uint16)HTOL16(channel_flags);
+
+ cp = bsd_header->pad;
+ /* add in signal/noise/ant */
+ if (rxsts->signal != 0) {
+ *cp++ = (int8)rxsts->signal;
+ pad_len--;
+ }
+ if (rxsts->noise != 0) {
+ *cp++ = (int8)rxsts->noise;
+ pad_len--;
+ }
+ *cp++ = (int8)rxsts->antenna;
+ pad_len--;
+
+ tail = (struct wl_radiotap_ht_tail *)(bsd_header->ht);
+ /* Fill in XCHANNEL */
+ if (CHSPEC_IS40(rxsts->chanspec)) {
+ if (CHSPEC_SB_UPPER(rxsts->chanspec))
+ channel_flags |= IEEE80211_CHAN_HT40D;
+ else
+ channel_flags |= IEEE80211_CHAN_HT40U;
+ } else
+ channel_flags |= IEEE80211_CHAN_HT20;
+
+ tail->xc.xchannel_flags = HTOL32(channel_flags);
+ tail->xc.xchannel_freq = (uint16)HTOL16(channel_frequency);
+ tail->xc.xchannel_channel = wf_chspec_ctlchan(rxsts->chanspec);
+ tail->xc.xchannel_maxpower = (17*2);
+ /* fill in A-mpdu Status */
+ tail->ampdu.ref_num = mac_header->seq;
+ tail->ampdu.flags = ampdu_flags;
+ tail->ampdu.delimiter_crc = 0;
+ tail->ampdu.reserved = 0;
+
+ if (rxsts->encoding == WL_RXS_ENCODING_HT) {
+ tail->u.ht.mcs_index = rxsts->mcs;
+ tail->u.ht.mcs_known = (IEEE80211_RADIOTAP_MCS_HAVE_BW |
+ IEEE80211_RADIOTAP_MCS_HAVE_MCS |
+ IEEE80211_RADIOTAP_MCS_HAVE_GI |
+ IEEE80211_RADIOTAP_MCS_HAVE_FEC |
+ IEEE80211_RADIOTAP_MCS_HAVE_FMT);
+ tail->u.ht.mcs_flags = 0;
+
+ switch (rxsts->htflags & WL_RXS_HTF_BW_MASK) {
+ case WL_RXS_HTF_20L:
+ tail->u.ht.mcs_flags |= IEEE80211_RADIOTAP_MCS_BW_20L;
+ break;
+ case WL_RXS_HTF_20U:
+ tail->u.ht.mcs_flags |= IEEE80211_RADIOTAP_MCS_BW_20U;
+ break;
+ case WL_RXS_HTF_40:
+ tail->u.ht.mcs_flags |= IEEE80211_RADIOTAP_MCS_BW_40;
+ break;
+ default:
+ tail->u.ht.mcs_flags |= IEEE80211_RADIOTAP_MCS_BW_20;
+ break;
+ }
+
+ if (rxsts->htflags & WL_RXS_HTF_SGI)
+ tail->u.ht.mcs_flags |= IEEE80211_RADIOTAP_MCS_SGI;
+ if (rxsts->preamble & WL_RXS_PREAMBLE_HT_GF)
+ tail->u.ht.mcs_flags |= IEEE80211_RADIOTAP_MCS_FMT_GF;
+ if (rxsts->htflags & WL_RXS_HTF_LDPC)
+ tail->u.ht.mcs_flags |= IEEE80211_RADIOTAP_MCS_FEC_LDPC;
+ } else if (rxsts->encoding == WL_RXS_ENCODING_VHT) {
+ tail->u.vht.vht_known = (IEEE80211_RADIOTAP_VHT_HAVE_STBC |
+ IEEE80211_RADIOTAP_VHT_HAVE_TXOP_PS |
+ IEEE80211_RADIOTAP_VHT_HAVE_GI |
+ IEEE80211_RADIOTAP_VHT_HAVE_SGI_NSYM_DA |
+ IEEE80211_RADIOTAP_VHT_HAVE_LDPC_EXTRA |
+ IEEE80211_RADIOTAP_VHT_HAVE_BF |
+ IEEE80211_RADIOTAP_VHT_HAVE_BW |
+ IEEE80211_RADIOTAP_VHT_HAVE_GID |
+ IEEE80211_RADIOTAP_VHT_HAVE_PAID);
+
+ tail->u.vht.vht_flags = (uint8)HTOL16(rxsts->vhtflags);
+
+ switch (rxsts->bw) {
+ case WL_RXS_VHT_BW_20:
+ tail->u.vht.vht_bw = IEEE80211_RADIOTAP_VHT_BW_20;
+ break;
+ case WL_RXS_VHT_BW_40:
+ tail->u.vht.vht_bw = IEEE80211_RADIOTAP_VHT_BW_40;
+ break;
+ case WL_RXS_VHT_BW_20L:
+ tail->u.vht.vht_bw = IEEE80211_RADIOTAP_VHT_BW_20L;
+ break;
+ case WL_RXS_VHT_BW_20U:
+ tail->u.vht.vht_bw = IEEE80211_RADIOTAP_VHT_BW_20U;
+ break;
+ case WL_RXS_VHT_BW_80:
+ tail->u.vht.vht_bw = IEEE80211_RADIOTAP_VHT_BW_80;
+ break;
+ case WL_RXS_VHT_BW_40L:
+ tail->u.vht.vht_bw = IEEE80211_RADIOTAP_VHT_BW_40L;
+ break;
+ case WL_RXS_VHT_BW_40U:
+ tail->u.vht.vht_bw = IEEE80211_RADIOTAP_VHT_BW_40U;
+ break;
+ case WL_RXS_VHT_BW_20LL:
+ tail->u.vht.vht_bw = IEEE80211_RADIOTAP_VHT_BW_20LL;
+ break;
+ case WL_RXS_VHT_BW_20LU:
+ tail->u.vht.vht_bw = IEEE80211_RADIOTAP_VHT_BW_20LU;
+ break;
+ case WL_RXS_VHT_BW_20UL:
+ tail->u.vht.vht_bw = IEEE80211_RADIOTAP_VHT_BW_20UL;
+ break;
+ case WL_RXS_VHT_BW_20UU:
+ tail->u.vht.vht_bw = IEEE80211_RADIOTAP_VHT_BW_20UU;
+ break;
+ default:
+ tail->u.vht.vht_bw = IEEE80211_RADIOTAP_VHT_BW_20;
+ break;
+ }
+
+ tail->u.vht.vht_mcs_nss[0] = (rxsts->mcs << 4) |
+ (rxsts->nss & IEEE80211_RADIOTAP_VHT_NSS);
+ tail->u.vht.vht_mcs_nss[1] = 0;
+ tail->u.vht.vht_mcs_nss[2] = 0;
+ tail->u.vht.vht_mcs_nss[3] = 0;
+
+ tail->u.vht.vht_coding = rxsts->coding;
+ tail->u.vht.vht_group_id = rxsts->gid;
+ tail->u.vht.vht_partial_aid = HTOL16(rxsts->aid);
+ }
+ } else {
+ struct wl_radiotap_hdr *rtl = &bsd_header->hdr;
+
+ /*
+ * Header length is complicated due to dynamic presence of signal and noise fields
+ * Start with length of wl_radiotap_legacy plus signal/noise/ant
+ */
+ bsd_header_len = sizeof(struct wl_radiotap_hdr) + pad_len;
+ bzero((uint8 *)rtl, sizeof(*rtl));
+
+ rtl->ieee_radiotap.it_version = 0;
+ rtl->ieee_radiotap.it_pad = 0;
+ rtl->ieee_radiotap.it_len = (uint16)HTOL16(bsd_header_len);
+ rtl->ieee_radiotap.it_present = HTOL32(field_map);
+
+ rtl->tsft = HTOL64((uint64)rxsts->mactime);
+ rtl->flags = flags;
+ rtl->u.rate = (uint8)rxsts->datarate;
+ rtl->channel_freq = (uint16)HTOL16(channel_frequency);
+ rtl->channel_flags = (uint16)HTOL16(channel_flags);
+
+ /* add in signal/noise/ant */
+ cp = bsd_header->pad;
+ if (rxsts->signal != 0)
+ *cp++ = (int8)rxsts->signal;
+ if (rxsts->noise != 0)
+ *cp++ = (int8)rxsts->noise;
+ *cp++ = (int8)rxsts->antenna;
+ }
+ return bsd_header_len;
+}
+
+static int
+wl_radiotap_rx_channel_frequency(wl_rxsts_t *rxsts)
+{
+ if (CHSPEC_IS2G(rxsts->chanspec)) {
+ return wf_channel2mhz(wf_chspec_ctlchan(rxsts->chanspec),
+ WF_CHAN_FACTOR_2_4_G);
+ } else if (CHSPEC_IS5G(rxsts->chanspec)) {
+ return wf_channel2mhz(wf_chspec_ctlchan(rxsts->chanspec),
+ WF_CHAN_FACTOR_5_G);
+ } else {
+ return wf_channel2mhz(wf_chspec_ctlchan(rxsts->chanspec),
+ WF_CHAN_FACTOR_6_G);
+ }
+}
+
+static uint16
+wl_radiotap_rx_channel_flags(wl_rxsts_t *rxsts)
+{
+ if (CHSPEC_IS2G(rxsts->chanspec)) {
+ return (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_DYN);
+ } else if (CHSPEC_IS5G(rxsts->chanspec)) {
+ return (IEEE80211_CHAN_5GHZ | IEEE80211_CHAN_OFDM);
+ } else {
+ return (IEEE80211_CHAN_OFDM);
+ }
+}
+
+static uint8
+wl_radiotap_rx_flags(struct dot11_header *mac_header, wl_rxsts_t *rxsts)
+{
+ uint8 flags;
+ uint16 fc;
+
+ fc = ltoh16(mac_header->fc);
+
+ flags = IEEE80211_RADIOTAP_F_FCS;
+
+ if (rxsts->preamble == WL_RXS_PREAMBLE_SHORT)
+ flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
+
+ if (fc & FC_WEP)
+ flags |= IEEE80211_RADIOTAP_F_WEP;
+
+ if (fc & FC_MOREFRAG)
+ flags |= IEEE80211_RADIOTAP_F_FRAG;
+
+ return flags;
+}
+
+uint
+wl_radiotap_rx_legacy(struct dot11_header *mac_header,
+ wl_rxsts_t *rxsts, ieee80211_radiotap_header_t *rtap_hdr)
+{
+ int channel_frequency;
+ uint16 channel_flags;
+ uint8 flags;
+ uint16 rtap_len = LTOH16(rtap_hdr->it_len);
+ wl_radiotap_legacy_t *rtl = (wl_radiotap_legacy_t *)((uint8*)rtap_hdr + rtap_len);
+
+ rtap_len += sizeof(wl_radiotap_legacy_t);
+ rtap_hdr->it_len = HTOL16(rtap_len);
+ rtap_hdr->it_present |= HTOL32(WL_RADIOTAP_PRESENT_LEGACY);
+
+ channel_frequency = (uint16)wl_radiotap_rx_channel_frequency(rxsts);
+ channel_flags = wl_radiotap_rx_channel_flags(rxsts);
+ flags = wl_radiotap_rx_flags(mac_header, rxsts);
+
+ rtl->basic.tsft_l = HTOL32(rxsts->mactime);
+ rtl->basic.tsft_h = 0;
+ rtl->basic.flags = flags;
+ rtl->basic.rate = (uint8)rxsts->datarate;
+ rtl->basic.channel_freq = (uint16)HTOL16(channel_frequency);
+ rtl->basic.channel_flags = HTOL16(channel_flags);
+ rtl->basic.signal = (int8)rxsts->signal;
+ rtl->basic.noise = (int8)rxsts->noise;
+ rtl->basic.antenna = (int8)rxsts->antenna;
+
+ return 0;
+}
+
+uint
+wl_radiotap_rx_ht(struct dot11_header *mac_header,
+ wl_rxsts_t *rxsts, ieee80211_radiotap_header_t *rtap_hdr)
+{
+ int channel_frequency;
+ uint16 channel_flags;
+ uint32 xchannel_flags;
+ uint8 flags;
+
+ uint16 rtap_len = LTOH16(rtap_hdr->it_len);
+ wl_radiotap_ht_t *rtht = (wl_radiotap_ht_t *)((uint8*)rtap_hdr + rtap_len);
+
+ rtap_len += sizeof(wl_radiotap_ht_t);
+ rtap_hdr->it_len = HTOL16(rtap_len);
+ rtap_hdr->it_present |= HTOL32(WL_RADIOTAP_PRESENT_HT);
+
+ channel_frequency = (uint16)wl_radiotap_rx_channel_frequency(rxsts);
+ channel_flags = wl_radiotap_rx_channel_flags(rxsts);
+ flags = wl_radiotap_rx_flags(mac_header, rxsts);
+
+ rtht->basic.tsft_l = HTOL32(rxsts->mactime);
+ rtht->basic.tsft_h = 0;
+ rtht->basic.flags = flags;
+ rtht->basic.channel_freq = (uint16)HTOL16(channel_frequency);
+ rtht->basic.channel_flags = HTOL16(channel_flags);
+ rtht->basic.signal = (int8)rxsts->signal;
+ rtht->basic.noise = (int8)rxsts->noise;
+ rtht->basic.antenna = (uint8)rxsts->antenna;
+
+ /* xchannel */
+ xchannel_flags = (uint32)channel_flags;
+ if (CHSPEC_IS40(rxsts->chanspec)) {
+ if (CHSPEC_SB_UPPER(rxsts->chanspec))
+ xchannel_flags |= IEEE80211_CHAN_HT40D;
+ else {
+ xchannel_flags |= IEEE80211_CHAN_HT40U;
+ }
+ } else {
+ xchannel_flags |= IEEE80211_CHAN_HT20;
+ }
+
+ rtht->xchannel_flags = HTOL32(xchannel_flags);
+ rtht->xchannel_freq = (uint16)HTOL16(channel_frequency);
+ rtht->xchannel_channel = wf_chspec_ctlchan(rxsts->chanspec);
+ rtht->xchannel_maxpower = (17*2);
+
+ /* add standard MCS */
+ rtht->mcs_known = (IEEE80211_RADIOTAP_MCS_HAVE_BW |
+ IEEE80211_RADIOTAP_MCS_HAVE_MCS |
+ IEEE80211_RADIOTAP_MCS_HAVE_GI |
+ IEEE80211_RADIOTAP_MCS_HAVE_FEC |
+ IEEE80211_RADIOTAP_MCS_HAVE_FMT);
+
+ rtht->mcs_flags = 0;
+ switch (rxsts->htflags & WL_RXS_HTF_BW_MASK) {
+ case WL_RXS_HTF_20L:
+ rtht->mcs_flags |= IEEE80211_RADIOTAP_MCS_BW_20L;
+ break;
+ case WL_RXS_HTF_20U:
+ rtht->mcs_flags |= IEEE80211_RADIOTAP_MCS_BW_20U;
+ break;
+ case WL_RXS_HTF_40:
+ rtht->mcs_flags |= IEEE80211_RADIOTAP_MCS_BW_40;
+ break;
+ default:
+ rtht->mcs_flags |= IEEE80211_RADIOTAP_MCS_BW_20;
+ }
+
+ if (rxsts->htflags & WL_RXS_HTF_SGI) {
+ rtht->mcs_flags |= IEEE80211_RADIOTAP_MCS_SGI;
+ }
+ if (rxsts->preamble & WL_RXS_PREAMBLE_HT_GF) {
+ rtht->mcs_flags |= IEEE80211_RADIOTAP_MCS_FMT_GF;
+ }
+ if (rxsts->htflags & WL_RXS_HTF_LDPC) {
+ rtht->mcs_flags |= IEEE80211_RADIOTAP_MCS_FEC_LDPC;
+ }
+ rtht->mcs_index = rxsts->mcs;
+ rtht->ampdu_flags = 0;
+ rtht->ampdu_delim_crc = 0;
+
+ rtht->ampdu_ref_num = rxsts->ampdu_counter;
+
+ if (!(rxsts->nfrmtype & WL_RXS_NFRM_AMPDU_FIRST) &&
+ !(rxsts->nfrmtype & WL_RXS_NFRM_AMPDU_SUB)) {
+ rtht->ampdu_flags |= IEEE80211_RADIOTAP_AMPDU_IS_LAST;
+ } else {
+ rtht->ampdu_flags |= IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN;
+ }
+ return 0;
+}
+
+uint
+wl_radiotap_rx_vht(struct dot11_header *mac_header,
+ wl_rxsts_t *rxsts, ieee80211_radiotap_header_t *rtap_hdr)
+{
+ int channel_frequency;
+ uint16 channel_flags;
+ uint8 flags;
+
+ uint16 rtap_len = LTOH16(rtap_hdr->it_len);
+ wl_radiotap_vht_t *rtvht = (wl_radiotap_vht_t *)((uint8*)rtap_hdr + rtap_len);
+
+ rtap_len += sizeof(wl_radiotap_vht_t);
+ rtap_hdr->it_len = HTOL16(rtap_len);
+ rtap_hdr->it_present |= HTOL32(WL_RADIOTAP_PRESENT_VHT);
+
+ channel_frequency = (uint16)wl_radiotap_rx_channel_frequency(rxsts);
+ channel_flags = wl_radiotap_rx_channel_flags(rxsts);
+ flags = wl_radiotap_rx_flags(mac_header, rxsts);
+
+ rtvht->basic.tsft_l = HTOL32(rxsts->mactime);
+ rtvht->basic.tsft_h = 0;
+ rtvht->basic.flags = flags;
+ rtvht->basic.channel_freq = (uint16)HTOL16(channel_frequency);
+ rtvht->basic.channel_flags = HTOL16(channel_flags);
+ rtvht->basic.signal = (int8)rxsts->signal;
+ rtvht->basic.noise = (int8)rxsts->noise;
+ rtvht->basic.antenna = (uint8)rxsts->antenna;
+
+ rtvht->vht_known = (IEEE80211_RADIOTAP_VHT_HAVE_STBC |
+ IEEE80211_RADIOTAP_VHT_HAVE_TXOP_PS |
+ IEEE80211_RADIOTAP_VHT_HAVE_GI |
+ IEEE80211_RADIOTAP_VHT_HAVE_SGI_NSYM_DA |
+ IEEE80211_RADIOTAP_VHT_HAVE_LDPC_EXTRA |
+ IEEE80211_RADIOTAP_VHT_HAVE_BF |
+ IEEE80211_RADIOTAP_VHT_HAVE_BW |
+ IEEE80211_RADIOTAP_VHT_HAVE_GID |
+ IEEE80211_RADIOTAP_VHT_HAVE_PAID);
+
+ STATIC_ASSERT(WL_RXS_VHTF_STBC ==
+ IEEE80211_RADIOTAP_VHT_STBC);
+ STATIC_ASSERT(WL_RXS_VHTF_TXOP_PS ==
+ IEEE80211_RADIOTAP_VHT_TXOP_PS);
+ STATIC_ASSERT(WL_RXS_VHTF_SGI ==
+ IEEE80211_RADIOTAP_VHT_SGI);
+ STATIC_ASSERT(WL_RXS_VHTF_SGI_NSYM_DA ==
+ IEEE80211_RADIOTAP_VHT_SGI_NSYM_DA);
+ STATIC_ASSERT(WL_RXS_VHTF_LDPC_EXTRA ==
+ IEEE80211_RADIOTAP_VHT_LDPC_EXTRA);
+ STATIC_ASSERT(WL_RXS_VHTF_BF ==
+ IEEE80211_RADIOTAP_VHT_BF);
+
+ rtvht->vht_flags = (uint8)HTOL16(rxsts->vhtflags);
+
+ STATIC_ASSERT(WL_RXS_VHT_BW_20 ==
+ IEEE80211_RADIOTAP_VHT_BW_20);
+ STATIC_ASSERT(WL_RXS_VHT_BW_40 ==
+ IEEE80211_RADIOTAP_VHT_BW_40);
+ STATIC_ASSERT(WL_RXS_VHT_BW_20L ==
+ IEEE80211_RADIOTAP_VHT_BW_20L);
+ STATIC_ASSERT(WL_RXS_VHT_BW_20U ==
+ IEEE80211_RADIOTAP_VHT_BW_20U);
+ STATIC_ASSERT(WL_RXS_VHT_BW_80 ==
+ IEEE80211_RADIOTAP_VHT_BW_80);
+ STATIC_ASSERT(WL_RXS_VHT_BW_40L ==
+ IEEE80211_RADIOTAP_VHT_BW_40L);
+ STATIC_ASSERT(WL_RXS_VHT_BW_40U ==
+ IEEE80211_RADIOTAP_VHT_BW_40U);
+ STATIC_ASSERT(WL_RXS_VHT_BW_20LL ==
+ IEEE80211_RADIOTAP_VHT_BW_20LL);
+ STATIC_ASSERT(WL_RXS_VHT_BW_20LU ==
+ IEEE80211_RADIOTAP_VHT_BW_20LU);
+ STATIC_ASSERT(WL_RXS_VHT_BW_20UL ==
+ IEEE80211_RADIOTAP_VHT_BW_20UL);
+ STATIC_ASSERT(WL_RXS_VHT_BW_20UU ==
+ IEEE80211_RADIOTAP_VHT_BW_20UU);
+
+ rtvht->vht_bw = rxsts->bw;
+
+ rtvht->vht_mcs_nss[0] = (rxsts->mcs << 4) |
+ (rxsts->nss & IEEE80211_RADIOTAP_VHT_NSS);
+ rtvht->vht_mcs_nss[1] = 0;
+ rtvht->vht_mcs_nss[2] = 0;
+ rtvht->vht_mcs_nss[3] = 0;
+
+ STATIC_ASSERT(WL_RXS_VHTF_CODING_LDCP ==
+ IEEE80211_RADIOTAP_VHT_CODING_LDPC);
+
+ rtvht->vht_coding = rxsts->coding;
+ rtvht->vht_group_id = rxsts->gid;
+ rtvht->vht_partial_aid = HTOL16(rxsts->aid);
+
+ rtvht->ampdu_flags = 0;
+ rtvht->ampdu_delim_crc = 0;
+ rtvht->ampdu_ref_num = HTOL32(rxsts->ampdu_counter);
+ if (!(rxsts->nfrmtype & WL_RXS_NFRM_AMPDU_FIRST) &&
+ !(rxsts->nfrmtype & WL_RXS_NFRM_AMPDU_SUB)) {
+ rtvht->ampdu_flags |= HTOL16(IEEE80211_RADIOTAP_AMPDU_IS_LAST);
+ } else {
+ rtvht->ampdu_flags |= HTOL16(IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN);
+ }
+
+ return 0;
+}
+
+/* Rx status to radiotap conversion of HE type */
+uint
+wl_radiotap_rx_he(struct dot11_header *mac_header, wl_rxsts_t *rxsts,
+ ieee80211_radiotap_header_t *rtap_hdr)
+{
+ int channel_frequency;
+ uint16 channel_flags;
+ uint8 flags;
+ uint16 rtap_len = LTOH16(rtap_hdr->it_len);
+ wl_radiotap_he_t *rthe = (wl_radiotap_he_t *)((uint8*)rtap_hdr + rtap_len);
+
+ rtap_len += sizeof(wl_radiotap_he_t);
+ rtap_hdr->it_len = HTOL16(rtap_len);
+ rtap_hdr->it_present |= HTOL32(WL_RADIOTAP_PRESENT_HE);
+
+ channel_frequency = (uint16)wl_radiotap_rx_channel_frequency(rxsts);
+ channel_flags = wl_radiotap_rx_channel_flags(rxsts);
+ flags = wl_radiotap_rx_flags(mac_header, rxsts);
+
+ rthe->basic.tsft_l = HTOL32(rxsts->mactime);
+ rthe->basic.tsft_h = 0;
+ rthe->basic.flags = flags;
+ rthe->basic.channel_freq = (uint16)HTOL16(channel_frequency);
+ rthe->basic.channel_flags = HTOL16(channel_flags);
+ rthe->basic.signal = (int8)rxsts->signal;
+ rthe->basic.noise = (int8)rxsts->noise;
+ rthe->basic.antenna = (uint8)rxsts->antenna;
+
+ rthe->ampdu_flags = 0;
+ rthe->ampdu_delim_crc = 0;
+ rthe->ampdu_ref_num = HTOL32(rxsts->ampdu_counter);
+ if (!(rxsts->nfrmtype & WL_RXS_NFRM_AMPDU_FIRST) &&
+ !(rxsts->nfrmtype & WL_RXS_NFRM_AMPDU_SUB)) {
+ rthe->ampdu_flags |= HTOL16(IEEE80211_RADIOTAP_AMPDU_IS_LAST);
+ } else {
+ rthe->ampdu_flags |= HTOL16(IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN);
+ }
+
+ rthe->data1 = HTOL16(rxsts->data1);
+ rthe->data2 = HTOL16(rxsts->data2);
+ rthe->data3 = HTOL16(rxsts->data3);
+ rthe->data4 = HTOL16(rxsts->data4);
+ rthe->data5 = HTOL16(rxsts->data5);
+ rthe->data6 = HTOL16(rxsts->data6);
+
+ return 0;
+}
+
+/* Rx status to radiotap conversion of EHT type */
+uint
+wl_radiotap_rx_eht(struct dot11_header *mac_header, wl_rxsts_t *rxsts,
+ ieee80211_radiotap_header_t *rtap_hdr)
+{
+ ASSERT(!"wl_radiotap_rx_eht: not implemented!");
+ return 0;
+}
+
+uint16
+wl_rxsts_to_rtap(monitor_pkt_rxsts_t *pkt_rxsts, void *payload,
+ uint16 len, void* pout, uint16 pad_req)
+{
+ uint16 rtap_len = 0;
+ struct dot11_header* mac_header;
+ uint8* p = payload;
+ ieee80211_radiotap_header_t* rtap_hdr = (ieee80211_radiotap_header_t*)pout;
+ wl_rxsts_t* rxsts;
+
+ ASSERT(p && pkt_rxsts);
+ rxsts = pkt_rxsts->rxsts;
+ rtap_hdr->it_version = 0;
+ rtap_hdr->it_pad = 0;
+ rtap_hdr->it_len = HTOL16(sizeof(*rtap_hdr));
+ rtap_hdr->it_present = 0;
+ bitmap = 0;
+
+#ifdef MONITOR_DNGL_CONV
+ if (pad_req) {
+ radiotap_add_vendor_ns(rtap_hdr);
+ }
+#endif
+
+#ifdef BCM_MON_QDBM_RSSI
+ /* if per-core RSSI is present, add vendor element */
+ if (pkt_rxsts->corenum != 0) {
+ radiotap_add_vendor_ns(rtap_hdr);
+ }
+#endif
+ mac_header = (struct dot11_header *)(p);
+
+ if (rxsts->encoding == WL_RXS_ENCODING_EHT) {
+ wl_radiotap_rx_eht(mac_header, rxsts, rtap_hdr);
+ } else if (rxsts->encoding == WL_RXS_ENCODING_HE) {
+ wl_radiotap_rx_he(mac_header, rxsts, rtap_hdr);
+ } else if (rxsts->encoding == WL_RXS_ENCODING_VHT) {
+ wl_radiotap_rx_vht(mac_header, rxsts, rtap_hdr);
+ } else if (rxsts->encoding == WL_RXS_ENCODING_HT) {
+ wl_radiotap_rx_ht(mac_header, rxsts, rtap_hdr);
+ } else {
+ uint16 mask = ltoh16(mac_header->fc) & FC_KIND_MASK;
+ if (mask == FC_RTS || mask == FC_CTS) {
+ radiotap_add_vendor_ns(rtap_hdr);
+ }
+ wl_radiotap_rx_legacy(mac_header, rxsts, rtap_hdr);
+ if (mask == FC_RTS || mask == FC_CTS) {
+ radiotap_encode_bw_signaling(mask, rxsts, rtap_hdr);
+ }
+ }
+#ifdef BCM_MON_QDBM_RSSI
+ /* if per-core RSSI is present, add vendor element */
+ if (pkt_rxsts->corenum != 0) {
+ radiotap_encode_multi_rssi(pkt_rxsts, rtap_hdr);
+ }
+#endif
+#ifdef MONITOR_DNGL_CONV
+ if (pad_req) {
+ radiotap_encode_alignpad(rtap_hdr, pad_req);
+ }
+#endif
+ rtap_len = LTOH16(rtap_hdr->it_len);
+ len += rtap_len;
+
+#ifndef MONITOR_DNGL_CONV
+ if (len > MAX_MON_PKT_SIZE) {
+ return 0;
+ }
+ /* copy payload */
+ if (!(rxsts->nfrmtype & WL_RXS_NFRM_AMSDU_FIRST) &&
+ !(rxsts->nfrmtype & WL_RXS_NFRM_AMSDU_SUB)) {
+ memcpy((uint8*)pout + rtap_len, (uint8*)p, len - rtap_len);
+ }
+#endif
+#ifdef MONITOR_DNGL_CONV
+ return rtap_len;
+#else
+ return len;
+#endif
+}
+
+#ifdef BCM_MON_QDBM_RSSI
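+/* Per-core RSSI vendor namespace payload, as packed below:
+ * [core count] followed by a [dBm, decidBm] pair per core,
+ * rounded up to a 4-byte boundary.
+ */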
+void
+radiotap_encode_multi_rssi(monitor_pkt_rxsts_t* rxsts, ieee80211_radiotap_header_t *hdr)
+{
+ uint16 cur_len = LTOH16(hdr->it_len);
+ uint16 len = ROUNDUP(1 + rxsts->corenum * sizeof(monitor_pkt_rssi_t), 4);
+ int i = 0;
+ uint8 *vend_p = (uint8 *)hdr + cur_len;
+ radiotap_vendor_ns_t *vendor_ns = (radiotap_vendor_ns_t*)vend_p;
+ memcpy(vendor_ns->vend_oui, brcm_oui, sizeof(vendor_ns->vend_oui));
+ vendor_ns->sns = 1;
+ vendor_ns->skip_len = HTOL16(len);
+ vend_p += sizeof(*vendor_ns);
+ vend_p[0] = rxsts->corenum;
+ for (i = 0; i < rxsts->corenum; i++) {
+ vend_p[2*i + 1] = rxsts->rxpwr[i].dBm;
+ vend_p[2*i + 2] = rxsts->rxpwr[i].decidBm;
+ }
+ hdr->it_len = HTOL16(cur_len + sizeof(radiotap_vendor_ns_t) + len);
+}
+#endif /* BCM_MON_QDBM_RSSI */
+
+#ifdef MONITOR_DNGL_CONV
+#define ALIGN_4BYTES (4u)
+void
+radiotap_encode_alignpad(ieee80211_radiotap_header_t *hdr, uint16 pad_req)
+{
+ uint16 cur_len = LTOH16(hdr->it_len);
+ uint8 *vend_p = (uint8 *)hdr + cur_len;
+ radiotap_vendor_ns_t *vendor_ns = (radiotap_vendor_ns_t*)vend_p;
+ uint16 len;
+ uint16 align_pad = 0;
+
+ memcpy(vendor_ns->vend_oui, brcm_oui, sizeof(vendor_ns->vend_oui));
+ vendor_ns->sns = WL_RADIOTAP_BRCM_PAD_SNS;
+ len = cur_len + sizeof(radiotap_vendor_ns_t);
+ if (len % ALIGN_4BYTES != 0) {
+ align_pad = (ALIGN_4BYTES - (len % ALIGN_4BYTES));
+ }
+ hdr->it_len = HTOL16(len + pad_req + align_pad);
+ vendor_ns->skip_len = HTOL16(pad_req + align_pad);
+}
+#endif /* MONITOR_DNGL_CONV */
+
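+/* Encode VHT bandwidth signaling for non-HT RTS/CTS frames in a Broadcom
+ * vendor namespace; the dynamic-bandwidth flag is only meaningful for RTS.
+ */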
+void
+radiotap_encode_bw_signaling(uint16 mask,
+ struct wl_rxsts* rxsts, ieee80211_radiotap_header_t *hdr)
+{
+ uint16 cur_len = LTOH16(hdr->it_len);
+ uint8 *vend_p = (uint8 *)hdr + cur_len;
+ radiotap_vendor_ns_t *vendor_ns = (radiotap_vendor_ns_t *)vend_p;
+ wl_radiotap_nonht_vht_t* nonht_vht;
+
+ memcpy(vendor_ns->vend_oui, brcm_oui, sizeof(vendor_ns->vend_oui));
+ vendor_ns->sns = 0;
+ vendor_ns->skip_len = sizeof(wl_radiotap_nonht_vht_t);
+ nonht_vht = (wl_radiotap_nonht_vht_t *)(vend_p + sizeof(*vendor_ns));
+
+ /* VHT b/w signalling */
+ bzero((uint8 *)nonht_vht, sizeof(wl_radiotap_nonht_vht_t));
+ nonht_vht->len = WL_RADIOTAP_NONHT_VHT_LEN;
+ nonht_vht->flags |= WL_RADIOTAP_F_NONHT_VHT_BW;
+ nonht_vht->bw = (uint8)rxsts->bw_nonht;
+
+ if (mask == FC_RTS) {
+ if (rxsts->vhtflags & WL_RXS_VHTF_DYN_BW_NONHT) {
+ nonht_vht->flags |= WL_RADIOTAP_F_NONHT_VHT_DYN_BW;
+ }
+ }
+ hdr->it_len = HTOL16(cur_len + sizeof(radiotap_vendor_ns_t) +
+ sizeof(wl_radiotap_nonht_vht_t));
+}
+
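+/* Radiotap allows chained presence words: setting bit 31 (EXT) in the current
+ * it_present word signals that another follows. Each vendor namespace needs
+ * its own presence bit, so a new word is started whenever the current one
+ * already advertises one; header space grows in 8-byte (two-word) steps.
+ */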
+void
+radiotap_add_vendor_ns(ieee80211_radiotap_header_t *hdr)
+{
+ uint32 *it_present = &hdr->it_present;
+ uint16 len = LTOH16(hdr->it_len);
+
+ /* if the last bitmap has a vendor ns, add a new one */
+ if (it_present[bitmap] & (1 << IEEE80211_RADIOTAP_VENDOR_NAMESPACE)) {
+ it_present[bitmap] |= 1 << IEEE80211_RADIOTAP_EXT;
+ bitmap++;
+ /* align to 8 bytes */
+ if (bitmap%2) {
+ hdr->it_len = HTOL16(len + 8);
+ }
+ it_present[bitmap] = 1 << IEEE80211_RADIOTAP_VENDOR_NAMESPACE;
+ } else {
+ it_present[bitmap] |= 1 << IEEE80211_RADIOTAP_VENDOR_NAMESPACE;
+ }
+}
diff --git a/bcmdhd.101.10.361.x/bcmwifi_rates.c b/bcmdhd.101.10.361.x/bcmwifi_rates.c
new file mode 100755
index 0000000..c5cebbf
--- /dev/null
+++ b/bcmdhd.101.10.361.x/bcmwifi_rates.c
@@ -0,0 +1,607 @@
+/*
+ * Common [OS-independent] rate management
+ * 802.11 Networking Adapter Device Driver.
+ *
+ * Broadcom Proprietary and Confidential. Copyright (C) 2020,
+ * All Rights Reserved.
+ *
+ * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom;
+ * the contents of this file may not be disclosed to third parties,
+ * copied or duplicated in any form, in whole or in part, without
+ * the prior written permission of Broadcom.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Proprietary:>>
+ */
+
+#include <typedefs.h>
+#ifdef BCMDRIVER
+#include <osl.h>
+#else
+#include <assert.h>
+#ifndef ASSERT
+#define ASSERT(e) assert(e)
+#endif
+#ifndef ASSERT_FP
+#define ASSERT_FP(e) assert(e)
+#endif
+#endif /* BCMDRIVER */
+#include <802.11.h>
+#include <802.11ax.h>
+#include <bcmutils.h>
+
+#include <bcmwifi_rspec.h>
+#include <bcmwifi_rates.h>
+
+/* TODO: Consolidate rate utility functions from wlc_rate.c and bcmwifi_monitor.c
+ * into here if they're shared by non wl layer as well...
+ */
+
+/* ============================================ */
+/* Moved from wlc_rate.c */
+/* ============================================ */
+
+/* HE mcs info */
+struct ieee_80211_mcs_rate_info {
+ uint8 constellation_bits;
+ uint8 coding_q;
+ uint8 coding_d;
+ uint8 dcm_capable; /* 1 if dcm capable */
+};
+
+static const struct ieee_80211_mcs_rate_info wlc_mcs_info[] = {
+ { 1, 1, 2, 1 }, /* MCS 0: MOD: BPSK, CR 1/2, dcm capable */
+ { 2, 1, 2, 1 }, /* MCS 1: MOD: QPSK, CR 1/2, dcm capable */
+ { 2, 3, 4, 0 }, /* MCS 2: MOD: QPSK, CR 3/4, NOT dcm capable */
+ { 4, 1, 2, 1 }, /* MCS 3: MOD: 16QAM, CR 1/2, dcm capable */
+ { 4, 3, 4, 1 }, /* MCS 4: MOD: 16QAM, CR 3/4, dcm capable */
+ { 6, 2, 3, 0 }, /* MCS 5: MOD: 64QAM, CR 2/3, NOT dcm capable */
+ { 6, 3, 4, 0 }, /* MCS 6: MOD: 64QAM, CR 3/4, NOT dcm capable */
+ { 6, 5, 6, 0 }, /* MCS 7: MOD: 64QAM, CR 5/6, NOT dcm capable */
+ { 8, 3, 4, 0 }, /* MCS 8: MOD: 256QAM, CR 3/4, NOT dcm capable */
+ { 8, 5, 6, 0 }, /* MCS 9: MOD: 256QAM, CR 5/6, NOT dcm capable */
+ { 10, 3, 4, 0 }, /* MCS 10: MOD: 1024QAM, CR 3/4, NOT dcm capable */
+ { 10, 5, 6, 0 }, /* MCS 11: MOD: 1024QAM, CR 5/6, NOT dcm capable */
+#ifdef WL11BE
+ /* TODO: for now EHT shares this table with HE,
+ * create a new table if needed once we know more
+ * about EHT rate calculation...
+ */
+ { 12, 3, 4, 0 }, /* MCS 12: MOD: 4096QAM, CR 3/4, NOT dcm capable */
+ { 12, 5, 6, 0 }, /* MCS 13: MOD: 4096QAM, CR 5/6, NOT dcm capable */
+#endif
+};
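+
+/* Worked example (illustrative, not from the original source): for MCS 7 the
+ * table above gives 64QAM (6 constellation bits) at coding rate 5/6, i.e. each
+ * data subcarrier carries 6 * 5/6 = 5 data bits per symbol; a DCM-capable MCS
+ * transmitted with DCM halves that again, since each bit is duplicated onto a
+ * second subcarrier.
+ */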
+
+/* Nsd values Draft0.4 Table 26.63 onwards */
+static const uint wlc_he_nsd[] = {
+ 234, /* BW20 */
+ 468, /* BW40 */
+ 980, /* BW80 */
+ 1960, /* BW160 */
+#ifdef WL11BE
+ /* TODO: for now EHT shares this table with HE,
+ * create a new table if needed once we know more
+ * about EHT rate calculation...
+ */
+ 2940, /* BW240 */
+ 3920 /* BW320 */
+#endif
+};
+
+/* Nsd values Draft3.3 Table 28-15 */
+static const uint wlc_he_ru_nsd[] = {
+ 24, /* 26T */
+ 48, /* 52T */
+ 102, /* 106T */
+ 234, /* 242T/BW20 */
+ 468, /* 484T/BW40 */
+ 980, /* 996T/BW80 */
+ 1960, /* 2*996T/BW160 */
+#ifdef WL11BE
+ /* TODO: for now EHT shares this table with HE,
+ * create a new table if needed once we know more
+ * about EHT rate calculation...
+ */
+ 2940, /* 3*996T/BW240 */
+ 3920 /* 4*996T/BW320 */
+#endif
+};
+
+#define HE_RU_TO_NSD(ru_idx) \
+	(((ru_idx) < ARRAYSIZE(wlc_he_ru_nsd)) ? \
+	wlc_he_ru_nsd[(ru_idx)] : 0)
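+
+/* Illustrative example: ru_idx 3 selects the 242-tone RU (234 data
+ * subcarriers), which matches the full-BW20 Nsd entry above by design.
+ */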
+
+/* sym_len = 12.8 us. For calculation purpose, *10 */
+#define HE_SYM_LEN_FACTOR (128)
+
+/* GI values = 0.8 , 1.6 or 3.2 us. For calculation purpose, *10 */
+#define HE_GI_800us_FACTOR (8)
+#define HE_GI_1600us_FACTOR (16)
+#define HE_GI_3200us_FACTOR (32)
+
+/* To avoid ROM invalidation use the old macro as is... */
+#ifdef WL11BE
+#define HE_BW_TO_NSD(bwi) \
+	(((bwi) > 0u && (bwi) <= ARRAYSIZE(wlc_he_nsd)) ? \
+	wlc_he_nsd[(bwi) - 1u] : 0u)
+#else
+#define HE_BW_TO_NSD(bwi) \
+	(((bwi) > 0 && ((bwi) << WL_RSPEC_BW_SHIFT) <= WL_RSPEC_BW_160MHZ) ? \
+	wlc_he_nsd[(bwi)-1] : 0)
+#endif /* WL11BE */
+
+#define ksps 250 /* kilo symbols per sec, 4 us sym */
+
+#ifdef WL11BE
+/* Table "wlc_nsd" is derived from HT and VHT #defines below, but extended for HE
+ * for rate calculation purpose at a given NSS and bandwidth combination.
+ *
+ * It should and can only be used where the relative rate in kbps for a different
+ * NSS and bandwidth combination at a given mcs is needed, e.g. in fallback rate
+ * search. It should not and cannot be used where the absolute rate is calculated,
+ * i.e. the result would not agree with what the spec says otherwise.
+ *
+ * See Std 802.11-2016 "Table 21-61 VHT-MCSs for optional 160 MHz and 80+80 MHz, NSS = 8"
+ * for VHT, and P802.11ax/D6.0 "Table 27-111 HE-MCSs for 2x996-tone RU, NSS = 8" for HE,
+ * for 160Mhz bandwidth for resulting rate comparison.
+ *
+ * It's again extended for EHT 240/320MHz bandwidth, for the same purpose.
+ */
+static const uint16 wlc_nsd[] = {
+ 52, /* 20MHz */
+ 108, /* 40MHz */
+	234,	/* 80MHz */
+ 468, /* 160MHz */
+ 702, /* 240MHz */
+ 936, /* 320MHz */
+};
+
+#define BW_TO_NSD(bwi) \
+	(((bwi) > 0u && (bwi) <= ARRAYSIZE(wlc_nsd)) ? \
+	wlc_nsd[(bwi) - 1u] : 0u)
+
+static uint
+wf_nsd2ndbps(uint mcs, uint nss, uint nsd, bool dcm)
+{
+ uint Ndbps;
+
+ /* multiply number of spatial streams,
+ * bits per number from the constellation,
+ * and coding quotient
+ */
+ Ndbps = nsd * nss *
+ wlc_mcs_info[mcs].coding_q * wlc_mcs_info[mcs].constellation_bits;
+
+ /* adjust for the coding rate divisor */
+ Ndbps = Ndbps / wlc_mcs_info[mcs].coding_d;
+
+ /* take care of dcm: dcm divides R by 2. If not dcm mcs, ignore */
+ if (dcm) {
+ if (wlc_mcs_info[mcs].dcm_capable) {
+ Ndbps >>= 1u;
+ }
+ }
+
+ return Ndbps;
+}
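+
+/* Worked example (illustrative): HE MCS 11 (1024QAM, CR 5/6) with Nss = 2 on
+ * BW80 (Nsd = 980) gives Ndbps = 980 * 2 * 5 * 10 / 6 = 16333 data bits per
+ * symbol (integer division, exactly as wf_nsd2ndbps() computes it).
+ */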
+#else
+/* Nsd values for HT and VHT */
+#define Nsd_20MHz 52
+#define Nsd_40MHz 108
+#define Nsd_80MHz 234
+#define Nsd_160MHz 468
+#endif /* WL11BE */
+
+uint
+wf_he_mcs_to_Ndbps(uint mcs, uint nss, uint bw, bool dcm)
+{
+ uint Nsd;
+ uint Ndbps;
+
+ /* find the number of complex numbers per symbol */
+ Nsd = HE_BW_TO_NSD(bw >> WL_RSPEC_BW_SHIFT);
+
+#ifdef WL11BE
+ Ndbps = wf_nsd2ndbps(mcs, nss, Nsd, dcm);
+#else
+ /* multiply number of spatial streams,
+ * bits per number from the constellation,
+ * and coding quotient
+ */
+ Ndbps = Nsd * nss *
+ wlc_mcs_info[mcs].coding_q * wlc_mcs_info[mcs].constellation_bits;
+
+ /* adjust for the coding rate divisor */
+ Ndbps = Ndbps / wlc_mcs_info[mcs].coding_d;
+
+ /* take care of dcm: dcm divides R by 2. If not dcm mcs, ignore */
+ if (dcm) {
+ if (wlc_mcs_info[mcs].dcm_capable) {
+ Ndbps >>= 1;
+ }
+ }
+#endif /* WL11BE */
+
+ return Ndbps;
+}
+
+uint32
+wf_he_mcs_ru_to_ndbps(uint8 mcs, uint8 nss, bool dcm, uint8 ru_index)
+{
+ uint32 nsd;
+ uint32 ndbps;
+
+ /* find the number of complex numbers per symbol */
+ nsd = HE_RU_TO_NSD(ru_index);
+
+#ifdef WL11BE
+ ndbps = wf_nsd2ndbps(mcs, nss, nsd, dcm);
+#else
+ /* multiply number of spatial streams,
+ * bits per number from the constellation,
+ * and coding quotient
+ * Ndbps = Nss x Nsd x (Nbpscs x R) x (DCM/2)
+ */
+ ndbps = nsd * nss *
+ wlc_mcs_info[mcs].coding_q * wlc_mcs_info[mcs].constellation_bits;
+
+ /* adjust for the coding rate divisor */
+ ndbps = ndbps / wlc_mcs_info[mcs].coding_d;
+
+ /* take care of dcm: dcm divides R by 2. If not dcm mcs, ignore */
+ if (dcm && wlc_mcs_info[mcs].dcm_capable) {
+ ndbps >>= 1;
+ }
+#endif /* WL11BE */
+ return ndbps;
+}
+
+/**
+ * Returns the rate in [Kbps] units for a caller supplied MCS/bandwidth/Nss/Sgi/dcm combination.
+ * 'mcs' : a *single* spatial stream MCS (11ax)
+ * formula as per http:
+ * WLAN&preview=/323036249/344457953/11ax_rate_table.xlsx
+ * Symbol length = 12.8 usec [given as sym_len/10 below]
+ * GI value = 0.8 or 1.6 or 3.2 usec [given as GI_value/10 below]
+ * rate (Kbps) = (Nsd * Nbpscs * nss * (coding_q/coding_d) * 1000) / ((sym_len/10) + (GI_value/10))
+ * Note that, for calculation purpose, following is used. [to be careful with overflows]
+ * rate (Kbps) = (Nsd * Nbpscs * nss * (coding_q/coding_d) * 1000) / ((sym_len + GI_value) / 10)
+ * rate (Kbps) = (Nsd * Nbpscs * nss * (coding_q/coding_d) * 1000) / (sym_len + GI_value) * 10
+ */
+uint
+wf_he_mcs_to_rate(uint mcs, uint nss, uint bw, uint gi, bool dcm)
+{
+ uint rate;
+ uint rate_deno;
+
+ rate = HE_BW_TO_NSD(bw >> WL_RSPEC_BW_SHIFT);
+
+#ifdef WL11BE
+ rate = wf_nsd2ndbps(mcs, nss, rate, dcm);
+#else
+ /* Nbpscs: multiply by bits per number from the constellation in use */
+ rate = rate * wlc_mcs_info[mcs].constellation_bits;
+
+ /* Nss: adjust for the number of spatial streams */
+ rate = rate * nss;
+
+ /* R: adjust for the coding rate given as a quotient and divisor */
+ rate = (rate * wlc_mcs_info[mcs].coding_q) / wlc_mcs_info[mcs].coding_d;
+
+ /* take care of dcm: dcm divides R by 2. If not dcm mcs, ignore */
+ if (dcm) {
+ if (wlc_mcs_info[mcs].dcm_capable) {
+ rate >>= 1;
+ }
+ }
+#endif /* WL11BE */
+
+ /* add sym len factor */
+ rate_deno = HE_SYM_LEN_FACTOR;
+
+ /* get GI for denominator */
+ if (HE_IS_GI_3_2us(gi)) {
+ rate_deno += HE_GI_3200us_FACTOR;
+ } else if (HE_IS_GI_1_6us(gi)) {
+ rate_deno += HE_GI_1600us_FACTOR;
+ } else {
+ /* assuming HE_GI_0_8us */
+ rate_deno += HE_GI_800us_FACTOR;
+ }
+
+ /* as per above formula */
+ rate *= 1000; /* factor of 10. *100 to accommodate 2 places */
+ rate /= rate_deno;
+ rate *= 10; /* *100 was already done above. Splitting is done to avoid overflow. */
+
+ return rate;
+}
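+
+/* Worked example (illustrative): continuing the numbers above, HE MCS 11,
+ * Nss 2, BW80, 0.8us GI: Ndbps = 16333 and rate_deno = 128 + 8 = 136, so
+ * rate = 16333 * 1000 / 136 * 10 = 1200950 Kbps, i.e. the familiar ~1201 Mbps
+ * entry in the 11ax rate tables.
+ */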
+
+uint
+wf_mcs_to_Ndbps(uint mcs, uint nss, uint bw)
+{
+ uint Nsd;
+ uint Ndbps;
+
+ /* This calculation works for 11n HT and 11ac VHT if the HT mcs values
+ * are decomposed into a base MCS = MCS % 8, and Nss = 1 + MCS / 8.
+ * That is, HT MCS 23 is a base MCS = 7, Nss = 3
+ */
+
+ /* find the number of complex numbers per symbol */
+#ifdef WL11BE
+ Nsd = BW_TO_NSD(bw >> WL_RSPEC_BW_SHIFT);
+
+ Ndbps = wf_nsd2ndbps(mcs, nss, Nsd, FALSE);
+#else
+ if (bw == WL_RSPEC_BW_20MHZ) {
+ Nsd = Nsd_20MHz;
+ } else if (bw == WL_RSPEC_BW_40MHZ) {
+ Nsd = Nsd_40MHz;
+ } else if (bw == WL_RSPEC_BW_80MHZ) {
+ Nsd = Nsd_80MHz;
+ } else if (bw == WL_RSPEC_BW_160MHZ) {
+ Nsd = Nsd_160MHz;
+ } else {
+ Nsd = 0;
+ }
+
+ /* multiply number of spatial streams,
+ * bits per number from the constellation,
+ * and coding quotient
+ */
+ Ndbps = Nsd * nss *
+ wlc_mcs_info[mcs].coding_q * wlc_mcs_info[mcs].constellation_bits;
+
+ /* adjust for the coding rate divisor */
+ Ndbps = Ndbps / wlc_mcs_info[mcs].coding_d;
+#endif /* WL11BE */
+
+ return Ndbps;
+}
+
+/**
+ * Returns the rate in [Kbps] units for a caller supplied MCS/bandwidth/Nss/Sgi combination.
+ * 'mcs' : a *single* spatial stream MCS (11n or 11ac)
+ */
+uint
+wf_mcs_to_rate(uint mcs, uint nss, uint bw, int sgi)
+{
+ uint rate;
+
+ if (mcs == 32) {
+ /* just return fixed values for mcs32 instead of trying to parametrize */
+ rate = (sgi == 0) ? 6000 : 6778;
+ } else {
+ /* This calculation works for 11n HT, 11ac VHT and 11ax HE if the HT mcs values
+ * are decomposed into a base MCS = MCS % 8, and Nss = 1 + MCS / 8.
+ * That is, HT MCS 23 is a base MCS = 7, Nss = 3
+ */
+
+#if defined(WLPROPRIETARY_11N_RATES)
+ switch (mcs) {
+ case 87:
+ mcs = 8; /* MCS 8: MOD: 256QAM, CR 3/4 */
+ break;
+ case 88:
+ mcs = 9; /* MCS 9: MOD: 256QAM, CR 5/6 */
+ break;
+ default:
+ break;
+ }
+#endif /* WLPROPRIETARY_11N_RATES */
+
+#ifdef WL11BE
+ rate = wf_mcs_to_Ndbps(mcs, nss, bw);
+#else
+ /* find the number of complex numbers per symbol */
+ if (RSPEC_IS20MHZ(bw)) {
+ /* 4360 TODO: eliminate Phy const in rspec bw, then just compare
+ * as in 80 and 160 case below instead of RSPEC_IS20MHZ(bw)
+ */
+ rate = Nsd_20MHz;
+ } else if (RSPEC_IS40MHZ(bw)) {
+ /* 4360 TODO: eliminate Phy const in rspec bw, then just compare
+ * as in 80 and 160 case below instead of RSPEC_IS40MHZ(bw)
+ */
+ rate = Nsd_40MHz;
+ } else if (bw == WL_RSPEC_BW_80MHZ) {
+ rate = Nsd_80MHz;
+ } else if (bw == WL_RSPEC_BW_160MHZ) {
+ rate = Nsd_160MHz;
+ } else {
+ rate = 0;
+ }
+
+ /* multiply by bits per number from the constellation in use */
+ rate = rate * wlc_mcs_info[mcs].constellation_bits;
+
+ /* adjust for the number of spatial streams */
+ rate = rate * nss;
+
+ /* adjust for the coding rate given as a quotient and divisor */
+ rate = (rate * wlc_mcs_info[mcs].coding_q) / wlc_mcs_info[mcs].coding_d;
+#endif /* WL11BE */
+
+ /* multiply by Kilo symbols per sec to get Kbps */
+ rate = rate * ksps;
+
+ /* adjust the symbols per sec for SGI
+ * symbol duration is 4 us without SGI, and 3.6 us with SGI,
+ * so ratio is 10 / 9
+ */
+ if (sgi) {
+ /* add 4 for rounding of division by 9 */
+ rate = ((rate * 10) + 4) / 9;
+ }
+ }
+
+ return rate;
+} /* wf_mcs_to_rate */
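+
+/* Worked example (illustrative): VHT MCS 9, Nss 2, 80MHz: Nsd = 234, 256QAM
+ * gives 8 bits at CR 5/6, so Ndbps = 234 * 8 * 2 * 5/6 = 3120 and
+ * 3120 * 250 ksps = 780000 Kbps; with SGI this becomes
+ * (7800000 + 4) / 9 = 866667 Kbps, the well-known 866.7 Mbps figure.
+ */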
+
+/* This function needs an update to handle MU frame PLCP as well (MCS is conveyed via the VHT-SIGB
+ * field in case of MU frames). Currently, uCode support is needed to communicate
+ * MCS information for an MU frame.
+ *
+ * For VHT frame:
+ * bit 0-3 mcs index
+ * bit 6-4 nsts for VHT
+ * bit 7: 1 for VHT
+ * Note: bit 7 is used to indicate to the rate sel the mcs is a non HT mcs!
+ *
+ * Essentially it's the NSS:MCS portions of the rspec
+ */
+uint8
+wf_vht_plcp_to_rate(uint8 *plcp)
+{
+ uint8 rate, gid;
+ uint nss;
+ uint32 plcp0 = plcp[0] + (plcp[1] << 8); /* don't need plcp[2] */
+
+ gid = (plcp0 & VHT_SIGA1_GID_MASK) >> VHT_SIGA1_GID_SHIFT;
+ if (gid > VHT_SIGA1_GID_TO_AP && gid < VHT_SIGA1_GID_NOT_TO_AP) {
+ /* for MU packet we hacked Signal Tail field in VHT-SIG-A2 to save nss and mcs,
+ * copy from murate in d11 rx header.
+ * nss = bit 18:19 (for 11ac 2 bits to indicate maximum 4 nss)
+ * mcs = 20:23
+ */
+ rate = (plcp[5] & 0xF0) >> 4;
+ nss = ((plcp[5] & 0x0C) >> 2) + 1;
+ } else {
+ rate = (plcp[3] >> VHT_SIGA2_MCS_SHIFT);
+ nss = ((plcp0 & VHT_SIGA1_NSTS_SHIFT_MASK_USER0) >>
+ VHT_SIGA1_NSTS_SHIFT) + 1;
+ if (plcp0 & VHT_SIGA1_STBC)
+ nss = nss >> 1;
+ }
+ rate |= ((nss << WL_RSPEC_VHT_NSS_SHIFT) | WF_NON_HT_MCS);
+
+ return rate;
+}
+
+/**
+ * Function for computing NSS:MCS from HE SU PLCP or
+ * MCS:LTF-GI from HE MU PLCP
+ *
+ * based on rev3.10 :
+ * https://docs.google.com/spreadsheets/d/
+ * 1eP6ZCRrtnF924ds1R-XmbcH0IdQ0WNJpS1-FHmWeb9g/edit#gid=1492656555
+ *
+ * For HE SU frame:
+ * bit 0-3 mcs index
+ * bit 6-4 nsts for HE
+ * bit 7: 1 for HE
+ * Note: bit 7 is used to indicate to the rate sel the mcs is a non HT mcs!
+ * Essentially it's the NSS:MCS portions of the rspec
+ *
+ * For HE MU frame:
+ * bit 0-3 mcs index
+ * bit 4-5 LTF-GI value
+ * bit 6 STBC
+ * Essentially it's the MCS and LTF-GI portion of the rspec
+ */
+/* Macros to be used for calculating rate from PLCP */
+#define HE_SU_PLCP2RATE_MCS_MASK 0x0F
+#define HE_SU_PLCP2RATE_MCS_SHIFT 0
+#define HE_SU_PLCP2RATE_NSS_MASK 0x70
+#define HE_SU_PLCP2RATE_NSS_SHIFT 4
+#define HE_MU_PLCP2RATE_LTF_GI_MASK 0x30
+#define HE_MU_PLCP2RATE_LTF_GI_SHIFT 4
+#define HE_MU_PLCP2RATE_STBC_MASK 0x40
+#define HE_MU_PLCP2RATE_STBC_SHIFT 6
+
+uint8
+wf_he_plcp_to_rate(uint8 *plcp, bool is_mu)
+{
+ uint8 rate = 0;
+ uint8 nss = 0;
+ uint32 plcp0 = 0;
+ uint32 plcp1 = 0;
+ uint8 he_ltf_gi;
+ uint8 stbc;
+
+ ASSERT(plcp);
+
+ BCM_REFERENCE(nss);
+ BCM_REFERENCE(he_ltf_gi);
+
+ plcp0 = ((plcp[3] << 24) | (plcp[2] << 16) | (plcp[1] << 8) | plcp[0]);
+ plcp1 = ((plcp[5] << 8) | plcp[4]);
+
+ if (!is_mu) {
+ /* For SU frames return rate in MCS:NSS format */
+ rate = ((plcp0 & HE_SU_RE_SIGA_MCS_MASK) >> HE_SU_RE_SIGA_MCS_SHIFT);
+ nss = ((plcp0 & HE_SU_RE_SIGA_NSTS_MASK) >> HE_SU_RE_SIGA_NSTS_SHIFT) + 1;
+ rate |= ((nss << HE_SU_PLCP2RATE_NSS_SHIFT) | WF_NON_HT_MCS);
+ } else {
+ /* For MU frames return rate in MCS:LTF-GI format */
+ rate = (plcp0 & HE_MU_SIGA_SIGB_MCS_MASK) >> HE_MU_SIGA_SIGB_MCS_SHIFT;
+ he_ltf_gi = (plcp0 & HE_MU_SIGA_GI_LTF_MASK) >> HE_MU_SIGA_GI_LTF_SHIFT;
+ stbc = (plcp1 & HE_MU_SIGA_STBC_MASK) >> HE_MU_SIGA_STBC_SHIFT;
+
+ /* LTF-GI shall take the same position as NSS */
+ rate |= (he_ltf_gi << HE_MU_PLCP2RATE_LTF_GI_SHIFT);
+
+ /* STBC needs to be filled in bit 6 */
+ rate |= (stbc << HE_MU_PLCP2RATE_STBC_SHIFT);
+ }
+
+ return rate;
+}
+
+/**
+ * Function for computing NSS:MCS from EHT SU PLCP or
+ * MCS:LTF-GI from EHT MU PLCP
+ *
+ * TODO: add link to the HW spec.
+ * FIXME: do we really need to support mu?
+ */
+uint8
+wf_eht_plcp_to_rate(uint8 *plcp, bool is_mu)
+{
+ BCM_REFERENCE(plcp);
+ BCM_REFERENCE(is_mu);
+ ASSERT(!"wf_eht_plcp_to_rate: not implemented!");
+ return 0;
+}
+
+/* ============================================ */
+/* Moved from wlc_rate_def.c */
+/* ============================================ */
+
+/**
+ * Some functions require a single stream MCS as an input parameter. Given an MCS, this function
+ * returns the single spatial stream MCS equivalent.
+ */
+uint8
+wf_get_single_stream_mcs(uint mcs)
+{
+ if (mcs < 32) {
+ return mcs % 8;
+ }
+ switch (mcs) {
+ case 32:
+ return 32;
+ case 87:
+ case 99:
+ case 101:
+ return 87; /* MCS 87: SS 1, MOD: 256QAM, CR 3/4 */
+ default:
+ return 88; /* MCS 88: SS 1, MOD: 256QAM, CR 5/6 */
+ }
+}
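+
+/* Illustrative example: HT MCS 23 (3 streams of 64QAM at CR 5/6) decomposes
+ * as 23 % 8 = 7, so this function returns the single-stream MCS 7; the
+ * proprietary rates 87/99/101 collapse onto MCS 87 and the remaining
+ * proprietary rates onto MCS 88.
+ */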
+
+/* ============================================ */
+/* Moved from wlc_phy_iovar.c */
+/* ============================================ */
+
+const uint8 plcp_ofdm_rate_tbl[] = {
+ DOT11_RATE_48M, /* 8: 48Mbps */
+ DOT11_RATE_24M, /* 9: 24Mbps */
+ DOT11_RATE_12M, /* A: 12Mbps */
+ DOT11_RATE_6M, /* B: 6Mbps */
+ DOT11_RATE_54M, /* C: 54Mbps */
+ DOT11_RATE_36M, /* D: 36Mbps */
+ DOT11_RATE_18M, /* E: 18Mbps */
+ DOT11_RATE_9M /* F: 9Mbps */
+};
diff --git a/bcmdhd.101.10.361.x/bcmwifi_rspec.c b/bcmdhd.101.10.361.x/bcmwifi_rspec.c
new file mode 100755
index 0000000..e3c5957
--- /dev/null
+++ b/bcmdhd.101.10.361.x/bcmwifi_rspec.c
@@ -0,0 +1,274 @@
+/*
+ * Common [OS-independent] rate management
+ * 802.11 Networking Adapter Device Driver.
+ *
+ * Broadcom Proprietary and Confidential. Copyright (C) 2020,
+ * All Rights Reserved.
+ *
+ * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom;
+ * the contents of this file may not be disclosed to third parties,
+ * copied or duplicated in any form, in whole or in part, without
+ * the prior written permission of Broadcom.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Proprietary:>>
+ */
+
+#include <typedefs.h>
+#include <osl.h>
+#include <d11.h>
+#include <802.11ax.h>
+
+#include <bcmwifi_rspec.h>
+#include <bcmwifi_rates.h>
+
+/* TODO: Consolidate rspec utility functions from wlc_rate.c and bcmwifi_monitor.c
+ * into here if they're shared by non-wl layers as well...
+ */
+
+/* ============================================ */
+/* Moved from wlc_rate.c */
+/* ============================================ */
+
+/**
+ * Returns the rate in [Kbps] units.
+ */
+static uint
+wf_he_rspec_to_rate(ratespec_t rspec, uint max_mcs, uint max_nss)
+{
+ uint mcs = (rspec & WL_RSPEC_HE_MCS_MASK);
+ uint nss = (rspec & WL_RSPEC_HE_NSS_MASK) >> WL_RSPEC_HE_NSS_SHIFT;
+ bool dcm = (rspec & WL_RSPEC_DCM) != 0;
+ uint bw = RSPEC_BW(rspec);
+ uint gi = RSPEC_HE_LTF_GI(rspec);
+
+ ASSERT(mcs <= max_mcs);
+ ASSERT(nss <= max_nss);
+
+ if (mcs > max_mcs) {
+ return 0;
+ }
+ BCM_REFERENCE(max_nss);
+
+ return wf_he_mcs_to_rate(mcs, nss, bw, gi, dcm);
+} /* wf_he_rspec_to_rate */
+
+/* take a well formed ratespec_t arg and return phy rate in [Kbps] units.
+ * 'rsel' indicates if the call comes from rate selection.
+ */
+static uint
+_wf_rspec_to_rate(ratespec_t rspec, bool rsel)
+{
+ uint rate = (uint)(-1);
+
+ if (RSPEC_ISLEGACY(rspec)) {
+ rate = 500 * RSPEC2RATE(rspec);
+ } else if (RSPEC_ISHT(rspec)) {
+ uint mcs = (rspec & WL_RSPEC_HT_MCS_MASK);
+
+ ASSERT_FP(mcs <= 32 || IS_PROPRIETARY_11N_MCS(mcs));
+
+ if (mcs == 32) {
+ rate = wf_mcs_to_rate(mcs, 1, WL_RSPEC_BW_40MHZ, RSPEC_ISSGI(rspec));
+ } else {
+#if defined(WLPROPRIETARY_11N_RATES)
+ uint nss = GET_11N_MCS_NSS(mcs);
+ mcs = wf_get_single_stream_mcs(mcs);
+#else /* this ifdef prevents ROM abandons */
+ uint nss = 1 + (mcs / 8);
+ mcs = mcs % 8;
+#endif /* WLPROPRIETARY_11N_RATES */
+
+ rate = wf_mcs_to_rate(mcs, nss, RSPEC_BW(rspec), RSPEC_ISSGI(rspec));
+ }
+ } else if (RSPEC_ISVHT(rspec)) {
+ uint mcs = (rspec & WL_RSPEC_VHT_MCS_MASK);
+ uint nss = (rspec & WL_RSPEC_VHT_NSS_MASK) >> WL_RSPEC_VHT_NSS_SHIFT;
+
+ if (rsel) {
+ rate = wf_mcs_to_rate(mcs, nss, RSPEC_BW(rspec), 0);
+ } else {
+ ASSERT_FP(mcs <= WLC_MAX_VHT_MCS);
+ ASSERT_FP(nss <= 8);
+
+ rate = wf_mcs_to_rate(mcs, nss, RSPEC_BW(rspec), RSPEC_ISSGI(rspec));
+ }
+ } else if (RSPEC_ISHE(rspec)) {
+ rate = wf_he_rspec_to_rate(rspec, WLC_MAX_HE_MCS, 8);
+ } else if (RSPEC_ISEHT(rspec)) {
+ rate = wf_he_rspec_to_rate(rspec, WLC_MAX_EHT_MCS, 16);
+ } else {
+ ASSERT(0);
+ }
+
+ return (rate == 0) ? (uint)(-1) : rate;
+}
+
+/* take a well formed ratespec_t 'rspec' and return phy rate in [Kbps] units */
+uint
+wf_rspec_to_rate(ratespec_t rspec)
+{
+ return _wf_rspec_to_rate(rspec, FALSE);
+}
+
+/* take a well formed ratespec_t 'rspec' and return phy rate in [Kbps] units,
+ * FOR RATE SELECTION ONLY, WHICH USES LEGACY, HT, AND VHT RATES, AND VHT MCS
+ * COULD BE BIGGER THAN WLC_MAX_VHT_MCS!
+ */
+uint
+wf_rspec_to_rate_rsel(ratespec_t rspec)
+{
+ return _wf_rspec_to_rate(rspec, TRUE);
+}
+
+#ifdef BCMDBG
+/* Return the rate in 500Kbps units if the rspec is legacy rate, assert otherwise */
+uint
+wf_rspec_to_rate_legacy(ratespec_t rspec)
+{
+ ASSERT(RSPEC_ISLEGACY(rspec));
+
+ return rspec & WL_RSPEC_LEGACY_RATE_MASK;
+}
+#endif
+
+/**
+ * Function for computing RSPEC from EHT PLCP
+ *
+ * TODO: add link to the HW spec.
+ */
+ratespec_t
+wf_eht_plcp_to_rspec(uint8 *plcp)
+{
+	BCM_REFERENCE(plcp);
+	ASSERT(!"wf_eht_plcp_to_rspec: not implemented!");
+ return 0;
+}
+
+/**
+ * Function for computing RSPEC from HE PLCP
+ *
+ * based on rev3.10 :
+ * https://docs.google.com/spreadsheets/d/
+ * 1eP6ZCRrtnF924ds1R-XmbcH0IdQ0WNJpS1-FHmWeb9g/edit#gid=1492656555
+ */
+ratespec_t
+wf_he_plcp_to_rspec(uint8 *plcp)
+{
+ uint8 rate;
+ uint8 nss;
+ uint8 bw;
+ uint8 gi;
+ ratespec_t rspec;
+
+ /* HE plcp - 6 B */
+ uint32 plcp0;
+ uint16 plcp1;
+
+ ASSERT(plcp);
+
+ plcp0 = ((plcp[3] << 24) | (plcp[2] << 16) | (plcp[1] << 8) | plcp[0]);
+ plcp1 = ((plcp[5] << 8) | plcp[4]);
+
+ /* TBD: only SU supported now */
+ rate = (plcp0 & HE_SU_RE_SIGA_MCS_MASK) >> HE_SU_RE_SIGA_MCS_SHIFT;
+ /* PLCP contains (NSTS - 1) while RSPEC stores NSTS */
+ nss = ((plcp0 & HE_SU_RE_SIGA_NSTS_MASK) >> HE_SU_RE_SIGA_NSTS_SHIFT) + 1;
+ rspec = HE_RSPEC(rate, nss);
+
+ /* GI info comes from CP/LTF */
+ gi = (plcp0 & HE_SU_RE_SIGA_GI_LTF_MASK) >> HE_SU_RE_SIGA_GI_LTF_SHIFT;
+ rspec |= HE_GI_TO_RSPEC(gi);
+
+ /* b19-b20 of plcp indicate bandwidth in the format (2-bit):
+ * 0 for 20M, 1 for 40M, 2 for 80M, and 3 for 80p80/160M
+ * SW store this BW in rspec format (3-bit):
+ * 1 for 20M, 2 for 40M, 3 for 80M, and 4 for 80p80/160M
+ */
+ bw = ((plcp0 & HE_SU_SIGA_BW_MASK) >> HE_SU_SIGA_BW_SHIFT) + 1;
+ rspec |= (bw << WL_RSPEC_BW_SHIFT);
+
+ if (plcp1 & HE_SU_RE_SIGA_BEAMFORM_MASK)
+ rspec |= WL_RSPEC_TXBF;
+ if (plcp1 & HE_SU_RE_SIGA_CODING_MASK)
+ rspec |= WL_RSPEC_LDPC;
+ if (plcp1 & HE_SU_RE_SIGA_STBC_MASK)
+ rspec |= WL_RSPEC_STBC;
+ if (plcp0 & HE_SU_RE_SIGA_DCM_MASK)
+ rspec |= WL_RSPEC_DCM;
+
+ return rspec;
+}
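+
+/* Illustrative example: a SIG-A BW field of 2 (80MHz in the 2-bit PLCP
+ * format) becomes (2 + 1) = 3 in the rspec's 3-bit encoding, i.e. the stored
+ * value 3 << WL_RSPEC_BW_SHIFT matches WL_RSPEC_BW_80MHZ.
+ */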
+
+ratespec_t
+wf_vht_plcp_to_rspec(uint8 *plcp)
+{
+ uint8 rate;
+ uint vht_sig_a1, vht_sig_a2;
+ ratespec_t rspec;
+
+ ASSERT(plcp);
+
+ rate = wf_vht_plcp_to_rate(plcp) & ~WF_NON_HT_MCS;
+
+ vht_sig_a1 = plcp[0] | (plcp[1] << 8);
+ vht_sig_a2 = plcp[3] | (plcp[4] << 8);
+
+ rspec = VHT_RSPEC((rate & WL_RSPEC_VHT_MCS_MASK),
+ (rate >> WL_RSPEC_VHT_NSS_SHIFT));
+#if ((((VHT_SIGA1_20MHZ_VAL + 1) << WL_RSPEC_BW_SHIFT) != WL_RSPEC_BW_20MHZ) || \
+ (((VHT_SIGA1_40MHZ_VAL + 1) << WL_RSPEC_BW_SHIFT) != WL_RSPEC_BW_40MHZ) || \
+ (((VHT_SIGA1_80MHZ_VAL + 1) << WL_RSPEC_BW_SHIFT) != WL_RSPEC_BW_80MHZ) || \
+ (((VHT_SIGA1_160MHZ_VAL + 1) << WL_RSPEC_BW_SHIFT) != WL_RSPEC_BW_160MHZ))
+#error "VHT SIGA BW mapping to RSPEC BW needs correction"
+#endif
+ rspec |= ((vht_sig_a1 & VHT_SIGA1_160MHZ_VAL) + 1) << WL_RSPEC_BW_SHIFT;
+ if (vht_sig_a1 & VHT_SIGA1_STBC)
+ rspec |= WL_RSPEC_STBC;
+ if (vht_sig_a2 & VHT_SIGA2_GI_SHORT)
+ rspec |= WL_RSPEC_SGI;
+ if (vht_sig_a2 & VHT_SIGA2_CODING_LDPC)
+ rspec |= WL_RSPEC_LDPC;
+
+ return rspec;
+}
+
+ratespec_t
+wf_ht_plcp_to_rspec(uint8 *plcp)
+{
+ return HT_RSPEC(plcp[0] & MIMO_PLCP_MCS_MASK);
+}
+
+/* ============================================ */
+/* Moved from wlc_rate_def.c */
+/* ============================================ */
+
+/**
+ * Rate info per rate: tells for *pre* 802.11n rates whether a given rate is OFDM or not and its
+ * phy_rate value. Table index is a rate in [500Kbps] units, from 0 to 54Mbps.
+ * Contents of a table element:
+ * d[7] : 1=OFDM rate, 0=DSSS/CCK rate
+ * d[3:0] if DSSS/CCK rate:
+ * index into the 'M_RATE_TABLE_B' table maintained by ucode in shm
+ * d[3:0] if OFDM rate: encode rate per 802.11a-1999 sec 17.3.4.1, with lsb transmitted first.
+ * index into the 'M_RATE_TABLE_A' table maintained by ucode in shm
+ */
+/* Note: make this table 128 elements so the result of (rspec & 0x7f) can be safely
+ * used as the index into this table...
+ */
+const uint8 rate_info[128] = {
+ /* 0 1 2 3 4 5 6 7 8 9 */
+/* 0 */ 0x00, 0x00, 0x0a, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00,
+/* 10 */ 0x00, 0x37, 0x8b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8f, 0x00,
+/* 20 */ 0x00, 0x00, 0x6e, 0x00, 0x8a, 0x00, 0x00, 0x00, 0x00, 0x00,
+/* 30 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8e, 0x00, 0x00, 0x00,
+/* 40 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x89, 0x00,
+/* 50 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+/* 60 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+/* 70 */ 0x00, 0x00, 0x8d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+/* 80 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+/* 90 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x88, 0x00, 0x00, 0x00,
+/* 100 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8c,
+/* ------------- guard ------------ */ 0x00,
+/* 110 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+/* 120 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
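+
+/* Illustrative usage sketch (not part of the original file):
+ *
+ *	uint8 ri = rate_info[rspec & 0x7f];
+ *	bool is_ofdm = (ri & 0x80) != 0;
+ *	uint8 phy_rate = ri & 0x0f;
+ *
+ * e.g. a legacy 6Mbps rspec has rate 12 (in 500Kbps units) and
+ * rate_info[12] == 0x8b: bit 7 marks it OFDM and the low nibble 0xb is the
+ * 802.11a-1999 17.3.4.1 encoding for 6Mbps.
+ */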
diff --git a/bcmdhd.101.10.361.x/bcmwpa.c b/bcmdhd.101.10.361.x/bcmwpa.c
new file mode 100755
index 0000000..62738c1
--- /dev/null
+++ b/bcmdhd.101.10.361.x/bcmwpa.c
@@ -0,0 +1,2648 @@
+/*
+ * bcmwpa.c - shared WPA-related functions
+ *
+ * Broadcom Proprietary and Confidential. Copyright (C) 2020,
+ * All Rights Reserved.
+ *
+ * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom;
+ * the contents of this file may not be disclosed to third parties,
+ * copied or duplicated in any form, in whole or in part, without
+ * the prior written permission of Broadcom.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Proprietary:>>
+ */
+
+/* include wl driver config file if this file is compiled for driver */
+#ifdef BCMDRIVER
+#include <osl.h>
+/* HACK: this case for external supplicant use */
+#else
+#include <string.h>
+#if defined(BCMEXTSUP)
+#include <bcm_osl.h>
+#else
+#ifndef ASSERT
+#define ASSERT(exp)
+#endif
+#endif /* BCMEXTSUP */
+#endif /* BCMDRIVER */
+
+#include <ethernet.h>
+#include <eapol.h>
+#include <802.11.h>
+#include <wpa.h>
+#include <802.11r.h>
+
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <bcmwpa.h>
+#include <aeskeywrap.h>
+
+#include <bcmstdlib_s.h>
+
+#include <wlioctl.h>
+
+#include <bcmutils.h>
+#include <bcmwpa.h>
+#ifdef WL_OCV
+#include <bcm_ocv.h>
+#endif /* WL_OCV */
+
+#if defined(BCMSUP_PSK) || defined(WLFBT) || defined(BCMAUTH_PSK) || \
+ defined(WL_OKC) || defined(WLTDLS) || defined(GTKOE) || defined(WLHOSTFBT)
+#ifdef WLHOSTFBT
+#include <string.h>
+#endif
+#endif /* defined(BCMSUP_PSK) || defined(WLFBT) || defined(BCMAUTH_PSK) ||
+ * defined(WL_OKC) || defined(WLTDLS) || defined(GTKOE) || defined(WLHOSTFBT)
+ */
+
+/* prefix strings */
+#define PMK_NAME_PFX "PMK Name"
+#define FT_PTK_PFX "FT-PTK"
+#define FT_R0_PFX "FT-R0"
+#define FT_R0N_PFX "FT-R0N"
+#define FT_R1_PFX "FT-R1"
+#define FT_R1N_PFX "FT-R1N"
+#define WPA_PTK_PFX "Pairwise key expansion"
+#define TDLS_PMK_PFX "TDLS PMK"
+/* end prefix strings */
+
+#ifndef BIT
+#define BIT(x) (1 << (x))
+#endif
+
+#define PRF_PREFIXES_NUM 5u
+
+typedef struct key_length_entry {
+ uint8 suite;
+ uint8 len;
+} key_length_entry_t;
+
+/* EAPOL key(PMK/KCK/KEK/TK) length lookup tables */
+static const key_length_entry_t eapol_pmk_len[] = {
+ {RSN_AKM_SUITEB_SHA384_1X, EAPOL_WPA_PMK_SHA384_LEN},
+ {RSN_AKM_FBT_SHA384_1X, EAPOL_WPA_PMK_SHA384_LEN},
+ {RSN_AKM_FBT_SHA384_PSK, EAPOL_WPA_PMK_SHA384_LEN},
+ {0u, EAPOL_WPA_PMK_DEFAULT_LEN} /* default */
+};
+
+static const key_length_entry_t eapol_kck_mic_len[] = {
+ {RSN_AKM_SUITEB_SHA384_1X, EAPOL_WPA_KCK_MIC_SHA384_LEN},
+ {RSN_AKM_FILS_SHA256, 0u},
+ {RSN_AKM_FILS_SHA384, 0u},
+ {RSN_AKM_FBT_SHA256_FILS, EAPOL_WPA_KCK_MIC_DEFAULT_LEN},
+ {RSN_AKM_FBT_SHA384_FILS, EAPOL_WPA_KCK2_SHA384_LEN},
+ {RSN_AKM_OWE, EAPOL_WPA_KCK_MIC_DEFAULT_LEN},
+ {RSN_AKM_FBT_SHA384_1X, EAPOL_WPA_KCK_MIC_SHA384_LEN},
+ {RSN_AKM_FBT_SHA384_PSK, EAPOL_WPA_KCK_MIC_SHA384_LEN},
+ {0u, EAPOL_WPA_KCK_MIC_DEFAULT_LEN} /* default */
+};
+
+static const key_length_entry_t eapol_kck_len[] = {
+ {RSN_AKM_SUITEB_SHA384_1X, EAPOL_WPA_KCK_SHA384_LEN},
+ {RSN_AKM_FILS_SHA256, 0u},
+ {RSN_AKM_FILS_SHA384, 0u},
+ {RSN_AKM_FBT_SHA256_FILS, 0u},
+ {RSN_AKM_FBT_SHA384_FILS, 0u},
+ {RSN_AKM_OWE, EAPOL_WPA_KCK_DEFAULT_LEN},
+ {RSN_AKM_FBT_SHA384_1X, EAPOL_WPA_KCK_SHA384_LEN},
+ {RSN_AKM_FBT_SHA384_PSK, EAPOL_WPA_KCK_SHA384_LEN},
+ {0u, EAPOL_WPA_KCK_DEFAULT_LEN} /* default */
+};
+
+static const key_length_entry_t eapol_kek_len[] = {
+ {RSN_AKM_FILS_SHA384, EAPOL_WPA_ENCR_KEY_MAX_LEN},
+ {RSN_AKM_FBT_SHA384_FILS, EAPOL_WPA_ENCR_KEY_MAX_LEN},
+ {RSN_AKM_SUITEB_SHA384_1X, EAPOL_WPA_ENCR_KEY_MAX_LEN / 2},
+ {RSN_AKM_FILS_SHA256, EAPOL_WPA_ENCR_KEY_MAX_LEN / 2},
+ {RSN_AKM_FBT_SHA256_FILS, EAPOL_WPA_ENCR_KEY_MAX_LEN / 2},
+ {RSN_AKM_OWE, EAPOL_WPA_ENCR_KEY_DEFAULT_LEN},
+ {RSN_AKM_FBT_SHA384_1X, EAPOL_WPA_ENCR_KEY_MAX_LEN / 2},
+ {RSN_AKM_FBT_SHA384_PSK, EAPOL_WPA_ENCR_KEY_MAX_LEN / 2},
+ {0u, EAPOL_WPA_ENCR_KEY_DEFAULT_LEN} /* default */
+};
+
+static const key_length_entry_t eapol_tk_len[] = {
+ {WPA_CIPHER_CCMP_256, EAPOL_WPA_TEMP_ENCR_KEY_MAX_LEN},
+ {WPA_CIPHER_AES_GCM256, EAPOL_WPA_TEMP_ENCR_KEY_MAX_LEN},
+ {WPA_CIPHER_BIP_GMAC_256, EAPOL_WPA_TEMP_ENCR_KEY_MAX_LEN},
+ {WPA_CIPHER_BIP_CMAC_256, EAPOL_WPA_TEMP_ENCR_KEY_MAX_LEN},
+ {WPA_CIPHER_AES_CCM, EAPOL_WPA_TEMP_ENCR_KEY_MAX_LEN / 2},
+ {WPA_CIPHER_AES_GCM, EAPOL_WPA_TEMP_ENCR_KEY_MAX_LEN / 2},
+ {WPA_CIPHER_BIP_GMAC_128, EAPOL_WPA_TEMP_ENCR_KEY_MAX_LEN / 2},
+ {WPA_CIPHER_TKIP, EAPOL_WPA_TEMP_ENCR_KEY_MAX_LEN},
+ {0u, 0u} /* default */
+};
+
+#if defined(WL_FILS) && defined(WLFBT)
+static const key_length_entry_t eapol_kck2_len[] = {
+ {RSN_AKM_FBT_SHA256_FILS, EAPOL_WPA_KCK2_SHA256_LEN},
+ {RSN_AKM_FBT_SHA384_FILS, EAPOL_WPA_KCK2_SHA384_LEN},
+ {0u, 0u} /* default */
+};
+
+static const key_length_entry_t eapol_kek2_len[] = {
+ {RSN_AKM_FBT_SHA256_FILS, EAPOL_WPA_KEK2_SHA256_LEN},
+ {RSN_AKM_FBT_SHA384_FILS, EAPOL_WPA_KEK2_SHA384_LEN},
+ {0u, 0u} /* default */
+};
+#endif /* WL_FILS && WLFBT */
+
+typedef struct key_length_lookup {
+ const eapol_key_type_t key;
+ const key_length_entry_t *key_entry;
+} key_length_lookup_t;
+
+static const key_length_lookup_t eapol_key_lookup_tbl[] = {
+ {EAPOL_KEY_PMK, eapol_pmk_len},
+ {EAPOL_KEY_KCK_MIC, eapol_kck_mic_len},
+ {EAPOL_KEY_KCK, eapol_kck_len},
+ {EAPOL_KEY_KEK, eapol_kek_len},
+ {EAPOL_KEY_TK, eapol_tk_len},
+#if defined(WL_FILS) && defined(WLFBT)
+ {EAPOL_KEY_KCK2, eapol_kck2_len},
+ {EAPOL_KEY_KEK2, eapol_kek2_len},
+#endif /* WL_FILS && WLFBT */
+};
+
+typedef struct rsn_akm_lookup_entry {
+ const rsn_akm_t rsn_akm;
+ const sha2_hash_type_t hash_type;
+} rsn_akm_lookup_entry_t;
+
+static const rsn_akm_lookup_entry_t rsn_akm_lookup_tbl[] = {
+ {RSN_AKM_NONE, HASH_SHA1},
+ {RSN_AKM_UNSPECIFIED, HASH_SHA1},
+ {RSN_AKM_PSK, HASH_SHA1},
+ {RSN_AKM_FBT_1X, HASH_SHA256},
+ {RSN_AKM_FBT_PSK, HASH_SHA256},
+ {RSN_AKM_MFP_1X, HASH_SHA256},
+ {RSN_AKM_MFP_PSK, HASH_SHA256},
+ {RSN_AKM_SHA256_1X, HASH_SHA256},
+ {RSN_AKM_SHA256_PSK, HASH_SHA256},
+ {RSN_AKM_TPK, HASH_SHA256},
+ {RSN_AKM_SAE_PSK, HASH_SHA256},
+ {RSN_AKM_SAE_FBT, HASH_SHA256},
+ {RSN_AKM_SUITEB_SHA256_1X, HASH_SHA256},
+ {RSN_AKM_SUITEB_SHA384_1X, HASH_SHA384},
+ {RSN_AKM_FBT_SHA384_1X, HASH_SHA384},
+ {RSN_AKM_FILS_SHA256, HASH_SHA256},
+ {RSN_AKM_FILS_SHA384, HASH_SHA384},
+ {RSN_AKM_FBT_SHA256_FILS, HASH_SHA256},
+ {RSN_AKM_FBT_SHA384_FILS, HASH_SHA384},
+ {RSN_AKM_OWE, HASH_SHA256},
+ {RSN_AKM_FBT_SHA384_PSK, HASH_SHA384},
+ {RSN_AKM_PSK_SHA384, HASH_SHA384},
+};
+
+typedef struct rsn_akm_cipher_match_entry {
+ uint16 akm_type;
+ uint32 u_cast; /* BITMAP */
+ uint32 m_cast; /* BITMAP */
+ uint32 g_mgmt; /* BITMAP */
+} rsn_akm_cipher_match_entry_t;
+
+/* list only explicit cipher restriction for given AKM (e.g SuiteB)
+ * refer to 802.11 spec 9.4.2.24.3
+ * If not listed here, it means no restriction in using any ciphers.
+ */
+static const rsn_akm_cipher_match_entry_t rsn_akm_cipher_match_table[] = {
+ {RSN_AKM_SUITEB_SHA256_1X,
+ BCM_BIT(WPA_CIPHER_AES_GCM),
+ BCM_BIT(WPA_CIPHER_AES_GCM),
+ BCM_BIT(WPA_CIPHER_BIP_GMAC_128)},
+ {RSN_AKM_SUITEB_SHA384_1X,
+ BCM_BIT(WPA_CIPHER_AES_GCM256) | BCM_BIT(WPA_CIPHER_CCMP_256),
+	BCM_BIT(WPA_CIPHER_AES_GCM256) | BCM_BIT(WPA_CIPHER_CCMP_256),
+ BCM_BIT(WPA_CIPHER_BIP_GMAC_256) | BCM_BIT(WPA_CIPHER_BIP_CMAC_256)},
+ {RSN_AKM_FBT_SHA384_1X,
+ BCM_BIT(WPA_CIPHER_AES_GCM256) | BCM_BIT(WPA_CIPHER_CCMP_256),
+	BCM_BIT(WPA_CIPHER_AES_GCM256) | BCM_BIT(WPA_CIPHER_CCMP_256),
+ BCM_BIT(WPA_CIPHER_BIP_GMAC_256) | BCM_BIT(WPA_CIPHER_BIP_CMAC_256)}
+};
+
+#if defined(WL_BAND6G)
+static const rsn_akm_mask_t rsn_akm_6g_inval_mask =
+ BCM_BIT(RSN_AKM_PSK) |
+ BCM_BIT(RSN_AKM_FBT_PSK) |
+ BCM_BIT(RSN_AKM_SHA256_PSK) |
+ BCM_BIT(RSN_AKM_FBT_SHA384_PSK) |
+ BCM_BIT(RSN_AKM_PSK_SHA384);
+
+static const rsn_ciphers_t cipher_6g_inval_mask =
+ BCM_BIT(WPA_CIPHER_NONE) |
+ BCM_BIT(WPA_CIPHER_WEP_40) |
+ BCM_BIT(WPA_CIPHER_TKIP) |
+ BCM_BIT(WPA_CIPHER_WEP_104);
+#endif /* WL_BAND6G */
+
+#if defined(BCMSUP_PSK) || defined(BCMSUPPL)
+typedef struct group_cipher_algo_entry {
+ rsn_cipher_t g_mgmt_cipher;
+ uint8 bip_algo;
+} group_cipher_algo_entry_t;
+
+static const group_cipher_algo_entry_t group_mgmt_cipher_algo[] = {
+ {WPA_CIPHER_BIP_GMAC_256, CRYPTO_ALGO_BIP_GMAC256},
+ {WPA_CIPHER_BIP_CMAC_256, CRYPTO_ALGO_BIP_CMAC256},
+ {WPA_CIPHER_BIP_GMAC_128, CRYPTO_ALGO_BIP_GMAC},
+ {WPA_CIPHER_BIP, CRYPTO_ALGO_BIP},
+};
+#endif /* defined(BCMSUP_PSK) || defined(BCMSUPPL) */
+
+static uint16 wlc_calc_rsn_desc_version(const rsn_ie_info_t *rsn_info);
+static int bcmwpa_is_valid_akm(const rsn_akm_t akm);
+#if defined(BCMSUP_PSK) || defined(BCMAUTH_PSK) || defined(WLFBT) || defined(GTKOE)
+static sha2_hash_type_t bcmwpa_rsn_akm_to_hash(const rsn_akm_t akm);
+#ifdef RSN_IE_INFO_STRUCT_RELOCATED
+static int bcmwpa_decode_cipher_suite(rsn_ie_info_t *info, const uint8 **ptr, uint ie_len, uint
+ *remain_len, uint16 *p_count);
+#endif
+#endif /* defined(BCMSUP_PSK) || defined(BCMAUTH_PSK) || defined(WLFBT) || defined(GTKOE) */
+#if defined(BCMSUP_PSK) || defined(WLFBT) || defined(WL_OKC) || defined(WLHOSTFBT)
+#include <rc4.h>
+
+/* calculate wpa PMKID: HMAC-SHA1-128(PMK, "PMK Name" | AA | SPA) */
+static void
+wpa_calc_pmkid_impl(sha2_hash_type_t hash_type,
+ const struct ether_addr *auth_ea, const struct ether_addr *sta_ea,
+ const uint8 *pmk, uint pmk_len, uint8 *pmkid)
+{
+ int err;
+ hmac_sha2_ctx_t ctx;
+
+ err = hmac_sha2_init(&ctx, hash_type, pmk, pmk_len);
+ if (err != BCME_OK)
+ goto done;
+ hmac_sha2_update(&ctx, (const uint8 *)PMK_NAME_PFX, sizeof(PMK_NAME_PFX) - 1);
+ hmac_sha2_update(&ctx, (const uint8 *)auth_ea, ETHER_ADDR_LEN);
+ hmac_sha2_update(&ctx, (const uint8 *)sta_ea, ETHER_ADDR_LEN);
+ hmac_sha2_final(&ctx, pmkid, WPA2_PMKID_LEN);
+done:;
+}
+
+void
+wpa_calc_pmkid(const struct ether_addr *auth_ea, const struct ether_addr *sta_ea,
+ const uint8 *pmk, uint pmk_len, uint8 *pmkid)
+{
+ wpa_calc_pmkid_impl(HASH_SHA1, auth_ea, sta_ea, pmk, pmk_len, pmkid);
+}
+
+void
+kdf_calc_pmkid(const struct ether_addr *auth_ea, const struct ether_addr *sta_ea,
+ const uint8 *key, uint key_len, uint8 *pmkid, rsn_ie_info_t *rsn_info)
+{
+ sha2_hash_type_t hash_type;
+
+ if (rsn_info->sta_akm == RSN_AKM_SUITEB_SHA384_1X) {
+ hash_type = HASH_SHA384;
+ } else {
+ hash_type = HASH_SHA256;
+ }
+
+ wpa_calc_pmkid_impl(hash_type, auth_ea, sta_ea, key, key_len, pmkid);
+}
+
+#if defined(WLFBT) || defined(WLHOSTFBT)
+void
+wpa_calc_pmkR0(sha2_hash_type_t hash_type, const uint8 *ssid, uint ssid_len,
+ uint16 mdid, const uint8 *r0kh, uint r0kh_len, const struct ether_addr *sta_ea,
+ const uint8 *pmk, uint pmk_len, uint8 *pmkr0, uint8 *pmkr0name)
+{
+ uint8 out[FBT_R0KH_ID_LEN + WPA2_PMKID_LEN - 1];
+ int out_len = FBT_R0KH_ID_LEN - 1;
+ bcm_const_xlvp_t pfx[7];
+ bcm_const_xlvp_t pfx2[2];
+ int npfx = 0;
+ int npfx2 = 0;
+ uint8 mdid_le[2];
+ uint8 pfx_ssid_len;
+ uint8 pfx_r0kh_len;
+
+ if (hash_type == HASH_SHA384) {
+ out_len += WPA2_PMKID_LEN;
+ }
+
+ /* create prefixes for pmkr0 */
+ pfx[npfx].len = sizeof(FT_R0_PFX) - 1;
+ pfx[npfx++].data = (uint8 *)FT_R0_PFX;
+
+ /* ssid length and ssid */
+ pfx_ssid_len = ssid_len & 0xff;
+ pfx[npfx].len = (uint16)sizeof(pfx_ssid_len);
+ pfx[npfx++].data = &pfx_ssid_len;
+
+ pfx[npfx].len = (uint16)(ssid_len & 0xffff);
+ pfx[npfx++].data = ssid;
+
+ /* mdid */
+ htol16_ua_store(mdid, mdid_le);
+ pfx[npfx].len = sizeof(mdid_le);
+ pfx[npfx++].data = mdid_le;
+
+ /* r0kh len and r0kh */
+ pfx_r0kh_len = r0kh_len & 0xff;
+ pfx[npfx].len = sizeof(pfx_r0kh_len);
+ pfx[npfx++].data = &pfx_r0kh_len;
+
+ pfx[npfx].len = (uint16)(r0kh_len & 0xffff);
+ pfx[npfx++].data = r0kh;
+
+ /* sta addr */
+ pfx[npfx].len = ETHER_ADDR_LEN;
+ pfx[npfx++].data = (const uint8 *)sta_ea;
+
+ hmac_sha2_n(hash_type, pmk, pmk_len, pfx, npfx, NULL, 0, out, out_len);
+ (void)memcpy_s(pmkr0, pmk_len, out, pmk_len);
+
+ /* coverity checks overflow if pfx size changes */
+
+ /* create prefixes for pmkr0 name */
+ pfx2[npfx2].len = sizeof(FT_R0N_PFX) - 1;
+ pfx2[npfx2++].data = (uint8 *)FT_R0N_PFX;
+ pfx2[npfx2].len = WPA2_PMKID_LEN;
+ pfx2[npfx2++].data = &out[pmk_len];
+
+ (void)sha2(hash_type, pfx2, npfx2, NULL, 0, pmkr0name, WPA2_PMKID_LEN);
+}
+
+void
+wpa_calc_pmkR1(sha2_hash_type_t hash_type, const struct ether_addr *r1kh,
+ const struct ether_addr *sta_ea, const uint8 *pmk, uint pmk_len, const uint8 *pmkr0name,
+ uint8 *pmkr1, uint8 *pmkr1name)
+{
+ bcm_const_xlvp_t pfx[3];
+ bcm_const_xlvp_t pfx2[4];
+ int npfx = 0;
+ int npfx2 = 0;
+
+ if (!pmkr1 && !pmkr1name)
+ goto done;
+ else if (!pmkr1)
+ goto calc_r1name;
+
+ /* create prefixes for pmkr1 */
+ pfx[npfx].len = sizeof(FT_R1_PFX) - 1;
+ pfx[npfx++].data = (uint8 *)FT_R1_PFX;
+
+ pfx[npfx].len = ETHER_ADDR_LEN;
+ pfx[npfx++].data = (const uint8 *)r1kh;
+
+ pfx[npfx].len = ETHER_ADDR_LEN;
+ pfx[npfx++].data = (const uint8 *)sta_ea;
+
+ hmac_sha2_n(hash_type, pmk, pmk_len, pfx, npfx, NULL, 0,
+ pmkr1, sha2_digest_len(hash_type));
+
+calc_r1name:
+ /* create prefixes for pmkr1 name */
+ pfx2[npfx2].len = sizeof(FT_R1N_PFX) - 1;
+ pfx2[npfx2++].data = (uint8 *)FT_R1N_PFX;
+
+ pfx2[npfx2].len = WPA2_PMKID_LEN;
+ pfx2[npfx2++].data = pmkr0name;
+
+ pfx2[npfx2].len = ETHER_ADDR_LEN;
+ pfx2[npfx2++].data = (const uint8 *)r1kh;
+
+ pfx2[npfx2].len = ETHER_ADDR_LEN;
+ pfx2[npfx2++].data = (const uint8 *)sta_ea;
+
+ sha2(hash_type, pfx2, npfx2, NULL, 0, pmkr1name, WPA2_PMKID_LEN);
+done:;
+}
+
+void
+wpa_calc_ft_ptk(sha2_hash_type_t hash_type,
+ const struct ether_addr *bssid, const struct ether_addr *sta_ea,
+ const uint8 *anonce, const uint8* snonce,
+ const uint8 *pmk, uint pmk_len, uint8 *ptk, uint ptk_len)
+{
+ bcm_const_xlvp_t pfx[5];
+ int npfx = 0;
+
+ /* FT-PTK||SNONCE||ANONCE||BSSID||STA Addr */
+
+ pfx[npfx].len = sizeof(FT_PTK_PFX) - 1;
+ pfx[npfx++].data = (uint8 *)FT_PTK_PFX;
+
+ pfx[npfx].len = EAPOL_WPA_KEY_NONCE_LEN;
+ pfx[npfx++].data = snonce;
+
+ pfx[npfx].len = EAPOL_WPA_KEY_NONCE_LEN;
+ pfx[npfx++].data = anonce;
+
+ pfx[npfx].len = ETHER_ADDR_LEN;
+ pfx[npfx++].data = (const uint8 *)bssid;
+
+ pfx[npfx].len = ETHER_ADDR_LEN;
+ pfx[npfx++].data = (const uint8 *)sta_ea;
+
+ hmac_sha2_n(hash_type, pmk, pmk_len, pfx, npfx, NULL, 0, ptk, ptk_len);
+}
+
+void
+wpa_derive_pmkR1_name(sha2_hash_type_t hash_type,
+ struct ether_addr *r1kh, struct ether_addr *sta_ea,
+ uint8 *pmkr0name, uint8 *pmkr1name)
+{
+ wpa_calc_pmkR1(hash_type, r1kh, sta_ea, NULL /* pmk */, 0,
+ pmkr0name, NULL /* pmkr1 */, pmkr1name);
+}
+#endif /* WLFBT || WLHOSTFBT */
+#endif /* BCMSUP_PSK || WLFBT || WL_OKC || WLHOSTFBT */
+
+#if defined(BCMSUP_PSK) || defined(GTKOE) || defined(BCMAUTH_PSK) || defined(WLFBT)
+/* Decrypt a key data from a WPA key message */
+int
+wpa_decr_key_data(eapol_wpa_key_header_t *body, uint16 key_info, uint8 *ekey,
+ uint8 *encrkey, rc4_ks_t *rc4key, const rsn_ie_info_t *rsn_info, uint16 *dec_len)
+{
+ uint16 len;
+ int err = BCME_OK;
+ uint8 *key_data;
+
+ switch (key_info & (WPA_KEY_DESC_V1 | WPA_KEY_DESC_V2)) {
+ case WPA_KEY_DESC_V1:
+ err = memcpy_s(encrkey, EAPOL_WPA_KEY_IV_LEN + EAPOL_WPA_ENCR_KEY_MAX_LEN,
+ body->iv, EAPOL_WPA_KEY_IV_LEN);
+ if (err) {
+ ASSERT(0);
+ return err;
+ }
+ err = memcpy_s(&encrkey[EAPOL_WPA_KEY_IV_LEN], EAPOL_WPA_ENCR_KEY_MAX_LEN,
+ ekey, rsn_info->kek_len);
+ if (err) {
+ ASSERT(0);
+ return err;
+ }
+ /* decrypt the key data */
+ prepare_key(encrkey, EAPOL_WPA_KEY_IV_LEN + rsn_info->kek_len, rc4key);
+ rc4(NULL, WPA_KEY_DATA_LEN_256, rc4key); /* dump 256 bytes */
+ len = ntoh16_ua(EAPOL_WPA_KEY_HDR_DATA_LEN_PTR(body, rsn_info->kck_mic_len));
+ key_data = EAPOL_WPA_KEY_HDR_DATA_PTR(body, rsn_info->kck_mic_len);
+ rc4(key_data, len, rc4key);
+ break;
+
+ case WPA_KEY_DESC_V2:
+ case WPA_KEY_DESC_V3:
+ case WPA_KEY_DESC_V0:
+ /* fallthrough */
+ len = ntoh16_ua(EAPOL_WPA_KEY_HDR_DATA_LEN_PTR(body, rsn_info->kck_mic_len));
+ if (!len) {
+ *dec_len = 0;
+ break; /* ignore zero length */
+ }
+ key_data = EAPOL_WPA_KEY_HDR_DATA_PTR(body, rsn_info->kck_mic_len);
+ if (aes_unwrap(rsn_info->kek_len, ekey, len, key_data, key_data)) {
+ *dec_len = 0;
+ err = BCME_DECERR;
+ break;
+ }
+ *dec_len = (len > AKW_BLOCK_LEN) ? (len - AKW_BLOCK_LEN) : 0;
+ break;
+
+ default:
+ *dec_len = 0;
+ err = BCME_UNSUPPORTED; /* may need revisiting - see 802.11-2016 */
+ break;
+ }
+
+ return err;
+}
+
+/* internal function - assumes enough space is allocated, returns the number of prefixes written */
+static int
+wpa_calc_ptk_prefixes(const uint8 *prefix, uint prefix_len,
+ const struct ether_addr *auth_ea, const struct ether_addr *sta_ea,
+ const uint8 *anonce, uint8 anonce_len, const uint8 *snonce, uint8 snonce_len,
+ bcm_const_xlvp_t *pfx)
+{
+ int npfx = 0;
+ const uint8 *nonce;
+
+ /* prefix || min ea || max ea || min nonce || max nonce */
+ pfx[npfx].len = (uint16)(prefix_len & 0xffff);
+ pfx[npfx++].data = prefix;
+
+ pfx[npfx].len = ETHER_ADDR_LEN;
+ pfx[npfx++].data = (const uint8 *) wpa_array_cmp(MIN_ARRAY,
+ (const uint8 *)auth_ea, (const uint8 *)sta_ea, ETHER_ADDR_LEN);
+
+ pfx[npfx].len = ETHER_ADDR_LEN;
+ pfx[npfx++].data = (const uint8 *) wpa_array_cmp(MAX_ARRAY,
+ (const uint8 *)auth_ea, (const uint8 *)sta_ea, ETHER_ADDR_LEN);
+
+ nonce = (const uint8 *)wpa_array_cmp(MIN_ARRAY, snonce, anonce, snonce_len);
+
+ if (nonce == snonce) {
+ pfx[npfx].len = snonce_len;
+ pfx[npfx++].data = snonce;
+ pfx[npfx].len = anonce_len;
+ pfx[npfx++].data = anonce;
+ } else {
+ pfx[npfx].len = anonce_len;
+ pfx[npfx++].data = anonce;
+ pfx[npfx].len = snonce_len;
+ pfx[npfx++].data = snonce;
+ }
+
+ return npfx;
+}
+
+void
+kdf_calc_ptk(const struct ether_addr *auth_ea, const struct ether_addr *sta_ea,
+ const uint8 *anonce, const uint8* snonce,
+ const uint8 *pmk, uint pmk_len, uint8 *ptk, uint ptk_len)
+{
+ bcm_const_xlvp_t pfx[5];
+ int npfx;
+
+ /* note: kdf omits trailing NULL in prefix */
+ npfx = wpa_calc_ptk_prefixes((uint8 *)WPA_PTK_PFX, sizeof(WPA_PTK_PFX) - 1,
+ auth_ea, sta_ea, anonce, EAPOL_WPA_KEY_NONCE_LEN, snonce,
+ EAPOL_WPA_KEY_NONCE_LEN, pfx);
+ hmac_sha2_n(HASH_SHA256, pmk, pmk_len, pfx, npfx, NULL, 0, ptk, ptk_len);
+}
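+
+/* Illustrative note: for the common AES-CCMP case the 48-byte PTK computed
+ * here is consumed as KCK (16) | KEK (16) | TK (16) per 802.11-2016 12.7.1.3;
+ * AKMs with longer digests use the per-AKM lengths from the eapol_*_len
+ * tables at the top of this file.
+ */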
+#endif /* BCMSUP_PSK || GTKOE || BCMAUTH_PSK || WLFBT */
+
+#if defined(BCMSUP_PSK) || defined(BCMAUTH_PSK) || defined(WLFBT) || defined(GTKOE)
+/* Compute Message Integrity Code (MIC) over EAPOL message */
+int
+wpa_make_mic(eapol_header_t *eapol, uint key_desc, uint8 *mic_key,
+ rsn_ie_info_t *rsn_info, uchar *mic, uint mic_len)
+{
+ uint data_len;
+ int err = BCME_OK;
+ sha2_hash_type_t type = HASH_NONE;
+
+ /* length of eapol pkt from the version field on */
+ data_len = 4 + ntoh16_ua((uint8 *)&eapol->length);
+
+ /* Create the MIC for the pkt */
+ switch (key_desc) {
+ case WPA_KEY_DESC_V1:
+ type = HASH_MD5;
+ break;
+ case WPA_KEY_DESC_V2:
+ /* note: transparent truncation to mic_len */
+ type = HASH_SHA1;
+ break;
+ case WPA_KEY_DESC_V3:
+ aes_cmac_calc(NULL, 0, &eapol->version, data_len, mic_key,
+ mic_len, mic, AES_BLOCK_SZ);
+ goto exit;
+ case WPA_KEY_DESC_V0:
+ ASSERT(rsn_info != NULL);
+ if (rsn_info == NULL) {
+ return BCME_BADARG;
+ }
+ if (IS_SAE_AKM(rsn_info->sta_akm)) {
+ aes_cmac_calc(NULL, 0, &eapol->version, data_len, mic_key,
+ mic_len, mic, AES_BLOCK_SZ);
+ goto exit;
+ }
+ type = bcmwpa_rsn_akm_to_hash(rsn_info->sta_akm);
+ break;
+ default:
+ /* 11mc D8.0 some AKMs use descriptor version 0 */
+ err = BCME_UNSUPPORTED;
+ goto exit;
+ }
+
+ if (type) {
+ err = hmac_sha2(type, mic_key, mic_len, NULL, 0, (uint8 *)&eapol->version, data_len,
+ mic, mic_len);
+ }
+exit:
+ return err;
+}
+
+int
+wpa_calc_ptk(rsn_akm_t akm, const struct ether_addr *auth_ea, const struct ether_addr *sta_ea,
+ const uint8 *anonce, uint8 anon_len, const uint8 *snonce, uint8 snon_len, const uint8 *pmk,
+ uint pmk_len, uint8 *ptk, uint ptk_len)
+{
+ bcm_const_xlvp_t pfx[PRF_PREFIXES_NUM];
+ int npfx;
+ int ret = BCME_OK;
+ sha2_hash_type_t hash_type;
+ uint label_len;
+
+ if (RSN_AKM_USE_KDF(akm)) {
+ label_len = sizeof(WPA_PTK_PFX) - 1u;
+	} else { /* WPA AKMs */
+ label_len = sizeof(WPA_PTK_PFX); /* note: wpa needs trailing NULL in prefix */
+ }
+
+ hash_type = bcmwpa_rsn_akm_to_hash(akm);
+
+ npfx = wpa_calc_ptk_prefixes((uint8 *)WPA_PTK_PFX, label_len,
+ auth_ea, sta_ea, anonce, anon_len, snonce, snon_len, pfx);
+ ret = hmac_sha2_n(hash_type, pmk, pmk_len, pfx, npfx, NULL, 0, ptk, ptk_len);
+ return ret;
+}
+
+bool
+wpa_encr_key_data(eapol_wpa_key_header_t *body, uint16 key_info, uint8 *ekey,
+ uint8 *gtk, uint8 *data, uint8 *encrkey, rc4_ks_t *rc4key, const rsn_ie_info_t *rsn_info)
+{
+ uint16 len;
+ uint8 *key_data;
+
+ switch (key_info & (WPA_KEY_DESC_V1 | WPA_KEY_DESC_V2)) {
+ case WPA_KEY_DESC_V1:
+ if (gtk) {
+ len = ntoh16_ua((uint8 *)&body->key_len);
+ } else {
+ len = ntoh16_ua(EAPOL_WPA_KEY_HDR_DATA_LEN_PTR(body,
+ rsn_info->kck_mic_len));
+ }
+
+ /* create the iv/ptk key */
+ if (memcpy_s(encrkey, EAPOL_WPA_KEY_IV_LEN, body->iv, sizeof(body->iv))) {
+ return FALSE;
+ }
+ if (memcpy_s(&encrkey[EAPOL_WPA_KEY_IV_LEN], EAPOL_WPA_ENCR_KEY_DEFAULT_LEN,
+ ekey, EAPOL_WPA_ENCR_KEY_DEFAULT_LEN)) {
+ return FALSE;
+ }
+ /* encrypt the key data */
+ prepare_key(encrkey, EAPOL_WPA_KEY_IV_LEN + EAPOL_WPA_ENCR_KEY_DEFAULT_LEN,
+ rc4key);
+ rc4(data, WPA_KEY_DATA_LEN_256, rc4key); /* dump 256 bytes */
+ key_data = EAPOL_WPA_KEY_HDR_DATA_PTR(body, rsn_info->kck_mic_len);
+ rc4(key_data, len, rc4key);
+ break;
+ case WPA_KEY_DESC_V2: /* fall through */
+ case WPA_KEY_DESC_V3:
+ case WPA_KEY_DESC_V0:
+ len = ntoh16_ua(EAPOL_WPA_KEY_HDR_DATA_LEN_PTR(body,
+ rsn_info->kck_mic_len));
+		/* FIXME: len is the length to encrypt, but we need to make sure
+		 * the buffer is big enough for expansion. How? Is that the
+		 * caller's problem?
+		 */
+ key_data = EAPOL_WPA_KEY_HDR_DATA_PTR(body, rsn_info->kck_mic_len);
+ /* pad if needed - min. 16 bytes, 8 byte aligned */
+ /* padding is 0xdd followed by 0's */
+		if (len < 2u * AKW_BLOCK_LEN) {
+			key_data[len] = WPA2_KEY_DATA_PAD;
+			bzero(&key_data[len + 1u], 2u * AKW_BLOCK_LEN - (len + 1u));
+			len = 2u * AKW_BLOCK_LEN;
+ } else if (len % AKW_BLOCK_LEN) {
+ key_data[len] = WPA2_KEY_DATA_PAD;
+ bzero(&key_data[len + 1u],
+ AKW_BLOCK_LEN - ((len + 1u) % AKW_BLOCK_LEN));
+ len += AKW_BLOCK_LEN - (len % AKW_BLOCK_LEN);
+ }
+ if (aes_wrap(rsn_info->kek_len, ekey, len, key_data, key_data)) {
+ return FALSE;
+ }
+ len += AKW_BLOCK_LEN;
+ hton16_ua_store(len,
+ (uint8 *)EAPOL_WPA_KEY_HDR_DATA_LEN_PTR(body,
+ rsn_info->kck_mic_len));
+ break;
+ default:
+ /* 11mc D8.0 key descriptor version 0 used */
+ return FALSE;
+ }
+
+ return TRUE;
+}
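+
+/* Worked example (illustrative): with 22 bytes of key data in the V2/V3/V0
+ * path above, 22 % AKW_BLOCK_LEN != 0, so a 0xdd pad byte plus zeros round
+ * the length up to 24; aes_wrap() then produces 24 + AKW_BLOCK_LEN = 32
+ * bytes, which is the value written back into the key data length field.
+ */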
+
+/* Check MIC of EAPOL message */
+bool
+wpa_check_mic(eapol_header_t *eapol, uint key_desc, uint8 *mic_key, rsn_ie_info_t *rsn_info)
+{
+ eapol_wpa_key_header_t *body = NULL;
+ uchar digest[SHA2_MAX_DIGEST_LEN];
+ uchar mic[EAPOL_WPA_KEY_MAX_MIC_LEN];
+
+ if (!mic_key || !rsn_info || !eapol) {
+ return FALSE;
+ }
+
+ body = (eapol_wpa_key_header_t *)eapol->body;
+
+#ifndef EAPOL_KEY_HDR_VER_V2
+ if (rsn_info->kck_mic_len != EAPOL_WPA_KCK_DEFAULT_LEN)
+#else
+ if (rsn_info->kck_mic_len > EAPOL_WPA_KEY_MAX_MIC_LEN)
+#endif /* EAPOL_KEY_HDR_VER_V2 */
+ {
+ ASSERT(0);
+ return FALSE;
+ }
+ /* save MIC and clear its space in message */
+ if (memcpy_s(mic, sizeof(mic), EAPOL_WPA_KEY_HDR_MIC_PTR(body),
+ rsn_info->kck_mic_len)) {
+ return FALSE;
+ }
+ bzero(EAPOL_WPA_KEY_HDR_MIC_PTR(body), rsn_info->kck_mic_len);
+ if (wpa_make_mic(eapol, key_desc, mic_key, rsn_info, digest, rsn_info->kck_mic_len)
+ != BCME_OK) {
+ return FALSE;
+ }
+ return !memcmp(digest, mic, rsn_info->kck_mic_len);
+}
+
+static sha2_hash_type_t bcmwpa_rsn_akm_to_hash(const rsn_akm_t akm)
+{
+ uint i = 0;
+ sha2_hash_type_t type = HASH_NONE;
+
+ for (i = 0; i < ARRAYSIZE(rsn_akm_lookup_tbl); i++) {
+ if (akm == rsn_akm_lookup_tbl[i].rsn_akm) {
+ type = rsn_akm_lookup_tbl[i].hash_type;
+ break;
+ }
+ }
+ return type;
+}
+#endif /* BCMSUP_PSK || BCMAUTH_PSK || WLFBT || GTKOE */
+
+#ifdef WLTDLS
+void
+wpa_calc_tpk(const struct ether_addr *init_ea, const struct ether_addr *resp_ea,
+ const struct ether_addr *bssid, const uint8 *anonce, const uint8* snonce,
+ uint8 *tpk, uint tpk_len)
+{
+ uint8 pmk[SHA2_MAX_DIGEST_LEN];
+ uint pmk_len;
+ bcm_const_xlvp_t ikpfx[2];
+ int nikpfx = 0;
+ bcm_const_xlvp_t tpkpfx[4];
+ int ntpkpfx = 0;
+
+ pmk_len = sha2_digest_len(HASH_SHA256);
+
+ /* compute pmk to use - using anonce and snonce - min and then max */
+ ikpfx[nikpfx].len = EAPOL_WPA_KEY_NONCE_LEN;
+ ikpfx[nikpfx++].data = wpa_array_cmp(MIN_ARRAY, snonce, anonce,
+ EAPOL_WPA_KEY_NONCE_LEN),
+
+ ikpfx[nikpfx].len = EAPOL_WPA_KEY_NONCE_LEN;
+ ikpfx[nikpfx++].data = wpa_array_cmp(MAX_ARRAY, snonce, anonce,
+ EAPOL_WPA_KEY_NONCE_LEN),
+
+ (void)sha2(HASH_SHA256, ikpfx, nikpfx, NULL, 0, pmk, SHA2_SHA256_DIGEST_LEN);
+
+ /* compute the tpk - using prefix, min ea, max ea, bssid */
+ tpkpfx[ntpkpfx].len = sizeof(TDLS_PMK_PFX) - 1;
+ tpkpfx[ntpkpfx++].data = (const uint8 *)TDLS_PMK_PFX;
+
+ tpkpfx[ntpkpfx].len = ETHER_ADDR_LEN;
+ tpkpfx[ntpkpfx++].data = wpa_array_cmp(MIN_ARRAY, (const uint8 *)init_ea,
+ (const uint8 *)resp_ea, ETHER_ADDR_LEN),
+
+ tpkpfx[ntpkpfx].len = ETHER_ADDR_LEN;
+ tpkpfx[ntpkpfx++].data = wpa_array_cmp(MAX_ARRAY, (const uint8 *)init_ea,
+ (const uint8 *)resp_ea, ETHER_ADDR_LEN),
+
+ tpkpfx[ntpkpfx].len = ETHER_ADDR_LEN;
+ tpkpfx[ntpkpfx++].data = (const uint8 *)bssid;
+
+ (void)hmac_sha2_n(HASH_SHA256, pmk, pmk_len, tpkpfx, ntpkpfx, NULL, 0, tpk, tpk_len);
+}
+#endif /* WLTDLS */
+
+/* Convert WPA/WPA2 IE cipher suite to locally used value */
+static bool
+rsn_cipher(wpa_suite_t *suite, ushort *cipher, const uint8 *std_oui, bool wep_ok)
+{
+ bool ret = TRUE;
+
+ if (!memcmp((const char *)suite->oui, std_oui, DOT11_OUI_LEN)) {
+ switch (suite->type) {
+ case WPA_CIPHER_TKIP:
+ *cipher = CRYPTO_ALGO_TKIP;
+ break;
+ case WPA_CIPHER_AES_CCM:
+ *cipher = CRYPTO_ALGO_AES_CCM;
+ break;
+ case WPA_CIPHER_AES_GCM:
+ *cipher = CRYPTO_ALGO_AES_GCM;
+ break;
+ case WPA_CIPHER_AES_GCM256:
+ *cipher = CRYPTO_ALGO_AES_GCM256;
+ break;
+ case WPA_CIPHER_WEP_40:
+ if (wep_ok)
+ *cipher = CRYPTO_ALGO_WEP1;
+ else
+ ret = FALSE;
+ break;
+ case WPA_CIPHER_WEP_104:
+ if (wep_ok)
+ *cipher = CRYPTO_ALGO_WEP128;
+ else
+ ret = FALSE;
+ break;
+ default:
+ ret = FALSE;
+ break;
+ }
+ return ret;
+ }
+
+ return FALSE;
+}
+
+bool
+wpa_cipher(wpa_suite_t *suite, ushort *cipher, bool wep_ok)
+{
+ return rsn_cipher(suite, cipher, (const uchar*)WPA_OUI, wep_ok);
+}
+
+bool
+wpa2_cipher(wpa_suite_t *suite, ushort *cipher, bool wep_ok)
+{
+ return rsn_cipher(suite, cipher, (const uchar*)WPA2_OUI, wep_ok);
+}
+
+/* Does the given ie match the expected vendor OUI and type? If
+ * not, advance the tlvs buffer pointer/length past it.
+ */
+bool
+bcm_has_ie(uint8 *ie, uint8 **tlvs, uint *tlvs_len, const uint8 *oui, uint oui_len, uint8 type)
+{
+ /* If the contents match the OUI and the type */
+ if (ie[TLV_LEN_OFF] >= oui_len + 1 &&
+ !memcmp(&ie[TLV_BODY_OFF], oui, oui_len) &&
+ type == ie[TLV_BODY_OFF + oui_len]) {
+ return TRUE;
+ }
+
+ /* point to the next ie */
+ ie += ie[TLV_LEN_OFF] + TLV_HDR_LEN;
+ /* calculate the length of the rest of the buffer */
+ *tlvs_len -= (uint)(ie - *tlvs);
+ /* update the pointer to the start of the buffer */
+ *tlvs = ie;
+
+ return FALSE;
+}
+
+wpa_ie_fixed_t *
+bcm_find_wpaie(uint8 *parse, uint len)
+{
+ return (wpa_ie_fixed_t *) bcm_find_ie(parse, len, DOT11_MNG_VS_ID,
+ WPA_OUI_LEN, (const char*) WPA_OUI, WPA_OUI_TYPE);
+}
+
+int
+bcm_find_security_ies(uint8 *buf, uint buflen, void **wpa_ie,
+ void **rsn_ie)
+{
+ bcm_tlv_t *tlv = NULL;
+ uint totlen = 0;
+ uint8 *end = NULL;
+ uint len = 0;
+ uint tlvs_len = 0;
+ uint8 *tlvs = NULL;
+
+ if ((tlv = (bcm_tlv_t*)buf) == NULL ||
+ !wpa_ie || !rsn_ie || buflen == 0) {
+ return BCME_BADARG;
+ }
+
+ totlen = buflen;
+ *rsn_ie = *wpa_ie = NULL;
+ end = buf;
+ end += buflen;
+
+ /* find rsn ie and wpa ie */
+ while (totlen >= TLV_HDR_LEN) {
+ len = tlv->len;
+ tlvs_len = buflen;
+ tlvs = buf;
+
+ /* check if tlv overruns buffer */
+ if (totlen < (len + TLV_HDR_LEN)) {
+ return BCME_BUFTOOSHORT;
+ }
+
+ /* validate remaining totlen */
+ if (totlen >= (len + TLV_HDR_LEN)) {
+ if ((*rsn_ie == NULL) && (tlv->id == DOT11_MNG_RSN_ID)) {
+ *rsn_ie = tlv;
+ } else if ((*wpa_ie == NULL) && (tlv->id == DOT11_MNG_VS_ID)) {
+ /* if vendor ie, check if its wpa ie */
+ if (bcm_is_wpa_ie((uint8 *)tlv, &tlvs, &tlvs_len))
+ *wpa_ie = tlv;
+ }
+ }
+
+ if (*rsn_ie && *wpa_ie)
+ break;
+
+ tlv = (bcm_tlv_t*)((uint8*)tlv + (len + TLV_HDR_LEN));
+ totlen -= (len + TLV_HDR_LEN);
+
+ if (totlen > buflen) {
+ return BCME_BUFTOOLONG;
+ }
+
+ if ((uint8 *)tlv > end) {
+ return BCME_BUFTOOSHORT;
+ }
+
+ }
+
+ if (*wpa_ie || *rsn_ie)
+ return BCME_OK;
+ else
+ return BCME_NOTFOUND;
+}
+
+bcm_tlv_t *
+bcm_find_wmeie(uint8 *parse, uint len, uint8 subtype, uint8 subtype_len)
+{
+ bcm_tlv_t *ie;
+ if ((ie = bcm_find_ie(parse, len, DOT11_MNG_VS_ID, WME_OUI_LEN,
+ (const char*) WME_OUI, WME_OUI_TYPE))) {
+ uint ie_len = TLV_HDR_LEN + ie->len;
+ wme_ie_t *ie_data = (wme_ie_t *)ie->data;
+ /* the subtype_len must include OUI+type+subtype */
+ if (subtype_len > WME_OUI_LEN + 1 &&
+ ie_len == (uint)TLV_HDR_LEN + subtype_len &&
+ ie_data->subtype == subtype) {
+ return ie;
+ }
+ /* move to next IE */
+ len -= (uint)((uint8 *)ie + ie_len - parse);
+ parse = (uint8 *)ie + ie_len;
+ }
+ return NULL;
+}
+
+wps_ie_fixed_t *
+bcm_find_wpsie(const uint8 *parse, uint len)
+{
+ uint8 type = WPS_OUI_TYPE;
+
+ return (wps_ie_fixed_t *)bcm_find_vendor_ie(parse, len, WPS_OUI, &type, sizeof(type));
+}
+
+/* locate the Attribute in the WPS IE */
+/* assume the caller has validated the WPS IE tag and length */
+wps_at_fixed_t *
+bcm_wps_find_at(wps_at_fixed_t *at, uint len, uint16 id)
+{
+ while ((int)len >= WPS_AT_FIXED_LEN) {
+ uint alen = WPS_AT_FIXED_LEN + ntoh16_ua(((wps_at_fixed_t *)at)->len);
+ if (ntoh16_ua(((wps_at_fixed_t *)at)->at) == id && alen <= len)
+ return at;
+ at = (wps_at_fixed_t *)((uint8 *)at + alen);
+ len -= alen;
+ }
+ return NULL;
+}
+
+#ifdef WLP2P
+wifi_p2p_ie_t *
+bcm_find_p2pie(const uint8 *parse, uint len)
+{
+ uint8 type = P2P_OUI_TYPE;
+
+ return (wifi_p2p_ie_t *)bcm_find_vendor_ie(parse, len, P2P_OUI, &type, sizeof(type));
+}
+#endif
+
+bcm_tlv_t *
+bcm_find_hs20ie(uint8 *parse, uint len)
+{
+ return bcm_find_ie(parse, len, DOT11_MNG_VS_ID, WFA_OUI_LEN,
+ (const char *)WFA_OUI, WFA_OUI_TYPE_HS20);
+}
+
+bcm_tlv_t *
+bcm_find_osenie(uint8 *parse, uint len)
+{
+ return bcm_find_ie(parse, len, DOT11_MNG_VS_ID, WFA_OUI_LEN,
+ (const char *) WFA_OUI, WFA_OUI_TYPE_OSEN);
+}
+
+#if defined(BCMSUP_PSK) || defined(BCMSUPPL) || defined(GTKOE) || defined(WL_FILS)
+#define wpa_is_kde(ie, tlvs, len, type) bcm_has_ie(ie, tlvs, len, \
+ (const uint8 *)WPA2_OUI, WPA2_OUI_LEN, type)
+
+eapol_wpa2_encap_data_t *
+wpa_find_kde(const uint8 *parse, uint len, uint8 type)
+{
+ return (eapol_wpa2_encap_data_t *) bcm_find_ie(parse, len,
+ DOT11_MNG_PROPR_ID, WPA2_OUI_LEN, (const char *) WPA2_OUI, type);
+}
+
+bool
+wpa_is_gtk_encap(uint8 *ie, uint8 **tlvs, uint *tlvs_len)
+{
+ return wpa_is_kde(ie, tlvs, tlvs_len, WPA2_KEY_DATA_SUBTYPE_GTK);
+}
+
+eapol_wpa2_encap_data_t *
+wpa_find_gtk_encap(uint8 *parse, uint len)
+{
+ eapol_wpa2_encap_data_t *data;
+
+ /* minimum length includes kde upto gtk field in eapol_wpa2_key_gtk_encap_t */
+ data = wpa_find_kde(parse, len, WPA2_KEY_DATA_SUBTYPE_GTK);
+ if (data && (data->length < EAPOL_WPA2_GTK_ENCAP_MIN_LEN)) {
+ data = NULL;
+ }
+
+ return data;
+}
+
+int
+wpa_find_eapol_kde_data(eapol_header_t* eapol, uint8 eapol_mic_len,
+ uint8 subtype, eapol_wpa2_encap_data_t **out_data)
+{
+ eapol_wpa_key_header_t *body;
+ uint8 *parse;
+ uint16 body_len;
+ uint16 data_len;
+
+ if (!eapol) {
+ return BCME_BADARG;
+ }
+
+ body = (eapol_wpa_key_header_t *)eapol->body;
+ body_len = ntoh16_ua(&eapol->length);
+
+ data_len = ntoh16_ua(EAPOL_WPA_KEY_HDR_DATA_LEN_PTR(body,
+ eapol_mic_len));
+
+ parse = EAPOL_WPA_KEY_HDR_DATA_PTR(body, eapol_mic_len);
+
+ if (((uint8 *)body + body_len) < ((uint8 *)parse + data_len)) {
+ return BCME_BUFTOOSHORT;
+ }
+
+ return wpa_find_kde_data(parse, data_len, subtype, out_data);
+}
+
+int
+wpa_find_kde_data(const uint8 *kde_buf, uint16 buf_len,
+ uint8 subtype, eapol_wpa2_encap_data_t **out_data)
+{
+ eapol_wpa2_encap_data_t *data;
+ uint8 min_len;
+
+ if (!kde_buf) {
+ return BCME_BADARG;
+ }
+
+ /* minimum length includes kde upto gtk field in eapol_wpa2_key_gtk_encap_t */
+ data = wpa_find_kde(kde_buf, buf_len, subtype);
+ if (!data) {
+ return BCME_IE_NOTFOUND;
+ }
+
+ switch (subtype) {
+ case WPA2_KEY_DATA_SUBTYPE_GTK:
+ min_len = EAPOL_WPA2_GTK_ENCAP_MIN_LEN;
+ break;
+ case WPA2_KEY_DATA_SUBTYPE_IGTK:
+ min_len = EAPOL_WPA2_BIGTK_ENCAP_MIN_LEN;
+ break;
+ case WPA2_KEY_DATA_SUBTYPE_BIGTK:
+ min_len = EAPOL_WPA2_BIGTK_ENCAP_MIN_LEN;
+ break;
+#ifdef WL_OCV
+ case WPA2_KEY_DATA_SUBTYPE_OCI:
+ min_len = EAPOL_WPA2_OCI_ENCAP_MIN_LEN;
+ break;
+#endif /* WL_OCV */
+ default:
+ return BCME_UNSUPPORTED;
+ }
+
+ if (data->length < min_len) {
+ return BCME_BADLEN;
+ }
+
+ *out_data = data;
+
+ return BCME_OK;
+}
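+
+/* A minimal caller sketch for the lookup above (illustrative only: key_data
+ * and key_data_len are hypothetical names for the caller's EAPOL key-data
+ * buffer, not identifiers defined in this file):
+ *
+ *	eapol_wpa2_encap_data_t *gtk_encap = NULL;
+ *	if (wpa_find_kde_data(key_data, key_data_len,
+ *	        WPA2_KEY_DATA_SUBTYPE_GTK, &gtk_encap) == BCME_OK) {
+ *		// gtk_encap->data now points at a length-validated GTK KDE body
+ *	}
+ */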
+
+#ifdef WL_OCV
+bool
+wpa_check_ocv_caps(uint16 local_caps, uint16 peer_caps)
+{
+ bool ocv_enabled =
+ ((local_caps & RSN_CAP_OCVC) &&
+ (peer_caps & RSN_CAP_OCVC));
+ bool mfp_enabled =
+ ((peer_caps & RSN_CAP_MFPC) ||
+ (peer_caps & RSN_CAP_MFPR));
+
+ return (ocv_enabled && mfp_enabled);
+}
+
+int
+wpa_add_oci_encap(chanspec_t chspec, uint8* buf, uint buf_len)
+{
+ int retval = BCME_OK;
+ eapol_wpa2_encap_data_t* oci_kde;
+ uint len = buf_len;
+
+ if (buf_len < WPA_OCV_OCI_KDE_SIZE) {
+ retval = BCME_BUFTOOSHORT;
+ goto done;
+ }
+
+ oci_kde = (eapol_wpa2_encap_data_t*)buf;
+
+ oci_kde->type = DOT11_MNG_WPA_ID;
+ oci_kde->subtype = WPA2_KEY_DATA_SUBTYPE_OCI;
+ oci_kde->length = (WPA_OCV_OCI_KDE_SIZE - TLV_HDR_LEN);
+
+ oci_kde->oui[0u] = WPA2_OUI[0u];
+ oci_kde->oui[1u] = WPA2_OUI[1u];
+ oci_kde->oui[2u] = WPA2_OUI[2u];
+
+ buf += EAPOL_WPA2_ENCAP_DATA_HDR_LEN;
+ len -= EAPOL_WPA2_ENCAP_DATA_HDR_LEN;
+
+ retval = bcm_ocv_write_oci(chspec, buf, len);
+ if (retval != BCME_OK) {
+ goto done;
+ }
+
+done:
+ return retval;
+}
+
+int
+wpa_add_oci_ie(chanspec_t chspec, uint8* buf, uint buf_len)
+{
+ int retval = BCME_OK;
+ uint8* oci_buf = buf + BCM_TLV_EXT_HDR_SIZE;
+
+ if (buf_len < (bcm_ocv_get_oci_len() + BCM_TLV_EXT_HDR_SIZE)) {
+ retval = BCME_BUFTOOSHORT;
+ goto done;
+ }
+
+ retval = bcm_ocv_write_oci(chspec, oci_buf, bcm_ocv_get_oci_len());
+ if (retval != BCME_OK) {
+ goto done;
+ }
+
+ (void)bcm_write_tlv_ext(DOT11_MNG_ID_EXT_ID,
+ OCV_EXTID_MNG_OCI_ID, oci_buf, bcm_ocv_get_oci_len(), buf);
+
+done:
+ return retval;
+}
+
+int
+wpa_add_oci_ft_subelem(chanspec_t chspec, uint8* buf, uint buf_len)
+{
+ int retval = BCME_OK;
+ uint8* oci_buf = buf + BCM_TLV_HDR_SIZE;
+
+ if (buf_len < (bcm_ocv_get_oci_len() + BCM_TLV_HDR_SIZE)) {
+ retval = BCME_BUFTOOSHORT;
+ goto done;
+ }
+
+ retval = bcm_ocv_write_oci(chspec, oci_buf, bcm_ocv_get_oci_len());
+ if (retval != BCME_OK) {
+ goto done;
+ }
+
+ bcm_write_tlv_safe(DOT11_FBT_SUBELEM_ID_OCI,
+ oci_buf, bcm_ocv_get_oci_len(), buf, buf_len);
+
+done:
+ return retval;
+}
+
+int wpa_validate_oci_encap(chanspec_t chspec, const uint8* buf, uint buf_len)
+{
+ int retval = BCME_OK;
+ eapol_wpa2_encap_data_t *encap = NULL;
+
+ retval = wpa_find_kde_data(buf, buf_len, WPA2_KEY_DATA_SUBTYPE_OCI, &encap);
+ if (retval != BCME_OK) {
+ retval = BCME_NOTFOUND;
+ goto done;
+ }
+
+ retval = bcm_ocv_validate_oci(chspec,
+ encap->data, encap->length);
+ if (retval != BCME_OK) {
+ goto done;
+ }
+
+done:
+ return retval;
+}
+
+int wpa_validate_oci_ie(chanspec_t chspec, const uint8* buf, uint buf_len)
+{
+ int retval = BCME_OK;
+ bcm_tlv_ext_t *oci_ie;
+
+ oci_ie = (bcm_tlv_ext_t *)bcm_parse_tlvs_dot11(buf, buf_len,
+ OCV_EXTID_MNG_OCI_ID, TRUE);
+
+ if (!oci_ie) {
+ retval = BCME_NOTFOUND;
+ goto done;
+ }
+
+ retval = bcm_ocv_validate_oci(chspec, oci_ie->data, oci_ie->len);
+ if (retval != BCME_OK) {
+ goto done;
+ }
+
+done:
+ return retval;
+}
+
+int wpa_validate_oci_ft_subelem(chanspec_t chspec, const uint8* buf, uint buf_len)
+{
+ int retval = BCME_OK;
+ bcm_tlv_t *oci_ie;
+
+ oci_ie = (bcm_tlv_t *)bcm_parse_tlvs_dot11(buf, buf_len,
+ DOT11_FBT_SUBELEM_ID_OCI, FALSE);
+
+ if (!oci_ie) {
+ retval = BCME_NOTFOUND;
+ goto done;
+ }
+
+ retval = bcm_ocv_validate_oci(chspec, oci_ie->data, oci_ie->len);
+ if (retval != BCME_OK) {
+ goto done;
+ }
+
+done:
+ return retval;
+}
+#endif /* WL_OCV */
+#endif /* defined(BCMSUP_PSK) || defined(BCMSUPPL) || defined(GTKOE) || defined(WL_FILS) */
+
+const uint8 *
+wpa_array_cmp(int max_array, const uint8 *x, const uint8 *y, uint len)
+{
+ uint i;
+ const uint8 *ret = x;
+
+ for (i = 0; i < len; i++)
+ if (x[i] != y[i])
+ break;
+
+ if (i == len) {
+ /* returning null will cause crash, return value used for copying */
+ /* return first param in this case to close security loophole */
+ return x;
+ }
+ if (max_array && (y[i] > x[i]))
+ ret = y;
+ if (!max_array && (y[i] < x[i]))
+ ret = y;
+
+ return (ret);
+}
+
+void
+wpa_incr_array(uint8 *array, uint len)
+{
+ int i;
+
+ for (i = (len-1); i >= 0; i--)
+ if (array[i]++ != 0xff) {
+ break;
+ }
+}
+
+bool
+bcmwpa_akm2WPAauth(uint8 *akm, uint32 *auth, bool sta_iswpa)
+{
+ uint i;
+ oui_akm_wpa_tbl_t wpa_auth_tbl_match[] = {
+ {WPA2_OUI, RSN_AKM_NONE, WPA_AUTH_NONE},
+ {WPA2_OUI, RSN_AKM_UNSPECIFIED, WPA2_AUTH_UNSPECIFIED},
+ {WPA2_OUI, RSN_AKM_PSK, WPA2_AUTH_PSK},
+ {WPA2_OUI, RSN_AKM_FBT_1X, WPA2_AUTH_UNSPECIFIED | WPA2_AUTH_FT},
+ {WPA2_OUI, RSN_AKM_FBT_PSK, WPA2_AUTH_PSK | WPA2_AUTH_FT},
+ {WPA2_OUI, RSN_AKM_SHA256_1X, WPA2_AUTH_1X_SHA256},
+ {WPA2_OUI, RSN_AKM_SHA256_PSK, WPA2_AUTH_PSK_SHA256},
+ {WPA2_OUI, RSN_AKM_FILS_SHA256, WPA2_AUTH_FILS_SHA256},
+ {WPA2_OUI, RSN_AKM_FILS_SHA384, WPA2_AUTH_FILS_SHA384},
+ {WPA2_OUI, RSN_AKM_FBT_SHA256_FILS, WPA2_AUTH_FILS_SHA256 | WPA2_AUTH_FT},
+ {WPA2_OUI, RSN_AKM_FBT_SHA384_FILS, WPA2_AUTH_FILS_SHA384 | WPA2_AUTH_FT},
+ {WPA2_OUI, RSN_AKM_SAE_PSK, WPA3_AUTH_SAE_PSK},
+ {WPA2_OUI, RSN_AKM_SAE_FBT, WPA3_AUTH_SAE_PSK | WPA2_AUTH_FT},
+ {WPA2_OUI, RSN_AKM_OWE, WPA3_AUTH_OWE},
+ {WPA2_OUI, RSN_AKM_SUITEB_SHA256_1X, WPA3_AUTH_1X_SUITE_B_SHA256},
+ {WPA2_OUI, RSN_AKM_SUITEB_SHA384_1X, WPA3_AUTH_1X_SUITE_B_SHA384},
+ {WFA_OUI, OSEN_AKM_UNSPECIFIED, WPA2_AUTH_UNSPECIFIED},
+ {WFA_OUI, RSN_AKM_FBT_SHA256_FILS, WPA2_AUTH_FILS_SHA256 | WPA2_AUTH_FT},
+ {WFA_OUI, RSN_AKM_FBT_SHA384_FILS, WPA2_AUTH_FILS_SHA384 | WPA2_AUTH_FT},
+ {WFA_OUI, RSN_AKM_DPP, WPA3_AUTH_DPP_AKM},
+
+#ifdef BCMWAPI_WAI
+ {WAPI_OUI, RSN_AKM_NONE, WAPI_AUTH_NONE},
+ {WAPI_OUI, RSN_AKM_UNSPECIFIED, WAPI_AUTH_UNSPECIFIED},
+ {WAPI_OUI, RSN_AKM_PSK, WAPI_AUTH_PSK},
+#endif /* BCMWAPI_WAI */
+
+ {WPA_OUI, RSN_AKM_NONE, WPA_AUTH_NONE},
+ {WPA_OUI, RSN_AKM_UNSPECIFIED, WPA_AUTH_UNSPECIFIED},
+ {WPA_OUI, RSN_AKM_PSK, WPA_AUTH_PSK},
+ };
+
+ BCM_REFERENCE(sta_iswpa);
+
+ for (i = 0; i < ARRAYSIZE(wpa_auth_tbl_match); i++) {
+ if (!memcmp(akm, wpa_auth_tbl_match[i].oui, DOT11_OUI_LEN)) {
+ if (wpa_auth_tbl_match[i].rsn_akm == akm[DOT11_OUI_LEN]) {
+ *auth = wpa_auth_tbl_match[i].wpa_auth;
+ return TRUE;
+ }
+ }
+ }
+ return FALSE;
+}
+
+/* map cipher suite to internal WSEC_XXXX */
+/* cipher points to a 4-byte cipher suite; only the type is used for non-CCX ciphers */
+bool
+bcmwpa_cipher2wsec(uint8 *cipher, uint32 *wsec)
+{
+
+#ifdef BCMWAPI_WAI
+ if (!memcmp(cipher, WAPI_OUI, DOT11_OUI_LEN)) {
+ switch (WAPI_CSE_WPI_2_CIPHER(cipher[DOT11_OUI_LEN])) {
+ case WAPI_CIPHER_NONE:
+ *wsec = 0;
+ break;
+ case WAPI_CIPHER_SMS4:
+ *wsec = SMS4_ENABLED;
+ break;
+ default:
+ return FALSE;
+ }
+ return TRUE;
+ }
+#endif /* BCMWAPI_WAI */
+
+ switch (cipher[DOT11_OUI_LEN]) {
+ case WPA_CIPHER_NONE:
+ *wsec = 0;
+ break;
+ case WPA_CIPHER_WEP_40:
+ case WPA_CIPHER_WEP_104:
+ *wsec = WEP_ENABLED;
+ break;
+ case WPA_CIPHER_TKIP:
+ *wsec = TKIP_ENABLED;
+ break;
+ case WPA_CIPHER_AES_CCM:
+ /* fall through */
+ case WPA_CIPHER_AES_GCM:
+ /* fall through */
+ case WPA_CIPHER_AES_GCM256:
+ *wsec = AES_ENABLED;
+ break;
+
+#ifdef BCMWAPI_WAI
+ case WAPI_CIPHER_SMS4:
+ *wsec = SMS4_ENABLED;
+ break;
+#endif /* BCMWAPI_WAI */
+
+ default:
+ return FALSE;
+ }
+ return TRUE;
+}
+
+#ifdef RSN_IE_INFO_STRUCT_RELOCATED
+/* map WPA/RSN cipher to internal WSEC */
+uint32
+bcmwpa_wpaciphers2wsec(uint32 wpacipher)
+{
+ uint32 wsec = 0;
+
+ switch (wpacipher) {
+ case BCM_BIT(WPA_CIPHER_WEP_40):
+ case BCM_BIT(WPA_CIPHER_WEP_104):
+ wsec = WEP_ENABLED;
+ break;
+ case BCM_BIT(WPA_CIPHER_TKIP):
+ wsec = TKIP_ENABLED;
+ break;
+ case BCM_BIT(WPA_CIPHER_AES_OCB):
+ /* fall through */
+ case BCM_BIT(WPA_CIPHER_AES_CCM):
+ wsec = AES_ENABLED;
+ break;
+ case BCM_BIT(WPA_CIPHER_AES_GCM):
+ /* fall through */
+ case BCM_BIT(WPA_CIPHER_AES_GCM256):
+ wsec = AES_ENABLED;
+ break;
+
+#ifdef BCMWAPI_WAI
+ case BCM_BIT(WAPI_CIPHER_SMS4):
+ wsec = SMS4_ENABLED;
+ break;
+#endif /* BCMWAPI_WAI */
+
+ default:
+ break;
+ }
+
+ return wsec;
+}
+
+uint32
+wlc_convert_rsn_to_wsec_bitmap(uint32 ap_cipher_mask)
+{
+
+ uint32 ap_wsec = 0;
+ uint32 tmp_mask = ap_cipher_mask;
+ uint32 c;
+
+ FOREACH_BIT(c, tmp_mask) {
+ ap_wsec |= bcmwpa_wpaciphers2wsec(c);
+ }
+
+ return ap_wsec;
+}
+
+#else /* Not RSN_IE_INFO_STRUCT_RELOCATED */
+uint32
+bcmwpa_wpaciphers2wsec(uint8 wpacipher)
+{
+ uint32 wsec = 0;
+
+ switch (wpacipher) {
+ case WPA_CIPHER_NONE:
+ break;
+ case WPA_CIPHER_WEP_40:
+ case WPA_CIPHER_WEP_104:
+ wsec = WEP_ENABLED;
+ break;
+ case WPA_CIPHER_TKIP:
+ wsec = TKIP_ENABLED;
+ break;
+ case WPA_CIPHER_AES_OCB:
+ /* fall through */
+ case WPA_CIPHER_AES_CCM:
+ wsec = AES_ENABLED;
+ break;
+ case WPA_CIPHER_AES_GCM:
+ /* fall through */
+ case WPA_CIPHER_AES_GCM256:
+ wsec = AES_ENABLED;
+ break;
+
+#ifdef BCMWAPI_WAI
+ case WAPI_CIPHER_SMS4:
+ wsec = SMS4_ENABLED;
+ break;
+#endif /* BCMWAPI_WAI */
+
+ default:
+ break;
+ }
+
+ return wsec;
+}
+#endif /* RSN_IE_INFO_STRUCT_RELOCATED */
+
+bool
+bcmwpa_is_wpa_auth(uint32 auth)
+{
+ if ((auth == WPA_AUTH_NONE) ||
+ (auth == WPA_AUTH_UNSPECIFIED) ||
+ (auth == WPA_AUTH_PSK))
+ return TRUE;
+ else
+ return FALSE;
+}
+
+bool
+bcmwpa_includes_wpa_auth(uint32 auth)
+{
+ if (auth & (WPA_AUTH_NONE |
+ WPA_AUTH_UNSPECIFIED |
+ WPA_AUTH_PSK))
+ return TRUE;
+ else
+ return FALSE;
+}
+
+bool
+bcmwpa_is_rsn_auth(uint32 auth)
+{
+ auth = auth & ~WPA2_AUTH_FT;
+
+ if ((auth == WPA2_AUTH_UNSPECIFIED) ||
+ (auth == WPA2_AUTH_PSK) ||
+ (auth == BRCM_AUTH_PSK) ||
+ (auth == WPA2_AUTH_1X_SHA256) ||
+ (auth == WPA2_AUTH_PSK_SHA256) ||
+ (auth == WPA3_AUTH_SAE_PSK) ||
+ (auth == WPA3_AUTH_OWE) ||
+ WPA2_AUTH_IS_FILS(auth) ||
+ (auth == WPA3_AUTH_1X_SUITE_B_SHA256) ||
+ (auth == WPA3_AUTH_1X_SUITE_B_SHA384) ||
+ (auth == WPA3_AUTH_PSK_SHA384) ||
+ (auth == WPA3_AUTH_DPP_AKM)) {
+ return TRUE;
+ } else {
+ return FALSE;
+ }
+}
+
+bool
+bcmwpa_includes_rsn_auth(uint32 auth)
+{
+ if (auth & (WPA2_AUTH_UNSPECIFIED |
+ WPA2_AUTH_PSK |
+ BRCM_AUTH_PSK | WPA2_AUTH_1X_SHA256 | WPA2_AUTH_PSK_SHA256 |
+ WPA2_AUTH_IS_FILS(auth) | WPA3_AUTH_SAE_PSK | WPA3_AUTH_OWE |
+ WPA3_AUTH_1X_SUITE_B_SHA256 | WPA3_AUTH_1X_SUITE_B_SHA384 |
+ WPA3_AUTH_PSK_SHA384 | WPA3_AUTH_DPP_AKM))
+ return TRUE;
+ else
+ return FALSE;
+}
+
+#ifdef RSN_IE_INFO_STRUCT_RELOCATED
+/* decode unicast/multicast cipher in RSNIE */
+static int
+bcmwpa_decode_cipher_suite(rsn_ie_info_t *info, const uint8 **ptr_inc, uint ie_len, uint
+ *remain_len, uint16 *p_count)
+{
+ const wpa_suite_ucast_t *ucast;
+ const wpa_suite_mcast_t *mcast;
+ uint i;
+
+ if (!(*remain_len)) {
+ info->g_cipher = WPA_CIPHER_UNSPECIFIED;
+ info->p_ciphers = WPA_P_CIPHERS_UNSPECIFIED;
+ goto done; /* only have up to ver */
+ }
+ *ptr_inc += ie_len - *remain_len;
+
+ if (*remain_len < sizeof(wpa_suite_mcast_t)) {
+ info->parse_status = BCME_BADLEN;
+ goto done;
+ }
+ mcast = (const wpa_suite_mcast_t *)*ptr_inc;
+
+ if (IS_WPA_CIPHER(mcast->type)) {
+ info->g_cipher = mcast->type;
+ } else {
+ info->parse_status = BCME_BAD_IE_DATA;
+ goto done;
+ }
+
+ /* for rsn pairwise cipher suite */
+ *ptr_inc += sizeof(wpa_suite_mcast_t);
+ *remain_len -= sizeof(wpa_suite_mcast_t);
+
+ if (!(*remain_len)) {
+ info->p_ciphers = WPA_P_CIPHERS_UNSPECIFIED;
+ info->sta_cipher = WPA_CIPHER_UNSPECIFIED;
+ goto done;
+ }
+
+ ucast = (const wpa_suite_ucast_t *)*ptr_inc;
+
+ if ((*remain_len) < sizeof(ucast->count)) {
+ info->parse_status = BCME_BADLEN;
+ goto done;
+ }
+
+ if (!ucast->count.low && !ucast->count.high) {
+ info->parse_status = BCME_BADLEN;
+ goto done;
+ }
+
+ *p_count = ltoh16_ua(&ucast->count);
+ if (info->dev_type == DEV_STA && *p_count != 1u) {
+ info->parse_status = BCME_BAD_IE_DATA;
+ goto done;
+ }
+ if ((*remain_len) < (*p_count * WPA_SUITE_LEN + sizeof(ucast->count))) {
+ info->parse_status = BCME_BADLEN;
+ goto done;
+ }
+
+ if (info->dev_type == DEV_STA) {
+ if (IS_WPA_CIPHER(ucast->list[0].type)) {
+ /* update the pairwise cipher */
+ info->sta_cipher = ucast->list[0].type;
+ } else {
+ info->parse_status = BCME_BAD_IE_DATA;
+ goto done;
+ }
+ } else {
+ for (i = 0; i < *p_count; i++) {
+ if (IS_WPA_CIPHER(ucast->list[i].type)) {
+ info->p_ciphers |= BIT(ucast->list[i].type);
+ info->rsn_p_ciphers = info->p_ciphers;
+ } else {
+ info->parse_status = BCME_BAD_IE_DATA;
+ goto done;
+ }
+ }
+ }
+
+ /* update buffer ptr and remaining length */
+ *ptr_inc += (*p_count * WPA_SUITE_LEN) + sizeof(ucast->count);
+ *remain_len -= (*p_count * WPA_SUITE_LEN) + sizeof(ucast->count);
+
+done:
+
+ if (info->parse_status == BCME_OK) {
+ if (info->g_cipher == WPA_CIPHER_UNSPECIFIED) {
+ info->g_cipher = WPA_CIPHER_AES_CCM;
+ }
+ if (info->p_ciphers == WPA_P_CIPHERS_UNSPECIFIED) {
+ info->p_ciphers = BIT(WPA_CIPHER_AES_CCM);
+ info->rsn_p_ciphers = info->p_ciphers;
+ }
+ }
+
+ return info->parse_status;
+}
+/* sta_akm/sta_cipher must be set before this call */
+int
+bcmwpa_rsnie_eapol_key_len(rsn_ie_info_t *info)
+{
+ info->pmk_len = bcmwpa_eapol_key_length(EAPOL_KEY_PMK, info->sta_akm, 0);
+ info->kck_mic_len = bcmwpa_eapol_key_length(EAPOL_KEY_KCK_MIC, info->sta_akm, 0);
+ info->kck_len = bcmwpa_eapol_key_length(EAPOL_KEY_KCK, info->sta_akm, 0);
+ info->kek_len = bcmwpa_eapol_key_length(EAPOL_KEY_KEK, info->sta_akm, 0);
+ info->tk_len = bcmwpa_eapol_key_length(EAPOL_KEY_TK, 0, info->sta_cipher);
+ info->ptk_len = info->kck_len + info->kek_len + info->tk_len;
+#if defined(WL_FILS) && defined(WLFBT)
+ info->kck2_len = bcmwpa_eapol_key_length(EAPOL_KEY_KCK2, info->sta_akm, 0);
+ info->kek2_len = bcmwpa_eapol_key_length(EAPOL_KEY_KEK2, info->sta_akm, 0);
+ if (WPA_IS_FILS_FT_AKM(info->sta_akm)) {
+ info->ptk_len += (info->kck2_len + info->kek2_len);
+ }
+#endif /* WL_FILS && WLFBT */
+ return BCME_OK;
+}
+/* Extract and store information from WPA or RSN IEs
+ *
+ * called after either
+ * - an association request has been built (STA),
+ * - an association was received (AP)
+ * - a probe request has been built (AP)
+ * - a probe response was received (STA)
+ *
+ * All available information is extracted to be used for subsequent
+ * bss pruning, association request validation, key descriptor computation, etc.
+ *
+ * To be expanded as needed.
+ *
+ * ie: RSN IE input
+ * rsn_info: parsed information. Placed in either bsscfg for self, or scb for peer.
+ * dev_type: STA_RSN or AP_RSN
+ *
+ * Return : parse status.
+ * NOTE: the parse status is also saved in the parse_status field.
+ * NOTE 2 : the IE itself is copied at the end of the structure. Since there is
+ * no reference to the osh available here, the allocation has to happen outside
+ * and so the structure cannot be zeroed in this function.
+ * For the STA, it should happen every time.
+ * For the AP, it should happen right after a new beacon/probe has been acquired.
+ */
+
+int
+bcmwpa_parse_rsnie(const bcm_tlv_t *ie, rsn_ie_info_t *info, device_type_t dev_type)
+{
+
+ const uint8 *ptr_inc = NULL;
+ const wpa_suite_mcast_t *mcast;
+ const wpa_suite_auth_key_mgmt_t *mgmt;
+ const wpa_pmkid_list_t *pmkid_list;
+ uint32 remain_len = 0, i;
+ uint8 auth_ie_type;
+ uint16 p_count = 0;
+ uint16 akm_count;
+
+ ASSERT(info != NULL);
+
+ /* this function might be called from places where there
+ * is no error detection,
+ * e.g. from the iem callback. Store status here.
+ */
+
+ info->parse_status = BCME_OK;
+
+ if (!ie) {
+ info->parse_status = BCME_BADARG;
+ goto done;
+ }
+
+ /* For AP, do not zero this structure since there could be multiple
+ * IEs. In that case, add to the existing
+ * bits in field (ciphers, akms) as necessary.
+ */
+ if (dev_type == DEV_AP) {
+ /* if already created, check device type */
+ if (info->dev_type != DEV_NONE) {
+ if (info->dev_type != DEV_AP) {
+ info->parse_status = BCME_BADARG;
+ goto done;
+ }
+ }
+ }
+ info->dev_type = dev_type;
+ ptr_inc = ie->data;
+
+ /* decode auth IE (WPA vs RSN). Fill in the auth_ie_type and version.
+ * Modify remain_len to indicate the position of the pointer.
+ */
+ /* NOTE the status field will be updated in this call */
+ if (bcmwpa_decode_ie_type(ie, info, &remain_len, &auth_ie_type) != BCME_OK) {
+ goto done;
+ }
+
+ /* decode multicast, unicast ciphers */
+ if (bcmwpa_decode_cipher_suite(info, &ptr_inc, ie->len, &remain_len, &p_count) != BCME_OK) {
+ goto done;
+ }
+
+ if (!(remain_len)) {
+ info->akms = BIT(RSN_AKM_UNSPECIFIED);
+ goto done;
+ }
+
+ mgmt = (const wpa_suite_auth_key_mgmt_t *)ptr_inc;
+
+ if (remain_len < sizeof(mgmt->count)) {
+ info->parse_status = BCME_BADLEN;
+ goto done;
+ }
+
+ akm_count = ltoh16_ua(&mgmt->count);
+
+ if (!akm_count) {
+ info->parse_status = BCME_BADARG;
+ goto done;
+ }
+
+ if (dev_type == DEV_STA && akm_count != 1) {
+ info->parse_status = BCME_BADARG;
+ goto done;
+ }
+
+ if ((remain_len) < (akm_count * WPA_SUITE_LEN + sizeof(mgmt->count))) {
+ info->parse_status = BCME_BADLEN;
+ goto done;
+ }
+
+ if (dev_type == DEV_STA) {
+ info->sta_akm = mgmt->list[0].type;
+ }
+ for (i = 0; i < akm_count; i++) {
+ if (bcmwpa_is_valid_akm(mgmt->list[i].type) == BCME_OK) {
+ ASSERT((mgmt->list[i].type) <
+ (sizeof(info->akms) * NBBY));
+ info->akms |= BIT(mgmt->list[i].type);
+ }
+ }
+
+ /* save IE dependent values in their respective fields */
+ if (dev_type == DEV_AP) {
+ if (auth_ie_type == RSN_AUTH_IE) {
+ info->rsn_akms = info->akms;
+ } else if (auth_ie_type == WPA_AUTH_IE) {
+ info->wpa_akms = info->akms;
+ info->wpa_p_ciphers = info->p_ciphers;
+ }
+ }
+
+ /* as a STA, at this point, we can compute the key descriptor version */
+ if (dev_type == DEV_STA) {
+ info->key_desc = wlc_calc_rsn_desc_version(info);
+ /* For STA, we can set the auth ie */
+ if (auth_ie_type == RSN_AUTH_IE) {
+ info->auth_ie = info->rsn_ie;
+ info->auth_ie_len = info->rsn_ie_len;
+ } else {
+ info->auth_ie = info->wpa_ie;
+ info->auth_ie_len = info->wpa_ie_len;
+ }
+ }
+
+ /* RSN AKM/cipher suite related EAPOL key length update */
+ bcmwpa_rsnie_eapol_key_len(info);
+
+ /* for rsn capabilities */
+ ptr_inc += akm_count * WPA_SUITE_LEN + sizeof(mgmt->count);
+ remain_len -= akm_count * WPA_SUITE_LEN + sizeof(mgmt->count);
+
+ if (!(remain_len)) {
+ goto done;
+ }
+ if (remain_len < RSN_CAP_LEN) {
+ info->parse_status = BCME_BADLEN;
+ goto done;
+ }
+
+ if (ie->id == DOT11_MNG_RSN_ID) {
+ info->caps = ltoh16_ua(ptr_inc);
+ }
+
+ /* check if AKMs require MFP capable to be set */
+ if ((info->akms & RSN_MFPC_AKM_MASK) && !(info->caps & RSN_CAP_MFPC)) {
+ /* NOTE: When acting as a WPA3 CTT testbed device, the assoc request frame must
+ be sent with the user-provided MFP value as is, so do not return an error here.
+ */
+#ifndef WPA3_CTT
+ info->parse_status = BCME_EPERM;
+ goto done;
+#endif /* WPA3_CTT */
+ }
+
+ /* for rsn PMKID */
+ ptr_inc += RSN_CAP_LEN;
+ remain_len -= RSN_CAP_LEN;
+
+ if (!(remain_len)) {
+ goto done;
+ }
+
+ /* here are the possible cases after RSN_CAP is parsed
+ * a) pmkid_count 2B(00 00)
+ * b) pmkid_count 2B(00 00) + BIP 4B
+ * c) pmkid_count 2B(non zero) + pmkid_count * 16B
+ * d) pmkid_count 2B(non zero) + pmkid_count * 16B + BIP 4B
+ */
+
+ /* pmkids_offset is set as follows:
+ * 1) if the pmkid_count field (2B) is present, it points to the first PMKID offset
+ * in the RSN IE, no matter what the pmkid_count value is (true even if pmkid_count == 00 00)
+ * 2) if the pmkid_count field (2B) is not present, it shall be zero.
+ */
+
+ pmkid_list = (const wpa_pmkid_list_t*)ptr_inc;
+
+ if ((remain_len) < sizeof(pmkid_list->count)) {
+ info->parse_status = BCME_BADLEN;
+ goto done;
+ }
+
+ info->pmkid_count = (uint8)ltoh16_ua(&pmkid_list->count);
+ ptr_inc += sizeof(pmkid_list->count);
+ remain_len -= sizeof(pmkid_list->count);
+
+ if (remain_len < (uint32)(info->pmkid_count * WPA2_PMKID_LEN)) {
+ info->parse_status = BCME_BADLEN;
+ goto done;
+ }
+
+ info->pmkids_offset = ie->len + TLV_HDR_LEN - remain_len;
+ /* for rsn group management cipher suite */
+ ptr_inc += info->pmkid_count * WPA2_PMKID_LEN;
+ remain_len -= info->pmkid_count * WPA2_PMKID_LEN;
+
+ if (!(remain_len)) {
+ goto done;
+ }
+ /*
+ * from WPA2_Security_Improvements_Test_Plan_v1.0
+ * 4.2.4 APUT RSNE bounds verification using WPA2-PSK
+ * May contain an RSNE extensible element at this point
+ */
+ if (remain_len < sizeof(wpa_suite_mcast_t)) {
+ info->parse_status = BCME_BADLEN;
+ goto done;
+ }
+
+ mcast = (const wpa_suite_mcast_t *)ptr_inc;
+ if (IS_VALID_BIP_CIPHER((rsn_cipher_t)mcast->type)) {
+ info->g_mgmt_cipher = (rsn_cipher_t)mcast->type;
+ }
+
+done:
+ return info->parse_status;
+}
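+
+/* A minimal parse sketch for the routine above (illustrative only: tlvs and
+ * tlvs_len stand in for a received IE buffer, and bcm_parse_tlvs for the usual
+ * TLV lookup; the caller provides and zeroes the info struct, as the function
+ * header notes):
+ *
+ *	rsn_ie_info_t info;
+ *	const bcm_tlv_t *rsn;
+ *
+ *	bzero(&info, sizeof(info));
+ *	rsn = bcm_parse_tlvs(tlvs, tlvs_len, DOT11_MNG_RSN_ID);
+ *	if (rsn && bcmwpa_parse_rsnie(rsn, &info, DEV_STA) == BCME_OK) {
+ *		// info.akms, info.p_ciphers and info.key_desc are now populated
+ *	}
+ */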
+
+/* Determine if the IE is of WPA or RSN type. Decode
+ * up to version field. Modify the remaining len parameter to
+ * indicate where the next field is.
+ * Store and return error status.
+ */
+
+int
+bcmwpa_decode_ie_type(const bcm_tlv_t *ie, rsn_ie_info_t *info, uint32 *remaining,
+ uint8 *type)
+{
+ const uint8 * ptr_inc = (const uint8 *)ie->data;
+ uint32 remain_len = ie->len;
+ uint8 version, version_len;
+
+ if (ie->id == DOT11_MNG_WPA_ID) {
+ /* min len check */
+ if (remain_len < WPA_IE_FIXED_LEN) {
+ info->parse_status = BCME_BADLEN;
+ goto done;
+ }
+ /* WPA IE */
+ if (memcmp(WPA_OUI, ie->data, WPA_OUI_LEN)) {
+ /* bad OUI */
+ info->parse_status = BCME_BADARG;
+ goto done;
+ }
+ ptr_inc += WPA_OUI_LEN;
+ if (*ptr_inc == WPA_OUI_TYPE) {
+ *type = WPA_AUTH_IE;
+ } else if (*ptr_inc == WFA_OUI_TYPE_OSEN) {
+ *type = OSEN_AUTH_IE;
+ }
+ else {
+ /* wrong type */
+ info->parse_status = BCME_BADARG;
+ goto done;
+ }
+
+ ptr_inc ++;
+ remain_len -= WPA_OUI_LEN + 1u;
+ version_len = WPA_VERSION_LEN;
+ }
+ else if (ie->id == DOT11_MNG_RSN_ID) {
+ if (remain_len < WPA2_VERSION_LEN) {
+ info->parse_status = BCME_BADLEN;
+ goto done;
+ }
+ /* RSN IE */
+ *type = RSN_AUTH_IE;
+ version_len = WPA2_VERSION_LEN;
+ } else {
+ printf("IE ID %d\n", ie->id);
+ /* TODO : add support for CCX, WAPI ? */
+ info->parse_status = BCME_UNSUPPORTED;
+ goto done;
+ }
+ info->auth_ie_type |= *type;
+ /* mask down to uint8 for Windows build */
+ version = 0xff & ltoh16_ua(ptr_inc);
+ if (version > MAX_RSNE_SUPPORTED_VERSION) {
+ info->parse_status = BCME_UNSUPPORTED;
+ goto done;
+ }
+
+ info->version = (uint8)version;
+ *remaining = remain_len - version_len;
+done:
+ return info->parse_status;
+}
+
+/* rsn info allocation management.
+ *
+ * In some cases, the rsn ie info structures are embedded in the scan results
+ * which can be shared by different lists.
+ * To keep track of their allocation, we use a reference counter.
+ * The counter is incremented on demand by rsn_ie_info_add_ref()
+ * at the time the reference is shared.
+ * It is decremented in rsn_ie_info_rel_ref
+ * When ref_count gets to 0, bcmwpa_rsn_ie_info_free_mem
+ * is called to free the whole structure.
+ */
+
+/* free rsn_ie and wpa_ie, if any, and zero the rsn_info */
+void
+bcmwpa_rsn_ie_info_reset(rsn_ie_info_t *rsn_info, osl_t *osh)
+{
+ uint8 ref_count;
+ if (rsn_info == NULL) {
+ return;
+ }
+ ref_count = rsn_info->ref_count;
+ MFREE(osh, rsn_info->rsn_ie, rsn_info->rsn_ie_len);
+ MFREE(osh, rsn_info->wpa_ie, rsn_info->wpa_ie_len);
+ MFREE(osh, rsn_info->rsnxe, rsn_info->rsnxe_len);
+ bzero(rsn_info, sizeof(*rsn_info));
+ rsn_info->ref_count = ref_count;
+
+}
+
+static
+void bcmwpa_rsn_ie_info_free_mem(rsn_ie_info_t **rsn_info, osl_t *osh)
+{
+ bcmwpa_rsn_ie_info_reset(*rsn_info, osh);
+ MFREE(osh, *rsn_info, sizeof(**rsn_info));
+ *rsn_info = NULL;
+}
+
+void bcmwpa_rsn_ie_info_rel_ref(rsn_ie_info_t **rsn_info, osl_t *osh)
+{
+
+ if (rsn_info == NULL || *rsn_info == NULL) {
+ return;
+ }
+
+ /* already freed ? */
+ if ((*rsn_info)->ref_count == 0) {
+ ASSERT(0);
+ return;
+ }
+ /* decrement ref count */
+ (*rsn_info)->ref_count -= 1;
+ /* clear reference. */
+ if ((*rsn_info)->ref_count > 0) {
+ *rsn_info = NULL;
+ return;
+ }
+ /* free memory and clear reference */
+ bcmwpa_rsn_ie_info_free_mem(rsn_info, osh);
+}
+
+int
+bcmwpa_rsn_ie_info_add_ref(rsn_ie_info_t *rsn_info)
+{
+ int status = BCME_OK;
+ if (rsn_info == NULL) {
+ goto done;
+ }
+ if (rsn_info->ref_count == 0) {
+ /* don't increase from 0, which means this structure has been freed earlier.
+ * That reference should not exist anymore.
+ */
+ ASSERT(0);
+ status = BCME_BADARG;
+ goto done;
+ }
+ rsn_info->ref_count++;
+done:
+ return status;
+}
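+
+/* A lifecycle sketch of the reference counting described above (illustrative
+ * only: result->rsn_info stands for an rsn_ie_info_t shared via scan results,
+ * and osh for the caller's osl handle; neither is defined in this file):
+ *
+ *	rsn_ie_info_t *shared = result->rsn_info;      // ref_count already >= 1
+ *	if (bcmwpa_rsn_ie_info_add_ref(shared) == BCME_OK) {
+ *		// ... a second list now also owns the structure ...
+ *		bcmwpa_rsn_ie_info_rel_ref(&shared, osh);  // frees when count hits 0
+ *	}
+ */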
+
+#else /* Not RSN_IE_INFO_STRUCT_RELOCATED */
+
+int
+bcmwpa_parse_rsnie(const bcm_tlv_t *ie, rsn_ie_info_t *info, device_type_t dev_type)
+{
+
+ const uint8 *ptr_inc = NULL;
+ const wpa_suite_ucast_t *ucast;
+ const wpa_suite_mcast_t *mcast;
+ const wpa_suite_auth_key_mgmt_t *mgmt;
+ const wpa_pmkid_list_t *pmkid_list;
+ uint32 remain_len = 0, i;
+
+ ASSERT(info != NULL);
+
+ /* this function might be called from places where there
+ * is no error detection,
+ * e.g. from the iem callback. Store status here.
+ */
+
+ info->parse_status = BCME_OK;
+
+ if (!ie) {
+ info->parse_status = BCME_BADARG;
+ goto done;
+ }
+
+ /* For AP, do not zero this structure since there could be multiple
+ * IEs. In that case, add to the existing
+ * bits in field (ciphers, akms) as necessary.
+ */
+ if (dev_type != DEV_AP) {
+ bzero(info, sizeof(*info));
+ } else {
+ /* if already created, check device type */
+ if (info->dev_type != DEV_NONE) {
+ if (info->dev_type != DEV_AP) {
+ info->parse_status = BCME_BADARG;
+ goto done;
+ }
+ }
+ }
+ info->dev_type = dev_type;
+ ptr_inc = ie->data;
+
+ /* decode auth IE (WPA vs RSN). Fill in the auth_ie_type and version.
+ * Modify remain_len to indicate the position of the pointer.
+ */
+ /* NOTE the status field will be updated in this call */
+ if (bcmwpa_decode_ie_type(ie, info, &remain_len) != BCME_OK) {
+ goto done;
+ }
+
+ if (!(remain_len)) {
+ info->g_cipher = WPA_CIPHER_NONE;
+ goto done; /* only have up to ver */
+ }
+ ptr_inc += ie->len - remain_len;
+
+ if (remain_len < sizeof(wpa_suite_mcast_t)) {
+ info->parse_status = BCME_BADLEN;
+ goto done;
+ }
+ mcast = (const wpa_suite_mcast_t *)ptr_inc;
+
+ if (IS_WPA_CIPHER(mcast->type)) {
+ info->g_cipher = mcast->type;
+ }
+
+ /* for rsn pairwise cipher suite */
+ ptr_inc += sizeof(wpa_suite_mcast_t);
+ remain_len -= sizeof(wpa_suite_mcast_t);
+
+ if (!(remain_len)) {
+ goto done;
+ }
+
+ ucast = (const wpa_suite_ucast_t *)ptr_inc;
+
+ if ((remain_len) < sizeof(ucast->count)) {
+ info->parse_status = BCME_BADLEN;
+ goto done;
+ }
+
+ if (!ucast->count.low && !ucast->count.high) {
+ info->parse_status = BCME_BADLEN;
+ goto done;
+ }
+
+ info->p_count = (uint8)ltoh16_ua(&ucast->count);
+
+ if (dev_type == DEV_STA && info->p_count != 1) {
+ info->parse_status = BCME_BADARG;
+ goto done;
+ }
+ if ((remain_len) < (info->p_count * WPA_SUITE_LEN + sizeof(ucast->count))) {
+ info->parse_status = BCME_BADLEN;
+ goto done;
+ }
+
+ if (IS_WPA_CIPHER(ucast->list[0].type)) {
+ /* update the pairwise cipher */
+ if (dev_type == DEV_STA) {
+ info->sta_cipher = ucast->list[0].type;
+ } else {
+ for (i = 0; i < info->p_count; i++) {
+ if (IS_WPA_CIPHER(ucast->list[i].type)) {
+ info->p_ciphers |= BIT(ucast->list[i].type);
+ } else {
+ info->parse_status = BCME_BAD_IE_DATA;
+ goto done;
+ }
+ }
+ }
+ } else {
+ info->parse_status = BCME_BAD_IE_DATA;
+ goto done;
+ }
+
+ /* for rsn AKM authentication */
+ ptr_inc += info->p_count * WPA_SUITE_LEN + sizeof(ucast->count);
+ remain_len -= (info->p_count * WPA_SUITE_LEN + sizeof(ucast->count));
+
+ mgmt = (const wpa_suite_auth_key_mgmt_t *)ptr_inc;
+
+ if (remain_len < sizeof(mgmt->count)) {
+ info->parse_status = BCME_BADLEN;
+ goto done;
+ }
+
+ info->akm_count = (uint8)ltoh16_ua(&mgmt->count);
+
+ if (!info->akm_count) {
+ info->parse_status = BCME_BADARG;
+ goto done;
+ }
+
+ if (dev_type == DEV_STA && info->akm_count != 1) {
+ info->parse_status = BCME_BADARG;
+ goto done;
+ }
+
+ if ((remain_len) < (info->akm_count * WPA_SUITE_LEN + sizeof(mgmt->count))) {
+ info->parse_status = BCME_BADLEN;
+ goto done;
+ }
+
+ if (dev_type == DEV_STA) {
+ info->sta_akm = mgmt->list[0].type;
+ }
+ for (i = 0; i < info->akm_count; i++) {
+ if (bcmwpa_is_valid_akm(mgmt->list[i].type) == BCME_OK) {
+ ASSERT((mgmt->list[i].type) <
+ (sizeof(info->akms) * NBBY));
+ info->akms |= BIT(mgmt->list[i].type);
+ }
+ }
+
+ /* RSN AKM/cipher suite related EAPOL key length update */
+ info->pmk_len = bcmwpa_eapol_key_length(EAPOL_KEY_PMK, info->sta_akm, 0);
+ info->kck_mic_len = bcmwpa_eapol_key_length(EAPOL_KEY_KCK_MIC, info->sta_akm, 0);
+ info->kck_len = bcmwpa_eapol_key_length(EAPOL_KEY_KCK, info->sta_akm, 0);
+ info->kek_len = bcmwpa_eapol_key_length(EAPOL_KEY_KEK, info->sta_akm, 0);
+ info->tk_len = bcmwpa_eapol_key_length(EAPOL_KEY_TK, 0, info->sta_cipher);
+ info->ptk_len = info->kck_mic_len + info->kek_len + info->tk_len;
+#if defined(WL_FILS) && defined(WLFBT)
+ info->kck2_len = bcmwpa_eapol_key_length(EAPOL_KEY_KCK2, info->sta_akm, 0);
+ info->kek2_len = bcmwpa_eapol_key_length(EAPOL_KEY_KEK2, info->sta_akm, 0);
+#endif /* WL_FILS && WLFBT */
+
+ /* for rsn capabilities */
+ ptr_inc += info->akm_count * WPA_SUITE_LEN + sizeof(mgmt->count);
+ remain_len -= info->akm_count * WPA_SUITE_LEN + sizeof(mgmt->count);
+
+ /* as a STA, at this point, we can compute the key descriptor version */
+ if (dev_type == DEV_STA) {
+ info->key_desc = wlc_calc_rsn_desc_version(info);
+ }
+
+ if (!(remain_len)) {
+ goto done;
+ }
+ if (remain_len < RSN_CAP_LEN) {
+ info->parse_status = BCME_BADLEN;
+ goto done;
+ }
+
+ if (ie->id == DOT11_MNG_RSN_ID) {
+ info->caps = ltoh16_ua(ptr_inc);
+ }
+
+ /* Per WFA: if MFP is required, check that we are using a SHA256 AKM
+ * or higher and nothing else.
+ * If both MFP Required and MFP Capable are set, do not enforce the AKM check.
+ */
+ if ((info->caps & RSN_CAP_MFPR) && !(info->akms & (1u << RSN_AKM_PSK))) {
+ if ((info->akms & (AKM_SHA256_MASK | AKM_SHA384_MASK)) == 0 ||
+ (info->akms & ~(AKM_SHA256_MASK | AKM_SHA384_MASK))) {
+ info->parse_status = BCME_EPERM;
+ goto done;
+ }
+ }
+
+ /* check if AKMs require MFP capable to be set */
+ if ((info->akms & RSN_MFPC_AKM_MASK) && !(info->caps & RSN_CAP_MFPC)) {
+ info->parse_status = BCME_EPERM;
+ goto done;
+ }
+
+ /* for rsn PMKID */
+ ptr_inc += RSN_CAP_LEN;
+ remain_len -= RSN_CAP_LEN;
+
+ if (!(remain_len)) {
+ goto done;
+ }
+
+ pmkid_list = (const wpa_pmkid_list_t*)ptr_inc;
+
+ if ((remain_len) < sizeof(pmkid_list->count)) {
+ info->parse_status = BCME_BADLEN;
+ goto done;
+ }
+
+ info->pmkid_count = (uint8)ltoh16_ua(&pmkid_list->count);
+ ptr_inc += sizeof(pmkid_list->count);
+ remain_len -= sizeof(pmkid_list->count);
+
+ if (info->pmkid_count) {
+ if (remain_len < (uint32)(info->pmkid_count * WPA2_PMKID_LEN)) {
+ info->parse_status = BCME_BADLEN;
+ goto done;
+ }
+ info->pmkids_offset = ie->len + TLV_HDR_LEN - remain_len;
+ /* for rsn group management cipher suite */
+ ptr_inc += info->pmkid_count * WPA2_PMKID_LEN;
+ remain_len -= info->pmkid_count * WPA2_PMKID_LEN;
+ }
+
+ if (!(remain_len)) {
+ goto done;
+ }
+ /*
+ * from WPA2_Security_Improvements_Test_Plan_v1.0
+ * 4.2.4 APUT RSNE bounds verification using WPA2-PSK
+ * May contain an RSNE extensible element at this point
+ */
+ if (remain_len < sizeof(wpa_suite_mcast_t)) {
+ info->parse_status = BCME_BADLEN;
+ goto done;
+ }
+
+ mcast = (const wpa_suite_mcast_t *)ptr_inc;
+ if (IS_VALID_BIP_CIPHER((rsn_cipher_t)mcast->type)) {
+ info->g_mgmt_cipher = (rsn_cipher_t)mcast->type;
+ }
+
+done:
+ return info->parse_status;
+}
+
+/* Determine if the IE is of WPA or RSN type. Decode
+ * up to version field. Modify the remaining len parameter to
+ * indicate where the next field is.
+ * Store and return error status.
+ */
+
+int
+bcmwpa_decode_ie_type(const bcm_tlv_t *ie, rsn_ie_info_t *info, uint32 *remaining)
+{
+ const uint8 * ptr_inc = (const uint8 *)ie->data;
+ uint32 remain_len = ie->len;
+ uint8 version, version_len;
+
+ if (ie->id == DOT11_MNG_WPA_ID) {
+ /* min len check */
+ if (remain_len < WPA_IE_FIXED_LEN) {
+ info->parse_status = BCME_BADLEN;
+ goto done;
+ }
+ /* WPA IE */
+ if (memcmp(WPA_OUI, ie->data, WPA_OUI_LEN)) {
+ /* bad OUI */
+ info->parse_status = BCME_BADARG;
+ goto done;
+ }
+ ptr_inc += WPA_OUI_LEN;
+ if (*ptr_inc != WPA_OUI_TYPE) {
+ /* wrong type */
+ info->parse_status = BCME_BADARG;
+ goto done;
+ }
+ ptr_inc ++;
+ remain_len -= WPA_OUI_LEN + 1u;
+ info->auth_ie_type |= WPA_AUTH_IE;
+ version_len = WPA_VERSION_LEN;
+ }
+ else if (ie->id == DOT11_MNG_RSN_ID) {
+ if (remain_len < WPA2_VERSION_LEN) {
+ info->parse_status = BCME_BADLEN;
+ goto done;
+ }
+ /* RSN IE */
+ info->auth_ie_type |= RSN_AUTH_IE;
+ version_len = WPA2_VERSION_LEN;
+ } else {
+ /* TODO : add support for CCX, WAPI ? */
+ info->parse_status = BCME_UNSUPPORTED;
+ goto done;
+ }
+
+ /* mask down to uint8 for Windows build */
+ version = 0xff & ltoh16_ua(ptr_inc);
+ if (version > MAX_RSNE_SUPPORTED_VERSION) {
+ info->parse_status = BCME_UNSUPPORTED;
+ goto done;
+ }
+
+ info->version = (uint8)version;
+ *remaining = remain_len - version_len;
+done:
+ return info->parse_status;
+}
+
+#endif /* RSN_IE_INFO_STRUCT_RELOCATED */
+
+/* return the key descriptor version based on the AKM suite
+ * applicable only for STA with RSN
+ */
+static uint16
+wlc_calc_rsn_desc_version(const rsn_ie_info_t *rsn_info)
+{
+ uint16 key_desc_ver = WPA_KEY_DESC_V0;
+ uint8 akm;
+
+ ASSERT(rsn_info != NULL);
+ ASSERT(rsn_info->dev_type == DEV_STA);
+ akm = rsn_info->sta_akm;
+
+ /* Refer Draft 802.11REVmd_D1.0.pdf Section 12.7.2 */
+ if ((akm == RSN_AKM_UNSPECIFIED) ||
+ (akm == RSN_AKM_PSK)) {
+ if ((rsn_info->sta_cipher == WPA_CIPHER_TKIP) ||
+ (rsn_info->sta_cipher == WPA_CIPHER_NONE)) {
+ key_desc_ver = WPA_KEY_DESC_V1;
+ } else if ((rsn_info->sta_cipher != WPA_CIPHER_TKIP) ||
+ (rsn_info->g_cipher != WPA_CIPHER_TKIP)) {
+ key_desc_ver = WPA_KEY_DESC_V2;
+ }
+ } else if ((akm == RSN_AKM_FBT_1X) ||
+ (akm == RSN_AKM_FBT_PSK) ||
+ (akm == RSN_AKM_SHA256_1X) ||
+ (akm == RSN_AKM_SHA256_PSK)) {
+ key_desc_ver = WPA_KEY_DESC_V3;
+ }
+ return key_desc_ver;
+}
+
+/* get EAPOL key length based on RSN IE AKM/Cipher(unicast) suite
+ * key: EAPOL key type
+ * akm: RSN AKM suite selector
+ * cipher: RSN unicast cipher suite selector
+ * return: key length found in matching key_length_entry table
+ */
+uint8
+bcmwpa_eapol_key_length(eapol_key_type_t key, rsn_akm_t akm, rsn_cipher_t cipher)
+{
+ uint i;
+ uint8 key_length = 0;
+ uint8 suite;
+ const key_length_entry_t *key_entry = NULL;
+
+ if (key == EAPOL_KEY_TK) {
+ suite = cipher;
+ } else {
+ suite = akm;
+ }
+ for (i = 0; i < ARRAYSIZE(eapol_key_lookup_tbl); i++) {
+ if (eapol_key_lookup_tbl[i].key == key) {
+ key_entry = eapol_key_lookup_tbl[i].key_entry;
+ break;
+ }
+ }
+
+ if (key_entry) {
+ i = 0;
+ do {
+ if (key_entry[i].suite == suite || key_entry[i].suite == 0) {
+ key_length = key_entry[i].len;
+ break;
+ }
+ i++;
+ } while (i > 0);
+ }
+
+ return key_length;
+}
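+
+/* The lookup relies on per-key tables terminated by a suite == 0 default
+ * entry, which also ends the scan. A hypothetical table shape (the real
+ * tables are defined elsewhere; these suites and lengths are illustrative
+ * only):
+ *
+ *	static const key_length_entry_t tk_len_tbl[] = {
+ *		{WPA_CIPHER_AES_GCM256, 32u},
+ *		{WPA_CIPHER_TKIP, 32u},
+ *		{0, 16u} // default entry, also terminates the search
+ *	};
+ */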
+
+/* check if RSN AKM suite is valid */
+static int bcmwpa_is_valid_akm(const rsn_akm_t akm)
+{
+ uint i = 0;
+ for (i = 0; i < ARRAYSIZE(rsn_akm_lookup_tbl); i++) {
+ if (akm == rsn_akm_lookup_tbl[i].rsn_akm) {
+ return BCME_OK;
+ }
+ }
+ return BCME_ERROR;
+}
+
+/* checking cipher suite selector restriction based on AKM */
+int
+bcmwpa_rsn_akm_cipher_match(rsn_ie_info_t *rsn_info)
+{
+ uint i;
+ const rsn_akm_cipher_match_entry_t *p_entry = NULL;
+
+ for (i = 0; i < ARRAYSIZE(rsn_akm_cipher_match_table); i++) {
+ /* akm match */
+ if (rsn_info->sta_akm == rsn_akm_cipher_match_table[i].akm_type) {
+ p_entry = &rsn_akm_cipher_match_table[i];
+ break;
+ }
+ }
+
+ if (p_entry) {
+ /* unicast cipher match */
+ if (!(rsn_info->p_ciphers & p_entry->u_cast)) {
+ return BCME_UNSUPPORTED;
+ }
+ /* multicast cipher match */
+ if (!(BCM_BIT(rsn_info->g_cipher) & p_entry->m_cast)) {
+ return BCME_UNSUPPORTED;
+ }
+ /* group management cipher match */
+ if (!(BCM_BIT(rsn_info->g_mgmt_cipher) & p_entry->g_mgmt)) {
+ return BCME_UNSUPPORTED;
+ }
+ }
+ return BCME_OK;
+}
+
+#if defined(BCMSUP_PSK) || defined(BCMSUPPL)
+uint8 bcmwpa_find_group_mgmt_algo(rsn_cipher_t g_mgmt_cipher)
+{
+ uint8 i;
+ uint8 algo = CRYPTO_ALGO_BIP;
+
+ for (i = 0; i < ARRAYSIZE(group_mgmt_cipher_algo); i++) {
+ if ((group_mgmt_cipher_algo[i].g_mgmt_cipher == g_mgmt_cipher)) {
+ algo = group_mgmt_cipher_algo[i].bip_algo;
+ break;
+ }
+ }
+
+ return algo;
+}
+#endif /* defined(BCMSUP_PSK) || defined(BCMSUPPL) */
+
+#if defined(WL_BAND6G)
+bool
+bcmwpa_is_invalid_6g_akm(const rsn_akm_mask_t akms_bmp)
+{
+ if (akms_bmp & rsn_akm_6g_inval_mask) {
+ return TRUE;
+ }
+ return FALSE;
+}
+
+bool
+bcmwpa_is_invalid_6g_cipher(const rsn_ciphers_t ciphers_bmp)
+{
+ if (ciphers_bmp & cipher_6g_inval_mask) {
+ return TRUE;
+ }
+ return FALSE;
+}
+#endif /* WL_BAND6G */
+
+/*
+ * bcmwpa_get_algo_key_len returns the key_length for the algorithm.
+ * API : bcm_get_algorithm key length
+ * input: algo: Get the crypto algorithm.
+ * km: Keymgmt information.
+ * output: returns the key length and error status.
+ * BCME_OK is valid else BCME_UNSUPPORTED if not supported
+ */
+int
+bcmwpa_get_algo_key_len(uint8 algo, uint16 *key_len)
+{
+ int err = BCME_OK;
+
+ if (key_len == NULL) {
+ return BCME_BADARG;
+ }
+
+ switch (algo) {
+ case CRYPTO_ALGO_WEP1:
+ *key_len = WEP1_KEY_SIZE;
+ break;
+
+ case CRYPTO_ALGO_TKIP:
+ *key_len = TKIP_KEY_SIZE;
+ break;
+
+ case CRYPTO_ALGO_WEP128:
+ *key_len = WEP128_KEY_SIZE;
+ break;
+
+ case CRYPTO_ALGO_AES_CCM: /* fall through */
+ case CRYPTO_ALGO_AES_GCM: /* fall through */
+ case CRYPTO_ALGO_AES_OCB_MSDU : /* fall through */
+ case CRYPTO_ALGO_AES_OCB_MPDU:
+ *key_len = AES_KEY_SIZE;
+ break;
+
+#ifdef BCMWAPI_WPI
+ /* TODO: Need to double check */
+ case CRYPTO_ALGO_SMS4:
+ *key_len = SMS4_KEY_LEN + SMS4_WPI_CBC_MAC_LEN;
+ break;
+#endif /* BCMWAPI_WPI */
+
+ case CRYPTO_ALGO_BIP: /* fall through */
+ case CRYPTO_ALGO_BIP_GMAC:
+ *key_len = BIP_KEY_SIZE;
+ break;
+
+ case CRYPTO_ALGO_AES_CCM256: /* fall through */
+ case CRYPTO_ALGO_AES_GCM256: /* fall through */
+ case CRYPTO_ALGO_BIP_CMAC256: /* fall through */
+ case CRYPTO_ALGO_BIP_GMAC256:
+ *key_len = AES256_KEY_SIZE;
+ break;
+
+ case CRYPTO_ALGO_OFF:
+ *key_len = 0;
+ break;
+
+#if !defined(BCMCCX) && !defined(BCMEXTCCX)
+ case CRYPTO_ALGO_NALG: /* fall through */
+#else
+ case CRYPTO_ALGO_CKIP: /* fall through */
+ case CRYPTO_ALGO_CKIP_MMH: /* fall through */
+ case CRYPTO_ALGO_WEP_MMH: /* fall through */
+ case CRYPTO_ALGO_PMK: /* fall through default */
+#endif /* !defined(BCMCCX) && !defined(BCMEXTCCX) */
+ default:
+ *key_len = 0;
+ err = BCME_UNSUPPORTED;
+ break;
+ }
+ return err;
+}
diff --git a/bcmdhd.101.10.361.x/bcmxtlv.c b/bcmdhd.101.10.361.x/bcmxtlv.c
new file mode 100755
index 0000000..ddc6351
--- /dev/null
+++ b/bcmdhd.101.10.361.x/bcmxtlv.c
@@ -0,0 +1,647 @@
+/*
+ * Driver O/S-independent utility routines
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#include <typedefs.h>
+#include <bcmdefs.h>
+
+#include <stdarg.h>
+
+#ifdef BCMDRIVER
+#include <osl.h>
+#else /* !BCMDRIVER */
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#ifndef ASSERT
+#define ASSERT(exp)
+#endif
+#endif /* !BCMDRIVER */
+
+#include <bcmtlv.h>
+#include <bcmendian.h>
+#include <bcmutils.h>
+
+int
+BCMPOSTTRAPFN(bcm_xtlv_hdr_size)(bcm_xtlv_opts_t opts)
+{
+ int len = (int)OFFSETOF(bcm_xtlv_t, data); /* nominal */
+ if (opts & BCM_XTLV_OPTION_LENU8) --len;
+ if (opts & BCM_XTLV_OPTION_IDU8) --len;
+
+ return len;
+}
+
+bool
+bcm_valid_xtlv(const bcm_xtlv_t *elt, int buf_len, bcm_xtlv_opts_t opts)
+{
+ return elt != NULL &&
+ buf_len >= bcm_xtlv_hdr_size(opts) &&
+ buf_len >= bcm_xtlv_size(elt, opts);
+}
+
+int
+BCMPOSTTRAPFN(bcm_xtlv_size_for_data)(int dlen, bcm_xtlv_opts_t opts)
+{
+ int hsz;
+
+ hsz = bcm_xtlv_hdr_size(opts);
+ return ((opts & BCM_XTLV_OPTION_ALIGN32) ? ALIGN_SIZE(dlen + hsz, 4)
+ : (dlen + hsz));
+}
+
+int
+bcm_xtlv_size(const bcm_xtlv_t *elt, bcm_xtlv_opts_t opts)
+{
+ int size; /* size including header, data, and any pad */
+ int len; /* length without padding */
+
+ len = BCM_XTLV_LEN_EX(elt, opts);
+ size = bcm_xtlv_size_for_data(len, opts);
+ return size;
+}
+
+int
+bcm_xtlv_len(const bcm_xtlv_t *elt, bcm_xtlv_opts_t opts)
+{
+ const uint8 *lenp;
+ int len;
+
+ lenp = (const uint8 *)elt + OFFSETOF(bcm_xtlv_t, len); /* nominal */
+ if (opts & BCM_XTLV_OPTION_IDU8) {
+ --lenp;
+ }
+
+ if (opts & BCM_XTLV_OPTION_LENU8) {
+ len = *lenp;
+ } else if (opts & BCM_XTLV_OPTION_LENBE) {
+ len = (uint32)hton16(elt->len);
+ } else {
+ len = ltoh16_ua(lenp);
+ }
+
+ return len;
+}
+
+int
+bcm_xtlv_id(const bcm_xtlv_t *elt, bcm_xtlv_opts_t opts)
+{
+ int id = 0;
+ if (opts & BCM_XTLV_OPTION_IDU8) {
+ id = *(const uint8 *)elt;
+ } else if (opts & BCM_XTLV_OPTION_IDBE) {
+ id = (uint32)hton16(elt->id);
+ } else {
+ id = ltoh16_ua((const uint8 *)elt);
+ }
+
+ return id;
+}
+
+bcm_xtlv_t *
+bcm_next_xtlv(const bcm_xtlv_t *elt, int *buflen, bcm_xtlv_opts_t opts)
+{
+ uint sz;
+
+ COV_TAINTED_DATA_SINK(buflen);
+ COV_NEG_SINK(buflen);
+
+ /* validate current elt */
+ if (!bcm_valid_xtlv(elt, *buflen, opts))
+ return NULL;
+
+ /* advance to next elt */
+ sz = BCM_XTLV_SIZE_EX(elt, opts);
+ elt = (const bcm_xtlv_t*)((const uint8 *)elt + sz);
+
+#if defined(__COVERITY__)
+ /* The 'sz' value is tainted in Coverity because it is read from the tainted data pointed
+ * to by 'elt'. However, bcm_valid_xtlv() verifies that the elt pointer is a valid element,
+ * so its size, sz = BCM_XTLV_SIZE_EX(elt, opts), is in the bounds of the buffer.
+ * Clearing the tainted attribute of 'sz' for Coverity.
+ */
+ __coverity_tainted_data_sanitize__(sz);
+ if (sz > *buflen) {
+ return NULL;
+ }
+#endif /* __COVERITY__ */
+
+ *buflen -= sz;
+
+ /* validate next elt */
+ if (!bcm_valid_xtlv(elt, *buflen, opts))
+ return NULL;
+
+ COV_TAINTED_DATA_ARG(elt);
+
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST()
+ return (bcm_xtlv_t *)(elt);
+ GCC_DIAGNOSTIC_POP()
+}
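+
+/* A minimal iteration sketch using the helper above (illustrative only: buf,
+ * buf_len and opts stand for a caller's packed xtlv blob and options):
+ *
+ *	const bcm_xtlv_t *elt;
+ *	int rem = (int)buf_len;
+ *
+ *	for (elt = (const bcm_xtlv_t *)buf;
+ *	     elt != NULL && bcm_valid_xtlv(elt, rem, opts);
+ *	     elt = bcm_next_xtlv(elt, &rem, opts)) {
+ *		// bcm_xtlv_id(elt, opts) / bcm_xtlv_len(elt, opts) describe elt
+ *	}
+ */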
+
+int
+bcm_xtlv_buf_init(bcm_xtlvbuf_t *tlv_buf, uint8 *buf, uint16 len, bcm_xtlv_opts_t opts)
+{
+ if (!tlv_buf || !buf || !len)
+ return BCME_BADARG;
+
+ tlv_buf->opts = opts;
+ tlv_buf->size = len;
+ tlv_buf->head = buf;
+ tlv_buf->buf = buf;
+ return BCME_OK;
+}
+
+uint16
+bcm_xtlv_buf_len(bcm_xtlvbuf_t *tbuf)
+{
+ uint16 len;
+
+ if (tbuf)
+ len = (uint16)(tbuf->buf - tbuf->head);
+ else
+ len = 0;
+
+ return len;
+}
+
+uint16
+bcm_xtlv_buf_rlen(bcm_xtlvbuf_t *tbuf)
+{
+ uint16 rlen;
+ if (tbuf)
+ rlen = tbuf->size - bcm_xtlv_buf_len(tbuf);
+ else
+ rlen = 0;
+
+ return rlen;
+}
+
+uint8 *
+bcm_xtlv_buf(bcm_xtlvbuf_t *tbuf)
+{
+ return tbuf ? tbuf->buf : NULL;
+}
+
+uint8 *
+bcm_xtlv_head(bcm_xtlvbuf_t *tbuf)
+{
+ return tbuf ? tbuf->head : NULL;
+}
+
+void
+BCMPOSTTRAPFN(bcm_xtlv_pack_xtlv)(bcm_xtlv_t *xtlv, uint16 type, uint16 len, const uint8 *data,
+ bcm_xtlv_opts_t opts)
+{
+ uint8 *data_buf;
+ bcm_xtlv_opts_t mask = BCM_XTLV_OPTION_IDU8 | BCM_XTLV_OPTION_LENU8;
+
+ if (!(opts & mask)) { /* default */
+ uint8 *idp = (uint8 *)xtlv;
+ uint8 *lenp = idp + sizeof(xtlv->id);
+ htol16_ua_store(type, idp);
+ htol16_ua_store(len, lenp);
+ data_buf = lenp + sizeof(uint16);
+ } else if ((opts & mask) == mask) { /* u8 id and u8 len */
+ uint8 *idp = (uint8 *)xtlv;
+ uint8 *lenp = idp + 1;
+ *idp = (uint8)type;
+ *lenp = (uint8)len;
+ data_buf = lenp + sizeof(uint8);
+ } else if (opts & BCM_XTLV_OPTION_IDU8) { /* u8 id, u16 len */
+ uint8 *idp = (uint8 *)xtlv;
+ uint8 *lenp = idp + 1;
+ *idp = (uint8)type;
+ htol16_ua_store(len, lenp);
+ data_buf = lenp + sizeof(uint16);
+ } else if (opts & BCM_XTLV_OPTION_LENU8) { /* u16 id, u8 len */
+ uint8 *idp = (uint8 *)xtlv;
+ uint8 *lenp = idp + sizeof(uint16);
+ htol16_ua_store(type, idp);
+ *lenp = (uint8)len;
+ data_buf = lenp + sizeof(uint8);
+ } else {
+ ASSERT(!"Unexpected xtlv option");
+ return;
+ }
+
+ if (opts & BCM_XTLV_OPTION_LENU8) {
+ ASSERT(len <= 0x00ff);
+ len &= 0xff;
+ }
+
+ if (data != NULL) {
+ memcpy(data_buf, data, len);
+ }
+}
+
+/* xtlv header is always packed in LE order */
+void
+bcm_xtlv_unpack_xtlv(const bcm_xtlv_t *xtlv, uint16 *type, uint16 *len,
+ const uint8 **data, bcm_xtlv_opts_t opts)
+{
+ if (type)
+ *type = (uint16)bcm_xtlv_id(xtlv, opts);
+ if (len)
+ *len = (uint16)bcm_xtlv_len(xtlv, opts);
+ if (data)
+ *data = (const uint8 *)xtlv + BCM_XTLV_HDR_SIZE_EX(opts);
+}
+
+int
+bcm_xtlv_put_data(bcm_xtlvbuf_t *tbuf, uint16 type, const uint8 *data, int n)
+{
+ bcm_xtlv_t *xtlv;
+ int size;
+
+ if (tbuf == NULL)
+ return BCME_BADARG;
+
+ size = bcm_xtlv_size_for_data(n, tbuf->opts);
+ if (bcm_xtlv_buf_rlen(tbuf) < size)
+ return BCME_NOMEM;
+
+ xtlv = (bcm_xtlv_t *)bcm_xtlv_buf(tbuf);
+ bcm_xtlv_pack_xtlv(xtlv, type, (uint16)n, data, tbuf->opts);
+ tbuf->buf += size; /* note: data may be NULL, reserves space */
+ return BCME_OK;
+}
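+
+/* A minimal packing sketch using the writer API above (illustrative only:
+ * the xtlv id 0x1 and the one-byte payload are made-up values):
+ *
+ *	uint8 out[64];
+ *	bcm_xtlvbuf_t tb;
+ *	uint8 val = 7;
+ *
+ *	if (bcm_xtlv_buf_init(&tb, out, sizeof(out), BCM_XTLV_OPTION_ALIGN32) == BCME_OK &&
+ *	    bcm_xtlv_put_data(&tb, 0x1, &val, sizeof(val)) == BCME_OK) {
+ *		// bcm_xtlv_buf_len(&tb) bytes of 'out' are now packed
+ *	}
+ */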
+
+static int
+bcm_xtlv_put_int(bcm_xtlvbuf_t *tbuf, uint16 type, const uint8 *data, int n, int int_sz)
+{
+ bcm_xtlv_t *xtlv;
+ int xtlv_len;
+ uint8 *xtlv_data;
+ int err = BCME_OK;
+
+ if (tbuf == NULL) {
+ err = BCME_BADARG;
+ goto done;
+ }
+
+ xtlv = (bcm_xtlv_t *)bcm_xtlv_buf(tbuf);
+
+ /* put type and length in xtlv and reserve data space */
+ xtlv_len = n * int_sz;
+ err = bcm_xtlv_put_data(tbuf, type, NULL, xtlv_len);
+ if (err != BCME_OK)
+ goto done;
+
+ xtlv_data = (uint8 *)xtlv + bcm_xtlv_hdr_size(tbuf->opts);
+
+ /* write data with little-endianness into the buffer - single loop, unaligned-safe access */
+ for (; n != 0; --n, xtlv_data += int_sz, data += int_sz) {
+ switch (int_sz) {
+ case sizeof(uint8):
+ break;
+ case sizeof(uint16):
+ {
+ uint16 v = load16_ua(data);
+ htol16_ua_store(v, xtlv_data);
+ break;
+ }
+ case sizeof(uint32):
+ {
+ uint32 v = load32_ua(data);
+ htol32_ua_store(v, xtlv_data);
+ break;
+ }
+ case sizeof(uint64):
+ {
+ uint64 v = load64_ua(data);
+ htol64_ua_store(v, xtlv_data);
+ break;
+ }
+ default:
+ err = BCME_UNSUPPORTED;
+ goto done;
+ }
+ }
+
+done:
+ return err;
+}
+
+int
+bcm_xtlv_put16(bcm_xtlvbuf_t *tbuf, uint16 type, const uint16 *data, int n)
+{
+ return bcm_xtlv_put_int(tbuf, type, (const uint8 *)data, n, sizeof(uint16));
+}
+
+int
+bcm_xtlv_put32(bcm_xtlvbuf_t *tbuf, uint16 type, const uint32 *data, int n)
+{
+ return bcm_xtlv_put_int(tbuf, type, (const uint8 *)data, n, sizeof(uint32));
+}
+
+int
+bcm_xtlv_put64(bcm_xtlvbuf_t *tbuf, uint16 type, const uint64 *data, int n)
+{
+ return bcm_xtlv_put_int(tbuf, type, (const uint8 *)data, n, sizeof(uint64));
+}
+
+/*
+ * unpacks an xtlv record from buf, checks the type,
+ * copies data to the caller's buffer, and
+ * advances the tlv pointer to the next record.
+ * The caller is responsible for the dst space check.
+ */
+int
+bcm_unpack_xtlv_entry(const uint8 **tlv_buf, uint16 xpct_type, uint16 xpct_len,
+ uint8 *dst_data, bcm_xtlv_opts_t opts)
+{
+ const bcm_xtlv_t *ptlv = (const bcm_xtlv_t *)*tlv_buf;
+ uint16 len;
+ uint16 type;
+ const uint8 *data;
+
+ ASSERT(ptlv);
+
+ bcm_xtlv_unpack_xtlv(ptlv, &type, &len, &data, opts);
+ if (len) {
+ if ((type != xpct_type) || (len > xpct_len))
+ return BCME_BADARG;
+ if (dst_data && data)
+ memcpy(dst_data, data, len); /* copy data to dst */
+ }
+
+ *tlv_buf += BCM_XTLV_SIZE_EX(ptlv, opts);
+ return BCME_OK;
+}
+
+/*
+ * packs user data into a tlv record and advances the tlv pointer to the next xtlv slot;
+ * buflen is used for the tlv_buf space check
+ */
+int
+bcm_pack_xtlv_entry(uint8 **tlv_buf, uint16 *buflen, uint16 type, uint16 len,
+ const uint8 *src_data, bcm_xtlv_opts_t opts)
+{
+ bcm_xtlv_t *ptlv = (bcm_xtlv_t *)*tlv_buf;
+ int size;
+
+ ASSERT(ptlv);
+
+ size = bcm_xtlv_size_for_data(len, opts);
+
+ /* make sure the packed xtlv fits in the caller's buffer */
+ if (size > *buflen) {
+ return BCME_BADLEN;
+ }
+
+ bcm_xtlv_pack_xtlv(ptlv, type, len, src_data, opts);
+
+ /* advance callers pointer to tlv buff */
+ *tlv_buf = (uint8*)(*tlv_buf) + size;
+ /* decrement the len */
+ *buflen -= (uint16)size;
+ return BCME_OK;
+}
+
+/*
+ * unpack all xtlv records from the buffer and issue a callback
+ * to the set function, one call per found tlv record
+ */
+int
+bcm_unpack_xtlv_buf(void *ctx, const uint8 *tlv_buf, uint16 buflen, bcm_xtlv_opts_t opts,
+ bcm_xtlv_unpack_cbfn_t *cbfn)
+{
+ uint16 len;
+ uint16 type;
+ int res = BCME_OK;
+ int size;
+ const bcm_xtlv_t *ptlv;
+ int sbuflen = buflen;
+ const uint8 *data;
+ int hdr_size;
+
+ ASSERT(!buflen || tlv_buf);
+ ASSERT(!buflen || cbfn);
+
+ hdr_size = BCM_XTLV_HDR_SIZE_EX(opts);
+ while (sbuflen >= hdr_size) {
+ ptlv = (const bcm_xtlv_t *)tlv_buf;
+
+ bcm_xtlv_unpack_xtlv(ptlv, &type, &len, &data, opts);
+ size = bcm_xtlv_size_for_data(len, opts);
+
+ sbuflen -= size;
+
+ /* check for buffer overrun */
+ if (sbuflen < 0) {
+ break;
+ }
+
+ if ((res = cbfn(ctx, data, type, len)) != BCME_OK) {
+ break;
+ }
+ tlv_buf += size;
+ }
+ return res;
+}
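+
+/* A minimal callback-driven unpack sketch for the routine above (illustrative
+ * only: my_ctx, buf and buf_len are hypothetical caller state):
+ *
+ *	static int my_cb(void *ctx, const uint8 *data, uint16 type, uint16 len)
+ *	{
+ *		// consume one record; returning non-BCME_OK stops the walk
+ *		return BCME_OK;
+ *	}
+ *	...
+ *	res = bcm_unpack_xtlv_buf(&my_ctx, buf, buf_len, BCM_XTLV_OPTION_ALIGN32, my_cb);
+ */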
+
+int
+bcm_pack_xtlv_buf(void *ctx, uint8 *tlv_buf, uint16 buflen, bcm_xtlv_opts_t opts,
+ bcm_pack_xtlv_next_info_cbfn_t get_next, bcm_pack_xtlv_pack_next_cbfn_t pack_next,
+ int *outlen)
+{
+ int res = BCME_OK;
+ uint16 tlv_id;
+ uint16 tlv_len;
+ uint8 *startp;
+ uint8 *endp;
+ uint8 *buf;
+ bool more;
+ int size;
+ int hdr_size;
+
+ ASSERT(get_next && pack_next);
+
+ buf = tlv_buf;
+ startp = buf;
+ endp = (uint8 *)buf + buflen;
+ more = TRUE;
+ hdr_size = BCM_XTLV_HDR_SIZE_EX(opts);
+
+ while (more && (buf < endp)) {
+ more = get_next(ctx, &tlv_id, &tlv_len);
+ size = bcm_xtlv_size_for_data(tlv_len, opts);
+ if ((buf + size) > endp) {
+ res = BCME_BUFTOOSHORT;
+ goto done;
+ }
+
+ bcm_xtlv_pack_xtlv((bcm_xtlv_t *)buf, tlv_id, tlv_len, NULL, opts);
+ pack_next(ctx, tlv_id, tlv_len, buf + hdr_size);
+ buf += size;
+ }
+
+ if (more)
+ res = BCME_BUFTOOSHORT;
+
+done:
+ if (outlen) {
+ *outlen = (int)(buf - startp);
+ }
+ return res;
+}
+
+/*
+ * pack xtlv buffer from memory according to xtlv_desc_t
+ */
+int
+bcm_pack_xtlv_buf_from_mem(uint8 **tlv_buf, uint16 *buflen, const xtlv_desc_t *items,
+ bcm_xtlv_opts_t opts)
+{
+ int res = BCME_OK;
+ uint8 *ptlv = *tlv_buf;
+
+ while (items->type != 0) {
+ if (items->len && items->ptr) {
+ res = bcm_pack_xtlv_entry(&ptlv, buflen, items->type,
+ items->len, items->ptr, opts);
+ if (res != BCME_OK)
+ break;
+ }
+ items++;
+ }
+
+ *tlv_buf = ptlv; /* update the external pointer */
+ return res;
+}
+
+/*
+ * unpack xtlv buffer to memory according to xtlv_desc_t
+ */
+int
+bcm_unpack_xtlv_buf_to_mem(const uint8 *tlv_buf, int *buflen, xtlv_desc_t *items,
+ bcm_xtlv_opts_t opts)
+{
+ int res = BCME_OK;
+ const bcm_xtlv_t *elt;
+
+ elt = bcm_valid_xtlv((const bcm_xtlv_t *)tlv_buf, *buflen, opts) ?
+ (const bcm_xtlv_t *)tlv_buf : NULL;
+ if (!elt || !items) {
+ res = BCME_BADARG;
+ return res;
+ }
+
+ for (; elt != NULL && res == BCME_OK; elt = bcm_next_xtlv(elt, buflen, opts)) {
+ /* find matches in desc_t items */
+ xtlv_desc_t *dst_desc = items;
+ uint16 len, type;
+ const uint8 *data;
+
+ bcm_xtlv_unpack_xtlv(elt, &type, &len, &data, opts);
+ while (dst_desc->type != 0) {
+ if (type == dst_desc->type) {
+ if (len != dst_desc->len) {
+ res = BCME_BADLEN;
+ } else {
+ memcpy(dst_desc->ptr, data, len);
+ }
+ break;
+ }
+ dst_desc++;
+ }
+ }
+
+ if (res == BCME_OK && *buflen != 0) /* this does not look right */
+ res = BCME_BUFTOOSHORT;
+
+ return res;
+}
+
+/*
+ * return data pointer of a given ID from xtlv buffer.
+ * If the specified xTLV ID is found, on return *datalen will contain
+ * the data length of the xTLV ID.
+ */
+const uint8*
+bcm_get_data_from_xtlv_buf(const uint8 *tlv_buf, uint16 buflen, uint16 id,
+ uint16 *datalen, bcm_xtlv_opts_t opts)
+{
+ const uint8 *retptr = NULL;
+ uint16 type, len;
+ int size;
+ const bcm_xtlv_t *ptlv;
+ int sbuflen = buflen;
+ const uint8 *data;
+ int hdr_size;
+
+ COV_TAINTED_DATA_SINK(buflen);
+ COV_NEG_SINK(buflen);
+
+ hdr_size = BCM_XTLV_HDR_SIZE_EX(opts);
+
+ /* Init the datalength */
+ if (datalen) {
+ *datalen = 0;
+ }
+ while (sbuflen >= hdr_size) {
+ ptlv = (const bcm_xtlv_t *)tlv_buf;
+ bcm_xtlv_unpack_xtlv(ptlv, &type, &len, &data, opts);
+
+ size = bcm_xtlv_size_for_data(len, opts);
+ sbuflen -= size;
+ if (sbuflen < 0) /* buffer overrun? */
+ break;
+
+ if (id == type) {
+ retptr = data;
+ if (datalen)
+ *datalen = len;
+ break;
+ }
+
+ tlv_buf += size;
+ }
+
+ COV_TAINTED_DATA_ARG(retptr);
+
+ return retptr;
+}
+
+bcm_xtlv_t*
+bcm_xtlv_bcopy(const bcm_xtlv_t *src, bcm_xtlv_t *dst,
+ int src_buf_len, int dst_buf_len, bcm_xtlv_opts_t opts)
+{
+ bcm_xtlv_t *dst_next = NULL;
+ src = (src && bcm_valid_xtlv(src, src_buf_len, opts)) ? src : NULL;
+ if (src && dst) {
+ uint16 type;
+ uint16 len;
+ const uint8 *data;
+ int size;
+ bcm_xtlv_unpack_xtlv(src, &type, &len, &data, opts);
+ size = bcm_xtlv_size_for_data(len, opts);
+ if (size <= dst_buf_len) {
+ bcm_xtlv_pack_xtlv(dst, type, len, data, opts);
+ dst_next = (bcm_xtlv_t *)((uint8 *)dst + size);
+ }
+ }
+
+ return dst_next;
+}
diff --git a/bcmdhd.101.10.361.x/dbus.c b/bcmdhd.101.10.361.x/dbus.c
new file mode 100755
index 0000000..f86a864
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dbus.c
@@ -0,0 +1,2928 @@
+/** @file dbus.c
+ *
+ * Hides details of USB / SDIO / SPI interfaces and OS details. It is intended to shield details and
+ * provide the caller with one common bus interface for all dongle devices. In practice, it is only
+ * used for USB interfaces. DBUS is not a protocol, but an abstraction layer.
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dbus.c 553311 2015-04-29 10:23:08Z $
+ */
+
+
+#include "osl.h"
+#include "dbus.h"
+#include <bcmutils.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_proto.h>
+#ifdef PROP_TXSTATUS /* a form of flow control between host and dongle */
+#include <dhd_wlfc.h>
+#endif
+#include <dhd_config.h>
+
+#if defined(BCM_REQUEST_FW)
+#include <bcmsrom_fmt.h>
+#include <trxhdr.h>
+#include <usbrdl.h>
+#include <bcmendian.h>
+#include <sbpcmcia.h>
+#include <bcmnvram.h>
+#include <bcmdevs.h>
+#endif
+
+
+
+#if defined(BCM_REQUEST_FW)
+#ifndef VARS_MAX
+#define VARS_MAX 8192
+#endif
+#endif
+
+#ifdef DBUS_USB_LOOPBACK
+extern bool is_loopback_pkt(void *buf);
+extern int matches_loopback_pkt(void *buf);
+#endif
+
+/** General info for all BUS types */
+typedef struct dbus_irbq {
+ dbus_irb_t *head;
+ dbus_irb_t *tail;
+ int cnt;
+} dbus_irbq_t;
+
+/**
+ * This private structure dhd_bus_t is also declared in dbus_usb_linux.c.
+ * All the fields must be consistent in both declarations.
+ */
+typedef struct dhd_bus {
+ dbus_pub_t pub; /* MUST BE FIRST */
+ dhd_pub_t *dhd;
+
+ void *cbarg;
+ dbus_callbacks_t *cbs; /* callbacks to higher level, e.g. dhd_linux.c */
+ void *bus_info;
+ dbus_intf_t *drvintf; /* callbacks to lower level, e.g. dbus_usb.c or dbus_usb_linux.c */
+ uint8 *fw;
+ int fwlen;
+ uint32 errmask;
+ int rx_low_watermark; /* avoid rx overflow by filling rx with free IRBs */
+ int tx_low_watermark;
+ bool txoff;
+ bool txoverride; /* flow control related */
+ bool rxoff;
+ bool tx_timer_ticking;
+
+
+ dbus_irbq_t *rx_q;
+ dbus_irbq_t *tx_q;
+
+ uint8 *nvram;
+ int nvram_len;
+ uint8 *image; /* buffer for combine fw and nvram */
+ int image_len;
+ uint8 *orig_fw;
+ int origfw_len;
+ int decomp_memsize;
+ dbus_extdl_t extdl;
+ int nvram_nontxt;
+#if defined(BCM_REQUEST_FW)
+ void *firmware;
+ void *nvfile;
+#endif
+ char *fw_path; /* module_param: path to firmware image */
+ char *nv_path; /* module_param: path to nvram vars file */
+} dhd_bus_t;
+
+struct exec_parms {
+ union {
+ /* Can consolidate same params, if need be, but this shows
+ * group of parameters per function
+ */
+ struct {
+ dbus_irbq_t *q;
+ dbus_irb_t *b;
+ } qenq;
+
+ struct {
+ dbus_irbq_t *q;
+ } qdeq;
+ };
+};
+
+#define EXEC_RXLOCK(info, fn, a) \
+ info->drvintf->exec_rxlock(dhd_bus->bus_info, ((exec_cb_t)fn), ((struct exec_parms *) a))
+
+#define EXEC_TXLOCK(info, fn, a) \
+ info->drvintf->exec_txlock(dhd_bus->bus_info, ((exec_cb_t)fn), ((struct exec_parms *) a))
+
+/*
+ * Callbacks common for all BUS
+ */
+static void dbus_if_send_irb_timeout(void *handle, dbus_irb_tx_t *txirb);
+static void dbus_if_send_irb_complete(void *handle, dbus_irb_tx_t *txirb, int status);
+static void dbus_if_recv_irb_complete(void *handle, dbus_irb_rx_t *rxirb, int status);
+static void dbus_if_errhandler(void *handle, int err);
+static void dbus_if_ctl_complete(void *handle, int type, int status);
+static void dbus_if_state_change(void *handle, int state);
+static void *dbus_if_pktget(void *handle, uint len, bool send);
+static void dbus_if_pktfree(void *handle, void *p, bool send);
+static struct dbus_irb *dbus_if_getirb(void *cbarg, bool send);
+static void dbus_if_rxerr_indicate(void *handle, bool on);
+
+void * dhd_dbus_probe_cb(void *arg, const char *desc, uint32 bustype,
+ uint16 bus_no, uint16 slot, uint32 hdrlen);
+void dhd_dbus_disconnect_cb(void *arg);
+void dbus_detach(dhd_bus_t *pub);
+
+/** functions in this file that are called by lower DBUS levels, e.g. dbus_usb.c */
+static dbus_intf_callbacks_t dbus_intf_cbs = {
+ dbus_if_send_irb_timeout,
+ dbus_if_send_irb_complete,
+ dbus_if_recv_irb_complete,
+ dbus_if_errhandler,
+ dbus_if_ctl_complete,
+ dbus_if_state_change,
+ NULL, /* isr */
+ NULL, /* dpc */
+ NULL, /* watchdog */
+ dbus_if_pktget,
+ dbus_if_pktfree,
+ dbus_if_getirb,
+ dbus_if_rxerr_indicate
+};
+
+/*
+ * Need global for probe() and disconnect() since
+ * attach() is not called at probe and detach()
+ * can be called inside disconnect()
+ */
+static dbus_intf_t *g_busintf = NULL;
+static probe_cb_t probe_cb = NULL;
+static disconnect_cb_t disconnect_cb = NULL;
+static void *probe_arg = NULL;
+static void *disc_arg = NULL;
+
+#if defined(BCM_REQUEST_FW)
+int8 *nonfwnvram = NULL; /* stand-alone multi-nvram given with driver load */
+int nonfwnvramlen = 0;
+#endif /* #if defined(BCM_REQUEST_FW) */
+
+static void* q_enq(dbus_irbq_t *q, dbus_irb_t *b);
+static void* q_enq_exec(struct exec_parms *args);
+static dbus_irb_t *q_deq(dbus_irbq_t *q);
+static void* q_deq_exec(struct exec_parms *args);
+static int dbus_tx_timer_init(dhd_bus_t *dhd_bus);
+static int dbus_tx_timer_start(dhd_bus_t *dhd_bus, uint timeout);
+static int dbus_tx_timer_stop(dhd_bus_t *dhd_bus);
+static int dbus_irbq_init(dhd_bus_t *dhd_bus, dbus_irbq_t *q, int nq, int size_irb);
+static int dbus_irbq_deinit(dhd_bus_t *dhd_bus, dbus_irbq_t *q, int size_irb);
+static int dbus_rxirbs_fill(dhd_bus_t *dhd_bus);
+static int dbus_send_irb(dbus_pub_t *pub, uint8 *buf, int len, void *pkt, void *info);
+static void dbus_disconnect(void *handle);
+static void *dbus_probe(void *arg, const char *desc, uint32 bustype,
+ uint16 bus_no, uint16 slot, uint32 hdrlen);
+
+#if defined(BCM_REQUEST_FW)
+extern char * dngl_firmware;
+extern unsigned int dngl_fwlen;
+#ifndef EXTERNAL_FW_PATH
+static int dbus_get_nvram(dhd_bus_t *dhd_bus);
+static int dbus_jumbo_nvram(dhd_bus_t *dhd_bus);
+static int dbus_otp(dhd_bus_t *dhd_bus, uint16 *boardtype, uint16 *boardrev);
+static int dbus_select_nvram(dhd_bus_t *dhd_bus, int8 *jumbonvram, int jumbolen,
+uint16 boardtype, uint16 boardrev, int8 **nvram, int *nvram_len);
+#endif /* !EXTERNAL_FW_PATH */
+extern int dbus_zlib_decomp(dhd_bus_t *dhd_bus);
+extern void *dbus_zlib_calloc(int num, int size);
+extern void dbus_zlib_free(void *ptr);
+#endif
+
+/* function */
+void
+dbus_flowctrl_tx(void *dbi, bool on)
+{
+ dhd_bus_t *dhd_bus = dbi;
+
+ if (dhd_bus == NULL)
+ return;
+
+ DBUSTRACE(("%s on %d\n", __FUNCTION__, on));
+
+ if (dhd_bus->txoff == on)
+ return;
+
+ dhd_bus->txoff = on;
+
+ if (dhd_bus->cbs && dhd_bus->cbs->txflowcontrol)
+ dhd_bus->cbs->txflowcontrol(dhd_bus->cbarg, on);
+}
+
+/**
+ * if lower level DBUS signaled a rx error, more free rx IRBs should be allocated or flow control
+ * should kick in to make more free rx IRBs available.
+ */
+static void
+dbus_if_rxerr_indicate(void *handle, bool on)
+{
+ dhd_bus_t *dhd_bus = (dhd_bus_t *) handle;
+
+ DBUSTRACE(("%s, on %d\n", __FUNCTION__, on));
+
+ if (dhd_bus == NULL)
+ return;
+
+ if (dhd_bus->txoverride == on)
+ return;
+
+ dhd_bus->txoverride = on; /* flow control */
+
+ if (!on)
+ dbus_rxirbs_fill(dhd_bus);
+
+}
+
+/** q_enq()/q_deq() are executed with protection via exec_rxlock()/exec_txlock() */
+static void*
+q_enq(dbus_irbq_t *q, dbus_irb_t *b)
+{
+ ASSERT(q->tail != b);
+ ASSERT(b->next == NULL);
+ b->next = NULL;
+ if (q->tail) {
+ q->tail->next = b;
+ q->tail = b;
+ } else
+ q->head = q->tail = b;
+
+ q->cnt++;
+
+ return b;
+}
+
+static void*
+q_enq_exec(struct exec_parms *args)
+{
+ return q_enq(args->qenq.q, args->qenq.b);
+}
+
+static dbus_irb_t*
+q_deq(dbus_irbq_t *q)
+{
+ dbus_irb_t *b;
+
+ b = q->head;
+ if (b) {
+ q->head = q->head->next;
+ b->next = NULL;
+
+ if (q->head == NULL)
+ q->tail = q->head;
+
+ q->cnt--;
+ }
+ return b;
+}
+
+static void*
+q_deq_exec(struct exec_parms *args)
+{
+ return q_deq(args->qdeq.q);
+}
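
A minimal standalone sketch of the FIFO discipline that q_enq()/q_deq() maintain, using simplified stand-in types rather than the driver's real dbus_irb_t (in the driver these helpers always run under the exec_rxlock()/exec_txlock() wrappers):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct irb { struct irb *next; };
struct irbq { struct irb *head, *tail; int cnt; };

static void enq(struct irbq *q, struct irb *b)
{
	b->next = NULL;
	if (q->tail)
		q->tail->next = b;
	else
		q->head = b;
	q->tail = b;
	q->cnt++;
}

static struct irb *deq(struct irbq *q)
{
	struct irb *b = q->head;

	if (b) {
		q->head = b->next;
		b->next = NULL;
		if (q->head == NULL)
			q->tail = NULL;
		q->cnt--;
	}
	return b;
}

int main(void)
{
	struct irbq q = { NULL, NULL, 0 };
	struct irb a, b;

	enq(&q, &a);
	enq(&q, &b);
	assert(deq(&q) == &a);		/* FIFO order */
	assert(deq(&q) == &b);
	assert(deq(&q) == NULL);	/* empty queue yields NULL */
	printf("cnt=%d\n", q.cnt);	/* 0 */
	return 0;
}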
+
+/**
+ * called during attach phase. Status @ Dec 2012: this function does nothing since for all of the
+ * lower DBUS levels dhd_bus->drvintf->tx_timer_init is NULL.
+ */
+static int
+dbus_tx_timer_init(dhd_bus_t *dhd_bus)
+{
+ if (dhd_bus && dhd_bus->drvintf && dhd_bus->drvintf->tx_timer_init)
+ return dhd_bus->drvintf->tx_timer_init(dhd_bus->bus_info);
+ else
+ return DBUS_ERR;
+}
+
+static int
+dbus_tx_timer_start(dhd_bus_t *dhd_bus, uint timeout)
+{
+ if (dhd_bus == NULL)
+ return DBUS_ERR;
+
+ if (dhd_bus->tx_timer_ticking)
+ return DBUS_OK;
+
+ if (dhd_bus->drvintf && dhd_bus->drvintf->tx_timer_start) {
+ if (dhd_bus->drvintf->tx_timer_start(dhd_bus->bus_info, timeout) == DBUS_OK) {
+ dhd_bus->tx_timer_ticking = TRUE;
+ return DBUS_OK;
+ }
+ }
+
+ return DBUS_ERR;
+}
+
+static int
+dbus_tx_timer_stop(dhd_bus_t *dhd_bus)
+{
+ if (dhd_bus == NULL)
+ return DBUS_ERR;
+
+ if (!dhd_bus->tx_timer_ticking)
+ return DBUS_OK;
+
+ if (dhd_bus->drvintf && dhd_bus->drvintf->tx_timer_stop) {
+ if (dhd_bus->drvintf->tx_timer_stop(dhd_bus->bus_info) == DBUS_OK) {
+ dhd_bus->tx_timer_ticking = FALSE;
+ return DBUS_OK;
+ }
+ }
+
+ return DBUS_ERR;
+}
+
+/** called during attach phase. */
+static int
+dbus_irbq_init(dhd_bus_t *dhd_bus, dbus_irbq_t *q, int nq, int size_irb)
+{
+ int i;
+ dbus_irb_t *irb;
+
+ ASSERT(q);
+ ASSERT(dhd_bus);
+
+ for (i = 0; i < nq; i++) {
+ /* MALLOC dbus_irb_tx or dbus_irb_rx, but cast to simple dbus_irb_t linkedlist */
+ irb = (dbus_irb_t *) MALLOC(dhd_bus->pub.osh, size_irb);
+ if (irb == NULL) {
+ ASSERT(irb);
+ return DBUS_ERR;
+ }
+ bzero(irb, size_irb);
+
+ /* q_enq() does not need to go through EXEC_xxLOCK() during init() */
+ q_enq(q, irb);
+ }
+
+ return DBUS_OK;
+}
+
+/** called during detach phase or when attach failed */
+static int
+dbus_irbq_deinit(dhd_bus_t *dhd_bus, dbus_irbq_t *q, int size_irb)
+{
+ dbus_irb_t *irb;
+
+ ASSERT(q);
+ ASSERT(dhd_bus);
+
+ /* q_deq() does not need to go through EXEC_xxLOCK()
+ * during deinit(); all callbacks are stopped by this time
+ */
+ while ((irb = q_deq(q)) != NULL) {
+ MFREE(dhd_bus->pub.osh, irb, size_irb);
+ }
+
+ if (q->cnt)
+ DBUSERR(("deinit: q->cnt=%d > 0\n", q->cnt));
+ return DBUS_OK;
+}
+
+/** multiple code paths require the rx queue to be filled with more free IRBs */
+static int
+dbus_rxirbs_fill(dhd_bus_t *dhd_bus)
+{
+ int err = DBUS_OK;
+
+
+ dbus_irb_rx_t *rxirb;
+ struct exec_parms args;
+
+ ASSERT(dhd_bus);
+ if (dhd_bus->pub.busstate != DBUS_STATE_UP) {
+ DBUSERR(("dbus_rxirbs_fill: DBUS not up \n"));
+ return DBUS_ERR;
+ } else if (!dhd_bus->drvintf || (dhd_bus->drvintf->recv_irb == NULL)) {
+ /* Lower edge bus interface does not support recv_irb().
+ * No need to pre-submit IRBs in this case.
+ */
+ return DBUS_ERR;
+ }
+
+	/* The dongle recv callback is free-running without a lock, so multiple callbacks (and this
+	 * refill) can run in parallel. Since the rxoff condition is triggered outside, the while
+	 * loop below has to check it and abort posting more to avoid RPC rxq overflow.
+ */
+ args.qdeq.q = dhd_bus->rx_q;
+ while ((!dhd_bus->rxoff) &&
+ (rxirb = (EXEC_RXLOCK(dhd_bus, q_deq_exec, &args))) != NULL) {
+ err = dhd_bus->drvintf->recv_irb(dhd_bus->bus_info, rxirb);
+ if (err == DBUS_ERR_RXDROP || err == DBUS_ERR_RXFAIL) {
+			/* Add the free rxirb back to the queue
+ * and wait till later
+ */
+ bzero(rxirb, sizeof(dbus_irb_rx_t));
+ args.qenq.q = dhd_bus->rx_q;
+ args.qenq.b = (dbus_irb_t *) rxirb;
+ EXEC_RXLOCK(dhd_bus, q_enq_exec, &args);
+ break;
+ } else if (err != DBUS_OK) {
+ int i = 0;
+ while (i++ < 100) {
+				DBUSERR(("%s: possible memory leak of rxirb\n", __FUNCTION__));
+ }
+ }
+ }
+ return err;
+} /* dbus_rxirbs_fill */
+
+/** called when the DBUS interface state changed. */
+void
+dbus_flowctrl_rx(dbus_pub_t *pub, bool on)
+{
+ dhd_bus_t *dhd_bus = (dhd_bus_t *) pub;
+
+ if (dhd_bus == NULL)
+ return;
+
+ DBUSTRACE(("%s\n", __FUNCTION__));
+
+ if (dhd_bus->rxoff == on)
+ return;
+
+ dhd_bus->rxoff = on;
+
+ if (dhd_bus->pub.busstate == DBUS_STATE_UP) {
+ if (!on) {
+ /* post more irbs, resume rx if necessary */
+ dbus_rxirbs_fill(dhd_bus);
+ if (dhd_bus && dhd_bus->drvintf->recv_resume) {
+ dhd_bus->drvintf->recv_resume(dhd_bus->bus_info);
+ }
+ } else {
+			/* ??? cancel posted irbs first */
+
+ if (dhd_bus && dhd_bus->drvintf->recv_stop) {
+ dhd_bus->drvintf->recv_stop(dhd_bus->bus_info);
+ }
+ }
+ }
+}
+
+/**
+ * Several code paths in this file want to send a buffer to the dongle. This function handles both
+ * sending of a buffer or a pkt.
+ */
+static int
+dbus_send_irb(dbus_pub_t *pub, uint8 *buf, int len, void *pkt, void *info)
+{
+ dhd_bus_t *dhd_bus = (dhd_bus_t *) pub;
+ int err = DBUS_OK;
+ dbus_irb_tx_t *txirb = NULL;
+ int txirb_pending;
+ struct exec_parms args;
+
+ if (dhd_bus == NULL)
+ return DBUS_ERR;
+
+ DBUSTRACE(("%s\n", __FUNCTION__));
+
+ if (dhd_bus->pub.busstate == DBUS_STATE_UP ||
+ dhd_bus->pub.busstate == DBUS_STATE_SLEEP) {
+ args.qdeq.q = dhd_bus->tx_q;
+ if (dhd_bus->drvintf)
+ txirb = EXEC_TXLOCK(dhd_bus, q_deq_exec, &args);
+
+ if (txirb == NULL) {
+ DBUSERR(("Out of tx dbus_bufs\n"));
+ return DBUS_ERR;
+ }
+
+ if (pkt != NULL) {
+ txirb->pkt = pkt;
+ txirb->buf = NULL;
+ txirb->len = 0;
+ } else if (buf != NULL) {
+ txirb->pkt = NULL;
+ txirb->buf = buf;
+ txirb->len = len;
+ } else {
+ ASSERT(0); /* Should not happen */
+ }
+ txirb->info = info;
+ txirb->arg = NULL;
+ txirb->retry_count = 0;
+
+ if (dhd_bus->drvintf && dhd_bus->drvintf->send_irb) {
+ /* call lower DBUS level send_irb function */
+ err = dhd_bus->drvintf->send_irb(dhd_bus->bus_info, txirb);
+ if (err == DBUS_ERR_TXDROP) {
+ /* tx fail and no completion routine to clean up, reclaim irb NOW */
+ DBUSERR(("%s: send_irb failed, status = %d\n", __FUNCTION__, err));
+ bzero(txirb, sizeof(dbus_irb_tx_t));
+ args.qenq.q = dhd_bus->tx_q;
+ args.qenq.b = (dbus_irb_t *) txirb;
+ EXEC_TXLOCK(dhd_bus, q_enq_exec, &args);
+ } else {
+ dbus_tx_timer_start(dhd_bus, DBUS_TX_TIMEOUT_INTERVAL);
+ txirb_pending = dhd_bus->pub.ntxq - dhd_bus->tx_q->cnt;
+ if (txirb_pending > (dhd_bus->tx_low_watermark * 3)) {
+ dbus_flowctrl_tx(dhd_bus, TRUE);
+ }
+ }
+ }
+ } else {
+ err = DBUS_ERR_TXFAIL;
+ DBUSTRACE(("%s: bus down, send_irb failed\n", __FUNCTION__));
+ }
+
+ return err;
+} /* dbus_send_irb */
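
A worked example of the tx flow-control arithmetic above, assuming the defaults set later in this file (DBUS_NTXQ = 100, and tx_low_watermark = ntxq / 4 as in dbus_attach()); the queue counts are illustrative:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	int ntxq = 100;			/* DBUS_NTXQ */
	int tx_low_watermark = ntxq / 4;	/* 25, set in dbus_attach() */
	int tx_q_cnt = 20;		/* free tx IRBs left in tx_q */
	int pending = ntxq - tx_q_cnt;	/* 80 IRBs in flight */

	/* dbus_send_irb(): throttle when too many IRBs are in flight */
	bool throttle = pending > tx_low_watermark * 3;	/* 80 > 75 -> true */
	/* dbus_if_send_irb_complete(): resume once the backlog drains */
	bool resume = pending < tx_low_watermark;	/* 80 < 25 -> false */

	printf("pending=%d throttle=%d resume=%d\n", pending, throttle, resume);
	return 0;
}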
+
+#if defined(BCM_REQUEST_FW)
+
+/**
+ * Before downloading a firmware image into the dongle, the validity of the image must be checked.
+ */
+static int
+check_file(osl_t *osh, unsigned char *headers)
+{
+ struct trx_header *trx;
+ int actual_len = -1;
+
+ /* Extract trx header */
+ trx = (struct trx_header *)headers;
+ if (ltoh32(trx->magic) != TRX_MAGIC) {
+ printf("Error: trx bad hdr %x\n", ltoh32(trx->magic));
+ return -1;
+ }
+
+ headers += SIZEOF_TRX(trx);
+
+ /* TRX V1: get firmware len */
+ /* TRX V2: get firmware len and DSG/CFG lengths */
+ if (ltoh32(trx->flag_version) & TRX_UNCOMP_IMAGE) {
+ actual_len = ltoh32(trx->offsets[TRX_OFFSETS_DLFWLEN_IDX]) +
+ SIZEOF_TRX(trx);
+#ifdef BCMTRXV2
+ if (ISTRX_V2(trx)) {
+ actual_len += ltoh32(trx->offsets[TRX_OFFSETS_DSG_LEN_IDX]) +
+ ltoh32(trx->offsets[TRX_OFFSETS_CFG_LEN_IDX]);
+ }
+#endif
+ return actual_len;
+ } else {
+ printf("compressed image\n");
+ }
+
+ return -1;
+}
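
A simplified sketch of the length computation check_file() performs on an uncompressed TRX image; the struct is an illustrative stand-in for the real struct trx_header in trxhdr.h, and the magic value and offset index are assumptions made for the example:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for struct trx_header (see trxhdr.h) */
struct trx_hdr {
	uint32_t magic;		/* 'HDR0', assumed here */
	uint32_t len;
	uint32_t crc32;
	uint32_t flag_version;
	uint32_t offsets[3];	/* offsets[0] assumed to be the dl fw length */
};

int main(void)
{
	struct trx_hdr h = { 0x30524448u, 0, 0, 0, { 123456u, 0, 0 } };

	if (h.magic != 0x30524448u) {	/* check_file(): reject a bad header */
		printf("bad trx magic\n");
		return 1;
	}
	/* actual_len = downloadable fw length + the TRX header itself */
	printf("actual_len=%u\n", h.offsets[0] + (uint32_t)sizeof(h));
	return 0;
}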
+
+#ifdef EXTERNAL_FW_PATH
+static int
+dbus_get_fw_nvram(dhd_bus_t *dhd_bus, char *pfw_path, char *pnv_path)
+{
+ int bcmerror = -1, i;
+	int len;	/* int, not uint: dhd_os_get_image_block() may return a negative error */
+	uint total_len;
+ void *nv_image = NULL, *fw_image = NULL;
+ char *nv_memblock = NULL, *fw_memblock = NULL;
+ char *bufp;
+ bool file_exists;
+ uint8 nvram_words_pad = 0;
+ uint memblock_size = 2048;
+ uint8 *memptr;
+ int actual_fwlen;
+ struct trx_header *hdr;
+ uint32 img_offset = 0;
+ int offset = 0;
+
+	/* Read the nvram file */
+ file_exists = ((pnv_path != NULL) && (pnv_path[0] != '\0'));
+ if (file_exists) {
+ nv_image = dhd_os_open_image1(dhd_bus->dhd, pnv_path);
+ if (nv_image == NULL) {
+ printf("%s: Open nvram file failed %s\n", __FUNCTION__, pnv_path);
+ goto err;
+ }
+ }
+ nv_memblock = MALLOC(dhd_bus->pub.osh, MAX_NVRAMBUF_SIZE);
+ if (nv_memblock == NULL) {
+ DBUSERR(("%s: Failed to allocate memory %d bytes\n",
+ __FUNCTION__, MAX_NVRAMBUF_SIZE));
+ goto err;
+ }
+ len = dhd_os_get_image_block(nv_memblock, MAX_NVRAMBUF_SIZE, nv_image);
+ if (len > 0 && len < MAX_NVRAMBUF_SIZE) {
+ bufp = (char *)nv_memblock;
+ bufp[len] = 0;
+ dhd_bus->nvram_len = process_nvram_vars(bufp, len);
+ if (dhd_bus->nvram_len % 4)
+ nvram_words_pad = 4 - dhd_bus->nvram_len % 4;
+ } else {
+ DBUSERR(("%s: error reading nvram file: %d\n", __FUNCTION__, len));
+ bcmerror = DBUS_ERR_NVRAM;
+ goto err;
+ }
+ if (nv_image) {
+ dhd_os_close_image1(dhd_bus->dhd, nv_image);
+ nv_image = NULL;
+ }
+
+	/* Read the first block of fw to calculate total_len */
+ file_exists = ((pfw_path != NULL) && (pfw_path[0] != '\0'));
+ if (file_exists) {
+ fw_image = dhd_os_open_image1(dhd_bus->dhd, pfw_path);
+ if (fw_image == NULL) {
+ printf("%s: Open fw file failed %s\n", __FUNCTION__, pfw_path);
+ goto err;
+ }
+ }
+ memptr = fw_memblock = MALLOC(dhd_bus->pub.osh, memblock_size);
+ if (fw_memblock == NULL) {
+ DBUSERR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__,
+ memblock_size));
+ goto err;
+ }
+ len = dhd_os_get_image_block((char*)memptr, memblock_size, fw_image);
+ if ((actual_fwlen = check_file(dhd_bus->pub.osh, memptr)) <= 0) {
+ DBUSERR(("%s: bad firmware format!\n", __FUNCTION__));
+ goto err;
+ }
+
+ total_len = actual_fwlen + dhd_bus->nvram_len + nvram_words_pad;
+#if defined(CONFIG_DHD_USE_STATIC_BUF)
+ dhd_bus->image = (uint8*)DHD_OS_PREALLOC(dhd_bus->dhd,
+ DHD_PREALLOC_MEMDUMP_RAM, total_len);
+#else
+ dhd_bus->image = MALLOC(dhd_bus->pub.osh, total_len);
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+ dhd_bus->image_len = total_len;
+ if (dhd_bus->image == NULL) {
+ DBUSERR(("%s: malloc failed! size=%d\n", __FUNCTION__, total_len));
+ goto err;
+ }
+
+	/* Step1: Copy trx header + firmware */
+ memptr = fw_memblock;
+ do {
+ if (len < 0) {
+ DBUSERR(("%s: dhd_os_get_image_block failed (%d)\n", __FUNCTION__, len));
+ bcmerror = BCME_ERROR;
+ goto err;
+ }
+ bcopy(memptr, dhd_bus->image+offset, len);
+ offset += len;
+ } while ((len = dhd_os_get_image_block((char*)memptr, memblock_size, fw_image)));
+ /* Step2: Copy NVRAM + pad */
+ hdr = (struct trx_header *)dhd_bus->image;
+ img_offset = SIZEOF_TRX(hdr) + hdr->offsets[TRX_OFFSETS_DLFWLEN_IDX];
+ bcopy(nv_memblock, (uint8 *)(dhd_bus->image + img_offset),
+ dhd_bus->nvram_len);
+ img_offset += dhd_bus->nvram_len;
+ if (nvram_words_pad) {
+ bzero(&dhd_bus->image[img_offset], nvram_words_pad);
+ img_offset += nvram_words_pad;
+ }
+#ifdef BCMTRXV2
+ /* Step3: Copy DSG/CFG for V2 */
+ if (ISTRX_V2(hdr) &&
+ (hdr->offsets[TRX_OFFSETS_DSG_LEN_IDX] ||
+ hdr->offsets[TRX_OFFSETS_CFG_LEN_IDX])) {
+ DBUSERR(("%s: fix me\n", __FUNCTION__));
+ }
+#endif /* BCMTRXV2 */
+ /* Step4: update TRX header for nvram size */
+ hdr = (struct trx_header *)dhd_bus->image;
+ hdr->len = htol32(total_len);
+ /* Pass the actual fw len */
+ hdr->offsets[TRX_OFFSETS_NVM_LEN_IDX] =
+ htol32(dhd_bus->nvram_len + nvram_words_pad);
+ /* Calculate CRC over header */
+ hdr->crc32 = hndcrc32((uint8 *)&hdr->flag_version,
+ SIZEOF_TRX(hdr) - OFFSETOF(struct trx_header, flag_version),
+ CRC32_INIT_VALUE);
+
+ /* Calculate CRC over data */
+ for (i = SIZEOF_TRX(hdr); i < total_len; ++i)
+ hdr->crc32 = hndcrc32((uint8 *)&dhd_bus->image[i], 1, hdr->crc32);
+ hdr->crc32 = htol32(hdr->crc32);
+
+ bcmerror = DBUS_OK;
+
+err:
+ if (fw_memblock)
+		MFREE(dhd_bus->pub.osh, fw_memblock, memblock_size);
+ if (fw_image)
+ dhd_os_close_image1(dhd_bus->dhd, fw_image);
+ if (nv_memblock)
+ MFREE(dhd_bus->pub.osh, nv_memblock, MAX_NVRAMBUF_SIZE);
+ if (nv_image)
+ dhd_os_close_image1(dhd_bus->dhd, nv_image);
+
+ return bcmerror;
+}
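
The nvram blob appended behind the firmware must end on a 4-byte boundary; this one-liner mirrors the nvram_words_pad computation above (the example length is made up):

#include <stdio.h>

int main(void)
{
	int nvram_len = 1461;	/* example length after process_nvram_vars() */
	int pad = (nvram_len % 4) ? (4 - nvram_len % 4) : 0;	/* -> 3 */

	printf("nvram occupies %d bytes in the image\n", nvram_len + pad);
	return 0;
}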
+
+/**
+ * during driver initialization ('attach') or after PnP 'resume', firmware needs to be loaded into
+ * the dongle
+ */
+static int
+dbus_do_download(dhd_bus_t *dhd_bus, char *pfw_path, char *pnv_path)
+{
+ int err = DBUS_OK;
+
+ err = dbus_get_fw_nvram(dhd_bus, pfw_path, pnv_path);
+ if (err) {
+ DBUSERR(("dbus_do_download: fail to get nvram %d\n", err));
+ return err;
+ }
+
+ if (dhd_bus->drvintf->dlstart && dhd_bus->drvintf->dlrun) {
+ err = dhd_bus->drvintf->dlstart(dhd_bus->bus_info,
+ dhd_bus->image, dhd_bus->image_len);
+ if (err == DBUS_OK) {
+ err = dhd_bus->drvintf->dlrun(dhd_bus->bus_info);
+ }
+ } else
+ err = DBUS_ERR;
+
+ if (dhd_bus->image) {
+#if defined(CONFIG_DHD_USE_STATIC_BUF)
+ DHD_OS_PREFREE(dhd_bus->dhd, dhd_bus->image, dhd_bus->image_len);
+#else
+ MFREE(dhd_bus->pub.osh, dhd_bus->image, dhd_bus->image_len);
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+ dhd_bus->image = NULL;
+ dhd_bus->image_len = 0;
+ }
+
+ return err;
+} /* dbus_do_download */
+#else
+
+/**
+ * It is easier for the user to pass one jumbo nvram file to the driver than a set of smaller files.
+ * The 'jumbo nvram' file format is essentially a set of nvram files. Before commencing firmware
+ * download, the dongle needs to be probed so that the correct nvram contents within the jumbo nvram
+ * file are selected.
+ */
+static int
+dbus_jumbo_nvram(dhd_bus_t *dhd_bus)
+{
+ int8 *nvram = NULL;
+ int nvram_len = 0;
+ int ret = DBUS_OK;
+ uint16 boardrev = 0xFFFF;
+ uint16 boardtype = 0xFFFF;
+
+ /* read the otp for boardrev & boardtype
+ * if boardtype/rev are present in otp
+ * select nvram data for that boardtype/rev
+ */
+ dbus_otp(dhd_bus, &boardtype, &boardrev);
+
+ ret = dbus_select_nvram(dhd_bus, dhd_bus->extdl.vars, dhd_bus->extdl.varslen,
+ boardtype, boardrev, &nvram, &nvram_len);
+
+ if (ret == DBUS_JUMBO_BAD_FORMAT)
+ return DBUS_ERR_NVRAM;
+ else if (ret == DBUS_JUMBO_NOMATCH &&
+ (boardtype != 0xFFFF || boardrev != 0xFFFF)) {
+ DBUSERR(("No matching NVRAM for boardtype 0x%02x boardrev 0x%02x\n",
+ boardtype, boardrev));
+ return DBUS_ERR_NVRAM;
+ }
+ dhd_bus->nvram = nvram;
+ dhd_bus->nvram_len = nvram_len;
+
+ return DBUS_OK;
+}
+
+/** before commencing fw download, the correct NVRAM image to download has to be picked */
+static int
+dbus_get_nvram(dhd_bus_t *dhd_bus)
+{
+ int len, i;
+ struct trx_header *hdr;
+ int actual_fwlen;
+ uint32 img_offset = 0;
+
+ dhd_bus->nvram_len = 0;
+ if (dhd_bus->extdl.varslen) {
+ if (DBUS_OK != dbus_jumbo_nvram(dhd_bus))
+ return DBUS_ERR_NVRAM;
+ DBUSERR(("NVRAM %d bytes downloaded\n", dhd_bus->nvram_len));
+ }
+#if defined(BCM_REQUEST_FW)
+ else if (nonfwnvram) {
+ dhd_bus->nvram = nonfwnvram;
+ dhd_bus->nvram_len = nonfwnvramlen;
+ DBUSERR(("NVRAM %d bytes downloaded\n", dhd_bus->nvram_len));
+ }
+#endif
+ if (dhd_bus->nvram) {
+ uint8 nvram_words_pad = 0;
+ /* Validate the format/length etc of the file */
+ if ((actual_fwlen = check_file(dhd_bus->pub.osh, dhd_bus->fw)) <= 0) {
+ DBUSERR(("%s: bad firmware format!\n", __FUNCTION__));
+ return DBUS_ERR_NVRAM;
+ }
+
+ if (!dhd_bus->nvram_nontxt) {
+ /* host supplied nvram could be in .txt format
+ * with all the comments etc...
+ */
+ dhd_bus->nvram_len = process_nvram_vars(dhd_bus->nvram,
+ dhd_bus->nvram_len);
+ }
+ if (dhd_bus->nvram_len % 4)
+ nvram_words_pad = 4 - dhd_bus->nvram_len % 4;
+
+ len = actual_fwlen + dhd_bus->nvram_len + nvram_words_pad;
+ dhd_bus->image = MALLOC(dhd_bus->pub.osh, len);
+ dhd_bus->image_len = len;
+ if (dhd_bus->image == NULL) {
+ DBUSERR(("%s: malloc failed!\n", __FUNCTION__));
+ return DBUS_ERR_NVRAM;
+ }
+ hdr = (struct trx_header *)dhd_bus->fw;
+		/* Step1: Copy trx header + firmware */
+ img_offset = SIZEOF_TRX(hdr) + hdr->offsets[TRX_OFFSETS_DLFWLEN_IDX];
+ bcopy(dhd_bus->fw, dhd_bus->image, img_offset);
+ /* Step2: Copy NVRAM + pad */
+ bcopy(dhd_bus->nvram, (uint8 *)(dhd_bus->image + img_offset),
+ dhd_bus->nvram_len);
+ img_offset += dhd_bus->nvram_len;
+ if (nvram_words_pad) {
+ bzero(&dhd_bus->image[img_offset],
+ nvram_words_pad);
+ img_offset += nvram_words_pad;
+ }
+#ifdef BCMTRXV2
+ /* Step3: Copy DSG/CFG for V2 */
+ if (ISTRX_V2(hdr) &&
+ (hdr->offsets[TRX_OFFSETS_DSG_LEN_IDX] ||
+ hdr->offsets[TRX_OFFSETS_CFG_LEN_IDX])) {
+
+ bcopy(dhd_bus->fw + SIZEOF_TRX(hdr) +
+ hdr->offsets[TRX_OFFSETS_DLFWLEN_IDX] +
+ hdr->offsets[TRX_OFFSETS_NVM_LEN_IDX],
+ dhd_bus->image + img_offset,
+ hdr->offsets[TRX_OFFSETS_DSG_LEN_IDX] +
+ hdr->offsets[TRX_OFFSETS_CFG_LEN_IDX]);
+
+ img_offset += hdr->offsets[TRX_OFFSETS_DSG_LEN_IDX] +
+ hdr->offsets[TRX_OFFSETS_CFG_LEN_IDX];
+ }
+#endif /* BCMTRXV2 */
+ /* Step4: update TRX header for nvram size */
+ hdr = (struct trx_header *)dhd_bus->image;
+ hdr->len = htol32(len);
+ /* Pass the actual fw len */
+ hdr->offsets[TRX_OFFSETS_NVM_LEN_IDX] =
+ htol32(dhd_bus->nvram_len + nvram_words_pad);
+ /* Calculate CRC over header */
+ hdr->crc32 = hndcrc32((uint8 *)&hdr->flag_version,
+ SIZEOF_TRX(hdr) - OFFSETOF(struct trx_header, flag_version),
+ CRC32_INIT_VALUE);
+
+ /* Calculate CRC over data */
+ for (i = SIZEOF_TRX(hdr); i < len; ++i)
+ hdr->crc32 = hndcrc32((uint8 *)&dhd_bus->image[i], 1, hdr->crc32);
+ hdr->crc32 = htol32(hdr->crc32);
+ } else {
+ dhd_bus->image = dhd_bus->fw;
+ dhd_bus->image_len = (uint32)dhd_bus->fwlen;
+ }
+
+ return DBUS_OK;
+} /* dbus_get_nvram */
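
Both download paths finish by recomputing the two TRX CRCs with hndcrc32(), seeding each byte's step with the previous result. A standalone sketch of that incremental pattern, substituting a generic reflected CRC-32 (polynomial 0xEDB88320) for the driver's helper:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t crc32_step(const uint8_t *p, size_t n, uint32_t crc)
{
	while (n--) {
		crc ^= *p++;
		for (int k = 0; k < 8; k++)
			crc = (crc >> 1) ^ (0xEDB88320u & (uint32_t)-(int)(crc & 1));
	}
	return crc;
}

int main(void)
{
	uint8_t image[64];
	uint32_t crc = 0xFFFFFFFFu;	/* CRC32_INIT_VALUE in the driver */

	memset(image, 0xA5, sizeof(image));
	/* incremental, one byte at a time, like the for-loop above */
	for (size_t i = 0; i < sizeof(image); i++)
		crc = crc32_step(&image[i], 1, crc);
	printf("crc32=0x%08x\n", crc);
	return 0;
}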
+
+/**
+ * during driver initialization ('attach') or after PnP 'resume', firmware needs to be loaded into
+ * the dongle
+ */
+static int
+dbus_do_download(dhd_bus_t *dhd_bus)
+{
+ int err = DBUS_OK;
+#ifndef BCM_REQUEST_FW
+ int decomp_override = 0;
+#endif
+#ifdef BCM_REQUEST_FW
+ uint16 boardrev = 0xFFFF, boardtype = 0xFFFF;
+ int8 *temp_nvram;
+ int temp_len;
+#endif
+
+#if defined(BCM_REQUEST_FW)
+ dhd_bus->firmware = dbus_get_fw_nvfile(dhd_bus->pub.attrib.devid,
+ dhd_bus->pub.attrib.chiprev, &dhd_bus->fw, &dhd_bus->fwlen,
+ DBUS_FIRMWARE, 0, 0);
+ if (!dhd_bus->firmware)
+ return DBUS_ERR;
+#endif
+
+ dhd_bus->image = dhd_bus->fw;
+ dhd_bus->image_len = (uint32)dhd_bus->fwlen;
+
+#ifndef BCM_REQUEST_FW
+ if (UNZIP_ENAB(dhd_bus) && !decomp_override) {
+ err = dbus_zlib_decomp(dhd_bus);
+ if (err) {
+ DBUSERR(("dbus_attach: fw decompress fail %d\n", err));
+ return err;
+ }
+ }
+#endif
+
+#if defined(BCM_REQUEST_FW)
+ /* check if firmware is appended with nvram file */
+ err = dbus_otp(dhd_bus, &boardtype, &boardrev);
+	/* check if nvram is provided as a separate file */
+ nonfwnvram = NULL;
+ nonfwnvramlen = 0;
+ dhd_bus->nvfile = dbus_get_fw_nvfile(dhd_bus->pub.attrib.devid,
+ dhd_bus->pub.attrib.chiprev, (void *)&temp_nvram, &temp_len,
+ DBUS_NVFILE, boardtype, boardrev);
+ if (dhd_bus->nvfile) {
+ int8 *tmp = MALLOC(dhd_bus->pub.osh, temp_len);
+ if (tmp) {
+ bcopy(temp_nvram, tmp, temp_len);
+ nonfwnvram = tmp;
+ nonfwnvramlen = temp_len;
+ } else {
+ err = DBUS_ERR;
+ goto fail;
+ }
+ }
+#endif /* defined(BCM_REQUEST_FW) */
+
+ err = dbus_get_nvram(dhd_bus);
+ if (err) {
+ DBUSERR(("dbus_do_download: fail to get nvram %d\n", err));
+ return err;
+ }
+
+
+ if (dhd_bus->drvintf->dlstart && dhd_bus->drvintf->dlrun) {
+ err = dhd_bus->drvintf->dlstart(dhd_bus->bus_info,
+ dhd_bus->image, dhd_bus->image_len);
+
+ if (err == DBUS_OK)
+ err = dhd_bus->drvintf->dlrun(dhd_bus->bus_info);
+ } else
+ err = DBUS_ERR;
+
+ if (dhd_bus->nvram) {
+ MFREE(dhd_bus->pub.osh, dhd_bus->image, dhd_bus->image_len);
+ dhd_bus->image = dhd_bus->fw;
+ dhd_bus->image_len = (uint32)dhd_bus->fwlen;
+ }
+
+#ifndef BCM_REQUEST_FW
+ if (UNZIP_ENAB(dhd_bus) && (!decomp_override) && dhd_bus->orig_fw) {
+ MFREE(dhd_bus->pub.osh, dhd_bus->fw, dhd_bus->decomp_memsize);
+ dhd_bus->image = dhd_bus->fw = dhd_bus->orig_fw;
+ dhd_bus->image_len = dhd_bus->fwlen = dhd_bus->origfw_len;
+ }
+#endif
+
+#if defined(BCM_REQUEST_FW)
+fail:
+ if (dhd_bus->firmware) {
+ dbus_release_fw_nvfile(dhd_bus->firmware);
+ dhd_bus->firmware = NULL;
+ }
+ if (dhd_bus->nvfile) {
+ dbus_release_fw_nvfile(dhd_bus->nvfile);
+ dhd_bus->nvfile = NULL;
+ }
+ if (nonfwnvram) {
+ MFREE(dhd_bus->pub.osh, nonfwnvram, nonfwnvramlen);
+ nonfwnvram = NULL;
+ nonfwnvramlen = 0;
+ }
+#endif
+ return err;
+} /* dbus_do_download */
+#endif /* EXTERNAL_FW_PATH */
+#endif
+
+/** required for DBUS deregistration */
+static void
+dbus_disconnect(void *handle)
+{
+ DBUSTRACE(("%s\n", __FUNCTION__));
+
+ if (disconnect_cb)
+ disconnect_cb(disc_arg);
+}
+
+/**
+ * This function is called when the sent irb times out without a tx response status.
+ * DBUS adds reliability by resending timed out IRBs DBUS_TX_RETRY_LIMIT times.
+ */
+static void
+dbus_if_send_irb_timeout(void *handle, dbus_irb_tx_t *txirb)
+{
+ dhd_bus_t *dhd_bus = (dhd_bus_t *) handle;
+
+ if ((dhd_bus == NULL) || (dhd_bus->drvintf == NULL) || (txirb == NULL)) {
+ return;
+ }
+
+ DBUSTRACE(("%s\n", __FUNCTION__));
+
+ return;
+
+} /* dbus_if_send_irb_timeout */
+
+/**
+ * When lower DBUS level signals that a send IRB completed, either successful or not, the higher
+ * level (e.g. dhd_linux.c) has to be notified, and transmit flow control has to be evaluated.
+ */
+static void
+dbus_if_send_irb_complete(void *handle, dbus_irb_tx_t *txirb, int status)
+{
+ dhd_bus_t *dhd_bus = (dhd_bus_t *) handle;
+ int txirb_pending;
+ struct exec_parms args;
+ void *pktinfo;
+
+ if ((dhd_bus == NULL) || (txirb == NULL)) {
+ return;
+ }
+
+ DBUSTRACE(("%s: status = %d\n", __FUNCTION__, status));
+
+ dbus_tx_timer_stop(dhd_bus);
+
+	/* re-queue BEFORE calling send_complete, which will assume that this irb
+	 * is now available.
+	 */
+ pktinfo = txirb->info;
+ bzero(txirb, sizeof(dbus_irb_tx_t));
+ args.qenq.q = dhd_bus->tx_q;
+ args.qenq.b = (dbus_irb_t *) txirb;
+ EXEC_TXLOCK(dhd_bus, q_enq_exec, &args);
+
+ if (dhd_bus->pub.busstate != DBUS_STATE_DOWN) {
+ if ((status == DBUS_OK) || (status == DBUS_ERR_NODEVICE)) {
+ if (dhd_bus->cbs && dhd_bus->cbs->send_complete)
+ dhd_bus->cbs->send_complete(dhd_bus->cbarg, pktinfo,
+ status);
+
+ if (status == DBUS_OK) {
+ txirb_pending = dhd_bus->pub.ntxq - dhd_bus->tx_q->cnt;
+ if (txirb_pending)
+ dbus_tx_timer_start(dhd_bus, DBUS_TX_TIMEOUT_INTERVAL);
+ if ((txirb_pending < dhd_bus->tx_low_watermark) &&
+ dhd_bus->txoff && !dhd_bus->txoverride) {
+ dbus_flowctrl_tx(dhd_bus, OFF);
+ }
+ }
+ } else {
+ DBUSERR(("%s: %d WARNING freeing orphan pkt %p\n", __FUNCTION__, __LINE__,
+ pktinfo));
+#if defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_TXNOCOPY) || defined(BCM_RPC_TOC)
+ if (pktinfo)
+ if (dhd_bus->cbs && dhd_bus->cbs->send_complete)
+ dhd_bus->cbs->send_complete(dhd_bus->cbarg, pktinfo,
+ status);
+#else
+ dbus_if_pktfree(dhd_bus, (void*)pktinfo, TRUE);
+#endif /* defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_TXNOCOPY) || defined(BCM_RPC_TOC) */
+ }
+ } else {
+ DBUSERR(("%s: %d WARNING freeing orphan pkt %p\n", __FUNCTION__, __LINE__,
+ pktinfo));
+#if defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_TXNOCOPY) || defined(BCM_RPC_TOC)
+ if (pktinfo)
+ if (dhd_bus->cbs && dhd_bus->cbs->send_complete)
+ dhd_bus->cbs->send_complete(dhd_bus->cbarg, pktinfo,
+ status);
+#else
+ dbus_if_pktfree(dhd_bus, (void*)pktinfo, TRUE);
+#endif /* defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_TXNOCOPY) || defined(BCM_RPC_TOC) */
+ }
+} /* dbus_if_send_irb_complete */
+
+/**
+ * When lower DBUS level signals that a receive IRB completed, either successful or not, the higher
+ * level (e.g. dhd_linux.c) has to be notified, and fresh free receive IRBs may have to be given
+ * to lower levels.
+ */
+static void
+dbus_if_recv_irb_complete(void *handle, dbus_irb_rx_t *rxirb, int status)
+{
+ dhd_bus_t *dhd_bus = (dhd_bus_t *) handle;
+ int rxirb_pending;
+ struct exec_parms args;
+
+ if ((dhd_bus == NULL) || (rxirb == NULL)) {
+ return;
+ }
+ DBUSTRACE(("%s\n", __FUNCTION__));
+ if (dhd_bus->pub.busstate != DBUS_STATE_DOWN &&
+ dhd_bus->pub.busstate != DBUS_STATE_SLEEP) {
+ if (status == DBUS_OK) {
+ if ((rxirb->buf != NULL) && (rxirb->actual_len > 0)) {
+#ifdef DBUS_USB_LOOPBACK
+ if (is_loopback_pkt(rxirb->buf)) {
+ matches_loopback_pkt(rxirb->buf);
+ } else
+#endif
+ if (dhd_bus->cbs && dhd_bus->cbs->recv_buf) {
+ dhd_bus->cbs->recv_buf(dhd_bus->cbarg, rxirb->buf,
+ rxirb->actual_len);
+ }
+ } else if (rxirb->pkt != NULL) {
+ if (dhd_bus->cbs && dhd_bus->cbs->recv_pkt)
+ dhd_bus->cbs->recv_pkt(dhd_bus->cbarg, rxirb->pkt);
+ } else {
+ ASSERT(0); /* Should not happen */
+ }
+
+ rxirb_pending = dhd_bus->pub.nrxq - dhd_bus->rx_q->cnt - 1;
+ if ((rxirb_pending <= dhd_bus->rx_low_watermark) &&
+ !dhd_bus->rxoff) {
+ DBUSTRACE(("Low watermark so submit more %d <= %d \n",
+ dhd_bus->rx_low_watermark, rxirb_pending));
+ dbus_rxirbs_fill(dhd_bus);
+ } else if (dhd_bus->rxoff)
+ DBUSTRACE(("rx flow controlled. not filling more. cut_rxq=%d\n",
+ dhd_bus->rx_q->cnt));
+ } else if (status == DBUS_ERR_NODEVICE) {
+ DBUSERR(("%s: %d status = %d, buf %p\n", __FUNCTION__, __LINE__, status,
+ rxirb->buf));
+#if defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_RXNOCOPY)
+ if (rxirb->buf) {
+ PKTFRMNATIVE(dhd_bus->pub.osh, rxirb->buf);
+ PKTFREE(dhd_bus->pub.osh, rxirb->buf, FALSE);
+ }
+#endif /* BCM_RPC_NOCOPY || BCM_RPC_RXNOCOPY */
+ } else {
+ if (status != DBUS_ERR_RXZLP)
+ DBUSERR(("%s: %d status = %d, buf %p\n", __FUNCTION__, __LINE__,
+ status, rxirb->buf));
+#if defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_RXNOCOPY)
+ if (rxirb->buf) {
+ PKTFRMNATIVE(dhd_bus->pub.osh, rxirb->buf);
+ PKTFREE(dhd_bus->pub.osh, rxirb->buf, FALSE);
+ }
+#endif /* BCM_RPC_NOCOPY || BCM_RPC_RXNOCOPY */
+ }
+ } else {
+ DBUSTRACE(("%s: DBUS down, ignoring recv callback. buf %p\n", __FUNCTION__,
+ rxirb->buf));
+#if defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_RXNOCOPY)
+ if (rxirb->buf) {
+ PKTFRMNATIVE(dhd_bus->pub.osh, rxirb->buf);
+ PKTFREE(dhd_bus->pub.osh, rxirb->buf, FALSE);
+ }
+#endif /* BCM_RPC_NOCOPY || BCM_RPC_RXNOCOPY */
+ }
+ if (dhd_bus->rx_q != NULL) {
+ bzero(rxirb, sizeof(dbus_irb_rx_t));
+ args.qenq.q = dhd_bus->rx_q;
+ args.qenq.b = (dbus_irb_t *) rxirb;
+ EXEC_RXLOCK(dhd_bus, q_enq_exec, &args);
+ } else
+		MFREE(dhd_bus->pub.osh, rxirb, sizeof(dbus_irb_rx_t));
+} /* dbus_if_recv_irb_complete */
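
A worked example of the rx refill threshold checked above, assuming the defaults defined later in this file (DBUS_NRXQ = 50, and rx_low_watermark = nrxq / 2 as in dbus_attach()); the counts are illustrative:

#include <stdio.h>

int main(void)
{
	int nrxq = 50;			/* DBUS_NRXQ */
	int rx_low_watermark = nrxq / 2;	/* 25, set in dbus_attach() */
	int rx_q_cnt = 26;		/* free rx IRBs queued */
	int pending = nrxq - rx_q_cnt - 1;	/* 23 IRBs still posted */

	if (pending <= rx_low_watermark)	/* 23 <= 25: submit more */
		printf("refill: post more rx IRBs\n");
	return 0;
}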
+
+/**
+ * Accumulate errors signaled by lower DBUS levels and signal them to higher (e.g. dhd_linux.c)
+ * level.
+ */
+static void
+dbus_if_errhandler(void *handle, int err)
+{
+ dhd_bus_t *dhd_bus = handle;
+ uint32 mask = 0;
+
+ if (dhd_bus == NULL)
+ return;
+
+ switch (err) {
+ case DBUS_ERR_TXFAIL:
+ dhd_bus->pub.stats.tx_errors++;
+ mask |= ERR_CBMASK_TXFAIL;
+ break;
+ case DBUS_ERR_TXDROP:
+ dhd_bus->pub.stats.tx_dropped++;
+ mask |= ERR_CBMASK_TXFAIL;
+ break;
+ case DBUS_ERR_RXFAIL:
+ dhd_bus->pub.stats.rx_errors++;
+ mask |= ERR_CBMASK_RXFAIL;
+ break;
+ case DBUS_ERR_RXDROP:
+ dhd_bus->pub.stats.rx_dropped++;
+ mask |= ERR_CBMASK_RXFAIL;
+ break;
+ default:
+ break;
+ }
+
+ if (dhd_bus->cbs && dhd_bus->cbs->errhandler && (dhd_bus->errmask & mask))
+ dhd_bus->cbs->errhandler(dhd_bus->cbarg, err);
+}
+
+/**
+ * When lower DBUS level signals control IRB completed, higher level (e.g. dhd_linux.c) has to be
+ * notified.
+ */
+static void
+dbus_if_ctl_complete(void *handle, int type, int status)
+{
+ dhd_bus_t *dhd_bus = (dhd_bus_t *) handle;
+
+ DBUSTRACE(("%s\n", __FUNCTION__));
+
+ if (dhd_bus == NULL) {
+ DBUSERR(("%s: dhd_bus is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ if (dhd_bus->pub.busstate != DBUS_STATE_DOWN) {
+ if (dhd_bus->cbs && dhd_bus->cbs->ctl_complete)
+ dhd_bus->cbs->ctl_complete(dhd_bus->cbarg, type, status);
+ }
+}
+
+/**
+ * Rx related functionality (flow control, posting of free IRBs to rx queue) is dependent upon the
+ * bus state. When lower DBUS level signals a change in the interface state, take appropriate action
+ * and forward the signaling to the higher (e.g. dhd_linux.c) level.
+ */
+static void
+dbus_if_state_change(void *handle, int state)
+{
+ dhd_bus_t *dhd_bus = (dhd_bus_t *) handle;
+ int old_state;
+
+ if (dhd_bus == NULL)
+ return;
+
+ if (dhd_bus->pub.busstate == state)
+ return;
+ old_state = dhd_bus->pub.busstate;
+ if (state == DBUS_STATE_DISCONNECT) {
+ DBUSERR(("DBUS disconnected\n"));
+ }
+
+ /* Ignore USB SUSPEND while not up yet */
+ if (state == DBUS_STATE_SLEEP && old_state != DBUS_STATE_UP)
+ return;
+
+ DBUSTRACE(("dbus state change from %d to to %d\n", old_state, state));
+
+ /* Don't update state if it's PnP firmware re-download */
+ if (state != DBUS_STATE_PNP_FWDL)
+ dhd_bus->pub.busstate = state;
+ else
+ dbus_flowctrl_rx(handle, FALSE);
+ if (state == DBUS_STATE_SLEEP)
+ dbus_flowctrl_rx(handle, TRUE);
+ if (state == DBUS_STATE_UP) {
+ dbus_rxirbs_fill(dhd_bus);
+ dbus_flowctrl_rx(handle, FALSE);
+ }
+
+ if (dhd_bus->cbs && dhd_bus->cbs->state_change)
+ dhd_bus->cbs->state_change(dhd_bus->cbarg, state);
+}
+
+/** Forward request for packet from lower DBUS layer to higher layer (e.g. dhd_linux.c) */
+static void *
+dbus_if_pktget(void *handle, uint len, bool send)
+{
+ dhd_bus_t *dhd_bus = (dhd_bus_t *) handle;
+ void *p = NULL;
+
+ if (dhd_bus == NULL)
+ return NULL;
+
+ if (dhd_bus->cbs && dhd_bus->cbs->pktget)
+ p = dhd_bus->cbs->pktget(dhd_bus->cbarg, len, send);
+ else
+ ASSERT(0);
+
+ return p;
+}
+
+/** Forward request to free packet from lower DBUS layer to higher layer (e.g. dhd_linux.c) */
+static void
+dbus_if_pktfree(void *handle, void *p, bool send)
+{
+ dhd_bus_t *dhd_bus = (dhd_bus_t *) handle;
+
+ if (dhd_bus == NULL)
+ return;
+
+ if (dhd_bus->cbs && dhd_bus->cbs->pktfree)
+ dhd_bus->cbs->pktfree(dhd_bus->cbarg, p, send);
+ else
+ ASSERT(0);
+}
+
+/** Lower DBUS level requests either a send or receive IRB */
+static struct dbus_irb*
+dbus_if_getirb(void *cbarg, bool send)
+{
+ dhd_bus_t *dhd_bus = (dhd_bus_t *) cbarg;
+ struct exec_parms args;
+ struct dbus_irb *irb;
+
+ if ((dhd_bus == NULL) || (dhd_bus->pub.busstate != DBUS_STATE_UP))
+ return NULL;
+
+ if (send == TRUE) {
+ args.qdeq.q = dhd_bus->tx_q;
+ irb = EXEC_TXLOCK(dhd_bus, q_deq_exec, &args);
+ } else {
+ args.qdeq.q = dhd_bus->rx_q;
+ irb = EXEC_RXLOCK(dhd_bus, q_deq_exec, &args);
+ }
+
+ return irb;
+}
+
+/**
+ * Called as part of DBUS bus registration. Calls back into higher level (e.g. dhd_linux.c) probe
+ * function.
+ */
+static void *
+dbus_probe(void *arg, const char *desc, uint32 bustype, uint16 bus_no,
+ uint16 slot, uint32 hdrlen)
+{
+ DBUSTRACE(("%s\n", __FUNCTION__));
+ if (probe_cb) {
+ disc_arg = probe_cb(probe_arg, desc, bustype, bus_no, slot, hdrlen);
+ return disc_arg;
+ }
+
+ return (void *)DBUS_ERR;
+}
+
+/**
+ * As part of initialization, higher level (e.g. dhd_linux.c) requests DBUS to prepare for
+ * action.
+ */
+int
+dhd_bus_register(void)
+{
+ int err;
+
+ DBUSTRACE(("%s: Enter\n", __FUNCTION__));
+
+ probe_cb = dhd_dbus_probe_cb;
+ disconnect_cb = dhd_dbus_disconnect_cb;
+ probe_arg = NULL;
+
+ err = dbus_bus_register(0xa5c, 0x48f, dbus_probe, /* call lower DBUS level register function */
+ dbus_disconnect, NULL, &g_busintf, NULL, NULL);
+
+ /* Device not detected */
+ if (err == DBUS_ERR_NODEVICE)
+ err = DBUS_OK;
+
+ return err;
+}
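
A minimal sketch (simplified signatures, not the real probe_cb_t/disconnect_cb_t types) of the callback wiring dhd_bus_register() sets up: the lower bus layer later invokes probe_cb on device enumeration and hands its return value back to disconnect_cb on removal:

#include <stdio.h>

typedef void *(*probe_fn)(void *arg);
typedef void (*disc_fn)(void *arg);

static void *my_probe(void *arg) { puts("probe"); return arg; }
static void my_disc(void *arg) { (void)arg; puts("disconnect"); }

static probe_fn probe_cb;
static disc_fn disconnect_cb;
static void *probe_arg, *disc_arg;

int main(void)
{
	probe_cb = my_probe;		/* as in dhd_bus_register() */
	disconnect_cb = my_disc;
	probe_arg = NULL;

	disc_arg = probe_cb(probe_arg);	/* device enumerates: dbus_probe() */
	disconnect_cb(disc_arg);	/* device removed: dbus_disconnect() */
	return 0;
}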
+
+dhd_pub_t *g_pub = NULL;
+void
+dhd_bus_unregister(void)
+{
+ int ret;
+
+ DBUSTRACE(("%s\n", __FUNCTION__));
+
+ DHD_MUTEX_LOCK();
+ if (g_pub) {
+ g_pub->dhd_remove = TRUE;
+ if (!g_pub->bus) {
+ dhd_dbus_disconnect_cb(g_pub->bus);
+ }
+ }
+ probe_cb = NULL;
+ DHD_MUTEX_UNLOCK();
+ ret = dbus_bus_deregister();
+ disconnect_cb = NULL;
+ probe_arg = NULL;
+}
+
+/** As part of initialization, data structures have to be allocated and initialized */
+dhd_bus_t *
+dbus_attach(osl_t *osh, int rxsize, int nrxq, int ntxq, dhd_pub_t *pub,
+ dbus_callbacks_t *cbs, dbus_extdl_t *extdl, struct shared_info *sh)
+{
+ dhd_bus_t *dhd_bus;
+ int err;
+
+ if ((g_busintf == NULL) || (g_busintf->attach == NULL) || (cbs == NULL))
+ return NULL;
+
+ DBUSTRACE(("%s\n", __FUNCTION__));
+
+ if ((nrxq <= 0) || (ntxq <= 0))
+ return NULL;
+
+ dhd_bus = MALLOC(osh, sizeof(dhd_bus_t));
+ if (dhd_bus == NULL) {
+ DBUSERR(("%s: malloc failed %zu\n", __FUNCTION__, sizeof(dhd_bus_t)));
+ return NULL;
+ }
+
+ bzero(dhd_bus, sizeof(dhd_bus_t));
+
+ /* BUS-specific driver interface (at a lower DBUS level) */
+ dhd_bus->drvintf = g_busintf;
+ dhd_bus->cbarg = pub;
+ dhd_bus->cbs = cbs;
+
+ dhd_bus->pub.sh = sh;
+ dhd_bus->pub.osh = osh;
+ dhd_bus->pub.rxsize = rxsize;
+
+ dhd_bus->pub.nrxq = nrxq;
+ dhd_bus->rx_low_watermark = nrxq / 2; /* keep enough posted rx urbs */
+ dhd_bus->pub.ntxq = ntxq;
+ dhd_bus->tx_low_watermark = ntxq / 4; /* flow control when too many tx urbs posted */
+
+ dhd_bus->tx_q = MALLOC(osh, sizeof(dbus_irbq_t));
+ if (dhd_bus->tx_q == NULL)
+ goto error;
+ else {
+ bzero(dhd_bus->tx_q, sizeof(dbus_irbq_t));
+ err = dbus_irbq_init(dhd_bus, dhd_bus->tx_q, ntxq, sizeof(dbus_irb_tx_t));
+ if (err != DBUS_OK)
+ goto error;
+ }
+
+ dhd_bus->rx_q = MALLOC(osh, sizeof(dbus_irbq_t));
+ if (dhd_bus->rx_q == NULL)
+ goto error;
+ else {
+ bzero(dhd_bus->rx_q, sizeof(dbus_irbq_t));
+ err = dbus_irbq_init(dhd_bus, dhd_bus->rx_q, nrxq, sizeof(dbus_irb_rx_t));
+ if (err != DBUS_OK)
+ goto error;
+ }
+
+
+ dhd_bus->bus_info = (void *)g_busintf->attach(&dhd_bus->pub,
+ dhd_bus, &dbus_intf_cbs);
+ if (dhd_bus->bus_info == NULL)
+ goto error;
+
+ dbus_tx_timer_init(dhd_bus);
+
+#if defined(BCM_REQUEST_FW)
+ /* Need to copy external image for re-download */
+ if (extdl && extdl->fw && (extdl->fwlen > 0)) {
+ dhd_bus->extdl.fw = MALLOC(osh, extdl->fwlen);
+ if (dhd_bus->extdl.fw) {
+ bcopy(extdl->fw, dhd_bus->extdl.fw, extdl->fwlen);
+ dhd_bus->extdl.fwlen = extdl->fwlen;
+ }
+ }
+
+ if (extdl && extdl->vars && (extdl->varslen > 0)) {
+ dhd_bus->extdl.vars = MALLOC(osh, extdl->varslen);
+ if (dhd_bus->extdl.vars) {
+ bcopy(extdl->vars, dhd_bus->extdl.vars, extdl->varslen);
+ dhd_bus->extdl.varslen = extdl->varslen;
+ }
+ }
+#endif
+
+ return (dhd_bus_t *)dhd_bus;
+
+error:
+ DBUSERR(("%s: Failed\n", __FUNCTION__));
+ dbus_detach(dhd_bus);
+ return NULL;
+} /* dbus_attach */
+
+void
+dbus_detach(dhd_bus_t *pub)
+{
+ dhd_bus_t *dhd_bus = (dhd_bus_t *) pub;
+ osl_t *osh;
+
+ DBUSTRACE(("%s\n", __FUNCTION__));
+
+ if (dhd_bus == NULL)
+ return;
+
+ dbus_tx_timer_stop(dhd_bus);
+
+ osh = pub->pub.osh;
+
+ if (dhd_bus->drvintf && dhd_bus->drvintf->detach)
+ dhd_bus->drvintf->detach((dbus_pub_t *)dhd_bus, dhd_bus->bus_info);
+
+ if (dhd_bus->tx_q) {
+ dbus_irbq_deinit(dhd_bus, dhd_bus->tx_q, sizeof(dbus_irb_tx_t));
+ MFREE(osh, dhd_bus->tx_q, sizeof(dbus_irbq_t));
+ dhd_bus->tx_q = NULL;
+ }
+
+ if (dhd_bus->rx_q) {
+ dbus_irbq_deinit(dhd_bus, dhd_bus->rx_q, sizeof(dbus_irb_rx_t));
+ MFREE(osh, dhd_bus->rx_q, sizeof(dbus_irbq_t));
+ dhd_bus->rx_q = NULL;
+ }
+
+
+ if (dhd_bus->extdl.fw && (dhd_bus->extdl.fwlen > 0)) {
+ MFREE(osh, dhd_bus->extdl.fw, dhd_bus->extdl.fwlen);
+ dhd_bus->extdl.fw = NULL;
+ dhd_bus->extdl.fwlen = 0;
+ }
+
+ if (dhd_bus->extdl.vars && (dhd_bus->extdl.varslen > 0)) {
+ MFREE(osh, dhd_bus->extdl.vars, dhd_bus->extdl.varslen);
+ dhd_bus->extdl.vars = NULL;
+ dhd_bus->extdl.varslen = 0;
+ }
+
+ MFREE(osh, dhd_bus, sizeof(dhd_bus_t));
+} /* dbus_detach */
+
+int dbus_dlneeded(dhd_bus_t *pub)
+{
+ dhd_bus_t *dhd_bus = (dhd_bus_t *) pub;
+ int dlneeded = DBUS_ERR;
+
+ if (!dhd_bus) {
+ DBUSERR(("%s: dhd_bus is NULL\n", __FUNCTION__));
+ return DBUS_ERR;
+ }
+
+ DBUSTRACE(("%s: state %d\n", __FUNCTION__, dhd_bus->pub.busstate));
+
+ if (dhd_bus->drvintf->dlneeded) {
+ dlneeded = dhd_bus->drvintf->dlneeded(dhd_bus->bus_info);
+ }
+ printf("%s: dlneeded=%d\n", __FUNCTION__, dlneeded);
+
+	/* dlneeded > 0: need to download
+	 * dlneeded = 0: downloaded
+	 * dlneeded < 0: bus error
+	 */
+ return dlneeded;
+}
+
+#if defined(BCM_REQUEST_FW)
+int dbus_download_firmware(dhd_bus_t *pub, char *pfw_path, char *pnv_path)
+{
+ dhd_bus_t *dhd_bus = (dhd_bus_t *) pub;
+ int err = DBUS_OK;
+
+ if (!dhd_bus) {
+ DBUSERR(("%s: dhd_bus is NULL\n", __FUNCTION__));
+ return DBUS_ERR;
+ }
+
+ DBUSTRACE(("%s: state %d\n", __FUNCTION__, dhd_bus->pub.busstate));
+
+ dhd_bus->pub.busstate = DBUS_STATE_DL_PENDING;
+#ifdef EXTERNAL_FW_PATH
+ err = dbus_do_download(dhd_bus, pfw_path, pnv_path);
+#else
+ err = dbus_do_download(dhd_bus);
+#endif /* EXTERNAL_FW_PATH */
+ if (err == DBUS_OK) {
+ dhd_bus->pub.busstate = DBUS_STATE_DL_DONE;
+ } else {
+ DBUSERR(("%s: download failed (%d)\n", __FUNCTION__, err));
+ }
+
+ return err;
+}
+#endif
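
A compact sketch of the bus-state transitions around firmware download; the enum values are illustrative stand-ins for the DBUS_STATE_* constants in dbus.h:

#include <stdio.h>

enum state { DOWN, DL_PENDING, DL_DONE, UP };

int main(void)
{
	enum state st = DL_PENDING;	/* set on entry to dbus_download_firmware() */
	int err = 0;			/* assume dbus_do_download() succeeded */

	if (err == 0)
		st = DL_DONE;		/* on failure the state is left as-is */
	if (st == DL_DONE)
		st = UP;		/* dbus_up() accepts DL_DONE, DOWN or SLEEP */
	printf("final state=%d\n", st);
	return 0;
}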
+
+/**
+ * higher layer requests us to 'up' the interface to the dongle. Prerequisite is that firmware (not
+ * bootloader) must be active in the dongle.
+ */
+int
+dbus_up(struct dhd_bus *pub)
+{
+ dhd_bus_t *dhd_bus = (dhd_bus_t *) pub;
+ int err = DBUS_OK;
+
+ DBUSTRACE(("%s\n", __FUNCTION__));
+
+ if (dhd_bus == NULL) {
+ DBUSERR(("%s: dhd_bus is NULL\n", __FUNCTION__));
+ return DBUS_ERR;
+ }
+
+ if ((dhd_bus->pub.busstate == DBUS_STATE_DL_DONE) ||
+ (dhd_bus->pub.busstate == DBUS_STATE_DOWN) ||
+ (dhd_bus->pub.busstate == DBUS_STATE_SLEEP)) {
+ if (dhd_bus->drvintf && dhd_bus->drvintf->up) {
+ err = dhd_bus->drvintf->up(dhd_bus->bus_info);
+
+ if (err == DBUS_OK) {
+ dbus_rxirbs_fill(dhd_bus);
+ }
+ }
+ } else
+ err = DBUS_ERR;
+
+ return err;
+}
+
+/** higher layer requests us to 'down' the interface to the dongle. */
+int
+dbus_down(dbus_pub_t *pub)
+{
+ dhd_bus_t *dhd_bus = (dhd_bus_t *) pub;
+
+ DBUSTRACE(("%s\n", __FUNCTION__));
+
+ if (dhd_bus == NULL)
+ return DBUS_ERR;
+
+ dbus_tx_timer_stop(dhd_bus);
+
+ if (dhd_bus->pub.busstate == DBUS_STATE_UP ||
+ dhd_bus->pub.busstate == DBUS_STATE_SLEEP) {
+ if (dhd_bus->drvintf && dhd_bus->drvintf->down)
+ return dhd_bus->drvintf->down(dhd_bus->bus_info);
+ }
+
+ return DBUS_ERR;
+}
+
+int
+dbus_shutdown(dbus_pub_t *pub)
+{
+ dhd_bus_t *dhd_bus = (dhd_bus_t *) pub;
+
+ DBUSTRACE(("%s\n", __FUNCTION__));
+
+ if (dhd_bus == NULL)
+ return DBUS_ERR;
+
+ if (dhd_bus->drvintf && dhd_bus->drvintf->shutdown)
+ return dhd_bus->drvintf->shutdown(dhd_bus->bus_info);
+
+ return DBUS_OK;
+}
+
+int
+dbus_stop(struct dhd_bus *pub)
+{
+ dhd_bus_t *dhd_bus = (dhd_bus_t *) pub;
+
+ DBUSTRACE(("%s\n", __FUNCTION__));
+
+ if (dhd_bus == NULL)
+ return DBUS_ERR;
+
+ if (dhd_bus->pub.busstate == DBUS_STATE_UP ||
+ dhd_bus->pub.busstate == DBUS_STATE_SLEEP) {
+ if (dhd_bus->drvintf && dhd_bus->drvintf->stop)
+ return dhd_bus->drvintf->stop(dhd_bus->bus_info);
+ }
+
+ return DBUS_ERR;
+}
+
+int dbus_send_txdata(dbus_pub_t *dbus, void *pktbuf)
+{
+ return dbus_send_pkt(dbus, pktbuf, pktbuf /* pktinfo */);
+}
+
+int
+dbus_send_buf(dbus_pub_t *pub, uint8 *buf, int len, void *info)
+{
+ return dbus_send_irb(pub, buf, len, NULL, info);
+}
+
+int
+dbus_send_pkt(dbus_pub_t *pub, void *pkt, void *info)
+{
+ return dbus_send_irb(pub, NULL, 0, pkt, info);
+}
+
+int
+dbus_send_ctl(struct dhd_bus *pub, uint8 *buf, int len)
+{
+ dhd_bus_t *dhd_bus = (dhd_bus_t *) pub;
+
+ if (dhd_bus == NULL) {
+ DBUSERR(("%s: dhd_bus is NULL\n", __FUNCTION__));
+ return DBUS_ERR;
+ }
+
+ if (dhd_bus->pub.busstate == DBUS_STATE_UP ||
+ dhd_bus->pub.busstate == DBUS_STATE_SLEEP) {
+ if (dhd_bus->drvintf && dhd_bus->drvintf->send_ctl)
+ return dhd_bus->drvintf->send_ctl(dhd_bus->bus_info, buf, len);
+ } else {
+ DBUSERR(("%s: bustate=%d\n", __FUNCTION__, dhd_bus->pub.busstate));
+ }
+
+ return DBUS_ERR;
+}
+
+int
+dbus_recv_ctl(struct dhd_bus *pub, uint8 *buf, int len)
+{
+ dhd_bus_t *dhd_bus = (dhd_bus_t *) pub;
+
+ if ((dhd_bus == NULL) || (buf == NULL))
+ return DBUS_ERR;
+
+ if (dhd_bus->pub.busstate == DBUS_STATE_UP ||
+ dhd_bus->pub.busstate == DBUS_STATE_SLEEP) {
+ if (dhd_bus->drvintf && dhd_bus->drvintf->recv_ctl)
+ return dhd_bus->drvintf->recv_ctl(dhd_bus->bus_info, buf, len);
+ }
+
+ return DBUS_ERR;
+}
+
+/** Only called via RPC (Dec 2012) */
+int
+dbus_recv_bulk(dbus_pub_t *pub, uint32 ep_idx)
+{
+ dhd_bus_t *dhd_bus = (dhd_bus_t *) pub;
+
+ dbus_irb_rx_t *rxirb;
+ struct exec_parms args;
+ int status;
+
+
+ if (dhd_bus == NULL)
+ return DBUS_ERR;
+
+ args.qdeq.q = dhd_bus->rx_q;
+ if (dhd_bus->pub.busstate == DBUS_STATE_UP) {
+ if (dhd_bus->drvintf && dhd_bus->drvintf->recv_irb_from_ep) {
+ if ((rxirb = (EXEC_RXLOCK(dhd_bus, q_deq_exec, &args))) != NULL) {
+ status = dhd_bus->drvintf->recv_irb_from_ep(dhd_bus->bus_info,
+ rxirb, ep_idx);
+ if (status == DBUS_ERR_RXDROP) {
+ bzero(rxirb, sizeof(dbus_irb_rx_t));
+ args.qenq.q = dhd_bus->rx_q;
+ args.qenq.b = (dbus_irb_t *) rxirb;
+ EXEC_RXLOCK(dhd_bus, q_enq_exec, &args);
+ }
+ }
+ }
+ }
+
+ return DBUS_ERR;
+}
+
+/** only called by dhd_cdc.c (Dec 2012) */
+int
+dbus_poll_intr(dbus_pub_t *pub)
+{
+ dhd_bus_t *dhd_bus = (dhd_bus_t *) pub;
+
+ int status = DBUS_ERR;
+
+ if (dhd_bus == NULL)
+ return DBUS_ERR;
+
+ if (dhd_bus->pub.busstate == DBUS_STATE_UP) {
+ if (dhd_bus->drvintf && dhd_bus->drvintf->recv_irb_from_ep) {
+ status = dhd_bus->drvintf->recv_irb_from_ep(dhd_bus->bus_info,
+ NULL, 0xff);
+ }
+ }
+ return status;
+}
+
+/** called by nobody (Dec 2012) */
+void *
+dbus_pktget(dbus_pub_t *pub, int len)
+{
+ dhd_bus_t *dhd_bus = (dhd_bus_t *) pub;
+
+ if ((dhd_bus == NULL) || (len < 0))
+ return NULL;
+
+ return PKTGET(dhd_bus->pub.osh, len, TRUE);
+}
+
+/** called by nobody (Dec 2012) */
+void
+dbus_pktfree(dbus_pub_t *pub, void* pkt)
+{
+ dhd_bus_t *dhd_bus = (dhd_bus_t *) pub;
+
+ if ((dhd_bus == NULL) || (pkt == NULL))
+ return;
+
+ PKTFREE(dhd_bus->pub.osh, pkt, TRUE);
+}
+
+/** called by nobody (Dec 2012) */
+int
+dbus_get_stats(dbus_pub_t *pub, dbus_stats_t *stats)
+{
+ dhd_bus_t *dhd_bus = (dhd_bus_t *) pub;
+
+ if ((dhd_bus == NULL) || (stats == NULL))
+ return DBUS_ERR;
+
+ bcopy(&dhd_bus->pub.stats, stats, sizeof(dbus_stats_t));
+
+ return DBUS_OK;
+}
+
+int
+dbus_get_attrib(dhd_bus_t *pub, dbus_attrib_t *attrib)
+{
+ dhd_bus_t *dhd_bus = (dhd_bus_t *) pub;
+ int err = DBUS_ERR;
+
+ if ((dhd_bus == NULL) || (attrib == NULL))
+ return DBUS_ERR;
+
+ if (dhd_bus->drvintf && dhd_bus->drvintf->get_attrib) {
+ err = dhd_bus->drvintf->get_attrib(dhd_bus->bus_info,
+ &dhd_bus->pub.attrib);
+ }
+
+ bcopy(&dhd_bus->pub.attrib, attrib, sizeof(dbus_attrib_t));
+ return err;
+}
+
+int
+dbus_get_device_speed(dbus_pub_t *pub)
+{
+ dhd_bus_t *dhd_bus = (dhd_bus_t *) pub;
+
+ if (dhd_bus == NULL)
+ return INVALID_SPEED;
+
+ return (dhd_bus->pub.device_speed);
+}
+
+int
+dbus_set_config(dbus_pub_t *pub, dbus_config_t *config)
+{
+ dhd_bus_t *dhd_bus = (dhd_bus_t *) pub;
+ int err = DBUS_ERR;
+
+ if ((dhd_bus == NULL) || (config == NULL))
+ return DBUS_ERR;
+
+ if (dhd_bus->drvintf && dhd_bus->drvintf->set_config) {
+ err = dhd_bus->drvintf->set_config(dhd_bus->bus_info,
+ config);
+
+ if ((config->config_id == DBUS_CONFIG_ID_AGGR_LIMIT) &&
+ (!err) &&
+ (dhd_bus->pub.busstate == DBUS_STATE_UP)) {
+ dbus_rxirbs_fill(dhd_bus);
+ }
+ }
+
+ return err;
+}
+
+int
+dbus_get_config(dbus_pub_t *pub, dbus_config_t *config)
+{
+ dhd_bus_t *dhd_bus = (dhd_bus_t *) pub;
+ int err = DBUS_ERR;
+
+ if ((dhd_bus == NULL) || (config == NULL))
+ return DBUS_ERR;
+
+ if (dhd_bus->drvintf && dhd_bus->drvintf->get_config) {
+ err = dhd_bus->drvintf->get_config(dhd_bus->bus_info,
+ config);
+ }
+
+ return err;
+}
+
+int
+dbus_set_errmask(dbus_pub_t *pub, uint32 mask)
+{
+ dhd_bus_t *dhd_bus = (dhd_bus_t *) pub;
+ int err = DBUS_OK;
+
+ if (dhd_bus == NULL)
+ return DBUS_ERR;
+
+ dhd_bus->errmask = mask;
+ return err;
+}
+
+int
+dbus_pnp_resume(dbus_pub_t *pub, int *fw_reload)
+{
+ dhd_bus_t *dhd_bus = (dhd_bus_t *) pub;
+ int err = DBUS_ERR;
+ bool fwdl = FALSE;
+
+ DBUSTRACE(("%s\n", __FUNCTION__));
+
+ if (dhd_bus == NULL)
+ return DBUS_ERR;
+
+ if (dhd_bus->pub.busstate == DBUS_STATE_UP) {
+ return DBUS_OK;
+ }
+
+
+
+ if (dhd_bus->drvintf->pnp) {
+ err = dhd_bus->drvintf->pnp(dhd_bus->bus_info,
+ DBUS_PNP_RESUME);
+ }
+
+ if (dhd_bus->drvintf->recv_needed) {
+ if (dhd_bus->drvintf->recv_needed(dhd_bus->bus_info)) {
+ /* Refill after sleep/hibernate */
+ dbus_rxirbs_fill(dhd_bus);
+ }
+ }
+
+
+ if (fw_reload)
+ *fw_reload = fwdl;
+
+ return err;
+} /* dbus_pnp_resume */
+
+int
+dbus_pnp_sleep(dbus_pub_t *pub)
+{
+ dhd_bus_t *dhd_bus = (dhd_bus_t *) pub;
+ int err = DBUS_ERR;
+
+ DBUSTRACE(("%s\n", __FUNCTION__));
+
+ if (dhd_bus == NULL)
+ return DBUS_ERR;
+
+ dbus_tx_timer_stop(dhd_bus);
+
+ if (dhd_bus->drvintf && dhd_bus->drvintf->pnp) {
+ err = dhd_bus->drvintf->pnp(dhd_bus->bus_info,
+ DBUS_PNP_SLEEP);
+ }
+
+ return err;
+}
+
+int
+dbus_pnp_disconnect(dbus_pub_t *pub)
+{
+ dhd_bus_t *dhd_bus = (dhd_bus_t *) pub;
+ int err = DBUS_ERR;
+
+ DBUSTRACE(("%s\n", __FUNCTION__));
+
+ if (dhd_bus == NULL)
+ return DBUS_ERR;
+
+ dbus_tx_timer_stop(dhd_bus);
+
+ if (dhd_bus->drvintf && dhd_bus->drvintf->pnp) {
+ err = dhd_bus->drvintf->pnp(dhd_bus->bus_info,
+ DBUS_PNP_DISCONNECT);
+ }
+
+ return err;
+}
+
+int
+dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name,
+ void *params, int plen, void *arg, int len, bool set)
+{
+ dhd_bus_t *dhd_bus = (dhd_bus_t *) dhdp->bus;
+ int err = DBUS_ERR;
+
+ DBUSTRACE(("%s\n", __FUNCTION__));
+
+ if (dhd_bus == NULL)
+ return DBUS_ERR;
+
+ if (dhd_bus->drvintf && dhd_bus->drvintf->iovar_op) {
+ err = dhd_bus->drvintf->iovar_op(dhd_bus->bus_info,
+ name, params, plen, arg, len, set);
+ }
+
+ return err;
+}
+
+
+void *
+dhd_dbus_txq(const dbus_pub_t *pub)
+{
+ return NULL;
+}
+
+uint
+dhd_dbus_hdrlen(const dbus_pub_t *pub)
+{
+ return 0;
+}
+
+void *
+dbus_get_devinfo(dbus_pub_t *pub)
+{
+ return pub->dev_info;
+}
+
+#if defined(BCM_REQUEST_FW) && !defined(EXTERNAL_FW_PATH)
+static int
+dbus_otp(dhd_bus_t *dhd_bus, uint16 *boardtype, uint16 *boardrev)
+{
+ uint32 value = 0;
+ uint8 *cis;
+ uint16 *otpinfo;
+ uint32 i;
+ bool standard_cis = TRUE;
+ uint8 tup, tlen;
+ bool btype_present = FALSE;
+ bool brev_present = FALSE;
+ int ret;
+ int devid;
+ uint16 btype = 0;
+ uint16 brev = 0;
+ uint32 otp_size = 0, otp_addr = 0, otp_sw_rgn = 0;
+
+ if (dhd_bus == NULL || dhd_bus->drvintf == NULL ||
+ dhd_bus->drvintf->readreg == NULL)
+ return DBUS_ERR;
+
+ devid = dhd_bus->pub.attrib.devid;
+
+ if ((devid == BCM43234_CHIP_ID) || (devid == BCM43235_CHIP_ID) ||
+ (devid == BCM43236_CHIP_ID)) {
+
+ otp_size = BCM_OTP_SIZE_43236;
+ otp_sw_rgn = BCM_OTP_SW_RGN_43236;
+ otp_addr = BCM_OTP_ADDR_43236;
+
+ } else {
+ return DBUS_ERR_NVRAM;
+ }
+
+ cis = MALLOC(dhd_bus->pub.osh, otp_size * 2);
+ if (cis == NULL)
+ return DBUS_ERR;
+
+ otpinfo = (uint16 *) cis;
+
+ for (i = 0; i < otp_size; i++) {
+
+ ret = dhd_bus->drvintf->readreg(dhd_bus->bus_info,
+ otp_addr + ((otp_sw_rgn + i) << 1), 2, &value);
+
+ if (ret != DBUS_OK) {
+ MFREE(dhd_bus->pub.osh, cis, otp_size * 2);
+ return ret;
+ }
+ otpinfo[i] = (uint16) value;
+ }
+
+ for (i = 0; i < (otp_size << 1); ) {
+
+ if (standard_cis) {
+ tup = cis[i++];
+ if (tup == CISTPL_NULL || tup == CISTPL_END)
+ tlen = 0;
+ else
+ tlen = cis[i++];
+ } else {
+ if (cis[i] == CISTPL_NULL || cis[i] == CISTPL_END) {
+ tlen = 0;
+ tup = cis[i];
+ } else {
+ tlen = cis[i];
+ tup = CISTPL_BRCM_HNBU;
+ }
+ ++i;
+ }
+
+ if (tup == CISTPL_END || (i + tlen) >= (otp_size << 1)) {
+ break;
+ }
+
+ switch (tup) {
+
+ case CISTPL_BRCM_HNBU:
+
+ switch (cis[i]) {
+
+ case HNBU_BOARDTYPE:
+
+ btype = (uint16) ((cis[i + 2] << 8) + cis[i + 1]);
+ btype_present = TRUE;
+ DBUSTRACE(("%s: HNBU_BOARDTYPE = 0x%2x\n", __FUNCTION__,
+ (uint32)btype));
+ break;
+
+ case HNBU_BOARDREV:
+
+ if (tlen == 2)
+ brev = (uint16) cis[i + 1];
+ else
+ brev = (uint16) ((cis[i + 2] << 8) + cis[i + 1]);
+ brev_present = TRUE;
+ DBUSTRACE(("%s: HNBU_BOARDREV = 0x%2x\n", __FUNCTION__,
+					(uint32)brev));
+ break;
+
+ case HNBU_HNBUCIS:
+ DBUSTRACE(("%s: HNBU_HNBUCIS\n", __FUNCTION__));
+ tlen++;
+ standard_cis = FALSE;
+ break;
+ }
+ break;
+ }
+
+ i += tlen;
+ }
+
+ MFREE(dhd_bus->pub.osh, cis, otp_size * 2);
+
+ if (btype_present == TRUE && brev_present == TRUE) {
+ *boardtype = btype;
+ *boardrev = brev;
+ DBUSERR(("otp boardtype = 0x%2x boardrev = 0x%2x\n",
+ *boardtype, *boardrev));
+
+ return DBUS_OK;
+ }
+ else
+ return DBUS_ERR;
+} /* dbus_otp */
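
A tiny sketch of the standard CIS tuple walk in dbus_otp(): each tuple is [code][len][data...], while the NULL/END codes carry no length byte. The tuple codes below are illustrative values, not the real CISTPL_* macros:

#include <stdio.h>

int main(void)
{
	unsigned char cis[] = { 0x80, 2, 0x34, 0x12,	/* vendor tuple, len 2 */
				0xff };			/* end-of-chain tuple */
	unsigned i = 0;

	while (i < sizeof(cis)) {
		unsigned char tup = cis[i++];
		unsigned char tlen = (tup == 0x00 || tup == 0xff) ? 0 : cis[i++];

		if (tup == 0xff)
			break;
		printf("tuple 0x%02x len %u first byte 0x%02x\n",
		       tup, (unsigned)tlen, (unsigned)(tlen ? cis[i] : 0));
		i += tlen;
	}
	return 0;
}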
+
+static int
+dbus_select_nvram(dhd_bus_t *dhd_bus, int8 *jumbonvram, int jumbolen,
+uint16 boardtype, uint16 boardrev, int8 **nvram, int *nvram_len)
+{
+	/* The multi-board nvram file format is a concatenation of nvram sets, each terminated by
+	 * the BCM_JUMBO_NVRAM_DELIMIT character. The file format for three concatenated sets is
+ * \nBroadcom Jumbo Nvram file\nfirst_set\nsecond_set\nthird_set\n
+ */
+ uint8 *nvram_start = NULL, *nvram_end = NULL;
+ uint8 *nvram_start_prev = NULL, *nvram_end_prev = NULL;
+ uint16 btype = 0, brev = 0;
+ int len = 0;
+ char *field;
+
+ *nvram = NULL;
+ *nvram_len = 0;
+
+ if (strncmp(BCM_JUMBO_START, jumbonvram, strlen(BCM_JUMBO_START))) {
+ /* single nvram file in the native format */
+ DBUSTRACE(("%s: Non-Jumbo NVRAM File \n", __FUNCTION__));
+ *nvram = jumbonvram;
+ *nvram_len = jumbolen;
+ return DBUS_OK;
+ } else {
+ DBUSTRACE(("%s: Jumbo NVRAM File \n", __FUNCTION__));
+ }
+
+ /* sanity test the end of the config sets for proper ending */
+ if (jumbonvram[jumbolen - 1] != BCM_JUMBO_NVRAM_DELIMIT ||
+ jumbonvram[jumbolen - 2] != '\0') {
+ DBUSERR(("%s: Bad Jumbo NVRAM file format\n", __FUNCTION__));
+ return DBUS_JUMBO_BAD_FORMAT;
+ }
+
+ dhd_bus->nvram_nontxt = DBUS_NVRAM_NONTXT;
+
+ nvram_start = jumbonvram;
+
+ while (*nvram_start != BCM_JUMBO_NVRAM_DELIMIT && len < jumbolen) {
+
+ /* consume the first file info line
+ * \nBroadcom Jumbo Nvram file\nfile1\n ...
+ */
+ len ++;
+ nvram_start ++;
+ }
+
+ nvram_end = nvram_start;
+
+ /* search for "boardrev=0xabcd" and "boardtype=0x1234" information in
+ * the concatenated nvram config files /sets
+ */
+
+ while (len < jumbolen) {
+
+ if (*nvram_end == '\0') {
+ /* end of a config set is marked by multiple null characters */
+ len ++;
+ nvram_end ++;
+ DBUSTRACE(("%s: NULL chr len = %d char = 0x%x\n", __FUNCTION__,
+ len, *nvram_end));
+ continue;
+
+ } else if (*nvram_end == BCM_JUMBO_NVRAM_DELIMIT) {
+
+ /* config set delimiter is reached */
+ /* check if next config set is present or not
+ * return if next config is not present
+ */
+
+ /* start search the next config set */
+ nvram_start_prev = nvram_start;
+ nvram_end_prev = nvram_end;
+
+ nvram_end ++;
+ nvram_start = nvram_end;
+ btype = brev = 0;
+ DBUSTRACE(("%s: going to next record len = %d "
+ "char = 0x%x \n", __FUNCTION__, len, *nvram_end));
+ len ++;
+ if (len >= jumbolen) {
+
+ *nvram = nvram_start_prev;
+ *nvram_len = (int)(nvram_end_prev - nvram_start_prev);
+
+ DBUSTRACE(("%s: no more len = %d nvram_end = 0x%p",
+ __FUNCTION__, len, nvram_end));
+
+ return DBUS_JUMBO_NOMATCH;
+
+ } else {
+ continue;
+ }
+
+ } else {
+
+ DBUSTRACE(("%s: config str = %s\n", __FUNCTION__, nvram_end));
+
+ if (bcmp(nvram_end, "boardtype", strlen("boardtype")) == 0) {
+
+ field = strchr(nvram_end, '=');
+ field++;
+ btype = (uint16)bcm_strtoul(field, NULL, 0);
+
+ DBUSTRACE(("%s: btype = 0x%x boardtype = 0x%x \n", __FUNCTION__,
+ btype, boardtype));
+ }
+
+ if (bcmp(nvram_end, "boardrev", strlen("boardrev")) == 0) {
+
+ field = strchr(nvram_end, '=');
+ field++;
+ brev = (uint16)bcm_strtoul(field, NULL, 0);
+
+ DBUSTRACE(("%s: brev = 0x%x boardrev = 0x%x \n", __FUNCTION__,
+ brev, boardrev));
+ }
+ if (btype == boardtype && brev == boardrev) {
+				/* locate nvram config set end, i.e. find the BCM_JUMBO_NVRAM_DELIMIT char */
+ while (*nvram_end != BCM_JUMBO_NVRAM_DELIMIT)
+ nvram_end ++;
+ *nvram = nvram_start;
+ *nvram_len = (int) (nvram_end - nvram_start);
+ DBUSTRACE(("found len = %d nvram_start = 0x%p "
+ "nvram_end = 0x%p\n", *nvram_len, nvram_start, nvram_end));
+ return DBUS_OK;
+ }
+
+ len += (strlen(nvram_end) + 1);
+ nvram_end += (strlen(nvram_end) + 1);
+ }
+ }
+ return DBUS_JUMBO_NOMATCH;
+} /* dbus_select_nvram */
+
+#endif
+
+#define DBUS_NRXQ 50
+#define DBUS_NTXQ 100
+
+static void
+dhd_dbus_send_complete(void *handle, void *info, int status)
+{
+ dhd_pub_t *dhd = (dhd_pub_t *)handle;
+ void *pkt = info;
+
+ if ((dhd == NULL) || (pkt == NULL)) {
+ DBUSERR(("dhd or pkt is NULL\n"));
+ return;
+ }
+
+ if (status == DBUS_OK) {
+ dhd->dstats.tx_packets++;
+ } else {
+ DBUSERR(("TX error=%d\n", status));
+ dhd->dstats.tx_errors++;
+ }
+#ifdef PROP_TXSTATUS
+ if (DHD_PKTTAG_WLFCPKT(PKTTAG(pkt)) &&
+ (dhd_wlfc_txcomplete(dhd, pkt, status == 0) != WLFC_UNSUPPORTED)) {
+ return;
+ } else
+#endif /* PROP_TXSTATUS */
+ dhd_txcomplete(dhd, pkt, status == 0);
+ PKTFREE(dhd->osh, pkt, TRUE);
+}
+
+static void
+dhd_dbus_recv_pkt(void *handle, void *pkt)
+{
+ uchar reorder_info_buf[WLHOST_REORDERDATA_TOTLEN];
+ uint reorder_info_len;
+ uint pkt_count;
+ dhd_pub_t *dhd = (dhd_pub_t *)handle;
+ int ifidx = 0;
+
+ if (dhd == NULL) {
+ DBUSERR(("%s: dhd is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ /* If the protocol uses a data header, check and remove it */
+ if (dhd_prot_hdrpull(dhd, &ifidx, pkt, reorder_info_buf,
+ &reorder_info_len) != 0) {
+ DBUSERR(("rx protocol error\n"));
+ PKTFREE(dhd->osh, pkt, FALSE);
+ dhd->rx_errors++;
+ return;
+ }
+
+ if (reorder_info_len) {
+ /* Reordering info from the firmware */
+ dhd_process_pkt_reorder_info(dhd, reorder_info_buf, reorder_info_len,
+ &pkt, &pkt_count);
+ if (pkt_count == 0)
+ return;
+ }
+ else {
+ pkt_count = 1;
+ }
+ dhd_rx_frame(dhd, ifidx, pkt, pkt_count, 0);
+}
+
+static void
+dhd_dbus_recv_buf(void *handle, uint8 *buf, int len)
+{
+ dhd_pub_t *dhd = (dhd_pub_t *)handle;
+ void *pkt;
+
+ if (dhd == NULL) {
+ DBUSERR(("%s: dhd is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ if ((pkt = PKTGET(dhd->osh, len, FALSE)) == NULL) {
+ DBUSERR(("PKTGET (rx) failed=%d\n", len));
+ return;
+ }
+
+ bcopy(buf, PKTDATA(dhd->osh, pkt), len);
+ dhd_dbus_recv_pkt(dhd, pkt);
+}
+
+static void
+dhd_dbus_txflowcontrol(void *handle, bool onoff)
+{
+ dhd_pub_t *dhd = (dhd_pub_t *)handle;
+ bool wlfc_enabled = FALSE;
+
+ if (dhd == NULL) {
+ DBUSERR(("%s: dhd is NULL\n", __FUNCTION__));
+ return;
+ }
+
+#ifdef PROP_TXSTATUS
+ wlfc_enabled = (dhd_wlfc_flowcontrol(dhd, onoff, !onoff) != WLFC_UNSUPPORTED);
+#endif
+
+ if (!wlfc_enabled) {
+ dhd_txflowcontrol(dhd, ALL_INTERFACES, onoff);
+ }
+}
+
+static void
+dhd_dbus_errhandler(void *handle, int err)
+{
+}
+
+static void
+dhd_dbus_ctl_complete(void *handle, int type, int status)
+{
+ dhd_pub_t *dhd = (dhd_pub_t *)handle;
+
+ if (dhd == NULL) {
+ DBUSERR(("%s: dhd is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ if (type == DBUS_CBCTL_READ) {
+ if (status == DBUS_OK)
+ dhd->rx_ctlpkts++;
+ else
+ dhd->rx_ctlerrs++;
+ } else if (type == DBUS_CBCTL_WRITE) {
+ if (status == DBUS_OK)
+ dhd->tx_ctlpkts++;
+ else
+ dhd->tx_ctlerrs++;
+ }
+
+ dhd_prot_ctl_complete(dhd);
+}
+
+static void
+dhd_dbus_state_change(void *handle, int state)
+{
+ dhd_pub_t *dhd = (dhd_pub_t *)handle;
+ unsigned long flags;
+
+ if (dhd == NULL) {
+ DBUSERR(("%s: dhd is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ switch (state) {
+
+ case DBUS_STATE_DL_NEEDED:
+ DBUSERR(("%s: firmware request cannot be handled\n", __FUNCTION__));
+ break;
+ case DBUS_STATE_DOWN:
+ DBUSTRACE(("%s: DBUS is down\n", __FUNCTION__));
+ DHD_LINUX_GENERAL_LOCK(dhd, flags);
+ dhd_txflowcontrol(dhd, ALL_INTERFACES, ON);
+ dhd->busstate = DHD_BUS_DOWN;
+ DHD_LINUX_GENERAL_UNLOCK(dhd, flags);
+ break;
+ case DBUS_STATE_UP:
+ DBUSTRACE(("%s: DBUS is up\n", __FUNCTION__));
+ DHD_LINUX_GENERAL_LOCK(dhd, flags);
+ dhd_txflowcontrol(dhd, ALL_INTERFACES, OFF);
+ dhd->busstate = DHD_BUS_DATA;
+ DHD_LINUX_GENERAL_UNLOCK(dhd, flags);
+ break;
+ case DBUS_STATE_SLEEP:
+			DBUSTRACE(("%s: DBUS is suspended\n", __FUNCTION__));
+ DHD_LINUX_GENERAL_LOCK(dhd, flags);
+ dhd_txflowcontrol(dhd, ALL_INTERFACES, ON);
+ dhd->busstate = DHD_BUS_SUSPEND;
+ DHD_LINUX_GENERAL_UNLOCK(dhd, flags);
+ break;
+ default:
+ break;
+ }
+
+ DBUSERR(("%s: DBUS current state=%d\n", __FUNCTION__, state));
+}
+
+static void *
+dhd_dbus_pktget(void *handle, uint len, bool send)
+{
+ dhd_pub_t *dhd = (dhd_pub_t *)handle;
+ void *p = NULL;
+
+ if (dhd == NULL) {
+ DBUSERR(("%s: dhd is NULL\n", __FUNCTION__));
+ return NULL;
+ }
+
+ if (send == TRUE) {
+ dhd_os_sdlock_txq(dhd);
+ p = PKTGET(dhd->osh, len, TRUE);
+ dhd_os_sdunlock_txq(dhd);
+ } else {
+ dhd_os_sdlock_rxq(dhd);
+ p = PKTGET(dhd->osh, len, FALSE);
+ dhd_os_sdunlock_rxq(dhd);
+ }
+
+ return p;
+}
+
+static void
+dhd_dbus_pktfree(void *handle, void *p, bool send)
+{
+ dhd_pub_t *dhd = (dhd_pub_t *)handle;
+
+ if (dhd == NULL) {
+ DBUSERR(("%s: dhd is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ if (send == TRUE) {
+#ifdef PROP_TXSTATUS
+ if (DHD_PKTTAG_WLFCPKT(PKTTAG(p)) &&
+ (dhd_wlfc_txcomplete(dhd, p, FALSE) != WLFC_UNSUPPORTED)) {
+ return;
+ }
+#endif /* PROP_TXSTATUS */
+
+ dhd_os_sdlock_txq(dhd);
+ PKTFREE(dhd->osh, p, TRUE);
+ dhd_os_sdunlock_txq(dhd);
+ } else {
+ dhd_os_sdlock_rxq(dhd);
+ PKTFREE(dhd->osh, p, FALSE);
+ dhd_os_sdunlock_rxq(dhd);
+ }
+}
+
+
+static dbus_callbacks_t dhd_dbus_cbs = {
+ dhd_dbus_send_complete,
+ dhd_dbus_recv_buf,
+ dhd_dbus_recv_pkt,
+ dhd_dbus_txflowcontrol,
+ dhd_dbus_errhandler,
+ dhd_dbus_ctl_complete,
+ dhd_dbus_state_change,
+ dhd_dbus_pktget,
+ dhd_dbus_pktfree
+};
+
+uint
+dhd_bus_chip(struct dhd_bus *bus)
+{
+ ASSERT(bus != NULL);
+ return bus->pub.attrib.devid;
+}
+
+uint
+dhd_bus_chiprev(struct dhd_bus *bus)
+{
+	ASSERT(bus != NULL);
+ return bus->pub.attrib.chiprev;
+}
+
+void
+dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
+{
+ bcm_bprintf(strbuf, "Bus USB\n");
+}
+
+void
+dhd_bus_clearcounts(dhd_pub_t *dhdp)
+{
+}
+
+int
+dhd_bus_txdata(struct dhd_bus *bus, void *pktbuf)
+{
+ DBUSTRACE(("%s\n", __FUNCTION__));
+ if (bus->txoff) {
+ DBUSTRACE(("txoff\n"));
+ return BCME_EPERM;
+ }
+ return dbus_send_txdata(&bus->pub, pktbuf);
+}
+
+static void
+dhd_dbus_advertise_bus_cleanup(dhd_pub_t *dhdp)
+{
+ unsigned long flags;
+ int timeleft;
+
+ DHD_LINUX_GENERAL_LOCK(dhdp, flags);
+ dhdp->busstate = DHD_BUS_DOWN_IN_PROGRESS;
+ DHD_LINUX_GENERAL_UNLOCK(dhdp, flags);
+
+ timeleft = dhd_os_busbusy_wait_negation(dhdp, &dhdp->dhd_bus_busy_state);
+ if ((timeleft == 0) || (timeleft == 1)) {
+ DBUSERR(("%s : Timeout due to dhd_bus_busy_state=0x%x\n",
+ __FUNCTION__, dhdp->dhd_bus_busy_state));
+ ASSERT(0);
+ }
+
+ return;
+}
+
+static void
+dhd_dbus_advertise_bus_remove(dhd_pub_t *dhdp)
+{
+ unsigned long flags;
+ int timeleft;
+
+ DHD_LINUX_GENERAL_LOCK(dhdp, flags);
+ dhdp->busstate = DHD_BUS_REMOVE;
+ DHD_LINUX_GENERAL_UNLOCK(dhdp, flags);
+
+ timeleft = dhd_os_busbusy_wait_negation(dhdp, &dhdp->dhd_bus_busy_state);
+ if ((timeleft == 0) || (timeleft == 1)) {
+ DBUSERR(("%s : Timeout due to dhd_bus_busy_state=0x%x\n",
+ __FUNCTION__, dhdp->dhd_bus_busy_state));
+ ASSERT(0);
+ }
+
+ return;
+}
+
+int
+dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag)
+{
+ int bcmerror = 0;
+ unsigned long flags;
+ wifi_adapter_info_t *adapter = (wifi_adapter_info_t *)dhdp->adapter;
+
+ if (flag == TRUE) {
+ if (!dhdp->dongle_reset) {
+ DBUSERR(("%s: == Power OFF ==\n", __FUNCTION__));
+ dhd_dbus_advertise_bus_cleanup(dhdp);
+ dhd_os_wd_timer(dhdp, 0);
+#if !defined(IGNORE_ETH0_DOWN)
+ /* Force flow control as protection when stop come before ifconfig_down */
+ dhd_txflowcontrol(dhdp, ALL_INTERFACES, ON);
+#endif /* !defined(IGNORE_ETH0_DOWN) */
+ dbus_stop(dhdp->bus);
+
+ dhdp->dongle_reset = TRUE;
+ dhdp->up = FALSE;
+
+ DHD_LINUX_GENERAL_LOCK(dhdp, flags);
+ dhdp->busstate = DHD_BUS_DOWN;
+ DHD_LINUX_GENERAL_UNLOCK(dhdp, flags);
+ wifi_clr_adapter_status(adapter, WIFI_STATUS_FW_READY);
+
+ printf("%s: WLAN OFF DONE\n", __FUNCTION__);
+ /* App can now remove power from device */
+ } else
+ bcmerror = BCME_ERROR;
+ } else {
+ /* App must have restored power to device before calling */
+ printf("\n\n%s: == WLAN ON ==\n", __FUNCTION__);
+ if (dhdp->dongle_reset) {
+ /* Turn on WLAN */
+ DHD_MUTEX_UNLOCK();
+ wait_event_interruptible_timeout(adapter->status_event,
+ wifi_get_adapter_status(adapter, WIFI_STATUS_FW_READY),
+ msecs_to_jiffies(DHD_FW_READY_TIMEOUT));
+ DHD_MUTEX_LOCK();
+ bcmerror = dbus_up(dhdp->bus);
+ if (bcmerror == BCME_OK) {
+ dhdp->dongle_reset = FALSE;
+ dhdp->up = TRUE;
+#if !defined(IGNORE_ETH0_DOWN)
+ /* Restore flow control */
+ dhd_txflowcontrol(dhdp, ALL_INTERFACES, OFF);
+#endif
+ dhd_os_wd_timer(dhdp, dhd_watchdog_ms);
+
+ DBUSTRACE(("%s: WLAN ON DONE\n", __FUNCTION__));
+ } else {
+ DBUSERR(("%s: failed to dbus_up with code %d\n", __FUNCTION__, bcmerror));
+ }
+ }
+ }
+
+ return bcmerror;
+}
+
+void
+dhd_bus_update_fw_nv_path(struct dhd_bus *bus, char *pfw_path,
+ char *pnv_path, char *pclm_path, char *pconf_path)
+{
+ DBUSTRACE(("%s\n", __FUNCTION__));
+
+ if (bus == NULL) {
+ DBUSERR(("%s: bus is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ bus->fw_path = pfw_path;
+ bus->nv_path = pnv_path;
+ bus->dhd->clm_path = pclm_path;
+ bus->dhd->conf_path = pconf_path;
+
+ dhd_conf_set_path_params(bus->dhd, bus->fw_path, bus->nv_path);
+
+}
+
+/*
+ * hdrlen is space to reserve in pkt headroom for DBUS
+ */
+void *
+dhd_dbus_probe_cb(void *arg, const char *desc, uint32 bustype,
+ uint16 bus_no, uint16 slot, uint32 hdrlen)
+{
+ osl_t *osh = NULL;
+ dhd_bus_t *bus = NULL;
+ dhd_pub_t *pub = NULL;
+ uint rxsz;
+ int dlneeded = 0;
+ wifi_adapter_info_t *adapter = NULL;
+
+ DBUSTRACE(("%s: Enter\n", __FUNCTION__));
+
+ adapter = dhd_wifi_platform_get_adapter(bustype, bus_no, slot);
+
+ if (!g_pub) {
+ /* Ask the OS interface part for an OSL handle */
+ if (!(osh = osl_attach(NULL, bustype, TRUE))) {
+ DBUSERR(("%s: OSL attach failed\n", __FUNCTION__));
+ goto fail;
+ }
+
+ /* Attach to the dhd/OS interface */
+ if (!(pub = dhd_attach(osh, bus, hdrlen, adapter))) {
+ DBUSERR(("%s: dhd_attach failed\n", __FUNCTION__));
+ goto fail;
+ }
+ } else {
+ pub = g_pub;
+ osh = pub->osh;
+ }
+
+ if (pub->bus) {
+ DBUSERR(("%s: wrong probe\n", __FUNCTION__));
+ goto fail;
+ }
+
+ rxsz = dhd_get_rxsz(pub);
+ bus = dbus_attach(osh, rxsz, DBUS_NRXQ, DBUS_NTXQ, pub, &dhd_dbus_cbs, NULL, NULL);
+ if (bus) {
+ pub->bus = bus;
+ bus->dhd = pub;
+
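+		/* dlneeded semantics: 0 = firmware already running, > 0 = download
+		 * required, < 0 = could not query the dongle.
+		 */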
+ dlneeded = dbus_dlneeded(bus);
+ if (dlneeded >= 0) {
+ if (!g_pub) {
+ dhd_conf_reset(pub);
+ dhd_conf_set_chiprev(pub, bus->pub.attrib.devid, bus->pub.attrib.chiprev);
+ dhd_conf_preinit(pub);
+ }
+ }
+
+ if (g_pub || dhd_download_fw_on_driverload) {
+ if (dlneeded == 0) {
+ wifi_set_adapter_status(adapter, WIFI_STATUS_FW_READY);
+#ifdef BCM_REQUEST_FW
+ } else if (dlneeded > 0) {
+ wifi_clr_adapter_status(adapter, WIFI_STATUS_FW_READY);
+ dhd_set_path(bus->dhd);
+ if (dbus_download_firmware(bus, bus->fw_path, bus->nv_path) != DBUS_OK)
+ goto fail;
+ bus->dhd->busstate = DHD_BUS_LOAD;
+#endif
+ } else {
+ goto fail;
+ }
+ }
+	} else {
+		DBUSERR(("%s: dbus_attach failed\n", __FUNCTION__));
+		goto fail;
+	}
+
+ if (!g_pub) {
+ /* Ok, have the per-port tell the stack we're open for business */
+		if (dhd_attach_net(bus->dhd, TRUE) != 0) {
+ DBUSERR(("%s: Net attach failed!!\n", __FUNCTION__));
+ goto fail;
+ }
+ pub->hang_report = TRUE;
+#if defined(MULTIPLE_SUPPLICANT)
+ wl_android_post_init(); // terence 20120530: fix critical section in dhd_open and dhdsdio_probe
+#endif
+ g_pub = pub;
+ }
+
+ DBUSTRACE(("%s: Exit\n", __FUNCTION__));
+ wifi_clr_adapter_status(adapter, WIFI_STATUS_DETTACH);
+ wifi_set_adapter_status(adapter, WIFI_STATUS_ATTACH);
+ wake_up_interruptible(&adapter->status_event);
+ /* This is passed to dhd_dbus_disconnect_cb */
+ return bus;
+
+fail:
+ if (pub && pub->bus) {
+ dbus_detach(pub->bus);
+ pub->bus = NULL;
+ }
+ /* Release resources in reverse order */
+ if (!g_pub) {
+ if (pub) {
+ dhd_detach(pub);
+ dhd_free(pub);
+ }
+ if (osh) {
+ osl_detach(osh);
+ }
+ }
+
+ printf("%s: Failed\n", __FUNCTION__);
+ return NULL;
+}
+
+void
+dhd_dbus_disconnect_cb(void *arg)
+{
+ dhd_bus_t *bus = (dhd_bus_t *)arg;
+ dhd_pub_t *pub = g_pub;
+ osl_t *osh;
+ wifi_adapter_info_t *adapter = NULL;
+
+	if (pub == NULL) {
+		DBUSERR(("%s: pub is NULL\n", __FUNCTION__));
+		return;
+	}
+	if (!pub->dhd_remove && bus == NULL) {
+		DBUSERR(("%s: bus is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	adapter = (wifi_adapter_info_t *)pub->adapter;
+ if (!adapter) {
+ DBUSERR(("%s: adapter is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ printf("%s: Enter dhd_remove=%d on %s\n", __FUNCTION__,
+ pub->dhd_remove, adapter->name);
+ if (!pub->dhd_remove) {
+ /* Advertise bus remove during rmmod */
+ dhd_dbus_advertise_bus_remove(bus->dhd);
+ dbus_detach(pub->bus);
+ pub->bus = NULL;
+ wifi_clr_adapter_status(adapter, WIFI_STATUS_ATTACH);
+ wifi_set_adapter_status(adapter, WIFI_STATUS_DETTACH);
+ wake_up_interruptible(&adapter->status_event);
+ } else {
+ osh = pub->osh;
+ dhd_detach(pub);
+ if (pub->bus) {
+ dbus_detach(pub->bus);
+ pub->bus = NULL;
+ }
+ dhd_free(pub);
+ g_pub = NULL;
+ if (MALLOCED(osh)) {
+ DBUSERR(("%s: MEMORY LEAK %d bytes\n", __FUNCTION__, MALLOCED(osh)));
+ }
+ osl_detach(osh);
+ }
+
+ DBUSTRACE(("%s: Exit\n", __FUNCTION__));
+}
+
+#ifdef LINUX_EXTERNAL_MODULE_DBUS
+
+static int __init
+bcm_dbus_module_init(void)
+{
+ printf("Inserting bcm_dbus module \n");
+ return 0;
+}
+
+static void __exit
+bcm_dbus_module_exit(void)
+{
+ printf("Removing bcm_dbus module \n");
+ return;
+}
+
+EXPORT_SYMBOL(dbus_pnp_sleep);
+EXPORT_SYMBOL(dbus_get_devinfo);
+EXPORT_SYMBOL(dbus_detach);
+EXPORT_SYMBOL(dbus_get_attrib);
+EXPORT_SYMBOL(dbus_down);
+EXPORT_SYMBOL(dbus_pnp_resume);
+EXPORT_SYMBOL(dbus_set_config);
+EXPORT_SYMBOL(dbus_flowctrl_rx);
+EXPORT_SYMBOL(dbus_up);
+EXPORT_SYMBOL(dbus_get_device_speed);
+EXPORT_SYMBOL(dbus_send_pkt);
+EXPORT_SYMBOL(dbus_recv_ctl);
+EXPORT_SYMBOL(dbus_attach);
+
+MODULE_LICENSE("GPL");
+
+module_init(bcm_dbus_module_init);
+module_exit(bcm_dbus_module_exit);
+
+#endif /* #ifdef LINUX_EXTERNAL_MODULE_DBUS */
diff --git a/bcmdhd.101.10.361.x/dbus_usb.c b/bcmdhd.101.10.361.x/dbus_usb.c
new file mode 100755
index 0000000..5cee418
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dbus_usb.c
@@ -0,0 +1,1173 @@
+/*
+ * Dongle BUS interface for USB, OS independent
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dbus_usb.c 565557 2015-06-22 19:29:44Z $
+ */
+
+/**
+ * @file @brief
+ * This file contains DBUS code that is USB, but not OS specific. DBUS is a Broadcom proprietary
+ * host specific abstraction layer.
+ */
+
+#include <osl.h>
+#include <bcmdefs.h>
+#include <bcmutils.h>
+#include <dbus.h>
+#include <usbrdl.h>
+#include <bcmdevs_legacy.h>
+#include <bcmdevs.h>
+#include <bcmendian.h>
+
+uint dbus_msglevel = DBUS_ERROR_VAL;
+module_param(dbus_msglevel, int, 0);
+
+
+#define USB_DLIMAGE_RETRY_TIMEOUT	3000	/* retry timeout (ms) */
+#define USB_SFLASH_DLIMAGE_SPINWAIT 150 /* in unit of ms */
+#define USB_SFLASH_DLIMAGE_LIMIT 2000 /* spinwait limit (ms) */
+#define POSTBOOT_ID 0xA123 /* ID to detect if dongle has boot up */
+#define USB_RESETCFG_SPINWAIT 1 /* wait after resetcfg (ms) */
+#define USB_DEV_ISBAD(u) (u->pub->attrib.devid == 0xDEAD)
+#define USB_DLGO_SPINWAIT 100 /* wait after DL_GO (ms) */
+#define TEST_CHIP 0x4328
+
+typedef struct {
+ dbus_pub_t *pub;
+
+ void *cbarg;
+ dbus_intf_callbacks_t *cbs; /** callbacks into higher DBUS level (dbus.c) */
+ dbus_intf_t *drvintf;
+ void *usbosl_info;
+ uint32 rdlram_base_addr;
+ uint32 rdlram_size;
+} usb_info_t;
+
+/*
+ * Callbacks common to all USB
+ */
+static void dbus_usb_disconnect(void *handle);
+static void dbus_usb_send_irb_timeout(void *handle, dbus_irb_tx_t *txirb);
+static void dbus_usb_send_irb_complete(void *handle, dbus_irb_tx_t *txirb, int status);
+static void dbus_usb_recv_irb_complete(void *handle, dbus_irb_rx_t *rxirb, int status);
+static void dbus_usb_errhandler(void *handle, int err);
+static void dbus_usb_ctl_complete(void *handle, int type, int status);
+static void dbus_usb_state_change(void *handle, int state);
+static struct dbus_irb* dbus_usb_getirb(void *handle, bool send);
+static void dbus_usb_rxerr_indicate(void *handle, bool on);
+#if !defined(BCM_REQUEST_FW)
+static int dbus_usb_resetcfg(usb_info_t *usbinfo);
+#endif
+static int dbus_usb_iovar_op(void *bus, const char *name,
+ void *params, int plen, void *arg, int len, bool set);
+static int dbus_iovar_process(usb_info_t* usbinfo, const char *name,
+ void *params, int plen, void *arg, int len, bool set);
+static int dbus_usb_doiovar(usb_info_t *bus, const bcm_iovar_t *vi, uint32 actionid,
+ const char *name, void *params, int plen, void *arg, int len, int val_size);
+static int dhdusb_downloadvars(usb_info_t *bus, void *arg, int len);
+
+static int dbus_usb_dl_writeimage(usb_info_t *usbinfo, uint8 *fw, int fwlen);
+static int dbus_usb_dlstart(void *bus, uint8 *fw, int len);
+static int dbus_usb_dlneeded(void *bus);
+static int dbus_usb_dlrun(void *bus);
+static int dbus_usb_rdl_dwnld_state(usb_info_t *usbinfo);
+
+
+/* OS specific */
+extern bool dbus_usbos_dl_cmd(void *info, uint8 cmd, void *buffer, int buflen);
+extern int dbus_usbos_wait(void *info, uint16 ms);
+extern int dbus_write_membytes(usb_info_t *usbinfo, bool set, uint32 address,
+ uint8 *data, uint size);
+extern bool dbus_usbos_dl_send_bulk(void *info, void *buffer, int len);
+extern int dbus_usbos_loopback_tx(void *usbos_info_ptr, int cnt, int size);
+
+/**
+ * These functions are called by the lower DBUS level (dbus_usb_os.c) to notify this DBUS level
+ * (dbus_usb.c) of an event.
+ */
+static dbus_intf_callbacks_t dbus_usb_intf_cbs = {
+ dbus_usb_send_irb_timeout,
+ dbus_usb_send_irb_complete,
+ dbus_usb_recv_irb_complete,
+ dbus_usb_errhandler,
+ dbus_usb_ctl_complete,
+ dbus_usb_state_change,
+ NULL, /* isr */
+ NULL, /* dpc */
+ NULL, /* watchdog */
+ NULL, /* dbus_if_pktget */
+ NULL, /* dbus_if_pktfree */
+ dbus_usb_getirb,
+ dbus_usb_rxerr_indicate
+};
+
+/* IOVar table */
+enum {
+ IOV_SET_DOWNLOAD_STATE = 1,
+ IOV_DBUS_MSGLEVEL,
+ IOV_MEMBYTES,
+ IOV_VARS,
+ IOV_LOOPBACK_TX
+};
+
+const bcm_iovar_t dhdusb_iovars[] = {
+ {"vars", IOV_VARS, 0, IOVT_BUFFER, 0 },
+ {"dbus_msglevel", IOV_DBUS_MSGLEVEL, 0, IOVT_UINT32, 0 },
+ {"dwnldstate", IOV_SET_DOWNLOAD_STATE, 0, IOVT_BOOL, 0 },
+ {"membytes", IOV_MEMBYTES, 0, IOVT_BUFFER, 2 * sizeof(int) },
+ {"usb_lb_txfer", IOV_LOOPBACK_TX, 0, IOVT_BUFFER, 2 * sizeof(int) },
+ {NULL, 0, 0, 0, 0 }
+};
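+/* Usage sketch (values hypothetical): a "dbus_msglevel" set carries one 32-bit
+ * integer, while a "membytes" set carries an <address, length> integer pair
+ * followed by the data bytes; both are dispatched via dbus_usb_doiovar() below.
+ */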
+
+/*
+ * Need global for probe() and disconnect() since
+ * attach() is not called at probe and detach()
+ * can be called inside disconnect()
+ */
+static probe_cb_t probe_cb = NULL;
+static disconnect_cb_t disconnect_cb = NULL;
+static void *probe_arg = NULL;
+static void *disc_arg = NULL;
+static dbus_intf_t *g_dbusintf = NULL;
+static dbus_intf_t dbus_usb_intf; /** functions called by higher layer DBUS into lower layer */
+
+/*
+ * dbus_intf_t common to all USB
+ * These functions override dbus_usb_<os>.c.
+ */
+static void *dbus_usb_attach(dbus_pub_t *pub, void *cbarg, dbus_intf_callbacks_t *cbs);
+static void dbus_usb_detach(dbus_pub_t *pub, void *info);
+static void * dbus_usb_probe(void *arg, const char *desc, uint32 bustype,
+ uint16 bus_no, uint16 slot, uint32 hdrlen);
+
+/* functions */
+
+/**
+ * As part of DBUS initialization/registration, the higher level DBUS (dbus.c) needs to know what
+ * lower level DBUS functions to call (in both dbus_usb.c and dbus_usb_os.c).
+ */
+static void *
+dbus_usb_probe(void *arg, const char *desc, uint32 bustype, uint16 bus_no,
+ uint16 slot, uint32 hdrlen)
+{
+ DBUSTRACE(("%s(): \n", __FUNCTION__));
+ if (probe_cb) {
+
+ if (g_dbusintf != NULL) {
+ /* First, initialize all lower-level functions as default
+ * so that dbus.c simply calls directly to dbus_usb_os.c.
+ */
+ bcopy(g_dbusintf, &dbus_usb_intf, sizeof(dbus_intf_t));
+
+ /* Second, selectively override functions we need, if any. */
+ dbus_usb_intf.attach = dbus_usb_attach;
+ dbus_usb_intf.detach = dbus_usb_detach;
+ dbus_usb_intf.iovar_op = dbus_usb_iovar_op;
+ dbus_usb_intf.dlstart = dbus_usb_dlstart;
+ dbus_usb_intf.dlneeded = dbus_usb_dlneeded;
+ dbus_usb_intf.dlrun = dbus_usb_dlrun;
+ }
+
+ disc_arg = probe_cb(probe_arg, "DBUS USB", USB_BUS, bus_no, slot, hdrlen);
+ return disc_arg;
+ }
+
+ return NULL;
+}
+
+/**
+ * On return, *intf contains this or lower-level DBUS functions to be called by higher
+ * level (dbus.c)
+ */
+int
+dbus_bus_register(int vid, int pid, probe_cb_t prcb,
+ disconnect_cb_t discb, void *prarg, dbus_intf_t **intf, void *param1, void *param2)
+{
+ int err;
+
+ DBUSTRACE(("%s(): \n", __FUNCTION__));
+ probe_cb = prcb;
+ disconnect_cb = discb;
+ probe_arg = prarg;
+
+ *intf = &dbus_usb_intf;
+
+ err = dbus_bus_osl_register(vid, pid, dbus_usb_probe,
+ dbus_usb_disconnect, NULL, &g_dbusintf, param1, param2);
+
+ ASSERT(g_dbusintf);
+ return err;
+}
+
+int
+dbus_bus_deregister(void)
+{
+ DBUSTRACE(("%s(): \n", __FUNCTION__));
+ return dbus_bus_osl_deregister();
+}
+
+/** initialization consists of registration followed by 'attach'. */
+void *
+dbus_usb_attach(dbus_pub_t *pub, void *cbarg, dbus_intf_callbacks_t *cbs)
+{
+ usb_info_t *usb_info;
+
+ DBUSTRACE(("%s(): \n", __FUNCTION__));
+
+ if ((g_dbusintf == NULL) || (g_dbusintf->attach == NULL))
+ return NULL;
+
+ /* Sanity check for BUS_INFO() */
+ ASSERT(OFFSETOF(usb_info_t, pub) == 0);
+
+ usb_info = MALLOC(pub->osh, sizeof(usb_info_t));
+ if (usb_info == NULL)
+ return NULL;
+
+ bzero(usb_info, sizeof(usb_info_t));
+
+ usb_info->pub = pub;
+ usb_info->cbarg = cbarg;
+ usb_info->cbs = cbs;
+
+ usb_info->usbosl_info = (dbus_pub_t *)g_dbusintf->attach(pub,
+ usb_info, &dbus_usb_intf_cbs);
+ if (usb_info->usbosl_info == NULL) {
+ MFREE(pub->osh, usb_info, sizeof(usb_info_t));
+ return NULL;
+ }
+
+ /* Save USB OS-specific driver entry points */
+ usb_info->drvintf = g_dbusintf;
+
+ pub->bus = usb_info;
+#if !defined(BCM_REQUEST_FW)
+ if (!dbus_usb_resetcfg(usb_info)) {
+ usb_info->pub->busstate = DBUS_STATE_DL_DONE;
+ }
+#endif
+ /* Return Lower layer info */
+ return (void *) usb_info->usbosl_info;
+}
+
+void
+dbus_usb_detach(dbus_pub_t *pub, void *info)
+{
+ usb_info_t *usb_info = (usb_info_t *) pub->bus;
+ osl_t *osh = pub->osh;
+
+ if (usb_info == NULL)
+ return;
+
+ if (usb_info->drvintf && usb_info->drvintf->detach)
+ usb_info->drvintf->detach(pub, usb_info->usbosl_info);
+
+ MFREE(osh, usb_info, sizeof(usb_info_t));
+}
+
+void
+dbus_usb_disconnect(void *handle)
+{
+ DBUSTRACE(("%s(): \n", __FUNCTION__));
+ if (disconnect_cb)
+ disconnect_cb(disc_arg);
+}
+
+/**
+ * When the lower DBUS level (dbus_usb_os.c) signals this event, the higher DBUS level has to be
+ * notified.
+ */
+static void
+dbus_usb_send_irb_timeout(void *handle, dbus_irb_tx_t *txirb)
+{
+ usb_info_t *usb_info = (usb_info_t *) handle;
+
+ DBUSTRACE(("%s\n", __FUNCTION__));
+
+ if (usb_info == NULL)
+ return;
+
+ if (usb_info->cbs && usb_info->cbs->send_irb_timeout)
+ usb_info->cbs->send_irb_timeout(usb_info->cbarg, txirb);
+}
+
+/**
+ * When the lower DBUS level (dbus_usb_os.c) signals this event, the higher DBUS level has to be
+ * notified.
+ */
+static void
+dbus_usb_send_irb_complete(void *handle, dbus_irb_tx_t *txirb, int status)
+{
+ usb_info_t *usb_info = (usb_info_t *) handle;
+
+ if (usb_info == NULL)
+ return;
+
+ if (usb_info->cbs && usb_info->cbs->send_irb_complete)
+ usb_info->cbs->send_irb_complete(usb_info->cbarg, txirb, status);
+}
+
+/**
+ * When the lower DBUS level (dbus_usb_os.c) signals this event, the higher DBUS level has to be
+ * notified.
+ */
+static void
+dbus_usb_recv_irb_complete(void *handle, dbus_irb_rx_t *rxirb, int status)
+{
+ usb_info_t *usb_info = (usb_info_t *) handle;
+
+ if (usb_info == NULL)
+ return;
+
+ if (usb_info->cbs && usb_info->cbs->recv_irb_complete)
+ usb_info->cbs->recv_irb_complete(usb_info->cbarg, rxirb, status);
+}
+
+/** Lower DBUS level (dbus_usb_os.c) requests a free IRB. Pass this on to the higher DBUS level. */
+static struct dbus_irb*
+dbus_usb_getirb(void *handle, bool send)
+{
+ usb_info_t *usb_info = (usb_info_t *) handle;
+
+ if (usb_info == NULL)
+ return NULL;
+
+ if (usb_info->cbs && usb_info->cbs->getirb)
+ return usb_info->cbs->getirb(usb_info->cbarg, send);
+
+ return NULL;
+}
+
+/**
+ * When the lower DBUS level (dbus_usb_os.c) signals this event, the higher DBUS level has to be
+ * notified.
+ */
+static void
+dbus_usb_rxerr_indicate(void *handle, bool on)
+{
+ usb_info_t *usb_info = (usb_info_t *) handle;
+
+ if (usb_info == NULL)
+ return;
+
+ if (usb_info->cbs && usb_info->cbs->rxerr_indicate)
+ usb_info->cbs->rxerr_indicate(usb_info->cbarg, on);
+}
+
+/**
+ * When the lower DBUS level (dbus_usb_os.c) signals this event, the higher DBUS level has to be
+ * notified.
+ */
+static void
+dbus_usb_errhandler(void *handle, int err)
+{
+ usb_info_t *usb_info = (usb_info_t *) handle;
+
+ if (usb_info == NULL)
+ return;
+
+ if (usb_info->cbs && usb_info->cbs->errhandler)
+ usb_info->cbs->errhandler(usb_info->cbarg, err);
+}
+
+/**
+ * When the lower DBUS level (dbus_usb_os.c) signals this event, the higher DBUS level has to be
+ * notified.
+ */
+static void
+dbus_usb_ctl_complete(void *handle, int type, int status)
+{
+ usb_info_t *usb_info = (usb_info_t *) handle;
+
+ DBUSTRACE(("%s\n", __FUNCTION__));
+
+ if (usb_info == NULL) {
+ DBUSERR(("%s: usb_info is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ if (usb_info->cbs && usb_info->cbs->ctl_complete)
+ usb_info->cbs->ctl_complete(usb_info->cbarg, type, status);
+}
+
+/**
+ * When the lower DBUS level (dbus_usb_os.c) signals this event, the higher DBUS level has to be
+ * notified.
+ */
+static void
+dbus_usb_state_change(void *handle, int state)
+{
+ usb_info_t *usb_info = (usb_info_t *) handle;
+
+ if (usb_info == NULL)
+ return;
+
+ if (usb_info->cbs && usb_info->cbs->state_change)
+ usb_info->cbs->state_change(usb_info->cbarg, state);
+}
+
+/** called by higher DBUS level (dbus.c) */
+static int
+dbus_usb_iovar_op(void *bus, const char *name,
+ void *params, int plen, void *arg, int len, bool set)
+{
+ int err = DBUS_OK;
+
+ err = dbus_iovar_process((usb_info_t*)bus, name, params, plen, arg, len, set);
+ return err;
+}
+
+/** process iovar request from higher DBUS level */
+static int
+dbus_iovar_process(usb_info_t* usbinfo, const char *name,
+ void *params, int plen, void *arg, int len, bool set)
+{
+ const bcm_iovar_t *vi = NULL;
+ int bcmerror = 0;
+ int val_size;
+ uint32 actionid;
+
+ DBUSTRACE(("%s: Enter\n", __FUNCTION__));
+
+ ASSERT(name);
+ ASSERT(len >= 0);
+
+ /* Get MUST have return space */
+ ASSERT(set || (arg && len));
+
+ /* Set does NOT take qualifiers */
+ ASSERT(!set || (!params && !plen));
+
+ /* Look up var locally; if not found pass to host driver */
+ if ((vi = bcm_iovar_lookup(dhdusb_iovars, name)) == NULL) {
+ /* Not Supported */
+ bcmerror = BCME_UNSUPPORTED;
+		DBUSTRACE(("%s: IOVAR %s is not supported\n", __FUNCTION__, name));
+ goto exit;
+
+ }
+
+ DBUSTRACE(("%s: %s %s, len %d plen %d\n", __FUNCTION__,
+ name, (set ? "set" : "get"), len, plen));
+
+ /* set up 'params' pointer in case this is a set command so that
+ * the convenience int and bool code can be common to set and get
+ */
+ if (params == NULL) {
+ params = arg;
+ plen = len;
+ }
+
+ if (vi->type == IOVT_VOID)
+ val_size = 0;
+ else if (vi->type == IOVT_BUFFER)
+ val_size = len;
+ else
+ /* all other types are integer sized */
+ val_size = sizeof(int);
+
+ actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
+ bcmerror = dbus_usb_doiovar(usbinfo, vi, actionid,
+ name, params, plen, arg, len, val_size);
+
+exit:
+ return bcmerror;
+} /* dbus_iovar_process */
+
+static int
+dbus_usb_doiovar(usb_info_t *bus, const bcm_iovar_t *vi, uint32 actionid, const char *name,
+ void *params, int plen, void *arg, int len, int val_size)
+{
+ int bcmerror = 0;
+ int32 int_val = 0;
+ int32 int_val2 = 0;
+ bool bool_val = 0;
+
+ DBUSTRACE(("%s: Enter, action %d name %s params %p plen %d arg %p len %d val_size %d\n",
+ __FUNCTION__, actionid, name, params, plen, arg, len, val_size));
+
+ if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0)
+ goto exit;
+
+ if (plen >= (int)sizeof(int_val))
+ bcopy(params, &int_val, sizeof(int_val));
+
+ if (plen >= (int)sizeof(int_val) * 2)
+ bcopy((void*)((uintptr)params + sizeof(int_val)), &int_val2, sizeof(int_val2));
+
+ bool_val = (int_val != 0) ? TRUE : FALSE;
+
+ switch (actionid) {
+
+ case IOV_SVAL(IOV_MEMBYTES):
+ case IOV_GVAL(IOV_MEMBYTES):
+ {
+ uint32 address;
+ uint size, dsize;
+ uint8 *data;
+
+ bool set = (actionid == IOV_SVAL(IOV_MEMBYTES));
+
+ ASSERT(plen >= 2*sizeof(int));
+
+ address = (uint32)int_val;
+ BCM_REFERENCE(address);
+ bcopy((char *)params + sizeof(int_val), &int_val, sizeof(int_val));
+ size = (uint)int_val;
+
+ /* Do some validation */
+ dsize = set ? plen - (2 * sizeof(int)) : len;
+ if (dsize < size) {
+ DBUSTRACE(("%s: error on %s membytes, addr 0x%08x size %d dsize %d\n",
+ __FUNCTION__, (set ? "set" : "get"), address, size, dsize));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+ DBUSTRACE(("%s: Request to %s %d bytes at address 0x%08x\n", __FUNCTION__,
+ (set ? "write" : "read"), size, address));
+
+ /* Generate the actual data pointer */
+ data = set ? (uint8*)params + 2 * sizeof(int): (uint8*)arg;
+
+ /* Call to do the transfer */
+ bcmerror = dbus_usb_dl_writeimage(BUS_INFO(bus, usb_info_t), data, size);
+ }
+ break;
+
+
+ case IOV_SVAL(IOV_SET_DOWNLOAD_STATE):
+
+ if (bool_val == TRUE) {
+ bcmerror = dbus_usb_dlneeded(bus);
+ dbus_usb_rdl_dwnld_state(BUS_INFO(bus, usb_info_t));
+ } else {
+ usb_info_t *usbinfo = BUS_INFO(bus, usb_info_t);
+ bcmerror = dbus_usb_dlrun(bus);
+ usbinfo->pub->busstate = DBUS_STATE_DL_DONE;
+ }
+ break;
+
+ case IOV_SVAL(IOV_VARS):
+ bcmerror = dhdusb_downloadvars(BUS_INFO(bus, usb_info_t), arg, len);
+ break;
+
+ case IOV_GVAL(IOV_DBUS_MSGLEVEL):
+ int_val = (int32)dbus_msglevel;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_DBUS_MSGLEVEL):
+ dbus_msglevel = int_val;
+ break;
+
+#ifdef DBUS_USB_LOOPBACK
+ case IOV_SVAL(IOV_LOOPBACK_TX):
+ bcmerror = dbus_usbos_loopback_tx(BUS_INFO(bus, usb_info_t), int_val,
+ int_val2);
+ break;
+#endif
+ default:
+ bcmerror = BCME_UNSUPPORTED;
+ break;
+ }
+
+exit:
+ return bcmerror;
+} /* dbus_usb_doiovar */
+
+/** higher DBUS level (dbus.c) wants to set NVRAM variables in dongle */
+static int
+dhdusb_downloadvars(usb_info_t *bus, void *arg, int len)
+{
+ int bcmerror = 0;
+ uint32 varsize;
+ uint32 varaddr;
+ uint32 varsizew;
+
+ if (!len) {
+ bcmerror = BCME_BUFTOOSHORT;
+ goto err;
+ }
+
+ /* RAM size is not set. Set it at dbus_usb_dlneeded */
+ if (!bus->rdlram_size)
+ bcmerror = BCME_ERROR;
+
+	/* Even if there are no vars to be written, we still need to set the ramsize. */
+ varsize = len ? ROUNDUP(len, 4) : 0;
+ varaddr = (bus->rdlram_size - 4) - varsize;
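+	/* The vars sit at the top of dongle RAM, immediately below the 4-byte
+	 * length token that is written into the last word further down.
+	 */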
+
+ /* Write the vars list */
+ DBUSTRACE(("WriteVars: @%x varsize=%d\n", varaddr, varsize));
+ bcmerror = dbus_write_membytes(bus->usbosl_info, TRUE, (varaddr + bus->rdlram_base_addr),
+ arg, varsize);
+
+ /* adjust to the user specified RAM */
+ DBUSTRACE(("Usable memory size: %d\n", bus->rdlram_size));
+ DBUSTRACE(("Vars are at %d, orig varsize is %d\n", varaddr, varsize));
+
+ varsize = ((bus->rdlram_size - 4) - varaddr);
+
+ /*
+ * Determine the length token:
+ * Varsize, converted to words, in lower 16-bits, checksum in upper 16-bits.
+ */
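+	/* Worked example (hypothetical): varsize = 0x200 bytes -> 0x80 words, so
+	 * varsizew = (~0x80 << 16) | 0x80 = 0xff7f0080 before the htol32() swap.
+	 */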
+ if (bcmerror) {
+ varsizew = 0;
+ } else {
+ varsizew = varsize / 4;
+ varsizew = (~varsizew << 16) | (varsizew & 0x0000FFFF);
+ varsizew = htol32(varsizew);
+ }
+
+ DBUSTRACE(("New varsize is %d, length token=0x%08x\n", varsize, varsizew));
+
+ /* Write the length token to the last word */
+ bcmerror = dbus_write_membytes(bus->usbosl_info, TRUE, ((bus->rdlram_size - 4) +
+ bus->rdlram_base_addr), (uint8*)&varsizew, 4);
+err:
+ return bcmerror;
+} /* dhdusb_downloadvars */
+
+#if !defined(BCM_REQUEST_FW)
+/**
+ * After downloading firmware into dongle and starting it, we need to know if the firmware is
+ * indeed up and running.
+ */
+static int
+dbus_usb_resetcfg(usb_info_t *usbinfo)
+{
+ void *osinfo;
+ bootrom_id_t id;
+ uint16 waittime = 0;
+
+ uint32 starttime = 0;
+ uint32 endtime = 0;
+
+ DBUSTRACE(("%s\n", __FUNCTION__));
+
+ if (usbinfo == NULL)
+ return DBUS_ERR;
+
+ osinfo = usbinfo->usbosl_info;
+ ASSERT(osinfo);
+
+ /* Give dongle chance to boot */
+ dbus_usbos_wait(osinfo, USB_SFLASH_DLIMAGE_SPINWAIT);
+ waittime = USB_SFLASH_DLIMAGE_SPINWAIT;
+ while (waittime < USB_DLIMAGE_RETRY_TIMEOUT) {
+
+ starttime = OSL_SYSUPTIME();
+
+ id.chip = 0xDEAD; /* Get the ID */
+ dbus_usbos_dl_cmd(osinfo, DL_GETVER, &id, sizeof(bootrom_id_t));
+ id.chip = ltoh32(id.chip);
+
+ endtime = OSL_SYSUPTIME();
+ waittime += (endtime - starttime);
+
+ if (id.chip == POSTBOOT_ID)
+ break;
+ }
+
+ if (id.chip == POSTBOOT_ID) {
+ DBUSERR(("%s: download done. Bootup time = %d ms postboot chip 0x%x/rev 0x%x\n",
+ __FUNCTION__, waittime, id.chip, id.chiprev));
+
+ dbus_usbos_dl_cmd(osinfo, DL_RESETCFG, &id, sizeof(bootrom_id_t));
+
+ dbus_usbos_wait(osinfo, USB_RESETCFG_SPINWAIT);
+ return DBUS_OK;
+ } else {
+ DBUSERR(("%s: Cannot talk to Dongle. Wait time = %d ms. Firmware is not UP \n",
+ __FUNCTION__, waittime));
+ return DBUS_ERR;
+ }
+}
+#endif
+
+/** before firmware download, the dongle has to be prepared to receive the fw image */
+static int
+dbus_usb_rdl_dwnld_state(usb_info_t *usbinfo)
+{
+ void *osinfo = usbinfo->usbosl_info;
+ rdl_state_t state;
+ int err = DBUS_OK;
+
+ /* 1) Prepare USB boot loader for runtime image */
+ dbus_usbos_dl_cmd(osinfo, DL_START, &state, sizeof(rdl_state_t));
+
+ state.state = ltoh32(state.state);
+ state.bytes = ltoh32(state.bytes);
+
+ /* 2) Check we are in the Waiting state */
+ if (state.state != DL_WAITING) {
+ DBUSERR(("%s: Failed to DL_START\n", __FUNCTION__));
+ err = DBUS_ERR;
+ goto fail;
+ }
+
+fail:
+ return err;
+}
+
+/**
+ * Dongle contains bootcode in ROM but firmware is (partially) contained in dongle RAM. Therefore,
+ * firmware has to be downloaded into dongle RAM.
+ */
+static int
+dbus_usb_dl_writeimage(usb_info_t *usbinfo, uint8 *fw, int fwlen)
+{
+ osl_t *osh = usbinfo->pub->osh;
+ void *osinfo = usbinfo->usbosl_info;
+ unsigned int sendlen, sent, dllen;
+ char *bulkchunk = NULL, *dlpos;
+ rdl_state_t state;
+ int err = DBUS_OK;
+ bootrom_id_t id;
+ uint16 wait, wait_time;
+ uint32 dl_trunk_size = RDL_CHUNK;
+
+ if (BCM4350_CHIP(usbinfo->pub->attrib.devid))
+ dl_trunk_size = RDL_CHUNK_MAX;
+
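+	/* Try the largest chunk size first; on allocation failure, halve the
+	 * request until it reaches the minimum RDL_CHUNK size.
+	 */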
+ while (!bulkchunk) {
+ bulkchunk = MALLOC(osh, dl_trunk_size);
+ if (dl_trunk_size == RDL_CHUNK)
+ break;
+ if (!bulkchunk) {
+ dl_trunk_size /= 2;
+ if (dl_trunk_size < RDL_CHUNK)
+ dl_trunk_size = RDL_CHUNK;
+ }
+ }
+
+ if (bulkchunk == NULL) {
+ err = DBUS_ERR;
+ goto fail;
+ }
+
+ sent = 0;
+ dlpos = fw;
+ dllen = fwlen;
+
+ /* Get chip id and rev */
+ id.chip = usbinfo->pub->attrib.devid;
+ id.chiprev = usbinfo->pub->attrib.chiprev;
+
+ DBUSTRACE(("enter %s: fwlen=%d\n", __FUNCTION__, fwlen));
+
+ dbus_usbos_dl_cmd(osinfo, DL_GETSTATE, &state, sizeof(rdl_state_t));
+
+ /* 3) Load the image */
+	while (sent < dllen) {
+ /* Wait until the usb device reports it received all the bytes we sent */
+
+ if (sent < dllen) {
+ if ((dllen-sent) < dl_trunk_size)
+ sendlen = dllen-sent;
+ else
+ sendlen = dl_trunk_size;
+
+ /* simply avoid having to send a ZLP by ensuring we never have an even
+ * multiple of 64
+ */
+ if (!(sendlen % 64))
+ sendlen -= 4;
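+			/* e.g. a 1600-byte chunk (a multiple of 64) is trimmed to 1596;
+			 * the remaining 4 bytes go out with the next chunk.
+			 */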
+
+ /* send data */
+ memcpy(bulkchunk, dlpos, sendlen);
+ if (!dbus_usbos_dl_send_bulk(osinfo, bulkchunk, sendlen)) {
+ err = DBUS_ERR;
+ goto fail;
+ }
+
+ dlpos += sendlen;
+ sent += sendlen;
+ DBUSTRACE(("%s: sendlen %d\n", __FUNCTION__, sendlen));
+ }
+
+ wait = 0;
+ wait_time = USB_SFLASH_DLIMAGE_SPINWAIT;
+ while (!dbus_usbos_dl_cmd(osinfo, DL_GETSTATE, &state,
+ sizeof(rdl_state_t))) {
+ if ((id.chip == 43236) && (id.chiprev == 0)) {
+ DBUSERR(("%s: 43236a0 SFlash delay, waiting for dongle crc check "
+ "completion!!!\n", __FUNCTION__));
+ dbus_usbos_wait(osinfo, wait_time);
+ wait += wait_time;
+ if (wait >= USB_SFLASH_DLIMAGE_LIMIT) {
+ DBUSERR(("%s: DL_GETSTATE Failed xxxx\n", __FUNCTION__));
+ err = DBUS_ERR;
+ goto fail;
+ }
+ } else {
+ DBUSERR(("%s: DL_GETSTATE Failed xxxx\n", __FUNCTION__));
+ err = DBUS_ERR;
+ goto fail;
+ }
+ }
+
+ state.state = ltoh32(state.state);
+ state.bytes = ltoh32(state.bytes);
+
+ /* restart if an error is reported */
+ if ((state.state == DL_BAD_HDR) || (state.state == DL_BAD_CRC)) {
+ DBUSERR(("%s: Bad Hdr or Bad CRC\n", __FUNCTION__));
+ err = DBUS_ERR;
+ goto fail;
+ }
+
+ }
+fail:
+ if (bulkchunk)
+ MFREE(osh, bulkchunk, dl_trunk_size);
+
+ return err;
+} /* dbus_usb_dl_writeimage */
+
+/** Higher level DBUS layer (dbus.c) requests this layer to download image into dongle */
+static int
+dbus_usb_dlstart(void *bus, uint8 *fw, int len)
+{
+ usb_info_t *usbinfo = BUS_INFO(bus, usb_info_t);
+ int err;
+
+ DBUSTRACE(("%s\n", __FUNCTION__));
+
+ if (usbinfo == NULL)
+ return DBUS_ERR;
+
+ if (USB_DEV_ISBAD(usbinfo))
+ return DBUS_ERR;
+
+ err = dbus_usb_rdl_dwnld_state(usbinfo);
+
+ if (DBUS_OK == err) {
+ err = dbus_usb_dl_writeimage(usbinfo, fw, len);
+ if (err == DBUS_OK)
+ usbinfo->pub->busstate = DBUS_STATE_DL_DONE;
+ else
+ usbinfo->pub->busstate = DBUS_STATE_DL_PENDING;
+ } else
+ usbinfo->pub->busstate = DBUS_STATE_DL_PENDING;
+
+ return err;
+}
+
+static bool
+dbus_usb_update_chipinfo(usb_info_t *usbinfo, uint32 chip)
+{
+ bool retval = TRUE;
+ /* based on the CHIP Id, store the ram size which is needed for NVRAM download. */
+ switch (chip) {
+
+ case 0x4319:
+ usbinfo->rdlram_size = RDL_RAM_SIZE_4319;
+ usbinfo->rdlram_base_addr = RDL_RAM_BASE_4319;
+ break;
+
+ case 0x4329:
+ usbinfo->rdlram_size = RDL_RAM_SIZE_4329;
+ usbinfo->rdlram_base_addr = RDL_RAM_BASE_4329;
+ break;
+
+ case 43234:
+ case 43235:
+ case 43236:
+ usbinfo->rdlram_size = RDL_RAM_SIZE_43236;
+ usbinfo->rdlram_base_addr = RDL_RAM_BASE_43236;
+ break;
+
+ case 0x4328:
+ usbinfo->rdlram_size = RDL_RAM_SIZE_4328;
+ usbinfo->rdlram_base_addr = RDL_RAM_BASE_4328;
+ break;
+
+ case 0x4322:
+ usbinfo->rdlram_size = RDL_RAM_SIZE_4322;
+ usbinfo->rdlram_base_addr = RDL_RAM_BASE_4322;
+ break;
+
+ case 0x4360:
+ case 0xAA06:
+ usbinfo->rdlram_size = RDL_RAM_SIZE_4360;
+ usbinfo->rdlram_base_addr = RDL_RAM_BASE_4360;
+ break;
+
+ case 43242:
+ case 43243:
+ usbinfo->rdlram_size = RDL_RAM_SIZE_43242;
+ usbinfo->rdlram_base_addr = RDL_RAM_BASE_43242;
+ break;
+
+ case 43143:
+ usbinfo->rdlram_size = RDL_RAM_SIZE_43143;
+ usbinfo->rdlram_base_addr = RDL_RAM_BASE_43143;
+ break;
+
+ case 0x4350:
+ case 43556:
+ case 43558:
+ case 43569:
+ usbinfo->rdlram_size = RDL_RAM_SIZE_4350;
+ usbinfo->rdlram_base_addr = RDL_RAM_BASE_4350;
+ break;
+
+ case POSTBOOT_ID:
+ break;
+
+ default:
+ DBUSERR(("%s: Chip 0x%x Ram size is not known\n", __FUNCTION__, chip));
+ retval = FALSE;
+ break;
+
+ }
+
+ return retval;
+} /* dbus_usb_update_chipinfo */
+
+/** higher DBUS level (dbus.c) wants to know if firmware download is required. */
+static int
+dbus_usb_dlneeded(void *bus)
+{
+ usb_info_t *usbinfo = BUS_INFO(bus, usb_info_t);
+ void *osinfo;
+ bootrom_id_t id;
+ int dl_needed = 1;
+
+ DBUSTRACE(("%s\n", __FUNCTION__));
+
+ if (usbinfo == NULL)
+ return DBUS_ERR;
+
+ osinfo = usbinfo->usbosl_info;
+ ASSERT(osinfo);
+
+ /* Check if firmware downloaded already by querying runtime ID */
+ id.chip = 0xDEAD;
+ dbus_usbos_dl_cmd(osinfo, DL_GETVER, &id, sizeof(bootrom_id_t));
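+	/* A dongle still in the bootloader reports its chip id here; a dongle
+	 * already running firmware reports POSTBOOT_ID instead.
+	 */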
+
+ id.chip = ltoh32(id.chip);
+ id.chiprev = ltoh32(id.chiprev);
+
+ if (FALSE == dbus_usb_update_chipinfo(usbinfo, id.chip)) {
+ dl_needed = DBUS_ERR;
+ goto exit;
+ }
+
+ DBUSERR(("%s: chip 0x%x rev 0x%x\n", __FUNCTION__, id.chip, id.chiprev));
+ if (id.chip == POSTBOOT_ID) {
+		/* This code is needed to support the two-enumeration USB1.1 scenario */
+ DBUSERR(("%s: Firmware already downloaded\n", __FUNCTION__));
+
+ dbus_usbos_dl_cmd(osinfo, DL_RESETCFG, &id, sizeof(bootrom_id_t));
+ dl_needed = DBUS_OK;
+ if (usbinfo->pub->busstate == DBUS_STATE_DL_PENDING)
+ usbinfo->pub->busstate = DBUS_STATE_DL_DONE;
+ } else {
+ usbinfo->pub->attrib.devid = id.chip;
+ usbinfo->pub->attrib.chiprev = id.chiprev;
+ }
+
+exit:
+ return dl_needed;
+}
+
+/** After issuing firmware download, higher DBUS level (dbus.c) wants to start the firmware. */
+static int
+dbus_usb_dlrun(void *bus)
+{
+ usb_info_t *usbinfo = BUS_INFO(bus, usb_info_t);
+ void *osinfo;
+ rdl_state_t state;
+ int err = DBUS_OK;
+
+ DBUSTRACE(("%s\n", __FUNCTION__));
+
+ if (usbinfo == NULL)
+ return DBUS_ERR;
+
+ if (USB_DEV_ISBAD(usbinfo))
+ return DBUS_ERR;
+
+ osinfo = usbinfo->usbosl_info;
+ ASSERT(osinfo);
+
+ /* Check we are runnable */
+ dbus_usbos_dl_cmd(osinfo, DL_GETSTATE, &state, sizeof(rdl_state_t));
+
+ state.state = ltoh32(state.state);
+ state.bytes = ltoh32(state.bytes);
+
+ /* Start the image */
+ if (state.state == DL_RUNNABLE) {
+ DBUSTRACE(("%s: Issue DL_GO\n", __FUNCTION__));
+ dbus_usbos_dl_cmd(osinfo, DL_GO, &state, sizeof(rdl_state_t));
+
+ if (usbinfo->pub->attrib.devid == TEST_CHIP)
+ dbus_usbos_wait(osinfo, USB_DLGO_SPINWAIT);
+
+// dbus_usb_resetcfg(usbinfo);
+		/* The dongle may go through re-enumeration. */
+ } else {
+ DBUSERR(("%s: Dongle not runnable\n", __FUNCTION__));
+ err = DBUS_ERR;
+ }
+
+ return err;
+}
+
+/**
+ * As preparation for firmware download, higher DBUS level (dbus.c) requests the firmware image
+ * to be used for the type of dongle detected. Directly called by dbus.c (so not via a callback
+ * construction)
+ */
+void
+dbus_bus_fw_get(void *bus, uint8 **fw, int *fwlen, int *decomp)
+{
+ usb_info_t *usbinfo = BUS_INFO(bus, usb_info_t);
+ unsigned int devid;
+ unsigned int crev;
+
+ devid = usbinfo->pub->attrib.devid;
+ crev = usbinfo->pub->attrib.chiprev;
+
+ *fw = NULL;
+ *fwlen = 0;
+
+ switch (devid) {
+ case BCM43236_CHIP_ID:
+ case BCM43235_CHIP_ID:
+ case BCM43234_CHIP_ID:
+ case BCM43238_CHIP_ID: {
+ if (crev == 3 || crev == 2 || crev == 1) {
+#ifdef EMBED_IMAGE_43236b
+ *fw = (uint8 *)dlarray_43236b;
+ *fwlen = sizeof(dlarray_43236b);
+
+#endif
+ }
+ } break;
+ case BCM4360_CHIP_ID:
+ case BCM4352_CHIP_ID:
+ case BCM43526_CHIP_ID:
+#ifdef EMBED_IMAGE_43526a
+ if (crev <= 2) {
+ *fw = (uint8 *)dlarray_43526a;
+ *fwlen = sizeof(dlarray_43526a);
+ }
+#endif
+#ifdef EMBED_IMAGE_43526b
+ if (crev > 2) {
+ *fw = (uint8 *)dlarray_43526b;
+ *fwlen = sizeof(dlarray_43526b);
+ }
+#endif
+ break;
+
+ case BCM43242_CHIP_ID:
+#ifdef EMBED_IMAGE_43242a0
+ *fw = (uint8 *)dlarray_43242a0;
+ *fwlen = sizeof(dlarray_43242a0);
+#endif
+ break;
+
+ case BCM43143_CHIP_ID:
+#ifdef EMBED_IMAGE_43143a0
+ *fw = (uint8 *)dlarray_43143a0;
+ *fwlen = sizeof(dlarray_43143a0);
+#endif
+#ifdef EMBED_IMAGE_43143b0
+ *fw = (uint8 *)dlarray_43143b0;
+ *fwlen = sizeof(dlarray_43143b0);
+#endif
+ break;
+
+ case BCM4350_CHIP_ID:
+ case BCM4354_CHIP_ID:
+ case BCM43556_CHIP_ID:
+ case BCM43558_CHIP_ID:
+ case BCM43566_CHIP_ID:
+ case BCM43568_CHIP_ID:
+ case BCM43570_CHIP_ID:
+ case BCM4358_CHIP_ID:
+#ifdef EMBED_IMAGE_4350a0
+ if (crev == 0) {
+ *fw = (uint8 *)dlarray_4350a0;
+ *fwlen = sizeof(dlarray_4350a0);
+ }
+#endif
+#ifdef EMBED_IMAGE_4350b0
+ if (crev == 1) {
+ *fw = (uint8 *)dlarray_4350b0;
+ *fwlen = sizeof(dlarray_4350b0);
+ }
+#endif
+#ifdef EMBED_IMAGE_4350b1
+ if (crev == 2) {
+ *fw = (uint8 *)dlarray_4350b1;
+ *fwlen = sizeof(dlarray_4350b1);
+ }
+#endif
+#ifdef EMBED_IMAGE_43556b1
+ if (crev == 2) {
+ *fw = (uint8 *)dlarray_43556b1;
+ *fwlen = sizeof(dlarray_43556b1);
+ }
+#endif
+#ifdef EMBED_IMAGE_4350c0
+ if (crev == 3) {
+ *fw = (uint8 *)dlarray_4350c0;
+ *fwlen = sizeof(dlarray_4350c0);
+ }
+#endif /* EMBED_IMAGE_4350c0 */
+#ifdef EMBED_IMAGE_4350c1
+ if (crev == 4) {
+ *fw = (uint8 *)dlarray_4350c1;
+ *fwlen = sizeof(dlarray_4350c1);
+ }
+#endif /* EMBED_IMAGE_4350c1 */
+ break;
+ case BCM43569_CHIP_ID:
+#ifdef EMBED_IMAGE_43569a0
+ if (crev == 0) {
+ *fw = (uint8 *)dlarray_43569a0;
+ *fwlen = sizeof(dlarray_43569a0);
+ }
+#endif /* EMBED_IMAGE_43569a0 */
+ break;
+ default:
+#ifdef EMBED_IMAGE_GENERIC
+ *fw = (uint8 *)dlarray;
+ *fwlen = sizeof(dlarray);
+#endif
+ break;
+ }
+} /* dbus_bus_fw_get */
diff --git a/bcmdhd.101.10.361.x/dbus_usb_linux.c b/bcmdhd.101.10.361.x/dbus_usb_linux.c
new file mode 100755
index 0000000..0bd7181
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dbus_usb_linux.c
@@ -0,0 +1,3405 @@
+/*
+ * Dongle BUS interface
+ * USB Linux Implementation
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dbus_usb_linux.c 564663 2015-06-18 02:34:42Z $
+ */
+
+/**
+ * @file @brief
+ * This file contains DBUS code that is USB *and* OS (Linux) specific. DBUS is a Broadcom
+ * proprietary host specific abstraction layer.
+ */
+
+#include <typedefs.h>
+#include <osl.h>
+
+/**
+ * DBUS_LINUX_RXDPC was created for router platform performance tuning. A separate thread is
+ * created to handle USB RX, which keeps the call chain from getting too long and improves the
+ * cache hit rate.
+ *
+ * The DBUS_LINUX_RXDPC setting is in the wlconfig file.
+ */
+
+/*
+ * If DBUS_LINUX_RXDPC is off, spin_lock_bh() for CTFPOOL in
+ * linux_osl.c has to be changed to spin_lock_irqsave() because
+ * PKTGET/PKTFREE are no longer in bottom half.
+ *
+ * Right now we have another queue rpcq in wl_linux.c. Maybe we
+ * can eliminate that one to reduce the overhead.
+ *
+ * Enabling the 2nd EP together with DBUS_LINUX_RXDPC causes
+ * traffic from both EPs to be queued in the same rx queue. If we
+ * want RXDPC to work with the 2nd EP, the EP for RPC call returns
+ * should bypass the dpc and go directly up.
+ */
+
+/* #define DBUS_LINUX_RXDPC */
+
+/* Dbus histogram for ntxq, nrxq, dpc parameter tuning */
+/* #define DBUS_LINUX_HIST */
+
+#include <usbrdl.h>
+#include <bcmendian.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/random.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <asm/uaccess.h>
+#include <asm/unaligned.h>
+#include <dbus.h>
+#include <bcmutils.h>
+#include <bcmdevs_legacy.h>
+#include <bcmdevs.h>
+#include <linux/usb.h>
+#include <usbrdl.h>
+#include <linux/firmware.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+
+#if defined(USBOS_THREAD) || defined(USBOS_TX_THREAD)
+
+/**
+ * The usb-thread is designed to provide concurrency on multiprocessor and SMP Linux kernels. On
+ * dual-core platforms the WLAN driver, without threads, executed only on CPU0. The driver consumed
+ * almost 100% of CPU0 while CPU1 remained idle. This behavior was observed on Broadcom's STB.
+ *
+ * The WLAN driver consumed most of CPU0, and none of CPU1, because tasklets/queues, software irqs,
+ * and hardware irqs all execute on CPU0 only. CPU0 became the system's bottleneck: TPUT was lower
+ * and the system's responsiveness slower.
+ *
+ * To improve system responsiveness and TPUT, the usb-thread was implemented. The system's threads
+ * can be scheduled to run on any core: one core can process data in the usb-layer while the other
+ * core processes data in the wl-layer.
+ *
+ * For further info see the [WlThreadAndUsbThread] Twiki.
+ */
+
+#include <linux/kthread.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <asm/hardirq.h>
+#include <linux/list.h>
+#include <linux_osl.h>
+#endif /* USBOS_THREAD || USBOS_TX_THREAD */
+
+
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#define KERNEL26
+#endif
+
+/**
+ * Starting with the 3.10 kernel release, dynamic PM support for USB is present whenever
+ * the kernel was built with CONFIG_PM_RUNTIME enabled. The CONFIG_USB_SUSPEND option has
+ * been eliminated.
+ */
+#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 21)) && defined(CONFIG_USB_SUSPEND)) \
+ || ((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)) && defined(CONFIG_PM_RUNTIME)) \
+ || (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0))
+/* For USB power management support, see Linux kernel: Documentation/usb/power-management.txt */
+#define USB_SUSPEND_AVAILABLE
+#endif
+
+/* Define alternate fw/nvram paths used in Android */
+#ifdef OEM_ANDROID
+#define CONFIG_ANDROID_BCMDHD_FW_PATH "broadcom/dhd/firmware/fw.bin.trx"
+#define CONFIG_ANDROID_BCMDHD_NVRAM_PATH "broadcom/dhd/nvrams/nvm.txt"
+#endif /* OEM_ANDROID */
+
+static inline int usb_submit_urb_linux(struct urb *urb)
+{
+
+#ifdef BCM_MAX_URB_LEN
+ if (urb && (urb->transfer_buffer_length > BCM_MAX_URB_LEN)) {
+ DBUSERR(("URB transfer length=%d exceeded %d ra=%p\n", urb->transfer_buffer_length,
+ BCM_MAX_URB_LEN, __builtin_return_address(0)));
+ return DBUS_ERR;
+ }
+#endif
+
+#ifdef KERNEL26
+ return usb_submit_urb(urb, GFP_ATOMIC);
+#else
+ return usb_submit_urb(urb);
+#endif
+
+}
+
+#define USB_SUBMIT_URB(urb) usb_submit_urb_linux(urb)
+
+#ifdef KERNEL26
+
+#define USB_ALLOC_URB() usb_alloc_urb(0, GFP_ATOMIC)
+#define USB_UNLINK_URB(urb) (usb_kill_urb(urb))
+#define USB_FREE_URB(urb) (usb_free_urb(urb))
+#define USB_REGISTER() usb_register(&dbus_usbdev)
+#define USB_DEREGISTER() usb_deregister(&dbus_usbdev)
+
+#ifdef USB_SUSPEND_AVAILABLE
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33))
+#define USB_AUTOPM_SET_INTERFACE(intf) usb_autopm_set_interface(intf)
+#else
+#define USB_ENABLE_AUTOSUSPEND(udev) usb_enable_autosuspend(udev)
+#define USB_DISABLE_AUTOSUSPEND(udev) usb_disable_autosuspend(udev)
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33) */
+
+#define USB_AUTOPM_GET_INTERFACE(intf) usb_autopm_get_interface(intf)
+#define USB_AUTOPM_PUT_INTERFACE(intf) usb_autopm_put_interface(intf)
+#define USB_AUTOPM_GET_INTERFACE_ASYNC(intf) usb_autopm_get_interface_async(intf)
+#define USB_AUTOPM_PUT_INTERFACE_ASYNC(intf) usb_autopm_put_interface_async(intf)
+#define USB_MARK_LAST_BUSY(dev) usb_mark_last_busy(dev)
+
+#else /* USB_SUSPEND_AVAILABLE */
+
+#define USB_AUTOPM_GET_INTERFACE(intf) do {} while (0)
+#define USB_AUTOPM_PUT_INTERFACE(intf) do {} while (0)
+#define USB_AUTOPM_GET_INTERFACE_ASYNC(intf) do {} while (0)
+#define USB_AUTOPM_PUT_INTERFACE_ASYNC(intf) do {} while (0)
+#define USB_MARK_LAST_BUSY(dev) do {} while (0)
+#endif /* USB_SUSPEND_AVAILABLE */
+
+#define USB_CONTROL_MSG(dev, pipe, request, requesttype, value, index, data, size, timeout) \
+ usb_control_msg((dev), (pipe), (request), (requesttype), (value), (index), \
+ (data), (size), (timeout))
+#define USB_BULK_MSG(dev, pipe, data, len, actual_length, timeout) \
+ usb_bulk_msg((dev), (pipe), (data), (len), (actual_length), (timeout))
+#define USB_BUFFER_ALLOC(dev, size, mem, dma) usb_buffer_alloc(dev, size, mem, dma)
+#define USB_BUFFER_FREE(dev, size, data, dma) usb_buffer_free(dev, size, data, dma)
+
+#ifdef WL_URB_ZPKT
+#define URB_QUEUE_BULK URB_ZERO_PACKET
+#else
+#define URB_QUEUE_BULK 0
+#endif /* WL_URB_ZPKT */
+
+#define CALLBACK_ARGS struct urb *urb, struct pt_regs *regs
+#define CALLBACK_ARGS_DATA urb, regs
+#define CONFIGDESC(usb) (&((usb)->actconfig)->desc)
+#define IFPTR(usb, idx) ((usb)->actconfig->interface[idx])
+#define IFALTS(usb, idx) (IFPTR((usb), (idx))->altsetting[0])
+#define IFDESC(usb, idx) IFALTS((usb), (idx)).desc
+#define IFEPDESC(usb, idx, ep) (IFALTS((usb), (idx)).endpoint[ep]).desc
+
+#else /* KERNEL26 */
+
+#define USB_ALLOC_URB() usb_alloc_urb(0)
+#define USB_UNLINK_URB(urb) usb_unlink_urb(urb)
+#define USB_FREE_URB(urb) (usb_free_urb(urb))
+#define USB_REGISTER() usb_register(&dbus_usbdev)
+#define USB_DEREGISTER() usb_deregister(&dbus_usbdev)
+#define USB_AUTOPM_GET_INTERFACE(intf) do {} while (0)
+#define USB_AUTOPM_GET_INTERFACE_ASYNC(intf) do {} while (0)
+#define USB_AUTOPM_PUT_INTERFACE_ASYNC(intf) do {} while (0)
+#define USB_MARK_LAST_BUSY(dev) do {} while (0)
+
+#define USB_CONTROL_MSG(dev, pipe, request, requesttype, value, index, data, size, timeout) \
+ usb_control_msg((dev), (pipe), (request), (requesttype), (value), (index), \
+ (data), (size), (timeout))
+#define USB_BUFFER_ALLOC(dev, size, mem, dma) kmalloc(size, mem)
+#define USB_BUFFER_FREE(dev, size, data, dma) kfree(data)
+
+#ifdef WL_URB_ZPKT
+#define URB_QUEUE_BULK USB_QUEUE_BULK|URB_ZERO_PACKET
+#else
+#define URB_QUEUE_BULK 0
+#endif /* WL_URB_ZPKT */
+
+#define CALLBACK_ARGS struct urb *urb
+#define CALLBACK_ARGS_DATA urb
+#define CONFIGDESC(usb) ((usb)->actconfig)
+#define IFPTR(usb, idx) (&(usb)->actconfig->interface[idx])
+#define IFALTS(usb, idx) ((usb)->actconfig->interface[idx].altsetting[0])
+#define IFDESC(usb, idx) IFALTS((usb), (idx))
+#define IFEPDESC(usb, idx, ep) (IFALTS((usb), (idx)).endpoint[ep])
+
+
+#endif /* KERNEL26 */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
+#define USB_SPEED_SUPER 5
+#endif /* #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)) */
+
+#define CONTROL_IF 0
+#define BULK_IF 0
+
+#ifdef BCMUSBDEV_COMPOSITE
+#define USB_COMPIF_MAX 4
+
+#define USB_CLASS_WIRELESS 0xe0
+#define USB_CLASS_MISC 0xef
+#define USB_SUBCLASS_COMMON 0x02
+#define USB_PROTO_IAD 0x01
+#define USB_PROTO_VENDOR 0xff
+
+#define USB_QUIRK_NO_SET_INTF 0x04 /* device does not support set_interface */
+#endif /* BCMUSBDEV_COMPOSITE */
+
+#define USB_SYNC_WAIT_TIMEOUT 300 /* ms */
+
+/* Private data kept in skb */
+#define SKB_PRIV(skb, idx) (&((void **)skb->cb)[idx])
+#define SKB_PRIV_URB(skb) (*(struct urb **)SKB_PRIV(skb, 0))
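+/* Usage sketch (illustrative): the URB that owns an skb is stashed in the
+ * first slot of skb->cb, so it can be recovered later, e.g.
+ *   *(struct urb **)SKB_PRIV(skb, 0) = urb;
+ *   struct urb *u = SKB_PRIV_URB(skb);
+ */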
+
+#ifndef DBUS_USB_RXQUEUE_BATCH_ADD
+/* items to add each time within limit */
+#define DBUS_USB_RXQUEUE_BATCH_ADD 8
+#endif
+
+#ifndef DBUS_USB_RXQUEUE_LOWER_WATERMARK
+/* add a new batch req to rx queue when waiting item count reduce to this number */
+#define DBUS_USB_RXQUEUE_LOWER_WATERMARK 4
+#endif
+
+enum usbos_suspend_state {
+ USBOS_SUSPEND_STATE_DEVICE_ACTIVE = 0, /* Device is busy, won't allow suspend */
+ USBOS_SUSPEND_STATE_SUSPEND_PENDING, /* Device is idle, can be suspended */
+						/* Waiting for PM to suspend */
+ USBOS_SUSPEND_STATE_SUSPENDED /* Device suspended */
+};
+
+enum usbos_request_state {
+ USBOS_REQUEST_STATE_UNSCHEDULED = 0, /* USB TX request not scheduled */
+ USBOS_REQUEST_STATE_SCHEDULED, /* USB TX request given to TX thread */
+ USBOS_REQUEST_STATE_SUBMITTED /* USB TX request submitted */
+};
+
+typedef struct {
+ uint32 notification;
+ uint32 reserved;
+} intr_t;
+
+typedef struct {
+ dbus_pub_t *pub;
+
+ void *cbarg;
+ dbus_intf_callbacks_t *cbs;
+
+ /* Imported */
+ struct usb_device *usb; /* USB device pointer from OS */
+ struct urb *intr_urb; /* URB for interrupt endpoint */
+ struct list_head req_rxfreeq;
+ struct list_head req_txfreeq;
+ struct list_head req_rxpostedq; /* Posted down to USB driver for RX */
+ struct list_head req_txpostedq; /* Posted down to USB driver for TX */
+ spinlock_t rxfree_lock; /* Lock for rx free list */
+ spinlock_t txfree_lock; /* Lock for tx free list */
+ spinlock_t rxposted_lock; /* Lock for rx posted list */
+ spinlock_t txposted_lock; /* Lock for tx posted list */
+ uint rx_pipe, tx_pipe, intr_pipe, rx_pipe2; /* Pipe numbers for USB I/O */
+ uint rxbuf_len;
+
+ struct list_head req_rxpendingq; /* RXDPC: Pending for dpc to send up */
+ spinlock_t rxpending_lock; /* RXDPC: Lock for rx pending list */
+ long dpc_pid;
+ struct semaphore dpc_sem;
+ struct completion dpc_exited;
+ int rxpending;
+
+ struct urb *ctl_urb;
+ int ctl_in_pipe, ctl_out_pipe;
+ struct usb_ctrlrequest ctl_write;
+ struct usb_ctrlrequest ctl_read;
+ struct semaphore ctl_lock; /* Lock for CTRL transfers via tx_thread */
+#ifdef USBOS_TX_THREAD
+ enum usbos_request_state ctl_state;
+#endif /* USBOS_TX_THREAD */
+
+ spinlock_t rxlock; /* Lock for rxq management */
+ spinlock_t txlock; /* Lock for txq management */
+
+ int intr_size; /* Size of interrupt message */
+ int interval; /* Interrupt polling interval */
+ intr_t intr; /* Data buffer for interrupt endpoint */
+
+ int maxps;
+ atomic_t txposted;
+ atomic_t rxposted;
+ atomic_t txallocated;
+ atomic_t rxallocated;
+ bool rxctl_deferrespok; /* Get a response for setup from dongle */
+
+ wait_queue_head_t wait;
+ bool waitdone;
+ int sync_urb_status;
+
+ struct urb *blk_urb; /* Used for downloading embedded image */
+
+#ifdef USBOS_THREAD
+ spinlock_t ctrl_lock;
+ spinlock_t usbos_list_lock;
+ struct list_head usbos_list;
+ struct list_head usbos_free_list;
+ atomic_t usbos_list_cnt;
+ wait_queue_head_t usbos_queue_head;
+ struct task_struct *usbos_kt;
+#endif /* USBOS_THREAD */
+
+#ifdef USBOS_TX_THREAD
+ spinlock_t usbos_tx_list_lock;
+ struct list_head usbos_tx_list;
+ wait_queue_head_t usbos_tx_queue_head;
+ struct task_struct *usbos_tx_kt;
+#endif /* USBOS_TX_THREAD */
+
+ struct dma_pool *qtd_pool; /* QTD pool for USB optimization only */
+ int tx_ep, rx_ep, rx2_ep; /* EPs for USB optimization */
+ struct usb_device *usb_device; /* USB device for optimization */
+} usbos_info_t;
+
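+/* Bookkeeping for one URB: ties the URB to its data buffer, its IRB argument and the owning bus */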
+typedef struct urb_req {
+ void *pkt;
+ int buf_len;
+ struct urb *urb;
+ void *arg;
+ usbos_info_t *usbinfo;
+ struct list_head urb_list;
+} urb_req_t;
+
+#ifdef USBOS_THREAD
+typedef struct usbos_list_entry {
+ struct list_head list; /* must be first */
+ void *urb_context;
+ int urb_length;
+ int urb_status;
+} usbos_list_entry_t;
+
+static void* dbus_usbos_thread_init(usbos_info_t *usbos_info);
+static void dbus_usbos_thread_deinit(usbos_info_t *usbos_info);
+static void dbus_usbos_dispatch_schedule(CALLBACK_ARGS);
+static int dbus_usbos_thread_func(void *data);
+#endif /* USBOS_THREAD */
+
+#ifdef USBOS_TX_THREAD
+void* dbus_usbos_tx_thread_init(usbos_info_t *usbos_info);
+void dbus_usbos_tx_thread_deinit(usbos_info_t *usbos_info);
+int dbus_usbos_tx_thread_func(void *data);
+#endif /* USBOS_TX_THREAD */
+
+/* Shared Function prototypes */
+bool dbus_usbos_dl_cmd(usbos_info_t *usbinfo, uint8 cmd, void *buffer, int buflen);
+int dbus_usbos_wait(usbos_info_t *usbinfo, uint16 ms);
+bool dbus_usbos_dl_send_bulk(usbos_info_t *usbinfo, void *buffer, int len);
+int dbus_write_membytes(usbos_info_t *usbinfo, bool set, uint32 address, uint8 *data, uint size);
+
+/* Local function prototypes */
+static void dbus_usbos_send_complete(CALLBACK_ARGS);
+static void dbus_usbos_recv_complete(CALLBACK_ARGS);
+static int dbus_usbos_errhandler(void *bus, int err);
+static int dbus_usbos_state_change(void *bus, int state);
+static void dbusos_stop(usbos_info_t *usbos_info);
+
+#ifdef KERNEL26
+static int dbus_usbos_probe(struct usb_interface *intf, const struct usb_device_id *id);
+static void dbus_usbos_disconnect(struct usb_interface *intf);
+#if defined(USB_SUSPEND_AVAILABLE)
+static int dbus_usbos_resume(struct usb_interface *intf);
+static int dbus_usbos_suspend(struct usb_interface *intf, pm_message_t message);
+/* at the moment, used for full dongle host driver only */
+static int dbus_usbos_reset_resume(struct usb_interface *intf);
+#endif /* USB_SUSPEND_AVAILABLE */
+#else /* KERNEL26 */
+static void *dbus_usbos_probe(struct usb_device *usb, unsigned int ifnum,
+ const struct usb_device_id *id);
+static void dbus_usbos_disconnect(struct usb_device *usb, void *ptr);
+#endif /* KERNEL26 */
+
+
+/**
+ * The missing-field-initializers warning has to be disabled because the terminating {}
+ * element triggers it, and different kernel versions have a different number of members,
+ * so a full initializer cannot be specified. Issuing the warning here is arguably a GCC
+ * bug, since C99 specifies the universal zero initializer {0} as a correct way to
+ * initialize a struct to all zeros.
+ */
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6))
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wmissing-field-initializers"
+#endif
+
+static struct usb_device_id devid_table[] = {
+ { USB_DEVICE(BCM_DNGL_VID, 0x0000) }, /* Configurable via register() */
+#if defined(BCM_REQUEST_FW)
+ { USB_DEVICE(BCM_DNGL_VID, BCM_DNGL_BL_PID_4328) },
+ { USB_DEVICE(BCM_DNGL_VID, BCM_DNGL_BL_PID_4322) },
+ { USB_DEVICE(BCM_DNGL_VID, BCM_DNGL_BL_PID_4319) },
+ { USB_DEVICE(BCM_DNGL_VID, BCM_DNGL_BL_PID_43236) },
+ { USB_DEVICE(BCM_DNGL_VID, BCM_DNGL_BL_PID_43143) },
+ { USB_DEVICE(BCM_DNGL_VID, BCM_DNGL_BL_PID_43242) },
+ { USB_DEVICE(BCM_DNGL_VID, BCM_DNGL_BL_PID_4360) },
+ { USB_DEVICE(BCM_DNGL_VID, BCM_DNGL_BL_PID_4350) },
+ { USB_DEVICE(BCM_DNGL_VID, BCM_DNGL_BL_PID_43569) },
+#endif
+#ifdef EXTENDED_VID_PID
+ EXTENDED_VID_PID,
+#endif /* EXTENDED_VID_PID */
+ { USB_DEVICE(BCM_DNGL_VID, BCM_DNGL_BDC_PID) }, /* Default BDC */
+ { }
+};
+
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6))
+#pragma GCC diagnostic pop
+#endif
+
+MODULE_DEVICE_TABLE(usb, devid_table);
+
+/** functions called by the Linux kernel USB subsystem */
+static struct usb_driver dbus_usbdev = {
+ name: "dbus_usbdev",
+ probe: dbus_usbos_probe,
+ disconnect: dbus_usbos_disconnect,
+ id_table: devid_table,
+#if defined(USB_SUSPEND_AVAILABLE)
+ suspend: dbus_usbos_suspend,
+ resume: dbus_usbos_resume,
+ reset_resume: dbus_usbos_reset_resume,
+ /* Linux USB core will allow autosuspend for devices bound to this driver */
+ supports_autosuspend: 1
+#endif /* USB_SUSPEND_AVAILABLE */
+};
+
+/**
+ * This stores USB info during Linux probe callback since attach() is not called yet at this point
+ */
+typedef struct {
+ void *usbos_info;
+ struct usb_device *usb; /* USB device pointer from OS */
+ uint rx_pipe; /* Pipe numbers for USB I/O */
+ uint tx_pipe; /* Pipe numbers for USB I/O */
+ uint intr_pipe; /* Pipe numbers for USB I/O */
+ uint rx_pipe2; /* Pipe numbers for USB I/O */
+ int intr_size; /* Size of interrupt message */
+ int interval; /* Interrupt polling interval */
+ bool dldone;
+ int vid;
+ int pid;
+ bool dereged;
+ bool disc_cb_done;
+ DEVICE_SPEED device_speed;
+ enum usbos_suspend_state suspend_state;
+ struct usb_interface *intf;
+} probe_info_t;
+
+/*
+ * USB Linux dbus_intf_t
+ */
+static void *dbus_usbos_intf_attach(dbus_pub_t *pub, void *cbarg, dbus_intf_callbacks_t *cbs);
+static void dbus_usbos_intf_detach(dbus_pub_t *pub, void *info);
+static int dbus_usbos_intf_send_irb(void *bus, dbus_irb_tx_t *txirb);
+static int dbus_usbos_intf_recv_irb(void *bus, dbus_irb_rx_t *rxirb);
+static int dbus_usbos_intf_recv_irb_from_ep(void *bus, dbus_irb_rx_t *rxirb, uint32 ep_idx);
+static int dbus_usbos_intf_cancel_irb(void *bus, dbus_irb_tx_t *txirb);
+static int dbus_usbos_intf_send_ctl(void *bus, uint8 *buf, int len);
+static int dbus_usbos_intf_recv_ctl(void *bus, uint8 *buf, int len);
+static int dbus_usbos_intf_get_attrib(void *bus, dbus_attrib_t *attrib);
+static int dbus_usbos_intf_up(void *bus);
+static int dbus_usbos_intf_down(void *bus);
+static int dbus_usbos_intf_stop(void *bus);
+static int dbus_usbos_readreg(void *bus, uint32 regaddr, int datalen, uint32 *value);
+extern int dbus_usbos_loopback_tx(void *usbos_info_ptr, int cnt, int size);
+int dbus_usbos_writereg(void *bus, uint32 regaddr, int datalen, uint32 data);
+static int dbus_usbos_intf_set_config(void *bus, dbus_config_t *config);
+static bool dbus_usbos_intf_recv_needed(void *bus);
+static void *dbus_usbos_intf_exec_rxlock(void *bus, exec_cb_t cb, struct exec_parms *args);
+static void *dbus_usbos_intf_exec_txlock(void *bus, exec_cb_t cb, struct exec_parms *args);
+#ifdef BCMUSBDEV_COMPOSITE
+static int dbus_usbos_intf_wlan(struct usb_device *usb);
+#endif /* BCMUSBDEV_COMPOSITE */
+
+/** functions called by dbus_usb.c */
+static dbus_intf_t dbus_usbos_intf = {
+ .attach = dbus_usbos_intf_attach,
+ .detach = dbus_usbos_intf_detach,
+ .up = dbus_usbos_intf_up,
+ .down = dbus_usbos_intf_down,
+ .send_irb = dbus_usbos_intf_send_irb,
+ .recv_irb = dbus_usbos_intf_recv_irb,
+ .cancel_irb = dbus_usbos_intf_cancel_irb,
+ .send_ctl = dbus_usbos_intf_send_ctl,
+ .recv_ctl = dbus_usbos_intf_recv_ctl,
+ .get_stats = NULL,
+ .get_attrib = dbus_usbos_intf_get_attrib,
+ .remove = NULL,
+ .resume = NULL,
+ .suspend = NULL,
+ .stop = dbus_usbos_intf_stop,
+ .reset = NULL,
+ .pktget = NULL,
+ .pktfree = NULL,
+ .iovar_op = NULL,
+ .dump = NULL,
+ .set_config = dbus_usbos_intf_set_config,
+ .get_config = NULL,
+ .device_exists = NULL,
+ .dlneeded = NULL,
+ .dlstart = NULL,
+ .dlrun = NULL,
+ .recv_needed = dbus_usbos_intf_recv_needed,
+ .exec_rxlock = dbus_usbos_intf_exec_rxlock,
+ .exec_txlock = dbus_usbos_intf_exec_txlock,
+
+ .tx_timer_init = NULL,
+ .tx_timer_start = NULL,
+ .tx_timer_stop = NULL,
+
+ .sched_dpc = NULL,
+ .lock = NULL,
+ .unlock = NULL,
+ .sched_probe_cb = NULL,
+
+ .shutdown = NULL,
+
+ .recv_stop = NULL,
+ .recv_resume = NULL,
+
+ .recv_irb_from_ep = dbus_usbos_intf_recv_irb_from_ep,
+ .readreg = dbus_usbos_readreg
+};
+
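+/* Probe state is kept in a single static instance, so one probed device is handled at a time */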
+static probe_info_t g_probe_info;
+static probe_cb_t probe_cb = NULL;
+static disconnect_cb_t disconnect_cb = NULL;
+static void *probe_arg = NULL;
+static void *disc_arg = NULL;
+
+
+
+static volatile int loopback_rx_cnt, loopback_tx_cnt;
+int loopback_size;
+bool is_loopback_pkt(void *buf);
+int matches_loopback_pkt(void *buf);
+
+/**
+ * Multiple code paths in this file dequeue a URB request; this function makes sure that happens
+ * in a concurrency-safe manner. Don't call this from a sleepable process context.
+ */
+static urb_req_t *
+dbus_usbos_qdeq(struct list_head *urbreq_q, spinlock_t *lock)
+{
+ unsigned long flags;
+ urb_req_t *req;
+
+ ASSERT(urbreq_q != NULL);
+
+ spin_lock_irqsave(lock, flags);
+
+ if (list_empty(urbreq_q)) {
+ req = NULL;
+ } else {
+ ASSERT(urbreq_q->next != NULL);
+ ASSERT(urbreq_q->next != urbreq_q);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+ req = list_entry(urbreq_q->next, urb_req_t, urb_list);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+ list_del_init(&req->urb_list);
+ }
+
+ spin_unlock_irqrestore(lock, flags);
+
+ return req;
+}
+
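+/** Concurrency-safe enqueue; the counterpart of dbus_usbos_qdeq() */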
+static void
+dbus_usbos_qenq(struct list_head *urbreq_q, urb_req_t *req, spinlock_t *lock)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(lock, flags);
+
+ list_add_tail(&req->urb_list, urbreq_q);
+
+ spin_unlock_irqrestore(lock, flags);
+}
+
+/**
+ * Multiple code paths in this file remove a URB request from a list; this function makes sure
+ * that happens in a concurrency-safe manner. Don't call this from a sleepable process context.
+ * Unlike dbus_usbos_qdeq(), which dequeues the head of a queue, this removes a specific request.
+ */
+static void
+dbus_usbos_req_del(urb_req_t *req, spinlock_t *lock)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(lock, flags);
+
+ list_del_init(&req->urb_list);
+
+ spin_unlock_irqrestore(lock, flags);
+}
+
+
+/**
+ * The driver requires a pool of URBs to operate. This function is called during initialization
+ * (attach phase) and again at runtime to grow the RX pool; it allocates a number of URBs and
+ * puts them on the free queue (req_rxfreeq for RX, req_txfreeq for TX)
+ */
+static int
+dbus_usbos_urbreqs_alloc(usbos_info_t *usbos_info, uint32 count, bool is_rx)
+{
+ int i;
+ int allocated = 0;
+ int err = DBUS_OK;
+
+ for (i = 0; i < count; i++) {
+ urb_req_t *req;
+
+ req = MALLOC(usbos_info->pub->osh, sizeof(urb_req_t));
+ if (req == NULL) {
+ DBUSERR(("%s: MALLOC req failed\n", __FUNCTION__));
+ err = DBUS_ERR_NOMEM;
+ goto fail;
+ }
+ bzero(req, sizeof(urb_req_t));
+
+ req->urb = USB_ALLOC_URB();
+ if (req->urb == NULL) {
+ DBUSERR(("%s: USB_ALLOC_URB req->urb failed\n", __FUNCTION__));
+ err = DBUS_ERR_NOMEM;
+ goto fail;
+ }
+
+ INIT_LIST_HEAD(&req->urb_list);
+
+ if (is_rx) {
+#if defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_RXNOCOPY)
+ /* don't allocate now. Do it on demand */
+ req->pkt = NULL;
+#else
+ /* pre-allocate buffers never to be released */
+ req->pkt = MALLOC(usbos_info->pub->osh, usbos_info->rxbuf_len);
+ if (req->pkt == NULL) {
+ DBUSERR(("%s: MALLOC req->pkt failed\n", __FUNCTION__));
+ err = DBUS_ERR_NOMEM;
+ goto fail;
+ }
+#endif
+ req->buf_len = usbos_info->rxbuf_len;
+ dbus_usbos_qenq(&usbos_info->req_rxfreeq, req, &usbos_info->rxfree_lock);
+ } else {
+ req->buf_len = 0;
+ dbus_usbos_qenq(&usbos_info->req_txfreeq, req, &usbos_info->txfree_lock);
+ }
+ allocated++;
+ continue;
+
+fail:
+ if (req) {
+ if (is_rx && req->pkt) {
+#if defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_RXNOCOPY)
+ /* req->pkt is NULL in "NOCOPY" mode */
+#else
+ MFREE(usbos_info->pub->osh, req->pkt, req->buf_len);
+#endif
+ }
+ if (req->urb) {
+ USB_FREE_URB(req->urb);
+ }
+ MFREE(usbos_info->pub->osh, req, sizeof(urb_req_t));
+ }
+ break;
+ }
+
+ atomic_add(allocated, is_rx ? &usbos_info->rxallocated : &usbos_info->txallocated);
+
+ if (is_rx) {
+ DBUSTRACE(("%s: add %d (total %d) rx buf, each has %d bytes\n", __FUNCTION__,
+ allocated, atomic_read(&usbos_info->rxallocated), usbos_info->rxbuf_len));
+ } else {
+ DBUSTRACE(("%s: add %d (total %d) tx req\n", __FUNCTION__,
+ allocated, atomic_read(&usbos_info->txallocated)));
+ }
+
+ return err;
+} /* dbus_usbos_urbreqs_alloc */
+
+/** Typically called during detach or when attach failed. Don't call until all URBs unlinked */
+static int
+dbus_usbos_urbreqs_free(usbos_info_t *usbos_info, bool is_rx)
+{
+ int rtn = 0;
+ urb_req_t *req;
+ struct list_head *req_q;
+ spinlock_t *lock;
+
+ if (is_rx) {
+ req_q = &usbos_info->req_rxfreeq;
+ lock = &usbos_info->rxfree_lock;
+ } else {
+ req_q = &usbos_info->req_txfreeq;
+ lock = &usbos_info->txfree_lock;
+ }
+ while ((req = dbus_usbos_qdeq(req_q, lock)) != NULL) {
+
+ if (is_rx) {
+ if (req->pkt) {
+ /* We do MFREE instead of PKTFREE because the pkt has been
+ * converted to native already
+ */
+ MFREE(usbos_info->pub->osh, req->pkt, req->buf_len);
+ req->pkt = NULL;
+ req->buf_len = 0;
+ }
+ } else {
+ /* sending req should not be assigned pkt buffer */
+ ASSERT(req->pkt == NULL);
+ }
+
+ if (req->urb) {
+ USB_FREE_URB(req->urb);
+ req->urb = NULL;
+ }
+ MFREE(usbos_info->pub->osh, req, sizeof(urb_req_t));
+
+ rtn++;
+ }
+ return rtn;
+} /* dbus_usbos_urbreqs_free */
+
+/**
+ * called by Linux kernel on URB completion. Upper DBUS layer (dbus_usb.c) has to be notified of
+ * send completion.
+ */
+void
+dbus_usbos_send_complete(CALLBACK_ARGS)
+{
+ urb_req_t *req = urb->context;
+ dbus_irb_tx_t *txirb = req->arg;
+ usbos_info_t *usbos_info = req->usbinfo;
+ unsigned long flags;
+ int status = DBUS_OK;
+ int txposted;
+
+ USB_AUTOPM_PUT_INTERFACE_ASYNC(g_probe_info.intf);
+
+ spin_lock_irqsave(&usbos_info->txlock, flags);
+
+ dbus_usbos_req_del(req, &usbos_info->txposted_lock);
+ txposted = atomic_dec_return(&usbos_info->txposted);
+ if (unlikely (txposted < 0)) {
+ DBUSERR(("%s ERROR: txposted is negative (%d)!!\n", __FUNCTION__, txposted));
+ }
+ spin_unlock_irqrestore(&usbos_info->txlock, flags);
+
+ if (unlikely (urb->status)) {
+ status = DBUS_ERR_TXFAIL;
+ DBUSTRACE(("txfail status %d\n", urb->status));
+ }
+
+#if defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_RXNOCOPY)
+ /* sending req should not be assigned pkt buffer */
+ ASSERT(req->pkt == NULL);
+#endif
+ /* txirb should always be set, except for ZLP. ZLP is reusing this callback function. */
+ if (txirb != NULL) {
+ if (txirb->send_buf != NULL) {
+ MFREE(usbos_info->pub->osh, txirb->send_buf, req->buf_len);
+ txirb->send_buf = NULL;
+ req->buf_len = 0;
+ }
+ if (likely (usbos_info->cbarg && usbos_info->cbs)) {
+ if (likely (usbos_info->cbs->send_irb_complete != NULL))
+ usbos_info->cbs->send_irb_complete(usbos_info->cbarg, txirb, status);
+ }
+ }
+
+ dbus_usbos_qenq(&usbos_info->req_txfreeq, req, &usbos_info->txfree_lock);
+} /* dbus_usbos_send_complete */
+
+/**
+ * In order to receive USB traffic from the dongle, we need to supply the Linux kernel with a free
+ * URB that is going to contain received data.
+ */
+static int
+dbus_usbos_recv_urb_submit(usbos_info_t *usbos_info, dbus_irb_rx_t *rxirb, uint32 ep_idx)
+{
+ urb_req_t *req;
+ int ret = DBUS_OK;
+ unsigned long flags;
+ void *p;
+ uint rx_pipe;
+ int rxposted;
+
+ BCM_REFERENCE(rxposted);
+
+ if (!(req = dbus_usbos_qdeq(&usbos_info->req_rxfreeq, &usbos_info->rxfree_lock))) {
+ DBUSTRACE(("%s No free URB!\n", __FUNCTION__));
+ return DBUS_ERR_RXDROP;
+ }
+
+ spin_lock_irqsave(&usbos_info->rxlock, flags);
+
+#if defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_RXNOCOPY)
+ req->pkt = rxirb->pkt = PKTGET(usbos_info->pub->osh, req->buf_len, FALSE);
+ if (!rxirb->pkt) {
+ DBUSERR(("%s: PKTGET failed\n", __FUNCTION__));
+ dbus_usbos_qenq(&usbos_info->req_rxfreeq, req, &usbos_info->rxfree_lock);
+ ret = DBUS_ERR_RXDROP;
+ goto fail;
+ }
+ /* consider the packet "native" so we don't count it as MALLOCED in the osl */
+ PKTTONATIVE(usbos_info->pub->osh, req->pkt);
+ rxirb->buf = NULL;
+ p = PKTDATA(usbos_info->pub->osh, req->pkt);
+#else
+ if (req->buf_len != usbos_info->rxbuf_len) {
+ ASSERT(req->pkt);
+ MFREE(usbos_info->pub->osh, req->pkt, req->buf_len);
+ DBUSTRACE(("%s: replace rx buff: old len %d, new len %d\n", __FUNCTION__,
+ req->buf_len, usbos_info->rxbuf_len));
+ req->buf_len = 0;
+ req->pkt = MALLOC(usbos_info->pub->osh, usbos_info->rxbuf_len);
+ if (req->pkt == NULL) {
+ DBUSERR(("%s: MALLOC req->pkt failed\n", __FUNCTION__));
+ ret = DBUS_ERR_NOMEM;
+ goto fail;
+ }
+ req->buf_len = usbos_info->rxbuf_len;
+ }
+ rxirb->buf = req->pkt;
+ p = rxirb->buf;
+#endif /* BCM_RPC_NOCOPY || BCM_RPC_RXNOCOPY */
+ rxirb->buf_len = req->buf_len;
+ req->usbinfo = usbos_info;
+ req->arg = rxirb;
+ if (ep_idx == 0) {
+ rx_pipe = usbos_info->rx_pipe;
+ } else {
+ rx_pipe = usbos_info->rx_pipe2;
+ ASSERT(usbos_info->rx_pipe2);
+ }
+ /* Prepare the URB */
+ usb_fill_bulk_urb(req->urb, usbos_info->usb, rx_pipe,
+ p,
+ rxirb->buf_len,
+ (usb_complete_t)dbus_usbos_recv_complete, req);
+ req->urb->transfer_flags |= URB_QUEUE_BULK;
+
+ if ((ret = USB_SUBMIT_URB(req->urb))) {
+ DBUSERR(("%s USB_SUBMIT_URB failed. status %d\n", __FUNCTION__, ret));
+ dbus_usbos_qenq(&usbos_info->req_rxfreeq, req, &usbos_info->rxfree_lock);
+ ret = DBUS_ERR_RXFAIL;
+ goto fail;
+ }
+ rxposted = atomic_inc_return(&usbos_info->rxposted);
+
+ dbus_usbos_qenq(&usbos_info->req_rxpostedq, req, &usbos_info->rxposted_lock);
+fail:
+ spin_unlock_irqrestore(&usbos_info->rxlock, flags);
+ return ret;
+} /* dbus_usbos_recv_urb_submit */
+
+
+/**
+ * Called by the worker thread when a 'receive URB' has completed, or directly by the Linux
+ * kernel when it returns a URB to this driver.
+ */
+static void
+dbus_usbos_recv_complete_handle(urb_req_t *req, int len, int status)
+{
+ dbus_irb_rx_t *rxirb = req->arg;
+ usbos_info_t *usbos_info = req->usbinfo;
+ unsigned long flags;
+ int rxallocated, rxposted;
+ int dbus_status = DBUS_OK;
+ bool killed = (g_probe_info.suspend_state == USBOS_SUSPEND_STATE_SUSPEND_PENDING) ? 1 : 0;
+
+ spin_lock_irqsave(&usbos_info->rxlock, flags);
+ dbus_usbos_req_del(req, &usbos_info->rxposted_lock);
+ rxposted = atomic_dec_return(&usbos_info->rxposted);
+ rxallocated = atomic_read(&usbos_info->rxallocated);
+ spin_unlock_irqrestore(&usbos_info->rxlock, flags);
+
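+ /* Refill the RX pool when the number of posted RX URBs drains to the low watermark */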
+ if ((rxallocated < usbos_info->pub->nrxq) && (!status) &&
+ (rxposted == DBUS_USB_RXQUEUE_LOWER_WATERMARK)) {
+ DBUSTRACE(("%s: need more rx buf: rxallocated %d rxposted %d!\n",
+ __FUNCTION__, rxallocated, rxposted));
+ dbus_usbos_urbreqs_alloc(usbos_info,
+ MIN(DBUS_USB_RXQUEUE_BATCH_ADD,
+ usbos_info->pub->nrxq - rxallocated), TRUE);
+ }
+
+ /* Handle errors */
+ if (status) {
+ /*
+ * Linux 2.4 disconnect: -ENOENT or -EILSEQ for CRC error; rmmod: -ENOENT
+ * Linux 2.6 disconnect: -EPROTO, rmmod: -ESHUTDOWN
+ */
+ if ((status == -ENOENT && (!killed)) || status == -ESHUTDOWN) {
+ /* NOTE: unlink() can not be called from URB callback().
+ * Do not call dbusos_stop() here.
+ */
+ DBUSTRACE(("%s rx error %d\n", __FUNCTION__, status));
+ dbus_usbos_state_change(usbos_info, DBUS_STATE_DOWN);
+ } else if (status == -EPROTO) {
+ DBUSTRACE(("%s rx error %d\n", __FUNCTION__, status));
+ } else if (killed && (status == -EHOSTUNREACH || status == -ENOENT)) {
+ /* Device is suspended */
+ } else {
+ DBUSTRACE(("%s rx error %d\n", __FUNCTION__, status));
+ dbus_usbos_errhandler(usbos_info, DBUS_ERR_RXFAIL);
+ }
+
+ /* On error, don't submit more URBs yet */
+ rxirb->buf = NULL;
+ rxirb->actual_len = 0;
+ dbus_status = DBUS_ERR_RXFAIL;
+ goto fail;
+ }
+
+ /* Make the skb represent the received urb */
+ rxirb->actual_len = len;
+
+ if (rxirb->actual_len < sizeof(uint32)) {
+ DBUSTRACE(("small pkt len %d, process as ZLP\n", rxirb->actual_len));
+ dbus_status = DBUS_ERR_RXZLP;
+ }
+
+fail:
+#if defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_RXNOCOPY)
+ /* detach the packet from the queue */
+ req->pkt = NULL;
+#endif /* BCM_RPC_NOCOPY || BCM_RPC_RXNOCOPY */
+
+ if (usbos_info->cbarg && usbos_info->cbs) {
+ if (usbos_info->cbs->recv_irb_complete) {
+ usbos_info->cbs->recv_irb_complete(usbos_info->cbarg, rxirb, dbus_status);
+ }
+ }
+
+ dbus_usbos_qenq(&usbos_info->req_rxfreeq, req, &usbos_info->rxfree_lock);
+
+ /* Mark the interface as busy to reset USB autosuspend timer */
+ USB_MARK_LAST_BUSY(usbos_info->usb);
+} /* dbus_usbos_recv_complete_handle */
+
+/** called by Linux kernel when it returns a URB to this driver */
+static void
+dbus_usbos_recv_complete(CALLBACK_ARGS)
+{
+#ifdef USBOS_THREAD
+ dbus_usbos_dispatch_schedule(CALLBACK_ARGS_DATA);
+#else /* !USBOS_THREAD */
+ dbus_usbos_recv_complete_handle(urb->context, urb->actual_length, urb->status);
+#endif /* USBOS_THREAD */
+}
+
+
+/**
+ * If Linux notifies our driver that a control read or write URB has completed, we should notify
+ * the DBUS layer above us (dbus_usb.c in this case).
+ */
+static void
+dbus_usbos_ctl_complete(usbos_info_t *usbos_info, int type, int urbstatus)
+{
+ int status = DBUS_ERR;
+
+ if (usbos_info == NULL)
+ return;
+
+ switch (urbstatus) {
+ case 0:
+ status = DBUS_OK;
+ break;
+ case -EINPROGRESS:
+ case -ENOENT:
+ default:
+#ifdef INTR_EP_ENABLE
+ DBUSERR(("%s:%d fail status %d bus:%d susp:%d intr:%d ctli:%d ctlo:%d\n",
+ __FUNCTION__, type, urbstatus,
+ usbos_info->pub->busstate, g_probe_info.suspend_state,
+ usbos_info->intr_urb_submitted, usbos_info->ctlin_urb_submitted,
+ usbos_info->ctlout_urb_submitted));
+#else
+ DBUSERR(("%s: failed with status %d\n", __FUNCTION__, urbstatus));
+ status = DBUS_ERR;
+ break;
+#endif /* INTR_EP_ENABLE */
+ }
+
+ if (usbos_info->cbarg && usbos_info->cbs) {
+ if (usbos_info->cbs->ctl_complete)
+ usbos_info->cbs->ctl_complete(usbos_info->cbarg, type, status);
+ }
+}
+
+/** called by Linux */
+static void
+dbus_usbos_ctlread_complete(CALLBACK_ARGS)
+{
+ usbos_info_t *usbos_info;
+
+ ASSERT(urb);
+ usbos_info = (usbos_info_t *)urb->context;
+
+ dbus_usbos_ctl_complete(usbos_info, DBUS_CBCTL_READ, urb->status);
+
+#ifdef USBOS_THREAD
+ if (usbos_info->rxctl_deferrespok) {
+ usbos_info->ctl_read.bRequestType = USB_DIR_IN | USB_TYPE_CLASS |
+ USB_RECIP_INTERFACE;
+ usbos_info->ctl_read.bRequest = 1;
+ }
+#endif
+
+ up(&usbos_info->ctl_lock);
+
+ USB_AUTOPM_PUT_INTERFACE_ASYNC(g_probe_info.intf);
+}
+
+/** called by Linux */
+static void
+dbus_usbos_ctlwrite_complete(CALLBACK_ARGS)
+{
+ usbos_info_t *usbos_info;
+
+ ASSERT(urb);
+ usbos_info = (usbos_info_t *)urb->context;
+
+ dbus_usbos_ctl_complete(usbos_info, DBUS_CBCTL_WRITE, urb->status);
+
+#ifdef USBOS_TX_THREAD
+ usbos_info->ctl_state = USBOS_REQUEST_STATE_UNSCHEDULED;
+#endif /* USBOS_TX_THREAD */
+
+ up(&usbos_info->ctl_lock);
+
+ USB_AUTOPM_PUT_INTERFACE_ASYNC(g_probe_info.intf);
+}
+
+#ifdef INTR_EP_ENABLE
+/** called by Linux */
+static void
+dbus_usbos_intr_complete(CALLBACK_ARGS)
+{
+ usbos_info_t *usbos_info = (usbos_info_t *)urb->context;
+ bool killed = (g_probe_info.suspend_state == USBOS_SUSPEND_STATE_SUSPEND_PENDING) ? 1 : 0;
+
+ if (usbos_info == NULL || usbos_info->pub == NULL)
+ return;
+ if ((urb->status == -ENOENT && (!killed)) || urb->status == -ESHUTDOWN ||
+ urb->status == -ENODEV) {
+ dbus_usbos_state_change(usbos_info, DBUS_STATE_DOWN);
+ }
+
+ if (usbos_info->pub->busstate == DBUS_STATE_DOWN) {
+ DBUSERR(("%s: intr cb when DBUS down, ignoring\n", __FUNCTION__));
+ return;
+ }
+ dbus_usbos_ctl_complete(usbos_info, DBUS_CBINTR_POLL, urb->status);
+}
+#endif /* INTR_EP_ENABLE */
+
+/**
+ * When the bus is going to sleep or halt, the Linux kernel requires us to take ownership of our
+ * URBs again. Multiple code paths in this file require a list of URBs to be cancelled in a
+ * concurrency-safe manner.
+ */
+static void
+dbus_usbos_unlink(struct list_head *urbreq_q, spinlock_t *lock)
+{
+ urb_req_t *req;
+
+ /* dbus_usbos_recv_complete() adds req back to req_freeq */
+ while ((req = dbus_usbos_qdeq(urbreq_q, lock)) != NULL) {
+ ASSERT(req->urb != NULL);
+ USB_UNLINK_URB(req->urb);
+ }
+}
+
+/** multiple code paths in this file require the bus to stop */
+static void
+dbus_usbos_cancel_all_urbs(usbos_info_t *usbos_info)
+{
+ int rxposted, txposted;
+
+ DBUSTRACE(("%s: unlink all URBs\n", __FUNCTION__));
+
+#ifdef USBOS_TX_THREAD
+ usbos_info->ctl_state = USBOS_REQUEST_STATE_UNSCHEDULED;
+
+ /* Yield the CPU to TX thread so all pending requests are submitted */
+ while (!list_empty(&usbos_info->usbos_tx_list)) {
+ wake_up_interruptible(&usbos_info->usbos_tx_queue_head);
+ OSL_SLEEP(10);
+ }
+#endif /* USBOS_TX_THREAD */
+
+ /* tell Linux kernel to cancel a single intr, ctl and blk URB */
+ if (usbos_info->intr_urb)
+ USB_UNLINK_URB(usbos_info->intr_urb);
+ if (usbos_info->ctl_urb)
+ USB_UNLINK_URB(usbos_info->ctl_urb);
+ if (usbos_info->blk_urb)
+ USB_UNLINK_URB(usbos_info->blk_urb);
+
+ dbus_usbos_unlink(&usbos_info->req_txpostedq, &usbos_info->txposted_lock);
+ dbus_usbos_unlink(&usbos_info->req_rxpostedq, &usbos_info->rxposted_lock);
+
+ /* Wait until the callbacks for all submitted URBs have been called, because the
+ * handler needs to know whether a USB suspend is in progress.
+ */
+ SPINWAIT((atomic_read(&usbos_info->txposted) != 0 ||
+ atomic_read(&usbos_info->rxposted) != 0), 10000);
+
+ txposted = atomic_read(&usbos_info->txposted);
+ rxposted = atomic_read(&usbos_info->rxposted);
+ if (txposted != 0 || rxposted != 0) {
+ DBUSERR(("%s ERROR: REQs posted, rx=%d tx=%d!\n",
+ __FUNCTION__, rxposted, txposted));
+ }
+} /* dbus_usbos_cancel_all_urbs */
+
+/** multiple code paths require the bus to stop */
+static void
+dbusos_stop(usbos_info_t *usbos_info)
+{
+ urb_req_t *req;
+ int rxposted;
+ req = NULL;
+ BCM_REFERENCE(req);
+
+ ASSERT(usbos_info);
+
+ dbus_usbos_state_change(usbos_info, DBUS_STATE_DOWN);
+
+ dbus_usbos_cancel_all_urbs(usbos_info);
+
+#ifdef USBOS_THREAD
+ /* yield the CPU to rx packet thread */
+ while (atomic_read(&usbos_info->usbos_list_cnt) > 0) {
+ wake_up_interruptible(&usbos_info->usbos_queue_head);
+ OSL_SLEEP(3);
+ }
+#endif /* USBOS_THREAD */
+
+ rxposted = atomic_read(&usbos_info->rxposted);
+ if (rxposted > 0) {
+ DBUSERR(("%s ERROR: rx REQs posted=%d in stop!\n", __FUNCTION__,
+ rxposted));
+ }
+
+ ASSERT(atomic_read(&usbos_info->txposted) == 0 && rxposted == 0);
+
+} /* dbusos_stop */
+
+#if defined(USB_SUSPEND_AVAILABLE)
+
+/**
+ * The Linux kernel sports a 'USB autosuspend' feature. See: http://lwn.net/Articles/373550/
+ * The suspend method is called by the Linux kernel to warn the driver that the device is going to
+ * be suspended. If the driver returns a negative error code, the suspend will be aborted. If the
+ * driver returns 0, it must cancel all outstanding URBs (usb_kill_urb()) and not submit any more.
+ */
+static int
+dbus_usbos_suspend(struct usb_interface *intf,
+ pm_message_t message)
+{
+ DBUSERR(("%s suspend state: %d\n", __FUNCTION__, g_probe_info.suspend_state));
+ /* DHD for full dongle model */
+ g_probe_info.suspend_state = USBOS_SUSPEND_STATE_SUSPEND_PENDING;
+ dbus_usbos_state_change((usbos_info_t*)g_probe_info.usbos_info, DBUS_STATE_SLEEP);
+ dbus_usbos_cancel_all_urbs((usbos_info_t*)g_probe_info.usbos_info);
+ g_probe_info.suspend_state = USBOS_SUSPEND_STATE_SUSPENDED;
+
+ return 0;
+}
+
+/**
+ * The resume method is called to tell the driver that the device has been resumed and the driver
+ * can return to normal operation. URBs may once more be submitted.
+ */
+static int dbus_usbos_resume(struct usb_interface *intf)
+{
+ DBUSERR(("%s Device resumed\n", __FUNCTION__));
+
+ dbus_usbos_state_change((usbos_info_t*)g_probe_info.usbos_info, DBUS_STATE_UP);
+ g_probe_info.suspend_state = USBOS_SUSPEND_STATE_DEVICE_ACTIVE;
+ return 0;
+}
+
+/**
+ * This function is called directly by the Linux kernel when the suspended device has been reset
+ * instead of resumed.
+ */
+static int dbus_usbos_reset_resume(struct usb_interface *intf)
+{
+ DBUSERR(("%s Device reset resumed\n", __FUNCTION__));
+
+ /* The device may have lost power, so a firmware download may be required */
+ dbus_usbos_state_change((usbos_info_t*)g_probe_info.usbos_info, DBUS_STATE_DL_NEEDED);
+ g_probe_info.suspend_state = USBOS_SUSPEND_STATE_DEVICE_ACTIVE;
+ return 0;
+}
+
+#endif /* USB_SUSPEND_AVAILABLE */
+
+/**
+ * Called by the Linux kernel at initialization time; the kernel wants to know whether our driver
+ * will accept the caller-supplied USB interface. Note that USB drivers are bound to interfaces,
+ * not to USB devices.
+ */
+#ifdef KERNEL26
+#define DBUS_USBOS_PROBE() static int dbus_usbos_probe(struct usb_interface *intf, const struct usb_device_id *id)
+#define DBUS_USBOS_DISCONNECT() static void dbus_usbos_disconnect(struct usb_interface *intf)
+#else
+#define DBUS_USBOS_PROBE() static void * dbus_usbos_probe(struct usb_device *usb, unsigned int ifnum, const struct usb_device_id *id)
+#define DBUS_USBOS_DISCONNECT() static void dbus_usbos_disconnect(struct usb_device *usb, void *ptr)
+#endif /* KERNEL26 */
+
+DBUS_USBOS_PROBE()
+{
+ int ep;
+ struct usb_endpoint_descriptor *endpoint;
+ int ret = 0;
+#ifdef KERNEL26
+ struct usb_device *usb = interface_to_usbdev(intf);
+#else
+ int claimed = 0;
+#endif
+ int num_of_eps;
+#ifdef BCMUSBDEV_COMPOSITE
+ int wlan_if = -1;
+ bool intr_ep = FALSE;
+#endif /* BCMUSBDEV_COMPOSITE */
+ wifi_adapter_info_t *adapter;
+
+ DHD_MUTEX_LOCK();
+
+ DBUSERR(("%s: bus num(busnum)=%d, slot num (portnum)=%d\n", __FUNCTION__,
+ usb->bus->busnum, usb->portnum));
+ adapter = dhd_wifi_platform_attach_adapter(USB_BUS, usb->bus->busnum,
+ usb->portnum, WIFI_STATUS_POWER_ON);
+ if (adapter == NULL) {
+ DBUSERR(("%s: can't find adapter info for this chip\n", __FUNCTION__));
+ goto fail;
+ }
+
+#ifdef BCMUSBDEV_COMPOSITE
+ wlan_if = dbus_usbos_intf_wlan(usb);
+#ifdef KERNEL26
+ if ((wlan_if >= 0) && (IFPTR(usb, wlan_if) == intf))
+#else
+ if (wlan_if == ifnum)
+#endif /* KERNEL26 */
+ {
+#endif /* BCMUSBDEV_COMPOSITE */
+ g_probe_info.usb = usb;
+ g_probe_info.dldone = TRUE;
+#ifdef BCMUSBDEV_COMPOSITE
+ } else {
+ DBUSTRACE(("dbus_usbos_probe: skip probe for non WLAN interface\n"));
+ ret = BCME_UNSUPPORTED;
+ goto fail;
+ }
+#endif /* BCMUSBDEV_COMPOSITE */
+
+#ifdef KERNEL26
+ g_probe_info.intf = intf;
+#endif /* KERNEL26 */
+
+#ifdef BCMUSBDEV_COMPOSITE
+ if (IFDESC(usb, wlan_if).bInterfaceNumber > USB_COMPIF_MAX)
+#else
+ if (IFDESC(usb, CONTROL_IF).bInterfaceNumber)
+#endif /* BCMUSBDEV_COMPOSITE */
+ {
+ ret = -1;
+ goto fail;
+ }
+ if (id != NULL) {
+ g_probe_info.vid = id->idVendor;
+ g_probe_info.pid = id->idProduct;
+ }
+
+#ifdef KERNEL26
+ usb_set_intfdata(intf, &g_probe_info);
+#endif
+
+ /* Check that the device supports only one configuration */
+ if (usb->descriptor.bNumConfigurations != 1) {
+ ret = -1;
+ goto fail;
+ }
+
+ if (usb->descriptor.bDeviceClass != USB_CLASS_VENDOR_SPEC) {
+#ifdef BCMUSBDEV_COMPOSITE
+ if ((usb->descriptor.bDeviceClass != USB_CLASS_MISC) &&
+ (usb->descriptor.bDeviceClass != USB_CLASS_WIRELESS)) {
+#endif /* BCMUSBDEV_COMPOSITE */
+ ret = -1;
+ goto fail;
+#ifdef BCMUSBDEV_COMPOSITE
+ }
+#endif /* BCMUSBDEV_COMPOSITE */
+ }
+
+ /*
+ * Only the BDC interface configuration is supported:
+ * Device class: USB_CLASS_VENDOR_SPEC
+ * if0 class: USB_CLASS_VENDOR_SPEC
+ * if0/ep0: control
+ * if0/ep1: bulk in
+ * if0/ep2: bulk out (ok if swapped with bulk in)
+ */
+ if (CONFIGDESC(usb)->bNumInterfaces != 1) {
+#ifdef BCMUSBDEV_COMPOSITE
+ if (CONFIGDESC(usb)->bNumInterfaces > USB_COMPIF_MAX) {
+#endif /* BCMUSBDEV_COMPOSITE */
+ ret = -1;
+ goto fail;
+#ifdef BCMUSBDEV_COMPOSITE
+ }
+#endif /* BCMUSBDEV_COMPOSITE */
+ }
+
+ /* Check interface */
+#ifndef KERNEL26
+#ifdef BCMUSBDEV_COMPOSITE
+ if (usb_interface_claimed(IFPTR(usb, wlan_if)))
+#else
+ if (usb_interface_claimed(IFPTR(usb, CONTROL_IF)))
+#endif /* BCMUSBDEV_COMPOSITE */
+ {
+ ret = -1;
+ goto fail;
+ }
+#endif /* !KERNEL26 */
+
+#ifdef BCMUSBDEV_COMPOSITE
+ if ((IFDESC(usb, wlan_if).bInterfaceClass != USB_CLASS_VENDOR_SPEC ||
+ IFDESC(usb, wlan_if).bInterfaceSubClass != 2 ||
+ IFDESC(usb, wlan_if).bInterfaceProtocol != 0xff) &&
+ (IFDESC(usb, wlan_if).bInterfaceClass != USB_CLASS_MISC ||
+ IFDESC(usb, wlan_if).bInterfaceSubClass != USB_SUBCLASS_COMMON ||
+ IFDESC(usb, wlan_if).bInterfaceProtocol != USB_PROTO_IAD))
+#else
+ if (IFDESC(usb, CONTROL_IF).bInterfaceClass != USB_CLASS_VENDOR_SPEC ||
+ IFDESC(usb, CONTROL_IF).bInterfaceSubClass != 2 ||
+ IFDESC(usb, CONTROL_IF).bInterfaceProtocol != 0xff)
+#endif /* BCMUSBDEV_COMPOSITE */
+ {
+#ifdef BCMUSBDEV_COMPOSITE
+ DBUSERR(("%s: invalid control interface: class %d, subclass %d, proto %d\n",
+ __FUNCTION__,
+ IFDESC(usb, wlan_if).bInterfaceClass,
+ IFDESC(usb, wlan_if).bInterfaceSubClass,
+ IFDESC(usb, wlan_if).bInterfaceProtocol));
+#else
+ DBUSERR(("%s: invalid control interface: class %d, subclass %d, proto %d\n",
+ __FUNCTION__,
+ IFDESC(usb, CONTROL_IF).bInterfaceClass,
+ IFDESC(usb, CONTROL_IF).bInterfaceSubClass,
+ IFDESC(usb, CONTROL_IF).bInterfaceProtocol));
+#endif /* BCMUSBDEV_COMPOSITE */
+ ret = -1;
+ goto fail;
+ }
+
+ /* Check control endpoint */
+#ifdef BCMUSBDEV_COMPOSITE
+ endpoint = &IFEPDESC(usb, wlan_if, 0);
+#else
+ endpoint = &IFEPDESC(usb, CONTROL_IF, 0);
+#endif /* BCMUSBDEV_COMPOSITE */
+ if ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) != USB_ENDPOINT_XFER_INT) {
+#ifdef BCMUSBDEV_COMPOSITE
+ if ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) !=
+ USB_ENDPOINT_XFER_BULK) {
+#endif /* BCMUSBDEV_COMPOSITE */
+ DBUSERR(("%s: invalid control endpoint %d\n",
+ __FUNCTION__, endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK));
+ ret = -1;
+ goto fail;
+#ifdef BCMUSBDEV_COMPOSITE
+ }
+#endif /* BCMUSBDEV_COMPOSITE */
+ }
+
+#ifdef BCMUSBDEV_COMPOSITE
+ if ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_INT) {
+#endif /* BCMUSBDEV_COMPOSITE */
+ g_probe_info.intr_pipe =
+ usb_rcvintpipe(usb, endpoint->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
+#ifdef BCMUSBDEV_COMPOSITE
+ intr_ep = TRUE;
+ }
+#endif /* BCMUSBDEV_COMPOSITE */
+
+#ifndef KERNEL26
+ /* Claim interface */
+#ifdef BCMUSBDEV_COMPOSITE
+ usb_driver_claim_interface(&dbus_usbdev, IFPTR(usb, wlan_if), &g_probe_info);
+#else
+ usb_driver_claim_interface(&dbus_usbdev, IFPTR(usb, CONTROL_IF), &g_probe_info);
+#endif /* BCMUSBDEV_COMPOSITE */
+ claimed = 1;
+#endif /* !KERNEL26 */
+ g_probe_info.rx_pipe = 0;
+ g_probe_info.rx_pipe2 = 0;
+ g_probe_info.tx_pipe = 0;
+#ifdef BCMUSBDEV_COMPOSITE
+ if (intr_ep)
+ ep = 1;
+ else
+ ep = 0;
+ num_of_eps = IFDESC(usb, wlan_if).bNumEndpoints - 1;
+#else
+ num_of_eps = IFDESC(usb, BULK_IF).bNumEndpoints - 1;
+#endif /* BCMUSBDEV_COMPOSITE */
+
+ if ((num_of_eps != 2) && (num_of_eps != 3)) {
+#ifdef BCMUSBDEV_COMPOSITE
+ if (num_of_eps > 7)
+#endif /* BCMUSBDEV_COMPOSITE */
+ ASSERT(0);
+ }
+ /* Check data endpoints and get pipes */
+#ifdef BCMUSBDEV_COMPOSITE
+ for (; ep <= num_of_eps; ep++)
+#else
+ for (ep = 1; ep <= num_of_eps; ep++)
+#endif /* BCMUSBDEV_COMPOSITE */
+ {
+#ifdef BCMUSBDEV_COMPOSITE
+ endpoint = &IFEPDESC(usb, wlan_if, ep);
+#else
+ endpoint = &IFEPDESC(usb, BULK_IF, ep);
+#endif /* BCMUSBDEV_COMPOSITE */
+ if ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) !=
+ USB_ENDPOINT_XFER_BULK) {
+ DBUSERR(("%s: invalid data endpoint %d\n",
+ __FUNCTION__, ep));
+ ret = -1;
+ goto fail;
+ }
+
+ if ((endpoint->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN) {
+ /* direction: dongle->host */
+ if (!g_probe_info.rx_pipe) {
+ g_probe_info.rx_pipe = usb_rcvbulkpipe(usb,
+ (endpoint->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK));
+ } else {
+ g_probe_info.rx_pipe2 = usb_rcvbulkpipe(usb,
+ (endpoint->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK));
+ }
+
+ } else
+ g_probe_info.tx_pipe = usb_sndbulkpipe(usb, (endpoint->bEndpointAddress &
+ USB_ENDPOINT_NUMBER_MASK));
+ }
+
+ /* Allocate interrupt URB and data buffer */
+ /* RNDIS says 8-byte intr, our old drivers used 4-byte */
+#ifdef BCMUSBDEV_COMPOSITE
+ g_probe_info.intr_size = (IFEPDESC(usb, wlan_if, 0).wMaxPacketSize == 16) ? 8 : 4;
+ g_probe_info.interval = IFEPDESC(usb, wlan_if, 0).bInterval;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 21))
+ usb->quirks |= USB_QUIRK_NO_SET_INTF;
+#endif
+#else
+ g_probe_info.intr_size = (IFEPDESC(usb, CONTROL_IF, 0).wMaxPacketSize == 16) ? 8 : 4;
+ g_probe_info.interval = IFEPDESC(usb, CONTROL_IF, 0).bInterval;
+#endif /* BCMUSBDEV_COMPOSITE */
+
+#ifndef KERNEL26
+ /* usb_fill_int_urb does the interval decoding in 2.6 */
+ if (usb->speed == USB_SPEED_HIGH)
+ g_probe_info.interval = 1 << (g_probe_info.interval - 1);
+#endif
+ if (usb->speed == USB_SPEED_SUPER) {
+ g_probe_info.device_speed = SUPER_SPEED;
+ DBUSERR(("super speed device detected\n"));
+ } else if (usb->speed == USB_SPEED_HIGH) {
+ g_probe_info.device_speed = HIGH_SPEED;
+ DBUSERR(("high speed device detected\n"));
+ } else {
+ g_probe_info.device_speed = FULL_SPEED;
+ DBUSERR(("full speed device detected\n"));
+ }
+ if (g_probe_info.dereged == FALSE && probe_cb) {
+ disc_arg = probe_cb(probe_arg, "", USB_BUS, usb->bus->busnum, usb->portnum, 0);
+ }
+
+ g_probe_info.disc_cb_done = FALSE;
+
+#ifdef KERNEL26
+ intf->needs_remote_wakeup = 1;
+#endif /* KERNEL26 */
+ DHD_MUTEX_UNLOCK();
+
+ /* Success */
+#ifdef KERNEL26
+ return DBUS_OK;
+#else
+ usb_inc_dev_use(usb);
+ return &g_probe_info;
+#endif
+
+fail:
+ printf("%s: Exit ret=%d\n", __FUNCTION__, ret);
+#ifdef BCMUSBDEV_COMPOSITE
+ if (ret != BCME_UNSUPPORTED)
+#endif /* BCMUSBDEV_COMPOSITE */
+ DBUSERR(("%s: failed with errno %d\n", __FUNCTION__, ret));
+#ifndef KERNEL26
+ if (claimed)
+#ifdef BCMUSBDEV_COMPOSITE
+ usb_driver_release_interface(&dbus_usbdev, IFPTR(usb, wlan_if));
+#else
+ usb_driver_release_interface(&dbus_usbdev, IFPTR(usb, CONTROL_IF));
+#endif /* BCMUSBDEV_COMPOSITE */
+#endif /* !KERNEL26 */
+
+ DHD_MUTEX_UNLOCK();
+#ifdef KERNEL26
+ usb_set_intfdata(intf, NULL);
+ return ret;
+#else
+ return NULL;
+#endif
+} /* dbus_usbos_probe */
+
+/** Called by Linux kernel, is the counter part of dbus_usbos_probe() */
+DBUS_USBOS_DISCONNECT()
+{
+#ifdef KERNEL26
+ struct usb_device *usb = interface_to_usbdev(intf);
+ probe_info_t *probe_usb_init_data = usb_get_intfdata(intf);
+#else
+ probe_info_t *probe_usb_init_data = (probe_info_t *) ptr;
+#endif
+ usbos_info_t *usbos_info;
+
+ DHD_MUTEX_LOCK();
+
+ DBUSERR(("%s: bus num(busnum)=%d, slot num (portnum)=%d\n", __FUNCTION__,
+ usb->bus->busnum, usb->portnum));
+
+ if (probe_usb_init_data) {
+ usbos_info = (usbos_info_t *) probe_usb_init_data->usbos_info;
+ if (usbos_info) {
+ if ((probe_usb_init_data->dereged == FALSE) && disconnect_cb && disc_arg) {
+ disconnect_cb(disc_arg);
+ disc_arg = NULL;
+ probe_usb_init_data->disc_cb_done = TRUE;
+ }
+ }
+ }
+
+ if (usb) {
+#ifndef KERNEL26
+#ifdef BCMUSBDEV_COMPOSITE
+ usb_driver_release_interface(&dbus_usbdev, IFPTR(usb, wlan_if));
+#else
+ usb_driver_release_interface(&dbus_usbdev, IFPTR(usb, CONTROL_IF));
+#endif /* BCMUSBDEV_COMPOSITE */
+ usb_dec_dev_use(usb);
+#endif /* !KERNEL26 */
+ }
+ DHD_MUTEX_UNLOCK();
+} /* dbus_usbos_disconnect */
+
+#define LOOPBACK_PKT_START 0xBABE1234
+
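+/** Returns TRUE if the buffer starts with the loopback test marker */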
+bool is_loopback_pkt(void *buf)
+{
+ uint32 *buf_ptr = (uint32 *) buf;
+
+ if (*buf_ptr == LOOPBACK_PKT_START)
+ return TRUE;
+ return FALSE;
+}
+
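+/** Checks the payload pattern of an echoed loopback packet; bumps loopback_rx_cnt on a match */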
+int matches_loopback_pkt(void *buf)
+{
+ int i, j;
+ unsigned char *cbuf = (unsigned char *) buf;
+
+ for (i = 4; i < loopback_size; i++) {
+ if (cbuf[i] != (i % 256)) {
+ printf("%s: mismatch at i=%d %d : ", __FUNCTION__, i, cbuf[i]);
+ for (j = i; ((j < i+ 16) && (j < loopback_size)); j++) {
+ printf("%d ", cbuf[j]);
+ }
+ printf("\n");
+ return 0;
+ }
+ }
+ loopback_rx_cnt++;
+ return 1;
+}
+
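+/** Test utility: transmits 'cnt' loopback packets of 'size' bytes and waits for them to be echoed back */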
+int dbus_usbos_loopback_tx(void *usbos_info_ptr, int cnt, int size)
+{
+ usbos_info_t *usbos_info = (usbos_info_t *) usbos_info_ptr;
+ unsigned char *buf;
+ int j;
+ void* p = NULL;
+ int rc, last_rx_cnt;
+ int tx_failed_cnt;
+ int max_size = 1650;
+ int usb_packet_size = 512;
+ int min_packet_size = 10;
+
+ if (size % usb_packet_size == 0) {
+ size = size - 1;
+ DBUSERR(("%s: overriding size=%d \n", __FUNCTION__, size));
+ }
+
+ if (size < min_packet_size) {
+ size = min_packet_size;
+ DBUSERR(("%s: overriding size=%d\n", __FUNCTION__, min_packet_size));
+ }
+ if (size > max_size) {
+ size = max_size;
+ DBUSERR(("%s: overriding size=%d\n", __FUNCTION__, max_size));
+ }
+
+ loopback_tx_cnt = 0;
+ loopback_rx_cnt = 0;
+ tx_failed_cnt = 0;
+ loopback_size = size;
+
+ while (loopback_tx_cnt < cnt) {
+ uint32 *x;
+ int pkt_size = loopback_size;
+
+ p = PKTGET(usbos_info->pub->osh, pkt_size, TRUE);
+ if (p == NULL) {
+ DBUSERR(("%s:%d Failed to allocate packet sz=%d\n",
+ __FUNCTION__, __LINE__, pkt_size));
+ return BCME_ERROR;
+ }
+ x = (uint32*) PKTDATA(usbos_info->pub->osh, p);
+ *x = LOOPBACK_PKT_START;
+ buf = (unsigned char*) x;
+ for (j = 4; j < pkt_size; j++) {
+ buf[j] = j % 256;
+ }
+ rc = dbus_send_buf(usbos_info->pub, buf, pkt_size, p);
+ if (rc != BCME_OK) {
+ DBUSERR(("%s:%d Freeing packet \n", __FUNCTION__, __LINE__));
+ PKTFREE(usbos_info->pub->osh, p, TRUE);
+ dbus_usbos_wait(usbos_info, 1);
+ tx_failed_cnt++;
+ } else {
+ loopback_tx_cnt++;
+ tx_failed_cnt = 0;
+ }
+ if (tx_failed_cnt == 5) {
+ DBUSERR(("%s : Failed to send loopback packets cnt=%d loopback_tx_cnt=%d\n",
+ __FUNCTION__, cnt, loopback_tx_cnt));
+ break;
+ }
+ }
+ printf("Transmitted %d loopback packets of size %d\n", loopback_tx_cnt, loopback_size);
+
+ last_rx_cnt = loopback_rx_cnt;
+ while (loopback_rx_cnt < loopback_tx_cnt) {
+ dbus_usbos_wait(usbos_info, 1);
+ if (loopback_rx_cnt <= last_rx_cnt) {
+ DBUSERR(("%s: Matched rx cnt stuck at %d \n", __FUNCTION__, last_rx_cnt));
+ return BCME_ERROR;
+ }
+ last_rx_cnt = loopback_rx_cnt;
+ }
+ printf("Received %d loopback packets of size %d\n", loopback_tx_cnt, loopback_size);
+
+ return BCME_OK;
+} /* dbus_usbos_loopback_tx */
+
+/**
+ * Higher layer (dbus_usb.c) wants to transmit an I/O Request Block
+ * @param[in] txirb txirb->pkt, if non-NULL, contains a single packet or a chain of packets
+ */
+static int
+dbus_usbos_intf_send_irb(void *bus, dbus_irb_tx_t *txirb)
+{
+ usbos_info_t *usbos_info = (usbos_info_t *) bus;
+ urb_req_t *req, *req_zlp = NULL;
+ int ret = DBUS_OK;
+ unsigned long flags;
+ void *pkt;
+ uint32 buffer_length;
+ uint8 *buf;
+
+ if ((usbos_info == NULL) || !usbos_info->tx_pipe) {
+ return DBUS_ERR;
+ }
+
+ if (txirb->pkt != NULL) {
+ buffer_length = pkttotlen(usbos_info->pub->osh, txirb->pkt);
+ /* In case of multiple packets the values below may be overwritten */
+ txirb->send_buf = NULL;
+ buf = PKTDATA(usbos_info->pub->osh, txirb->pkt);
+ } else { /* txirb->buf != NULL */
+ ASSERT(txirb->buf != NULL);
+ ASSERT(txirb->send_buf == NULL);
+ buffer_length = txirb->len;
+ buf = txirb->buf;
+ }
+
+ if (!(req = dbus_usbos_qdeq(&usbos_info->req_txfreeq, &usbos_info->txfree_lock))) {
+ DBUSERR(("%s No free URB!\n", __FUNCTION__));
+ return DBUS_ERR_TXDROP;
+ }
+
+ /* If not using standard Linux kernel functionality for handling Zero Length Packet(ZLP),
+ * the dbus needs to generate ZLP when length is multiple of MaxPacketSize.
+ */
+#ifndef WL_URB_ZPKT
+ if (!(buffer_length % usbos_info->maxps)) {
+ if (!(req_zlp =
+ dbus_usbos_qdeq(&usbos_info->req_txfreeq, &usbos_info->txfree_lock))) {
+ DBUSERR(("%s No free URB for ZLP!\n", __FUNCTION__));
+ dbus_usbos_qenq(&usbos_info->req_txfreeq, req, &usbos_info->txfree_lock);
+ return DBUS_ERR_TXDROP;
+ }
+
+ /* No txirb, so that dbus_usbos_send_complete can differentiate between
+ * DATA and ZLP.
+ */
+ req_zlp->arg = NULL;
+ req_zlp->usbinfo = usbos_info;
+ req_zlp->buf_len = 0;
+
+ usb_fill_bulk_urb(req_zlp->urb, usbos_info->usb, usbos_info->tx_pipe, NULL,
+ 0, (usb_complete_t)dbus_usbos_send_complete, req_zlp);
+
+ req_zlp->urb->transfer_flags |= URB_QUEUE_BULK;
+ }
+#endif /* !WL_URB_ZPKT */
+
+#ifndef USBOS_TX_THREAD
+ /* Disable USB autosuspend until this request completes, request USB resume if needed.
+ * Because this call runs asynchronously, there is no guarantee the bus is resumed before
+ * the URB is submitted, and the URB might be dropped. Use USBOS_TX_THREAD to avoid
+ * this.
+ */
+ USB_AUTOPM_GET_INTERFACE_ASYNC(g_probe_info.intf);
+#endif /* !USBOS_TX_THREAD */
+
+ spin_lock_irqsave(&usbos_info->txlock, flags);
+
+ req->arg = txirb;
+ req->usbinfo = usbos_info;
+ req->buf_len = 0;
+
+ /* Prepare the URB */
+ if (txirb->pkt != NULL) {
+ uint32 pktlen;
+ uint8 *transfer_buf;
+
+ /* For multiple packets, allocate contiguous buffer and copy packet data to it */
+ if (PKTNEXT(usbos_info->pub->osh, txirb->pkt)) {
+ transfer_buf = MALLOC(usbos_info->pub->osh, buffer_length);
+ if (!transfer_buf) {
+ ret = DBUS_ERR_TXDROP;
+ DBUSERR(("fail to alloc to usb buffer\n"));
+ goto fail;
+ }
+
+ pkt = txirb->pkt;
+ txirb->send_buf = transfer_buf;
+ req->buf_len = buffer_length;
+
+ while (pkt) {
+ pktlen = PKTLEN(usbos_info->pub->osh, pkt);
+ bcopy(PKTDATA(usbos_info->pub->osh, pkt), transfer_buf, pktlen);
+ transfer_buf += pktlen;
+ pkt = PKTNEXT(usbos_info->pub->osh, pkt);
+ }
+
+ ASSERT(((uint8 *) txirb->send_buf + buffer_length) == transfer_buf);
+
+ /* Overwrite buf pointer with pointer to allocated contiguous transfer_buf
+ */
+ buf = txirb->send_buf;
+ }
+ }
+
+ usb_fill_bulk_urb(req->urb, usbos_info->usb, usbos_info->tx_pipe, buf,
+ buffer_length, (usb_complete_t)dbus_usbos_send_complete, req);
+
+ req->urb->transfer_flags |= URB_QUEUE_BULK;
+
+#ifdef USBOS_TX_THREAD
+ /* Enqueue TX request, the TX thread will resume the bus if needed and submit
+ * it asynchronously
+ */
+ dbus_usbos_qenq(&usbos_info->usbos_tx_list, req, &usbos_info->usbos_tx_list_lock);
+ if (req_zlp != NULL) {
+ dbus_usbos_qenq(&usbos_info->usbos_tx_list, req_zlp,
+ &usbos_info->usbos_tx_list_lock);
+ }
+ spin_unlock_irqrestore(&usbos_info->txlock, flags);
+
+ wake_up_interruptible(&usbos_info->usbos_tx_queue_head);
+ return DBUS_OK;
+#else
+ if ((ret = USB_SUBMIT_URB(req->urb))) {
+ ret = DBUS_ERR_TXDROP;
+ goto fail;
+ }
+
+ dbus_usbos_qenq(&usbos_info->req_txpostedq, req, &usbos_info->txposted_lock);
+ atomic_inc(&usbos_info->txposted);
+
+ if (req_zlp != NULL) {
+ if ((ret = USB_SUBMIT_URB(req_zlp->urb))) {
+ DBUSERR(("failed to submit ZLP URB!\n"));
+ ASSERT(0);
+ ret = DBUS_ERR_TXDROP;
+ goto fail2;
+ }
+
+ dbus_usbos_qenq(&usbos_info->req_txpostedq, req_zlp, &usbos_info->txposted_lock);
+ /* Also increment txposted for zlp packet, as it will be decremented in
+ * dbus_usbos_send_complete()
+ */
+ atomic_inc(&usbos_info->txposted);
+ }
+
+ spin_unlock_irqrestore(&usbos_info->txlock, flags);
+ return DBUS_OK;
+#endif /* USBOS_TX_THREAD */
+
+fail:
+ if (txirb->send_buf != NULL) {
+ MFREE(usbos_info->pub->osh, txirb->send_buf, req->buf_len);
+ txirb->send_buf = NULL;
+ req->buf_len = 0;
+ }
+ dbus_usbos_qenq(&usbos_info->req_txfreeq, req, &usbos_info->txfree_lock);
+#ifndef USBOS_TX_THREAD
+fail2:
+#endif
+ if (req_zlp != NULL) {
+ dbus_usbos_qenq(&usbos_info->req_txfreeq, req_zlp, &usbos_info->txfree_lock);
+ }
+
+ spin_unlock_irqrestore(&usbos_info->txlock, flags);
+
+#ifndef USBOS_TX_THREAD
+ USB_AUTOPM_PUT_INTERFACE_ASYNC(g_probe_info.intf);
+#endif /* !USBOS_TX_THREAD */
+
+ return ret;
+} /* dbus_usbos_intf_send_irb */
+
+/** Higher layer (dbus_usb.c) recycles a received (and used) packet. */
+static int
+dbus_usbos_intf_recv_irb(void *bus, dbus_irb_rx_t *rxirb)
+{
+ usbos_info_t *usbos_info = (usbos_info_t *) bus;
+ int ret = DBUS_OK;
+
+ if (usbos_info == NULL)
+ return DBUS_ERR;
+
+ ret = dbus_usbos_recv_urb_submit(usbos_info, rxirb, 0);
+ return ret;
+}
+
+static int
+dbus_usbos_intf_recv_irb_from_ep(void *bus, dbus_irb_rx_t *rxirb, uint32 ep_idx)
+{
+ usbos_info_t *usbos_info = (usbos_info_t *) bus;
+ int ret = DBUS_OK;
+
+ if (usbos_info == NULL)
+ return DBUS_ERR;
+
+#ifdef INTR_EP_ENABLE
+ /* By specifying the ep_idx value of 0xff, the cdc layer is asking to
+ * submit an interrupt URB
+ */
+ if (rxirb == NULL && ep_idx == 0xff) {
+ /* submit intr URB */
+ if ((ret = USB_SUBMIT_URB(usbos_info->intr_urb)) < 0) {
+ DBUSERR(("%s intr USB_SUBMIT_URB failed, status %d\n",
+ __FUNCTION__, ret));
+ }
+ return ret;
+ }
+#else
+ if (rxirb == NULL) {
+ return DBUS_ERR;
+ }
+#endif /* INTR_EP_ENABLE */
+
+ ret = dbus_usbos_recv_urb_submit(usbos_info, rxirb, ep_idx);
+ return ret;
+}
+
+/** Higher layer (dbus_usb.c) wants to cancel an IRB */
+static int
+dbus_usbos_intf_cancel_irb(void *bus, dbus_irb_tx_t *txirb)
+{
+ usbos_info_t *usbos_info = (usbos_info_t *) bus;
+
+ if (usbos_info == NULL)
+ return DBUS_ERR;
+
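+ /* Cancelling individual IRBs is not supported; always report failure */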
+ return DBUS_ERR;
+}
+
+/** Only one CTL transfer can be pending at any time. This function may block. */
+static int
+dbus_usbos_intf_send_ctl(void *bus, uint8 *buf, int len)
+{
+ usbos_info_t *usbos_info = (usbos_info_t *) bus;
+ uint16 size;
+#ifndef USBOS_TX_THREAD
+ int status;
+#endif /* USBOS_TX_THREAD */
+
+ if ((usbos_info == NULL) || (buf == NULL) || (len == 0))
+ return DBUS_ERR;
+
+ if (usbos_info->ctl_urb == NULL)
+ return DBUS_ERR;
+
+ /* Block until a pending CTL transfer has completed */
+ if (down_interruptible(&usbos_info->ctl_lock) != 0) {
+ return DBUS_ERR_TXCTLFAIL;
+ }
+
+#ifdef USBOS_TX_THREAD
+ ASSERT(usbos_info->ctl_state == USBOS_REQUEST_STATE_UNSCHEDULED);
+#else
+ /* Disable USB autosuspend until this request completes, request USB resume if needed.
+ * Because this call runs asynchronously, there is no guarantee the bus is resumed before
+ * the URB is submitted, and the URB might be dropped. Use USBOS_TX_THREAD to avoid
+ * this.
+ */
+ USB_AUTOPM_GET_INTERFACE_ASYNC(g_probe_info.intf);
+#endif /* USBOS_TX_THREAD */
+
+ size = len;
+ usbos_info->ctl_write.wLength = cpu_to_le16p(&size);
+ usbos_info->ctl_urb->transfer_buffer_length = size;
+
+ usb_fill_control_urb(usbos_info->ctl_urb,
+ usbos_info->usb,
+ usb_sndctrlpipe(usbos_info->usb, 0),
+ (unsigned char *) &usbos_info->ctl_write,
+ buf, size, (usb_complete_t)dbus_usbos_ctlwrite_complete, usbos_info);
+
+#ifdef USBOS_TX_THREAD
+ /* Enqueue CTRL request for transmission by the TX thread. The
+ * USB bus will first be resumed if needed.
+ */
+ usbos_info->ctl_state = USBOS_REQUEST_STATE_SCHEDULED;
+ wake_up_interruptible(&usbos_info->usbos_tx_queue_head);
+#else
+ status = USB_SUBMIT_URB(usbos_info->ctl_urb);
+ if (status < 0) {
+ DBUSERR(("%s: usb_submit_urb failed %d\n", __FUNCTION__, status));
+ up(&usbos_info->ctl_lock);
+
+ USB_AUTOPM_PUT_INTERFACE_ASYNC(g_probe_info.intf);
+
+ return DBUS_ERR_TXCTLFAIL;
+ }
+#endif /* USBOS_TX_THREAD */
+
+ return DBUS_OK;
+} /* dbus_usbos_intf_send_ctl */
+
+/** This function does not seem to be called by anyone, including dbus_usb.c */
+static int
+dbus_usbos_intf_recv_ctl(void *bus, uint8 *buf, int len)
+{
+ usbos_info_t *usbos_info = (usbos_info_t *) bus;
+ int status;
+ uint16 size;
+
+ if ((usbos_info == NULL) || (buf == NULL) || (len == 0))
+ return DBUS_ERR;
+
+ if (usbos_info->ctl_urb == NULL)
+ return DBUS_ERR;
+
+ /* Block until a pending CTRL transfer has completed */
+ if (down_interruptible(&usbos_info->ctl_lock) != 0) {
+ return DBUS_ERR_TXCTLFAIL;
+ }
+
+ /* Disable USB autosuspend until this request completes, request USB resume if needed. */
+ USB_AUTOPM_GET_INTERFACE_ASYNC(g_probe_info.intf);
+
+ size = len;
+ usbos_info->ctl_read.wLength = cpu_to_le16p(&size);
+ usbos_info->ctl_urb->transfer_buffer_length = size;
+
+ if (usbos_info->rxctl_deferrespok) {
+ /* BMAC model */
+ usbos_info->ctl_read.bRequestType = USB_DIR_IN | USB_TYPE_VENDOR |
+ USB_RECIP_INTERFACE;
+ usbos_info->ctl_read.bRequest = DL_DEFER_RESP_OK;
+ } else {
+ /* full dongle model */
+ usbos_info->ctl_read.bRequestType = USB_DIR_IN | USB_TYPE_CLASS |
+ USB_RECIP_INTERFACE;
+ usbos_info->ctl_read.bRequest = 1;
+ }
+
+ usb_fill_control_urb(usbos_info->ctl_urb,
+ usbos_info->usb,
+ usb_rcvctrlpipe(usbos_info->usb, 0),
+ (unsigned char *) &usbos_info->ctl_read,
+ buf, size, (usb_complete_t)dbus_usbos_ctlread_complete, usbos_info);
+
+ status = USB_SUBMIT_URB(usbos_info->ctl_urb);
+ if (status < 0) {
+ DBUSERR(("%s: usb_submit_urb failed %d\n", __FUNCTION__, status));
+ up(&usbos_info->ctl_lock);
+
+ USB_AUTOPM_PUT_INTERFACE_ASYNC(g_probe_info.intf);
+
+ return DBUS_ERR_RXCTLFAIL;
+ }
+
+ return DBUS_OK;
+}
+
+static int
+dbus_usbos_intf_get_attrib(void *bus, dbus_attrib_t *attrib)
+{
+ usbos_info_t *usbos_info = (usbos_info_t *) bus;
+
+ if ((usbos_info == NULL) || (attrib == NULL))
+ return DBUS_ERR;
+
+ attrib->bustype = DBUS_USB;
+ attrib->vid = g_probe_info.vid;
+ attrib->pid = g_probe_info.pid;
+ attrib->devid = 0x4322;
+
+ attrib->nchan = 1;
+
+ /* MaxPacketSize for USB hi-speed bulk out is 512 bytes
+ * and 64 bytes for full-speed.
+ * When sending pkt > MaxPacketSize, Host SW breaks it
+ * up into multiple packets.
+ */
+ attrib->mtu = usbos_info->maxps;
+
+ return DBUS_OK;
+}
+
+/** Called by higher layer (dbus_usb.c) when it wants to 'up' the USB interface to the dongle */
+static int
+dbus_usbos_intf_up(void *bus)
+{
+ usbos_info_t *usbos_info = (usbos_info_t *) bus;
+ uint16 ifnum;
+#ifdef BCMUSBDEV_COMPOSITE
+ int wlan_if = 0;
+#endif
+ if (usbos_info == NULL)
+ return DBUS_ERR;
+
+ if (usbos_info->usb == NULL)
+ return DBUS_ERR;
+
+#if defined(INTR_EP_ENABLE)
+ /* full dongle use intr EP, bmac doesn't use it */
+ if (usbos_info->intr_urb) {
+ int ret;
+
+ usb_fill_int_urb(usbos_info->intr_urb, usbos_info->usb,
+ usbos_info->intr_pipe, &usbos_info->intr,
+ usbos_info->intr_size, (usb_complete_t)dbus_usbos_intr_complete,
+ usbos_info, usbos_info->interval);
+
+ if ((ret = USB_SUBMIT_URB(usbos_info->intr_urb))) {
+ DBUSERR(("%s USB_SUBMIT_URB failed with status %d\n", __FUNCTION__, ret));
+ return DBUS_ERR;
+ }
+ }
+#endif
+
+ if (usbos_info->ctl_urb) {
+ usbos_info->ctl_in_pipe = usb_rcvctrlpipe(usbos_info->usb, 0);
+ usbos_info->ctl_out_pipe = usb_sndctrlpipe(usbos_info->usb, 0);
+
+#ifdef BCMUSBDEV_COMPOSITE
+ wlan_if = dbus_usbos_intf_wlan(usbos_info->usb);
+ ifnum = cpu_to_le16(IFDESC(usbos_info->usb, wlan_if).bInterfaceNumber);
+#else
+ ifnum = cpu_to_le16(IFDESC(usbos_info->usb, CONTROL_IF).bInterfaceNumber);
+#endif /* BCMUSBDEV_COMPOSITE */
+ /* CTL Write */
+ usbos_info->ctl_write.bRequestType =
+ USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
+ usbos_info->ctl_write.bRequest = 0;
+ usbos_info->ctl_write.wValue = cpu_to_le16(0);
+ usbos_info->ctl_write.wIndex = cpu_to_le16p(&ifnum);
+
+ /* CTL Read */
+ usbos_info->ctl_read.bRequestType =
+ USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
+ usbos_info->ctl_read.bRequest = 1;
+ usbos_info->ctl_read.wValue = cpu_to_le16(0);
+ usbos_info->ctl_read.wIndex = cpu_to_le16p(&ifnum);
+ }
+
+ /* Success, indicate usbos_info is fully up */
+ dbus_usbos_state_change(usbos_info, DBUS_STATE_UP);
+
+ return DBUS_OK;
+} /* dbus_usbos_intf_up */
+
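+/** Called by higher layer (dbus_usb.c) when it wants to bring the interface down */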
+static int
+dbus_usbos_intf_down(void *bus)
+{
+ usbos_info_t *usbos_info = (usbos_info_t *) bus;
+
+ if (usbos_info == NULL)
+ return DBUS_ERR;
+
+ dbusos_stop(usbos_info);
+ return DBUS_OK;
+}
+
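+/** Called by higher layer (dbus_usb.c) to stop all bus traffic */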
+static int
+dbus_usbos_intf_stop(void *bus)
+{
+ usbos_info_t *usbos_info = (usbos_info_t *) bus;
+
+ if (usbos_info == NULL)
+ return DBUS_ERR;
+
+ dbusos_stop(usbos_info);
+ return DBUS_OK;
+}
+
+
+/** Called by higher layer (dbus_usb.c) */
+static int
+dbus_usbos_intf_set_config(void *bus, dbus_config_t *config)
+{
+ int err = DBUS_ERR;
+ usbos_info_t* usbos_info = bus;
+
+ if (config->config_id == DBUS_CONFIG_ID_RXCTL_DEFERRES) {
+ usbos_info->rxctl_deferrespok = config->rxctl_deferrespok;
+ err = DBUS_OK;
+ } else if (config->config_id == DBUS_CONFIG_ID_AGGR_LIMIT) {
+ /* DBUS_CONFIG_ID_AGGR_LIMIT shouldn't be called after probe stage */
+ ASSERT(disc_arg == NULL);
+ ASSERT(config->aggr_param.maxrxsf > 0);
+ ASSERT(config->aggr_param.maxrxsize > 0);
+ if (config->aggr_param.maxrxsize > usbos_info->rxbuf_len) {
+ int state = usbos_info->pub->busstate;
+ dbus_usbos_unlink(&usbos_info->req_rxpostedq, &usbos_info->rxposted_lock);
+ while (atomic_read(&usbos_info->rxposted)) {
+ DBUSTRACE(("%s rxposted is %d, delay 1 ms\n", __FUNCTION__,
+ atomic_read(&usbos_info->rxposted)));
+ dbus_usbos_wait(usbos_info, 1);
+ }
+ usbos_info->rxbuf_len = config->aggr_param.maxrxsize;
+ dbus_usbos_state_change(usbos_info, state);
+ }
+ err = DBUS_OK;
+ }
+
+ return err;
+}
+
+
+/** Called by dbus_usb.c when it wants to download firmware into the dongle */
+bool
+dbus_usbos_dl_cmd(usbos_info_t *usbinfo, uint8 cmd, void *buffer, int buflen)
+{
+ int transferred;
+ int index = 0;
+ char *tmpbuf;
+
+ if ((usbinfo == NULL) || (buffer == NULL) || (buflen == 0))
+ return FALSE;
+
+ tmpbuf = (char *) MALLOC(usbinfo->pub->osh, buflen);
+ if (!tmpbuf) {
+ DBUSERR(("%s: Unable to allocate memory \n", __FUNCTION__));
+ return FALSE;
+ }
+
+#ifdef BCM_REQUEST_FW
+ if (cmd == DL_GO) {
+ index = 1;
+ }
+#endif
+
+ /* Disable USB autosuspend until this request completes, request USB resume if needed. */
+ USB_AUTOPM_GET_INTERFACE(g_probe_info.intf);
+
+ transferred = USB_CONTROL_MSG(usbinfo->usb, usb_rcvctrlpipe(usbinfo->usb, 0),
+ cmd, (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE),
+ 0, index,
+ (void*) tmpbuf, buflen, USB_CTRL_EP_TIMEOUT);
+ if (transferred == buflen) {
+ memcpy(buffer, tmpbuf, buflen);
+ } else {
+ DBUSERR(("%s: usb_control_msg failed %d\n", __FUNCTION__, transferred));
+ }
+
+ USB_AUTOPM_PUT_INTERFACE(g_probe_info.intf);
+
+ MFREE(usbinfo->pub->osh, tmpbuf, buflen);
+ return (transferred == buflen);
+}
+
+/**
+ * Called by dbus_usb.c when it wants to download a buffer into the dongle (e.g. as part of the
+ * download process, when writing nvram variables).
+ */
+int
+dbus_write_membytes(usbos_info_t* usbinfo, bool set, uint32 address, uint8 *data, uint size)
+{
+ hwacc_t hwacc;
+ int write_bytes = 4;
+ int status;
+ int retval = 0;
+
+ DBUSTRACE(("Enter:%s\n", __FUNCTION__));
+
+ /* Read is not supported */
+ if (set == 0) {
+ DBUSERR(("Currently read is not supported!!\n"));
+ return -1;
+ }
+
+ USB_AUTOPM_GET_INTERFACE(g_probe_info.intf);
+
+ hwacc.cmd = DL_CMD_WRHW;
+ hwacc.addr = address;
+
+ DBUSTRACE(("Address:%x size:%d", hwacc.addr, size));
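+ /* Write in the largest chunk the remaining size allows: 4, then 2, then 1 byte(s) */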
+ do {
+ if (size >= 4) {
+ write_bytes = 4;
+ } else if (size >= 2) {
+ write_bytes = 2;
+ } else {
+ write_bytes = 1;
+ }
+
+ hwacc.len = write_bytes;
+
+ while (size >= write_bytes) {
+ hwacc.data = *((unsigned int*)data);
+
+ status = USB_CONTROL_MSG(usbinfo->usb, usb_sndctrlpipe(usbinfo->usb, 0),
+ DL_WRHW, (USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE),
+ 1, 0, (char *)&hwacc, sizeof(hwacc_t), USB_CTRL_EP_TIMEOUT);
+
+ if (status < 0) {
+ retval = -1;
+ DBUSERR((" Ctrl write hwacc failed w/status %d @ address:%x \n",
+ status, hwacc.addr));
+ goto err;
+ }
+
+ hwacc.addr += write_bytes;
+ data += write_bytes;
+ size -= write_bytes;
+ }
+ } while (size > 0);
+
+err:
+ USB_AUTOPM_PUT_INTERFACE(g_probe_info.intf);
+
+ return retval;
+}
+
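+/**
+ * Read a dongle register using a synchronous vendor-specific control message. The 'datalen'
+ * argument selects an 8, 16 or 32 bit access.
+ */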
+int
+dbus_usbos_readreg(void *bus, uint32 regaddr, int datalen, uint32 *value)
+{
+ usbos_info_t *usbinfo = (usbos_info_t *) bus;
+ int ret = DBUS_OK;
+ int transferred;
+ uint32 cmd;
+ hwacc_t hwacc;
+
+ if (usbinfo == NULL)
+ return DBUS_ERR;
+
+ if (datalen == 1)
+ cmd = DL_RDHW8;
+ else if (datalen == 2)
+ cmd = DL_RDHW16;
+ else
+ cmd = DL_RDHW32;
+
+ USB_AUTOPM_GET_INTERFACE(g_probe_info.intf);
+
+ transferred = USB_CONTROL_MSG(usbinfo->usb, usb_rcvctrlpipe(usbinfo->usb, 0),
+ cmd, (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE),
+ (uint16)(regaddr), (uint16)(regaddr >> 16),
+ (void *) &hwacc, sizeof(hwacc_t), USB_CTRL_EP_TIMEOUT);
+
+ if (transferred >= (int)sizeof(hwacc_t)) {
+ *value = hwacc.data;
+ } else {
+ DBUSERR(("%s: usb_control_msg failed %d\n", __FUNCTION__, transferred));
+ ret = DBUS_ERR;
+ }
+
+ USB_AUTOPM_PUT_INTERFACE(g_probe_info.intf);
+
+ return ret;
+}
+
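+/** Write a dongle register using a synchronous vendor-specific control message */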
+int
+dbus_usbos_writereg(void *bus, uint32 regaddr, int datalen, uint32 data)
+{
+ usbos_info_t *usbinfo = (usbos_info_t *) bus;
+ int ret = DBUS_OK;
+ int transferred;
+ uint32 cmd = DL_WRHW;
+ hwacc_t hwacc;
+
+ if (usbinfo == NULL)
+ return DBUS_ERR;
+
+ USB_AUTOPM_GET_INTERFACE(g_probe_info.intf);
+
+ hwacc.cmd = DL_WRHW;
+ hwacc.addr = regaddr;
+ hwacc.data = data;
+ hwacc.len = datalen;
+
+ transferred = USB_CONTROL_MSG(usbinfo->usb, usb_sndctrlpipe(usbinfo->usb, 0),
+ cmd, (USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE),
+ 1, 0,
+ (void *) &hwacc, sizeof(hwacc_t), USB_CTRL_EP_TIMEOUT);
+
+ if (transferred != sizeof(hwacc_t)) {
+ DBUSERR(("%s: usb_control_msg failed %d\n", __FUNCTION__, transferred));
+ ret = DBUS_ERR;
+ }
+
+ USB_AUTOPM_PUT_INTERFACE(g_probe_info.intf);
+
+ return ret;
+}
+
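+/** Sleep for 'ms' milliseconds; busy-waits instead when called from interrupt context */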
+int
+dbus_usbos_wait(usbos_info_t *usbinfo, uint16 ms)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+ if (in_interrupt())
+ mdelay(ms);
+ else
+ msleep_interruptible(ms);
+#else
+ wait_ms(ms);
+#endif
+ return DBUS_OK;
+}
+
+/** Called by dbus_usb.c as part of the firmware download process */
+bool
+dbus_usbos_dl_send_bulk(usbos_info_t *usbinfo, void *buffer, int len)
+{
+ bool ret = TRUE;
+ int status;
+ int transferred = 0;
+
+ if (usbinfo == NULL)
+ return FALSE;
+
+ USB_AUTOPM_GET_INTERFACE(g_probe_info.intf);
+
+ status = USB_BULK_MSG(usbinfo->usb, usbinfo->tx_pipe,
+ buffer, len,
+ &transferred, USB_BULK_EP_TIMEOUT);
+
+ if (status < 0) {
+ DBUSERR(("%s: usb_bulk_msg failed %d\n", __FUNCTION__, status));
+ ret = FALSE;
+ }
+
+ USB_AUTOPM_PUT_INTERFACE(g_probe_info.intf);
+
+ return ret;
+}
+
+static bool
+dbus_usbos_intf_recv_needed(void *bus)
+{
+ return FALSE;
+}
+
+/**
+ * Higher layer (dbus_usb.c) wants to execute a function with the rx spin lock held.
+ */
+static void*
+dbus_usbos_intf_exec_rxlock(void *bus, exec_cb_t cb, struct exec_parms *args)
+{
+ usbos_info_t *usbos_info = (usbos_info_t *) bus;
+ void *ret;
+ unsigned long flags;
+
+ if (usbos_info == NULL)
+ return NULL;
+
+ spin_lock_irqsave(&usbos_info->rxlock, flags);
+ ret = cb(args);
+ spin_unlock_irqrestore(&usbos_info->rxlock, flags);
+
+ return ret;
+}
+
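+/** As dbus_usbos_intf_exec_rxlock(), but executes the callback with the tx spin lock held */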
+static void*
+dbus_usbos_intf_exec_txlock(void *bus, exec_cb_t cb, struct exec_parms *args)
+{
+ usbos_info_t *usbos_info = (usbos_info_t *) bus;
+ void *ret;
+ unsigned long flags;
+
+ if (usbos_info == NULL)
+ return NULL;
+
+ spin_lock_irqsave(&usbos_info->txlock, flags);
+ ret = cb(args);
+ spin_unlock_irqrestore(&usbos_info->txlock, flags);
+
+ return ret;
+}
+
+/**
+ * If an error condition is detected in this module, the higher DBUS layer (dbus_usb.c) has to
+ * be notified.
+ */
+int
+dbus_usbos_errhandler(void *bus, int err)
+{
+ usbos_info_t *usbos_info = (usbos_info_t *) bus;
+
+ if (usbos_info == NULL)
+ return DBUS_ERR;
+
+ if (usbos_info->cbarg && usbos_info->cbs) {
+ if (usbos_info->cbs->errhandler)
+ usbos_info->cbs->errhandler(usbos_info->cbarg, err);
+ }
+
+ return DBUS_OK;
+}
+
+/**
+ * If a change in bus state is detected in this module, the higher DBUS layer (dbus_usb.c) has to
+ * be notified.
+ */
+int
+dbus_usbos_state_change(void *bus, int state)
+{
+ usbos_info_t *usbos_info = (usbos_info_t *) bus;
+
+ if (usbos_info == NULL)
+ return DBUS_ERR;
+
+ if (usbos_info->cbarg && usbos_info->cbs) {
+ if (usbos_info->cbs->state_change)
+ usbos_info->cbs->state_change(usbos_info->cbarg, state);
+ }
+
+ usbos_info->pub->busstate = state;
+ return DBUS_OK;
+}
+
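+/**
+ * Called by higher layer (dbus_usb.c) to register with the Linux USB stack. The device itself
+ * is discovered asynchronously via the probe callback, hence DBUS_ERR_NODEVICE is returned here.
+ */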
+int
+dbus_bus_osl_register(int vid, int pid, probe_cb_t prcb,
+ disconnect_cb_t discb, void *prarg, dbus_intf_t **intf, void *param1, void *param2)
+{
+ bzero(&g_probe_info, sizeof(probe_info_t));
+
+ probe_cb = prcb;
+ disconnect_cb = discb;
+ probe_arg = prarg;
+
+ devid_table[0].idVendor = vid;
+ devid_table[0].idProduct = pid;
+
+ *intf = &dbus_usbos_intf;
+
+ USB_REGISTER();
+
+ return DBUS_ERR_NODEVICE;
+}
+
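+/**
+ * Undo dbus_bus_osl_register(): run any pending disconnect callback and deregister from the
+ * Linux USB stack.
+ */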
+int
+dbus_bus_osl_deregister()
+{
+ g_probe_info.dereged = TRUE;
+
+ DHD_MUTEX_LOCK();
+ if (disconnect_cb && disc_arg && (g_probe_info.disc_cb_done == FALSE)) {
+ disconnect_cb(disc_arg);
+ disc_arg = NULL;
+ }
+ DHD_MUTEX_UNLOCK();
+
+ USB_DEREGISTER();
+
+ return DBUS_OK;
+}
+
+void *
+dbus_usbos_intf_attach(dbus_pub_t *pub, void *cbarg, dbus_intf_callbacks_t *cbs)
+{
+ usbos_info_t *usbos_info;
+
+ if (g_probe_info.dldone == FALSE) {
+ DBUSERR(("%s: err device not downloaded!\n", __FUNCTION__));
+ return NULL;
+ }
+
+ /* Sanity check for BUS_INFO() */
+ ASSERT(OFFSETOF(usbos_info_t, pub) == 0);
+
+ usbos_info = MALLOC(pub->osh, sizeof(usbos_info_t));
+ if (usbos_info == NULL)
+ return NULL;
+
+ bzero(usbos_info, sizeof(usbos_info_t));
+
+ usbos_info->pub = pub;
+ usbos_info->cbarg = cbarg;
+ usbos_info->cbs = cbs;
+
+ /* Needed for disconnect() */
+ g_probe_info.usbos_info = usbos_info;
+
+ /* Update USB Info */
+ usbos_info->usb = g_probe_info.usb;
+ usbos_info->rx_pipe = g_probe_info.rx_pipe;
+ usbos_info->rx_pipe2 = g_probe_info.rx_pipe2;
+ usbos_info->tx_pipe = g_probe_info.tx_pipe;
+ usbos_info->intr_pipe = g_probe_info.intr_pipe;
+ usbos_info->intr_size = g_probe_info.intr_size;
+ usbos_info->interval = g_probe_info.interval;
+ usbos_info->pub->device_speed = g_probe_info.device_speed;
+ if (usbos_info->rx_pipe2) {
+ usbos_info->pub->attrib.has_2nd_bulk_in_ep = 1;
+ } else {
+ usbos_info->pub->attrib.has_2nd_bulk_in_ep = 0;
+ }
+
+ if (usbos_info->tx_pipe)
+ usbos_info->maxps = usb_maxpacket(usbos_info->usb,
+ usbos_info->tx_pipe, usb_pipeout(usbos_info->tx_pipe));
+
+ INIT_LIST_HEAD(&usbos_info->req_rxfreeq);
+ INIT_LIST_HEAD(&usbos_info->req_txfreeq);
+ INIT_LIST_HEAD(&usbos_info->req_rxpostedq);
+ INIT_LIST_HEAD(&usbos_info->req_txpostedq);
+ spin_lock_init(&usbos_info->rxfree_lock);
+ spin_lock_init(&usbos_info->txfree_lock);
+ spin_lock_init(&usbos_info->rxposted_lock);
+ spin_lock_init(&usbos_info->txposted_lock);
+ spin_lock_init(&usbos_info->rxlock);
+ spin_lock_init(&usbos_info->txlock);
+
+ atomic_set(&usbos_info->rxposted, 0);
+ atomic_set(&usbos_info->txposted, 0);
+
+
+#ifdef USB_DISABLE_INT_EP
+ usbos_info->intr_urb = NULL;
+#else
+ if (!(usbos_info->intr_urb = USB_ALLOC_URB())) {
+ DBUSERR(("%s: usb_alloc_urb (intr) failed\n", __FUNCTION__));
+ goto fail;
+ }
+#endif
+
+ if (!(usbos_info->ctl_urb = USB_ALLOC_URB())) {
+ DBUSERR(("%s: usb_alloc_urb (ctl) failed\n", __FUNCTION__));
+ goto fail;
+ }
+
+ init_waitqueue_head(&usbos_info->wait);
+
+ if (!(usbos_info->blk_urb = USB_ALLOC_URB())) { /* for embedded image downloading */
+ DBUSERR(("%s: usb_alloc_urb (blk) failed\n", __FUNCTION__));
+ goto fail;
+ }
+
+ usbos_info->rxbuf_len = (uint)usbos_info->pub->rxsize;
+
+ atomic_set(&usbos_info->txallocated, 0);
+ if (DBUS_OK != dbus_usbos_urbreqs_alloc(usbos_info,
+ usbos_info->pub->ntxq, FALSE)) {
+ goto fail;
+ }
+
+ atomic_set(&usbos_info->rxallocated, 0);
+ if (DBUS_OK != dbus_usbos_urbreqs_alloc(usbos_info,
+ MIN(DBUS_USB_RXQUEUE_BATCH_ADD, usbos_info->pub->nrxq),
+ TRUE)) {
+ goto fail;
+ }
+
+ sema_init(&usbos_info->ctl_lock, 1);
+
+#ifdef USBOS_THREAD
+ if (dbus_usbos_thread_init(usbos_info) == NULL)
+ goto fail;
+#endif /* USBOS_THREAD */
+
+#ifdef USBOS_TX_THREAD
+ if (dbus_usbos_tx_thread_init(usbos_info) == NULL)
+ goto fail;
+#endif /* USBOS_TX_THREAD */
+
+ pub->dev_info = g_probe_info.usb;
+
+
+ return (void *) usbos_info;
+fail:
+ if (usbos_info->intr_urb) {
+ USB_FREE_URB(usbos_info->intr_urb);
+ usbos_info->intr_urb = NULL;
+ }
+
+ if (usbos_info->ctl_urb) {
+ USB_FREE_URB(usbos_info->ctl_urb);
+ usbos_info->ctl_urb = NULL;
+ }
+
+ /* blk_urb is allocated unconditionally above, so free it unconditionally here */
+ if (usbos_info->blk_urb) {
+ USB_FREE_URB(usbos_info->blk_urb);
+ usbos_info->blk_urb = NULL;
+ }
+
+ dbus_usbos_urbreqs_free(usbos_info, TRUE);
+ atomic_set(&usbos_info->rxallocated, 0);
+ dbus_usbos_urbreqs_free(usbos_info, FALSE);
+ atomic_set(&usbos_info->txallocated, 0);
+
+ g_probe_info.usbos_info = NULL;
+
+ MFREE(pub->osh, usbos_info, sizeof(usbos_info_t));
+ return NULL;
+} /* dbus_usbos_intf_attach */
+
+void
+dbus_usbos_intf_detach(dbus_pub_t *pub, void *info)
+{
+ usbos_info_t *usbos_info = (usbos_info_t *) info;
+ osl_t *osh = pub->osh;
+
+ if (usbos_info == NULL) {
+ return;
+ }
+
+#ifdef USBOS_TX_THREAD
+ dbus_usbos_tx_thread_deinit(usbos_info);
+#endif /* USBOS_TX_THREAD */
+
+ /* Must unlink all URBs prior to driver unload;
+ * otherwise an URB callback can occur after driver
+ * has been de-allocated and rmmod'd
+ */
+ dbusos_stop(usbos_info);
+
+ if (usbos_info->intr_urb) {
+ USB_FREE_URB(usbos_info->intr_urb);
+ usbos_info->intr_urb = NULL;
+ }
+
+ if (usbos_info->ctl_urb) {
+ USB_FREE_URB(usbos_info->ctl_urb);
+ usbos_info->ctl_urb = NULL;
+ }
+
+ if (usbos_info->blk_urb) {
+ USB_FREE_URB(usbos_info->blk_urb);
+ usbos_info->blk_urb = NULL;
+ }
+
+ dbus_usbos_urbreqs_free(usbos_info, TRUE);
+ atomic_set(&usbos_info->rxallocated, 0);
+ dbus_usbos_urbreqs_free(usbos_info, FALSE);
+ atomic_set(&usbos_info->txallocated, 0);
+
+#ifdef USBOS_THREAD
+ dbus_usbos_thread_deinit(usbos_info);
+#endif /* USBOS_THREAD */
+
+ g_probe_info.usbos_info = NULL;
+ MFREE(osh, usbos_info, sizeof(usbos_info_t));
+} /* dbus_usbos_intf_detach */
+
+
+#ifdef USBOS_TX_THREAD
+
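+/** Create and start the worker thread that submits CTRL and TX URBs, see dbus_usbos_tx_thread_func() */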
+void*
+dbus_usbos_tx_thread_init(usbos_info_t *usbos_info)
+{
+ spin_lock_init(&usbos_info->usbos_tx_list_lock);
+ INIT_LIST_HEAD(&usbos_info->usbos_tx_list);
+ init_waitqueue_head(&usbos_info->usbos_tx_queue_head);
+
+ usbos_info->usbos_tx_kt = kthread_create(dbus_usbos_tx_thread_func,
+ usbos_info, "usb-tx-thread");
+
+ if (IS_ERR(usbos_info->usbos_tx_kt)) {
+ DBUSERR(("Thread Creation failed\n"));
+ return (NULL);
+ }
+
+ usbos_info->ctl_state = USBOS_REQUEST_STATE_UNSCHEDULED;
+ wake_up_process(usbos_info->usbos_tx_kt);
+
+ return (usbos_info->usbos_tx_kt);
+}
+
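+/** Stop the TX worker thread and move any still-pending requests to the free queue */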
+void
+dbus_usbos_tx_thread_deinit(usbos_info_t *usbos_info)
+{
+ urb_req_t *req;
+
+ if (usbos_info->usbos_tx_kt) {
+ wake_up_interruptible(&usbos_info->usbos_tx_queue_head);
+ kthread_stop(usbos_info->usbos_tx_kt);
+ }
+
+ /* Move pending requests to free queue so they can be freed */
+ while ((req = dbus_usbos_qdeq(
+ &usbos_info->usbos_tx_list, &usbos_info->usbos_tx_list_lock)) != NULL) {
+ dbus_usbos_qenq(&usbos_info->req_txfreeq, req, &usbos_info->txfree_lock);
+ }
+}
+
+/**
+ * Submit CTRL and DATA URBs from a separate thread, so that a blocking USB in-band resume
+ * does not stall the caller.
+ */
+int
+dbus_usbos_tx_thread_func(void *data)
+{
+ usbos_info_t *usbos_info = (usbos_info_t *)data;
+ urb_req_t *req;
+ dbus_irb_tx_t *txirb;
+ int ret;
+ unsigned long flags;
+
+#ifdef WL_THREADNICE
+ set_user_nice(current, WL_THREADNICE);
+#endif
+
+ while (1) {
+ /* Wait until there are URBs to submit */
+ wait_event_interruptible_timeout(
+ usbos_info->usbos_tx_queue_head,
+ !list_empty(&usbos_info->usbos_tx_list) ||
+ usbos_info->ctl_state == USBOS_REQUEST_STATE_SCHEDULED,
+ 100);
+
+ if (kthread_should_stop())
+ break;
+
+ /* Submit CTRL URB if needed */
+ if (usbos_info->ctl_state == USBOS_REQUEST_STATE_SCHEDULED) {
+
+ /* Disable USB autosuspend until this request completes. If the
+ * interface was suspended, this call blocks until it has been resumed.
+ */
+ USB_AUTOPM_GET_INTERFACE(g_probe_info.intf);
+
+ usbos_info->ctl_state = USBOS_REQUEST_STATE_SUBMITTED;
+
+ ret = USB_SUBMIT_URB(usbos_info->ctl_urb);
+ if (ret != 0) {
+ DBUSERR(("%s CTRL USB_SUBMIT_URB failed, status %d\n",
+ __FUNCTION__, ret));
+
+ usbos_info->ctl_state = USBOS_REQUEST_STATE_UNSCHEDULED;
+ up(&usbos_info->ctl_lock);
+
+ USB_AUTOPM_PUT_INTERFACE_ASYNC(g_probe_info.intf);
+ }
+ }
+
+ /* Submit all available TX URBs */
+ while ((req = dbus_usbos_qdeq(&usbos_info->usbos_tx_list,
+ &usbos_info->usbos_tx_list_lock)) != NULL) {
+
+ /* Disable USB autosuspend until this request completes. If the
+ * interface was suspended, this call blocks until it has been resumed.
+ */
+ USB_AUTOPM_GET_INTERFACE(g_probe_info.intf);
+
+ spin_lock_irqsave(&usbos_info->txlock, flags);
+
+ ret = USB_SUBMIT_URB(req->urb);
+ if (ret == 0) {
+ /* URB submitted successfully */
+ dbus_usbos_qenq(&usbos_info->req_txpostedq, req,
+ &usbos_info->txposted_lock);
+ atomic_inc(&usbos_info->txposted);
+ } else {
+ /* Submitting the URB failed. */
+ DBUSERR(("%s TX USB_SUBMIT_URB failed, status %d\n",
+ __FUNCTION__, ret));
+
+ USB_AUTOPM_PUT_INTERFACE_ASYNC(g_probe_info.intf);
+ }
+
+ spin_unlock_irqrestore(&usbos_info->txlock, flags);
+
+ if (ret != 0) {
+ /* Cleanup and notify higher layers */
+ dbus_usbos_qenq(&usbos_info->req_txfreeq, req,
+ &usbos_info->txfree_lock);
+
+ txirb = req->arg;
+ if (txirb->send_buf) {
+ MFREE(usbos_info->pub->osh, txirb->send_buf, req->buf_len);
+ txirb->send_buf = NULL;
+ req->buf_len = 0;
+ }
+
+ if (likely(usbos_info->cbarg && usbos_info->cbs)) {
+ if (likely(usbos_info->cbs->send_irb_complete != NULL))
+ usbos_info->cbs->send_irb_complete(
+ usbos_info->cbarg, txirb, DBUS_ERR_TXDROP);
+ }
+ }
+ }
+ }
+
+ return 0;
+} /* dbus_usbos_tx_thread_func */
+
+#endif /* USBOS_TX_THREAD */
+
+#ifdef USBOS_THREAD
+
+/**
+ * Increase system performance by creating a USB thread that runs in parallel with other
+ * system activity.
+ */
+static void*
+dbus_usbos_thread_init(usbos_info_t *usbos_info)
+{
+ usbos_list_entry_t *entry;
+ unsigned long flags, ii;
+
+ spin_lock_init(&usbos_info->usbos_list_lock);
+ spin_lock_init(&usbos_info->ctrl_lock);
+ INIT_LIST_HEAD(&usbos_info->usbos_list);
+ INIT_LIST_HEAD(&usbos_info->usbos_free_list);
+ init_waitqueue_head(&usbos_info->usbos_queue_head);
+ atomic_set(&usbos_info->usbos_list_cnt, 0);
+
+
+ for (ii = 0; ii < (usbos_info->pub->nrxq + usbos_info->pub->ntxq); ii++) {
+ entry = MALLOC(usbos_info->pub->osh, sizeof(usbos_list_entry_t));
+ if (entry) {
+ spin_lock_irqsave(&usbos_info->usbos_list_lock, flags);
+ list_add_tail((struct list_head*) entry, &usbos_info->usbos_free_list);
+ spin_unlock_irqrestore(&usbos_info->usbos_list_lock, flags);
+ } else {
+ DBUSERR(("Failed to allocate list entry\n"));
+ }
+ }
+
+ usbos_info->usbos_kt = kthread_create(dbus_usbos_thread_func,
+ usbos_info, "usb-thread");
+
+ if (IS_ERR(usbos_info->usbos_kt)) {
+ DBUSERR(("Thread Creation failed\n"));
+ return (NULL);
+ }
+
+ wake_up_process(usbos_info->usbos_kt);
+
+ return (usbos_info->usbos_kt);
+}
+
+static void
+dbus_usbos_thread_deinit(usbos_info_t *usbos_info)
+{
+ struct list_head *cur, *next;
+ usbos_list_entry_t *entry;
+ unsigned long flags;
+
+ if (usbos_info->usbos_kt) {
+ wake_up_interruptible(&usbos_info->usbos_queue_head);
+ kthread_stop(usbos_info->usbos_kt);
+ }
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+ list_for_each_safe(cur, next, &usbos_info->usbos_list)
+ {
+ entry = list_entry(cur, struct usbos_list_entry, list);
+ /* detach this entry from the list and then free the entry */
+ spin_lock_irqsave(&usbos_info->usbos_list_lock, flags);
+ list_del(cur);
+ MFREE(usbos_info->pub->osh, entry, sizeof(usbos_list_entry_t));
+ spin_unlock_irqrestore(&usbos_info->usbos_list_lock, flags);
+ }
+
+ list_for_each_safe(cur, next, &usbos_info->usbos_free_list)
+ {
+ entry = list_entry(cur, struct usbos_list_entry, list);
+ /* detach this entry from the list and then free the entry */
+ spin_lock_irqsave(&usbos_info->usbos_list_lock, flags);
+ list_del(cur);
+ MFREE(usbos_info->pub->osh, entry, sizeof(usbos_list_entry_t));
+ spin_unlock_irqrestore(&usbos_info->usbos_list_lock, flags);
+ }
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+}
+
+/** Process completed URBs in a worker thread */
+static int
+dbus_usbos_thread_func(void *data)
+{
+ usbos_info_t *usbos_info = (usbos_info_t *)data;
+ usbos_list_entry_t *entry;
+ struct list_head *cur, *next;
+ unsigned long flags;
+
+#ifdef WL_THREADNICE
+ set_user_nice(current, WL_THREADNICE);
+#endif
+
+ while (1) {
+ /* If the list is empty, then go to sleep */
+ wait_event_interruptible_timeout
+ (usbos_info->usbos_queue_head,
+ atomic_read(&usbos_info->usbos_list_cnt) > 0,
+ 100);
+
+ if (kthread_should_stop())
+ break;
+
+ spin_lock_irqsave(&usbos_info->usbos_list_lock, flags);
+
+ /* For each entry on the list, process it. Remove the entry from
+ * the list when done.
+ */
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+ list_for_each_safe(cur, next, &usbos_info->usbos_list)
+ {
+ urb_req_t *req;
+ int len;
+ int stat;
+ usbos_info_t *usbos_info_local;
+
+ entry = list_entry(cur, struct usbos_list_entry, list);
+ if (entry == NULL)
+ break;
+
+ req = entry->urb_context;
+ len = entry->urb_length;
+ stat = entry->urb_status;
+ usbos_info_local = req->usbinfo;
+
+ /* detach this entry from the list and attach it to the free list */
+ list_del_init(cur);
+ spin_unlock_irqrestore(&usbos_info_local->usbos_list_lock, flags);
+
+ dbus_usbos_recv_complete_handle(req, len, stat);
+
+ spin_lock_irqsave(&usbos_info_local->usbos_list_lock, flags);
+
+ list_add_tail(cur, &usbos_info_local->usbos_free_list);
+
+ atomic_dec(&usbos_info_local->usbos_list_cnt);
+ }
+
+ spin_unlock_irqrestore(&usbos_info->usbos_list_lock, flags);
+
+ }
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+
+ return 0;
+} /* dbus_usbos_thread_func */
+
+/** Called from the Linux URB completion callback; see dbus_usbos_recv_complete() */
+static void
+dbus_usbos_dispatch_schedule(CALLBACK_ARGS)
+{
+ urb_req_t *req = urb->context;
+ usbos_info_t *usbos_info = req->usbinfo;
+ usbos_list_entry_t *entry;
+ unsigned long flags;
+ struct list_head *cur;
+
+ spin_lock_irqsave(&usbos_info->usbos_list_lock, flags);
+
+ cur = usbos_info->usbos_free_list.next;
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+ entry = list_entry(cur, struct usbos_list_entry, list);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+
+ /* detach this entry from the free list and prepare to insert it into the in-use list */
+ list_del_init(cur);
+
+ if (entry) {
+ entry->urb_context = urb->context;
+ entry->urb_length = urb->actual_length;
+ entry->urb_status = urb->status;
+
+ atomic_inc(&usbos_info->usbos_list_cnt);
+ list_add_tail(cur, &usbos_info->usbos_list);
+ } else {
+ DBUSERR(("!!!!!!OUT OF MEMORY!!!!!!!\n"));
+ }
+
+ spin_unlock_irqrestore(&usbos_info->usbos_list_lock, flags);
+
+ /* wake up the URB-processing thread */
+ wake_up_interruptible(&usbos_info->usbos_queue_head);
+} /* dbus_usbos_dispatch_schedule */
+
+#endif /* USBOS_THREAD */
+
+#ifdef BCM_REQUEST_FW
+
+struct request_fw_context {
+ const struct firmware *firmware;
+ struct semaphore lock;
+};
+
+/*
+ * Callback for dbus_request_firmware().
+ */
+static void
+dbus_request_firmware_done(const struct firmware *firmware, void *ctx)
+{
+ struct request_fw_context *context = (struct request_fw_context*)ctx;
+
+ /* Store the received firmware handle in the context and wake requester */
+ context->firmware = firmware;
+ up(&context->lock);
+}
+
+/*
+ * Send a firmware request and wait for completion.
+ *
+ * The use of the asynchronous version of request_firmware() is needed to avoid
+ * kernel oopses when we have just come out of system hibernation.
+ */
+static int
+dbus_request_firmware(const char *name, const struct firmware **firmware)
+{
+ struct request_fw_context *context;
+ int ret;
+
+ context = kzalloc(sizeof(*context), GFP_KERNEL);
+ if (!context)
+ return -ENOMEM;
+
+ sema_init(&context->lock, 0);
+
+ ret = request_firmware_nowait(THIS_MODULE, true, name, &g_probe_info.usb->dev,
+ GFP_KERNEL, context, dbus_request_firmware_done);
+ if (ret) {
+ kfree(context);
+ return ret;
+ }
+
+ /* Wait for completion */
+ if (down_interruptible(&context->lock) != 0) {
+ kfree(context);
+ return -ERESTARTSYS;
+ }
+
+ *firmware = context->firmware;
+ kfree(context);
+
+ return *firmware != NULL ? 0 : -ENOENT;
+}
+
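+/** Resolve the firmware image file name for the given chip (and revision) and request it from user space */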
+static void *
+dbus_get_fwfile(int devid, int chiprev, uint8 **fw, int *fwlen, uint16 boardtype, uint16 boardrev)
+{
+ const struct firmware *firmware = NULL;
+#ifndef OEM_ANDROID
+ s8 *device_id = NULL;
+ s8 *chip_rev = "";
+#endif /* OEM_ANDROID */
+ s8 file_name[64];
+ int ret;
+
+#ifndef OEM_ANDROID
+ switch (devid) {
+ case BCM4350_CHIP_ID:
+ case BCM4354_CHIP_ID:
+ case BCM43556_CHIP_ID:
+ case BCM43558_CHIP_ID:
+ case BCM43566_CHIP_ID:
+ case BCM43568_CHIP_ID:
+ case BCM43570_CHIP_ID:
+ case BCM4358_CHIP_ID:
+ device_id = "4350";
+ break;
+ case BCM43143_CHIP_ID:
+ device_id = "43143";
+ break;
+ case BCM43234_CHIP_ID:
+ case BCM43235_CHIP_ID:
+ case BCM43236_CHIP_ID:
+ device_id = "43236";
+ break;
+ case BCM43242_CHIP_ID:
+ device_id = "43242";
+ break;
+ case BCM43238_CHIP_ID:
+ device_id = "43238";
+ break;
+ case BCM43526_CHIP_ID:
+ device_id = "43526";
+ break;
+ case BCM43569_CHIP_ID:
+ device_id = "43569";
+ switch (chiprev) {
+ case 0:
+ chip_rev = "a0";
+ break;
+ case 2:
+ chip_rev = "a2";
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ DBUSERR(("unsupported device %x\n", devid));
+ return NULL;
+ }
+
+ /* Load firmware */
+ snprintf(file_name, sizeof(file_name), "brcm/bcm%s%s-firmware.bin", device_id, chip_rev);
+#else
+ snprintf(file_name, sizeof(file_name), "%s", CONFIG_ANDROID_BCMDHD_FW_PATH);
+#endif /* OEM_ANDROID */
+
+ ret = dbus_request_firmware(file_name, &firmware);
+ if (ret) {
+ DBUSERR(("fail to request firmware %s\n", file_name));
+ return NULL;
+ }
+
+ *fwlen = firmware->size;
+ *fw = (uint8 *)firmware->data;
+ return (void *)firmware;
+}
+
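+/** Resolve the NVRAM file name for the given chip/board and request it from user space */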
+static void *
+dbus_get_nvfile(int devid, int chiprev, uint8 **fw, int *fwlen, uint16 boardtype, uint16 boardrev)
+{
+ const struct firmware *firmware = NULL;
+#ifndef OEM_ANDROID
+ s8 *device_id = NULL;
+ s8 *chip_rev = "";
+#endif /* OEM_ANDROID */
+ s8 file_name[64];
+ int ret;
+
+#ifndef OEM_ANDROID
+ switch (devid) {
+ case BCM4350_CHIP_ID:
+ case BCM4354_CHIP_ID:
+ case BCM43556_CHIP_ID:
+ case BCM43558_CHIP_ID:
+ case BCM43566_CHIP_ID:
+ case BCM43568_CHIP_ID:
+ case BCM43570_CHIP_ID:
+ case BCM4358_CHIP_ID:
+ device_id = "4350";
+ break;
+ case BCM43143_CHIP_ID:
+ device_id = "43143";
+ break;
+ case BCM43234_CHIP_ID:
+ device_id = "43234";
+ break;
+ case BCM43235_CHIP_ID:
+ device_id = "43235";
+ break;
+ case BCM43236_CHIP_ID:
+ device_id = "43236";
+ break;
+ case BCM43238_CHIP_ID:
+ device_id = "43238";
+ break;
+ case BCM43242_CHIP_ID:
+ device_id = "43242";
+ break;
+ case BCM43526_CHIP_ID:
+ device_id = "43526";
+ break;
+ case BCM43569_CHIP_ID:
+ device_id = "43569";
+ switch (chiprev) {
+ case 0:
+ chip_rev = "a0";
+ break;
+ case 2:
+ chip_rev = "a2";
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ DBUSERR(("unsupported device %x\n", devid));
+ return NULL;
+ }
+
+ /* Load board specific nvram file */
+ snprintf(file_name, sizeof(file_name), "brcm/bcm%s%s-%2x-%2x.nvm",
+ device_id, chip_rev, boardtype, boardrev);
+#else
+ snprintf(file_name, sizeof(file_name), "%s", CONFIG_ANDROID_BCMDHD_NVRAM_PATH);
+#endif /* OEM_ANDROID */
+
+ ret = dbus_request_firmware(file_name, &firmware);
+ if (ret) {
+ DBUSERR(("fail to request nvram %s\n", file_name));
+
+#ifndef OEM_ANDROID
+ /* Load generic nvram file */
+ snprintf(file_name, sizeof(file_name), "brcm/bcm%s%s.nvm",
+ device_id, chip_rev);
+
+ ret = dbus_request_firmware(file_name, &firmware);
+#endif /* OEM_ANDROID */
+
+ if (ret) {
+ DBUSERR(("fail to request nvram %s\n", file_name));
+ return NULL;
+ }
+ }
+
+ *fwlen = firmware->size;
+ *fw = (uint8 *)firmware->data;
+ return (void *)firmware;
+}
+
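+/**
+ * Called by higher layers to fetch either a firmware or an NVRAM image; returns an opaque
+ * handle to be passed to dbus_release_fw_nvfile() when done.
+ */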
+void *
+dbus_get_fw_nvfile(int devid, int chiprev, uint8 **fw, int *fwlen, int type, uint16 boardtype,
+ uint16 boardrev)
+{
+ switch (type) {
+ case DBUS_FIRMWARE:
+ return dbus_get_fwfile(devid, chiprev, fw, fwlen, boardtype, boardrev);
+ case DBUS_NVFILE:
+ return dbus_get_nvfile(devid, chiprev, fw, fwlen, boardtype, boardrev);
+ default:
+ return NULL;
+ }
+}
+
+void
+dbus_release_fw_nvfile(void *firmware)
+{
+ release_firmware((struct firmware *)firmware);
+}
+#endif /* BCM_REQUEST_FW */
+
+#ifdef BCMUSBDEV_COMPOSITE
+/**
+ * For a composite device the interface order is not guaranteed; scan the device struct for the
+ * WLAN interface.
+ */
+static int
+dbus_usbos_intf_wlan(struct usb_device *usb)
+{
+ int i, num_of_eps, ep, intf_wlan = -1;
+ int num_intf = CONFIGDESC(usb)->bNumInterfaces;
+ struct usb_endpoint_descriptor *endpoint;
+
+ for (i = 0; i < num_intf; i++) {
+ if (IFDESC(usb, i).bInterfaceClass != USB_CLASS_VENDOR_SPEC)
+ continue;
+ num_of_eps = IFDESC(usb, i).bNumEndpoints;
+
+ for (ep = 0; ep < num_of_eps; ep++) {
+ endpoint = &IFEPDESC(usb, i, ep);
+ if ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
+ USB_ENDPOINT_XFER_BULK) {
+ intf_wlan = i;
+ break;
+ }
+ }
+ if (ep < num_of_eps)
+ break;
+ }
+
+ return intf_wlan;
+}
+#endif /* BCMUSBDEV_COMPOSITE */
diff --git a/bcmdhd.101.10.361.x/dhd.h b/bcmdhd.101.10.361.x/dhd.h
new file mode 100755
index 0000000..fd53811
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd.h
@@ -0,0 +1,4655 @@
+/*
+ * Header file describing the internal (inter-module) DHD interfaces.
+ *
+ * Provides type definitions and function prototypes used to link the
+ * DHD OS, bus, and protocol modules.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+/****************
+ * Common types *
+ */
+
+#ifndef _dhd_h_
+#define _dhd_h_
+
+#if defined(LINUX)
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/random.h>
+#include <linux/spinlock.h>
+#include <linux/ethtool.h>
+#include <linux/proc_fs.h>
+#include <asm/uaccess.h>
+#include <asm/unaligned.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)
+#include <uapi/linux/sched/types.h>
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
+#include <linux/sched/types.h>
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) */
+#ifdef DHD_BUZZZ_LOG_ENABLED
+#include <dhd_buzzz.h>
+#endif /* DHD_BUZZZ_LOG_ENABLED */
+/* The kernel threading is sdio-specific */
+struct task_struct;
+struct sched_param;
+#if defined(BT_OVER_SDIO)
+#include <dhd_bt_interface.h>
+#endif /* defined (BT_OVER_SDIO) */
+int setScheduler(struct task_struct *p, int policy, struct sched_param *param);
+int get_scheduler_policy(struct task_struct *p);
+#else /* LINUX */
+#define ENOMEM 1
+#define EFAULT 2
+#define EINVAL 3
+#define EIO 4
+#define ETIMEDOUT 5
+#define ENODATA 6
+#define EREMOTEIO 7
+#define ENODEV 8
+#define ERESTARTSYS 512
+#endif /* LINUX */
+#define MAX_EVENT 16
+
+#define ALL_INTERFACES 0xff
+
+/* H2D and D2H ring dump is enabled by default */
+#ifdef PCIE_FULL_DONGLE
+#define DHD_DUMP_PCIE_RINGS
+#endif /* PCIE_FULL_DONGLE */
+
+#include <osl.h>
+
+#include <wlioctl.h>
+#include <dhdioctl.h>
+#include <wlfc_proto.h>
+#include <hnd_armtrap.h>
+#if defined(DUMP_IOCTL_IOV_LIST) || defined(DHD_DEBUG)
+#include <bcmutils.h>
+#endif /* DUMP_IOCTL_IOV_LIST || DHD_DEBUG */
+
+#if defined(BCMWDF)
+#include <wdf.h>
+#include <WdfMiniport.h>
+#endif /* (BCMWDF) */
+
+#ifdef WL_CFGVENDOR_SEND_HANG_EVENT
+#include <dnglioctl.h>
+#endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
+
+#ifdef DHD_ERPOM
+#include <pom.h>
+#ifdef PCIE_OOB
+/*
+ * Both ERPOM and PCIE_OOB depend on FTDI to program GPIOs.
+ * Operating both features in parallel makes the GPIOs go out of sync,
+ * so only one feature is expected to be enabled at a time.
+ */
+#error "PCIE_OOB enabled"
+#endif /* PCIE_OOB */
+#endif /* DHD_ERPOM */
+
+#include <dngl_stats.h>
+#include <hnd_pktq.h>
+
+#ifdef DEBUG_DPC_THREAD_WATCHDOG
+#define MAX_RESCHED_CNT 600
+#endif /* DEBUG_DPC_THREAD_WATCHDOG */
+
+#if defined(LINUX) || defined(linux)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) && LINUX_VERSION_CODE < \
+ KERNEL_VERSION(3, 18, 0) || defined(CONFIG_BCMDHD_VENDOR_EXT))
+#define WL_VENDOR_EXT_SUPPORT
+#endif /* 3.18 > KERNEL_VER >= 3.14 || defined(CONFIG_BCMDHD_VENDOR_EXT) */
+#endif /* defined (LINUX) || defined(linux) */
+
+#if defined(KEEP_ALIVE)
+/* Default KEEP_ALIVE period is 55 sec, to prevent the AP from sending keep-alive probe frames */
+#define KEEP_ALIVE_PERIOD 55000
+#define NULL_PKT_STR "null_pkt"
+#endif /* KEEP_ALIVE */
+
+/* Enabled by default from here; the WQ code will be removed later */
+#define DHD_USE_KTHREAD_FOR_LOGTRACE
+
+/* Forward decls */
+struct dhd_bus;
+struct dhd_prot;
+struct dhd_info;
+struct dhd_ioctl;
+struct dhd_dbg;
+struct dhd_ts;
+#ifdef DNGL_AXI_ERROR_LOGGING
+struct dhd_axi_error_dump;
+#endif /* DNGL_AXI_ERROR_LOGGING */
+
+/* The level of bus communication with the dongle */
+enum dhd_bus_state {
+ DHD_BUS_DOWN, /* Not ready for frame transfers */
+ DHD_BUS_LOAD, /* Download access only (CPU reset) */
+ DHD_BUS_DATA, /* Ready for frame transfers */
+ DHD_BUS_SUSPEND, /* Bus has been suspended */
+ DHD_BUS_DOWN_IN_PROGRESS, /* Bus going Down */
+ DHD_BUS_REMOVE, /* Bus has been removed */
+};
+
+/* The type of device reset to perform on the bus */
+enum dhd_bus_devreset_type {
+ DHD_BUS_DEVRESET_ON = 0, /* ON */
+ DHD_BUS_DEVRESET_OFF = 1, /* OFF */
+ DHD_BUS_DEVRESET_FLR = 2, /* FLR */
+ DHD_BUS_DEVRESET_FLR_FORCE_FAIL = 3, /* FLR FORCE FAIL */
+ DHD_BUS_DEVRESET_QUIESCE = 4, /* QUIESCE */
+};
+
+/*
+ * Bit fields indicating in-progress operations that cleanup must wait on
+ * until they are finished. Future synchronizable processes can add their
+ * bit field below and update their functionality accordingly.
+ */
+#define DHD_BUS_BUSY_IN_TX 0x01
+#define DHD_BUS_BUSY_IN_SEND_PKT 0x02
+#define DHD_BUS_BUSY_IN_DPC 0x04
+#define DHD_BUS_BUSY_IN_WD 0x08
+#define DHD_BUS_BUSY_IN_IOVAR 0x10
+#define DHD_BUS_BUSY_IN_DHD_IOVAR 0x20
+#define DHD_BUS_BUSY_SUSPEND_IN_PROGRESS 0x40
+#define DHD_BUS_BUSY_RESUME_IN_PROGRESS 0x80
+#define DHD_BUS_BUSY_RPM_SUSPEND_IN_PROGRESS 0x100
+#define DHD_BUS_BUSY_RPM_SUSPEND_DONE 0x200
+#define DHD_BUS_BUSY_RPM_RESUME_IN_PROGRESS 0x400
+#define DHD_BUS_BUSY_RPM_ALL (DHD_BUS_BUSY_RPM_SUSPEND_DONE | \
+ DHD_BUS_BUSY_RPM_SUSPEND_IN_PROGRESS | \
+ DHD_BUS_BUSY_RPM_RESUME_IN_PROGRESS)
+#define DHD_BUS_BUSY_IN_CHECKDIED 0x800
+#define DHD_BUS_BUSY_IN_MEMDUMP 0x1000
+#define DHD_BUS_BUSY_IN_SSSRDUMP 0x2000
+#define DHD_BUS_BUSY_IN_LOGDUMP 0x4000
+#define DHD_BUS_BUSY_IN_HALDUMP 0x8000
+#define DHD_BUS_BUSY_IN_NAPI 0x10000
+#define DHD_BUS_BUSY_IN_DS_DEASSERT 0x20000
+
+#define DHD_BUS_BUSY_SET_IN_TX(dhdp) \
+ (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_TX
+#define DHD_BUS_BUSY_SET_IN_SEND_PKT(dhdp) \
+ (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_SEND_PKT
+#define DHD_BUS_BUSY_SET_IN_DPC(dhdp) \
+ (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_DPC
+#define DHD_BUS_BUSY_SET_IN_WD(dhdp) \
+ (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_WD
+#define DHD_BUS_BUSY_SET_IN_IOVAR(dhdp) \
+ (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_IOVAR
+#define DHD_BUS_BUSY_SET_IN_DHD_IOVAR(dhdp) \
+ (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_DHD_IOVAR
+#define DHD_BUS_BUSY_SET_SUSPEND_IN_PROGRESS(dhdp) \
+ (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_SUSPEND_IN_PROGRESS
+#define DHD_BUS_BUSY_SET_RESUME_IN_PROGRESS(dhdp) \
+ (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_RESUME_IN_PROGRESS
+#define DHD_BUS_BUSY_SET_RPM_SUSPEND_IN_PROGRESS(dhdp) \
+ (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_RPM_SUSPEND_IN_PROGRESS
+#define DHD_BUS_BUSY_SET_RPM_SUSPEND_DONE(dhdp) \
+ (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_RPM_SUSPEND_DONE
+#define DHD_BUS_BUSY_SET_RPM_RESUME_IN_PROGRESS(dhdp) \
+ (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_RPM_RESUME_IN_PROGRESS
+#define DHD_BUS_BUSY_SET_IN_CHECKDIED(dhdp) \
+ (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_CHECKDIED
+#define DHD_BUS_BUSY_SET_IN_MEMDUMP(dhdp) \
+ (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_MEMDUMP
+#define DHD_BUS_BUSY_SET_IN_SSSRDUMP(dhdp) \
+ (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_SSSRDUMP
+#define DHD_BUS_BUSY_SET_IN_LOGDUMP(dhdp) \
+ (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_LOGDUMP
+#define DHD_BUS_BUSY_SET_IN_HALDUMP(dhdp) \
+ (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_HALDUMP
+#define DHD_BUS_BUSY_SET_IN_NAPI(dhdp) \
+ (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_NAPI
+#define DHD_BUS_BUSY_SET_IN_DS_DEASSERT(dhdp) \
+ (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_DS_DEASSERT
+
+#define DHD_BUS_BUSY_CLEAR_IN_TX(dhdp) \
+ (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX
+#define DHD_BUS_BUSY_CLEAR_IN_SEND_PKT(dhdp) \
+ (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_SEND_PKT
+#define DHD_BUS_BUSY_CLEAR_IN_DPC(dhdp) \
+ (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_DPC
+#define DHD_BUS_BUSY_CLEAR_IN_WD(dhdp) \
+ (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_WD
+#define DHD_BUS_BUSY_CLEAR_IN_IOVAR(dhdp) \
+ (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_IOVAR
+#define DHD_BUS_BUSY_CLEAR_IN_DHD_IOVAR(dhdp) \
+ (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_DHD_IOVAR
+#define DHD_BUS_BUSY_CLEAR_SUSPEND_IN_PROGRESS(dhdp) \
+ (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_SUSPEND_IN_PROGRESS
+#define DHD_BUS_BUSY_CLEAR_RESUME_IN_PROGRESS(dhdp) \
+ (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_RESUME_IN_PROGRESS
+#define DHD_BUS_BUSY_CLEAR_RPM_SUSPEND_IN_PROGRESS(dhdp) \
+ (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_RPM_SUSPEND_IN_PROGRESS
+#define DHD_BUS_BUSY_CLEAR_RPM_SUSPEND_DONE(dhdp) \
+ (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_RPM_SUSPEND_DONE
+#define DHD_BUS_BUSY_CLEAR_RPM_RESUME_IN_PROGRESS(dhdp) \
+ (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_RPM_RESUME_IN_PROGRESS
+#define DHD_BUS_BUSY_CLEAR_IN_CHECKDIED(dhdp) \
+ (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_CHECKDIED
+#define DHD_BUS_BUSY_CLEAR_IN_MEMDUMP(dhdp) \
+ (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_MEMDUMP
+#define DHD_BUS_BUSY_CLEAR_IN_SSSRDUMP(dhdp) \
+ (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_SSSRDUMP
+#define DHD_BUS_BUSY_CLEAR_IN_LOGDUMP(dhdp) \
+ (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_LOGDUMP
+#define DHD_BUS_BUSY_CLEAR_IN_HALDUMP(dhdp) \
+ (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_HALDUMP
+#define DHD_BUS_BUSY_CLEAR_IN_NAPI(dhdp) \
+ (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_NAPI
+#define DHD_BUS_BUSY_CLEAR_IN_DS_DEASSERT(dhdp) \
+ (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_DS_DEASSERT
+
+#define DHD_BUS_BUSY_CHECK_IN_TX(dhdp) \
+ ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_TX)
+#define DHD_BUS_BUSY_CHECK_IN_SEND_PKT(dhdp) \
+ ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_SEND_PKT)
+#define DHD_BUS_BUSY_CHECK_IN_DPC(dhdp) \
+ ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_DPC)
+#define DHD_BUS_BUSY_CHECK_IN_WD(dhdp) \
+ ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_WD)
+#define DHD_BUS_BUSY_CHECK_IN_IOVAR(dhdp) \
+ ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_IOVAR)
+#define DHD_BUS_BUSY_CHECK_IN_DHD_IOVAR(dhdp) \
+ ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_DHD_IOVAR)
+#define DHD_BUS_BUSY_CHECK_SUSPEND_IN_PROGRESS(dhdp) \
+ ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_SUSPEND_IN_PROGRESS)
+#define DHD_BUS_BUSY_CHECK_RESUME_IN_PROGRESS(dhdp) \
+ ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_RESUME_IN_PROGRESS)
+#define DHD_BUS_BUSY_CHECK_RPM_SUSPEND_IN_PROGRESS(dhdp) \
+ ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_RPM_SUSPEND_IN_PROGRESS)
+#define DHD_BUS_BUSY_CHECK_RPM_SUSPEND_DONE(dhdp) \
+ ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_RPM_SUSPEND_DONE)
+#define DHD_BUS_BUSY_CHECK_RPM_RESUME_IN_PROGRESS(dhdp) \
+ ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_RPM_RESUME_IN_PROGRESS)
+#define DHD_BUS_BUSY_CHECK_RPM_ALL(dhdp) \
+ ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_RPM_ALL)
+#define DHD_BUS_BUSY_CHECK_IN_CHECKDIED(dhdp) \
+ ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_CHECKDIED)
+#define DHD_BUS_BUSY_CHECK_IN_MEMDUMP(dhdp) \
+ ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_MEMDUMP)
+#define DHD_BUS_BUSY_CHECK_IN_SSSRDUMP(dhdp) \
+ ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_SSSRDUMP)
+#define DHD_BUS_BUSY_CHECK_IN_LOGDUMP(dhdp) \
+ ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_LOGDUMP)
+#define DHD_BUS_BUSY_CHECK_IN_HALDUMP(dhdp) \
+ ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_HALDUMP)
+#define DHD_BUS_BUSY_CHECK_IN_DS_DEASSERT(dhdp) \
+ ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_DS_DEASSERT)
+#define DHD_BUS_BUSY_CHECK_IDLE(dhdp) \
+ ((dhdp)->dhd_bus_busy_state == 0)
+
+#define DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhdp) \
+ ((dhdp)->busstate == DHD_BUS_SUSPEND || DHD_BUS_BUSY_CHECK_SUSPEND_IN_PROGRESS(dhdp))
+
+#define DHD_BUS_CHECK_ANY_SUSPEND_IN_PROGRESS(dhdp) \
+ (DHD_BUS_BUSY_CHECK_SUSPEND_IN_PROGRESS(dhdp) || \
+ DHD_BUS_BUSY_CHECK_RPM_SUSPEND_IN_PROGRESS(dhdp))
+
+#define DHD_BUS_CHECK_SUSPEND_OR_ANY_SUSPEND_IN_PROGRESS(dhdp) \
+ ((dhdp)->busstate == DHD_BUS_SUSPEND || DHD_BUS_CHECK_ANY_SUSPEND_IN_PROGRESS(dhdp))
+
+#define DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp) \
+ ((dhdp)->busstate == DHD_BUS_DOWN || (dhdp)->busstate == DHD_BUS_DOWN_IN_PROGRESS || \
+ (dhdp)->busstate == DHD_BUS_REMOVE)
+
+#define DHD_BUS_CHECK_REMOVE(dhdp) \
+ ((dhdp)->busstate == DHD_BUS_REMOVE)
+
+/* IOVar flags for common error checks */
+#define DHD_IOVF_PWRREQ_BYPASS (1<<0) /* flag to prevent backplane access during host sleep state */
+
+#define MAX_MTU_SZ (1600u)
+
+#ifdef PCIE_INB_DW
+#define DHD_CHECK_CFG_IN_PROGRESS(dhdp) \
+ ((INBAND_DW_ENAB((dhdp)->bus)) ? dhd_check_cfg_in_progress(dhdp) : FALSE)
+#else
+#define DHD_CHECK_CFG_IN_PROGRESS(dhdp) FALSE
+#endif /* PCIE_INB_DW */
+
+#ifndef USEC_PER_SEC
+#define USEC_PER_SEC (1000 * 1000)
+#endif
+#if (defined (LINUX) || defined(linux))
+/* (u64)result = (u64)dividend / (u64)divisor */
+#define DIV_U64_BY_U64(dividend, divisor) div64_u64(dividend, divisor)
+
+/* (u64)result = (u64)dividend / (u32)divisor */
+#define DIV_U64_BY_U32(dividend, divisor) div_u64(dividend, divisor)
+
+/* Be careful while using this, as it modifies the dividend in place:
+ * (u32)remainder = (u64)dividend % (u32)divisor
+ * (u64)dividend = (u64)dividend / (u32)divisor
+ */
+#define DIV_AND_MOD_U64_BY_U32(dividend, divisor) do_div(dividend, divisor)
+
+/* (u32)remainder = (u64)dividend % (u32)divisor */
+#define MOD_U64_BY_U32(dividend, divisor) ({ \
+ uint64 temp_dividend = (dividend); \
+ uint32 rem = DIV_AND_MOD_U64_BY_U32(temp_dividend, (divisor)); \
+ rem; \
+})
+
+#define SEC_USEC_FMT \
+ "%5llu.%06u"
+#else
+/* (u64)result = (u64)dividend / (u64)divisor */
+#define DIV_U64_BY_U64(dividend, divisor) (uint64)(dividend) / (uint64)(divisor)
+
+/* (u64)result = (u64)dividend / (u32)divisor */
+#define DIV_U64_BY_U32(dividend, divisor) (uint64)(dividend) / (uint32)(divisor)
+
+/* Be careful while using this, as it modifies the dividend in place:
+ * (u32)remainder = (u64)dividend % (u32)divisor
+ * (u64)dividend = (u64)dividend / (u32)divisor
+ */
+#define DIV_AND_MOD_U64_BY_U32(dividend, divisor) ({ \
+ uint32 rem = (uint64)(dividend) % (uint32)(divisor); \
+ (dividend) = (uint64)(dividend) / (uint32)(divisor); \
+ rem; \
+})
+
+/* (u32)remainder = (u64)dividend % (u32)divisor */
+#define MOD_U64_BY_U32(dividend, divisor) (uint32)((uint64)(dividend) % (uint32)(divisor))
+
+#define SEC_USEC_FMT \
+ "%015llu.%06u"
+#endif /* LINUX || linux */
+
+/* t: time in nanoseconds */
+#define GET_SEC_USEC(t) \
+ DIV_U64_BY_U32(t, NSEC_PER_SEC), \
+ ((uint32)(MOD_U64_BY_U32(t, NSEC_PER_SEC) / (uint32)NSEC_PER_USEC))
+
+/* Download Types */
+typedef enum download_type {
+ FW,
+ NVRAM,
+ CLM_BLOB,
+ TXCAP_BLOB
+} download_type_t;
+
+#if defined(NDIS)
+/* Firmware requested operation mode */
+#define STA_MASK 0x0001
+#define HOSTAPD_MASK 0x0002
+#define WFD_MASK 0x0004
+#define SOFTAP_FW_MASK 0x0008
+#define P2P_GO_ENABLED 0x0010
+#define P2P_GC_ENABLED 0x0020
+#define CONCURENT_MASK 0x00F0
+#endif /* #if defined(NDIS) */
+
+/* For supporting multiple interfaces */
+#define DHD_MAX_IFS 16
+#ifndef DHD_MAX_STATIC_IFS
+#define DHD_MAX_STATIC_IFS 1
+#endif
+#define DHD_DEL_IF -0xE
+#define DHD_BAD_IF -0xF
+#define DHD_DUMMY_INFO_IF 0xDEAF /* Hack i/f to handle events from INFO Ring */
+/* XXX to avoid build error for NDIS for the time being */
+#define DHD_EVENT_IF DHD_DUMMY_INFO_IF
+
+#if defined(LINUX) || defined(linux)
+enum dhd_op_flags {
+/* Firmware requested operation mode */
+ DHD_FLAG_STA_MODE = (1 << (0)), /* STA only */
+ DHD_FLAG_HOSTAP_MODE = (1 << (1)), /* SOFTAP only */
+ DHD_FLAG_P2P_MODE = (1 << (2)), /* P2P Only */
+ /* STA + P2P */
+ DHD_FLAG_CONCURR_SINGLE_CHAN_MODE = (DHD_FLAG_STA_MODE | DHD_FLAG_P2P_MODE),
+ /* STA + SoftAP */
+ DHD_FLAG_CONCURR_STA_HOSTAP_MODE = (DHD_FLAG_STA_MODE | DHD_FLAG_HOSTAP_MODE),
+ /* XXX MULTI_CHAN mode is meaningful only in concurrency mode */
+ DHD_FLAG_CONCURR_MULTI_CHAN_MODE = (1 << (4)), /* STA + P2P */
+ /* Current P2P mode for P2P connection */
+ DHD_FLAG_P2P_GC_MODE = (1 << (5)),
+ DHD_FLAG_P2P_GO_MODE = (1 << (6)),
+ DHD_FLAG_MBSS_MODE = (1 << (7)), /* MBSS in future */
+ DHD_FLAG_IBSS_MODE = (1 << (8)),
+ DHD_FLAG_MFG_MODE = (1 << (9)),
+ DHD_FLAG_RSDB_MODE = (1 << (10)),
+ DHD_FLAG_MP2P_MODE = (1 << (11))
+};
+#endif /* defined (LINUX) || defined(linux) */
+
+#if defined(BCMDONGLEHOST)
+#define DHD_OPMODE_SUPPORTED(dhd, opmode_flag) \
+ (dhd ? ((((dhd_pub_t *)dhd)->op_mode) & opmode_flag) : -1)
+#define DHD_OPMODE_STA_SOFTAP_CONCURR(dhd) \
+ (dhd ? (((((dhd_pub_t *)dhd)->op_mode) & DHD_FLAG_CONCURR_STA_HOSTAP_MODE) == \
+ DHD_FLAG_CONCURR_STA_HOSTAP_MODE) : 0)
+#else
+#define DHD_OPMODE_SUPPORTED(dhd, opmode_flag) -1
+#define DHD_OPMODE_STA_SOFTAP_CONCURR(dhd) 0
+#endif /* defined (BCMDONGLEHOST) */
+
+/* Max sequential TX/RX Control timeouts to set HANG event */
+#ifndef MAX_CNTL_TX_TIMEOUT
+#define MAX_CNTL_TX_TIMEOUT 2
+#endif /* MAX_CNTL_TX_TIMEOUT */
+#ifndef MAX_CNTL_RX_TIMEOUT
+#define MAX_CNTL_RX_TIMEOUT 1
+#endif /* MAX_CNTL_RX_TIMEOUT */
+
+#define DHD_SCAN_ASSOC_ACTIVE_TIME 40 /* ms: Embedded default Active setting from DHD */
+#ifndef CUSTOM_SCAN_UNASSOC_ACTIVE_TIME
+#define DHD_SCAN_UNASSOC_ACTIVE_TIME 80 /* ms: Embedded def. Unassoc Active setting from DHD */
+#else
+#define DHD_SCAN_UNASSOC_ACTIVE_TIME CUSTOM_SCAN_UNASSOC_ACTIVE_TIME
+#endif /* CUSTOM_SCAN_UNASSOC_ACTIVE_TIME */
+#define DHD_SCAN_HOME_TIME 45 /* ms: Embedded default Home time setting from DHD */
+#define DHD_SCAN_HOME_AWAY_TIME 100 /* ms: Embedded default Home Away time setting from DHD */
+#ifndef CUSTOM_SCAN_PASSIVE_TIME
+#define DHD_SCAN_PASSIVE_TIME 130 /* ms: Embedded default Passive setting from DHD */
+#else
+#define DHD_SCAN_PASSIVE_TIME CUSTOM_SCAN_PASSIVE_TIME /* ms: Custom Passive setting from DHD */
+#endif /* CUSTOM_SCAN_PASSIVE_TIME */
+
+#ifndef POWERUP_MAX_RETRY
+#define POWERUP_MAX_RETRY 3 /* how many times we retry to power up the chip */
+#endif
+#ifndef POWERUP_WAIT_MS
+#define POWERUP_WAIT_MS 2000 /* ms: timeout waiting for wifi to come up */
+#endif
+/*
+ * MAX_NVRAMBUF_SIZE determines the size of the buffer in the DHD that holds
+ * the NVRAM data, i.e. the size of the buffer pointed to by bus->vars.
+ * It is set to 24K to support NVRAM images larger than 16K.
+ */
+#define MAX_NVRAMBUF_SIZE (24 * 1024) /* max nvram buf size */
+#define MAX_CLM_BUF_SIZE (48 * 1024) /* max clm blob size */
+#define MAX_TXCAP_BUF_SIZE (16 * 1024) /* max txcap blob size */
+#ifdef DHD_DEBUG
+#define DHD_JOIN_MAX_TIME_DEFAULT 10000 /* ms: Max time out for joining AP */
+#define DHD_SCAN_DEF_TIMEOUT 10000 /* ms: Max time out for scan in progress */
+#endif /* DHD_DEBUG */
+
+#ifndef CONFIG_BCMDHD_CLM_PATH
+#ifdef OEM_ANDROID
+#if defined(CUSTOMER_HW4) && defined(PLATFORM_SLP)
+#define CONFIG_BCMDHD_CLM_PATH "/lib/firmware/bcmdhd_clm.blob"
+#else
+#define CONFIG_BCMDHD_CLM_PATH "/etc/wifi/bcmdhd_clm.blob"
+#endif /* CUSTOMER_HW4 && PLATFORM_SLP */
+#elif defined(LINUX) || defined(linux)
+#define CONFIG_BCMDHD_CLM_PATH "/var/run/bcmdhd_clm.blob"
+#else
+/* clm download will fail on empty path */
+#define CONFIG_BCMDHD_CLM_PATH ""
+#endif /* OEM_ANDROID */
+#endif /* CONFIG_BCMDHD_CLM_PATH */
+#define WL_CCODE_NULL_COUNTRY "#n"
+
+#ifdef DHD_EFI
+#define FW_VER_STR_LEN 256
+#else
+#define FW_VER_STR_LEN 128
+#endif
+#define FWID_STR_LEN 256
+#define CLM_VER_STR_LEN 128
+#define BUS_API_REV_STR_LEN 128
+#define FW_VER_STR "Version"
+#define FWID_STR_1 "FWID: 01-"
+#define FWID_STR_2 "FWID=01-"
+extern char bus_api_revision[];
+
+enum dhd_bus_wake_state {
+ WAKE_LOCK_OFF = 0,
+ WAKE_LOCK_PRIV = 1,
+ WAKE_LOCK_DPC = 2,
+ WAKE_LOCK_IOCTL = 3,
+ WAKE_LOCK_DOWNLOAD = 4,
+ WAKE_LOCK_TMOUT = 5,
+ WAKE_LOCK_WATCHDOG = 6,
+ WAKE_LOCK_LINK_DOWN_TMOUT = 7,
+ WAKE_LOCK_PNO_FIND_TMOUT = 8,
+ WAKE_LOCK_SOFTAP_SET = 9,
+ WAKE_LOCK_SOFTAP_STOP = 10,
+ WAKE_LOCK_SOFTAP_START = 11,
+ WAKE_LOCK_SOFTAP_THREAD = 12
+};
+
+enum {
+ EVENT_BUF_POOL_LOW = 32,
+ EVENT_BUF_POOL_MEDIUM = 64,
+ EVENT_BUF_POOL_HIGH = 128,
+ EVENT_BUF_POOL_HIGHEST = 256
+};
+
+#ifdef PCIE_INB_DW
+enum dhd_bus_ds_state {
+ DW_DEVICE_DS_INVALID = -1,
+ DW_DEVICE_DS_DEV_SLEEP = 0,
+ DW_DEVICE_DS_DEV_SLEEP_PEND = 1,
+ DW_DEVICE_DS_DISABLED_WAIT = 2,
+ DW_DEVICE_DS_DEV_WAKE = 3,
+ DW_DEVICE_DS_ACTIVE = 4,
+ DW_DEVICE_HOST_SLEEP_WAIT = 5,
+ DW_DEVICE_HOST_SLEEP = 6,
+ DW_DEVICE_HOST_WAKE_WAIT = 7,
+ DW_DEVICE_DS_D3_INFORM_WAIT = 8
+};
+#endif /* PCIE_INB_DW */
+
+enum dhd_prealloc_index {
+ DHD_PREALLOC_PROT = 0,
+ DHD_PREALLOC_RXBUF = 1,
+ DHD_PREALLOC_DATABUF = 2,
+ DHD_PREALLOC_OSL_BUF = 3,
+ DHD_PREALLOC_SKB_BUF = 4,
+ DHD_PREALLOC_WIPHY_ESCAN0 = 5,
+ DHD_PREALLOC_WIPHY_ESCAN1 = 6,
+ DHD_PREALLOC_DHD_INFO = 7,
+ DHD_PREALLOC_DHD_WLFC_INFO = 8,
+ DHD_PREALLOC_IF_FLOW_LKUP = 9,
+ /* 10 */
+ DHD_PREALLOC_MEMDUMP_RAM = 11,
+ DHD_PREALLOC_DHD_WLFC_HANGER = 12,
+ DHD_PREALLOC_PKTID_MAP = 13,
+ DHD_PREALLOC_PKTID_MAP_IOCTL = 14,
+ DHD_PREALLOC_DHD_LOG_DUMP_BUF = 15,
+ DHD_PREALLOC_DHD_LOG_DUMP_BUF_EX = 16,
+ DHD_PREALLOC_DHD_PKTLOG_DUMP_BUF = 17,
+ DHD_PREALLOC_STAT_REPORT_BUF = 18,
+ DHD_PREALLOC_WL_ESCAN = 19,
+ DHD_PREALLOC_FW_VERBOSE_RING = 20,
+ DHD_PREALLOC_FW_EVENT_RING = 21,
+ DHD_PREALLOC_DHD_EVENT_RING = 22,
+ DHD_PREALLOC_NAN_EVENT_RING = 23
+};
+
+enum dhd_dongledump_mode {
+ DUMP_DISABLED = 0,
+ DUMP_MEMONLY = 1,
+ DUMP_MEMFILE = 2,
+ DUMP_MEMFILE_BUGON = 3,
+ DUMP_MEMFILE_MAX = 4
+};
+
+enum dhd_dongledump_type {
+ DUMP_TYPE_RESUMED_ON_TIMEOUT = 1,
+ DUMP_TYPE_D3_ACK_TIMEOUT = 2,
+ DUMP_TYPE_DONGLE_TRAP = 3,
+ DUMP_TYPE_MEMORY_CORRUPTION = 4,
+ DUMP_TYPE_PKTID_AUDIT_FAILURE = 5,
+ DUMP_TYPE_PKTID_INVALID = 6,
+ DUMP_TYPE_SCAN_TIMEOUT = 7,
+ DUMP_TYPE_SCAN_BUSY = 8,
+ DUMP_TYPE_BY_SYSDUMP = 9,
+ DUMP_TYPE_BY_LIVELOCK = 10,
+ DUMP_TYPE_AP_LINKUP_FAILURE = 11,
+ DUMP_TYPE_AP_ABNORMAL_ACCESS = 12,
+ DUMP_TYPE_CFG_VENDOR_TRIGGERED = 13,
+ DUMP_TYPE_RESUMED_ON_TIMEOUT_TX = 14,
+ DUMP_TYPE_RESUMED_ON_TIMEOUT_RX = 15,
+ DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR = 16,
+ DUMP_TYPE_TRANS_ID_MISMATCH = 17,
+ DUMP_TYPE_IFACE_OP_FAILURE = 18,
+ DUMP_TYPE_DONGLE_INIT_FAILURE = 19,
+ DUMP_TYPE_READ_SHM_FAIL = 20,
+ DUMP_TYPE_DONGLE_HOST_EVENT = 21,
+ DUMP_TYPE_SMMU_FAULT = 22,
+ DUMP_TYPE_RESUMED_UNKNOWN = 23,
+ DUMP_TYPE_DUE_TO_BT = 24,
+ DUMP_TYPE_LOGSET_BEYOND_RANGE = 25,
+ DUMP_TYPE_BY_USER = 26,
+ DUMP_TYPE_CTO_RECOVERY = 27,
+ DUMP_TYPE_SEQUENTIAL_PRIVCMD_ERROR = 28,
+ DUMP_TYPE_PROXD_TIMEOUT = 29,
+ DUMP_TYPE_INBAND_DEVICE_WAKE_FAILURE = 30,
+ DUMP_TYPE_PKTID_POOL_DEPLETED = 31,
+ DUMP_TYPE_ESCAN_SYNCID_MISMATCH = 32,
+ DUMP_TYPE_INVALID_SHINFO_NRFRAGS = 33
+};
+
+enum dhd_hang_reason {
+ HANG_REASON_MASK = 0x8000,
+ HANG_REASON_IOCTL_RESP_TIMEOUT = 0x8001,
+ HANG_REASON_DONGLE_TRAP = 0x8002,
+ HANG_REASON_D3_ACK_TIMEOUT = 0x8003,
+ HANG_REASON_BUS_DOWN = 0x8004,
+ HANG_REASON_MSGBUF_LIVELOCK = 0x8006,
+ HANG_REASON_IFACE_DEL_FAILURE = 0x8007,
+ HANG_REASON_HT_AVAIL_ERROR = 0x8008,
+ HANG_REASON_PCIE_RC_LINK_UP_FAIL = 0x8009,
+ HANG_REASON_PCIE_PKTID_ERROR = 0x800A,
+ HANG_REASON_IFACE_ADD_FAILURE = 0x800B,
+ HANG_REASON_IOCTL_RESP_TIMEOUT_SCHED_ERROR = 0x800C,
+ HANG_REASON_D3_ACK_TIMEOUT_SCHED_ERROR = 0x800D,
+ HANG_REASON_SEQUENTIAL_PRIVCMD_ERROR = 0x800E,
+ HANG_REASON_SCAN_BUSY = 0x800F,
+ HANG_REASON_BSS_UP_FAILURE = 0x8010,
+ HANG_REASON_BSS_DOWN_FAILURE = 0x8011,
+ HANG_REASON_IOCTL_SUSPEND_ERROR = 0x8012,
+ HANG_REASON_ESCAN_SYNCID_MISMATCH = 0x8013,
+ HANG_REASON_PCIE_LINK_DOWN_RC_DETECT = 0x8805,
+ HANG_REASON_INVALID_EVENT_OR_DATA = 0x8806,
+ HANG_REASON_UNKNOWN = 0x8807,
+ HANG_REASON_PCIE_LINK_DOWN_EP_DETECT = 0x8808,
+ HANG_REASON_PCIE_CTO_DETECT = 0x8809,
+ HANG_REASON_MAX = 0x880A
+};
+
+#define WLC_E_DEAUTH_MAX_REASON 0x0FFF
+
+enum dhd_rsdb_scan_features {
+ /* Downgraded scan feature for AP active */
+ RSDB_SCAN_DOWNGRADED_AP_SCAN = 0x01,
+ /* Downgraded scan feature for P2P Discovery */
+ RSDB_SCAN_DOWNGRADED_P2P_DISC_SCAN = 0x02,
+ /* Enable channel pruning for ROAM SCAN */
+ RSDB_SCAN_DOWNGRADED_CH_PRUNE_ROAM = 0x10,
+ /* Enable channel pruning for any SCAN */
+ RSDB_SCAN_DOWNGRADED_CH_PRUNE_ALL = 0x20
+};
+
+#define VENDOR_SEND_HANG_EXT_INFO_LEN (800 + 1)
+#ifdef DHD_EWPR_VER2
+#define VENDOR_SEND_HANG_EXT_INFO_VER 20181111
+#else
+#define VENDOR_SEND_HANG_EXT_INFO_VER 20170905
+#endif /* DHD_EWPR_VER2 */
+
+#define HANG_INFO_TRAP_T_NAME_MAX 6
+#define HANG_INFO_TRAP_T_REASON_IDX 0
+#define HANG_INFO_TRAP_T_SUBTYPE_IDX 2
+#define HANG_INFO_TRAP_T_OFFSET_IDX 3
+#define HANG_INFO_TRAP_T_EPC_IDX 4
+#define HANG_FIELD_STR_MAX_LEN 9
+#define HANG_FIELD_CNT_MAX 69
+#define HANG_FIELD_IF_FAILURE_CNT 10
+#define HANG_FIELD_IOCTL_RESP_TIMEOUT_CNT 8
+#define HANG_FIELD_TRAP_T_STACK_CNT_MAX 16
+#define HANG_FIELD_MISMATCH_CNT 10
+#define HANG_INFO_BIGDATA_KEY_STACK_CNT 4
+
+#define DEBUG_DUMP_TIME_BUF_LEN (16 + 1)
+/* delimiter between values */
+#define HANG_KEY_DEL ' '
+#define HANG_RAW_DEL '_'
+
+#ifdef DHD_EWPR_VER2
+#define HANG_INFO_BIGDATA_EXTRA_KEY 4
+#define HANG_INFO_TRAP_T_EXTRA_KEY_IDX 5
+#endif
+
+/* Packet alignment for most efficient SDIO (can change based on platform) */
+#ifndef DHD_SDALIGN
+#define DHD_SDALIGN 32
+#endif
+
+#define DHD_TX_CONTEXT_MASK 0xff
+#define DHD_TX_START_XMIT 0x01
+#define DHD_TX_SEND_PKT 0x02
+#define DHD_IF_SET_TX_ACTIVE(ifp, context) \
+ ifp->tx_paths_active |= context;
+#define DHD_IF_CLR_TX_ACTIVE(ifp, context) \
+ ifp->tx_paths_active &= ~context;
+#define DHD_IF_IS_TX_ACTIVE(ifp) \
+ (ifp->tx_paths_active)
+/**
+ * DMA-able buffer parameters
+ * - dmaaddr_t is 32 bits on a 32-bit host;
+ *   dhd_dma_buf::pa may not be used as a sh_addr_t, bcm_addr64_t or uintptr.
+ * - dhd_dma_buf::_alloced is ONLY for freeing a DMA-able buffer.
+ */
+typedef struct dhd_dma_buf {
+ void *va; /* virtual address of buffer */
+ uint32 len; /* user requested buffer length */
+ dmaaddr_t pa; /* physical address of buffer */
+ void *dmah; /* dma mapper handle */
+ void *secdma; /* secure dma sec_cma_info handle */
+ uint32 _alloced; /* actual size of buffer allocated with align and pad */
+} dhd_dma_buf_t;
+
+/* host packet reordering logic */
+/* structure to hold the reorder buffers (void **p) */
+typedef struct reorder_info {
+ void **p;
+ uint8 flow_id;
+ uint8 cur_idx;
+ uint8 exp_idx;
+ uint8 max_idx;
+ uint8 pend_pkts;
+} reorder_info_t;
+
+/* throughput test packet format */
+typedef struct tput_pkt {
+ /* header */
+ uint8 mac_sta[ETHER_ADDR_LEN];
+ uint8 mac_ap[ETHER_ADDR_LEN];
+ uint16 pkt_type;
+ uint8 PAD[2];
+ /* data */
+ uint32 crc32;
+ uint32 pkt_id;
+ uint32 num_pkts;
+} tput_pkt_t;
+
+typedef enum {
+ TPUT_PKT_TYPE_NORMAL,
+ TPUT_PKT_TYPE_STOP
+} tput_pkt_type_t;
+
+#define TPUT_TEST_MAX_PAYLOAD 1500
+#define TPUT_TEST_WAIT_TIMEOUT_DEFAULT 5000
+
+#ifdef DHDTCPACK_SUPPRESS
+
+enum {
+ /* TCPACK suppress off */
+ TCPACK_SUP_OFF,
+ /* Replace a TCPACK in the txq when a newly arriving one has a higher ACK number. */
+ TCPACK_SUP_REPLACE,
+ /* TCPACK_SUP_REPLACE + delayed TCPACK TX, unless the ACK is to PSH DATA.
+ * This benefits a half-duplex bus interface (e.g. SDIO) in that
+ * 1. we are able to read TCP DATA packets from the bus first, and
+ * 2. TCPACKs that need not be delivered urgently remain longer in the TXQ and can be suppressed.
+ */
+ TCPACK_SUP_DELAYTX,
+ TCPACK_SUP_HOLD,
+ TCPACK_SUP_LAST_MODE
+};
+#endif /* DHDTCPACK_SUPPRESS */
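+/*
+ * Usage sketch (illustrative only): choosing a suppress mode at init time.
+ * dhd_tcpack_suppress_set() is assumed to be the mode setter exported by the
+ * TCPACK module, and 'bus_is_half_duplex' is a hypothetical predicate. On a
+ * half-duplex bus, TCPACK_SUP_DELAYTX trades a little ACK latency for fewer
+ * bus turnarounds.
+ *
+ *	uint8 mode = bus_is_half_duplex ? TCPACK_SUP_DELAYTX : TCPACK_SUP_REPLACE;
+ *	dhd_tcpack_suppress_set(dhdp, mode);
+ */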
+
+#if defined(BCM_ROUTER_DHD)
+#define DHD_DWM_TBL_SIZE 57
+/* DSCP WMM AC Mapping macros and structures */
+#define DHD_TRF_MGMT_DWM_FILTER_BIT 0x8
+#define DHD_TRF_MGMT_DWM_PRIO_BITS 0x7
+#define DHD_TRF_MGMT_DWM_FAVORED_BIT 0x10
+#define DHD_TRF_MGMT_DWM_PRIO(dwm_tbl_entry) ((dwm_tbl_entry) & DHD_TRF_MGMT_DWM_PRIO_BITS)
+#define DHD_TRF_MGMT_DWM_IS_FAVORED_SET(dwm_tbl_entry) \
+ ((dwm_tbl_entry) & DHD_TRF_MGMT_DWM_FAVORED_BIT)
+#define DHD_TRF_MGMT_DWM_SET_FAVORED(dwm_tbl_entry) \
+ ((dwm_tbl_entry) |= DHD_TRF_MGMT_DWM_FAVORED_BIT)
+#define DHD_TRF_MGMT_DWM_IS_FILTER_SET(dwm_tbl_entry) \
+ ((dwm_tbl_entry) & DHD_TRF_MGMT_DWM_FILTER_BIT)
+#define DHD_TRF_MGMT_DWM_SET_FILTER(dwm_tbl_entry) \
+ ((dwm_tbl_entry) |= DHD_TRF_MGMT_DWM_FILTER_BIT)
+
+typedef struct {
+ uint8 dhd_dwm_enabled;
+ uint8 dhd_dwm_tbl[DHD_DWM_TBL_SIZE];
+} dhd_trf_mgmt_dwm_tbl_t;
+#endif /* BCM_ROUTER_DHD */
+
+#define DHD_NULL_CHK_AND_RET(cond) \
+	if (!(cond)) { \
+		DHD_ERROR(("%s " #cond " is NULL\n", __FUNCTION__)); \
+		return; \
+	}
+
+#define DHD_NULL_CHK_AND_RET_VAL(cond, value) \
+	if (!(cond)) { \
+		DHD_ERROR(("%s " #cond " is NULL\n", __FUNCTION__)); \
+		return value; \
+	}
+
+#define DHD_NULL_CHK_AND_GOTO(cond, label) \
+	if (!(cond)) { \
+		DHD_ERROR(("%s " #cond " is NULL\n", __FUNCTION__)); \
+		goto label; \
+	}
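+/*
+ * Usage sketch (illustrative only; both functions are hypothetical). The
+ * macros log through DHD_ERROR and then return or jump, so a caller only
+ * writes the happy path:
+ *
+ *	void dhd_example_notify(dhd_pub_t *dhdp)
+ *	{
+ *		DHD_NULL_CHK_AND_RET(dhdp);
+ *		// ... dhdp is known non-NULL here ...
+ *	}
+ *
+ *	int dhd_example_query(dhd_pub_t *dhdp)
+ *	{
+ *		DHD_NULL_CHK_AND_RET_VAL(dhdp, BCME_BADARG);
+ *		return BCME_OK;
+ *	}
+ */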
+
+/*
+ * The queue lengths of all flowring queues are accumulated in a parent object
+ * so that flow control can be asserted when the cumulative queue length
+ * crosses an upper threshold defined on that parent object. The threshold may
+ * be maintained per station, per interface, or per dhd instance.
+ *
+ * cumm_ctr_t abstraction:
+ * The cumm_ctr_t abstraction may be enhanced to use an object with a
+ * hysteresis pause on/off threshold callback.
+ * All macros take the address of the cumulative length in the parent object.
+ *
+ * Cumulative counters in parent objects may be updated without spinlocks.
+ *
+ * If a cumulative queue length is desired across all flows belonging to
+ * a station, an interface, or a dhd instance, then an atomic operation is
+ * required, using either an atomic_t cumulative counter or a spinlock.
+ * BCM_ROUTER_DHD uses the Linux atomic_t construct.
+ */
+#if defined(BCM_ROUTER_DHD)
+
+typedef atomic_t cumm_ctr_t; /* BCM_ROUTER_DHD Linux: atomic operations */
+#define DHD_CUMM_CTR_PTR(clen) ((cumm_ctr_t*)(clen))
+#define DHD_CUMM_CTR(clen) DHD_CUMM_CTR_PTR(clen) /* atomic accessor */
+#define DHD_CUMM_CTR_READ(clen) atomic_read(DHD_CUMM_CTR(clen)) /* read */
+#define DHD_CUMM_CTR_INIT(clen) \
+ ASSERT(DHD_CUMM_CTR_PTR(clen) != DHD_CUMM_CTR_PTR(NULL)); \
+ atomic_set(DHD_CUMM_CTR(clen), 0);
+#define DHD_CUMM_CTR_INCR(clen) \
+ ASSERT(DHD_CUMM_CTR_PTR(clen) != DHD_CUMM_CTR_PTR(NULL)); \
+ atomic_add(1, DHD_CUMM_CTR(clen)); \
+ ASSERT(DHD_CUMM_CTR_READ(clen) != 0); /* ensure it does not wrap */
+#define DHD_CUMM_CTR_DECR(clen) \
+ ASSERT(DHD_CUMM_CTR_PTR(clen) != DHD_CUMM_CTR_PTR(NULL)); \
+ ASSERT(DHD_CUMM_CTR_READ(clen) > 0); \
+ atomic_sub(1, DHD_CUMM_CTR(clen));
+
+#else /* ! BCM_ROUTER_DHD */
+
+/* Cumulative length not supported. */
+typedef uint32 cumm_ctr_t;
+#define DHD_CUMM_CTR_PTR(clen) ((cumm_ctr_t*)(clen))
+#define DHD_CUMM_CTR(clen) *(DHD_CUMM_CTR_PTR(clen)) /* accessor */
+#define DHD_CUMM_CTR_READ(clen) DHD_CUMM_CTR(clen) /* read access */
+#define DHD_CUMM_CTR_INIT(clen) \
+ ASSERT(DHD_CUMM_CTR_PTR(clen) != DHD_CUMM_CTR_PTR(NULL));
+#define DHD_CUMM_CTR_INCR(clen) \
+ ASSERT(DHD_CUMM_CTR_PTR(clen) != DHD_CUMM_CTR_PTR(NULL));
+#define DHD_CUMM_CTR_DECR(clen) \
+ ASSERT(DHD_CUMM_CTR_PTR(clen) != DHD_CUMM_CTR_PTR(NULL));
+
+#endif /* ! BCM_ROUTER_DHD */
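+/*
+ * Usage sketch (illustrative only): every flowring enqueue/dequeue updates the
+ * parent's counter through these macros. dhd_pub_t::cumm_ctr (declared below)
+ * is one such parent placeholder; 'threshold' is hypothetical. Note that in
+ * the non-router flavor the macros compile to bare assertions, so the read is
+ * only meaningful when BCM_ROUTER_DHD is defined.
+ *
+ *	DHD_CUMM_CTR_INIT(&dhdp->cumm_ctr);
+ *	DHD_CUMM_CTR_INCR(&dhdp->cumm_ctr);	// on enqueue
+ *	if (DHD_CUMM_CTR_READ(&dhdp->cumm_ctr) > threshold) {
+ *		// assert flow control on the parent
+ *	}
+ *	DHD_CUMM_CTR_DECR(&dhdp->cumm_ctr);	// on dequeue
+ */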
+
+#if defined(WLTDLS) && defined(PCIE_FULL_DONGLE)
+struct tdls_peer_node {
+ uint8 addr[ETHER_ADDR_LEN];
+ struct tdls_peer_node *next;
+};
+typedef struct tdls_peer_node tdls_peer_node_t;
+typedef struct {
+ tdls_peer_node_t *node;
+ uint8 tdls_peer_count;
+} tdls_peer_tbl_t;
+#endif /* defined(WLTDLS) && defined(PCIE_FULL_DONGLE) */
+
+typedef enum dhd_ring_id {
+ DEBUG_RING_ID_INVALID = 0x1,
+ FW_VERBOSE_RING_ID = 0x2,
+ DHD_EVENT_RING_ID = 0x3,
+ DRIVER_LOG_RING_ID = 0x4,
+ ROAM_STATS_RING_ID = 0x5,
+ BT_LOG_RING_ID = 0x6,
+ DEBUG_RING_ID_MAX = 0x7
+} dhd_ring_id_t;
+
+#ifdef DHD_LOG_DUMP
+#define DUMP_SSSR_ATTR_START 2
+#define DUMP_SSSR_ATTR_COUNT 10
+
+typedef enum {
+ SSSR_C0_D11_BEFORE = 0,
+ SSSR_C0_D11_AFTER = 1,
+ SSSR_C1_D11_BEFORE = 2,
+ SSSR_C1_D11_AFTER = 3,
+ SSSR_C2_D11_BEFORE = 4,
+ SSSR_C2_D11_AFTER = 5,
+ SSSR_DIG_BEFORE = 6,
+ SSSR_DIG_AFTER = 7
+} EWP_SSSR_DUMP;
+
+typedef enum {
+ DLD_BUF_TYPE_GENERAL = 0,
+ DLD_BUF_TYPE_PRESERVE = 1,
+ DLD_BUF_TYPE_SPECIAL = 2,
+ DLD_BUF_TYPE_ECNTRS = 3,
+ DLD_BUF_TYPE_FILTER = 4,
+ DLD_BUF_TYPE_ALL = 5
+} log_dump_type_t;
+
+#ifdef DHD_DEBUGABILITY_LOG_DUMP_RING
+struct dhd_dbg_ring_buf
+{
+ void *dhd_pub;
+};
+#endif /* DHD_DEBUGABILITY_LOG_DUMP_RING */
+
+#define LOG_DUMP_MAGIC 0xDEB3DEB3
+#define HEALTH_CHK_BUF_SIZE 256
+#ifdef EWP_ECNTRS_LOGGING
+#define ECNTR_RING_ID 0xECDB
+#define ECNTR_RING_NAME "ewp_ecntr_ring"
+#endif /* EWP_ECNTRS_LOGGING */
+
+#ifdef EWP_RTT_LOGGING
+#define RTT_RING_ID 0xADCD
+#define RTT_RING_NAME "ewp_rtt_ring"
+#endif /* EWP_RTT_LOGGING */
+
+#ifdef EWP_BCM_TRACE
+#define BCM_TRACE_RING_ID 0xBCBC
+#define BCM_TRACE_RING_NAME "ewp_bcm_trace_ring"
+#endif /* EWP_BCM_TRACE */
+
+/*
+ * XXX: Always add new enums at the end to stay compatible with the parser;
+ * also add a new section in split_ret of EWP_config.py
+ */
+typedef enum {
+ LOG_DUMP_SECTION_GENERAL = 0,
+ LOG_DUMP_SECTION_ECNTRS,
+ LOG_DUMP_SECTION_SPECIAL,
+ LOG_DUMP_SECTION_DHD_DUMP,
+ LOG_DUMP_SECTION_EXT_TRAP,
+ LOG_DUMP_SECTION_HEALTH_CHK,
+ LOG_DUMP_SECTION_PRESERVE,
+ LOG_DUMP_SECTION_COOKIE,
+ LOG_DUMP_SECTION_FLOWRING,
+ LOG_DUMP_SECTION_STATUS,
+ LOG_DUMP_SECTION_RTT,
+ LOG_DUMP_SECTION_BCM_TRACE
+} log_dump_section_type_t;
+
+/* Each section in the debug_dump log file shall begin with a header */
+typedef struct {
+ uint32 magic; /* 0xDEB3DEB3 */
+ uint32 type; /* of type log_dump_section_type_t */
+ uint64 timestamp;
+ uint32 length; /* length of the section that follows */
+} log_dump_section_hdr_t;
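+/*
+ * Usage sketch (illustrative only; write_example(), 'payload' and
+ * 'payload_len' are hypothetical): each section payload in the debug_dump
+ * file is preceded by one such header, so a parser can skip unknown section
+ * types by 'length'.
+ *
+ *	log_dump_section_hdr_t hdr;
+ *	hdr.magic = LOG_DUMP_MAGIC;
+ *	hdr.type = LOG_DUMP_SECTION_GENERAL;
+ *	hdr.timestamp = local_clock();	// Linux ns timestamp, as an example
+ *	hdr.length = payload_len;
+ *	write_example(&hdr, sizeof(hdr));
+ *	write_example(payload, payload_len);
+ */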
+
+/* The structure below describes the ring buffer. */
+struct dhd_log_dump_buf
+{
+#if defined(LINUX) || defined(linux) || defined(ANDROID) || defined(OEM_ANDROID)
+ spinlock_t lock;
+#endif
+ void *dhd_pub;
+ unsigned int enable;
+ unsigned int wraparound;
+ unsigned long max;
+ unsigned int remain;
+ char* present;
+ char* front;
+ char* buffer;
+};
+
+#define DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE 256
+#define DHD_LOG_DUMP_MAX_TAIL_FLUSH_SIZE (80 * 1024)
+
+extern void dhd_log_dump_write(int type, char *binary_data,
+ int binary_len, const char *fmt, ...);
+#endif /* DHD_LOG_DUMP */
+
+/* DEBUG_DUMP SUB COMMAND */
+enum {
+ CMD_DEFAULT,
+ CMD_UNWANTED,
+ CMD_DISCONNECTED,
+ CMD_MAX
+};
+
+#define DHD_LOG_DUMP_TS_MULTIPLIER_VALUE 60
+#define DHD_LOG_DUMP_TS_FMT_YYMMDDHHMMSSMSMS "%02d%02d%02d%02d%02d%02d%04d"
+#define DHD_LOG_DUMP_TS_FMT_YYMMDDHHMMSS "%02d%02d%02d%02d%02d%02d"
+#define DHD_DEBUG_DUMP_TYPE "debug_dump"
+#define DHD_DUMP_SUBSTR_UNWANTED "_unwanted"
+#define DHD_DUMP_SUBSTR_DISCONNECTED "_disconnected"
+
+#ifdef DNGL_AXI_ERROR_LOGGING
+#define DHD_DUMP_AXI_ERROR_FILENAME "axi_error"
+#define DHD_DUMP_HAL_FILENAME_SUFFIX "_hal"
+#endif /* DNGL_AXI_ERROR_LOGGING */
+
+extern void get_debug_dump_time(char *str);
+extern void clear_debug_dump_time(char *str);
+#if defined(WL_CFGVENDOR_SEND_HANG_EVENT) || defined(DHD_PKT_LOGGING)
+extern void copy_debug_dump_time(char *dest, char *src);
+#endif /* WL_CFGVENDOR_SEND_HANG_EVENT || DHD_PKT_LOGGING */
+
+#define FW_LOGSET_MASK_ALL 0xFFFFu
+
+#if defined(CUSTOMER_HW4)
+#ifndef DHD_COMMON_DUMP_PATH
+#define DHD_COMMON_DUMP_PATH "/data/log/wifi/"
+#endif /* !DHD_COMMON_DUMP_PATH */
+#elif defined(CUSTOMER_HW2_DEBUG)
+#define DHD_COMMON_DUMP_PATH PLATFORM_PATH
+#elif defined(BOARD_HIKEY)
+#define DHD_COMMON_DUMP_PATH "/data/misc/wifi/"
+#elif defined(CUSTOMER_HW_AMLOGIC)
+#define DHD_COMMON_DUMP_PATH "/data/vendor/misc/wifi/"
+#elif defined(OEM_ANDROID) && defined(__ARM_ARCH_7A__)
+#define DHD_COMMON_DUMP_PATH "/data/vendor/wifi/"
+#elif defined(OEM_ANDROID) /* For Brix Live Image */
+#define DHD_COMMON_DUMP_PATH "/installmedia/"
+#else /* Default */
+#define DHD_COMMON_DUMP_PATH "/root/"
+#endif /* CUSTOMER_HW4 */
+
+#define DHD_MEMDUMP_LONGSTR_LEN 180
+
+struct cntry_locales_custom {
+ char iso_abbrev[WLC_CNTRY_BUF_SZ]; /* ISO 3166-1 country abbreviation */
+ char custom_locale[WLC_CNTRY_BUF_SZ]; /* Custom firmware locale */
+	int32 custom_locale_rev;		/* Custom locale revision, default -1 */
+};
+
+#ifdef DHD_PKTTS
+#if defined(linux) || defined(LINUX)
+extern uint dhd_msgbuf_get_ipv6_id(void *pkt);
+#else
+static INLINE uint dhd_msgbuf_get_ipv6_id(void *pkt) { return 0; }
+#endif /* linux || LINUX */
+int dhd_send_msg_to_ts(struct sk_buff *skb, void *data, int size);
+#endif /* DHD_PKTTS */
+
+#if defined(LINUX) || defined(linux)
+int dhd_send_msg_to_daemon(struct sk_buff *skb, void *data, int size);
+#endif /* LINUX || linux */
+#ifdef REPORT_FATAL_TIMEOUTS
+typedef struct timeout_info {
+ void *scan_timer_lock;
+ void *join_timer_lock;
+ void *cmd_timer_lock;
+ void *bus_timer_lock;
+ uint32 scan_timeout_val;
+ uint32 join_timeout_val;
+ uint32 cmd_timeout_val;
+ uint32 bus_timeout_val;
+ bool scan_timer_active;
+ bool join_timer_active;
+ bool cmd_timer_active;
+ bool bus_timer_active;
+ osl_timer_t *scan_timer;
+ osl_timer_t *join_timer;
+ osl_timer_t *cmd_timer;
+ osl_timer_t *bus_timer;
+ uint32 cmd_request_id;
+ uint32 cmd;
+ uint32 cmd_join_error;
+ uint16 escan_syncid;
+ bool escan_aborted;
+ uint16 abort_syncid;
+} timeout_info_t;
+#endif /* REPORT_FATAL_TIMEOUTS */
+
+#ifdef DMAMAP_STATS
+typedef struct dmamap_stats {
+ uint64 txdata;
+ uint64 txdata_sz;
+ uint64 rxdata;
+ uint64 rxdata_sz;
+ uint64 ioctl_rx;
+ uint64 ioctl_rx_sz;
+ uint64 event_rx;
+ uint64 event_rx_sz;
+ uint64 info_rx;
+ uint64 info_rx_sz;
+ uint64 tsbuf_rx;
+ uint64 tsbuf_rx_sz;
+} dma_stats_t;
+#endif /* DMAMAP_STATS */
+
+#ifdef BT_OVER_PCIE
+enum dhd_bus_quiesce_state {
+ DHD_QUIESCE_INIT = 0,
+ REQUEST_BT_QUIESCE = 1,
+ RESPONSE_BT_QUIESCE = 2,
+ REQUEST_BT_RESUME = 3,
+ RESPONSE_BT_RESUME = 4
+};
+#endif /* BT_OVER_PCIE */
+
+/* see wlfc_proto.h for tx status details */
+#define DHD_MAX_TX_STATUS_MSGS 9u
+
+#ifdef TX_STATUS_LATENCY_STATS
+typedef struct dhd_if_tx_status_latency {
+ /* total number of tx_status received on this interface */
+ uint64 num_tx_status;
+ /* cumulative tx_status latency for this interface */
+ uint64 cum_tx_status_latency;
+} dhd_if_tx_status_latency_t;
+#endif /* TX_STATUS_LATENCY_STATS */
+
+#if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS)
+#define AWDL_NUM_SLOTS 16u /* 0 to 15 are the AWDL slots FW operates on */
+#define AWDL_SLOT_MULT 4u /* AWDL slot information sent by FW is in multiples of 4 */
+typedef struct dhd_awdl_statistics {
+ uint64 slot_start_time; /* AWDL slot start time in us */
+ uint64 cum_slot_time; /* Cumulative time for which this AWDL slot was active */
+ uint64 num_slots; /* Number of times this AWDL slot was active */
+ uint64 cum_tx_status_latency; /* cum tx_status latency while this AWDL slot is active */
+ uint64 num_tx_status; /* Num of AWDL(flowring with role as AWDL) tx status received */
+ uint64 fw_cum_slot_time; /* Cumulative FW time for which this AWDL slot was active */
+ uint32 fw_slot_start_time; /* AWDL slot start time sent by FW in us */
+#if defined(BCMDBG)
+ uint32 tx_status[DHD_MAX_TX_STATUS_MSGS]; /* Dongle return val wrt TX packet sent out */
+#endif /* BCMDBG */
+} dhd_awdl_stats_t;
+#endif /* DHD_AWDL && AWDL_SLOT_STATS */
+
+/* Bit in dhd_pub_t::gdb_proxy_stop_count set when firmware is stopped by GDB */
+#define GDB_PROXY_STOP_MASK 1
+
+/* Enable Reserve STA flowrings only for Android */
+#if defined(OEM_ANDROID)
+#define DHD_LIMIT_MULTI_CLIENT_FLOWRINGS
+#endif /* OEM_ANDROID */
+
+typedef enum {
+ FW_UNLOADED = 0,
+ FW_DOWNLOAD_IN_PROGRESS = 1,
+ FW_DOWNLOAD_DONE = 2
+} fw_download_status_t;
+
+#define PCIE_DB7_MAGIC_NUMBER_ISR_TRAP 0xdead0001
+#define PCIE_DB7_MAGIC_NUMBER_DPC_TRAP 0xdead0002
+
+typedef struct dhd_db7_info {
+ bool fw_db7w_trap;
+ bool fw_db7w_trap_inprogress;
+ uint32 db7_magic_number;
+
+ uint32 debug_db7_send_cnt;
+ uint32 debug_db7_trap_cnt;
+ uint32 debug_db7_timing_error_cnt;
+ uint64 debug_db7_send_time;
+ uint64 debug_db7_trap_time;
+ uint64 debug_max_db7_dur;
+ uint64 debug_max_db7_send_time;
+ uint64 debug_max_db7_trap_time;
+} dhd_db7_info_t;
+
+#ifdef BCMINTERNAL
+#ifdef DHD_FWTRACE
+typedef struct fwtrace_info fwtrace_info_t; /* forward declaration */
+#endif /* DHD_FWTRACE */
+#endif /* BCMINTERNAL */
+
+typedef enum dhd_induce_error_states
+{
+ DHD_INDUCE_ERROR_CLEAR = 0x0,
+ DHD_INDUCE_IOCTL_TIMEOUT = 0x1,
+ DHD_INDUCE_D3_ACK_TIMEOUT = 0x2,
+ DHD_INDUCE_LIVELOCK = 0x3,
+ DHD_INDUCE_DROP_OOB_IRQ = 0x4,
+ DHD_INDUCE_DROP_AXI_SIG = 0x5,
+ DHD_INDUCE_TX_BIG_PKT = 0x6,
+ DHD_INDUCE_IOCTL_SUSPEND_ERROR = 0x7,
+ /* Big hammer induction */
+ DHD_INDUCE_BH_ON_FAIL_ONCE = 0x10,
+ DHD_INDUCE_BH_ON_FAIL_ALWAYS = 0x11,
+ DHD_INDUCE_BH_CBP_HANG = 0x12,
+ DHD_INDUCE_ERROR_MAX
+} dhd_induce_error_states_t;
+
+#ifdef DHD_HP2P
+#define MAX_TX_HIST_BIN 16
+#define MAX_RX_HIST_BIN 10
+#define MAX_HP2P_FLOWS 16
+#define HP2P_PRIO 7
+#define HP2P_PKT_THRESH 48
+#define HP2P_TIME_THRESH 200
+#define HP2P_PKT_EXPIRY 40
+#define HP2P_TIME_SCALE 32
+
+typedef struct hp2p_info {
+ void *dhd_pub;
+ uint16 flowid;
+ bool hrtimer_init;
+ void *ring;
+ struct hrtimer timer;
+ uint64 num_pkt_limit;
+ uint64 num_timer_limit;
+ uint64 num_timer_start;
+ uint64 tx_t0[MAX_TX_HIST_BIN];
+ uint64 tx_t1[MAX_TX_HIST_BIN];
+ uint64 rx_t0[MAX_RX_HIST_BIN];
+} hp2p_info_t;
+#endif /* DHD_HP2P */
+
+#if defined(SHOW_LOGTRACE) && defined(DHD_USE_KTHREAD_FOR_LOGTRACE)
+/* Timestamps to trace dhd_logtrace_thread() */
+struct dhd_logtrace_thr_ts {
+ uint64 entry_time;
+ uint64 sem_down_time;
+ uint64 flush_time;
+ uint64 unexpected_break_time;
+ uint64 complete_time;
+};
+#endif /* SHOW_LOGTRACE && DHD_USE_KTHREAD_FOR_LOGTRACE */
+
+/**
+ * Common structure for module and instance linkage.
+ * Instantiated once per hardware (dongle) instance that this DHD manages.
+ */
+typedef struct dhd_pub {
+	/* Linkage pointers */
+ osl_t *osh; /* OSL handle */
+ struct dhd_bus *bus; /* Bus module handle */
+ struct dhd_prot *prot; /* Protocol module handle */
+ struct dhd_info *info; /* Info module handle */
+ struct dhd_dbg *dbg; /* Debugability module handle */
+#if defined(SHOW_LOGTRACE) && defined(DHD_USE_KTHREAD_FOR_LOGTRACE)
+ struct dhd_logtrace_thr_ts logtrace_thr_ts;
+#endif /* SHOW_LOGTRACE && DHD_USE_KTHREAD_FOR_LOGTRACE */
+
+	/* To NDIS developers: the structure dhd_common is redundant;
+	 * please do NOT merge it back from other branches !!!
+	 */
+
+#ifdef BCMDBUS
+ struct dbus_pub *dbus;
+#endif /* BCMDBUS */
+
+ /* Internal dhd items */
+ bool up; /* Driver up/down (to OS) */
+#ifdef WL_CFG80211
+ spinlock_t up_lock; /* Synchronization with CFG80211 down */
+#endif /* WL_CFG80211 */
+ bool txoff; /* Transmit flow-controlled */
+ bool dongle_reset; /* TRUE = DEVRESET put dongle into reset */
+ enum dhd_bus_state busstate;
+ uint dhd_bus_busy_state; /* Bus busy state */
+ uint hdrlen; /* Total DHD header length (proto + bus) */
+ uint maxctl; /* Max size rxctl request from proto to bus */
+ uint rxsz; /* Rx buffer size bus module should use */
+ uint8 wme_dp; /* wme discard priority */
+#ifdef DNGL_AXI_ERROR_LOGGING
+ uint32 axierror_logbuf_addr;
+ bool axi_error;
+ struct dhd_axi_error_dump *axi_err_dump;
+#endif /* DNGL_AXI_ERROR_LOGGING */
+ /* Dongle media info */
+ bool iswl; /* Dongle-resident driver is wl */
+ ulong drv_version; /* Version of dongle-resident driver */
+ struct ether_addr mac; /* MAC address obtained from dongle */
+ dngl_stats_t dstats; /* Stats for dongle-based data */
+
+ /* Additional stats for the bus level */
+ ulong tx_packets; /* Data packets sent to dongle */
+ ulong actual_tx_pkts; /* Actual data packets sent to dongle */
+ ulong tot_txcpl; /* Total Tx completion received */
+ ulong tx_dropped; /* Data packets dropped in dhd */
+ ulong tx_multicast; /* Multicast data packets sent to dongle */
+ ulong tx_errors; /* Errors in sending data to dongle */
+ ulong tx_ctlpkts; /* Control packets sent to dongle */
+ ulong tx_ctlerrs; /* Errors sending control frames to dongle */
+ ulong rx_packets; /* Packets sent up the network interface */
+ ulong rx_multicast; /* Multicast packets sent up the network interface */
+ ulong rx_errors; /* Errors processing rx data packets */
+ ulong rx_ctlpkts; /* Control frames processed from dongle */
+ ulong rx_ctlerrs; /* Errors in processing rx control frames */
+ ulong rx_dropped; /* Packets dropped locally (no memory) */
+ ulong rx_flushed; /* Packets flushed due to unscheduled sendup thread */
+ ulong wd_dpc_sched; /* Number of times dhd dpc scheduled by watchdog timer */
+ ulong rx_pktgetfail; /* Number of PKTGET failures in DHD on RX */
+ ulong tx_pktgetfail; /* Number of PKTGET failures in DHD on TX */
+ ulong rx_readahead_cnt; /* Number of packets where header read-ahead was used. */
+ ulong tx_realloc; /* Number of tx packets we had to realloc for headroom */
+ ulong fc_packets; /* Number of flow control pkts recvd */
+ ulong tx_big_packets; /* Dropped data packets that are larger than MAX_MTU_SZ */
+#ifdef DMAMAP_STATS
+ /* DMA Mapping statistics */
+ dma_stats_t dma_stats;
+#endif /* DMAMAP_STATS */
+#ifdef WL_MONITOR
+ bool monitor_enable;
+#endif /* WL_MONITOR */
+ /* Last error return */
+ int bcmerror;
+ uint tickcnt;
+
+ /* Last error from dongle */
+ int dongle_error;
+
+ uint8 country_code[WLC_CNTRY_BUF_SZ];
+
+ /* Suspend disable flag and "in suspend" flag */
+ int suspend_disable_flag; /* "1" to disable all extra powersaving during suspend */
+ int in_suspend; /* flag set to 1 when early suspend called */
+#ifdef PNO_SUPPORT
+ int pno_enable; /* pno status : "1" is pno enable */
+ int pno_suspend; /* pno suspend status : "1" is pno suspended */
+#endif /* PNO_SUPPORT */
+	/* DTIM skip value: the default 0 (or 1) means wake on every DTIM;
+	 * 3 means skip 2 DTIMs and wake on the 3rd (the 9th beacon when the AP DTIM is 3)
+	 */
+ int suspend_bcn_li_dtim; /* bcn_li_dtim value in suspend mode */
+ int early_suspended; /* Early suspend status */
+#ifdef PKT_FILTER_SUPPORT
+ int dhcp_in_progress; /* DHCP period */
+#endif
+
+	/* Packet filter definition */
+ char * pktfilter[100];
+ int pktfilter_count;
+
+ wl_country_t dhd_cspec; /* Current Locale info */
+#ifdef CUSTOM_COUNTRY_CODE
+ uint dhd_cflags;
+#endif /* CUSTOM_COUNTRY_CODE */
+#if defined(DHD_BLOB_EXISTENCE_CHECK)
+	bool is_blob;			/* Checks for existence of the blob file */
+#endif /* DHD_BLOB_EXISTENCE_CHECK */
+ bool force_country_change;
+ int op_mode; /* STA, HostAPD, WFD, SoftAP */
+
+#if defined(LINUX) || defined(linux)
+#if defined(OEM_ANDROID)
+ struct mutex wl_start_stop_lock; /* lock/unlock for Android start/stop */
+ struct mutex wl_softap_lock; /* lock/unlock for any SoftAP/STA settings */
+#endif /* defined(OEM_ANDROID) */
+#endif /* defined (LINUX) || defined(linux) */
+
+#ifdef NDIS
+ PDEVICE_OBJECT pdo;
+ PDEVICE_OBJECT fdo;
+ PDEVICE_OBJECT nextDeviceObj;
+#if defined(BCMWDF)
+ WDFDEVICE wdfDevice;
+#endif /* (BCMWDF) */
+#endif /* NDIS */
+#ifdef PROP_TXSTATUS
+ bool wlfc_enabled;
+ int wlfc_mode;
+ void* wlfc_state;
+	/*
+	Mode in which dhd flow control shall operate. Must be set before
+	traffic to the device starts.
+	0 - Do not do any proptxstatus flow control
+	1 - Use implied credit from a packet status
+	2 - Use explicit credit
+	3 - Only AMPDU host reorder is used; no wlfc.
+	*/
+ uint8 proptxstatus_mode;
+ bool proptxstatus_txoff;
+ bool proptxstatus_module_ignore;
+ bool proptxstatus_credit_ignore;
+ bool proptxstatus_txstatus_ignore;
+
+ bool wlfc_rxpkt_chk;
+#ifdef LIMIT_BORROW
+ bool wlfc_borrow_allowed;
+#endif /* LIMIT_BORROW */
+	/*
+	 * Implement the functions below in each platform, if needed.
+	 */
+ /* platform specific function whether to skip flow control */
+ bool (*skip_fc)(void * dhdp, uint8 ifx);
+ /* platform specific function for wlfc_enable and wlfc_deinit */
+ void (*plat_init)(void *dhd);
+ void (*plat_deinit)(void *dhd);
+#ifdef DHD_WLFC_THREAD
+ bool wlfc_thread_go;
+#if defined(LINUX)
+ struct task_struct* wlfc_thread;
+ wait_queue_head_t wlfc_wqhead;
+#else
+ #error "wlfc thread not enabled"
+#endif /* LINUX */
+#endif /* DHD_WLFC_THREAD */
+#endif /* PROP_TXSTATUS */
+#ifdef PNO_SUPPORT
+ void *pno_state;
+#endif
+#ifdef RTT_SUPPORT
+ void *rtt_state;
+ bool rtt_supported;
+#endif
+#ifdef ROAM_AP_ENV_DETECTION
+ bool roam_env_detection;
+#endif
+ bool dongle_isolation;
+ bool is_pcie_watchdog_reset;
+
+/* Begin - Variables to track Bus Errors */
+ bool dongle_trap_occured; /* flag for sending HANG event to upper layer */
+#ifdef BT_OVER_PCIE
+ bool dongle_trap_due_to_bt; /* flag to indicate that dongle has trapped due to BT */
+#endif /* BT_OVER_PCIE */
+ bool iovar_timeout_occured; /* flag to indicate iovar resumed on timeout */
+	bool invalid_shinfo_nrfrags;	/* flag to indicate invalid shinfo nrfrags */
+ bool is_sched_error; /* flag to indicate timeout due to scheduling issue */
+#ifdef PCIE_FULL_DONGLE
+ bool d3ack_timeout_occured; /* flag to indicate d3ack resumed on timeout */
+ bool livelock_occured; /* flag to indicate livelock occured */
+ bool pktid_audit_failed; /* flag to indicate pktid audit failure */
+#endif /* PCIE_FULL_DONGLE */
+ bool iface_op_failed; /* flag to indicate interface operation failed */
+	bool scan_timeout_occurred;	/* flag to indicate scan has timed out */
+ bool scan_busy_occurred; /* flag to indicate scan busy occurred */
+#ifdef BT_OVER_SDIO
+ bool is_bt_recovery_required;
+#endif
+ bool smmu_fault_occurred; /* flag to indicate SMMU Fault */
+/*
+ * Add any new variables to track Bus errors above
+ * this line. Also ensure that the variable is
+ * cleared in dhd_clear_bus_errors
+ */
+/* End - Variables to track Bus Errors */
+
+ int hang_was_sent;
+ int hang_was_pending;
+ int rxcnt_timeout; /* counter rxcnt timeout to send HANG */
+ int txcnt_timeout; /* counter txcnt timeout to send HANG */
+#ifdef BCMPCIE
+ int d3ackcnt_timeout; /* counter d3ack timeout to send HANG */
+#endif /* BCMPCIE */
+ bool hang_report; /* enable hang report by default */
+ uint16 hang_reason; /* reason codes for HANG event */
+#if defined(DHD_HANG_SEND_UP_TEST)
+ uint req_hang_type;
+#endif /* DHD_HANG_SEND_UP_TEST */
+#ifdef DHD_DETECT_CONSECUTIVE_MFG_HANG
+ uint hang_count;
+#endif /* DHD_DETECT_CONSECUTIVE_MFG_HANG */
+#ifdef WLTDLS
+ bool tdls_enable;
+#endif
+ struct reorder_info *reorder_bufs[WLHOST_REORDERDATA_MAXFLOWS];
+ #define WLC_IOCTL_MAXBUF_FWCAP 1024
+ char fw_capabilities[WLC_IOCTL_MAXBUF_FWCAP];
+ #define DHD_IOCTL_MAXBUF_DHDCAP 1024
+ char dhd_capabilities[DHD_IOCTL_MAXBUF_DHDCAP];
+ #define MAXSKBPEND 1024
+ void *skbbuf[MAXSKBPEND];
+ uint32 store_idx;
+ uint32 sent_idx;
+#ifdef DHDTCPACK_SUPPRESS
+ uint8 tcpack_sup_mode; /* TCPACK suppress mode */
+ void *tcpack_sup_module; /* TCPACK suppress module */
+ uint32 tcpack_sup_ratio;
+ uint32 tcpack_sup_delay;
+#endif /* DHDTCPACK_SUPPRESS */
+#if defined(ARP_OFFLOAD_SUPPORT)
+ uint32 arp_version;
+ bool hmac_updated;
+#endif
+#if defined(BCMSUP_4WAY_HANDSHAKE)
+	bool fw_4way_handshake;		/* Whether firmware will do the 4-way handshake. */
+#endif
+#ifdef BCMINTERNAL
+ bool loopback; /* 1- enable loopback of tx packets, 0 - disable */
+#endif /* BCMINTERNAL */
+#ifdef DEBUG_DPC_THREAD_WATCHDOG
+ bool dhd_bug_on;
+#endif /* DEBUG_DPC_THREAD_WATCHDOG */
+#ifdef CUSTOM_SET_CPUCORE
+ struct task_struct * current_dpc;
+ struct task_struct * current_rxf;
+ int chan_isvht80;
+#endif /* CUSTOM_SET_CPUCORE */
+ void *sta_pool; /* pre-allocated pool of sta objects */
+ void *staid_allocator; /* allocator of sta indexes */
+#ifdef PCIE_FULL_DONGLE
+ bool flow_rings_inited; /* set this flag after initializing flow rings */
+#endif /* PCIE_FULL_DONGLE */
+ void *flowid_allocator; /* unique flowid allocator */
+#if defined(DHD_HTPUT_TUNABLES)
+ void *htput_flowid_allocator; /* unique htput flowid allocator */
+ uint8 htput_client_flow_rings; /* current number of htput client flowrings */
+ uint8 htput_flow_ring_start; /* start index of htput flow rings */
+#endif /* DHD_HTPUT_TUNABLES */
+ void *flow_ring_table; /* flow ring table, include prot and bus info */
+ void *if_flow_lkup; /* per interface flowid lkup hash table */
+ void *flowid_lock; /* per os lock for flowid info protection */
+ void *flowring_list_lock; /* per os lock for flowring list protection */
+ uint8 max_multi_client_flow_rings;
+ uint8 multi_client_flow_rings;
+ uint32 num_h2d_rings; /* Max h2d rings including static and dynamic rings */
+ uint32 max_tx_flowid; /* used to validate flowid */
+ cumm_ctr_t cumm_ctr; /* cumm queue length placeholder */
+ cumm_ctr_t l2cumm_ctr; /* level 2 cumm queue length placeholder */
+ uint32 d2h_sync_mode; /* D2H DMA completion sync mode */
+ uint8 flow_prio_map[NUMPRIO];
+ uint8 flow_prio_map_type;
+ char enable_log[MAX_EVENT];
+ bool dma_d2h_ring_upd_support;
+ bool dma_h2d_ring_upd_support;
+ bool dma_ring_upd_overwrite; /* host overwrites support setting */
+
+ bool idma_enable;
+ uint idma_inited;
+
+ bool ifrm_enable; /* implicit frm enable */
+ uint ifrm_inited; /* implicit frm init */
+
+ bool dar_enable; /* use DAR registers */
+ uint dar_inited;
+
+ bool fast_delete_ring_support; /* fast delete ring supported */
+
+#ifdef DHD_WMF
+ bool wmf_ucast_igmp;
+#ifdef DHD_IGMP_UCQUERY
+ bool wmf_ucast_igmp_query;
+#endif
+#ifdef DHD_UCAST_UPNP
+ bool wmf_ucast_upnp;
+#endif
+#endif /* DHD_WMF */
+#if defined(BCM_ROUTER_DHD)
+ dhd_trf_mgmt_dwm_tbl_t dhd_tm_dwm_tbl;
+#endif /* BCM_ROUTER_DHD */
+#ifdef DHD_L2_FILTER
+ unsigned long l2_filter_cnt; /* for L2_FILTER ARP table timeout */
+#endif /* DHD_L2_FILTER */
+#ifdef DHD_SSSR_DUMP
+ bool sssr_inited;
+ bool sssr_dump_collected; /* Flag to indicate sssr dump is collected */
+ sssr_reg_info_cmn_t *sssr_reg_info;
+ uint8 *sssr_mempool;
+#ifdef DHD_SSSR_DUMP_BEFORE_SR
+ uint *sssr_d11_before[MAX_NUM_D11_CORES_WITH_SCAN];
+ uint *sssr_dig_buf_before;
+#endif /* DHD_SSSR_DUMP_BEFORE_SR */
+ uint *sssr_d11_after[MAX_NUM_D11_CORES_WITH_SCAN];
+ bool sssr_d11_outofreset[MAX_NUM_D11_CORES_WITH_SCAN];
+ uint *sssr_dig_buf_after;
+ uint32 sssr_dump_mode;
+ bool collect_sssr; /* Flag to indicate SSSR dump is required */
+ bool fis_triggered;
+#endif /* DHD_SSSR_DUMP */
+#ifdef DHD_SDTC_ETB_DUMP
+ etb_addr_info_t etb_addr_info;
+ uint8 *sdtc_etb_mempool;
+ bool sdtc_etb_inited;
+ bool collect_sdtc; /* Flag to indicate SDTC dump is required */
+#endif /* DHD_SDTC_ETB_DUMP */
+ uint8 *soc_ram;
+ uint32 soc_ram_length;
+ uint32 memdump_type;
+#ifdef DHD_COREDUMP
+ char memdump_str[DHD_MEMDUMP_LONGSTR_LEN];
+#endif /* DHD_COREDUMP */
+#ifdef DHD_RND_DEBUG
+ uint8 *rnd_buf;
+ uint32 rnd_len;
+#endif /* DHD_RND_DEBUG */
+#ifdef DHD_FW_COREDUMP
+ uint32 memdump_enabled;
+#ifdef DHD_DEBUG_UART
+ bool memdump_success;
+#endif /* DHD_DEBUG_UART */
+#endif /* DHD_FW_COREDUMP */
+#ifdef PCIE_FULL_DONGLE
+#ifdef WLTDLS
+ tdls_peer_tbl_t peer_tbl;
+#endif /* WLTDLS */
+#if defined(LINUX) || defined(linux)
+ uint8 tx_in_progress;
+#endif /* LINUX || linux */
+#endif /* PCIE_FULL_DONGLE */
+#ifdef CACHE_FW_IMAGES
+ char *cached_fw;
+ int cached_fw_length;
+ char *cached_nvram;
+ int cached_nvram_length;
+ char *cached_clm;
+ int cached_clm_length;
+ char *cached_txcap;
+ int cached_txcap_length;
+#endif
+#ifdef KEEP_JP_REGREV
+/* XXX Needed per customer request */
+ char vars_ccode[WLC_CNTRY_BUF_SZ];
+ uint vars_regrev;
+#endif /* KEEP_JP_REGREV */
+#ifdef WLTDLS
+ uint32 tdls_mode;
+#endif
+#ifdef GSCAN_SUPPORT
+ bool lazy_roam_enable;
+#endif
+#if defined(PKT_FILTER_SUPPORT) && defined(APF)
+ bool apf_set;
+#endif /* PKT_FILTER_SUPPORT && APF */
+ void *macdbg_info;
+#ifdef DHD_WET
+ void *wet_info;
+#endif
+ bool h2d_phase_supported;
+ bool force_dongletrap_on_bad_h2d_phase;
+ uint32 dongle_trap_data;
+ fw_download_status_t fw_download_status;
+ trap_t last_trap_info; /* trap info from the last trap */
+ uint8 rand_mac_oui[DOT11_OUI_LEN];
+#ifdef DHD_LOSSLESS_ROAMING
+ uint8 dequeue_prec_map;
+ uint8 prio_8021x;
+#endif
+#ifdef WL_NATOE
+ struct dhd_nfct_info *nfct;
+ spinlock_t nfct_lock;
+#endif /* WL_NATOE */
+ /* timesync link */
+ struct dhd_ts *ts;
+ bool d2h_hostrdy_supported;
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+ atomic_t block_bus;
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+#if defined(DBG_PKT_MON) || defined(DHD_PKT_LOGGING)
+ bool d11_tx_status;
+#endif /* DBG_PKT_MON || DHD_PKT_LOGGING */
+ uint16 ndo_version; /* ND offload version supported */
+#ifdef NDO_CONFIG_SUPPORT
+ bool ndo_enable; /* ND offload feature enable */
+ bool ndo_host_ip_overflow; /* # of host ip addr exceed FW capacity */
+ uint32 ndo_max_host_ip; /* # of host ip addr supported by FW */
+#endif /* NDO_CONFIG_SUPPORT */
+#if defined(DHD_LOG_DUMP)
+#if defined(DHD_EFI)
+ uint8 log_capture_enable;
+#endif /* DHD_EFI */
+ /* buffer to hold 'dhd dump' data before dumping to file */
+ uint8 *concise_dbg_buf;
+ uint64 last_file_posn;
+ int logdump_periodic_flush;
+#ifdef EWP_ECNTRS_LOGGING
+ void *ecntr_dbg_ring;
+#endif
+#ifdef EWP_RTT_LOGGING
+ void *rtt_dbg_ring;
+#endif
+#ifdef EWP_BCM_TRACE
+ void *bcm_trace_dbg_ring;
+#endif
+#ifdef DNGL_EVENT_SUPPORT
+ uint8 health_chk_event_data[HEALTH_CHK_BUF_SIZE];
+#endif
+ void *logdump_cookie;
+#endif /* DHD_LOG_DUMP */
+ uint32 dhd_console_ms; /** interval for polling the dongle for console (log) messages */
+ bool ext_trap_data_supported;
+ uint32 *extended_trap_data;
+#ifdef DUMP_IOCTL_IOV_LIST
+ /* dump iovar list */
+ dll_t dump_iovlist_head;
+ uint8 dump_iovlist_len;
+#endif /* DUMP_IOCTL_IOV_LIST */
+#ifdef REPORT_FATAL_TIMEOUTS
+ timeout_info_t *timeout_info;
+ uint16 esync_id; /* used to track escans */
+ osl_atomic_t set_ssid_rcvd; /* to track if WLC_E_SET_SSID is received during join IOVAR */
+	bool secure_join;		/* notes whether the join is secure */
+#endif /* REPORT_FATAL_TIMEOUTS */
+#ifdef CUSTOM_SET_ANTNPM
+ uint32 mimo_ant_set;
+#endif /* CUSTOM_SET_ANTNPM */
+#ifdef CUSTOM_SET_OCLOFF
+ bool ocl_off;
+#endif /* CUSTOM_SET_OCLOFF */
+#ifdef DHD_DEBUG
+ /* memwaste feature */
+ dll_t mw_list_head; /* memwaste list head */
+ uint32 mw_id; /* memwaste list unique id */
+#endif /* DHD_DEBUG */
+#ifdef WLTDLS
+ spinlock_t tdls_lock;
+#endif /* WLTDLS */
+ uint pcie_txs_metadata_enable;
+#ifdef BTLOG
+ bool bt_logging;
+ bool submit_count_WAR; /* submission count WAR */
+ bool bt_logging_enabled;
+#endif /* BTLOG */
+ uint wbtext_policy; /* wbtext policy of dongle */
+ bool wbtext_support; /* for product policy only */
+#ifdef PCIE_OOB
+ bool d2h_no_oob_dw;
+#endif /* PCIE_OOB */
+#ifdef PCIE_INB_DW
+ bool d2h_inband_dw;
+ enum dhd_bus_ds_state ds_state;
+#endif /* PCIE_INB_DW */
+ bool max_dtim_enable; /* use MAX bcn_li_dtim value in suspend mode */
+#ifdef SNAPSHOT_UPLOAD
+ bool snapshot_upload;
+#endif /* SNAPSHOT_UPLOAD */
+ tput_test_t tput_data;
+ uint64 tput_start_ts;
+ uint64 tput_stop_ts;
+ uint dhd_watchdog_ms_backup;
+ bool wl_event_enabled;
+ bool logtrace_pkt_sendup;
+#ifdef GDB_PROXY
+ /* True if firmware runs under gdb control (this may cause timeouts at any point) */
+ bool gdb_proxy_active;
+ /* True if deadman_to shall be forced to 0 */
+ bool gdb_proxy_nodeadman;
+	/* Counter incremented at each firmware stop/go transition. The LSB (GDB_PROXY_STOP_MASK)
+	 * is set when the firmware is stopped and clear when it is running
+	 */
+ uint32 gdb_proxy_stop_count;
+#endif /* GDB_PROXY */
+ int debug_dump_subcmd;
+ uint64 debug_dump_time_sec;
+ bool hscb_enable;
+#if defined(DHD_AWDL)
+#if defined(AWDL_SLOT_STATS)
+ dhd_awdl_stats_t awdl_stats[AWDL_NUM_SLOTS];
+ uint8 awdl_tx_status_slot; /* Slot in which AWDL is active right now */
+ void *awdl_stats_lock; /* Lock to protect against parallel AWDL stats updates */
+ uint16 awdl_aw_counter;
+ uint32 pkt_latency;
+#endif /* AWDL_SLOT_STATS */
+ uint32 awdl_ifidx;
+ uint16 awdl_seq;
+ uint8 awdl_minext;
+ uint8 awdl_presmode;
+ bool awdl_llc_enabled;
+#endif /* DHD_AWDL */
+ uint32 logset_prsrv_mask;
+#ifdef DHD_PKT_LOGGING
+ struct dhd_pktlog *pktlog;
+ char debug_dump_time_pktlog_str[DEBUG_DUMP_TIME_BUF_LEN];
+ bool pktlog_debug;
+#endif /* DHD_PKT_LOGGING */
+#ifdef EWP_EDL
+ bool dongle_edl_support;
+ dhd_dma_buf_t edl_ring_mem;
+#endif /* EWP_EDL */
+#if defined (LINUX) || defined(linux)
+ struct mutex ndev_op_sync;
+#endif /* defined (LINUX) || defined(linux) */
+ bool debug_buf_dest_support;
+ uint32 debug_buf_dest_stat[DEBUG_BUF_DEST_MAX];
+#ifdef WL_CFGVENDOR_SEND_HANG_EVENT
+ char *hang_info;
+ int hang_info_cnt;
+ char debug_dump_time_hang_str[DEBUG_DUMP_TIME_BUF_LEN];
+#endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
+ char debug_dump_time_str[DEBUG_DUMP_TIME_BUF_LEN];
+ void *event_log_filter;
+ uint tput_test_done;
+#if defined(LINUX) || defined(linux)
+ wait_queue_head_t tx_tput_test_wait;
+ wait_queue_head_t tx_completion_wait;
+#ifdef WL_NANHO
+ void *nanhoi; /* NANHO instance */
+#endif /* WL_NANHO */
+#endif /* defined(LINUX) || defined(linux) */
+#ifdef DHD_ERPOM
+ bool enable_erpom;
+ pom_func_handler_t pom_wlan_handler;
+ int (*pom_func_register)(pom_func_handler_t *func);
+ int (*pom_func_deregister)(pom_func_handler_t *func);
+ int (*pom_toggle_reg_on)(uchar func_id, uchar reason);
+#endif /* DHD_ERPOM */
+#if defined(DHD_H2D_LOG_TIME_SYNC)
+#define DHD_H2D_LOG_TIME_STAMP_MATCH (10000) /* 10 Seconds */
+ /*
+ * Interval for updating the dongle console message time stamp with the Host (DHD)
+ * time stamp
+ */
+ uint32 dhd_rte_time_sync_ms;
+#endif /* DHD_H2D_LOG_TIME_SYNC */
+ uint32 batch_tx_pkts_cmpl;
+ uint32 batch_tx_num_pkts;
+#ifdef DHD_EFI
+ bool insert_random_mac;
+ /* threshold # of pkts Tx'd/Rx'd after which efi dhd
+ * will switch intr poll period to 100us
+ */
+ uint64 npkts_thresh;
+ /* the period of time in which if no pkt is Tx'd/Rx'd
+ * efi dhd will restore intr poll period to default value
+ */
+ uint64 pkt_intvl_thresh_us;
+ /* time stamp of last Tx'd pkt */
+ uint64 tx_last_pkt_ts;
+ /* time stamp of last Rx'd pkt */
+ uint64 rx_last_pkt_ts;
+ /* used to temporarily store the current intr poll period
+ * during efi dhd iovar execution, so as to restore it back
+ * once iovar completes
+ */
+ uint32 cur_intr_poll_period;
+ /* the intr poll period set by user through dhd iovar */
+ uint32 iovar_intr_poll_period;
+ bool pcie_readshared_done;
+#endif /* DHD_EFI */
+#ifdef DHD_DUMP_MNGR
+ struct _dhd_dump_file_manage *dump_file_manage;
+#endif /* DHD_DUMP_MNGR */
+#ifdef BCMINTERNAL
+#ifdef DHD_FWTRACE
+ fwtrace_info_t *fwtrace_info; /* f/w trace information */
+#endif /* DHD_FWTRACE */
+#endif /* BCMINTERNAL */
+ bool event_log_max_sets_queried;
+ uint32 event_log_max_sets;
+#ifdef DHD_STATUS_LOGGING
+ void *statlog;
+#endif /* DHD_STATUS_LOGGING */
+#ifdef DHD_HP2P
+ /* whether enabled from host by user iovar */
+ bool hp2p_enable;
+ bool hp2p_infra_enable;
+ /* whether fw supports it */
+ bool hp2p_capable;
+ bool hp2p_mf_enable;
+ bool hp2p_ts_capable;
+ uint16 pkt_thresh;
+ uint16 time_thresh;
+ uint16 pkt_expiry;
+ hp2p_info_t hp2p_info[MAX_HP2P_FLOWS];
+ /* Flag to allow more hp2p ring creation */
+ bool hp2p_ring_more;
+#endif /* DHD_HP2P */
+#ifdef DHD_DB0TS
+ bool db0ts_capable;
+#endif /* DHD_DB0TS */
+ bool extdtxs_in_txcpl;
+ bool hostrdy_after_init;
+ uint16 dhd_induce_error;
+ uint16 dhd_induce_bh_error;
+ int wlc_ver_major;
+ int wlc_ver_minor;
+#ifdef DHD_PKTTS
+ /* stores the packet meta data buffer length queried via iovar */
+ uint16 pkt_metadata_version;
+ uint16 pkt_metadata_buflen;
+#endif
+#ifdef SUPPORT_SET_TID
+ uint8 tid_mode;
+ uint32 target_uid;
+ uint8 target_tid;
+#endif /* SUPPORT_SET_TID */
+#ifdef CONFIG_SILENT_ROAM
+ bool sroam_turn_on; /* Silent roam monitor enable flags */
+ bool sroamed; /* Silent roam monitor check flags */
+#endif /* CONFIG_SILENT_ROAM */
+#ifdef DHD_PKTDUMP_ROAM
+ void *pktcnts;
+#endif /* DHD_PKTDUMP_ROAM */
+ dhd_db7_info_t db7_trap;
+ bool fw_preinit;
+ bool ring_attached;
+#ifdef DHD_PCIE_RUNTIMEPM
+ bool rx_pending_due_to_rpm;
+#endif /* DHD_PCIE_RUNTIMEPM */
+ bool disable_dtim_in_suspend; /* Disable set bcn_li_dtim in suspend */
+ union {
+ wl_roam_stats_v1_t v1;
+ } roam_evt;
+ bool arpoe_enable;
+ bool arpol_configured;
+#ifdef DHD_TX_PROFILE
+ bool tx_profile_enab;
+ uint8 num_profiles;
+ dhd_tx_profile_protocol_t *protocol_filters;
+#endif /* defined(DHD_TX_PROFILE) */
+#ifdef DHD_MEM_STATS
+ void *mem_stats_lock;
+ uint64 txpath_mem;
+ uint64 rxpath_mem;
+#endif /* DHD_MEM_STATS */
+#ifdef DHD_LB_RXP
+ atomic_t lb_rxp_flow_ctrl;
+ uint32 lb_rxp_stop_thr;
+ uint32 lb_rxp_strt_thr;
+#endif /* DHD_LB_RXP */
+#ifdef DHD_LB_STATS
+ uint64 lb_rxp_stop_thr_hitcnt;
+ uint64 lb_rxp_strt_thr_hitcnt;
+ uint64 lb_rxp_napi_sched_cnt;
+ uint64 lb_rxp_napi_complete_cnt;
+#endif /* DHD_LB_STATS */
+ bool check_trap_rot;
+ /* if FW supports host insertion of SFH LLC */
+ bool host_sfhllc_supported;
+#ifdef DHD_GRO_ENABLE_HOST_CTRL
+ bool permitted_gro;
+#endif /* DHD_GRO_ENABLE_HOST_CTRL */
+#ifdef CSI_SUPPORT
+ struct list_head csi_list;
+ int csi_count;
+#endif /* CSI_SUPPORT */
+ char *clm_path; /* module_param: path to clm vars file */
+ char *conf_path; /* module_param: path to config vars file */
+ struct dhd_conf *conf; /* Bus module handle */
+ void *adapter; /* adapter information, interrupt, fw path etc. */
+ void *event_params;
+#ifdef BCMDBUS
+ bool dhd_remove;
+#endif /* BCMDBUS */
+#ifdef WL_ESCAN
+ struct wl_escan_info *escan;
+#endif
+#if defined(WL_WIRELESS_EXT)
+ void *wext_info;
+#endif
+#ifdef WL_EXT_IAPSTA
+ void *iapsta_params;
+#endif
+ int hostsleep;
+#ifdef SENDPROB
+ bool recv_probereq;
+#endif
+#ifdef DHD_NOTIFY_MAC_CHANGED
+ bool skip_dhd_stop;
+#endif /* DHD_NOTIFY_MAC_CHANGED */
+#ifdef WL_EXT_GENL
+ void *zconf;
+#endif
+} dhd_pub_t;
+
+#if defined(__linux__)
+int dhd_wifi_platform_set_power(dhd_pub_t *pub, bool on);
+#else
+static INLINE int dhd_wifi_platform_set_power(dhd_pub_t *pub, bool on) { return 0; }
+#endif /* __linux__ */
+
+typedef struct {
+ uint rxwake;
+ uint rcwake;
+#ifdef DHD_WAKE_RX_STATUS
+ uint rx_bcast;
+ uint rx_arp;
+ uint rx_mcast;
+ uint rx_multi_ipv6;
+ uint rx_icmpv6;
+ uint rx_icmpv6_ra;
+ uint rx_icmpv6_na;
+ uint rx_icmpv6_ns;
+ uint rx_multi_ipv4;
+ uint rx_multi_other;
+ uint rx_ucast;
+#endif /* DHD_WAKE_RX_STATUS */
+#ifdef DHD_WAKE_EVENT_STATUS
+ uint rc_event[WLC_E_LAST];
+#endif /* DHD_WAKE_EVENT_STATUS */
+} wake_counts_t;
+
+#if defined(PCIE_FULL_DONGLE)
+/*
+ * XXX: WARNING: dhd_wlfc.h also defines a dhd_pkttag_t
+ * making wlfc incompatible with PCIE_FULL_DONGLE
+ */
+
+/* Packet Tag for PCIE Full Dongle DHD */
+typedef struct dhd_pkttag_fd {
+ uint16 flowid; /* Flowring Id */
+ uint16 ifid;
+#ifdef DHD_SBN
+ uint8 pkt_udr;
+ uint8 pad;
+#endif /* DHD_SBN */
+#if defined(BCM_ROUTER_DHD) && defined(BCM_GMAC3)
+ uint16 dataoff; /* start of packet */
+#endif /* BCM_ROUTER_DHD && BCM_GMAC3 */
+#ifndef DHD_PCIE_PKTID
+ uint16 dma_len; /* pkt len for DMA_MAP/UNMAP */
+ dmaaddr_t pa; /* physical address */
+ void *dmah; /* dma mapper handle */
+ void *secdma; /* secure dma sec_cma_info handle */
+#endif /* !DHD_PCIE_PKTID */
+#if defined(TX_STATUS_LATENCY_STATS) || defined(DHD_PKTTS)
+ uint64 q_time_us; /* time when tx pkt queued to flowring */
+#endif /* TX_STATUS_LATENCY_STATS || DHD_PKTTS */
+} dhd_pkttag_fd_t;
+
+/* Packet Tag for DHD PCIE Full Dongle */
+#define DHD_PKTTAG_FD(pkt) ((dhd_pkttag_fd_t *)(PKTTAG(pkt)))
+
+#define DHD_PKT_GET_FLOWID(pkt) ((DHD_PKTTAG_FD(pkt))->flowid)
+#define DHD_PKT_SET_FLOWID(pkt, pkt_flowid) \
+ DHD_PKTTAG_FD(pkt)->flowid = (uint16)(pkt_flowid)
+
+#define DHD_PKT_GET_DATAOFF(pkt) ((DHD_PKTTAG_FD(pkt))->dataoff)
+#define DHD_PKT_SET_DATAOFF(pkt, pkt_dataoff) \
+ DHD_PKTTAG_FD(pkt)->dataoff = (uint16)(pkt_dataoff)
+
+#define DHD_PKT_GET_DMA_LEN(pkt) ((DHD_PKTTAG_FD(pkt))->dma_len)
+#define DHD_PKT_SET_DMA_LEN(pkt, pkt_dma_len) \
+ DHD_PKTTAG_FD(pkt)->dma_len = (uint16)(pkt_dma_len)
+
+#define DHD_PKT_GET_PA(pkt) ((DHD_PKTTAG_FD(pkt))->pa)
+#define DHD_PKT_SET_PA(pkt, pkt_pa) \
+ DHD_PKTTAG_FD(pkt)->pa = (dmaaddr_t)(pkt_pa)
+
+#define DHD_PKT_GET_DMAH(pkt) ((DHD_PKTTAG_FD(pkt))->dmah)
+#define DHD_PKT_SET_DMAH(pkt, pkt_dmah) \
+ DHD_PKTTAG_FD(pkt)->dmah = (void *)(pkt_dmah)
+
+#define DHD_PKT_GET_SECDMA(pkt) ((DHD_PKTTAG_FD(pkt))->secdma)
+#define DHD_PKT_SET_SECDMA(pkt, pkt_secdma) \
+ DHD_PKTTAG_FD(pkt)->secdma = (void *)(pkt_secdma)
+
+#if defined(TX_STATUS_LATENCY_STATS) || defined(DHD_PKTTS)
+#define DHD_PKT_GET_QTIME(pkt) ((DHD_PKTTAG_FD(pkt))->q_time_us)
+#define DHD_PKT_SET_QTIME(pkt, pkt_q_time_us) \
+ DHD_PKTTAG_FD(pkt)->q_time_us = (uint64)(pkt_q_time_us)
+#endif /* TX_STATUS_LATENCY_STATS || DHD_PKTTS */
+#endif /* PCIE_FULL_DONGLE */
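+/*
+ * Usage sketch (illustrative only): the tx path tags a packet as it is queued
+ * to a flowring, and the completion path reads the same fields back. The
+ * OSL_SYSUPTIME_US() timestamp source and 'latency_us' are assumptions, and
+ * the QTIME accessors require TX_STATUS_LATENCY_STATS or DHD_PKTTS.
+ *
+ *	DHD_PKT_SET_FLOWID(pkt, flowid);
+ *	DHD_PKT_SET_QTIME(pkt, OSL_SYSUPTIME_US());
+ *	// ... later, on tx status ...
+ *	latency_us = OSL_SYSUPTIME_US() - DHD_PKT_GET_QTIME(pkt);
+ */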
+
+#if defined(BCMWDF)
+typedef struct {
+ dhd_pub_t *dhd_pub;
+} dhd_workitem_context_t;
+
+WDF_DECLARE_CONTEXT_TYPE_WITH_NAME(dhd_workitem_context_t, dhd_get_dhd_workitem_context)
+#endif /* (BCMWDF) */
+
+#if defined(LINUX) || defined(linux)
+#if defined(CONFIG_PM_SLEEP)
+
+ #define DHD_PM_RESUME_WAIT_INIT(a) DECLARE_WAIT_QUEUE_HEAD(a);
+ #define _DHD_PM_RESUME_WAIT(a, b) do {\
+ int retry = 0; \
+ SMP_RD_BARRIER_DEPENDS(); \
+ while (dhd_mmc_suspend && retry++ != b) { \
+ SMP_RD_BARRIER_DEPENDS(); \
+ wait_event_interruptible_timeout(a, !dhd_mmc_suspend, 1); \
+ } \
+ } while (0)
+ #define DHD_PM_RESUME_WAIT(a) _DHD_PM_RESUME_WAIT(a, 200)
+ #define DHD_PM_RESUME_WAIT_FOREVER(a) _DHD_PM_RESUME_WAIT(a, ~0)
+ #define DHD_PM_RESUME_RETURN_ERROR(a) do { \
+ if (dhd_mmc_suspend) { \
+ printf("%s[%d]: mmc is still in suspend state!!!\n", \
+ __FUNCTION__, __LINE__); \
+ return a; \
+ } \
+ } while (0)
+ #define DHD_PM_RESUME_RETURN do { if (dhd_mmc_suspend) return; } while (0)
+
+ #define DHD_SPINWAIT_SLEEP_INIT(a) DECLARE_WAIT_QUEUE_HEAD(a);
+ #define SPINWAIT_SLEEP(a, exp, us) do { \
+ uint countdown = (us) + 9999; \
+ while ((exp) && (countdown >= 10000)) { \
+ wait_event_interruptible_timeout(a, FALSE, 1); \
+ countdown -= 10000; \
+ } \
+ } while (0)
+
+#else
+
+ #define DHD_PM_RESUME_WAIT_INIT(a)
+ #define DHD_PM_RESUME_WAIT(a)
+ #define DHD_PM_RESUME_WAIT_FOREVER(a)
+ #define DHD_PM_RESUME_RETURN_ERROR(a)
+ #define DHD_PM_RESUME_RETURN
+
+ #define DHD_SPINWAIT_SLEEP_INIT(a)
+ #define SPINWAIT_SLEEP(a, exp, us) do { \
+ uint countdown = (us) + 9; \
+ while ((exp) && (countdown >= 10)) { \
+ OSL_DELAY(10); \
+ countdown -= 10; \
+ } \
+ } while (0)
+
+#endif /* CONFIG_PM_SLEEP */
+#else
+ #define DHD_SPINWAIT_SLEEP_INIT(a)
+ #define SPINWAIT_SLEEP(a, exp, us) do { \
+ uint countdown = (us) + 9; \
+ while ((exp) && (countdown >= 10)) { \
+ OSL_DELAY(10); \
+ countdown -= 10; \
+ } \
+ } while (0)
+#endif /* defined (LINUX) || defined(linux) */
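+/*
+ * Usage sketch (illustrative only): a bus register access path waits for the
+ * host controller to leave suspend before touching the hardware. The wait
+ * queue name is hypothetical.
+ *
+ *	DHD_PM_RESUME_WAIT_INIT(sdioh_request_wait);	// file scope, once
+ *
+ *	DHD_PM_RESUME_WAIT(sdioh_request_wait);		// bounded wait
+ *	DHD_PM_RESUME_RETURN_ERROR(BCME_ERROR);		// bail if still suspended
+ *	// ... safe to access the bus here ...
+ */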
+
+#define DHD_IF_VIF 0x01 /* Virtual IF (Hidden from user) */
+
+#ifdef PNO_SUPPORT
+int dhd_pno_clean(dhd_pub_t *dhd);
+#endif /* PNO_SUPPORT */
+
+/*
+ * Wake locks are an Android power management concept. They are used by applications and services
+ * to request CPU resources.
+ */
+#if defined(linux) && defined(OEM_ANDROID)
+extern int dhd_os_wake_lock(dhd_pub_t *pub);
+extern int dhd_os_wake_unlock(dhd_pub_t *pub);
+extern int dhd_os_wake_lock_waive(dhd_pub_t *pub);
+extern int dhd_os_wake_lock_restore(dhd_pub_t *pub);
+extern void dhd_event_wake_lock(dhd_pub_t *pub);
+extern void dhd_event_wake_unlock(dhd_pub_t *pub);
+extern void dhd_pm_wake_lock_timeout(dhd_pub_t *pub, int val);
+extern void dhd_pm_wake_unlock(dhd_pub_t *pub);
+extern void dhd_txfl_wake_lock_timeout(dhd_pub_t *pub, int val);
+extern void dhd_txfl_wake_unlock(dhd_pub_t *pub);
+extern void dhd_nan_wake_lock_timeout(dhd_pub_t *pub, int val);
+extern void dhd_nan_wake_unlock(dhd_pub_t *pub);
+extern int dhd_os_wake_lock_timeout(dhd_pub_t *pub);
+extern int dhd_os_wake_lock_rx_timeout_enable(dhd_pub_t *pub, int val);
+extern int dhd_os_wake_lock_ctrl_timeout_enable(dhd_pub_t *pub, int val);
+extern int dhd_os_wake_lock_ctrl_timeout_cancel(dhd_pub_t *pub);
+extern int dhd_os_wd_wake_lock(dhd_pub_t *pub);
+extern int dhd_os_wd_wake_unlock(dhd_pub_t *pub);
+extern void dhd_os_wake_lock_init(struct dhd_info *dhd);
+extern void dhd_os_wake_lock_destroy(struct dhd_info *dhd);
+#ifdef DHD_USE_SCAN_WAKELOCK
+extern void dhd_os_scan_wake_lock_timeout(dhd_pub_t *pub, int val);
+extern void dhd_os_scan_wake_unlock(dhd_pub_t *pub);
+#endif /* DHD_USE_SCAN_WAKELOCK */
+
+#ifdef WLEASYMESH
+extern int dhd_get_1905_almac(dhd_pub_t *dhdp, uint8 ifidx, uint8* ea, bool mcast);
+extern int dhd_set_1905_almac(dhd_pub_t *dhdp, uint8 ifidx, uint8* ea, bool mcast);
+#endif /* WLEASYMESH */
+
+inline static void MUTEX_LOCK_SOFTAP_SET_INIT(dhd_pub_t * dhdp)
+{
+#if defined(OEM_ANDROID)
+ mutex_init(&dhdp->wl_softap_lock);
+#endif /* OEM_ANDROID */
+}
+
+inline static void MUTEX_LOCK_SOFTAP_SET(dhd_pub_t * dhdp)
+{
+#if defined(OEM_ANDROID)
+ mutex_lock(&dhdp->wl_softap_lock);
+#endif /* OEM_ANDROID */
+}
+
+inline static void MUTEX_UNLOCK_SOFTAP_SET(dhd_pub_t * dhdp)
+{
+#if defined(OEM_ANDROID)
+ mutex_unlock(&dhdp->wl_softap_lock);
+#endif /* OEM_ANDROID */
+}
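+/*
+ * Usage sketch (illustrative only): serializing a SoftAP configuration change
+ * against concurrent start/stop control paths. On non-Android builds the
+ * helpers above compile to no-ops.
+ *
+ *	MUTEX_LOCK_SOFTAP_SET_INIT(dhdp);	// once, at attach time
+ *
+ *	MUTEX_LOCK_SOFTAP_SET(dhdp);
+ *	// ... apply SoftAP/STA settings ...
+ *	MUTEX_UNLOCK_SOFTAP_SET(dhdp);
+ */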
+
+#ifdef DHD_DEBUG_WAKE_LOCK
+#define DHD_OS_WAKE_LOCK(pub) \
+ do { \
+ printf("call wake_lock: %s %d\n", \
+ __FUNCTION__, __LINE__); \
+ dhd_os_wake_lock(pub); \
+ } while (0)
+#define DHD_OS_WAKE_UNLOCK(pub) \
+ do { \
+ printf("call wake_unlock: %s %d\n", \
+ __FUNCTION__, __LINE__); \
+ dhd_os_wake_unlock(pub); \
+ } while (0)
+#define DHD_EVENT_WAKE_LOCK(pub) \
+ do { \
+ printf("call event wake_lock: %s %d\n", \
+ __FUNCTION__, __LINE__); \
+ dhd_event_wake_lock(pub); \
+ } while (0)
+#define DHD_EVENT_WAKE_UNLOCK(pub) \
+ do { \
+ printf("call event wake_unlock: %s %d\n", \
+ __FUNCTION__, __LINE__); \
+ dhd_event_wake_unlock(pub); \
+ } while (0)
+#define DHD_PM_WAKE_LOCK_TIMEOUT(pub, val) \
+ do { \
+ printf("call pm_wake_timeout enable\n"); \
+ dhd_pm_wake_lock_timeout(pub, val); \
+ } while (0)
+#define DHD_PM_WAKE_UNLOCK(pub) \
+ do { \
+ printf("call pm_wake unlock\n"); \
+ dhd_pm_wake_unlock(pub); \
+ } while (0)
+#define DHD_TXFL_WAKE_LOCK_TIMEOUT(pub, val) \
+ do { \
+ printf("call pm_wake_timeout enable\n"); \
+ dhd_txfl_wake_lock_timeout(pub, val); \
+ } while (0)
+#define DHD_TXFL_WAKE_UNLOCK(pub) \
+ do { \
+ printf("call pm_wake unlock\n"); \
+ dhd_txfl_wake_unlock(pub); \
+ } while (0)
+#define DHD_NAN_WAKE_LOCK_TIMEOUT(pub, val) \
+ do { \
+ printf("call pm_wake_timeout enable\n"); \
+ dhd_nan_wake_lock_timeout(pub, val); \
+ } while (0)
+#define DHD_NAN_WAKE_UNLOCK(pub) \
+ do { \
+ printf("call pm_wake unlock\n"); \
+ dhd_nan_wake_unlock(pub); \
+ } while (0)
+#define DHD_OS_WAKE_LOCK_TIMEOUT(pub) \
+ do { \
+ printf("call wake_lock_timeout: %s %d\n", \
+ __FUNCTION__, __LINE__); \
+ dhd_os_wake_lock_timeout(pub); \
+ } while (0)
+#define DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(pub, val) \
+ do { \
+ printf("call dhd_wake_lock_rx_timeout_enable[%d]: %s %d\n", \
+ val, __FUNCTION__, __LINE__); \
+ dhd_os_wake_lock_rx_timeout_enable(pub, val); \
+ } while (0)
+#define DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(pub, val) \
+ do { \
+ printf("call dhd_wake_lock_ctrl_timeout_enable[%d]: %s %d\n", \
+ val, __FUNCTION__, __LINE__); \
+ dhd_os_wake_lock_ctrl_timeout_enable(pub, val); \
+ } while (0)
+#define DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_CANCEL(pub) \
+ do { \
+ printf("call dhd_wake_lock_ctrl_timeout_cancel: %s %d\n", \
+ __FUNCTION__, __LINE__); \
+ dhd_os_wake_lock_ctrl_timeout_cancel(pub); \
+ } while (0)
+#define DHD_OS_WAKE_LOCK_WAIVE(pub) \
+ do { \
+ printf("call dhd_wake_lock_waive: %s %d\n", \
+ __FUNCTION__, __LINE__); \
+ dhd_os_wake_lock_waive(pub); \
+ } while (0)
+#define DHD_OS_WAKE_LOCK_RESTORE(pub) \
+ do { \
+ printf("call dhd_wake_lock_restore: %s %d\n", \
+ __FUNCTION__, __LINE__); \
+ dhd_os_wake_lock_restore(pub); \
+ } while (0)
+#define DHD_OS_WAKE_LOCK_INIT(dhd) \
+ do { \
+ printf("call dhd_wake_lock_init: %s %d\n", \
+ __FUNCTION__, __LINE__); \
+ dhd_os_wake_lock_init(dhd); \
+ } while (0)
+#define DHD_OS_WAKE_LOCK_DESTROY(dhd) \
+ do { \
+ printf("call dhd_wake_dhd_lock_destroy: %s %d\n", \
+ __FUNCTION__, __LINE__); \
+ dhd_os_wake_lock_destroy(dhd); \
+ } while (0)
+#else
+#define DHD_OS_WAKE_LOCK(pub) dhd_os_wake_lock(pub)
+#define DHD_OS_WAKE_UNLOCK(pub) dhd_os_wake_unlock(pub)
+#define DHD_EVENT_WAKE_LOCK(pub) dhd_event_wake_lock(pub)
+#define DHD_EVENT_WAKE_UNLOCK(pub) dhd_event_wake_unlock(pub)
+#define DHD_PM_WAKE_LOCK_TIMEOUT(pub, val) dhd_pm_wake_lock_timeout(pub, val)
+#define DHD_PM_WAKE_UNLOCK(pub) dhd_pm_wake_unlock(pub)
+#define DHD_TXFL_WAKE_LOCK_TIMEOUT(pub, val) dhd_txfl_wake_lock_timeout(pub, val)
+#define DHD_TXFL_WAKE_UNLOCK(pub) dhd_txfl_wake_unlock(pub)
+#define DHD_NAN_WAKE_LOCK_TIMEOUT(pub, val) dhd_nan_wake_lock_timeout(pub, val)
+#define DHD_NAN_WAKE_UNLOCK(pub) dhd_nan_wake_unlock(pub)
+#define DHD_OS_WAKE_LOCK_TIMEOUT(pub) dhd_os_wake_lock_timeout(pub)
+#define DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(pub, val) \
+ dhd_os_wake_lock_rx_timeout_enable(pub, val)
+#define DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(pub, val) \
+ dhd_os_wake_lock_ctrl_timeout_enable(pub, val)
+#define DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_CANCEL(pub) \
+ dhd_os_wake_lock_ctrl_timeout_cancel(pub)
+#define DHD_OS_WAKE_LOCK_WAIVE(pub) dhd_os_wake_lock_waive(pub)
+#define DHD_OS_WAKE_LOCK_RESTORE(pub) dhd_os_wake_lock_restore(pub)
+#define DHD_OS_WAKE_LOCK_INIT(dhd)		dhd_os_wake_lock_init(dhd)
+#define DHD_OS_WAKE_LOCK_DESTROY(dhd)		dhd_os_wake_lock_destroy(dhd)
+#endif /* DHD_DEBUG_WAKE_LOCK */
+
+#define DHD_OS_WD_WAKE_LOCK(pub) dhd_os_wd_wake_lock(pub)
+#define DHD_OS_WD_WAKE_UNLOCK(pub) dhd_os_wd_wake_unlock(pub)
+
+#ifdef DHD_USE_SCAN_WAKELOCK
+#ifdef DHD_DEBUG_SCAN_WAKELOCK
+#define DHD_OS_SCAN_WAKE_LOCK_TIMEOUT(pub, val) \
+ do { \
+ printf("call wake_lock_scan: %s %d\n", \
+ __FUNCTION__, __LINE__); \
+ dhd_os_scan_wake_lock_timeout(pub, val); \
+ } while (0)
+#define DHD_OS_SCAN_WAKE_UNLOCK(pub) \
+ do { \
+ printf("call wake_unlock_scan: %s %d\n", \
+ __FUNCTION__, __LINE__); \
+ dhd_os_scan_wake_unlock(pub); \
+ } while (0)
+#else
+#define DHD_OS_SCAN_WAKE_LOCK_TIMEOUT(pub, val) dhd_os_scan_wake_lock_timeout(pub, val)
+#define DHD_OS_SCAN_WAKE_UNLOCK(pub) dhd_os_scan_wake_unlock(pub)
+#endif /* DHD_DEBUG_SCAN_WAKELOCK */
+#else
+#define DHD_OS_SCAN_WAKE_LOCK_TIMEOUT(pub, val)
+#define DHD_OS_SCAN_WAKE_UNLOCK(pub)
+#endif /* DHD_USE_SCAN_WAKELOCK */
+
+#else
+
+/* Wake locks are used in Android only (until the Linux community accepts them) */
+#define DHD_OS_WAKE_LOCK(pub)
+#define DHD_OS_WAKE_UNLOCK(pub)
+#define DHD_EVENT_WAKE_LOCK(pub)
+#define DHD_EVENT_WAKE_UNLOCK(pub)
+#define DHD_PM_WAKE_LOCK_TIMEOUT(pub, val)
+#define DHD_PM_WAKE_UNLOCK(pub)
+#define DHD_TXFL_WAKE_LOCK_TIMEOUT(pub, val)
+#define DHD_TXFL_WAKE_UNLOCK(pub)
+#define DHD_NAN_WAKE_LOCK_TIMEOUT(pub, val)
+#define DHD_NAN_WAKE_UNLOCK(pub)
+#define DHD_OS_WD_WAKE_LOCK(pub)
+#define DHD_OS_WD_WAKE_UNLOCK(pub)
+#define DHD_OS_WAKE_LOCK_TIMEOUT(pub)
+#define DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(pub, val) UNUSED_PARAMETER(val)
+#define DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(pub, val) UNUSED_PARAMETER(val)
+#define DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_CANCEL(pub)
+#define DHD_OS_WAKE_LOCK_WAIVE(pub)
+#define DHD_OS_WAKE_LOCK_RESTORE(pub)
+#define DHD_OS_SCAN_WAKE_LOCK_TIMEOUT(pub, val)
+#define DHD_OS_SCAN_WAKE_UNLOCK(pub)
+#define DHD_OS_WAKE_LOCK_INIT(dhd)
+#define DHD_OS_WAKE_LOCK_DESTROY(dhd)
+
+#endif /* #defined(linux) && defined(OEM_ANDROID) */
+
+#ifdef BCMPCIE_OOB_HOST_WAKE
+#define OOB_WAKE_LOCK_TIMEOUT 500
+extern void dhd_os_oob_irq_wake_lock_timeout(dhd_pub_t *pub, int val);
+extern void dhd_os_oob_irq_wake_unlock(dhd_pub_t *pub);
+
+#define DHD_OS_OOB_IRQ_WAKE_LOCK_TIMEOUT(pub, val) dhd_os_oob_irq_wake_lock_timeout(pub, val)
+#define DHD_OS_OOB_IRQ_WAKE_UNLOCK(pub) dhd_os_oob_irq_wake_unlock(pub)
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+
+#define DHD_PACKET_TIMEOUT_MS 500
+#define DHD_EVENT_TIMEOUT_MS 1500
+#define SCAN_WAKE_LOCK_TIMEOUT 10000
+#define MAX_TX_TIMEOUT 500
+
+/* Enum for IOCTL received status */
+typedef enum dhd_ioctl_recieved_status
+{
+ IOCTL_WAIT = 0,
+ IOCTL_RETURN_ON_SUCCESS,
+ IOCTL_RETURN_ON_TRAP,
+ IOCTL_RETURN_ON_BUS_STOP,
+ IOCTL_RETURN_ON_ERROR
+} dhd_ioctl_recieved_status_t;
+
+/* Interface operations (register, remove) should be atomic; use this lock to prevent race
+ * conditions between wifi on/off and the interface operation functions
+ */
+#if defined(LINUX)
+void dhd_net_if_lock(struct net_device *dev);
+void dhd_net_if_unlock(struct net_device *dev);
+#endif /* LINUX */
+
+#if defined(LINUX) || defined(linux)
+#if defined(MULTIPLE_SUPPLICANT)
+extern void wl_android_post_init(void); // terence 20120530: fix critical section in dhd_open and dhdsdio_probe
+#endif /* MULTIPLE_SUPPLICANT */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && defined(MULTIPLE_SUPPLICANT)
+extern struct mutex _dhd_mutex_lock_;
+#define DHD_MUTEX_IS_LOCK_RETURN() \
+ if (mutex_is_locked(&_dhd_mutex_lock_) != 0) { \
+ printf("%s : probe is already running! return.\n", __FUNCTION__); \
+		return -EBUSY; \
+ }
+#define DHD_MUTEX_LOCK() \
+ do { \
+ if (mutex_is_locked(&_dhd_mutex_lock_) == 0) { \
+ printf("%s : no mutex held\n", __FUNCTION__); \
+ } else { \
+			printf("%s : mutex is locked! waiting for unlock\n", __FUNCTION__); \
+ } \
+ mutex_lock(&_dhd_mutex_lock_); \
+ printf("%s : set mutex lock\n", __FUNCTION__); \
+ } while (0)
+#define DHD_MUTEX_UNLOCK() \
+ do { \
+ printf("%s : mutex is released.\n", __FUNCTION__); \
+ mutex_unlock(&_dhd_mutex_lock_); \
+ } while (0)
+#else
+#define DHD_MUTEX_IS_LOCK_RETURN() do {} while (0)
+#define DHD_MUTEX_LOCK() do {} while (0)
+#define DHD_MUTEX_UNLOCK() do {} while (0)
+#endif
+#endif /* defined (LINUX) || defined(linux) */
+
+typedef enum dhd_attach_states
+{
+ DHD_ATTACH_STATE_INIT = 0x0,
+ DHD_ATTACH_STATE_NET_ALLOC = 0x1,
+ DHD_ATTACH_STATE_DHD_ALLOC = 0x2,
+ DHD_ATTACH_STATE_ADD_IF = 0x4,
+ DHD_ATTACH_STATE_PROT_ATTACH = 0x8,
+ DHD_ATTACH_STATE_WL_ATTACH = 0x10,
+ DHD_ATTACH_STATE_THREADS_CREATED = 0x20,
+ DHD_ATTACH_STATE_WAKELOCKS_INIT = 0x40,
+ DHD_ATTACH_STATE_CFG80211 = 0x80,
+ DHD_ATTACH_STATE_EARLYSUSPEND_DONE = 0x100,
+ DHD_ATTACH_TIMESYNC_ATTACH_DONE = 0x200,
+ DHD_ATTACH_LOGTRACE_INIT = 0x400,
+ DHD_ATTACH_STATE_LB_ATTACH_DONE = 0x800,
+ DHD_ATTACH_STATE_DONE = 0x1000
+} dhd_attach_states_t;
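+/*
+ * Usage sketch (illustrative only): the attach states are bit flags, so a
+ * partially completed attach can be unwound by testing each bit that was set
+ * on the way up.
+ *
+ *	uint32 state = 0;
+ *	state |= DHD_ATTACH_STATE_NET_ALLOC;
+ *	state |= DHD_ATTACH_STATE_WAKELOCKS_INIT;
+ *	// ... on failure ...
+ *	if (state & DHD_ATTACH_STATE_WAKELOCKS_INIT)
+ *		DHD_OS_WAKE_LOCK_DESTROY(dhd);	// undo only what was done
+ */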
+
+/* Value -1 means we are unsuccessful in creating the kthread. */
+#define DHD_PID_KT_INVALID -1
+/* Value -2 means we are unsuccessful in both creating the kthread and tasklet */
+#define DHD_PID_KT_TL_INVALID -2
+
+/* default reporting period */
+#define ECOUNTERS_DEFAULT_PERIOD 0
+
+/* default number of reports. '0' indicates forever */
+#define ECOUNTERS_NUM_REPORTS 0
+
+typedef struct ecounters_cfg {
+ uint16 type;
+ uint16 if_slice_idx;
+ uint16 stats_rep;
+} ecounters_cfg_t;
+
+typedef struct event_ecounters_cfg {
+ uint16 event_id;
+ uint16 type;
+ uint16 if_slice_idx;
+ uint16 stats_rep;
+} event_ecounters_cfg_t;
+
+typedef struct ecountersv2_xtlv_list_elt {
+	/* Not quite the exact bcm_xtlv_t type, as 'data' may point to other pieces of
+	 * memory at the time the arguments are parsed.
+ */
+ uint16 id;
+ uint16 len;
+ uint8 *data;
+ struct ecountersv2_xtlv_list_elt *next;
+} ecountersv2_xtlv_list_elt_t;
+
+typedef struct ecountersv2_processed_xtlv_list_elt {
+ uint8 *data;
+ struct ecountersv2_processed_xtlv_list_elt *next;
+} ecountersv2_processed_xtlv_list_elt;
+
+/*
+ * Exported from dhd OS modules (dhd_linux/dhd_ndis)
+ */
+
+/* Indication from the bus module regarding presence/insertion of the dongle.
+ * Returns a dhd_pub_t pointer, used as a handle to the OS module in later calls.
+ * The returned structure should have its bus and prot pointers filled in.
+ * bus_hdrlen specifies the required headroom for the bus module header.
+ */
+extern dhd_pub_t *dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen
+#ifdef BCMDBUS
+ , void *adapter
+#endif
+);
+extern int dhd_attach_net(dhd_pub_t *dhdp, bool need_rtnl_lock);
+#if defined(WLP2P) && defined(WL_CFG80211)
+/* To allow attach/detach calls corresponding to p2p0 interface */
+extern int dhd_attach_p2p(dhd_pub_t *);
+extern int dhd_detach_p2p(dhd_pub_t *);
+#endif /* WLP2P && WL_CFG80211 */
+extern int dhd_register_if(dhd_pub_t *dhdp, int idx, bool need_rtnl_lock);
+
+/* Indication from bus module regarding removal/absence of dongle */
+extern void dhd_detach(dhd_pub_t *dhdp);
+extern void dhd_free(dhd_pub_t *dhdp);
+extern void dhd_clear(dhd_pub_t *dhdp);
+
+/* Indication from bus module to change flow-control state */
+extern void dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool on);
+
+#ifdef BCMDONGLEHOST
+/* Store the status of a connection attempt for later retrieval by an iovar */
+extern void dhd_store_conn_status(uint32 event, uint32 status, uint32 reason);
+#endif /* BCMDONGLEHOST */
+
+extern bool dhd_prec_enq(dhd_pub_t *dhdp, struct pktq *q, void *pkt, int prec);
+
+extern void dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *rxp, int numpkt, uint8 chan);
+
+/* Return pointer to interface name */
+extern char *dhd_ifname(dhd_pub_t *dhdp, int idx);
+
+#ifdef DHD_UCODE_DOWNLOAD
+/* Returns the ucode path */
+extern char *dhd_get_ucode_path(dhd_pub_t *dhdp);
+#endif /* DHD_UCODE_DOWNLOAD */
+
+/* Request scheduling of the bus dpc */
+extern void dhd_sched_dpc(dhd_pub_t *dhdp);
+
+/* Notify tx completion */
+extern void dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success);
+#ifdef DHD_4WAYM4_FAIL_DISCONNECT
+extern void dhd_eap_txcomplete(dhd_pub_t *dhdp, void *txp, bool success, int ifidx);
+extern void dhd_cleanup_m4_state_work(dhd_pub_t *dhdp, int ifidx);
+#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
+
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+extern void dhd_bus_wakeup_work(dhd_pub_t *dhdp);
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+
+#define WIFI_FEATURE_INFRA 0x0001 /* Basic infrastructure mode */
+#define WIFI_FEATURE_INFRA_5G 0x0002 /* Support for 5 GHz Band */
+#define WIFI_FEATURE_HOTSPOT 0x0004 /* Support for GAS/ANQP */
+#define WIFI_FEATURE_P2P 0x0008 /* Wifi-Direct */
+#define WIFI_FEATURE_SOFT_AP 0x0010 /* Soft AP */
+#define WIFI_FEATURE_GSCAN 0x0020 /* Google-Scan APIs */
+#define WIFI_FEATURE_NAN 0x0040 /* Neighbor Awareness Networking */
+#define WIFI_FEATURE_D2D_RTT 0x0080 /* Device-to-device RTT */
+#define WIFI_FEATURE_D2AP_RTT 0x0100 /* Device-to-AP RTT */
+#define WIFI_FEATURE_BATCH_SCAN 0x0200 /* Batched Scan (legacy) */
+#define WIFI_FEATURE_PNO 0x0400 /* Preferred network offload */
+#define WIFI_FEATURE_ADDITIONAL_STA 0x0800 /* Support for two STAs */
+#define WIFI_FEATURE_TDLS 0x1000 /* Tunnel directed link setup */
+#define WIFI_FEATURE_TDLS_OFFCHANNEL 0x2000 /* Support for TDLS off channel */
+#define WIFI_FEATURE_EPR 0x4000 /* Enhanced power reporting */
+#define WIFI_FEATURE_AP_STA 0x8000 /* Support for AP STA Concurrency */
+#define WIFI_FEATURE_LINKSTAT 0x10000 /* Support for Linkstats */
+#define WIFI_FEATURE_LOGGER 0x20000 /* WiFi Logger */
+#define WIFI_FEATURE_HAL_EPNO 0x40000 /* WiFi PNO enhanced */
+#define WIFI_FEATURE_RSSI_MONITOR 0x80000 /* RSSI Monitor */
+#define WIFI_FEATURE_MKEEP_ALIVE 0x100000 /* WiFi mkeep_alive */
+#define WIFI_FEATURE_CONFIG_NDO 0x200000 /* ND offload configure */
+#define WIFI_FEATURE_TX_TRANSMIT_POWER 0x400000 /* Capture Tx transmit power levels */
+#define WIFI_FEATURE_CONTROL_ROAMING 0x800000 /* Enable/Disable firmware roaming */
+#define WIFI_FEATURE_FILTER_IE 0x1000000 /* Probe req ie filter */
+#define WIFI_FEATURE_SCAN_RAND 0x2000000 /* MAC & Prb SN randomization */
+#define WIFI_FEATURE_SET_TX_POWER_LIMIT 0x4000000 /* Support Tx Power Limit setting */
+#define WIFI_FEATURE_USE_BODY_HEAD_SAR 0x8000000 /* Support Body/Head Proximity SAR */
+#define WIFI_FEATURE_SET_LATENCY_MODE 0x40000000 /* Support Latency mode setting */
+#define WIFI_FEATURE_P2P_RAND_MAC 0x80000000 /* Support P2P MAC randomization */
+#define WIFI_FEATURE_INVALID 0xFFFFFFFF /* Invalid Feature */
+
+#define MAX_FEATURE_SET_CONCURRRENT_GROUPS 3
+
+#if defined(linux) || defined(LINUX) || defined(OEM_ANDROID)
+extern int dhd_dev_get_feature_set(struct net_device *dev);
+extern int dhd_dev_get_feature_set_matrix(struct net_device *dev, int num);
+extern int dhd_dev_cfg_rand_mac_oui(struct net_device *dev, uint8 *oui);
+extern int dhd_update_rand_mac_addr(dhd_pub_t *dhd);
+#ifdef CUSTOM_FORCE_NODFS_FLAG
+extern int dhd_dev_set_nodfs(struct net_device *dev, uint nodfs);
+#endif /* CUSTOM_FORCE_NODFS_FLAG */
+#ifdef NDO_CONFIG_SUPPORT
+#ifndef NDO_MAX_HOST_IP_ENTRIES
+#define NDO_MAX_HOST_IP_ENTRIES 10
+#endif /* NDO_MAX_HOST_IP_ENTRIES */
+
+extern int dhd_dev_ndo_cfg(struct net_device *dev, u8 enable);
+extern int dhd_dev_ndo_update_inet6addr(struct net_device * dev);
+#endif /* NDO_CONFIG_SUPPORT */
+#endif /* linux || LINUX || OEM_ANDROID */
+extern int dhd_set_rand_mac_oui(dhd_pub_t *dhd);
+#ifdef GSCAN_SUPPORT
+extern int dhd_dev_set_lazy_roam_cfg(struct net_device *dev,
+ wlc_roam_exp_params_t *roam_param);
+extern int dhd_dev_lazy_roam_enable(struct net_device *dev, uint32 enable);
+extern int dhd_dev_set_lazy_roam_bssid_pref(struct net_device *dev,
+ wl_bssid_pref_cfg_t *bssid_pref, uint32 flush);
+#endif /* GSCAN_SUPPORT */
+#if defined(GSCAN_SUPPORT) || defined(ROAMEXP_SUPPORT)
+extern int dhd_dev_set_blacklist_bssid(struct net_device *dev, maclist_t *blacklist,
+ uint32 len, uint32 flush);
+extern int dhd_dev_set_whitelist_ssid(struct net_device *dev, wl_ssid_whitelist_t *whitelist,
+ uint32 len, uint32 flush);
+#endif /* GSCAN_SUPPORT || ROAMEXP_SUPPORT */
+
+/* OS independent layer functions */
+extern void dhd_os_dhdiovar_lock(dhd_pub_t *pub);
+extern void dhd_os_dhdiovar_unlock(dhd_pub_t *pub);
+void dhd_os_logdump_lock(dhd_pub_t *pub);
+void dhd_os_logdump_unlock(dhd_pub_t *pub);
+extern int dhd_os_proto_block(dhd_pub_t * pub);
+extern int dhd_os_proto_unblock(dhd_pub_t * pub);
+extern int dhd_os_ioctl_resp_wait(dhd_pub_t * pub, uint * condition);
+extern int dhd_os_ioctl_resp_wake(dhd_pub_t * pub);
+extern unsigned int dhd_os_get_ioctl_resp_timeout(void);
+extern void dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec);
+extern void dhd_os_ioctl_resp_lock(dhd_pub_t * pub);
+extern void dhd_os_ioctl_resp_unlock(dhd_pub_t * pub);
+#ifdef PCIE_FULL_DONGLE
+extern void dhd_wakeup_ioctl_event(dhd_pub_t *pub, dhd_ioctl_recieved_status_t reason);
+#else
+static INLINE void dhd_wakeup_ioctl_event(dhd_pub_t *pub, dhd_ioctl_recieved_status_t reason)
+{ printf("%s is NOT implemented for SDIO", __FUNCTION__); return; }
+#endif
+#ifdef SHOW_LOGTRACE
+/* Bound and delay were fine-tuned through several experiments; these are
+ * the best-case values for handling bursts of console logs.
+ */
+#define DHD_EVENT_LOGTRACE_BOUND 10u
+#define DHD_EVENT_LOGTRACE_RESCHEDULE_DELAY_MS 10u
+extern int dhd_os_read_file(void *file, char *buf, uint32 size);
+extern int dhd_os_seek_file(void *file, int64 offset);
+#endif /* SHOW_LOGTRACE */
+int dhd_os_write_file_posn(void *fp, unsigned long *posn,
+ void *buf, unsigned long buflen);
+int dhd_msix_message_set(dhd_pub_t *dhdp, uint table_entry,
+ uint message_number, bool unmask);
+
+#if defined(DHD_EFI)
+void dhd_os_set_intr_poll_period(struct dhd_bus *bus, unsigned int period_us);
+unsigned int dhd_os_get_intr_poll_period(void);
+int dhd_intr_poll(struct dhd_bus *bus, char *arg, int len, int set);
+#define INTR_POLL_PERIOD_CRITICAL 100 /* 100us -- in us */
+#define INTR_POLL_NPKTS_THRESH 1
+#define INTR_POLL_PKT_INTERVAL_THRESH 2000000 /* 2000ms -- in us */
+#if defined(DHD_INTR_POLL_PERIOD_DYNAMIC)
+void dhd_intr_poll_pkt_thresholds(dhd_pub_t *dhd);
+#endif /* DHD_INTR_POLL_PERIOD_DYNAMIC */
+#endif /* DHD_EFI */
+
+extern void
+dhd_pcie_dump_core_regs(dhd_pub_t * pub, uint32 index, uint32 first_addr, uint32 last_addr);
+extern void wl_dhdpcie_dump_regs(void * context);
+
+#define DHD_OS_IOCTL_RESP_LOCK(x)
+#define DHD_OS_IOCTL_RESP_UNLOCK(x)
+
+#if defined(NDIS)
+#define dhd_os_open_image(a) wl_os_open_image(a)
+#define dhd_os_close_image(a) wl_os_close_image(a)
+#define dhd_os_get_image_block(a, b, c) wl_os_get_image_block(a, b, c)
+#define dhd_os_get_image_size(a) wl_os_get_image_size(a)
+extern void dhd_os_wakeind(dhd_pub_t * pub, uint32 *val);
+extern void dhd_bus_check_died(void *bus);
+extern void pci_save_state(osl_t *osh, uint32 *buffer);
+extern void pci_restore_state(osl_t *osh, uint32 *buffer);
+#endif /* NDIS */
+
+extern int dhd_os_get_image_block(char * buf, int len, void * image);
+extern int dhd_os_get_image_size(void * image);
+#if defined(BT_OVER_SDIO)
+extern int dhd_os_gets_image(dhd_pub_t *pub, char *str, int len, void *image);
+extern void dhdsdio_bus_usr_cnt_inc(dhd_pub_t *pub);
+extern void dhdsdio_bus_usr_cnt_dec(dhd_pub_t *pub);
+#endif /* (BT_OVER_SDIO) */
+extern void *dhd_os_open_image1(dhd_pub_t *pub, char *filename); /* rev1 function signature */
+extern void dhd_os_close_image1(dhd_pub_t *pub, void *image);
+extern void dhd_os_wd_timer(void *bus, uint wdtick);
+#ifdef DHD_PCIE_RUNTIMEPM
+extern void dhd_os_runtimepm_timer(void *bus, uint tick);
+#endif /* DHD_PCIE_RUNTIMEPM */
+extern void dhd_os_sdlock(dhd_pub_t * pub);
+extern void dhd_os_sdunlock(dhd_pub_t * pub);
+extern void dhd_os_sdlock_txq(dhd_pub_t * pub);
+extern void dhd_os_sdunlock_txq(dhd_pub_t * pub);
+extern unsigned long dhd_os_sdlock_txoff(dhd_pub_t * pub);
+extern void dhd_os_sdunlock_txoff(dhd_pub_t * pub, unsigned long flags);
+extern void dhd_os_sdlock_rxq(dhd_pub_t * pub);
+extern void dhd_os_sdunlock_rxq(dhd_pub_t * pub);
+extern void dhd_os_sdlock_sndup_rxq(dhd_pub_t * pub);
+extern void dhd_os_tracelog(const char *format, ...);
+#ifdef DHDTCPACK_SUPPRESS
+extern unsigned long dhd_os_tcpacklock(dhd_pub_t *pub);
+extern void dhd_os_tcpackunlock(dhd_pub_t *pub, unsigned long flags);
+#endif /* DHDTCPACK_SUPPRESS */
+
+extern int dhd_customer_oob_irq_map(void *adapter, unsigned long *irq_flags_ptr);
+extern int dhd_customer_gpio_wlan_ctrl(void *adapter, int onoff);
+extern int dhd_custom_get_mac_address(void *adapter, unsigned char *buf);
+#if defined(CUSTOM_COUNTRY_CODE)
+extern void get_customized_country_code(void *adapter, char *country_iso_code,
+ wl_country_t *cspec, u32 flags);
+#else
+extern void get_customized_country_code(void *adapter, char *country_iso_code, wl_country_t *cspec);
+#endif /* CUSTOM_COUNTRY_CODE */
+extern void dhd_os_sdunlock_sndup_rxq(dhd_pub_t * pub);
+extern void dhd_os_sdlock_eventq(dhd_pub_t * pub);
+extern void dhd_os_sdunlock_eventq(dhd_pub_t * pub);
+extern bool dhd_os_check_hang(dhd_pub_t *dhdp, int ifidx, int ret);
+extern int dhd_os_send_hang_message(dhd_pub_t *dhdp);
+extern void dhd_set_version_info(dhd_pub_t *pub, char *fw);
+extern bool dhd_os_check_if_up(dhd_pub_t *pub);
+extern int dhd_os_check_wakelock(dhd_pub_t *pub);
+extern int dhd_os_check_wakelock_all(dhd_pub_t *pub);
+extern int dhd_get_instance(dhd_pub_t *pub);
+#ifdef CUSTOM_SET_CPUCORE
+extern void dhd_set_cpucore(dhd_pub_t *dhd, int set);
+#endif /* CUSTOM_SET_CPUCORE */
+
+#if defined(KEEP_ALIVE)
+extern int dhd_keep_alive_onoff(dhd_pub_t *dhd);
+#endif /* KEEP_ALIVE */
+
+#if defined(DHD_FW_COREDUMP)
+void dhd_schedule_memdump(dhd_pub_t *dhdp, uint8 *buf, uint32 size);
+#endif /* DHD_FW_COREDUMP */
+
+#if defined(linux) || defined(LINUX)
+#if defined(DHD_SSSR_DUMP)
+void dhd_write_sssr_dump(dhd_pub_t *dhdp, uint32 dump_mode);
+#endif /* DHD_SSSR_DUMP */
+#ifdef DNGL_AXI_ERROR_LOGGING
+void dhd_schedule_axi_error_dump(dhd_pub_t *dhdp, void *type);
+#endif /* DNGL_AXI_ERROR_LOGGING */
+#ifdef BCMPCIE
+void dhd_schedule_cto_recovery(dhd_pub_t *dhdp);
+#endif /* BCMPCIE */
+#else
+#if defined(DHD_SSSR_DUMP)
+static INLINE void dhd_write_sssr_dump(dhd_pub_t *dhd, uint32 dump_mode) { return; }
+#endif /* DHD_SSSR_DUMP */
+#ifdef DNGL_AXI_ERROR_LOGGING
+static INLINE void dhd_schedule_axi_error_dump(dhd_pub_t *dhdp, void *type) { return; }
+#endif /* DNGL_AXI_ERROR_LOGGING */
+/* For non-Linux builds, map dhd_schedule_cto_recovery to dhdpcie_cto_recovery_handler */
+#ifdef BCMPCIE
+#define dhd_schedule_cto_recovery(dhdp) dhdpcie_cto_recovery_handler(dhdp)
+#endif /* BCMPCIE */
+#endif /* linux || LINUX */
+
+#ifdef EWP_EDL
+#define EDL_SCHEDULE_DELAY 500 /* 500ms */
+#if defined(linux) || defined(LINUX)
+void dhd_schedule_edl_work(dhd_pub_t *dhdp, uint delay_ms);
+#else
+static INLINE void dhd_schedule_edl_work(dhd_pub_t *dhd, uint delay_ms) { return; }
+#endif /* linux || LINUX */
+#endif /* EWP_EDL */
+
+#ifdef SUPPORT_AP_POWERSAVE
+extern int dhd_set_ap_powersave(dhd_pub_t *dhdp, int ifidx, int enable);
+#endif /* SUPPORT_AP_POWERSAVE */
+
+#ifdef PKT_FILTER_SUPPORT
+#define DHD_UNICAST_FILTER_NUM 0
+#define DHD_BROADCAST_FILTER_NUM 1
+#define DHD_MULTICAST4_FILTER_NUM 2
+#define DHD_MULTICAST6_FILTER_NUM 3
+#define DHD_MDNS_FILTER_NUM 4
+#define DHD_ARP_FILTER_NUM 5
+#define DHD_BROADCAST_ARP_FILTER_NUM 6
+#define DHD_IP4BCAST_DROP_FILTER_NUM 7
+#define DHD_LLC_STP_DROP_FILTER_NUM 8
+#define DHD_LLC_XID_DROP_FILTER_NUM 9
+#define DHD_UDPNETBIOS_DROP_FILTER_NUM 10
+#define DISCARD_IPV4_MCAST "102 1 6 IP4_H:16 0xf0 0xe0"
+#define DISCARD_IPV6_MCAST "103 1 6 IP6_H:24 0xff 0xff"
+#define DISCARD_IPV4_BCAST "107 1 6 IP4_H:16 0xffffffff 0xffffffff"
+#define DISCARD_LLC_STP "108 1 6 ETH_H:14 0xFFFFFFFFFFFF 0xAAAA0300000C"
+#define DISCARD_LLC_XID "109 1 6 ETH_H:14 0xFFFFFF 0x0001AF"
+#define DISCARD_UDPNETBIOS "110 1 6 UDP_H:2 0xffff 0x0089"
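+
+/* The DISCARD_* strings above use the textual pkt_filter_add form. Reading
+ * DISCARD_IPV4_MCAST as an example (the field meanings are stated here as an
+ * assumption about the usual wl/dhd filter syntax):
+ *
+ *	"102  1   6     IP4_H:16   0xf0  0xe0"
+ *	  id  on  type  base:offs  mask  pattern
+ *
+ * i.e. match when the first byte of the IPv4 destination address (offset 16
+ * into the IP header), masked with 0xf0, equals 0xe0 (224.0.0.0/4 multicast).
+ */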
+extern int dhd_os_enable_packet_filter(dhd_pub_t *dhdp, int val);
+extern void dhd_enable_packet_filter(int value, dhd_pub_t *dhd);
+extern int dhd_packet_filter_add_remove(dhd_pub_t *dhdp, int add_remove, int num);
+extern int net_os_enable_packet_filter(struct net_device *dev, int val);
+extern int net_os_rxfilter_add_remove(struct net_device *dev, int val, int num);
+extern int net_os_set_suspend_bcn_li_dtim(struct net_device *dev, int val);
+
+#define MAX_PKTFLT_BUF_SIZE 2048
+#define MAX_PKTFLT_FIXED_PATTERN_SIZE 32
+#define MAX_PKTFLT_FIXED_BUF_SIZE \
+ (WL_PKT_FILTER_FIXED_LEN + MAX_PKTFLT_FIXED_PATTERN_SIZE * 2)
+#define MAXPKT_ARG 16
+#endif /* PKT_FILTER_SUPPORT */
+
+#if defined(OEM_ANDROID) && defined(BCMPCIE)
+extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd, int *dtim_period, int *bcn_interval);
+#else
+extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd);
+#endif /* OEM_ANDROID && BCMPCIE */
+
+extern bool dhd_support_sta_mode(dhd_pub_t *dhd);
+extern int write_to_file(dhd_pub_t *dhd, uint8 *buf, int size);
+
+#ifdef RSSI_MONITOR_SUPPORT
+extern int dhd_dev_set_rssi_monitor_cfg(struct net_device *dev, int start,
+ int8 max_rssi, int8 min_rssi);
+#endif /* RSSI_MONITOR_SUPPORT */
+
+#ifdef DHDTCPACK_SUPPRESS
+int dhd_dev_set_tcpack_sup_mode_cfg(struct net_device *dev, uint8 enable);
+#endif /* DHDTCPACK_SUPPRESS */
+
+#define DHD_RSSI_MONITOR_EVT_VERSION 1
+typedef struct {
+ uint8 version;
+ int8 cur_rssi;
+ struct ether_addr BSSID;
+} dhd_rssi_monitor_evt_t;
+
+typedef struct {
+ uint32 limit; /* Expiration time (usec) */
+ uint32 increment; /* Current expiration increment (usec) */
+ uint32 elapsed; /* Current elapsed time (usec) */
+ uint32 tick; /* O/S tick time (usec) */
+} dhd_timeout_t;
+
+#ifdef SHOW_LOGTRACE
+typedef struct {
+ uint num_fmts;
+ char **fmts;
+ char *raw_fmts;
+ char *raw_sstr;
+ uint32 fmts_size;
+ uint32 raw_fmts_size;
+ uint32 raw_sstr_size;
+ uint32 ramstart;
+ uint32 rodata_start;
+ uint32 rodata_end;
+ char *rom_raw_sstr;
+ uint32 rom_raw_sstr_size;
+ uint32 rom_ramstart;
+ uint32 rom_rodata_start;
+ uint32 rom_rodata_end;
+} dhd_event_log_t;
+#endif /* SHOW_LOGTRACE */
+
+#if defined(PKT_FILTER_SUPPORT) && defined(APF)
+/*
+ * As per Google's current implementation there is only one APF filter,
+ * so userspace does not track a filter id; DHD therefore has to manage
+ * the filter id itself.
+ */
+#define PKT_FILTER_APF_ID 200
+#define DHD_APF_LOCK(ndev) dhd_apf_lock(ndev)
+#define DHD_APF_UNLOCK(ndev) dhd_apf_unlock(ndev)
+
+extern void dhd_apf_lock(struct net_device *dev);
+extern void dhd_apf_unlock(struct net_device *dev);
+extern int dhd_dev_apf_get_version(struct net_device *ndev, uint32 *version);
+extern int dhd_dev_apf_get_max_len(struct net_device *ndev, uint32 *max_len);
+extern int dhd_dev_apf_add_filter(struct net_device *ndev, u8* program,
+ uint32 program_len);
+extern int dhd_dev_apf_enable_filter(struct net_device *ndev);
+extern int dhd_dev_apf_disable_filter(struct net_device *ndev);
+extern int dhd_dev_apf_delete_filter(struct net_device *ndev);
+#endif /* PKT_FILTER_SUPPORT && APF */
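+
+/* Sketch of the expected serialization around APF operations (an assumption
+ * that all filter manipulation goes through the APF lock):
+ *
+ *	DHD_APF_LOCK(ndev);
+ *	ret = dhd_dev_apf_add_filter(ndev, program, program_len);
+ *	DHD_APF_UNLOCK(ndev);
+ */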
+
+extern void dhd_timeout_start(dhd_timeout_t *tmo, uint usec);
+extern int dhd_timeout_expired(dhd_timeout_t *tmo);
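+
+/* A minimal sketch of the polling pattern dhd_timeout_t is meant for
+ * (condition_met() is hypothetical):
+ *
+ *	dhd_timeout_t tmo;
+ *	dhd_timeout_start(&tmo, 2000000);	// 2 s budget, in usec
+ *	while (!condition_met()) {
+ *		if (dhd_timeout_expired(&tmo))
+ *			break;	// give up
+ *	}
+ */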
+
+extern int dhd_ifname2idx(struct dhd_info *dhd, char *name);
+#ifdef LINUX
+extern int dhd_net2idx(struct dhd_info *dhd, struct net_device *net);
+extern struct net_device * dhd_idx2net(void *pub, int ifidx);
+extern int net_os_send_hang_message(struct net_device *dev);
+extern int net_os_send_hang_message_reason(struct net_device *dev, const char *string_num);
+#endif
+extern bool dhd_wowl_cap(void *bus);
+extern int wl_host_event(dhd_pub_t *dhd_pub, int *idx, void *pktdata, uint pktlen,
+ wl_event_msg_t *, void **data_ptr, void *);
+extern int wl_process_host_event(dhd_pub_t *dhd_pub, int *idx, void *pktdata, uint pktlen,
+ wl_event_msg_t *, void **data_ptr, void *);
+extern void wl_event_to_host_order(wl_event_msg_t * evt);
+extern int wl_host_event_get_data(void *pktdata, uint pktlen, bcm_event_msg_u_t *evu);
+extern int dhd_wl_ioctl(dhd_pub_t *dhd_pub, int ifindex, wl_ioctl_t *ioc, void *buf, int len);
+extern int dhd_wl_ioctl_cmd(dhd_pub_t *dhd_pub, int cmd, void *arg, int len, uint8 set,
+ int ifindex);
+extern int dhd_wl_ioctl_get_intiovar(dhd_pub_t *dhd_pub, char *name, uint *pval,
+ int cmd, uint8 set, int ifidx);
+extern int dhd_wl_ioctl_set_intiovar(dhd_pub_t *dhd_pub, char *name, uint val,
+ int cmd, uint8 set, int ifidx);
+extern void dhd_common_init(osl_t *osh);
+
+#if defined(linux) || defined(LINUX) || defined(OEM_ANDROID)
+extern int dhd_do_driver_init(struct net_device *net);
+#endif
+extern int dhd_event_ifadd(struct dhd_info *dhd, struct wl_event_data_if *ifevent,
+ char *name, uint8 *mac);
+extern int dhd_event_ifdel(struct dhd_info *dhd, struct wl_event_data_if *ifevent,
+ char *name, uint8 *mac);
+extern int dhd_event_ifchange(struct dhd_info *dhd, struct wl_event_data_if *ifevent,
+ char *name, uint8 *mac);
+#ifdef DHD_UPDATE_INTF_MAC
+extern int dhd_op_if_update(dhd_pub_t *dhdpub, int ifidx);
+#endif /* DHD_UPDATE_INTF_MAC */
+extern struct net_device* dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, const char *name,
+ uint8 *mac, uint8 bssidx, bool need_rtnl_lock, const char *dngl_name);
+extern int dhd_remove_if(dhd_pub_t *dhdpub, int ifidx, bool need_rtnl_lock);
+#ifdef WL_STATIC_IF
+extern s32 dhd_update_iflist_info(dhd_pub_t *dhdp, struct net_device *ndev, int ifidx,
+ uint8 *mac, uint8 bssidx, const char *dngl_name, int if_state);
+#endif /* WL_STATIC_IF */
+extern void dhd_vif_add(struct dhd_info *dhd, int ifidx, char * name);
+extern void dhd_vif_del(struct dhd_info *dhd, int ifidx);
+extern void dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx);
+extern void dhd_vif_sendup(struct dhd_info *dhd, int ifidx, uchar *cp, int len);
+
+#ifdef WL_NATOE
+extern int dhd_natoe_ct_event(dhd_pub_t *dhd, char *data);
+#endif /* WL_NATOE */
+
+/* Send packet to dongle via data channel */
+extern int dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pkt);
+
+/* send up locally generated event */
+extern void dhd_sendup_event_common(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data);
+/* Send event to host */
+extern void dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data);
+#ifdef LOG_INTO_TCPDUMP
+extern void dhd_sendup_log(dhd_pub_t *dhdp, void *data, int len);
+#endif /* LOG_INTO_TCPDUMP */
+#if defined(SHOW_LOGTRACE) && defined(EWP_EDL)
+void dhd_sendup_info_buf(dhd_pub_t *dhdp, uint8 *msg);
+#endif
+#if defined(WIFI_TURNON_USE_HALINIT)
+extern int dhd_open(struct net_device *net);
+#endif /* WIFI_TURNON_USE_HALINIT */
+extern int dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag);
+extern uint dhd_bus_status(dhd_pub_t *dhdp);
+extern int dhd_bus_start(dhd_pub_t *dhdp);
+extern int dhd_bus_suspend(dhd_pub_t *dhdpub);
+extern int dhd_bus_resume(dhd_pub_t *dhdpub, int stage);
+extern int dhd_bus_membytes(dhd_pub_t *dhdp, bool set, uint32 address, uint8 *data, uint size);
+extern void dhd_print_buf(void *pbuf, int len, int bytes_per_line);
+extern bool dhd_is_associated(dhd_pub_t *dhd, uint8 ifidx, int *retval);
+#if defined(BCMSDIO) || defined(BCMPCIE)
+extern uint dhd_bus_chip_id(dhd_pub_t *dhdp);
+extern uint dhd_bus_chiprev_id(dhd_pub_t *dhdp);
+extern uint dhd_bus_chippkg_id(dhd_pub_t *dhdp);
+#endif /* defined(BCMSDIO) || defined(BCMPCIE) */
+#if defined(LINUX) || defined(linux)
+int dhd_bus_get_fw_mode(dhd_pub_t *dhdp);
+#else
+static INLINE int dhd_bus_get_fw_mode(dhd_pub_t *dhdp) { return 0; }
+#endif /* LINUX || linux */
+
+#if defined(KEEP_ALIVE)
+extern int dhd_keep_alive_onoff(dhd_pub_t *dhd);
+#endif /* KEEP_ALIVE */
+
+/* linux is defined for DHD EFI builds also,
+* since EFI is cross-compiled from linux.
+* The dbgring_lock APIs are meant only for linux,
+* which uses mutexes; other OSes will continue to
+* use osl_spin_lock.
+*/
+#if (defined(LINUX) || defined(linux)) && !defined(DHD_EFI)
+void *dhd_os_dbgring_lock_init(osl_t *osh);
+void dhd_os_dbgring_lock_deinit(osl_t *osh, void *mtx);
+unsigned long dhd_os_dbgring_lock(void *lock);
+void dhd_os_dbgring_unlock(void *lock, unsigned long flags);
+#endif /* (LINUX || linux) && !DHD_EFI */
+
+#ifdef PCIE_INB_DW
+#ifdef DHD_EFI
+extern int dhd_os_ds_enter_wait(dhd_pub_t * pub, uint * condition);
+extern int dhd_os_ds_enter_wake(dhd_pub_t * pub);
+#else
+static INLINE int dhd_os_ds_enter_wait(dhd_pub_t * pub, uint * condition)
+{ return 1; }
+static INLINE int dhd_os_ds_enter_wake(dhd_pub_t * pub)
+{ return 0; }
+#endif /* DHD_EFI */
+#endif /* PCIE_INB_DW */
+
+#if defined(LINUX) || defined(linux) || defined(DHD_EFI)
+extern int dhd_os_busbusy_wait_negation(dhd_pub_t * pub, uint * condition);
+extern int dhd_os_busbusy_wake(dhd_pub_t * pub);
+extern void dhd_os_tx_completion_wake(dhd_pub_t *dhd);
+extern int dhd_os_busbusy_wait_condition(dhd_pub_t *pub, uint *var, uint condition);
+extern int dhd_os_d3ack_wait(dhd_pub_t * pub, uint * condition);
+extern int dhd_os_d3ack_wake(dhd_pub_t * pub);
+extern int dhd_os_dmaxfer_wait(dhd_pub_t *pub, uint *condition);
+extern int dhd_os_dmaxfer_wake(dhd_pub_t *pub);
+int dhd_os_busbusy_wait_bitmask(dhd_pub_t *pub, uint *var,
+ uint bitmask, uint condition);
+#ifdef PCIE_INB_DW
+extern int dhd_os_ds_exit_wait(dhd_pub_t * pub, uint * condition);
+extern int dhd_os_ds_exit_wake(dhd_pub_t * pub);
+#endif /* PCIE_INB_DW */
+int dhd_os_tput_test_wait(dhd_pub_t *pub, uint *condition, uint timeout_ms);
+int dhd_os_tput_test_wake(dhd_pub_t * pub);
+#else
+static INLINE int dhd_os_tput_test_wait(dhd_pub_t *pub, uint *condition,
+ uint timeout_ms)
+{ return 0; }
+static INLINE int dhd_os_tput_test_wake(dhd_pub_t * pub)
+{ return 0; }
+static INLINE int dhd_os_d3ack_wait(dhd_pub_t * pub, uint * condition)
+{ return dhd_os_ioctl_resp_wait(pub, condition); }
+static INLINE int dhd_os_d3ack_wake(dhd_pub_t * pub)
+{ return dhd_os_ioctl_resp_wake(pub); }
+#ifdef PCIE_INB_DW
+static INLINE int dhd_os_ds_exit_wait(dhd_pub_t * pub, uint * condition)
+{ DHD_ERROR(("%s is Not supported for this platform", __FUNCTION__)); return 0; }
+static INLINE int dhd_os_ds_exit_wake(dhd_pub_t * pub)
+{ DHD_ERROR(("%s is Not supported for this platform", __FUNCTION__)); return 0; }
+#endif /* PCIE_INB_DW */
+static INLINE int dhd_os_busbusy_wait_negation(dhd_pub_t * pub, uint * condition)
+{ return 1; }
+static INLINE int dhd_os_busbusy_wake(dhd_pub_t * pub)
+{ return 0; }
+static INLINE int dhd_os_busbusy_wait_condition(dhd_pub_t *pub, uint *var, uint condition)
+{ return 0; }
+static INLINE int dhd_os_dmaxfer_wait(dhd_pub_t *pub, uint *condition)
+{ return 0; }
+static INLINE int dhd_os_dmaxfer_wake(dhd_pub_t *pub)
+{ return 0; }
+static INLINE int dhd_os_busbusy_wait_bitmask(dhd_pub_t *pub, uint *var,
+ uint bitmask, uint condition)
+{ return 0; }
+#endif /* LINUX || linux || DHD_EFI */
+
+#if defined(LINUX) || defined(linux)
+/*
+ * Manage sta objects within an interface. An interface is identified by its
+ * ifindex; sta(s) within an interface are managed by the sta's MAC address.
+ */
+struct dhd_sta;
+extern bool dhd_sta_associated(dhd_pub_t *dhdp, uint32 bssidx, uint8 *mac);
+extern struct dhd_sta *dhd_find_sta(void *pub, int ifidx, void *ea);
+extern struct dhd_sta *dhd_findadd_sta(void *pub, int ifidx, void *ea);
+extern void dhd_del_all_sta(void *pub, int ifidx);
+extern void dhd_del_sta(void *pub, int ifidx, void *ea);
+extern int dhd_get_ap_isolate(dhd_pub_t *dhdp, uint32 idx);
+extern int dhd_set_ap_isolate(dhd_pub_t *dhdp, uint32 idx, int val);
+extern int dhd_bssidx2idx(dhd_pub_t *dhdp, uint32 bssidx);
+extern struct net_device *dhd_linux_get_primary_netdev(dhd_pub_t *dhdp);
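+
+/* Sketch of the typical call flow (an assumption, not a contract): on an
+ * assoc indication the sta is looked up or created by MAC address, and
+ * removed again on disassoc:
+ *
+ *	struct dhd_sta *sta = dhd_findadd_sta(dhdp, ifidx, ea);
+ *	// ... per-sta state used while associated ...
+ *	dhd_del_sta(dhdp, ifidx, ea);
+ */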
+#else /* LINUX */
+static INLINE bool dhd_sta_associated(dhd_pub_t *dhdp, uint32 bssidx, uint8 *mac)
+{ return FALSE;}
+static INLINE void* dhd_find_sta(void *pub, int ifidx, void *ea) { return NULL;}
+static INLINE void *dhd_findadd_sta(void *pub, int ifidx, void *ea) { return NULL; }
+static INLINE void dhd_del_all_sta(void *pub, int ifidx) { }
+static INLINE void dhd_del_sta(void *pub, int ifidx, void *ea) { }
+static INLINE int dhd_get_ap_isolate(dhd_pub_t *dhdp, uint32 idx) { return 0; }
+static INLINE int dhd_set_ap_isolate(dhd_pub_t *dhdp, uint32 idx, int val) { return 0; }
+static INLINE int dhd_bssidx2idx(dhd_pub_t *dhdp, uint32 bssidx) { return 0; }
+#endif /* LINUX */
+
+extern bool dhd_is_concurrent_mode(dhd_pub_t *dhd);
+int dhd_iovar(dhd_pub_t *pub, int ifidx, char *name, char *param_buf, uint param_len,
+ char *res_buf, uint res_len, bool set);
+extern int dhd_getiovar(dhd_pub_t *pub, int ifidx, char *name, char *cmd_buf,
+ uint cmd_len, char **resptr, uint resp_len);
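+
+/* A hedged sketch of a set-type iovar through dhd_iovar(); "mpc" is used
+ * purely as an illustrative iovar name:
+ *
+ *	uint32 val = 1;
+ *	int err = dhd_iovar(pub, 0, "mpc", (char *)&val, sizeof(val),
+ *		NULL, 0, TRUE);
+ */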
+
+#ifdef DHD_MCAST_REGEN
+extern int dhd_get_mcast_regen_bss_enable(dhd_pub_t *dhdp, uint32 idx);
+extern int dhd_set_mcast_regen_bss_enable(dhd_pub_t *dhdp, uint32 idx, int val);
+#endif
+typedef enum cust_gpio_modes {
+ WLAN_RESET_ON,
+ WLAN_RESET_OFF,
+ WLAN_POWER_ON,
+ WLAN_POWER_OFF
+} cust_gpio_modes_t;
+
+typedef struct dmaxref_mem_map {
+ dhd_dma_buf_t *srcmem;
+ dhd_dma_buf_t *dstmem;
+} dmaxref_mem_map_t;
+
+#if defined(OEM_ANDROID)
+extern int wl_iw_iscan_set_scan_broadcast_prep(struct net_device *dev, uint flag);
+extern int wl_iw_send_priv_event(struct net_device *dev, char *flag);
+#endif /* defined(OEM_ANDROID) */
+
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+extern void dhd_flush_rx_tx_wq(dhd_pub_t *dhdp);
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+
+/*
+ * Insmod parameters for debug/test
+ */
+
+/* Watchdog timer interval */
+extern uint dhd_watchdog_ms;
+extern bool dhd_os_wd_timer_enabled(void *bus);
+#ifdef DHD_PCIE_RUNTIMEPM
+extern uint dhd_runtimepm_ms;
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+/** Default console output poll interval */
+extern uint dhd_console_ms;
+
+extern uint android_msg_level;
+extern uint config_msg_level;
+extern uint sd_msglevel;
+extern uint dump_msg_level;
+#ifdef BCMDBUS
+extern uint dbus_msglevel;
+#endif /* BCMDBUS */
+#ifdef WL_WIRELESS_EXT
+extern uint iw_msg_level;
+#endif
+#ifdef WL_CFG80211
+extern uint wl_dbg_level;
+#endif
+
+extern uint dhd_slpauto;
+
+/* Use interrupts */
+extern uint dhd_intr;
+
+/* Use polling */
+extern uint dhd_poll;
+
+/* ARP offload agent mode */
+extern uint dhd_arp_mode;
+
+/* Pkt filter enable control */
+extern uint dhd_pkt_filter_enable;
+
+/* Pkt filter init setup */
+extern uint dhd_pkt_filter_init;
+
+/* Pkt filter mode control */
+extern uint dhd_master_mode;
+
+/* Roaming mode control */
+extern uint dhd_roam_disable;
+
+/* Radio up/down control */
+extern uint dhd_radio_up;
+
+/* TCM verification control */
+extern uint dhd_tcm_test_enable;
+
+/* Initial idletime ticks (may be -1 for immediate idle, 0 for no idle) */
+extern int dhd_idletime;
+#ifdef DHD_USE_IDLECOUNT
+#define DHD_IDLETIME_TICKS 5
+#else
+#define DHD_IDLETIME_TICKS 1
+#endif /* DHD_USE_IDLECOUNT */
+
+/* SDIO Drive Strength */
+extern uint dhd_sdiod_drive_strength;
+
+/* triggers bcm_bprintf to print to kernel log */
+extern bool bcm_bprintf_bypass;
+
+/* Override to force tx queueing all the time */
+extern uint dhd_force_tx_queueing;
+
+/* Default bcn_timeout value is 4 */
+#define DEFAULT_BCN_TIMEOUT_VALUE 4
+#ifndef CUSTOM_BCN_TIMEOUT_SETTING
+#define CUSTOM_BCN_TIMEOUT_SETTING DEFAULT_BCN_TIMEOUT_VALUE
+#endif
+
+/* Default KEEP_ALIVE Period is 55 sec to prevent AP from sending Keep Alive probe frame */
+#define DEFAULT_KEEP_ALIVE_VALUE 55000 /* msec */
+#ifndef CUSTOM_KEEP_ALIVE_SETTING
+#define CUSTOM_KEEP_ALIVE_SETTING DEFAULT_KEEP_ALIVE_VALUE
+#endif /* CUSTOM_KEEP_ALIVE_SETTING */
+
+#define NULL_PKT_STR "null_pkt"
+
+/* hooks for custom glom setting option via Makefile */
+#define DEFAULT_GLOM_VALUE -1
+#ifndef CUSTOM_GLOM_SETTING
+#define CUSTOM_GLOM_SETTING DEFAULT_GLOM_VALUE
+#endif
+#define WL_AUTO_ROAM_TRIGGER -75
+/* hooks for custom Roaming Trigger setting via Makefile */
+#define DEFAULT_ROAM_TRIGGER_VALUE -75 /* dBm default roam trigger all band */
+#define DEFAULT_ROAM_TRIGGER_SETTING -1
+#ifndef CUSTOM_ROAM_TRIGGER_SETTING
+#define CUSTOM_ROAM_TRIGGER_SETTING DEFAULT_ROAM_TRIGGER_VALUE
+#endif
+
+/* hooks for custom Roam Delta setting via Makefile */
+#define DEFAULT_ROAM_DELTA_VALUE 10 /* dBm default roam delta all band */
+#define DEFAULT_ROAM_DELTA_SETTING -1
+#ifndef CUSTOM_ROAM_DELTA_SETTING
+#define CUSTOM_ROAM_DELTA_SETTING DEFAULT_ROAM_DELTA_VALUE
+#endif
+
+/* hooks for custom PNO Event wake lock to guarantee enough time
+ for the platform to detect the event before the system is suspended
+*/
+#define DEFAULT_PNO_EVENT_LOCK_xTIME 2 /* multiple of DHD_PACKET_TIMEOUT_MS */
+#ifndef CUSTOM_PNO_EVENT_LOCK_xTIME
+#define CUSTOM_PNO_EVENT_LOCK_xTIME DEFAULT_PNO_EVENT_LOCK_xTIME
+#endif
+/* hooks for custom dhd_dpc_prio setting option via Makefile */
+#define DEFAULT_DHD_DPC_PRIO 1
+#ifndef CUSTOM_DPC_PRIO_SETTING
+#define CUSTOM_DPC_PRIO_SETTING DEFAULT_DHD_DPC_PRIO
+#endif
+
+#ifndef CUSTOM_LISTEN_INTERVAL
+#define CUSTOM_LISTEN_INTERVAL LISTEN_INTERVAL
+#endif /* CUSTOM_LISTEN_INTERVAL */
+
+#define DEFAULT_SUSPEND_BCN_LI_DTIM 3
+#ifndef CUSTOM_SUSPEND_BCN_LI_DTIM
+#define CUSTOM_SUSPEND_BCN_LI_DTIM DEFAULT_SUSPEND_BCN_LI_DTIM
+#endif
+
+#ifdef OEM_ANDROID
+#ifndef BCN_TIMEOUT_IN_SUSPEND
+#define BCN_TIMEOUT_IN_SUSPEND 6 /* bcn timeout value in suspend mode */
+#endif
+#endif /* OEM_ANDROID */
+
+#ifndef CUSTOM_RXF_PRIO_SETTING
+#define CUSTOM_RXF_PRIO_SETTING MAX((CUSTOM_DPC_PRIO_SETTING - 1), 1)
+#endif
+
+#define DEFAULT_WIFI_TURNOFF_DELAY 0
+#ifndef WIFI_TURNOFF_DELAY
+#define WIFI_TURNOFF_DELAY DEFAULT_WIFI_TURNOFF_DELAY
+#endif /* WIFI_TURNOFF_DELAY */
+
+#define DEFAULT_WIFI_TURNON_DELAY 200
+#ifndef WIFI_TURNON_DELAY
+#define WIFI_TURNON_DELAY DEFAULT_WIFI_TURNON_DELAY
+#endif /* WIFI_TURNON_DELAY */
+
+#ifdef BCMSDIO
+#define DEFAULT_DHD_WATCHDOG_INTERVAL_MS 10 /* msec */
+#else
+#define DEFAULT_DHD_WATCHDOG_INTERVAL_MS 0 /* msec */
+#endif
+#ifndef CUSTOM_DHD_WATCHDOG_MS
+#define CUSTOM_DHD_WATCHDOG_MS DEFAULT_DHD_WATCHDOG_INTERVAL_MS
+#endif /* CUSTOM_DHD_WATCHDOG_MS */
+
+#define DHD_INB_DW_DEASSERT_MS 250
+
+#define DEFAULT_ASSOC_RETRY_MAX 3
+#ifndef CUSTOM_ASSOC_RETRY_MAX
+#define CUSTOM_ASSOC_RETRY_MAX DEFAULT_ASSOC_RETRY_MAX
+#endif /* CUSTOM_ASSOC_RETRY_MAX */
+
+#if defined(BCMSDIO) || defined(DISABLE_FRAMEBURST)
+#define DEFAULT_FRAMEBURST_SET 0
+#else
+#define DEFAULT_FRAMEBURST_SET 1
+#endif /* BCMSDIO || DISABLE_FRAMEBURST */
+
+#ifndef CUSTOM_FRAMEBURST_SET
+#define CUSTOM_FRAMEBURST_SET DEFAULT_FRAMEBURST_SET
+#endif /* CUSTOM_FRAMEBURST_SET */
+
+#ifdef WLTDLS
+#ifndef CUSTOM_TDLS_IDLE_MODE_SETTING
+#define CUSTOM_TDLS_IDLE_MODE_SETTING 60000 /* tear down TDLS link after 60 sec if not active */
+#endif
+#ifndef CUSTOM_TDLS_RSSI_THRESHOLD_HIGH
+#define CUSTOM_TDLS_RSSI_THRESHOLD_HIGH -70 /* rssi threshold for establishing TDLS link */
+#endif
+#ifndef CUSTOM_TDLS_RSSI_THRESHOLD_LOW
+#define CUSTOM_TDLS_RSSI_THRESHOLD_LOW -80 /* rssi threshold for tearing down TDLS link */
+#endif
+#ifndef CUSTOM_TDLS_PCKTCNT_THRESHOLD_HIGH
+#define CUSTOM_TDLS_PCKTCNT_THRESHOLD_HIGH 100 /* pkt/sec threshold for establishing TDLS link */
+#endif
+#ifndef CUSTOM_TDLS_PCKTCNT_THRESHOLD_LOW
+#define CUSTOM_TDLS_PCKTCNT_THRESHOLD_LOW 10 /* pkt/sec threshold for tearing down TDLS link */
+#endif
+#endif /* WLTDLS */
+
+#if defined(VSDB) || defined(ROAM_ENABLE)
+#define DEFAULT_BCN_TIMEOUT 6
+#else
+#define DEFAULT_BCN_TIMEOUT 4
+#endif /* VSDB || ROAM_ENABLE */
+
+#ifndef CUSTOM_BCN_TIMEOUT
+#define CUSTOM_BCN_TIMEOUT DEFAULT_BCN_TIMEOUT
+#endif
+
+#define DEFAULT_BCN_TIMEOUT_IN_SUSPEND 10
+#ifndef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
+#define CUSTOM_BCN_TIMEOUT_IN_SUSPEND DEFAULT_BCN_TIMEOUT_IN_SUSPEND
+#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
+
+#define MAX_DTIM_SKIP_BEACON_INTERVAL 100 /* max allowed associated AP beacon interval for DTIM skip */
+#ifndef MAX_DTIM_ALLOWED_INTERVAL
+#define MAX_DTIM_ALLOWED_INTERVAL 600 /* max allowed total beacon interval for DTIM skip */
+#endif
+
+#ifdef OEM_ANDROID
+#ifndef MIN_DTIM_FOR_ROAM_THRES_EXTEND
+#define MIN_DTIM_FOR_ROAM_THRES_EXTEND 600 /* minimum dtim interval to extend roam threshold */
+#endif
+#endif /* OEM_ANDROID */
+
+#ifdef CONFIG_ROAM_RSSI_LIMIT
+extern int dhd_roam_rssi_limit_get(dhd_pub_t *dhd, int *lmt2g, int *lmt5g);
+extern int dhd_roam_rssi_limit_set(dhd_pub_t *dhd, int lmt2g, int lmt5g);
+#ifndef CUSTOM_ROAMRSSI_2G
+#define CUSTOM_ROAMRSSI_2G ROAMRSSI_2G_DEFAULT
+#endif /* CUSTOM_ROAMRSSI_2G */
+#ifndef CUSTOM_ROAMRSSI_5G
+#define CUSTOM_ROAMRSSI_5G ROAMRSSI_5G_DEFAULT
+#endif /* CUSTOM_ROAMRSSI_5G */
+#endif /* CONFIG_ROAM_RSSI_LIMIT */
+#ifdef CONFIG_ROAM_MIN_DELTA
+extern int dhd_roam_min_delta_get(dhd_pub_t *dhd, uint32 *dt2g, uint32 *dt5g);
+extern int dhd_roam_min_delta_set(dhd_pub_t *dhd, uint32 dt2g, uint32 dt5g);
+#ifndef CUSTOM_ROAM_MIN_DELTA
+#define CUSTOM_ROAM_MIN_DELTA ROAM_MIN_DELTA_DEFAULT
+#endif /* CUSTOM_ROAM_MIN_DELTA */
+#endif /* CONFIG_ROAM_MIN_DELTA */
+
+#define NO_DTIM_SKIP 1
+#ifdef SDTEST
+/* Echo packet generator (SDIO), pkts/s */
+extern uint dhd_pktgen;
+
+/* Echo packet len (0 => sawtooth, max 1800) */
+extern uint dhd_pktgen_len;
+#define MAX_PKTGEN_LEN 1800
+#endif
+
+#ifdef BCMSLTGT
+/* Account for slow hardware (QT) */
+extern uint htclkratio;
+extern int dngl_xtalfreq;
+#endif
+
+/* optionally set by a module_param_string() */
+#define MOD_PARAM_PATHLEN 2048
+#define MOD_PARAM_INFOLEN 512
+#define MOD_PARAM_SRLEN 64
+
+#ifdef SOFTAP
+extern char fw_path2[MOD_PARAM_PATHLEN];
+#endif
+
+#if defined(CUSTOMER_HW4)
+#define VENDOR_PATH "/vendor"
+#else
+#define VENDOR_PATH ""
+#endif /* CUSTOMER_HW4 */
+
+/* Platform path name -
+ * Used to find where the FW debug support files live.
+ * 1) If the platform Makefile specifies where they should be
+ * picked up from, use that.
+ * 2) If the platform Makefile does not specify anything, use the
+ * scheme mapped below.
+ */
+#if !defined(PLATFORM_PATH)
+/* First Overrides */
+#if defined(DHD_LEGACY_FILE_PATH)
+/* If Legacy file path is to be used */
+#define PLATFORM_PATH "/data/"
+#elif defined(PLATFORM_SLP)
+/* Path Name for SLP */
+#define PLATFORM_PATH "/opt/etc/"
+#else
+/* End of Overrides, rely on what is dictated by Android */
+#if defined(CUSTOMER_HW4)
+#define PLATFORM_PATH "/data/vendor/conn/"
+#else
+#define PLATFORM_PATH "/data/misc/conn/"
+#endif /* CUSTOMER_HW4 */
+#define DHD_MAC_ADDR_EXPORT
+#define DHD_ADPS_BAM_EXPORT
+#define DHD_EXPORT_CNTL_FILE
+#define DHD_SOFTAP_DUAL_IF_INFO
+#define DHD_SEND_HANG_PRIVCMD_ERRORS
+#endif /* DHD_LEGACY_FILE_PATH */
+#endif /* !PLATFORM_PATH */
+
+#ifdef DHD_MAC_ADDR_EXPORT
+extern struct ether_addr sysfs_mac_addr;
+#endif /* DHD_MAC_ADDR_EXPORT */
+
+#if defined(LINUX) || defined(linux)
+/* Flag to indicate if we should download firmware on driver load */
+extern uint dhd_download_fw_on_driverload;
+#ifndef BCMDBUS
+extern int allow_delay_fwdl;
+#endif /* !BCMDBUS */
+
+extern int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost);
+extern int dhd_write_file(const char *filepath, char *buf, int buf_len);
+extern int dhd_read_file(const char *filepath, char *buf, int buf_len);
+extern int dhd_write_file_and_check(const char *filepath, char *buf, int buf_len);
+extern int dhd_file_delete(char *path);
+
+#ifdef READ_MACADDR
+extern int dhd_set_macaddr_from_file(dhd_pub_t *dhdp);
+#else
+static INLINE int dhd_set_macaddr_from_file(dhd_pub_t *dhdp) { return 0; }
+#endif /* READ_MACADDR */
+#ifdef WRITE_MACADDR
+extern int dhd_write_macaddr(struct ether_addr *mac);
+#else
+static INLINE int dhd_write_macaddr(struct ether_addr *mac) { return 0; }
+#endif /* WRITE_MACADDR */
+
+#if defined(USE_CID_CHECK) || defined(USE_DIRECT_VID_TAG)
+#if defined(BCM4361_CHIP) || defined(BCM4375_CHIP) || defined(BCM4389_CHIP_DEF)
+#define DHD_USE_CISINFO_FROM_OTP
+/* For COB, CID/MAC can't be checked in OTP, so define it here */
+#define DHD_READ_CIS_FROM_BP
+#endif /* BCM4361_CHIP || BCM4375_CHIP || BCM4389_CHIP_DEF */
+#define MAX_VNAME_LEN 64
+#define MAX_VID_LEN 8
+#define MODULE_NAME_INDEX_MAX 3
+#define MAX_EXTENSION 20
+typedef struct {
+ char cid_ext[MAX_EXTENSION];
+ char nvram_ext[MAX_EXTENSION];
+ char fw_ext[MAX_EXTENSION];
+} naming_info_t;
+#ifdef DHD_EXPORT_CNTL_FILE
+extern char cidinfostr[MAX_VNAME_LEN];
+#endif /* DHD_EXPORT_CNTL_FILE */
+extern int dhd_check_module_cid(dhd_pub_t *dhdp);
+#else
+static INLINE int dhd_check_module_cid(dhd_pub_t *dhdp) { return 0; }
+#endif /* USE_CID_CHECK || USE_DIRECT_VID_TAG */
+#ifdef USE_CID_CHECK
+extern char *dhd_get_cid_info(unsigned char *vid, int vid_length);
+#endif /* USE_CID_CHECK */
+#ifdef GET_MAC_FROM_OTP
+extern int dhd_check_module_mac(dhd_pub_t *dhdp);
+#else
+static INLINE int dhd_check_module_mac(dhd_pub_t *dhdp) { return 0; }
+#endif /* GET_MAC_FROM_OTP */
+
+#if defined(READ_MACADDR) || defined(WRITE_MACADDR) || defined(USE_CID_CHECK) || \
+ defined(GET_MAC_FROM_OTP)
+#define DHD_USE_CISINFO
+#endif /* READ_MACADDR || WRITE_MACADDR || USE_CID_CHECK || GET_MAC_FROM_OTP */
+
+#ifdef DHD_USE_CISINFO
+int dhd_read_cis(dhd_pub_t *dhdp);
+int dhd_read_otp_sw_rgn(dhd_pub_t *dhdp);
+void dhd_clear_cis(dhd_pub_t *dhdp);
+int dhd_alloc_cis(dhd_pub_t *dhdp);
+#if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK)
+extern int dhd_check_module_b85a(void);
+extern int dhd_check_module_b90(void);
+#define BCM4359_MODULE_TYPE_B90B 1
+#define BCM4359_MODULE_TYPE_B90S 2
+#endif /* defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) */
+#if defined(USE_CID_CHECK)
+extern int dhd_check_module_bcm(char *module_type, int index, bool *is_murata_fem);
+extern naming_info_t *
+dhd_find_naming_info(dhd_pub_t *dhdp, char *module_type);
+extern naming_info_t * dhd_find_naming_info_by_chip_rev(dhd_pub_t *dhdp, bool *is_murata_fem);
+#endif /* defined(USE_CID_CHECK) */
+#ifdef USE_DIRECT_VID_TAG
+#define VENDOR_OFF 1
+#define MD_REV_OFF 0
+#define A0_REV "_a0"
+#define B0_REV "_b0"
+extern int dhd_check_stored_module_info(char *vid);
+extern int concate_nvram_by_vid(dhd_pub_t *dhdp, char *nv_path, char *chipstr);
+#endif /* USE_DIRECT_VID_TAG */
+#if defined(USE_CID_CHECK) && defined(USE_DIRECT_VID_TAG)
+#error USE_CID_CHECK and USE_DIRECT_VID_TAG are mutually exclusive
+#endif /* USE_CID_CHECK && USE_DIRECT_VID_TAG */
+#else
+static INLINE int dhd_read_cis(dhd_pub_t *dhdp) { return 0; }
+static INLINE int dhd_read_otp_sw_rgn(dhd_pub_t *dhdp) { return 0; }
+static INLINE void dhd_clear_cis(dhd_pub_t *dhdp) { }
+static INLINE int dhd_alloc_cis(dhd_pub_t *dhdp) { return 0; }
+#endif /* DHD_USE_CISINFO */
+
+#else /* LINUX || linux */
+static INLINE int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost) { return 0; }
+#endif /* LINUX || linux */
+
+#if defined(WL_CFG80211) && defined(SUPPORT_DEEP_SLEEP)
+/* Flag to indicate whether we distinguish the power-off policy when
+ * the user sets the menu "Keep Wi-Fi on during sleep" to "Never"
+ */
+extern int trigger_deep_sleep;
+int dhd_deepsleep(struct net_device *dev, int flag);
+#endif /* WL_CFG80211 && SUPPORT_DEEP_SLEEP */
+
+extern void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar);
+extern void dhd_wait_event_wakeup(dhd_pub_t *dhd);
+
+#define IFLOCK_INIT(lock) *lock = 0
+#define IFLOCK(lock) while (InterlockedCompareExchange((lock), 1, 0)) \
+ NdisStallExecution(1);
+#define IFUNLOCK(lock) InterlockedExchange((lock), 0)
+#define IFLOCK_FREE(lock)
+#define FW_SUPPORTED(dhd, capa) ((strstr(dhd->fw_capabilities, " " #capa " ") != NULL))
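+
+/* FW_SUPPORTED() relies on fw_capabilities being a space-delimited word list
+ * with surrounding spaces (e.g. " p2p sta ... "); a minimal usage sketch:
+ *
+ *	if (FW_SUPPORTED(dhd, p2p))
+ *		;	// firmware advertises P2P support
+ */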
+#ifdef ARP_OFFLOAD_SUPPORT
+#define MAX_IPV4_ENTRIES 8
+void dhd_arp_offload_set(dhd_pub_t * dhd, int arp_mode);
+void dhd_arp_offload_enable(dhd_pub_t * dhd, int arp_enable);
+
+/* dhd_common ARP offload wrappers */
+void dhd_aoe_hostip_clr(dhd_pub_t *dhd, int idx);
+void dhd_aoe_arp_clr(dhd_pub_t *dhd, int idx);
+int dhd_arp_get_arp_hostip_table(dhd_pub_t *dhd, void *buf, int buflen, int idx);
+void dhd_arp_offload_add_ip(dhd_pub_t *dhd, uint32 ipaddr, int idx);
+#endif /* ARP_OFFLOAD_SUPPORT */
+#ifdef WLTDLS
+int dhd_tdls_enable(struct net_device *dev, bool tdls_on, bool auto_on, struct ether_addr *mac);
+int dhd_tdls_set_mode(dhd_pub_t *dhd, bool wfd_mode);
+#ifdef PCIE_FULL_DONGLE
+int dhd_tdls_update_peer_info(dhd_pub_t *dhdp, wl_event_msg_t *event);
+int dhd_tdls_event_handler(dhd_pub_t *dhd_pub, wl_event_msg_t *event);
+int dhd_free_tdls_peer_list(dhd_pub_t *dhd_pub);
+#endif /* PCIE_FULL_DONGLE */
+#endif /* WLTDLS */
+
+/* Neighbor Discovery Offload Support */
+extern int dhd_ndo_enable(dhd_pub_t * dhd, int ndo_enable);
+int dhd_ndo_add_ip(dhd_pub_t *dhd, char* ipaddr, int idx);
+int dhd_ndo_remove_ip(dhd_pub_t *dhd, int idx);
+
+/* Enhanced ND offload support */
+uint16 dhd_ndo_get_version(dhd_pub_t *dhdp);
+int dhd_ndo_add_ip_with_type(dhd_pub_t *dhdp, char *ipv6addr, uint8 type, int idx);
+int dhd_ndo_remove_ip_by_addr(dhd_pub_t *dhdp, char *ipv6addr, int idx);
+int dhd_ndo_remove_ip_by_type(dhd_pub_t *dhdp, uint8 type, int idx);
+int dhd_ndo_unsolicited_na_filter_enable(dhd_pub_t *dhdp, int enable);
+
+/* ioctl processing for nl80211 */
+int dhd_ioctl_process(dhd_pub_t *pub, int ifidx, struct dhd_ioctl *ioc, void *data_buf);
+
+#if defined(SUPPORT_MULTIPLE_REVISION)
+extern int
+concate_revision(struct dhd_bus *bus, char *fwpath, char *nvpath);
+#endif /* SUPPORT_MULTIPLE_REVISION */
+void dhd_bus_update_fw_nv_path(struct dhd_bus *bus, char *pfw_path, char *pnv_path,
+ char *pclm_path, char *pconf_path);
+void dhd_set_bus_state(void *bus, uint32 state);
+
+/* Remove the proper pkts (either one non-fragmented pkt or a whole set of fragmented pkts) */
+typedef int (*f_droppkt_t)(dhd_pub_t *dhdp, int prec, void* p, bool bPktInQ);
+extern bool dhd_prec_drop_pkts(dhd_pub_t *dhdp, struct pktq *pq, int prec, f_droppkt_t fn);
+
+#ifdef PROP_TXSTATUS
+int dhd_os_wlfc_block(dhd_pub_t *pub);
+int dhd_os_wlfc_unblock(dhd_pub_t *pub);
+extern const uint8 prio2fifo[];
+#endif /* PROP_TXSTATUS */
+
+int dhd_os_socram_dump(struct net_device *dev, uint32 *dump_size);
+int dhd_os_get_socram_dump(struct net_device *dev, char **buf, uint32 *size);
+int dhd_common_socram_dump(dhd_pub_t *dhdp);
+
+int dhd_dump(dhd_pub_t *dhdp, char *buf, int buflen);
+
+int dhd_os_get_version(struct net_device *dev, bool dhd_ver, char **buf, uint32 size);
+void dhd_get_memdump_filename(struct net_device *ndev, char *memdump_path, int len, char *fname);
+uint8* dhd_os_prealloc(dhd_pub_t *dhdpub, int section, uint size, bool kmalloc_if_fail);
+void dhd_os_prefree(dhd_pub_t *dhdpub, void *addr, uint size);
+
+#if defined(CONFIG_DHD_USE_STATIC_BUF)
+#define DHD_OS_PREALLOC(dhdpub, section, size) dhd_os_prealloc(dhdpub, section, size, FALSE)
+#define DHD_OS_PREFREE(dhdpub, addr, size) dhd_os_prefree(dhdpub, addr, size)
+#else
+#define DHD_OS_PREALLOC(dhdpub, section, size) MALLOC(dhdpub->osh, size)
+#define DHD_OS_PREFREE(dhdpub, addr, size) MFREE(dhdpub->osh, addr, size)
+#endif /* defined(CONFIG_DHD_USE_STATIC_BUF) */
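+
+/* Sketch of the intended pairing (section/size are illustrative); with
+ * CONFIG_DHD_USE_STATIC_BUF the buffer comes from the prealloc pool,
+ * otherwise from MALLOC:
+ *
+ *	buf = DHD_OS_PREALLOC(dhdp, section, size);
+ *	// ... use buf ...
+ *	DHD_OS_PREFREE(dhdp, buf, size);
+ */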
+
+#ifdef USE_WFA_CERT_CONF
+enum {
+ SET_PARAM_BUS_TXGLOM_MODE,
+ SET_PARAM_ROAMOFF,
+#ifdef USE_WL_FRAMEBURST
+ SET_PARAM_FRAMEBURST,
+#endif /* USE_WL_FRAMEBURST */
+#ifdef USE_WL_TXBF
+ SET_PARAM_TXBF,
+#endif /* USE_WL_TXBF */
+#ifdef PROP_TXSTATUS
+ SET_PARAM_PROPTX,
+ SET_PARAM_PROPTXMODE,
+#endif /* PROP_TXSTATUS */
+ PARAM_LAST_VALUE
+};
+extern int sec_get_param_wfa_cert(dhd_pub_t *dhd, int mode, uint* read_val);
+#ifdef DHD_EXPORT_CNTL_FILE
+#define VALUENOTSET 0xFFFFFFFFu
+extern uint32 bus_txglom;
+extern uint32 roam_off;
+#ifdef USE_WL_FRAMEBURST
+extern uint32 frameburst;
+#endif /* USE_WL_FRAMEBURST */
+#ifdef USE_WL_TXBF
+extern uint32 txbf;
+#endif /* USE_WL_TXBF */
+#ifdef PROP_TXSTATUS
+extern uint32 proptx;
+#endif /* PROP_TXSTATUS */
+#endif /* DHD_EXPORT_CNTL_FILE */
+#endif /* USE_WFA_CERT_CONF */
+
+#if defined(BCM_ROUTER_DHD)
+#if defined(HNDCTF)
+bool dhd_ctf_hotbrc_check(dhd_pub_t *dhdp, uint8 *eh, int ifidx);
+void dhd_ctf_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf);
+bool dhd_l2_filter_chainable(dhd_pub_t *dhdp, uint8 *eh, int ifidx);
+bool dhd_wet_chainable(dhd_pub_t *dhdap);
+bool dhd_rx_pkt_chainable(dhd_pub_t *dhdp, int ifidx);
+#endif /* HNDCTF */
+extern void dhd_schedule_trap_log_dump(dhd_pub_t *dhdp,
+ uint8 *buf, uint32 size);
+/* When a new flowid is allocated/deallocated, inform dhd. */
+extern void dhd_add_flowid(dhd_pub_t * dhdp, int ifidx,
+ uint8 ac_prio, void * ea, uint16 flowid);
+extern void dhd_del_flowid(dhd_pub_t * dhdp, int ifidx, uint16 flowid);
+#else /* ! BCM_ROUTER_DHD */
+#define dhd_add_flowid(pub, ifidx, ac_prio, ea, flowid) do {} while (0)
+#define dhd_del_flowid(pub, ifidx, flowid) do {} while (0)
+bool dhd_wet_chainable(dhd_pub_t *dhdp);
+#endif /* ! BCM_ROUTER_DHD */
+
+extern unsigned long dhd_os_general_spin_lock(dhd_pub_t *pub);
+extern void dhd_os_general_spin_unlock(dhd_pub_t *pub, unsigned long flags);
+
+/** Miscellaneous DHD Spin Locks */
+
+/* Enable DHD general spin lock/unlock */
+#define DHD_GENERAL_LOCK(dhdp, flags) \
+ (flags) = dhd_os_general_spin_lock(dhdp)
+#define DHD_GENERAL_UNLOCK(dhdp, flags) \
+ dhd_os_general_spin_unlock((dhdp), (flags))
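+
+/* These lock macros assign the saved flags into the caller's variable, so
+ * usage looks like the following sketch:
+ *
+ *	unsigned long flags;
+ *	DHD_GENERAL_LOCK(dhdp, flags);
+ *	// ... touch state shared with other contexts ...
+ *	DHD_GENERAL_UNLOCK(dhdp, flags);
+ */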
+
+/* Enable DHD timer spin lock/unlock */
+#define DHD_TIMER_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define DHD_TIMER_UNLOCK(lock, flags) osl_spin_unlock(lock, (flags))
+
+/* Enable DHD flowring spin lock/unlock */
+#define DHD_FLOWRING_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define DHD_FLOWRING_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
+
+/* Enable DHD common flowring info spin lock/unlock */
+#define DHD_FLOWID_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define DHD_FLOWID_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
+
+/* Enable DHD common flowring list spin lock/unlock */
+#define DHD_FLOWRING_LIST_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define DHD_FLOWRING_LIST_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
+
+#define DHD_RING_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define DHD_RING_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
+
+#define DHD_BUS_LP_STATE_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define DHD_BUS_LP_STATE_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
+
+#define DHD_BAR1_SWITCH_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define DHD_BAR1_SWITCH_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
+
+#define DHD_BUS_PWR_REQ_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define DHD_BUS_PWR_REQ_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
+
+#ifdef PCIE_INB_DW
+#define DHD_BUS_DONGLE_DS_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define DHD_BUS_DONGLE_DS_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
+#endif /* PCIE_INB_DW */
+
+/* Enable DHD backplane spin lock/unlock */
+#define DHD_BACKPLANE_ACCESS_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define DHD_BACKPLANE_ACCESS_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
+
+#define DHD_BUS_INB_DW_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define DHD_BUS_INB_DW_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
+
+/* Enable DHD TDLS peer list spin lock/unlock */
+#ifdef WLTDLS
+#define DHD_TDLS_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define DHD_TDLS_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
+#endif /* WLTDLS */
+
+#ifdef DBG_PKT_MON
+/* Enable DHD PKT MON spin lock/unlock */
+#define DHD_PKT_MON_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define DHD_PKT_MON_UNLOCK(lock, flags) osl_spin_unlock(lock, (flags))
+#endif /* DBG_PKT_MON */
+
+#ifdef DHD_PKT_LOGGING
+/* Enable DHD PKT LOG spin lock/unlock */
+#define DHD_PKT_LOG_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define DHD_PKT_LOG_UNLOCK(lock, flags) osl_spin_unlock(lock, (flags))
+#endif /* DHD_PKT_LOGGING */
+
+#if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS)
+#define DHD_AWDL_STATS_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define DHD_AWDL_STATS_UNLOCK(lock, flags) osl_spin_unlock(lock, (flags))
+#endif /* DHD_AWDL && AWDL_SLOT_STATS */
+
+#if defined(linux) || defined(LINUX)
+#define DHD_LINUX_GENERAL_LOCK(dhdp, flags) DHD_GENERAL_LOCK(dhdp, flags)
+#define DHD_LINUX_GENERAL_UNLOCK(dhdp, flags) DHD_GENERAL_UNLOCK(dhdp, flags)
+#else
+#define DHD_LINUX_GENERAL_LOCK(dhdp, flags) do {BCM_REFERENCE(flags);} while (0)
+#define DHD_LINUX_GENERAL_UNLOCK(dhdp, flags) do {BCM_REFERENCE(flags);} while (0)
+#endif
+
+#define DHD_RX_NAPI_QUEUE_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define DHD_RX_NAPI_QUEUE_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
+
+#define DHD_UP_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define DHD_UP_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
+
+#define DHD_WAKE_SPIN_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define DHD_WAKE_SPIN_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
+
+/*
+ * Temporarily change the log dump lock to spin_lock_irqsave, as DHD_ERROR/DHD_LOG_MEM
+ * are called from dhdpcie_bus_isr.
+ * This will be reverted once a proper solution is implemented to handle ISR prints.
+ */
+#define DHD_LOG_DUMP_BUF_LOCK(lock, flags) (flags) = osl_spin_lock_irq(lock)
+#define DHD_LOG_DUMP_BUF_UNLOCK(lock, flags) osl_spin_unlock_irq((lock), (flags))
+
+#define DHD_PKT_WAKE_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define DHD_PKT_WAKE_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
+
+#define DHD_OOB_IRQ_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define DHD_OOB_IRQ_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
+
+#define DHD_IF_STA_LIST_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define DHD_IF_STA_LIST_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
+
+#define DHD_DBG_RING_LOCK_INIT(osh) osl_spin_lock_init(osh)
+#define DHD_DBG_RING_LOCK_DEINIT(osh, lock) osl_spin_lock_deinit(osh, (lock))
+#define DHD_DBG_RING_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define DHD_DBG_RING_UNLOCK(lock, flags) osl_spin_unlock((lock), flags)
+
+#ifdef DHD_MEM_STATS
+/* memory stats lock/unlock */
+#define DHD_MEM_STATS_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define DHD_MEM_STATS_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
+#endif /* DHD_MEM_STATS */
+
+extern void dhd_dump_to_kernelog(dhd_pub_t *dhdp);
+
+#if defined(LINUX) || defined(linux)
+extern void dhd_print_tasklet_status(dhd_pub_t *dhd);
+#ifdef PCIE_INB_DW
+extern bool dhd_check_cfg_in_progress(dhd_pub_t *dhdp);
+#endif
+#else
+static INLINE void dhd_print_tasklet_status(dhd_pub_t *dhd) { }
+static INLINE bool dhd_check_cfg_in_progress(dhd_pub_t *dhdp)
+{ return FALSE; }
+#endif /* LINUX | linux */
+
+#ifdef BCMDBUS
+extern uint dhd_get_rxsz(dhd_pub_t *pub);
+extern void dhd_set_path(dhd_pub_t *pub);
+extern void dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf);
+extern void dhd_bus_clearcounts(dhd_pub_t *dhdp);
+#endif /* BCMDBUS */
+
+#ifdef DHD_L2_FILTER
+extern int dhd_get_parp_status(dhd_pub_t *dhdp, uint32 idx);
+extern int dhd_set_parp_status(dhd_pub_t *dhdp, uint32 idx, int val);
+extern int dhd_get_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx);
+extern int dhd_set_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx, int val);
+extern int dhd_get_block_ping_status(dhd_pub_t *dhdp, uint32 idx);
+extern int dhd_set_block_ping_status(dhd_pub_t *dhdp, uint32 idx, int val);
+extern int dhd_get_grat_arp_status(dhd_pub_t *dhdp, uint32 idx);
+extern int dhd_set_grat_arp_status(dhd_pub_t *dhdp, uint32 idx, int val);
+extern int dhd_get_block_tdls_status(dhd_pub_t *dhdp, uint32 idx);
+extern int dhd_set_block_tdls_status(dhd_pub_t *dhdp, uint32 idx, int val);
+#endif /* DHD_L2_FILTER */
+
+#if (defined(BCM_ROUTER_DHD) && defined(QOS_MAP_SET))
+extern int dhd_set_qosmap_up_table(dhd_pub_t *dhdp, uint32 idx, bcm_tlv_t *qos_map_ie);
+#endif /* BCM_ROUTER_DHD && QOS_MAP_SET */
+
+typedef struct wl_io_pport {
+ dhd_pub_t *dhd_pub;
+ uint ifidx;
+} wl_io_pport_t;
+
+typedef struct wl_evt_pport {
+ dhd_pub_t *dhd_pub;
+ int *ifidx;
+ void *pktdata;
+ uint data_len;
+ void **data_ptr;
+ void *raw_event;
+} wl_evt_pport_t;
+
+extern void *dhd_pub_shim(dhd_pub_t *dhd_pub);
+#ifdef DHD_FW_COREDUMP
+void* dhd_get_fwdump_buf(dhd_pub_t *dhd_pub, uint32 length);
+#endif /* DHD_FW_COREDUMP */
+
+#if defined(SET_XPS_CPUS)
+int dhd_xps_cpus_enable(struct net_device *net, int enable);
+int custom_xps_map_set(struct net_device *net, char *buf, size_t len);
+void custom_xps_map_clear(struct net_device *net);
+#endif
+
+#if defined(SET_RPS_CPUS)
+int dhd_rps_cpus_enable(struct net_device *net, int enable);
+int custom_rps_map_set(struct netdev_rx_queue *queue, char *buf, size_t len);
+void custom_rps_map_clear(struct netdev_rx_queue *queue);
+#define PRIMARY_INF 0
+#define VIRTUAL_INF 1
+#if defined(CONFIG_MACH_UNIVERSAL7420) || defined(CONFIG_SOC_EXYNOS8890)
+#define RPS_CPUS_MASK "10"
+#define RPS_CPUS_MASK_P2P "10"
+#define RPS_CPUS_MASK_IBSS "10"
+#define RPS_CPUS_WLAN_CORE_ID 4
+#else
+#if defined(DHD_TPUT_PATCH)
+#define RPS_CPUS_MASK "f"
+#define RPS_CPUS_MASK_P2P "f"
+#define RPS_CPUS_MASK_IBSS "f"
+#else
+#define RPS_CPUS_MASK "6"
+#define RPS_CPUS_MASK_P2P "6"
+#define RPS_CPUS_MASK_IBSS "6"
+#endif
+#endif /* CONFIG_MACH_UNIVERSAL7420 || CONFIG_SOC_EXYNOS8890 */
+#endif /* SET_RPS_CPUS */
+
+int dhd_get_download_buffer(dhd_pub_t *dhd, char *file_path, download_type_t component,
+ char ** buffer, int *length);
+
+void dhd_free_download_buffer(dhd_pub_t *dhd, void *buffer, int length);
+
+int dhd_download_blob(dhd_pub_t *dhd, unsigned char *buf,
+ uint32 len, char *iovar);
+
+int dhd_download_blob_cached(dhd_pub_t *dhd, char *file_path,
+ uint32 len, char *iovar);
+
+int dhd_apply_default_txcap(dhd_pub_t *dhd, char *txcap_path);
+int dhd_apply_default_clm(dhd_pub_t *dhd, char *clm_path);
+
+#ifdef SHOW_LOGTRACE
+int dhd_parse_logstrs_file(osl_t *osh, char *raw_fmts, int logstrs_size,
+ dhd_event_log_t *event_log);
+int dhd_parse_map_file(osl_t *osh, void *file, uint32 *ramstart,
+ uint32 *rodata_start, uint32 *rodata_end);
+#ifdef PCIE_FULL_DONGLE
+int dhd_event_logtrace_infobuf_pkt_process(dhd_pub_t *dhdp, void *pktbuf,
+ dhd_event_log_t *event_data);
+#endif /* PCIE_FULL_DONGLE */
+#endif /* SHOW_LOGTRACE */
+
+/*
+ * control_logtrace:
+ * "0" -> do not print event log messages in any form
+ * "1" -> print event log messages as EL
+ * "2" -> print event log messages as formatted CONSOLE_E if logstrs.bin etc. files are available
+ */
+typedef enum logtrace_ctrl {
+ LOGTRACE_DISABLE = 0,
+ LOGTRACE_RAW_FMT = 1,
+ LOGTRACE_PARSED_FMT = 2
+} logtrace_ctrl_t;
+
+#define DEFAULT_CONTROL_LOGTRACE LOGTRACE_PARSED_FMT
+#ifndef CUSTOM_CONTROL_LOGTRACE
+#define CUSTOM_CONTROL_LOGTRACE DEFAULT_CONTROL_LOGTRACE
+#endif
+
+extern uint8 control_logtrace;
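+
+/* Sketch: consumers are expected to gate their prints on this control, e.g.
+ *
+ *	if (control_logtrace == LOGTRACE_PARSED_FMT)
+ *		;	// format via logstrs.bin strings
+ *	else if (control_logtrace == LOGTRACE_RAW_FMT)
+ *		;	// emit raw EL records
+ */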
+
+#ifdef BTLOG
+int dhd_bt_log_pkt_process(dhd_pub_t *dhdp, void *pktbuf);
+#endif /* BTLOG */
+
+#if defined(NDIS)
+bool dhd_is_device_removed(dhd_pub_t *dhd);
+#else
+#define dhd_is_device_removed(x) FALSE
+#define dhd_os_ind_firmware_stall(x)
+#endif /* defined(NDIS) */
+
+#if defined(DHD_FW_COREDUMP)
+#if defined(linux) || defined(LINUX)
+extern void dhd_get_memdump_info(dhd_pub_t *dhd);
+#else
+static INLINE void dhd_get_memdump_info(dhd_pub_t *dhd)
+{ return; }
+#endif /* linux || LINUX */
+#endif /* defined(DHD_FW_COREDUMP) */
+#ifdef BCMASSERT_LOG
+extern void dhd_get_assert_info(dhd_pub_t *dhd);
+#else
+static INLINE void dhd_get_assert_info(dhd_pub_t *dhd) { }
+#endif /* BCMASSERT_LOG */
+
+#if defined(LINUX) || defined(linux)
+#define DMAXFER_FREE(dhdp, dmmap) dhd_schedule_dmaxfer_free(dhdp, dmmap)
+#else /* !(LINUX || linux) */
+#define DMAXFER_FREE(dhdp, dmmap) dmaxfer_free_prev_dmaaddr(dhdp, dmmap)
+#endif /* linux || LINUX */
+
+#if defined(PCIE_FULL_DONGLE)
+extern void dmaxfer_free_prev_dmaaddr(dhd_pub_t *dhdp, dmaxref_mem_map_t *dmmap);
+void dhd_schedule_dmaxfer_free(dhd_pub_t *dhdp, dmaxref_mem_map_t *dmmap);
+#endif /* PCIE_FULL_DONGLE */
+
+#define DHD_LB_STATS_NOOP do { /* noop */ } while (0)
+#if defined(DHD_LB_STATS)
+#include <bcmutils.h>
+extern void dhd_lb_stats_init(dhd_pub_t *dhd);
+extern void dhd_lb_stats_deinit(dhd_pub_t *dhd);
+extern void dhd_lb_stats_reset(dhd_pub_t *dhd);
+#ifdef DHD_MEM_STATS
+extern uint64 dhd_lb_mem_usage(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf);
+#endif /* DHD_MEM_STATS */
+extern void dhd_lb_stats_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf);
+extern void dhd_lb_stats_update_napi_histo(dhd_pub_t *dhdp, uint32 count);
+extern void dhd_lb_stats_update_txc_histo(dhd_pub_t *dhdp, uint32 count);
+extern void dhd_lb_stats_update_rxc_histo(dhd_pub_t *dhdp, uint32 count);
+extern void dhd_lb_stats_txc_percpu_cnt_incr(dhd_pub_t *dhdp);
+extern void dhd_lb_stats_rxc_percpu_cnt_incr(dhd_pub_t *dhdp);
+#define DHD_LB_STATS_INIT(dhdp) dhd_lb_stats_init(dhdp)
+#define DHD_LB_STATS_DEINIT(dhdp) dhd_lb_stats_deinit(dhdp)
+/* Reset is called from the common layer, so it takes dhd_pub_t as an argument */
+#define DHD_LB_STATS_RESET(dhdp) dhd_lb_stats_reset(dhdp)
+#define DHD_LB_STATS_CLR(x) (x) = 0U
+#define DHD_LB_STATS_INCR(x) (x) = (x) + 1
+#define DHD_LB_STATS_ADD(x, c) (x) = (x) + (c)
+#define DHD_LB_STATS_PERCPU_ARR_INCR(x) \
+ { \
+ int cpu = get_cpu(); put_cpu(); \
+ DHD_LB_STATS_INCR(x[cpu]); \
+ }
+#define DHD_LB_STATS_UPDATE_NAPI_HISTO(dhdp, x) dhd_lb_stats_update_napi_histo(dhdp, x)
+#else /* !DHD_LB_STATS */
+#define DHD_LB_STATS_INIT(dhdp) DHD_LB_STATS_NOOP
+#define DHD_LB_STATS_DEINIT(dhdp) DHD_LB_STATS_NOOP
+#define DHD_LB_STATS_RESET(dhdp) DHD_LB_STATS_NOOP
+#define DHD_LB_STATS_CLR(x) DHD_LB_STATS_NOOP
+#define DHD_LB_STATS_INCR(x) DHD_LB_STATS_NOOP
+#define DHD_LB_STATS_ADD(x, c) DHD_LB_STATS_NOOP
+#define DHD_LB_STATS_PERCPU_ARR_INCR(x) DHD_LB_STATS_NOOP
+#define DHD_LB_STATS_UPDATE_NAPI_HISTO(dhd, x) DHD_LB_STATS_NOOP
+#endif /* !DHD_LB_STATS */
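+
+/* Illustrative use (hypothetical counter array) of the per-CPU helper above:
+ * with DHD_LB_STATS defined this increments the slot for the current CPU;
+ * without it the statement compiles away to a no-op.
+ *
+ *	uint32 napi_percpu_cnt[NR_CPUS];	// hypothetical storage
+ *	DHD_LB_STATS_PERCPU_ARR_INCR(napi_percpu_cnt);
+ */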
+
+#ifdef BCMDBG
+extern void dhd_schedule_macdbg_dump(dhd_pub_t *dhdp);
+#endif /* BCMDBG */
+
+#ifdef DHD_SSSR_DUMP
+#ifdef DHD_SSSR_DUMP_BEFORE_SR
+#define DHD_SSSR_MEMPOOL_SIZE (2 * 1024 * 1024) /* 2MB size */
+#else
+#define DHD_SSSR_MEMPOOL_SIZE (1 * 1024 * 1024) /* 1MB size */
+#endif /* DHD_SSSR_DUMP_BEFORE_SR */
+
+/* used in sssr_dump_mode */
+#define SSSR_DUMP_MODE_SSSR 0 /* dump both *before* and *after* files */
+#define SSSR_DUMP_MODE_FIS 1 /* dump *after* files only */
+
+extern int dhd_sssr_mempool_init(dhd_pub_t *dhd);
+extern void dhd_sssr_mempool_deinit(dhd_pub_t *dhd);
+extern int dhd_sssr_dump_init(dhd_pub_t *dhd);
+extern void dhd_sssr_dump_deinit(dhd_pub_t *dhd);
+extern int dhdpcie_sssr_dump(dhd_pub_t *dhd);
+extern void dhd_sssr_print_filepath(dhd_pub_t *dhd, char *path);
+extern int dhd_sssr_reg_info_init(dhd_pub_t *dhd);
+extern void dhd_sssr_reg_info_deinit(dhd_pub_t *dhd);
+extern uint dhd_sssr_dig_buf_size(dhd_pub_t *dhdp);
+extern uint dhd_sssr_dig_buf_addr(dhd_pub_t *dhdp);
+extern uint dhd_sssr_mac_buf_size(dhd_pub_t *dhdp, uint8 core_idx);
+extern uint dhd_sssr_mac_xmtaddress(dhd_pub_t *dhdp, uint8 core_idx);
+extern uint dhd_sssr_mac_xmtdata(dhd_pub_t *dhdp, uint8 core_idx);
+
+#define DHD_SSSR_MEMPOOL_INIT(dhdp) dhd_sssr_mempool_init(dhdp)
+#define DHD_SSSR_MEMPOOL_DEINIT(dhdp) dhd_sssr_mempool_deinit(dhdp)
+#define DHD_SSSR_DUMP_INIT(dhdp) dhd_sssr_dump_init(dhdp)
+#define DHD_SSSR_DUMP_DEINIT(dhdp) dhd_sssr_dump_deinit(dhdp)
+#define DHD_SSSR_PRINT_FILEPATH(dhdp, path) dhd_sssr_print_filepath(dhdp, path)
+#define DHD_SSSR_REG_INFO_INIT(dhdp) dhd_sssr_reg_info_init(dhdp)
+#define DHD_SSSR_REG_INFO_DEINIT(dhdp) dhd_sssr_reg_info_deinit(dhdp)
+#else
+#define DHD_SSSR_MEMPOOL_INIT(dhdp) do { /* noop */ } while (0)
+#define DHD_SSSR_MEMPOOL_DEINIT(dhdp) do { /* noop */ } while (0)
+#define DHD_SSSR_DUMP_INIT(dhdp) do { /* noop */ } while (0)
+#define DHD_SSSR_DUMP_DEINIT(dhdp) do { /* noop */ } while (0)
+#define DHD_SSSR_PRINT_FILEPATH(dhdp, path) do { /* noop */ } while (0)
+#define DHD_SSSR_REG_INFO_INIT(dhdp) do { /* noop */ } while (0)
+#define DHD_SSSR_REG_INFO_DEINIT(dhdp) do { /* noop */ } while (0)
+#endif /* DHD_SSSR_DUMP */
+
+#ifdef BCMPCIE
+extern int dhd_prot_debug_info_print(dhd_pub_t *dhd);
+extern bool dhd_bus_skip_clm(dhd_pub_t *dhdp);
+extern void dhd_pcie_dump_rc_conf_space_cap(dhd_pub_t *dhd);
+extern bool dhd_pcie_dump_int_regs(dhd_pub_t *dhd);
+#else
+#define dhd_prot_debug_info_print(x)
+static INLINE bool dhd_bus_skip_clm(dhd_pub_t *dhd_pub)
+{ return 0; }
+#endif /* BCMPCIE */
+
+#if defined(LINUX) || defined(linux)
+void dhd_show_kirqstats(dhd_pub_t *dhd);
+#else
+static INLINE void dhd_show_kirqstats(dhd_pub_t *dhd)
+{ return; }
+#endif /* defined(LINUX) || defined(linux) */
+
+/* Bitmask used for Join Timeout */
+#define WLC_SSID_MASK 0x01
+#define WLC_WPA_MASK 0x02
+
+#if defined(LINUX) || defined(linux) || defined(DHD_EFI)
+fw_download_status_t dhd_fw_download_status(dhd_pub_t *dhd_pub);
+extern int dhd_start_join_timer(dhd_pub_t *pub);
+extern int dhd_stop_join_timer(dhd_pub_t *pub);
+extern int dhd_start_scan_timer(dhd_pub_t *pub, bool is_escan);
+extern int dhd_stop_scan_timer(dhd_pub_t *pub, bool is_escan, uint16 sync_id);
+extern int dhd_start_cmd_timer(dhd_pub_t *pub);
+extern int dhd_stop_cmd_timer(dhd_pub_t *pub);
+extern int dhd_start_bus_timer(dhd_pub_t *pub);
+extern int dhd_stop_bus_timer(dhd_pub_t *pub);
+extern uint16 dhd_get_request_id(dhd_pub_t *pub);
+extern int dhd_set_request_id(dhd_pub_t *pub, uint16 id, uint32 cmd);
+extern void dhd_clear_join_error(dhd_pub_t *pub, uint32 mask);
+extern void dhd_get_scan_to_val(dhd_pub_t *pub, uint32 *to_val);
+extern void dhd_set_scan_to_val(dhd_pub_t *pub, uint32 to_val);
+extern void dhd_get_join_to_val(dhd_pub_t *pub, uint32 *to_val);
+extern void dhd_set_join_to_val(dhd_pub_t *pub, uint32 to_val);
+extern void dhd_get_cmd_to_val(dhd_pub_t *pub, uint32 *to_val);
+extern void dhd_set_cmd_to_val(dhd_pub_t *pub, uint32 to_val);
+extern void dhd_get_bus_to_val(dhd_pub_t *pub, uint32 *to_val);
+extern void dhd_set_bus_to_val(dhd_pub_t *pub, uint32 to_val);
+extern int dhd_start_timesync_timer(dhd_pub_t *pub);
+extern int dhd_stop_timesync_timer(dhd_pub_t *pub);
+#else
+static INLINE fw_download_status_t dhd_fw_download_status(dhd_pub_t *dhd_pub)
+{ return FW_UNLOADED; }
+static INLINE int dhd_start_join_timer(dhd_pub_t *pub) { return 0; }
+static INLINE int dhd_stop_join_timer(dhd_pub_t *pub) { return 0; }
+static INLINE int dhd_start_scan_timer(dhd_pub_t *pub, bool is_escan) { return 0; }
+static INLINE int dhd_stop_scan_timer(dhd_pub_t *pub, bool is_escan, uint16 sync_id) { return 0; }
+static INLINE int dhd_start_cmd_timer(dhd_pub_t *pub) { return 0; }
+static INLINE int dhd_stop_cmd_timer(dhd_pub_t *pub) { return 0; }
+static INLINE int dhd_start_bus_timer(dhd_pub_t *pub) { return 0; }
+static INLINE int dhd_stop_bus_timer(dhd_pub_t *pub) { return 0; }
+static INLINE uint16 dhd_get_request_id(dhd_pub_t *pub) { return 0; }
+static INLINE int dhd_set_request_id(dhd_pub_t *pub, uint16 id, uint32 cmd) { return 0; }
+static INLINE void dhd_clear_join_error(dhd_pub_t *pub, uint32 mask) { return; }
+static INLINE void dhd_get_scan_to_val(dhd_pub_t *pub, uint32 *to_val) { return; }
+static INLINE void dhd_set_scan_to_val(dhd_pub_t *pub, uint32 to_val) { return; }
+static INLINE void dhd_get_join_to_val(dhd_pub_t *pub, uint32 *to_val) { return; }
+static INLINE void dhd_set_join_to_val(dhd_pub_t *pub, uint32 to_val) { return; }
+static INLINE void dhd_get_cmd_to_val(dhd_pub_t *pub, uint32 *to_val) { return; }
+static INLINE void dhd_set_cmd_to_val(dhd_pub_t *pub, uint32 to_val) { return; }
+static INLINE void dhd_get_bus_to_val(dhd_pub_t *pub, uint32 *to_val) { return; }
+static INLINE void dhd_set_bus_to_val(dhd_pub_t *pub, uint32 to_val) { return; }
+static INLINE int dhd_start_timesync_timer(dhd_pub_t *pub) { return 0; }
+static INLINE int dhd_stop_timesync_timer(dhd_pub_t *pub) { return 0; }
+#endif /* defined(LINUX) || defined(linux) || defined(DHD_EFI) */
+
+#ifdef DHD_PKTID_AUDIT_ENABLED
+#if defined(LINUX) || defined(linux)
+void dhd_pktid_error_handler(dhd_pub_t *dhdp);
+#else /* !(LINUX || linux) */
+static INLINE void dhd_pktid_error_handler(dhd_pub_t *dhdp) { ASSERT(0); }
+#endif /* LINUX || linux */
+#endif /* DHD_PKTID_AUDIT_ENABLED */
+
+#ifdef DHD_MAP_PKTID_LOGGING
+#if defined(LINUX) || defined(linux)
+extern void dhd_pktid_logging_dump(dhd_pub_t *dhdp);
+#else /* !(LINUX || linux) */
+static INLINE void dhd_pktid_logging_dump(dhd_pub_t *dhdp) { }
+#endif /* LINUX || linux */
+#endif /* DHD_MAP_PKTID_LOGGING */
+
+#ifdef DHD_PCIE_RUNTIMEPM
+#define DEFAULT_DHD_RUNTIME_MS 100
+#ifndef CUSTOM_DHD_RUNTIME_MS
+#define CUSTOM_DHD_RUNTIME_MS DEFAULT_DHD_RUNTIME_MS
+#endif /* CUSTOM_DHD_RUNTIME_MS */
+
+#ifndef MAX_IDLE_COUNT
+#define MAX_IDLE_COUNT 11
+#endif /* MAX_IDLE_COUNT */
+
+extern bool dhd_runtimepm_state(dhd_pub_t *dhd);
+extern bool dhd_runtime_bus_wake(struct dhd_bus *bus, bool wait, void *func_addr);
+extern bool dhdpcie_runtime_bus_wake(dhd_pub_t *dhdp, bool wait, void *func_addr);
+extern void dhdpcie_block_runtime_pm(dhd_pub_t *dhdp);
+extern bool dhdpcie_is_resume_done(dhd_pub_t *dhdp);
+extern void dhd_runtime_pm_disable(dhd_pub_t *dhdp);
+extern void dhd_runtime_pm_enable(dhd_pub_t *dhdp);
+/* Disable the Runtime PM thread and wake up if the bus is already in suspend */
+#define DHD_DISABLE_RUNTIME_PM(dhdp) \
+do { \
+ dhd_runtime_pm_disable(dhdp); \
+} while (0)
+
+/* Enable the Runtime PM thread */
+#define DHD_ENABLE_RUNTIME_PM(dhdp) \
+do { \
+ dhd_runtime_pm_enable(dhdp); \
+} while (0)
+
+/* Stop the timer and disable the RPM thread */
+#define DHD_STOP_RPM_TIMER(dhdp) \
+do { \
+ dhd_os_runtimepm_timer(dhdp, 0); \
+ DHD_DISABLE_RUNTIME_PM(dhdp); \
+} while (0)
+
+/* Start the timer and enable the RPM thread */
+#define DHD_START_RPM_TIMER(dhdp) \
+do { \
+ dhd_os_runtimepm_timer(dhdp, dhd_runtimepm_ms); \
+ DHD_ENABLE_RUNTIME_PM(dhdp); \
+} while (0)
+#else
+#define DHD_DISABLE_RUNTIME_PM(dhdp)
+#define DHD_ENABLE_RUNTIME_PM(dhdp)
+#define DHD_STOP_RPM_TIMER(dhdp)
+#define DHD_START_RPM_TIMER(dhdp)
+#endif /* DHD_PCIE_RUNTIMEPM */
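+
+/* Illustrative call site (sketch only, using the macros above): the
+ * do-while(0) form lets these macros sit safely in an unbraced if/else:
+ *
+ *	if (suspend)
+ *		DHD_STOP_RPM_TIMER(dhdp);
+ *	else
+ *		DHD_START_RPM_TIMER(dhdp);
+ */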
+
+extern bool dhd_prot_is_cmpl_ring_empty(dhd_pub_t *dhd, void *prot_info);
+extern void dhd_prot_dump_ring_ptrs(void *prot_info);
+
+#if defined(LINUX) || defined(linux)
+#if defined(DHD_TRACE_WAKE_LOCK)
+void dhd_wk_lock_stats_dump(dhd_pub_t *dhdp);
+#endif
+#endif /* LINUX || linux */
+
+extern bool dhd_query_bus_erros(dhd_pub_t *dhdp);
+void dhd_clear_bus_errors(dhd_pub_t *dhdp);
+
+#if (defined(linux) || defined(LINUX)) && defined(CONFIG_64BIT)
+#define DHD_SUPPORT_64BIT
+#elif defined(DHD_EFI)
+#define DHD_SUPPORT_64BIT
+/* Disabled by default on other platforms; define the appropriate macro to enable 64-bit support */
+#endif /* (linux || LINUX) && CONFIG_64BIT */
+
+#if defined(DHD_EFI) || defined(DHD_ERPOM)
+extern void dhd_schedule_reset(dhd_pub_t *dhdp);
+#else
+static INLINE void dhd_schedule_reset(dhd_pub_t *dhdp) { }
+#endif /* DHD_EFI || DHD_ERPOM */
+
+extern void init_dhd_timeouts(dhd_pub_t *pub);
+extern void deinit_dhd_timeouts(dhd_pub_t *pub);
+
+typedef enum timeout_reasons {
+ DHD_REASON_COMMAND_TO,
+ DHD_REASON_JOIN_TO,
+ DHD_REASON_SCAN_TO,
+ DHD_REASON_OQS_TO
+} timeout_reasons_t;
+
+#ifdef REPORT_FATAL_TIMEOUTS
+void dhd_send_trap_to_fw_for_timeout(dhd_pub_t * pub, timeout_reasons_t reason);
+#endif
+#if defined(PCIE_OOB) || defined(PCIE_INB_DW)
+extern int dhd_bus_set_device_wake(struct dhd_bus *bus, bool val);
+extern void dhd_bus_dw_deassert(dhd_pub_t *dhd);
+#endif /* defined(PCIE_OOB) || defined(PCIE_INB_DW) */
+extern void dhd_prhex(const char *msg, volatile uchar *buf, uint nbytes, uint8 dbg_level);
+int dhd_tput_test(dhd_pub_t *dhd, tput_test_t *tput_data);
+void dhd_tput_test_rx(dhd_pub_t *dhd, void *pkt);
+#ifdef DHD_EFI
+int dhd_get_max_txbufs(dhd_pub_t *dhdp);
+#else
+static INLINE int dhd_get_max_txbufs(dhd_pub_t *dhdp)
+{ return -1; }
+#endif
+
+#ifdef FILTER_IE
+int dhd_read_from_file(dhd_pub_t *dhd);
+int dhd_parse_filter_ie(dhd_pub_t *dhd, uint8 *buf);
+int dhd_get_filter_ie_count(dhd_pub_t *dhd, uint8 *buf);
+int dhd_parse_oui(dhd_pub_t *dhd, uint8 *inbuf, uint8 *oui, int len);
+int dhd_check_valid_ie(dhd_pub_t *dhdp, uint8 *buf, int len);
+#endif /* FILTER_IE */
+
+uint16 dhd_prot_get_ioctl_trans_id(dhd_pub_t *dhdp);
+
+#ifdef SET_PCIE_IRQ_CPU_CORE
+enum {
+ DHD_AFFINITY_OFF = 0,
+ DHD_AFFINITY_TPUT_150MBPS,
+ DHD_AFFINITY_TPUT_300MBPS,
+ DHD_AFFINITY_LAST
+};
+
+extern void dhd_set_irq_cpucore(dhd_pub_t *dhdp, int affinity_cmd);
+#endif /* SET_PCIE_IRQ_CPU_CORE */
+#if defined(DHD_HANG_SEND_UP_TEST)
+extern void dhd_make_hang_with_reason(struct net_device *dev, const char *string_num);
+#endif /* DHD_HANG_SEND_UP_TEST */
+#ifdef BTLOG
+extern void dhd_rx_bt_log(dhd_pub_t *dhdp, void *pkt);
+#endif /* BTLOG */
+
+#ifdef DHD_RND_DEBUG
+int dhd_dump_rnd_info(dhd_pub_t *dhd, uint8 *rnd_buf, uint32 rnd_len);
+int dhd_get_rnd_info(dhd_pub_t *dhd);
+#endif /* DHD_RND_DEBUG */
+
+#ifdef DHD_WAKE_STATUS
+wake_counts_t* dhd_get_wakecount(dhd_pub_t *dhdp);
+#endif /* DHD_WAKE_STATUS */
+extern int dhd_get_random_bytes(uint8 *buf, uint len);
+
+#if defined(DHD_BLOB_EXISTENCE_CHECK)
+extern void dhd_set_blob_support(dhd_pub_t *dhdp, char *fw_path);
+#endif /* DHD_BLOB_EXISTENCE_CHECK */
+
+int dhd_get_preserve_log_numbers(dhd_pub_t *dhd, uint32 *logset_mask);
+#ifdef DHD_LOG_DUMP
+void dhd_schedule_log_dump(dhd_pub_t *dhdp, void *type);
+void dhd_log_dump_trigger(dhd_pub_t *dhdp, int subcmd);
+#endif
+
+#ifdef DHD_LOG_DUMP
+int dhd_log_dump_ring_to_file(dhd_pub_t *dhdp, void *ring_ptr, void *file,
+ unsigned long *file_posn, log_dump_section_hdr_t *sec_hdr, char *text_hdr,
+ uint32 sec_type);
+int dhd_dump_debug_ring(dhd_pub_t *dhdp, void *ring_ptr, const void *user_buf,
+ log_dump_section_hdr_t *sec_hdr, char *text_hdr, int buflen, uint32 sec_type);
+int dhd_log_dump_cookie_to_file(dhd_pub_t *dhdp, void *fp,
+ const void *user_buf, unsigned long *f_pos);
+int dhd_log_dump_cookie(dhd_pub_t *dhdp, const void *user_buf);
+uint32 dhd_log_dump_cookie_len(dhd_pub_t *dhdp);
+int dhd_logdump_cookie_init(dhd_pub_t *dhdp, uint8 *buf, uint32 buf_size);
+void dhd_logdump_cookie_deinit(dhd_pub_t *dhdp);
+void dhd_logdump_cookie_save(dhd_pub_t *dhdp, char *cookie, char *type);
+int dhd_logdump_cookie_get(dhd_pub_t *dhdp, char *ret_cookie, uint32 buf_size);
+int dhd_logdump_cookie_count(dhd_pub_t *dhdp);
+int dhd_get_dld_log_dump(void *dev, dhd_pub_t *dhdp, const void *user_buf, void *fp,
+ uint32 len, int type, void *pos);
+#if defined(BCMPCIE)
+int dhd_print_ext_trap_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
+ void *fp, uint32 len, void *pos);
+uint32 dhd_get_ext_trap_len(void *ndev, dhd_pub_t *dhdp);
+#endif /* BCMPCIE */
+int dhd_print_dump_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
+ void *fp, uint32 len, void *pos);
+int dhd_print_cookie_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
+ void *fp, uint32 len, void *pos);
+int dhd_print_health_chk_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
+ void *fp, uint32 len, void *pos);
+int dhd_print_time_str(const void *user_buf, void *fp, uint32 len, void *pos);
+#ifdef DHD_DUMP_PCIE_RINGS
+int dhd_print_flowring_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
+ void *fp, uint32 len, void *pos);
+uint32 dhd_get_flowring_len(void *ndev, dhd_pub_t *dhdp);
+#endif /* DHD_DUMP_PCIE_RINGS */
+#ifdef DHD_STATUS_LOGGING
+extern int dhd_print_status_log_data(void *dev, dhd_pub_t *dhdp,
+ const void *user_buf, void *fp, uint32 len, void *pos);
+extern uint32 dhd_get_status_log_len(void *ndev, dhd_pub_t *dhdp);
+#endif /* DHD_STATUS_LOGGING */
+int dhd_print_ecntrs_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
+ void *fp, uint32 len, void *pos);
+int dhd_print_rtt_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
+ void *fp, uint32 len, void *pos);
+int dhd_get_debug_dump_file_name(void *dev, dhd_pub_t *dhdp,
+ char *dump_path, int size);
+uint32 dhd_get_time_str_len(void);
+uint32 dhd_get_health_chk_len(void *ndev, dhd_pub_t *dhdp);
+uint32 dhd_get_dhd_dump_len(void *ndev, dhd_pub_t *dhdp);
+uint32 dhd_get_cookie_log_len(void *ndev, dhd_pub_t *dhdp);
+uint32 dhd_get_ecntrs_len(void *ndev, dhd_pub_t *dhdp);
+uint32 dhd_get_rtt_len(void *ndev, dhd_pub_t *dhdp);
+uint32 dhd_get_dld_len(int log_type);
+void dhd_init_sec_hdr(log_dump_section_hdr_t *sec_hdr);
+extern char *dhd_log_dump_get_timestamp(void);
+bool dhd_log_dump_ecntr_enabled(void);
+bool dhd_log_dump_rtt_enabled(void);
+void dhd_nla_put_sssr_dump_len(void *ndev, uint32 *arr_len);
+int dhd_get_debug_dump(void *dev, const void *user_buf, uint32 len, int type);
+#ifdef DHD_SSSR_DUMP_BEFORE_SR
+int
+dhd_sssr_dump_d11_buf_before(void *dev, const void *user_buf, uint32 len, int core);
+int
+dhd_sssr_dump_dig_buf_before(void *dev, const void *user_buf, uint32 len);
+#endif /* DHD_SSSR_DUMP_BEFORE_SR */
+int
+dhd_sssr_dump_d11_buf_after(void *dev, const void *user_buf, uint32 len, int core);
+int
+dhd_sssr_dump_dig_buf_after(void *dev, const void *user_buf, uint32 len);
+#ifdef DHD_PKT_LOGGING
+extern int dhd_os_get_pktlog_dump(void *dev, const void *user_buf, uint32 len);
+extern uint32 dhd_os_get_pktlog_dump_size(struct net_device *dev);
+extern void dhd_os_get_pktlogdump_filename(struct net_device *dev, char *dump_path, int len);
+#endif /* DHD_PKT_LOGGING */
+
+#ifdef DNGL_AXI_ERROR_LOGGING
+extern int dhd_os_get_axi_error_dump(void *dev, const void *user_buf, uint32 len);
+extern int dhd_os_get_axi_error_dump_size(struct net_device *dev);
+extern void dhd_os_get_axi_error_filename(struct net_device *dev, char *dump_path, int len);
+#endif /* DNGL_AXI_ERROR_LOGGING */
+
+#endif /* DHD_LOG_DUMP */
+
+#define DHD_WORD_TO_LEN_SHIFT (2u) /* WORD to BYTES SHIFT */
+
+#if defined(linux) || defined(LINUX) || defined(DHD_EFI)
+int dhd_export_debug_data(void *mem_buf, void *fp, const void *user_buf, uint32 buf_len, void *pos);
+#else
+static INLINE int dhd_export_debug_data(void *mem_buf, void *fp, const void *user_buf,
+ uint32 buf_len, void *pos)
+{ return 0; }
+#endif /* linux || LINUX || DHD_EFI */
+#if defined(linux) || defined(LINUX)
+#define DHD_PCIE_CONFIG_SAVE(bus) pci_save_state((bus)->dev)
+#define DHD_PCIE_CONFIG_RESTORE(bus) pci_restore_state((bus)->dev)
+#elif defined(DHD_EFI) || defined(NDIS)
+/* For EFI, the PCIe config space saved during init is the one that should
+ * always be restored, so save is a no-op.
+ */
+#define DHD_PCIE_CONFIG_SAVE(bus)
+#define DHD_PCIE_CONFIG_RESTORE(bus) dhdpcie_config_restore(bus, TRUE)
+#else
+#define DHD_PCIE_CONFIG_SAVE(bus) do { /* noop */ } while (0)
+#define DHD_PCIE_CONFIG_RESTORE(bus) do { /* noop */ } while (0)
+#endif /* linux || LINUX */
+
+typedef struct dhd_pkt_parse {
+ uint32 proto; /* Network layer protocol */
+ uint32 t1; /* n-tuple */
+ uint32 t2;
+} dhd_pkt_parse_t;
+
+/* ========= RING API functions : exposed to others ============= */
+#define DHD_RING_TYPE_FIXED 1
+#define DHD_RING_TYPE_SINGLE_IDX 2
+uint32 dhd_ring_get_hdr_size(void);
+void *dhd_ring_init(dhd_pub_t *dhdp, uint8 *buf, uint32 buf_size, uint32 elem_size,
+ uint32 elem_cnt, uint32 type);
+void dhd_ring_deinit(dhd_pub_t *dhdp, void *_ring);
+void *dhd_ring_get_first(void *_ring);
+void dhd_ring_free_first(void *_ring);
+void dhd_ring_set_read_idx(void *_ring, uint32 read_idx);
+void dhd_ring_set_write_idx(void *_ring, uint32 write_idx);
+uint32 dhd_ring_get_read_idx(void *_ring);
+uint32 dhd_ring_get_write_idx(void *_ring);
+void *dhd_ring_get_last(void *_ring);
+void *dhd_ring_get_next(void *_ring, void *cur);
+void *dhd_ring_get_prev(void *_ring, void *cur);
+void *dhd_ring_get_empty(void *_ring);
+int dhd_ring_get_cur_size(void *_ring);
+void dhd_ring_lock(void *ring, void *first_ptr, void *last_ptr);
+void dhd_ring_lock_free(void *ring);
+void *dhd_ring_lock_get_first(void *_ring);
+void *dhd_ring_lock_get_last(void *_ring);
+int dhd_ring_lock_get_count(void *_ring);
+void dhd_ring_lock_free_first(void *ring);
+void dhd_ring_whole_lock(void *ring);
+void dhd_ring_whole_unlock(void *ring);
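+
+/* Minimal usage sketch of the ring API (hypothetical element type and sizes;
+ * error handling elided), assuming a DHD_RING_TYPE_FIXED ring:
+ *
+ *	uint32 hdr = dhd_ring_get_hdr_size();
+ *	uint32 sz = hdr + 16 * sizeof(my_elem_t);
+ *	uint8 *mem = MALLOCZ(dhdp->osh, sz);
+ *	void *ring = dhd_ring_init(dhdp, mem, sz, sizeof(my_elem_t), 16,
+ *		DHD_RING_TYPE_FIXED);
+ *	my_elem_t *slot = (my_elem_t *)dhd_ring_get_empty(ring);	// produce
+ *	my_elem_t *head = (my_elem_t *)dhd_ring_get_first(ring);	// consume
+ *	dhd_ring_free_first(ring);
+ *	dhd_ring_deinit(dhdp, ring);
+ */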
+
+#ifdef GDB_PROXY
+/** Firmware loaded and GDB proxy may access memory and registers */
+#define DHD_GDB_PROXY_PROBE_ACCESS_ENABLED 0x00000001
+/** Firmware loaded, access to it is enabled but it is not running yet */
+#define DHD_GDB_PROXY_PROBE_FIRMWARE_NOT_RUNNING 0x00000002
+/** Firmware is running */
+#define DHD_GDB_PROXY_PROBE_FIRMWARE_RUNNING 0x00000004
+/** Firmware was started in bootloader mode */
+#define DHD_GDB_PROXY_PROBE_BOOTLOADER_MODE 0x00000008
+/** Host memory code offload present */
+#define DHD_GDB_PROXY_PROBE_HOSTMEM_CODE 0x00000010
+
+/* Data structure, returned by "gdb_proxy_probe" iovar */
+typedef struct dhd_gdb_proxy_probe_data {
+ uint32 data_len; /* Length of data in structure */
+ uint32 magic; /* Must contain DHD_IOCTL_MAGIC */
+ uint32 flags; /* Set of DHD_GDB_PROXY_PROBE_... bits */
+ uint32 last_id; /* 0 or proxy ID last set */
+ uint32 hostmem_code_win_base; /* Hostmem code window start in ARM physical address space */
+ uint32 hostmem_code_win_length; /* Hostmem code window length */
+} dhd_gdb_proxy_probe_data_t;
+#endif /* GDB_PROXY */
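+
+/* Illustrative check (sketch only) of the probe data returned by the
+ * "gdb_proxy_probe" iovar:
+ *
+ *	dhd_gdb_proxy_probe_data_t probe;
+ *	// ... probe filled in by the iovar ...
+ *	if (probe.flags & DHD_GDB_PROXY_PROBE_ACCESS_ENABLED) {
+ *		// GDB proxy may access memory and registers
+ *	}
+ */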
+
+#if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS)
+void dhd_clear_awdl_stats(dhd_pub_t *dhd);
+#endif /* DHD_AWDL && AWDL_SLOT_STATS */
+#ifdef DHD_EFI
+extern void dhd_insert_random_mac_addr(dhd_pub_t *dhd, char *nvram_mem, uint *len);
+#endif /* DHD_EFI */
+
+#ifdef PKT_FILTER_SUPPORT
+extern void dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg);
+extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode);
+extern void dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id);
+#endif
+
+#ifdef DHD_DUMP_PCIE_RINGS
+extern int dhd_d2h_h2d_ring_dump(dhd_pub_t *dhd, void *file, const void *user_buf,
+ unsigned long *file_posn, bool file_write);
+#endif /* DHD_DUMP_PCIE_RINGS */
+
+#ifdef EWP_EDL
+#define DHD_EDL_RING_SIZE (D2HRING_EDL_MAX_ITEM * D2HRING_EDL_ITEMSIZE)
+int dhd_event_logtrace_process_edl(dhd_pub_t *dhdp, uint8 *data,
+ void *evt_decode_data);
+int dhd_edl_mem_init(dhd_pub_t *dhd);
+void dhd_edl_mem_deinit(dhd_pub_t *dhd);
+void dhd_prot_edl_ring_tcm_rd_update(dhd_pub_t *dhd);
+#define DHD_EDL_MEM_INIT(dhdp) dhd_edl_mem_init(dhdp)
+#define DHD_EDL_MEM_DEINIT(dhdp) dhd_edl_mem_deinit(dhdp)
+#define DHD_EDL_RING_TCM_RD_UPDATE(dhdp) \
+ dhd_prot_edl_ring_tcm_rd_update(dhdp)
+#else
+#define DHD_EDL_MEM_INIT(dhdp) do { /* noop */ } while (0)
+#define DHD_EDL_MEM_DEINIT(dhdp) do { /* noop */ } while (0)
+#define DHD_EDL_RING_TCM_RD_UPDATE(dhdp) do { /* noop */ } while (0)
+#endif /* EWP_EDL */
+
+#ifdef BIGDATA_SOFTAP
+void dhd_schedule_gather_ap_stadata(void *bcm_cfg, void *ndev, const wl_event_msg_t *e);
+#endif /* BIGDATA_SOFTAP */
+
+#ifdef DHD_PKTTS
+int dhd_get_pktts_enab(dhd_pub_t *dhdp);
+int dhd_set_pktts_enab(dhd_pub_t *dhdp, bool val);
+
+int dhd_get_pktts_flow(dhd_pub_t *dhdp, void *args, int len);
+int dhd_set_pktts_flow(dhd_pub_t *dhdp, void *params, int plen);
+pktts_flow_t *dhd_match_pktts_flow(dhd_pub_t *dhdp, uint32 checksum,
+ uint32 *idx, uint32 *num_config);
+#endif /* DHD_PKTTS */
+
+#if defined(DHD_H2D_LOG_TIME_SYNC)
+void dhd_h2d_log_time_sync_deferred_wq_schedule(dhd_pub_t *dhdp);
+void dhd_h2d_log_time_sync(dhd_pub_t *dhdp);
+#endif /* DHD_H2D_LOG_TIME_SYNC */
+extern void dhd_cleanup_if(struct net_device *net);
+
+void dhd_schedule_logtrace(void *dhd_info);
+int dhd_print_fw_ver_from_file(dhd_pub_t *dhdp, char *fwpath);
+
+#if defined(LINUX) || defined(linux)
+/* Configuration of ecounters. APIs to start/stop. Currently supported only on Linux. */
+extern int dhd_ecounter_configure(dhd_pub_t *dhd, bool enable);
+extern int dhd_start_ecounters(dhd_pub_t *dhd);
+extern int dhd_stop_ecounters(dhd_pub_t *dhd);
+extern int dhd_start_event_ecounters(dhd_pub_t *dhd);
+extern int dhd_stop_event_ecounters(dhd_pub_t *dhd);
+#endif /* LINUX || linux */
+
+#define DHD_DUMP_TYPE_NAME_SIZE 32
+#define DHD_DUMP_FILE_PATH_SIZE 256
+#define DHD_DUMP_FILE_COUNT_MAX 5
+#define DHD_DUMP_TYPE_COUNT_MAX 10
+
+#ifdef DHD_DUMP_MNGR
+typedef struct _DFM_elem {
+ char type_name[DHD_DUMP_TYPE_NAME_SIZE];
+ char file_path[DHD_DUMP_FILE_COUNT_MAX][DHD_DUMP_FILE_PATH_SIZE];
+ int file_idx;
+} DFM_elem_t;
+
+typedef struct _dhd_dump_file_manage {
+ DFM_elem_t elems[DHD_DUMP_TYPE_COUNT_MAX];
+} dhd_dump_file_manage_t;
+
+extern void dhd_dump_file_manage_enqueue(dhd_pub_t *dhd, char *dump_path, char *fname);
+#endif /* DHD_DUMP_MNGR */
+
+#define HD_PREFIX_SIZE 2 /* hexadecimal prefix size */
+#define HD_BYTE_SIZE 2 /* hexadecimal byte size */
+
+#ifdef DHD_HP2P
+extern unsigned long dhd_os_hp2plock(dhd_pub_t *pub);
+extern void dhd_os_hp2punlock(dhd_pub_t *pub, unsigned long flags);
+#endif /* DHD_HP2P */
+
+#ifdef DNGL_AXI_ERROR_LOGGING
+extern void dhd_axi_error(dhd_pub_t *dhd);
+#ifdef DHD_USE_WQ_FOR_DNGL_AXI_ERROR
+extern void dhd_axi_error_dispatch(dhd_pub_t *dhdp);
+#endif /* DHD_USE_WQ_FOR_DNGL_AXI_ERROR */
+#endif /* DNGL_AXI_ERROR_LOGGING */
+
+#ifdef DHD_STATUS_LOGGING
+#include <dhd_statlog.h>
+#else
+#define ST(x) 0
+#define STDIR(x) 0
+#define DHD_STATLOG_CTRL(dhdp, stat, ifidx, reason) \
+ do { /* noop */ } while (0)
+#define DHD_STATLOG_DATA(dhdp, stat, ifidx, dir, cond) \
+ do { BCM_REFERENCE(cond); } while (0)
+#define DHD_STATLOG_DATA_RSN(dhdp, stat, ifidx, dir, reason) \
+ do { /* noop */ } while (0)
+#endif /* DHD_STATUS_LOGGING */
+
+#ifdef SUPPORT_SET_TID
+enum dhd_set_tid_mode {
+ /* Disable changing TID */
+ SET_TID_OFF = 0,
+ /* Change TID for all UDP frames */
+ SET_TID_ALL_UDP,
+ /* Change TID for UDP frames based on UID */
+ SET_TID_BASED_ON_UID
+};
+#if defined(linux) || defined(LINUX)
+extern void dhd_set_tid_based_on_uid(dhd_pub_t *dhdp, void *pkt);
+#else
+static INLINE void dhd_set_tid_based_on_uid(dhd_pub_t *dhdp, void *pkt) { return; }
+#endif /* linux || LINUX */
+#endif /* SUPPORT_SET_TID */
+
+#ifdef CONFIG_SILENT_ROAM
+extern int dhd_sroam_set_mon(dhd_pub_t *dhd, bool set);
+typedef wlc_sroam_info_v1_t wlc_sroam_info_t;
+#endif /* CONFIG_SILENT_ROAM */
+
+#ifdef DHD_DUMP_FILE_WRITE_FROM_KERNEL
+#define FILE_NAME_HAL_TAG ""
+#else
+#define FILE_NAME_HAL_TAG "_hal" /* The tag name concatenated by HAL */
+#endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL */
+
+/* Given a number 'n', returns the smallest power of 2 greater than or equal to 'n' */
+static inline uint32 next_larger_power2(uint32 num)
+{
+ if (num) {
+ num--;
+ num |= (num >> 1);
+ num |= (num >> 2);
+ num |= (num >> 4);
+ num |= (num >> 8);
+ num |= (num >> 16);
+ }
+ return (num + 1);
+}
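+
+/* e.g. next_larger_power2(5) == 8, next_larger_power2(8) == 8,
+ * next_larger_power2(0) == 1
+ */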
+
+extern struct dhd_if * dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx);
+uint8 dhd_d11_slices_num_get(dhd_pub_t *dhdp);
+#ifdef WL_AUTO_QOS
+extern void dhd_wl_sock_qos_set_status(dhd_pub_t *dhdp, unsigned long on_off);
+#endif /* WL_AUTO_QOS */
+
+void *dhd_get_roam_evt(dhd_pub_t *dhdp);
+#if defined(DISABLE_HE_ENAB) || defined(CUSTOM_CONTROL_HE_ENAB)
+extern int dhd_control_he_enab(dhd_pub_t * dhd, uint8 he_enab);
+extern uint8 control_he_enab;
+#endif /* DISABLE_HE_ENAB || CUSTOM_CONTROL_HE_ENAB */
+
+#ifdef DHD_SDTC_ETB_DUMP
+
+#define DHD_SDTC_ETB_MEMPOOL_SIZE (33 * 1024)
+extern int dhd_sdtc_etb_mempool_init(dhd_pub_t *dhd);
+extern void dhd_sdtc_etb_mempool_deinit(dhd_pub_t *dhd);
+extern void dhd_sdtc_etb_init(dhd_pub_t *dhd);
+extern void dhd_sdtc_etb_deinit(dhd_pub_t *dhd);
+extern void dhd_sdtc_etb_dump(dhd_pub_t *dhd);
+#endif /* DHD_SDTC_ETB_DUMP */
+
+#ifdef DHD_TX_PROFILE
+int dhd_tx_profile_attach(dhd_pub_t *dhdp);
+int dhd_tx_profile_detach(dhd_pub_t *dhdp);
+#endif /* defined (DHD_TX_PROFILE) */
+#if defined(DHD_LB_RXP)
+uint32 dhd_lb_rxp_process_qlen(dhd_pub_t *dhdp);
+/*
+ * To avoid OOM, flow control kicks in when the total packet size in
+ * process_queue crosses LB_RXP_STOP_THR * rcpl ring size * 1500 (pkt size),
+ * and is released when it drops below LB_RXP_STRT_THR * rcpl ring size *
+ * 1500 (pkt size).
+ */
+#define LB_RXP_STOP_THR 200 /* 200 * 1024 * 1500 = 300MB */
+#define LB_RXP_STRT_THR 199 /* 199 * 1024 * 1500 = 291MB */
+#endif /* DHD_LB_RXP */
+#ifdef DHD_SUPPORT_HDM
+extern bool hdm_trigger_init;
+extern int dhd_module_init_hdm(void);
+extern void dhd_hdm_wlan_sysfs_init(void);
+extern void dhd_hdm_wlan_sysfs_deinit(struct work_struct *);
+#define SYSFS_DEINIT_MS 10
+#endif /* DHD_SUPPORT_HDM */
+
+#if defined(linux) || defined(LINUX)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0) && defined(DHD_TCP_LIMIT_OUTPUT)
+void dhd_ctrl_tcp_limit_output_bytes(int level);
+#endif /* LINUX_VERSION_CODE >= 4.19.0 && DHD_TCP_LIMIT_OUTPUT */
+#endif /* linux || LINUX */
+
+#if defined(__linux__)
+extern void dhd_schedule_delayed_dpc_on_dpc_cpu(dhd_pub_t *dhdp, ulong delay);
+extern void dhd_handle_pktdata(dhd_pub_t *dhdp, int ifidx, void *pkt, uint8 *pktdata,
+ uint32 pktid, uint32 pktlen, uint16 *pktfate, uint8 *dhd_udr, bool tx, int pkt_wake,
+ bool pkt_log);
+#else
+static INLINE void dhd_schedule_delayed_dpc_on_dpc_cpu(dhd_pub_t *dhdp, ulong delay)
+ { return; }
+static INLINE void dhd_handle_pktdata(dhd_pub_t *dhdp, int ifidx, void *pkt, uint8 *pktdata,
+ uint32 pktid, uint32 pktlen, uint16 *pktfate, uint8 *dhd_udr, bool tx, int pkt_wake,
+ bool pkt_log) { return; }
+#endif /* __linux__ */
+
+#if defined(BCMPCIE) && defined(__linux__)
+extern int dhd_check_shinfo_nrfrags(dhd_pub_t *dhdp, void *pktbuf, dmaaddr_t *pa, uint32 pktid);
+#else
+static INLINE int dhd_check_shinfo_nrfrags(dhd_pub_t *dhdp, void *pktbuf, dmaaddr_t *pa,
+ uint32 pktid) { return BCME_OK; }
+#endif /* BCMPCIE && __linux__ */
+
+#ifdef HOST_SFH_LLC
+int dhd_ether_to_8023_hdr(osl_t *osh, struct ether_header *eh, void *p);
+int dhd_8023_llc_to_ether_hdr(osl_t *osh, struct ether_header *eh8023, void *p);
+#endif
+int dhd_schedule_socram_dump(dhd_pub_t *dhdp);
+
+#ifdef DHD_AWDL
+int dhd_ether_to_awdl_llc_hdr(struct dhd_pub *dhd, struct ether_header *eh, void *p);
+int dhd_awdl_llc_to_eth_hdr(struct dhd_pub *dhd, struct ether_header *eh, void *p);
+#endif /* DHD_AWDL */
+
+#ifdef DHD_DEBUGABILITY_LOG_DUMP_RING
+#ifndef DEBUGABILITY
+#error "DHD_DEBUGABILITY_LOG_DUMP_RING without DEBUGABILITY"
+#endif /* DEBUGABILITY */
+#endif /* DHD_DEBUGABILITY_LOG_DUMP_RING */
+#ifdef WL_MONITOR
+void dhd_set_monitor(dhd_pub_t *pub, int ifidx, int val);
+#endif /* WL_MONITOR */
+#endif /* _dhd_h_ */
diff --git a/bcmdhd.101.10.361.x/dhd_bitpack.c b/bcmdhd.101.10.361.x/dhd_bitpack.c
new file mode 100755
index 0000000..c3aecaf
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_bitpack.c
@@ -0,0 +1,228 @@
+/*
+ * Bit packing and Base64 utils for EWP
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+#include <dhd_bitpack.h>
+
+#define BIT_PACK_OVERFLOW 0xFFFFFFFFu
+
+const char* base64_table = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
+
+#define BASE64_MAX_VALUE 63u
+
+#define BASE64_UNIT_LEN 6u
+
+#define BASE64_OFFSET0 0u
+#define BASE64_OFFSET1 6u
+#define BASE64_OFFSET2 12u
+
+#define MASK_UPPER_6BIT 0xfc
+#define MASK_LOWER_6BIT 0x3f
+
+#define MASK_UPPER_4BIT 0xf0
+#define MASK_LOWER_4BIT 0x0f
+
+#define MASK_UPPER_2BIT 0xc0
+#define MASK_LOWER_2BIT 0x03
+
+#define SHIFT_2BIT 2u
+#define SHIFT_4BIT 4u
+#define SHIFT_6BIT 6u
+
+#define BASE64_PADDING_MARGIN 4u
+
+/*
+ * Function: dhd_bit_pack
+ *
+ * Purpose: packs bit data into the given buffer
+ *
+ * Input Parameters:
+ * buf buffer to pack bit data
+ * buf_len total buffer length
+ * bit_offset offset in buffer (bitwise)
+ * data data to pack (max 32 bit)
+ * bit_length bit length to pack
+ *
+ * Output:
+ * Updated bit offset in buf
+ */
+int32
+dhd_bit_pack(char *buf, int32 buf_len, int32 bit_offset, uint32 data, int32 bit_length)
+{
+
+ int32 byte_shift = (bit_offset / 8);
+ int32 local_bit_offset = bit_offset % 8;
+ int32 available_bit = 8 - local_bit_offset;
+ int32 remain_bit = bit_length;
+ uint32 cropped_data;
+ int32 idx;
+ int32 total_byte = BYTE_SIZE(local_bit_offset + bit_length);
+
+ if (bit_length > 32) {
+ /* exceeded max bit length, do nothing */
+ return bit_offset;
+ }
+ if (BYTE_SIZE(bit_offset + bit_length) > buf_len) {
+ /* can't pack more bits if the expected end offset
+ * exceeds the buffer size
+ */
+ return bit_offset;
+ }
+ if (bit_length < 32 && data >= (1u << bit_length)) {
+ cropped_data = BIT_PACK_OVERFLOW << (32 - bit_length);
+ cropped_data = cropped_data >> (32 - bit_length);
+ } else {
+ cropped_data = data << (32 - bit_length);
+ cropped_data = cropped_data >> (32 - bit_length);
+ }
+
+ buf += byte_shift;
+
+ remain_bit = bit_length;
+ if (total_byte > 10) {
+ return bit_offset;
+ }
+ for (idx = 0; idx < total_byte; idx++) {
+ char temp_byte = 0x00;
+ if (idx == 0) {
+ local_bit_offset = bit_offset % 8;
+ } else {
+ local_bit_offset = 0;
+ }
+
+ available_bit = 8 - local_bit_offset;
+ remain_bit -= available_bit;
+ if (remain_bit >= 0) {
+ temp_byte = cropped_data >> remain_bit;
+ } else {
+ temp_byte = cropped_data << (-1*remain_bit);
+ }
+ *buf = *buf | temp_byte;
+ buf++;
+ }
+ bit_offset += bit_length;
+
+ return bit_offset;
+}
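+
+/* Usage sketch (hypothetical field layout): pack a 3-bit version followed by
+ * an 11-bit length back to back; the return value is the updated bit offset:
+ *
+ *	char buf[8] = {0};
+ *	int32 off = 0;
+ *	off = dhd_bit_pack(buf, sizeof(buf), off, 0x5, 3);	// off == 3
+ *	off = dhd_bit_pack(buf, sizeof(buf), off, 1500, 11);	// off == 14
+ */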
+
+static char
+dhd_base64_get_code(char input)
+{
+ if (input > BASE64_MAX_VALUE) {
+ return '=';
+ }
+ return base64_table[(int)input];
+}
+
+/*
+ * Function: dhd_base64_encode
+ *
+ * Purpose: base64 encoder that converts 8-bit input into the 6-bit-based
+ * base64 format using base64_table
+ * eg: input: hex-123456
+ * bin-0001|0010|0011|0100|0101|0110
+ * encode every 6 bit :
+ * bin-000100|100011|010001|010110
+ * base64 code :
+ * base64-EjRW
+ *
+ * Input Parameters:
+ * in_buf input buffer
+ * in_buf_len length of input buffer
+ * out_buf output buffer
+ * out_buf_len length of output buffer
+ *
+ * Output:
+ * length of encoded base64 string
+ */
+int32
+dhd_base64_encode(char* in_buf, int32 in_buf_len, char* out_buf, int32 out_buf_len)
+{
+ char* input_pos;
+ char* input_end;
+ char* base64_out;
+ char* base64_out_pos;
+ char* base64_output_end;
+ char current_byte = 0;
+ char masked_byte = 0;
+ int32 estimated_out_len = 0;
+ int32 offset = 0;
+
+ if (!in_buf || !out_buf || in_buf_len == 0 || out_buf_len == 0) {
+ /* wrong input parameters */
+ return 0;
+ }
+
+ input_pos = in_buf;
+ input_end = in_buf + in_buf_len;
+ base64_out = out_buf;
+ base64_out_pos = base64_out;
+ base64_output_end = out_buf + out_buf_len - BASE64_PADDING_MARGIN;
+ estimated_out_len = in_buf_len / 3 * 4;
+
+ if (estimated_out_len > out_buf_len) {
+ /* estimated output length is
+ * larger than output buffer size
+ */
+ return 0;
+ }
+
+ while (input_pos != input_end) {
+ if (base64_out_pos > base64_output_end) {
+ /* output buffer size exceeded, finish encoding */
+ break;
+ }
+ if (offset == BASE64_OFFSET0) {
+ current_byte = *input_pos++;
+ masked_byte = (current_byte & MASK_UPPER_6BIT) >> SHIFT_2BIT;
+ *base64_out_pos++ = dhd_base64_get_code(masked_byte);
+ masked_byte = (current_byte & MASK_LOWER_2BIT) << SHIFT_4BIT;
+ offset += BASE64_UNIT_LEN;
+ } else if (offset == BASE64_OFFSET1) {
+ current_byte = *input_pos++;
+ masked_byte |= (current_byte & MASK_UPPER_4BIT) >> SHIFT_4BIT;
+ *base64_out_pos++ = dhd_base64_get_code(masked_byte);
+ masked_byte = (current_byte & MASK_LOWER_4BIT) << SHIFT_2BIT;
+ offset += BASE64_UNIT_LEN;
+ } else if (offset == BASE64_OFFSET2) {
+ current_byte = *input_pos++;
+ masked_byte |= (current_byte & MASK_UPPER_2BIT) >> SHIFT_6BIT;
+ *base64_out_pos++ = dhd_base64_get_code(masked_byte);
+ offset += BASE64_UNIT_LEN;
+ masked_byte = (current_byte & MASK_LOWER_6BIT);
+ *base64_out_pos++ = dhd_base64_get_code(masked_byte);
+ offset = BASE64_OFFSET0;
+ }
+ }
+ if (offset == BASE64_OFFSET1) {
+ *base64_out_pos++ = dhd_base64_get_code(masked_byte);
+ *base64_out_pos++ = '=';
+ *base64_out_pos++ = '=';
+ } else if (offset == BASE64_OFFSET2) {
+ *base64_out_pos++ = dhd_base64_get_code(masked_byte);
+ *base64_out_pos++ = '=';
+ }
+
+ return base64_out_pos - base64_out;
+}
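+
+/* Usage sketch mirroring the example in the function header: encoding the
+ * three bytes 0x12 0x34 0x56 yields "EjRW" (4 output characters, no padding):
+ *
+ *	char in[3] = {0x12, 0x34, 0x56};
+ *	char out[8];
+ *	int32 n = dhd_base64_encode(in, sizeof(in), out, sizeof(out));
+ *	// n == 4, out[0..3] == 'E' 'j' 'R' 'W'
+ */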
diff --git a/bcmdhd.101.10.361.x/dhd_bitpack.h b/bcmdhd.101.10.361.x/dhd_bitpack.h
new file mode 100755
index 0000000..74bebf3
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_bitpack.h
@@ -0,0 +1,33 @@
+/*
+ * Bit packing and Base64 utils for EWP
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+#ifndef __BITPACK_H_
+#define __BITPACK_H_
+
+#define BYTE_SIZE(a) (((a) + 7) / 8)
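+/* e.g. BYTE_SIZE(1) == 1, BYTE_SIZE(8) == 1, BYTE_SIZE(9) == 2 */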
+
+extern int32 dhd_bit_pack(char *buf, int32 buf_len, int32 bit_offset, uint32 data, int32 bit_length);
+extern int32 dhd_base64_encode(char* in_buf, int32 in_buf_len, char* out_buf, int32 out_buf_len);
+#endif /* __BITPACK_H_ */
diff --git a/bcmdhd.101.10.361.x/dhd_bus.h b/bcmdhd.101.10.361.x/dhd_bus.h
new file mode 100755
index 0000000..5618b02
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_bus.h
@@ -0,0 +1,424 @@
+/*
+ * Header file describing the internal (inter-module) DHD interfaces.
+ *
+ * Provides type definitions and function prototypes used to link the
+ * DHD OS, bus, and protocol modules.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+#ifndef _dhd_bus_h_
+#define _dhd_bus_h_
+
+extern int dbus_up(struct dhd_bus *pub);
+extern int dbus_stop(struct dhd_bus *pub);
+extern int dbus_send_ctl(struct dhd_bus *pub, uint8 *buf, int len);
+extern int dbus_recv_ctl(struct dhd_bus *pub, uint8 *buf, int len);
+/*
+ * Exported from dhd bus module (dhd_usb, dhd_sdio)
+ */
+
+/* global variable for the bus */
+extern struct dhd_bus *g_dhd_bus;
+
+/* Indicate (dis)interest in finding dongles. */
+extern int dhd_bus_register(void);
+extern void dhd_bus_unregister(void);
+
+/* Download firmware image and nvram image */
+extern int dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh,
+ char *fw_path, char *nv_path, char *clm_path, char *conf_path);
+#if defined(BT_OVER_SDIO)
+extern int dhd_bus_download_btfw(struct dhd_bus *bus, osl_t *osh, char *btfw_path);
+#endif /* defined (BT_OVER_SDIO) */
+
+/* Stop bus module: clear pending frames, disable data flow */
+extern void dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex);
+
+/* Initialize bus module: prepare for communication w/dongle */
+extern int dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex);
+
+/* Get the Bus Idle Time */
+extern void dhd_bus_getidletime(dhd_pub_t *dhdp, int *idletime);
+
+/* Set the Bus Idle Time */
+extern void dhd_bus_setidletime(dhd_pub_t *dhdp, int idle_time);
+
+/* Send a data frame to the dongle. Callee disposes of txp. */
+#ifdef BCMPCIE
+extern int dhd_bus_txdata(struct dhd_bus *bus, void *txp, uint8 ifidx);
+#else
+extern int dhd_bus_txdata(struct dhd_bus *bus, void *txp);
+#endif
+
+#ifdef BCMPCIE
+extern uint16 dhd_prot_get_rxbufpost_sz(dhd_pub_t *dhd);
+extern uint16 dhd_prot_get_h2d_rx_post_active(dhd_pub_t *dhd);
+extern uint16 dhd_prot_get_d2h_rx_cpln_active(dhd_pub_t *dhd);
+extern void dhdpcie_cto_recovery_handler(dhd_pub_t *dhd);
+#endif /* BCMPCIE */
+
+/* Send/receive a control message to/from the dongle.
+ * Expects caller to enforce a single outstanding transaction.
+ */
+extern int dhd_bus_txctl(struct dhd_bus *bus, uchar *msg, uint msglen);
+extern int dhd_bus_rxctl(struct dhd_bus *bus, uchar *msg, uint msglen);
+
+/* Watchdog timer function */
+extern bool dhd_bus_watchdog(dhd_pub_t *dhd);
+
+extern int dhd_bus_oob_intr_register(dhd_pub_t *dhdp);
+extern void dhd_bus_oob_intr_unregister(dhd_pub_t *dhdp);
+extern void dhd_bus_oob_intr_set(dhd_pub_t *dhdp, bool enable);
+extern int dhd_bus_get_oob_irq_num(dhd_pub_t *dhdp);
+extern void dhd_bus_dev_pm_stay_awake(dhd_pub_t *dhdpub);
+extern void dhd_bus_dev_pm_relax(dhd_pub_t *dhdpub);
+extern bool dhd_bus_dev_pm_enabled(dhd_pub_t *dhdpub);
+
+/* Device console input function */
+extern int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen);
+#ifdef CONSOLE_DPC
+extern int dhd_bus_txcons(dhd_pub_t *dhd, uchar *msg, uint msglen);
+#endif
+
+/* Deferred processing for the bus; returning TRUE requests a reschedule */
+extern bool dhd_bus_dpc(struct dhd_bus *bus);
+extern void dhd_bus_isr(bool * InterruptRecognized, bool * QueueMiniportHandleInterrupt, void *arg);
+
+/* Check for and handle local prot-specific iovar commands */
+extern int dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name,
+ void *params, uint plen, void *arg, uint len, bool set);
+
+/* Add bus dump output to a buffer */
+extern void dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf);
+
+/* Clear any bus counters */
+extern void dhd_bus_clearcounts(dhd_pub_t *dhdp);
+
+/* return the dongle chipid */
+extern uint dhd_bus_chip(struct dhd_bus *bus);
+
+/* return the dongle chiprev */
+extern uint dhd_bus_chiprev(struct dhd_bus *bus);
+
+/* Set user-specified nvram parameters. */
+extern void dhd_bus_set_nvram_params(struct dhd_bus * bus, const char *nvram_params);
+
+extern void *dhd_bus_pub(struct dhd_bus *bus);
+extern void *dhd_bus_txq(struct dhd_bus *bus);
+extern void *dhd_bus_sih(struct dhd_bus *bus);
+extern uint dhd_bus_hdrlen(struct dhd_bus *bus);
+#ifdef BCMSDIO
+extern void dhd_bus_set_dotxinrx(struct dhd_bus *bus, bool val);
+/* return sdio io status */
+extern uint8 dhd_bus_is_ioready(struct dhd_bus *bus);
+#else
+#define dhd_bus_set_dotxinrx(a, b) do {} while (0)
+#endif
+
+#define DHD_SET_BUS_STATE_DOWN(_bus) do { \
+ (_bus)->dhd->busstate = DHD_BUS_DOWN; \
+} while (0)
+
+/* Register a dummy SDIO client driver in order to be notified of a new SDIO device */
+extern int dhd_bus_reg_sdio_notify(void* semaphore);
+extern void dhd_bus_unreg_sdio_notify(void);
+extern void dhd_txglom_enable(dhd_pub_t *dhdp, bool enable);
+extern int dhd_bus_get_ids(struct dhd_bus *bus, uint32 *bus_type, uint32 *bus_num,
+ uint32 *slot_num);
+
+#if defined(DHD_FW_COREDUMP) && (defined(BCMPCIE) || defined(BCMSDIO))
+extern int dhd_bus_mem_dump(dhd_pub_t *dhd);
+extern int dhd_bus_get_mem_dump(dhd_pub_t *dhdp);
+#else
+#define dhd_bus_mem_dump(x)
+#define dhd_bus_get_mem_dump(x)
+#endif /* DHD_FW_COREDUMP && (BCMPCIE || BCMSDIO) */
+
+#ifdef BCMPCIE
+enum {
+ /* Scratch buffer configuration update */
+ D2H_DMA_SCRATCH_BUF,
+ D2H_DMA_SCRATCH_BUF_LEN,
+
+ /* DMA Indices array buffers for: H2D WR and RD, and D2H WR and RD */
+ H2D_DMA_INDX_WR_BUF, /* update H2D WR dma indices buf base addr to dongle */
+ H2D_DMA_INDX_RD_BUF, /* update H2D RD dma indices buf base addr to dongle */
+ D2H_DMA_INDX_WR_BUF, /* update D2H WR dma indices buf base addr to dongle */
+ D2H_DMA_INDX_RD_BUF, /* update D2H RD dma indices buf base addr to dongle */
+
+ /* DHD sets/gets WR or RD index, in host's H2D and D2H DMA indices buffer */
+ H2D_DMA_INDX_WR_UPD, /* update H2D WR index in H2D WR dma indices buf */
+ H2D_DMA_INDX_RD_UPD, /* update H2D RD index in H2D RD dma indices buf */
+ D2H_DMA_INDX_WR_UPD, /* update D2H WR index in D2H WR dma indices buf */
+ D2H_DMA_INDX_RD_UPD, /* update D2H RD index in D2H RD dma indices buf */
+
+ /* DHD Indices array buffers and update for: H2D flow ring WR */
+ H2D_IFRM_INDX_WR_BUF, /* update H2D WR dma indices buf base addr to dongle */
+ H2D_IFRM_INDX_WR_UPD, /* update H2D WR index in H2D IFRM WR dma indices buf */
+
+ /* H2D and D2H Mailbox data update */
+ H2D_MB_DATA,
+ D2H_MB_DATA,
+
+ /* (Common) MsgBuf Ring configuration update */
+ RING_BUF_ADDR, /* update ring base address to dongle */
+ RING_ITEM_LEN, /* update ring item size to dongle */
+ RING_MAX_ITEMS, /* update ring max items to dongle */
+
+ /* Update of WR or RD index, for a MsgBuf Ring */
+ RING_RD_UPD, /* update ring read index from/to dongle */
+ RING_WR_UPD, /* update ring write index from/to dongle */
+
+ TOTAL_LFRAG_PACKET_CNT,
+ MAX_HOST_RXBUFS,
+ HOST_API_VERSION,
+#ifdef D2H_MINIDUMP
+ DNGL_TO_HOST_TRAP_ADDR_LEN,
+#endif /* D2H_MINIDUMP */
+ DNGL_TO_HOST_TRAP_ADDR,
+ HOST_SCB_ADDR, /* update host scb base address to dongle */
+};
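+
+/* Illustrative (hypothetical values) use of the shared-area accessors
+ * declared below: push a ring's max item count to the dongle, then read
+ * back its current read index:
+ *
+ *	uint16 max_items = 256;
+ *	uint32 rd;
+ *	dhd_bus_cmn_writeshared(bus, &max_items, sizeof(max_items),
+ *		RING_MAX_ITEMS, ringid);
+ *	dhd_bus_cmn_readshared(bus, &rd, RING_RD_UPD, ringid);
+ */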
+
+typedef void (*dhd_mb_ring_t) (struct dhd_bus *, uint32);
+typedef void (*dhd_mb_ring_2_t) (struct dhd_bus *, uint32, bool);
+extern void dhd_bus_cmn_writeshared(struct dhd_bus *bus, void * data, uint32 len, uint8 type,
+ uint16 ringid);
+extern void dhd_bus_ringbell(struct dhd_bus *bus, uint32 value);
+extern void dhd_bus_ringbell_2(struct dhd_bus *bus, uint32 value, bool devwake);
+extern void dhd_bus_cmn_readshared(struct dhd_bus *bus, void* data, uint8 type, uint16 ringid);
+extern uint32 dhd_bus_get_sharedflags(struct dhd_bus *bus);
+extern void dhd_bus_rx_frame(struct dhd_bus *bus, void* pkt, int ifidx, uint pkt_count);
+extern void dhd_bus_start_queue(struct dhd_bus *bus);
+extern void dhd_bus_stop_queue(struct dhd_bus *bus);
+extern dhd_mb_ring_t dhd_bus_get_mbintr_fn(struct dhd_bus *bus);
+extern dhd_mb_ring_2_t dhd_bus_get_mbintr_2_fn(struct dhd_bus *bus);
+extern void dhd_bus_write_flow_ring_states(struct dhd_bus *bus,
+ void * data, uint16 flowid);
+extern void dhd_bus_read_flow_ring_states(struct dhd_bus *bus,
+ void * data, uint8 flowid);
+extern int dhd_bus_flow_ring_create_request(struct dhd_bus *bus, void *flow_ring_node);
+extern void dhd_bus_clean_flow_ring(struct dhd_bus *bus, void *flow_ring_node);
+extern void dhd_bus_flow_ring_create_response(struct dhd_bus *bus, uint16 flow_id, int32 status);
+extern int dhd_bus_flow_ring_delete_request(struct dhd_bus *bus, void *flow_ring_node);
+extern void dhd_bus_flow_ring_delete_response(struct dhd_bus *bus, uint16 flowid, uint32 status);
+extern int dhd_bus_flow_ring_flush_request(struct dhd_bus *bus, void *flow_ring_node);
+extern void dhd_bus_flow_ring_flush_response(struct dhd_bus *bus, uint16 flowid, uint32 status);
+extern uint32 dhd_bus_max_h2d_queues(struct dhd_bus *bus);
+extern int dhd_bus_schedule_queue(struct dhd_bus *bus, uint16 flow_id, bool txs);
+
+#ifdef IDLE_TX_FLOW_MGMT
+extern void dhd_bus_flow_ring_resume_response(struct dhd_bus *bus, uint16 flowid, int32 status);
+#endif /* IDLE_TX_FLOW_MGMT */
+
+#ifdef BCMDBG
+extern void
+dhd_bus_flow_ring_cnt_update(struct dhd_bus *bus, uint16 flowid, uint32 txstatus);
+#endif
+
+#if defined(LINUX) || defined(linux)
+extern int dhdpcie_bus_start_host_dev(struct dhd_bus *bus);
+extern int dhdpcie_bus_stop_host_dev(struct dhd_bus *bus);
+extern int dhdpcie_bus_enable_device(struct dhd_bus *bus);
+extern int dhdpcie_bus_disable_device(struct dhd_bus *bus);
+extern int dhdpcie_bus_alloc_resource(struct dhd_bus *bus);
+extern void dhdpcie_bus_free_resource(struct dhd_bus *bus);
+extern bool dhdpcie_bus_dongle_attach(struct dhd_bus *bus);
+extern int dhd_bus_release_dongle(struct dhd_bus *bus);
+extern int dhd_bus_request_irq(struct dhd_bus *bus);
+extern int dhdpcie_get_pcieirq(struct dhd_bus *bus, unsigned int *irq);
+extern void dhd_bus_aer_config(struct dhd_bus *bus);
+#else
+static INLINE void dhd_bus_aer_config(struct dhd_bus *bus) { }
+#endif /* LINUX || linux */
+
+extern struct device * dhd_bus_to_dev(struct dhd_bus *bus);
+
+extern int dhdpcie_cto_init(struct dhd_bus *bus, bool enable);
+extern int dhdpcie_cto_cfg_init(struct dhd_bus *bus, bool enable);
+
+extern void dhdpcie_ssreset_dis_enum_rst(struct dhd_bus *bus);
+
+#ifdef DHD_FW_COREDUMP
+extern int dhd_dongle_mem_dump(void);
+#endif /* DHD_FW_COREDUMP */
+
+#ifdef IDLE_TX_FLOW_MGMT
+extern void dhd_bus_idle_tx_ring_suspend(dhd_pub_t *dhd, uint16 flow_ring_id);
+#endif /* IDLE_TX_FLOW_MGMT */
+extern void dhd_bus_handle_mb_data(struct dhd_bus *bus, uint32 d2h_mb_data);
+#endif /* BCMPCIE */
+
+/* dump the device trap information */
+extern void dhd_bus_dump_trap_info(struct dhd_bus *bus, struct bcmstrbuf *b);
+extern void dhd_bus_copy_trap_sig(struct dhd_bus *bus, trap_t *tr);
+#ifdef WL_CFGVENDOR_SEND_HANG_EVENT
+extern void dhd_dump_pcie_rc_regs_for_linkdown(dhd_pub_t *dhd, int *bytes_written);
+void copy_hang_info_linkdown(dhd_pub_t *dhd);
+void copy_ext_trap_sig(dhd_pub_t *dhd, trap_t *tr);
+void copy_hang_info_trap(dhd_pub_t *dhd);
+#endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
+
+/* Function to set default min res mask */
+extern bool dhd_bus_set_default_min_res_mask(struct dhd_bus *bus);
+
+/* Function to reset PMU registers */
+extern void dhd_bus_pmu_reg_reset(dhd_pub_t *dhdp);
+
+extern void dhd_bus_ucode_download(struct dhd_bus *bus);
+
+extern int dhd_bus_readwrite_bp_addr(dhd_pub_t *dhdp, uint addr, uint size, uint* data, bool read);
+extern int dhd_get_idletime(dhd_pub_t *dhd);
+extern bool dhd_get_rpm_state(dhd_pub_t *dhd);
+extern void dhd_set_rpm_state(dhd_pub_t *dhd, bool state);
+#ifdef BCMPCIE
+extern void dhd_bus_dump_console_buffer(struct dhd_bus *bus);
+extern void dhd_bus_intr_count_dump(dhd_pub_t *dhdp);
+extern bool dhd_bus_query_dpc_sched_errors(dhd_pub_t *dhdp);
+extern int dhd_bus_dmaxfer_lpbk(dhd_pub_t *dhdp, uint32 type);
+extern bool dhd_bus_check_driver_up(void);
+extern int dhd_bus_get_cto(dhd_pub_t *dhdp);
+extern void dhd_bus_set_linkdown(dhd_pub_t *dhdp, bool val);
+extern int dhd_bus_get_linkdown(dhd_pub_t *dhdp);
+#ifdef CONFIG_ARCH_MSM
+extern void dhd_bus_inform_ep_loaded_to_rc(dhd_pub_t *dhdp, bool up);
+#endif /* CONFIG_ARCH_MSM */
+extern int dhd_bus_checkdied(struct dhd_bus *bus, char *data, uint size);
+#else
+#define dhd_bus_dump_console_buffer(x)
+static INLINE void dhd_bus_intr_count_dump(dhd_pub_t *dhdp) { UNUSED_PARAMETER(dhdp); }
+static INLINE bool dhd_bus_query_dpc_sched_errors(dhd_pub_t *dhdp) { return 0; }
+static INLINE int dhd_bus_dmaxfer_lpbk(dhd_pub_t *dhdp, uint32 type) { return 0; }
+static INLINE bool dhd_bus_check_driver_up(void) { return FALSE; }
+static INLINE void dhd_bus_set_linkdown(dhd_pub_t *dhdp, bool val) { }
+static INLINE int dhd_bus_get_linkdown(dhd_pub_t *dhdp) { return 0; }
+static INLINE int dhd_bus_get_cto(dhd_pub_t *dhdp) { return 0; }
+static INLINE int dhd_bus_checkdied(struct dhd_bus *bus, char *data, uint size) { return 0; }
+#endif /* BCMPCIE */
+
+#if defined(BCMPCIE) && defined(EWP_ETD_PRSRV_LOGS)
+void dhdpcie_get_etd_preserve_logs(dhd_pub_t *dhd, uint8 *ext_trap_data,
+ void *event_decode_data);
+#endif
+
+extern uint16 dhd_get_chipid(struct dhd_bus *bus);
+
+#ifdef BTLOG
+extern void dhd_bus_rx_bt_log(struct dhd_bus *bus, void* pkt);
+#endif /* BTLOG */
+
+#ifdef DHD_WAKE_STATUS
+extern wake_counts_t* dhd_bus_get_wakecount(dhd_pub_t *dhd);
+extern int dhd_bus_get_bus_wake(dhd_pub_t * dhd);
+#endif /* DHD_WAKE_STATUS */
+
+#ifdef BT_OVER_SDIO
+/*
+ * SDIO-layer clock control functions exposed to other layers.
+ * These are required especially when the bus is shared between BT and WLAN
+ * and the clock must be controlled. Callers of these functions are expected
+ * to hold the sdlock.
+ */
+int __dhdsdio_clk_enable(struct dhd_bus *bus, bus_owner_t owner, int can_wait);
+int __dhdsdio_clk_disable(struct dhd_bus *bus, bus_owner_t owner, int can_wait);
+void dhdsdio_reset_bt_use_count(struct dhd_bus *bus);
+#endif /* BT_OVER_SDIO */
+
+int dhd_bus_perform_flr(struct dhd_bus *bus, bool force_fail);
+extern bool dhd_bus_get_flr_force_fail(struct dhd_bus *bus);
+
+extern bool dhd_bus_aspm_enable_rc_ep(struct dhd_bus *bus, bool enable);
+extern void dhd_bus_l1ss_enable_rc_ep(struct dhd_bus *bus, bool enable);
+
+bool dhd_bus_is_multibp_capable(struct dhd_bus *bus);
+
+#ifdef BT_OVER_PCIE
+int dhd_bus_pwr_off(dhd_pub_t *dhdp, int reason);
+int dhd_bus_pwr_on(dhd_pub_t *dhdp, int reason);
+int dhd_bus_pwr_toggle(dhd_pub_t *dhdp, int reason);
+bool dhdpcie_is_btop_chip(struct dhd_bus *bus);
+bool dhdpcie_is_bt_loaded(struct dhd_bus *bus);
+int dhdpcie_redownload_fw(dhd_pub_t *dhdp);
+extern void dhd_bus_pcie_pwr_req_reload_war(struct dhd_bus *bus);
+int dhd_bus_perform_flr_with_quiesce(dhd_pub_t *dhdp, struct dhd_bus *bus,
+ bool init_deinit_path);
+#endif /* BT_OVER_PCIE */
+
+#ifdef BCMPCIE
+extern void dhdpcie_advertise_bus_cleanup(dhd_pub_t *dhdp);
+extern void dhd_msgbuf_iovar_timeout_dump(dhd_pub_t *dhd);
+extern void dhdpcie_induce_cbp_hang(dhd_pub_t *dhd);
+#endif /* BCMPCIE */
+
+extern bool dhd_bus_force_bt_quiesce_enabled(struct dhd_bus *bus);
+extern void dhd_bwm_bt_quiesce(struct dhd_bus *bus);
+extern void dhd_bwm_bt_resume(struct dhd_bus *bus);
+
+#ifdef DHD_SSSR_DUMP
+extern int dhd_bus_fis_trigger(dhd_pub_t *dhd);
+extern int dhd_bus_fis_dump(dhd_pub_t *dhd);
+#endif /* DHD_SSSR_DUMP */
+
+#ifdef PCIE_FULL_DONGLE
+extern int dhdpcie_set_dma_ring_indices(dhd_pub_t *dhd, int32 int_val);
+#endif /* PCIE_FULL_DONGLE */
+
+#ifdef D2H_MINIDUMP
+#ifndef DHD_FW_COREDUMP
+/* Minidump depends on DHD_FW_COREDUMP to dump the minidump.
+ * This dependency is intentional, to avoid multiple work queues
+ * dumping the SOCRAM, minidump, etc.
+ */
+#error "Minidump does not work as DHD_FW_COREDUMP is not defined"
+#endif /* DHD_FW_COREDUMP */
+#ifdef BCM_BUZZZ
+/*
+ * In pciedev_shared_t, buzz_dbg_ptr and device_trap_debug_buffer_len
+ * are overloaded, so when BCM_BUZZZ is defined MINIDUMP must not be
+ * defined, and vice versa.
+ */
+#error "Minidump does not work as BCM_BUZZZ is defined"
+#endif /* BCM_BUZZZ */
+extern bool dhd_bus_is_minidump_enabled(dhd_pub_t *dhdp);
+dhd_dma_buf_t* dhd_prot_get_minidump_buf(dhd_pub_t *dhd);
+#endif /* D2H_MINIDUMP */
+
+#ifdef DHD_CFG80211_SUSPEND_RESUME
+extern void dhd_cfg80211_suspend(dhd_pub_t *dhdp);
+extern void dhd_cfg80211_resume(dhd_pub_t *dhdp);
+#endif /* DHD_CFG80211_SUSPEND_RESUME */
+
+#ifdef DHD_SDTC_ETB_DUMP
+extern int dhd_bus_get_etb_info(dhd_pub_t *dhd, uint32 etb_info_addr, etb_info_t *etb_info);
+extern int dhd_bus_get_sdtc_etb(dhd_pub_t *dhd, uint8 *sdtc_etb_mempool,
+ uint addr, uint read_bytes);
+#endif /* DHD_SDTC_ETB_DUMP */
+
+extern int dhd_socram_dump(struct dhd_bus *bus);
+
+extern int dhdpcie_get_max_eventbufpost(struct dhd_bus *bus);
+
+#ifdef DHD_FLOW_RING_STATUS_TRACE
+extern void dhd_bus_flow_ring_status_isr_trace(dhd_pub_t *dhd);
+extern void dhd_bus_flow_ring_status_dpc_trace(dhd_pub_t *dhd);
+#endif /* DHD_FLOW_RING_STATUS_TRACE */
+#endif /* _dhd_bus_h_ */
diff --git a/bcmdhd.101.10.361.x/dhd_buzzz.h b/bcmdhd.101.10.361.x/dhd_buzzz.h
new file mode 100755
index 0000000..0e04c75
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_buzzz.h
@@ -0,0 +1,224 @@
+#ifndef _DHD_BUZZZ_H_INCLUDED_
+#define _DHD_BUZZZ_H_INCLUDED_
+
+/*
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+#if defined(DHD_BUZZZ_LOG_ENABLED)
+/*
+ * Broadcom proprietary logging system. The performance counters have been
+ * removed; only the event logging interface remains.
+ */
+void dhd_buzzz_attach(void);
+void dhd_buzzz_detach(void);
+void dhd_buzzz_panic(uint32 crash);
+void dhd_buzzz_dump(void);
+void dhd_buzzz_log_disable(void);
+void dhd_buzzz_crash(void);
+
+void dhd_buzzz_log0(uint32 evt_id);
+void dhd_buzzz_log1(uint32 evt_id, uint32 arg1);
+void dhd_buzzz_log2(uint32 evt_id, uint32 arg1, uintptr arg2);
+
+void dhd_buzzz_fmt_reg(uint32 id, char * fmt);
+
+extern void* dhd_os_create_buzzz_thread(void);
+extern void dhd_os_destroy_buzzz_thread(void *thr_hdl);
+extern void dhd_os_sched_buzzz_thread(void *thr_hdl);
+
+#undef BUZZZ_EVT
+#define BUZZZ_EVT(ID) BUZZZ_EVT__## ID,
+
+#undef BUZZZ_FMT
+#define BUZZZ_FMT(ID, format) \
+ dhd_buzzz_fmt_reg(BUZZZ_EVT__## ID, "\t" format);
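+
+/* BUZZZ_EVT(ID) expands to an enum entry named BUZZZ_EVT__ID, and
+ * BUZZZ_FMT(ID, format) registers the matching printf-style format string
+ * for that id, so the event enum and the format table below stay in sync.
+ * For example, BUZZZ_FMT(DHD, "DHD events") expands to
+ * dhd_buzzz_fmt_reg(BUZZZ_EVT__DHD, "\t" "DHD events");
+ */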
+
+typedef enum buzzz_evt_id
+{
+ BUZZZ_EVT__DHD = 100, /* BUZZZ_EVT(DHD) */
+ BUZZZ_EVT(GENERAL_LOCK)
+ BUZZZ_EVT(GENERAL_UNLOCK)
+ BUZZZ_EVT(FLOWRING_LOCK)
+ BUZZZ_EVT(FLOWRING_UNLOCK)
+ BUZZZ_EVT(FLOWID_LOCK)
+ BUZZZ_EVT(FLOWID_UNLOCK)
+
+ BUZZZ_EVT(START_XMIT_BGN)
+ BUZZZ_EVT(START_XMIT_END)
+ BUZZZ_EVT(PROCESS_CTRL_BGN)
+ BUZZZ_EVT(PROCESS_CTRL_END)
+ BUZZZ_EVT(UPDATE_TXFLOWRINGS_BGN)
+ BUZZZ_EVT(UPDATE_TXFLOWRINGS_END)
+ BUZZZ_EVT(PROCESS_TXCPL_BGN)
+ BUZZZ_EVT(PROCESS_TXCPL_END)
+ BUZZZ_EVT(PROCESS_RXCPL_BGN)
+ BUZZZ_EVT(PROCESS_RXCPL_END)
+
+ BUZZZ_EVT(GET_SRC_ADDR)
+ BUZZZ_EVT(WRITE_COMPLETE)
+ BUZZZ_EVT(ALLOC_RING_SPACE)
+ BUZZZ_EVT(ALLOC_RING_SPACE_RET)
+ BUZZZ_EVT(ALLOC_RING_SPACE_FAIL)
+
+ BUZZZ_EVT(PKTID_MAP_CLEAR)
+ BUZZZ_EVT(PKTID_NOT_AVAILABLE)
+ BUZZZ_EVT(PKTID_MAP_RSV)
+ BUZZZ_EVT(PKTID_MAP_SAVE)
+ BUZZZ_EVT(PKTID_MAP_ALLOC)
+ BUZZZ_EVT(PKTID_MAP_FREE)
+ BUZZZ_EVT(LOCKER_INUSE_ABORT)
+ BUZZZ_EVT(BUFFER_TYPE_ABORT1)
+ BUZZZ_EVT(BUFFER_TYPE_ABORT2)
+
+ BUZZZ_EVT(UPD_READ_IDX)
+ BUZZZ_EVT(STORE_RXCPLN_RD)
+ BUZZZ_EVT(EARLY_UPD_RXCPLN_RD)
+
+ BUZZZ_EVT(POST_TXDATA)
+ BUZZZ_EVT(RETURN_RXBUF)
+ BUZZZ_EVT(RXBUF_POST)
+ BUZZZ_EVT(RXBUF_POST_EVENT)
+ BUZZZ_EVT(RXBUF_POST_IOCTL)
+ BUZZZ_EVT(RXBUF_POST_CTRL_PKTGET_FAIL)
+ BUZZZ_EVT(RXBUF_POST_PKTGET_FAIL)
+ BUZZZ_EVT(RXBUF_POST_PKTID_FAIL)
+
+ BUZZZ_EVT(DHD_DUPLICATE_ALLOC)
+ BUZZZ_EVT(DHD_DUPLICATE_FREE)
+ BUZZZ_EVT(DHD_TEST_IS_ALLOC)
+ BUZZZ_EVT(DHD_TEST_IS_FREE)
+
+ BUZZZ_EVT(DHD_PROT_IOCT_BGN)
+ BUZZZ_EVT(DHDMSGBUF_CMPLT_BGN)
+ BUZZZ_EVT(DHDMSGBUF_CMPLT_END)
+ BUZZZ_EVT(DHD_PROT_IOCT_END)
+ BUZZZ_EVT(DHD_FILLUP_IOCT_REQST_BGN)
+ BUZZZ_EVT(DHD_FILLUP_IOCT_REQST_END)
+ BUZZZ_EVT(DHD_MSGBUF_RXBUF_POST_IOCTLRESP_BUFS_BGN)
+ BUZZZ_EVT(DHD_MSGBUF_RXBUF_POST_IOCTLRESP_BUFS_END)
+ BUZZZ_EVT(DHD_PROT_IOCTCMPLT_PROCESS_ONE)
+ BUZZZ_EVT(DHD_PROT_IOCTCMPLT_PROCESS_TWO)
+ BUZZZ_EVT(DHD_PROT_EVENT_PROCESS_BGN)
+ BUZZZ_EVT(DHD_PROT_EVENT_PROCESS_END)
+ BUZZZ_EVT(DHD_PROT_D2H_SYNC_LIVELOCK)
+ BUZZZ_EVT(DHD_IOCTL_BUFPOST)
+ BUZZZ_EVT(DHD_EVENT_BUFPOST)
+ BUZZZ_EVT(DHD_PROC_MSG_TYPE)
+ BUZZZ_EVT(DHD_BUS_RXCTL_ONE)
+ BUZZZ_EVT(DHD_BUS_RXCTL_TWO)
+} buzzz_evt_id_t;
+
+static inline void dhd_buzzz_fmt_init(void)
+{
+ BUZZZ_FMT(DHD, "DHD events")
+ BUZZZ_FMT(GENERAL_LOCK, "+++LOCK GENERAL flags<0x%08x>")
+ BUZZZ_FMT(GENERAL_UNLOCK, "---UNLK GENERAL flags<0x%08x>")
+ BUZZZ_FMT(FLOWRING_LOCK, "+++LOCK FLOWRING flags<0x%08x>")
+ BUZZZ_FMT(FLOWRING_UNLOCK, "---UNLK FLOWRING flags<0x%08x>")
+ BUZZZ_FMT(FLOWID_LOCK, "+++LOCK FLOWID flags<0x%08x>")
+ BUZZZ_FMT(FLOWID_UNLOCK, "---UNLK FLOWID flags<0x%08x>")
+
+ BUZZZ_FMT(START_XMIT_BGN, "{ dhd_start_xmit() ifidx<%u> skb<0x%p>")
+ BUZZZ_FMT(START_XMIT_END, "} dhd_start_xmit()")
+ BUZZZ_FMT(PROCESS_CTRL_BGN, "{ dhd_prot_process_ctrlbuf()")
+ BUZZZ_FMT(PROCESS_CTRL_END, "} dhd_prot_process_ctrlbuf()")
+ BUZZZ_FMT(UPDATE_TXFLOWRINGS_BGN, "{ dhd_update_txflowrings()")
+ BUZZZ_FMT(UPDATE_TXFLOWRINGS_END, "} dhd_update_txflowrings()")
+ BUZZZ_FMT(PROCESS_TXCPL_BGN, "{ dhd_prot_process_msgbuf_txcpl()")
+ BUZZZ_FMT(PROCESS_TXCPL_END, "} dhd_prot_process_msgbuf_txcpl()")
+ BUZZZ_FMT(PROCESS_RXCPL_BGN, "{ dhd_prot_process_msgbuf_rxcpl()")
+ BUZZZ_FMT(PROCESS_RXCPL_END, "} dhd_prot_process_msgbuf_rxcpl()")
+
+ BUZZZ_FMT(GET_SRC_ADDR, "bytes<%u> @<0x%p> prot_get_src_addr()")
+ BUZZZ_FMT(WRITE_COMPLETE, "WR<%u> prot_ring_write_complete")
+ BUZZZ_FMT(ALLOC_RING_SPACE, "{ dhd_alloc_ring_space nitems<%d>")
+ BUZZZ_FMT(ALLOC_RING_SPACE_RET, "} dhd_alloc_ring_space() alloc<%d> @<0x%p>")
+ BUZZZ_FMT(ALLOC_RING_SPACE_FAIL, "FAILURE } dhd_alloc_ring_space() alloc<%d>")
+
+ BUZZZ_FMT(PKTID_MAP_CLEAR, "pktid map clear")
+ BUZZZ_FMT(PKTID_NOT_AVAILABLE, "FAILURE pktid pool depletion failures<%u>")
+ BUZZZ_FMT(PKTID_MAP_RSV, "pktid<%u> pkt<0x%p> dhd_pktid_map_reserve()")
+ BUZZZ_FMT(PKTID_MAP_SAVE, "pktid<%u> pkt<0x%p> dhd_pktid_map_save()")
+ BUZZZ_FMT(PKTID_MAP_ALLOC, "pktid<%u> pkt<0x%p> dhd_pktid_map_alloc()")
+ BUZZZ_FMT(PKTID_MAP_FREE, "pktid<%u> pkt<0x%p> dhd_pktid_map_free()")
+ BUZZZ_FMT(LOCKER_INUSE_ABORT, "ASSERT pktid<%u> pkt<0x%p> locker->inuse")
+ BUZZZ_FMT(BUFFER_TYPE_ABORT1, "ASSERT pktid<%u> pkt<0x%p> locker->dma")
+ BUZZZ_FMT(BUFFER_TYPE_ABORT2, "ASSERT locker->dma<%u> buf_type<%u>")
+
+ BUZZZ_FMT(UPD_READ_IDX, "RD<%u> prot_upd_read_idx()")
+ BUZZZ_FMT(STORE_RXCPLN_RD, "RD<%u> prot_store_rxcpln_read_idx()")
+ BUZZZ_FMT(EARLY_UPD_RXCPLN_RD, "RD<%u> prot_early_upd_rxcpln_read_idx()")
+
+ BUZZZ_FMT(POST_TXDATA, "flr<%u> pkt<0x%p> dhd_prot_txdata()")
+ BUZZZ_FMT(RETURN_RXBUF, "cnt<%u> dhd_prot_return_rxbuf()")
+ BUZZZ_FMT(RXBUF_POST, "cnt<%u> dhd_prot_rxbufpost()")
+ BUZZZ_FMT(RXBUF_POST_EVENT, "event dhd_prot_rxbufpost_ctrl()")
+ BUZZZ_FMT(RXBUF_POST_IOCTL, "ioctl dhd_prot_rxbufpost_ctrl()")
+ BUZZZ_FMT(RXBUF_POST_CTRL_PKTGET_FAIL, "FAILURE pktget dhd_prot_rxbufpost_ctrl()")
+ BUZZZ_FMT(RXBUF_POST_PKTGET_FAIL, "FAILURE pktget loop<%u> dhd_prot_rxbufpost()")
+ BUZZZ_FMT(RXBUF_POST_PKTID_FAIL, "FAILURE pktid loop<%u> dhd_prot_rxbufpost()")
+
+ BUZZZ_FMT(DHD_DUPLICATE_ALLOC, "ASSERT dhd_pktid_audit(%u) DHD_DUPLICATE_ALLOC")
+ BUZZZ_FMT(DHD_DUPLICATE_FREE, "ASSERT dhd_pktid_audit(%u) DHD_DUPLICATE_FREE")
+ BUZZZ_FMT(DHD_TEST_IS_ALLOC, "ASSERT dhd_pktid_audit(%u) DHD_TEST_IS_ALLOC")
+ BUZZZ_FMT(DHD_TEST_IS_FREE, "ASSERT dhd_pktid_audit(%u) DHD_TEST_IS_FREE")
+
+ BUZZZ_FMT(DHD_PROT_IOCT_BGN, "{ dhd_prot_ioct pending<%u> thread<0x%p>")
+ BUZZZ_FMT(DHDMSGBUF_CMPLT_BGN, "{ dhdmsgbuf_cmplt bus::retlen<%u> bus::pktid<%u>")
+ BUZZZ_FMT(DHDMSGBUF_CMPLT_END, "} dhdmsgbuf_cmplt resp_len<%d> pktid<%u>")
+ BUZZZ_FMT(DHD_PROT_IOCT_END, "} dhd_prot_ioct pending<%u> thread<0x%p>")
+ BUZZZ_FMT(DHD_FILLUP_IOCT_REQST_BGN, "{ dhd_fillup_ioct_reqst_ptrbased cmd<%u> transid<%u>")
+ BUZZZ_FMT(DHD_FILLUP_IOCT_REQST_END,
+ "} dhd_fillup_ioct_reqst_ptrbased transid<%u> bus::pktid<%u>")
+ BUZZZ_FMT(DHD_MSGBUF_RXBUF_POST_IOCTLRESP_BUFS_BGN,
+ "{ dhd_msgbuf_rxbuf_post_ioctlresp_bufs cur_posted<%u> bus::pktid<%u>")
+ BUZZZ_FMT(DHD_MSGBUF_RXBUF_POST_IOCTLRESP_BUFS_END,
+ "} dhd_msgbuf_rxbuf_post_ioctlresp_bufs cur_posted<%u> bus::pktid<%u>")
+ BUZZZ_FMT(DHD_PROT_IOCTCMPLT_PROCESS_ONE,
+ "{ dhd_prot_ioctlcmplt_process cmd<%d> transid<%d>")
+ BUZZZ_FMT(DHD_PROT_IOCTCMPLT_PROCESS_TWO,
+ "} dhd_prot_ioctlcmplt_process resplen<%u> pktid<%u>")
+ BUZZZ_FMT(DHD_PROT_EVENT_PROCESS_BGN, "{ dhd_prot_event_process pktid<%u>")
+ BUZZZ_FMT(DHD_PROT_EVENT_PROCESS_END, "} dhd_prot_event_process buflen<%u> pkt<0x%p>")
+ BUZZZ_FMT(DHD_PROT_D2H_SYNC_LIVELOCK, " dhd_prot_d2h_sync_livelock seqnum<%u>")
+ BUZZZ_FMT(DHD_IOCTL_BUFPOST, " dhd_prot_rxbufpost_ctrl ioctl pktid<%u> phyaddr<0x%x>")
+ BUZZZ_FMT(DHD_EVENT_BUFPOST, " dhd_prot_rxbufpost_ctrl event pktid<%u> phyaddr<0x%x>")
+ BUZZZ_FMT(DHD_PROC_MSG_TYPE, " dhd_process_msgtype msg<0x%x> epoch<%u>")
+ BUZZZ_FMT(DHD_BUS_RXCTL_ONE, "dhd_bus_rxctl prev resplen<%u> pktid<%u>")
+ BUZZZ_FMT(DHD_BUS_RXCTL_TWO, "dhd_bus_rxctl cur resplen<%u> pktid<%u>")
+}
+
+#define BUZZZ_LOG(ID, N, ARG...) dhd_buzzz_log ##N(BUZZZ_EVT__ ##ID, ##ARG)
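+
+/* Example: BUZZZ_LOG(START_XMIT_BGN, 2, ifidx, (uintptr)skb) expands to
+ * dhd_buzzz_log2(BUZZZ_EVT__START_XMIT_BGN, ifidx, (uintptr)skb).
+ */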
+
+#else /* DHD_BUZZZ_LOG_ENABLED */
+/*
+ * Broadcom logging system - Empty implementation
+ */
+
+#define dhd_buzzz_attach() do { /* noop */ } while (0)
+#define dhd_buzzz_detach() do { /* noop */ } while (0)
+#define dhd_buzzz_panic(x) do { /* noop */ } while (0)
+#define BUZZZ_LOG(ID, N, ARG...) do { /* noop */ } while (0)
+
+#endif /* DHD_BUZZZ_LOG_ENABLED */
+
+#endif /* _DHD_BUZZZ_H_INCLUDED_ */
diff --git a/bcmdhd.101.10.361.x/dhd_ccode.c b/bcmdhd.101.10.361.x/dhd_ccode.c
new file mode 100755
index 0000000..62b9eec
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_ccode.c
@@ -0,0 +1,274 @@
+
+#ifdef CCODE_LIST
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+#endif /* CONFIG_COMPAT */
+#include <typedefs.h>
+#include <dhd_config.h>
+
+#ifdef BCMSDIO
+#define CCODE_43438
+#define CCODE_43436
+#define CCODE_43455C0
+#endif
+#if defined(BCMSDIO) || defined(BCMPCIE)
+#define CCODE_4356A2
+#define CCODE_4359C0
+#endif
+#if defined(BCMPCIE)
+#define CCODE_4375B4
+#endif
+#ifdef BCMDBUS
+#define CCODE_4358U
+#endif
+
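+/* Each country list below is a space-separated set of "<ccode>/<rev>" pairs;
+ * dhd_ccode_map_country_list() matches the country code and parses the number
+ * after the '/' as the regulatory revision (e.g. "RU/13" maps RU to rev 13).
+ */
+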
+#ifdef BCMSDIO
+#ifdef CCODE_43438
+const char ccode_43438[] = "RU/13";
+#else
+const char ccode_43438[] = "";
+#endif
+
+#ifdef CCODE_43436
+const char ccode_43436[] = \
+"AE/1 AR/1 AT/1 AU/2 "\
+"BE/1 BG/1 BN/1 "\
+"CA/2 CH/1 CN/38 CY/1 CZ/1 "\
+"DE/3 DK/1 "\
+"EE/1 ES/1 "\
+"FI/1 FR/1 "\
+"GB/1 GR/1 "\
+"HR/1 HU/1 "\
+"ID/5 IE/1 IS/1 IT/1 "\
+"JP/3 "\
+"KR/4 KW/1 "\
+"LI/1 LT/1 LU/1 LV/1 "\
+"MA/1 MT/1 MX/1 "\
+"NL/1 NO/1 "\
+"PL/1 PT/1 PY/1 "\
+"RO/1 RU/5 "\
+"SE/1 SI/1 SK/1 "\
+"TR/7 TW/2 "\
+"US/26 "\
+"XZ/11";
+#else
+const char ccode_43436[] = "";
+#endif
+
+#ifdef CCODE_43455C0
+const char ccode_43455c0[] = \
+"AE/6 AG/2 AI/1 AL/2 AS/12 AT/4 AU/6 AW/2 AZ/2 "\
+"BA/2 BD/1 BE/4 BG/4 BH/4 BM/12 BN/4 BR/2 BS/2 BY/3 "\
+"CA/2 CH/4 CN/38 CO/17 CR/17 CY/4 CZ/4 "\
+"DE/7 DK/4 "\
+"EC/21 EE/4 EG/13 ES/4 ET/2 "\
+"FI/4 FR/5 "\
+"GB/6 GD/2 GF/2 GP/2 GR/4 GT/1 GU/30 "\
+"HK/2 HR/4 HU/4 "\
+"ID/1 IE/5 IL/14 IN/3 IS/4 IT/4 "\
+"JO/3 JP/45 "\
+"KH/2 KR/96 KW/5 KY/3 "\
+"LA/2 LB/5 LI/4 LK/1 LS/2 LT/4 LU/3 LV/4 "\
+"MA/2 MC/1 MD/2 ME/2 MK/2 MN/1 MQ/2 MR/2 MT/4 MU/2 MV/3 MW/1 MX/44 MY/3 "\
+"NI/2 NL/4 NO/4 NZ/4 "\
+"OM/4 "\
+"PA/17 PE/20 PH/5 PL/4 PR/38 PT/4 PY/2 "\
+"Q2/993 "\
+"RE/2 RO/4 RS/2 RU/13 "\
+"SE/4 SI/4 SK/4 SV/25 "\
+"TH/5 TN/1 TR/7 TT/3 TW/65 "\
+"UA/8 US/988 "\
+"VA/2 VE/3 VG/2 VN/4 "\
+"XZ/11 "\
+"YT/2 "\
+"ZA/6";
+#else
+const char ccode_43455c0[] = "";
+#endif
+#endif
+
+#ifdef CCODE_4356A2
+const char ccode_4356a2[] = \
+"AE/6 AG/2 AI/1 AL/2 AN/2 AR/21 AS/12 AT/4 AU/6 AW/2 AZ/2 "\
+"BA/2 BD/2 BE/4 BG/4 BH/4 BM/12 BN/4 BR/4 BS/2 BY/3 "\
+"CA/31 CH/4 CN/38 CO/17 CR/17 CY/4 CZ/4 "\
+"DE/7 DK/4 DZ/1 "\
+"EC/21 EE/4 ES/4 ET/2 "\
+"FI/4 FR/5 "\
+"GB/6 GD/2 GF/2 GP/2 GR/4 GT/1 GU/12 "\
+"HK/2 HR/4 HU/4 "\
+"ID/13 IE/5 IL/7 IN/28 IS/4 IT/4 "\
+"JO/3 JP/45 "\
+"KH/2 KR/57 KW/5 KY/3 "\
+"LA/2 LB/5 LI/4 LK/1 LS/2 LT/4 LU/3 LV/4 "\
+"MA/2 MC/1 MD/2 ME/2 MK/2 MN/1 MO/2 MR/2 MT/4 MQ/2 MU/2 MV/3 MW/1 MX/20 MY/16 "\
+"NI/2 NL/4 NO/4 NP/3 NZ/4 "\
+"OM/4 "\
+"PA/17 PE/20 PG/2 PH/5 PL/4 PR/20 PT/4 PY/2 "\
+"RE/2 RO/4 RS/2 RU/986 "\
+"SE/4 SG/19 SI/4 SK/4 SN/2 SV/19 "\
+"TH/9 TN/1 TR/7 TT/3 TW/1 "\
+"UA/8 UG/2 US/1 UY/1 "\
+"VA/2 VE/3 VG/2 VI/13 VN/4 "\
+"XZ/11 "\
+"YT/2 "\
+"ZM/2 "\
+"E0/32";
+#else
+const char ccode_4356a2[] = "";
+#endif
+
+#ifdef CCODE_4359C0
+const char ccode_4359c0[] = \
+"AD/1 AE/6 AG/2 AI/1 AL/3 AS/12 AT/21 AU/6 AW/2 AZ/8 "\
+"BA/4 BD/1 BE/19 BG/18 BH/4 BM/12 BN/4 BR/2 BS/2 BY/3 "\
+"CA/2 CN/38 CO/17 CR/17 CY/18 CZ/18 "\
+"DE/30 DK/19 "\
+"E0/32 EC/21 EE/18 EG/13 ES/21 ET/2 "\
+"FI/19 FR/21 "\
+"GB/996 GD/2 GE/1 GF/2 GP/2 GR/18 GT/1 GU/30 "\
+"HK/2 HR/18 HU/18 "\
+"ID/1 IE/21 IL/276 IN/3 IS/17 IT/20 "\
+"JO/3 JP/967 "\
+"KH/2 KR/70 KW/5 KY/3 "\
+"LA/2 LB/5 LI/17 LK/1 LS/2 LT/18 LU/18 LV/18 "\
+"MA/2 MC/2 MD/3 ME/5 MK/4 MN/1 MQ/2 MR/2 MT/18 MU/2 MV/3 MW/1 MX/44 MY/3 "\
+"NI/2 NL/19 NO/18 NZ/4 "\
+"OM/4 "\
+"PA/17 PE/20 PH/5 PL/18 PR/38 PT/20 PY/2 "\
+"Q1/947 Q2/993 "\
+"RE/2 RO/18 RS/4 RU/986 "\
+"SE/19 SI/18 SK/18 SM/1 SV/25 "\
+"TH/5 TN/1 TR/18 TT/3 TW/980 "\
+"UA/16 US/988 "\
+"VA/3 VE/3 VG/2 VN/4 "\
+"XZ/11 "\
+"YT/2 "\
+"ZA/6";
+#else
+const char ccode_4359c0[] = "";
+#endif
+
+#ifdef CCODE_4375B4
+const char ccode_4375b4[] = \
+"AE/6 AL/2 AM/1 AN/5 AR/21 AT/4 AU/6 AZ/2 "\
+"BA/2 BE/4 BG/4 BH/4 BN/4 BO/5 BR/17 BY/3 "\
+"CA/2 CH/4 CL/7 CN/38 CO/17 CR/17 CY/4 CZ/4 "\
+"DE/7 DK/4 DZ/2 EC/18 EE/4 EG/13 ES/4 "\
+"FI/4 FR/5 "\
+"GB/6 GR/4 "\
+"HK/999 HN/8 HR/4 HU/4 "\
+"ID/5 IE/5 IL/7 IN/3 IS/4 IT/4 "\
+"JO/3 JP/72 "\
+"KE/1 KR/96 KW/5 KZ/5 "\
+"LA/2 LB/5 LI/4 LK/2 LT/4 LU/4 LV/4 "\
+"MA/7 MC/1 ME/2 MK/2 MO/4 MT/4 MX/20 MY/19 "\
+"NL/4 NO/4 NZ/4 "\
+"OM/4 "\
+"PA/17 PE/20 PH/5 PK/2 PL/4 PR/20 PT/4 "\
+"RO/4 RU/62 "\
+"SA/5 SE/4 SG/12 SI/4 SK/4 SV/17 "\
+"TH/5 TN/1 TR/7 TT/3 TW/65 "\
+"UA/16 US/140 UY/10 "\
+"VE/3 VN/4 "\
+"XZ/11 "\
+"ZA/19";
+#else
+const char ccode_4375b4[] = "";
+#endif
+
+#ifdef CCODE_4358U
+const char ccode_4358u[] = \
+"BE/4 BR/4 "\
+"CA/2 CH/4 CN/38 CY/4 "\
+"DE/7 DK/4 "\
+"ES/4 "\
+"FI/4 FR/5 "\
+"GB/6 GR/4 "\
+"HK/2 HU/4 "\
+"IE/5 IL/7 IS/4 IT/4 "\
+"JP/72 "\
+"KE/0 KR/4 "\
+"MY/3 "\
+"NL/4 "\
+"PT/4 "\
+"SA/5 SE/4 SG/0 SZ/0 "\
+"TH/5 TR/7 TW/230 "\
+"US/0 "\
+"VN/4";
+#else
+const char ccode_4358u[] = "";
+#endif
+
+typedef struct ccode_list_map_t {
+ uint chip;
+ uint chiprev;
+ const char *ccode_list;
+ const char *ccode_ww;
+} ccode_list_map_t;
+
+extern const char ccode_43438[];
+extern const char ccode_43455c0[];
+extern const char ccode_4356a2[];
+extern const char ccode_4359c0[];
+extern const char ccode_4358u[];
+
+const ccode_list_map_t ccode_list_map[] = {
+ /* ChipID Chiprev ccode */
+#ifdef BCMSDIO
+ {BCM43430_CHIP_ID, 0, ccode_43438, ""},
+ {BCM43430_CHIP_ID, 1, ccode_43438, ""},
+ {BCM43430_CHIP_ID, 2, ccode_43436, ""},
+ {BCM4345_CHIP_ID, 6, ccode_43455c0, "XZ/11"},
+ {BCM43454_CHIP_ID, 6, ccode_43455c0, "XZ/11"},
+ {BCM4345_CHIP_ID, 9, ccode_43455c0, "XZ/11"},
+ {BCM43454_CHIP_ID, 9, ccode_43455c0, "XZ/11"},
+ {BCM4354_CHIP_ID, 2, ccode_4356a2, "XZ/11"},
+ {BCM4356_CHIP_ID, 2, ccode_4356a2, "XZ/11"},
+ {BCM4371_CHIP_ID, 2, ccode_4356a2, "XZ/11"},
+ {BCM4359_CHIP_ID, 9, ccode_4359c0, "XZ/11"},
+#endif
+#ifdef BCMPCIE
+ {BCM4354_CHIP_ID, 2, ccode_4356a2, "XZ/11"},
+ {BCM4356_CHIP_ID, 2, ccode_4356a2, "XZ/11"},
+ {BCM4359_CHIP_ID, 9, ccode_4359c0, "XZ/11"},
+ {BCM4375_CHIP_ID, 5, ccode_4375b4, "XZ/11"},
+#endif
+#ifdef BCMDBUS
+ {BCM43569_CHIP_ID, 2, ccode_4358u, "XW/0"},
+#endif
+};
+
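+/*
+ * Look up cspec->ccode in the per-chip country list and fill in cspec->rev.
+ * Returns 0 on a match. On a miss, the worldwide fallback (ccode_ww, e.g.
+ * "XZ/11") is copied into cspec, but -1 is still returned so the caller can
+ * tell that the requested country was not in the list.
+ */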
+int
+dhd_ccode_map_country_list(dhd_pub_t *dhd, wl_country_t *cspec)
+{
+ int bcmerror = -1, i;
+ uint chip = dhd->conf->chip, chiprev = dhd->conf->chiprev;
+ const char *ccode_list = NULL, *ccode_ww = NULL;
+ char *pch;
+
+ for (i = 0; i < (int)(sizeof(ccode_list_map)/sizeof(ccode_list_map[0])); i++) {
+ const ccode_list_map_t* row = &ccode_list_map[i];
+ if (row->chip == chip && row->chiprev == chiprev) {
+ ccode_list = row->ccode_list;
+ ccode_ww = row->ccode_ww;
+ break;
+ }
+ }
+
+ if (ccode_list) {
+ pch = strstr(ccode_list, cspec->ccode);
+ if (pch) {
+ cspec->rev = (int)simple_strtol(pch + strlen(cspec->ccode) + 1, NULL, 0);
+ bcmerror = 0;
+ }
+ }
+
+ if (bcmerror && ccode_ww && strlen(ccode_ww) >= 4) {
+ memcpy(cspec->ccode, ccode_ww, 2);
+ cspec->rev = (int)simple_strtol(ccode_ww + 3, NULL, 0);
+ }
+
+ return bcmerror;
+}
+#endif
diff --git a/bcmdhd.101.10.361.x/dhd_cdc.c b/bcmdhd.101.10.361.x/dhd_cdc.c
new file mode 100755
index 0000000..0152c18
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_cdc.c
@@ -0,0 +1,1035 @@
+/*
+ * DHD Protocol Module for CDC and BDC.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ *
+ * BDC is like CDC, except it includes a header for data packets to convey
+ * packet priority over the bus, and flags (e.g. to indicate checksum status
+ * for dongle offload.)
+ */
+
+#include <typedefs.h>
+#include <osl.h>
+
+#include <bcmutils.h>
+#include <bcmcdc.h>
+#include <bcmendian.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_proto.h>
+#include <dhd_bus.h>
+#include <dhd_dbg.h>
+
+#ifdef EXT_STA
+#include <siutils.h>
+#include <wlc_cfg.h>
+#include <wlc_pub.h>
+#endif /* EXT_STA */
+
+#ifdef PROP_TXSTATUS
+#include <wlfc_proto.h>
+#include <dhd_wlfc.h>
+#endif
+#ifdef BCMDBUS
+#include <dhd_config.h>
+#endif /* BCMDBUS */
+
+#define RETRIES 2 /* # of retries to retrieve matching ioctl response */
+#define BUS_HEADER_LEN (24+DHD_SDALIGN) /* Must be at least SDPCM_RESERVE
+ * defined in dhd_sdio.c (amount of header that might be added)
+ * plus any space that might be needed for alignment padding.
+ */
+#define ROUND_UP_MARGIN 2048 /* Biggest SDIO block size possible for
+ * round off at the end of buffer
+ */
+
+/* This value is from Legacy chipsets */
+#define DEFAULT_WLC_API_VERSION_MAJOR 3
+#define DEFAULT_WLC_API_VERSION_MINOR 0
+
+typedef struct dhd_prot {
+ uint16 reqid;
+ uint8 pending;
+ uint32 lastcmd;
+#ifdef BCMDBUS
+ uint ctl_completed;
+#endif /* BCMDBUS */
+ uint8 bus_header[BUS_HEADER_LEN];
+ cdc_ioctl_t msg;
+ unsigned char buf[WLC_IOCTL_MAXLEN + ROUND_UP_MARGIN];
+} dhd_prot_t;
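+
+/* Note: the ioctl payload in buf[] must immediately follow msg in memory;
+ * dhd_prot_attach() verifies this layout and fails the attach otherwise.
+ */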
+
+uint16
+dhd_prot_get_ioctl_trans_id(dhd_pub_t *dhdp)
+{
+ /* SDIO does not have ioctl_trans_id yet, so return -1 */
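+ /* (with a uint16 return type, the -1 comes back as 0xFFFF) */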
+ return -1;
+}
+
+static int
+dhdcdc_msg(dhd_pub_t *dhd)
+{
+#ifdef BCMDBUS
+ int timeout = 0;
+#endif /* BCMDBUS */
+ int err = 0;
+ dhd_prot_t *prot = dhd->prot;
+ int len = ltoh32(prot->msg.len) + sizeof(cdc_ioctl_t);
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ DHD_OS_WAKE_LOCK(dhd);
+
+ /* NOTE : cdc->msg.len holds the desired length of the buffer to be
+ * returned. Only up to CDC_MAX_MSG_SIZE of this buffer area
+ * is actually sent to the dongle
+ */
+ if (len > CDC_MAX_MSG_SIZE)
+ len = CDC_MAX_MSG_SIZE;
+
+ /* Send request */
+#ifdef BCMDBUS
+ prot->ctl_completed = FALSE;
+ err = dbus_send_ctl(dhd->bus, (void *)&prot->msg, len);
+ if (err) {
+ DHD_ERROR(("dbus_send_ctl error=0x%x\n", err));
+ DHD_OS_WAKE_UNLOCK(dhd);
+ return err;
+ }
+#else
+ err = dhd_bus_txctl(dhd->bus, (uchar*)&prot->msg, len);
+#endif /* BCMDBUS */
+
+#ifdef BCMDBUS
+ timeout = dhd_os_ioctl_resp_wait(dhd, &prot->ctl_completed);
+ if ((!timeout) || (!prot->ctl_completed)) {
+ DHD_ERROR(("Txctl timeout %d ctl_completed %d\n",
+ timeout, prot->ctl_completed));
+ DHD_ERROR(("Txctl wait timed out\n"));
+ err = -1;
+ }
+#endif /* BCMDBUS */
+#if defined(BCMDBUS) && defined(INTR_EP_ENABLE)
+ /* If the ctl write is successfully completed, wait for an acknowledgement
+ * that indicates that it is now ok to do ctl read from the dongle
+ */
+ if (err != -1) {
+ prot->ctl_completed = FALSE;
+ if (dbus_poll_intr(dhd->dbus)) {
+ DHD_ERROR(("dbus_poll_intr not submitted\n"));
+ } else {
+ /* interrupt polling is successfully submitted. Wait for dongle to send
+ * interrupt
+ */
+ timeout = dhd_os_ioctl_resp_wait(dhd, &prot->ctl_completed);
+ if (!timeout) {
+ DHD_ERROR(("intr poll wait timed out\n"));
+ }
+ }
+ }
+#endif /* defined(BCMDBUS) && defined(INTR_EP_ENABLE) */
+ DHD_OS_WAKE_UNLOCK(dhd);
+ return err;
+}
+
+static int
+dhdcdc_cmplt(dhd_pub_t *dhd, uint32 id, uint32 len)
+{
+#ifdef BCMDBUS
+ int timeout = 0;
+#endif /* BCMDBUS */
+ int ret;
+ int cdc_len = len + sizeof(cdc_ioctl_t);
+ dhd_prot_t *prot = dhd->prot;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
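+ /* Keep receiving control responses until the request id embedded in
+ * msg.flags matches the request we sent; stale responses left over from
+ * earlier (e.g. timed-out) requests are discarded by the loop below.
+ */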
+ do {
+#ifdef BCMDBUS
+ prot->ctl_completed = FALSE;
+ ret = dbus_recv_ctl(dhd->bus, (uchar*)&prot->msg, cdc_len);
+ if (ret) {
+ DHD_ERROR(("dbus_recv_ctl error=0x%x(%d)\n", ret, ret));
+ goto done;
+ }
+ timeout = dhd_os_ioctl_resp_wait(dhd, &prot->ctl_completed);
+ if ((!timeout) || (!prot->ctl_completed)) {
+ DHD_ERROR(("Rxctl timeout %d ctl_completed %d\n",
+ timeout, prot->ctl_completed));
+ ret = -ETIMEDOUT;
+ goto done;
+ }
+
+ /* XXX FIX: Must return cdc_len, not len, because after query_ioctl()
+ * it subtracts sizeof(cdc_ioctl_t); The other approach is
+ * to have dbus_recv_ctl() return actual len.
+ */
+ ret = cdc_len;
+#else
+ ret = dhd_bus_rxctl(dhd->bus, (uchar*)&prot->msg, cdc_len);
+#endif /* BCMDBUS */
+ if (ret < 0)
+ break;
+ } while (CDC_IOC_ID(ltoh32(prot->msg.flags)) != id);
+
+ /* update ret to len on success */
+ if (ret == cdc_len) {
+ ret = len;
+ }
+
+#ifdef BCMDBUS
+done:
+#endif /* BCMDBUS */
+ return ret;
+}
+
+/* XXX: due to overlays this should not be called directly; call dhd_wl_ioctl_cmd() instead */
+static int
+dhdcdc_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
+{
+ dhd_prot_t *prot = dhd->prot;
+ cdc_ioctl_t *msg = &prot->msg;
+ int ret = 0, retries = 0;
+ uint32 id, flags = 0;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+ DHD_CTL(("%s: cmd %d len %d\n", __FUNCTION__, cmd, len));
+
+ /* Respond "bcmerror" and "bcmerrorstr" with local cache */
+ if (cmd == WLC_GET_VAR && buf)
+ {
+ if (!strcmp((char *)buf, "bcmerrorstr"))
+ {
+ strlcpy((char *)buf, bcmerrorstr(dhd->dongle_error), len);
+ goto done;
+ }
+ else if (!strcmp((char *)buf, "bcmerror"))
+ {
+ *(int *)buf = dhd->dongle_error;
+ goto done;
+ }
+ }
+
+ memset(msg, 0, sizeof(cdc_ioctl_t));
+
+#ifdef BCMSPI
+ /* 11bit gSPI bus allows 2048bytes of max-data. We restrict 'len'
+ * value which is 8Kbytes for various 'get' commands to 2000. 48 bytes are
+ * left for sw headers and misc.
+ */
+ if (len > 2000) {
+ DHD_ERROR(("dhdcdc_query_ioctl: len is truncated to 2000 bytes\n"));
+ len = 2000;
+ }
+#endif /* BCMSPI */
+ msg->cmd = htol32(cmd);
+ msg->len = htol32(len);
+ msg->flags = (++prot->reqid << CDCF_IOC_ID_SHIFT);
+ CDC_SET_IF_IDX(msg, ifidx);
+ /* add additional action bits */
+ action &= WL_IOCTL_ACTION_MASK;
+ msg->flags |= (action << CDCF_IOC_ACTION_SHIFT);
+ msg->flags = htol32(msg->flags);
+
+ if (buf)
+ memcpy(prot->buf, buf, len);
+
+ if ((ret = dhdcdc_msg(dhd)) < 0) {
+ if (!dhd->hang_was_sent)
+ DHD_ERROR(("dhdcdc_query_ioctl: dhdcdc_msg failed w/status %d\n", ret));
+ goto done;
+ }
+
+retry:
+ /* wait for interrupt and get first fragment */
+ if ((ret = dhdcdc_cmplt(dhd, prot->reqid, len)) < 0)
+ goto done;
+
+ flags = ltoh32(msg->flags);
+ id = (flags & CDCF_IOC_ID_MASK) >> CDCF_IOC_ID_SHIFT;
+
+ if ((id < prot->reqid) && (++retries < RETRIES))
+ goto retry;
+ if (id != prot->reqid) {
+ DHD_ERROR(("%s: %s: unexpected request id %d (expected %d)\n",
+ dhd_ifname(dhd, ifidx), __FUNCTION__, id, prot->reqid));
+ ret = -EINVAL;
+ goto done;
+ }
+
+ /* Copy info buffer */
+ if (buf)
+ {
+ if (ret < (int)len)
+ len = ret;
+ memcpy(buf, (void*) prot->buf, len);
+ }
+
+ /* Check the ERROR flag */
+ if (flags & CDCF_IOC_ERROR)
+ {
+ ret = ltoh32(msg->status);
+ /* Cache error from dongle */
+ dhd->dongle_error = ret;
+ }
+
+done:
+ return ret;
+}
+
+#ifdef DHD_PM_CONTROL_FROM_FILE
+extern bool g_pm_control;
+#endif /* DHD_PM_CONTROL_FROM_FILE */
+
+/* XXX: due to overlays this should not be called directly; call dhd_wl_ioctl_cmd() instead */
+static int
+dhdcdc_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
+{
+ dhd_prot_t *prot = dhd->prot;
+ cdc_ioctl_t *msg = &prot->msg;
+ int ret = 0;
+ uint32 flags, id;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+ DHD_CTL(("%s: cmd %d len %d\n", __FUNCTION__, cmd, len));
+
+ if (dhd->busstate == DHD_BUS_DOWN) {
+ DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
+ return -EIO;
+ }
+
+ /* don't talk to the dongle if fw is about to be reloaded */
+ if (dhd->hang_was_sent) {
+ DHD_ERROR(("%s: HANG was sent up earlier. Not talking to the chip\n",
+ __FUNCTION__));
+ return -EIO;
+ }
+
+ if (cmd == WLC_SET_PM) {
+#ifdef DHD_PM_CONTROL_FROM_FILE
+ if (g_pm_control == TRUE) {
+ DHD_ERROR(("%s: SET PM ignored!(Requested:%d)\n",
+ __FUNCTION__, buf ? *(char *)buf : 0));
+ goto done;
+ }
+#endif /* DHD_PM_CONTROL_FROM_FILE */
+#ifdef DHD_PM_OVERRIDE
+ {
+ extern bool g_pm_override;
+ if (g_pm_override == TRUE) {
+ DHD_ERROR(("%s: PM override SET PM ignored!(Requested:%d)\n",
+ __FUNCTION__, buf ? *(char *)buf : 0));
+ goto done;
+ }
+ }
+#endif /* DHD_PM_OVERRIDE */
+#if defined(WLAIBSS)
+ if (dhd->op_mode == DHD_FLAG_IBSS_MODE) {
+ DHD_ERROR(("%s: SET PM ignored for IBSS!(Requested:%d)\n",
+ __FUNCTION__, buf ? *(char *)buf : 0));
+ goto done;
+ }
+#endif /* WLAIBSS */
+ DHD_TRACE_HW4(("%s: SET PM to %d\n", __FUNCTION__, buf ? *(char *)buf : 0));
+ }
+
+ memset(msg, 0, sizeof(cdc_ioctl_t));
+
+ msg->cmd = htol32(cmd);
+ msg->len = htol32(len);
+ msg->flags = (++prot->reqid << CDCF_IOC_ID_SHIFT);
+ CDC_SET_IF_IDX(msg, ifidx);
+ /* add additional action bits */
+ action &= WL_IOCTL_ACTION_MASK;
+ msg->flags |= (action << CDCF_IOC_ACTION_SHIFT) | CDCF_IOC_SET;
+ msg->flags = htol32(msg->flags);
+
+ if (buf)
+ memcpy(prot->buf, buf, len);
+
+ if ((ret = dhdcdc_msg(dhd)) < 0) {
+ DHD_ERROR(("%s: dhdcdc_msg failed w/status %d\n", __FUNCTION__, ret));
+ goto done;
+ }
+
+ if ((ret = dhdcdc_cmplt(dhd, prot->reqid, len)) < 0)
+ goto done;
+
+ flags = ltoh32(msg->flags);
+ id = (flags & CDCF_IOC_ID_MASK) >> CDCF_IOC_ID_SHIFT;
+
+ if (id != prot->reqid) {
+ DHD_ERROR(("%s: %s: unexpected request id %d (expected %d)\n",
+ dhd_ifname(dhd, ifidx), __FUNCTION__, id, prot->reqid));
+ ret = -EINVAL;
+ goto done;
+ }
+
+ /* Copy fw response to buf */
+ if (buf) {
+ ASSERT(ret == len);
+ memcpy(buf, (void*) prot->buf, len);
+ }
+
+ /* Check the ERROR flag */
+ if (flags & CDCF_IOC_ERROR)
+ {
+ ret = ltoh32(msg->status);
+ /* Cache error from dongle */
+ dhd->dongle_error = ret;
+ }
+
+done:
+ return ret;
+}
+
+#ifdef BCMDBUS
+int
+dhd_prot_ctl_complete(dhd_pub_t *dhd)
+{
+ dhd_prot_t *prot;
+
+ if (dhd == NULL)
+ return BCME_ERROR;
+
+ prot = dhd->prot;
+
+ ASSERT(prot);
+ prot->ctl_completed = TRUE;
+ dhd_os_ioctl_resp_wake(dhd);
+ return 0;
+}
+#endif /* BCMDBUS */
+
+/* XXX: due to overlays this should not be called directly; call dhd_wl_ioctl() instead */
+int
+dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len)
+{
+ dhd_prot_t *prot = dhd->prot;
+ int ret = -1;
+ uint8 action;
+ static int error_cnt = 0;
+
+ if ((dhd->busstate == DHD_BUS_DOWN) || dhd->hang_was_sent) {
+ DHD_ERROR(("%s : bus is down. we have nothing to do - bs: %d, has: %d\n",
+ __FUNCTION__, dhd->busstate, dhd->hang_was_sent));
+ goto done;
+ }
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ ASSERT(len <= WLC_IOCTL_MAXLEN);
+
+ if (len > WLC_IOCTL_MAXLEN)
+ goto done;
+
+ if (prot->pending == TRUE) {
+ DHD_ERROR(("CDC packet is pending!!!! cmd=0x%x (%lu) lastcmd=0x%x (%lu)\n",
+ ioc->cmd, (unsigned long)ioc->cmd, prot->lastcmd,
+ (unsigned long)prot->lastcmd));
+ if ((ioc->cmd == WLC_SET_VAR) || (ioc->cmd == WLC_GET_VAR)) {
+ DHD_TRACE(("iovar cmd=%s\n", buf ? (char*)buf : "\0"));
+ }
+ goto done;
+ }
+
+ prot->pending = TRUE;
+ prot->lastcmd = ioc->cmd;
+ action = ioc->set;
+ if (action & WL_IOCTL_ACTION_SET)
+ ret = dhdcdc_set_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
+ else {
+ ret = dhdcdc_query_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
+ if (ret > 0)
+ ioc->used = ret - sizeof(cdc_ioctl_t);
+ }
+ // terence 20130805: send hang event to wpa_supplicant
+ if (ret == -EIO) {
+ error_cnt++;
+ if (error_cnt > 2)
+ ret = -ETIMEDOUT;
+ } else
+ error_cnt = 0;
+
+ /* Too many programs assume ioctl() returns 0 on success */
+ if (ret >= 0)
+ ret = 0;
+ else {
+ cdc_ioctl_t *msg = &prot->msg;
+ ioc->needed = ltoh32(msg->len); /* len == needed when set/query fails from dongle */
+ }
+
+ /* Intercept the wme_dp ioctl here */
+ if ((!ret) && (ioc->cmd == WLC_SET_VAR) && (!strcmp(buf, "wme_dp"))) {
+ int slen, val = 0;
+
+ slen = strlen("wme_dp") + 1;
+ if (len >= (int)(slen + sizeof(int)))
+ bcopy(((char *)buf + slen), &val, sizeof(int));
+ dhd->wme_dp = (uint8) ltoh32(val);
+ }
+
+ prot->pending = FALSE;
+
+done:
+
+ return ret;
+}
+
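+/* CDC has no separate iovar path: iovars travel through dhd_prot_ioctl() as
+ * WLC_GET_VAR/WLC_SET_VAR ioctls, so per-protocol iovar ops are unsupported.
+ */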
+int
+dhd_prot_iovar_op(dhd_pub_t *dhdp, const char *name,
+ void *params, int plen, void *arg, int len, bool set)
+{
+ return BCME_UNSUPPORTED;
+}
+
+void
+dhd_prot_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
+{
+ if (!dhdp || !dhdp->prot) {
+ return;
+ }
+
+ bcm_bprintf(strbuf, "Protocol CDC: reqid %d\n", dhdp->prot->reqid);
+#ifdef PROP_TXSTATUS
+ dhd_wlfc_dump(dhdp, strbuf);
+#endif
+}
+
+/* The FreeBSD PKTPUSH could change the packet buf pointer,
+ * so we need to make it changeable.
+ */
+#define PKTBUF pktbuf
+void
+dhd_prot_hdrpush(dhd_pub_t *dhd, int ifidx, void *PKTBUF)
+{
+#ifdef BDC
+ struct bdc_header *h;
+#endif /* BDC */
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+#ifdef BDC
+ /* Push BDC header used to convey priority for buses that don't */
+
+ PKTPUSH(dhd->osh, PKTBUF, BDC_HEADER_LEN);
+
+ h = (struct bdc_header *)PKTDATA(dhd->osh, PKTBUF);
+
+ h->flags = (BDC_PROTO_VER << BDC_FLAG_VER_SHIFT);
+ if (PKTSUMNEEDED(PKTBUF))
+ h->flags |= BDC_FLAG_SUM_NEEDED;
+
+#ifdef EXT_STA
+ /* save pkt encryption exemption info for dongle */
+ h->flags &= ~BDC_FLAG_EXEMPT;
+ h->flags |= (WLPKTFLAG_EXEMPT_GET(WLPKTTAG(pktbuf)) & BDC_FLAG_EXEMPT);
+#endif /* EXT_STA */
+
+ h->priority = (PKTPRIO(PKTBUF) & BDC_PRIORITY_MASK);
+ h->flags2 = 0;
+ h->dataOffset = 0;
+ BDC_SET_IF_IDX(h, ifidx);
+#endif /* BDC */
+}
+#undef PKTBUF /* Only defined in the above routine */
+
+uint
+dhd_prot_hdrlen(dhd_pub_t *dhd, void *PKTBUF)
+{
+ uint hdrlen = 0;
+#ifdef BDC
+ /* Length of BDC(+WLFC) headers pushed */
+ hdrlen = BDC_HEADER_LEN + (((struct bdc_header *)PKTBUF)->dataOffset * 4);
+#endif
+ return hdrlen;
+}
+
+int
+dhd_prot_hdrpull(dhd_pub_t *dhd, int *ifidx, void *pktbuf, uchar *reorder_buf_info,
+ uint *reorder_info_len)
+{
+#ifdef BDC
+ struct bdc_header *h;
+#endif
+ uint8 data_offset = 0;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+#ifdef BDC
+ if (reorder_info_len)
+ *reorder_info_len = 0;
+ /* Pop BDC header used to convey priority for buses that don't */
+
+ if (PKTLEN(dhd->osh, pktbuf) < BDC_HEADER_LEN) {
+ DHD_ERROR(("%s: rx data too short (%d < %d)\n", __FUNCTION__,
+ PKTLEN(dhd->osh, pktbuf), BDC_HEADER_LEN));
+ return BCME_ERROR;
+ }
+
+ h = (struct bdc_header *)PKTDATA(dhd->osh, pktbuf);
+
+ if (!ifidx) {
+ /* for tx packet, skip the analysis */
+ data_offset = h->dataOffset;
+ PKTPULL(dhd->osh, pktbuf, BDC_HEADER_LEN);
+ goto exit;
+ }
+
+ *ifidx = BDC_GET_IF_IDX(h);
+
+ if (((h->flags & BDC_FLAG_VER_MASK) >> BDC_FLAG_VER_SHIFT) != BDC_PROTO_VER) {
+ DHD_ERROR(("%s: non-BDC packet received, flags = 0x%x\n",
+ dhd_ifname(dhd, *ifidx), h->flags));
+ if (((h->flags & BDC_FLAG_VER_MASK) >> BDC_FLAG_VER_SHIFT) == BDC_PROTO_VER_1)
+ h->dataOffset = 0;
+ else
+ return BCME_ERROR;
+ }
+
+ if (h->flags & BDC_FLAG_SUM_GOOD) {
+ DHD_INFO(("%s: BDC packet received with good rx-csum, flags 0x%x\n",
+ dhd_ifname(dhd, *ifidx), h->flags));
+ PKTSETSUMGOOD(pktbuf, TRUE);
+ }
+
+ PKTSETPRIO(pktbuf, (h->priority & BDC_PRIORITY_MASK));
+ data_offset = h->dataOffset;
+ PKTPULL(dhd->osh, pktbuf, BDC_HEADER_LEN);
+#endif /* BDC */
+
+#ifdef PROP_TXSTATUS
+ if (!DHD_PKTTAG_PKTDIR(PKTTAG(pktbuf))) {
+ /*
+ - parse txstatus only for packets that came from the firmware
+ */
+ dhd_wlfc_parse_header_info(dhd, pktbuf, (data_offset << 2),
+ reorder_buf_info, reorder_info_len);
+
+#ifdef BCMDBUS
+#ifndef DHD_WLFC_THREAD
+ dhd_wlfc_commit_packets(dhd,
+ (f_commitpkt_t)dhd_bus_txdata, dhd->bus, NULL, FALSE);
+#endif /* DHD_WLFC_THREAD */
+#endif /* BCMDBUS */
+ }
+#endif /* PROP_TXSTATUS */
+
+exit:
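+ /* strip any variable-length trailing header; data_offset is in 4-byte words */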
+ PKTPULL(dhd->osh, pktbuf, (data_offset << 2));
+ return 0;
+}
+
+int
+dhd_prot_attach(dhd_pub_t *dhd)
+{
+ dhd_prot_t *cdc;
+
+ if (!(cdc = (dhd_prot_t *)DHD_OS_PREALLOC(dhd, DHD_PREALLOC_PROT, sizeof(dhd_prot_t)))) {
+ DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
+ goto fail;
+ }
+ memset(cdc, 0, sizeof(dhd_prot_t));
+
+ /* ensure that the msg buf directly follows the cdc msg struct */
+ if ((uintptr)(&cdc->msg + 1) != (uintptr)cdc->buf) {
+ DHD_ERROR(("dhd_prot_t is not correctly defined\n"));
+ goto fail;
+ }
+
+ dhd->prot = cdc;
+#ifdef BDC
+ dhd->hdrlen += BDC_HEADER_LEN;
+#endif
+ dhd->maxctl = WLC_IOCTL_MAXLEN + sizeof(cdc_ioctl_t) + ROUND_UP_MARGIN;
+ return 0;
+
+fail:
+ if (cdc != NULL)
+ DHD_OS_PREFREE(dhd, cdc, sizeof(dhd_prot_t));
+ return BCME_NOMEM;
+}
+
+/* ~NOTE~ What if another thread is waiting on the semaphore? Holding it? */
+void
+dhd_prot_detach(dhd_pub_t *dhd)
+{
+#ifdef PROP_TXSTATUS
+ dhd_wlfc_deinit(dhd);
+#endif
+ DHD_OS_PREFREE(dhd, dhd->prot, sizeof(dhd_prot_t));
+ dhd->prot = NULL;
+}
+
+void
+dhd_prot_dstats(dhd_pub_t *dhd)
+{
+ /* copy bus stats */
+
+ dhd->dstats.tx_packets = dhd->tx_packets;
+ dhd->dstats.tx_errors = dhd->tx_errors;
+ dhd->dstats.rx_packets = dhd->rx_packets;
+ dhd->dstats.rx_errors = dhd->rx_errors;
+ dhd->dstats.rx_dropped = dhd->rx_dropped;
+ dhd->dstats.multicast = dhd->rx_multicast;
+ return;
+}
+
+int
+dhd_sync_with_dongle(dhd_pub_t *dhd)
+{
+ int ret = 0;
+ wlc_rev_info_t revinfo;
+ char buf[128];
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+#ifndef OEM_ANDROID
+ /* Get the device MAC address */
+ strcpy(buf, "cur_etheraddr");
+ ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0);
+ if (ret < 0)
+ goto done;
+ memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN);
+#endif /* OEM_ANDROID */
+#ifdef DHD_FW_COREDUMP
+ /* Check the memdump capability */
+ dhd_get_memdump_info(dhd);
+#endif /* DHD_FW_COREDUMP */
+
+#ifdef BCMASSERT_LOG
+ dhd_get_assert_info(dhd);
+#endif /* BCMASSERT_LOG */
+
+ /* Get the device rev info */
+ memset(&revinfo, 0, sizeof(revinfo));
+ ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_REVINFO, &revinfo, sizeof(revinfo), FALSE, 0);
+ if (ret < 0)
+ goto done;
+#if defined(BCMDBUS)
+ if (dhd_download_fw_on_driverload) {
+ dhd_conf_reset(dhd);
+ dhd_conf_set_chiprev(dhd, revinfo.chipnum, revinfo.chiprev);
+ dhd_conf_preinit(dhd);
+ dhd_conf_read_config(dhd, dhd->conf_path);
+ }
+#endif /* BCMDBUS */
+
+ /* query for 'wlc_ver' to get version info from firmware */
+ /* memsetting to zero */
+ bzero(buf, sizeof(buf));
+ ret = bcm_mkiovar("wlc_ver", NULL, 0, buf, sizeof(buf));
+ if (ret == 0) {
+ ret = BCME_BUFTOOSHORT;
+ goto done;
+ }
+ ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0);
+ if (ret == BCME_UNSUPPORTED) {
+ dhd->wlc_ver_major = DEFAULT_WLC_API_VERSION_MAJOR;
+ dhd->wlc_ver_minor = DEFAULT_WLC_API_VERSION_MINOR;
+ } else if (ret < 0) {
+ DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
+ goto done;
+ } else {
+ dhd->wlc_ver_major = ((wl_wlc_version_t*)buf)->wlc_ver_major;
+ dhd->wlc_ver_minor = ((wl_wlc_version_t*)buf)->wlc_ver_minor;
+ }
+ DHD_ERROR(("wlc_ver_major %d, wlc_ver_minor %d\n", dhd->wlc_ver_major, dhd->wlc_ver_minor));
+
+#if defined(BCMDBUS) && defined(BCMDHDUSB)
+ /* dbus_set_revinfo(dhd->dbus, revinfo.chipnum, revinfo.chiprev); */
+#endif /* BCMDBUS && BCMDHDUSB */
+
+ DHD_SSSR_DUMP_INIT(dhd);
+
+ dhd_process_cid_mac(dhd, TRUE);
+ ret = dhd_preinit_ioctls(dhd);
+ dhd_process_cid_mac(dhd, FALSE);
+
+ /* Always assumes wl for now */
+ dhd->iswl = TRUE;
+
+ /* XXX Could use WLC_GET_REVINFO to get driver version? */
+done:
+ return ret;
+}
+
+int dhd_prot_init(dhd_pub_t *dhd)
+{
+ return BCME_OK;
+}
+
+void
+dhd_prot_stop(dhd_pub_t *dhd)
+{
+/* Nothing to do for CDC */
+}
+
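+/*
+ * Collect packets parked in the flow's reorder ring from slot <start> up to
+ * (but not including) slot <end>, wrapping at max_idx; when start equals end
+ * the whole ring is walked once (a full flush). Collected packets are chained
+ * via PKTSETNEXT: *pkt is the head, *pplast the tail, *pkt_count the count,
+ * and each visited slot is cleared.
+ */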
+static void
+dhd_get_hostreorder_pkts(void *osh, struct reorder_info *ptr, void **pkt,
+ uint32 *pkt_count, void **pplast, uint8 start, uint8 end)
+{
+ void *plast = NULL, *p;
+ uint32 pkt_cnt = 0;
+
+ if (ptr->pend_pkts == 0) {
+ DHD_REORDER(("%s: no packets in reorder queue \n", __FUNCTION__));
+ *pplast = NULL;
+ *pkt_count = 0;
+ *pkt = NULL;
+ return;
+ }
+ do {
+ p = (void *)(ptr->p[start]);
+ ptr->p[start] = NULL;
+
+ if (p != NULL) {
+ if (plast == NULL)
+ *pkt = p;
+ else
+ PKTSETNEXT(osh, plast, p);
+
+ plast = p;
+ pkt_cnt++;
+ }
+ start++;
+ if (start > ptr->max_idx)
+ start = 0;
+ } while (start != end);
+ *pplast = plast;
+ *pkt_count = pkt_cnt;
+ ptr->pend_pkts -= (uint8)pkt_cnt;
+}
+
+int
+dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf, uint reorder_info_len,
+ void **pkt, uint32 *pkt_count)
+{
+ uint8 flow_id, max_idx, cur_idx, exp_idx;
+ struct reorder_info *ptr;
+ uint8 flags;
+ void *cur_pkt, *plast = NULL;
+ uint32 cnt = 0;
+
+ if (pkt == NULL) {
+ if (pkt_count != NULL)
+ *pkt_count = 0;
+ return 0;
+ }
+
+ flow_id = reorder_info_buf[WLHOST_REORDERDATA_FLOWID_OFFSET];
+ flags = reorder_info_buf[WLHOST_REORDERDATA_FLAGS_OFFSET];
+
+ DHD_REORDER(("flow_id %d, flags 0x%02x, idx(%d, %d, %d)\n", flow_id, flags,
+ reorder_info_buf[WLHOST_REORDERDATA_CURIDX_OFFSET],
+ reorder_info_buf[WLHOST_REORDERDATA_EXPIDX_OFFSET],
+ reorder_info_buf[WLHOST_REORDERDATA_MAXIDX_OFFSET]));
+
+ /* validate flags and flow id */
+ if (flags == 0xFF) {
+ DHD_ERROR(("%s: invalid flags...so ignore this packet\n", __FUNCTION__));
+ *pkt_count = 1;
+ return 0;
+ }
+
+ cur_pkt = *pkt;
+ *pkt = NULL;
+
+ ptr = dhd->reorder_bufs[flow_id];
+ if (flags & WLHOST_REORDERDATA_DEL_FLOW) {
+ uint32 buf_size = sizeof(struct reorder_info);
+
+ DHD_REORDER(("%s: Flags indicating to delete a flow id %d\n",
+ __FUNCTION__, flow_id));
+
+ if (ptr == NULL) {
+ DHD_REORDER(("%s: received flags to cleanup, but no flow (%d) yet\n",
+ __FUNCTION__, flow_id));
+ *pkt_count = 1;
+ *pkt = cur_pkt;
+ return 0;
+ }
+
+ dhd_get_hostreorder_pkts(dhd->osh, ptr, pkt, &cnt, &plast,
+ ptr->exp_idx, ptr->exp_idx);
+ /* set it to the last packet */
+ if (plast) {
+ PKTSETNEXT(dhd->osh, plast, cur_pkt);
+ cnt++;
+ }
+ else {
+ if (cnt != 0) {
+ DHD_ERROR(("%s: del flow: something fishy, pending packets %d\n",
+ __FUNCTION__, cnt));
+ }
+ *pkt = cur_pkt;
+ cnt = 1;
+ }
+ buf_size += ((ptr->max_idx + 1) * sizeof(void *));
+ MFREE(dhd->osh, ptr, buf_size);
+ dhd->reorder_bufs[flow_id] = NULL;
+ *pkt_count = cnt;
+ return 0;
+ }
+ /* all the other cases depend on the existence of the reorder struct for that flow id */
+ if (ptr == NULL) {
+ uint32 buf_size_alloc = sizeof(reorder_info_t);
+ max_idx = reorder_info_buf[WLHOST_REORDERDATA_MAXIDX_OFFSET];
+
+ buf_size_alloc += ((max_idx + 1) * sizeof(void*));
+ /* allocate space to hold the buffers, index etc */
+
+ DHD_REORDER(("%s: alloc buffer of size %d size, reorder info id %d, maxidx %d\n",
+ __FUNCTION__, buf_size_alloc, flow_id, max_idx));
+ ptr = (struct reorder_info *)MALLOC(dhd->osh, buf_size_alloc);
+ if (ptr == NULL) {
+ DHD_ERROR(("%s: Malloc failed to alloc buffer\n", __FUNCTION__));
+ *pkt_count = 1;
+ return 0;
+ }
+ bzero(ptr, buf_size_alloc);
+ dhd->reorder_bufs[flow_id] = ptr;
+ ptr->p = (void *)(ptr+1);
+ ptr->max_idx = max_idx;
+ }
+ /* XXX: validate cur, exp indices */
+ if (flags & WLHOST_REORDERDATA_NEW_HOLE) {
+ DHD_REORDER(("%s: new hole, so cleanup pending buffers\n", __FUNCTION__));
+ if (ptr->pend_pkts) {
+ dhd_get_hostreorder_pkts(dhd->osh, ptr, pkt, &cnt, &plast,
+ ptr->exp_idx, ptr->exp_idx);
+ ptr->pend_pkts = 0;
+ }
+ ptr->cur_idx = reorder_info_buf[WLHOST_REORDERDATA_CURIDX_OFFSET];
+ ptr->exp_idx = reorder_info_buf[WLHOST_REORDERDATA_EXPIDX_OFFSET];
+ ptr->max_idx = reorder_info_buf[WLHOST_REORDERDATA_MAXIDX_OFFSET];
+ ptr->p[ptr->cur_idx] = cur_pkt;
+ ptr->pend_pkts++;
+ *pkt_count = cnt;
+ }
+ else if (flags & WLHOST_REORDERDATA_CURIDX_VALID) {
+ cur_idx = reorder_info_buf[WLHOST_REORDERDATA_CURIDX_OFFSET];
+ exp_idx = reorder_info_buf[WLHOST_REORDERDATA_EXPIDX_OFFSET];
+
+ if ((exp_idx == ptr->exp_idx) && (cur_idx != ptr->exp_idx)) {
+ /* still in the current hole */
+ /* enqueue the current on the buffer chain */
+ if (ptr->p[cur_idx] != NULL) {
+ DHD_REORDER(("%s: HOLE: ERROR buffer pending..free it\n",
+ __FUNCTION__));
+ PKTFREE(dhd->osh, ptr->p[cur_idx], TRUE);
+ ptr->p[cur_idx] = NULL;
+ }
+ ptr->p[cur_idx] = cur_pkt;
+ ptr->pend_pkts++;
+ ptr->cur_idx = cur_idx;
+ DHD_REORDER(("%s: fill up a hole..pending packets is %d\n",
+ __FUNCTION__, ptr->pend_pkts));
+ *pkt_count = 0;
+ *pkt = NULL;
+ }
+ else if (ptr->exp_idx == cur_idx) {
+ /* got the right one ..flush from cur to exp and update exp */
+ DHD_REORDER(("%s: got the right one now, cur_idx is %d\n",
+ __FUNCTION__, cur_idx));
+ if (ptr->p[cur_idx] != NULL) {
+ DHD_REORDER(("%s: Error buffer pending..free it\n",
+ __FUNCTION__));
+ PKTFREE(dhd->osh, ptr->p[cur_idx], TRUE);
+ ptr->p[cur_idx] = NULL;
+ }
+ ptr->p[cur_idx] = cur_pkt;
+ ptr->pend_pkts++;
+
+ ptr->cur_idx = cur_idx;
+ ptr->exp_idx = exp_idx;
+
+ dhd_get_hostreorder_pkts(dhd->osh, ptr, pkt, &cnt, &plast,
+ cur_idx, exp_idx);
+ *pkt_count = cnt;
+ DHD_REORDER(("%s: freeing up buffers %d, still pending %d\n",
+ __FUNCTION__, cnt, ptr->pend_pkts));
+ }
+ else {
+ uint8 end_idx;
+ bool flush_current = FALSE;
+ /* both cur and exp are moved now .. */
+ DHD_REORDER(("%s:, flow %d, both moved, cur %d(%d), exp %d(%d)\n",
+ __FUNCTION__, flow_id, ptr->cur_idx, cur_idx,
+ ptr->exp_idx, exp_idx));
+ if (flags & WLHOST_REORDERDATA_FLUSH_ALL)
+ end_idx = ptr->exp_idx;
+ else
+ end_idx = exp_idx;
+
+ /* flush pkts first */
+ dhd_get_hostreorder_pkts(dhd->osh, ptr, pkt, &cnt, &plast,
+ ptr->exp_idx, end_idx);
+
+ if (cur_idx == ptr->max_idx) {
+ if (exp_idx == 0)
+ flush_current = TRUE;
+ } else {
+ if (exp_idx == cur_idx + 1)
+ flush_current = TRUE;
+ }
+ if (flush_current) {
+ if (plast)
+ PKTSETNEXT(dhd->osh, plast, cur_pkt);
+ else
+ *pkt = cur_pkt;
+ cnt++;
+ }
+ else {
+ ptr->p[cur_idx] = cur_pkt;
+ ptr->pend_pkts++;
+ }
+ ptr->exp_idx = exp_idx;
+ ptr->cur_idx = cur_idx;
+ *pkt_count = cnt;
+ }
+ }
+ else {
+ uint8 end_idx;
+ /* no real packet but update to exp_seq...that means explicit window move */
+ exp_idx = reorder_info_buf[WLHOST_REORDERDATA_EXPIDX_OFFSET];
+
+ DHD_REORDER(("%s: move the window, cur_idx is %d, exp is %d, new exp is %d\n",
+ __FUNCTION__, ptr->cur_idx, ptr->exp_idx, exp_idx));
+ if (flags & WLHOST_REORDERDATA_FLUSH_ALL)
+ end_idx = ptr->exp_idx;
+ else
+ end_idx = exp_idx;
+
+ dhd_get_hostreorder_pkts(dhd->osh, ptr, pkt, &cnt, &plast, ptr->exp_idx, end_idx);
+ if (plast)
+ PKTSETNEXT(dhd->osh, plast, cur_pkt);
+ else
+ *pkt = cur_pkt;
+ cnt++;
+ *pkt_count = cnt;
+ /* set the new expected idx */
+ ptr->exp_idx = exp_idx;
+ }
+ return 0;
+}
diff --git a/bcmdhd.101.10.361.x/dhd_cfg80211.c b/bcmdhd.101.10.361.x/dhd_cfg80211.c
new file mode 100755
index 0000000..59258ec
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_cfg80211.c
@@ -0,0 +1,597 @@
+/*
+ * Linux cfg80211 driver - Dongle Host Driver (DHD) related
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+#include <linux/vmalloc.h>
+#include <net/rtnetlink.h>
+
+#include <bcmutils.h>
+#include <wldev_common.h>
+#include <wl_cfg80211.h>
+#include <dhd_cfg80211.h>
+
+#ifdef PKT_FILTER_SUPPORT
+#include <dngl_stats.h>
+#include <dhd.h>
+#endif
+
+#ifdef PKT_FILTER_SUPPORT
+extern uint dhd_pkt_filter_enable;
+extern uint dhd_master_mode;
+extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode);
+#endif
+
+static int dhd_dongle_up = FALSE;
+#define PKT_FILTER_BUF_SIZE 64
+
+#if defined(BCMDONGLEHOST)
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhdioctl.h>
+#include <wlioctl.h>
+#include <brcm_nl80211.h>
+#include <dhd_cfg80211.h>
+#endif /* defined(BCMDONGLEHOST) */
+
+static s32 wl_dongle_up(struct net_device *ndev);
+static s32 wl_dongle_down(struct net_device *ndev);
+#ifndef OEM_ANDROID
+#ifndef CUSTOMER_HW6
+static s32 wl_dongle_power(struct net_device *ndev, u32 power_mode);
+#ifdef BCMSDIO /* glomming is a sdio specific feature */
+static s32 wl_dongle_glom(struct net_device *ndev, s32 glom, u32 dongle_align);
+#endif
+static s32 wl_dongle_scantime(struct net_device *ndev, s32 scan_assoc_time, s32 scan_unassoc_time);
+static s32 wl_dongle_offload(struct net_device *ndev, s32 arpoe, s32 arp_ol);
+static s32 wl_pattern_atoh(s8 *src, s8 *dst);
+static s32 wl_dongle_filter(struct net_device *ndev, u32 filter_mode);
+#endif /* !CUSTOMER_HW6 */
+#endif /* !OEM_ANDROID */
+
+/**
+ * Function implementations
+ */
+
+s32 dhd_cfg80211_init(struct bcm_cfg80211 *cfg)
+{
+ dhd_dongle_up = FALSE;
+ return 0;
+}
+
+s32 dhd_cfg80211_deinit(struct bcm_cfg80211 *cfg)
+{
+ dhd_dongle_up = FALSE;
+ return 0;
+}
+
+s32 dhd_cfg80211_down(struct bcm_cfg80211 *cfg)
+{
+ struct net_device *ndev;
+ s32 err = 0;
+
+ WL_TRACE(("In\n"));
+ if (!dhd_dongle_up) {
+ WL_INFORM_MEM(("Dongle is already down\n"));
+ err = 0;
+ goto done;
+ }
+ ndev = bcmcfg_to_prmry_ndev(cfg);
+ wl_dongle_down(ndev);
+done:
+ return err;
+}
+
+s32 dhd_cfg80211_set_p2p_info(struct bcm_cfg80211 *cfg, int val)
+{
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+ dhd->op_mode |= val;
+ WL_ERR(("Set : op_mode=0x%04x\n", dhd->op_mode));
+
+ return 0;
+}
+
+s32 dhd_cfg80211_clean_p2p_info(struct bcm_cfg80211 *cfg)
+{
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+ dhd->op_mode &= ~(DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE);
+ WL_ERR(("Clean : op_mode=0x%04x\n", dhd->op_mode));
+
+ return 0;
+}
+#ifdef WL_STATIC_IF
+int32
+wl_cfg80211_update_iflist_info(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ int ifidx, uint8 *addr, int bssidx, char *name, int if_state)
+{
+ return dhd_update_iflist_info(cfg->pub, ndev, ifidx, addr, bssidx, name, if_state);
+}
+#endif /* WL_STATIC_IF */
+struct net_device* wl_cfg80211_allocate_if(struct bcm_cfg80211 *cfg, int ifidx, const char *name,
+ uint8 *mac, uint8 bssidx, const char *dngl_name)
+{
+ return dhd_allocate_if(cfg->pub, ifidx, name, mac, bssidx, FALSE, dngl_name);
+}
+
+int wl_cfg80211_register_if(struct bcm_cfg80211 *cfg,
+ int ifidx, struct net_device* ndev, bool rtnl_lock_reqd)
+{
+ return dhd_register_if(cfg->pub, ifidx, rtnl_lock_reqd);
+}
+
+int wl_cfg80211_remove_if(struct bcm_cfg80211 *cfg,
+ int ifidx, struct net_device* ndev, bool rtnl_lock_reqd)
+{
+#ifdef DHD_PCIE_RUNTIMEPM
+ dhdpcie_runtime_bus_wake(cfg->pub, CAN_SLEEP(), __builtin_return_address(0));
+#endif /* DHD_PCIE_RUNTIMEPM */
+ return dhd_remove_if(cfg->pub, ifidx, rtnl_lock_reqd);
+}
+
+void wl_cfg80211_cleanup_if(struct net_device *net)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(net);
+#ifdef DHD_PCIE_RUNTIMEPM
+ dhdpcie_runtime_bus_wake(cfg->pub, CAN_SLEEP(), __builtin_return_address(0));
+#else
+ BCM_REFERENCE(cfg);
+#endif /* DHD_PCIE_RUNTIMEPM */
+ dhd_cleanup_if(net);
+}
+
+struct net_device * dhd_cfg80211_netdev_free(struct net_device *ndev)
+{
+ struct bcm_cfg80211 *cfg;
+
+ if (ndev) {
+ cfg = wl_get_cfg(ndev);
+ if (ndev->ieee80211_ptr) {
+ MFREE(cfg->osh, ndev->ieee80211_ptr, sizeof(struct wireless_dev));
+ ndev->ieee80211_ptr = NULL;
+ }
+ free_netdev(ndev);
+ return NULL;
+ }
+
+ return ndev;
+}
+
+void dhd_netdev_free(struct net_device *ndev)
+{
+#ifdef WL_CFG80211
+ ndev = dhd_cfg80211_netdev_free(ndev);
+#endif
+ if (ndev)
+ free_netdev(ndev);
+}
+
+static s32
+wl_dongle_up(struct net_device *ndev)
+{
+ s32 err = 0;
+ u32 local_up = 0;
+#ifdef WLAN_ACCEL_BOOT
+ u32 bus_host_access = 1;
+ err = wldev_iovar_setint(ndev, "bus:host_access", bus_host_access);
+ if (unlikely(err)) {
+ WL_ERR(("bus:host_access(%d) error (%d)\n", bus_host_access, err));
+ }
+#endif /* WLAN_ACCEL_BOOT */
+ err = wldev_ioctl_set(ndev, WLC_UP, &local_up, sizeof(local_up));
+ if (unlikely(err)) {
+ WL_ERR(("WLC_UP error (%d)\n", err));
+ } else {
+ WL_INFORM_MEM(("wl up\n"));
+ dhd_dongle_up = TRUE;
+ }
+ return err;
+}
+
+static s32
+wl_dongle_down(struct net_device *ndev)
+{
+ s32 err = 0;
+ u32 local_down = 0;
+#ifdef WLAN_ACCEL_BOOT
+ u32 bus_host_access = 0;
+#endif /* WLAN_ACCEL_BOOT */
+
+ err = wldev_ioctl_set(ndev, WLC_DOWN, &local_down, sizeof(local_down));
+ if (unlikely(err)) {
+ WL_ERR(("WLC_DOWN error (%d)\n", err));
+ }
+#ifdef WLAN_ACCEL_BOOT
+ err = wldev_iovar_setint(ndev, "bus:host_access", bus_host_access);
+ if (unlikely(err)) {
+ WL_ERR(("bus:host_access(%d) error (%d)\n", bus_host_access, err));
+ }
+#endif /* WLAN_ACCEL_BOOT */
+ WL_INFORM_MEM(("wl down\n"));
+ dhd_dongle_up = FALSE;
+
+ return err;
+}
+
+#ifndef OEM_ANDROID
+#ifndef CUSTOMER_HW6
+static s32 wl_dongle_power(struct net_device *ndev, u32 power_mode)
+{
+ s32 err = 0;
+
+ WL_TRACE(("In\n"));
+ err = wldev_ioctl_set(ndev, WLC_SET_PM, &power_mode, sizeof(power_mode));
+ if (unlikely(err)) {
+ WL_ERR(("WLC_SET_PM error (%d)\n", err));
+ }
+ return err;
+}
+
+#ifdef BCMSDIO
+static s32
+wl_dongle_glom(struct net_device *ndev, s32 glom, u32 dongle_align)
+{
+ s32 err = 0;
+
+ /* Match Host and Dongle rx alignment */
+ err = wldev_iovar_setint(ndev, "bus:txglomalign", dongle_align);
+ if (unlikely(err)) {
+ WL_ERR(("txglomalign error (%d)\n", err));
+ goto dongle_glom_out;
+ }
+ /* disable glom option per default */
+ if (glom != DEFAULT_GLOM_VALUE) {
+ err = wldev_iovar_setint(ndev, "bus:txglom", glom);
+ if (unlikely(err)) {
+ WL_ERR(("txglom error (%d)\n", err));
+ goto dongle_glom_out;
+ }
+ }
+dongle_glom_out:
+ return err;
+}
+
+#endif /* BCMSDIO */
+#endif /* !CUSTOMER_HW6 */
+#endif /* !OEM_ANDROID */
+
+s32
+wl_dongle_roam(struct net_device *ndev, u32 roamvar, u32 bcn_timeout)
+{
+ s32 err = 0;
+
+ /* Setup timeout if Beacons are lost and roam is off to report link down */
+ if (roamvar) {
+ err = wldev_iovar_setint(ndev, "bcn_timeout", bcn_timeout);
+ if (unlikely(err)) {
+ WL_ERR(("bcn_timeout error (%d)\n", err));
+ goto dongle_rom_out;
+ }
+ }
+ /* Enable/Disable built-in roaming to allow supplicant to take care of roaming */
+ err = wldev_iovar_setint(ndev, "roam_off", roamvar);
+ if (unlikely(err)) {
+ WL_ERR(("roam_off error (%d)\n", err));
+ goto dongle_rom_out;
+ }
+dongle_rom_out:
+ return err;
+}
+
+#ifndef OEM_ANDROID
+#ifndef CUSTOMER_HW6
+static s32
+wl_dongle_scantime(struct net_device *ndev, s32 scan_assoc_time,
+ s32 scan_unassoc_time)
+{
+ s32 err = 0;
+
+ err = wldev_ioctl_set(ndev, WLC_SET_SCAN_CHANNEL_TIME, &scan_assoc_time,
+ sizeof(scan_assoc_time));
+ if (err) {
+ if (err == -EOPNOTSUPP) {
+ WL_INFORM(("Scan assoc time is not supported\n"));
+ } else {
+ WL_ERR(("Scan assoc time error (%d)\n", err));
+ }
+ goto dongle_scantime_out;
+ }
+ err = wldev_ioctl_set(ndev, WLC_SET_SCAN_UNASSOC_TIME, &scan_unassoc_time,
+ sizeof(scan_unassoc_time));
+ if (err) {
+ if (err == -EOPNOTSUPP) {
+ WL_INFORM(("Scan unassoc time is not supported\n"));
+ } else {
+ WL_ERR(("Scan unassoc time error (%d)\n", err));
+ }
+ goto dongle_scantime_out;
+ }
+
+dongle_scantime_out:
+ return err;
+}
+
+static s32
+wl_dongle_offload(struct net_device *ndev, s32 arpoe, s32 arp_ol)
+{
+ s8 iovbuf[WLC_IOCTL_SMLEN];
+ s32 err = 0;
+ s32 len;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+
+ /* Set ARP offload */
+ len = bcm_mkiovar("arpoe", (char *)&arpoe, sizeof(arpoe), iovbuf, sizeof(iovbuf));
+ if (!len) {
+ WL_ERR(("%s: bcm_mkiovar failed:%d\n", __FUNCTION__, len));
+ return BCME_BADARG;
+ }
+ err = wldev_ioctl_set(ndev, WLC_SET_VAR, iovbuf, len);
+ if (err) {
+ if (err == -EOPNOTSUPP)
+ WL_INFORM(("arpoe is not supported\n"));
+ else
+ WL_ERR(("arpoe error (%d)\n", err));
+
+ goto dongle_offload_out;
+ }
+ len = bcm_mkiovar("arp_ol", (char *)&arp_ol, sizeof(arp_ol), iovbuf, sizeof(iovbuf));
+ if (!len) {
+ WL_ERR(("%s: bcm_mkiovar failed:%d\n", __FUNCTION__, len));
+ return BCME_BADARG;
+ }
+ err = wldev_ioctl_set(ndev, WLC_SET_VAR, iovbuf, len);
+ if (err) {
+ if (err == -EOPNOTSUPP)
+ WL_INFORM(("arp_ol is not supported\n"));
+ else
+ WL_ERR(("arp_ol error (%d)\n", err));
+
+ goto dongle_offload_out;
+ }
+
+ dhd->arpoe_enable = TRUE;
+ dhd->arpol_configured = TRUE;
+ WL_ERR(("arpoe:%d arpol:%d\n",
+ dhd->arpoe_enable, dhd->arpol_configured));
+
+dongle_offload_out:
+ return err;
+}
+
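+/* Example: wl_pattern_atoh("0xff", dst) writes the single byte 0xff into dst
+ * and returns 1; a string without a "0x"/"0X" prefix or with an odd number of
+ * hex digits returns -1.
+ */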
+static s32 wl_pattern_atoh(s8 *src, s8 *dst)
+{
+ int i;
+ if (strncmp(src, "0x", 2) != 0 && strncmp(src, "0X", 2) != 0) {
+ WL_ERR(("Mask invalid format. Needs to start with 0x\n"));
+ return -1;
+ }
+ src = src + 2; /* Skip past 0x */
+ if (strlen(src) % 2 != 0) {
+ WL_ERR(("Mask invalid format. Needs to be of even length\n"));
+ return -1;
+ }
+
+ for (i = 0; *src != '\0'; i++) {
+ char num[3];
+ if ((num[0] = src[0]) != '\0') {
+ num[1] = src[1];
+ }
+ num[2] = '\0';
+ dst[i] = (u8) simple_strtoul(num, NULL, 16);
+ src += 2;
+ }
+
+ return i;
+}
+
+static s32 wl_dongle_filter(struct net_device *ndev, u32 filter_mode)
+{
+ const s8 *str;
+ struct wl_pkt_filter pkt_filter;
+ struct wl_pkt_filter *pkt_filterp;
+ s32 buf_len;
+ s32 str_len;
+ u32 mask_size;
+ u32 pattern_size;
+ s8 buf[PKT_FILTER_BUF_SIZE] = {0};
+ s32 err = 0;
+
+ /* add a default packet filter pattern */
+ str = "pkt_filter_add";
+ str_len = strlen(str);
+ strlcpy(buf, str, sizeof(buf));
+ buf_len = str_len + 1;
+
+ pkt_filterp = (struct wl_pkt_filter *)(buf + str_len + 1);
+
+ /* Parse packet filter id. */
+ pkt_filter.id = htod32(100);
+
+ /* Parse filter polarity. */
+ pkt_filter.negate_match = htod32(0);
+
+ /* Parse filter type. */
+ pkt_filter.type = htod32(0);
+
+ /* Parse pattern filter offset. */
+ pkt_filter.u.pattern.offset = htod32(0);
+
+ /* Parse pattern filter mask. */
+ mask_size = htod32(wl_pattern_atoh("0xff",
+ (char *)pkt_filterp->u.pattern.
+ mask_and_pattern));
+
+ if (mask_size == (typeof(mask_size))-1 ||
+ (mask_size > (PKT_FILTER_BUF_SIZE - (buf_len) +
+ WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_FIXED_LEN))) {
+ /* mask_size has to be equal to pattern_size */
+ err = -EINVAL;
+ goto dongle_filter_out;
+ }
+ /* Parse pattern filter pattern. */
+ pattern_size = htod32(wl_pattern_atoh("0x00",
+ (char *)&pkt_filterp->u.pattern.mask_and_pattern[mask_size]));
+
+ if (mask_size != pattern_size) {
+ WL_ERR(("Mask and pattern not the same size\n"));
+ err = -EINVAL;
+ goto dongle_filter_out;
+ }
+
+ pkt_filter.u.pattern.size_bytes = mask_size;
+ buf_len += WL_PKT_FILTER_FIXED_LEN;
+ buf_len += (WL_PKT_FILTER_PATTERN_FIXED_LEN + 2 * mask_size);
+
+ /* Filter attributes are set in a local
+ * variable (pkt_filter), and
+ * then memcpy'ed into the buffer (pkt_filterp) since there is no
+ * guarantee that the buffer is properly aligned.
+ */
+ memcpy((char *)pkt_filterp, &pkt_filter,
+ WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_FIXED_LEN);
+
+ err = wldev_ioctl_set(ndev, WLC_SET_VAR, buf, buf_len);
+ if (err) {
+ if (err == -EOPNOTSUPP) {
+ WL_INFORM(("filter not supported\n"));
+ } else {
+ WL_ERR(("filter (%d)\n", err));
+ }
+ goto dongle_filter_out;
+ }
+
+ /* set mode to allow pattern */
+ err = wldev_iovar_setint(ndev, "pkt_filter_mode", filter_mode);
+ if (err) {
+ if (err == -EOPNOTSUPP) {
+ WL_INFORM(("filter_mode not supported\n"));
+ } else {
+ WL_ERR(("filter_mode (%d)\n", err));
+ }
+ goto dongle_filter_out;
+ }
+
+dongle_filter_out:
+ return err;
+}
+#endif /* !CUSTOMER_HW6 */
+#endif /* !OEM_ANDROID */
+
+s32 dhd_config_dongle(struct bcm_cfg80211 *cfg)
+{
+#ifndef DHD_SDALIGN
+#define DHD_SDALIGN 32
+#endif
+ struct net_device *ndev;
+ s32 err = 0;
+ dhd_pub_t *dhd = NULL;
+#if !defined(OEM_ANDROID) && defined(BCMSDIO)
+ s32 glom = CUSTOM_GLOM_SETTING;
+ BCM_REFERENCE(glom);
+#endif
+
+ WL_TRACE(("In\n"));
+
+ ndev = bcmcfg_to_prmry_ndev(cfg);
+ dhd = (dhd_pub_t *)(cfg->pub);
+
+ err = wl_dongle_up(ndev);
+ if (unlikely(err)) {
+ WL_ERR(("wl_dongle_up failed\n"));
+ goto default_conf_out;
+ }
+
+ if (dhd && dhd->fw_preinit) {
+ /* Init config will be done by fw preinit context */
+ return BCME_OK;
+ }
+
+#ifndef OEM_ANDROID
+#ifndef CUSTOMER_HW6
+ err = wl_dongle_power(ndev, PM_FAST);
+ if (unlikely(err)) {
+ WL_ERR(("wl_dongle_power failed\n"));
+ goto default_conf_out;
+ }
+#ifdef BCMSDIO
+ err = wl_dongle_glom(ndev, glom, DHD_SDALIGN);
+ if (unlikely(err)) {
+ WL_ERR(("wl_dongle_glom failed\n"));
+ goto default_conf_out;
+ }
+#endif /* BCMSDIO */
+ err = wl_dongle_roam(ndev, (cfg->roam_on ? 0 : 1), 3);
+ if (unlikely(err)) {
+ WL_ERR(("wl_dongle_roam failed\n"));
+ goto default_conf_out;
+ }
+ wl_dongle_scantime(ndev, 40, 80);
+ wl_dongle_offload(ndev, 1, 0xf);
+ wl_dongle_filter(ndev, 1);
+#endif /* !CUSTOMER_HW6 */
+#endif /* !OEM_ANDROID */
+
+default_conf_out:
+
+ return err;
+
+}
+
+int dhd_cfgvendor_priv_string_handler(struct bcm_cfg80211 *cfg, struct wireless_dev *wdev,
+ const struct bcm_nlmsg_hdr *nlioc, void *buf)
+{
+ struct net_device *ndev = NULL;
+ dhd_pub_t *dhd;
+ dhd_ioctl_t ioc = { 0, NULL, 0, 0, 0, 0, 0};
+ int ret = 0;
+ int8 index;
+
+ WL_TRACE(("entry: cmd = %d\n", nlioc->cmd));
+
+ dhd = cfg->pub;
+ DHD_OS_WAKE_LOCK(dhd);
+
+ ndev = wdev_to_wlc_ndev(wdev, cfg);
+ index = dhd_net2idx(dhd->info, ndev);
+ if (index == DHD_BAD_IF) {
+ WL_ERR(("Bad ifidx from wdev:%p\n", wdev));
+ ret = BCME_ERROR;
+ goto done;
+ }
+
+ ioc.cmd = nlioc->cmd;
+ ioc.len = nlioc->len;
+ ioc.set = nlioc->set;
+ ioc.driver = nlioc->magic;
+ ioc.buf = buf;
+ ret = dhd_ioctl_process(dhd, index, &ioc, buf);
+ if (ret) {
+ WL_TRACE(("dhd_ioctl_process return err %d\n", ret));
+ ret = OSL_ERROR(ret);
+ goto done;
+ }
+
+done:
+ DHD_OS_WAKE_UNLOCK(dhd);
+ return ret;
+}
diff --git a/bcmdhd.101.10.361.x/dhd_cfg80211.h b/bcmdhd.101.10.361.x/dhd_cfg80211.h
new file mode 100755
index 0000000..1abf42b
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_cfg80211.h
@@ -0,0 +1,49 @@
+/*
+ * Linux cfg80211 driver - Dongle Host Driver (DHD) related
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+#ifndef __DHD_CFG80211__
+#define __DHD_CFG80211__
+
+#include <wl_cfg80211.h>
+#include <wl_cfgp2p.h>
+#include <brcm_nl80211.h>
+
+#ifndef WL_ERR
+#define WL_ERR CFG80211_ERR
+#endif
+#ifndef WL_TRACE
+#define WL_TRACE CFG80211_TRACE
+#endif
+
+s32 dhd_cfg80211_init(struct bcm_cfg80211 *cfg);
+s32 dhd_cfg80211_deinit(struct bcm_cfg80211 *cfg);
+s32 dhd_cfg80211_down(struct bcm_cfg80211 *cfg);
+s32 dhd_cfg80211_set_p2p_info(struct bcm_cfg80211 *cfg, int val);
+s32 dhd_cfg80211_clean_p2p_info(struct bcm_cfg80211 *cfg);
+s32 dhd_config_dongle(struct bcm_cfg80211 *cfg);
+int dhd_cfgvendor_priv_string_handler(struct bcm_cfg80211 *cfg,
+ struct wireless_dev *wdev, const struct bcm_nlmsg_hdr *nlioc, void *data);
+s32 wl_dongle_roam(struct net_device *ndev, u32 roamvar, u32 bcn_timeout);
+#endif /* __DHD_CFG80211__ */
diff --git a/bcmdhd.101.10.361.x/dhd_common.c b/bcmdhd.101.10.361.x/dhd_common.c
new file mode 100755
index 0000000..a8f8ef6
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_common.c
@@ -0,0 +1,11596 @@
+/*
+ * Broadcom Dongle Host Driver (DHD), common DHD core.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+#include <typedefs.h>
+#include <osl.h>
+
+#include <epivers.h>
+#include <bcmutils.h>
+#include <bcmstdlib_s.h>
+
+#include <bcmendian.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_ip.h>
+#include <bcmevent.h>
+#include <dhdioctl.h>
+#ifdef DHD_SDTC_ETB_DUMP
+#include <bcmiov.h>
+#endif /* DHD_SDTC_ETB_DUMP */
+
+#ifdef BCMDBG
+#include <dhd_macdbg.h>
+#endif /* BCMDBG */
+
+#ifdef PCIE_FULL_DONGLE
+#include <bcmmsgbuf.h>
+#endif /* PCIE_FULL_DONGLE */
+
+#ifdef SHOW_LOGTRACE
+#include <event_log.h>
+#endif /* SHOW_LOGTRACE */
+
+#ifdef BCMPCIE
+#include <dhd_flowring.h>
+#endif
+
+#include <dhd_bus.h>
+#include <dhd_proto.h>
+#include <bcmsdbus.h>
+#include <dhd_dbg.h>
+#include <802.1d.h>
+#include <dhd_debug.h>
+#include <dhd_dbg_ring.h>
+#include <dhd_mschdbg.h>
+#include <msgtrace.h>
+#include <dhd_config.h>
+#include <wl_android.h>
+
+#ifdef WL_CFG80211
+#include <wl_cfg80211.h>
+#include <wl_cfgvif.h>
+#endif
+#if defined(OEM_ANDROID) && defined(PNO_SUPPORT)
+#include <dhd_pno.h>
+#endif /* (OEM_ANDROID) && (PNO_SUPPORT) */
+#ifdef RTT_SUPPORT
+#include <dhd_rtt.h>
+#endif
+
+#ifdef DNGL_EVENT_SUPPORT
+#include <dnglevent.h>
+#endif
+
+#ifdef IL_BIGENDIAN
+#include <bcmendian.h>
+#define htod32(i) (bcmswap32(i))
+#define htod16(i) (bcmswap16(i))
+#define dtoh32(i) (bcmswap32(i))
+#define dtoh16(i) (bcmswap16(i))
+#define htodchanspec(i) htod16(i)
+#define dtohchanspec(i) dtoh16(i)
+#else
+#define htod32(i) (i)
+#define htod16(i) (i)
+#define dtoh32(i) (i)
+#define dtoh16(i) (i)
+#define htodchanspec(i) (i)
+#define dtohchanspec(i) (i)
+#endif /* IL_BIGENDIAN */
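+
+/*
+ * Example (illustrative): on a little-endian host htod32(0x12345678)
+ * is a no-op; with IL_BIGENDIAN defined it byte-swaps to 0x78563412,
+ * keeping values in the dongle's little-endian byte order.
+ */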
+
+#ifdef PROP_TXSTATUS
+#include <wlfc_proto.h>
+#include <dhd_wlfc.h>
+#endif
+
+#if defined(__linux__)
+#include <dhd_linux.h>
+#endif /* __linux__ */
+
+#ifdef DHD_WMF
+#include <dhd_wmf_linux.h>
+#endif /* DHD_WMF */
+
+#ifdef DHD_L2_FILTER
+#include <dhd_l2_filter.h>
+#endif /* DHD_L2_FILTER */
+
+#ifdef DHD_PSTA
+#include <dhd_psta.h>
+#endif /* DHD_PSTA */
+#ifdef DHD_TIMESYNC
+#include <dhd_timesync.h>
+#endif /* DHD_TIMESYNC */
+
+#ifdef DHD_WET
+#include <dhd_wet.h>
+#endif /* DHD_WET */
+#if defined(NDIS)
+#include <siutils.h>
+#endif
+
+#ifdef DHD_LOG_DUMP
+#include <dhd_dbg.h>
+#ifdef DHD_PKT_LOGGING
+#include <dhd_pktlog.h>
+#endif
+#endif /* DHD_LOG_DUMP */
+
+#ifdef DHD_LOG_PRINT_RATE_LIMIT
+int log_print_threshold = 0;
+#endif /* DHD_LOG_PRINT_RATE_LIMIT */
+
+#ifdef DHD_DEBUGABILITY_LOG_DUMP_RING
+int dbgring_msg_level = DHD_ERROR_VAL | DHD_FWLOG_VAL | DHD_INFO_VAL
+ | DHD_EVENT_VAL | DHD_PKT_MON_VAL | DHD_IOVAR_MEM_VAL;
+int dhd_msg_level = DHD_ERROR_VAL;
+#else
+int dbgring_msg_level = 0;
+/* For CUSTOMER_HW4/Hikey do not enable DHD_ERROR_MEM_VAL by default */
+int dhd_msg_level = DHD_ERROR_VAL | DHD_FWLOG_VAL;
+#endif /* DHD_DEBUGABILITY_LOG_DUMP_RING */
+
+#ifdef NDIS
+extern uint wl_msg_level;
+#endif
+
+#if defined(WL_WLC_SHIM)
+#include <wl_shim.h>
+#else
+#if defined(NDIS)
+#include <wl_port_if.h>
+#endif
+#endif /* WL_WLC_SHIM */
+
+#ifdef DHD_DEBUG
+#include <sdiovar.h>
+#endif /* DHD_DEBUG */
+
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+#include <linux/pm_runtime.h>
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+
+#ifdef CSI_SUPPORT
+#include <dhd_csi.h>
+#endif /* CSI_SUPPORT */
+
+#if defined(BTLOG) && !defined(BCMPCIE)
+#error "BT logging supported only with PCIe"
+#endif /* defined(BTLOG) && !defined(BCMPCIE) */
+
+#ifdef SOFTAP
+char fw_path2[MOD_PARAM_PATHLEN];
+extern bool softap_enabled;
+#endif
+#ifdef PROP_TXSTATUS
+extern int disable_proptx;
+#endif /* PROP_TXSTATUS */
+
+#ifdef REPORT_FATAL_TIMEOUTS
+#ifdef BCMINTERNAL
+/*
+ * Internal builds are used by DVT.
+ * The timeouts are not required for DVT builds, since they use IOVARs
+ * (e.g. SROM programming) that take a long time, so the timeout values
+ * are set to 0. If DVT needs this feature, it can be enabled via IOVAR.
+ *
+ * SVT uses external builds anyway.
+ */
+#define SCAN_TIMEOUT_DEFAULT 0
+#define JOIN_TIMEOUT_DEFAULT 0
+#define BUS_TIMEOUT_DEFAULT 0
+#define CMD_TIMEOUT_DEFAULT 0
+#else
+/* Default timeout value in ms */
+#ifdef DHD_EFI
+#define BUS_TIMEOUT_DEFAULT 800 /* 800ms */
+#define CMD_TIMEOUT_DEFAULT 1500 /* 1.5s */
+#define SCAN_TIMEOUT_DEFAULT 0
+#define JOIN_TIMEOUT_DEFAULT 0
+#else
+#define BUS_TIMEOUT_DEFAULT 800
+#define CMD_TIMEOUT_DEFAULT 1200
+#define SCAN_TIMEOUT_DEFAULT 17000
+#define JOIN_TIMEOUT_DEFAULT 7500
+#endif /* DHD_EFI */
+#endif /* BCMINTERNAL */
+#endif /* REPORT_FATAL_TIMEOUTS */
+
+#ifdef SHOW_LOGTRACE
+#define BYTES_AHEAD_NUM 10	/* address in the map file precedes the symbol by this many bytes */
+#define READ_NUM_BYTES 1000	/* number of bytes to read from the map file per iteration */
+#define GO_BACK_FILE_POS_NUM_BYTES 100	/* rewind the file position by this many bytes */
+static char *ramstart_str = " text_start"; /* map file string marking the ramstart address */
+static char *rodata_start_str = " rodata_start"; /* map file string marking the rodata start address */
+static char *rodata_end_str = " rodata_end"; /* map file string marking the rodata end address */
+#define RAMSTART_BIT 0x01
+#define RDSTART_BIT 0x02
+#define RDEND_BIT 0x04
+#define ALL_MAP_VAL (RAMSTART_BIT | RDSTART_BIT | RDEND_BIT)
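+/* ALL_MAP_VAL: all three map-file entries (ramstart, rodata start, rodata end) located */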
+#endif /* SHOW_LOGTRACE */
+
+#ifdef SHOW_LOGTRACE
+#if defined(LINUX) || defined(linux)
+/* The fw file path is taken either from the module parameter at
+ * insmod time or from a constant whose value differs per platform.
+ */
+extern char *st_str_file_path;
+#else
+static char *st_str_file_path = "rtecdc.bin";
+#endif /* LINUX */
+#endif /* SHOW_LOGTRACE */
+
+#ifdef EWP_EDL
+typedef struct msg_hdr_edl {
+ uint32 infobuf_ver;
+ info_buf_payload_hdr_t pyld_hdr;
+ msgtrace_hdr_t trace_hdr;
+} msg_hdr_edl_t;
+#endif /* EWP_EDL */
+
+#define DHD_TPUT_MAX_TX_PKTS_BATCH 1000
+
+/* Last connection success/failure status */
+uint32 dhd_conn_event;
+uint32 dhd_conn_status;
+uint32 dhd_conn_reason;
+
+extern int dhd_iscan_request(void * dhdp, uint16 action);
+extern void dhd_ind_scan_confirm(void *h, bool status);
+extern int dhd_iscan_in_progress(void *h);
+void dhd_iscan_lock(void);
+void dhd_iscan_unlock(void);
+extern int dhd_change_mtu(dhd_pub_t *dhd, int new_mtu, int ifidx);
+#if defined(OEM_ANDROID) && !defined(AP) && defined(WLP2P)
+extern int dhd_get_concurrent_capabilites(dhd_pub_t *dhd);
+#endif
+
+extern int dhd_socram_dump(struct dhd_bus *bus);
+extern void dhd_set_packet_filter(dhd_pub_t *dhd);
+
+#ifdef DNGL_EVENT_SUPPORT
+static void dngl_host_event_process(dhd_pub_t *dhdp, bcm_dngl_event_t *event,
+ bcm_dngl_event_msg_t *dngl_event, size_t pktlen);
+static int dngl_host_event(dhd_pub_t *dhdp, void *pktdata, bcm_dngl_event_msg_t *dngl_event,
+ size_t pktlen);
+#endif /* DNGL_EVENT_SUPPORT */
+
+#ifdef WL_CFGVENDOR_SEND_HANG_EVENT
+static void copy_hang_info_ioctl_timeout(dhd_pub_t *dhd, int ifidx, wl_ioctl_t *ioc);
+#endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
+
+#ifdef REPORT_FATAL_TIMEOUTS
+static void dhd_set_join_error(dhd_pub_t *pub, uint32 mask);
+#endif /* REPORT_FATAL_TIMEOUTS */
+
+#ifdef DHD_SEND_HANG_IOCTL_SUSPEND_ERROR
+#define MAX_IOCTL_SUSPEND_ERROR 10
+static int ioctl_suspend_error = 0;
+#endif /* DHD_SEND_HANG_IOCTL_SUSPEND_ERROR */
+
+/* Should ideally read this from the target (taken from wlu) */
+#define MAX_CHUNK_LEN 1408 /* 8 * 8 * 22 */
+
+#if defined(OEM_ANDROID)
+/* note these variables will be used with wext */
+bool ap_cfg_running = FALSE;
+bool ap_fw_loaded = FALSE;
+#endif /* defined(OEM_ANDROID) */
+
+#ifdef WLEASYMESH
+extern int dhd_set_1905_almac(dhd_pub_t *dhdp, uint8 ifidx, uint8* ea, bool mcast);
+extern int dhd_get_1905_almac(dhd_pub_t *dhdp, uint8 ifidx, uint8* ea, bool mcast);
+#endif /* WLEASYMESH */
+
+#define CHIPID_MISMATCH 8
+
+#define DHD_VERSION "Dongle Host Driver, version " EPI_VERSION_STR "\n"
+
+#if defined(DHD_DEBUG) && defined(DHD_COMPILED)
+const char dhd_version[] = DHD_VERSION DHD_COMPILED " compiled on "
+ __DATE__ " at " __TIME__ "\n\0<TIMESTAMP>";
+#else
+const char dhd_version[] = DHD_VERSION;
+#endif /* DHD_DEBUG && DHD_COMPILED */
+
+char fw_version[FW_VER_STR_LEN] = "\0";
+char clm_version[CLM_VER_STR_LEN] = "\0";
+
+char bus_api_revision[BUS_API_REV_STR_LEN] = "\0";
+
+void dhd_set_timer(void *bus, uint wdtick);
+
+#if defined(BCM_ROUTER_DHD)
+static int traffic_mgmt_add_dwm_filter(dhd_pub_t *dhd,
+ trf_mgmt_filter_list_t * trf_mgmt_filter_list, int len);
+#endif
+
+static char* ioctl2str(uint32 ioctl);
+
+/* IOVar table */
+enum {
+ IOV_VERSION = 1,
+ IOV_WLMSGLEVEL,
+ IOV_MSGLEVEL,
+ IOV_BCMERRORSTR,
+ IOV_BCMERROR,
+ IOV_WDTICK,
+ IOV_DUMP,
+ IOV_CLEARCOUNTS,
+ IOV_LOGDUMP,
+ IOV_LOGCAL,
+ IOV_LOGSTAMP,
+ IOV_GPIOOB,
+ IOV_IOCTLTIMEOUT,
+ IOV_CONS,
+ IOV_DCONSOLE_POLL,
+#if defined(DHD_DEBUG)
+ IOV_DHD_JOIN_TIMEOUT_DBG,
+ IOV_SCAN_TIMEOUT,
+ IOV_MEM_DEBUG,
+#ifdef BCMPCIE
+ IOV_FLOW_RING_DEBUG,
+#endif /* BCMPCIE */
+#endif /* defined(DHD_DEBUG) */
+#ifdef PROP_TXSTATUS
+ IOV_PROPTXSTATUS_ENABLE,
+ IOV_PROPTXSTATUS_MODE,
+ IOV_PROPTXSTATUS_OPT,
+#ifdef QMONITOR
+ IOV_QMON_TIME_THRES,
+ IOV_QMON_TIME_PERCENT,
+#endif /* QMONITOR */
+ IOV_PROPTXSTATUS_MODULE_IGNORE,
+ IOV_PROPTXSTATUS_CREDIT_IGNORE,
+ IOV_PROPTXSTATUS_TXSTATUS_IGNORE,
+ IOV_PROPTXSTATUS_RXPKT_CHK,
+#endif /* PROP_TXSTATUS */
+ IOV_BUS_TYPE,
+ IOV_CHANGEMTU,
+ IOV_HOSTREORDER_FLOWS,
+#ifdef DHDTCPACK_SUPPRESS
+ IOV_TCPACK_SUPPRESS,
+#endif /* DHDTCPACK_SUPPRESS */
+#ifdef DHD_WMF
+ IOV_WMF_BSS_ENAB,
+ IOV_WMF_UCAST_IGMP,
+ IOV_WMF_MCAST_DATA_SENDUP,
+#ifdef WL_IGMP_UCQUERY
+ IOV_WMF_UCAST_IGMP_QUERY,
+#endif /* WL_IGMP_UCQUERY */
+#ifdef DHD_UCAST_UPNP
+ IOV_WMF_UCAST_UPNP,
+#endif /* DHD_UCAST_UPNP */
+ IOV_WMF_PSTA_DISABLE,
+#endif /* DHD_WMF */
+#if defined(BCM_ROUTER_DHD)
+ IOV_TRAFFIC_MGMT_DWM,
+#endif /* BCM_ROUTER_DHD */
+ IOV_AP_ISOLATE,
+#ifdef DHD_L2_FILTER
+ IOV_DHCP_UNICAST,
+ IOV_BLOCK_PING,
+ IOV_PROXY_ARP,
+ IOV_GRAT_ARP,
+ IOV_BLOCK_TDLS,
+#endif /* DHD_L2_FILTER */
+ IOV_DHD_IE,
+#ifdef DHD_PSTA
+ IOV_PSTA,
+#endif /* DHD_PSTA */
+#ifdef DHD_WET
+ IOV_WET,
+ IOV_WET_HOST_IPV4,
+ IOV_WET_HOST_MAC,
+#endif /* DHD_WET */
+ IOV_CFG80211_OPMODE,
+ IOV_ASSERT_TYPE,
+#if defined(NDIS)
+ IOV_WAKEIND,
+#endif /* NDIS */
+#if !defined(NDIS) && !defined(BCM_ROUTER_DHD)
+ IOV_LMTEST,
+#endif
+#ifdef DHD_MCAST_REGEN
+ IOV_MCAST_REGEN_BSS_ENABLE,
+#endif
+#ifdef BCMDBG
+ IOV_MACDBG_PD11REGS,
+ IOV_MACDBG_REGLIST,
+ IOV_MACDBG_PSVMPMEMS,
+#endif /* BCMDBG */
+#ifdef SHOW_LOGTRACE
+ IOV_DUMP_TRACE_LOG,
+#endif /* SHOW_LOGTRACE */
+#ifdef REPORT_FATAL_TIMEOUTS
+ IOV_SCAN_TO,
+ IOV_JOIN_TO,
+ IOV_CMD_TO,
+ IOV_OQS_TO,
+#endif /* REPORT_FATAL_TIMEOUTS */
+ IOV_DONGLE_TRAP_TYPE,
+ IOV_DONGLE_TRAP_INFO,
+ IOV_BPADDR,
+ IOV_DUMP_DONGLE, /**< dumps core registers and d11 memories */
+#if defined(DHD_LOG_DUMP)
+#if defined(DHD_EFI)
+ IOV_LOG_CAPTURE_ENABLE,
+#endif
+ IOV_LOG_DUMP,
+#endif /* DHD_LOG_DUMP */
+#ifdef BTLOG
+ IOV_DUMP_BT_LOG,
+ IOV_BTLOG,
+#endif /* BTLOG */
+#ifdef SNAPSHOT_UPLOAD
+ IOV_BT_MEM_DUMP,
+ IOV_BT_UPLOAD,
+#endif /* SNAPSHOT_UPLOAD */
+ IOV_TPUT_TEST,
+#if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS)
+ IOV_PKT_LATENCY,
+#endif /* defined(DHD_AWDL) && defined(AWDL_SLOT_STATS) */
+ IOV_DEBUG_BUF_DEST_STAT,
+#ifdef DHD_PKTTS
+ IOV_PKTTS_ENAB,
+ IOV_PKTTS_FLOW,
+#endif /* DHD_PKTTS */
+#ifdef DHD_DEBUG
+ IOV_INDUCE_ERROR,
+#endif /* DHD_DEBUG */
+#if defined(DHD_EFI)
+ IOV_INTR_POLL,
+#endif
+ IOV_FIS_TRIGGER,
+#ifdef WL_IFACE_MGMT_CONF
+#ifdef WL_CFG80211
+#ifdef WL_NANP2P
+ IOV_CONC_DISC,
+#endif /* WL_NANP2P */
+#ifdef WL_IFACE_MGMT
+ IOV_IFACE_POLICY,
+#endif /* WL_IFACE_MGMT */
+#endif /* WL_CFG80211 */
+#endif /* WL_IFACE_MGMT_CONF */
+#ifdef RTT_GEOFENCE_CONT
+#if defined (RTT_SUPPORT) && defined (WL_NAN)
+ IOV_RTT_GEOFENCE_TYPE_OVRD,
+#endif /* RTT_SUPPORT && WL_NAN */
+#endif /* RTT_GEOFENCE_CONT */
+ IOV_FW_VBS,
+#ifdef DHD_TX_PROFILE
+ IOV_TX_PROFILE_TAG,
+ IOV_TX_PROFILE_ENABLE,
+ IOV_TX_PROFILE_DUMP,
+#endif /* defined(DHD_TX_PROFILE) */
+ IOV_CHECK_TRAP_ROT,
+#if defined(DHD_AWDL)
+ IOV_AWDL_LLC_ENABLE,
+#endif
+#ifdef WLEASYMESH
+ IOV_1905_AL_UCAST,
+ IOV_1905_AL_MCAST,
+#endif /* WLEASYMESH */
+ IOV_LAST
+};
+
+const bcm_iovar_t dhd_iovars[] = {
+ /* name varid flags flags2 type minlen */
+ {"version", IOV_VERSION, 0, 0, IOVT_BUFFER, 0},
+ {"wlmsglevel", IOV_WLMSGLEVEL, 0, 0, IOVT_UINT32, 0 },
+#ifdef DHD_DEBUG
+ {"msglevel", IOV_MSGLEVEL, 0, 0, IOVT_UINT32, 0},
+ {"mem_debug", IOV_MEM_DEBUG, 0, 0, IOVT_BUFFER, 0 },
+#ifdef BCMPCIE
+ {"flow_ring_debug", IOV_FLOW_RING_DEBUG, 0, 0, IOVT_BUFFER, 0 },
+#endif /* BCMPCIE */
+#ifdef NDIS
+ {"wlmsglevel", IOV_WLMSGLEVEL, 0, 0, IOVT_UINT32, 0},
+#endif /* NDIS */
+#endif /* DHD_DEBUG */
+ {"bcmerrorstr", IOV_BCMERRORSTR, 0, 0, IOVT_BUFFER, BCME_STRLEN},
+ {"bcmerror", IOV_BCMERROR, 0, 0, IOVT_INT8, 0},
+ {"wdtick", IOV_WDTICK, 0, 0, IOVT_UINT32, 0},
+ {"dump", IOV_DUMP, 0, 0, IOVT_BUFFER, DHD_IOCTL_MAXLEN_32K},
+ {"cons", IOV_CONS, 0, 0, IOVT_BUFFER, 0},
+ {"dconpoll", IOV_DCONSOLE_POLL, 0, 0, IOVT_UINT32, 0},
+ {"clearcounts", IOV_CLEARCOUNTS, 0, 0, IOVT_VOID, 0},
+#ifdef BCMPERFSTATS
+ {"logdump", IOV_LOGDUMP, 0, 0, IOVT_BUFFER, DHD_IOCTL_MAXLEN},
+ {"logcal", IOV_LOGCAL, 0, 0, IOVT_UINT32, 0},
+ {"logstamp", IOV_LOGSTAMP, 0, 0, IOVT_BUFFER, 0},
+#endif
+ {"gpioob", IOV_GPIOOB, 0, 0, IOVT_UINT32, 0},
+ {"ioctl_timeout", IOV_IOCTLTIMEOUT, 0, 0, IOVT_UINT32, 0},
+#ifdef PROP_TXSTATUS
+ {"proptx", IOV_PROPTXSTATUS_ENABLE, 0, 0, IOVT_BOOL, 0 },
+ /*
+	set the proptxstatus operation mode:
+	0 - Do not do any proptxstatus flow control
+ 1 - Use implied credit from a packet status
+ 2 - Use explicit credit
+ */
+ {"ptxmode", IOV_PROPTXSTATUS_MODE, 0, 0, IOVT_UINT32, 0 },
+ {"proptx_opt", IOV_PROPTXSTATUS_OPT, 0, 0, IOVT_UINT32, 0 },
+#ifdef QMONITOR
+ {"qtime_thres", IOV_QMON_TIME_THRES, 0, 0, IOVT_UINT32, 0 },
+ {"qtime_percent", IOV_QMON_TIME_PERCENT, 0, 0, IOVT_UINT32, 0 },
+#endif /* QMONITOR */
+ {"pmodule_ignore", IOV_PROPTXSTATUS_MODULE_IGNORE, 0, 0, IOVT_BOOL, 0 },
+ {"pcredit_ignore", IOV_PROPTXSTATUS_CREDIT_IGNORE, 0, 0, IOVT_BOOL, 0 },
+ {"ptxstatus_ignore", IOV_PROPTXSTATUS_TXSTATUS_IGNORE, 0, 0, IOVT_BOOL, 0 },
+ {"rxpkt_chk", IOV_PROPTXSTATUS_RXPKT_CHK, 0, 0, IOVT_BOOL, 0 },
+#endif /* PROP_TXSTATUS */
+ {"bustype", IOV_BUS_TYPE, 0, 0, IOVT_UINT32, 0},
+ {"changemtu", IOV_CHANGEMTU, 0, 0, IOVT_UINT32, 0 },
+ {"host_reorder_flows", IOV_HOSTREORDER_FLOWS, 0, 0, IOVT_BUFFER,
+ (WLHOST_REORDERDATA_MAXFLOWS + 1) },
+#ifdef DHDTCPACK_SUPPRESS
+ {"tcpack_suppress", IOV_TCPACK_SUPPRESS, 0, 0, IOVT_UINT8, 0 },
+#endif /* DHDTCPACK_SUPPRESS */
+#ifdef DHD_WMF
+ {"wmf_bss_enable", IOV_WMF_BSS_ENAB, 0, 0, IOVT_BOOL, 0 },
+ {"wmf_ucast_igmp", IOV_WMF_UCAST_IGMP, 0, 0, IOVT_BOOL, 0 },
+ {"wmf_mcast_data_sendup", IOV_WMF_MCAST_DATA_SENDUP, 0, 0, IOVT_BOOL, 0 },
+#ifdef WL_IGMP_UCQUERY
+ {"wmf_ucast_igmp_query", IOV_WMF_UCAST_IGMP_QUERY, (0), 0, IOVT_BOOL, 0 },
+#endif /* WL_IGMP_UCQUERY */
+#ifdef DHD_UCAST_UPNP
+ {"wmf_ucast_upnp", IOV_WMF_UCAST_UPNP, (0), 0, IOVT_BOOL, 0 },
+#endif /* DHD_UCAST_UPNP */
+ {"wmf_psta_disable", IOV_WMF_PSTA_DISABLE, (0), 0, IOVT_BOOL, 0 },
+#endif /* DHD_WMF */
+#if defined(BCM_ROUTER_DHD)
+ {"trf_mgmt_filters_add", IOV_TRAFFIC_MGMT_DWM, (0), 0, IOVT_BUFFER, 0},
+#endif /* BCM_ROUTER_DHD */
+#ifdef DHD_L2_FILTER
+ {"dhcp_unicast", IOV_DHCP_UNICAST, (0), 0, IOVT_BOOL, 0 },
+#endif /* DHD_L2_FILTER */
+ {"ap_isolate", IOV_AP_ISOLATE, (0), 0, IOVT_BOOL, 0},
+#ifdef DHD_L2_FILTER
+ {"block_ping", IOV_BLOCK_PING, (0), 0, IOVT_BOOL, 0},
+ {"proxy_arp", IOV_PROXY_ARP, (0), 0, IOVT_BOOL, 0},
+ {"grat_arp", IOV_GRAT_ARP, (0), 0, IOVT_BOOL, 0},
+ {"block_tdls", IOV_BLOCK_TDLS, (0), IOVT_BOOL, 0},
+#endif /* DHD_L2_FILTER */
+ {"dhd_ie", IOV_DHD_IE, (0), 0, IOVT_BUFFER, 0},
+#ifdef DHD_PSTA
+	/* PSTA/PSR Mode configuration. 0: DISABLED 1: PSTA 2: PSR */
+ {"psta", IOV_PSTA, 0, 0, IOVT_UINT32, 0},
+#endif /* DHD_PSTA */
+#ifdef DHD_WET
+	/* WET Mode configuration. 0: DISABLED 1: WET */
+ {"wet", IOV_WET, 0, 0, IOVT_UINT32, 0},
+ {"wet_host_ipv4", IOV_WET_HOST_IPV4, 0, 0, IOVT_UINT32, 0},
+ {"wet_host_mac", IOV_WET_HOST_MAC, 0, 0, IOVT_BUFFER, 0},
+#endif /* DHD_WET */
+ {"op_mode", IOV_CFG80211_OPMODE, 0, 0, IOVT_UINT32, 0 },
+ {"assert_type", IOV_ASSERT_TYPE, (0), 0, IOVT_UINT32, 0},
+#if defined(NDIS)
+ { "wowl_wakeind", IOV_WAKEIND, 0, 0, IOVT_UINT32, 0 },
+#endif /* NDIS */
+#if !defined(NDIS) && !defined(BCM_ROUTER_DHD)
+ {"lmtest", IOV_LMTEST, 0, 0, IOVT_UINT32, 0 },
+#endif
+#ifdef DHD_MCAST_REGEN
+ {"mcast_regen_bss_enable", IOV_MCAST_REGEN_BSS_ENABLE, 0, 0, IOVT_BOOL, 0},
+#endif
+#ifdef BCMDBG
+ {"pd11regs", IOV_MACDBG_PD11REGS, 0, 0, IOVT_BUFFER, 0},
+ {"mreglist", IOV_MACDBG_REGLIST, 0, 0, IOVT_BUFFER, 0},
+ {"psvmpmems", IOV_MACDBG_PSVMPMEMS, 0, 0, IOVT_BUFFER, 0},
+#endif /* BCMDBG */
+#ifdef SHOW_LOGTRACE
+ {"dump_trace_buf", IOV_DUMP_TRACE_LOG, 0, 0, IOVT_BUFFER, sizeof(trace_buf_info_t) },
+#endif /* SHOW_LOGTRACE */
+#ifdef REPORT_FATAL_TIMEOUTS
+ {"scan_timeout", IOV_SCAN_TO, 0, 0, IOVT_UINT32, 0 },
+ {"join_timeout", IOV_JOIN_TO, 0, 0, IOVT_UINT32, 0 },
+ {"cmd_timeout", IOV_CMD_TO, 0, 0, IOVT_UINT32, 0 },
+ {"oqs_timeout", IOV_OQS_TO, 0, 0, IOVT_UINT32, 0 },
+#endif /* REPORT_FATAL_TIMEOUTS */
+ {"trap_type", IOV_DONGLE_TRAP_TYPE, 0, 0, IOVT_UINT32, 0 },
+ {"trap_info", IOV_DONGLE_TRAP_INFO, 0, 0, IOVT_BUFFER, sizeof(trap_t) },
+#ifdef DHD_DEBUG
+ {"bpaddr", IOV_BPADDR, 0, 0, IOVT_BUFFER, sizeof(sdreg_t) },
+#endif /* DHD_DEBUG */
+ {"dump_dongle", IOV_DUMP_DONGLE, 0, 0, IOVT_BUFFER,
+ MAX(sizeof(dump_dongle_in_t), sizeof(dump_dongle_out_t)) },
+#if defined(DHD_LOG_DUMP)
+#if defined(DHD_EFI)
+ {"log_capture_enable", IOV_LOG_CAPTURE_ENABLE, 0, 0, IOVT_UINT8, 0},
+#endif
+ {"log_dump", IOV_LOG_DUMP, 0, 0, IOVT_UINT8, 0},
+#endif /* DHD_LOG_DUMP */
+#ifdef BTLOG
+ {"dump_bt_log", IOV_DUMP_BT_LOG, 0, 0, IOVT_BUFFER, sizeof(bt_log_buf_info_t) },
+ {"btlog", IOV_BTLOG, 0, 0, IOVT_UINT32, 0 },
+#endif /* BTLOG */
+#ifdef SNAPSHOT_UPLOAD
+ {"bt_mem_dump", IOV_BT_MEM_DUMP, 0, 0, IOVT_UINT32, 0},
+ {"bt_upload", IOV_BT_UPLOAD, 0, 0, IOVT_BUFFER, sizeof(bt_log_buf_info_t) },
+#endif /* SNAPSHOT_UPLOAD */
+ {"tput_test", IOV_TPUT_TEST, 0, 0, IOVT_BUFFER, sizeof(tput_test_t)},
+ {"debug_buf_dest_stat", IOV_DEBUG_BUF_DEST_STAT, 0, 0, IOVT_UINT32, 0 },
+#ifdef DHD_PKTTS
+ {"pktts_enab", IOV_PKTTS_ENAB, (0), 0, IOVT_BOOL, 0 },
+ {"pktts_flow", IOV_PKTTS_FLOW, (0), 0, IOVT_BUFFER, sizeof(tput_test_t) },
+#endif /* DHD_PKTTS */
+#if defined(DHD_EFI)
+ {"intr_poll", IOV_INTR_POLL, 0, 0, IOVT_BUFFER, sizeof(intr_poll_t)},
+#endif
+#if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS)
+ {"pkt_latency", IOV_PKT_LATENCY, 0, 0, IOVT_UINT32, 0 },
+#endif /* defined(DHD_AWDL) && defined(AWDL_SLOT_STATS) */
+#if defined(DHD_SSSR_DUMP)
+ {"fis_trigger", IOV_FIS_TRIGGER, 0, 0, IOVT_UINT32, 0},
+#endif
+#ifdef DHD_DEBUG
+ {"induce_error", IOV_INDUCE_ERROR, (0), 0, IOVT_UINT16, 0 },
+#endif /* DHD_DEBUG */
+#ifdef WL_IFACE_MGMT_CONF
+#ifdef WL_CFG80211
+#ifdef WL_NANP2P
+ {"conc_disc", IOV_CONC_DISC, (0), 0, IOVT_UINT16, 0 },
+#endif /* WL_NANP2P */
+#ifdef WL_IFACE_MGMT
+ {"if_policy", IOV_IFACE_POLICY, (0), 0, IOVT_BUFFER, sizeof(iface_mgmt_data_t)},
+#endif /* WL_IFACE_MGMT */
+#endif /* WL_CFG80211 */
+#endif /* WL_IFACE_MGMT_CONF */
+#ifdef RTT_GEOFENCE_CONT
+#if defined (RTT_SUPPORT) && defined (WL_NAN)
+ {"rtt_geofence_type_ovrd", IOV_RTT_GEOFENCE_TYPE_OVRD, (0), 0, IOVT_BOOL, 0},
+#endif /* RTT_SUPPORT && WL_NAN */
+#endif /* RTT_GEOFENCE_CONT */
+ {"fw_verbose", IOV_FW_VBS, 0, 0, IOVT_UINT32, 0},
+#ifdef DHD_TX_PROFILE
+ {"tx_profile_tag", IOV_TX_PROFILE_TAG, 0, 0, IOVT_BUFFER,
+ sizeof(dhd_tx_profile_protocol_t)},
+ {"tx_profile_enable", IOV_TX_PROFILE_ENABLE, 0, 0, IOVT_BOOL, 0},
+ {"tx_profile_dump", IOV_TX_PROFILE_DUMP, 0, 0, IOVT_UINT32, 0},
+#endif /* defined(DHD_TX_PROFILE) */
+ {"check_trap_rot", IOV_CHECK_TRAP_ROT, (0), 0, IOVT_BOOL, 0},
+#if defined(DHD_AWDL)
+ {"awdl_llc_enable", IOV_AWDL_LLC_ENABLE, 0, 0, IOVT_BOOL, 0},
+#endif
+ /* --- add new iovars *ABOVE* this line --- */
+#ifdef WLEASYMESH
+ {"1905_al_ucast", IOV_1905_AL_UCAST, 0, 0, IOVT_BUFFER, ETHER_ADDR_LEN},
+ {"1905_al_mcast", IOV_1905_AL_MCAST, 0, 0, IOVT_BUFFER, ETHER_ADDR_LEN},
+#endif /* WLEASYMESH */
+ {NULL, 0, 0, 0, 0, 0 }
+};
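+
+/*
+ * Note (illustrative): this table is searched by iovar name at runtime;
+ * the varid selects the handler case, while the type and minlen fields
+ * allow basic argument validation before the iovar is dispatched.
+ */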
+
+#define DHD_IOVAR_BUF_SIZE 128
+
+#if defined(LINUX) || defined(linux) || defined(DHD_EFI)
+fw_download_status_t
+dhd_fw_download_status(dhd_pub_t * dhd_pub)
+{
+ return dhd_pub->fw_download_status;
+}
+#endif /* defined(LINUX) || defined(linux) || defined(DHD_EFI) */
+
+bool
+dhd_query_bus_erros(dhd_pub_t *dhdp)
+{
+ bool ret = FALSE;
+
+ if (dhdp->dongle_reset) {
+ DHD_ERROR_RLMT(("%s: Dongle Reset occurred, cannot proceed\n",
+ __FUNCTION__));
+ ret = TRUE;
+ }
+
+ if (dhdp->dongle_trap_occured) {
+ DHD_ERROR_RLMT(("%s: FW TRAP has occurred, cannot proceed\n",
+ __FUNCTION__));
+ ret = TRUE;
+#ifdef OEM_ANDROID
+ dhdp->hang_reason = HANG_REASON_DONGLE_TRAP;
+ dhd_os_send_hang_message(dhdp);
+#endif /* OEM_ANDROID */
+ }
+
+ if (dhdp->iovar_timeout_occured) {
+ DHD_ERROR_RLMT(("%s: Resumed on timeout for previous IOVAR, cannot proceed\n",
+ __FUNCTION__));
+ ret = TRUE;
+ }
+
+#ifdef PCIE_FULL_DONGLE
+ if (dhdp->d3ack_timeout_occured) {
+ DHD_ERROR_RLMT(("%s: Resumed on timeout for previous D3ACK, cannot proceed\n",
+ __FUNCTION__));
+ ret = TRUE;
+ }
+ if (dhdp->livelock_occured) {
+ DHD_ERROR_RLMT(("%s: LIVELOCK occurred for previous msg, cannot proceed\n",
+ __FUNCTION__));
+ ret = TRUE;
+ }
+
+ if (dhdp->pktid_audit_failed) {
+ DHD_ERROR_RLMT(("%s: pktid_audit_failed, cannot proceed\n",
+ __FUNCTION__));
+ ret = TRUE;
+ }
+#endif /* PCIE_FULL_DONGLE */
+
+ if (dhdp->iface_op_failed) {
+ DHD_ERROR_RLMT(("%s: iface_op_failed, cannot proceed\n",
+ __FUNCTION__));
+ ret = TRUE;
+ }
+
+ if (dhdp->scan_timeout_occurred) {
+ DHD_ERROR_RLMT(("%s: scan_timeout_occurred, cannot proceed\n",
+ __FUNCTION__));
+ ret = TRUE;
+ }
+
+ if (dhdp->scan_busy_occurred) {
+ DHD_ERROR_RLMT(("%s: scan_busy_occurred, cannot proceed\n",
+ __FUNCTION__));
+ ret = TRUE;
+ }
+
+#ifdef DNGL_AXI_ERROR_LOGGING
+ if (dhdp->axi_error) {
+ DHD_ERROR_RLMT(("%s: AXI error occurred, cannot proceed\n",
+ __FUNCTION__));
+ ret = TRUE;
+ }
+#endif /* DNGL_AXI_ERROR_LOGGING */
+
+#if defined(BCMPCIE)
+ if (dhd_bus_get_linkdown(dhdp)) {
+ DHD_ERROR_RLMT(("%s : PCIE Link down occurred, cannot proceed\n",
+ __FUNCTION__));
+ ret = TRUE;
+ }
+
+ if (dhd_bus_get_cto(dhdp)) {
+ DHD_ERROR_RLMT(("%s : CTO Recovery reported, cannot proceed\n",
+ __FUNCTION__));
+ ret = TRUE;
+ }
+#endif
+
+ return ret;
+}
+
+void
+dhd_clear_bus_errors(dhd_pub_t *dhdp)
+{
+ if (!dhdp)
+ return;
+
+ dhdp->dongle_reset = FALSE;
+ dhdp->dongle_trap_occured = FALSE;
+ dhdp->iovar_timeout_occured = FALSE;
+#ifdef PCIE_FULL_DONGLE
+ dhdp->d3ack_timeout_occured = FALSE;
+ dhdp->livelock_occured = FALSE;
+ dhdp->pktid_audit_failed = FALSE;
+#endif
+ dhdp->iface_op_failed = FALSE;
+ dhdp->scan_timeout_occurred = FALSE;
+ dhdp->scan_busy_occurred = FALSE;
+#ifdef BT_OVER_PCIE
+ dhdp->dongle_trap_due_to_bt = FALSE;
+#endif
+}
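+
+/*
+ * Note (illustrative): dhd_query_bus_erros() reports whether any of the
+ * above error flags is latched; dhd_clear_bus_errors() resets the same
+ * set, and is typically called before re-initializing the bus.
+ */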
+
+#ifdef DHD_SSSR_DUMP
+
+/* This can be overridden by the module parameter defined in dhd_linux.c */
+uint sssr_enab = TRUE;
+
+#ifdef DHD_FIS_DUMP
+uint fis_enab = TRUE;
+#else
+uint fis_enab = FALSE;
+#endif /* DHD_FIS_DUMP */
+
+int
+dhd_sssr_mempool_init(dhd_pub_t *dhd)
+{
+ dhd->sssr_mempool = (uint8 *) MALLOCZ(dhd->osh, DHD_SSSR_MEMPOOL_SIZE);
+ if (dhd->sssr_mempool == NULL) {
+ DHD_ERROR(("%s: MALLOC of sssr_mempool failed\n",
+ __FUNCTION__));
+ return BCME_ERROR;
+ }
+ return BCME_OK;
+}
+
+void
+dhd_sssr_mempool_deinit(dhd_pub_t *dhd)
+{
+ if (dhd->sssr_mempool) {
+ MFREE(dhd->osh, dhd->sssr_mempool, DHD_SSSR_MEMPOOL_SIZE);
+ dhd->sssr_mempool = NULL;
+ }
+}
+
+int
+dhd_sssr_reg_info_init(dhd_pub_t *dhd)
+{
+ dhd->sssr_reg_info = (sssr_reg_info_cmn_t *) MALLOCZ(dhd->osh, sizeof(sssr_reg_info_cmn_t));
+ if (dhd->sssr_reg_info == NULL) {
+ DHD_ERROR(("%s: MALLOC of sssr_reg_info failed\n",
+ __FUNCTION__));
+ return BCME_ERROR;
+ }
+ return BCME_OK;
+}
+
+void
+dhd_sssr_reg_info_deinit(dhd_pub_t *dhd)
+{
+ if (dhd->sssr_reg_info) {
+ MFREE(dhd->osh, dhd->sssr_reg_info, sizeof(sssr_reg_info_cmn_t));
+ dhd->sssr_reg_info = NULL;
+ }
+}
+
+#ifdef DHD_PCIE_REG_ACCESS
+static void
+dhd_dump_sssr_reg_info_v2(dhd_pub_t *dhd)
+{
+ sssr_reg_info_cmn_t *sssr_reg_info_cmn = dhd->sssr_reg_info;
+ sssr_reg_info_v2_t *sssr_reg_info = (sssr_reg_info_v2_t *)&sssr_reg_info_cmn->rev2;
+ int i, j;
+ uint8 num_d11cores = dhd_d11_slices_num_get(dhd);
+ DHD_ERROR(("pmu_regs\n"));
+ DHD_ERROR(("pmuintmask0=0x%x pmuintmask1=0x%x resreqtimer=0x%x "
+ "macresreqtimer=0x%x macresreqtimer1=0x%x\n",
+ sssr_reg_info->pmu_regs.base_regs.pmuintmask0,
+ sssr_reg_info->pmu_regs.base_regs.pmuintmask1,
+ sssr_reg_info->pmu_regs.base_regs.resreqtimer,
+ sssr_reg_info->pmu_regs.base_regs.macresreqtimer,
+ sssr_reg_info->pmu_regs.base_regs.macresreqtimer1));
+ DHD_ERROR(("chipcommon_regs\n"));
+ DHD_ERROR(("intmask=0x%x powerctrl=0x%x clockcontrolstatus=0x%x powerctrl_mask=0x%x\n",
+ sssr_reg_info->chipcommon_regs.base_regs.intmask,
+ sssr_reg_info->chipcommon_regs.base_regs.powerctrl,
+ sssr_reg_info->chipcommon_regs.base_regs.clockcontrolstatus,
+ sssr_reg_info->chipcommon_regs.base_regs.powerctrl_mask));
+ DHD_ERROR(("arm_regs\n"));
+ DHD_ERROR(("clockcontrolstatus=0x%x clockcontrolstatus_val=0x%x"
+ " resetctrl=0x%x extrsrcreq=0x%x\n",
+ sssr_reg_info->arm_regs.base_regs.clockcontrolstatus,
+ sssr_reg_info->arm_regs.base_regs.clockcontrolstatus_val,
+ sssr_reg_info->arm_regs.wrapper_regs.resetctrl,
+ sssr_reg_info->arm_regs.wrapper_regs.extrsrcreq));
+ DHD_ERROR(("pcie_regs\n"));
+ DHD_ERROR(("ltrstate=0x%x clockcontrolstatus=0x%x "
+ "clockcontrolstatus_val=0x%x extrsrcreq=0x%x\n",
+ sssr_reg_info->pcie_regs.base_regs.ltrstate,
+ sssr_reg_info->pcie_regs.base_regs.clockcontrolstatus,
+ sssr_reg_info->pcie_regs.base_regs.clockcontrolstatus_val,
+ sssr_reg_info->pcie_regs.wrapper_regs.extrsrcreq));
+
+ for (i = 0; i < num_d11cores; i++) {
+ DHD_ERROR(("mac_regs core[%d]\n", i));
+ DHD_ERROR(("xmtaddress=0x%x xmtdata=0x%x clockcontrolstatus=0x%x "
+ "clockcontrolstatus_val=0x%x\n",
+ sssr_reg_info->mac_regs[i].base_regs.xmtaddress,
+ sssr_reg_info->mac_regs[i].base_regs.xmtdata,
+ sssr_reg_info->mac_regs[i].base_regs.clockcontrolstatus,
+ sssr_reg_info->mac_regs[i].base_regs.clockcontrolstatus_val));
+ DHD_ERROR(("resetctrl=0x%x extrsrcreq=0x%x ioctrl=0x%x\n",
+ sssr_reg_info->mac_regs[i].wrapper_regs.resetctrl,
+ sssr_reg_info->mac_regs[i].wrapper_regs.extrsrcreq,
+ sssr_reg_info->mac_regs[i].wrapper_regs.ioctrl));
+ for (j = 0; j < SSSR_D11_RESET_SEQ_STEPS; j++) {
+ DHD_ERROR(("ioctrl_resetseq_val[%d] 0x%x\n", j,
+ sssr_reg_info->mac_regs[i].wrapper_regs.ioctrl_resetseq_val[j]));
+ }
+ DHD_ERROR(("sr_size=0x%x\n", sssr_reg_info->mac_regs[i].sr_size));
+ }
+ DHD_ERROR(("dig_regs\n"));
+ DHD_ERROR(("dig_sr_addr=0x%x dig_sr_size=0x%x\n",
+ sssr_reg_info->dig_mem_info.dig_sr_addr,
+ sssr_reg_info->dig_mem_info.dig_sr_size));
+}
+
+static void
+dhd_dump_sssr_reg_info_v3(dhd_pub_t *dhd)
+{
+ sssr_reg_info_cmn_t *sssr_reg_info_cmn = dhd->sssr_reg_info;
+ sssr_reg_info_v3_t *sssr_reg_info = (sssr_reg_info_v3_t *)&sssr_reg_info_cmn->rev3;
+ int i;
+
+ dhd_dump_sssr_reg_info_v2(dhd);
+
+ DHD_ERROR(("FIS Enab in fw : %d\n", sssr_reg_info->fis_enab));
+
+ DHD_ERROR(("HWA regs for reset \n"));
+ DHD_ERROR(("clkenable 0x%x, clkgatingenable 0x%x, clkext 0x%x, "
+ "clkctlstatus 0x%x, ioctrl 0x%x, resetctrl 0x%x\n",
+ sssr_reg_info->hwa_regs.base_regs.clkenable,
+ sssr_reg_info->hwa_regs.base_regs.clkgatingenable,
+ sssr_reg_info->hwa_regs.base_regs.clkext,
+ sssr_reg_info->hwa_regs.base_regs.clkctlstatus,
+ sssr_reg_info->hwa_regs.wrapper_regs.ioctrl,
+ sssr_reg_info->hwa_regs.wrapper_regs.resetctrl));
+ DHD_ERROR(("HWA regs value seq for reset \n"));
+ for (i = 0; i < SSSR_HWA_RESET_SEQ_STEPS; i++) {
+ DHD_ERROR(("hwa_resetseq_val[%d] 0x%x", i,
+ sssr_reg_info->hwa_regs.hwa_resetseq_val[i]));
+ }
+}
+
+static void
+dhd_dump_sssr_reg_info_v1(dhd_pub_t *dhd)
+{
+ sssr_reg_info_cmn_t *sssr_reg_info_cmn = dhd->sssr_reg_info;
+ sssr_reg_info_v1_t *sssr_reg_info = (sssr_reg_info_v1_t *)&sssr_reg_info_cmn->rev1;
+ int i, j;
+ uint8 num_d11cores = dhd_d11_slices_num_get(dhd);
+
+ DHD_ERROR(("pmu_regs\n"));
+ DHD_ERROR(("pmuintmask0=0x%x pmuintmask1=0x%x resreqtimer=0x%x "
+ "macresreqtimer=0x%x macresreqtimer1=0x%x\n",
+ sssr_reg_info->pmu_regs.base_regs.pmuintmask0,
+ sssr_reg_info->pmu_regs.base_regs.pmuintmask1,
+ sssr_reg_info->pmu_regs.base_regs.resreqtimer,
+ sssr_reg_info->pmu_regs.base_regs.macresreqtimer,
+ sssr_reg_info->pmu_regs.base_regs.macresreqtimer1));
+ DHD_ERROR(("chipcommon_regs\n"));
+ DHD_ERROR(("intmask=0x%x powerctrl=0x%x clockcontrolstatus=0x%x powerctrl_mask=0x%x\n",
+ sssr_reg_info->chipcommon_regs.base_regs.intmask,
+ sssr_reg_info->chipcommon_regs.base_regs.powerctrl,
+ sssr_reg_info->chipcommon_regs.base_regs.clockcontrolstatus,
+ sssr_reg_info->chipcommon_regs.base_regs.powerctrl_mask));
+ DHD_ERROR(("arm_regs\n"));
+ DHD_ERROR(("clockcontrolstatus=0x%x clockcontrolstatus_val=0x%x"
+ " resetctrl=0x%x itopoobb=0x%x\n",
+ sssr_reg_info->arm_regs.base_regs.clockcontrolstatus,
+ sssr_reg_info->arm_regs.base_regs.clockcontrolstatus_val,
+ sssr_reg_info->arm_regs.wrapper_regs.resetctrl,
+ sssr_reg_info->arm_regs.wrapper_regs.itopoobb));
+ DHD_ERROR(("pcie_regs\n"));
+ DHD_ERROR(("ltrstate=0x%x clockcontrolstatus=0x%x "
+ "clockcontrolstatus_val=0x%x itopoobb=0x%x\n",
+ sssr_reg_info->pcie_regs.base_regs.ltrstate,
+ sssr_reg_info->pcie_regs.base_regs.clockcontrolstatus,
+ sssr_reg_info->pcie_regs.base_regs.clockcontrolstatus_val,
+ sssr_reg_info->pcie_regs.wrapper_regs.itopoobb));
+ DHD_ERROR(("vasip_regs\n"));
+ DHD_ERROR(("ioctrl=0x%x vasip_sr_addr=0x%x vasip_sr_size=0x%x\n",
+ sssr_reg_info->vasip_regs.wrapper_regs.ioctrl,
+ sssr_reg_info->vasip_regs.vasip_sr_addr,
+ sssr_reg_info->vasip_regs.vasip_sr_size));
+
+ for (i = 0; i < num_d11cores; i++) {
+ DHD_ERROR(("mac_regs core[%d]\n", i));
+ DHD_ERROR(("xmtaddress=0x%x xmtdata=0x%x clockcontrolstatus=0x%x "
+ "clockcontrolstatus_val=0x%x\n",
+ sssr_reg_info->mac_regs[i].base_regs.xmtaddress,
+ sssr_reg_info->mac_regs[i].base_regs.xmtdata,
+ sssr_reg_info->mac_regs[i].base_regs.clockcontrolstatus,
+ sssr_reg_info->mac_regs[i].base_regs.clockcontrolstatus_val));
+ DHD_ERROR(("resetctrl=0x%x itopoobb=0x%x ioctrl=0x%x\n",
+ sssr_reg_info->mac_regs[i].wrapper_regs.resetctrl,
+ sssr_reg_info->mac_regs[i].wrapper_regs.itopoobb,
+ sssr_reg_info->mac_regs[i].wrapper_regs.ioctrl));
+ for (j = 0; j < SSSR_D11_RESET_SEQ_STEPS; j++) {
+ DHD_ERROR(("ioctrl_resetseq_val[%d] 0x%x\n", j,
+ sssr_reg_info->mac_regs[i].wrapper_regs.ioctrl_resetseq_val[j]));
+ }
+ DHD_ERROR(("sr_size=0x%x\n", sssr_reg_info->mac_regs[i].sr_size));
+ }
+}
+
+#endif /* DHD_PCIE_REG_ACCESS */
+
+void
+dhd_dump_sssr_reg_info(dhd_pub_t *dhd)
+{
+#ifdef DHD_PCIE_REG_ACCESS
+ sssr_reg_info_cmn_t *sssr_reg_info_cmn = dhd->sssr_reg_info;
+ sssr_reg_info_v1_t *sssr_reg_info = (sssr_reg_info_v1_t *)&sssr_reg_info_cmn->rev1;
+
+ DHD_ERROR(("************** SSSR REG INFO start version:%d ****************\n",
+ sssr_reg_info->version));
+ switch (sssr_reg_info->version) {
+ case SSSR_REG_INFO_VER_3 :
+ dhd_dump_sssr_reg_info_v3(dhd);
+ break;
+ case SSSR_REG_INFO_VER_2 :
+ dhd_dump_sssr_reg_info_v2(dhd);
+ break;
+ default:
+ dhd_dump_sssr_reg_info_v1(dhd);
+ break;
+ }
+ DHD_ERROR(("************** SSSR REG INFO end ****************\n"));
+#endif /* DHD_PCIE_REG_ACCESS */
+}
+
+int
+dhd_get_sssr_reg_info(dhd_pub_t *dhd)
+{
+ int ret;
+ /* get sssr_reg_info from firmware */
+ ret = dhd_iovar(dhd, 0, "sssr_reg_info", NULL, 0, (char *)dhd->sssr_reg_info,
+ sizeof(sssr_reg_info_cmn_t), FALSE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: sssr_reg_info failed (error=%d)\n",
+ __FUNCTION__, ret));
+ return BCME_ERROR;
+ }
+
+ dhd_dump_sssr_reg_info(dhd);
+ return BCME_OK;
+}
+
+uint32
+dhd_get_sssr_bufsize(dhd_pub_t *dhd)
+{
+ int i;
+ uint32 sssr_bufsize = 0;
+ uint8 num_d11cores;
+
+ num_d11cores = dhd_d11_slices_num_get(dhd);
+
+ switch (dhd->sssr_reg_info->rev2.version) {
+ case SSSR_REG_INFO_VER_3 :
+ /* intentional fall through */
+ case SSSR_REG_INFO_VER_2 :
+ for (i = 0; i < num_d11cores; i++) {
+ sssr_bufsize += dhd->sssr_reg_info->rev2.mac_regs[i].sr_size;
+ }
+ if ((dhd->sssr_reg_info->rev2.length >
+ OFFSETOF(sssr_reg_info_v2_t, dig_mem_info)) &&
+ dhd->sssr_reg_info->rev2.dig_mem_info.dig_sr_addr) {
+ sssr_bufsize += 0; /* TBD */
+ }
+ break;
+ case SSSR_REG_INFO_VER_1 :
+ for (i = 0; i < num_d11cores; i++) {
+ sssr_bufsize += dhd->sssr_reg_info->rev1.mac_regs[i].sr_size;
+ }
+ if (dhd->sssr_reg_info->rev1.vasip_regs.vasip_sr_size) {
+ sssr_bufsize += dhd->sssr_reg_info->rev1.vasip_regs.vasip_sr_size;
+ } else if ((dhd->sssr_reg_info->rev1.length > OFFSETOF(sssr_reg_info_v1_t,
+ dig_mem_info)) && dhd->sssr_reg_info->rev1.
+ dig_mem_info.dig_sr_addr) {
+ sssr_bufsize += dhd->sssr_reg_info->rev1.dig_mem_info.dig_sr_size;
+ }
+ break;
+ case SSSR_REG_INFO_VER_0 :
+ for (i = 0; i < num_d11cores; i++) {
+ sssr_bufsize += dhd->sssr_reg_info->rev0.mac_regs[i].sr_size;
+ }
+ if (dhd->sssr_reg_info->rev0.vasip_regs.vasip_sr_size) {
+ sssr_bufsize += dhd->sssr_reg_info->rev0.vasip_regs.vasip_sr_size;
+ }
+ break;
+ default :
+ DHD_ERROR(("invalid sssr_reg_ver"));
+ return BCME_UNSUPPORTED;
+ }
+
+#ifdef DHD_SSSR_DUMP_BEFORE_SR
+ /* Double the size as different dumps will be saved before and after SR */
+ sssr_bufsize = 2 * sssr_bufsize;
+#endif /* DHD_SSSR_DUMP_BEFORE_SR */
+
+ return sssr_bufsize;
+}
+
+int
+dhd_sssr_dump_init(dhd_pub_t *dhd)
+{
+ int i;
+ uint32 sssr_bufsize;
+ uint32 mempool_used = 0;
+ uint8 num_d11cores = 0;
+ bool alloc_sssr = FALSE;
+ uint32 sr_size = 0;
+
+ dhd->sssr_inited = FALSE;
+ if (!sssr_enab) {
+ DHD_ERROR(("%s: sssr dump not inited as instructed by mod param\n", __FUNCTION__));
+ return BCME_OK;
+ }
+
+ /* check if sssr mempool is allocated */
+ if (dhd->sssr_mempool == NULL) {
+ DHD_ERROR(("%s: sssr_mempool is not allocated\n",
+ __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+	/* check if sssr reg info is allocated */
+ if (dhd->sssr_reg_info == NULL) {
+ DHD_ERROR(("%s: sssr_reg_info is not allocated\n",
+ __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ /* Get SSSR reg info */
+ if (dhd_get_sssr_reg_info(dhd) != BCME_OK) {
+ DHD_ERROR(("%s: dhd_get_sssr_reg_info failed\n", __FUNCTION__));
+ printf("DEBUG_SSSr: %s: dhd_get_sssr_reg_info failed\n", __FUNCTION__);
+ return BCME_ERROR;
+ }
+
+ num_d11cores = dhd_d11_slices_num_get(dhd);
+ /* Validate structure version and length */
+ switch (dhd->sssr_reg_info->rev2.version) {
+ case SSSR_REG_INFO_VER_3 :
+ if (dhd->sssr_reg_info->rev3.length != sizeof(sssr_reg_info_v3_t)) {
+ DHD_ERROR(("%s: dhd->sssr_reg_info->rev2.length (%d : %d)"
+ "mismatch on rev2\n", __FUNCTION__,
+ (int)dhd->sssr_reg_info->rev3.length,
+ (int)sizeof(sssr_reg_info_v3_t)));
+ return BCME_ERROR;
+ }
+ break;
+ case SSSR_REG_INFO_VER_2 :
+ if (dhd->sssr_reg_info->rev2.length != sizeof(sssr_reg_info_v2_t)) {
+ DHD_ERROR(("%s: dhd->sssr_reg_info->rev2.length (%d : %d)"
+ "mismatch on rev2\n", __FUNCTION__,
+ (int)dhd->sssr_reg_info->rev2.length,
+ (int)sizeof(sssr_reg_info_v2_t)));
+ return BCME_ERROR;
+ }
+ break;
+ case SSSR_REG_INFO_VER_1 :
+ if (dhd->sssr_reg_info->rev1.length != sizeof(sssr_reg_info_v1_t)) {
+ DHD_ERROR(("%s: dhd->sssr_reg_info->rev1.length (%d : %d)"
+ "mismatch on rev1\n", __FUNCTION__,
+ (int)dhd->sssr_reg_info->rev1.length,
+ (int)sizeof(sssr_reg_info_v1_t)));
+ return BCME_ERROR;
+ }
+ break;
+ case SSSR_REG_INFO_VER_0 :
+ if (dhd->sssr_reg_info->rev0.length != sizeof(sssr_reg_info_v0_t)) {
+ DHD_ERROR(("%s: dhd->sssr_reg_info->rev0.length (%d : %d)"
+ "mismatch on rev0\n", __FUNCTION__,
+ (int)dhd->sssr_reg_info->rev0.length,
+ (int)sizeof(sssr_reg_info_v0_t)));
+ return BCME_ERROR;
+ }
+ break;
+ default :
+ DHD_ERROR(("invalid sssr_reg_ver"));
+ return BCME_UNSUPPORTED;
+ }
+
+ /* validate fifo size */
+ sssr_bufsize = dhd_get_sssr_bufsize(dhd);
+ if (sssr_bufsize > DHD_SSSR_MEMPOOL_SIZE) {
+ DHD_ERROR(("%s: sssr_bufsize(%d) is greater than sssr_mempool(%d)\n",
+ __FUNCTION__, (int)sssr_bufsize, DHD_SSSR_MEMPOOL_SIZE));
+ return BCME_ERROR;
+ }
+
+ /* init all pointers to NULL */
+ for (i = 0; i < num_d11cores; i++) {
+#ifdef DHD_SSSR_DUMP_BEFORE_SR
+ dhd->sssr_d11_before[i] = NULL;
+#endif /* DHD_SSSR_DUMP_BEFORE_SR */
+ dhd->sssr_d11_after[i] = NULL;
+ }
+
+#ifdef DHD_SSSR_DUMP_BEFORE_SR
+ dhd->sssr_dig_buf_before = NULL;
+#endif /* DHD_SSSR_DUMP_BEFORE_SR */
+ dhd->sssr_dig_buf_after = NULL;
+
+ /* Allocate memory */
+ for (i = 0; i < num_d11cores; i++) {
+ alloc_sssr = FALSE;
+ sr_size = 0;
+
+ switch (dhd->sssr_reg_info->rev2.version) {
+ case SSSR_REG_INFO_VER_3 :
+ /* intentional fall through */
+ case SSSR_REG_INFO_VER_2 :
+ if (dhd->sssr_reg_info->rev2.mac_regs[i].sr_size) {
+ alloc_sssr = TRUE;
+ sr_size = dhd->sssr_reg_info->rev2.mac_regs[i].sr_size;
+ }
+ break;
+ case SSSR_REG_INFO_VER_1 :
+ if (dhd->sssr_reg_info->rev1.mac_regs[i].sr_size) {
+ alloc_sssr = TRUE;
+ sr_size = dhd->sssr_reg_info->rev1.mac_regs[i].sr_size;
+ }
+ break;
+ case SSSR_REG_INFO_VER_0 :
+ if (dhd->sssr_reg_info->rev0.mac_regs[i].sr_size) {
+ alloc_sssr = TRUE;
+ sr_size = dhd->sssr_reg_info->rev0.mac_regs[i].sr_size;
+ }
+ break;
+ default :
+ DHD_ERROR(("invalid sssr_reg_ver"));
+ return BCME_UNSUPPORTED;
+ }
+
+ if (alloc_sssr) {
+#ifdef DHD_SSSR_DUMP_BEFORE_SR
+ dhd->sssr_d11_before[i] = (uint32 *)(dhd->sssr_mempool + mempool_used);
+ mempool_used += sr_size;
+#endif /* DHD_SSSR_DUMP_BEFORE_SR */
+
+ dhd->sssr_d11_after[i] = (uint32 *)(dhd->sssr_mempool + mempool_used);
+ mempool_used += sr_size;
+ }
+ }
+
+	/* Allocate dump memory for VASIP (version 0 or 1) or digital core (version 1, 2, or 3) */
+ alloc_sssr = FALSE;
+ sr_size = 0;
+ switch (dhd->sssr_reg_info->rev2.version) {
+ case SSSR_REG_INFO_VER_3 :
+ /* intentional fall through */
+ case SSSR_REG_INFO_VER_2 :
+ if ((dhd->sssr_reg_info->rev2.length >
+ OFFSETOF(sssr_reg_info_v2_t, dig_mem_info)) &&
+ dhd->sssr_reg_info->rev2.dig_mem_info.dig_sr_addr) {
+ alloc_sssr = TRUE;
+ sr_size = dhd->sssr_reg_info->rev2.dig_mem_info.dig_sr_size;
+ }
+ break;
+ case SSSR_REG_INFO_VER_1 :
+ if (dhd->sssr_reg_info->rev1.vasip_regs.vasip_sr_size) {
+ alloc_sssr = TRUE;
+ sr_size = dhd->sssr_reg_info->rev1.vasip_regs.vasip_sr_size;
+ } else if ((dhd->sssr_reg_info->rev1.length > OFFSETOF(sssr_reg_info_v1_t,
+ dig_mem_info)) && dhd->sssr_reg_info->rev1.
+ dig_mem_info.dig_sr_addr) {
+ alloc_sssr = TRUE;
+ sr_size = dhd->sssr_reg_info->rev1.dig_mem_info.dig_sr_size;
+ }
+ break;
+ case SSSR_REG_INFO_VER_0 :
+ if (dhd->sssr_reg_info->rev0.vasip_regs.vasip_sr_size) {
+ alloc_sssr = TRUE;
+ sr_size = dhd->sssr_reg_info->rev0.vasip_regs.vasip_sr_size;
+ }
+ break;
+ default :
+ DHD_ERROR(("invalid sssr_reg_ver"));
+ return BCME_UNSUPPORTED;
+ }
+
+ if (alloc_sssr) {
+ dhd->sssr_dig_buf_after = (uint32 *)(dhd->sssr_mempool + mempool_used);
+ mempool_used += sr_size;
+
+#ifdef DHD_SSSR_DUMP_BEFORE_SR
+ /* DIG dump before suspend is not applicable. */
+ dhd->sssr_dig_buf_before = NULL;
+#endif /* DHD_SSSR_DUMP_BEFORE_SR */
+ }
+
+ dhd->sssr_inited = TRUE;
+
+ return BCME_OK;
+
+}
+
+void
+dhd_sssr_dump_deinit(dhd_pub_t *dhd)
+{
+ int i;
+
+ dhd->sssr_inited = FALSE;
+ /* init all pointers to NULL */
+ for (i = 0; i < MAX_NUM_D11_CORES_WITH_SCAN; i++) {
+#ifdef DHD_SSSR_DUMP_BEFORE_SR
+ dhd->sssr_d11_before[i] = NULL;
+#endif /* DHD_SSSR_DUMP_BEFORE_SR */
+ dhd->sssr_d11_after[i] = NULL;
+ }
+#ifdef DHD_SSSR_DUMP_BEFORE_SR
+ dhd->sssr_dig_buf_before = NULL;
+#endif /* DHD_SSSR_DUMP_BEFORE_SR */
+ dhd->sssr_dig_buf_after = NULL;
+
+ return;
+}
+
+void
+dhd_sssr_print_filepath(dhd_pub_t *dhd, char *path)
+{
+ bool print_info = FALSE;
+ int dump_mode;
+
+ if (!dhd || !path) {
+ DHD_ERROR(("%s: dhd or memdump_path is NULL\n",
+ __FUNCTION__));
+ return;
+ }
+
+ if (!dhd->sssr_dump_collected) {
+ /* SSSR dump is not collected */
+ return;
+ }
+
+ dump_mode = dhd->sssr_dump_mode;
+
+ if (bcmstrstr(path, "core_0_before")) {
+ if (dhd->sssr_d11_outofreset[0] &&
+ dump_mode == SSSR_DUMP_MODE_SSSR) {
+ print_info = TRUE;
+ }
+ } else if (bcmstrstr(path, "core_0_after")) {
+ if (dhd->sssr_d11_outofreset[0]) {
+ print_info = TRUE;
+ }
+ } else if (bcmstrstr(path, "core_1_before")) {
+ if (dhd->sssr_d11_outofreset[1] &&
+ dump_mode == SSSR_DUMP_MODE_SSSR) {
+ print_info = TRUE;
+ }
+ } else if (bcmstrstr(path, "core_1_after")) {
+ if (dhd->sssr_d11_outofreset[1]) {
+ print_info = TRUE;
+ }
+ } else if (bcmstrstr(path, "core_2_before")) {
+ if (dhd->sssr_d11_outofreset[2] &&
+ dump_mode == SSSR_DUMP_MODE_SSSR) {
+ print_info = TRUE;
+ }
+ } else if (bcmstrstr(path, "core_2_after")) {
+ if (dhd->sssr_d11_outofreset[2]) {
+ print_info = TRUE;
+ }
+ } else {
+ print_info = TRUE;
+ }
+
+ if (print_info) {
+ DHD_ERROR(("%s: file_path = %s%s\n", __FUNCTION__,
+ path, FILE_NAME_HAL_TAG));
+ }
+}
+#endif /* DHD_SSSR_DUMP */
+
+#ifdef DHD_SDTC_ETB_DUMP
+/*
+ * sdtc: system debug trace controller
+ * etb: embedded trace buffer
+ */
+void
+dhd_sdtc_etb_init(dhd_pub_t *dhd)
+{
+ bcm_iov_buf_t *iov_req = NULL;
+ etb_addr_info_t *p_etb_addr_info = NULL;
+ bcm_iov_buf_t *iov_resp = NULL;
+ uint8 *buf = NULL;
+ int ret = 0;
+ uint16 iovlen = 0;
+ uint16 version = 0;
+
+ BCM_REFERENCE(p_etb_addr_info);
+ dhd->sdtc_etb_inited = FALSE;
+
+ iov_req = MALLOCZ(dhd->osh, WLC_IOCTL_SMLEN);
+ if (iov_req == NULL) {
+ DHD_ERROR(("%s: Failed to alloc buffer for iovar request\n", __FUNCTION__));
+ goto exit;
+ }
+
+ buf = MALLOCZ(dhd->osh, WLC_IOCTL_MAXLEN);
+ if (buf == NULL) {
+ DHD_ERROR(("%s: Failed to alloc buffer for iovar response\n", __FUNCTION__));
+ goto exit;
+ }
+
+ /* fill header */
+ iov_req->version = WL_SDTC_IOV_VERSION;
+ iov_req->id = WL_SDTC_CMD_ETB_INFO;
+ iov_req->len = sizeof(etb_addr_info_t);
+ iovlen = OFFSETOF(bcm_iov_buf_t, data) + iov_req->len;
+
+ ret = dhd_iovar(dhd, 0, "sdtc", (char *)iov_req, iovlen,
+ (char *)buf, WLC_IOCTL_MAXLEN, FALSE);
+ if (ret < 0) {
+ DHD_ERROR(("%s failed to get sdtc etb_info %d\n", __FUNCTION__, ret));
+ goto exit;
+ }
+
+ version = dtoh16(*(uint16 *)buf);
+ /* Check for version */
+ if (version != WL_SDTC_IOV_VERSION) {
+ DHD_ERROR(("%s WL_SDTC_IOV_VERSION mis match\n", __FUNCTION__));
+ goto exit;
+ }
+ iov_resp = (bcm_iov_buf_t *)buf;
+ if (iov_resp->id == iov_req->id) {
+ p_etb_addr_info = (etb_addr_info_t*)iov_resp->data;
+ dhd->etb_addr_info.version = p_etb_addr_info->version;
+ dhd->etb_addr_info.len = p_etb_addr_info->len;
+ dhd->etb_addr_info.etbinfo_addr = p_etb_addr_info->etbinfo_addr;
+
+ DHD_ERROR(("%s etb_addr_info: ver:%d, len:%d, addr:0x%x\n", __FUNCTION__,
+ dhd->etb_addr_info.version, dhd->etb_addr_info.len,
+ dhd->etb_addr_info.etbinfo_addr));
+ } else {
+ DHD_ERROR(("%s Unknown CMD-ID (%d) as response for request ID %d\n",
+ __FUNCTION__, iov_resp->id, iov_req->id));
+ goto exit;
+ }
+
+	/* since all the requirements for SDTC and ETB are met, mark the capability as TRUE */
+ dhd->sdtc_etb_inited = TRUE;
+ DHD_ERROR(("%s sdtc_etb_inited: %d\n", __FUNCTION__, dhd->sdtc_etb_inited));
+exit:
+ if (iov_req) {
+ MFREE(dhd->osh, iov_req, WLC_IOCTL_SMLEN);
+ }
+ if (buf) {
+ MFREE(dhd->osh, buf, WLC_IOCTL_MAXLEN);
+ }
+ return;
+}
+
+void
+dhd_sdtc_etb_deinit(dhd_pub_t *dhd)
+{
+ dhd->sdtc_etb_inited = FALSE;
+}
+
+int
+dhd_sdtc_etb_mempool_init(dhd_pub_t *dhd)
+{
+ dhd->sdtc_etb_mempool = (uint8 *) MALLOCZ(dhd->osh, DHD_SDTC_ETB_MEMPOOL_SIZE);
+ if (dhd->sdtc_etb_mempool == NULL) {
+ DHD_ERROR(("%s: MALLOC of sdtc_etb_mempool failed\n",
+ __FUNCTION__));
+ return BCME_ERROR;
+ }
+ return BCME_OK;
+}
+
+void
+dhd_sdtc_etb_mempool_deinit(dhd_pub_t *dhd)
+{
+ if (dhd->sdtc_etb_mempool) {
+ MFREE(dhd->osh, dhd->sdtc_etb_mempool, DHD_SDTC_ETB_MEMPOOL_SIZE);
+ dhd->sdtc_etb_mempool = NULL;
+ }
+}
+#endif /* DHD_SDTC_ETB_DUMP */
+
+#ifdef DHD_FW_COREDUMP
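+/*
+ * Buffer strategy (summarized for clarity): reuse a preallocated static
+ * buffer when DHD_USE_STATIC_MEMDUMP is configured; otherwise try a
+ * regular MALLOC and, in sleepable context, fall back to VMALLOC since
+ * the SoC RAM snapshot can be large.
+ */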
+void* dhd_get_fwdump_buf(dhd_pub_t *dhd_pub, uint32 length)
+{
+ if (!dhd_pub->soc_ram) {
+#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
+ dhd_pub->soc_ram = (uint8*)DHD_OS_PREALLOC(dhd_pub,
+ DHD_PREALLOC_MEMDUMP_RAM, length);
+#else
+ dhd_pub->soc_ram = (uint8*) MALLOC(dhd_pub->osh, length);
+
+ if ((dhd_pub->soc_ram == NULL) && CAN_SLEEP()) {
+ DHD_ERROR(("%s: Try to allocate virtual memory for fw crash snap shot.\n",
+ __FUNCTION__));
+ dhd_pub->soc_ram = (uint8*) VMALLOC(dhd_pub->osh, length);
+ }
+#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
+ }
+
+ if (dhd_pub->soc_ram == NULL) {
+ DHD_ERROR(("%s: Failed to allocate memory for fw crash snap shot.\n",
+ __FUNCTION__));
+ dhd_pub->soc_ram_length = 0;
+ } else {
+ memset(dhd_pub->soc_ram, 0, length);
+ dhd_pub->soc_ram_length = length;
+ }
+
+ /* soc_ram free handled in dhd_{free,clear} */
+ return dhd_pub->soc_ram;
+}
+#endif /* DHD_FW_COREDUMP */
+
+/* To NDIS developers: the structure dhd_common is redundant;
+ * please do NOT merge it back from other branches!
+ */
+
+int
+dhd_common_socram_dump(dhd_pub_t *dhdp)
+{
+#ifdef BCMDBUS
+ return 0;
+#else
+ return dhd_socram_dump(dhdp->bus);
+#endif /* BCMDBUS */
+}
+
+int
+dhd_dump(dhd_pub_t *dhdp, char *buf, int buflen)
+{
+ struct bcmstrbuf b;
+ struct bcmstrbuf *strbuf = &b;
+#ifdef DHD_MEM_STATS
+ uint64 malloc_mem = 0;
+ uint64 total_txpath_mem = 0;
+ uint64 txpath_bkpq_len = 0;
+ uint64 txpath_bkpq_mem = 0;
+ uint64 total_dhd_mem = 0;
+#endif /* DHD_MEM_STATS */
+
+ if (!dhdp || !dhdp->prot || !buf) {
+ return BCME_ERROR;
+ }
+
+ bcm_binit(strbuf, buf, buflen);
+
+ /* Base DHD info */
+ bcm_bprintf(strbuf, "%s\n", dhd_version);
+ bcm_bprintf(strbuf, "\n");
+ bcm_bprintf(strbuf, "pub.up %d pub.txoff %d pub.busstate %d\n",
+ dhdp->up, dhdp->txoff, dhdp->busstate);
+ bcm_bprintf(strbuf, "pub.hdrlen %u pub.maxctl %u pub.rxsz %u\n",
+ dhdp->hdrlen, dhdp->maxctl, dhdp->rxsz);
+ bcm_bprintf(strbuf, "pub.iswl %d pub.drv_version %ld pub.mac "MACDBG"\n",
+ dhdp->iswl, dhdp->drv_version, MAC2STRDBG(&dhdp->mac));
+ bcm_bprintf(strbuf, "pub.bcmerror %d tickcnt %u\n", dhdp->bcmerror, dhdp->tickcnt);
+
+ bcm_bprintf(strbuf, "dongle stats:\n");
+ bcm_bprintf(strbuf, "tx_packets %lu tx_bytes %lu tx_errors %lu tx_dropped %lu\n",
+ dhdp->dstats.tx_packets, dhdp->dstats.tx_bytes,
+ dhdp->dstats.tx_errors, dhdp->dstats.tx_dropped);
+ bcm_bprintf(strbuf, "rx_packets %lu rx_bytes %lu rx_errors %lu rx_dropped %lu\n",
+ dhdp->dstats.rx_packets, dhdp->dstats.rx_bytes,
+ dhdp->dstats.rx_errors, dhdp->dstats.rx_dropped);
+ bcm_bprintf(strbuf, "multicast %lu\n", dhdp->dstats.multicast);
+
+ bcm_bprintf(strbuf, "bus stats:\n");
+ bcm_bprintf(strbuf, "tx_packets %lu tx_dropped %lu tx_multicast %lu tx_errors %lu\n",
+ dhdp->tx_packets, dhdp->tx_dropped, dhdp->tx_multicast, dhdp->tx_errors);
+ bcm_bprintf(strbuf, "tx_ctlpkts %lu tx_ctlerrs %lu\n",
+ dhdp->tx_ctlpkts, dhdp->tx_ctlerrs);
+ bcm_bprintf(strbuf, "rx_packets %lu rx_multicast %lu rx_errors %lu \n",
+ dhdp->rx_packets, dhdp->rx_multicast, dhdp->rx_errors);
+ bcm_bprintf(strbuf, "rx_ctlpkts %lu rx_ctlerrs %lu rx_dropped %lu\n",
+ dhdp->rx_ctlpkts, dhdp->rx_ctlerrs, dhdp->rx_dropped);
+ bcm_bprintf(strbuf, "rx_readahead_cnt %lu tx_realloc %lu\n",
+ dhdp->rx_readahead_cnt, dhdp->tx_realloc);
+ bcm_bprintf(strbuf, "tx_pktgetfail %lu rx_pktgetfail %lu\n",
+ dhdp->tx_pktgetfail, dhdp->rx_pktgetfail);
+ bcm_bprintf(strbuf, "tx_big_packets %lu\n",
+ dhdp->tx_big_packets);
+ bcm_bprintf(strbuf, "\n");
+#ifdef DMAMAP_STATS
+ /* Add DMA MAP info */
+ bcm_bprintf(strbuf, "DMA MAP stats: \n");
+ bcm_bprintf(strbuf, "txdata: %lu size: %luK, rxdata: %lu size: %luK\n",
+ dhdp->dma_stats.txdata, KB(dhdp->dma_stats.txdata_sz),
+ dhdp->dma_stats.rxdata, KB(dhdp->dma_stats.rxdata_sz));
+#ifndef IOCTLRESP_USE_CONSTMEM
+ bcm_bprintf(strbuf, "IOCTL RX: %lu size: %luK ,",
+ dhdp->dma_stats.ioctl_rx, KB(dhdp->dma_stats.ioctl_rx_sz));
+#endif /* !IOCTLRESP_USE_CONSTMEM */
+ bcm_bprintf(strbuf, "EVENT RX: %lu size: %luK, INFO RX: %lu size: %luK, "
+ "TSBUF RX: %lu size %luK\n",
+ dhdp->dma_stats.event_rx, KB(dhdp->dma_stats.event_rx_sz),
+ dhdp->dma_stats.info_rx, KB(dhdp->dma_stats.info_rx_sz),
+ dhdp->dma_stats.tsbuf_rx, KB(dhdp->dma_stats.tsbuf_rx_sz));
+ bcm_bprintf(strbuf, "Total : %luK \n",
+ KB(dhdp->dma_stats.txdata_sz + dhdp->dma_stats.rxdata_sz +
+ dhdp->dma_stats.ioctl_rx_sz + dhdp->dma_stats.event_rx_sz +
+ dhdp->dma_stats.tsbuf_rx_sz));
+#endif /* DMAMAP_STATS */
+ bcm_bprintf(strbuf, "dhd_induce_error : %u\n", dhdp->dhd_induce_error);
+ /* Add any prot info */
+ dhd_prot_dump(dhdp, strbuf);
+ bcm_bprintf(strbuf, "\n");
+
+ /* Add any bus info */
+ dhd_bus_dump(dhdp, strbuf);
+#if defined(BCM_ROUTER_DHD) && defined(HNDCTF)
+ /* Add ctf info */
+ dhd_ctf_dump(dhdp, strbuf);
+#endif /* BCM_ROUTER_DHD && HNDCTF */
+
+#if defined(DHD_LB_STATS)
+ dhd_lb_stats_dump(dhdp, strbuf);
+#endif /* DHD_LB_STATS */
+
+#ifdef DHD_MEM_STATS
+
+ malloc_mem = MALLOCED(dhdp->osh);
+
+ txpath_bkpq_len = dhd_active_tx_flowring_bkpq_len(dhdp);
+	/*
+	 * Instead of traversing the entire queue to find each skb's length,
+	 * assume MAX_MTU_SZ as the length of each skb.
+	 */
+	txpath_bkpq_mem = (txpath_bkpq_len * MAX_MTU_SZ);
+ total_txpath_mem = dhdp->txpath_mem + txpath_bkpq_mem;
+
+ bcm_bprintf(strbuf, "\nDHD malloc memory_usage: %llubytes %lluKB\n",
+ malloc_mem, (malloc_mem / 1024));
+
+ bcm_bprintf(strbuf, "\nDHD tx-bkpq len: %llu memory_usage: %llubytes %lluKB\n",
+ txpath_bkpq_len, txpath_bkpq_mem, (txpath_bkpq_mem / 1024));
+ bcm_bprintf(strbuf, "DHD tx-path memory_usage: %llubytes %lluKB\n",
+ total_txpath_mem, (total_txpath_mem / 1024));
+
+ total_dhd_mem = malloc_mem + total_txpath_mem;
+#if defined(DHD_LB_STATS)
+ total_dhd_mem += dhd_lb_mem_usage(dhdp, strbuf);
+#endif /* DHD_LB_STATS */
+ bcm_bprintf(strbuf, "\nDHD Totoal memory_usage: %llubytes %lluKB \n",
+ total_dhd_mem, (total_dhd_mem / 1024));
+#endif /* DHD_MEM_STATS */
+#if defined(DHD_LB_STATS)
+ bcm_bprintf(strbuf, "\nlb_rxp_stop_thr_hitcnt: %llu lb_rxp_strt_thr_hitcnt: %llu\n",
+ dhdp->lb_rxp_stop_thr_hitcnt, dhdp->lb_rxp_strt_thr_hitcnt);
+ bcm_bprintf(strbuf, "\nlb_rxp_napi_sched_cnt: %llu lb_rxp_napi_complete_cnt: %llu\n",
+ dhdp->lb_rxp_napi_sched_cnt, dhdp->lb_rxp_napi_complete_cnt);
+#endif /* DHD_LB_STATS */
+
+#if defined(DHD_MQ) && defined(DHD_MQ_STATS)
+ dhd_mqstats_dump(dhdp, strbuf);
+#endif
+
+#ifdef DHD_WET
+ if (dhd_get_wet_mode(dhdp)) {
+ bcm_bprintf(strbuf, "Wet Dump:\n");
+ dhd_wet_dump(dhdp, strbuf);
+ }
+#endif /* DHD_WET */
+
+ DHD_ERROR(("%s bufsize: %d free: %d\n", __FUNCTION__, buflen, strbuf->size));
+ /* return remaining buffer length */
+ return (!strbuf->size ? BCME_BUFTOOSHORT : strbuf->size);
+}
+
+void
+dhd_dump_to_kernelog(dhd_pub_t *dhdp)
+{
+ char buf[512];
+
+ DHD_ERROR(("F/W version: %s\n", fw_version));
+ bcm_bprintf_bypass = TRUE;
+ dhd_dump(dhdp, buf, sizeof(buf));
+ bcm_bprintf_bypass = FALSE;
+}
+
+int
+dhd_wl_ioctl_cmd(dhd_pub_t *dhd_pub, int cmd, void *arg, int len, uint8 set, int ifidx)
+{
+ wl_ioctl_t ioc;
+
+ ioc.cmd = cmd;
+ ioc.buf = arg;
+ ioc.len = len;
+ ioc.set = set;
+
+ return dhd_wl_ioctl(dhd_pub, ifidx, &ioc, arg, len);
+}
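+
+/*
+ * Illustrative usage: bringing the dongle up on the primary interface
+ * could look like
+ *   dhd_wl_ioctl_cmd(dhd_pub, WLC_UP, NULL, 0, TRUE, 0);
+ * where the last argument is the interface index.
+ */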
+
+int
+dhd_wl_ioctl_get_intiovar(dhd_pub_t *dhd_pub, char *name, uint *pval,
+ int cmd, uint8 set, int ifidx)
+{
+ char iovbuf[WLC_IOCTL_SMLEN];
+ int ret = -1;
+
+ memset(iovbuf, 0, sizeof(iovbuf));
+ if (bcm_mkiovar(name, NULL, 0, iovbuf, sizeof(iovbuf))) {
+ ret = dhd_wl_ioctl_cmd(dhd_pub, cmd, iovbuf, sizeof(iovbuf), set, ifidx);
+ if (!ret) {
+ *pval = ltoh32(*((uint*)iovbuf));
+ } else {
+ DHD_ERROR(("%s: get int iovar %s failed, ERR %d\n",
+ __FUNCTION__, name, ret));
+ }
+ } else {
+ DHD_ERROR(("%s: mkiovar %s failed\n",
+ __FUNCTION__, name));
+ }
+
+ return ret;
+}
+
+int
+dhd_wl_ioctl_set_intiovar(dhd_pub_t *dhd_pub, char *name, uint val,
+ int cmd, uint8 set, int ifidx)
+{
+ char iovbuf[WLC_IOCTL_SMLEN];
+ int ret = -1;
+ int lval = htol32(val);
+ uint len;
+
+ len = bcm_mkiovar(name, (char*)&lval, sizeof(lval), iovbuf, sizeof(iovbuf));
+
+ if (len) {
+ ret = dhd_wl_ioctl_cmd(dhd_pub, cmd, iovbuf, len, set, ifidx);
+ if (ret) {
+ DHD_ERROR(("%s: set int iovar %s failed, ERR %d\n",
+ __FUNCTION__, name, ret));
+ }
+ } else {
+ DHD_ERROR(("%s: mkiovar %s failed\n",
+ __FUNCTION__, name));
+ }
+
+ return ret;
+}
+
+static struct ioctl2str_s {
+ uint32 ioctl;
+ char *name;
+} ioctl2str_array[] = {
+ {WLC_UP, "UP"},
+ {WLC_DOWN, "DOWN"},
+ {WLC_SET_PROMISC, "SET_PROMISC"},
+ {WLC_SET_INFRA, "SET_INFRA"},
+ {WLC_SET_AUTH, "SET_AUTH"},
+ {WLC_SET_SSID, "SET_SSID"},
+ {WLC_RESTART, "RESTART"},
+ {WLC_SET_CHANNEL, "SET_CHANNEL"},
+ {WLC_SET_RATE_PARAMS, "SET_RATE_PARAMS"},
+ {WLC_SET_KEY, "SET_KEY"},
+ {WLC_SCAN, "SCAN"},
+ {WLC_DISASSOC, "DISASSOC"},
+ {WLC_REASSOC, "REASSOC"},
+ {WLC_SET_COUNTRY, "SET_COUNTRY"},
+ {WLC_SET_WAKE, "SET_WAKE"},
+ {WLC_SET_SCANSUPPRESS, "SET_SCANSUPPRESS"},
+ {WLC_SCB_DEAUTHORIZE, "SCB_DEAUTHORIZE"},
+ {WLC_SET_WSEC, "SET_WSEC"},
+ {WLC_SET_INTERFERENCE_MODE, "SET_INTERFERENCE_MODE"},
+ {WLC_SET_RADAR, "SET_RADAR"},
+ {0, NULL}
+};
+
+static char *
+ioctl2str(uint32 ioctl)
+{
+ struct ioctl2str_s *p = ioctl2str_array;
+
+ while (p->name != NULL) {
+ if (p->ioctl == ioctl) {
+ return p->name;
+ }
+ p++;
+ }
+
+ return "";
+}
+
+/**
+ * @param ioc IO control struct, members are partially used by this function.
+ * @param buf [inout] Contains parameters to send to dongle, contains dongle response on return.
+ * @param len Maximum number of bytes that dongle is allowed to write into 'buf'.
+ */
+int
+dhd_wl_ioctl(dhd_pub_t *dhd_pub, int ifidx, wl_ioctl_t *ioc, void *buf, int len)
+{
+ int ret = BCME_ERROR;
+ unsigned long flags;
+#ifdef DUMP_IOCTL_IOV_LIST
+ dhd_iov_li_t *iov_li;
+#endif /* DUMP_IOCTL_IOV_LIST */
+#ifdef REPORT_FATAL_TIMEOUTS
+ wl_escan_params_t *eparams;
+ uint8 *buf_ptr = (uint8 *)buf;
+ uint16 action = 0;
+#endif /* REPORT_FATAL_TIMEOUTS */
+ int hostsleep_set = 0;
+ int hostsleep_val = 0;
+
+ if (dhd_query_bus_erros(dhd_pub)) {
+ return -ENODEV;
+ }
+
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+ DHD_OS_WAKE_LOCK(dhd_pub);
+ if (pm_runtime_get_sync(dhd_bus_to_dev(dhd_pub->bus)) < 0) {
+ DHD_RPM(("%s: pm_runtime_get_sync error. \n", __FUNCTION__));
+ DHD_OS_WAKE_UNLOCK(dhd_pub);
+ return BCME_ERROR;
+ }
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+
+#ifdef KEEPIF_ON_DEVICE_RESET
+ if (ioc->cmd == WLC_GET_VAR) {
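+		/* encode the desired keep-interface state as value+1 so that 0 can mean "no change" */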
+ dbus_config_t config;
+ config.general_param = 0;
+ if (buf) {
+			if (!strcmp(buf, "wowl_activate")) {
+				/* becomes 1 (TRUE) after the decrement below */
+				config.general_param = 2;
+			} else if (!strcmp(buf, "wowl_clear")) {
+				/* becomes 0 (FALSE) after the decrement below */
+				config.general_param = 1;
+			}
+ }
+ if (config.general_param) {
+ config.config_id = DBUS_CONFIG_ID_KEEPIF_ON_DEVRESET;
+ config.general_param--;
+ dbus_set_config(dhd_pub->dbus, &config);
+ }
+ }
+#endif /* KEEPIF_ON_DEVICE_RESET */
+
+ if (dhd_os_proto_block(dhd_pub))
+ {
+#ifdef DHD_LOG_DUMP
+ int slen, val, lval, min_len;
+ char *msg, tmp[64];
+
+ /* WLC_GET_VAR */
+ if (ioc->cmd == WLC_GET_VAR && buf) {
+ min_len = MIN(sizeof(tmp) - 1, strlen(buf));
+ memset(tmp, 0, sizeof(tmp));
+ bcopy(buf, tmp, min_len);
+ tmp[min_len] = '\0';
+ }
+#endif /* DHD_LOG_DUMP */
+
+#ifdef DHD_DISCONNECT_TRACE
+ if (WLC_DISASSOC == ioc->cmd || WLC_DOWN == ioc->cmd ||
+ WLC_DISASSOC_MYAP == ioc->cmd) {
+ DHD_ERROR(("IOCTL Disconnect WiFi: %d\n", ioc->cmd));
+ }
+#endif /* DHD_DISCONNECT_TRACE */
+	/* logging of iovars that are sent to the dongle, ./dhd msglevel +iovar */
+ if (ioc->set == TRUE) {
+ char *pars = (char *)buf; // points at user buffer
+ if (ioc->cmd == WLC_SET_VAR && buf) {
+ DHD_DNGL_IOVAR_SET(("iovar:%d: set %s", ifidx, pars));
+ if (ioc->len > 1 + sizeof(uint32)) {
+ // skip iovar name:
+ pars += strnlen(pars, ioc->len - 1 - sizeof(uint32));
+ pars++; // skip NULL character
+ }
+ } else {
+ DHD_DNGL_IOVAR_SET(("ioctl:%d: set %d %s",
+ ifidx, ioc->cmd, ioctl2str(ioc->cmd)));
+ }
+ if (pars != NULL) {
+ DHD_DNGL_IOVAR_SET((" 0x%x\n", *(uint32*)pars));
+ } else {
+ DHD_DNGL_IOVAR_SET((" NULL\n"));
+ }
+ }
+
+ DHD_LINUX_GENERAL_LOCK(dhd_pub, flags);
+ if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd_pub)) {
+			DHD_INFO(("%s: returning as busstate=%d\n",
+				__FUNCTION__, dhd_pub->busstate));
+ DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
+ dhd_os_proto_unblock(dhd_pub);
+ return -ENODEV;
+ }
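+		/* mark the bus busy with an ioctl so suspend/down paths wait for it to finish */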
+ DHD_BUS_BUSY_SET_IN_IOVAR(dhd_pub);
+ DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
+
+#ifdef DHD_PCIE_RUNTIMEPM
+ dhdpcie_runtime_bus_wake(dhd_pub, TRUE, dhd_wl_ioctl);
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+ DHD_LINUX_GENERAL_LOCK(dhd_pub, flags);
+ if (DHD_BUS_CHECK_SUSPEND_OR_ANY_SUSPEND_IN_PROGRESS(dhd_pub) ||
+ dhd_pub->dhd_induce_error == DHD_INDUCE_IOCTL_SUSPEND_ERROR) {
+ DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
+ __FUNCTION__, dhd_pub->busstate, dhd_pub->dhd_bus_busy_state));
+#ifdef DHD_SEND_HANG_IOCTL_SUSPEND_ERROR
+ ioctl_suspend_error++;
+ if (ioctl_suspend_error > MAX_IOCTL_SUSPEND_ERROR) {
+ dhd_pub->hang_reason = HANG_REASON_IOCTL_SUSPEND_ERROR;
+ dhd_os_send_hang_message(dhd_pub);
+ ioctl_suspend_error = 0;
+ }
+#endif /* DHD_SEND_HANG_IOCTL_SUSPEND_ERROR */
+ DHD_BUS_BUSY_CLEAR_IN_IOVAR(dhd_pub);
+ dhd_os_busbusy_wake(dhd_pub);
+ DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
+ dhd_os_proto_unblock(dhd_pub);
+ return -ENODEV;
+ }
+#ifdef DHD_SEND_HANG_IOCTL_SUSPEND_ERROR
+ ioctl_suspend_error = 0;
+#endif /* DHD_SEND_HANG_IOCTL_SUSPEND_ERROR */
+ DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
+
+#if defined(WL_WLC_SHIM)
+ {
+ struct wl_shim_node *shim = dhd_pub_shim(dhd_pub);
+
+ wl_io_pport_t io_pport;
+ io_pport.dhd_pub = dhd_pub;
+ io_pport.ifidx = ifidx;
+
+ ret = wl_shim_ioctl(shim, ioc, len, &io_pport);
+ if (ret != BCME_OK) {
+ DHD_TRACE(("%s: wl_shim_ioctl(%d) ERR %d\n",
+ __FUNCTION__, ioc->cmd, ret));
+ }
+ }
+#else
+#ifdef DUMP_IOCTL_IOV_LIST
+ if (ioc->cmd != WLC_GET_MAGIC && ioc->cmd != WLC_GET_VERSION && buf) {
+ if (!(iov_li = MALLOC(dhd_pub->osh, sizeof(*iov_li)))) {
+ DHD_ERROR(("iovar dump list item allocation Failed\n"));
+ } else {
+ iov_li->cmd = ioc->cmd;
+ if (buf)
+ bcopy((char *)buf, iov_li->buff, strlen((char *)buf)+1);
+ dhd_iov_li_append(dhd_pub, &dhd_pub->dump_iovlist_head,
+ &iov_li->list);
+ }
+ }
+#endif /* DUMP_IOCTL_IOV_LIST */
+
+#ifdef REPORT_FATAL_TIMEOUTS
+	/* Fill in the sync_id to ensure that the scan timeout is always for the
+	 * escan currently running in the FW. The wl app does not fill in an
+	 * incrementing number for sync_id; it only fills in a random number, which
+	 * increases the chance of 2 consecutive escans having the same sync_id.
+	 * This must happen after dhd_os_proto_block() is called, so that sync_id
+	 * does not get incremented concurrently if 2 consecutive escans are fired
+	 * in quick succession.
+	 */
+ if ((ioc->cmd == WLC_SET_VAR &&
+ buf != NULL &&
+ strcmp("escan", buf) == 0)) {
+ eparams = (wl_escan_params_t *) (buf_ptr + strlen("escan") + 1);
+ action = dtoh16(eparams->action);
+ if (action == WL_SCAN_ACTION_START) {
+ ++dhd_pub->esync_id;
+ /* sync id of 0 is not used for escan,
+ * it is used to indicate
+ * a normal scan timer is running, so as
+ * to ensure that escan abort event
+ * does not cancel a normal scan timeout
+ */
+ if (dhd_pub->esync_id == 0)
+ ++dhd_pub->esync_id;
+			DHD_INFO(("%s: escan sync id set to %u\n",
+				__FUNCTION__, dhd_pub->esync_id));
+ eparams->sync_id = htod16(dhd_pub->esync_id);
+ }
+ }
+#endif /* REPORT_FATAL_TIMEOUTS */
+
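+		/* hostsleep handling may service this ioctl by itself; if so, skip the dongle call */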
+ if (dhd_conf_check_hostsleep(dhd_pub, ioc->cmd, ioc->buf, len,
+ &hostsleep_set, &hostsleep_val, &ret))
+ goto exit;
+ ret = dhd_prot_ioctl(dhd_pub, ifidx, ioc, buf, len);
+ dhd_conf_get_hostsleep(dhd_pub, hostsleep_set, hostsleep_val, ret);
+
+#ifdef DUMP_IOCTL_IOV_LIST
+ if (ret == -ETIMEDOUT) {
+ DHD_ERROR(("Last %d issued commands: Latest one is at bottom.\n",
+ IOV_LIST_MAX_LEN));
+ dhd_iov_li_print(&dhd_pub->dump_iovlist_head);
+ }
+#endif /* DUMP_IOCTL_IOV_LIST */
+#endif /* defined(WL_WLC_SHIM) */
+#ifdef WL_CFGVENDOR_SEND_HANG_EVENT
+ if (ret == -ETIMEDOUT) {
+ copy_hang_info_ioctl_timeout(dhd_pub, ifidx, ioc);
+ }
+#endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
+#ifdef DHD_LOG_DUMP
+ if ((ioc->cmd == WLC_GET_VAR || ioc->cmd == WLC_SET_VAR) &&
+ buf != NULL) {
+ if (buf) {
+ lval = 0;
+ slen = strlen(buf) + 1;
+ msg = (char*)buf;
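+				/* for GET, the response value is at buf[0]; for SET, the value follows the iovar name */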
+ if (len >= slen + sizeof(lval)) {
+ if (ioc->cmd == WLC_GET_VAR) {
+ msg = tmp;
+ lval = *(int*)buf;
+ } else {
+ min_len = MIN(ioc->len - slen, sizeof(int));
+ bcopy((msg + slen), &lval, min_len);
+ }
+ if (!strncmp(msg, "cur_etheraddr",
+ strlen("cur_etheraddr"))) {
+ lval = 0;
+ }
+ }
+ DHD_IOVAR_MEM((
+ "%s: cmd: %d, msg: %s val: 0x%x,"
+ " len: %d, set: %d, txn-id: %d\n",
+ ioc->cmd == WLC_GET_VAR ?
+ "WLC_GET_VAR" : "WLC_SET_VAR",
+ ioc->cmd, msg, lval, ioc->len, ioc->set,
+ dhd_prot_get_ioctl_trans_id(dhd_pub)));
+ } else {
+ DHD_IOVAR_MEM(("%s: cmd: %d, len: %d, set: %d, txn-id: %d\n",
+ ioc->cmd == WLC_GET_VAR ? "WLC_GET_VAR" : "WLC_SET_VAR",
+ ioc->cmd, ioc->len, ioc->set,
+ dhd_prot_get_ioctl_trans_id(dhd_pub)));
+ }
+ } else {
+ slen = ioc->len;
+ if (buf != NULL && slen != 0) {
+ if (slen >= 4) {
+ val = *(int*)buf;
+ } else if (slen >= 2) {
+ val = *(short*)buf;
+ } else {
+ val = *(char*)buf;
+ }
+ /* Do not dump for WLC_GET_MAGIC and WLC_GET_VERSION */
+ if (ioc->cmd != WLC_GET_MAGIC && ioc->cmd != WLC_GET_VERSION) {
+ DHD_IOVAR_MEM(("WLC_IOCTL: cmd: %d, val: %d, len: %d, "
+ "set: %d\n", ioc->cmd, val, ioc->len, ioc->set));
+ }
+ } else {
+ DHD_IOVAR_MEM(("WLC_IOCTL: cmd: %d, buf is NULL\n", ioc->cmd));
+ }
+ }
+#endif /* DHD_LOG_DUMP */
+#if defined(OEM_ANDROID)
+ if (ret && dhd_pub->up) {
+ /* Send hang event only if dhd_open() was success */
+ dhd_os_check_hang(dhd_pub, ifidx, ret);
+ }
+
+ if (ret == -ETIMEDOUT && !dhd_pub->up) {
+		DHD_ERROR(("%s: 'resumed on timeout' error occurred "
+			"before the interface was brought up\n", __FUNCTION__));
+ }
+#endif /* defined(OEM_ANDROID) */
+
+exit:
+ DHD_LINUX_GENERAL_LOCK(dhd_pub, flags);
+ DHD_BUS_BUSY_CLEAR_IN_IOVAR(dhd_pub);
+ dhd_os_busbusy_wake(dhd_pub);
+ DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
+
+#ifdef REPORT_FATAL_TIMEOUTS
+ if ((ret == BCME_OK && ioc->cmd == WLC_SET_VAR &&
+ buf != NULL &&
+ strcmp("escan", buf) == 0)) {
+ if (action == WL_SCAN_ACTION_START)
+ dhd_start_scan_timer(dhd_pub, TRUE);
+ }
+#endif /* REPORT_FATAL_TIMEOUTS */
+
+ dhd_os_proto_unblock(dhd_pub);
+
+#ifdef DETAIL_DEBUG_LOG_FOR_IOCTL
+ if (ret < 0) {
+ if ((ioc->cmd == WLC_GET_VAR || ioc->cmd == WLC_SET_VAR) &&
+ buf != NULL) {
+ if (ret == BCME_UNSUPPORTED || ret == BCME_NOTASSOCIATED) {
+ DHD_ERROR_MEM(("%s: %s: %s, %s\n",
+ __FUNCTION__, ioc->cmd == WLC_GET_VAR ?
+ "WLC_GET_VAR" : "WLC_SET_VAR",
+ buf? (char *)buf:"NO MESSAGE",
+ ret == BCME_UNSUPPORTED ? "UNSUPPORTED"
+ : "NOT ASSOCIATED"));
+ } else {
+ DHD_ERROR_MEM(("%s: %s: %s, ret = %d\n",
+ __FUNCTION__, ioc->cmd == WLC_GET_VAR ?
+ "WLC_GET_VAR" : "WLC_SET_VAR",
+ (char *)buf, ret));
+ }
+ } else {
+ if (ret == BCME_UNSUPPORTED || ret == BCME_NOTASSOCIATED) {
+ DHD_ERROR_MEM(("%s: WLC_IOCTL: cmd: %d, %s\n",
+ __FUNCTION__, ioc->cmd,
+ ret == BCME_UNSUPPORTED ? "UNSUPPORTED" :
+ "NOT ASSOCIATED"));
+ } else {
+ DHD_ERROR_MEM(("%s: WLC_IOCTL: cmd: %d, ret = %d\n",
+ __FUNCTION__, ioc->cmd, ret));
+ }
+ }
+ }
+#endif /* DETAIL_DEBUG_LOG_FOR_IOCTL */
+ }
+
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+ pm_runtime_mark_last_busy(dhd_bus_to_dev(dhd_pub->bus));
+ pm_runtime_put_autosuspend(dhd_bus_to_dev(dhd_pub->bus));
+
+ DHD_OS_WAKE_UNLOCK(dhd_pub);
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+
+#ifdef WL_MONITOR
+ /* Intercept monitor ioctl here, add/del monitor if */
+ if (ret == BCME_OK && ioc->cmd == WLC_SET_MONITOR) {
+ int val = 0;
+ if (buf != NULL && len != 0) {
+ if (len >= 4) {
+ val = *(int*)buf;
+ } else if (len >= 2) {
+ val = *(short*)buf;
+ } else {
+ val = *(char*)buf;
+ }
+ }
+ dhd_set_monitor(dhd_pub, ifidx, val);
+ }
+#endif /* WL_MONITOR */
+
+ return ret;
+}
+
+uint wl_get_port_num(wl_io_pport_t *io_pport)
+{
+ return 0;
+}
+
+/* Get bssidx from iovar params
+ * Input: dhd_pub - pointer to dhd_pub_t
+ * params - IOVAR params
+ * Output: idx - BSS index
+ *         val - pointer to the IOVAR arguments
+ */
+static int
+dhd_iovar_parse_bssidx(dhd_pub_t *dhd_pub, const char *params, uint32 *idx, const char **val)
+{
+ char *prefix = "bsscfg:";
+ uint32 bssidx;
+
+ if (!(strncmp(params, prefix, strlen(prefix)))) {
+ /* per bss setting should be prefixed with 'bsscfg:' */
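+		/* expected layout: "bsscfg:" <iovar name> '\0' <uint32 bssidx> <value...> */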
+ const char *p = params + strlen(prefix);
+
+		/* skip the iovar name */
+		while (*p != '\0')
+			p++;
+		/* and skip its NULL terminator */
+		p = p + 1;
+ bcopy(p, &bssidx, sizeof(uint32));
+ /* Get corresponding dhd index */
+ bssidx = dhd_bssidx2idx(dhd_pub, htod32(bssidx));
+
+ if (bssidx >= DHD_MAX_IFS) {
+ DHD_ERROR(("%s Wrong bssidx provided\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ /* skip bss idx */
+ p += sizeof(uint32);
+ *val = p;
+ *idx = bssidx;
+ } else {
+ DHD_ERROR(("%s: bad parameter for per bss iovar\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ return BCME_OK;
+}
+
+#if defined(DHD_DEBUG) && defined(BCMDBUS)
+/* USB Device console input function */
+int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen)
+{
+ DHD_TRACE(("%s \n", __FUNCTION__));
+
+ return dhd_iovar(dhd, 0, "cons", msg, msglen, NULL, 0, TRUE);
+
+}
+#endif /* DHD_DEBUG && BCMDBUS */
+
+#ifdef DHD_DEBUG
+int
+dhd_mem_debug(dhd_pub_t *dhd, uchar *msg, uint msglen)
+{
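+	/* msg format: "query" | "alloc <size in KB>" | "free <id>" */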
+ unsigned long int_arg = 0;
+ char *p;
+ char *end_ptr = NULL;
+ dhd_dbg_mwli_t *mw_li;
+ dll_t *item, *next;
+	/* check if mwalloc, mwquery or mwfree was supplied an argument after a space */
+	p = bcmstrstr((char *)msg, " ");
+	if (p != NULL) {
+		/* convert the space to NULL as a separator for the firmware */
+		*p = '\0';
+		/* store the argument in int_arg */
+		int_arg = bcm_strtoul(p+1, &end_ptr, 10);
+ }
+
+ if (!p && !strcmp(msg, "query")) {
+		/* query the list internally */
+ if (dll_empty(dll_head_p(&dhd->mw_list_head))) {
+ DHD_ERROR(("memwaste list is empty, call mwalloc < size > to allocate\n"));
+ } else {
+ for (item = dll_head_p(&dhd->mw_list_head);
+ !dll_end(&dhd->mw_list_head, item); item = next) {
+ next = dll_next_p(item);
+ mw_li = (dhd_dbg_mwli_t *)CONTAINEROF(item, dhd_dbg_mwli_t, list);
+ DHD_ERROR(("item: <id=%d, size=%d>\n", mw_li->id, mw_li->size));
+ }
+ }
+ } else if (p && end_ptr && (*end_ptr == '\0') && !strcmp(msg, "alloc")) {
+ int32 alloc_handle;
+		/* convert size from KB to bytes and append as a binary integer */
+		*((int32 *)(p+1)) = int_arg * 1024;
+ *(p+1+sizeof(int32)) = '\0';
+
+		/* recalculated length -> 5 bytes for "alloc" + 4 bytes for size +
+		 * 1 byte for the null character
+		 */
+ msglen = strlen(msg) + sizeof(int32) + 1;
+ if (dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, msg, msglen+1, FALSE, 0) < 0) {
+ DHD_ERROR(("IOCTL failed for memdebug alloc\n"));
+ }
+
+		/* the dongle returns the allocation handle (the address of the allocated unit) in the buffer */
+ alloc_handle = *((int32 *)msg);
+
+ /* add a node in the list with tuple <id, handle, size> */
+ if (alloc_handle == 0) {
+			DHD_ERROR(("Requested size could not be allocated\n"));
+ } else if (!(mw_li = MALLOC(dhd->osh, sizeof(*mw_li)))) {
+ DHD_ERROR(("mw list item allocation Failed\n"));
+ } else {
+ mw_li->id = dhd->mw_id++;
+ mw_li->handle = alloc_handle;
+ mw_li->size = int_arg;
+ /* append the node in the list */
+ dll_append(&dhd->mw_list_head, &mw_li->list);
+ }
+ } else if (p && end_ptr && (*end_ptr == '\0') && !strcmp(msg, "free")) {
+ /* inform dongle to free wasted chunk */
+ int handle = 0;
+ int size = 0;
+ for (item = dll_head_p(&dhd->mw_list_head);
+ !dll_end(&dhd->mw_list_head, item); item = next) {
+ next = dll_next_p(item);
+ mw_li = (dhd_dbg_mwli_t *)CONTAINEROF(item, dhd_dbg_mwli_t, list);
+
+ if (mw_li->id == (int)int_arg) {
+ handle = mw_li->handle;
+ size = mw_li->size;
+ dll_delete(item);
+ MFREE(dhd->osh, mw_li, sizeof(*mw_li));
+ if (dll_empty(dll_head_p(&dhd->mw_list_head))) {
+ /* reset the id */
+ dhd->mw_id = 0;
+ }
+ }
+ }
+ if (handle) {
+ int len;
+ /* append the free handle and the chunk size in first 8 bytes
+ * after the command and null character
+ */
+ *((int32 *)(p+1)) = handle;
+ *((int32 *)((p+1)+sizeof(int32))) = size;
+ /* append null as terminator */
+ *(p+1+2*sizeof(int32)) = '\0';
+			/* recalculated length -> 4 bytes for "free" + 8 bytes for handle and size
+			 * + 1 byte for the null character
+			 */
+ len = strlen(msg) + 2*sizeof(int32) + 1;
+ /* send iovar to free the chunk */
+ if (dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, msg, len, FALSE, 0) < 0) {
+ DHD_ERROR(("IOCTL failed for memdebug free\n"));
+ }
+ } else {
+ DHD_ERROR(("specified id does not exist\n"));
+ }
+ } else {
+ /* for all the wrong argument formats */
+ return BCME_BADARG;
+ }
+ return 0;
+}
+extern void
+dhd_mw_list_delete(dhd_pub_t *dhd, dll_t *list_head)
+{
+ dll_t *item;
+ dhd_dbg_mwli_t *mw_li;
+ while (!(dll_empty(list_head))) {
+ item = dll_head_p(list_head);
+ mw_li = (dhd_dbg_mwli_t *)CONTAINEROF(item, dhd_dbg_mwli_t, list);
+ dll_delete(item);
+ MFREE(dhd->osh, mw_li, sizeof(*mw_li));
+ }
+}
+#ifdef BCMPCIE
+int
+dhd_flow_ring_debug(dhd_pub_t *dhd, char *msg, uint msglen)
+{
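+	/* msg format: "create <src mac> <dst mac> <prio>" | "delete <flowid>" */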
+ flow_ring_table_t *flow_ring_table;
+ char *cmd;
+ char *end_ptr = NULL;
+ uint8 prio;
+ uint16 flowid;
+ int i;
+ int ret = 0;
+ cmd = bcmstrstr(msg, " ");
+ BCM_REFERENCE(prio);
+ if (cmd != NULL) {
+		/* terminate the command word with NULL so string operations can be used */
+ *cmd = '\0';
+ } else {
+ DHD_ERROR(("missing: create/delete args\n"));
+ return BCME_ERROR;
+ }
+ if (cmd && !strcmp(msg, "create")) {
+ /* extract <"source address", "destination address", "priority"> */
+ uint8 sa[ETHER_ADDR_LEN], da[ETHER_ADDR_LEN];
+ BCM_REFERENCE(sa);
+ BCM_REFERENCE(da);
+ msg = msg + strlen("create") + 1;
+ /* fill ethernet source address */
+ for (i = 0; i < ETHER_ADDR_LEN; i++) {
+ sa[i] = (uint8)bcm_strtoul(msg, &end_ptr, 16);
+ if (*end_ptr == ':') {
+ msg = (end_ptr + 1);
+ } else if (i != 5) {
+ DHD_ERROR(("not a valid source mac addr\n"));
+ return BCME_ERROR;
+ }
+ }
+ if (*end_ptr != ' ') {
+			DHD_ERROR(("missing: destination mac addr\n"));
+ return BCME_ERROR;
+ } else {
+ /* skip space */
+ msg = end_ptr + 1;
+ }
+ /* fill ethernet destination address */
+ for (i = 0; i < ETHER_ADDR_LEN; i++) {
+ da[i] = (uint8)bcm_strtoul(msg, &end_ptr, 16);
+ if (*end_ptr == ':') {
+ msg = (end_ptr + 1);
+ } else if (i != 5) {
+ DHD_ERROR(("not a valid destination mac addr\n"));
+ return BCME_ERROR;
+ }
+ }
+ if (*end_ptr != ' ') {
+ DHD_ERROR(("missing: priority\n"));
+ return BCME_ERROR;
+ } else {
+ msg = end_ptr + 1;
+ }
+ /* parse priority */
+ prio = (uint8)bcm_strtoul(msg, &end_ptr, 10);
+ if (prio > MAXPRIO) {
+ DHD_ERROR(("%s: invalid priority. Must be between 0-7 inclusive\n",
+ __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ if (*end_ptr != '\0') {
+			DHD_ERROR(("msg not terminated with NULL character\n"));
+ return BCME_ERROR;
+ }
+ ret = dhd_flowid_debug_create(dhd, 0, prio, (char *)sa, (char *)da, &flowid);
+ if (ret != BCME_OK) {
+ DHD_ERROR(("%s: flowring creation failed ret: %d\n", __FUNCTION__, ret));
+ return BCME_ERROR;
+ }
+ return BCME_OK;
+
+ } else if (cmd && !strcmp(msg, "delete")) {
+ msg = msg + strlen("delete") + 1;
+ /* parse flowid */
+ flowid = (uint16)bcm_strtoul(msg, &end_ptr, 10);
+ if (*end_ptr != '\0') {
+			DHD_ERROR(("msg not terminated with NULL character\n"));
+ return BCME_ERROR;
+ }
+
+		/* Look up the flowid on ifidx 0, since this IOVAR creates flowrings on ifidx 0 */
+ if (dhd_flowid_find_by_ifidx(dhd, 0, flowid) != BCME_OK)
+ {
+			DHD_ERROR(("%s: cannot delete flowid %u, it was never created\n", __FUNCTION__, flowid));
+ return BCME_ERROR;
+ }
+
+ flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
+ ret = dhd_bus_flow_ring_delete_request(dhd->bus, (void *)&flow_ring_table[flowid]);
+ if (ret != BCME_OK) {
+ DHD_ERROR(("%s: flowring deletion failed ret: %d\n", __FUNCTION__, ret));
+ return BCME_ERROR;
+ }
+ return BCME_OK;
+ }
+ DHD_ERROR(("%s: neither create nor delete\n", __FUNCTION__));
+ return BCME_ERROR;
+}
+#endif /* BCMPCIE */
+#endif /* DHD_DEBUG */
+
+static int
+dhd_doiovar(dhd_pub_t *dhd_pub, const bcm_iovar_t *vi, uint32 actionid, const char *name,
+ void *params, int plen, void *arg, uint len, int val_size)
+{
+ int bcmerror = 0;
+ int32 int_val = 0;
+ uint32 dhd_ver_len, bus_api_rev_len;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+ DHD_TRACE(("%s: actionid = %d; name %s\n", __FUNCTION__, actionid, name));
+
+ if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0)
+ goto exit;
+
+ if (plen >= (int)sizeof(int_val))
+ bcopy(params, &int_val, sizeof(int_val));
+
+ switch (actionid) {
+ case IOV_GVAL(IOV_VERSION):
+ /* Need to have checked buffer length */
+ dhd_ver_len = sizeof(dhd_version) - 1;
+ bus_api_rev_len = strlen(bus_api_revision);
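+		/* response is dhd_version immediately followed by bus_api_revision, then a NUL */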
+ if (len > dhd_ver_len + bus_api_rev_len) {
+ bcmerror = memcpy_s((char *)arg, len, dhd_version, dhd_ver_len);
+ if (bcmerror != BCME_OK) {
+ break;
+ }
+ bcmerror = memcpy_s((char *)arg + dhd_ver_len, len - dhd_ver_len,
+ bus_api_revision, bus_api_rev_len);
+ if (bcmerror != BCME_OK) {
+ break;
+ }
+ *((char *)arg + dhd_ver_len + bus_api_rev_len) = '\0';
+ }
+ break;
+
+ case IOV_GVAL(IOV_WLMSGLEVEL):
+ printf("android_msg_level=0x%x\n", android_msg_level);
+ printf("config_msg_level=0x%x\n", config_msg_level);
+#if defined(WL_WIRELESS_EXT)
+ int_val = (int32)iw_msg_level;
+ bcopy(&int_val, arg, val_size);
+ printf("iw_msg_level=0x%x\n", iw_msg_level);
+#endif
+#ifdef WL_CFG80211
+ int_val = (int32)wl_dbg_level;
+ bcopy(&int_val, arg, val_size);
+ printf("cfg_msg_level=0x%x\n", wl_dbg_level);
+#endif
+ break;
+
+ case IOV_SVAL(IOV_WLMSGLEVEL):
+ if (int_val & DHD_ANDROID_VAL) {
+ android_msg_level = (uint)(int_val & 0xFFFF);
+ printf("android_msg_level=0x%x\n", android_msg_level);
+ }
+ if (int_val & DHD_CONFIG_VAL) {
+ config_msg_level = (uint)(int_val & 0xFFFF);
+ printf("config_msg_level=0x%x\n", config_msg_level);
+ }
+#if defined(WL_WIRELESS_EXT)
+ if (int_val & DHD_IW_VAL) {
+ iw_msg_level = (uint)(int_val & 0xFFFF);
+ printf("iw_msg_level=0x%x\n", iw_msg_level);
+ }
+#endif
+#ifdef WL_CFG80211
+ if (int_val & DHD_CFG_VAL) {
+ wl_cfg80211_enable_trace((u32)(int_val & 0xFFFF));
+ }
+#endif
+ break;
+
+ case IOV_GVAL(IOV_MSGLEVEL):
+ int_val = (int32)dhd_msg_level;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_MSGLEVEL):
+ dhd_msg_level = int_val;
+ break;
+
+ case IOV_GVAL(IOV_BCMERRORSTR):
+ bcm_strncpy_s((char *)arg, len, bcmerrorstr(dhd_pub->bcmerror), BCME_STRLEN);
+ ((char *)arg)[BCME_STRLEN - 1] = 0x00;
+ break;
+
+ case IOV_GVAL(IOV_BCMERROR):
+ int_val = (int32)dhd_pub->bcmerror;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+#ifndef BCMDBUS
+ case IOV_GVAL(IOV_WDTICK):
+ int_val = (int32)dhd_watchdog_ms;
+ bcopy(&int_val, arg, val_size);
+ break;
+#endif /* !BCMDBUS */
+
+ case IOV_SVAL(IOV_WDTICK):
+ if (!dhd_pub->up) {
+ bcmerror = BCME_NOTUP;
+ break;
+ }
+
+ dhd_watchdog_ms = (uint)int_val;
+
+ dhd_os_wd_timer(dhd_pub, (uint)int_val);
+ break;
+
+ case IOV_GVAL(IOV_DUMP):
+ if (dhd_dump(dhd_pub, arg, len) <= 0)
+ bcmerror = BCME_ERROR;
+ else
+ bcmerror = BCME_OK;
+ break;
+
+#ifndef BCMDBUS
+ case IOV_GVAL(IOV_DCONSOLE_POLL):
+ int_val = (int32)dhd_pub->dhd_console_ms;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_DCONSOLE_POLL):
+ dhd_pub->dhd_console_ms = (uint)int_val;
+ break;
+
+#if defined(DHD_DEBUG)
+ case IOV_SVAL(IOV_CONS):
+ if (len > 0) {
+#ifdef CONSOLE_DPC
+ bcmerror = dhd_bus_txcons(dhd_pub, arg, len - 1);
+#else
+ bcmerror = dhd_bus_console_in(dhd_pub, arg, len - 1);
+#endif
+ }
+ break;
+#endif /* DHD_DEBUG */
+#endif /* !BCMDBUS */
+
+ case IOV_SVAL(IOV_CLEARCOUNTS):
+ dhd_pub->tx_packets = dhd_pub->rx_packets = 0;
+ dhd_pub->tx_errors = dhd_pub->rx_errors = 0;
+ dhd_pub->tx_ctlpkts = dhd_pub->rx_ctlpkts = 0;
+ dhd_pub->tx_ctlerrs = dhd_pub->rx_ctlerrs = 0;
+ dhd_pub->tx_dropped = 0;
+ dhd_pub->rx_dropped = 0;
+ dhd_pub->tx_pktgetfail = 0;
+ dhd_pub->rx_pktgetfail = 0;
+ dhd_pub->rx_readahead_cnt = 0;
+ dhd_pub->tx_realloc = 0;
+ dhd_pub->wd_dpc_sched = 0;
+ dhd_pub->tx_big_packets = 0;
+ memset(&dhd_pub->dstats, 0, sizeof(dhd_pub->dstats));
+ dhd_bus_clearcounts(dhd_pub);
+#ifdef PROP_TXSTATUS
+ /* clear proptxstatus related counters */
+ dhd_wlfc_clear_counts(dhd_pub);
+#endif /* PROP_TXSTATUS */
+#if defined(DHD_LB_STATS)
+ DHD_LB_STATS_RESET(dhd_pub);
+#endif /* DHD_LB_STATS */
+ break;
+
+#ifdef BCMPERFSTATS
+ case IOV_GVAL(IOV_LOGDUMP): {
+ bcmdumplog((char*)arg, len);
+ break;
+ }
+
+ case IOV_SVAL(IOV_LOGCAL): {
+ bcmlog("Starting OSL_DELAY (%d usecs)", (uint)int_val, 0);
+ OSL_DELAY((uint)int_val);
+ bcmlog("Finished OSL_DELAY (%d usecs)", (uint)int_val, 0);
+ break;
+ }
+
+ case IOV_SVAL(IOV_LOGSTAMP): {
+ int int_val2;
+
+ if (plen >= 2 * sizeof(int)) {
+ bcopy((char *)params + sizeof(int_val), &int_val2, sizeof(int_val2));
+ bcmlog("User message %d %d", (uint)int_val, (uint)int_val2);
+ } else if (plen >= sizeof(int)) {
+ bcmlog("User message %d", (uint)int_val, 0);
+ } else {
+ bcmlog("User message", 0, 0);
+ }
+ break;
+ }
+#endif /* BCMPERFSTATS */
+
+ case IOV_GVAL(IOV_IOCTLTIMEOUT): {
+ int_val = (int32)dhd_os_get_ioctl_resp_timeout();
+ bcopy(&int_val, arg, sizeof(int_val));
+ break;
+ }
+
+ case IOV_SVAL(IOV_IOCTLTIMEOUT): {
+ if (int_val <= 0)
+ bcmerror = BCME_BADARG;
+ else
+ dhd_os_set_ioctl_resp_timeout((unsigned int)int_val);
+ break;
+ }
+
+#ifdef PROP_TXSTATUS
+ case IOV_GVAL(IOV_PROPTXSTATUS_ENABLE): {
+ bool wlfc_enab = FALSE;
+ bcmerror = dhd_wlfc_get_enable(dhd_pub, &wlfc_enab);
+ if (bcmerror != BCME_OK)
+ goto exit;
+ int_val = wlfc_enab ? 1 : 0;
+ bcopy(&int_val, arg, val_size);
+ break;
+ }
+ case IOV_SVAL(IOV_PROPTXSTATUS_ENABLE): {
+ bool wlfc_enab = FALSE;
+ bcmerror = dhd_wlfc_get_enable(dhd_pub, &wlfc_enab);
+ if (bcmerror != BCME_OK)
+ goto exit;
+
+ /* wlfc is already set as desired */
+ if (wlfc_enab == (int_val == 0 ? FALSE : TRUE))
+ goto exit;
+
+ if (int_val == TRUE && disable_proptx) {
+ disable_proptx = 0;
+ }
+
+ if (int_val == TRUE)
+ bcmerror = dhd_wlfc_init(dhd_pub);
+ else
+ bcmerror = dhd_wlfc_deinit(dhd_pub);
+
+ break;
+ }
+ case IOV_GVAL(IOV_PROPTXSTATUS_MODE):
+ bcmerror = dhd_wlfc_get_mode(dhd_pub, &int_val);
+ if (bcmerror != BCME_OK)
+ goto exit;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_PROPTXSTATUS_MODE):
+ dhd_wlfc_set_mode(dhd_pub, int_val);
+ break;
+#ifdef QMONITOR
+ case IOV_GVAL(IOV_QMON_TIME_THRES): {
+ int_val = dhd_qmon_thres(dhd_pub, FALSE, 0);
+ bcopy(&int_val, arg, val_size);
+ break;
+ }
+
+ case IOV_SVAL(IOV_QMON_TIME_THRES): {
+ dhd_qmon_thres(dhd_pub, TRUE, int_val);
+ break;
+ }
+
+ case IOV_GVAL(IOV_QMON_TIME_PERCENT): {
+ int_val = dhd_qmon_getpercent(dhd_pub);
+ bcopy(&int_val, arg, val_size);
+ break;
+ }
+#endif /* QMONITOR */
+
+ case IOV_GVAL(IOV_PROPTXSTATUS_MODULE_IGNORE):
+ bcmerror = dhd_wlfc_get_module_ignore(dhd_pub, &int_val);
+ if (bcmerror != BCME_OK)
+ goto exit;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_PROPTXSTATUS_MODULE_IGNORE):
+ dhd_wlfc_set_module_ignore(dhd_pub, int_val);
+ break;
+
+ case IOV_GVAL(IOV_PROPTXSTATUS_CREDIT_IGNORE):
+ bcmerror = dhd_wlfc_get_credit_ignore(dhd_pub, &int_val);
+ if (bcmerror != BCME_OK)
+ goto exit;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_PROPTXSTATUS_CREDIT_IGNORE):
+ dhd_wlfc_set_credit_ignore(dhd_pub, int_val);
+ break;
+
+ case IOV_GVAL(IOV_PROPTXSTATUS_TXSTATUS_IGNORE):
+ bcmerror = dhd_wlfc_get_txstatus_ignore(dhd_pub, &int_val);
+ if (bcmerror != BCME_OK)
+ goto exit;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_PROPTXSTATUS_TXSTATUS_IGNORE):
+ dhd_wlfc_set_txstatus_ignore(dhd_pub, int_val);
+ break;
+
+ case IOV_GVAL(IOV_PROPTXSTATUS_RXPKT_CHK):
+ bcmerror = dhd_wlfc_get_rxpkt_chk(dhd_pub, &int_val);
+ if (bcmerror != BCME_OK)
+ goto exit;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_PROPTXSTATUS_RXPKT_CHK):
+ dhd_wlfc_set_rxpkt_chk(dhd_pub, int_val);
+ break;
+
+#endif /* PROP_TXSTATUS */
+
+ case IOV_GVAL(IOV_BUS_TYPE):
+		/* The dhd application queries the driver to check whether the bus is USB, SDIO, or PCIe. */
+#ifdef BCMDBUS
+ int_val = BUS_TYPE_USB;
+#endif
+#ifdef BCMSDIO
+ int_val = BUS_TYPE_SDIO;
+#endif
+#ifdef PCIE_FULL_DONGLE
+ int_val = BUS_TYPE_PCIE;
+#endif
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_CHANGEMTU):
+ int_val &= 0xffff;
+ bcmerror = dhd_change_mtu(dhd_pub, int_val, 0);
+ break;
+
+ case IOV_GVAL(IOV_HOSTREORDER_FLOWS):
+ {
+ uint i = 0;
+ uint8 *ptr = (uint8 *)arg;
+ uint8 count = 0;
+
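+			/* output format: buf[0] = flow count, then one flow_id byte per active reorder flow */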
+ ptr++;
+ for (i = 0; i < WLHOST_REORDERDATA_MAXFLOWS; i++) {
+ if (dhd_pub->reorder_bufs[i] != NULL) {
+ *ptr = dhd_pub->reorder_bufs[i]->flow_id;
+ ptr++;
+ count++;
+ }
+ }
+ ptr = (uint8 *)arg;
+ *ptr = count;
+ break;
+ }
+#ifdef DHDTCPACK_SUPPRESS
+ case IOV_GVAL(IOV_TCPACK_SUPPRESS): {
+ int_val = (uint32)dhd_pub->tcpack_sup_mode;
+ bcopy(&int_val, arg, val_size);
+ break;
+ }
+ case IOV_SVAL(IOV_TCPACK_SUPPRESS): {
+ bcmerror = dhd_tcpack_suppress_set(dhd_pub, (uint8)int_val);
+ break;
+ }
+#endif /* DHDTCPACK_SUPPRESS */
+#ifdef DHD_WMF
+ case IOV_GVAL(IOV_WMF_BSS_ENAB): {
+ uint32 bssidx;
+ dhd_wmf_t *wmf;
+ const char *val;
+
+ if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
+ DHD_ERROR(("%s: wmf_bss_enable: bad parameter\n", __FUNCTION__));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+
+ wmf = dhd_wmf_conf(dhd_pub, bssidx);
+		int_val = wmf->wmf_enable ? 1 : 0;
+ bcopy(&int_val, arg, val_size);
+ break;
+ }
+ case IOV_SVAL(IOV_WMF_BSS_ENAB): {
+ /* Enable/Disable WMF */
+ uint32 bssidx;
+ dhd_wmf_t *wmf;
+ const char *val;
+
+ if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
+ DHD_ERROR(("%s: wmf_bss_enable: bad parameter\n", __FUNCTION__));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+
+ ASSERT(val);
+ bcopy(val, &int_val, sizeof(uint32));
+ wmf = dhd_wmf_conf(dhd_pub, bssidx);
+ if (wmf->wmf_enable == int_val)
+ break;
+ if (int_val) {
+ /* Enable WMF */
+ if (dhd_wmf_instance_add(dhd_pub, bssidx) != BCME_OK) {
+ DHD_ERROR(("%s: Error in creating WMF instance\n",
+ __FUNCTION__));
+ break;
+ }
+ if (dhd_wmf_start(dhd_pub, bssidx) != BCME_OK) {
+ DHD_ERROR(("%s: Failed to start WMF\n", __FUNCTION__));
+ break;
+ }
+ wmf->wmf_enable = TRUE;
+ } else {
+ /* Disable WMF */
+ wmf->wmf_enable = FALSE;
+ dhd_wmf_stop(dhd_pub, bssidx);
+ dhd_wmf_instance_del(dhd_pub, bssidx);
+ }
+ break;
+ }
+ case IOV_GVAL(IOV_WMF_UCAST_IGMP):
+ int_val = dhd_pub->wmf_ucast_igmp ? 1 : 0;
+ bcopy(&int_val, arg, val_size);
+ break;
+ case IOV_SVAL(IOV_WMF_UCAST_IGMP):
+ if (dhd_pub->wmf_ucast_igmp == int_val)
+ break;
+
+ if (int_val >= OFF && int_val <= ON)
+ dhd_pub->wmf_ucast_igmp = int_val;
+ else
+ bcmerror = BCME_RANGE;
+ break;
+ case IOV_GVAL(IOV_WMF_MCAST_DATA_SENDUP):
+ int_val = dhd_wmf_mcast_data_sendup(dhd_pub, 0, FALSE, FALSE);
+ bcopy(&int_val, arg, val_size);
+ break;
+ case IOV_SVAL(IOV_WMF_MCAST_DATA_SENDUP):
+ dhd_wmf_mcast_data_sendup(dhd_pub, 0, TRUE, int_val);
+ break;
+
+#ifdef WL_IGMP_UCQUERY
+ case IOV_GVAL(IOV_WMF_UCAST_IGMP_QUERY):
+ int_val = dhd_pub->wmf_ucast_igmp_query ? 1 : 0;
+ bcopy(&int_val, arg, val_size);
+ break;
+ case IOV_SVAL(IOV_WMF_UCAST_IGMP_QUERY):
+ if (dhd_pub->wmf_ucast_igmp_query == int_val)
+ break;
+
+ if (int_val >= OFF && int_val <= ON)
+ dhd_pub->wmf_ucast_igmp_query = int_val;
+ else
+ bcmerror = BCME_RANGE;
+ break;
+#endif /* WL_IGMP_UCQUERY */
+#ifdef DHD_UCAST_UPNP
+ case IOV_GVAL(IOV_WMF_UCAST_UPNP):
+ int_val = dhd_pub->wmf_ucast_upnp ? 1 : 0;
+ bcopy(&int_val, arg, val_size);
+ break;
+ case IOV_SVAL(IOV_WMF_UCAST_UPNP):
+ if (dhd_pub->wmf_ucast_upnp == int_val)
+ break;
+
+ if (int_val >= OFF && int_val <= ON)
+ dhd_pub->wmf_ucast_upnp = int_val;
+ else
+ bcmerror = BCME_RANGE;
+ break;
+#endif /* DHD_UCAST_UPNP */
+
+ case IOV_GVAL(IOV_WMF_PSTA_DISABLE): {
+ uint32 bssidx;
+ const char *val;
+
+ if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) {
+			DHD_ERROR(("%s: ap isolate: bad parameter\n", __FUNCTION__));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+
+ int_val = dhd_get_wmf_psta_disable(dhd_pub, bssidx);
+ bcopy(&int_val, arg, val_size);
+ break;
+ }
+
+ case IOV_SVAL(IOV_WMF_PSTA_DISABLE): {
+ uint32 bssidx;
+ const char *val;
+
+ if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) {
+ DHD_ERROR(("%s: ap isolate: bad parameter\n", __FUNCTION__));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+
+ ASSERT(val);
+ bcopy(val, &int_val, sizeof(uint32));
+ dhd_set_wmf_psta_disable(dhd_pub, bssidx, int_val);
+ break;
+ }
+#endif /* DHD_WMF */
+
+#if defined(BCM_ROUTER_DHD)
+ case IOV_SVAL(IOV_TRAFFIC_MGMT_DWM): {
+ trf_mgmt_filter_list_t *trf_mgmt_filter_list =
+ (trf_mgmt_filter_list_t *)(arg);
+ bcmerror = traffic_mgmt_add_dwm_filter(dhd_pub, trf_mgmt_filter_list, len);
+ }
+ break;
+#endif /* BCM_ROUTER_DHD */
+
+#ifdef DHD_L2_FILTER
+ case IOV_GVAL(IOV_DHCP_UNICAST): {
+ uint32 bssidx;
+ const char *val;
+ if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
+			DHD_ERROR(("%s: IOV_DHCP_UNICAST: bad parameter, name = %s\n",
+ __FUNCTION__, name));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+ int_val = dhd_get_dhcp_unicast_status(dhd_pub, bssidx);
+ memcpy(arg, &int_val, val_size);
+ break;
+ }
+ case IOV_SVAL(IOV_DHCP_UNICAST): {
+ uint32 bssidx;
+ const char *val;
+ if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
+			DHD_ERROR(("%s: IOV_DHCP_UNICAST: bad parameter, name = %s\n",
+ __FUNCTION__, name));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+ memcpy(&int_val, val, sizeof(int_val));
+ bcmerror = dhd_set_dhcp_unicast_status(dhd_pub, bssidx, int_val ? 1 : 0);
+ break;
+ }
+ case IOV_GVAL(IOV_BLOCK_PING): {
+ uint32 bssidx;
+ const char *val;
+
+ if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
+ DHD_ERROR(("%s: IOV_BLOCK_PING: bad parameter\n", __FUNCTION__));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+ int_val = dhd_get_block_ping_status(dhd_pub, bssidx);
+ memcpy(arg, &int_val, val_size);
+ break;
+ }
+ case IOV_SVAL(IOV_BLOCK_PING): {
+ uint32 bssidx;
+ const char *val;
+
+ if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
+ DHD_ERROR(("%s: IOV_BLOCK_PING: bad parameter\n", __FUNCTION__));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+ memcpy(&int_val, val, sizeof(int_val));
+ bcmerror = dhd_set_block_ping_status(dhd_pub, bssidx, int_val ? 1 : 0);
+ break;
+ }
+ case IOV_GVAL(IOV_PROXY_ARP): {
+ uint32 bssidx;
+ const char *val;
+
+ if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
+ DHD_ERROR(("%s: IOV_PROXY_ARP: bad parameter\n", __FUNCTION__));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+ int_val = dhd_get_parp_status(dhd_pub, bssidx);
+ bcopy(&int_val, arg, val_size);
+ break;
+ }
+ case IOV_SVAL(IOV_PROXY_ARP): {
+ uint32 bssidx;
+ const char *val;
+
+ if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
+ DHD_ERROR(("%s: IOV_PROXY_ARP: bad parameter\n", __FUNCTION__));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+ bcopy(val, &int_val, sizeof(int_val));
+
+		/* Issue an iovar request to WL to update the proxy arp capability bit
+ * in the Extended Capability IE of beacons/probe responses.
+ */
+ bcmerror = dhd_iovar(dhd_pub, bssidx, "proxy_arp_advertise", val, sizeof(int_val),
+ NULL, 0, TRUE);
+ if (bcmerror == BCME_OK) {
+ dhd_set_parp_status(dhd_pub, bssidx, int_val ? 1 : 0);
+ }
+ break;
+ }
+ case IOV_GVAL(IOV_GRAT_ARP): {
+ uint32 bssidx;
+ const char *val;
+
+ if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
+ DHD_ERROR(("%s: IOV_GRAT_ARP: bad parameter\n", __FUNCTION__));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+ int_val = dhd_get_grat_arp_status(dhd_pub, bssidx);
+ memcpy(arg, &int_val, val_size);
+ break;
+ }
+ case IOV_SVAL(IOV_GRAT_ARP): {
+ uint32 bssidx;
+ const char *val;
+
+ if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
+ DHD_ERROR(("%s: IOV_GRAT_ARP: bad parameter\n", __FUNCTION__));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+ memcpy(&int_val, val, sizeof(int_val));
+ bcmerror = dhd_set_grat_arp_status(dhd_pub, bssidx, int_val ? 1 : 0);
+ break;
+ }
+ case IOV_GVAL(IOV_BLOCK_TDLS): {
+ uint32 bssidx;
+ const char *val;
+
+ if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
+ DHD_ERROR(("%s: IOV_BLOCK_TDLS: bad parameter\n", __FUNCTION__));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+ int_val = dhd_get_block_tdls_status(dhd_pub, bssidx);
+ memcpy(arg, &int_val, val_size);
+ break;
+ }
+ case IOV_SVAL(IOV_BLOCK_TDLS): {
+ uint32 bssidx;
+ const char *val;
+
+ if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
+ DHD_ERROR(("%s: IOV_BLOCK_TDLS: bad parameter\n", __FUNCTION__));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+ memcpy(&int_val, val, sizeof(int_val));
+ bcmerror = dhd_set_block_tdls_status(dhd_pub, bssidx, int_val ? 1 : 0);
+ break;
+ }
+#endif /* DHD_L2_FILTER */
+ case IOV_SVAL(IOV_DHD_IE): {
+ uint32 bssidx;
+ const char *val;
+#if (defined(BCM_ROUTER_DHD) && defined(QOS_MAP_SET))
+ uint8 ie_type;
+ bcm_tlv_t *qos_map_ie = NULL;
+ ie_setbuf_t *ie_getbufp = (ie_setbuf_t *)(arg+4);
+ ie_type = ie_getbufp->ie_buffer.ie_list[0].ie_data.id;
+#endif /* BCM_ROUTER_DHD && QOS_MAP_SET */
+
+ if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
+ DHD_ERROR(("%s: dhd ie: bad parameter\n", __FUNCTION__));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+
+#if (defined(BCM_ROUTER_DHD) && defined(QOS_MAP_SET))
+ qos_map_ie = (bcm_tlv_t *)(&(ie_getbufp->ie_buffer.ie_list[0].ie_data));
+ if (qos_map_ie != NULL && (ie_type == DOT11_MNG_QOS_MAP_ID)) {
+ bcmerror = dhd_set_qosmap_up_table(dhd_pub, bssidx, qos_map_ie);
+ }
+#endif /* BCM_ROUTER_DHD && QOS_MAP_SET */
+ break;
+ }
+ case IOV_GVAL(IOV_AP_ISOLATE): {
+ uint32 bssidx;
+ const char *val;
+
+ if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
+			DHD_ERROR(("%s: ap isolate: bad parameter\n", __FUNCTION__));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+
+ int_val = dhd_get_ap_isolate(dhd_pub, bssidx);
+ bcopy(&int_val, arg, val_size);
+ break;
+ }
+ case IOV_SVAL(IOV_AP_ISOLATE): {
+ uint32 bssidx;
+ const char *val;
+
+ if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
+ DHD_ERROR(("%s: ap isolate: bad parameter\n", __FUNCTION__));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+
+ ASSERT(val);
+ bcopy(val, &int_val, sizeof(uint32));
+ dhd_set_ap_isolate(dhd_pub, bssidx, int_val);
+ break;
+ }
+#ifdef DHD_PSTA
+ case IOV_GVAL(IOV_PSTA): {
+ int_val = dhd_get_psta_mode(dhd_pub);
+ bcopy(&int_val, arg, val_size);
+ break;
+ }
+ case IOV_SVAL(IOV_PSTA): {
+ if (int_val >= DHD_MODE_PSTA_DISABLED && int_val <= DHD_MODE_PSR) {
+ dhd_set_psta_mode(dhd_pub, int_val);
+ } else {
+ bcmerror = BCME_RANGE;
+ }
+ break;
+ }
+#endif /* DHD_PSTA */
+#ifdef DHD_WET
+ case IOV_GVAL(IOV_WET):
+ int_val = dhd_get_wet_mode(dhd_pub);
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_WET):
+ if (int_val == 0 || int_val == 1) {
+ dhd_set_wet_mode(dhd_pub, int_val);
+ /* Delete the WET DB when disabled */
+ if (!int_val) {
+ dhd_wet_sta_delete_list(dhd_pub);
+ }
+ } else {
+ bcmerror = BCME_RANGE;
+ }
+ break;
+ case IOV_SVAL(IOV_WET_HOST_IPV4):
+ dhd_set_wet_host_ipv4(dhd_pub, params, plen);
+ break;
+ case IOV_SVAL(IOV_WET_HOST_MAC):
+ dhd_set_wet_host_mac(dhd_pub, params, plen);
+ break;
+#endif /* DHD_WET */
+#ifdef DHD_MCAST_REGEN
+ case IOV_GVAL(IOV_MCAST_REGEN_BSS_ENABLE): {
+ uint32 bssidx;
+ const char *val;
+
+ if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) {
+ DHD_ERROR(("%s: mcast_regen_bss_enable: bad parameter\n", __FUNCTION__));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+
+ int_val = dhd_get_mcast_regen_bss_enable(dhd_pub, bssidx);
+ bcopy(&int_val, arg, val_size);
+ break;
+ }
+
+ case IOV_SVAL(IOV_MCAST_REGEN_BSS_ENABLE): {
+ uint32 bssidx;
+ const char *val;
+
+ if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) {
+ DHD_ERROR(("%s: mcast_regen_bss_enable: bad parameter\n", __FUNCTION__));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+
+ ASSERT(val);
+ bcopy(val, &int_val, sizeof(uint32));
+ dhd_set_mcast_regen_bss_enable(dhd_pub, bssidx, int_val);
+ break;
+ }
+#endif /* DHD_MCAST_REGEN */
+
+ case IOV_GVAL(IOV_CFG80211_OPMODE): {
+ int_val = (int32)dhd_pub->op_mode;
+ bcopy(&int_val, arg, sizeof(int_val));
+ break;
+ }
+ case IOV_SVAL(IOV_CFG80211_OPMODE): {
+ if (int_val <= 0)
+ bcmerror = BCME_BADARG;
+ else
+ dhd_pub->op_mode = int_val;
+ break;
+ }
+
+ case IOV_GVAL(IOV_ASSERT_TYPE):
+ int_val = g_assert_type;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_ASSERT_TYPE):
+ g_assert_type = (uint32)int_val;
+ break;
+
+#if defined(NDIS)
+ case IOV_GVAL(IOV_WAKEIND):
+ dhd_os_wakeind(dhd_pub, &int_val);
+ bcopy(&int_val, arg, val_size);
+ break;
+#endif /* NDIS */
+
+#if !defined(NDIS) && !defined(BCM_ROUTER_DHD)
+ case IOV_GVAL(IOV_LMTEST): {
+ *(uint32 *)arg = (uint32)lmtest;
+ break;
+ }
+
+ case IOV_SVAL(IOV_LMTEST): {
+ uint32 val = *(uint32 *)arg;
+ if (val > 50)
+ bcmerror = BCME_BADARG;
+ else {
+ lmtest = (uint)val;
+ DHD_ERROR(("%s: lmtest %s\n",
+ __FUNCTION__, (lmtest == FALSE)? "OFF" : "ON"));
+ }
+ break;
+ }
+#endif /* !NDIS && !BCM_ROUTER_DHD */
+#ifdef BCMDBG
+ case IOV_GVAL(IOV_MACDBG_PD11REGS):
+ bcmerror = dhd_macdbg_pd11regs(dhd_pub, params, plen, arg, len);
+ break;
+ case IOV_GVAL(IOV_MACDBG_REGLIST):
+ bcmerror = dhd_macdbg_reglist(dhd_pub, arg, len);
+ break;
+ case IOV_GVAL(IOV_MACDBG_PSVMPMEMS):
+ bcmerror = dhd_macdbg_psvmpmems(dhd_pub, params, plen, arg, len);
+ break;
+#endif /* BCMDBG */
+
+#ifdef SHOW_LOGTRACE
+ case IOV_GVAL(IOV_DUMP_TRACE_LOG): {
+ trace_buf_info_t *trace_buf_info = (trace_buf_info_t *)arg;
+ dhd_dbg_ring_t *dbg_verbose_ring = NULL;
+
+ dbg_verbose_ring = dhd_dbg_get_ring_from_ring_id(dhd_pub, FW_VERBOSE_RING_ID);
+ if (dbg_verbose_ring == NULL) {
+ DHD_ERROR(("dbg_verbose_ring is NULL\n"));
+ bcmerror = BCME_UNSUPPORTED;
+ break;
+ }
+
+ if (trace_buf_info != NULL) {
+ bzero(trace_buf_info, sizeof(trace_buf_info_t));
+ dhd_dbg_read_ring_into_trace_buf(dbg_verbose_ring, trace_buf_info);
+ } else {
+ DHD_ERROR(("%s: arg is NULL\n", __FUNCTION__));
+ bcmerror = BCME_NOMEM;
+ }
+ break;
+ }
+#endif /* SHOW_LOGTRACE */
+#ifdef BTLOG
+ case IOV_GVAL(IOV_DUMP_BT_LOG): {
+ bt_log_buf_info_t *bt_log_buf_info = (bt_log_buf_info_t *)arg;
+ uint32 rlen;
+
+ rlen = dhd_dbg_pull_single_from_ring(dhd_pub, BT_LOG_RING_ID, bt_log_buf_info->buf,
+ BT_LOG_BUF_MAX_SIZE, TRUE);
+ bt_log_buf_info->size = rlen;
+ bt_log_buf_info->availability = BT_LOG_NEXT_BUF_NOT_AVAIL;
+ if (rlen == 0) {
+ bt_log_buf_info->availability = BT_LOG_BUF_NOT_AVAILABLE;
+ } else {
+ dhd_dbg_ring_status_t ring_status;
+ dhd_dbg_get_ring_status(dhd_pub, BT_LOG_RING_ID, &ring_status);
+ if (ring_status.written_bytes != ring_status.read_bytes) {
+ bt_log_buf_info->availability = BT_LOG_NEXT_BUF_AVAIL;
+ }
+ }
+ break;
+ }
+ case IOV_GVAL(IOV_BTLOG):
+ {
+ uint32 btlog_val = dhd_pub->bt_logging_enabled ? 1 : 0;
+ bcopy(&btlog_val, arg, val_size);
+ }
+ break;
+ case IOV_SVAL(IOV_BTLOG):
+ {
+ if (dhd_pub->busstate != DHD_BUS_DOWN) {
+			DHD_ERROR(("%s: can be changed only when the bus is down (before FW download)\n",
+ __FUNCTION__));
+ bcmerror = BCME_NOTDOWN;
+ break;
+ }
+ if (int_val)
+ dhd_pub->bt_logging_enabled = TRUE;
+ else
+ dhd_pub->bt_logging_enabled = FALSE;
+ }
+ break;
+
+#endif /* BTLOG */
+#ifdef SNAPSHOT_UPLOAD
+ case IOV_SVAL(IOV_BT_MEM_DUMP): {
+ dhd_prot_send_snapshot_request(dhd_pub, SNAPSHOT_TYPE_BT, int_val);
+ break;
+ }
+ case IOV_GVAL(IOV_BT_UPLOAD): {
+ int status;
+ bt_mem_req_t req;
+ bt_log_buf_info_t *mem_info = (bt_log_buf_info_t *)arg;
+ uint32 size;
+ bool is_more;
+
+ memcpy(&req, params, sizeof(req));
+
+ status = dhd_prot_get_snapshot(dhd_pub, SNAPSHOT_TYPE_BT, req.offset,
+ req.buf_size, mem_info->buf, &size, &is_more);
+ if (status == BCME_OK) {
+ mem_info->size = size;
+ mem_info->availability = is_more ?
+ BT_LOG_NEXT_BUF_AVAIL : BT_LOG_NEXT_BUF_NOT_AVAIL;
+ } else if (status == BCME_NOTREADY) {
+ mem_info->size = 0;
+ mem_info->availability = BT_LOG_NOT_READY;
+ } else {
+ mem_info->size = 0;
+ mem_info->availability = BT_LOG_BUF_NOT_AVAILABLE;
+ }
+ break;
+ }
+#endif /* SNAPSHOT_UPLOAD */
+#ifdef REPORT_FATAL_TIMEOUTS
+ case IOV_GVAL(IOV_SCAN_TO): {
+ dhd_get_scan_to_val(dhd_pub, (uint32 *)&int_val);
+ bcopy(&int_val, arg, val_size);
+ break;
+ }
+ case IOV_SVAL(IOV_SCAN_TO): {
+ dhd_set_scan_to_val(dhd_pub, (uint32)int_val);
+ break;
+ }
+ case IOV_GVAL(IOV_JOIN_TO): {
+ dhd_get_join_to_val(dhd_pub, (uint32 *)&int_val);
+ bcopy(&int_val, arg, val_size);
+ break;
+ }
+ case IOV_SVAL(IOV_JOIN_TO): {
+ dhd_set_join_to_val(dhd_pub, (uint32)int_val);
+ break;
+ }
+ case IOV_GVAL(IOV_CMD_TO): {
+ dhd_get_cmd_to_val(dhd_pub, (uint32 *)&int_val);
+ bcopy(&int_val, arg, val_size);
+ break;
+ }
+ case IOV_SVAL(IOV_CMD_TO): {
+ dhd_set_cmd_to_val(dhd_pub, (uint32)int_val);
+ break;
+ }
+ case IOV_GVAL(IOV_OQS_TO): {
+ dhd_get_bus_to_val(dhd_pub, (uint32 *)&int_val);
+ bcopy(&int_val, arg, val_size);
+ break;
+ }
+ case IOV_SVAL(IOV_OQS_TO): {
+ dhd_set_bus_to_val(dhd_pub, (uint32)int_val);
+ break;
+ }
+#endif /* REPORT_FATAL_TIMEOUTS */
+ case IOV_GVAL(IOV_DONGLE_TRAP_TYPE):
+ if (dhd_pub->dongle_trap_occured)
+ int_val = ltoh32(dhd_pub->last_trap_info.type);
+ else
+ int_val = 0;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_GVAL(IOV_DONGLE_TRAP_INFO):
+ {
+ struct bcmstrbuf strbuf;
+ bcm_binit(&strbuf, arg, len);
+ if (dhd_pub->dongle_trap_occured == FALSE) {
+ bcm_bprintf(&strbuf, "no trap recorded\n");
+ break;
+ }
+#ifndef BCMDBUS
+ dhd_bus_dump_trap_info(dhd_pub->bus, &strbuf);
+#endif /* BCMDBUS */
+ break;
+ }
+#ifdef DHD_DEBUG
+#if defined(BCMSDIO) || defined(BCMPCIE)
+
+ case IOV_GVAL(IOV_BPADDR):
+ {
+ sdreg_t sdreg;
+ uint32 addr, size;
+
+ memcpy(&sdreg, params, sizeof(sdreg));
+
+ addr = sdreg.offset;
+ size = sdreg.func;
+
+ bcmerror = dhd_bus_readwrite_bp_addr(dhd_pub, addr, size,
+ (uint *)&int_val, TRUE);
+
+ memcpy(arg, &int_val, sizeof(int32));
+
+ break;
+ }
+
+ case IOV_SVAL(IOV_BPADDR):
+ {
+ sdreg_t sdreg;
+ uint32 addr, size;
+
+ memcpy(&sdreg, params, sizeof(sdreg));
+
+ addr = sdreg.offset;
+ size = sdreg.func;
+
+ bcmerror = dhd_bus_readwrite_bp_addr(dhd_pub, addr, size,
+ (uint *)&sdreg.value,
+ FALSE);
+
+ break;
+ }
+#endif /* BCMSDIO || BCMPCIE */
+#ifdef BCMPCIE
+ case IOV_SVAL(IOV_FLOW_RING_DEBUG):
+ {
+ bcmerror = dhd_flow_ring_debug(dhd_pub, arg, len);
+ break;
+ }
+#endif /* BCMPCIE */
+ case IOV_SVAL(IOV_MEM_DEBUG):
+ if (len > 0) {
+ bcmerror = dhd_mem_debug(dhd_pub, arg, len - 1);
+ }
+ break;
+#endif /* DHD_DEBUG */
+#if defined(DHD_LOG_DUMP)
+#if defined(DHD_EFI)
+ case IOV_GVAL(IOV_LOG_CAPTURE_ENABLE):
+ {
+ int_val = dhd_pub->log_capture_enable;
+ bcopy(&int_val, arg, val_size);
+ break;
+ }
+
+ case IOV_SVAL(IOV_LOG_CAPTURE_ENABLE):
+ {
+ dhd_pub->log_capture_enable = (uint8)int_val;
+ break;
+ }
+#endif /* DHD_EFI */
+ case IOV_GVAL(IOV_LOG_DUMP):
+ {
+ dhd_prot_debug_info_print(dhd_pub);
+ dhd_log_dump_trigger(dhd_pub, CMD_DEFAULT);
+ break;
+ }
+#endif /* DHD_LOG_DUMP */
+
+ case IOV_GVAL(IOV_TPUT_TEST):
+ {
+ tput_test_t *tput_data = NULL;
+ if (params && plen >= sizeof(tput_test_t)) {
+ tput_data = (tput_test_t *)params;
+ bcmerror = dhd_tput_test(dhd_pub, tput_data);
+ } else {
+			DHD_ERROR(("%s: tput test - no input params!\n", __FUNCTION__));
+ bcmerror = BCME_BADARG;
+ }
+ break;
+ }
+#if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS)
+ case IOV_SVAL(IOV_PKT_LATENCY):
+ dhd_pub->pkt_latency = (uint32)int_val;
+ break;
+ case IOV_GVAL(IOV_PKT_LATENCY):
+ int_val = (int32)dhd_pub->pkt_latency;
+ bcopy(&int_val, arg, val_size);
+ break;
+#endif /* defined(DHD_AWDL) && defined(AWDL_SLOT_STATS) */
+ case IOV_GVAL(IOV_DEBUG_BUF_DEST_STAT):
+ {
+ if (dhd_pub->debug_buf_dest_support) {
+ debug_buf_dest_stat_t *debug_buf_dest_stat =
+ (debug_buf_dest_stat_t *)arg;
+ memcpy(debug_buf_dest_stat, dhd_pub->debug_buf_dest_stat,
+ sizeof(dhd_pub->debug_buf_dest_stat));
+ } else {
+ bcmerror = BCME_DISABLED;
+ }
+ break;
+ }
+
+#ifdef DHD_PKTTS
+ case IOV_GVAL(IOV_PKTTS_ENAB): {
+ int_val = dhd_get_pktts_enab(dhd_pub);
+ (void)memcpy_s(arg, val_size, &int_val, sizeof(int_val));
+ break;
+ }
+ case IOV_SVAL(IOV_PKTTS_ENAB): {
+ dhd_set_pktts_enab(dhd_pub, !!int_val);
+ break;
+ }
+
+ case IOV_GVAL(IOV_PKTTS_FLOW): {
+ bcmerror = dhd_get_pktts_flow(dhd_pub, arg, len);
+ break;
+ }
+ case IOV_SVAL(IOV_PKTTS_FLOW): {
+ bcmerror = dhd_set_pktts_flow(dhd_pub, params, plen);
+ break;
+ }
+#endif /* DHD_PKTTS */
+
+#if defined(DHD_EFI)
+ case IOV_SVAL(IOV_INTR_POLL):
+ bcmerror = dhd_intr_poll(dhd_pub->bus, arg, len, TRUE);
+ break;
+
+ case IOV_GVAL(IOV_INTR_POLL):
+ bcmerror = dhd_intr_poll(dhd_pub->bus, params, plen, FALSE);
+ break;
+#endif /* DHD_EFI */
+
+#if defined(DHD_SSSR_DUMP)
+ case IOV_GVAL(IOV_FIS_TRIGGER):
+ bcmerror = dhd_bus_fis_trigger(dhd_pub);
+
+ if (bcmerror == BCME_OK) {
+ bcmerror = dhd_bus_fis_dump(dhd_pub);
+ }
+
+ int_val = bcmerror;
+ bcopy(&int_val, arg, val_size);
+ break;
+#endif /* defined(DHD_SSSR_DUMP) */
+
+#ifdef DHD_DEBUG
+ case IOV_SVAL(IOV_INDUCE_ERROR): {
+ if (int_val >= DHD_INDUCE_ERROR_MAX) {
+ DHD_ERROR(("%s: Invalid command : %u\n", __FUNCTION__, (uint16)int_val));
+ } else {
+ dhd_pub->dhd_induce_error = (uint16)int_val;
+#ifdef BCMPCIE
+ if (dhd_pub->dhd_induce_error == DHD_INDUCE_BH_CBP_HANG) {
+ dhdpcie_induce_cbp_hang(dhd_pub);
+ }
+#endif /* BCMPCIE */
+ }
+ break;
+ }
+#endif /* DHD_DEBUG */
+#ifdef WL_IFACE_MGMT_CONF
+#ifdef WL_CFG80211
+#ifdef WL_NANP2P
+ case IOV_GVAL(IOV_CONC_DISC): {
+ int_val = wl_cfg80211_get_iface_conc_disc(
+ dhd_linux_get_primary_netdev(dhd_pub));
+ bcopy(&int_val, arg, sizeof(int_val));
+ break;
+ }
+ case IOV_SVAL(IOV_CONC_DISC): {
+ bcmerror = wl_cfg80211_set_iface_conc_disc(
+ dhd_linux_get_primary_netdev(dhd_pub), (uint8)int_val);
+ break;
+ }
+#endif /* WL_NANP2P */
+#ifdef WL_IFACE_MGMT
+ case IOV_GVAL(IOV_IFACE_POLICY): {
+ int_val = wl_cfg80211_get_iface_policy(
+ dhd_linux_get_primary_netdev(dhd_pub));
+ bcopy(&int_val, arg, sizeof(int_val));
+ break;
+ }
+ case IOV_SVAL(IOV_IFACE_POLICY): {
+ bcmerror = wl_cfg80211_set_iface_policy(
+ dhd_linux_get_primary_netdev(dhd_pub),
+ arg, len);
+ break;
+ }
+#endif /* WL_IFACE_MGMT */
+#endif /* WL_CFG80211 */
+#endif /* WL_IFACE_MGMT_CONF */
+#ifdef RTT_GEOFENCE_CONT
+#if defined (RTT_SUPPORT) && defined (WL_NAN)
+ case IOV_GVAL(IOV_RTT_GEOFENCE_TYPE_OVRD): {
+ bool enable = 0;
+ dhd_rtt_get_geofence_cont_ind(dhd_pub, &enable);
+ int_val = enable ? 1 : 0;
+ bcopy(&int_val, arg, val_size);
+ break;
+ }
+ case IOV_SVAL(IOV_RTT_GEOFENCE_TYPE_OVRD): {
+ bool enable = *(bool *)arg;
+ dhd_rtt_set_geofence_cont_ind(dhd_pub, enable);
+ break;
+ }
+#endif /* RTT_SUPPORT && WL_NAN */
+#endif /* RTT_GEOFENCE_CONT */
+ case IOV_GVAL(IOV_FW_VBS): {
+ *(uint32 *)arg = (uint32)dhd_dbg_get_fwverbose(dhd_pub);
+ break;
+ }
+
+ case IOV_SVAL(IOV_FW_VBS): {
+ if (int_val < 0) {
+ int_val = 0;
+ }
+ dhd_dbg_set_fwverbose(dhd_pub, (uint32)int_val);
+ break;
+ }
+
+#ifdef DHD_TX_PROFILE
+ case IOV_SVAL(IOV_TX_PROFILE_TAG):
+ {
+ /* note: under the current implementation only one type of packet may be
+ * tagged per profile
+ */
+ const dhd_tx_profile_protocol_t *protocol = NULL;
+ /* for example, we might have a profile of profile_index 6, but at
+ * offset 2 from dhd_pub->protocol_filters.
+ */
+ uint8 offset;
+
+ if (params == NULL) {
+ bcmerror = BCME_ERROR;
+ break;
+ }
+
+ protocol = (dhd_tx_profile_protocol_t *)params;
+
+ /* validate */
+ if (protocol->version != DHD_TX_PROFILE_VERSION) {
+ bcmerror = BCME_VERSION;
+ break;
+ }
+ if (protocol->profile_index > DHD_MAX_PROFILE_INDEX) {
+ DHD_ERROR(("%s:\tprofile index must be between 0 and %d\n",
+ __FUNCTION__, DHD_MAX_PROFILE_INDEX));
+ bcmerror = BCME_RANGE;
+ break;
+ }
+ if (protocol->layer != DHD_TX_PROFILE_DATA_LINK_LAYER && protocol->layer
+ != DHD_TX_PROFILE_NETWORK_LAYER) {
+ DHD_ERROR(("%s:\tlayer must be %d or %d\n", __FUNCTION__,
+ DHD_TX_PROFILE_DATA_LINK_LAYER,
+ DHD_TX_PROFILE_NETWORK_LAYER));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+ if (protocol->protocol_number > __UINT16_MAX__) {
+ DHD_ERROR(("%s:\tprotocol number must be <= %d\n", __FUNCTION__,
+ __UINT16_MAX__));
+ bcmerror = BCME_BADLEN;
+ break;
+ }
+
+ /* find the dhd_tx_profile_protocol_t */
+ for (offset = 0; offset < dhd_pub->num_profiles; offset++) {
+ if (dhd_pub->protocol_filters[offset].profile_index ==
+ protocol->profile_index) {
+ break;
+ }
+ }
+
+ if (offset >= DHD_MAX_PROFILES) {
+#if DHD_MAX_PROFILES > 1
+ DHD_ERROR(("%s:\tonly %d profiles supported at present\n",
+ __FUNCTION__, DHD_MAX_PROFILES));
+#else /* DHD_MAX_PROFILES > 1 */
+ DHD_ERROR(("%s:\tonly %d profile supported at present\n",
+ __FUNCTION__, DHD_MAX_PROFILES));
+ DHD_ERROR(("%s:\tthere is a profile of index %d\n", __FUNCTION__,
+ dhd_pub->protocol_filters->profile_index));
+#endif /* DHD_MAX_PROFILES > 1 */
+ bcmerror = BCME_NOMEM;
+ break;
+ }
+
+ /* memory already allocated in dhd_attach; just assign the value */
+ dhd_pub->protocol_filters[offset] = *protocol;
+
+ if (offset >= dhd_pub->num_profiles) {
+ dhd_pub->num_profiles = offset + 1;
+ }
+
+ break;
+ }
+
+ case IOV_SVAL(IOV_TX_PROFILE_ENABLE):
+ dhd_pub->tx_profile_enab = int_val ? TRUE : FALSE;
+ break;
+
+ case IOV_GVAL(IOV_TX_PROFILE_ENABLE):
+ int_val = dhd_pub->tx_profile_enab;
+ bcmerror = memcpy_s(arg, val_size, &int_val, sizeof(int_val));
+ break;
+
+ case IOV_SVAL(IOV_TX_PROFILE_DUMP):
+ {
+ const dhd_tx_profile_protocol_t *protocol = NULL;
+ uint8 offset;
+ char *format = "%s:\ttx_profile %s: %d\n";
+
+ for (offset = 0; offset < dhd_pub->num_profiles; offset++) {
+ if (dhd_pub->protocol_filters[offset].profile_index == int_val) {
+ protocol = &(dhd_pub->protocol_filters[offset]);
+ break;
+ }
+ }
+
+ if (protocol == NULL) {
+ DHD_ERROR(("%s:\tno profile with index %d\n", __FUNCTION__,
+ int_val));
+ bcmerror = BCME_ERROR;
+ break;
+ }
+
+ printf(format, __FUNCTION__, "profile_index", protocol->profile_index);
+ printf(format, __FUNCTION__, "layer", protocol->layer);
+ printf(format, __FUNCTION__, "protocol_number", protocol->protocol_number);
+ printf(format, __FUNCTION__, "src_port", protocol->src_port);
+ printf(format, __FUNCTION__, "dest_port", protocol->dest_port);
+
+ break;
+ }
+#endif /* defined(DHD_TX_PROFILE) */
+
+ case IOV_GVAL(IOV_CHECK_TRAP_ROT): {
+ int_val = dhd_pub->check_trap_rot? 1 : 0;
+ (void)memcpy_s(arg, val_size, &int_val, sizeof(int_val));
+ break;
+ }
+ case IOV_SVAL(IOV_CHECK_TRAP_ROT): {
+ dhd_pub->check_trap_rot = *(bool *)arg;
+ break;
+ }
+
+#if defined(DHD_AWDL)
+ case IOV_SVAL(IOV_AWDL_LLC_ENABLE): {
+ bool bval = *(bool *)arg;
+ if (bval != 0 && bval != 1)
+ bcmerror = BCME_ERROR;
+ else
+ dhd_pub->awdl_llc_enabled = bval;
+ break;
+ }
+ case IOV_GVAL(IOV_AWDL_LLC_ENABLE):
+ int_val = dhd_pub->awdl_llc_enabled;
+ (void)memcpy_s(arg, val_size, &int_val, sizeof(int_val));
+ break;
+#endif
+#ifdef WLEASYMESH
+ case IOV_SVAL(IOV_1905_AL_UCAST): {
+ uint32 bssidx;
+ const char *val;
+ uint8 ea[6] = {0};
+ if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) {
+ DHD_ERROR(("%s: 1905_al_ucast: bad parameter\n", __FUNCTION__));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+ bcopy(val, ea, ETHER_ADDR_LEN);
+ printf("IOV_1905_AL_UCAST:" MACDBG "\n", MAC2STRDBG(ea));
+ bcmerror = dhd_set_1905_almac(dhd_pub, bssidx, ea, FALSE);
+ break;
+ }
+ case IOV_GVAL(IOV_1905_AL_UCAST): {
+ uint32 bssidx;
+ const char *val;
+ if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) {
+ DHD_ERROR(("%s: 1905_al_ucast: bad parameter\n", __FUNCTION__));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+
+ bcmerror = dhd_get_1905_almac(dhd_pub, bssidx, arg, FALSE);
+ break;
+ }
+ case IOV_SVAL(IOV_1905_AL_MCAST): {
+ uint32 bssidx;
+ const char *val;
+ uint8 ea[6] = {0};
+ if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) {
+ DHD_ERROR(("%s: 1905_al_mcast: bad parameter\n", __FUNCTION__));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+ bcopy(val, ea, ETHER_ADDR_LEN);
+ printf("IOV_1905_AL_MCAST:" MACDBG "\n", MAC2STRDBG(ea));
+ bcmerror = dhd_set_1905_almac(dhd_pub, bssidx, ea, TRUE);
+ break;
+ }
+ case IOV_GVAL(IOV_1905_AL_MCAST): {
+ uint32 bssidx;
+ const char *val;
+ if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) {
+ DHD_ERROR(("%s: 1905_al_mcast: bad parameter\n", __FUNCTION__));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+
+ bcmerror = dhd_get_1905_almac(dhd_pub, bssidx, arg, TRUE);
+ break;
+ }
+#endif /* WLEASYMESH */
+
+ default:
+ bcmerror = BCME_UNSUPPORTED;
+ break;
+ }
+
+exit:
+ DHD_TRACE(("%s: actionid %d, bcmerror %d\n", __FUNCTION__, actionid, bcmerror));
+ return bcmerror;
+}
+
+#ifdef BCMDONGLEHOST
+/* Store the status of a connection attempt for later retrieval by an iovar */
+void
+dhd_store_conn_status(uint32 event, uint32 status, uint32 reason)
+{
+ /* Do not overwrite a WLC_E_PRUNE with a WLC_E_SET_SSID
+ * because an encryption/rsn mismatch results in both events, and
+ * the important information is in the WLC_E_PRUNE.
+ */
+ if (!(event == WLC_E_SET_SSID && status == WLC_E_STATUS_FAIL &&
+ dhd_conn_event == WLC_E_PRUNE)) {
+ dhd_conn_event = event;
+ dhd_conn_status = status;
+ dhd_conn_reason = reason;
+ }
+}
+#else
+#error "BCMDONGLEHOST not defined"
+#endif /* BCMDONGLEHOST */
+
+bool
+dhd_prec_enq(dhd_pub_t *dhdp, struct pktq *q, void *pkt, int prec)
+{
+ void *p;
+ int eprec = -1; /* precedence to evict from */
+ bool discard_oldest;
+
+ /* Fast case, precedence queue is not full and we are also not
+ * exceeding total queue length
+ */
+ if (!pktqprec_full(q, prec) && !pktq_full(q)) {
+ pktq_penq(q, prec, pkt);
+ return TRUE;
+ }
+
+ /* Determine precedence from which to evict packet, if any */
+ if (pktqprec_full(q, prec))
+ eprec = prec;
+ else if (pktq_full(q)) {
+ p = pktq_peek_tail(q, &eprec);
+ ASSERT(p);
+ if (eprec > prec || eprec < 0)
+ return FALSE;
+ }
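+ /* e.g. if the incoming pkt's own prec queue is full, evict from that
+ * same prec; if only the total limit is hit and the tail pkt has a
+ * higher precedence than the incoming pkt, refuse the new pkt rather
+ * than evict higher-priority traffic
+ */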
+
+ /* Evict if needed */
+ if (eprec >= 0) {
+ /* Detect queueing to unconfigured precedence */
+ ASSERT(!pktqprec_empty(q, eprec));
+ discard_oldest = AC_BITMAP_TST(dhdp->wme_dp, eprec);
+ if (eprec == prec && !discard_oldest)
+ return FALSE; /* refuse newer (incoming) packet */
+ /* Evict packet according to discard policy */
+ p = discard_oldest ? pktq_pdeq(q, eprec) : pktq_pdeq_tail(q, eprec);
+ ASSERT(p);
+#ifdef DHDTCPACK_SUPPRESS
+ if (dhd_tcpack_check_xmit(dhdp, p) == BCME_ERROR) {
+ DHD_ERROR(("%s %d: tcpack_suppress ERROR!!! Stop using it\n",
+ __FUNCTION__, __LINE__));
+ dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF);
+ }
+#endif /* DHDTCPACK_SUPPRESS */
+ PKTFREE(dhdp->osh, p, TRUE);
+ }
+
+ /* Enqueue */
+ p = pktq_penq(q, prec, pkt);
+ ASSERT(p);
+
+ return TRUE;
+}
+
+/*
+ * Functions to drop the proper pkts from a queue:
+ * if any pkt in the queue is non-fragmented, drop only the first non-fragmented pkt;
+ * if all pkts in the queue are fragmented, find and drop one whole set of fragmented pkts;
+ * if no pkts match the two cases above, drop the first pkt anyway
+ */
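+/*
+ * Illustrative examples (hypothetical queues, head first):
+ *  [N, F1-first, F1-last]        -> drop N only
+ *  [F1-first, F1-last, F2-first] -> drop the F1-first..F1-last set
+ *  [F1-last, F2-first]           -> no non-fragmented pkt and no complete
+ *                                   set, so drop the head pkt
+ */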
+bool
+dhd_prec_drop_pkts(dhd_pub_t *dhdp, struct pktq *pq, int prec, f_droppkt_t fn)
+{
+ struct pktq_prec *q = NULL;
+ void *p, *prev = NULL, *next = NULL, *first = NULL, *last = NULL, *prev_first = NULL;
+ pkt_frag_t frag_info;
+
+ ASSERT(dhdp && pq);
+ ASSERT(prec >= 0 && prec < pq->num_prec);
+
+ q = &pq->q[prec];
+ p = q->head;
+
+ if (p == NULL)
+ return FALSE;
+
+ while (p) {
+ frag_info = pkt_frag_info(dhdp->osh, p);
+ if (frag_info == DHD_PKT_FRAG_NONE) {
+ break;
+ } else if (frag_info == DHD_PKT_FRAG_FIRST) {
+ if (first) {
+ /* No last frag pkt, use prev as last */
+ last = prev;
+ break;
+ } else {
+ first = p;
+ prev_first = prev;
+ }
+ } else if (frag_info == DHD_PKT_FRAG_LAST) {
+ if (first) {
+ last = p;
+ break;
+ }
+ }
+
+ prev = p;
+ p = PKTLINK(p);
+ }
+
+ if ((p == NULL) || ((frag_info != DHD_PKT_FRAG_NONE) && !(first && last))) {
+ /* Not found matching pkts, use oldest */
+ prev = NULL;
+ p = q->head;
+ frag_info = 0;
+ }
+
+ if (frag_info == DHD_PKT_FRAG_NONE) {
+ first = last = p;
+ prev_first = prev;
+ }
+
+ p = first;
+ while (p) {
+ next = PKTLINK(p);
+ q->n_pkts--;
+ pq->n_pkts_tot--;
+
+#ifdef WL_TXQ_STALL
+ q->dequeue_count++;
+#endif
+
+ PKTSETLINK(p, NULL);
+
+ if (fn)
+ fn(dhdp, prec, p, TRUE);
+
+ if (p == last)
+ break;
+
+ p = next;
+ }
+
+ if (prev_first == NULL) {
+ if ((q->head = next) == NULL)
+ q->tail = NULL;
+ } else {
+ PKTSETLINK(prev_first, next);
+ if (!next)
+ q->tail = prev_first;
+ }
+
+ return TRUE;
+}
+
+static int
+dhd_iovar_op(dhd_pub_t *dhd_pub, const char *name,
+ void *params, int plen, void *arg, uint len, bool set)
+{
+ int bcmerror = 0;
+ uint val_size;
+ const bcm_iovar_t *vi = NULL;
+ uint32 actionid;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ ASSERT(name);
+
+ /* Get MUST have return space */
+ ASSERT(set || (arg && len));
+
+ /* Set does NOT take qualifiers */
+ ASSERT(!set || (!params && !plen));
+
+ if ((vi = bcm_iovar_lookup(dhd_iovars, name)) == NULL) {
+ bcmerror = BCME_UNSUPPORTED;
+ goto exit;
+ }
+
+ DHD_CTL(("%s: %s %s, len %d plen %d\n", __FUNCTION__,
+ name, (set ? "set" : "get"), len, plen));
+
+ /* set up 'params' pointer in case this is a set command so that
+ * the convenience int and bool code can be common to set and get
+ */
+ if (params == NULL) {
+ params = arg;
+ plen = len;
+ }
+
+ if (vi->type == IOVT_VOID)
+ val_size = 0;
+ else if (vi->type == IOVT_BUFFER)
+ val_size = len;
+ else
+ /* all other types are integer sized */
+ val_size = sizeof(int);
+
+ actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
+
+ bcmerror = dhd_doiovar(dhd_pub, vi, actionid, name, params, plen, arg, len, val_size);
+
+exit:
+ return bcmerror;
+}
+
+int
+dhd_ioctl(dhd_pub_t * dhd_pub, dhd_ioctl_t *ioc, void *buf, uint buflen)
+{
+ int bcmerror = 0;
+ unsigned long flags;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (!buf) {
+ return BCME_BADARG;
+ }
+
+ dhd_os_dhdiovar_lock(dhd_pub);
+ switch (ioc->cmd) {
+ case DHD_GET_MAGIC:
+ if (buflen < sizeof(int))
+ bcmerror = BCME_BUFTOOSHORT;
+ else
+ *(int*)buf = DHD_IOCTL_MAGIC;
+ break;
+
+ case DHD_GET_VERSION:
+ if (buflen < sizeof(int))
+ bcmerror = BCME_BUFTOOSHORT;
+ else
+ *(int*)buf = DHD_IOCTL_VERSION;
+ break;
+
+ case DHD_GET_VAR:
+ case DHD_SET_VAR:
+ {
+ char *arg;
+ uint arglen;
+
+ DHD_LINUX_GENERAL_LOCK(dhd_pub, flags);
+ if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd_pub) &&
+ bcmstricmp((char *)buf, "devreset")) {
+ /* On platforms like FC19 the FW download is done via IOCTL,
+ * so do not return an error for IOCTLs fired before the FW
+ * download is done
+ */
+ if (dhd_fw_download_status(dhd_pub) == FW_DOWNLOAD_DONE) {
+ DHD_ERROR(("%s: return as fw_download_status=%d\n",
+ __FUNCTION__,
+ dhd_fw_download_status(dhd_pub)));
+ DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
+ dhd_os_dhdiovar_unlock(dhd_pub);
+ return -ENODEV;
+ }
+ }
+ DHD_BUS_BUSY_SET_IN_DHD_IOVAR(dhd_pub);
+ DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
+
+#ifdef DHD_PCIE_RUNTIMEPM
+ dhdpcie_runtime_bus_wake(dhd_pub, TRUE, dhd_ioctl);
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+ DHD_LINUX_GENERAL_LOCK(dhd_pub, flags);
+ if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhd_pub)) {
+ /* If Suspend/Resume is being tested via the pcie_suspend IOVAR,
+ * then continue to execute the IOVAR; return from here for
+ * other IOVARs. Also let pciecfgreg and devreset
+ * go through.
+ */
+#ifdef DHD_EFI
+ if (bcmstricmp((char *)buf, "pcie_suspend") &&
+ bcmstricmp((char *)buf, "pciecfgreg") &&
+ bcmstricmp((char *)buf, "devreset") &&
+ bcmstricmp((char *)buf, "sdio_suspend") &&
+ bcmstricmp((char *)buf, "control_signal"))
+#else
+ if (bcmstricmp((char *)buf, "pcie_suspend") &&
+ bcmstricmp((char *)buf, "pciecfgreg") &&
+ bcmstricmp((char *)buf, "devreset") &&
+ bcmstricmp((char *)buf, "sdio_suspend"))
+#endif /* DHD_EFI */
+ {
+ DHD_ERROR(("%s: bus is in suspend(%d)"
+ "or suspending(0x%x) state\n",
+ __FUNCTION__, dhd_pub->busstate,
+ dhd_pub->dhd_bus_busy_state));
+ DHD_BUS_BUSY_CLEAR_IN_DHD_IOVAR(dhd_pub);
+ dhd_os_busbusy_wake(dhd_pub);
+ DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
+ dhd_os_dhdiovar_unlock(dhd_pub);
+ return -ENODEV;
+ }
+ }
+ /* During the devreset ioctl we call dhdpcie_advertise_bus_cleanup,
+ * which waits a fixed time for all the busy contexts to finish
+ * and calls ASSERT if a timeout happens. Since the devreset
+ * ioctl set DHD_BUS_BUSY_SET_IN_DHD_IOVAR, clear the IOCTL busy
+ * state here to avoid that ASSERT. The "devreset" ioctl is not
+ * used on production platforms; it is only used in FC19 setups.
+ */
+ if (!bcmstricmp((char *)buf, "devreset") ||
+#ifdef BCMPCIE
+ (dhd_bus_is_multibp_capable(dhd_pub->bus) &&
+ !bcmstricmp((char *)buf, "dwnldstate")) ||
+#endif /* BCMPCIE */
+#if defined(DHD_EFI) && defined(BT_OVER_PCIE)
+ !bcmstricmp((char *)buf, "btop_test") ||
+ !bcmstricmp((char *)buf, "control_signal") ||
+#endif /* DHD_EFI && BT_OVER_PCIE */
+ FALSE)
+ {
+ DHD_BUS_BUSY_CLEAR_IN_DHD_IOVAR(dhd_pub);
+ }
+ DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
+
+ /* scan past the name to any arguments */
+ for (arg = buf, arglen = buflen; *arg && arglen; arg++, arglen--)
+ ;
+
+ if (arglen == 0 || *arg) {
+ bcmerror = BCME_BUFTOOSHORT;
+ goto unlock_exit;
+ }
+
+ /* account for the NUL terminator */
+ arg++, arglen--;
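+ /* e.g. (hypothetical iovar, for illustration only): a SET of
+ * "roam_off" with int value 1 arrives as
+ * buf = "roam_off\0\x01\x00\x00\x00"; buf holds the NUL-terminated
+ * name and arg now points at the 4-byte value
+ */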
+ /* call with the appropriate arguments */
+ if (ioc->cmd == DHD_GET_VAR) {
+ bcmerror = dhd_iovar_op(dhd_pub, buf, arg, arglen,
+ buf, buflen, IOV_GET);
+ } else {
+ bcmerror = dhd_iovar_op(dhd_pub, buf, NULL, 0,
+ arg, arglen, IOV_SET);
+ }
+ if (bcmerror != BCME_UNSUPPORTED) {
+ goto unlock_exit;
+ }
+
+ /* not in generic table, try protocol module */
+ if (ioc->cmd == DHD_GET_VAR) {
+ bcmerror = dhd_prot_iovar_op(dhd_pub, buf, arg,
+ arglen, buf, buflen, IOV_GET);
+ } else {
+ bcmerror = dhd_prot_iovar_op(dhd_pub, buf,
+ NULL, 0, arg, arglen, IOV_SET);
+ }
+ if (bcmerror != BCME_UNSUPPORTED) {
+ goto unlock_exit;
+ }
+
+ /* if still not found, try bus module */
+ if (ioc->cmd == DHD_GET_VAR) {
+ bcmerror = dhd_bus_iovar_op(dhd_pub, buf,
+ arg, arglen, buf, buflen, IOV_GET);
+ } else {
+ bcmerror = dhd_bus_iovar_op(dhd_pub, buf,
+ NULL, 0, arg, arglen, IOV_SET);
+ }
+ if (bcmerror != BCME_UNSUPPORTED) {
+ goto unlock_exit;
+ }
+
+#ifdef DHD_TIMESYNC
+ /* check TS module */
+ if (ioc->cmd == DHD_GET_VAR)
+ bcmerror = dhd_timesync_iovar_op(dhd_pub->ts, buf, arg,
+ arglen, buf, buflen, IOV_GET);
+ else
+ bcmerror = dhd_timesync_iovar_op(dhd_pub->ts, buf,
+ NULL, 0, arg, arglen, IOV_SET);
+#endif /* DHD_TIMESYNC */
+ }
+ goto unlock_exit;
+
+ default:
+ bcmerror = BCME_UNSUPPORTED;
+ }
+ dhd_os_dhdiovar_unlock(dhd_pub);
+ return bcmerror;
+
+unlock_exit:
+ DHD_LINUX_GENERAL_LOCK(dhd_pub, flags);
+ DHD_BUS_BUSY_CLEAR_IN_DHD_IOVAR(dhd_pub);
+ dhd_os_busbusy_wake(dhd_pub);
+ DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
+ dhd_os_dhdiovar_unlock(dhd_pub);
+ return bcmerror;
+}
+
+#ifdef SHOW_EVENTS
+
+#if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS)
+static void
+dhd_update_awdl_stats(dhd_pub_t *dhd_pub, const awdl_aws_event_data_t *aw)
+{
+ dhd_awdl_stats_t *awdl_stats;
+ unsigned long lock_flags;
+
+ /* AWDL stats are read-on-clear, so take the lock before
+ * updating to protect against a concurrent clear
+ */
+ DHD_AWDL_STATS_LOCK(dhd_pub->awdl_stats_lock, lock_flags);
+ /* Start of AWDL slot */
+ if (!(aw->flags & AWDL_AW_LAST_EXT)) {
+ dhd_pub->awdl_tx_status_slot =
+ ((aw->aw_counter/AWDL_SLOT_MULT) % AWDL_NUM_SLOTS);
+ awdl_stats = &dhd_pub->awdl_stats[dhd_pub->awdl_tx_status_slot];
+ awdl_stats->slot_start_time = OSL_SYSUPTIME_US();
+ awdl_stats->fw_slot_start_time = ntoh32_ua(&aw->fw_time);
+ awdl_stats->num_slots++;
+ } else {
+ /* End of AWDL slot */
+ awdl_stats = &dhd_pub->awdl_stats[dhd_pub->awdl_tx_status_slot];
+ if (awdl_stats->slot_start_time) {
+ awdl_stats->cum_slot_time +=
+ OSL_SYSUPTIME_US() - awdl_stats->slot_start_time;
+ /* FW reports time in us in a 32bit number.
+ * This 32-bit number wraps around in ~90 minutes.
+ * The logic below accounts for that wrap-around too
+ */
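+ /* e.g. (hypothetical values): fw_slot_start_time = 0xFFFFFF00
+ * and a current fw_time of 0x00000100 give
+ * (0x00000100 - 0xFFFFFF00) & UINT_MAX = 0x200 us elapsed
+ */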
+ awdl_stats->fw_cum_slot_time +=
+ ((ntoh32_ua(&aw->fw_time) - awdl_stats->fw_slot_start_time) &
+ (UINT_MAX));
+
+ }
+ }
+ DHD_AWDL_STATS_UNLOCK(dhd_pub->awdl_stats_lock, lock_flags);
+}
+#endif /* DHD_AWDL && AWDL_SLOT_STATS */
+
+static void
+wl_show_roam_event(dhd_pub_t *dhd_pub, uint status, uint datalen,
+ const char *event_name, char *eabuf, void *event_data)
+{
+#ifdef REPORT_FATAL_TIMEOUTS
+ OSL_ATOMIC_SET(dhd_pub->osh, &dhd_pub->set_ssid_rcvd, TRUE);
+ dhd_clear_join_error(dhd_pub, WLC_SSID_MASK);
+#endif /* REPORT_FATAL_TIMEOUTS */
+ if (status == WLC_E_STATUS_SUCCESS) {
+ DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
+ } else {
+#ifdef REPORT_FATAL_TIMEOUTS
+ /*
+ * For a secure join, if WLC_E_SET_SSID returns with any failure case,
+ * do not expect WLC_E_PSK_SUP. So clear the mask.
+ */
+ dhd_clear_join_error(dhd_pub, WLC_WPA_MASK);
+#endif /* REPORT_FATAL_TIMEOUTS */
+ if (status == WLC_E_STATUS_FAIL) {
+ DHD_EVENT(("MACEVENT: %s, failed status %d\n", event_name, status));
+ } else if (status == WLC_E_STATUS_NO_NETWORKS) {
+ if (datalen) {
+ uint8 id = *((uint8 *)event_data);
+ if (id != DOT11_MNG_PROPR_ID) {
+ wl_roam_event_t *roam_data =
+ (wl_roam_event_t *)event_data;
+ bcm_xtlv_t *tlv = (bcm_xtlv_t *)roam_data->xtlvs;
+ if (tlv->id == WLC_ROAM_NO_NETWORKS_TLV_ID) {
+ uint32 *fail_reason = (uint32 *)tlv->data;
+ switch (*fail_reason) {
+ case WLC_E_REASON_NO_NETWORKS:
+ DHD_EVENT(("MACEVENT: %s,"
+ " no networks found\n",
+ event_name));
+ break;
+ case WLC_E_REASON_NO_NETWORKS_BY_SCORE:
+ DHD_EVENT(("MACEVENT: %s,"
+ " no networks found by score\n",
+ event_name));
+ break;
+ default:
+ DHD_ERROR(("MACEVENT: %s,"
+ " unknown fail reason 0x%x\n",
+ event_name,
+ *fail_reason));
+ ASSERT(0);
+ }
+ } else {
+ DHD_EVENT(("MACEVENT: %s,"
+ " no networks found\n",
+ event_name));
+ }
+ } else {
+ DHD_EVENT(("MACEVENT: %s,"
+ " no networks found\n",
+ event_name));
+ }
+ } else {
+ DHD_EVENT(("MACEVENT: %s, no networks found\n",
+ event_name));
+ }
+ } else {
+ DHD_EVENT(("MACEVENT: %s, unexpected status %d\n",
+ event_name, (int)status));
+ }
+ }
+}
+
+static void
+wl_show_roam_cache_update_event(const char *name, uint status,
+ uint reason, uint datalen, void *event_data)
+{
+ wlc_roam_cache_update_event_t *cache_update;
+ uint16 len_of_tlvs;
+ void *val_tlv_ptr;
+ bcm_xtlv_t *val_xtlv;
+ char ntoa_buf[ETHER_ADDR_STR_LEN];
+ uint idx;
+ const char *reason_name = "UNKNOWN"; /* fallback if no table entry matches */
+ const char *status_name = "UNKNOWN"; /* fallback if no table entry matches */
+ static struct {
+ uint event;
+ const char *event_name;
+ } reason_names[] = {
+ {WLC_E_REASON_INITIAL_ASSOC, "INITIAL ASSOCIATION"},
+ {WLC_E_REASON_LOW_RSSI, "LOW_RSSI"},
+ {WLC_E_REASON_DEAUTH, "RECEIVED DEAUTHENTICATION"},
+ {WLC_E_REASON_DISASSOC, "RECEIVED DISASSOCIATION"},
+ {WLC_E_REASON_BCNS_LOST, "BEACONS LOST"},
+ {WLC_E_REASON_BETTER_AP, "BETTER AP FOUND"},
+ {WLC_E_REASON_MINTXRATE, "STUCK AT MIN TX RATE"},
+ {WLC_E_REASON_BSSTRANS_REQ, "REQUESTED ROAM"},
+ {WLC_E_REASON_TXFAIL, "TOO MANY TXFAILURES"}
+ };
+
+ static struct {
+ uint event;
+ const char *event_name;
+ } status_names[] = {
+ {WLC_E_STATUS_SUCCESS, "operation was successful"},
+ {WLC_E_STATUS_FAIL, "operation failed"},
+ {WLC_E_STATUS_TIMEOUT, "operation timed out"},
+ {WLC_E_STATUS_NO_NETWORKS, "failed due to no matching network found"},
+ {WLC_E_STATUS_ABORT, "operation was aborted"},
+ {WLC_E_STATUS_NO_ACK, "protocol failure: packet not ack'd"},
+ {WLC_E_STATUS_UNSOLICITED, "AUTH or ASSOC packet was unsolicited"},
+ {WLC_E_STATUS_ATTEMPT, "attempt to assoc to an auto auth configuration"},
+ {WLC_E_STATUS_PARTIAL, "scan results are incomplete"},
+ {WLC_E_STATUS_NEWSCAN, "scan aborted by another scan"},
+ {WLC_E_STATUS_NEWASSOC, "scan aborted due to assoc in progress"},
+ {WLC_E_STATUS_11HQUIET, "802.11h quiet period started"},
+ {WLC_E_STATUS_SUPPRESS, "user disabled scanning"},
+ {WLC_E_STATUS_NOCHANS, "no allowable channels to scan"},
+ {WLC_E_STATUS_CS_ABORT, "abort channel select"},
+ {WLC_E_STATUS_ERROR, "request failed due to error"},
+ {WLC_E_STATUS_INVALID, "Invalid status code"}
+ };
+
+ switch (reason) {
+ case WLC_ROAM_CACHE_UPDATE_NEW_ROAM_CACHE:
+ DHD_EVENT(("Current roam cache status %d, "
+ "reason for cache update is new roam cache\n", status));
+ break;
+ case WLC_ROAM_CACHE_UPDATE_JOIN:
+ DHD_EVENT(("Current roam cache status %d, "
+ "reason for cache update is start of join\n", status));
+ break;
+ case WLC_ROAM_CACHE_UPDATE_RSSI_DELTA:
+ DHD_EVENT(("Current roam cache status %d, "
+ "reason for cache update is delta in rssi\n", status));
+ break;
+ case WLC_ROAM_CACHE_UPDATE_MOTION_RSSI_DELTA:
+ DHD_EVENT(("Current roam cache status %d, "
+ "reason for cache update is motion delta in rssi\n", status));
+ break;
+ case WLC_ROAM_CACHE_UPDATE_CHANNEL_MISS:
+ DHD_EVENT(("Current roam cache status %d, "
+ "reason for cache update is missed channel\n", status));
+ break;
+ case WLC_ROAM_CACHE_UPDATE_START_SPLIT_SCAN:
+ DHD_EVENT(("Current roam cache status %d, "
+ "reason for cache update is start of split scan\n", status));
+ break;
+ case WLC_ROAM_CACHE_UPDATE_START_FULL_SCAN:
+ DHD_EVENT(("Current roam cache status %d, "
+ "reason for cache update is start of full scan\n", status));
+ break;
+ case WLC_ROAM_CACHE_UPDATE_INIT_ASSOC:
+ DHD_EVENT(("Current roam cache status %d, "
+ "reason for cache update is init association\n", status));
+ break;
+ case WLC_ROAM_CACHE_UPDATE_FULL_SCAN_FAILED:
+ DHD_EVENT(("Current roam cache status %d, "
+ "reason for cache update is failure in full scan\n", status));
+ break;
+ case WLC_ROAM_CACHE_UPDATE_NO_AP_FOUND:
+ DHD_EVENT(("Current roam cache status %d, "
+ "reason for cache update is empty scan result\n", status));
+ break;
+ case WLC_ROAM_CACHE_UPDATE_MISSING_AP:
+ DHD_EVENT(("Current roam cache status %d, "
+ "reason for cache update is missed ap\n", status));
+ break;
+ default:
+ DHD_EVENT(("Current roam cache status %d, "
+ "reason for cache update is unknown %d\n", status, reason));
+ break;
+ }
+
+ if (datalen < sizeof(wlc_roam_cache_update_event_t)) {
+ DHD_ERROR(("MACEVENT: %s, missing event data\n", name));
+ return;
+ }
+
+ cache_update = (wlc_roam_cache_update_event_t *)event_data;
+ val_tlv_ptr = (void *)cache_update->xtlvs;
+ len_of_tlvs = datalen - sizeof(wlc_roam_cache_update_event_t);
+ val_xtlv = (bcm_xtlv_t *)val_tlv_ptr;
+ if (val_xtlv->id != WL_RMC_RPT_CMD_DATA) {
+ DHD_ERROR(("MACEVENT: %s, unexpected xtlv id %d\n",
+ name, val_xtlv->id));
+ return;
+ }
+ val_tlv_ptr = (uint8 *)val_tlv_ptr + BCM_XTLV_HDR_SIZE;
+ len_of_tlvs = val_xtlv->len;
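+ /* the outer WL_RMC_RPT_CMD_DATA xtlv is a container: walk the
+ * inner xtlvs (BSS info / candidate info records) it carries
+ */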
+
+ while (len_of_tlvs && len_of_tlvs > BCM_XTLV_HDR_SIZE) {
+ val_xtlv = (bcm_xtlv_t *)val_tlv_ptr;
+ switch (val_xtlv->id) {
+ case WL_RMC_RPT_XTLV_BSS_INFO:
+ {
+ rmc_bss_info_v1_t *bss_info = (rmc_bss_info_v1_t *)(val_xtlv->data);
+ DHD_EVENT(("\t Current BSS INFO:\n"));
+ DHD_EVENT(("\t\tRSSI: %d\n", bss_info->rssi));
+ DHD_EVENT(("\t\tNumber of full scans performed "
+ "on current BSS: %d\n", bss_info->fullscan_count));
+ for (idx = 0; idx < ARRAYSIZE(reason_names); idx++) {
+ if (reason_names[idx].event == bss_info->reason) {
+ reason_name = reason_names[idx].event_name;
+ }
+ }
+ DHD_EVENT(("\t\tReason code for last full scan: %s(%d)\n",
+ reason_name, bss_info->reason));
+ DHD_EVENT(("\t\tDelta between current time and "
+ "last full scan: %d\n", bss_info->time_full_scan));
+ for (idx = 0; idx < ARRAYSIZE(status_names); idx++) {
+ if (status_names[idx].event == bss_info->status)
+ status_name = status_names[idx].event_name;
+ }
+ DHD_EVENT(("\t\tLast status code for not roaming: %s(%d)\n",
+ status_name, bss_info->status));
+
+ }
+ break;
+ case WL_RMC_RPT_XTLV_CANDIDATE_INFO:
+ case WL_RMC_RPT_XTLV_USER_CACHE_INFO:
+ {
+ rmc_candidate_info_v1_t *candidate_info =
+ (rmc_candidate_info_v1_t *)(val_xtlv->data);
+ if (val_xtlv->id == WL_RMC_RPT_XTLV_CANDIDATE_INFO) {
+ DHD_EVENT(("\t Candidate INFO:\n"));
+ } else {
+ DHD_EVENT(("\t User Candidate INFO:\n"));
+ }
+ DHD_EVENT(("\t\tBSSID: %s\n",
+ bcm_ether_ntoa((const struct ether_addr *)
+ &candidate_info->bssid, ntoa_buf)));
+ DHD_EVENT(("\t\tRSSI: %d\n", candidate_info->rssi));
+ DHD_EVENT(("\t\tChannel: %d\n", candidate_info->ctl_channel));
+ DHD_EVENT(("\t\tDelta between current time and last "
+ "seen time: %d\n", candidate_info->time_last_seen));
+ DHD_EVENT(("\t\tBSS load: %d\n", candidate_info->bss_load));
+ }
+ break;
+ default:
+ DHD_ERROR(("MACEVENT: %s, unexpected xtlv id %d\n",
+ name, val_xtlv->id));
+ return;
+ }
+ val_tlv_ptr = (uint8 *)val_tlv_ptr + bcm_xtlv_size(val_xtlv,
+ BCM_XTLV_OPTION_NONE);
+ len_of_tlvs -= (uint16)bcm_xtlv_size(val_xtlv, BCM_XTLV_OPTION_NONE);
+ }
+}
+
+static void
+wl_show_host_event(dhd_pub_t *dhd_pub, wl_event_msg_t *event, void *event_data,
+ void *raw_event_ptr, char *eventmask)
+{
+ uint i, status, reason;
+ bool group = FALSE, flush_txq = FALSE, link = FALSE;
+ bool host_data = FALSE; /* prints event data after the case when set */
+ const char *auth_str;
+ const char *event_name;
+ const uchar *buf;
+ char err_msg[256], eabuf[ETHER_ADDR_STR_LEN];
+ uint event_type, flags, auth_type, datalen;
+
+ event_type = ntoh32(event->event_type);
+ flags = ntoh16(event->flags);
+ status = ntoh32(event->status);
+ reason = ntoh32(event->reason);
+ BCM_REFERENCE(reason);
+ auth_type = ntoh32(event->auth_type);
+ datalen = (event_data != NULL) ? ntoh32(event->datalen) : 0;
+
+ /* debug dump of event messages */
+ snprintf(eabuf, sizeof(eabuf), MACDBG, MAC2STRDBG(event->addr.octet));
+
+ event_name = bcmevent_get_name(event_type);
+ BCM_REFERENCE(event_name);
+
+ if (flags & WLC_EVENT_MSG_LINK)
+ link = TRUE;
+ if (flags & WLC_EVENT_MSG_GROUP)
+ group = TRUE;
+ if (flags & WLC_EVENT_MSG_FLUSHTXQ)
+ flush_txq = TRUE;
+
+ switch (event_type) {
+ case WLC_E_START:
+ DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
+ break;
+ case WLC_E_DEAUTH:
+ case WLC_E_DISASSOC:
+ DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
+#ifdef REPORT_FATAL_TIMEOUTS
+ dhd_clear_join_error(dhd_pub, WLC_SSID_MASK | WLC_WPA_MASK);
+#endif /* REPORT_FATAL_TIMEOUTS */
+ break;
+
+ case WLC_E_ASSOC_IND:
+ case WLC_E_REASSOC_IND:
+
+ DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
+#ifdef REPORT_FATAL_TIMEOUTS
+ if (status != WLC_E_STATUS_SUCCESS) {
+ dhd_clear_join_error(dhd_pub, WLC_SSID_MASK | WLC_WPA_MASK);
+ }
+#endif /* REPORT_FATAL_TIMEOUTS */
+
+ break;
+
+ case WLC_E_ASSOC:
+ case WLC_E_REASSOC:
+ if (status == WLC_E_STATUS_SUCCESS) {
+ DHD_EVENT(("MACEVENT: %s, MAC %s, SUCCESS\n", event_name, eabuf));
+ } else if (status == WLC_E_STATUS_TIMEOUT) {
+ DHD_EVENT(("MACEVENT: %s, MAC %s, TIMEOUT\n", event_name, eabuf));
+ } else if (status == WLC_E_STATUS_FAIL) {
+ DHD_EVENT(("MACEVENT: %s, MAC %s, FAILURE, status %d reason %d\n",
+ event_name, eabuf, (int)status, (int)reason));
+ } else if (status == WLC_E_STATUS_SUPPRESS) {
+ DHD_EVENT(("MACEVENT: %s, MAC %s, SUPPRESS\n", event_name, eabuf));
+ } else if (status == WLC_E_STATUS_NO_ACK) {
+ DHD_EVENT(("MACEVENT: %s, MAC %s, NOACK\n", event_name, eabuf));
+ } else {
+ DHD_EVENT(("MACEVENT: %s, MAC %s, unexpected status %d\n",
+ event_name, eabuf, (int)status));
+ }
+#ifdef REPORT_FATAL_TIMEOUTS
+ if (status != WLC_E_STATUS_SUCCESS) {
+ dhd_clear_join_error(dhd_pub, WLC_SSID_MASK | WLC_WPA_MASK);
+ }
+#endif /* REPORT_FATAL_TIMEOUTS */
+
+ break;
+
+ case WLC_E_DEAUTH_IND:
+ case WLC_E_DISASSOC_IND:
+#ifdef REPORT_FATAL_TIMEOUTS
+ dhd_clear_join_error(dhd_pub, WLC_SSID_MASK | WLC_WPA_MASK);
+#endif /* REPORT_FATAL_TIMEOUTS */
+ DHD_EVENT(("MACEVENT: %s, MAC %s, reason %d\n", event_name, eabuf, (int)reason));
+ break;
+
+ case WLC_E_AUTH:
+ case WLC_E_AUTH_IND:
+ if (auth_type == DOT11_OPEN_SYSTEM)
+ auth_str = "Open System";
+ else if (auth_type == DOT11_SHARED_KEY)
+ auth_str = "Shared Key";
+ else if (auth_type == DOT11_SAE)
+ auth_str = "SAE";
+ else {
+ snprintf(err_msg, sizeof(err_msg), "AUTH unknown: %d", (int)auth_type);
+ auth_str = err_msg;
+ }
+
+ if (event_type == WLC_E_AUTH_IND) {
+ DHD_EVENT(("MACEVENT: %s, MAC %s, %s\n", event_name, eabuf, auth_str));
+ } else if (status == WLC_E_STATUS_SUCCESS) {
+ DHD_EVENT(("MACEVENT: %s, MAC %s, %s, SUCCESS\n",
+ event_name, eabuf, auth_str));
+ } else if (status == WLC_E_STATUS_TIMEOUT) {
+ DHD_EVENT(("MACEVENT: %s, MAC %s, %s, TIMEOUT\n",
+ event_name, eabuf, auth_str));
+ } else if (status == WLC_E_STATUS_FAIL) {
+ DHD_EVENT(("MACEVENT: %s, MAC %s, %s, FAILURE, status %d reason %d\n",
+ event_name, eabuf, auth_str, (int)status, (int)reason));
+ } else if (status == WLC_E_STATUS_SUPPRESS) {
+ DHD_EVENT(("MACEVENT: %s, MAC %s, %s, SUPPRESS\n",
+ event_name, eabuf, auth_str));
+ } else if (status == WLC_E_STATUS_NO_ACK) {
+ DHD_EVENT(("MACEVENT: %s, MAC %s, %s, NOACK\n",
+ event_name, eabuf, auth_str));
+ } else {
+ DHD_EVENT(("MACEVENT: %s, MAC %s, %s, status %d reason %d\n",
+ event_name, eabuf, auth_str, (int)status, (int)reason));
+ }
+ BCM_REFERENCE(auth_str);
+#ifdef REPORT_FATAL_TIMEOUTS
+ if (status != WLC_E_STATUS_SUCCESS) {
+ dhd_clear_join_error(dhd_pub, WLC_SSID_MASK | WLC_WPA_MASK);
+ }
+#endif /* REPORT_FATAL_TIMEOUTS */
+
+ break;
+
+ case WLC_E_ROAM:
+ wl_show_roam_event(dhd_pub, status, datalen,
+ event_name, eabuf, event_data);
+ break;
+ case WLC_E_ROAM_START:
+ if (datalen >= sizeof(wlc_roam_start_event_t)) {
+ const wlc_roam_start_event_t *roam_start =
+ (wlc_roam_start_event_t *)event_data;
+ DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d,"
+ " reason %d, auth %d, current bss rssi %d\n",
+ event_name, event_type, eabuf, (int)status, (int)reason,
+ (int)auth_type, (int)roam_start->rssi));
+ } else {
+ DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d\n",
+ event_name, event_type, eabuf, (int)status, (int)reason,
+ (int)auth_type));
+ }
+ break;
+ case WLC_E_ROAM_PREP:
+ if (datalen >= sizeof(wlc_roam_prep_event_t)) {
+ const wlc_roam_prep_event_t *roam_prep =
+ (wlc_roam_prep_event_t *)event_data;
+ DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d,"
+ " reason %d, auth %d, target bss rssi %d\n",
+ event_name, event_type, eabuf, (int)status, (int)reason,
+ (int)auth_type, (int)roam_prep->rssi));
+ } else {
+ DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d\n",
+ event_name, event_type, eabuf, (int)status, (int)reason,
+ (int)auth_type));
+ }
+ break;
+ case WLC_E_ROAM_CACHE_UPDATE:
+ DHD_EVENT(("MACEVENT: %s\n", event_name));
+ wl_show_roam_cache_update_event(event_name, status,
+ reason, datalen, event_data);
+ break;
+ case WLC_E_JOIN:
+ case WLC_E_SET_SSID:
+#ifdef REPORT_FATAL_TIMEOUTS
+ OSL_ATOMIC_SET(dhd_pub->osh, &dhd_pub->set_ssid_rcvd, TRUE);
+ dhd_clear_join_error(dhd_pub, WLC_SSID_MASK);
+#endif /* REPORT_FATAL_TIMEOUTS */
+ if (status == WLC_E_STATUS_SUCCESS) {
+ DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
+ } else {
+#ifdef REPORT_FATAL_TIMEOUTS
+ /*
+ * For a secure join, if WLC_E_SET_SSID returns with any failure case,
+ * do not expect WLC_E_PSK_SUP. So clear the mask.
+ */
+ dhd_clear_join_error(dhd_pub, WLC_WPA_MASK);
+#endif /* REPORT_FATAL_TIMEOUTS */
+ if (status == WLC_E_STATUS_FAIL) {
+ DHD_EVENT(("MACEVENT: %s, failed status %d\n", event_name, status));
+ } else if (status == WLC_E_STATUS_NO_NETWORKS) {
+ DHD_EVENT(("MACEVENT: %s, no networks found\n", event_name));
+ } else {
+ DHD_EVENT(("MACEVENT: %s, unexpected status %d\n",
+ event_name, (int)status));
+ }
+ }
+ break;
+
+ case WLC_E_BEACON_RX:
+ if (status == WLC_E_STATUS_SUCCESS) {
+ DHD_EVENT(("MACEVENT: %s, SUCCESS\n", event_name));
+ } else if (status == WLC_E_STATUS_FAIL) {
+ DHD_EVENT(("MACEVENT: %s, FAIL\n", event_name));
+ } else {
+ DHD_EVENT(("MACEVENT: %s, status %d\n", event_name, status));
+ }
+ break;
+
+ case WLC_E_LINK:
+ DHD_EVENT(("MACEVENT: %s %s flags:0x%x status:%d reason:%d\n",
+ event_name, link ? "UP" : "DOWN", flags, status, reason));
+#ifdef PCIE_FULL_DONGLE
+#ifdef REPORT_FATAL_TIMEOUTS
+ {
+ uint8 ifindex = (uint8)dhd_ifname2idx(dhd_pub->info, event->ifname);
+ uint8 role = dhd_flow_rings_ifindex2role(dhd_pub, ifindex);
+ if ((role == WLC_E_IF_ROLE_STA) && (!link)) {
+ dhd_clear_join_error(dhd_pub, WLC_SSID_MASK | WLC_WPA_MASK);
+ }
+ }
+#endif /* REPORT_FATAL_TIMEOUTS */
+#endif /* PCIE_FULL_DONGLE */
+ BCM_REFERENCE(link);
+ break;
+
+ case WLC_E_MIC_ERROR:
+ DHD_EVENT(("MACEVENT: %s, MAC %s, Group %d, Flush %d\n",
+ event_name, eabuf, group, flush_txq));
+ BCM_REFERENCE(group);
+ BCM_REFERENCE(flush_txq);
+ break;
+
+ case WLC_E_ICV_ERROR:
+ case WLC_E_UNICAST_DECODE_ERROR:
+ case WLC_E_MULTICAST_DECODE_ERROR:
+ DHD_EVENT(("MACEVENT: %s, MAC %s\n",
+ event_name, eabuf));
+ break;
+
+ case WLC_E_TXFAIL:
+ DHD_EVENT(("MACEVENT: %s, RA %s status %d\n", event_name, eabuf, status));
+ break;
+
+ case WLC_E_ASSOC_REQ_IE:
+ case WLC_E_ASSOC_RESP_IE:
+ case WLC_E_PMKID_CACHE:
+ DHD_EVENT(("MACEVENT: %s\n", event_name));
+ break;
+
+ case WLC_E_SCAN_COMPLETE:
+ DHD_EVENT(("MACEVENT: %s\n", event_name));
+#ifdef REPORT_FATAL_TIMEOUTS
+ dhd_stop_scan_timer(dhd_pub, FALSE, 0);
+#endif /* REPORT_FATAL_TIMEOUTS */
+ break;
+ case WLC_E_RSSI_LQM:
+ case WLC_E_PFN_NET_FOUND:
+ case WLC_E_PFN_NET_LOST:
+ case WLC_E_PFN_SCAN_COMPLETE:
+ case WLC_E_PFN_SCAN_NONE:
+ case WLC_E_PFN_SCAN_ALLGONE:
+ case WLC_E_PFN_GSCAN_FULL_RESULT:
+ case WLC_E_PFN_SSID_EXT:
+ DHD_EVENT(("PNOEVENT: %s\n", event_name));
+ break;
+
+ case WLC_E_PFN_SCAN_BACKOFF:
+ case WLC_E_PFN_BSSID_SCAN_BACKOFF:
+ DHD_EVENT(("PNOEVENT: %s, status %d, reason %d\n",
+ event_name, (int)status, (int)reason));
+ break;
+
+ case WLC_E_PSK_SUP:
+ case WLC_E_PRUNE:
+ DHD_EVENT(("MACEVENT: %s, status %d, reason %d\n",
+ event_name, (int)status, (int)reason));
+#ifdef REPORT_FATAL_TIMEOUTS
+ dhd_clear_join_error(dhd_pub, WLC_WPA_MASK);
+#endif /* REPORT_FATAL_TIMEOUTS */
+ break;
+
+#ifdef WIFI_ACT_FRAME
+ case WLC_E_ACTION_FRAME:
+ DHD_TRACE(("MACEVENT: %s Bssid %s\n", event_name, eabuf));
+ break;
+ case WLC_E_ACTION_FRAME_COMPLETE:
+ if (datalen >= sizeof(uint32)) {
+ const uint32 *pktid = event_data;
+ BCM_REFERENCE(pktid);
+ DHD_EVENT(("MACEVENT: %s status %d, reason %d, pktid 0x%x\n",
+ event_name, (int)status, (int)reason, *pktid));
+ }
+ break;
+#endif /* WIFI_ACT_FRAME */
+
+#ifdef SHOW_LOGTRACE
+ case WLC_E_TRACE:
+ {
+ dhd_dbg_trace_evnt_handler(dhd_pub, event_data, raw_event_ptr, datalen);
+ break;
+ }
+#endif /* SHOW_LOGTRACE */
+
+ case WLC_E_RSSI:
+ if (datalen >= sizeof(int)) {
+ DHD_EVENT(("MACEVENT: %s %d\n", event_name, ntoh32(*((int *)event_data))));
+ }
+ break;
+
+ case WLC_E_SERVICE_FOUND:
+ case WLC_E_P2PO_ADD_DEVICE:
+ case WLC_E_P2PO_DEL_DEVICE:
+ DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
+ break;
+
+#ifdef BT_WIFI_HANDOBER
+ case WLC_E_BT_WIFI_HANDOVER_REQ:
+ DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
+ break;
+#endif /* BT_WIFI_HANDOBER */
+#ifdef DHD_AWDL
+ case WLC_E_AWDL_AW:
+ if (datalen >= sizeof(awdl_aws_event_data_t)) {
+ const awdl_aws_event_data_t *aw =
+ (awdl_aws_event_data_t *)event_data;
+ BCM_REFERENCE(aw);
+ DHD_EVENT(("MACEVENT: %s, MAC %s aw_cnt %u ext_cnt %u flags %u "
+ "aw_ch %u\n", event_name, eabuf, aw->aw_counter,
+ aw->aw_ext_count, aw->flags, CHSPEC_CHANNEL(aw->aw_chan)));
+ host_data = TRUE;
+
+#if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS)
+ dhd_update_awdl_stats(dhd_pub, aw);
+ /* Store last received aw counter */
+ dhd_pub->awdl_aw_counter = aw->aw_counter;
+#endif /* DHD_AWDL && AWDL_SLOT_STATS */
+ }
+ break;
+ case WLC_E_AWDL_ROLE:
+ DHD_EVENT(("MACEVENT: %s, MAC %s ROLE %d\n", event_name, eabuf, (int)status));
+ break;
+ case WLC_E_AWDL_EVENT:
+ DHD_EVENT(("MACEVENT: %s, MAC %s status %d reason %d\n",
+ event_name, eabuf, (int)status, (int)reason));
+ if (datalen >= OFFSETOF(awdl_scan_event_data_t, chan_list)) {
+ const awdl_scan_event_data_t *scan_evt =
+ (awdl_scan_event_data_t *)event_data;
+ BCM_REFERENCE(scan_evt);
+ DHD_EVENT(("scan_usage %d, nscan_chans %d, ncached_chans %d, "
+ "iscan_flags 0x%x\n", scan_evt->scan_usage,
+ scan_evt->nscan_chans, scan_evt->ncached_chans,
+ scan_evt->flags));
+ host_data = TRUE;
+ }
+ break;
+#endif /* DHD_AWDL */
+
+ case WLC_E_CCA_CHAN_QUAL:
+ /* Ideally we would check here that datalen >= sizeof(cca_chan_qual_event_t),
+ * but since the definition of cca_chan_qual_event_t differs
+ * between blazar and legacy firmware, we
+ * check only that datalen is bigger than 0.
+ */
+ if (datalen > 0) {
+ const cca_chan_qual_event_t *cca_event =
+ (cca_chan_qual_event_t *)event_data;
+ if ((cca_event->id == WL_CHAN_QUAL_FULLPM_CCA) ||
+ (cca_event->id == WL_CHAN_QUAL_FULLPM_CCA_OFDM_DESENSE)) {
+ const cca_only_chan_qual_event_t *cca_only_event =
+ (const cca_only_chan_qual_event_t *)cca_event;
+ BCM_REFERENCE(cca_only_event);
+ DHD_EVENT((
+ "MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d,"
+ " channel 0x%02x\n",
+ event_name, event_type, eabuf, (int)status,
+ (int)reason, (int)auth_type, cca_event->chanspec));
+ DHD_EVENT((
+ "\tTOTAL (dur %dms me %dms notme %dms interf %dms"
+ " ts 0x%08x)\n",
+ cca_only_event->cca_busy_ext.duration,
+ cca_only_event->cca_busy_ext.congest_ibss,
+ cca_only_event->cca_busy_ext.congest_obss,
+ cca_only_event->cca_busy_ext.interference,
+ cca_only_event->cca_busy_ext.timestamp));
+ DHD_EVENT((
+ "\t !PM (dur %dms me %dms notme %dms interf %dms)\n",
+ cca_only_event->cca_busy_nopm.duration,
+ cca_only_event->cca_busy_nopm.congest_ibss,
+ cca_only_event->cca_busy_nopm.congest_obss,
+ cca_only_event->cca_busy_nopm.interference));
+ DHD_EVENT((
+ "\t PM (dur %dms me %dms notme %dms interf %dms)\n",
+ cca_only_event->cca_busy_pm.duration,
+ cca_only_event->cca_busy_pm.congest_ibss,
+ cca_only_event->cca_busy_pm.congest_obss,
+ cca_only_event->cca_busy_pm.interference));
+ if (cca_event->id == WL_CHAN_QUAL_FULLPM_CCA_OFDM_DESENSE) {
+ DHD_EVENT(("\t OFDM desense %d\n",
+ ((const cca_only_chan_qual_event_v2_t *)
+ cca_only_event)->ofdm_desense));
+ }
+ } else if (cca_event->id == WL_CHAN_QUAL_FULL_CCA) {
+ DHD_EVENT((
+ "MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d,"
+ " channel 0x%02x (dur %dms ibss %dms obss %dms interf %dms"
+ " ts 0x%08x)\n",
+ event_name, event_type, eabuf, (int)status,
+ (int)reason, (int)auth_type, cca_event->chanspec,
+ cca_event->cca_busy_ext.duration,
+ cca_event->cca_busy_ext.congest_ibss,
+ cca_event->cca_busy_ext.congest_obss,
+ cca_event->cca_busy_ext.interference,
+ cca_event->cca_busy_ext.timestamp));
+ } else if (cca_event->id == WL_CHAN_QUAL_CCA) {
+ DHD_EVENT((
+ "MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d,"
+ " channel 0x%02x (dur %dms busy %dms ts 0x%08x)\n",
+ event_name, event_type, eabuf, (int)status,
+ (int)reason, (int)auth_type, cca_event->chanspec,
+ cca_event->cca_busy.duration,
+ cca_event->cca_busy.congest,
+ cca_event->cca_busy.timestamp));
+ } else if ((cca_event->id == WL_CHAN_QUAL_NF) ||
+ (cca_event->id == WL_CHAN_QUAL_NF_LTE)) {
+ DHD_EVENT((
+ "MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d,"
+ " channel 0x%02x (NF[%d] %ddB)\n",
+ event_name, event_type, eabuf, (int)status,
+ (int)reason, (int)auth_type, cca_event->chanspec,
+ cca_event->id, cca_event->noise));
+ } else {
+ DHD_EVENT((
+ "MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d,"
+ " channel 0x%02x (unknown ID %d)\n",
+ event_name, event_type, eabuf, (int)status,
+ (int)reason, (int)auth_type, cca_event->chanspec,
+ cca_event->id));
+ }
+ }
+ break;
+ case WLC_E_ESCAN_RESULT:
+ if (datalen >= sizeof(wl_escan_result_v2_t)) {
+ const wl_escan_result_v2_t *escan_result =
+ (wl_escan_result_v2_t *)event_data;
+ BCM_REFERENCE(escan_result);
+#ifdef OEM_ANDROID
+ /* WLC_E_ESCAN_RESULT event logs are printed too frequently,
+ * so DHD_EVENT() was changed to DHD_TRACE() on the HW4 platform.
+ */
+ DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d \n",
+ event_name, event_type, eabuf, (int)status));
+#else
+ DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d sync-id %u\n",
+ event_name, event_type, eabuf,
+ (int)status, dtoh16(escan_result->sync_id)));
+#endif /* OEM_ANDROID */
+#ifdef REPORT_FATAL_TIMEOUTS
+ /* a 'partial' status means the escan is still in progress;
+ * any other status implies the escan has either finished or aborted
+ */
+ if (status != WLC_E_STATUS_PARTIAL) {
+ unsigned long timeout_flags = 0;
+ uint16 syncid = dtoh16(escan_result->sync_id);
+ /* This takes care of the specific case where the
+ * escan event returns abort and is processed immediately
+ * by dhd before the escan iovar has returned. In that case,
+ * if the iovar returns success, we would start a
+ * timeout even though the escan has already been aborted!
+ * So the flag below is checked before starting the escan timeout.
+ */
+ if (dhd_pub->timeout_info) {
+ DHD_TIMER_LOCK(dhd_pub->timeout_info->scan_timer_lock,
+ timeout_flags);
+ if (!dhd_pub->timeout_info->scan_timer_active &&
+ syncid == dhd_pub->esync_id) {
+ dhd_pub->timeout_info->escan_aborted = TRUE;
+ dhd_pub->timeout_info->abort_syncid = syncid;
+ DHD_TIMER_UNLOCK(
+ dhd_pub->timeout_info->scan_timer_lock,
+ timeout_flags);
+ break;
+ } else {
+ dhd_pub->timeout_info->escan_aborted = FALSE;
+ }
+ DHD_TIMER_UNLOCK(dhd_pub->timeout_info->scan_timer_lock,
+ timeout_flags);
+ }
+ dhd_stop_scan_timer(dhd_pub, TRUE, dtoh16(escan_result->sync_id));
+ }
+#endif /* REPORT_FATAL_TIMEOUTS */
+ }
+ break;
+ case WLC_E_IF:
+ if (datalen >= sizeof(struct wl_event_data_if)) {
+ const struct wl_event_data_if *ifevent =
+ (struct wl_event_data_if *)event_data;
+ BCM_REFERENCE(ifevent);
+
+ DHD_EVENT(("MACEVENT: %s, opcode:0x%d ifidx:%d role:%d\n",
+ event_name, ifevent->opcode, ifevent->ifidx, ifevent->role));
+ }
+ break;
+#ifdef SHOW_LOGTRACE
+ case WLC_E_MSCH:
+ {
+ wl_mschdbg_event_handler(dhd_pub, raw_event_ptr, reason, event_data, datalen);
+ break;
+ }
+#endif /* SHOW_LOGTRACE */
+
+ case WLC_E_PSK_AUTH:
+ DHD_EVENT(("MACEVENT: %s, RA %s status %d Reason:%d\n",
+ event_name, eabuf, status, reason));
+ break;
+ case WLC_E_AGGR_EVENT:
+ if (datalen >= sizeof(event_aggr_data_t)) {
+ const event_aggr_data_t *aggrbuf = event_data;
+ int j = 0, len = 0;
+ const uint8 *data = aggrbuf->data;
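+ /* aggregated buffer layout, as parsed below: a sequence of
+ * records, each a wl_event_msg_t header followed by datalen
+ * bytes of data, with each record padded to 8-byte alignment
+ */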
+ DHD_EVENT(("MACEVENT: %s, num of events %d total len %d sub events: ",
+ event_name, aggrbuf->num_events, aggrbuf->len));
+ for (j = 0; j < aggrbuf->num_events; j++)
+ {
+ const wl_event_msg_t * sub_event = (const wl_event_msg_t *)data;
+ if (len > aggrbuf->len) {
+ DHD_ERROR(("%s: Aggr events corrupted!",
+ __FUNCTION__));
+ break;
+ }
+ DHD_EVENT(("\n Event type: %d ", ntoh32(sub_event->event_type)));
+ len += ALIGN_SIZE((ntoh32(sub_event->datalen) +
+ sizeof(wl_event_msg_t)), sizeof(uint64));
+ buf = (const uchar *)(data + sizeof(wl_event_msg_t));
+ BCM_REFERENCE(buf);
+ DHD_EVENT((" data (%d) : ", ntoh32(sub_event->datalen)));
+ for (i = 0; i < ntoh32(sub_event->datalen); i++) {
+ DHD_EVENT((" 0x%02x ", buf[i]));
+ }
+ data = aggrbuf->data + len;
+ }
+ DHD_EVENT(("\n"));
+ }
+ break;
+ case WLC_E_PHY_CAL:
+ {
+ DHD_EVENT(("MACEVENT: %s, reason:%d\n", event_name, reason));
+ break;
+ }
+ case WLC_E_NAN_CRITICAL:
+ {
+ DHD_EVENT(("MACEVENT: %s, type:%d\n", event_name, reason));
+ break;
+ }
+ case WLC_E_NAN_NON_CRITICAL:
+ {
+ DHD_TRACE(("MACEVENT: %s, type:%d\n", event_name, reason));
+ break;
+ }
+ case WLC_E_PROXD:
+ if (datalen >= sizeof(wl_proxd_event_t)) {
+ const wl_proxd_event_t *proxd =
+ (wl_proxd_event_t*)event_data;
+ DHD_LOG_MEM(("MACEVENT: %s, event:%d, status:%d\n",
+ event_name, proxd->type, reason));
+ }
+ break;
+ case WLC_E_RPSNOA:
+ if (datalen >= sizeof(rpsnoa_stats_t)) {
+ const rpsnoa_stats_t *stat = event_data;
+ if (datalen == sizeof(*stat)) {
+ DHD_EVENT(("MACEVENT: %s, band %s, status %d, pps %d\n", event_name,
+ (stat->band == WLC_BAND_2G) ? "2G":"5G",
+ stat->state, stat->last_pps));
+ }
+ }
+ break;
+ case WLC_E_WA_LQM:
+ if (datalen >= sizeof(wl_event_wa_lqm_t)) {
+ const wl_event_wa_lqm_t *event_wa_lqm =
+ (wl_event_wa_lqm_t *)event_data;
+ const bcm_xtlv_t *subevent;
+ const wl_event_wa_lqm_basic_t *elqm_basic;
+
+ if ((event_wa_lqm->ver != WL_EVENT_WA_LQM_VER) ||
+ (event_wa_lqm->len < sizeof(wl_event_wa_lqm_t) + BCM_XTLV_HDR_SIZE)) {
+ DHD_ERROR(("MACEVENT: %s invalid (ver=%d len=%d)\n",
+ event_name, event_wa_lqm->ver, event_wa_lqm->len));
+ break;
+ }
+
+ subevent = (const bcm_xtlv_t *)event_wa_lqm->subevent;
+ if ((subevent->id != WL_EVENT_WA_LQM_BASIC) ||
+ (subevent->len < sizeof(wl_event_wa_lqm_basic_t))) {
+ DHD_ERROR(("MACEVENT: %s invalid sub-type (id=%d len=%d)\n",
+ event_name, subevent->id, subevent->len));
+ break;
+ }
+
+ elqm_basic = (const wl_event_wa_lqm_basic_t *)subevent->data;
+ BCM_REFERENCE(elqm_basic);
+ DHD_EVENT(("MACEVENT: %s (RSSI=%d SNR=%d TxRate=%d RxRate=%d)\n",
+ event_name, elqm_basic->rssi, elqm_basic->snr,
+ elqm_basic->tx_rate, elqm_basic->rx_rate));
+ }
+ break;
+
+ case WLC_E_OBSS_DETECTION:
+ {
+ DHD_EVENT(("MACEVENT: %s, type:%d\n", event_name, reason));
+ break;
+ }
+
+ case WLC_E_AP_BCN_MUTE:
+ if (datalen >= sizeof(wlc_bcn_mute_miti_event_data_v1_t)) {
+ const wlc_bcn_mute_miti_event_data_v1_t
+ *bcn_mute_miti_evnt_data = event_data;
+ DHD_EVENT(("MACEVENT: %s, reason :%d uatbtt_count: %d\n",
+ event_name, reason, bcn_mute_miti_evnt_data->uatbtt_count));
+ }
+ break;
+
+ case WLC_E_TWT_SETUP:
+ DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
+ break;
+ case WLC_E_TWT_TEARDOWN:
+ DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
+ break;
+ case WLC_E_TWT_INFO_FRM:
+ DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
+ break;
+ default:
+ DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d\n",
+ event_name, event_type, eabuf, (int)status, (int)reason,
+ (int)auth_type));
+ break;
+ }
+
+ /* show any appended data if message level is set to bytes or host_data is set */
+ if ((DHD_BYTES_ON() || (host_data == TRUE)) && DHD_EVENT_ON() && datalen) {
+ buf = (uchar *) event_data;
+ BCM_REFERENCE(buf);
+ DHD_EVENT((" data (%d) : ", datalen));
+ for (i = 0; i < datalen; i++) {
+ DHD_EVENT((" 0x%02x ", buf[i]));
+ }
+ DHD_EVENT(("\n"));
+ }
+} /* wl_show_host_event */
+#endif /* SHOW_EVENTS */
+
+#ifdef DNGL_EVENT_SUPPORT
+/* Check whether packet is a BRCM dngl event pkt. If it is, process event data. */
+int
+dngl_host_event(dhd_pub_t *dhdp, void *pktdata, bcm_dngl_event_msg_t *dngl_event, size_t pktlen)
+{
+ bcm_dngl_event_t *pvt_data = (bcm_dngl_event_t *)pktdata;
+
+ dngl_host_event_process(dhdp, pvt_data, dngl_event, pktlen);
+ return BCME_OK;
+}
+
+#ifdef PARSE_DONGLE_HOST_EVENT
+typedef struct hck_id_to_str_s {
+ uint32 id;
+ char *name;
+} hck_id_to_str_t;
+
+hck_id_to_str_t hck_sw_id_to_str[] = {
+ {WL_HC_DD_PCIE, "WL_HC_DD_PCIE"},
+ {WL_HC_DD_RX_DMA_STALL, "WL_HC_DD_RX_DMA_STALL"},
+ {WL_HC_DD_RX_STALL, "WL_HC_DD_RX_STALL"},
+ {WL_HC_DD_TX_STALL, "WL_HC_DD_TX_STALL"},
+ {WL_HC_DD_SCAN_STALL, "WL_HC_DD_SCAN_STALL"},
+ {WL_HC_DD_PHY, "WL_HC_DD_PHY"},
+ {WL_HC_DD_REINIT, "WL_HC_DD_REINIT"},
+ {WL_HC_DD_TXQ_STALL, "WL_HC_DD_TXQ_STALL"},
+ {0, NULL}
+};
+
+hck_id_to_str_t hck_pcie_module_to_str[] = {
+ {HEALTH_CHECK_PCIEDEV_INDUCED_IND, "PCIEDEV_INDUCED_IND"},
+ {HEALTH_CHECK_PCIEDEV_H2D_DMA_IND, "PCIEDEV_H2D_DMA_IND"},
+ {HEALTH_CHECK_PCIEDEV_D2H_DMA_IND, "PCIEDEV_D2H_DMA_IND"},
+ {HEALTH_CHECK_PCIEDEV_IOCTL_STALL_IND, "PCIEDEV_IOCTL_STALL_IND"},
+ {HEALTH_CHECK_PCIEDEV_D3ACK_STALL_IND, "PCIEDEV_D3ACK_STALL_IND"},
+ {HEALTH_CHECK_PCIEDEV_NODS_IND, "PCIEDEV_NODS_IND"},
+ {HEALTH_CHECK_PCIEDEV_LINKSPEED_FALLBACK_IND, "PCIEDEV_LINKSPEED_FALLBACK_IND"},
+ {HEALTH_CHECK_PCIEDEV_DSACK_STALL_IND, "PCIEDEV_DSACK_STALL_IND"},
+ {0, NULL}
+};
+
+hck_id_to_str_t hck_rx_stall_v2_to_str[] = {
+ {BCM_RX_HC_RESERVED, "BCM_RX_HC_RESERVED"},
+ {BCM_RX_HC_UNSPECIFIED, "BCM_RX_HC_UNSPECIFIED"},
+ {BCM_RX_HC_UNICAST_DECRYPT_FAIL, "BCM_RX_HC_UNICAST_DECRYPT_FAIL"},
+ {BCM_RX_HC_BCMC_DECRYPT_FAIL, "BCM_RX_HC_BCMC_DECRYPT_FAIL"},
+ {BCM_RX_HC_UNICAST_REPLAY, "BCM_RX_HC_UNICAST_REPLAY"},
+ {BCM_RX_HC_BCMC_REPLAY, "BCM_RX_HC_BCMC_REPLAY"},
+ {BCM_RX_HC_AMPDU_DUP, "BCM_RX_HC_AMPDU_DUP"},
+ {0, NULL}
+};
+
+static void
+dhd_print_dongle_hck_id(uint32 id, hck_id_to_str_t *hck)
+{
+ while (hck->name != NULL) {
+ if (hck->id == id) {
+ DHD_ERROR(("DONGLE_HCK_EVENT: %s\n", hck->name));
+ return;
+ }
+ hck++;
+ }
+}
+
+void
+dhd_parse_hck_common_sw_event(bcm_xtlv_t *wl_hc)
+{
+
+ wl_rx_hc_info_v2_t *hck_rx_stall_v2;
+ uint16 id;
+
+ id = ltoh16(wl_hc->id);
+
+ if (id == WL_HC_DD_RX_STALL_V2) {
+ /* map the hck_rx_stall_v2 structure to the value of the XTLV */
+ hck_rx_stall_v2 =
+ (wl_rx_hc_info_v2_t*)wl_hc;
+ DHD_ERROR(("type:%d len:%d if_idx:%d ac:%d pkts:%d"
+ " drop:%d alert_th:%d reason:%d peer_ea:"MACF"\n",
+ hck_rx_stall_v2->type,
+ hck_rx_stall_v2->length,
+ hck_rx_stall_v2->if_idx,
+ hck_rx_stall_v2->ac,
+ hck_rx_stall_v2->rx_hc_pkts,
+ hck_rx_stall_v2->rx_hc_dropped_all,
+ hck_rx_stall_v2->rx_hc_alert_th,
+ hck_rx_stall_v2->reason,
+ ETHER_TO_MACF(hck_rx_stall_v2->peer_ea)));
+ dhd_print_dongle_hck_id(
+ ltoh32(hck_rx_stall_v2->reason),
+ hck_rx_stall_v2_to_str);
+ } else {
+ dhd_print_dongle_hck_id(ltoh16(wl_hc->id),
+ hck_sw_id_to_str);
+ }
+
+}
+
+#endif /* PARSE_DONGLE_HOST_EVENT */
+
+void
+dngl_host_event_process(dhd_pub_t *dhdp, bcm_dngl_event_t *event,
+ bcm_dngl_event_msg_t *dngl_event, size_t pktlen)
+{
+ uint8 *p = (uint8 *)(event + 1);
+ uint16 type = ntoh16_ua((void *)&dngl_event->event_type);
+ uint16 datalen = ntoh16_ua((void *)&dngl_event->datalen);
+ uint16 version = ntoh16_ua((void *)&dngl_event->version);
+
+ DHD_EVENT(("VERSION:%d, EVENT TYPE:%d, DATALEN:%d\n", version, type, datalen));
+ if (datalen > (pktlen - sizeof(bcm_dngl_event_t) + ETHER_TYPE_LEN)) {
+ return;
+ }
+ if (version != BCM_DNGL_EVENT_MSG_VERSION) {
+ DHD_ERROR(("%s:version mismatch:%d:%d\n", __FUNCTION__,
+ version, BCM_DNGL_EVENT_MSG_VERSION));
+ return;
+ }
+ switch (type) {
+ case DNGL_E_SOCRAM_IND:
+ {
+ bcm_dngl_socramind_t *socramind_ptr = (bcm_dngl_socramind_t *)p;
+ uint16 tag = ltoh32(socramind_ptr->tag);
+ uint16 taglen = ltoh32(socramind_ptr->length);
+ p = (uint8 *)socramind_ptr->value;
+ DHD_EVENT(("Tag:%d Len:%d Datalen:%d\n", tag, taglen, datalen));
+ switch (tag) {
+ case SOCRAM_IND_ASSERT_TAG:
+ {
+ /*
+ * The payload consists of:
+ * a null-terminated function name, padded to a 32-bit boundary,
+ * the line number (32 bits), and
+ * the caller address (32 bits).
+ */
+ char *fnname = (char *)p;
+ if (datalen < (ROUNDUP(strlen(fnname) + 1, sizeof(uint32)) +
+ sizeof(uint32) * 2)) {
+ DHD_ERROR(("Wrong length:%d\n", datalen));
+ return;
+ }
+ DHD_EVENT(("ASSRT Function:%s ", p));
+ p += ROUNDUP(strlen(p) + 1, sizeof(uint32));
+ DHD_EVENT(("Line:%d ", *(uint32 *)p));
+ p += sizeof(uint32);
+ DHD_EVENT(("Caller Addr:0x%x\n", *(uint32 *)p));
+#ifdef PARSE_DONGLE_HOST_EVENT
+ DHD_ERROR(("DONGLE_HCK_EVENT: SOCRAM_IND_ASSERT_TAG\n"));
+#endif /* PARSE_DONGLE_HOST_EVENT */
+ break;
+ }
+ case SOCRAM_IND_TAG_HEALTH_CHECK:
+ {
+ bcm_dngl_healthcheck_t *dngl_hc = (bcm_dngl_healthcheck_t *)p;
+ DHD_EVENT(("SOCRAM_IND_HEALTHCHECK_TAG:%d Len:%d datalen:%d\n",
+ ltoh32(dngl_hc->top_module_tag),
+ ltoh32(dngl_hc->top_module_len),
+ datalen));
+ if (DHD_EVENT_ON()) {
+ prhex("HEALTHCHECK", p, MIN(ltoh32(dngl_hc->top_module_len)
+ + BCM_XTLV_HDR_SIZE, datalen));
+ }
+#ifdef DHD_LOG_DUMP
+ memset(dhdp->health_chk_event_data, 0, HEALTH_CHK_BUF_SIZE);
+ memcpy(dhdp->health_chk_event_data, p,
+ MIN(ltoh32(dngl_hc->top_module_len),
+ HEALTH_CHK_BUF_SIZE));
+#endif /* DHD_LOG_DUMP */
+ p = (uint8 *)dngl_hc->value;
+
+ switch (ltoh32(dngl_hc->top_module_tag)) {
+ case HEALTH_CHECK_TOP_LEVEL_MODULE_PCIEDEV_RTE:
+ {
+ bcm_dngl_pcie_hc_t *pcie_hc;
+ pcie_hc = (bcm_dngl_pcie_hc_t *)p;
+ BCM_REFERENCE(pcie_hc);
+ if (ltoh32(dngl_hc->top_module_len) <
+ sizeof(bcm_dngl_pcie_hc_t)) {
+ DHD_ERROR(("Wrong length:%d\n",
+ ltoh32(dngl_hc->top_module_len)));
+ return;
+ }
+ DHD_EVENT(("%d:PCIE HC error:%d flag:0x%x,"
+ " control:0x%x\n",
+ ltoh32(pcie_hc->version),
+ ltoh32(pcie_hc->pcie_err_ind_type),
+ ltoh32(pcie_hc->pcie_flag),
+ ltoh32(pcie_hc->pcie_control_reg)));
+#ifdef PARSE_DONGLE_HOST_EVENT
+ dhd_print_dongle_hck_id(
+ ltoh32(pcie_hc->pcie_err_ind_type),
+ hck_pcie_module_to_str);
+#endif /* PARSE_DONGLE_HOST_EVENT */
+ break;
+ }
+#ifdef HCHK_COMMON_SW_EVENT
+ case HCHK_SW_ENTITY_WL_PRIMARY:
+ case HCHK_SW_ENTITY_WL_SECONDARY:
+ {
+ bcm_xtlv_t *wl_hc = (bcm_xtlv_t*)p;
+
+ if (ltoh32(dngl_hc->top_module_len) <
+ sizeof(bcm_xtlv_t)) {
+ DHD_ERROR(("WL SW HC Wrong length:%d\n",
+ ltoh32(dngl_hc->top_module_len)));
+ return;
+ }
+ BCM_REFERENCE(wl_hc);
+ DHD_EVENT(("WL SW HC type %d len %d\n",
+ ltoh16(wl_hc->id), ltoh16(wl_hc->len)));
+
+#ifdef PARSE_DONGLE_HOST_EVENT
+ dhd_parse_hck_common_sw_event(wl_hc);
+#endif /* PARSE_DONGLE_HOST_EVENT */
+ break;
+
+ }
+#endif /* HCHK_COMMON_SW_EVENT */
+ default:
+ {
+ DHD_ERROR(("%s:Unknown module TAG:%d\n",
+ __FUNCTION__,
+ ltoh32(dngl_hc->top_module_tag)));
+ break;
+ }
+ }
+ break;
+ }
+ default:
+ DHD_ERROR(("%s:Unknown TAG\n", __FUNCTION__));
+ if (p && DHD_EVENT_ON()) {
+ prhex("SOCRAMIND", p, taglen);
+ }
+ break;
+ }
+ break;
+ }
+ default:
+ DHD_ERROR(("%s:Unknown DNGL Event Type:%d\n", __FUNCTION__, type));
+ if (p && DHD_EVENT_ON()) {
+ prhex("SOCRAMIND", p, datalen);
+ }
+ break;
+ }
+#ifndef BCMDBUS
+#ifdef DHD_FW_COREDUMP
+ if (dhdp->memdump_enabled) {
+ dhdp->memdump_type = DUMP_TYPE_DONGLE_HOST_EVENT;
+ if (
+#ifdef GDB_PROXY
+ !dhdp->gdb_proxy_active &&
+#endif /* GDB_PROXY */
+ dhd_schedule_socram_dump(dhdp)) {
+ DHD_ERROR(("%s: socram dump failed\n", __FUNCTION__));
+ }
+ }
+#else
+ dhd_dbg_send_urgent_evt(dhdp, p, datalen);
+#endif /* DHD_FW_COREDUMP */
+#endif /* !BCMDBUS */
+}
+
+#endif /* DNGL_EVENT_SUPPORT */
+
+/* Stub for now. Will become a real function once the shim
+ * is integrated into Android, Linux, etc.
+ */
+#if !defined(NDIS)
+int
+wl_event_process_default(wl_event_msg_t *event, struct wl_evt_pport *evt_pport)
+{
+ return BCME_OK;
+}
+#endif
+
+int
+wl_event_process(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata,
+ uint pktlen, void **data_ptr, void *raw_event)
+{
+ wl_evt_pport_t evt_pport;
+ wl_event_msg_t event;
+ bcm_event_msg_u_t evu;
+ int ret;
+
+ /* make sure it is a BRCM event pkt and record event data */
+ ret = wl_host_event_get_data(pktdata, pktlen, &evu);
+ if (ret != BCME_OK) {
+ return ret;
+ }
+
+ memcpy(&event, &evu.event, sizeof(wl_event_msg_t));
+
+ /* convert event from network order to host order */
+ wl_event_to_host_order(&event);
+
+ /* record event params to evt_pport */
+ evt_pport.dhd_pub = dhd_pub;
+ evt_pport.ifidx = ifidx;
+ evt_pport.pktdata = pktdata;
+ evt_pport.data_ptr = data_ptr;
+ evt_pport.raw_event = raw_event;
+ evt_pport.data_len = pktlen;
+
+#if defined(WL_WLC_SHIM) && defined(WL_WLC_SHIM_EVENTS)
+ {
+ struct wl_shim_node *shim = dhd_pub_shim(dhd_pub);
+ if (shim) {
+ ret = wl_shim_event_process(shim, &event, &evt_pport);
+ } else {
+ /* events can come even before the shim is initialized
+ * (while waiting for the "wlc_ver" response);
+ * handle them in a non-shim way.
+ */
+ DHD_ERROR(("%s: Events coming before shim initialization!\n",
+ __FUNCTION__));
+ ret = wl_event_process_default(&event, &evt_pport);
+ }
+ }
+#else
+ ret = wl_event_process_default(&event, &evt_pport);
+#endif /* WL_WLC_SHIM && WL_WLC_SHIM_EVENTS */
+
+ return ret;
+} /* wl_event_process */
+
+/* Check whether packet is a BRCM event pkt. If it is, record event data. */
+int
+wl_host_event_get_data(void *pktdata, uint pktlen, bcm_event_msg_u_t *evu)
+{
+ int ret;
+
+ ret = is_wlc_event_frame(pktdata, pktlen, 0, evu);
+ if (ret != BCME_OK) {
+ DHD_ERROR(("%s: Invalid event frame, err = %d\n",
+ __FUNCTION__, ret));
+ }
+
+ return ret;
+}
+
+int
+wl_process_host_event(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata, uint pktlen,
+ wl_event_msg_t *event, void **data_ptr, void *raw_event)
+{
+ bcm_event_t *pvt_data = (bcm_event_t *)pktdata;
+ bcm_event_msg_u_t evu;
+ uint8 *event_data;
+ uint32 type, status, datalen, reason;
+ uint16 flags;
+ uint evlen;
+ int ret;
+ uint16 usr_subtype;
+#if defined(__linux__)
+ dhd_if_t *ifp = NULL;
+ BCM_REFERENCE(ifp);
+#endif /* __linux__ */
+
+ ret = wl_host_event_get_data(pktdata, pktlen, &evu);
+ if (ret != BCME_OK) {
+ return ret;
+ }
+
+ usr_subtype = ntoh16_ua((void *)&pvt_data->bcm_hdr.usr_subtype);
+ switch (usr_subtype) {
+ case BCMILCP_BCM_SUBTYPE_EVENT:
+ memcpy(event, &evu.event, sizeof(wl_event_msg_t));
+ *data_ptr = &pvt_data[1];
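+ /* the event payload immediately follows the bcm_event_t header */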
+ break;
+ case BCMILCP_BCM_SUBTYPE_DNGLEVENT:
+#ifdef DNGL_EVENT_SUPPORT
+ /* If it is a DNGL event process it first */
+ if (dngl_host_event(dhd_pub, pktdata, &evu.dngl_event, pktlen) == BCME_OK) {
+ /*
+ * Purposely return an error to prevent the DNGL event from being
+ * processed as a BRCM event
+ */
+ return BCME_ERROR;
+ }
+#endif /* DNGL_EVENT_SUPPORT */
+ return BCME_NOTFOUND;
+ default:
+ return BCME_NOTFOUND;
+ }
+
+ /* start wl_event_msg process */
+ event_data = *data_ptr;
+ type = ntoh32_ua((void *)&event->event_type);
+ flags = ntoh16_ua((void *)&event->flags);
+ status = ntoh32_ua((void *)&event->status);
+ reason = ntoh32_ua((void *)&event->reason);
+ datalen = ntoh32_ua((void *)&event->datalen);
+ evlen = datalen + sizeof(bcm_event_t);
+
+ switch (type) {
+#ifdef PROP_TXSTATUS
+ case WLC_E_FIFO_CREDIT_MAP:
+ dhd_wlfc_enable(dhd_pub);
+ dhd_wlfc_FIFOcreditmap_event(dhd_pub, event_data);
+ WLFC_DBGMESG(("WLC_E_FIFO_CREDIT_MAP:(AC0,AC1,AC2,AC3),(BC_MC),(OTHER): "
+ "(%d,%d,%d,%d),(%d),(%d)\n", event_data[0], event_data[1],
+ event_data[2],
+ event_data[3], event_data[4], event_data[5]));
+ break;
+
+ case WLC_E_BCMC_CREDIT_SUPPORT:
+ dhd_wlfc_BCMCCredit_support_event(dhd_pub);
+ break;
+#ifdef LIMIT_BORROW
+ case WLC_E_ALLOW_CREDIT_BORROW:
+ dhd_wlfc_disable_credit_borrow_event(dhd_pub, event_data);
+ break;
+#endif /* LIMIT_BORROW */
+#endif /* PROP_TXSTATUS */
+
+ case WLC_E_ULP:
+ break;
+ case WLC_E_TDLS_PEER_EVENT:
+#if defined(WLTDLS) && defined(PCIE_FULL_DONGLE)
+ {
+ dhd_tdls_event_handler(dhd_pub, event);
+ }
+#endif
+ break;
+
+ case WLC_E_IF:
+ {
+ struct wl_event_data_if *ifevent = (struct wl_event_data_if *)event_data;
+
+ /* Ignore the event if NOIF is set */
+ if (ifevent->reserved & WLC_E_IF_FLAGS_BSSCFG_NOIF) {
+ DHD_ERROR(("WLC_E_IF: NO_IF set, event Ignored\r\n"));
+ return (BCME_UNSUPPORTED);
+ }
+#ifdef PCIE_FULL_DONGLE
+ dhd_update_interface_flow_info(dhd_pub, ifevent->ifidx,
+ ifevent->opcode, ifevent->role);
+#endif
+#ifdef PROP_TXSTATUS
+ {
+ uint8* ea = pvt_data->eth.ether_dhost;
+ WLFC_DBGMESG(("WLC_E_IF: idx:%d, action:%s, iftype:%s, ["MACDBG"]\n"
+ ifevent->ifidx,
+ ((ifevent->opcode == WLC_E_IF_ADD) ? "ADD":"DEL"),
+ ((ifevent->role == 0) ? "STA":"AP "),
+ MAC2STRDBG(ea)));
+ (void)ea;
+
+ if (ifevent->opcode == WLC_E_IF_CHANGE)
+ dhd_wlfc_interface_event(dhd_pub,
+ eWLFC_MAC_ENTRY_ACTION_UPDATE,
+ ifevent->ifidx, ifevent->role, ea);
+ else
+ dhd_wlfc_interface_event(dhd_pub,
+ ((ifevent->opcode == WLC_E_IF_ADD) ?
+ eWLFC_MAC_ENTRY_ACTION_ADD : eWLFC_MAC_ENTRY_ACTION_DEL),
+ ifevent->ifidx, ifevent->role, ea);
+
+ /* dhd has already created an interface by default for ifidx 0 */
+ if (ifevent->ifidx == 0)
+ break;
+ }
+#endif /* PROP_TXSTATUS */
+
+ if (ifevent->ifidx > 0 && ifevent->ifidx < DHD_MAX_IFS) {
+ if (ifevent->opcode == WLC_E_IF_ADD) {
+ if (dhd_event_ifadd(dhd_pub->info, ifevent, event->ifname,
+ event->addr.octet)) {
+
+ DHD_ERROR(("%s: dhd_event_ifadd failed ifidx: %d %s\n",
+ __FUNCTION__, ifevent->ifidx, event->ifname));
+ return (BCME_ERROR);
+ }
+ } else if (ifevent->opcode == WLC_E_IF_DEL) {
+#ifdef PCIE_FULL_DONGLE
+ dhd_flow_rings_delete(dhd_pub,
+ (uint8)dhd_ifname2idx(dhd_pub->info, event->ifname));
+#endif /* PCIE_FULL_DONGLE */
+ dhd_event_ifdel(dhd_pub->info, ifevent, event->ifname,
+ event->addr.octet);
+ } else if (ifevent->opcode == WLC_E_IF_CHANGE) {
+#ifdef WL_CFG80211
+ dhd_event_ifchange(dhd_pub->info, ifevent, event->ifname,
+ event->addr.octet);
+#endif /* WL_CFG80211 */
+ }
+ } else {
+#if !defined(PROP_TXSTATUS) && !defined(PCIE_FULL_DONGLE) && defined(WL_CFG80211)
+ DHD_INFO(("%s: Invalid ifidx %d for %s\n",
+ __FUNCTION__, ifevent->ifidx, event->ifname));
+#endif /* !PROP_TXSTATUS && !PCIE_FULL_DONGLE && WL_CFG80211 */
+ }
+ /* send up the if event: btamp user needs it */
+ *ifidx = dhd_ifname2idx(dhd_pub->info, event->ifname);
+ /* push up to external supp/auth */
+ dhd_event(dhd_pub->info, (char *)pvt_data, evlen, *ifidx);
+ break;
+ }
+
+ case WLC_E_NDIS_LINK:
+ break;
+ case WLC_E_PFN_NET_FOUND:
+ case WLC_E_PFN_SCAN_ALLGONE: /* share with WLC_E_PFN_BSSID_NET_LOST */
+ case WLC_E_PFN_NET_LOST:
+ break;
+#if defined(OEM_ANDROID) && defined(PNO_SUPPORT)
+ case WLC_E_PFN_BSSID_NET_FOUND:
+ case WLC_E_PFN_BEST_BATCHING:
+ dhd_pno_event_handler(dhd_pub, event, (void *)event_data);
+ break;
+#endif /* #if defined(OEM_ANDROID) && defined(PNO_SUPPORT) */
+#if defined(RTT_SUPPORT)
+ case WLC_E_PROXD:
+#ifndef WL_CFG80211
+ dhd_rtt_event_handler(dhd_pub, event, (void *)event_data);
+#endif /* WL_CFG80211 */
+ break;
+#endif /* RTT_SUPPORT */
+ /* These are what external supplicant/authenticator wants */
+ case WLC_E_ASSOC_IND:
+ case WLC_E_AUTH_IND:
+ case WLC_E_REASSOC_IND:
+ dhd_findadd_sta(dhd_pub,
+ dhd_ifname2idx(dhd_pub->info, event->ifname),
+ &event->addr.octet);
+ break;
+#if !defined(BCMDBUS) && defined(DHD_FW_COREDUMP)
+ case WLC_E_PSM_WATCHDOG:
+		DHD_ERROR(("%s: WLC_E_PSM_WATCHDOG event received\n", __FUNCTION__));
+ if (dhd_socram_dump(dhd_pub->bus) != BCME_OK) {
+			DHD_ERROR(("%s: socram dump failed\n", __FUNCTION__));
+ }
+ break;
+#endif
+#ifdef DHD_WMF
+ case WLC_E_PSTA_PRIMARY_INTF_IND:
+ dhd_update_psta_interface_for_sta(dhd_pub, event->ifname,
+ (void *)(event->addr.octet), (void*) event_data);
+ break;
+#endif
+#ifdef BCM_ROUTER_DHD
+ case WLC_E_DPSTA_INTF_IND:
+ dhd_update_dpsta_interface_for_sta(dhd_pub, (uint8)dhd_ifname2idx(dhd_pub->info,
+ event->ifname), (void*) event_data);
+ break;
+#endif /* BCM_ROUTER_DHD */
+#ifdef BCMDBG
+ case WLC_E_MACDBG:
+ dhd_macdbg_event_handler(dhd_pub, reason, event_data, datalen);
+ break;
+#endif /* BCMDBG */
+ case WLC_E_NATOE_NFCT:
+#ifdef WL_NATOE
+ DHD_EVENT(("%s: WLC_E_NATOE_NFCT event received \n", __FUNCTION__));
+ dhd_natoe_ct_event(dhd_pub, event_data);
+#endif /* WL_NATOE */
+ break;
+ case WLC_E_SLOTTED_BSS_PEER_OP:
+ DHD_EVENT(("%s: WLC_E_SLOTTED_BSS_PEER_OP event received for peer: "
+ "" MACDBG ", status = %d\n",
+ __FUNCTION__, MAC2STRDBG(event->addr.octet), status));
+ if (status == WLC_E_STATUS_SLOTTED_PEER_ADD) {
+ dhd_findadd_sta(dhd_pub, dhd_ifname2idx(dhd_pub->info,
+ event->ifname), &event->addr.octet);
+ } else if (status == WLC_E_STATUS_SLOTTED_PEER_DEL) {
+ uint8 ifindex = (uint8)dhd_ifname2idx(dhd_pub->info, event->ifname);
+ BCM_REFERENCE(ifindex);
+ dhd_del_sta(dhd_pub, dhd_ifname2idx(dhd_pub->info,
+ event->ifname), &event->addr.octet);
+#ifdef PCIE_FULL_DONGLE
+ dhd_flow_rings_delete_for_peer(dhd_pub, ifindex,
+ (char *)&event->addr.octet[0]);
+#endif
+ } else {
+ DHD_ERROR(("%s: WLC_E_SLOTTED_BSS_PEER_OP: Status is not expected = %d\n",
+ __FUNCTION__, status));
+ }
+ break;
+#ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
+ case WLC_E_REASSOC:
+ ifp = dhd_get_ifp(dhd_pub, event->ifidx);
+
+ if (!ifp)
+ break;
+
+ /* Consider STA role only since roam is disabled on P2P GC.
+ * Drop EAPOL M1 frame only if roam is done to same BSS.
+ */
+ if ((status == WLC_E_STATUS_SUCCESS) &&
+ IS_STA_IFACE(ndev_to_wdev(ifp->net)) &&
+ wl_cfg80211_is_event_from_connected_bssid(ifp->net, event, event->ifidx)) {
+ ifp->recv_reassoc_evt = TRUE;
+ }
+ break;
+#endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
+#if defined(CSI_SUPPORT)
+ case WLC_E_CSI:
+ dhd_csi_event_handler(dhd_pub, event, (void *)event_data);
+ break;
+#endif /* CSI_SUPPORT */
+ case WLC_E_LINK:
+#ifdef PCIE_FULL_DONGLE
+ if (dhd_update_interface_link_status(dhd_pub, (uint8)dhd_ifname2idx(dhd_pub->info,
+ event->ifname), (uint8)flags) != BCME_OK) {
+ DHD_ERROR(("%s: dhd_update_interface_link_status Failed.\n",
+ __FUNCTION__));
+ break;
+ }
+ if (!flags) {
+ DHD_ERROR(("%s: Deleting all STA from assoc list and flowrings.\n",
+ __FUNCTION__));
+ /* Delete all sta and flowrings */
+ dhd_del_all_sta(dhd_pub, dhd_ifname2idx(dhd_pub->info, event->ifname));
+ dhd_flow_rings_delete(dhd_pub, (uint8)dhd_ifname2idx(dhd_pub->info,
+ event->ifname));
+ }
+ /* fall through */
+#endif /* PCIE_FULL_DONGLE */
+ case WLC_E_DEAUTH:
+ case WLC_E_DEAUTH_IND:
+ case WLC_E_DISASSOC:
+ case WLC_E_DISASSOC_IND:
+#ifdef PCIE_FULL_DONGLE
+ if (type != WLC_E_LINK) {
+ uint8 ifindex = (uint8)dhd_ifname2idx(dhd_pub->info, event->ifname);
+ uint8 role = dhd_flow_rings_ifindex2role(dhd_pub, ifindex);
+ uint8 del_sta = TRUE;
+#ifdef WL_CFG80211
+ if (role == WLC_E_IF_ROLE_STA &&
+ !wl_cfg80211_is_roam_offload(dhd_idx2net(dhd_pub, ifindex)) &&
+ !wl_cfg80211_is_event_from_connected_bssid(
+ dhd_idx2net(dhd_pub, ifindex), event, *ifidx)) {
+ del_sta = FALSE;
+ }
+#endif /* WL_CFG80211 */
+ DHD_EVENT(("%s: Link event %d, flags %x, status %x, role %d, del_sta %d\n",
+ __FUNCTION__, type, flags, status, role, del_sta));
+
+ if (del_sta) {
+ DHD_EVENT(("%s: Deleting STA " MACDBG "\n",
+ __FUNCTION__, MAC2STRDBG(event->addr.octet)));
+
+ dhd_del_sta(dhd_pub, dhd_ifname2idx(dhd_pub->info,
+ event->ifname), &event->addr.octet);
+ /* Delete all flowrings for STA and P2P Client */
+ if (role == WLC_E_IF_ROLE_STA || role == WLC_E_IF_ROLE_P2P_CLIENT) {
+ dhd_flow_rings_delete(dhd_pub, ifindex);
+ } else {
+ dhd_flow_rings_delete_for_peer(dhd_pub, ifindex,
+ (char *)&event->addr.octet[0]);
+ }
+ }
+ }
+#endif /* PCIE_FULL_DONGLE */
+#ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
+ ifp = dhd_get_ifp(dhd_pub, event->ifidx);
+ if (ifp) {
+ ifp->recv_reassoc_evt = FALSE;
+ ifp->post_roam_evt = FALSE;
+ }
+#endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
+ /* fall through */
+ default:
+ *ifidx = dhd_ifname2idx(dhd_pub->info, event->ifname);
+#ifdef DHD_UPDATE_INTF_MAC
+		if ((WLC_E_LINK == type) && (WLC_EVENT_MSG_LINK & flags)) {
+ dhd_event_ifchange(dhd_pub->info,
+ (struct wl_event_data_if *)event,
+ event->ifname,
+ event->addr.octet);
+ }
+#endif /* DHD_UPDATE_INTF_MAC */
+ /* push up to external supp/auth */
+ dhd_event(dhd_pub->info, (char *)pvt_data, evlen, *ifidx);
+ DHD_TRACE(("%s: MAC event %d, flags %x, status %x\n",
+ __FUNCTION__, type, flags, status));
+ BCM_REFERENCE(flags);
+ BCM_REFERENCE(status);
+ BCM_REFERENCE(reason);
+
+ break;
+ }
+#if defined(BCM_ROUTER_DHD) || defined(STBAP)
+	/* For routers, EAPD will be working on these events.
+	 * Overwrite the interface name so that the event is pushed
+	 * to the host with its registered interface name.
+	 */
+ memcpy(pvt_data->event.ifname, dhd_ifname(dhd_pub, *ifidx), IFNAMSIZ);
+#endif
+
+#ifdef DHD_STATUS_LOGGING
+ if (dhd_pub->statlog) {
+ dhd_statlog_process_event(dhd_pub, type, *ifidx,
+ status, reason, flags);
+ }
+#endif /* DHD_STATUS_LOGGING */
+
+#ifdef SHOW_EVENTS
+ if (DHD_FWLOG_ON() || DHD_EVENT_ON()) {
+ wl_show_host_event(dhd_pub, event,
+ (void *)event_data, raw_event, dhd_pub->enable_log);
+ }
+#endif /* SHOW_EVENTS */
+
+ return (BCME_OK);
+} /* wl_process_host_event */
+
+int
+wl_host_event(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata, uint pktlen,
+ wl_event_msg_t *event, void **data_ptr, void *raw_event)
+{
+ return wl_process_host_event(dhd_pub, ifidx, pktdata, pktlen, event, data_ptr,
+ raw_event);
+}
+
+void
+dhd_print_buf(void *pbuf, int len, int bytes_per_line)
+{
+#ifdef DHD_DEBUG
+ int i, j = 0;
+ unsigned char *buf = pbuf;
+
+ if (bytes_per_line == 0) {
+ bytes_per_line = len;
+ }
+
+ for (i = 0; i < len; i++) {
+ printf("%2.2x", *buf++);
+ j++;
+ if (j == bytes_per_line) {
+ printf("\n");
+ j = 0;
+ } else {
+ printf(":");
+ }
+ }
+ printf("\n");
+#endif /* DHD_DEBUG */
+}
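+
+/* Illustrative example (comment only, not part of the driver): dumping
+ * the four bytes 0xDE 0xAD 0xBE 0xEF with bytes_per_line = 2 prints:
+ *
+ *   de:ad
+ *   be:ef
+ *
+ * Passing bytes_per_line = 0 prints the whole buffer on a single line.
+ */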
+#ifndef strtoul
+#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
+#endif
+
+/* Convert user's input in hex pattern to byte-size mask */
+int
+wl_pattern_atoh(char *src, char *dst)
+{
+ int i;
+ if (strncmp(src, "0x", 2) != 0 &&
+ strncmp(src, "0X", 2) != 0) {
+		DHD_ERROR(("Invalid mask format: must start with 0x\n"));
+ return -1;
+ }
+ src = src + 2; /* Skip past 0x */
+ if (strlen(src) % 2 != 0) {
+		DHD_ERROR(("Invalid mask format: must be of even length\n"));
+ return -1;
+ }
+ for (i = 0; *src != '\0'; i++) {
+ char num[3];
+ bcm_strncpy_s(num, sizeof(num), src, 2);
+ num[2] = '\0';
+ dst[i] = (uint8)strtoul(num, NULL, 16);
+ src += 2;
+ }
+ return i;
+}
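+
+/* Illustrative example (comment only): parsing the mask string
+ * "0x00ff00ff" fills dst with {0x00, 0xFF, 0x00, 0xFF} and returns 4,
+ * the number of bytes written. Note that the caller must size dst for
+ * strlen(src)/2 bytes; unlike pattern_atoh_len() below, this variant
+ * performs no bounds checking on dst:
+ *
+ *   char mask[4];
+ *   int n = wl_pattern_atoh("0x00ff00ff", mask);	// n == 4
+ */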
+
+#if defined(PKT_FILTER_SUPPORT) || defined(DHD_PKT_LOGGING)
+int
+pattern_atoh_len(char *src, char *dst, int len)
+{
+ int i;
+ if (strncmp(src, "0x", HD_PREFIX_SIZE) != 0 &&
+ strncmp(src, "0X", HD_PREFIX_SIZE) != 0) {
+		DHD_ERROR(("Invalid mask format: must start with 0x\n"));
+ return -1;
+ }
+ src = src + HD_PREFIX_SIZE; /* Skip past 0x */
+ if (strlen(src) % HD_BYTE_SIZE != 0) {
+		DHD_ERROR(("Invalid mask format: must be of even length\n"));
+ return -1;
+ }
+ for (i = 0; *src != '\0'; i++) {
+ char num[HD_BYTE_SIZE + 1];
+
+ if (i > len - 1) {
+ DHD_ERROR(("pattern not in range, idx: %d len: %d\n", i, len));
+ return -1;
+ }
+ bcm_strncpy_s(num, sizeof(num), src, HD_BYTE_SIZE);
+ num[HD_BYTE_SIZE] = '\0';
+ dst[i] = (uint8)strtoul(num, NULL, 16);
+ src += HD_BYTE_SIZE;
+ }
+ return i;
+}
+#endif /* PKT_FILTER_SUPPORT || DHD_PKT_LOGGING */
+
+#ifdef PKT_FILTER_SUPPORT
+void
+dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode)
+{
+ char *argv[8];
+ int i = 0;
+ const char *str;
+ int buf_len;
+ int str_len;
+ char *arg_save = 0, *arg_org = 0;
+ int rc;
+ char buf[32] = {0};
+ wl_pkt_filter_enable_t enable_parm;
+ wl_pkt_filter_enable_t * pkt_filterp;
+
+ if (!arg)
+ return;
+
+ if (!(arg_save = MALLOC(dhd->osh, strlen(arg) + 1))) {
+ DHD_ERROR(("%s: malloc failed\n", __FUNCTION__));
+ goto fail;
+ }
+ arg_org = arg_save;
+ memcpy(arg_save, arg, strlen(arg) + 1);
+
+ argv[i] = bcmstrtok(&arg_save, " ", 0);
+
+ i = 0;
+ if (argv[i] == NULL) {
+ DHD_ERROR(("No args provided\n"));
+ goto fail;
+ }
+
+ str = "pkt_filter_enable";
+ str_len = strlen(str);
+ bcm_strncpy_s(buf, sizeof(buf) - 1, str, sizeof(buf) - 1);
+ buf[ sizeof(buf) - 1 ] = '\0';
+ buf_len = str_len + 1;
+
+ pkt_filterp = (wl_pkt_filter_enable_t *)(buf + str_len + 1);
+
+ /* Parse packet filter id. */
+ enable_parm.id = htod32(strtoul(argv[i], NULL, 0));
+ if (dhd_conf_del_pkt_filter(dhd, enable_parm.id))
+ goto fail;
+
+ /* Parse enable/disable value. */
+ enable_parm.enable = htod32(enable);
+
+ buf_len += sizeof(enable_parm);
+ memcpy((char *)pkt_filterp,
+ &enable_parm,
+ sizeof(enable_parm));
+
+ /* Enable/disable the specified filter. */
+ rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);
+ rc = rc >= 0 ? 0 : rc;
+ if (rc) {
+ DHD_ERROR(("%s: failed to %s pktfilter %s, retcode = %d\n",
+ __FUNCTION__, enable?"enable":"disable", arg, rc));
+ dhd_set_packet_filter(dhd);
+ rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);
+ rc = rc >= 0 ? 0 : rc;
+ if (rc) {
+ DHD_TRACE_HW4(("%s: 2nd retry failed to add pktfilter %s, retcode = %d\n",
+ __FUNCTION__, arg, rc));
+ } else {
+ DHD_TRACE_HW4(("%s: 2nd retry successfully added pktfilter %s\n",
+ __FUNCTION__, arg));
+ }
+ }
+ else
+ DHD_TRACE(("%s: successfully %s pktfilter %s\n",
+ __FUNCTION__, enable?"enable":"disable", arg));
+
+	/* Control the master mode */
+ rc = dhd_wl_ioctl_set_intiovar(dhd, "pkt_filter_mode",
+ master_mode, WLC_SET_VAR, TRUE, 0);
+ rc = rc >= 0 ? 0 : rc;
+ if (rc)
+ DHD_TRACE(("%s: failed to set pkt_filter_mode %d, retcode = %d\n",
+ __FUNCTION__, master_mode, rc));
+
+fail:
+ if (arg_org)
+ MFREE(dhd->osh, arg_org, strlen(arg) + 1);
+}
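+
+/* A sketch of the ioctl buffer built above (layout as described by the
+ * code; offsets illustrative):
+ *
+ *   +-------------------------+------------------------------+
+ *   | "pkt_filter_enable\0"   | wl_pkt_filter_enable_t       |
+ *   | (iovar name, str_len+1) | { id (LE32), enable (LE32) } |
+ *   +-------------------------+------------------------------+
+ *
+ * A hypothetical call site enabling filter id 100 with master mode 1:
+ *
+ *   dhd_pktfilter_offload_enable(dhd, "100", 1, 1);
+ */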
+
+/* Packet filter section: extended filters have named offsets, add table here */
+typedef struct {
+ char *name;
+ uint16 base;
+} wl_pfbase_t;
+
+static wl_pfbase_t basenames[] = { WL_PKT_FILTER_BASE_NAMES };
+
+static int
+wl_pkt_filter_base_parse(char *name)
+{
+ uint i;
+ char *bname, *uname;
+
+ for (i = 0; i < ARRAYSIZE(basenames); i++) {
+ bname = basenames[i].name;
+ for (uname = name; *uname; bname++, uname++) {
+ if (*bname != bcm_toupper(*uname)) {
+ break;
+ }
+ }
+ if (!*uname && !*bname) {
+ break;
+ }
+ }
+
+ if (i < ARRAYSIZE(basenames)) {
+ return basenames[i].base;
+ } else {
+ return -1;
+ }
+}
+
+void
+dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg)
+{
+ const char *str;
+ wl_pkt_filter_t pkt_filter;
+ wl_pkt_filter_t *pkt_filterp;
+ int buf_len;
+ int str_len;
+ int rc = -1;
+ uint32 mask_size;
+ uint32 pattern_size;
+ char *argv[MAXPKT_ARG] = {0}, * buf = 0;
+ int i = 0;
+ char *arg_save = 0, *arg_org = 0;
+
+ if (!arg)
+ return;
+
+ if (!(arg_save = MALLOC(dhd->osh, strlen(arg) + 1))) {
+ DHD_ERROR(("%s: malloc failed\n", __FUNCTION__));
+ goto fail;
+ }
+
+ arg_org = arg_save;
+
+ if (!(buf = MALLOC(dhd->osh, MAX_PKTFLT_BUF_SIZE))) {
+ DHD_ERROR(("%s: malloc failed\n", __FUNCTION__));
+ goto fail;
+ }
+
+ memset(buf, 0, MAX_PKTFLT_BUF_SIZE);
+ memcpy(arg_save, arg, strlen(arg) + 1);
+
+ if (strlen(arg) > MAX_PKTFLT_BUF_SIZE) {
+		DHD_ERROR(("Not enough buffer %d < %d\n",
+			(int)MAX_PKTFLT_BUF_SIZE, (int)strlen(arg)));
+ goto fail;
+ }
+
+ argv[i] = bcmstrtok(&arg_save, " ", 0);
+ while (argv[i++]) {
+ if (i >= MAXPKT_ARG) {
+ DHD_ERROR(("Invalid args provided\n"));
+ goto fail;
+ }
+ argv[i] = bcmstrtok(&arg_save, " ", 0);
+ }
+
+ i = 0;
+ if (argv[i] == NULL) {
+ DHD_ERROR(("No args provided\n"));
+ goto fail;
+ }
+
+ str = "pkt_filter_add";
+ str_len = strlen(str);
+ bcm_strncpy_s(buf, MAX_PKTFLT_BUF_SIZE, str, str_len);
+ buf[ str_len ] = '\0';
+ buf_len = str_len + 1;
+
+ pkt_filterp = (wl_pkt_filter_t *) (buf + str_len + 1);
+
+ /* Parse packet filter id. */
+ pkt_filter.id = htod32(strtoul(argv[i], NULL, 0));
+
+ if (argv[++i] == NULL) {
+ DHD_ERROR(("Polarity not provided\n"));
+ goto fail;
+ }
+
+ /* Parse filter polarity. */
+ pkt_filter.negate_match = htod32(strtoul(argv[i], NULL, 0));
+
+ if (argv[++i] == NULL) {
+ DHD_ERROR(("Filter type not provided\n"));
+ goto fail;
+ }
+
+ /* Parse filter type. */
+ pkt_filter.type = htod32(strtoul(argv[i], NULL, 0));
+
+ if ((pkt_filter.type == 0) || (pkt_filter.type == 1)) {
+ if (argv[++i] == NULL) {
+ DHD_ERROR(("Offset not provided\n"));
+ goto fail;
+ }
+
+ /* Parse pattern filter offset. */
+ pkt_filter.u.pattern.offset = htod32(strtoul(argv[i], NULL, 0));
+
+ if (argv[++i] == NULL) {
+ DHD_ERROR(("Bitmask not provided\n"));
+ goto fail;
+ }
+
+ /* Parse pattern filter mask. */
+ rc = wl_pattern_atoh(argv[i],
+ (char *) pkt_filterp->u.pattern.mask_and_pattern);
+
+ if (rc == -1) {
+ DHD_ERROR(("Rejecting: %s\n", argv[i]));
+ goto fail;
+ }
+ mask_size = htod32(rc);
+ if (argv[++i] == NULL) {
+ DHD_ERROR(("Pattern not provided\n"));
+ goto fail;
+ }
+
+ /* Parse pattern filter pattern. */
+ rc = wl_pattern_atoh(argv[i],
+ (char *) &pkt_filterp->u.pattern.mask_and_pattern[rc]);
+
+ if (rc == -1) {
+ DHD_ERROR(("Rejecting: %s\n", argv[i]));
+ goto fail;
+ }
+ pattern_size = htod32(rc);
+ if (mask_size != pattern_size) {
+ DHD_ERROR(("Mask and pattern not the same size\n"));
+ goto fail;
+ }
+
+ pkt_filter.u.pattern.size_bytes = mask_size;
+ buf_len += WL_PKT_FILTER_FIXED_LEN;
+ buf_len += (WL_PKT_FILTER_PATTERN_FIXED_LEN + 2 * rc);
+
+		/* Filter attributes are set in a local variable (pkt_filter), and
+		 * then memcpy'ed into the ioctl buffer (pkt_filterp) since there is
+		 * no guarantee that the buffer is properly aligned.
+		 */
+ memcpy((char *)pkt_filterp,
+ &pkt_filter,
+ WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_FIXED_LEN);
+ } else if ((pkt_filter.type == 2) || (pkt_filter.type == 6)) {
+ int list_cnt = 0;
+ char *endptr = NULL;
+ wl_pkt_filter_pattern_listel_t *pf_el =
+ (wl_pkt_filter_pattern_listel_t *)&pkt_filterp->u.patlist.patterns[0];
+
+ while (argv[++i] != NULL) {
+ /* Check valid buffer size. */
+ if ((buf_len + MAX_PKTFLT_FIXED_BUF_SIZE) > MAX_PKTFLT_BUF_SIZE) {
+ DHD_ERROR(("buffer over length MAX_PKTFLT_FIXED_BUF_SIZE\n"));
+ goto fail;
+ }
+
+ /* Parse pattern filter base and offset. */
+ if (bcm_isdigit(*argv[i])) {
+ /* Numeric base */
+ rc = strtoul(argv[i], &endptr, 0);
+ } else {
+ endptr = strchr(argv[i], ':');
+ if (endptr) {
+ *endptr = '\0';
+ rc = wl_pkt_filter_base_parse(argv[i]);
+ if (rc == -1) {
+ printf("Invalid base %s\n", argv[i]);
+ goto fail;
+ }
+ *endptr = ':';
+ }
+ }
+
+ if (endptr == NULL) {
+ printf("Invalid [base:]offset format: %s\n", argv[i]);
+ goto fail;
+ }
+
+ if (*endptr == ':') {
+ pf_el->base_offs = htod16(rc);
+ rc = strtoul(endptr + 1, &endptr, 0);
+ } else {
+ /* Must have had a numeric offset only */
+ pf_el->base_offs = htod16(0);
+ }
+
+ if (*endptr) {
+ printf("Invalid [base:]offset format: %s\n", argv[i]);
+ goto fail;
+ }
+ if (rc > 0x0000FFFF) {
+ printf("Offset too large\n");
+ goto fail;
+ }
+ pf_el->rel_offs = htod16(rc);
+
+ /* Clear match_flag (may be set in parsing which follows) */
+ pf_el->match_flags = htod16(0);
+
+ /* Parse pattern filter mask and pattern directly into ioctl buffer */
+ if (argv[++i] == NULL) {
+ printf("Bitmask not provided\n");
+ goto fail;
+ }
+ rc = wl_pattern_atoh(argv[i], (char*)pf_el->mask_and_data);
+ if ((rc == -1) || (rc > MAX_PKTFLT_FIXED_PATTERN_SIZE)) {
+ printf("Rejecting: %s\n", argv[i]);
+ goto fail;
+ }
+ mask_size = htod16(rc);
+
+ if (argv[++i] == NULL) {
+ printf("Pattern not provided\n");
+ goto fail;
+ }
+
+ endptr = argv[i];
+ if (*endptr == '!') {
+ pf_el->match_flags =
+ htod16(WL_PKT_FILTER_MFLAG_NEG);
+ if (*(++endptr) == '\0') {
+ printf("Pattern not provided\n");
+ goto fail;
+ }
+ }
+ rc = wl_pattern_atoh(endptr, (char*)&pf_el->mask_and_data[rc]);
+ if ((rc == -1) || (rc > MAX_PKTFLT_FIXED_PATTERN_SIZE)) {
+ printf("Rejecting: %s\n", argv[i]);
+ goto fail;
+ }
+ pattern_size = htod16(rc);
+
+ if (mask_size != pattern_size) {
+ printf("Mask and pattern not the same size\n");
+ goto fail;
+ }
+
+ pf_el->size_bytes = mask_size;
+
+ /* Account for the size of this pattern element */
+ buf_len += WL_PKT_FILTER_PATTERN_LISTEL_FIXED_LEN + 2 * rc;
+
+ /* Move to next element location in ioctl buffer */
+ pf_el = (wl_pkt_filter_pattern_listel_t*)
+ ((uint8*)pf_el + WL_PKT_FILTER_PATTERN_LISTEL_FIXED_LEN + 2 * rc);
+
+ /* Count list element */
+ list_cnt++;
+ }
+
+ /* Account for initial fixed size, and copy initial fixed fields */
+ buf_len += WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_LIST_FIXED_LEN;
+
+ if (buf_len > MAX_PKTFLT_BUF_SIZE) {
+ DHD_ERROR(("buffer over length MAX_PKTFLT_BUF_SIZE\n"));
+ goto fail;
+ }
+
+ /* Update list count and total size */
+ pkt_filter.u.patlist.list_cnt = list_cnt;
+ pkt_filter.u.patlist.PAD1[0] = 0;
+ pkt_filter.u.patlist.totsize = buf + buf_len - (char*)pkt_filterp;
+ pkt_filter.u.patlist.totsize -= WL_PKT_FILTER_FIXED_LEN;
+
+ memcpy((char *)pkt_filterp, &pkt_filter,
+ WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_LIST_FIXED_LEN);
+ } else {
+ DHD_ERROR(("Invalid filter type %d\n", pkt_filter.type));
+ goto fail;
+ }
+
+ rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);
+ rc = rc >= 0 ? 0 : rc;
+
+ if (rc)
+ DHD_ERROR(("%s: failed to add pktfilter %s, retcode = %d\n",
+ __FUNCTION__, arg, rc));
+ else
+ DHD_TRACE(("%s: successfully added pktfilter %s\n",
+ __FUNCTION__, arg));
+
+fail:
+ if (arg_org)
+ MFREE(dhd->osh, arg_org, strlen(arg) + 1);
+
+ if (buf)
+ MFREE(dhd->osh, buf, MAX_PKTFLT_BUF_SIZE);
+}
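+
+/* Worked example (illustrative, not compiled): the arg string is
+ * "<id> <polarity> <type> ...", where type 0/1 takes
+ * "<offset> <mask> <pattern>" and type 2/6 takes a list of
+ * "[base:]offset <mask> [!]<pattern>" tuples. A hypothetical pattern
+ * filter matching the ARP ethertype (0x0806) at byte offset 12 of an
+ * Ethernet frame would be:
+ *
+ *   dhd_pktfilter_offload_set(dhd, "104 0 0 12 0xFFFF 0x0806");
+ *
+ * i.e. filter id 104, no polarity negation, type 0 (pattern), offset
+ * 12, a two-byte all-ones mask, and the pattern bytes 08 06.
+ */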
+
+void
+dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id)
+{
+ int ret;
+
+ ret = dhd_wl_ioctl_set_intiovar(dhd, "pkt_filter_delete",
+ id, WLC_SET_VAR, TRUE, 0);
+ if (ret < 0) {
+ DHD_ERROR(("%s: Failed to delete filter ID:%d, ret=%d\n",
+ __FUNCTION__, id, ret));
+ }
+ else
+ DHD_TRACE(("%s: successfully deleted pktfilter %d\n",
+ __FUNCTION__, id));
+}
+#endif /* PKT_FILTER_SUPPORT */
+
+/* ========================== */
+/* ==== ARP OFFLOAD SUPPORT = */
+/* ========================== */
+#ifdef ARP_OFFLOAD_SUPPORT
+void
+dhd_arp_offload_set(dhd_pub_t * dhd, int arp_mode)
+{
+ int retcode;
+
+ retcode = dhd_wl_ioctl_set_intiovar(dhd, "arp_ol",
+ arp_mode, WLC_SET_VAR, TRUE, 0);
+
+ retcode = retcode >= 0 ? 0 : retcode;
+ if (retcode) {
+ DHD_ERROR(("%s: failed to set ARP offload mode to 0x%x, retcode = %d\n",
+ __FUNCTION__, arp_mode, retcode));
+ } else {
+ DHD_ARPOE(("%s: successfully set ARP offload mode to 0x%x\n",
+ __FUNCTION__, arp_mode));
+ dhd->arpol_configured = TRUE;
+ }
+}
+
+void
+dhd_arp_offload_enable(dhd_pub_t * dhd, int arp_enable)
+{
+ int retcode;
+
+ if (!dhd->arpol_configured) {
+ /* If arpol is not applied, apply it */
+ dhd_arp_offload_set(dhd, dhd_arp_mode);
+ }
+
+ retcode = dhd_wl_ioctl_set_intiovar(dhd, "arpoe",
+ arp_enable, WLC_SET_VAR, TRUE, 0);
+ retcode = retcode >= 0 ? 0 : retcode;
+ if (retcode)
+		DHD_ERROR(("%s: failed to enable ARP offload to %d, retcode = %d\n",
+ __FUNCTION__, arp_enable, retcode));
+ else
+#ifdef DHD_LOG_DUMP
+		DHD_LOG_MEM(("%s: successfully enabled ARP offload to %d\n",
+ __FUNCTION__, arp_enable));
+#else
+		DHD_ARPOE(("%s: successfully enabled ARP offload to %d\n",
+ __FUNCTION__, arp_enable));
+#endif /* DHD_LOG_DUMP */
+ if (arp_enable) {
+ uint32 version;
+ retcode = dhd_wl_ioctl_get_intiovar(dhd, "arp_version",
+ &version, WLC_GET_VAR, FALSE, 0);
+ if (retcode) {
+			DHD_INFO(("%s: failed to get version (assuming version 1), retcode = %d\n",
+ __FUNCTION__, retcode));
+ dhd->arp_version = 1;
+ }
+ else {
+ DHD_INFO(("%s: ARP Version= %x\n", __FUNCTION__, version));
+ dhd->arp_version = version;
+ }
+ }
+}
+
+/* XXX ANDREY: clear AOE arp_table */
+void
+dhd_aoe_arp_clr(dhd_pub_t *dhd, int idx)
+{
+ int ret = 0;
+
+ if (dhd == NULL) return;
+ if (dhd->arp_version == 1)
+ idx = 0;
+
+ ret = dhd_iovar(dhd, idx, "arp_table_clear", NULL, 0, NULL, 0, TRUE);
+ if (ret < 0)
+ DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
+ else {
+#ifdef DHD_LOG_DUMP
+ DHD_LOG_MEM(("%s: ARP table clear\n", __FUNCTION__));
+#else
+ DHD_TRACE(("%s: ARP table clear\n", __FUNCTION__));
+#endif /* DHD_LOG_DUMP */
+ }
+ /* mac address isn't cleared here but it will be cleared after dongle off */
+ dhd->hmac_updated = 0;
+}
+
+/* XXX ANDREY: clear hostip table */
+void
+dhd_aoe_hostip_clr(dhd_pub_t *dhd, int idx)
+{
+ int ret = 0;
+
+ if (dhd == NULL) return;
+ if (dhd->arp_version == 1)
+ idx = 0;
+
+ ret = dhd_iovar(dhd, idx, "arp_hostip_clear", NULL, 0, NULL, 0, TRUE);
+ if (ret < 0)
+ DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
+ else {
+#ifdef DHD_LOG_DUMP
+ DHD_LOG_MEM(("%s: ARP host ip clear\n", __FUNCTION__));
+#else
+ DHD_TRACE(("%s: ARP host ip clear\n", __FUNCTION__));
+#endif /* DHD_LOG_DUMP */
+ }
+}
+
+void
+dhd_arp_offload_add_ip(dhd_pub_t *dhd, uint32 ipaddr, int idx)
+{
+ int ret;
+
+ if (dhd == NULL) return;
+ if (dhd->arp_version == 1)
+ idx = 0;
+
+ ret = dhd_iovar(dhd, idx, "arp_hostip", (char *)&ipaddr, sizeof(ipaddr),
+ NULL, 0, TRUE);
+ if (ret < 0)
+ DHD_ERROR(("%s: ARP ip addr add failed, ret = %d\n", __FUNCTION__, ret));
+ else {
+ /* mac address is updated in the dongle */
+ dhd->hmac_updated = 1;
+#ifdef DHD_LOG_DUMP
+ DHD_LOG_MEM(("%s: ARP ip addr entry added \n", __FUNCTION__));
+#else
+ DHD_ARPOE(("%s: ARP ip addr entry added \n", __FUNCTION__));
+#endif /* DHD_LOG_DUMP */
+ }
+}
+
+int
+dhd_arp_get_arp_hostip_table(dhd_pub_t *dhd, void *buf, int buflen, int idx)
+{
+ int ret, i;
+ uint32 *ptr32 = buf;
+ bool clr_bottom = FALSE;
+
+ if (!buf)
+ return -1;
+ if (dhd == NULL) return -1;
+ if (dhd->arp_version == 1)
+ idx = 0;
+
+ ret = dhd_iovar(dhd, idx, "arp_hostip", NULL, 0, (char *)buf, buflen,
+ FALSE);
+ if (ret) {
+ DHD_ERROR(("%s: ioctl WLC_GET_VAR error %d\n",
+ __FUNCTION__, ret));
+
+ return -1;
+ }
+
+	/* clean up the buffer: zero out all entries after the first empty one */
+ for (i = 0; i < MAX_IPV4_ENTRIES; i++) {
+ if (!clr_bottom) {
+ if (*ptr32 == 0)
+ clr_bottom = TRUE;
+ } else {
+ *ptr32 = 0;
+ }
+ ptr32++;
+ }
+
+ return 0;
+}
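+
+/* A minimal usage sketch (illustrative): the table is read as an array
+ * of up to MAX_IPV4_ENTRIES uint32 addresses, and entries after the
+ * first zero entry are cleared above, so a caller can stop at the
+ * first zero:
+ *
+ *   uint32 hostip[MAX_IPV4_ENTRIES];
+ *   if (dhd_arp_get_arp_hostip_table(dhd, hostip, sizeof(hostip), 0) == 0) {
+ *       int i;
+ *       for (i = 0; i < MAX_IPV4_ENTRIES && hostip[i]; i++)
+ *           DHD_INFO(("hostip[%d] = 0x%08x\n", i, hostip[i]));
+ *   }
+ */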
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+/*
+ * Neighbor Discovery Offload: enable NDO feature
+ * Called by ipv6 event handler when interface comes up/goes down
+ */
+int
+dhd_ndo_enable(dhd_pub_t * dhd, int ndo_enable)
+{
+ int retcode;
+
+ if (dhd == NULL)
+ return -1;
+
+#if defined(WL_CFG80211) && defined(WL_NAN)
+ if (wl_cfgnan_is_dp_active(dhd_linux_get_primary_netdev(dhd))) {
+ /* If nan dp is active, skip NDO */
+ DHD_INFO(("Active NAN DP, skip NDO\n"));
+ return 0;
+ }
+#endif /* WL_CFG80211 && WL_NAN */
+#ifdef WL_CFG80211
+ if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
+ /* NDO disable on STA+SOFTAP mode */
+ ndo_enable = FALSE;
+ }
+#endif /* WL_CFG80211 */
+ retcode = dhd_wl_ioctl_set_intiovar(dhd, "ndoe",
+ ndo_enable, WLC_SET_VAR, TRUE, 0);
+ if (retcode)
+		DHD_ERROR(("%s: failed to enable NDO to %d, retcode = %d\n",
+ __FUNCTION__, ndo_enable, retcode));
+ else
+		DHD_TRACE(("%s: successfully enabled NDO offload to %d\n",
+ __FUNCTION__, ndo_enable));
+
+ return retcode;
+}
+
+/*
+ * Neighbor Discovery Offload: add a host IPv6 address.
+ * Called by the IPv6 event handler when an interface comes up.
+ */
+int
+dhd_ndo_add_ip(dhd_pub_t *dhd, char* ipv6addr, int idx)
+{
+ int iov_len = 0;
+ char iovbuf[DHD_IOVAR_BUF_SIZE];
+ int retcode;
+
+ if (dhd == NULL)
+ return -1;
+
+ iov_len = bcm_mkiovar("nd_hostip", (char *)ipv6addr,
+ IPV6_ADDR_LEN, iovbuf, sizeof(iovbuf));
+ if (!iov_len) {
+ DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
+ __FUNCTION__, sizeof(iovbuf)));
+ return -1;
+ }
+ retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx);
+
+ if (retcode)
+ DHD_ERROR(("%s: ndo ip addr add failed, retcode = %d\n",
+ __FUNCTION__, retcode));
+ else
+ DHD_TRACE(("%s: ndo ipaddr entry added \n",
+ __FUNCTION__));
+
+ return retcode;
+}
+
+/*
+ * Neighbor Discovery Offload: remove host IPv6 addresses.
+ * Called by the IPv6 event handler when an interface goes down.
+ */
+int
+dhd_ndo_remove_ip(dhd_pub_t *dhd, int idx)
+{
+ int iov_len = 0;
+ char iovbuf[DHD_IOVAR_BUF_SIZE];
+ int retcode;
+
+ if (dhd == NULL)
+ return -1;
+
+ iov_len = bcm_mkiovar("nd_hostip_clear", NULL,
+ 0, iovbuf, sizeof(iovbuf));
+ if (!iov_len) {
+ DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
+ __FUNCTION__, sizeof(iovbuf)));
+ return -1;
+ }
+ retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx);
+
+ if (retcode)
+ DHD_ERROR(("%s: ndo ip addr remove failed, retcode = %d\n",
+ __FUNCTION__, retcode));
+ else
+ DHD_TRACE(("%s: ndo ipaddr entry removed \n",
+ __FUNCTION__));
+
+ return retcode;
+}
+/* Enhanced ND offload */
+uint16
+dhd_ndo_get_version(dhd_pub_t *dhdp)
+{
+ char iovbuf[DHD_IOVAR_BUF_SIZE];
+ wl_nd_hostip_t ndo_get_ver;
+ int iov_len;
+ int retcode;
+ uint16 ver = 0;
+
+ if (dhdp == NULL) {
+ return BCME_ERROR;
+ }
+
+ memset(&iovbuf, 0, sizeof(iovbuf));
+ ndo_get_ver.version = htod16(WL_ND_HOSTIP_IOV_VER);
+ ndo_get_ver.op_type = htod16(WL_ND_HOSTIP_OP_VER);
+ ndo_get_ver.length = htod32(WL_ND_HOSTIP_FIXED_LEN + sizeof(uint16));
+ ndo_get_ver.u.version = 0;
+ iov_len = bcm_mkiovar("nd_hostip", (char *)&ndo_get_ver,
+ WL_ND_HOSTIP_FIXED_LEN + sizeof(uint16), iovbuf, sizeof(iovbuf));
+
+ if (!iov_len) {
+ DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
+ __FUNCTION__, sizeof(iovbuf)));
+ return BCME_ERROR;
+ }
+
+ retcode = dhd_wl_ioctl_cmd(dhdp, WLC_GET_VAR, iovbuf, iov_len, FALSE, 0);
+
+ if (retcode) {
+ DHD_ERROR(("%s: failed, retcode = %d\n", __FUNCTION__, retcode));
+ /* ver iovar not supported. NDO version is 0 */
+ ver = 0;
+ } else {
+ wl_nd_hostip_t *ndo_ver_ret = (wl_nd_hostip_t *)iovbuf;
+
+ if ((dtoh16(ndo_ver_ret->version) == WL_ND_HOSTIP_IOV_VER) &&
+ (dtoh16(ndo_ver_ret->op_type) == WL_ND_HOSTIP_OP_VER) &&
+ (dtoh32(ndo_ver_ret->length) == WL_ND_HOSTIP_FIXED_LEN
+ + sizeof(uint16))) {
+ /* nd_hostip iovar version */
+ ver = dtoh16(ndo_ver_ret->u.version);
+ }
+
+ DHD_TRACE(("%s: successfully get version: %d\n", __FUNCTION__, ver));
+ }
+
+ return ver;
+}
+
+int
+dhd_ndo_add_ip_with_type(dhd_pub_t *dhdp, char *ipv6addr, uint8 type, int idx)
+{
+ char iovbuf[DHD_IOVAR_BUF_SIZE];
+ wl_nd_hostip_t ndo_add_addr;
+ int iov_len;
+ int retcode;
+
+ if (dhdp == NULL || ipv6addr == 0) {
+ return BCME_ERROR;
+ }
+
+ /* wl_nd_hostip_t fixed param */
+ ndo_add_addr.version = htod16(WL_ND_HOSTIP_IOV_VER);
+ ndo_add_addr.op_type = htod16(WL_ND_HOSTIP_OP_ADD);
+ ndo_add_addr.length = htod32(WL_ND_HOSTIP_WITH_ADDR_LEN);
+ /* wl_nd_host_ip_addr_t param for add */
+ memcpy(&ndo_add_addr.u.host_ip.ip_addr, ipv6addr, IPV6_ADDR_LEN);
+ ndo_add_addr.u.host_ip.type = type;
+
+ iov_len = bcm_mkiovar("nd_hostip", (char *)&ndo_add_addr,
+ WL_ND_HOSTIP_WITH_ADDR_LEN, iovbuf, sizeof(iovbuf));
+ if (!iov_len) {
+ DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
+ __FUNCTION__, sizeof(iovbuf)));
+ return BCME_ERROR;
+ }
+
+ retcode = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx);
+ if (retcode) {
+ DHD_ERROR(("%s: failed, retcode = %d\n", __FUNCTION__, retcode));
+#ifdef NDO_CONFIG_SUPPORT
+ if (retcode == BCME_NORESOURCE) {
+			/* number of host IP addrs exceeds FW capacity, deactivate ND offload */
+			DHD_INFO(("%s: host IP count exceeds device capacity, "
+				"ND offload deactivated\n", __FUNCTION__));
+ dhdp->ndo_host_ip_overflow = TRUE;
+ dhd_ndo_enable(dhdp, FALSE);
+ }
+#endif /* NDO_CONFIG_SUPPORT */
+ } else {
+ DHD_TRACE(("%s: successfully added: %d\n", __FUNCTION__, retcode));
+ }
+
+ return retcode;
+}
+
+int
+dhd_ndo_remove_ip_by_addr(dhd_pub_t *dhdp, char *ipv6addr, int idx)
+{
+ char iovbuf[DHD_IOVAR_BUF_SIZE];
+ wl_nd_hostip_t ndo_del_addr;
+ int iov_len;
+ int retcode;
+
+ if (dhdp == NULL || ipv6addr == 0) {
+ return BCME_ERROR;
+ }
+
+ /* wl_nd_hostip_t fixed param */
+ ndo_del_addr.version = htod16(WL_ND_HOSTIP_IOV_VER);
+ ndo_del_addr.op_type = htod16(WL_ND_HOSTIP_OP_DEL);
+ ndo_del_addr.length = htod32(WL_ND_HOSTIP_WITH_ADDR_LEN);
+ /* wl_nd_host_ip_addr_t param for del */
+ memcpy(&ndo_del_addr.u.host_ip.ip_addr, ipv6addr, IPV6_ADDR_LEN);
+ ndo_del_addr.u.host_ip.type = 0; /* don't care */
+
+ iov_len = bcm_mkiovar("nd_hostip", (char *)&ndo_del_addr,
+ WL_ND_HOSTIP_WITH_ADDR_LEN, iovbuf, sizeof(iovbuf));
+
+ if (!iov_len) {
+ DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
+ __FUNCTION__, sizeof(iovbuf)));
+ return BCME_ERROR;
+ }
+
+ retcode = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx);
+ if (retcode) {
+ DHD_ERROR(("%s: failed, retcode = %d\n", __FUNCTION__, retcode));
+ } else {
+ DHD_TRACE(("%s: successfully removed: %d\n", __FUNCTION__, retcode));
+ }
+
+ return retcode;
+}
+
+int
+dhd_ndo_remove_ip_by_type(dhd_pub_t *dhdp, uint8 type, int idx)
+{
+ char iovbuf[DHD_IOVAR_BUF_SIZE];
+ wl_nd_hostip_t ndo_del_addr;
+ int iov_len;
+ int retcode;
+
+ if (dhdp == NULL) {
+ return BCME_ERROR;
+ }
+
+ /* wl_nd_hostip_t fixed param */
+ ndo_del_addr.version = htod16(WL_ND_HOSTIP_IOV_VER);
+ if (type == WL_ND_IPV6_ADDR_TYPE_UNICAST) {
+ ndo_del_addr.op_type = htod16(WL_ND_HOSTIP_OP_DEL_UC);
+ } else if (type == WL_ND_IPV6_ADDR_TYPE_ANYCAST) {
+ ndo_del_addr.op_type = htod16(WL_ND_HOSTIP_OP_DEL_AC);
+ } else {
+ return BCME_BADARG;
+ }
+ ndo_del_addr.length = htod32(WL_ND_HOSTIP_FIXED_LEN);
+
+ iov_len = bcm_mkiovar("nd_hostip", (char *)&ndo_del_addr, WL_ND_HOSTIP_FIXED_LEN,
+ iovbuf, sizeof(iovbuf));
+
+ if (!iov_len) {
+ DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
+ __FUNCTION__, sizeof(iovbuf)));
+ return BCME_ERROR;
+ }
+
+ retcode = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx);
+ if (retcode) {
+ DHD_ERROR(("%s: failed, retcode = %d\n", __FUNCTION__, retcode));
+ } else {
+ DHD_TRACE(("%s: successfully removed: %d\n", __FUNCTION__, retcode));
+ }
+
+ return retcode;
+}
+
+int
+dhd_ndo_unsolicited_na_filter_enable(dhd_pub_t *dhdp, int enable)
+{
+ char iovbuf[DHD_IOVAR_BUF_SIZE];
+ int iov_len;
+ int retcode;
+
+ if (dhdp == NULL) {
+ return BCME_ERROR;
+ }
+
+ iov_len = bcm_mkiovar("nd_unsolicited_na_filter", (char *)&enable, sizeof(int),
+ iovbuf, sizeof(iovbuf));
+
+ if (!iov_len) {
+ DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
+ __FUNCTION__, sizeof(iovbuf)));
+ return BCME_ERROR;
+ }
+
+ retcode = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, iov_len, TRUE, 0);
+ if (retcode)
+ DHD_ERROR(("%s: failed to enable Unsolicited NA filter to %d, retcode = %d\n",
+ __FUNCTION__, enable, retcode));
+ else {
+ DHD_TRACE(("%s: successfully enabled Unsolicited NA filter to %d\n",
+ __FUNCTION__, enable));
+ }
+
+ return retcode;
+}
+#ifdef SIMPLE_ISCAN
+
+uint iscan_thread_id = 0;
+iscan_buf_t * iscan_chain = 0;
+
+iscan_buf_t *
+dhd_iscan_allocate_buf(dhd_pub_t *dhd, iscan_buf_t **iscanbuf)
+{
+ iscan_buf_t *iscanbuf_alloc = 0;
+ iscan_buf_t *iscanbuf_head;
+
+ DHD_ISCAN(("%s: Entered\n", __FUNCTION__));
+ dhd_iscan_lock();
+
+ iscanbuf_alloc = (iscan_buf_t*)MALLOC(dhd->osh, sizeof(iscan_buf_t));
+ if (iscanbuf_alloc == NULL)
+ goto fail;
+
+ iscanbuf_alloc->next = NULL;
+ iscanbuf_head = *iscanbuf;
+
+	DHD_ISCAN(("%s: addr of allocated node = 0x%X, "
+		"addr of iscanbuf_head = 0x%X, dhd = 0x%X\n",
+ __FUNCTION__, iscanbuf_alloc, iscanbuf_head, dhd));
+
+ if (iscanbuf_head == NULL) {
+ *iscanbuf = iscanbuf_alloc;
+ DHD_ISCAN(("%s: Head is allocated\n", __FUNCTION__));
+ goto fail;
+ }
+
+ while (iscanbuf_head->next)
+ iscanbuf_head = iscanbuf_head->next;
+
+ iscanbuf_head->next = iscanbuf_alloc;
+
+fail:
+ dhd_iscan_unlock();
+ return iscanbuf_alloc;
+}
+
+void
+dhd_iscan_free_buf(void *dhdp, iscan_buf_t *iscan_delete)
+{
+ iscan_buf_t *iscanbuf_free = 0;
+ iscan_buf_t *iscanbuf_prv = 0;
+ iscan_buf_t *iscanbuf_cur;
+ dhd_pub_t *dhd = dhd_bus_pub(dhdp);
+ DHD_ISCAN(("%s: Entered\n", __FUNCTION__));
+
+ dhd_iscan_lock();
+
+ iscanbuf_cur = iscan_chain;
+
+ /* If iscan_delete is null then delete the entire
+ * chain or else delete specific one provided
+ */
+ if (!iscan_delete) {
+ while (iscanbuf_cur) {
+ iscanbuf_free = iscanbuf_cur;
+ iscanbuf_cur = iscanbuf_cur->next;
+ iscanbuf_free->next = 0;
+ MFREE(dhd->osh, iscanbuf_free, sizeof(iscan_buf_t));
+ }
+ iscan_chain = 0;
+ } else {
+ while (iscanbuf_cur) {
+ if (iscanbuf_cur == iscan_delete)
+ break;
+ iscanbuf_prv = iscanbuf_cur;
+ iscanbuf_cur = iscanbuf_cur->next;
+ }
+ if (iscanbuf_prv)
+ iscanbuf_prv->next = iscan_delete->next;
+
+ iscan_delete->next = 0;
+ MFREE(dhd->osh, iscan_delete, sizeof(iscan_buf_t));
+
+ if (!iscanbuf_prv)
+ iscan_chain = 0;
+ }
+ dhd_iscan_unlock();
+}
+
+iscan_buf_t *
+dhd_iscan_result_buf(void)
+{
+ return iscan_chain;
+}
+
+int
+dhd_iscan_issue_request(void * dhdp, wl_iscan_params_t *pParams, uint32 size)
+{
+ int rc = -1;
+ dhd_pub_t *dhd = dhd_bus_pub(dhdp);
+	char *buf = NULL;
+ char iovar[] = "iscan";
+ uint32 allocSize = 0;
+ wl_ioctl_t ioctl;
+ int len;
+
+ if (pParams) {
+ allocSize = (size + strlen(iovar) + 1);
+ if ((allocSize < size) || (allocSize < strlen(iovar)))
+ {
+			DHD_ERROR(("%s: overflow - allocation size too large %d < %d + %d!\n",
+				__FUNCTION__, allocSize, size, (int)strlen(iovar)));
+ goto cleanUp;
+ }
+ buf = MALLOC(dhd->osh, allocSize);
+
+ if (buf == NULL)
+ {
+ DHD_ERROR(("%s: malloc of size %d failed!\n", __FUNCTION__, allocSize));
+ goto cleanUp;
+ }
+ ioctl.cmd = WLC_SET_VAR;
+ len = bcm_mkiovar(iovar, (char *)pParams, size, buf, allocSize);
+ if (len == 0) {
+ rc = BCME_BUFTOOSHORT;
+ goto cleanUp;
+ }
+ rc = dhd_wl_ioctl(dhd, 0, &ioctl, buf, len);
+ }
+
+cleanUp:
+ if (buf) {
+ MFREE(dhd->osh, buf, allocSize);
+ }
+
+ return rc;
+}
+
+static int
+dhd_iscan_get_partial_result(void *dhdp, uint *scan_count)
+{
+ wl_iscan_results_t *list_buf;
+ wl_iscan_results_t list;
+ wl_scan_results_t *results;
+ iscan_buf_t *iscan_cur;
+ int status = -1;
+ dhd_pub_t *dhd = dhd_bus_pub(dhdp);
+ int rc;
+ wl_ioctl_t ioctl;
+ int len;
+
+ DHD_ISCAN(("%s: Enter\n", __FUNCTION__));
+
+ iscan_cur = dhd_iscan_allocate_buf(dhd, &iscan_chain);
+ if (!iscan_cur) {
+ DHD_ERROR(("%s: Failed to allocate node\n", __FUNCTION__));
+ dhd_iscan_free_buf(dhdp, 0);
+ dhd_iscan_request(dhdp, WL_SCAN_ACTION_ABORT);
+ dhd_ind_scan_confirm(dhdp, FALSE);
+ goto fail;
+ }
+
+ dhd_iscan_lock();
+
+ memset(iscan_cur->iscan_buf, 0, WLC_IW_ISCAN_MAXLEN);
+ list_buf = (wl_iscan_results_t*)iscan_cur->iscan_buf;
+ results = &list_buf->results;
+ results->buflen = WL_ISCAN_RESULTS_FIXED_SIZE;
+ results->version = 0;
+ results->count = 0;
+
+ memset(&list, 0, sizeof(list));
+ list.results.buflen = htod32(WLC_IW_ISCAN_MAXLEN);
+ len = bcm_mkiovar("iscanresults", (char *)&list, WL_ISCAN_RESULTS_FIXED_SIZE,
+ iscan_cur->iscan_buf, WLC_IW_ISCAN_MAXLEN);
+ if (len == 0) {
+ dhd_iscan_free_buf(dhdp, 0);
+ dhd_iscan_request(dhdp, WL_SCAN_ACTION_ABORT);
+ dhd_ind_scan_confirm(dhdp, FALSE);
+ status = BCME_BUFTOOSHORT;
+ goto fail;
+ }
+ ioctl.cmd = WLC_GET_VAR;
+ ioctl.set = FALSE;
+ rc = dhd_wl_ioctl(dhd, 0, &ioctl, iscan_cur->iscan_buf, WLC_IW_ISCAN_MAXLEN);
+
+ results->buflen = dtoh32(results->buflen);
+ results->version = dtoh32(results->version);
+ *scan_count = results->count = dtoh32(results->count);
+ status = dtoh32(list_buf->status);
+	DHD_ISCAN(("%s: Got %d results, status = (%x)\n", __FUNCTION__, results->count, status));
+
+ dhd_iscan_unlock();
+
+ if (!(*scan_count)) {
+ /* TODO: race condition when FLUSH already called */
+ dhd_iscan_free_buf(dhdp, 0);
+ }
+fail:
+ return status;
+}
+
+#ifdef NDIS
+/* XXX The following code has a bit of OS dependency.
+ * Clean up by moving the OS-dependent parts into
+ * per-port code so that the iscan logic here can be
+ * leveraged across all OSes.
+ */
+NDIS_EVENT iscan_event;
+HANDLE tHandle;
+NDIS_SPIN_LOCK dhd_iscan_queue_lock;
+
+void
+dhd_iscan_lock(void)
+{
+ NdisAcquireSpinLock(&dhd_iscan_queue_lock);
+}
+
+void
+dhd_iscan_unlock(void)
+{
+ NdisReleaseSpinLock(&dhd_iscan_queue_lock);
+}
+
+void
+dhd_iscan_notify(void)
+{
+ DHD_ISCAN(("%s: Entered\n", __FUNCTION__));
+ NdisSetEvent(&iscan_event);
+}
+
+static void
+dhd_iscan_func(void *h)
+{
+ int status;
+ uint scan_count;
+ dhd_pub_t *dhd = dhd_bus_pub(h);
+
+ /* Read the priority from registry */
+ CeSetThreadPriority(GetCurrentThread(), 128);
+ DHD_ISCAN(("%s: thread created\n", __FUNCTION__));
+
+ while (TRUE) {
+ NdisWaitEvent(&iscan_event, 0); /* wait forever */
+ NdisResetEvent(&iscan_event); /* reset the event */
+ DHD_ISCAN(("%s: thread scheduled\n", __FUNCTION__));
+
+ status = dhd_iscan_get_partial_result(h, &scan_count);
+
+ if (status == WL_SCAN_RESULTS_PARTIAL) {
+ dhd_iscan_request(h, WL_SCAN_ACTION_CONTINUE);
+ } else if (status == WL_SCAN_RESULTS_SUCCESS) {
+ if (dhd_iscan_in_progress(h)) {
+ dhd_ind_scan_confirm(h, TRUE);
+ }
+ } else if (status == WL_SCAN_RESULTS_ABORTED ||
+ status == WL_SCAN_RESULTS_NO_MEM) {
+ dhd_iscan_request(h, WL_SCAN_ACTION_ABORT);
+ dhd_ind_scan_confirm(h, FALSE);
+ } else {
+ dhd_iscan_request(h, WL_SCAN_ACTION_ABORT);
+ dhd_ind_scan_confirm(h, FALSE);
+ }
+ }
+}
+
+int
+dhd_iscan_attach(void *dhdp)
+{
+ DHD_ISCAN(("%s: dhdp = 0x%x\n", __FUNCTION__, dhdp));
+
+ NdisInitializeEvent(&iscan_event);
+ NdisResetEvent(&iscan_event);
+ NdisAllocateSpinLock(&dhd_iscan_queue_lock);
+
+ /* XXX - should move to ndishared sublayer */
+ tHandle = CreateThread(NULL,
+ 0,
+ (LPTHREAD_START_ROUTINE)dhd_iscan_func,
+ (void *)dhdp,
+ 0,
+ &iscan_thread_id);
+
+ if (!iscan_thread_id)
+ return NDIS_STATUS_FAILURE;
+
+ return NDIS_STATUS_SUCCESS;
+}
+
+void
+dhd_iscan_deattach(void *dhdp)
+{
+ if (iscan_thread_id)
+ {
+ NdisFreeEvent(&iscan_event);
+ NdisFreeSpinLock(&dhd_iscan_queue_lock);
+ CloseHandle(tHandle);
+ iscan_thread_id = 0;
+ }
+}
+#endif /* NDIS */
+#endif /* SIMPLE_ISCAN */
+
+/*
+ * Returns TRUE if associated, FALSE if not associated.
+ */
+bool dhd_is_associated(dhd_pub_t *dhd, uint8 ifidx, int *retval)
+{
+ char bssid[6], zbuf[6];
+ int ret = -1;
+
+ bzero(bssid, 6);
+ bzero(zbuf, 6);
+
+ ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_BSSID, (char *)&bssid,
+ ETHER_ADDR_LEN, FALSE, ifidx);
+ /* XXX:AS!!! res can be: -17(BCME_NOTASSOCIATED),-22(BCME_NORESOURCE), and 0(OK)
+ OK - doesn't mean associated yet, the returned bssid
+ still needs to be checked for non zero array
+ */
+ DHD_TRACE((" %s WLC_GET_BSSID ioctl res = %d\n", __FUNCTION__, ret));
+
+ if (ret == BCME_NOTASSOCIATED) {
+ DHD_ERROR(("%s: WLC_GET_BSSID, NOT ASSOCIATED\n", __FUNCTION__));
+ }
+
+ if (retval)
+ *retval = ret;
+
+ if (ret < 0)
+ return FALSE;
+
+ if ((memcmp(bssid, zbuf, ETHER_ADDR_LEN) == 0)) {
+ DHD_TRACE(("%s: WLC_GET_BSSID ioctl returned zero bssid\n", __FUNCTION__));
+ return FALSE;
+ }
+ return TRUE;
+}
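+
+/* Typical usage (illustrative): callers that only need the boolean can
+ * pass NULL for retval; callers that need to distinguish
+ * BCME_NOTASSOCIATED from other errors pass a pointer:
+ *
+ *   int err;
+ *   if (!dhd_is_associated(dhd, 0, &err) && err == BCME_NOTASSOCIATED)
+ *       DHD_INFO(("not associated yet\n"));
+ */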
+
+/* Function to estimate possible DTIM_SKIP value */
+#if defined(OEM_ANDROID) && defined(BCMPCIE)
+int
+dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd, int *dtim_period, int *bcn_interval)
+{
+	int bcn_li_dtim = 1; /* default: no DTIM skip */
+ int ret = -1;
+ int allowed_skip_dtim_cnt = 0;
+
+ if (dhd->disable_dtim_in_suspend) {
+ DHD_ERROR(("%s Disable bcn_li_dtim in suspend\n", __FUNCTION__));
+ bcn_li_dtim = 0;
+ return bcn_li_dtim;
+ }
+
+ /* Check if associated */
+ if (dhd_is_associated(dhd, 0, NULL) == FALSE) {
+ DHD_TRACE(("%s NOT assoc ret %d\n", __FUNCTION__, ret));
+ return bcn_li_dtim;
+ }
+
+ if (dtim_period == NULL || bcn_interval == NULL)
+ return bcn_li_dtim;
+
+ /* read associated AP beacon interval */
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_BCNPRD,
+ bcn_interval, sizeof(*bcn_interval), FALSE, 0)) < 0) {
+ DHD_ERROR(("%s get beacon failed code %d\n", __FUNCTION__, ret));
+ return bcn_li_dtim;
+ }
+
+ /* read associated AP dtim setup */
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_DTIMPRD,
+ dtim_period, sizeof(*dtim_period), FALSE, 0)) < 0) {
+ DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
+ return bcn_li_dtim;
+ }
+
+	/* if not associated, just return */
+ if (*dtim_period == 0) {
+ return bcn_li_dtim;
+ }
+
+ if (dhd->max_dtim_enable) {
+ bcn_li_dtim =
+ (int) (MAX_DTIM_ALLOWED_INTERVAL / ((*dtim_period) * (*bcn_interval)));
+ if (bcn_li_dtim == 0) {
+ bcn_li_dtim = 1;
+ }
+ } else {
+		/* attempt to use the platform-defined DTIM skip interval */
+ bcn_li_dtim = dhd->suspend_bcn_li_dtim;
+
+ /* check if sta listen interval fits into AP dtim */
+ if (*dtim_period > CUSTOM_LISTEN_INTERVAL) {
+			/* AP DTIM too big for our listen interval: no DTIM skipping */
+ bcn_li_dtim = NO_DTIM_SKIP;
+ DHD_ERROR(("%s DTIM=%d > Listen=%d : too big ...\n",
+ __FUNCTION__, *dtim_period, CUSTOM_LISTEN_INTERVAL));
+ return bcn_li_dtim;
+ }
+
+ if (((*dtim_period) * (*bcn_interval) * bcn_li_dtim) > MAX_DTIM_ALLOWED_INTERVAL) {
+ allowed_skip_dtim_cnt =
+ MAX_DTIM_ALLOWED_INTERVAL / ((*dtim_period) * (*bcn_interval));
+ bcn_li_dtim =
+ (allowed_skip_dtim_cnt != 0) ? allowed_skip_dtim_cnt : NO_DTIM_SKIP;
+ }
+
+ if ((bcn_li_dtim * (*dtim_period)) > CUSTOM_LISTEN_INTERVAL) {
+			/* Round up dtim_skip to fit into the STA's listen interval */
+			bcn_li_dtim = (int)(CUSTOM_LISTEN_INTERVAL / *dtim_period);
+			DHD_TRACE(("%s adjust dtim_skip to %d\n", __FUNCTION__, bcn_li_dtim));
+ }
+ }
+
+ if (dhd->conf->suspend_bcn_li_dtim >= 0)
+ bcn_li_dtim = dhd->conf->suspend_bcn_li_dtim;
+ DHD_ERROR(("%s beacon=%d bcn_li_dtim=%d DTIM=%d Listen=%d\n",
+ __FUNCTION__, *bcn_interval, bcn_li_dtim, *dtim_period, CUSTOM_LISTEN_INTERVAL));
+
+ return bcn_li_dtim;
+}
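+
+/* Worked example of the math above (illustrative numbers): with a
+ * beacon interval of 100 TU and a DTIM period of 1, max_dtim_enable
+ * yields bcn_li_dtim = MAX_DTIM_ALLOWED_INTERVAL / (1 * 100); if that
+ * constant were 600, the result is 6, i.e. the STA wakes for every 6th
+ * DTIM beacon. Otherwise the platform value suspend_bcn_li_dtim is
+ * used, clamped so that dtim_period * bcn_interval * bcn_li_dtim stays
+ * within MAX_DTIM_ALLOWED_INTERVAL and bcn_li_dtim * dtim_period fits
+ * inside CUSTOM_LISTEN_INTERVAL.
+ */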
+#else /* OEM_ANDROID && BCMPCIE */
+int
+dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd)
+{
+	int bcn_li_dtim = 1; /* default: no DTIM skip */
+ int ret = -1;
+ int dtim_period = 0;
+ int ap_beacon = 0;
+ int allowed_skip_dtim_cnt = 0;
+
+ if (dhd->disable_dtim_in_suspend) {
+ DHD_ERROR(("%s Disable bcn_li_dtim in suspend\n", __FUNCTION__));
+ bcn_li_dtim = 0;
+ goto exit;
+ }
+
+ /* Check if associated */
+ if (dhd_is_associated(dhd, 0, NULL) == FALSE) {
+ DHD_TRACE(("%s NOT assoc ret %d\n", __FUNCTION__, ret));
+ goto exit;
+ }
+
+ /* read associated AP beacon interval */
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_BCNPRD,
+ &ap_beacon, sizeof(ap_beacon), FALSE, 0)) < 0) {
+ DHD_ERROR(("%s get beacon failed code %d\n", __FUNCTION__, ret));
+ goto exit;
+ }
+
+ /* read associated ap's dtim setup */
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_DTIMPRD,
+ &dtim_period, sizeof(dtim_period), FALSE, 0)) < 0) {
+ DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
+ goto exit;
+ }
+
+	/* if not associated, just exit */
+ if (dtim_period == 0) {
+ goto exit;
+ }
+
+ if (dhd->max_dtim_enable) {
+ bcn_li_dtim =
+ (int) (MAX_DTIM_ALLOWED_INTERVAL / (ap_beacon * dtim_period));
+ if (bcn_li_dtim == 0) {
+ bcn_li_dtim = 1;
+ }
+ } else {
+		/* attempt to use the platform-defined DTIM skip interval */
+ bcn_li_dtim = dhd->suspend_bcn_li_dtim;
+
+ /* check if sta listen interval fits into AP dtim */
+ if (dtim_period > CUSTOM_LISTEN_INTERVAL) {
+			/* AP DTIM too big for our listen interval: no DTIM skipping */
+ bcn_li_dtim = NO_DTIM_SKIP;
+ DHD_ERROR(("%s DTIM=%d > Listen=%d : too big ...\n",
+ __FUNCTION__, dtim_period, CUSTOM_LISTEN_INTERVAL));
+ goto exit;
+ }
+
+ if ((dtim_period * ap_beacon * bcn_li_dtim) > MAX_DTIM_ALLOWED_INTERVAL) {
+ allowed_skip_dtim_cnt =
+ MAX_DTIM_ALLOWED_INTERVAL / (dtim_period * ap_beacon);
+ bcn_li_dtim =
+ (allowed_skip_dtim_cnt != 0) ? allowed_skip_dtim_cnt : NO_DTIM_SKIP;
+ }
+
+ if ((bcn_li_dtim * dtim_period) > CUSTOM_LISTEN_INTERVAL) {
+			/* Round up dtim_skip to fit into the STA's listen interval */
+			bcn_li_dtim = (int)(CUSTOM_LISTEN_INTERVAL / dtim_period);
+			DHD_TRACE(("%s adjust dtim_skip to %d\n", __FUNCTION__, bcn_li_dtim));
+ }
+ }
+
+ if (dhd->conf->suspend_bcn_li_dtim >= 0)
+ bcn_li_dtim = dhd->conf->suspend_bcn_li_dtim;
+ DHD_ERROR(("%s beacon=%d bcn_li_dtim=%d DTIM=%d Listen=%d\n",
+ __FUNCTION__, ap_beacon, bcn_li_dtim, dtim_period, CUSTOM_LISTEN_INTERVAL));
+
+exit:
+ return bcn_li_dtim;
+}
+#endif /* OEM_ANDROID && BCMPCIE */
+
+#ifdef CONFIG_SILENT_ROAM
+int
+dhd_sroam_set_mon(dhd_pub_t *dhd, bool set)
+{
+ int ret = BCME_OK;
+ wlc_sroam_t *psroam;
+ wlc_sroam_info_t *sroam;
+ uint sroamlen = sizeof(*sroam) + SROAM_HDRLEN;
+
+ /* Check if associated */
+ if (dhd_is_associated(dhd, 0, NULL) == FALSE) {
+ DHD_TRACE(("%s NOT assoc\n", __FUNCTION__));
+ return ret;
+ }
+
+ if (set && (dhd->op_mode &
+ (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE))) {
+ DHD_INFO((" Failed to set sroam %d, op_mode 0x%04x\n", set, dhd->op_mode));
+ return ret;
+ }
+
+ if (!dhd->sroam_turn_on) {
+ DHD_INFO((" Failed to set sroam %d, sroam turn %d\n", set, dhd->sroam_turn_on));
+ return ret;
+ }
+ psroam = (wlc_sroam_t *)MALLOCZ(dhd->osh, sroamlen);
+ if (!psroam) {
+		DHD_ERROR(("%s: failed to malloc buffer\n", __FUNCTION__));
+ return BCME_NOMEM;
+ }
+
+ ret = dhd_iovar(dhd, 0, "sroam", NULL, 0, (char *)psroam, sroamlen, FALSE);
+ if (ret < 0) {
+ DHD_ERROR(("%s Failed to Get sroam %d\n", __FUNCTION__, ret));
+ goto done;
+ }
+
+ if (psroam->ver != WLC_SILENT_ROAM_CUR_VER) {
+ ret = BCME_VERSION;
+ goto done;
+ }
+
+ sroam = (wlc_sroam_info_t *)psroam->data;
+ sroam->sroam_on = set;
+ DHD_INFO((" Silent roam monitor mode %s\n", set ? "On" : "Off"));
+
+ ret = dhd_iovar(dhd, 0, "sroam", (char *)psroam, sroamlen, NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s Failed to Set sroam %d\n", __FUNCTION__, ret));
+ }
+
+done:
+ if (psroam) {
+ MFREE(dhd->osh, psroam, sroamlen);
+ }
+
+ return ret;
+}
+#endif /* CONFIG_SILENT_ROAM */
+
+/* Check if the mode supports STA MODE */
+bool dhd_support_sta_mode(dhd_pub_t *dhd)
+{
+
+#ifdef WL_CFG80211
+ if (!(dhd->op_mode & DHD_FLAG_STA_MODE))
+ return FALSE;
+ else
+#endif /* WL_CFG80211 */
+ return TRUE;
+}
+
+#if defined(KEEP_ALIVE)
+int dhd_keep_alive_onoff(dhd_pub_t *dhd)
+{
+ char buf[32] = {0};
+ const char *str;
+ wl_mkeep_alive_pkt_t mkeep_alive_pkt = {0, 0, 0, 0, 0, {0}};
+ wl_mkeep_alive_pkt_t *mkeep_alive_pktp;
+ int buf_len;
+ int str_len;
+ int res = -1;
+
+ if (!dhd_support_sta_mode(dhd))
+ return res;
+
+ DHD_TRACE(("%s execution\n", __FUNCTION__));
+
+ str = "mkeep_alive";
+ str_len = strlen(str);
+ strlcpy(buf, str, sizeof(buf));
+ mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) (buf + str_len + 1);
+ mkeep_alive_pkt.period_msec = dhd->conf->keep_alive_period;
+ buf_len = str_len + 1;
+ mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION);
+ mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN);
+ /* Setup keep alive zero for null packet generation */
+ mkeep_alive_pkt.keep_alive_id = 0;
+ mkeep_alive_pkt.len_bytes = 0;
+ buf_len += WL_MKEEP_ALIVE_FIXED_LEN;
+ bzero(mkeep_alive_pkt.data, sizeof(mkeep_alive_pkt.data));
+ /* Keep-alive attributes are set in local variable (mkeep_alive_pkt), and
+ * then memcpy'ed into buffer (mkeep_alive_pktp) since there is no
+ * guarantee that the buffer is properly aligned.
+ */
+ memcpy((char *)mkeep_alive_pktp, &mkeep_alive_pkt, WL_MKEEP_ALIVE_FIXED_LEN);
+
+ res = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);
+
+ return res;
+}
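+
+/* The buffer built above, sketched (field order per
+ * wl_mkeep_alive_pkt_t; illustrative):
+ *
+ *   +-----------------+---------------------------------------------+
+ *   | "mkeep_alive\0" | version | length | period_msec | len_bytes  |
+ *   |                 | keep_alive_id | data[]                      |
+ *   +-----------------+---------------------------------------------+
+ *
+ * keep_alive_id 0 with len_bytes 0 requests periodic NULL keep-alive
+ * frames from the firmware at keep_alive_period millisecond intervals.
+ */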
+#endif /* defined(KEEP_ALIVE) */
+#if defined(OEM_ANDROID)
+#define CSCAN_TLV_TYPE_SSID_IE 'S'
+/*
+ * SSIDs list parsing from cscan tlv list
+ */
+int
+wl_parse_ssid_list_tlv(char** list_str, wlc_ssid_ext_t* ssid, int max, int *bytes_left)
+{
+ char* str;
+ int idx = 0;
+ uint8 len;
+
+ if ((list_str == NULL) || (*list_str == NULL) || (*bytes_left < 0)) {
+		DHD_ERROR(("%s: invalid parameters\n", __FUNCTION__));
+ return BCME_BADARG;
+ }
+ str = *list_str;
+ while (*bytes_left > 0) {
+ if (str[0] != CSCAN_TLV_TYPE_SSID_IE) {
+ *list_str = str;
+ DHD_TRACE(("nssid=%d left_parse=%d %d\n", idx, *bytes_left, str[0]));
+ return idx;
+ }
+
+ if (idx >= max) {
+			DHD_ERROR(("%s: number of SSIDs is more than %d\n", __FUNCTION__, idx));
+ return BCME_BADARG;
+ }
+
+ /* Get proper CSCAN_TLV_TYPE_SSID_IE */
+ *bytes_left -= 1;
+ if (*bytes_left == 0) {
+ DHD_ERROR(("%s no length field.\n", __FUNCTION__));
+ return BCME_BADARG;
+ }
+ str += 1;
+ ssid[idx].rssi_thresh = 0;
+ ssid[idx].flags = 0;
+ len = str[0];
+ if (len == 0) {
+ /* Broadcast SSID */
+ ssid[idx].SSID_len = 0;
+ memset((char*)ssid[idx].SSID, 0x0, DOT11_MAX_SSID_LEN);
+ *bytes_left -= 1;
+ str += 1;
+
+ DHD_TRACE(("BROADCAST SCAN left=%d\n", *bytes_left));
+ } else if (len <= DOT11_MAX_SSID_LEN) {
+ /* Get proper SSID size */
+ ssid[idx].SSID_len = len;
+ *bytes_left -= 1;
+ /* Get SSID */
+ if (ssid[idx].SSID_len > *bytes_left) {
+ DHD_ERROR(("%s out of memory range len=%d but left=%d\n",
+ __FUNCTION__, ssid[idx].SSID_len, *bytes_left));
+ return BCME_BADARG;
+ }
+ str += 1;
+ memcpy((char*)ssid[idx].SSID, str, ssid[idx].SSID_len);
+
+ *bytes_left -= ssid[idx].SSID_len;
+ str += ssid[idx].SSID_len;
+ ssid[idx].hidden = TRUE;
+
+ DHD_TRACE(("%s :size=%d left=%d\n",
+ (char*)ssid[idx].SSID, ssid[idx].SSID_len, *bytes_left));
+ } else {
+			DHD_ERROR(("### SSID size %d exceeds %d\n", str[0], DOT11_MAX_SSID_LEN));
+ return BCME_BADARG;
+ }
+ idx++;
+ }
+
+ *list_str = str;
+ return idx;
+}
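+
+/* Illustrative TLV layout (not compiled): each SSID record is a one
+ * byte 'S' tag followed by a one byte length and the SSID bytes. Two
+ * records, "ap1" and a zero-length broadcast entry, would be encoded:
+ *
+ *   'S' 0x03 'a' 'p' '1'   'S' 0x00
+ *
+ * Parsing stops at the first byte that is not an 'S' tag and returns
+ * the number of SSIDs consumed.
+ */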
+
+#if defined(WL_WIRELESS_EXT)
+/* Android ComboSCAN support */
+
+/*
+ * data parsing from ComboScan tlv list
+*/
+int
+wl_iw_parse_data_tlv(char** list_str, void *dst, int dst_size, const char token,
+ int input_size, int *bytes_left)
+{
+ char* str;
+ uint16 short_temp;
+ uint32 int_temp;
+
+ if ((list_str == NULL) || (*list_str == NULL) ||(bytes_left == NULL) || (*bytes_left < 0)) {
+		DHD_ERROR(("%s: invalid parameters\n", __FUNCTION__));
+ return -1;
+ }
+ str = *list_str;
+
+ /* Clean all dest bytes */
+ memset(dst, 0, dst_size);
+ if (*bytes_left > 0) {
+
+ if (str[0] != token) {
+ DHD_TRACE(("%s NOT Type=%d get=%d left_parse=%d \n",
+ __FUNCTION__, token, str[0], *bytes_left));
+ return -1;
+ }
+
+ *bytes_left -= 1;
+ str += 1;
+
+		if (input_size == 1) {
+			memcpy(dst, str, input_size);
+		}
+		else if (input_size == 2) {
+			/* copy out, apply host-to-dongle byte order, then copy into dst */
+			memcpy(&short_temp, str, input_size);
+			short_temp = htod16(short_temp);
+			memcpy(dst, &short_temp, input_size);
+		}
+		else if (input_size == 4) {
+			memcpy(&int_temp, str, input_size);
+			int_temp = htod32(int_temp);
+			memcpy(dst, &int_temp, input_size);
+		}
+
+ *bytes_left -= input_size;
+ str += input_size;
+ *list_str = str;
+ return 1;
+ }
+ return 1;
+}
+
+/*
+ * channel list parsing from cscan tlv list
+*/
+int
+wl_iw_parse_channel_list_tlv(char** list_str, uint16* channel_list,
+ int channel_num, int *bytes_left)
+{
+ char* str;
+ int idx = 0;
+
+ if ((list_str == NULL) || (*list_str == NULL) ||(bytes_left == NULL) || (*bytes_left < 0)) {
+		DHD_ERROR(("%s: invalid parameters\n", __FUNCTION__));
+ return -1;
+ }
+ str = *list_str;
+
+ while (*bytes_left > 0) {
+
+ if (str[0] != CSCAN_TLV_TYPE_CHANNEL_IE) {
+ *list_str = str;
+ DHD_TRACE(("End channel=%d left_parse=%d %d\n", idx, *bytes_left, str[0]));
+ return idx;
+ }
+ /* Get proper CSCAN_TLV_TYPE_CHANNEL_IE */
+ *bytes_left -= 1;
+ str += 1;
+
+ if (str[0] == 0) {
+ /* All channels */
+ channel_list[idx] = 0x0;
+ }
+ else {
+ channel_list[idx] = (uint16)str[0];
+ DHD_TRACE(("%s channel=%d \n", __FUNCTION__, channel_list[idx]));
+ }
+ *bytes_left -= 1;
+ str += 1;
+
+ if (idx++ > 255) {
+ DHD_ERROR(("%s Too many channels \n", __FUNCTION__));
+ return -1;
+ }
+ }
+
+ *list_str = str;
+ return idx;
+}
+
+/* Parse a comma-separated list from list_str into the ssid array,
+ * starting at index idx. max specifies the size of the ssid array.
+ * Returns the updated idx; if the returned idx >= max, not all SSIDs
+ * fit and the excess were not copied. Returns -1 on a NULL list or
+ * an SSID that is too long.
+ */
+int
+wl_iw_parse_ssid_list(char** list_str, wlc_ssid_t* ssid, int idx, int max)
+{
+ char* str, *ptr;
+
+ if ((list_str == NULL) || (*list_str == NULL))
+ return -1;
+
+ for (str = *list_str; str != NULL; str = ptr) {
+
+ /* check for next TAG */
+ if (!strncmp(str, GET_CHANNEL, strlen(GET_CHANNEL))) {
+ *list_str = str + strlen(GET_CHANNEL);
+ return idx;
+ }
+
+ if ((ptr = strchr(str, ',')) != NULL) {
+ *ptr++ = '\0';
+ }
+
+ if (strlen(str) > DOT11_MAX_SSID_LEN) {
+ DHD_ERROR(("ssid <%s> exceeds %d\n", str, DOT11_MAX_SSID_LEN));
+ return -1;
+ }
+
+ if (strlen(str) == 0)
+ ssid[idx].SSID_len = 0;
+
+ if (idx < max) {
+ bzero(ssid[idx].SSID, sizeof(ssid[idx].SSID));
+ strlcpy((char*)ssid[idx].SSID, str, sizeof(ssid[idx].SSID));
+			ssid[idx].SSID_len = strlen((char *)ssid[idx].SSID);
+ }
+ idx++;
+ }
+ return idx;
+}
+
+/*
+ * Parse channel list from iwpriv CSCAN
+ */
+int
+wl_iw_parse_channel_list(char** list_str, uint16* channel_list, int channel_num)
+{
+ int num;
+ int val;
+ char* str;
+ char* endptr = NULL;
+
+ if ((list_str == NULL)||(*list_str == NULL))
+ return -1;
+
+ str = *list_str;
+ num = 0;
+ while (strncmp(str, GET_NPROBE, strlen(GET_NPROBE))) {
+ val = (int)strtoul(str, &endptr, 0);
+ if (endptr == str) {
+ printf("could not parse channel number starting at"
+ " substring \"%s\" in list:\n%s\n",
+ str, *list_str);
+ return -1;
+ }
+ str = endptr + strspn(endptr, " ,");
+
+ if (num == channel_num) {
+ DHD_ERROR(("too many channels (more than %d) in channel list:\n%s\n",
+ channel_num, *list_str));
+ return -1;
+ }
+
+ channel_list[num++] = (uint16)val;
+ }
+ *list_str = str;
+ return num;
+}
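+
+/* Illustrative example (not compiled): given a CSCAN fragment such as
+ * "1,6,11" followed by the GET_NPROBE tag, the loop above parses the
+ * decimal channel numbers into channel_list[] and stops at the tag:
+ *
+ *   channel_list = {1, 6, 11}, return value 3
+ *
+ * Non-numeric text before the tag, or more than channel_num channels,
+ * returns -1.
+ */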
+#endif
+#endif /* defined(OEM_ANDROID) */
+
+#if defined(BCM_ROUTER_DHD)
+static int traffic_mgmt_add_dwm_filter(dhd_pub_t *dhd,
+ trf_mgmt_filter_list_t * trf_mgmt_filter_list, int len)
+{
+ int ret = 0;
+ uint32 i;
+ trf_mgmt_filter_t *trf_mgmt_filter;
+ uint8 dwm_tbl_entry;
+ uint32 dscp = 0;
+ uint16 dwm_filter_enabled = 0;
+
+ /* Check parameter length is adequate */
+ if (len < (OFFSETOF(trf_mgmt_filter_list_t, filter) +
+ trf_mgmt_filter_list->num_filters * sizeof(trf_mgmt_filter_t))) {
+ ret = BCME_BUFTOOSHORT;
+ return ret;
+ }
+
+ bzero(&dhd->dhd_tm_dwm_tbl, sizeof(dhd_trf_mgmt_dwm_tbl_t));
+
+ for (i = 0; i < trf_mgmt_filter_list->num_filters; i++) {
+ trf_mgmt_filter = &trf_mgmt_filter_list->filter[i];
+
+ dwm_filter_enabled = (trf_mgmt_filter->flags & TRF_FILTER_DWM);
+
+ if (dwm_filter_enabled) {
+ dscp = trf_mgmt_filter->dscp;
+ if (dscp >= DHD_DWM_TBL_SIZE) {
+ ret = BCME_BADARG;
+ return ret;
+ }
+ }
+
+ dhd->dhd_tm_dwm_tbl.dhd_dwm_enabled = 1;
+ /* set WMM AC bits */
+ dwm_tbl_entry = (uint8) trf_mgmt_filter->priority;
+ DHD_TRF_MGMT_DWM_SET_FILTER(dwm_tbl_entry);
+
+ /* set favored bits */
+ if (trf_mgmt_filter->flags & TRF_FILTER_FAVORED)
+ DHD_TRF_MGMT_DWM_SET_FAVORED(dwm_tbl_entry);
+
+ dhd->dhd_tm_dwm_tbl.dhd_dwm_tbl[dscp] = dwm_tbl_entry;
+ }
+ return ret;
+}
+#endif /* BCM_ROUTER_DHD */
+
+/* Given a filename and a download type, returns a buffer pointer and
+* length for download to the firmware. The component can be FW, NVRAM,
+* CLM_BLOB, or TXCAP_BLOB.
+*/
+int dhd_get_download_buffer(dhd_pub_t *dhd, char *file_path, download_type_t component,
+ char ** buffer, int *length)
+
+{
+ int ret = BCME_ERROR;
+ int len = 0;
+ int file_len;
+ void *image = NULL;
+ uint8 *buf = NULL;
+
+ /* Point to cache if available. */
+#ifdef CACHE_FW_IMAGES
+ if (component == FW) {
+ if (dhd->cached_fw_length) {
+ len = dhd->cached_fw_length;
+ buf = dhd->cached_fw;
+ }
+ } else if (component == NVRAM) {
+ if (dhd->cached_nvram_length) {
+ len = dhd->cached_nvram_length;
+ buf = dhd->cached_nvram;
+ }
+ } else if (component == CLM_BLOB) {
+ if (dhd->cached_clm_length) {
+ len = dhd->cached_clm_length;
+ buf = dhd->cached_clm;
+ }
+ } else if (component == TXCAP_BLOB) {
+ if (dhd->cached_txcap_length) {
+ len = dhd->cached_txcap_length;
+ buf = dhd->cached_txcap;
+ }
+ } else {
+ DHD_ERROR(("%s: Invalid component arg %d\n",
+ __FUNCTION__, component));
+ ret = BCME_BADARG;
+ return ret;
+ }
+#endif /* CACHE_FW_IMAGES */
+ /* No Valid cache found on this call */
+ if (!len) {
+ file_len = *length;
+ *length = 0;
+
+ if (file_path) {
+ image = dhd_os_open_image1(dhd, file_path);
+ if (image == NULL) {
+ printf("%s: Open image file failed %s\n", __FUNCTION__, file_path);
+ goto err;
+ }
+ }
+
+ buf = MALLOCZ(dhd->osh, file_len);
+ if (buf == NULL) {
+ DHD_ERROR(("%s: Failed to allocate memory %d bytes\n",
+ __FUNCTION__, file_len));
+ goto err;
+ }
+
+ /* Download image */
+#if defined(BCMEMBEDIMAGE) && defined(DHD_EFI)
+ if (!image) {
+ memcpy(buf, nvram_arr, sizeof(nvram_arr));
+ len = sizeof(nvram_arr);
+ } else {
+ len = dhd_os_get_image_block((char *)buf, file_len, image);
+ if ((len <= 0 || len > file_len)) {
+ MFREE(dhd->osh, buf, file_len);
+ goto err;
+ }
+ }
+#else
+ len = dhd_os_get_image_block((char *)buf, file_len, image);
+ if ((len <= 0 || len > file_len)) {
+ MFREE(dhd->osh, buf, file_len);
+ goto err;
+ }
+#endif /* BCMEMBEDIMAGE && DHD_EFI */
+ }
+
+ ret = BCME_OK;
+ *length = len;
+ *buffer = (char *)buf;
+
+ /* Cache if first call. */
+#ifdef CACHE_FW_IMAGES
+ if (component == FW) {
+ if (!dhd->cached_fw_length) {
+ dhd->cached_fw = buf;
+ dhd->cached_fw_length = len;
+ }
+ } else if (component == NVRAM) {
+ if (!dhd->cached_nvram_length) {
+ dhd->cached_nvram = buf;
+ dhd->cached_nvram_length = len;
+ }
+ } else if (component == CLM_BLOB) {
+ if (!dhd->cached_clm_length) {
+ dhd->cached_clm = buf;
+ dhd->cached_clm_length = len;
+ }
+ } else if (component == TXCAP_BLOB) {
+ if (!dhd->cached_txcap_length) {
+ dhd->cached_txcap = buf;
+ dhd->cached_txcap_length = len;
+ }
+ }
+#endif /* CACHE_FW_IMAGES */
+
+err:
+ if (image)
+ dhd_os_close_image1(dhd, image);
+
+ return ret;
+}
+
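+/* Wrap 'dload_buf' (which already reserves wl_dload_data_t header space)
+* with the download header and push it to the dongle via the named iovar.
+* 'flag' carries the DL_BEGIN/DL_END chunk markers plus the handler
+* version; the header's length field excludes the header itself.
+* Note: iovar_buf is static, so callers are assumed to be serialized.
+*/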
+int
+dhd_download_2_dongle(dhd_pub_t *dhd, char *iovar, uint16 flag, uint16 dload_type,
+ unsigned char *dload_buf, int len)
+{
+ struct wl_dload_data *dload_ptr = (struct wl_dload_data *)dload_buf;
+ int err = 0;
+ int dload_data_offset;
+ static char iovar_buf[WLC_IOCTL_MEDLEN];
+ int iovar_len;
+
+ memset(iovar_buf, 0, sizeof(iovar_buf));
+
+ dload_data_offset = OFFSETOF(wl_dload_data_t, data);
+ dload_ptr->flag = (DLOAD_HANDLER_VER << DLOAD_FLAG_VER_SHIFT) | flag;
+ dload_ptr->dload_type = dload_type;
+ dload_ptr->len = htod32(len - dload_data_offset);
+ dload_ptr->crc = 0;
+ len = ROUNDUP(len, 8);
+
+ iovar_len = bcm_mkiovar(iovar, (char *)dload_buf,
+ (uint)len, iovar_buf, sizeof(iovar_buf));
+ if (iovar_len == 0) {
+ DHD_ERROR(("%s: insufficient buffer space passed to bcm_mkiovar for '%s' \n",
+ __FUNCTION__, iovar));
+ return BCME_BUFTOOSHORT;
+ }
+
+ err = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovar_buf,
+ iovar_len, IOV_SET, 0);
+
+ return err;
+}
+
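+/* Download a blob (e.g. CLM) to the dongle in MAX_CHUNK_LEN pieces.
+* The first chunk is flagged DL_BEGIN and the last DL_END. On Linux,
+* 'buf' is an open image handle read via dhd_os_get_image_block();
+* on other OSes it is an in-memory buffer indexed by cumulative_len.
+*/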
+int
+dhd_download_blob(dhd_pub_t *dhd, unsigned char *buf,
+ uint32 len, char *iovar)
+{
+ int chunk_len;
+#if !defined(LINUX) && !defined(linux)
+ int cumulative_len = 0;
+#endif /* !LINUX && !linux */
+ int size2alloc;
+ unsigned char *new_buf;
+ int err = 0, data_offset;
+ uint16 dl_flag = DL_BEGIN;
+
+ data_offset = OFFSETOF(wl_dload_data_t, data);
+ size2alloc = data_offset + MAX_CHUNK_LEN;
+ size2alloc = ROUNDUP(size2alloc, 8);
+
+ if ((new_buf = (unsigned char *)MALLOCZ(dhd->osh, size2alloc)) != NULL) {
+ do {
+#if !defined(LINUX) && !defined(linux)
+ if (len >= MAX_CHUNK_LEN)
+ chunk_len = MAX_CHUNK_LEN;
+ else
+ chunk_len = len;
+
+ memcpy(new_buf + data_offset, buf + cumulative_len, chunk_len);
+ cumulative_len += chunk_len;
+#else
+ chunk_len = dhd_os_get_image_block((char *)(new_buf + data_offset),
+ MAX_CHUNK_LEN, buf);
+ if (chunk_len < 0) {
+ DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n",
+ __FUNCTION__, chunk_len));
+ err = BCME_ERROR;
+ goto exit;
+ }
+#endif /* !LINUX && !linux */
+ if (len - chunk_len == 0)
+ dl_flag |= DL_END;
+
+ err = dhd_download_2_dongle(dhd, iovar, dl_flag, DL_TYPE_CLM,
+ new_buf, data_offset + chunk_len);
+
+ dl_flag &= ~DL_BEGIN;
+
+ len = len - chunk_len;
+ } while ((len > 0) && (err == 0));
+#if !defined(LINUX) && !defined(linux)
+ MFREE(dhd->osh, new_buf, size2alloc);
+#endif /* !LINUX && !linux */
+ } else {
+ err = BCME_NOMEM;
+ }
+#if defined(LINUX) || defined(linux)
+exit:
+ if (new_buf) {
+ MFREE(dhd->osh, new_buf, size2alloc);
+ }
+#endif /* LINUX || linux */
+ return err;
+}
+
+#if defined(CACHE_FW_IMAGES)
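+/* Variant of dhd_download_blob() that sources the blob through the
+* dhd_get_download_buffer() cache. The iovar name ("clmload" or
+* "txcapload") selects which cached component is used.
+*/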
+int
+dhd_download_blob_cached(dhd_pub_t *dhd, char *file_path,
+ uint32 len, char *iovar)
+{
+ int ret = BCME_ERROR;
+ uint chunk_len, size2alloc, data_offset, file_offset;
+ unsigned char *pay_load, *dnld_buf;
+ char *memblock;
+ uint16 dl_flag = DL_BEGIN;
+ download_type_t dl_type;
+
+ data_offset = OFFSETOF(wl_dload_data_t, data);
+ size2alloc = data_offset + MAX_CHUNK_LEN;
+ size2alloc = ROUNDUP(size2alloc, 8);
+ file_offset = 0;
+
+ if ((dnld_buf = MALLOCZ(dhd->osh, size2alloc)) == NULL) {
+ ret = BCME_NOMEM;
+ goto exit;
+ }
+ pay_load = (dnld_buf + data_offset);
+
+ if (!memcmp("clmload", iovar, strlen("clmload"))) {
+ dl_type = CLM_BLOB;
+ } else if (!memcmp("txcapload", iovar, strlen("txcapload"))) {
+ dl_type = TXCAP_BLOB;
+ } else {
+ DHD_ERROR(("%s Invalid iovar :%s \n", __FUNCTION__, iovar));
+ ret = BCME_BADARG;
+ goto exit;
+ }
+
+ ret = dhd_get_download_buffer(dhd, file_path, dl_type, &memblock, (int *)&len);
+ if (ret != BCME_OK) {
+ DHD_ERROR(("%s: error getting buffer for %s, %s \n", __FUNCTION__,
+ file_path, bcmerrorstr(ret)));
+ goto exit;
+ }
+
+ do {
+ chunk_len = MIN(len, MAX_CHUNK_LEN);
+ memcpy(pay_load, memblock + file_offset, chunk_len);
+ if (len - chunk_len == 0) {
+ dl_flag |= DL_END;
+ }
+
+ ret = dhd_download_2_dongle(dhd, iovar, dl_flag, DL_TYPE_CLM,
+ dnld_buf, data_offset + chunk_len);
+
+ dl_flag &= ~DL_BEGIN;
+ len = len - chunk_len;
+ file_offset += chunk_len;
+ } while ((len > 0) && (ret == 0));
+
+exit:
+ if (dnld_buf) {
+ MFREE(dhd->osh, dnld_buf, size2alloc);
+ }
+
+ return ret;
+}
+
+int
+dhd_apply_default_txcap(dhd_pub_t *dhd, char *path)
+{
+ int ret = BCME_ERROR;
+ ret = dhd_download_blob_cached(dhd, path, MAX_TXCAP_BUF_SIZE, "txcapload");
+ if (ret) {
+ DHD_ERROR(("%s: error downloading blob: %s \n", __FUNCTION__, bcmerrorstr(ret)));
+ }
+ return ret;
+}
+
+int
+dhd_apply_default_clm(dhd_pub_t *dhd, char *clm_path)
+{
+ char *clm_blob_path;
+ int len;
+ unsigned char *imgbuf = NULL;
+ int err = BCME_OK;
+ char iovbuf[WLC_IOCTL_SMLEN];
+ wl_country_t *cspec;
+
+ if (clm_path[0] != '\0') {
+ if (strlen(clm_path) > MOD_PARAM_PATHLEN) {
+ DHD_ERROR(("clm path exceeds max len\n"));
+ return BCME_ERROR;
+ }
+ clm_blob_path = clm_path;
+ DHD_TRACE(("clm path from module param:%s\n", clm_path));
+ } else {
+ clm_blob_path = VENDOR_PATH CONFIG_BCMDHD_CLM_PATH;
+ }
+
+ /* If the CLM blob file is found on the filesystem, download it.
+ * After the CLM file download, or if the blob file is not present,
+ * validate the country code before proceeding with the initialization.
+ * If the country code is not valid, fail the initialization.
+ */
+
+ imgbuf = dhd_os_open_image((char *)clm_blob_path);
+ if (imgbuf == NULL) {
+ goto exit;
+ }
+
+ len = dhd_os_get_image_size(imgbuf);
+
+ if ((len > 0) && (len < MAX_CLM_BUF_SIZE) && imgbuf) {
+ len = bcm_mkiovar("country", NULL, 0, iovbuf, sizeof(iovbuf));
+ if (len == 0) {
+ err = BCME_BUFTOOSHORT;
+ goto exit;
+ }
+ err = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0);
+ if (err) {
+ DHD_ERROR(("%s: country code get failed\n", __FUNCTION__));
+ goto exit;
+ }
+
+ cspec = (wl_country_t *)iovbuf;
+ if ((strncmp(cspec->ccode, WL_CCODE_NULL_COUNTRY, WLC_CNTRY_BUF_SZ)) != 0) {
+ DHD_ERROR(("%s: CLM already exist in F/W, "
+ "new CLM data will be added to the end of existing CLM data!\n",
+ __FUNCTION__));
+ }
+
+ /* Found blob file. Download the file */
+ DHD_ERROR(("clm file download from %s \n", clm_blob_path));
+ if (imgbuf) {
+ dhd_os_close_image(imgbuf);
+ imgbuf = NULL;
+ }
+ err = dhd_download_blob_cached(dhd, clm_blob_path, MAX_CLM_BUF_SIZE, "clmload");
+ if (err) {
+ DHD_ERROR(("%s: CLM download failed err=%d\n", __FUNCTION__, err));
+ if (!dhd_bus_skip_clm(dhd)) {
+ /* Retrieve clmload_status and print */
+ len = bcm_mkiovar("clmload_status", NULL, 0, iovbuf,
+ sizeof(iovbuf));
+ if (len == 0) {
+ err = BCME_BUFTOOSHORT;
+ goto exit;
+ }
+ err = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf,
+ sizeof(iovbuf), FALSE, 0);
+ if (err) {
+ DHD_ERROR(("%s: clmload_status get failed err=%d \n",
+ __FUNCTION__, err));
+ } else {
+ DHD_ERROR(("%s: clmload_status: %d \n",
+ __FUNCTION__, *((int *)iovbuf)));
+ if (*((int *)iovbuf) == CHIPID_MISMATCH) {
+ DHD_ERROR(("Chip ID mismatch error \n"));
+ }
+ }
+ err = BCME_ERROR;
+ goto exit;
+ }
+ } else {
+ DHD_INFO(("%s: CLM download succeeded \n", __FUNCTION__));
+ }
+ } else {
+ DHD_INFO(("Skipping the clm download. len:%d memblk:%p \n", len, imgbuf));
+#ifdef DHD_USE_CLMINFO_PARSER
+ err = BCME_ERROR;
+ goto exit;
+#endif /* DHD_USE_CLMINFO_PARSER */
+ }
+
+ /* Verify country code */
+ len = bcm_mkiovar("country", NULL, 0, iovbuf, sizeof(iovbuf));
+ if (len == 0) {
+ err = BCME_BUFTOOSHORT;
+ goto exit;
+ }
+ err = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0);
+ if (err) {
+ DHD_ERROR(("%s: country code get failed\n", __FUNCTION__));
+ goto exit;
+ }
+
+ cspec = (wl_country_t *)iovbuf;
+ if ((strncmp(cspec->ccode, WL_CCODE_NULL_COUNTRY, WLC_CNTRY_BUF_SZ)) == 0) {
+ /* Country code not initialized or CLM download not proper */
+ DHD_ERROR(("country code not initialized\n"));
+ err = BCME_ERROR;
+ }
+exit:
+
+ if (imgbuf) {
+ dhd_os_close_image(imgbuf);
+ }
+
+ return err;
+}
+#else
+
+int
+dhd_apply_default_txcap(dhd_pub_t *dhd, char *path)
+{
+ return 0;
+}
+
+int
+dhd_check_current_clm_data(dhd_pub_t *dhd)
+{
+ char iovbuf[WLC_IOCTL_SMLEN];
+ wl_country_t *cspec;
+ int err = BCME_OK;
+
+ memset(iovbuf, 0, sizeof(iovbuf));
+ err = bcm_mkiovar("country", NULL, 0, iovbuf, sizeof(iovbuf));
+ if (err == 0) {
+ err = BCME_BUFTOOSHORT;
+ DHD_ERROR(("%s: bcm_mkiovar failed.", __FUNCTION__));
+ return err;
+ }
+ err = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0);
+ if (err) {
+ DHD_ERROR(("%s: country code get failed\n", __FUNCTION__));
+ return err;
+ }
+ cspec = (wl_country_t *)iovbuf;
+ if ((strncmp(cspec->ccode, WL_CCODE_NULL_COUNTRY, WLC_CNTRY_BUF_SZ)) == 0) {
+ DHD_ERROR(("%s: ----- This FW is not included CLM data -----\n",
+ __FUNCTION__));
+ return FALSE;
+ }
+ DHD_ERROR(("%s: ----- This FW is included CLM data -----\n",
+ __FUNCTION__));
+ return TRUE;
+}
+
+int
+dhd_apply_default_clm(dhd_pub_t *dhd, char *clm_path)
+{
+ char *clm_blob_path;
+ int len;
+ char *memblock = NULL;
+ int err = BCME_OK;
+ char iovbuf[WLC_IOCTL_SMLEN];
+ int status = FALSE;
+
+ if (clm_path && clm_path[0] != '\0') {
+ if (strlen(clm_path) > MOD_PARAM_PATHLEN) {
+ DHD_ERROR(("clm path exceeds max len\n"));
+ return BCME_ERROR;
+ }
+ clm_blob_path = clm_path;
+ DHD_TRACE(("clm path from module param:%s\n", clm_path));
+ } else {
+ clm_blob_path = VENDOR_PATH CONFIG_BCMDHD_CLM_PATH;
+ }
+
+ /* If the CLM blob file is found on the filesystem, download it.
+ * After the CLM file download, or if the blob file is not present,
+ * validate the country code before proceeding with the initialization.
+ * If the country code is not valid, fail the initialization.
+ */
+#if !defined(LINUX) && !defined(linux)
+ len = MAX_CLM_BUF_SIZE;
+ dhd_get_download_buffer(dhd, clm_blob_path, CLM_BLOB, &memblock, &len);
+#else
+ memblock = dhd_os_open_image1(dhd, (char *)clm_blob_path);
+ if (memblock == NULL) {
+ printf("%s: Ignore clm file %s\n", __FUNCTION__, clm_path);
+#if defined(DHD_BLOB_EXISTENCE_CHECK)
+ if (dhd->is_blob) {
+ err = BCME_ERROR;
+ } else {
+ status = dhd_check_current_clm_data(dhd);
+ if (status == TRUE) {
+ err = BCME_OK;
+ } else {
+ err = status;
+ }
+ }
+#endif /* DHD_BLOB_EXISTENCE_CHECK */
+ goto exit;
+ }
+
+ len = dhd_os_get_image_size(memblock);
+#endif /* !LINUX && !linux */
+
+ if ((len > 0) && (len < MAX_CLM_BUF_SIZE) && memblock) {
+ status = dhd_check_current_clm_data(dhd);
+ if (status == TRUE) {
+#if defined(DHD_BLOB_EXISTENCE_CHECK)
+ if (dhd->op_mode != DHD_FLAG_MFG_MODE) {
+ if (dhd->is_blob) {
+ err = BCME_ERROR;
+ }
+ goto exit;
+ }
+#else
+ DHD_ERROR(("%s: CLM already exist in F/W, "
+ "new CLM data will be added to the end of existing CLM data!\n",
+ __FUNCTION__));
+#endif /* DHD_BLOB_EXISTENCE_CHECK */
+ } else if (status != FALSE) {
+ err = status;
+ goto exit;
+ }
+
+ /* Found blob file. Download the file */
+ DHD_TRACE(("clm file download from %s \n", clm_blob_path));
+ err = dhd_download_blob(dhd, (unsigned char*)memblock, len, "clmload");
+ if (err) {
+ DHD_ERROR(("%s: CLM download failed err=%d\n", __FUNCTION__, err));
+ /* Retrieve clmload_status and print */
+ memset(iovbuf, 0, sizeof(iovbuf));
+ len = bcm_mkiovar("clmload_status", NULL, 0, iovbuf, sizeof(iovbuf));
+ if (len == 0) {
+ err = BCME_BUFTOOSHORT;
+ goto exit;
+ }
+ err = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0);
+ if (err) {
+ DHD_ERROR(("%s: clmload_status get failed err=%d \n",
+ __FUNCTION__, err));
+ } else {
+ DHD_ERROR(("%s: clmload_status: %d \n",
+ __FUNCTION__, *((int *)iovbuf)));
+ if (*((int *)iovbuf) == CHIPID_MISMATCH) {
+ DHD_ERROR(("Chip ID mismatch error \n"));
+ }
+ }
+ err = BCME_ERROR;
+ goto exit;
+ } else {
+ DHD_INFO(("%s: CLM download succeeded \n", __FUNCTION__));
+ }
+ } else {
+ DHD_INFO(("Skipping the clm download. len:%d memblk:%p \n", len, memblock));
+ }
+
+ /* Verify country code */
+ status = dhd_check_current_clm_data(dhd);
+
+ if (status != TRUE) {
+ /* Country code not initialized or CLM download not proper */
+ DHD_ERROR(("country code not initialized\n"));
+ err = status;
+ }
+exit:
+
+ if (memblock) {
+#if defined(LINUX) || defined(linux)
+ dhd_os_close_image1(dhd, memblock);
+#else
+ dhd_free_download_buffer(dhd, memblock, MAX_CLM_BUF_SIZE);
+#endif /* LINUX || linux */
+ }
+
+ return err;
+}
+#endif /* defined(CACHE_FW_IMAGES) */
+
+void dhd_free_download_buffer(dhd_pub_t *dhd, void *buffer, int length)
+{
+#ifdef CACHE_FW_IMAGES
+ /* cached images are kept for reuse; nothing to free here */
+ return;
+#endif /* CACHE_FW_IMAGES */
+ MFREE(dhd->osh, buffer, length);
+}
+
+#ifdef REPORT_FATAL_TIMEOUTS
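+/* Fatal-timeout reporting: four watchdog timers (scan, join, cmd, bus),
+* each guarded by its own spinlock. When a timer fires, its handler stops
+* the timer and, unless bus errors are already pending, sends a trap
+* request to the firmware via dhd_send_trap_to_fw_for_timeout().
+*/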
+void
+init_dhd_timeouts(dhd_pub_t *pub)
+{
+ pub->timeout_info = MALLOC(pub->osh, sizeof(timeout_info_t));
+ if (pub->timeout_info == NULL) {
+ DHD_ERROR(("%s: Failed to alloc timeout_info\n", __FUNCTION__));
+ } else {
+ DHD_INFO(("Initializing dhd_timeouts\n"));
+ pub->timeout_info->scan_timer_lock = osl_spin_lock_init(pub->osh);
+ pub->timeout_info->join_timer_lock = osl_spin_lock_init(pub->osh);
+ pub->timeout_info->bus_timer_lock = osl_spin_lock_init(pub->osh);
+ pub->timeout_info->cmd_timer_lock = osl_spin_lock_init(pub->osh);
+ pub->timeout_info->scan_timeout_val = SCAN_TIMEOUT_DEFAULT;
+ pub->timeout_info->join_timeout_val = JOIN_TIMEOUT_DEFAULT;
+ pub->timeout_info->cmd_timeout_val = CMD_TIMEOUT_DEFAULT;
+ pub->timeout_info->bus_timeout_val = BUS_TIMEOUT_DEFAULT;
+ pub->timeout_info->scan_timer_active = FALSE;
+ pub->timeout_info->join_timer_active = FALSE;
+ pub->timeout_info->cmd_timer_active = FALSE;
+ pub->timeout_info->bus_timer_active = FALSE;
+ pub->timeout_info->cmd_join_error = FALSE;
+ pub->timeout_info->cmd_request_id = 0;
+ OSL_ATOMIC_SET(pub->osh, &pub->set_ssid_rcvd, FALSE);
+ }
+}
+
+void
+deinit_dhd_timeouts(dhd_pub_t *pub)
+{
+ /* Stop the join, scan, bus and cmd timers,
+ * as failing to do so may cause a kernel panic
+ * if an rmmod is done.
+ */
+ if (!pub->timeout_info) {
+ DHD_ERROR(("%s timeout_info pointer is NULL\n", __FUNCTION__));
+ ASSERT(0);
+ return;
+ }
+ if (dhd_stop_scan_timer(pub, FALSE, 0)) {
+ DHD_ERROR(("%s dhd_stop_scan_timer failed\n", __FUNCTION__));
+ ASSERT(0);
+ }
+ if (dhd_stop_bus_timer(pub)) {
+ DHD_ERROR(("%s dhd_stop_bus_timer failed\n", __FUNCTION__));
+ ASSERT(0);
+ }
+ if (dhd_stop_cmd_timer(pub)) {
+ DHD_ERROR(("%s dhd_stop_cmd_timer failed\n", __FUNCTION__));
+ ASSERT(0);
+ }
+ if (dhd_stop_join_timer(pub)) {
+ DHD_ERROR(("%s dhd_stop_join_timer failed\n", __FUNCTION__));
+ ASSERT(0);
+ }
+
+ osl_spin_lock_deinit(pub->osh, pub->timeout_info->scan_timer_lock);
+ osl_spin_lock_deinit(pub->osh, pub->timeout_info->join_timer_lock);
+ osl_spin_lock_deinit(pub->osh, pub->timeout_info->bus_timer_lock);
+ osl_spin_lock_deinit(pub->osh, pub->timeout_info->cmd_timer_lock);
+ MFREE(pub->osh, pub->timeout_info, sizeof(timeout_info_t));
+}
+
+static void
+dhd_cmd_timeout(void *ctx)
+{
+ dhd_pub_t *pub = (dhd_pub_t *)ctx;
+ unsigned long flags;
+
+ if (!pub->timeout_info) {
+ DHD_ERROR(("DHD: timeout_info NULL\n"));
+ ASSERT(0);
+ return;
+ }
+
+ DHD_TIMER_LOCK(pub->timeout_info->cmd_timer_lock, flags);
+ if (pub->timeout_info && pub->timeout_info->cmd_timer_active) {
+ DHD_ERROR(("\nERROR COMMAND TIMEOUT TO:%d\n", pub->timeout_info->cmd_timeout_val));
+ DHD_TIMER_UNLOCK(pub->timeout_info->cmd_timer_lock, flags);
+#ifdef PCIE_OOB
+ /* Assert device_wake so that UART_Rx is available */
+ if (dhd_bus_set_device_wake(pub->bus, TRUE)) {
+ DHD_ERROR(("%s: dhd_bus_set_device_wake() failed\n", __FUNCTION__));
+ ASSERT(0);
+ }
+#endif /* PCIE_OOB */
+ if (dhd_stop_cmd_timer(pub)) {
+ DHD_ERROR(("%s: dhd_stop_cmd_timer() failed\n", __FUNCTION__));
+ ASSERT(0);
+ }
+ dhd_wakeup_ioctl_event(pub, IOCTL_RETURN_ON_ERROR);
+ if (!dhd_query_bus_erros(pub))
+ dhd_send_trap_to_fw_for_timeout(pub, DHD_REASON_COMMAND_TO);
+ } else {
+ DHD_TIMER_UNLOCK(pub->timeout_info->cmd_timer_lock, flags);
+ }
+}
+
+int
+dhd_start_cmd_timer(dhd_pub_t *pub)
+{
+ int ret = BCME_OK;
+ unsigned long flags = 0;
+ uint32 cmd_to_ms;
+
+ if (!pub->timeout_info) {
+ DHD_ERROR(("DHD: timeout_info NULL\n"));
+ ret = BCME_ERROR;
+ ASSERT(0);
+ goto exit_null;
+ }
+ DHD_TIMER_LOCK(pub->timeout_info->cmd_timer_lock, flags);
+ cmd_to_ms = pub->timeout_info->cmd_timeout_val;
+
+ if (pub->timeout_info->cmd_timeout_val == 0) {
+ /* Disable Command timer timeout */
+ DHD_INFO(("DHD: Command Timeout Disabled\n"));
+ goto exit;
+ }
+ if (pub->timeout_info->cmd_timer_active) {
+ DHD_ERROR(("%s:Timer already active\n", __FUNCTION__));
+ ret = BCME_ERROR;
+ ASSERT(0);
+ } else {
+ pub->timeout_info->cmd_timer = osl_timer_init(pub->osh,
+ "cmd_timer", dhd_cmd_timeout, pub);
+ osl_timer_update(pub->osh, pub->timeout_info->cmd_timer,
+ cmd_to_ms, 0);
+ pub->timeout_info->cmd_timer_active = TRUE;
+ }
+ if (ret == BCME_OK) {
+ DHD_INFO(("%s Cmd Timer started\n", __FUNCTION__));
+ }
+exit:
+ DHD_TIMER_UNLOCK(pub->timeout_info->cmd_timer_lock, flags);
+exit_null:
+ return ret;
+}
+
+int
+dhd_stop_cmd_timer(dhd_pub_t *pub)
+{
+ int ret = BCME_OK;
+ unsigned long flags = 0;
+
+ if (!pub) {
+ DHD_ERROR(("DHD: pub NULL\n"));
+ ASSERT(0);
+ return BCME_ERROR;
+ }
+
+ if (!pub->timeout_info) {
+ DHD_ERROR(("DHD: timeout_info NULL\n"));
+ ret = BCME_ERROR;
+ ASSERT(0);
+ goto exit;
+ }
+ DHD_TIMER_LOCK(pub->timeout_info->cmd_timer_lock, flags);
+
+ if (pub->timeout_info->cmd_timer_active) {
+ osl_timer_del(pub->osh, pub->timeout_info->cmd_timer);
+ pub->timeout_info->cmd_timer_active = FALSE;
+ } else {
+ DHD_INFO(("DHD: CMD timer is not active\n"));
+ }
+ if (ret == BCME_OK) {
+ DHD_INFO(("%s Cmd Timer Stopped\n", __FUNCTION__));
+ }
+ DHD_TIMER_UNLOCK(pub->timeout_info->cmd_timer_lock, flags);
+exit:
+ return ret;
+}
+
+static int
+__dhd_stop_join_timer(dhd_pub_t *pub)
+{
+ int ret = BCME_OK;
+ if (!pub) {
+ DHD_ERROR(("DHD: pub NULL\n"));
+ ASSERT(0);
+ return BCME_ERROR;
+ }
+ if (!pub->timeout_info) {
+ DHD_ERROR(("DHD: timeout_info NULL\n"));
+ ASSERT(0);
+ return BCME_ERROR;
+ }
+
+ if (pub->timeout_info->join_timer_active) {
+ osl_timer_del(pub->osh, pub->timeout_info->join_timer);
+ pub->timeout_info->join_timer_active = FALSE;
+ DHD_INFO(("%s join timer stopped\n", __FUNCTION__));
+ } else {
+ DHD_INFO(("%s join timer is not active\n", __FUNCTION__));
+ }
+
+ return ret;
+}
+
+static void
+dhd_join_timeout(void *ctx)
+{
+ dhd_pub_t *pub = (dhd_pub_t *)ctx;
+ unsigned long flags;
+
+ if (!pub->timeout_info) {
+ DHD_ERROR(("%s DHD: timeout_info NULL\n", __FUNCTION__));
+ ASSERT(0);
+ return;
+ }
+
+ DHD_TIMER_LOCK(pub->timeout_info->join_timer_lock, flags);
+ if (pub->timeout_info->join_timer_active) {
+ if (__dhd_stop_join_timer(pub)) {
+ DHD_ERROR(("%s: __dhd_stop_join_timer() failed\n", __FUNCTION__));
+ ASSERT(0);
+ }
+ if (pub->timeout_info->cmd_join_error) {
+ DHD_ERROR(("\n%s ERROR JOIN TIMEOUT TO:%d:0x%x\n", __FUNCTION__,
+ pub->timeout_info->join_timeout_val,
+ pub->timeout_info->cmd_join_error));
+ if (!dhd_query_bus_erros(pub)) {
+ dhd_send_trap_to_fw_for_timeout(pub, DHD_REASON_JOIN_TO);
+ }
+ pub->timeout_info->cmd_join_error = 0;
+ }
+ }
+ DHD_TIMER_UNLOCK(pub->timeout_info->join_timer_lock, flags);
+}
+
+int
+dhd_start_join_timer(dhd_pub_t *pub)
+{
+ int ret = BCME_OK;
+ unsigned long flags = 0;
+ uint32 join_to_ms;
+
+ if (!pub->timeout_info) {
+ DHD_ERROR(("%s DHD: timeout_info NULL\n", __FUNCTION__));
+ ret = BCME_ERROR;
+ ASSERT(0);
+ goto exit;
+ }
+
+ join_to_ms = pub->timeout_info->join_timeout_val;
+ DHD_TIMER_LOCK(pub->timeout_info->join_timer_lock, flags);
+ if (pub->timeout_info->join_timer_active) {
+ DHD_ERROR(("%s: stopping active timer\n", __FUNCTION__));
+ __dhd_stop_join_timer(pub);
+ }
+ if (pub->timeout_info->join_timeout_val == 0) {
+ /* Disable Join timer timeout */
+ DHD_INFO(("%s DHD: join timeout disabled\n", __FUNCTION__));
+ } else {
+ pub->timeout_info->join_timer = osl_timer_init(pub->osh,
+ "join_timer", dhd_join_timeout, pub);
+ osl_timer_update(pub->osh, pub->timeout_info->join_timer, join_to_ms, 0);
+ pub->timeout_info->join_timer_active = TRUE;
+ pub->timeout_info->cmd_join_error = 0;
+ dhd_set_join_error(pub, WLC_SSID_MASK);
+ if (pub->secure_join) {
+ dhd_set_join_error(pub, WLC_WPA_MASK);
+ }
+ DHD_ERROR(("%s: join timer started 0x%x\n", __FUNCTION__,
+ pub->timeout_info->cmd_join_error));
+ }
+ DHD_TIMER_UNLOCK(pub->timeout_info->join_timer_lock, flags);
+exit:
+ return ret;
+}
+
+int
+dhd_stop_join_timer(dhd_pub_t *pub)
+{
+ int ret = BCME_OK;
+ unsigned long flags;
+
+ if (!pub) {
+ DHD_ERROR(("%s DHD: pub NULL\n", __FUNCTION__));
+ ASSERT(0);
+ return BCME_ERROR;
+ }
+
+ DHD_TIMER_LOCK(pub->timeout_info->join_timer_lock, flags);
+ ret = __dhd_stop_join_timer(pub);
+ DHD_TIMER_UNLOCK(pub->timeout_info->join_timer_lock, flags);
+ return ret;
+}
+
+static void
+dhd_set_join_error(dhd_pub_t *pub, uint32 mask)
+{
+ DHD_INFO(("Setting join Error %d\n", mask));
+ if (pub->timeout_info) {
+ pub->timeout_info->cmd_join_error |= mask;
+ }
+}
+
+void
+dhd_clear_join_error(dhd_pub_t *pub, uint32 mask)
+{
+ unsigned long flags;
+
+ DHD_INFO(("%s clear join error %d\n", __FUNCTION__, mask));
+ if (!(pub->timeout_info)) {
+ return;
+ }
+
+ DHD_TIMER_LOCK(pub->timeout_info->join_timer_lock, flags);
+ pub->timeout_info->cmd_join_error &= ~mask;
+ /* If both WLC_SSID_MASK, WLC_WPA_MASK are received cancel the timer */
+ if (!(pub->timeout_info->cmd_join_error)) {
+ if (__dhd_stop_join_timer(pub)) {
+ DHD_ERROR(("%s: dhd_stop_join_timer failed\n", __FUNCTION__));
+ ASSERT(0);
+ }
+ }
+ DHD_TIMER_UNLOCK(pub->timeout_info->join_timer_lock, flags);
+}
+
+static void
+dhd_scan_timeout(void *ctx)
+{
+ dhd_pub_t *pub = (dhd_pub_t *)ctx;
+ unsigned long flags;
+
+ if (!pub) {
+ DHD_ERROR(("DHD: pub NULL\n"));
+ ASSERT(0);
+ return;
+ }
+
+ if (pub->timeout_info == NULL) {
+ DHD_ERROR(("timeout_info pointer is NULL\n"));
+ ASSERT(0);
+ return;
+ }
+ DHD_TIMER_LOCK(pub->timeout_info->scan_timer_lock, flags);
+ if (pub->timeout_info->scan_timer_active) {
+ DHD_ERROR(("\nERROR SCAN TIMEOUT TO:%d\n", pub->timeout_info->scan_timeout_val));
+ DHD_TIMER_UNLOCK(pub->timeout_info->scan_timer_lock, flags);
+ dhd_stop_scan_timer(pub, FALSE, 0);
+ if (!dhd_query_bus_erros(pub))
+ dhd_send_trap_to_fw_for_timeout(pub, DHD_REASON_SCAN_TO);
+ } else {
+ DHD_TIMER_UNLOCK(pub->timeout_info->scan_timer_lock, flags);
+ }
+}
+
+int
+dhd_start_scan_timer(dhd_pub_t *pub, bool is_escan)
+{
+ int ret = BCME_OK;
+ unsigned long flags = 0;
+ uint32 scan_to_ms;
+
+ if (!pub->timeout_info) {
+ DHD_ERROR(("DHD: timeout_info NULL\n"));
+ ret = BCME_ERROR;
+ ASSERT(0);
+ goto exit_null;
+ }
+ DHD_TIMER_LOCK(pub->timeout_info->scan_timer_lock, flags);
+ scan_to_ms = pub->timeout_info->scan_timeout_val;
+
+ if (is_escan) {
+ if (pub->timeout_info->escan_aborted &&
+ pub->esync_id == pub->timeout_info->abort_syncid) {
+ pub->timeout_info->escan_aborted = FALSE;
+ DHD_INFO(("%s: escan already aborted, do not start timer \n",
+ __FUNCTION__));
+ goto exit;
+ }
+ pub->timeout_info->escan_syncid = pub->esync_id;
+ } else {
+ pub->timeout_info->escan_syncid = 0;
+ }
+
+ if (pub->timeout_info->scan_timer_active) {
+ /* cancel any earlier running timer */
+ DHD_INFO(("%s:Timer already active, stopping it.\n", __FUNCTION__));
+ osl_timer_del(pub->osh, pub->timeout_info->scan_timer);
+ pub->timeout_info->scan_timer_active = FALSE;
+ }
+
+ if (pub->timeout_info->scan_timeout_val == 0) {
+ /* Disable Scan timer timeout */
+ DHD_INFO(("DHD: Scan Timeout Disabled\n"));
+ } else {
+ pub->timeout_info->scan_timer = osl_timer_init(pub->osh, "scan_timer",
+ dhd_scan_timeout, pub);
+ pub->timeout_info->scan_timer_active = TRUE;
+ osl_timer_update(pub->osh, pub->timeout_info->scan_timer, scan_to_ms, 0);
+ DHD_INFO(("%s Scan Timer started\n", __FUNCTION__));
+ }
+
+exit:
+ DHD_TIMER_UNLOCK(pub->timeout_info->scan_timer_lock, flags);
+exit_null:
+ return ret;
+}
+
+int
+dhd_stop_scan_timer(dhd_pub_t *pub, bool is_escan, uint16 sync_id)
+{
+ int ret = BCME_OK;
+ unsigned long flags = 0;
+
+ if (!pub) {
+ DHD_ERROR(("DHD: pub NULL\n"));
+ ASSERT(0);
+ return BCME_ERROR;
+ }
+
+ if (!pub->timeout_info) {
+ DHD_ERROR(("DHD: timeout_info NULL\n"));
+ ret = BCME_ERROR;
+ ASSERT(0);
+ goto exit_null;
+ }
+
+ DHD_TIMER_LOCK(pub->timeout_info->scan_timer_lock, flags);
+
+ if (pub->timeout_info->scan_timer_active) {
+ if (is_escan) {
+ if (sync_id == pub->timeout_info->escan_syncid) {
+ osl_timer_del(pub->osh, pub->timeout_info->scan_timer);
+ pub->timeout_info->scan_timer_active = FALSE;
+ DHD_INFO(("%s Scan Timer Stopped\n", __FUNCTION__));
+ }
+ } else {
+ osl_timer_del(pub->osh, pub->timeout_info->scan_timer);
+ pub->timeout_info->scan_timer_active = FALSE;
+ DHD_INFO(("%s Scan Timer Stopped\n", __FUNCTION__));
+ }
+
+ } else {
+ DHD_INFO(("DHD: SCAN timer is not active\n"));
+ }
+
+ DHD_TIMER_UNLOCK(pub->timeout_info->scan_timer_lock, flags);
+
+exit_null:
+ return ret;
+}
+
+static void
+dhd_bus_timeout(void *ctx)
+{
+ dhd_pub_t *pub = (dhd_pub_t *)ctx;
+ unsigned long flags;
+
+ if (pub->timeout_info == NULL) {
+ DHD_ERROR(("timeout_info pointer is NULL\n"));
+ ASSERT(0);
+ return;
+ }
+
+ DHD_TIMER_LOCK(pub->timeout_info->bus_timer_lock, flags);
+ if (pub->timeout_info && pub->timeout_info->bus_timer_active) {
+ DHD_ERROR(("\nERROR BUS TIMEOUT TO:%d\n", pub->timeout_info->bus_timeout_val));
+ DHD_TIMER_UNLOCK(pub->timeout_info->bus_timer_lock, flags);
+#ifdef PCIE_OOB
+ /* Assert device_wake so that UART_Rx is available */
+ if (dhd_bus_set_device_wake(pub->bus, TRUE)) {
+ DHD_ERROR(("%s: dhd_bus_set_device_wake() failed\n", __FUNCTION__));
+ ASSERT(0);
+ }
+#endif /* PCIE_OOB */
+ if (dhd_stop_bus_timer(pub)) {
+ DHD_ERROR(("%s: dhd_stop_bus_timer() failed\n", __FUNCTION__));
+ ASSERT(0);
+ }
+ if (!dhd_query_bus_erros(pub)) {
+ dhd_send_trap_to_fw_for_timeout(pub, DHD_REASON_OQS_TO);
+ }
+#ifdef BCMPCIE
+ dhd_msgbuf_iovar_timeout_dump(pub);
+#endif /* BCMPCIE */
+ } else {
+ DHD_TIMER_UNLOCK(pub->timeout_info->bus_timer_lock, flags);
+ }
+}
+
+int
+dhd_start_bus_timer(dhd_pub_t *pub)
+{
+ int ret = BCME_OK;
+ unsigned long flags = 0;
+ uint32 bus_to_ms;
+
+ if (!pub->timeout_info) {
+ DHD_ERROR(("DHD: timeout_info NULL\n"));
+ ret = BCME_ERROR;
+ ASSERT(0);
+ goto exit_null;
+ }
+ DHD_TIMER_LOCK(pub->timeout_info->bus_timer_lock, flags);
+ bus_to_ms = pub->timeout_info->bus_timeout_val;
+
+ if (pub->timeout_info->bus_timeout_val == 0) {
+ /* Disable Bus timer timeout */
+ DHD_INFO(("DHD: Bus Timeout Disabled\n"));
+ goto exit;
+ }
+ if (pub->timeout_info->bus_timer_active) {
+ DHD_ERROR(("%s:Timer already active\n", __FUNCTION__));
+ ret = BCME_ERROR;
+ ASSERT(0);
+ } else {
+ pub->timeout_info->bus_timer = osl_timer_init(pub->osh,
+ "bus_timer", dhd_bus_timeout, pub);
+ pub->timeout_info->bus_timer_active = TRUE;
+ osl_timer_update(pub->osh, pub->timeout_info->bus_timer, bus_to_ms, 0);
+ }
+ if (ret == BCME_OK) {
+ DHD_INFO(("%s: BUS Timer started\n", __FUNCTION__));
+ }
+exit:
+ DHD_TIMER_UNLOCK(pub->timeout_info->bus_timer_lock, flags);
+exit_null:
+ return ret;
+}
+
+int
+dhd_stop_bus_timer(dhd_pub_t *pub)
+{
+ int ret = BCME_OK;
+ unsigned long flags;
+
+ if (!pub) {
+ DHD_ERROR(("DHD: pub NULL\n"));
+ ASSERT(0);
+ return BCME_ERROR;
+ }
+
+ if (!pub->timeout_info) {
+ DHD_ERROR(("DHD: timeout_info NULL\n"));
+ ret = BCME_ERROR;
+ ASSERT(0);
+ goto exit;
+ }
+
+ DHD_TIMER_LOCK(pub->timeout_info->bus_timer_lock, flags);
+
+ if (pub->timeout_info->bus_timer_active) {
+ osl_timer_del(pub->osh, pub->timeout_info->bus_timer);
+ pub->timeout_info->bus_timer_active = FALSE;
+ } else {
+ DHD_INFO(("DHD: BUS timer is not active\n"));
+ }
+ if (ret == BCME_OK) {
+ DHD_INFO(("%s: Bus Timer Stopped\n", __FUNCTION__));
+ }
+ DHD_TIMER_UNLOCK(pub->timeout_info->bus_timer_lock, flags);
+exit:
+ return ret;
+}
+
+int
+dhd_set_request_id(dhd_pub_t *pub, uint16 id, uint32 cmd)
+{
+ DHD_INFO(("%s: id:%d\n", __FUNCTION__, id));
+ if (pub->timeout_info) {
+ pub->timeout_info->cmd_request_id = id;
+ pub->timeout_info->cmd = cmd;
+ return BCME_OK;
+ } else {
+ return BCME_ERROR;
+ }
+}
+
+uint16
+dhd_get_request_id(dhd_pub_t *pub)
+{
+ if (pub->timeout_info) {
+ return (pub->timeout_info->cmd_request_id);
+ } else {
+ return 0;
+ }
+}
+
+void
+dhd_get_scan_to_val(dhd_pub_t *pub, uint32 *to_val)
+{
+ if (pub->timeout_info) {
+ *to_val = pub->timeout_info->scan_timeout_val;
+ } else {
+ *to_val = 0;
+ }
+}
+
+void
+dhd_set_scan_to_val(dhd_pub_t *pub, uint32 to_val)
+{
+ if (pub->timeout_info) {
+ DHD_INFO(("Setting scan TO val:%d\n", to_val));
+ pub->timeout_info->scan_timeout_val = to_val;
+ }
+}
+
+void
+dhd_get_join_to_val(dhd_pub_t *pub, uint32 *to_val)
+{
+ if (pub->timeout_info) {
+ *to_val = pub->timeout_info->join_timeout_val;
+ } else {
+ *to_val = 0;
+ }
+}
+
+void
+dhd_set_join_to_val(dhd_pub_t *pub, uint32 to_val)
+{
+ if (pub->timeout_info) {
+ DHD_INFO(("Setting join TO val:%d\n", to_val));
+ pub->timeout_info->join_timeout_val = to_val;
+ }
+}
+
+void
+dhd_get_cmd_to_val(dhd_pub_t *pub, uint32 *to_val)
+{
+ if (pub->timeout_info) {
+ *to_val = pub->timeout_info->cmd_timeout_val;
+ } else {
+ *to_val = 0;
+ }
+}
+
+void
+dhd_set_cmd_to_val(dhd_pub_t *pub, uint32 to_val)
+{
+ if (pub->timeout_info) {
+ DHD_INFO(("Setting cmd TO val:%d\n", to_val));
+ pub->timeout_info->cmd_timeout_val = to_val;
+ }
+}
+
+void
+dhd_get_bus_to_val(dhd_pub_t *pub, uint32 *to_val)
+{
+ if (pub->timeout_info) {
+ *to_val = pub->timeout_info->bus_timeout_val;
+ } else {
+ *to_val = 0;
+ }
+}
+
+void
+dhd_set_bus_to_val(dhd_pub_t *pub, uint32 to_val)
+{
+ if (pub->timeout_info) {
+ DHD_INFO(("Setting bus TO val:%d\n", to_val));
+ pub->timeout_info->bus_timeout_val = to_val;
+ }
+}
+#endif /* REPORT_FATAL_TIMEOUTS */
+
+#ifdef SHOW_LOGTRACE
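+/* Parse logstrs.bin, which holds the format strings for firmware event
+* log traces. Three layouts are handled: a legacy headerless file, a v1
+* trailer/header, and a v2 header whose embedded firmware ID must match
+* the one at the tail of the firmware binary.
+*/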
+int
+dhd_parse_logstrs_file(osl_t *osh, char *raw_fmts, int logstrs_size,
+ dhd_event_log_t *event_log)
+{
+ uint32 *lognums = NULL;
+ char *logstrs = NULL;
+ logstr_trailer_t *trailer = NULL;
+ int ram_index = 0;
+ char **fmts = NULL;
+ int num_fmts = 0;
+ bool match_fail = TRUE;
+ int32 i = 0;
+ uint8 *pfw_id = NULL;
+ uint32 fwid = 0;
+ void *file = NULL;
+ int file_len = 0;
+ char fwid_str[FWID_STR_LEN];
+ uint32 hdr_logstrs_size = 0;
+
+ /* Read last three words in the logstrs.bin file */
+ trailer = (logstr_trailer_t *) (raw_fmts + logstrs_size -
+ sizeof(logstr_trailer_t));
+
+ if (trailer->log_magic == LOGSTRS_MAGIC) {
+ /*
+ * logstrs.bin has a header.
+ */
+ if (trailer->version == 1) {
+ logstr_header_v1_t *hdr_v1 = (logstr_header_v1_t *) (raw_fmts +
+ logstrs_size - sizeof(logstr_header_v1_t));
+ DHD_INFO(("%s: logstr header version = %u\n",
+ __FUNCTION__, hdr_v1->version));
+ num_fmts = hdr_v1->rom_logstrs_offset / sizeof(uint32);
+ ram_index = (hdr_v1->ram_lognums_offset -
+ hdr_v1->rom_lognums_offset) / sizeof(uint32);
+ lognums = (uint32 *) &raw_fmts[hdr_v1->rom_lognums_offset];
+ logstrs = (char *) &raw_fmts[hdr_v1->rom_logstrs_offset];
+ hdr_logstrs_size = hdr_v1->logstrs_size;
+ } else if (trailer->version == 2) {
+ logstr_header_t *hdr = (logstr_header_t *) (raw_fmts + logstrs_size -
+ sizeof(logstr_header_t));
+ DHD_INFO(("%s: logstr header version = %u; flags = %x\n",
+ __FUNCTION__, hdr->version, hdr->flags));
+
+ /* For ver. 2 of the header, need to match fwid of
+ * both logstrs.bin and fw bin
+ */
+
+ /* read the FWID from fw bin */
+ file = dhd_os_open_image1(NULL, st_str_file_path);
+ if (!file) {
+ DHD_ERROR(("%s: cannot open fw file !\n", __FUNCTION__));
+ goto error;
+ }
+ file_len = dhd_os_get_image_size(file);
+ if (file_len <= 0) {
+ DHD_ERROR(("%s: bad fw file length !\n", __FUNCTION__));
+ goto error;
+ }
+ /* fwid is at the end of fw bin in string format */
+ if (dhd_os_seek_file(file, file_len - (sizeof(fwid_str) - 1)) < 0) {
+ DHD_ERROR(("%s: can't seek file \n", __FUNCTION__));
+ goto error;
+ }
+
+ memset(fwid_str, 0, sizeof(fwid_str));
+ if (dhd_os_get_image_block(fwid_str, sizeof(fwid_str) - 1, file) <= 0) {
+ DHD_ERROR(("%s: read fw file failed !\n", __FUNCTION__));
+ goto error;
+ }
+ pfw_id = (uint8 *)bcmstrnstr(fwid_str, sizeof(fwid_str) - 1,
+ FWID_STR_1, strlen(FWID_STR_1));
+ if (!pfw_id) {
+ pfw_id = (uint8 *)bcmstrnstr(fwid_str, sizeof(fwid_str) - 1,
+ FWID_STR_2, strlen(FWID_STR_2));
+ if (!pfw_id) {
+ DHD_ERROR(("%s: could not find id in FW bin!\n",
+ __FUNCTION__));
+ goto error;
+ }
+ }
+ /* search for the '-' in the fw id str, after which the
+ * actual 4 byte fw id is present
+ */
+ while (pfw_id && *pfw_id != '-') {
+ ++pfw_id;
+ }
+ ++pfw_id;
+ fwid = bcm_strtoul((char *)pfw_id, NULL, 16);
+
+ /* check if fw id in logstrs.bin matches the fw one */
+ if (hdr->fw_id != fwid) {
+ DHD_ERROR(("%s: logstr id does not match FW!"
+ "logstrs_fwid:0x%x, rtecdc_fwid:0x%x\n",
+ __FUNCTION__, hdr->fw_id, fwid));
+ goto error;
+ }
+
+ match_fail = FALSE;
+ num_fmts = hdr->rom_logstrs_offset / sizeof(uint32);
+ ram_index = (hdr->ram_lognums_offset -
+ hdr->rom_lognums_offset) / sizeof(uint32);
+ lognums = (uint32 *) &raw_fmts[hdr->rom_lognums_offset];
+ logstrs = (char *) &raw_fmts[hdr->rom_logstrs_offset];
+ hdr_logstrs_size = hdr->logstrs_size;
+
+error:
+ if (file) {
+ dhd_os_close_image1(NULL, file);
+ }
+ if (match_fail) {
+ return BCME_DECERR;
+ }
+ } else {
+ DHD_ERROR(("%s: Invalid logstr version %u\n", __FUNCTION__,
+ trailer->version));
+ return BCME_ERROR;
+ }
+ if (logstrs_size != hdr_logstrs_size) {
+ DHD_ERROR(("%s: bad logstrs_size %d\n", __FUNCTION__, hdr_logstrs_size));
+ return BCME_ERROR;
+ }
+ } else {
+ /*
+ * Legacy logstrs.bin format without header.
+ */
+ num_fmts = *((uint32 *) (raw_fmts)) / sizeof(uint32);
+
+ /* Legacy RAM-only logstrs.bin format:
+ * - RAM 'lognums' section
+ * - RAM 'logstrs' section.
+ *
+ * 'lognums' is an array of indexes for the strings in the
+ * 'logstrs' section. The first uint32 is an index to the
+ * start of 'logstrs'. Therefore, if this index is divided
+ * by 'sizeof(uint32)' it provides the number of logstr
+ * entries.
+ */
+ ram_index = 0;
+ lognums = (uint32 *) raw_fmts;
+ logstrs = (char *) &raw_fmts[num_fmts << 2];
+ }
+ if (num_fmts) {
+ if (event_log->fmts != NULL) {
+ fmts = event_log->fmts; /* reuse existing malloced fmts */
+ } else {
+ fmts = MALLOC(osh, num_fmts * sizeof(char *));
+ }
+ }
+ if (fmts == NULL) {
+ DHD_ERROR(("%s: Failed to allocate fmts memory\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+ event_log->fmts_size = num_fmts * sizeof(char *);
+
+ for (i = 0; i < num_fmts; i++) {
+ /* ROM lognums index into logstrs using 'rom_logstrs_offset' as a base
+ * (they are 0-indexed relative to 'rom_logstrs_offset').
+ *
+ * RAM lognums are already indexed to point to the correct RAM logstrs (they
+ * are 0-indexed relative to the start of the logstrs.bin file).
+ */
+ if (i == ram_index) {
+ logstrs = raw_fmts;
+ }
+ fmts[i] = &logstrs[lognums[i]];
+ }
+ event_log->fmts = fmts;
+ event_log->raw_fmts_size = logstrs_size;
+ event_log->raw_fmts = raw_fmts;
+ event_log->num_fmts = num_fmts;
+ return BCME_OK;
+} /* dhd_parse_logstrs_file */
+
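+/* Scan the firmware map file in READ_NUM_BYTES windows for the text_start,
+* rodata_start and rodata_end addresses, rewinding slightly between reads
+* so a symbol split across two windows is not missed. 'count' collects one
+* bit per address found and must equal ALL_MAP_VAL on success.
+*/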
+int dhd_parse_map_file(osl_t *osh, void *file, uint32 *ramstart, uint32 *rodata_start,
+ uint32 *rodata_end)
+{
+ char *raw_fmts = NULL, *raw_fmts_loc = NULL;
+ uint32 read_size = READ_NUM_BYTES;
+ int error = 0;
+ char * cptr = NULL;
+ char c;
+ uint8 count = 0;
+
+ *ramstart = 0;
+ *rodata_start = 0;
+ *rodata_end = 0;
+
+ /* Allocate 1 byte more than read_size to terminate it with NULL */
+ raw_fmts = MALLOCZ(osh, read_size + 1);
+ if (raw_fmts == NULL) {
+ DHD_ERROR(("%s: Failed to allocate raw_fmts memory \n", __FUNCTION__));
+ goto fail;
+ }
+
+ /* read ram start, rodata_start and rodata_end values from map file */
+ while (count != ALL_MAP_VAL)
+ {
+ error = dhd_os_read_file(file, raw_fmts, read_size);
+ if (error < 0) {
+ DHD_ERROR(("%s: map file read failed err:%d \n", __FUNCTION__,
+ error));
+ goto fail;
+ }
+
+ /* End raw_fmts with NULL as strstr expects NULL terminated strings */
+ raw_fmts[read_size] = '\0';
+
+ /* Get ramstart address */
+ raw_fmts_loc = raw_fmts;
+ if (!(count & RAMSTART_BIT) &&
+ (cptr = bcmstrnstr(raw_fmts_loc, read_size, ramstart_str,
+ strlen(ramstart_str)))) {
+ cptr = cptr - BYTES_AHEAD_NUM;
+ sscanf(cptr, "%x %c text_start", ramstart, &c);
+ count |= RAMSTART_BIT;
+ }
+
+ /* Get ram rodata start address */
+ raw_fmts_loc = raw_fmts;
+ if (!(count & RDSTART_BIT) &&
+ (cptr = bcmstrnstr(raw_fmts_loc, read_size, rodata_start_str,
+ strlen(rodata_start_str)))) {
+ cptr = cptr - BYTES_AHEAD_NUM;
+ sscanf(cptr, "%x %c rodata_start", rodata_start, &c);
+ count |= RDSTART_BIT;
+ }
+
+ /* Get ram rodata end address */
+ raw_fmts_loc = raw_fmts;
+ if (!(count & RDEND_BIT) &&
+ (cptr = bcmstrnstr(raw_fmts_loc, read_size, rodata_end_str,
+ strlen(rodata_end_str)))) {
+ cptr = cptr - BYTES_AHEAD_NUM;
+ sscanf(cptr, "%x %c rodata_end", rodata_end, &c);
+ count |= RDEND_BIT;
+ }
+
+ if (error < (int)read_size) {
+ /*
+ * A read shorter than read_size means we have reached EOF,
+ * so stop reading. Because the file position is rewound by
+ * GO_BACK_FILE_POS_NUM_BYTES after each full read, a string
+ * split across two reads is never missed.
+ */
+ break;
+ }
+ memset(raw_fmts, 0, read_size);
+ /*
+ * Rewind by a predefined number of bytes so that a string and
+ * its address are not missed even if split across two reads.
+ */
+ dhd_os_seek_file(file, -GO_BACK_FILE_POS_NUM_BYTES);
+ }
+
+fail:
+ if (raw_fmts) {
+ MFREE(osh, raw_fmts, read_size + 1);
+ raw_fmts = NULL;
+ }
+ if (count == ALL_MAP_VAL) {
+ return BCME_OK;
+ }
+ else {
+ DHD_ERROR(("%s: readmap error 0X%x \n", __FUNCTION__,
+ count));
+ return BCME_ERROR;
+ }
+
+} /* dhd_parse_map_file */
+
+#ifdef PCIE_FULL_DONGLE
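+/* Process an info-buffer packet carrying firmware log traces. A v1 infobuf
+* is a 32-bit version word followed by a type/length header and the
+* logtrace payload, which is handed to dhd_dbg_trace_evnt_handler().
+*/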
+int
+dhd_event_logtrace_infobuf_pkt_process(dhd_pub_t *dhdp, void *pktbuf,
+ dhd_event_log_t *event_data)
+{
+ uint32 infobuf_version;
+ info_buf_payload_hdr_t *payload_hdr_ptr;
+ uint16 payload_hdr_type;
+ uint16 payload_hdr_length;
+
+ DHD_TRACE(("%s:Enter\n", __FUNCTION__));
+
+ if (PKTLEN(dhdp->osh, pktbuf) < sizeof(uint32)) {
+ DHD_ERROR(("%s: infobuf too small for version field\n",
+ __FUNCTION__));
+ goto exit;
+ }
+ infobuf_version = *((uint32 *)PKTDATA(dhdp->osh, pktbuf));
+ PKTPULL(dhdp->osh, pktbuf, sizeof(uint32));
+ if (infobuf_version != PCIE_INFOBUF_V1) {
+ DHD_ERROR(("%s: infobuf version %d is not PCIE_INFOBUF_V1\n",
+ __FUNCTION__, infobuf_version));
+ goto exit;
+ }
+
+ /* Version 1 infobuf has a single type/length (and then value) field */
+ if (PKTLEN(dhdp->osh, pktbuf) < sizeof(info_buf_payload_hdr_t)) {
+ DHD_ERROR(("%s: infobuf too small for v1 type/length fields\n",
+ __FUNCTION__));
+ goto exit;
+ }
+ /* Process/parse the common info payload header (type/length) */
+ payload_hdr_ptr = (info_buf_payload_hdr_t *)PKTDATA(dhdp->osh, pktbuf);
+ payload_hdr_type = ltoh16(payload_hdr_ptr->type);
+ payload_hdr_length = ltoh16(payload_hdr_ptr->length);
+ if (payload_hdr_type != PCIE_INFOBUF_V1_TYPE_LOGTRACE) {
+ DHD_ERROR(("%s: payload_hdr_type %d is not V1_TYPE_LOGTRACE\n",
+ __FUNCTION__, payload_hdr_type));
+ goto exit;
+ }
+ PKTPULL(dhdp->osh, pktbuf, sizeof(info_buf_payload_hdr_t));
+
+ /* Validate that the specified length isn't bigger than the
+ * provided data.
+ */
+ if (payload_hdr_length > PKTLEN(dhdp->osh, pktbuf)) {
+ DHD_ERROR(("%s: infobuf logtrace length is bigger"
+ " than actual buffer data\n", __FUNCTION__));
+ goto exit;
+ }
+ dhd_dbg_trace_evnt_handler(dhdp, PKTDATA(dhdp->osh, pktbuf),
+ event_data, payload_hdr_length);
+
+ return BCME_OK;
+
+exit:
+ return BCME_ERROR;
+} /* dhd_event_logtrace_infobuf_pkt_process */
+#endif /* PCIE_FULL_DONGLE */
+#endif /* SHOW_LOGTRACE */
+
+#ifdef BTLOG
+int
+dhd_bt_log_pkt_process(dhd_pub_t *dhdp, void *pktbuf)
+{
+ DHD_TRACE(("%s:Enter\n", __FUNCTION__));
+
+ dhd_dbg_bt_log_handler(dhdp,
+ PKTDATA(dhdp->osh, pktbuf), PKTLEN(dhdp->osh, pktbuf));
+
+ return BCME_OK;
+}
+#endif /* BTLOG */
+
+#if defined(WLTDLS) && defined(PCIE_FULL_DONGLE)
+
+/* Handle the TDLS event in dhd_common.c */
+int dhd_tdls_event_handler(dhd_pub_t *dhd_pub, wl_event_msg_t *event)
+{
+ int ret = BCME_OK;
+
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST()
+ ret = dhd_tdls_update_peer_info(dhd_pub, event);
+ GCC_DIAGNOSTIC_POP()
+
+ return ret;
+}
+
+int dhd_free_tdls_peer_list(dhd_pub_t *dhd_pub)
+{
+ tdls_peer_node_t *cur = NULL, *prev = NULL;
+ if (!dhd_pub)
+ return BCME_ERROR;
+ cur = dhd_pub->peer_tbl.node;
+
+ if ((dhd_pub->peer_tbl.node == NULL) && !dhd_pub->peer_tbl.tdls_peer_count)
+ return BCME_ERROR;
+
+ while (cur != NULL) {
+ prev = cur;
+ cur = cur->next;
+ MFREE(dhd_pub->osh, prev, sizeof(tdls_peer_node_t));
+ }
+ dhd_pub->peer_tbl.tdls_peer_count = 0;
+ dhd_pub->peer_tbl.node = NULL;
+ return BCME_OK;
+}
+#endif /* #if defined(WLTDLS) && defined(PCIE_FULL_DONGLE) */
+
+/* pretty hex print a contiguous buffer
+* based on the debug level specified
+*/
+void
+dhd_prhex(const char *msg, volatile uchar *buf, uint nbytes, uint8 dbg_level)
+{
+ char line[128], *p;
+ int len = sizeof(line);
+ int nchar;
+ uint i;
+
+ if (msg && (msg[0] != '\0')) {
+ if (dbg_level == DHD_ERROR_VAL)
+ DHD_ERROR(("%s:\n", msg));
+ else if (dbg_level == DHD_INFO_VAL)
+ DHD_INFO(("%s:\n", msg));
+ else if (dbg_level == DHD_TRACE_VAL)
+ DHD_TRACE(("%s:\n", msg));
+ }
+
+ p = line;
+ for (i = 0; i < nbytes; i++) {
+ if (i % 16 == 0) {
+ nchar = snprintf(p, len, " %04x: ", i); /* line prefix */
+ p += nchar;
+ len -= nchar;
+ }
+ if (len > 0) {
+ nchar = snprintf(p, len, "%02x ", buf[i]);
+ p += nchar;
+ len -= nchar;
+ }
+
+ if (i % 16 == 15) {
+ /* flush line */
+ if (dbg_level == DHD_ERROR_VAL)
+ DHD_ERROR(("%s:\n", line));
+ else if (dbg_level == DHD_INFO_VAL)
+ DHD_INFO(("%s:\n", line));
+ else if (dbg_level == DHD_TRACE_VAL)
+ DHD_TRACE(("%s:\n", line));
+ p = line;
+ len = sizeof(line);
+ }
+ }
+
+ /* flush last partial line */
+ if (p != line) {
+ if (dbg_level == DHD_ERROR_VAL)
+ DHD_ERROR(("%s:\n", line));
+ else if (dbg_level == DHD_INFO_VAL)
+ DHD_INFO(("%s:\n", line));
+ else if (dbg_level == DHD_TRACE_VAL)
+ DHD_TRACE(("%s:\n", line));
+ }
+}
+
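+/* Driver-level throughput test. For TX, up to num_pkts test packets are
+* sent in batches bounded by the available tx buffers; each packet carries
+* a packet id, the packet count and a CRC32 over the id, count and payload,
+* and a final TPUT_PKT_TYPE_STOP packet terminates the run. For RX, the
+* test waits for the expected completions. Throughput is reported in bits
+* per second over the measured interval.
+*/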
+int
+dhd_tput_test(dhd_pub_t *dhd, tput_test_t *tput_data)
+{
+ struct ether_header ether_hdr;
+ tput_pkt_t tput_pkt;
+ void *pkt = NULL;
+ uint8 *pktdata = NULL;
+ uint32 pktsize = 0;
+ uint64 total_size = 0;
+ uint32 *crc = 0;
+ uint32 pktid = 0;
+ uint32 total_num_tx_pkts = 0;
+ int err = 0, err_exit = 0;
+ uint32 i = 0;
+ uint64 time_taken = 0;
+ int max_txbufs = 0;
+ uint32 n_batches = 0;
+ uint32 n_remain = 0;
+ uint8 tput_pkt_hdr_size = 0;
+ bool batch_cnt = FALSE;
+ bool tx_stop_pkt = FALSE;
+
+#if defined(DHD_EFI) && defined(DHD_INTR_POLL_PERIOD_DYNAMIC)
+ uint32 cur_intr_poll_period = 0;
+ cur_intr_poll_period = dhd_os_get_intr_poll_period();
+ /* before running tput_test, set interrupt poll period to a lesser value */
+ dhd_os_set_intr_poll_period(dhd->bus, INTR_POLL_PERIOD_CRITICAL);
+#endif /* DHD_EFI && DHD_INTR_POLL_PERIOD_DYNAMIC */
+
+ if (tput_data->version != TPUT_TEST_T_VER ||
+ tput_data->length != TPUT_TEST_T_LEN) {
+ DHD_ERROR(("%s: wrong structure ver/len! \n", __FUNCTION__));
+ err_exit = BCME_BADARG;
+ goto exit_error;
+ }
+
+ if (dhd->tput_data.tput_test_running) {
+ DHD_ERROR(("%s: tput test already running ! \n", __FUNCTION__));
+ err_exit = BCME_BUSY;
+ goto exit_error;
+ }
+#ifdef PCIE_FULL_DONGLE
+ /*
+ * 100 bytes to accommodate ether header and tput header. As of today
+ * both occupy 30 bytes. Rest is reserved.
+ */
+ if ((tput_data->payload_size > TPUT_TEST_MAX_PAYLOAD) ||
+ (tput_data->payload_size > (DHD_FLOWRING_RX_BUFPOST_PKTSZ - 100))) {
+ DHD_ERROR(("%s: payload size is too large! max_payload=%u rx_bufpost_size=%u\n",
+ __FUNCTION__, TPUT_TEST_MAX_PAYLOAD,
+ (DHD_FLOWRING_RX_BUFPOST_PKTSZ - 100)));
+ err_exit = BCME_BUFTOOLONG;
+ goto exit_error;
+ }
+#endif /* PCIE_FULL_DONGLE */
+ max_txbufs = dhd_get_max_txbufs(dhd);
+ max_txbufs = MIN(max_txbufs, DHD_TPUT_MAX_TX_PKTS_BATCH);
+
+ if (!(tput_data->num_pkts > 0)) {
+ DHD_ERROR(("%s: invalid num_pkts: %d to tx\n",
+ __FUNCTION__, tput_data->num_pkts));
+ err_exit = BCME_ERROR;
+ goto exit_error;
+ }
+
+ memset(&dhd->tput_data, 0, sizeof(dhd->tput_data));
+ memcpy(&dhd->tput_data, tput_data, sizeof(*tput_data));
+ dhd->tput_data.pkts_bad = dhd->tput_data.pkts_good = 0;
+ dhd->tput_data.pkts_cmpl = 0;
+ dhd->tput_start_ts = dhd->tput_stop_ts = 0;
+
+ if (tput_data->flags & TPUT_TEST_USE_ETHERNET_HDR) {
+ pktsize = sizeof(ether_hdr) + sizeof(tput_pkt_t) +
+ (tput_data->payload_size - 12);
+ } else {
+ pktsize = sizeof(tput_pkt_t) +
+ (tput_data->payload_size - 12);
+ }
+
+ tput_pkt_hdr_size = (uint8)((uint8 *)&tput_pkt.crc32 -
+ (uint8 *)&tput_pkt.mac_sta);
+
+ /* mark the tput test as started */
+ dhd->tput_data.tput_test_running = TRUE;
+
+ if (tput_data->direction == TPUT_DIR_TX) {
+ /* for ethernet header */
+ memcpy(ether_hdr.ether_shost, tput_data->mac_sta, ETHER_ADDR_LEN);
+ memcpy(ether_hdr.ether_dhost, tput_data->mac_ap, ETHER_ADDR_LEN);
+ ether_hdr.ether_type = hton16(ETHER_TYPE_IP);
+
+ /* fill in the tput pkt */
+ memset(&tput_pkt, 0, sizeof(tput_pkt));
+ memcpy(tput_pkt.mac_ap, tput_data->mac_ap, ETHER_ADDR_LEN);
+ memcpy(tput_pkt.mac_sta, tput_data->mac_sta, ETHER_ADDR_LEN);
+ tput_pkt.pkt_type = hton16(TPUT_PKT_TYPE_NORMAL);
+ tput_pkt.num_pkts = hton32(tput_data->num_pkts);
+
+ if (tput_data->num_pkts > (uint32)max_txbufs) {
+ n_batches = tput_data->num_pkts / max_txbufs;
+ n_remain = tput_data->num_pkts % max_txbufs;
+ } else {
+ n_batches = 0;
+ n_remain = tput_data->num_pkts;
+ }
+ DHD_ERROR(("%s: num_pkts: %u n_batches: %u n_remain: %u\n",
+ __FUNCTION__, tput_data->num_pkts, n_batches, n_remain));
+
+ do {
+ /* reset before every batch */
+ dhd->batch_tx_pkts_cmpl = 0;
+ if (n_batches) {
+ dhd->batch_tx_num_pkts = max_txbufs;
+ --n_batches;
+ } else if (n_remain) {
+ dhd->batch_tx_num_pkts = n_remain;
+ n_remain = 0;
+ } else {
+ DHD_ERROR(("Invalid. This should not hit\n"));
+ }
+
+ dhd->tput_start_ts = OSL_SYSUPTIME_US();
+ for (i = 0; (i < dhd->batch_tx_num_pkts) || (tx_stop_pkt); ++i) {
+ pkt = PKTGET(dhd->osh, pktsize, TRUE);
+ if (!pkt) {
+ dhd->tput_data.tput_test_running = FALSE;
+ DHD_ERROR(("%s: PKTGET fails ! Not enough Tx buffers\n",
+ __FUNCTION__));
+ DHD_ERROR(("%s: pkts_good:%u; pkts_bad:%u; pkts_cmpl:%u\n",
+ __FUNCTION__, dhd->tput_data.pkts_good,
+ dhd->tput_data.pkts_bad, dhd->tput_data.pkts_cmpl));
+ err_exit = BCME_NOMEM;
+ goto exit_error;
+ }
+ pktdata = PKTDATA(dhd->osh, pkt);
+ PKTSETLEN(dhd->osh, pkt, pktsize);
+ memset(pktdata, 0, pktsize);
+ if (tput_data->flags & TPUT_TEST_USE_ETHERNET_HDR) {
+ memcpy(pktdata, &ether_hdr, sizeof(ether_hdr));
+ pktdata += sizeof(ether_hdr);
+ }
+ /* send stop pkt as last pkt */
+ if (tx_stop_pkt) {
+ tput_pkt.pkt_type = hton16(TPUT_PKT_TYPE_STOP);
+ tx_stop_pkt = FALSE;
+ } else
+ tput_pkt.pkt_type = hton16(TPUT_PKT_TYPE_NORMAL);
+ tput_pkt.pkt_id = hton32(pktid++);
+ tput_pkt.crc32 = 0;
+ memcpy(pktdata, &tput_pkt, sizeof(tput_pkt));
+ /* compute crc32 over the pkt-id, num-pkts and data fields */
+ crc = (uint32 *)(pktdata + tput_pkt_hdr_size);
+ *crc = hton32(hndcrc32(pktdata + tput_pkt_hdr_size + 4,
+ 8 + (tput_data->payload_size - 12),
+ CRC32_INIT_VALUE));
+
+ err = dhd_sendpkt(dhd, 0, pkt);
+ if (err != BCME_OK) {
+ DHD_INFO(("%s: send pkt (id = %u) fails (err = %d) ! \n",
+ __FUNCTION__, pktid, err));
+ dhd->tput_data.pkts_bad++;
+ }
+ total_num_tx_pkts++;
+ if ((total_num_tx_pkts == tput_data->num_pkts) && (!tx_stop_pkt)) {
+ tx_stop_pkt = TRUE;
+ }
+ }
+ DHD_INFO(("%s: TX done, wait for completion...\n", __FUNCTION__));
+ if (!dhd_os_tput_test_wait(dhd, NULL,
+ TPUT_TEST_WAIT_TIMEOUT_DEFAULT)) {
+ dhd->tput_stop_ts = OSL_SYSUPTIME_US();
+ dhd->tput_data.tput_test_running = FALSE;
+ DHD_ERROR(("%s: TX completion timeout !"
+ " Total Tx pkts (including STOP) = %u; pkts cmpl = %u; \n",
+ __FUNCTION__, total_num_tx_pkts, dhd->batch_tx_pkts_cmpl));
+ err_exit = BCME_ERROR;
+ goto exit_error;
+ }
+ if ((dhd->tput_start_ts && dhd->tput_stop_ts &&
+ (dhd->tput_stop_ts > dhd->tput_start_ts)) || (time_taken)) {
+ if (!time_taken) {
+ time_taken = dhd->tput_stop_ts - dhd->tput_start_ts;
+ }
+ } else {
+ dhd->tput_data.tput_test_running = FALSE;
+ DHD_ERROR(("%s: bad timestamp while cal tx batch time\n",
+ __FUNCTION__));
+ err_exit = BCME_ERROR;
+ goto exit_error;
+ }
+ if (n_batches || n_remain) {
+ batch_cnt = TRUE;
+ } else {
+ batch_cnt = FALSE;
+ }
+ } while (batch_cnt);
+ } else {
+ /* TPUT_DIR_RX */
+ DHD_INFO(("%s: waiting for RX completion... \n", __FUNCTION__));
+ if (!dhd_os_tput_test_wait(dhd, NULL, tput_data->timeout_ms)) {
+ DHD_ERROR(("%s: RX completion timeout ! \n", __FUNCTION__));
+ dhd->tput_stop_ts = OSL_SYSUPTIME_US();
+ }
+ }
+
+ /* calculate the throughput in bits per sec */
+ if (dhd->tput_start_ts && dhd->tput_stop_ts &&
+ (dhd->tput_stop_ts > dhd->tput_start_ts)) {
+ time_taken = dhd->tput_stop_ts - dhd->tput_start_ts;
+ time_taken = DIV_U64_BY_U32(time_taken, MSEC_PER_SEC); /* convert to ms */
+ dhd->tput_data.time_ms = time_taken;
+ if (time_taken) {
+ total_size = pktsize * dhd->tput_data.pkts_cmpl * 8;
+ dhd->tput_data.tput_bps = DIV_U64_BY_U64(total_size, time_taken);
+ /* convert from ms to seconds */
+ dhd->tput_data.tput_bps = dhd->tput_data.tput_bps * 1000;
+ }
+ } else {
+ DHD_ERROR(("%s: bad timestamp !\n", __FUNCTION__));
+ }
+ DHD_INFO(("%s: DONE. tput = %llu bps, time = %llu ms\n", __FUNCTION__,
+ dhd->tput_data.tput_bps, dhd->tput_data.time_ms));
+
+ memcpy(tput_data, &dhd->tput_data, sizeof(dhd->tput_data));
+
+ dhd->tput_data.tput_test_running = FALSE;
+
+ err_exit = BCME_OK;
+
+exit_error:
+ DHD_ERROR(("%s: pkts_good = %u; pkts_bad = %u; pkts_cmpl = %u\n",
+ __FUNCTION__, dhd->tput_data.pkts_good,
+ dhd->tput_data.pkts_bad, dhd->tput_data.pkts_cmpl));
+#if defined(DHD_EFI) && defined(DHD_INTR_POLL_PERIOD_DYNAMIC)
+ /* restore interrupt poll period to the previous existing value */
+ dhd_os_set_intr_poll_period(dhd->bus, cur_intr_poll_period);
+#endif /* DHD_EFI && DHD_INTR_POLL_PERIOD_DYNAMIC */
+
+ return err_exit;
+}
+
+void
+dhd_tput_test_rx(dhd_pub_t *dhd, void *pkt)
+{
+ uint8 *pktdata = NULL;
+ tput_pkt_t *tput_pkt = NULL;
+ uint32 crc = 0;
+ uint8 tput_pkt_hdr_size = 0;
+
+ pktdata = PKTDATA(dhd->osh, pkt);
+ if (dhd->tput_data.flags & TPUT_TEST_USE_ETHERNET_HDR)
+ pktdata += sizeof(struct ether_header);
+ tput_pkt = (tput_pkt_t *)pktdata;
+
+ /* record the timestamp of the first packet received */
+ if (dhd->tput_data.pkts_cmpl == 0) {
+ dhd->tput_start_ts = OSL_SYSUPTIME_US();
+ }
+
+ if (ntoh16(tput_pkt->pkt_type) != TPUT_PKT_TYPE_STOP &&
+ dhd->tput_data.pkts_cmpl <= dhd->tput_data.num_pkts) {
+ dhd->tput_data.pkts_cmpl++;
+ }
+ /* drop rx packets received beyond the specified # */
+ if (dhd->tput_data.pkts_cmpl > dhd->tput_data.num_pkts)
+ return;
+
+ DHD_TRACE(("%s: Rx tput test pkt, id = %u ; type = %u\n", __FUNCTION__,
+ ntoh32(tput_pkt->pkt_id), ntoh16(tput_pkt->pkt_type)));
+
+ /* discard if mac addr of AP/STA does not match the specified ones */
+ if ((memcmp(tput_pkt->mac_ap, dhd->tput_data.mac_ap,
+ ETHER_ADDR_LEN) != 0) ||
+ (memcmp(tput_pkt->mac_sta, dhd->tput_data.mac_sta,
+ ETHER_ADDR_LEN) != 0)) {
+ dhd->tput_data.pkts_bad++;
+ DHD_INFO(("%s: dropping tput pkt with id %u due to bad AP/STA mac !\n",
+ __FUNCTION__, ntoh32(tput_pkt->pkt_id)));
+ return;
+ }
+
+ tput_pkt_hdr_size = (uint8)((uint8 *)&tput_pkt->crc32 -
+ (uint8 *)&tput_pkt->mac_sta);
+ pktdata += tput_pkt_hdr_size + 4;
+ crc = hndcrc32(pktdata, 8 + (dhd->tput_data.payload_size - 12),
+ CRC32_INIT_VALUE);
+ if (crc != ntoh32(tput_pkt->crc32)) {
+ DHD_INFO(("%s: dropping tput pkt with id %u due to bad CRC !\n",
+ __FUNCTION__, ntoh32(tput_pkt->pkt_id)));
+ dhd->tput_data.pkts_bad++;
+ return;
+ }
+
+ if (ntoh16(tput_pkt->pkt_type) != TPUT_PKT_TYPE_STOP)
+ dhd->tput_data.pkts_good++;
+
+ /* if we have received the stop packet or all the # of pkts, we're done */
+ if (ntoh16(tput_pkt->pkt_type) == TPUT_PKT_TYPE_STOP ||
+ dhd->tput_data.pkts_cmpl == dhd->tput_data.num_pkts) {
+ dhd->tput_stop_ts = OSL_SYSUPTIME_US();
+ dhd_os_tput_test_wake(dhd);
+ }
+}
+
+#ifdef DUMP_IOCTL_IOV_LIST
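+/* Keep a bounded history of the most recent ioctls/iovars for post-mortem
+* dumps: once the list holds IOV_LIST_MAX_LEN entries, the oldest entry is
+* evicted before a new one is appended.
+*/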
+void
+dhd_iov_li_append(dhd_pub_t *dhd, dll_t *list_head, dll_t *node)
+{
+ dll_t *item;
+ dhd_iov_li_t *iov_li;
+ dhd->dump_iovlist_len++;
+
+ if (dhd->dump_iovlist_len == IOV_LIST_MAX_LEN+1) {
+ item = dll_head_p(list_head);
+ iov_li = (dhd_iov_li_t *)CONTAINEROF(item, dhd_iov_li_t, list);
+ dll_delete(item);
+ MFREE(dhd->osh, iov_li, sizeof(*iov_li));
+ dhd->dump_iovlist_len--;
+ }
+ dll_append(list_head, node);
+}
+
+void
+dhd_iov_li_print(dll_t *list_head)
+{
+ dhd_iov_li_t *iov_li;
+ dll_t *item, *next;
+ uint8 index = 0;
+ for (item = dll_head_p(list_head); !dll_end(list_head, item); item = next) {
+ next = dll_next_p(item);
+ iov_li = (dhd_iov_li_t *)CONTAINEROF(item, dhd_iov_li_t, list);
+ DHD_ERROR(("%d:cmd_name = %s, cmd = %d.\n", ++index, iov_li->buff, iov_li->cmd));
+ }
+}
+
+void
+dhd_iov_li_delete(dhd_pub_t *dhd, dll_t *list_head)
+{
+ dll_t *item;
+ dhd_iov_li_t *iov_li;
+ while (!(dll_empty(list_head))) {
+ item = dll_head_p(list_head);
+ iov_li = (dhd_iov_li_t *)CONTAINEROF(item, dhd_iov_li_t, list);
+ dll_delete(item);
+ MFREE(dhd->osh, iov_li, sizeof(*iov_li));
+ }
+}
+#endif /* DUMP_IOCTL_IOV_LIST */
+
+#ifdef EWP_EDL
+/* For now, memory for the EDL ring is allocated with DMA_ALLOC_CONSISTENT,
+* because on hikey, DMA_MAP of preallocated memory fails with an
+* 'out of space in SWIOTLB' error.
+*/
+int
+dhd_edl_mem_init(dhd_pub_t *dhd)
+{
+ int ret = 0;
+
+ memset(&dhd->edl_ring_mem, 0, sizeof(dhd->edl_ring_mem));
+ ret = dhd_dma_buf_alloc(dhd, &dhd->edl_ring_mem, DHD_EDL_RING_SIZE);
+ if (ret != BCME_OK) {
+ DHD_ERROR(("%s: alloc of edl_ring_mem failed\n",
+ __FUNCTION__));
+ return BCME_ERROR;
+ }
+ return BCME_OK;
+}
+
+/*
+ * NOTE:- that dhd_edl_mem_deinit need NOT be called explicitly, because the dma_buf
+ * for EDL is freed during 'dhd_prot_detach_edl_rings' which is called during de-init.
+ */
+void
+dhd_edl_mem_deinit(dhd_pub_t *dhd)
+{
+ if (dhd->edl_ring_mem.va != NULL)
+ dhd_dma_buf_free(dhd, &dhd->edl_ring_mem);
+}
+
+int
+dhd_event_logtrace_process_edl(dhd_pub_t *dhdp, uint8 *data,
+ void *evt_decode_data)
+{
+ msg_hdr_edl_t *msg = NULL;
+ cmn_msg_hdr_t *cmn_msg_hdr = NULL;
+ uint8 *buf = NULL;
+
+ if (!data || !dhdp || !evt_decode_data) {
+ DHD_ERROR(("%s: invalid args ! \n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ /* format of data in each work item in the EDL ring:
+ * |cmn_msg_hdr_t |payload (var len)|cmn_msg_hdr_t|
+ * payload = |infobuf_ver(u32)|info_buf_payload_hdr_t|msgtrace_hdr_t|<var len data>|
+ */
+ cmn_msg_hdr = (cmn_msg_hdr_t *)data;
+ msg = (msg_hdr_edl_t *)(data + sizeof(cmn_msg_hdr_t));
+ buf = (uint8 *)msg;
+ /* validate the fields */
+ if (ltoh32(msg->infobuf_ver) != PCIE_INFOBUF_V1) {
+ DHD_ERROR(("%s: Skipping msg with invalid infobuf ver (0x%x)"
+ " expected (0x%x)\n", __FUNCTION__,
+ msg->infobuf_ver, PCIE_INFOBUF_V1));
+ return BCME_VERSION;
+ }
+
+ /* in EDL, the request_id field of cmn_msg_hdr is overloaded to carry payload length */
+ if (sizeof(info_buf_payload_hdr_t) > cmn_msg_hdr->request_id) {
+ DHD_ERROR(("%s: infobuf too small for v1 type/length fields\n",
+ __FUNCTION__));
+ return BCME_BUFTOOLONG;
+ }
+
+ if (ltoh16(msg->pyld_hdr.type) != PCIE_INFOBUF_V1_TYPE_LOGTRACE) {
+ DHD_ERROR(("%s: payload_hdr_type %d is not V1_TYPE_LOGTRACE\n",
+ __FUNCTION__, ltoh16(msg->pyld_hdr.type)));
+ return BCME_BADOPTION;
+ }
+
+ if (ltoh16(msg->pyld_hdr.length) > cmn_msg_hdr->request_id) {
+ DHD_ERROR(("%s: infobuf logtrace length %u is bigger"
+ " than available buffer size %u\n", __FUNCTION__,
+ ltoh16(msg->pyld_hdr.length), cmn_msg_hdr->request_id));
+ return BCME_BADLEN;
+ }
+
+ /* dhd_dbg_trace_evnt_handler expects the data to start from msgtrace_hdr_t */
+ buf += sizeof(msg->infobuf_ver) + sizeof(msg->pyld_hdr);
+ dhd_dbg_trace_evnt_handler(dhdp, buf, evt_decode_data,
+ ltoh16(msg->pyld_hdr.length));
+
+ /*
+ * check 'dhdp->logtrace_pkt_sendup' and if true alloc an skb
+ * copy the event data to the skb and send it up the stack
+ */
+ if (dhdp->logtrace_pkt_sendup) {
+ DHD_INFO(("%s: send up event log, len %u bytes\n", __FUNCTION__,
+ (uint32)(ltoh16(msg->pyld_hdr.length) +
+ sizeof(info_buf_payload_hdr_t) + 4)));
+ dhd_sendup_info_buf(dhdp, (uint8 *)msg);
+ }
+
+ return BCME_OK;
+}
+#endif /* EWP_EDL */
+
+#ifdef DHD_LOG_DUMP
+#define DEBUG_DUMP_TRIGGER_INTERVAL_SEC 4
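+/* Trigger a debug dump (memdump, debug_dump and optionally pktlog),
+* rate-limited to one trigger per DEBUG_DUMP_TRIGGER_INTERVAL_SEC.
+*/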
+void
+dhd_log_dump_trigger(dhd_pub_t *dhdp, int subcmd)
+{
+#if defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
+ log_dump_type_t *flush_type;
+#endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL */
+ uint64 current_time_sec;
+
+ if (!dhdp) {
+ DHD_ERROR(("dhdp is NULL !\n"));
+ return;
+ }
+
+ if (subcmd >= CMD_MAX || subcmd < CMD_DEFAULT) {
+ DHD_ERROR(("%s : Invalid subcmd \n", __FUNCTION__));
+ return;
+ }
+
+ current_time_sec = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC);
+
+ DHD_ERROR(("%s: current_time_sec=%lld debug_dump_time_sec=%lld interval=%d\n",
+ __FUNCTION__, current_time_sec, dhdp->debug_dump_time_sec,
+ DEBUG_DUMP_TRIGGER_INTERVAL_SEC));
+
+ if ((current_time_sec - dhdp->debug_dump_time_sec) < DEBUG_DUMP_TRIGGER_INTERVAL_SEC) {
+ DHD_ERROR(("%s : Last debug dump triggered(%lld) within %d seconds, so SKIP\n",
+ __FUNCTION__, dhdp->debug_dump_time_sec, DEBUG_DUMP_TRIGGER_INTERVAL_SEC));
+ return;
+ }
+
+ clear_debug_dump_time(dhdp->debug_dump_time_str);
+#ifdef DHD_PCIE_RUNTIMEPM
+ /* wake up RPM if SYSDUMP is triggered */
+ dhdpcie_runtime_bus_wake(dhdp, TRUE, __builtin_return_address(0));
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+ dhdp->debug_dump_subcmd = subcmd;
+
+ dhdp->debug_dump_time_sec = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC);
+
+#if defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
+ /* flush_type is freed at do_dhd_log_dump function */
+ flush_type = MALLOCZ(dhdp->osh, sizeof(log_dump_type_t));
+ if (flush_type) {
+ *flush_type = DLD_BUF_TYPE_ALL;
+ dhd_schedule_log_dump(dhdp, flush_type);
+ } else {
+ DHD_ERROR(("%s Fail to malloc flush_type\n", __FUNCTION__));
+ return;
+ }
+#endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL */
+
+ /* Inside dhd_mem_dump, event notification will be sent to HAL and
+ * from other context DHD pushes memdump, debug_dump and pktlog dump
+ * to HAL and HAL will write into file
+ */
+#if (defined(BCMPCIE) || defined(BCMSDIO)) && defined(DHD_FW_COREDUMP)
+ dhdp->memdump_type = DUMP_TYPE_BY_SYSDUMP;
+ dhd_bus_mem_dump(dhdp);
+#endif /* (BCMPCIE || BCMSDIO) && DHD_FW_COREDUMP */
+
+#if defined(DHD_PKT_LOGGING) && defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
+ dhd_schedule_pktlog_dump(dhdp);
+#endif /* DHD_PKT_LOGGING && DHD_DUMP_FILE_WRITE_FROM_KERNEL */
+}
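+
+/* Example of the rate limit above: with DEBUG_DUMP_TRIGGER_INTERVAL_SEC = 4,
+ * a second trigger arriving 2s after the first is skipped; one arriving 5s
+ * later goes through.
+ */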
+#endif /* DHD_LOG_DUMP */
+
+#if (defined(LINUX) || defined(DHD_EFI)) && defined(SHOW_LOGTRACE)
+int
+dhd_print_fw_ver_from_file(dhd_pub_t *dhdp, char *fwpath)
+{
+ void *file = NULL;
+ int size = 0;
+ char buf[FW_VER_STR_LEN];
+ char *str = NULL;
+ int ret = BCME_OK;
+
+ if (!fwpath)
+ return BCME_BADARG;
+
+ file = dhd_os_open_image1(dhdp, fwpath);
+ if (!file) {
+ ret = BCME_ERROR;
+ goto exit;
+ }
+ size = dhd_os_get_image_size(file);
+ if (!size) {
+ ret = BCME_ERROR;
+ goto exit;
+ }
+
+ /* seek to the last FW_VER_STR_LEN bytes of the file */
+ if (dhd_os_seek_file(file, size - FW_VER_STR_LEN) != BCME_OK) {
+ ret = BCME_ERROR;
+ goto exit;
+ }
+
+ /* read the last FW_VER_STR_LEN bytes of the file into a buffer */
+ memset(buf, 0, FW_VER_STR_LEN);
+ if (dhd_os_get_image_block(buf, FW_VER_STR_LEN - 1, file) < 0) {
+ ret = BCME_ERROR;
+ goto exit;
+ }
+ /* search for 'Version' in the buffer */
+ str = bcmstrnstr(buf, FW_VER_STR_LEN, FW_VER_STR, strlen(FW_VER_STR));
+ if (!str) {
+ ret = BCME_ERROR;
+ goto exit;
+ }
+ /* walk backwards over printable (ascii) characters to find the start of the version string */
+ while (str != buf &&
+ (*str >= ' ' && *str <= '~')) {
+ --str;
+ }
+ /* reverse the final decrement, so that str is pointing
+ * to the first ascii character in the buffer
+ */
+ ++str;
+
+ if (strlen(str) > (FW_VER_STR_LEN - 1)) {
+ ret = BCME_BADLEN;
+ goto exit;
+ }
+
+ DHD_ERROR(("FW version in file '%s': %s\n", fwpath, str));
+ /* copy to global variable, so that in case FW load fails, the
+ * core capture logs will contain FW version read from the file
+ */
+ memset(fw_version, 0, FW_VER_STR_LEN);
+ strlcpy(fw_version, str, FW_VER_STR_LEN);
+
+exit:
+ if (file)
+ dhd_os_close_image1(dhdp, file);
+
+ return ret;
+}
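+
+/* The version string is assumed to sit within the last FW_VER_STR_LEN bytes of
+ * the image; e.g. a tail containing "Version: 1.2.3 (r123456)" (illustrative
+ * string only) ends up copied into the global fw_version buffer above.
+ */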
+#endif /* (LINUX || DHD_EFI) && SHOW_LOGTRACE */
+
+#if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS)
+void
+dhd_clear_awdl_stats(dhd_pub_t *dhd)
+{
+ unsigned long flags;
+ /*
+ * Both the event path (e.g. WLC_E_AWDL_AW) and the bus path (tx status
+ * processing) update the AWDL stats, so acquire the lock before clearing them.
+ */
+ DHD_AWDL_STATS_LOCK(dhd->awdl_stats_lock, flags);
+ memset(dhd->awdl_stats, 0, sizeof(dhd->awdl_stats));
+ DHD_AWDL_STATS_UNLOCK(dhd->awdl_stats_lock, flags);
+}
+#endif /* DHD_AWDL && AWDL_SLOT_STATS */
+
+#ifdef WL_CFGVENDOR_SEND_HANG_EVENT
+
+static void
+copy_hang_info_ioctl_timeout(dhd_pub_t *dhd, int ifidx, wl_ioctl_t *ioc)
+{
+ int remain_len;
+ int i;
+ int *cnt;
+ char *dest;
+ int bytes_written;
+ uint32 ioc_dwlen = 0;
+
+ if (!dhd || !dhd->hang_info) {
+ DHD_ERROR(("%s dhd=%p hang_info=%p\n",
+ __FUNCTION__, dhd, (dhd ? dhd->hang_info : NULL)));
+ return;
+ }
+
+ cnt = &dhd->hang_info_cnt;
+ dest = dhd->hang_info;
+
+ memset(dest, 0, VENDOR_SEND_HANG_EXT_INFO_LEN);
+ (*cnt) = 0;
+
+ bytes_written = 0;
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - bytes_written;
+
+ get_debug_dump_time(dhd->debug_dump_time_hang_str);
+ copy_debug_dump_time(dhd->debug_dump_time_str, dhd->debug_dump_time_hang_str);
+
+ bytes_written += scnprintf(&dest[bytes_written], remain_len, "%d %d %s %d %d %d %d %d %d ",
+ HANG_REASON_IOCTL_RESP_TIMEOUT, VENDOR_SEND_HANG_EXT_INFO_VER,
+ dhd->debug_dump_time_hang_str,
+ ifidx, ioc->cmd, ioc->len, ioc->set, ioc->used, ioc->needed);
+ (*cnt) = HANG_FIELD_IOCTL_RESP_TIMEOUT_CNT;
+
+ clear_debug_dump_time(dhd->debug_dump_time_hang_str);
+
+ /* Access ioc->buf only if the ioc->len is more than 4 bytes */
+ ioc_dwlen = (uint32)(ioc->len / sizeof(uint32));
+ if (ioc_dwlen > 0) {
+ const uint32 *ioc_buf = (const uint32 *)ioc->buf;
+
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - bytes_written;
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ bytes_written += scnprintf(&dest[bytes_written], remain_len,
+ "%08x", *(uint32 *)(ioc_buf++));
+ GCC_DIAGNOSTIC_POP();
+ (*cnt)++;
+ if ((*cnt) >= HANG_FIELD_CNT_MAX) {
+ return;
+ }
+
+ for (i = 1; i < ioc_dwlen && *cnt <= HANG_FIELD_CNT_MAX;
+ i++, (*cnt)++) {
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - bytes_written;
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ bytes_written += scnprintf(&dest[bytes_written], remain_len, "%c%08x",
+ HANG_RAW_DEL, *(uint32 *)(ioc_buf++));
+ GCC_DIAGNOSTIC_POP();
+ }
+ }
+
+ DHD_INFO(("%s hang info len: %d data: %s\n",
+ __FUNCTION__, (int)strlen(dhd->hang_info), dhd->hang_info));
+}
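+
+/* Illustrative output of the formatting above (values hypothetical):
+ *   "<reason> <ver> <timestr> <ifidx> <cmd> <len> <set> <used> <needed> 00000001<DEL>0000abcd..."
+ * i.e. the fixed fields, then up to HANG_FIELD_CNT_MAX dwords of ioc->buf in
+ * hex, separated by HANG_RAW_DEL.
+ */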
+
+#endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
+
+#if defined(DHD_H2D_LOG_TIME_SYNC)
+/*
+ * Helper function:
+ * Used for Dongle console message time syncing with Host printk
+ */
+void dhd_h2d_log_time_sync(dhd_pub_t *dhd)
+{
+ uint64 ts;
+
+ /*
+ * local_clock() returns time in nanoseconds;
+ * the dongle understands only milliseconds.
+ */
+ ts = local_clock();
+ /* Nano seconds to milli seconds */
+ do_div(ts, 1000000);
+ if (dhd_wl_ioctl_set_intiovar(dhd, "rte_timesync", ts, WLC_SET_VAR, TRUE, 0)) {
+ DHD_ERROR(("%s rte_timesync **** FAILED ****\n", __FUNCTION__));
+ /* Stopping HOST Dongle console time syncing */
+ dhd->dhd_rte_time_sync_ms = 0;
+ }
+}
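+
+/* Example of the conversion above: local_clock() = 5,000,000,000 ns ->
+ * do_div(ts, 1000000) -> 5000 ms, which is the value the "rte_timesync"
+ * iovar sends to the dongle.
+ */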
+#endif /* DHD_H2D_LOG_TIME_SYNC */
+
+#if defined(LINUX) || defined(linux)
+/* configurations of ecounters to be enabled by default in FW */
+static ecounters_cfg_t ecounters_cfg_tbl[] = {
+ /* Global ecounters */
+ {ECOUNTERS_STATS_TYPES_FLAG_GLOBAL, 0x0, WL_IFSTATS_XTLV_BUS_PCIE},
+ // {ECOUNTERS_STATS_TYPES_FLAG_GLOBAL, 0x0, WL_IFSTATS_XTLV_TX_AMPDU_STATS},
+ // {ECOUNTERS_STATS_TYPES_FLAG_GLOBAL, 0x0, WL_IFSTATS_XTLV_RX_AMPDU_STATS},
+
+ /* Slice specific ecounters */
+ {ECOUNTERS_STATS_TYPES_FLAG_SLICE, 0x0, WL_SLICESTATS_XTLV_PERIODIC_STATE},
+ {ECOUNTERS_STATS_TYPES_FLAG_SLICE, 0x1, WL_SLICESTATS_XTLV_PERIODIC_STATE},
+ {ECOUNTERS_STATS_TYPES_FLAG_SLICE, 0x1, WL_IFSTATS_XTLV_WL_SLICE_BTCOEX},
+
+ /* Interface specific ecounters */
+ {ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x0, WL_IFSTATS_XTLV_IF_PERIODIC_STATE},
+ {ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x0, WL_IFSTATS_XTLV_GENERIC},
+ {ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x0, WL_IFSTATS_XTLV_INFRA_SPECIFIC},
+ {ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x0, WL_IFSTATS_XTLV_MGT_CNT},
+
+ /* secondary interface */
+ /* XXX Temporarily removed; will be enabled after a decision
+ {ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x1, WL_IFSTATS_XTLV_IF_PERIODIC_STATE},
+ {ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x1, WL_IFSTATS_XTLV_GENERIC},
+ {ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x1, WL_IFSTATS_XTLV_INFRA_SPECIFIC},
+ {ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x1, WL_IFSTATS_XTLV_MGT_CNT},
+ */
+};
+
+/* XXX: Entries with the same event id must appear consecutively in the table below */
+static event_ecounters_cfg_t event_ecounters_cfg_tbl[] = {
+ /* Interface specific event ecounters */
+ {WLC_E_DEAUTH_IND, ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x0, WL_IFSTATS_XTLV_IF_EVENT_STATS},
+};
+
+/* Accepts an argument to -s, -g or -f and creates an XTLV */
+int
+dhd_create_ecounters_params(dhd_pub_t *dhd, uint16 type, uint16 if_slice_idx,
+ uint16 stats_rep, uint8 **xtlv)
+{
+ uint8 *req_xtlv = NULL;
+ ecounters_stats_types_report_req_t *req;
+ bcm_xtlvbuf_t xtlvbuf, container_xtlvbuf;
+ ecountersv2_xtlv_list_elt_t temp;
+ uint16 xtlv_len = 0, total_len = 0;
+ int rc = BCME_OK;
+
+ /* fill in the stat type XTLV. For now there is no explicit TLV for the stat type. */
+ temp.id = stats_rep;
+ temp.len = 0;
+
+ /* Hence len/data = 0/NULL */
+ xtlv_len += temp.len + BCM_XTLV_HDR_SIZE;
+
+ /* Total length of the container */
+ total_len = BCM_XTLV_HDR_SIZE +
+ OFFSETOF(ecounters_stats_types_report_req_t, stats_types_req) + xtlv_len;
+
+ /* Now allocate a structure for the entire request */
+ if ((req_xtlv = (uint8 *)MALLOCZ(dhd->osh, total_len)) == NULL) {
+ rc = BCME_NOMEM;
+ goto fail;
+ }
+
+ /* container XTLV context */
+ bcm_xtlv_buf_init(&container_xtlvbuf, (uint8 *)req_xtlv, total_len,
+ BCM_XTLV_OPTION_ALIGN32);
+
+ /* Fill other XTLVs in the container. Leave space for XTLV headers */
+ req = (ecounters_stats_types_report_req_t *)(req_xtlv + BCM_XTLV_HDR_SIZE);
+ req->flags = type;
+ if (type == ECOUNTERS_STATS_TYPES_FLAG_SLICE) {
+ req->slice_mask = 0x1 << if_slice_idx;
+ } else if (type == ECOUNTERS_STATS_TYPES_FLAG_IFACE) {
+ req->if_index = if_slice_idx;
+ }
+
+ /* Fill remaining XTLVs */
+ bcm_xtlv_buf_init(&xtlvbuf, (uint8*) req->stats_types_req, xtlv_len,
+ BCM_XTLV_OPTION_ALIGN32);
+ if (bcm_xtlv_put_data(&xtlvbuf, temp.id, NULL, temp.len)) {
+ DHD_ERROR(("Error creating XTLV for requested stats type = %d\n", temp.id));
+ rc = BCME_ERROR;
+ goto fail;
+ }
+
+ /* fill the top level container and get done with the XTLV container */
+ rc = bcm_xtlv_put_data(&container_xtlvbuf, WL_ECOUNTERS_XTLV_REPORT_REQ, NULL,
+ bcm_xtlv_buf_len(&xtlvbuf) + OFFSETOF(ecounters_stats_types_report_req_t,
+ stats_types_req));
+
+ if (rc) {
+ DHD_ERROR(("Error creating parent XTLV for type = %d\n", req->flags));
+ goto fail;
+ }
+
+fail:
+ if (rc && req_xtlv) {
+ MFREE(dhd->osh, req_xtlv, total_len);
+ req_xtlv = NULL;
+ }
+
+ /* update the xtlv pointer */
+ *xtlv = req_xtlv;
+ return rc;
+}
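+
+/* Resulting buffer layout (ALIGN32 XTLVs), as built above:
+ *   |XTLV hdr: WL_ECOUNTERS_XTLV_REPORT_REQ|flags + slice_mask/if_index|
+ *   |  inner XTLV hdr: stats_rep (len 0, no data)|
+ * On success the caller owns *xtlv and releases it later (see
+ * dhd_start_ecounters() below).
+ */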
+
+static int
+dhd_ecounter_autoconfig(dhd_pub_t *dhd)
+{
+ int rc = BCME_OK;
+ uint32 buf;
+ rc = dhd_iovar(dhd, 0, "ecounters_autoconfig", NULL, 0, (char *)&buf, sizeof(buf), FALSE);
+
+ if (rc != BCME_OK) {
+ if (rc != BCME_UNSUPPORTED) {
+ /* log the real error code before masking it with BCME_OK */
+ DHD_ERROR(("%s Ecounter autoconfig in fw failed : %d\n", __FUNCTION__, rc));
+ rc = BCME_OK;
+ } else {
+ DHD_ERROR(("%s Ecounter autoconfig in FW not supported\n", __FUNCTION__));
+ }
+ }
+
+ return rc;
+}
+
+int
+dhd_ecounter_configure(dhd_pub_t *dhd, bool enable)
+{
+ int rc = BCME_OK;
+ if (enable) {
+ if (dhd_ecounter_autoconfig(dhd) != BCME_OK) {
+ if ((rc = dhd_start_ecounters(dhd)) != BCME_OK) {
+ DHD_ERROR(("%s Ecounters start failed\n", __FUNCTION__));
+ } else if ((rc = dhd_start_event_ecounters(dhd)) != BCME_OK) {
+ DHD_ERROR(("%s Event_Ecounters start failed\n", __FUNCTION__));
+ }
+ }
+ } else {
+ if ((rc = dhd_stop_ecounters(dhd)) != BCME_OK) {
+ DHD_ERROR(("%s Ecounters stop failed\n", __FUNCTION__));
+ } else if ((rc = dhd_stop_event_ecounters(dhd)) != BCME_OK) {
+ DHD_ERROR(("%s Event_Ecounters stop failed\n", __FUNCTION__));
+ }
+ }
+ return rc;
+}
+
+int
+dhd_start_ecounters(dhd_pub_t *dhd)
+{
+ uint8 i = 0;
+ uint8 *start_ptr;
+ int rc = BCME_OK;
+ bcm_xtlv_t *elt;
+ ecounters_config_request_v2_t *req = NULL;
+ ecountersv2_processed_xtlv_list_elt *list_elt, *tail = NULL;
+ ecountersv2_processed_xtlv_list_elt *processed_containers_list = NULL;
+ uint16 total_processed_containers_len = 0;
+
+ for (i = 0; i < ARRAYSIZE(ecounters_cfg_tbl); i++) {
+ ecounters_cfg_t *ecounter_stat = &ecounters_cfg_tbl[i];
+
+ if ((list_elt = (ecountersv2_processed_xtlv_list_elt *)
+ MALLOCZ(dhd->osh, sizeof(*list_elt))) == NULL) {
+ DHD_ERROR(("Ecounters v2: No memory to process\n"));
+ goto fail;
+ }
+
+ rc = dhd_create_ecounters_params(dhd, ecounter_stat->type,
+ ecounter_stat->if_slice_idx, ecounter_stat->stats_rep, &list_elt->data);
+
+ if (rc) {
+ DHD_ERROR(("Ecounters v2: Could not process: stat: %d return code: %d\n",
+ ecounter_stat->stats_rep, rc));
+
+ /* Free list_elt and go to fail to release anything allocated
+ * in previous iterations. Note that list_elt->data is populated in
+ * dhd_create_ecounters_params() and is freed there on failure.
+ */
+ MFREE(dhd->osh, list_elt, sizeof(*list_elt));
+ list_elt = NULL;
+ goto fail;
+ }
+ elt = (bcm_xtlv_t *) list_elt->data;
+
+ /* Put the elements in the order they are processed */
+ if (processed_containers_list == NULL) {
+ processed_containers_list = list_elt;
+ } else {
+ tail->next = list_elt;
+ }
+ tail = list_elt;
+ /* Size of the XTLV returned */
+ total_processed_containers_len += BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE;
+ }
+
+ /* Now create the ecounters config request with the total length */
+ req = (ecounters_config_request_v2_t *)MALLOCZ(dhd->osh, sizeof(*req) +
+ total_processed_containers_len);
+
+ if (req == NULL) {
+ rc = BCME_NOMEM;
+ goto fail;
+ }
+
+ req->version = ECOUNTERS_VERSION_2;
+ req->logset = EVENT_LOG_SET_ECOUNTERS;
+ req->reporting_period = ECOUNTERS_DEFAULT_PERIOD;
+ req->num_reports = ECOUNTERS_NUM_REPORTS;
+ req->len = total_processed_containers_len +
+ OFFSETOF(ecounters_config_request_v2_t, ecounters_xtlvs);
+
+ /* Copy config */
+ start_ptr = req->ecounters_xtlvs;
+
+ /* Now go element by element in the list */
+ while (processed_containers_list) {
+ list_elt = processed_containers_list;
+
+ elt = (bcm_xtlv_t *)list_elt->data;
+
+ memcpy(start_ptr, list_elt->data, BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE);
+ start_ptr += (size_t)(BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE);
+ processed_containers_list = processed_containers_list->next;
+
+ /* Free allocated memories */
+ MFREE(dhd->osh, elt, elt->len + BCM_XTLV_HDR_SIZE);
+ MFREE(dhd->osh, list_elt, sizeof(*list_elt));
+ }
+
+ if ((rc = dhd_iovar(dhd, 0, "ecounters", (char *)req, req->len, NULL, 0, TRUE)) < 0) {
+ DHD_ERROR(("failed to start ecounters\n"));
+ }
+
+fail:
+ if (req) {
+ MFREE(dhd->osh, req, sizeof(*req) + total_processed_containers_len);
+ }
+
+ /* Now go element by element in the list */
+ while (processed_containers_list) {
+ list_elt = processed_containers_list;
+ elt = (bcm_xtlv_t *)list_elt->data;
+ processed_containers_list = processed_containers_list->next;
+
+ /* Free allocated memories */
+ MFREE(dhd->osh, elt, elt->len + BCM_XTLV_HDR_SIZE);
+ MFREE(dhd->osh, list_elt, sizeof(*list_elt));
+ }
+ return rc;
+}
+
+int
+dhd_stop_ecounters(dhd_pub_t *dhd)
+{
+ int rc = BCME_OK;
+ ecounters_config_request_v2_t *req;
+
+ /* Now create the ecounters config request with the total length */
+ req = (ecounters_config_request_v2_t *)MALLOCZ(dhd->osh, sizeof(*req));
+
+ if (req == NULL) {
+ rc = BCME_NOMEM;
+ goto fail;
+ }
+
+ req->version = ECOUNTERS_VERSION_2;
+ req->len = OFFSETOF(ecounters_config_request_v2_t, ecounters_xtlvs);
+
+ if ((rc = dhd_iovar(dhd, 0, "ecounters", (char *)req, req->len, NULL, 0, TRUE)) < 0) {
+ DHD_ERROR(("failed to stop ecounters\n"));
+ }
+
+fail:
+ if (req) {
+ MFREE(dhd->osh, req, sizeof(*req));
+ }
+ return rc;
+}
+
+/* configured event_id_array for event ecounters */
+typedef struct event_id_array {
+ uint8 event_id;
+ uint8 str_idx;
+} event_id_array_t;
+
+/* get event id array only from event_ecounters_cfg_tbl[] */
+static inline int __dhd_event_ecounters_get_event_id_array(event_id_array_t *event_array)
+{
+ uint8 i;
+ uint8 idx = 0;
+ int32 prev_evt_id = -1;
+
+ for (i = 0; i < (uint8)ARRAYSIZE(event_ecounters_cfg_tbl); i++) {
+ if (prev_evt_id != event_ecounters_cfg_tbl[i].event_id) {
+ if (prev_evt_id >= 0)
+ idx++;
+ event_array[idx].event_id = event_ecounters_cfg_tbl[i].event_id;
+ event_array[idx].str_idx = i;
+ }
+ prev_evt_id = event_ecounters_cfg_tbl[i].event_id;
+ }
+ return idx;
+}
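+
+/* Example: a cfg table of event ids {A, A, B} yields
+ *   event_array = [{A, str_idx 0}, {B, str_idx 2}]
+ * and a return value of 1 -- the last valid index, not the count, which is
+ * why the caller below iterates with 'i <= event_id_cnt'.
+ */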
+
+/* Max number of XTLVs a single event id may request: the wl_ifstats_xtlv_id count (31) times 2 interfaces */
+#define ECNTRS_MAX_XTLV_NUM (31 * 2)
+
+int
+dhd_start_event_ecounters(dhd_pub_t *dhd)
+{
+ uint8 i, j = 0;
+ uint8 event_id_cnt = 0;
+ uint16 processed_containers_len = 0;
+ uint16 max_xtlv_len = 0;
+ int rc = BCME_OK;
+ uint8 *ptr;
+ uint8 *data;
+ event_id_array_t *id_array;
+ bcm_xtlv_t *elt = NULL;
+ event_ecounters_config_request_v2_t *req = NULL;
+
+ /* XXX: the size of id_array is limited by the size of event_ecounters_cfg_tbl */
+ id_array = (event_id_array_t *)MALLOCZ(dhd->osh, sizeof(event_id_array_t) *
+ ARRAYSIZE(event_ecounters_cfg_tbl));
+
+ if (id_array == NULL) {
+ rc = BCME_NOMEM;
+ goto fail;
+ }
+ event_id_cnt = __dhd_event_ecounters_get_event_id_array(id_array);
+
+ max_xtlv_len = ((BCM_XTLV_HDR_SIZE +
+ OFFSETOF(event_ecounters_config_request_v2_t, ecounters_xtlvs)) *
+ ECNTRS_MAX_XTLV_NUM);
+
+ /* Now create ecounters config request with max allowed length */
+ req = (event_ecounters_config_request_v2_t *)MALLOCZ(dhd->osh,
+ sizeof(*req) + max_xtlv_len);
+
+ if (req == NULL) {
+ rc = BCME_NOMEM;
+ goto fail;
+ }
+
+ for (i = 0; i <= event_id_cnt; i++) {
+ /* req initialization by event id */
+ req->version = ECOUNTERS_VERSION_2;
+ req->logset = EVENT_LOG_SET_ECOUNTERS;
+ req->event_id = id_array[i].event_id;
+ req->flags = EVENT_ECOUNTERS_FLAGS_ADD;
+ req->len = 0;
+ processed_containers_len = 0;
+
+ /* Copy config */
+ ptr = req->ecounters_xtlvs;
+
+ for (j = id_array[i].str_idx; j < (uint8)ARRAYSIZE(event_ecounters_cfg_tbl); j++) {
+ event_ecounters_cfg_t *event_ecounter_stat = &event_ecounters_cfg_tbl[j];
+ if (id_array[i].event_id != event_ecounter_stat->event_id)
+ break;
+
+ rc = dhd_create_ecounters_params(dhd, event_ecounter_stat->type,
+ event_ecounter_stat->if_slice_idx, event_ecounter_stat->stats_rep,
+ &data);
+
+ if (rc) {
+ DHD_ERROR(("%s: Could not process: stat: %d return code: %d\n",
+ __FUNCTION__, event_ecounter_stat->stats_rep, rc));
+ goto fail;
+ }
+
+ elt = (bcm_xtlv_t *)data;
+
+ memcpy(ptr, elt, BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE);
+ ptr += (size_t)(BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE);
+ processed_containers_len += BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE;
+
+ /* Free allocated memories alloced by dhd_create_ecounters_params */
+ MFREE(dhd->osh, elt, elt->len + BCM_XTLV_HDR_SIZE);
+
+ if (processed_containers_len > max_xtlv_len) {
+ DHD_ERROR(("%s XTLV NUM IS OVERFLOWED THAN ALLOWED!!\n",
+ __FUNCTION__));
+ rc = BCME_BADLEN;
+ goto fail;
+ }
+ }
+
+ req->len = processed_containers_len +
+ OFFSETOF(event_ecounters_config_request_v2_t, ecounters_xtlvs);
+
+ DHD_INFO(("%s req version %d logset %d event_id %d flags %d len %d\n",
+ __FUNCTION__, req->version, req->logset, req->event_id,
+ req->flags, req->len));
+
+ rc = dhd_iovar(dhd, 0, "event_ecounters", (char *)req, req->len, NULL, 0, TRUE);
+
+ if (rc < 0) {
+ DHD_ERROR(("failed to start event_ecounters(event id %d) with rc %d\n",
+ req->event_id, rc));
+ goto fail;
+ }
+ }
+
+fail:
+ /* Free allocated memories */
+ if (req) {
+ MFREE(dhd->osh, req, sizeof(*req) + max_xtlv_len);
+ }
+ if (id_array) {
+ MFREE(dhd->osh, id_array, sizeof(event_id_array_t) *
+ ARRAYSIZE(event_ecounters_cfg_tbl));
+ }
+
+ return rc;
+}
+
+int
+dhd_stop_event_ecounters(dhd_pub_t *dhd)
+{
+ int rc = BCME_OK;
+ event_ecounters_config_request_v2_t *req;
+
+ /* Now create the ecounters config request with the total length */
+ req = (event_ecounters_config_request_v2_t *)MALLOCZ(dhd->osh, sizeof(*req));
+
+ if (req == NULL) {
+ rc = BCME_NOMEM;
+ goto fail;
+ }
+
+ req->version = ECOUNTERS_VERSION_2;
+ req->flags = EVENT_ECOUNTERS_FLAGS_DEL_ALL;
+ req->len = OFFSETOF(event_ecounters_config_request_v2_t, ecounters_xtlvs);
+
+ if ((rc = dhd_iovar(dhd, 0, "event_ecounters", (char *)req, req->len, NULL, 0, TRUE)) < 0) {
+ DHD_ERROR(("failed to stop event_ecounters\n"));
+ }
+
+fail:
+ if (req) {
+ MFREE(dhd->osh, req, sizeof(*req));
+ }
+ return rc;
+}
+#ifdef DHD_LOG_DUMP
+int
+dhd_dump_debug_ring(dhd_pub_t *dhdp, void *ring_ptr, const void *user_buf,
+ log_dump_section_hdr_t *sec_hdr,
+ char *text_hdr, int buflen, uint32 sec_type)
+{
+ uint32 rlen = 0;
+ uint32 data_len = 0;
+ void *data = NULL;
+ unsigned long flags = 0;
+ int ret = 0;
+ dhd_dbg_ring_t *ring = (dhd_dbg_ring_t *)ring_ptr;
+ int pos = 0;
+ int fpos_sechdr = 0;
+
+ if (!dhdp || !ring || !user_buf || !sec_hdr || !text_hdr) {
+ return BCME_BADARG;
+ }
+ /* do not allow further writes to the ring
+ * till we flush it
+ */
+ DHD_DBG_RING_LOCK(ring->lock, flags);
+ ring->state = RING_SUSPEND;
+ DHD_DBG_RING_UNLOCK(ring->lock, flags);
+
+ if (dhdp->concise_dbg_buf) {
+ /* temporarily re-use the concise debug buffer
+ * to pull ring data and write it
+ * record by record to the output buffer
+ */
+ data_len = CONCISE_DUMP_BUFLEN;
+ data = dhdp->concise_dbg_buf;
+ ret = dhd_export_debug_data(text_hdr, NULL, user_buf, strlen(text_hdr), &pos);
+ /* write the section header now with zero length,
+ * once the correct length is found out, update
+ * it later
+ */
+ fpos_sechdr = pos;
+ sec_hdr->type = sec_type;
+ sec_hdr->length = 0;
+ ret = dhd_export_debug_data((char *)sec_hdr, NULL, user_buf,
+ sizeof(*sec_hdr), &pos);
+ do {
+ rlen = dhd_dbg_ring_pull_single(ring, data, data_len, TRUE);
+ if (rlen > 0) {
+ /* write the log */
+ ret = dhd_export_debug_data(data, NULL, user_buf, rlen, &pos);
+ }
+ DHD_DBGIF(("%s: rlen : %d\n", __FUNCTION__, rlen));
+ } while ((rlen > 0));
+ /* now update the section header length in the file */
+ /* Complete ring size is dumped by HAL, hence updating length to ring size */
+ sec_hdr->length = ring->ring_size;
+ ret = dhd_export_debug_data((char *)sec_hdr, NULL, user_buf,
+ sizeof(*sec_hdr), &fpos_sechdr);
+ } else {
+ DHD_ERROR(("%s: No concise buffer available !\n", __FUNCTION__));
+ }
+ DHD_DBG_RING_LOCK(ring->lock, flags);
+ ring->state = RING_ACTIVE;
+ /* Resetting both read and write pointer,
+ * since all items are read.
+ */
+ ring->rp = ring->wp = 0;
+ DHD_DBG_RING_UNLOCK(ring->lock, flags);
+
+ return ret;
+}
+
+int
+dhd_log_dump_ring_to_file(dhd_pub_t *dhdp, void *ring_ptr, void *file,
+ unsigned long *file_posn, log_dump_section_hdr_t *sec_hdr,
+ char *text_hdr, uint32 sec_type)
+{
+ uint32 rlen = 0;
+ uint32 data_len = 0, total_len = 0;
+ void *data = NULL;
+ unsigned long fpos_sechdr = 0;
+ unsigned long flags = 0;
+ int ret = 0;
+ dhd_dbg_ring_t *ring = (dhd_dbg_ring_t *)ring_ptr;
+
+ if (!dhdp || !ring || !file || !sec_hdr ||
+ !file_posn || !text_hdr)
+ return BCME_BADARG;
+
+ /* do not allow further writes to the ring
+ * till we flush it
+ */
+ DHD_DBG_RING_LOCK(ring->lock, flags);
+ ring->state = RING_SUSPEND;
+ DHD_DBG_RING_UNLOCK(ring->lock, flags);
+
+ if (dhdp->concise_dbg_buf) {
+ /* temporarily re-use the concise debug buffer
+ * to pull ring data and write it
+ * record by record to the file
+ */
+ data_len = CONCISE_DUMP_BUFLEN;
+ data = dhdp->concise_dbg_buf;
+ dhd_os_write_file_posn(file, file_posn, text_hdr,
+ strlen(text_hdr));
+ /* write the section header now with zero length,
+ * once the correct length is found out, update
+ * it later
+ */
+ dhd_init_sec_hdr(sec_hdr);
+ fpos_sechdr = *file_posn;
+ sec_hdr->type = sec_type;
+ sec_hdr->length = 0;
+ dhd_os_write_file_posn(file, file_posn, (char *)sec_hdr,
+ sizeof(*sec_hdr));
+ do {
+ rlen = dhd_dbg_ring_pull_single(ring, data, data_len, TRUE);
+ if (rlen > 0) {
+ /* write the log */
+ ret = dhd_os_write_file_posn(file, file_posn, data, rlen);
+ if (ret < 0) {
+ DHD_ERROR(("%s: write file error !\n", __FUNCTION__));
+ DHD_DBG_RING_LOCK(ring->lock, flags);
+ ring->state = RING_ACTIVE;
+ DHD_DBG_RING_UNLOCK(ring->lock, flags);
+ return BCME_ERROR;
+ }
+ }
+ total_len += rlen;
+ } while (rlen > 0);
+ /* now update the section header length in the file */
+ sec_hdr->length = total_len;
+ dhd_os_write_file_posn(file, &fpos_sechdr, (char *)sec_hdr, sizeof(*sec_hdr));
+ } else {
+ DHD_ERROR(("%s: No concise buffer available !\n", __FUNCTION__));
+ }
+
+ DHD_DBG_RING_LOCK(ring->lock, flags);
+ ring->state = RING_ACTIVE;
+ /* Resetting both read and write pointer,
+ * since all items are read.
+ */
+ ring->rp = ring->wp = 0;
+ DHD_DBG_RING_UNLOCK(ring->lock, flags);
+ return BCME_OK;
+}
+
+/* logdump cookie */
+#define MAX_LOGUDMP_COOKIE_CNT 10u
+#define LOGDUMP_COOKIE_STR_LEN 50u
+int
+dhd_logdump_cookie_init(dhd_pub_t *dhdp, uint8 *buf, uint32 buf_size)
+{
+ uint32 ring_size;
+
+ if (!dhdp || !buf) {
+ DHD_ERROR(("INVALID PTR: dhdp:%p buf:%p\n", dhdp, buf));
+ return BCME_ERROR;
+ }
+
+ ring_size = dhd_ring_get_hdr_size() + LOGDUMP_COOKIE_STR_LEN * MAX_LOGUDMP_COOKIE_CNT;
+ if (buf_size < ring_size) {
+ DHD_ERROR(("BUF SIZE IS TO SHORT: req:%d buf_size:%d\n",
+ ring_size, buf_size));
+ return BCME_ERROR;
+ }
+
+ dhdp->logdump_cookie = dhd_ring_init(dhdp, buf, buf_size,
+ LOGDUMP_COOKIE_STR_LEN, MAX_LOGUDMP_COOKIE_CNT,
+ DHD_RING_TYPE_FIXED);
+ if (!dhdp->logdump_cookie) {
+ DHD_ERROR(("FAIL TO INIT COOKIE RING\n"));
+ return BCME_ERROR;
+ }
+
+ return BCME_OK;
+}
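+
+/* Sizing example for the check above: the backing buffer must hold
+ * dhd_ring_get_hdr_size() plus MAX_LOGUDMP_COOKIE_CNT (10) slots of
+ * LOGDUMP_COOKIE_STR_LEN (50) bytes, i.e. header + 500 bytes at minimum.
+ */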
+
+void
+dhd_logdump_cookie_deinit(dhd_pub_t *dhdp)
+{
+ if (!dhdp) {
+ return;
+ }
+ if (dhdp->logdump_cookie) {
+ dhd_ring_deinit(dhdp, dhdp->logdump_cookie);
+ }
+
+ return;
+}
+
+#ifdef DHD_TX_PROFILE
+int
+dhd_tx_profile_detach(dhd_pub_t *dhdp)
+{
+ int result = BCME_ERROR;
+
+ if (dhdp != NULL && dhdp->protocol_filters != NULL) {
+ MFREE(dhdp->osh, dhdp->protocol_filters, DHD_MAX_PROFILES *
+ sizeof(*(dhdp->protocol_filters)));
+ dhdp->protocol_filters = NULL;
+
+ result = BCME_OK;
+ }
+
+ return result;
+}
+
+int
+dhd_tx_profile_attach(dhd_pub_t *dhdp)
+{
+ int result = BCME_ERROR;
+
+ if (dhdp != NULL) {
+ dhdp->protocol_filters = (dhd_tx_profile_protocol_t*)MALLOCZ(dhdp->osh,
+ DHD_MAX_PROFILES * sizeof(*(dhdp->protocol_filters)));
+
+ if (dhdp->protocol_filters != NULL) {
+ result = BCME_OK;
+ }
+ }
+
+ if (result != BCME_OK) {
+ DHD_ERROR(("%s:\tMALLOC of tx profile protocol filters failed\n",
+ __FUNCTION__));
+ }
+
+ return result;
+}
+#endif /* defined(DHD_TX_PROFILE) */
+
+void
+dhd_logdump_cookie_save(dhd_pub_t *dhdp, char *cookie, char *type)
+{
+ char *ptr;
+
+ if (!dhdp || !cookie || !type || !dhdp->logdump_cookie) {
+ DHD_ERROR(("%s: At least one buffer ptr is NULL dhdp=%p cookie=%p"
+ " type = %p, cookie_cfg:%p\n", __FUNCTION__,
+ dhdp, cookie, type, dhdp?dhdp->logdump_cookie: NULL));
+ return;
+ }
+ ptr = (char *)dhd_ring_get_empty(dhdp->logdump_cookie);
+ if (ptr == NULL) {
+ DHD_ERROR(("%s : Skip to save due to locking\n", __FUNCTION__));
+ return;
+ }
+ scnprintf(ptr, LOGDUMP_COOKIE_STR_LEN, "%s: %s\n", type, cookie);
+ return;
+}
+
+int
+dhd_logdump_cookie_get(dhd_pub_t *dhdp, char *ret_cookie, uint32 buf_size)
+{
+ char *ptr;
+
+ if (!dhdp || !ret_cookie || !dhdp->logdump_cookie) {
+ DHD_ERROR(("%s: At least one buffer ptr is NULL dhdp=%p"
+ "cookie=%p cookie_cfg:%p\n", __FUNCTION__,
+ dhdp, ret_cookie, dhdp?dhdp->logdump_cookie: NULL));
+ return BCME_ERROR;
+ }
+ ptr = (char *)dhd_ring_get_first(dhdp->logdump_cookie);
+ if (ptr == NULL) {
+ DHD_ERROR(("%s : Skip to save due to locking\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+ memcpy(ret_cookie, ptr, MIN(buf_size, strlen(ptr)));
+ dhd_ring_free_first(dhdp->logdump_cookie);
+ return BCME_OK;
+}
+
+int
+dhd_logdump_cookie_count(dhd_pub_t *dhdp)
+{
+ if (!dhdp || !dhdp->logdump_cookie) {
+ DHD_ERROR(("%s: At least one buffer ptr is NULL dhdp=%p cookie=%p\n",
+ __FUNCTION__, dhdp, dhdp?dhdp->logdump_cookie: NULL));
+ return 0;
+ }
+ return dhd_ring_get_cur_size(dhdp->logdump_cookie);
+}
+
+static inline int
+__dhd_log_dump_cookie_to_file(
+ dhd_pub_t *dhdp, void *fp, const void *user_buf, unsigned long *f_pos,
+ char *buf, uint32 buf_size)
+{
+
+ uint32 remain = buf_size;
+ int ret = BCME_ERROR;
+ char tmp_buf[LOGDUMP_COOKIE_STR_LEN];
+ log_dump_section_hdr_t sec_hdr;
+ uint32 read_idx;
+ uint32 write_idx;
+
+ read_idx = dhd_ring_get_read_idx(dhdp->logdump_cookie);
+ write_idx = dhd_ring_get_write_idx(dhdp->logdump_cookie);
+ while (dhd_logdump_cookie_count(dhdp) > 0) {
+ memset(tmp_buf, 0, sizeof(tmp_buf));
+ ret = dhd_logdump_cookie_get(dhdp, tmp_buf, LOGDUMP_COOKIE_STR_LEN);
+ if (ret != BCME_OK) {
+ return ret;
+ }
+ remain -= scnprintf(&buf[buf_size - remain], remain, "%s", tmp_buf);
+ }
+ dhd_ring_set_read_idx(dhdp->logdump_cookie, read_idx);
+ dhd_ring_set_write_idx(dhdp->logdump_cookie, write_idx);
+
+ ret = dhd_export_debug_data(COOKIE_LOG_HDR, fp, user_buf, strlen(COOKIE_LOG_HDR), f_pos);
+ if (ret < 0) {
+ DHD_ERROR(("%s : Write file Error for cookie hdr\n", __FUNCTION__));
+ return ret;
+ }
+ sec_hdr.magic = LOG_DUMP_MAGIC;
+ sec_hdr.timestamp = local_clock();
+ sec_hdr.type = LOG_DUMP_SECTION_COOKIE;
+ sec_hdr.length = buf_size - remain;
+
+ ret = dhd_export_debug_data((char *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), f_pos);
+ if (ret < 0) {
+ DHD_ERROR(("%s : Write file Error for section hdr\n", __FUNCTION__));
+ return ret;
+ }
+
+ ret = dhd_export_debug_data(buf, fp, user_buf, sec_hdr.length, f_pos);
+ if (ret < 0) {
+ DHD_ERROR(("%s : Write file Error for cookie data\n", __FUNCTION__));
+ }
+
+ return ret;
+}
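+
+/* Note on the helper above: the ring's read/write indices are saved before
+ * the drain loop and restored afterwards, so formatting the cookies for the
+ * dump does not consume them from the ring.
+ */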
+
+uint32
+dhd_log_dump_cookie_len(dhd_pub_t *dhdp)
+{
+ int len = 0;
+ char tmp_buf[LOGDUMP_COOKIE_STR_LEN];
+ log_dump_section_hdr_t sec_hdr;
+ char *buf = NULL;
+ int ret = BCME_ERROR;
+ uint32 buf_size = MAX_LOGUDMP_COOKIE_CNT * LOGDUMP_COOKIE_STR_LEN;
+ uint32 read_idx;
+ uint32 write_idx;
+ uint32 remain;
+
+ remain = buf_size;
+
+ if (!dhdp || !dhdp->logdump_cookie) {
+ DHD_ERROR(("%s At least one ptr is NULL "
+ "dhdp = %p cookie %p\n",
+ __FUNCTION__, dhdp, dhdp?dhdp->logdump_cookie:NULL));
+ goto exit;
+ }
+
+ buf = (char *)MALLOCZ(dhdp->osh, buf_size);
+ if (!buf) {
+ DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__));
+ goto exit;
+ }
+
+ read_idx = dhd_ring_get_read_idx(dhdp->logdump_cookie);
+ write_idx = dhd_ring_get_write_idx(dhdp->logdump_cookie);
+ while (dhd_logdump_cookie_count(dhdp) > 0) {
+ memset(tmp_buf, 0, sizeof(tmp_buf));
+ ret = dhd_logdump_cookie_get(dhdp, tmp_buf, LOGDUMP_COOKIE_STR_LEN);
+ if (ret != BCME_OK) {
+ goto exit;
+ }
+ remain -= (uint32)strlen(tmp_buf);
+ }
+ dhd_ring_set_read_idx(dhdp->logdump_cookie, read_idx);
+ dhd_ring_set_write_idx(dhdp->logdump_cookie, write_idx);
+ len += strlen(COOKIE_LOG_HDR);
+ len += sizeof(sec_hdr);
+ len += (buf_size - remain);
+exit:
+ if (buf)
+ MFREE(dhdp->osh, buf, buf_size);
+ return len;
+}
+
+int
+dhd_log_dump_cookie(dhd_pub_t *dhdp, const void *user_buf)
+{
+ int ret = BCME_ERROR;
+ char tmp_buf[LOGDUMP_COOKIE_STR_LEN];
+ log_dump_section_hdr_t sec_hdr;
+ char *buf = NULL;
+ uint32 buf_size = MAX_LOGUDMP_COOKIE_CNT * LOGDUMP_COOKIE_STR_LEN;
+ int pos = 0;
+ uint32 read_idx;
+ uint32 write_idx;
+ uint32 remain;
+
+ remain = buf_size;
+
+ if (!dhdp || !dhdp->logdump_cookie) {
+ DHD_ERROR(("%s At least one ptr is NULL "
+ "dhdp = %p cookie %p\n",
+ __FUNCTION__, dhdp, dhdp?dhdp->logdump_cookie:NULL));
+ goto exit;
+ }
+
+ buf = (char *)MALLOCZ(dhdp->osh, buf_size);
+ if (!buf) {
+ DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__));
+ goto exit;
+ }
+
+ read_idx = dhd_ring_get_read_idx(dhdp->logdump_cookie);
+ write_idx = dhd_ring_get_write_idx(dhdp->logdump_cookie);
+ while (dhd_logdump_cookie_count(dhdp) > 0) {
+ memset(tmp_buf, 0, sizeof(tmp_buf));
+ ret = dhd_logdump_cookie_get(dhdp, tmp_buf, LOGDUMP_COOKIE_STR_LEN);
+ if (ret != BCME_OK) {
+ goto exit;
+ }
+ remain -= scnprintf(&buf[buf_size - remain], remain, "%s", tmp_buf);
+ }
+ dhd_ring_set_read_idx(dhdp->logdump_cookie, read_idx);
+ dhd_ring_set_write_idx(dhdp->logdump_cookie, write_idx);
+ ret = dhd_export_debug_data(COOKIE_LOG_HDR, NULL, user_buf, strlen(COOKIE_LOG_HDR), &pos);
+ sec_hdr.magic = LOG_DUMP_MAGIC;
+ sec_hdr.timestamp = local_clock();
+ sec_hdr.type = LOG_DUMP_SECTION_COOKIE;
+ sec_hdr.length = buf_size - remain;
+ ret = dhd_export_debug_data((char *)&sec_hdr, NULL, user_buf, sizeof(sec_hdr), &pos);
+ ret = dhd_export_debug_data(buf, NULL, user_buf, sec_hdr.length, &pos);
+exit:
+ if (buf)
+ MFREE(dhdp->osh, buf, buf_size);
+ return ret;
+}
+
+int
+dhd_log_dump_cookie_to_file(dhd_pub_t *dhdp, void *fp, const void *user_buf, unsigned long *f_pos)
+{
+ char *buf;
+ int ret = BCME_ERROR;
+ uint32 buf_size = MAX_LOGUDMP_COOKIE_CNT * LOGDUMP_COOKIE_STR_LEN;
+
+ if (!dhdp || !dhdp->logdump_cookie || (!fp && !user_buf) || !f_pos) {
+ DHD_ERROR(("%s At least one ptr is NULL "
+ "dhdp = %p cookie %p fp = %p f_pos = %p\n",
+ __FUNCTION__, dhdp, dhdp?dhdp->logdump_cookie:NULL, fp, f_pos));
+ return ret;
+ }
+
+ buf = (char *)MALLOCZ(dhdp->osh, buf_size);
+ if (!buf) {
+ DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__));
+ return ret;
+ }
+ ret = __dhd_log_dump_cookie_to_file(dhdp, fp, user_buf, f_pos, buf, buf_size);
+ MFREE(dhdp->osh, buf, buf_size);
+
+ return ret;
+}
+#endif /* DHD_LOG_DUMP */
+#endif /* LINUX || linux */
+
+#if defined(DISABLE_HE_ENAB) || defined(CUSTOM_CONTROL_HE_ENAB)
+int
+dhd_control_he_enab(dhd_pub_t * dhd, uint8 he_enab)
+{
+ int ret = BCME_OK;
+ bcm_xtlv_t *pxtlv = NULL;
+ uint8 mybuf[DHD_IOVAR_BUF_SIZE];
+ uint16 mybuf_len = sizeof(mybuf);
+ pxtlv = (bcm_xtlv_t *)mybuf;
+
+ ret = bcm_pack_xtlv_entry((uint8**)&pxtlv, &mybuf_len, WL_HE_CMD_ENAB, sizeof(he_enab),
+ &he_enab, BCM_XTLV_OPTION_ALIGN32);
+
+ if (ret != BCME_OK) {
+ /* log the BCME error before converting it to -EINVAL */
+ DHD_ERROR(("%s failed to pack he enab, err: %s\n", __FUNCTION__, bcmerrorstr(ret)));
+ ret = -EINVAL;
+ return ret;
+ }
+
+ ret = dhd_iovar(dhd, 0, "he", (char *)&mybuf, sizeof(mybuf), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s he_enab (%d) set failed, err: %s\n",
+ __FUNCTION__, he_enab, bcmerrorstr(ret)));
+ } else {
+ DHD_ERROR(("%s he_enab (%d) set successed\n", __FUNCTION__, he_enab));
+ }
+
+ return ret;
+}
+#endif /* DISABLE_HE_ENAB || CUSTOM_CONTROL_HE_ENAB */
+
+#ifdef CONFIG_ROAM_RSSI_LIMIT
+int
+dhd_roam_rssi_limit_get(dhd_pub_t *dhd, int *lmt2g, int *lmt5g)
+{
+ wlc_roam_rssi_limit_t *plmt;
+ wlc_roam_rssi_lmt_info_v1_t *pinfo;
+ int ret = BCME_OK;
+ int plmt_len = sizeof(*pinfo) + ROAMRSSI_HDRLEN;
+
+ plmt = (wlc_roam_rssi_limit_t *)MALLOCZ(dhd->osh, plmt_len);
+ if (!plmt) {
+ DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__));
+ return BCME_NOMEM;
+ }
+
+ /* Get roam rssi limit */
+ ret = dhd_iovar(dhd, 0, "roam_rssi_limit", NULL, 0, (char *)plmt, plmt_len, FALSE);
+ if (ret < 0) {
+ DHD_ERROR(("%s Failed to Get roam_rssi_limit %d\n", __FUNCTION__, ret));
+ goto done;
+ }
+
+ if (plmt->ver != WLC_ROAM_RSSI_LMT_VER_1) {
+ ret = BCME_VERSION;
+ goto done;
+ }
+
+ pinfo = (wlc_roam_rssi_lmt_info_v1_t *)plmt->data;
+ *lmt2g = (int)pinfo->rssi_limit_2g;
+ *lmt5g = (int)pinfo->rssi_limit_5g;
+
+done:
+ if (plmt) {
+ MFREE(dhd->osh, plmt, plmt_len);
+ }
+ return ret;
+}
+
+int
+dhd_roam_rssi_limit_set(dhd_pub_t *dhd, int lmt2g, int lmt5g)
+{
+ wlc_roam_rssi_limit_t *plmt;
+ wlc_roam_rssi_lmt_info_v1_t *pinfo;
+ int ret = BCME_OK;
+ int plmt_len = sizeof(*pinfo) + ROAMRSSI_HDRLEN;
+
+ /* Sanity check RSSI limit Value */
+ if ((lmt2g < ROAMRSSI_2G_MIN) || (lmt2g > ROAMRSSI_2G_MAX)) {
+ DHD_ERROR(("%s Not In Range 2G ROAM RSSI Limit\n", __FUNCTION__));
+ return BCME_RANGE;
+ }
+ if ((lmt5g < ROAMRSSI_5G_MIN) || (lmt5g > ROAMRSSI_5G_MAX)) {
+ DHD_ERROR(("%s Not In Range 5G ROAM RSSI Limit\n", __FUNCTION__));
+ return BCME_RANGE;
+ }
+
+ plmt = (wlc_roam_rssi_limit_t *)MALLOCZ(dhd->osh, plmt_len);
+ if (!plmt) {
+ DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__));
+ return BCME_NOMEM;
+ }
+ plmt->ver = WLC_ROAM_RSSI_LMT_VER_1;
+ plmt->len = sizeof(*pinfo);
+ pinfo = (wlc_roam_rssi_lmt_info_v1_t *)plmt->data;
+ pinfo->rssi_limit_2g = (int16)lmt2g;
+ pinfo->rssi_limit_5g = (int16)lmt5g;
+
+ /* Set roam rssi limit */
+ ret = dhd_iovar(dhd, 0, "roam_rssi_limit", (char *)plmt, plmt_len, NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s Failed to Get roam_rssi_limit %d\n", __FUNCTION__, ret));
+ goto done;
+ }
+done:
+ if (plmt) {
+ MFREE(dhd->osh, plmt, plmt_len);
+ }
+ return ret;
+}
+#endif /* CONFIG_ROAM_RSSI_LIMIT */
+
+#ifdef CONFIG_ROAM_MIN_DELTA
+int
+dhd_roam_min_delta_get(dhd_pub_t *dhd, uint32 *dt2g, uint32 *dt5g)
+{
+ wlc_roam_min_delta_t *pmin_delta;
+ wlc_roam_min_delta_info_v1_t *pmin_delta_info;
+ int ret = BCME_OK;
+ int plen = sizeof(*pmin_delta_info) + ROAM_MIN_DELTA_HDRLEN;
+
+ pmin_delta = (wlc_roam_min_delta_t *)MALLOCZ(dhd->osh, plen);
+ if (!pmin_delta) {
+ DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__));
+ return BCME_NOMEM;
+ }
+
+ /* Get Minimum ROAM score delta */
+ ret = dhd_iovar(dhd, 0, "roam_min_delta", NULL, 0, (char *)pmin_delta, plen, FALSE);
+ if (ret < 0) {
+ DHD_ERROR(("%s Failed to Get roam_min_delta %d\n", __FUNCTION__, ret));
+ goto done;
+ }
+
+ if (pmin_delta->ver != WLC_ROAM_MIN_DELTA_VER_1) {
+ ret = BCME_VERSION;
+ goto done;
+ }
+
+ pmin_delta_info = (wlc_roam_min_delta_info_v1_t *)pmin_delta->data;
+ *dt2g = (uint32)pmin_delta_info->roam_min_delta_2g;
+ *dt5g = (uint32)pmin_delta_info->roam_min_delta_5g;
+
+done:
+ if (pmin_delta) {
+ MFREE(dhd->osh, pmin_delta, plen);
+ }
+ return ret;
+}
+
+int
+dhd_roam_min_delta_set(dhd_pub_t *dhd, uint32 dt2g, uint32 dt5g)
+{
+ wlc_roam_min_delta_t *pmin_delta;
+ wlc_roam_min_delta_info_v1_t *pmin_delta_info;
+ int ret = BCME_OK;
+ int plen = sizeof(*pmin_delta_info) + ROAM_MIN_DELTA_HDRLEN;
+
+ /* Sanity check Minimum ROAM score delta */
+ if ((dt2g > ROAM_MIN_DELTA_MAX) || (dt5g > ROAM_MIN_DELTA_MAX)) {
+ DHD_ERROR(("%s Not In Range Minimum ROAM score delta, 2G: %d, 5G: %d\n",
+ __FUNCTION__, dt2g, dt5g));
+ return BCME_RANGE;
+ }
+
+ pmin_delta = (wlc_roam_min_delta_t *)MALLOCZ(dhd->osh, plen);
+ if (!pmin_delta) {
+ DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__));
+ return BCME_NOMEM;
+ }
+ pmin_delta->ver = WLC_ROAM_MIN_DELTA_VER_1;
+ pmin_delta->len = sizeof(*pmin_delta_info);
+ pmin_delta_info = (wlc_roam_min_delta_info_v1_t *)pmin_delta->data;
+ pmin_delta_info->roam_min_delta_2g = (uint32)dt2g;
+ pmin_delta_info->roam_min_delta_5g = (uint32)dt5g;
+
+ /* Set Minimum ROAM score delta */
+ ret = dhd_iovar(dhd, 0, "roam_min_delta", (char *)pmin_delta, plen, NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s Failed to Set roam_min_delta %d\n", __FUNCTION__, ret));
+ goto done;
+ }
+done:
+ if (pmin_delta) {
+ MFREE(dhd->osh, pmin_delta, plen);
+ }
+ return ret;
+}
+#endif /* CONFIG_ROAM_MIN_DELTA */
+
+#ifdef HOST_SFH_LLC
+#define SSTLOOKUP(proto) (((proto) == 0x80f3) || ((proto) == 0x8137))
+/** Convert Ethernet to 802.3 per 802.1H (use bridge-tunnel if type in SST)
+ * Note:- This function will overwrite the ethernet header in the pkt
+ * with a 802.3 ethernet + LLC/SNAP header by utilising the headroom
+ * in the packet. The pkt data pointer should be pointing to the
+ * start of the packet (at the ethernet header) when the function is called.
+ * The pkt data pointer will be pointing to the
+ * start of the new 802.3 header if the function returns successfully
+ *
+ *
+ * Original Ethernet (header length = 14):
+ * ----------------------------------------------------------------------------------------
+ * | | DA | SA | T | Data... |
+ * ----------------------------------------------------------------------------------------
+ * 6 6 2
+ *
+ * Conversion to 802.3 (header length = 22):
+ * (LLC includes ether_type in last 2 bytes):
+ * ----------------------------------------------------------------------------------------
+ * | | DA | SA | L | LLC/SNAP | T | Data... |
+ * ----------------------------------------------------------------------------------------
+ * 6 6 2 6 2
+ */
+int
+BCMFASTPATH(dhd_ether_to_8023_hdr)(osl_t *osh, struct ether_header *eh, void *p)
+{
+ struct ether_header *neh;
+ struct dot11_llc_snap_header *lsh;
+ uint16 plen, ether_type;
+
+ if (PKTHEADROOM(osh, p) < DOT11_LLC_SNAP_HDR_LEN) {
+ DHD_ERROR(("%s: FATAL! not enough pkt headroom !\n", __FUNCTION__));
+ ASSERT(0);
+ return BCME_BUFTOOSHORT;
+ }
+
+ ether_type = ntoh16(eh->ether_type);
+ neh = (struct ether_header *)PKTPUSH(osh, p, DOT11_LLC_SNAP_HDR_LEN);
+
+ /* 802.3 MAC header */
+ eacopy((char*)eh->ether_dhost, (char*)neh->ether_dhost);
+ eacopy((char*)eh->ether_shost, (char*)neh->ether_shost);
+ plen = (uint16)PKTLEN(osh, p) - ETHER_HDR_LEN;
+ neh->ether_type = hton16(plen);
+
+ /* 802.2 LLC header */
+ lsh = (struct dot11_llc_snap_header *)&neh[1];
+ lsh->dsap = 0xaa;
+ lsh->ssap = 0xaa;
+ lsh->ctl = 0x03;
+
+ /* 802.2 SNAP header Use RFC1042 or bridge-tunnel if type in SST per 802.1H */
+ lsh->oui[0] = 0x00;
+ lsh->oui[1] = 0x00;
+ if (SSTLOOKUP(ether_type))
+ lsh->oui[2] = 0xf8;
+ else
+ lsh->oui[2] = 0x00;
+ lsh->type = hton16(ether_type);
+
+ return BCME_OK;
+}
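+
+/* Worked example for the SNAP OUI choice above: an IPv4 frame (T = 0x0800,
+ * not in the SST) becomes DA|SA|len|AA:AA:03|00:00:00|08:00|data (RFC1042),
+ * while IPX (0x8137) gets the bridge-tunnel OUI 00:00:F8 per 802.1H.
+ */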
+
+/** Convert 802.3+LLC to ethernet
+ * Note:- This function will overwrite the 802.3+LLC hdr in the pkt
+ * with an ethernet header. The pkt data pointer should be pointing to the
+ * start of the packet (at the 802.3 header) when the function is called.
+ * The pkt data pointer will be pointing to the
+ * start of the ethernet header if the function returns successfully
+ */
+int
+BCMFASTPATH(dhd_8023_llc_to_ether_hdr)(osl_t *osh, struct ether_header *eh8023, void *p)
+{
+ struct dot11_llc_snap_header *lsh = NULL;
+ uint16 ether_type = 0;
+ uint8 *pdata = NULL;
+
+ if (!p || !eh8023)
+ return BCME_BADARG;
+
+ pdata = PKTDATA(osh, p);
+ ether_type = ntoh16(eh8023->ether_type);
+ /* ether type in 802.3 hdr for sfh llc host insertion case
+ * contains length, replace it with actual ether type at the
+ * end of the LLC hdr
+ */
+ if (ether_type < ETHER_TYPE_MIN) {
+ /* 802.2 LLC header */
+ lsh = (struct dot11_llc_snap_header *)(pdata + sizeof(*eh8023));
+ eh8023->ether_type = lsh->type;
+ pdata = PKTPULL(osh, p, DOT11_LLC_SNAP_HDR_LEN);
+ memcpy_s(pdata, sizeof(*eh8023), eh8023, sizeof(*eh8023));
+ } else {
+ DHD_ERROR_RLMT(("ethertype 0x%x is not a length !\n", ether_type));
+ return BCME_BADARG;
+ }
+
+ return BCME_OK;
+}
+#endif /* HOST_SFH_LLC */
+
+#ifdef DHD_AWDL
+
+#define AWDL_MIN_EXTENSION_DEFAULT 0x3u
+#define AWDL_PRESENCE_MODE_DEFAULT 0x4u
+#define AWDL_FLAGS_DEFAULT 0x0000u
+#define AWDL_PID 0x0800u
+#define AWDL_USERDATA_SIZE 6u
+/** Convert Ethernet to 802.3 + AWDL LLC SNAP header
+ * Note:- This function will overwrite the ethernet header in the pkt 'p'
+ * with a 802.3 ethernet + AWDL LLC/SNAP header by utilising the headroom
+ * in the packet. The pkt data pointer should be pointing to the
+ * start of the packet (at the ethernet header) when the function is called.
+ * The pkt data pointer will be pointing to the
+ * start of the new 802.3 header if the function returns successfully
+ */
+int
+BCMFASTPATH(dhd_ether_to_awdl_llc_hdr)(struct dhd_pub *dhd, struct ether_header *eh, void *p)
+{
+ osl_t *osh = dhd->osh;
+ struct ether_header *neh;
+ struct dot11_llc_snap_header *lsh;
+ uint16 plen, ether_type;
+ uint8 *awdl_data = NULL;
+ uint16 *seq = NULL;
+ uint16 *flags = NULL;
+ uint16 *type = NULL;
+
+ if (PKTHEADROOM(osh, p) < (2 * DOT11_LLC_SNAP_HDR_LEN)) {
+ DHD_ERROR(("%s: FATAL! not enough pkt headroom !\n", __FUNCTION__));
+ ASSERT(0);
+ return BCME_BUFTOOSHORT;
+ }
+
+ ether_type = ntoh16(eh->ether_type);
+ neh = (struct ether_header *)PKTPUSH(osh, p, 2 * DOT11_LLC_SNAP_HDR_LEN);
+
+ /* 802.3 MAC header */
+ eacopy((char*)eh->ether_dhost, (char*)neh->ether_dhost);
+ eacopy((char*)eh->ether_shost, (char*)neh->ether_shost);
+ plen = (uint16)PKTLEN(osh, p) - ETHER_HDR_LEN;
+ neh->ether_type = hton16(plen);
+
+ /* 802.2 LLC header */
+ lsh = (struct dot11_llc_snap_header *)&neh[1];
+ lsh->dsap = 0xaa;
+ lsh->ssap = 0xaa;
+ lsh->ctl = 0x03;
+
+ /* 802.2 SNAP header */
+ lsh->oui[0] = 0x00;
+ lsh->oui[1] = 0x17;
+ lsh->oui[2] = 0xf2;
+ lsh->type = hton16(AWDL_PID);
+
+ /* AWDL upper layer data */
+ awdl_data = (uint8 *)&lsh[1];
+
+ awdl_data[0] = dhd->awdl_minext;
+ awdl_data[1] = dhd->awdl_presmode;
+
+ seq = (uint16 *)&awdl_data[2];
+ *seq = dhd->awdl_seq++;
+
+ flags = (uint16 *)&awdl_data[4];
+ *flags = hton16(AWDL_FLAGS_DEFAULT);
+
+ type = (uint16 *)&awdl_data[6];
+ *type = hton16(ether_type);
+
+ return BCME_OK;
+}
+
+/** Convert 802.3 + AWDL LLC SNAP header to ethernet header
+ * Note:- This function will overwrite the existing
+ * 802.3 ethernet + AWDL LLC/SNAP header in the packet 'p'
+ * with a 14 byte ethernet header
+ * The pkt data pointer should be pointing to the
+ * start of the packet (at the 802.3 header) when the function is called.
+ * The pkt data pointer will be pointing to the
+ * start of the new ethernet header if the function returns successfully
+ */
+int
+dhd_awdl_llc_to_eth_hdr(struct dhd_pub *dhd, struct ether_header *eh, void *p)
+{
+ uint16 *ethertype = NULL;
+ uint8 *ptr = NULL;
+
+ if (!eh || !p || !dhd)
+ return BCME_BADARG;
+
+ ptr = PKTDATA(dhd->osh, p);
+
+ /* copy ether type instead of length from the
+ * end of the awdl llc header to the ethernet header
+ */
+ ptr += sizeof(*eh) + DOT11_LLC_SNAP_HDR_LEN + AWDL_USERDATA_SIZE;
+ ethertype = (uint16 *)ptr;
+ eh->ether_type = *ethertype;
+
+ /* overwrite awdl llc header with ethernet header */
+ PKTPULL(dhd->osh, p, 2 * DOT11_LLC_SNAP_HDR_LEN);
+ ptr = PKTDATA(dhd->osh, p);
+ memcpy_s(ptr, sizeof(*eh), eh, sizeof(*eh));
+ return BCME_OK;
+}
+#endif /* DHD_AWDL */
+
+int
+dhd_iovar(dhd_pub_t *pub, int ifidx, char *name, char *param_buf, uint param_len, char *res_buf,
+ uint res_len, bool set)
+{
+ char *buf = NULL;
+ uint input_len;
+ wl_ioctl_t ioc;
+ int ret;
+
+ if (res_len > WLC_IOCTL_MAXLEN || param_len > WLC_IOCTL_MAXLEN)
+ return BCME_BADARG;
+
+ input_len = strlen(name) + 1 + param_len;
+
+ /* WAR to fix GET iovar returning buf too short error
+ * If param len is 0 for get iovar, increment input_len by sizeof(int)
+ * to avoid the length check error in fw
+ */
+ if (!set && !param_len) {
+ input_len += sizeof(int);
+ }
+ if (input_len > WLC_IOCTL_MAXLEN)
+ return BCME_BADARG;
+
+ buf = NULL;
+ if (set) {
+ if (res_buf || res_len != 0) {
+ DHD_ERROR(("%s: SET wrong arguemnet\n", __FUNCTION__));
+ ret = BCME_BADARG;
+ goto exit;
+ }
+ buf = MALLOCZ(pub->osh, input_len);
+ if (!buf) {
+ DHD_ERROR(("%s: mem alloc failed\n", __FUNCTION__));
+ ret = BCME_NOMEM;
+ goto exit;
+ }
+ ret = bcm_mkiovar(name, param_buf, param_len, buf, input_len);
+ if (!ret) {
+ ret = BCME_NOMEM;
+ goto exit;
+ }
+
+ ioc.cmd = WLC_SET_VAR;
+ ioc.buf = buf;
+ ioc.len = input_len;
+ ioc.set = set;
+
+ ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
+ } else {
+ if (!res_buf || !res_len) {
+ DHD_ERROR(("%s: GET failed. resp_buf NULL or length 0.\n", __FUNCTION__));
+ ret = BCME_BADARG;
+ goto exit;
+ }
+
+ if (res_len < input_len) {
+ DHD_INFO(("%s: res_len(%d) < input_len(%d)\n", __FUNCTION__,
+ res_len, input_len));
+ buf = MALLOCZ(pub->osh, input_len);
+ if (!buf) {
+ DHD_ERROR(("%s: mem alloc failed\n", __FUNCTION__));
+ ret = BCME_NOMEM;
+ goto exit;
+ }
+ ret = bcm_mkiovar(name, param_buf, param_len, buf, input_len);
+ if (!ret) {
+ ret = BCME_NOMEM;
+ goto exit;
+ }
+
+ ioc.cmd = WLC_GET_VAR;
+ ioc.buf = buf;
+ ioc.len = input_len;
+ ioc.set = set;
+
+ ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
+
+ if (ret == BCME_OK) {
+ memcpy(res_buf, buf, res_len);
+ }
+ } else {
+ memset(res_buf, 0, res_len);
+ ret = bcm_mkiovar(name, param_buf, param_len, res_buf, res_len);
+ if (!ret) {
+ ret = BCME_NOMEM;
+ goto exit;
+ }
+
+ ioc.cmd = WLC_GET_VAR;
+ ioc.buf = res_buf;
+ ioc.len = res_len;
+ ioc.set = set;
+
+ ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
+ }
+ }
+exit:
+ if (buf) {
+ MFREE(pub->osh, buf, input_len);
+ }
+ return ret;
+}
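+
+/* Illustrative usage of dhd_iovar() (iovar names shown for illustration only):
+ *   uint32 val = 1;
+ *   (void)dhd_iovar(pub, 0, "mpc", (char *)&val, sizeof(val), NULL, 0, TRUE);
+ *   char ver[128];
+ *   (void)dhd_iovar(pub, 0, "ver", NULL, 0, ver, sizeof(ver), FALSE);
+ */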
diff --git a/bcmdhd.101.10.361.x/dhd_config.c b/bcmdhd.101.10.361.x/dhd_config.c
new file mode 100755
index 0000000..77028e0
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_config.c
@@ -0,0 +1,5175 @@
+
+#include <typedefs.h>
+#include <osl.h>
+
+#include <bcmendian.h>
+#include <bcmutils.h>
+#include <hndsoc.h>
+#include <bcmsdbus.h>
+#if defined(HW_OOB) || defined(FORCE_WOWLAN)
+#include <bcmdefs.h>
+#include <bcmsdh.h>
+#include <sdio.h>
+#include <sbchipc.h>
+#endif
+#ifdef DHDTCPACK_SUPPRESS
+#include <dhd_ip.h>
+#endif /* DHDTCPACK_SUPPRESS */
+#ifdef WL_CFG80211
+#include <wl_cfg80211.h>
+#endif
+
+#include <dhd_config.h>
+#include <dhd_dbg.h>
+#include <wl_android.h>
+#ifdef BCMPCIE
+#include <dhd_flowring.h>
+#endif
+
+#if defined(BCMSDIO) || defined(BCMPCIE)
+#include <dhd_linux.h>
+#include <dhd_bus.h>
+#ifdef BCMSDIO
+#include <linux/mmc/core.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/sdio_func.h>
+#endif /* defined(BCMSDIO) */
+#endif
+
+/* message levels */
+#define CONFIG_ERROR_LEVEL (1 << 0)
+#define CONFIG_TRACE_LEVEL (1 << 1)
+#define CONFIG_MSG_LEVEL (1 << 0)
+
+uint config_msg_level = CONFIG_ERROR_LEVEL | CONFIG_MSG_LEVEL;
+uint dump_msg_level = 0;
+
+#define CONFIG_MSG(x, args...) \
+ do { \
+ if (config_msg_level & CONFIG_MSG_LEVEL) { \
+ printf("%s : " x, __func__, ## args); \
+ } \
+ } while (0)
+#define CONFIG_ERROR(x, args...) \
+ do { \
+ if (config_msg_level & CONFIG_ERROR_LEVEL) { \
+ printf("CONFIG-ERROR) %s : " x, __func__, ## args); \
+ } \
+ } while (0)
+#define CONFIG_TRACE(x, args...) \
+ do { \
+ if (config_msg_level & CONFIG_TRACE_LEVEL) { \
+ printf("CONFIG-TRACE) %s : " x, __func__, ## args); \
+ } \
+ } while (0)
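+
+/* Example: with the error bit set in config_msg_level (the default),
+ *   CONFIG_ERROR("cis read err %d\n", err);
+ * prints "CONFIG-ERROR) dhd_conf_get_otp : cis read err -1".
+ */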
+
+#define MAXSZ_BUF 4096
+#define MAXSZ_CONFIG 8192
+
+#if defined(BCMSDIO) && defined(DYNAMIC_MAX_HDR_READ)
+extern uint firstread;
+#endif
+
+#if defined(PROP_TXSTATUS)
+#include <dhd_wlfc.h>
+#endif /* PROP_TXSTATUS */
+
+#define MAX_EVENT_BUF_NUM 16
+typedef struct eventmsg_buf {
+ u16 num;
+ struct {
+ u16 type;
+ bool set;
+ } event [MAX_EVENT_BUF_NUM];
+} eventmsg_buf_t;
+
+typedef struct chip_name_map_t {
+ uint chip;
+ uint chiprev;
+ uint ag_type;
+ char *chip_name;
+ char *module_name;
+} chip_name_map_t;
+
+/* Map of WLC_E events to connection failure strings */
+#define DONT_CARE 9999
+const chip_name_map_t chip_name_map[] = {
+ /* ChipID Chiprev AG ChipName ModuleName */
+#ifdef BCMSDIO
+ {BCM43362_CHIP_ID, 0, DONT_CARE, "bcm40181a0", ""},
+ {BCM43362_CHIP_ID, 1, DONT_CARE, "bcm40181a2", ""},
+ {BCM4330_CHIP_ID, 4, FW_TYPE_G, "bcm40183b2", ""},
+ {BCM4330_CHIP_ID, 4, FW_TYPE_AG, "bcm40183b2_ag", ""},
+ {BCM43430_CHIP_ID, 0, DONT_CARE, "bcm43438a0", "ap6212"},
+ {BCM43430_CHIP_ID, 1, DONT_CARE, "bcm43438a1", "ap6212a"},
+ {BCM43430_CHIP_ID, 2, DONT_CARE, "bcm43436b0", "ap6236"},
+ {BCM43012_CHIP_ID, 1, FW_TYPE_G, "bcm43013b0", ""},
+ {BCM43012_CHIP_ID, 1, FW_TYPE_AG, "bcm43013c0_ag", ""},
+ {BCM43012_CHIP_ID, 2, DONT_CARE, "bcm43013c1_ag", ""},
+ {BCM4334_CHIP_ID, 3, DONT_CARE, "bcm4334b1_ag", ""},
+ {BCM43340_CHIP_ID, 2, DONT_CARE, "bcm43341b0_ag", ""},
+ {BCM43341_CHIP_ID, 2, DONT_CARE, "bcm43341b0_ag", ""},
+ {BCM4324_CHIP_ID, 5, DONT_CARE, "bcm43241b4_ag", ""},
+ {BCM4335_CHIP_ID, 2, DONT_CARE, "bcm4339a0_ag", ""},
+ {BCM4339_CHIP_ID, 1, DONT_CARE, "bcm4339a0_ag", "ap6335"},
+ {BCM4345_CHIP_ID, 6, DONT_CARE, "bcm43455c0_ag", "ap6255"},
+ {BCM43454_CHIP_ID, 6, DONT_CARE, "bcm43455c0_ag", ""},
+ {BCM4345_CHIP_ID, 9, DONT_CARE, "bcm43456c5_ag", "ap6256"},
+ {BCM43454_CHIP_ID, 9, DONT_CARE, "bcm43456c5_ag", ""},
+ {BCM4354_CHIP_ID, 1, DONT_CARE, "bcm4354a1_ag", ""},
+ {BCM4354_CHIP_ID, 2, DONT_CARE, "bcm4356a2_ag", "ap6356"},
+ {BCM4356_CHIP_ID, 2, DONT_CARE, "bcm4356a2_ag", ""},
+ {BCM4371_CHIP_ID, 2, DONT_CARE, "bcm4356a2_ag", ""},
+ {BCM43569_CHIP_ID, 3, DONT_CARE, "bcm4358a3_ag", ""},
+ {BCM4359_CHIP_ID, 5, DONT_CARE, "bcm4359b1_ag", ""},
+ {BCM4359_CHIP_ID, 9, DONT_CARE, "bcm4359c0_ag", "ap6398s"},
+ {BCM43751_CHIP_ID, 1, DONT_CARE, "bcm43751a1_ag", ""},
+ {BCM43751_CHIP_ID, 2, DONT_CARE, "bcm43751a2_ag", ""},
+ {BCM43752_CHIP_ID, 1, DONT_CARE, "bcm43752a1_ag", ""},
+ {BCM43752_CHIP_ID, 2, DONT_CARE, "bcm43752a2_ag", "ap6275s"},
+#endif
+#ifdef BCMPCIE
+ {BCM4354_CHIP_ID, 2, DONT_CARE, "bcm4356a2_pcie_ag", ""},
+ {BCM4356_CHIP_ID, 2, DONT_CARE, "bcm4356a2_pcie_ag", ""},
+ {BCM4359_CHIP_ID, 9, DONT_CARE, "bcm4359c0_pcie_ag", ""},
+ {BCM43751_CHIP_ID, 1, DONT_CARE, "bcm43751a1_pcie_ag", ""},
+ {BCM43751_CHIP_ID, 2, DONT_CARE, "bcm43751a2_pcie_ag", ""},
+ {BCM43752_CHIP_ID, 1, DONT_CARE, "bcm43752a1_pcie_ag", ""},
+ {BCM43752_CHIP_ID, 2, DONT_CARE, "bcm43752a2_pcie_ag", ""},
+ {BCM4375_CHIP_ID, 5, DONT_CARE, "bcm4375b4_pcie_ag", "ap6275hh3"},
+#endif
+#ifdef BCMDBUS
+ {BCM43143_CHIP_ID, 2, DONT_CARE, "bcm43143b0", ""},
+ {BCM43242_CHIP_ID, 1, DONT_CARE, "bcm43242a1_ag", ""},
+ {BCM43569_CHIP_ID, 2, DONT_CARE, "bcm4358u_ag", "ap62x8"},
+#endif
+};
+
+#ifdef UPDATE_MODULE_NAME
+typedef void (compat_func_t)(dhd_pub_t *dhd);
+typedef struct module_name_map_t {
+ uint devid;
+ uint chip;
+ uint chiprev;
+ uint svid;
+ uint ssid;
+ char *module_name;
+ char *chip_name;
+ compat_func_t *compat_func;
+} module_name_map_t;
+
+#if defined(BCMSDIO) || defined(BCMPCIE)
+static void dhd_conf_compat_vht(dhd_pub_t *dhd);
+#endif
+
+const module_name_map_t module_name_map[] = {
+ /* Device ID Chip ID Chiprev SVID SSID
+ * ModuleName ChipName Compat function
+ */
+#ifdef BCMSDIO
+ {BCM43751_CHIP_ID, BCM43752_CHIP_ID, 2, 0, 0,
+ "ap6398s2", "bcm4359c51a2_ag", dhd_conf_compat_vht},
+ {BCM43751_CHIP_ID, BCM43752_CHIP_ID, 2, 0, 0,
+ "ap6398sr32", "bcm4359c51a2_ag", dhd_conf_compat_vht},
+ {BCM43751_CHIP_ID, BCM43752_CHIP_ID, 2, 0, 0,
+ "ap6398sv", "bcm4359c51a2_ag", dhd_conf_compat_vht},
+ {BCM43751_CHIP_ID, BCM43752_CHIP_ID, 2, 0, 0,
+ "ap6398sv3", "bcm4359c51a2_ag", dhd_conf_compat_vht},
+#endif
+#ifdef BCMPCIE
+ {BCM43751_D11AX_ID, BCM43752_CHIP_ID, 2, 0x179F, 0x003C,
+ "ap6398p2", "bcm4359c51a2_pcie_ag", dhd_conf_compat_vht},
+ {BCM43751_D11AX_ID, BCM43752_CHIP_ID, 2, 0x17F9, 0x003C,
+ "ap6398p2", "bcm4359c51a2_pcie_ag", dhd_conf_compat_vht},
+ {BCM43751_D11AX_ID, BCM43752_CHIP_ID, 2, 0x17F9, 0x003D,
+ "ap6398pr32", "bcm4359c51a2_pcie_ag", dhd_conf_compat_vht},
+ {BCM43751_D11AX_ID, BCM43752_CHIP_ID, 2, 0x17F9, 0x003E,
+ "ap6398pv", "bcm4359c51a2_pcie_ag", dhd_conf_compat_vht},
+ {BCM43751_D11AX_ID, BCM43752_CHIP_ID, 2, 0x17F9, 0x003F,
+ "ap6398pv3", "bcm4359c51a2_pcie_ag", dhd_conf_compat_vht},
+#endif
+};
+#endif
+
+#ifdef BCMPCIE
+typedef struct chip_cisaddr_map_t {
+ uint chip;
+ uint chiprev;
+ uint start_addr;
+ uint end_addr;
+} chip_cisaddr_map_t;
+const chip_cisaddr_map_t chip_cisaddr_map[] = {
+ /* ChipID Chiprev Start End */
+ {BCM4354_CHIP_ID, 2, 0x0, 0x0},
+ {BCM4356_CHIP_ID, 2, 0x0, 0x0},
+ {BCM4359_CHIP_ID, 9, 0x0, 0x0},
+// {BCM43752_CHIP_ID, 2, 0x18011120, 0x18011177},
+// {BCM4375_CHIP_ID, 5, 0x18011120, 0x18011177},
+};
+#endif
+
+#ifdef DHD_TPUT_PATCH
+extern int dhd_change_mtu(dhd_pub_t *dhd, int new_mtu, int ifidx);
+#endif
+
+void
+dhd_conf_free_chip_nv_path_list(wl_chip_nv_path_list_ctrl_t *chip_nv_list)
+{
+ CONFIG_TRACE("called\n");
+
+ if (chip_nv_list->m_chip_nv_path_head) {
+ CONFIG_TRACE("Free %p\n", chip_nv_list->m_chip_nv_path_head);
+ kfree(chip_nv_list->m_chip_nv_path_head);
+ chip_nv_list->m_chip_nv_path_head = NULL;
+ }
+ chip_nv_list->count = 0;
+}
+
+#if defined(BCMSDIO) || defined(BCMPCIE)
+typedef struct cis_tuple_format {
+ uint8 id;
+ uint8 len; /* total length of tag and data */
+ uint8 tag;
+ uint8 data[1];
+} cis_tuple_format_t;
+#define SBSDIO_CIS_SIZE_LIMIT 0x200
+#define SBSDIO_TUPLE_SIZE_LIMIT 0xff
+#define CIS_TUPLE_ID_BRCM 0x80
+#define CIS_TUPLE_TAG_MACADDR 0x19
+#define CIS_TUPLE_ID_AMPAK 0x8e
+#define CIS_TUPLE_TAG_MODULE 0x41
+#define CIS_TUPLE_LENGTH 1
+#define CIS_TUPLE_HDR_LEN 2
+#endif
+
+#ifdef BCMSDIO
+#if defined(HW_OOB) || defined(FORCE_WOWLAN)
+void
+dhd_conf_set_hw_oob_intr(bcmsdh_info_t *sdh, struct si_pub *sih)
+{
+ uint32 gpiocontrol, addr;
+
+ if (CHIPID(sih->chip) == BCM43362_CHIP_ID) {
+ CONFIG_MSG("Enable HW OOB for 43362\n");
+ addr = SI_ENUM_BASE(sih) + OFFSETOF(chipcregs_t, gpiocontrol);
+ gpiocontrol = bcmsdh_reg_read(sdh, addr, 4);
+ gpiocontrol |= 0x2;
+ bcmsdh_reg_write(sdh, addr, 4, gpiocontrol);
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, 0x10005, 0xf, NULL);
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, 0x10006, 0x0, NULL);
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, 0x10007, 0x2, NULL);
+ }
+}
+#endif
+
+void
+dhd_conf_get_otp(dhd_pub_t *dhd, bcmsdh_info_t *sdh, si_t *sih)
+{
+ int i, err = -1;
+ uint8 *cis, *ptr = 0;
+ uint8 mac_header[3] = {0x80, 0x07, 0x19};
+ cis_tuple_format_t *tuple;
+ int totlen, len;
+
+ if (!(cis = MALLOC(dhd->osh, SBSDIO_CIS_SIZE_LIMIT))) {
+ CONFIG_ERROR("cis malloc failed\n");
+ return;
+ }
+ bzero(cis, SBSDIO_CIS_SIZE_LIMIT);
+
+ if ((err = bcmsdh_cis_read(sdh, 0, cis, SBSDIO_CIS_SIZE_LIMIT))) {
+ CONFIG_ERROR("cis read err %d\n", err);
+ MFREE(dhd->osh, cis, SBSDIO_CIS_SIZE_LIMIT);
+ return;
+ }
+ tuple = (cis_tuple_format_t *)cis;
+ totlen = SBSDIO_CIS_SIZE_LIMIT;
+ if (config_msg_level & CONFIG_TRACE_LEVEL) {
+ prhex("CIS", &tuple->id, totlen);
+ }
+ while (totlen >= (tuple->len + CIS_TUPLE_HDR_LEN)) {
+ len = tuple->len;
+ if ((config_msg_level & CONFIG_TRACE_LEVEL) && tuple->id) {
+ prhex("TPL", &tuple->id, tuple->len + CIS_TUPLE_HDR_LEN);
+ }
+ if (tuple->id == 0xff || tuple->len == 0xff)
+ break;
+ if ((tuple->id == CIS_TUPLE_ID_BRCM) &&
+ (tuple->tag == CIS_TUPLE_TAG_MACADDR) &&
+ (totlen >= (int)(len + CIS_TUPLE_HDR_LEN))) {
+ memcpy(&dhd->conf->otp_mac, tuple->data, ETHER_ADDR_LEN);
+ }
+#ifdef GET_OTP_MODULE_NAME
+ else if (tuple->id == CIS_TUPLE_ID_AMPAK && (tuple->len) &&
+ tuple->tag == CIS_TUPLE_TAG_MODULE) {
+ int len = tuple->len - 1;
+ if (len <= sizeof(dhd->conf->module_name) - 1) {
+ strncpy(dhd->conf->module_name, (char *)tuple->data, len);
+ CONFIG_MSG("module_name=%s\n", dhd->conf->module_name);
+ } else {
+ CONFIG_ERROR("len %d is too long, max %d\n",
+ len, (int)sizeof(dhd->conf->module_name) - 1);
+ }
+ }
+#endif
+ tuple = (cis_tuple_format_t*)((uint8*)tuple + (len + CIS_TUPLE_HDR_LEN));
+ totlen -= (len + CIS_TUPLE_HDR_LEN);
+ }
+
+ if (!memcmp(&ether_null, &dhd->conf->otp_mac, ETHER_ADDR_LEN)) {
+ ptr = cis;
+ /* Special OTP */
+ if (bcmsdh_reg_read(sdh, SI_ENUM_BASE(sih), 4) == 0x16044330) {
+ /* stop early enough that the 3-byte header plus MAC fits in the buffer */
+ for (i=0; i<=SBSDIO_CIS_SIZE_LIMIT-3-ETHER_ADDR_LEN; i++) {
+ if (!memcmp(mac_header, ptr, 3)) {
+ memcpy(&dhd->conf->otp_mac, ptr+3, ETHER_ADDR_LEN);
+ break;
+ }
+ ptr++;
+ }
+ }
+ }
+
+ ASSERT(cis);
+ MFREE(dhd->osh, cis, SBSDIO_CIS_SIZE_LIMIT);
+}
+
+#ifdef SET_FWNV_BY_MAC
+void
+dhd_conf_free_mac_list(wl_mac_list_ctrl_t *mac_list)
+{
+ int i;
+
+ CONFIG_TRACE("called\n");
+ if (mac_list->m_mac_list_head) {
+ for (i=0; i<mac_list->count; i++) {
+ if (mac_list->m_mac_list_head[i].mac) {
+ CONFIG_TRACE("Free mac %p\n", mac_list->m_mac_list_head[i].mac);
+ kfree(mac_list->m_mac_list_head[i].mac);
+ }
+ }
+ CONFIG_TRACE("Free m_mac_list_head %p\n", mac_list->m_mac_list_head);
+ kfree(mac_list->m_mac_list_head);
+ }
+ mac_list->count = 0;
+}
+
+void
+dhd_conf_set_fw_name_by_mac(dhd_pub_t *dhd, char *fw_path)
+{
+ int i, j;
+ uint8 *mac = (uint8 *)&dhd->conf->otp_mac;
+ int fw_num=0, mac_num=0;
+ uint32 oui, nic;
+ wl_mac_list_t *mac_list;
+ wl_mac_range_t *mac_range;
+ int fw_type, fw_type_new;
+ char *name_ptr;
+
+ mac_list = dhd->conf->fw_by_mac.m_mac_list_head;
+ fw_num = dhd->conf->fw_by_mac.count;
+ if (!mac_list || !fw_num)
+ return;
+
+ oui = (mac[0] << 16) | (mac[1] << 8) | (mac[2]);
+ nic = (mac[3] << 16) | (mac[4] << 8) | (mac[5]);
+
+ /* find out the last '/' */
+ i = strlen(fw_path);
+ while (i > 0) {
+ if (fw_path[i] == '/') {
+ i++;
+ break;
+ }
+ i--;
+ }
+ name_ptr = &fw_path[i];
+
+ if (strstr(name_ptr, "_apsta"))
+ fw_type = FW_TYPE_APSTA;
+ else if (strstr(name_ptr, "_p2p"))
+ fw_type = FW_TYPE_P2P;
+ else if (strstr(name_ptr, "_mesh"))
+ fw_type = FW_TYPE_MESH;
+ else if (strstr(name_ptr, "_ezmesh"))
+ fw_type = FW_TYPE_EZMESH;
+ else if (strstr(name_ptr, "_es"))
+ fw_type = FW_TYPE_ES;
+ else if (strstr(name_ptr, "_mfg"))
+ fw_type = FW_TYPE_MFG;
+ else
+ fw_type = FW_TYPE_STA;
+
+ for (i=0; i<fw_num; i++) {
+ mac_num = mac_list[i].count;
+ mac_range = mac_list[i].mac;
+ if (strstr(mac_list[i].name, "_apsta"))
+ fw_type_new = FW_TYPE_APSTA;
+ else if (strstr(mac_list[i].name, "_p2p"))
+ fw_type_new = FW_TYPE_P2P;
+ else if (strstr(mac_list[i].name, "_mesh"))
+ fw_type_new = FW_TYPE_MESH;
+ else if (strstr(mac_list[i].name, "_ezmesh"))
+ fw_type_new = FW_TYPE_EZMESH;
+ else if (strstr(mac_list[i].name, "_es"))
+ fw_type_new = FW_TYPE_ES;
+ else if (strstr(mac_list[i].name, "_mfg"))
+ fw_type_new = FW_TYPE_MFG;
+ else
+ fw_type_new = FW_TYPE_STA;
+ if (fw_type != fw_type_new) {
+ CONFIG_MSG("fw_typ=%d != fw_type_new=%d\n", fw_type, fw_type_new);
+ continue;
+ }
+ for (j=0; j<mac_num; j++) {
+ if (oui == mac_range[j].oui) {
+ if (nic >= mac_range[j].nic_start && nic <= mac_range[j].nic_end) {
+ strcpy(name_ptr, mac_list[i].name);
+ CONFIG_MSG("matched oui=0x%06X, nic=0x%06X\n", oui, nic);
+ CONFIG_MSG("fw_path=%s\n", fw_path);
+ return;
+ }
+ }
+ }
+ }
+}
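+/* Worked example of the OUI/NIC split above (MAC value illustrative):
+ * an OTP MAC of 00:90:4c:12:34:56 gives oui=0x00904C and nic=0x123456;
+ * an entry is taken only when its oui matches, nic falls inside
+ * [nic_start, nic_end], and the firmware type suffix agrees.
+ */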
+
+void
+dhd_conf_set_nv_name_by_mac(dhd_pub_t *dhd, char *nv_path)
+{
+ int i, j;
+ uint8 *mac = (uint8 *)&dhd->conf->otp_mac;
+ int nv_num=0, mac_num=0;
+ uint32 oui, nic;
+ wl_mac_list_t *mac_list;
+ wl_mac_range_t *mac_range;
+ char *pnv_name;
+
+ mac_list = dhd->conf->nv_by_mac.m_mac_list_head;
+ nv_num = dhd->conf->nv_by_mac.count;
+ if (!mac_list || !nv_num)
+ return;
+
+ oui = (mac[0] << 16) | (mac[1] << 8) | (mac[2]);
+ nic = (mac[3] << 16) | (mac[4] << 8) | (mac[5]);
+
+ /* find out the last '/' */
+ i = strlen(nv_path);
+ while (i > 0) {
+ if (nv_path[i] == '/') break;
+ i--;
+ }
+ pnv_name = &nv_path[i+1];
+
+ for (i=0; i<nv_num; i++) {
+ mac_num = mac_list[i].count;
+ mac_range = mac_list[i].mac;
+ for (j=0; j<mac_num; j++) {
+ if (oui == mac_range[j].oui) {
+ if (nic >= mac_range[j].nic_start && nic <= mac_range[j].nic_end) {
+ strcpy(pnv_name, mac_list[i].name);
+ CONFIG_MSG("matched oui=0x%06X, nic=0x%06X\n", oui, nic);
+ CONFIG_MSG("nv_path=%s\n", nv_path);
+ return;
+ }
+ }
+ }
+ }
+}
+#endif
+#endif
+
+#ifdef BCMPCIE
+static int
+dhd_conf_read_otp_from_bp(si_t *sih, uint32 *data_buf,
+ uint32 cis_start_addr, uint32 cis_max_cnt)
+{
+ int int_val = 0, i = 0, bp_idx = 0;
+ int boardtype_backplane_addr[] = {
+ 0x18010324, /* OTP Control 1 */
+ 0x18012618, /* PMU min resource mask */
+ };
+ int boardtype_backplane_data[] = {
+ 0x00fa0000,
+ 0x0e4fffff /* Keep on ARMHTAVAIL */
+ };
+ uint32 org_boardtype_backplane_data[] = {
+ 0,
+ 0
+ };
+
+ for (bp_idx=0; bp_idx<ARRAYSIZE(boardtype_backplane_addr); bp_idx++) {
+ /* Read OTP Control 1 and PMU min_rsrc_mask before writing */
+ if (si_backplane_access(sih, boardtype_backplane_addr[bp_idx], sizeof(int),
+ &org_boardtype_backplane_data[bp_idx], TRUE) != BCME_OK) {
+ CONFIG_ERROR("invalid size/addr combination\n");
+ return BCME_ERROR;
+ }
+
+ /* Write new OTP and PMU configuration */
+ if (si_backplane_access(sih, boardtype_backplane_addr[bp_idx], sizeof(int),
+ &boardtype_backplane_data[bp_idx], FALSE) != BCME_OK) {
+ CONFIG_ERROR("invalid size/addr combination\n");
+ return BCME_ERROR;
+ }
+
+ if (si_backplane_access(sih, boardtype_backplane_addr[bp_idx], sizeof(int),
+ &int_val, TRUE) != BCME_OK) {
+ CONFIG_ERROR("invalid size/addr combination\n");
+ return BCME_ERROR;
+ }
+
+ CONFIG_TRACE("boardtype_backplane_addr 0x%08x rdata 0x%04x\n",
+ boardtype_backplane_addr[bp_idx], int_val);
+ }
+
+ /* read tuple raw data */
+ for (i=0; i<cis_max_cnt; i++) {
+ if (si_backplane_access(sih, cis_start_addr + i * sizeof(uint32),
+ sizeof(uint32), &data_buf[i], TRUE) != BCME_OK) {
+ break;
+ }
+ CONFIG_TRACE("tuple index %d, raw data 0x%08x\n", i, data_buf[i]);
+ }
+
+ for (bp_idx=0; bp_idx<ARRAYSIZE(boardtype_backplane_addr); bp_idx++) {
+ /* Write original OTP and PMU configuration */
+ if (si_backplane_access(sih, boardtype_backplane_addr[bp_idx], sizeof(int),
+ &org_boardtype_backplane_data[bp_idx], FALSE) != BCME_OK) {
+ CONFIG_ERROR("invalid size/addr combination\n");
+ return BCME_ERROR;
+ }
+
+ if (si_backplane_access(sih, boardtype_backplane_addr[bp_idx], sizeof(int),
+ &int_val, TRUE) != BCME_OK) {
+ CONFIG_ERROR("invalid size/addr combination\n");
+ return BCME_ERROR;
+ }
+
+ CONFIG_TRACE("boardtype_backplane_addr 0x%08x rdata 0x%04x\n",
+ boardtype_backplane_addr[bp_idx], int_val);
+ }
+
+ return i * sizeof(uint32);
+}
+
+int
+dhd_conf_get_otp(dhd_pub_t *dhd, si_t *sih)
+{
+ int totlen, len;
+ uint32 *raw_data = NULL;
+ cis_tuple_format_t *tuple;
+ uint32 cis_start_addr = 0, cis_end_addr = 0, cis_max_cnt;
+ uint chip, chiprev;
+ int i, ret = BCME_OK;
+
+ chip = dhd->conf->chip;
+ chiprev = dhd->conf->chiprev;
+
+ for (i=0; i<sizeof(chip_cisaddr_map)/sizeof(chip_cisaddr_map[0]); i++) {
+ const chip_cisaddr_map_t* row = &chip_cisaddr_map[i];
+ if (row->chip == chip && row->chiprev == chiprev) {
+ cis_start_addr = row->start_addr;
+ cis_end_addr = row->end_addr;
+ }
+ }
+
+ if (!cis_start_addr || !cis_end_addr) {
+ CONFIG_TRACE("no matched chip\n");
+ goto exit;
+ }
+ cis_max_cnt = (cis_end_addr - cis_start_addr + 1) / sizeof(uint32);
+
+ /* data_buf holds cis_max_cnt 32-bit words, so allocate in bytes */
+ raw_data = kmalloc(cis_max_cnt * sizeof(uint32), GFP_KERNEL);
+ if (raw_data == NULL) {
+ CONFIG_ERROR("Failed to allocate buffer of %d bytes\n",
+ (int)(cis_max_cnt * sizeof(uint32)));
+ goto exit;
+ }
+
+ totlen = dhd_conf_read_otp_from_bp(sih, raw_data, cis_start_addr, cis_max_cnt);
+ if (totlen == BCME_ERROR || totlen == 0) {
+ CONFIG_ERROR("Can't read the OTP\n");
+ ret = BCME_ERROR;
+ goto exit;
+ }
+
+ tuple = (cis_tuple_format_t *)raw_data;
+
+ if (config_msg_level & CONFIG_TRACE_LEVEL) {
+ CONFIG_TRACE("start: 0x%x, end: 0x%x, totlen: %d\n",
+ cis_start_addr, cis_end_addr, totlen);
+ prhex("CIS", &tuple->id, totlen);
+ }
+
+ /* check the first tuple has tag 'start' */
+ if (tuple->id != CIS_TUPLE_ID_BRCM) {
+ CONFIG_ERROR("Can not find the TAG\n");
+ ret = BCME_ERROR;
+ goto exit;
+ }
+
+ /* find tagged parameter */
+ while (totlen >= (tuple->len + CIS_TUPLE_HDR_LEN)) {
+ len = tuple->len;
+ if ((config_msg_level & CONFIG_TRACE_LEVEL) && tuple->id) {
+ prhex("TPL", &tuple->id, tuple->len+CIS_TUPLE_HDR_LEN);
+ }
+ if ((tuple->id == CIS_TUPLE_ID_BRCM) &&
+ (tuple->tag == CIS_TUPLE_TAG_MACADDR) &&
+ (totlen >= (int)(len + CIS_TUPLE_HDR_LEN))) {
+ memcpy(&dhd->conf->otp_mac, tuple->data, ETHER_ADDR_LEN);
+ }
+ tuple = (cis_tuple_format_t*)((uint8*)tuple + (len + CIS_TUPLE_HDR_LEN));
+ totlen -= (len + CIS_TUPLE_HDR_LEN);
+ }
+
+exit:
+ if (raw_data)
+ kfree(raw_data);
+ return ret;
+}
+
+bool
+dhd_conf_legacy_msi_chip(dhd_pub_t *dhd)
+{
+ uint chip;
+
+ chip = dhd->conf->chip;
+
+ if (chip == BCM4354_CHIP_ID || chip == BCM4356_CHIP_ID ||
+ chip == BCM4371_CHIP_ID ||
+ chip == BCM4359_CHIP_ID) {
+ return true;
+ }
+
+ return false;
+}
+#endif
+
+void
+dhd_conf_free_country_list(struct dhd_conf *conf)
+{
+ country_list_t *country = conf->country_head;
+ int count = 0;
+
+ CONFIG_TRACE("called\n");
+ while (country) {
+ CONFIG_TRACE("Free cspec %s\n", country->cspec.country_abbrev);
+ conf->country_head = country->next;
+ kfree(country);
+ country = conf->country_head;
+ count++;
+ }
+ CONFIG_TRACE("%d country released\n", count);
+}
+
+void
+dhd_conf_free_mchan_list(struct dhd_conf *conf)
+{
+ mchan_params_t *mchan = conf->mchan;
+ int count = 0;
+
+ CONFIG_TRACE("called\n");
+ while (mchan) {
+ CONFIG_TRACE("Free cspec %p\n", mchan);
+ conf->mchan = mchan->next;
+ kfree(mchan);
+ mchan = conf->mchan;
+ count++;
+ }
+ CONFIG_TRACE("%d mchan released\n", count);
+}
+
+const chip_name_map_t*
+dhd_conf_match_chip(dhd_pub_t *dhd, uint ag_type)
+{
+ uint chip, chiprev;
+ int i;
+
+ chip = dhd->conf->chip;
+ chiprev = dhd->conf->chiprev;
+
+ for (i=0; i<sizeof(chip_name_map)/sizeof(chip_name_map[0]); i++) {
+ const chip_name_map_t* row = &chip_name_map[i];
+ if (row->chip == chip && row->chiprev == chiprev &&
+ (row->ag_type == ag_type ||
+ ag_type == DONT_CARE || row->ag_type == DONT_CARE)) {
+ return row;
+ }
+ }
+
+ return NULL;
+}
+
+#ifdef UPDATE_MODULE_NAME
+const module_name_map_t*
+dhd_conf_match_module(dhd_pub_t *dhd)
+{
+ uint devid, chip, chiprev;
+#ifdef BCMPCIE
+ uint svid, ssid;
+#endif
+#if defined(BCMSDIO) || defined(BCMPCIE)
+ int i;
+#endif
+
+ devid = dhd->conf->devid;
+ chip = dhd->conf->chip;
+ chiprev = dhd->conf->chiprev;
+#ifdef BCMPCIE
+ svid = dhd->conf->svid;
+ ssid = dhd->conf->ssid;
+#endif
+
+#ifdef BCMSDIO
+ for (i=0; i<sizeof(module_name_map)/sizeof(module_name_map[0]); i++) {
+ const module_name_map_t* row = &module_name_map[i];
+ if (row->devid == devid && row->chip == chip && row->chiprev == chiprev &&
+ !strcmp(row->module_name, dhd->conf->module_name)) {
+ return row;
+ }
+ }
+#endif
+
+#ifdef BCMPCIE
+ for (i=0; i<sizeof(module_name_map)/sizeof(module_name_map[0]); i++) {
+ const module_name_map_t* row = &module_name_map[i];
+ if (row->devid == devid && row->chip == chip && row->chiprev == chiprev &&
+ row->svid == svid && row->ssid == ssid) {
+ return row;
+ }
+ }
+#endif
+
+ return NULL;
+}
+#endif
+
+int
+dhd_conf_set_fw_name_by_chip(dhd_pub_t *dhd, char *fw_path)
+{
+#ifdef UPDATE_MODULE_NAME
+ const module_name_map_t* row_module = NULL;
+#endif
+ const chip_name_map_t* row_chip = NULL;
+ int fw_type, ag_type;
+ uint chip, chiprev;
+ char *name_ptr;
+ int i;
+
+ chip = dhd->conf->chip;
+ chiprev = dhd->conf->chiprev;
+
+ if (fw_path[0] == '\0') {
+#ifdef CONFIG_BCMDHD_FW_PATH
+ bcm_strncpy_s(fw_path, MOD_PARAM_PATHLEN-1, CONFIG_BCMDHD_FW_PATH, MOD_PARAM_PATHLEN-1);
+ if (fw_path[0] == '\0')
+#endif
+ {
+ CONFIG_MSG("firmware path is null\n");
+ return 0;
+ }
+ }
+#ifndef FW_PATH_AUTO_SELECT
+ return DONT_CARE;
+#endif
+
+ /* find out the last '/' */
+ i = strlen(fw_path);
+ while (i > 0) {
+ if (fw_path[i] == '/') {
+ i++;
+ break;
+ }
+ i--;
+ }
+ name_ptr = &fw_path[i];
+#ifdef BAND_AG
+ ag_type = FW_TYPE_AG;
+#else
+ ag_type = strstr(name_ptr, "_ag") ? FW_TYPE_AG : FW_TYPE_G;
+#endif
+ if (strstr(name_ptr, "_apsta"))
+ fw_type = FW_TYPE_APSTA;
+ else if (strstr(name_ptr, "_p2p"))
+ fw_type = FW_TYPE_P2P;
+ else if (strstr(name_ptr, "_mesh"))
+ fw_type = FW_TYPE_MESH;
+ else if (strstr(name_ptr, "_ezmesh"))
+ fw_type = FW_TYPE_EZMESH;
+ else if (strstr(name_ptr, "_es"))
+ fw_type = FW_TYPE_ES;
+ else if (strstr(name_ptr, "_mfg"))
+ fw_type = FW_TYPE_MFG;
+ else if (strstr(name_ptr, "_minime"))
+ fw_type = FW_TYPE_MINIME;
+ else
+ fw_type = FW_TYPE_STA;
+#ifdef WLEASYMESH
+ if (dhd->conf->fw_type == FW_TYPE_EZMESH)
+ fw_type = FW_TYPE_EZMESH;
+#endif /* WLEASYMESH */
+
+ row_chip = dhd_conf_match_chip(dhd, ag_type);
+ if (row_chip && strlen(row_chip->chip_name)) {
+ strcpy(name_ptr, "fw_");
+ strcat(name_ptr, row_chip->chip_name);
+#ifdef BCMUSBDEV_COMPOSITE
+ strcat(name_ptr, "_cusb");
+#endif
+ if (fw_type == FW_TYPE_APSTA)
+ strcat(name_ptr, "_apsta.bin");
+ else if (fw_type == FW_TYPE_P2P)
+ strcat(name_ptr, "_p2p.bin");
+ else if (fw_type == FW_TYPE_MESH)
+ strcat(name_ptr, "_mesh.bin");
+ else if (fw_type == FW_TYPE_EZMESH)
+ strcat(name_ptr, "_ezmesh.bin");
+ else if (fw_type == FW_TYPE_ES)
+ strcat(name_ptr, "_es.bin");
+ else if (fw_type == FW_TYPE_MFG)
+ strcat(name_ptr, "_mfg.bin");
+ else if (fw_type == FW_TYPE_MINIME)
+ strcat(name_ptr, "_minime.bin");
+ else
+ strcat(name_ptr, ".bin");
+ }
+
+#ifdef UPDATE_MODULE_NAME
+ row_module = dhd_conf_match_module(dhd);
+ if (row_module && strlen(row_module->chip_name)) {
+ strcpy(name_ptr, "fw_");
+ strcat(name_ptr, row_module->chip_name);
+#ifdef BCMUSBDEV_COMPOSITE
+ strcat(name_ptr, "_cusb");
+#endif
+ if (fw_type == FW_TYPE_APSTA)
+ strcat(name_ptr, "_apsta.bin");
+ else if (fw_type == FW_TYPE_P2P)
+ strcat(name_ptr, "_p2p.bin");
+ else if (fw_type == FW_TYPE_MESH)
+ strcat(name_ptr, "_mesh.bin");
+ else if (fw_type == FW_TYPE_EZMESH)
+ strcat(name_ptr, "_ezmesh.bin");
+ else if (fw_type == FW_TYPE_ES)
+ strcat(name_ptr, "_es.bin");
+ else if (fw_type == FW_TYPE_MFG)
+ strcat(name_ptr, "_mfg.bin");
+ else if (fw_type == FW_TYPE_MINIME)
+ strcat(name_ptr, "_minime.bin");
+ else
+ strcat(name_ptr, ".bin");
+ }
+#endif
+
+ dhd->conf->fw_type = fw_type;
+
+#ifndef MINIME
+ if (fw_type == FW_TYPE_MINIME)
+ CONFIG_ERROR("***** Please enable MINIME in Makefile *****\n");
+#endif
+
+ CONFIG_TRACE("firmware_path=%s\n", fw_path);
+ return ag_type;
+}
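+/* Example of the rewrite above (suffix assumed): with a matched
+ * chip_name of "bcm4359c51a2_ag" and a fw_path basename containing
+ * "_apsta", the basename becomes "fw_bcm4359c51a2_ag_apsta.bin" while
+ * the directory portion of fw_path is preserved.
+ */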
+
+void
+dhd_conf_set_clm_name_by_chip(dhd_pub_t *dhd, char *clm_path, int ag_type)
+{
+#ifdef UPDATE_MODULE_NAME
+ const module_name_map_t* row_module = NULL;
+#endif
+ const chip_name_map_t* row_chip = NULL;
+ uint chip, chiprev;
+ char *name_ptr;
+ int i;
+
+ chip = dhd->conf->chip;
+ chiprev = dhd->conf->chiprev;
+
+ if (clm_path[0] == '\0') {
+ CONFIG_MSG("clm path is null\n");
+ return;
+ }
+
+ /* find out the last '/' */
+ i = strlen(clm_path);
+ while (i > 0) {
+ if (clm_path[i] == '/') {
+ i++;
+ break;
+ }
+ i--;
+ }
+ name_ptr = &clm_path[i];
+
+ row_chip = dhd_conf_match_chip(dhd, ag_type);
+ if (row_chip && strlen(row_chip->chip_name)) {
+ strcpy(name_ptr, "clm_");
+ strcat(name_ptr, row_chip->chip_name);
+ strcat(name_ptr, ".blob");
+ }
+
+#ifdef UPDATE_MODULE_NAME
+ row_module = dhd_conf_match_module(dhd);
+ if (row_module && strlen(row_module->chip_name)) {
+ strcpy(name_ptr, "clm_");
+ strcat(name_ptr, row_module->chip_name);
+ strcat(name_ptr, ".blob");
+ }
+#endif
+
+ CONFIG_TRACE("clm_path=%s\n", clm_path);
+}
+
+void
+dhd_conf_set_nv_name_by_chip(dhd_pub_t *dhd, char *nv_path, int ag_type)
+{
+#if defined(BCMPCIE) && defined(UPDATE_MODULE_NAME)
+ const module_name_map_t* row_module = NULL;
+#endif
+ const chip_name_map_t* row_chip = NULL;
+ uint chip, chiprev;
+ char *name_ptr, nv_name[32];
+ int i;
+
+ chip = dhd->conf->chip;
+ chiprev = dhd->conf->chiprev;
+
+ if (nv_path[0] == '\0') {
+#ifdef CONFIG_BCMDHD_NVRAM_PATH
+ bcm_strncpy_s(nv_path, MOD_PARAM_PATHLEN-1, CONFIG_BCMDHD_NVRAM_PATH, MOD_PARAM_PATHLEN-1);
+ if (nv_path[0] == '\0')
+#endif
+ {
+ CONFIG_MSG("nvram path is null\n");
+ return;
+ }
+ }
+
+ /* find out the last '/' */
+ i = strlen(nv_path);
+ while (i > 0) {
+ if (nv_path[i] == '/') {
+ i++;
+ break;
+ }
+ i--;
+ }
+ name_ptr = &nv_path[i];
+
+ row_chip = dhd_conf_match_chip(dhd, ag_type);
+ if (row_chip && strlen(row_chip->module_name)) {
+ strcpy(name_ptr, "nvram_");
+ strcat(name_ptr, row_chip->module_name);
+#ifdef BCMUSBDEV_COMPOSITE
+ strcat(name_ptr, "_cusb");
+#endif
+ strcat(name_ptr, ".txt");
+ }
+ strcpy(nv_name, name_ptr);
+
+#if defined(BCMSDIO) && defined(GET_OTP_MODULE_NAME)
+ if (strlen(dhd->conf->module_name)) {
+ strcpy(name_ptr, "nvram_");
+ strcat(name_ptr, dhd->conf->module_name);
+ strcat(name_ptr, ".txt");
+#ifdef COMPAT_OLD_MODULE
+ if (dhd->conf->chip == BCM4359_CHIP_ID) {
+ struct file *fp;
+ // compatible for AP6398S and AP6398SA
+ fp = filp_open(nv_path, O_RDONLY, 0);
+ if (IS_ERR(fp)) {
+ strcpy(name_ptr, nv_name);
+ } else {
+ filp_close((struct file *)fp, NULL);
+ }
+ }
+#endif
+ }
+#endif
+
+#if defined(BCMPCIE) && defined(UPDATE_MODULE_NAME)
+ row_module = dhd_conf_match_module(dhd);
+ if (row_module && strlen(row_module->module_name)) {
+ strcpy(name_ptr, "nvram_");
+ strcat(name_ptr, row_module->module_name);
+ strcat(name_ptr, ".txt");
+ }
+#endif
+
+ for (i=0; i<dhd->conf->nv_by_chip.count; i++) {
+ if (chip==dhd->conf->nv_by_chip.m_chip_nv_path_head[i].chip &&
+ chiprev==dhd->conf->nv_by_chip.m_chip_nv_path_head[i].chiprev) {
+ strcpy(name_ptr, dhd->conf->nv_by_chip.m_chip_nv_path_head[i].name);
+ break;
+ }
+ }
+
+ CONFIG_TRACE("nvram_path=%s\n", nv_path);
+}
+
+void
+dhd_conf_copy_path(dhd_pub_t *dhd, char *dst_name, char *dst_path, char *src_path)
+{
+ int i;
+
+ if (src_path[0] == '\0') {
+ CONFIG_MSG("src_path is null\n");
+ return;
+ } else
+ strcpy(dst_path, src_path);
+
+ /* find out the last '/' */
+ i = strlen(dst_path);
+ while (i > 0) {
+ if (dst_path[i] == '/') {
+ i++;
+ break;
+ }
+ i--;
+ }
+ strcpy(&dst_path[i], dst_name);
+
+ CONFIG_TRACE("dst_path=%s\n", dst_path);
+}
+
+#ifdef CONFIG_PATH_AUTO_SELECT
+void
+dhd_conf_set_conf_name_by_chip(dhd_pub_t *dhd, char *conf_path)
+{
+#ifdef UPDATE_MODULE_NAME
+ const module_name_map_t* row_module = NULL;
+#endif
+ const chip_name_map_t* row_chip = NULL;
+ uint chip, chiprev;
+ char *name_ptr;
+ int i;
+
+ chip = dhd->conf->chip;
+ chiprev = dhd->conf->chiprev;
+
+ if (conf_path[0] == '\0') {
+ CONFIG_MSG("config path is null\n");
+ return;
+ }
+
+ /* find out the last '/' */
+ i = strlen(conf_path);
+ while (i > 0) {
+ if (conf_path[i] == '/') {
+ i++;
+ break;
+ }
+ i--;
+ }
+ name_ptr = &conf_path[i];
+
+ row_chip = dhd_conf_match_chip(dhd, DONT_CARE);
+ if (row_chip && strlen(row_chip->chip_name)) {
+ strcpy(name_ptr, "config_");
+ strcat(name_ptr, row_chip->chip_name);
+ strcat(name_ptr, ".txt");
+ }
+
+#ifdef UPDATE_MODULE_NAME
+ row_module = dhd_conf_match_module(dhd);
+ if (row_module && strlen(row_module->chip_name)) {
+ strcpy(name_ptr, "config_");
+ strcat(name_ptr, row_module->chip_name);
+ strcat(name_ptr, ".txt");
+ }
+#endif
+
+ CONFIG_TRACE("config_path=%s\n", conf_path);
+}
+#endif
+
+#ifdef TPUT_MONITOR
+void
+dhd_conf_tput_monitor(dhd_pub_t *dhd)
+{
+ struct dhd_conf *conf = dhd->conf;
+
+ if (conf->tput_monitor_ms && conf->data_drop_mode >= FW_DROP) {
+ if (conf->tput_ts.tv_sec == 0 && conf->tput_ts.tv_nsec == 0) {
+ osl_do_gettimeofday(&conf->tput_ts);
+ } else {
+ struct osl_timespec cur_ts;
+ int32 tput_tx = 0, tput_rx = 0, tput_tx_kb = 0,
+ tput_rx_kb = 0, tput_net = 0, tput_net_kb = 0;
+ uint32 diff_ms;
+ unsigned long diff_bytes;
+ osl_do_gettimeofday(&cur_ts);
+ diff_ms = osl_do_gettimediff(&cur_ts, &conf->tput_ts)/1000;
+ if (diff_ms >= conf->tput_monitor_ms) {
+ diff_bytes = dhd->dstats.tx_bytes - conf->last_tx;
+ tput_tx = (int32)((diff_bytes/1024/1024)*8)*1000/diff_ms;
+ if (tput_tx == 0) {
+ tput_tx = (int32)(diff_bytes*8/1024/1024)*1000/diff_ms;
+ tput_tx_kb = (int32)(diff_bytes*8*1000/1024)/diff_ms;
+ tput_tx_kb = tput_tx_kb % 1000;
+ }
+ diff_bytes = dhd->dstats.rx_bytes - conf->last_rx;
+ tput_rx = (int32)((diff_bytes/1024/1024)*8)*1000/diff_ms;
+ if (tput_rx == 0) {
+ tput_rx = (int32)(diff_bytes*8/1024/1024)*1000/diff_ms;
+ tput_rx_kb = (int32)(diff_bytes*8*1000/1024)/diff_ms;
+ tput_rx_kb = tput_rx_kb % 1000;
+ }
+ diff_bytes = conf->net_len - conf->last_net_tx;
+ tput_net = (int32)((diff_bytes/1024/1024)*8)*1000/diff_ms;
+ if (tput_net == 0) {
+ tput_net = (int32)(diff_bytes*8/1024/1024)*1000/diff_ms;
+ tput_net_kb = (int32)(diff_bytes*8*1000/1024)/diff_ms;
+ tput_net_kb = tput_net_kb % 1000;
+ }
+ conf->last_tx = dhd->dstats.tx_bytes;
+ conf->last_rx = dhd->dstats.rx_bytes;
+ conf->last_net_tx = conf->net_len;
+ memcpy(&conf->tput_ts, &cur_ts, sizeof(struct osl_timespec));
+ CONFIG_TRACE("xmit=%3d.%d%d%d Mbps, tx=%3d.%d%d%d Mbps, rx=%3d.%d%d%d Mbps\n",
+ tput_net, (tput_net_kb/100)%10, (tput_net_kb/10)%10, (tput_net_kb)%10,
+ tput_tx, (tput_tx_kb/100)%10, (tput_tx_kb/10)%10, (tput_tx_kb)%10,
+ tput_rx, (tput_rx_kb/100)%10, (tput_rx_kb/10)%10, (tput_rx_kb)%10);
+ }
+ }
+ }
+}
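+/* Throughput math sketch (numbers illustrative): for diff_bytes of
+ * 125 MB over diff_ms=1000, tput = (125*8)*1000/1000 = 1000 Mbps; the
+ * *_kb values only supply the three fractional digits that are printed.
+ */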
+#endif
+
+#ifdef DHD_TPUT_PATCH
+void
+dhd_conf_set_tput_patch(dhd_pub_t *dhd)
+{
+ struct dhd_conf *conf = dhd->conf;
+
+ if (conf->tput_patch) {
+ conf->mtu = 1500;
+ conf->pktsetsum = TRUE;
+#ifdef BCMSDIO
+ conf->dhd_dpc_prio = 98;
+/* need to check if CPU can support multi-core first,
+ * so don't enable it by default.
+ */
+// conf->dpc_cpucore = 2;
+// conf->rxf_cpucore = 3;
+// conf->disable_proptx = 1;
+ conf->frameburst = 1;
+#ifdef DYNAMIC_MAX_HDR_READ
+ conf->max_hdr_read = 256;
+ firstread = 256;
+#endif /* DYNAMIC_MAX_HDR_READ */
+ dhd_rxbound = 512;
+#endif /* BCMSDIO */
+#ifdef BCMPCIE
+#if defined(SET_XPS_CPUS)
+ conf->xps_cpus = TRUE;
+#endif /* SET_XPS_CPUS */
+#if defined(SET_RPS_CPUS)
+ conf->rps_cpus = TRUE;
+#endif /* SET_RPS_CPUS */
+ conf->orphan_move = 3;
+ conf->flow_ring_queue_threshold = 2048;
+#endif /* BCMPCIE */
+#ifdef DHDTCPACK_SUPPRESS
+ conf->tcpack_sup_ratio = 15;
+ conf->tcpack_sup_delay = 10;
+#endif /* DHDTCPACK_SUPPRESS */
+ }
+ else {
+ conf->mtu = 0;
+ conf->pktsetsum = FALSE;
+#ifdef BCMSDIO
+ conf->dhd_dpc_prio = -1;
+ conf->disable_proptx = -1;
+ conf->frameburst = 1;
+#ifdef DYNAMIC_MAX_HDR_READ
+ conf->max_hdr_read = 0;
+ firstread = 32;
+#endif /* DYNAMIC_MAX_HDR_READ */
+ dhd_rxbound = 128;
+#endif /* BCMSDIO */
+#ifdef BCMPCIE
+#if defined(SET_XPS_CPUS)
+ conf->xps_cpus = FALSE;
+#endif /* SET_XPS_CPUS */
+#if defined(SET_RPS_CPUS)
+ conf->rps_cpus = FALSE;
+#endif /* SET_RPS_CPUS */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+ conf->orphan_move = 1;
+#else
+ conf->orphan_move = 0;
+#endif
+ conf->flow_ring_queue_threshold = 2048;
+#endif /* BCMPCIE */
+#ifdef DHDTCPACK_SUPPRESS
+ conf->tcpack_sup_ratio = CUSTOM_TCPACK_SUPP_RATIO;
+ conf->tcpack_sup_delay = CUSTOM_TCPACK_DELAY_TIME;
+#endif /* DHDTCPACK_SUPPRESS */
+ }
+}
+
+void
+dhd_conf_dump_tput_patch(dhd_pub_t *dhd)
+{
+ struct dhd_conf *conf = dhd->conf;
+
+ CONFIG_TRACE("tput_patch=%d\n", conf->tput_patch);
+ CONFIG_TRACE("mtu=%d\n", conf->mtu);
+ CONFIG_TRACE("pktsetsum=%d\n", conf->pktsetsum);
+ CONFIG_TRACE("orphan_move=%d\n", conf->orphan_move);
+#ifdef DHDTCPACK_SUPPRESS
+ CONFIG_TRACE("tcpack_sup_ratio=%d\n", conf->tcpack_sup_ratio);
+ CONFIG_TRACE("tcpack_sup_delay=%d\n", conf->tcpack_sup_delay);
+#endif
+
+#ifdef BCMSDIO
+ CONFIG_TRACE("dhd_dpc_prio=%d\n", conf->dhd_dpc_prio);
+ CONFIG_TRACE("dhd_poll=%d\n", conf->dhd_poll);
+ CONFIG_TRACE("disable_proptx=%d\n", conf->disable_proptx);
+ CONFIG_TRACE("frameburst=%d\n", conf->frameburst);
+#ifdef DYNAMIC_MAX_HDR_READ
+ CONFIG_TRACE("max_hdr_read=%d\n", conf->max_hdr_read);
+ CONFIG_TRACE("firstread=%d\n", firstread);
+#endif
+ CONFIG_TRACE("dhd_rxbound=%d\n", dhd_rxbound);
+#endif
+
+#ifdef BCMPCIE
+ CONFIG_TRACE("flow_ring_queue_threshold=%d\n", conf->flow_ring_queue_threshold);
+#endif
+
+#if defined(SET_XPS_CPUS)
+ CONFIG_TRACE("xps_cpus=%d\n", conf->xps_cpus);
+#endif
+#if defined(SET_RPS_CPUS)
+ CONFIG_TRACE("rps_cpus=%d\n", conf->rps_cpus);
+#endif
+
+}
+#endif /* DHD_TPUT_PATCH */
+
+void
+dhd_conf_set_path_params(dhd_pub_t *dhd, char *fw_path, char *nv_path)
+{
+ int ag_type;
+
+ /* External conf takes precedence if specified */
+ dhd_conf_preinit(dhd);
+
+ if (dhd->conf_path[0] == '\0') {
+ dhd_conf_copy_path(dhd, "config.txt", dhd->conf_path, nv_path);
+ }
+ if (dhd->clm_path[0] == '\0') {
+ dhd_conf_copy_path(dhd, "clm.blob", dhd->clm_path, fw_path);
+ }
+#ifdef CONFIG_PATH_AUTO_SELECT
+ dhd_conf_set_conf_name_by_chip(dhd, dhd->conf_path);
+#endif
+
+ dhd_conf_read_config(dhd, dhd->conf_path);
+#ifdef DHD_TPUT_PATCH
+ dhd_conf_dump_tput_patch(dhd);
+#endif
+
+ ag_type = dhd_conf_set_fw_name_by_chip(dhd, fw_path);
+ dhd_conf_set_nv_name_by_chip(dhd, nv_path, ag_type);
+ dhd_conf_set_clm_name_by_chip(dhd, dhd->clm_path, ag_type);
+#ifdef SET_FWNV_BY_MAC
+ dhd_conf_set_fw_name_by_mac(dhd, fw_path);
+ dhd_conf_set_nv_name_by_mac(dhd, nv_path);
+#endif
+
+ CONFIG_MSG("Final fw_path=%s\n", fw_path);
+ CONFIG_MSG("Final nv_path=%s\n", nv_path);
+ CONFIG_MSG("Final clm_path=%s\n", dhd->clm_path);
+ CONFIG_MSG("Final conf_path=%s\n", dhd->conf_path);
+}
+
+int
+dhd_conf_set_intiovar(dhd_pub_t *dhd, int ifidx, uint cmd, char *name, int val,
+ int def, bool down)
+{
+ int ret = -1;
+ char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */
+
+ if (val >= def) {
+ if (down) {
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, NULL, 0, TRUE, 0)) < 0)
+ CONFIG_ERROR("WLC_DOWN setting failed %d\n", ret);
+ }
+ if (cmd == WLC_SET_VAR) {
+ CONFIG_TRACE("set %s %d\n", name, val);
+ bcm_mkiovar(name, (char *)&val, sizeof(val), iovbuf, sizeof(iovbuf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
+ CONFIG_ERROR("%s setting failed %d\n", name, ret);
+ } else {
+ CONFIG_TRACE("set %s %d %d\n", name, cmd, val);
+ if ((ret = dhd_wl_ioctl_cmd(dhd, cmd, &val, sizeof(val), TRUE, 0)) < 0)
+ CONFIG_ERROR("%s setting failed %d\n", name, ret);
+ }
+ }
+
+ return ret;
+}
+
+static int
+dhd_conf_set_bufiovar(dhd_pub_t *dhd, int ifidx, uint cmd, char *name,
+ char *buf, int len, bool down)
+{
+ char iovbuf[WLC_IOCTL_SMLEN];
+ s32 iovar_len;
+ int ret = -1;
+
+ if (down) {
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, NULL, 0, TRUE, ifidx)) < 0)
+ CONFIG_ERROR("WLC_DOWN setting failed %d\n", ret);
+ }
+
+ if (cmd == WLC_SET_VAR) {
+ iovar_len = bcm_mkiovar(name, buf, len, iovbuf, sizeof(iovbuf));
+ if (iovar_len > 0)
+ ret = dhd_wl_ioctl_cmd(dhd, cmd, iovbuf, iovar_len, TRUE, ifidx);
+ else
+ ret = BCME_BUFTOOSHORT;
+ if (ret < 0)
+ CONFIG_ERROR("%s setting failed %d, len=%d\n", name, ret, len);
+ } else {
+ if ((ret = dhd_wl_ioctl_cmd(dhd, cmd, buf, len, TRUE, ifidx)) < 0)
+ CONFIG_ERROR("%s setting failed %d\n", name, ret);
+ }
+
+ return ret;
+}
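+/* bcm_mkiovar() buffer sketch (layout as used above): the ioctl payload
+ * is the NUL-terminated iovar name followed by the raw value, e.g. for
+ * "mpc" set to 0: 'm' 'p' 'c' '\0' 0x00 0x00 0x00 0x00.
+ */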
+
+static int
+dhd_conf_iovar_buf(dhd_pub_t *dhd, int ifidx, int cmd, char *name,
+ char *buf, int len)
+{
+ char *iovbuf = NULL;
+ int ret = -1, iovbuf_len = WLC_IOCTL_MEDLEN;
+ s32 iovar_len;
+
+ iovbuf = kmalloc(iovbuf_len, GFP_KERNEL);
+ if (iovbuf == NULL) {
+ CONFIG_ERROR("Failed to allocate buffer of %d bytes\n", iovbuf_len);
+ goto exit;
+ }
+
+ if (cmd == WLC_GET_VAR) {
+ if (bcm_mkiovar(name, buf, len, iovbuf, iovbuf_len)) {
+ ret = dhd_wl_ioctl_cmd(dhd, cmd, iovbuf, iovbuf_len, FALSE, ifidx);
+ if (!ret) {
+ memcpy(buf, iovbuf, len);
+ } else {
+ CONFIG_ERROR("get iovar %s failed %d\n", name, ret);
+ }
+ } else {
+ CONFIG_ERROR("mkiovar %s failed\n", name);
+ }
+ } else if (cmd == WLC_SET_VAR) {
+ iovar_len = bcm_mkiovar(name, buf, len, iovbuf, iovbuf_len);
+ if (iovar_len > 0)
+ ret = dhd_wl_ioctl_cmd(dhd, cmd, iovbuf, iovar_len, TRUE, ifidx);
+ else
+ ret = BCME_BUFTOOSHORT;
+ if (ret < 0)
+ CONFIG_ERROR("%s setting failed %d, len=%d\n", name, ret, len);
+ }
+
+exit:
+ if (iovbuf)
+ kfree(iovbuf);
+ return ret;
+}
+
+static int
+dhd_conf_get_iovar(dhd_pub_t *dhd, int ifidx, int cmd, char *name,
+ char *buf, int len)
+{
+ char iovbuf[WLC_IOCTL_SMLEN];
+ int ret = -1;
+
+ if (cmd == WLC_GET_VAR) {
+ if (bcm_mkiovar(name, NULL, 0, iovbuf, sizeof(iovbuf))) {
+ ret = dhd_wl_ioctl_cmd(dhd, cmd, iovbuf, sizeof(iovbuf), FALSE, ifidx);
+ if (!ret) {
+ memcpy(buf, iovbuf, len);
+ } else {
+ CONFIG_ERROR("get iovar %s failed %d\n", name, ret);
+ }
+ } else {
+ CONFIG_ERROR("mkiovar %s failed\n", name);
+ }
+ } else {
+ ret = dhd_wl_ioctl_cmd(dhd, cmd, buf, len, FALSE, ifidx);
+ if (ret < 0)
+ CONFIG_ERROR("get iovar %s failed %d\n", name, ret);
+ }
+
+ return ret;
+}
+
+static int
+dhd_conf_rsdb_mode(dhd_pub_t *dhd, char *cmd, char *buf)
+{
+ wl_config_t rsdb_mode_cfg = {1, 0};
+
+ if (buf) {
+ rsdb_mode_cfg.config = (int)simple_strtol(buf, NULL, 0);
+ CONFIG_MSG("rsdb_mode %d\n", rsdb_mode_cfg.config);
+ dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, cmd, (char *)&rsdb_mode_cfg,
+ sizeof(rsdb_mode_cfg), TRUE);
+ }
+
+ return 0;
+}
+
+int
+dhd_conf_reg2args(dhd_pub_t *dhd, char *cmd, bool set, uint32 index, uint32 *val)
+{
+ char var[WLC_IOCTL_SMLEN];
+ uint32 int_val, len;
+ void *ptr = NULL;
+ int ret = 0;
+
+ len = sizeof(int_val);
+ int_val = htod32(index);
+ memset(var, 0, sizeof(var));
+ memcpy(var, (char *)&int_val, sizeof(int_val));
+
+ if (set) {
+ int_val = htod32(*val);
+ memcpy(&var[len], (char *)&int_val, sizeof(int_val));
+ len += sizeof(int_val);
+ dhd_conf_iovar_buf(dhd, 0, WLC_SET_VAR, cmd, var, sizeof(var));
+ } else {
+ ret = dhd_conf_iovar_buf(dhd, 0, WLC_GET_VAR, cmd, var, sizeof(var));
+ if (ret < 0)
+ return ret;
+ ptr = var;
+ *val = dtoh32(*(int *)ptr);
+ }
+
+ return ret;
+}
+
+static int
+dhd_conf_btc_params(dhd_pub_t *dhd, char *cmd, char *buf)
+{
+ int ret = BCME_OK;
+ uint32 cur_val;
+ int index = 0, mask = 0, value = 0;
+ // btc_params=[index] [mask] [value]
+ // Ex: btc_params=82 0x0021 0x0001
+
+ if (buf) {
+ sscanf(buf, "%d %x %x", &index, &mask, &value);
+ }
+
+ CONFIG_TRACE("%s%d mask=0x%04x value=0x%04x\n", cmd, index, mask, value);
+
+ ret = dhd_conf_reg2args(dhd, cmd, FALSE, index, &cur_val);
+ CONFIG_TRACE("%s%d = 0x%04x\n", cmd, index, cur_val);
+ cur_val &= (~mask);
+ cur_val |= value;
+
+ // need to WLC_UP before btc_params
+ dhd_conf_set_intiovar(dhd, 0, WLC_UP, "WLC_UP", 0, 0, FALSE);
+
+ CONFIG_TRACE("wl %s%d 0x%04x\n", cmd, index, cur_val);
+ ret = dhd_conf_reg2args(dhd, cmd, TRUE, index, &cur_val);
+
+ ret = dhd_conf_reg2args(dhd, cmd, FALSE, index, &cur_val);
+ CONFIG_MSG("%s%d = 0x%04x\n", cmd, index, cur_val);
+
+ return ret;
+}
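+/* Read-modify-write example, using the sample from the comment above:
+ * if btc_params82 currently reads 0x0000, then with mask 0x0021 and
+ * value 0x0001 the new value is (0x0000 & ~0x0021) | 0x0001 = 0x0001.
+ */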
+
+typedef struct sub_cmd_t {
+ char *name;
+ uint16 id; /* id for the dongle f/w switch/case */
+ uint16 type; /* base type of argument IOVT_XXXX */
+} sub_cmd_t;
+
+/* wl he sub cmd list */
+static const sub_cmd_t he_cmd_list[] = {
+ {"enab", WL_HE_CMD_ENAB, IOVT_UINT8},
+ {"features", WL_HE_CMD_FEATURES, IOVT_UINT32},
+ {"bsscolor", WL_HE_CMD_BSSCOLOR, IOVT_UINT8},
+ {"partialbsscolor", WL_HE_CMD_PARTIAL_BSSCOLOR, IOVT_UINT8},
+ {"cap", WL_HE_CMD_CAP, IOVT_UINT8},
+ {"staid", WL_HE_CMD_STAID, IOVT_UINT16},
+ {"rtsdurthresh", WL_HE_CMD_RTSDURTHRESH, IOVT_UINT16},
+ {"peduration", WL_HE_CMD_PEDURATION, IOVT_UINT8},
+ {"testbed_mode", WL_HE_CMD_TESTBED_MODE, IOVT_UINT32},
+ {"omi_ulmu_throttle", WL_HE_CMD_OMI_ULMU_THROTTLE, IOVT_UINT16},
+ {"omi_dlmu_rr_mpf_map", WL_HE_CMD_OMI_DLMU_RSD_RCM_MPF_MAP, IOVT_UINT32},
+ {"ulmu_disable_policy", WL_HE_CMD_ULMU_DISABLE_POLICY, IOVT_UINT8},
+ {"sr_prohibit", WL_HE_CMD_SR_PROHIBIT, IOVT_UINT8},
+};
+
+static uint
+wl_he_iovt2len(uint iovt)
+{
+ switch (iovt) {
+ case IOVT_BOOL:
+ case IOVT_INT8:
+ case IOVT_UINT8:
+ return sizeof(uint8);
+ case IOVT_INT16:
+ case IOVT_UINT16:
+ return sizeof(uint16);
+ case IOVT_INT32:
+ case IOVT_UINT32:
+ return sizeof(uint32);
+ default:
+ /* ASSERT(0); */
+ return 0;
+ }
+}
+
+static int
+dhd_conf_he_cmd(dhd_pub_t * dhd, char *cmd, char *buf)
+{
+ int ret = BCME_OK, i;
+ bcm_xtlv_t *pxtlv = NULL;
+ uint8 mybuf[128];
+ int he_id = -1; /* int (not uint16) so the not-found check below works */
+ uint16 he_len = 0, mybuf_len = sizeof(mybuf);
+ uint32 he_val;
+ const sub_cmd_t *tpl = he_cmd_list;
+ char sub_cmd[32], he_val_str[10];
+
+ if (buf) {
+ sscanf(buf, "%s %s", sub_cmd, he_val_str);
+ }
+
+ for (i=0; i<ARRAY_SIZE(he_cmd_list); i++, tpl++) {
+ if (!strcmp(tpl->name, sub_cmd)) {
+ he_id = tpl->id;
+ he_len = wl_he_iovt2len(tpl->type);
+ break;
+ }
+ }
+ if (he_id < 0) {
+ CONFIG_ERROR("No he id found for %s\n", sub_cmd);
+ return 0;
+ }
+
+ pxtlv = (bcm_xtlv_t *)mybuf;
+
+ if (strlen(he_val_str)) {
+ he_val = simple_strtol(he_val_str, NULL, 0);
+ ret = bcm_pack_xtlv_entry((uint8**)&pxtlv, &mybuf_len, he_id,
+ he_len, (uint8 *)&he_val, BCM_XTLV_OPTION_ALIGN32);
+ if (ret != BCME_OK) {
+ CONFIG_ERROR("failed to pack he enab, err: %s\n", bcmerrorstr(ret));
+ return 0;
+ }
+ CONFIG_TRACE("he %s 0x%x\n", sub_cmd, he_val);
+ dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, cmd, (char *)&mybuf,
+ sizeof(mybuf), TRUE);
+ }
+
+ return 0;
+}
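+/* Illustrative config usage (values assumed): a config line such as
+ *   wl_preinit=he=enab 1
+ * reaches this handler with sub_cmd "enab" and he_val_str "1", which is
+ * packed as a WL_HE_CMD_ENAB xtlv of IOVT_UINT8 width.
+ */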
+
+#ifndef SUPPORT_RANDOM_MAC_SCAN
+int
+dhd_conf_scan_mac(dhd_pub_t * dhd, char *cmd, char *buf)
+{
+ uint8 buffer[WLC_IOCTL_SMLEN] = {0, };
+ wl_scanmac_t *sm = NULL;
+ wl_scanmac_enable_t *sm_enable = NULL;
+ int enable = 0, len = 0, ret = -1;
+ char sub_cmd[32], iovbuf[WLC_IOCTL_SMLEN];
+ s32 iovar_len;
+
+ memset(sub_cmd, 0, sizeof(sub_cmd));
+ if (buf) {
+ sscanf(buf, "%s %d", sub_cmd, &enable);
+ }
+
+ if (!strcmp(sub_cmd, "enable")) {
+ sm = (wl_scanmac_t *)buffer;
+ sm_enable = (wl_scanmac_enable_t *)sm->data;
+ sm->len = sizeof(*sm_enable);
+ sm_enable->enable = enable;
+ len = OFFSETOF(wl_scanmac_t, data) + sm->len;
+ sm->subcmd_id = WL_SCANMAC_SUBCMD_ENABLE;
+ CONFIG_TRACE("scanmac enable %d\n", sm_enable->enable);
+
+ iovar_len = bcm_mkiovar("scanmac", buffer, len, iovbuf, sizeof(iovbuf));
+ if (iovar_len > 0)
+ ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iovar_len, TRUE, 0);
+ else
+ ret = BCME_BUFTOOSHORT;
+ if (ret == BCME_UNSUPPORTED)
+ CONFIG_TRACE("scanmac, UNSUPPORTED\n");
+ else if (ret != BCME_OK)
+ CONFIG_ERROR("%s setting failed %d, len=%d\n", "scanmac", ret, len);
+ }
+ else {
+ CONFIG_ERROR("wrong cmd \"%s %d\"\n", sub_cmd, enable);
+ }
+
+ return 0;
+}
+#endif
+
+typedef int (tpl_parse_t)(dhd_pub_t *dhd, char *name, char *buf);
+
+typedef struct iovar_tpl_t {
+ int cmd;
+ char *name;
+ tpl_parse_t *parse;
+} iovar_tpl_t;
+
+const iovar_tpl_t iovar_tpl_list[] = {
+ {WLC_SET_VAR, "rsdb_mode", dhd_conf_rsdb_mode},
+ {WLC_SET_VAR, "he", dhd_conf_he_cmd},
+ {WLC_SET_VAR, "btc_params", dhd_conf_btc_params},
+#ifndef SUPPORT_RANDOM_MAC_SCAN
+ {WLC_SET_VAR, "scanmac", dhd_conf_scan_mac},
+#endif
+};
+
+static int iovar_tpl_parse(const iovar_tpl_t *tpl, int tpl_count,
+ dhd_pub_t *dhd, int cmd, char *name, char *buf)
+{
+ int i, ret = 0;
+
+ /* look for a matching code in the table */
+ for (i = 0; i < tpl_count; i++, tpl++) {
+ if (tpl->cmd == cmd && !strcmp(tpl->name, name))
+ break;
+ }
+ if (i < tpl_count && tpl->parse) {
+ ret = tpl->parse(dhd, name, buf);
+ } else {
+ ret = -1;
+ }
+
+ return ret;
+}
+
+static bool
+dhd_conf_set_wl_cmd(dhd_pub_t *dhd, char *data, bool down)
+{
+ int cmd, val, ret = 0, len;
+ char name[32], *pch, *pick_tmp, *pick_tmp2, *pdata = NULL;
+
+ /* Process wl_preinit:
+ * wl_preinit=[cmd]=[val], [cmd]=[val]
+ * Ex: wl_preinit=86=0, mpc=0
+ */
+
+ if (data == NULL)
+ return FALSE;
+
+ len = strlen(data);
+ pdata = kmalloc(len+1, GFP_KERNEL);
+ if (pdata == NULL) {
+ CONFIG_ERROR("Failed to allocate buffer of %d bytes\n", len+1);
+ goto exit;
+ }
+ memset(pdata, 0, len+1);
+ strcpy(pdata, data);
+
+ pick_tmp = pdata;
+ while (pick_tmp && (pick_tmp2 = bcmstrtok(&pick_tmp, ",", 0)) != NULL) {
+ char *pEnd;
+ pch = bcmstrtok(&pick_tmp2, "=", 0);
+ if (!pch)
+ break;
+ if (*pch == ' ') {
+ pch++;
+ }
+ memset(name, 0 , sizeof (name));
+ cmd = bcm_strtoul(pch, &pEnd, 0);
+ if (cmd == 0 || strlen(pEnd)) {
+ cmd = WLC_SET_VAR;
+ strcpy(name, pch);
+ }
+ pch = bcmstrtok(&pick_tmp2, ",", 0);
+ if (!pch) {
+ break;
+ }
+ ret = iovar_tpl_parse(iovar_tpl_list, ARRAY_SIZE(iovar_tpl_list),
+ dhd, cmd, name, pch);
+ if (ret) {
+ val = (int)simple_strtol(pch, NULL, 0);
+ dhd_conf_set_intiovar(dhd, 0, cmd, name, val, -1, down);
+ }
+ }
+
+exit:
+ if (pdata)
+ kfree(pdata);
+ return true;
+}
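+/* Parsing example for the wl_preinit format above: "86=0, mpc=0" splits
+ * into ioctl command 86 (numeric, so used directly) set to 0, and iovar
+ * "mpc" (non-numeric, so cmd falls back to WLC_SET_VAR) set to 0.
+ */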
+
+int
+dhd_conf_get_band(dhd_pub_t *dhd)
+{
+ int band = -1;
+
+ if (dhd && dhd->conf)
+ band = dhd->conf->band;
+ else
+ CONFIG_ERROR("dhd or conf is NULL\n");
+
+ return band;
+}
+
+int
+dhd_conf_get_country(dhd_pub_t *dhd, wl_country_t *cspec)
+{
+ int bcmerror = -1;
+
+ memset(cspec, 0, sizeof(wl_country_t));
+ bcm_mkiovar("country", NULL, 0, (char*)cspec, sizeof(wl_country_t));
+ if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, cspec, sizeof(wl_country_t),
+ FALSE, 0)) < 0)
+ CONFIG_ERROR("country code getting failed %d\n", bcmerror);
+
+ return bcmerror;
+}
+
+int
+dhd_conf_map_country_list(dhd_pub_t *dhd, wl_country_t *cspec)
+{
+ int bcmerror = -1;
+ struct dhd_conf *conf = dhd->conf;
+ country_list_t *country = conf->country_head;
+
+#ifdef CCODE_LIST
+ bcmerror = dhd_ccode_map_country_list(dhd, cspec);
+#endif
+ // **:XZ/11 => return XZ/11 if not found
+ // **:**/0 => return user specified ccode if not found, but set regrev 0
+ while (country != NULL) {
+ if (!strncmp("**", country->cspec.country_abbrev, 2)) {
+ if (!strncmp("**", country->cspec.ccode, 2)) {
+ cspec->rev = 0;
+ bcmerror = 0;
+ break;
+ }
+ memcpy(cspec->ccode, country->cspec.ccode, WLC_CNTRY_BUF_SZ);
+ cspec->rev = country->cspec.rev;
+ bcmerror = 0;
+ break;
+ } else if (!strncmp(cspec->country_abbrev,
+ country->cspec.country_abbrev, 2)) {
+ memcpy(cspec->ccode, country->cspec.ccode, WLC_CNTRY_BUF_SZ);
+ cspec->rev = country->cspec.rev;
+ bcmerror = 0;
+ break;
+ }
+ country = country->next;
+ }
+
+ if (!bcmerror)
+ CONFIG_MSG("%s/%d\n", cspec->ccode, cspec->rev);
+
+ return bcmerror;
+}
+
+int
+dhd_conf_set_country(dhd_pub_t *dhd, wl_country_t *cspec)
+{
+ int bcmerror = -1;
+
+ memset(&dhd->dhd_cspec, 0, sizeof(wl_country_t));
+
+ CONFIG_MSG("set country %s, revision %d\n", cspec->ccode, cspec->rev);
+ bcmerror = dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "country", (char *)cspec,
+ sizeof(wl_country_t), FALSE);
+ dhd_conf_get_country(dhd, cspec);
+ CONFIG_MSG("Country code: %s (%s/%d)\n",
+ cspec->country_abbrev, cspec->ccode, cspec->rev);
+
+ return bcmerror;
+}
+
+int
+dhd_conf_fix_country(dhd_pub_t *dhd)
+{
+ int bcmerror = -1;
+ int band;
+ wl_uint32_list_t *list;
+ u8 valid_chan_list[sizeof(u32)*(WL_NUMCHANNELS + 1)];
+ wl_country_t cspec;
+
+ if (!(dhd && dhd->conf)) {
+ return bcmerror;
+ }
+
+ memset(valid_chan_list, 0, sizeof(valid_chan_list));
+ list = (wl_uint32_list_t *)(void *) valid_chan_list;
+ list->count = htod32(WL_NUMCHANNELS);
+ if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_GET_VALID_CHANNELS, valid_chan_list,
+ sizeof(valid_chan_list), FALSE, 0)) < 0) {
+ CONFIG_ERROR("get channels failed with %d\n", bcmerror);
+ }
+
+ band = dhd_conf_get_band(dhd);
+
+ if (bcmerror || ((band==WLC_BAND_AUTO || band==WLC_BAND_2G || band==-1) &&
+ dtoh32(list->count)<11)) {
+ CONFIG_ERROR("bcmerror=%d, # of channels %d\n",
+ bcmerror, dtoh32(list->count));
+ dhd_conf_map_country_list(dhd, &dhd->conf->cspec);
+ if ((bcmerror = dhd_conf_set_country(dhd, &dhd->conf->cspec)) < 0) {
+ strcpy(cspec.country_abbrev, "US");
+ cspec.rev = 0;
+ strcpy(cspec.ccode, "US");
+ dhd_conf_map_country_list(dhd, &cspec);
+ dhd_conf_set_country(dhd, &cspec);
+ }
+ }
+
+ return bcmerror;
+}
+
+bool
+dhd_conf_match_channel(dhd_pub_t *dhd, uint32 channel)
+{
+ int i;
+ bool match = false;
+
+ if (dhd && dhd->conf) {
+ if (dhd->conf->channels.count == 0)
+ return true;
+ for (i=0; i<dhd->conf->channels.count; i++) {
+ if (channel == dhd->conf->channels.channel[i])
+ match = true;
+ }
+ } else {
+ match = true;
+ CONFIG_ERROR("dhd or conf is NULL\n");
+ }
+
+ return match;
+}
+
+int
+dhd_conf_set_roam(dhd_pub_t *dhd)
+{
+ int bcmerror = -1;
+ struct dhd_conf *conf = dhd->conf;
+ uint wnm_bsstrans_resp = 0;
+
+ if (dhd->conf->chip == BCM4359_CHIP_ID) {
+ dhd_conf_get_iovar(dhd, 0, WLC_GET_VAR, "wnm_bsstrans_resp",
+ (char *)&wnm_bsstrans_resp, sizeof(wnm_bsstrans_resp));
+ if (wnm_bsstrans_resp == WL_BSSTRANS_POLICY_PRODUCT) {
+ dhd->wbtext_policy = WL_BSSTRANS_POLICY_ROAM_ALWAYS;
+ dhd_conf_set_intiovar(dhd, 0, WLC_SET_VAR, "wnm_bsstrans_resp",
+ WL_BSSTRANS_POLICY_ROAM_ALWAYS, 0, FALSE);
+ }
+ }
+
+ dhd_roam_disable = conf->roam_off;
+ dhd_conf_set_intiovar(dhd, 0, WLC_SET_VAR, "roam_off", dhd->conf->roam_off, 0, FALSE);
+
+ if (!conf->roam_off || !conf->roam_off_suspend) {
+ CONFIG_MSG("set roam_trigger %d\n", conf->roam_trigger[0]);
+ dhd_conf_set_bufiovar(dhd, 0, WLC_SET_ROAM_TRIGGER, "WLC_SET_ROAM_TRIGGER",
+ (char *)conf->roam_trigger, sizeof(conf->roam_trigger), FALSE);
+
+ CONFIG_MSG("set roam_scan_period %d\n", conf->roam_scan_period[0]);
+ dhd_conf_set_bufiovar(dhd, 0, WLC_SET_ROAM_SCAN_PERIOD, "WLC_SET_ROAM_SCAN_PERIOD",
+ (char *)conf->roam_scan_period, sizeof(conf->roam_scan_period), FALSE);
+
+ CONFIG_MSG("set roam_delta %d\n", conf->roam_delta[0]);
+ dhd_conf_set_bufiovar(dhd, 0, WLC_SET_ROAM_DELTA, "WLC_SET_ROAM_DELTA",
+ (char *)conf->roam_delta, sizeof(conf->roam_delta), FALSE);
+
+ dhd_conf_set_intiovar(dhd, 0, WLC_SET_VAR, "fullroamperiod",
+ dhd->conf->fullroamperiod, 1, FALSE);
+ }
+
+ return bcmerror;
+}
+
+void
+dhd_conf_add_to_eventbuffer(struct eventmsg_buf *ev, u16 event, bool set)
+{
+ if (!ev || (event > WLC_E_LAST))
+ return;
+
+ if (ev->num < MAX_EVENT_BUF_NUM) {
+ ev->event[ev->num].type = event;
+ ev->event[ev->num].set = set;
+ ev->num++;
+ } else {
+ CONFIG_ERROR("evenbuffer doesn't support > %u events. Update"
+ " the define MAX_EVENT_BUF_NUM \n", MAX_EVENT_BUF_NUM);
+ ASSERT(0);
+ }
+}
+
+s32
+dhd_conf_apply_eventbuffer(dhd_pub_t *dhd, eventmsg_buf_t *ev)
+{
+ char eventmask[WL_EVENTING_MASK_LEN];
+ int i, ret = 0;
+
+ if (!ev || (!ev->num))
+ return -EINVAL;
+
+ /* Read event_msgs mask */
+ ret = dhd_conf_get_iovar(dhd, 0, WLC_GET_VAR, "event_msgs", eventmask,
+ sizeof(eventmask));
+ if (unlikely(ret)) {
+ CONFIG_ERROR("Get event_msgs error (%d)\n", ret);
+ goto exit;
+ }
+
+ /* apply the set bits */
+ for (i = 0; i < ev->num; i++) {
+ if (ev->event[i].set)
+ setbit(eventmask, ev->event[i].type);
+ else
+ clrbit(eventmask, ev->event[i].type);
+ }
+
+ /* Write updated Event mask */
+ ret = dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "event_msgs", eventmask,
+ sizeof(eventmask), FALSE);
+ if (unlikely(ret)) {
+ CONFIG_ERROR("Set event_msgs error (%d)\n", ret);
+ }
+
+exit:
+ return ret;
+}
+
+static int
+dhd_conf_enable_roam_offload(dhd_pub_t *dhd, int enable)
+{
+ int err;
+ eventmsg_buf_t ev_buf;
+
+ if (dhd->conf->roam_off_suspend)
+ return 0;
+
+ err = dhd_conf_set_intiovar(dhd, 0, WLC_SET_VAR, "roam_offload", enable, 0, FALSE);
+ if (err)
+ return err;
+
+ bzero(&ev_buf, sizeof(eventmsg_buf_t));
+ dhd_conf_add_to_eventbuffer(&ev_buf, WLC_E_PSK_SUP, !enable);
+ dhd_conf_add_to_eventbuffer(&ev_buf, WLC_E_ASSOC_REQ_IE, !enable);
+ dhd_conf_add_to_eventbuffer(&ev_buf, WLC_E_ASSOC_RESP_IE, !enable);
+ dhd_conf_add_to_eventbuffer(&ev_buf, WLC_E_REASSOC, !enable);
+ dhd_conf_add_to_eventbuffer(&ev_buf, WLC_E_JOIN, !enable);
+ dhd_conf_add_to_eventbuffer(&ev_buf, WLC_E_ROAM, !enable);
+ err = dhd_conf_apply_eventbuffer(dhd, &ev_buf);
+
+ CONFIG_TRACE("roam_offload %d\n", enable);
+
+ return err;
+}
+
+void
+dhd_conf_set_bw_cap(dhd_pub_t *dhd)
+{
+ struct {
+ u32 band;
+ u32 bw_cap;
+ } param = {0, 0};
+
+ if (dhd->conf->bw_cap[0] >= 0) {
+ memset(&param, 0, sizeof(param));
+ param.band = WLC_BAND_2G;
+ param.bw_cap = (uint)dhd->conf->bw_cap[0];
+ CONFIG_MSG("set bw_cap 2g 0x%x\n", param.bw_cap);
+ dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "bw_cap", (char *)&param,
+ sizeof(param), TRUE);
+ }
+
+ if (dhd->conf->bw_cap[1] >= 0) {
+ memset(&param, 0, sizeof(param));
+ param.band = WLC_BAND_5G;
+ param.bw_cap = (uint)dhd->conf->bw_cap[1];
+ CONFIG_MSG("set bw_cap 5g 0x%x\n", param.bw_cap);
+ dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "bw_cap", (char *)&param,
+ sizeof(param), TRUE);
+ }
+}
+
+void
+dhd_conf_get_wme(dhd_pub_t *dhd, int ifidx, int mode, edcf_acparam_t *acp)
+{
+ int bcmerror = -1;
+ char iovbuf[WLC_IOCTL_SMLEN];
+ edcf_acparam_t *acparam;
+
+ bzero(iovbuf, sizeof(iovbuf));
+
+ /*
+ * Get current acparams, using buf as an input buffer.
+ * Return data is array of 4 ACs of wme params.
+ */
+ if (mode == 0)
+ bcm_mkiovar("wme_ac_sta", NULL, 0, iovbuf, sizeof(iovbuf));
+ else
+ bcm_mkiovar("wme_ac_ap", NULL, 0, iovbuf, sizeof(iovbuf));
+ if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf),
+ FALSE, ifidx)) < 0) {
+ CONFIG_ERROR("wme_ac_sta getting failed %d\n", bcmerror);
+ return;
+ }
+ memcpy((char*)acp, iovbuf, sizeof(edcf_acparam_t)*AC_COUNT);
+
+ acparam = &acp[AC_BK];
+ CONFIG_TRACE("BK: aci %d aifsn %d ecwmin %d ecwmax %d txop 0x%x\n",
+ acparam->ACI, acparam->ACI&EDCF_AIFSN_MASK,
+ acparam->ECW&EDCF_ECWMIN_MASK, (acparam->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT,
+ acparam->TXOP);
+ acparam = &acp[AC_BE];
+ CONFIG_TRACE("BE: aci %d aifsn %d ecwmin %d ecwmax %d txop 0x%x\n",
+ acparam->ACI, acparam->ACI&EDCF_AIFSN_MASK,
+ acparam->ECW&EDCF_ECWMIN_MASK, (acparam->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT,
+ acparam->TXOP);
+ acparam = &acp[AC_VI];
+ CONFIG_TRACE("VI: aci %d aifsn %d ecwmin %d ecwmax %d txop 0x%x\n",
+ acparam->ACI, acparam->ACI&EDCF_AIFSN_MASK,
+ acparam->ECW&EDCF_ECWMIN_MASK, (acparam->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT,
+ acparam->TXOP);
+ acparam = &acp[AC_VO];
+ CONFIG_TRACE("VO: aci %d aifsn %d ecwmin %d ecwmax %d txop 0x%x\n",
+ acparam->ACI, acparam->ACI&EDCF_AIFSN_MASK,
+ acparam->ECW&EDCF_ECWMIN_MASK, (acparam->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT,
+ acparam->TXOP);
+
+ return;
+}
+
+void
+dhd_conf_update_wme(dhd_pub_t *dhd, int ifidx, int mode,
+ edcf_acparam_t *acparam_cur, int aci)
+{
+ int aifsn, ecwmin, ecwmax, txop;
+ edcf_acparam_t *acp;
+ struct dhd_conf *conf = dhd->conf;
+ wme_param_t *wme;
+
+ if (mode == 0)
+ wme = &conf->wme_sta;
+ else
+ wme = &conf->wme_ap;
+
+ /* Default value */
+ aifsn = acparam_cur->ACI&EDCF_AIFSN_MASK;
+ ecwmin = acparam_cur->ECW&EDCF_ECWMIN_MASK;
+ ecwmax = (acparam_cur->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT;
+ txop = acparam_cur->TXOP;
+
+ /* Modified value */
+ if (wme->aifsn[aci] > 0)
+ aifsn = wme->aifsn[aci];
+ if (wme->ecwmin[aci] > 0)
+ ecwmin = wme->ecwmin[aci];
+ if (wme->ecwmax[aci] > 0)
+ ecwmax = wme->ecwmax[aci];
+ if (wme->txop[aci] > 0)
+ txop = wme->txop[aci];
+
+ if (!(wme->aifsn[aci] || wme->ecwmin[aci] ||
+ wme->ecwmax[aci] || wme->txop[aci]))
+ return;
+
+ /* Update */
+ acp = acparam_cur;
+ acp->ACI = (acp->ACI & ~EDCF_AIFSN_MASK) | (aifsn & EDCF_AIFSN_MASK);
+ acp->ECW = ((ecwmax << EDCF_ECWMAX_SHIFT) & EDCF_ECWMAX_MASK) | (acp->ECW & EDCF_ECWMIN_MASK);
+ acp->ECW = ((acp->ECW & EDCF_ECWMAX_MASK) | (ecwmin & EDCF_ECWMIN_MASK));
+ acp->TXOP = txop;
+
+ CONFIG_MSG("wme_ac %s aci %d aifsn %d ecwmin %d ecwmax %d txop 0x%x\n",
+ mode?"ap":"sta", acp->ACI, acp->ACI&EDCF_AIFSN_MASK,
+ acp->ECW&EDCF_ECWMIN_MASK, (acp->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT,
+ acp->TXOP);
+
+ /*
+ * Now use buf as an output buffer.
+ * Put WME acparams after "wme_ac\0" in buf.
+ * NOTE: only one of the four ACs can be set at a time.
+ */
+ if (mode == 0)
+ dhd_conf_set_bufiovar(dhd, ifidx, WLC_SET_VAR, "wme_ac_sta", (char *)acp,
+ sizeof(edcf_acparam_t), FALSE);
+ else
+ dhd_conf_set_bufiovar(dhd, ifidx, WLC_SET_VAR, "wme_ac_ap", (char *)acp,
+ sizeof(edcf_acparam_t), FALSE);
+
+}
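+/* EDCF packing example (assuming the usual 4-bit min/max fields): for
+ * ecwmin=4 and ecwmax=10,
+ * ECW = ((10 << EDCF_ECWMAX_SHIFT) & EDCF_ECWMAX_MASK) | (4 & EDCF_ECWMIN_MASK)
+ * yields 0xA4.
+ */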
+
+void
+dhd_conf_set_wme(dhd_pub_t *dhd, int ifidx, int mode)
+{
+ edcf_acparam_t acparam_cur[AC_COUNT];
+
+ if (dhd && dhd->conf) {
+ if (!dhd->conf->force_wme_ac) {
+ CONFIG_TRACE("force_wme_ac is not enabled %d\n",
+ dhd->conf->force_wme_ac);
+ return;
+ }
+
+ CONFIG_TRACE("Before change:\n");
+ dhd_conf_get_wme(dhd, ifidx, mode, acparam_cur);
+
+ dhd_conf_update_wme(dhd, ifidx, mode, &acparam_cur[AC_BK], AC_BK);
+ dhd_conf_update_wme(dhd, ifidx, mode, &acparam_cur[AC_BE], AC_BE);
+ dhd_conf_update_wme(dhd, ifidx, mode, &acparam_cur[AC_VI], AC_VI);
+ dhd_conf_update_wme(dhd, ifidx, mode, &acparam_cur[AC_VO], AC_VO);
+
+ CONFIG_TRACE("After change:\n");
+ dhd_conf_get_wme(dhd, ifidx, mode, acparam_cur);
+ } else {
+ CONFIG_ERROR("dhd or conf is NULL\n");
+ }
+
+ return;
+}
+
+void
+dhd_conf_set_mchan_bw(dhd_pub_t *dhd, int p2p_mode, int miracast_mode)
+{
+ struct dhd_conf *conf = dhd->conf;
+ mchan_params_t *mchan = conf->mchan;
+ bool set = true;
+
+ while (mchan != NULL) {
+ set = true;
+ set &= (mchan->bw >= 0);
+ set &= ((mchan->p2p_mode == -1) | (mchan->p2p_mode == p2p_mode));
+ set &= ((mchan->miracast_mode == -1) | (mchan->miracast_mode == miracast_mode));
+ if (set) {
+ dhd_conf_set_intiovar(dhd, 0, WLC_SET_VAR, "mchan_bw", mchan->bw, 0, FALSE);
+ }
+ mchan = mchan->next;
+ }
+
+ return;
+}
+
+#ifdef PKT_FILTER_SUPPORT
+void
+dhd_conf_add_pkt_filter(dhd_pub_t *dhd)
+{
+ int i, j;
+ char str[16];
+#define MACS "%02x%02x%02x%02x%02x%02x"
+
+ /* 0) suspend_mode=1
+ * Case 0: default is unicast pkt and event wake up
+ * Case 1: no connection in suspend
+ * 1) wl_suspend=3=0
+ * 2) wl_resume=2=0
+ * 3) insuspend=0x7
+ * Case 2: keep connection in suspend, but no pkt and event wake up
+ * 1) dhd_master_mode=1
+ * 2) pkt_filter_delete=100, 102, 103, 104, 105, 106, 107
+ * 3) pkt_filter_add=141 0 0 0 0xFFFFFFFFFFFF 0x000000000000
+ * 4) insuspend=0x7
+ * 5) rekey_offload=1
+ * Case 3: magic pkt and event wake up
+ * 1) dhd_master_mode=1
+ * 2) pkt_filter_delete=100, 102, 103, 104, 105, 106, 107
+ * 3) pkt_filter_add=141 0 0 0 0xFFFFFFFFFFFF 0x000000000000
+ * 4) magic_pkt_filter_add=141 0 1 12
+ * 5) rekey_offload=1
+ */
+ for(i=0; i<dhd->conf->pkt_filter_add.count; i++) {
+ dhd->pktfilter[i+dhd->pktfilter_count] = dhd->conf->pkt_filter_add.filter[i];
+ CONFIG_MSG("%s\n", dhd->pktfilter[i+dhd->pktfilter_count]);
+ }
+ dhd->pktfilter_count += i;
+
+ if (dhd->conf->magic_pkt_filter_add) {
+ strcat(dhd->conf->magic_pkt_filter_add, " 0x");
+ strcat(dhd->conf->magic_pkt_filter_add, "FFFFFFFFFFFF");
+ for (j=0; j<16; j++)
+ strcat(dhd->conf->magic_pkt_filter_add, "FFFFFFFFFFFF");
+ strcat(dhd->conf->magic_pkt_filter_add, " 0x");
+ strcat(dhd->conf->magic_pkt_filter_add, "FFFFFFFFFFFF");
+ sprintf(str, MACS, MAC2STRDBG(dhd->mac.octet));
+ for (j=0; j<16; j++)
+ strncat(dhd->conf->magic_pkt_filter_add, str, 12);
+ dhd->pktfilter[dhd->pktfilter_count] = dhd->conf->magic_pkt_filter_add;
+ dhd->pktfilter_count += 1;
+ }
+}
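+/* What magic_pkt_filter_add builds above (sketch): a mask of 17 all-ones
+ * 6-byte groups and a pattern of six 0xFF bytes followed by 16 copies of
+ * the device MAC, i.e. the standard Wake-on-LAN magic packet layout.
+ */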
+
+bool
+dhd_conf_del_pkt_filter(dhd_pub_t *dhd, uint32 id)
+{
+ int i;
+
+ if (dhd && dhd->conf) {
+ for (i=0; i<dhd->conf->pkt_filter_del.count; i++) {
+ if (id == dhd->conf->pkt_filter_del.id[i]) {
+ CONFIG_MSG("%d\n", dhd->conf->pkt_filter_del.id[i]);
+ return true;
+ }
+ }
+ return false;
+ }
+ return false;
+}
+
+void
+dhd_conf_discard_pkt_filter(dhd_pub_t *dhd)
+{
+ dhd->pktfilter_count = 6;
+ dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = NULL;
+ dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF";
+ dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = "102 0 0 0 0xFFFFFF 0x01005E";
+ dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = "103 0 0 0 0xFFFF 0x3333";
+ dhd->pktfilter[DHD_MDNS_FILTER_NUM] = NULL;
+ /* Do not enable ARP to pkt filter if dhd_master_mode is false.*/
+ dhd->pktfilter[DHD_ARP_FILTER_NUM] = NULL;
+
+ /* IPv4 broadcast address XXX.XXX.XXX.255 */
+ dhd->pktfilter[dhd->pktfilter_count] = "110 0 0 12 0xFFFF00000000000000000000000000000000000000FF 0x080000000000000000000000000000000000000000FF";
+ dhd->pktfilter_count++;
+ /* discard IPv4 multicast address 224.0.0.0/4 */
+ dhd->pktfilter[dhd->pktfilter_count] = "111 0 0 12 0xFFFF00000000000000000000000000000000F0 0x080000000000000000000000000000000000E0";
+ dhd->pktfilter_count++;
+ /* discard IPv6 multicast address FF00::/8 */
+ dhd->pktfilter[dhd->pktfilter_count] = "112 0 0 12 0xFFFF000000000000000000000000000000000000000000000000FF 0x86DD000000000000000000000000000000000000000000000000FF";
+ dhd->pktfilter_count++;
+ /* discard Netbios pkt */
+ dhd->pktfilter[dhd->pktfilter_count] = "121 0 0 12 0xFFFF000000000000000000FF000000000000000000000000FFFF 0x0800000000000000000000110000000000000000000000000089";
+ dhd->pktfilter_count++;
+
+}
+#endif /* PKT_FILTER_SUPPORT */
+
+int
+dhd_conf_get_pm(dhd_pub_t *dhd)
+{
+ if (dhd && dhd->conf) {
+ return dhd->conf->pm;
+ }
+ return -1;
+}
+
+int
+dhd_conf_check_hostsleep(dhd_pub_t *dhd, int cmd, void *buf, int len,
+ int *hostsleep_set, int *hostsleep_val, int *ret)
+{
+ if (dhd->hang_reason) {
+ *ret = BCME_EPERM;
+ goto exit;
+ }
+ if (dhd->conf->insuspend & (NO_TXCTL_IN_SUSPEND | WOWL_IN_SUSPEND)) {
+ if (cmd == WLC_SET_VAR) {
+ char *psleep = NULL;
+ psleep = strstr(buf, "hostsleep");
+ if (psleep) {
+ *hostsleep_set = 1;
+ memcpy(hostsleep_val, psleep+strlen("hostsleep")+1, sizeof(int));
+ }
+ }
+ if (dhd->hostsleep && (!*hostsleep_set || *hostsleep_val)) {
+ CONFIG_TRACE("block all none hostsleep clr cmd\n");
+ *ret = BCME_EPERM;
+ goto exit;
+ } else if (*hostsleep_set && *hostsleep_val) {
+ CONFIG_TRACE("hostsleep %d => %d\n", dhd->hostsleep, *hostsleep_val);
+ dhd->hostsleep = *hostsleep_val;
+ if (dhd->conf->insuspend & NO_TXDATA_IN_SUSPEND) {
+ dhd_txflowcontrol(dhd, ALL_INTERFACES, ON);
+ }
+ if (dhd->hostsleep == 2) {
+ *ret = 0;
+ goto exit;
+ }
+ } else if (dhd->hostsleep == 2 && !*hostsleep_val) {
+ CONFIG_TRACE("hostsleep %d => %d\n", dhd->hostsleep, *hostsleep_val);
+ dhd->hostsleep = *hostsleep_val;
+ if (dhd->conf->insuspend & NO_TXDATA_IN_SUSPEND) {
+ dhd_txflowcontrol(dhd, ALL_INTERFACES, OFF);
+ }
+ *ret = 0;
+ goto exit;
+ }
+ }
+#ifdef NO_POWER_SAVE
+ if (cmd == WLC_SET_PM) {
+ if (*(const u32*)buf != 0) {
+ CONFIG_TRACE("skip PM\n");
+ *ret = BCME_OK;
+ goto exit;
+ }
+ } else if (cmd == WLC_SET_VAR) {
+ int cmd_len = strlen("mpc");
+ if (!strncmp(buf, "mpc", cmd_len)) {
+ if (*((u32 *)((u8*)buf+cmd_len+1)) != 0) {
+ CONFIG_TRACE("skip mpc\n");
+ *ret = BCME_OK;
+ goto exit;
+ }
+ }
+ }
+#endif
+
+ return 0;
+exit:
+ return -1;
+}
+
+void
+dhd_conf_get_hostsleep(dhd_pub_t *dhd,
+ int hostsleep_set, int hostsleep_val, int ret)
+{
+ if (dhd->conf->insuspend & (NO_TXCTL_IN_SUSPEND | WOWL_IN_SUSPEND)) {
+ if (hostsleep_set) {
+ if (hostsleep_val && ret) {
+ CONFIG_TRACE("reset hostsleep %d => 0\n", dhd->hostsleep);
+ dhd->hostsleep = 0;
+ if (dhd->conf->insuspend & NO_TXDATA_IN_SUSPEND) {
+ dhd_txflowcontrol(dhd, ALL_INTERFACES, OFF);
+ }
+ } else if (!hostsleep_val && !ret) {
+ CONFIG_TRACE("set hostsleep %d => 0\n", dhd->hostsleep);
+ dhd->hostsleep = 0;
+ if (dhd->conf->insuspend & NO_TXDATA_IN_SUSPEND) {
+ dhd_txflowcontrol(dhd, ALL_INTERFACES, OFF);
+ }
+ }
+ }
+ }
+}
+
+#ifdef WL_EXT_WOWL
+#define WL_WOWL_TCPFIN (1 << 26)
+typedef struct wl_wowl_pattern2 {
+ char cmd[4];
+ wl_wowl_pattern_t wowl_pattern;
+} wl_wowl_pattern2_t;
+static int
+dhd_conf_wowl_pattern(dhd_pub_t *dhd, int ifidx, bool add, char *data)
+{
+ uint buf_len = 0;
+ int id, type, polarity, offset;
+ char cmd[4]="\0", mask[128]="\0", pattern[128]="\0", mask_tmp[128]="\0", *pmask_tmp;
+ uint32 masksize, patternsize, pad_len = 0;
+ wl_wowl_pattern2_t *wowl_pattern2 = NULL;
+ char *mask_and_pattern;
+ int ret = 0, i, j, v;
+
+ if (data) {
+ if (add)
+ strcpy(cmd, "add");
+ else
+ strcpy(cmd, "clr");
+ if (!strcmp(cmd, "clr")) {
+ CONFIG_TRACE("wowl_pattern clr\n");
+ ret = dhd_conf_set_bufiovar(dhd, ifidx, WLC_SET_VAR, "wowl_pattern", cmd,
+ sizeof(cmd), FALSE);
+ goto exit;
+ }
+ sscanf(data, "%d %d %d %d %s %s", &id, &type, &polarity, &offset,
+ mask_tmp, pattern);
+ masksize = strlen(mask_tmp) -2;
+ CONFIG_TRACE("0 mask_tmp=%s, masksize=%d\n", mask_tmp, masksize);
+
+ // add padding
+ if (masksize % 16)
+ pad_len = (16 - masksize % 16);
+ for (i=0; i<pad_len; i++)
+ strcat(mask_tmp, "0");
+ masksize += pad_len;
+ CONFIG_TRACE("1 mask_tmp=%s, masksize=%d\n", mask_tmp, masksize);
+
+ // translate 0x00 to 0, others to 1
+ j = 0;
+ pmask_tmp = &mask_tmp[2];
+ for (i=0; i<masksize/2; i++) {
+ if(strncmp(&pmask_tmp[i*2], "00", 2))
+ pmask_tmp[j] = '1';
+ else
+ pmask_tmp[j] = '0';
+ j++;
+ }
+ pmask_tmp[j] = '\0';
+ masksize = masksize / 2;
+ CONFIG_TRACE("2 mask_tmp=%s, masksize=%d\n", mask_tmp, masksize);
+
+ // reverse the bit order within each 8-bit group
+ pmask_tmp = &mask_tmp[2];
+ for (i=0; i<masksize/8; i++) {
+ char c;
+ for (j=0; j<4; j++) {
+ c = pmask_tmp[i*8+j];
+ pmask_tmp[i*8+j] = pmask_tmp[(i+1)*8-j-1];
+ pmask_tmp[(i+1)*8-j-1] = c;
+ }
+ }
+ CONFIG_TRACE("3 mask_tmp=%s, masksize=%d\n", mask_tmp, masksize);
+
+ // pack the bit string into hex digits, 4 bits per nibble
+ j = 0; v = 0;
+ pmask_tmp = &mask_tmp[2];
+ strcpy(mask, "0x");
+ for (i=0; i<masksize; i++) {
+ v = (v<<1) | (pmask_tmp[i]=='1');
+ if (((i+1)%4) == 0) {
+ if (v < 10)
+ mask[j+2] = v + '0';
+ else
+ mask[j+2] = (v-10) + 'a';
+ j++;
+ v = 0;
+ }
+ }
+ mask[j+2] = '\0';
+ masksize = j/2;
+ CONFIG_TRACE("4 mask=%s, masksize=%d\n", mask, masksize);
+
+ patternsize = (strlen(pattern)-2)/2;
+ buf_len = sizeof(wl_wowl_pattern2_t) + patternsize + masksize;
+ wowl_pattern2 = kmalloc(buf_len, GFP_KERNEL);
+ if (wowl_pattern2 == NULL) {
+ CONFIG_ERROR("Failed to allocate buffer of %d bytes\n", buf_len);
+ goto exit;
+ }
+ memset(wowl_pattern2, 0, sizeof(wl_wowl_pattern2_t));
+
+ strncpy(wowl_pattern2->cmd, cmd, sizeof(cmd));
+ wowl_pattern2->wowl_pattern.id = id;
+ wowl_pattern2->wowl_pattern.type = 0;
+ wowl_pattern2->wowl_pattern.offset = offset;
+ mask_and_pattern = (char*)wowl_pattern2 + sizeof(wl_wowl_pattern2_t);
+
+ wowl_pattern2->wowl_pattern.masksize = masksize;
+ ret = wl_pattern_atoh(mask, mask_and_pattern);
+ if (ret == -1) {
+ CONFIG_ERROR("rejecting mask=%s\n", mask);
+ goto exit;
+ }
+
+ mask_and_pattern += wowl_pattern2->wowl_pattern.masksize;
+ wowl_pattern2->wowl_pattern.patternoffset = sizeof(wl_wowl_pattern_t) +
+ wowl_pattern2->wowl_pattern.masksize;
+
+ wowl_pattern2->wowl_pattern.patternsize = patternsize;
+ ret = wl_pattern_atoh(pattern, mask_and_pattern);
+ if (ret == -1) {
+ CONFIG_ERROR("rejecting pattern=%s\n", pattern);
+ goto exit;
+ }
+
+ CONFIG_TRACE("%s %d %s %s\n", cmd, offset, mask, pattern);
+
+ ret = dhd_conf_set_bufiovar(dhd, ifidx, WLC_SET_VAR, "wowl_pattern",
+ (char *)wowl_pattern2, buf_len, FALSE);
+ }
+
+exit:
+ if (wowl_pattern2)
+ kfree(wowl_pattern2);
+ return ret;
+}
+
+static int
+dhd_conf_wowl_wakeind(dhd_pub_t *dhd, int ifidx, bool clear)
+{
+ s8 iovar_buf[WLC_IOCTL_SMLEN];
+ wl_wowl_wakeind_t *wake = NULL;
+ int ret = -1;
+ char clr[6]="clear", wakeind_str[32]="\0";
+
+ if (clear) {
+ CONFIG_TRACE("wowl_wakeind clear\n");
+ ret = dhd_conf_set_bufiovar(dhd, ifidx, WLC_SET_VAR, "wowl_wakeind",
+ clr, sizeof(clr), 0);
+ } else {
+ ret = dhd_conf_get_iovar(dhd, ifidx, WLC_GET_VAR, "wowl_wakeind",
+ iovar_buf, sizeof(iovar_buf));
+ if (!ret) {
+ wake = (wl_wowl_wakeind_t *) iovar_buf;
+ if (wake->ucode_wakeind & WL_WOWL_MAGIC)
+ strcpy(wakeind_str, "(MAGIC packet)");
+ if (wake->ucode_wakeind & WL_WOWL_NET)
+ strcpy(wakeind_str, "(Netpattern)");
+ if (wake->ucode_wakeind & WL_WOWL_DIS)
+ strcpy(wakeind_str, "(Disassoc/Deauth)");
+ if (wake->ucode_wakeind & WL_WOWL_BCN)
+ strcpy(wakeind_str, "(Loss of beacon)");
+ if (wake->ucode_wakeind & WL_WOWL_TCPKEEP_TIME)
+ strcpy(wakeind_str, "(TCPKA timeout)");
+ if (wake->ucode_wakeind & WL_WOWL_TCPKEEP_DATA)
+ strcpy(wakeind_str, "(TCPKA data)");
+ if (wake->ucode_wakeind & WL_WOWL_TCPFIN)
+ strcpy(wakeind_str, "(TCP FIN)");
+ CONFIG_MSG("wakeind=0x%x %s\n", wake->ucode_wakeind, wakeind_str);
+ }
+ }
+
+ return ret;
+}
+#endif
+
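+/*
+ * A sketch of what dhd_conf_mkeep_alive() does (not a spec): "packet" is a
+ * hex string parsed by wl_pattern_atoh() into the mkeep_alive payload; with
+ * bcast the destination MAC is forced to broadcast, and bytes 6..11 are
+ * overwritten with cur_etheraddr so the frame carries the local source MAC.
+ * A negative period makes the call a no-op.
+ */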
+int
+dhd_conf_mkeep_alive(dhd_pub_t *dhd, int ifidx, int id, int period,
+ char *packet, bool bcast)
+{
+ wl_mkeep_alive_pkt_t *mkeep_alive_pktp;
+ int ret = 0, len_bytes=0, buf_len=0;
+ char *buf = NULL, *iovar_buf = NULL;
+ uint8 *pdata;
+
+ CONFIG_TRACE("id=%d, period=%d, packet=%s\n", id, period, packet);
+ if (period >= 0) {
+ buf = kmalloc(WLC_IOCTL_SMLEN, GFP_KERNEL);
+ if (buf == NULL) {
+ CONFIG_ERROR("Failed to allocate buffer of %d bytes\n", WLC_IOCTL_SMLEN);
+ goto exit;
+ }
+ iovar_buf = kmalloc(WLC_IOCTL_SMLEN, GFP_KERNEL);
+ if (iovar_buf == NULL) {
+ CONFIG_ERROR("Failed to allocate buffer of %d bytes\n", WLC_IOCTL_SMLEN);
+ goto exit;
+ }
+ mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *)buf;
+ mkeep_alive_pktp->version = htod16(WL_MKEEP_ALIVE_VERSION);
+ mkeep_alive_pktp->length = htod16(WL_MKEEP_ALIVE_FIXED_LEN);
+ mkeep_alive_pktp->keep_alive_id = id;
+ buf_len += WL_MKEEP_ALIVE_FIXED_LEN;
+ mkeep_alive_pktp->period_msec = period;
+ if (packet && strlen(packet)) {
+ len_bytes = wl_pattern_atoh(packet, (char *)mkeep_alive_pktp->data);
+ buf_len += len_bytes;
+ if (bcast) {
+ memcpy(mkeep_alive_pktp->data, &ether_bcast, ETHER_ADDR_LEN);
+ }
+ ret = dhd_conf_get_iovar(dhd, ifidx, WLC_GET_VAR, "cur_etheraddr",
+ iovar_buf, WLC_IOCTL_SMLEN);
+ if (!ret) {
+ pdata = mkeep_alive_pktp->data;
+ memcpy(pdata+6, iovar_buf, ETHER_ADDR_LEN);
+ }
+ }
+ mkeep_alive_pktp->len_bytes = htod16(len_bytes);
+ ret = dhd_conf_set_bufiovar(dhd, ifidx, WLC_SET_VAR, "mkeep_alive",
+ buf, buf_len, FALSE);
+ }
+
+exit:
+ if (buf)
+ kfree(buf);
+ if (iovar_buf)
+ kfree(iovar_buf);
+ return ret;
+}
+
+#ifdef ARP_OFFLOAD_SUPPORT
+void
+dhd_conf_set_garp(dhd_pub_t *dhd, int ifidx, uint32 ipa, bool enable)
+{
+ int i, len = 0, total_len = WLC_IOCTL_SMLEN;
+ char *iovar_buf = NULL, *packet = NULL;
+
+ if (!dhd->conf->garp || ifidx != 0 || !(dhd->op_mode & DHD_FLAG_STA_MODE))
+ return;
+
+ CONFIG_TRACE("enable=%d\n", enable);
+
+ if (enable) {
+ iovar_buf = kmalloc(total_len, GFP_KERNEL);
+ if (iovar_buf == NULL) {
+ CONFIG_ERROR("Failed to allocate buffer of %d bytes\n", total_len);
+ goto exit;
+ }
+ packet = kmalloc(total_len, GFP_KERNEL);
+ if (packet == NULL) {
+ CONFIG_ERROR("Failed to allocate buffer of %d bytes\n", total_len);
+ goto exit;
+ }
+ dhd_conf_get_iovar(dhd, ifidx, WLC_GET_VAR, "cur_etheraddr", iovar_buf, total_len);
+
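+ /*
+ * Build a gratuitous ARP request as a hex string (standard ARP layout):
+ * dst ff:ff:ff:ff:ff:ff, src = local MAC, ethertype 0x0806, then
+ * htype 1 / ptype 0x0800 / hlen 6 / plen 4 / op 1, sender = local MAC+IP,
+ * target HW all-ff, target IP = local IP, followed by zero padding.
+ */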
+ len += snprintf(packet+len, total_len, "0xffffffffffff");
+ for (i=0; i<ETHER_ADDR_LEN; i++)
+ len += snprintf(packet+len, total_len, "%02x", iovar_buf[i]);
+ len += snprintf(packet+len, total_len, "08060001080006040001");
+ // Sender Hardware Addr.
+ for (i=0; i<ETHER_ADDR_LEN; i++)
+ len += snprintf(packet+len, total_len, "%02x", iovar_buf[i]);
+ // Sender IP Addr.
+ len += snprintf(packet+len, total_len, "%02x%02x%02x%02x",
+ ipa&0xff, (ipa>>8)&0xff, (ipa>>16)&0xff, (ipa>>24)&0xff);
+ // Target Hardware Addr.
+ len += snprintf(packet+len, total_len, "ffffffffffff");
+ // Target IP Addr.
+ len += snprintf(packet+len, total_len, "%02x%02x%02x%02x",
+ ipa&0xff, (ipa>>8)&0xff, (ipa>>16)&0xff, (ipa>>24)&0xff);
+ len += snprintf(packet+len, total_len, "000000000000000000000000000000000000");
+ }
+
+ dhd_conf_mkeep_alive(dhd, ifidx, 0, dhd->conf->keep_alive_period, packet, TRUE);
+
+exit:
+ if (iovar_buf)
+ kfree(iovar_buf);
+ if (packet)
+ kfree(packet);
+ return;
+}
+#endif
+
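+/*
+ * Filter the configured insuspend flags down to the set meaningful for the
+ * current operating mode: STA keeps the roam-offload/WOWL bits, AP keeps
+ * the AP-down/AP-filter bits, both keep the NO_* traffic bits, and other
+ * modes report 0.
+ */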
+uint
+dhd_conf_get_insuspend(dhd_pub_t *dhd, uint mask)
+{
+ uint insuspend = 0;
+
+ if (dhd->op_mode & DHD_FLAG_STA_MODE) {
+ insuspend = dhd->conf->insuspend &
+ (NO_EVENT_IN_SUSPEND | NO_TXDATA_IN_SUSPEND | NO_TXCTL_IN_SUSPEND |
+ ROAM_OFFLOAD_IN_SUSPEND | WOWL_IN_SUSPEND);
+ } else if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
+ insuspend = dhd->conf->insuspend &
+ (NO_EVENT_IN_SUSPEND | NO_TXDATA_IN_SUSPEND | NO_TXCTL_IN_SUSPEND |
+ AP_DOWN_IN_SUSPEND | AP_FILTER_IN_SUSPEND);
+ }
+
+ return (insuspend & mask);
+}
+
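+/*
+ * Snapshot the associated BSSID on suspend; on resume, if the association
+ * survived, nudge the link with a null-data frame and restore PM, otherwise
+ * synthesize a WLC_E_DEAUTH_IND so the upper layers notice the lost link.
+ */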
+static void
+dhd_conf_check_connection(dhd_pub_t *dhd, int ifidx, int suspend)
+{
+ struct dhd_conf *conf = dhd->conf;
+ struct ether_addr bssid;
+ wl_event_msg_t msg;
+ int pm;
+#ifdef WL_CFG80211
+ struct net_device *net;
+ unsigned long flags = 0;
+#endif /* defined(WL_CFG80211) */
+
+ if (suspend) {
+ memset(&bssid, 0, ETHER_ADDR_LEN);
+ dhd_wl_ioctl_cmd(dhd, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN, FALSE, ifidx);
+ if (memcmp(&ether_null, &bssid, ETHER_ADDR_LEN))
+ memcpy(&conf->bssid_insuspend, &bssid, ETHER_ADDR_LEN);
+ else
+ memset(&conf->bssid_insuspend, 0, ETHER_ADDR_LEN);
+ }
+ else {
+ if (memcmp(&ether_null, &conf->bssid_insuspend, ETHER_ADDR_LEN)) {
+ memset(&bssid, 0, ETHER_ADDR_LEN);
+ dhd_wl_ioctl_cmd(dhd, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN, FALSE, ifidx);
+ if (memcmp(&ether_null, &bssid, ETHER_ADDR_LEN)) {
+ dhd_conf_set_intiovar(dhd, ifidx, WLC_SET_PM, "WLC_SET_PM", 0, 0, FALSE);
+ dhd_conf_set_bufiovar(dhd, ifidx, WLC_SET_VAR, "send_nulldata",
+ (char *)&bssid, ETHER_ADDR_LEN, FALSE);
+ OSL_SLEEP(100);
+ if (conf->pm >= 0)
+ pm = conf->pm;
+ else
+ pm = PM_FAST;
+ dhd_conf_set_intiovar(dhd, ifidx, WLC_SET_PM, "WLC_SET_PM", pm, 0, FALSE);
+ } else {
+ CONFIG_TRACE("send WLC_E_DEAUTH_IND event\n");
+ bzero(&msg, sizeof(wl_event_msg_t));
+ msg.ifidx = ifidx;
+ memcpy(&msg.addr, &conf->bssid_insuspend, ETHER_ADDR_LEN);
+ msg.event_type = hton32(WLC_E_DEAUTH_IND);
+ msg.status = 0;
+ msg.reason = hton32(DOT11_RC_DEAUTH_LEAVING);
+#ifdef WL_EVENT
+ wl_ext_event_send(dhd->event_params, &msg, NULL);
+#endif
+#ifdef WL_CFG80211
+ spin_lock_irqsave(&dhd->up_lock, flags);
+ net = dhd_idx2net(dhd, ifidx);
+ if (net && dhd->up) {
+ wl_cfg80211_event(net, &msg, NULL);
+ }
+ spin_unlock_irqrestore(&dhd->up_lock, flags);
+#endif /* defined(WL_CFG80211) */
+ }
+ }
+ }
+}
+
+#ifdef SUSPEND_EVENT
+static void
+dhd_conf_set_suspend_event(dhd_pub_t *dhd, int suspend)
+{
+ struct dhd_conf *conf = dhd->conf;
+ char suspend_eventmask[WL_EVENTING_MASK_LEN];
+
+ CONFIG_TRACE("Enter\n");
+ if (suspend) {
+#ifdef PROP_TXSTATUS
+#if defined(BCMSDIO) || defined(BCMDBUS)
+ if (dhd->wlfc_enabled) {
+ dhd_wlfc_deinit(dhd);
+ conf->wlfc = TRUE;
+ } else {
+ conf->wlfc = FALSE;
+ }
+#endif /* BCMSDIO || BCMDBUS */
+#endif /* PROP_TXSTATUS */
+ dhd_conf_get_iovar(dhd, 0, WLC_GET_VAR, "event_msgs",
+ conf->resume_eventmask, sizeof(conf->resume_eventmask));
+ memset(suspend_eventmask, 0, sizeof(suspend_eventmask));
+ setbit(suspend_eventmask, WLC_E_ESCAN_RESULT);
+ dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "event_msgs",
+ suspend_eventmask, sizeof(suspend_eventmask), FALSE);
+ }
+ else {
+ dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "event_msgs",
+ conf->resume_eventmask, sizeof(conf->resume_eventmask), FALSE);
+#ifdef PROP_TXSTATUS
+#if defined(BCMSDIO) || defined(BCMDBUS)
+ if (conf->wlfc) {
+ dhd_wlfc_init(dhd);
+ dhd_conf_set_intiovar(dhd, 0, WLC_UP, "WLC_UP", 0, 0, FALSE);
+ }
+#endif
+#endif /* PROP_TXSTATUS */
+ }
+}
+#endif
+
+int
+dhd_conf_suspend_resume_sta(dhd_pub_t *dhd, int ifidx, int suspend)
+{
+ struct dhd_conf *conf = dhd->conf;
+ uint insuspend = 0;
+ int pm;
+#ifdef WL_EXT_WOWL
+ int i;
+#endif
+
+ insuspend = dhd_conf_get_insuspend(dhd, ALL_IN_SUSPEND);
+ if (insuspend)
+ WL_MSG(dhd_ifname(dhd, ifidx), "suspend %d\n", suspend);
+
+ if (suspend) {
+ dhd_conf_check_connection(dhd, ifidx, suspend);
+ dhd_conf_set_intiovar(dhd, ifidx, WLC_SET_VAR, "roam_off",
+ conf->roam_off_suspend, 0, FALSE);
+ dhd_conf_set_intiovar(dhd, ifidx, WLC_SET_VAR, "bcn_li_dtim",
+ conf->suspend_bcn_li_dtim, 0, FALSE);
+ if (conf->pm_in_suspend >= 0)
+ pm = conf->pm_in_suspend;
+ else if (conf->pm >= 0)
+ pm = conf->pm;
+ else
+ pm = PM_FAST;
+ dhd_conf_set_intiovar(dhd, ifidx, WLC_SET_PM, "WLC_SET_PM", pm, 0, FALSE);
+#ifdef WL_EXT_WOWL
+ if ((insuspend & WOWL_IN_SUSPEND) && dhd_master_mode) {
+ dhd_conf_wowl_pattern(dhd, ifidx, FALSE, "clr");
+ for(i=0; i<conf->pkt_filter_add.count; i++) {
+ dhd_conf_wowl_pattern(dhd, ifidx, TRUE, conf->pkt_filter_add.filter[i]);
+ }
+ dhd_conf_set_intiovar(dhd, ifidx, WLC_SET_VAR, "wowl", conf->wowl, 0, FALSE);
+ dhd_conf_set_intiovar(dhd, ifidx, WLC_SET_VAR, "wowl_activate", 1, 0, FALSE);
+ dhd_conf_wowl_wakeind(dhd, ifidx, TRUE);
+ }
+#endif
+ }
+ else {
+ dhd_conf_get_iovar(dhd, 0, WLC_GET_PM, "WLC_GET_PM", (char *)&pm, sizeof(pm));
+ CONFIG_TRACE("PM in suspend = %d\n", pm);
+ if (conf->pm >= 0)
+ pm = conf->pm;
+ else
+ pm = PM_FAST;
+ dhd_conf_set_intiovar(dhd, ifidx, WLC_SET_PM, "WLC_SET_PM", pm, 0, FALSE);
+#ifdef WL_EXT_WOWL
+ if (insuspend & WOWL_IN_SUSPEND) {
+ dhd_conf_wowl_wakeind(dhd, ifidx, FALSE);
+ dhd_conf_set_intiovar(dhd, ifidx, WLC_SET_VAR, "wowl_activate", 0, 0, FALSE);
+ dhd_conf_set_intiovar(dhd, ifidx, WLC_SET_VAR, "wowl", 0, 0, FALSE);
+ dhd_conf_wowl_pattern(dhd, ifidx, FALSE, "clr");
+ }
+#endif
+ dhd_conf_set_intiovar(dhd, ifidx, WLC_SET_VAR, "bcn_li_dtim", 0, 0, FALSE);
+ dhd_conf_set_intiovar(dhd, ifidx, WLC_SET_VAR, "roam_off",
+ conf->roam_off, 0, FALSE);
+ dhd_conf_check_connection(dhd, ifidx, suspend);
+ }
+
+ return 0;
+}
+
+#ifndef WL_EXT_IAPSTA
+static int
+dhd_conf_suspend_resume_ap(dhd_pub_t *dhd, int ifidx, int suspend)
+{
+ struct dhd_conf *conf = dhd->conf;
+ uint insuspend = 0;
+
+ insuspend = dhd_conf_get_insuspend(dhd, ALL_IN_SUSPEND);
+ if (insuspend)
+ WL_MSG(dhd_ifname(dhd, ifidx), "suspend %d\n", suspend);
+
+ if (suspend) {
+ if (insuspend & AP_DOWN_IN_SUSPEND) {
+ dhd_conf_set_intiovar(dhd, ifidx, WLC_DOWN, "WLC_DOWN", 1, 0, FALSE);
+ }
+ } else {
+ if (insuspend & AP_DOWN_IN_SUSPEND) {
+ dhd_conf_set_intiovar(dhd, ifidx, WLC_UP, "WLC_UP", 0, 0, FALSE);
+ }
+ }
+
+ return 0;
+}
+#endif /* !WL_EXT_IAPSTA */
+
+static int
+dhd_conf_suspend_resume_bus(dhd_pub_t *dhd, int suspend)
+{
+ uint insuspend = 0;
+
+ insuspend = dhd_conf_get_insuspend(dhd, ALL_IN_SUSPEND);
+ if (insuspend)
+ CONFIG_MSG("suspend %d\n", suspend);
+
+ if (suspend) {
+ if (insuspend & (WOWL_IN_SUSPEND | NO_TXCTL_IN_SUSPEND)) {
+#ifdef BCMSDIO
+ uint32 intstatus = 0;
+ int ret = 0;
+#endif
+ int hostsleep = 2;
+#ifdef WL_EXT_WOWL
+ hostsleep = 1;
+#endif
+ dhd_conf_set_intiovar(dhd, 0, WLC_SET_VAR, "hostsleep", hostsleep, 0, FALSE);
+#ifdef BCMSDIO
+ ret = dhd_bus_sleep(dhd, TRUE, &intstatus);
+ CONFIG_TRACE("ret = %d, intstatus = 0x%x\n", ret, intstatus);
+#endif
+ }
+ } else {
+ if (insuspend & (WOWL_IN_SUSPEND | NO_TXCTL_IN_SUSPEND)) {
+ dhd_conf_set_intiovar(dhd, 0, WLC_SET_VAR, "hostsleep", 0, 0, FALSE);
+ }
+ }
+
+ return 0;
+}
+
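+/*
+ * Top-level suspend/resume dispatcher. Note the mirrored ordering: on
+ * suspend the bus is put to sleep last, after the event-mask, roam and
+ * STA/AP work; on resume it is woken first, so firmware stays reachable
+ * whenever the other steps run.
+ */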
+int
+dhd_conf_set_suspend_resume(dhd_pub_t *dhd, int suspend)
+{
+ struct dhd_conf *conf = dhd->conf;
+ uint insuspend = 0;
+
+ insuspend = dhd_conf_get_insuspend(dhd, ALL_IN_SUSPEND);
+ if (insuspend)
+ CONFIG_MSG("op_mode %d, suspend %d, suspended %d, insuspend 0x%x, suspend_mode=%d\n",
+ dhd->op_mode, suspend, conf->suspended, insuspend, conf->suspend_mode);
+
+ if (conf->suspended == suspend || !dhd->up) {
+ return 0;
+ }
+
+ if (suspend) {
+ if (insuspend & (NO_EVENT_IN_SUSPEND|NO_TXCTL_IN_SUSPEND|WOWL_IN_SUSPEND)) {
+ if (conf->suspend_mode == PM_NOTIFIER)
+#ifdef WL_EXT_IAPSTA
+ wl_iapsta_wait_event_complete(dhd);
+#else
+ wl_ext_wait_event_complete(dhd, 0);
+#endif /* WL_EXT_IAPSTA */
+ }
+ if (insuspend & NO_TXDATA_IN_SUSPEND) {
+ dhd_txflowcontrol(dhd, ALL_INTERFACES, ON);
+ }
+#if defined(WL_CFG80211) || defined(WL_ESCAN)
+ if (insuspend & (NO_EVENT_IN_SUSPEND|NO_TXCTL_IN_SUSPEND|WOWL_IN_SUSPEND)) {
+ if (conf->suspend_mode == PM_NOTIFIER)
+ wl_ext_user_sync(dhd, 0, TRUE);
+ }
+#endif
+ if (insuspend & ROAM_OFFLOAD_IN_SUSPEND)
+ dhd_conf_enable_roam_offload(dhd, 2);
+#ifdef SUSPEND_EVENT
+ if (insuspend & NO_EVENT_IN_SUSPEND) {
+ dhd_conf_set_suspend_event(dhd, suspend);
+ }
+#endif
+#ifdef WL_EXT_IAPSTA
+ wl_iapsta_suspend_resume(dhd, suspend);
+#else
+ if (dhd->op_mode & DHD_FLAG_STA_MODE) {
+ dhd_conf_suspend_resume_sta(dhd, 0, suspend);
+ } else if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
+ dhd_conf_suspend_resume_ap(dhd, 0, suspend);
+ }
+#endif /* WL_EXT_IAPSTA */
+ dhd_conf_set_wl_cmd(dhd, conf->wl_suspend, FALSE);
+ dhd_conf_suspend_resume_bus(dhd, suspend);
+ conf->suspended = TRUE;
+ }
+ else {
+ dhd_conf_suspend_resume_bus(dhd, suspend);
+#ifdef SUSPEND_EVENT
+ if (insuspend & NO_EVENT_IN_SUSPEND) {
+ dhd_conf_set_suspend_event(dhd, suspend);
+ }
+#endif
+ if (insuspend & ROAM_OFFLOAD_IN_SUSPEND)
+ dhd_conf_enable_roam_offload(dhd, 0);
+ dhd_conf_set_wl_cmd(dhd, conf->wl_resume, FALSE);
+#ifdef WL_EXT_IAPSTA
+ wl_iapsta_suspend_resume(dhd, suspend);
+#else
+ if (dhd->op_mode & DHD_FLAG_STA_MODE) {
+ dhd_conf_suspend_resume_sta(dhd, 0, suspend);
+ } else if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
+ dhd_conf_suspend_resume_ap(dhd, 0, suspend);
+ }
+#endif /* WL_EXT_IAPSTA */
+#if defined(WL_CFG80211) || defined(WL_ESCAN)
+ if (insuspend & (NO_EVENT_IN_SUSPEND|NO_TXCTL_IN_SUSPEND|WOWL_IN_SUSPEND)) {
+ if (conf->suspend_mode == PM_NOTIFIER)
+ wl_ext_user_sync(dhd, 0, FALSE);
+ }
+#endif
+ if (insuspend & NO_TXDATA_IN_SUSPEND) {
+ dhd_txflowcontrol(dhd, ALL_INTERFACES, OFF);
+ }
+ conf->suspended = FALSE;
+ }
+
+ return 0;
+}
+
+#ifdef PROP_TXSTATUS
+int
+dhd_conf_get_disable_proptx(dhd_pub_t *dhd)
+{
+ struct dhd_conf *conf = dhd->conf;
+ int disable_proptx = -1;
+ int fw_proptx = 0;
+
+ /* check fw proptx priority:
+ * 1st: check fw support via wl cap
+ * 2nd: 4334/43340/43341/43241 support proptx but do not report it in wl cap, so enable it by default;
+ * to disable it, set disable_proptx=1 in config.txt
+ * 3rd: disable when proptxstatus is not supported in wl cap
+ */
+ if (FW_SUPPORTED(dhd, proptxstatus)) {
+ fw_proptx = 1;
+ } else if (conf->chip == BCM4334_CHIP_ID || conf->chip == BCM43340_CHIP_ID ||
+ conf->chip == BCM43341_CHIP_ID || conf->chip == BCM4324_CHIP_ID) {
+ fw_proptx = 1;
+ } else {
+ fw_proptx = 0;
+ }
+
+ /* returned disable_proptx value:
+ * -1: disable in STA and enable in P2P (follow original dhd settings when PROP_TXSTATUS_VSDB is enabled)
+ * 0: depend on fw support
+ * 1: always disable proptx
+ */
+ if (conf->disable_proptx == 0) {
+ // check fw support as well
+ if (fw_proptx)
+ disable_proptx = 0;
+ else
+ disable_proptx = 1;
+ } else if (conf->disable_proptx >= 1) {
+ disable_proptx = 1;
+ } else {
+ // check fw support as well
+ if (fw_proptx)
+ disable_proptx = -1;
+ else
+ disable_proptx = 1;
+ }
+
+ CONFIG_MSG("fw_proptx=%d, disable_proptx=%d\n", fw_proptx, disable_proptx);
+
+ return disable_proptx;
+}
+#endif
+
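+/*
+ * Copy the next non-empty config line from varbuf[start_pos..len) into
+ * pickbuf: CR and tab characters are dropped, '#' comments are skipped to
+ * end of line, and '\' continues a line. Returns the position at which to
+ * resume so callers can iterate the buffer line by line.
+ */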
+uint
+pick_config_vars(char *varbuf, uint len, uint start_pos, char *pickbuf, int picklen)
+{
+ bool findNewline, changenewline=FALSE, pick=FALSE;
+ int column;
+ uint n, pick_column=0;
+
+ findNewline = FALSE;
+ column = 0;
+
+ if (start_pos >= len) {
+ CONFIG_ERROR("wrong start pos\n");
+ return 0;
+ }
+
+ for (n = start_pos; n < len; n++) {
+ if (varbuf[n] == '\r')
+ continue;
+ if ((findNewline || changenewline) && varbuf[n] != '\n')
+ continue;
+ findNewline = FALSE;
+ if (varbuf[n] == '#') {
+ findNewline = TRUE;
+ continue;
+ }
+ if (varbuf[n] == '\\') {
+ changenewline = TRUE;
+ continue;
+ }
+ if (!changenewline && varbuf[n] == '\n') {
+ if (column == 0)
+ continue;
+ column = 0;
+ continue;
+ }
+ if (changenewline && varbuf[n] == '\n') {
+ changenewline = FALSE;
+ continue;
+ }
+
+ if (column==0 && !pick) { // start to pick
+ pick = TRUE;
+ column++;
+ pick_column = 0;
+ } else {
+ if (pick && column==0) { // stop to pick
+ pick = FALSE;
+ break;
+ } else
+ column++;
+ }
+ if (pick) {
+ if (varbuf[n] == 0x9)
+ continue;
+ if (pick_column >= picklen)
+ break;
+ pickbuf[pick_column] = varbuf[n];
+ pick_column++;
+ }
+ }
+
+ return n; // return current position
+}
+
+bool
+dhd_conf_read_chiprev(dhd_pub_t *dhd, int *chip_match,
+ char *full_param, uint len_param)
+{
+ char *data = full_param+len_param, *pick_tmp, *pch;
+ uint chip = 0, rev = 0;
+
+ /* Process chip, regrev:
+ * chip=[chipid], rev=[rev]
+ * Ex: chip=0x4359, rev=9
+ */
+ if (!strncmp("chip=", full_param, len_param)) {
+ chip = (int)simple_strtol(data, NULL, 0);
+ pick_tmp = data;
+ pch = bcmstrstr(pick_tmp, "rev=");
+ if (pch) {
+ rev = (int)simple_strtol(pch+strlen("rev="), NULL, 0);
+ }
+ if (chip == dhd->conf->chip && rev == dhd->conf->chiprev)
+ *chip_match = 1;
+ else
+ *chip_match = 0;
+ CONFIG_MSG("chip=0x%x, rev=%d, chip_match=%d\n", chip, rev, *chip_match);
+ }
+
+ return TRUE;
+}
+
+bool
+dhd_conf_read_log_level(dhd_pub_t *dhd, char *full_param, uint len_param)
+{
+ char *data = full_param+len_param;
+
+ if (!strncmp("dhd_msg_level=", full_param, len_param)) {
+ dhd_msg_level = (int)simple_strtol(data, NULL, 0);
+ CONFIG_MSG("dhd_msg_level = 0x%X\n", dhd_msg_level);
+ }
+ else if (!strncmp("dump_msg_level=", full_param, len_param)) {
+ dump_msg_level = (int)simple_strtol(data, NULL, 0);
+ CONFIG_MSG("dump_msg_level = 0x%X\n", dump_msg_level);
+ }
+#ifdef BCMSDIO
+ else if (!strncmp("sd_msglevel=", full_param, len_param)) {
+ sd_msglevel = (int)simple_strtol(data, NULL, 0);
+ CONFIG_MSG("sd_msglevel = 0x%X\n", sd_msglevel);
+ }
+#endif
+#ifdef BCMDBUS
+ else if (!strncmp("dbus_msglevel=", full_param, len_param)) {
+ dbus_msglevel = (int)simple_strtol(data, NULL, 0);
+ CONFIG_MSG("dbus_msglevel = 0x%X\n", dbus_msglevel);
+ }
+#endif
+ else if (!strncmp("android_msg_level=", full_param, len_param)) {
+ android_msg_level = (int)simple_strtol(data, NULL, 0);
+ CONFIG_MSG("android_msg_level = 0x%X\n", android_msg_level);
+ }
+ else if (!strncmp("config_msg_level=", full_param, len_param)) {
+ config_msg_level = (int)simple_strtol(data, NULL, 0);
+ CONFIG_MSG("config_msg_level = 0x%X\n", config_msg_level);
+ }
+#ifdef WL_CFG80211
+ else if (!strncmp("wl_dbg_level=", full_param, len_param)) {
+ wl_dbg_level = (int)simple_strtol(data, NULL, 0);
+ CONFIG_MSG("wl_dbg_level = 0x%X\n", wl_dbg_level);
+ }
+#endif
+#if defined(WL_WIRELESS_EXT)
+ else if (!strncmp("iw_msg_level=", full_param, len_param)) {
+ iw_msg_level = (int)simple_strtol(data, NULL, 0);
+ CONFIG_MSG("iw_msg_level = 0x%X\n", iw_msg_level);
+ }
+#endif
+#if defined(DHD_DEBUG)
+ else if (!strncmp("dhd_console_ms=", full_param, len_param)) {
+ dhd->dhd_console_ms = (int)simple_strtol(data, NULL, 0);
+ CONFIG_MSG("dhd_console_ms = %d\n", dhd->dhd_console_ms);
+ }
+#endif
+ else
+ return false;
+
+ return true;
+}
+
+void
+dhd_conf_read_wme_ac_value(wme_param_t *wme, char *pick, int ac_val)
+{
+ char *pick_tmp, *pch;
+
+ pick_tmp = pick;
+ pch = bcmstrstr(pick_tmp, "aifsn ");
+ if (pch) {
+ wme->aifsn[ac_val] = (int)simple_strtol(pch+strlen("aifsn "), NULL, 0);
+ CONFIG_MSG("ac_val=%d, aifsn=%d\n", ac_val, wme->aifsn[ac_val]);
+ }
+ pick_tmp = pick;
+ pch = bcmstrstr(pick_tmp, "ecwmin ");
+ if (pch) {
+ wme->ecwmin[ac_val] = (int)simple_strtol(pch+strlen("ecwmin "), NULL, 0);
+ CONFIG_MSG("ac_val=%d, ecwmin=%d\n", ac_val, wme->ecwmin[ac_val]);
+ }
+ pick_tmp = pick;
+ pch = bcmstrstr(pick_tmp, "ecwmax ");
+ if (pch) {
+ wme->ecwmax[ac_val] = (int)simple_strtol(pch+strlen("ecwmax "), NULL, 0);
+ CONFIG_MSG("ac_val=%d, ecwmax=%d\n", ac_val, wme->ecwmax[ac_val]);
+ }
+ pick_tmp = pick;
+ pch = bcmstrstr(pick_tmp, "txop ");
+ if (pch) {
+ wme->txop[ac_val] = (int)simple_strtol(pch+strlen("txop "), NULL, 0);
+ CONFIG_MSG("ac_val=%d, txop=0x%x\n", ac_val, wme->txop[ac_val]);
+ }
+}
+
+bool
+dhd_conf_read_wme_ac_params(dhd_pub_t *dhd, char *full_param, uint len_param)
+{
+ struct dhd_conf *conf = dhd->conf;
+ char *data = full_param+len_param;
+
+ // wme_ac_sta_be=aifsn 1 ecwmin 2 ecwmax 3 txop 0x5e
+ // wme_ac_sta_vo=aifsn 1 ecwmin 1 ecwmax 1 txop 0x5e
+
+ if (!strncmp("force_wme_ac=", full_param, len_param)) {
+ conf->force_wme_ac = (int)simple_strtol(data, NULL, 10);
+ CONFIG_MSG("force_wme_ac = %d\n", conf->force_wme_ac);
+ }
+ else if (!strncmp("wme_ac_sta_be=", full_param, len_param)) {
+ dhd_conf_read_wme_ac_value(&conf->wme_sta, data, AC_BE);
+ }
+ else if (!strncmp("wme_ac_sta_bk=", full_param, len_param)) {
+ dhd_conf_read_wme_ac_value(&conf->wme_sta, data, AC_BK);
+ }
+ else if (!strncmp("wme_ac_sta_vi=", full_param, len_param)) {
+ dhd_conf_read_wme_ac_value(&conf->wme_sta, data, AC_VI);
+ }
+ else if (!strncmp("wme_ac_sta_vo=", full_param, len_param)) {
+ dhd_conf_read_wme_ac_value(&conf->wme_sta, data, AC_VO);
+ }
+ else if (!strncmp("wme_ac_ap_be=", full_param, len_param)) {
+ dhd_conf_read_wme_ac_value(&conf->wme_ap, data, AC_BE);
+ }
+ else if (!strncmp("wme_ac_ap_bk=", full_param, len_param)) {
+ dhd_conf_read_wme_ac_value(&conf->wme_ap, data, AC_BK);
+ }
+ else if (!strncmp("wme_ac_ap_vi=", full_param, len_param)) {
+ dhd_conf_read_wme_ac_value(&conf->wme_ap, data, AC_VI);
+ }
+ else if (!strncmp("wme_ac_ap_vo=", full_param, len_param)) {
+ dhd_conf_read_wme_ac_value(&conf->wme_ap, data, AC_VO);
+ }
+ else
+ return false;
+
+ return true;
+}
+
+#ifdef SET_FWNV_BY_MAC
+bool
+dhd_conf_read_fw_by_mac(dhd_pub_t *dhd, char *full_param, uint len_param)
+{
+ int i, j;
+ char *pch, *pick_tmp;
+ wl_mac_list_t *mac_list;
+ wl_mac_range_t *mac_range;
+ struct dhd_conf *conf = dhd->conf;
+ char *data = full_param+len_param;
+
+ /* Process fw_by_mac:
+ * fw_by_mac=[fw_mac_num] \
+ * [fw_name1] [mac_num1] [oui1-1] [nic_start1-1] [nic_end1-1] \
+ * [oui1-1] [nic_start1-1] [nic_end1-1]... \
+ * [oui1-n] [nic_start1-n] [nic_end1-n] \
+ * [fw_name2] [mac_num2] [oui2-1] [nic_start2-1] [nic_end2-1] \
+ * [oui2-1] [nic_start2-1] [nic_end2-1]... \
+ * [oui2-n] [nic_start2-n] [nic_end2-n] \
+ * Ex: fw_by_mac=2 \
+ * fw_bcmdhd1.bin 2 0x0022F4 0xE85408 0xE8549D 0x983B16 0x3557A9 0x35582A \
+ * fw_bcmdhd2.bin 3 0x0022F4 0xE85408 0xE8549D 0x983B16 0x3557A9 0x35582A \
+ * 0x983B16 0x916157 0x916487
+ */
+
+ if (!strncmp("fw_by_mac=", full_param, len_param)) {
+ dhd_conf_free_mac_list(&conf->fw_by_mac);
+ pick_tmp = data;
+ pch = bcmstrtok(&pick_tmp, " ", 0);
+ conf->fw_by_mac.count = (uint32)simple_strtol(pch, NULL, 0);
+ if (!(mac_list = kmalloc(sizeof(wl_mac_list_t)*conf->fw_by_mac.count,
+ GFP_KERNEL))) {
+ conf->fw_by_mac.count = 0;
+ CONFIG_ERROR("kmalloc failed\n");
+ }
+ CONFIG_MSG("fw_count=%d\n", conf->fw_by_mac.count);
+ conf->fw_by_mac.m_mac_list_head = mac_list;
+ for (i=0; i<conf->fw_by_mac.count; i++) {
+ pch = bcmstrtok(&pick_tmp, " ", 0);
+ strcpy(mac_list[i].name, pch);
+ pch = bcmstrtok(&pick_tmp, " ", 0);
+ mac_list[i].count = (uint32)simple_strtol(pch, NULL, 0);
+ CONFIG_MSG("name=%s, mac_count=%d\n",
+ mac_list[i].name, mac_list[i].count);
+ if (!(mac_range = kmalloc(sizeof(wl_mac_range_t)*mac_list[i].count,
+ GFP_KERNEL))) {
+ mac_list[i].count = 0;
+ CONFIG_ERROR("kmalloc failed\n");
+ break;
+ }
+ mac_list[i].mac = mac_range;
+ for (j=0; j<mac_list[i].count; j++) {
+ pch = bcmstrtok(&pick_tmp, " ", 0);
+ mac_range[j].oui = (uint32)simple_strtol(pch, NULL, 0);
+ pch = bcmstrtok(&pick_tmp, " ", 0);
+ mac_range[j].nic_start = (uint32)simple_strtol(pch, NULL, 0);
+ pch = bcmstrtok(&pick_tmp, " ", 0);
+ mac_range[j].nic_end = (uint32)simple_strtol(pch, NULL, 0);
+ CONFIG_MSG("oui=0x%06X, nic_start=0x%06X, nic_end=0x%06X\n",
+ mac_range[j].oui, mac_range[j].nic_start, mac_range[j].nic_end);
+ }
+ }
+ }
+ else
+ return false;
+
+ return true;
+}
+
+bool
+dhd_conf_read_nv_by_mac(dhd_pub_t *dhd, char *full_param, uint len_param)
+{
+ int i, j;
+ char *pch, *pick_tmp;
+ wl_mac_list_t *mac_list;
+ wl_mac_range_t *mac_range;
+ struct dhd_conf *conf = dhd->conf;
+ char *data = full_param+len_param;
+
+ /* Process nv_by_mac:
+ * [nv_by_mac]: The same format as fw_by_mac
+ */
+ if (!strncmp("nv_by_mac=", full_param, len_param)) {
+ dhd_conf_free_mac_list(&conf->nv_by_mac);
+ pick_tmp = data;
+ pch = bcmstrtok(&pick_tmp, " ", 0);
+ conf->nv_by_mac.count = (uint32)simple_strtol(pch, NULL, 0);
+ if (!(mac_list = kmalloc(sizeof(wl_mac_list_t)*conf->nv_by_mac.count,
+ GFP_KERNEL))) {
+ conf->nv_by_mac.count = 0;
+ CONFIG_ERROR("kmalloc failed\n");
+ }
+ CONFIG_MSG("nv_count=%d\n", conf->nv_by_mac.count);
+ conf->nv_by_mac.m_mac_list_head = mac_list;
+ for (i=0; i<conf->nv_by_mac.count; i++) {
+ pch = bcmstrtok(&pick_tmp, " ", 0);
+ strcpy(mac_list[i].name, pch);
+ pch = bcmstrtok(&pick_tmp, " ", 0);
+ mac_list[i].count = (uint32)simple_strtol(pch, NULL, 0);
+ CONFIG_MSG("name=%s, mac_count=%d\n",
+ mac_list[i].name, mac_list[i].count);
+ if (!(mac_range = kmalloc(sizeof(wl_mac_range_t)*mac_list[i].count,
+ GFP_KERNEL))) {
+ mac_list[i].count = 0;
+ CONFIG_ERROR("kmalloc failed\n");
+ break;
+ }
+ mac_list[i].mac = mac_range;
+ for (j=0; j<mac_list[i].count; j++) {
+ pch = bcmstrtok(&pick_tmp, " ", 0);
+ mac_range[j].oui = (uint32)simple_strtol(pch, NULL, 0);
+ pch = bcmstrtok(&pick_tmp, " ", 0);
+ mac_range[j].nic_start = (uint32)simple_strtol(pch, NULL, 0);
+ pch = bcmstrtok(&pick_tmp, " ", 0);
+ mac_range[j].nic_end = (uint32)simple_strtol(pch, NULL, 0);
+ CONFIG_MSG("oui=0x%06X, nic_start=0x%06X, nic_end=0x%06X\n",
+ mac_range[j].oui, mac_range[j].nic_start, mac_range[j].nic_end);
+ }
+ }
+ }
+ else
+ return false;
+
+ return true;
+}
+#endif
+
+bool
+dhd_conf_read_nv_by_chip(dhd_pub_t *dhd, char *full_param, uint len_param)
+{
+ int i;
+ char *pch, *pick_tmp;
+ wl_chip_nv_path_t *chip_nv_path;
+ struct dhd_conf *conf = dhd->conf;
+ char *data = full_param+len_param;
+
+ /* Process nv_by_chip:
+ * nv_by_chip=[nv_chip_num] \
+ * [chip1] [chiprev1] [nv_name1] [chip2] [chiprev2] [nv_name2] \
+ * Ex: nv_by_chip=2 \
+ * 43430 0 nvram_ap6212.txt 43430 1 nvram_ap6212a.txt \
+ */
+ if (!strncmp("nv_by_chip=", full_param, len_param)) {
+ dhd_conf_free_chip_nv_path_list(&conf->nv_by_chip);
+ pick_tmp = data;
+ pch = bcmstrtok(&pick_tmp, " ", 0);
+ conf->nv_by_chip.count = (uint32)simple_strtol(pch, NULL, 0);
+ if (!(chip_nv_path = kmalloc(sizeof(wl_chip_nv_path_t)*conf->nv_by_chip.count,
+ GFP_KERNEL))) {
+ conf->nv_by_chip.count = 0;
+ CONFIG_ERROR("kmalloc failed\n");
+ }
+ CONFIG_MSG("nv_by_chip_count=%d\n", conf->nv_by_chip.count);
+ conf->nv_by_chip.m_chip_nv_path_head = chip_nv_path;
+ for (i=0; i<conf->nv_by_chip.count; i++) {
+ pch = bcmstrtok(&pick_tmp, " ", 0);
+ chip_nv_path[i].chip = (uint32)simple_strtol(pch, NULL, 0);
+ pch = bcmstrtok(&pick_tmp, " ", 0);
+ chip_nv_path[i].chiprev = (uint32)simple_strtol(pch, NULL, 0);
+ pch = bcmstrtok(&pick_tmp, " ", 0);
+ strcpy(chip_nv_path[i].name, pch);
+ CONFIG_MSG("chip=0x%x, chiprev=%d, name=%s\n",
+ chip_nv_path[i].chip, chip_nv_path[i].chiprev, chip_nv_path[i].name);
+ }
+ }
+ else
+ return false;
+
+ return true;
+}
+
+bool
+dhd_conf_read_roam_params(dhd_pub_t *dhd, char *full_param, uint len_param)
+{
+ struct dhd_conf *conf = dhd->conf;
+ char *data = full_param+len_param;
+
+ if (!strncmp("roam_off=", full_param, len_param)) {
+ if (!strncmp(data, "0", 1))
+ conf->roam_off = 0;
+ else
+ conf->roam_off = 1;
+ CONFIG_MSG("roam_off = %d\n", conf->roam_off);
+ }
+ else if (!strncmp("roam_off_suspend=", full_param, len_param)) {
+ if (!strncmp(data, "0", 1))
+ conf->roam_off_suspend = 0;
+ else
+ conf->roam_off_suspend = 1;
+ CONFIG_MSG("roam_off_suspend = %d\n", conf->roam_off_suspend);
+ }
+ else if (!strncmp("roam_trigger=", full_param, len_param)) {
+ conf->roam_trigger[0] = (int)simple_strtol(data, NULL, 10);
+ CONFIG_MSG("roam_trigger = %d\n", conf->roam_trigger[0]);
+ }
+ else if (!strncmp("roam_scan_period=", full_param, len_param)) {
+ conf->roam_scan_period[0] = (int)simple_strtol(data, NULL, 10);
+ CONFIG_MSG("roam_scan_period = %d\n", conf->roam_scan_period[0]);
+ }
+ else if (!strncmp("roam_delta=", full_param, len_param)) {
+ conf->roam_delta[0] = (int)simple_strtol(data, NULL, 10);
+ CONFIG_MSG("roam_delta = %d\n", conf->roam_delta[0]);
+ }
+ else if (!strncmp("fullroamperiod=", full_param, len_param)) {
+ conf->fullroamperiod = (int)simple_strtol(data, NULL, 10);
+ CONFIG_MSG("fullroamperiod = %d\n", conf->fullroamperiod);
+ } else
+ return false;
+
+ return true;
+}
+
+bool
+dhd_conf_read_country(dhd_pub_t *dhd, char *full_param, uint len_param)
+{
+ struct dhd_conf *conf = dhd->conf;
+ country_list_t *country_next = NULL, *country;
+ int i, count = 0;
+ char *pch, *pick_tmp, *pick_tmp2;
+ char *data = full_param+len_param;
+ uint len_data = strlen(data);
+
+ /* Process country_list:
+ * country_list=[country1]:[ccode1]/[regrev1],
+ * [country2]:[ccode2]/[regrev2] \
+ * Ex: country_list=US:US/0, TW:TW/1
+ */
+ if (!strncmp("ccode=", full_param, len_param)) {
+ len_data = min((uint)WLC_CNTRY_BUF_SZ, len_data);
+ memset(&conf->cspec, 0, sizeof(wl_country_t));
+ memcpy(conf->cspec.country_abbrev, data, len_data);
+ memcpy(conf->cspec.ccode, data, len_data);
+ CONFIG_MSG("ccode = %s\n", conf->cspec.ccode);
+ }
+ else if (!strncmp("regrev=", full_param, len_param)) {
+ conf->cspec.rev = (int32)simple_strtol(data, NULL, 10);
+ CONFIG_MSG("regrev = %d\n", conf->cspec.rev);
+ }
+ else if (!strncmp("country_list=", full_param, len_param)) {
+ dhd_conf_free_country_list(conf);
+ pick_tmp = data;
+ for (i=0; i<CONFIG_COUNTRY_LIST_SIZE; i++) {
+ pick_tmp2 = bcmstrtok(&pick_tmp, ", ", 0);
+ if (!pick_tmp2)
+ break;
+ pch = bcmstrtok(&pick_tmp2, ":", 0);
+ if (!pch)
+ break;
+ country = NULL;
+ if (!(country = kmalloc(sizeof(country_list_t), GFP_KERNEL))) {
+ CONFIG_ERROR("kmalloc failed\n");
+ break;
+ }
+ memset(country, 0, sizeof(country_list_t));
+
+ memcpy(country->cspec.country_abbrev, pch, 2);
+ pch = bcmstrtok(&pick_tmp2, "/", 0);
+ if (!pch) {
+ kfree(country);
+ break;
+ }
+ memcpy(country->cspec.ccode, pch, 2);
+ pch = bcmstrtok(&pick_tmp2, "/", 0);
+ if (!pch) {
+ kfree(country);
+ break;
+ }
+ country->cspec.rev = (int32)simple_strtol(pch, NULL, 10);
+ count++;
+ if (!conf->country_head) {
+ conf->country_head = country;
+ country_next = country;
+ } else {
+ country_next->next = country;
+ country_next = country;
+ }
+ CONFIG_TRACE("abbrev=%s, ccode=%s, regrev=%d\n",
+ country->cspec.country_abbrev, country->cspec.ccode, country->cspec.rev);
+ }
+ CONFIG_MSG("%d country in list\n", count);
+ }
+ else
+ return false;
+
+ return true;
+}
+
+bool
+dhd_conf_read_mchan_params(dhd_pub_t *dhd, char *full_param, uint len_param)
+{
+ int i;
+ char *pch, *pick_tmp, *pick_tmp2;
+ struct dhd_conf *conf = dhd->conf;
+ mchan_params_t *mchan_next = NULL, *mchan;
+ char *data = full_param+len_param;
+
+ /* Process mchan_bw:
+ * mchan_bw=[val]/[any/go/gc]/[any/source/sink]
+ * Ex: mchan_bw=80/go/source, 30/gc/sink
+ */
+ if (!strncmp("mchan_bw=", full_param, len_param)) {
+ dhd_conf_free_mchan_list(conf);
+ pick_tmp = data;
+ for (i=0; i<MCHAN_MAX_NUM; i++) {
+ pick_tmp2 = bcmstrtok(&pick_tmp, ", ", 0);
+ if (!pick_tmp2)
+ break;
+ pch = bcmstrtok(&pick_tmp2, "/", 0);
+ if (!pch)
+ break;
+
+ mchan = NULL;
+ if (!(mchan = kmalloc(sizeof(mchan_params_t), GFP_KERNEL))) {
+ CONFIG_ERROR("kmalloc failed\n");
+ break;
+ }
+ memset(mchan, 0, sizeof(mchan_params_t));
+
+ mchan->bw = (int)simple_strtol(pch, NULL, 0);
+ if (mchan->bw < 0 || mchan->bw > 100) {
+ CONFIG_ERROR("wrong bw %d\n", mchan->bw);
+ kfree(mchan);
+ break;
+ }
+
+ pch = bcmstrtok(&pick_tmp2, "/", 0);
+ if (!pch) {
+ kfree(mchan);
+ break;
+ } else {
+ if (bcmstrstr(pch, "any")) {
+ mchan->p2p_mode = -1;
+ } else if (bcmstrstr(pch, "go")) {
+ mchan->p2p_mode = WL_P2P_IF_GO;
+ } else if (bcmstrstr(pch, "gc")) {
+ mchan->p2p_mode = WL_P2P_IF_CLIENT;
+ }
+ }
+ pch = bcmstrtok(&pick_tmp2, "/", 0);
+ if (!pch) {
+ kfree(mchan);
+ break;
+ } else {
+ if (bcmstrstr(pch, "any")) {
+ mchan->miracast_mode = -1;
+ } else if (bcmstrstr(pch, "source")) {
+ mchan->miracast_mode = MIRACAST_SOURCE;
+ } else if (bcmstrstr(pch, "sink")) {
+ mchan->miracast_mode = MIRACAST_SINK;
+ }
+ }
+ if (!conf->mchan) {
+ conf->mchan = mchan;
+ mchan_next = mchan;
+ } else {
+ mchan_next->next = mchan;
+ mchan_next = mchan;
+ }
+ CONFIG_TRACE("mchan_bw=%d/%d/%d\n", mchan->bw,mchan->p2p_mode,
+ mchan->miracast_mode);
+ }
+ }
+ else
+ return false;
+
+ return true;
+}
+
+#ifdef PKT_FILTER_SUPPORT
+bool
+dhd_conf_read_pkt_filter(dhd_pub_t *dhd, char *full_param, uint len_param)
+{
+ struct dhd_conf *conf = dhd->conf;
+ char *data = full_param+len_param;
+ char *pch, *pick_tmp;
+ int i;
+
+ /* Process pkt filter:
+ * 1) pkt_filter_add=99 0 0 0 0x000000000000 0x000000000000
+ * 2) pkt_filter_delete=100, 102, 103, 104, 105
+ * 3) magic_pkt_filter_add=141 0 1 12
+ */
+ if (!strncmp("dhd_master_mode=", full_param, len_param)) {
+ if (!strncmp(data, "0", 1))
+ dhd_master_mode = FALSE;
+ else
+ dhd_master_mode = TRUE;
+ CONFIG_MSG("dhd_master_mode = %d\n", dhd_master_mode);
+ }
+ else if (!strncmp("pkt_filter_add=", full_param, len_param)) {
+ pick_tmp = data;
+ pch = bcmstrtok(&pick_tmp, ",.-", 0);
+ i=0;
+ while (pch != NULL && i<DHD_CONF_FILTER_MAX) {
+ strcpy(&conf->pkt_filter_add.filter[i][0], pch);
+ CONFIG_MSG("pkt_filter_add[%d][] = %s\n",
+ i, &conf->pkt_filter_add.filter[i][0]);
+ pch = bcmstrtok(&pick_tmp, ",.-", 0);
+ i++;
+ }
+ conf->pkt_filter_add.count = i;
+ }
+ else if (!strncmp("pkt_filter_delete=", full_param, len_param) ||
+ !strncmp("pkt_filter_del=", full_param, len_param)) {
+ pick_tmp = data;
+ pch = bcmstrtok(&pick_tmp, " ,.-", 0);
+ i=0;
+ while (pch != NULL && i<DHD_CONF_FILTER_MAX) {
+ conf->pkt_filter_del.id[i] = (uint32)simple_strtol(pch, NULL, 10);
+ pch = bcmstrtok(&pick_tmp, " ,.-", 0);
+ i++;
+ }
+ conf->pkt_filter_del.count = i;
+ CONFIG_MSG("pkt_filter_del id = ");
+ for (i=0; i<conf->pkt_filter_del.count; i++)
+ printk(KERN_CONT "%d ", conf->pkt_filter_del.id[i]);
+ printk(KERN_CONT "\n");
+ }
+ else if (!strncmp("magic_pkt_filter_add=", full_param, len_param)) {
+ if (conf->magic_pkt_filter_add) {
+ kfree(conf->magic_pkt_filter_add);
+ conf->magic_pkt_filter_add = NULL;
+ }
+ if (!(conf->magic_pkt_filter_add = kmalloc(MAGIC_PKT_FILTER_LEN, GFP_KERNEL))) {
+ CONFIG_ERROR("kmalloc failed\n");
+ } else {
+ memset(conf->magic_pkt_filter_add, 0, MAGIC_PKT_FILTER_LEN);
+ strcpy(conf->magic_pkt_filter_add, data);
+ CONFIG_MSG("magic_pkt_filter_add = %s\n", conf->magic_pkt_filter_add);
+ }
+ }
+ else
+ return false;
+
+ return true;
+}
+#endif
+
+#ifdef ISAM_PREINIT
+#if !defined(WL_EXT_IAPSTA)
+#error "WL_EXT_IAPSTA should be defined to enable ISAM_PREINIT"
+#endif /* !WL_EXT_IAPSTA */
+/*
+ * isam_init=mode [sta|ap|apsta|dualap] vifname [wlan1]
+ * isam_config=ifname [wlan0|wlan1] ssid [xxx] chan [x]
+ hidden [y|n] maxassoc [x]
+ amode [open|shared|wpapsk|wpa2psk|wpawpa2psk]
+ emode [none|wep|tkip|aes|tkipaes]
+ key [xxxxx]
+ * isam_enable=ifname [wlan0|wlan1]
+*/
+bool
+dhd_conf_read_isam(dhd_pub_t *dhd, char *full_param, uint len_param)
+{
+ struct dhd_conf *conf = dhd->conf;
+ char *data = full_param+len_param;
+
+ if (!strncmp("isam_init=", full_param, len_param)) {
+ sprintf(conf->isam_init, "isam_init %s", data);
+ CONFIG_MSG("isam_init=%s\n", conf->isam_init);
+ }
+ else if (!strncmp("isam_config=", full_param, len_param)) {
+ sprintf(conf->isam_config, "isam_config %s", data);
+ CONFIG_MSG("isam_config=%s\n", conf->isam_config);
+ }
+ else if (!strncmp("isam_enable=", full_param, len_param)) {
+ sprintf(conf->isam_enable, "isam_enable %s", data);
+ CONFIG_MSG("isam_enable=%s\n", conf->isam_enable);
+ }
+ else
+ return false;
+
+ return true;
+}
+#endif
+
+#ifdef IDHCP
+bool
+dhd_conf_read_dhcp_params(dhd_pub_t *dhd, char *full_param, uint len_param)
+{
+ struct dhd_conf *conf = dhd->conf;
+ char *data = full_param+len_param;
+ struct ipv4_addr ipa_set;
+
+ if (!strncmp("dhcpc_enable=", full_param, len_param)) {
+ conf->dhcpc_enable = (int)simple_strtol(data, NULL, 10);
+ CONFIG_MSG("dhcpc_enable = %d\n", conf->dhcpc_enable);
+ }
+ else if (!strncmp("dhcpd_enable=", full_param, len_param)) {
+ conf->dhcpd_enable = (int)simple_strtol(data, NULL, 10);
+ CONFIG_MSG("dhcpd_enable = %d\n", conf->dhcpd_enable);
+ }
+ else if (!strncmp("dhcpd_ip_addr=", full_param, len_param)) {
+ if (!bcm_atoipv4(data, &ipa_set)) {
+ CONFIG_ERROR("dhcpd_ip_addr adress setting failed.n");
+ return false;
+ }
+ memcpy(&conf->dhcpd_ip_addr, &ipa_set, sizeof(struct ipv4_addr));
+ CONFIG_MSG("dhcpd_ip_addr = %s\n", data);
+ }
+ else if (!strncmp("dhcpd_ip_mask=", full_param, len_param)) {
+ if (!bcm_atoipv4(data, &ipa_set)) {
+ CONFIG_ERROR("dhcpd_ip_mask adress setting failed\n");
+ return false;
+ }
+ memcpy(&conf->dhcpd_ip_mask, &ipa_set, sizeof(struct ipv4_addr));
+ CONFIG_MSG("dhcpd_ip_mask = %s\n", data);
+ }
+ else if (!strncmp("dhcpd_ip_start=", full_param, len_param)) {
+ if (!bcm_atoipv4(data, &ipa_set)) {
+ CONFIG_ERROR("dhcpd_ip_start adress setting failed\n");
+ return false;
+ }
+ memcpy(&conf->dhcpd_ip_start, &ipa_set, sizeof(struct ipv4_addr));
+ CONFIG_MSG("dhcpd_ip_start = %s\n", data);
+ }
+ else if (!strncmp("dhcpd_ip_end=", full_param, len_param)) {
+ if (!bcm_atoipv4(data, &ipa_set)) {
+ CONFIG_ERROR("dhcpd_ip_end adress setting failed\n");
+ return false;
+ }
+ memcpy(&conf->dhcpd_ip_end, &ipa_set, sizeof(struct ipv4_addr));
+ CONFIG_MSG("dhcpd_ip_end = %s\n", data);
+ }
+ else
+ return false;
+
+ return true;
+}
+#endif
+
+#ifdef BCMSDIO
+bool
+dhd_conf_read_sdio_params(dhd_pub_t *dhd, char *full_param, uint len_param)
+{
+ struct dhd_conf *conf = dhd->conf;
+ char *data = full_param+len_param;
+
+ if (!strncmp("dhd_doflow=", full_param, len_param)) {
+ if (!strncmp(data, "0", 1))
+ dhd_doflow = FALSE;
+ else
+ dhd_doflow = TRUE;
+ CONFIG_MSG("dhd_doflow = %d\n", dhd_doflow);
+ }
+ else if (!strncmp("dhd_slpauto=", full_param, len_param) ||
+ !strncmp("kso_enable=", full_param, len_param)) {
+ if (!strncmp(data, "0", 1))
+ dhd_slpauto = FALSE;
+ else
+ dhd_slpauto = TRUE;
+ CONFIG_MSG("dhd_slpauto = %d\n", dhd_slpauto);
+ }
+ else if (!strncmp("use_rxchain=", full_param, len_param)) {
+ conf->use_rxchain = (int)simple_strtol(data, NULL, 10);
+ CONFIG_MSG("use_rxchain = %d\n", conf->use_rxchain);
+ }
+ else if (!strncmp("dhd_txminmax=", full_param, len_param)) {
+ conf->dhd_txminmax = (uint)simple_strtol(data, NULL, 10);
+ CONFIG_MSG("dhd_txminmax = %d\n", conf->dhd_txminmax);
+ }
+ else if (!strncmp("txinrx_thres=", full_param, len_param)) {
+ conf->txinrx_thres = (int)simple_strtol(data, NULL, 10);
+ CONFIG_MSG("txinrx_thres = %d\n", conf->txinrx_thres);
+ }
+#ifdef DYNAMIC_MAX_HDR_READ
+ else if (!strncmp("max_hdr_read=", full_param, len_param)) {
+ conf->max_hdr_read = (int)simple_strtol(data, NULL, 10);
+ CONFIG_MSG("max_hdr_read = %d\n", conf->max_hdr_read);
+ }
+ else if (!strncmp("dhd_firstread=", full_param, len_param)) {
+ firstread = (int)simple_strtol(data, NULL, 10);
+ CONFIG_MSG("dhd_firstread = %d\n", firstread);
+ }
+#endif
+#if defined(HW_OOB)
+ else if (!strncmp("oob_enabled_later=", full_param, len_param)) {
+ if (!strncmp(data, "0", 1))
+ conf->oob_enabled_later = FALSE;
+ else
+ conf->oob_enabled_later = TRUE;
+ CONFIG_MSG("oob_enabled_later = %d\n", conf->oob_enabled_later);
+ }
+#endif
+ else if (!strncmp("dpc_cpucore=", full_param, len_param)) {
+ conf->dpc_cpucore = (int)simple_strtol(data, NULL, 10);
+ CONFIG_MSG("dpc_cpucore = %d\n", conf->dpc_cpucore);
+ }
+ else if (!strncmp("rxf_cpucore=", full_param, len_param)) {
+ conf->rxf_cpucore = (int)simple_strtol(data, NULL, 10);
+ CONFIG_MSG("rxf_cpucore = %d\n", conf->rxf_cpucore);
+ }
+ else if (!strncmp("dhd_dpc_prio=", full_param, len_param)) {
+ conf->dhd_dpc_prio = (int)simple_strtol(data, NULL, 10);
+ CONFIG_MSG("dhd_dpc_prio = %d\n", conf->dhd_dpc_prio);
+ }
+#if defined(BCMSDIOH_TXGLOM)
+ else if (!strncmp("txglomsize=", full_param, len_param)) {
+ conf->txglomsize = (uint)simple_strtol(data, NULL, 10);
+ if (conf->txglomsize > SDPCM_MAXGLOM_SIZE)
+ conf->txglomsize = SDPCM_MAXGLOM_SIZE;
+ CONFIG_MSG("txglomsize = %d\n", conf->txglomsize);
+ }
+ else if (!strncmp("txglom_ext=", full_param, len_param)) {
+ if (!strncmp(data, "0", 1))
+ conf->txglom_ext = FALSE;
+ else
+ conf->txglom_ext = TRUE;
+ CONFIG_MSG("txglom_ext = %d\n", conf->txglom_ext);
+ if (conf->txglom_ext) {
+ if ((conf->chip == BCM43362_CHIP_ID) || (conf->chip == BCM4330_CHIP_ID))
+ conf->txglom_bucket_size = 1680;
+ else if (conf->chip == BCM43340_CHIP_ID || conf->chip == BCM43341_CHIP_ID ||
+ conf->chip == BCM4334_CHIP_ID || conf->chip == BCM4324_CHIP_ID)
+ conf->txglom_bucket_size = 1684;
+ }
+ CONFIG_MSG("txglom_bucket_size = %d\n", conf->txglom_bucket_size);
+ }
+ else if (!strncmp("bus:rxglom=", full_param, len_param)) {
+ if (!strncmp(data, "0", 1))
+ conf->bus_rxglom = FALSE;
+ else
+ conf->bus_rxglom = TRUE;
+ CONFIG_MSG("bus:rxglom = %d\n", conf->bus_rxglom);
+ }
+ else if (!strncmp("deferred_tx_len=", full_param, len_param)) {
+ conf->deferred_tx_len = (int)simple_strtol(data, NULL, 10);
+ CONFIG_MSG("deferred_tx_len = %d\n", conf->deferred_tx_len);
+ }
+ else if (!strncmp("txctl_tmo_fix=", full_param, len_param)) {
+ conf->txctl_tmo_fix = (int)simple_strtol(data, NULL, 0);
+ CONFIG_MSG("txctl_tmo_fix = %d\n", conf->txctl_tmo_fix);
+ }
+ else if (!strncmp("tx_max_offset=", full_param, len_param)) {
+ conf->tx_max_offset = (int)simple_strtol(data, NULL, 10);
+ CONFIG_MSG("tx_max_offset = %d\n", conf->tx_max_offset);
+ }
+ else if (!strncmp("txglom_mode=", full_param, len_param)) {
+ if (!strncmp(data, "0", 1))
+ conf->txglom_mode = FALSE;
+ else
+ conf->txglom_mode = TRUE;
+ CONFIG_MSG("txglom_mode = %d\n", conf->txglom_mode);
+ }
+#if defined(SDIO_ISR_THREAD)
+ else if (!strncmp("intr_extn=", full_param, len_param)) {
+ if (!strncmp(data, "0", 1))
+ conf->intr_extn = FALSE;
+ else
+ conf->intr_extn = TRUE;
+ CONFIG_MSG("intr_extn = %d\n", conf->intr_extn);
+ }
+#endif
+#ifdef BCMSDIO_RXLIM_POST
+ else if (!strncmp("rxlim_en=", full_param, len_param)) {
+ if (!strncmp(data, "0", 1))
+ conf->rxlim_en = FALSE;
+ else
+ conf->rxlim_en = TRUE;
+ CONFIG_MSG("rxlim_en = %d\n", conf->rxlim_en);
+ }
+#endif
+#ifdef BCMSDIO_TXSEQ_SYNC
+ else if (!strncmp("txseq_sync=", full_param, len_param)) {
+ if (!strncmp(data, "0", 1))
+ conf->txseq_sync = FALSE;
+ else
+ conf->txseq_sync = TRUE;
+ CONFIG_MSG("txseq_sync = %d\n", conf->txseq_sync);
+ }
+#endif
+#endif
+#ifdef MINIME
+ else if (!strncmp("ramsize=", full_param, len_param)) {
+ conf->ramsize = (uint32)simple_strtol(data, NULL, 0);
+ CONFIG_MSG("ramsize = %d\n", conf->ramsize);
+ }
+#endif
+#ifdef BCMSDIO_INTSTATUS_WAR
+ else if (!strncmp("read_intr_mode=", full_param, len_param)) {
+ conf->read_intr_mode = (int)simple_strtol(data, NULL, 0);
+ CONFIG_MSG("read_intr_mode = %d\n", conf->read_intr_mode);
+ }
+#endif
+ else if (!strncmp("kso_try_max=", full_param, len_param)) {
+ conf->kso_try_max = (int)simple_strtol(data, NULL, 0);
+ CONFIG_MSG("kso_try_max = %d\n", conf->kso_try_max);
+ }
+ else
+ return false;
+
+ return true;
+}
+#endif
+
+#ifdef BCMPCIE
+bool
+dhd_conf_read_pcie_params(dhd_pub_t *dhd, char *full_param, uint len_param)
+{
+ struct dhd_conf *conf = dhd->conf;
+ char *data = full_param+len_param;
+
+ if (!strncmp("bus:deepsleep_disable=", full_param, len_param)) {
+ if (!strncmp(data, "0", 1))
+ conf->bus_deepsleep_disable = 0;
+ else
+ conf->bus_deepsleep_disable = 1;
+ CONFIG_MSG("bus:deepsleep_disable = %d\n", conf->bus_deepsleep_disable);
+ }
+ else if (!strncmp("flow_ring_queue_threshold=", full_param, len_param)) {
+ conf->flow_ring_queue_threshold = (int)simple_strtol(data, NULL, 10);
+ CONFIG_MSG("flow_ring_queue_threshold = %d\n", conf->flow_ring_queue_threshold);
+ }
+ else if (!strncmp("d2h_intr_control=", full_param, len_param)) {
+ conf->d2h_intr_control = (int)simple_strtol(data, NULL, 10);
+ CONFIG_MSG("d2h_intr_control = %d\n", conf->d2h_intr_control);
+ }
+ else
+ return false;
+
+ return true;
+}
+#endif
+
+bool
+dhd_conf_read_pm_params(dhd_pub_t *dhd, char *full_param, uint len_param)
+{
+ struct dhd_conf *conf = dhd->conf;
+ char *data = full_param+len_param;
+
+ if (!strncmp("deepsleep=", full_param, len_param)) {
+ if (!strncmp(data, "1", 1))
+ conf->deepsleep = TRUE;
+ else
+ conf->deepsleep = FALSE;
+ CONFIG_MSG("deepsleep = %d\n", conf->deepsleep);
+ }
+ else if (!strncmp("PM=", full_param, len_param)) {
+ conf->pm = (int)simple_strtol(data, NULL, 10);
+ CONFIG_MSG("PM = %d\n", conf->pm);
+ }
+ else if (!strncmp("pm_in_suspend=", full_param, len_param)) {
+ conf->pm_in_suspend = (int)simple_strtol(data, NULL, 10);
+ CONFIG_MSG("pm_in_suspend = %d\n", conf->pm_in_suspend);
+ }
+ else if (!strncmp("suspend_mode=", full_param, len_param)) {
+ conf->suspend_mode = (int)simple_strtol(data, NULL, 0);
+ CONFIG_MSG("suspend_mode = %d\n", conf->suspend_mode);
+ if (conf->suspend_mode == EARLY_SUSPEND)
+ conf->insuspend &= ~(NO_TXDATA_IN_SUSPEND | NO_TXCTL_IN_SUSPEND);
+ else if (conf->suspend_mode == PM_NOTIFIER ||
+ conf->suspend_mode == SUSPEND_MODE_2)
+ conf->insuspend |= (NO_TXDATA_IN_SUSPEND | NO_TXCTL_IN_SUSPEND);
+ CONFIG_MSG("insuspend = 0x%x\n", conf->insuspend);
+ }
+ else if (!strncmp("suspend_bcn_li_dtim=", full_param, len_param)) {
+ conf->suspend_bcn_li_dtim = (int)simple_strtol(data, NULL, 10);
+ CONFIG_MSG("suspend_bcn_li_dtim = %d\n", conf->suspend_bcn_li_dtim);
+ }
+ else if (!strncmp("xmit_in_suspend=", full_param, len_param)) {
+ if (!strncmp(data, "1", 1))
+ conf->insuspend &= ~NO_TXDATA_IN_SUSPEND;
+ else
+ conf->insuspend |= NO_TXDATA_IN_SUSPEND;
+ CONFIG_MSG("insuspend = 0x%x\n", conf->insuspend);
+ }
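+ /* insuspend is the raw *_IN_SUSPEND bitmask used throughout this file,
+ * e.g. NO_TXDATA_IN_SUSPEND | WOWL_IN_SUSPEND; base 0 accepts hex input. */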
+ else if (!strncmp("insuspend=", full_param, len_param)) {
+ conf->insuspend = (int)simple_strtol(data, NULL, 0);
+ CONFIG_MSG("insuspend = 0x%x\n", conf->insuspend);
+ }
+#ifdef WL_EXT_WOWL
+ else if (!strncmp("wowl=", full_param, len_param)) {
+ conf->wowl = (int)simple_strtol(data, NULL, 0);
+ CONFIG_MSG("wowl = 0x%x\n", conf->wowl);
+ }
+#endif
+ else if (!strncmp("rekey_offload=", full_param, len_param)) {
+ if (!strncmp(data, "1", 1))
+ conf->rekey_offload = TRUE;
+ else
+ conf->rekey_offload = FALSE;
+ CONFIG_MSG("rekey_offload = %d\n", conf->rekey_offload);
+ }
+ else
+ return false;
+
+ return true;
+}
+
+#ifdef GET_CUSTOM_MAC_FROM_CONFIG
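+/*
+ * Parse a delimited hex byte string (e.g. "0x11,0x22,...") into ea: each
+ * bcm_strtoul() consumes one byte and the loop then skips a single
+ * separator character. Returns nonzero only if exactly `size` bytes were
+ * parsed.
+ */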
+int
+bcm_str2hex(const char *p, char *ea, int size)
+{
+ int i = 0;
+ char *ep;
+
+ for (;;) {
+ ea[i++] = (char) bcm_strtoul(p, &ep, 16);
+ p = ep;
+ if (!*p++ || i == size)
+ break;
+ }
+
+ return (i == size);
+}
+#endif
+
+bool
+dhd_conf_read_others(dhd_pub_t *dhd, char *full_param, uint len_param)
+{
+ struct dhd_conf *conf = dhd->conf;
+ char *data = full_param+len_param;
+ char *pch, *pick_tmp;
+ int i;
+#ifdef GET_CUSTOM_MAC_FROM_CONFIG
+ struct ether_addr ea_addr;
+ char macpad[56];
+#endif
+
+ if (!strncmp("dhd_poll=", full_param, len_param)) {
+ if (!strncmp(data, "0", 1))
+ conf->dhd_poll = 0;
+ else
+ conf->dhd_poll = 1;
+ CONFIG_MSG("dhd_poll = %d\n", conf->dhd_poll);
+ }
+ else if (!strncmp("dhd_watchdog_ms=", full_param, len_param)) {
+ dhd_watchdog_ms = (int)simple_strtol(data, NULL, 10);
+ CONFIG_MSG("dhd_watchdog_ms = %d\n", dhd_watchdog_ms);
+ }
+ else if (!strncmp("band=", full_param, len_param)) {
+ /* Process band:
+ * band=a for 5GHz only and band=b for 2.4GHz only
+ */
+ if (!strcmp(data, "b"))
+ conf->band = WLC_BAND_2G;
+ else if (!strcmp(data, "a"))
+ conf->band = WLC_BAND_5G;
+ else
+ conf->band = WLC_BAND_AUTO;
+ CONFIG_MSG("band = %d\n", conf->band);
+ }
+ else if (!strncmp("bw_cap_2g=", full_param, len_param)) {
+ conf->bw_cap[0] = (uint)simple_strtol(data, NULL, 0);
+ CONFIG_MSG("bw_cap_2g = %d\n", conf->bw_cap[0]);
+ }
+ else if (!strncmp("bw_cap_5g=", full_param, len_param)) {
+ conf->bw_cap[1] = (uint)simple_strtol(data, NULL, 0);
+ CONFIG_MSG("bw_cap_5g = %d\n", conf->bw_cap[1]);
+ }
+ else if (!strncmp("bw_cap=", full_param, len_param)) {
+ pick_tmp = data;
+ pch = bcmstrtok(&pick_tmp, " ,.-", 0);
+ if (pch != NULL) {
+ conf->bw_cap[0] = (uint32)simple_strtol(pch, NULL, 0);
+ CONFIG_MSG("bw_cap 2g = %d\n", conf->bw_cap[0]);
+ }
+ pch = bcmstrtok(&pick_tmp, " ,.-", 0);
+ if (pch != NULL) {
+ conf->bw_cap[1] = (uint32)simple_strtol(pch, NULL, 0);
+ CONFIG_MSG("bw_cap 5g = %d\n", conf->bw_cap[1]);
+ }
+ }
+ else if (!strncmp("channels=", full_param, len_param)) {
+ pick_tmp = data;
+ pch = bcmstrtok(&pick_tmp, " ,.-", 0);
+ i=0;
+ while (pch != NULL && i<WL_NUMCHANNELS) {
+ conf->channels.channel[i] = (uint32)simple_strtol(pch, NULL, 10);
+ pch = bcmstrtok(&pick_tmp, " ,.-", 0);
+ i++;
+ }
+ conf->channels.count = i;
+ CONFIG_MSG("channels = ");
+ for (i=0; i<conf->channels.count; i++)
+ printk(KERN_CONT "%d ", conf->channels.channel[i]);
+ printk(KERN_CONT "\n");
+ }
+ else if (!strncmp("keep_alive_period=", full_param, len_param)) {
+ conf->keep_alive_period = (uint)simple_strtol(data, NULL, 10);
+ CONFIG_MSG("keep_alive_period = %d\n", conf->keep_alive_period);
+ }
+#ifdef ARP_OFFLOAD_SUPPORT
+ else if (!strncmp("garp=", full_param, len_param)) {
+ if (!strncmp(data, "0", 1))
+ conf->garp = FALSE;
+ else
+ conf->garp = TRUE;
+ CONFIG_MSG("garp = %d\n", conf->garp);
+ }
+#endif
+ else if (!strncmp("srl=", full_param, len_param)) {
+ conf->srl = (int)simple_strtol(data, NULL, 10);
+ CONFIG_MSG("srl = %d\n", conf->srl);
+ }
+ else if (!strncmp("lrl=", full_param, len_param)) {
+ conf->lrl = (int)simple_strtol(data, NULL, 10);
+ CONFIG_MSG("lrl = %d\n", conf->lrl);
+ }
+ else if (!strncmp("bcn_timeout=", full_param, len_param)) {
+ conf->bcn_timeout = (uint)simple_strtol(data, NULL, 10);
+ CONFIG_MSG("bcn_timeout = %d\n", conf->bcn_timeout);
+ }
+ else if (!strncmp("frameburst=", full_param, len_param)) {
+ conf->frameburst = (int)simple_strtol(data, NULL, 10);
+ CONFIG_MSG("frameburst = %d\n", conf->frameburst);
+ }
+ else if (!strncmp("disable_proptx=", full_param, len_param)) {
+ conf->disable_proptx = (int)simple_strtol(data, NULL, 10);
+ CONFIG_MSG("disable_proptx = %d\n", conf->disable_proptx);
+ }
+#ifdef DHDTCPACK_SUPPRESS
+ else if (!strncmp("tcpack_sup_mode=", full_param, len_param)) {
+ conf->tcpack_sup_mode = (uint)simple_strtol(data, NULL, 10);
+ CONFIG_MSG("tcpack_sup_mode = %d\n", conf->tcpack_sup_mode);
+ }
+ else if (!strncmp("tcpack_sup_ratio=", full_param, len_param)) {
+ conf->tcpack_sup_ratio = (uint)simple_strtol(data, NULL, 10);
+ CONFIG_MSG("tcpack_sup_ratio = %d\n", conf->tcpack_sup_ratio);
+ }
+ else if (!strncmp("tcpack_sup_delay=", full_param, len_param)) {
+ conf->tcpack_sup_delay = (uint)simple_strtol(data, NULL, 10);
+ CONFIG_MSG("tcpack_sup_delay = %d\n", conf->tcpack_sup_delay);
+ }
+#endif
+ else if (!strncmp("pktprio8021x=", full_param, len_param)) {
+ conf->pktprio8021x = (int)simple_strtol(data, NULL, 10);
+ CONFIG_MSG("pktprio8021x = %d\n", conf->pktprio8021x);
+ }
+#if defined(BCMSDIO) || defined(BCMPCIE)
+ else if (!strncmp("dhd_txbound=", full_param, len_param)) {
+ dhd_txbound = (uint)simple_strtol(data, NULL, 10);
+ CONFIG_MSG("dhd_txbound = %d\n", dhd_txbound);
+ }
+ else if (!strncmp("dhd_rxbound=", full_param, len_param)) {
+ dhd_rxbound = (uint)simple_strtol(data, NULL, 10);
+ CONFIG_MSG("dhd_rxbound = %d\n", dhd_rxbound);
+ }
+#endif
+ else if (!strncmp("orphan_move=", full_param, len_param)) {
+ conf->orphan_move = (int)simple_strtol(data, NULL, 10);
+ CONFIG_MSG("orphan_move = %d\n", conf->orphan_move);
+ }
+ else if (!strncmp("tsq=", full_param, len_param)) {
+ conf->tsq = (int)simple_strtol(data, NULL, 10);
+ CONFIG_MSG("tsq = %d\n", conf->tsq);
+ }
+ else if (!strncmp("ctrl_resched=", full_param, len_param)) {
+ conf->ctrl_resched = (int)simple_strtol(data, NULL, 10);
+ CONFIG_MSG("ctrl_resched = %d\n", conf->ctrl_resched);
+ }
+ else if (!strncmp("rxcnt_timeout=", full_param, len_param)) {
+ conf->rxcnt_timeout = (int)simple_strtol(data, NULL, 10);
+ CONFIG_MSG("rxcnt_timeout = %d\n", conf->rxcnt_timeout);
+ }
+ else if (!strncmp("in4way=", full_param, len_param)) {
+ conf->in4way = (int)simple_strtol(data, NULL, 0);
+ CONFIG_MSG("in4way = 0x%x\n", conf->in4way);
+ }
+ else if (!strncmp("war=", full_param, len_param)) {
+ conf->war = (int)simple_strtol(data, NULL, 0);
+ CONFIG_MSG("war = 0x%x\n", conf->war);
+ }
+ else if (!strncmp("wl_preinit=", full_param, len_param)) {
+ if (conf->wl_preinit) {
+ kfree(conf->wl_preinit);
+ conf->wl_preinit = NULL;
+ }
+ if (!(conf->wl_preinit = kmalloc(strlen(data)+1, GFP_KERNEL))) {
+ CONFIG_ERROR("kmalloc failed\n");
+ } else {
+ memset(conf->wl_preinit, 0, strlen(data)+1);
+ strcpy(conf->wl_preinit, data);
+ CONFIG_MSG("wl_preinit = %s\n", conf->wl_preinit);
+ }
+ }
+ else if (!strncmp("wl_suspend=", full_param, len_param)) {
+ if (conf->wl_suspend) {
+ kfree(conf->wl_suspend);
+ conf->wl_suspend = NULL;
+ }
+ if (!(conf->wl_suspend = kmalloc(strlen(data)+1, GFP_KERNEL))) {
+ CONFIG_ERROR("kmalloc failed\n");
+ } else {
+ memset(conf->wl_suspend, 0, strlen(data)+1);
+ strcpy(conf->wl_suspend, data);
+ CONFIG_MSG("wl_suspend = %s\n", conf->wl_suspend);
+ }
+ }
+ else if (!strncmp("wl_resume=", full_param, len_param)) {
+ if (conf->wl_resume) {
+ kfree(conf->wl_resume);
+ conf->wl_resume = NULL;
+ }
+ if (!(conf->wl_resume = kmalloc(strlen(data)+1, GFP_KERNEL))) {
+ CONFIG_ERROR("kmalloc failed\n");
+ } else {
+ memset(conf->wl_resume, 0, strlen(data)+1);
+ strcpy(conf->wl_resume, data);
+ CONFIG_MSG("wl_resume = %s\n", conf->wl_resume);
+ }
+ }
+#ifdef GET_CUSTOM_MAC_FROM_CONFIG
+ else if (!strncmp("mac=", full_param, len_param)) {
+ if (!bcm_ether_atoe(data, &ea_addr)) {
+ CONFIG_ERROR("mac adress read error");
+ return false;
+ }
+ memcpy(&conf->hw_ether, &ea_addr, ETHER_ADDR_LEN);
+ CONFIG_MSG("mac = %s\n", data);
+ }
+ else if (!strncmp("macpad=", full_param, len_param)) {
+ if (!bcm_str2hex(data, macpad, sizeof(macpad))) {
+ CONFIG_ERROR("macpad adress read error");
+ return false;
+ }
+ memcpy(&conf->hw_ether[ETHER_ADDR_LEN], macpad, sizeof(macpad));
+ if (config_msg_level & CONFIG_TRACE_LEVEL) {
+ CONFIG_MSG("macpad =\n");
+ for (i=0; i<sizeof(macpad); i++) {
+ printk(KERN_CONT "0x%02x, ", conf->hw_ether[ETHER_ADDR_LEN+i]);
+ if ((i+1)%8 == 0)
+ printk(KERN_CONT "\n");
+ }
+ }
+ }
+#endif
+#ifdef PROPTX_MAXCOUNT
+ else if (!strncmp("proptx_maxcnt_2g=", full_param, len_param)) {
+ conf->proptx_maxcnt_2g = (int)simple_strtol(data, NULL, 0);
+ CONFIG_MSG("proptx_maxcnt_2g = %d\n", conf->proptx_maxcnt_2g);
+ }
+ else if (!strncmp("proptx_maxcnt_5g=", full_param, len_param)) {
+ conf->proptx_maxcnt_5g = (int)simple_strtol(data, NULL, 0);
+ CONFIG_MSG("proptx_maxcnt_5g = %d\n", conf->proptx_maxcnt_5g);
+ }
+#endif
+#ifdef TPUT_MONITOR
+ else if (!strncmp("data_drop_mode=", full_param, len_param)) {
+ conf->data_drop_mode = (int)simple_strtol(data, NULL, 0);
+ CONFIG_MSG("data_drop_mode = %d\n", conf->data_drop_mode);
+ }
+ else if (!strncmp("tput_monitor_ms=", full_param, len_param)) {
+ conf->tput_monitor_ms = (int)simple_strtol(data, NULL, 0);
+ CONFIG_MSG("tput_monitor_ms = %d\n", conf->tput_monitor_ms);
+ }
+#ifdef BCMSDIO
+ else if (!strncmp("doflow_tput_thresh=", full_param, len_param)) {
+ conf->doflow_tput_thresh = (int)simple_strtol(data, NULL, 0);
+ CONFIG_MSG("doflow_tput_thresh = %d\n", conf->doflow_tput_thresh);
+ if (conf->doflow_tput_thresh > 0)
+ conf->tput_monitor_ms = 1000;
+ }
+#endif
+#endif
+#ifdef SCAN_SUPPRESS
+ else if (!strncmp("scan_intput=", full_param, len_param)) {
+ conf->scan_intput = (int)simple_strtol(data, NULL, 0);
+ CONFIG_MSG("scan_intput = 0x%x\n", conf->scan_intput);
+ }
+ else if (!strncmp("scan_tput_thresh=", full_param, len_param)) {
+ conf->scan_tput_thresh = (int)simple_strtol(data, NULL, 0);
+ CONFIG_MSG("scan_tput_thresh = %d\n", conf->scan_tput_thresh);
+ if (conf->scan_tput_thresh > 0)
+ conf->tput_monitor_ms = 1000;
+ }
+ else if (!strncmp("scan_busy_tmo=", full_param, len_param)) {
+ conf->scan_busy_tmo = (int)simple_strtol(data, NULL, 0);
+ CONFIG_MSG("scan_busy_tmo = %d\n", conf->scan_busy_tmo);
+ }
+ else if (!strncmp("scan_busy_thresh=", full_param, len_param)) {
+ conf->scan_busy_thresh = (int)simple_strtol(data, NULL, 0);
+ CONFIG_MSG("scan_busy_thresh = %d\n", conf->scan_busy_thresh);
+ }
+#endif
+#ifdef DHD_TPUT_PATCH
+ else if (!strncmp("tput_patch=", full_param, len_param)) {
+ if (!strncmp(data, "1", 1))
+ conf->tput_patch = TRUE;
+ else
+ conf->tput_patch = FALSE;
+ CONFIG_MSG("tput_patch = %d\n", conf->tput_patch);
+ dhd_conf_set_tput_patch(dhd);
+ }
+ else if (!strncmp("mtu=", full_param, len_param)) {
+ conf->mtu = (int)simple_strtol(data, NULL, 0);
+ CONFIG_MSG("mtu = %d\n", conf->mtu);
+ }
+ else if (!strncmp("pktsetsum=", full_param, len_param)) {
+ if (!strncmp(data, "1", 1))
+ conf->pktsetsum = TRUE;
+ else
+ conf->pktsetsum = FALSE;
+ CONFIG_MSG("pktsetsum = %d\n", conf->pktsetsum);
+ }
+#endif
+#ifdef SET_XPS_CPUS
+ else if (!strncmp("xps_cpus=", full_param, len_param)) {
+ if (!strncmp(data, "1", 1))
+ conf->xps_cpus = TRUE;
+ else
+ conf->xps_cpus = FALSE;
+ CONFIG_MSG("xps_cpus = %d\n", conf->xps_cpus);
+ }
+#endif
+#ifdef SET_RPS_CPUS
+ else if (!strncmp("rps_cpus=", full_param, len_param)) {
+ if (!strncmp(data, "1", 1))
+ conf->rps_cpus = TRUE;
+ else
+ conf->rps_cpus = FALSE;
+ CONFIG_MSG("rps_cpus = %d\n", conf->rps_cpus);
+ }
+#endif
+#ifdef CHECK_DOWNLOAD_FW
+ else if (!strncmp("fwchk=", full_param, len_param)) {
+ if (!strncmp(data, "1", 1))
+ conf->fwchk = TRUE;
+ else
+ conf->fwchk = FALSE;
+ CONFIG_MSG("fwchk = %d\n", conf->fwchk);
+ }
+#endif
+ else if (!strncmp("vndr_ie_assocreq=", full_param, len_param)) {
+ if (conf->vndr_ie_assocreq) {
+ kfree(conf->vndr_ie_assocreq);
+ conf->vndr_ie_assocreq = NULL;
+ }
+ if (!(conf->vndr_ie_assocreq = kmalloc(strlen(data)+1, GFP_KERNEL))) {
+ CONFIG_ERROR("kmalloc failed\n");
+ } else {
+ memset(conf->vndr_ie_assocreq, 0, strlen(data)+1);
+ strcpy(conf->vndr_ie_assocreq, data);
+ CONFIG_MSG("vndr_ie_assocreq = %s\n", conf->vndr_ie_assocreq);
+ }
+ }
+ else
+ return false;
+
+ return true;
+}
+
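+/*
+ * Parse the external config file pointed to by conf_path (typically
+ * config.txt). The file holds one "name=value" pair per entry, e.g.
+ * (values illustrative only):
+ *
+ *   in4way=0x7
+ *   orphan_move=1
+ *   wl_preinit=assoc_retry_max=10
+ *
+ * Each pair is extracted with pick_config_vars() and dispatched to the
+ * per-category readers below; entries without '=' or without a value
+ * are skipped with an error message.
+ */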
+int
+dhd_conf_read_config(dhd_pub_t *dhd, char *conf_path)
+{
+ int bcmerror = -1, chip_match = -1;
+ uint len = 0, start_pos=0, end_pos=0;
+ void *image = NULL;
+ char *memblock = NULL;
+ char *bufp, *pick = NULL, *pch;
+ bool conf_file_exists;
+ uint len_param;
+
+	conf_file_exists = ((conf_path != NULL) && (conf_path[0] != '\0'));
+	if (!conf_file_exists) {
+		CONFIG_MSG("config path is null or empty, skip reading\n");
+		return (0);
+	}
+
+	image = dhd_os_open_image1(dhd, conf_path);
+	if (image == NULL) {
+		CONFIG_MSG("Ignore config file %s\n", conf_path);
+		goto err;
+	}
+
+ memblock = MALLOC(dhd->osh, MAXSZ_CONFIG);
+ if (memblock == NULL) {
+ CONFIG_ERROR("Failed to allocate memory %d bytes\n", MAXSZ_CONFIG);
+ goto err;
+ }
+
+ pick = MALLOC(dhd->osh, MAXSZ_BUF);
+ if (!pick) {
+ CONFIG_ERROR("Failed to allocate memory %d bytes\n", MAXSZ_BUF);
+ goto err;
+ }
+
+	/* Read variables */
+	len = dhd_os_get_image_block(memblock, MAXSZ_CONFIG, image);
+ if (len > 0 && len < MAXSZ_CONFIG) {
+ bufp = (char *)memblock;
+ bufp[len] = 0;
+
+ while (start_pos < len) {
+ memset(pick, 0, MAXSZ_BUF);
+ end_pos = pick_config_vars(bufp, len, start_pos, pick, MAXSZ_BUF);
+ if (end_pos - start_pos >= MAXSZ_BUF)
+ CONFIG_ERROR("out of buf to read MAXSIZ_BUF=%d\n", MAXSZ_BUF);
+ start_pos = end_pos;
+ pch = strchr(pick, '=');
+ if (pch != NULL) {
+ len_param = pch-pick+1;
+ if (len_param == strlen(pick)) {
+ CONFIG_ERROR("not a right parameter %s\n", pick);
+ continue;
+ }
+ } else {
+ CONFIG_ERROR("not a right parameter %s\n", pick);
+ continue;
+ }
+
+ dhd_conf_read_chiprev(dhd, &chip_match, pick, len_param);
+ if (!chip_match)
+ continue;
+
+ if (dhd_conf_read_log_level(dhd, pick, len_param))
+ continue;
+ else if (dhd_conf_read_roam_params(dhd, pick, len_param))
+ continue;
+ else if (dhd_conf_read_wme_ac_params(dhd, pick, len_param))
+ continue;
+#ifdef SET_FWNV_BY_MAC
+ else if (dhd_conf_read_fw_by_mac(dhd, pick, len_param))
+ continue;
+ else if (dhd_conf_read_nv_by_mac(dhd, pick, len_param))
+ continue;
+#endif
+ else if (dhd_conf_read_nv_by_chip(dhd, pick, len_param))
+ continue;
+ else if (dhd_conf_read_country(dhd, pick, len_param))
+ continue;
+ else if (dhd_conf_read_mchan_params(dhd, pick, len_param))
+ continue;
+#ifdef PKT_FILTER_SUPPORT
+ else if (dhd_conf_read_pkt_filter(dhd, pick, len_param))
+ continue;
+#endif /* PKT_FILTER_SUPPORT */
+#ifdef ISAM_PREINIT
+ else if (dhd_conf_read_isam(dhd, pick, len_param))
+ continue;
+#endif /* ISAM_PREINIT */
+#ifdef IDHCP
+ else if (dhd_conf_read_dhcp_params(dhd, pick, len_param))
+ continue;
+#endif /* IDHCP */
+#ifdef BCMSDIO
+ else if (dhd_conf_read_sdio_params(dhd, pick, len_param))
+ continue;
+#endif /* BCMSDIO */
+#ifdef BCMPCIE
+ else if (dhd_conf_read_pcie_params(dhd, pick, len_param))
+ continue;
+#endif /* BCMPCIE */
+ else if (dhd_conf_read_pm_params(dhd, pick, len_param))
+ continue;
+ else if (dhd_conf_read_others(dhd, pick, len_param))
+ continue;
+ else
+ continue;
+ }
+
+ bcmerror = 0;
+ } else {
+ CONFIG_ERROR("error reading config file: %d\n", len);
+ bcmerror = BCME_SDIO_ERROR;
+ }
+
+err:
+ if (pick)
+ MFREE(dhd->osh, pick, MAXSZ_BUF);
+
+ if (memblock)
+ MFREE(dhd->osh, memblock, MAXSZ_CONFIG);
+
+ if (image)
+ dhd_os_close_image1(dhd, image);
+
+ return bcmerror;
+}
+
+#if defined(BCMSDIO) || defined(BCMPCIE)
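+/* Cache the bus device ID (and, on PCIe, the subsystem vendor/device
+ * IDs) from the platform adapter for later chip/config matching.
+ */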
+void
+dhd_conf_set_devid(dhd_pub_t *dhd)
+{
+ wifi_adapter_info_t *adapter = NULL;
+ uint32 bus_type = -1;
+ uint32 bus_num = -1;
+ uint32 slot_num = -1;
+
+ dhd_bus_get_ids(dhd->bus, &bus_type, &bus_num, &slot_num);
+ adapter = dhd_wifi_platform_get_adapter(bus_type, bus_num, slot_num);
+ if (adapter) {
+#if defined(BCMSDIO)
+ dhd->conf->devid = adapter->sdio_func->device;
+#endif
+#if defined(BCMPCIE)
+ dhd->conf->devid = adapter->pci_dev->device;
+ dhd->conf->svid = adapter->pci_dev->subsystem_vendor;
+ dhd->conf->ssid = adapter->pci_dev->subsystem_device;
+#endif
+ } else {
+ CONFIG_ERROR("can't find adapter\n");
+ }
+
+ return;
+}
+#endif
+
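+/* Record the chip ID and revision reported by the bus so firmware,
+ * NVRAM and config selection can key off them.
+ */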
+int
+dhd_conf_set_chiprev(dhd_pub_t *dhd, uint chip, uint chiprev)
+{
+#if defined(BCMSDIO) || defined(BCMPCIE)
+ dhd_conf_set_devid(dhd);
+#endif
+ dhd->conf->chip = chip;
+ dhd->conf->chiprev = chiprev;
+
+#if defined(BCMSDIO)
+ CONFIG_MSG("devid=0x%x, chip=0x%x, chiprev=%d\n",
+ dhd->conf->devid, dhd->conf->chip, dhd->conf->chiprev);
+#endif
+#if defined(BCMPCIE)
+ CONFIG_MSG("devid=0x%x, chip=0x%x, chiprev=%d, svid=0x%04x, ssid=0x%04x\n",
+ dhd->conf->devid, dhd->conf->chip, dhd->conf->chiprev,
+ dhd->conf->svid, dhd->conf->ssid);
+#endif
+#if defined(BCMDBUS)
+ CONFIG_MSG("chip=0x%x, chiprev=%d\n", dhd->conf->chip, dhd->conf->chiprev);
+#endif
+
+ return 0;
+}
+
+uint
+dhd_conf_get_chip(void *context)
+{
+ dhd_pub_t *dhd = context;
+
+ if (dhd && dhd->conf)
+ return dhd->conf->chip;
+ return 0;
+}
+
+uint
+dhd_conf_get_chiprev(void *context)
+{
+ dhd_pub_t *dhd = context;
+
+ if (dhd && dhd->conf)
+ return dhd->conf->chiprev;
+ return 0;
+}
+
+#ifdef BCMSDIO
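+/* Enable or disable SDIO tx glomming. On enable, the legacy chips
+ * listed below only support the copy-based glom path and are forced to
+ * SDPCM_TXGLOM_CPY; on disable, all glom parameters are cleared.
+ */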
+void
+dhd_conf_set_txglom_params(dhd_pub_t *dhd, bool enable)
+{
+ struct dhd_conf *conf = dhd->conf;
+
+ if (enable) {
+#if defined(BCMSDIOH_TXGLOM_EXT)
+ if (conf->chip == BCM43362_CHIP_ID || conf->chip == BCM4330_CHIP_ID ||
+ conf->chip == BCM43340_CHIP_ID || conf->chip == BCM43341_CHIP_ID ||
+ conf->chip == BCM4334_CHIP_ID || conf->chip == BCM4324_CHIP_ID) {
+ conf->txglom_mode = SDPCM_TXGLOM_CPY;
+ }
+#endif
+ // other parameters set in preinit or config.txt
+ if (conf->txglom_ext)
+ CONFIG_MSG("txglom_ext=%d, txglom_bucket_size=%d\n",
+ conf->txglom_ext, conf->txglom_bucket_size);
+ CONFIG_MSG("txglom_mode=%s\n",
+ conf->txglom_mode==SDPCM_TXGLOM_MDESC?"multi-desc":"copy");
+ CONFIG_MSG("txglomsize=%d, deferred_tx_len=%d\n",
+ conf->txglomsize, conf->deferred_tx_len);
+ CONFIG_MSG("txinrx_thres=%d, dhd_txminmax=%d\n",
+ conf->txinrx_thres, conf->dhd_txminmax);
+ CONFIG_MSG("tx_max_offset=%d, txctl_tmo_fix=%d\n",
+ conf->tx_max_offset, conf->txctl_tmo_fix);
+ } else {
+ // clear txglom parameters
+ conf->txglom_ext = FALSE;
+ conf->txglom_bucket_size = 0;
+ conf->txglomsize = 0;
+ conf->deferred_tx_len = 0;
+ }
+
+}
+#endif
+
+#ifdef UPDATE_MODULE_NAME
+#if defined(BCMSDIO) || defined(BCMPCIE)
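+/* Compatibility helper: disable VHT features via the wl command path. */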
+static void
+dhd_conf_compat_vht(dhd_pub_t *dhd)
+{
+ char vht_features[] = "vht_features=0";
+
+ CONFIG_TRACE("Enter\n");
+
+ dhd_conf_set_wl_cmd(dhd, vht_features, TRUE);
+}
+#endif
+
+int
+dhd_conf_compat_func(dhd_pub_t *dhd)
+{
+ const module_name_map_t* row = NULL;
+
+ row = dhd_conf_match_module(dhd);
+ if (row && row->compat_func) {
+ row->compat_func(dhd);
+ }
+
+ return 0;
+}
+#endif
+
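+/*
+ * Apply all post-firmware-download settings: country code, band, PM
+ * mode, retry limits, roaming, chip-specific tuning (txbf, ampdu_mpdu,
+ * HE, easymesh) and, last of all, the user-supplied wl_preinit commands
+ * from the config file.
+ */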
+void
+dhd_conf_postinit_ioctls(dhd_pub_t *dhd)
+{
+ struct dhd_conf *conf = dhd->conf;
+ char wl_preinit[] = "assoc_retry_max=10";
+#ifdef NO_POWER_SAVE
+ char wl_no_power_save[] = "mpc=0, 86=0";
+ dhd_conf_set_wl_cmd(dhd, wl_no_power_save, FALSE);
+#endif
+
+ dhd_conf_set_intiovar(dhd, 0, WLC_UP, "WLC_UP", 0, 0, FALSE);
+ dhd_conf_map_country_list(dhd, &conf->cspec);
+ dhd_conf_set_country(dhd, &conf->cspec);
+ dhd_conf_fix_country(dhd);
+ dhd_conf_get_country(dhd, &dhd->dhd_cspec);
+
+ dhd_conf_set_intiovar(dhd, 0, WLC_SET_BAND, "WLC_SET_BAND", conf->band, 0, FALSE);
+ dhd_conf_set_intiovar(dhd, 0, WLC_SET_VAR, "bcn_timeout", conf->bcn_timeout, 0, FALSE);
+ dhd_conf_set_intiovar(dhd, 0, WLC_SET_PM, "WLC_SET_PM", conf->pm, 0, FALSE);
+ dhd_conf_set_intiovar(dhd, 0, WLC_SET_SRL, "WLC_SET_SRL", conf->srl, 0, FALSE);
+ dhd_conf_set_intiovar(dhd, 0, WLC_SET_LRL, "WLC_SET_LRL", conf->lrl, 0, FALSE);
+ dhd_conf_set_bw_cap(dhd);
+ dhd_conf_set_roam(dhd);
+
+#if defined(BCMPCIE)
+ dhd_conf_set_intiovar(dhd, 0, WLC_SET_VAR, "bus:deepsleep_disable",
+ conf->bus_deepsleep_disable, 0, FALSE);
+#endif /* defined(BCMPCIE) */
+
+#ifdef IDHCP
+ dhd_conf_set_intiovar(dhd, 0, WLC_SET_VAR, "dhcpc_enable", conf->dhcpc_enable,
+ 0, FALSE);
+ if (conf->dhcpd_enable >= 0) {
+ dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "dhcpd_ip_addr",
+ (char *)&conf->dhcpd_ip_addr, sizeof(conf->dhcpd_ip_addr), FALSE);
+ dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "dhcpd_ip_mask",
+ (char *)&conf->dhcpd_ip_mask, sizeof(conf->dhcpd_ip_mask), FALSE);
+ dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "dhcpd_ip_start",
+ (char *)&conf->dhcpd_ip_start, sizeof(conf->dhcpd_ip_start), FALSE);
+ dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "dhcpd_ip_end",
+ (char *)&conf->dhcpd_ip_end, sizeof(conf->dhcpd_ip_end), FALSE);
+ dhd_conf_set_intiovar(dhd, 0, WLC_SET_VAR, "dhcpd_enable",
+ conf->dhcpd_enable, 0, FALSE);
+ }
+#endif
+ dhd_conf_set_intiovar(dhd, 0, WLC_SET_FAKEFRAG, "WLC_SET_FAKEFRAG",
+ conf->frameburst, 0, FALSE);
+
+ dhd_conf_set_wl_cmd(dhd, wl_preinit, TRUE);
+#if defined(BCMSDIO)
+ if (conf->chip == BCM43751_CHIP_ID || conf->chip == BCM43752_CHIP_ID ||
+ conf->chip == BCM4375_CHIP_ID) {
+ char ampdu_mpdu[] = "ampdu_mpdu=32";
+ dhd_conf_set_wl_cmd(dhd, ampdu_mpdu, TRUE);
+ } else {
+ char ampdu_mpdu[] = "ampdu_mpdu=16";
+ dhd_conf_set_wl_cmd(dhd, ampdu_mpdu, TRUE);
+ }
+#endif
+
+#ifdef DHD_TPUT_PATCH
+ if (dhd->conf->mtu)
+ dhd_change_mtu(dhd, dhd->conf->mtu, 0);
+#endif
+ if (conf->chip == BCM4354_CHIP_ID || conf->chip == BCM4356_CHIP_ID ||
+ conf->chip == BCM4371_CHIP_ID || conf->chip == BCM4359_CHIP_ID ||
+ conf->chip == BCM43569_CHIP_ID ||
+ conf->chip == BCM43751_CHIP_ID || conf->chip == BCM43752_CHIP_ID ||
+ conf->chip == BCM4375_CHIP_ID) {
+ dhd_conf_set_intiovar(dhd, 0, WLC_SET_VAR, "txbf", 1, 0, FALSE);
+ }
+ if (conf->chip == BCM4375_CHIP_ID) {
+ char he_cmd[] = "110=1, nmode=1, vhtmode=1, he=enab 1";
+ dhd_conf_set_wl_cmd(dhd, he_cmd, TRUE);
+ }
+ if (conf->chip == BCM43752_CHIP_ID || conf->chip == BCM4359_CHIP_ID) {
+ char txack_alive[] = "txack_alive=0";
+ dhd_conf_set_wl_cmd(dhd, txack_alive, TRUE);
+ }
+#if defined(WLEASYMESH)
+ if (conf->fw_type == FW_TYPE_EZMESH) {
+ if (conf->chip == BCM4359_CHIP_ID) {
+ char ezmesh[] = "mbss=1, rsdb_mode=0";
+ dhd_conf_set_wl_cmd(dhd, ezmesh, TRUE);
+ } else {
+ char ezmesh[] = "mbss=1";
+ dhd_conf_set_wl_cmd(dhd, ezmesh, TRUE);
+ }
+ }
+#endif /* WLEASYMESH */
+#if defined(BCMSDIO)
+ if (conf->devid == BCM43751_CHIP_ID)
+#elif defined(BCMPCIE)
+ if (conf->devid == BCM43751_D11AX_ID)
+#endif
+ {
+ if (FW_SUPPORTED(dhd, mbo)) {
+ char he_features[] = "he=enab 0,he=features 0";
+ dhd_conf_set_wl_cmd(dhd, he_features, TRUE);
+ }
+ }
+#ifdef UPDATE_MODULE_NAME
+ dhd_conf_compat_func(dhd);
+#endif
+#ifndef SUPPORT_RANDOM_MAC_SCAN
+ {
+ char scanmac[] = "scanmac=enable 0";
+ dhd_conf_set_wl_cmd(dhd, scanmac, TRUE);
+ }
+#endif
+ dhd_conf_set_wl_cmd(dhd, conf->wl_preinit, TRUE);
+
+#ifndef WL_CFG80211
+ dhd_conf_set_intiovar(dhd, 0, WLC_UP, "WLC_UP", 0, 0, FALSE);
+#endif
+
+}
+
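+/*
+ * Reset every tunable in dhd_conf to its built-in default and free any
+ * lists left over from a previous load. This runs before the config
+ * file is parsed, so config.txt values override these defaults.
+ */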
+int
+dhd_conf_preinit(dhd_pub_t *dhd)
+{
+ struct dhd_conf *conf = dhd->conf;
+
+ CONFIG_TRACE("Enter\n");
+
+#ifdef SET_FWNV_BY_MAC
+ dhd_conf_free_mac_list(&conf->fw_by_mac);
+ dhd_conf_free_mac_list(&conf->nv_by_mac);
+#endif
+ dhd_conf_free_chip_nv_path_list(&conf->nv_by_chip);
+ dhd_conf_free_country_list(conf);
+ dhd_conf_free_mchan_list(conf);
+#ifdef PKT_FILTER_SUPPORT
+ if (conf->magic_pkt_filter_add) {
+ kfree(conf->magic_pkt_filter_add);
+ conf->magic_pkt_filter_add = NULL;
+ }
+#endif
+ if (conf->wl_preinit) {
+ kfree(conf->wl_preinit);
+ conf->wl_preinit = NULL;
+ }
+ if (conf->wl_suspend) {
+ kfree(conf->wl_suspend);
+ conf->wl_suspend = NULL;
+ }
+ if (conf->wl_resume) {
+ kfree(conf->wl_resume);
+ conf->wl_resume = NULL;
+ }
+ if (conf->vndr_ie_assocreq) {
+ kfree(conf->vndr_ie_assocreq);
+ conf->vndr_ie_assocreq = NULL;
+ }
+ conf->band = -1;
+ memset(&conf->bw_cap, -1, sizeof(conf->bw_cap));
+ if (conf->chip == BCM43362_CHIP_ID || conf->chip == BCM4330_CHIP_ID) {
+ strcpy(conf->cspec.country_abbrev, "ALL");
+ strcpy(conf->cspec.ccode, "ALL");
+ conf->cspec.rev = 0;
+ } else if (conf->chip == BCM4335_CHIP_ID || conf->chip == BCM4339_CHIP_ID ||
+ conf->chip == BCM4354_CHIP_ID || conf->chip == BCM4356_CHIP_ID ||
+ conf->chip == BCM4345_CHIP_ID || conf->chip == BCM4371_CHIP_ID ||
+ conf->chip == BCM43569_CHIP_ID || conf->chip == BCM4359_CHIP_ID ||
+ conf->chip == BCM4375_CHIP_ID) {
+ strcpy(conf->cspec.country_abbrev, "CN");
+ strcpy(conf->cspec.ccode, "CN");
+ conf->cspec.rev = 38;
+ } else {
+ strcpy(conf->cspec.country_abbrev, "CN");
+ strcpy(conf->cspec.ccode, "CN");
+ conf->cspec.rev = 0;
+ }
+ memset(&conf->channels, 0, sizeof(wl_channel_list_t));
+ conf->roam_off = 1;
+ conf->roam_off_suspend = 1;
+ conf->roam_trigger[0] = -65;
+ conf->roam_trigger[1] = WLC_BAND_ALL;
+ conf->roam_scan_period[0] = 10;
+ conf->roam_scan_period[1] = WLC_BAND_ALL;
+ conf->roam_delta[0] = 10;
+ conf->roam_delta[1] = WLC_BAND_ALL;
+ conf->fullroamperiod = 20;
+ conf->keep_alive_period = 30000;
+#ifdef ARP_OFFLOAD_SUPPORT
+ conf->garp = FALSE;
+#endif
+ conf->force_wme_ac = 0;
+ memset(&conf->wme_sta, 0, sizeof(wme_param_t));
+ memset(&conf->wme_ap, 0, sizeof(wme_param_t));
+#ifdef PKT_FILTER_SUPPORT
+ memset(&conf->pkt_filter_add, 0, sizeof(conf_pkt_filter_add_t));
+ memset(&conf->pkt_filter_del, 0, sizeof(conf_pkt_filter_del_t));
+#endif
+ conf->srl = -1;
+ conf->lrl = -1;
+ conf->bcn_timeout = 16;
+ conf->disable_proptx = -1;
+ conf->dhd_poll = -1;
+#ifdef BCMSDIO
+ conf->use_rxchain = 0;
+ conf->bus_rxglom = TRUE;
+ conf->txglom_ext = FALSE;
+ conf->tx_max_offset = 0;
+ conf->txglomsize = SDPCM_DEFGLOM_SIZE;
+ conf->txctl_tmo_fix = 300;
+ conf->txglom_mode = SDPCM_TXGLOM_CPY;
+ conf->deferred_tx_len = 0;
+ conf->dhd_txminmax = 1;
+ conf->txinrx_thres = -1;
+#ifdef MINIME
+ conf->ramsize = 0x80000;
+#endif
+#if defined(SDIO_ISR_THREAD)
+ conf->intr_extn = FALSE;
+#endif
+#ifdef BCMSDIO_RXLIM_POST
+ conf->rxlim_en = FALSE;
+#endif
+#ifdef BCMSDIO_TXSEQ_SYNC
+ conf->txseq_sync = FALSE;
+#endif
+#if defined(HW_OOB)
+ conf->oob_enabled_later = FALSE;
+#endif
+#ifdef BCMSDIO_INTSTATUS_WAR
+ conf->read_intr_mode = 0;
+#endif
+ conf->kso_try_max = 0;
+#ifdef KSO_DEBUG
+ memset(&conf->kso_try_array, 0, sizeof(conf->kso_try_array));
+#endif
+#endif
+#ifdef BCMPCIE
+ conf->bus_deepsleep_disable = 1;
+ conf->flow_ring_queue_threshold = FLOW_RING_QUEUE_THRESHOLD;
+ conf->d2h_intr_method = -1;
+ conf->d2h_intr_control = -1;
+#endif
+ conf->dpc_cpucore = -1;
+ conf->rxf_cpucore = -1;
+ conf->dhd_dpc_prio = -1;
+ conf->frameburst = -1;
+ conf->deepsleep = FALSE;
+ conf->pm = -1;
+ conf->pm_in_suspend = -1;
+ conf->insuspend = 0;
+ conf->suspend_mode = PM_NOTIFIER;
+ conf->suspend_bcn_li_dtim = -1;
+ conf->rekey_offload = FALSE;
+#ifdef WL_EXT_WOWL
+ dhd_master_mode = TRUE;
+ conf->wowl = WL_WOWL_NET|WL_WOWL_DIS|WL_WOWL_BCN;
+ conf->insuspend |= (WOWL_IN_SUSPEND | NO_TXDATA_IN_SUSPEND);
+#endif
+ if (conf->suspend_mode == PM_NOTIFIER || conf->suspend_mode == SUSPEND_MODE_2)
+ conf->insuspend |= (NO_TXDATA_IN_SUSPEND | NO_TXCTL_IN_SUSPEND);
+ conf->suspended = FALSE;
+ memset(&conf->bssid_insuspend, 0, ETHER_ADDR_LEN);
+#ifdef SUSPEND_EVENT
+ memset(&conf->resume_eventmask, 0, sizeof(conf->resume_eventmask));
+ conf->wlfc = FALSE;
+#endif
+#ifdef GET_CUSTOM_MAC_FROM_CONFIG
+ memset(&conf->hw_ether, 0, sizeof(conf->hw_ether));
+#endif
+#ifdef IDHCP
+ conf->dhcpc_enable = -1;
+ conf->dhcpd_enable = -1;
+#endif
+ conf->orphan_move = 0;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+ conf->tsq = 10;
+#else
+ conf->tsq = 0;
+#endif
+#ifdef DHDTCPACK_SUPPRESS
+#ifdef BCMPCIE
+ conf->tcpack_sup_mode = TCPACK_SUP_HOLD;
+#else
+ conf->tcpack_sup_mode = TCPACK_SUP_OFF;
+#endif
+ conf->tcpack_sup_ratio = CUSTOM_TCPACK_SUPP_RATIO;
+ conf->tcpack_sup_delay = CUSTOM_TCPACK_DELAY_TIME;
+#endif
+ conf->pktprio8021x = -1;
+ conf->ctrl_resched = 2;
+ conf->rxcnt_timeout = 3;
+ conf->in4way = STA_NO_SCAN_IN4WAY | STA_WAIT_DISCONNECTED |
+ AP_WAIT_STA_RECONNECT;
+ if (conf->chip == BCM43752_CHIP_ID)
+ conf->war = SET_CHAN_INCONN | FW_REINIT_INCSA | FW_REINIT_EMPTY_SCAN;
+ else
+ conf->war = 0;
+#ifdef P2P_AP_CONCURRENT
+ conf->war |= P2P_AP_MAC_CONFLICT;
+#endif
+#ifdef PROPTX_MAXCOUNT
+ conf->proptx_maxcnt_2g = 46;
+ conf->proptx_maxcnt_5g = WL_TXSTATUS_FREERUNCTR_MASK;
+#endif /* PROPTX_MAXCOUNT */
+#ifdef TPUT_MONITOR
+ conf->data_drop_mode = NO_DATA_DROP;
+ conf->tput_monitor_ms = 0;
+#ifdef BCMSDIO
+ if (conf->chip == BCM43752_CHIP_ID || conf->chip == BCM4375_CHIP_ID)
+ conf->doflow_tput_thresh = 200;
+ else
+ conf->doflow_tput_thresh = 9999;
+ if (conf->doflow_tput_thresh > 0 && conf->doflow_tput_thresh < 9999)
+ conf->tput_monitor_ms = 1000;
+#endif
+#endif
+#ifdef SCAN_SUPPRESS
+ conf->scan_intput = SCAN_CURCHAN_INTPUT;
+ conf->scan_busy_thresh = 10;
+ conf->scan_busy_tmo = 120;
+ if (conf->chip == BCM43752_CHIP_ID || conf->chip == BCM4375_CHIP_ID)
+ conf->scan_tput_thresh = 100;
+ else
+ conf->scan_tput_thresh = 9999;
+ if (conf->scan_tput_thresh > 0 && conf->scan_tput_thresh < 9999)
+ conf->tput_monitor_ms = 1000;
+#endif
+#ifdef DHD_TPUT_PATCH
+ conf->tput_patch = FALSE;
+ conf->mtu = 0;
+ conf->pktsetsum = FALSE;
+#endif
+#ifdef SET_XPS_CPUS
+ conf->xps_cpus = FALSE;
+#endif
+#ifdef SET_RPS_CPUS
+ conf->rps_cpus = FALSE;
+#endif
+#ifdef CHECK_DOWNLOAD_FW
+ conf->fwchk = FALSE;
+#endif
+#ifdef ISAM_PREINIT
+ memset(conf->isam_init, 0, sizeof(conf->isam_init));
+ memset(conf->isam_config, 0, sizeof(conf->isam_config));
+ memset(conf->isam_enable, 0, sizeof(conf->isam_enable));
+#endif
+#if defined(SDIO_ISR_THREAD)
+ if (conf->chip == BCM43012_CHIP_ID ||
+ conf->chip == BCM4335_CHIP_ID || conf->chip == BCM4339_CHIP_ID ||
+ conf->chip == BCM43454_CHIP_ID || conf->chip == BCM4345_CHIP_ID ||
+ conf->chip == BCM4354_CHIP_ID || conf->chip == BCM4356_CHIP_ID ||
+ conf->chip == BCM4345_CHIP_ID || conf->chip == BCM4371_CHIP_ID ||
+ conf->chip == BCM4359_CHIP_ID ||
+ conf->chip == BCM43751_CHIP_ID || conf->chip == BCM43752_CHIP_ID ||
+ conf->chip == BCM4375_CHIP_ID) {
+ conf->intr_extn = TRUE;
+ }
+#endif
+ if ((conf->chip == BCM43430_CHIP_ID && conf->chiprev == 2) ||
+ conf->chip == BCM43012_CHIP_ID ||
+ conf->chip == BCM4335_CHIP_ID || conf->chip == BCM4339_CHIP_ID ||
+ conf->chip == BCM43454_CHIP_ID || conf->chip == BCM4345_CHIP_ID ||
+ conf->chip == BCM4354_CHIP_ID || conf->chip == BCM4356_CHIP_ID ||
+ conf->chip == BCM4345_CHIP_ID || conf->chip == BCM4371_CHIP_ID ||
+ conf->chip == BCM43569_CHIP_ID || conf->chip == BCM4359_CHIP_ID ||
+ conf->chip == BCM43751_CHIP_ID || conf->chip == BCM43752_CHIP_ID ||
+ conf->chip == BCM4375_CHIP_ID) {
+#ifdef DHDTCPACK_SUPPRESS
+#ifdef BCMSDIO
+ conf->tcpack_sup_mode = TCPACK_SUP_REPLACE;
+#endif
+#endif
+#if defined(BCMSDIO) || defined(BCMPCIE)
+ dhd_rxbound = 128;
+ dhd_txbound = 64;
+#endif
+ conf->frameburst = 1;
+#ifdef BCMSDIO
+ conf->dhd_txminmax = -1;
+ conf->txinrx_thres = 128;
+#endif
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+ conf->orphan_move = 1;
+#else
+ conf->orphan_move = 0;
+#endif
+ }
+#ifdef DHD_TPUT_PATCH
+ if (conf->chip == BCM43751_CHIP_ID || conf->chip == BCM43752_CHIP_ID ||
+ conf->chip == BCM4375_CHIP_ID) {
+ conf->tput_patch = TRUE;
+ dhd_conf_set_tput_patch(dhd);
+ }
+#endif
+
+#ifdef BCMSDIO
+#if defined(BCMSDIOH_TXGLOM_EXT)
+ if (conf->chip == BCM43362_CHIP_ID || conf->chip == BCM4330_CHIP_ID ||
+ conf->chip == BCM43340_CHIP_ID || conf->chip == BCM43341_CHIP_ID ||
+ conf->chip == BCM4334_CHIP_ID || conf->chip == BCM4324_CHIP_ID) {
+ conf->txglom_ext = TRUE;
+ } else {
+ conf->txglom_ext = FALSE;
+ }
+ if (conf->chip == BCM43362_CHIP_ID || conf->chip == BCM4330_CHIP_ID) {
+ conf->txglom_bucket_size = 1680; // fixed value, don't change
+ conf->txglomsize = 6;
+ }
+ if (conf->chip == BCM4334_CHIP_ID || conf->chip == BCM43340_CHIP_ID ||
+ conf->chip == BCM43341_CHIP_ID || conf->chip == BCM4324_CHIP_ID) {
+ conf->txglom_bucket_size = 1684; // fixed value, don't change
+ conf->txglomsize = 16;
+ }
+#endif
+ if (conf->txglomsize > SDPCM_MAXGLOM_SIZE)
+ conf->txglomsize = SDPCM_MAXGLOM_SIZE;
+#endif
+ init_waitqueue_head(&conf->event_complete);
+
+#if defined(BCMSDIO) && defined(CUSTOMER_HW_AMLOGIC)
+ if (conf->chip != BCM43752_CHIP_ID) {
+ dhd_slpauto = FALSE;
+ }
+ conf->txglom_mode = SDPCM_TXGLOM_CPY;
+ conf->rekey_offload = TRUE;
+#endif
+
+ return 0;
+}
+
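+/* Free all dynamically allocated config members and zero the struct. */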
+int
+dhd_conf_reset(dhd_pub_t *dhd)
+{
+ struct dhd_conf *conf = dhd->conf;
+
+#ifdef SET_FWNV_BY_MAC
+ dhd_conf_free_mac_list(&conf->fw_by_mac);
+ dhd_conf_free_mac_list(&conf->nv_by_mac);
+#endif
+ dhd_conf_free_chip_nv_path_list(&conf->nv_by_chip);
+ dhd_conf_free_country_list(conf);
+ dhd_conf_free_mchan_list(conf);
+#ifdef PKT_FILTER_SUPPORT
+ if (conf->magic_pkt_filter_add) {
+ kfree(conf->magic_pkt_filter_add);
+ conf->magic_pkt_filter_add = NULL;
+ }
+#endif
+ if (conf->wl_preinit) {
+ kfree(conf->wl_preinit);
+ conf->wl_preinit = NULL;
+ }
+ if (conf->wl_suspend) {
+ kfree(conf->wl_suspend);
+ conf->wl_suspend = NULL;
+ }
+ if (conf->wl_resume) {
+ kfree(conf->wl_resume);
+ conf->wl_resume = NULL;
+ }
+ if (conf->vndr_ie_assocreq) {
+ kfree(conf->vndr_ie_assocreq);
+ conf->vndr_ie_assocreq = NULL;
+ }
+ memset(conf, 0, sizeof(dhd_conf_t));
+ return 0;
+}
+
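+/* Allocate and zero the per-device config structure. */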
+int
+dhd_conf_attach(dhd_pub_t *dhd)
+{
+ dhd_conf_t *conf;
+
+ CONFIG_TRACE("Enter\n");
+
+ if (dhd->conf != NULL) {
+ CONFIG_MSG("config is attached before!\n");
+ return 0;
+ }
+ /* Allocate private bus interface state */
+ if (!(conf = MALLOC(dhd->osh, sizeof(dhd_conf_t)))) {
+ CONFIG_ERROR("MALLOC failed\n");
+ goto fail;
+ }
+ memset(conf, 0, sizeof(dhd_conf_t));
+
+ dhd->conf = conf;
+
+ return 0;
+
+fail:
+ if (conf != NULL)
+ MFREE(dhd->osh, conf, sizeof(dhd_conf_t));
+ return BCME_NOMEM;
+}
+
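+/* Release the config structure and everything it owns. */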
+void
+dhd_conf_detach(dhd_pub_t *dhd)
+{
+ struct dhd_conf *conf = dhd->conf;
+
+ CONFIG_TRACE("Enter\n");
+ if (dhd->conf) {
+#ifdef SET_FWNV_BY_MAC
+ dhd_conf_free_mac_list(&conf->fw_by_mac);
+ dhd_conf_free_mac_list(&conf->nv_by_mac);
+#endif
+ dhd_conf_free_chip_nv_path_list(&conf->nv_by_chip);
+ dhd_conf_free_country_list(conf);
+ dhd_conf_free_mchan_list(conf);
+#ifdef PKT_FILTER_SUPPORT
+ if (conf->magic_pkt_filter_add) {
+ kfree(conf->magic_pkt_filter_add);
+ conf->magic_pkt_filter_add = NULL;
+ }
+#endif
+ if (conf->wl_preinit) {
+ kfree(conf->wl_preinit);
+ conf->wl_preinit = NULL;
+ }
+ if (conf->wl_suspend) {
+ kfree(conf->wl_suspend);
+ conf->wl_suspend = NULL;
+ }
+ if (conf->wl_resume) {
+ kfree(conf->wl_resume);
+ conf->wl_resume = NULL;
+ }
+ if (conf->vndr_ie_assocreq) {
+ kfree(conf->vndr_ie_assocreq);
+ conf->vndr_ie_assocreq = NULL;
+ }
+ MFREE(dhd->osh, conf, sizeof(dhd_conf_t));
+ }
+ dhd->conf = NULL;
+}
diff --git a/bcmdhd.101.10.361.x/dhd_config.h b/bcmdhd.101.10.361.x/dhd_config.h
new file mode 100755
index 0000000..306c2b8
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_config.h
@@ -0,0 +1,441 @@
+
+#ifndef _dhd_config_
+#define _dhd_config_
+
+#include <bcmdevs.h>
+#include <siutils.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <wlioctl.h>
+#include <802.11.h>
+
+#define FW_TYPE_STA 0
+#define FW_TYPE_APSTA 1
+#define FW_TYPE_P2P 2
+#define FW_TYPE_MESH 3
+#define FW_TYPE_EZMESH 4
+#define FW_TYPE_ES 5
+#define FW_TYPE_MFG 6
+#define FW_TYPE_MINIME 7
+#define FW_TYPE_G 0
+#define FW_TYPE_AG 1
+
+#define FW_PATH_AUTO_SELECT 1
+#define CONFIG_PATH_AUTO_SELECT
+extern char firmware_path[MOD_PARAM_PATHLEN];
+#if defined(BCMSDIO) || defined(BCMPCIE)
+extern uint dhd_rxbound;
+extern uint dhd_txbound;
+#endif
+#ifdef BCMSDIO
+#define TXGLOM_RECV_OFFSET 8
+extern uint dhd_doflow;
+extern uint dhd_slpauto;
+#endif
+
+#ifdef SET_FWNV_BY_MAC
+typedef struct wl_mac_range {
+ uint32 oui;
+ uint32 nic_start;
+ uint32 nic_end;
+} wl_mac_range_t;
+
+typedef struct wl_mac_list {
+ int count;
+ wl_mac_range_t *mac;
+ char name[MOD_PARAM_PATHLEN];
+} wl_mac_list_t;
+
+typedef struct wl_mac_list_ctrl {
+ int count;
+ struct wl_mac_list *m_mac_list_head;
+} wl_mac_list_ctrl_t;
+#endif
+
+typedef struct wl_chip_nv_path {
+ uint chip;
+ uint chiprev;
+ char name[MOD_PARAM_PATHLEN];
+} wl_chip_nv_path_t;
+
+typedef struct wl_chip_nv_path_list_ctrl {
+ int count;
+ struct wl_chip_nv_path *m_chip_nv_path_head;
+} wl_chip_nv_path_list_ctrl_t;
+
+typedef struct wl_channel_list {
+ uint32 count;
+ uint32 channel[WL_NUMCHANNELS];
+} wl_channel_list_t;
+
+typedef struct wmes_param {
+ int aifsn[AC_COUNT];
+ int ecwmin[AC_COUNT];
+ int ecwmax[AC_COUNT];
+ int txop[AC_COUNT];
+} wme_param_t;
+
+#ifdef PKT_FILTER_SUPPORT
+#define DHD_CONF_FILTER_MAX 8
+#define PKT_FILTER_LEN 300
+#define MAGIC_PKT_FILTER_LEN 450
+typedef struct conf_pkt_filter_add {
+ uint32 count;
+ char filter[DHD_CONF_FILTER_MAX][PKT_FILTER_LEN];
+} conf_pkt_filter_add_t;
+
+typedef struct conf_pkt_filter_del {
+ uint32 count;
+ uint32 id[DHD_CONF_FILTER_MAX];
+} conf_pkt_filter_del_t;
+#endif
+
+#define CONFIG_COUNTRY_LIST_SIZE 500
+typedef struct country_list {
+ struct country_list *next;
+ wl_country_t cspec;
+} country_list_t;
+
+/* mchan_params */
+#define MCHAN_MAX_NUM 4
+#define MIRACAST_SOURCE 1
+#define MIRACAST_SINK 2
+typedef struct mchan_params {
+ struct mchan_params *next;
+ int bw;
+ int p2p_mode;
+ int miracast_mode;
+} mchan_params_t;
+
+#ifdef SCAN_SUPPRESS
+enum scan_intput_flags {
+ NO_SCAN_INTPUT = (1 << (0)),
+ SCAN_CURCHAN_INTPUT = (1 << (1)),
+ SCAN_LIGHT_INTPUT = (1 << (2)),
+};
+#endif
+
+enum war_flags {
+ SET_CHAN_INCONN = (1 << (0)),
+ FW_REINIT_INCSA = (1 << (1)),
+ FW_REINIT_EMPTY_SCAN = (1 << (2)),
+ P2P_AP_MAC_CONFLICT = (1 << (3)),
+ RESEND_EAPOL_PKT = (1 << (4))
+};
+
+enum in4way_flags {
+ STA_NO_SCAN_IN4WAY = (1 << (0)),
+ STA_NO_BTC_IN4WAY = (1 << (1)),
+ STA_WAIT_DISCONNECTED = (1 << (2)),
+ AP_WAIT_STA_RECONNECT = (1 << (3)),
+ STA_FAKE_SCAN_IN_CONNECT = (1 << (4)),
+ STA_REASSOC_RETRY = (1 << (5)),
+};
+
+enum in_suspend_flags {
+ NO_EVENT_IN_SUSPEND = (1 << (0)),
+ NO_TXDATA_IN_SUSPEND = (1 << (1)),
+ NO_TXCTL_IN_SUSPEND = (1 << (2)),
+ AP_DOWN_IN_SUSPEND = (1 << (3)),
+ ROAM_OFFLOAD_IN_SUSPEND = (1 << (4)),
+ AP_FILTER_IN_SUSPEND = (1 << (5)),
+ WOWL_IN_SUSPEND = (1 << (6)),
+ ALL_IN_SUSPEND = 0xFFFFFFFF,
+};
+
+enum in_suspend_mode {
+ EARLY_SUSPEND = 0,
+ PM_NOTIFIER = 1,
+ SUSPEND_MODE_2 = 2
+};
+
+#ifdef TPUT_MONITOR
+enum data_drop_mode {
+ NO_DATA_DROP = -1,
+ FW_DROP = 0,
+ TXPKT_DROP = 1,
+ XMIT_DROP = 2
+};
+#endif
+
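+/* Connection progress states tracked across association, WPS and the
+ * 4-way/group-key handshakes.
+ */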
+enum conn_state {
+ CONN_STATE_IDLE = 0,
+ CONN_STATE_CONNECTING = 1,
+ CONN_STATE_AUTH_SAE_M1 = 2,
+ CONN_STATE_AUTH_SAE_M2 = 3,
+ CONN_STATE_AUTH_SAE_M3 = 4,
+ CONN_STATE_AUTH_SAE_M4 = 5,
+ CONN_STATE_REQID = 6,
+ CONN_STATE_RSPID = 7,
+ CONN_STATE_WSC_START = 8,
+ CONN_STATE_WPS_M1 = 9,
+ CONN_STATE_WPS_M2 = 10,
+ CONN_STATE_WPS_M3 = 11,
+ CONN_STATE_WPS_M4 = 12,
+ CONN_STATE_WPS_M5 = 13,
+ CONN_STATE_WPS_M6 = 14,
+ CONN_STATE_WPS_M7 = 15,
+ CONN_STATE_WPS_M8 = 16,
+ CONN_STATE_WSC_DONE = 17,
+ CONN_STATE_4WAY_M1 = 18,
+ CONN_STATE_4WAY_M2 = 19,
+ CONN_STATE_4WAY_M3 = 20,
+ CONN_STATE_4WAY_M4 = 21,
+ CONN_STATE_CONNECTED = 22,
+ CONN_STATE_GROUPKEY_M1 = 23,
+ CONN_STATE_GROUPKEY_M2 = 24,
+};
+
+typedef struct dhd_conf {
+ uint devid;
+ uint chip;
+ uint chiprev;
+#if defined(BCMPCIE)
+ uint svid;
+ uint ssid;
+#endif
+#ifdef GET_OTP_MODULE_NAME
+ char module_name[16];
+#endif
+ struct ether_addr otp_mac;
+ int fw_type;
+#ifdef SET_FWNV_BY_MAC
+ wl_mac_list_ctrl_t fw_by_mac;
+ wl_mac_list_ctrl_t nv_by_mac;
+#endif
+ wl_chip_nv_path_list_ctrl_t nv_by_chip;
+ country_list_t *country_head;
+ int band;
+ int bw_cap[2];
+ wl_country_t cspec;
+ wl_channel_list_t channels;
+ uint roam_off;
+ uint roam_off_suspend;
+ int roam_trigger[2];
+ int roam_scan_period[2];
+ int roam_delta[2];
+ int fullroamperiod;
+ uint keep_alive_period;
+ bool rekey_offload;
+#ifdef ARP_OFFLOAD_SUPPORT
+ bool garp;
+#endif
+ int force_wme_ac;
+ wme_param_t wme_sta;
+ wme_param_t wme_ap;
+#ifdef PKT_FILTER_SUPPORT
+ conf_pkt_filter_add_t pkt_filter_add;
+ conf_pkt_filter_del_t pkt_filter_del;
+ char *magic_pkt_filter_add;
+#endif
+ int srl;
+ int lrl;
+ uint bcn_timeout;
+ int disable_proptx;
+ int dhd_poll;
+#ifdef BCMSDIO
+ int use_rxchain;
+ bool bus_rxglom;
+ bool txglom_ext; /* Only for 43362/4330/43340/43341/43241 */
+	/* terence 20161011:
+	 * 1) conf->tx_max_offset = 1 to fix a credit issue in adaptivity testing
+	 * 2) conf->tx_max_offset = 1 causes UDP Tx to fail when rxglom is
+	 *    supported, but not with sw txglom
+	 */
+ int tx_max_offset;
+ uint txglomsize;
+ int txctl_tmo_fix;
+ bool txglom_mode;
+ uint deferred_tx_len;
+ /*txglom_bucket_size:
+ * 43362/4330: 1680
+ * 43340/43341/43241: 1684
+ */
+ int txglom_bucket_size;
+ int txinrx_thres;
+ int dhd_txminmax; // -1=DATABUFCNT(bus)
+#ifdef DYNAMIC_MAX_HDR_READ
+ int max_hdr_read;
+#endif
+ bool oob_enabled_later;
+#ifdef MINIME
+ uint32 ramsize;
+#endif
+#if defined(SDIO_ISR_THREAD)
+ bool intr_extn;
+#endif
+#ifdef BCMSDIO_RXLIM_POST
+ bool rxlim_en;
+#endif
+#ifdef BCMSDIO_TXSEQ_SYNC
+ bool txseq_sync;
+#endif
+#ifdef BCMSDIO_INTSTATUS_WAR
+ uint read_intr_mode;
+#endif
+ int kso_try_max;
+#ifdef KSO_DEBUG
+ uint kso_try_array[10];
+#endif
+#endif
+#ifdef BCMPCIE
+ int bus_deepsleep_disable;
+ int flow_ring_queue_threshold;
+ int d2h_intr_method;
+ int d2h_intr_control;
+#endif
+ int dpc_cpucore;
+ int rxf_cpucore;
+ int dhd_dpc_prio;
+ int frameburst;
+ bool deepsleep;
+ int pm;
+ int pm_in_suspend;
+ int suspend_mode;
+ int suspend_bcn_li_dtim;
+#ifdef DHDTCPACK_SUPPRESS
+ uint8 tcpack_sup_mode;
+ uint32 tcpack_sup_ratio;
+ uint32 tcpack_sup_delay;
+#endif
+ int pktprio8021x;
+ uint insuspend;
+ bool suspended;
+ struct ether_addr bssid_insuspend;
+#ifdef SUSPEND_EVENT
+ char resume_eventmask[WL_EVENTING_MASK_LEN];
+ bool wlfc;
+#endif
+#ifdef IDHCP
+ int dhcpc_enable;
+ int dhcpd_enable;
+ struct ipv4_addr dhcpd_ip_addr;
+ struct ipv4_addr dhcpd_ip_mask;
+ struct ipv4_addr dhcpd_ip_start;
+ struct ipv4_addr dhcpd_ip_end;
+#endif
+#ifdef ISAM_PREINIT
+ char isam_init[50];
+ char isam_config[300];
+ char isam_enable[50];
+#endif
+ int ctrl_resched;
+ uint rxcnt_timeout;
+ mchan_params_t *mchan;
+ char *wl_preinit;
+ char *wl_suspend;
+ char *wl_resume;
+ int tsq;
+ int orphan_move;
+ uint in4way;
+ uint war;
+#ifdef WL_EXT_WOWL
+ uint wowl;
+#endif
+#ifdef GET_CUSTOM_MAC_FROM_CONFIG
+ char hw_ether[62];
+#endif
+ wait_queue_head_t event_complete;
+#ifdef PROPTX_MAXCOUNT
+ int proptx_maxcnt_2g;
+ int proptx_maxcnt_5g;
+#endif /* PROPTX_MAXCOUNT */
+#ifdef TPUT_MONITOR
+ int data_drop_mode;
+ unsigned long net_len;
+ uint tput_monitor_ms;
+ struct osl_timespec tput_ts;
+ unsigned long last_tx;
+ unsigned long last_rx;
+ unsigned long last_net_tx;
+#ifdef BCMSDIO
+ int32 doflow_tput_thresh;
+#endif
+#endif
+#ifdef SCAN_SUPPRESS
+ uint scan_intput;
+ int scan_busy_thresh;
+ int scan_busy_tmo;
+ int32 scan_tput_thresh;
+#endif
+#ifdef DHD_TPUT_PATCH
+ bool tput_patch;
+ int mtu;
+ bool pktsetsum;
+#endif
+#ifdef SET_XPS_CPUS
+ bool xps_cpus;
+#endif
+#ifdef SET_RPS_CPUS
+ bool rps_cpus;
+#endif
+#ifdef CHECK_DOWNLOAD_FW
+ bool fwchk;
+#endif
+ char *vndr_ie_assocreq;
+} dhd_conf_t;
+
+#ifdef BCMSDIO
+void dhd_conf_get_otp(dhd_pub_t *dhd, bcmsdh_info_t *sdh, si_t *sih);
+#if defined(HW_OOB) || defined(FORCE_WOWLAN)
+void dhd_conf_set_hw_oob_intr(bcmsdh_info_t *sdh, struct si_pub *sih);
+#endif
+void dhd_conf_set_txglom_params(dhd_pub_t *dhd, bool enable);
+#endif
+#ifdef BCMPCIE
+int dhd_conf_get_otp(dhd_pub_t *dhd, si_t *sih);
+bool dhd_conf_legacy_msi_chip(dhd_pub_t *dhd);
+#endif
+void dhd_conf_set_path_params(dhd_pub_t *dhd, char *fw_path, char *nv_path);
+int dhd_conf_set_intiovar(dhd_pub_t *dhd, int ifidx, uint cmd, char *name,
+ int val, int def, bool down);
+int dhd_conf_get_band(dhd_pub_t *dhd);
+int dhd_conf_set_country(dhd_pub_t *dhd, wl_country_t *cspec);
+int dhd_conf_get_country(dhd_pub_t *dhd, wl_country_t *cspec);
+int dhd_conf_map_country_list(dhd_pub_t *dhd, wl_country_t *cspec);
+#ifdef CCODE_LIST
+int dhd_ccode_map_country_list(dhd_pub_t *dhd, wl_country_t *cspec);
+#endif
+int dhd_conf_fix_country(dhd_pub_t *dhd);
+bool dhd_conf_match_channel(dhd_pub_t *dhd, uint32 channel);
+void dhd_conf_set_wme(dhd_pub_t *dhd, int ifidx, int mode);
+void dhd_conf_set_mchan_bw(dhd_pub_t *dhd, int go, int source);
+void dhd_conf_add_pkt_filter(dhd_pub_t *dhd);
+bool dhd_conf_del_pkt_filter(dhd_pub_t *dhd, uint32 id);
+void dhd_conf_discard_pkt_filter(dhd_pub_t *dhd);
+int dhd_conf_read_config(dhd_pub_t *dhd, char *conf_path);
+int dhd_conf_set_chiprev(dhd_pub_t *dhd, uint chip, uint chiprev);
+uint dhd_conf_get_chip(void *context);
+uint dhd_conf_get_chiprev(void *context);
+int dhd_conf_get_pm(dhd_pub_t *dhd);
+int dhd_conf_check_hostsleep(dhd_pub_t *dhd, int cmd, void *buf, int len,
+ int *hostsleep_set, int *hostsleep_val, int *ret);
+void dhd_conf_get_hostsleep(dhd_pub_t *dhd,
+ int hostsleep_set, int hostsleep_val, int ret);
+int dhd_conf_mkeep_alive(dhd_pub_t *dhd, int ifidx, int id, int period,
+ char *packet, bool bcast);
+#ifdef ARP_OFFLOAD_SUPPORT
+void dhd_conf_set_garp(dhd_pub_t *dhd, int ifidx, uint32 ipa, bool enable);
+#endif
+#ifdef PROP_TXSTATUS
+int dhd_conf_get_disable_proptx(dhd_pub_t *dhd);
+#endif
+#ifdef TPUT_MONITOR
+void dhd_conf_tput_monitor(dhd_pub_t *dhd);
+#endif
+uint dhd_conf_get_insuspend(dhd_pub_t *dhd, uint mask);
+int dhd_conf_set_suspend_resume(dhd_pub_t *dhd, int suspend);
+void dhd_conf_postinit_ioctls(dhd_pub_t *dhd);
+int dhd_conf_preinit(dhd_pub_t *dhd);
+int dhd_conf_reset(dhd_pub_t *dhd);
+int dhd_conf_attach(dhd_pub_t *dhd);
+void dhd_conf_detach(dhd_pub_t *dhd);
+void *dhd_get_pub(struct net_device *dev);
+int wl_pattern_atoh(char *src, char *dst);
+int dhd_conf_suspend_resume_sta(dhd_pub_t *dhd, int ifidx, int suspend);
+/* Add to adjust 802.1x priority */
+extern void pktset8021xprio(void *pkt, int prio);
+#ifdef BCMSDIO
+extern int dhd_bus_sleep(dhd_pub_t *dhdp, bool sleep, uint32 *intstatus);
+#endif
+#endif /* _dhd_config_ */
diff --git a/bcmdhd.101.10.361.x/dhd_csi.c b/bcmdhd.101.10.361.x/dhd_csi.c
new file mode 100755
index 0000000..5cacb9c
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_csi.c
@@ -0,0 +1,219 @@
+/*
+ * Broadcom Dongle Host Driver (DHD)
+ *
+ * Copyright (C) 1999-2018, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_csi.c 606280 2015-12-15 05:28:25Z $
+ */
+#include <osl.h>
+
+#include <bcmutils.h>
+
+#include <bcmendian.h>
+#include <linuxver.h>
+#include <linux/list.h>
+#include <linux/sort.h>
+#include <dngl_stats.h>
+#include <wlioctl.h>
+
+#include <bcmevent.h>
+#include <dhd.h>
+#include <dhd_dbg.h>
+#include <dhd_csi.h>
+
+#define NULL_CHECK(p, s, err) \
+ do { \
+ if (!(p)) { \
+ printf("NULL POINTER (%s) : %s\n", __FUNCTION__, (s)); \
+ err = BCME_ERROR; \
+ return err; \
+ } \
+ } while (0)
+
+#define TIMESPEC_TO_US(ts) (((uint64)(ts).tv_sec * USEC_PER_SEC) + \
+ (ts).tv_nsec / NSEC_PER_USEC)
+
+#define NULL_ADDR "\x00\x00\x00\x00\x00\x00"
+
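+/*
+ * Collect CSI (channel state information) dump events from firmware.
+ * Dumps larger than MAX_EVENT_SIZE arrive in chunks; header.remain_length
+ * tracks how much data is still outstanding for a peer, and the chunks
+ * are reassembled into that peer's entry on dhd->csi_list.
+ */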
+int
+dhd_csi_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data)
+{
+ int ret = BCME_OK;
+ bool is_new = TRUE;
+ cfr_dump_data_t *p_event;
+ cfr_dump_list_t *ptr, *next, *new;
+
+ NULL_CHECK(dhd, "dhd is NULL", ret);
+
+ DHD_TRACE(("Enter %s\n", __FUNCTION__));
+
+ if (!event_data) {
+ DHD_ERROR(("%s: event_data is NULL\n", __FUNCTION__));
+ return -EINVAL;
+ }
+ p_event = (cfr_dump_data_t *)event_data;
+
+ /* check if this addr exist */
+ if (!list_empty(&dhd->csi_list)) {
+ list_for_each_entry_safe(ptr, next, &dhd->csi_list, list) {
+ if (bcmp(&ptr->entry.header.peer_macaddr, &p_event->header.peer_macaddr,
+ ETHER_ADDR_LEN) == 0) {
+ int pos = 0, dump_len = 0, remain = 0;
+ is_new = FALSE;
+ DHD_INFO(("CSI data exist\n"));
+ if (p_event->header.status == 0) {
+ bcopy(&p_event->header, &ptr->entry.header, sizeof(cfr_dump_header_t));
+ dump_len = p_event->header.cfr_dump_length;
+ if (dump_len < MAX_EVENT_SIZE) {
+ bcopy(&p_event->data, &ptr->entry.data, dump_len);
+ } else {
+ /* for big csi data */
+ uint8 *p = (uint8 *)&ptr->entry.data;
+ remain = p_event->header.remain_length;
+ if (remain) {
+ pos = dump_len - remain - MAX_EVENT_SIZE;
+ p += pos;
+ bcopy(&p_event->data, p, MAX_EVENT_SIZE);
+ }
+ /* copy rest of csi data */
+ else {
+ pos = dump_len - (dump_len % MAX_EVENT_SIZE);
+ p += pos;
+ bcopy(&p_event->data, p, (dump_len % MAX_EVENT_SIZE));
+ }
+ }
+ return BCME_OK;
+ }
+ }
+ }
+ }
+ if (is_new) {
+ if (dhd->csi_count < MAX_CSI_NUM) {
+ new = (cfr_dump_list_t *)MALLOCZ(dhd->osh, sizeof(cfr_dump_list_t));
+			if (!new) {
+				DHD_ERROR(("Failed to allocate cfr dump entry\n"));
+				return BCME_NOMEM;
+			}
+ bcopy(&p_event->header, &new->entry.header, sizeof(cfr_dump_header_t));
+ DHD_INFO(("New entry data size %d\n", p_event->header.cfr_dump_length));
+ /* for big csi data */
+ if (p_event->header.remain_length) {
+ DHD_TRACE(("remain %d\n", p_event->header.remain_length));
+ bcopy(&p_event->data, &new->entry.data, MAX_EVENT_SIZE);
+ }
+ else
+ bcopy(&p_event->data, &new->entry.data, p_event->header.cfr_dump_length);
+ INIT_LIST_HEAD(&(new->list));
+ list_add_tail(&(new->list), &dhd->csi_list);
+ dhd->csi_count++;
+ }
+ else {
+ DHD_TRACE(("Over maximum CSI Number 8. SKIP it.\n"));
+ }
+ }
+ return ret;
+}
+
+int
+dhd_csi_init(dhd_pub_t *dhd)
+{
+ int err = BCME_OK;
+
+ NULL_CHECK(dhd, "dhd is NULL", err);
+ INIT_LIST_HEAD(&dhd->csi_list);
+ dhd->csi_count = 0;
+
+ return err;
+}
+
+int
+dhd_csi_deinit(dhd_pub_t *dhd)
+{
+ int err = BCME_OK;
+ cfr_dump_list_t *ptr, *next;
+
+ NULL_CHECK(dhd, "dhd is NULL", err);
+
+ if (!list_empty(&dhd->csi_list)) {
+ list_for_each_entry_safe(ptr, next, &dhd->csi_list, list) {
+ list_del(&ptr->list);
+ MFREE(dhd->osh, ptr, sizeof(cfr_dump_list_t));
+ }
+ }
+ return err;
+}
+
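+/* Free every fully received CSI record; entries still waiting for more
+ * event chunks stay on the list.
+ */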
+void
+dhd_csi_clean_list(dhd_pub_t *dhd)
+{
+ cfr_dump_list_t *ptr, *next;
+ int num = 0;
+
+ if (!dhd) {
+ DHD_ERROR(("NULL POINTER: %s\n", __FUNCTION__));
+ return;
+ }
+
+ if (!list_empty(&dhd->csi_list)) {
+ list_for_each_entry_safe(ptr, next, &dhd->csi_list, list) {
+ if (0 == ptr->entry.header.remain_length) {
+ list_del(&ptr->list);
+ num++;
+ MFREE(dhd->osh, ptr, sizeof(cfr_dump_list_t));
+ }
+ }
+ }
+	/* only fully received records were freed above; keep the count in
+	 * sync with the entries still on the list
+	 */
+	dhd->csi_count -= num;
+	DHD_TRACE(("Cleaned up %d records\n", num));
+}
+
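+/* Copy all complete CSI records (header plus data) into buf and return
+ * the total number of bytes written; incomplete entries are skipped.
+ */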
+int
+dhd_csi_dump_list(dhd_pub_t *dhd, char *buf)
+{
+ int ret = BCME_OK;
+ cfr_dump_list_t *ptr, *next;
+ uint8 * pbuf = buf;
+ int num = 0;
+ int length = 0;
+
+ NULL_CHECK(dhd, "dhd is NULL", ret);
+
+ /* check if this addr exist */
+ if (!list_empty(&dhd->csi_list)) {
+ list_for_each_entry_safe(ptr, next, &dhd->csi_list, list) {
+ if (ptr->entry.header.remain_length) {
+ DHD_ERROR(("data not ready %d\n", ptr->entry.header.remain_length));
+ continue;
+ }
+ bcopy(&ptr->entry.header, pbuf, sizeof(cfr_dump_header_t));
+ length += sizeof(cfr_dump_header_t);
+ pbuf += sizeof(cfr_dump_header_t);
+ DHD_TRACE(("Copy data size %d\n", ptr->entry.header.cfr_dump_length));
+ bcopy(&ptr->entry.data, pbuf, ptr->entry.header.cfr_dump_length);
+ length += ptr->entry.header.cfr_dump_length;
+ pbuf += ptr->entry.header.cfr_dump_length;
+ num++;
+ }
+ }
+ DHD_TRACE(("dump %d record %d bytes\n", num, length));
+
+ return length;
+}
diff --git a/bcmdhd.101.10.361.x/dhd_csi.h b/bcmdhd.101.10.361.x/dhd_csi.h
new file mode 100755
index 0000000..e1c0aaa
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_csi.h
@@ -0,0 +1,76 @@
+/*
+ * Broadcom Dongle Host Driver (DHD), CSI
+ *
+ * Copyright (C) 1999-2018, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_csi.h 558438 2015-05-22 06:05:11Z $
+ */
+#ifndef __DHD_CSI_H__
+#define __DHD_CSI_H__
+
+/* Maximum CSI file dump size */
+#define MAX_CSI_FILESZ (32 * 1024)
+/* Maximum number of subcarriers */
+#define MAXINUM_CFR_DATA (256 * 4)
+#define CSI_DUMP_PATH "/sys/bcm-dhd/csi"
+#define MAX_EVENT_SIZE 1400
+/* maximum number of CSI records stored in DHD */
+#define MAX_CSI_NUM 8
+
+typedef struct cfr_dump_header {
+ /* 0 - successful; 1 - Failed */
+ uint8 status;
+ /* Peer MAC address */
+ uint8 peer_macaddr[6];
+ /* Number of Space Time Streams */
+ uint8 sts;
+ /* Number of RX chain */
+ uint8 num_rx;
+ /* Number of subcarrier */
+ uint16 num_carrier;
+ /* Length of the CSI dump */
+ uint32 cfr_dump_length;
+	/* remaining unsent CSI data length */
+ uint32 remain_length;
+ /* RSSI */
+ int8 rssi;
+} __attribute__((packed)) cfr_dump_header_t;
+
+typedef struct cfr_dump_data {
+ cfr_dump_header_t header;
+ uint32 data[MAXINUM_CFR_DATA];
+} cfr_dump_data_t;
+
+typedef struct {
+ struct list_head list;
+ cfr_dump_data_t entry;
+} cfr_dump_list_t;
+
+int dhd_csi_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data);
+
+int dhd_csi_init(dhd_pub_t *dhd);
+
+int dhd_csi_deinit(dhd_pub_t *dhd);
+
+void dhd_csi_clean_list(dhd_pub_t *dhd);
+
+int dhd_csi_dump_list(dhd_pub_t *dhd, char *buf);
+#endif /* __DHD_CSI_H__ */
diff --git a/bcmdhd.101.10.361.x/dhd_custom_cis.c b/bcmdhd.101.10.361.x/dhd_custom_cis.c
new file mode 100755
index 0000000..7105a9e
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_custom_cis.c
@@ -0,0 +1,2010 @@
+/*
+ * Process CIS information from OTP for customer platform
+ * (Handle the MAC address and module information)
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+
+#include <ethernet.h>
+#include <dngl_stats.h>
+#include <bcmutils.h>
+#include <dhd.h>
+#include <dhd_dbg.h>
+#include <dhd_linux.h>
+#include <bcmdevs.h>
+
+#include <linux/fcntl.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <bcmiov.h>
+
+#ifdef DHD_USE_CISINFO_FROM_OTP
+#include <bcmdevs_legacy.h> /* need to still support chips no longer in trunk firmware */
+#include <siutils.h>
+#include <pcie_core.h>
+#include <dhd_pcie.h>
+#endif /* DHD_USE_CISINFO_FROM_OTP */
+
+#ifdef DHD_USE_CISINFO_FROM_OTP
+#define CIS_TUPLE_HDR_LEN 2
+#if defined(BCM4375_CHIP)
+#define CIS_TUPLE_START_ADDRESS 0x18011120
+#define CIS_TUPLE_END_ADDRESS 0x18011177
+#elif defined(BCM4389_CHIP_DEF)
+#define CIS_TUPLE_START_ADDRESS 0x18011058
+#define CIS_TUPLE_END_ADDRESS 0x180110AF
+#else
+#define CIS_TUPLE_START_ADDRESS 0x18011110
+#define CIS_TUPLE_END_ADDRESS 0x18011167
+#endif /* defined(BCM4375_CHIP) */
+#define CIS_TUPLE_MAX_COUNT (uint32)((CIS_TUPLE_END_ADDRESS - CIS_TUPLE_START_ADDRESS\
+ + 1) / sizeof(uint32))
+#define CIS_TUPLE_TAG_START 0x80
+#define CIS_TUPLE_TAG_VENDOR 0x81
+#define CIS_TUPLE_TAG_BOARDTYPE 0x1b
+#define CIS_TUPLE_TAG_LENGTH 1
+
+typedef struct cis_tuple_format {
+ uint8 id;
+ uint8 len; /* total length of tag and data */
+ uint8 tag;
+ uint8 data[1];
+} cis_tuple_format_t;
+
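+/*
+ * Read the raw CIS tuple area from OTP via backplane accesses. The OTP
+ * control and PMU min-resource registers are temporarily reprogrammed
+ * so the OTP is readable, then restored. Returns the number of bytes
+ * read, or BCME_ERROR on failure.
+ */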
+static int
+read_otp_from_bp(dhd_bus_t *bus, uint32 *data_buf)
+{
+ int int_val = 0, i = 0, bp_idx = 0;
+ int boardtype_backplane_addr[] = {
+ 0x18010324, /* OTP Control 1 */
+ 0x18012618, /* PMU min resource mask */
+ };
+ int boardtype_backplane_data[] = {
+ 0x00fa0000,
+ 0x0e4fffff /* Keep on ARMHTAVAIL */
+ };
+
+ uint32 cis_start_addr = CIS_TUPLE_START_ADDRESS;
+ uint32 org_boardtype_backplane_data[] = {
+ 0,
+ 0
+ };
+
+ for (bp_idx = 0; bp_idx < ARRAYSIZE(boardtype_backplane_addr); bp_idx++) {
+ /* Read OTP Control 1 and PMU min_rsrc_mask before writing */
+ if (si_backplane_access(bus->sih, boardtype_backplane_addr[bp_idx], sizeof(int),
+ &org_boardtype_backplane_data[bp_idx], TRUE) != BCME_OK) {
+ DHD_ERROR(("invalid size/addr combination\n"));
+ return BCME_ERROR;
+ }
+
+ /* Write new OTP and PMU configuration */
+ if (si_backplane_access(bus->sih, boardtype_backplane_addr[bp_idx], sizeof(int),
+ &boardtype_backplane_data[bp_idx], FALSE) != BCME_OK) {
+ DHD_ERROR(("invalid size/addr combination\n"));
+ return BCME_ERROR;
+ }
+
+ if (si_backplane_access(bus->sih, boardtype_backplane_addr[bp_idx], sizeof(int),
+ &int_val, TRUE) != BCME_OK) {
+ DHD_ERROR(("invalid size/addr combination\n"));
+ return BCME_ERROR;
+ }
+
+ DHD_INFO(("%s: boardtype_backplane_addr 0x%08x rdata 0x%04x\n",
+ __FUNCTION__, boardtype_backplane_addr[bp_idx], int_val));
+ }
+
+ /* read tuple raw data */
+ for (i = 0; i < CIS_TUPLE_MAX_COUNT; i++) {
+ if (si_backplane_access(bus->sih, cis_start_addr + i * sizeof(uint32),
+ sizeof(uint32), &data_buf[i], TRUE) != BCME_OK) {
+ break;
+ }
+ DHD_INFO(("%s: tuple index %d, raw data 0x%08x\n", __FUNCTION__, i, data_buf[i]));
+ }
+
+ for (bp_idx = 0; bp_idx < ARRAYSIZE(boardtype_backplane_addr); bp_idx++) {
+ /* Write original OTP and PMU configuration */
+ if (si_backplane_access(bus->sih, boardtype_backplane_addr[bp_idx], sizeof(int),
+ &org_boardtype_backplane_data[bp_idx], FALSE) != BCME_OK) {
+ DHD_ERROR(("invalid size/addr combination\n"));
+ return BCME_ERROR;
+ }
+
+ if (si_backplane_access(bus->sih, boardtype_backplane_addr[bp_idx], sizeof(int),
+ &int_val, TRUE) != BCME_OK) {
+ DHD_ERROR(("invalid size/addr combination\n"));
+ return BCME_ERROR;
+ }
+
+ DHD_INFO(("%s: boardtype_backplane_addr 0x%08x rdata 0x%04x\n",
+ __FUNCTION__, boardtype_backplane_addr[bp_idx], int_val));
+ }
+
+ return i * sizeof(uint32);
+}
+
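+/* Walk the CIS tuples looking for the vendor ID (0x81) and board type
+ * (0x1b) tags and return their values.
+ */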
+static int
+dhd_parse_board_information_bcm(dhd_bus_t *bus, int *boardtype,
+ unsigned char *vid, int *vid_length)
+{
+ int totlen, len;
+ uint32 raw_data[CIS_TUPLE_MAX_COUNT];
+ cis_tuple_format_t *tuple;
+
+ totlen = read_otp_from_bp(bus, raw_data);
+ if (totlen == BCME_ERROR || totlen == 0) {
+ DHD_ERROR(("%s : Can't read the OTP\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ tuple = (cis_tuple_format_t *)raw_data;
+
+ /* check the first tuple has tag 'start' */
+ if (tuple->id != CIS_TUPLE_TAG_START) {
+ DHD_ERROR(("%s: Can not find the TAG\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ *vid_length = *boardtype = 0;
+
+ /* find tagged parameter */
+ while ((totlen >= (tuple->len + CIS_TUPLE_HDR_LEN)) &&
+ (*vid_length == 0 || *boardtype == 0)) {
+ len = tuple->len;
+
+ if ((tuple->tag == CIS_TUPLE_TAG_VENDOR) &&
+ (totlen >= (int)(len + CIS_TUPLE_HDR_LEN))) {
+ /* found VID */
+ memcpy(vid, tuple->data, tuple->len - CIS_TUPLE_TAG_LENGTH);
+ *vid_length = tuple->len - CIS_TUPLE_TAG_LENGTH;
+ prhex("OTP VID", tuple->data, tuple->len - CIS_TUPLE_TAG_LENGTH);
+ }
+ else if ((tuple->tag == CIS_TUPLE_TAG_BOARDTYPE) &&
+ (totlen >= (int)(len + CIS_TUPLE_HDR_LEN))) {
+ /* found boardtype */
+ *boardtype = (int)tuple->data[0];
+ prhex("OTP boardtype", tuple->data, tuple->len - CIS_TUPLE_TAG_LENGTH);
+ }
+
+ tuple = (cis_tuple_format_t*)((uint8*)tuple + (len + CIS_TUPLE_HDR_LEN));
+ totlen -= (len + CIS_TUPLE_HDR_LEN);
+ }
+
+ if (*vid_length <= 0 || *boardtype <= 0) {
+ DHD_ERROR(("failed to parse information (vid=%d, boardtype=%d)\n",
+ *vid_length, *boardtype));
+ return BCME_ERROR;
+ }
+
+ return BCME_OK;
+}
+
+#ifdef USE_CID_CHECK
+#define CHIP_REV_A0 1
+#define CHIP_REV_A1 2
+#define CHIP_REV_B0 3
+#define CHIP_REV_B1 4
+#define CHIP_REV_B2 5
+#define CHIP_REV_C0 6
+#define BOARD_TYPE_EPA 0x080f
+#define BOARD_TYPE_IPA 0x0827
+#define BOARD_TYPE_IPA_OLD 0x081a
+#define DEFAULT_CIDINFO_FOR_EPA "r00a_e000_a0_ePA"
+#define DEFAULT_CIDINFO_FOR_IPA "r00a_e000_a0_iPA"
+#define DEFAULT_CIDINFO_FOR_A1 "r01a_e30a_a1"
+#define DEFAULT_CIDINFO_FOR_B0 "r01i_e32_b0"
+
+naming_info_t bcm4361_naming_table[] = {
+ { {""}, {""}, {""} },
+ { {"r00a_e000_a0_ePA"}, {"_a0_ePA"}, {"_a0_ePA"} },
+ { {"r00a_e000_a0_iPA"}, {"_a0"}, {"_a1"} },
+ { {"r01a_e30a_a1"}, {"_r01a_a1"}, {"_a1"} },
+ { {"r02a_e30a_a1"}, {"_r02a_a1"}, {"_a1"} },
+ { {"r02c_e30a_a1"}, {"_r02c_a1"}, {"_a1"} },
+ { {"r01d_e31_b0"}, {"_r01d_b0"}, {"_b0"} },
+ { {"r01f_e31_b0"}, {"_r01f_b0"}, {"_b0"} },
+ { {"r02g_e31_b0"}, {"_r02g_b0"}, {"_b0"} },
+ { {"r01h_e32_b0"}, {"_r01h_b0"}, {"_b0"} },
+ { {"r01i_e32_b0"}, {"_r01i_b0"}, {"_b0"} },
+ { {"r02j_e32_b0"}, {"_r02j_b0"}, {"_b0"} },
+ { {"r012_1kl_a1"}, {"_r012_a1"}, {"_a1"} },
+ { {"r013_1kl_b0"}, {"_r013_b0"}, {"_b0"} },
+ { {"r013_1kl_b0"}, {"_r013_b0"}, {"_b0"} },
+ { {"r014_1kl_b0"}, {"_r014_b0"}, {"_b0"} },
+ { {"r015_1kl_b0"}, {"_r015_b0"}, {"_b0"} },
+ { {"r020_1kl_b0"}, {"_r020_b0"}, {"_b0"} },
+ { {"r021_1kl_b0"}, {"_r021_b0"}, {"_b0"} },
+ { {"r022_1kl_b0"}, {"_r022_b0"}, {"_b0"} },
+ { {"r023_1kl_b0"}, {"_r023_b0"}, {"_b0"} },
+ { {"r024_1kl_b0"}, {"_r024_b0"}, {"_b0"} },
+ { {"r030_1kl_b0"}, {"_r030_b0"}, {"_b0"} },
+ { {"r031_1kl_b0"}, {"_r030_b0"}, {"_b0"} }, /* exceptional case : r31 -> r30 */
+ { {"r032_1kl_b0"}, {"_r032_b0"}, {"_b0"} },
+ { {"r033_1kl_b0"}, {"_r033_b0"}, {"_b0"} },
+ { {"r034_1kl_b0"}, {"_r034_b0"}, {"_b0"} },
+ { {"r02a_e32a_b2"}, {"_r02a_b2"}, {"_b2"} },
+ { {"r02b_e32a_b2"}, {"_r02b_b2"}, {"_b2"} },
+ { {"r020_1qw_b2"}, {"_r020_b2"}, {"_b2"} },
+ { {"r021_1qw_b2"}, {"_r021_b2"}, {"_b2"} },
+ { {"r022_1qw_b2"}, {"_r022_b2"}, {"_b2"} },
+ { {"r031_1qw_b2"}, {"_r031_b2"}, {"_b2"} }
+};
+
+naming_info_t bcm4375_naming_table[] = {
+ { {""}, {""}, {""} },
+ { {"e41_es11"}, {"_ES00_semco_b0"}, {"_b0"} },
+ { {"e43_es33"}, {"_ES01_semco_b0"}, {"_b0"} },
+ { {"e43_es34"}, {"_ES02_semco_b0"}, {"_b0"} },
+ { {"e43_es35"}, {"_ES02_semco_b0"}, {"_b0"} },
+ { {"e43_es36"}, {"_ES03_semco_b0"}, {"_b0"} },
+ { {"e43_cs41"}, {"_CS00_semco_b1"}, {"_b1"} },
+ { {"e43_cs51"}, {"_CS01_semco_b1"}, {"_b1"} },
+ { {"e43_cs53"}, {"_CS01_semco_b1"}, {"_b1"} },
+ { {"e43_cs61"}, {"_CS00_skyworks_b1"}, {"_b1"} },
+ { {"1rh_es10"}, {"_1rh_es10_b0"}, {"_b0"} },
+ { {"1rh_es11"}, {"_1rh_es11_b0"}, {"_b0"} },
+ { {"1rh_es12"}, {"_1rh_es12_b0"}, {"_b0"} },
+ { {"1rh_es13"}, {"_1rh_es13_b0"}, {"_b0"} },
+ { {"1rh_es20"}, {"_1rh_es20_b0"}, {"_b0"} },
+ { {"1rh_es32"}, {"_1rh_es32_b0"}, {"_b0"} },
+ { {"1rh_es41"}, {"_1rh_es41_b1"}, {"_b1"} },
+ { {"1rh_es42"}, {"_1rh_es42_b1"}, {"_b1"} },
+ { {"1rh_es43"}, {"_1rh_es43_b1"}, {"_b1"} },
+ { {"1rh_es44"}, {"_1rh_es44_b1"}, {"_b1"} }
+};
+
+naming_info_t bcm4389_naming_table[] = {
+ { {""}, {""}, {""} },
+ { {"e53_es23"}, {"_ES10_semco_b0"}, {"_b0"} },
+ { {"e53_es24"}, {"_ES20_semco_b0"}, {"_b0"} },
+ { {"e53_es25"}, {"_ES21_semco_b0"}, {"_b0"} },
+ { {"e53_es31"}, {"_ES30_semco_c0"}, {"_c0"} },
+ { {"e53_es32"}, {"_ES32_semco_c0"}, {"_c0"} },
+ { {"e53_es40"}, {"_ES40_semco_c1"}, {"_c1"} },
+ { {"1wk_es21"}, {"_1wk_es21_b0"}, {"_b0"} },
+ { {"1wk_es30"}, {"_1wk_es30_b0"}, {"_b0"} },
+ { {"1wk_es31"}, {"_1wk_es31_b0"}, {"_b0"} },
+ { {"1wk_es32"}, {"_1wk_es32_b0"}, {"_b0"} },
+ { {"1wk_es40"}, {"_1wk_es40_c0"}, {"_c0"} },
+ { {"1wk_es41"}, {"_1wk_es41_c0"}, {"_c0"} },
+ { {"1wk_es42"}, {"_1wk_es42_c0"}, {"_c0"} },
+ { {"1wk_es43"}, {"_1wk_es43_c0"}, {"_c0"} },
+ { {"1wk_es50"}, {"_1wk_es50_c1"}, {"_c1"} }
+};
+
+/* select the NVRAM/FW tag naming table */
+naming_info_t *
+select_naming_table(dhd_pub_t *dhdp, int *table_size)
+{
+ naming_info_t * info = NULL;
+
+ if (!dhdp || !dhdp->bus || !dhdp->bus->sih) {
+ DHD_ERROR(("%s : Invalid pointer\n", __FUNCTION__));
+ return info;
+ }
+
+ switch (si_chipid(dhdp->bus->sih)) {
+ case BCM4361_CHIP_ID:
+ case BCM4347_CHIP_ID:
+ info = &bcm4361_naming_table[0];
+ *table_size = ARRAYSIZE(bcm4361_naming_table);
+ DHD_INFO(("%s: info %p, ret %d\n", __FUNCTION__, info, *table_size));
+ break;
+ case BCM4375_CHIP_ID:
+ info = &bcm4375_naming_table[0];
+ *table_size = ARRAYSIZE(bcm4375_naming_table);
+ DHD_INFO(("%s: info %p, ret %d\n", __FUNCTION__, info, *table_size));
+ break;
+ case BCM4389_CHIP_ID:
+ info = &bcm4389_naming_table[0];
+ *table_size = ARRAYSIZE(bcm4389_naming_table);
+ DHD_INFO(("%s: info %p, ret %d\n", __FUNCTION__, info, *table_size));
+ break;
+ default:
+ DHD_ERROR(("%s: No MODULE NAMING TABLE found\n", __FUNCTION__));
+ break;
+ }
+
+ return info;
+}
+
+#define CID_FEM_MURATA "_mur_"
+naming_info_t *
+dhd_find_naming_info(dhd_pub_t *dhdp, char *module_type)
+{
+ int i = 0;
+ naming_info_t *info = NULL;
+ int table_size = 0;
+
+ info = select_naming_table(dhdp, &table_size);
+ if (!info || !table_size) {
+ DHD_ERROR(("%s : Can't select the naming table\n", __FUNCTION__));
+ return NULL;
+ }
+
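+ /*
+ * Walk the table (index 0 is an empty sentinel row) and match on the
+ * cid_ext prefix; e.g. module_type "r02a_e30a_a1" would select the
+ * { "r02a_e30a_a1", "_r02a_a1", "_a1" } row of bcm4361_naming_table.
+ */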
+ if (module_type && strlen(module_type) > 0) {
+ for (i = 1, info++; i < table_size; info++, i++) {
+ DHD_INFO(("%s : info %p, %d, info->cid_ext : %s\n",
+ __FUNCTION__, info, i, info->cid_ext));
+ if (!strncmp(info->cid_ext, module_type, strlen(info->cid_ext))) {
+ break;
+ }
+ }
+ }
+
+ return info;
+}
+
+static naming_info_t *
+dhd_find_naming_info_by_cid(dhd_pub_t *dhdp, char *cid_info)
+{
+ int i = 0;
+ char *ptr;
+ naming_info_t *info = NULL;
+ int table_size = 0;
+
+ info = select_naming_table(dhdp, &table_size);
+ if (!info || !table_size) {
+ DHD_ERROR(("%s : Can't select the naming table\n", __FUNCTION__));
+ return NULL;
+ }
+
+ /* truncate extension */
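+ /*
+ * e.g. assuming MODULE_NAME_INDEX_MAX == 3, "semco_sky_r02a_e30a_a1"
+ * leaves ptr at "r02a_e30a_a1" (the two leading vendor fields are
+ * skipped).
+ */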
+ for (i = 1, ptr = cid_info; i < MODULE_NAME_INDEX_MAX && ptr; i++) {
+ ptr = bcmstrstr(ptr, "_");
+ if (ptr) {
+ ptr++;
+ }
+ }
+
+ for (i = 1, info++; i < table_size && ptr; info++, i++) {
+ DHD_INFO(("%s : info %p, %d, info->cid_ext : %s\n",
+ __FUNCTION__, info, i, info->cid_ext));
+ if (!strncmp(info->cid_ext, ptr, strlen(info->cid_ext))) {
+ break;
+ }
+ }
+
+ return info;
+}
+
+naming_info_t *
+dhd_find_naming_info_by_chip_rev(dhd_pub_t *dhdp, bool *is_murata_fem)
+{
+ int board_type = 0, chip_rev = 0, vid_length = 0;
+ unsigned char vid[MAX_VID_LEN];
+ naming_info_t *info = NULL;
+ char *cid_info = NULL;
+ dhd_bus_t *bus = NULL;
+
+ if (!dhdp) {
+ DHD_ERROR(("%s: dhdp is NULL \n", __FUNCTION__));
+ return NULL;
+ }
+
+ bus = dhdp->bus;
+
+ if (!bus || !bus->sih) {
+ DHD_ERROR(("%s:bus(%p) or bus->sih is NULL\n", __FUNCTION__, bus));
+ return NULL;
+ }
+
+ chip_rev = bus->sih->chiprev;
+
+ if (dhd_parse_board_information_bcm(bus, &board_type, vid, &vid_length)
+ != BCME_OK) {
+ DHD_ERROR(("%s:failed to parse board information\n", __FUNCTION__));
+ return NULL;
+ }
+
+ DHD_INFO(("%s:chip version %d\n", __FUNCTION__, chip_rev));
+
+#ifdef BCM4361_CHIP
+ /* A0 chipset has exception only */
+ if (chip_rev == CHIP_REV_A0) {
+ if (board_type == BOARD_TYPE_EPA) {
+ info = dhd_find_naming_info(dhdp, DEFAULT_CIDINFO_FOR_EPA);
+ } else if ((board_type == BOARD_TYPE_IPA) ||
+ (board_type == BOARD_TYPE_IPA_OLD)) {
+ info = dhd_find_naming_info(dhdp, DEFAULT_CIDINFO_FOR_IPA);
+ }
+ } else
+#endif /* BCM4361_CHIP */
+ {
+ cid_info = dhd_get_cid_info(vid, vid_length);
+ if (cid_info) {
+ info = dhd_find_naming_info_by_cid(dhdp, cid_info);
+ if (strstr(cid_info, CID_FEM_MURATA)) {
+ *is_murata_fem = TRUE;
+ }
+ }
+ }
+
+ return info;
+}
+#endif /* USE_CID_CHECK */
+#ifdef USE_DIRECT_VID_TAG
+static int
+concate_nvram_by_vid(dhd_pub_t *dhdp, char *nv_path, char *chipstr)
+{
+ unsigned char vid[MAX_VID_LEN];
+ unsigned char vid2str[MAX_VID_LEN];
+
+ memset(vid, 0, sizeof(vid));
+ memset(vid2str, 0, sizeof(vid2str));
+
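+ /*
+ * The resulting path is <nv_path><chipstr>_<vendor><rev>; with
+ * hypothetical values chipstr "_4389", vid[VENDOR_OFF] 0xa and
+ * vid[MD_REV_OFF] 0x1 this yields "<nv_path>_4389_a1".
+ */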
+ if (dhd_check_stored_module_info(vid) == BCME_OK) {
+ /* concate chip string tag */
+ strncat(nv_path, chipstr, strlen(nv_path));
+ /* concate nvram tag */
+ snprintf(vid2str, sizeof(vid2str), "_%x%x", vid[VENDOR_OFF], vid[MD_REV_OFF]);
+ strncat(nv_path, vid2str, strlen(nv_path));
+ DHD_ERROR(("%s: nvram_path : %s\n", __FUNCTION__, nv_path));
+ } else {
+ int board_type = 0, vid_length = 0;
+ dhd_bus_t *bus = NULL;
+ if (!dhdp) {
+ DHD_ERROR(("%s : dhdp is NULL\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+ bus = dhdp->bus;
+ if (dhd_parse_board_information_bcm(bus, &board_type, vid, &vid_length)
+ != BCME_OK) {
+ DHD_ERROR(("%s:failed to parse board information\n", __FUNCTION__));
+ return BCME_ERROR;
+ } else {
+ /* concate chip string tag */
+ strncat(nv_path, chipstr, strlen(nv_path));
+ /* vid from CIS - vid[1] = vendor, vid[0] - module rev. */
+ snprintf(vid2str, sizeof(vid2str), "_%x%x",
+ vid[VENDOR_OFF], vid[MD_REV_OFF]);
+ /* concate nvram tag */
+ strncat(nv_path, vid2str, strlen(nv_path));
+ DHD_ERROR(("%s: nvram_path : %s\n", __FUNCTION__, nv_path));
+ }
+ }
+ return BCME_OK;
+}
+#endif /* USE_DIRECT_VID_TAG */
+#endif /* DHD_USE_CISINFO_FROM_OTP */
+
+#ifdef DHD_USE_CISINFO
+
+/* File Location to keep each information */
+#ifdef OEM_ANDROID
+#define MACINFO PLATFORM_PATH".mac.info"
+#define CIDINFO PLATFORM_PATH".cid.info"
+#ifdef PLATFORM_SLP
+#define MACINFO_EFS "/csa/.mac.info"
+#else
+#define MACINFO_EFS "/efs/wifi/.mac.info"
+#define CIDINFO_DATA "/data/.cid.info"
+#endif /* PLATFORM_SLP */
+#else
+#define MACINFO "/opt/.mac.info"
+#define MACINFO_EFS "/opt/.efs.mac.info"
+#define CIDINFO "/opt/.cid.info"
+#endif /* OEM_ANDROID */
+
+/* Definitions for MAC address */
+#define MAC_BUF_SIZE 20
+#define MAC_CUSTOM_FORMAT "%02X:%02X:%02X:%02X:%02X:%02X"
+
+/* Definitions for CIS information */
+#if defined(BCM4359_CHIP) || defined(BCM4361_CHIP) || defined(BCM4375_CHIP) || \
+ defined(BCM4389_CHIP_DEF)
+#define CIS_BUF_SIZE 1280
+#else
+#define CIS_BUF_SIZE 512
+#endif /* BCM4359_CHIP || BCM4361_CHIP || BCM4375_CHIP || BCM4389_CHIP_DEF */
+
+#define DUMP_CIS_SIZE 48
+
+#define CIS_TUPLE_TAG_START 0x80
+#define CIS_TUPLE_TAG_VENDOR 0x81
+#define CIS_TUPLE_TAG_MACADDR 0x19
+#define CIS_TUPLE_TAG_BOARDTYPE 0x1b
+#define CIS_TUPLE_LEN_MACADDR 7
+#define CIS_DUMP_END 0xff
+#define CIS_TUPLE_NULL 0x00
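+/*
+ * Tuples in the CIS buffer are parsed below as: 1-byte tag, 1-byte length,
+ * then payload. Vendor data uses tag 0x80 (CIS_TUPLE_TAG_START) with the
+ * first payload byte acting as a sub-tag, so a MAC tuple would look like
+ * (hypothetical bytes): 80 07 19 aa bb cc dd ee ff.
+ */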
+
+#ifdef CONFIG_BCMDHD_PCIE
+#if defined(BCM4361_CHIP) || defined(BCM4375_CHIP)
+#define OTP_OFFSET 208
+#elif defined(BCM4389_CHIP_DEF)
+#define OTP_OFFSET 0
+#else
+#define OTP_OFFSET 128
+#endif /* BCM4361 | BCM4375 = 208, BCM4389 = 0, Others = 128 */
+#else /* CONFIG_BCMDHD_PCIE */
+#define OTP_OFFSET 12 /* SDIO */
+#endif /* CONFIG_BCMDHD_PCIE */
+
+unsigned char *g_cis_buf = NULL;
+
+/* Definitions for common interface */
+typedef struct tuple_entry {
+ struct list_head list; /* head of the list */
+ uint32 cis_idx; /* index of each tuples */
+} tuple_entry_t;
+
+extern int _dhd_set_mac_address(struct dhd_info *dhd, int ifidx, struct ether_addr *addr);
+#if defined(GET_MAC_FROM_OTP) || defined(USE_CID_CHECK)
+static tuple_entry_t *dhd_alloc_tuple_entry(dhd_pub_t *dhdp, const int idx);
+static void dhd_free_tuple_entry(dhd_pub_t *dhdp, struct list_head *head);
+static int dhd_find_tuple_list_from_otp(dhd_pub_t *dhdp, int req_tup,
+ unsigned char* req_tup_len, struct list_head *head);
+#endif /* GET_MAC_FROM_OTP || USE_CID_CHECK */
+
+/* otp region read/write information */
+typedef struct otp_rgn_rw_info {
+ uint8 rgnid;
+ uint8 preview;
+ uint8 integrity_chk;
+ uint16 rgnsize;
+ uint16 datasize;
+ uint8 *data;
+} otp_rgn_rw_info_t;
+
+/* otp region status information */
+typedef struct otp_rgn_stat_info {
+ uint8 rgnid;
+ uint16 rgnstart;
+ uint16 rgnsize;
+} otp_rgn_stat_info_t;
+
+typedef int (pack_handler_t)(void *ctx, uint8 *buf, uint16 *buflen);
+
+/* Common Interface Functions */
+int
+dhd_alloc_cis(dhd_pub_t *dhdp)
+{
+ if (g_cis_buf == NULL) {
+ g_cis_buf = MALLOCZ(dhdp->osh, CIS_BUF_SIZE);
+ if (g_cis_buf == NULL) {
+ DHD_ERROR(("%s: Failed to alloc buffer for CIS\n", __FUNCTION__));
+ return BCME_NOMEM;
+ } else {
+ DHD_ERROR(("%s: Local CIS buffer is alloced\n", __FUNCTION__));
+ memset(g_cis_buf, 0, CIS_BUF_SIZE);
+ }
+ }
+ return BCME_OK;
+}
+
+void
+dhd_clear_cis(dhd_pub_t *dhdp)
+{
+ if (g_cis_buf) {
+ MFREE(dhdp->osh, g_cis_buf, CIS_BUF_SIZE);
+ g_cis_buf = NULL;
+ DHD_ERROR(("%s: Local CIS buffer is freed\n", __FUNCTION__));
+ }
+}
+
+#ifdef DHD_READ_CIS_FROM_BP
+int
+dhd_read_cis(dhd_pub_t *dhdp)
+{
+ int ret = 0, totlen = 0;
+ uint32 raw_data[CIS_TUPLE_MAX_COUNT];
+
+ int cis_offset = OTP_OFFSET + sizeof(cis_rw_t);
+#if defined(BCM4389_CHIP_DEF)
+ /* override OTP_OFFSET for 4389 */
+ cis_offset = OTP_OFFSET;
+#endif /* BCM4389_CHIP_DEF */
+
+ totlen = read_otp_from_bp(dhdp->bus, raw_data);
+ if (totlen == BCME_ERROR || totlen == 0) {
+ DHD_ERROR(("%s : Can't read the OTP\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ (void)memcpy_s(g_cis_buf + cis_offset, CIS_BUF_SIZE - cis_offset, raw_data, totlen);
+ return ret;
+}
+#else
+int
+dhd_read_cis(dhd_pub_t *dhdp)
+{
+ int ret = 0;
+ cis_rw_t *cish;
+ int buf_size = CIS_BUF_SIZE;
+ int length = strlen("cisdump");
+
+ if (length >= buf_size) {
+ DHD_ERROR(("%s: check CIS_BUF_SIZE\n", __FUNCTION__));
+ return BCME_BADLEN;
+ }
+
+ /* Try reading out from CIS */
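+ /*
+ * "cisdump" iovar buffer layout: the NUL-terminated iovar name occupies
+ * the first 8 bytes of g_cis_buf, a cis_rw_t header follows at offset 8,
+ * and the CIS data is returned in the remainder of the buffer.
+ */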
+ cish = (cis_rw_t *)(g_cis_buf + 8);
+ cish->source = 0;
+ cish->byteoff = 0;
+ cish->nbytes = buf_size;
+ strlcpy(g_cis_buf, "cisdump", buf_size);
+
+ ret = dhd_wl_ioctl_cmd(dhdp, WLC_GET_VAR, g_cis_buf, buf_size, 0, 0);
+ if (ret < 0) {
+ if (ret == BCME_UNSUPPORTED) {
+ DHD_ERROR(("%s: get cisdump, UNSUPPORTED\n", __FUNCTION__));
+ } else {
+ DHD_ERROR(("%s : get cisdump err(%d)\n",
+ __FUNCTION__, ret));
+ }
+ /* free local buf */
+ dhd_clear_cis(dhdp);
+ }
+
+ return ret;
+}
+#endif /* DHD_READ_CIS_FROM_BP */
+
+static int
+dhd_otp_process_iov_resp_buf(void *ctx, void *iov_resp, uint16 cmd_id,
+ bcm_xtlv_unpack_cbfn_t cbfn)
+{
+ bcm_iov_buf_t *p_resp = NULL;
+ int ret = BCME_OK;
+ uint16 version;
+
+ /* check for version */
+ version = dtoh16(*(uint16 *)iov_resp);
+ if (version != WL_OTP_IOV_VERSION) {
+ return BCME_VERSION;
+ }
+
+ p_resp = (bcm_iov_buf_t *)iov_resp;
+ if ((p_resp->id == cmd_id) && (cbfn != NULL)) {
+ ret = bcm_unpack_xtlv_buf(ctx, (uint8 *)p_resp->data, p_resp->len,
+ BCM_XTLV_OPTION_ALIGN32, cbfn);
+ }
+
+ return ret;
+}
+
+static int
+dhd_otp_get_iov_resp(dhd_pub_t *dhdp, const uint16 cmd_id, void *ctx,
+ pack_handler_t packfn, bcm_xtlv_unpack_cbfn_t cbfn)
+{
+ bcm_iov_buf_t *iov_buf = NULL;
+ uint8 *iov_resp = NULL;
+ int ret = BCME_OK;
+ uint16 iovlen = 0, buflen = 0, buflen_start = 0;
+
+ /* allocate input buffer */
+ iov_buf = MALLOCZ(dhdp->osh, WLC_IOCTL_SMLEN);
+ if (iov_buf == NULL) {
+ DHD_ERROR(("%s: Failed to alloc buffer for iovar input\n", __FUNCTION__));
+ ret = BCME_NOMEM;
+ goto fail;
+ }
+
+ iov_resp = MALLOCZ(dhdp->osh, WLC_IOCTL_MAXLEN);
+ if (iov_resp == NULL) {
+ DHD_ERROR(("%s: Failed to alloc buffer for iovar response\n", __FUNCTION__));
+ ret = BCME_NOMEM;
+ goto fail;
+ }
+
+ /* parse and pack config parameters */
+ buflen = buflen_start = (WLC_IOCTL_SMLEN - sizeof(*iov_buf));
+ ret = packfn(ctx, (uint8 *)&iov_buf->data[0], &buflen);
+ if (ret != BCME_OK) {
+ goto fail;
+ }
+
+ /* fill header portion */
+ iov_buf->version = WL_OTP_IOV_VERSION;
+ iov_buf->len = (buflen_start - buflen);
+ iov_buf->id = cmd_id;
+
+ /* issue get iovar and process response */
+ iovlen = sizeof(*iov_buf) + iov_buf->len;
+ ret = dhd_iovar(dhdp, 0, "otp", (char *)iov_buf, iovlen,
+ iov_resp, WLC_IOCTL_MAXLEN, FALSE);
+ if (ret == BCME_OK) {
+ ret = dhd_otp_process_iov_resp_buf(ctx, iov_resp, cmd_id, cbfn);
+ } else {
+ DHD_ERROR(("%s: Failed to get otp iovar\n", __FUNCTION__));
+ }
+
+fail:
+ if (iov_buf) {
+ MFREE(dhdp->osh, iov_buf, WLC_IOCTL_SMLEN);
+ }
+ if (iov_resp) {
+ MFREE(dhdp->osh, iov_resp, WLC_IOCTL_MAXLEN);
+ }
+ if (ret < 0) {
+ /* free local buf */
+ dhd_clear_cis(dhdp);
+ }
+ return ret;
+}
+
+static int
+dhd_otp_cbfn_rgnstatus(void *ctx, const uint8 *data, uint16 type, uint16 len)
+{
+ otp_rgn_stat_info_t *stat_info = (otp_rgn_stat_info_t *)ctx;
+
+ BCM_REFERENCE(len);
+
+ if (data == NULL) {
+ DHD_ERROR(("%s: bad argument !!!\n", __FUNCTION__));
+ return BCME_BADARG;
+ }
+
+ switch (type) {
+ case WL_OTP_XTLV_RGN:
+ stat_info->rgnid = *data;
+ break;
+ case WL_OTP_XTLV_ADDR:
+ stat_info->rgnstart = dtoh16((uint16)*data);
+ break;
+ case WL_OTP_XTLV_SIZE:
+ stat_info->rgnsize = dtoh16((uint16)*data);
+ break;
+ default:
+ DHD_ERROR(("%s: unknown tlv %u\n", __FUNCTION__, type));
+ break;
+ }
+
+ return BCME_OK;
+}
+
+static int
+dhd_otp_packfn_rgnstatus(void *ctx, uint8 *buf, uint16 *buflen)
+{
+ uint8 *pxtlv = buf;
+ int ret = BCME_OK;
+ uint16 len = *buflen;
+ uint8 rgnid = OTP_RGN_SW;
+
+ BCM_REFERENCE(ctx);
+
+ /* pack option <-r region> */
+ ret = bcm_pack_xtlv_entry(&pxtlv, &len, WL_OTP_XTLV_RGN, sizeof(rgnid),
+ &rgnid, BCM_XTLV_OPTION_ALIGN32);
+ if (ret != BCME_OK) {
+ DHD_ERROR(("%s: Failed pack xtlv entry of region: %d\n", __FUNCTION__, ret));
+ return ret;
+ }
+
+ *buflen = len;
+ return ret;
+}
+
+static int
+dhd_otp_packfn_rgndump(void *ctx, uint8 *buf, uint16 *buflen)
+{
+ uint8 *pxtlv = buf;
+ int ret = BCME_OK;
+ uint16 len = *buflen, size = WLC_IOCTL_MAXLEN;
+ uint8 rgnid = OTP_RGN_SW;
+
+ /* pack option <-r region> */
+ ret = bcm_pack_xtlv_entry(&pxtlv, &len, WL_OTP_XTLV_RGN,
+ sizeof(rgnid), &rgnid, BCM_XTLV_OPTION_ALIGN32);
+ if (ret != BCME_OK) {
+ DHD_ERROR(("%s: Failed pack xtlv entry of region: %d\n", __FUNCTION__, ret));
+ goto fail;
+ }
+
+ /* pack option [-s size] */
+ ret = bcm_pack_xtlv_entry(&pxtlv, &len, WL_OTP_XTLV_SIZE,
+ sizeof(size), (uint8 *)&size, BCM_XTLV_OPTION_ALIGN32);
+ if (ret != BCME_OK) {
+ DHD_ERROR(("%s: Failed pack xtlv entry of size: %d\n", __FUNCTION__, ret));
+ goto fail;
+ }
+ *buflen = len;
+fail:
+ return ret;
+}
+
+static int
+dhd_otp_cbfn_rgndump(void *ctx, const uint8 *data, uint16 type, uint16 len)
+{
+ otp_rgn_rw_info_t *rw_info = (otp_rgn_rw_info_t *)ctx;
+
+ BCM_REFERENCE(len);
+
+ if (data == NULL) {
+ DHD_ERROR(("%s: bad argument !!!\n", __FUNCTION__));
+ return BCME_BADARG;
+ }
+
+ switch (type) {
+ case WL_OTP_XTLV_RGN:
+ rw_info->rgnid = *data;
+ break;
+ case WL_OTP_XTLV_DATA:
+ /*
+ * The return value of memcpy_s is intentionally ignored: this is a
+ * plain pointer copy, so the size is known to be within bounds.
+ */
+ (void)memcpy_s(&rw_info->data, sizeof(rw_info->data),
+ &data, sizeof(rw_info->data));
+ rw_info->datasize = len;
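+ /*
+ * Note that rw_info->data now points into the caller's iovar
+ * response buffer rather than a private copy, so it is only
+ * valid for as long as that buffer stays allocated.
+ */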
+ break;
+ default:
+ DHD_ERROR(("%s: unknown tlv %u\n", __FUNCTION__, type));
+ break;
+ }
+ return BCME_OK;
+}
+
+int
+dhd_read_otp_sw_rgn(dhd_pub_t *dhdp)
+{
+ int ret = BCME_OK;
+ otp_rgn_rw_info_t rw_info;
+ otp_rgn_stat_info_t stat_info;
+
+ memset(&rw_info, 0, sizeof(rw_info));
+ memset(&stat_info, 0, sizeof(stat_info));
+
+ ret = dhd_otp_get_iov_resp(dhdp, WL_OTP_CMD_RGNSTATUS, &stat_info,
+ dhd_otp_packfn_rgnstatus, dhd_otp_cbfn_rgnstatus);
+ if (ret != BCME_OK) {
+ DHD_ERROR(("%s: otp region status failed, ret=%d\n", __FUNCTION__, ret));
+ goto fail;
+ }
+
+ rw_info.rgnsize = stat_info.rgnsize;
+ ret = dhd_otp_get_iov_resp(dhdp, WL_OTP_CMD_RGNDUMP, &rw_info,
+ dhd_otp_packfn_rgndump, dhd_otp_cbfn_rgndump);
+ if (ret != BCME_OK) {
+ DHD_ERROR(("%s: otp region dump failed, ret=%d\n", __FUNCTION__, ret));
+ goto fail;
+ }
+
+ ret = memcpy_s(g_cis_buf, CIS_BUF_SIZE, rw_info.data, rw_info.datasize);
+ if (ret != BCME_OK) {
+ DHD_ERROR(("%s: Failed to copy otp dump, ret=%d\n", __FUNCTION__, ret));
+ }
+fail:
+ return ret;
+}
+
+#if defined(GET_MAC_FROM_OTP) || defined(USE_CID_CHECK)
+static tuple_entry_t*
+dhd_alloc_tuple_entry(dhd_pub_t *dhdp, const int idx)
+{
+ tuple_entry_t *entry;
+
+ entry = MALLOCZ(dhdp->osh, sizeof(tuple_entry_t));
+ if (!entry) {
+ DHD_ERROR(("%s: failed to alloc entry\n", __FUNCTION__));
+ return NULL;
+ }
+
+ entry->cis_idx = idx;
+
+ return entry;
+}
+
+static void
+dhd_free_tuple_entry(dhd_pub_t *dhdp, struct list_head *head)
+{
+ tuple_entry_t *entry;
+
+ while (!list_empty(head)) {
+ entry = list_entry(head->next, tuple_entry_t, list);
+ list_del(&entry->list);
+
+ MFREE(dhdp->osh, entry, sizeof(tuple_entry_t));
+ }
+}
+
+static int
+dhd_find_tuple_list_from_otp(dhd_pub_t *dhdp, int req_tup,
+ unsigned char* req_tup_len, struct list_head *head)
+{
+ int idx = OTP_OFFSET + sizeof(cis_rw_t);
+ int tup, tup_len = 0;
+ int buf_len = CIS_BUF_SIZE;
+ int found = 0;
+
+#if defined(BCM4389_CHIP_DEF)
+ /* override OTP_OFFSET for 4389 */
+ idx = OTP_OFFSET;
+#endif /* BCM4389_CHIP_DEF */
+
+ if (!g_cis_buf) {
+ DHD_ERROR(("%s: Couldn't find cis info from"
+ " local buffer\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ do {
+ tup = g_cis_buf[idx++];
+ if (tup == CIS_TUPLE_NULL || tup == CIS_DUMP_END) {
+ tup_len = 0;
+ } else {
+ tup_len = g_cis_buf[idx++];
+ if ((idx + tup_len) > buf_len) {
+ return BCME_ERROR;
+ }
+
+ if (tup == CIS_TUPLE_TAG_START &&
+ tup_len != CIS_TUPLE_NULL &&
+ g_cis_buf[idx] == req_tup) {
+ idx++;
+ if (head) {
+ tuple_entry_t *entry;
+ entry = dhd_alloc_tuple_entry(dhdp, idx);
+ if (entry) {
+ list_add_tail(&entry->list, head);
+ found++;
+ }
+ }
+ if (found == 1 && req_tup_len) {
+ *req_tup_len = tup_len;
+ }
+ tup_len--;
+ }
+ }
+ idx += tup_len;
+ } while (tup != CIS_DUMP_END && (idx < buf_len));
+
+ return (found > 0) ? found : BCME_ERROR;
+}
+#endif /* GET_MAC_FROM_OTP || USE_CID_CHECK */
+
+#ifdef DUMP_CIS
+static void
+dhd_dump_cis_buf(dhd_pub_t *dhdp, int size)
+{
+ int i;
+ int cis_offset = 0;
+
+ cis_offset = OTP_OFFSET + sizeof(cis_rw_t);
+#if defined(BCM4389_CHIP_DEF)
+ /* override OTP_OFFSET for 4389 */
+ cis_offset = OTP_OFFSET;
+#endif /* BCM4389_CHIP_DEF */
+
+ if (size <= 0) {
+ return;
+ }
+
+ if (size > CIS_BUF_SIZE) {
+ size = CIS_BUF_SIZE;
+ }
+
+ DHD_ERROR(("========== START CIS DUMP ==========\n"));
+ for (i = 0; i < size; i++) {
+ DHD_ERROR(("%02X ", g_cis_buf[i + cis_offset]));
+ if ((i % 16) == 15) {
+ DHD_ERROR(("\n"));
+ }
+ }
+ if ((i % 16) != 15) {
+ DHD_ERROR(("\n"));
+ }
+ DHD_ERROR(("========== END CIS DUMP ==========\n"));
+}
+#endif /* DUMP_CIS */
+
+/* MAC address mangement functions */
+#ifdef READ_MACADDR
+static void
+dhd_create_random_mac(char *buf, unsigned int buf_len)
+{
+ unsigned char random_mac[3]; /* unsigned, so %02X sees no sign extension */
+
+ memset(random_mac, 0, sizeof(random_mac));
+ get_random_bytes(random_mac, 3);
+
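+ /* fixed OUI prefix 00:12:34, padded with three random NIC bytes */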
+ snprintf(buf, buf_len, MAC_CUSTOM_FORMAT, 0x00, 0x12, 0x34,
+ (uint32)random_mac[0], (uint32)random_mac[1], (uint32)random_mac[2]);
+
+ DHD_ERROR(("%s: The Random Generated MAC ID: %s\n",
+ __FUNCTION__, random_mac));
+}
+
+#ifndef DHD_MAC_ADDR_EXPORT
+int
+dhd_set_macaddr_from_file(dhd_pub_t *dhdp)
+{
+ char mac_buf[MAC_BUF_SIZE];
+ char *filepath_efs = MACINFO_EFS;
+#ifdef PLATFORM_SLP
+ char *filepath_mac = MACINFO;
+#endif /* PLATFORM_SLP */
+ int ret;
+ struct dhd_info *dhd;
+ struct ether_addr *mac;
+ char *invalid_mac = "00:00:00:00:00:00";
+
+ if (dhdp) {
+ dhd = dhdp->info;
+ mac = &dhdp->mac;
+ } else {
+ DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ memset(mac_buf, 0, sizeof(mac_buf));
+
+ /* Read MAC address from the specified file */
+ ret = dhd_read_file(filepath_efs, mac_buf, sizeof(mac_buf) - 1);
+
+ /* Check if the file does not exist or contains invalid data */
+ if (ret || (!ret && strstr(mac_buf, invalid_mac))) {
+ /* Generate a new random MAC address */
+ dhd_create_random_mac(mac_buf, sizeof(mac_buf));
+
+ /* Write random MAC address to the file */
+ if (dhd_write_file(filepath_efs, mac_buf, strlen(mac_buf)) < 0) {
+ DHD_ERROR(("%s: MAC address [%s] Failed to write into File:"
+ " %s\n", __FUNCTION__, mac_buf, filepath_efs));
+ return BCME_ERROR;
+ } else {
+ DHD_ERROR(("%s: MAC address [%s] written into File: %s\n",
+ __FUNCTION__, mac_buf, filepath_efs));
+ }
+ }
+#ifdef PLATFORM_SLP
+ /* Write random MAC address for framework */
+ if (dhd_write_file(filepath_mac, mac_buf, strlen(mac_buf)) < 0) {
+ DHD_ERROR(("%s: MAC address [%c%c:xx:xx:xx:x%c:%c%c] Failed to write into File:"
+ " %s\n", __FUNCTION__, mac_buf[0], mac_buf[1],
+ mac_buf[13], mac_buf[15], mac_buf[16], filepath_mac));
+ } else {
+ DHD_ERROR(("%s: MAC address [%c%c:xx:xx:xx:x%c:%c%c] written into File: %s\n",
+ __FUNCTION__, mac_buf[0], mac_buf[1], mac_buf[13],
+ mac_buf[15], mac_buf[16], filepath_mac));
+ }
+#endif /* PLATFORM_SLP */
+
+ mac_buf[sizeof(mac_buf) - 1] = '\0';
+
+ /* Write the MAC address to the Dongle; %02X fills a whole int,
+ * so parse into uint32 temporaries, not the 1-byte octets */
+ {
+ uint32 tmp[6];
+ int i;
+ sscanf(mac_buf, MAC_CUSTOM_FORMAT, &tmp[0], &tmp[1],
+ &tmp[2], &tmp[3], &tmp[4], &tmp[5]);
+ for (i = 0; i < 6; i++)
+ mac->octet[i] = (uint8)tmp[i];
+ }
+
+ if (_dhd_set_mac_address(dhd, 0, mac) == 0) {
+ DHD_INFO(("%s: MAC Address is overwritten\n", __FUNCTION__));
+ } else {
+ DHD_ERROR(("%s: _dhd_set_mac_address() failed\n", __FUNCTION__));
+ }
+
+ return 0;
+}
+#else
+int
+dhd_set_macaddr_from_file(dhd_pub_t *dhdp)
+{
+ char mac_buf[MAC_BUF_SIZE];
+
+ struct dhd_info *dhd;
+ struct ether_addr *mac;
+
+ if (dhdp) {
+ dhd = dhdp->info;
+ mac = &dhdp->mac;
+ } else {
+ DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ memset(mac_buf, 0, sizeof(mac_buf));
+ if (ETHER_ISNULLADDR(&sysfs_mac_addr)) {
+ /* Generate a new random MAC address */
+ dhd_create_random_mac(mac_buf, sizeof(mac_buf));
+ if (!bcm_ether_atoe(mac_buf, &sysfs_mac_addr)) {
+ DHD_ERROR(("%s : mac parsing err\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+ }
+
+ /* Write the MAC address to the Dongle */
+ memcpy(mac, &sysfs_mac_addr, sizeof(sysfs_mac_addr));
+
+ if (_dhd_set_mac_address(dhd, 0, mac) == 0) {
+ DHD_INFO(("%s: MAC Address is overwritten\n", __FUNCTION__));
+ } else {
+ DHD_ERROR(("%s: _dhd_set_mac_address() failed\n", __FUNCTION__));
+ }
+
+ return 0;
+}
+#endif /* !DHD_MAC_ADDR_EXPORT */
+#endif /* READ_MACADDR */
+
+#ifdef GET_MAC_FROM_OTP
+static int
+dhd_set_default_macaddr(dhd_pub_t *dhdp)
+{
+ char iovbuf[WLC_IOCTL_SMLEN];
+ struct ether_addr *mac;
+ int ret;
+
+ if (!dhdp) {
+ DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
+ return BCME_BADARG;
+ }
+
+ mac = &dhdp->mac;
+
+ /* Read the default MAC address */
+ ret = dhd_iovar(dhdp, 0, "cur_etheraddr", NULL, 0, iovbuf, sizeof(iovbuf),
+ FALSE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: Can't get the default MAC address\n", __FUNCTION__));
+ return BCME_NOTUP;
+ }
+
+ /* Update the default MAC address */
+ memcpy(mac, iovbuf, ETHER_ADDR_LEN);
+#ifdef DHD_MAC_ADDR_EXPORT
+ memcpy(&sysfs_mac_addr, mac, sizeof(sysfs_mac_addr));
+#endif /* DHD_MAC_ADDR_EXPORT */
+
+ return 0;
+}
+
+static int
+dhd_verify_macaddr(dhd_pub_t *dhdp, struct list_head *head)
+{
+ tuple_entry_t *cur, *next;
+ int idx = -1; /* Invalid index */
+
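+ /*
+ * With multiple MAC tuples in OTP, accept the address only when at
+ * least two copies agree; return the CIS index of the first such
+ * copy, or -1 if no two tuples match.
+ */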
+ list_for_each_entry(cur, head, list) {
+ list_for_each_entry(next, &cur->list, list) {
+ if ((unsigned long)next == (unsigned long)head) {
+ DHD_INFO(("%s: next ptr %p is same as head ptr %p\n",
+ __FUNCTION__, next, head));
+ break;
+ }
+ if (!memcmp(&g_cis_buf[cur->cis_idx],
+ &g_cis_buf[next->cis_idx], ETHER_ADDR_LEN)) {
+ idx = cur->cis_idx;
+ break;
+ }
+ }
+ }
+
+ return idx;
+}
+
+int
+dhd_check_module_mac(dhd_pub_t *dhdp)
+{
+#ifndef DHD_MAC_ADDR_EXPORT
+ char *filepath_efs = MACINFO_EFS;
+#endif /* !DHD_MAC_ADDR_EXPORT */
+ unsigned char otp_mac_buf[MAC_BUF_SIZE];
+ struct ether_addr *mac;
+ struct dhd_info *dhd;
+
+ if (!dhdp) {
+ DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
+ return BCME_BADARG;
+ }
+
+ dhd = dhdp->info;
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
+ return BCME_BADARG;
+ }
+
+#if defined(DHD_READ_CIS_FROM_BP) && defined(READ_MACADDR)
+ /*
+ * For the KOR module only the CID update is required, so clear the
+ * CIS buffer (g_cis_buf = NULL) before processing when the CIS was
+ * read from the STA firmware. The MAC is then taken from sysfs and
+ * the sysfs MAC is not updated.
+ */
+ if (dhd_bus_get_fw_mode(dhdp) == DHD_FLAG_STA_MODE) {
+ dhd_clear_cis(dhdp);
+ }
+#endif /* DHD_READ_CIS_FROM_BP && READ_MACADDR */
+
+ mac = &dhdp->mac;
+ memset(otp_mac_buf, 0, sizeof(otp_mac_buf));
+
+ if (!g_cis_buf) {
+#ifndef DHD_MAC_ADDR_EXPORT
+ char eabuf[ETHER_ADDR_STR_LEN];
+ DHD_INFO(("%s: Couldn't read CIS information\n", __FUNCTION__));
+
+ /* Read the MAC address from the specified file */
+ if (dhd_read_file(filepath_efs, otp_mac_buf, sizeof(otp_mac_buf) - 1) < 0) {
+ DHD_ERROR(("%s: Couldn't read the file, "
+ "use the default MAC Address\n", __FUNCTION__));
+ if (dhd_set_default_macaddr(dhdp) < 0) {
+ return BCME_BADARG;
+ }
+ } else {
+ bzero((char *)eabuf, sizeof(eabuf));
+ strlcpy(eabuf, otp_mac_buf, sizeof(eabuf));
+ if (!bcm_ether_atoe(eabuf, mac)) {
+ DHD_ERROR(("%s : mac parsing err\n", __FUNCTION__));
+ if (dhd_set_default_macaddr(dhdp) < 0) {
+ return BCME_BADARG;
+ }
+ }
+ }
+#else
+ DHD_INFO(("%s: Couldn't read CIS information\n", __FUNCTION__));
+
+ /* Check the MAC address exported via sysfs */
+ if (ETHER_ISNULLADDR(&sysfs_mac_addr)) {
+ DHD_ERROR(("%s: Couldn't read the file, "
+ "use the default MAC Address\n", __FUNCTION__));
+ if (dhd_set_default_macaddr(dhdp) < 0) {
+ return BCME_BADARG;
+ }
+ } else {
+ /* sysfs mac addr is confirmed with valid format in set_mac_addr */
+ memcpy(mac, &sysfs_mac_addr, sizeof(sysfs_mac_addr));
+ }
+#endif /* !DHD_MAC_ADDR_EXPORT */
+ } else {
+ struct list_head mac_list;
+ unsigned char tuple_len = 0;
+ int found = 0;
+ int idx = -1; /* Invalid index */
+
+#ifdef DUMP_CIS
+ dhd_dump_cis_buf(dhdp, DUMP_CIS_SIZE);
+#endif /* DUMP_CIS */
+
+ /* Find a new tuple tag */
+ INIT_LIST_HEAD(&mac_list);
+ found = dhd_find_tuple_list_from_otp(dhdp, CIS_TUPLE_TAG_MACADDR,
+ &tuple_len, &mac_list);
+ if ((found > 0) && tuple_len == CIS_TUPLE_LEN_MACADDR) {
+ if (found == 1) {
+ tuple_entry_t *cur = list_entry((&mac_list)->next,
+ tuple_entry_t, list);
+ idx = cur->cis_idx;
+ } else {
+ /* Find the start index of MAC address */
+ idx = dhd_verify_macaddr(dhdp, &mac_list);
+ }
+ }
+
+ /* Find the MAC address */
+ if (idx > 0) {
+#ifdef DHD_EXPORT_CNTL_FILE
+ /*
+ * WAR for incorrect otp mac address (including multicast bit)
+ * for SEMCo e53_es31 module
+ */
+ if (strcmp(cidinfostr, "semco_sem_e53_es31") == 0) {
+ g_cis_buf[idx] &= 0xFE;
+ }
+#endif /* DHD_EXPORT_CNTL_FILE */
+ /* update MAC address */
+ snprintf(otp_mac_buf, sizeof(otp_mac_buf), MAC_CUSTOM_FORMAT,
+ (uint32)g_cis_buf[idx], (uint32)g_cis_buf[idx + 1],
+ (uint32)g_cis_buf[idx + 2], (uint32)g_cis_buf[idx + 3],
+ (uint32)g_cis_buf[idx + 4], (uint32)g_cis_buf[idx + 5]);
+ DHD_ERROR(("%s: MAC address is taken from OTP: " MACDBG "\n",
+ __FUNCTION__, MAC2STRDBG(&g_cis_buf[idx])));
+ } else {
+ /* MAC address info not found in the OTP; use the default value */
+ if (dhd_set_default_macaddr(dhdp) < 0) {
+ dhd_free_tuple_entry(dhdp, &mac_list);
+ return BCME_BADARG;
+ }
+ snprintf(otp_mac_buf, sizeof(otp_mac_buf), MAC_CUSTOM_FORMAT,
+ (uint32)mac->octet[0], (uint32)mac->octet[1],
+ (uint32)mac->octet[2], (uint32)mac->octet[3],
+ (uint32)mac->octet[4], (uint32)mac->octet[5]);
+ DHD_ERROR(("%s: Cannot find MAC address info from OTP,"
+ " Check module mac by initial value: " MACDBG "\n",
+ __FUNCTION__, MAC2STRDBG(mac->octet)));
+ }
+
+ dhd_free_tuple_entry(dhdp, &mac_list);
+#ifndef DHD_MAC_ADDR_EXPORT
+ dhd_write_file(filepath_efs, otp_mac_buf, strlen(otp_mac_buf));
+#else
+ /* Export otp_mac_buf to the sys/mac_addr */
+ if (!bcm_ether_atoe(otp_mac_buf, &sysfs_mac_addr)) {
+ DHD_ERROR(("%s : mac parsing err\n", __FUNCTION__));
+ if (dhd_set_default_macaddr(dhdp) < 0) {
+ return BCME_BADARG;
+ }
+ } else {
+ DHD_INFO(("%s : set mac address properly\n", __FUNCTION__));
+ /* set otp mac to sysfs */
+ memcpy(mac, &sysfs_mac_addr, sizeof(sysfs_mac_addr));
+ }
+#endif /* !DHD_MAC_ADDR_EXPORT */
+ }
+
+ if (_dhd_set_mac_address(dhd, 0, mac) == 0) {
+ DHD_INFO(("%s: MAC Address is set\n", __FUNCTION__));
+ } else {
+ DHD_ERROR(("%s: Failed to set MAC address\n", __FUNCTION__));
+ }
+
+ return 0;
+}
+#endif /* GET_MAC_FROM_OTP */
+
+/*
+ * XXX:SWWLAN-210178 SysFS MAC ADDR export
+ * The framework controls the MAC address through the sysfs mac_addr kernel
+ * object rather than the file system, so DHD does not need to write the MAC
+ * address to the file system directly.
+ */
+#ifndef DHD_MAC_ADDR_EXPORT
+#ifdef WRITE_MACADDR
+int
+dhd_write_macaddr(struct ether_addr *mac)
+{
+ char *filepath_data = MACINFO;
+ char *filepath_efs = MACINFO_EFS;
+ char mac_buf[MAC_BUF_SIZE];
+ int ret = 0;
+ int retry_cnt = 0;
+
+ memset(mac_buf, 0, sizeof(mac_buf));
+ snprintf(mac_buf, sizeof(mac_buf), MAC_CUSTOM_FORMAT,
+ (uint32)mac->octet[0], (uint32)mac->octet[1],
+ (uint32)mac->octet[2], (uint32)mac->octet[3],
+ (uint32)mac->octet[4], (uint32)mac->octet[5]);
+
+ if (filepath_data) {
+ for (retry_cnt = 0; retry_cnt < 3; retry_cnt++) {
+ /* Write MAC information into /data/.mac.info */
+ ret = dhd_write_file_and_check(filepath_data, mac_buf, strlen(mac_buf));
+ if (!ret) {
+ break;
+ }
+ }
+
+ if (ret < 0) {
+ DHD_ERROR(("%s: MAC address [%s] Failed to write into"
+ " File: %s\n", __FUNCTION__, mac_buf, filepath_data));
+ return BCME_ERROR;
+ }
+ } else {
+ DHD_ERROR(("%s: filepath_data doesn't exist\n", __FUNCTION__));
+ }
+
+ if (filepath_efs) {
+ for (retry_cnt = 0; retry_cnt < 3; retry_cnt++) {
+ /* Write MAC information into /efs/wifi/.mac.info */
+ ret = dhd_write_file_and_check(filepath_efs, mac_buf, strlen(mac_buf));
+ if (!ret) {
+ break;
+ }
+ }
+
+ if (ret < 0) {
+ DHD_ERROR(("%s: MAC address [%s] Failed to write into"
+ " File: %s\n", __FUNCTION__, mac_buf, filepath_efs));
+ return BCME_ERROR;
+ }
+ } else {
+ DHD_ERROR(("%s: filepath_efs doesn't exist\n", __FUNCTION__));
+ }
+
+ return ret;
+}
+#endif /* WRITE_MACADDR */
+#endif /* !DHD_MAC_ADDR_EXPORT */
+
+#if defined(USE_CID_CHECK) || defined(USE_DIRECT_VID_TAG)
+static int
+dhd_find_tuple_idx_from_otp(dhd_pub_t *dhdp, int req_tup, unsigned char *req_tup_len)
+{
+ struct list_head head;
+ int start_idx;
+ int entry_num;
+
+ if (!g_cis_buf) {
+ DHD_ERROR(("%s: Couldn't find cis info from"
+ " local buffer\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ INIT_LIST_HEAD(&head);
+ entry_num = dhd_find_tuple_list_from_otp(dhdp, req_tup, req_tup_len, &head);
+ /* find the first cis index from the tuple list */
+ if (entry_num > 0) {
+ tuple_entry_t *cur = list_entry((&head)->next, tuple_entry_t, list);
+ start_idx = cur->cis_idx;
+ } else {
+ start_idx = -1; /* Invalid index */
+ }
+
+ dhd_free_tuple_entry(dhdp, &head);
+
+ return start_idx;
+}
+#endif /* USE_CID_CHECK || USE_DIRECT_VID_TAG */
+
+#ifdef USE_CID_CHECK
+/* Definitions for module information */
+#define MAX_VID_LEN 8
+
+#ifdef SUPPORT_MULTIPLE_BOARDTYPE
+#define MAX_BNAME_LEN 6
+
+typedef struct {
+ uint8 b_len;
+ unsigned char btype[MAX_VID_LEN];
+ char bname[MAX_BNAME_LEN];
+} board_info_t;
+
+#if defined(BCM4361_CHIP)
+board_info_t semco_PA_info[] = {
+ { 3, { 0x0f, 0x08, }, { "_ePA" } }, /* semco All ePA */
+ { 3, { 0x27, 0x08, }, { "_iPA" } }, /* semco 2g iPA, 5g ePA */
+ { 3, { 0x1a, 0x08, }, { "_iPA" } }, /* semco 2g iPA, 5g ePA old */
+ { 0, { 0x00, }, { "" } } /* Default: Not specified yet */
+};
+#else
+board_info_t semco_board_info[] = {
+ { 3, { 0x51, 0x07, }, { "_b90b" } }, /* semco three antenna */
+ { 3, { 0x61, 0x07, }, { "_b90b" } }, /* semco two antenna */
+ { 0, { 0x00, }, { "" } } /* Default: Not specified yet */
+};
+board_info_t murata_board_info[] = {
+ { 3, { 0xa5, 0x07, }, { "_b90" } }, /* murata three antenna */
+ { 3, { 0xb0, 0x07, }, { "_b90b" } }, /* murata two antenna */
+ { 3, { 0xb1, 0x07, }, { "_es5" } }, /* murata two antenna */
+ { 0, { 0x00, }, { "" } } /* Default: Not specified yet */
+};
+#endif /* BCM4361_CHIP */
+#endif /* SUPPORT_MULTIPLE_BOARDTYPE */
+
+typedef struct {
+ uint8 vid_length;
+ unsigned char vid[MAX_VID_LEN];
+ char cid_info[MAX_VNAME_LEN];
+} vid_info_t;
+
+#if defined(BCM4335_CHIP)
+vid_info_t vid_info[] = {
+ { 3, { 0x33, 0x66, }, { "semcosh" } }, /* B0 Sharp 5G-FEM */
+ { 3, { 0x33, 0x33, }, { "semco" } }, /* B0 Skyworks 5G-FEM and A0 chip */
+ { 3, { 0x33, 0x88, }, { "semco3rd" } }, /* B0 Syri 5G-FEM */
+ { 3, { 0x00, 0x11, }, { "muratafem1" } }, /* B0 ANADIGICS 5G-FEM */
+ { 3, { 0x00, 0x22, }, { "muratafem2" } }, /* B0 TriQuint 5G-FEM */
+ { 3, { 0x00, 0x33, }, { "muratafem3" } }, /* 3rd FEM: Reserved */
+ { 0, { 0x00, }, { "murata" } } /* Default: for Murata A0 module */
+};
+#elif defined(BCM4339_CHIP) || defined(BCM4354_CHIP) || \
+ defined(BCM4356_CHIP)
+vid_info_t vid_info[] = { /* 4339:2G FEM+5G FEM ,4354: 2G FEM+5G FEM */
+ { 3, { 0x33, 0x33, }, { "semco" } }, /* 4339:Skyworks+sharp,4354:Panasonic+Panasonic */
+ { 3, { 0x33, 0x66, }, { "semco" } }, /* 4339: , 4354:Panasonic+SEMCO */
+ { 3, { 0x33, 0x88, }, { "semco3rd" } }, /* 4339: , 4354:SEMCO+SEMCO */
+ { 3, { 0x90, 0x01, }, { "wisol" } }, /* 4339: , 4354:Microsemi+Panasonic */
+ { 3, { 0x90, 0x02, }, { "wisolfem1" } }, /* 4339: , 4354:Panasonic+Panasonic */
+ { 3, { 0x90, 0x03, }, { "wisolfem2" } }, /* 4354:Murata+Panasonic */
+ { 3, { 0x00, 0x11, }, { "muratafem1" } }, /* 4339: , 4354:Murata+Anadigics */
+ { 3, { 0x00, 0x22, }, { "muratafem2"} }, /* 4339: , 4354:Murata+Triquint */
+ { 0, { 0x00, }, { "samsung" } } /* Default: Not specified yet */
+};
+#elif defined(BCM4358_CHIP)
+vid_info_t vid_info[] = {
+ { 3, { 0x33, 0x33, }, { "semco_b85" } },
+ { 3, { 0x33, 0x66, }, { "semco_b85" } },
+ { 3, { 0x33, 0x88, }, { "semco3rd_b85" } },
+ { 3, { 0x90, 0x01, }, { "wisol_b85" } },
+ { 3, { 0x90, 0x02, }, { "wisolfem1_b85" } },
+ { 3, { 0x90, 0x03, }, { "wisolfem2_b85" } },
+ { 3, { 0x31, 0x90, }, { "wisol_b85b" } },
+ { 3, { 0x00, 0x11, }, { "murata_b85" } },
+ { 3, { 0x00, 0x22, }, { "murata_b85"} },
+ { 6, { 0x00, 0xFF, 0xFF, 0x00, 0x00, }, { "murata_b85"} },
+ { 3, { 0x10, 0x33, }, { "semco_b85a" } },
+ { 3, { 0x30, 0x33, }, { "semco_b85b" } },
+ { 3, { 0x31, 0x33, }, { "semco_b85b" } },
+ { 3, { 0x10, 0x22, }, { "murata_b85a" } },
+ { 3, { 0x20, 0x22, }, { "murata_b85a" } },
+ { 3, { 0x21, 0x22, }, { "murata_b85a" } },
+ { 3, { 0x23, 0x22, }, { "murata_b85a" } },
+ { 3, { 0x31, 0x22, }, { "murata_b85b" } },
+ { 0, { 0x00, }, { "samsung" } } /* Default: Not specified yet */
+};
+#elif defined(BCM4359_CHIP)
+vid_info_t vid_info[] = {
+#if defined(SUPPORT_BCM4359_MIXED_MODULES)
+ { 3, { 0x34, 0x33, }, { "semco_b90b" } },
+ { 3, { 0x40, 0x33, }, { "semco_b90b" } },
+ { 3, { 0x41, 0x33, }, { "semco_b90b" } },
+ { 3, { 0x11, 0x33, }, { "semco_b90b" } },
+ { 3, { 0x33, 0x66, }, { "semco_b90b" } },
+ { 3, { 0x23, 0x22, }, { "murata_b90b" } },
+ { 3, { 0x40, 0x22, }, { "murata_b90b" } },
+ { 3, { 0x10, 0x90, }, { "wisol_b90b" } },
+ { 3, { 0x33, 0x33, }, { "semco_b90s_b1" } },
+ { 3, { 0x66, 0x33, }, { "semco_b90s_c0" } },
+ { 3, { 0x60, 0x22, }, { "murata_b90s_b1" } },
+ { 3, { 0x61, 0x22, }, { "murata_b90s_b1" } },
+ { 3, { 0x62, 0x22, }, { "murata_b90s_b1" } },
+ { 3, { 0x63, 0x22, }, { "murata_b90s_b1" } },
+ { 3, { 0x70, 0x22, }, { "murata_b90s_c0" } },
+ { 3, { 0x71, 0x22, }, { "murata_b90s_c0" } },
+ { 3, { 0x72, 0x22, }, { "murata_b90s_c0" } },
+ { 3, { 0x73, 0x22, }, { "murata_b90s_c0" } },
+ { 0, { 0x00, }, { "samsung" } } /* Default: Not specified yet */
+#else /* SUPPORT_BCM4359_MIXED_MODULES */
+ { 3, { 0x34, 0x33, }, { "semco" } },
+ { 3, { 0x40, 0x33, }, { "semco" } },
+ { 3, { 0x41, 0x33, }, { "semco" } },
+ { 3, { 0x11, 0x33, }, { "semco" } },
+ { 3, { 0x33, 0x66, }, { "semco" } },
+ { 3, { 0x23, 0x22, }, { "murata" } },
+ { 3, { 0x40, 0x22, }, { "murata" } },
+ { 3, { 0x51, 0x22, }, { "murata" } },
+ { 3, { 0x52, 0x22, }, { "murata" } },
+ { 3, { 0x10, 0x90, }, { "wisol" } },
+ { 0, { 0x00, }, { "samsung" } } /* Default: Not specified yet */
+#endif /* SUPPORT_BCM4359_MIXED_MODULES */
+};
+#elif defined(BCM4361_CHIP)
+vid_info_t vid_info[] = {
+#if defined(SUPPORT_MIXED_MODULES)
+ { 3, { 0x66, 0x33, }, { "semco_sky_r00a_e000_a0" } },
+ { 3, { 0x30, 0x33, }, { "semco_sky_r01a_e30a_a1" } },
+ { 3, { 0x31, 0x33, }, { "semco_sky_r02a_e30a_a1" } },
+ { 3, { 0x32, 0x33, }, { "semco_sky_r02a_e30a_a1" } },
+ { 3, { 0x51, 0x33, }, { "semco_sky_r01d_e31_b0" } },
+ { 3, { 0x61, 0x33, }, { "semco_sem_r01f_e31_b0" } },
+ { 3, { 0x62, 0x33, }, { "semco_sem_r02g_e31_b0" } },
+ { 3, { 0x71, 0x33, }, { "semco_sky_r01h_e32_b0" } },
+ { 3, { 0x81, 0x33, }, { "semco_sem_r01i_e32_b0" } },
+ { 3, { 0x82, 0x33, }, { "semco_sem_r02j_e32_b0" } },
+ { 3, { 0x91, 0x33, }, { "semco_sem_r02a_e32a_b2" } },
+ { 3, { 0xa1, 0x33, }, { "semco_sem_r02b_e32a_b2" } },
+ { 3, { 0x12, 0x22, }, { "murata_nxp_r012_1kl_a1" } },
+ { 3, { 0x13, 0x22, }, { "murata_mur_r013_1kl_b0" } },
+ { 3, { 0x14, 0x22, }, { "murata_mur_r014_1kl_b0" } },
+ { 3, { 0x15, 0x22, }, { "murata_mur_r015_1kl_b0" } },
+ { 3, { 0x20, 0x22, }, { "murata_mur_r020_1kl_b0" } },
+ { 3, { 0x21, 0x22, }, { "murata_mur_r021_1kl_b0" } },
+ { 3, { 0x22, 0x22, }, { "murata_mur_r022_1kl_b0" } },
+ { 3, { 0x23, 0x22, }, { "murata_mur_r023_1kl_b0" } },
+ { 3, { 0x24, 0x22, }, { "murata_mur_r024_1kl_b0" } },
+ { 3, { 0x30, 0x22, }, { "murata_mur_r030_1kl_b0" } },
+ { 3, { 0x31, 0x22, }, { "murata_mur_r031_1kl_b0" } },
+ { 3, { 0x32, 0x22, }, { "murata_mur_r032_1kl_b0" } },
+ { 3, { 0x33, 0x22, }, { "murata_mur_r033_1kl_b0" } },
+ { 3, { 0x34, 0x22, }, { "murata_mur_r034_1kl_b0" } },
+ { 3, { 0x50, 0x22, }, { "murata_mur_r020_1qw_b2" } },
+ { 3, { 0x51, 0x22, }, { "murata_mur_r021_1qw_b2" } },
+ { 3, { 0x52, 0x22, }, { "murata_mur_r022_1qw_b2" } },
+ { 3, { 0x61, 0x22, }, { "murata_mur_r031_1qw_b2" } },
+ { 0, { 0x00, }, { "samsung" } } /* Default: Not specified yet */
+#endif /* SUPPORT_MIXED_MODULES */
+};
+#elif defined(BCM4375_CHIP)
+vid_info_t vid_info[] = {
+#if defined(SUPPORT_MIXED_MODULES)
+ { 3, { 0x11, 0x33, }, { "semco_sky_e41_es11" } },
+ { 3, { 0x33, 0x33, }, { "semco_sem_e43_es33" } },
+ { 3, { 0x34, 0x33, }, { "semco_sem_e43_es34" } },
+ { 3, { 0x35, 0x33, }, { "semco_sem_e43_es35" } },
+ { 3, { 0x36, 0x33, }, { "semco_sem_e43_es36" } },
+ { 3, { 0x41, 0x33, }, { "semco_sem_e43_cs41" } },
+ { 3, { 0x51, 0x33, }, { "semco_sem_e43_cs51" } },
+ { 3, { 0x53, 0x33, }, { "semco_sem_e43_cs53" } },
+ { 3, { 0x61, 0x33, }, { "semco_sky_e43_cs61" } },
+ { 3, { 0x10, 0x22, }, { "murata_mur_1rh_es10" } },
+ { 3, { 0x11, 0x22, }, { "murata_mur_1rh_es11" } },
+ { 3, { 0x12, 0x22, }, { "murata_mur_1rh_es12" } },
+ { 3, { 0x13, 0x22, }, { "murata_mur_1rh_es13" } },
+ { 3, { 0x20, 0x22, }, { "murata_mur_1rh_es20" } },
+ { 3, { 0x32, 0x22, }, { "murata_mur_1rh_es32" } },
+ { 3, { 0x41, 0x22, }, { "murata_mur_1rh_es41" } },
+ { 3, { 0x42, 0x22, }, { "murata_mur_1rh_es42" } },
+ { 3, { 0x43, 0x22, }, { "murata_mur_1rh_es43" } },
+ { 3, { 0x44, 0x22, }, { "murata_mur_1rh_es44" } }
+#endif /* SUPPORT_MIXED_MODULES */
+};
+#elif defined(BCM4389_CHIP_DEF)
+vid_info_t vid_info[] = {
+#if defined(SUPPORT_MIXED_MODULES)
+ { 3, { 0x21, 0x33, }, { "semco_sem_e53_es23" } },
+ { 3, { 0x23, 0x33, }, { "semco_sem_e53_es23" } },
+ { 3, { 0x24, 0x33, }, { "semco_sem_e53_es24" } },
+ { 3, { 0x25, 0x33, }, { "semco_sem_e53_es25" } },
+ { 3, { 0x31, 0x33, }, { "semco_sem_e53_es31" } },
+ { 3, { 0x32, 0x33, }, { "semco_sem_e53_es32" } },
+ { 3, { 0x40, 0x33, }, { "semco_sem_e53_es40" } },
+ { 3, { 0x21, 0x22, }, { "murata_mur_1wk_es21" } },
+ { 3, { 0x30, 0x22, }, { "murata_mur_1wk_es30" } },
+ { 3, { 0x31, 0x22, }, { "murata_mur_1wk_es31" } },
+ { 3, { 0x32, 0x22, }, { "murata_mur_1wk_es32" } },
+ { 3, { 0x40, 0x22, }, { "murata_mur_1wk_es40" } },
+ { 3, { 0x41, 0x22, }, { "murata_mur_1wk_es41" } },
+ { 3, { 0x42, 0x22, }, { "murata_mur_1wk_es42" } },
+ { 3, { 0x43, 0x22, }, { "murata_mur_1wk_es43" } },
+ { 3, { 0x50, 0x22, }, { "murata_mur_1wk_es50" } }
+#endif /* SUPPORT_MIXED_MODULES */
+};
+#else
+vid_info_t vid_info[] = {
+ { 0, { 0x00, }, { "samsung" } } /* Default: Not specified yet */
+};
+#endif /* BCM_CHIP_ID */
+
+/* CID management functions */
+
+char *
+dhd_get_cid_info(unsigned char *vid, int vid_length)
+{
+ int i;
+
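+ /*
+ * The table's vid_length appears to count one byte beyond the raw VID
+ * bytes listed (entries use length 3 for two VID bytes), hence the
+ * "- 1" when comparing against the caller's vid_length.
+ */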
+ for (i = 0; i < ARRAYSIZE(vid_info); i++) {
+ if (vid_info[i].vid_length - 1 == vid_length &&
+ !memcmp(vid_info[i].vid, vid, vid_length)) {
+ return vid_info[i].cid_info;
+ }
+ }
+
+ DHD_ERROR(("%s : Can't find the cid info\n", __FUNCTION__));
+ return NULL;
+}
+
+int
+dhd_check_module_cid(dhd_pub_t *dhdp)
+{
+ int ret = -1;
+#ifndef DHD_EXPORT_CNTL_FILE
+ const char *cidfilepath = CIDINFO;
+#endif /* DHD_EXPORT_CNTL_FILE */
+ int idx, max;
+ vid_info_t *cur_info;
+ unsigned char *tuple_start = NULL;
+ unsigned char tuple_length = 0;
+ unsigned char cid_info[MAX_VNAME_LEN];
+ int found = FALSE;
+#ifdef SUPPORT_MULTIPLE_BOARDTYPE
+ board_info_t *cur_b_info = NULL;
+ board_info_t *vendor_b_info = NULL;
+ unsigned char *btype_start;
+ unsigned char boardtype_len = 0;
+#endif /* SUPPORT_MULTIPLE_BOARDTYPE */
+
+ /* Try reading out from CIS */
+ if (!g_cis_buf) {
+ DHD_INFO(("%s: Couldn't read CIS info\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ DHD_INFO(("%s: Reading CIS from local buffer\n", __FUNCTION__));
+#ifdef DUMP_CIS
+ dhd_dump_cis_buf(dhdp, DUMP_CIS_SIZE);
+#endif /* DUMP_CIS */
+
+ idx = dhd_find_tuple_idx_from_otp(dhdp, CIS_TUPLE_TAG_VENDOR, &tuple_length);
+ if (idx > 0) {
+ found = TRUE;
+ tuple_start = &g_cis_buf[idx];
+ }
+
+ if (found) {
+ max = sizeof(vid_info) / sizeof(vid_info_t);
+ for (idx = 0; idx < max; idx++) {
+ cur_info = &vid_info[idx];
+#ifdef BCM4358_CHIP
+ if (cur_info->vid_length == 6 && tuple_length == 6) {
+ if (cur_info->vid[0] == tuple_start[0] &&
+ cur_info->vid[3] == tuple_start[3] &&
+ cur_info->vid[4] == tuple_start[4]) {
+ goto check_board_type;
+ }
+ }
+#endif /* BCM4358_CHIP */
+ if ((cur_info->vid_length == tuple_length) &&
+ (cur_info->vid_length != 0) &&
+ (memcmp(cur_info->vid, tuple_start,
+ cur_info->vid_length - 1) == 0)) {
+ goto check_board_type;
+ }
+ }
+ }
+
+ /* find default nvram, if exist */
+ DHD_ERROR(("%s: cannot find CIS TUPLE set as default\n", __FUNCTION__));
+ max = sizeof(vid_info) / sizeof(vid_info_t);
+ for (idx = 0; idx < max; idx++) {
+ cur_info = &vid_info[idx];
+ if (cur_info->vid_length == 0) {
+ goto write_cid;
+ }
+ }
+ DHD_ERROR(("%s: cannot find default CID\n", __FUNCTION__));
+ return BCME_ERROR;
+
+check_board_type:
+#ifdef SUPPORT_MULTIPLE_BOARDTYPE
+ idx = dhd_find_tuple_idx_from_otp(dhdp, CIS_TUPLE_TAG_BOARDTYPE, &tuple_length);
+ if (idx > 0) {
+ btype_start = &g_cis_buf[idx];
+ boardtype_len = tuple_length;
+ DHD_INFO(("%s: board type found.\n", __FUNCTION__));
+ } else {
+ boardtype_len = 0;
+ }
+#if defined(BCM4361_CHIP)
+ vendor_b_info = semco_PA_info;
+ max = sizeof(semco_PA_info) / sizeof(board_info_t);
+#else
+ if (strcmp(cur_info->cid_info, "semco") == 0) {
+ vendor_b_info = semco_board_info;
+ max = sizeof(semco_board_info) / sizeof(board_info_t);
+ } else if (strcmp(cur_info->cid_info, "murata") == 0) {
+ vendor_b_info = murata_board_info;
+ max = sizeof(murata_board_info) / sizeof(board_info_t);
+ } else {
+ max = 0;
+ }
+#endif /* BCM4361_CHIP */
+ if (boardtype_len) {
+ for (idx = 0; idx < max; idx++) {
+ cur_b_info = vendor_b_info;
+ if ((cur_b_info->b_len == boardtype_len) &&
+ (cur_b_info->b_len != 0) &&
+ (memcmp(cur_b_info->btype, btype_start,
+ cur_b_info->b_len - 1) == 0)) {
+ DHD_INFO(("%s : board type name : %s\n",
+ __FUNCTION__, cur_b_info->bname));
+ break;
+ }
+ cur_b_info = NULL;
+ vendor_b_info++;
+ }
+ }
+#endif /* SUPPORT_MULTIPLE_BOARDTYPE */
+
+write_cid:
+#ifdef SUPPORT_MULTIPLE_BOARDTYPE
+ if (cur_b_info && cur_b_info->b_len > 0) {
+ strcpy(cid_info, cur_info->cid_info);
+ strcpy(cid_info + strlen(cur_info->cid_info), cur_b_info->bname);
+ } else
+#endif /* SUPPORT_MULTIPLE_BOARDTYPE */
+ strcpy(cid_info, cur_info->cid_info);
+
+ DHD_INFO(("%s: CIS MATCH FOUND : %s\n", __FUNCTION__, cid_info));
+#ifndef DHD_EXPORT_CNTL_FILE
+ dhd_write_file(cidfilepath, cid_info, strlen(cid_info) + 1);
+#else
+ strlcpy(cidinfostr, cid_info, MAX_VNAME_LEN);
+#endif /* DHD_EXPORT_CNTL_FILE */
+
+ return ret;
+}
+
+#ifdef SUPPORT_MULTIPLE_MODULE_CIS
+#ifndef DHD_EXPORT_CNTL_FILE
+static bool
+dhd_check_module(char *module_name)
+{
+ char vname[MAX_VNAME_LEN];
+ const char *cidfilepath = CIDINFO;
+ int ret;
+
+ memset(vname, 0, sizeof(vname));
+ ret = dhd_read_file(cidfilepath, vname, sizeof(vname) - 1);
+ if (ret < 0) {
+ return FALSE;
+ }
+ DHD_INFO(("%s: This module is %s \n", __FUNCTION__, vname));
+ return strstr(vname, module_name) ? TRUE : FALSE;
+}
+#else
+bool
+dhd_check_module(char *module_name)
+{
+ return strstr(cidinfostr, module_name) ? TRUE : FALSE;
+}
+#endif /* !DHD_EXPORT_CNTL_FILE */
+
+int
+dhd_check_module_b85a(void)
+{
+ int ret;
+ char *vname_b85a = "_b85a";
+
+ if (dhd_check_module(vname_b85a)) {
+ DHD_INFO(("%s: It's a b85a module\n", __FUNCTION__));
+ ret = 1;
+ } else {
+ DHD_INFO(("%s: It is not a b85a module\n", __FUNCTION__));
+ ret = -1;
+ }
+
+ return ret;
+}
+
+int
+dhd_check_module_b90(void)
+{
+ int ret = 0;
+ char *vname_b90b = "_b90b";
+ char *vname_b90s = "_b90s";
+
+ if (dhd_check_module(vname_b90b)) {
+ DHD_INFO(("%s: It's a b90b module \n", __FUNCTION__));
+ ret = BCM4359_MODULE_TYPE_B90B;
+ } else if (dhd_check_module(vname_b90s)) {
+ DHD_INFO(("%s: It's a b90s module\n", __FUNCTION__));
+ ret = BCM4359_MODULE_TYPE_B90S;
+ } else {
+ DHD_ERROR(("%s: It's neither b90b nor b90s\n", __FUNCTION__));
+ ret = BCME_ERROR;
+ }
+
+ return ret;
+}
+#endif /* SUPPORT_MULTIPLE_MODULE_CIS */
+
+#define CID_FEM_MURATA "_mur_"
+/* extract module type from cid information */
+/* XXX: extract the substring that starts after the (index - 1)th '_'
+ * delimiter; it is used as the module type information.
+ * For example, if the cid information is 'semco_sky_r02a_e30a_a1',
+ * the output (module type) is 'r02a_e30a_a1' when index is 3.
+ */
+int
+dhd_check_module_bcm(char *module_type, int index, bool *is_murata_fem)
+{
+ int ret = 0, i;
+ char vname[MAX_VNAME_LEN];
+ char *ptr = NULL;
+#ifndef DHD_EXPORT_CNTL_FILE
+ const char *cidfilepath = CIDINFO;
+#endif /* DHD_EXPORT_CNTL_FILE */
+
+ memset(vname, 0, sizeof(vname));
+
+#ifndef DHD_EXPORT_CNTL_FILE
+ ret = dhd_read_file(cidfilepath, vname, sizeof(vname) - 1);
+ if (ret < 0) {
+ DHD_ERROR(("%s: failed to get module infomaion from .cid.info\n",
+ __FUNCTION__));
+ return ret;
+ }
+#else
+ strlcpy(vname, cidinfostr, MAX_VNAME_LEN);
+#endif /* DHD_EXPORT_CNTL_FILE */
+
+ for (i = 1, ptr = vname; i < index && ptr; i++) {
+ ptr = bcmstrstr(ptr, "_");
+ if (ptr) {
+ ptr++;
+ }
+ }
+
+ if (bcmstrnstr(vname, MAX_VNAME_LEN, CID_FEM_MURATA, 5)) {
+ *is_murata_fem = TRUE;
+ }
+
+ if (ptr) {
+ memcpy(module_type, ptr, strlen(ptr));
+ } else {
+ DHD_ERROR(("%s: failed to get module infomaion\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ DHD_INFO(("%s: module type = %s \n", __FUNCTION__, module_type));
+
+ return ret;
+}
+#endif /* USE_CID_CHECK */
+
+#ifdef USE_DIRECT_VID_TAG
+int
+dhd_check_module_cid(dhd_pub_t *dhdp)
+{
+ int ret = BCME_ERROR;
+ int idx;
+ unsigned char tuple_length = 0;
+ unsigned char *vid = NULL;
+ unsigned char cid_info[MAX_VNAME_LEN];
+#ifndef DHD_EXPORT_CNTL_FILE
+ const char *cidfilepath = CIDINFO;
+#endif /* DHD_EXPORT_CNTL_FILE */
+
+ /* Try reading out from CIS */
+ if (!g_cis_buf) {
+ DHD_INFO(("%s: Couldn't read CIS info\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ DHD_INFO(("%s: Reading CIS from local buffer\n", __FUNCTION__));
+#ifdef DUMP_CIS
+ dhd_dump_cis_buf(dhdp, DUMP_CIS_SIZE);
+#endif /* DUMP_CIS */
+ idx = dhd_find_tuple_idx_from_otp(dhdp, CIS_TUPLE_TAG_VENDOR, &tuple_length);
+ if (idx > 0) {
+ vid = &g_cis_buf[idx];
+ DHD_INFO(("%s: VID FOUND : 0x%x%x\n", __FUNCTION__,
+ vid[VENDOR_OFF], vid[MD_REV_OFF]));
+ } else {
+ DHD_ERROR(("%s: use nvram default\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ memset(cid_info, 0, sizeof(cid_info));
+ cid_info[MD_REV_OFF] = vid[MD_REV_OFF];
+ cid_info[VENDOR_OFF] = vid[VENDOR_OFF];
+#ifndef DHD_EXPORT_CNTL_FILE
+ dhd_write_file(cidfilepath, cid_info, strlen(cid_info) + 1);
+#else
+ strlcpy(cidinfostr, cid_info, MAX_VNAME_LEN);
+#endif /* DHD_EXPORT_CNTL_FILE */
+
+ DHD_INFO(("%s: cidinfostr %x%x\n", __FUNCTION__,
+ cidinfostr[VENDOR_OFF], cidinfostr[MD_REV_OFF]));
+ return ret;
+}
+
+int
+dhd_check_stored_module_info(char *vid)
+{
+ int ret = BCME_OK;
+#ifndef DHD_EXPORT_CNTL_FILE
+ const char *cidfilepath = CIDINFO;
+#endif /* DHD_EXPORT_CNTL_FILE */
+
+ memset(vid, 0, MAX_VID_LEN);
+
+#ifndef DHD_EXPORT_CNTL_FILE
+ ret = dhd_read_file(cidfilepath, vid, MAX_VID_LEN - 1);
+ if (ret != BCME_OK) {
+ DHD_ERROR(("%s: failed to get module infomaion from .cid.info\n",
+ __FUNCTION__));
+ return ret;
+ }
+#else
+ strlcpy(vid, cidinfostr, MAX_VID_LEN);
+#endif /* DHD_EXPORT_CNTL_FILE */
+
+ if (vid[0] == (char)0) {
+ DHD_ERROR(("%s : Failed to get module information \n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ DHD_INFO(("%s: stored VID= 0x%x%x\n", __FUNCTION__, vid[VENDOR_OFF], vid[MD_REV_OFF]));
+ return ret;
+}
+#endif /* USE_DIRECT_VID_TAG */
+#endif /* DHD_USE_CISINFO */
diff --git a/bcmdhd.101.10.361.x/dhd_custom_exynos.c b/bcmdhd.101.10.361.x/dhd_custom_exynos.c
new file mode 100755
index 0000000..01e1f54
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_custom_exynos.c
@@ -0,0 +1,333 @@
+/*
+ * Platform Dependent file for Samsung Exynos
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+#include <linux/device.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/poll.h>
+#include <linux/miscdevice.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/workqueue.h>
+#include <linux/unistd.h>
+#include <linux/bug.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#if defined(CONFIG_SOC_EXYNOS8895) || defined(CONFIG_SOC_EXYNOS9810) || \
+ defined(CONFIG_SOC_EXYNOS9820) || defined(CONFIG_SOC_EXYNOS9830) || \
+ defined(CONFIG_SOC_EXYNOS2100) || defined(CONFIG_SOC_EXYNOS1000)
+#include <linux/exynos-pci-ctrl.h>
+#endif /* CONFIG_SOC_EXYNOS8895 || CONFIG_SOC_EXYNOS9810 ||
+ * CONFIG_SOC_EXYNOS9820 || CONFIG_SOC_EXYNOS9830 ||
+ * CONFIG_SOC_EXYNOS2100 || CONFIG_SOC_EXYNOS1000
+ */
+
+#if defined(CONFIG_64BIT)
+#include <asm-generic/gpio.h>
+#endif /* CONFIG_64BIT */
+
+#ifdef BCMDHD_MODULAR
+#if IS_ENABLED(CONFIG_SEC_SYSFS)
+#include <linux/sec_sysfs.h>
+#endif /* CONFIG_SEC_SYSFS */
+#if IS_ENABLED(CONFIG_DRV_SAMSUNG)
+#include <linux/sec_class.h>
+#endif /* CONFIG_DRV_SAMSUNG */
+#else
+#if defined(CONFIG_SEC_SYSFS)
+#include <linux/sec_sysfs.h>
+#elif defined(CONFIG_DRV_SAMSUNG)
+#include <linux/sec_class.h>
+#endif /* CONFIG_SEC_SYSFS */
+#endif /* BCMDHD_MODULAR */
+#include <linux/wlan_plat.h>
+
+#if defined(CONFIG_MACH_A7LTE) || defined(CONFIG_NOBLESSE)
+#define PINCTL_DELAY 150
+#endif /* CONFIG_MACH_A7LTE || CONFIG_NOBLESSE */
+
+#ifdef CONFIG_BROADCOM_WIFI_RESERVED_MEM
+extern void dhd_exit_wlan_mem(void);
+extern int dhd_init_wlan_mem(void);
+extern void *dhd_wlan_mem_prealloc(int section, unsigned long size);
+#endif /* CONFIG_BROADCOM_WIFI_RESERVED_MEM */
+
+#define WIFI_TURNON_DELAY 200
+static int wlan_pwr_on = -1;
+
+#ifdef CONFIG_BCMDHD_OOB_HOST_WAKE
+static int wlan_host_wake_irq = 0;
+static unsigned int wlan_host_wake_up = -1;
+#endif /* CONFIG_BCMDHD_OOB_HOST_WAKE */
+
+#if defined(CONFIG_MACH_A7LTE) || defined(CONFIG_NOBLESSE)
+extern struct device *mmc_dev_for_wlan;
+#endif /* CONFIG_MACH_A7LTE || CONFIG_NOBLESSE */
+
+#ifdef CONFIG_BCMDHD_PCIE
+extern int pcie_ch_num;
+extern void exynos_pcie_pm_resume(int);
+extern void exynos_pcie_pm_suspend(int);
+#endif /* CONFIG_BCMDHD_PCIE */
+
+#if defined(CONFIG_SOC_EXYNOS7870) || defined(CONFIG_SOC_EXYNOS9110)
+extern struct mmc_host *wlan_mmc;
+extern void mmc_ctrl_power(struct mmc_host *host, bool onoff);
+#endif /* SOC_EXYNOS7870 || CONFIG_SOC_EXYNOS9110 */
+
+static int
+dhd_wlan_power(int onoff)
+{
+#if defined(CONFIG_MACH_A7LTE) || defined(CONFIG_NOBLESSE)
+ struct pinctrl *pinctrl = NULL;
+#endif /* CONFIG_MACH_A7LTE || CONFIG_NOBLESSE */
+
+ printk(KERN_INFO"%s Enter: power %s\n", __FUNCTION__, onoff ? "on" : "off");
+
+#if defined(CONFIG_MACH_A7LTE) || defined(CONFIG_NOBLESSE)
+ if (onoff) {
+ pinctrl = devm_pinctrl_get_select(mmc_dev_for_wlan, "sdio_wifi_on");
+ if (IS_ERR(pinctrl))
+ printk(KERN_INFO "%s WLAN SDIO GPIO control error\n", __FUNCTION__);
+ msleep(PINCTL_DELAY);
+ }
+#endif /* CONFIG_MACH_A7LTE || CONFIG_NOBLESSE */
+
+ if (gpio_direction_output(wlan_pwr_on, onoff)) {
+ printk(KERN_ERR "%s failed to control WLAN_REG_ON to %s\n",
+ __FUNCTION__, onoff ? "HIGH" : "LOW");
+ return -EIO;
+ }
+
+#if defined(CONFIG_MACH_A7LTE) || defined(CONFIG_NOBLESSE)
+ if (!onoff) {
+ pinctrl = devm_pinctrl_get_select(mmc_dev_for_wlan, "sdio_wifi_off");
+ if (IS_ERR(pinctrl))
+ printk(KERN_INFO "%s WLAN SDIO GPIO control error\n", __FUNCTION__);
+ }
+#endif /* CONFIG_MACH_A7LTE || CONFIG_NOBLESSE */
+
+#if defined(CONFIG_SOC_EXYNOS7870) || defined(CONFIG_SOC_EXYNOS9110)
+ if (wlan_mmc)
+ mmc_ctrl_power(wlan_mmc, onoff);
+#endif /* SOC_EXYNOS7870 || CONFIG_SOC_EXYNOS9110 */
+ return 0;
+}
+
+static int
+dhd_wlan_reset(int onoff)
+{
+ return 0;
+}
+
+#ifndef CONFIG_BCMDHD_PCIE
+extern void (*notify_func_callback)(void *dev_id, int state);
+extern void *mmc_host_dev;
+#endif /* !CONFIG_BCMDHD_PCIE */
+
+static int
+dhd_wlan_set_carddetect(int val)
+{
+#ifndef CONFIG_BCMDHD_PCIE
+ pr_err("%s: notify_func=%p, mmc_host_dev=%p, val=%d\n",
+ __FUNCTION__, notify_func_callback, mmc_host_dev, val);
+
+ if (notify_func_callback) {
+ notify_func_callback(mmc_host_dev, val);
+ } else {
+ pr_warning("%s: Nobody to notify\n", __FUNCTION__);
+ }
+#else
+ if (val) {
+ exynos_pcie_pm_resume(pcie_ch_num);
+ } else {
+ exynos_pcie_pm_suspend(pcie_ch_num);
+ }
+#endif /* CONFIG_BCMDHD_PCIE */
+
+ return 0;
+}
+
+int __init
+dhd_wlan_init_gpio(void)
+{
+ const char *wlan_node = "samsung,brcm-wlan";
+ struct device_node *root_node = NULL;
+ struct device *wlan_dev;
+
+ wlan_dev = sec_device_create(NULL, "wlan");
+
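+ /*
+ * Expects a device-tree node along these lines (hypothetical example):
+ * wlan {
+ * compatible = "samsung,brcm-wlan";
+ * gpios = <&gpa0 0 0>, <&gpa2 1 0>;
+ * };
+ * gpio index 0 is taken as WLAN_REG_ON and index 1 as WLAN_HOST_WAKE.
+ */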
+ root_node = of_find_compatible_node(NULL, NULL, wlan_node);
+ if (!root_node) {
+ WARN(1, "failed to get device node of bcm4354\n");
+ return -ENODEV;
+ }
+
+ /* ========== WLAN_PWR_EN ============ */
+ wlan_pwr_on = of_get_gpio(root_node, 0);
+ if (!gpio_is_valid(wlan_pwr_on)) {
+ WARN(1, "Invalied gpio pin : %d\n", wlan_pwr_on);
+ return -ENODEV;
+ }
+
+ if (gpio_request(wlan_pwr_on, "WLAN_REG_ON")) {
+ WARN(1, "fail to request gpio(WLAN_REG_ON)\n");
+ return -ENODEV;
+ }
+#ifdef CONFIG_BCMDHD_PCIE
+ gpio_direction_output(wlan_pwr_on, 1);
+ msleep(WIFI_TURNON_DELAY);
+#else
+ gpio_direction_output(wlan_pwr_on, 0);
+#endif /* CONFIG_BCMDHD_PCIE */
+ gpio_export(wlan_pwr_on, 1);
+ if (wlan_dev)
+ gpio_export_link(wlan_dev, "WLAN_REG_ON", wlan_pwr_on);
+
+#ifdef CONFIG_BCMDHD_PCIE
+ exynos_pcie_pm_resume(pcie_ch_num);
+#endif /* CONFIG_BCMDHD_PCIE */
+
+#ifdef CONFIG_BCMDHD_OOB_HOST_WAKE
+ /* ========== WLAN_HOST_WAKE ============ */
+ wlan_host_wake_up = of_get_gpio(root_node, 1);
+ if (!gpio_is_valid(wlan_host_wake_up)) {
+ WARN(1, "Invalied gpio pin : %d\n", wlan_host_wake_up);
+ return -ENODEV;
+ }
+
+ if (gpio_request(wlan_host_wake_up, "WLAN_HOST_WAKE")) {
+ WARN(1, "fail to request gpio(WLAN_HOST_WAKE)\n");
+ return -ENODEV;
+ }
+ gpio_direction_input(wlan_host_wake_up);
+ gpio_export(wlan_host_wake_up, 1);
+ if (wlan_dev)
+ gpio_export_link(wlan_dev, "WLAN_HOST_WAKE", wlan_host_wake_up);
+
+ wlan_host_wake_irq = gpio_to_irq(wlan_host_wake_up);
+#endif /* CONFIG_BCMDHD_OOB_HOST_WAKE */
+
+ return 0;
+}
+
+#if defined(CONFIG_BCMDHD_OOB_HOST_WAKE) && defined(CONFIG_BCMDHD_GET_OOB_STATE)
+int
+dhd_get_wlan_oob_gpio(void)
+{
+ return gpio_is_valid(wlan_host_wake_up) ?
+ gpio_get_value(wlan_host_wake_up) : -1;
+}
+EXPORT_SYMBOL(dhd_get_wlan_oob_gpio);
+#endif /* CONFIG_BCMDHD_OOB_HOST_WAKE && CONFIG_BCMDHD_GET_OOB_STATE */
+
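+/*
+ * OOB host-wake IRQ resource handed to the DHD core. The trigger type
+ * depends on the bus (high-edge for PCIe, high-level for SDIO); start/end
+ * are filled in with the real IRQ number in dhd_wlan_init().
+ */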
+struct resource dhd_wlan_resources = {
+ .name = "bcmdhd_wlan_irq",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE |
+#ifdef CONFIG_BCMDHD_PCIE
+ IORESOURCE_IRQ_HIGHEDGE,
+#else
+ IORESOURCE_IRQ_HIGHLEVEL,
+#endif /* CONFIG_BCMDHD_PCIE */
+};
+EXPORT_SYMBOL(dhd_wlan_resources);
+
+struct wifi_platform_data dhd_wlan_control = {
+ .set_power = dhd_wlan_power,
+ .set_reset = dhd_wlan_reset,
+ .set_carddetect = dhd_wlan_set_carddetect,
+#ifdef CONFIG_BROADCOM_WIFI_RESERVED_MEM
+ .mem_prealloc = dhd_wlan_mem_prealloc,
+#endif /* CONFIG_BROADCOM_WIFI_RESERVED_MEM */
+};
+EXPORT_SYMBOL(dhd_wlan_control);
+
+int __init
+dhd_wlan_init(void)
+{
+ int ret;
+
+ printk(KERN_INFO "%s: START.......\n", __FUNCTION__);
+ ret = dhd_wlan_init_gpio();
+ if (ret < 0) {
+ printk(KERN_ERR "%s: failed to initiate GPIO, ret=%d\n",
+ __FUNCTION__, ret);
+ goto fail;
+ }
+
+#ifdef CONFIG_BCMDHD_OOB_HOST_WAKE
+ dhd_wlan_resources.start = wlan_host_wake_irq;
+ dhd_wlan_resources.end = wlan_host_wake_irq;
+#endif /* CONFIG_BCMDHD_OOB_HOST_WAKE */
+
+#ifdef CONFIG_BROADCOM_WIFI_RESERVED_MEM
+ ret = dhd_init_wlan_mem();
+ if (ret < 0) {
+ printk(KERN_ERR "%s: failed to alloc reserved memory,"
+ " ret=%d\n", __FUNCTION__, ret);
+ }
+#endif /* CONFIG_BROADCOM_WIFI_RESERVED_MEM */
+
+fail:
+ return ret;
+}
+
+int
+dhd_wlan_deinit(void)
+{
+#ifdef CONFIG_BCMDHD_OOB_HOST_WAKE
+ gpio_free(wlan_host_wake_up);
+#endif /* CONFIG_BCMDHD_OOB_HOST_WAKE */
+ gpio_free(wlan_pwr_on);
+
+#ifdef CONFIG_BROADCOM_WIFI_RESERVED_MEM
+ dhd_exit_wlan_mem();
+#endif /* CONFIG_BROADCOM_WIFI_RESERVED_MEM */
+ return 0;
+}
+
+#ifndef BCMDHD_MODULAR
+#if defined(CONFIG_MACH_UNIVERSAL7420) || defined(CONFIG_SOC_EXYNOS8890) || \
+ defined(CONFIG_SOC_EXYNOS8895) || defined(CONFIG_SOC_EXYNOS9810) || \
+ defined(CONFIG_SOC_EXYNOS9820) || defined(CONFIG_SOC_EXYNOS9830)
+#if defined(CONFIG_DEFERRED_INITCALLS)
+deferred_module_init(dhd_wlan_init);
+#else
+late_initcall(dhd_wlan_init);
+#endif /* CONFIG_DEFERRED_INITCALLS */
+#else
+device_initcall(dhd_wlan_init);
+#endif /* Exynos PCIE Platforms */
+#endif /* !BCMDHD_MODULAR */
diff --git a/bcmdhd.101.10.361.x/dhd_custom_gpio.c b/bcmdhd.101.10.361.x/dhd_custom_gpio.c
new file mode 100755
index 0000000..baf4740
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_custom_gpio.c
@@ -0,0 +1,437 @@
+/*
+ * Customer code to add GPIO control during WLAN start/stop
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_linux.h>
+
+#include <wlioctl.h>
+
+#ifndef BCMDONGLEHOST
+#include <wlc_pub.h>
+#include <wl_dbg.h>
+#else
+#define WL_ERROR(x) printf x
+#define WL_TRACE(x)
+#endif
+
+#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID)
+
+#if defined(BCMLXSDMMC)
+extern int sdioh_mmc_irq(int irq);
+#endif /* (BCMLXSDMMC) */
+
+/* Customer specific Host GPIO definition */
+static int dhd_oob_gpio_num = -1;
+
+module_param(dhd_oob_gpio_num, int, 0644);
+MODULE_PARM_DESC(dhd_oob_gpio_num, "DHD oob gpio number");
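+/*
+ * The OOB GPIO may be overridden at load time, e.g. with a hypothetical
+ * GPIO number: insmod bcmdhd.ko dhd_oob_gpio_num=123
+ */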
+
+/* This function returns:
+ * 1) return value : Host gpio interrupt number for the customer platform
+ * 2) irq_flags_ptr : Type of Host interrupt (Level or Edge)
+ *
+ * NOTE :
+ * Customers should check their platform definitions and their Host
+ * interrupt spec to figure out the proper setting for their platform.
+ * Broadcom provides these settings only as a reference example.
+ */
+int dhd_customer_oob_irq_map(void *adapter, unsigned long *irq_flags_ptr)
+{
+ int host_oob_irq = 0;
+
+#if defined(CUSTOMER_HW2) || defined(CUSTOMER_HW4) || defined(BOARD_HIKEY)
+ host_oob_irq = wifi_platform_get_irq_number(adapter, irq_flags_ptr);
+
+#else
+#if defined(CUSTOM_OOB_GPIO_NUM)
+ if (dhd_oob_gpio_num < 0) {
+ dhd_oob_gpio_num = CUSTOM_OOB_GPIO_NUM;
+ }
+#endif /* CUSTOM_OOB_GPIO_NUM */
+
+ if (dhd_oob_gpio_num < 0) {
+ WL_ERROR(("%s: ERROR customer specific Host GPIO is NOT defined \n",
+ __FUNCTION__));
+ return (dhd_oob_gpio_num);
+ }
+
+ WL_ERROR(("%s: customer specific Host GPIO number is (%d)\n",
+ __FUNCTION__, dhd_oob_gpio_num));
+
+#endif /* CUSTOMER_HW2 || CUSTOMER_HW4 || BOARD_HIKEY */
+
+ return (host_oob_irq);
+}
+#endif /* defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) */
+
+/* Customer function to control hw specific wlan gpios */
+int
+dhd_customer_gpio_wlan_ctrl(void *adapter, int onoff)
+{
+ int err = 0;
+
+ return err;
+}
+
+#if 0
+/* Function to get custom MAC address */
+int
+dhd_custom_get_mac_address(void *adapter, unsigned char *buf)
+{
+ int ret = 0;
+
+ WL_TRACE(("%s Enter\n", __FUNCTION__));
+ if (!buf)
+ return -EINVAL;
+
+ /* Customer access to MAC address stored outside of DHD driver */
+#if (defined(CUSTOMER_HW2) || defined(CUSTOMER_HW10) || defined(BOARD_HIKEY)) && \
+ (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
+ ret = wifi_platform_get_mac_addr(adapter, buf);
+#endif
+
+#ifdef EXAMPLE_GET_MAC
+ /* EXAMPLE code */
+ {
+ struct ether_addr ea_example = {{0x00, 0x11, 0x22, 0x33, 0x44, 0xFF}};
+ bcopy((char *)&ea_example, buf, sizeof(struct ether_addr));
+ }
+#endif /* EXAMPLE_GET_MAC */
+
+ return ret;
+}
+#endif /* 0 */
+
+#ifndef CUSTOMER_HW4
+/* Customized Locale table : OPTIONAL feature */
+const struct cntry_locales_custom translate_custom_table[] = {
+/* Table should be filled out based on custom platform regulatory requirement */
+#ifdef EXAMPLE_TABLE
+ {"", "XY", 4}, /* Universal if Country code is unknown or empty */
+ {"US", "US", 69}, /* input ISO "US" to : US regrev 69 */
+ {"CA", "US", 69}, /* input ISO "CA" to : US regrev 69 */
+ {"EU", "EU", 5}, /* European union countries to : EU regrev 05 */
+ {"AT", "EU", 5},
+ {"BE", "EU", 5},
+ {"BG", "EU", 5},
+ {"CY", "EU", 5},
+ {"CZ", "EU", 5},
+ {"DK", "EU", 5},
+ {"EE", "EU", 5},
+ {"FI", "EU", 5},
+ {"FR", "EU", 5},
+ {"DE", "EU", 5},
+ {"GR", "EU", 5},
+ {"HU", "EU", 5},
+ {"IE", "EU", 5},
+ {"IT", "EU", 5},
+ {"LV", "EU", 5},
+ {"LI", "EU", 5},
+ {"LT", "EU", 5},
+ {"LU", "EU", 5},
+ {"MT", "EU", 5},
+ {"NL", "EU", 5},
+ {"PL", "EU", 5},
+ {"PT", "EU", 5},
+ {"RO", "EU", 5},
+ {"SK", "EU", 5},
+ {"SI", "EU", 5},
+ {"ES", "EU", 5},
+ {"SE", "EU", 5},
+ {"GB", "EU", 5},
+ {"KR", "XY", 3},
+ {"AU", "XY", 3},
+ {"CN", "XY", 3}, /* input ISO "CN" to : XY regrev 03 */
+ {"TW", "XY", 3},
+ {"AR", "XY", 3},
+ {"MX", "XY", 3},
+ {"IL", "IL", 0},
+ {"CH", "CH", 0},
+ {"TR", "TR", 0},
+ {"NO", "NO", 0},
+#endif /* EXAMPLE_TABLE */
+#if (defined(CUSTOMER_HW2) || defined(BOARD_HIKEY)) && !defined(CUSTOMER_HW5)
+#if defined(BCM4335_CHIP)
+ {"", "XZ", 11}, /* Universal if Country code is unknown or empty */
+#endif
+ {"AE", "AE", 1},
+ {"AR", "AR", 1},
+ {"AT", "AT", 1},
+ {"AU", "AU", 2},
+ {"BE", "BE", 1},
+ {"BG", "BG", 1},
+ {"BN", "BN", 1},
+ {"CA", "CA", 2},
+ {"CH", "CH", 1},
+ {"CY", "CY", 1},
+ {"CZ", "CZ", 1},
+ {"DE", "DE", 3},
+ {"DK", "DK", 1},
+ {"EE", "EE", 1},
+ {"ES", "ES", 1},
+ {"FI", "FI", 1},
+ {"FR", "FR", 1},
+ {"GB", "GB", 1},
+ {"GR", "GR", 1},
+ {"HR", "HR", 1},
+ {"HU", "HU", 1},
+ {"IE", "IE", 1},
+ {"IS", "IS", 1},
+ {"IT", "IT", 1},
+ {"ID", "ID", 1},
+ {"JP", "JP", 8},
+ {"KR", "KR", 24},
+ {"KW", "KW", 1},
+ {"LI", "LI", 1},
+ {"LT", "LT", 1},
+ {"LU", "LU", 1},
+ {"LV", "LV", 1},
+ {"MA", "MA", 1},
+ {"MT", "MT", 1},
+ {"MX", "MX", 1},
+ {"NL", "NL", 1},
+ {"NO", "NO", 1},
+ {"PL", "PL", 1},
+ {"PT", "PT", 1},
+ {"PY", "PY", 1},
+ {"RO", "RO", 1},
+ {"SE", "SE", 1},
+ {"SI", "SI", 1},
+ {"SK", "SK", 1},
+ {"TR", "TR", 7},
+ {"TW", "TW", 1},
+ {"IR", "XZ", 11}, /* Universal if Country code is IRAN, (ISLAMIC REPUBLIC OF) */
+ {"SD", "XZ", 11}, /* Universal if Country code is SUDAN */
+ {"SY", "XZ", 11}, /* Universal if Country code is SYRIAN ARAB REPUBLIC */
+ {"GL", "XZ", 11}, /* Universal if Country code is GREENLAND */
+ {"PS", "XZ", 11}, /* Universal if Country code is PALESTINIAN TERRITORY, OCCUPIED */
+ {"TL", "XZ", 11}, /* Universal if Country code is TIMOR-LESTE (EAST TIMOR) */
+ {"MH", "XZ", 11}, /* Universal if Country code is MARSHALL ISLANDS */
+#elif defined(CUSTOMER_HW5)
+ {"", "XZ", 11},
+ {"AE", "AE", 212},
+ {"AG", "AG", 2},
+ {"AI", "AI", 2},
+ {"AL", "AL", 2},
+ {"AN", "AN", 3},
+ {"AR", "AR", 212},
+ {"AS", "AS", 15},
+ {"AT", "AT", 4},
+ {"AU", "AU", 212},
+ {"AW", "AW", 2},
+ {"AZ", "AZ", 2},
+ {"BA", "BA", 2},
+ {"BD", "BD", 2},
+ {"BE", "BE", 4},
+ {"BG", "BG", 4},
+ {"BH", "BH", 4},
+ {"BM", "BM", 15},
+ {"BN", "BN", 4},
+ {"BR", "BR", 212},
+ {"BS", "BS", 2},
+ {"BY", "BY", 3},
+ {"BW", "BW", 1},
+ {"CA", "CA", 212},
+ {"CH", "CH", 212},
+ {"CL", "CL", 212},
+ {"CN", "CN", 212},
+ {"CO", "CO", 212},
+ {"CR", "CR", 21},
+ {"CY", "CY", 212},
+ {"CZ", "CZ", 212},
+ {"DE", "DE", 212},
+ {"DK", "DK", 4},
+ {"DZ", "DZ", 1},
+ {"EC", "EC", 23},
+ {"EE", "EE", 4},
+ {"EG", "EG", 212},
+ {"ES", "ES", 212},
+ {"ET", "ET", 2},
+ {"FI", "FI", 4},
+ {"FR", "FR", 212},
+ {"GB", "GB", 212},
+ {"GD", "GD", 2},
+ {"GF", "GF", 2},
+ {"GP", "GP", 2},
+ {"GR", "GR", 212},
+ {"GT", "GT", 0},
+ {"GU", "GU", 17},
+ {"HK", "HK", 212},
+ {"HR", "HR", 4},
+ {"HU", "HU", 4},
+ {"IN", "IN", 212},
+ {"ID", "ID", 212},
+ {"IE", "IE", 5},
+ {"IL", "IL", 7},
+ {"IN", "IN", 212},
+ {"IS", "IS", 4},
+ {"IT", "IT", 212},
+ {"JO", "JO", 3},
+ {"JP", "JP", 212},
+ {"KH", "KH", 4},
+ {"KI", "KI", 1},
+ {"KR", "KR", 212},
+ {"KW", "KW", 5},
+ {"KY", "KY", 4},
+ {"KZ", "KZ", 212},
+ {"LA", "LA", 4},
+ {"LB", "LB", 6},
+ {"LI", "LI", 4},
+ {"LK", "LK", 3},
+ {"LS", "LS", 2},
+ {"LT", "LT", 4},
+ {"LR", "LR", 2},
+ {"LU", "LU", 3},
+ {"LV", "LV", 4},
+ {"MA", "MA", 2},
+ {"MC", "MC", 1},
+ {"MD", "MD", 2},
+ {"ME", "ME", 2},
+ {"MK", "MK", 2},
+ {"MN", "MN", 0},
+ {"MO", "MO", 2},
+ {"MR", "MR", 2},
+ {"MT", "MT", 4},
+ {"MQ", "MQ", 2},
+ {"MU", "MU", 2},
+ {"MV", "MV", 3},
+ {"MX", "MX", 212},
+ {"MY", "MY", 212},
+ {"NI", "NI", 0},
+ {"NL", "NL", 212},
+ {"NO", "NO", 4},
+ {"NP", "NP", 3},
+ {"NZ", "NZ", 9},
+ {"OM", "OM", 4},
+ {"PA", "PA", 17},
+ {"PE", "PE", 212},
+ {"PG", "PG", 2},
+ {"PH", "PH", 212},
+ {"PL", "PL", 212},
+ {"PR", "PR", 25},
+ {"PT", "PT", 212},
+ {"PY", "PY", 4},
+ {"RE", "RE", 2},
+ {"RO", "RO", 212},
+ {"RS", "RS", 2},
+ {"RU", "RU", 212},
+ {"SA", "SA", 212},
+ {"SE", "SE", 212},
+ {"SG", "SG", 212},
+ {"SI", "SI", 4},
+ {"SK", "SK", 212},
+ {"SN", "SN", 2},
+ {"SV", "SV", 25},
+ {"TH", "TH", 212},
+ {"TR", "TR", 212},
+ {"TT", "TT", 5},
+ {"TW", "TW", 212},
+ {"UA", "UA", 212},
+ {"UG", "UG", 2},
+ {"US", "US", 212},
+ {"UY", "UY", 5},
+ {"VA", "VA", 2},
+ {"VE", "VE", 3},
+ {"VG", "VG", 2},
+ {"VI", "VI", 18},
+ {"VN", "VN", 4},
+ {"YT", "YT", 2},
+ {"ZA", "ZA", 212},
+ {"ZM", "ZM", 2},
+ {"XT", "XT", 212},
+ {"XZ", "XZ", 11},
+ {"XV", "XV", 17},
+ {"Q1", "Q1", 77},
+#endif /* ((CUSTOMER_HW2 || BOARD_HIKEY) && !CUSTOMER_HW5) || CUSTOMER_HW5 */
+};
+
+/* Customized Locale convertor
+* input : ISO 3166-1 country abbreviation
+* output: customized cspec
+*/
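+/* Example with the CUSTOMER_HW2/BOARD_HIKEY table above compiled in:
+ * ISO "KR" yields cspec {ccode "KR", rev 24}. Codes not in the table
+ * leave cspec untouched unless EXAMPLE_TABLE supplies the universal
+ * fallback entry.
+ */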
+void
+#ifdef CUSTOM_COUNTRY_CODE
+get_customized_country_code(void *adapter, char *country_iso_code,
+ wl_country_t *cspec, u32 flags)
+#else
+get_customized_country_code(void *adapter, char *country_iso_code, wl_country_t *cspec)
+#endif /* CUSTOM_COUNTRY_CODE */
+{
+#if defined(OEM_ANDROID)
+#if (defined(CUSTOMER_HW) || defined(CUSTOMER_HW2)) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
+
+ struct cntry_locales_custom *cloc_ptr;
+
+ if (!cspec)
+ return;
+#ifdef CUSTOM_COUNTRY_CODE
+ cloc_ptr = wifi_platform_get_country_code(adapter, country_iso_code, flags);
+#else
+ cloc_ptr = wifi_platform_get_country_code(adapter, country_iso_code);
+#endif /* CUSTOM_COUNTRY_CODE */
+
+ if (cloc_ptr) {
+ strlcpy(cspec->ccode, cloc_ptr->custom_locale, WLC_CNTRY_BUF_SZ);
+ cspec->rev = cloc_ptr->custom_locale_rev;
+ }
+ return;
+#else
+ int size, i;
+
+ size = ARRAYSIZE(translate_custom_table);
+
+ if (cspec == 0)
+ return;
+
+ if (size == 0)
+ return;
+
+ for (i = 0; i < size; i++) {
+ if (strcmp(country_iso_code, translate_custom_table[i].iso_abbrev) == 0) {
+ memcpy(cspec->ccode,
+ translate_custom_table[i].custom_locale, WLC_CNTRY_BUF_SZ);
+ cspec->rev = translate_custom_table[i].custom_locale_rev;
+ return;
+ }
+ }
+#ifdef EXAMPLE_TABLE
+ /* if no country code matched return first universal code from translate_custom_table */
+ memcpy(cspec->ccode, translate_custom_table[0].custom_locale, WLC_CNTRY_BUF_SZ);
+ cspec->rev = translate_custom_table[0].custom_locale_rev;
+#endif /* EXAMPLE_TABLE */
+ return;
+#endif /* (defined(CUSTOMER_HW) || defined(CUSTOMER_HW2)) &&
+ * (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
+ */
+#endif /* OEM_ANDROID */
+}
+#endif /* !CUSTOMER_HW4 */
diff --git a/bcmdhd.101.10.361.x/dhd_custom_hikey.c b/bcmdhd.101.10.361.x/dhd_custom_hikey.c
new file mode 100755
index 0000000..cb5b715
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_custom_hikey.c
@@ -0,0 +1,290 @@
+/*
+ * Platform Dependent file for Hikey
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/skbuff.h>
+#include <linux/fcntl.h>
+#include <linux/fs.h>
+#include <linux/of_gpio.h>
+#ifdef CONFIG_WIFI_CONTROL_FUNC
+#include <linux/wlan_plat.h>
+#else
+#include <dhd_plat.h>
+#endif /* CONFIG_WIFI_CONTROL_FUNC */
+#include <dhd_dbg.h>
+#include <dhd.h>
+
+#ifdef CONFIG_BROADCOM_WIFI_RESERVED_MEM
+extern int dhd_init_wlan_mem(void);
+extern void *dhd_wlan_mem_prealloc(int section, unsigned long size);
+#endif /* CONFIG_BROADCOM_WIFI_RESERVED_MEM */
+
+#define WLAN_REG_ON_GPIO 491
+#define WLAN_HOST_WAKE_GPIO 493
+
+static int wlan_reg_on = -1;
+#define DHD_DT_COMPAT_ENTRY "android,bcmdhd_wlan"
+#define WIFI_WL_REG_ON_PROPNAME "wl_reg_on"
+
+static int wlan_host_wake_up = -1;
+static int wlan_host_wake_irq = 0;
+#define WIFI_WLAN_HOST_WAKE_PROPNAME "wl_host_wake"
+
+int
+dhd_wifi_init_gpio(void)
+{
+ int gpio_reg_on_val;
+ /* ========== WLAN_PWR_EN ============ */
+ char *wlan_node = DHD_DT_COMPAT_ENTRY;
+ struct device_node *root_node = NULL;
+
+ root_node = of_find_compatible_node(NULL, NULL, wlan_node);
+ if (root_node) {
+ wlan_reg_on = of_get_named_gpio(root_node, WIFI_WL_REG_ON_PROPNAME, 0);
+ wlan_host_wake_up = of_get_named_gpio(root_node, WIFI_WLAN_HOST_WAKE_PROPNAME, 0);
+ } else {
+ DHD_ERROR(("failed to get device node of BRCM WLAN, use default GPIOs\n"));
+ wlan_reg_on = WLAN_REG_ON_GPIO;
+ wlan_host_wake_up = WLAN_HOST_WAKE_GPIO;
+ }
+
+ /* ========== WLAN_PWR_EN ============ */
+ DHD_INFO(("%s: gpio_wlan_power : %d\n", __FUNCTION__, wlan_reg_on));
+
+ /*
+ * For reg_on, gpio_request will fail if the gpio is configured to output-high
+ * in the dts using gpio-hog, so do not return error for failure.
+ */
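+ /*
+ * A hypothetical dts fragment that pre-drives the pin this way:
+ * wl-reg-on-hog { gpio-hog; gpios = <3 0>; output-high;
+ * line-name = "WL_REG_ON"; };
+ */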
+ if (gpio_request_one(wlan_reg_on, GPIOF_OUT_INIT_HIGH, "WL_REG_ON")) {
+ DHD_ERROR(("%s: Failed to request gpio %d for WL_REG_ON, "
+ "might have configured in the dts\n",
+ __FUNCTION__, wlan_reg_on));
+ } else {
+ DHD_ERROR(("%s: gpio_request WL_REG_ON done - WLAN_EN: GPIO %d\n",
+ __FUNCTION__, wlan_reg_on));
+ }
+
+ gpio_reg_on_val = gpio_get_value(wlan_reg_on);
+ DHD_INFO(("%s: Initial WL_REG_ON: [%d]\n",
+ __FUNCTION__, gpio_get_value(wlan_reg_on)));
+
+ if (gpio_reg_on_val == 0) {
+ DHD_INFO(("%s: WL_REG_ON is LOW, drive it HIGH\n", __FUNCTION__));
+ if (gpio_direction_output(wlan_reg_on, 1)) {
+ DHD_ERROR(("%s: WL_REG_ON is failed to pull up\n", __FUNCTION__));
+ return -EIO;
+ }
+ }
+
+ /* Wait WIFI_TURNON_DELAY for the power rail to stabilize */
+ msleep(WIFI_TURNON_DELAY);
+
+ /* ========== WLAN_HOST_WAKE ============ */
+ DHD_INFO(("%s: gpio_wlan_host_wake : %d\n", __FUNCTION__, wlan_host_wake_up));
+
+ if (gpio_request_one(wlan_host_wake_up, GPIOF_IN, "WLAN_HOST_WAKE")) {
+ DHD_ERROR(("%s: Failed to request gpio %d for WLAN_HOST_WAKE\n",
+ __FUNCTION__, wlan_host_wake_up));
+ return -ENODEV;
+ } else {
+ DHD_ERROR(("%s: gpio_request WLAN_HOST_WAKE done"
+ " - WLAN_HOST_WAKE: GPIO %d\n",
+ __FUNCTION__, wlan_host_wake_up));
+ }
+
+ if (gpio_direction_input(wlan_host_wake_up)) {
+ DHD_ERROR(("%s: Failed to set WL_HOST_WAKE gpio direction\n", __FUNCTION__));
+ return -EIO;
+ }
+
+ wlan_host_wake_irq = gpio_to_irq(wlan_host_wake_up);
+
+ return 0;
+}
+
+extern void kirin_pcie_power_on_atu_fixup(void) __attribute__ ((weak));
+extern int kirin_pcie_lp_ctrl(u32 enable) __attribute__ ((weak));
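+/*
+ * Both symbols are weak: on kernels without the Kirin PCIe driver they
+ * resolve to NULL and the NULL checks in dhd_wlan_power() skip the ATU
+ * fixup and ASPM calls.
+ */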
+
+int
+dhd_wlan_power(int onoff)
+{
+ DHD_INFO(("------------------------------------------------"));
+ DHD_INFO(("------------------------------------------------\n"));
+ DHD_INFO(("%s Enter: power %s\n", __func__, onoff ? "on" : "off"));
+
+ if (onoff) {
+ if (gpio_direction_output(wlan_reg_on, 1)) {
+ DHD_ERROR(("%s: WL_REG_ON is failed to pull up\n", __FUNCTION__));
+ return -EIO;
+ }
+ if (gpio_get_value(wlan_reg_on)) {
+ DHD_INFO(("WL_REG_ON on-step-2 : [%d]\n",
+ gpio_get_value(wlan_reg_on)));
+ } else {
+ DHD_ERROR(("[%s] gpio value is 0. We need reinit.\n", __func__));
+ if (gpio_direction_output(wlan_reg_on, 1)) {
+ DHD_ERROR(("%s: WL_REG_ON is "
+ "failed to pull up\n", __func__));
+ }
+ }
+
+ /* Wait WIFI_TURNON_DELAY for the power rail to stabilize */
+ msleep(WIFI_TURNON_DELAY);
+
+ /*
+ * Call Kirin RC ATU fixup, else si_attach will fail due to
+ * improper BAR0/1 address translations
+ */
+ if (kirin_pcie_power_on_atu_fixup) {
+ kirin_pcie_power_on_atu_fixup();
+ } else {
+ DHD_ERROR(("[%s] kirin_pcie_power_on_atu_fixup is NULL. "
+ "REG_ON may not work\n", __func__));
+ }
+ /* Enable ASPM after powering ON */
+ if (kirin_pcie_lp_ctrl) {
+ kirin_pcie_lp_ctrl(onoff);
+ } else {
+ DHD_ERROR(("[%s] kirin_pcie_lp_ctrl is NULL. "
+ "ASPM may not work\n", __func__));
+ }
+ } else {
+ /* Disable ASPM before powering off */
+ if (kirin_pcie_lp_ctrl) {
+ kirin_pcie_lp_ctrl(onoff);
+ } else {
+ DHD_ERROR(("[%s] kirin_pcie_lp_ctrl is NULL. "
+ "ASPM may not work\n", __func__));
+ }
+ if (gpio_direction_output(wlan_reg_on, 0)) {
+ DHD_ERROR(("%s: WL_REG_ON is failed to pull up\n", __FUNCTION__));
+ return -EIO;
+ }
+ if (gpio_get_value(wlan_reg_on)) {
+ DHD_INFO(("WL_REG_ON on-step-2 : [%d]\n",
+ gpio_get_value(wlan_reg_on)));
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL(dhd_wlan_power);
+
+static int
+dhd_wlan_reset(int onoff)
+{
+ return 0;
+}
+
+static int
+dhd_wlan_set_carddetect(int val)
+{
+ return 0;
+}
+
+#ifdef BCMSDIO
+static int dhd_wlan_get_wake_irq(void)
+{
+ return gpio_to_irq(wlan_host_wake_up);
+}
+#endif /* BCMSDIO */
+
+#if defined(CONFIG_BCMDHD_OOB_HOST_WAKE) && defined(CONFIG_BCMDHD_GET_OOB_STATE)
+int
+dhd_get_wlan_oob_gpio(void)
+{
+ return gpio_is_valid(wlan_host_wake_up) ?
+ gpio_get_value(wlan_host_wake_up) : -1;
+}
+EXPORT_SYMBOL(dhd_get_wlan_oob_gpio);
+#endif /* CONFIG_BCMDHD_OOB_HOST_WAKE && CONFIG_BCMDHD_GET_OOB_STATE */
+
+struct resource dhd_wlan_resources = {
+ .name = "bcmdhd_wlan_irq",
+ .start = 0, /* Dummy */
+ .end = 0, /* Dummy */
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE |
+ IORESOURCE_IRQ_HIGHEDGE,
+};
+EXPORT_SYMBOL(dhd_wlan_resources);
+
+struct wifi_platform_data dhd_wlan_control = {
+ .set_power = dhd_wlan_power,
+ .set_reset = dhd_wlan_reset,
+ .set_carddetect = dhd_wlan_set_carddetect,
+#ifdef CONFIG_BROADCOM_WIFI_RESERVED_MEM
+ .mem_prealloc = dhd_wlan_mem_prealloc,
+#endif /* CONFIG_BROADCOM_WIFI_RESERVED_MEM */
+#ifdef BCMSDIO
+ .get_wake_irq = dhd_wlan_get_wake_irq,
+#endif
+};
+EXPORT_SYMBOL(dhd_wlan_control);
+
+int
+dhd_wlan_init(void)
+{
+ int ret;
+
+ DHD_INFO(("%s: START.......\n", __FUNCTION__));
+ ret = dhd_wifi_init_gpio();
+ if (ret < 0) {
+ DHD_ERROR(("%s: failed to initiate GPIO, ret=%d\n",
+ __FUNCTION__, ret));
+ goto fail;
+ }
+
+ dhd_wlan_resources.start = wlan_host_wake_irq;
+ dhd_wlan_resources.end = wlan_host_wake_irq;
+
+#ifdef CONFIG_BROADCOM_WIFI_RESERVED_MEM
+ ret = dhd_init_wlan_mem();
+ if (ret < 0) {
+ DHD_ERROR(("%s: failed to alloc reserved memory,"
+ " ret=%d\n", __FUNCTION__, ret));
+ }
+#endif /* CONFIG_BROADCOM_WIFI_RESERVED_MEM */
+
+fail:
+ DHD_INFO(("%s: FINISH.......\n", __FUNCTION__));
+ return ret;
+}
+
+int
+dhd_wlan_deinit(void)
+{
+ gpio_free(wlan_host_wake_up);
+ gpio_free(wlan_reg_on);
+ return 0;
+}
+#ifndef BCMDHD_MODULAR
+/* Required only for Built-in DHD */
+device_initcall(dhd_wlan_init);
+#endif /* !BCMDHD_MODULAR */
diff --git a/bcmdhd.101.10.361.x/dhd_custom_memprealloc.c b/bcmdhd.101.10.361.x/dhd_custom_memprealloc.c
new file mode 100755
index 0000000..f43d7e3
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_custom_memprealloc.c
@@ -0,0 +1,500 @@
+/*
+ * Platform Dependent file for usage of Preallocted Memory
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/miscdevice.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/workqueue.h>
+#include <linux/unistd.h>
+#include <linux/bug.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+
+#ifdef CONFIG_BROADCOM_WIFI_RESERVED_MEM
+
+#define WLAN_STATIC_SCAN_BUF0 5
+#define WLAN_STATIC_SCAN_BUF1 6
+#define WLAN_STATIC_DHD_INFO_BUF 7
+#define WLAN_STATIC_DHD_WLFC_BUF 8
+#define WLAN_STATIC_DHD_IF_FLOW_LKUP 9
+#define WLAN_STATIC_DHD_MEMDUMP_RAM 11
+#define WLAN_STATIC_DHD_WLFC_HANGER 12
+#define WLAN_STATIC_DHD_PKTID_MAP 13
+#define WLAN_STATIC_DHD_PKTID_IOCTL_MAP 14
+#define WLAN_STATIC_DHD_LOG_DUMP_BUF 15
+#define WLAN_STATIC_DHD_LOG_DUMP_BUF_EX 16
+#define WLAN_STATIC_DHD_PKTLOG_DUMP_BUF 17
+
+#define WLAN_SCAN_BUF_SIZE (64 * 1024)
+
+#define WLAN_DHD_INFO_BUF_SIZE (64 * 1024)
+#define WLAN_DHD_WLFC_BUF_SIZE (64 * 1024)
+#define WLAN_DHD_IF_FLOW_LKUP_SIZE (64 * 1024)
+/* Have 3MB ramsize to accommodate future chips */
+#define WLAN_DHD_MEMDUMP_SIZE (3 * 1024 * 1024)
+
+#define PREALLOC_WLAN_SEC_NUM 4
+#define PREALLOC_WLAN_BUF_NUM 160
+#define PREALLOC_WLAN_SECTION_HEADER 24
+
+#ifdef CONFIG_BCMDHD_PCIE
+#define DHD_SKB_1PAGE_BUFSIZE (PAGE_SIZE*1)
+#define DHD_SKB_2PAGE_BUFSIZE (PAGE_SIZE*2)
+#define DHD_SKB_4PAGE_BUFSIZE (PAGE_SIZE*4)
+
+#define WLAN_SECTION_SIZE_0 (PREALLOC_WLAN_BUF_NUM * 128)
+#define WLAN_SECTION_SIZE_1 0
+#define WLAN_SECTION_SIZE_2 0
+#define WLAN_SECTION_SIZE_3 (PREALLOC_WLAN_BUF_NUM * 1024)
+
+#define DHD_SKB_1PAGE_BUF_NUM 0
+#define DHD_SKB_2PAGE_BUF_NUM 192
+#define DHD_SKB_4PAGE_BUF_NUM 0
+
+#else
+#define DHD_SKB_HDRSIZE 336
+#define DHD_SKB_1PAGE_BUFSIZE ((PAGE_SIZE*1)-DHD_SKB_HDRSIZE)
+#define DHD_SKB_2PAGE_BUFSIZE ((PAGE_SIZE*2)-DHD_SKB_HDRSIZE)
+#define DHD_SKB_4PAGE_BUFSIZE ((PAGE_SIZE*4)-DHD_SKB_HDRSIZE)
+
+#define WLAN_SECTION_SIZE_0 (PREALLOC_WLAN_BUF_NUM * 128)
+#define WLAN_SECTION_SIZE_1 (PREALLOC_WLAN_BUF_NUM * 128)
+#define WLAN_SECTION_SIZE_2 (PREALLOC_WLAN_BUF_NUM * 512)
+#define WLAN_SECTION_SIZE_3 (PREALLOC_WLAN_BUF_NUM * 1024)
+
+#define DHD_SKB_1PAGE_BUF_NUM 8
+#define DHD_SKB_2PAGE_BUF_NUM 8
+#define DHD_SKB_4PAGE_BUF_NUM 1
+#endif /* CONFIG_BCMDHD_PCIE */
+
+#define WLAN_SKB_1_2PAGE_BUF_NUM ((DHD_SKB_1PAGE_BUF_NUM) + \
+ (DHD_SKB_2PAGE_BUF_NUM))
+#define WLAN_SKB_BUF_NUM ((WLAN_SKB_1_2PAGE_BUF_NUM) + \
+ (DHD_SKB_4PAGE_BUF_NUM))
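+/*
+ * With the sizes above, the static pool is 8 one-page + 8 two-page +
+ * 1 four-page skbs for SDIO, and 192 two-page skbs for PCIe.
+ */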
+
+#define WLAN_MAX_PKTID_ITEMS (8192)
+#define WLAN_DHD_PKTID_MAP_HDR_SIZE (20 + 4*(WLAN_MAX_PKTID_ITEMS + 1))
+#define WLAN_DHD_PKTID_MAP_ITEM_SIZE (32)
+#define WLAN_DHD_PKTID_MAP_SIZE ((WLAN_DHD_PKTID_MAP_HDR_SIZE) + \
+ ((WLAN_MAX_PKTID_ITEMS+1) * WLAN_DHD_PKTID_MAP_ITEM_SIZE))
+
+#define WLAN_MAX_PKTID_IOCTL_ITEMS (32)
+#define WLAN_DHD_PKTID_IOCTL_MAP_HDR_SIZE (20 + 4*(WLAN_MAX_PKTID_IOCTL_ITEMS + 1))
+#define WLAN_DHD_PKTID_IOCTL_MAP_ITEM_SIZE (32)
+#define WLAN_DHD_PKTID_IOCTL_MAP_SIZE ((WLAN_DHD_PKTID_IOCTL_MAP_HDR_SIZE) + \
+ ((WLAN_MAX_PKTID_IOCTL_ITEMS+1) * WLAN_DHD_PKTID_IOCTL_MAP_ITEM_SIZE))
+
+#define DHD_LOG_DUMP_BUF_SIZE (1024 * 1024 * 4)
+#define DHD_LOG_DUMP_BUF_EX_SIZE (1024 * 1024 * 2)
+
+#define DHD_PKTLOG_DUMP_BUF_SIZE (64 * 1024)
+
+#define WLAN_DHD_WLFC_HANGER_MAXITEMS 3072
+#define WLAN_DHD_WLFC_HANGER_ITEM_SIZE 32
+#define WLAN_DHD_WLFC_HANGER_SIZE ((WLAN_DHD_WLFC_HANGER_ITEM_SIZE) + \
+ ((WLAN_DHD_WLFC_HANGER_MAXITEMS) * (WLAN_DHD_WLFC_HANGER_ITEM_SIZE)))
+
+static struct sk_buff *wlan_static_skb[WLAN_SKB_BUF_NUM];
+
+struct wlan_mem_prealloc {
+ void *mem_ptr;
+ unsigned long size;
+};
+
+static struct wlan_mem_prealloc wlan_mem_array[PREALLOC_WLAN_SEC_NUM] = {
+ {NULL, (WLAN_SECTION_SIZE_0 + PREALLOC_WLAN_SECTION_HEADER)},
+ {NULL, (WLAN_SECTION_SIZE_1 + PREALLOC_WLAN_SECTION_HEADER)},
+ {NULL, (WLAN_SECTION_SIZE_2 + PREALLOC_WLAN_SECTION_HEADER)},
+ {NULL, (WLAN_SECTION_SIZE_3 + PREALLOC_WLAN_SECTION_HEADER)}
+};
+
+static void *wlan_static_scan_buf0 = NULL;
+static void *wlan_static_scan_buf1 = NULL;
+static void *wlan_static_dhd_info_buf = NULL;
+static void *wlan_static_dhd_wlfc_buf = NULL;
+static void *wlan_static_if_flow_lkup = NULL;
+static void *wlan_static_dhd_memdump_ram = NULL;
+static void *wlan_static_dhd_wlfc_hanger = NULL;
+static void *wlan_static_dhd_pktid_map = NULL;
+static void *wlan_static_dhd_pktid_ioctl_map = NULL;
+static void *wlan_static_dhd_log_dump_buf = NULL;
+static void *wlan_static_dhd_log_dump_buf_ex = NULL;
+static void *wlan_static_dhd_pktlog_dump_buf = NULL;
+
+void dhd_exit_wlan_mem(void);
+
+void
+*dhd_wlan_mem_prealloc(int section, unsigned long size)
+{
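+ /*
+ * Sections 0..3 are served from wlan_mem_array, section
+ * PREALLOC_WLAN_SEC_NUM (4) returns the whole static skb pool, and the
+ * WLAN_STATIC_* ids (5 and up) map to the dedicated buffers below.
+ */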
+ if (section == PREALLOC_WLAN_SEC_NUM) {
+ return wlan_static_skb;
+ }
+
+ if (section == WLAN_STATIC_SCAN_BUF0) {
+ return wlan_static_scan_buf0;
+ }
+
+ if (section == WLAN_STATIC_SCAN_BUF1) {
+ return wlan_static_scan_buf1;
+ }
+
+ if (section == WLAN_STATIC_DHD_INFO_BUF) {
+ if (size > WLAN_DHD_INFO_BUF_SIZE) {
+ pr_err("request DHD_INFO size(%lu) is bigger than"
+ " static size(%d).\n", size,
+ WLAN_DHD_INFO_BUF_SIZE);
+ return NULL;
+ }
+ return wlan_static_dhd_info_buf;
+ }
+
+ if (section == WLAN_STATIC_DHD_WLFC_BUF) {
+ if (size > WLAN_DHD_WLFC_BUF_SIZE) {
+ pr_err("request DHD_WLFC size(%lu) is bigger than"
+ " static size(%d).\n",
+ size, WLAN_DHD_WLFC_BUF_SIZE);
+ return NULL;
+ }
+ return wlan_static_dhd_wlfc_buf;
+ }
+
+ if (section == WLAN_STATIC_DHD_WLFC_HANGER) {
+ if (size > WLAN_DHD_WLFC_HANGER_SIZE) {
+ pr_err("request DHD_WLFC_HANGER size(%lu) is bigger than"
+ " static size(%d).\n",
+ size, WLAN_DHD_WLFC_HANGER_SIZE);
+ return NULL;
+ }
+ return wlan_static_dhd_wlfc_hanger;
+ }
+
+ if (section == WLAN_STATIC_DHD_IF_FLOW_LKUP) {
+ if (size > WLAN_DHD_IF_FLOW_LKUP_SIZE) {
+ pr_err("request DHD_WLFC size(%lu) is bigger than"
+ " static size(%d).\n",
+ size, WLAN_DHD_WLFC_BUF_SIZE);
+ return NULL;
+ }
+ return wlan_static_if_flow_lkup;
+ }
+
+ if (section == WLAN_STATIC_DHD_MEMDUMP_RAM) {
+ if (size > WLAN_DHD_MEMDUMP_SIZE) {
+ pr_err("request DHD_MEMDUMP_RAM size(%lu) is bigger"
+ " than static size(%d).\n",
+ size, WLAN_DHD_MEMDUMP_SIZE);
+ return NULL;
+ }
+ return wlan_static_dhd_memdump_ram;
+ }
+
+ if (section == WLAN_STATIC_DHD_PKTID_MAP) {
+ if (size > WLAN_DHD_PKTID_MAP_SIZE) {
+ pr_err("request DHD_PKTID_MAP size(%lu) is bigger than"
+ " static size(%d).\n",
+ size, WLAN_DHD_PKTID_MAP_SIZE);
+ return NULL;
+ }
+ return wlan_static_dhd_pktid_map;
+ }
+
+ if (section == WLAN_STATIC_DHD_PKTID_IOCTL_MAP) {
+ if (size > WLAN_DHD_PKTID_IOCTL_MAP_SIZE) {
+ pr_err("request DHD_PKTID_IOCTL_MAP size(%lu) is bigger than"
+ " static size(%d).\n",
+ size, WLAN_DHD_PKTID_IOCTL_MAP_SIZE);
+ return NULL;
+ }
+ return wlan_static_dhd_pktid_ioctl_map;
+ }
+
+ if (section == WLAN_STATIC_DHD_LOG_DUMP_BUF) {
+ if (size > DHD_LOG_DUMP_BUF_SIZE) {
+ pr_err("request DHD_LOG_DUMP_BUF size(%lu) is bigger then"
+ " static size(%d).\n",
+ size, DHD_LOG_DUMP_BUF_SIZE);
+ return NULL;
+ }
+ return wlan_static_dhd_log_dump_buf;
+ }
+
+ if (section == WLAN_STATIC_DHD_LOG_DUMP_BUF_EX) {
+ if (size > DHD_LOG_DUMP_BUF_EX_SIZE) {
+ pr_err("request DHD_LOG_DUMP_BUF_EX size(%lu) is bigger then"
+ " static size(%d).\n",
+ size, DHD_LOG_DUMP_BUF_EX_SIZE);
+ return NULL;
+ }
+ return wlan_static_dhd_log_dump_buf_ex;
+ }
+
+ if (section == WLAN_STATIC_DHD_PKTLOG_DUMP_BUF) {
+ if (size > DHD_PKTLOG_DUMP_BUF_SIZE) {
+ pr_err("request DHD_PKTLOG_DUMP_BUF size(%lu) is bigger then"
+ " static size(%d).\n",
+ size, DHD_PKTLOG_DUMP_BUF_SIZE);
+ return NULL;
+ }
+ return wlan_static_dhd_pktlog_dump_buf;
+ }
+
+ if ((section < 0) || (section >= PREALLOC_WLAN_SEC_NUM)) {
+ return NULL;
+ }
+
+ if (wlan_mem_array[section].size < size) {
+ return NULL;
+ }
+
+ return wlan_mem_array[section].mem_ptr;
+}
+EXPORT_SYMBOL(dhd_wlan_mem_prealloc);
+
+int
+dhd_init_wlan_mem(void)
+{
+ int i;
+ int j;
+
+#if !defined(CONFIG_BCMDHD_PCIE)
+ for (i = 0; i < DHD_SKB_1PAGE_BUF_NUM; i++) {
+ wlan_static_skb[i] = __dev_alloc_skb(DHD_SKB_1PAGE_BUFSIZE, GFP_KERNEL);
+ if (!wlan_static_skb[i]) {
+ pr_err("Failed to alloc 1PAGE SKB BUF\n");
+ goto err_skb_alloc;
+ }
+ }
+#endif /* !CONFIG_BCMDHD_PCIE */
+
+ for (i = DHD_SKB_1PAGE_BUF_NUM; i < WLAN_SKB_1_2PAGE_BUF_NUM; i++) {
+ wlan_static_skb[i] = __dev_alloc_skb(DHD_SKB_2PAGE_BUFSIZE, GFP_KERNEL);
+ if (!wlan_static_skb[i]) {
+ pr_err("Failed to alloc 2PAGE SKB BUF\n");
+ goto err_skb_alloc;
+ }
+ }
+
+#if !defined(CONFIG_BCMDHD_PCIE)
+ wlan_static_skb[i] = __dev_alloc_skb(DHD_SKB_4PAGE_BUFSIZE, GFP_KERNEL);
+ if (!wlan_static_skb[i]) {
+ pr_err("Failed to alloc 4PAGE SKB BUF\n");
+ goto err_skb_alloc;
+ }
+#endif /* !CONFIG_BCMDHD_PCIE */
+
+ for (i = 0; i < PREALLOC_WLAN_SEC_NUM; i++) {
+ if (wlan_mem_array[i].size > 0) {
+ wlan_mem_array[i].mem_ptr =
+ kmalloc(wlan_mem_array[i].size, GFP_KERNEL);
+
+ if (!wlan_mem_array[i].mem_ptr) {
+ pr_err("Failed to mem_alloc for WLAN\n");
+ goto err_mem_alloc;
+ }
+ }
+ }
+
+ wlan_static_scan_buf0 = kmalloc(WLAN_SCAN_BUF_SIZE, GFP_KERNEL);
+ if (!wlan_static_scan_buf0) {
+ pr_err("Failed to alloc wlan_static_scan_buf0\n");
+ goto err_mem_alloc;
+ }
+
+ wlan_static_scan_buf1 = kmalloc(WLAN_SCAN_BUF_SIZE, GFP_KERNEL);
+ if (!wlan_static_scan_buf1) {
+ pr_err("Failed to alloc wlan_static_scan_buf1\n");
+ goto err_mem_alloc;
+ }
+
+ wlan_static_dhd_log_dump_buf = kmalloc(DHD_LOG_DUMP_BUF_SIZE, GFP_KERNEL);
+ if (!wlan_static_dhd_log_dump_buf) {
+ pr_err("Failed to alloc wlan_static_dhd_log_dump_buf\n");
+ goto err_mem_alloc;
+ }
+
+ wlan_static_dhd_log_dump_buf_ex = kmalloc(DHD_LOG_DUMP_BUF_EX_SIZE, GFP_KERNEL);
+ if (!wlan_static_dhd_log_dump_buf_ex) {
+ pr_err("Failed to alloc wlan_static_dhd_log_dump_buf_ex\n");
+ goto err_mem_alloc;
+ }
+
+ wlan_static_dhd_info_buf = kmalloc(WLAN_DHD_INFO_BUF_SIZE, GFP_KERNEL);
+ if (!wlan_static_dhd_info_buf) {
+ pr_err("Failed to alloc wlan_static_dhd_info_buf\n");
+ goto err_mem_alloc;
+ }
+
+#ifdef CONFIG_BCMDHD_PCIE
+ wlan_static_if_flow_lkup = kmalloc(WLAN_DHD_IF_FLOW_LKUP_SIZE,
+ GFP_KERNEL);
+ if (!wlan_static_if_flow_lkup) {
+ pr_err("Failed to alloc wlan_static_if_flow_lkup\n");
+ goto err_mem_alloc;
+ }
+
+#ifdef CONFIG_BCMDHD_PREALLOC_PKTIDMAP
+ wlan_static_dhd_pktid_map = kmalloc(WLAN_DHD_PKTID_MAP_SIZE,
+ GFP_KERNEL);
+ if (!wlan_static_dhd_pktid_map) {
+ pr_err("Failed to alloc wlan_static_dhd_pktid_map\n");
+ goto err_mem_alloc;
+ }
+
+ wlan_static_dhd_pktid_ioctl_map = kmalloc(WLAN_DHD_PKTID_IOCTL_MAP_SIZE,
+ GFP_KERNEL);
+ if (!wlan_static_dhd_pktid_ioctl_map) {
+ pr_err("Failed to alloc wlan_static_dhd_pktid_ioctl_map\n");
+ goto err_mem_alloc;
+ }
+#endif /* CONFIG_BCMDHD_PREALLOC_PKTIDMAP */
+#else
+ wlan_static_dhd_wlfc_buf = kmalloc(WLAN_DHD_WLFC_BUF_SIZE,
+ GFP_KERNEL);
+ if (!wlan_static_dhd_wlfc_buf) {
+ pr_err("Failed to alloc wlan_static_dhd_wlfc_buf\n");
+ goto err_mem_alloc;
+ }
+
+ wlan_static_dhd_wlfc_hanger = kmalloc(WLAN_DHD_WLFC_HANGER_SIZE,
+ GFP_KERNEL);
+ if (!wlan_static_dhd_wlfc_hanger) {
+ pr_err("Failed to alloc wlan_static_dhd_wlfc_hanger\n");
+ goto err_mem_alloc;
+ }
+#endif /* CONFIG_BCMDHD_PCIE */
+
+#ifdef CONFIG_BCMDHD_PREALLOC_MEMDUMP
+ wlan_static_dhd_memdump_ram = kmalloc(WLAN_DHD_MEMDUMP_SIZE, GFP_KERNEL);
+ if (!wlan_static_dhd_memdump_ram) {
+ pr_err("Failed to alloc wlan_static_dhd_memdump_ram\n");
+ goto err_mem_alloc;
+ }
+#endif /* CONFIG_BCMDHD_PREALLOC_MEMDUMP */
+
+ wlan_static_dhd_pktlog_dump_buf = kmalloc(DHD_PKTLOG_DUMP_BUF_SIZE, GFP_KERNEL);
+ if (!wlan_static_dhd_pktlog_dump_buf) {
+ pr_err("Failed to alloc wlan_static_dhd_pktlog_dump_buf\n");
+ goto err_mem_alloc;
+ }
+
+ pr_err("%s: WIFI MEM Allocated\n", __FUNCTION__);
+ return 0;
+
+err_mem_alloc:
+ dhd_exit_wlan_mem();
+ return -ENOMEM;
+
+err_skb_alloc:
+ /*
+ * Some skb allocations failed above, so free only the entries that
+ * were actually allocated. dhd_exit_wlan_mem() frees the whole array
+ * and does not know how many entries are valid, so it is not used here.
+ */
+ pr_err("Failed to skb_alloc for WLAN\n");
+ for (j = 0; j < i; j++) {
+ dev_kfree_skb(wlan_static_skb[j]);
+ }
+ return -ENOMEM;
+}
+
+EXPORT_SYMBOL(dhd_init_wlan_mem);
+
+void
+dhd_exit_wlan_mem(void)
+{
+ int i = 0;
+
+#ifdef CONFIG_BCMDHD_PREALLOC_MEMDUMP
+ if (wlan_static_dhd_memdump_ram) {
+ kfree(wlan_static_dhd_memdump_ram);
+ }
+
+#endif /* CONFIG_BCMDHD_PREALLOC_MEMDUMP */
+
+#ifdef CONFIG_BCMDHD_PCIE
+ if (wlan_static_if_flow_lkup) {
+ kfree(wlan_static_if_flow_lkup);
+ }
+
+#ifdef CONFIG_BCMDHD_PREALLOC_PKTIDMAP
+ if (wlan_static_dhd_pktid_map) {
+ kfree(wlan_static_dhd_pktid_map);
+ }
+
+ if (wlan_static_dhd_pktid_ioctl_map) {
+ kfree(wlan_static_dhd_pktid_ioctl_map);
+ }
+#endif /* CONFIG_BCMDHD_PREALLOC_PKTIDMAP */
+#else
+ if (wlan_static_dhd_wlfc_buf) {
+ kfree(wlan_static_dhd_wlfc_buf);
+ }
+
+ if (wlan_static_dhd_wlfc_hanger) {
+ kfree(wlan_static_dhd_wlfc_hanger);
+ }
+#endif /* CONFIG_BCMDHD_PCIE */
+ if (wlan_static_dhd_info_buf) {
+ kfree(wlan_static_dhd_info_buf);
+ }
+
+ if (wlan_static_dhd_log_dump_buf) {
+ kfree(wlan_static_dhd_log_dump_buf);
+ }
+
+ if (wlan_static_dhd_log_dump_buf_ex) {
+ kfree(wlan_static_dhd_log_dump_buf_ex);
+ }
+
+ if (wlan_static_scan_buf1) {
+ kfree(wlan_static_scan_buf1);
+ }
+
+ if (wlan_static_scan_buf0) {
+ kfree(wlan_static_scan_buf0);
+ }
+
+ if (wlan_static_dhd_pktlog_dump_buf) {
+ kfree(wlan_static_dhd_pktlog_dump_buf);
+ }
+
+ for (i = 0; i < PREALLOC_WLAN_SEC_NUM; i++) {
+ if (wlan_mem_array[i].mem_ptr) {
+ kfree(wlan_mem_array[i].mem_ptr);
+ }
+ }
+
+ for (i = 0; i < WLAN_SKB_BUF_NUM; i++) {
+ dev_kfree_skb(wlan_static_skb[i]);
+ }
+
+ return;
+}
+EXPORT_SYMBOL(dhd_exit_wlan_mem);
+#endif /* CONFIG_BROADCOM_WIFI_RESERVED_MEM */
diff --git a/bcmdhd.101.10.361.x/dhd_custom_msm.c b/bcmdhd.101.10.361.x/dhd_custom_msm.c
new file mode 100755
index 0000000..eb16b69
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_custom_msm.c
@@ -0,0 +1,283 @@
+/*
+ * Platform Dependent file for Qualcomm MSM/APQ
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/skbuff.h>
+#include <linux/mmc/host.h>
+#ifdef CONFIG_BCMDHD_PCIE
+#include <linux/msm_pcie.h>
+#endif /* CONFIG_BCMDHD_PCIE */
+#include <linux/fcntl.h>
+#include <linux/fs.h>
+#include <linux/of_gpio.h>
+#include <linux/wlan_plat.h>
+
+#ifdef CONFIG_BROADCOM_WIFI_RESERVED_MEM
+extern void dhd_exit_wlan_mem(void);
+extern int dhd_init_wlan_mem(void);
+extern void *dhd_wlan_mem_prealloc(int section, unsigned long size);
+#endif /* CONFIG_BROADCOM_WIFI_RESERVED_MEM */
+
+#define WIFI_TURNON_DELAY 200
+static int wlan_reg_on = -1;
+#define DHD_DT_COMPAT_ENTRY "android,bcmdhd_wlan"
+#ifdef CUSTOMER_HW2
+#define WIFI_WL_REG_ON_PROPNAME "wl_reg_on"
+#else
+#define WIFI_WL_REG_ON_PROPNAME "wlan-en-gpio"
+#endif /* CUSTOMER_HW2 */
+
+#if defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_ARCH_MSM8998) || \
+ defined(CONFIG_ARCH_SDM845) || defined(CONFIG_ARCH_SM8150) || \
+ defined(CONFIG_ARCH_KONA) || defined(CONFIG_ARCH_LAHAINA)
+#define MSM_PCIE_CH_NUM 0
+#else
+#define MSM_PCIE_CH_NUM 1
+#endif /* MSM PCIE Platforms */
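+/*
+ * Root-complex channel passed to msm_pcie_enumerate(): the SoCs listed
+ * above use RC0, all others RC1.
+ */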
+
+#ifdef CONFIG_BCMDHD_OOB_HOST_WAKE
+static int wlan_host_wake_up = -1;
+static int wlan_host_wake_irq = 0;
+#ifdef CUSTOMER_HW2
+#define WIFI_WLAN_HOST_WAKE_PROPNAME "wl_host_wake"
+#else
+#define WIFI_WLAN_HOST_WAKE_PROPNAME "wlan-host-wake-gpio"
+#endif /* CUSTOMER_HW2 */
+#endif /* CONFIG_BCMDHD_OOB_HOST_WAKE */
+
+int __init
+dhd_wifi_init_gpio(void)
+{
+ char *wlan_node = DHD_DT_COMPAT_ENTRY;
+ struct device_node *root_node = NULL;
+
+ root_node = of_find_compatible_node(NULL, NULL, wlan_node);
+ if (!root_node) {
+ WARN(1, "failed to get device node of BRCM WLAN\n");
+ return -ENODEV;
+ }
+
+ /* ========== WLAN_PWR_EN ============ */
+ wlan_reg_on = of_get_named_gpio(root_node, WIFI_WL_REG_ON_PROPNAME, 0);
+ printk(KERN_INFO "%s: gpio_wlan_power : %d\n", __FUNCTION__, wlan_reg_on);
+
+ if (gpio_request_one(wlan_reg_on, GPIOF_OUT_INIT_LOW, "WL_REG_ON")) {
+ printk(KERN_ERR "%s: Faiiled to request gpio %d for WL_REG_ON\n",
+ __FUNCTION__, wlan_reg_on);
+ } else {
+ printk(KERN_ERR "%s: gpio_request WL_REG_ON done - WLAN_EN: GPIO %d\n",
+ __FUNCTION__, wlan_reg_on);
+ }
+
+ if (gpio_direction_output(wlan_reg_on, 1)) {
+ printk(KERN_ERR "%s: WL_REG_ON failed to pull up\n", __FUNCTION__);
+ } else {
+ printk(KERN_ERR "%s: WL_REG_ON is pulled up\n", __FUNCTION__);
+ }
+
+ if (gpio_get_value(wlan_reg_on)) {
+ printk(KERN_INFO "%s: Initial WL_REG_ON: [%d]\n",
+ __FUNCTION__, gpio_get_value(wlan_reg_on));
+ }
+
+ /* Wait WIFI_TURNON_DELAY for the power rail to stabilize */
+ msleep(WIFI_TURNON_DELAY);
+
+#ifdef CONFIG_BCMDHD_OOB_HOST_WAKE
+ /* ========== WLAN_HOST_WAKE ============ */
+ wlan_host_wake_up = of_get_named_gpio(root_node, WIFI_WLAN_HOST_WAKE_PROPNAME, 0);
+ printk(KERN_INFO "%s: gpio_wlan_host_wake : %d\n", __FUNCTION__, wlan_host_wake_up);
+
+#ifndef CUSTOMER_HW2
+ if (gpio_request_one(wlan_host_wake_up, GPIOF_IN, "WLAN_HOST_WAKE")) {
+ printk(KERN_ERR "%s: Faiiled to request gpio %d for WLAN_HOST_WAKE\n",
+ __FUNCTION__, wlan_host_wake_up);
+ return -ENODEV;
+ } else {
+ printk(KERN_ERR "%s: gpio_request WLAN_HOST_WAKE done"
+ " - WLAN_HOST_WAKE: GPIO %d\n",
+ __FUNCTION__, wlan_host_wake_up);
+ }
+#endif /* !CUSTOMER_HW2 */
+
+ gpio_direction_input(wlan_host_wake_up);
+ wlan_host_wake_irq = gpio_to_irq(wlan_host_wake_up);
+#endif /* CONFIG_BCMDHD_OOB_HOST_WAKE */
+
+#ifdef CONFIG_BCMDHD_PCIE
+ printk(KERN_INFO "%s: Call msm_pcie_enumerate\n", __FUNCTION__);
+ msm_pcie_enumerate(MSM_PCIE_CH_NUM);
+#endif /* CONFIG_BCMDHD_PCIE */
+
+ return 0;
+}
+
+int
+dhd_wlan_power(int onoff)
+{
+ printk(KERN_INFO"%s Enter: power %s\n", __func__, onoff ? "on" : "off");
+
+ if (onoff) {
+ if (gpio_direction_output(wlan_reg_on, 1)) {
+ printk(KERN_ERR "%s: WL_REG_ON is failed to pull up\n", __FUNCTION__);
+ return -EIO;
+ }
+ if (gpio_get_value(wlan_reg_on)) {
+ printk(KERN_INFO"WL_REG_ON on-step-2 : [%d]\n",
+ gpio_get_value(wlan_reg_on));
+ } else {
+ printk("[%s] gpio value is 0. We need reinit.\n", __func__);
+ if (gpio_direction_output(wlan_reg_on, 1)) {
+ printk(KERN_ERR "%s: WL_REG_ON is "
+ "failed to pull up\n", __func__);
+ }
+ }
+ } else {
+ if (gpio_direction_output(wlan_reg_on, 0)) {
+ printk(KERN_ERR "%s: WL_REG_ON is failed to pull up\n", __FUNCTION__);
+ return -EIO;
+ }
+ if (gpio_get_value(wlan_reg_on)) {
+ printk(KERN_INFO"WL_REG_ON on-step-2 : [%d]\n",
+ gpio_get_value(wlan_reg_on));
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL(dhd_wlan_power);
+
+static int
+dhd_wlan_reset(int onoff)
+{
+ return 0;
+}
+
+static int
+dhd_wlan_set_carddetect(int val)
+{
+#ifdef CONFIG_BCMDHD_PCIE
+ printk(KERN_INFO "%s: Call msm_pcie_enumerate\n", __FUNCTION__);
+ msm_pcie_enumerate(MSM_PCIE_CH_NUM);
+#endif /* CONFIG_BCMDHD_PCIE */
+ return 0;
+}
+
+#if defined(CONFIG_BCMDHD_OOB_HOST_WAKE) && defined(CONFIG_BCMDHD_GET_OOB_STATE)
+int
+dhd_get_wlan_oob_gpio(void)
+{
+ return gpio_is_valid(wlan_host_wake_up) ?
+ gpio_get_value(wlan_host_wake_up) : -1;
+}
+EXPORT_SYMBOL(dhd_get_wlan_oob_gpio);
+#endif /* CONFIG_BCMDHD_OOB_HOST_WAKE && CONFIG_BCMDHD_GET_OOB_STATE */
+
+struct resource dhd_wlan_resources = {
+ .name = "bcmdhd_wlan_irq",
+ .start = 0, /* Dummy */
+ .end = 0, /* Dummy */
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE |
+#ifdef CONFIG_BCMDHD_PCIE
+ IORESOURCE_IRQ_HIGHEDGE,
+#else
+ IORESOURCE_IRQ_HIGHLEVEL,
+#endif /* CONFIG_BCMDHD_PCIE */
+};
+EXPORT_SYMBOL(dhd_wlan_resources);
+
+struct wifi_platform_data dhd_wlan_control = {
+ .set_power = dhd_wlan_power,
+ .set_reset = dhd_wlan_reset,
+ .set_carddetect = dhd_wlan_set_carddetect,
+#ifdef CONFIG_BROADCOM_WIFI_RESERVED_MEM
+ .mem_prealloc = dhd_wlan_mem_prealloc,
+#endif /* CONFIG_BROADCOM_WIFI_RESERVED_MEM */
+};
+EXPORT_SYMBOL(dhd_wlan_control);
+
+int __init
+dhd_wlan_init(void)
+{
+ int ret;
+
+ printk(KERN_INFO"%s: START.......\n", __FUNCTION__);
+ ret = dhd_wifi_init_gpio();
+ if (ret < 0) {
+ printk(KERN_ERR "%s: failed to initiate GPIO, ret=%d\n",
+ __FUNCTION__, ret);
+ goto fail;
+ }
+
+#ifdef CONFIG_BCMDHD_OOB_HOST_WAKE
+ dhd_wlan_resources.start = wlan_host_wake_irq;
+ dhd_wlan_resources.end = wlan_host_wake_irq;
+#endif /* CONFIG_BCMDHD_OOB_HOST_WAKE */
+
+#ifdef CONFIG_BROADCOM_WIFI_RESERVED_MEM
+ ret = dhd_init_wlan_mem();
+ if (ret < 0) {
+ printk(KERN_ERR "%s: failed to alloc reserved memory,"
+ " ret=%d\n", __FUNCTION__, ret);
+ }
+#endif /* CONFIG_BROADCOM_WIFI_RESERVED_MEM */
+
+fail:
+ printk(KERN_INFO"%s: FINISH.......\n", __FUNCTION__);
+ return ret;
+}
+
+int
+dhd_wlan_deinit(void)
+{
+#ifdef CONFIG_BCMDHD_OOB_HOST_WAKE
+ gpio_free(wlan_host_wake_up);
+#endif /* CONFIG_BCMDHD_OOB_HOST_WAKE */
+ gpio_free(wlan_reg_on);
+
+#ifdef CONFIG_BROADCOM_WIFI_RESERVED_MEM
+ dhd_exit_wlan_mem();
+#endif /* CONFIG_BROADCOM_WIFI_RESERVED_MEM */
+ return 0;
+}
+
+#ifndef BCMDHD_MODULAR
+#if defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_ARCH_MSM8998) || \
+ defined(CONFIG_ARCH_SDM845) || defined(CONFIG_ARCH_SM8150) || \
+ defined(CONFIG_ARCH_KONA) || defined(CONFIG_ARCH_LAHAINA)
+#if defined(CONFIG_DEFERRED_INITCALLS)
+deferred_module_init(dhd_wlan_init);
+#else
+late_initcall(dhd_wlan_init);
+#endif /* CONFIG_DEFERRED_INITCALLS */
+#else
+device_initcall(dhd_wlan_init);
+#endif /* MSM PCIE Platforms */
+#endif /* !BCMDHD_MODULAR */
diff --git a/bcmdhd.101.10.361.x/dhd_custom_sec.c b/bcmdhd.101.10.361.x/dhd_custom_sec.c
new file mode 100755
index 0000000..fd5607b
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_custom_sec.c
@@ -0,0 +1,1040 @@
+/*
+ * Customer HW 4 dependent file
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_custom_sec.c 334946 2012-05-24 20:38:00Z chanyun $
+ */
+#if defined(CUSTOMER_HW4) || defined(CUSTOMER_HW40)
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+
+#include <ethernet.h>
+#include <dngl_stats.h>
+#include <bcmutils.h>
+#include <dhd.h>
+#include <dhd_dbg.h>
+#include <dhd_linux.h>
+#include <bcmdevs.h>
+#include <bcmdevs_legacy.h> /* need to still support chips no longer in trunk firmware */
+
+#include <linux/fcntl.h>
+#include <linux/fs.h>
+
+const struct cntry_locales_custom translate_custom_table[] = {
+ /* default ccode/regrev */
+ {"", "XZ", 11}, /* Universal if Country code is unknown or empty */
+ {"IR", "XZ", 11}, /* Universal if Country code is IRAN, (ISLAMIC REPUBLIC OF) */
+ {"SD", "XZ", 11}, /* Universal if Country code is SUDAN */
+ {"PS", "XZ", 11}, /* Universal if Country code is PALESTINIAN TERRITORY, OCCUPIED */
+ {"TL", "XZ", 11}, /* Universal if Country code is TIMOR-LESTE (EAST TIMOR) */
+ {"MH", "XZ", 11}, /* Universal if Country code is MARSHALL ISLANDS */
+ {"GL", "GP", 2},
+ {"AL", "AL", 2},
+#ifdef DHD_SUPPORT_GB_999
+ {"DZ", "GB", 999},
+#else
+ {"DZ", "GB", 6},
+#endif /* DHD_SUPPORT_GB_999 */
+ {"AS", "AS", 12},
+ {"AI", "AI", 1},
+ {"AF", "AD", 0},
+ {"AG", "AG", 2},
+ {"AR", "AU", 6},
+ {"AW", "AW", 2},
+ {"AU", "AU", 6},
+ {"AT", "AT", 4},
+ {"AZ", "AZ", 2},
+ {"BS", "BS", 2},
+ {"BH", "BH", 4},
+ {"BD", "BD", 1},
+ {"BY", "BY", 3},
+ {"BE", "BE", 4},
+ {"BM", "BM", 12},
+ {"BA", "BA", 2},
+ {"BR", "BR", 2},
+ {"VG", "VG", 2},
+ {"BN", "BN", 4},
+ {"BG", "BG", 4},
+ {"KH", "KH", 2},
+ {"KY", "KY", 3},
+ {"CN", "CN", 38},
+ {"CO", "CO", 17},
+ {"CR", "CR", 17},
+ {"HR", "HR", 4},
+ {"CY", "CY", 4},
+ {"CZ", "CZ", 4},
+ {"DK", "DK", 4},
+ {"EE", "EE", 4},
+ {"ET", "ET", 2},
+ {"FI", "FI", 4},
+ {"FR", "FR", 5},
+ {"GF", "GF", 2},
+ {"DE", "DE", 7},
+ {"GR", "GR", 4},
+ {"GD", "GD", 2},
+ {"GP", "GP", 2},
+ {"GU", "GU", 30},
+ {"HK", "HK", 2},
+ {"HU", "HU", 4},
+ {"IS", "IS", 4},
+ {"IN", "IN", 3},
+ {"ID", "ID", 1},
+ {"IE", "IE", 5},
+ {"IL", "IL", 14},
+ {"IT", "IT", 4},
+ {"JP", "JP", 45},
+ {"JO", "JO", 3},
+ {"KE", "SA", 0},
+ {"KW", "KW", 5},
+ {"LA", "LA", 2},
+ {"LV", "LV", 4},
+ {"LB", "LB", 5},
+ {"LS", "LS", 2},
+ {"LI", "LI", 4},
+ {"LT", "LT", 4},
+ {"LU", "LU", 3},
+ {"MO", "SG", 0},
+ {"MK", "MK", 2},
+ {"MW", "MW", 1},
+ {"MY", "MY", 3},
+ {"MV", "MV", 3},
+ {"MT", "MT", 4},
+ {"MQ", "MQ", 2},
+ {"MR", "MR", 2},
+ {"MU", "MU", 2},
+ {"YT", "YT", 2},
+ {"MX", "MX", 44},
+ {"MD", "MD", 2},
+ {"MC", "MC", 1},
+ {"ME", "ME", 2},
+ {"MA", "MA", 2},
+ {"NL", "NL", 4},
+ {"AN", "GD", 2},
+ {"NZ", "NZ", 4},
+ {"NO", "NO", 4},
+ {"OM", "OM", 4},
+ {"PA", "PA", 17},
+ {"PG", "AU", 6},
+ {"PY", "PY", 2},
+ {"PE", "PE", 20},
+ {"PH", "PH", 5},
+ {"PL", "PL", 4},
+ {"PT", "PT", 4},
+ {"PR", "PR", 38},
+ {"RE", "RE", 2},
+ {"RO", "RO", 4},
+ {"SN", "MA", 2},
+ {"RS", "RS", 2},
+ {"SK", "SK", 4},
+ {"SI", "SI", 4},
+ {"ES", "ES", 4},
+ {"LK", "LK", 1},
+ {"SE", "SE", 4},
+ {"CH", "CH", 4},
+ {"TW", "TW", 1},
+ {"TH", "TH", 5},
+ {"TT", "TT", 3},
+ {"TR", "TR", 7},
+ {"AE", "AE", 6},
+#ifdef DHD_SUPPORT_GB_999
+ {"GB", "GB", 999},
+#else
+ {"GB", "GB", 6},
+#endif /* DHD_SUPPORT_GB_999 */
+ {"UY", "VE", 3},
+ {"VI", "PR", 38},
+ {"VA", "VA", 2},
+ {"VE", "VE", 3},
+ {"VN", "VN", 4},
+ {"ZM", "LA", 2},
+ {"EC", "EC", 21},
+ {"SV", "SV", 25},
+#if defined(BCM4358_CHIP) || defined(BCM4359_CHIP)
+ {"KR", "KR", 70},
+#else
+ {"KR", "KR", 48},
+#endif
+ {"RU", "RU", 13},
+ {"UA", "UA", 8},
+ {"GT", "GT", 1},
+ {"MN", "MN", 1},
+ {"NI", "NI", 2},
+ {"UZ", "MA", 2},
+ {"ZA", "ZA", 6},
+ {"EG", "EG", 13},
+ {"TN", "TN", 1},
+ {"AO", "AD", 0},
+ {"BT", "BJ", 0},
+ {"BW", "BJ", 0},
+ {"LY", "LI", 4},
+ {"BO", "NG", 0},
+ {"UM", "PR", 38},
+ /* Support FCC 15.407 (Part 15E) Changes, effective June 2 2014 */
+ /* US/988, Q2/993 country codes with higher power on UNII-1 5G band */
+ {"US", "US", 988},
+ {"CU", "US", 988},
+ {"CA", "Q2", 993},
+};
+
+/* Customized Locale convertor
+* input : ISO 3166-1 country abbreviation
+* output: customized cspec
+*/
+void get_customized_country_code(void *adapter, char *country_iso_code, wl_country_t *cspec)
+{
+ int size, i;
+
+ size = ARRAYSIZE(translate_custom_table);
+
+ if (cspec == 0)
+ return;
+
+ if (size == 0)
+ return;
+
+ for (i = 0; i < size; i++) {
+ if (strcmp(country_iso_code, translate_custom_table[i].iso_abbrev) == 0) {
+ memcpy(cspec->ccode,
+ translate_custom_table[i].custom_locale, WLC_CNTRY_BUF_SZ);
+ cspec->rev = translate_custom_table[i].custom_locale_rev;
+ return;
+ }
+ }
+ return;
+}
+
+#define PSMINFO PLATFORM_PATH".psm.info"
+#define ANTINFO PLATFORM_PATH".ant.info"
+#define WIFIVERINFO PLATFORM_PATH".wifiver.info"
+#define LOGTRACEINFO PLATFORM_PATH".logtrace.info"
+#define SOFTAPINFO PLATFORM_PATH".softap.info"
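+/*
+ * Each control file lives under PLATFORM_PATH (defined per platform
+ * elsewhere) and holds a small ASCII value that is read when the
+ * corresponding feature is configured.
+ */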
+
+#ifdef DHD_PM_CONTROL_FROM_FILE
+/* XXX This function sets the PM-related value based on a control value
+ * read from a file (or sysfs). PM is normally turned off for the MFG process.
+ */
+extern bool g_pm_control;
+#ifdef DHD_EXPORT_CNTL_FILE
+extern uint32 pmmode_val;
+#endif /* !DHD_EXPORT_CNTL_FILE */
+void sec_control_pm(dhd_pub_t *dhd, uint *power_mode)
+{
+#ifndef DHD_EXPORT_CNTL_FILE
+ struct file *fp = NULL;
+ char *filepath = PSMINFO;
+#endif /* DHD_EXPORT_CNTL_FILE */
+ char power_val = 0;
+ int ret = 0;
+#ifdef DHD_ENABLE_LPC
+ uint32 lpc = 0;
+#endif /* DHD_ENABLE_LPC */
+
+#ifndef DHD_EXPORT_CNTL_FILE
+ g_pm_control = FALSE;
+ fp = filp_open(filepath, O_RDONLY, 0);
+ if (IS_ERR(fp) || (fp == NULL)) {
+ /* Enable PowerSave Mode */
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)power_mode,
+ sizeof(uint), TRUE, 0);
+ DHD_ERROR(("[WIFI_SEC] %s: %s doesn't exist"
+ " so set PM to %d\n",
+ __FUNCTION__, filepath, *power_mode));
+ return;
+ } else {
+ kernel_read_compat(fp, fp->f_pos, &power_val, 1);
+ DHD_ERROR(("[WIFI_SEC] %s: POWER_VAL = %c \r\n", __FUNCTION__, power_val));
+ filp_close(fp, NULL);
+ }
+#else
+ g_pm_control = FALSE;
+ /* Not set from the framework side */
+ if (pmmode_val == 0xFFu) {
+ /* Enable PowerSave Mode */
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)power_mode,
+ sizeof(uint), TRUE, 0);
+ DHD_ERROR(("[WIFI_SEC] %s: doesn't set from sysfs"
+ " so set PM to %d\n",
+ __FUNCTION__, *power_mode));
+ return;
+
+ } else {
+ power_val = (char)pmmode_val;
+ }
+#endif /* !DHD_EXPORT_CNTL_FILE */
+
+#ifdef DHD_EXPORT_CNTL_FILE
+ if (power_val == 0) {
+#else
+ /* XXX: power_val is compared with character type read from .psm.info file */
+ if (power_val == '0') {
+#endif /* DHD_EXPORT_CNTL_FILE */
+#ifdef ROAM_ENABLE
+ uint roamvar = 1;
+#endif
+ uint32 wl_updown = 1;
+
+ *power_mode = PM_OFF;
+ /* Disable PowerSave Mode */
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)power_mode,
+ sizeof(uint), TRUE, 0);
+#ifndef CUSTOM_SET_ANTNPM
+ /* Turn off MPC in AP mode */
+ ret = dhd_iovar(dhd, 0, "mpc", (char *)power_mode, sizeof(*power_mode),
+ NULL, 0, TRUE);
+#endif /* !CUSTOM_SET_ANTNPM */
+ g_pm_control = TRUE;
+#ifdef ROAM_ENABLE
+ /* Roaming off of dongle */
+ ret = dhd_iovar(dhd, 0, "roam_off", (char *)&roamvar, sizeof(roamvar), NULL,
+ 0, TRUE);
+#endif
+#ifdef DHD_ENABLE_LPC
+ /* Set lpc 0 */
+ ret = dhd_iovar(dhd, 0, "lpc", (char *)&lpc, sizeof(lpc), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("[WIFI_SEC] %s: Set lpc failed %d\n",
+ __FUNCTION__, ret));
+ }
+#endif /* DHD_ENABLE_LPC */
+#ifdef DHD_PCIE_RUNTIMEPM
+ DHD_ERROR(("[WIFI_SEC] %s : Turn Runtime PM off \n", __FUNCTION__));
+ /* Turn Runtime PM off */
+ dhdpcie_block_runtime_pm(dhd);
+#endif /* DHD_PCIE_RUNTIMEPM */
+ /* Disable ocl */
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_UP, (char *)&wl_updown,
+ sizeof(wl_updown), TRUE, 0)) < 0) {
+ DHD_ERROR(("[WIFI_SEC] %s: WLC_UP faield %d\n", __FUNCTION__, ret));
+ }
+#ifndef CUSTOM_SET_OCLOFF
+ {
+ uint32 ocl_enable = 0;
+ ret = dhd_iovar(dhd, 0, "ocl_enable", (char *)&ocl_enable,
+ sizeof(ocl_enable), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("[WIFI_SEC] %s: Set ocl_enable %d failed %d\n",
+ __FUNCTION__, ocl_enable, ret));
+ } else {
+ DHD_ERROR(("[WIFI_SEC] %s: Set ocl_enable %d OK %d\n",
+ __FUNCTION__, ocl_enable, ret));
+ }
+ }
+#else
+ dhd->ocl_off = TRUE;
+#endif /* CUSTOM_SET_OCLOFF */
+#ifdef WLADPS
+ if ((ret = dhd_enable_adps(dhd, ADPS_DISABLE)) < 0) {
+ DHD_ERROR(("[WIFI_SEC] %s: dhd_enable_adps failed %d\n",
+ __FUNCTION__, ret));
+ }
+#endif /* WLADPS */
+
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, (char *)&wl_updown,
+ sizeof(wl_updown), TRUE, 0)) < 0) {
+ DHD_ERROR(("[WIFI_SEC] %s: WLC_DOWN faield %d\n",
+ __FUNCTION__, ret));
+ }
+ } else {
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)power_mode,
+ sizeof(uint), TRUE, 0);
+ }
+}
+#endif /* DHD_PM_CONTROL_FROM_FILE */
+
+#ifdef MIMO_ANT_SETTING
+int get_ant_val_from_file(uint32 *read_val)
+{
+ int ret = -1;
+ struct file *fp = NULL;
+ char *filepath = ANTINFO;
+ char *p_ant_val = NULL;
+ uint32 ant_val = 0;
+
+ /* Read antenna settings from the file */
+ fp = filp_open(filepath, O_RDONLY, 0);
+ if (IS_ERR(fp)) {
+ DHD_ERROR(("[WIFI_SEC] %s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
+ ret = -ENOENT;
+ return ret;
+ } else {
+ ret = kernel_read_compat(fp, 0, (char *)&ant_val, sizeof(uint32));
+ if (ret < 0) {
+ DHD_ERROR(("[WIFI_SEC] %s: File read error, ret=%d\n", __FUNCTION__, ret));
+ filp_close(fp, NULL);
+ return ret;
+ }
+
+ p_ant_val = (char *)&ant_val;
+ p_ant_val[sizeof(uint32) - 1] = '\0';
+ ant_val = bcm_atoi(p_ant_val);
+
+ DHD_ERROR(("[WIFI_SEC]%s: ANT val = %d\n", __FUNCTION__, ant_val));
+ filp_close(fp, NULL);
+
+ /* Check value from the file */
+ if (ant_val < 1 || ant_val > 3) {
+ DHD_ERROR(("[WIFI_SEC] %s: Invalid value %d read from the file %s\n",
+ __FUNCTION__, ant_val, filepath));
+ return -1;
+ }
+ }
+ *read_val = ant_val;
+ return ret;
+}
+
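+/*
+ * Apply the antenna selection: txchain/rxchain take a chain bitmask
+ * (1 = chain 0, 2 = chain 1, 3 = both), which is why only values 1..3
+ * pass the range check above.
+ */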
+int dhd_sel_ant_from_file(dhd_pub_t *dhd)
+{
+ int ret = -1;
+ uint32 ant_val = 0;
+ uint32 btc_mode = 0;
+ uint chip_id = dhd_bus_chip_id(dhd);
+#ifndef CUSTOM_SET_ANTNPM
+ wl_config_t rsdb_mode;
+
+ memset(&rsdb_mode, 0, sizeof(rsdb_mode));
+#endif /* !CUSTOM_SET_ANTNPM */
+
+ /* Check if this chip can support MIMO */
+ if (chip_id != BCM4350_CHIP_ID &&
+ chip_id != BCM4354_CHIP_ID &&
+ chip_id != BCM43569_CHIP_ID &&
+ chip_id != BCM4358_CHIP_ID &&
+ chip_id != BCM4359_CHIP_ID &&
+ chip_id != BCM4355_CHIP_ID &&
+ chip_id != BCM4347_CHIP_ID &&
+ chip_id != BCM4361_CHIP_ID &&
+ chip_id != BCM4375_CHIP_ID &&
+ chip_id != BCM4389_CHIP_ID) {
+ DHD_ERROR(("[WIFI_SEC] %s: This chipset does not support MIMO\n",
+ __FUNCTION__));
+ return ret;
+ }
+
+#ifndef DHD_EXPORT_CNTL_FILE
+ ret = get_ant_val_from_file(&ant_val);
+#else
+ ant_val = (uint32)antsel;
+#endif /* !DHD_EXPORT_CNTL_FILE */
+ if (ant_val == 0) {
+#ifdef CUSTOM_SET_ANTNPM
+ dhd->mimo_ant_set = 0;
+#endif /* CUSTOM_SET_ANTNPM */
+ return ret;
+ }
+ DHD_ERROR(("[WIFI_SEC]%s: ANT val = %d\n", __FUNCTION__, ant_val));
+
+ /* bt coex mode off */
+ if (dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) {
+ ret = dhd_iovar(dhd, 0, "btc_mode", (char *)&btc_mode, sizeof(btc_mode), NULL, 0,
+ TRUE);
+ if (ret) {
+			DHD_ERROR(("[WIFI_SEC] %s: Failed to execute dhd_iovar(): "
+				"btc_mode, ret=%d\n",
+				__FUNCTION__, ret));
+ return ret;
+ }
+ }
+
+#ifndef CUSTOM_SET_ANTNPM
+ /* rsdb mode off */
+ DHD_ERROR(("[WIFI_SEC] %s: %s the RSDB mode!\n",
+ __FUNCTION__, rsdb_mode.config ? "Enable" : "Disable"));
+ ret = dhd_iovar(dhd, 0, "rsdb_mode", (char *)&rsdb_mode, sizeof(rsdb_mode), NULL, 0, TRUE);
+ if (ret) {
+		DHD_ERROR(("[WIFI_SEC] %s: Failed to execute dhd_iovar(): "
+			"rsdb_mode, ret=%d\n", __FUNCTION__, ret));
+ return ret;
+ }
+
+ /* Select Antenna */
+ ret = dhd_iovar(dhd, 0, "txchain", (char *)&ant_val, sizeof(ant_val), NULL, 0, TRUE);
+ if (ret) {
+		DHD_ERROR(("[WIFI_SEC] %s: Failed to execute dhd_iovar(): txchain, ret=%d\n",
+			__FUNCTION__, ret));
+ return ret;
+ }
+
+ ret = dhd_iovar(dhd, 0, "rxchain", (char *)&ant_val, sizeof(ant_val), NULL, 0, TRUE);
+ if (ret) {
+		DHD_ERROR(("[WIFI_SEC] %s: Failed to execute dhd_iovar(): rxchain, ret=%d\n",
+			__FUNCTION__, ret));
+ return ret;
+ }
+#else
+ dhd->mimo_ant_set = ant_val;
+ DHD_ERROR(("[WIFI_SEC] %s: mimo_ant_set = %d\n", __FUNCTION__, dhd->mimo_ant_set));
+#endif /* CUSTOM_SET_ANTNPM */
+
+ return 0;
+}
+#endif /* MIMO_ANT_SETTING */
+
+#ifdef LOGTRACE_FROM_FILE
+/*
+ * LOGTRACEINFO = .logtrace.info
+ * - logtrace = 1 => Enable LOGTRACE Event
+ * - logtrace = 0 => Disable LOGTRACE Event
+ * - file not exist => Disable LOGTRACE Event
+ */
+int dhd_logtrace_from_file(dhd_pub_t *dhd)
+{
+#ifndef DHD_EXPORT_CNTL_FILE
+ struct file *fp = NULL;
+ int ret = -1;
+ uint32 logtrace = 0;
+ char *filepath = LOGTRACEINFO;
+ char *p_logtrace = NULL;
+
+ /* Read LOGTRACE Event on/off request from the file */
+ fp = filp_open(filepath, O_RDONLY, 0);
+ if (IS_ERR(fp)) {
+ DHD_ERROR(("[WIFI_SEC] %s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
+ return 0;
+ } else {
+ ret = kernel_read_compat(fp, 0, (char *)&logtrace, sizeof(uint32));
+ if (ret < 0) {
+ DHD_ERROR(("[WIFI_SEC] %s: File read error, ret=%d\n", __FUNCTION__, ret));
+ filp_close(fp, NULL);
+ return 0;
+ }
+
+ p_logtrace = (char *)&logtrace;
+ p_logtrace[sizeof(uint32) - 1] = '\0';
+ logtrace = bcm_atoi(p_logtrace);
+
+ DHD_ERROR(("[WIFI_SEC] %s: LOGTRACE On/Off from file = %d\n",
+ __FUNCTION__, logtrace));
+ filp_close(fp, NULL);
+
+ /* Check value from the file */
+ if (logtrace > 2) {
+ DHD_ERROR(("[WIFI_SEC] %s: Invalid value %d read from the file %s\n",
+ __FUNCTION__, logtrace, filepath));
+ return 0;
+ }
+ }
+
+ return (int)logtrace;
+#else
+ DHD_ERROR(("[WIFI_SEC] %s : LOGTRACE On/Off from sysfs = %d\n",
+ __FUNCTION__, (int)logtrace_val));
+ return (int)logtrace_val;
+#endif /* !DHD_EXPORT_CNTL_FILE */
+}
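+/*
+ * Example (illustrative; the path is an assumption, LOGTRACEINFO is defined
+ * elsewhere): a single ASCII digit in the file is parsed by
+ * dhd_logtrace_from_file() above, e.g. from a root shell:
+ *
+ *   echo -n 1 > /data/.logtrace.info
+ */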
+#endif /* LOGTRACE_FROM_FILE */
+
+#ifdef USE_WFA_CERT_CONF
+#ifndef DHD_EXPORT_CNTL_FILE
+int sec_get_param_wfa_cert(dhd_pub_t *dhd, int mode, uint* read_val)
+{
+ struct file *fp = NULL;
+ char *filepath = NULL;
+ int val = 0;
+ char *p_val = NULL;
+
+ if (!dhd || (mode < SET_PARAM_BUS_TXGLOM_MODE) ||
+ (mode >= PARAM_LAST_VALUE)) {
+ DHD_ERROR(("[WIFI_SEC] %s: invalid argument\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ switch (mode) {
+#ifdef BCMSDIO
+ case SET_PARAM_BUS_TXGLOM_MODE:
+ filepath = PLATFORM_PATH".bustxglom.info";
+ break;
+#endif /* BCMSDIO */
+#if defined(ROAM_ENABLE) || defined(DISABLE_BUILTIN_ROAM)
+ case SET_PARAM_ROAMOFF:
+ filepath = PLATFORM_PATH".roamoff.info";
+ break;
+#endif /* ROAM_ENABLE || DISABLE_BUILTIN_ROAM */
+#ifdef USE_WL_FRAMEBURST
+ case SET_PARAM_FRAMEBURST:
+ filepath = PLATFORM_PATH".frameburst.info";
+ break;
+#endif /* USE_WL_FRAMEBURST */
+#ifdef USE_WL_TXBF
+ case SET_PARAM_TXBF:
+ filepath = PLATFORM_PATH".txbf.info";
+ break;
+#endif /* USE_WL_TXBF */
+#ifdef PROP_TXSTATUS
+ case SET_PARAM_PROPTX:
+ filepath = PLATFORM_PATH".proptx.info";
+ break;
+#endif /* PROP_TXSTATUS */
+ default:
+		DHD_ERROR(("[WIFI_SEC] %s: Failed to find file name for index=%d\n",
+ __FUNCTION__, mode));
+ return BCME_ERROR;
+ }
+ fp = filp_open(filepath, O_RDONLY, 0);
+ if (IS_ERR(fp) || (fp == NULL)) {
+ DHD_ERROR(("[WIFI_SEC] %s: File open failed, file path=%s\n",
+ __FUNCTION__, filepath));
+ return BCME_ERROR;
+ } else {
+ if (kernel_read_compat(fp, fp->f_pos, (char *)&val, sizeof(uint32)) < 0) {
+ filp_close(fp, NULL);
+			/* File read failed, so return an error code */
+ DHD_ERROR(("[WIFI_SEC] %s: read failed, file path=%s\n",
+ __FUNCTION__, filepath));
+ return BCME_ERROR;
+ }
+ filp_close(fp, NULL);
+ }
+
+ p_val = (char *)&val;
+ p_val[sizeof(uint32) - 1] = '\0';
+ val = bcm_atoi(p_val);
+
+ switch (mode) {
+#if defined(ROAM_ENABLE) || defined(DISABLE_BUILTIN_ROAM)
+ case SET_PARAM_ROAMOFF:
+#endif /* ROAM_ENABLE || DISABLE_BUILTIN_ROAM */
+#ifdef USE_WL_FRAMEBURST
+ case SET_PARAM_FRAMEBURST:
+#endif /* USE_WL_FRAMEBURST */
+#ifdef USE_WL_TXBF
+ case SET_PARAM_TXBF:
+#endif /* USE_WL_TXBF */
+#ifdef PROP_TXSTATUS
+ case SET_PARAM_PROPTX:
+#endif /* PROP_TXSTATUS */
+ if (val < 0 || val > 1) {
+ DHD_ERROR(("[WIFI_SEC] %s: value[%d] is out of range\n",
+				__FUNCTION__, val));
+ return BCME_ERROR;
+ }
+ break;
+ default:
+ return BCME_ERROR;
+ }
+ *read_val = (uint)val;
+ return BCME_OK;
+}
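+/*
+ * Example (illustrative): each WFA-cert .info file holds a single ASCII
+ * digit which bcm_atoi() converts above, so a .roamoff.info containing "1"
+ * requests roam-off while "0" leaves the firmware roaming engine enabled.
+ */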
+#else
+int sec_get_param_wfa_cert(dhd_pub_t *dhd, int mode, uint* read_val)
+{
+ uint val = 0;
+
+ if (!dhd || (mode < SET_PARAM_BUS_TXGLOM_MODE) ||
+ (mode >= PARAM_LAST_VALUE)) {
+ DHD_ERROR(("[WIFI_SEC] %s: invalid argument\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ switch (mode) {
+#ifdef BCMSDIO
+ case SET_PARAM_BUS_TXGLOM_MODE:
+ if (bus_txglom == VALUENOTSET)
+ return BCME_ERROR;
+ else
+ val = (uint)bus_txglom;
+ break;
+#endif /* BCMSDIO */
+#if defined(ROAM_ENABLE) || defined(DISABLE_BUILTIN_ROAM)
+ case SET_PARAM_ROAMOFF:
+ if (roam_off == VALUENOTSET)
+ return BCME_ERROR;
+ else
+ val = (uint)roam_off;
+ break;
+#endif /* ROAM_ENABLE || DISABLE_BUILTIN_ROAM */
+#ifdef USE_WL_FRAMEBURST
+ case SET_PARAM_FRAMEBURST:
+ if (frameburst == VALUENOTSET)
+ return BCME_ERROR;
+ else
+ val = (uint)frameburst;
+ break;
+#endif /* USE_WL_FRAMEBURST */
+#ifdef USE_WL_TXBF
+ case SET_PARAM_TXBF:
+ if (txbf == VALUENOTSET)
+ return BCME_ERROR;
+ else
+ val = (uint)txbf;
+ break;
+#endif /* USE_WL_TXBF */
+#ifdef PROP_TXSTATUS
+ case SET_PARAM_PROPTX:
+ if (proptx == VALUENOTSET)
+ return BCME_ERROR;
+ else
+ val = (uint)proptx;
+ break;
+#endif /* PROP_TXSTATUS */
+ default:
+ return BCME_ERROR;
+ }
+ *read_val = val;
+ return BCME_OK;
+}
+#endif /* !DHD_EXPORT_CNTL_FILE */
+#endif /* USE_WFA_CERT_CONF */
+
+#ifdef WRITE_WLANINFO
+#define FIRM_PREFIX "Firm_ver:"
+#define DHD_PREFIX "DHD_ver:"
+#define NV_PREFIX "Nv_info:"
+#define CLM_PREFIX "CLM_ver:"
+#define max_len(a, b) ((sizeof(a)/(2)) - (strlen(b)) - (3))
+#define tstr_len(a, b) ((strlen(a)) + (strlen(b)) + (3))
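+/*
+ * Worked example (illustrative): with temp_buf[256] and DHD_PREFIX
+ * ("DHD_ver:", 8 characters), max_len(temp_buf, DHD_PREFIX) yields
+ * 256/2 - 8 - 3 = 117 bytes for the version string, i.e. half the scratch
+ * buffer minus the prefix and the separator/newline/NUL overhead;
+ * tstr_len() is the matching total length of "<prefix> <value>\n"
+ * including the terminator.
+ */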
+
+char version_info[MAX_VERSION_LEN];
+char version_old_info[MAX_VERSION_LEN];
+
+int write_filesystem(struct file *file, unsigned long long offset,
+ unsigned char* data, unsigned int size)
+{
+ mm_segment_t oldfs;
+ int ret;
+
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+
+ ret = vfs_write(file, data, size, &offset);
+
+ set_fs(oldfs);
+ return ret;
+}
+
+uint32 sec_save_wlinfo(char *firm_ver, char *dhd_ver, char *nvram_p, char *clm_ver)
+{
+#ifndef DHD_EXPORT_CNTL_FILE
+ struct file *fp = NULL;
+ char *filepath = WIFIVERINFO;
+#endif /* !DHD_EXPORT_CNTL_FILE */
+ struct file *nvfp = NULL;
+ int min_len, str_len = 0;
+ int ret = 0;
+ char* nvram_buf;
+ char temp_buf[256];
+
+ DHD_TRACE(("[WIFI_SEC] %s: Entered.\n", __FUNCTION__));
+
+ DHD_INFO(("[WIFI_SEC] firmware version : %s\n", firm_ver));
+ DHD_INFO(("[WIFI_SEC] dhd driver version : %s\n", dhd_ver));
+ DHD_INFO(("[WIFI_SEC] nvram path : %s\n", nvram_p));
+ DHD_INFO(("[WIFI_SEC] clm version : %s\n", clm_ver));
+
+ memset(version_info, 0, sizeof(version_info));
+
+ if (strlen(dhd_ver)) {
+ min_len = min(strlen(dhd_ver), max_len(temp_buf, DHD_PREFIX));
+ min_len += strlen(DHD_PREFIX) + 3;
+ DHD_INFO(("[WIFI_SEC] DHD ver length : %d\n", min_len));
+ snprintf(version_info+str_len, min_len, DHD_PREFIX " %s\n", dhd_ver);
+ str_len = strlen(version_info);
+
+ DHD_INFO(("[WIFI_SEC] Driver version_info len : %d\n", str_len));
+ DHD_INFO(("[WIFI_SEC] Driver version_info : %s\n", version_info));
+ } else {
+ DHD_ERROR(("[WIFI_SEC] Driver version is missing.\n"));
+ }
+
+ if (strlen(firm_ver)) {
+ min_len = min(strlen(firm_ver), max_len(temp_buf, FIRM_PREFIX));
+ min_len += strlen(FIRM_PREFIX) + 3;
+ DHD_INFO(("[WIFI_SEC] firmware ver length : %d\n", min_len));
+ snprintf(version_info+str_len, min_len, FIRM_PREFIX " %s\n", firm_ver);
+ str_len = strlen(version_info);
+
+ DHD_INFO(("[WIFI_SEC] Firmware version_info len : %d\n", str_len));
+ DHD_INFO(("[WIFI_SEC] Firmware version_info : %s\n", version_info));
+ } else {
+ DHD_ERROR(("[WIFI_SEC] Firmware version is missing.\n"));
+ }
+
+ if (nvram_p) {
+ memset(temp_buf, 0, sizeof(temp_buf));
+ nvfp = filp_open(nvram_p, O_RDONLY, 0);
+ if (IS_ERR(nvfp) || (nvfp == NULL)) {
+			DHD_ERROR(("[WIFI_SEC] %s: Nvram file open failed.\n", __FUNCTION__));
+ return -1;
+ } else {
+ ret = kernel_read_compat(nvfp, nvfp->f_pos, temp_buf, sizeof(temp_buf));
+ filp_close(nvfp, NULL);
+ }
+
+ if (strlen(temp_buf)) {
+ nvram_buf = temp_buf;
+ bcmstrtok(&nvram_buf, "\n", 0);
+			DHD_INFO(("[WIFI_SEC] nvram tokenizing : %s(%zu)\n",
+ temp_buf, strlen(temp_buf)));
+ snprintf(version_info+str_len, tstr_len(temp_buf, NV_PREFIX),
+ NV_PREFIX " %s\n", temp_buf);
+ str_len = strlen(version_info);
+ DHD_INFO(("[WIFI_SEC] NVRAM version_info : %s\n", version_info));
+ DHD_INFO(("[WIFI_SEC] NVRAM version_info len : %d, nvram len : %zu\n",
+ str_len, strlen(temp_buf)));
+ } else {
+ DHD_ERROR(("[WIFI_SEC] NVRAM info is missing.\n"));
+ }
+ } else {
+		DHD_ERROR(("[WIFI_SEC] NVRAM path does not exist\n"));
+ }
+
+ if (strlen(clm_ver)) {
+ min_len = min(strlen(clm_ver), max_len(temp_buf, CLM_PREFIX));
+ min_len += strlen(CLM_PREFIX) + 3;
+ DHD_INFO(("[WIFI_SEC] clm ver length : %d\n", min_len));
+ snprintf(version_info+str_len, min_len, CLM_PREFIX " %s\n", clm_ver);
+ str_len = strlen(version_info);
+
+ DHD_INFO(("[WIFI_SEC] CLM version_info len : %d\n", str_len));
+ DHD_INFO(("[WIFI_SEC] CLM version_info : %s\n", version_info));
+ } else {
+ DHD_ERROR(("[WIFI_SEC] CLM version is missing.\n"));
+ }
+
+ DHD_INFO(("[WIFI_SEC] version_info : %s, strlen : %zu\n",
+ version_info, strlen(version_info)));
+
+#ifndef DHD_EXPORT_CNTL_FILE
+ fp = filp_open(filepath, O_RDONLY, 0);
+ if (IS_ERR(fp) || (fp == NULL)) {
+ DHD_ERROR(("[WIFI_SEC] %s: .wifiver.info File open failed.\n", __FUNCTION__));
+ } else {
+ memset(version_old_info, 0, sizeof(version_old_info));
+ ret = kernel_read_compat(fp, fp->f_pos, version_old_info, sizeof(version_info));
+ filp_close(fp, NULL);
+ DHD_INFO(("[WIFI_SEC] kernel_read ret : %d.\n", ret));
+ if (strcmp(version_info, version_old_info) == 0) {
+ DHD_ERROR(("[WIFI_SEC] .wifiver.info already saved.\n"));
+ return 0;
+ }
+ }
+
+ fp = filp_open(filepath, O_RDWR | O_CREAT, 0664);
+ if (IS_ERR(fp) || (fp == NULL)) {
+ DHD_ERROR(("[WIFI_SEC] %s: .wifiver.info File open failed.\n",
+ __FUNCTION__));
+ } else {
+ ret = write_filesystem(fp, fp->f_pos, version_info, sizeof(version_info));
+ DHD_INFO(("[WIFI_SEC] sec_save_wlinfo done. ret : %d\n", ret));
+ DHD_ERROR(("[WIFI_SEC] save .wifiver.info file.\n"));
+ filp_close(fp, NULL);
+ }
+#endif /* !DHD_EXPORT_CNTL_FILE */
+ return ret;
+}
+#endif /* WRITE_WLANINFO */
+
+#ifdef SUPPORT_MULTIPLE_BOARD_REV_FROM_HW
+unsigned int system_hw_rev;
+static int
+__init get_hw_rev(char *arg)
+{
+ get_option(&arg, &system_hw_rev);
+ printk("dhd : hw_rev : %d\n", system_hw_rev);
+ return 0;
+}
+
+early_param("androidboot.hw_rev", get_hw_rev);
+#endif /* SUPPORT_MULTIPLE_BOARD_REV_FROM_HW */
+
+#ifdef GEN_SOFTAP_INFO_FILE
+#define SOFTAP_INFO_FILE_FIRST_LINE "#.softap.info"
+/*
+ * # Is RSDB Wi-Fi sharing supported?
+ * DualBandConcurrency
+ * # Can Wi-Fi and the hotspot be enabled at the same time?
+ * DualInterface
+ * # Is the 5 GHz band supported?
+ * 5G
+ * # How many clients can be connected?
+ * maxClient
+ * # Does the hotspot support PowerSave mode?
+ * PowerSave
+ * # Is the android_net_wifi_set_Country_Code_Hal feature supported?
+ * HalFn_setCountryCodeHal
+ * # Is android_net_wifi_getValidChannels supported?
+ * HalFn_getValidChannels
+ */
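+/*
+ * Example of a generated file (illustrative, for a chipset that selects the
+ * first value table below; DualInterface appears only when
+ * DHD_SOFTAP_DUAL_IF_INFO is defined):
+ *
+ *   #.softap.info
+ *   DualBandConcurrency=yes
+ *   5G=yes
+ *   maxClient=10
+ *   PowerSave=yes
+ *   HalFn_setCountryCodeHal=yes
+ *   HalFn_getValidChannels=yes
+ */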
+const char *softap_info_items[] = {
+ "DualBandConcurrency",
+#ifdef DHD_SOFTAP_DUAL_IF_INFO
+ "DualInterface",
+#endif /* DHD_SOFTAP_DUAL_IF_INFO */
+ "5G", "maxClient", "PowerSave",
+ "HalFn_setCountryCodeHal", "HalFn_getValidChannels", NULL
+};
+#if defined(BCM4361_CHIP) || defined(BCM4375_CHIP) || defined(BCM4389_CHIP_DEF)
+const char *softap_info_values[] = {
+ "yes",
+#ifdef DHD_SOFTAP_DUAL_IF_INFO
+ "yes",
+#endif /* DHD_SOFTAP_DUAL_IF_INFO */
+ "yes", "10", "yes", "yes", "yes", NULL
+};
+#elif defined(BCM43455_CHIP)
+const char *softap_info_values[] = {
+ "no",
+#ifdef DHD_SOFTAP_DUAL_IF_INFO
+ "no",
+#endif /* DHD_SOFTAP_DUAL_IF_INFO */
+ "yes", "10", "no", "yes", "yes", NULL
+};
+#elif defined(BCM43430_CHIP)
+const char *softap_info_values[] = {
+ "no",
+#ifdef DHD_SOFTAP_DUAL_IF_INFO
+ "no",
+#endif /* DHD_SOFTAP_DUAL_IF_INFO */
+ "no", "10", "no", "yes", "yes", NULL
+};
+#else
+const char *softap_info_values[] = {
+ "UNDEF",
+#ifdef DHD_SOFTAP_DUAL_IF_INFO
+ "UNDEF",
+#endif /* DHD_SOFTAP_DUAL_IF_INFO */
+ "UNDEF", "UNDEF", "UNDEF", "UNDEF", "UNDEF", NULL
+};
+#endif /* defined(BCM4361_CHIP) || defined(BCM4375_CHIP) || defined(BCM4389_CHIP_DEF) */
+#endif /* GEN_SOFTAP_INFO_FILE */
+
+#ifdef GEN_SOFTAP_INFO_FILE
+uint32 sec_save_softap_info(void)
+{
+#ifndef DHD_EXPORT_CNTL_FILE
+ struct file *fp = NULL;
+ char *filepath = SOFTAPINFO;
+#endif /* !DHD_EXPORT_CNTL_FILE */
+ char temp_buf[SOFTAP_INFO_BUF_SZ];
+ int ret = -1, idx = 0, rem = 0, written = 0;
+ char *pos = NULL;
+
+ DHD_TRACE(("[WIFI_SEC] %s: Entered.\n", __FUNCTION__));
+ memset(temp_buf, 0, sizeof(temp_buf));
+
+ pos = temp_buf;
+ rem = sizeof(temp_buf);
+ written = snprintf(pos, sizeof(temp_buf), "%s\n",
+ SOFTAP_INFO_FILE_FIRST_LINE);
+ do {
+ int len = strlen(softap_info_items[idx]) +
+ strlen(softap_info_values[idx]) + 2;
+ pos += written;
+ rem -= written;
+ if (len > rem) {
+ break;
+ }
+ written = snprintf(pos, rem, "%s=%s\n",
+ softap_info_items[idx], softap_info_values[idx]);
+ } while (softap_info_items[++idx] != NULL);
+
+#ifndef DHD_EXPORT_CNTL_FILE
+ fp = filp_open(filepath, O_RDWR | O_CREAT, 0664);
+ if (IS_ERR(fp) || (fp == NULL)) {
+		DHD_ERROR(("[WIFI_SEC] %s: %s File open failed.\n",
+			__FUNCTION__, SOFTAPINFO));
+ } else {
+ ret = write_filesystem(fp, fp->f_pos, temp_buf, strlen(temp_buf));
+ DHD_INFO(("[WIFI_SEC] %s done. ret : %d\n", __FUNCTION__, ret));
+ DHD_ERROR(("[WIFI_SEC] save %s file.\n", SOFTAPINFO));
+ filp_close(fp, NULL);
+ }
+#else
+ strlcpy(softapinfostr, temp_buf, SOFTAP_INFO_BUF_SZ);
+
+ ret = BCME_OK;
+#endif /* !DHD_EXPORT_CNTL_FILE */
+ return ret;
+}
+#endif /* GEN_SOFTAP_INFO_FILE */
+#endif /* CUSTOMER_HW4 || CUSTOMER_HW40 */
+
+/* XXX WAR: disable pm_bcnrx, scan_ps for the BCM4354 WISOL module.
+ * The WISOL module has an ANT_1 Rx sensitivity issue.
+ */
+#if defined(FORCE_DISABLE_SINGLECORE_SCAN)
+void
+dhd_force_disable_singlcore_scan(dhd_pub_t *dhd)
+{
+ int ret = 0;
+ struct file *fp = NULL;
+ char *filepath = PLATFORM_PATH".cid.info";
+	char vendor[10] = {0, };
+ uint32 pm_bcnrx = 0;
+ uint32 scan_ps = 0;
+
+ if (BCM4354_CHIP_ID != dhd_bus_chip_id(dhd))
+ return;
+
+ fp = filp_open(filepath, O_RDONLY, 0);
+ if (IS_ERR(fp)) {
+ DHD_ERROR(("%s file open error\n", filepath));
+ } else {
+		ret = kernel_read_compat(fp, 0, (char *)vendor, 5);
+
+		if (ret > 0 && NULL != strstr(vendor, "wisol")) {
+ DHD_ERROR(("wisol module : set pm_bcnrx=0, set scan_ps=0\n"));
+
+ ret = dhd_iovar(dhd, 0, "pm_bcnrx", (char *)&pm_bcnrx, sizeof(pm_bcnrx),
+ NULL, 0, TRUE);
+ if (ret < 0)
+ DHD_ERROR(("Set pm_bcnrx error (%d)\n", ret));
+
+ ret = dhd_iovar(dhd, 0, "scan_ps", (char *)&scan_ps, sizeof(scan_ps), NULL,
+ 0, TRUE);
+ if (ret < 0)
+ DHD_ERROR(("Set scan_ps error (%d)\n", ret));
+ }
+ filp_close(fp, NULL);
+ }
+}
+#endif /* FORCE_DISABLE_SINGLECORE_SCAN */
+
+#ifdef BCM4335_XTAL_WAR
+bool
+check_bcm4335_rev(void)
+{
+ int ret = -1;
+ struct file *fp = NULL;
+ char *filepath = "/data/.rev";
+ char chip_rev[10] = {0, };
+ bool is_revb0 = TRUE;
+
+	DHD_ERROR(("check BCM4335, check_bcm4335_rev\n"));
+ fp = filp_open(filepath, O_RDONLY, 0);
+
+ if (IS_ERR(fp)) {
+ DHD_ERROR(("/data/.rev file open error\n"));
+ is_revb0 = TRUE;
+ } else {
+ DHD_ERROR(("/data/.rev file Found\n"));
+ ret = kernel_read_compat(fp, 0, (char *)chip_rev, 9);
+ if (ret != -1 && NULL != strstr(chip_rev, "BCM4335B0")) {
+ DHD_ERROR(("Found BCM4335B0\n"));
+ is_revb0 = TRUE;
+ } else {
+ is_revb0 = FALSE;
+ }
+ filp_close(fp, NULL);
+ }
+ return is_revb0;
+}
+#endif /* BCM4335_XTAL_WAR */
diff --git a/bcmdhd.101.10.361.x/dhd_dbg.h b/bcmdhd.101.10.361.x/dhd_dbg.h
new file mode 100755
index 0000000..c955e38
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_dbg.h
@@ -0,0 +1,637 @@
+/*
+ * Debug/trace/assert driver definitions for Dongle Host Driver.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+#ifndef _dhd_dbg_
+#define _dhd_dbg_
+
+#if defined(NDIS)
+#include "wl_nddbg.h"
+#endif /* defined(NDIS) */
+
+#ifdef DHD_LOG_DUMP
+extern char *dhd_log_dump_get_timestamp(void);
+#ifdef DHD_EFI
+/* FW verbose/console output to FW ring buffer */
+extern void dhd_log_dump_print(const char *fmt, ...);
+/* DHD verbose/console output to DHD ring buffer */
+extern void dhd_log_dump_print_drv(const char *fmt, ...);
+#define DHD_LOG_DUMP_WRITE(fmt, ...) dhd_log_dump_print_drv(fmt, ##__VA_ARGS__)
+#define DHD_LOG_DUMP_WRITE_FW(fmt, ...) dhd_log_dump_print(fmt, ##__VA_ARGS__)
+#else
+#ifndef _DHD_LOG_DUMP_DEFINITIONS_
+#define _DHD_LOG_DUMP_DEFINITIONS_
+#define GENERAL_LOG_HDR "\n-------------------- General log ---------------------------\n"
+#define PRESERVE_LOG_HDR "\n-------------------- Preserve log ---------------------------\n"
+#define SPECIAL_LOG_HDR "\n-------------------- Special log ---------------------------\n"
+#define DHD_DUMP_LOG_HDR "\n-------------------- 'dhd dump' log -----------------------\n"
+#define EXT_TRAP_LOG_HDR "\n-------------------- Extended trap data -------------------\n"
+#define HEALTH_CHK_LOG_HDR "\n-------------------- Health check data --------------------\n"
+#ifdef DHD_DUMP_PCIE_RINGS
+#define FLOWRING_DUMP_HDR "\n-------------------- Flowring dump --------------------\n"
+#endif /* DHD_DUMP_PCIE_RINGS */
+#define DHD_LOG_DUMP_DLD(fmt, ...) \
+ dhd_log_dump_write(DLD_BUF_TYPE_GENERAL, NULL, 0, fmt, ##__VA_ARGS__)
+#define DHD_LOG_DUMP_DLD_EX(fmt, ...) \
+ dhd_log_dump_write(DLD_BUF_TYPE_SPECIAL, NULL, 0, fmt, ##__VA_ARGS__)
+#define DHD_LOG_DUMP_DLD_PRSRV(fmt, ...) \
+ dhd_log_dump_write(DLD_BUF_TYPE_PRESERVE, NULL, 0, fmt, ##__VA_ARGS__)
+#endif /* !_DHD_LOG_DUMP_DEFINITIONS_ */
+
+#ifndef DHD_LOG_DUMP_RING_DEFINITIONS
+#define DHD_LOG_DUMP_RING_DEFINITIONS
+#ifdef DHD_DEBUGABILITY_LOG_DUMP_RING
+/* DHD_DEBUGABILITY_LOG_DUMP_RING enabled */
+extern void dhd_dbg_ring_write(int type, char *binary_data,
+ int binary_len, const char *fmt, ...);
+extern char* dhd_dbg_get_system_timestamp(void);
+#define DHD_DBG_RING(fmt, ...) \
+ dhd_dbg_ring_write(DRIVER_LOG_RING_ID, NULL, 0, fmt, ##__VA_ARGS__)
+#define DHD_DBG_RING_EX(fmt, ...) \
+ dhd_dbg_ring_write(FW_VERBOSE_RING_ID, NULL, 0, fmt, ##__VA_ARGS__)
+#define DHD_DBG_RING_ROAM(fmt, ...) \
+ dhd_dbg_ring_write(ROAM_STATS_RING_ID, NULL, 0, fmt, ##__VA_ARGS__)
+
+#define DHD_LOG_DUMP_WRITE DHD_DBG_RING
+#define DHD_LOG_DUMP_WRITE_EX DHD_DBG_RING_EX
+#define DHD_LOG_DUMP_WRITE_PRSRV DHD_DBG_RING
+#define DHD_LOG_DUMP_WRITE_ROAM DHD_DBG_RING_ROAM
+
+#define DHD_PREFIX_TS "[%s][%s]: ", dhd_dbg_get_system_timestamp(), dhd_log_dump_get_timestamp()
+#define DHD_PREFIX_TS_FN DHD_PREFIX_TS
+#define DHD_LOG_DUMP_WRITE_TS DHD_DBG_RING(DHD_PREFIX_TS)
+#define DHD_LOG_DUMP_WRITE_TS_FN DHD_DBG_RING(DHD_PREFIX_TS_FN)
+#define DHD_LOG_DUMP_WRITE_EX_TS DHD_DBG_RING_EX(DHD_PREFIX_TS)
+#define DHD_LOG_DUMP_WRITE_EX_TS_FN DHD_DBG_RING_EX(DHD_PREFIX_TS_FN)
+#define DHD_LOG_DUMP_WRITE_PRSRV_TS DHD_DBG_RING(DHD_PREFIX_TS)
+#define DHD_LOG_DUMP_WRITE_PRSRV_TS_FN DHD_DBG_RING(DHD_PREFIX_TS_FN)
+#define DHD_LOG_DUMP_WRITE_ROAM_TS DHD_DBG_RING_ROAM(DHD_PREFIX_TS)
+#define DHD_LOG_DUMP_WRITE_ROAM_TS_FN DHD_DBG_RING_ROAM(DHD_PREFIX_TS_FN)
+#else
+/* DHD_DEBUGABILITY_LOG_DUMP_RING not enabled */
+#define DHD_LOG_DUMP_WRITE DHD_LOG_DUMP_DLD
+#define DHD_LOG_DUMP_WRITE_EX DHD_LOG_DUMP_DLD_EX
+#define DHD_LOG_DUMP_WRITE_PRSRV DHD_LOG_DUMP_DLD_PRSRV
+#define DHD_LOG_DUMP_WRITE_ROAM DHD_LOG_DUMP_DLD
+
+#define DHD_PREFIX_TS "[%s]: ", dhd_log_dump_get_timestamp()
+#define DHD_PREFIX_TS_FN "[%s] %s: ", dhd_log_dump_get_timestamp(), __func__
+#define DHD_LOG_DUMP_WRITE_TS DHD_LOG_DUMP_DLD(DHD_PREFIX_TS)
+#define DHD_LOG_DUMP_WRITE_TS_FN DHD_LOG_DUMP_DLD(DHD_PREFIX_TS_FN)
+#define DHD_LOG_DUMP_WRITE_EX_TS DHD_LOG_DUMP_DLD_EX(DHD_PREFIX_TS)
+#define DHD_LOG_DUMP_WRITE_EX_TS_FN DHD_LOG_DUMP_DLD_EX(DHD_PREFIX_TS_FN)
+#define DHD_LOG_DUMP_WRITE_PRSRV_TS DHD_LOG_DUMP_DLD_PRSRV(DHD_PREFIX_TS)
+#define DHD_LOG_DUMP_WRITE_PRSRV_TS_FN DHD_LOG_DUMP_DLD_PRSRV(DHD_PREFIX_TS_FN)
+#define DHD_LOG_DUMP_WRITE_ROAM_TS DHD_LOG_DUMP_DLD(DHD_PREFIX_TS)
+#define DHD_LOG_DUMP_WRITE_ROAM_TS_FN DHD_LOG_DUMP_DLD(DHD_PREFIX_TS_FN)
+#endif /* DHD_DEBUGABILITY_LOG_DUMP_RING */
+#endif /* DHD_LOG_DUMP_RING_DEFINITIONS */
+
+#endif /* DHD_EFI */
+#define CONCISE_DUMP_BUFLEN	(32 * 1024)
+#define ECNTRS_LOG_HDR "\n-------------------- Ecounters log --------------------------\n"
+#ifdef DHD_STATUS_LOGGING
+#define STATUS_LOG_HDR "\n-------------------- Status log -----------------------\n"
+#endif /* DHD_STATUS_LOGGING */
+#define RTT_LOG_HDR "\n-------------------- RTT log --------------------------\n"
+#define BCM_TRACE_LOG_HDR "\n-------------------- BCM Trace log --------------------------\n"
+#define COOKIE_LOG_HDR "\n-------------------- Cookie List ----------------------------\n"
+#endif /* DHD_LOG_DUMP */
+
+#if defined(CUSTOMER_DBG_SYSTEM_TIME) && defined(DHD_DEBUGABILITY_LOG_DUMP_RING)
+#define DBG_PRINT_PREFIX "[%s][dhd][wlan]", dhd_dbg_get_system_timestamp()
+#else
+#define DBG_PRINT_PREFIX
+#endif
+#define DBG_PRINT_SYSTEM_TIME pr_cont(DBG_PRINT_PREFIX)
+
+#if defined(BCMDBG) || defined(DHD_DEBUG)
+
+#if defined(NDIS)
+#define DHD_ERROR(args) do {if (dhd_msg_level & DHD_ERROR_VAL) \
+ {printf args; DHD_NDDBG_OUTPUT args;}} while (0)
+#define DHD_TRACE(args) do {if (dhd_msg_level & DHD_TRACE_VAL) \
+ {printf args; DHD_NDDBG_OUTPUT args;}} while (0)
+#define DHD_INFO(args) do {if (dhd_msg_level & DHD_INFO_VAL) \
+ {printf args; DHD_NDDBG_OUTPUT args;}} while (0)
+#define DHD_ERROR_ROAM(args) DHD_ERROR(args)
+#else
+/* NON-NDIS cases */
+#ifdef DHD_LOG_DUMP
+#ifdef DHD_EFI
+/* defined(DHD_EFI) && defined(DHD_LOG_DUMP) */
+#define DHD_ERROR(args) \
+do { \
+ if (dhd_msg_level & DHD_ERROR_VAL) { \
+ printf args; \
+ DHD_LOG_DUMP_WRITE("[%s]: ", dhd_log_dump_get_timestamp()); \
+ DHD_LOG_DUMP_WRITE args; \
+ } \
+} while (0)
+
+#define DHD_INFO(args) \
+do { \
+ if (dhd_msg_level & DHD_INFO_VAL) { \
+ printf args; \
+ DHD_LOG_DUMP_WRITE("[%s]: ", dhd_log_dump_get_timestamp()); \
+ DHD_LOG_DUMP_WRITE args; \
+ } \
+} while (0)
+#else /* DHD_EFI */
+/* !defined(DHD_EFI) and defined(DHD_LOG_DUMP) */
+#define DHD_ERROR(args) \
+do { \
+ if (dhd_msg_level & DHD_ERROR_VAL) { \
+ printf args; \
+ DHD_LOG_DUMP_WRITE_TS; \
+ DHD_LOG_DUMP_WRITE args; \
+ } \
+} while (0)
+
+#define DHD_INFO(args) do {if (dhd_msg_level & DHD_INFO_VAL) printf args;} while (0)
+#endif /* DHD_EFI */
+#else /* DHD_LOG_DUMP */
+/* !defined(DHD_LOG_DUMP) cases */
+#define DHD_ERROR(args) do {if (dhd_msg_level & DHD_ERROR_VAL) printf args;} while (0)
+#define DHD_INFO(args) do {if (dhd_msg_level & DHD_INFO_VAL) printf args;} while (0)
+#define DHD_ERROR_ROAM(args) DHD_ERROR(args)
+#endif /* DHD_LOG_DUMP */
+
+#define DHD_TRACE(args) do {if (dhd_msg_level & DHD_TRACE_VAL) printf args;} while (0)
+#endif /* defined(NDIS) */
+
+#ifdef DHD_LOG_DUMP
+/* LOG_DUMP defines common to EFI and NON-EFI */
+#ifdef DHD_EFI
+/* EFI builds with LOG DUMP enabled */
+#define DHD_ERROR_MEM(args) \
+do { \
+ if (dhd_msg_level & DHD_ERROR_VAL) { \
+ if (dhd_msg_level & DHD_ERROR_MEM_VAL) { \
+ printf args; \
+ } \
+ DHD_LOG_DUMP_WRITE("[%s]: ", dhd_log_dump_get_timestamp()); \
+ DHD_LOG_DUMP_WRITE args; \
+ } \
+} while (0)
+#define DHD_IOVAR_MEM(args) \
+do { \
+ if (dhd_msg_level & DHD_ERROR_VAL) { \
+ if (dhd_msg_level & DHD_IOVAR_MEM_VAL) { \
+ printf args; \
+ } \
+ DHD_LOG_DUMP_WRITE("[%s]: ", dhd_log_dump_get_timestamp()); \
+ DHD_LOG_DUMP_WRITE args; \
+ } \
+} while (0)
+#define DHD_LOG_MEM(args) \
+do { \
+ if (dhd_msg_level & DHD_ERROR_VAL) { \
+ DHD_LOG_DUMP_WRITE("[%s]: ", dhd_log_dump_get_timestamp()); \
+ DHD_LOG_DUMP_WRITE args; \
+ } \
+} while (0)
+
+#define DHD_EVENT(args) \
+do { \
+ if (dhd_msg_level & DHD_EVENT_VAL) { \
+ DHD_LOG_DUMP_WRITE_FW("[%s]: ", dhd_log_dump_get_timestamp()); \
+ DHD_LOG_DUMP_WRITE_FW args; \
+ } \
+} while (0)
+#define DHD_ECNTR_LOG(args) DHD_EVENT(args)
+#define DHD_ERROR_EX(args) DHD_ERROR(args)
+#define DHD_ERROR_ROAM(args) DHD_ERROR(args)
+#define DHD_MSGTRACE_LOG(args) \
+do { \
+ if (dhd_msg_level & DHD_MSGTRACE_VAL) { \
+ DHD_LOG_DUMP_WRITE_FW("[%s]: ", dhd_log_dump_get_timestamp()); \
+ DHD_LOG_DUMP_WRITE_FW args; \
+ } \
+} while (0)
+#define DHD_PRSRV_MEM(args) DHD_EVENT(args)
+#else
+/* NON-EFI builds with LOG DUMP enabled */
+#define DHD_ERROR_MEM(args) \
+do { \
+ if (dhd_msg_level & DHD_ERROR_VAL) { \
+ if (dhd_msg_level & DHD_ERROR_MEM_VAL) { \
+ printf args; \
+ } \
+ DHD_LOG_DUMP_WRITE args; \
+ } \
+} while (0)
+#define DHD_IOVAR_MEM(args) \
+do { \
+ if (dhd_msg_level & DHD_ERROR_VAL) { \
+ if (dhd_msg_level & DHD_IOVAR_MEM_VAL) { \
+ printf args; \
+ } \
+ DHD_LOG_DUMP_WRITE args; \
+ } \
+} while (0)
+#define DHD_LOG_MEM(args) \
+do { \
+ if (dhd_msg_level & DHD_ERROR_VAL) { \
+ DHD_LOG_DUMP_WRITE args; \
+ } \
+} while (0)
+
+#define DHD_EVENT(args) \
+do { \
+ if (dhd_msg_level & DHD_EVENT_VAL) { \
+ printf args; \
+ DHD_LOG_DUMP_WRITE_PRSRV_TS; \
+ DHD_LOG_DUMP_WRITE_PRSRV args; \
+ } \
+} while (0)
+#define DHD_PRSRV_MEM(args) \
+do { \
+ if (dhd_msg_level & DHD_EVENT_VAL) { \
+ if (dhd_msg_level & DHD_PRSRV_MEM_VAL) { \
+ printf args; \
+ } \
+ DHD_LOG_DUMP_WRITE_TS; \
+ DHD_LOG_DUMP_WRITE args; \
+ } \
+} while (0)
+/* Re-using 'DHD_MSGTRACE_VAL' to control printing of ecounter binary event
+ * logs to the console and debug dump -- this needs cleanup in the future to
+ * use a separate 'DHD_ECNTR_VAL' bitmap flag. 'DHD_MSGTRACE_VAL' is defined
+ * only for non-android builds.
+ */
+#define DHD_ECNTR_LOG(args) \
+do { \
+ if (dhd_msg_level & DHD_EVENT_VAL) { \
+ if (dhd_msg_level & DHD_MSGTRACE_VAL) { \
+ printf args; \
+ DHD_LOG_DUMP_WRITE_TS; \
+ DHD_LOG_DUMP_WRITE args; \
+ } \
+ } \
+} while (0)
+#define DHD_ERROR_EX(args) \
+do { \
+ if (dhd_msg_level & DHD_ERROR_VAL) { \
+ printf args; \
+ DHD_LOG_DUMP_WRITE_EX_TS; \
+ DHD_LOG_DUMP_WRITE_EX args; \
+ } \
+} while (0)
+#define DHD_MSGTRACE_LOG(args) \
+do { \
+ if (dhd_msg_level & DHD_MSGTRACE_VAL) { \
+ printf args; \
+ } \
+ DHD_LOG_DUMP_WRITE_TS; \
+ DHD_LOG_DUMP_WRITE args; \
+} while (0)
+
+#define DHD_ERROR_ROAM(args) \
+do { \
+ if (dhd_msg_level & DHD_ERROR_VAL) { \
+ printf args; \
+ DHD_LOG_DUMP_WRITE_ROAM_TS; \
+ DHD_LOG_DUMP_WRITE_ROAM args; \
+ } \
+} while (0)
+#endif /* DHD_EFI */
+#else /* DHD_LOG_DUMP */
+/* !DHD_LOG_DUMP */
+#define DHD_MSGTRACE_LOG(args) do {if (dhd_msg_level & DHD_MSGTRACE_VAL) printf args;} while (0)
+#define DHD_ERROR_MEM(args) DHD_ERROR(args)
+#define DHD_IOVAR_MEM(args) DHD_ERROR(args)
+#define DHD_LOG_MEM(args) DHD_ERROR(args)
+#define DHD_EVENT(args) do {if (dhd_msg_level & DHD_EVENT_VAL) printf args;} while (0)
+#define DHD_ECNTR_LOG(args) DHD_EVENT(args)
+#define DHD_PRSRV_MEM(args) DHD_EVENT(args)
+#define DHD_ERROR_EX(args) DHD_ERROR(args)
+#define DHD_ERROR_ROAM(args) DHD_ERROR(args)
+#endif /* DHD_LOG_DUMP */
+
+#define DHD_DATA(args) do {if (dhd_msg_level & DHD_DATA_VAL) printf args;} while (0)
+#define DHD_CTL(args) do {if (dhd_msg_level & DHD_CTL_VAL) printf args;} while (0)
+#define DHD_TIMER(args) do {if (dhd_msg_level & DHD_TIMER_VAL) printf args;} while (0)
+#define DHD_HDRS(args) do {if (dhd_msg_level & DHD_HDRS_VAL) printf args;} while (0)
+#define DHD_BYTES(args) do {if (dhd_msg_level & DHD_BYTES_VAL) printf args;} while (0)
+#define DHD_INTR(args) do {if (dhd_msg_level & DHD_INTR_VAL) printf args;} while (0)
+#define DHD_GLOM(args) do {if (dhd_msg_level & DHD_GLOM_VAL) printf args;} while (0)
+#define DHD_BTA(args) do {if (dhd_msg_level & DHD_BTA_VAL) printf args;} while (0)
+#define DHD_ISCAN(args) do {if (dhd_msg_level & DHD_ISCAN_VAL) printf args;} while (0)
+#define DHD_ARPOE(args) do {if (dhd_msg_level & DHD_ARPOE_VAL) printf args;} while (0)
+#define DHD_REORDER(args) do {if (dhd_msg_level & DHD_REORDER_VAL) printf args;} while (0)
+#define DHD_PNO(args) do {if (dhd_msg_level & DHD_PNO_VAL) printf args;} while (0)
+#define DHD_RTT(args) do {if (dhd_msg_level & DHD_RTT_VAL) printf args;} while (0)
+#define DHD_RPM(args) do {if (dhd_msg_level & DHD_RPM_VAL) printf args;} while (0)
+#define DHD_PKT_MON(args) do {if (dhd_msg_level & DHD_PKT_MON_VAL) printf args;} while (0)
+
+#if defined(DHD_LOG_DUMP)
+#if defined(DHD_EFI)
+#define DHD_FWLOG(args) DHD_MSGTRACE_LOG(args)
+#elif defined(DHD_LOG_PRINT_RATE_LIMIT)
+#define DHD_FW_VERBOSE(args) \
+do { \
+ if (dbgring_msg_level & DHD_FWLOG_VAL) { \
+ DHD_LOG_DUMP_WRITE_EX args; \
+ } \
+} while (0)
+#define DHD_FWLOG(args) \
+ do { \
+ if (dhd_msg_level & DHD_FWLOG_VAL) { \
+ if (control_logtrace && !log_print_threshold) \
+ printf args; \
+ DHD_LOG_DUMP_WRITE args; \
+ } \
+ } while (0)
+#else
+#define DHD_FW_VERBOSE(args) \
+do { \
+ if (dbgring_msg_level & DHD_FWLOG_VAL) { \
+ DHD_LOG_DUMP_WRITE_EX args; \
+ } \
+} while (0)
+
+#define DHD_FWLOG(args) \
+ do { \
+ if (dhd_msg_level & DHD_FWLOG_VAL) { \
+ if (control_logtrace) \
+ printf args; \
+ DHD_LOG_DUMP_WRITE args; \
+ } \
+ } while (0)
+#endif /* DHD_EFI */
+#else /* DHD_LOG_DUMP */
+#if defined(NDIS) && (NDISVER >= 0x0630)
+#define DHD_FWLOG(args) do {if (dhd_msg_level & DHD_FWLOG_VAL) \
+ {printf args; DHD_NDDBG_OUTPUT args;}} while (0)
+#else
+#define DHD_FWLOG(args) do {if (dhd_msg_level & DHD_FWLOG_VAL) printf args;} while (0)
+#endif /* defined(NDIS) && (NDISVER >= 0x0630) */
+#endif /* DHD_LOG_DUMP */
+
+#define DHD_DBGIF(args) do {if (dhd_msg_level & DHD_DBGIF_VAL) printf args;} while (0)
+
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+#define DHD_RPM(args) do {if (dhd_msg_level & DHD_RPM_VAL) printf args;} while (0)
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+
+#ifdef CUSTOMER_HW4_DEBUG
+#define DHD_TRACE_HW4 DHD_ERROR
+#define DHD_INFO_HW4 DHD_ERROR
+#define DHD_ERROR_NO_HW4 DHD_INFO
+#else
+#define DHD_TRACE_HW4 DHD_TRACE
+#define DHD_INFO_HW4 DHD_INFO
+#define DHD_ERROR_NO_HW4 DHD_ERROR
+#endif /* CUSTOMER_HW4_DEBUG */
+
+#define DHD_ERROR_ON() (dhd_msg_level & DHD_ERROR_VAL)
+#define DHD_TRACE_ON() (dhd_msg_level & DHD_TRACE_VAL)
+#define DHD_INFO_ON() (dhd_msg_level & DHD_INFO_VAL)
+#define DHD_DATA_ON() (dhd_msg_level & DHD_DATA_VAL)
+#define DHD_CTL_ON() (dhd_msg_level & DHD_CTL_VAL)
+#define DHD_TIMER_ON() (dhd_msg_level & DHD_TIMER_VAL)
+#define DHD_HDRS_ON() (dhd_msg_level & DHD_HDRS_VAL)
+#define DHD_BYTES_ON() (dhd_msg_level & DHD_BYTES_VAL)
+#define DHD_INTR_ON() (dhd_msg_level & DHD_INTR_VAL)
+#define DHD_GLOM_ON() (dhd_msg_level & DHD_GLOM_VAL)
+#define DHD_EVENT_ON() (dhd_msg_level & DHD_EVENT_VAL)
+#define DHD_BTA_ON() (dhd_msg_level & DHD_BTA_VAL)
+#define DHD_ISCAN_ON() (dhd_msg_level & DHD_ISCAN_VAL)
+#define DHD_ARPOE_ON() (dhd_msg_level & DHD_ARPOE_VAL)
+#define DHD_REORDER_ON() (dhd_msg_level & DHD_REORDER_VAL)
+#define DHD_NOCHECKDIED_ON() (dhd_msg_level & DHD_NOCHECKDIED_VAL)
+#define DHD_PNO_ON() (dhd_msg_level & DHD_PNO_VAL)
+#define DHD_RTT_ON() (dhd_msg_level & DHD_RTT_VAL)
+#define DHD_MSGTRACE_ON() (dhd_msg_level & DHD_MSGTRACE_VAL)
+#define DHD_FWLOG_ON() (dhd_msg_level & DHD_FWLOG_VAL)
+#define DHD_DBGIF_ON() (dhd_msg_level & DHD_DBGIF_VAL)
+#define DHD_PKT_MON_ON() (dhd_msg_level & DHD_PKT_MON_VAL)
+#define DHD_PKT_MON_DUMP_ON() (dhd_msg_level & DHD_PKT_MON_DUMP_VAL)
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+#define DHD_RPM_ON() (dhd_msg_level & DHD_RPM_VAL)
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+
+#else /* defined(BCMDBG) || defined(DHD_DEBUG) */
+
+#if defined(NDIS)
+#define DHD_ERROR(args) do {if (dhd_msg_level & DHD_ERROR_VAL) \
+ {printf args; DHD_NDDBG_OUTPUT args;}} while (0)
+#define DHD_TRACE(args) do {if (dhd_msg_level & DHD_TRACE_VAL) \
+ {DHD_NDDBG_OUTPUT args;}} while (0)
+#define DHD_INFO(args) do {if (dhd_msg_level & DHD_INFO_VAL) \
+ {DHD_NDDBG_OUTPUT args;}} while (0)
+#elif defined(DHD_EFI) && defined(DHD_LOG_DUMP)
+#define DHD_ERROR(args) \
+do { \
+ if (dhd_msg_level & DHD_ERROR_VAL) { \
+ printf args; \
+ DHD_LOG_DUMP_WRITE("[%s]: ", dhd_log_dump_get_timestamp()); \
+ DHD_LOG_DUMP_WRITE args; \
+ } \
+} while (0)
+#define DHD_INFO(args) \
+do { \
+ if (dhd_msg_level & DHD_INFO_VAL) { \
+ printf args; \
+ DHD_LOG_DUMP_WRITE("[%s]: ", dhd_log_dump_get_timestamp()); \
+ DHD_LOG_DUMP_WRITE args; \
+ } \
+} while (0)
+#define DHD_TRACE(args)
+#define DHD_ERROR_ROAM(args) DHD_ERROR(args)
+#else /* DHD_EFI && DHD_LOG_DUMP */
+
+#define DHD_ERROR(args) do {if (dhd_msg_level & DHD_ERROR_VAL) \
+ printf args;} while (0)
+#define DHD_TRACE(args)
+#define DHD_INFO(args)
+#define DHD_ERROR_ROAM(args) DHD_ERROR(args)
+#endif /* defined(NDIS) */
+
+#define DHD_DATA(args)
+#define DHD_CTL(args)
+#define DHD_TIMER(args)
+#define DHD_HDRS(args)
+#define DHD_BYTES(args)
+#define DHD_INTR(args)
+#define DHD_GLOM(args)
+
+#if defined(DHD_EFI) && defined(DHD_LOG_DUMP)
+#define DHD_EVENT(args) \
+do { \
+ if (dhd_msg_level & DHD_EVENT_VAL) { \
+ DHD_LOG_DUMP_WRITE_FW("[%s]: ", dhd_log_dump_get_timestamp()); \
+ DHD_LOG_DUMP_WRITE_FW args; \
+ } \
+} while (0)
+#define DHD_ECNTR_LOG(args) DHD_EVENT(args)
+#else
+#define DHD_EVENT(args)
+#define DHD_ECNTR_LOG(args) DHD_EVENT(args)
+#endif /* DHD_EFI && DHD_LOG_DUMP */
+
+#define DHD_PRSRV_MEM(args) DHD_EVENT(args)
+
+#define DHD_BTA(args)
+#define DHD_ISCAN(args)
+#define DHD_ARPOE(args)
+#define DHD_REORDER(args)
+#define DHD_PNO(args)
+#define DHD_RTT(args)
+#define DHD_PKT_MON(args)
+
+#if defined(DHD_EFI) && defined(DHD_LOG_DUMP)
+#define DHD_MSGTRACE_LOG(args) \
+do { \
+ if (dhd_msg_level & DHD_MSGTRACE_VAL) { \
+ DHD_LOG_DUMP_WRITE_FW("[%s]: ", dhd_log_dump_get_timestamp()); \
+ DHD_LOG_DUMP_WRITE_FW args; \
+ } \
+} while (0)
+#define DHD_FWLOG(args) DHD_MSGTRACE_LOG(args)
+#else
+#define DHD_MSGTRACE_LOG(args)
+#define DHD_FWLOG(args)
+#endif /* DHD_EFI && DHD_LOG_DUMP */
+
+#define DHD_DBGIF(args)
+
+#if defined(DHD_EFI) && defined(DHD_LOG_DUMP)
+#define DHD_ERROR_MEM(args) \
+do { \
+ if (dhd_msg_level & DHD_ERROR_VAL) { \
+ DHD_LOG_DUMP_WRITE("[%s]: ", dhd_log_dump_get_timestamp()); \
+ DHD_LOG_DUMP_WRITE args; \
+ } \
+} while (0)
+#define DHD_IOVAR_MEM(args) \
+do { \
+ if (dhd_msg_level & DHD_ERROR_VAL) { \
+ DHD_LOG_DUMP_WRITE("[%s]: ", dhd_log_dump_get_timestamp()); \
+ DHD_LOG_DUMP_WRITE args; \
+ } \
+} while (0)
+#define DHD_LOG_MEM(args) \
+do { \
+ if (dhd_msg_level & DHD_ERROR_VAL) { \
+ DHD_LOG_DUMP_WRITE("[%s]: ", dhd_log_dump_get_timestamp()); \
+ DHD_LOG_DUMP_WRITE args; \
+ } \
+} while (0)
+#define DHD_ERROR_EX(args) DHD_ERROR(args)
+#else
+#define DHD_ERROR_MEM(args) DHD_ERROR(args)
+#define DHD_IOVAR_MEM(args) DHD_ERROR(args)
+#define DHD_LOG_MEM(args) DHD_ERROR(args)
+#define DHD_ERROR_EX(args) DHD_ERROR(args)
+#endif /* DHD_EFI */
+#define DHD_ERROR_ROAM(args) DHD_ERROR(args)
+#ifdef CUSTOMER_HW4_DEBUG
+#define DHD_TRACE_HW4 DHD_ERROR
+#define DHD_INFO_HW4 DHD_ERROR
+#define DHD_ERROR_NO_HW4 DHD_INFO
+#else
+#define DHD_TRACE_HW4 DHD_TRACE
+#define DHD_INFO_HW4 DHD_INFO
+#define DHD_ERROR_NO_HW4 DHD_ERROR
+#endif /* CUSTOMER_HW4_DEBUG */
+
+#define DHD_ERROR_ON() 0
+#define DHD_TRACE_ON() 0
+#define DHD_INFO_ON() 0
+#define DHD_DATA_ON() 0
+#define DHD_CTL_ON() 0
+#define DHD_TIMER_ON() 0
+#define DHD_HDRS_ON() 0
+#define DHD_BYTES_ON() 0
+#define DHD_INTR_ON() 0
+#define DHD_GLOM_ON() 0
+#define DHD_EVENT_ON() 0
+#define DHD_BTA_ON() 0
+#define DHD_ISCAN_ON() 0
+#define DHD_ARPOE_ON() 0
+#define DHD_REORDER_ON() 0
+#define DHD_NOCHECKDIED_ON() 0
+#define DHD_PNO_ON() 0
+#define DHD_RTT_ON() 0
+#define DHD_PKT_MON_ON() 0
+#define DHD_PKT_MON_DUMP_ON() 0
+#define DHD_MSGTRACE_ON() 0
+#define DHD_FWLOG_ON() 0
+#define DHD_DBGIF_ON() 0
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+#define DHD_RPM_ON() 0
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+#endif /* defined(BCMDBG) || defined(DHD_DEBUG) */
+
+#define PRINT_RATE_LIMIT_PERIOD 5000000u /* 5s in units of us */
+#define DHD_ERROR_RLMT(args) \
+do { \
+ if (dhd_msg_level & DHD_ERROR_VAL) { \
+ static uint64 __err_ts = 0; \
+ static uint32 __err_cnt = 0; \
+ uint64 __cur_ts = 0; \
+ __cur_ts = OSL_SYSUPTIME_US(); \
+ if (__err_ts == 0 || (__cur_ts > __err_ts && \
+ (__cur_ts - __err_ts > PRINT_RATE_LIMIT_PERIOD))) { \
+ __err_ts = __cur_ts; \
+ DHD_ERROR(args); \
+ DHD_ERROR(("[Repeats %u times]\n", __err_cnt)); \
+ __err_cnt = 0; \
+ } else { \
+ ++__err_cnt; \
+ } \
+ } \
+} while (0)
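+/*
+ * Usage sketch (illustrative): the macro prints at most once per
+ * PRINT_RATE_LIMIT_PERIOD per call site and then reports how many
+ * invocations were suppressed since the last print, e.g.
+ *
+ *   DHD_ERROR_RLMT(("%s: DMA stall detected\n", __FUNCTION__));
+ */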
+
+/* even in non-BCMDBG builds, logging of dongle iovars should be available */
+#define DHD_DNGL_IOVAR_SET(args) \
+ do {if (dhd_msg_level & DHD_DNGL_IOVAR_SET_VAL) printf args;} while (0)
+
+#ifdef BCMPERFSTATS
+#define DHD_LOG(args) do {if (dhd_msg_level & DHD_LOG_VAL) bcmlog args;} while (0)
+#else
+#define DHD_LOG(args)
+#endif
+
+#if defined(BCMINTERNAL) && defined(LINUX) && defined(BCMSDIO) && (defined(BCMDBG) || \
+ defined(DHD_DEBUG))
+extern void dhd_blog(char *cp, int size);
+#define DHD_BLOG(cp, size) do { dhd_blog(cp, size);} while (0)
+#else
+#define DHD_BLOG(cp, size)
+#endif
+
+#define DHD_NONE(args)
+extern int dhd_msg_level;
+extern int dbgring_msg_level;
+#ifdef DHD_LOG_PRINT_RATE_LIMIT
+extern int log_print_threshold;
+#endif /* DHD_LOG_PRINT_RATE_LIMIT */
+
+/* Defines msg bits */
+#include <dhdioctl.h>
+
+#endif /* _dhd_dbg_ */
diff --git a/bcmdhd.101.10.361.x/dhd_dbg_ring.c b/bcmdhd.101.10.361.x/dhd_dbg_ring.c
new file mode 100755
index 0000000..d78d21e
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_dbg_ring.c
@@ -0,0 +1,473 @@
+/*
+ * DHD debug ring API and structures - implementation
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+#include <typedefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_dbg.h>
+#include <dhd_dbg_ring.h>
+
+dhd_dbg_ring_t *
+dhd_dbg_ring_alloc_init(dhd_pub_t *dhd, uint16 ring_id,
+ char *ring_name, uint32 ring_sz, void *allocd_buf,
+ bool pull_inactive)
+{
+ dhd_dbg_ring_t *ring = NULL;
+ int ret = 0;
+ unsigned long flags = 0;
+
+ ring = MALLOCZ(dhd->osh, sizeof(dhd_dbg_ring_t));
+ if (!ring)
+ goto fail;
+
+ ret = dhd_dbg_ring_init(dhd, ring, ring_id,
+ (uint8 *)ring_name, ring_sz,
+ allocd_buf, pull_inactive);
+ if (ret != BCME_OK) {
+ DHD_ERROR(("%s: unable to init ring %s!\n",
+ __FUNCTION__, ring_name));
+ goto fail;
+ }
+ DHD_DBG_RING_LOCK(ring->lock, flags);
+ ring->state = RING_ACTIVE;
+ ring->threshold = 0;
+ DHD_DBG_RING_UNLOCK(ring->lock, flags);
+
+ return ring;
+
+fail:
+ if (ring) {
+ dhd_dbg_ring_deinit(dhd, ring);
+ ring->ring_buf = NULL;
+ ring->ring_size = 0;
+ MFREE(dhd->osh, ring, sizeof(dhd_dbg_ring_t));
+ }
+ return NULL;
+}
+
+void
+dhd_dbg_ring_dealloc_deinit(void **ring_ptr, dhd_pub_t *dhd)
+{
+ dhd_dbg_ring_t *ring = NULL;
+ dhd_dbg_ring_t **dbgring = (dhd_dbg_ring_t **)ring_ptr;
+
+ if (!dbgring)
+ return;
+
+ ring = *dbgring;
+
+ if (ring) {
+ dhd_dbg_ring_deinit(dhd, ring);
+ ring->ring_buf = NULL;
+ ring->ring_size = 0;
+ MFREE(dhd->osh, ring, sizeof(dhd_dbg_ring_t));
+ *dbgring = NULL;
+ }
+}
+
+int
+dhd_dbg_ring_init(dhd_pub_t *dhdp, dhd_dbg_ring_t *ring, uint16 id, uint8 *name,
+ uint32 ring_sz, void *allocd_buf, bool pull_inactive)
+{
+ void *buf;
+ unsigned long flags = 0;
+
+ if (allocd_buf == NULL) {
+ return BCME_NOMEM;
+ } else {
+ buf = allocd_buf;
+ }
+
+ ring->lock = DHD_DBG_RING_LOCK_INIT(dhdp->osh);
+
+ DHD_DBG_RING_LOCK(ring->lock, flags);
+ ring->id = id;
+ strlcpy((char *)ring->name, (char *)name, sizeof(ring->name));
+ ring->ring_size = ring_sz;
+ ring->wp = ring->rp = 0;
+ ring->ring_buf = buf;
+ ring->threshold = DBGRING_FLUSH_THRESHOLD(ring);
+ ring->state = RING_SUSPEND;
+ ring->rem_len = 0;
+ ring->sched_pull = TRUE;
+ ring->pull_inactive = pull_inactive;
+ DHD_DBG_RING_UNLOCK(ring->lock, flags);
+
+ return BCME_OK;
+}
+
+void
+dhd_dbg_ring_deinit(dhd_pub_t *dhdp, dhd_dbg_ring_t *ring)
+{
+ unsigned long flags = 0;
+
+ DHD_DBG_RING_LOCK(ring->lock, flags);
+ ring->id = 0;
+ ring->name[0] = 0;
+ ring->wp = ring->rp = 0;
+ memset(&ring->stat, 0, sizeof(ring->stat));
+ ring->threshold = 0;
+ ring->state = RING_STOP;
+ DHD_DBG_RING_UNLOCK(ring->lock, flags);
+
+ DHD_DBG_RING_LOCK_DEINIT(dhdp->osh, ring->lock);
+}
+
+void
+dhd_dbg_ring_sched_pull(dhd_dbg_ring_t *ring, uint32 pending_len,
+ os_pullreq_t pull_fn, void *os_pvt, const int id)
+{
+ unsigned long flags = 0;
+
+ DHD_DBG_RING_LOCK(ring->lock, flags);
+ /* if the current pending size is bigger than threshold and
+ * threshold is set
+ */
+ if (ring->threshold > 0 &&
+ (pending_len >= ring->threshold) && ring->sched_pull) {
+		/*
+		 * Update the state and release the lock before calling
+		 * pull_fn. Do not transfer control to other layers with
+		 * locks held. If the callback calls back into the same
+		 * layer from this context, it can lead to a deadlock.
+		 */
+ ring->sched_pull = FALSE;
+ DHD_DBG_RING_UNLOCK(ring->lock, flags);
+ pull_fn(os_pvt, id);
+ } else {
+ DHD_DBG_RING_UNLOCK(ring->lock, flags);
+ }
+}
+
+uint32
+dhd_dbg_ring_get_pending_len(dhd_dbg_ring_t *ring)
+{
+ uint32 pending_len = 0;
+ unsigned long flags = 0;
+
+ DHD_DBG_RING_LOCK(ring->lock, flags);
+ if (ring->stat.written_bytes > ring->stat.read_bytes) {
+ pending_len = ring->stat.written_bytes - ring->stat.read_bytes;
+ } else if (ring->stat.written_bytes < ring->stat.read_bytes) {
+ pending_len = PENDING_LEN_MAX - ring->stat.read_bytes + ring->stat.written_bytes;
+ } else {
+ pending_len = 0;
+ }
+ DHD_DBG_RING_UNLOCK(ring->lock, flags);
+
+ return pending_len;
+}
+
+int
+dhd_dbg_ring_push(dhd_dbg_ring_t *ring, dhd_dbg_ring_entry_t *hdr, void *data)
+{
+ unsigned long flags;
+ uint32 w_len;
+ uint32 avail_size;
+ dhd_dbg_ring_entry_t *w_entry, *r_entry;
+
+ if (!ring || !hdr || !data) {
+ return BCME_BADARG;
+ }
+
+ DHD_DBG_RING_LOCK(ring->lock, flags);
+
+ if (ring->state != RING_ACTIVE) {
+ DHD_DBG_RING_UNLOCK(ring->lock, flags);
+ return BCME_OK;
+ }
+
+ w_len = ENTRY_LENGTH(hdr);
+
+ DHD_DBGIF(("%s: RING%d[%s] hdr->len=%u, w_len=%u, wp=%d, rp=%d, ring_start=0x%p;"
+ " ring_size=%u\n",
+ __FUNCTION__, ring->id, ring->name, hdr->len, w_len, ring->wp, ring->rp,
+ ring->ring_buf, ring->ring_size));
+
+ if (w_len > ring->ring_size) {
+ DHD_DBG_RING_UNLOCK(ring->lock, flags);
+ DHD_ERROR(("%s: RING%d[%s] w_len=%u, ring_size=%u,"
+			" write size exceeds ring size!\n",
+ __FUNCTION__, ring->id, ring->name, w_len, ring->ring_size));
+ return BCME_ERROR;
+ }
+ /* Claim the space */
+ do {
+ avail_size = DBG_RING_CHECK_WRITE_SPACE(ring->rp, ring->wp, ring->ring_size);
+ if (avail_size <= w_len) {
+ /* Prepare the space */
+ if (ring->rp <= ring->wp) {
+ ring->tail_padded = TRUE;
+ ring->rem_len = ring->ring_size - ring->wp;
+				DHD_DBGIF(("%s: RING%d[%s] Insufficient tail space,"
+ " rp=%d, wp=%d, rem_len=%d, ring_size=%d,"
+ " avail_size=%d, w_len=%d\n", __FUNCTION__,
+ ring->id, ring->name, ring->rp, ring->wp,
+ ring->rem_len, ring->ring_size, avail_size,
+ w_len));
+
+ /* 0 pad insufficient tail space */
+ memset((uint8 *)ring->ring_buf + ring->wp, 0, ring->rem_len);
+ /* If read pointer is still at the beginning, make some room */
+ if (ring->rp == 0) {
+ r_entry = (dhd_dbg_ring_entry_t *)((uint8 *)ring->ring_buf +
+ ring->rp);
+ ring->rp += ENTRY_LENGTH(r_entry);
+ ring->stat.read_bytes += ENTRY_LENGTH(r_entry);
+ DHD_DBGIF(("%s: rp at 0, move by one entry length"
+ " (%u bytes)\n",
+ __FUNCTION__, (uint32)ENTRY_LENGTH(r_entry)));
+ }
+ if (ring->rp == ring->wp) {
+ ring->rp = 0;
+ }
+ ring->wp = 0;
+ DHD_DBGIF(("%s: new rp=%u, wp=%u\n",
+ __FUNCTION__, ring->rp, ring->wp));
+ } else {
+ /* Not enough space for new entry, free some up */
+ r_entry = (dhd_dbg_ring_entry_t *)((uint8 *)ring->ring_buf +
+ ring->rp);
+ /* check bounds before incrementing read ptr */
+ if (ring->rp + ENTRY_LENGTH(r_entry) >= ring->ring_size) {
+ DHD_ERROR(("%s: RING%d[%s] rp points out of boundary,"
+					" ring->wp=%u, ring->rp=%u, ring->ring_size=%d\n",
+ __FUNCTION__, ring->id, ring->name, ring->wp,
+ ring->rp, ring->ring_size));
+ ASSERT(0);
+ DHD_DBG_RING_UNLOCK(ring->lock, flags);
+ return BCME_BUFTOOSHORT;
+ }
+ ring->rp += ENTRY_LENGTH(r_entry);
+ /* skip padding if there is one */
+ if (ring->tail_padded &&
+ ((ring->rp + ring->rem_len) == ring->ring_size)) {
+ DHD_DBGIF(("%s: RING%d[%s] Found padding,"
+ " avail_size=%d, w_len=%d, set rp = 0\n",
+ __FUNCTION__,
+ ring->id, ring->name, avail_size, w_len));
+ ring->rp = 0;
+ ring->tail_padded = FALSE;
+ ring->rem_len = 0;
+ }
+ ring->stat.read_bytes += ENTRY_LENGTH(r_entry);
+ DHD_DBGIF(("%s: RING%d[%s] read_bytes=%d, wp=%d, rp=%d\n",
+ __FUNCTION__, ring->id, ring->name, ring->stat.read_bytes,
+ ring->wp, ring->rp));
+ }
+ } else {
+ break;
+ }
+ } while (TRUE);
+
+ /* check before writing to the ring */
+ if (ring->wp + w_len >= ring->ring_size) {
+ DHD_ERROR(("%s: RING%d[%s] wp pointed out of ring boundary, "
+ "wp=%d, ring_size=%d, w_len=%u\n", __FUNCTION__, ring->id,
+ ring->name, ring->wp, ring->ring_size, w_len));
+ ASSERT(0);
+ DHD_DBG_RING_UNLOCK(ring->lock, flags);
+ return BCME_BUFTOOLONG;
+ }
+
+ w_entry = (dhd_dbg_ring_entry_t *)((uint8 *)ring->ring_buf + ring->wp);
+ /* header */
+ memcpy(w_entry, hdr, DBG_RING_ENTRY_SIZE);
+ w_entry->len = hdr->len;
+ /* payload */
+ memcpy((char *)w_entry + DBG_RING_ENTRY_SIZE, data, w_entry->len);
+ /* update write pointer */
+ ring->wp += w_len;
+
+ /* update statistics */
+ ring->stat.written_records++;
+ ring->stat.written_bytes += w_len;
+ DHD_DBGIF(("%s : RING%d[%s] written_records %d, written_bytes %d, read_bytes=%d,"
+ " ring->threshold=%d, wp=%d, rp=%d\n", __FUNCTION__, ring->id, ring->name,
+ ring->stat.written_records, ring->stat.written_bytes, ring->stat.read_bytes,
+ ring->threshold, ring->wp, ring->rp));
+
+ DHD_DBG_RING_UNLOCK(ring->lock, flags);
+ return BCME_OK;
+}
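+/*
+ * Usage sketch (illustrative; payload and payload_len are placeholders):
+ *
+ *   dhd_dbg_ring_entry_t hdr;
+ *   memset(&hdr, 0, sizeof(hdr));
+ *   hdr.len = payload_len;    (payload bytes, header excluded)
+ *   hdr.type = 0;             (per-ring specific value)
+ *   (void)dhd_dbg_ring_push(ring, &hdr, payload);
+ *
+ * dhd_dbg_ring_push() claims space (evicting the oldest records if needed),
+ * copies the header and payload, and advances the write pointer, all under
+ * ring->lock.
+ */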
+
+/*
+ * This function holds ring->lock, so callers of this function
+ * should not hold ring->lock.
+ */
+int
+dhd_dbg_ring_pull_single(dhd_dbg_ring_t *ring, void *data, uint32 buf_len, bool strip_header)
+{
+ dhd_dbg_ring_entry_t *r_entry = NULL;
+ uint32 rlen = 0;
+ char *buf = NULL;
+
+ if (!ring || !data || buf_len <= 0) {
+ return 0;
+ }
+
+	/* Pulling from an inactive (suspended) ring is allowed only for
+	 * ecounters: when a trap occurs, the ring is suspended and its data
+	 * is then pulled to dump it to a file. For other rings, if the ring
+	 * is not in the active state, return without processing (as before).
+	 */
+ if (!ring->pull_inactive && (ring->state != RING_ACTIVE)) {
+ goto exit;
+ }
+
+ if (ring->rp == ring->wp) {
+ goto exit;
+ }
+
+ DHD_DBGIF(("%s: RING%d[%s] buf_len=%u, wp=%d, rp=%d, ring_start=0x%p; ring_size=%u\n",
+ __FUNCTION__, ring->id, ring->name, buf_len, ring->wp, ring->rp,
+ ring->ring_buf, ring->ring_size));
+
+ r_entry = (dhd_dbg_ring_entry_t *)((uint8 *)ring->ring_buf + ring->rp);
+
+ /* Boundary Check */
+ rlen = ENTRY_LENGTH(r_entry);
+ if ((ring->rp + rlen) > ring->ring_size) {
+ DHD_ERROR(("%s: entry len %d is out of boundary of ring size %d,"
+ " current ring %d[%s] - rp=%d\n", __FUNCTION__, rlen,
+ ring->ring_size, ring->id, ring->name, ring->rp));
+ rlen = 0;
+ goto exit;
+ }
+
+ if (strip_header) {
+ rlen = r_entry->len;
+ buf = (char *)r_entry + DBG_RING_ENTRY_SIZE;
+ } else {
+ rlen = ENTRY_LENGTH(r_entry);
+ buf = (char *)r_entry;
+ }
+ if (rlen > buf_len) {
+ DHD_ERROR(("%s: buf len %d is too small for entry len %d\n",
+ __FUNCTION__, buf_len, rlen));
+ DHD_ERROR(("%s: ring %d[%s] - ring size=%d, wp=%d, rp=%d\n",
+ __FUNCTION__, ring->id, ring->name, ring->ring_size,
+ ring->wp, ring->rp));
+ ASSERT(0);
+ rlen = 0;
+ goto exit;
+ }
+
+ memcpy(data, buf, rlen);
+ /* update ring context */
+ ring->rp += ENTRY_LENGTH(r_entry);
+ /* don't pass wp but skip padding if there is one */
+ if (ring->rp != ring->wp &&
+ ring->tail_padded && ((ring->rp + ring->rem_len) >= ring->ring_size)) {
+ DHD_DBGIF(("%s: RING%d[%s] Found padding, rp=%d, wp=%d\n",
+ __FUNCTION__, ring->id, ring->name, ring->rp, ring->wp));
+ ring->rp = 0;
+ ring->tail_padded = FALSE;
+ ring->rem_len = 0;
+ }
+ if (ring->rp >= ring->ring_size) {
+ DHD_ERROR(("%s: RING%d[%s] rp pointed out of ring boundary,"
+ " rp=%d, ring_size=%d\n", __FUNCTION__, ring->id,
+ ring->name, ring->rp, ring->ring_size));
+ ASSERT(0);
+ rlen = 0;
+ goto exit;
+ }
+ ring->stat.read_bytes += ENTRY_LENGTH(r_entry);
+	DHD_DBGIF(("%s: RING%d[%s] read_bytes=%d, wp=%d, rp=%d\n", __FUNCTION__,
+ ring->id, ring->name, ring->stat.read_bytes, ring->wp, ring->rp));
+
+exit:
+
+ return rlen;
+}
+
+int
+dhd_dbg_ring_pull(dhd_dbg_ring_t *ring, void *data, uint32 buf_len, bool strip_hdr)
+{
+ int32 r_len, total_r_len = 0;
+
+ if (!ring || !data)
+ return 0;
+
+ if (!ring->pull_inactive && (ring->state != RING_ACTIVE)) {
+ return 0;
+ }
+
+ while (buf_len > 0) {
+ r_len = dhd_dbg_ring_pull_single(ring, data, buf_len, strip_hdr);
+ if (r_len == 0)
+ break;
+ data = (uint8 *)data + r_len;
+ buf_len -= r_len;
+ total_r_len += r_len;
+ }
+
+ return total_r_len;
+}
+
+int
+dhd_dbg_ring_config(dhd_dbg_ring_t *ring, int log_level, uint32 threshold)
+{
+ unsigned long flags = 0;
+
+ if (!ring)
+ return BCME_BADADDR;
+
+ if (ring->state == RING_STOP)
+ return BCME_UNSUPPORTED;
+
+ DHD_DBG_RING_LOCK(ring->lock, flags);
+
+ if (log_level == 0)
+ ring->state = RING_SUSPEND;
+ else
+ ring->state = RING_ACTIVE;
+
+ ring->log_level = log_level;
+ ring->threshold = MIN(threshold, DBGRING_FLUSH_THRESHOLD(ring));
+
+ DHD_DBG_RING_UNLOCK(ring->lock, flags);
+
+ return BCME_OK;
+}
+
+void
+dhd_dbg_ring_start(dhd_dbg_ring_t *ring)
+{
+ if (!ring)
+ return;
+
+ /* Initialize the information for the ring */
+ ring->state = RING_SUSPEND;
+ ring->log_level = 0;
+ ring->rp = ring->wp = 0;
+ ring->threshold = 0;
+ memset(&ring->stat, 0, sizeof(struct ring_statistics));
+ memset(ring->ring_buf, 0, ring->ring_size);
+}
diff --git a/bcmdhd.101.10.361.x/dhd_dbg_ring.h b/bcmdhd.101.10.361.x/dhd_dbg_ring.h
new file mode 100755
index 0000000..bab8646
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_dbg_ring.h
@@ -0,0 +1,146 @@
+/*
+ * DHD debug ring header file - interface
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+#ifndef __DHD_DBG_RING_H__
+#define __DHD_DBG_RING_H__
+
+#include <bcmutils.h>
+
+#if defined(LINUX)
+#define PACKED_STRUCT __attribute__ ((packed))
+#else
+#define PACKED_STRUCT
+#endif
+
+#define DBGRING_NAME_MAX 32
+
+enum dbg_ring_state {
+ RING_STOP = 0, /* ring is not initialized */
+ RING_ACTIVE, /* ring is live and logging */
+ RING_SUSPEND /* ring is initialized but not logging */
+};
+
+/* Each entry in the debug ring is preceded by the header below, so that
+ * variable-length records can be stored in the ring.
+ */
+typedef struct dhd_dbg_ring_entry {
+ uint16 len; /* payload length excluding the header */
+ uint8 flags;
+ uint8 type; /* Per ring specific */
+ uint64 timestamp; /* present if has_timestamp bit is set. */
+} PACKED_STRUCT dhd_dbg_ring_entry_t;
+
+struct ring_statistics {
+ /* number of bytes that was written to the buffer by driver */
+ uint32 written_bytes;
+ /* number of bytes that was read from the buffer by user land */
+ uint32 read_bytes;
+ /* number of records that was written to the buffer by driver */
+ uint32 written_records;
+};
+
+typedef struct dhd_dbg_ring_status {
+ uint8 name[DBGRING_NAME_MAX];
+ uint32 flags;
+ int ring_id; /* unique integer representing the ring */
+ /* total memory size allocated for the buffer */
+ uint32 ring_buffer_byte_size;
+ uint32 verbose_level;
+ /* number of bytes that was written to the buffer by driver */
+ uint32 written_bytes;
+ /* number of bytes that was read from the buffer by user land */
+ uint32 read_bytes;
+	/* number of records that was written to the buffer by driver */
+ uint32 written_records;
+} dhd_dbg_ring_status_t;
+
+typedef struct dhd_dbg_ring {
+ int id; /* ring id */
+ uint8 name[DBGRING_NAME_MAX]; /* name string */
+	uint32 ring_size; /* ring buffer size in bytes */
+ uint32 wp; /* write pointer */
+ uint32 rp; /* read pointer */
+ uint32 rp_tmp; /* tmp read pointer */
+ uint32 log_level; /* log_level */
+ uint32 threshold; /* threshold bytes */
+	void * ring_buf; /* pointer to the actual ring buffer */
+ void * lock; /* lock for ring access */
+ struct ring_statistics stat; /* statistics */
+ enum dbg_ring_state state; /* ring state enum */
+ bool tail_padded; /* writer does not have enough space */
+ uint32 rem_len; /* number of bytes from wp_pad to end */
+ bool sched_pull; /* schedule reader immediately */
+ bool pull_inactive; /* pull contents from ring even if it is inactive */
+} dhd_dbg_ring_t;
+
+#define DBGRING_FLUSH_THRESHOLD(ring) (ring->ring_size / 3)
+#define RING_STAT_TO_STATUS(ring, status) \
+ do { \
+ /* status.name/ring->name are the same length so no need to check return value */ \
+ (void)memcpy_s(status.name, sizeof(status.name), ring->name, sizeof(ring->name)); \
+ status.ring_id = ring->id; \
+ status.ring_buffer_byte_size = ring->ring_size; \
+ status.written_bytes = ring->stat.written_bytes; \
+ status.written_records = ring->stat.written_records; \
+ status.read_bytes = ring->stat.read_bytes; \
+ status.verbose_level = ring->log_level; \
+ } while (0)
+
+#define DBG_RING_ENTRY_SIZE (sizeof(dhd_dbg_ring_entry_t))
+#define ENTRY_LENGTH(hdr) ((hdr)->len + DBG_RING_ENTRY_SIZE)
+#define PAYLOAD_MAX_LEN 65535
+#define PAYLOAD_ECNTR_MAX_LEN 1648u
+#define PAYLOAD_RTT_MAX_LEN 1648u
+#define PAYLOAD_BCM_TRACE_MAX_LEN 1648u
+#define PENDING_LEN_MAX 0xFFFFFFFF
+#define DBG_RING_STATUS_SIZE (sizeof(dhd_dbg_ring_status_t))
+
+#define TXACTIVESZ(r, w, d) (((r) <= (w)) ? ((w) - (r)) : ((d) - (r) + (w)))
+#define DBG_RING_READ_AVAIL_SPACE(w, r, d) (((w) >= (r)) ? ((w) - (r)) : ((d) - (r)))
+#define DBG_RING_WRITE_SPACE_AVAIL_CONT(r, w, d) (((w) >= (r)) ? ((d) - (w)) : ((r) - (w)))
+#define DBG_RING_WRITE_SPACE_AVAIL(r, w, d) (d - (TXACTIVESZ(r, w, d)))
+#define DBG_RING_CHECK_WRITE_SPACE(r, w, d) \
+ MIN(DBG_RING_WRITE_SPACE_AVAIL(r, w, d), DBG_RING_WRITE_SPACE_AVAIL_CONT(r, w, d))
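+
+/* Illustrative ring arithmetic (a hypothetical 100-byte ring, d = 100):
+ *   rp = 10, wp = 60:
+ *     DBG_RING_READ_AVAIL_SPACE(60, 10, 100)       -> 50 bytes readable
+ *     DBG_RING_WRITE_SPACE_AVAIL_CONT(10, 60, 100) -> 40 bytes to ring end
+ *     DBG_RING_WRITE_SPACE_AVAIL(10, 60, 100)      -> 50 bytes free in total
+ *     DBG_RING_CHECK_WRITE_SPACE(10, 60, 100)      -> MIN(50, 40) = 40
+ *   After wrap around, rp = 60, wp = 10:
+ *     DBG_RING_READ_AVAIL_SPACE(10, 60, 100)       -> 40 bytes (up to ring end)
+ *     DBG_RING_WRITE_SPACE_AVAIL_CONT(60, 10, 100) -> 50 bytes
+ * An entry with a 100-byte payload occupies ENTRY_LENGTH(hdr) =
+ * 100 + DBG_RING_ENTRY_SIZE bytes in the ring.
+ */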
+
+typedef void (*os_pullreq_t)(void *os_priv, const int ring_id);
+
+dhd_dbg_ring_t *dhd_dbg_ring_alloc_init(dhd_pub_t *dhd, uint16 ring_id,
+ char *ring_name, uint32 ring_sz, void *allocd_buf,
+ bool pull_inactive);
+void dhd_dbg_ring_dealloc_deinit(void **dbgring, dhd_pub_t *dhd);
+int dhd_dbg_ring_init(dhd_pub_t *dhdp, dhd_dbg_ring_t *ring, uint16 id, uint8 *name,
+ uint32 ring_sz, void *allocd_buf, bool pull_inactive);
+void dhd_dbg_ring_deinit(dhd_pub_t *dhdp, dhd_dbg_ring_t *ring);
+int dhd_dbg_ring_push(dhd_dbg_ring_t *ring, dhd_dbg_ring_entry_t *hdr, void *data);
+int dhd_dbg_ring_pull(dhd_dbg_ring_t *ring, void *data, uint32 buf_len,
+ bool strip_hdr);
+int dhd_dbg_ring_pull_single(dhd_dbg_ring_t *ring, void *data, uint32 buf_len,
+ bool strip_header);
+uint32 dhd_dbg_ring_get_pending_len(dhd_dbg_ring_t *ring);
+void dhd_dbg_ring_sched_pull(dhd_dbg_ring_t *ring, uint32 pending_len,
+ os_pullreq_t pull_fn, void *os_pvt, const int id);
+int dhd_dbg_ring_config(dhd_dbg_ring_t *ring, int log_level, uint32 threshold);
+void dhd_dbg_ring_start(dhd_dbg_ring_t *ring);
+#endif /* __DHD_DBG_RING_H__ */
diff --git a/bcmdhd.101.10.361.x/dhd_debug.c b/bcmdhd.101.10.361.x/dhd_debug.c
new file mode 100755
index 0000000..f8f6c92
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_debug.c
@@ -0,0 +1,2853 @@
+/*
+ * DHD debugability support
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+#include <typedefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_dbg.h>
+#include <dhd_dbg_ring.h>
+#include <dhd_debug.h>
+#include <dhd_mschdbg.h>
+#include <dhd_bus.h>
+
+#include <event_log.h>
+#include <event_trace.h>
+#include <msgtrace.h>
+
+#if defined(DHD_EVENT_LOG_FILTER)
+#include <dhd_event_log_filter.h>
+#endif /* DHD_EVENT_LOG_FILTER */
+
+#if defined(DHD_EFI) || defined(NDIS)
+#if !defined(offsetof)
+#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
+#endif /* !defined(offsetof) */
+
+#define container_of(ptr, type, member) \
+ (type *)((char *)(ptr) - offsetof(type, member))
+#endif /* defined(DHD_EFI ) || defined(NDIS) */
+
+#ifdef DHD_DEBUGABILITY_LOG_DUMP_RING
+uint8 control_logtrace = LOGTRACE_RAW_FMT;
+#else
+uint8 control_logtrace = CUSTOM_CONTROL_LOGTRACE;
+#endif
+
+struct map_table {
+ uint16 fw_id;
+ uint16 host_id;
+ char *desc;
+};
+
+struct map_table event_map[] = {
+ {WLC_E_AUTH, WIFI_EVENT_AUTH_COMPLETE, "AUTH_COMPLETE"},
+ {WLC_E_ASSOC, WIFI_EVENT_ASSOC_COMPLETE, "ASSOC_COMPLETE"},
+ {TRACE_FW_AUTH_STARTED, WIFI_EVENT_FW_AUTH_STARTED, "AUTH STARTED"},
+ {TRACE_FW_ASSOC_STARTED, WIFI_EVENT_FW_ASSOC_STARTED, "ASSOC STARTED"},
+ {TRACE_FW_RE_ASSOC_STARTED, WIFI_EVENT_FW_RE_ASSOC_STARTED, "REASSOC STARTED"},
+ {TRACE_G_SCAN_STARTED, WIFI_EVENT_G_SCAN_STARTED, "GSCAN STARTED"},
+ {WLC_E_PFN_SCAN_COMPLETE, WIFI_EVENT_G_SCAN_COMPLETE, "GSCAN COMPLETE"},
+	{WLC_E_DISASSOC, WIFI_EVENT_DISASSOCIATION_REQUESTED, "DISASSOC REQUESTED"},
+ {WLC_E_REASSOC, WIFI_EVENT_RE_ASSOCIATION_REQUESTED, "REASSOC REQUESTED"},
+ {TRACE_ROAM_SCAN_STARTED, WIFI_EVENT_ROAM_REQUESTED, "ROAM REQUESTED"},
+	{WLC_E_BEACON_FRAME_RX, WIFI_EVENT_BEACON_RECEIVED, "BEACON RECEIVED"},
+ {TRACE_ROAM_SCAN_STARTED, WIFI_EVENT_ROAM_SCAN_STARTED, "ROAM SCAN STARTED"},
+ {TRACE_ROAM_SCAN_COMPLETE, WIFI_EVENT_ROAM_SCAN_COMPLETE, "ROAM SCAN COMPLETED"},
+ {TRACE_ROAM_AUTH_STARTED, WIFI_EVENT_ROAM_AUTH_STARTED, "ROAM AUTH STARTED"},
+ {WLC_E_AUTH, WIFI_EVENT_ROAM_AUTH_COMPLETE, "ROAM AUTH COMPLETED"},
+ {TRACE_FW_RE_ASSOC_STARTED, WIFI_EVENT_ROAM_ASSOC_STARTED, "ROAM ASSOC STARTED"},
+ {WLC_E_ASSOC, WIFI_EVENT_ROAM_ASSOC_COMPLETE, "ROAM ASSOC COMPLETED"},
+ {TRACE_BT_COEX_BT_SCO_START, WIFI_EVENT_BT_COEX_BT_SCO_START, "BT SCO START"},
+ {TRACE_BT_COEX_BT_SCO_STOP, WIFI_EVENT_BT_COEX_BT_SCO_STOP, "BT SCO STOP"},
+ {TRACE_BT_COEX_BT_SCAN_START, WIFI_EVENT_BT_COEX_BT_SCAN_START, "BT COEX SCAN START"},
+ {TRACE_BT_COEX_BT_SCAN_STOP, WIFI_EVENT_BT_COEX_BT_SCAN_STOP, "BT COEX SCAN STOP"},
+ {TRACE_BT_COEX_BT_HID_START, WIFI_EVENT_BT_COEX_BT_HID_START, "BT HID START"},
+ {TRACE_BT_COEX_BT_HID_STOP, WIFI_EVENT_BT_COEX_BT_HID_STOP, "BT HID STOP"},
+ {WLC_E_EAPOL_MSG, WIFI_EVENT_FW_EAPOL_FRAME_RECEIVED, "FW EAPOL PKT RECEIVED"},
+ {TRACE_FW_EAPOL_FRAME_TRANSMIT_START, WIFI_EVENT_FW_EAPOL_FRAME_TRANSMIT_START,
+ "FW EAPOL PKT TRANSMITED"},
+ {TRACE_FW_EAPOL_FRAME_TRANSMIT_STOP, WIFI_EVENT_FW_EAPOL_FRAME_TRANSMIT_STOP,
+ "FW EAPOL PKT TX STOPPED"},
+ {TRACE_BLOCK_ACK_NEGOTIATION_COMPLETE, WIFI_EVENT_BLOCK_ACK_NEGOTIATION_COMPLETE,
+ "BLOCK ACK NEGO COMPLETED"},
+};
+
+struct map_table event_tag_map[] = {
+ {TRACE_TAG_VENDOR_SPECIFIC, WIFI_TAG_VENDOR_SPECIFIC, "VENDOR SPECIFIC DATA"},
+ {TRACE_TAG_BSSID, WIFI_TAG_BSSID, "BSSID"},
+ {TRACE_TAG_ADDR, WIFI_TAG_ADDR, "ADDR_0"},
+ {TRACE_TAG_SSID, WIFI_TAG_SSID, "SSID"},
+ {TRACE_TAG_STATUS, WIFI_TAG_STATUS, "STATUS"},
+ {TRACE_TAG_CHANNEL_SPEC, WIFI_TAG_CHANNEL_SPEC, "CHANSPEC"},
+ {TRACE_TAG_WAKE_LOCK_EVENT, WIFI_TAG_WAKE_LOCK_EVENT, "WAKELOCK EVENT"},
+ {TRACE_TAG_ADDR1, WIFI_TAG_ADDR1, "ADDR_1"},
+ {TRACE_TAG_ADDR2, WIFI_TAG_ADDR2, "ADDR_2"},
+ {TRACE_TAG_ADDR3, WIFI_TAG_ADDR3, "ADDR_3"},
+ {TRACE_TAG_ADDR4, WIFI_TAG_ADDR4, "ADDR_4"},
+ {TRACE_TAG_TSF, WIFI_TAG_TSF, "TSF"},
+ {TRACE_TAG_IE, WIFI_TAG_IE, "802.11 IE"},
+ {TRACE_TAG_INTERFACE, WIFI_TAG_INTERFACE, "INTERFACE"},
+ {TRACE_TAG_REASON_CODE, WIFI_TAG_REASON_CODE, "REASON CODE"},
+ {TRACE_TAG_RATE_MBPS, WIFI_TAG_RATE_MBPS, "RATE"},
+};
+
+/* define log level per ring type */
+struct log_level_table fw_verbose_level_map[] = {
+ {1, EVENT_LOG_TAG_PCI_ERROR, "PCI_ERROR"},
+#ifndef DISABLE_PCI_LOGGING
+ {1, EVENT_LOG_TAG_PCI_WARN, "PCI_WARN"},
+ {2, EVENT_LOG_TAG_PCI_INFO, "PCI_INFO"},
+ {3, EVENT_LOG_TAG_PCI_DBG, "PCI_DEBUG"},
+#endif
+#ifndef DISABLE_BEACON_LOGGING
+ {3, EVENT_LOG_TAG_BEACON_LOG, "BEACON_LOG"},
+#endif
+ {2, EVENT_LOG_TAG_WL_ASSOC_LOG, "ASSOC_LOG"},
+ {2, EVENT_LOG_TAG_WL_ROAM_LOG, "ROAM_LOG"},
+ {1, EVENT_LOG_TAG_TRACE_WL_INFO, "WL INFO"},
+ {1, EVENT_LOG_TAG_TRACE_BTCOEX_INFO, "BTCOEX INFO"},
+#ifdef DHD_RANDMAC_LOGGING
+ {1, EVENT_LOG_TAG_RANDMAC_ERR, "RANDMAC_ERR"},
+#endif /* DHD_RANDMAC_LOGGING */
+#ifdef CUSTOMER_HW4_DEBUG
+ {3, EVENT_LOG_TAG_SCAN_WARN, "SCAN_WARN"},
+#else
+ {1, EVENT_LOG_TAG_SCAN_WARN, "SCAN_WARN"},
+#endif /* CUSTOMER_HW4_DEBUG */
+ {1, EVENT_LOG_TAG_SCAN_ERROR, "SCAN_ERROR"},
+ {2, EVENT_LOG_TAG_SCAN_TRACE_LOW, "SCAN_TRACE_LOW"},
+ {2, EVENT_LOG_TAG_SCAN_TRACE_HIGH, "SCAN_TRACE_HIGH"},
+#ifdef DHD_WL_ERROR_LOGGING
+ {3, EVENT_LOG_TAG_WL_ERROR, "WL_ERROR"},
+#endif
+#ifdef DHD_IE_ERROR_LOGGING
+ {3, EVENT_LOG_TAG_IE_ERROR, "IE_ERROR"},
+#endif
+#ifdef DHD_ASSOC_ERROR_LOGGING
+ {3, EVENT_LOG_TAG_ASSOC_ERROR, "ASSOC_ERROR"},
+#endif
+#ifdef DHD_PMU_ERROR_LOGGING
+ {3, EVENT_LOG_TAG_PMU_ERROR, "PMU_ERROR"},
+#endif
+#ifdef DHD_8021X_ERROR_LOGGING
+ {3, EVENT_LOG_TAG_4WAYHANDSHAKE, "8021X_ERROR"},
+#endif
+#ifdef DHD_AMPDU_ERROR_LOGGING
+ {3, EVENT_LOG_TAG_AMSDU_ERROR, "AMPDU_ERROR"},
+#endif
+#ifdef DHD_SAE_ERROR_LOGGING
+ {3, EVENT_LOG_TAG_SAE_ERROR, "SAE_ERROR"},
+#endif
+};
+
+/* reference tag table */
+uint ref_tag_tbl[EVENT_LOG_TAG_MAX + 1] = {0};
+
+typedef struct dhddbg_loglist_item {
+ dll_t list;
+ prcd_event_log_hdr_t prcd_log_hdr;
+} loglist_item_t;
+
+typedef struct dhbdbg_pending_item {
+ dll_t list;
+ dhd_dbg_ring_status_t ring_status;
+ dhd_dbg_ring_entry_t *ring_entry;
+} pending_item_t;
+
+/* trace log entry header user space processing */
+struct tracelog_header {
+ int magic_num;
+ int buf_size;
+ int seq_num;
+};
+#define TRACE_LOG_MAGIC_NUMBER 0xEAE47C06
+
+int
+dhd_dbg_push_to_ring(dhd_pub_t *dhdp, int ring_id, dhd_dbg_ring_entry_t *hdr, void *data)
+{
+ dhd_dbg_ring_t *ring;
+ int ret = 0;
+ uint32 pending_len = 0;
+
+ if (!dhdp || !dhdp->dbg) {
+ return BCME_BADADDR;
+ }
+
+ if (!VALID_RING(ring_id)) {
+ DHD_ERROR(("%s : invalid ring_id : %d\n", __FUNCTION__, ring_id));
+ return BCME_RANGE;
+ }
+
+ ring = &dhdp->dbg->dbg_rings[ring_id];
+
+ ret = dhd_dbg_ring_push(ring, hdr, data);
+ if (ret != BCME_OK)
+ return ret;
+
+ pending_len = dhd_dbg_ring_get_pending_len(ring);
+ dhd_dbg_ring_sched_pull(ring, pending_len, dhdp->dbg->pullreq,
+ dhdp->dbg->private, ring->id);
+
+ return ret;
+}
+
+dhd_dbg_ring_t *
+dhd_dbg_get_ring_from_ring_id(dhd_pub_t *dhdp, int ring_id)
+{
+ if (!dhdp || !dhdp->dbg) {
+ return NULL;
+ }
+
+ if (!VALID_RING(ring_id)) {
+ DHD_ERROR(("%s : invalid ring_id : %d\n", __FUNCTION__, ring_id));
+ return NULL;
+ }
+
+ return &dhdp->dbg->dbg_rings[ring_id];
+}
+
+int
+dhd_dbg_pull_single_from_ring(dhd_pub_t *dhdp, int ring_id, void *data, uint32 buf_len,
+ bool strip_header)
+{
+ dhd_dbg_ring_t *ring;
+
+ if (!dhdp || !dhdp->dbg) {
+ return 0;
+ }
+
+ if (!VALID_RING(ring_id)) {
+ DHD_ERROR(("%s : invalid ring_id : %d\n", __FUNCTION__, ring_id));
+ return BCME_RANGE;
+ }
+
+ ring = &dhdp->dbg->dbg_rings[ring_id];
+
+ return dhd_dbg_ring_pull_single(ring, data, buf_len, strip_header);
+}
+
+int
+dhd_dbg_pull_from_ring(dhd_pub_t *dhdp, int ring_id, void *data, uint32 buf_len)
+{
+ dhd_dbg_ring_t *ring;
+
+ if (!dhdp || !dhdp->dbg)
+ return 0;
+ if (!VALID_RING(ring_id)) {
+ DHD_ERROR(("%s : invalid ring_id : %d\n", __FUNCTION__, ring_id));
+ return BCME_RANGE;
+ }
+ ring = &dhdp->dbg->dbg_rings[ring_id];
+ return dhd_dbg_ring_pull(ring, data, buf_len, FALSE);
+}
+
+static int
+dhd_dbg_msgtrace_seqchk(uint32 *prev, uint32 cur)
+{
+ /* normal case including wrap around */
+ if ((cur == 0 && *prev == 0xFFFFFFFF) || ((cur - *prev) == 1)) {
+ goto done;
+ } else if (cur == *prev) {
+ DHD_EVENT(("%s duplicate trace\n", __FUNCTION__));
+ return -1;
+ } else if (cur > *prev) {
+ DHD_EVENT(("%s lost %d packets\n", __FUNCTION__, cur - *prev));
+ } else {
+ DHD_EVENT(("%s seq out of order, dhd %d, dongle %d\n",
+ __FUNCTION__, *prev, cur));
+ }
+done:
+ *prev = cur;
+ return 0;
+}
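+/* Illustrative outcomes of the sequence check above (hypothetical values):
+ *   prev = 0xFFFFFFFF, cur = 0  -> accepted (wrap around)
+ *   prev = 7,          cur = 8  -> accepted (normal increment)
+ *   prev = 8,          cur = 8  -> duplicate, returns -1
+ *   prev = 8,          cur = 11 -> accepted, "lost 3 packets" (cur - prev) is logged
+ */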
+
+static void
+dhd_dbg_msgtrace_msg_parser(void *event_data)
+{
+ msgtrace_hdr_t *hdr;
+ char *data, *s;
+ static uint32 seqnum_prev = 0;
+
+ if (!event_data) {
+ DHD_ERROR(("%s: event_data is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ hdr = (msgtrace_hdr_t *)event_data;
+ data = (char *)event_data + MSGTRACE_HDRLEN;
+
+ /* There are 2 bytes available at the end of data */
+ data[ntoh16(hdr->len)] = '\0';
+
+ if (ntoh32(hdr->discarded_bytes) || ntoh32(hdr->discarded_printf)) {
+ DHD_DBGIF(("WLC_E_TRACE: [Discarded traces in dongle -->"
+ "discarded_bytes %d discarded_printf %d]\n",
+ ntoh32(hdr->discarded_bytes),
+ ntoh32(hdr->discarded_printf)));
+ }
+
+ if (dhd_dbg_msgtrace_seqchk(&seqnum_prev, ntoh32(hdr->seqnum)))
+ return;
+
+	/* Display the trace buffer, advancing from \n to \n, to avoid
+	 * passing overly large strings to printf (an issue with the
+	 * Linux printk).
+	 */
+ while (*data != '\0' && (s = strstr(data, "\n")) != NULL) {
+ *s = '\0';
+ DHD_FWLOG(("[FWLOG] %s\n", data));
+ data = s+1;
+ }
+ if (*data)
+ DHD_FWLOG(("[FWLOG] %s", data));
+}
+#ifdef SHOW_LOGTRACE
+#define DATA_UNIT_FOR_LOG_CNT 4
+
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+
+int
+replace_percent_p_to_x(char *fmt)
+{
+ int p_to_x_done = FALSE;
+
+ while (*fmt != '\0')
+ {
+		/* Skip characters until we see a % */
+ if (*fmt++ != '%')
+ {
+ continue;
+ }
+
+		/*
+		 * Skip any flags, field width and precision:
+		 * Flags (immediately following the %):
+		 *    #, 0, -, ' ', +
+		 */
+ if (*fmt == '#')
+ fmt++;
+
+ if (*fmt == '0' || *fmt == '-' || *fmt == '+')
+ fmt++;
+
+ /*
+ * Field width:
+ * An optional decimal digit string (with non-zero first digit)
+ * specifying a minimum field width
+ */
+ while (*fmt && bcm_isdigit(*fmt))
+ fmt++;
+
+ /*
+ * Precision:
+ * An optional precision, in the form of a period ('.') followed by an
+ * optional decimal digit string.
+ */
+ if (*fmt == '.')
+ {
+ fmt++;
+ while (*fmt && bcm_isdigit(*fmt)) fmt++;
+ }
+
+ /* If %p is seen, change it to %x */
+ if (*fmt == 'p')
+ {
+ *fmt = 'x';
+ p_to_x_done = TRUE;
+ }
+ if (*fmt)
+ fmt++;
+ }
+
+ return p_to_x_done;
+}
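+/* Example (hypothetical format string): "va=%p cnt=%08d" is rewritten in
+ * place to "va=%x cnt=%08d" and TRUE is returned; a string containing no
+ * %p is left untouched and FALSE is returned.
+ */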
+
+/* To identify format of types %Ns where N >= 0 is a number */
+bool
+check_valid_string_format(char *curr_ptr)
+{
+ char *next_ptr;
+ if ((next_ptr = bcmstrstr(curr_ptr, "s")) != NULL) {
+ /* Default %s format */
+ if (curr_ptr == next_ptr) {
+ return TRUE;
+ }
+
+		/* Verify each character between '%' and 's' is a valid number */
+ while (curr_ptr < next_ptr) {
+ if (bcm_isdigit(*curr_ptr) == FALSE) {
+ return FALSE;
+ }
+ curr_ptr++;
+ }
+
+ return TRUE;
+ } else {
+ return FALSE;
+ }
+}
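+/* Examples (the caller passes the text just after a '%', hypothetical inputs):
+ *   "s"    -> TRUE  (plain %s)
+ *   "16s"  -> TRUE  (%16s, every char before 's' is a digit)
+ *   "-16s" -> FALSE ('-' is not a digit)
+ */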
+
+/* To identify format of non string format types */
+bool
+check_valid_non_string_format(char *curr_ptr)
+{
+ char *next_ptr;
+ char *next_fmt_stptr;
+ char valid_fmt_types[17] = {'d', 'i', 'x', 'X', 'c', 'p', 'u',
+ 'f', 'F', 'e', 'E', 'g', 'G', 'o',
+ 'a', 'A', 'n'};
+ int i;
+ bool valid = FALSE;
+
+ /* Check for next % in the fmt str */
+ next_fmt_stptr = bcmstrstr(curr_ptr, "%");
+
+ for (next_ptr = curr_ptr; *next_ptr != '\0'; next_ptr++) {
+ for (i = 0; i < (int)((sizeof(valid_fmt_types))/sizeof(valid_fmt_types[0])); i++) {
+ if (*next_ptr == valid_fmt_types[i]) {
+ /* Check whether format type found corresponds to current %
+ * and not the next one, if exists.
+ */
+ if ((next_fmt_stptr == NULL) ||
+ (next_fmt_stptr && (next_ptr < next_fmt_stptr))) {
+ /* Not validating for length/width fields in
+ * format specifier.
+ */
+ valid = TRUE;
+ }
+ goto done;
+ }
+ }
+ }
+
+done:
+ return valid;
+}
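+/* Examples (the caller passes the text just after a '%', hypothetical inputs):
+ *   "d"    -> TRUE  ('d' is a recognized conversion)
+ *   "08x"  -> TRUE  (width digits are scanned past until 'x' is found)
+ *   "q %d" -> FALSE (no recognized conversion before the next '%')
+ */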
+
+#define MAX_NO_OF_ARG 16
+#define FMTSTR_SIZE 200
+#define ROMSTR_SIZE 268
+#define SIZE_LOC_STR 50
+#define LOG_PRINT_CNT_MAX 16u
+#define EL_MSEC_PER_SEC 1000
+#ifdef DHD_LOG_PRINT_RATE_LIMIT
+#define MAX_LOG_PRINT_COUNT 100u
+#define LOG_PRINT_THRESH (1u * USEC_PER_SEC)
+#endif
+#define EL_PARSE_VER "V02"
+static uint64 verboselog_ts_saved = 0;
+
+bool
+dhd_dbg_process_event_log_hdr(event_log_hdr_t *log_hdr, prcd_event_log_hdr_t *prcd_log_hdr)
+{
+ event_log_extended_hdr_t *ext_log_hdr;
+ uint16 event_log_fmt_num;
+ uint8 event_log_hdr_type;
+
+ /* Identify the type of event tag, payload type etc.. */
+ event_log_hdr_type = log_hdr->fmt_num & DHD_EVENT_LOG_HDR_MASK;
+ event_log_fmt_num = (log_hdr->fmt_num >> DHD_EVENT_LOG_FMT_NUM_OFFSET) &
+ DHD_EVENT_LOG_FMT_NUM_MASK;
+
+ switch (event_log_hdr_type) {
+ case DHD_OW_NB_EVENT_LOG_HDR:
+ prcd_log_hdr->ext_event_log_hdr = FALSE;
+ prcd_log_hdr->binary_payload = FALSE;
+ break;
+ case DHD_TW_NB_EVENT_LOG_HDR:
+ prcd_log_hdr->ext_event_log_hdr = TRUE;
+ prcd_log_hdr->binary_payload = FALSE;
+ break;
+ case DHD_BI_EVENT_LOG_HDR:
+ if (event_log_fmt_num == DHD_OW_BI_EVENT_FMT_NUM) {
+ prcd_log_hdr->ext_event_log_hdr = FALSE;
+ prcd_log_hdr->binary_payload = TRUE;
+ } else if (event_log_fmt_num == DHD_TW_BI_EVENT_FMT_NUM) {
+ prcd_log_hdr->ext_event_log_hdr = TRUE;
+ prcd_log_hdr->binary_payload = TRUE;
+ } else {
+ DHD_ERROR(("%s: invalid format number 0x%X\n",
+ __FUNCTION__, event_log_fmt_num));
+ return FALSE;
+ }
+ break;
+ case DHD_INVALID_EVENT_LOG_HDR:
+ default:
+ DHD_ERROR(("%s: invalid event log header type 0x%X\n",
+ __FUNCTION__, event_log_hdr_type));
+ return FALSE;
+ }
+
+ /* Parse extended and legacy event log headers and populate prcd_event_log_hdr_t */
+ if (prcd_log_hdr->ext_event_log_hdr) {
+ ext_log_hdr = (event_log_extended_hdr_t *)
+ ((uint8 *)log_hdr - sizeof(event_log_hdr_t));
+ prcd_log_hdr->tag = ((ext_log_hdr->extended_tag &
+ DHD_TW_VALID_TAG_BITS_MASK) << DHD_TW_EVENT_LOG_TAG_OFFSET) | log_hdr->tag;
+ } else {
+ prcd_log_hdr->tag = log_hdr->tag;
+ }
+ prcd_log_hdr->count = log_hdr->count;
+ prcd_log_hdr->fmt_num_raw = log_hdr->fmt_num;
+ prcd_log_hdr->fmt_num = event_log_fmt_num;
+
+ /* update arm cycle */
+ /*
+	 * For a legacy event tag:
+	 * |payload........|Timestamp| Tag
+	 *
+	 * For an extended event tag:
+	 * |payload........|Timestamp|extended Tag| Tag.
+ *
+ */
+ prcd_log_hdr->armcycle = prcd_log_hdr->ext_event_log_hdr ?
+ *(uint32 *)(log_hdr - EVENT_TAG_TIMESTAMP_EXT_OFFSET) :
+ *(uint32 *)(log_hdr - EVENT_TAG_TIMESTAMP_OFFSET);
+
+ /* update event log data pointer address */
+ prcd_log_hdr->log_ptr =
+ (uint32 *)log_hdr - log_hdr->count - prcd_log_hdr->ext_event_log_hdr;
+
+ /* handle error cases above this */
+ return TRUE;
+}
+
+static void
+dhd_dbg_verboselog_handler(dhd_pub_t *dhdp, prcd_event_log_hdr_t *plog_hdr,
+ void *raw_event_ptr, uint32 logset, uint16 block, uint32* data)
+{
+ event_log_hdr_t *ts_hdr;
+ uint32 *log_ptr = plog_hdr->log_ptr;
+ char fmtstr_loc_buf[ROMSTR_SIZE] = { 0 };
+ uint32 rom_str_len = 0;
+ uint32 *ts_data;
+
+ if (!raw_event_ptr) {
+ return;
+ }
+
+ if (log_ptr < data) {
+ DHD_ERROR(("Invalid log pointer, logptr : %p data : %p \n", log_ptr, data));
+ return;
+ }
+
+ if (log_ptr > data) {
+ /* Get time stamp if it's updated */
+ ts_hdr = (event_log_hdr_t *)((char *)log_ptr - sizeof(event_log_hdr_t));
+ if (ts_hdr->tag == EVENT_LOG_TAG_TS) {
+ ts_data = (uint32 *)ts_hdr - ts_hdr->count;
+ if (ts_data >= data) {
+ verboselog_ts_saved = (uint64)ts_data[0];
+ DHD_MSGTRACE_LOG(("EVENT_LOG_TS[0x%08x]: SYS:%08x CPU:%08x\n",
+ ts_data[ts_hdr->count - 1], ts_data[0], ts_data[1]));
+ }
+ } else if (ts_hdr->tag == EVENT_LOG_TAG_ENHANCED_TS) {
+ ets_msg_v1_t *ets;
+ ets = (ets_msg_v1_t *)ts_hdr - ts_hdr->count;
+ if ((uint32*)ets >= data &&
+ ts_hdr->count >= (sizeof(ets_msg_v1_t) / sizeof(uint32)) &&
+ ets->version == ENHANCED_TS_MSG_VERSION_1) {
+ DHD_MSGTRACE_LOG(("EVENT_LOG_ENHANCED_TS_V1: "
+ "SYS:%08x CPU:%08x CPUFREQ:%u\n",
+ ets->timestamp, ets->cyclecount, ets->cpu_freq));
+ }
+ }
+ }
+
+ if (plog_hdr->tag == EVENT_LOG_TAG_ROM_PRINTF) {
+ rom_str_len = (plog_hdr->count - 1) * sizeof(uint32);
+ if (rom_str_len >= (ROMSTR_SIZE -1))
+ rom_str_len = ROMSTR_SIZE - 1;
+
+ /* copy all ascii data for ROM printf to local string */
+ memcpy(fmtstr_loc_buf, log_ptr, rom_str_len);
+ /* add end of line at last */
+ fmtstr_loc_buf[rom_str_len] = '\0';
+
+ DHD_MSGTRACE_LOG(("EVENT_LOG_ROM[0x%08x]: %s",
+ log_ptr[plog_hdr->count - 1], fmtstr_loc_buf));
+
+ /* Add newline if missing */
+ if (fmtstr_loc_buf[strlen(fmtstr_loc_buf) - 1] != '\n')
+ DHD_MSGTRACE_LOG(("\n"));
+
+ return;
+ }
+
+ if (plog_hdr->tag == EVENT_LOG_TAG_MSCHPROFILE ||
+ plog_hdr->tag == EVENT_LOG_TAG_MSCHPROFILE_TLV) {
+ wl_mschdbg_verboselog_handler(dhdp, raw_event_ptr, plog_hdr, log_ptr);
+ return;
+ }
+
+ /* print the message out in a logprint */
+ dhd_dbg_verboselog_printf(dhdp, plog_hdr, raw_event_ptr, log_ptr, logset, block);
+}
+
+void
+dhd_dbg_verboselog_printf(dhd_pub_t *dhdp, prcd_event_log_hdr_t *plog_hdr,
+ void *raw_event_ptr, uint32 *log_ptr, uint32 logset, uint16 block)
+{
+ dhd_event_log_t *raw_event = (dhd_event_log_t *)raw_event_ptr;
+ uint16 count;
+ int log_level, id;
+ char fmtstr_loc_buf[ROMSTR_SIZE] = { 0 };
+ char (*str_buf)[SIZE_LOC_STR] = NULL;
+ char *str_tmpptr = NULL;
+ uint32 addr = 0;
+ typedef union {
+ uint32 val;
+ char * addr;
+ } u_arg;
+ u_arg arg[MAX_NO_OF_ARG] = {{0}};
+ char *c_ptr = NULL;
+ struct bcmstrbuf b;
+#ifdef DHD_LOG_PRINT_RATE_LIMIT
+ static int log_print_count = 0;
+ static uint64 ts0 = 0;
+ uint64 ts1 = 0;
+#endif /* DHD_LOG_PRINT_RATE_LIMIT */
+
+ BCM_REFERENCE(arg);
+
+#ifdef DHD_LOG_PRINT_RATE_LIMIT
+ if (!ts0)
+ ts0 = OSL_SYSUPTIME_US();
+
+ ts1 = OSL_SYSUPTIME_US();
+
+ if (((ts1 - ts0) <= LOG_PRINT_THRESH) && (log_print_count >= MAX_LOG_PRINT_COUNT)) {
+ log_print_threshold = 1;
+ ts0 = 0;
+ log_print_count = 0;
+ DHD_ERROR(("%s: Log print water mark is reached,"
+ " console logs are dumped only to debug_dump file\n", __FUNCTION__));
+ } else if ((ts1 - ts0) > LOG_PRINT_THRESH) {
+ log_print_threshold = 0;
+ ts0 = 0;
+ log_print_count = 0;
+ }
+
+#endif /* DHD_LOG_PRINT_RATE_LIMIT */
+ /* print the message out in a logprint */
+ if ((control_logtrace == LOGTRACE_RAW_FMT) || !(raw_event->fmts)) {
+ if (dhdp->dbg) {
+ log_level = dhdp->dbg->dbg_rings[FW_VERBOSE_RING_ID].log_level;
+ for (id = 0; id < ARRAYSIZE(fw_verbose_level_map); id++) {
+ if ((fw_verbose_level_map[id].tag == plog_hdr->tag) &&
+ (fw_verbose_level_map[id].log_level > log_level))
+ return;
+ }
+ }
+ if (plog_hdr->binary_payload) {
+ DHD_ECNTR_LOG(("%d.%d EL:tag=%d len=%d fmt=0x%x",
+ (uint32)(log_ptr[plog_hdr->count - 1] / EL_MSEC_PER_SEC),
+ (uint32)(log_ptr[plog_hdr->count - 1] % EL_MSEC_PER_SEC),
+ plog_hdr->tag,
+ plog_hdr->count,
+ plog_hdr->fmt_num_raw));
+
+ for (count = 0; count < (plog_hdr->count - 1); count++) {
+ /* XXX: skip first line feed in case count 0 */
+ if (count && (count % LOG_PRINT_CNT_MAX == 0)) {
+ DHD_ECNTR_LOG(("\n\t%08x", log_ptr[count]));
+ } else {
+ DHD_ECNTR_LOG((" %08x", log_ptr[count]));
+ }
+ }
+ DHD_ECNTR_LOG(("\n"));
+ }
+ else {
+ bcm_binit(&b, fmtstr_loc_buf, FMTSTR_SIZE);
+ /* XXX: The 'hdr->count - 1' is dongle time */
+#ifndef OEM_ANDROID
+ bcm_bprintf(&b, "%06d.%03d EL: %d 0x%x",
+ (uint32)(log_ptr[plog_hdr->count - 1] / EL_MSEC_PER_SEC),
+ (uint32)(log_ptr[plog_hdr->count - 1] % EL_MSEC_PER_SEC),
+ plog_hdr->tag,
+ plog_hdr->fmt_num_raw);
+#else
+ bcm_bprintf(&b, "%06d.%03d EL:%s:%u:%u %d %d 0x%x",
+ (uint32)(log_ptr[plog_hdr->count - 1] / EL_MSEC_PER_SEC),
+ (uint32)(log_ptr[plog_hdr->count - 1] % EL_MSEC_PER_SEC),
+ EL_PARSE_VER, logset, block,
+ plog_hdr->tag,
+ plog_hdr->count,
+ plog_hdr->fmt_num_raw);
+#endif /* !OEM_ANDROID */
+ for (count = 0; count < (plog_hdr->count - 1); count++) {
+ bcm_bprintf(&b, " %x", log_ptr[count]);
+ }
+
+			/* ensure preserved fw logs go to debug_dump only in the customer4 case */
+ if (logset < dhdp->event_log_max_sets &&
+ ((0x01u << logset) & dhdp->logset_prsrv_mask)) {
+ DHD_PRSRV_MEM(("%s\n", b.origbuf));
+ } else {
+#ifdef DHD_DEBUGABILITY_LOG_DUMP_RING
+ DHD_FW_VERBOSE(("%s\n", b.origbuf));
+#else
+ DHD_FWLOG(("%s\n", b.origbuf));
+#endif
+#ifdef DHD_LOG_PRINT_RATE_LIMIT
+ log_print_count++;
+#endif /* DHD_LOG_PRINT_RATE_LIMIT */
+ }
+ }
+ return;
+ }
+
+ str_buf = MALLOCZ(dhdp->osh, (MAX_NO_OF_ARG * SIZE_LOC_STR));
+ if (!str_buf) {
+ DHD_ERROR(("%s: malloc failed str_buf\n", __FUNCTION__));
+ return;
+ }
+
+ if ((plog_hdr->fmt_num) < raw_event->num_fmts) {
+ if (plog_hdr->tag == EVENT_LOG_TAG_MSCHPROFILE) {
+ snprintf(fmtstr_loc_buf, FMTSTR_SIZE, "%s",
+ raw_event->fmts[plog_hdr->fmt_num]);
+ plog_hdr->count++;
+ } else {
+ snprintf(fmtstr_loc_buf, FMTSTR_SIZE, "CONSOLE_E:%u:%u %06d.%03d %s",
+ logset, block,
+ (uint32)(log_ptr[plog_hdr->count - 1] / EL_MSEC_PER_SEC),
+ (uint32)(log_ptr[plog_hdr->count - 1] % EL_MSEC_PER_SEC),
+ raw_event->fmts[plog_hdr->fmt_num]);
+ }
+ c_ptr = fmtstr_loc_buf;
+ } else {
+ /* for ecounters, don't print the error as it will flood */
+ if ((plog_hdr->fmt_num != DHD_OW_BI_EVENT_FMT_NUM) &&
+ (plog_hdr->fmt_num != DHD_TW_BI_EVENT_FMT_NUM)) {
+ DHD_ERROR(("%s: fmt number: 0x%x out of range\n",
+ __FUNCTION__, plog_hdr->fmt_num));
+ } else {
+ DHD_INFO(("%s: fmt number: 0x%x out of range\n",
+ __FUNCTION__, plog_hdr->fmt_num));
+ }
+
+ goto exit;
+ }
+
+ if (plog_hdr->count > MAX_NO_OF_ARG) {
+ DHD_ERROR(("%s: plog_hdr->count(%d) out of range\n",
+ __FUNCTION__, plog_hdr->count));
+ goto exit;
+ }
+
+ /* print the format string which will be needed for debugging incorrect formats */
+ DHD_INFO(("%s: fmtstr_loc_buf = %s\n", __FUNCTION__, fmtstr_loc_buf));
+
+ /* Replace all %p to %x to handle 32 bit %p */
+ replace_percent_p_to_x(fmtstr_loc_buf);
+
+ for (count = 0; count < (plog_hdr->count - 1); count++) {
+ if (c_ptr != NULL)
+ if ((c_ptr = bcmstrstr(c_ptr, "%")) != NULL)
+ c_ptr++;
+
+ if (c_ptr != NULL) {
+ if (check_valid_string_format(c_ptr)) {
+ if ((raw_event->raw_sstr) &&
+ ((log_ptr[count] > raw_event->rodata_start) &&
+ (log_ptr[count] < raw_event->rodata_end))) {
+ /* ram static string */
+ addr = log_ptr[count] - raw_event->rodata_start;
+ str_tmpptr = raw_event->raw_sstr + addr;
+ memcpy(str_buf[count], str_tmpptr,
+ SIZE_LOC_STR);
+ str_buf[count][SIZE_LOC_STR-1] = '\0';
+ arg[count].addr = str_buf[count];
+ } else if ((raw_event->rom_raw_sstr) &&
+ ((log_ptr[count] >
+ raw_event->rom_rodata_start) &&
+ (log_ptr[count] <
+ raw_event->rom_rodata_end))) {
+ /* rom static string */
+ addr = log_ptr[count] - raw_event->rom_rodata_start;
+ str_tmpptr = raw_event->rom_raw_sstr + addr;
+ memcpy(str_buf[count], str_tmpptr,
+ SIZE_LOC_STR);
+ str_buf[count][SIZE_LOC_STR-1] = '\0';
+ arg[count].addr = str_buf[count];
+ } else {
+ /*
+					 * Either a dynamic string, or no data is
+					 * available for a static string; store the
+					 * string's address as the argument instead.
+ */
+ snprintf(str_buf[count], SIZE_LOC_STR,
+ "(s)0x%x", log_ptr[count]);
+ arg[count].addr = str_buf[count];
+ }
+ } else if (check_valid_non_string_format(c_ptr)) {
+ /* Other than string format */
+ arg[count].val = log_ptr[count];
+ } else {
+				/* Nothing was copied after the %, or an improper format
+				 * specifier follows the current %, because the buffer was
+				 * not large enough for a complete copy of the original fmt
+				 * string. This causes the error mentioned below:
+				 * Error: "Please remove unsupported %\x00 in format string"
+				 * (lib/vsprintf.c:1900 format_decode+0x3bc/0x470).
+				 * Refer to JIRA: SWWLAN-200629 for detailed info.
+				 *
+				 * Terminate the string at the current position.
+				 */
+ *(c_ptr - 1) = '\0';
+ break;
+ }
+ }
+ }
+
+	/* ensure preserved fw logs go to debug_dump only in the customer4 case */
+ if (logset < dhdp->event_log_max_sets &&
+ ((0x01u << logset) & dhdp->logset_prsrv_mask)) {
+ if (dhd_msg_level & DHD_EVENT_VAL) {
+ if (dhd_msg_level & DHD_PRSRV_MEM_VAL)
+ printk(fmtstr_loc_buf, arg[0], arg[1], arg[2], arg[3],
+ arg[4], arg[5], arg[6], arg[7], arg[8], arg[9], arg[10],
+ arg[11], arg[12], arg[13], arg[14], arg[15]);
+ }
+ } else {
+ if (dhd_msg_level & DHD_FWLOG_VAL) {
+ printk(fmtstr_loc_buf, arg[0], arg[1], arg[2], arg[3],
+ arg[4], arg[5], arg[6], arg[7], arg[8], arg[9], arg[10],
+ arg[11], arg[12], arg[13], arg[14], arg[15]);
+ }
+#ifdef DHD_LOG_PRINT_RATE_LIMIT
+ log_print_count++;
+#endif /* DHD_LOG_PRINT_RATE_LIMIT */
+ }
+
+exit:
+ MFREE(dhdp->osh, str_buf, (MAX_NO_OF_ARG * SIZE_LOC_STR));
+}
+
+#if defined(EWP_BCM_TRACE) || defined(EWP_RTT_LOGGING) || \
+ defined(EWP_ECNTRS_LOGGING)
+static int
+dhd_dbg_send_evtlog_to_ring(prcd_event_log_hdr_t *plog_hdr,
+ dhd_dbg_ring_entry_t *msg_hdr, dhd_dbg_ring_t *ring,
+ uint16 max_payload_len, uint8 *logbuf)
+{
+ event_log_hdr_t *log_hdr;
+ struct tracelog_header *logentry_header;
+ uint16 len_chk = 0;
+
+ BCM_REFERENCE(log_hdr);
+ BCM_REFERENCE(logentry_header);
+ /*
+ * check msg hdr len before pushing.
+ * FW msg_hdr.len includes length of event log hdr,
+ * logentry header and payload.
+ */
+ len_chk = (sizeof(*logentry_header) + sizeof(*log_hdr) +
+ max_payload_len);
+ /* account extended event log header(extended_event_log_hdr) */
+ if (plog_hdr->ext_event_log_hdr) {
+ len_chk += sizeof(*log_hdr);
+ }
+ if (msg_hdr->len > len_chk) {
+ DHD_ERROR(("%s: EVENT_LOG_VALIDATION_FAILS: "
+ "msg_hdr->len=%u, max allowed for %s=%u\n",
+ __FUNCTION__, msg_hdr->len, ring->name, len_chk));
+ return BCME_ERROR;
+ }
+ dhd_dbg_ring_push(ring, msg_hdr, logbuf);
+ return BCME_OK;
+}
+#endif /* EWP_BCM_TRACE || EWP_RTT_LOGGING || EWP_ECNTRS_LOGGING */
+
+void
+dhd_dbg_msgtrace_log_parser(dhd_pub_t *dhdp, void *event_data,
+ void *raw_event_ptr, uint datalen, bool msgtrace_hdr_present,
+ uint32 msgtrace_seqnum)
+{
+ msgtrace_hdr_t *hdr;
+ char *data, *tmpdata;
+ const uint32 log_hdr_len = sizeof(event_log_hdr_t);
+ uint32 log_pyld_len;
+ static uint32 seqnum_prev = 0;
+ event_log_hdr_t *log_hdr;
+ bool msg_processed = FALSE;
+ prcd_event_log_hdr_t prcd_log_hdr;
+ prcd_event_log_hdr_t *plog_hdr;
+ dll_t list_head, *cur;
+ loglist_item_t *log_item;
+ dhd_dbg_ring_entry_t msg_hdr;
+ char *logbuf;
+ struct tracelog_header *logentry_header;
+ uint ring_data_len = 0;
+ bool ecntr_pushed = FALSE;
+ bool rtt_pushed = FALSE;
+ bool bcm_trace_pushed = FALSE;
+ bool dll_inited = FALSE;
+ uint32 logset = 0;
+ uint16 block = 0;
+ bool event_log_max_sets_queried;
+ uint32 event_log_max_sets;
+ uint min_expected_len = 0;
+ uint16 len_chk = 0;
+
+ BCM_REFERENCE(ecntr_pushed);
+ BCM_REFERENCE(rtt_pushed);
+ BCM_REFERENCE(bcm_trace_pushed);
+ BCM_REFERENCE(len_chk);
+
+ /* store event_logset_queried and event_log_max_sets in local variables
+ * to avoid race conditions as they were set from different contexts(preinit)
+ */
+ event_log_max_sets_queried = dhdp->event_log_max_sets_queried;
+ /* Make sure queried is read first with wmb and then max_sets,
+ * as it is done in reverse order during preinit ioctls.
+ */
+ OSL_SMP_WMB();
+ event_log_max_sets = dhdp->event_log_max_sets;
+
+ if (msgtrace_hdr_present)
+ min_expected_len = (MSGTRACE_HDRLEN + EVENT_LOG_BLOCK_LEN);
+ else
+ min_expected_len = EVENT_LOG_BLOCK_LEN;
+
+ /* log trace event consists of:
+ * msgtrace header
+ * event log block header
+ * event log payload
+ */
+ if (!event_data || (datalen <= min_expected_len)) {
+ DHD_ERROR(("%s: Not processing due to invalid event_data : %p or length : %d\n",
+ __FUNCTION__, event_data, datalen));
+ if (event_data && msgtrace_hdr_present) {
+ prhex("event_data dump", event_data, datalen);
+ tmpdata = (char *)event_data + MSGTRACE_HDRLEN;
+ if (tmpdata) {
+ DHD_ERROR(("EVENT_LOG_HDR[0x%x]: Set: 0x%08x length = %d\n",
+ ltoh16(*((uint16 *)(tmpdata+2))),
+ ltoh32(*((uint32 *)(tmpdata + 4))),
+ ltoh16(*((uint16 *)(tmpdata)))));
+ }
+ } else if (!event_data) {
+ DHD_ERROR(("%s: event_data is NULL, cannot dump prhex\n", __FUNCTION__));
+ }
+
+ return;
+ }
+
+ if (msgtrace_hdr_present) {
+ hdr = (msgtrace_hdr_t *)event_data;
+ data = (char *)event_data + MSGTRACE_HDRLEN;
+ datalen -= MSGTRACE_HDRLEN;
+ msgtrace_seqnum = ntoh32(hdr->seqnum);
+ } else {
+ data = (char *)event_data;
+ }
+
+ if (dhd_dbg_msgtrace_seqchk(&seqnum_prev, msgtrace_seqnum))
+ return;
+
+ /* Save the whole message to event log ring */
+ memset(&msg_hdr, 0, sizeof(dhd_dbg_ring_entry_t));
+ logbuf = VMALLOC(dhdp->osh, sizeof(*logentry_header) + datalen);
+ if (logbuf == NULL)
+ return;
+ logentry_header = (struct tracelog_header *)logbuf;
+ logentry_header->magic_num = TRACE_LOG_MAGIC_NUMBER;
+ logentry_header->buf_size = datalen;
+ logentry_header->seq_num = msgtrace_seqnum;
+ msg_hdr.type = DBG_RING_ENTRY_DATA_TYPE;
+
+ ring_data_len = datalen + sizeof(*logentry_header);
+
+ if ((sizeof(*logentry_header) + datalen) > PAYLOAD_MAX_LEN) {
+ DHD_ERROR(("%s:Payload len=%u exceeds max len\n", __FUNCTION__,
+ ((uint)sizeof(*logentry_header) + datalen)));
+ goto exit;
+ }
+
+ msg_hdr.len = sizeof(*logentry_header) + datalen;
+ memcpy(logbuf + sizeof(*logentry_header), data, datalen);
+ DHD_DBGIF(("%s: datalen %d %d\n", __FUNCTION__, msg_hdr.len, datalen));
+#ifndef DHD_DEBUGABILITY_LOG_DUMP_RING
+ dhd_dbg_push_to_ring(dhdp, FW_VERBOSE_RING_ID, &msg_hdr, logbuf);
+#endif
+ /* Print sequence number, originating set and length of received
+ * event log buffer. Refer to event log buffer structure in
+ * event_log.h
+ */
+ DHD_MSGTRACE_LOG(("EVENT_LOG_HDR[0x%x]: Set: 0x%08x length = %d\n",
+ ltoh16(*((uint16 *)(data+2))), ltoh32(*((uint32 *)(data + 4))),
+ ltoh16(*((uint16 *)(data)))));
+
+ logset = ltoh32(*((uint32 *)(data + 4)));
+
+ if (logset >= event_log_max_sets) {
+ DHD_ERROR(("%s logset: %d max: %d out of range queried: %d\n",
+ __FUNCTION__, logset, event_log_max_sets, event_log_max_sets_queried));
+#ifdef DHD_FW_COREDUMP
+ if (event_log_max_sets_queried) {
+ DHD_ERROR(("%s: collect socram for DUMP_TYPE_LOGSET_BEYOND_RANGE\n",
+ __FUNCTION__));
+ dhdp->memdump_type = DUMP_TYPE_LOGSET_BEYOND_RANGE;
+ dhd_bus_mem_dump(dhdp);
+ }
+#endif /* DHD_FW_COREDUMP */
+ }
+
+ block = ltoh16(*((uint16 *)(data + 2)));
+
+ data += EVENT_LOG_BLOCK_HDRLEN;
+ datalen -= EVENT_LOG_BLOCK_HDRLEN;
+
+	/* start parsing from the tail of the packet.
+	 * Sample format of a message:
+	 * 001d3c54 00000064 00000064 001d3c54 001dba08 035d6ce1 0c540639
+	 * 001d3c54 00000064 00000064 035d6d89 0c580439
+	 * 0x0c580439 -- 39 is tag, 04 is count, 580c is format number
+	 * all these uint32 values arrive in reverse order, grouped as EL data;
+	 * while decoding we can only parse from last to first
+ * |<- datalen ->|
+ * |----(payload and maybe more logs)----|event_log_hdr_t|
+ * data log_hdr
+ */
+ dll_init(&list_head);
+ dll_inited = TRUE;
+
+ while (datalen > log_hdr_len) {
+ log_hdr = (event_log_hdr_t *)(data + datalen - log_hdr_len);
+ memset(&prcd_log_hdr, 0, sizeof(prcd_log_hdr));
+ if (!dhd_dbg_process_event_log_hdr(log_hdr, &prcd_log_hdr)) {
+ DHD_ERROR(("%s: Error while parsing event log header\n",
+ __FUNCTION__));
+ }
+
+ /* skip zero padding at end of frame */
+ if (prcd_log_hdr.tag == EVENT_LOG_TAG_NULL) {
+ datalen -= log_hdr_len;
+ continue;
+ }
+ /* Check argument count (for non-ecounter events only),
+ * any event log should contain at least
+ * one argument (4 bytes) for arm cycle count and up to 16
+ * arguments except EVENT_LOG_TAG_STATS which could use the
+ * whole payload of 256 words
+ */
+ if (prcd_log_hdr.count == 0) {
+ break;
+ }
+ /* Both tag_stats and proxd are binary payloads so skip
+ * argument count check for these.
+ */
+ if ((prcd_log_hdr.tag != EVENT_LOG_TAG_STATS) &&
+ (prcd_log_hdr.tag != EVENT_LOG_TAG_PROXD_SAMPLE_COLLECT) &&
+ (prcd_log_hdr.tag != EVENT_LOG_TAG_ROAM_ENHANCED_LOG) &&
+ (prcd_log_hdr.tag != EVENT_LOG_TAG_BCM_TRACE) &&
+ (prcd_log_hdr.count > MAX_NO_OF_ARG)) {
+ break;
+ }
+
+ log_pyld_len = (prcd_log_hdr.count + prcd_log_hdr.ext_event_log_hdr) *
+ DATA_UNIT_FOR_LOG_CNT;
+ /* log data should not cross the event data boundary */
+ if ((uint32)((char *)log_hdr - data) < log_pyld_len) {
+ break;
+ }
+ /* skip 4 bytes time stamp packet */
+ if (prcd_log_hdr.tag == EVENT_LOG_TAG_TS ||
+ prcd_log_hdr.tag == EVENT_LOG_TAG_ENHANCED_TS) {
+ datalen -= (log_pyld_len + log_hdr_len);
+ continue;
+ }
+ if (!(log_item = MALLOC(dhdp->osh, sizeof(*log_item)))) {
+ DHD_ERROR(("%s allocating log list item failed\n",
+ __FUNCTION__));
+ break;
+ }
+
+ log_item->prcd_log_hdr.tag = prcd_log_hdr.tag;
+ log_item->prcd_log_hdr.count = prcd_log_hdr.count;
+ log_item->prcd_log_hdr.fmt_num = prcd_log_hdr.fmt_num;
+ log_item->prcd_log_hdr.fmt_num_raw = prcd_log_hdr.fmt_num_raw;
+ log_item->prcd_log_hdr.armcycle = prcd_log_hdr.armcycle;
+ log_item->prcd_log_hdr.log_ptr = prcd_log_hdr.log_ptr;
+ log_item->prcd_log_hdr.payload_len = prcd_log_hdr.payload_len;
+ log_item->prcd_log_hdr.ext_event_log_hdr = prcd_log_hdr.ext_event_log_hdr;
+ log_item->prcd_log_hdr.binary_payload = prcd_log_hdr.binary_payload;
+
+ dll_insert(&log_item->list, &list_head);
+ datalen -= (log_pyld_len + log_hdr_len);
+ }
+
+ while (!dll_empty(&list_head)) {
+ msg_processed = FALSE;
+ cur = dll_head_p(&list_head);
+
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ log_item = (loglist_item_t *)container_of(cur, loglist_item_t, list);
+ GCC_DIAGNOSTIC_POP();
+
+ plog_hdr = &log_item->prcd_log_hdr;
+#if defined(EWP_ECNTRS_LOGGING) && defined(DHD_LOG_DUMP)
+		/* Ecounter tag can be time_data or log_stats + binary payload */
+ if ((plog_hdr->tag == EVENT_LOG_TAG_ECOUNTERS_TIME_DATA) ||
+ ((plog_hdr->tag == EVENT_LOG_TAG_STATS) &&
+ (plog_hdr->binary_payload))) {
+ if (!ecntr_pushed && dhd_log_dump_ecntr_enabled()) {
+ if (dhd_dbg_send_evtlog_to_ring(plog_hdr, &msg_hdr,
+ dhdp->ecntr_dbg_ring,
+ PAYLOAD_ECNTR_MAX_LEN, logbuf) != BCME_OK) {
+ goto exit;
+ }
+ ecntr_pushed = TRUE;
+ }
+ }
+#endif /* EWP_ECNTRS_LOGGING && DHD_LOG_DUMP */
+
+ if (plog_hdr->tag == EVENT_LOG_TAG_ROAM_ENHANCED_LOG) {
+ print_roam_enhanced_log(plog_hdr);
+ msg_processed = TRUE;
+ }
+#if defined(EWP_RTT_LOGGING) && defined(DHD_LOG_DUMP)
+ if ((plog_hdr->tag == EVENT_LOG_TAG_PROXD_SAMPLE_COLLECT) &&
+ plog_hdr->binary_payload) {
+ if (!rtt_pushed && dhd_log_dump_rtt_enabled()) {
+ if (dhd_dbg_send_evtlog_to_ring(plog_hdr, &msg_hdr,
+ dhdp->rtt_dbg_ring,
+ PAYLOAD_RTT_MAX_LEN, logbuf) != BCME_OK) {
+ goto exit;
+ }
+ rtt_pushed = TRUE;
+ }
+ }
+#endif /* EWP_RTT_LOGGING && DHD_LOG_DUMP */
+
+#ifdef EWP_BCM_TRACE
+ if ((logset == EVENT_LOG_SET_BCM_TRACE) && !bcm_trace_pushed &&
+ plog_hdr->binary_payload) {
+ if (dhd_dbg_send_evtlog_to_ring(plog_hdr, &msg_hdr,
+ dhdp->bcm_trace_dbg_ring,
+ PAYLOAD_BCM_TRACE_MAX_LEN, logbuf) != BCME_OK) {
+ goto exit;
+ }
+ bcm_trace_pushed = TRUE;
+ }
+#endif /* EWP_BCM_TRACE */
+
+#if defined(DHD_EVENT_LOG_FILTER)
+ if (plog_hdr->tag == EVENT_LOG_TAG_STATS) {
+ dhd_event_log_filter_event_handler(dhdp, plog_hdr, plog_hdr->log_ptr);
+ }
+#endif /* DHD_EVENT_LOG_FILTER */
+ if (!msg_processed) {
+ dhd_dbg_verboselog_handler(dhdp, plog_hdr, raw_event_ptr,
+ logset, block, (uint32 *)data);
+ }
+ dll_delete(cur);
+ MFREE(dhdp->osh, log_item, sizeof(*log_item));
+
+ }
+ BCM_REFERENCE(log_hdr);
+exit:
+ while (dll_inited && (!dll_empty(&list_head))) {
+ cur = dll_head_p(&list_head);
+
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ log_item = (loglist_item_t *)container_of(cur, loglist_item_t, list);
+ GCC_DIAGNOSTIC_POP();
+
+ dll_delete(cur);
+ MFREE(dhdp->osh, log_item, sizeof(*log_item));
+ }
+
+ VMFREE(dhdp->osh, logbuf, ring_data_len);
+}
+#else /* !SHOW_LOGTRACE */
+static INLINE void dhd_dbg_verboselog_handler(dhd_pub_t *dhdp,
+ prcd_event_log_hdr_t *plog_hdr, void *raw_event_ptr, uint32 logset, uint16 block,
+ uint32 *data) {};
+INLINE void dhd_dbg_msgtrace_log_parser(dhd_pub_t *dhdp,
+ void *event_data, void *raw_event_ptr, uint datalen,
+ bool msgtrace_hdr_present, uint32 msgtrace_seqnum) {};
+#endif /* SHOW_LOGTRACE */
+void
+dhd_dbg_trace_evnt_handler(dhd_pub_t *dhdp, void *event_data,
+ void *raw_event_ptr, uint datalen)
+{
+ msgtrace_hdr_t *hdr;
+
+ hdr = (msgtrace_hdr_t *)event_data;
+
+ if (hdr->version != MSGTRACE_VERSION) {
+ DHD_DBGIF(("%s unsupported MSGTRACE version, dhd %d, dongle %d\n",
+ __FUNCTION__, MSGTRACE_VERSION, hdr->version));
+ return;
+ }
+
+ if (hdr->trace_type == MSGTRACE_HDR_TYPE_MSG)
+ dhd_dbg_msgtrace_msg_parser(event_data);
+ else if (hdr->trace_type == MSGTRACE_HDR_TYPE_LOG)
+ dhd_dbg_msgtrace_log_parser(dhdp, event_data, raw_event_ptr, datalen, TRUE, 0);
+}
+
+#ifdef BTLOG
+void
+dhd_dbg_bt_log_handler(dhd_pub_t *dhdp, void *data, uint datalen)
+{
+ dhd_dbg_ring_entry_t msg_hdr;
+ int ret;
+
+ /* push to ring */
+ memset(&msg_hdr, 0, sizeof(msg_hdr));
+ msg_hdr.type = DBG_RING_ENTRY_DATA_TYPE;
+ msg_hdr.len = datalen;
+ ret = dhd_dbg_push_to_ring(dhdp, BT_LOG_RING_ID, &msg_hdr, data);
+ if (ret != BCME_OK) {
+ DHD_ERROR(("%s ring push failed %d\n", __FUNCTION__, ret));
+ }
+}
+#endif /* BTLOG */
+
+/*
+ * dhd_dbg_set_event_log_tag : modify the state of an event log tag
+ */
+void
+dhd_dbg_set_event_log_tag(dhd_pub_t *dhdp, uint16 tag, uint8 set)
+{
+ wl_el_tag_params_t pars;
+ char *cmd = "event_log_tag_control";
+ char iovbuf[WLC_IOCTL_SMLEN] = { 0 };
+ int ret;
+
+ memset(&pars, 0, sizeof(pars));
+ pars.tag = tag;
+ pars.set = set;
+ pars.flags = EVENT_LOG_TAG_FLAG_LOG;
+
+ if (!bcm_mkiovar(cmd, (char *)&pars, sizeof(pars), iovbuf, sizeof(iovbuf))) {
+ DHD_ERROR(("%s mkiovar failed\n", __FUNCTION__));
+ return;
+ }
+
+	ret = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+	BCM_REFERENCE(ret); /* the error check below is intentionally disabled */
+//	if (ret) {
+//		DHD_ERROR(("%s set log tag iovar failed %d\n", __FUNCTION__, ret));
+//	}
+}
+
+int
+dhd_dbg_set_configuration(dhd_pub_t *dhdp, int ring_id, int log_level, int flags, uint32 threshold)
+{
+ dhd_dbg_ring_t *ring;
+ uint8 set = 1;
+ int i, array_len = 0;
+ struct log_level_table *log_level_tbl = NULL;
+ if (!dhdp || !dhdp->dbg)
+ return BCME_BADADDR;
+
+ if (!VALID_RING(ring_id)) {
+ DHD_ERROR(("%s : invalid ring_id : %d\n", __FUNCTION__, ring_id));
+ return BCME_RANGE;
+ }
+
+ ring = &dhdp->dbg->dbg_rings[ring_id];
+ dhd_dbg_ring_config(ring, log_level, threshold);
+
+ if (log_level > 0)
+ set = TRUE;
+
+ if (ring->id == FW_VERBOSE_RING_ID) {
+ log_level_tbl = fw_verbose_level_map;
+ array_len = ARRAYSIZE(fw_verbose_level_map);
+ }
+
+ for (i = 0; i < array_len; i++) {
+ if (log_level == 0 || (log_level_tbl[i].log_level > log_level)) {
+ /* clear the reference per ring */
+ ref_tag_tbl[log_level_tbl[i].tag] &= ~(1 << ring_id);
+ } else {
+ /* set the reference per ring */
+ ref_tag_tbl[log_level_tbl[i].tag] |= (1 << ring_id);
+ }
+ set = (ref_tag_tbl[log_level_tbl[i].tag])? 1 : 0;
+ DHD_DBGIF(("%s TAG(%s) is %s for the ring(%s)\n", __FUNCTION__,
+ log_level_tbl[i].desc, (set)? "SET" : "CLEAR", ring->name));
+ dhd_dbg_set_event_log_tag(dhdp, log_level_tbl[i].tag, set);
+ }
+ return BCME_OK;
+}
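+/* Illustrative tag reference counting (hypothetical rings 1 and 3):
+ * if both rings reference a tag, ref_tag_tbl[tag] == 0x0A; clearing the
+ * tag for ring 1 leaves 0x08, so the tag stays SET in the firmware until
+ * the last referencing ring clears it as well.
+ */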
+
+int
+__dhd_dbg_get_ring_status(dhd_dbg_ring_t *ring, dhd_dbg_ring_status_t *get_ring_status)
+{
+ dhd_dbg_ring_status_t ring_status;
+ int ret = BCME_OK;
+
+ if (ring == NULL) {
+ return BCME_BADADDR;
+ }
+
+ bzero(&ring_status, sizeof(dhd_dbg_ring_status_t));
+ RING_STAT_TO_STATUS(ring, ring_status);
+ *get_ring_status = ring_status;
+
+ return ret;
+}
+
+/*
+* dhd_dbg_get_ring_status : get the ring status from the corresponding ring buffer
+* Return: An error code or 0 on success.
+*/
+
+int
+dhd_dbg_get_ring_status(dhd_pub_t *dhdp, int ring_id, dhd_dbg_ring_status_t *dbg_ring_status)
+{
+ int ret = BCME_OK;
+ int id = 0;
+ dhd_dbg_t *dbg;
+ dhd_dbg_ring_t *dbg_ring;
+ if (!dhdp || !dhdp->dbg)
+ return BCME_BADADDR;
+ dbg = dhdp->dbg;
+
+ for (id = DEBUG_RING_ID_INVALID + 1; id < DEBUG_RING_ID_MAX; id++) {
+ dbg_ring = &dbg->dbg_rings[id];
+ if (VALID_RING(dbg_ring->id) && (dbg_ring->id == ring_id)) {
+ __dhd_dbg_get_ring_status(dbg_ring, dbg_ring_status);
+ break;
+ }
+ }
+ if (!VALID_RING(id)) {
+ DHD_ERROR(("%s : cannot find the ring_id : %d\n", __FUNCTION__, ring_id));
+ ret = BCME_NOTFOUND;
+ }
+ return ret;
+}
+
+#ifdef SHOW_LOGTRACE
+void
+dhd_dbg_read_ring_into_trace_buf(dhd_dbg_ring_t *ring, trace_buf_info_t *trace_buf_info)
+{
+ dhd_dbg_ring_status_t ring_status;
+ uint32 rlen = 0;
+
+ rlen = dhd_dbg_ring_pull_single(ring, trace_buf_info->buf, TRACE_LOG_BUF_MAX_SIZE, TRUE);
+
+ trace_buf_info->size = rlen;
+ trace_buf_info->availability = NEXT_BUF_NOT_AVAIL;
+ if (rlen == 0) {
+ trace_buf_info->availability = BUF_NOT_AVAILABLE;
+ return;
+ }
+
+ __dhd_dbg_get_ring_status(ring, &ring_status);
+
+ if (ring_status.written_bytes != ring_status.read_bytes) {
+ trace_buf_info->availability = NEXT_BUF_AVAIL;
+ }
+}
+#endif /* SHOW_LOGTRACE */
+
+/*
+* dhd_dbg_find_ring_id : return the ring_id matching ring_name
+* Return: A valid ring id on success, or an invalid ring id on failure.
+*/
+
+int
+dhd_dbg_find_ring_id(dhd_pub_t *dhdp, char *ring_name)
+{
+ int id;
+ dhd_dbg_t *dbg;
+ dhd_dbg_ring_t *ring;
+
+ if (!dhdp || !dhdp->dbg)
+ return BCME_BADADDR;
+
+ dbg = dhdp->dbg;
+ for (id = DEBUG_RING_ID_INVALID + 1; id < DEBUG_RING_ID_MAX; id++) {
+ ring = &dbg->dbg_rings[id];
+ if (!strncmp((char *)ring->name, ring_name, sizeof(ring->name) - 1))
+ break;
+ }
+ return id;
+}
+
+/*
+* dhd_dbg_get_priv : get the private data of the dhd debugability module
+* Return : NULL on failure or a valid data address
+*/
+void *
+dhd_dbg_get_priv(dhd_pub_t *dhdp)
+{
+ if (!dhdp || !dhdp->dbg)
+ return NULL;
+ return dhdp->dbg->private;
+}
+
+/*
+* dhd_dbg_start : start or stop all of the ring buffers
+* Return: An error code or 0 on success.
+*/
+int
+dhd_dbg_start(dhd_pub_t *dhdp, bool start)
+{
+ int ret = BCME_OK;
+ int ring_id;
+ dhd_dbg_t *dbg;
+ dhd_dbg_ring_t *dbg_ring;
+ if (!dhdp)
+ return BCME_BADARG;
+ dbg = dhdp->dbg;
+
+ for (ring_id = DEBUG_RING_ID_INVALID + 1; ring_id < DEBUG_RING_ID_MAX; ring_id++) {
+ dbg_ring = &dbg->dbg_rings[ring_id];
+ if (!start) {
+ if (VALID_RING(dbg_ring->id)) {
+ dhd_dbg_ring_start(dbg_ring);
+ }
+ }
+ }
+ return ret;
+}
+
+/*
+ * dhd_dbg_send_urgent_evt: send the health check event to the upper layer
+ *
+ * Return: An error code or 0 on success.
+ */
+
+int
+dhd_dbg_send_urgent_evt(dhd_pub_t *dhdp, const void *data, const uint32 len)
+{
+ dhd_dbg_t *dbg;
+ int ret = BCME_OK;
+ if (!dhdp || !dhdp->dbg)
+ return BCME_BADADDR;
+
+ dbg = dhdp->dbg;
+ if (dbg->urgent_notifier) {
+ dbg->urgent_notifier(dhdp, data, len);
+ }
+ return ret;
+}
+
+#if defined(DBG_PKT_MON) || defined(DHD_PKT_LOGGING)
+uint32
+__dhd_dbg_pkt_hash(uintptr_t pkt, uint32 pktid)
+{
+ uint32 __pkt;
+ uint32 __pktid;
+
+ __pkt = ((int)pkt) >= 0 ? (2 * pkt) : (-2 * pkt - 1);
+ __pktid = ((int)pktid) >= 0 ? (2 * pktid) : (-2 * pktid - 1);
+
+ return (__pkt >= __pktid ? (__pkt * __pkt + __pkt + __pktid) :
+ (__pkt + __pktid * __pktid));
+}
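+/* The hash folds the packet pointer and packet id with a pairing-function
+ * style combination. Illustrative computation (hypothetical inputs):
+ *   pkt = 3, pktid = 5 -> __pkt = 6, __pktid = 10
+ *   6 < 10, so the result is 6 + 10 * 10 = 106
+ */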
+
+#define __TIMESPEC_TO_US(ts) \
+ (((uint32)(ts).tv_sec * USEC_PER_SEC) + ((ts).tv_nsec / NSEC_PER_USEC))
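+/* e.g. ts = { .tv_sec = 2, .tv_nsec = 500000 } -> 2000500 us (hypothetical) */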
+
+uint32
+__dhd_dbg_driver_ts_usec(void)
+{
+ struct osl_timespec ts;
+
+ osl_get_monotonic_boottime(&ts);
+ return ((uint32)(__TIMESPEC_TO_US(ts)));
+}
+
+wifi_tx_packet_fate
+__dhd_dbg_map_tx_status_to_pkt_fate(uint16 status)
+{
+ wifi_tx_packet_fate pkt_fate;
+
+ switch (status) {
+ case WLFC_CTL_PKTFLAG_DISCARD:
+ pkt_fate = TX_PKT_FATE_ACKED;
+ break;
+ case WLFC_CTL_PKTFLAG_D11SUPPRESS:
+		/* intentional fall through */
+ case WLFC_CTL_PKTFLAG_WLSUPPRESS:
+ pkt_fate = TX_PKT_FATE_FW_QUEUED;
+ break;
+ case WLFC_CTL_PKTFLAG_TOSSED_BYWLC:
+ pkt_fate = TX_PKT_FATE_FW_DROP_INVALID;
+ break;
+ case WLFC_CTL_PKTFLAG_DISCARD_NOACK:
+ pkt_fate = TX_PKT_FATE_SENT;
+ break;
+ case WLFC_CTL_PKTFLAG_EXPIRED:
+ pkt_fate = TX_PKT_FATE_FW_DROP_EXPTIME;
+ break;
+#ifndef OEM_ANDROID
+ case WLFC_CTL_PKTFLAG_MKTFREE:
+ pkt_fate = TX_PKT_FATE_FW_PKT_FREE;
+ break;
+#endif /* !OEM_ANDROID */
+ default:
+ pkt_fate = TX_PKT_FATE_FW_DROP_OTHER;
+ break;
+ }
+
+ return pkt_fate;
+}
+#endif /* DBG_PKT_MON || DHD_PKT_LOGGING */
+
+#ifdef DBG_PKT_MON
+static int
+__dhd_dbg_free_tx_pkts(dhd_pub_t *dhdp, dhd_dbg_tx_info_t *tx_pkts,
+ uint16 pkt_count)
+{
+ uint16 count;
+
+ count = 0;
+ while ((count < pkt_count) && tx_pkts) {
+ if (tx_pkts->info.pkt) {
+ PKTFREE(dhdp->osh, tx_pkts->info.pkt, TRUE);
+ }
+ tx_pkts++;
+ count++;
+ }
+
+ return BCME_OK;
+}
+
+static int
+__dhd_dbg_free_rx_pkts(dhd_pub_t *dhdp, dhd_dbg_rx_info_t *rx_pkts,
+ uint16 pkt_count)
+{
+ uint16 count;
+
+ count = 0;
+ while ((count < pkt_count) && rx_pkts) {
+ if (rx_pkts->info.pkt) {
+ PKTFREE(dhdp->osh, rx_pkts->info.pkt, TRUE);
+ }
+ rx_pkts++;
+ count++;
+ }
+
+ return BCME_OK;
+}
+
+void
+__dhd_dbg_dump_pkt_info(dhd_pub_t *dhdp, dhd_dbg_pkt_info_t *info)
+{
+ if (DHD_PKT_MON_DUMP_ON()) {
+ DHD_PKT_MON(("payload type = %d\n", info->payload_type));
+ DHD_PKT_MON(("driver ts = %u\n", info->driver_ts));
+ DHD_PKT_MON(("firmware ts = %u\n", info->firmware_ts));
+ DHD_PKT_MON(("packet hash = %u\n", info->pkt_hash));
+ DHD_PKT_MON(("packet length = %zu\n", info->pkt_len));
+ DHD_PKT_MON(("packet address = %p\n", info->pkt));
+ DHD_PKT_MON(("packet data = \n"));
+ if (DHD_PKT_MON_ON()) {
+ prhex(NULL, PKTDATA(dhdp->osh, info->pkt), info->pkt_len);
+ }
+ }
+}
+
+void
+__dhd_dbg_dump_tx_pkt_info(dhd_pub_t *dhdp, dhd_dbg_tx_info_t *tx_pkt,
+ uint16 count)
+{
+ if (DHD_PKT_MON_DUMP_ON()) {
+ DHD_PKT_MON(("\nTX (count: %d)\n", ++count));
+ DHD_PKT_MON(("packet fate = %d\n", tx_pkt->fate));
+ __dhd_dbg_dump_pkt_info(dhdp, &tx_pkt->info);
+ }
+}
+
+void
+__dhd_dbg_dump_rx_pkt_info(dhd_pub_t *dhdp, dhd_dbg_rx_info_t *rx_pkt,
+ uint16 count)
+{
+ if (DHD_PKT_MON_DUMP_ON()) {
+ DHD_PKT_MON(("\nRX (count: %d)\n", ++count));
+ DHD_PKT_MON(("packet fate = %d\n", rx_pkt->fate));
+ __dhd_dbg_dump_pkt_info(dhdp, &rx_pkt->info);
+ }
+}
+
+int
+dhd_dbg_attach_pkt_monitor(dhd_pub_t *dhdp,
+ dbg_mon_tx_pkts_t tx_pkt_mon,
+ dbg_mon_tx_status_t tx_status_mon,
+ dbg_mon_rx_pkts_t rx_pkt_mon)
+{
+
+ dhd_dbg_tx_report_t *tx_report = NULL;
+ dhd_dbg_rx_report_t *rx_report = NULL;
+ dhd_dbg_tx_info_t *tx_pkts = NULL;
+ dhd_dbg_rx_info_t *rx_pkts = NULL;
+ dhd_dbg_pkt_mon_state_t tx_pkt_state;
+ dhd_dbg_pkt_mon_state_t tx_status_state;
+ dhd_dbg_pkt_mon_state_t rx_pkt_state;
+ uint32 alloc_len;
+ int ret = BCME_OK;
+ unsigned long flags;
+
+ if (!dhdp || !dhdp->dbg) {
+ DHD_PKT_MON(("%s(): dhdp=%p, dhdp->dbg=%p\n", __FUNCTION__,
+ dhdp, (dhdp ? dhdp->dbg : NULL)));
+ return -EINVAL;
+ }
+
+ DHD_PKT_MON_LOCK(dhdp->dbg->pkt_mon_lock, flags);
+ tx_pkt_state = dhdp->dbg->pkt_mon.tx_pkt_state;
+	tx_status_state = dhdp->dbg->pkt_mon.tx_status_state;
+ rx_pkt_state = dhdp->dbg->pkt_mon.rx_pkt_state;
+
+ if (PKT_MON_ATTACHED(tx_pkt_state) || PKT_MON_ATTACHED(tx_status_state) ||
+ PKT_MON_ATTACHED(rx_pkt_state)) {
+ DHD_PKT_MON(("%s(): packet monitor is already attached, "
+ "tx_pkt_state=%d, tx_status_state=%d, rx_pkt_state=%d\n",
+ __FUNCTION__, tx_pkt_state, tx_status_state, rx_pkt_state));
+ DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
+ /* return success as the intention was to initialize packet monitor */
+ return BCME_OK;
+ }
+
+ /* allocate and initialize tx packet monitoring */
+ alloc_len = sizeof(*tx_report);
+ tx_report = (dhd_dbg_tx_report_t *)MALLOCZ(dhdp->osh, alloc_len);
+ if (unlikely(!tx_report)) {
+ DHD_ERROR(("%s(): could not allocate memory for - "
+ "dhd_dbg_tx_report_t\n", __FUNCTION__));
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ alloc_len = (sizeof(*tx_pkts) * MAX_FATE_LOG_LEN);
+ tx_pkts = (dhd_dbg_tx_info_t *)MALLOCZ(dhdp->osh, alloc_len);
+ if (unlikely(!tx_pkts)) {
+ DHD_ERROR(("%s(): could not allocate memory for - "
+ "dhd_dbg_tx_info_t\n", __FUNCTION__));
+ ret = -ENOMEM;
+ goto fail;
+ }
+ dhdp->dbg->pkt_mon.tx_report = tx_report;
+ dhdp->dbg->pkt_mon.tx_report->tx_pkts = tx_pkts;
+ dhdp->dbg->pkt_mon.tx_pkt_mon = tx_pkt_mon;
+ dhdp->dbg->pkt_mon.tx_status_mon = tx_status_mon;
+ dhdp->dbg->pkt_mon.tx_pkt_state = PKT_MON_ATTACHED;
+ dhdp->dbg->pkt_mon.tx_status_state = PKT_MON_ATTACHED;
+
+	/* allocate and initialize rx packet monitoring */
+ alloc_len = sizeof(*rx_report);
+ rx_report = (dhd_dbg_rx_report_t *)MALLOCZ(dhdp->osh, alloc_len);
+ if (unlikely(!rx_report)) {
+ DHD_ERROR(("%s(): could not allocate memory for - "
+ "dhd_dbg_rx_report_t\n", __FUNCTION__));
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ alloc_len = (sizeof(*rx_pkts) * MAX_FATE_LOG_LEN);
+ rx_pkts = (dhd_dbg_rx_info_t *)MALLOCZ(dhdp->osh, alloc_len);
+ if (unlikely(!rx_pkts)) {
+ DHD_ERROR(("%s(): could not allocate memory for - "
+ "dhd_dbg_rx_info_t\n", __FUNCTION__));
+ ret = -ENOMEM;
+ goto fail;
+ }
+ dhdp->dbg->pkt_mon.rx_report = rx_report;
+ dhdp->dbg->pkt_mon.rx_report->rx_pkts = rx_pkts;
+ dhdp->dbg->pkt_mon.rx_pkt_mon = rx_pkt_mon;
+ dhdp->dbg->pkt_mon.rx_pkt_state = PKT_MON_ATTACHED;
+
+ DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
+ DHD_PKT_MON(("%s(): packet monitor attach succeeded\n", __FUNCTION__));
+ return ret;
+
+fail:
+ /* tx packet monitoring */
+ if (tx_pkts) {
+ alloc_len = (sizeof(*tx_pkts) * MAX_FATE_LOG_LEN);
+ MFREE(dhdp->osh, tx_pkts, alloc_len);
+ }
+ if (tx_report) {
+ alloc_len = sizeof(*tx_report);
+ MFREE(dhdp->osh, tx_report, alloc_len);
+ }
+	/* tx_report was freed above; clearing its tx_pkts member here would
+	 * dereference a NULL pointer, so only the handles are reset.
+	 */
+	dhdp->dbg->pkt_mon.tx_report = NULL;
+ dhdp->dbg->pkt_mon.tx_pkt_mon = NULL;
+ dhdp->dbg->pkt_mon.tx_status_mon = NULL;
+ dhdp->dbg->pkt_mon.tx_pkt_state = PKT_MON_DETACHED;
+ dhdp->dbg->pkt_mon.tx_status_state = PKT_MON_DETACHED;
+
+ /* rx packet monitoring */
+ if (rx_pkts) {
+ alloc_len = (sizeof(*rx_pkts) * MAX_FATE_LOG_LEN);
+ MFREE(dhdp->osh, rx_pkts, alloc_len);
+ }
+ if (rx_report) {
+ alloc_len = sizeof(*rx_report);
+ MFREE(dhdp->osh, rx_report, alloc_len);
+ }
+	/* rx_report was freed above; clearing its rx_pkts member here would
+	 * dereference a NULL pointer, so only the handles are reset.
+	 */
+	dhdp->dbg->pkt_mon.rx_report = NULL;
+ dhdp->dbg->pkt_mon.rx_pkt_mon = NULL;
+ dhdp->dbg->pkt_mon.rx_pkt_state = PKT_MON_DETACHED;
+
+ DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
+ DHD_ERROR(("%s(): packet monitor attach failed\n", __FUNCTION__));
+ return ret;
+}
+
+int
+dhd_dbg_start_pkt_monitor(dhd_pub_t *dhdp)
+{
+ dhd_dbg_tx_report_t *tx_report;
+ dhd_dbg_rx_report_t *rx_report;
+ dhd_dbg_pkt_mon_state_t tx_pkt_state;
+ dhd_dbg_pkt_mon_state_t tx_status_state;
+ dhd_dbg_pkt_mon_state_t rx_pkt_state;
+ unsigned long flags;
+
+ if (!dhdp || !dhdp->dbg) {
+ DHD_PKT_MON(("%s(): dhdp=%p, dhdp->dbg=%p\n", __FUNCTION__,
+ dhdp, (dhdp ? dhdp->dbg : NULL)));
+ return -EINVAL;
+ }
+
+ DHD_PKT_MON_LOCK(dhdp->dbg->pkt_mon_lock, flags);
+ tx_pkt_state = dhdp->dbg->pkt_mon.tx_pkt_state;
+ tx_status_state = dhdp->dbg->pkt_mon.tx_status_state;
+ rx_pkt_state = dhdp->dbg->pkt_mon.rx_pkt_state;
+
+ if (PKT_MON_DETACHED(tx_pkt_state) || PKT_MON_DETACHED(tx_status_state) ||
+ PKT_MON_DETACHED(rx_pkt_state)) {
+ DHD_PKT_MON(("%s(): packet monitor is not yet enabled, "
+ "tx_pkt_state=%d, tx_status_state=%d, rx_pkt_state=%d\n",
+ __FUNCTION__, tx_pkt_state, tx_status_state, rx_pkt_state));
+ DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
+ return -EINVAL;
+ }
+
+ dhdp->dbg->pkt_mon.tx_pkt_state = PKT_MON_STARTING;
+ dhdp->dbg->pkt_mon.tx_status_state = PKT_MON_STARTING;
+ dhdp->dbg->pkt_mon.rx_pkt_state = PKT_MON_STARTING;
+
+ tx_report = dhdp->dbg->pkt_mon.tx_report;
+ rx_report = dhdp->dbg->pkt_mon.rx_report;
+ if (!tx_report || !rx_report) {
+ DHD_PKT_MON(("%s(): tx_report=%p, rx_report=%p\n",
+ __FUNCTION__, tx_report, rx_report));
+ DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
+ return -EINVAL;
+ }
+
+ tx_pkt_state = dhdp->dbg->pkt_mon.tx_pkt_state;
+ tx_status_state = dhdp->dbg->pkt_mon.tx_status_state;
+ rx_pkt_state = dhdp->dbg->pkt_mon.rx_pkt_state;
+
+ /* Safe to free packets as state pkt_state is STARTING */
+ __dhd_dbg_free_tx_pkts(dhdp, tx_report->tx_pkts, tx_report->pkt_pos);
+
+ __dhd_dbg_free_rx_pkts(dhdp, rx_report->rx_pkts, rx_report->pkt_pos);
+
+	/* reset array position */
+ tx_report->pkt_pos = 0;
+ tx_report->status_pos = 0;
+ dhdp->dbg->pkt_mon.tx_pkt_state = PKT_MON_STARTED;
+ dhdp->dbg->pkt_mon.tx_status_state = PKT_MON_STARTED;
+
+ rx_report->pkt_pos = 0;
+ dhdp->dbg->pkt_mon.rx_pkt_state = PKT_MON_STARTED;
+ DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
+
+ DHD_PKT_MON(("%s(): packet monitor started\n", __FUNCTION__));
+ return BCME_OK;
+}
+
+int
+dhd_dbg_monitor_tx_pkts(dhd_pub_t *dhdp, void *pkt, uint32 pktid)
+{
+ dhd_dbg_tx_report_t *tx_report;
+ dhd_dbg_tx_info_t *tx_pkts;
+ dhd_dbg_pkt_mon_state_t tx_pkt_state;
+ uint32 pkt_hash, driver_ts;
+ uint16 pkt_pos;
+ unsigned long flags;
+
+ if (!dhdp || !dhdp->dbg) {
+ DHD_PKT_MON(("%s(): dhdp=%p, dhdp->dbg=%p\n", __FUNCTION__,
+ dhdp, (dhdp ? dhdp->dbg : NULL)));
+ return -EINVAL;
+ }
+
+ DHD_PKT_MON_LOCK(dhdp->dbg->pkt_mon_lock, flags);
+ tx_pkt_state = dhdp->dbg->pkt_mon.tx_pkt_state;
+ if (PKT_MON_STARTED(tx_pkt_state)) {
+ tx_report = dhdp->dbg->pkt_mon.tx_report;
+ pkt_pos = tx_report->pkt_pos;
+
+ if (!PKT_MON_PKT_FULL(pkt_pos)) {
+ tx_pkts = tx_report->tx_pkts;
+ pkt_hash = __dhd_dbg_pkt_hash((uintptr_t)pkt, pktid);
+ driver_ts = __dhd_dbg_driver_ts_usec();
+
+ tx_pkts[pkt_pos].info.pkt = PKTDUP(dhdp->osh, pkt);
+ tx_pkts[pkt_pos].info.pkt_len = PKTLEN(dhdp->osh, pkt);
+ tx_pkts[pkt_pos].info.pkt_hash = pkt_hash;
+ tx_pkts[pkt_pos].info.driver_ts = driver_ts;
+ tx_pkts[pkt_pos].info.firmware_ts = 0U;
+ tx_pkts[pkt_pos].info.payload_type = FRAME_TYPE_ETHERNET_II;
+ tx_pkts[pkt_pos].fate = TX_PKT_FATE_DRV_QUEUED;
+
+ tx_report->pkt_pos++;
+ } else {
+ dhdp->dbg->pkt_mon.tx_pkt_state = PKT_MON_STOPPED;
+ DHD_PKT_MON(("%s(): tx pkt logging stopped, reached "
+ "max limit\n", __FUNCTION__));
+ }
+ }
+
+ DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
+ return BCME_OK;
+}
+
+int
+dhd_dbg_monitor_tx_status(dhd_pub_t *dhdp, void *pkt, uint32 pktid,
+ uint16 status)
+{
+ dhd_dbg_tx_report_t *tx_report;
+ dhd_dbg_tx_info_t *tx_pkt;
+ dhd_dbg_pkt_mon_state_t tx_status_state;
+ wifi_tx_packet_fate pkt_fate;
+ uint32 pkt_hash, temp_hash;
+ uint16 pkt_pos, status_pos;
+ int16 count;
+ bool found = FALSE;
+ unsigned long flags;
+
+ if (!dhdp || !dhdp->dbg) {
+ DHD_PKT_MON(("%s(): dhdp=%p, dhdp->dbg=%p\n", __FUNCTION__,
+ dhdp, (dhdp ? dhdp->dbg : NULL)));
+ return -EINVAL;
+ }
+
+ DHD_PKT_MON_LOCK(dhdp->dbg->pkt_mon_lock, flags);
+ tx_status_state = dhdp->dbg->pkt_mon.tx_status_state;
+ if (PKT_MON_STARTED(tx_status_state)) {
+ tx_report = dhdp->dbg->pkt_mon.tx_report;
+ pkt_pos = tx_report->pkt_pos;
+ status_pos = tx_report->status_pos;
+
+ if (!PKT_MON_STATUS_FULL(pkt_pos, status_pos)) {
+ pkt_hash = __dhd_dbg_pkt_hash((uintptr_t)pkt, pktid);
+ pkt_fate = __dhd_dbg_map_tx_status_to_pkt_fate(status);
+
+ /* best bet (in-order tx completion) */
+ count = status_pos;
+ tx_pkt = (((dhd_dbg_tx_info_t *)tx_report->tx_pkts) + status_pos);
+ while ((count < pkt_pos) && tx_pkt) {
+ temp_hash = tx_pkt->info.pkt_hash;
+ if (temp_hash == pkt_hash) {
+ tx_pkt->fate = pkt_fate;
+ tx_report->status_pos++;
+ found = TRUE;
+ break;
+ }
+ tx_pkt++;
+ count++;
+ }
+
+ /* search until beginning (handles out-of-order completion) */
+ if (!found) {
+ count = status_pos - 1;
+ tx_pkt = (((dhd_dbg_tx_info_t *)tx_report->tx_pkts) + count);
+ while ((count >= 0) && tx_pkt) {
+ temp_hash = tx_pkt->info.pkt_hash;
+ if (temp_hash == pkt_hash) {
+ tx_pkt->fate = pkt_fate;
+ tx_report->status_pos++;
+ found = TRUE;
+ break;
+ }
+ tx_pkt--;
+ count--;
+ }
+
+ if (!found) {
+ /* still couldn't match tx_status */
+ DHD_INFO(("%s(): couldn't match tx_status, pkt_pos=%u, "
+ "status_pos=%u, pkt_fate=%u\n", __FUNCTION__,
+ pkt_pos, status_pos, pkt_fate));
+ }
+ }
+ } else {
+ dhdp->dbg->pkt_mon.tx_status_state = PKT_MON_STOPPED;
+ DHD_PKT_MON(("%s(): tx_status logging stopped, reached "
+ "max limit\n", __FUNCTION__));
+ }
+ }
+
+ DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
+ return BCME_OK;
+}
+
+int
+dhd_dbg_monitor_rx_pkts(dhd_pub_t *dhdp, void *pkt)
+{
+ dhd_dbg_rx_report_t *rx_report;
+ dhd_dbg_rx_info_t *rx_pkts;
+ dhd_dbg_pkt_mon_state_t rx_pkt_state;
+ uint32 driver_ts;
+ uint16 pkt_pos;
+ unsigned long flags;
+
+ if (!dhdp || !dhdp->dbg) {
+ DHD_PKT_MON(("%s(): dhdp=%p, dhdp->dbg=%p\n", __FUNCTION__,
+ dhdp, (dhdp ? dhdp->dbg : NULL)));
+ return -EINVAL;
+ }
+
+ DHD_PKT_MON_LOCK(dhdp->dbg->pkt_mon_lock, flags);
+ rx_pkt_state = dhdp->dbg->pkt_mon.rx_pkt_state;
+ if (PKT_MON_STARTED(rx_pkt_state)) {
+ rx_report = dhdp->dbg->pkt_mon.rx_report;
+ pkt_pos = rx_report->pkt_pos;
+
+ if (!PKT_MON_PKT_FULL(pkt_pos)) {
+ rx_pkts = rx_report->rx_pkts;
+ driver_ts = __dhd_dbg_driver_ts_usec();
+
+ rx_pkts[pkt_pos].info.pkt = PKTDUP(dhdp->osh, pkt);
+ rx_pkts[pkt_pos].info.pkt_len = PKTLEN(dhdp->osh, pkt);
+ rx_pkts[pkt_pos].info.pkt_hash = 0U;
+ rx_pkts[pkt_pos].info.driver_ts = driver_ts;
+ rx_pkts[pkt_pos].info.firmware_ts = 0U;
+ rx_pkts[pkt_pos].info.payload_type = FRAME_TYPE_ETHERNET_II;
+ rx_pkts[pkt_pos].fate = RX_PKT_FATE_SUCCESS;
+
+ rx_report->pkt_pos++;
+ } else {
+ dhdp->dbg->pkt_mon.rx_pkt_state = PKT_MON_STOPPED;
+ DHD_PKT_MON(("%s(): rx pkt logging stopped, reached "
+ "max limit\n", __FUNCTION__));
+ }
+ }
+
+ DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
+ return BCME_OK;
+}
+
+int
+dhd_dbg_stop_pkt_monitor(dhd_pub_t *dhdp)
+{
+ dhd_dbg_pkt_mon_state_t tx_pkt_state;
+ dhd_dbg_pkt_mon_state_t tx_status_state;
+ dhd_dbg_pkt_mon_state_t rx_pkt_state;
+ unsigned long flags;
+
+ if (!dhdp || !dhdp->dbg) {
+ DHD_PKT_MON(("%s(): dhdp=%p, dhdp->dbg=%p\n", __FUNCTION__,
+ dhdp, (dhdp ? dhdp->dbg : NULL)));
+ return -EINVAL;
+ }
+
+ DHD_PKT_MON_LOCK(dhdp->dbg->pkt_mon_lock, flags);
+ tx_pkt_state = dhdp->dbg->pkt_mon.tx_pkt_state;
+ tx_status_state = dhdp->dbg->pkt_mon.tx_status_state;
+ rx_pkt_state = dhdp->dbg->pkt_mon.rx_pkt_state;
+
+ if (PKT_MON_DETACHED(tx_pkt_state) || PKT_MON_DETACHED(tx_status_state) ||
+ PKT_MON_DETACHED(rx_pkt_state)) {
+ DHD_PKT_MON(("%s(): packet monitor is not yet enabled, "
+ "tx_pkt_state=%d, tx_status_state=%d, rx_pkt_state=%d\n",
+ __FUNCTION__, tx_pkt_state, tx_status_state, rx_pkt_state));
+ DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
+ return -EINVAL;
+ }
+ dhdp->dbg->pkt_mon.tx_pkt_state = PKT_MON_STOPPED;
+ dhdp->dbg->pkt_mon.tx_status_state = PKT_MON_STOPPED;
+ dhdp->dbg->pkt_mon.rx_pkt_state = PKT_MON_STOPPED;
+ DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
+
+ DHD_PKT_MON(("%s(): packet monitor stopped\n", __FUNCTION__));
+ return BCME_OK;
+}
+
+#define __COPY_TO_USER(to, from, n) \
+ do { \
+ int __ret; \
+ __ret = copy_to_user((void __user *)(to), (void *)(from), \
+ (unsigned long)(n)); \
+ if (unlikely(__ret)) { \
+ DHD_ERROR(("%s():%d: copy_to_user failed, ret=%d\n", \
+ __FUNCTION__, __LINE__, __ret)); \
+ return __ret; \
+ } \
+ } while (0);
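+
+/*
+ * NOTE: a failed copy_to_user() makes this macro return from the
+ * *calling* function with the number of bytes left uncopied, so it is
+ * only safe in int-returning functions. The callers below invoke it
+ * while holding DHD_PKT_MON_LOCK, so an early return also skips the
+ * matching DHD_PKT_MON_UNLOCK().
+ */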
+
+int
+dhd_dbg_monitor_get_tx_pkts(dhd_pub_t *dhdp, void __user *user_buf,
+ uint16 req_count, uint16 *resp_count)
+{
+ dhd_dbg_tx_report_t *tx_report;
+ dhd_dbg_tx_info_t *tx_pkt;
+ wifi_tx_report_t *ptr;
+ compat_wifi_tx_report_t *cptr;
+ dhd_dbg_pkt_mon_state_t tx_pkt_state;
+ dhd_dbg_pkt_mon_state_t tx_status_state;
+ uint16 pkt_count, count;
+ unsigned long flags;
+
+ BCM_REFERENCE(ptr);
+ BCM_REFERENCE(cptr);
+
+ if (!dhdp || !dhdp->dbg) {
+ DHD_PKT_MON(("%s(): dhdp=%p, dhdp->dbg=%p\n", __FUNCTION__,
+ dhdp, (dhdp ? dhdp->dbg : NULL)));
+ return -EINVAL;
+ }
+
+ DHD_PKT_MON_LOCK(dhdp->dbg->pkt_mon_lock, flags);
+ tx_pkt_state = dhdp->dbg->pkt_mon.tx_pkt_state;
+ tx_status_state = dhdp->dbg->pkt_mon.tx_status_state;
+ if (PKT_MON_DETACHED(tx_pkt_state) || PKT_MON_DETACHED(tx_status_state)) {
+ DHD_PKT_MON(("%s(): packet monitor is not yet enabled, "
+ "tx_pkt_state=%d, tx_status_state=%d\n", __FUNCTION__,
+ tx_pkt_state, tx_status_state));
+ DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
+ return -EINVAL;
+ }
+
+ count = 0;
+ tx_report = dhdp->dbg->pkt_mon.tx_report;
+ tx_pkt = tx_report->tx_pkts;
+ pkt_count = MIN(req_count, tx_report->status_pos);
+
+#ifdef CONFIG_COMPAT
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0))
+ if (in_compat_syscall())
+#else
+ if (is_compat_task())
+#endif
+ {
+ cptr = (compat_wifi_tx_report_t *)user_buf;
+ while ((count < pkt_count) && tx_pkt && cptr) {
+ compat_wifi_tx_report_t *comp_ptr = compat_ptr((uintptr_t) cptr);
+ compat_dhd_dbg_pkt_info_t compat_tx_pkt;
+ __dhd_dbg_dump_tx_pkt_info(dhdp, tx_pkt, count);
+ __COPY_TO_USER(&comp_ptr->fate, &tx_pkt->fate, sizeof(tx_pkt->fate));
+
+ compat_tx_pkt.payload_type = tx_pkt->info.payload_type;
+ compat_tx_pkt.pkt_len = tx_pkt->info.pkt_len;
+ compat_tx_pkt.driver_ts = tx_pkt->info.driver_ts;
+ compat_tx_pkt.firmware_ts = tx_pkt->info.firmware_ts;
+ compat_tx_pkt.pkt_hash = tx_pkt->info.pkt_hash;
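+			/* copy the fields that precede pkt_hash in one shot */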
+ __COPY_TO_USER(&comp_ptr->frame_inf.payload_type,
+ &compat_tx_pkt.payload_type,
+ OFFSETOF(compat_dhd_dbg_pkt_info_t, pkt_hash));
+ __COPY_TO_USER(comp_ptr->frame_inf.frame_content.ethernet_ii,
+ PKTDATA(dhdp->osh, tx_pkt->info.pkt), tx_pkt->info.pkt_len);
+
+ cptr++;
+ tx_pkt++;
+ count++;
+ }
+ } else
+#endif /* CONFIG_COMPAT */
+ {
+ ptr = (wifi_tx_report_t *)user_buf;
+ while ((count < pkt_count) && tx_pkt && ptr) {
+ __dhd_dbg_dump_tx_pkt_info(dhdp, tx_pkt, count);
+ __COPY_TO_USER(&ptr->fate, &tx_pkt->fate, sizeof(tx_pkt->fate));
+ __COPY_TO_USER(&ptr->frame_inf.payload_type,
+ &tx_pkt->info.payload_type,
+ OFFSETOF(dhd_dbg_pkt_info_t, pkt_hash));
+ __COPY_TO_USER(ptr->frame_inf.frame_content.ethernet_ii,
+ PKTDATA(dhdp->osh, tx_pkt->info.pkt), tx_pkt->info.pkt_len);
+
+ ptr++;
+ tx_pkt++;
+ count++;
+ }
+ }
+ *resp_count = pkt_count;
+
+ DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
+ if (!pkt_count) {
+		DHD_ERROR(("%s(): no tx_status in tx completion messages, "
+			"make sure that 'd11status' is enabled in firmware, "
+			"pkt_count=%u\n", __FUNCTION__, pkt_count));
+ }
+
+ return BCME_OK;
+}
+
+int
+dhd_dbg_monitor_get_rx_pkts(dhd_pub_t *dhdp, void __user *user_buf,
+ uint16 req_count, uint16 *resp_count)
+{
+ dhd_dbg_rx_report_t *rx_report;
+ dhd_dbg_rx_info_t *rx_pkt;
+ wifi_rx_report_t *ptr;
+ compat_wifi_rx_report_t *cptr;
+ dhd_dbg_pkt_mon_state_t rx_pkt_state;
+ uint16 pkt_count, count;
+ unsigned long flags;
+
+ BCM_REFERENCE(ptr);
+ BCM_REFERENCE(cptr);
+
+ if (!dhdp || !dhdp->dbg) {
+ DHD_PKT_MON(("%s(): dhdp=%p, dhdp->dbg=%p\n", __FUNCTION__,
+ dhdp, (dhdp ? dhdp->dbg : NULL)));
+ return -EINVAL;
+ }
+
+ DHD_PKT_MON_LOCK(dhdp->dbg->pkt_mon_lock, flags);
+ rx_pkt_state = dhdp->dbg->pkt_mon.rx_pkt_state;
+ if (PKT_MON_DETACHED(rx_pkt_state)) {
+		DHD_PKT_MON(("%s(): packet fetch is not allowed, "
+ "rx_pkt_state=%d\n", __FUNCTION__, rx_pkt_state));
+ DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
+ return -EINVAL;
+ }
+
+ count = 0;
+ rx_report = dhdp->dbg->pkt_mon.rx_report;
+ rx_pkt = rx_report->rx_pkts;
+ pkt_count = MIN(req_count, rx_report->pkt_pos);
+
+#ifdef CONFIG_COMPAT
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0))
+ if (in_compat_syscall())
+#else
+ if (is_compat_task())
+#endif
+ {
+ cptr = (compat_wifi_rx_report_t *)user_buf;
+ while ((count < pkt_count) && rx_pkt && cptr) {
+ compat_wifi_rx_report_t *comp_ptr = compat_ptr((uintptr_t) cptr);
+ compat_dhd_dbg_pkt_info_t compat_rx_pkt;
+ __dhd_dbg_dump_rx_pkt_info(dhdp, rx_pkt, count);
+ __COPY_TO_USER(&comp_ptr->fate, &rx_pkt->fate, sizeof(rx_pkt->fate));
+
+ compat_rx_pkt.payload_type = rx_pkt->info.payload_type;
+ compat_rx_pkt.pkt_len = rx_pkt->info.pkt_len;
+ compat_rx_pkt.driver_ts = rx_pkt->info.driver_ts;
+ compat_rx_pkt.firmware_ts = rx_pkt->info.firmware_ts;
+ compat_rx_pkt.pkt_hash = rx_pkt->info.pkt_hash;
+ __COPY_TO_USER(&comp_ptr->frame_inf.payload_type,
+ &compat_rx_pkt.payload_type,
+ OFFSETOF(compat_dhd_dbg_pkt_info_t, pkt_hash));
+ __COPY_TO_USER(comp_ptr->frame_inf.frame_content.ethernet_ii,
+ PKTDATA(dhdp->osh, rx_pkt->info.pkt), rx_pkt->info.pkt_len);
+
+ cptr++;
+ rx_pkt++;
+ count++;
+ }
+ } else
+#endif /* CONFIG_COMPAT */
+ {
+ ptr = (wifi_rx_report_t *)user_buf;
+ while ((count < pkt_count) && rx_pkt && ptr) {
+ __dhd_dbg_dump_rx_pkt_info(dhdp, rx_pkt, count);
+
+ __COPY_TO_USER(&ptr->fate, &rx_pkt->fate, sizeof(rx_pkt->fate));
+ __COPY_TO_USER(&ptr->frame_inf.payload_type,
+ &rx_pkt->info.payload_type,
+ OFFSETOF(dhd_dbg_pkt_info_t, pkt_hash));
+ __COPY_TO_USER(ptr->frame_inf.frame_content.ethernet_ii,
+ PKTDATA(dhdp->osh, rx_pkt->info.pkt), rx_pkt->info.pkt_len);
+
+ ptr++;
+ rx_pkt++;
+ count++;
+ }
+ }
+
+ *resp_count = pkt_count;
+ DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
+
+ return BCME_OK;
+}
+
+int
+dhd_dbg_detach_pkt_monitor(dhd_pub_t *dhdp)
+{
+ dhd_dbg_tx_report_t *tx_report;
+ dhd_dbg_rx_report_t *rx_report;
+ dhd_dbg_pkt_mon_state_t tx_pkt_state;
+ dhd_dbg_pkt_mon_state_t tx_status_state;
+ dhd_dbg_pkt_mon_state_t rx_pkt_state;
+ unsigned long flags;
+
+ if (!dhdp || !dhdp->dbg) {
+ DHD_PKT_MON(("%s(): dhdp=%p, dhdp->dbg=%p\n", __FUNCTION__,
+ dhdp, (dhdp ? dhdp->dbg : NULL)));
+ return -EINVAL;
+ }
+
+ DHD_PKT_MON_LOCK(dhdp->dbg->pkt_mon_lock, flags);
+ tx_pkt_state = dhdp->dbg->pkt_mon.tx_pkt_state;
+ tx_status_state = dhdp->dbg->pkt_mon.tx_status_state;
+ rx_pkt_state = dhdp->dbg->pkt_mon.rx_pkt_state;
+
+ if (PKT_MON_DETACHED(tx_pkt_state) || PKT_MON_DETACHED(tx_status_state) ||
+ PKT_MON_DETACHED(rx_pkt_state)) {
+ DHD_PKT_MON(("%s(): packet monitor is already detached, "
+ "tx_pkt_state=%d, tx_status_state=%d, rx_pkt_state=%d\n",
+ __FUNCTION__, tx_pkt_state, tx_status_state, rx_pkt_state));
+ DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
+ return -EINVAL;
+ }
+
+ tx_report = dhdp->dbg->pkt_mon.tx_report;
+ rx_report = dhdp->dbg->pkt_mon.rx_report;
+
+	/* free and de-initialize tx packet monitoring */
+ dhdp->dbg->pkt_mon.tx_pkt_state = PKT_MON_DETACHED;
+ dhdp->dbg->pkt_mon.tx_status_state = PKT_MON_DETACHED;
+ if (tx_report) {
+ if (tx_report->tx_pkts) {
+ __dhd_dbg_free_tx_pkts(dhdp, tx_report->tx_pkts,
+ tx_report->pkt_pos);
+ MFREE(dhdp->osh, tx_report->tx_pkts,
+ (sizeof(*tx_report->tx_pkts) * MAX_FATE_LOG_LEN));
+ dhdp->dbg->pkt_mon.tx_report->tx_pkts = NULL;
+ }
+ MFREE(dhdp->osh, tx_report, sizeof(*tx_report));
+ dhdp->dbg->pkt_mon.tx_report = NULL;
+ }
+ dhdp->dbg->pkt_mon.tx_pkt_mon = NULL;
+ dhdp->dbg->pkt_mon.tx_status_mon = NULL;
+
+	/* free and de-initialize rx packet monitoring */
+ dhdp->dbg->pkt_mon.rx_pkt_state = PKT_MON_DETACHED;
+ if (rx_report) {
+ if (rx_report->rx_pkts) {
+ __dhd_dbg_free_rx_pkts(dhdp, rx_report->rx_pkts,
+ rx_report->pkt_pos);
+ MFREE(dhdp->osh, rx_report->rx_pkts,
+ (sizeof(*rx_report->rx_pkts) * MAX_FATE_LOG_LEN));
+ dhdp->dbg->pkt_mon.rx_report->rx_pkts = NULL;
+ }
+ MFREE(dhdp->osh, rx_report, sizeof(*rx_report));
+ dhdp->dbg->pkt_mon.rx_report = NULL;
+ }
+ dhdp->dbg->pkt_mon.rx_pkt_mon = NULL;
+
+ DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
+ DHD_PKT_MON(("%s(): packet monitor detach succeeded\n", __FUNCTION__));
+ return BCME_OK;
+}
+#endif /* DBG_PKT_MON */
+
+#if defined(DBG_PKT_MON) || defined(DHD_PKT_LOGGING)
+/*
+ * XXX: WAR: Because the tx_status field of the TX completion message is
+ * overloaded as a DMA marker, it cannot be used directly. As a WAR, the
+ * d11 tx_status is sent through an unused status field of the PCIe
+ * completion header.
+ */
+bool
+dhd_dbg_process_tx_status(dhd_pub_t *dhdp, void *pkt, uint32 pktid,
+ uint16 status)
+{
+ bool pkt_fate = TRUE;
+ if (dhdp->d11_tx_status) {
+ pkt_fate = (status == WLFC_CTL_PKTFLAG_DISCARD) ? TRUE : FALSE;
+ DHD_DBG_PKT_MON_TX_STATUS(dhdp, pkt, pktid, status);
+ }
+ return pkt_fate;
+}
+#else /* DBG_PKT_MON || DHD_PKT_LOGGING */
+bool
+dhd_dbg_process_tx_status(dhd_pub_t *dhdp, void *pkt,
+ uint32 pktid, uint16 status)
+{
+ return TRUE;
+}
+#endif /* DBG_PKT_MON || DHD_PKT_LOGGING */
+
+#define EL_LOG_STR_LEN 512
+
+#define PRINT_CHN_PER_LINE 8
+#define PRINT_CHAN_LINE(cnt) \
+{\
+	cnt++; \
+ if (cnt >= PRINT_CHN_PER_LINE) { \
+ DHD_ERROR(("%s\n", b.origbuf)); \
+ bcm_binit(&b, pr_buf, EL_LOG_STR_LEN); \
+ bcm_bprintf(&b, "%s: ", prefix); \
+ cnt = 0; \
+ } \
+}
+
+void print_roam_chan_list(char *prefix, uint chan_num, uint16 band_2g,
+ uint16 uni2a, uint8 uni3, uint8 *uni2c)
+{
+ struct bcmstrbuf b;
+ char pr_buf[EL_LOG_STR_LEN] = { 0 };
+ int cnt = 0;
+ int idx, idx2;
+
+ bcm_binit(&b, pr_buf, EL_LOG_STR_LEN);
+ bcm_bprintf(&b, "%s: count(%d)", prefix, chan_num);
+	/* 2G channels */
+ for (idx = 0; idx < NBITS(uint16); idx++) {
+ if (BCM_BIT(idx) & band_2g) {
+ bcm_bprintf(&b, " %d", idx);
+ PRINT_CHAN_LINE(cnt);
+
+ }
+ }
+
+ /* 5G UNII BAND 1, UNII BAND 2A */
+ for (idx = 0; idx < NBITS(uint16); idx++) {
+ if (BCM_BIT(idx) & uni2a) {
+ bcm_bprintf(&b, " %u", ROAM_CHN_UNI_2A + idx * ROAM_CHN_SPACE);
+ PRINT_CHAN_LINE(cnt);
+ }
+ }
+
+ /* 5G UNII BAND 2C */
+ for (idx2 = 0; idx2 < 3; idx2++) {
+ for (idx = 0; idx < NBITS(uint8); idx++) {
+ if (BCM_BIT(idx) & uni2c[idx2]) {
+ bcm_bprintf(&b, " %u", ROAM_CHN_UNI_2C +
+ idx2 * ROAM_CHN_SPACE * NBITS(uint8) +
+ idx * ROAM_CHN_SPACE);
+ PRINT_CHAN_LINE(cnt);
+ }
+ }
+ }
+
+ /* 5G UNII BAND 3 */
+ for (idx = 0; idx < NBITS(uint8); idx++) {
+ if (BCM_BIT(idx) & uni3) {
+ bcm_bprintf(&b, " %u", ROAM_CHN_UNI_3 + idx * ROAM_CHN_SPACE);
+ PRINT_CHAN_LINE(cnt);
+ }
+ }
+
+ if (cnt != 0) {
+ DHD_ERROR(("%s\n", b.origbuf));
+ }
+}
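+
+/*
+ * Decoding example for the bitmaps above (a sketch; the real base
+ * channels and spacing come from the ROAM_CHN_* constants): a set bit
+ * idx in 'band_2g' prints 2G channel idx, while a set bit idx in
+ * 'uni2a' prints channel ROAM_CHN_UNI_2A + idx * ROAM_CHN_SPACE,
+ * e.g. 36 + 2 * 4 = 44 for idx 2 if the usual 36/4 values apply.
+ */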
+
+void pr_roam_scan_start_v1(prcd_event_log_hdr_t *plog_hdr);
+void pr_roam_scan_cmpl_v1(prcd_event_log_hdr_t *plog_hdr);
+void pr_roam_cmpl_v1(prcd_event_log_hdr_t *plog_hdr);
+void pr_roam_nbr_req_v1(prcd_event_log_hdr_t *plog_hdr);
+void pr_roam_nbr_rep_v1(prcd_event_log_hdr_t *plog_hdr);
+void pr_roam_bcn_req_v1(prcd_event_log_hdr_t *plog_hdr);
+void pr_roam_bcn_rep_v1(prcd_event_log_hdr_t *plog_hdr);
+
+void pr_roam_scan_start_v2(prcd_event_log_hdr_t *plog_hdr);
+void pr_roam_scan_cmpl_v2(prcd_event_log_hdr_t *plog_hdr);
+void pr_roam_nbr_rep_v2(prcd_event_log_hdr_t *plog_hdr);
+void pr_roam_bcn_rep_v2(prcd_event_log_hdr_t *plog_hdr);
+void pr_roam_btm_rep_v2(prcd_event_log_hdr_t *plog_hdr);
+
+void pr_roam_bcn_req_v3(prcd_event_log_hdr_t *plog_hdr);
+void pr_roam_bcn_rep_v3(prcd_event_log_hdr_t *plog_hdr);
+void pr_roam_btm_rep_v3(prcd_event_log_hdr_t *plog_hdr);
+
+static const pr_roam_tbl_t roam_log_print_tbl[] =
+{
+ {ROAM_LOG_VER_1, ROAM_LOG_SCANSTART, pr_roam_scan_start_v1},
+ {ROAM_LOG_VER_1, ROAM_LOG_SCAN_CMPLT, pr_roam_scan_cmpl_v1},
+ {ROAM_LOG_VER_1, ROAM_LOG_ROAM_CMPLT, pr_roam_cmpl_v1},
+ {ROAM_LOG_VER_1, ROAM_LOG_NBR_REQ, pr_roam_nbr_req_v1},
+ {ROAM_LOG_VER_1, ROAM_LOG_NBR_REP, pr_roam_nbr_rep_v1},
+ {ROAM_LOG_VER_1, ROAM_LOG_BCN_REQ, pr_roam_bcn_req_v1},
+ {ROAM_LOG_VER_1, ROAM_LOG_BCN_REP, pr_roam_bcn_rep_v1},
+
+ {ROAM_LOG_VER_2, ROAM_LOG_SCANSTART, pr_roam_scan_start_v2},
+ {ROAM_LOG_VER_2, ROAM_LOG_SCAN_CMPLT, pr_roam_scan_cmpl_v2},
+ {ROAM_LOG_VER_2, ROAM_LOG_ROAM_CMPLT, pr_roam_cmpl_v1},
+ {ROAM_LOG_VER_2, ROAM_LOG_NBR_REQ, pr_roam_nbr_req_v1},
+ {ROAM_LOG_VER_2, ROAM_LOG_NBR_REP, pr_roam_nbr_rep_v2},
+ {ROAM_LOG_VER_2, ROAM_LOG_BCN_REQ, pr_roam_bcn_req_v1},
+ {ROAM_LOG_VER_2, ROAM_LOG_BCN_REP, pr_roam_bcn_rep_v2},
+ {ROAM_LOG_VER_2, ROAM_LOG_BTM_REP, pr_roam_btm_rep_v2},
+
+ {ROAM_LOG_VER_3, ROAM_LOG_SCANSTART, pr_roam_scan_start_v2},
+ {ROAM_LOG_VER_3, ROAM_LOG_SCAN_CMPLT, pr_roam_scan_cmpl_v2},
+ {ROAM_LOG_VER_3, ROAM_LOG_ROAM_CMPLT, pr_roam_cmpl_v1},
+ {ROAM_LOG_VER_3, ROAM_LOG_NBR_REQ, pr_roam_nbr_req_v1},
+ {ROAM_LOG_VER_3, ROAM_LOG_NBR_REP, pr_roam_nbr_rep_v2},
+ {ROAM_LOG_VER_3, ROAM_LOG_BCN_REQ, pr_roam_bcn_req_v3},
+ {ROAM_LOG_VER_3, ROAM_LOG_BCN_REP, pr_roam_bcn_rep_v3},
+ {ROAM_LOG_VER_3, ROAM_LOG_BTM_REP, pr_roam_btm_rep_v3},
+
+ {0, PRSV_PERIODIC_ID_MAX, NULL}
+
+};
+
+void pr_roam_scan_start_v1(prcd_event_log_hdr_t *plog_hdr)
+{
+ roam_log_trig_v1_t *log = (roam_log_trig_v1_t *)plog_hdr->log_ptr;
+
+ DHD_ERROR_ROAM(("ROAM_LOG_SCANSTART time: %d,"
+ " version:%d reason: %d rssi:%d cu:%d result:%d\n",
+ plog_hdr->armcycle, log->hdr.version, log->reason,
+ log->rssi, log->current_cu, log->result));
+ if (log->reason == WLC_E_REASON_DEAUTH ||
+ log->reason == WLC_E_REASON_DISASSOC) {
+ DHD_ERROR_ROAM((" ROAM_LOG_PRT_ROAM: RCVD reason:%d\n",
+ log->prt_roam.rcvd_reason));
+ } else if (log->reason == WLC_E_REASON_BSSTRANS_REQ) {
+ DHD_ERROR_ROAM((" ROAM_LOG_BSS_REQ: mode:%d candidate:%d token:%d "
+ "duration disassoc:%d valid:%d term:%d\n",
+ log->bss_trans.req_mode, log->bss_trans.nbrlist_size,
+ log->bss_trans.token, log->bss_trans.disassoc_dur,
+ log->bss_trans.validity_dur, log->bss_trans.bss_term_dur));
+ }
+}
+
+void pr_roam_scan_cmpl_v1(prcd_event_log_hdr_t *plog_hdr)
+{
+ roam_log_scan_cmplt_v1_t *log = (roam_log_scan_cmplt_v1_t *)plog_hdr->log_ptr;
+ char chanspec_buf[CHANSPEC_STR_LEN];
+ int i;
+
+	DHD_ERROR_ROAM(("ROAM_LOG_SCAN_CMPL: time:%d version:%d "
+		"is_full:%d scan_count:%d score_delta:%d",
+		plog_hdr->armcycle, log->hdr.version, log->full_scan,
+		log->scan_count, log->score_delta));
+	DHD_ERROR_ROAM((" ROAM_LOG_CUR_AP: " MACDBG " rssi:%d score:%d channel:%s\n",
+ MAC2STRDBG((uint8 *)&log->cur_info.addr),
+ log->cur_info.rssi,
+ log->cur_info.score,
+ wf_chspec_ntoa_ex(log->cur_info.chanspec, chanspec_buf)));
+ for (i = 0; i < log->scan_list_size; i++) {
+		DHD_ERROR_ROAM((" ROAM_LOG_CANDIDATE %d: " MACDBG
+			" rssi:%d score:%d channel:%s TPUT:%dkbps\n",
+ i, MAC2STRDBG((uint8 *)&log->scan_list[i].addr),
+ log->scan_list[i].rssi, log->scan_list[i].score,
+ wf_chspec_ntoa_ex(log->scan_list[i].chanspec,
+ chanspec_buf),
+ log->scan_list[i].estm_tput != ROAM_LOG_INVALID_TPUT?
+ log->scan_list[i].estm_tput:0));
+ }
+}
+
+void pr_roam_cmpl_v1(prcd_event_log_hdr_t *plog_hdr)
+{
+ roam_log_cmplt_v1_t *log = (roam_log_cmplt_v1_t *)plog_hdr->log_ptr;
+ char chanspec_buf[CHANSPEC_STR_LEN];
+
+	DHD_ERROR_ROAM(("ROAM_LOG_ROAM_CMPL: time: %d, version:%d "
+		"status: %d reason: %d channel:%s retry:%d " MACDBG "\n",
+ plog_hdr->armcycle, log->hdr.version, log->status, log->reason,
+ wf_chspec_ntoa_ex(log->chanspec, chanspec_buf),
+ log->retry, MAC2STRDBG((uint8 *)&log->addr)));
+}
+
+void pr_roam_nbr_req_v1(prcd_event_log_hdr_t *plog_hdr)
+{
+ roam_log_nbrreq_v1_t *log = (roam_log_nbrreq_v1_t *)plog_hdr->log_ptr;
+
+ DHD_ERROR_ROAM(("ROAM_LOG_NBR_REQ: time: %d, version:%d token:%d\n",
+ plog_hdr->armcycle, log->hdr.version, log->token));
+}
+
+void pr_roam_nbr_rep_v1(prcd_event_log_hdr_t *plog_hdr)
+{
+ roam_log_nbrrep_v1_t *log = (roam_log_nbrrep_v1_t *)plog_hdr->log_ptr;
+
+	DHD_ERROR_ROAM(("ROAM_LOG_NBR_REP: time:%d, version:%d chan_num:%d\n",
+ plog_hdr->armcycle, log->hdr.version, log->channel_num));
+}
+
+void pr_roam_bcn_req_v1(prcd_event_log_hdr_t *plog_hdr)
+{
+ roam_log_bcnrpt_req_v1_t *log = (roam_log_bcnrpt_req_v1_t *)plog_hdr->log_ptr;
+
+	DHD_ERROR_ROAM(("ROAM_LOG_BCN_REQ: time:%d, version:%d ret:%d "
+		"class:%d num_chan:%d ",
+ plog_hdr->armcycle, log->hdr.version,
+ log->result, log->reg, log->channel));
+	DHD_ERROR_ROAM(("ROAM_LOG_BCN_REQ: mode:%d is_wild:%d duration:%d "
+		"ssid_len:%d\n", log->mode, log->bssid_wild,
+ log->duration, log->ssid_len));
+}
+
+void pr_roam_bcn_rep_v1(prcd_event_log_hdr_t *plog_hdr)
+{
+ roam_log_bcnrpt_rep_v1_t *log = (roam_log_bcnrpt_rep_v1_t *)plog_hdr->log_ptr;
+	DHD_ERROR_ROAM(("ROAM_LOG_BCN_REP: time:%d, version:%d count:%d\n",
+ plog_hdr->armcycle, log->hdr.version,
+ log->count));
+}
+
+void pr_roam_scan_start_v2(prcd_event_log_hdr_t *plog_hdr)
+{
+ roam_log_trig_v2_t *log = (roam_log_trig_v2_t *)plog_hdr->log_ptr;
+ DHD_ERROR_ROAM(("ROAM_LOG_SCANSTART time: %d,"
+ " version:%d reason: %d rssi:%d cu:%d result:%d full_scan:%d\n",
+ plog_hdr->armcycle, log->hdr.version, log->reason,
+ log->rssi, log->current_cu, log->result,
+ log->result?(-1):log->full_scan));
+ if (log->reason == WLC_E_REASON_DEAUTH ||
+ log->reason == WLC_E_REASON_DISASSOC) {
+ DHD_ERROR_ROAM((" ROAM_LOG_PRT_ROAM: RCVD reason:%d\n",
+ log->prt_roam.rcvd_reason));
+ } else if (log->reason == WLC_E_REASON_BSSTRANS_REQ) {
+ DHD_ERROR_ROAM((" ROAM_LOG_BSS_REQ: mode:%d candidate:%d token:%d "
+ "duration disassoc:%d valid:%d term:%d\n",
+ log->bss_trans.req_mode, log->bss_trans.nbrlist_size,
+ log->bss_trans.token, log->bss_trans.disassoc_dur,
+ log->bss_trans.validity_dur, log->bss_trans.bss_term_dur));
+ } else if (log->reason == WLC_E_REASON_LOW_RSSI) {
+ DHD_ERROR_ROAM((" ROAM_LOG_LOW_RSSI: threshold:%d\n",
+ log->low_rssi.rssi_threshold));
+ }
+}
+
+void pr_roam_scan_cmpl_v2(prcd_event_log_hdr_t *plog_hdr)
+{
+ int i;
+ roam_log_scan_cmplt_v2_t *log = (roam_log_scan_cmplt_v2_t *)plog_hdr->log_ptr;
+ char chanspec_buf[CHANSPEC_STR_LEN];
+
+	DHD_ERROR_ROAM(("ROAM_LOG_SCAN_CMPL: time:%d version:%d "
+		"scan_count:%d score_delta:%d",
+ plog_hdr->armcycle, log->hdr.version,
+ log->scan_count, log->score_delta));
+	DHD_ERROR_ROAM((" ROAM_LOG_CUR_AP: " MACDBG " rssi:%d score:%d channel:%s\n",
+ MAC2STRDBG((uint8 *)&log->cur_info.addr),
+ log->cur_info.rssi,
+ log->cur_info.score,
+ wf_chspec_ntoa_ex(log->cur_info.chanspec, chanspec_buf)));
+ for (i = 0; i < log->scan_list_size; i++) {
+		DHD_ERROR_ROAM((" ROAM_LOG_CANDIDATE %d: " MACDBG
+			" rssi:%d score:%d cu:%d channel:%s TPUT:%dkbps\n",
+ i, MAC2STRDBG((uint8 *)&log->scan_list[i].addr),
+ log->scan_list[i].rssi, log->scan_list[i].score,
+ log->scan_list[i].cu * 100 / WL_MAX_CHANNEL_USAGE,
+ wf_chspec_ntoa_ex(log->scan_list[i].chanspec,
+ chanspec_buf),
+ log->scan_list[i].estm_tput != ROAM_LOG_INVALID_TPUT?
+ log->scan_list[i].estm_tput:0));
+ }
+ if (log->chan_num != 0) {
+ print_roam_chan_list("ROAM_LOG_SCAN_CHANLIST", log->chan_num,
+ log->band2g_chan_list, log->uni2a_chan_list,
+ log->uni3_chan_list, log->uni2c_chan_list);
+ }
+
+}
+
+void pr_roam_nbr_rep_v2(prcd_event_log_hdr_t *plog_hdr)
+{
+ roam_log_nbrrep_v2_t *log = (roam_log_nbrrep_v2_t *)plog_hdr->log_ptr;
+	DHD_ERROR_ROAM(("ROAM_LOG_NBR_REP: time:%d, version:%d chan_num:%d\n",
+ plog_hdr->armcycle, log->hdr.version, log->channel_num));
+ if (log->channel_num != 0) {
+ print_roam_chan_list("ROAM_LOG_NBR_REP_CHANLIST", log->channel_num,
+ log->band2g_chan_list, log->uni2a_chan_list,
+ log->uni3_chan_list, log->uni2c_chan_list);
+ }
+}
+
+void pr_roam_bcn_rep_v2(prcd_event_log_hdr_t *plog_hdr)
+{
+ roam_log_bcnrpt_rep_v2_t *log = (roam_log_bcnrpt_rep_v2_t *)plog_hdr->log_ptr;
+
+	DHD_ERROR_ROAM(("ROAM_LOG_BCN_REP: time:%d, version:%d count:%d mode:%d\n",
+ plog_hdr->armcycle, log->hdr.version,
+ log->count, log->reason));
+}
+
+void pr_roam_btm_rep_v2(prcd_event_log_hdr_t *plog_hdr)
+{
+ roam_log_btm_rep_v2_t *log = (roam_log_btm_rep_v2_t *)plog_hdr->log_ptr;
+ DHD_ERROR_ROAM(("ROAM_LOG_BTM_REP: time:%d version:%d req_mode:%d "
+ "status:%d ret:%d\n",
+ plog_hdr->armcycle, log->hdr.version,
+ log->req_mode, log->status, log->result));
+}
+
+void pr_roam_bcn_req_v3(prcd_event_log_hdr_t *plog_hdr)
+{
+ roam_log_bcnrpt_req_v3_t *log = (roam_log_bcnrpt_req_v3_t *)plog_hdr->log_ptr;
+
+	DHD_ERROR_ROAM(("ROAM_LOG_BCN_REQ: time:%d, version:%d ret:%d "
+		"class:%d %s ",
+ plog_hdr->armcycle, log->hdr.version,
+ log->result, log->reg, log->channel?"":"all_chan"));
+	DHD_ERROR_ROAM(("ROAM_LOG_BCN_REQ: mode:%d is_wild:%d duration:%d "
+		"ssid_len:%d\n", log->mode, log->bssid_wild,
+ log->duration, log->ssid_len));
+ if (log->channel_num != 0) {
+ print_roam_chan_list("ROAM_LOG_BCNREQ_SCAN_CHANLIST", log->channel_num,
+ log->band2g_chan_list, log->uni2a_chan_list,
+ log->uni3_chan_list, log->uni2c_chan_list);
+ }
+}
+
+static const char*
+pr_roam_bcn_rep_reason(uint16 reason_detail)
+{
+ static const char* reason_tbl[] = {
+ "BCNRPT_RSN_SUCCESS",
+ "BCNRPT_RSN_BADARG",
+ "BCNRPT_RSN_SCAN_ING",
+ "BCNRPT_RSN_SCAN_FAIL",
+ "UNKNOWN"
+ };
+
+ if (reason_detail >= ARRAYSIZE(reason_tbl)) {
+ DHD_ERROR_ROAM(("UNKNOWN Reason:%u\n", reason_detail));
+ ASSERT(0);
+ reason_detail = ARRAYSIZE(reason_tbl) - 1;
+
+ }
+ return reason_tbl[reason_detail];
+}
+
+void pr_roam_bcn_rep_v3(prcd_event_log_hdr_t *plog_hdr)
+{
+ roam_log_bcnrpt_rep_v3_t *log = (roam_log_bcnrpt_rep_v3_t *)plog_hdr->log_ptr;
+
+	DHD_ERROR_ROAM(("ROAM_LOG_BCN_REP: time:%d, version:%d count:%d mode:%d\n",
+ plog_hdr->armcycle, log->hdr.version,
+ log->count, log->reason));
+	DHD_ERROR_ROAM(("ROAM_LOG_BCN_REP: mode reason(%d):%s scan_status:%u duration:%u\n",
+ log->reason_detail, pr_roam_bcn_rep_reason(log->reason_detail),
+ (log->reason_detail == BCNRPT_RSN_SCAN_FAIL)? log->scan_status:0,
+ log->duration));
+}
+
+void pr_roam_btm_rep_v3(prcd_event_log_hdr_t *plog_hdr)
+{
+ roam_log_btm_rep_v3_t *log = (roam_log_btm_rep_v3_t *)plog_hdr->log_ptr;
+ DHD_ERROR_ROAM(("ROAM_LOG_BTM_REP: time:%d version:%d req_mode:%d "
+ "status:%d ret:%d target:" MACDBG "\n",
+ plog_hdr->armcycle, log->hdr.version,
+ log->req_mode, log->status, log->result,
+ MAC2STRDBG((uint8 *)&log->target_addr)));
+}
+
+void
+print_roam_enhanced_log(prcd_event_log_hdr_t *plog_hdr)
+{
+ prsv_periodic_log_hdr_t *hdr = (prsv_periodic_log_hdr_t *)plog_hdr->log_ptr;
+ uint32 *ptr = (uint32 *)plog_hdr->log_ptr;
+ int i;
+ int loop_cnt = hdr->length / sizeof(uint32);
+ struct bcmstrbuf b;
+ char pr_buf[EL_LOG_STR_LEN] = { 0 };
+ const pr_roam_tbl_t *cur_elem = &roam_log_print_tbl[0];
+
+ while (cur_elem && cur_elem->pr_func) {
+ if (hdr->version == cur_elem->version &&
+ hdr->id == cur_elem->id) {
+ cur_elem->pr_func(plog_hdr);
+ return;
+ }
+ cur_elem++;
+ }
+
+ bcm_binit(&b, pr_buf, EL_LOG_STR_LEN);
+ bcm_bprintf(&b, "ROAM_LOG_UNKNOWN ID:%d ver:%d armcycle:%d",
+ hdr->id, hdr->version, plog_hdr->armcycle);
+ for (i = 0; i < loop_cnt && b.size > 0; i++) {
+ bcm_bprintf(&b, " %x", *ptr);
+ ptr++;
+ }
+ DHD_ERROR_ROAM(("%s\n", b.origbuf));
+}
+
+/*
+ * dhd_dbg_attach: initialization of the dhd debugability module
+ *
+ * Return: An error code or 0 on success.
+ */
+#ifdef DHD_DEBUGABILITY_LOG_DUMP_RING
+struct dhd_dbg_ring_buf g_ring_buf;
+#endif /* DHD_DEBUGABILITY_LOG_DUMP_RING */
+int
+dhd_dbg_attach(dhd_pub_t *dhdp, dbg_pullreq_t os_pullreq,
+ dbg_urgent_noti_t os_urgent_notifier, void *os_priv)
+{
+ dhd_dbg_t *dbg = NULL;
+ dhd_dbg_ring_t *ring = NULL;
+ int ret = BCME_ERROR, ring_id = 0;
+ void *buf = NULL;
+#ifdef DHD_DEBUGABILITY_LOG_DUMP_RING
+ struct dhd_dbg_ring_buf *ring_buf;
+#endif /* DHD_DEBUGABILITY_LOG_DUMP_RING */
+
+ dbg = MALLOCZ(dhdp->osh, sizeof(dhd_dbg_t));
+ if (!dbg)
+ return BCME_NOMEM;
+
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+ buf = DHD_OS_PREALLOC(dhdp, DHD_PREALLOC_FW_VERBOSE_RING, FW_VERBOSE_RING_SIZE);
+#else
+ buf = MALLOCZ(dhdp->osh, FW_VERBOSE_RING_SIZE);
+#endif
+ if (!buf)
+ goto error;
+ ret = dhd_dbg_ring_init(dhdp, &dbg->dbg_rings[FW_VERBOSE_RING_ID], FW_VERBOSE_RING_ID,
+ (uint8 *)FW_VERBOSE_RING_NAME, FW_VERBOSE_RING_SIZE, buf, FALSE);
+ if (ret)
+ goto error;
+
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+ buf = DHD_OS_PREALLOC(dhdp, DHD_PREALLOC_DHD_EVENT_RING, DHD_EVENT_RING_SIZE);
+#else
+ buf = MALLOCZ(dhdp->osh, DHD_EVENT_RING_SIZE);
+#endif
+ if (!buf)
+ goto error;
+ ret = dhd_dbg_ring_init(dhdp, &dbg->dbg_rings[DHD_EVENT_RING_ID], DHD_EVENT_RING_ID,
+ (uint8 *)DHD_EVENT_RING_NAME, DHD_EVENT_RING_SIZE, buf, FALSE);
+ if (ret)
+ goto error;
+
+#ifdef DHD_DEBUGABILITY_LOG_DUMP_RING
+ buf = MALLOCZ(dhdp->osh, DRIVER_LOG_RING_SIZE);
+ if (!buf)
+ goto error;
+ ret = dhd_dbg_ring_init(dhdp, &dbg->dbg_rings[DRIVER_LOG_RING_ID], DRIVER_LOG_RING_ID,
+ (uint8 *)DRIVER_LOG_RING_NAME, DRIVER_LOG_RING_SIZE, buf, FALSE);
+ if (ret)
+ goto error;
+
+ buf = MALLOCZ(dhdp->osh, ROAM_STATS_RING_SIZE);
+ if (!buf)
+ goto error;
+ ret = dhd_dbg_ring_init(dhdp, &dbg->dbg_rings[ROAM_STATS_RING_ID], ROAM_STATS_RING_ID,
+ (uint8 *)ROAM_STATS_RING_NAME, ROAM_STATS_RING_SIZE, buf, FALSE);
+ if (ret)
+ goto error;
+#endif /* DHD_DEBUGABILITY_LOG_DUMP_RING */
+#ifdef BTLOG
+ buf = MALLOCZ(dhdp->osh, BT_LOG_RING_SIZE);
+ if (!buf)
+ goto error;
+ ret = dhd_dbg_ring_init(dhdp, &dbg->dbg_rings[BT_LOG_RING_ID], BT_LOG_RING_ID,
+ BT_LOG_RING_NAME, BT_LOG_RING_SIZE, buf, FALSE);
+ if (ret)
+ goto error;
+#endif /* BTLOG */
+
+ dbg->private = os_priv;
+ dbg->pullreq = os_pullreq;
+ dbg->urgent_notifier = os_urgent_notifier;
+ dhdp->dbg = dbg;
+#ifdef DHD_DEBUGABILITY_LOG_DUMP_RING
+ ring_buf = &g_ring_buf;
+ ring_buf->dhd_pub = dhdp;
+#endif /* DHD_DEBUGABILITY_LOG_DUMP_RING */
+ return BCME_OK;
+
+error:
+ for (ring_id = DEBUG_RING_ID_INVALID + 1; ring_id < DEBUG_RING_ID_MAX; ring_id++) {
+ if (VALID_RING(dbg->dbg_rings[ring_id].id)) {
+ ring = &dbg->dbg_rings[ring_id];
+ dhd_dbg_ring_deinit(dhdp, ring);
+ if (ring->ring_buf) {
+#ifndef CONFIG_DHD_USE_STATIC_BUF
+ MFREE(dhdp->osh, ring->ring_buf, ring->ring_size);
+#endif
+ ring->ring_buf = NULL;
+ }
+ ring->ring_size = 0;
+ }
+ }
+ MFREE(dhdp->osh, dbg, sizeof(dhd_dbg_t));
+#ifdef DHD_DEBUGABILITY_LOG_DUMP_RING
+ ring_buf = &g_ring_buf;
+ ring_buf->dhd_pub = NULL;
+#endif /* DHD_DEBUGABILITY_LOG_DUMP_RING */
+
+ return ret;
+}
+
+/*
+ * dhd_dbg_detach: clean up the dhd debugability module
+ */
+void
+dhd_dbg_detach(dhd_pub_t *dhdp)
+{
+ int ring_id;
+ dhd_dbg_t *dbg;
+ dhd_dbg_ring_t *ring = NULL;
+#ifdef DHD_DEBUGABILITY_LOG_DUMP_RING
+ struct dhd_dbg_ring_buf *ring_buf;
+#endif /* DHD_DEBUGABILITY_LOG_DUMP_RING */
+
+ if (!dhdp->dbg)
+ return;
+
+ dbg = dhdp->dbg;
+ for (ring_id = DEBUG_RING_ID_INVALID + 1; ring_id < DEBUG_RING_ID_MAX; ring_id++) {
+ if (VALID_RING(dbg->dbg_rings[ring_id].id)) {
+ ring = &dbg->dbg_rings[ring_id];
+ dhd_dbg_ring_deinit(dhdp, ring);
+ if (ring->ring_buf) {
+#ifndef CONFIG_DHD_USE_STATIC_BUF
+ MFREE(dhdp->osh, ring->ring_buf, ring->ring_size);
+#endif
+ ring->ring_buf = NULL;
+ }
+ ring->ring_size = 0;
+ }
+ }
+ MFREE(dhdp->osh, dhdp->dbg, sizeof(dhd_dbg_t));
+#ifdef DHD_DEBUGABILITY_LOG_DUMP_RING
+ ring_buf = &g_ring_buf;
+ ring_buf->dhd_pub = NULL;
+#endif /* DHD_DEBUGABILITY_LOG_DUMP_RING */
+}
+
+uint32
+dhd_dbg_get_fwverbose(dhd_pub_t *dhdp)
+{
+ if (dhdp && dhdp->dbg) {
+ return dhdp->dbg->dbg_rings[FW_VERBOSE_RING_ID].log_level;
+ }
+ return 0;
+}
+
+void
+dhd_dbg_set_fwverbose(dhd_pub_t *dhdp, uint32 new_val)
+{
+ if (dhdp && dhdp->dbg) {
+ dhdp->dbg->dbg_rings[FW_VERBOSE_RING_ID].log_level = new_val;
+ }
+}
diff --git a/bcmdhd.101.10.361.x/dhd_debug.h b/bcmdhd.101.10.361.x/dhd_debug.h
new file mode 100755
index 0000000..08ba4a5
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_debug.h
@@ -0,0 +1,891 @@
+/*
+ * DHD debugability header file
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+#ifndef _dhd_debug_h_
+#define _dhd_debug_h_
+#include <event_log.h>
+#include <bcmutils.h>
+#include <dhd_dbg_ring.h>
+
+enum {
+ /* Feature set */
+ DBG_MEMORY_DUMP_SUPPORTED = (1 << (0)), /* Memory dump of FW */
+ DBG_PER_PACKET_TX_RX_STATUS_SUPPORTED = (1 << (1)), /* PKT Status */
+ DBG_CONNECT_EVENT_SUPPORTED = (1 << (2)), /* Connectivity Event */
+ DBG_POWER_EVENT_SUPOORTED = (1 << (3)), /* POWER of Driver */
+ DBG_WAKE_LOCK_SUPPORTED = (1 << (4)), /* WAKE LOCK of Driver */
+ DBG_VERBOSE_LOG_SUPPORTED = (1 << (5)), /* verbose log of FW */
+ DBG_HEALTH_CHECK_SUPPORTED = (1 << (6)), /* monitor the health of FW */
+ DBG_DRIVER_DUMP_SUPPORTED = (1 << (7)), /* dumps driver state */
+ DBG_PACKET_FATE_SUPPORTED = (1 << (8)), /* tracks connection packets' fate */
+ DBG_NAN_EVENT_SUPPORTED = (1 << (9)), /* NAN Events */
+};
+
+enum {
+ /* set for binary entries */
+ DBG_RING_ENTRY_FLAGS_HAS_BINARY = (1 << (0)),
+ /* set if 64 bits timestamp is present */
+ DBG_RING_ENTRY_FLAGS_HAS_TIMESTAMP = (1 << (1))
+};
+
+/* firmware verbose ring, ring id 1 */
+#define FW_VERBOSE_RING_NAME "fw_verbose"
+#define FW_VERBOSE_RING_SIZE (256 * 1024)
+/* firmware event ring, ring id 2 */
+#define FW_EVENT_RING_NAME "fw_event"
+#define FW_EVENT_RING_SIZE (64 * 1024)
+/* DHD connection event ring, ring id 3 */
+#define DHD_EVENT_RING_NAME "dhd_event"
+#define DHD_EVENT_RING_SIZE (64 * 1024)
+/* NAN event ring, ring id 4 */
+#define NAN_EVENT_RING_NAME "nan_event"
+#define NAN_EVENT_RING_SIZE (64 * 1024)
+
+#ifdef DHD_DEBUGABILITY_LOG_DUMP_RING
+/* DHD driver log ring */
+#define DRIVER_LOG_RING_NAME "driver_log"
+#define DRIVER_LOG_RING_SIZE (256 * 1024)
+/* ROAM stats log ring */
+#define ROAM_STATS_RING_NAME "roam_stats"
+#define ROAM_STATS_RING_SIZE (64 * 1024)
+#endif /* DHD_DEBUGABILITY_LOG_DUMP_RING */
+
+#ifdef BTLOG
+/* BT log ring, ring id 5 */
+#define BT_LOG_RING_NAME "bt_log"
+#define BT_LOG_RING_SIZE (64 * 1024)
+#endif /* BTLOG */
+
+#define TLV_LOG_SIZE(tlv) ((tlv) ? (sizeof(tlv_log) + (tlv)->len) : 0)
+
+#define TLV_LOG_NEXT(tlv) \
+ ((tlv) ? ((tlv_log *)((uint8 *)tlv + TLV_LOG_SIZE(tlv))) : 0)
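+
+/*
+ * Usage sketch for the two macros above, assuming 'buf' holds 'total'
+ * bytes of back-to-back TLV entries ('process' is a hypothetical
+ * consumer):
+ *
+ *	tlv_log *tlv = (tlv_log *)buf;
+ *	uint32 off = 0;
+ *	while ((off + sizeof(*tlv)) <= total &&
+ *			(off + TLV_LOG_SIZE(tlv)) <= total) {
+ *		process(tlv->tag, tlv->len, tlv->value);
+ *		off += TLV_LOG_SIZE(tlv);
+ *		tlv = TLV_LOG_NEXT(tlv);
+ *	}
+ */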
+
+#define DBG_RING_STATUS_SIZE (sizeof(dhd_dbg_ring_status_t))
+
+#define VALID_RING(id) \
+ ((id > DEBUG_RING_ID_INVALID) && (id < DEBUG_RING_ID_MAX))
+
+#ifdef DEBUGABILITY
+#define DBG_RING_ACTIVE(dhdp, ring_id) \
+ ((dhdp)->dbg->dbg_rings[(ring_id)].state == RING_ACTIVE)
+#else
+#define DBG_RING_ACTIVE(dhdp, ring_id) 0
+#endif /* DEBUGABILITY */
+
+enum {
+	/* driver receives association command from kernel */
+ WIFI_EVENT_ASSOCIATION_REQUESTED = 0,
+ WIFI_EVENT_AUTH_COMPLETE,
+ WIFI_EVENT_ASSOC_COMPLETE,
+ /* received firmware event indicating auth frames are sent */
+ WIFI_EVENT_FW_AUTH_STARTED,
+ /* received firmware event indicating assoc frames are sent */
+ WIFI_EVENT_FW_ASSOC_STARTED,
+ /* received firmware event indicating reassoc frames are sent */
+ WIFI_EVENT_FW_RE_ASSOC_STARTED,
+ WIFI_EVENT_DRIVER_SCAN_REQUESTED,
+ WIFI_EVENT_DRIVER_SCAN_RESULT_FOUND,
+ WIFI_EVENT_DRIVER_SCAN_COMPLETE,
+ WIFI_EVENT_G_SCAN_STARTED,
+ WIFI_EVENT_G_SCAN_COMPLETE,
+ WIFI_EVENT_DISASSOCIATION_REQUESTED,
+ WIFI_EVENT_RE_ASSOCIATION_REQUESTED,
+ WIFI_EVENT_ROAM_REQUESTED,
+ /* received beacon from AP (event enabled only in verbose mode) */
+ WIFI_EVENT_BEACON_RECEIVED,
+ /* firmware has triggered a roam scan (not g-scan) */
+ WIFI_EVENT_ROAM_SCAN_STARTED,
+ /* firmware has completed a roam scan (not g-scan) */
+ WIFI_EVENT_ROAM_SCAN_COMPLETE,
+ /* firmware has started searching for roam candidates (with reason =xx) */
+ WIFI_EVENT_ROAM_SEARCH_STARTED,
+ /* firmware has stopped searching for roam candidates (with reason =xx) */
+ WIFI_EVENT_ROAM_SEARCH_STOPPED,
+ WIFI_EVENT_UNUSED_0,
+	/* received channel switch announcement from AP */
+ WIFI_EVENT_CHANNEL_SWITCH_ANOUNCEMENT,
+	/* fw starts transmitting eapol frame, with EAPOL index 1-4 */
+	WIFI_EVENT_FW_EAPOL_FRAME_TRANSMIT_START,
+	/* fw gives up eapol frame, with rate, success/failure and number of retries */
+ WIFI_EVENT_FW_EAPOL_FRAME_TRANSMIT_STOP,
+	/* kernel queues EAPOL for transmission in driver with EAPOL index 1-4 */
+ WIFI_EVENT_DRIVER_EAPOL_FRAME_TRANSMIT_REQUESTED,
+ /* with rate, regardless of the fact that EAPOL frame is accepted or
+ * rejected by firmware
+ */
+ WIFI_EVENT_FW_EAPOL_FRAME_RECEIVED,
+ WIFI_EVENT_UNUSED_1,
+ /* with rate, and eapol index, driver has received */
+ /* EAPOL frame and will queue it up to wpa_supplicant */
+ WIFI_EVENT_DRIVER_EAPOL_FRAME_RECEIVED,
+ /* with success/failure, parameters */
+ WIFI_EVENT_BLOCK_ACK_NEGOTIATION_COMPLETE,
+ WIFI_EVENT_BT_COEX_BT_SCO_START,
+ WIFI_EVENT_BT_COEX_BT_SCO_STOP,
+	/* for paging/scan etc..., when BT starts transmitting twice per BT slot */
+ WIFI_EVENT_BT_COEX_BT_SCAN_START,
+ WIFI_EVENT_BT_COEX_BT_SCAN_STOP,
+ WIFI_EVENT_BT_COEX_BT_HID_START,
+ WIFI_EVENT_BT_COEX_BT_HID_STOP,
+ /* firmware sends auth frame in roaming to next candidate */
+ WIFI_EVENT_ROAM_AUTH_STARTED,
+ /* firmware receive auth confirm from ap */
+ WIFI_EVENT_ROAM_AUTH_COMPLETE,
+ /* firmware sends assoc/reassoc frame in */
+ WIFI_EVENT_ROAM_ASSOC_STARTED,
+ /* firmware receive assoc/reassoc confirm from ap */
+ WIFI_EVENT_ROAM_ASSOC_COMPLETE,
+ /* firmware sends stop G_SCAN */
+ WIFI_EVENT_G_SCAN_STOP,
+ /* firmware indicates G_SCAN scan cycle started */
+ WIFI_EVENT_G_SCAN_CYCLE_STARTED,
+ /* firmware indicates G_SCAN scan cycle completed */
+ WIFI_EVENT_G_SCAN_CYCLE_COMPLETED,
+ /* firmware indicates G_SCAN scan start for a particular bucket */
+ WIFI_EVENT_G_SCAN_BUCKET_STARTED,
+ /* firmware indicates G_SCAN scan completed for particular bucket */
+ WIFI_EVENT_G_SCAN_BUCKET_COMPLETED,
+ /* Event received from firmware about G_SCAN scan results being available */
+ WIFI_EVENT_G_SCAN_RESULTS_AVAILABLE,
+ /* Event received from firmware with G_SCAN capabilities */
+ WIFI_EVENT_G_SCAN_CAPABILITIES,
+ /* Event received from firmware when eligible candidate is found */
+ WIFI_EVENT_ROAM_CANDIDATE_FOUND,
+ /* Event received from firmware when roam scan configuration gets enabled or disabled */
+ WIFI_EVENT_ROAM_SCAN_CONFIG,
+ /* firmware/driver timed out authentication */
+ WIFI_EVENT_AUTH_TIMEOUT,
+ /* firmware/driver timed out association */
+ WIFI_EVENT_ASSOC_TIMEOUT,
+ /* firmware/driver encountered allocation failure */
+ WIFI_EVENT_MEM_ALLOC_FAILURE,
+ /* driver added a PNO network in firmware */
+ WIFI_EVENT_DRIVER_PNO_ADD,
+ /* driver removed a PNO network in firmware */
+ WIFI_EVENT_DRIVER_PNO_REMOVE,
+ /* driver received PNO networks found indication from firmware */
+ WIFI_EVENT_DRIVER_PNO_NETWORK_FOUND,
+ /* driver triggered a scan for PNO networks */
+ WIFI_EVENT_DRIVER_PNO_SCAN_REQUESTED,
+ /* driver received scan results of PNO networks */
+ WIFI_EVENT_DRIVER_PNO_SCAN_RESULT_FOUND,
+ /* driver updated scan results from PNO candidates to cfg */
+ WIFI_EVENT_DRIVER_PNO_SCAN_COMPLETE
+};
+
+enum {
+ WIFI_TAG_VENDOR_SPECIFIC = 0, /* take a byte stream as parameter */
+ WIFI_TAG_BSSID, /* takes a 6 bytes MAC address as parameter */
+ WIFI_TAG_ADDR, /* takes a 6 bytes MAC address as parameter */
+ WIFI_TAG_SSID, /* takes a 32 bytes SSID address as parameter */
+ WIFI_TAG_STATUS, /* takes an integer as parameter */
+ WIFI_TAG_CHANNEL_SPEC, /* takes one or more wifi_channel_spec as parameter */
+ WIFI_TAG_WAKE_LOCK_EVENT, /* takes a wake_lock_event struct as parameter */
+ WIFI_TAG_ADDR1, /* takes a 6 bytes MAC address as parameter */
+ WIFI_TAG_ADDR2, /* takes a 6 bytes MAC address as parameter */
+ WIFI_TAG_ADDR3, /* takes a 6 bytes MAC address as parameter */
+ WIFI_TAG_ADDR4, /* takes a 6 bytes MAC address as parameter */
+ WIFI_TAG_TSF, /* take a 64 bits TSF value as parameter */
+ WIFI_TAG_IE,
+ /* take one or more specific 802.11 IEs parameter, IEs are in turn
+ * indicated in TLV format as per 802.11 spec
+ */
+ WIFI_TAG_INTERFACE, /* take interface name as parameter */
+ WIFI_TAG_REASON_CODE, /* take a reason code as per 802.11 as parameter */
+ WIFI_TAG_RATE_MBPS, /* take a wifi rate in 0.5 mbps */
+ WIFI_TAG_REQUEST_ID, /* take an integer as parameter */
+ WIFI_TAG_BUCKET_ID, /* take an integer as parameter */
+ WIFI_TAG_GSCAN_PARAMS, /* takes a wifi_scan_cmd_params struct as parameter */
+ WIFI_TAG_GSCAN_CAPABILITIES, /* takes a wifi_gscan_capabilities struct as parameter */
+ WIFI_TAG_SCAN_ID, /* take an integer as parameter */
+ WIFI_TAG_RSSI, /* takes s16 as parameter */
+ WIFI_TAG_CHANNEL, /* takes u16 as parameter */
+ WIFI_TAG_LINK_ID, /* take an integer as parameter */
+ WIFI_TAG_LINK_ROLE, /* take an integer as parameter */
+ WIFI_TAG_LINK_STATE, /* take an integer as parameter */
+ WIFI_TAG_LINK_TYPE, /* take an integer as parameter */
+ WIFI_TAG_TSCO, /* take an integer as parameter */
+ WIFI_TAG_RSCO, /* take an integer as parameter */
+ WIFI_TAG_EAPOL_MESSAGE_TYPE /* take an integer as parameter */
+};
+
+/* NAN events */
+typedef enum {
+ NAN_EVENT_INVALID = 0,
+ NAN_EVENT_CLUSTER_STARTED = 1,
+ NAN_EVENT_CLUSTER_JOINED = 2,
+ NAN_EVENT_CLUSTER_MERGED = 3,
+ NAN_EVENT_ROLE_CHANGED = 4,
+ NAN_EVENT_SCAN_COMPLETE = 5,
+ NAN_EVENT_STATUS_CHNG = 6,
+ /* ADD new events before this line */
+ NAN_EVENT_MAX
+} nan_event_id_t;
+
+typedef struct {
+ uint16 tag;
+ uint16 len; /* length of value */
+ uint8 value[0];
+} tlv_log;
+
+typedef struct per_packet_status_entry {
+ uint8 flags;
+ uint8 tid; /* transmit or received tid */
+ uint16 MCS; /* modulation and bandwidth */
+ /*
+ * TX: RSSI of ACK for that packet
+ * RX: RSSI of packet
+ */
+ uint8 rssi;
+ uint8 num_retries; /* number of attempted retries */
+ uint16 last_transmit_rate; /* last transmit rate in .5 mbps */
+	/* transmit/receive sequence for that MPDU packet */
+ uint16 link_layer_transmit_sequence;
+ /*
+ * TX: firmware timestamp (us) when packet is queued within firmware buffer
+ * for SDIO/HSIC or into PCIe buffer
+ * RX : firmware receive timestamp
+ */
+ uint64 firmware_entry_timestamp;
+ /*
+ * firmware timestamp (us) when packet start contending for the
+ * medium for the first time, at head of its AC queue,
+ * or as part of an MPDU or A-MPDU. This timestamp is not updated
+ * for each retry, only the first transmit attempt.
+ */
+ uint64 start_contention_timestamp;
+ /*
+	 * firmware timestamp (us) when packet is successfully transmitted
+ * or aborted because it has exhausted its maximum number of retries
+ */
+ uint64 transmit_success_timestamp;
+ /*
+ * packet data. The length of packet data is determined by the entry_size field of
+	 * the wifi_ring_buffer_entry structure. It is expected that the first bytes of
+	 * the packet, or the packet headers only (up to TCP or RTP/UDP headers), will be
+	 * copied into the ring
+ */
+ uint8 *data;
+} per_packet_status_entry_t;
+
+#if defined(LINUX)
+#define PACKED_STRUCT __attribute__ ((packed))
+#else
+#define PACKED_STRUCT
+#endif
+
+#if defined(LINUX)
+typedef struct log_conn_event {
+ uint16 event;
+ tlv_log tlvs[0];
+ /*
+	 * A separate parameter structure is provided per event, plus optional data.
+	 * The event_data is expected to include an official android part, with
+	 * parameters such as transmit rate, num retries, num scan results found etc...
+	 * as well, event_data can include a vendor proprietary part which is
+	 * understood by the developer only.
+ */
+} PACKED_STRUCT log_conn_event_t;
+#endif /* defined(LINUX) */
+
+/*
+ * Ring buffer name for the power events ring. Note that power events are extremely
+ * frequent and thus should be stored in their own ring/file so as not to clobber connectivity events
+ */
+
+typedef struct wake_lock_event {
+ uint32 status; /* 0 taken, 1 released */
+ uint32 reason; /* reason why this wake lock is taken */
+ char *name; /* null terminated */
+} wake_lock_event_t;
+
+typedef struct wifi_power_event {
+ uint16 event;
+ tlv_log *tlvs;
+} wifi_power_event_t;
+
+#define NAN_EVENT_VERSION 1
+typedef struct log_nan_event {
+ uint8 version;
+ uint8 pad;
+ uint16 event;
+ tlv_log *tlvs;
+} log_nan_event_t;
+
+/* entry type */
+enum {
+ DBG_RING_ENTRY_EVENT_TYPE = 1,
+ DBG_RING_ENTRY_PKT_TYPE,
+ DBG_RING_ENTRY_WAKE_LOCK_EVENT_TYPE,
+ DBG_RING_ENTRY_POWER_EVENT_TYPE,
+ DBG_RING_ENTRY_DATA_TYPE,
+ DBG_RING_ENTRY_NAN_EVENT_TYPE
+};
+
+struct log_level_table {
+ int log_level;
+ uint16 tag;
+ char *desc;
+};
+
+#ifdef OEM_ANDROID
+/*
+ * Assuming that the ring lock is a mutex, bail out if the
+ * caller is in atomic context. In the long term, a job should be
+ * scheduled to execute in sleepable context so that
+ * contents are pushed to the ring.
+ */
+#define DBG_EVENT_LOG(dhdp, connect_state) \
+{ \
+ do { \
+ uint16 state = connect_state; \
+ if (CAN_SLEEP() && DBG_RING_ACTIVE(dhdp, DHD_EVENT_RING_ID)) \
+ dhd_os_push_push_ring_data(dhdp, DHD_EVENT_RING_ID, \
+ &state, sizeof(state)); \
+ } while (0); \
+}
+#else
+#define DBG_EVENT_LOG(dhd, connect_state)
+#endif /* OEM_ANDROID */
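+
+/*
+ * Typical usage (sketch): record a connectivity state change from
+ * sleepable context, e.g. DBG_EVENT_LOG(dhdp, WIFI_EVENT_ASSOC_COMPLETE);
+ * the macro is a no-op when the caller is atomic or the DHD event ring
+ * is inactive.
+ */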
+
+/*
+ * Packet logging - HAL specific data
+ * XXX: These should be moved to wl_cfgvendor.h
+ */
+
+#define MD5_PREFIX_LEN 4
+#define MAX_FATE_LOG_LEN 32
+#define MAX_FRAME_LEN_ETHERNET 1518
+#define MAX_FRAME_LEN_80211_MGMT 2352 /* 802.11-2012 Fig. 8-34 */
+
+typedef enum {
+ /* Sent over air and ACKed. */
+ TX_PKT_FATE_ACKED,
+
+ /* Sent over air but not ACKed. (Normal for broadcast/multicast.) */
+ TX_PKT_FATE_SENT,
+
+ /* Queued within firmware, but not yet sent over air. */
+ TX_PKT_FATE_FW_QUEUED,
+
+ /*
+ * Dropped by firmware as invalid. E.g. bad source address,
+ * bad checksum, or invalid for current state.
+ */
+ TX_PKT_FATE_FW_DROP_INVALID,
+
+ /* Dropped by firmware due to lifetime expiration. */
+ TX_PKT_FATE_FW_DROP_EXPTIME,
+
+ /*
+ * Dropped by firmware for any other reason. Includes
+ * frames that were sent by driver to firmware, but
+ * unaccounted for by firmware.
+ */
+ TX_PKT_FATE_FW_DROP_OTHER,
+
+ /* Queued within driver, not yet sent to firmware. */
+ TX_PKT_FATE_DRV_QUEUED,
+
+ /*
+ * Dropped by driver as invalid. E.g. bad source address,
+ * or invalid for current state.
+ */
+ TX_PKT_FATE_DRV_DROP_INVALID,
+
+ /* Dropped by driver due to lack of buffer space. */
+ TX_PKT_FATE_DRV_DROP_NOBUFS,
+
+ /* Dropped by driver for any other reason. */
+ TX_PKT_FATE_DRV_DROP_OTHER,
+
+	/* Packet freed by firmware. */
+ TX_PKT_FATE_FW_PKT_FREE,
+
+ } wifi_tx_packet_fate;
+
+typedef enum {
+ /* Valid and delivered to network stack (e.g., netif_rx()). */
+ RX_PKT_FATE_SUCCESS,
+
+ /* Queued within firmware, but not yet sent to driver. */
+ RX_PKT_FATE_FW_QUEUED,
+
+ /* Dropped by firmware due to host-programmable filters. */
+ RX_PKT_FATE_FW_DROP_FILTER,
+
+ /*
+ * Dropped by firmware as invalid. E.g. bad checksum,
+ * decrypt failed, or invalid for current state.
+ */
+ RX_PKT_FATE_FW_DROP_INVALID,
+
+ /* Dropped by firmware due to lack of buffer space. */
+ RX_PKT_FATE_FW_DROP_NOBUFS,
+
+ /* Dropped by firmware for any other reason. */
+ RX_PKT_FATE_FW_DROP_OTHER,
+
+ /* Queued within driver, not yet delivered to network stack. */
+ RX_PKT_FATE_DRV_QUEUED,
+
+ /* Dropped by driver due to filter rules. */
+ RX_PKT_FATE_DRV_DROP_FILTER,
+
+ /* Dropped by driver as invalid. E.g. not permitted in current state. */
+ RX_PKT_FATE_DRV_DROP_INVALID,
+
+ /* Dropped by driver due to lack of buffer space. */
+ RX_PKT_FATE_DRV_DROP_NOBUFS,
+
+ /* Dropped by driver for any other reason. */
+ RX_PKT_FATE_DRV_DROP_OTHER,
+
+ /* Indicate RX Host Wake up packet. */
+ RX_PKT_FATE_WAKE_PKT,
+
+ } wifi_rx_packet_fate;
+
+typedef enum {
+ FRAME_TYPE_UNKNOWN,
+ FRAME_TYPE_ETHERNET_II,
+ FRAME_TYPE_80211_MGMT,
+ } frame_type;
+
+typedef struct wifi_frame_info {
+ /*
+ * The type of MAC-layer frame that this frame_info holds.
+ * - For data frames, use FRAME_TYPE_ETHERNET_II.
+ * - For management frames, use FRAME_TYPE_80211_MGMT.
+ * - If the type of the frame is unknown, use FRAME_TYPE_UNKNOWN.
+ */
+ frame_type payload_type;
+
+ /*
+ * The number of bytes included in |frame_content|. If the frame
+ * contents are missing (e.g. RX frame dropped in firmware),
+ * |frame_len| should be set to 0.
+ */
+ size_t frame_len;
+
+ /*
+ * Host clock when this frame was received by the driver (either
+ * outbound from the host network stack, or inbound from the
+ * firmware).
+ * - The timestamp should be taken from a clock which includes time
+ * the host spent suspended (e.g. ktime_get_boottime()).
+ * - If no host timestamp is available (e.g. RX frame was dropped in
+ * firmware), this field should be set to 0.
+ */
+ uint32 driver_timestamp_usec;
+
+ /*
+ * Firmware clock when this frame was received by the firmware
+ * (either outbound from the host, or inbound from a remote
+ * station).
+ * - The timestamp should be taken from a clock which includes time
+ * firmware spent suspended (if applicable).
+ * - If no firmware timestamp is available (e.g. TX frame was
+ * dropped by driver), this field should be set to 0.
+ * - Consumers of |frame_info| should _not_ assume any
+ * synchronization between driver and firmware clocks.
+ */
+ uint32 firmware_timestamp_usec;
+
+ /*
+ * Actual frame content.
+ * - Should be provided for TX frames originated by the host.
+ * - Should be provided for RX frames received by the driver.
+ * - Optionally provided for TX frames originated by firmware. (At
+ * discretion of HAL implementation.)
+ * - Optionally provided for RX frames dropped in firmware. (At
+ * discretion of HAL implementation.)
+ * - If frame content is not provided, |frame_len| should be set
+ * to 0.
+ */
+ union {
+ char ethernet_ii[MAX_FRAME_LEN_ETHERNET];
+ char ieee_80211_mgmt[MAX_FRAME_LEN_80211_MGMT];
+ } frame_content;
+} wifi_frame_info_t;
+
+typedef struct wifi_tx_report {
+ /*
+ * Prefix of MD5 hash of |frame_inf.frame_content|. If frame
+ * content is not provided, prefix of MD5 hash over the same data
+ * that would be in frame_content, if frame content were provided.
+ */
+ char md5_prefix[MD5_PREFIX_LEN];
+ wifi_tx_packet_fate fate;
+ wifi_frame_info_t frame_inf;
+} wifi_tx_report_t;
+
+typedef struct wifi_rx_report {
+ /*
+ * Prefix of MD5 hash of |frame_inf.frame_content|. If frame
+ * content is not provided, prefix of MD5 hash over the same data
+ * that would be in frame_content, if frame content were provided.
+ */
+ char md5_prefix[MD5_PREFIX_LEN];
+ wifi_rx_packet_fate fate;
+ wifi_frame_info_t frame_inf;
+} wifi_rx_report_t;
+
+typedef struct compat_wifi_frame_info {
+ frame_type payload_type;
+
+ uint32 frame_len;
+
+ uint32 driver_timestamp_usec;
+
+ uint32 firmware_timestamp_usec;
+
+ union {
+ char ethernet_ii[MAX_FRAME_LEN_ETHERNET];
+ char ieee_80211_mgmt[MAX_FRAME_LEN_80211_MGMT];
+ } frame_content;
+} compat_wifi_frame_info_t;
+
+typedef struct compat_wifi_tx_report {
+ char md5_prefix[MD5_PREFIX_LEN];
+ wifi_tx_packet_fate fate;
+ compat_wifi_frame_info_t frame_inf;
+} compat_wifi_tx_report_t;
+
+typedef struct compat_wifi_rx_report {
+ char md5_prefix[MD5_PREFIX_LEN];
+ wifi_rx_packet_fate fate;
+ compat_wifi_frame_info_t frame_inf;
+} compat_wifi_rx_report_t;
+
+/*
+ * Packet logging - internal data
+ */
+
+typedef enum dhd_dbg_pkt_mon_state {
+ PKT_MON_INVALID = 0,
+ PKT_MON_ATTACHED,
+ PKT_MON_STARTING,
+ PKT_MON_STARTED,
+ PKT_MON_STOPPING,
+ PKT_MON_STOPPED,
+ PKT_MON_DETACHED,
+ } dhd_dbg_pkt_mon_state_t;
+
+typedef struct dhd_dbg_pkt_info {
+ frame_type payload_type;
+ size_t pkt_len;
+ uint32 driver_ts;
+ uint32 firmware_ts;
+ uint32 pkt_hash;
+ void *pkt;
+} dhd_dbg_pkt_info_t;
+
+typedef struct compat_dhd_dbg_pkt_info {
+ frame_type payload_type;
+ uint32 pkt_len;
+ uint32 driver_ts;
+ uint32 firmware_ts;
+ uint32 pkt_hash;
+ void *pkt;
+} compat_dhd_dbg_pkt_info_t;
+
+typedef struct dhd_dbg_tx_info
+{
+ wifi_tx_packet_fate fate;
+ dhd_dbg_pkt_info_t info;
+} dhd_dbg_tx_info_t;
+
+typedef struct dhd_dbg_rx_info
+{
+ wifi_rx_packet_fate fate;
+ dhd_dbg_pkt_info_t info;
+} dhd_dbg_rx_info_t;
+
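+/*
+ * pkt_pos counts TX frames snapshotted into tx_pkts[] (bounded by
+ * MAX_FATE_LOG_LEN); status_pos counts how many of those entries have
+ * had a tx status matched back by dhd_dbg_monitor_tx_status().
+ */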
+typedef struct dhd_dbg_tx_report
+{
+ dhd_dbg_tx_info_t *tx_pkts;
+ uint16 pkt_pos;
+ uint16 status_pos;
+} dhd_dbg_tx_report_t;
+
+typedef struct dhd_dbg_rx_report
+{
+ dhd_dbg_rx_info_t *rx_pkts;
+ uint16 pkt_pos;
+} dhd_dbg_rx_report_t;
+
+typedef void (*dbg_pullreq_t)(void *os_priv, const int ring_id);
+typedef void (*dbg_urgent_noti_t) (dhd_pub_t *dhdp, const void *data, const uint32 len);
+typedef int (*dbg_mon_tx_pkts_t) (dhd_pub_t *dhdp, void *pkt, uint32 pktid);
+typedef int (*dbg_mon_tx_status_t) (dhd_pub_t *dhdp, void *pkt,
+ uint32 pktid, uint16 status);
+typedef int (*dbg_mon_rx_pkts_t) (dhd_pub_t *dhdp, void *pkt);
+
+typedef struct dhd_dbg_pkt_mon
+{
+ dhd_dbg_tx_report_t *tx_report;
+ dhd_dbg_rx_report_t *rx_report;
+ dhd_dbg_pkt_mon_state_t tx_pkt_state;
+ dhd_dbg_pkt_mon_state_t tx_status_state;
+ dhd_dbg_pkt_mon_state_t rx_pkt_state;
+
+ /* call backs */
+ dbg_mon_tx_pkts_t tx_pkt_mon;
+ dbg_mon_tx_status_t tx_status_mon;
+ dbg_mon_rx_pkts_t rx_pkt_mon;
+} dhd_dbg_pkt_mon_t;
+
+typedef struct dhd_dbg {
+ dhd_dbg_ring_t dbg_rings[DEBUG_RING_ID_MAX];
+ void *private; /* os private_data */
+ dhd_dbg_pkt_mon_t pkt_mon;
+ void *pkt_mon_lock; /* spin lock for packet monitoring */
+ dbg_pullreq_t pullreq;
+ dbg_urgent_noti_t urgent_notifier;
+} dhd_dbg_t;
+
+#define PKT_MON_ATTACHED(state) \
+ (((state) > PKT_MON_INVALID) && ((state) < PKT_MON_DETACHED))
+#define PKT_MON_DETACHED(state) \
+ (((state) == PKT_MON_INVALID) || ((state) == PKT_MON_DETACHED))
+#define PKT_MON_STARTED(state) ((state) == PKT_MON_STARTED)
+#define PKT_MON_STOPPED(state) ((state) == PKT_MON_STOPPED)
+#define PKT_MON_NOT_OPERATIONAL(state) \
+ (((state) != PKT_MON_STARTED) && ((state) != PKT_MON_STOPPED))
+#define PKT_MON_SAFE_TO_FREE(state) \
+ (((state) == PKT_MON_STARTING) || ((state) == PKT_MON_STOPPED))
+#define PKT_MON_PKT_FULL(pkt_count) ((pkt_count) >= MAX_FATE_LOG_LEN)
+#define PKT_MON_STATUS_FULL(pkt_count, status_count) \
+ (((status_count) >= (pkt_count)) || ((status_count) >= MAX_FATE_LOG_LEN))
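+
+/*
+ * State flow as driven by dhd_debug.c (a sketch; the attach path is
+ * assumed to leave the states PKT_MON_ATTACHED): start moves the states
+ * through STARTING to STARTED, the per-packet hooks flip to STOPPED once
+ * MAX_FATE_LOG_LEN entries are logged (as does an explicit stop), and
+ * detach returns everything to PKT_MON_DETACHED.
+ */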
+
+#ifdef DBG_PKT_MON
+#define DHD_DBG_PKT_MON_TX(dhdp, pkt, pktid) \
+ do { \
+ if ((dhdp) && (dhdp)->dbg && (dhdp)->dbg->pkt_mon.tx_pkt_mon && (pkt)) { \
+ (dhdp)->dbg->pkt_mon.tx_pkt_mon((dhdp), (pkt), (pktid)); \
+ } \
+ } while (0);
+#define DHD_DBG_PKT_MON_TX_STATUS(dhdp, pkt, pktid, status) \
+ do { \
+ if ((dhdp) && (dhdp)->dbg && (dhdp)->dbg->pkt_mon.tx_status_mon && (pkt)) { \
+ (dhdp)->dbg->pkt_mon.tx_status_mon((dhdp), (pkt), (pktid), (status)); \
+ } \
+ } while (0);
+#define DHD_DBG_PKT_MON_RX(dhdp, pkt) \
+ do { \
+ if ((dhdp) && (dhdp)->dbg && (dhdp)->dbg->pkt_mon.rx_pkt_mon && (pkt)) { \
+ if (ntoh16((pkt)->protocol) != ETHER_TYPE_BRCM) { \
+ (dhdp)->dbg->pkt_mon.rx_pkt_mon((dhdp), (pkt)); \
+ } \
+ } \
+ } while (0);
+
+#define DHD_DBG_PKT_MON_START(dhdp) \
+ dhd_os_dbg_start_pkt_monitor((dhdp));
+#define DHD_DBG_PKT_MON_STOP(dhdp) \
+ dhd_os_dbg_stop_pkt_monitor((dhdp));
+#else
+#define DHD_DBG_PKT_MON_TX(dhdp, pkt, pktid)
+#define DHD_DBG_PKT_MON_TX_STATUS(dhdp, pkt, pktid, status)
+#define DHD_DBG_PKT_MON_RX(dhdp, pkt)
+#define DHD_DBG_PKT_MON_START(dhdp)
+#define DHD_DBG_PKT_MON_STOP(dhdp)
+#endif /* DBG_PKT_MON */
+
+#ifdef DUMP_IOCTL_IOV_LIST
+typedef struct dhd_iov_li {
+ dll_t list;
+ uint32 cmd; /* command number */
+ char buff[100]; /* command name */
+} dhd_iov_li_t;
+#endif /* DUMP_IOCTL_IOV_LIST */
+
+#define IOV_LIST_MAX_LEN 5
+
+#ifdef DHD_DEBUG
+typedef struct {
+ dll_t list;
+ uint32 id; /* wasted chunk id */
+ uint32 handle; /* wasted chunk handle */
+ uint32 size; /* wasted chunk size */
+} dhd_dbg_mwli_t;
+#endif /* DHD_DEBUG */
+
+#define DHD_OW_BI_RAW_EVENT_LOG_FMT 0xFFFF
+
+/* LSB 2 bits of format number to identify the type of event log */
+#define DHD_EVENT_LOG_HDR_MASK 0x3
+
+#define DHD_EVENT_LOG_FMT_NUM_OFFSET 2
+#define DHD_EVENT_LOG_FMT_NUM_MASK 0x3FFF
+/**
+ * OW:- one word
+ * TW:- two word
+ * NB:- non binary
+ * BI:- binary
+ */
+#define DHD_OW_NB_EVENT_LOG_HDR 0
+#define DHD_TW_NB_EVENT_LOG_HDR 1
+#define DHD_BI_EVENT_LOG_HDR 3
+#define DHD_INVALID_EVENT_LOG_HDR 2
+
+#define DHD_TW_VALID_TAG_BITS_MASK 0xF
+#define DHD_OW_BI_EVENT_FMT_NUM 0x3FFF
+#define DHD_TW_BI_EVENT_FMT_NUM 0x3FFE
+
+#define DHD_TW_EVENT_LOG_TAG_OFFSET 8
+
+#define EVENT_TAG_TIMESTAMP_OFFSET 1
+#define EVENT_TAG_TIMESTAMP_EXT_OFFSET 2
+
+typedef struct prcd_event_log_hdr {
+ uint32 tag; /* Event_log entry tag */
+ uint32 count; /* Count of 4-byte entries */
+ uint32 fmt_num_raw; /* Format number */
+ uint32 fmt_num; /* Format number >> 2 */
+ uint32 armcycle; /* global ARM CYCLE for TAG */
+ uint32 *log_ptr; /* start of payload */
+ uint32 payload_len;
+ /* Extended event log header info
+ * 0 - legacy, 1 - extended event log header present
+ */
+ bool ext_event_log_hdr;
+ bool binary_payload; /* 0 - non binary payload, 1 - binary payload */
+} prcd_event_log_hdr_t; /* Processed event log header */
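The defines above pack two fields into one 32-bit format word: the LSB 2 bits select the header type and the next 14 bits carry the format number, matching the fmt_num_raw/fmt_num pair in prcd_event_log_hdr_t. A stand-alone decode sketch; the sample value is arbitrary:

    #include <stdint.h>
    #include <stdio.h>

    #define DHD_EVENT_LOG_HDR_MASK       0x3
    #define DHD_EVENT_LOG_FMT_NUM_OFFSET 2
    #define DHD_EVENT_LOG_FMT_NUM_MASK   0x3FFF

    int main(void)
    {
        uint32_t fmt_num_raw = 0x1A7D; /* arbitrary sample word */
        /* LSB 2 bits: 0 = OW/NB, 1 = TW/NB, 3 = BI, 2 = invalid */
        uint32_t hdr_type = fmt_num_raw & DHD_EVENT_LOG_HDR_MASK;
        /* remaining 14 bits: the format number proper */
        uint32_t fmt_num = (fmt_num_raw >> DHD_EVENT_LOG_FMT_NUM_OFFSET) &
                DHD_EVENT_LOG_FMT_NUM_MASK;

        printf("hdr_type=%u fmt_num=0x%X\n",
               (unsigned)hdr_type, (unsigned)fmt_num);
        return 0;
    }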
+
+/* dhd_dbg functions */
+extern void dhd_dbg_trace_evnt_handler(dhd_pub_t *dhdp, void *event_data,
+ void *raw_event_ptr, uint datalen);
+void dhd_dbg_msgtrace_log_parser(dhd_pub_t *dhdp, void *event_data,
+ void *raw_event_ptr, uint datalen, bool msgtrace_hdr_present,
+ uint32 msgtrace_seqnum);
+
+#ifdef BTLOG
+extern void dhd_dbg_bt_log_handler(dhd_pub_t *dhdp, void *data, uint datalen);
+#endif /* BTLOG */
+extern int dhd_dbg_attach(dhd_pub_t *dhdp, dbg_pullreq_t os_pullreq,
+ dbg_urgent_noti_t os_urgent_notifier, void *os_priv);
+extern void dhd_dbg_detach(dhd_pub_t *dhdp);
+extern int dhd_dbg_start(dhd_pub_t *dhdp, bool start);
+extern int dhd_dbg_set_configuration(dhd_pub_t *dhdp, int ring_id,
+ int log_level, int flags, uint32 threshold);
+extern int dhd_dbg_find_ring_id(dhd_pub_t *dhdp, char *ring_name);
+extern dhd_dbg_ring_t *dhd_dbg_get_ring_from_ring_id(dhd_pub_t *dhdp, int ring_id);
+extern void *dhd_dbg_get_priv(dhd_pub_t *dhdp);
+extern int dhd_dbg_send_urgent_evt(dhd_pub_t *dhdp, const void *data, const uint32 len);
+extern void dhd_dbg_verboselog_printf(dhd_pub_t *dhdp, prcd_event_log_hdr_t *plog_hdr,
+ void *raw_event_ptr, uint32 *log_ptr, uint32 logset, uint16 block);
+int dhd_dbg_pull_from_ring(dhd_pub_t *dhdp, int ring_id, void *data, uint32 buf_len);
+int dhd_dbg_pull_single_from_ring(dhd_pub_t *dhdp, int ring_id, void *data, uint32 buf_len,
+ bool strip_header);
+int dhd_dbg_push_to_ring(dhd_pub_t *dhdp, int ring_id, dhd_dbg_ring_entry_t *hdr,
+ void *data);
+int __dhd_dbg_get_ring_status(dhd_dbg_ring_t *ring, dhd_dbg_ring_status_t *ring_status);
+int dhd_dbg_get_ring_status(dhd_pub_t *dhdp, int ring_id,
+ dhd_dbg_ring_status_t *dbg_ring_status);
+#ifdef SHOW_LOGTRACE
+void dhd_dbg_read_ring_into_trace_buf(dhd_dbg_ring_t *ring, trace_buf_info_t *trace_buf_info);
+#endif /* SHOW_LOGTRACE */
+
+#ifdef DBG_PKT_MON
+extern int dhd_dbg_attach_pkt_monitor(dhd_pub_t *dhdp,
+ dbg_mon_tx_pkts_t tx_pkt_mon,
+ dbg_mon_tx_status_t tx_status_mon,
+ dbg_mon_rx_pkts_t rx_pkt_mon);
+extern int dhd_dbg_start_pkt_monitor(dhd_pub_t *dhdp);
+extern int dhd_dbg_monitor_tx_pkts(dhd_pub_t *dhdp, void *pkt, uint32 pktid);
+extern int dhd_dbg_monitor_tx_status(dhd_pub_t *dhdp, void *pkt,
+ uint32 pktid, uint16 status);
+extern int dhd_dbg_monitor_rx_pkts(dhd_pub_t *dhdp, void *pkt);
+extern int dhd_dbg_stop_pkt_monitor(dhd_pub_t *dhdp);
+extern int dhd_dbg_monitor_get_tx_pkts(dhd_pub_t *dhdp, void __user *user_buf,
+ uint16 req_count, uint16 *resp_count);
+extern int dhd_dbg_monitor_get_rx_pkts(dhd_pub_t *dhdp, void __user *user_buf,
+ uint16 req_count, uint16 *resp_count);
+extern int dhd_dbg_detach_pkt_monitor(dhd_pub_t *dhdp);
+#endif /* DBG_PKT_MON */
+
+extern bool dhd_dbg_process_tx_status(dhd_pub_t *dhdp, void *pkt,
+ uint32 pktid, uint16 status);
+
+/* os wrapper function */
+extern int dhd_os_dbg_attach(dhd_pub_t *dhdp);
+extern void dhd_os_dbg_detach(dhd_pub_t *dhdp);
+extern int dhd_os_dbg_register_callback(int ring_id,
+ void (*dbg_ring_sub_cb)(void *ctx, const int ring_id, const void *data,
+ const uint32 len, const dhd_dbg_ring_status_t dbg_ring_status));
+extern int dhd_os_dbg_register_urgent_notifier(dhd_pub_t *dhdp,
+ void (*urgent_noti)(void *ctx, const void *data, const uint32 len, const uint32 fw_len));
+
+extern int dhd_os_start_logging(dhd_pub_t *dhdp, char *ring_name, int log_level,
+ int flags, int time_intval, int threshold);
+extern int dhd_os_reset_logging(dhd_pub_t *dhdp);
+extern int dhd_os_suppress_logging(dhd_pub_t *dhdp, bool suppress);
+
+extern int dhd_os_get_ring_status(dhd_pub_t *dhdp, int ring_id,
+ dhd_dbg_ring_status_t *dbg_ring_status);
+extern int dhd_os_trigger_get_ring_data(dhd_pub_t *dhdp, char *ring_name);
+extern int dhd_os_push_push_ring_data(dhd_pub_t *dhdp, int ring_id, void *data, int32 data_len);
+extern int dhd_os_dbg_get_feature(dhd_pub_t *dhdp, int32 *features);
+
+#ifdef DBG_PKT_MON
+extern int dhd_os_dbg_attach_pkt_monitor(dhd_pub_t *dhdp);
+extern int dhd_os_dbg_start_pkt_monitor(dhd_pub_t *dhdp);
+extern int dhd_os_dbg_monitor_tx_pkts(dhd_pub_t *dhdp, void *pkt,
+ uint32 pktid);
+extern int dhd_os_dbg_monitor_tx_status(dhd_pub_t *dhdp, void *pkt,
+ uint32 pktid, uint16 status);
+extern int dhd_os_dbg_monitor_rx_pkts(dhd_pub_t *dhdp, void *pkt);
+extern int dhd_os_dbg_stop_pkt_monitor(dhd_pub_t *dhdp);
+extern int dhd_os_dbg_monitor_get_tx_pkts(dhd_pub_t *dhdp,
+ void __user *user_buf, uint16 req_count, uint16 *resp_count);
+extern int dhd_os_dbg_monitor_get_rx_pkts(dhd_pub_t *dhdp,
+ void __user *user_buf, uint16 req_count, uint16 *resp_count);
+extern int dhd_os_dbg_detach_pkt_monitor(dhd_pub_t *dhdp);
+#endif /* DBG_PKT_MON */
+
+#ifdef DUMP_IOCTL_IOV_LIST
+extern void dhd_iov_li_append(dhd_pub_t *dhd, dll_t *list_head, dll_t *node);
+extern void dhd_iov_li_print(dll_t *list_head);
+extern void dhd_iov_li_delete(dhd_pub_t *dhd, dll_t *list_head);
+#endif /* DUMP_IOCTL_IOV_LIST */
+
+#ifdef DHD_DEBUG
+extern void dhd_mw_list_delete(dhd_pub_t *dhd, dll_t *list_head);
+#endif /* DHD_DEBUG */
+
+void print_roam_enhanced_log(prcd_event_log_hdr_t *plog_hdr);
+
+typedef void (*print_roam_enhance_log_func)(prcd_event_log_hdr_t *plog_hdr);
+typedef struct _pr_roam_tbl {
+ uint8 version;
+ uint8 id;
+ print_roam_enhance_log_func pr_func;
+} pr_roam_tbl_t;
+
+extern uint32 dhd_dbg_get_fwverbose(dhd_pub_t *dhdp);
+extern void dhd_dbg_set_fwverbose(dhd_pub_t *dhdp, uint32 new_val);
+#endif /* _dhd_debug_h_ */
diff --git a/bcmdhd.101.10.361.x/dhd_debug_linux.c b/bcmdhd.101.10.361.x/dhd_debug_linux.c
new file mode 100755
index 0000000..81aa730
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_debug_linux.c
@@ -0,0 +1,528 @@
+/*
+ * DHD debugability Linux os layer
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+#include <typedefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_dbg.h>
+#include <dhd_debug.h>
+
+#include <net/cfg80211.h>
+#include <wl_cfgvendor.h>
+#include <dhd_config.h>
+
+typedef void (*dbg_ring_send_sub_t)(void *ctx, const int ring_id, const void *data,
+ const uint32 len, const dhd_dbg_ring_status_t ring_status);
+typedef void (*dbg_urgent_noti_sub_t)(void *ctx, const void *data,
+ const uint32 len, const uint32 fw_len);
+
+static dbg_ring_send_sub_t ring_send_sub_cb[DEBUG_RING_ID_MAX];
+static dbg_urgent_noti_sub_t urgent_noti_sub_cb;
+typedef struct dhd_dbg_os_ring_info {
+ dhd_pub_t *dhdp;
+ int ring_id;
+ int log_level;
+ unsigned long interval;
+ struct delayed_work work;
+ uint64 tsoffset;
+} linux_dbgring_info_t;
+
+struct log_level_table dhd_event_map[] = {
+ {1, WIFI_EVENT_DRIVER_EAPOL_FRAME_TRANSMIT_REQUESTED, "DRIVER EAPOL TX REQ"},
+ {1, WIFI_EVENT_DRIVER_EAPOL_FRAME_RECEIVED, "DRIVER EAPOL RX"},
+ {2, WIFI_EVENT_DRIVER_SCAN_REQUESTED, "SCAN_REQUESTED"},
+ {2, WIFI_EVENT_DRIVER_SCAN_COMPLETE, "SCAN COMPLETE"},
+ {3, WIFI_EVENT_DRIVER_SCAN_RESULT_FOUND, "SCAN RESULT FOUND"},
+ {2, WIFI_EVENT_DRIVER_PNO_ADD, "PNO ADD"},
+ {2, WIFI_EVENT_DRIVER_PNO_REMOVE, "PNO REMOVE"},
+ {2, WIFI_EVENT_DRIVER_PNO_NETWORK_FOUND, "PNO NETWORK FOUND"},
+ {2, WIFI_EVENT_DRIVER_PNO_SCAN_REQUESTED, "PNO SCAN_REQUESTED"},
+ {1, WIFI_EVENT_DRIVER_PNO_SCAN_RESULT_FOUND, "PNO SCAN RESULT FOUND"},
+ {1, WIFI_EVENT_DRIVER_PNO_SCAN_COMPLETE, "PNO SCAN COMPLETE"}
+};
+
+static void
+debug_data_send(dhd_pub_t *dhdp, int ring_id, const void *data, const uint32 len,
+ const dhd_dbg_ring_status_t ring_status)
+{
+ struct net_device *ndev;
+ dbg_ring_send_sub_t ring_sub_send;
+ ndev = dhd_linux_get_primary_netdev(dhdp);
+ if (!ndev)
+ return;
+ if (!VALID_RING(ring_id))
+ return;
+ if (ring_send_sub_cb[ring_id]) {
+ ring_sub_send = ring_send_sub_cb[ring_id];
+ ring_sub_send(ndev, ring_id, data, len, ring_status);
+ }
+}
+
+static void
+dhd_os_dbg_urgent_notifier(dhd_pub_t *dhdp, const void *data, const uint32 len)
+{
+ struct net_device *ndev;
+ ndev = dhd_linux_get_primary_netdev(dhdp);
+ if (!ndev)
+ return;
+ if (urgent_noti_sub_cb) {
+ urgent_noti_sub_cb(ndev, data, len, dhdp->soc_ram_length);
+ }
+}
+
+static void
+dbg_ring_poll_worker(struct work_struct *work)
+{
+ struct delayed_work *d_work = to_delayed_work(work);
+ bool sched = TRUE;
+ dhd_dbg_ring_t *ring;
+ linux_dbgring_info_t *ring_info;
+ dhd_pub_t *dhdp;
+ int ringid;
+ dhd_dbg_ring_status_t ring_status;
+ void *buf;
+ dhd_dbg_ring_entry_t *hdr;
+ uint32 buflen, rlen;
+ unsigned long flags;
+
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ ring_info = container_of(d_work, linux_dbgring_info_t, work);
+ GCC_DIAGNOSTIC_POP();
+
+ dhdp = ring_info->dhdp;
+ ringid = ring_info->ring_id;
+
+ ring = &dhdp->dbg->dbg_rings[ringid];
+ DHD_DBG_RING_LOCK(ring->lock, flags);
+ dhd_dbg_get_ring_status(dhdp, ringid, &ring_status);
+
+ if (ring->wp > ring->rp) {
+ buflen = ring->wp - ring->rp;
+ } else if (ring->wp < ring->rp) {
+ buflen = ring->ring_size - ring->rp + ring->wp;
+ } else {
+ goto exit;
+ }
+
+ if (buflen > ring->ring_size) {
+ goto exit;
+ }
+
+ buf = MALLOCZ(dhdp->osh, buflen);
+ if (!buf) {
+ DHD_ERROR(("%s failed to allocate read buf\n", __FUNCTION__));
+ sched = FALSE;
+ goto exit;
+ }
+
+ DHD_DBG_RING_UNLOCK(ring->lock, flags);
+ rlen = dhd_dbg_pull_from_ring(dhdp, ringid, buf, buflen);
+ DHD_DBG_RING_LOCK(ring->lock, flags);
+
+ if (!ring->sched_pull) {
+ ring->sched_pull = TRUE;
+ }
+
+ hdr = (dhd_dbg_ring_entry_t *)buf;
+ while (rlen > 0) {
+ ring_status.read_bytes += ENTRY_LENGTH(hdr);
+ /* offset fw ts to host ts */
+ hdr->timestamp += ring_info->tsoffset;
+ debug_data_send(dhdp, ringid, hdr, ENTRY_LENGTH(hdr),
+ ring_status);
+ rlen -= ENTRY_LENGTH(hdr);
+ hdr = (dhd_dbg_ring_entry_t *)((char *)hdr + ENTRY_LENGTH(hdr));
+ }
+ MFREE(dhdp->osh, buf, buflen);
+
+exit:
+ if (sched) {
+ /* retrigger the work at the same interval */
+ if ((ring_status.written_bytes == ring_status.read_bytes) &&
+ (ring_info->interval)) {
+ schedule_delayed_work(d_work, ring_info->interval);
+ }
+ }
+ DHD_DBG_RING_UNLOCK(ring->lock, flags);
+
+ return;
+}
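dbg_ring_poll_worker() computes the pending byte count from the ring's write (wp) and read (rp) offsets, with the wp < rp branch handling wrap-around. A stand-alone model of that arithmetic:

    #include <assert.h>
    #include <stdint.h>

    /* Pending bytes between read (rp) and write (wp) offsets in a ring of
     * ring_size bytes, mirroring the wp/rp arithmetic above.
     */
    static uint32_t ring_pending(uint32_t wp, uint32_t rp, uint32_t ring_size)
    {
        if (wp > rp)
            return wp - rp;
        if (wp < rp)
            return ring_size - rp + wp; /* data wraps past the end */
        return 0; /* wp == rp: ring is empty, nothing to pull */
    }

    int main(void)
    {
        assert(ring_pending(300, 100, 1024) == 200); /* no wrap */
        assert(ring_pending(100, 900, 1024) == 224); /* wrapped */
        assert(ring_pending(512, 512, 1024) == 0);
        return 0;
    }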
+
+int
+dhd_os_dbg_register_callback(int ring_id, dbg_ring_send_sub_t callback)
+{
+ if (!VALID_RING(ring_id))
+ return BCME_RANGE;
+
+ ring_send_sub_cb[ring_id] = callback;
+ return BCME_OK;
+}
+
+int
+dhd_os_dbg_register_urgent_notifier(dhd_pub_t *dhdp, dbg_urgent_noti_sub_t urgent_noti_sub)
+{
+ if (!dhdp || !urgent_noti_sub)
+ return BCME_BADARG;
+ urgent_noti_sub_cb = urgent_noti_sub;
+
+ return BCME_OK;
+}
+
+int
+dhd_os_start_logging(dhd_pub_t *dhdp, char *ring_name, int log_level,
+ int flags, int time_intval, int threshold)
+{
+ int ret = BCME_OK;
+ int ring_id;
+ linux_dbgring_info_t *os_priv, *ring_info;
+
+ ring_id = dhd_dbg_find_ring_id(dhdp, ring_name);
+ if (!VALID_RING(ring_id))
+ return BCME_UNSUPPORTED;
+
+ DHD_INFO(("%s , log_level : %d, time_intval : %d, threshod %d Bytes\n",
+ __FUNCTION__, log_level, time_intval, threshold));
+
+ /* change the configuration */
+ ret = dhd_dbg_set_configuration(dhdp, ring_id, log_level, flags, threshold);
+ if (ret) {
+ DHD_ERROR(("dhd_set_configuration is failed : %d\n", ret));
+ return ret;
+ }
+
+ os_priv = dhd_dbg_get_priv(dhdp);
+ if (!os_priv)
+ return BCME_ERROR;
+ ring_info = &os_priv[ring_id];
+ ring_info->log_level = log_level;
+ if (time_intval == 0 || log_level == 0) {
+ ring_info->interval = 0;
+ cancel_delayed_work_sync(&ring_info->work);
+ } else {
+ ring_info->interval = msecs_to_jiffies(time_intval * MSEC_PER_SEC);
+ cancel_delayed_work_sync(&ring_info->work);
+ schedule_delayed_work(&ring_info->work, ring_info->interval);
+ }
+
+ return ret;
+}
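dhd_os_start_logging() treats time_intval as seconds, scaling by MSEC_PER_SEC before msecs_to_jiffies(). A user-space model of the conversion; HZ = 100 is assumed purely for illustration (real kernels use 100/250/1000), and the real kernel helper also rounds up:

    #include <assert.h>

    #define HZ           100   /* assumed tick rate, illustration only */
    #define MSEC_PER_SEC 1000L

    /* Simplified model of the kernel's msecs_to_jiffies(), no round-up. */
    static unsigned long msecs_to_jiffies_model(unsigned long msecs)
    {
        return (msecs * HZ) / MSEC_PER_SEC;
    }

    int main(void)
    {
        int time_intval = 3; /* seconds, as passed to dhd_os_start_logging() */
        assert(msecs_to_jiffies_model(time_intval * MSEC_PER_SEC) == 300);
        return 0;
    }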
+
+int
+dhd_os_reset_logging(dhd_pub_t *dhdp)
+{
+ int ret = BCME_OK;
+ int ring_id;
+ linux_dbgring_info_t *os_priv, *ring_info;
+
+ os_priv = dhd_dbg_get_priv(dhdp);
+ if (!os_priv)
+ return BCME_ERROR;
+
+ /* Stop all rings */
+ for (ring_id = DEBUG_RING_ID_INVALID + 1; ring_id < DEBUG_RING_ID_MAX; ring_id++) {
+ DHD_INFO(("%s: Stop ring buffer %d\n", __FUNCTION__, ring_id));
+
+ ring_info = &os_priv[ring_id];
+ /* cancel any pending work */
+ cancel_delayed_work_sync(&ring_info->work);
+ /* log level zero stops logging on that ring */
+ ring_info->log_level = 0;
+ ring_info->interval = 0;
+ /* change the configuration */
+ ret = dhd_dbg_set_configuration(dhdp, ring_id, 0, 0, 0);
+ if (ret) {
+ DHD_ERROR(("dhd_set_configuration is failed : %d\n", ret));
+ return ret;
+ }
+ }
+ return ret;
+}
+
+#define SUPPRESS_LOG_LEVEL 1
+int
+dhd_os_suppress_logging(dhd_pub_t *dhdp, bool suppress)
+{
+ int ret = BCME_OK;
+ int max_log_level;
+ int enable = (suppress) ? 0 : 1;
+ linux_dbgring_info_t *os_priv;
+
+ os_priv = dhd_dbg_get_priv(dhdp);
+ if (!os_priv)
+ return BCME_ERROR;
+
+ max_log_level = os_priv[FW_VERBOSE_RING_ID].log_level;
+
+ if (max_log_level == SUPPRESS_LOG_LEVEL) {
+ /* suppress logging in FW so it does not wake the host while the device is in suspend mode */
+ ret = dhd_iovar(dhdp, 0, "logtrace", (char *)&enable, sizeof(enable), NULL, 0,
+ TRUE);
+ if (ret < 0 && (ret != BCME_UNSUPPORTED)) {
+ DHD_ERROR(("logtrace is failed : %d\n", ret));
+ }
+ }
+
+ return ret;
+}
+
+int
+dhd_os_get_ring_status(dhd_pub_t *dhdp, int ring_id, dhd_dbg_ring_status_t *dbg_ring_status)
+{
+ return dhd_dbg_get_ring_status(dhdp, ring_id, dbg_ring_status);
+}
+
+int
+dhd_os_trigger_get_ring_data(dhd_pub_t *dhdp, char *ring_name)
+{
+ int ret = BCME_OK;
+ int ring_id;
+ linux_dbgring_info_t *os_priv, *ring_info;
+ ring_id = dhd_dbg_find_ring_id(dhdp, ring_name);
+ if (!VALID_RING(ring_id))
+ return BCME_UNSUPPORTED;
+ os_priv = dhd_dbg_get_priv(dhdp);
+ if (os_priv) {
+ ring_info = &os_priv[ring_id];
+ if (ring_info->interval) {
+ cancel_delayed_work_sync(&ring_info->work);
+ }
+ schedule_delayed_work(&ring_info->work, 0);
+ } else {
+ DHD_ERROR(("%s : os_priv is NULL\n", __FUNCTION__));
+ ret = BCME_ERROR;
+ }
+ return ret;
+}
+
+int
+dhd_os_push_push_ring_data(dhd_pub_t *dhdp, int ring_id, void *data, int32 data_len)
+{
+ int ret = BCME_OK, i;
+ dhd_dbg_ring_entry_t msg_hdr;
+ log_conn_event_t *event_data = (log_conn_event_t *)data;
+ linux_dbgring_info_t *os_priv, *ring_info = NULL;
+
+ if (!VALID_RING(ring_id))
+ return BCME_UNSUPPORTED;
+ os_priv = dhd_dbg_get_priv(dhdp);
+
+ if (os_priv) {
+ ring_info = &os_priv[ring_id];
+ } else
+ return BCME_NORESOURCE;
+
+ memset(&msg_hdr, 0, sizeof(dhd_dbg_ring_entry_t));
+
+ if (ring_id == DHD_EVENT_RING_ID) {
+ msg_hdr.type = DBG_RING_ENTRY_EVENT_TYPE;
+ msg_hdr.flags |= DBG_RING_ENTRY_FLAGS_HAS_TIMESTAMP;
+ msg_hdr.flags |= DBG_RING_ENTRY_FLAGS_HAS_BINARY;
+ msg_hdr.timestamp = osl_localtime_ns();
+ /* convert to ms */
+ msg_hdr.timestamp = DIV_U64_BY_U32(msg_hdr.timestamp, NSEC_PER_MSEC);
+ msg_hdr.len = data_len;
+ /* drop events whose log level is higher than the ring's current log level */
+ for (i = 0; i < ARRAYSIZE(dhd_event_map); i++) {
+ if ((dhd_event_map[i].tag == event_data->event) &&
+ dhd_event_map[i].log_level > ring_info->log_level) {
+ return ret;
+ }
+ }
+ }
+#ifdef DHD_DEBUGABILITY_LOG_DUMP_RING
+ else if (ring_id == FW_VERBOSE_RING_ID || ring_id == DRIVER_LOG_RING_ID ||
+ ring_id == ROAM_STATS_RING_ID) {
+ msg_hdr.type = DBG_RING_ENTRY_DATA_TYPE;
+ msg_hdr.flags |= DBG_RING_ENTRY_FLAGS_HAS_TIMESTAMP;
+ msg_hdr.timestamp = osl_localtime_ns();
+ msg_hdr.timestamp = DIV_U64_BY_U32(msg_hdr.timestamp, NSEC_PER_MSEC);
+ msg_hdr.len = strlen(data);
+ }
+#endif /* DHD_DEBUGABILITY_LOG_DUMP_RING */
+ ret = dhd_dbg_push_to_ring(dhdp, ring_id, &msg_hdr, event_data);
+ if (ret) {
+ DHD_ERROR(("%s : failed to push data into the ring (%d) with ret(%d)\n",
+ __FUNCTION__, ring_id, ret));
+ }
+
+ return ret;
+}
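The ring entry timestamp above is captured in nanoseconds and converted to milliseconds with DIV_U64_BY_U32(). A stand-alone model of that conversion:

    #include <assert.h>
    #include <stdint.h>

    #define NSEC_PER_MSEC 1000000ULL

    int main(void)
    {
        uint64_t ts_ns = 1654216923123456789ULL; /* arbitrary ns timestamp */
        uint64_t ts_ms = ts_ns / NSEC_PER_MSEC;  /* truncating division */

        assert(ts_ms == 1654216923123ULL);
        return 0;
    }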
+
+#ifdef DBG_PKT_MON
+int
+dhd_os_dbg_attach_pkt_monitor(dhd_pub_t *dhdp)
+{
+ return dhd_dbg_attach_pkt_monitor(dhdp, dhd_os_dbg_monitor_tx_pkts,
+ dhd_os_dbg_monitor_tx_status, dhd_os_dbg_monitor_rx_pkts);
+}
+
+int
+dhd_os_dbg_start_pkt_monitor(dhd_pub_t *dhdp)
+{
+ return dhd_dbg_start_pkt_monitor(dhdp);
+}
+
+int
+dhd_os_dbg_monitor_tx_pkts(dhd_pub_t *dhdp, void *pkt, uint32 pktid)
+{
+ return dhd_dbg_monitor_tx_pkts(dhdp, pkt, pktid);
+}
+
+int
+dhd_os_dbg_monitor_tx_status(dhd_pub_t *dhdp, void *pkt, uint32 pktid,
+ uint16 status)
+{
+ return dhd_dbg_monitor_tx_status(dhdp, pkt, pktid, status);
+}
+
+int
+dhd_os_dbg_monitor_rx_pkts(dhd_pub_t *dhdp, void *pkt)
+{
+ return dhd_dbg_monitor_rx_pkts(dhdp, pkt);
+}
+
+int
+dhd_os_dbg_stop_pkt_monitor(dhd_pub_t *dhdp)
+{
+ return dhd_dbg_stop_pkt_monitor(dhdp);
+}
+
+int
+dhd_os_dbg_monitor_get_tx_pkts(dhd_pub_t *dhdp, void __user *user_buf,
+ uint16 req_count, uint16 *resp_count)
+{
+ return dhd_dbg_monitor_get_tx_pkts(dhdp, user_buf, req_count, resp_count);
+}
+
+int
+dhd_os_dbg_monitor_get_rx_pkts(dhd_pub_t *dhdp, void __user *user_buf,
+ uint16 req_count, uint16 *resp_count)
+{
+ return dhd_dbg_monitor_get_rx_pkts(dhdp, user_buf, req_count, resp_count);
+}
+
+int
+dhd_os_dbg_detach_pkt_monitor(dhd_pub_t *dhdp)
+{
+ return dhd_dbg_detach_pkt_monitor(dhdp);
+}
+#endif /* DBG_PKT_MON */
+
+int
+dhd_os_dbg_get_feature(dhd_pub_t *dhdp, int32 *features)
+{
+ int ret = BCME_OK;
+#ifdef DEBUGABILITY
+#ifndef DEBUGABILITY_DISABLE_MEMDUMP
+ struct dhd_conf *conf = dhdp->conf;
+#endif /* !DEBUGABILITY_DISABLE_MEMDUMP */
+#endif
+
+ /* XXX : we need to find a way to get the features for dbg */
+ *features = 0;
+#ifdef DEBUGABILITY
+#ifndef DEBUGABILITY_DISABLE_MEMDUMP
+ // fix for RequestFirmwareDebugDump issue of VTS
+ if ((conf->chip != BCM4359_CHIP_ID) && (conf->chip != BCM43751_CHIP_ID) &&
+ (conf->chip != BCM43752_CHIP_ID) && (conf->chip != BCM4375_CHIP_ID))
+ *features |= DBG_MEMORY_DUMP_SUPPORTED;
+#endif /* !DEBUGABILITY_DISABLE_MEMDUMP */
+ if (FW_SUPPORTED(dhdp, logtrace)) {
+ *features |= DBG_CONNECT_EVENT_SUPPORTED;
+ *features |= DBG_VERBOSE_LOG_SUPPORTED;
+ }
+ if (FW_SUPPORTED(dhdp, hchk)) {
+ *features |= DBG_HEALTH_CHECK_SUPPORTED;
+ }
+#ifdef DBG_PKT_MON
+ if (FW_SUPPORTED(dhdp, d11status)) {
+ *features |= DBG_PACKET_FATE_SUPPORTED;
+ }
+#endif /* DBG_PKT_MON */
+#endif /* DEBUGABILITY */
+ return ret;
+}
+
+static void
+dhd_os_dbg_pullreq(void *os_priv, int ring_id)
+{
+ linux_dbgring_info_t *ring_info;
+
+ ring_info = &((linux_dbgring_info_t *)os_priv)[ring_id];
+ cancel_delayed_work(&ring_info->work);
+ schedule_delayed_work(&ring_info->work, 0);
+}
+
+int
+dhd_os_dbg_attach(dhd_pub_t *dhdp)
+{
+ int ret = BCME_OK;
+ linux_dbgring_info_t *os_priv, *ring_info;
+ int ring_id;
+
+ /* os_dbg data */
+ os_priv = MALLOCZ(dhdp->osh, sizeof(*os_priv) * DEBUG_RING_ID_MAX);
+ if (!os_priv)
+ return BCME_NOMEM;
+
+ for (ring_id = DEBUG_RING_ID_INVALID + 1; ring_id < DEBUG_RING_ID_MAX;
+ ring_id++) {
+ ring_info = &os_priv[ring_id];
+ INIT_DELAYED_WORK(&ring_info->work, dbg_ring_poll_worker);
+ ring_info->dhdp = dhdp;
+ ring_info->ring_id = ring_id;
+ }
+
+ ret = dhd_dbg_attach(dhdp, dhd_os_dbg_pullreq, dhd_os_dbg_urgent_notifier, os_priv);
+ if (ret)
+ MFREE(dhdp->osh, os_priv, sizeof(*os_priv) * DEBUG_RING_ID_MAX);
+
+ return ret;
+}
+
+void
+dhd_os_dbg_detach(dhd_pub_t *dhdp)
+{
+ linux_dbgring_info_t *os_priv, *ring_info;
+ int ring_id;
+ /* free os_dbg data */
+ os_priv = dhd_dbg_get_priv(dhdp);
+ if (!os_priv)
+ return;
+ /* abort any pending job */
+ for (ring_id = DEBUG_RING_ID_INVALID + 1; ring_id < DEBUG_RING_ID_MAX; ring_id++) {
+ ring_info = &os_priv[ring_id];
+ if (ring_info->interval) {
+ ring_info->interval = 0;
+ cancel_delayed_work_sync(&ring_info->work);
+ }
+ }
+ MFREE(dhdp->osh, os_priv, sizeof(*os_priv) * DEBUG_RING_ID_MAX);
+
+ return dhd_dbg_detach(dhdp);
+}
diff --git a/bcmdhd.101.10.361.x/dhd_event_log_filter.c b/bcmdhd.101.10.361.x/dhd_event_log_filter.c
new file mode 100755
index 0000000..7964c7b
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_event_log_filter.c
@@ -0,0 +1,3236 @@
+/*
+ * Wifi dongle status Filter and Report
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+/*
+ * Filter MODULE and Report MODULE
+ */
+
+#include <typedefs.h>
+#include <osl.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_dbg.h>
+#include <dhd_debug.h>
+#include <event_log.h>
+#include <event_trace.h>
+#include <bcmtlv.h>
+#include <bcmwifi_channels.h>
+#include <dhd_event_log_filter.h>
+#include <wl_cfg80211.h>
+#include <dhd_bitpack.h>
+#include <dhd_pktlog.h>
+#ifdef DHD_STATUS_LOGGING
+#include <dhd_statlog.h>
+#endif /* DHD_STATUS_LOGGING */
+
+#ifdef IL_BIGENDIAN
+#include <bcmendian.h>
+#define htod32(i) (bcmswap32(i))
+#define htod16(i) (bcmswap16(i))
+#define dtoh32(i) (bcmswap32(i))
+#define dtoh16(i) (bcmswap16(i))
+#define htodchanspec(i) htod16(i)
+#define dtohchanspec(i) dtoh16(i)
+#else
+#define htod32(i) (i)
+#define htod16(i) (i)
+#define dtoh32(i) (i)
+#define dtoh16(i) (i)
+#define htodchanspec(i) (i)
+#define dtohchanspec(i) (i)
+#endif /* IL_BIGENDIAN */
+
+#define DHD_FILTER_ERR_INTERNAL(fmt, ...) DHD_ERROR(("EWPF-" fmt, ##__VA_ARGS__))
+#ifdef DHD_REPLACE_LOG_INFO_TO_TRACE
+#define DHD_FILTER_TRACE_INTERNAL(fmt, ...) DHD_TRACE(("EWPF-" fmt, ##__VA_ARGS__))
+#else
+#define DHD_FILTER_TRACE_INTERNAL(fmt, ...) DHD_INFO(("EWPF-" fmt, ##__VA_ARGS__))
+#endif /* DHD_REPLACE_LOG_INFO_TO_TRACE */
+
+#define DHD_FILTER_ERR(x) DHD_FILTER_ERR_INTERNAL x
+#define DHD_FILTER_TRACE(x) DHD_FILTER_TRACE_INTERNAL x
+
+/* ========= EWP Filter functions ============= */
+//#define EWPF_DEBUG
+#define EWPF_DEBUG_BUF_LEN 512
+#define EWPF_VAL_CNT_PLINE 16
+
+#define EWPF_REPORT_MAX_DATA 32 /* MAX record per slice */
+
+#define EWPF_INVALID (-1)
+#define EWPF_XTLV_INVALID 0
+
+#define EWPF_MAX_IDX_TYPE 4
+#define EWPF_IDX_TYPE_SLICE 1
+#define EWPF_IDX_TYPE_IFACE 2
+#define EWPF_IDX_TYPE_EVENT 3
+#define EWPF_IDX_TYPE_KEY_INFO 4
+
+#define EWPF_MAX_SLICE 2 /* MAX slice in dongle */
+#define EWPF_SLICE_MAIN 0 /* SLICE ID for 5GHZ */
+#define EWPF_SLICE_AUX 1 /* SLICE ID for 2GHZ */
+
+#define EWPF_MAX_IFACE 2 /* MAX IFACE supported, 0: STA */
+#define EWPF_MAX_EVENT 1 /* MAX EVENT counter supported */
+#define EWPF_MAX_KEY_INFO 1 /* MAX KEY INFO counter supported */
+
+#define EWPF_ARM_TO_MSEC 1
+#define EWPF_NO_UNIT_CONV 1
+#define EWPF_MSEC_TO_SEC 1000
+#define EWPF_USEC_TO_MSEC 1000
+#define EWPF_NSEC_TO_MSEC 1000000
+#define EWPF_USEC_TO_SEC 1000000
+#define EWPF_EPOCH 1000
+#define EWPF_NONSEC_TO_SEC 1000000000
+#define EWPF_REPORT_YEAR_MUL 10000
+#define EWPF_REPORT_MON_MUL 100
+#define EWPF_REPORT_HOUR_MUL 10000
+#define EWPF_REPORT_MIN_MUL 100
+#define EWPF_REPORT_MINUTES 60
+#define EWPF_REPORT_YEAR_BASE 1900
+
+#define EWPF_NO_ABS FALSE
+#define EWPF_NEED_ABS TRUE
+
+#define EWPF_MAX_INFO_TYPE 5
+#define EWPF_INFO_VER 0
+#define EWPF_INFO_TYPE 1
+#define EWPF_INFO_ECNT 2
+#define EWPF_INFO_IOVAR 3
+#define EWPF_INFO_CPLOG 4
+#define EWPF_INFO_DHDSTAT 5
+
+#define EWPF_UPDATE_ARM_CYCLE_OFFSET 1
+
+/* EWPF element of slice type */
+typedef struct {
+ uint32 armcycle; /* dongle arm cycle for this record */
+ union {
+ wl_periodic_compact_cntrs_v1_t compact_cntr_v1;
+ wl_periodic_compact_cntrs_v2_t compact_cntr_v2;
+ wl_periodic_compact_cntrs_v3_t compact_cntr_v3;
+ };
+ evt_hist_compact_toss_stats_v1_t hist_tx_toss_stat;
+ evt_hist_compact_toss_stats_v1_t hist_rx_toss_stat;
+ wlc_btc_stats_v4_t btc_stat;
+ wl_compact_he_cnt_wlc_v2_t compact_he_cnt;
+} EWPF_slc_elem_t;
+
+/* EWPF element for interface type */
+typedef struct {
+ uint32 armcycle; /* dongle arm cycle for this record */
+ wl_if_stats_t if_stat;
+ wl_lqm_t lqm;
+ wl_if_infra_stats_t infra;
+ wl_if_mgt_stats_t mgmt_stat;
+ wl_if_state_compact_t if_comp_stat;
+ wl_adps_dump_summary_v2_t adps_dump_summary;
+ wl_adps_energy_gain_v1_t adps_energy_gain;
+ wl_roam_stats_v1_t roam_stat;
+} EWPF_ifc_elem_t;
+
+typedef struct {
+ uint32 first_armcycle; /* first dongle arm cycle for this record */
+ uint32 updated_armcycle; /* last updated dongle arm cycle for this record */
+ wl_event_based_statistics_v4_t event_stat;
+} EWPF_event_elem_t;
+
+typedef struct {
+ uint32 first_armcycle; /* first dongle arm cycle for this record */
+ uint32 updated_armcycle; /* last updated dongle arm cycle for this record */
+ key_update_info_v1_t key_update_info;
+} EWPF_key_info_elem_t;
+
+typedef struct {
+ uint32 first_armcycle; /* first dongle arm cycle for this record */
+ uint32 updated_armcycle; /* last updated dongle arm cycle for this record */
+ wl_roam_stats_v1_t roam_stat;
+} EWPF_roam_stats_event_elem_t;
+
+typedef struct {
+ int enabled; /* enabled/disabled */
+ dhd_pub_t *dhdp;
+ uint32 tmp_armcycle; /* global ARM CYCLE for TAG */
+ int idx_type; /* EWPF_IDX_TYPE_SLICE, IFACE, EVENT or KEY_INFO */
+ int xtlv_idx; /* Slice/Interface index : global for TAG */
+ void *s_ring[EWPF_MAX_SLICE];
+ void *i_ring[EWPF_MAX_IFACE];
+ void *e_ring[EWPF_MAX_EVENT];
+ void *k_ring[EWPF_MAX_KEY_INFO];
+
+ /* used by Report module */
+ uint8 last_bssid[ETHER_ADDR_LEN]; /* BSSID of last connected/requested AP */
+ int last_channel;
+ uint32 last_armcycle; /* ARM CYCLE prior last connection */
+} EWP_filter_t;
+
+/* status gathering functions : XTLV callback functions */
+typedef int (*EWPF_filter_cb)(void *ctx, const uint8 *data, uint16 type, uint16 len);
+static int evt_xtlv_print_cb(void *ctx, const uint8 *data, uint16 type, uint16 len);
+static int evt_xtlv_copy_cb(void *ctx, const uint8 *data, uint16 type, uint16 len);
+static int evt_xtlv_idx_cb(void *ctx, const uint8 *data, uint16 type, uint16 len);
+static int evt_xtlv_type_cb(void *ctx, const uint8 *data, uint16 type, uint16 len);
+static int filter_main_cb(void *ctx, const uint8 *data, uint16 type, uint16 len);
+static int evt_xtlv_roam_cb(void *ctx, const uint8 *data, uint16 type, uint16 len);
+
+/* ========= Event Handler functions and its callbacks: ============= */
+typedef struct _EWPF_tbl {
+ uint16 xtlv_id; /* XTLV ID, to handle */
+ EWPF_filter_cb cb_func; /* specific callback function, usually for a structure */
+ int idx_type; /* structure specific info: index type it belongs to */
+ int max_idx; /* structure specific info: ALLOWED MAX IDX */
+ uint32 offset; /* offset of structure in EWPF_elem-t, valid if cb is not null */
+ uint32 member_length; /* MAX length of reserved for this structure */
+ struct _EWPF_tbl *tbl; /* sub table if XTLV map to XLTV */
+} EWPF_tbl_t;
+
+/* Context structure for XTLV callback */
+typedef struct {
+ dhd_pub_t *dhdp;
+ EWPF_tbl_t *tbl;
+} EWPF_ctx_t;
+
+#define SLICE_INFO(a) EWPF_IDX_TYPE_SLICE, EWPF_MAX_SLICE, OFFSETOF(EWPF_slc_elem_t, a), \
+ sizeof(((EWPF_slc_elem_t *)NULL)->a)
+#define IFACE_INFO(a) EWPF_IDX_TYPE_IFACE, EWPF_MAX_IFACE, OFFSETOF(EWPF_ifc_elem_t, a), \
+ sizeof(((EWPF_ifc_elem_t *)NULL)->a)
+#define EVENT_INFO(a) EWPF_IDX_TYPE_EVENT, EWPF_MAX_EVENT, OFFSETOF(EWPF_event_elem_t, a), \
+ sizeof(((EWPF_event_elem_t *)NULL)->a)
+#define KEY_INFO(a) EWPF_IDX_TYPE_KEY_INFO, EWPF_MAX_KEY_INFO, OFFSETOF(EWPF_key_info_elem_t, a), \
+ sizeof(((EWPF_key_info_elem_t *)NULL)->a)
+
+#define SLICE_U_SIZE(a) sizeof(((EWPF_slc_elem_t *)NULL)->a)
+#define SLICE_INFO_UNION(a) EWPF_IDX_TYPE_SLICE, EWPF_MAX_SLICE, OFFSETOF(EWPF_slc_elem_t, a)
+#define NONE_INFO(a) 0, 0, a, 0
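The *_INFO() macros above record a member's offset and size at compile time without needing an instance, via OFFSETOF plus the sizeof(((type *)NULL)->member) idiom. A stand-alone sketch with a stand-in struct (not the real EWPF_slc_elem_t):

    #include <stddef.h>
    #include <stdio.h>

    /* Stand-in element struct for illustration. */
    struct elem {
        unsigned int armcycle;
        unsigned int stats[8];
    };

    /* Expands to two values: the member's offset and its size, both
     * computed at compile time; no struct instance is dereferenced.
     */
    #define MEMBER_INFO(m) offsetof(struct elem, m), sizeof(((struct elem *)NULL)->m)

    int main(void)
    {
        size_t off_len[2] = { MEMBER_INFO(stats) };

        /* typically prints offset=4 length=32 (padding permitting) */
        printf("offset=%zu length=%zu\n", off_len[0], off_len[1]);
        return 0;
    }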
+/* XTLV TBL for WL_SLICESTATS_XTLV_PERIODIC_STATE */
+static EWPF_tbl_t EWPF_periodic[] =
+{
+ {
+ WL_STATE_COMPACT_COUNTERS,
+ evt_xtlv_copy_cb,
+ SLICE_INFO(compact_cntr_v3),
+ NULL
+ },
+ {
+ WL_STATE_COMPACT_HE_COUNTERS,
+ evt_xtlv_copy_cb,
+ SLICE_INFO(compact_he_cnt),
+ NULL
+ },
+ {EWPF_XTLV_INVALID, NULL, NONE_INFO(0), NULL}
+};
+
+static EWPF_tbl_t EWPF_if_periodic[] =
+{
+ {
+ WL_STATE_IF_COMPACT_STATE,
+ evt_xtlv_copy_cb,
+ IFACE_INFO(if_comp_stat),
+ NULL
+ },
+ {
+ WL_STATE_IF_ADPS_STATE,
+ evt_xtlv_copy_cb,
+ IFACE_INFO(adps_dump_summary),
+ NULL
+ },
+ {
+ WL_STATE_IF_ADPS_ENERGY_GAIN,
+ evt_xtlv_copy_cb,
+ IFACE_INFO(adps_energy_gain),
+ NULL
+ },
+ {EWPF_XTLV_INVALID, NULL, NONE_INFO(0), NULL}
+};
+
+static EWPF_tbl_t EWPF_roam[] =
+{
+ {
+ WL_IFSTATS_XTLV_ROAM_STATS_EVENT,
+ evt_xtlv_print_cb,
+ NONE_INFO(0),
+ NULL
+ },
+ {
+ WL_IFSTATS_XTLV_ROAM_STATS_PERIODIC,
+ evt_xtlv_copy_cb,
+ IFACE_INFO(roam_stat),
+ NULL
+ },
+ {EWPF_XTLV_INVALID, NULL, NONE_INFO(0), NULL}
+};
+
+/* XTLV TBL for EVENT_LOG_TAG_STATS */
+static EWPF_tbl_t EWPF_main[] =
+{
+ /* MAIN XTLV */
+ {
+ WL_IFSTATS_XTLV_WL_SLICE,
+ evt_xtlv_type_cb,
+ NONE_INFO(0),
+ EWPF_main
+ },
+ {
+ WL_IFSTATS_XTLV_IF,
+ evt_xtlv_type_cb,
+ NONE_INFO(0),
+ EWPF_main
+ },
+ /* ID XTLVs */
+ {
+ WL_IFSTATS_XTLV_SLICE_INDEX,
+ evt_xtlv_idx_cb,
+ NONE_INFO(0),
+ NULL
+ },
+ {
+ WL_IFSTATS_XTLV_IF_INDEX,
+ evt_xtlv_idx_cb,
+ NONE_INFO(0),
+ NULL
+ },
+ /* NORMAL XTLVS */
+ {
+ WL_SLICESTATS_XTLV_PERIODIC_STATE,
+ NULL,
+ NONE_INFO(0),
+ EWPF_periodic
+ },
+ {
+ WL_IFSTATS_XTLV_IF_LQM,
+ evt_xtlv_copy_cb,
+ IFACE_INFO(lqm),
+ NULL
+ },
+ {
+ WL_IFSTATS_XTLV_GENERIC,
+ evt_xtlv_copy_cb,
+ IFACE_INFO(if_stat),
+ NULL
+ },
+ {
+ WL_IFSTATS_XTLV_MGT_CNT,
+ evt_xtlv_copy_cb,
+ IFACE_INFO(mgmt_stat),
+ NULL
+ },
+ {
+ WL_IFSTATS_XTLV_IF_PERIODIC_STATE,
+ NULL,
+ NONE_INFO(0),
+ EWPF_if_periodic
+ },
+ {
+ WL_IFSTATS_XTLV_INFRA_SPECIFIC,
+ evt_xtlv_copy_cb,
+ IFACE_INFO(infra),
+ NULL
+ },
+ {
+ WL_SLICESTATS_XTLV_HIST_TX_STATS,
+ evt_xtlv_copy_cb,
+ SLICE_INFO(hist_tx_toss_stat),
+ NULL
+ },
+ {
+ WL_SLICESTATS_XTLV_HIST_RX_STATS,
+ evt_xtlv_copy_cb,
+ SLICE_INFO(hist_rx_toss_stat),
+ NULL
+ },
+ {
+ WL_IFSTATS_XTLV_WL_SLICE_BTCOEX,
+ evt_xtlv_copy_cb,
+ SLICE_INFO(btc_stat),
+ NULL
+ },
+ {
+ WL_IFSTATS_XTLV_IF_EVENT_STATS,
+ evt_xtlv_copy_cb,
+ EVENT_INFO(event_stat),
+ NULL
+ },
+ {
+ WL_IFSTATS_XTLV_IF_EVENT_STATS,
+ evt_xtlv_print_cb,
+ NONE_INFO(0),
+ NULL
+ },
+ {
+ WL_IFSTATS_XTLV_KEY_PLUMB_INFO,
+ evt_xtlv_copy_cb,
+ KEY_INFO(key_update_info),
+ NULL
+ },
+ {
+ WL_IFSTATS_XTLV_ROAM_STATS_EVENT,
+ evt_xtlv_roam_cb,
+ NONE_INFO(0),
+ EWPF_roam
+ },
+ {
+ WL_IFSTATS_XTLV_ROAM_STATS_PERIODIC,
+ evt_xtlv_roam_cb,
+ IFACE_INFO(roam_stat),
+ EWPF_roam
+ },
+
+ {EWPF_XTLV_INVALID, NULL, NONE_INFO(0), NULL}
+};
+
+#if defined(DHD_EWPR_VER2) && defined(DHD_STATUS_LOGGING)
+
+#define EWP_DHD_STAT_SIZE 2
+
+uint8
+dhd_statlog_filter[] =
+{
+ ST(WLAN_POWER_ON), /* Wi-Fi Power on */
+ ST(WLAN_POWER_OFF), /* Wi-Fi Power off */
+ ST(ASSOC_START), /* connect to the AP triggered by upper layer */
+ ST(AUTH_DONE), /* complete to authenticate with the AP */
+ ST(ASSOC_REQ), /* send or receive Assoc Req */
+ ST(ASSOC_RESP), /* send or receive Assoc Resp */
+ ST(ASSOC_DONE), /* complete to associate with the AP */
+ ST(DISASSOC_START), /* disconnect to the associated AP by upper layer */
+ ST(DISASSOC_INT_START), /* initiate the disassoc by DHD */
+ ST(DISASSOC_DONE), /* complete to disconnect to the associated AP */
+ ST(DISASSOC), /* send or receive Disassoc */
+ ST(DEAUTH), /* send or receive Deauth */
+ ST(LINKDOWN), /* receive the link down event */
+ ST(REASSOC_START), /* reassoc the candidate AP */
+ ST(REASSOC_INFORM), /* inform reassoc completion to upper layer */
+ ST(REASSOC_DONE), /* complete to reassoc */
+ ST(EAPOL_M1), /* send or receive the EAPOL M1 */
+ ST(EAPOL_M2), /* send or receive the EAPOL M2 */
+ ST(EAPOL_M3), /* send or receive the EAPOL M3 */
+ ST(EAPOL_M4), /* send or receive the EAPOL M4 */
+ ST(EAPOL_GROUPKEY_M1), /* send or receive the EAPOL Group key handshake M1 */
+ ST(EAPOL_GROUPKEY_M2), /* send or receive the EAPOL Group key handshake M2 */
+ ST(EAP_REQ_IDENTITY), /* send or receive the EAP REQ IDENTITY */
+ ST(EAP_RESP_IDENTITY), /* send or receive the EAP RESP IDENTITY */
+ ST(EAP_REQ_TLS), /* send or receive the EAP REQ TLS */
+ ST(EAP_RESP_TLS), /* send or receive the EAP RESP TLS */
+ ST(EAP_REQ_LEAP), /* send or receive the EAP REQ LEAP */
+ ST(EAP_RESP_LEAP), /* send or receive the EAP RESP LEAP */
+ ST(EAP_REQ_TTLS), /* send or receive the EAP REQ TTLS */
+ ST(EAP_RESP_TTLS), /* send or receive the EAP RESP TTLS */
+ ST(EAP_REQ_AKA), /* send or receive the EAP REQ AKA */
+ ST(EAP_RESP_AKA), /* send or receive the EAP RESP AKA */
+ ST(EAP_REQ_PEAP), /* send or receive the EAP REQ PEAP */
+ ST(EAP_RESP_PEAP), /* send or receive the EAP RESP PEAP */
+ ST(EAP_REQ_FAST), /* send or receive the EAP REQ FAST */
+ ST(EAP_RESP_FAST), /* send or receive the EAP RESP FAST */
+ ST(EAP_REQ_PSK), /* send or receive the EAP REQ PSK */
+ ST(EAP_RESP_PSK), /* send or receive the EAP RESP PSK */
+ ST(EAP_REQ_AKAP), /* send or receive the EAP REQ AKAP */
+ ST(EAP_RESP_AKAP), /* send or receive the EAP RESP AKAP */
+ ST(EAP_SUCCESS), /* send or receive the EAP SUCCESS */
+ ST(EAP_FAILURE), /* send or receive the EAP FAILURE */
+ ST(EAPOL_START), /* send or receive the EAPOL-START */
+ ST(WSC_START), /* send or receive the WSC START */
+ ST(WSC_DONE), /* send or receive the WSC DONE */
+ ST(WPS_M1), /* send or receive the WPS M1 */
+ ST(WPS_M2), /* send or receive the WPS M2 */
+ ST(WPS_M3), /* send or receive the WPS M3 */
+ ST(WPS_M4), /* send or receive the WPS M4 */
+ ST(WPS_M5), /* send or receive the WPS M5 */
+ ST(WPS_M6), /* send or receive the WPS M6 */
+ ST(WPS_M7), /* send or receive the WPS M7 */
+ ST(WPS_M8), /* send or receive the WPS M8 */
+ ST(8021X_OTHER), /* send or receive the other 8021X frames */
+ ST(INSTALL_KEY), /* install the key */
+ ST(DELETE_KEY), /* remove the key */
+ ST(INSTALL_PMKSA), /* install PMKID information */
+ ST(INSTALL_OKC_PMK), /* install PMKID information for OKC */
+ ST(DHCP_DISCOVER), /* send or recv DHCP Discover */
+ ST(DHCP_OFFER), /* send or recv DHCP Offer */
+ ST(DHCP_REQUEST), /* send or recv DHCP Request */
+ ST(DHCP_DECLINE), /* send or recv DHCP Decline */
+ ST(DHCP_ACK), /* send or recv DHCP ACK */
+ ST(DHCP_NAK), /* send or recv DHCP NACK */
+ ST(DHCP_RELEASE), /* send or recv DHCP Release */
+ ST(DHCP_INFORM), /* send or recv DHCP Inform */
+ ST(REASSOC_SUCCESS), /* reassociation success */
+ ST(REASSOC_FAILURE), /* reassociation failure */
+ ST(AUTH_TIMEOUT), /* authentication timeout */
+ ST(AUTH_FAIL), /* authentication failure */
+ ST(AUTH_NO_ACK), /* authentication failure due to no ACK */
+ ST(AUTH_OTHERS), /* authentication failure with other status */
+ ST(ASSOC_TIMEOUT), /* association timeout */
+ ST(ASSOC_FAIL), /* association failure */
+ ST(ASSOC_NO_ACK), /* association failure due to no ACK */
+ ST(ASSOC_ABORT), /* association abort */
+ ST(ASSOC_UNSOLICITED), /* association unsolicited */
+ ST(ASSOC_NO_NETWORKS), /* association failure due to no networks */
+ ST(ASSOC_OTHERS), /* association failure with other status */
+ ST(REASSOC_DONE_OTHERS) /* complete to reassoc with other reason */
+};
+#endif /* DHD_EWPR_VER2 && DHD_STATUS_LOGGING */
+
+/* ========= Module functions : exposed to others ============= */
+int
+dhd_event_log_filter_init(dhd_pub_t *dhdp, uint8 *buf, uint32 buf_size)
+{
+
+ EWP_filter_t *filter;
+ int idx;
+ uint32 req_size;
+ uint32 s_ring_size; /* slice ring */
+ uint32 i_ring_size; /* interface ring */
+ uint32 e_ring_size; /* event counter ring */
+ uint32 k_ring_size; /* key info ring */
+ uint8 *buf_ptr = buf;
+ EWPF_ctx_t ctx;
+ wl_event_based_statistics_v4_t dummy_event_stat;
+ key_update_info_v1_t dummy_key_update_info;
+#if defined(DHD_EWPR_VER2) && defined(DHD_STATUS_LOGGING)
+ stat_bdmask_req_t req;
+#endif /* DHD_EWPR_VER2 && DHD_STATUS_LOGGING */
+
+ DHD_FILTER_ERR(("STARTED\n"));
+
+ if (!dhdp || !buf) {
+ DHD_FILTER_ERR(("INVALID PTR: dhdp:%p buf:%p\n", dhdp, buf));
+ return BCME_ERROR;
+ }
+
+ i_ring_size = s_ring_size = e_ring_size = k_ring_size = dhd_ring_get_hdr_size();
+ s_ring_size += ((uint32)sizeof(EWPF_slc_elem_t)) * EWPF_REPORT_MAX_DATA;
+ i_ring_size += ((uint32)sizeof(EWPF_ifc_elem_t)) * EWPF_REPORT_MAX_DATA;
+ e_ring_size += ((uint32)sizeof(EWPF_event_elem_t)) * EWPF_REPORT_MAX_DATA;
+ k_ring_size += ((uint32)sizeof(EWPF_key_info_elem_t)) * EWPF_REPORT_MAX_DATA;
+
+ req_size = s_ring_size * EWPF_MAX_SLICE + i_ring_size * EWPF_MAX_IFACE +
+ e_ring_size * EWPF_MAX_EVENT + k_ring_size * EWPF_MAX_KEY_INFO;
+ req_size += (uint32)sizeof(EWP_filter_t);
+
+ if (buf_size < req_size) {
+ DHD_FILTER_ERR(("BUF SIZE IS TO SHORT: req:%d buf_size:%d\n",
+ req_size, buf_size));
+ return BCME_ERROR;
+ }
+
+ BCM_REFERENCE(dhdp);
+ filter = (EWP_filter_t *)buf;
+ buf_ptr += sizeof(EWP_filter_t);
+
+ /* initialize control block */
+ memset(filter, 0, sizeof(EWP_filter_t));
+
+ filter->idx_type = EWPF_INVALID;
+ filter->xtlv_idx = EWPF_INVALID;
+ filter->tmp_armcycle = 0;
+
+ for (idx = 0; idx < EWPF_MAX_SLICE; idx++) {
+ filter->s_ring[idx] = dhd_ring_init(dhdp, buf_ptr, s_ring_size,
+ sizeof(EWPF_slc_elem_t), EWPF_REPORT_MAX_DATA,
+ DHD_RING_TYPE_FIXED);
+ if (!filter->s_ring[idx]) {
+ DHD_FILTER_ERR(("FAIL TO INIT SLICE RING: %d\n", idx));
+ return BCME_ERROR;
+ }
+ buf_ptr += s_ring_size;
+ }
+
+ for (idx = 0; idx < EWPF_MAX_IFACE; idx++) {
+ filter->i_ring[idx] = dhd_ring_init(dhdp, buf_ptr, i_ring_size,
+ sizeof(EWPF_ifc_elem_t), EWPF_REPORT_MAX_DATA,
+ DHD_RING_TYPE_FIXED);
+ if (!filter->i_ring[idx]) {
+ DHD_FILTER_ERR(("FAIL TO INIT INTERFACE RING: %d\n", idx));
+ return BCME_ERROR;
+ }
+ buf_ptr += i_ring_size;
+ }
+
+ for (idx = 0; idx < EWPF_MAX_EVENT; idx++) {
+ filter->e_ring[idx] = dhd_ring_init(dhdp, buf_ptr, e_ring_size,
+ sizeof(EWPF_event_elem_t), EWPF_REPORT_MAX_DATA,
+ DHD_RING_TYPE_FIXED);
+ if (!filter->e_ring[idx]) {
+ DHD_FILTER_ERR(("FAIL TO INIT INTERFACE RING: %d\n", idx));
+ return BCME_ERROR;
+ }
+ buf_ptr += e_ring_size;
+ }
+
+ for (idx = 0; idx < EWPF_MAX_KEY_INFO; idx++) {
+ filter->k_ring[idx] = dhd_ring_init(dhdp, buf_ptr, k_ring_size,
+ sizeof(EWPF_key_info_elem_t), EWPF_REPORT_MAX_DATA,
+ DHD_RING_TYPE_FIXED);
+ if (!filter->k_ring[idx]) {
+ DHD_FILTER_ERR(("FAIL TO INIT INTERFACE RING: %d\n", idx));
+ return BCME_ERROR;
+ }
+ buf_ptr += k_ring_size;
+ }
+
+ dhdp->event_log_filter = filter;
+ filter->dhdp = dhdp;
+ filter->enabled = TRUE;
+
+ /*
+ * put dummy elements into the event-based rings to prevent errors
+ * in case no event has happened when data collection is triggered
+ */
+ ctx.dhdp = dhdp;
+ ctx.tbl = EWPF_main;
+ memset(&dummy_event_stat, 0x00, sizeof(dummy_event_stat));
+ evt_xtlv_copy_cb(&ctx, (uint8 *)&dummy_event_stat, WL_IFSTATS_XTLV_IF_EVENT_STATS,
+ sizeof(wl_event_based_statistics_v4_t));
+
+ memset(&dummy_key_update_info, 0x00, sizeof(dummy_key_update_info));
+ evt_xtlv_copy_cb(&ctx, (uint8 *)&dummy_key_update_info, WL_IFSTATS_XTLV_KEY_PLUMB_INFO,
+ sizeof(key_update_info_v1_t));
+
+#if defined(DHD_EWPR_VER2) && defined(DHD_STATUS_LOGGING)
+ /* create status filter for bigdata logging */
+ req.req_buf = dhd_statlog_filter;
+ req.req_buf_len = sizeof(dhd_statlog_filter);
+ dhd_statlog_generate_bdmask(dhdp, &req);
+#endif /* DHD_EWPR_VER2 && DHD_STATUS_LOGGING */
+
+ return BCME_OK;
+}
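dhd_event_log_filter_init() budgets one fixed-size ring per slice/interface/event/key-info index: each ring is a ring header plus EWPF_REPORT_MAX_DATA elements, and the total adds the filter control block on top. A stand-alone model of that sizing arithmetic; the element sizes below are placeholders, not the real structure sizes:

    #include <stdio.h>

    #define MAX_DATA 32 /* EWPF_REPORT_MAX_DATA */
    #define HDR_SZ   64 /* stand-in for dhd_ring_get_hdr_size() */

    int main(void)
    {
        /* placeholder element sizes, illustration only */
        unsigned s_elem = 512, i_elem = 768, e_elem = 256, k_elem = 128;
        unsigned s_ring = HDR_SZ + s_elem * MAX_DATA;
        unsigned i_ring = HDR_SZ + i_elem * MAX_DATA;
        unsigned e_ring = HDR_SZ + e_elem * MAX_DATA;
        unsigned k_ring = HDR_SZ + k_elem * MAX_DATA;
        /* 2 slices + 2 interfaces + 1 event + 1 key-info ring */
        unsigned req = s_ring * 2 + i_ring * 2 + e_ring + k_ring;

        printf("required buffer: %u bytes (+ control block)\n", req);
        return 0;
    }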
+
+void
+dhd_event_log_filter_deinit(dhd_pub_t *dhdp)
+{
+ EWP_filter_t *filter;
+ int idx;
+
+ if (!dhdp) {
+ return;
+ }
+
+ if (dhdp->event_log_filter) {
+ filter = (EWP_filter_t *)dhdp->event_log_filter;
+ for (idx = 0; idx < EWPF_MAX_SLICE; idx ++) {
+ dhd_ring_deinit(dhdp, filter->s_ring[idx]);
+ }
+ for (idx = 0; idx < EWPF_MAX_IFACE; idx ++) {
+ dhd_ring_deinit(dhdp, filter->i_ring[idx]);
+ }
+ for (idx = 0; idx < EWPF_MAX_EVENT; idx ++) {
+ dhd_ring_deinit(dhdp, filter->e_ring[idx]);
+ }
+ for (idx = 0; idx < EWPF_MAX_KEY_INFO; idx ++) {
+ dhd_ring_deinit(dhdp, filter->k_ring[idx]);
+ }
+ dhdp->event_log_filter = NULL;
+ }
+}
+
+void
+dhd_event_log_filter_notify_connect_request(dhd_pub_t *dhdp, uint8 *bssid, int channel)
+{
+ EWP_filter_t *filter;
+ void *last_elem;
+
+ if (!dhdp || !dhdp->event_log_filter) {
+ return;
+ }
+
+ filter = (EWP_filter_t *)dhdp->event_log_filter;
+ if (filter->enabled != TRUE) {
+ DHD_FILTER_ERR(("EWP Filter is not enabled\n"));
+ return;
+ }
+
+ memcpy(filter->last_bssid, bssid, ETHER_ADDR_LEN);
+ filter->last_channel = channel;
+
+ /* Refer STA interface */
+ last_elem = dhd_ring_get_last(filter->i_ring[0]);
+ if (last_elem == NULL) {
+ filter->last_armcycle = 0;
+ } else {
+ /* EXCLUDE before connect start */
+ filter->last_armcycle = *(uint32 *)last_elem + EWPF_EPOCH + 1;
+ }
+}
+
+void
+dhd_event_log_filter_notify_connect_done(dhd_pub_t *dhdp, uint8 *bssid, int roam)
+{
+ EWP_filter_t *filter;
+ void *last_elem;
+ int channel;
+ char buf[EWPF_DEBUG_BUF_LEN];
+ int ret;
+ uint32 armcycle;
+ struct channel_info *ci;
+
+ if (!dhdp || !dhdp->event_log_filter) {
+ return;
+ }
+
+ filter = (EWP_filter_t *)dhdp->event_log_filter;
+ if (filter->enabled != TRUE) {
+ DHD_FILTER_ERR(("EWP Filter is not enabled\n"));
+ return;
+ }
+
+ /* GET CHANNEL */
+ *(uint32 *)buf = htod32(EWPF_DEBUG_BUF_LEN);
+ ret = dhd_wl_ioctl_cmd(dhdp, WLC_GET_CHANNEL, buf, EWPF_DEBUG_BUF_LEN, FALSE, 0);
+ if (ret != BCME_OK) {
+ DHD_FILTER_ERR(("FAIL TO GET BSS INFO: %d\n", ret));
+ return;
+ }
+
+ ci = (struct channel_info *)(buf + sizeof(uint32));
+ channel = dtoh32(ci->hw_channel);
+ DHD_FILTER_TRACE(("CHANNEL:prev %d new:%d\n", filter->last_channel, channel));
+
+ memcpy(filter->last_bssid, bssid, ETHER_ADDR_LEN);
+ filter->last_channel = channel;
+ if (roam == FALSE) {
+ return;
+ }
+
+ /* update connect time for roam */
+ /* Refer STA interface */
+ last_elem = dhd_ring_get_last(filter->i_ring[0]);
+ if (last_elem == NULL) {
+ armcycle = 0;
+ } else {
+ /* EXCLUDE before roam done */
+ armcycle = *(uint32 *)last_elem + EWPF_EPOCH + 1;
+ }
+
+ filter->last_armcycle = armcycle;
+}
+
+static int
+evt_xtlv_print_cb(void *ctx, const uint8 *data, uint16 type, uint16 len)
+{
+ EWPF_ctx_t *cur_ctx = (EWPF_ctx_t *)ctx;
+ EWP_filter_t *filter = (EWP_filter_t *)cur_ctx->dhdp->event_log_filter;
+ uint32 armcycle = 0;
+ uint8 bssid[ETHER_ADDR_LEN];
+ uint32 initial_assoc_time = 0;
+ uint32 prev_roam_time = 0;
+ uint32 last_roam_event_type = 0;
+ uint32 last_roam_event_status = 0;
+ uint32 last_roam_event_reason = 0;
+ wl_wips_event_info_t wips_event;
+ bzero(&wips_event, sizeof(wips_event));
+
+ DHD_FILTER_TRACE(("%s type:%d %x len:%d %x\n", __FUNCTION__, type, type, len, len));
+
+ /* get current armcycle */
+ if (filter) {
+ armcycle = filter->tmp_armcycle;
+ }
+ if (type == WL_IFSTATS_XTLV_IF_EVENT_STATS) {
+ wl_event_based_statistics_v1_t *elem;
+
+ elem = (wl_event_based_statistics_v1_t *)(uintptr_t)data;
+ if (elem->txdeauthivalclass > 0) {
+ memcpy(bssid, &elem->BSSID, ETHER_ADDR_LEN);
+ DHD_ERROR(("DHD STA sent DEAUTH frame with invalid class : %d times"
+ ", BSSID("MACDBG")\n", elem->txdeauthivalclass, MAC2STRDBG(bssid)));
+ }
+ if (elem->version == WL_EVENT_STATISTICS_VER_2) {
+ wl_event_based_statistics_v2_t *elem_v2;
+
+ elem_v2 = (wl_event_based_statistics_v2_t *)(uintptr_t)data;
+ memcpy(&wips_event.bssid, &elem_v2->last_deauth, ETHER_ADDR_LEN);
+ wips_event.misdeauth = elem_v2->misdeauth;
+ wips_event.current_RSSI = elem_v2->cur_rssi;
+ wips_event.deauth_RSSI = elem_v2->deauth_rssi;
+ wips_event.timestamp = elem_v2->timestamp;
+ } else if (elem->version == WL_EVENT_STATISTICS_VER_3) {
+ wl_event_based_statistics_v3_t *elem_v3;
+
+ elem_v3 = (wl_event_based_statistics_v3_t *)(uintptr_t)data;
+ memcpy(&wips_event.bssid, &elem_v3->last_deauth, ETHER_ADDR_LEN);
+ wips_event.misdeauth = elem_v3->misdeauth;
+ wips_event.current_RSSI = elem_v3->cur_rssi;
+ wips_event.deauth_RSSI = elem_v3->deauth_rssi;
+ wips_event.timestamp = elem_v3->timestamp;
+ /* roam statistics */
+ initial_assoc_time = elem_v3->initial_assoc_time;
+ prev_roam_time = elem_v3->prev_roam_time;
+ last_roam_event_type = elem_v3->last_roam_event_type;
+ last_roam_event_status = elem_v3->last_roam_event_status;
+ last_roam_event_reason = elem_v3->last_roam_event_reason;
+ } else if (elem->version == WL_EVENT_STATISTICS_VER_4) {
+ wl_event_based_statistics_v4_t *elem_v4;
+
+ elem_v4 = (wl_event_based_statistics_v4_t *)(uintptr_t)data;
+ memcpy(&wips_event.bssid, &elem_v4->last_deauth, ETHER_ADDR_LEN);
+ wips_event.misdeauth = elem_v4->misdeauth;
+ wips_event.current_RSSI = elem_v4->cur_rssi;
+ wips_event.deauth_RSSI = elem_v4->deauth_rssi;
+ wips_event.timestamp = elem_v4->timestamp;
+ }
+ if (wips_event.misdeauth > 1) {
+ DHD_ERROR(("WIPS attack!! cnt=%d, curRSSI=%d, deauthRSSI=%d "
+ ", time=%d, MAC="MACDBG"\n",
+ wips_event.misdeauth, wips_event.current_RSSI,
+ wips_event.deauth_RSSI, wips_event.timestamp,
+ MAC2STRDBG(&wips_event.bssid)));
+#if defined(WL_CFG80211) && defined(WL_WIPSEVT)
+ wl_cfg80211_wips_event_ext(&wips_event);
+#endif /* WL_CFG80211 && WL_WIPSEVT */
+ }
+ } else if (type == WL_IFSTATS_XTLV_ROAM_STATS_EVENT) {
+ wl_roam_stats_v1_t *roam_elem;
+ roam_elem = (wl_roam_stats_v1_t *)(uintptr_t)data;
+ if (roam_elem->version == WL_ROAM_STATS_VER_1) {
+ wl_roam_stats_v1_t *roam_elem_v1;
+
+ roam_elem_v1 = (wl_roam_stats_v1_t *)(uintptr_t)data;
+ /* roam statistics */
+ initial_assoc_time = roam_elem_v1->initial_assoc_time;
+ prev_roam_time = roam_elem_v1->prev_roam_time;
+ last_roam_event_type = roam_elem_v1->last_roam_event_type;
+ last_roam_event_status = roam_elem_v1->last_roam_event_status;
+ last_roam_event_reason = roam_elem_v1->last_roam_event_reason;
+ }
+ } else {
+ DHD_FILTER_ERR(("%s TYPE(%d) IS NOT SUPPORTED TO PRINT\n",
+ __FUNCTION__, type));
+ return BCME_ERROR;
+ }
+ if (initial_assoc_time > 0 && prev_roam_time > 0) {
+ DHD_ERROR(("Last roam event before disconnection : "
+ "current armcycle %d, initial assoc time %d, "
+ "last event time %d, type %d, status %d, reason %d\n",
+ armcycle, initial_assoc_time, prev_roam_time,
+ last_roam_event_type, last_roam_event_status,
+ last_roam_event_reason));
+ }
+
+ return BCME_OK;
+}
+
+#ifdef BCM_SDC
+static int
+evt_get_last_toss_hist(uint8 *ptr, const uint8 *data, uint16 len)
+{
+ bcm_xtlv_t *bcm_xtlv_desc = (bcm_xtlv_t *)data;
+ wl_hist_compact_toss_stats_v2_t *ewp_stats;
+ evt_hist_compact_toss_stats_v1_t bidata_stats;
+ int16 max_rcidx = EWPF_INVALID, secnd_rcidx = EWPF_INVALID;
+ uint16 cur_rnidx = 0, prev_rnidx = 0;
+ uint16 max_rccnt = 0, cur_rccnt = 0;
+ uint16 idx;
+
+ if (!ptr || !data) {
+ return BCME_ERROR;
+ }
+
+ if (bcm_xtlv_desc->len != sizeof(wl_hist_compact_toss_stats_v2_t)) {
+ DHD_FILTER_ERR(("%s : size is not matched %d\n", __FUNCTION__,
+ bcm_xtlv_desc->len));
+ return BCME_ERROR;
+ }
+
+ ewp_stats = (wl_hist_compact_toss_stats_v2_t *)(&bcm_xtlv_desc->data[0]);
+ if (ewp_stats->htr_type == WL_STATE_HIST_TX_TOSS_REASONS) {
+ if (ewp_stats->version != WL_HIST_COMPACT_TOSS_STATS_TX_VER_2) {
+ DHD_FILTER_ERR(("%s : unsupported version %d (type: %d)\n",
+ __FUNCTION__, ewp_stats->version, ewp_stats->htr_type));
+ return BCME_ERROR;
+ }
+ } else if (ewp_stats->htr_type == WL_STATE_HIST_RX_TOSS_REASONS) {
+ if (ewp_stats->version != WL_HIST_COMPACT_TOSS_STATS_RX_VER_2) {
+ DHD_FILTER_ERR(("%s : unsupported version %d (type: %d)\n",
+ __FUNCTION__, ewp_stats->version, ewp_stats->htr_type));
+ return BCME_ERROR;
+ }
+ } else {
+ DHD_FILTER_ERR(("%s : unsupported type %d\n", __FUNCTION__,
+ ewp_stats->htr_type));
+ return BCME_ERROR;
+ }
+ /*
+ * htr_rnidx points to the next empty slot to be used;
+ * step back to get the most recent valid index
+ */
+ if (ewp_stats->htr_rnidx > 0) {
+ cur_rnidx = ewp_stats->htr_rnidx - 1;
+ } else {
+ cur_rnidx = WLC_HIST_TOSS_LEN - 1;
+ }
+ if (cur_rnidx > 0) {
+ prev_rnidx = cur_rnidx - 1;
+ } else {
+ prev_rnidx = WLC_HIST_TOSS_LEN - 1;
+ }
+ /*
+ * Find the largest toss reason count
+ */
+ for (idx = 0; idx < WLC_HIST_TOSS_LEN; idx ++) {
+ cur_rccnt = (uint16)((ewp_stats->htr_rc[idx] &
+ HIST_TOSS_RC_COUNT_MASK)>>HIST_TOSS_RC_COUNT_POS);
+ DHD_FILTER_TRACE(("%s: idx %d htr_rc %04x cur_rccnt %d\n",
+ __FUNCTION__, idx, ewp_stats->htr_rc[idx], cur_rccnt));
+ if (ewp_stats->htr_rc_ts[idx] && max_rccnt < cur_rccnt) {
+ max_rccnt = cur_rccnt;
+ secnd_rcidx = max_rcidx;
+ max_rcidx = idx;
+ DHD_FILTER_TRACE(("%s: max_rcidx updated -"
+ "max_rcidx %d secnd_rcidx %d\n",
+ __FUNCTION__, max_rcidx, secnd_rcidx));
+ }
+ }
+
+ memset(&bidata_stats, 0x00, sizeof(bidata_stats));
+ bidata_stats.version = ewp_stats->version;
+ bidata_stats.htr_type = ewp_stats->htr_type;
+ bidata_stats.htr_num = ewp_stats->htr_num;
+ bidata_stats.htr_rn_last = ewp_stats->htr_running[cur_rnidx];
+ bidata_stats.htr_rn_ts_last = ewp_stats->htr_rn_ts[cur_rnidx];
+ bidata_stats.htr_rn_prev = ewp_stats->htr_running[prev_rnidx];
+ bidata_stats.htr_rn_ts_prev = ewp_stats->htr_rn_ts[prev_rnidx];
+ if (max_rcidx != EWPF_INVALID) {
+ bidata_stats.htr_rc_max = ewp_stats->htr_rc[max_rcidx];
+ bidata_stats.htr_rc_ts_max = ewp_stats->htr_rc_ts[max_rcidx];
+ }
+ if (secnd_rcidx != EWPF_INVALID) {
+ bidata_stats.htr_rc_secnd = ewp_stats->htr_rc[secnd_rcidx];
+ bidata_stats.htr_rc_ts_secnd = ewp_stats->htr_rc_ts[secnd_rcidx];
+ }
+ DHD_FILTER_TRACE(("%s: ver %d type %d num %d "
+ "htr_rn_last %d htr_rn_ts_last %d htr_rn_prev %d htr_rn_ts_prev %d "
+ "htr_rc_max %d htr_rc_ts_max %d htr_rc_secnd %d htr_rc_ts_secnd %d\n",
+ __FUNCTION__, bidata_stats.version,
+ bidata_stats.htr_type, bidata_stats.htr_num,
+ bidata_stats.htr_rn_last, bidata_stats.htr_rn_ts_last,
+ bidata_stats.htr_rn_prev, bidata_stats.htr_rn_ts_prev,
+ bidata_stats.htr_rc_max, bidata_stats.htr_rc_ts_max,
+ bidata_stats.htr_rc_secnd, bidata_stats.htr_rc_ts_secnd));
+
+ memcpy(ptr, &bidata_stats, sizeof(bidata_stats));
+
+ return BCME_OK;
+}
+#endif /* BCM_SDC */
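The scan in evt_get_last_toss_hist() keeps the index of the largest toss-reason count and, as runner-up, the index that previously held the maximum (not necessarily the true second-largest), considering only slots with a nonzero timestamp. A stand-alone model of that single-pass scan, with the timestamp gate omitted for brevity:

    #include <assert.h>
    #include <stdint.h>

    #define INVALID_IDX (-1)

    /* Track the maximum and the displaced previous maximum in one pass,
     * mirroring the max_rcidx/secnd_rcidx bookkeeping above.
     */
    static void find_top2(const uint16_t *cnt, int n, int *max_i, int *secnd_i)
    {
        uint16_t max_c = 0;
        int i;

        *max_i = *secnd_i = INVALID_IDX;
        for (i = 0; i < n; i++) {
            if (cnt[i] > max_c) {
                max_c = cnt[i];
                *secnd_i = *max_i; /* old max becomes the runner-up */
                *max_i = i;
            }
        }
    }

    int main(void)
    {
        uint16_t cnt[] = { 3, 9, 1, 7 };
        int max_i, secnd_i;

        find_top2(cnt, 4, &max_i, &secnd_i);
        /* 7 is the true second-largest, but index 0 (count 3) is kept,
         * because only displaced maxima are remembered. */
        assert(max_i == 1 && secnd_i == 0);
        return 0;
    }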
+
+static int
+evt_xtlv_copy_cb(void *ctx, const uint8 *data, uint16 type, uint16 len)
+{
+ EWPF_ctx_t *cur_ctx = (EWPF_ctx_t *)ctx;
+ EWP_filter_t *filter = (EWP_filter_t *)cur_ctx->dhdp->event_log_filter;
+ uint32 *armcycle;
+ EWPF_tbl_t *tbl;
+ void *ring;
+ void *target;
+ uint8 *ptr;
+ int tbl_idx;
+ uint32 elem_size;
+
+ DHD_FILTER_TRACE(("%s type:%d %x len:%d %x\n", __FUNCTION__, type, type, len, len));
+
+ for (tbl_idx = 0; ; tbl_idx++) {
+ if (cur_ctx->tbl[tbl_idx].xtlv_id == EWPF_XTLV_INVALID) {
+ DHD_FILTER_ERR(("%s NOT SUPPORTED TYPE(%d)\n", __FUNCTION__, type));
+ return BCME_OK;
+ }
+ if (cur_ctx->tbl[tbl_idx].xtlv_id == type) {
+ tbl = &cur_ctx->tbl[tbl_idx];
+ break;
+ }
+ }
+
+ /* Set index type and xtlv_idx for event stats and key plumb info */
+ if (type == WL_IFSTATS_XTLV_IF_EVENT_STATS) {
+ filter->idx_type = EWPF_IDX_TYPE_EVENT;
+ filter->xtlv_idx = 0;
+ DHD_FILTER_TRACE(("EVENT XTLV\n"));
+ } else if (type == WL_IFSTATS_XTLV_KEY_PLUMB_INFO) {
+ filter->idx_type = EWPF_IDX_TYPE_KEY_INFO;
+ filter->xtlv_idx = 0;
+ DHD_FILTER_TRACE(("KEY INFO XTLV\n"));
+ }
+
+ /* Check Validation */
+ if (filter->idx_type == EWPF_INVALID ||
+ filter->xtlv_idx == EWPF_INVALID ||
+ filter->idx_type != tbl->idx_type ||
+ filter->xtlv_idx >= tbl->max_idx) {
+ DHD_FILTER_ERR(("XTLV VALIDATION FAILED: type:%x xtlv:%x idx:%d\n",
+ filter->idx_type, tbl->xtlv_id, filter->xtlv_idx));
+ return BCME_OK;
+ }
+
+ /* SET RING INFO */
+ if (filter->idx_type == EWPF_IDX_TYPE_SLICE) {
+ ring = filter->s_ring[filter->xtlv_idx];
+ elem_size = sizeof(EWPF_slc_elem_t);
+ } else if (filter->idx_type == EWPF_IDX_TYPE_IFACE) {
+ ring = filter->i_ring[filter->xtlv_idx];
+ elem_size = sizeof(EWPF_ifc_elem_t);
+ } else if (filter->idx_type == EWPF_IDX_TYPE_EVENT) {
+ DHD_FILTER_TRACE(("%s: EWPF_IDX_TYPE_EVENT FOUND\n",
+ __FUNCTION__));
+ ring = filter->e_ring[filter->xtlv_idx];
+ elem_size = sizeof(EWPF_event_elem_t);
+ } else if (filter->idx_type == EWPF_IDX_TYPE_KEY_INFO) {
+ DHD_FILTER_TRACE(("%s: EWPF_IDX_TYPE_KEY_INFO FOUND\n",
+ __FUNCTION__));
+ ring = filter->k_ring[filter->xtlv_idx];
+ elem_size = sizeof(EWPF_key_info_elem_t);
+ } else {
+ DHD_FILTER_TRACE(("%s unsupported idx_type:%d\n",
+ __FUNCTION__, filter->idx_type));
+ return BCME_OK;
+ }
+
+ /* Check armcycle epoch is changed */
+ target = dhd_ring_get_last(ring);
+ if (target != NULL) {
+ armcycle = (uint32 *)target;
+ if (*armcycle + EWPF_EPOCH <= filter->tmp_armcycle) {
+ /* EPOCH is changed (longer than 1sec) */
+ target = NULL;
+ } else if (*armcycle - EWPF_EPOCH >= filter->tmp_armcycle) {
+ /* dongle is rebooted */
+ target = NULL;
+ }
+ }
+
+ if (target == NULL) {
+ /* Get new idx */
+ target = dhd_ring_get_empty(ring);
+ if (target == NULL) {
+ /* no available slot due to oldest slot is locked */
+ DHD_FILTER_ERR(("SKIP to logging xltv(%x) due to locking\n", type));
+ return BCME_OK;
+ }
+
+ /* clean up target */
+ armcycle = (uint32 *)target;
+ memset(target, 0, elem_size);
+ memcpy(armcycle, &filter->tmp_armcycle, sizeof(*armcycle));
+ }
+
+#ifdef EWPF_DEBUG
+ DHD_FILTER_ERR(("idx:%d write_:%p %u %u\n",
+ filter->xtlv_idx, target, *armcycle, filter->tmp_armcycle));
+#endif
+
+ /* Additionally put updated armcycle for event based EWP */
+ if (filter->idx_type == EWPF_IDX_TYPE_EVENT ||
+ filter->idx_type == EWPF_IDX_TYPE_KEY_INFO) {
+ DHD_FILTER_TRACE(("%s: updated armcycle for event based EWP\n",
+ __FUNCTION__));
+ memcpy((uint32 *)(armcycle + EWPF_UPDATE_ARM_CYCLE_OFFSET),
+ &filter->tmp_armcycle, sizeof(*armcycle));
+ }
+
+ ptr = (uint8 *)target;
+
+#ifdef DHD_EWPR_VER2
+ if (tbl->xtlv_id == WL_SLICESTATS_XTLV_HIST_TX_STATS ||
+ tbl->xtlv_id == WL_SLICESTATS_XTLV_HIST_RX_STATS) {
+#ifdef BCM_SDC
+ int err;
+
+ DHD_FILTER_TRACE(("TOSS_REASONS received (%d)\n", tbl->xtlv_id));
+
+ err = evt_get_last_toss_hist(ptr + cur_ctx->tbl[tbl_idx].offset, data, len);
+ if (err) {
+ DHD_FILTER_ERR(("%s: get toss hist failed\n",
+ __FUNCTION__));
+ return BCME_ERROR;
+ }
+#else
+ DHD_FILTER_ERR(("%s: Unabled to copy hist TX/RX stats, BCM_SDC must be included\n",
+ __FUNCTION__));
+#endif /* BCM_SDC */
+ } else
+#endif /* DHD_EWPR_VER2 */
+ {
+ /* XXX multi-version data shall use the same structure as the old version */
+ if (len > cur_ctx->tbl[tbl_idx].member_length) {
+ DHD_FILTER_TRACE(("data length is too big to save: (alloc = %d), "
+ "(data = %d)\n", cur_ctx->tbl[tbl_idx].member_length, len));
+ len = cur_ctx->tbl[tbl_idx].member_length;
+ }
+
+ memcpy(ptr + cur_ctx->tbl[tbl_idx].offset, data, len);
+ }
+ return BCME_OK;
+}
+
+static int
+evt_xtlv_idx_cb(void *ctx, const uint8 *data, uint16 type, uint16 len)
+{
+ EWPF_ctx_t *cur_ctx = (EWPF_ctx_t *)ctx;
+ EWP_filter_t *filter = (EWP_filter_t *)cur_ctx->dhdp->event_log_filter;
+
+ filter->xtlv_idx = data[0];
+
+ if (filter->idx_type == EWPF_IDX_TYPE_SLICE) {
+ if (type != WL_IFSTATS_XTLV_SLICE_INDEX ||
+ filter->xtlv_idx >= EWPF_MAX_SLICE) {
+ goto idx_fail;
+ }
+ } else if (filter->idx_type == EWPF_IDX_TYPE_IFACE) {
+ if (type != WL_IFSTATS_XTLV_IF_INDEX ||
+ filter->xtlv_idx >= EWPF_MAX_IFACE) {
+			DHD_FILTER_ERR(("FORCE IFACE TO 0\n"));
+			/* fall back to interface 0 so the sample is still recorded */
+			filter->xtlv_idx = 0;
+			return BCME_OK;
+ }
+ } else {
+ goto idx_fail;
+ }
+ return BCME_OK;
+
+idx_fail:
+	DHD_FILTER_ERR(("UNEXPECTED IDX XTLV: filter_type:%d input_type:%x idx:%d\n",
+ filter->idx_type, type, filter->xtlv_idx));
+ filter->idx_type = EWPF_INVALID;
+ filter->xtlv_idx = EWPF_INVALID;
+ return BCME_OK;
+}
+
+static int
+evt_xtlv_type_cb(void *ctx, const uint8 *data, uint16 type, uint16 len)
+{
+ EWPF_ctx_t *cur_ctx = (EWPF_ctx_t *)ctx;
+ EWP_filter_t *filter = (EWP_filter_t *)cur_ctx->dhdp->event_log_filter;
+
+ if (type == WL_IFSTATS_XTLV_WL_SLICE) {
+ filter->idx_type = EWPF_IDX_TYPE_SLICE;
+ DHD_FILTER_TRACE(("SLICE XTLV\n"));
+ } else if (type == WL_IFSTATS_XTLV_IF) {
+ filter->idx_type = EWPF_IDX_TYPE_IFACE;
+ DHD_FILTER_TRACE(("IFACE XTLV\n"));
+ }
+
+ bcm_unpack_xtlv_buf(ctx, data, len,
+ BCM_XTLV_OPTION_ALIGN32, filter_main_cb);
+ return BCME_OK;
+}
+
+static int
+evt_xtlv_roam_cb(void *ctx, const uint8 *data, uint16 type, uint16 len)
+{
+ EWPF_ctx_t *cur_ctx = (EWPF_ctx_t *)ctx;
+ EWPF_tbl_t *new_tbl = EWPF_roam;
+ EWPF_ctx_t sub_ctx;
+ int idx;
+
+ for (idx = 0; ; idx++) {
+ if (new_tbl[idx].xtlv_id == EWPF_XTLV_INVALID) {
+ DHD_FILTER_TRACE(("%s NOT SUPPORTED TYPE(%d)\n", __FUNCTION__, type));
+ return BCME_OK;
+ }
+ if (new_tbl[idx].xtlv_id == type) {
+ break;
+ }
+ }
+
+	/* MULTI version may not be applied */
+ if (len > sizeof(cur_ctx->dhdp->roam_evt)) {
+ DHD_FILTER_ERR(("data length is too big :max= %d, cur=%d\n",
+ (int)sizeof(cur_ctx->dhdp->roam_evt), len));
+ len = sizeof(cur_ctx->dhdp->roam_evt);
+ }
+
+ /* save latest roam event to report via get_bss_info */
+ (void)memcpy_s((char *)&cur_ctx->dhdp->roam_evt, sizeof(cur_ctx->dhdp->roam_evt),
+ data, len);
+
+ sub_ctx.dhdp = cur_ctx->dhdp;
+ sub_ctx.tbl = new_tbl;
+ new_tbl[idx].cb_func(&sub_ctx, data, type, len);
+ return BCME_OK;
+}
+
+static int
+filter_main_cb(void *ctx, const uint8 *data, uint16 type, uint16 len)
+{
+ EWPF_ctx_t *cur_ctx = (EWPF_ctx_t *)ctx;
+ EWPF_ctx_t sub_ctx;
+ int idx;
+ int err = BCME_OK;
+
+ DHD_FILTER_TRACE(("%s type:%x len:%d\n", __FUNCTION__, type, len));
+
+ sub_ctx.dhdp = cur_ctx->dhdp;
+ for (idx = 0; ; idx++) {
+ if (cur_ctx->tbl[idx].xtlv_id == EWPF_XTLV_INVALID) {
+ DHD_FILTER_TRACE(("%s NOT SUPPORTED TYPE(%d)\n", __FUNCTION__, type));
+ return BCME_OK;
+ }
+ if (cur_ctx->tbl[idx].xtlv_id == type) {
+ /* parse sub xtlv */
+ if (cur_ctx->tbl[idx].cb_func == NULL) {
+ sub_ctx.tbl = cur_ctx->tbl[idx].tbl;
+ err = bcm_unpack_xtlv_buf(&sub_ctx, data, len,
+ BCM_XTLV_OPTION_ALIGN32, filter_main_cb);
+ return err;
+ }
+
+ /* handle for structure/variable */
+ err = cur_ctx->tbl[idx].cb_func(ctx, data, type, len);
+ if (err != BCME_OK) {
+ return err;
+ }
+ }
+ }
+
+ return err;
+}
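+
+/*
+ * Dispatch sketch (hypothetical nesting, for reference only): a table entry
+ * whose cb_func is NULL is a container, so its payload is unpacked once more
+ * against the entry's sub table; an entry with a cb_func consumes the value
+ * directly.
+ *
+ *	outer XTLV
+ *	  -> filter_main_cb: tbl[idx].cb_func == NULL
+ *	     -> bcm_unpack_xtlv_buf(sub tbl, ..., filter_main_cb)
+ *	        -> filter_main_cb: tbl[idx].cb_func() stores the member
+ */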
+
+void
+dhd_event_log_filter_event_handler(dhd_pub_t *dhdp, prcd_event_log_hdr_t *plog_hdr, uint32 *data)
+{
+ int err;
+ EWP_filter_t *filter;
+ EWPF_ctx_t ctx;
+
+ if (!dhdp->event_log_filter) {
+ DHD_FILTER_ERR(("NO FILTER MODULE\n"));
+ return;
+ }
+
+ if (!plog_hdr || !data) {
+ /* XXX Validation check is done by caller */
+ DHD_FILTER_ERR(("INVALID PARAMETER\n"));
+ return;
+ }
+
+ filter = (EWP_filter_t *)dhdp->event_log_filter;
+ if (filter->enabled != TRUE) {
+		DHD_FILTER_ERR(("FILTER IS NOT STARTED\n"));
+ return;
+ }
+
+ /* get ARMCYCLE */
+ filter->tmp_armcycle = plog_hdr->armcycle;
+ filter->idx_type = EWPF_INVALID;
+ filter->xtlv_idx = EWPF_INVALID;
+
+#ifdef EWPF_DEBUG
+ {
+ char buf[EWPF_DEBUG_BUF_LEN];
+ int idx;
+
+ memset(buf, 0, sizeof(buf));
+ DHD_FILTER_ERR(("tag %d(%x) count %d(%x)\n",
+ plog_hdr->tag, plog_hdr->tag, plog_hdr->count, plog_hdr->count));
+ for (idx = 0; idx < plog_hdr->count; idx++) {
+ sprintf(&buf[strlen(buf)], "%08x ", data[idx]);
+ if ((idx + 1) % EWPF_VAL_CNT_PLINE == 0) {
+ DHD_FILTER_ERR(("%s\n", buf));
+ memset(buf, 0, sizeof(buf));
+ }
+ }
+ if (strlen(buf) > 0) {
+ DHD_FILTER_ERR(("%s\n", buf));
+ }
+ }
+#endif /* EWPF_DEBUG */
+
+ ctx.dhdp = dhdp;
+ ctx.tbl = EWPF_main;
+ if ((err = bcm_unpack_xtlv_buf(
+ &ctx,
+ (const uint8 *)data,
+ (plog_hdr->count - 1) * sizeof(uint32),
+ BCM_XTLV_OPTION_ALIGN32,
+ filter_main_cb))) {
+ DHD_FILTER_ERR(("FAIL TO UNPACK XTLV: err(%d)\n", err));
+ }
+}
+/* ========= Private Command (Serialize) ============= */
+/* XXX REPORT MODULE will be done after discussion with the customer */
+/* XXX Current implementation is temporary, to verify that the FILTER MODULE works */
+//#define EWPR_DEBUG
+#ifdef EWPR_DEBUG
+#undef DHD_FILTER_TRACE
+#define DHD_FILTER_TRACE DHD_FILTER_ERR
+#endif /* EWPR_DEBUG */
+#define EWPR_DEBUG_BUF_LEN 512
+
+#define EWP_REPORT_ELEM_PRINT_BUF 256
+#define EWP_REPORT_NAME_MAX 64
+
+#ifdef DHD_EWPR_VER2
+#define EWP_REPORT_VERSION 0x20190514
+#define EWP_REPORT_SET_DEFAULT 0x01
+#define EWPR_CSDCLIENT_DIFF 10
+#define EWPR_INTERVAL 3
+#define EWPR_DELTA_CNT 1 /* 3 seconds before */
+#define EWPR_ARRAY_CNT 10 /* INTERVAL * ARRAY total 30 seconds to lock */
+#define EWPR_DELTA_LAST_POS 6
+
+#define INDEX_STR_SIZE 6
+#define DHD_STAT_STR_SIZE 2
+#define REPORT_VERSION_STR_SIZE 8
+#define DELIMITER_LEN 1
+#else
+#define EWP_REPORT_VERSION 0x20170905
+#define EWPR_CSDCLIENT_DIFF 4
+#define EWPR_INTERVAL 3
+#define EWPR_ARRAY_CNT 10 /* INTERVAL * ARRAY total 30 seconds to lock */
+#endif /* DHD_EWPR_VER2 */
+
+#define EWPR_DELTA3_POS 3
+#define EWPR_DELTA2_POS 2
+#define EWPR_DELTA1_POS 1
+#define EWPR_NOW_POS 0
+
+#define EWPR_DELTA1_CNT 2 /* 6 seconds before */
+#define EWPR_DELTA2_CNT 5 /* 15 seconds before */
+#define EWPR_DELTA3_CNT 9 /* 27 seconds before */
+
+#define EWPR_CNT_PER_LINE 5
+
+/* EWP Reporter display format */
+#define EWP_DEC 1
+#define EWP_HEX 2
+#define EWP_BIN 3
+
+/* EWP Filter Data type */
+/* BASIC : signed + length */
+#define EWP_UINT8 2
+#define EWP_UINT16 4
+#define EWP_UINT32 8
+#define EWP_UINT64 16
+#define EWP_INT8 102
+#define EWP_INT16 104
+#define EWP_INT32 108
+#define EWP_BIT 201
+
+/* NON BASIC : need special handling */
+#define EWP_NON_BASIC 200
+#define EWP_DATE 201
+#define EWP_TIME 202
+#define EWP_BSSID 203
+#define EWP_OUI 204
+
+/* Delimiter between values */
+#define KEY_DEL ' '
+#define RAW_DEL '_'
+
+/* IOVAR BUF SIZE */
+#define EWPR_IOV_BUF_LEN 64
+
+typedef struct {
+ void *ring; /* INPUT ring to lock */
+ void **elem_list; /* OUTPUT elem ptr list for each delta */
+ uint32 max_armcycle; /* IN/OUT arm cycle should be less than this */
+ uint32 min_armcycle; /* IN/OUT arm cycle should be bigger than this */
+ uint32 max_period; /* IN allowed time diff between first and last */
+	uint32 delta_cnt;	/* IN number of deltas to find */
+ uint32 *delta_list; /* IN delta values to find */
+} ewpr_lock_param_t;
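+
+/*
+ * Condensed sketch of the usage in dhd_event_log_filter_serialize() below:
+ * with one sample every EWPR_INTERVAL (3) seconds, a delta_list of
+ * {0, 2, 5, 9} requests the newest element plus the elements sampled about
+ * 6, 15 and 27 seconds earlier.
+ *
+ *	uint32 delta_list[EWPR_CSDCLIENT_DIFF];
+ *	void *elem_list[EWPR_CSDCLIENT_DIFF];
+ *	ewpr_lock_param_t lock_param;
+ *
+ *	delta_list[EWPR_NOW_POS] = 0;
+ *	delta_list[EWPR_DELTA1_POS] = EWPR_DELTA1_CNT;
+ *	delta_list[EWPR_DELTA2_POS] = EWPR_DELTA2_CNT;
+ *	delta_list[EWPR_DELTA3_POS] = EWPR_DELTA3_CNT;
+ *	lock_param.ring = ring;
+ *	lock_param.elem_list = elem_list;
+ *	lock_param.delta_cnt = ARRAYSIZE(delta_list);
+ *	lock_param.delta_list = delta_list;
+ *	lock_cnt = ewpr_set_period_lock(&lock_param);
+ */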
+
+#define MAX_MULTI_VER 3
+typedef struct {
+ uint32 version; /* VERSION for multiple version struct */
+ uint32 offset; /* offset of the member at the version */
+} ewpr_MVT_offset_elem_t; /* elem for multi version type */
+
+typedef struct {
+ uint32 version_offset; /* offset of version */
+ ewpr_MVT_offset_elem_t opv[MAX_MULTI_VER]; /* offset per version */
+} ewpr_MVT_offset_t; /* multi_version type */
+
+typedef struct {
+ char name[EWP_REPORT_NAME_MAX];
+ int ring_type; /* Ring Type : EWPF_IDX_TYPE_SLICE, EWPF_IDX_TYPE_IFACE */
+ int is_multi_version; /* is multi version */
+ union {
+ uint32 offset; /* Offset from start of element structure */
+ ewpr_MVT_offset_t v_info;
+ };
+ int data_type; /* Data type : one of EWP Filter Data Type */
+ int display_format; /* Display format : one of EWP Reporter display */
+ int display_type; /* MAX display BYTE : valid for HEX and BIN FORM */
+#ifdef DHD_EWPR_VER2
+ int info_type; /* info type : EWPF_INFO_ECNT, EWPF_INFO_IOVAR, ... */
+ int display_bit_length; /* packing bit : valid for BIN FORM */
+ int display_array_size; /* array size */
+ int display_method; /* serial or diff */
+ int unit_convert; /* unit conversion
+ * 0 or 1 : no conversion, put data as is
+ * greater than 1, divide value by unit_convert
+ */
+ bool need_abs; /* need absolute function for negative value */
+#endif /* DHD_EWPR_VER2 */
+} ewpr_serial_info_t;
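+
+/*
+ * The union above is selected by is_multi_version: a single-version member
+ * carries a plain byte offset, while a multi-version member (e.g. the
+ * compact counters, which exist as v1/v2/v3 structures) carries the offset
+ * of the version word plus a per-version offset table that
+ * ewpr_get_multi_offset() resolves at serialization time.
+ */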
+
+/* offset defines */
+#define EWPR_CNT_VERSION_OFFSET \
+ OFFSETOF(EWPF_slc_elem_t, compact_cntr_v3)
+
+#define EWPR_CNT_V1_OFFSET(a) \
+ WL_PERIODIC_COMPACT_CNTRS_VER_1, \
+ (OFFSETOF(EWPF_slc_elem_t, compact_cntr_v1) + OFFSETOF(wl_periodic_compact_cntrs_v1_t, a))
+#define EWPR_CNT_V2_OFFSET(a) \
+ WL_PERIODIC_COMPACT_CNTRS_VER_2, \
+ (OFFSETOF(EWPF_slc_elem_t, compact_cntr_v2) + OFFSETOF(wl_periodic_compact_cntrs_v2_t, a))
+#define EWPR_CNT_V3_OFFSET(a) \
+ WL_PERIODIC_COMPACT_CNTRS_VER_3, \
+ (OFFSETOF(EWPF_slc_elem_t, compact_cntr_v3) + OFFSETOF(wl_periodic_compact_cntrs_v3_t, a))
+#define EWPR_STAT_OFFSET(a) \
+ (OFFSETOF(EWPF_ifc_elem_t, if_stat) + OFFSETOF(wl_if_stats_t, a))
+#define EWPR_INFRA_OFFSET(a) \
+ (OFFSETOF(EWPF_ifc_elem_t, infra) + OFFSETOF(wl_if_infra_stats_t, a))
+#define EWPR_MGMT_OFFSET(a) \
+ (OFFSETOF(EWPF_ifc_elem_t, mgmt_stat) + OFFSETOF(wl_if_mgt_stats_t, a))
+#define EWPR_LQM_OFFSET(a) \
+ (OFFSETOF(EWPF_ifc_elem_t, lqm) + OFFSETOF(wl_lqm_t, a))
+#define EWPR_SIGNAL_OFFSET(a) \
+ (EWPR_LQM_OFFSET(current_bss) + OFFSETOF(wl_rx_signal_metric_t, a))
+#define EWPR_IF_COMP_OFFSET(a) \
+ (OFFSETOF(EWPF_ifc_elem_t, if_comp_stat) + OFFSETOF(wl_if_state_compact_t, a))
+#define EWPR_EVENT_COUNTER_OFFSET(a) \
+ (OFFSETOF(EWPF_event_elem_t, event_stat) + OFFSETOF(wl_event_based_statistics_v4_t, a))
+#define EWPR_KEY_INFO_OFFSET(a) \
+ (OFFSETOF(EWPF_key_info_elem_t, key_update_info) + OFFSETOF(key_update_info_v1_t, a))
+#define EWPR_TX_TOSS_HIST_OFFSET(a) \
+ (OFFSETOF(EWPF_slc_elem_t, hist_tx_toss_stat) + \
+ OFFSETOF(evt_hist_compact_toss_stats_v1_t, a))
+#define EWPR_RX_TOSS_HIST_OFFSET(a) \
+ (OFFSETOF(EWPF_slc_elem_t, hist_rx_toss_stat) + \
+ OFFSETOF(evt_hist_compact_toss_stats_v1_t, a))
+#define EWPR_BTC_STAT_OFFSET(a) \
+ (OFFSETOF(EWPF_slc_elem_t, btc_stat) + \
+ OFFSETOF(wlc_btc_stats_v4_t, a))
+#define EWPR_COMPACT_HE_CNT_OFFSET(a) \
+ (OFFSETOF(EWPF_slc_elem_t, compact_he_cnt) + \
+ OFFSETOF(wl_compact_he_cnt_wlc_v2_t, a))
+#define EWPR_ROAM_STATS_PERIODIC_OFFSET(a) \
+ (OFFSETOF(EWPF_ifc_elem_t, roam_stat) + OFFSETOF(wl_roam_stats_v1_t, a))
+
+/* serial info type defines */
+#define EWPR_SERIAL_CNT(a) {\
+ #a, EWPF_IDX_TYPE_SLICE, TRUE, \
+ .v_info = { EWPR_CNT_VERSION_OFFSET, \
+ {{EWPR_CNT_V1_OFFSET(a)}, \
+ {EWPR_CNT_V2_OFFSET(a)}, \
+ {EWPR_CNT_V3_OFFSET(a)}}}, \
+ EWP_UINT32, EWP_HEX, EWP_UINT32}
+#define EWPR_SERIAL_CNT_16(a) {\
+ #a, EWPF_IDX_TYPE_SLICE, TRUE, \
+ .v_info = { EWPR_CNT_VERSION_OFFSET, \
+ {{EWPR_CNT_V1_OFFSET(a)}, \
+ {EWPR_CNT_V2_OFFSET(a)}, \
+ {EWPR_CNT_V3_OFFSET(a)}}}, \
+ EWP_UINT32, EWP_HEX, EWP_UINT16}
+#define EWPR_SERIAL_STAT(a) {\
+ #a, EWPF_IDX_TYPE_IFACE, FALSE, .offset = EWPR_STAT_OFFSET(a), \
+ EWP_UINT64, EWP_HEX, EWP_UINT32}
+#define EWPR_SERIAL_INFRA(a) {\
+ #a, EWPF_IDX_TYPE_IFACE, FALSE, .offset = EWPR_INFRA_OFFSET(a), \
+ EWP_UINT32, EWP_HEX, EWP_UINT16}
+#define EWPR_SERIAL_MGMT(a) {\
+ #a, EWPF_IDX_TYPE_IFACE, FALSE, .offset = EWPR_MGMT_OFFSET(a), \
+ EWP_UINT32, EWP_HEX, EWP_UINT16}
+#define EWPR_SERIAL_LQM(a) {\
+ #a, EWPF_IDX_TYPE_IFACE, FALSE, .offset = EWPR_LQM_OFFSET(a), \
+ EWP_INT32, EWP_DEC, EWP_INT8}
+#define EWPR_SERIAL_SIGNAL(a) {\
+ #a, EWPF_IDX_TYPE_IFACE, FALSE, .offset = EWPR_SIGNAL_OFFSET(a), \
+ EWP_INT32, EWP_DEC, EWP_INT8}
+#define EWPR_SERIAL_IFCOMP_8(a) {\
+ #a, EWPF_IDX_TYPE_IFACE, FALSE, .offset = EWPR_IF_COMP_OFFSET(a), \
+ EWP_INT8, EWP_DEC, EWP_INT8}
+#define EWPR_SERIAL_IFCOMP_16(a) {\
+ #a, EWPF_IDX_TYPE_IFACE, FALSE, .offset = EWPR_IF_COMP_OFFSET(a), \
+ EWP_UINT16, EWP_DEC, EWP_UINT16}
+#define EWPR_SERIAL_ARM(a) {\
+ "armcycle:" #a, EWPF_IDX_TYPE_##a, FALSE, {0, }, \
+ EWP_UINT32, EWP_DEC, EWP_UINT32}
+#define EWPR_SERIAL_NONE {"", EWPF_INVALID, FALSE, {0, }, 0, 0, 0}
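+
+/*
+ * For reference, EWPR_SERIAL_CNT(txallfrm) expands to an entry that reads
+ * the version word at EWPR_CNT_VERSION_OFFSET and picks the matching
+ * v1/v2/v3 offset of txallfrm (each EWPR_CNT_Vn_OFFSET() supplies a
+ * {version, offset} pair):
+ *
+ *	{ "txallfrm", EWPF_IDX_TYPE_SLICE, TRUE,
+ *	  .v_info = { EWPR_CNT_VERSION_OFFSET,
+ *	  {{EWPR_CNT_V1_OFFSET(txallfrm)},
+ *	  {EWPR_CNT_V2_OFFSET(txallfrm)},
+ *	  {EWPR_CNT_V3_OFFSET(txallfrm)}}},
+ *	  EWP_UINT32, EWP_HEX, EWP_UINT32 }
+ */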
+
+#ifdef DHD_EWPR_VER2
+
+#define RAW_BUFFER_SIZE 720u
+#define BASE64_BUFFER_SIZE 960u /* 33 percent larger than original binary data */
+#define EWPR_HEADER_SIZE 39u
+#define EWPR_MAX_STR_SIZE (EWPR_HEADER_SIZE + EWPR_HEADER_SIZE)
+
+#define EWPR_DISPLAY_METHOD_SINGLE 0
+#define EWPR_DISPLAY_METHOD_DIFF 1
+
+#define MAX_BIT_SIZE 8
+#define MAX_BIT_SHIFT 7
+
+#define INDEX_UNSPECIFIED 0u
+
+enum ewpr_context_type {
+ EWP_CONTEXT_TYPE_UNWANTED_NETWORK = 0,
+ EWP_CONTEXT_TYPE_ASSOC_FAIL = 1,
+ EWP_CONTEXT_TYPE_ABNORMAL_DISCONNECT = 2,
+ EWP_CONTEXT_TYPE_MAX = 3
+};
+
+enum ewpr_unwanted_net_sub_type {
+ EWP_UNWANT_NET_SUB_TYPE_UNSPECIFIED = 0,
+ EWP_UNWANT_NET_SUB_TYPE_ARP_FAIL = 1,
+ EWP_UNWANT_NET_SUB_TYPE_TXBAD = 2,
+ EWP_UNWANT_NET_SUB_TYPE_MAX = 3
+};
+
+enum ewpr_assoc_fail_sub_type {
+ EWP_ASSOC_FAIL_SUB_TYPE_UNSPECIFIED = 0,
+ EWP_ASSOC_FAIL_SUB_TYPE_DHCP_FAIL = 1,
+ EWP_ASSOC_FAIL_SUB_TYPE_EAP_FAIL = 2,
+ EWP_ASSOC_FAIL_SUB_TYPE_EAP_TIMEOUT = 3,
+ EWP_ASSOC_FAIL_SUB_TYPE_4WAY_FAIL = 4,
+ EWP_ASSOC_FAIL_SUB_TYPE_MAX = 5
+};
+
+enum ewpr_abnormal_disconnect_sub_type {
+ EWP_ABNRML_DISCONNCET_SUB_TYPE_UNSPECIFIED = 0,
+ EWP_ABNRML_DISCONNCET_SUB_TYPE_DISCONNECT_BY_HOST = 1,
+ EWP_ABNRML_DISCONNCET_SUB_TYPE_MAX = 2
+};
+
+typedef struct {
+ uint32 index1;
+ uint32 index2;
+ uint32 index3;
+ ewpr_serial_info_t *table;
+} ewpr_serial_context_info_t;
+
+#define EWPR_SINGLE_DEFAULT EWPR_DISPLAY_METHOD_SINGLE, EWPF_NO_UNIT_CONV
+#define EWPR_DIFF_DEFAULT EWPR_DISPLAY_METHOD_DIFF, EWPF_NO_UNIT_CONV
+
+#define EWPR_SINGLE_NSEC_TO_MSEC EWPR_DISPLAY_METHOD_SINGLE, EWPF_NSEC_TO_MSEC
+#define EWPR_SINGLE_USEC_TO_MSEC EWPR_DISPLAY_METHOD_SINGLE, EWPF_USEC_TO_MSEC
+#define EWPR_SINGLE_USEC_TO_SEC EWPR_DISPLAY_METHOD_SINGLE, EWPF_USEC_TO_SEC
+
+/* serial info type defines */
+#define EWPR_SERIAL_CNT_V3_BIT(a, b, c, d) {\
+ #a, EWPF_IDX_TYPE_SLICE, TRUE, \
+ .v_info = { EWPR_CNT_VERSION_OFFSET, \
+ {{EWPR_CNT_V3_OFFSET(a)}}}, \
+ EWP_UINT32, EWP_BIN, EWP_BIT, EWPF_INFO_ECNT, b, c, d}
+#define EWPR_SERIAL_STAT_BIT(a, b, c, d) {\
+ #a, EWPF_IDX_TYPE_IFACE, FALSE, .offset = EWPR_STAT_OFFSET(a), \
+ EWP_UINT64, EWP_BIN, EWP_BIT, EWPF_INFO_ECNT, b, c, d}
+#define EWPR_SERIAL_INFRA_BIT(a, b, c, d) {\
+ #a, EWPF_IDX_TYPE_IFACE, FALSE, .offset = EWPR_INFRA_OFFSET(a), \
+ EWP_UINT32, EWP_BIN, EWP_BIT, EWPF_INFO_ECNT, b, c, d}
+#define EWPR_SERIAL_MGMT_BIT(a, b, c, d) {\
+ #a, EWPF_IDX_TYPE_IFACE, FALSE, .offset = EWPR_MGMT_OFFSET(a), \
+ EWP_UINT32, EWP_BIN, EWP_BIT, EWPF_INFO_ECNT, b, c, d}
+#define EWPR_SERIAL_LQM_BIT(a, b, c, d) {\
+ #a, EWPF_IDX_TYPE_IFACE, FALSE, .offset = EWPR_LQM_OFFSET(a), \
+ EWP_INT32, EWP_BIN, EWP_BIT, EWPF_INFO_ECNT, b, c, d}
+#define EWPR_SERIAL_SIGNAL_BIT(a, b, c, d) {\
+ #a, EWPF_IDX_TYPE_IFACE, FALSE, .offset = EWPR_SIGNAL_OFFSET(a), \
+ EWP_INT32, EWP_BIN, EWP_BIT, EWPF_INFO_ECNT, b, c, d}
+#define EWPR_SERIAL_IFCOMP_BIT(a, b, c, d) {\
+ #a, EWPF_IDX_TYPE_IFACE, FALSE, .offset = EWPR_IF_COMP_OFFSET(a), \
+ EWP_INT8, EWP_BIN, EWP_BIT, EWPF_INFO_ECNT, b, c, d}
+#define EWPR_SERIAL_EVENT_COUNTER_16_BIT(a, b, c, d) {\
+ #a, EWPF_IDX_TYPE_EVENT, FALSE, .offset = EWPR_EVENT_COUNTER_OFFSET(a), \
+ EWP_UINT16, EWP_BIN, EWP_BIT, EWPF_INFO_ECNT, b, c, d}
+#define EWPR_SERIAL_EVENT_COUNTER_32_BIT(a, b, c, d) {\
+ #a, EWPF_IDX_TYPE_EVENT, FALSE, .offset = EWPR_EVENT_COUNTER_OFFSET(a), \
+ EWP_UINT32, EWP_BIN, EWP_BIT, EWPF_INFO_ECNT, b, c, d}
+#define EWPR_SERIAL_KEY_INFO_BIT(a, b, c, d) {\
+ #a, EWPF_IDX_TYPE_KEY_INFO, FALSE, .offset = EWPR_KEY_INFO_OFFSET(a), \
+ EWP_UINT32, EWP_BIN, EWP_BIT, EWPF_INFO_ECNT, b, c, d}
+#define EWPR_SERIAL_ROAM_STAT_PERIODIC_16_BIT(a, b, c, d) {\
+ #a, EWPF_IDX_TYPE_IFACE, FALSE, .offset = EWPR_ROAM_STATS_PERIODIC_OFFSET(a), \
+ EWP_UINT16, EWP_BIN, EWP_BIT, EWPF_INFO_ECNT, b, c, d}
+#define EWPR_SERIAL_ROAM_STAT_PERIODIC_32_BIT(a, b, c, d) {\
+ #a, EWPF_IDX_TYPE_IFACE, FALSE, .offset = EWPR_ROAM_STATS_PERIODIC_OFFSET(a), \
+ EWP_UINT32, EWP_BIN, EWP_BIT, EWPF_INFO_ECNT, b, c, d}
+#define EWPR_SERIAL_ARM_BIT(a, b, c, d) {\
+ #a, EWPF_IDX_TYPE_##a, FALSE, {0, }, \
+ EWP_UINT32, EWP_BIN, EWP_BIT, EWPF_INFO_ECNT, b, c, d}
+#define EWPR_SERIAL_IOVAR_BIT(a, b) {\
+ #a, 0, 0, .offset = 0, \
+ EWP_UINT32, EWP_BIN, EWP_BIT, EWPF_INFO_IOVAR, b, 1, EWPR_SINGLE_DEFAULT}
+#define EWPR_SERIAL_VERSION_BIT(a, b) {\
+ #a, 0, 0, .offset = 0, \
+ EWP_UINT32, EWP_BIN, EWP_BIT, EWPF_INFO_VER, b, 1, EWPR_SINGLE_DEFAULT}
+#define EWPR_SERIAL_TYPE_BIT(a, b) {\
+ #a, 0, 0, .offset = 0, \
+ EWP_UINT32, EWP_BIN, EWP_BIT, EWPF_INFO_TYPE, b, 1, EWPR_SINGLE_DEFAULT}
+#define EWPR_SERIAL_CPLOG_BIT(a, b, c) {\
+ #a, 0, 0, .offset = 0, \
+ EWP_UINT32, EWP_BIN, EWP_BIT, EWPF_INFO_CPLOG, b, c, EWPR_SINGLE_DEFAULT}
+#define EWPR_SERIAL_DHDSTAT_BIT(a, b, c, d) {\
+ #a, 0, 0, .offset = 0, \
+ EWP_UINT32, EWP_BIN, EWP_BIT, EWPF_INFO_DHDSTAT, b, c, d}
+#define EWPR_SERIAL_TX_TOSS_HIST_BIT(a, b, c, d) {\
+ #a, EWPF_IDX_TYPE_SLICE, FALSE, .offset = EWPR_TX_TOSS_HIST_OFFSET(a), \
+ EWP_UINT32, EWP_BIN, EWP_BIT, EWPF_INFO_ECNT, b, c, d}
+#define EWPR_SERIAL_RX_TOSS_HIST_BIT(a, b, c, d) {\
+ #a, EWPF_IDX_TYPE_SLICE, FALSE, .offset = EWPR_RX_TOSS_HIST_OFFSET(a), \
+ EWP_UINT32, EWP_BIN, EWP_BIT, EWPF_INFO_ECNT, b, c, d}
+#define EWPR_SERIAL_BTC_STAT_16_BIT(a, b, c, d) {\
+ #a, EWPF_IDX_TYPE_SLICE, FALSE, .offset = EWPR_BTC_STAT_OFFSET(a), \
+ EWP_UINT16, EWP_BIN, EWP_BIT, EWPF_INFO_ECNT, b, c, d}
+#define EWPR_SERIAL_BTC_STAT_32_BIT(a, b, c, d) {\
+ #a, EWPF_IDX_TYPE_SLICE, FALSE, .offset = EWPR_BTC_STAT_OFFSET(a), \
+ EWP_UINT32, EWP_BIN, EWP_BIT, EWPF_INFO_ECNT, b, c, d}
+#define EWPR_SERIAL_COMPACT_HE_CNT_BIT(a, b, c, d) {\
+ #a, EWPF_IDX_TYPE_SLICE, FALSE, .offset = EWPR_COMPACT_HE_CNT_OFFSET(a), \
+ EWP_UINT32, EWP_BIN, EWP_BIT, EWPF_INFO_ECNT, b, c, d}
+
+#define EWPR_SERIAL_NONE_BIT {"", EWPF_INVALID, FALSE, {0, }, 0, 0, 0, 0, 0, 0, 0}
+
+#ifdef EWPR_DEBUG
+static void ewpr_print_byte_as_bits(char val);
+#endif /* EWPR_DEBUG */
+
+static int32
+ewpr_diff_bit_pack(ewpr_serial_info_t *info, char *buf, int buf_len,
+ void *_f_op, void *_s_op, int32 bit_offset);
+static int32
+ewpr_single_bit_pack(ewpr_serial_info_t *info, char *buf, int buf_len,
+ void *_ptr, int32 bit_offset);
+static int32
+ewpr_bit_pack_basic(char *buf, int buf_len, uint32 data, int32 format,
+ int32 display_type, int32 display_bit_length, int32 bit_offset);
+
+static char*
+ewpr_base64_encode(dhd_pub_t *dhdp, char* input, int32 length);
+#endif /* DHD_EWPR_VER2 */
+
+ewpr_serial_info_t
+ewpr_serial_CSDCLIENT_key_tbl[] = {
+ EWPR_SERIAL_STAT(txframe),
+ EWPR_SERIAL_STAT(txerror),
+ EWPR_SERIAL_STAT(rxframe),
+ EWPR_SERIAL_STAT(rxerror),
+ EWPR_SERIAL_STAT(txretrans),
+ EWPR_SERIAL_INFRA(rxbeaconmbss),
+ EWPR_SERIAL_CNT(txallfrm),
+ EWPR_SERIAL_CNT(rxrsptmout),
+ EWPR_SERIAL_CNT(rxbadplcp),
+ EWPR_SERIAL_CNT(rxcrsglitch),
+ EWPR_SERIAL_CNT(rxbadfcs),
+ EWPR_SERIAL_CNT_16(rxbeaconmbss),
+ EWPR_SERIAL_CNT_16(rxbeaconobss),
+ EWPR_SERIAL_NONE
+};
+
+ewpr_serial_info_t
+ewpr_serial_CSDCLIENT_diff_tbl[] = {
+ EWPR_SERIAL_STAT(txframe),
+ EWPR_SERIAL_STAT(txerror),
+ EWPR_SERIAL_STAT(rxframe),
+ EWPR_SERIAL_STAT(rxerror),
+ EWPR_SERIAL_STAT(txretrans),
+ EWPR_SERIAL_INFRA(rxbeaconmbss),
+ EWPR_SERIAL_MGMT(txassocreq),
+ EWPR_SERIAL_MGMT(txreassocreq),
+ EWPR_SERIAL_MGMT(txdisassoc),
+ EWPR_SERIAL_MGMT(rxdisassoc),
+ EWPR_SERIAL_MGMT(rxassocrsp),
+ EWPR_SERIAL_MGMT(rxreassocrsp),
+ EWPR_SERIAL_MGMT(txauth),
+ EWPR_SERIAL_MGMT(rxauth),
+ EWPR_SERIAL_MGMT(txdeauth),
+ EWPR_SERIAL_MGMT(rxdeauth),
+ EWPR_SERIAL_MGMT(txaction),
+ EWPR_SERIAL_MGMT(rxaction),
+ EWPR_SERIAL_CNT(txallfrm),
+ EWPR_SERIAL_CNT(rxrsptmout),
+ EWPR_SERIAL_CNT(rxbadplcp),
+ EWPR_SERIAL_CNT(rxcrsglitch),
+ EWPR_SERIAL_CNT(rxbadfcs),
+ EWPR_SERIAL_CNT_16(rxbeaconmbss),
+ EWPR_SERIAL_CNT_16(rxbeaconobss),
+ EWPR_SERIAL_NONE
+};
+
+ewpr_serial_info_t
+ewpr_serial_CSDCLIENT_array_tbl[] = {
+ EWPR_SERIAL_IFCOMP_8(rssi_sum),
+ EWPR_SERIAL_IFCOMP_8(snr),
+ EWPR_SERIAL_IFCOMP_8(noise_level),
+ EWPR_SERIAL_NONE
+};
+
+#ifdef EWPR_DEBUG
+ewpr_serial_info_t
+ewpr_serial_dbg_tbl[] = {
+ EWPR_SERIAL_ARM(IFACE),
+ EWPR_SERIAL_ARM(SLICE),
+ EWPR_SERIAL_NONE
+};
+#endif /* EWPR_DEBUG */
+
+#ifdef DHD_EWPR_VER2
+
+ewpr_serial_info_t
+ewpr_serial_bit_unwanted_network_default_tbl[] = {
+ EWPR_SERIAL_VERSION_BIT(version, 32),
+ EWPR_SERIAL_TYPE_BIT(type, 5),
+ EWPR_SERIAL_DHDSTAT_BIT(dhdstat_last_ts, 32, 1, EWPR_SINGLE_USEC_TO_SEC),
+ EWPR_SERIAL_DHDSTAT_BIT(dhdstat_last, 8, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_DHDSTAT_BIT(dhdstat_prev_ts, 32, 1, EWPR_SINGLE_USEC_TO_SEC),
+ EWPR_SERIAL_DHDSTAT_BIT(dhdstat_prev, 8, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_IOVAR_BIT(auth, 8),
+ EWPR_SERIAL_IOVAR_BIT(wsec, 8),
+ EWPR_SERIAL_IOVAR_BIT(mfp, 1),
+ EWPR_SERIAL_IOVAR_BIT(bip, 8),
+ EWPR_SERIAL_ARM_BIT(IFACE, 32, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_STAT_BIT(txframe, 17, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_STAT_BIT(txerror, 17, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_STAT_BIT(rxframe, 17, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_STAT_BIT(rxerror, 17, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_STAT_BIT(txretrans, 17, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_MGMT_BIT(txassocreq, 4, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_MGMT_BIT(txreassocreq, 4, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_MGMT_BIT(txdisassoc, 4, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_MGMT_BIT(rxdisassoc, 4, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_MGMT_BIT(rxassocrsp, 4, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_MGMT_BIT(rxreassocrsp, 4, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_MGMT_BIT(txauth, 4, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_MGMT_BIT(rxauth, 4, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_MGMT_BIT(txdeauth, 4, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_MGMT_BIT(rxdeauth, 4, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_MGMT_BIT(txaction, 7, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_MGMT_BIT(rxaction, 7, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_CNT_V3_BIT(txallfrm, 17, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_CNT_V3_BIT(rxrsptmout, 17, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_CNT_V3_BIT(rxbadplcp, 17, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_CNT_V3_BIT(rxcrsglitch, 17, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_CNT_V3_BIT(rxbadfcs, 17, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_CNT_V3_BIT(rxbeaconmbss, 5, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_CNT_V3_BIT(rxbeaconobss, 12, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_CNT_V3_BIT(lqcm_report, 19, 6, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_CNT_V3_BIT(tx_toss_cnt, 18, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_CNT_V3_BIT(rx_toss_cnt, 18, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_CNT_V3_BIT(rxretry, 17, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_CNT_V3_BIT(rxdup, 15, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_CNT_V3_BIT(chswitch_cnt, 8, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_CNT_V3_BIT(pm_dur, 12, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_CNT_V3_BIT(rxholes, 15, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_BTC_STAT_16_BIT(bt_dcsn_map, 16, 3, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_BTC_STAT_16_BIT(bt_dcsn_cnt, 12, 3, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_BTC_STAT_16_BIT(bt_a2dp_hiwat_cnt, 12, 3, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_BTC_STAT_16_BIT(bt_datadelay_cnt, 12, 3, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_BTC_STAT_16_BIT(bt_crtpri_cnt, 12, 3, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_BTC_STAT_16_BIT(bt_pri_cnt, 12, 3, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_BTC_STAT_16_BIT(a2dpbuf5cnt, 12, 3, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_BTC_STAT_16_BIT(a2dpbuf6cnt, 12, 3, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_BTC_STAT_16_BIT(a2dpbuf7cnt, 12, 3, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_BTC_STAT_32_BIT(bt_gnt_dur, 12, 3, EWPR_SINGLE_USEC_TO_MSEC),
+ EWPR_SERIAL_IFCOMP_BIT(rssi_sum, 7, 6, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_IFCOMP_BIT(snr, 7, 6, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_IFCOMP_BIT(noise_level, 7, 6, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_ROAM_STAT_PERIODIC_32_BIT(initial_assoc_time, 32, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_ROAM_STAT_PERIODIC_32_BIT(prev_roam_time, 32, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_ROAM_STAT_PERIODIC_32_BIT(last_roam_event_type, 8, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_ROAM_STAT_PERIODIC_32_BIT(last_roam_event_status, 6, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_ROAM_STAT_PERIODIC_32_BIT(last_roam_event_reason, 6, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_ROAM_STAT_PERIODIC_16_BIT(roam_success_cnt, 10, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_ROAM_STAT_PERIODIC_16_BIT(roam_fail_cnt, 10, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_ROAM_STAT_PERIODIC_16_BIT(roam_attempt_cnt, 11, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_ROAM_STAT_PERIODIC_16_BIT(max_roam_target_cnt, 5, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_ROAM_STAT_PERIODIC_16_BIT(min_roam_target_cnt, 5, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_ROAM_STAT_PERIODIC_16_BIT(max_cached_ch_cnt, 5, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_ROAM_STAT_PERIODIC_16_BIT(min_cached_ch_cnt, 5, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_ROAM_STAT_PERIODIC_16_BIT(partial_roam_scan_cnt, 11, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_ROAM_STAT_PERIODIC_16_BIT(full_roam_scan_cnt, 11, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_COMPACT_HE_CNT_BIT(he_rxtrig_myaid, 10, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_COMPACT_HE_CNT_BIT(he_colormiss_cnt, 10, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_COMPACT_HE_CNT_BIT(he_rxmsta_back, 10, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_COMPACT_HE_CNT_BIT(he_txtbppdu, 10, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_COMPACT_HE_CNT_BIT(he_null_tbppdu, 10, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_KEY_INFO_BIT(timestamp, 32, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_KEY_INFO_BIT(algo, 6, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_KEY_INFO_BIT(key_flags, 16, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_TX_TOSS_HIST_BIT(htr_rn_ts_last, 32, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_TX_TOSS_HIST_BIT(htr_rn_last, 32, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_TX_TOSS_HIST_BIT(htr_rn_ts_prev, 32, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_TX_TOSS_HIST_BIT(htr_rn_prev, 32, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_TX_TOSS_HIST_BIT(htr_rc_ts_max, 32, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_TX_TOSS_HIST_BIT(htr_rc_max, 32, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_TX_TOSS_HIST_BIT(htr_rc_ts_secnd, 32, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_TX_TOSS_HIST_BIT(htr_rc_secnd, 32, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_RX_TOSS_HIST_BIT(htr_rn_ts_last, 32, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_RX_TOSS_HIST_BIT(htr_rn_last, 32, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_RX_TOSS_HIST_BIT(htr_rn_ts_prev, 32, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_RX_TOSS_HIST_BIT(htr_rn_prev, 32, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_RX_TOSS_HIST_BIT(htr_rc_ts_max, 32, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_RX_TOSS_HIST_BIT(htr_rc_max, 32, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_RX_TOSS_HIST_BIT(htr_rc_ts_secnd, 32, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_RX_TOSS_HIST_BIT(htr_rc_secnd, 32, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_CPLOG_BIT(packtlog, 22, 70),
+ EWPR_SERIAL_NONE
+};
+
+ewpr_serial_info_t
+ewpr_serial_bit_assoc_fail_default_tbl[] = {
+ EWPR_SERIAL_VERSION_BIT(version, 32),
+ EWPR_SERIAL_TYPE_BIT(type, 5),
+ EWPR_SERIAL_DHDSTAT_BIT(dhdstat_last_ts, 32, 1, EWPR_SINGLE_USEC_TO_SEC),
+ EWPR_SERIAL_DHDSTAT_BIT(dhdstat_last, 8, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_DHDSTAT_BIT(dhdstat_prev_ts, 32, 1, EWPR_SINGLE_USEC_TO_SEC),
+ EWPR_SERIAL_DHDSTAT_BIT(dhdstat_prev, 8, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_IOVAR_BIT(auth, 8),
+ EWPR_SERIAL_IOVAR_BIT(wsec, 8),
+ EWPR_SERIAL_IOVAR_BIT(mfp, 1),
+ EWPR_SERIAL_IOVAR_BIT(bip, 8),
+ EWPR_SERIAL_ARM_BIT(IFACE, 32, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_STAT_BIT(txframe, 17, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_STAT_BIT(txerror, 17, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_STAT_BIT(rxframe, 17, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_STAT_BIT(rxerror, 17, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_STAT_BIT(txretrans, 17, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_MGMT_BIT(txassocreq, 4, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_MGMT_BIT(txreassocreq, 4, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_MGMT_BIT(txdisassoc, 4, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_MGMT_BIT(rxdisassoc, 4, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_MGMT_BIT(rxassocrsp, 4, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_MGMT_BIT(rxreassocrsp, 4, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_MGMT_BIT(txauth, 4, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_MGMT_BIT(rxauth, 4, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_MGMT_BIT(txdeauth, 4, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_MGMT_BIT(rxdeauth, 4, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_MGMT_BIT(txaction, 7, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_MGMT_BIT(rxaction, 7, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_CNT_V3_BIT(txallfrm, 17, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_CNT_V3_BIT(rxrsptmout, 17, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_CNT_V3_BIT(rxbadplcp, 17, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_CNT_V3_BIT(rxcrsglitch, 17, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_CNT_V3_BIT(rxbadfcs, 17, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_CNT_V3_BIT(rxbeaconmbss, 5, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_CNT_V3_BIT(rxbeaconobss, 12, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_CNT_V3_BIT(lqcm_report, 19, 6, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_CNT_V3_BIT(tx_toss_cnt, 18, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_CNT_V3_BIT(rx_toss_cnt, 18, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_CNT_V3_BIT(rxretry, 17, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_CNT_V3_BIT(rxdup, 15, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_CNT_V3_BIT(chswitch_cnt, 8, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_CNT_V3_BIT(pm_dur, 12, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_CNT_V3_BIT(rxholes, 15, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_BTC_STAT_16_BIT(bt_dcsn_map, 16, 3, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_BTC_STAT_16_BIT(bt_dcsn_cnt, 12, 3, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_BTC_STAT_16_BIT(bt_a2dp_hiwat_cnt, 12, 3, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_BTC_STAT_16_BIT(bt_datadelay_cnt, 12, 3, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_BTC_STAT_16_BIT(bt_crtpri_cnt, 12, 3, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_BTC_STAT_16_BIT(bt_pri_cnt, 12, 3, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_BTC_STAT_16_BIT(a2dpbuf5cnt, 12, 3, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_BTC_STAT_16_BIT(a2dpbuf6cnt, 12, 3, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_BTC_STAT_16_BIT(a2dpbuf7cnt, 12, 3, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_BTC_STAT_32_BIT(bt_gnt_dur, 12, 3, EWPR_SINGLE_USEC_TO_MSEC),
+ EWPR_SERIAL_IFCOMP_BIT(rssi_sum, 7, 6, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_IFCOMP_BIT(snr, 7, 6, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_IFCOMP_BIT(noise_level, 7, 6, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_COMPACT_HE_CNT_BIT(he_rxtrig_myaid, 10, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_COMPACT_HE_CNT_BIT(he_colormiss_cnt, 10, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_COMPACT_HE_CNT_BIT(he_rxmsta_back, 10, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_COMPACT_HE_CNT_BIT(he_txtbppdu, 10, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_COMPACT_HE_CNT_BIT(he_null_tbppdu, 10, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_KEY_INFO_BIT(timestamp, 32, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_KEY_INFO_BIT(algo, 6, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_KEY_INFO_BIT(key_flags, 16, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_TX_TOSS_HIST_BIT(htr_rn_ts_last, 32, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_TX_TOSS_HIST_BIT(htr_rn_last, 32, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_TX_TOSS_HIST_BIT(htr_rn_ts_prev, 32, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_TX_TOSS_HIST_BIT(htr_rn_prev, 32, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_TX_TOSS_HIST_BIT(htr_rc_ts_max, 32, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_TX_TOSS_HIST_BIT(htr_rc_max, 32, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_TX_TOSS_HIST_BIT(htr_rc_ts_secnd, 32, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_TX_TOSS_HIST_BIT(htr_rc_secnd, 32, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_RX_TOSS_HIST_BIT(htr_rn_ts_last, 32, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_RX_TOSS_HIST_BIT(htr_rn_last, 32, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_RX_TOSS_HIST_BIT(htr_rn_ts_prev, 32, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_RX_TOSS_HIST_BIT(htr_rn_prev, 32, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_RX_TOSS_HIST_BIT(htr_rc_ts_max, 32, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_RX_TOSS_HIST_BIT(htr_rc_max, 32, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_RX_TOSS_HIST_BIT(htr_rc_ts_secnd, 32, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_RX_TOSS_HIST_BIT(htr_rc_secnd, 32, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_CPLOG_BIT(packtlog, 22, 70),
+ EWPR_SERIAL_NONE
+};
+
+ewpr_serial_info_t
+ewpr_serial_bit_abnormal_disconnect_default_tbl[] = {
+ EWPR_SERIAL_VERSION_BIT(version, 32),
+ EWPR_SERIAL_TYPE_BIT(type, 5),
+ EWPR_SERIAL_DHDSTAT_BIT(dhdstat_last_ts, 32, 1, EWPR_SINGLE_USEC_TO_SEC),
+ EWPR_SERIAL_DHDSTAT_BIT(dhdstat_last, 8, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_DHDSTAT_BIT(dhdstat_prev_ts, 32, 1, EWPR_SINGLE_USEC_TO_SEC),
+ EWPR_SERIAL_DHDSTAT_BIT(dhdstat_prev, 8, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_IOVAR_BIT(auth, 8),
+ EWPR_SERIAL_IOVAR_BIT(wsec, 8),
+ EWPR_SERIAL_IOVAR_BIT(mfp, 1),
+ EWPR_SERIAL_IOVAR_BIT(bip, 8),
+ EWPR_SERIAL_ARM_BIT(IFACE, 32, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_STAT_BIT(txframe, 17, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_STAT_BIT(txerror, 17, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_STAT_BIT(rxframe, 17, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_STAT_BIT(rxerror, 17, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_STAT_BIT(txretrans, 17, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_MGMT_BIT(txassocreq, 4, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_MGMT_BIT(txreassocreq, 4, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_MGMT_BIT(txdisassoc, 4, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_MGMT_BIT(rxdisassoc, 4, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_MGMT_BIT(rxassocrsp, 4, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_MGMT_BIT(rxreassocrsp, 4, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_MGMT_BIT(txauth, 4, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_MGMT_BIT(rxauth, 4, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_MGMT_BIT(txdeauth, 4, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_MGMT_BIT(rxdeauth, 4, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_MGMT_BIT(txaction, 7, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_MGMT_BIT(rxaction, 7, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_CNT_V3_BIT(txallfrm, 17, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_CNT_V3_BIT(rxrsptmout, 17, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_CNT_V3_BIT(rxbadplcp, 17, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_CNT_V3_BIT(rxcrsglitch, 17, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_CNT_V3_BIT(rxbadfcs, 17, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_CNT_V3_BIT(rxbeaconmbss, 5, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_CNT_V3_BIT(rxbeaconobss, 12, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_CNT_V3_BIT(lqcm_report, 19, 6, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_CNT_V3_BIT(tx_toss_cnt, 18, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_CNT_V3_BIT(rx_toss_cnt, 18, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_CNT_V3_BIT(rxretry, 17, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_CNT_V3_BIT(rxdup, 15, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_CNT_V3_BIT(chswitch_cnt, 8, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_CNT_V3_BIT(pm_dur, 12, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_CNT_V3_BIT(rxholes, 15, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_BTC_STAT_16_BIT(bt_dcsn_map, 16, 3, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_BTC_STAT_16_BIT(bt_dcsn_cnt, 12, 3, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_BTC_STAT_16_BIT(bt_a2dp_hiwat_cnt, 12, 3, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_BTC_STAT_16_BIT(bt_datadelay_cnt, 12, 3, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_BTC_STAT_16_BIT(bt_crtpri_cnt, 12, 3, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_BTC_STAT_16_BIT(bt_pri_cnt, 12, 3, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_BTC_STAT_16_BIT(a2dpbuf5cnt, 12, 3, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_BTC_STAT_16_BIT(a2dpbuf6cnt, 12, 3, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_BTC_STAT_16_BIT(a2dpbuf7cnt, 12, 3, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_BTC_STAT_32_BIT(bt_gnt_dur, 12, 3, EWPR_SINGLE_USEC_TO_MSEC),
+ EWPR_SERIAL_IFCOMP_BIT(rssi_sum, 7, 6, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_IFCOMP_BIT(snr, 7, 6, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_IFCOMP_BIT(noise_level, 7, 6, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_ROAM_STAT_PERIODIC_32_BIT(initial_assoc_time, 32, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_ROAM_STAT_PERIODIC_32_BIT(prev_roam_time, 32, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_ROAM_STAT_PERIODIC_32_BIT(last_roam_event_type, 8, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_ROAM_STAT_PERIODIC_32_BIT(last_roam_event_status, 6, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_ROAM_STAT_PERIODIC_32_BIT(last_roam_event_reason, 6, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_ROAM_STAT_PERIODIC_16_BIT(roam_success_cnt, 10, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_ROAM_STAT_PERIODIC_16_BIT(roam_fail_cnt, 10, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_ROAM_STAT_PERIODIC_16_BIT(roam_attempt_cnt, 11, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_ROAM_STAT_PERIODIC_16_BIT(max_roam_target_cnt, 5, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_ROAM_STAT_PERIODIC_16_BIT(min_roam_target_cnt, 5, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_ROAM_STAT_PERIODIC_16_BIT(max_cached_ch_cnt, 5, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_ROAM_STAT_PERIODIC_16_BIT(min_cached_ch_cnt, 5, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_ROAM_STAT_PERIODIC_16_BIT(partial_roam_scan_cnt, 11, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_ROAM_STAT_PERIODIC_16_BIT(full_roam_scan_cnt, 11, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_COMPACT_HE_CNT_BIT(he_rxtrig_myaid, 10, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_COMPACT_HE_CNT_BIT(he_colormiss_cnt, 10, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_COMPACT_HE_CNT_BIT(he_rxmsta_back, 10, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_COMPACT_HE_CNT_BIT(he_txtbppdu, 10, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_COMPACT_HE_CNT_BIT(he_null_tbppdu, 10, 6, EWPR_DIFF_DEFAULT),
+ EWPR_SERIAL_KEY_INFO_BIT(timestamp, 32, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_KEY_INFO_BIT(algo, 6, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_KEY_INFO_BIT(key_flags, 16, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_TX_TOSS_HIST_BIT(htr_rn_ts_last, 32, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_TX_TOSS_HIST_BIT(htr_rn_last, 32, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_TX_TOSS_HIST_BIT(htr_rn_ts_prev, 32, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_TX_TOSS_HIST_BIT(htr_rn_prev, 32, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_TX_TOSS_HIST_BIT(htr_rc_ts_max, 32, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_TX_TOSS_HIST_BIT(htr_rc_max, 32, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_TX_TOSS_HIST_BIT(htr_rc_ts_secnd, 32, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_TX_TOSS_HIST_BIT(htr_rc_secnd, 32, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_RX_TOSS_HIST_BIT(htr_rn_ts_last, 32, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_RX_TOSS_HIST_BIT(htr_rn_last, 32, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_RX_TOSS_HIST_BIT(htr_rn_ts_prev, 32, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_RX_TOSS_HIST_BIT(htr_rn_prev, 32, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_RX_TOSS_HIST_BIT(htr_rc_ts_max, 32, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_RX_TOSS_HIST_BIT(htr_rc_max, 32, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_RX_TOSS_HIST_BIT(htr_rc_ts_secnd, 32, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_RX_TOSS_HIST_BIT(htr_rc_secnd, 32, 1, EWPR_SINGLE_DEFAULT),
+ EWPR_SERIAL_CPLOG_BIT(packtlog, 22, 70),
+ EWPR_SERIAL_NONE
+};
+
+ewpr_serial_context_info_t ewpr_serial_context_info[] = {
+ {EWP_CONTEXT_TYPE_UNWANTED_NETWORK, EWP_UNWANT_NET_SUB_TYPE_UNSPECIFIED,
+ INDEX_UNSPECIFIED, &ewpr_serial_bit_unwanted_network_default_tbl[0]},
+ {EWP_CONTEXT_TYPE_UNWANTED_NETWORK, EWP_UNWANT_NET_SUB_TYPE_ARP_FAIL,
+ INDEX_UNSPECIFIED, &ewpr_serial_bit_unwanted_network_default_tbl[0]},
+ {EWP_CONTEXT_TYPE_UNWANTED_NETWORK, EWP_UNWANT_NET_SUB_TYPE_TXBAD,
+ INDEX_UNSPECIFIED, &ewpr_serial_bit_unwanted_network_default_tbl[0]},
+ {EWP_CONTEXT_TYPE_ASSOC_FAIL, EWP_ASSOC_FAIL_SUB_TYPE_UNSPECIFIED,
+ INDEX_UNSPECIFIED, &ewpr_serial_bit_assoc_fail_default_tbl[0]},
+ {EWP_CONTEXT_TYPE_ASSOC_FAIL, EWP_ASSOC_FAIL_SUB_TYPE_DHCP_FAIL,
+ INDEX_UNSPECIFIED, &ewpr_serial_bit_assoc_fail_default_tbl[0]},
+ {EWP_CONTEXT_TYPE_ASSOC_FAIL, EWP_ASSOC_FAIL_SUB_TYPE_EAP_FAIL,
+ INDEX_UNSPECIFIED, &ewpr_serial_bit_assoc_fail_default_tbl[0]},
+ {EWP_CONTEXT_TYPE_ASSOC_FAIL, EWP_ASSOC_FAIL_SUB_TYPE_EAP_TIMEOUT,
+ INDEX_UNSPECIFIED, &ewpr_serial_bit_assoc_fail_default_tbl[0]},
+ {EWP_CONTEXT_TYPE_ASSOC_FAIL, EWP_ASSOC_FAIL_SUB_TYPE_4WAY_FAIL,
+ INDEX_UNSPECIFIED, &ewpr_serial_bit_assoc_fail_default_tbl[0]},
+ {EWP_CONTEXT_TYPE_ABNORMAL_DISCONNECT, EWP_ABNRML_DISCONNCET_SUB_TYPE_UNSPECIFIED,
+ INDEX_UNSPECIFIED, &ewpr_serial_bit_abnormal_disconnect_default_tbl[0]},
+ {EWP_CONTEXT_TYPE_ABNORMAL_DISCONNECT, EWP_ABNRML_DISCONNCET_SUB_TYPE_DISCONNECT_BY_HOST,
+ INDEX_UNSPECIFIED, &ewpr_serial_bit_abnormal_disconnect_default_tbl[0]},
+ {EWP_CONTEXT_TYPE_MAX, INDEX_UNSPECIFIED, INDEX_UNSPECIFIED, NULL}
+};
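+
+/*
+ * Lookup sketch (the walker itself lives outside this table): a report
+ * request is matched on (context type, sub type); e.g.
+ * EWP_CONTEXT_TYPE_ASSOC_FAIL with EWP_ASSOC_FAIL_SUB_TYPE_DHCP_FAIL selects
+ * ewpr_serial_bit_assoc_fail_default_tbl, and the EWP_CONTEXT_TYPE_MAX entry
+ * terminates the walk.
+ */
+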
+#endif /* DHD_EWPR_VER2 */
+
+int ewpr_set_period_lock(ewpr_lock_param_t *param);
+int ewpr_diff_serial(ewpr_serial_info_t *info, char *buf,
+ int buf_len, void *_f_op, void *_s_op, char del);
+int ewpr_single_serial(ewpr_serial_info_t *info, char *buf, int buf_len, void *ptr, char del);
+
+int
+ewpr_serial_basic(char *buf, int buf_len, uint32 data, int format, int display_type, char del)
+{
+ if (format == EWP_HEX) {
+ switch (display_type) {
+ case EWP_INT8:
+ case EWP_UINT8:
+ return scnprintf(buf, buf_len, "%c%02x", del, data & 0xff);
+ case EWP_INT16:
+ case EWP_UINT16:
+ return scnprintf(buf, buf_len, "%c%04x", del, data & 0xffff);
+ case EWP_INT32:
+ case EWP_UINT32:
+ return scnprintf(buf, buf_len, "%c%08x", del, data & 0xffffffff);
+ default:
+ DHD_FILTER_ERR(("INVALID TYPE for Serial:%d", display_type));
+ return 0;
+ }
+ }
+
+ if (format == EWP_DEC) {
+ int32 sdata = (int32) data;
+ switch (display_type) {
+ case EWP_INT8:
+ case EWP_UINT8:
+ return scnprintf(buf, buf_len, "%c%04d", del, sdata);
+ case EWP_INT16:
+ case EWP_UINT16:
+ return scnprintf(buf, buf_len, "%c%06d", del, sdata);
+ case EWP_INT32:
+ case EWP_UINT32:
+ return scnprintf(buf, buf_len, "%c%011d", del, sdata);
+ default:
+ DHD_FILTER_ERR(("INVALID TYPE for Serial:%d", display_type));
+ return 0;
+ }
+ }
+
+ if (format == EWP_BIN) {
+ int32 sdata = (int32) data;
+ switch (display_type) {
+ case EWP_BIT:
+ return scnprintf(buf, buf_len, "%c%011d", del, sdata);
+ default:
+ DHD_FILTER_ERR(("INVALID TYPE for Serial:%d", display_type));
+ return 0;
+ }
+ }
+
+ DHD_FILTER_ERR(("INVALID FORMAT for Serial:%d", format));
+ return 0;
+}
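+
+/*
+ * Sample outputs for the formats above (inputs are hypothetical):
+ *	ewpr_serial_basic(buf, len, 0x1a2b, EWP_HEX, EWP_UINT16, ' ')
+ *		writes " 1a2b" (delimiter plus zero-padded hex)
+ *	ewpr_serial_basic(buf, len, 42, EWP_DEC, EWP_UINT8, '_')
+ *		writes "_0042" (delimiter plus zero-padded decimal)
+ */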
+
+static int
+ewpr_get_multi_offset(uint16 looking_version, ewpr_serial_info_t *info)
+{
+ int idx;
+ ewpr_MVT_offset_elem_t *opv;
+
+ DHD_FILTER_TRACE(("FINDING MULTI OFFSET: type = %s version = %d\n",
+ info->name, looking_version));
+	for (idx = 0; idx < MAX_MULTI_VER; idx++) {
+ opv = &(info->v_info.opv[idx]);
+
+ /* END OF MULTI VERSION */
+ if (opv->version == 0) {
+ break;
+ }
+ if (looking_version == opv->version) {
+ return opv->offset;
+ }
+ }
+ /* return first version if no version is found */
+ return info->v_info.opv[0].offset;
+}
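+
+/*
+ * Note: if the element's version word matches none of the opv[] entries,
+ * the first version's offset is returned, so an element with an unknown
+ * (e.g. newer) layout degrades to the oldest interpretation instead of
+ * failing outright.
+ */
+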
+int
+ewpr_single_serial(ewpr_serial_info_t *info, char *buf, int buf_len, void *_ptr, char del)
+{
+ uint32 sval = 0;
+ char *ptr = (char *)_ptr;
+ uint32 offset = EWPF_INVALID;
+ uint16 version;
+
+ if (info->is_multi_version == TRUE) {
+ version = *(uint16 *)((char *)_ptr + info->v_info.version_offset);
+ offset = ewpr_get_multi_offset(version, info);
+ } else {
+ offset = info->offset;
+ }
+
+ if (offset == EWPF_INVALID) {
+ DHD_FILTER_ERR(("INVALID TYPE to OFFSET:%s\n", info->name));
+ return 0;
+ }
+
+ ptr += offset;
+
+ switch (info->data_type) {
+ case EWP_INT8:
+ sval = *(int8 *)ptr;
+ break;
+ case EWP_UINT8:
+ sval = *(uint8 *)ptr;
+ break;
+ case EWP_INT16:
+ sval = *(int16 *)ptr;
+ break;
+ case EWP_UINT16:
+ sval = *(uint16 *)ptr;
+ break;
+ case EWP_INT32:
+ sval = *(int32 *)ptr;
+ break;
+ case EWP_UINT32:
+ sval = *(uint32 *)ptr;
+ break;
+ /* XXX UINT64 is used only for debug */
+#ifdef EWPR_DEBUG
+ case EWP_UINT64:
+ sval = (uint32)(*(uint64 *)ptr);
+ break;
+#endif /* EWPR_DEBUG */
+ case EWP_BIT:
+ default:
+ DHD_FILTER_ERR(("INVALID TYPE for Single Serial:%d", info->data_type));
+ return 0;
+ }
+
+ return ewpr_serial_basic(buf, buf_len, sval, info->display_format, info->display_type, del);
+}
+
+int
+ewpr_diff_serial(ewpr_serial_info_t *info,
+ char *buf, int buf_len, void *_f_op, void *_s_op, char del)
+{
+ char *f_op = (char *)_f_op;
+ char *s_op = (char *)_s_op;
+ uint32 diff;
+ uint32 offset = EWPF_INVALID;
+ uint16 version;
+
+ if (info->is_multi_version == TRUE) {
+ version = *(uint16 *)(f_op + info->v_info.version_offset);
+ offset = ewpr_get_multi_offset(version, info);
+ } else {
+ offset = info->offset;
+ }
+
+ if (offset == EWPF_INVALID) {
+ DHD_FILTER_ERR(("INVALID TYPE to OFFSET:%s\n", info->name));
+ return 0;
+ }
+
+ f_op = f_op + offset;
+ s_op = s_op + offset;
+
+ switch (info->data_type) {
+ case EWP_INT8:
+ case EWP_UINT8:
+ diff = *(uint8 *)f_op - *(uint8 *)s_op;
+ break;
+ case EWP_INT16:
+ case EWP_UINT16:
+ diff = *(uint16 *)f_op - *(uint16 *)s_op;
+ break;
+ case EWP_INT32:
+ case EWP_UINT32:
+ diff = *(uint32 *)f_op - *(uint32 *)s_op;
+ break;
+ case EWP_UINT64:
+ diff = (uint32)(*(uint64 *)f_op - *(uint64 *)s_op);
+ break;
+ case EWP_BIT:
+ default:
+ DHD_FILTER_ERR(("INVALID TYPE to DIFF:%d", info->data_type));
+ return 0;
+ }
+
+ return ewpr_serial_basic(buf, buf_len, diff, info->display_format, info->display_type, del);
+}
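+
+/*
+ * For the 32-bit counters the subtraction above is modulo 2^32, so a
+ * counter that wrapped between the two snapshots still yields the true
+ * delta: e.g. (hypothetical samples) 0x00000002 - 0xfffffffe = 4.
+ */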
+
+#ifdef EWPR_DEBUG
+void
+ewpr_debug_dump(ewpr_serial_info_t *tbl, void **ring)
+{
+ void *elem;
+ int idx, idx2;
+ ewpr_serial_info_t *info;
+ char buf[EWPR_DEBUG_BUF_LEN];
+ uint32 bytes_written;
+ int lock_cnt;
+
+ for (idx = 0; strlen(tbl[idx].name) != 0; idx++) {
+ info = &tbl[idx];
+#ifdef DHD_EWPR_VER2
+ if (info->info_type != EWPF_INFO_ECNT) {
+ DHD_FILTER_ERR(("%s: unable to dump value\n", info->name));
+ break;
+ }
+#endif /* DHD_EWPR_VER2 */
+ memset(buf, 0, sizeof(buf));
+ lock_cnt = dhd_ring_lock_get_count(ring[info->ring_type - 1]);
+ elem = dhd_ring_lock_get_first(ring[info->ring_type - 1]);
+ bytes_written = scnprintf(buf, EWPR_DEBUG_BUF_LEN, "%s:", info->name);
+ for (idx2 = 0; elem && (idx2 < lock_cnt); idx2++) {
+ bytes_written += ewpr_single_serial(info, &buf[bytes_written],
+ EWPR_DEBUG_BUF_LEN - bytes_written, elem, KEY_DEL);
+ elem = dhd_ring_get_next(ring[info->ring_type - 1], elem);
+ }
+ DHD_FILTER_ERR(("%s\n", buf));
+ }
+}
+#endif /* EWPR_DEBUG */
+
+uint32
+dhd_event_log_filter_serialize(dhd_pub_t *dhdp, char *in_buf, uint32 tot_len, int type)
+{
+ EWP_filter_t *filter = (EWP_filter_t *)dhdp->event_log_filter;
+ void *ring[EWPF_MAX_IDX_TYPE];
+ char *ret_buf = in_buf;
+ int slice_id;
+ int iface_id;
+ int idx, idx2;
+ uint32 bytes_written = 0;
+ void *elem[EWPF_MAX_IDX_TYPE][EWPR_CSDCLIENT_DIFF];
+ void **elem_list;
+ int lock_cnt, lock_cnt2;
+ char *last_print;
+ void *arr_elem;
+ uint32 delta_list[EWPR_CSDCLIENT_DIFF];
+ ewpr_lock_param_t lock_param;
+ int print_name = FALSE;
+ char cookie_str[DEBUG_DUMP_TIME_BUF_LEN];
+ char iov_buf[EWPR_IOV_BUF_LEN];
+
+ if (type != 0) {
+ DHD_FILTER_ERR(("NOT SUPPORTED TYPE: %d\n", type));
+ return 0;
+ }
+
+ iface_id = 0; /* STA INTERFACE ONLY */
+ if (filter->last_channel <= CH_MAX_2G_CHANNEL) {
+ slice_id = EWPF_SLICE_AUX;
+ } else {
+ slice_id = EWPF_SLICE_MAIN;
+ }
+ ring[EWPF_IDX_TYPE_SLICE - 1] = filter->s_ring[slice_id];
+ ring[EWPF_IDX_TYPE_IFACE - 1] = filter->i_ring[iface_id];
+
+ /* Configure common LOCK parameter */
+ lock_param.max_armcycle = (uint32)EWPF_INVALID;
+ lock_param.min_armcycle = filter->last_armcycle;
+	lock_param.max_period = (EWPR_ARRAY_CNT - 1) * EWPR_INTERVAL;
+ lock_param.max_period *= EWPF_MSEC_TO_SEC * EWPF_ARM_TO_MSEC;
+ lock_param.delta_cnt = ARRAYSIZE(delta_list);
+ lock_param.delta_list = delta_list;
+
+ delta_list[EWPR_DELTA3_POS] = EWPR_DELTA3_CNT;
+ delta_list[EWPR_DELTA2_POS] = EWPR_DELTA2_CNT;
+ delta_list[EWPR_DELTA1_POS] = EWPR_DELTA1_CNT;
+ delta_list[EWPR_NOW_POS] = 0;
+ lock_param.ring = ring[EWPF_IDX_TYPE_IFACE -1];
+ lock_param.elem_list = elem[EWPF_IDX_TYPE_IFACE -1];
+ lock_cnt = ewpr_set_period_lock(&lock_param);
+ if (lock_cnt <= 0) {
+ DHD_FILTER_ERR(("FAIL TO GET IFACE LOCK: %d\n", iface_id));
+ bytes_written = 0;
+ goto finished;
+ }
+
+ delta_list[EWPR_DELTA3_POS] = EWPR_DELTA3_CNT;
+ delta_list[EWPR_DELTA2_POS] = EWPR_DELTA2_CNT;
+ delta_list[EWPR_DELTA1_POS] = EWPR_DELTA1_CNT;
+ delta_list[EWPR_NOW_POS] = 0;
+ lock_param.ring = ring[EWPF_IDX_TYPE_SLICE -1];
+ lock_param.elem_list = elem[EWPF_IDX_TYPE_SLICE -1];
+ lock_cnt2 = ewpr_set_period_lock(&lock_param);
+ if (lock_cnt2 <= 0) {
+ DHD_FILTER_ERR(("FAIL TO GET SLICE LOCK: %d\n", slice_id));
+ goto finished;
+ }
+
+ if (lock_cnt != lock_cnt2) {
+ DHD_FILTER_ERR(("Lock Count is Diff: iface:%d slice:%d\n", lock_cnt, lock_cnt2));
+ lock_cnt = MIN(lock_cnt, lock_cnt2);
+ }
+
+#ifdef EWPR_DEBUG
+ print_name = TRUE;
+ ewpr_debug_dump(ewpr_serial_dbg_tbl, ring);
+ ewpr_debug_dump(ewpr_serial_CSDCLIENT_diff_tbl, ring);
+ ewpr_debug_dump(ewpr_serial_CSDCLIENT_array_tbl, ring);
+#endif /* EWPR_DEBUG */
+
+ memset(ret_buf, 0, tot_len);
+ memset(cookie_str, 0, DEBUG_DUMP_TIME_BUF_LEN);
+ bytes_written = 0;
+ last_print = ret_buf;
+
+ /* XXX Counters BIG DATA not matched to file yet */
+ get_debug_dump_time(cookie_str);
+#ifdef DHD_LOG_DUMP
+ dhd_logdump_cookie_save(dhdp, cookie_str, "ECNT");
+#endif
+
+ /* KEY DATA */
+ bytes_written += scnprintf(&ret_buf[bytes_written],
+ tot_len - bytes_written, "%08x", EWP_REPORT_VERSION);
+ bytes_written += scnprintf(&ret_buf[bytes_written],
+ tot_len - bytes_written, "%c%s", KEY_DEL, cookie_str);
+ DHD_FILTER_ERR(("%d: %s\n", bytes_written, last_print));
+ last_print = &ret_buf[bytes_written];
+
+ for (idx = 0; strlen(ewpr_serial_CSDCLIENT_key_tbl[idx].name) != 0; idx++) {
+ ewpr_serial_info_t *info = &ewpr_serial_CSDCLIENT_key_tbl[idx];
+ elem_list = elem[info->ring_type - 1];
+ if (print_name) {
+ bytes_written += scnprintf(&ret_buf[bytes_written],
+ tot_len - bytes_written, " %s:", info->name);
+ }
+ bytes_written += ewpr_diff_serial(info, &ret_buf[bytes_written],
+ tot_len - bytes_written,
+ elem_list[EWPR_NOW_POS],
+ elem_list[EWPR_DELTA1_POS],
+ KEY_DEL);
+ if ((idx + 1) % EWPR_CNT_PER_LINE == 0) {
+ DHD_FILTER_ERR(("%d:%s\n", bytes_written, last_print));
+ last_print = &ret_buf[bytes_written];
+ }
+ }
+
+ /* RAW DATA */
+ /* XXX FIRST data shall use space:KEY delimiter */
+ bytes_written += scnprintf(&ret_buf[bytes_written],
+ tot_len - bytes_written, "%c%08x", KEY_DEL, EWP_REPORT_VERSION);
+ bytes_written += scnprintf(&ret_buf[bytes_written],
+ tot_len - bytes_written, "%c%s", RAW_DEL, cookie_str);
+
+ for (idx = 0; strlen(ewpr_serial_CSDCLIENT_diff_tbl[idx].name) != 0; idx++) {
+ ewpr_serial_info_t *info = &ewpr_serial_CSDCLIENT_diff_tbl[idx];
+ elem_list = elem[info->ring_type - 1];
+ if (print_name) {
+ bytes_written += scnprintf(&ret_buf[bytes_written],
+ tot_len - bytes_written, " %s:", info->name);
+ }
+ bytes_written += ewpr_diff_serial(info, &ret_buf[bytes_written],
+ tot_len - bytes_written,
+ elem_list[EWPR_NOW_POS],
+ elem_list[EWPR_DELTA1_POS],
+ RAW_DEL);
+ bytes_written += ewpr_diff_serial(info, &ret_buf[bytes_written],
+ tot_len - bytes_written,
+ elem_list[EWPR_DELTA1_POS],
+ elem_list[EWPR_DELTA2_POS],
+ RAW_DEL);
+ if ((idx + 1) % EWPR_CNT_PER_LINE == 0) {
+ DHD_FILTER_ERR(("%d:%s\n", bytes_written, last_print));
+ last_print = &ret_buf[bytes_written];
+ }
+ }
+
+ /* FILL BSS SPECIFIC DATA LATER */
+ if (dhd_iovar(dhdp, 0, "auth", NULL, 0, iov_buf, ARRAYSIZE(iov_buf), FALSE) < 0) {
+ DHD_FILTER_ERR(("fail to get auth\n"));
+ *(uint32 *)iov_buf = EWPF_INVALID;
+	}
+ bytes_written += scnprintf(&ret_buf[bytes_written],
+ tot_len - bytes_written, "%c%08x", RAW_DEL, *(uint32 *)iov_buf);
+
+ if (dhd_iovar(dhdp, 0, "wsec", NULL, 0, iov_buf, ARRAYSIZE(iov_buf), FALSE) < 0) {
+ DHD_FILTER_ERR(("fail to get wsec\n"));
+ *(uint32 *)iov_buf = EWPF_INVALID;
+	}
+ bytes_written += scnprintf(&ret_buf[bytes_written],
+ tot_len - bytes_written, "%c%08x", RAW_DEL, *(uint32 *)iov_buf);
+
+ if (dhd_iovar(dhdp, 0, "mfp", NULL, 0, iov_buf, ARRAYSIZE(iov_buf), FALSE) < 0) {
+ DHD_FILTER_ERR(("fail to get mfp\n"));
+ *(uint8 *)iov_buf = EWPF_INVALID;
+	}
+ bytes_written += scnprintf(&ret_buf[bytes_written],
+ tot_len - bytes_written, "%c%02x", RAW_DEL, *(uint8 *)iov_buf);
+
+ if (dhd_iovar(dhdp, 0, "bip", NULL, 0, iov_buf, ARRAYSIZE(iov_buf), FALSE) < 0) {
+ DHD_FILTER_ERR(("fail to get bip\n"));
+ *(uint8 *)iov_buf = EWPF_INVALID;
+ }
+ bytes_written += scnprintf(&ret_buf[bytes_written],
+ tot_len - bytes_written, "%c%02x", RAW_DEL, *(uint8 *)iov_buf);
+
+ for (idx = 0; strlen(ewpr_serial_CSDCLIENT_array_tbl[idx].name) != 0; idx++) {
+ ewpr_serial_info_t *info = &ewpr_serial_CSDCLIENT_array_tbl[idx];
+ if (print_name) {
+ bytes_written += scnprintf(&ret_buf[bytes_written],
+ tot_len - bytes_written, " %s:", info->name);
+ }
+ for (idx2 = 0; idx2 < EWPR_ARRAY_CNT - lock_cnt; idx2++) {
+ bytes_written += ewpr_serial_basic(&ret_buf[bytes_written],
+ tot_len - bytes_written, 0,
+ info->display_format, info->display_type, RAW_DEL);
+ }
+ arr_elem = elem[info->ring_type - 1][EWPR_DELTA3_POS];
+ for (; idx2 < EWPR_ARRAY_CNT; idx2++) {
+ if (arr_elem == NULL) {
+ DHD_FILTER_ERR(("ARR IS NULL : %d %p \n",
+ idx2, elem[info->ring_type - 1][EWPR_DELTA3_POS]));
+ break;
+ }
+ bytes_written += ewpr_single_serial(info, &ret_buf[bytes_written],
+ tot_len - bytes_written, arr_elem, RAW_DEL);
+ arr_elem = dhd_ring_get_next(ring[info->ring_type - 1], arr_elem);
+ }
+ DHD_FILTER_ERR(("%d:%s\n", bytes_written, last_print));
+ last_print = &ret_buf[bytes_written];
+ }
+
+finished:
+ DHD_FILTER_ERR(("RET LEN:%d\n", (int)strlen(ret_buf)));
+ dhd_ring_lock_free(ring[EWPF_IDX_TYPE_SLICE - 1]);
+ dhd_ring_lock_free(ring[EWPF_IDX_TYPE_IFACE - 1]);
+ return bytes_written;
+}
+
+int
+ewpr_set_period_lock(ewpr_lock_param_t *param)
+{
+ void *last;
+ void *first;
+ void *cur;
+ int lock_cnt;
+ int idx2;
+ int delta_idx;
+ uint32 last_armcycle;
+ uint32 first_armcycle;
+ uint32 cur_armcycle = 0;
+ void *ring = param->ring;
+
+ /* GET LATEST PTR */
+ last = dhd_ring_get_last(ring);
+ while (TRUE) {
+ if (last == NULL) {
+ DHD_FILTER_ERR(("NO LAST\n"));
+ return -1;
+ }
+ last_armcycle = *(uint32 *)last;
+ if (last_armcycle <= param->max_armcycle ||
+ last_armcycle + EWPF_EPOCH >= param->max_armcycle) {
+ break;
+ }
+ last = dhd_ring_get_prev(ring, last);
+ }
+
+ if (last_armcycle != param->max_armcycle) {
+		DHD_FILTER_TRACE(("MAX ARMCYCLE IS CHANGED new:%u prev:%u\n",
+ last_armcycle, param->max_armcycle));
+ param->max_armcycle = last_armcycle;
+ }
+
+ if (last_armcycle < param->min_armcycle) {
+ param->min_armcycle = 0;
+ }
+
+ /* GET FIRST PTR */
+ first_armcycle = last_armcycle;
+ first = last;
+ while (TRUE) {
+ cur = dhd_ring_get_prev(ring, first);
+ if (cur == NULL) {
+ break;
+ }
+ cur_armcycle = *(uint32 *)cur;
+ if (cur_armcycle >= first_armcycle) {
+ DHD_FILTER_TRACE(("case 1: %u %u\n", first_armcycle, cur_armcycle));
+ /* dongle is rebooted */
+ break;
+ }
+ if (cur_armcycle + EWPF_EPOCH < param->min_armcycle) {
+ DHD_FILTER_TRACE(("case 2: %u %u\n", param->min_armcycle, cur_armcycle));
+ /* Reach Limitation */
+ break;
+ }
+ if (cur_armcycle + param->max_period + EWPF_EPOCH < last_armcycle) {
+ DHD_FILTER_TRACE(("case 3: %u %u\n", param->max_period, cur_armcycle));
+ /* exceed max period */
+ break;
+ }
+ first = cur;
+ first_armcycle = cur_armcycle;
+ }
+
+ if (first_armcycle != param->min_armcycle) {
+		DHD_FILTER_TRACE(("MIN ARMCYCLE IS CHANGED new:%u prev:%u %u\n",
+ first_armcycle, param->min_armcycle, cur_armcycle));
+ param->min_armcycle = first_armcycle;
+ }
+
+ DHD_FILTER_TRACE(("ARM CYCLE of first(%u), last(%u)\n", first_armcycle, last_armcycle));
+
+ dhd_ring_lock(ring, first, last);
+
+ lock_cnt = dhd_ring_lock_get_count(ring);
+ if (lock_cnt <= 0) {
+ DHD_FILTER_ERR((" NO VALID RECORD : %d\n", lock_cnt));
+ return -1;
+ }
+ DHD_FILTER_TRACE(("Lock Count:%d\n", lock_cnt));
+
+ /* Validate delta position */
+ for (idx2 = 0; idx2 < param->delta_cnt - 1; idx2++) {
+ if (param->delta_list[idx2] >= param->delta_list[idx2 + 1]) {
+ DHD_FILTER_ERR(("INVALID DELTA at %d\n", idx2 + 1));
+ param->delta_list[idx2 + 1] = param->delta_list[idx2];
+ }
+ }
+
+ delta_idx = 0;
+ for (idx2 = 0; idx2 < lock_cnt && delta_idx < param->delta_cnt; idx2++) {
+ if (idx2 == 0) {
+ cur = dhd_ring_lock_get_last(ring);
+ } else {
+ cur = dhd_ring_get_prev(ring, cur);
+ }
+
+ if (idx2 >= param->delta_list[delta_idx]) {
+ param->elem_list[delta_idx] = cur;
+			delta_idx++;
+ }
+ }
+
+ /* COPY last elem to rest of the list */
+ delta_idx--;
+ for (idx2 = delta_idx + 1; idx2 < param->delta_cnt; idx2++) {
+ param->elem_list[idx2] = cur;
+ }
+ return lock_cnt;
+}
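+
+/*
+ * Worked example (hypothetical ring content): with lock_cnt == 7 and
+ * delta_list == {0, 2, 5, 9}, the selection loop above walks newest-first
+ * and latches elem_list[0] at idx2 == 0, elem_list[1] at idx2 == 2 and
+ * elem_list[2] at idx2 == 5; delta 9 exceeds the locked range, so the
+ * trailing copy loop reuses the oldest visited element (idx2 == 6) for
+ * elem_list[3].
+ */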
+
+#ifdef DHD_EWPR_VER2
+static int
+ewpr_single_bit_pack(ewpr_serial_info_t *info, char *buf, int buf_len,
+	void *_ptr, int bit_offset)
+{
+ int32 sval = 0;
+ char *ptr = (char *)_ptr;
+ uint32 offset = EWPF_INVALID;
+ uint16 version;
+ bool is_signed = FALSE;
+
+ if (info->is_multi_version == TRUE) {
+ version = *(uint16 *)((char *)_ptr + info->v_info.version_offset);
+ offset = ewpr_get_multi_offset(version, info);
+ } else {
+ offset = info->offset;
+ }
+
+ if (offset == EWPF_INVALID) {
+ DHD_FILTER_ERR(("INVALID TYPE to OFFSET:%s\n", info->name));
+ return 0;
+ }
+
+ ptr += offset;
+
+ switch (info->data_type) {
+ case EWP_INT8:
+ sval = *(int8 *)ptr;
+ is_signed = TRUE;
+ break;
+ case EWP_UINT8:
+ sval = *(uint8 *)ptr;
+ break;
+ case EWP_INT16:
+ sval = *(int16 *)ptr;
+ is_signed = TRUE;
+ break;
+ case EWP_UINT16:
+ sval = *(uint16 *)ptr;
+ break;
+ case EWP_INT32:
+ sval = *(int32 *)ptr;
+ is_signed = TRUE;
+ break;
+ case EWP_UINT32:
+ sval = *(uint32 *)ptr;
+ break;
+#ifdef EWPR_DEBUG
+ case EWP_UINT64:
+ sval = (int32)(*(uint64 *)ptr);
+ break;
+#endif /* EWPR_DEBUG */
+ default:
+ DHD_FILTER_ERR(("INVALID TYPE for Single Serial:%d", info->data_type));
+ return 0;
+ }
+
+ /* convert negative value to positive before bit packing */
+ if (is_signed) {
+ if (sval < 0) {
+ DHD_FILTER_TRACE(("convert to positive value %d\n", sval));
+ sval = ABS(sval);
+ }
+ }
+
+ if (info->unit_convert > 1) {
+ DHD_FILTER_TRACE(("convert unit %d / %d\n", sval, info->unit_convert));
+ sval = sval / info->unit_convert;
+ }
+
+ if (is_signed) {
+ DHD_FILTER_TRACE(("%s : signed value : %d, bit length: %d",
+ info->name, sval, info->display_bit_length));
+ } else {
+ DHD_FILTER_TRACE(("%s : unsigned value : %u, bit length: %d",
+ info->name, sval, info->display_bit_length));
+ }
+
+ return ewpr_bit_pack_basic(buf, buf_len, sval, info->display_format,
+ info->display_type, info->display_bit_length, bit_offset);
+}
+
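+/*
+ * Bit-pack the (absolute, unit-converted) difference between the same field
+ * in two records, '_f_op' and '_s_op', into 'buf' at 'bit_offset'. Returns
+ * the updated bit offset, or 0 on error.
+ */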
+static int
+ewpr_diff_bit_pack(ewpr_serial_info_t *info, char *buf, int buf_len,
+ void *_f_op, void *_s_op, int bit_offset)
+{
+ char *f_op = (char *)_f_op;
+ char *s_op = (char *)_s_op;
+ int32 diff;
+ uint32 offset = EWPF_INVALID;
+ uint16 version;
+
+ if (info->is_multi_version == TRUE) {
+ version = *(uint16 *)(f_op + info->v_info.version_offset);
+ offset = ewpr_get_multi_offset(version, info);
+ } else {
+ offset = info->offset;
+ }
+
+ if (offset == EWPF_INVALID) {
+ DHD_FILTER_ERR(("INVALID TYPE to OFFSET:%s\n", info->name));
+ return 0;
+ }
+
+ f_op = f_op + offset;
+ s_op = s_op + offset;
+
+ switch (info->data_type) {
+ case EWP_INT8:
+ case EWP_UINT8:
+ diff = *(uint8 *)f_op - *(uint8 *)s_op;
+ break;
+ case EWP_INT16:
+ case EWP_UINT16:
+ diff = *(uint16 *)f_op - *(uint16 *)s_op;
+ break;
+ case EWP_INT32:
+ case EWP_UINT32:
+ diff = *(uint32 *)f_op - *(uint32 *)s_op;
+ break;
+ case EWP_UINT64:
+ diff = (uint32)(*(uint64 *)f_op - *(uint64 *)s_op);
+ break;
+ default:
+ DHD_FILTER_ERR(("INVALID TYPE to DIFF:%d", info->data_type));
+ return 0;
+ }
+
+ if (diff < 0) {
+ DHD_FILTER_TRACE(("convert to positive value %d\n", diff));
+ diff = ABS(diff);
+ }
+
+ if (info->unit_convert > 1) {
+ DHD_FILTER_TRACE(("convert unit %d / %d\n", diff, info->unit_convert));
+ diff = diff / info->unit_convert;
+ }
+
+ DHD_FILTER_TRACE(("%s : value : %d, bit length: %d",
+ info->name, diff, info->display_bit_length));
+ return ewpr_bit_pack_basic(buf, buf_len, diff, info->display_format,
+ info->display_type, info->display_bit_length, bit_offset);
+}
+
+static int
+ewpr_bit_pack_basic(char *buf, int buf_len, uint32 data, int format, int display_type,
+ int display_bit_length, int bit_offset)
+{
+ if (format == EWP_BIN) {
+ uint32 sdata = (uint32) data;
+ switch (display_type) {
+ case EWP_BIT:
+ /* call bit packing */
+ return dhd_bit_pack(buf, buf_len, bit_offset,
+ sdata, display_bit_length);
+ default:
+ DHD_FILTER_ERR(("INVALID TYPE for Serial:%d", display_type));
+ return 0;
+ }
+ }
+
+ DHD_FILTER_ERR(("INVALID FORMAT for Serial:%d", format));
+ return 0;
+}
+
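+/* Look up the serialization table registered for the (index1, index2, index3) triple */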
+static ewpr_serial_info_t*
+ewpr_find_context_info(int index1, int index2, int index3)
+{
+ int idx = 0;
+ ewpr_serial_info_t *context_info = NULL;
+
+ for (idx = 0; ewpr_serial_context_info[idx].table != NULL; idx++) {
+ if (index1 == ewpr_serial_context_info[idx].index1 &&
+ index2 == ewpr_serial_context_info[idx].index2 &&
+ index3 == ewpr_serial_context_info[idx].index3) {
+ context_info = ewpr_serial_context_info[idx].table;
+ break;
+ }
+ }
+
+ if (context_info == NULL) {
+ DHD_FILTER_ERR(("unable to find context info for index number: %02x:%02x:%02x\n",
+ index1, index2, index3));
+ return NULL;
+ }
+
+ return context_info;
+}
+
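+/* Map a context info table back to its index in ewpr_serial_context_info[] */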
+static int
+ewpr_find_context_type(ewpr_serial_info_t* context_info)
+{
+ int idx = 0;
+ int context_type = BCME_ERROR;
+
+ /* index2, index3 are reserved */
+
+ for (idx = 0; ewpr_serial_context_info[idx].table != NULL; idx++) {
+ if (context_info == ewpr_serial_context_info[idx].table) {
+ context_type = idx;
+ break;
+ }
+ }
+
+ return context_type;
+}
+
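+/* vscnprintf wrapper that warns (but still prints) when 'input_len' exceeds the remaining buffer */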
+static uint32
+ewpr_scnprintf(char *buf, uint32 buf_len, uint32 input_len, char *data_type, char *fmt, ...)
+{
+ va_list args;
+
+ if (buf_len < input_len) {
+		DHD_FILTER_ERR(("%s: input length(%d) is larger than "
+			"remaining buffer length(%d)\n", data_type,
+ input_len, buf_len));
+ }
+ va_start(args, fmt);
+ buf_len = vscnprintf(buf, buf_len, fmt, args);
+ va_end(args);
+
+ return buf_len;
+}
+
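+/*
+ * Serialize the filtered dongle status selected by (index1, index2, index3)
+ * into 'in_buf' as a KEY_DEL-separated record: report version, debug-dump
+ * cookie, host trigger index, current/previous DHD status, and a base64
+ * encoding of the bit-packed raw counters. Returns the number of bytes
+ * written, or 0 on failure.
+ */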
+uint32
+dhd_event_log_filter_serialize_bit(dhd_pub_t *dhdp, char *in_buf, uint32 tot_len,
+ int index1, int index2, int index3)
+{
+ EWP_filter_t *filter = (EWP_filter_t *)dhdp->event_log_filter;
+ void *ring[EWPF_MAX_IDX_TYPE];
+ char *ret_buf = in_buf;
+ int slice_id;
+ int iface_id;
+ int event_id;
+ int key_info_id;
+ int idx;
+ int idx2;
+ uint32 bytes_written = 0;
+ int bits_written = 0;
+ void *elem[EWPF_MAX_IDX_TYPE][EWPR_CSDCLIENT_DIFF];
+ void **elem_list;
+ int lock_cnt, lock_cnt2;
+ uint32 delta_list[EWPR_CSDCLIENT_DIFF];
+ ewpr_lock_param_t lock_param;
+ char cookie_str[DEBUG_DUMP_TIME_BUF_LEN];
+ char iov_buf[EWPR_IOV_BUF_LEN];
+ char *raw_buf = NULL;
+ char *raw_encode_buf = NULL;
+ int raw_buf_size;
+ int ret = 0;
+ ewpr_serial_info_t *context_info = NULL;
+ int context_type;
+#ifdef DHD_STATUS_LOGGING
+ uint32 conv_cnt = 0;
+#endif /* DHD_STATUS_LOGGING */
+
+#ifdef DHD_STATUS_LOGGING
+ stat_elem_t dhd_stat[EWP_DHD_STAT_SIZE];
+ stat_query_t query;
+
+ memset(&dhd_stat[0], 0x00, sizeof(stat_elem_t) * EWP_DHD_STAT_SIZE);
+#endif /* DHD_STATUS_LOGGING */
+
+ context_info = ewpr_find_context_info(index1, index2, index3);
+ if (!context_info) {
+ return bytes_written;
+ }
+
+ if (tot_len < EWPR_MAX_STR_SIZE) {
+ DHD_FILTER_ERR(("%s: insufficient buffer size %d\n",
+ __FUNCTION__, tot_len));
+ return bytes_written;
+ }
+
+ iface_id = 0; /* STA INTERFACE ONLY */
+ event_id = 0; /* COMMON ID */
+ key_info_id = 0; /* COMMON ID */
+ if (filter->last_channel <= CH_MAX_2G_CHANNEL) {
+ slice_id = EWPF_SLICE_AUX;
+ } else {
+ slice_id = EWPF_SLICE_MAIN;
+ }
+ ring[EWPF_IDX_TYPE_SLICE - 1] = filter->s_ring[slice_id];
+ ring[EWPF_IDX_TYPE_IFACE - 1] = filter->i_ring[iface_id];
+ ring[EWPF_IDX_TYPE_EVENT - 1] = filter->e_ring[event_id];
+ ring[EWPF_IDX_TYPE_KEY_INFO - 1] = filter->k_ring[key_info_id];
+
+ /* Configure common LOCK parameter */
+ lock_param.max_armcycle = (uint32)EWPF_INVALID;
+ lock_param.min_armcycle = filter->last_armcycle;
+	lock_param.max_period = (EWPR_ARRAY_CNT - 1) * EWPR_INTERVAL;
+ lock_param.max_period *= EWPF_MSEC_TO_SEC * EWPF_ARM_TO_MSEC;
+ lock_param.delta_cnt = ARRAYSIZE(delta_list);
+ lock_param.delta_list = delta_list;
+
+ for (idx = 0; idx < EWPR_CSDCLIENT_DIFF; idx++) {
+ delta_list[idx] = idx * EWPR_DELTA_CNT;
+ }
+
+	lock_param.ring = ring[EWPF_IDX_TYPE_IFACE - 1];
+	lock_param.elem_list = elem[EWPF_IDX_TYPE_IFACE - 1];
+ lock_cnt = ewpr_set_period_lock(&lock_param);
+ if (lock_cnt <= 0) {
+ DHD_FILTER_ERR(("FAIL TO GET IFACE LOCK: %d\n", iface_id));
+ bytes_written = 0;
+ goto finished;
+ }
+
+ for (idx = 0; idx < EWPR_CSDCLIENT_DIFF; idx++) {
+ delta_list[idx] = idx * EWPR_DELTA_CNT;
+ }
+
+	lock_param.ring = ring[EWPF_IDX_TYPE_SLICE - 1];
+	lock_param.elem_list = elem[EWPF_IDX_TYPE_SLICE - 1];
+ lock_cnt2 = ewpr_set_period_lock(&lock_param);
+ if (lock_cnt2 <= 0) {
+ DHD_FILTER_ERR(("FAIL TO GET SLICE LOCK: %d\n", slice_id));
+ goto finished;
+ }
+
+ if (lock_cnt != lock_cnt2) {
+ DHD_FILTER_ERR(("Lock Count is Diff: iface:%d slice:%d\n", lock_cnt, lock_cnt2));
+ lock_cnt = MIN(lock_cnt, lock_cnt2);
+ }
+
+ for (idx = 0; idx < EWPR_CSDCLIENT_DIFF; idx++) {
+ delta_list[idx] = idx * EWPR_DELTA_CNT;
+ }
+
+	lock_param.ring = ring[EWPF_IDX_TYPE_EVENT - 1];
+	lock_param.elem_list = elem[EWPF_IDX_TYPE_EVENT - 1];
+ lock_cnt = ewpr_set_period_lock(&lock_param);
+ if (lock_cnt <= 0) {
+		DHD_FILTER_ERR(("FAIL TO GET EVENT ECNT LOCK: %d\n", event_id));
+ bytes_written = 0;
+ goto finished;
+ }
+
+ for (idx = 0; idx < EWPR_CSDCLIENT_DIFF; idx++) {
+ delta_list[idx] = idx * EWPR_DELTA_CNT;
+ }
+
+	lock_param.ring = ring[EWPF_IDX_TYPE_KEY_INFO - 1];
+	lock_param.elem_list = elem[EWPF_IDX_TYPE_KEY_INFO - 1];
+ lock_cnt = ewpr_set_period_lock(&lock_param);
+ if (lock_cnt <= 0) {
+		DHD_FILTER_ERR(("FAIL TO GET KEY INFO LOCK: %d\n", key_info_id));
+ bytes_written = 0;
+ goto finished;
+ }
+
+#ifdef EWPR_DEBUG
+ ewpr_debug_dump(context_info, ring);
+#endif /* EWPR_DEBUG */
+
+ memset(ret_buf, 0, tot_len);
+ memset(cookie_str, 0, DEBUG_DUMP_TIME_BUF_LEN);
+ bytes_written = 0;
+ bits_written = 0;
+
+ /* XXX Counters BIG DATA not matched to file yet */
+ get_debug_dump_time(cookie_str);
+#ifdef DHD_LOG_DUMP
+ dhd_logdump_cookie_save(dhdp, cookie_str, "ECNT");
+#endif /* DHD_LOG_DUMP */
+
+ /* KEY DATA */
+ bytes_written += ewpr_scnprintf(&ret_buf[bytes_written],
+ tot_len - bytes_written, REPORT_VERSION_STR_SIZE,
+ "report version", "%08x", EWP_REPORT_VERSION);
+
+ bytes_written += ewpr_scnprintf(&ret_buf[bytes_written],
+ tot_len - bytes_written, DELIMITER_LEN + strlen(cookie_str),
+ "cookie string", "%c%s", KEY_DEL, cookie_str);
+
+ bytes_written += ewpr_scnprintf(&ret_buf[bytes_written],
+ tot_len - bytes_written, DELIMITER_LEN + INDEX_STR_SIZE,
+ "host trigger index", "%c%02x%02x%02x", KEY_DEL, index1, index2, index3);
+
+#ifdef DHD_STATUS_LOGGING
+ DHD_FILTER_TRACE(("print dhd_stat size:%d * %d, size of filter list: %d\n",
+ (uint32)sizeof(dhd_stat[0]), EWP_DHD_STAT_SIZE,
+ (uint32)sizeof(dhd_statlog_filter)));
+ query.req_buf = NULL;
+ query.req_buf_len = 0;
+ query.resp_buf = (char *)dhd_stat;
+ query.resp_buf_len = DHD_STATLOG_RING_SIZE(EWP_DHD_STAT_SIZE);
+ query.req_num = EWP_DHD_STAT_SIZE;
+ ret = dhd_statlog_get_latest_info(dhdp, (void *)&query);
+ if (ret < 0) {
+ DHD_FILTER_ERR(("fail to get dhd statlog - %d\n", ret));
+ }
+#ifdef EWPR_DEBUG
+ for (idx = 0; idx < EWP_DHD_STAT_SIZE; idx++) {
+ DHD_FILTER_TRACE(("DHD status index: %d, timestamp: %llu, stat: %d\n",
+ idx, dhd_stat[idx].ts, dhd_stat[idx].stat));
+ }
+#endif
+ bytes_written += ewpr_scnprintf(&ret_buf[bytes_written],
+ tot_len - bytes_written, DELIMITER_LEN + DHD_STAT_STR_SIZE,
+ "current dhd status", "%c%02x", KEY_DEL, dhd_stat[0].stat);
+
+ bytes_written += ewpr_scnprintf(&ret_buf[bytes_written],
+ tot_len - bytes_written, DELIMITER_LEN + DHD_STAT_STR_SIZE,
+ "previous dhd status", "%c%02x", KEY_DEL, dhd_stat[1].stat);
+#else
+ /* reserved for dhd status information */
+ bytes_written += ewpr_scnprintf(&ret_buf[bytes_written],
+ tot_len - bytes_written, DELIMITER_LEN + DHD_STAT_STR_SIZE,
+ "current dhd status", "%c%02x", KEY_DEL, 0x00);
+
+ bytes_written += ewpr_scnprintf(&ret_buf[bytes_written],
+ tot_len - bytes_written, DELIMITER_LEN + DHD_STAT_STR_SIZE,
+ "previous dhd status", "%c%02x", KEY_DEL, 0x00);
+#endif /* DHD_STATUS_LOGGING */
+
+ /* RAW DATA */
+	raw_buf = MALLOCZ(dhdp->osh, RAW_BUFFER_SIZE);
+	if (raw_buf == NULL) {
+		DHD_FILTER_ERR(("%s: raw_buf alloc failed\n", __FUNCTION__));
+		bytes_written = 0;
+		goto finished;
+	}
+
+ for (idx = 0; strlen(context_info[idx].name) != 0; idx++) {
+ ewpr_serial_info_t *info = &context_info[idx];
+ elem_list = elem[info->ring_type - 1];
+ DHD_FILTER_TRACE(("%s : array_size: %d\n", info->name, info->display_array_size));
+ switch (info->info_type) {
+ case EWPF_INFO_VER:
+ DHD_FILTER_TRACE(("write %s - value: %d\n", info->name,
+ EWP_REPORT_VERSION));
+ bits_written = dhd_bit_pack(raw_buf, RAW_BUFFER_SIZE, bits_written,
+ EWP_REPORT_VERSION, info->display_bit_length);
+ break;
+ case EWPF_INFO_TYPE:
+ context_type = ewpr_find_context_type(context_info);
+ if (context_type < 0) {
+ DHD_FILTER_ERR(("fail to get context_type - %d\n",
+ context_type));
+ break;
+ }
+ DHD_FILTER_TRACE(("write %s - value: %d\n", info->name,
+ (uint32)context_type));
+ bits_written = dhd_bit_pack(raw_buf, RAW_BUFFER_SIZE, bits_written,
+ (uint32)context_type, info->display_bit_length);
+ break;
+ case EWPF_INFO_DHDSTAT:
+ if (strcmp("dhdstat_last_ts", info->name) == 0) {
+#ifdef DHD_STATUS_LOGGING
+ if (info->unit_convert > 1) {
+ conv_cnt = dhd_stat[0].ts_tz / info->unit_convert;
+ } else {
+ conv_cnt = dhd_stat[0].ts_tz;
+ }
+ DHD_FILTER_TRACE(("DHD status last timestamp:"
+ " %llu, %u", dhd_stat[0].ts_tz,
+ conv_cnt));
+ bits_written = dhd_bit_pack(raw_buf, RAW_BUFFER_SIZE,
+ bits_written, conv_cnt,
+ info->display_bit_length);
+#else
+ DHD_FILTER_TRACE(("No DHD status log timestamp\n"));
+ bits_written = dhd_bit_pack(raw_buf, RAW_BUFFER_SIZE,
+ bits_written, 0x00, info->display_bit_length);
+#endif /* DHD_STATUS_LOGGING */
+ } else if (strcmp("dhdstat_last", info->name) == 0) {
+#ifdef DHD_STATUS_LOGGING
+ DHD_FILTER_TRACE(("DHD status last stat: %d(0x%02x)",
+ dhd_stat[0].stat, dhd_stat[0].stat));
+ bits_written = dhd_bit_pack(raw_buf, RAW_BUFFER_SIZE,
+ bits_written, (uint32)dhd_stat[0].stat,
+ info->display_bit_length);
+#else
+ DHD_FILTER_TRACE(("No DHD status log value\n"));
+ bits_written = dhd_bit_pack(raw_buf, RAW_BUFFER_SIZE,
+ bits_written, 0x00, info->display_bit_length);
+#endif /* DHD_STATUS_LOGGING */
+ } else if (strcmp("dhdstat_prev_ts", info->name) == 0) {
+#ifdef DHD_STATUS_LOGGING
+ if (info->unit_convert > 1) {
+ conv_cnt = dhd_stat[1].ts_tz / info->unit_convert;
+ } else {
+ conv_cnt = dhd_stat[1].ts_tz;
+ }
+ DHD_FILTER_TRACE(("DHD status prev timestamp:"
+ " %llu, %u", dhd_stat[1].ts_tz,
+ conv_cnt));
+ bits_written = dhd_bit_pack(raw_buf, RAW_BUFFER_SIZE,
+ bits_written, conv_cnt,
+ info->display_bit_length);
+#else
+ DHD_FILTER_TRACE(("No DHD status log timestamp\n"));
+ bits_written = dhd_bit_pack(raw_buf, RAW_BUFFER_SIZE,
+ bits_written, 0x00, info->display_bit_length);
+#endif /* DHD_STATUS_LOGGING */
+ } else if (strcmp("dhdstat_prev", info->name) == 0) {
+#ifdef DHD_STATUS_LOGGING
+ DHD_FILTER_TRACE(("DHD status prev stat: %d(0x%02x)",
+ dhd_stat[1].stat, dhd_stat[1].stat));
+ bits_written = dhd_bit_pack(raw_buf, RAW_BUFFER_SIZE,
+ bits_written, (uint32)dhd_stat[1].stat,
+ info->display_bit_length);
+#else
+ DHD_FILTER_TRACE(("No DHD status log value\n"));
+ bits_written = dhd_bit_pack(raw_buf, RAW_BUFFER_SIZE,
+ bits_written, 0x00, info->display_bit_length);
+#endif /* DHD_STATUS_LOGGING */
+ } else {
+ DHD_FILTER_ERR(("unknown dhdstat name - %s\n",
+ info->name));
+ }
+ break;
+ case EWPF_INFO_ECNT:
+ for (idx2 = 0; idx2 < info->display_array_size; idx2++) {
+ if (info->display_method == EWPR_DISPLAY_METHOD_SINGLE) {
+ bits_written = ewpr_single_bit_pack(info, raw_buf,
+ RAW_BUFFER_SIZE, elem_list[idx2],
+ bits_written);
+ } else {
+ bits_written = ewpr_diff_bit_pack(info, raw_buf,
+ RAW_BUFFER_SIZE, elem_list[idx2],
+ elem_list[idx2+1], bits_written);
+ }
+ }
+ break;
+ case EWPF_INFO_IOVAR:
+ if (dhd_iovar(dhdp, 0, info->name, NULL, 0, iov_buf,
+ ARRAYSIZE(iov_buf), FALSE) < 0) {
+ DHD_FILTER_ERR(("fail to get auth\n"));
+ *(uint32 *)iov_buf = EWPF_INVALID;
+ }
+ DHD_FILTER_TRACE(("write %s - value: %d\n", info->name,
+ *(uint8 *)iov_buf));
+ bits_written = dhd_bit_pack(raw_buf, RAW_BUFFER_SIZE,
+ bits_written, *(uint8 *)iov_buf,
+ info->display_bit_length);
+ break;
+ case EWPF_INFO_CPLOG:
+			DHD_FILTER_TRACE(("write compact packet log\n"));
+ ret = 0;
+#if defined(DHD_PKT_LOGGING) && defined(DHD_COMPACT_PKT_LOG)
+ ret = dhd_cpkt_log_proc(dhdp, raw_buf, RAW_BUFFER_SIZE,
+ bits_written, info->display_array_size);
+#endif /* DHD_PKT_LOGGING && DHD_COMPACT_PKT_LOG */
+ if (ret < 0) {
+ DHD_FILTER_ERR(("fail to get compact packet log - %d\n",
+ ret));
+ break;
+ }
+ /* update bit offset */
+ DHD_FILTER_TRACE(("%d bits written\n", ret));
+ if (ret > 0) {
+ bits_written = ret;
+ }
+ break;
+ default:
+ DHD_FILTER_ERR(("unsupported info type\n"));
+ break;
+ }
+ DHD_FILTER_TRACE(("%d bits written\n", bits_written));
+ }
+
+ /* encode data */
+ raw_buf_size = BYTE_SIZE(bits_written);
+	raw_encode_buf = ewpr_base64_encode(dhdp, raw_buf, raw_buf_size);
+	if (raw_encode_buf == NULL) {
+		DHD_FILTER_ERR(("%s: base64 encoding failed\n", __FUNCTION__));
+		goto finished;
+	}
+
+#ifdef EWPR_DEBUG
+ DHD_FILTER_ERR(("raw_buf:\n"));
+ for (idx = 0; idx < raw_buf_size; idx++) {
+ ewpr_print_byte_as_bits(raw_buf[idx]);
+ }
+#endif /* EWPR_DEBUG */
+ DHD_FILTER_TRACE(("base64 encoding result:\n"));
+ DHD_FILTER_TRACE(("%s", raw_encode_buf));
+
+ bytes_written += ewpr_scnprintf(&ret_buf[bytes_written],
+ tot_len - bytes_written, DELIMITER_LEN + strlen(raw_encode_buf),
+ "base64 encoded raw data", "%c%s", KEY_DEL, raw_encode_buf);
+
+finished:
+ DHD_FILTER_ERR(("RET LEN:%d\n", (int)strlen(ret_buf)));
+ DHD_FILTER_TRACE(("ret_buf: %s", ret_buf));
+
+ dhd_ring_lock_free(ring[EWPF_IDX_TYPE_SLICE - 1]);
+ dhd_ring_lock_free(ring[EWPF_IDX_TYPE_IFACE - 1]);
+ dhd_ring_lock_free(ring[EWPF_IDX_TYPE_EVENT - 1]);
+ dhd_ring_lock_free(ring[EWPF_IDX_TYPE_KEY_INFO - 1]);
+
+ MFREE(dhdp->osh, raw_buf, RAW_BUFFER_SIZE);
+ MFREE(dhdp->osh, raw_encode_buf, BASE64_BUFFER_SIZE);
+ return bytes_written;
+}
+
+#ifdef EWPR_DEBUG
+static void
+ewpr_print_byte_as_bits(char val)
+{
+ int32 idx;
+ char buf[EWPR_DEBUG_BUF_LEN];
+ for (idx = 0; idx < MAX_BIT_SIZE; idx++) {
+ scnprintf(&buf[idx], EWPR_DEBUG_BUF_LEN-idx, "%c",
+ (val & (1 << (MAX_BIT_SHIFT-idx))) ? '1' : '0');
+ }
+ buf[MAX_BIT_SIZE] = 0x0;
+ DHD_FILTER_ERR(("%s\n", buf));
+}
+#endif /* EWPR_DEBUG */
+
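+/*
+ * Base64-encode up to RAW_BUFFER_SIZE bytes of 'input' into a freshly
+ * allocated BASE64_BUFFER_SIZE buffer. Returns NULL if the buffer cannot be
+ * allocated; otherwise the caller owns the buffer and must MFREE it.
+ */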
+static char*
+ewpr_base64_encode(dhd_pub_t *dhdp, char* input, int32 length)
+{
+ /* set up a destination buffer large enough to hold the encoded data */
+ char *output = MALLOCZ(dhdp->osh, BASE64_BUFFER_SIZE);
+ int32 cnt = 0;
+
+	if (output == NULL) {
+		DHD_FILTER_ERR(("%s: encode buffer alloc failed\n", __FUNCTION__));
+		return NULL;
+	}
+
+ if (length > RAW_BUFFER_SIZE) {
+ DHD_FILTER_ERR(("%s: input data size is too big, size is limited to %d\n",
+ __FUNCTION__, RAW_BUFFER_SIZE));
+ length = RAW_BUFFER_SIZE;
+ }
+
+ cnt = dhd_base64_encode(input, length, output, BASE64_BUFFER_SIZE);
+ if (cnt == 0) {
+ DHD_FILTER_ERR(("%s: base64 encoding error\n", __FUNCTION__));
+ }
+ return output;
+}
+#endif /* DHD_EWPR_VER2 */
+
+#ifdef WLADPS_ENERGY_GAIN
+#define ADPS_GAIN_ENERGY_CONV_UNIT 100000 /* energy unit(10^-2) * dur unit(10^-3) */
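+/*
+ * Convert the per-band ADPS duration gains reported by the dongle into a net
+ * energy gain (PM0-idle energy saved minus PS-Poll tx energy spent), clamped
+ * at 0. Returns BCME_ERROR on a version/length mismatch.
+ */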
+static int
+dhd_calculate_adps_energy_gain(wl_adps_energy_gain_v1_t *data)
+{
+ int i;
+ int energy_gain = 0;
+
+ /* energy unit: (uAh * 10^-2)/sec */
+ int pm0_idle_energy[MAX_BANDS] =
+ {ADPS_GAIN_2G_PM0_IDLE, ADPS_GAIN_5G_PM0_IDLE};
+ int txpspoll_energy[MAX_BANDS] =
+ {ADPS_GAIN_2G_TX_PSPOLL, ADPS_GAIN_5G_TX_PSPOLL};
+
+ if (data->version == 0 || data->length != sizeof(*data)) {
+ DHD_FILTER_ERR(("%s - invalid adps_energy_gain data\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ /* dur unit: mSec */
+ for (i = 0; i < MAX_BANDS; i++) {
+ energy_gain += (data->gain_data[i].pm_dur_gain * pm0_idle_energy[i]);
+ energy_gain -= (data->gain_data[i].step0_dur * txpspoll_energy[i]);
+ }
+ energy_gain /= ADPS_GAIN_ENERGY_CONV_UNIT;
+
+ if (energy_gain < 0) {
+ energy_gain = 0;
+ }
+
+ return energy_gain;
+}
+
+int dhd_event_log_filter_adps_energy_gain(dhd_pub_t *dhdp)
+{
+ int ret;
+
+ void *last_elem;
+ EWP_filter_t *filter;
+ EWPF_ifc_elem_t *ifc_elem;
+
+ if (!dhdp || !dhdp->event_log_filter) {
+ DHD_FILTER_ERR(("%s - dhdp or event_log_filter is NULL\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ filter = (EWP_filter_t *)dhdp->event_log_filter;
+
+ if (filter->enabled != TRUE) {
+ DHD_FILTER_ERR(("%s - EWP Filter is not enabled\n", __FUNCTION__));
+ return BCME_UNSUPPORTED;
+ }
+
+ /* Refer to STA interface */
+ last_elem = dhd_ring_get_last(filter->i_ring[0]);
+ if (last_elem == NULL) {
+ DHD_FILTER_ERR(("%s - last_elem is NULL\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ ifc_elem = (EWPF_ifc_elem_t *)last_elem;
+ ret = dhd_calculate_adps_energy_gain(&ifc_elem->adps_energy_gain);
+
+ return ret;
+}
+#endif /* WLADPS_ENERGY_GAIN */
diff --git a/bcmdhd.101.10.361.x/dhd_event_log_filter.h b/bcmdhd.101.10.361.x/dhd_event_log_filter.h
new file mode 100755
index 0000000..a39da3b
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_event_log_filter.h
@@ -0,0 +1,56 @@
+/*
+ * Wifi dongle status Filter and Report
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+#ifndef dhd_event_log_filter_h
+#define dhd_event_log_filter_h
+#include <dhd.h>
+#include <event_log_tag.h>
+#include <dhd_debug.h>
+
+typedef struct {
+ uint16 version;
+ uint8 htr_type; /* from wl_slice_hist_XX_stats_xtlv_id */
+ uint8 htr_num; /* number of elements in htr_running or htr_rc */
+ uint32 htr_rn_last; /* last reasons along with seq, etc */
+ uint32 htr_rn_ts_last; /* last time stamps corr to htr_rn_last */
+	uint32 htr_rn_prev; /* previous reasons along with seq, etc */
+	uint32 htr_rn_ts_prev; /* time stamps corr to htr_rn_prev */
+ uint32 htr_rc_max; /* largest toss reasons and counts */
+ uint32 htr_rc_ts_max; /* latest time stamp corr to htr_rc_max */
+ uint32 htr_rc_secnd; /* second largest toss reasons and counts */
+	uint32 htr_rc_ts_secnd; /* latest time stamps corr to htr_rc_secnd */
+} evt_hist_compact_toss_stats_v1_t;
+
+int dhd_event_log_filter_init(dhd_pub_t *dhdp, uint8 *buf, uint32 buf_size);
+void dhd_event_log_filter_deinit(dhd_pub_t *dhdp);
+void dhd_event_log_filter_event_handler(
+ dhd_pub_t *dhdp, prcd_event_log_hdr_t *plog_hdr, uint32 *data);
+
+void dhd_event_log_filter_notify_connect_request(dhd_pub_t *dhdp, uint8 *bssid, int channel);
+void dhd_event_log_filter_notify_connect_done(dhd_pub_t *dhdp, uint8 *bssid, int roam);
+#ifdef WLADPS_ENERGY_GAIN
+int dhd_event_log_filter_adps_energy_gain(dhd_pub_t *dhdp);
+#endif /* WLADPS_ENERGY_GAIN */
+#endif /* !dhd_event_log_filter_h */
diff --git a/bcmdhd.101.10.361.x/dhd_flowring.c b/bcmdhd.101.10.361.x/dhd_flowring.c
new file mode 100755
index 0000000..0841176
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_flowring.c
@@ -0,0 +1,1466 @@
+/*
+ * @file Broadcom Dongle Host Driver (DHD), Flow ring specific code at top level
+ *
+ * Flow rings are entities related to transmit traffic (i.e. traffic propagating towards the antenna)
+ *
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+/** XXX Twiki: [PCIeFullDongleArchitecture] */
+
+#include <typedefs.h>
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <bcmdevs.h>
+
+#include <ethernet.h>
+#include <bcmevent.h>
+#include <dngl_stats.h>
+
+#include <dhd.h>
+
+#include <dhd_flowring.h>
+#include <dhd_bus.h>
+#include <dhd_proto.h>
+#include <dhd_dbg.h>
+#include <802.1d.h>
+#include <pcie_core.h>
+#include <bcmmsgbuf.h>
+#include <dhd_pcie.h>
+#include <dhd_config.h>
+
+static INLINE int dhd_flow_queue_throttle(flow_queue_t *queue);
+
+static INLINE uint16 dhd_flowid_find(dhd_pub_t *dhdp, uint8 ifindex,
+ uint8 prio, char *sa, char *da);
+
+static INLINE uint16 dhd_flowid_alloc(dhd_pub_t *dhdp, uint8 ifindex,
+ uint8 prio, char *sa, char *da);
+
+static INLINE int dhd_flowid_lookup(dhd_pub_t *dhdp, uint8 ifindex,
+ uint8 prio, char *sa, char *da, uint16 *flowid);
+int dhd_flow_queue_overflow(flow_queue_t *queue, void *pkt);
+
+#define FLOW_QUEUE_PKT_NEXT(p) PKTLINK(p)
+#define FLOW_QUEUE_PKT_SETNEXT(p, x) PKTSETLINK((p), (x))
+
+#ifdef DHD_REPLACE_LOG_INFO_TO_TRACE
+#define DHD_FLOWRING_INFO DHD_TRACE
+#else
+#define DHD_FLOWRING_INFO DHD_INFO
+#endif /* DHD_REPLACE_LOG_INFO_TO_TRACE */
+
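+/* 802.1D priority to access category (prio2ac) and TID (prio2tid) mapping tables */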
+const uint8 prio2ac[8] = { 0, 1, 1, 0, 2, 2, 3, 3 };
+const uint8 prio2tid[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
+
+/** Queue overflow throttle. Return value: TRUE if throttle needs to be applied */
+static INLINE int
+dhd_flow_queue_throttle(flow_queue_t *queue)
+{
+#if defined(BCM_ROUTER_DHD)
+	/* Two tests:
+	 * 1) whether the overall level 2 (grandparent) cumulative threshold is crossed, or
+	 * 2) whether both the queue's budget and the overall cumulative threshold are crossed.
+	 */
+ void *gp_clen_ptr = DHD_FLOW_QUEUE_L2CLEN_PTR(queue);
+ void *parent_clen_ptr = DHD_FLOW_QUEUE_CLEN_PTR(queue);
+ int gp_cumm_threshold = DHD_FLOW_QUEUE_L2THRESHOLD(queue);
+ int cumm_threshold = DHD_FLOW_QUEUE_THRESHOLD(queue);
+
+ int ret = ((DHD_CUMM_CTR_READ(gp_clen_ptr) > gp_cumm_threshold) ||
+ ((DHD_FLOW_QUEUE_OVFL(queue, DHD_FLOW_QUEUE_MAX(queue))) &&
+ (DHD_CUMM_CTR_READ(parent_clen_ptr) > cumm_threshold)));
+ return ret;
+#else
+ return DHD_FLOW_QUEUE_FULL(queue);
+#endif /* BCM_ROUTER_DHD */
+}
+
+int
+BCMFASTPATH(dhd_flow_queue_overflow)(flow_queue_t *queue, void *pkt)
+{
+ return BCME_NORESOURCE;
+}
+
+/** Returns flow ring given a flowid */
+flow_ring_node_t *
+dhd_flow_ring_node(dhd_pub_t *dhdp, uint16 flowid)
+{
+ flow_ring_node_t * flow_ring_node;
+
+ ASSERT(dhdp != (dhd_pub_t*)NULL);
+ ASSERT(flowid <= dhdp->max_tx_flowid);
+ if (flowid > dhdp->max_tx_flowid) {
+ return NULL;
+ }
+
+ flow_ring_node = &(((flow_ring_node_t*)(dhdp->flow_ring_table))[flowid]);
+
+ ASSERT(flow_ring_node->flowid == flowid);
+ return flow_ring_node;
+}
+
+/** Returns 'backup' queue given a flowid */
+flow_queue_t *
+dhd_flow_queue(dhd_pub_t *dhdp, uint16 flowid)
+{
+ flow_ring_node_t * flow_ring_node = NULL;
+
+ flow_ring_node = dhd_flow_ring_node(dhdp, flowid);
+ if (flow_ring_node)
+ return &flow_ring_node->queue;
+ else
+ return NULL;
+}
+
+/* Flow ring's queue management functions */
+
+/** Reinitialize a flow ring's queue. */
+void
+dhd_flow_queue_reinit(dhd_pub_t *dhdp, flow_queue_t *queue, int max)
+{
+ ASSERT((queue != NULL) && (max > 0));
+
+ queue->head = queue->tail = NULL;
+ queue->len = 0;
+
+	/* Set the queue's threshold and the queue's parent cumulative length counter */
+ ASSERT(max > 1);
+ DHD_FLOW_QUEUE_SET_MAX(queue, max);
+ DHD_FLOW_QUEUE_SET_THRESHOLD(queue, max);
+ DHD_FLOW_QUEUE_SET_CLEN(queue, &dhdp->cumm_ctr);
+ DHD_FLOW_QUEUE_SET_L2CLEN(queue, &dhdp->l2cumm_ctr);
+
+ queue->failures = 0U;
+ queue->cb = &dhd_flow_queue_overflow;
+}
+
+/** Initialize a flow ring's queue, called on driver initialization. */
+void
+dhd_flow_queue_init(dhd_pub_t *dhdp, flow_queue_t *queue, int max)
+{
+ ASSERT((queue != NULL) && (max > 0));
+
+ dll_init(&queue->list);
+ dhd_flow_queue_reinit(dhdp, queue, max);
+}
+
+/** Register an enqueue overflow callback handler */
+void
+dhd_flow_queue_register(flow_queue_t *queue, flow_queue_cb_t cb)
+{
+ ASSERT(queue != NULL);
+ queue->cb = cb;
+}
+
+/**
+ * Enqueue an 802.3 packet at the back of a flow ring's queue. From there, it will travel later on
+ * to the flow ring itself.
+ */
+int
+BCMFASTPATH(dhd_flow_queue_enqueue)(dhd_pub_t *dhdp, flow_queue_t *queue, void *pkt)
+{
+ int ret = BCME_OK;
+
+ ASSERT(queue != NULL);
+
+ if (dhd_flow_queue_throttle(queue)) {
+ queue->failures++;
+ ret = (*queue->cb)(queue, pkt);
+ goto done;
+ }
+
+ if (queue->head) {
+ FLOW_QUEUE_PKT_SETNEXT(queue->tail, pkt);
+ } else {
+ queue->head = pkt;
+ }
+
+ FLOW_QUEUE_PKT_SETNEXT(pkt, NULL);
+
+ queue->tail = pkt; /* at tail */
+
+ queue->len++;
+	/* increment parent's cumulative length */
+	DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_CLEN_PTR(queue));
+	/* increment grandparent's cumulative length */
+ DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_L2CLEN_PTR(queue));
+
+done:
+ return ret;
+}
+
+/** Dequeue an 802.3 packet from a flow ring's queue, from head (FIFO) */
+void *
+BCMFASTPATH(dhd_flow_queue_dequeue)(dhd_pub_t *dhdp, flow_queue_t *queue)
+{
+ void * pkt;
+
+ ASSERT(queue != NULL);
+
+ pkt = queue->head; /* from head */
+
+ if (pkt == NULL) {
+ ASSERT((queue->len == 0) && (queue->tail == NULL));
+ goto done;
+ }
+
+ queue->head = FLOW_QUEUE_PKT_NEXT(pkt);
+ if (queue->head == NULL)
+ queue->tail = NULL;
+
+ queue->len--;
+	/* decrement parent's cumulative length */
+	DHD_CUMM_CTR_DECR(DHD_FLOW_QUEUE_CLEN_PTR(queue));
+	/* decrement grandparent's cumulative length */
+ DHD_CUMM_CTR_DECR(DHD_FLOW_QUEUE_L2CLEN_PTR(queue));
+
+	FLOW_QUEUE_PKT_SETNEXT(pkt, NULL); /* detach packet from queue */
+
+done:
+ return pkt;
+}
+
+/** Reinsert a dequeued 802.3 packet back at the head */
+void
+BCMFASTPATH(dhd_flow_queue_reinsert)(dhd_pub_t *dhdp, flow_queue_t *queue, void *pkt)
+{
+ if (queue->head == NULL) {
+ queue->tail = pkt;
+ }
+
+ FLOW_QUEUE_PKT_SETNEXT(pkt, queue->head);
+ queue->head = pkt;
+ queue->len++;
+	/* increment parent's cumulative length */
+	DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_CLEN_PTR(queue));
+	/* increment grandparent's cumulative length */
+ DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_L2CLEN_PTR(queue));
+}
+
+/** Fetch the backup queue for a flowring, and assign flow control thresholds */
+void
+dhd_flow_ring_config_thresholds(dhd_pub_t *dhdp, uint16 flowid,
+ int queue_budget, int cumm_threshold, void *cumm_ctr,
+ int l2cumm_threshold, void *l2cumm_ctr)
+{
+ flow_queue_t * queue = NULL;
+
+ ASSERT(dhdp != (dhd_pub_t*)NULL);
+ ASSERT(queue_budget > 1);
+ ASSERT(cumm_threshold > 1);
+ ASSERT(cumm_ctr != (void*)NULL);
+ ASSERT(l2cumm_threshold > 1);
+ ASSERT(l2cumm_ctr != (void*)NULL);
+
+ queue = dhd_flow_queue(dhdp, flowid);
+ if (queue) {
+ DHD_FLOW_QUEUE_SET_MAX(queue, queue_budget); /* Max queue length */
+
+		/* Set the queue's parent threshold and cumulative counter */
+ DHD_FLOW_QUEUE_SET_THRESHOLD(queue, cumm_threshold);
+ DHD_FLOW_QUEUE_SET_CLEN(queue, cumm_ctr);
+
+		/* Set the queue's grandparent threshold and cumulative counter */
+ DHD_FLOW_QUEUE_SET_L2THRESHOLD(queue, l2cumm_threshold);
+ DHD_FLOW_QUEUE_SET_L2CLEN(queue, l2cumm_ctr);
+ }
+}
+
+/*
+ * This function returns the total number of flowrings that can be created for an INFRA STA.
+ * For prio2ac mapping it will return 4: prio2ac[8] = { 0, 1, 1, 0, 2, 2, 3, 3 }
+ * For prio2tid mapping it will return 8: prio2tid[8] = { 0, 1, 2, 3, 4, 5, 6, 7 }
+ */
+uint8
+dhd_num_prio_supported_per_flow_ring(dhd_pub_t *dhdp)
+{
+ uint8 prio_count = 0;
+ int i;
+ /* Pick all elements one by one */
+ for (i = 0; i < NUMPRIO; i++)
+ {
+ /* Check if the picked element is already counted */
+ int j;
+ for (j = 0; j < i; j++) {
+ if (dhdp->flow_prio_map[i] == dhdp->flow_prio_map[j]) {
+ break;
+ }
+ }
+ /* If not counted earlier, then count it */
+ if (i == j) {
+ prio_count++;
+ }
+ }
+
+ return prio_count;
+}
+
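+/* Tx flowrings left for multi-client roles after reserving one ring per INFRA STA prio */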
+uint8
+dhd_get_max_multi_client_flow_rings(dhd_pub_t *dhdp)
+{
+ uint8 reserved_infra_sta_flow_rings = dhd_num_prio_supported_per_flow_ring(dhdp);
+ uint8 total_tx_flow_rings = (uint8)dhd_get_max_flow_rings(dhdp);
+ uint8 max_multi_client_flow_rings = total_tx_flow_rings - reserved_infra_sta_flow_rings;
+ return max_multi_client_flow_rings;
+}
+
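+/*
+ * Construct the 16-bit flowid allocator(s). Normal flowids start at
+ * FLOWID_RESERVED; with DHD_HTPUT_TUNABLES the last HTPUT_TOTAL_FLOW_RINGS
+ * ids are handed out by a separate htput allocator.
+ */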
+int
+dhd_flowid_map_init(dhd_pub_t *dhdp, uint16 max_tx_flow_rings)
+{
+#if defined(DHD_HTPUT_TUNABLES)
+ uint16 max_normal_tx_flow_rings = max_tx_flow_rings - HTPUT_TOTAL_FLOW_RINGS;
+#else
+ uint16 max_normal_tx_flow_rings = max_tx_flow_rings;
+#endif /* DHD_HTPUT_TUNABLES */
+
+ /* Construct a normal flowid allocator from FLOWID_RESERVED to
+ * (max_normal_tx_flow_rings - 1)
+ */
+ dhdp->flowid_allocator = id16_map_init(dhdp->osh, max_normal_tx_flow_rings,
+ FLOWID_RESERVED);
+ if (dhdp->flowid_allocator == NULL) {
+ DHD_ERROR(("%s: flowid allocator init failure\n", __FUNCTION__));
+ return BCME_NOMEM;
+ }
+
+#if defined(DHD_HTPUT_TUNABLES)
+ if (HTPUT_TOTAL_FLOW_RINGS > 0) {
+ dhdp->htput_flow_ring_start = max_normal_tx_flow_rings + FLOWID_RESERVED;
+ /* Construct a htput flowid allocator from htput_flow_ring_start to
+ * (htput_flow_ring_start + HTPUT_TOTAL_FLOW_RINGS - 1)
+ */
+ dhdp->htput_flowid_allocator = id16_map_init(dhdp->osh, HTPUT_TOTAL_FLOW_RINGS,
+ dhdp->htput_flow_ring_start);
+ if (dhdp->htput_flowid_allocator == NULL) {
+ DHD_ERROR(("%s: htput flowid allocator init failure\n", __FUNCTION__));
+ return BCME_NOMEM;
+ }
+ dhdp->htput_client_flow_rings = 0u;
+ }
+#endif /* DHD_HTPUT_TUNABLES */
+
+ return BCME_OK;
+}
+
+void
+dhd_flowid_map_deinit(dhd_pub_t *dhdp)
+{
+ if (dhdp->flowid_allocator) {
+ dhdp->flowid_allocator = id16_map_fini(dhdp->osh, dhdp->flowid_allocator);
+ }
+ ASSERT(dhdp->flowid_allocator == NULL);
+
+#if defined(DHD_HTPUT_TUNABLES)
+ if (dhdp->htput_flowid_allocator) {
+ dhdp->htput_flowid_allocator = id16_map_fini(dhdp->osh,
+ dhdp->htput_flowid_allocator);
+ ASSERT(dhdp->htput_flowid_allocator == NULL);
+ }
+ dhdp->htput_client_flow_rings = 0u;
+#endif /* DHD_HTPUT_TUNABLES */
+ return;
+}
+
+/** Initializes data structures of multiple flow rings
+ * num_h2d_rings - max_h2d_rings including static and dynamic rings
+ */
+int
+dhd_flow_rings_init(dhd_pub_t *dhdp, uint32 num_h2d_rings)
+{
+ uint32 idx;
+ uint32 flow_ring_table_sz = 0;
+ uint32 if_flow_lkup_sz = 0;
+ flow_ring_table_t *flow_ring_table = NULL;
+ if_flow_lkup_t *if_flow_lkup = NULL;
+ void *lock = NULL;
+ void *list_lock = NULL;
+ unsigned long flags;
+ uint16 max_tx_flow_rings;
+
+ DHD_INFO(("%s\n", __FUNCTION__));
+
+	/*
+	 * Only the 16-bit flowid map is allocated for the actual number of Tx flowrings,
+	 * excluding common rings.
+	 * All other flowring data structures are allocated for all num_h2d_rings.
+	 */
+ max_tx_flow_rings = dhd_get_max_flow_rings(dhdp);
+ if (dhd_flowid_map_init(dhdp, max_tx_flow_rings) != BCME_OK) {
+ DHD_ERROR(("%s: dhd_flowid_map_init failure\n", __FUNCTION__));
+ goto fail;
+ }
+
+	/* No Tx flow id may exceed max_tx_flowid */
+ dhdp->max_tx_flowid = max_tx_flow_rings + FLOWID_RESERVED - 1;
+
+ /* Allocate a flow ring table, comprising of requested number of rings */
+ flow_ring_table_sz = (num_h2d_rings * sizeof(flow_ring_node_t));
+ flow_ring_table = (flow_ring_table_t *)MALLOCZ(dhdp->osh, flow_ring_table_sz);
+ if (flow_ring_table == NULL) {
+ DHD_ERROR(("%s: flow ring table alloc failure\n", __FUNCTION__));
+ goto fail;
+ }
+
+ /* Initialize flow ring table state */
+ DHD_CUMM_CTR_INIT(&dhdp->cumm_ctr);
+ DHD_CUMM_CTR_INIT(&dhdp->l2cumm_ctr);
+ bzero((uchar *)flow_ring_table, flow_ring_table_sz);
+ for (idx = 0; idx < num_h2d_rings; idx++) {
+ flow_ring_table[idx].status = FLOW_RING_STATUS_CLOSED;
+ flow_ring_table[idx].flowid = (uint16)idx;
+ flow_ring_table[idx].lock = osl_spin_lock_init(dhdp->osh);
+#ifdef IDLE_TX_FLOW_MGMT
+ flow_ring_table[idx].last_active_ts = OSL_SYSUPTIME();
+#endif /* IDLE_TX_FLOW_MGMT */
+ if (flow_ring_table[idx].lock == NULL) {
+ DHD_ERROR(("%s: Failed to init spinlock for queue!\n", __FUNCTION__));
+ goto fail;
+ }
+
+ dll_init(&flow_ring_table[idx].list);
+
+ /* Initialize the per flow ring backup queue */
+ dhd_flow_queue_init(dhdp, &flow_ring_table[idx].queue,
+ dhdp->conf->flow_ring_queue_threshold);
+ }
+
+ /* Allocate per interface hash table (for fast lookup from interface to flow ring) */
+ if_flow_lkup_sz = sizeof(if_flow_lkup_t) * DHD_MAX_IFS;
+ if_flow_lkup = (if_flow_lkup_t *)DHD_OS_PREALLOC(dhdp,
+ DHD_PREALLOC_IF_FLOW_LKUP, if_flow_lkup_sz);
+ if (if_flow_lkup == NULL) {
+ DHD_ERROR(("%s: if flow lkup alloc failure\n", __FUNCTION__));
+ goto fail;
+ }
+
+ /* Initialize per interface hash table */
+ for (idx = 0; idx < DHD_MAX_IFS; idx++) {
+ int hash_ix;
+ if_flow_lkup[idx].status = 0;
+ if_flow_lkup[idx].role = 0;
+ for (hash_ix = 0; hash_ix < DHD_FLOWRING_HASH_SIZE; hash_ix++)
+ if_flow_lkup[idx].fl_hash[hash_ix] = NULL;
+ }
+
+ lock = osl_spin_lock_init(dhdp->osh);
+ if (lock == NULL)
+ goto fail;
+
+ list_lock = osl_spin_lock_init(dhdp->osh);
+ if (list_lock == NULL)
+ goto lock_fail;
+
+ dhdp->flow_prio_map_type = DHD_FLOW_PRIO_AC_MAP;
+ bcopy(prio2ac, dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);
+
+ dhdp->max_multi_client_flow_rings = dhd_get_max_multi_client_flow_rings(dhdp);
+ dhdp->multi_client_flow_rings = 0U;
+
+#ifdef DHD_LOSSLESS_ROAMING
+ dhdp->dequeue_prec_map = ALLPRIO;
+#endif
+ /* Now populate into dhd pub */
+ DHD_FLOWID_LOCK(lock, flags);
+ dhdp->num_h2d_rings = num_h2d_rings;
+ dhdp->flow_ring_table = (void *)flow_ring_table;
+ dhdp->if_flow_lkup = (void *)if_flow_lkup;
+ dhdp->flowid_lock = lock;
+ dhdp->flow_rings_inited = TRUE;
+ dhdp->flowring_list_lock = list_lock;
+ DHD_FLOWID_UNLOCK(lock, flags);
+
+ DHD_INFO(("%s done\n", __FUNCTION__));
+ return BCME_OK;
+
+lock_fail:
+ /* deinit the spinlock */
+ osl_spin_lock_deinit(dhdp->osh, lock);
+
+fail:
+ /* Destruct the per interface flow lkup table */
+ if (if_flow_lkup != NULL) {
+ DHD_OS_PREFREE(dhdp, if_flow_lkup, if_flow_lkup_sz);
+ }
+ if (flow_ring_table != NULL) {
+ for (idx = 0; idx < num_h2d_rings; idx++) {
+ if (flow_ring_table[idx].lock != NULL)
+ osl_spin_lock_deinit(dhdp->osh, flow_ring_table[idx].lock);
+ }
+ MFREE(dhdp->osh, flow_ring_table, flow_ring_table_sz);
+ }
+ dhd_flowid_map_deinit(dhdp);
+
+ return BCME_NOMEM;
+}
+
+/** Deinit Flow Ring specific data structures */
+void dhd_flow_rings_deinit(dhd_pub_t *dhdp)
+{
+ uint16 idx;
+ uint32 flow_ring_table_sz;
+ uint32 if_flow_lkup_sz;
+ flow_ring_table_t *flow_ring_table;
+ unsigned long flags;
+ void *lock;
+
+ DHD_INFO(("dhd_flow_rings_deinit\n"));
+
+ if (!(dhdp->flow_rings_inited)) {
+ DHD_ERROR(("dhd_flow_rings not initialized!\n"));
+ return;
+ }
+
+ if (dhdp->flow_ring_table != NULL) {
+
+ ASSERT(dhdp->num_h2d_rings > 0);
+
+ DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
+ flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
+ dhdp->flow_ring_table = NULL;
+ DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+ for (idx = 0; idx < dhdp->num_h2d_rings; idx++) {
+ if (flow_ring_table[idx].active) {
+ dhd_bus_clean_flow_ring(dhdp->bus, &flow_ring_table[idx]);
+ }
+ ASSERT(DHD_FLOW_QUEUE_EMPTY(&flow_ring_table[idx].queue));
+
+ /* Deinit flow ring queue locks before destroying flow ring table */
+ if (flow_ring_table[idx].lock != NULL) {
+ osl_spin_lock_deinit(dhdp->osh, flow_ring_table[idx].lock);
+ }
+ flow_ring_table[idx].lock = NULL;
+
+ }
+
+ /* Destruct the flow ring table */
+ flow_ring_table_sz = dhdp->num_h2d_rings * sizeof(flow_ring_table_t);
+ MFREE(dhdp->osh, flow_ring_table, flow_ring_table_sz);
+ }
+
+ DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
+
+ /* Destruct the per interface flow lkup table */
+ if (dhdp->if_flow_lkup != NULL) {
+ if_flow_lkup_sz = sizeof(if_flow_lkup_t) * DHD_MAX_IFS;
+ bzero((uchar *)dhdp->if_flow_lkup, if_flow_lkup_sz);
+ DHD_OS_PREFREE(dhdp, dhdp->if_flow_lkup, if_flow_lkup_sz);
+ dhdp->if_flow_lkup = NULL;
+ }
+
+ /* Destruct the flowid allocator */
+ dhd_flowid_map_deinit(dhdp);
+
+ dhdp->num_h2d_rings = 0U;
+ bzero(dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);
+
+ dhdp->max_multi_client_flow_rings = 0U;
+ dhdp->multi_client_flow_rings = 0U;
+
+ lock = dhdp->flowid_lock;
+ dhdp->flowid_lock = NULL;
+
+ if (lock) {
+ DHD_FLOWID_UNLOCK(lock, flags);
+ osl_spin_lock_deinit(dhdp->osh, lock);
+ }
+
+ osl_spin_lock_deinit(dhdp->osh, dhdp->flowring_list_lock);
+ dhdp->flowring_list_lock = NULL;
+
+ ASSERT(dhdp->if_flow_lkup == NULL);
+ ASSERT(dhdp->flow_ring_table == NULL);
+ dhdp->flow_rings_inited = FALSE;
+}
+
+/** Uses hash table to quickly map from ifindex to a flow ring 'role' (STA/AP) */
+uint8
+dhd_flow_rings_ifindex2role(dhd_pub_t *dhdp, uint8 ifindex)
+{
+ if_flow_lkup_t *if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
+ ASSERT(if_flow_lkup);
+ return if_flow_lkup[ifindex].role;
+}
+
+#ifdef WLTDLS
+bool is_tdls_destination(dhd_pub_t *dhdp, uint8 *da)
+{
+ unsigned long flags;
+ tdls_peer_node_t *cur = NULL;
+
+ DHD_TDLS_LOCK(&dhdp->tdls_lock, flags);
+ /* Check only if tdls peer is added */
+ if (dhdp->peer_tbl.tdls_peer_count && !(ETHER_ISMULTI(da))) {
+ cur = dhdp->peer_tbl.node;
+
+ while (cur != NULL) {
+ if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
+ DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags);
+ return TRUE;
+ }
+ cur = cur->next;
+ }
+ }
+ DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags);
+ return FALSE;
+}
+#endif /* WLTDLS */
+
+/** Uses hash table to quickly map from ifindex+prio+da to a flow ring id */
+static INLINE uint16
+dhd_flowid_find(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, char *sa, char *da)
+{
+ int hash;
+ bool ismcast = FALSE;
+ flow_hash_info_t *cur;
+ if_flow_lkup_t *if_flow_lkup;
+ unsigned long flags;
+
+ ASSERT(ifindex < DHD_MAX_IFS);
+ if (ifindex >= DHD_MAX_IFS)
+ return FLOWID_INVALID;
+
+ DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
+ if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
+
+ ASSERT(if_flow_lkup);
+
+ if (DHD_IF_ROLE_GENERIC_STA(dhdp, ifindex)) {
+#ifdef WLTDLS
+ if (is_tdls_destination(dhdp, da)) {
+ hash = DHD_FLOWRING_HASHINDEX(da, prio);
+ cur = if_flow_lkup[ifindex].fl_hash[hash];
+ while (cur != NULL) {
+ if (!memcmp(cur->flow_info.da, da, ETHER_ADDR_LEN)) {
+ DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+ return cur->flowid;
+ }
+ cur = cur->next;
+ }
+ DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+ return FLOWID_INVALID;
+ }
+#endif /* WLTDLS */
+		/* For STA non-TDLS dest and WDS dest, the flow ring id is mapped based on prio only */
+ cur = if_flow_lkup[ifindex].fl_hash[prio];
+ if (cur) {
+ DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+ return cur->flowid;
+ }
+ } else {
+
+ if (ETHER_ISMULTI(da)) {
+ ismcast = TRUE;
+ hash = 0;
+ } else {
+ hash = DHD_FLOWRING_HASHINDEX(da, prio);
+ }
+
+ cur = if_flow_lkup[ifindex].fl_hash[hash];
+
+ while (cur) {
+ if ((ismcast && ETHER_ISMULTI(cur->flow_info.da)) ||
+ (!memcmp(cur->flow_info.da, da, ETHER_ADDR_LEN) &&
+ (cur->flow_info.tid == prio))) {
+ DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+ return cur->flowid;
+ }
+ cur = cur->next;
+ }
+ }
+ DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+
+#ifdef DHD_EFI
+ DHD_TRACE(("%s: cannot find flowid\n", __FUNCTION__));
+#else
+ DHD_FLOWRING_INFO(("%s: cannot find flowid\n", __FUNCTION__));
+#endif
+ return FLOWID_INVALID;
+} /* dhd_flowid_find */
+
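+/*
+ * Allocate a flowid for (ifindex, prio, da). With DHD_HTPUT_TUNABLES,
+ * HTPUT_FLOW_RING_PRIO traffic for a STA role (and a limited number of
+ * multi-client peers) is served from the htput allocator first; everything
+ * else falls back to the normal allocator.
+ */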
+static uint16
+dhd_flowid_map_alloc(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, char *da)
+{
+ uint16 flowid = FLOWID_INVALID;
+ ASSERT(dhdp->flowid_allocator != NULL);
+
+#if defined(DHD_HTPUT_TUNABLES)
+ if (dhdp->htput_flowid_allocator) {
+ if (prio == HTPUT_FLOW_RING_PRIO) {
+ if (DHD_IF_ROLE_GENERIC_STA(dhdp, ifindex)) {
+				/* For the STA case, only one flowring per PRIO is created,
+				 * so there is no need for a HTPUT counter variable.
+				 * If a HTPUT flowring is already allocated for the given
+				 * HTPUT_PRIO, this function will not even get called, as
+				 * dhd_flowid_find takes care of assigning the same flowid
+				 * for those HTPUT_PRIO packets.
+				 */
+ flowid = id16_map_alloc(dhdp->htput_flowid_allocator);
+ } else if (DHD_IF_ROLE_MULTI_CLIENT(dhdp, ifindex) && !ETHER_ISMULTI(da)) {
+ /* Use HTPUT flowrings for only HTPUT_NUM_CLIENT_FLOW_RINGS */
+ if (dhdp->htput_client_flow_rings < HTPUT_NUM_CLIENT_FLOW_RINGS) {
+ flowid = id16_map_alloc(dhdp->htput_flowid_allocator);
+ /* increment htput client counter */
+ if (flowid != FLOWID_INVALID) {
+ dhdp->htput_client_flow_rings++;
+ }
+ }
+ }
+ }
+ }
+#endif /* DHD_HTPUT_TUNABLES */
+
+ BCM_REFERENCE(flowid);
+
+	/*
+	 * For the HTPUT case, if the high throughput flowrings are already allocated
+	 * for the given role, control falls through to here.
+	 */
+ if (flowid == FLOWID_INVALID) {
+ flowid = id16_map_alloc(dhdp->flowid_allocator);
+ }
+
+ return flowid;
+}
+
+/** Create unique Flow ID, called when a flow ring is created. */
+static INLINE uint16
+dhd_flowid_alloc(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, char *sa, char *da)
+{
+ flow_hash_info_t *fl_hash_node, *cur;
+ if_flow_lkup_t *if_flow_lkup;
+ int hash;
+ uint16 flowid;
+ unsigned long flags;
+
+ fl_hash_node = (flow_hash_info_t *) MALLOCZ(dhdp->osh, sizeof(flow_hash_info_t));
+ if (fl_hash_node == NULL) {
+		DHD_ERROR(("%s: flow_hash_info_t memory allocation failed\n", __FUNCTION__));
+ return FLOWID_INVALID;
+ }
+ memcpy(fl_hash_node->flow_info.da, da, sizeof(fl_hash_node->flow_info.da));
+
+ DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
+ flowid = dhd_flowid_map_alloc(dhdp, ifindex, prio, da);
+ DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+
+ if (flowid == FLOWID_INVALID) {
+ MFREE(dhdp->osh, fl_hash_node, sizeof(flow_hash_info_t));
+		DHD_ERROR_RLMT(("%s: cannot get free flowid\n", __FUNCTION__));
+ return FLOWID_INVALID;
+ }
+
+ fl_hash_node->flowid = flowid;
+ fl_hash_node->flow_info.tid = prio;
+ fl_hash_node->flow_info.ifindex = ifindex;
+ fl_hash_node->next = NULL;
+
+ DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
+ if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
+
+ if (DHD_IF_ROLE_GENERIC_STA(dhdp, ifindex)) {
+		/* For STA/GC non-TDLS dest and WDS dest we allocate the entry based on prio only */
+#ifdef WLTDLS
+ if (is_tdls_destination(dhdp, da)) {
+ hash = DHD_FLOWRING_HASHINDEX(da, prio);
+ cur = if_flow_lkup[ifindex].fl_hash[hash];
+ if (cur) {
+ while (cur->next) {
+ cur = cur->next;
+ }
+ cur->next = fl_hash_node;
+ } else {
+ if_flow_lkup[ifindex].fl_hash[hash] = fl_hash_node;
+ }
+ } else
+#endif /* WLTDLS */
+ if_flow_lkup[ifindex].fl_hash[prio] = fl_hash_node;
+ } else {
+
+		/* For bcast/mcast, assign the first slot in the interface */
+ hash = ETHER_ISMULTI(da) ? 0 : DHD_FLOWRING_HASHINDEX(da, prio);
+ cur = if_flow_lkup[ifindex].fl_hash[hash];
+ if (cur) {
+ while (cur->next) {
+ cur = cur->next;
+ }
+ cur->next = fl_hash_node;
+ } else
+ if_flow_lkup[ifindex].fl_hash[hash] = fl_hash_node;
+ }
+ DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+
+ DHD_FLOWRING_INFO(("%s: allocated flowid %d\n", __FUNCTION__, fl_hash_node->flowid));
+
+ if (fl_hash_node->flowid > dhdp->max_tx_flowid) {
+ DHD_ERROR(("%s: flowid=%d max_tx_flowid=%d ifindex=%d prio=%d role=%d\n",
+ __FUNCTION__, fl_hash_node->flowid, dhdp->max_tx_flowid,
+ ifindex, prio, if_flow_lkup[ifindex].role));
+ dhd_prhex("da", (uchar *)da, ETHER_ADDR_LEN, DHD_ERROR_VAL);
+ dhd_prhex("sa", (uchar *)sa, ETHER_ADDR_LEN, DHD_ERROR_VAL);
+ return FLOWID_INVALID;
+ }
+
+ return fl_hash_node->flowid;
+} /* dhd_flowid_alloc */
+
+/** Get flow ring ID, if not present try to create one */
+static INLINE int
+dhd_flowid_lookup(dhd_pub_t *dhdp, uint8 ifindex,
+ uint8 prio, char *sa, char *da, uint16 *flowid)
+{
+ uint16 id;
+ flow_ring_node_t *flow_ring_node;
+ flow_ring_table_t *flow_ring_table;
+ unsigned long flags;
+ int ret;
+
+ DHD_TRACE(("%s\n", __FUNCTION__));
+
+ if (!dhdp->flow_ring_table) {
+ return BCME_ERROR;
+ }
+
+ ASSERT(ifindex < DHD_MAX_IFS);
+ if (ifindex >= DHD_MAX_IFS)
+ return BCME_BADARG;
+
+ flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
+
+ id = dhd_flowid_find(dhdp, ifindex, prio, sa, da);
+
+ if (id == FLOWID_INVALID) {
+ bool if_role_multi_client;
+ if_flow_lkup_t *if_flow_lkup;
+ if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
+
+ if (!if_flow_lkup[ifindex].status)
+ return BCME_ERROR;
+
+ /* check role for multi client case */
+ if_role_multi_client = DHD_IF_ROLE_MULTI_CLIENT(dhdp, ifindex);
+
+ /* Abort Flowring creation if multi client flowrings crossed the threshold */
+#ifdef DHD_LIMIT_MULTI_CLIENT_FLOWRINGS
+ if (if_role_multi_client &&
+ (dhdp->multi_client_flow_rings >= dhdp->max_multi_client_flow_rings)) {
+ DHD_ERROR_RLMT(("%s: Max multi client flow rings reached: %d:%d\n",
+ __FUNCTION__, dhdp->multi_client_flow_rings,
+ dhdp->max_multi_client_flow_rings));
+ return BCME_ERROR;
+ }
+#endif /* DHD_LIMIT_MULTI_CLIENT_FLOWRINGS */
+
+ /* Do not create Flowring if peer is not associated */
+#if (defined(linux) || defined(LINUX)) && defined(PCIE_FULL_DONGLE)
+ if (if_role_multi_client && !ETHER_ISMULTI(da) &&
+ !dhd_sta_associated(dhdp, ifindex, (uint8 *)da)) {
+ DHD_ERROR_RLMT(("%s: Skip send pkt without peer addition\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+#endif /* (linux || LINUX) && PCIE_FULL_DONGLE */
+
+ id = dhd_flowid_alloc(dhdp, ifindex, prio, sa, da);
+ if (id == FLOWID_INVALID) {
+ DHD_ERROR_RLMT(("%s: alloc flowid ifindex %u status %u\n",
+ __FUNCTION__, ifindex, if_flow_lkup[ifindex].status));
+ return BCME_ERROR;
+ }
+
+ ASSERT(id <= dhdp->max_tx_flowid);
+
+ /* Only after flowid alloc, increment multi_client_flow_rings */
+ if (if_role_multi_client) {
+ dhdp->multi_client_flow_rings++;
+ }
+
+ /* register this flowid in dhd_pub */
+ dhd_add_flowid(dhdp, ifindex, prio, da, id);
+
+ flow_ring_node = (flow_ring_node_t *) &flow_ring_table[id];
+
+ DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
+
+ /* Init Flow info */
+ memcpy(flow_ring_node->flow_info.sa, sa, sizeof(flow_ring_node->flow_info.sa));
+ memcpy(flow_ring_node->flow_info.da, da, sizeof(flow_ring_node->flow_info.da));
+ flow_ring_node->flow_info.tid = prio;
+ flow_ring_node->flow_info.ifindex = ifindex;
+ flow_ring_node->active = TRUE;
+ flow_ring_node->status = FLOW_RING_STATUS_CREATE_PENDING;
+
+#ifdef DEVICE_TX_STUCK_DETECT
+ flow_ring_node->tx_cmpl = flow_ring_node->tx_cmpl_prev = OSL_SYSUPTIME();
+ flow_ring_node->stuck_count = 0;
+#endif /* DEVICE_TX_STUCK_DETECT */
+#ifdef TX_STATUS_LATENCY_STATS
+ flow_ring_node->flow_info.num_tx_status = 0;
+ flow_ring_node->flow_info.cum_tx_status_latency = 0;
+ flow_ring_node->flow_info.num_tx_pkts = 0;
+#endif /* TX_STATUS_LATENCY_STATS */
+#ifdef BCMDBG
+ bzero(&flow_ring_node->flow_info.tx_status[0],
+ sizeof(uint32) * DHD_MAX_TX_STATUS_MSGS);
+#endif
+ DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+
+ /* Create and inform device about the new flow */
+ if (dhd_bus_flow_ring_create_request(dhdp->bus, (void *)flow_ring_node)
+ != BCME_OK) {
+ DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
+ flow_ring_node->status = FLOW_RING_STATUS_CLOSED;
+ flow_ring_node->active = FALSE;
+ DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+ DHD_ERROR(("%s: create error %d\n", __FUNCTION__, id));
+ return BCME_ERROR;
+ }
+
+ *flowid = id;
+ return BCME_OK;
+ } else {
+ /* if the Flow id was found in the hash */
+
+ if (id > dhdp->max_tx_flowid) {
+ DHD_ERROR(("%s: Invalid flow id : %u, max_tx_flowid : %u\n",
+ __FUNCTION__, id, dhdp->max_tx_flowid));
+ *flowid = FLOWID_INVALID;
+ ASSERT(0);
+ return BCME_ERROR;
+ }
+
+ flow_ring_node = (flow_ring_node_t *) &flow_ring_table[id];
+ DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
+
+		/*
+		 * If the flow_ring_node is in the Open state or a status-pending state then
+		 * we can return the flow id to the caller. If the flow_ring_node is in
+		 * FLOW_RING_STATUS_PENDING, creation is in progress and
+		 * hence the packets should be queued.
+		 *
+		 * If the flow_ring_node is in FLOW_RING_STATUS_DELETE_PENDING or
+		 * FLOW_RING_STATUS_CLOSED, then we should return an error.
+		 * Note that if the flowring is being deleted we would mark it as
+		 * FLOW_RING_STATUS_DELETE_PENDING. Now before the dongle could respond and
+		 * before we mark it as FLOW_RING_STATUS_CLOSED we could get tx packets.
+		 * We should drop the packets in that case.
+		 * The decision to return OK should NOT be based on the 'active' variable,
+		 * because 'active' is made TRUE when a flow_ring_node gets allocated and is
+		 * made FALSE when the flow ring gets removed, and so does not reflect the
+		 * true state of the flow ring.
+		 * In case IDLE_TX_FLOW_MGMT is defined, we have to handle two more flowring
+		 * states. If the flow_ring_node's status is FLOW_RING_STATUS_SUSPENDED, the
+		 * flowid is to be returned and, from dhd_bus_txdata, the flowring would be
+		 * resumed again. The status FLOW_RING_STATUS_RESUME_PENDING is equivalent
+		 * to FLOW_RING_STATUS_CREATE_PENDING.
+		 */
+ if (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING ||
+ flow_ring_node->status == FLOW_RING_STATUS_CLOSED) {
+ *flowid = FLOWID_INVALID;
+ ret = BCME_ERROR;
+ } else {
+ *flowid = id;
+ ret = BCME_OK;
+ }
+
+ DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+ return ret;
+ } /* Flow Id found in the hash */
+} /* dhd_flowid_lookup */
+
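+/* Check whether 'flowid' is registered in ifindex's flow hash; returns BCME_OK if found */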
+int
+dhd_flowid_find_by_ifidx(dhd_pub_t *dhdp, uint8 ifindex, uint16 flowid)
+{
+ int hashidx = 0;
+ bool found = FALSE;
+ flow_hash_info_t *cur;
+ if_flow_lkup_t *if_flow_lkup;
+ unsigned long flags;
+
+ if (!dhdp->flow_ring_table) {
+ DHD_ERROR(("%s : dhd->flow_ring_table is NULL\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
+ if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
+ for (hashidx = 0; hashidx < DHD_FLOWRING_HASH_SIZE; hashidx++) {
+ cur = if_flow_lkup[ifindex].fl_hash[hashidx];
+ if (cur) {
+ if (cur->flowid == flowid) {
+ found = TRUE;
+ }
+
+ while (!found && cur) {
+ if (cur->flowid == flowid) {
+ found = TRUE;
+ break;
+ }
+ cur = cur->next;
+ }
+
+ if (found) {
+ DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+ return BCME_OK;
+ }
+ }
+ }
+ DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+
+ return BCME_ERROR;
+}
+
+int
+dhd_flowid_debug_create(dhd_pub_t *dhdp, uint8 ifindex,
+ uint8 prio, char *sa, char *da, uint16 *flowid)
+{
+ return dhd_flowid_lookup(dhdp, ifindex, prio, sa, da, flowid);
+}
+
+/**
+ * Assign existing or newly created flowid to an 802.3 packet. This flowid is later on used to
+ * select the flowring to send the packet to the dongle.
+ */
+int
+BCMFASTPATH(dhd_flowid_update)(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, void *pktbuf)
+{
+ uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
+ struct ether_header *eh = (struct ether_header *)pktdata;
+ uint16 flowid = 0;
+
+ ASSERT(ifindex < DHD_MAX_IFS);
+
+ if (ifindex >= DHD_MAX_IFS) {
+ return BCME_BADARG;
+ }
+
+ if (!dhdp->flowid_allocator) {
+		DHD_ERROR(("%s: Flow ring not initialized yet\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ if (dhd_flowid_lookup(dhdp, ifindex, prio, (char *)eh->ether_shost, (char *)eh->ether_dhost,
+ &flowid) != BCME_OK) {
+ return BCME_ERROR;
+ }
+
+ DHD_FLOWRING_INFO(("%s: prio %d flowid %d\n", __FUNCTION__, prio, flowid));
+
+ /* Tag the packet with flowid */
+ DHD_PKT_SET_FLOWID(pktbuf, flowid);
+ return BCME_OK;
+}
+
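+/* Return a flowid to the allocator it came from (htput or normal) */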
+static void
+dhd_flowid_map_free(dhd_pub_t *dhdp, uint8 ifindex, uint16 flowid)
+{
+#if defined(DHD_HTPUT_TUNABLES)
+ if (dhdp->htput_flowid_allocator) {
+ if (DHD_IS_FLOWID_HTPUT(dhdp, flowid)) {
+ id16_map_free(dhdp->htput_flowid_allocator, flowid);
+ /* decrement htput client counter */
+ if (DHD_IF_ROLE_MULTI_CLIENT(dhdp, ifindex)) {
+ dhdp->htput_client_flow_rings--;
+ }
+ return;
+ }
+ }
+#endif /* DHD_HTPUT_TUNABLES */
+
+ id16_map_free(dhdp->flowid_allocator, flowid);
+
+ return;
+}
+
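+/*
+ * Remove 'flowid' from the per-interface hash, deregister it from dhd_pub and
+ * return it to its allocator.
+ */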
+void
+dhd_flowid_free(dhd_pub_t *dhdp, uint8 ifindex, uint16 flowid)
+{
+ int hashix;
+ bool found = FALSE;
+ flow_hash_info_t *cur, *prev;
+ if_flow_lkup_t *if_flow_lkup;
+ unsigned long flags;
+ bool if_role_multi_client;
+
+ ASSERT(ifindex < DHD_MAX_IFS);
+ if (ifindex >= DHD_MAX_IFS)
+ return;
+
+ DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
+ if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
+
+ if_role_multi_client = DHD_IF_ROLE_MULTI_CLIENT(dhdp, ifindex);
+
+ for (hashix = 0; hashix < DHD_FLOWRING_HASH_SIZE; hashix++) {
+
+ cur = if_flow_lkup[ifindex].fl_hash[hashix];
+
+ if (cur) {
+ if (cur->flowid == flowid) {
+ found = TRUE;
+ }
+
+ prev = NULL;
+ while (!found && cur) {
+ if (cur->flowid == flowid) {
+ found = TRUE;
+ break;
+ }
+ prev = cur;
+ cur = cur->next;
+ }
+ if (found) {
+ if (!prev) {
+ if_flow_lkup[ifindex].fl_hash[hashix] = cur->next;
+ } else {
+ prev->next = cur->next;
+ }
+
+ /* Decrement multi_client_flow_rings */
+ if (if_role_multi_client) {
+ dhdp->multi_client_flow_rings--;
+ }
+
+ /* deregister flowid from dhd_pub. */
+ dhd_del_flowid(dhdp, ifindex, flowid);
+
+ dhd_flowid_map_free(dhdp, ifindex, flowid);
+ DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+ MFREE(dhdp->osh, cur, sizeof(flow_hash_info_t));
+
+ return;
+ }
+ }
+ }
+
+ DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+ DHD_ERROR(("%s: could not free flow ring hash entry flowid %d\n",
+ __FUNCTION__, flowid));
+} /* dhd_flowid_free */
+
+/**
+ * Delete all flow rings associated with the given interface. Called when, for
+ * example, the dongle indicates that a wireless link has gone down.
+ */
+void
+dhd_flow_rings_delete(dhd_pub_t *dhdp, uint8 ifindex)
+{
+ uint32 id;
+ flow_ring_table_t *flow_ring_table;
+
+ DHD_ERROR(("%s: ifindex %u\n", __FUNCTION__, ifindex));
+
+ ASSERT(ifindex < DHD_MAX_IFS);
+ if (ifindex >= DHD_MAX_IFS)
+ return;
+
+ if (!dhdp->flow_ring_table)
+ return;
+
+ flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
+ for (id = 0; id < dhdp->num_h2d_rings; id++) {
+ if (flow_ring_table[id].active &&
+ (flow_ring_table[id].flow_info.ifindex == ifindex) &&
+ (flow_ring_table[id].status == FLOW_RING_STATUS_OPEN)) {
+ dhd_bus_flow_ring_delete_request(dhdp->bus,
+ (void *) &flow_ring_table[id]);
+ }
+ }
+}
+
+void
+dhd_flow_rings_flush(dhd_pub_t *dhdp, uint8 ifindex)
+{
+ uint32 id;
+ flow_ring_table_t *flow_ring_table;
+
+ DHD_INFO(("%s: ifindex %u\n", __FUNCTION__, ifindex));
+
+ ASSERT(ifindex < DHD_MAX_IFS);
+ if (ifindex >= DHD_MAX_IFS)
+ return;
+
+ if (!dhdp->flow_ring_table)
+ return;
+ flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
+
+ for (id = 0; id < dhdp->num_h2d_rings; id++) {
+ if (flow_ring_table[id].active &&
+ (flow_ring_table[id].flow_info.ifindex == ifindex) &&
+ (flow_ring_table[id].status == FLOW_RING_STATUS_OPEN)) {
+ dhd_bus_flow_ring_flush_request(dhdp->bus,
+ (void *) &flow_ring_table[id]);
+ }
+ }
+}
+
+/** Delete flow ring(s) for given peer address. */
+void
+dhd_flow_rings_delete_for_peer(dhd_pub_t *dhdp, uint8 ifindex, char *addr)
+{
+ uint32 id;
+ flow_ring_table_t *flow_ring_table;
+
+ DHD_ERROR(("%s: ifindex %u\n", __FUNCTION__, ifindex));
+
+ ASSERT(ifindex < DHD_MAX_IFS);
+ if (ifindex >= DHD_MAX_IFS)
+ return;
+
+ if (!dhdp->flow_ring_table)
+ return;
+
+ flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
+ for (id = 0; id < dhdp->num_h2d_rings; id++) {
+ /*
+ * Send flowring delete request even if flowring status is
+ * FLOW_RING_STATUS_CREATE_PENDING, to handle cases where DISASSOC_IND
+ * event comes ahead of flowring create response.
+ * Otherwise the flowring will not be deleted later as there will not be any
+ * DISASSOC_IND event. With this change, when create response event comes to DHD,
+ * it will change the status to FLOW_RING_STATUS_OPEN and soon delete response
+ * event will come, upon which DHD will delete the flowring.
+ */
+ if (flow_ring_table[id].active &&
+ (flow_ring_table[id].flow_info.ifindex == ifindex) &&
+ (!memcmp(flow_ring_table[id].flow_info.da, addr, ETHER_ADDR_LEN)) &&
+ ((flow_ring_table[id].status == FLOW_RING_STATUS_OPEN) ||
+ (flow_ring_table[id].status == FLOW_RING_STATUS_CREATE_PENDING))) {
+ DHD_ERROR(("%s: deleting flowid %d\n",
+ __FUNCTION__, flow_ring_table[id].flowid));
+ dhd_bus_flow_ring_delete_request(dhdp->bus,
+ (void *) &flow_ring_table[id]);
+ }
+ }
+}
+
+/** Handles interface ADD, CHANGE, DEL indications from the dongle */
+void
+dhd_update_interface_flow_info(dhd_pub_t *dhdp, uint8 ifindex,
+ uint8 op, uint8 role)
+{
+ if_flow_lkup_t *if_flow_lkup;
+ unsigned long flags;
+
+ ASSERT(ifindex < DHD_MAX_IFS);
+ if (ifindex >= DHD_MAX_IFS)
+ return;
+
+ DHD_INFO(("%s: ifindex %u op %u role is %u \n",
+ __FUNCTION__, ifindex, op, role));
+ if (!dhdp->flowid_allocator) {
+ DHD_ERROR(("%s: Flow ring not intited yet \n", __FUNCTION__));
+ return;
+ }
+
+ DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
+ if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
+
+ if (op == WLC_E_IF_ADD || op == WLC_E_IF_CHANGE) {
+
+ if_flow_lkup[ifindex].role = role;
+
+ if (role == WLC_E_IF_ROLE_WDS) {
+ /**
+ * The WDS role does not send a WLC_E_LINK event after the interface is up,
+ * so to create flowrings for WDS the status is set to TRUE in WLC_E_IF
+ * itself; the same applies when setting the status to FALSE.
+ * TODO: Fix FW to send WLC_E_LINK for the WDS role as well, so that all
+ * interfaces are handled uniformly.
+ */
+ if_flow_lkup[ifindex].status = TRUE;
+ DHD_INFO(("%s: Mcast Flow ring for ifindex %d role is %d \n",
+ __FUNCTION__, ifindex, role));
+ }
+ } else if ((op == WLC_E_IF_DEL) && (role == WLC_E_IF_ROLE_WDS)) {
+ if_flow_lkup[ifindex].status = FALSE;
+ DHD_INFO(("%s: cleanup all Flow rings for ifindex %d role is %d \n",
+ __FUNCTION__, ifindex, role));
+ }
+ DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+}
+
+/** Handles a STA 'link' indication from the dongle */
+int
+dhd_update_interface_link_status(dhd_pub_t *dhdp, uint8 ifindex, uint8 status)
+{
+ if_flow_lkup_t *if_flow_lkup;
+ unsigned long flags;
+
+ ASSERT(ifindex < DHD_MAX_IFS);
+ if (ifindex >= DHD_MAX_IFS)
+ return BCME_BADARG;
+
+ DHD_INFO(("%s: ifindex %d status %d\n", __FUNCTION__, ifindex, status));
+
+ DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
+ if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
+
+ if (status) {
+ if_flow_lkup[ifindex].status = TRUE;
+ } else {
+ if_flow_lkup[ifindex].status = FALSE;
+ }
+
+ DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+
+ return BCME_OK;
+}
+
+/** Update flow priority mapping, called on IOVAR */
+int dhd_update_flow_prio_map(dhd_pub_t *dhdp, uint8 map)
+{
+ uint16 flowid;
+ flow_ring_node_t *flow_ring_node;
+
+ if (map > DHD_FLOW_PRIO_LLR_MAP)
+ return BCME_BADOPTION;
+
+ /* Check if we need to change prio map */
+ if (map == dhdp->flow_prio_map_type)
+ return BCME_OK;
+
+ /* If any ring is active we cannot change priority mapping for flow rings */
+ for (flowid = 0; flowid < dhdp->num_h2d_rings; flowid++) {
+ flow_ring_node = DHD_FLOW_RING(dhdp, flowid);
+ if (flow_ring_node->active)
+ return BCME_EPERM;
+ }
+
+ /* Inform firmware about new mapping type */
+ if (BCME_OK != dhd_flow_prio_map(dhdp, &map, TRUE))
+ return BCME_ERROR;
+
+ /* update internal structures */
+ dhdp->flow_prio_map_type = map;
+ if (dhdp->flow_prio_map_type == DHD_FLOW_PRIO_TID_MAP)
+ bcopy(prio2tid, dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);
+ else
+ bcopy(prio2ac, dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);
+
+ dhdp->max_multi_client_flow_rings = dhd_get_max_multi_client_flow_rings(dhdp);
+
+ return BCME_OK;
+}
+
+/** Inform firmware on updated flow priority mapping, called on IOVAR */
+int dhd_flow_prio_map(dhd_pub_t *dhd, uint8 *map, bool set)
+{
+ uint8 iovbuf[WLC_IOCTL_SMLEN];
+ int len;
+ uint32 val;
+ if (!set) {
+ bzero(&iovbuf, sizeof(iovbuf));
+ len = bcm_mkiovar("bus:fl_prio_map", NULL, 0, (char*)iovbuf, sizeof(iovbuf));
+ if (len == 0) {
+ return BCME_BUFTOOSHORT;
+ }
+ if (dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0) < 0) {
+ DHD_ERROR(("%s: failed to get fl_prio_map\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+ *map = iovbuf[0];
+ return BCME_OK;
+ }
+ val = (uint32)map[0];
+ len = bcm_mkiovar("bus:fl_prio_map", (char *)&val, sizeof(val),
+ (char*)iovbuf, sizeof(iovbuf));
+ if (len == 0) {
+ return BCME_BUFTOOSHORT;
+ }
+ if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, len, TRUE, 0) < 0) {
+ DHD_ERROR(("%s: failed to set fl_prio_map \n",
+ __FUNCTION__));
+ return BCME_ERROR;
+ }
+ return BCME_OK;
+}
+
+uint32
+dhd_active_tx_flowring_bkpq_len(dhd_pub_t *dhd)
+{
+ unsigned long list_lock_flags;
+ dll_t *item, *prev;
+ flow_ring_node_t *flow_ring_node;
+ dhd_bus_t *bus = dhd->bus;
+ uint32 active_tx_flowring_qlen = 0;
+
+ DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, list_lock_flags);
+
+ for (item = dll_tail_p(&bus->flowring_active_list);
+ !dll_end(&bus->flowring_active_list, item); item = prev) {
+
+ prev = dll_prev_p(item);
+
+ flow_ring_node = dhd_constlist_to_flowring(item);
+ if (flow_ring_node->active) {
+ DHD_INFO(("%s :%d\n", __FUNCTION__, flow_ring_node->queue.len));
+ active_tx_flowring_qlen += flow_ring_node->queue.len;
+ }
+ }
+ DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, list_lock_flags);
+ return active_tx_flowring_qlen;
+}
+
+#ifdef DHD_AWDL
+/**
+ * Handle/intercept the awdl peer op IOVAR fired by the user.
+ * buf == NULL means delete all peers on the awdl interface.
+ */
+void
+dhd_awdl_peer_op(dhd_pub_t *dhdp, uint8 ifindex, void *buf, uint32 buflen)
+{
+ awdl_peer_op_t *peer = (awdl_peer_op_t *)buf;
+ DHD_TRACE(("%s\n", __FUNCTION__));
+
+ ASSERT(ifindex < DHD_MAX_IFS);
+ if (ifindex >= DHD_MAX_IFS)
+ return;
+ if (!buf) {
+ /* Delete all peers in awdl interface */
+ if_flow_lkup_t *if_flow_lkup;
+ if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
+ if (if_flow_lkup[ifindex].role != WLC_E_IF_ROLE_AWDL) {
+ DHD_ERROR(("%s: Iinterface %d is not a awdl peer \n",
+ __FUNCTION__, ifindex));
+ return;
+ }
+ dhd_flow_rings_delete(dhdp, ifindex);
+ return;
+ }
+ /* Parse awdl_peer_op info now */
+ if (buflen < sizeof(awdl_peer_op_t)) {
+ DHD_ERROR(("%s: cannot handle awdl_peer_op add/del\n", __FUNCTION__));
+ return;
+ }
+ /**
+ * Only flowring deletion is handled here
+ * Flowring addition is taken care in dhd_flowid_lookup
+ */
+ if (peer->opcode == AWDL_PEER_OP_DEL) {
+ dhd_del_sta(dhdp, ifindex, &peer->addr.octet[0]);
+ dhd_flow_rings_delete_for_peer(dhdp, ifindex, (char *)&peer->addr.octet[0]);
+ } else if (peer->opcode == AWDL_PEER_OP_ADD) {
+ dhd_findadd_sta(dhdp, ifindex, &peer->addr.octet[0]);
+ }
+ return;
+}
+#endif /* DHD_AWDL */
diff --git a/bcmdhd.101.10.361.x/dhd_flowring.h b/bcmdhd.101.10.361.x/dhd_flowring.h
new file mode 100755
index 0000000..873ca68
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_flowring.h
@@ -0,0 +1,350 @@
+/*
+ * @file Header file describing the flow rings DHD interfaces.
+ *
+ * Flow rings are entities related to transmit traffic (i.e. propagating towards the antenna).
+ *
+ * Provides the type definitions and function prototypes used to create, delete and
+ * manage flow rings at a high level.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+/** XXX Twiki: [PCIeFullDongleArchitecture] */
+
+/****************
+ * Common types *
+ */
+
+#ifndef _dhd_flowrings_h_
+#define _dhd_flowrings_h_
+
+/* Max pkts held in a flow ring's backup queue */
+#define FLOW_RING_QUEUE_THRESHOLD (2048)
+
+/* Number of H2D common rings */
+#define FLOW_RING_COMMON BCMPCIE_H2D_COMMON_MSGRINGS
+
+#define FLOWID_INVALID (ID16_INVALID)
+#define FLOWID_RESERVED (FLOW_RING_COMMON)
+
+#define FLOW_RING_STATUS_OPEN 0
+#define FLOW_RING_STATUS_CREATE_PENDING 1
+#define FLOW_RING_STATUS_CLOSED 2
+#define FLOW_RING_STATUS_DELETE_PENDING 3
+#define FLOW_RING_STATUS_FLUSH_PENDING 4
+
+#ifdef IDLE_TX_FLOW_MGMT
+#define FLOW_RING_STATUS_SUSPENDED 5
+#define FLOW_RING_STATUS_RESUME_PENDING 6
+#endif /* IDLE_TX_FLOW_MGMT */
+#define FLOW_RING_STATUS_STA_FREEING 7
+
+#if defined(DHD_HTPUT_TUNABLES)
+#define HTPUT_FLOW_RING_PRIO PRIO_8021D_BE
+#define HTPUT_NUM_STA_FLOW_RINGS 1u
+#define HTPUT_NUM_CLIENT_FLOW_RINGS 3u
+#define HTPUT_TOTAL_FLOW_RINGS (HTPUT_NUM_STA_FLOW_RINGS + HTPUT_NUM_CLIENT_FLOW_RINGS)
+#define DHD_IS_FLOWID_HTPUT(pub, flowid) \
+ ((flowid >= (pub)->htput_flow_ring_start) && \
+ (flowid < ((pub)->htput_flow_ring_start + HTPUT_TOTAL_FLOW_RINGS)))
+#endif /* DHD_HTPUT_TUNABLES */
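DHD_IS_FLOWID_HTPUT is a plain window test: high-throughput flowids occupy one contiguous block of HTPUT_TOTAL_FLOW_RINGS ids starting at htput_flow_ring_start. A standalone sketch of the same check (the start value 8 is made up for illustration):

	#include <stdio.h>

	#define EX_HTPUT_TOTAL_FLOW_RINGS 4u

	/* 'start' stands in for (pub)->htput_flow_ring_start. */
	static int is_flowid_htput(unsigned int flowid, unsigned int start)
	{
		return (flowid >= start) && (flowid < start + EX_HTPUT_TOTAL_FLOW_RINGS);
	}

	int main(void)
	{
		/* With start == 8, flowids 8..11 are htput rings; 7 and 12 are not. */
		printf("%d %d %d\n", is_flowid_htput(7, 8), is_flowid_htput(9, 8),
		       is_flowid_htput(12, 8)); /* prints: 0 1 0 */
		return 0;
	}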
+
+#ifdef DHD_EFI
+/*
+ * Each lbuf is 2048 bytes, of which the last 112 bytes are occupied by the lbuf header.
+ * Since the lbuf header is a crucial data structure we want to avoid operations close
+ * to it, so a pad of 136 bytes is provided; lbuf header and pad together take 248 bytes.
+ *
+ * The maximum usable lbuf size is therefore 1800 bytes.
+ *
+ * These 1800 bytes are used as follows:
+ *
+ * 1. FW operating in mode2 requires 98 bytes for extra headers
+ * like SNAP, PLCP etc., whereas FW operating in mode4 requires 70 bytes.
+ * EFI DHD therefore reserves 98 bytes, which covers chips operating in both
+ * mode2 and mode4.
+ *
+ * 2. For TPUT tests in EFI the user can request a maximum payload of 1500 bytes.
+ * 100 bytes are reserved for the ethernet header, TPUT header etc., so 1600 bytes
+ * are used for headers and payload.
+ *
+ * So 1698 (98 + 1600) bytes are consumed by 1 and 2, which leaves 102 bytes
+ * that can still be used if FW needs buffer room for more headers in the future.
+ *
+ * --Update-- 13Jul2018 (above comments preserved for history)
+ * 3. 11ax chips need more headroom: FW requires a minimum of 1920 bytes for Rx
+ * buffers or it will trap. The size is therefore bumped up to 1920 bytes, which
+ * leaves only 16 bytes of pad between data and the lbuf header; a further size
+ * increase may not be possible.
+ */
+#define DHD_FLOWRING_RX_BUFPOST_PKTSZ 1920
+#else
+#define DHD_FLOWRING_RX_BUFPOST_PKTSZ 2048
+#endif /* DHD_EFI */
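The EFI sizing above is plain bookkeeping, so it can be restated as a compile-time check. A sketch assuming C11 _Static_assert (the enum names are illustrative; the byte counts are the ones from the comment):

	/* Sketch of the lbuf budget described above (all values in bytes). */
	enum {
		EX_LBUF_TOTAL = 2048, /* one lbuf */
		EX_LBUF_HDR   = 112,  /* lbuf header at the end of the buffer */
		EX_RX_BUFPOST = 1920, /* DHD_FLOWRING_RX_BUFPOST_PKTSZ on EFI */
	};
	_Static_assert(EX_LBUF_TOTAL - EX_LBUF_HDR - EX_RX_BUFPOST == 16,
		"only 16 bytes of pad remain between data and the lbuf header");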
+
+#define DHD_FLOWRING_RX_BUFPOST_PKTSZ_MAX 4096
+
+#define DHD_FLOWRING_TX_BIG_PKT_SIZE (3700u)
+
+#define DHD_FLOW_PRIO_AC_MAP 0
+#define DHD_FLOW_PRIO_TID_MAP 1
+/* Flow ring priority map for lossless roaming */
+#define DHD_FLOW_PRIO_LLR_MAP 2
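AC mode folds the eight 802.1D user priorities into the four WMM access categories, while TID mode keeps all eight priorities distinct. A sketch of the conventional prio-to-AC fold (the AC values and table here are illustrative only; the driver's own prio2ac[] table, defined elsewhere in this source tree, is authoritative):

	enum { EX_AC_BK, EX_AC_BE, EX_AC_VI, EX_AC_VO }; /* illustrative values */

	static const unsigned char example_prio2ac[8] = {
		EX_AC_BE, /* prio 0: best effort */
		EX_AC_BK, /* prio 1: background  */
		EX_AC_BK, /* prio 2: background  */
		EX_AC_BE, /* prio 3: best effort */
		EX_AC_VI, /* prio 4: video       */
		EX_AC_VI, /* prio 5: video       */
		EX_AC_VO, /* prio 6: voice       */
		EX_AC_VO, /* prio 7: voice       */
	};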
+
+/* Hash a MAC address and priority for lookup in a per-interface flow hash table */
+#define DHD_FLOWRING_HASH_SIZE 256
+#define DHD_FLOWRING_HASHINDEX(ea, prio) \
+ ((((uint8 *)(ea))[3] ^ ((uint8 *)(ea))[4] ^ ((uint8 *)(ea))[5] ^ ((uint8)(prio))) \
+ % DHD_FLOWRING_HASH_SIZE)
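The hash folds the low three octets of the MAC address with the packet priority and reduces the result modulo the table size. A standalone restatement (plain C; stdint types stand in for the driver typedefs):

	#include <stdint.h>
	#include <stdio.h>

	#define EX_FLOWRING_HASH_SIZE 256

	/* Same folding as DHD_FLOWRING_HASHINDEX. */
	static unsigned int flowring_hashindex(const uint8_t *ea, uint8_t prio)
	{
		return (ea[3] ^ ea[4] ^ ea[5] ^ prio) % EX_FLOWRING_HASH_SIZE;
	}

	int main(void)
	{
		const uint8_t da[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

		/* 0x33 ^ 0x44 ^ 0x55 ^ 0x05 = 0x27, so bucket 39 */
		printf("bucket = %u\n", flowring_hashindex(da, 5));
		return 0;
	}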
+
+#define DHD_IF_ROLE(pub, idx) (((if_flow_lkup_t *)(pub)->if_flow_lkup)[idx].role)
+#define DHD_IF_ROLE_AP(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_AP)
+#define DHD_IF_ROLE_STA(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_STA)
+#define DHD_IF_ROLE_P2PGC(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_P2P_CLIENT)
+#define DHD_IF_ROLE_P2PGO(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_P2P_GO)
+#define DHD_IF_ROLE_WDS(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_WDS)
+#define DHD_IF_ROLE_IBSS(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_IBSS)
+#define DHD_IF_ROLE_NAN(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_NAN)
+
+#define DHD_IF_ROLE_GENERIC_STA(pub, idx) \
+ (DHD_IF_ROLE_STA(pub, idx) || DHD_IF_ROLE_P2PGC(pub, idx) || DHD_IF_ROLE_WDS(pub, idx))
+
+#ifdef DHD_AWDL
+#define DHD_IF_ROLE_AWDL(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_AWDL)
+#define DHD_IF_ROLE_MULTI_CLIENT(pub, idx) \
+ (DHD_IF_ROLE_AP(pub, idx) || DHD_IF_ROLE_P2PGO(pub, idx) || DHD_IF_ROLE_AWDL(pub, idx) ||\
+ DHD_IF_ROLE_NAN(pub, idx))
+#else
+#define DHD_IF_ROLE_MULTI_CLIENT(pub, idx) \
+ (DHD_IF_ROLE_AP(pub, idx) || DHD_IF_ROLE_P2PGO(pub, idx) ||\
+ DHD_IF_ROLE_NAN(pub, idx))
+#endif /* DHD_AWDL */
+
+#define DHD_FLOW_RING(dhdp, flowid) \
+ (flow_ring_node_t *)&(((flow_ring_node_t *)((dhdp)->flow_ring_table))[flowid])
+
+struct flow_queue;
+
+/* Flow Ring Queue Enqueue overflow callback */
+typedef int (*flow_queue_cb_t)(struct flow_queue * queue, void * pkt);
+
+/**
+ * Each flow ring has an associated (tx flow controlled) queue. 802.3 packets are transferred
+ * between queue and ring. A packet from the host stack is first added to the queue, and in a later
+ * stage transferred to the flow ring. Packets in the queue are dhd owned, whereas packets in the
+ * flow ring are device owned.
+ */
+typedef struct flow_queue {
+ dll_t list; /* manage a flowring queue in a double linked list */
+ void * head; /* first packet in the queue */
+ void * tail; /* last packet in the queue */
+ uint16 len; /* number of packets in the queue */
+ uint16 max; /* maximum or min budget (used in cumm) */
+ uint32 threshold; /* parent's cumulative length threshold */
+ void * clen_ptr; /* parent's cumulative length counter */
+ uint32 failures; /* enqueue failures due to queue overflow */
+ flow_queue_cb_t cb; /* callback invoked on threshold crossing */
+ uint32 l2threshold; /* grandparent's (level 2) cumulative length threshold */
+ void * l2clen_ptr; /* grandparent's (level 2) cumulative length counter */
+} flow_queue_t;
+
+#define DHD_FLOW_QUEUE_LEN(queue) ((int)(queue)->len)
+#define DHD_FLOW_QUEUE_MAX(queue) ((int)(queue)->max)
+#define DHD_FLOW_QUEUE_THRESHOLD(queue) ((int)(queue)->threshold)
+#define DHD_FLOW_QUEUE_L2THRESHOLD(queue) ((int)(queue)->l2threshold)
+#define DHD_FLOW_QUEUE_EMPTY(queue) ((queue)->len == 0)
+#define DHD_FLOW_QUEUE_FAILURES(queue) ((queue)->failures)
+
+#define DHD_FLOW_QUEUE_AVAIL(queue) ((int)((queue)->max - (queue)->len))
+#define DHD_FLOW_QUEUE_FULL(queue) ((queue)->len >= (queue)->max)
+
+#define DHD_FLOW_QUEUE_OVFL(queue, budget) \
+ (((queue)->len) > budget)
+
+#define DHD_FLOW_QUEUE_SET_MAX(queue, budget) \
+ ((queue)->max) = ((budget) - 1)
+
+/* Queue's cumulative threshold. */
+#define DHD_FLOW_QUEUE_SET_THRESHOLD(queue, cumm_threshold) \
+ ((queue)->threshold) = ((cumm_threshold) - 1)
+
+/* Queue's cumulative length object accessor. */
+#define DHD_FLOW_QUEUE_CLEN_PTR(queue) ((queue)->clen_ptr)
+
+/* Point a queue's cumm_len at a parent's cumm_ctr_t cumulative length */
+#define DHD_FLOW_QUEUE_SET_CLEN(queue, parent_clen_ptr) \
+ ((queue)->clen_ptr) = (void *)(parent_clen_ptr)
+
+/* Queue's level 2 cumulative threshold. */
+#define DHD_FLOW_QUEUE_SET_L2THRESHOLD(queue, l2cumm_threshold) \
+ ((queue)->l2threshold) = ((l2cumm_threshold) - 1)
+
+/* Queue's level 2 cumulative length object accessor. */
+#define DHD_FLOW_QUEUE_L2CLEN_PTR(queue) ((queue)->l2clen_ptr)
+
+/* Point a queue's level 2 cumm_len at a grandparent's cumm_ctr_t cumulative length */
+#define DHD_FLOW_QUEUE_SET_L2CLEN(queue, grandparent_clen_ptr) \
+ ((queue)->l2clen_ptr) = (void *)(grandparent_clen_ptr)
+
+#if defined(BCMDBG)
+#define DHD_FLOWRING_TXSTATUS_CNT_UPDATE(bus, flowid, txstatus) \
+ dhd_bus_flow_ring_cnt_update(bus, flowid, txstatus)
+#else
+#define DHD_FLOWRING_TXSTATUS_CNT_UPDATE(bus, flowid, txstatus)
+#endif /* BCMDBG */
+
+/* Pkttag not compatible with PROP_TXSTATUS or WLFC */
+typedef struct dhd_pkttag_fr {
+ uint16 flowid;
+ uint16 ifid;
+} dhd_pkttag_fr_t;
+
+#define DHD_PKTTAG_SET_IFID(tag, idx) ((tag)->ifid = (uint16)(idx))
+#define DHD_PKTTAG_SET_PA(tag, pa) ((tag)->physaddr = (pa))
+#define DHD_PKTTAG_SET_PA_LEN(tag, palen) ((tag)->pa_len = (palen))
+#define DHD_PKTTAG_IFID(tag) ((tag)->ifid)
+#define DHD_PKTTAG_PA(tag) ((tag)->physaddr)
+#define DHD_PKTTAG_PA_LEN(tag) ((tag)->pa_len)
+
+/** each flow ring is dedicated to a tid/sa/da combination */
+typedef struct flow_info {
+ uint8 tid;
+ uint8 ifindex;
+ uchar sa[ETHER_ADDR_LEN];
+ uchar da[ETHER_ADDR_LEN];
+#if defined(BCMDBG)
+ uint32 tx_status[DHD_MAX_TX_STATUS_MSGS];
+#endif
+#ifdef TX_STATUS_LATENCY_STATS
+ /* total number of tx_status received on this flowid */
+ uint64 num_tx_status;
+ /* cumulative tx_status latency for this flowid */
+ uint64 cum_tx_status_latency;
+ /* num tx packets sent on this flowring */
+ uint64 num_tx_pkts;
+#endif /* TX_STATUS_LATENCY_STATS */
+} flow_info_t;
+
+/** a flow ring is used for outbound (towards antenna) 802.3 packets */
+typedef struct flow_ring_node {
+ dll_t list; /* manage a constructed flowring in a dll; must be the first member */
+ flow_queue_t queue; /* queues packets before they enter the flow ring, flow control */
+ bool active;
+ uint8 status;
+ /*
+ * flowid: unique ID of a flow ring, which can either be unicast or broadcast/multicast. For
+ * unicast flow rings, the flow id accelerates ARM 802.3->802.11 header translation.
+ */
+ uint16 flowid;
+ flow_info_t flow_info;
+ void *prot_info;
+ void *lock; /* lock for flowring access protection */
+
+#ifdef IDLE_TX_FLOW_MGMT
+ uint64 last_active_ts; /* contains last active timestamp */
+#endif /* IDLE_TX_FLOW_MGMT */
+#ifdef DEVICE_TX_STUCK_DETECT
+ /* Time stamp(msec) when last time a Tx packet completion is received on this flow ring */
+ uint32 tx_cmpl;
+ /*
+ * Holds the tx_cmpl which was read during the previous
+ * iteration of the stuck detection algo
+ */
+ uint32 tx_cmpl_prev;
+ /* counter to decide if this particular flow is stuck or not */
+ uint32 stuck_count;
+#endif /* DEVICE_TX_STUCK_DETECT */
+#ifdef DHD_HP2P
+ bool hp2p_ring;
+#endif /* DHD_HP2P */
+} flow_ring_node_t;
+
+typedef flow_ring_node_t flow_ring_table_t;
+
+typedef struct flow_hash_info {
+ uint16 flowid;
+ flow_info_t flow_info;
+ struct flow_hash_info *next;
+} flow_hash_info_t;
+
+typedef struct if_flow_lkup {
+ bool status;
+ uint8 role; /* Interface role: STA/AP */
+ flow_hash_info_t *fl_hash[DHD_FLOWRING_HASH_SIZE]; /* Lkup Hash table */
+} if_flow_lkup_t;
+
+static INLINE flow_ring_node_t *
+dhd_constlist_to_flowring(dll_t *item)
+{
+ return ((flow_ring_node_t *)item);
+}
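The cast above is only valid because 'dll_t list' is the first member of flow_ring_node_t (the struct comment calls this out), so the list pointer and the node pointer coincide. A compile-time guard one could add under that assumption (a sketch, assuming C11 _Static_assert is available):

	#include <stddef.h> /* offsetof */

	/* Sketch: catches anyone reordering flow_ring_node_t fields. */
	_Static_assert(offsetof(flow_ring_node_t, list) == 0,
		"dll_t 'list' must remain the first member for dhd_constlist_to_flowring");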
+
+/* Exported API */
+
+/* Flow ring's queue management functions */
+extern flow_ring_node_t * dhd_flow_ring_node(dhd_pub_t *dhdp, uint16 flowid);
+extern flow_queue_t * dhd_flow_queue(dhd_pub_t *dhdp, uint16 flowid);
+
+extern void dhd_flow_queue_init(dhd_pub_t *dhdp, flow_queue_t *queue, int max);
+extern void dhd_flow_queue_reinit(dhd_pub_t *dhdp, flow_queue_t *queue, int max);
+extern void dhd_flow_queue_register(flow_queue_t *queue, flow_queue_cb_t cb);
+extern int dhd_flow_queue_enqueue(dhd_pub_t *dhdp, flow_queue_t *queue, void *pkt);
+extern void * dhd_flow_queue_dequeue(dhd_pub_t *dhdp, flow_queue_t *queue);
+extern void dhd_flow_queue_reinsert(dhd_pub_t *dhdp, flow_queue_t *queue, void *pkt);
+
+extern void dhd_flow_ring_config_thresholds(dhd_pub_t *dhdp, uint16 flowid,
+ int queue_budget, int cumm_threshold, void *cumm_ctr,
+ int l2cumm_threshold, void *l2cumm_ctr);
+extern int dhd_flow_rings_init(dhd_pub_t *dhdp, uint32 num_h2d_rings);
+
+extern void dhd_flow_rings_deinit(dhd_pub_t *dhdp);
+
+extern int dhd_flowid_update(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio,
+ void *pktbuf);
+extern int dhd_flowid_debug_create(dhd_pub_t *dhdp, uint8 ifindex,
+ uint8 prio, char *sa, char *da, uint16 *flowid);
+extern int dhd_flowid_find_by_ifidx(dhd_pub_t *dhdp, uint8 ifidex, uint16 flowid);
+
+extern void dhd_flowid_free(dhd_pub_t *dhdp, uint8 ifindex, uint16 flowid);
+
+extern void dhd_flow_rings_delete(dhd_pub_t *dhdp, uint8 ifindex);
+extern void dhd_flow_rings_flush(dhd_pub_t *dhdp, uint8 ifindex);
+
+extern void dhd_flow_rings_delete_for_peer(dhd_pub_t *dhdp, uint8 ifindex,
+ char *addr);
+
+/* Handle Interface ADD, DEL operations */
+extern void dhd_update_interface_flow_info(dhd_pub_t *dhdp, uint8 ifindex,
+ uint8 op, uint8 role);
+
+/* Handle a STA interface link status update */
+extern int dhd_update_interface_link_status(dhd_pub_t *dhdp, uint8 ifindex,
+ uint8 status);
+extern int dhd_flow_prio_map(dhd_pub_t *dhd, uint8 *map, bool set);
+extern int dhd_update_flow_prio_map(dhd_pub_t *dhdp, uint8 map);
+extern uint32 dhd_active_tx_flowring_bkpq_len(dhd_pub_t *dhdp);
+#ifdef DHD_AWDL
+/* DHD handler for awdl peer op IOVAR */
+extern void dhd_awdl_peer_op(dhd_pub_t *dhdp, uint8 ifindex,
+ void *buf, uint32 buflen);
+#endif /* DHD_AWDL */
+extern uint8 dhd_flow_rings_ifindex2role(dhd_pub_t *dhdp, uint8 ifindex);
+#endif /* _dhd_flowrings_h_ */
diff --git a/bcmdhd.101.10.361.x/dhd_fwtrace.c b/bcmdhd.101.10.361.x/dhd_fwtrace.c
new file mode 100755
index 0000000..4737b43
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_fwtrace.c
@@ -0,0 +1,563 @@
+/*
+ * Firmware trace handling on the DHD side. A kernel thread reads the trace data
+ * and writes it to a file; this module also implements various utility functions.
+ *
+ * Broadcom Proprietary and Confidential. Copyright (C) 2020,
+ * All Rights Reserved.
+ *
+ * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom;
+ * the contents of this file may not be disclosed to third parties,
+ * copied or duplicated in any form, in whole or in part, without
+ * the prior written permission of Broadcom.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Proprietary:>>
+ *
+ * $Id$
+ */
+
+#ifdef BCMINTERNAL
+
+#ifdef DHD_FWTRACE
+
+#include <typedefs.h>
+#include <osl.h>
+
+#include <bcmutils.h>
+#include <bcmendian.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_proto.h>
+#include <dhd_dbg.h>
+#include <dhd_debug.h>
+
+#include <dhd_fwtrace.h>
+
+static int fwtrace_write_to_file(uint8 *buf, uint16 buf_len, dhd_pub_t *dhdp);
+static int fwtrace_close_file(dhd_pub_t *dhdp);
+static int fwtrace_open_file(uint32 fw_trace_enabled, dhd_pub_t *dhdp);
+static fwtrace_buf_t *fwtrace_get_trace_data_ptr(dhd_pub_t *dhdp);
+static void fwtrace_free_trace_buf(dhd_pub_t *dhdp);
+
+typedef struct fwtrace_info {
+ struct file *fw_trace_fp;
+ int file_index;
+ int part_index;
+ int trace_buf_index;
+ int trace_buf_count;
+ uint16 overflow_counter;
+
+ char trace_file[TRACE_FILE_NAME_LEN];
+
+ fwtrace_buf_t *trace_data_ptr;
+
+ uint16 prev_seq;
+
+ uint32 fwtrace_enable; /* Enable firmware tracing and the
+ * trace file management.
+ */
+ struct mutex fwtrace_lock; /* Synchronization between the
+ * ioctl and the kernel thread.
+ */
+ dhd_dma_buf_t fwtrace_buf; /* firmware trace buffer */
+} fwtrace_info_t;
+
+int
+dhd_fwtrace_attach(dhd_pub_t *dhdp)
+{
+ fwtrace_info_t *fwtrace_info;
+
+ /* Allocate the fwtrace info structure */
+ if (!(fwtrace_info = (fwtrace_info_t *)VMALLOCZ(dhdp->osh, sizeof(*fwtrace_info)))) {
+ DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
+ return (BCME_NOMEM);
+ }
+
+ bzero(fwtrace_info, sizeof(*fwtrace_info));
+ dhdp->fwtrace_info = fwtrace_info;
+
+ mutex_init(&dhdp->fwtrace_info->fwtrace_lock);
+
+ DHD_INFO(("allocated DHD fwtrace\n"));
+
+ return BCME_OK;
+}
+
+int
+dhd_fwtrace_detach(dhd_pub_t *dhdp)
+{
+ fwtrace_info_t *fwtrace_info;
+
+ DHD_TRACE(("%s: %d\n", __FUNCTION__, __LINE__));
+
+ if (!dhdp) {
+ return BCME_OK;
+ }
+
+ if (!dhdp->fwtrace_info) {
+ return BCME_OK;
+ }
+
+ fwtrace_info = dhdp->fwtrace_info;
+
+ dhd_dma_buf_free(dhdp, &dhdp->fwtrace_info->fwtrace_buf);
+
+ /* close the file if valid */
+ if (!(IS_ERR_OR_NULL(dhdp->fwtrace_info->fw_trace_fp))) {
+ (void) filp_close(dhdp->fwtrace_info->fw_trace_fp, 0);
+ }
+
+ mutex_destroy(&dhdp->fwtrace_info->fwtrace_lock);
+
+ VMFREE(dhdp->osh, fwtrace_info, sizeof(*fwtrace_info));
+
+ dhdp->fwtrace_info = NULL;
+
+ DHD_INFO(("Deallocated DHD fwtrace_info\n"));
+
+ return (BCME_OK);
+}
+
+uint16
+get_fw_trace_overflow_counter(dhd_pub_t *dhdp)
+{
+ return (dhdp->fwtrace_info->overflow_counter);
+}
+
+void
+process_fw_trace_data(dhd_pub_t *dhdp)
+{
+ fwtrace_info_t *fwtrace_info = dhdp->fwtrace_info;
+ uint16 length;
+ uint16 incoming_seq;
+ uint32 trace_buf_index = fwtrace_info->trace_buf_index;
+ fwtrace_buf_t * trace_buf;
+ fwtrace_buf_t * curr_buf;
+
+ mutex_lock(&fwtrace_info->fwtrace_lock);
+
+ if (fwtrace_info->fw_trace_fp == NULL) {
+ goto done;
+ }
+
+ if ((trace_buf = fwtrace_get_trace_data_ptr(dhdp)) == NULL) {
+ goto done;
+ }
+
+ do {
+ curr_buf = trace_buf + trace_buf_index;
+
+ length = curr_buf->info.length;
+ /* A length of 0 means the firmware has not updated this buffer yet */
+ if (length == 0) {
+ break;
+ }
+
+ incoming_seq = curr_buf->info.seq_num;
+
+ if (((uint16)(fwtrace_info->prev_seq + 1) != incoming_seq) &&
+ length != sizeof(*curr_buf)) {
+ DHD_ERROR(("*** invalid trace len idx = %u, length = %u, "
+ "cur seq = %u, in-seq = %u \n",
+ trace_buf_index, length,
+ fwtrace_info->prev_seq, incoming_seq));
+ break;
+ }
+
+ DHD_TRACE(("*** TRACE BUS: IDX:%d, in-seq:%d(prev-%d), ptr:%p(%llu), len:%d\n",
+ trace_buf_index, incoming_seq, fwtrace_info->prev_seq,
+ curr_buf, (uint64)curr_buf, length));
+
+ /* Write trace data to a file */
+ if (fwtrace_write_to_file((uint8 *) curr_buf, length, dhdp) != BCME_OK) {
+ DHD_ERROR(("*** fwtrace_write_to_file has failed \n"));
+ break;
+ }
+
+ /* Reset length after consuming the fwtrace data */
+ curr_buf->info.length = 0;
+
+ if ((fwtrace_info->prev_seq + 1) != incoming_seq) {
+ DHD_ERROR(("*** seq mismatch, index = %u, length = %u, "
+ "cur seq = %u, in-seq = %u \n",
+ trace_buf_index, length,
+ fwtrace_info->prev_seq, incoming_seq));
+ }
+ fwtrace_info->prev_seq = incoming_seq;
+
+ trace_buf_index++;
+ trace_buf_index &= (fwtrace_info->trace_buf_count - 1u);
+ fwtrace_info->trace_buf_index = trace_buf_index;
+ } while (true);
+
+done:
+ mutex_unlock(&fwtrace_info->fwtrace_lock);
+ return;
+}
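The index wrap 'idx & (count - 1)' is only a correct modulo when trace_buf_count is a power of two, which holds as long as FWTRACE_NUM_HOST_BUFFERS starts as a power of two (the halving in fwtrace_get_haddr() below then preserves it). A standalone restatement of the invariant:

	#include <assert.h>

	/* Sketch: advance a ring index; valid only for power-of-two ring sizes. */
	static unsigned int ring_next(unsigned int idx, unsigned int count)
	{
		assert(count != 0 && (count & (count - 1)) == 0); /* power of two */
		return (idx + 1) & (count - 1);
	}
	/* ring_next(7, 8) == 0; with count == 6 the mask trick would be wrong. */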
+
+/*
+ * Write the incoming trace data to a file. The maximum file size is 1MB. After that
+ * the trace data is saved into a new file.
+ */
+static int
+fwtrace_write_to_file(uint8 *buf, uint16 buf_len, dhd_pub_t *dhdp)
+{
+ fwtrace_info_t *fwtrace_info = dhdp->fwtrace_info;
+ int ret_val = BCME_OK;
+ int ret_val_1 = 0;
+ mm_segment_t old_fs;
+ loff_t pos = 0;
+ struct kstat stat;
+ int error;
+
+ /* Change to KERNEL_DS address limit */
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+
+ if (buf == NULL) {
+ ret_val = BCME_ERROR;
+ goto done;
+ }
+
+ if (IS_ERR_OR_NULL(fwtrace_info->fw_trace_fp)) {
+ ret_val = BCME_ERROR;
+ goto done;
+ }
+
+ /*
+ * Get the file size; if size + buf_len > TRACE_FILE_SIZE,
+ * then roll over to a new file.
+ */
+ error = vfs_stat(fwtrace_info->trace_file, &stat);
+ if (error) {
+ DHD_ERROR(("vfs_stat has failed with error code = %d\n", error));
+ goto done;
+ }
+
+ if ((int) stat.size + buf_len > TRACE_FILE_SIZE) {
+ fwtrace_close_file(dhdp);
+ (fwtrace_info->part_index)++;
+ fwtrace_open_file(TRUE, dhdp);
+ }
+
+ pos = fwtrace_info->fw_trace_fp->f_pos;
+ /* Write buf to file */
+ ret_val_1 = vfs_write(fwtrace_info->fw_trace_fp,
+ (char *) buf, (uint32) buf_len, &pos);
+ if (ret_val_1 < 0) {
+ DHD_ERROR(("write file error, err = %d\n", ret_val_1));
+ ret_val = BCME_ERROR;
+ goto done;
+ }
+ fwtrace_info->fw_trace_fp->f_pos = pos;
+
+ /* Sync file from filesystem to physical media */
+ ret_val_1 = vfs_fsync(fwtrace_info->fw_trace_fp, 0);
+ if (ret_val_1 < 0) {
+ DHD_ERROR(("sync file error, error = %d\n", ret_val_1));
+ ret_val = BCME_ERROR;
+ goto done;
+ }
+
+done:
+ /* restore previous address limit */
+ set_fs(old_fs);
+ return (ret_val);
+}
+
+/*
+ * Start the trace, gets called from the ioctl handler.
+ */
+int
+fw_trace_start(dhd_pub_t *dhdp, uint32 fw_trace_enabled)
+{
+ int ret_val = BCME_OK;
+
+ (dhdp->fwtrace_info->file_index)++;
+ dhdp->fwtrace_info->part_index = 1;
+
+ dhdp->fwtrace_info->trace_buf_index = 0;
+
+ mutex_lock(&dhdp->fwtrace_info->fwtrace_lock);
+ ret_val = fwtrace_open_file(fw_trace_enabled, dhdp);
+ if (ret_val == BCME_OK) {
+ dhdp->fwtrace_info->fwtrace_enable = fw_trace_enabled;
+ }
+ mutex_unlock(&dhdp->fwtrace_info->fwtrace_lock);
+
+ return (ret_val);
+}
+
+/*
+ * Stop the trace collection and close the file descriptor.
+ */
+int
+fw_trace_stop(dhd_pub_t *dhdp)
+{
+ int ret_val = BCME_OK;
+
+ /* Check to see if there is any trace data */
+ process_fw_trace_data(dhdp);
+
+ mutex_lock(&dhdp->fwtrace_info->fwtrace_lock); /* acquire lock */
+ /* flush the trace buffer */
+ ret_val = fwtrace_close_file(dhdp);
+
+ /* free the trace buffer */
+ fwtrace_free_trace_buf(dhdp);
+ mutex_unlock(&dhdp->fwtrace_info->fwtrace_lock); /* release the lock */
+
+ return (ret_val);
+}
+
+/*
+ * The trace file format is: fw_trace_w_part_x_y_z
+ * where w is the file index, x is the part index,
+ * y is in seconds and z is in milliseconds
+ *
+ * fw_trace_1_part_1_1539298163209110
+ * fw_trace_1_part_2_1539298194739003 etc.
+ *
+ */
+static int
+fwtrace_open_file(uint32 fw_trace_enabled, dhd_pub_t *dhdp)
+{
+ fwtrace_info_t *fwtrace_info = dhdp->fwtrace_info;
+ int ret_val = BCME_OK;
+ uint32 file_mode;
+ char ts_str[DEBUG_DUMP_TIME_BUF_LEN];
+
+ if (fw_trace_enabled) {
+ if (!(IS_ERR_OR_NULL(fwtrace_info->fw_trace_fp))) {
+ (void) filp_close(fwtrace_info->fw_trace_fp, 0);
+ }
+
+ DHD_INFO((" *** Creating the trace file \n"));
+
+ file_mode = O_CREAT | O_WRONLY | O_SYNC;
+ clear_debug_dump_time(ts_str);
+ get_debug_dump_time(ts_str);
+
+ snprintf(fwtrace_info->trace_file,
+ sizeof(fwtrace_info->trace_file),
+ "%sfw_trace_%d_part_%d_%x_%s",
+ DHD_COMMON_DUMP_PATH, fwtrace_info->file_index,
+ fwtrace_info->part_index,
+ dhd_bus_get_bp_base(dhdp),
+ ts_str);
+
+ fwtrace_info->fw_trace_fp =
+ filp_open(fwtrace_info->trace_file, file_mode, 0664);
+
+ if (IS_ERR(fwtrace_info->fw_trace_fp)) {
+ DHD_ERROR(("Unable to create the fw trace file file: %s\n",
+ fwtrace_info->trace_file));
+ ret_val = BCME_ERROR;
+ goto done;
+ }
+ }
+
+done:
+ return (ret_val);
+}
+
+static int
+fwtrace_close_file(dhd_pub_t *dhdp)
+{
+ int ret_val = BCME_OK;
+
+ if (!(IS_ERR_OR_NULL(dhdp->fwtrace_info->fw_trace_fp))) {
+ (void) filp_close(dhdp->fwtrace_info->fw_trace_fp, 0);
+ }
+
+ dhdp->fwtrace_info->fw_trace_fp = NULL;
+
+ return (ret_val);
+}
+
+#define FWTRACE_HADDR_PARAMS_SIZE 256u
+#define FW_TRACE_FLUSH 0x8u /* bit 3 */
+
+static int send_fw_trace_val(dhd_pub_t *dhdp, int val);
+
+/*
+ * Initialize FWTRACE.
+ * Allocate trace buffer and open trace file.
+ */
+int
+fwtrace_init(dhd_pub_t *dhdp)
+{
+ int ret_val = BCME_OK;
+ fwtrace_hostaddr_info_t host_buf_info;
+
+ if (dhdp->fwtrace_info->fwtrace_buf.va != NULL) {
+ /* Already initialized */
+ goto done;
+ }
+
+ ret_val = fwtrace_get_haddr(dhdp, &host_buf_info);
+
+ if (ret_val != BCME_OK) {
+ goto done;
+ }
+
+ DHD_INFO(("dhd_get_trace_haddr: addr = %llx, len = %u\n",
+ host_buf_info.haddr.u64, host_buf_info.num_bufs));
+
+ /* Initialize and setup the file */
+ ret_val = fw_trace_start(dhdp, TRUE);
+
+done:
+ return ret_val;
+}
+
+/*
+ * Process the fwtrace set command to enable/disable firmware tracing.
+ * An enable is always preceded by a disable.
+ */
+int
+handle_set_fwtrace(dhd_pub_t *dhdp, uint32 val)
+{
+ int ret, ret_val = BCME_OK;
+
+ /* On set, consider only lower two bytes for now */
+ dhdp->fwtrace_info->fwtrace_enable = (val & 0xFFFF);
+
+ if (val & FW_TRACE_FLUSH) { /* only flush the trace buffer */
+ if ((ret_val = send_fw_trace_val(dhdp, val)) != BCME_OK) {
+ goto done;
+ }
+ } else if (val == 0) { /* disable the tracing */
+ /* Disable the trace in the firmware */
+ if ((ret_val = send_fw_trace_val(dhdp, val)) != BCME_OK) {
+ goto done;
+ }
+
+ /* cleanup in the driver */
+ fw_trace_stop(dhdp);
+ } else { /* enable the tracing */
+ fwtrace_hostaddr_info_t haddr_info;
+
+ ret_val = fwtrace_init(dhdp);
+ if (ret_val != BCME_OK) {
+ goto done;
+ }
+
+ if ((ret_val = fwtrace_get_haddr(dhdp, &haddr_info)) != BCME_OK) {
+ DHD_ERROR(("%s: set dhd_iovar has failed for "
+ "fw_trace_haddr, "
+ "ret=%d\n", __FUNCTION__, ret_val));
+ goto done;
+ }
+
+ ret = dhd_iovar(dhdp, 0, "dngl:fwtrace_haddr",
+ (char *) &haddr_info, sizeof(haddr_info),
+ NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: set dhd_iovar has failed for "
+ "fwtrace_haddr, "
+ "ret=%d\n", __FUNCTION__, ret));
+ ret_val = BCME_NOMEM;
+ goto done;
+ }
+
+ /* Finally, enable the trace in the firmware */
+ if ((ret_val = send_fw_trace_val(dhdp, val)) != BCME_OK) {
+ goto done;
+ }
+ }
+done:
+ return (ret_val);
+}
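The control word is overloaded: 0 disables tracing, bit 3 (FW_TRACE_FLUSH) only flushes the trace buffer, and any other non-zero value enables tracing. A hedged usage sketch from the ioctl side ('dhdp' is the usual dhd_pub_t handle):

	/* Illustrative calls, for explanation only. */
	handle_set_fwtrace(dhdp, 1);              /* enable tracing */
	handle_set_fwtrace(dhdp, FW_TRACE_FLUSH); /* flush the trace buffer only */
	handle_set_fwtrace(dhdp, 0);              /* disable tracing, close the file */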
+
+/*
+ * Send dngl:fwtrace IOVAR to the firmware.
+ */
+
+static int
+send_fw_trace_val(dhd_pub_t *dhdp, int val)
+{
+ int ret_val = BCME_OK;
+
+ if ((ret_val = dhd_iovar(dhdp, 0, "dngl:fwtrace", (char *)&val, sizeof(val),
+ NULL, 0, TRUE)) < 0) {
+ DHD_ERROR(("%s: set dhd_iovar has failed fwtrace, "
+ "ret=%d\n", __FUNCTION__, ret_val));
+ }
+
+ return (ret_val);
+}
+
+/*
+ * Returns the virtual address of the firmware trace buffer.
+ * DHD monitors this buffer for an update from the firmware.
+ */
+static fwtrace_buf_t *
+fwtrace_get_trace_data_ptr(dhd_pub_t *dhdp)
+{
+ return ((fwtrace_buf_t *) dhdp->fwtrace_info->fwtrace_buf.va);
+}
+
+int
+fwtrace_get_haddr(dhd_pub_t *dhdp, fwtrace_hostaddr_info_t *haddr_info)
+{
+ int ret_val = BCME_NOMEM;
+ int num_host_buffers = FWTRACE_NUM_HOST_BUFFERS;
+
+ if (haddr_info == NULL) {
+ ret_val = BCME_BADARG;
+ goto done;
+ }
+
+ if (dhdp->fwtrace_info->fwtrace_buf.va != NULL) {
+ /* Use the existing buffer and send to the firmware */
+ haddr_info->haddr.u64 = HTOL64(*(uint64 *)
+ &dhdp->fwtrace_info->fwtrace_buf.pa);
+ haddr_info->num_bufs = dhdp->fwtrace_info->trace_buf_count;
+ haddr_info->buf_len = sizeof(fwtrace_buf_t);
+ ret_val = BCME_OK;
+ goto done;
+ }
+
+ do {
+ /* Initialize firmware trace buffer */
+ if (dhd_dma_buf_alloc(dhdp, &dhdp->fwtrace_info->fwtrace_buf,
+ sizeof(fwtrace_buf_t) * num_host_buffers) == BCME_OK) {
+ dhdp->fwtrace_info->trace_buf_count = num_host_buffers;
+ ret_val = BCME_OK;
+ break;
+ }
+
+ DHD_ERROR(("%s: Allocing %d buffers of size %lu bytes failed\n",
+ __FUNCTION__, num_host_buffers,
+ sizeof(fwtrace_buf_t) * num_host_buffers));
+
+ /* Retry with smaller numbers */
+ num_host_buffers >>= 1;
+ } while (num_host_buffers > 0);
+
+ haddr_info->haddr.u64 = HTOL64(*(uint64 *)&dhdp->fwtrace_info->fwtrace_buf.pa);
+ haddr_info->num_bufs = num_host_buffers;
+ haddr_info->buf_len = sizeof(fwtrace_buf_t);
+
+ DHD_INFO(("Firmware trace buffer, host address = %llx, count = %u \n",
+ haddr_info->haddr.u64,
+ haddr_info->num_bufs));
+done:
+ return (ret_val);
+}
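The allocation loop is a simple back-off: try sizeof(fwtrace_buf_t) * N, then halve N until an allocation succeeds or N reaches zero. The same pattern in isolation (standalone C; malloc stands in for dhd_dma_buf_alloc):

	#include <stdlib.h>

	/* Sketch: allocate n * size bytes, halving n on failure. Returns the count
	 * actually allocated (0 on total failure); the block is stored in *out. */
	static int alloc_with_backoff(void **out, size_t size, int n)
	{
		while (n > 0) {
			*out = malloc(size * (size_t)n);
			if (*out != NULL)
				return n;
			n >>= 1; /* retry with half as many buffers */
		}
		*out = NULL;
		return 0;
	}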
+
+/*
+ * Frees the host buffer.
+ */
+static void
+fwtrace_free_trace_buf(dhd_pub_t *dhdp)
+{
+ dhd_dma_buf_free(dhdp, &dhdp->fwtrace_info->fwtrace_buf);
+ return;
+}
+
+#endif /* DHD_FWTRACE */
+
+#endif /* BCMINTERNAL */
diff --git a/bcmdhd.101.10.361.x/dhd_fwtrace.h b/bcmdhd.101.10.361.x/dhd_fwtrace.h
new file mode 100755
index 0000000..4e977bf
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_fwtrace.h
@@ -0,0 +1,55 @@
+/*
+ * Data structures required for the firmware tracing support on Linux.
+ *
+ * Broadcom Proprietary and Confidential. Copyright (C) 2020,
+ * All Rights Reserved.
+ *
+ * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom;
+ * the contents of this file may not be disclosed to third parties,
+ * copied or duplicated in any form, in whole or in part, without
+ * the prior written permission of Broadcom.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Proprietary:>>
+ *
+ * $Id$
+ */
+
+#ifndef _DHD_FWTRACE_H
+#define _DHD_FWTRACE_H
+
+#ifdef BCMINTERNAL
+#ifdef DHD_FWTRACE /* firmware tracing */
+
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_proto.h>
+#include <dhd_dbg.h>
+#include <dhd_debug.h>
+#include <dhd_fwtrace.h>
+
+#include <linux/mutex.h>
+#include <bcm_fwtrace.h>
+
+#define TRACE_FILE_NAME_LEN 128u /* bytes */
+#define TRACE_FILE_SIZE (1024u * 1024u) /* Trace file size is 1 MB */
+
+/* Prototypes */
+void dhd_event_logtrace_enqueue_fwtrace(dhd_pub_t *dhdp);
+int dhd_fwtrace_attach(dhd_pub_t *dhdinfo);
+int dhd_fwtrace_detach(dhd_pub_t *dhdinfo);
+
+void process_fw_trace_data(dhd_pub_t *dhdp);
+uint32 dhd_bus_get_bp_base(dhd_pub_t *dhdp);
+int fwtrace_init(dhd_pub_t *dhdp);
+int fw_trace_start(dhd_pub_t *dhdp, uint32 fw_trace_enabled);
+int fw_trace_stop(dhd_pub_t *dhdp);
+int handle_set_fwtrace(dhd_pub_t *dhdp, uint32 val);
+uint16 get_fw_trace_overflow_counter(dhd_pub_t *dhdp);
+int fwtrace_get_haddr(dhd_pub_t *dhdp, fwtrace_hostaddr_info_t *haddr_info);
+
+#endif /* DHD_FWTRACE */
+
+#endif /* BCMINTERNAL */
+
+#endif /* _DHD_FWTRACE_H */
diff --git a/bcmdhd.101.10.361.x/dhd_gpio.c b/bcmdhd.101.10.361.x/dhd_gpio.c
new file mode 100755
index 0000000..60c04be
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_gpio.c
@@ -0,0 +1,497 @@
+
+#include <osl.h>
+#include <dhd_linux.h>
+#include <linux/gpio.h>
+
+#if defined(BUS_POWER_RESTORE) && defined(BCMSDIO)
+#include <linux/mmc/core.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/sdio_func.h>
+#endif /* defined(BUS_POWER_RESTORE) && defined(BCMSDIO) */
+
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+#ifdef DHD_STATIC_IN_DRIVER
+extern int dhd_static_buf_init(void);
+extern void dhd_static_buf_exit(void);
+#endif /* DHD_STATIC_IN_DRIVER */
+#ifdef BCMDHD_MDRIVER
+extern void *bcmdhd_mem_prealloc(uint bus_type, int index,
+ int section, unsigned long size);
+#else
+extern void *bcmdhd_mem_prealloc(int section, unsigned long size);
+#endif
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+
+#ifdef BCMDHD_DTS
+/* This is sample code in dts file.
+bcmdhd {
+ compatible = "android,bcmdhd_wlan";
+ gpio_wl_reg_on = <&gpio GPIOH_4 GPIO_ACTIVE_HIGH>;
+ gpio_wl_host_wake = <&gpio GPIOZ_15 GPIO_ACTIVE_HIGH>;
+};
+*/
+#define DHD_DT_COMPAT_ENTRY "android,bcmdhd_wlan"
+#define GPIO_WL_REG_ON_PROPNAME "gpio_wl_reg_on"
+#define GPIO_WL_HOST_WAKE_PROPNAME "gpio_wl_host_wake"
+#endif
+
+#ifdef CUSTOMER_HW_AMLOGIC
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
+#include <linux/amlogic/aml_gpio_consumer.h>
+extern int wifi_irq_trigger_level(void);
+extern u8 *wifi_get_mac(void);
+extern u8 *wifi_get_ap_mac(void);
+#endif
+extern void sdio_reinit(void);
+extern void set_usb_bt_power(int is_power);
+extern void set_usb_wifi_power(int is_power);
+extern void extern_wifi_set_enable(int is_on);
+extern void pci_remove_reinit(unsigned int vid, unsigned int pid, int delBus);
+//extern void amlogic_pcie_power_on_atu_fixup(void);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+extern int wifi_irq_num(void);
+#endif
+int dhd_pwr_ctrl = 1;
+module_param(dhd_pwr_ctrl, int, 0);
+#endif
+
+static int
+dhd_wlan_set_power(int on, wifi_adapter_info_t *adapter)
+{
+ int gpio_wl_reg_on = adapter->gpio_wl_reg_on;
+ int err = 0;
+
+#ifdef CUSTOMER_HW_AMLOGIC
+ printf("######### dhd_pwr_ctrl=%d #########\n", dhd_pwr_ctrl);
+#endif
+
+ if (on) {
+ printf("======== PULL WL_REG_ON(%d) HIGH! ========\n", gpio_wl_reg_on);
+ if (gpio_wl_reg_on >= 0) {
+ err = gpio_direction_output(gpio_wl_reg_on, 1);
+ if (err) {
+ printf("%s: WL_REG_ON didn't output high\n", __FUNCTION__);
+ return -EIO;
+ }
+ }
+#ifdef CUSTOMER_HW_AMLOGIC
+#ifdef BCMSDIO
+ extern_wifi_set_enable(0);
+ mdelay(200);
+ extern_wifi_set_enable(1);
+ mdelay(200);
+// sdio_reinit();
+#endif
+#ifdef BCMDBUS
+ if (dhd_pwr_ctrl) {
+ set_usb_wifi_power(0);
+ mdelay(200);
+ set_usb_wifi_power(1);
+ mdelay(200);
+ }
+#endif
+#ifdef BCMPCIE
+// extern_wifi_set_enable(0);
+// mdelay(200);
+// extern_wifi_set_enable(1);
+// mdelay(200);
+// amlogic_pcie_power_on_atu_fixup();
+#endif
+#endif
+#ifdef BUS_POWER_RESTORE
+#ifdef BCMSDIO
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)
+ if (adapter->sdio_func && adapter->sdio_func->card && adapter->sdio_func->card->host) {
+ mdelay(100);
+ printf("======== mmc_power_restore_host! ========\n");
+ mmc_power_restore_host(adapter->sdio_func->card->host);
+ }
+#endif
+#elif defined(BCMPCIE)
+ if (adapter->pci_dev) {
+ mdelay(100);
+ printf("======== pci_set_power_state PCI_D0! ========\n");
+ pci_set_power_state(adapter->pci_dev, PCI_D0);
+ if (adapter->pci_saved_state)
+ pci_load_and_free_saved_state(adapter->pci_dev, &adapter->pci_saved_state);
+ pci_restore_state(adapter->pci_dev);
+ err = pci_enable_device(adapter->pci_dev);
+ if (err < 0)
+ printf("%s: PCI enable device failed", __FUNCTION__);
+ pci_set_master(adapter->pci_dev);
+ }
+#endif /* BCMPCIE */
+#endif /* BUS_POWER_RESTORE */
+ /* Let the customer power rail stabilize */
+ mdelay(100);
+ } else {
+#ifdef BUS_POWER_RESTORE
+#ifdef BCMSDIO
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)
+ if (adapter->sdio_func && adapter->sdio_func->card && adapter->sdio_func->card->host) {
+ printf("======== mmc_power_save_host! ========\n");
+ mmc_power_save_host(adapter->sdio_func->card->host);
+ }
+#endif
+#elif defined(BCMPCIE)
+ if (adapter->pci_dev) {
+ printf("======== pci_set_power_state PCI_D3hot! ========\n");
+ pci_save_state(adapter->pci_dev);
+ adapter->pci_saved_state = pci_store_saved_state(adapter->pci_dev);
+ if (pci_is_enabled(adapter->pci_dev))
+ pci_disable_device(adapter->pci_dev);
+ pci_set_power_state(adapter->pci_dev, PCI_D3hot);
+ }
+#endif /* BCMPCIE */
+#endif /* BUS_POWER_RESTORE */
+ printf("======== PULL WL_REG_ON(%d) LOW! ========\n", gpio_wl_reg_on);
+ if (gpio_wl_reg_on >= 0) {
+ err = gpio_direction_output(gpio_wl_reg_on, 0);
+ if (err) {
+ printf("%s: WL_REG_ON didn't output low\n", __FUNCTION__);
+ return -EIO;
+ }
+ }
+#ifdef CUSTOMER_HW_AMLOGIC
+#ifdef BCMSDIO
+ extern_wifi_set_enable(0);
+ mdelay(200);
+#endif
+#ifdef BCMDBUS
+ if (dhd_pwr_ctrl) {
+ set_usb_wifi_power(0);
+ mdelay(200);
+ }
+#endif
+#ifdef BCMPCIE
+// extern_wifi_set_enable(0);
+// mdelay(200);
+#endif
+#endif
+ }
+
+ return err;
+}
+
+static int dhd_wlan_set_reset(int onoff)
+{
+ return 0;
+}
+
+static int dhd_wlan_set_carddetect(int present)
+{
+ int err = 0;
+
+#if !defined(BUS_POWER_RESTORE)
+ if (present) {
+#if defined(BCMSDIO)
+ printf("======== Card detection to detect SDIO card! ========\n");
+#ifdef CUSTOMER_HW_PLATFORM
+ err = sdhci_force_presence_change(&sdmmc_channel, 1);
+#endif /* CUSTOMER_HW_PLATFORM */
+#ifdef CUSTOMER_HW_AMLOGIC
+ sdio_reinit();
+#endif
+#elif defined(BCMPCIE)
+ printf("======== Card detection to detect PCIE card! ========\n");
+#endif
+ } else {
+#if defined(BCMSDIO)
+ printf("======== Card detection to remove SDIO card! ========\n");
+#ifdef CUSTOMER_HW_PLATFORM
+ err = sdhci_force_presence_change(&sdmmc_channel, 0);
+#endif /* CUSTOMER_HW_PLATFORM */
+#ifdef CUSTOMER_HW_AMLOGIC
+ extern_wifi_set_enable(0);
+ mdelay(200);
+#endif
+#elif defined(BCMPCIE)
+ printf("======== Card detection to remove PCIE card! ========\n");
+#ifdef CUSTOMER_HW_AMLOGIC
+// extern_wifi_set_enable(0);
+// mdelay(200);
+#endif
+#endif
+ }
+#endif /* BUS_POWER_RESTORE */
+
+ return err;
+}
+
+static int dhd_wlan_get_mac_addr(unsigned char *buf, int ifidx)
+{
+ int err = 0;
+
+ if (ifidx == 1) {
+#ifdef EXAMPLE_GET_MAC
+ struct ether_addr ea_example = {{0x00, 0x11, 0x22, 0x33, 0x44, 0xFF}};
+ bcopy((char *)&ea_example, buf, sizeof(struct ether_addr));
+#endif /* EXAMPLE_GET_MAC */
+#ifdef CUSTOMER_HW_AMLOGIC
+#ifdef CUSTOM_AP_MAC
+ bcopy((char *)wifi_get_ap_mac(), buf, sizeof(struct ether_addr));
+ if (buf[0] == 0xff) {
+ printf("custom wifi ap mac is not set\n");
+ err = -1;
+ } else
+ printf("custom wifi ap mac-addr: %02x:%02x:%02x:%02x:%02x:%02x\n",
+ buf[0], buf[1], buf[2],
+ buf[3], buf[4], buf[5]);
+#else
+ err = -1;
+#endif
+#endif
+ } else {
+#ifdef EXAMPLE_GET_MAC
+ struct ether_addr ea_example = {{0x02, 0x11, 0x22, 0x33, 0x44, 0x55}};
+ bcopy((char *)&ea_example, buf, sizeof(struct ether_addr));
+#endif /* EXAMPLE_GET_MAC */
+#ifdef CUSTOMER_HW_AMLOGIC
+ bcopy((char *)wifi_get_mac(), buf, sizeof(struct ether_addr));
+ if (buf[0] == 0xff) {
+ printf("custom wifi mac is not set\n");
+ err = -1;
+ } else
+ printf("custom wifi mac-addr: %02x:%02x:%02x:%02x:%02x:%02x\n",
+ buf[0], buf[1], buf[2],
+ buf[3], buf[4], buf[5]);
+#endif
+ }
+
+#ifdef EXAMPLE_GET_MAC_VER2
+ /* EXAMPLE code */
+ {
+ char macpad[56]= {
+ 0x00,0xaa,0x9c,0x84,0xc7,0xbc,0x9b,0xf6,
+ 0x02,0x33,0xa9,0x4d,0x5c,0xb4,0x0a,0x5d,
+ 0xa8,0xef,0xb0,0xcf,0x8e,0xbf,0x24,0x8a,
+ 0x87,0x0f,0x6f,0x0d,0xeb,0x83,0x6a,0x70,
+ 0x4a,0xeb,0xf6,0xe6,0x3c,0xe7,0x5f,0xfc,
+ 0x0e,0xa7,0xb3,0x0f,0x00,0xe4,0x4a,0xaf,
+ 0x87,0x08,0x16,0x6d,0x3a,0xe3,0xc7,0x80};
+ bcopy(macpad, buf+6, sizeof(macpad));
+ }
+#endif /* EXAMPLE_GET_MAC_VER2 */
+
+ printf("======== %s err=%d ========\n", __FUNCTION__, err);
+
+ return err;
+}
+
+static struct cntry_locales_custom brcm_wlan_translate_custom_table[] = {
+ /* Table should be filled out based on custom platform regulatory requirement */
+#ifdef EXAMPLE_TABLE
+ {"", "XT", 49}, /* Universal if Country code is unknown or empty */
+ {"US", "US", 0},
+#endif /* EXAMPLE_TABLE */
+};
+
+#ifdef CUSTOM_FORCE_NODFS_FLAG
+struct cntry_locales_custom brcm_wlan_translate_nodfs_table[] = {
+#ifdef EXAMPLE_TABLE
+ {"", "XT", 50}, /* Universal if Country code is unknown or empty */
+ {"US", "US", 0},
+#endif /* EXAMPLE_TABLE */
+};
+#endif
+
+static void *dhd_wlan_get_country_code(char *ccode
+#ifdef CUSTOM_FORCE_NODFS_FLAG
+ , u32 flags
+#endif
+)
+{
+ struct cntry_locales_custom *locales;
+ int size;
+ int i;
+
+ if (!ccode)
+ return NULL;
+
+#ifdef CUSTOM_FORCE_NODFS_FLAG
+ if (flags & WLAN_PLAT_NODFS_FLAG) {
+ locales = brcm_wlan_translate_nodfs_table;
+ size = ARRAY_SIZE(brcm_wlan_translate_nodfs_table);
+ } else {
+#endif
+ locales = brcm_wlan_translate_custom_table;
+ size = ARRAY_SIZE(brcm_wlan_translate_custom_table);
+#ifdef CUSTOM_FORCE_NODFS_FLAG
+ }
+#endif
+
+ for (i = 0; i < size; i++)
+ if (strcmp(ccode, locales[i].iso_abbrev) == 0)
+ return &locales[i];
+ return NULL;
+}
+
+struct wifi_platform_data dhd_wlan_control = {
+ .set_power = dhd_wlan_set_power,
+ .set_reset = dhd_wlan_set_reset,
+ .set_carddetect = dhd_wlan_set_carddetect,
+ .get_mac_addr = dhd_wlan_get_mac_addr,
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+ .mem_prealloc = bcmdhd_mem_prealloc,
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+ .get_country_code = dhd_wlan_get_country_code,
+};
+
+int dhd_wlan_init_gpio(wifi_adapter_info_t *adapter)
+{
+#ifdef BCMDHD_DTS
+ char wlan_node[32];
+ struct device_node *root_node = NULL;
+#endif
+ int err = 0;
+ int gpio_wl_reg_on;
+#ifdef CUSTOMER_OOB
+ int gpio_wl_host_wake;
+ int host_oob_irq = -1;
+ uint host_oob_irq_flags = 0;
+#endif
+
+ /* Please check your schematic and fill in the right GPIO numbers that are
+ * connected to WL_REG_ON and WL_HOST_WAKE.
+ */
+#ifdef BCMDHD_DTS
+ strcpy(wlan_node, DHD_DT_COMPAT_ENTRY);
+ printf("======== Get GPIO from DTS(%s) ========\n", wlan_node);
+ root_node = of_find_compatible_node(NULL, NULL, wlan_node);
+ if (root_node) {
+ gpio_wl_reg_on = of_get_named_gpio(root_node, GPIO_WL_REG_ON_PROPNAME, 0);
+#ifdef CUSTOMER_OOB
+ gpio_wl_host_wake = of_get_named_gpio(root_node, GPIO_WL_HOST_WAKE_PROPNAME, 0);
+#endif
+ } else
+#endif
+ {
+ gpio_wl_reg_on = -1;
+#ifdef CUSTOMER_OOB
+ gpio_wl_host_wake = -1;
+#endif
+ }
+
+#ifdef CUSTOMER_HW_AMLOGIC
+#if defined(BCMPCIE)
+ printf("======== Card detection to detect PCIE card! ========\n");
+// pci_remove_reinit(0x14e4, 0x43ec, 1);
+#endif
+#endif
+
+ if (gpio_wl_reg_on >= 0) {
+ err = gpio_request(gpio_wl_reg_on, "WL_REG_ON");
+ if (err < 0) {
+ printf("%s: gpio_request(%d) for WL_REG_ON failed\n",
+ __FUNCTION__, gpio_wl_reg_on);
+ gpio_wl_reg_on = -1;
+ }
+ }
+ adapter->gpio_wl_reg_on = gpio_wl_reg_on;
+
+#ifdef CUSTOMER_OOB
+ if (gpio_wl_host_wake >= 0) {
+ err = gpio_request(gpio_wl_host_wake, "bcmdhd");
+ if (err < 0) {
+ printf("%s: gpio_request(%d) for WL_HOST_WAKE failed\n",
+ __FUNCTION__, gpio_wl_host_wake);
+ return -1;
+ }
+ adapter->gpio_wl_host_wake = gpio_wl_host_wake;
+ err = gpio_direction_input(gpio_wl_host_wake);
+ if (err < 0) {
+ printf("%s: gpio_direction_input(%d) for WL_HOST_WAKE failed\n",
+ __FUNCTION__, gpio_wl_host_wake);
+ gpio_free(gpio_wl_host_wake);
+ return -1;
+ }
+ host_oob_irq = gpio_to_irq(gpio_wl_host_wake);
+ if (host_oob_irq < 0) {
+ printf("%s: gpio_to_irq(%d) for WL_HOST_WAKE failed\n",
+ __FUNCTION__, gpio_wl_host_wake);
+ gpio_free(gpio_wl_host_wake);
+ return -1;
+ }
+ }
+#ifdef CUSTOMER_HW_AMLOGIC
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
+ host_oob_irq = INT_GPIO_4;
+#else
+ host_oob_irq = wifi_irq_num();
+#endif
+#endif
+
+#ifdef HW_OOB
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
+ if (wifi_irq_trigger_level() == GPIO_IRQ_LOW)
+ host_oob_irq_flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL | IORESOURCE_IRQ_SHAREABLE;
+ else
+ host_oob_irq_flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL | IORESOURCE_IRQ_SHAREABLE;
+#else
+#ifdef HW_OOB_LOW_LEVEL
+ host_oob_irq_flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL | IORESOURCE_IRQ_SHAREABLE;
+#else
+ host_oob_irq_flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL | IORESOURCE_IRQ_SHAREABLE;
+#endif
+#endif
+#else
+ host_oob_irq_flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE | IORESOURCE_IRQ_SHAREABLE;
+#endif
+ host_oob_irq_flags &= IRQF_TRIGGER_MASK;
+
+ adapter->irq_num = host_oob_irq;
+ adapter->intr_flags = host_oob_irq_flags;
+ printf("%s: WL_HOST_WAKE=%d, oob_irq=%d, oob_irq_flags=0x%x\n", __FUNCTION__,
+ gpio_wl_host_wake, host_oob_irq, host_oob_irq_flags);
+#endif /* CUSTOMER_OOB */
+ printf("%s: WL_REG_ON=%d\n", __FUNCTION__, gpio_wl_reg_on);
+
+ return 0;
+}
+
+static void dhd_wlan_deinit_gpio(wifi_adapter_info_t *adapter)
+{
+ int gpio_wl_reg_on = adapter->gpio_wl_reg_on;
+#ifdef CUSTOMER_OOB
+ int gpio_wl_host_wake = adapter->gpio_wl_host_wake;
+#endif
+
+ if (gpio_wl_reg_on >= 0) {
+ printf("%s: gpio_free(WL_REG_ON %d)\n", __FUNCTION__, gpio_wl_reg_on);
+ gpio_free(gpio_wl_reg_on);
+ gpio_wl_reg_on = -1;
+ }
+#ifdef CUSTOMER_OOB
+ if (gpio_wl_host_wake >= 0) {
+ printf("%s: gpio_free(WL_HOST_WAKE %d)\n", __FUNCTION__, gpio_wl_host_wake);
+ gpio_free(gpio_wl_host_wake);
+ gpio_wl_host_wake = -1;
+ }
+#endif /* CUSTOMER_OOB */
+}
+
+int dhd_wlan_init_plat_data(wifi_adapter_info_t *adapter)
+{
+ int err = 0;
+
+ printf("======== %s ========\n", __FUNCTION__);
+ if (adapter->index == -1) {
+ adapter->index = 0;
+ }
+ err = dhd_wlan_init_gpio(adapter);
+
+#ifdef DHD_STATIC_IN_DRIVER
+ dhd_static_buf_init();
+#endif
+ return err;
+}
+
+void dhd_wlan_deinit_plat_data(wifi_adapter_info_t *adapter)
+{
+ printf("======== %s ========\n", __FUNCTION__);
+#ifdef DHD_STATIC_IN_DRIVER
+ dhd_static_buf_exit();
+#endif
+ dhd_wlan_deinit_gpio(adapter);
+}
diff --git a/bcmdhd.101.10.361.x/dhd_ip.c b/bcmdhd.101.10.361.x/dhd_ip.c
new file mode 100755
index 0000000..3f9b625
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_ip.c
@@ -0,0 +1,1425 @@
+/*
+ * IP Packet Parser Module.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+#include <typedefs.h>
+#include <osl.h>
+
+#include <ethernet.h>
+#include <vlan.h>
+#include <802.3.h>
+#include <bcmip.h>
+#include <bcmendian.h>
+
+#include <dhd_dbg.h>
+
+#include <dhd_ip.h>
+#include <dhd_config.h>
+
+#if defined(DHDTCPACK_SUPPRESS) || defined(DHDTCPSYNC_FLOOD_BLK)
+#include <dhd_bus.h>
+#include <dhd_proto.h>
+#include <bcmtcp.h>
+#endif /* DHDTCPACK_SUPPRESS || DHDTCPSYNC_FLOOD_BLK */
+
+/* special values */
+/* 802.3 llc/snap header */
+static const uint8 llc_snap_hdr[SNAP_HDR_LEN] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
+
+pkt_frag_t pkt_frag_info(osl_t *osh, void *p)
+{
+ uint8 *frame;
+ int length;
+ uint8 *pt; /* Pointer to type field */
+ uint16 ethertype;
+ struct ipv4_hdr *iph; /* IP frame pointer */
+ int ipl; /* IP frame length */
+ uint16 iph_frag;
+
+ ASSERT(osh && p);
+
+ frame = PKTDATA(osh, p);
+ length = PKTLEN(osh, p);
+
+ /* Process Ethernet II or SNAP-encapsulated 802.3 frames */
+ if (length < ETHER_HDR_LEN) {
+ DHD_INFO(("%s: short eth frame (%d)\n", __FUNCTION__, length));
+ return DHD_PKT_FRAG_NONE;
+ } else if (ntoh16(*(uint16 *)(frame + ETHER_TYPE_OFFSET)) >= ETHER_TYPE_MIN) {
+ /* Frame is Ethernet II */
+ pt = frame + ETHER_TYPE_OFFSET;
+ } else if (length >= ETHER_HDR_LEN + SNAP_HDR_LEN + ETHER_TYPE_LEN &&
+ !bcmp(llc_snap_hdr, frame + ETHER_HDR_LEN, SNAP_HDR_LEN)) {
+ pt = frame + ETHER_HDR_LEN + SNAP_HDR_LEN;
+ } else {
+ DHD_INFO(("%s: non-SNAP 802.3 frame\n", __FUNCTION__));
+ return DHD_PKT_FRAG_NONE;
+ }
+
+ ethertype = ntoh16(*(uint16 *)pt);
+
+ /* Skip VLAN tag, if any */
+ if (ethertype == ETHER_TYPE_8021Q) {
+ pt += VLAN_TAG_LEN;
+
+ if (pt + ETHER_TYPE_LEN > frame + length) {
+ DHD_INFO(("%s: short VLAN frame (%d)\n", __FUNCTION__, length));
+ return DHD_PKT_FRAG_NONE;
+ }
+
+ ethertype = ntoh16(*(uint16 *)pt);
+ }
+
+ if (ethertype != ETHER_TYPE_IP) {
+ DHD_INFO(("%s: non-IP frame (ethertype 0x%x, length %d)\n",
+ __FUNCTION__, ethertype, length));
+ return DHD_PKT_FRAG_NONE;
+ }
+
+ iph = (struct ipv4_hdr *)(pt + ETHER_TYPE_LEN);
+ ipl = (uint)(length - (pt + ETHER_TYPE_LEN - frame));
+
+ /* We support IPv4 only */
+ if ((ipl < IPV4_OPTIONS_OFFSET) || (IP_VER(iph) != IP_VER_4)) {
+ DHD_INFO(("%s: short frame (%d) or non-IPv4\n", __FUNCTION__, ipl));
+ return DHD_PKT_FRAG_NONE;
+ }
+
+ iph_frag = ntoh16(iph->frag);
+
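+	/* Map the IPv4 frag field onto a fragment class:
+	 * DF set             -> treat as not fragmented
+	 * MF clear           -> last (or only) fragment
+	 * MF set, offset 0   -> first fragment
+	 * MF set, offset > 0 -> continuation fragment
+	 */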
+ if (iph_frag & IPV4_FRAG_DONT) {
+ return DHD_PKT_FRAG_NONE;
+ } else if ((iph_frag & IPV4_FRAG_MORE) == 0) {
+ return DHD_PKT_FRAG_LAST;
+ } else {
+ return (iph_frag & IPV4_FRAG_OFFSET_MASK)? DHD_PKT_FRAG_CONT : DHD_PKT_FRAG_FIRST;
+ }
+}
+
+#ifdef DHDTCPACK_SUPPRESS
+
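+/*
+ * Per-slot bookkeeping for one suppressed TCP ACK. In HOLD mode (PCIe) the
+ * timer flushes the held ACK after tcpack_sup_delay ms unless a newer ACK
+ * supersedes it first.
+ */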
+typedef struct {
+ void *pkt_in_q; /* TCP ACK packet that is already in txq or DelayQ */
+ void *pkt_ether_hdr; /* Ethernet header pointer of pkt_in_q */
+ int ifidx;
+ uint8 supp_cnt;
+ dhd_pub_t *dhdp;
+#ifndef TCPACK_SUPPRESS_HOLD_HRT
+ timer_list_compat_t timer;
+#else
+ struct tasklet_hrtimer timer;
+#endif /* TCPACK_SUPPRESS_HOLD_HRT */
+} tcpack_info_t;
+
+typedef struct _tdata_psh_info_t {
+ uint32 end_seq; /* end seq# of a received TCP PSH DATA pkt */
+ struct _tdata_psh_info_t *next; /* next pointer of the link chain */
+} tdata_psh_info_t;
+
+typedef struct {
+ struct {
+ uint8 src[IPV4_ADDR_LEN]; /* SRC ip addrs of this TCP stream */
+ uint8 dst[IPV4_ADDR_LEN]; /* DST ip addrs of this TCP stream */
+ } ip_addr;
+ struct {
+ uint8 src[TCP_PORT_LEN]; /* SRC tcp ports of this TCP stream */
+ uint8 dst[TCP_PORT_LEN]; /* DST tcp ports of this TCP stream */
+ } tcp_port;
+ tdata_psh_info_t *tdata_psh_info_head; /* Head of received TCP PSH DATA chain */
+ tdata_psh_info_t *tdata_psh_info_tail; /* Tail of received TCP PSH DATA chain */
+	uint32 last_used_time;		/* The last time this tcpdata_info was used (in ms) */
+} tcpdata_info_t;
+
+/* TCPACK SUPPRESS module */
+typedef struct {
+ int tcpack_info_cnt;
+ tcpack_info_t tcpack_info_tbl[TCPACK_INFO_MAXNUM]; /* Info of TCP ACK to send */
+ int tcpdata_info_cnt;
+ tcpdata_info_t tcpdata_info_tbl[TCPDATA_INFO_MAXNUM]; /* Info of received TCP DATA */
+ tdata_psh_info_t *tdata_psh_info_pool; /* Pointer to tdata_psh_info elements pool */
+ tdata_psh_info_t *tdata_psh_info_free; /* free tdata_psh_info elements chain in pool */
+#ifdef DHDTCPACK_SUP_DBG
+ int psh_info_enq_num; /* Number of free TCP PSH DATA info elements in pool */
+#endif /* DHDTCPACK_SUP_DBG */
+} tcpack_sup_module_t;
+
+#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG)
+counter_tbl_t tack_tbl = {"tcpACK", 0, 1000, 10, {0, }, 1};
+#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */
+
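+/*
+ * tdata_psh_info elements are kept on a LIFO free list: enq pushes an element
+ * onto tdata_psh_info_free, deq pops one. No locking here; callers are
+ * expected to hold the tcpack lock.
+ */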
+static void
+_tdata_psh_info_pool_enq(tcpack_sup_module_t *tcpack_sup_mod,
+ tdata_psh_info_t *tdata_psh_info)
+{
+ if ((tcpack_sup_mod == NULL) || (tdata_psh_info == NULL)) {
+ DHD_ERROR(("%s %d: ERROR %p %p\n", __FUNCTION__, __LINE__,
+ tcpack_sup_mod, tdata_psh_info));
+ return;
+ }
+
+ ASSERT(tdata_psh_info->next == NULL);
+ tdata_psh_info->next = tcpack_sup_mod->tdata_psh_info_free;
+ tcpack_sup_mod->tdata_psh_info_free = tdata_psh_info;
+#ifdef DHDTCPACK_SUP_DBG
+ tcpack_sup_mod->psh_info_enq_num++;
+#endif
+}
+
+static tdata_psh_info_t*
+_tdata_psh_info_pool_deq(tcpack_sup_module_t *tcpack_sup_mod)
+{
+ tdata_psh_info_t *tdata_psh_info = NULL;
+
+ if (tcpack_sup_mod == NULL) {
+ DHD_ERROR(("%s %d: ERROR %p\n", __FUNCTION__, __LINE__,
+ tcpack_sup_mod));
+ return NULL;
+ }
+
+ tdata_psh_info = tcpack_sup_mod->tdata_psh_info_free;
+ if (tdata_psh_info == NULL)
+ DHD_ERROR(("%s %d: Out of tdata_disc_grp\n", __FUNCTION__, __LINE__));
+ else {
+ tcpack_sup_mod->tdata_psh_info_free = tdata_psh_info->next;
+ tdata_psh_info->next = NULL;
+#ifdef DHDTCPACK_SUP_DBG
+ tcpack_sup_mod->psh_info_enq_num--;
+#endif /* DHDTCPACK_SUP_DBG */
+ }
+
+ return tdata_psh_info;
+}
+
+#ifdef BCMSDIO
+static int _tdata_psh_info_pool_init(dhd_pub_t *dhdp,
+ tcpack_sup_module_t *tcpack_sup_mod)
+{
+ tdata_psh_info_t *tdata_psh_info_pool = NULL;
+ uint i;
+
+ DHD_TRACE(("%s %d: Enter\n", __FUNCTION__, __LINE__));
+
+ if (tcpack_sup_mod == NULL)
+ return BCME_ERROR;
+
+ ASSERT(tcpack_sup_mod->tdata_psh_info_pool == NULL);
+ ASSERT(tcpack_sup_mod->tdata_psh_info_free == NULL);
+
+ tdata_psh_info_pool =
+ MALLOC(dhdp->osh, sizeof(tdata_psh_info_t) * TCPDATA_PSH_INFO_MAXNUM);
+
+ if (tdata_psh_info_pool == NULL)
+ return BCME_NOMEM;
+ bzero(tdata_psh_info_pool, sizeof(tdata_psh_info_t) * TCPDATA_PSH_INFO_MAXNUM);
+#ifdef DHDTCPACK_SUP_DBG
+ tcpack_sup_mod->psh_info_enq_num = 0;
+#endif /* DHDTCPACK_SUP_DBG */
+
+ /* Enqueue newly allocated tcpdata psh info elements to the pool */
+ for (i = 0; i < TCPDATA_PSH_INFO_MAXNUM; i++)
+ _tdata_psh_info_pool_enq(tcpack_sup_mod, &tdata_psh_info_pool[i]);
+
+ ASSERT(tcpack_sup_mod->tdata_psh_info_free != NULL);
+ tcpack_sup_mod->tdata_psh_info_pool = tdata_psh_info_pool;
+
+ return BCME_OK;
+}
+
+static void _tdata_psh_info_pool_deinit(dhd_pub_t *dhdp,
+ tcpack_sup_module_t *tcpack_sup_mod)
+{
+ uint i;
+ tdata_psh_info_t *tdata_psh_info;
+
+ DHD_TRACE(("%s %d: Enter\n", __FUNCTION__, __LINE__));
+
+ if (tcpack_sup_mod == NULL) {
+ DHD_ERROR(("%s %d: ERROR tcpack_sup_mod NULL!\n",
+ __FUNCTION__, __LINE__));
+ return;
+ }
+
+ for (i = 0; i < tcpack_sup_mod->tcpdata_info_cnt; i++) {
+ tcpdata_info_t *tcpdata_info = &tcpack_sup_mod->tcpdata_info_tbl[i];
+ /* Return tdata_psh_info elements allocated to each tcpdata_info to the pool */
+ while ((tdata_psh_info = tcpdata_info->tdata_psh_info_head)) {
+ tcpdata_info->tdata_psh_info_head = tdata_psh_info->next;
+ tdata_psh_info->next = NULL;
+ _tdata_psh_info_pool_enq(tcpack_sup_mod, tdata_psh_info);
+ }
+ tcpdata_info->tdata_psh_info_tail = NULL;
+ }
+#ifdef DHDTCPACK_SUP_DBG
+ DHD_ERROR(("%s %d: PSH INFO ENQ %d\n",
+ __FUNCTION__, __LINE__, tcpack_sup_mod->psh_info_enq_num));
+#endif /* DHDTCPACK_SUP_DBG */
+
+ i = 0;
+	/* Make sure we have collected back all tdata_psh_info elements */
+ while ((tdata_psh_info = tcpack_sup_mod->tdata_psh_info_free)) {
+ tcpack_sup_mod->tdata_psh_info_free = tdata_psh_info->next;
+ tdata_psh_info->next = NULL;
+ i++;
+ }
+ ASSERT(i == TCPDATA_PSH_INFO_MAXNUM);
+ MFREE(dhdp->osh, tcpack_sup_mod->tdata_psh_info_pool,
+ sizeof(tdata_psh_info_t) * TCPDATA_PSH_INFO_MAXNUM);
+
+ return;
+}
+#endif /* BCMSDIO */
+
+#ifdef BCMPCIE
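+/*
+ * HOLD-mode timer callback: when the hold delay expires, detach the held ACK
+ * from its tcpack_info slot and transmit it via dhd_sendpkt().
+ */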
+#ifndef TCPACK_SUPPRESS_HOLD_HRT
+static void dhd_tcpack_send(ulong data)
+#else
+static enum hrtimer_restart dhd_tcpack_send(struct hrtimer *timer)
+#endif /* TCPACK_SUPPRESS_HOLD_HRT */
+{
+ tcpack_sup_module_t *tcpack_sup_mod;
+ tcpack_info_t *cur_tbl;
+ dhd_pub_t *dhdp;
+ int ifidx;
+ void* pkt;
+ unsigned long flags;
+
+#ifndef TCPACK_SUPPRESS_HOLD_HRT
+ cur_tbl = (tcpack_info_t *)data;
+#else
+ cur_tbl = container_of(timer, tcpack_info_t, timer.timer);
+#endif /* TCPACK_SUPPRESS_HOLD_HRT */
+
+ if (!cur_tbl) {
+ goto done;
+ }
+
+ dhdp = cur_tbl->dhdp;
+ if (!dhdp) {
+ goto done;
+ }
+
+ flags = dhd_os_tcpacklock(dhdp);
+
+ if (unlikely(dhdp->tcpack_sup_mode != TCPACK_SUP_HOLD)) {
+ dhd_os_tcpackunlock(dhdp, flags);
+ goto done;
+ }
+
+ tcpack_sup_mod = dhdp->tcpack_sup_module;
+ if (!tcpack_sup_mod) {
+ DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n",
+ __FUNCTION__, __LINE__));
+ dhd_os_tcpackunlock(dhdp, flags);
+ goto done;
+ }
+ pkt = cur_tbl->pkt_in_q;
+ ifidx = cur_tbl->ifidx;
+ if (!pkt) {
+ dhd_os_tcpackunlock(dhdp, flags);
+ goto done;
+ }
+ cur_tbl->pkt_in_q = NULL;
+ cur_tbl->pkt_ether_hdr = NULL;
+ cur_tbl->ifidx = 0;
+ cur_tbl->supp_cnt = 0;
+ if (--tcpack_sup_mod->tcpack_info_cnt < 0) {
+ DHD_ERROR(("%s %d: ERROR!!! tcp_ack_info_cnt %d\n",
+ __FUNCTION__, __LINE__, tcpack_sup_mod->tcpack_info_cnt));
+ }
+
+ dhd_os_tcpackunlock(dhdp, flags);
+
+ dhd_sendpkt(dhdp, ifidx, pkt);
+
+done:
+#ifndef TCPACK_SUPPRESS_HOLD_HRT
+ return;
+#else
+ return HRTIMER_NORESTART;
+#endif /* TCPACK_SUPPRESS_HOLD_HRT */
+}
+#endif /* BCMPCIE */
+
+int dhd_tcpack_suppress_set(dhd_pub_t *dhdp, uint8 mode)
+{
+ int ret = BCME_OK;
+ unsigned long flags;
+ tcpack_sup_module_t *tcpack_sup_module;
+ uint8 invalid_mode = FALSE;
+ int prev_mode;
+ int i = 0;
+
+ flags = dhd_os_tcpacklock(dhdp);
+ tcpack_sup_module = dhdp->tcpack_sup_module;
+ prev_mode = dhdp->tcpack_sup_mode;
+
+ if (prev_mode == mode) {
+ DHD_ERROR(("%s %d: already set to %d\n", __FUNCTION__, __LINE__, mode));
+ goto exit;
+ }
+
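+	/* Valid modes depend on the bus: REPLACE/DELAYTX are SDIO-only and
+	 * HOLD is PCIe-only, so the complementary modes are rejected here.
+	 */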
+ invalid_mode |= (mode >= TCPACK_SUP_LAST_MODE);
+#ifdef BCMSDIO
+ invalid_mode |= (mode == TCPACK_SUP_HOLD);
+#endif /* BCMSDIO */
+#ifdef BCMPCIE
+ invalid_mode |= ((mode == TCPACK_SUP_REPLACE) || (mode == TCPACK_SUP_DELAYTX));
+#endif /* BCMPCIE */
+
+ if (invalid_mode) {
+ DHD_ERROR(("%s %d: Invalid TCP ACK Suppress mode %d\n",
+ __FUNCTION__, __LINE__, mode));
+ ret = BCME_BADARG;
+ goto exit;
+ }
+
+ printf("%s: TCP ACK Suppress mode %d -> mode %d\n",
+ __FUNCTION__, dhdp->tcpack_sup_mode, mode);
+ printf("%s: TCPACK_INFO_MAXNUM=%d, TCPDATA_INFO_MAXNUM=%d\n",
+ __FUNCTION__, TCPACK_INFO_MAXNUM, TCPDATA_INFO_MAXNUM);
+
+	/* Pre-processing for the mode change, based on the previous mode */
+ switch (prev_mode) {
+ case TCPACK_SUP_OFF:
+ if (tcpack_sup_module == NULL) {
+ tcpack_sup_module = MALLOC(dhdp->osh, sizeof(tcpack_sup_module_t));
+ if (tcpack_sup_module == NULL) {
+ DHD_ERROR(("%s[%d]: Failed to allocate the new memory for "
+ "tcpack_sup_module\n", __FUNCTION__, __LINE__));
+ dhdp->tcpack_sup_mode = TCPACK_SUP_OFF;
+ ret = BCME_NOMEM;
+ goto exit;
+ }
+ dhdp->tcpack_sup_module = tcpack_sup_module;
+ }
+ bzero(tcpack_sup_module, sizeof(tcpack_sup_module_t));
+ break;
+#ifdef BCMSDIO
+ case TCPACK_SUP_DELAYTX:
+ if (tcpack_sup_module) {
+			/* We won't need the tdata_psh_info pool and
+			 * tcpdata_info_tbl anymore
+			 */
+ _tdata_psh_info_pool_deinit(dhdp, tcpack_sup_module);
+ tcpack_sup_module->tcpdata_info_cnt = 0;
+ bzero(tcpack_sup_module->tcpdata_info_tbl,
+ sizeof(tcpdata_info_t) * TCPDATA_INFO_MAXNUM);
+ }
+
+ /* For half duplex bus interface, tx precedes rx by default */
+ if (dhdp->bus) {
+ dhd_bus_set_dotxinrx(dhdp->bus, TRUE);
+ }
+
+ if (tcpack_sup_module == NULL) {
+ DHD_ERROR(("%s[%d]: tcpack_sup_module should not be NULL\n",
+ __FUNCTION__, __LINE__));
+ dhdp->tcpack_sup_mode = TCPACK_SUP_OFF;
+ goto exit;
+ }
+ break;
+#endif /* BCMSDIO */
+ }
+
+	/* Switch to the new mode */
+ dhdp->tcpack_sup_mode = mode;
+
+	/* Post-processing for the new mode */
+ switch (mode) {
+ case TCPACK_SUP_OFF:
+ ASSERT(tcpack_sup_module != NULL);
+ /* Clean up timer/data structure for
+ * any remaining/pending packet or timer.
+ */
+ if (tcpack_sup_module) {
+		/* Check if the previous mode was TCPACK_SUP_HOLD */
+ if (prev_mode == TCPACK_SUP_HOLD) {
+ for (i = 0; i < TCPACK_INFO_MAXNUM; i++) {
+ tcpack_info_t *tcpack_info_tbl =
+ &tcpack_sup_module->tcpack_info_tbl[i];
+#ifndef TCPACK_SUPPRESS_HOLD_HRT
+ del_timer(&tcpack_info_tbl->timer);
+#else
+ hrtimer_cancel(&tcpack_info_tbl->timer.timer);
+#endif /* TCPACK_SUPPRESS_HOLD_HRT */
+ if (tcpack_info_tbl->pkt_in_q) {
+ PKTFREE(dhdp->osh,
+ tcpack_info_tbl->pkt_in_q, TRUE);
+ tcpack_info_tbl->pkt_in_q = NULL;
+ }
+ }
+ }
+ MFREE(dhdp->osh, tcpack_sup_module, sizeof(tcpack_sup_module_t));
+ dhdp->tcpack_sup_module = NULL;
+ } else {
+ DHD_ERROR(("%s[%d]: tcpack_sup_module should not be NULL\n",
+ __FUNCTION__, __LINE__));
+ }
+ break;
+#ifdef BCMSDIO
+ case TCPACK_SUP_REPLACE:
+ /* There is nothing to configure for this mode */
+ break;
+ case TCPACK_SUP_DELAYTX:
+ ret = _tdata_psh_info_pool_init(dhdp, tcpack_sup_module);
+ if (ret != BCME_OK) {
+ DHD_ERROR(("%s %d: pool init fail with %d\n",
+ __FUNCTION__, __LINE__, ret));
+ break;
+ }
+ if (dhdp->bus) {
+ dhd_bus_set_dotxinrx(dhdp->bus, FALSE);
+ }
+ break;
+#endif /* BCMSDIO */
+#ifdef BCMPCIE
+ case TCPACK_SUP_HOLD:
+ dhdp->tcpack_sup_ratio = dhdp->conf->tcpack_sup_ratio;
+ dhdp->tcpack_sup_delay = dhdp->conf->tcpack_sup_delay;
+ for (i = 0; i < TCPACK_INFO_MAXNUM; i++) {
+ tcpack_info_t *tcpack_info_tbl =
+ &tcpack_sup_module->tcpack_info_tbl[i];
+ tcpack_info_tbl->dhdp = dhdp;
+#ifndef TCPACK_SUPPRESS_HOLD_HRT
+ init_timer_compat(&tcpack_info_tbl->timer, dhd_tcpack_send,
+ tcpack_info_tbl);
+#else
+ tasklet_hrtimer_init(&tcpack_info_tbl->timer,
+ dhd_tcpack_send, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+#endif /* TCPACK_SUPPRESS_HOLD_HRT */
+ }
+ break;
+#endif /* BCMPCIE */
+ }
+
+exit:
+ dhd_os_tcpackunlock(dhdp, flags);
+ return ret;
+}
+
+void
+dhd_tcpack_info_tbl_clean(dhd_pub_t *dhdp)
+{
+ tcpack_sup_module_t *tcpack_sup_mod = dhdp->tcpack_sup_module;
+ int i;
+ unsigned long flags;
+
+ if (dhdp->tcpack_sup_mode == TCPACK_SUP_OFF)
+ goto exit;
+
+ flags = dhd_os_tcpacklock(dhdp);
+
+ if (!tcpack_sup_mod) {
+ DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n",
+ __FUNCTION__, __LINE__));
+ dhd_os_tcpackunlock(dhdp, flags);
+ goto exit;
+ }
+
+ if (dhdp->tcpack_sup_mode == TCPACK_SUP_HOLD) {
+ for (i = 0; i < TCPACK_INFO_MAXNUM; i++) {
+ if (tcpack_sup_mod->tcpack_info_tbl[i].pkt_in_q) {
+ PKTFREE(dhdp->osh, tcpack_sup_mod->tcpack_info_tbl[i].pkt_in_q,
+ TRUE);
+ tcpack_sup_mod->tcpack_info_tbl[i].pkt_in_q = NULL;
+ tcpack_sup_mod->tcpack_info_tbl[i].pkt_ether_hdr = NULL;
+ tcpack_sup_mod->tcpack_info_tbl[i].ifidx = 0;
+ tcpack_sup_mod->tcpack_info_tbl[i].supp_cnt = 0;
+ }
+ }
+ } else {
+ tcpack_sup_mod->tcpack_info_cnt = 0;
+ bzero(tcpack_sup_mod->tcpack_info_tbl, sizeof(tcpack_info_t) * TCPACK_INFO_MAXNUM);
+ }
+
+ dhd_os_tcpackunlock(dhdp, flags);
+
+ if (dhdp->tcpack_sup_mode == TCPACK_SUP_HOLD) {
+ for (i = 0; i < TCPACK_INFO_MAXNUM; i++) {
+#ifndef TCPACK_SUPPRESS_HOLD_HRT
+ del_timer_sync(&tcpack_sup_mod->tcpack_info_tbl[i].timer);
+#else
+ hrtimer_cancel(&tcpack_sup_mod->tcpack_info_tbl[i].timer.timer);
+#endif /* TCPACK_SUPPRESS_HOLD_HRT */
+ }
+ }
+
+exit:
+ return;
+}
+
+inline int dhd_tcpack_check_xmit(dhd_pub_t *dhdp, void *pkt)
+{
+ uint8 i;
+ tcpack_sup_module_t *tcpack_sup_mod;
+ tcpack_info_t *tcpack_info_tbl;
+ int tbl_cnt;
+ int ret = BCME_OK;
+ void *pdata;
+ uint32 pktlen;
+ unsigned long flags;
+
+ if (dhdp->tcpack_sup_mode == TCPACK_SUP_OFF)
+ goto exit;
+
+ pdata = PKTDATA(dhdp->osh, pkt);
+ pktlen = PKTLEN(dhdp->osh, pkt) - dhd_prot_hdrlen(dhdp, pdata);
+
+ if (pktlen < TCPACKSZMIN || pktlen > TCPACKSZMAX) {
+		DHD_TRACE(("%s %d: length %d too short or too long to be a TCP ACK\n",
+			__FUNCTION__, __LINE__, pktlen));
+ goto exit;
+ }
+
+ flags = dhd_os_tcpacklock(dhdp);
+ tcpack_sup_mod = dhdp->tcpack_sup_module;
+
+ if (!tcpack_sup_mod) {
+ DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", __FUNCTION__, __LINE__));
+ ret = BCME_ERROR;
+ dhd_os_tcpackunlock(dhdp, flags);
+ goto exit;
+ }
+ tbl_cnt = tcpack_sup_mod->tcpack_info_cnt;
+ tcpack_info_tbl = tcpack_sup_mod->tcpack_info_tbl;
+
+ ASSERT(tbl_cnt <= TCPACK_INFO_MAXNUM);
+
+ for (i = 0; i < tbl_cnt; i++) {
+ if (tcpack_info_tbl[i].pkt_in_q == pkt) {
+ DHD_TRACE(("%s %d: pkt %p sent out. idx %d, tbl_cnt %d\n",
+ __FUNCTION__, __LINE__, pkt, i, tbl_cnt));
+ /* This pkt is being transmitted so remove the tcp_ack_info of it. */
+ if (i < tbl_cnt - 1) {
+ bcopy(&tcpack_info_tbl[tbl_cnt - 1],
+ &tcpack_info_tbl[i], sizeof(tcpack_info_t));
+ }
+ bzero(&tcpack_info_tbl[tbl_cnt - 1], sizeof(tcpack_info_t));
+ if (--tcpack_sup_mod->tcpack_info_cnt < 0) {
+ DHD_ERROR(("%s %d: ERROR!!! tcp_ack_info_cnt %d\n",
+ __FUNCTION__, __LINE__, tcpack_sup_mod->tcpack_info_cnt));
+ ret = BCME_ERROR;
+ }
+ break;
+ }
+ }
+ dhd_os_tcpackunlock(dhdp, flags);
+
+exit:
+ return ret;
+}
+
+static INLINE bool dhd_tcpdata_psh_acked(dhd_pub_t *dhdp, uint8 *ip_hdr,
+ uint8 *tcp_hdr, uint32 tcp_ack_num)
+{
+ tcpack_sup_module_t *tcpack_sup_mod;
+ int i;
+ tcpdata_info_t *tcpdata_info = NULL;
+ tdata_psh_info_t *tdata_psh_info = NULL;
+ bool ret = FALSE;
+
+ if (dhdp->tcpack_sup_mode != TCPACK_SUP_DELAYTX)
+ goto exit;
+
+ tcpack_sup_mod = dhdp->tcpack_sup_module;
+
+ if (!tcpack_sup_mod) {
+ DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", __FUNCTION__, __LINE__));
+ goto exit;
+ }
+
+ DHD_TRACE(("%s %d: IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR
+ " TCP port %d %d, ack %u\n", __FUNCTION__, __LINE__,
+ IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_SRC_IP_OFFSET])),
+ IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_DEST_IP_OFFSET])),
+ ntoh16_ua(&tcp_hdr[TCP_SRC_PORT_OFFSET]),
+ ntoh16_ua(&tcp_hdr[TCP_DEST_PORT_OFFSET]),
+ tcp_ack_num));
+
+ for (i = 0; i < tcpack_sup_mod->tcpdata_info_cnt; i++) {
+ tcpdata_info_t *tcpdata_info_tmp = &tcpack_sup_mod->tcpdata_info_tbl[i];
+ DHD_TRACE(("%s %d: data info[%d], IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR
+ " TCP port %d %d\n", __FUNCTION__, __LINE__, i,
+ IPV4_ADDR_TO_STR(ntoh32_ua(tcpdata_info_tmp->ip_addr.src)),
+ IPV4_ADDR_TO_STR(ntoh32_ua(tcpdata_info_tmp->ip_addr.dst)),
+ ntoh16_ua(tcpdata_info_tmp->tcp_port.src),
+ ntoh16_ua(tcpdata_info_tmp->tcp_port.dst)));
+
+		/* The ACK flows in the opposite direction to the data stream, so
+		 * match src against dst and vice versa; skip on any mismatch.
+		 */
+ if (memcmp(&ip_hdr[IPV4_SRC_IP_OFFSET],
+ tcpdata_info_tmp->ip_addr.dst, IPV4_ADDR_LEN) == 0 &&
+ memcmp(&ip_hdr[IPV4_DEST_IP_OFFSET],
+ tcpdata_info_tmp->ip_addr.src, IPV4_ADDR_LEN) == 0 &&
+ memcmp(&tcp_hdr[TCP_SRC_PORT_OFFSET],
+ tcpdata_info_tmp->tcp_port.dst, TCP_PORT_LEN) == 0 &&
+ memcmp(&tcp_hdr[TCP_DEST_PORT_OFFSET],
+ tcpdata_info_tmp->tcp_port.src, TCP_PORT_LEN) == 0) {
+ tcpdata_info = tcpdata_info_tmp;
+ break;
+ }
+ }
+
+ if (tcpdata_info == NULL) {
+ DHD_TRACE(("%s %d: no tcpdata_info!\n", __FUNCTION__, __LINE__));
+ goto exit;
+ }
+
+ if (tcpdata_info->tdata_psh_info_head == NULL) {
+ DHD_TRACE(("%s %d: No PSH DATA to be acked!\n", __FUNCTION__, __LINE__));
+ }
+
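+	/* Pop every PSH segment whose end sequence number is covered by this
+	 * cumulative ACK; IS_TCPSEQ_GE() compares with wrap-around handling.
+	 */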
+ while ((tdata_psh_info = tcpdata_info->tdata_psh_info_head)) {
+ if (IS_TCPSEQ_GE(tcp_ack_num, tdata_psh_info->end_seq)) {
+ DHD_TRACE(("%s %d: PSH ACKED! %u >= %u\n",
+ __FUNCTION__, __LINE__, tcp_ack_num, tdata_psh_info->end_seq));
+ tcpdata_info->tdata_psh_info_head = tdata_psh_info->next;
+ tdata_psh_info->next = NULL;
+ _tdata_psh_info_pool_enq(tcpack_sup_mod, tdata_psh_info);
+ ret = TRUE;
+ } else
+ break;
+ }
+ if (tdata_psh_info == NULL)
+ tcpdata_info->tdata_psh_info_tail = NULL;
+
+#ifdef DHDTCPACK_SUP_DBG
+ DHD_TRACE(("%s %d: PSH INFO ENQ %d\n",
+ __FUNCTION__, __LINE__, tcpack_sup_mod->psh_info_enq_num));
+#endif /* DHDTCPACK_SUP_DBG */
+
+exit:
+ return ret;
+}
+
+bool
+dhd_tcpack_suppress(dhd_pub_t *dhdp, void *pkt)
+{
+ uint8 *new_ether_hdr; /* Ethernet header of the new packet */
+ uint16 new_ether_type; /* Ethernet type of the new packet */
+ uint8 *new_ip_hdr; /* IP header of the new packet */
+ uint8 *new_tcp_hdr; /* TCP header of the new packet */
+ uint32 new_ip_hdr_len; /* IP header length of the new packet */
+ uint32 cur_framelen;
+ uint32 new_tcp_ack_num; /* TCP acknowledge number of the new packet */
+ uint16 new_ip_total_len; /* Total length of IP packet for the new packet */
+ uint32 new_tcp_hdr_len; /* TCP header length of the new packet */
+ tcpack_sup_module_t *tcpack_sup_mod;
+ tcpack_info_t *tcpack_info_tbl;
+ int i;
+ bool ret = FALSE;
+ bool set_dotxinrx = TRUE;
+ unsigned long flags;
+
+ if (dhdp->tcpack_sup_mode == TCPACK_SUP_OFF)
+ goto exit;
+
+ new_ether_hdr = PKTDATA(dhdp->osh, pkt);
+ cur_framelen = PKTLEN(dhdp->osh, pkt);
+
+ if (cur_framelen < TCPACKSZMIN || cur_framelen > TCPACKSZMAX) {
+		DHD_TRACE(("%s %d: length %d too short or too long to be a TCP ACK\n",
+			__FUNCTION__, __LINE__, cur_framelen));
+ goto exit;
+ }
+
+ new_ether_type = new_ether_hdr[12] << 8 | new_ether_hdr[13];
+
+ if (new_ether_type != ETHER_TYPE_IP) {
+		DHD_TRACE(("%s %d: Not an IP packet 0x%x\n",
+ __FUNCTION__, __LINE__, new_ether_type));
+ goto exit;
+ }
+
+ DHD_TRACE(("%s %d: IP pkt! 0x%x\n", __FUNCTION__, __LINE__, new_ether_type));
+
+ new_ip_hdr = new_ether_hdr + ETHER_HDR_LEN;
+ cur_framelen -= ETHER_HDR_LEN;
+
+ ASSERT(cur_framelen >= IPV4_MIN_HEADER_LEN);
+
+ new_ip_hdr_len = IPV4_HLEN(new_ip_hdr);
+ if (IP_VER(new_ip_hdr) != IP_VER_4 || IPV4_PROT(new_ip_hdr) != IP_PROT_TCP) {
+		DHD_TRACE(("%s %d: Not an IPv4 TCP packet! ip ver %d, prot %d\n",
+ __FUNCTION__, __LINE__, IP_VER(new_ip_hdr), IPV4_PROT(new_ip_hdr)));
+ goto exit;
+ }
+
+ new_tcp_hdr = new_ip_hdr + new_ip_hdr_len;
+ cur_framelen -= new_ip_hdr_len;
+
+ ASSERT(cur_framelen >= TCP_MIN_HEADER_LEN);
+
+ DHD_TRACE(("%s %d: TCP pkt!\n", __FUNCTION__, __LINE__));
+
+	/* Is it an ACK? Match only the ACK flag, so other TCP flags are not suppressed. */
+ if (new_tcp_hdr[TCP_FLAGS_OFFSET] != TCP_FLAG_ACK) {
+ DHD_TRACE(("%s %d: Do not touch TCP flag 0x%x\n",
+ __FUNCTION__, __LINE__, new_tcp_hdr[TCP_FLAGS_OFFSET]));
+ goto exit;
+ }
+
+ new_ip_total_len = ntoh16_ua(&new_ip_hdr[IPV4_PKTLEN_OFFSET]);
+ new_tcp_hdr_len = 4 * TCP_HDRLEN(new_tcp_hdr[TCP_HLEN_OFFSET]);
+
+	/* This packet carries TCP data, so just send it */
+ if (new_ip_total_len > new_ip_hdr_len + new_tcp_hdr_len) {
+ DHD_TRACE(("%s %d: Do nothing for TCP DATA\n", __FUNCTION__, __LINE__));
+ goto exit;
+ }
+
+ ASSERT(new_ip_total_len == new_ip_hdr_len + new_tcp_hdr_len);
+
+ new_tcp_ack_num = ntoh32_ua(&new_tcp_hdr[TCP_ACK_NUM_OFFSET]);
+
+ DHD_TRACE(("%s %d: TCP ACK with zero DATA length"
+ " IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR" TCP port %d %d\n",
+ __FUNCTION__, __LINE__,
+ IPV4_ADDR_TO_STR(ntoh32_ua(&new_ip_hdr[IPV4_SRC_IP_OFFSET])),
+ IPV4_ADDR_TO_STR(ntoh32_ua(&new_ip_hdr[IPV4_DEST_IP_OFFSET])),
+ ntoh16_ua(&new_tcp_hdr[TCP_SRC_PORT_OFFSET]),
+ ntoh16_ua(&new_tcp_hdr[TCP_DEST_PORT_OFFSET])));
+
+ /* Look for tcp_ack_info that has the same ip src/dst addrs and tcp src/dst ports */
+ flags = dhd_os_tcpacklock(dhdp);
+#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG)
+ counter_printlog(&tack_tbl);
+ tack_tbl.cnt[0]++;
+#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */
+
+	tcpack_sup_mod = dhdp->tcpack_sup_module;
+
+	if (!tcpack_sup_mod) {
+		DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", __FUNCTION__, __LINE__));
+		/* ret stays FALSE so the packet is sent out normally */
+		dhd_os_tcpackunlock(dhdp, flags);
+		goto exit;
+	}
+	tcpack_info_tbl = tcpack_sup_mod->tcpack_info_tbl;
+
+ if (dhd_tcpdata_psh_acked(dhdp, new_ip_hdr, new_tcp_hdr, new_tcp_ack_num)) {
+ /* This TCPACK is ACK to TCPDATA PSH pkt, so keep set_dotxinrx TRUE */
+#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG)
+ tack_tbl.cnt[5]++;
+#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */
+ } else
+ set_dotxinrx = FALSE;
+
+ for (i = 0; i < tcpack_sup_mod->tcpack_info_cnt; i++) {
+ void *oldpkt; /* TCPACK packet that is already in txq or DelayQ */
+ uint8 *old_ether_hdr, *old_ip_hdr, *old_tcp_hdr;
+ uint32 old_ip_hdr_len, old_tcp_hdr_len;
+ uint32 old_tcpack_num; /* TCP ACK number of old TCPACK packet in Q */
+
+ if ((oldpkt = tcpack_info_tbl[i].pkt_in_q) == NULL) {
+ DHD_ERROR(("%s %d: Unexpected error!! cur idx %d, ttl cnt %d\n",
+ __FUNCTION__, __LINE__, i, tcpack_sup_mod->tcpack_info_cnt));
+ break;
+ }
+
+ if (PKTDATA(dhdp->osh, oldpkt) == NULL) {
+ DHD_ERROR(("%s %d: oldpkt data NULL!! cur idx %d, ttl cnt %d\n",
+ __FUNCTION__, __LINE__, i, tcpack_sup_mod->tcpack_info_cnt));
+ break;
+ }
+
+ old_ether_hdr = tcpack_info_tbl[i].pkt_ether_hdr;
+ old_ip_hdr = old_ether_hdr + ETHER_HDR_LEN;
+ old_ip_hdr_len = IPV4_HLEN(old_ip_hdr);
+ old_tcp_hdr = old_ip_hdr + old_ip_hdr_len;
+ old_tcp_hdr_len = 4 * TCP_HDRLEN(old_tcp_hdr[TCP_HLEN_OFFSET]);
+
+ DHD_TRACE(("%s %d: oldpkt %p[%d], IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR
+ " TCP port %d %d\n", __FUNCTION__, __LINE__, oldpkt, i,
+ IPV4_ADDR_TO_STR(ntoh32_ua(&old_ip_hdr[IPV4_SRC_IP_OFFSET])),
+ IPV4_ADDR_TO_STR(ntoh32_ua(&old_ip_hdr[IPV4_DEST_IP_OFFSET])),
+ ntoh16_ua(&old_tcp_hdr[TCP_SRC_PORT_OFFSET]),
+ ntoh16_ua(&old_tcp_hdr[TCP_DEST_PORT_OFFSET])));
+
+		/* If either the IP addresses or the TCP ports do not match, skip.
+ * Note that src/dst addr fields in ip header are contiguous being 8 bytes in total.
+ * Also, src/dst port fields in TCP header are contiguous being 4 bytes in total.
+ */
+ if (memcmp(&new_ip_hdr[IPV4_SRC_IP_OFFSET],
+ &old_ip_hdr[IPV4_SRC_IP_OFFSET], IPV4_ADDR_LEN * 2) ||
+ memcmp(&new_tcp_hdr[TCP_SRC_PORT_OFFSET],
+ &old_tcp_hdr[TCP_SRC_PORT_OFFSET], TCP_PORT_LEN * 2))
+ continue;
+
+ old_tcpack_num = ntoh32_ua(&old_tcp_hdr[TCP_ACK_NUM_OFFSET]);
+
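+		/* A newer cumulative ACK supersedes the queued one: copy the new
+		 * ACK's IP/TCP headers over the queued packet (sizes permitting)
+		 * and free the new packet, so only the latest ACK goes out.
+		 */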
+ if (IS_TCPSEQ_GT(new_tcp_ack_num, old_tcpack_num)) {
+ /* New packet has higher TCP ACK number, so it replaces the old packet */
+ if (new_ip_hdr_len == old_ip_hdr_len &&
+ new_tcp_hdr_len == old_tcp_hdr_len) {
+ ASSERT(memcmp(new_ether_hdr, old_ether_hdr, ETHER_HDR_LEN) == 0);
+ bcopy(new_ip_hdr, old_ip_hdr, new_ip_total_len);
+ PKTFREE(dhdp->osh, pkt, FALSE);
+ DHD_TRACE(("%s %d: TCP ACK replace %u -> %u\n",
+ __FUNCTION__, __LINE__, old_tcpack_num, new_tcp_ack_num));
+#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG)
+ tack_tbl.cnt[2]++;
+#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */
+ ret = TRUE;
+ } else {
+#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG)
+ tack_tbl.cnt[6]++;
+#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */
+				DHD_TRACE(("%s %d: length mismatch %d != %d || %d != %d"
+ " ACK %u -> %u\n", __FUNCTION__, __LINE__,
+ new_ip_hdr_len, old_ip_hdr_len,
+ new_tcp_hdr_len, old_tcp_hdr_len,
+ old_tcpack_num, new_tcp_ack_num));
+ }
+ } else if (new_tcp_ack_num == old_tcpack_num) {
+ set_dotxinrx = TRUE;
+ /* TCPACK retransmission */
+#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG)
+ tack_tbl.cnt[3]++;
+#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */
+ } else {
+ DHD_TRACE(("%s %d: ACK number reverse old %u(0x%p) new %u(0x%p)\n",
+ __FUNCTION__, __LINE__, old_tcpack_num, oldpkt,
+ new_tcp_ack_num, pkt));
+ }
+ dhd_os_tcpackunlock(dhdp, flags);
+ goto exit;
+ }
+
+ if (i == tcpack_sup_mod->tcpack_info_cnt && i < TCPACK_INFO_MAXNUM) {
+ /* No TCPACK packet with the same IP addr and TCP port is found
+ * in tcp_ack_info_tbl. So add this packet to the table.
+ */
+ DHD_TRACE(("%s %d: Add pkt 0x%p(ether_hdr 0x%p) to tbl[%d]\n",
+ __FUNCTION__, __LINE__, pkt, new_ether_hdr,
+ tcpack_sup_mod->tcpack_info_cnt));
+
+ tcpack_info_tbl[tcpack_sup_mod->tcpack_info_cnt].pkt_in_q = pkt;
+ tcpack_info_tbl[tcpack_sup_mod->tcpack_info_cnt].pkt_ether_hdr = new_ether_hdr;
+ tcpack_sup_mod->tcpack_info_cnt++;
+#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG)
+ tack_tbl.cnt[1]++;
+#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */
+ } else {
+ ASSERT(i == tcpack_sup_mod->tcpack_info_cnt);
+ DHD_TRACE(("%s %d: No empty tcp ack info tbl\n",
+ __FUNCTION__, __LINE__));
+ }
+ dhd_os_tcpackunlock(dhdp, flags);
+
+exit:
+	/* Unless in TCPACK_SUP_DELAYTX mode, dotxinrx is always TRUE, so no need to set it here */
+ if (dhdp->tcpack_sup_mode == TCPACK_SUP_DELAYTX && set_dotxinrx)
+ dhd_bus_set_dotxinrx(dhdp->bus, TRUE);
+
+ return ret;
+}
+
+bool
+dhd_tcpdata_info_get(dhd_pub_t *dhdp, void *pkt)
+{
+ uint8 *ether_hdr; /* Ethernet header of the new packet */
+ uint16 ether_type; /* Ethernet type of the new packet */
+ uint8 *ip_hdr; /* IP header of the new packet */
+ uint8 *tcp_hdr; /* TCP header of the new packet */
+ uint32 ip_hdr_len; /* IP header length of the new packet */
+ uint32 cur_framelen;
+ uint16 ip_total_len; /* Total length of IP packet for the new packet */
+ uint32 tcp_hdr_len; /* TCP header length of the new packet */
+ uint32 tcp_seq_num; /* TCP sequence number of the new packet */
+ uint16 tcp_data_len; /* TCP DATA length that excludes IP and TCP headers */
+ uint32 end_tcp_seq_num; /* TCP seq number of the last byte in the new packet */
+ tcpack_sup_module_t *tcpack_sup_mod;
+ tcpdata_info_t *tcpdata_info = NULL;
+ tdata_psh_info_t *tdata_psh_info;
+
+ int i;
+ bool ret = FALSE;
+ unsigned long flags;
+
+ if (dhdp->tcpack_sup_mode != TCPACK_SUP_DELAYTX)
+ goto exit;
+
+ ether_hdr = PKTDATA(dhdp->osh, pkt);
+ cur_framelen = PKTLEN(dhdp->osh, pkt);
+
+ ether_type = ether_hdr[12] << 8 | ether_hdr[13];
+
+ if (ether_type != ETHER_TYPE_IP) {
+		DHD_TRACE(("%s %d: Not an IP packet 0x%x\n",
+ __FUNCTION__, __LINE__, ether_type));
+ goto exit;
+ }
+
+ DHD_TRACE(("%s %d: IP pkt! 0x%x\n", __FUNCTION__, __LINE__, ether_type));
+
+ ip_hdr = ether_hdr + ETHER_HDR_LEN;
+ cur_framelen -= ETHER_HDR_LEN;
+
+ ASSERT(cur_framelen >= IPV4_MIN_HEADER_LEN);
+
+ ip_hdr_len = IPV4_HLEN(ip_hdr);
+ if (IP_VER(ip_hdr) != IP_VER_4 || IPV4_PROT(ip_hdr) != IP_PROT_TCP) {
+		DHD_TRACE(("%s %d: Not an IPv4 TCP packet! ip ver %d, prot %d\n",
+ __FUNCTION__, __LINE__, IP_VER(ip_hdr), IPV4_PROT(ip_hdr)));
+ goto exit;
+ }
+
+ tcp_hdr = ip_hdr + ip_hdr_len;
+ cur_framelen -= ip_hdr_len;
+
+ ASSERT(cur_framelen >= TCP_MIN_HEADER_LEN);
+
+ DHD_TRACE(("%s %d: TCP pkt!\n", __FUNCTION__, __LINE__));
+
+ ip_total_len = ntoh16_ua(&ip_hdr[IPV4_PKTLEN_OFFSET]);
+ tcp_hdr_len = 4 * TCP_HDRLEN(tcp_hdr[TCP_HLEN_OFFSET]);
+
+	/* This packet is a pure TCP ACK with no payload, so do nothing */
+ if (ip_total_len == ip_hdr_len + tcp_hdr_len) {
+ DHD_TRACE(("%s %d: Do nothing for no data TCP ACK\n", __FUNCTION__, __LINE__));
+ goto exit;
+ }
+
+ ASSERT(ip_total_len > ip_hdr_len + tcp_hdr_len);
+
+ if ((tcp_hdr[TCP_FLAGS_OFFSET] & TCP_FLAG_PSH) == 0) {
+		DHD_TRACE(("%s %d: TCP DATA packet without PSH flag, not of interest\n", __FUNCTION__, __LINE__));
+ goto exit;
+ }
+
+ DHD_TRACE(("%s %d: TCP DATA with nonzero DATA length"
+ " IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR" TCP port %d %d, flag 0x%x\n",
+ __FUNCTION__, __LINE__,
+ IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_SRC_IP_OFFSET])),
+ IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_DEST_IP_OFFSET])),
+ ntoh16_ua(&tcp_hdr[TCP_SRC_PORT_OFFSET]),
+ ntoh16_ua(&tcp_hdr[TCP_DEST_PORT_OFFSET]),
+ tcp_hdr[TCP_FLAGS_OFFSET]));
+
+ flags = dhd_os_tcpacklock(dhdp);
+ tcpack_sup_mod = dhdp->tcpack_sup_module;
+
+	if (!tcpack_sup_mod) {
+		DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", __FUNCTION__, __LINE__));
+		/* ret stays FALSE on this error path */
+		dhd_os_tcpackunlock(dhdp, flags);
+		goto exit;
+	}
+
+ /* Look for tcpdata_info that has the same ip src/dst addrs and tcp src/dst ports */
+ i = 0;
+ while (i < tcpack_sup_mod->tcpdata_info_cnt) {
+ tcpdata_info_t *tdata_info_tmp = &tcpack_sup_mod->tcpdata_info_tbl[i];
+ uint32 now_in_ms = OSL_SYSUPTIME();
+ DHD_TRACE(("%s %d: data info[%d], IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR
+ " TCP port %d %d\n", __FUNCTION__, __LINE__, i,
+ IPV4_ADDR_TO_STR(ntoh32_ua(tdata_info_tmp->ip_addr.src)),
+ IPV4_ADDR_TO_STR(ntoh32_ua(tdata_info_tmp->ip_addr.dst)),
+ ntoh16_ua(tdata_info_tmp->tcp_port.src),
+ ntoh16_ua(tdata_info_tmp->tcp_port.dst)));
+
+		/* If both the IP addresses and TCP ports match, we found it, so break.
+ * Note that src/dst addr fields in ip header are contiguous being 8 bytes in total.
+ * Also, src/dst port fields in TCP header are contiguous being 4 bytes in total.
+ */
+ if (memcmp(&ip_hdr[IPV4_SRC_IP_OFFSET],
+ (void *)&tdata_info_tmp->ip_addr, IPV4_ADDR_LEN * 2) == 0 &&
+ memcmp(&tcp_hdr[TCP_SRC_PORT_OFFSET],
+ (void *)&tdata_info_tmp->tcp_port, TCP_PORT_LEN * 2) == 0) {
+ tcpdata_info = tdata_info_tmp;
+ tcpdata_info->last_used_time = now_in_ms;
+ break;
+ }
+
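+		/* Age out idle streams: if this entry has been unused for more
+		 * than TCPDATA_INFO_TIMEOUT ms, return its PSH list to the pool
+		 * and compact the table by moving the last entry into this slot.
+		 */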
+ if (now_in_ms - tdata_info_tmp->last_used_time > TCPDATA_INFO_TIMEOUT) {
+ tdata_psh_info_t *tdata_psh_info_tmp;
+ tcpdata_info_t *last_tdata_info;
+
+ while ((tdata_psh_info_tmp = tdata_info_tmp->tdata_psh_info_head)) {
+ tdata_info_tmp->tdata_psh_info_head = tdata_psh_info_tmp->next;
+ tdata_psh_info_tmp->next = NULL;
+ DHD_TRACE(("%s %d: Clean tdata_psh_info(end_seq %u)!\n",
+ __FUNCTION__, __LINE__, tdata_psh_info_tmp->end_seq));
+ _tdata_psh_info_pool_enq(tcpack_sup_mod, tdata_psh_info_tmp);
+ }
+#ifdef DHDTCPACK_SUP_DBG
+ DHD_ERROR(("%s %d: PSH INFO ENQ %d\n",
+ __FUNCTION__, __LINE__, tcpack_sup_mod->psh_info_enq_num));
+#endif /* DHDTCPACK_SUP_DBG */
+ tcpack_sup_mod->tcpdata_info_cnt--;
+ ASSERT(tcpack_sup_mod->tcpdata_info_cnt >= 0);
+
+ last_tdata_info =
+ &tcpack_sup_mod->tcpdata_info_tbl[tcpack_sup_mod->tcpdata_info_cnt];
+ if (i < tcpack_sup_mod->tcpdata_info_cnt) {
+ ASSERT(last_tdata_info != tdata_info_tmp);
+ bcopy(last_tdata_info, tdata_info_tmp, sizeof(tcpdata_info_t));
+ }
+ bzero(last_tdata_info, sizeof(tcpdata_info_t));
+ DHD_INFO(("%s %d: tcpdata_info(idx %d) is aged out. ttl cnt is now %d\n",
+ __FUNCTION__, __LINE__, i, tcpack_sup_mod->tcpdata_info_cnt));
+ /* Don't increase "i" here, so that the prev last tcpdata_info is checked */
+ } else
+ i++;
+ }
+
+ tcp_seq_num = ntoh32_ua(&tcp_hdr[TCP_SEQ_NUM_OFFSET]);
+ tcp_data_len = ip_total_len - ip_hdr_len - tcp_hdr_len;
+ end_tcp_seq_num = tcp_seq_num + tcp_data_len;
+
+ if (tcpdata_info == NULL) {
+ ASSERT(i == tcpack_sup_mod->tcpdata_info_cnt);
+ if (i >= TCPDATA_INFO_MAXNUM) {
+ DHD_TRACE(("%s %d: tcp_data_info_tbl FULL! %d %d"
+ " IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR" TCP port %d %d\n",
+ __FUNCTION__, __LINE__, i, tcpack_sup_mod->tcpdata_info_cnt,
+ IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_SRC_IP_OFFSET])),
+ IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_DEST_IP_OFFSET])),
+ ntoh16_ua(&tcp_hdr[TCP_SRC_PORT_OFFSET]),
+ ntoh16_ua(&tcp_hdr[TCP_DEST_PORT_OFFSET])));
+ dhd_os_tcpackunlock(dhdp, flags);
+ goto exit;
+ }
+ tcpdata_info = &tcpack_sup_mod->tcpdata_info_tbl[i];
+
+ /* No TCP flow with the same IP addr and TCP port is found
+ * in tcp_data_info_tbl. So add this flow to the table.
+ */
+ DHD_INFO(("%s %d: Add data info to tbl[%d]: IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR
+ " TCP port %d %d\n",
+ __FUNCTION__, __LINE__, tcpack_sup_mod->tcpdata_info_cnt,
+ IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_SRC_IP_OFFSET])),
+ IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_DEST_IP_OFFSET])),
+ ntoh16_ua(&tcp_hdr[TCP_SRC_PORT_OFFSET]),
+ ntoh16_ua(&tcp_hdr[TCP_DEST_PORT_OFFSET])));
+ /* Note that src/dst addr fields in ip header are contiguous being 8 bytes in total.
+ * Also, src/dst port fields in TCP header are contiguous being 4 bytes in total.
+ */
+ bcopy(&ip_hdr[IPV4_SRC_IP_OFFSET], (void *)&tcpdata_info->ip_addr,
+ IPV4_ADDR_LEN * 2);
+ bcopy(&tcp_hdr[TCP_SRC_PORT_OFFSET], (void *)&tcpdata_info->tcp_port,
+ TCP_PORT_LEN * 2);
+
+ tcpdata_info->last_used_time = OSL_SYSUPTIME();
+ tcpack_sup_mod->tcpdata_info_cnt++;
+ }
+
+ ASSERT(tcpdata_info != NULL);
+
+ tdata_psh_info = _tdata_psh_info_pool_deq(tcpack_sup_mod);
+#ifdef DHDTCPACK_SUP_DBG
+ DHD_TRACE(("%s %d: PSH INFO ENQ %d\n",
+ __FUNCTION__, __LINE__, tcpack_sup_mod->psh_info_enq_num));
+#endif /* DHDTCPACK_SUP_DBG */
+
+	if (tdata_psh_info == NULL) {
+		DHD_ERROR(("%s %d: No more free tdata_psh_info!!\n", __FUNCTION__, __LINE__));
+		/* ret stays FALSE on this error path */
+		dhd_os_tcpackunlock(dhdp, flags);
+		goto exit;
+	}
+ tdata_psh_info->end_seq = end_tcp_seq_num;
+
+#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG)
+ tack_tbl.cnt[4]++;
+#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */
+
+ DHD_TRACE(("%s %d: TCP PSH DATA recvd! end seq %u\n",
+ __FUNCTION__, __LINE__, tdata_psh_info->end_seq));
+
+ ASSERT(tdata_psh_info->next == NULL);
+
+ if (tcpdata_info->tdata_psh_info_head == NULL)
+ tcpdata_info->tdata_psh_info_head = tdata_psh_info;
+ else {
+ ASSERT(tcpdata_info->tdata_psh_info_tail);
+ tcpdata_info->tdata_psh_info_tail->next = tdata_psh_info;
+ }
+ tcpdata_info->tdata_psh_info_tail = tdata_psh_info;
+
+ dhd_os_tcpackunlock(dhdp, flags);
+
+exit:
+ return ret;
+}
+
+bool
+dhd_tcpack_hold(dhd_pub_t *dhdp, void *pkt, int ifidx)
+{
+ uint8 *new_ether_hdr; /* Ethernet header of the new packet */
+ uint16 new_ether_type; /* Ethernet type of the new packet */
+ uint8 *new_ip_hdr; /* IP header of the new packet */
+ uint8 *new_tcp_hdr; /* TCP header of the new packet */
+ uint32 new_ip_hdr_len; /* IP header length of the new packet */
+ uint32 cur_framelen;
+ uint32 new_tcp_ack_num; /* TCP acknowledge number of the new packet */
+ uint16 new_ip_total_len; /* Total length of IP packet for the new packet */
+ uint32 new_tcp_hdr_len; /* TCP header length of the new packet */
+ tcpack_sup_module_t *tcpack_sup_mod;
+ tcpack_info_t *tcpack_info_tbl;
+ int i, free_slot = TCPACK_INFO_MAXNUM;
+ bool hold = FALSE;
+ unsigned long flags;
+
+ if (dhdp->tcpack_sup_mode != TCPACK_SUP_HOLD) {
+ goto exit;
+ }
+
+ if (dhdp->tcpack_sup_ratio == 1) {
+ goto exit;
+ }
+
+ new_ether_hdr = PKTDATA(dhdp->osh, pkt);
+ cur_framelen = PKTLEN(dhdp->osh, pkt);
+
+ if (cur_framelen < TCPACKSZMIN || cur_framelen > TCPACKSZMAX) {
+		DHD_TRACE(("%s %d: length %d too short or too long to be a TCP ACK\n",
+			__FUNCTION__, __LINE__, cur_framelen));
+ goto exit;
+ }
+
+ new_ether_type = new_ether_hdr[12] << 8 | new_ether_hdr[13];
+
+ if (new_ether_type != ETHER_TYPE_IP) {
+		DHD_TRACE(("%s %d: Not an IP packet 0x%x\n",
+ __FUNCTION__, __LINE__, new_ether_type));
+ goto exit;
+ }
+
+ DHD_TRACE(("%s %d: IP pkt! 0x%x\n", __FUNCTION__, __LINE__, new_ether_type));
+
+ new_ip_hdr = new_ether_hdr + ETHER_HDR_LEN;
+ cur_framelen -= ETHER_HDR_LEN;
+
+ ASSERT(cur_framelen >= IPV4_MIN_HEADER_LEN);
+
+ new_ip_hdr_len = IPV4_HLEN(new_ip_hdr);
+ if (IP_VER(new_ip_hdr) != IP_VER_4 || IPV4_PROT(new_ip_hdr) != IP_PROT_TCP) {
+		DHD_TRACE(("%s %d: Not an IPv4 TCP packet! ip ver %d, prot %d\n",
+ __FUNCTION__, __LINE__, IP_VER(new_ip_hdr), IPV4_PROT(new_ip_hdr)));
+ goto exit;
+ }
+
+ new_tcp_hdr = new_ip_hdr + new_ip_hdr_len;
+ cur_framelen -= new_ip_hdr_len;
+
+ ASSERT(cur_framelen >= TCP_MIN_HEADER_LEN);
+
+ DHD_TRACE(("%s %d: TCP pkt!\n", __FUNCTION__, __LINE__));
+
+	/* Is it an ACK? Match only the ACK flag, so other TCP flags are not suppressed. */
+ if (new_tcp_hdr[TCP_FLAGS_OFFSET] != TCP_FLAG_ACK) {
+ DHD_TRACE(("%s %d: Do not touch TCP flag 0x%x\n",
+ __FUNCTION__, __LINE__, new_tcp_hdr[TCP_FLAGS_OFFSET]));
+ goto exit;
+ }
+
+ new_ip_total_len = ntoh16_ua(&new_ip_hdr[IPV4_PKTLEN_OFFSET]);
+ new_tcp_hdr_len = 4 * TCP_HDRLEN(new_tcp_hdr[TCP_HLEN_OFFSET]);
+
+	/* This packet carries TCP data, so just send it */
+ if (new_ip_total_len > new_ip_hdr_len + new_tcp_hdr_len) {
+ DHD_TRACE(("%s %d: Do nothing for TCP DATA\n", __FUNCTION__, __LINE__));
+ goto exit;
+ }
+
+ ASSERT(new_ip_total_len == new_ip_hdr_len + new_tcp_hdr_len);
+
+ new_tcp_ack_num = ntoh32_ua(&new_tcp_hdr[TCP_ACK_NUM_OFFSET]);
+
+ DHD_TRACE(("%s %d: TCP ACK with zero DATA length"
+ " IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR" TCP port %d %d\n",
+ __FUNCTION__, __LINE__,
+ IPV4_ADDR_TO_STR(ntoh32_ua(&new_ip_hdr[IPV4_SRC_IP_OFFSET])),
+ IPV4_ADDR_TO_STR(ntoh32_ua(&new_ip_hdr[IPV4_DEST_IP_OFFSET])),
+ ntoh16_ua(&new_tcp_hdr[TCP_SRC_PORT_OFFSET]),
+ ntoh16_ua(&new_tcp_hdr[TCP_DEST_PORT_OFFSET])));
+
+ /* Look for tcp_ack_info that has the same ip src/dst addrs and tcp src/dst ports */
+ flags = dhd_os_tcpacklock(dhdp);
+
+	tcpack_sup_mod = dhdp->tcpack_sup_module;
+
+	if (!tcpack_sup_mod) {
+		DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", __FUNCTION__, __LINE__));
+		dhd_os_tcpackunlock(dhdp, flags);
+		goto exit;
+	}
+	tcpack_info_tbl = tcpack_sup_mod->tcpack_info_tbl;
+
+ hold = TRUE;
+
+ for (i = 0; i < TCPACK_INFO_MAXNUM; i++) {
+ void *oldpkt; /* TCPACK packet that is already in txq or DelayQ */
+ uint8 *old_ether_hdr, *old_ip_hdr, *old_tcp_hdr;
+ uint32 old_ip_hdr_len;
+ uint32 old_tcpack_num; /* TCP ACK number of old TCPACK packet in Q */
+
+ if ((oldpkt = tcpack_info_tbl[i].pkt_in_q) == NULL) {
+ if (free_slot == TCPACK_INFO_MAXNUM) {
+ free_slot = i;
+ }
+ continue;
+ }
+
+ if (PKTDATA(dhdp->osh, oldpkt) == NULL) {
+ DHD_ERROR(("%s %d: oldpkt data NULL!! cur idx %d\n",
+ __FUNCTION__, __LINE__, i));
+ hold = FALSE;
+ dhd_os_tcpackunlock(dhdp, flags);
+ goto exit;
+ }
+
+ old_ether_hdr = tcpack_info_tbl[i].pkt_ether_hdr;
+ old_ip_hdr = old_ether_hdr + ETHER_HDR_LEN;
+ old_ip_hdr_len = IPV4_HLEN(old_ip_hdr);
+ old_tcp_hdr = old_ip_hdr + old_ip_hdr_len;
+
+ DHD_TRACE(("%s %d: oldpkt %p[%d], IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR
+ " TCP port %d %d\n", __FUNCTION__, __LINE__, oldpkt, i,
+ IPV4_ADDR_TO_STR(ntoh32_ua(&old_ip_hdr[IPV4_SRC_IP_OFFSET])),
+ IPV4_ADDR_TO_STR(ntoh32_ua(&old_ip_hdr[IPV4_DEST_IP_OFFSET])),
+ ntoh16_ua(&old_tcp_hdr[TCP_SRC_PORT_OFFSET]),
+ ntoh16_ua(&old_tcp_hdr[TCP_DEST_PORT_OFFSET])));
+
+		/* If either the IP addresses or the TCP ports do not match, skip. */
+ if (memcmp(&new_ip_hdr[IPV4_SRC_IP_OFFSET],
+ &old_ip_hdr[IPV4_SRC_IP_OFFSET], IPV4_ADDR_LEN * 2) ||
+ memcmp(&new_tcp_hdr[TCP_SRC_PORT_OFFSET],
+ &old_tcp_hdr[TCP_SRC_PORT_OFFSET], TCP_PORT_LEN * 2)) {
+ continue;
+ }
+
+ old_tcpack_num = ntoh32_ua(&old_tcp_hdr[TCP_ACK_NUM_OFFSET]);
+
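+		/* Count this ACK against the stream: every tcpack_sup_ratio-th
+		 * ACK is released immediately (hold is cleared), while the ones
+		 * in between are held, each newer ACK replacing the previous one.
+		 */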
+ if (IS_TCPSEQ_GE(new_tcp_ack_num, old_tcpack_num)) {
+ tcpack_info_tbl[i].supp_cnt++;
+ if (tcpack_info_tbl[i].supp_cnt >= dhdp->tcpack_sup_ratio) {
+ tcpack_info_tbl[i].pkt_in_q = NULL;
+ tcpack_info_tbl[i].pkt_ether_hdr = NULL;
+ tcpack_info_tbl[i].ifidx = 0;
+ tcpack_info_tbl[i].supp_cnt = 0;
+ hold = FALSE;
+ } else {
+ tcpack_info_tbl[i].pkt_in_q = pkt;
+ tcpack_info_tbl[i].pkt_ether_hdr = new_ether_hdr;
+ tcpack_info_tbl[i].ifidx = ifidx;
+ }
+ PKTFREE(dhdp->osh, oldpkt, TRUE);
+ } else {
+ PKTFREE(dhdp->osh, pkt, TRUE);
+ }
+ dhd_os_tcpackunlock(dhdp, flags);
+
+ if (!hold) {
+#ifndef TCPACK_SUPPRESS_HOLD_HRT
+ del_timer_sync(&tcpack_info_tbl[i].timer);
+#else
+ hrtimer_cancel(&tcpack_sup_mod->tcpack_info_tbl[i].timer.timer);
+#endif /* TCPACK_SUPPRESS_HOLD_HRT */
+ }
+ goto exit;
+ }
+
+ if (free_slot < TCPACK_INFO_MAXNUM) {
+ /* No TCPACK packet with the same IP addr and TCP port is found
+ * in tcp_ack_info_tbl. So add this packet to the table.
+ */
+ DHD_TRACE(("%s %d: Add pkt 0x%p(ether_hdr 0x%p) to tbl[%d]\n",
+ __FUNCTION__, __LINE__, pkt, new_ether_hdr,
+ free_slot));
+
+ tcpack_info_tbl[free_slot].pkt_in_q = pkt;
+ tcpack_info_tbl[free_slot].pkt_ether_hdr = new_ether_hdr;
+ tcpack_info_tbl[free_slot].ifidx = ifidx;
+ tcpack_info_tbl[free_slot].supp_cnt = 1;
+#ifndef TCPACK_SUPPRESS_HOLD_HRT
+ mod_timer(&tcpack_sup_mod->tcpack_info_tbl[free_slot].timer,
+ jiffies + msecs_to_jiffies(dhdp->tcpack_sup_delay));
+#else
+ tasklet_hrtimer_start(&tcpack_sup_mod->tcpack_info_tbl[free_slot].timer,
+ ktime_set(0, dhdp->tcpack_sup_delay*1000000),
+ HRTIMER_MODE_REL);
+#endif /* TCPACK_SUPPRESS_HOLD_HRT */
+ tcpack_sup_mod->tcpack_info_cnt++;
+ } else {
+ DHD_TRACE(("%s %d: No empty tcp ack info tbl\n",
+ __FUNCTION__, __LINE__));
+ }
+ dhd_os_tcpackunlock(dhdp, flags);
+
+exit:
+ return hold;
+}
+#endif /* DHDTCPACK_SUPPRESS */
+
+#ifdef DHDTCPSYNC_FLOOD_BLK
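+/*
+ * Classify a received frame by its TCP flags for SYN-flood detection:
+ * SYN -> FLAG_SYNC, SYN+ACK -> FLAG_SYNCACK, anything else (including
+ * non-TCP frames) -> FLAG_OTHERS.
+ */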
+tcp_hdr_flag_t
+dhd_tcpdata_get_flag(dhd_pub_t *dhdp, void *pkt)
+{
+ uint8 *ether_hdr; /* Ethernet header of the new packet */
+ uint16 ether_type; /* Ethernet type of the new packet */
+ uint8 *ip_hdr; /* IP header of the new packet */
+ uint8 *tcp_hdr; /* TCP header of the new packet */
+ uint32 ip_hdr_len; /* IP header length of the new packet */
+ uint32 cur_framelen;
+ uint8 flags;
+
+ ether_hdr = PKTDATA(dhdp->osh, pkt);
+ cur_framelen = PKTLEN(dhdp->osh, pkt);
+
+ ether_type = ether_hdr[12] << 8 | ether_hdr[13];
+
+ if (ether_type != ETHER_TYPE_IP) {
+		DHD_TRACE(("%s %d: Not an IP packet 0x%x\n",
+ __FUNCTION__, __LINE__, ether_type));
+ return FLAG_OTHERS;
+ }
+
+ ip_hdr = ether_hdr + ETHER_HDR_LEN;
+ cur_framelen -= ETHER_HDR_LEN;
+
+ if (cur_framelen < IPV4_MIN_HEADER_LEN) {
+ return FLAG_OTHERS;
+ }
+
+ ip_hdr_len = IPV4_HLEN(ip_hdr);
+ if (IP_VER(ip_hdr) != IP_VER_4 || IPV4_PROT(ip_hdr) != IP_PROT_TCP) {
+		DHD_TRACE(("%s %d: Not an IPv4 TCP packet! ip ver %d, prot %d\n",
+ __FUNCTION__, __LINE__, IP_VER(ip_hdr), IPV4_PROT(ip_hdr)));
+ return FLAG_OTHERS;
+ }
+
+ tcp_hdr = ip_hdr + ip_hdr_len;
+
+ flags = (uint8)tcp_hdr[TCP_FLAGS_OFFSET];
+
+ if (flags & TCP_FLAG_SYN) {
+ if (flags & TCP_FLAG_ACK) {
+ return FLAG_SYNCACK;
+ }
+ return FLAG_SYNC;
+ }
+ return FLAG_OTHERS;
+}
+#endif /* DHDTCPSYNC_FLOOD_BLK */
diff --git a/bcmdhd.101.10.361.x/dhd_ip.h b/bcmdhd.101.10.361.x/dhd_ip.h
new file mode 100755
index 0000000..a478c89
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_ip.h
@@ -0,0 +1,96 @@
+/*
+ * Header file describing the common ip parser function.
+ *
+ * Provides type definitions and function prototypes used to parse ip packet.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+#ifndef _dhd_ip_h_
+#define _dhd_ip_h_
+
+#if defined(DHDTCPACK_SUPPRESS) || defined(DHDTCPSYNC_FLOOD_BLK)
+#include <dngl_stats.h>
+#include <bcmutils.h>
+#include <dhd.h>
+#endif /* DHDTCPACK_SUPPRESS || DHDTCPSYNC_FLOOD_BLK */
+
+typedef enum pkt_frag
+{
+ DHD_PKT_FRAG_NONE = 0,
+ DHD_PKT_FRAG_FIRST,
+ DHD_PKT_FRAG_CONT,
+ DHD_PKT_FRAG_LAST
+} pkt_frag_t;
+
+extern pkt_frag_t pkt_frag_info(osl_t *osh, void *p);
+
+#ifdef DHDTCPSYNC_FLOOD_BLK
+typedef enum tcp_hdr_flags {
+ FLAG_SYNC,
+ FLAG_SYNCACK,
+ FLAG_RST,
+ FLAG_OTHERS
+} tcp_hdr_flag_t;
+
+extern tcp_hdr_flag_t dhd_tcpdata_get_flag(dhd_pub_t *dhdp, void *pkt);
+#endif /* DHDTCPSYNC_FLOOD_BLK */
+
+#ifdef DHDTCPACK_SUPPRESS
+#define TCPACKSZMIN (ETHER_HDR_LEN + IPV4_MIN_HEADER_LEN + TCP_MIN_HEADER_LEN)
+/* Size of MAX possible TCP ACK packet. Extra bytes for IP/TCP option fields */
+#define TCPACKSZMAX (TCPACKSZMIN + 100)
+
+/* Max number of TCP streams that have own src/dst IP addrs and TCP ports */
+#ifndef TCPACK_INFO_MAXNUM
+#define TCPACK_INFO_MAXNUM 4
+#endif
+#ifndef TCPDATA_INFO_MAXNUM
+#define TCPDATA_INFO_MAXNUM 4
+#endif
+#define TCPDATA_PSH_INFO_MAXNUM (8 * TCPDATA_INFO_MAXNUM)
+
+#define TCPDATA_INFO_TIMEOUT 5000 /* Remove tcpdata_info if inactive for this time (in ms) */
+
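+/* In HOLD mode, transmit one of every DEFAULT_TCPACK_SUPP_RATIO consecutive
+ * pure TCP ACKs per stream; a ratio of 1 disables suppression.
+ */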
+#define DEFAULT_TCPACK_SUPP_RATIO 3
+#ifndef CUSTOM_TCPACK_SUPP_RATIO
+#define CUSTOM_TCPACK_SUPP_RATIO DEFAULT_TCPACK_SUPP_RATIO
+#endif /* CUSTOM_TCPACK_SUPP_RATIO */
+
+#define DEFAULT_TCPACK_DELAY_TIME 10 /* ms */
+#ifndef CUSTOM_TCPACK_DELAY_TIME
+#define CUSTOM_TCPACK_DELAY_TIME DEFAULT_TCPACK_DELAY_TIME
+#endif /* CUSTOM_TCPACK_DELAY_TIME */
+
+extern int dhd_tcpack_suppress_set(dhd_pub_t *dhdp, uint8 mode);
+extern void dhd_tcpack_info_tbl_clean(dhd_pub_t *dhdp);
+extern int dhd_tcpack_check_xmit(dhd_pub_t *dhdp, void *pkt);
+extern bool dhd_tcpack_suppress(dhd_pub_t *dhdp, void *pkt);
+extern bool dhd_tcpdata_info_get(dhd_pub_t *dhdp, void *pkt);
+extern bool dhd_tcpack_hold(dhd_pub_t *dhdp, void *pkt, int ifidx);
+/* #define DHDTCPACK_SUP_DBG */
+#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG)
+extern counter_tbl_t tack_tbl;
+#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */
+#endif /* DHDTCPACK_SUPPRESS */
+
+#endif /* _dhd_ip_h_ */
diff --git a/bcmdhd.101.10.361.x/dhd_linux.c b/bcmdhd.101.10.361.x/dhd_linux.c
new file mode 100755
index 0000000..f2ab283
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_linux.c
@@ -0,0 +1,29878 @@
+/*
+ * Broadcom Dongle Host Driver (DHD), Linux-specific network interface.
+ * Basically selected code segments from usb-cdc.c and usb-rndis.c
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+#ifdef SHOW_LOGTRACE
+#include <linux/syscalls.h>
+#include <event_log.h>
+#endif /* SHOW_LOGTRACE */
+
+#if defined(PCIE_FULL_DONGLE) || defined(SHOW_LOGTRACE)
+#include <bcmmsgbuf.h>
+#endif /* PCIE_FULL_DONGLE || SHOW_LOGTRACE */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/etherdevice.h>
+#include <linux/random.h>
+#include <linux/spinlock.h>
+#include <linux/ethtool.h>
+#include <linux/fcntl.h>
+#include <linux/fs.h>
+#include <linux/ip.h>
+#include <linux/reboot.h>
+#include <linux/notifier.h>
+#include <linux/irq.h>
+#if defined(CONFIG_TIZEN)
+#include <linux/net_stat_tizen.h>
+#endif /* CONFIG_TIZEN */
+#include <net/addrconf.h>
+#ifdef ENABLE_ADAPTIVE_SCHED
+#include <linux/cpufreq.h>
+#endif /* ENABLE_ADAPTIVE_SCHED */
+#include <linux/rtc.h>
+#include <linux/namei.h>
+#include <asm/uaccess.h>
+#include <asm/unaligned.h>
+#include <dhd_linux_priv.h>
+
+#include <epivers.h>
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <bcmdevs.h>
+#include <bcmdevs_legacy.h> /* need to still support chips no longer in trunk firmware */
+#include <bcmiov.h>
+#include <bcmstdlib_s.h>
+
+#include <ethernet.h>
+#include <bcmevent.h>
+#include <vlan.h>
+#include <802.3.h>
+
+#ifdef WL_NANHO
+#include <nanho.h>
+#endif /* WL_NANHO */
+#include <dhd_linux_wq.h>
+#include <dhd.h>
+#include <dhd_linux.h>
+#include <dhd_linux_pktdump.h>
+#ifdef DHD_WET
+#include <dhd_wet.h>
+#endif /* DHD_WET */
+#ifdef PCIE_FULL_DONGLE
+#include <dhd_flowring.h>
+#endif
+#include <dhd_bus.h>
+#include <dhd_proto.h>
+#include <dhd_config.h>
+#ifdef WL_ESCAN
+#include <wl_escan.h>
+#endif
+#include <dhd_dbg.h>
+#include <dhd_dbg_ring.h>
+#include <dhd_debug.h>
+#if defined(WL_CFG80211)
+#include <wl_cfg80211.h>
+#ifdef WL_BAM
+#include <wl_bam.h>
+#endif /* WL_BAM */
+#endif /* WL_CFG80211 */
+#ifdef PNO_SUPPORT
+#include <dhd_pno.h>
+#endif
+#ifdef RTT_SUPPORT
+#include <dhd_rtt.h>
+#endif
+#ifdef DHD_TIMESYNC
+#include <dhd_timesync.h>
+#include <linux/ip.h>
+#include <net/icmp.h>
+#endif /* DHD_TIMESYNC */
+
+#include <dhd_linux_sock_qos.h>
+
+#ifdef CSI_SUPPORT
+#include <dhd_csi.h>
+#endif /* CSI_SUPPORT */
+
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+#endif
+
+#ifdef CONFIG_ARCH_EXYNOS
+#ifndef SUPPORT_EXYNOS7420
+#include <linux/exynos-pci-ctrl.h>
+#endif /* SUPPORT_EXYNOS7420 */
+#endif /* CONFIG_ARCH_EXYNOS */
+
+#ifdef DHD_WMF
+#include <dhd_wmf_linux.h>
+#endif /* DHD_WMF */
+
+#ifdef DHD_L2_FILTER
+#include <bcmicmp.h>
+#include <bcm_l2_filter.h>
+#include <dhd_l2_filter.h>
+#endif /* DHD_L2_FILTER */
+
+#ifdef DHD_PSTA
+#include <dhd_psta.h>
+#endif /* DHD_PSTA */
+
+#ifdef AMPDU_VO_ENABLE
+/* XXX: Enabling VO AMPDU to reduce FER */
+#include <802.1d.h>
+#endif /* AMPDU_VO_ENABLE */
+
+#if defined(DHDTCPACK_SUPPRESS) || defined(DHDTCPSYNC_FLOOD_BLK)
+#include <dhd_ip.h>
+#endif /* DHDTCPACK_SUPPRESS || DHDTCPSYNC_FLOOD_BLK */
+#include <dhd_daemon.h>
+#ifdef DHD_PKT_LOGGING
+#include <dhd_pktlog.h>
+#endif /* DHD_PKT_LOGGING */
+#ifdef DHD_4WAYM4_FAIL_DISCONNECT
+#include <eapol.h>
+#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
+#ifdef DHD_DEBUG_PAGEALLOC
+typedef void (*page_corrupt_cb_t)(void *handle, void *addr_corrupt, size_t len);
+void dhd_page_corrupt_cb(void *handle, void *addr_corrupt, size_t len);
+extern void register_page_corrupt_cb(page_corrupt_cb_t cb, void* handle);
+#endif /* DHD_DEBUG_PAGEALLOC */
+
+#if defined(DHD_TCP_WINSIZE_ADJUST)
+#include <linux/tcp.h>
+#include <net/tcp.h>
+#endif /* DHD_TCP_WINSIZE_ADJUST */
+
+#ifdef ENABLE_DHD_GRO
+#include <net/sch_generic.h>
+#endif /* ENABLE_DHD_GRO */
+
+#define IP_PROT_RESERVED 0xFF
+
+#ifdef DHD_MQ
+#define MQ_MAX_QUEUES AC_COUNT
+#define MQ_MAX_CPUS 16
+int enable_mq = TRUE;
+module_param(enable_mq, int, 0644);
+int mq_select_disable = FALSE;
+#endif
+
+#ifdef BCMINTERNAL
+#ifdef DHD_FWTRACE
+#include <dhd_fwtrace.h>
+#endif /* DHD_FWTRACE */
+#endif /* BCMINTERNAL */
+
+#if defined(DHD_LB)
+#if !defined(PCIE_FULL_DONGLE)
+#error "DHD Loadbalancing only supported on PCIE_FULL_DONGLE"
+#endif /* !PCIE_FULL_DONGLE */
+#endif /* DHD_LB */
+
+#if defined(DHD_LB_RXP) || defined(DHD_LB_TXP) || defined(DHD_LB_STATS)
+#if !defined(DHD_LB)
+#error "DHD loadbalance derivatives are supported only if DHD_LB is defined"
+#endif /* !DHD_LB */
+#endif /* DHD_LB_RXP || DHD_LB_TXP || DHD_LB_STATS */
+
+#ifdef DHD_4WAYM4_FAIL_DISCONNECT
+static void dhd_m4_state_handler(struct work_struct * work);
+#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
+
+#if defined(WL_CFG80211) && defined(DHD_FILE_DUMP_EVENT) && defined(DHD_FW_COREDUMP)
+static int dhd_wait_for_file_dump(dhd_pub_t *dhdp);
+#endif /* WL_CFG80211 && DHD_FILE_DUMP_EVENT && DHD_FW_COREDUMP */
+
+#ifdef FIX_CPU_MIN_CLOCK
+#include <linux/pm_qos.h>
+#endif /* FIX_CPU_MIN_CLOCK */
+
+#ifdef ENABLE_ADAPTIVE_SCHED
+#define DEFAULT_CPUFREQ_THRESH 1000000 /* threshold frequency : 1000000 = 1GHz */
+#ifndef CUSTOM_CPUFREQ_THRESH
+#define CUSTOM_CPUFREQ_THRESH DEFAULT_CPUFREQ_THRESH
+#endif /* CUSTOM_CPUFREQ_THRESH */
+#endif /* ENABLE_ADAPTIVE_SCHED */
+
+/* enable HOSTIP cache update from the host side when an eth0:N is up */
+#define AOE_IP_ALIAS_SUPPORT 1
+
+#ifdef PROP_TXSTATUS
+#include <wlfc_proto.h>
+#include <dhd_wlfc.h>
+#endif
+
+#if defined(OEM_ANDROID)
+#include <wl_android.h>
+#endif
+
+/* Maximum STA per radio */
+#if defined(BCM_ROUTER_DHD)
+#define DHD_MAX_STA 128
+#else
+#define DHD_MAX_STA 32
+#endif /* BCM_ROUTER_DHD */
+
+#ifdef CUSTOMER_HW_AMLOGIC
+#include <linux/amlogic/wifi_dt.h>
+#endif
+
+#if (defined(BCM_ROUTER_DHD) && defined(HNDCTF))
+#include <ctf/hndctf.h>
+
+#ifdef CTFPOOL
+#define RXBUFPOOLSZ 2048
+#define RXBUFSZ DHD_FLOWRING_RX_BUFPOST_PKTSZ /* packet data buffer size */
+#endif /* CTFPOOL */
+#endif /* BCM_ROUTER_DHD && HNDCTF */
+
+#ifdef BCMDBG
+#include <dhd_macdbg.h>
+#endif /* BCMDBG */
+
+#ifdef DHD_EVENT_LOG_FILTER
+#include <dhd_event_log_filter.h>
+#endif /* DHD_EVENT_LOG_FILTER */
+
+#ifdef DHDTCPSYNC_FLOOD_BLK
+static void dhd_blk_tsfl_handler(struct work_struct * work);
+#endif /* DHDTCPSYNC_FLOOD_BLK */
+
+#ifdef WL_NATOE
+#include <dhd_linux_nfct.h>
+#endif /* WL_NATOE */
+
+#ifdef DHD_TX_PROFILE
+#include <bcmarp.h>
+#include <bcmicmp.h>
+#include <bcmudp.h>
+#include <bcmproto.h>
+#endif /* defined(DHD_TX_PROFILE) */
+
+#if defined(DHD_TCP_WINSIZE_ADJUST)
+static uint target_ports[MAX_TARGET_PORTS] = {20, 0, 0, 0, 0};
+static uint dhd_use_tcp_window_size_adjust = FALSE;
+static void dhd_adjust_tcp_winsize(int op_mode, struct sk_buff *skb);
+#endif /* DHD_TCP_WINSIZE_ADJUST */
+
+#ifdef SET_RANDOM_MAC_SOFTAP
+#ifndef CONFIG_DHD_SET_RANDOM_MAC_VAL
+#define CONFIG_DHD_SET_RANDOM_MAC_VAL 0x001A11
+#endif
+static u32 vendor_oui = CONFIG_DHD_SET_RANDOM_MAC_VAL;
+#endif /* SET_RANDOM_MAC_SOFTAP */
+
+#if defined(BCM_ROUTER_DHD)
+/*
+ * Queue budget: Minimum number of packets that a queue must be allowed to hold
+ * to prevent starvation.
+ */
+#define DHD_QUEUE_BUDGET_DEFAULT (256)
+int dhd_queue_budget = DHD_QUEUE_BUDGET_DEFAULT;
+
+module_param(dhd_queue_budget, int, 0);
+
+/*
+ * Per station pkt threshold: Sum total of all packets in the backup queues of
+ * flowrings belonging to the station, not including packets already admitted
+ * to flowrings.
+ */
+#define DHD_STA_THRESHOLD_DEFAULT (2048)
+int dhd_sta_threshold = DHD_STA_THRESHOLD_DEFAULT;
+module_param(dhd_sta_threshold, int, 0);
+
+/*
+ * Per interface pkt threshold: Sum total of all packets in the backup queues of
+ * flowrings belonging to the interface, not including packets already admitted
+ * to flowrings.
+ */
+#define DHD_IF_THRESHOLD_DEFAULT (2048 * 32)
+int dhd_if_threshold = DHD_IF_THRESHOLD_DEFAULT;
+module_param(dhd_if_threshold, int, 0);
+#endif /* BCM_ROUTER_DHD */
+
+/* XXX: where does this belong? */
+/* XXX: this needs to reviewed for host OS. */
+const uint8 wme_fifo2ac[] = { 0, 1, 2, 3, 1, 1 };
+const uint8 prio2fifo[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
+#define WME_PRIO2AC(prio) wme_fifo2ac[prio2fifo[(prio)]]
+
+#ifdef ARP_OFFLOAD_SUPPORT
+void aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx);
+static int dhd_inetaddr_notifier_call(struct notifier_block *this,
+ unsigned long event, void *ptr);
+static struct notifier_block dhd_inetaddr_notifier = {
+ .notifier_call = dhd_inetaddr_notifier_call
+};
+/* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
+ * created in the kernel notifier link list (with 'next' pointing to itself)
+ */
+static bool dhd_inetaddr_notifier_registered = FALSE;
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
+int dhd_inet6addr_notifier_call(struct notifier_block *this,
+ unsigned long event, void *ptr);
+static struct notifier_block dhd_inet6addr_notifier = {
+ .notifier_call = dhd_inet6addr_notifier_call
+};
+/* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
+ * created in kernel notifier link list (with 'next' pointing to itself)
+ */
+static bool dhd_inet6addr_notifier_registered = FALSE;
+#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
+
+#if defined (CONFIG_PM_SLEEP)
+#include <linux/suspend.h>
+volatile bool dhd_mmc_suspend = FALSE;
+DECLARE_WAIT_QUEUE_HEAD(dhd_dpc_wait);
+#ifdef ENABLE_WAKEUP_PKT_DUMP
+volatile bool dhd_mmc_wake = FALSE;
+long long temp_raw;
+#endif /* ENABLE_WAKEUP_PKT_DUMP */
+#endif /* defined(CONFIG_PM_SLEEP) */
+
+#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) || defined(FORCE_WOWLAN)
+extern void dhd_enable_oob_intr(struct dhd_bus *bus, bool enable);
+#endif /* defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) || defined(FORCE_WOWLAN) */
+#if defined(OEM_ANDROID)
+static void dhd_hang_process(struct work_struct *work_data);
+#endif /* OEM_ANDROID */
+MODULE_LICENSE("GPL and additional rights");
+
+#if defined(MULTIPLE_SUPPLICANT)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+DEFINE_MUTEX(_dhd_mutex_lock_);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
+#endif
+static int dhd_suspend_resume_helper(struct dhd_info *dhd, int val, int force);
+
+#include <dhd_bus.h>
+
+/* XXX Set up an MTU change notifier per linux/notifier.h? */
+#ifndef PROP_TXSTATUS
+#define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen)
+#else
+#define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen + 128)
+#endif
+
+#ifdef PROP_TXSTATUS
+extern bool dhd_wlfc_skip_fc(void * dhdp, uint8 idx);
+extern void dhd_wlfc_plat_init(void *dhd);
+extern void dhd_wlfc_plat_deinit(void *dhd);
+#endif /* PROP_TXSTATUS */
+#ifdef USE_DYNAMIC_F2_BLKSIZE
+extern uint sd_f2_blocksize;
+extern int dhdsdio_func_blocksize(dhd_pub_t *dhd, int function_num, int block_size);
+#endif /* USE_DYNAMIC_F2_BLKSIZE */
+
+/* Linux wireless extension support */
+#if defined(WL_WIRELESS_EXT)
+#include <wl_iw.h>
+#endif /* defined(WL_WIRELESS_EXT) */
+
+#ifdef CONFIG_PARTIALSUSPEND_SLP
+/* XXX SLP uses a different earlysuspend header file and some different functions,
+ * but the meaning is mostly the same as on Android
+ */
+#include <linux/partialsuspend_slp.h>
+#define CONFIG_HAS_EARLYSUSPEND
+#define DHD_USE_EARLYSUSPEND
+#define register_early_suspend register_pre_suspend
+#define unregister_early_suspend unregister_pre_suspend
+#define early_suspend pre_suspend
+#define EARLY_SUSPEND_LEVEL_BLANK_SCREEN 50
+#else
+#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
+#include <linux/earlysuspend.h>
+#endif /* defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) */
+#endif /* CONFIG_PARTIALSUSPEND_SLP */
+
+#ifdef CONFIG_IRQ_HISTORY
+#include <linux/power/irq_history.h>
+#endif /* CONFIG_IRQ_HISTORY */
+
+#if defined(OEM_ANDROID)
+#include <linux/nl80211.h>
+#endif /* OEM_ANDROID */
+
+#if defined(PKT_FILTER_SUPPORT) && defined(APF)
+static int __dhd_apf_add_filter(struct net_device *ndev, uint32 filter_id,
+ u8* program, uint32 program_len);
+static int __dhd_apf_config_filter(struct net_device *ndev, uint32 filter_id,
+ uint32 mode, uint32 enable);
+static int __dhd_apf_delete_filter(struct net_device *ndev, uint32 filter_id);
+#endif /* PKT_FILTER_SUPPORT && APF */
+
+#ifdef DHD_FW_COREDUMP
+static void dhd_mem_dump(void *dhd_info, void *event_info, u8 event);
+#endif /* DHD_FW_COREDUMP */
+
+#ifdef DHD_LOG_DUMP
+
+struct dhd_log_dump_buf g_dld_buf[DLD_BUFFER_NUM];
+
+/* Only the headers for the log dump ring buffers are stored in this array.
+ * Headers for sections like 'dhd dump' and 'ext trap' are not, because
+ * those sections are not log ring buffers.
+ */
+dld_hdr_t dld_hdrs[DLD_BUFFER_NUM] = {
+ {GENERAL_LOG_HDR, LOG_DUMP_SECTION_GENERAL},
+ {PRESERVE_LOG_HDR, LOG_DUMP_SECTION_PRESERVE},
+ {SPECIAL_LOG_HDR, LOG_DUMP_SECTION_SPECIAL}
+};
+static int dld_buf_size[DLD_BUFFER_NUM] = {
+ LOG_DUMP_GENERAL_MAX_BUFSIZE, /* DLD_BUF_TYPE_GENERAL */
+ LOG_DUMP_PRESERVE_MAX_BUFSIZE, /* DLD_BUF_TYPE_PRESERVE */
+ LOG_DUMP_SPECIAL_MAX_BUFSIZE, /* DLD_BUF_TYPE_SPECIAL */
+};
+
+static void dhd_log_dump_init(dhd_pub_t *dhd);
+static void dhd_log_dump_deinit(dhd_pub_t *dhd);
+static void dhd_log_dump(void *handle, void *event_info, u8 event);
+static int do_dhd_log_dump(dhd_pub_t *dhdp, log_dump_type_t *type);
+static void dhd_print_buf_addr(dhd_pub_t *dhdp, char *name, void *buf, unsigned int size);
+static void dhd_log_dump_buf_addr(dhd_pub_t *dhdp, log_dump_type_t *type);
+char *dhd_dbg_get_system_timestamp(void);
+#endif /* DHD_LOG_DUMP */
+
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+#include <linux/workqueue.h>
+#include <linux/pm_runtime.h>
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+
+#ifdef DHD_DEBUG_UART
+#include <linux/kmod.h>
+#define DHD_DEBUG_UART_EXEC_PATH "/system/bin/wldu"
+static void dhd_debug_uart_exec_rd(void *handle, void *event_info, u8 event);
+static void dhd_debug_uart_exec(dhd_pub_t *dhdp, char *cmd);
+#endif /* DHD_DEBUG_UART */
+
+static int dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused);
+static struct notifier_block dhd_reboot_notifier = {
+ .notifier_call = dhd_reboot_callback,
+ .priority = 1,
+};
+
+#ifdef OEM_ANDROID
+#ifdef BCMPCIE
+static int is_reboot = 0;
+#endif /* BCMPCIE */
+#endif /* OEM_ANDROID */
+
+dhd_pub_t *g_dhd_pub = NULL;
+
+#if defined(BT_OVER_SDIO)
+#include "dhd_bt_interface.h"
+#endif /* defined (BT_OVER_SDIO) */
+
+#ifdef WL_NANHO
+static int dhd_nho_ioctl_process(dhd_pub_t *pub, int ifidx, dhd_ioctl_t *ioc, void *data_buf);
+static int dhd_nho_ioctl_cb(void *drv_ctx, int ifidx, wl_ioctl_t *ioc, bool drv_lock);
+static int dhd_nho_evt_cb(void *drv_ctx, int ifidx, bcm_event_t *evt, uint16 evt_len);
+#endif /* WL_NANHO */
+
+#ifdef WL_STATIC_IF
+bool dhd_is_static_ndev(dhd_pub_t *dhdp, struct net_device *ndev);
+#endif /* WL_STATIC_IF */
+
+atomic_t exit_in_progress = ATOMIC_INIT(0);
+
+static void dhd_process_daemon_msg(struct sk_buff *skb);
+static void dhd_destroy_to_notifier_skt(void);
+static int dhd_create_to_notifier_skt(void);
+static struct sock *nl_to_event_sk = NULL;
+int sender_pid = 0;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
+struct netlink_kernel_cfg dhd_netlink_cfg = {
+ .groups = 1,
+ .input = dhd_process_daemon_msg,
+};
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) */
+
+#ifdef DHD_PKTTS
+static int dhd_create_to_notifier_ts(void);
+static void dhd_destroy_to_notifier_ts(void);
+
+static struct sock *nl_to_ts = NULL;
+int sender_pid_ts = 0;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
+static void dhd_recv_msg_from_ts(struct sk_buff *skb);
+
+struct netlink_kernel_cfg dhd_netlink_ts = {
+ .groups = 1,
+ .input = dhd_recv_msg_from_ts,
+};
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) */
+
+#define GET_METADATA_VER(val) ((uint16)((val & 0xffff0000) >> 16))
+#define GET_METADATA_BUFLEN(val) ((uint16)(val & 0x0000ffff))
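+/* Worked example: for val == 0x00030080, GET_METADATA_VER(val) == 3 and
+ * GET_METADATA_BUFLEN(val) == 0x0080 (a 128-byte metadata buffer).
+ */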
+#endif /* DHD_PKTTS */
+
+#if defined(BT_OVER_SDIO)
+/* Flag to indicate if driver is initialized */
+uint dhd_driver_init_done = TRUE;
+#else
+/* Flag to indicate if driver is initialized */
+uint dhd_driver_init_done = FALSE;
+#endif
+/* Flag to indicate if we should download firmware on driver load */
+uint dhd_download_fw_on_driverload = TRUE;
+
+/* Definitions to provide path to the firmware and nvram
+ * example nvram_path[MOD_PARAM_PATHLEN]="/projects/wlan/nvram.txt"
+ */
+char firmware_path[MOD_PARAM_PATHLEN];
+char nvram_path[MOD_PARAM_PATHLEN];
+char clm_path[MOD_PARAM_PATHLEN];
+char config_path[MOD_PARAM_PATHLEN];
+#ifdef DHD_UCODE_DOWNLOAD
+char ucode_path[MOD_PARAM_PATHLEN];
+#endif /* DHD_UCODE_DOWNLOAD */
+
+module_param_string(clm_path, clm_path, MOD_PARAM_PATHLEN, 0660);
+
+/* backup buffer for firmware and nvram path */
+char fw_bak_path[MOD_PARAM_PATHLEN];
+char nv_bak_path[MOD_PARAM_PATHLEN];
+
+/* information string to keep firmware, chip, and chip revision version info visible in the log */
+char info_string[MOD_PARAM_INFOLEN];
+module_param_string(info_string, info_string, MOD_PARAM_INFOLEN, 0444);
+int op_mode = 0;
+int disable_proptx = 0;
+module_param(op_mode, int, 0644);
+#if defined(OEM_ANDROID)
+extern int wl_control_wl_start(struct net_device *dev);
+#if defined(BCMLXSDMMC) || defined(BCMDBUS)
+struct semaphore dhd_registration_sem;
+#endif /* BCMLXSDMMC || BCMDBUS */
+#endif /* defined(OEM_ANDROID) */
+void dhd_generate_rand_mac_addr(struct ether_addr *ea_addr);
+
+#ifdef DHD_LOG_DUMP
+int logdump_max_filesize = LOG_DUMP_MAX_FILESIZE;
+module_param(logdump_max_filesize, int, 0644);
+int logdump_max_bufsize = LOG_DUMP_GENERAL_MAX_BUFSIZE;
+module_param(logdump_max_bufsize, int, 0644);
+int logdump_periodic_flush = FALSE;
+module_param(logdump_periodic_flush, int, 0644);
+#ifdef EWP_ECNTRS_LOGGING
+int logdump_ecntr_enable = TRUE;
+#else
+int logdump_ecntr_enable = FALSE;
+#endif /* EWP_ECNTRS_LOGGING */
+module_param(logdump_ecntr_enable, int, 0644);
+#ifdef EWP_RTT_LOGGING
+int logdump_rtt_enable = TRUE;
+#else
+int logdump_rtt_enable = FALSE;
+#endif /* EWP_RTT_LOGGING */
+int logdump_prsrv_tailsize = DHD_LOG_DUMP_MAX_TAIL_FLUSH_SIZE;
+#endif /* DHD_LOG_DUMP */
+
+#ifdef EWP_EDL
+int host_edl_support = TRUE;
+module_param(host_edl_support, int, 0644);
+#endif
+
+/* deferred handlers */
+static void dhd_ifadd_event_handler(void *handle, void *event_info, u8 event);
+static void dhd_ifdel_event_handler(void *handle, void *event_info, u8 event);
+static void dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event);
+static void dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event);
+#ifdef BCM_ROUTER_DHD
+static void dhd_inform_dhd_monitor_handler(void *handle, void *event_info, u8 event);
+#endif
+#ifdef WL_NATOE
+static void dhd_natoe_ct_event_hanlder(void *handle, void *event_info, u8 event);
+static void dhd_natoe_ct_ioctl_handler(void *handle, void *event_info, uint8 event);
+#endif /* WL_NATOE */
+
+#ifdef DHD_UPDATE_INTF_MAC
+static void dhd_ifupdate_event_handler(void *handle, void *event_info, u8 event);
+#endif /* DHD_UPDATE_INTF_MAC */
+#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
+static void dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event);
+#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
+#ifdef WL_CFG80211
+extern void dhd_netdev_free(struct net_device *ndev);
+#endif /* WL_CFG80211 */
+static dhd_if_t * dhd_get_ifp_by_ndev(dhd_pub_t *dhdp, struct net_device *ndev);
+
+#if defined(WLDWDS) && defined(FOURADDR_AUTO_BRG)
+static void dhd_bridge_dev_set(dhd_info_t * dhd, int ifidx, struct net_device * dev);
+#endif /* defined(WLDWDS) && defined(FOURADDR_AUTO_BRG) */
+
+#if (defined(DHD_WET) || defined(DHD_MCAST_REGEN) || defined(DHD_L2_FILTER))
+/* update rx_pkt_chainable state of dhd interface */
+static void dhd_update_rx_pkt_chainable_state(dhd_pub_t* dhdp, uint32 idx);
+#endif /* DHD_WET || DHD_MCAST_REGEN || DHD_L2_FILTER */
+
+/* Error bits */
+module_param(dhd_msg_level, int, 0);
+#if defined(WL_WIRELESS_EXT)
+module_param(iw_msg_level, int, 0);
+#endif
+#ifdef WL_CFG80211
+module_param(wl_dbg_level, int, 0);
+#endif
+module_param(android_msg_level, int, 0);
+module_param(config_msg_level, int, 0);
+
+#ifdef ARP_OFFLOAD_SUPPORT
+/* ARP offload agent mode : Enable ARP Host Auto-Reply and ARP Peer Auto-Reply */
+/* XXX ARP Host Auto-Reply can cause a dongle trap in VSDB situations */
+/* XXX ARP offload snooping can be used for better quality */
+
+#ifdef ENABLE_ARP_SNOOP_MODE
+uint dhd_arp_mode = (ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY | ARP_OL_SNOOP | ARP_OL_HOST_AUTO_REPLY |
+ ARP_OL_UPDATE_HOST_CACHE);
+#else
+uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY | ARP_OL_UPDATE_HOST_CACHE;
+#endif /* ENABLE_ARP_SNOOP_MODE */
+
+module_param(dhd_arp_mode, uint, 0);
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+/* Disable Prop tx */
+module_param(disable_proptx, int, 0644);
+/* load firmware and/or nvram values from the filesystem */
+module_param_string(firmware_path, firmware_path, MOD_PARAM_PATHLEN, 0660);
+module_param_string(nvram_path, nvram_path, MOD_PARAM_PATHLEN, 0660);
+module_param_string(config_path, config_path, MOD_PARAM_PATHLEN, 0);
+#ifdef DHD_UCODE_DOWNLOAD
+module_param_string(ucode_path, ucode_path, MOD_PARAM_PATHLEN, 0660);
+#endif /* DHD_UCODE_DOWNLOAD */
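+/* Usage sketch (hypothetical paths): the firmware and nvram images can be
+ * overridden at module load time, e.g.:
+ *   insmod dhd.ko firmware_path=/vendor/firmware/fw_bcmdhd.bin \
+ *          nvram_path=/vendor/etc/wifi/nvram.txt
+ */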
+
+/* wl event forwarding */
+#ifdef WL_EVENT_ENAB
+uint wl_event_enable = true;
+#else
+uint wl_event_enable = false;
+#endif /* WL_EVENT_ENAB */
+module_param(wl_event_enable, uint, 0660);
+
+/* logtrace packet sendup */
+#ifdef LOGTRACE_PKT_SENDUP
+uint logtrace_pkt_sendup = true;
+#else
+uint logtrace_pkt_sendup = false;
+#endif /* LOGTRACE_PKT_SENDUP */
+module_param(logtrace_pkt_sendup, uint, 0660);
+
+/* Watchdog interval */
+/* extend watchdog expiration to 2 seconds when DPC is running */
+#define WATCHDOG_EXTEND_INTERVAL (2000)
+
+uint dhd_watchdog_ms = CUSTOM_DHD_WATCHDOG_MS;
+module_param(dhd_watchdog_ms, uint, 0);
+
+#ifdef DHD_PCIE_RUNTIMEPM
+uint dhd_runtimepm_ms = CUSTOM_DHD_RUNTIME_MS;
+#endif /* DHD_PCIE_RUNTIMEPM */
+#if defined(DHD_DEBUG)
+/* Console poll interval */
+#if defined(OEM_ANDROID)
+uint dhd_console_ms = 0; /* XXX andrey by default no fw msg prints */
+#else
+uint dhd_console_ms = 250;
+#endif /* OEM_ANDROID */
+module_param(dhd_console_ms, uint, 0644);
+#else
+uint dhd_console_ms = 0;
+#endif /* DHD_DEBUG */
+
+uint dhd_slpauto = TRUE;
+module_param(dhd_slpauto, uint, 0);
+
+#ifdef PKT_FILTER_SUPPORT
+/* Global Pkt filter enable control */
+uint dhd_pkt_filter_enable = TRUE;
+module_param(dhd_pkt_filter_enable, uint, 0);
+#endif
+
+/* Pkt filter init setup */
+uint dhd_pkt_filter_init = 0;
+module_param(dhd_pkt_filter_init, uint, 0);
+
+/* Pkt filter mode control */
+#ifdef GAN_LITE_NAT_KEEPALIVE_FILTER
+uint dhd_master_mode = FALSE;
+#else
+uint dhd_master_mode = FALSE;
+#endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */
+module_param(dhd_master_mode, uint, 0);
+
+int dhd_watchdog_prio = 0;
+module_param(dhd_watchdog_prio, int, 0);
+
+/* DPC thread priority */
+int dhd_dpc_prio = CUSTOM_DPC_PRIO_SETTING;
+module_param(dhd_dpc_prio, int, 0);
+
+/* RX frame thread priority */
+int dhd_rxf_prio = CUSTOM_RXF_PRIO_SETTING;
+module_param(dhd_rxf_prio, int, 0);
+
+#if !defined(BCMDBUS)
+extern int dhd_dongle_ramsize;
+module_param(dhd_dongle_ramsize, int, 0);
+#endif /* !BCMDBUS */
+
+#ifdef WL_CFG80211
+int passive_channel_skip = 0;
+module_param(passive_channel_skip, int, (S_IRUSR|S_IWUSR));
+#endif /* WL_CFG80211 */
+
+#ifdef DHD_MSI_SUPPORT
+uint enable_msi = TRUE;
+module_param(enable_msi, uint, 0);
+#endif /* DHD_MSI_SUPPORT */
+
+#ifdef DHD_SSSR_DUMP
+int dhdpcie_sssr_dump_get_before_after_len(dhd_pub_t *dhd, uint32 *arr_len);
+module_param(sssr_enab, uint, 0);
+module_param(fis_enab, uint, 0);
+#endif /* DHD_SSSR_DUMP */
+
+/* Keep track of number of instances */
+static int dhd_found = 0;
+static int instance_base = 0; /* Starting instance number */
+module_param(instance_base, int, 0644);
+
+#if defined(DHD_LB_RXP) && defined(PCIE_FULL_DONGLE)
+/*
+ * The Rx path processes a budget (dhd_napi_weight) of packets in one go and
+ * hands them over to the network stack.
+ *
+ * The dhd_dpc tasklet is the producer (packets received from the dongle) and
+ * dhd_napi_poll() is the consumer. The maximum number of packets that can be
+ * received from the dongle at any given point in time is D2HRING_RXCMPLT_MAX_ITEM.
+ * Also, DHD always posts fresh rx buffers to the dongle while processing rx completions.
+ *
+ * The consumer must consume packets at an equal or better rate than the producer,
+ * i.e. if dhd_napi_poll() does not process at the same rate as the producer (dhd_dpc),
+ * the rx_process_queue depth increases, which can even consume the entire
+ * system memory. Such a situation is taken care of by rx flow control.
+ *
+ * Device drivers are strongly advised not to use a value bigger than NAPI_POLL_WEIGHT.
+ */
+static int dhd_napi_weight = NAPI_POLL_WEIGHT;
+module_param(dhd_napi_weight, int, 0644);
+#endif /* DHD_LB_RXP && PCIE_FULL_DONGLE */
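+/* A minimal sketch (not driver code; all names below are hypothetical) of the
+ * contract described above: a NAPI poll handler consumes at most 'budget'
+ * packets per invocation and re-enables interrupts via napi_complete_done()
+ * only once it drains the queue below the budget, so the kernel keeps polling
+ * while the producer outruns the consumer.
+ */
+#if 0 /* example only */
+static int example_napi_poll(struct napi_struct *napi, int budget)
+{
+	int done = 0;
+	struct sk_buff *skb;
+
+	/* consume at most 'budget' packets from the rx queue */
+	while (done < budget && (skb = example_dequeue_rx()) != NULL) {
+		netif_receive_skb(skb); /* hand the packet to the network stack */
+		done++;
+	}
+	if (done < budget)
+		napi_complete_done(napi, done); /* queue drained: stop polling */
+	return done;
+}
+#endif /* example only */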
+
+#ifdef PCIE_FULL_DONGLE
+extern int h2d_max_txpost;
+module_param(h2d_max_txpost, int, 0644);
+
+#if defined(DHD_HTPUT_TUNABLES)
+extern int h2d_htput_max_txpost;
+module_param(h2d_htput_max_txpost, int, 0644);
+#endif /* DHD_HTPUT_TUNABLES */
+
+#ifdef AGG_H2D_DB
+extern bool agg_h2d_db_enab;
+module_param(agg_h2d_db_enab, bool, 0644);
+extern uint agg_h2d_db_timeout;
+module_param(agg_h2d_db_timeout, uint, 0644);
+extern uint agg_h2d_db_inflight_thresh;
+module_param(agg_h2d_db_inflight_thresh, uint, 0644);
+#endif /* AGG_H2D_DB */
+
+extern uint dma_ring_indices;
+module_param(dma_ring_indices, uint, 0644);
+
+extern bool h2d_phase;
+module_param(h2d_phase, bool, 0644);
+extern bool force_trap_bad_h2d_phase;
+module_param(force_trap_bad_h2d_phase, bool, 0644);
+#endif /* PCIE_FULL_DONGLE */
+
+#ifdef FORCE_TPOWERON
+/*
+ * On Fire's reference platform, coming out of L1.2,
+ * there is a constant delay of 45us between CLKREQ# and stable REFCLK
+ * Due to this delay, with tPowerOn < 50
+ * there is a chance that the refclk sense will trigger on noise.
+ *
+ * 0x29 when written to L1SSControl2 translates to 50us.
+ */
+#define FORCE_TPOWERON_50US 0x29
+uint32 tpoweron_scale = FORCE_TPOWERON_50US; /* default 50us */
+module_param(tpoweron_scale, uint, 0644);
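+/* Decoding sketch (per the PCIe L1 PM Substates definition): in L1SSControl2,
+ * bits [1:0] select the T_PowerOn scale (0b01 == 10us units) and bits [7:3]
+ * hold the T_PowerOn value; 0x29 == 0b00101001 -> value 5 * 10us == 50us.
+ */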
+#endif /* FORCE_TPOWERON */
+
+#ifdef SHOW_LOGTRACE
+#if defined(CUSTOMER_HW4_DEBUG)
+#define WIFI_PATH "/etc/wifi/"
+static char *logstrs_path = VENDOR_PATH WIFI_PATH"logstrs.bin";
+char *st_str_file_path = VENDOR_PATH WIFI_PATH"rtecdc.bin";
+static char *map_file_path = VENDOR_PATH WIFI_PATH"rtecdc.map";
+static char *rom_st_str_file_path = VENDOR_PATH WIFI_PATH"roml.bin";
+static char *rom_map_file_path = VENDOR_PATH WIFI_PATH"roml.map";
+#else
+static char *logstrs_path = PLATFORM_PATH"logstrs.bin";
+char *st_str_file_path = PLATFORM_PATH"rtecdc.bin";
+static char *map_file_path = PLATFORM_PATH"rtecdc.map";
+static char *rom_st_str_file_path = PLATFORM_PATH"roml.bin";
+static char *rom_map_file_path = PLATFORM_PATH"roml.map";
+#endif /* CUSTOMER_HW4_DEBUG */
+
+static char *ram_file_str = "rtecdc";
+static char *rom_file_str = "roml";
+
+module_param(logstrs_path, charp, S_IRUGO);
+module_param(st_str_file_path, charp, S_IRUGO);
+module_param(map_file_path, charp, S_IRUGO);
+module_param(rom_st_str_file_path, charp, S_IRUGO);
+module_param(rom_map_file_path, charp, S_IRUGO);
+
+static int dhd_init_logstrs_array(osl_t *osh, dhd_event_log_t *temp);
+static int dhd_read_map(osl_t *osh, char *fname, uint32 *ramstart, uint32 *rodata_start,
+ uint32 *rodata_end);
+static int dhd_init_static_strs_array(osl_t *osh, dhd_event_log_t *temp, char *str_file,
+ char *map_file);
+#endif /* SHOW_LOGTRACE */
+
+#if defined(GDB_PROXY) && defined(PCIE_FULL_DONGLE) && defined(BCMINTERNAL)
+static void gdb_proxy_fs_try_create(dhd_info_t *dhd, const char *dev_name);
+static void gdb_proxy_fs_remove(dhd_info_t *dhd);
+#endif /* defined(GDB_PROXY) && defined(PCIE_FULL_DONGLE) && defined(BCMINTERNAL) */
+
+#ifdef D2H_MINIDUMP
+void dhd_d2h_minidump(dhd_pub_t *dhdp);
+#endif /* D2H_MINIDUMP */
+
+#define DHD_MEMDUMP_TYPE_STR_LEN 32
+#define DHD_MEMDUMP_PATH_STR_LEN 128
+
+#ifdef DHD_TX_PROFILE
+/* process layer 3 headers, to ultimately determine if a
+ * dhd_tx_profile_protocol_t matches
+ */
+static int process_layer3_headers(uint8 **p, int plen, uint16 *type);
+
+/* process layer 2 headers, to ultimately determine if a
+ * dhd_tx_profile_protocol_t matches
+ */
+static int process_layer2_headers(uint8 **p, int *plen, uint16 *type, bool is_host_sfhllc);
+
+/* whether or not a dhd_tx_profile_protocol_t matches with data in a packet */
+bool dhd_protocol_matches_profile(uint8 *p, int plen, const
+ dhd_tx_profile_protocol_t *proto, bool is_host_sfhllc);
+#endif /* defined(DHD_TX_PROFILE) */
+
+#define PATH_BANDLOCK_INFO PLATFORM_PATH".bandlock.info"
+
+static void dhd_set_bandlock(dhd_pub_t * dhd);
+
+static void
+dhd_tx_stop_queues(struct net_device *net)
+{
+#ifdef DHD_MQ
+ netif_tx_stop_all_queues(net);
+#else
+ netif_stop_queue(net);
+#endif
+}
+
+static void
+dhd_tx_start_queues(struct net_device *net)
+{
+#ifdef DHD_MQ
+ netif_tx_wake_all_queues(net);
+#else
+ netif_wake_queue(net);
+#endif
+}
+
+#ifdef USE_WFA_CERT_CONF
+int g_frameburst = 1;
+#endif /* USE_WFA_CERT_CONF */
+
+static int dhd_get_pend_8021x_cnt(dhd_info_t *dhd);
+
+#ifdef PCIE_FULL_DONGLE
+#define DHD_IF_STA_LIST_LOCK_INIT(lock) spin_lock_init(lock)
+
+#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
+static struct list_head * dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp,
+ struct list_head *snapshot_list);
+static void dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list);
+#define DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, slist) ({ dhd_sta_list_snapshot(dhd, ifp, slist); })
+#define DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, slist) ({ dhd_sta_list_snapshot_free(dhd, slist); })
+#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */
+#endif /* PCIE_FULL_DONGLE */
+
+/* Control fw roaming */
+#ifdef BCMCCX
+uint dhd_roam_disable = 0;
+#else
+#ifdef OEM_ANDROID
+uint dhd_roam_disable = 0;
+#else
+uint dhd_roam_disable = 1;
+#endif
+#endif /* BCMCCX */
+
+#ifdef BCMDBGFS
+extern void dhd_dbgfs_init(dhd_pub_t *dhdp);
+extern void dhd_dbgfs_remove(void);
+#endif
+
+/* Enable TX status metadata report: 0=disable 1=enable 2=debug */
+static uint pcie_txs_metadata_enable = 0;
+module_param(pcie_txs_metadata_enable, uint, 0);
+
+/* Control radio state */
+uint dhd_radio_up = 1;
+
+/* Network interface name */
+char iface_name[IFNAMSIZ] = {'\0'};
+module_param_string(iface_name, iface_name, IFNAMSIZ, 0);
+
+/* The following are specific to the SDIO dongle */
+
+/* IOCTL response timeout */
+int dhd_ioctl_timeout_msec = IOCTL_RESP_TIMEOUT;
+
+/* DS Exit response timeout */
+int ds_exit_timeout_msec = DS_EXIT_TIMEOUT;
+
+/* Idle timeout for backplane clock */
+int dhd_idletime = DHD_IDLETIME_TICKS;
+module_param(dhd_idletime, int, 0);
+
+/* Use polling */
+uint dhd_poll = FALSE;
+module_param(dhd_poll, uint, 0);
+
+/* Use interrupts */
+uint dhd_intr = TRUE;
+module_param(dhd_intr, uint, 0);
+
+/* SDIO Drive Strength (in milliamps) */
+uint dhd_sdiod_drive_strength = 6;
+module_param(dhd_sdiod_drive_strength, uint, 0);
+
+#ifdef BCMSDIO
+/* Tx/Rx bounds */
+extern uint dhd_txbound;
+extern uint dhd_rxbound;
+module_param(dhd_txbound, uint, 0);
+module_param(dhd_rxbound, uint, 0);
+
+/* Deferred transmits */
+extern uint dhd_deferred_tx;
+module_param(dhd_deferred_tx, uint, 0);
+
+#ifdef BCMINTERNAL
+extern uint dhd_anychip;
+module_param(dhd_anychip, uint, 0);
+#endif /* BCMINTERNAL */
+#endif /* BCMSDIO */
+
+#ifdef BCMSLTGT
+#ifdef BCMFPGA_HW
+/* For FPGA use fixed htclkration as 30 */
+uint htclkratio = 30;
+#else
+uint htclkratio = 1;
+#endif /* BCMFPGA_HW */
+module_param(htclkratio, uint, 0);
+
+int dngl_xtalfreq = 0;
+module_param(dngl_xtalfreq, int, 0);
+#endif /* BCMSLTGT */
+
+#ifdef SDTEST
+/* Echo packet generator (pkts/s) */
+uint dhd_pktgen = 0;
+module_param(dhd_pktgen, uint, 0);
+
+/* Echo packet len (0 => sawtooth, max 2040) */
+uint dhd_pktgen_len = 0;
+module_param(dhd_pktgen_len, uint, 0);
+#endif /* SDTEST */
+
+#ifdef CUSTOM_DSCP_TO_PRIO_MAPPING
+uint dhd_dscpmap_enable = 1;
+module_param(dhd_dscpmap_enable, uint, 0644);
+#endif /* CUSTOM_DSCP_TO_PRIO_MAPPING */
+
+#if defined(BCMSUP_4WAY_HANDSHAKE)
+/* Use in dongle supplicant for 4-way handshake */
+#if defined(WLFBT) || defined(WL_ENABLE_IDSUP)
+/* Enable idsup by default (if supported in fw) */
+uint dhd_use_idsup = 1;
+#else
+uint dhd_use_idsup = 0;
+#endif /* WLFBT || WL_ENABLE_IDSUP */
+module_param(dhd_use_idsup, uint, 0);
+#endif /* BCMSUP_4WAY_HANDSHAKE */
+
+#ifndef BCMDBUS
+#if defined(OEM_ANDROID)
+/* Allow delayed firmware download for debug purpose */
+int allow_delay_fwdl = FALSE;
+#elif defined(BCM_ROUTER_DHD)
+/* Allow delayed firmware download for debug purpose */
+int allow_delay_fwdl = FALSE;
+#else
+int allow_delay_fwdl = TRUE;
+#endif /* OEM_ANDROID */
+module_param(allow_delay_fwdl, int, 0);
+#endif /* !BCMDBUS */
+
+#ifdef GDB_PROXY
+/* Adds/replaces deadman_to= in NVRAM file with deadman_to=0 */
+static uint nodeadman = 0;
+module_param(nodeadman, uint, 0);
+#endif /* GDB_PROXY */
+
+#ifdef ECOUNTER_PERIODIC_DISABLE
+uint enable_ecounter = FALSE;
+#else
+uint enable_ecounter = TRUE;
+#endif
+module_param(enable_ecounter, uint, 0);
+
+#ifdef BCMQT_HW
+int qt_flr_reset = FALSE;
+module_param(qt_flr_reset, int, 0);
+
+int qt_dngl_timeout = 0; // dongle attach timeout in ms
+module_param(qt_dngl_timeout, int, 0);
+#endif /* BCMQT_HW */
+
+/* TCM verification flag */
+uint dhd_tcm_test_enable = FALSE;
+module_param(dhd_tcm_test_enable, uint, 0644);
+
+extern char dhd_version[];
+extern char fw_version[];
+extern char clm_version[];
+
+int dhd_net_bus_devreset(struct net_device *dev, uint8 flag);
+static void dhd_net_if_lock_local(dhd_info_t *dhd);
+static void dhd_net_if_unlock_local(dhd_info_t *dhd);
+static void dhd_suspend_lock(dhd_pub_t *dhdp);
+static void dhd_suspend_unlock(dhd_pub_t *dhdp);
+
+/* Monitor interface */
+int dhd_monitor_init(void *dhd_pub);
+int dhd_monitor_uninit(void);
+
+#ifdef DHD_PM_CONTROL_FROM_FILE
+bool g_pm_control;
+#ifdef DHD_EXPORT_CNTL_FILE
+uint32 pmmode_val = 0xFF;
+#endif /* DHD_EXPORT_CNTL_FILE */
+#ifdef CUSTOMER_HW10
+void dhd_control_pm(dhd_pub_t *dhd, uint *);
+#else
+void sec_control_pm(dhd_pub_t *dhd, uint *);
+#endif /* CUSTOMER_HW10 */
+#endif /* DHD_PM_CONTROL_FROM_FILE */
+
+#if defined(WL_WIRELESS_EXT)
+struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev);
+#endif /* defined(WL_WIRELESS_EXT) */
+
+#ifdef DHD_PM_OVERRIDE
+bool g_pm_override;
+#endif /* DHD_PM_OVERRIDE */
+
+#ifndef BCMDBUS
+static void dhd_dpc(ulong data);
+#endif /* !BCMDBUS */
+/* forward decl */
+extern int dhd_wait_pend8021x(struct net_device *dev);
+void dhd_os_wd_timer_extend(void *bus, bool extend);
+
+#ifdef TOE
+#ifndef BDC
+#error TOE requires BDC
+#endif /* !BDC */
+static int dhd_toe_get(dhd_info_t *dhd, int idx, uint32 *toe_ol);
+static int dhd_toe_set(dhd_info_t *dhd, int idx, uint32 toe_ol);
+#endif /* TOE */
+
+static int dhd_wl_host_event(dhd_info_t *dhd, int ifidx, void *pktdata, uint16 pktlen,
+ wl_event_msg_t *event_ptr, void **data_ptr);
+
+#if defined(CONFIG_PM_SLEEP)
+static int dhd_pm_callback(struct notifier_block *nfb, unsigned long action, void *ignored)
+{
+ int ret = NOTIFY_DONE;
+ bool suspend = FALSE;
+ dhd_info_t *dhdinfo = (dhd_info_t*)container_of(nfb, const dhd_info_t, pm_notifier);
+ dhd_pub_t *dhd = &dhdinfo->pub;
+ struct dhd_conf *conf = dhd->conf;
+ int suspend_mode = conf->suspend_mode;
+
+ BCM_REFERENCE(dhdinfo);
+ BCM_REFERENCE(suspend);
+
+ switch (action) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_SUSPEND_PREPARE:
+ suspend = TRUE;
+ break;
+
+ case PM_POST_HIBERNATION:
+ case PM_POST_SUSPEND:
+ suspend = FALSE;
+ break;
+ }
+
+	if (!dhd->early_suspended && suspend_mode != PM_NOTIFIER) {
+ suspend_mode = PM_NOTIFIER;
+ conf->suspend_mode = PM_NOTIFIER;
+ conf->insuspend |= (NO_TXDATA_IN_SUSPEND | NO_TXCTL_IN_SUSPEND);
+ printf("%s: switch suspend_mode to %d\n", __FUNCTION__, suspend_mode);
+ }
+ printf("%s: action=%ld, suspend=%d, suspend_mode=%d\n",
+ __FUNCTION__, action, suspend, suspend_mode);
+ if (suspend) {
+ DHD_OS_WAKE_LOCK_WAIVE(dhd);
+ if (suspend_mode == PM_NOTIFIER)
+ dhd_suspend_resume_helper(dhdinfo, suspend, 0);
+#if defined(SUPPORT_P2P_GO_PS) && defined(PROP_TXSTATUS)
+ dhd_wlfc_suspend(dhd);
+#endif /* defined(SUPPORT_P2P_GO_PS) && defined(PROP_TXSTATUS) */
+ if (suspend_mode == PM_NOTIFIER || suspend_mode == SUSPEND_MODE_2)
+ dhd_conf_set_suspend_resume(dhd, suspend);
+ DHD_OS_WAKE_LOCK_RESTORE(dhd);
+ } else {
+ if (suspend_mode == PM_NOTIFIER || suspend_mode == SUSPEND_MODE_2)
+ dhd_conf_set_suspend_resume(dhd, suspend);
+#if defined(SUPPORT_P2P_GO_PS) && defined(PROP_TXSTATUS)
+ dhd_wlfc_resume(dhd);
+#endif /* defined(SUPPORT_P2P_GO_PS) && defined(PROP_TXSTATUS) */
+ if (suspend_mode == PM_NOTIFIER)
+ dhd_suspend_resume_helper(dhdinfo, suspend, 0);
+ }
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
+ KERNEL_VERSION(2, 6, 39))
+ dhd_mmc_suspend = suspend;
+ smp_mb();
+#endif
+
+ return ret;
+}
+
+/* To make sure we won't register the same notifier twice; otherwise a loop is likely to be
+ * created in the kernel notifier linked list (with 'next' pointing to itself)
+ */
+static bool dhd_pm_notifier_registered = FALSE;
+
+extern int register_pm_notifier(struct notifier_block *nb);
+extern int unregister_pm_notifier(struct notifier_block *nb);
+#endif /* CONFIG_PM_SLEEP */
+
+/* Request scheduling of the bus rx frame */
+static void dhd_sched_rxf(dhd_pub_t *dhdp, void *skb);
+static void dhd_os_rxflock(dhd_pub_t *pub);
+static void dhd_os_rxfunlock(dhd_pub_t *pub);
+
+#if defined(DHD_H2D_LOG_TIME_SYNC)
+static void
+dhd_deferred_work_rte_log_time_sync(void *handle, void *event_info, u8 event);
+#endif /* DHD_H2D_LOG_TIME_SYNC */
+
+/** priv_link is the link between netdev and the dhdif and dhd_info structs. */
+typedef struct dhd_dev_priv {
+ dhd_info_t * dhd; /* cached pointer to dhd_info in netdevice priv */
+ dhd_if_t * ifp; /* cached pointer to dhd_if in netdevice priv */
+ int ifidx; /* interface index */
+ void * lkup;
+} dhd_dev_priv_t;
+
+#define DHD_DEV_PRIV_SIZE (sizeof(dhd_dev_priv_t))
+#define DHD_DEV_PRIV(dev) ((dhd_dev_priv_t *)DEV_PRIV(dev))
+#define DHD_DEV_INFO(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->dhd)
+#define DHD_DEV_IFP(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifp)
+#define DHD_DEV_IFIDX(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifidx)
+#define DHD_DEV_LKUP(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->lkup)
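+/* Usage sketch: after dhd_dev_priv_save() has populated a net_device's
+ * private area, the owning driver state is recovered with, e.g.:
+ *   dhd_info_t *dhd = DHD_DEV_INFO(net);
+ *   int ifidx = DHD_DEV_IFIDX(net);
+ */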
+
+/** Clear the dhd net_device's private structure. */
+static inline void
+dhd_dev_priv_clear(struct net_device * dev)
+{
+ dhd_dev_priv_t * dev_priv;
+ ASSERT(dev != (struct net_device *)NULL);
+ dev_priv = DHD_DEV_PRIV(dev);
+ dev_priv->dhd = (dhd_info_t *)NULL;
+ dev_priv->ifp = (dhd_if_t *)NULL;
+ dev_priv->ifidx = DHD_BAD_IF;
+ dev_priv->lkup = (void *)NULL;
+}
+
+/** Setup the dhd net_device's private structure. */
+static inline void
+dhd_dev_priv_save(struct net_device * dev, dhd_info_t * dhd, dhd_if_t * ifp,
+ int ifidx)
+{
+ dhd_dev_priv_t * dev_priv;
+ ASSERT(dev != (struct net_device *)NULL);
+ dev_priv = DHD_DEV_PRIV(dev);
+ dev_priv->dhd = dhd;
+ dev_priv->ifp = ifp;
+ dev_priv->ifidx = ifidx;
+}
+
+/* Return interface pointer */
+struct dhd_if * dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx)
+{
+ ASSERT(ifidx < DHD_MAX_IFS);
+
+ if (!dhdp || !dhdp->info || ifidx >= DHD_MAX_IFS)
+ return NULL;
+
+ return dhdp->info->iflist[ifidx];
+}
+
+#ifdef WLEASYMESH
+int
+dhd_set_1905_almac(dhd_pub_t *dhdp, uint8 ifidx, uint8* ea, bool mcast)
+{
+ dhd_if_t *ifp;
+
+ ASSERT(ea != NULL);
+ ifp = dhd_get_ifp(dhdp, ifidx);
+ if (ifp == NULL) {
+ return BCME_ERROR;
+ }
+ if (mcast) {
+ memcpy(ifp->_1905_al_mcast, ea, ETHER_ADDR_LEN);
+ } else {
+ memcpy(ifp->_1905_al_ucast, ea, ETHER_ADDR_LEN);
+ }
+ return BCME_OK;
+}
+int
+dhd_get_1905_almac(dhd_pub_t *dhdp, uint8 ifidx, uint8* ea, bool mcast)
+{
+ dhd_if_t *ifp;
+
+ ASSERT(ea != NULL);
+ ifp = dhd_get_ifp(dhdp, ifidx);
+ if (ifp == NULL) {
+ return BCME_ERROR;
+ }
+ if (mcast) {
+ memcpy(ea, ifp->_1905_al_mcast, ETHER_ADDR_LEN);
+ } else {
+ memcpy(ea, ifp->_1905_al_ucast, ETHER_ADDR_LEN);
+ }
+ return BCME_OK;
+}
+#endif /* WLEASYMESH */
+
+#ifdef PCIE_FULL_DONGLE
+
+/** Dummy objects are defined with state representing bad|down.
+ * Performance gains from reducing branch conditionals, instruction parallelism,
+ * dual issue, reducing load shadows, and availing of larger pipelines.
+ * Use DHD_XXX_NULL instead of (dhd_xxx_t *)NULL, whenever an object pointer
+ * is accessed via the dhd_sta_t.
+ */
+
+/* Dummy dhd_info object */
+dhd_info_t dhd_info_null = {
+ .pub = {
+ .info = &dhd_info_null,
+#ifdef DHDTCPACK_SUPPRESS
+ .tcpack_sup_mode = TCPACK_SUP_REPLACE,
+#endif /* DHDTCPACK_SUPPRESS */
+#if defined(BCM_ROUTER_DHD)
+ .dhd_tm_dwm_tbl = { .dhd_dwm_enabled = TRUE },
+#endif
+ .up = FALSE,
+ .busstate = DHD_BUS_DOWN
+ }
+};
+#define DHD_INFO_NULL (&dhd_info_null)
+#define DHD_PUB_NULL (&dhd_info_null.pub)
+
+/* Dummy netdevice object */
+struct net_device dhd_net_dev_null = {
+ .reg_state = NETREG_UNREGISTERED
+};
+#define DHD_NET_DEV_NULL (&dhd_net_dev_null)
+
+/* Dummy dhd_if object */
+dhd_if_t dhd_if_null = {
+#ifdef WMF
+ .wmf = { .wmf_enable = TRUE },
+#endif
+ .info = DHD_INFO_NULL,
+ .net = DHD_NET_DEV_NULL,
+ .idx = DHD_BAD_IF
+};
+#define DHD_IF_NULL (&dhd_if_null)
+
+/* XXX should we use the sta_pool[0] object as DHD_STA_NULL? */
+#define DHD_STA_NULL ((dhd_sta_t *)NULL)
+
+/** Interface STA list management. */
+
+/** Alloc/Free a dhd_sta object from the dhd instances' sta_pool. */
+static void dhd_sta_free(dhd_pub_t *pub, dhd_sta_t *sta);
+static dhd_sta_t * dhd_sta_alloc(dhd_pub_t * dhdp);
+
+/* Delete a dhd_sta or flush all dhd_sta in an interface's sta_list. */
+static void dhd_if_del_sta_list(dhd_if_t * ifp);
+
+/* Construct/Destruct a sta pool. */
+static int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta);
+static void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta);
+/* Clear the pool of dhd_sta_t objects for built-in type driver */
+static void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta);
+
+/** Reset a dhd_sta object and free into the dhd pool. */
+static void
+dhd_sta_free(dhd_pub_t * dhdp, dhd_sta_t * sta)
+{
+ int prio;
+
+ ASSERT((sta != DHD_STA_NULL) && (sta->idx != ID16_INVALID));
+
+ ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));
+
+ /*
+	 * Flush and free all packets in the queues of all flowrings belonging to the sta.
+	 * Packets already in the flow rings themselves will be flushed later.
+ */
+ for (prio = 0; prio < (int)NUMPRIO; prio++) {
+ uint16 flowid = sta->flowid[prio];
+
+ if (flowid != FLOWID_INVALID) {
+ unsigned long flags;
+ flow_ring_node_t * flow_ring_node;
+
+#ifdef DHDTCPACK_SUPPRESS
+ /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
+ * when there is a newly coming packet from network stack.
+ */
+ dhd_tcpack_info_tbl_clean(dhdp);
+#endif /* DHDTCPACK_SUPPRESS */
+
+ flow_ring_node = dhd_flow_ring_node(dhdp, flowid);
+ if (flow_ring_node) {
+ flow_queue_t *queue = &flow_ring_node->queue;
+
+ DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
+ flow_ring_node->status = FLOW_RING_STATUS_STA_FREEING;
+
+ if (!DHD_FLOW_QUEUE_EMPTY(queue)) {
+ void * pkt;
+ while ((pkt = dhd_flow_queue_dequeue(dhdp, queue)) !=
+ NULL) {
+ PKTFREE(dhdp->osh, pkt, TRUE);
+ }
+ }
+
+ DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+ ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
+ }
+ }
+
+ sta->flowid[prio] = FLOWID_INVALID;
+ }
+
+ id16_map_free(dhdp->staid_allocator, sta->idx);
+ DHD_CUMM_CTR_INIT(&sta->cumm_ctr);
+ sta->ifp = DHD_IF_NULL; /* dummy dhd_if object */
+ sta->ifidx = DHD_BAD_IF;
+ bzero(sta->ea.octet, ETHER_ADDR_LEN);
+ INIT_LIST_HEAD(&sta->list);
+ sta->idx = ID16_INVALID; /* implying free */
+}
+
+/** Allocate a dhd_sta object from the dhd pool. */
+static dhd_sta_t *
+dhd_sta_alloc(dhd_pub_t * dhdp)
+{
+ uint16 idx;
+ dhd_sta_t * sta;
+ dhd_sta_pool_t * sta_pool;
+
+ ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));
+
+ idx = id16_map_alloc(dhdp->staid_allocator);
+ if (idx == ID16_INVALID) {
+ DHD_ERROR(("%s: cannot get free staid\n", __FUNCTION__));
+ return DHD_STA_NULL;
+ }
+
+ sta_pool = (dhd_sta_pool_t *)(dhdp->sta_pool);
+ sta = &sta_pool[idx];
+
+ ASSERT((sta->idx == ID16_INVALID) &&
+ (sta->ifp == DHD_IF_NULL) && (sta->ifidx == DHD_BAD_IF));
+
+ DHD_CUMM_CTR_INIT(&sta->cumm_ctr);
+
+ sta->idx = idx; /* implying allocated */
+
+ return sta;
+}
+
+/** Delete all STAs in an interface's STA list. */
+static void
+dhd_if_del_sta_list(dhd_if_t *ifp)
+{
+ dhd_sta_t *sta, *next;
+ unsigned long flags;
+
+ DHD_IF_STA_LIST_LOCK(&ifp->sta_list_lock, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
+ GCC_DIAGNOSTIC_POP();
+ list_del(&sta->list);
+ dhd_sta_free(&ifp->info->pub, sta);
+ }
+
+ DHD_IF_STA_LIST_UNLOCK(&ifp->sta_list_lock, flags);
+
+ return;
+}
+
+/** Construct a pool of dhd_sta_t objects to be used by interfaces. */
+static int
+dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta)
+{
+ int idx, prio, sta_pool_memsz;
+ dhd_sta_t * sta;
+ dhd_sta_pool_t * sta_pool;
+ void * staid_allocator;
+
+ ASSERT(dhdp != (dhd_pub_t *)NULL);
+ ASSERT((dhdp->staid_allocator == NULL) && (dhdp->sta_pool == NULL));
+
+ /* dhd_sta objects per radio are managed in a table. id#0 reserved. */
+ staid_allocator = id16_map_init(dhdp->osh, max_sta, 1);
+ if (staid_allocator == NULL) {
+ DHD_ERROR(("%s: sta id allocator init failure\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ /* Pre allocate a pool of dhd_sta objects (one extra). */
+ sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t)); /* skip idx 0 */
+ sta_pool = (dhd_sta_pool_t *)MALLOC(dhdp->osh, sta_pool_memsz);
+ if (sta_pool == NULL) {
+ DHD_ERROR(("%s: sta table alloc failure\n", __FUNCTION__));
+ id16_map_fini(dhdp->osh, staid_allocator);
+ return BCME_ERROR;
+ }
+
+ dhdp->sta_pool = sta_pool;
+ dhdp->staid_allocator = staid_allocator;
+
+ /* Initialize all sta(s) for the pre-allocated free pool. */
+ bzero((uchar *)sta_pool, sta_pool_memsz);
+ for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
+ sta = &sta_pool[idx];
+ sta->idx = id16_map_alloc(staid_allocator);
+ ASSERT(sta->idx <= max_sta);
+ }
+
+ /* Now place them into the pre-allocated free pool. */
+ for (idx = 1; idx <= max_sta; idx++) {
+ sta = &sta_pool[idx];
+ for (prio = 0; prio < (int)NUMPRIO; prio++) {
+ sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */
+ }
+ dhd_sta_free(dhdp, sta);
+ }
+
+ return BCME_OK;
+}
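+/* Sizing example: with max_sta == 8 the pool holds 9 dhd_sta_t entries
+ * (index 0 unused) and hands out ids 1..8; the two loops above first reserve
+ * every id and then release each entry through dhd_sta_free(), so the free
+ * pool starts fully populated.
+ */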
+
+/** Destruct the pool of dhd_sta_t objects.
+ * Caller must ensure that no STA objects are currently associated with an if.
+ */
+static void
+dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta)
+{
+ dhd_sta_pool_t * sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;
+
+ if (sta_pool) {
+ int idx;
+ int sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
+ for (idx = 1; idx <= max_sta; idx++) {
+ ASSERT(sta_pool[idx].ifp == DHD_IF_NULL);
+ ASSERT(sta_pool[idx].idx == ID16_INVALID);
+ }
+ MFREE(dhdp->osh, dhdp->sta_pool, sta_pool_memsz);
+ }
+
+ id16_map_fini(dhdp->osh, dhdp->staid_allocator);
+ dhdp->staid_allocator = NULL;
+}
+
+/* Clear the pool of dhd_sta_t objects for built-in type driver */
+static void
+dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta)
+{
+ int idx, prio, sta_pool_memsz;
+ dhd_sta_t * sta;
+ dhd_sta_pool_t * sta_pool;
+ void *staid_allocator;
+
+ if (!dhdp) {
+ DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;
+ staid_allocator = dhdp->staid_allocator;
+
+ if (!sta_pool) {
+ DHD_ERROR(("%s: sta_pool is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ if (!staid_allocator) {
+ DHD_ERROR(("%s: staid_allocator is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ /* clear free pool */
+ sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
+ bzero((uchar *)sta_pool, sta_pool_memsz);
+
+ /* dhd_sta objects per radio are managed in a table. id#0 reserved. */
+ id16_map_clear(staid_allocator, max_sta, 1);
+
+ /* Initialize all sta(s) for the pre-allocated free pool. */
+ for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
+ sta = &sta_pool[idx];
+ sta->idx = id16_map_alloc(staid_allocator);
+ ASSERT(sta->idx <= max_sta);
+ }
+ /* Now place them into the pre-allocated free pool. */
+ for (idx = 1; idx <= max_sta; idx++) {
+ sta = &sta_pool[idx];
+ for (prio = 0; prio < (int)NUMPRIO; prio++) {
+ sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */
+ }
+ dhd_sta_free(dhdp, sta);
+ }
+}
+
+/** Find STA with MAC address ea in an interface's STA list. */
+dhd_sta_t *
+dhd_find_sta(void *pub, int ifidx, void *ea)
+{
+ dhd_sta_t *sta;
+ dhd_if_t *ifp;
+ unsigned long flags;
+
+ ASSERT(ea != NULL);
+ ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
+ if (ifp == NULL)
+ return DHD_STA_NULL;
+
+ DHD_IF_STA_LIST_LOCK(&ifp->sta_list_lock, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ list_for_each_entry(sta, &ifp->sta_list, list) {
+ GCC_DIAGNOSTIC_POP();
+ if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
+ DHD_INFO(("%s: Found STA " MACDBG "\n",
+ __FUNCTION__, MAC2STRDBG((char *)ea)));
+ DHD_IF_STA_LIST_UNLOCK(&ifp->sta_list_lock, flags);
+ return sta;
+ }
+ }
+
+ DHD_IF_STA_LIST_UNLOCK(&ifp->sta_list_lock, flags);
+
+ return DHD_STA_NULL;
+}
+
+/** Add STA into the interface's STA list. */
+dhd_sta_t *
+dhd_add_sta(void *pub, int ifidx, void *ea)
+{
+ dhd_sta_t *sta;
+ dhd_if_t *ifp;
+ unsigned long flags;
+
+ ASSERT(ea != NULL);
+ ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
+ if (ifp == NULL)
+ return DHD_STA_NULL;
+
+ if (!memcmp(ifp->net->dev_addr, ea, ETHER_ADDR_LEN)) {
+ DHD_ERROR(("%s: Serious FAILURE, receive own MAC %pM !!\n", __FUNCTION__, ea));
+ return DHD_STA_NULL;
+ }
+
+ sta = dhd_sta_alloc((dhd_pub_t *)pub);
+ if (sta == DHD_STA_NULL) {
+ DHD_ERROR(("%s: Alloc failed\n", __FUNCTION__));
+ return DHD_STA_NULL;
+ }
+
+ memcpy(sta->ea.octet, ea, ETHER_ADDR_LEN);
+
+ /* link the sta and the dhd interface */
+ sta->ifp = ifp;
+ sta->ifidx = ifidx;
+#ifdef DHD_WMF
+ sta->psta_prim = NULL;
+#endif
+ INIT_LIST_HEAD(&sta->list);
+
+ DHD_IF_STA_LIST_LOCK(&ifp->sta_list_lock, flags);
+
+ list_add_tail(&sta->list, &ifp->sta_list);
+
+ DHD_ERROR(("%s: Adding STA " MACDBG "\n",
+ __FUNCTION__, MAC2STRDBG((char *)ea)));
+
+ DHD_IF_STA_LIST_UNLOCK(&ifp->sta_list_lock, flags);
+
+ return sta;
+}
+
+/** Delete all STAs from the interface's STA list. */
+void
+dhd_del_all_sta(void *pub, int ifidx)
+{
+ dhd_sta_t *sta, *next;
+ dhd_if_t *ifp;
+ unsigned long flags;
+
+ ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
+ if (ifp == NULL)
+ return;
+
+ DHD_IF_STA_LIST_LOCK(&ifp->sta_list_lock, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
+ GCC_DIAGNOSTIC_POP();
+ list_del(&sta->list);
+ dhd_sta_free(&ifp->info->pub, sta);
+#ifdef DHD_L2_FILTER
+ if (ifp->parp_enable) {
+ /* clear Proxy ARP cache of specific Ethernet Address */
+ bcm_l2_filter_arp_table_update(((dhd_pub_t*)pub)->osh,
+ ifp->phnd_arp_table, FALSE,
+ sta->ea.octet, FALSE, ((dhd_pub_t*)pub)->tickcnt);
+ }
+#endif /* DHD_L2_FILTER */
+ }
+
+ DHD_IF_STA_LIST_UNLOCK(&ifp->sta_list_lock, flags);
+
+ return;
+}
+
+/** Delete STA from the interface's STA list. */
+void
+dhd_del_sta(void *pub, int ifidx, void *ea)
+{
+ dhd_sta_t *sta, *next;
+ dhd_if_t *ifp;
+ unsigned long flags;
+
+ ASSERT(ea != NULL);
+ ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
+ if (ifp == NULL)
+ return;
+
+ DHD_IF_STA_LIST_LOCK(&ifp->sta_list_lock, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
+ GCC_DIAGNOSTIC_POP();
+ if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
+ DHD_ERROR(("%s: Deleting STA " MACDBG "\n",
+ __FUNCTION__, MAC2STRDBG(sta->ea.octet)));
+ list_del(&sta->list);
+ dhd_sta_free(&ifp->info->pub, sta);
+ }
+ }
+
+ DHD_IF_STA_LIST_UNLOCK(&ifp->sta_list_lock, flags);
+#ifdef DHD_L2_FILTER
+ if (ifp->parp_enable) {
+ /* clear Proxy ARP cache of specific Ethernet Address */
+ bcm_l2_filter_arp_table_update(((dhd_pub_t*)pub)->osh, ifp->phnd_arp_table, FALSE,
+ ea, FALSE, ((dhd_pub_t*)pub)->tickcnt);
+ }
+#endif /* DHD_L2_FILTER */
+ return;
+}
+
+/** Add STA if it doesn't exist. Not reentrant. */
+dhd_sta_t*
+dhd_findadd_sta(void *pub, int ifidx, void *ea)
+{
+ dhd_sta_t *sta;
+
+ sta = dhd_find_sta(pub, ifidx, ea);
+
+ if (!sta) {
+ /* Add entry */
+ sta = dhd_add_sta(pub, ifidx, ea);
+ }
+
+ return sta;
+}
+
+#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
+static struct list_head *
+dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp, struct list_head *snapshot_list)
+{
+ unsigned long flags;
+ dhd_sta_t *sta, *snapshot;
+
+ INIT_LIST_HEAD(snapshot_list);
+
+ DHD_IF_STA_LIST_LOCK(&ifp->sta_list_lock, flags);
+
+ list_for_each_entry(sta, &ifp->sta_list, list) {
+ /* allocate one and add to snapshot */
+ snapshot = (dhd_sta_t *)MALLOC(dhd->pub.osh, sizeof(dhd_sta_t));
+ if (snapshot == NULL) {
+ DHD_ERROR(("%s: Cannot allocate memory\n", __FUNCTION__));
+ continue;
+ }
+
+ memcpy(snapshot->ea.octet, sta->ea.octet, ETHER_ADDR_LEN);
+
+ INIT_LIST_HEAD(&snapshot->list);
+ list_add_tail(&snapshot->list, snapshot_list);
+ }
+
+ DHD_IF_STA_LIST_UNLOCK(&ifp->sta_list_lock, flags);
+
+ return snapshot_list;
+}
+
+static void
+dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list)
+{
+ dhd_sta_t *sta, *next;
+
+ list_for_each_entry_safe(sta, next, snapshot_list, list) {
+ list_del(&sta->list);
+ MFREE(dhd->pub.osh, sta, sizeof(dhd_sta_t));
+ }
+}
+#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */
+
+#else
+static inline void dhd_if_del_sta_list(dhd_if_t *ifp) {}
+static inline int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta) { return BCME_OK; }
+static inline void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta) {}
+static inline void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta) {}
+dhd_sta_t *dhd_findadd_sta(void *pub, int ifidx, void *ea) { return NULL; }
+dhd_sta_t *dhd_find_sta(void *pub, int ifidx, void *ea) { return NULL; }
+void dhd_del_sta(void *pub, int ifidx, void *ea) {}
+#endif /* PCIE_FULL_DONGLE */
+
+#ifdef BCM_ROUTER_DHD
+/** Bind a flowid to the dhd_sta's flowid table. */
+void
+dhd_add_flowid(dhd_pub_t * dhdp, int ifidx, uint8 ac_prio, void * ea,
+ uint16 flowid)
+{
+ int prio;
+ dhd_if_t * ifp;
+ dhd_sta_t * sta;
+ flow_queue_t * queue;
+
+ ASSERT((dhdp != (dhd_pub_t *)NULL) && (ea != NULL));
+
+ /* Fetch the dhd_if object given the if index */
+ ifp = dhd_get_ifp(dhdp, ifidx);
+ if (ifp == (dhd_if_t *)NULL) /* ifp fetched from dhdp iflist[] */
+ return;
+
+	/* Initializing the backup queue parameters */
+ if (DHD_IF_ROLE_WDS(dhdp, ifidx) ||
+#ifdef DHD_WET
+ WET_ENABLED(dhdp) ||
+#endif /* DHD_WET */
+ 0) {
+ queue = dhd_flow_queue(dhdp, flowid);
+ dhd_flow_ring_config_thresholds(dhdp, flowid,
+ dhd_queue_budget, queue->max, DHD_FLOW_QUEUE_CLEN_PTR(queue),
+ dhd_if_threshold, (void *)&ifp->cumm_ctr);
+ return;
+ } else if ((sta = dhd_find_sta(dhdp, ifidx, ea)) == DHD_STA_NULL) {
+ /* Fetch the station with a matching Mac address. */
+		/* Update queue's grandparent cumulative length threshold */
+ if (ETHER_ISMULTI((char *)ea)) {
+ queue = dhd_flow_queue(dhdp, flowid);
+ if (ifidx != 0 && DHD_IF_ROLE_STA(dhdp, ifidx)) {
+ /* Use default dhdp->cumm_ctr and dhdp->l2cumm_ctr,
+ * in PSTA mode the ifp will be deleted but we don't delete
+ * the PSTA flowring.
+ */
+ dhd_flow_ring_config_thresholds(dhdp, flowid,
+ queue->max, queue->max, DHD_FLOW_QUEUE_CLEN_PTR(queue),
+ dhd_if_threshold, DHD_FLOW_QUEUE_L2CLEN_PTR(queue));
+ }
+ else if (DHD_FLOW_QUEUE_L2CLEN_PTR(queue) != (void *)&ifp->cumm_ctr) {
+ dhd_flow_ring_config_thresholds(dhdp, flowid,
+ queue->max, queue->max, DHD_FLOW_QUEUE_CLEN_PTR(queue),
+ dhd_if_threshold, (void *)&ifp->cumm_ctr);
+ }
+ }
+ return;
+ }
+
+	/* Set queue's min budget and queue's parent cumulative length threshold */
+ dhd_flow_ring_config_thresholds(dhdp, flowid, dhd_queue_budget,
+ dhd_sta_threshold, (void *)&sta->cumm_ctr,
+ dhd_if_threshold, (void *)&ifp->cumm_ctr);
+
+	/* Populate the flowid into the station's flowid table, for all packet
+ * priorities that would match the given flow's ac priority.
+ */
+ for (prio = 0; prio < (int)NUMPRIO; prio++) {
+ if (dhdp->flow_prio_map[prio] == ac_prio) {
+ /* flowring shared for all these pkt prio */
+ sta->flowid[prio] = flowid;
+ }
+ }
+}
+
+/** Unbind a flowid from the sta's flowid table. */
+void
+dhd_del_flowid(dhd_pub_t * dhdp, int ifidx, uint16 flowid)
+{
+ int prio;
+ dhd_if_t * ifp;
+ dhd_sta_t * sta;
+ unsigned long flags;
+
+ /* Fetch the dhd_if object given the if index */
+ ifp = dhd_get_ifp(dhdp, ifidx);
+ if (ifp == (dhd_if_t *)NULL) /* ifp fetched from dhdp iflist[] */
+ return;
+
+	/* Walk all stations and clear any station's reference to the flowid */
+ DHD_IF_STA_LIST_LOCK(&ifp->sta_list_lock, flags);
+
+ list_for_each_entry(sta, &ifp->sta_list, list) {
+ for (prio = 0; prio < (int)NUMPRIO; prio++) {
+ if (sta->flowid[prio] == flowid) {
+ sta->flowid[prio] = FLOWID_INVALID;
+ }
+ }
+ }
+
+ DHD_IF_STA_LIST_UNLOCK(&ifp->sta_list_lock, flags);
+}
+#endif /* BCM_ROUTER_DHD */
+
+#if defined(DNGL_AXI_ERROR_LOGGING) && defined(DHD_USE_WQ_FOR_DNGL_AXI_ERROR)
+void
+dhd_axi_error_dispatch(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd = dhdp->info;
+ schedule_work(&dhd->axi_error_dispatcher_work);
+}
+
+static void dhd_axi_error_dispatcher_fn(struct work_struct * work)
+{
+ struct dhd_info *dhd =
+ container_of(work, struct dhd_info, axi_error_dispatcher_work);
+ dhd_axi_error(&dhd->pub);
+}
+#endif /* DNGL_AXI_ERROR_LOGGING && DHD_USE_WQ_FOR_DNGL_AXI_ERROR */
+
+/** Returns the dhd iflist index corresponding to the bssidx provided by apps */
+int dhd_bssidx2idx(dhd_pub_t *dhdp, uint32 bssidx)
+{
+ dhd_if_t *ifp;
+ dhd_info_t *dhd = dhdp->info;
+ int i;
+
+ ASSERT(bssidx < DHD_MAX_IFS);
+ ASSERT(dhdp);
+
+ for (i = 0; i < DHD_MAX_IFS; i++) {
+ ifp = dhd->iflist[i];
+ if (ifp && (ifp->bssidx == bssidx)) {
+ DHD_TRACE(("Index manipulated for %s from %d to %d\n",
+ ifp->name, bssidx, i));
+ break;
+ }
+ }
+ return i;
+}
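+/* Note: if no interface matches, the loop above falls through and
+ * i == DHD_MAX_IFS is returned; callers are expected to treat that value
+ * as "not found".
+ */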
+
+static inline int dhd_rxf_enqueue(dhd_pub_t *dhdp, void* skb)
+{
+ uint32 store_idx;
+ uint32 sent_idx;
+
+ if (!skb) {
+ DHD_ERROR(("dhd_rxf_enqueue: NULL skb!!!\n"));
+ return BCME_ERROR;
+ }
+
+ dhd_os_rxflock(dhdp);
+ store_idx = dhdp->store_idx;
+ sent_idx = dhdp->sent_idx;
+ if (dhdp->skbbuf[store_idx] != NULL) {
+ /* Make sure the previous packets are processed */
+ dhd_os_rxfunlock(dhdp);
+#ifdef RXF_DEQUEUE_ON_BUSY
+ DHD_TRACE(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
+ skb, store_idx, sent_idx));
+ return BCME_BUSY;
+#else /* RXF_DEQUEUE_ON_BUSY */
+ DHD_ERROR(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
+ skb, store_idx, sent_idx));
+ /* removed msleep here, should use wait_event_timeout if we
+ * want to give rx frame thread a chance to run
+ */
+#if defined(WAIT_DEQUEUE)
+ OSL_SLEEP(1);
+#endif
+ return BCME_ERROR;
+#endif /* RXF_DEQUEUE_ON_BUSY */
+ }
+ DHD_TRACE(("dhd_rxf_enqueue: Store SKB %p. idx %d -> %d\n",
+ skb, store_idx, (store_idx + 1) & (MAXSKBPEND - 1)));
+ dhdp->skbbuf[store_idx] = skb;
+ dhdp->store_idx = (store_idx + 1) & (MAXSKBPEND - 1);
+ dhd_os_rxfunlock(dhdp);
+
+ return BCME_OK;
+}
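+/* Index-math note: store_idx/sent_idx wrap with '& (MAXSKBPEND - 1)', which
+ * assumes MAXSKBPEND is a power of two; e.g. with MAXSKBPEND == 8, index 7
+ * advances to (7 + 1) & 7 == 0.
+ */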
+
+static inline void* dhd_rxf_dequeue(dhd_pub_t *dhdp)
+{
+ uint32 store_idx;
+ uint32 sent_idx;
+ void *skb;
+
+ dhd_os_rxflock(dhdp);
+
+ store_idx = dhdp->store_idx;
+ sent_idx = dhdp->sent_idx;
+ skb = dhdp->skbbuf[sent_idx];
+
+ if (skb == NULL) {
+ dhd_os_rxfunlock(dhdp);
+ DHD_ERROR(("dhd_rxf_dequeue: Dequeued packet is NULL, store idx %d sent idx %d\n",
+ store_idx, sent_idx));
+ return NULL;
+ }
+
+ dhdp->skbbuf[sent_idx] = NULL;
+ dhdp->sent_idx = (sent_idx + 1) & (MAXSKBPEND - 1);
+
+ DHD_TRACE(("dhd_rxf_dequeue: netif_rx_ni(%p), sent idx %d\n",
+ skb, sent_idx));
+
+ dhd_os_rxfunlock(dhdp);
+
+ return skb;
+}
+
+int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost)
+{
+#if defined(BCMSDIO) || defined(BCMPCIE)
+ uint chipid = dhd_bus_chip_id(dhdp);
+ int ret = BCME_OK;
+ if (prepost) { /* pre process */
+ ret = dhd_alloc_cis(dhdp);
+ if (ret != BCME_OK) {
+ return ret;
+ }
+ switch (chipid) {
+#ifndef DHD_READ_CIS_FROM_BP
+ case BCM4389_CHIP_GRPID:
+			/* BCM4389B0 or higher rev uses the new otp iovar */
+ dhd_read_otp_sw_rgn(dhdp);
+ break;
+#endif /* !DHD_READ_CIS_FROM_BP */
+ default:
+ dhd_read_cis(dhdp);
+ break;
+ }
+ dhd_check_module_cid(dhdp);
+ dhd_check_module_mac(dhdp);
+ dhd_set_macaddr_from_file(dhdp);
+ } else { /* post process */
+ dhd_write_macaddr(&dhdp->mac);
+ dhd_clear_cis(dhdp);
+ }
+#endif
+
+ return BCME_OK;
+}
+
+// terence 20160615: fix building error if ARP_OFFLOAD_SUPPORT removed
+#if defined(PKT_FILTER_SUPPORT)
+#if defined(ARP_OFFLOAD_SUPPORT) && !defined(GAN_LITE_NAT_KEEPALIVE_FILTER)
+static bool
+_turn_on_arp_filter(dhd_pub_t *dhd, int op_mode_param)
+{
+ bool _apply = FALSE;
+ /* In case of IBSS mode, apply arp pkt filter */
+ if (op_mode_param & DHD_FLAG_IBSS_MODE) {
+ _apply = TRUE;
+ goto exit;
+ }
+ /* In case of P2P GO or GC, apply pkt filter to pass arp pkt to host */
+ if (op_mode_param & (DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE)) {
+ _apply = TRUE;
+ goto exit;
+ }
+
+exit:
+ return _apply;
+}
+#endif /* ARP_OFFLOAD_SUPPORT && !GAN_LITE_NAT_KEEPALIVE_FILTER */
+
+void
+dhd_set_packet_filter(dhd_pub_t *dhd)
+{
+ int i;
+
+ DHD_TRACE(("%s: enter\n", __FUNCTION__));
+ if (dhd_pkt_filter_enable) {
+ for (i = 0; i < dhd->pktfilter_count; i++) {
+ dhd_pktfilter_offload_set(dhd, dhd->pktfilter[i]);
+ }
+ }
+}
+
+void
+dhd_enable_packet_filter(int value, dhd_pub_t *dhd)
+{
+ int i;
+
+ DHD_ERROR(("%s: enter, value = %d\n", __FUNCTION__, value));
+ if ((dhd->op_mode & DHD_FLAG_HOSTAP_MODE) && value &&
+ !dhd_conf_get_insuspend(dhd, AP_FILTER_IN_SUSPEND)) {
+ DHD_ERROR(("%s: DHD_FLAG_HOSTAP_MODE\n", __FUNCTION__));
+ return;
+ }
+	/* 1 - Enable packet filter, only allow unicast packets to be sent up */
+ /* 0 - Disable packet filter */
+ if (dhd_pkt_filter_enable && (!value ||
+ (dhd_support_sta_mode(dhd) && !dhd->dhcp_in_progress) ||
+ dhd_conf_get_insuspend(dhd, AP_FILTER_IN_SUSPEND)))
+ {
+ for (i = 0; i < dhd->pktfilter_count; i++) {
+// terence 20160615: fix building error if ARP_OFFLOAD_SUPPORT removed
+#if defined(ARP_OFFLOAD_SUPPORT) && !defined(GAN_LITE_NAT_KEEPALIVE_FILTER)
+ if (value && (i == DHD_ARP_FILTER_NUM) &&
+ !_turn_on_arp_filter(dhd, dhd->op_mode)) {
+ DHD_TRACE(("Do not turn on ARP white list pkt filter:"
+ "val %d, cnt %d, op_mode 0x%x\n",
+ value, i, dhd->op_mode));
+ continue;
+ }
+#endif /* ARP_OFFLOAD_SUPPORT && !GAN_LITE_NAT_KEEPALIVE_FILTER */
+#ifdef APSTA_BLOCK_ARP_DURING_DHCP
+ if (value && (i == DHD_BROADCAST_ARP_FILTER_NUM) &&
+ dhd->pktfilter[DHD_BROADCAST_ARP_FILTER_NUM]) {
+ /* XXX: BROADCAST_ARP_FILTER is only for the
+ * STA/SoftAP concurrent mode (Please refer to RB:90348)
+ * Remove the filter for other cases explicitly
+ */
+ DHD_ERROR(("%s: Remove the DHD_BROADCAST_ARP_FILTER\n",
+ __FUNCTION__));
+ dhd_packet_filter_add_remove(dhd, FALSE,
+ DHD_BROADCAST_ARP_FILTER_NUM);
+ }
+#endif /* APSTA_BLOCK_ARP_DURING_DHCP */
+ dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i],
+ value, dhd_master_mode);
+ }
+ }
+}
+
+int
+dhd_packet_filter_add_remove(dhd_pub_t *dhdp, int add_remove, int num)
+{
+ char *filterp = NULL;
+ int filter_id = 0;
+
+ switch (num) {
+ case DHD_BROADCAST_FILTER_NUM:
+ filterp = "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF";
+ filter_id = 101;
+ break;
+ case DHD_MULTICAST4_FILTER_NUM:
+ filter_id = 102;
+ if (FW_SUPPORTED((dhdp), pf6)) {
+ if (dhdp->pktfilter[num] != NULL) {
+ dhd_pktfilter_offload_delete(dhdp, filter_id);
+ dhdp->pktfilter[num] = NULL;
+ }
+ if (!add_remove) {
+ filterp = DISCARD_IPV4_MCAST;
+ add_remove = 1;
+ break;
+ }
+		} /* XXX: intentionally omitting the else case */
+ filterp = "102 0 0 0 0xFFFFFF 0x01005E";
+ break;
+ case DHD_MULTICAST6_FILTER_NUM:
+ filter_id = 103;
+ if (FW_SUPPORTED((dhdp), pf6)) {
+ if (dhdp->pktfilter[num] != NULL) {
+ dhd_pktfilter_offload_delete(dhdp, filter_id);
+ dhdp->pktfilter[num] = NULL;
+ }
+ if (!add_remove) {
+ filterp = DISCARD_IPV6_MCAST;
+ add_remove = 1;
+ break;
+ }
+		} /* XXX: intentionally omitting the else case */
+ filterp = "103 0 0 0 0xFFFF 0x3333";
+ break;
+ case DHD_MDNS_FILTER_NUM:
+ filterp = "104 0 0 0 0xFFFFFFFFFFFF 0x01005E0000FB";
+ filter_id = 104;
+ break;
+ case DHD_ARP_FILTER_NUM:
+ filterp = "105 0 0 12 0xFFFF 0x0806";
+ filter_id = 105;
+ break;
+ case DHD_BROADCAST_ARP_FILTER_NUM:
+ filterp = "106 0 0 0 0xFFFFFFFFFFFF0000000000000806"
+ " 0xFFFFFFFFFFFF0000000000000806";
+ filter_id = 106;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Add filter */
+ if (add_remove) {
+ dhdp->pktfilter[num] = filterp;
+ dhd_pktfilter_offload_set(dhdp, dhdp->pktfilter[num]);
+ } else { /* Delete filter */
+ if (dhdp->pktfilter[num] != NULL) {
+ dhd_pktfilter_offload_delete(dhdp, filter_id);
+ dhdp->pktfilter[num] = NULL;
+ }
+ }
+
+ return 0;
+}
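+/* The filter strings above appear to follow the wl pkt_filter_add pattern
+ * form "<id> <polarity> <type> <offset> <bitmask> <pattern>"; e.g.
+ * "105 0 0 12 0xFFFF 0x0806" matches ARP frames by comparing the two bytes
+ * at Ethernet offset 12 (the EtherType) against 0x0806.
+ */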
+#endif /* PKT_FILTER_SUPPORT */
+
+static int dhd_set_suspend(int value, dhd_pub_t *dhd)
+{
+#ifndef SUPPORT_PM2_ONLY
+ int power_mode = PM_MAX;
+#endif /* SUPPORT_PM2_ONLY */
+ /* wl_pkt_filter_enable_t enable_parm; */
+ int bcn_li_dtim = 0; /* Default bcn_li_dtim in resume mode is 0 */
+ int ret = 0;
+#ifdef DHD_USE_EARLYSUSPEND
+#ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
+ int roam_time_thresh = 0; /* (ms) */
+#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
+#ifndef ENABLE_FW_ROAM_SUSPEND
+ uint roamvar = 1;
+#endif /* ENABLE_FW_ROAM_SUSPEND */
+#ifdef ENABLE_BCN_LI_BCN_WAKEUP
+ int bcn_li_bcn = 1;
+#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
+ uint nd_ra_filter = 0;
+#ifdef ENABLE_IPMCAST_FILTER
+ int ipmcast_l2filter;
+#endif /* ENABLE_IPMCAST_FILTER */
+#ifdef CUSTOM_EVENT_PM_WAKE
+ uint32 pm_awake_thresh = CUSTOM_EVENT_PM_WAKE;
+#endif /* CUSTOM_EVENT_PM_WAKE */
+#endif /* DHD_USE_EARLYSUSPEND */
+#ifdef PASS_ALL_MCAST_PKTS
+ struct dhd_info *dhdinfo;
+ uint32 allmulti;
+ uint i;
+#endif /* PASS_ALL_MCAST_PKTS */
+#ifdef DYNAMIC_SWOOB_DURATION
+#ifndef CUSTOM_INTR_WIDTH
+#define CUSTOM_INTR_WIDTH 100
+ int intr_width = 0;
+#endif /* CUSTOM_INTR_WIDTH */
+#endif /* DYNAMIC_SWOOB_DURATION */
+
+#if defined(DHD_BCN_TIMEOUT_IN_SUSPEND) && defined(DHD_USE_EARLYSUSPEND)
+	/* Use CUSTOM_BCN_TIMEOUT_IN_SUSPEND while suspended, CUSTOM_BCN_TIMEOUT otherwise */
+ int bcn_timeout = CUSTOM_BCN_TIMEOUT;
+#endif /* DHD_BCN_TIMEOUT_IN_SUSPEND && DHD_USE_EARLYSUSPEND */
+#if defined(OEM_ANDROID) && defined(BCMPCIE)
+ int lpas = 0;
+ int dtim_period = 0;
+ int bcn_interval = 0;
+ int bcn_to_dly = 0;
+#endif /* OEM_ANDROID && BCMPCIE */
+
+ if (!dhd)
+ return -ENODEV;
+
+#ifdef PASS_ALL_MCAST_PKTS
+ dhdinfo = dhd->info;
+#endif /* PASS_ALL_MCAST_PKTS */
+
+ DHD_TRACE(("%s: enter, value = %d in_suspend=%d\n",
+ __FUNCTION__, value, dhd->in_suspend));
+
+ dhd_suspend_lock(dhd);
+
+#ifdef CUSTOM_SET_CPUCORE
+ DHD_TRACE(("%s set cpucore(suspend%d)\n", __FUNCTION__, value));
+ /* set specific cpucore */
+ dhd_set_cpucore(dhd, TRUE);
+#endif /* CUSTOM_SET_CPUCORE */
+ if (dhd->up) {
+ if (value && dhd->in_suspend) {
+ dhd->early_suspended = 1;
+ /* Kernel suspended */
+ DHD_ERROR(("%s: force extra Suspend setting \n", __FUNCTION__));
+
+#ifndef SUPPORT_PM2_ONLY
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
+ sizeof(power_mode), TRUE, 0);
+#endif /* SUPPORT_PM2_ONLY */
+
+#ifdef PKT_FILTER_SUPPORT
+ /* Enable packet filter,
+			 * only allow unicast packets to be sent up
+ */
+ dhd_enable_packet_filter(1, dhd);
+#ifdef APF
+ dhd_dev_apf_enable_filter(dhd_linux_get_primary_netdev(dhd));
+#endif /* APF */
+#endif /* PKT_FILTER_SUPPORT */
+#ifdef ARP_OFFLOAD_SUPPORT
+ if (dhd->arpoe_enable) {
+ dhd_arp_offload_enable(dhd, TRUE);
+ }
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+#ifdef PASS_ALL_MCAST_PKTS
+ allmulti = 0;
+ for (i = 0; i < DHD_MAX_IFS; i++) {
+ if (dhdinfo->iflist[i] && dhdinfo->iflist[i]->net) {
+ ret = dhd_iovar(dhd, i, "allmulti",
+ (char *)&allmulti,
+ sizeof(allmulti),
+ NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s allmulti failed %d\n",
+ __FUNCTION__, ret));
+ }
+ }
+ }
+#endif /* PASS_ALL_MCAST_PKTS */
+
+ /* If DTIM skip is set up as default, force it to wake
+			 * on every third DTIM for better power savings. Note that
+			 * one side effect is a chance of missing BC/MC packets.
+ */
+#ifdef WLTDLS
+			/* Do not set bcn_li_dtim in WFD mode */
+ if (dhd->tdls_mode) {
+ bcn_li_dtim = 0;
+ } else
+#endif /* WLTDLS */
+#if defined(OEM_ANDROID) && defined(BCMPCIE)
+ bcn_li_dtim = dhd_get_suspend_bcn_li_dtim(dhd, &dtim_period,
+ &bcn_interval);
+ ret = dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim,
+ sizeof(bcn_li_dtim), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s bcn_li_dtim failed %d\n",
+ __FUNCTION__, ret));
+ }
+ if ((bcn_li_dtim * dtim_period * bcn_interval) >=
+ MIN_DTIM_FOR_ROAM_THRES_EXTEND) {
+ /*
+				 * Increase the max roaming threshold from 2 secs to 8 secs;
+ * the real roam threshold is MIN(max_roam_threshold,
+ * bcn_timeout/2)
+ */
+ lpas = 1;
+ ret = dhd_iovar(dhd, 0, "lpas", (char *)&lpas, sizeof(lpas),
+ NULL, 0, TRUE);
+ if (ret < 0) {
+ if (ret == BCME_UNSUPPORTED) {
+ DHD_ERROR(("%s lpas, UNSUPPORTED\n",
+ __FUNCTION__));
+ } else {
+ DHD_ERROR(("%s set lpas failed %d\n",
+ __FUNCTION__, ret));
+ }
+ }
+ bcn_to_dly = 1;
+ /*
+				 * If bcn_to_dly is 1, the real roam threshold is
+				 * MIN(max_roam_threshold, bcn_timeout - 1); the link-down
+				 * event is notified after the roaming procedure completes
+				 * if bcn_timeout is hit while roaming is in progress.
+ */
+ ret = dhd_iovar(dhd, 0, "bcn_to_dly", (char *)&bcn_to_dly,
+ sizeof(bcn_to_dly), NULL, 0, TRUE);
+ if (ret < 0) {
+ if (ret == BCME_UNSUPPORTED) {
+ DHD_ERROR(("%s bcn_to_dly, UNSUPPORTED\n",
+ __FUNCTION__));
+ } else {
+ DHD_ERROR(("%s set bcn_to_dly failed %d\n",
+ __FUNCTION__, ret));
+ }
+ }
+ }
+#else
+ bcn_li_dtim = dhd_get_suspend_bcn_li_dtim(dhd);
+ if (dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim,
+ sizeof(bcn_li_dtim), NULL, 0, TRUE) < 0)
+ DHD_ERROR(("%s: set dtim failed\n", __FUNCTION__));
+#endif /* OEM_ANDROID && BCMPCIE */
+
+#ifdef DHD_USE_EARLYSUSPEND
+#ifdef DHD_BCN_TIMEOUT_IN_SUSPEND
+ bcn_timeout = CUSTOM_BCN_TIMEOUT_IN_SUSPEND;
+ ret = dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout,
+ sizeof(bcn_timeout), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s bcn_timeout failed %d\n", __FUNCTION__,
+ ret));
+ }
+#endif /* DHD_BCN_TIMEOUT_IN_SUSPEND */
+#ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
+ roam_time_thresh = CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND;
+ ret = dhd_iovar(dhd, 0, "roam_time_thresh",
+ (char *)&roam_time_thresh,
+ sizeof(roam_time_thresh), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s roam_time_thresh failed %d\n",
+ __FUNCTION__, ret));
+ }
+#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
+#ifndef ENABLE_FW_ROAM_SUSPEND
+ /* Disable firmware roaming during suspend */
+ ret = dhd_iovar(dhd, 0, "roam_off", (char *)&roamvar,
+ sizeof(roamvar), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s roam_off failed %d\n",
+ __FUNCTION__, ret));
+ }
+#endif /* ENABLE_FW_ROAM_SUSPEND */
+#ifdef ENABLE_BCN_LI_BCN_WAKEUP
+ if (bcn_li_dtim) {
+ bcn_li_bcn = 0;
+ }
+ ret = dhd_iovar(dhd, 0, "bcn_li_bcn", (char *)&bcn_li_bcn,
+ sizeof(bcn_li_bcn), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s bcn_li_bcn failed %d\n", __FUNCTION__, ret));
+ }
+#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
+#if defined(WL_CFG80211) && defined(WL_BCNRECV)
+ ret = wl_android_bcnrecv_suspend(dhd_linux_get_primary_netdev(dhd));
+ if (ret != BCME_OK) {
+ DHD_ERROR(("failed to stop beacon recv event on"
+ " suspend state (%d)\n", ret));
+ }
+#endif /* WL_CFG80211 && WL_BCNRECV */
+#ifdef NDO_CONFIG_SUPPORT
+ if (dhd->ndo_enable) {
+ if (!dhd->ndo_host_ip_overflow) {
+ /* enable ND offload on suspend */
+ ret = dhd_ndo_enable(dhd, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: failed to enable NDO\n",
+ __FUNCTION__));
+ }
+ } else {
+ DHD_INFO(("%s: NDO disabled on suspend due to"
+					" HW capacity\n", __FUNCTION__));
+ }
+ }
+#endif /* NDO_CONFIG_SUPPORT */
+#ifndef APF
+ if (FW_SUPPORTED(dhd, ndoe))
+#else
+ if (FW_SUPPORTED(dhd, ndoe) && !FW_SUPPORTED(dhd, apf))
+#endif /* APF */
+ {
+ /* enable IPv6 RA filter in firmware during suspend */
+ nd_ra_filter = 1;
+ ret = dhd_iovar(dhd, 0, "nd_ra_filter_enable",
+ (char *)&nd_ra_filter, sizeof(nd_ra_filter),
+ NULL, 0, TRUE);
+ if (ret < 0)
+ DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
+ ret));
+ }
+ dhd_os_suppress_logging(dhd, TRUE);
+#ifdef ENABLE_IPMCAST_FILTER
+ ipmcast_l2filter = 1;
+ ret = dhd_iovar(dhd, 0, "ipmcast_l2filter",
+ (char *)&ipmcast_l2filter, sizeof(ipmcast_l2filter),
+ NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("failed to set ipmcast_l2filter (%d)\n", ret));
+ }
+#endif /* ENABLE_IPMCAST_FILTER */
+#ifdef DYNAMIC_SWOOB_DURATION
+ intr_width = CUSTOM_INTR_WIDTH;
+ ret = dhd_iovar(dhd, 0, "bus:intr_width", (char *)&intr_width,
+ sizeof(intr_width), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("failed to set intr_width (%d)\n", ret));
+ }
+#endif /* DYNAMIC_SWOOB_DURATION */
+#ifdef CUSTOM_EVENT_PM_WAKE
+ pm_awake_thresh = CUSTOM_EVENT_PM_WAKE * 4;
+ ret = dhd_iovar(dhd, 0, "const_awake_thresh",
+ (char *)&pm_awake_thresh,
+ sizeof(pm_awake_thresh), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s set const_awake_thresh failed %d\n",
+ __FUNCTION__, ret));
+ }
+#endif /* CUSTOM_EVENT_PM_WAKE */
+#ifdef CONFIG_SILENT_ROAM
+ if (!dhd->sroamed) {
+ ret = dhd_sroam_set_mon(dhd, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s set sroam failed %d\n",
+ __FUNCTION__, ret));
+ }
+ }
+ dhd->sroamed = FALSE;
+#endif /* CONFIG_SILENT_ROAM */
+#endif /* DHD_USE_EARLYSUSPEND */
+ } else {
+ dhd->early_suspended = 0;
+ /* Kernel resumed */
+ DHD_ERROR(("%s: Remove extra suspend setting \n", __FUNCTION__));
+#ifdef DYNAMIC_SWOOB_DURATION
+ intr_width = 0;
+ ret = dhd_iovar(dhd, 0, "bus:intr_width", (char *)&intr_width,
+ sizeof(intr_width), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("failed to set intr_width (%d)\n", ret));
+ }
+#endif /* DYNAMIC_SWOOB_DURATION */
+#ifndef SUPPORT_PM2_ONLY
+ power_mode = PM_FAST;
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
+ sizeof(power_mode), TRUE, 0);
+#endif /* SUPPORT_PM2_ONLY */
+#if defined(WL_CFG80211) && defined(WL_BCNRECV)
+ ret = wl_android_bcnrecv_resume(dhd_linux_get_primary_netdev(dhd));
+ if (ret != BCME_OK) {
+ DHD_ERROR(("failed to resume beacon recv state (%d)\n",
+ ret));
+ }
+#endif /* WL_CFG80211 && WL_BCNRECV */
+#ifdef ARP_OFFLOAD_SUPPORT
+ if (dhd->arpoe_enable) {
+ dhd_arp_offload_enable(dhd, FALSE);
+ }
+#endif /* ARP_OFFLOAD_SUPPORT */
+#ifdef PKT_FILTER_SUPPORT
+ /* disable pkt filter */
+ dhd_enable_packet_filter(0, dhd);
+#ifdef APF
+ dhd_dev_apf_disable_filter(dhd_linux_get_primary_netdev(dhd));
+#endif /* APF */
+#endif /* PKT_FILTER_SUPPORT */
+#ifdef PASS_ALL_MCAST_PKTS
+ allmulti = 1;
+ for (i = 0; i < DHD_MAX_IFS; i++) {
+				if (dhdinfo->iflist[i] && dhdinfo->iflist[i]->net) {
+					ret = dhd_iovar(dhd, i, "allmulti",
+						(char *)&allmulti,
+						sizeof(allmulti), NULL,
+						0, TRUE);
+					if (ret < 0) {
+						DHD_ERROR(("%s: allmulti failed:%d\n",
+							__FUNCTION__, ret));
+					}
+				}
+ }
+#endif /* PASS_ALL_MCAST_PKTS */
+#if defined(OEM_ANDROID) && defined(BCMPCIE)
+ /* restore pre-suspend setting */
+ ret = dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim,
+ sizeof(bcn_li_dtim), NULL, 0, TRUE);
+ if (ret < 0) {
+				DHD_ERROR(("%s: bcn_li_dtim failed:%d\n",
+ __FUNCTION__, ret));
+ }
+ ret = dhd_iovar(dhd, 0, "lpas", (char *)&lpas, sizeof(lpas), NULL,
+ 0, TRUE);
+ if (ret < 0) {
+ if (ret == BCME_UNSUPPORTED) {
+ DHD_ERROR(("%s lpas, UNSUPPORTED\n", __FUNCTION__));
+ } else {
+ DHD_ERROR(("%s set lpas failed %d\n",
+ __FUNCTION__, ret));
+ }
+ }
+ ret = dhd_iovar(dhd, 0, "bcn_to_dly", (char *)&bcn_to_dly,
+ sizeof(bcn_to_dly), NULL, 0, TRUE);
+ if (ret < 0) {
+ if (ret == BCME_UNSUPPORTED) {
+ DHD_ERROR(("%s bcn_to_dly UNSUPPORTED\n",
+ __FUNCTION__));
+ } else {
+ DHD_ERROR(("%s set bcn_to_dly failed %d\n",
+ __FUNCTION__, ret));
+ }
+ }
+#else
+ /* restore pre-suspend setting for dtim_skip */
+ ret = dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim,
+ sizeof(bcn_li_dtim), NULL, 0, TRUE);
+ if (ret < 0) {
+				DHD_ERROR(("%s: bcn_li_dtim failed:%d\n", __FUNCTION__, ret));
+ }
+#endif /* OEM_ANDROID && BCMPCIE */
+#ifdef DHD_USE_EARLYSUSPEND
+#ifdef DHD_BCN_TIMEOUT_IN_SUSPEND
+ bcn_timeout = CUSTOM_BCN_TIMEOUT;
+ ret = dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout,
+ sizeof(bcn_timeout), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s:bcn_timeout failed:%d\n",
+ __FUNCTION__, ret));
+ }
+#endif /* DHD_BCN_TIMEOUT_IN_SUSPEND */
+#ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
+ roam_time_thresh = 2000;
+ ret = dhd_iovar(dhd, 0, "roam_time_thresh",
+ (char *)&roam_time_thresh,
+ sizeof(roam_time_thresh), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s:roam_time_thresh failed:%d\n",
+ __FUNCTION__, ret));
+ }
+
+#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
+#ifndef ENABLE_FW_ROAM_SUSPEND
+ roamvar = dhd_roam_disable;
+ ret = dhd_iovar(dhd, 0, "roam_off", (char *)&roamvar,
+ sizeof(roamvar), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: roam_off fail:%d\n", __FUNCTION__, ret));
+ }
+#endif /* ENABLE_FW_ROAM_SUSPEND */
+#ifdef ENABLE_BCN_LI_BCN_WAKEUP
+ ret = dhd_iovar(dhd, 0, "bcn_li_bcn", (char *)&bcn_li_bcn,
+ sizeof(bcn_li_bcn), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: bcn_li_bcn failed:%d\n",
+ __FUNCTION__, ret));
+ }
+#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
+#ifdef NDO_CONFIG_SUPPORT
+ if (dhd->ndo_enable) {
+ /* Disable ND offload on resume */
+ ret = dhd_ndo_enable(dhd, FALSE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: failed to disable NDO\n",
+ __FUNCTION__));
+ }
+ }
+#endif /* NDO_CONFIG_SUPPORT */
+#ifndef APF
+ if (FW_SUPPORTED(dhd, ndoe))
+#else
+ if (FW_SUPPORTED(dhd, ndoe) && !FW_SUPPORTED(dhd, apf))
+#endif /* APF */
+ {
+ /* disable IPv6 RA filter in firmware during suspend */
+ nd_ra_filter = 0;
+ ret = dhd_iovar(dhd, 0, "nd_ra_filter_enable",
+ (char *)&nd_ra_filter, sizeof(nd_ra_filter),
+ NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
+ ret));
+ }
+ }
+ dhd_os_suppress_logging(dhd, FALSE);
+#ifdef ENABLE_IPMCAST_FILTER
+ ipmcast_l2filter = 0;
+ ret = dhd_iovar(dhd, 0, "ipmcast_l2filter",
+ (char *)&ipmcast_l2filter, sizeof(ipmcast_l2filter),
+ NULL, 0, TRUE);
+ if (ret < 0) {
+				DHD_ERROR(("failed to clear ipmcast_l2filter ret:%d\n", ret));
+ }
+#endif /* ENABLE_IPMCAST_FILTER */
+#ifdef CUSTOM_EVENT_PM_WAKE
+ ret = dhd_iovar(dhd, 0, "const_awake_thresh",
+ (char *)&pm_awake_thresh,
+ sizeof(pm_awake_thresh), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s set const_awake_thresh failed %d\n",
+ __FUNCTION__, ret));
+ }
+#endif /* CUSTOM_EVENT_PM_WAKE */
+#ifdef CONFIG_SILENT_ROAM
+ ret = dhd_sroam_set_mon(dhd, FALSE);
+ if (ret < 0) {
+ DHD_ERROR(("%s set sroam failed %d\n", __FUNCTION__, ret));
+ }
+#endif /* CONFIG_SILENT_ROAM */
+#endif /* DHD_USE_EARLYSUSPEND */
+ }
+ }
+ dhd_suspend_unlock(dhd);
+
+ return 0;
+}
+
+static int dhd_suspend_resume_helper(struct dhd_info *dhd, int val, int force)
+{
+ dhd_pub_t *dhdp = &dhd->pub;
+ int ret = 0;
+
+ DHD_OS_WAKE_LOCK(dhdp);
+
+ /* Set flag when early suspend was called */
+ dhdp->in_suspend = val;
+ if ((force || !dhdp->suspend_disable_flag) &&
+ (dhd_support_sta_mode(dhdp) || dhd_conf_get_insuspend(dhdp, ALL_IN_SUSPEND)))
+ {
+ ret = dhd_set_suspend(val, dhdp);
+ }
+
+ DHD_OS_WAKE_UNLOCK(dhdp);
+ return ret;
+}
+
+#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
+static void dhd_early_suspend(struct early_suspend *h)
+{
+ struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
+ DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));
+
+ if (dhd && (dhd->pub.conf->suspend_mode == EARLY_SUSPEND ||
+ dhd->pub.conf->suspend_mode == SUSPEND_MODE_2)) {
+ dhd_suspend_resume_helper(dhd, 1, 0);
+ if (dhd->pub.conf->suspend_mode == EARLY_SUSPEND)
+ dhd_conf_set_suspend_resume(&dhd->pub, 1);
+ }
+}
+
+static void dhd_late_resume(struct early_suspend *h)
+{
+ struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
+ DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));
+
+ if (dhd && (dhd->pub.conf->suspend_mode == EARLY_SUSPEND ||
+ dhd->pub.conf->suspend_mode == SUSPEND_MODE_2)) {
+ dhd_conf_set_suspend_resume(&dhd->pub, 0);
+ if (dhd->pub.conf->suspend_mode == EARLY_SUSPEND)
+ dhd_suspend_resume_helper(dhd, 0, 0);
+ }
+}
+#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
+
+/*
+ * Generalized timeout mechanism. Spins or sleeps (depending on context) with
+ * exponential back-off, capped at 10 msec per poll. Usage:
+ *
+ * dhd_timeout_start(&tmo, usec);
+ * while (!dhd_timeout_expired(&tmo))
+ * if (poll_something())
+ * break;
+ * if (dhd_timeout_expired(&tmo))
+ * fatal();
+ */
+
+void
+dhd_timeout_start(dhd_timeout_t *tmo, uint usec)
+{
+#ifdef BCMQT
+ tmo->limit = usec * htclkratio;
+#else
+ tmo->limit = usec;
+#endif
+ tmo->increment = 0;
+ tmo->elapsed = 0;
+ tmo->tick = 10 * USEC_PER_MSEC; /* 10 msec */
+}
+
+int
+dhd_timeout_expired(dhd_timeout_t *tmo)
+{
+	/* Does nothing on the first call */
+ if (tmo->increment == 0) {
+ tmo->increment = USEC_PER_MSEC; /* Start with 1 msec */
+ return 0;
+ }
+
+ if (tmo->elapsed >= tmo->limit)
+ return 1;
+
+ DHD_INFO(("%s: CAN_SLEEP():%d tmo->increment=%ld msec\n",
+ __FUNCTION__, CAN_SLEEP(), tmo->increment / USEC_PER_MSEC));
+
+ CAN_SLEEP() ? OSL_SLEEP(tmo->increment / USEC_PER_MSEC) : OSL_DELAY(tmo->increment);
+
+	/* The delay doubles on each call until it crosses tmo->tick; after that
+	 * it stays constant at tmo->tick (10 msec) until the timer elapses.
+ */
+ tmo->increment = (tmo->increment >= tmo->tick) ? tmo->tick : (tmo->increment * 2);
+
+ /* Add the delay that's about to take place */
+#ifdef BCMQT
+ tmo->elapsed += tmo->increment * htclkratio;
+#else
+ tmo->elapsed += tmo->increment;
+#endif
+
+ return 0;
+}
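+
+/* Example back-off with the defaults above: successive polls sleep
+ * 1, 2, 4, 8, 16 msec, then hold at tmo->tick (10 msec) until the
+ * accumulated delay reaches tmo->limit.
+ */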
+
+int
+dhd_net2idx(dhd_info_t *dhd, struct net_device *net)
+{
+ int i = 0;
+
+ if (!dhd) {
+ DHD_ERROR(("%s : DHD_BAD_IF return\n", __FUNCTION__));
+ return DHD_BAD_IF;
+ }
+
+ while (i < DHD_MAX_IFS) {
+ if (dhd->iflist[i] && dhd->iflist[i]->net && (dhd->iflist[i]->net == net))
+ return i;
+ i++;
+ }
+
+ return DHD_BAD_IF;
+}
+
+struct net_device * dhd_idx2net(void *pub, int ifidx)
+{
+ struct dhd_pub *dhd_pub = (struct dhd_pub *)pub;
+ struct dhd_info *dhd_info;
+
+ if (!dhd_pub || ifidx < 0 || ifidx >= DHD_MAX_IFS)
+ return NULL;
+ dhd_info = dhd_pub->info;
+ if (dhd_info && dhd_info->iflist[ifidx])
+ return dhd_info->iflist[ifidx]->net;
+ return NULL;
+}
+
+int
+dhd_ifname2idx(dhd_info_t *dhd, char *name)
+{
+ int i = DHD_MAX_IFS;
+
+ ASSERT(dhd);
+
+ if (name == NULL || *name == '\0')
+ return 0;
+
+ while (--i > 0)
+ if (dhd->iflist[i] && !strncmp(dhd->iflist[i]->dngl_name, name, IFNAMSIZ))
+ break;
+
+ DHD_TRACE(("%s: return idx %d for \"%s\"\n", __FUNCTION__, i, name));
+
+ return i; /* default - the primary interface */
+}
+
+char *
+dhd_ifname(dhd_pub_t *dhdp, int ifidx)
+{
+ dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+
+ ASSERT(dhd);
+
+ if (ifidx < 0 || ifidx >= DHD_MAX_IFS) {
+ DHD_ERROR(("%s: ifidx %d out of range\n", __FUNCTION__, ifidx));
+ return "<if_bad>";
+ }
+
+ if (dhd->iflist[ifidx] == NULL) {
+ DHD_ERROR(("%s: null i/f %d\n", __FUNCTION__, ifidx));
+ return "<if_null>";
+ }
+
+ if (dhd->iflist[ifidx]->net)
+ return dhd->iflist[ifidx]->net->name;
+
+ return "<if_none>";
+}
+
+uint8 *
+dhd_bssidx2bssid(dhd_pub_t *dhdp, int idx)
+{
+ int i;
+ dhd_info_t *dhd = (dhd_info_t *)dhdp;
+
+ ASSERT(dhd);
+ for (i = 0; i < DHD_MAX_IFS; i++)
+ if (dhd->iflist[i] && dhd->iflist[i]->bssidx == idx)
+ return dhd->iflist[i]->mac_addr;
+
+ return NULL;
+}
+
+static void
+_dhd_set_multicast_list(dhd_info_t *dhd, int ifidx)
+{
+ struct net_device *dev;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
+ struct netdev_hw_addr *ha;
+#else
+ struct dev_mc_list *mclist;
+#endif
+ uint32 allmulti, cnt;
+
+ wl_ioctl_t ioc;
+ char *buf, *bufp;
+ uint buflen;
+ int ret;
+
+#ifdef MCAST_LIST_ACCUMULATION
+ int i;
+ uint32 cnt_iface[DHD_MAX_IFS];
+ cnt = 0;
+ allmulti = 0;
+
+ for (i = 0; i < DHD_MAX_IFS; i++) {
+ if (dhd->iflist[i]) {
+ dev = dhd->iflist[i]->net;
+ if (!dev)
+ continue;
+ netif_addr_lock_bh(dev);
+ cnt_iface[i] = netdev_mc_count(dev);
+ cnt += cnt_iface[i];
+ netif_addr_unlock_bh(dev);
+
+ /* Determine initial value of allmulti flag */
+ allmulti |= (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE;
+ }
+ }
+#else /* !MCAST_LIST_ACCUMULATION */
+ if (!dhd->iflist[ifidx]) {
+ DHD_ERROR(("%s : dhd->iflist[%d] was NULL\n", __FUNCTION__, ifidx));
+ return;
+ }
+ dev = dhd->iflist[ifidx]->net;
+ if (!dev)
+ return;
+ netif_addr_lock_bh(dev);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
+ cnt = netdev_mc_count(dev);
+#else
+ cnt = dev->mc_count;
+#endif /* LINUX_VERSION_CODE */
+
+ netif_addr_unlock_bh(dev);
+
+ /* Determine initial value of allmulti flag */
+ allmulti = (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE;
+#endif /* MCAST_LIST_ACCUMULATION */
+
+#ifdef PASS_ALL_MCAST_PKTS
+#ifdef PKT_FILTER_SUPPORT
+ if (!dhd->pub.early_suspended)
+#endif /* PKT_FILTER_SUPPORT */
+ allmulti = TRUE;
+#endif /* PASS_ALL_MCAST_PKTS */
+
+ /* Send down the multicast list first. */
+
+ /* XXX Not using MAXMULTILIST to avoid including wlc_pub.h; but
+ * maybe we should? (Or should that be in wlioctl.h instead?)
+ */
+
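+	/* The iovar buffer built below is laid out as:
+	 *   "mcast_list\0" | uint32 count (LE, via htol32) | count 6-byte MACs
+	 * which is what the buflen computation accounts for.
+	 */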
+ buflen = sizeof("mcast_list") + sizeof(cnt) + (cnt * ETHER_ADDR_LEN);
+ if (!(bufp = buf = MALLOC(dhd->pub.osh, buflen))) {
+ DHD_ERROR(("%s: out of memory for mcast_list, cnt %d\n",
+ dhd_ifname(&dhd->pub, ifidx), cnt));
+ return;
+ }
+
+ strlcpy(bufp, "mcast_list", buflen);
+ bufp += strlen("mcast_list") + 1;
+
+ cnt = htol32(cnt);
+ memcpy(bufp, &cnt, sizeof(cnt));
+ bufp += sizeof(cnt);
+
+#ifdef MCAST_LIST_ACCUMULATION
+ for (i = 0; i < DHD_MAX_IFS; i++) {
+ if (dhd->iflist[i]) {
+ DHD_TRACE(("_dhd_set_multicast_list: ifidx %d\n", i));
+ dev = dhd->iflist[i]->net;
+
+ netif_addr_lock_bh(dev);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ netdev_for_each_mc_addr(ha, dev) {
+ GCC_DIAGNOSTIC_POP();
+ if (!cnt_iface[i])
+ break;
+ memcpy(bufp, ha->addr, ETHER_ADDR_LEN);
+ bufp += ETHER_ADDR_LEN;
+ DHD_TRACE(("_dhd_set_multicast_list: cnt "
+ "%d " MACDBG "\n",
+ cnt_iface[i], MAC2STRDBG(ha->addr)));
+ cnt_iface[i]--;
+ }
+ netif_addr_unlock_bh(dev);
+ }
+ }
+#else /* !MCAST_LIST_ACCUMULATION */
+ netif_addr_lock_bh(dev);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ netdev_for_each_mc_addr(ha, dev) {
+ GCC_DIAGNOSTIC_POP();
+ if (!cnt)
+ break;
+ memcpy(bufp, ha->addr, ETHER_ADDR_LEN);
+ bufp += ETHER_ADDR_LEN;
+ cnt--;
+ }
+#else
+ for (mclist = dev->mc_list; (mclist && (cnt > 0));
+ cnt--, mclist = mclist->next) {
+ memcpy(bufp, (void *)mclist->dmi_addr, ETHER_ADDR_LEN);
+ bufp += ETHER_ADDR_LEN;
+ }
+#endif /* LINUX_VERSION_CODE */
+ netif_addr_unlock_bh(dev);
+#endif /* MCAST_LIST_ACCUMULATION */
+
+ memset(&ioc, 0, sizeof(ioc));
+ ioc.cmd = WLC_SET_VAR;
+ ioc.buf = buf;
+ ioc.len = buflen;
+ ioc.set = TRUE;
+
+ ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
+ if (ret < 0) {
+ DHD_ERROR(("%s: set mcast_list failed, cnt %d\n",
+ dhd_ifname(&dhd->pub, ifidx), cnt));
+ allmulti = cnt ? TRUE : allmulti;
+ }
+
+ MFREE(dhd->pub.osh, buf, buflen);
+
+ /* Now send the allmulti setting. This is based on the setting in the
+	 * net_device flags, but might have been turned on above if we were
+	 * trying to set some addresses and the dongle rejected it...
+ */
+
+ allmulti = htol32(allmulti);
+ ret = dhd_iovar(&dhd->pub, ifidx, "allmulti", (char *)&allmulti,
+ sizeof(allmulti), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: set allmulti %d failed\n",
+ dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
+ }
+
+ /* Finally, pick up the PROMISC flag as well, like the NIC driver does */
+
+#ifdef MCAST_LIST_ACCUMULATION
+ allmulti = 0;
+ for (i = 0; i < DHD_MAX_IFS; i++) {
+ if (dhd->iflist[i]) {
+ dev = dhd->iflist[i]->net;
+ allmulti |= (dev->flags & IFF_PROMISC) ? TRUE : FALSE;
+ }
+ }
+#else
+ allmulti = (dev->flags & IFF_PROMISC) ? TRUE : FALSE;
+#endif /* MCAST_LIST_ACCUMULATION */
+
+ allmulti = htol32(allmulti);
+
+ memset(&ioc, 0, sizeof(ioc));
+ ioc.cmd = WLC_SET_PROMISC;
+ ioc.buf = &allmulti;
+ ioc.len = sizeof(allmulti);
+ ioc.set = TRUE;
+
+ ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
+ if (ret < 0) {
+ DHD_ERROR(("%s: set promisc %d failed\n",
+ dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
+ }
+}
+
+int
+_dhd_set_mac_address(dhd_info_t *dhd, int ifidx, uint8 *addr, bool skip_stop)
+{
+ int ret;
+
+#ifdef DHD_NOTIFY_MAC_CHANGED
+ if (skip_stop) {
+ WL_MSG(dhd_ifname(&dhd->pub, ifidx), "close dev for mac changing\n");
+ dhd->pub.skip_dhd_stop = TRUE;
+ dev_close(dhd->iflist[ifidx]->net);
+ }
+#endif /* DHD_NOTIFY_MAC_CHANGED */
+
+ ret = dhd_iovar(&dhd->pub, ifidx, "cur_etheraddr", (char *)addr,
+ ETHER_ADDR_LEN, NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: set cur_etheraddr %pM failed ret=%d\n",
+ dhd_ifname(&dhd->pub, ifidx), addr, ret));
+ goto exit;
+ } else {
+ memcpy(dhd->iflist[ifidx]->net->dev_addr, addr, ETHER_ADDR_LEN);
+ if (ifidx == 0)
+ memcpy(dhd->pub.mac.octet, addr, ETHER_ADDR_LEN);
+ WL_MSG(dhd_ifname(&dhd->pub, ifidx), "MACID %pM is overwritten\n", addr);
+ }
+
+exit:
+#ifdef DHD_NOTIFY_MAC_CHANGED
+ if (skip_stop) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0))
+ dev_open(dhd->iflist[ifidx]->net, NULL);
+#else
+ dev_open(dhd->iflist[ifidx]->net);
+#endif
+ dhd->pub.skip_dhd_stop = FALSE;
+ WL_MSG(dhd_ifname(&dhd->pub, ifidx), "notify mac changed done\n");
+ }
+#endif /* DHD_NOTIFY_MAC_CHANGED */
+
+ return ret;
+}
+
+int dhd_update_rand_mac_addr(dhd_pub_t *dhd)
+{
+ struct ether_addr mac_addr;
+ dhd_generate_rand_mac_addr(&mac_addr);
+ if (_dhd_set_mac_address(dhd->info, 0, mac_addr.octet, TRUE) != 0) {
+ DHD_ERROR(("randmac setting failed\n"));
+#ifdef STA_RANDMAC_ENFORCED
+ return BCME_BADADDR;
+#endif /* STA_RANDMAC_ENFORCED */
+ }
+ return BCME_OK;
+}
+
+#ifdef BCM_ROUTER_DHD
+void dhd_update_dpsta_interface_for_sta(dhd_pub_t* dhdp, int ifidx, void* event_data)
+{
+ struct wl_dpsta_intf_event *dpsta_prim_event = (struct wl_dpsta_intf_event *)event_data;
+ dhd_if_t *ifp = dhdp->info->iflist[ifidx];
+
+ if (dpsta_prim_event->intf_type == WL_INTF_DWDS) {
+ ifp->primsta_dwds = TRUE;
+ } else {
+ ifp->primsta_dwds = FALSE;
+ }
+}
+#endif /* BCM_ROUTER_DHD */
+
+#ifdef DHD_WMF
+void dhd_update_psta_interface_for_sta(dhd_pub_t* dhdp, char* ifname, void* ea,
+ void* event_data)
+{
+ struct wl_psta_primary_intf_event *psta_prim_event =
+ (struct wl_psta_primary_intf_event*)event_data;
+ dhd_sta_t *psta_interface = NULL;
+ dhd_sta_t *sta = NULL;
+ uint8 ifindex;
+ ASSERT(ifname);
+ ASSERT(psta_prim_event);
+ ASSERT(ea);
+
+ ifindex = (uint8)dhd_ifname2idx(dhdp->info, ifname);
+ sta = dhd_find_sta(dhdp, ifindex, ea);
+ if (sta != NULL) {
+ psta_interface = dhd_find_sta(dhdp, ifindex,
+ (void *)(psta_prim_event->prim_ea.octet));
+ if (psta_interface != NULL) {
+ sta->psta_prim = psta_interface;
+ }
+ }
+}
+
+/* Get wmf_psta_disable configuration */
+int dhd_get_wmf_psta_disable(dhd_pub_t *dhdp, uint32 idx)
+{
+ dhd_info_t *dhd = dhdp->info;
+ dhd_if_t *ifp;
+ ASSERT(idx < DHD_MAX_IFS);
+ ifp = dhd->iflist[idx];
+ return ifp->wmf_psta_disable;
+}
+
+/* Set wmf_psta_disable configuration */
+int dhd_set_wmf_psta_disable(dhd_pub_t *dhdp, uint32 idx, int val)
+{
+ dhd_info_t *dhd = dhdp->info;
+ dhd_if_t *ifp;
+ ASSERT(idx < DHD_MAX_IFS);
+ ifp = dhd->iflist[idx];
+ ifp->wmf_psta_disable = val;
+ return 0;
+}
+#endif /* DHD_WMF */
+
+#ifdef DHD_PSTA
+/* Get psta/psr configuration */
+int dhd_get_psta_mode(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd = dhdp->info;
+ return (int)dhd->psta_mode;
+}
+/* Set psta/psr configuration */
+int dhd_set_psta_mode(dhd_pub_t *dhdp, uint32 val)
+{
+ dhd_info_t *dhd = dhdp->info;
+ dhd->psta_mode = val;
+ return 0;
+}
+#endif /* DHD_PSTA */
+
+#if (defined(DHD_WET) || defined(DHD_MCAST_REGEN) || defined(DHD_L2_FILTER))
+static void
+dhd_update_rx_pkt_chainable_state(dhd_pub_t* dhdp, uint32 idx)
+{
+ dhd_info_t *dhd = dhdp->info;
+ dhd_if_t *ifp;
+
+ ASSERT(idx < DHD_MAX_IFS);
+
+ ifp = dhd->iflist[idx];
+
+ if (
+#ifdef DHD_L2_FILTER
+ (ifp->block_ping) ||
+#endif
+#ifdef DHD_WET
+ (dhd->wet_mode) ||
+#endif
+#ifdef DHD_MCAST_REGEN
+ (ifp->mcast_regen_bss_enable) ||
+#endif
+ FALSE) {
+ ifp->rx_pkt_chainable = FALSE;
+ }
+}
+#endif /* DHD_WET || DHD_MCAST_REGEN || DHD_L2_FILTER */
+
+#ifdef DHD_WET
+/* Get wet mode configuration */
+int dhd_get_wet_mode(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd = dhdp->info;
+ return (int)dhd->wet_mode;
+}
+
+/* Set wet mode configuration */
+int dhd_set_wet_mode(dhd_pub_t *dhdp, uint32 val)
+{
+ dhd_info_t *dhd = dhdp->info;
+ dhd->wet_mode = val;
+ dhd_update_rx_pkt_chainable_state(dhdp, 0);
+ return 0;
+}
+#endif /* DHD_WET */
+
+#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+int32 dhd_role_to_nl80211_iftype(int32 role)
+{
+ switch (role) {
+ case WLC_E_IF_ROLE_STA:
+ return NL80211_IFTYPE_STATION;
+ case WLC_E_IF_ROLE_AP:
+ return NL80211_IFTYPE_AP;
+ case WLC_E_IF_ROLE_WDS:
+ return NL80211_IFTYPE_WDS;
+ case WLC_E_IF_ROLE_P2P_GO:
+ return NL80211_IFTYPE_P2P_GO;
+ case WLC_E_IF_ROLE_P2P_CLIENT:
+ return NL80211_IFTYPE_P2P_CLIENT;
+ case WLC_E_IF_ROLE_IBSS:
+ case WLC_E_IF_ROLE_NAN:
+ return NL80211_IFTYPE_ADHOC;
+ default:
+ return NL80211_IFTYPE_UNSPECIFIED;
+ }
+}
+#endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
+
+static void
+dhd_ifadd_event_handler(void *handle, void *event_info, u8 event)
+{
+ dhd_info_t *dhd = handle;
+ dhd_if_event_t *if_event = event_info;
+ int ifidx, bssidx;
+ int ret = 0;
+#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+ struct wl_if_event_info info;
+#if defined(WLDWDS) && defined(FOURADDR_AUTO_BRG)
+ struct net_device *ndev = NULL;
+#endif
+#else
+ struct net_device *ndev;
+#endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
+#ifdef DHD_AWDL
+ bool is_awdl_iface = FALSE;
+#endif /* DHD_AWDL */
+
+ BCM_REFERENCE(ret);
+ if (event != DHD_WQ_WORK_IF_ADD) {
+ DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
+ return;
+ }
+
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
+ return;
+ }
+
+ if (!if_event) {
+ DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
+ return;
+ }
+
+ dhd_net_if_lock_local(dhd);
+ DHD_OS_WAKE_LOCK(&dhd->pub);
+
+ ifidx = if_event->event.ifidx;
+ bssidx = if_event->event.bssidx;
+ DHD_TRACE(("%s: registering if with ifidx %d\n", __FUNCTION__, ifidx));
+
+#ifdef DHD_AWDL
+ if (if_event->event.opcode == WLC_E_IF_ADD &&
+ if_event->event.role == WLC_E_IF_ROLE_AWDL) {
+ dhd->pub.awdl_ifidx = ifidx;
+ is_awdl_iface = TRUE;
+ }
+#endif /* DHD_AWDL */
+
+#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+ if (if_event->event.ifidx > 0) {
+ u8 *mac_addr;
+ bzero(&info, sizeof(info));
+ info.ifidx = ifidx;
+ info.bssidx = bssidx;
+ info.role = if_event->event.role;
+ strlcpy(info.name, if_event->name, sizeof(info.name));
+ if (is_valid_ether_addr(if_event->mac)) {
+ mac_addr = if_event->mac;
+ } else {
+ mac_addr = NULL;
+ }
+
+#ifdef WLEASYMESH
+ if ((ndev = wl_cfg80211_post_ifcreate(dhd->pub.info->iflist[0]->net,
+ &info, mac_addr, if_event->name, true)) == NULL)
+#else
+ if (wl_cfg80211_post_ifcreate(dhd->pub.info->iflist[0]->net,
+ &info, mac_addr, NULL, true) == NULL)
+#endif
+ {
+ /* Do the post interface create ops */
+ DHD_ERROR(("Post ifcreate ops failed. Returning \n"));
+ ret = BCME_ERROR;
+ goto done;
+ }
+ }
+#else
+	/* This path is for the non-Android case. The interface name in the host
+	 * and in the event msg are the same; the name in the event msg is used
+	 * to create the dongle i/f list on the host.
+	 */
+ ndev = dhd_allocate_if(&dhd->pub, ifidx, if_event->name,
+ if_event->mac, bssidx, TRUE, if_event->name);
+ if (!ndev) {
+ DHD_ERROR(("%s: net device alloc failed \n", __FUNCTION__));
+ ret = BCME_NOMEM;
+ goto done;
+ }
+
+ ret = dhd_register_if(&dhd->pub, ifidx, TRUE);
+ if (ret != BCME_OK) {
+ DHD_ERROR(("%s: dhd_register_if failed\n", __FUNCTION__));
+ dhd_remove_if(&dhd->pub, ifidx, TRUE);
+ goto done;
+ }
+
+#endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
+
+#ifndef PCIE_FULL_DONGLE
+ /* Turn on AP isolation in the firmware for interfaces operating in AP mode */
+ if (FW_SUPPORTED((&dhd->pub), ap) && (if_event->event.role != WLC_E_IF_ROLE_STA)) {
+ uint32 var_int = 1;
+ ret = dhd_iovar(&dhd->pub, ifidx, "ap_isolate", (char *)&var_int, sizeof(var_int),
+ NULL, 0, TRUE);
+ if (ret != BCME_OK) {
+ DHD_ERROR(("%s: Failed to set ap_isolate to dongle\n", __FUNCTION__));
+ dhd_remove_if(&dhd->pub, ifidx, TRUE);
+ }
+ }
+#endif /* PCIE_FULL_DONGLE */
+
+done:
+#ifdef DHD_AWDL
+ if (ret != BCME_OK && is_awdl_iface) {
+ dhd->pub.awdl_ifidx = 0;
+ }
+#endif /* DHD_AWDL */
+
+ MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
+#if defined(WLDWDS) && defined(FOURADDR_AUTO_BRG)
+ if (dhd->pub.info->iflist[ifidx]) {
+ dhd_bridge_dev_set(dhd, ifidx, ndev);
+ }
+#endif /* defined(WLDWDS) && defined(FOURADDR_AUTO_BRG) */
+
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ dhd_net_if_unlock_local(dhd);
+}
+
+static void
+dhd_ifdel_event_handler(void *handle, void *event_info, u8 event)
+{
+ dhd_info_t *dhd = handle;
+ int ifidx;
+ dhd_if_event_t *if_event = event_info;
+
+ if (event != DHD_WQ_WORK_IF_DEL) {
+ DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
+ return;
+ }
+
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
+ return;
+ }
+
+ if (!if_event) {
+ DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
+ return;
+ }
+
+ dhd_net_if_lock_local(dhd);
+ DHD_OS_WAKE_LOCK(&dhd->pub);
+
+ ifidx = if_event->event.ifidx;
+ DHD_TRACE(("Removing interface with idx %d\n", ifidx));
+#if defined(WLDWDS) && defined(FOURADDR_AUTO_BRG)
+ if (dhd->pub.info->iflist[ifidx]) {
+ dhd_bridge_dev_set(dhd, ifidx, NULL);
+ }
+#endif /* defined(WLDWDS) && defined(FOURADDR_AUTO_BRG) */
+
+ if (!dhd->pub.info->iflist[ifidx]) {
+ /* No matching netdev found */
+ DHD_ERROR(("Netdev not found! Do nothing.\n"));
+ goto done;
+ }
+#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+ if (if_event->event.ifidx > 0) {
+ /* Do the post interface del ops */
+ if (wl_cfg80211_post_ifdel(dhd->pub.info->iflist[ifidx]->net,
+ true, if_event->event.ifidx) != 0) {
+ DHD_TRACE(("Post ifdel ops failed. Returning \n"));
+ goto done;
+ }
+ }
+#else
+ /* For non-cfg80211 drivers */
+ dhd_remove_if(&dhd->pub, ifidx, TRUE);
+#ifdef DHD_AWDL
+ if (if_event->event.opcode == WLC_E_IF_DEL &&
+ if_event->event.role == WLC_E_IF_ROLE_AWDL) {
+ dhd->pub.awdl_ifidx = 0;
+ }
+#endif /* DHD_AWDL */
+
+#endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
+
+done:
+ MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ dhd_net_if_unlock_local(dhd);
+}
+
+#ifdef DHD_UPDATE_INTF_MAC
+static void
+dhd_ifupdate_event_handler(void *handle, void *event_info, u8 event)
+{
+ dhd_info_t *dhd = handle;
+ int ifidx;
+ dhd_if_event_t *if_event = event_info;
+
+ if (event != DHD_WQ_WORK_IF_UPDATE) {
+ DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
+ return;
+ }
+
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
+ return;
+ }
+
+ if (!if_event) {
+ DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
+ return;
+ }
+
+ dhd_net_if_lock_local(dhd);
+ DHD_OS_WAKE_LOCK(&dhd->pub);
+
+ ifidx = if_event->event.ifidx;
+ DHD_TRACE(("%s: Update interface with idx %d\n", __FUNCTION__, ifidx));
+
+ dhd_op_if_update(&dhd->pub, ifidx);
+
+ MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
+
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ dhd_net_if_unlock_local(dhd);
+}
+
+int dhd_op_if_update(dhd_pub_t *dhdpub, int ifidx)
+{
+ dhd_info_t * dhdinfo = NULL;
+ dhd_if_t * ifp = NULL;
+ int ret = 0;
+ char buf[128];
+
+ if ((NULL==dhdpub)||(NULL==dhdpub->info)) {
+ DHD_ERROR(("%s: *** DHD handler is NULL!\n", __FUNCTION__));
+ return -1;
+ } else {
+ dhdinfo = (dhd_info_t *)dhdpub->info;
+ ifp = dhdinfo->iflist[ifidx];
+ if (NULL==ifp) {
+ DHD_ERROR(("%s: *** ifp handler is NULL!\n", __FUNCTION__));
+ return -2;
+ }
+ }
+
+ DHD_TRACE(("%s: idx %d\n", __FUNCTION__, ifidx));
+ // Get MAC address
+ strcpy(buf, "cur_etheraddr");
+ ret = dhd_wl_ioctl_cmd(&dhdinfo->pub, WLC_GET_VAR, buf, sizeof(buf), FALSE, ifp->idx);
+ if (0>ret) {
+		DHD_ERROR(("Failed to update the MAC address for itf=%s, ret=%d\n", ifp->name, ret));
+ // avoid collision
+ dhdinfo->iflist[ifp->idx]->mac_addr[5] += 1;
+		// force a locally administered address
+ ETHER_SET_LOCALADDR(&dhdinfo->iflist[ifp->idx]->mac_addr);
+ } else {
+ DHD_EVENT(("Got mac for itf %s, idx %d, MAC=%02X:%02X:%02X:%02X:%02X:%02X\n",
+ ifp->name, ifp->idx,
+ (unsigned char)buf[0], (unsigned char)buf[1], (unsigned char)buf[2],
+ (unsigned char)buf[3], (unsigned char)buf[4], (unsigned char)buf[5]));
+ memcpy(dhdinfo->iflist[ifp->idx]->mac_addr, buf, ETHER_ADDR_LEN);
+ if (dhdinfo->iflist[ifp->idx]->net) {
+ memcpy(dhdinfo->iflist[ifp->idx]->net->dev_addr, buf, ETHER_ADDR_LEN);
+ }
+ }
+
+ return ret;
+}
+#endif /* DHD_UPDATE_INTF_MAC */
+
+static void
+dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event)
+{
+ dhd_info_t *dhd = handle;
+ dhd_if_t *ifp = event_info;
+
+ if (event != DHD_WQ_WORK_SET_MAC) {
+		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
+		return;
+	}
+
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
+ return;
+ }
+
+ dhd_net_if_lock_local(dhd);
+ DHD_OS_WAKE_LOCK(&dhd->pub);
+
+ // terence 20160907: fix for not able to set mac when wlan0 is down
+ if (ifp == NULL || !ifp->set_macaddress) {
+ goto done;
+ }
+	if (!dhd->pub.up) {
+ DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
+ goto done;
+ }
+
+ ifp->set_macaddress = FALSE;
+
+#ifdef DHD_NOTIFY_MAC_CHANGED
+ rtnl_lock();
+#endif /* DHD_NOTIFY_MAC_CHANGED */
+
+ if (_dhd_set_mac_address(dhd, ifp->idx, ifp->mac_addr, TRUE) == 0)
+ DHD_INFO(("%s: MACID is overwritten\n", __FUNCTION__));
+ else
+ DHD_ERROR(("%s: _dhd_set_mac_address() failed\n", __FUNCTION__));
+
+#ifdef DHD_NOTIFY_MAC_CHANGED
+ rtnl_unlock();
+#endif /* DHD_NOTIFY_MAC_CHANGED */
+
+done:
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ dhd_net_if_unlock_local(dhd);
+}
+
+static void
+dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event)
+{
+ dhd_info_t *dhd = handle;
+ int ifidx = (int)((long int)event_info);
+ dhd_if_t *ifp = NULL;
+
+ if (event != DHD_WQ_WORK_SET_MCAST_LIST) {
+ DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
+ return;
+ }
+
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
+ return;
+ }
+
+ dhd_net_if_lock_local(dhd);
+ DHD_OS_WAKE_LOCK(&dhd->pub);
+
+ ifp = dhd->iflist[ifidx];
+
+ if (ifp == NULL || !dhd->pub.up) {
+ DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
+ goto done;
+ }
+
+ ifidx = ifp->idx;
+
+#ifdef MCAST_LIST_ACCUMULATION
+ ifidx = 0;
+#endif /* MCAST_LIST_ACCUMULATION */
+
+ _dhd_set_multicast_list(dhd, ifidx);
+ DHD_INFO(("%s: set multicast list for if %d\n", __FUNCTION__, ifidx));
+
+done:
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ dhd_net_if_unlock_local(dhd);
+}
+
+static int
+dhd_set_mac_address(struct net_device *dev, void *addr)
+{
+ int ret = 0;
+
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ struct sockaddr *sa = (struct sockaddr *)addr;
+ int ifidx;
+ dhd_if_t *dhdif;
+#ifdef WL_STATIC_IF
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+#endif /* WL_STATIC_IF */
+ dhd_pub_t *dhdp = &dhd->pub;
+
+ BCM_REFERENCE(ifidx);
+
+ DHD_TRACE(("%s \n", __func__));
+
+ dhdif = dhd_get_ifp_by_ndev(dhdp, dev);
+ if (!dhdif) {
+ return -ENODEV;
+ }
+ ifidx = dhdif->idx;
+ dhd_net_if_lock_local(dhd);
+ memcpy(dhdif->mac_addr, sa->sa_data, ETHER_ADDR_LEN);
+ dhdif->set_macaddress = TRUE;
+ dhd_net_if_unlock_local(dhd);
+
+ WL_MSG(dev->name, "macaddr = %pM\n", dhdif->mac_addr);
+#ifdef WL_CFG80211
+ /* Check wdev->iftype for the role */
+ if (wl_cfg80211_macaddr_sync_reqd(dev)) {
+		/* The supplicant and certain user-layer applications expect the macaddress
+		 * to be set once the context returns, so set it from the same context
+ */
+#ifdef WL_STATIC_IF
+ if (wl_cfg80211_static_if(cfg, dev) && !(dev->flags & IFF_UP)) {
+			/* In the softap case, the macaddress will be applied before interface
+			 * up, and hence cur_etheraddr can't be set at this stage (no fw iface
+			 * available). Store the address and return; the macaddr will be applied
+			 * from the interface create context.
+ */
+ (void)memcpy_s(dev->dev_addr, ETH_ALEN, dhdif->mac_addr, ETH_ALEN);
+#ifdef DHD_NOTIFY_MAC_CHANGED
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0))
+ dev_open(dev, NULL);
+#else
+ dev_open(dev);
+#endif
+#endif /* DHD_NOTIFY_MAC_CHANGED */
+ return ret;
+ }
+#endif /* WL_STATIC_IF */
+ wl_cfg80211_handle_macaddr_change(dev, dhdif->mac_addr);
+ return _dhd_set_mac_address(dhd, ifidx, dhdif->mac_addr, TRUE);
+ }
+#endif /* WL_CFG80211 */
+
+ dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhdif, DHD_WQ_WORK_SET_MAC,
+ dhd_set_mac_addr_handler, DHD_WQ_WORK_PRIORITY_LOW);
+ return ret;
+}
+
+static void
+dhd_set_multicast_list(struct net_device *dev)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ int ifidx;
+
+ ifidx = dhd_net2idx(dhd, dev);
+ if (ifidx == DHD_BAD_IF)
+ return;
+
+ dhd->iflist[ifidx]->set_multicast = TRUE;
+ dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)((long int)ifidx),
+ DHD_WQ_WORK_SET_MCAST_LIST, dhd_set_mcast_list_handler, DHD_WQ_WORK_PRIORITY_LOW);
+
+ // terence 20160907: fix for not able to set mac when wlan0 is down
+ dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhd->iflist[ifidx],
+ DHD_WQ_WORK_SET_MAC, dhd_set_mac_addr_handler, DHD_WQ_WORK_PRIORITY_LOW);
+}
+
+#ifdef DHD_UCODE_DOWNLOAD
+/* Get ucode path */
+char *
+dhd_get_ucode_path(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd = dhdp->info;
+ return dhd->uc_path;
+}
+#endif /* DHD_UCODE_DOWNLOAD */
+
+#ifdef PROP_TXSTATUS
+int
+dhd_os_wlfc_block(dhd_pub_t *pub)
+{
+ dhd_info_t *di = (dhd_info_t *)(pub->info);
+ ASSERT(di != NULL);
+ /* terence 20161229: don't do spin lock if proptx not enabled */
+ if (disable_proptx)
+ return 1;
+#ifdef BCMDBUS
+ spin_lock_irqsave(&di->wlfc_spinlock, di->wlfc_lock_flags);
+#else
+ spin_lock_bh(&di->wlfc_spinlock);
+#endif /* BCMDBUS */
+ return 1;
+}
+
+int
+dhd_os_wlfc_unblock(dhd_pub_t *pub)
+{
+ dhd_info_t *di = (dhd_info_t *)(pub->info);
+
+ ASSERT(di != NULL);
+ /* terence 20161229: don't do spin lock if proptx not enabled */
+ if (disable_proptx)
+ return 1;
+#ifdef BCMDBUS
+ spin_unlock_irqrestore(&di->wlfc_spinlock, di->wlfc_lock_flags);
+#else
+ spin_unlock_bh(&di->wlfc_spinlock);
+#endif /* BCMDBUS */
+ return 1;
+}
+
+#endif /* PROP_TXSTATUS */
+
+#if defined(WL_MONITOR) && defined(BCMSDIO)
+static void
+dhd_rx_mon_pkt_sdio(dhd_pub_t *dhdp, void *pkt, int ifidx);
+bool
+dhd_monitor_enabled(dhd_pub_t *dhd, int ifidx);
+#endif /* WL_MONITOR && BCMSDIO */
+
+/* This routine does not support the packet chain feature; currently it is
+ * tested only for the proxy ARP feature
+ */
+int dhd_sendup(dhd_pub_t *dhdp, int ifidx, void *p)
+{
+ struct sk_buff *skb;
+ void *skbhead = NULL;
+ void *skbprev = NULL;
+ dhd_if_t *ifp;
+ ASSERT(!PKTISCHAINED(p));
+ skb = PKTTONATIVE(dhdp->osh, p);
+
+ ifp = dhdp->info->iflist[ifidx];
+ skb->dev = ifp->net;
+ skb->protocol = eth_type_trans(skb, skb->dev);
+
+ if (in_interrupt()) {
+ bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
+ __FUNCTION__, __LINE__);
+ netif_rx(skb);
+ } else {
+ if (dhdp->info->rxthread_enabled) {
+ if (!skbhead) {
+ skbhead = skb;
+ } else {
+ PKTSETNEXT(dhdp->osh, skbprev, skb);
+ }
+ skbprev = skb;
+ } else {
+ /* If the receive is not processed inside an ISR,
+ * the softirqd must be woken explicitly to service
+ * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
+ * by netif_rx_ni(), but in earlier kernels, we need
+ * to do it manually.
+ */
+ bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
+ __FUNCTION__, __LINE__);
+#if defined(WL_MONITOR) && defined(BCMSDIO)
+ if (dhd_monitor_enabled(dhdp, ifidx))
+ dhd_rx_mon_pkt_sdio(dhdp, skb, ifidx);
+ else
+#endif /* WL_MONITOR && BCMSDIO */
+ netif_rx_ni(skb);
+ }
+ }
+
+ if (dhdp->info->rxthread_enabled && skbhead)
+ dhd_sched_rxf(dhdp, skbhead);
+
+ return BCME_OK;
+}
+
+void
+dhd_handle_pktdata(dhd_pub_t *dhdp, int ifidx, void *pkt, uint8 *pktdata, uint32 pktid,
+ uint32 pktlen, uint16 *pktfate, uint8 *dhd_udr, bool tx, int pkt_wake, bool pkt_log)
+{
+ struct ether_header *eh;
+ uint16 ether_type;
+ uint32 pkthash;
+ uint8 pkt_type = PKT_TYPE_DATA;
+
+ if (!pktdata || pktlen < ETHER_HDR_LEN) {
+ return;
+ }
+
+ eh = (struct ether_header *)pktdata;
+ ether_type = ntoh16(eh->ether_type);
+
+ /* Check packet type */
+ if (dhd_check_ip_prot(pktdata, ether_type)) {
+ if (dhd_check_dhcp(pktdata)) {
+ pkt_type = PKT_TYPE_DHCP;
+ } else if (dhd_check_icmp(pktdata)) {
+ pkt_type = PKT_TYPE_ICMP;
+ } else if (dhd_check_dns(pktdata)) {
+ pkt_type = PKT_TYPE_DNS;
+ }
+ }
+ else if (dhd_check_arp(pktdata, ether_type)) {
+ pkt_type = PKT_TYPE_ARP;
+ }
+ else if (ether_type == ETHER_TYPE_802_1X) {
+ pkt_type = PKT_TYPE_EAP;
+ }
+
+#ifdef DHD_SBN
+ /* Set UDR based on packet type */
+ if (dhd_udr && (pkt_type == PKT_TYPE_DHCP ||
+ pkt_type == PKT_TYPE_DNS ||
+ pkt_type == PKT_TYPE_ARP)) {
+ *dhd_udr = TRUE;
+ }
+#endif /* DHD_SBN */
+
+#ifdef DHD_PKT_LOGGING
+#ifdef DHD_SKIP_PKTLOGGING_FOR_DATA_PKTS
+ if (pkt_type != PKT_TYPE_DATA)
+#endif
+ {
+ if (pkt_log) {
+ if (tx) {
+ if (pktfate) {
+ /* Tx status */
+ DHD_PKTLOG_TXS(dhdp, pkt, pktdata, pktid, *pktfate);
+ } else {
+ /* Tx packet */
+ DHD_PKTLOG_TX(dhdp, pkt, pktdata, pktid);
+ }
+ pkthash = __dhd_dbg_pkt_hash((uintptr_t)pkt, pktid);
+ } else {
+ struct sk_buff *skb = (struct sk_buff *)pkt;
+ if (pkt_wake) {
+ DHD_PKTLOG_WAKERX(dhdp, skb, pktdata);
+ } else {
+ DHD_PKTLOG_RX(dhdp, skb, pktdata);
+ }
+ }
+ }
+ }
+#endif /* DHD_PKT_LOGGING */
+
+ /* Dump packet data */
+ if (!tx) {
+ switch (pkt_type) {
+ case PKT_TYPE_DHCP:
+ dhd_dhcp_dump(dhdp, ifidx, pktdata, tx, &pkthash, pktfate);
+ break;
+ case PKT_TYPE_ICMP:
+ dhd_icmp_dump(dhdp, ifidx, pktdata, tx, &pkthash, pktfate);
+ break;
+ case PKT_TYPE_DNS:
+ dhd_dns_dump(dhdp, ifidx, pktdata, tx, &pkthash, pktfate);
+ break;
+ case PKT_TYPE_ARP:
+ dhd_arp_dump(dhdp, ifidx, pktdata, tx, &pkthash, pktfate);
+ break;
+ case PKT_TYPE_EAP:
+ dhd_dump_eapol_message(dhdp, ifidx, pktdata, pktlen, tx, &pkthash, pktfate);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+int
+BCMFASTPATH(__dhd_sendpkt)(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
+{
+ int ret = BCME_OK;
+ dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
+ struct ether_header *eh = NULL;
+ uint8 pkt_flow_prio;
+
+#if (defined(DHD_L2_FILTER) || (defined(BCM_ROUTER_DHD) && defined(QOS_MAP_SET)))
+ dhd_if_t *ifp = dhd_get_ifp(dhdp, ifidx);
+#endif /* DHD_L2_FILTER || (BCM_ROUTER_DHD && QOS_MAP_SET) */
+
+ /* Reject if down */
+ if (!dhdp->up || (dhdp->busstate == DHD_BUS_DOWN)) {
+ /* free the packet here since the caller won't */
+ PKTCFREE(dhdp->osh, pktbuf, TRUE);
+ return -ENODEV;
+ }
+
+#ifdef PCIE_FULL_DONGLE
+ if (dhdp->busstate == DHD_BUS_SUSPEND) {
+ DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__));
+ PKTCFREE(dhdp->osh, pktbuf, TRUE);
+ return NETDEV_TX_BUSY;
+ }
+#endif /* PCIE_FULL_DONGLE */
+
+ /* Reject if pktlen > MAX_MTU_SZ */
+ if (PKTLEN(dhdp->osh, pktbuf) > MAX_MTU_SZ) {
+ /* free the packet here since the caller won't */
+ dhdp->tx_big_packets++;
+ PKTCFREE(dhdp->osh, pktbuf, TRUE);
+ return BCME_ERROR;
+ }
+
+#ifdef DHD_L2_FILTER
+	/* If dhcp_unicast is enabled, we need to convert broadcast
+	 * DHCP ACK/REPLY packets to unicast.
+	 */
+ if (ifp->dhcp_unicast) {
+ uint8* mac_addr;
+ uint8* ehptr = NULL;
+ int ret;
+ ret = bcm_l2_filter_get_mac_addr_dhcp_pkt(dhdp->osh, pktbuf, ifidx, &mac_addr);
+ if (ret == BCME_OK) {
+			/* If the given mac address has a valid entry in the sta list,
+			 * rewrite the packet's destination mac with it
+ */
+ if (dhd_find_sta(dhdp, ifidx, mac_addr)) {
+ ehptr = PKTDATA(dhdp->osh, pktbuf);
+ bcopy(mac_addr, ehptr + ETHER_DEST_OFFSET, ETHER_ADDR_LEN);
+ }
+ }
+ }
+
+ if (ifp->grat_arp && DHD_IF_ROLE_AP(dhdp, ifidx)) {
+ if (bcm_l2_filter_gratuitous_arp(dhdp->osh, pktbuf) == BCME_OK) {
+ PKTCFREE(dhdp->osh, pktbuf, TRUE);
+ return BCME_ERROR;
+ }
+ }
+
+ if (ifp->parp_enable && DHD_IF_ROLE_AP(dhdp, ifidx)) {
+ ret = dhd_l2_filter_pkt_handle(dhdp, ifidx, pktbuf, TRUE);
+
+ /* Drop the packets if l2 filter has processed it already
+ * otherwise continue with the normal path
+ */
+ if (ret == BCME_OK) {
+ PKTCFREE(dhdp->osh, pktbuf, TRUE);
+ return BCME_ERROR;
+ }
+ }
+#endif /* DHD_L2_FILTER */
+ /* Update multicast statistic */
+ if (PKTLEN(dhdp->osh, pktbuf) >= ETHER_HDR_LEN) {
+ uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
+ eh = (struct ether_header *)pktdata;
+
+ if (ETHER_ISMULTI(eh->ether_dhost))
+ dhdp->tx_multicast++;
+ if (ntoh16(eh->ether_type) == ETHER_TYPE_802_1X) {
+#ifdef DHD_LOSSLESS_ROAMING
+ uint8 prio = (uint8)PKTPRIO(pktbuf);
+
+ /* back up 802.1x's priority */
+ dhdp->prio_8021x = prio;
+#endif /* DHD_LOSSLESS_ROAMING */
+ DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_EAPOL_FRAME_TRANSMIT_REQUESTED);
+ atomic_inc(&dhd->pend_8021x_cnt);
+#if defined(WL_CFG80211) && defined (WL_WPS_SYNC)
+ wl_handle_wps_states(dhd_idx2net(dhdp, ifidx),
+ pktdata, PKTLEN(dhdp->osh, pktbuf), TRUE);
+#endif /* WL_CFG80211 && WL_WPS_SYNC */
+#ifdef EAPOL_RESEND
+ wl_ext_backup_eapol_txpkt(dhdp, ifidx, pktbuf);
+#endif /* EAPOL_RESEND */
+ }
+ dhd_dump_pkt(dhdp, ifidx, pktdata,
+ (uint32)PKTLEN(dhdp->osh, pktbuf), TRUE, NULL, NULL);
+ } else {
+ PKTCFREE(dhdp->osh, pktbuf, TRUE);
+ return BCME_ERROR;
+ }
+
+#if (defined(BCM_ROUTER_DHD) && defined(QOS_MAP_SET))
+ if (ifp->qosmap_up_table_enable) {
+ pktsetprio_qms(pktbuf, ifp->qosmap_up_table, FALSE);
+ }
+ else
+#endif
+ {
+ /* Look into the packet and update the packet priority */
+#ifndef PKTPRIO_OVERRIDE
+ /* XXX RB:6270 Ignore skb->priority from TCP/IP stack */
+ if (PKTPRIO(pktbuf) == 0)
+#endif /* !PKTPRIO_OVERRIDE */
+ {
+#if (!defined(BCM_ROUTER_DHD) && defined(QOS_MAP_SET))
+ pktsetprio_qms(pktbuf, wl_get_up_table(dhdp, ifidx), FALSE);
+#else
+ /* For LLR, pkt prio will be changed to 7(NC) here */
+ pktsetprio(pktbuf, FALSE);
+#endif /* QOS_MAP_SET */
+ }
+#ifndef PKTPRIO_OVERRIDE
+ else {
+ /* Some protocols like OZMO use priority values from 256..263.
+			 * These are magic values to indicate a specific 802.1d priority.
+			 * Make sure that the priority field is in the range 0..7.
+ */
+ PKTSETPRIO(pktbuf, PKTPRIO(pktbuf) & 0x7);
+ }
+#endif /* !PKTPRIO_OVERRIDE */
+ }
+
+#if defined(BCM_ROUTER_DHD)
+ traffic_mgmt_pkt_set_prio(dhdp, pktbuf);
+
+#endif /* BCM_ROUTER_DHD */
+
+ BCM_REFERENCE(pkt_flow_prio);
+ /* Intercept and create Socket level statistics */
+ /*
+	 * TODO: Somehow, moving this code block above the pktsetprio code
+	 * resets the priority back to 0, but this does not happen for
+	 * packets generated from iperf using the -S option. Can't understand why.
+ */
+ dhd_update_sock_flows(dhd, pktbuf);
+
+#ifdef SUPPORT_SET_TID
+ dhd_set_tid_based_on_uid(dhdp, pktbuf);
+#endif /* SUPPORT_SET_TID */
+
+#ifdef PCIE_FULL_DONGLE
+ /*
+	 * Look up the per-interface hash table for a matching flowring. If one is not
+ * available, allocate a unique flowid and add a flowring entry.
+ * The found or newly created flowid is placed into the pktbuf's tag.
+ */
+
+#ifdef DHD_TX_PROFILE
+ if (dhdp->tx_profile_enab && dhdp->num_profiles > 0 &&
+ dhd_protocol_matches_profile(PKTDATA(dhdp->osh, pktbuf),
+ PKTLEN(dhdp->osh, pktbuf), dhdp->protocol_filters,
+ dhdp->host_sfhllc_supported)) {
+ /* we only have support for one tx_profile at the moment */
+
+ /* tagged packets must be put into TID 6 */
+ pkt_flow_prio = PRIO_8021D_VO;
+ } else
+#endif /* defined(DHD_TX_PROFILE) */
+ {
+ pkt_flow_prio = dhdp->flow_prio_map[(PKTPRIO(pktbuf))];
+ }
+
+ ret = dhd_flowid_update(dhdp, ifidx, pkt_flow_prio, pktbuf);
+ if (ret != BCME_OK) {
+ PKTCFREE(dhd->pub.osh, pktbuf, TRUE);
+ return ret;
+ }
+#endif /* PCIE_FULL_DONGLE */
+	/* terence 20150901: Micky added this to adjust the 802.1X priority */
+ /* Set the 802.1X packet with the highest priority 7 */
+ if (dhdp->conf->pktprio8021x >= 0)
+ pktset8021xprio(pktbuf, dhdp->conf->pktprio8021x);
+
+#ifdef PROP_TXSTATUS
+ if (dhd_wlfc_is_supported(dhdp)) {
+ /* store the interface ID */
+ DHD_PKTTAG_SETIF(PKTTAG(pktbuf), ifidx);
+
+ /* store destination MAC in the tag as well */
+ DHD_PKTTAG_SETDSTN(PKTTAG(pktbuf), eh->ether_dhost);
+
+ /* decide which FIFO this packet belongs to */
+ if (ETHER_ISMULTI(eh->ether_dhost))
+ /* one additional queue index (highest AC + 1) is used for bc/mc queue */
+ DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), AC_COUNT);
+ else
+ DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), WME_PRIO2AC(PKTPRIO(pktbuf)));
+ } else
+#endif /* PROP_TXSTATUS */
+ {
+ /* If the protocol uses a data header, apply it */
+ dhd_prot_hdrpush(dhdp, ifidx, pktbuf);
+ }
+
+ /* Use bus module to send data frame */
+#ifdef PROP_TXSTATUS
+ {
+ if (dhd_wlfc_commit_packets(dhdp, (f_commitpkt_t)dhd_bus_txdata,
+ dhdp->bus, pktbuf, TRUE) == WLFC_UNSUPPORTED) {
+ /* non-proptxstatus way */
+#ifdef BCMPCIE
+ ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx);
+#else
+ ret = dhd_bus_txdata(dhdp->bus, pktbuf);
+#endif /* BCMPCIE */
+ }
+ }
+#else
+#ifdef BCMPCIE
+ ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx);
+#else
+ ret = dhd_bus_txdata(dhdp->bus, pktbuf);
+#endif /* BCMPCIE */
+#endif /* PROP_TXSTATUS */
+#ifdef BCMDBUS
+ if (ret)
+ PKTCFREE(dhdp->osh, pktbuf, TRUE);
+#endif /* BCMDBUS */
+
+ return ret;
+}
+
+int
+BCMFASTPATH(dhd_sendpkt)(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
+{
+ int ret = 0;
+ unsigned long flags;
+ dhd_if_t *ifp;
+
+ DHD_GENERAL_LOCK(dhdp, flags);
+ ifp = dhd_get_ifp(dhdp, ifidx);
+ if (!ifp || ifp->del_in_progress) {
+ DHD_ERROR(("%s: ifp:%p del_in_progress:%d\n",
+ __FUNCTION__, ifp, ifp ? ifp->del_in_progress : 0));
+ DHD_GENERAL_UNLOCK(dhdp, flags);
+ PKTCFREE(dhdp->osh, pktbuf, TRUE);
+ return -ENODEV;
+ }
+ if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
+ DHD_ERROR(("%s: returning as busstate=%d\n",
+ __FUNCTION__, dhdp->busstate));
+ DHD_GENERAL_UNLOCK(dhdp, flags);
+ PKTCFREE(dhdp->osh, pktbuf, TRUE);
+ return -ENODEV;
+ }
+ DHD_IF_SET_TX_ACTIVE(ifp, DHD_TX_SEND_PKT);
+ DHD_BUS_BUSY_SET_IN_SEND_PKT(dhdp);
+ DHD_GENERAL_UNLOCK(dhdp, flags);
+
+#ifdef DHD_PCIE_RUNTIMEPM
+ if (dhdpcie_runtime_bus_wake(dhdp, FALSE, __builtin_return_address(0))) {
+ DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__));
+ PKTCFREE(dhdp->osh, pktbuf, TRUE);
+ ret = -EBUSY;
+ goto exit;
+ }
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+ DHD_GENERAL_LOCK(dhdp, flags);
+ if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhdp)) {
+ DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
+ __FUNCTION__, dhdp->busstate, dhdp->dhd_bus_busy_state));
+ DHD_BUS_BUSY_CLEAR_IN_SEND_PKT(dhdp);
+ DHD_IF_CLR_TX_ACTIVE(ifp, DHD_TX_SEND_PKT);
+ dhd_os_tx_completion_wake(dhdp);
+ dhd_os_busbusy_wake(dhdp);
+ DHD_GENERAL_UNLOCK(dhdp, flags);
+ PKTCFREE(dhdp->osh, pktbuf, TRUE);
+ return -ENODEV;
+ }
+ DHD_GENERAL_UNLOCK(dhdp, flags);
+
+ ret = __dhd_sendpkt(dhdp, ifidx, pktbuf);
+
+#ifdef DHD_PCIE_RUNTIMEPM
+exit:
+#endif
+ DHD_GENERAL_LOCK(dhdp, flags);
+ DHD_BUS_BUSY_CLEAR_IN_SEND_PKT(dhdp);
+ DHD_IF_CLR_TX_ACTIVE(ifp, DHD_TX_SEND_PKT);
+ dhd_os_tx_completion_wake(dhdp);
+ dhd_os_busbusy_wake(dhdp);
+ DHD_GENERAL_UNLOCK(dhdp, flags);
+ return ret;
+}
+
+#ifdef DHD_MQ
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+static uint16
+BCMFASTPATH(dhd_select_queue)(struct net_device *net, struct sk_buff *skb,
+ void *accel_priv, select_queue_fallback_t fallback)
+#else
+static uint16
+BCMFASTPATH(dhd_select_queue)(struct net_device *net, struct sk_buff *skb)
+#endif /* LINUX_VERSION_CODE */
+{
+ dhd_info_t *dhd_info = DHD_DEV_INFO(net);
+ dhd_pub_t *dhdp = &dhd_info->pub;
+ uint16 prio = 0;
+
+ BCM_REFERENCE(dhd_info);
+ BCM_REFERENCE(dhdp);
+ BCM_REFERENCE(prio);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+ if (mq_select_disable) {
+ /* if driver side queue selection is disabled via sysfs, call the kernel
+ * supplied fallback function to select the queue, which is usually
+ * '__netdev_pick_tx()' in net/core/dev.c
+ */
+ return fallback(net, skb);
+ }
+#endif /* LINUX_VERSION */
+
+ prio = dhdp->flow_prio_map[skb->priority];
+ if (prio < AC_COUNT)
+ return prio;
+ else
+ return AC_BK;
+}
+#endif /* DHD_MQ */
+
+netdev_tx_t
+BCMFASTPATH(dhd_start_xmit)(struct sk_buff *skb, struct net_device *net)
+{
+ int ret;
+ uint datalen;
+ void *pktbuf;
+ dhd_info_t *dhd = DHD_DEV_INFO(net);
+ dhd_if_t *ifp = NULL;
+ int ifidx;
+ unsigned long flags;
+#if !defined(BCM_ROUTER_DHD)
+ uint8 htsfdlystat_sz = 0;
+#endif /* ! BCM_ROUTER_DHD */
+#ifdef DHD_WMF
+ struct ether_header *eh;
+ uint8 *iph;
+#endif /* DHD_WMF */
+#if defined(DHD_MQ) && defined(DHD_MQ_STATS)
+ int qidx = 0;
+ int cpuid = 0;
+ int prio = 0;
+#endif /* DHD_MQ && DHD_MQ_STATS */
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+#if defined(DHD_MQ) && defined(DHD_MQ_STATS)
+ qidx = skb_get_queue_mapping(skb);
+	/* In a non-preemptible context smp_processor_id() can be used;
+	 * otherwise get_cpu()/put_cpu() should be used
+ */
+ if (!CAN_SLEEP()) {
+ cpuid = smp_processor_id();
+ }
+ else {
+ cpuid = get_cpu();
+ put_cpu();
+ }
+ prio = dhd->pub.flow_prio_map[skb->priority];
+ DHD_TRACE(("%s: Q idx = %d, CPU = %d, prio = %d \n", __FUNCTION__,
+ qidx, cpuid, prio));
+ dhd->pktcnt_qac_histo[qidx][prio]++;
+ dhd->pktcnt_per_ac[prio]++;
+ dhd->cpu_qstats[qidx][cpuid]++;
+#endif /* DHD_MQ && DHD_MQ_STATS */
+
+ if (dhd_query_bus_erros(&dhd->pub)) {
+ return -ENODEV;
+ }
+
+ DHD_GENERAL_LOCK(&dhd->pub, flags);
+ DHD_BUS_BUSY_SET_IN_TX(&dhd->pub);
+ DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+
+#ifdef DHD_PCIE_RUNTIMEPM
+ if (dhdpcie_runtime_bus_wake(&dhd->pub, FALSE, dhd_start_xmit)) {
+		/* To avoid pkt loss, return NETDEV_TX_BUSY until run-time resume is done. */
+ /* stop the network queue temporarily until resume done */
+ DHD_GENERAL_LOCK(&dhd->pub, flags);
+ if (!dhdpcie_is_resume_done(&dhd->pub)) {
+ dhd_bus_stop_queue(dhd->pub.bus);
+ }
+ DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
+ dhd_os_busbusy_wake(&dhd->pub);
+ DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+ return NETDEV_TX_BUSY;
+ }
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+ DHD_GENERAL_LOCK(&dhd->pub, flags);
+#ifdef BCMPCIE
+ if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(&dhd->pub)) {
+ DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
+ __FUNCTION__, dhd->pub.busstate, dhd->pub.dhd_bus_busy_state));
+ DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
+#ifdef PCIE_FULL_DONGLE
+ /* Stop tx queues if suspend is in progress */
+ if (DHD_BUS_CHECK_ANY_SUSPEND_IN_PROGRESS(&dhd->pub)) {
+ dhd_bus_stop_queue(dhd->pub.bus);
+ }
+#endif /* PCIE_FULL_DONGLE */
+ dhd_os_busbusy_wake(&dhd->pub);
+ DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+ return NETDEV_TX_BUSY;
+ }
+#else
+ if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(&dhd->pub)) {
+ DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
+ __FUNCTION__, dhd->pub.busstate, dhd->pub.dhd_bus_busy_state));
+ }
+#endif
+
+ DHD_OS_WAKE_LOCK(&dhd->pub);
+
+#if defined(DHD_HANG_SEND_UP_TEST)
+ if (dhd->pub.req_hang_type == HANG_REASON_BUS_DOWN) {
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
+ dhd->pub.busstate = DHD_BUS_DOWN;
+ }
+#endif /* DHD_HANG_SEND_UP_TEST */
+
+ /* Reject if down */
+ /* XXX kernel panic issue at first boot-up time:
+ * rmmod without interface down generates an unnecessary hang event.
+ */
+ if (dhd->pub.hang_was_sent || DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(&dhd->pub)) {
+ DHD_ERROR(("%s: xmit rejected pub.up=%d busstate=%d \n",
+ __FUNCTION__, dhd->pub.up, dhd->pub.busstate));
+ dhd_tx_stop_queues(net);
+#if defined(OEM_ANDROID)
+ /* Send Event when bus down detected during data session */
+ if (dhd->pub.up && !dhd->pub.hang_was_sent && !DHD_BUS_CHECK_REMOVE(&dhd->pub)) {
+ DHD_ERROR(("%s: Event HANG sent up\n", __FUNCTION__));
+ dhd->pub.hang_reason = HANG_REASON_BUS_DOWN;
+ net_os_send_hang_message(net);
+ }
+#endif /* OEM_ANDROID */
+ DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
+ dhd_os_busbusy_wake(&dhd->pub);
+ DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ return NETDEV_TX_BUSY;
+ }
+
+ ifp = DHD_DEV_IFP(net);
+ ifidx = DHD_DEV_IFIDX(net);
+#ifdef DHD_BUZZZ_LOG_ENABLED
+ BUZZZ_LOG(START_XMIT_BGN, 2, (uint32)ifidx, (uintptr)skb);
+#endif /* DHD_BUZZZ_LOG_ENABLED */
+ if (ifidx == DHD_BAD_IF) {
+ DHD_ERROR(("%s: bad ifidx %d\n", __FUNCTION__, ifidx));
+ dhd_tx_stop_queues(net);
+ DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
+ dhd_os_busbusy_wake(&dhd->pub);
+ DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ return NETDEV_TX_BUSY;
+ }
+
+ DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+
+ /* If tput test is in progress */
+ if (dhd->pub.tput_data.tput_test_running) {
+ return NETDEV_TX_BUSY;
+ }
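+ /* XXX: note that this early return neither clears the IN_TX busy bit
+ * set above nor releases the wake lock; the tput test path is assumed
+ * to be short-lived
+ */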
+
+ ASSERT(ifidx == dhd_net2idx(dhd, net));
+ ASSERT((ifp != NULL) && ((ifidx < DHD_MAX_IFS) && (ifp == dhd->iflist[ifidx])));
+
+ bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, __FUNCTION__, __LINE__);
+
+ /* re-align socket buffer if "skb->data" is odd address */
+ if (((unsigned long)(skb->data)) & 0x1) {
+ unsigned char *data = skb->data;
+ uint32 length = skb->len;
+ PKTPUSH(dhd->pub.osh, skb, 1);
+ memmove(skb->data, data, length);
+ PKTSETLEN(dhd->pub.osh, skb, length);
+ }
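+ /* The one-byte PKTPUSH plus memmove above shifts the payload down to
+ * an even address; some bus DMA engines are assumed to require 16-bit
+ * alignment of skb->data
+ */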
+
+ datalen = PKTLEN(dhd->pub.osh, skb);
+
+#ifdef TPUT_MONITOR
+ if (dhd->pub.conf->tput_monitor_ms) {
+ dhd_os_sdlock_txq(&dhd->pub);
+ dhd->pub.conf->net_len += datalen;
+ dhd_os_sdunlock_txq(&dhd->pub);
+ if ((dhd->pub.conf->data_drop_mode == XMIT_DROP) &&
+ (PKTLEN(dhd->pub.osh, skb) > 500)) {
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+ }
+#endif
+ /* Make sure there's enough room for any header */
+#if !defined(BCM_ROUTER_DHD)
+ if (skb_headroom(skb) < dhd->pub.hdrlen + htsfdlystat_sz) {
+ struct sk_buff *skb2;
+
+ DHD_INFO(("%s: insufficient headroom\n",
+ dhd_ifname(&dhd->pub, ifidx)));
+ dhd->pub.tx_realloc++;
+
+ bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, __FUNCTION__, __LINE__);
+ skb2 = skb_realloc_headroom(skb, dhd->pub.hdrlen + htsfdlystat_sz);
+
+ dev_kfree_skb(skb);
+ if ((skb = skb2) == NULL) {
+ DHD_ERROR(("%s: skb_realloc_headroom failed\n",
+ dhd_ifname(&dhd->pub, ifidx)));
+ ret = -ENOMEM;
+ goto done;
+ }
+ bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, __FUNCTION__, __LINE__);
+ }
+#endif /* !BCM_ROUTER_DHD */
+
+ /* moved from dhdsdio_sendfromq(): try to orphan the skb early */
+ if (dhd->pub.conf->orphan_move == 2)
+ PKTORPHAN(skb, dhd->pub.conf->tsq);
+ else if (dhd->pub.conf->orphan_move == 3)
+ skb_orphan(skb);
+
+ /* Convert to packet */
+ if (!(pktbuf = PKTFRMNATIVE(dhd->pub.osh, skb))) {
+ DHD_ERROR(("%s: PKTFRMNATIVE failed\n",
+ dhd_ifname(&dhd->pub, ifidx)));
+ bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, __FUNCTION__, __LINE__);
+ dev_kfree_skb_any(skb);
+ ret = -ENOMEM;
+ goto done;
+ }
+
+#ifdef DHD_WET
+ /* wet related packet proto manipulation should be done in DHD
+ since dongle doesn't have complete payload
+ */
+ if (WET_ENABLED(&dhd->pub) &&
+ (dhd_wet_send_proc(dhd->pub.wet_info, pktbuf, &pktbuf) < 0)) {
+ DHD_INFO(("%s:%s: wet send proc failed\n",
+ __FUNCTION__, dhd_ifname(&dhd->pub, ifidx)));
+ PKTFREE(dhd->pub.osh, pktbuf, FALSE);
+ ret = -EFAULT;
+ goto done;
+ }
+#endif /* DHD_WET */
+
+#ifdef DHD_WMF
+ eh = (struct ether_header *)PKTDATA(dhd->pub.osh, pktbuf);
+ iph = (uint8 *)eh + ETHER_HDR_LEN;
+
+ /* WMF processing for multicast packets
+ * Only IPv4 packets are handled
+ */
+ if (ifp->wmf.wmf_enable && (ntoh16(eh->ether_type) == ETHER_TYPE_IP) &&
+ (IP_VER(iph) == IP_VER_4) && (ETHER_ISMULTI(eh->ether_dhost) ||
+ ((IPV4_PROT(iph) == IP_PROT_IGMP) && dhd->pub.wmf_ucast_igmp))) {
+#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
+ void *sdu_clone;
+ bool ucast_convert = FALSE;
+#ifdef DHD_UCAST_UPNP
+ uint32 dest_ip;
+
+ dest_ip = ntoh32(*((uint32 *)(iph + IPV4_DEST_IP_OFFSET)));
+ ucast_convert = dhd->pub.wmf_ucast_upnp && MCAST_ADDR_UPNP_SSDP(dest_ip);
+#endif /* DHD_UCAST_UPNP */
+#ifdef DHD_IGMP_UCQUERY
+ ucast_convert |= dhd->pub.wmf_ucast_igmp_query &&
+ (IPV4_PROT(iph) == IP_PROT_IGMP) &&
+ (*(iph + IPV4_HLEN(iph)) == IGMPV2_HOST_MEMBERSHIP_QUERY);
+#endif /* DHD_IGMP_UCQUERY */
+ if (ucast_convert) {
+ dhd_sta_t *sta;
+ unsigned long flags;
+ struct list_head snapshot_list;
+ struct list_head *wmf_ucforward_list;
+
+ ret = NETDEV_TX_OK;
+
+ /* For non-BCM_GMAC3 platforms we need a snapshot sta_list to
+ * resolve the double DHD_IF_STA_LIST_LOCK call deadlock issue.
+ */
+ wmf_ucforward_list = DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, &snapshot_list);
+
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ /* Convert upnp/igmp query to unicast for each assoc STA */
+ list_for_each_entry(sta, wmf_ucforward_list, list) {
+ GCC_DIAGNOSTIC_POP();
+ /* Skip sending to proxy interfaces of proxySTA */
+ if (sta->psta_prim != NULL && !ifp->wmf_psta_disable) {
+ continue;
+ }
+ if ((sdu_clone = PKTDUP(dhd->pub.osh, pktbuf)) == NULL) {
+ ret = WMF_NOP;
+ break;
+ }
+ dhd_wmf_forward(ifp->wmf.wmfh, sdu_clone, 0, sta, 1);
+ }
+ DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, wmf_ucforward_list);
+
+ DHD_GENERAL_LOCK(&dhd->pub, flags);
+ DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
+ dhd_os_busbusy_wake(&dhd->pub);
+ DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+
+ if (ret == NETDEV_TX_OK)
+ PKTFREE(dhd->pub.osh, pktbuf, TRUE);
+
+ return ret;
+ } else
+#endif /* defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP) */
+ {
+ /* There will be no STA info if the packet is coming from LAN host
+ * Pass as NULL
+ */
+ ret = dhd_wmf_packets_handle(&dhd->pub, pktbuf, NULL, ifidx, 0);
+ switch (ret) {
+ case WMF_TAKEN:
+ case WMF_DROP:
+ /* Either taken by WMF or we should drop it.
+ * Exiting send path
+ */
+
+ DHD_GENERAL_LOCK(&dhd->pub, flags);
+ DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
+ dhd_os_busbusy_wake(&dhd->pub);
+ DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ return NETDEV_TX_OK;
+ default:
+ /* Continue the transmit path */
+ break;
+ }
+ }
+ }
+#endif /* DHD_WMF */
+#ifdef DHD_PSTA
+ /* PSR related packet proto manipulation should be done in DHD
+ * since dongle doesn't have complete payload
+ */
+ if (PSR_ENABLED(&dhd->pub) &&
+#ifdef BCM_ROUTER_DHD
+ !(ifp->primsta_dwds) &&
+#endif /* BCM_ROUTER_DHD */
+ (dhd_psta_proc(&dhd->pub, ifidx, &pktbuf, TRUE) < 0)) {
+
+ DHD_ERROR(("%s:%s: psta send proc failed\n", __FUNCTION__,
+ dhd_ifname(&dhd->pub, ifidx)));
+ }
+#endif /* DHD_PSTA */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0) && defined(DHD_TCP_PACING_SHIFT)
+#ifndef DHD_DEFAULT_TCP_PACING_SHIFT
+#define DHD_DEFAULT_TCP_PACING_SHIFT 7
+#endif /* DHD_DEFAULT_TCP_PACING_SHIFT */
+ if (skb->sk) {
+ sk_pacing_shift_update(skb->sk, DHD_DEFAULT_TCP_PACING_SHIFT);
+ }
+#endif /* LINUX_VERSION_CODE >= 4.19.0 && DHD_TCP_PACING_SHIFT */
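+ /* sk_pacing_shift bounds the data TCP small queues keeps in flight to
+ * roughly pacing_rate >> shift bytes, i.e. 2^-shift seconds' worth:
+ * the kernel default of 10 is ~1ms, while 7 allows ~8ms, which keeps
+ * enough data queued for efficient A-MPDU aggregation over Wi-Fi
+ */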
+
+#ifdef DHDTCPSYNC_FLOOD_BLK
+ if (dhd_tcpdata_get_flag(&dhd->pub, pktbuf) == FLAG_SYNCACK) {
+ ifp->tsyncack_txed ++;
+ }
+#endif /* DHDTCPSYNC_FLOOD_BLK */
+
+#ifdef DHDTCPACK_SUPPRESS
+ if (dhd->pub.tcpack_sup_mode == TCPACK_SUP_HOLD) {
+ /* If this packet has been held or freed, just return */
+ if (dhd_tcpack_hold(&dhd->pub, pktbuf, ifidx)) {
+ ret = 0;
+ goto done;
+ }
+ } else {
+ /* If this packet has replaced another packet and got freed, just return */
+ if (dhd_tcpack_suppress(&dhd->pub, pktbuf)) {
+ ret = 0;
+ goto done;
+ }
+ }
+#endif /* DHDTCPACK_SUPPRESS */
+
+ /*
+ * If Load Balance is enabled queue the packet
+ * else send directly from here.
+ */
+#if defined(DHD_LB_TXP)
+ ret = dhd_lb_sendpkt(dhd, net, ifidx, pktbuf);
+#else
+ ret = __dhd_sendpkt(&dhd->pub, ifidx, pktbuf);
+#endif
+
+done:
+ /* XXX Bus modules may have different "native" error spaces? */
+ /* XXX USB is native linux and it'd be nice to retain errno */
+ /* XXX meaning, but SDIO is not so we'd need an OSL_ERROR. */
+ if (ret) {
+ ifp->stats.tx_dropped++;
+ dhd->pub.tx_dropped++;
+ } else {
+#ifdef PROP_TXSTATUS
+ /* tx_packets counter can be counted only when wlfc is disabled */
+ if (!dhd_wlfc_is_supported(&dhd->pub))
+#endif
+ {
+ dhd->pub.tx_packets++;
+ ifp->stats.tx_packets++;
+ ifp->stats.tx_bytes += datalen;
+ }
+ dhd->pub.actual_tx_pkts++;
+ }
+
+ DHD_GENERAL_LOCK(&dhd->pub, flags);
+ DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
+ DHD_IF_CLR_TX_ACTIVE(ifp, DHD_TX_START_XMIT);
+ dhd_os_tx_completion_wake(&dhd->pub);
+ dhd_os_busbusy_wake(&dhd->pub);
+ DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+#ifdef DHD_BUZZZ_LOG_ENABLED
+ BUZZZ_LOG(START_XMIT_END, 0);
+#endif /* DHD_BUZZZ_LOG_ENABLED */
+ /* Return ok: we always eat the packet */
+ return NETDEV_TX_OK;
+}
+
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+void dhd_rx_wq_wakeup(struct work_struct *ptr)
+{
+ struct dhd_rx_tx_work *work;
+ struct dhd_pub * pub;
+
+ work = container_of(ptr, struct dhd_rx_tx_work, work);
+
+ pub = work->pub;
+
+ DHD_RPM(("%s: ENTER. \n", __FUNCTION__));
+
+ if (atomic_read(&pub->block_bus) || pub->busstate == DHD_BUS_DOWN) {
+ return;
+ }
+
+ DHD_OS_WAKE_LOCK(pub);
+ if (pm_runtime_get_sync(dhd_bus_to_dev(pub->bus)) >= 0) {
+
+ // do nothing but wakeup the bus.
+ pm_runtime_mark_last_busy(dhd_bus_to_dev(pub->bus));
+ pm_runtime_put_autosuspend(dhd_bus_to_dev(pub->bus));
+ }
+ DHD_OS_WAKE_UNLOCK(pub);
+ kfree(work);
+}
+
+void dhd_start_xmit_wq_adapter(struct work_struct *ptr)
+{
+ struct dhd_rx_tx_work *work;
+ netdev_tx_t ret = NETDEV_TX_OK;
+ dhd_info_t *dhd;
+ struct dhd_bus * bus;
+ struct net_device *net;
+
+ work = container_of(ptr, struct dhd_rx_tx_work, work);
+
+ dhd = DHD_DEV_INFO(work->net);
+ net = work->net;
+
+ bus = dhd->pub.bus;
+
+ if (atomic_read(&dhd->pub.block_bus)) {
+ kfree_skb(work->skb);
+ kfree(work);
+ dhd_netif_start_queue(bus);
+ return;
+ }
+
+ if (pm_runtime_get_sync(dhd_bus_to_dev(bus)) >= 0) {
+ ret = dhd_start_xmit(work->skb, work->net);
+ pm_runtime_mark_last_busy(dhd_bus_to_dev(bus));
+ pm_runtime_put_autosuspend(dhd_bus_to_dev(bus));
+ } else {
+ /* bus failed to resume: drop the skb to avoid leaking it */
+ kfree_skb(work->skb);
+ ret = NETDEV_TX_BUSY;
+ }
+ kfree(work);
+ dhd_netif_start_queue(bus);
+
+ /* 'work' was freed above; use the saved net pointer for logging */
+ if (ret)
+ netdev_err(net,
+ "error: dhd_start_xmit():%d\n", ret);
+}
+
+netdev_tx_t
+BCMFASTPATH(dhd_start_xmit_wrapper)(struct sk_buff *skb, struct net_device *net)
+{
+ struct dhd_rx_tx_work *start_xmit_work;
+ netdev_tx_t ret;
+ dhd_info_t *dhd = DHD_DEV_INFO(net);
+
+ if (dhd->pub.busstate == DHD_BUS_SUSPEND) {
+ DHD_RPM(("%s: wakeup the bus using workqueue.\n", __FUNCTION__));
+
+ dhd_netif_stop_queue(dhd->pub.bus);
+
+ start_xmit_work = (struct dhd_rx_tx_work*)
+ kmalloc(sizeof(*start_xmit_work), GFP_ATOMIC);
+
+ if (!start_xmit_work) {
+ netdev_err(net,
+ "error: failed to alloc start_xmit_work\n");
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ INIT_WORK(&start_xmit_work->work, dhd_start_xmit_wq_adapter);
+ start_xmit_work->skb = skb;
+ start_xmit_work->net = net;
+ queue_work(dhd->tx_wq, &start_xmit_work->work);
+ ret = NET_XMIT_SUCCESS;
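+ /* The skb is now owned by the queued work item; report success so
+ * the stack does not retry while the bus is resuming
+ */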
+
+ } else if (dhd->pub.busstate == DHD_BUS_DATA) {
+ ret = dhd_start_xmit(skb, net);
+ } else {
+ /* when bus is down */
+ ret = -ENODEV;
+ }
+
+exit:
+ return ret;
+}
+
+void
+dhd_bus_wakeup_work(dhd_pub_t *dhdp)
+{
+ struct dhd_rx_tx_work *rx_work;
+ dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+
+ rx_work = kmalloc(sizeof(*rx_work), GFP_ATOMIC);
+ if (!rx_work) {
+ DHD_ERROR(("%s: start_rx_work alloc error. \n", __FUNCTION__));
+ return;
+ }
+
+ INIT_WORK(&rx_work->work, dhd_rx_wq_wakeup);
+ rx_work->pub = dhdp;
+ queue_work(dhd->rx_wq, &rx_work->work);
+
+}
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+
+static void
+__dhd_txflowcontrol(dhd_pub_t *dhdp, struct net_device *net, bool state)
+{
+ if (state == ON) {
+ if (!netif_queue_stopped(net)) {
+ DHD_INFO(("%s: Stop Netif Queue\n", __FUNCTION__));
+ netif_stop_queue(net);
+ } else {
+ DHD_INFO(("%s: Netif Queue already stopped\n", __FUNCTION__));
+ }
+ }
+
+ if (state == OFF) {
+ if (netif_queue_stopped(net)) {
+ DHD_INFO(("%s: Start Netif Queue\n", __FUNCTION__));
+ netif_wake_queue(net);
+ } else {
+ DHD_INFO(("%s: Netif Queue already started\n", __FUNCTION__));
+ }
+ }
+}
+
+void
+dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool state)
+{
+ struct net_device *net;
+ dhd_info_t *dhd = dhdp->info;
+ unsigned long flags;
+ int i;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ ASSERT(dhd);
+
+#ifdef DHD_LOSSLESS_ROAMING
+ /* block flowcontrol during roaming */
+ if ((dhdp->dequeue_prec_map == (1 << dhdp->flow_prio_map[PRIO_8021D_NC])) && (state == ON))
+ {
+ DHD_ERROR_RLMT(("%s: Roaming in progress, cannot stop network queue (0x%x:%d)\n",
+ __FUNCTION__, dhdp->dequeue_prec_map, dhdp->flow_prio_map[PRIO_8021D_NC]));
+ return;
+ }
+#endif
+
+ flags = dhd_os_sdlock_txoff(&dhd->pub);
+ if (ifidx == ALL_INTERFACES) {
+ for (i = 0; i < DHD_MAX_IFS; i++) {
+ if (dhd->iflist[i]) {
+ net = dhd->iflist[i]->net;
+ __dhd_txflowcontrol(dhdp, net, state);
+ }
+ }
+ } else {
+ if (dhd->iflist[ifidx]) {
+ net = dhd->iflist[ifidx]->net;
+ __dhd_txflowcontrol(dhdp, net, state);
+ }
+ }
+ dhdp->txoff = state;
+ dhd_os_sdunlock_txoff(&dhd->pub, flags);
+}
+
+#if (defined(BCM_ROUTER_DHD) && defined(HNDCTF))
+
+/* Dump CTF stats */
+void
+dhd_ctf_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
+{
+ dhd_info_t *dhd = dhdp->info;
+
+ bcm_bprintf(strbuf, "CTF stats:\n");
+ ctf_dump(dhd->cih, strbuf);
+}
+
+bool
+BCMFASTPATH(dhd_rx_pkt_chainable)(dhd_pub_t *dhdp, int ifidx)
+{
+ dhd_info_t *dhd = dhdp->info;
+ dhd_if_t *ifp = dhd->iflist[ifidx];
+
+ return ifp->rx_pkt_chainable;
+}
+
+/* Returns FALSE if block ping is enabled */
+bool
+BCMFASTPATH(dhd_l2_filter_chainable)(dhd_pub_t *dhdp, uint8 *eh, int ifidx)
+{
+#ifdef DHD_L2_FILTER
+ dhd_info_t *dhd = dhdp->info;
+ dhd_if_t *ifp = dhd->iflist[ifidx];
+ ASSERT(ifp != NULL);
+ return ifp->block_ping ? FALSE : TRUE;
+#else
+ return TRUE;
+#endif /* DHD_L2_FILTER */
+}
+/* Returns FALSE if WET is enabled */
+bool
+BCMFASTPATH(dhd_wet_chainable)(dhd_pub_t *dhdp)
+{
+#ifdef DHD_WET
+ return (!WET_ENABLED(dhdp));
+#else
+ return TRUE;
+#endif
+}
+
+/* Returns TRUE if hot bridge entry for this da is present */
+bool
+BCMFASTPATH(dhd_ctf_hotbrc_check)(dhd_pub_t *dhdp, uint8 *eh, int ifidx)
+{
+ dhd_info_t *dhd = dhdp->info;
+ dhd_if_t *ifp = dhd->iflist[ifidx];
+
+ ASSERT(ifp != NULL);
+
+ if (!dhd->brc_hot)
+ return FALSE;
+
+ return CTF_HOTBRC_CMP(dhd->brc_hot, (eh), (void *)(ifp->net));
+}
+
+/*
+ * Try to forward the complete packet chain through CTF.
+ * If unsuccessful,
+ * - link the chain by skb->next
+ * - change the pnext to the 2nd packet of the chain
+ * - the chained packets will be sent up to the n/w stack
+ */
+static inline int32
+BCMFASTPATH(dhd_ctf_forward)(dhd_info_t *dhd, struct sk_buff *skb, void **pnext)
+{
+ dhd_pub_t *dhdp = &dhd->pub;
+ void *p, *n;
+ void *old_pnext;
+
+ /* try cut thru first */
+ if (!CTF_ENAB(dhd->cih) || (ctf_forward(dhd->cih, skb, skb->dev) == BCME_ERROR)) {
+ /* Fall back to slow path if ctf is disabled or if ctf_forward fails */
+
+ /* clear skipct flag before sending up */
+ PKTCLRSKIPCT(dhdp->osh, skb);
+
+#ifdef CTFPOOL
+ /* allocate and add a new skb to the pkt pool */
+ if (PKTISFAST(dhdp->osh, skb))
+ osl_ctfpool_add(dhdp->osh);
+
+ /* clear fast buf flag before sending up */
+ PKTCLRFAST(dhdp->osh, skb);
+
+ /* re-init the hijacked field */
+ CTFPOOLPTR(dhdp->osh, skb) = NULL;
+#endif /* CTFPOOL */
+
+ /* link the chained packets by skb->next */
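+ /* The last packet of the chain inherits the original *pnext, so the
+ * caller's rx loop resumes with the packets that followed skb
+ */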
+ if (PKTISCHAINED(skb)) {
+ old_pnext = *pnext;
+ PKTFRMNATIVE(dhdp->osh, skb);
+ p = (void *)skb;
+ FOREACH_CHAINED_PKT(p, n) {
+ PKTCLRCHAINED(dhdp->osh, p);
+ PKTCCLRFLAGS(p);
+ if (p == (void *)skb)
+ PKTTONATIVE(dhdp->osh, p);
+ if (n)
+ PKTSETNEXT(dhdp->osh, p, n);
+ else
+ PKTSETNEXT(dhdp->osh, p, old_pnext);
+ }
+ *pnext = PKTNEXT(dhdp->osh, skb);
+ PKTSETNEXT(dhdp->osh, skb, NULL);
+ }
+ return (BCME_ERROR);
+ }
+
+ return (BCME_OK);
+}
+#endif /* BCM_ROUTER_DHD && HNDCTF */
+
+#ifdef DHD_WMF
+bool
+dhd_is_rxthread_enabled(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd = dhdp->info;
+
+ return dhd->rxthread_enabled;
+}
+#endif /* DHD_WMF */
+
+#ifdef DHD_MCAST_REGEN
+/*
+ * Description: This function is called to do the reverse translation
+ *
+ * Input eh - pointer to the ethernet header
+ */
+int32
+dhd_mcast_reverse_translation(struct ether_header *eh)
+{
+ uint8 *iph;
+ uint32 dest_ip;
+
+ iph = (uint8 *)eh + ETHER_HDR_LEN;
+
+ /* Only IP packets are handled */
+ if (eh->ether_type != hton16(ETHER_TYPE_IP))
+ return BCME_ERROR;
+
+ /* Non-IPv4 multicast packets are not handled */
+ if (IP_VER(iph) != IP_VER_4)
+ return BCME_ERROR;
+
+ /* read the destination IP only after the header checks above */
+ dest_ip = ntoh32(*((uint32 *)(iph + IPV4_DEST_IP_OFFSET)));
+
+ /*
+ * The packet has a multicast IP and unicast MAC. That means
+ * we have to do the reverse translation
+ */
+ if (IPV4_ISMULTI(dest_ip) && !ETHER_ISMULTI(&eh->ether_dhost)) {
+ ETHER_FILL_MCAST_ADDR_FROM_IP(eh->ether_dhost, dest_ip);
+ return BCME_OK;
+ }
+
+ return BCME_ERROR;
+}
+#endif /* DHD_MCAST_REGEN */
+
+void
+dhd_dpc_tasklet_dispatcher_work(struct work_struct * work)
+{
+ struct delayed_work *dw = to_delayed_work(work);
+ struct dhd_info *dhd;
+
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ dhd = container_of(dw, struct dhd_info, dhd_dpc_dispatcher_work);
+ GCC_DIAGNOSTIC_POP();
+
+ DHD_INFO(("%s:\n", __FUNCTION__));
+
+ tasklet_schedule(&dhd->tasklet);
+}
+
+void
+dhd_schedule_delayed_dpc_on_dpc_cpu(dhd_pub_t *dhdp, ulong delay)
+{
+ dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+ int dpc_cpu = atomic_read(&dhd->dpc_cpu);
+ DHD_INFO(("%s:\n", __FUNCTION__));
+
+ /* scheduler will take care of scheduling to appropriate cpu if dpc_cpu is not online */
+ schedule_delayed_work_on(dpc_cpu, &dhd->dhd_dpc_dispatcher_work, delay);
+
+ return;
+}
+
+#ifdef SHOW_LOGTRACE
+static void
+dhd_netif_rx_ni(struct sk_buff * skb)
+{
+ /* Do not call netif_receive_skb as this workqueue scheduler is
+ * not from NAPI. Also, as we are not in IRQ context, do not call
+ * netif_rx; instead call netif_rx_ni (for kernel >= 2.6), which
+ * does netif_rx, disables irqs, raises the NET_RX softirq and
+ * re-enables interrupts
+ */
+ netif_rx_ni(skb);
+}
+
+static int
+dhd_event_logtrace_pkt_process(dhd_pub_t *dhdp, struct sk_buff * skb)
+{
+ dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+ int ret = BCME_OK;
+ uint datalen;
+ bcm_event_msg_u_t evu;
+ void *data = NULL;
+ void *pktdata = NULL;
+ bcm_event_t *pvt_data;
+ uint pktlen;
+
+ DHD_TRACE(("%s:Enter\n", __FUNCTION__));
+
+ /* In dhd_rx_frame, header is stripped using skb_pull
+ * of size ETH_HLEN, so adjust pktlen accordingly
+ */
+ pktlen = skb->len + ETH_HLEN;
+
+ pktdata = (void *)skb_mac_header(skb);
+ ret = wl_host_event_get_data(pktdata, pktlen, &evu);
+
+ if (ret != BCME_OK) {
+ DHD_ERROR(("%s: wl_host_event_get_data err = %d\n",
+ __FUNCTION__, ret));
+ goto exit;
+ }
+
+ datalen = ntoh32(evu.event.datalen);
+
+ pvt_data = (bcm_event_t *)pktdata;
+ data = &pvt_data[1];
+
+ dhd_dbg_trace_evnt_handler(dhdp, data, &dhd->event_data, datalen);
+
+exit:
+ return ret;
+}
+
+/*
+ * dhd_event_logtrace_process_items processes
+ * each skb from evt_trace_queue.
+ * Returns TRUE if more packets are to be processed,
+ * else returns FALSE
+ */
+
+static int
+dhd_event_logtrace_process_items(dhd_info_t *dhd)
+{
+ dhd_pub_t *dhdp;
+ struct sk_buff *skb;
+ uint32 qlen;
+ uint32 process_len;
+
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
+ return 0;
+ }
+
+ dhdp = &dhd->pub;
+
+ if (!dhdp) {
+ DHD_ERROR(("%s: dhd pub is null \n", __FUNCTION__));
+ return 0;
+ }
+
+#ifdef BCMINTERNAL
+#ifdef DHD_FWTRACE
+ /* Check if there is any update in the firmware trace buffer */
+ process_fw_trace_data(dhdp);
+#endif /* DHD_FWTRACE */
+#endif /* BCMINTERNAL */
+ qlen = skb_queue_len(&dhd->evt_trace_queue);
+ process_len = MIN(qlen, DHD_EVENT_LOGTRACE_BOUND);
+
+ /* Run while loop till bound is reached or skb queue is empty */
+ while (process_len--) {
+ int ifid = 0;
+ skb = skb_dequeue(&dhd->evt_trace_queue);
+ if (skb == NULL) {
+ DHD_ERROR(("%s: skb is NULL, which is not valid case\n",
+ __FUNCTION__));
+ break;
+ }
+ BCM_REFERENCE(ifid);
+#ifdef PCIE_FULL_DONGLE
+ /* Check if pkt is from INFO ring or WLC_E_TRACE */
+ ifid = DHD_PKTTAG_IFID((dhd_pkttag_fr_t *)PKTTAG(skb));
+ if (ifid == DHD_DUMMY_INFO_IF) {
+ /* Process logtrace from info rings */
+ dhd_event_logtrace_infobuf_pkt_process(dhdp, skb, &dhd->event_data);
+ } else
+#endif /* PCIE_FULL_DONGLE */
+ {
+ /* Processing WLC_E_TRACE case OR non-PCIE_FULL_DONGLE case */
+ dhd_event_logtrace_pkt_process(dhdp, skb);
+ }
+
+ /* Dummy sleep so that scheduler kicks in after processing any logprints */
+ OSL_SLEEP(0);
+
+ /* Send packet up if logtrace_pkt_sendup is TRUE */
+ if (dhdp->logtrace_pkt_sendup) {
+#ifdef DHD_USE_STATIC_CTRLBUF
+ /* If bufs are allocated via static buf pool
+ * and logtrace_pkt_sendup enabled, make a copy,
+ * free the local one and send the copy up.
+ */
+ void *npkt = PKTDUP(dhdp->osh, skb);
+ /* Clone event and send it up */
+ PKTFREE_STATIC(dhdp->osh, skb, FALSE);
+ if (npkt) {
+ skb = npkt;
+ } else {
+ DHD_ERROR(("skb clone failed. dropping logtrace pkt.\n"));
+ /* Packet is already freed, go to next packet */
+ continue;
+ }
+#endif /* DHD_USE_STATIC_CTRLBUF */
+#ifdef PCIE_FULL_DONGLE
+ /* For infobuf packets, whose ifidx is DHD_DUMMY_INFO_IF,
+ * assign skb->dev to the primary interface's net device
+ * before sending the skb to the network layer
+ */
+ if (ifid == DHD_DUMMY_INFO_IF) {
+ skb = PKTTONATIVE(dhdp->osh, skb);
+ skb->dev = dhd->iflist[0]->net;
+ }
+#endif /* PCIE_FULL_DONGLE */
+ /* Send pkt UP */
+ dhd_netif_rx_ni(skb);
+ } else {
+ /* Don't send up. Free up the packet. */
+#ifdef DHD_USE_STATIC_CTRLBUF
+ PKTFREE_STATIC(dhdp->osh, skb, FALSE);
+#else
+ PKTFREE(dhdp->osh, skb, FALSE);
+#endif /* DHD_USE_STATIC_CTRLBUF */
+ }
+ }
+
+ /* Reschedule if more packets to be processed */
+ return (qlen >= DHD_EVENT_LOGTRACE_BOUND);
+}
+
+#ifdef DHD_USE_KTHREAD_FOR_LOGTRACE
+static int
+dhd_logtrace_thread(void *data)
+{
+ tsk_ctl_t *tsk = (tsk_ctl_t *)data;
+ dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
+ dhd_pub_t *dhdp = (dhd_pub_t *)&dhd->pub;
+ int ret;
+
+ while (1) {
+ dhdp->logtrace_thr_ts.entry_time = OSL_LOCALTIME_NS();
+ if (!binary_sema_down(tsk)) {
+ dhdp->logtrace_thr_ts.sem_down_time = OSL_LOCALTIME_NS();
+ SMP_RD_BARRIER_DEPENDS();
+ if (dhd->pub.dongle_reset == FALSE) {
+ do {
+ /* Check terminated before processing the items */
+ if (tsk->terminated) {
+ DHD_ERROR(("%s: task terminated\n", __FUNCTION__));
+ goto exit;
+ }
+#ifdef EWP_EDL
+ /* check if EDL is being used */
+ if (dhd->pub.dongle_edl_support) {
+ ret = dhd_prot_process_edl_complete(&dhd->pub,
+ &dhd->event_data);
+ } else {
+ ret = dhd_event_logtrace_process_items(dhd);
+ }
+#else
+ ret = dhd_event_logtrace_process_items(dhd);
+#endif /* EWP_EDL */
+ /* if ret > 0, bound has reached so to be fair to other
+ * processes need to yield the scheduler.
+ * The comment above yield()'s definition says:
+ * If you want to use yield() to wait for something,
+ * use wait_event().
+ * If you want to use yield() to be 'nice' for others,
+ * use cond_resched().
+ * If you still want to use yield(), do not!
+ */
+ if (ret > 0) {
+ cond_resched();
+ OSL_SLEEP(DHD_EVENT_LOGTRACE_RESCHEDULE_DELAY_MS);
+ } else if (ret < 0) {
+ DHD_ERROR(("%s: ERROR should not reach here\n",
+ __FUNCTION__));
+ }
+ } while (ret > 0);
+ }
+ if (tsk->flush_ind) {
+ DHD_ERROR(("%s: flushed\n", __FUNCTION__));
+ dhdp->logtrace_thr_ts.flush_time = OSL_LOCALTIME_NS();
+ tsk->flush_ind = 0;
+ complete(&tsk->flushed);
+ }
+ } else {
+ DHD_ERROR(("%s: unexpted break\n", __FUNCTION__));
+ dhdp->logtrace_thr_ts.unexpected_break_time = OSL_LOCALTIME_NS();
+ break;
+ }
+ }
+exit:
+ /* record the exit timestamp first: complete_and_exit() never returns */
+ dhdp->logtrace_thr_ts.complete_time = OSL_LOCALTIME_NS();
+ complete_and_exit(&tsk->completed, 0);
+}
+#else
+static void
+dhd_event_logtrace_process(struct work_struct * work)
+{
+/* Ignore compiler warnings due to -Werror=cast-qual */
+ struct delayed_work *dw = to_delayed_work(work);
+ struct dhd_info *dhd;
+ int ret;
+
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ dhd = container_of(dw, struct dhd_info, event_log_dispatcher_work);
+ GCC_DIAGNOSTIC_POP();
+
+#ifdef EWP_EDL
+ if (dhd->pub.dongle_edl_support) {
+ ret = dhd_prot_process_edl_complete(&dhd->pub, &dhd->event_data);
+ } else {
+ ret = dhd_event_logtrace_process_items(dhd);
+ }
+#else
+ ret = dhd_event_logtrace_process_items(dhd);
+#endif /* EWP_EDL */
+
+ if (ret > 0) {
+ schedule_delayed_work(&(dhd)->event_log_dispatcher_work,
+ msecs_to_jiffies(DHD_EVENT_LOGTRACE_RESCHEDULE_DELAY_MS));
+ }
+ return;
+}
+#endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */
+
+void
+dhd_schedule_logtrace(void *dhd_info)
+{
+ dhd_info_t *dhd = (dhd_info_t *)dhd_info;
+
+#ifdef DHD_USE_KTHREAD_FOR_LOGTRACE
+ if (dhd->thr_logtrace_ctl.thr_pid >= 0) {
+ binary_sema_up(&dhd->thr_logtrace_ctl);
+ } else {
+ DHD_ERROR(("%s: thr_logtrace_ctl(%ld) not inited\n", __FUNCTION__,
+ dhd->thr_logtrace_ctl.thr_pid));
+ }
+#else
+ schedule_delayed_work(&dhd->event_log_dispatcher_work, 0);
+#endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */
+ return;
+}
+
+void
+dhd_cancel_logtrace_process_sync(dhd_info_t *dhd)
+{
+#ifdef DHD_USE_KTHREAD_FOR_LOGTRACE
+ if (dhd->thr_logtrace_ctl.thr_pid >= 0) {
+ PROC_STOP_USING_BINARY_SEMA(&dhd->thr_logtrace_ctl);
+ } else {
+ DHD_ERROR(("%s: thr_logtrace_ctl(%ld) not inited\n", __FUNCTION__,
+ dhd->thr_logtrace_ctl.thr_pid));
+ }
+#else
+ cancel_delayed_work_sync(&dhd->event_log_dispatcher_work);
+#endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */
+}
+
+void
+dhd_flush_logtrace_process(dhd_info_t *dhd)
+{
+#ifdef DHD_USE_KTHREAD_FOR_LOGTRACE
+ if (dhd->thr_logtrace_ctl.thr_pid >= 0) {
+ PROC_FLUSH_USING_BINARY_SEMA(&dhd->thr_logtrace_ctl);
+ } else {
+ DHD_ERROR(("%s: thr_logtrace_ctl(%ld) not inited\n", __FUNCTION__,
+ dhd->thr_logtrace_ctl.thr_pid));
+ }
+#else
+ flush_delayed_work(&dhd->event_log_dispatcher_work);
+#endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */
+}
+
+int
+dhd_init_logtrace_process(dhd_info_t *dhd)
+{
+#ifdef DHD_USE_KTHREAD_FOR_LOGTRACE
+ dhd->thr_logtrace_ctl.thr_pid = DHD_PID_KT_INVALID;
+ PROC_START(dhd_logtrace_thread, dhd, &dhd->thr_logtrace_ctl, 0, "dhd_logtrace_thread");
+ if (dhd->thr_logtrace_ctl.thr_pid < 0) {
+ DHD_ERROR(("%s: init logtrace process failed\n", __FUNCTION__));
+ return BCME_ERROR;
+ } else {
+ DHD_ERROR(("%s: thr_logtrace_ctl(%ld) not inited\n", __FUNCTION__,
+ dhd->thr_logtrace_ctl.thr_pid));
+ }
+#else
+ INIT_DELAYED_WORK(&dhd->event_log_dispatcher_work, dhd_event_logtrace_process);
+#endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */
+ return BCME_OK;
+}
+
+int
+dhd_reinit_logtrace_process(dhd_info_t *dhd)
+{
+#ifdef DHD_USE_KTHREAD_FOR_LOGTRACE
+ /* Re-init only if PROC_STOP from dhd_stop was called
+ * which can be checked via thr_pid
+ */
+ if (dhd->thr_logtrace_ctl.thr_pid < 0) {
+ PROC_START(dhd_logtrace_thread, dhd, &dhd->thr_logtrace_ctl,
+ 0, "dhd_logtrace_thread");
+ if (dhd->thr_logtrace_ctl.thr_pid < 0) {
+ DHD_ERROR(("%s: reinit logtrace process failed\n", __FUNCTION__));
+ return BCME_ERROR;
+ } else {
+ DHD_ERROR(("%s: thr_logtrace_ctl(%ld) not inited\n", __FUNCTION__,
+ dhd->thr_logtrace_ctl.thr_pid));
+ }
+ }
+#else
+ /* No need to re-init for WQ as cancel_delayed_work_sync
+ * will not delete the WQ
+ */
+#endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */
+ return BCME_OK;
+}
+
+void
+dhd_event_logtrace_enqueue(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
+{
+ dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+
+#ifdef PCIE_FULL_DONGLE
+ /* Add ifidx in the PKTTAG */
+ DHD_PKTTAG_SET_IFID((dhd_pkttag_fr_t *)PKTTAG(pktbuf), ifidx);
+#endif /* PCIE_FULL_DONGLE */
+ skb_queue_tail(&dhd->evt_trace_queue, pktbuf);
+
+ dhd_schedule_logtrace(dhd);
+}
+
+#ifdef BCMINTERNAL
+#ifdef DHD_FWTRACE
+void
+dhd_event_logtrace_enqueue_fwtrace(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd = (dhd_info_t *) dhdp->info;
+
+ /* Schedule a kernel thread */
+ dhd_schedule_logtrace(dhd);
+
+ return;
+}
+#endif /* DHD_FWTRACE */
+#endif /* BCMINTERNAL */
+
+void
+dhd_event_logtrace_flush_queue(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+ struct sk_buff *skb;
+
+ while ((skb = skb_dequeue(&dhd->evt_trace_queue)) != NULL) {
+#ifdef DHD_USE_STATIC_CTRLBUF
+ PKTFREE_STATIC(dhdp->osh, skb, FALSE);
+#else
+ PKTFREE(dhdp->osh, skb, FALSE);
+#endif /* DHD_USE_STATIC_CTRLBUF */
+ }
+}
+
+#ifdef EWP_EDL
+void
+dhd_sendup_info_buf(dhd_pub_t *dhdp, uint8 *msg)
+{
+ struct sk_buff *skb = NULL;
+ uint32 pktsize = 0;
+ void *pkt = NULL;
+ info_buf_payload_hdr_t *infobuf = NULL;
+ dhd_info_t *dhd = dhdp->info;
+ uint8 *pktdata = NULL;
+
+ if (!msg)
+ return;
+
+ /* msg = |infobuf_ver(u32)|info_buf_payload_hdr_t|msgtrace_hdr_t|<var len data>| */
+ infobuf = (info_buf_payload_hdr_t *)(msg + sizeof(uint32));
+ pktsize = (uint32)(ltoh16(infobuf->length) + sizeof(info_buf_payload_hdr_t) +
+ sizeof(uint32));
+ pkt = PKTGET(dhdp->osh, pktsize, FALSE);
+ if (!pkt) {
+ DHD_ERROR(("%s: skb alloc failed ! not sending event log up.\n", __FUNCTION__));
+ } else {
+ PKTSETLEN(dhdp->osh, pkt, pktsize);
+ pktdata = PKTDATA(dhdp->osh, pkt);
+ memcpy(pktdata, msg, pktsize);
+ /* For infobuf packets assign skb->dev with
+ * Primary interface n/w device
+ */
+ skb = PKTTONATIVE(dhdp->osh, pkt);
+ skb->dev = dhd->iflist[0]->net;
+ /* Send pkt UP */
+ dhd_netif_rx_ni(skb);
+ }
+}
+#endif /* EWP_EDL */
+#endif /* SHOW_LOGTRACE */
+
+#ifdef BTLOG
+static void
+dhd_bt_log_process(struct work_struct *work)
+{
+ struct dhd_info *dhd;
+ dhd_pub_t *dhdp;
+ struct sk_buff *skb;
+
+ /* Ignore compiler warnings due to -Werror=cast-qual */
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ dhd = container_of(work, struct dhd_info, bt_log_dispatcher_work);
+ GCC_DIAGNOSTIC_POP();
+
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
+ return;
+ }
+
+ dhdp = &dhd->pub;
+
+ if (!dhdp) {
+ DHD_ERROR(("%s: dhd pub is null \n", __FUNCTION__));
+ return;
+ }
+
+ DHD_TRACE(("%s:Enter\n", __FUNCTION__));
+
+ /* Run while(1) loop till all skbs are dequeued */
+ while ((skb = skb_dequeue(&dhd->bt_log_queue)) != NULL) {
+ dhd_bt_log_pkt_process(dhdp, skb);
+#ifdef DHD_USE_STATIC_CTRLBUF
+ PKTFREE_STATIC(dhdp->osh, skb, FALSE);
+#else
+ PKTFREE(dhdp->osh, skb, FALSE);
+#endif /* DHD_USE_STATIC_CTRLBUF */
+ }
+}
+
+void
+dhd_rx_bt_log(dhd_pub_t *dhdp, void *pkt)
+{
+ dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+
+ skb_queue_tail(&dhd->bt_log_queue, pkt);
+
+ /* schedule workqueue to process bt logs */
+ schedule_work(&dhd->bt_log_dispatcher_work);
+}
+#endif /* BTLOG */
+
+#ifdef EWP_EDL
+static void
+dhd_edl_process_work(struct work_struct *work)
+{
+ struct delayed_work *dw = to_delayed_work(work);
+ struct dhd_info *dhd_info;
+ /* Ignore compiler warnings due to -Werror=cast-qual */
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ dhd_info = container_of(dw, struct dhd_info, edl_dispatcher_work);
+ GCC_DIAGNOSTIC_POP();
+
+ if (dhd_info)
+ dhd_prot_process_edl_complete(&dhd_info->pub, &dhd_info->event_data);
+}
+
+void
+dhd_schedule_edl_work(dhd_pub_t *dhdp, uint delay_ms)
+{
+ dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+ schedule_delayed_work(&dhd->edl_dispatcher_work, msecs_to_jiffies(delay_ms));
+}
+#endif /* EWP_EDL */
+
+#ifdef WL_NANHO
+/* forward NAN event to NANHO host module. API returns TRUE if event is consumed by NANHO */
+static bool
+dhd_nho_evt_process(dhd_pub_t *pub, int ifidx, wl_event_msg_t *evt_msg,
+ void *pktdata, uint16 pktlen)
+{
+ uint32 evt_type = ntoh32_ua(&evt_msg->event_type);
+ bool consumed = FALSE;
+
+ if ((evt_type == WLC_E_NAN_CRITICAL) || (evt_type == WLC_E_NAN_NON_CRITICAL)) {
+ bcm_event_t *pvt_data = (bcm_event_t *)pktdata;
+ uint32 event_len = sizeof(wl_event_msg_t) + ntoh32_ua(&evt_msg->datalen);
+
+ bcm_nanho_evt(pub->nanhoi, &pvt_data->event, event_len, &consumed);
+ }
+ return consumed;
+}
+
+static int
+dhd_nho_evt_cb(void *drv_ctx, int ifidx, bcm_event_t *evt, uint16 evt_len)
+{
+ struct sk_buff *p, *skb;
+ dhd_if_t *ifp;
+ dhd_pub_t *dhdp = (dhd_pub_t *)drv_ctx;
+
+ if ((p = PKTGET(dhdp->osh, evt_len, FALSE))) {
+ memcpy(PKTDATA(dhdp->osh, p), (uint8 *)evt, evt_len);
+ skb = PKTTONATIVE(dhdp->osh, p);
+
+ ifp = dhdp->info->iflist[ifidx];
+ if (ifp == NULL) {
+ /* default to main interface */
+ ifp = dhdp->info->iflist[0];
+ }
+ ASSERT(ifp);
+
+ skb->dev = ifp->net;
+ skb->protocol = eth_type_trans(skb, skb->dev);
+
+ /* strip header, count, deliver upward */
+ skb_pull(skb, ETH_HLEN);
+
+ /* send the packet */
+ if (in_interrupt()) {
+ netif_rx(skb);
+ } else {
+ netif_rx_ni(skb);
+ }
+ } else {
+ DHD_ERROR(("NHO: dhd_nho_evt_cb: unable to alloc sk_buf"));
+ return BCME_NOMEM;
+ }
+
+ return BCME_OK;
+}
+#endif /* WL_NANHO */
+
+#ifdef ENABLE_WAKEUP_PKT_DUMP
+static void
+update_wake_pkt_info(struct sk_buff *skb)
+{
+ struct iphdr *ip_header;
+ struct ipv6hdr *ipv6_header;
+ struct udphdr *udp_header;
+ struct tcphdr *tcp_header;
+ uint16 dport = 0;
+
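+ /* temp_raw packs a summary of the wake packet (see the shifts below):
+ * Ethernet type in bits 48+, IP protocol or event type in bits 40+,
+ * multicast flags at bits 38-39, destination port in bits 16+
+ */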
+ ip_header = (struct iphdr *)(skb->data);
+
+ temp_raw |= ((long long)ntoh16(skb->protocol)) << 48;
+
+ DHD_INFO(("eth_hdr(skb)->h_dest : %pM\n", eth_hdr(skb)->h_dest));
+ if (eth_hdr(skb)->h_dest[0] & 0x01) {
+ temp_raw |= (long long)1 << 39;
+ }
+
+ if (ntoh16(skb->protocol) == ETHER_TYPE_BRCM) {
+ wl_event_msg_t event;
+ bcm_event_msg_u_t evu;
+ int ret;
+ uint event_type;
+
+ ret = wl_host_event_get_data(
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
+ skb_mac_header(skb),
+#else
+ skb->mac.raw,
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */
+ skb->len, &evu);
+ if (ret != BCME_OK) {
+ DHD_ERROR(("%s: wl_host_event_get_data err = %d\n",
+ __FUNCTION__, ret));
+ }
+
+ memcpy(&event, &evu.event, sizeof(wl_event_msg_t));
+ event_type = ntoh32_ua((void *)&event.event_type);
+
+ temp_raw |= (long long)event_type << 40;
+ } else if (ntoh16(skb->protocol) == ETHER_TYPE_IP ||
+ ntoh16(skb->protocol) == ETHER_TYPE_IPV6) {
+ if (ip_header->version == 6) {
+ ipv6_header = (struct ipv6hdr *)ip_header;
+ temp_raw |= ((long long)ipv6_header->nexthdr) << 40;
+ dport = 0;
+
+ /* IPv6 multicast destinations are in ff00::/8 */
+ if (ipv6_header->daddr.s6_addr[0] == 0xff) {
+ temp_raw |= (long long)1 << 38;
+ }
+
+ DHD_INFO(("IPv6 [%x]%pI6c > %pI6c:%d\n",
+ ip_header->protocol, &(ipv6_header->saddr.s6_addr),
+ &(ipv6_header->daddr.s6_addr), dport));
+ } else if (ip_header->version == 4) {
+ temp_raw |= ((long long)ip_header->protocol) << 40;
+
+#define IP_HDR_OFFSET ((char *)ip_header + IPV4_HLEN(ip_header))
+ if (ip_header->protocol == IPPROTO_TCP) {
+ tcp_header = (struct tcphdr *)IP_HDR_OFFSET;
+ dport = ntohs(tcp_header->dest);
+ }
+ else if (ip_header->protocol == IPPROTO_UDP) {
+ udp_header = (struct udphdr *)IP_HDR_OFFSET;
+ dport = ntohs(udp_header->dest);
+ }
+
+ if (ipv4_is_multicast(ip_header->daddr)) {
+ temp_raw |= (long long)1 << 38;
+ }
+
+ DHD_INFO(("IP [%x] %pI4 > %pI4:%d\n",
+ ip_header->protocol, &(ip_header->saddr),
+ &(ip_header->daddr), dport));
+ }
+
+ temp_raw |= (long long)dport << 16;
+ }
+}
+#endif /* ENABLE_WAKEUP_PKT_DUMP */
+
+#if defined(BCMPCIE)
+int
+dhd_check_shinfo_nrfrags(dhd_pub_t *dhdp, void *pktbuf,
+ dmaaddr_t *pa, uint32 pktid)
+{
+ struct sk_buff *skb;
+ struct skb_shared_info *shinfo;
+
+ if (!pktbuf)
+ return BCME_ERROR;
+
+ skb = PKTTONATIVE(dhdp->osh, pktbuf);
+ shinfo = skb_shinfo(skb);
+
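+ /* Rx buffers posted to the dongle are assumed to be linear skbs, so a
+ * nonzero nr_frags indicates shared-info corruption
+ */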
+ if (shinfo->nr_frags) {
+#ifdef CONFIG_64BIT
+ DHD_ERROR(("!!Invalid nr_frags: %u pa.loaddr: 0x%llx pa.hiaddr: 0x%llx "
+ "skb: 0x%llx skb_data: 0x%llx skb_head: 0x%llx skb_tail: 0x%llx "
+ "skb_end: 0x%llx skb_len: %u shinfo: 0x%llx pktid: %u\n",
+ shinfo->nr_frags, (uint64)(pa->loaddr), (uint64)(pa->hiaddr),
+ (uint64)skb, (uint64)(skb->data), (uint64)(skb->head), (uint64)(skb->tail),
+ (uint64)(skb->end), skb->len, (uint64)shinfo, pktid));
+#else
+ DHD_ERROR(("!!Invalid nr_frags: %u "
+ "skb: 0x%x skb_data: 0x%x skb_head: 0x%x skb_tail: 0x%x "
+ "skb_end: 0x%x skb_len: %u shinfo: 0x%x pktid: %u\n",
+ shinfo->nr_frags,
+ (uint)skb, (uint)(skb->data), (uint)(skb->head), (uint)(skb->tail),
+ (uint)(skb->end), skb->len, (uint)shinfo, pktid));
+#endif
+ prhex("shinfo", (char*)shinfo, sizeof(struct skb_shared_info));
+ if (!dhd_query_bus_erros(dhdp)) {
+#ifdef DHD_FW_COREDUMP
+ /* Collect socram dump */
+ if (dhdp->memdump_enabled) {
+ /* collect core dump */
+ dhdp->memdump_type = DUMP_TYPE_INVALID_SHINFO_NRFRAGS;
+ dhd_bus_mem_dump(dhdp);
+ } else
+#endif /* DHD_FW_COREDUMP */
+ {
+ shinfo->nr_frags = 0;
+ /* In production case, free the packet and continue
+ * if nfrags is corrupted. Whereas in non-production
+ * case collect memdump and call BUG_ON().
+ */
+ PKTCFREE(dhdp->osh, pktbuf, FALSE);
+ }
+ }
+ return BCME_ERROR;
+ }
+ return BCME_OK;
+}
+#endif /* BCMPCIE */
+
+/** Called when a frame is received by the dongle on interface 'ifidx' */
+void
+dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan)
+{
+ dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+ struct sk_buff *skb;
+ uchar *eth;
+ uint len;
+ void *data, *pnext = NULL;
+ int i;
+ dhd_if_t *ifp;
+ wl_event_msg_t event;
+#if defined(OEM_ANDROID)
+ int tout_rx = 0;
+ int tout_ctrl = 0;
+#endif /* OEM_ANDROID */
+ void *skbhead = NULL;
+ void *skbprev = NULL;
+ uint16 protocol;
+ unsigned char *dump_data;
+#ifdef DHD_MCAST_REGEN
+ uint8 interface_role;
+ if_flow_lkup_t *if_flow_lkup;
+ unsigned long flags;
+#endif
+#ifdef DHD_WAKE_STATUS
+ wake_counts_t *wcp = NULL;
+#endif /* DHD_WAKE_STATUS */
+ int pkt_wake = 0;
+#ifdef ENABLE_DHD_GRO
+ bool dhd_gro_enable = TRUE;
+ struct Qdisc *qdisc = NULL;
+#endif /* ENABLE_DHD_GRO */
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+ BCM_REFERENCE(dump_data);
+ BCM_REFERENCE(pkt_wake);
+
+#ifdef DHD_TPUT_PATCH
+ if (dhdp->conf->pktsetsum)
+ PKTSETSUMGOOD(pktbuf, TRUE);
+#endif
+
+#ifdef ENABLE_DHD_GRO
+ if (ifidx < DHD_MAX_IFS) {
+ ifp = dhd->iflist[ifidx];
+ if (ifp && ifp->net->qdisc) {
+ if (ifp->net->qdisc->ops->cl_ops) {
+ dhd_gro_enable = FALSE;
+ DHD_TRACE(("%s: disable sw gro becasue of"
+ " qdisc tx traffic control\n", __FUNCTION__));
+ }
+
+ if (dev_ingress_queue(ifp->net)) {
+ qdisc = dev_ingress_queue(ifp->net)->qdisc_sleeping;
+ if (qdisc != NULL && (qdisc->flags & TCQ_F_INGRESS)) {
+ dhd_gro_enable = FALSE;
+ DHD_TRACE(("%s: disable sw gro because of"
+ " qdisc rx traffic control\n", __FUNCTION__));
+ }
+ }
+ }
+ }
+#ifdef DHD_GRO_ENABLE_HOST_CTRL
+ if (!dhdp->permitted_gro && dhd_gro_enable) {
+ dhd_gro_enable = FALSE;
+ }
+#endif /* DHD_GRO_ENABLE_HOST_CTRL */
+#endif /* ENABLE_DHD_GRO */
+
+ for (i = 0; pktbuf && i < numpkt; i++, pktbuf = pnext) {
+ struct ether_header *eh;
+
+ pnext = PKTNEXT(dhdp->osh, pktbuf);
+ PKTSETNEXT(dhdp->osh, pktbuf, NULL);
+
+ /* info ring "debug" data, which is not a 802.3 frame, is sent/hacked with a
+ * special ifidx of DHD_DUMMY_INFO_IF. This is just internal to dhd to get the data
+ * from dhd_msgbuf.c:dhd_prot_infobuf_cmplt_process() to here (dhd_rx_frame).
+ */
+ if (ifidx == DHD_DUMMY_INFO_IF) {
+ /* Event msg printing is called from dhd_rx_frame, which runs in tasklet
+ * context in case of PCIe FD; for other buses this will be from
+ * DPC context. If we get a bunch of events from the dongle, printing all
+ * of them from tasklet/DPC context, in the data path at that, is costly.
+ * Also, in newer dongle SW (4359, 4355 onwards) console prints too come as
+ * events with type WLC_E_TRACE.
+ * We'll print these console logs from workqueue context by enqueuing the SKB
+ * here; dequeuing is done in the workqueue and the SKB is freed only if
+ * logtrace_pkt_sendup is TRUE
+ */
+#ifdef SHOW_LOGTRACE
+ dhd_event_logtrace_enqueue(dhdp, ifidx, pktbuf);
+#else /* !SHOW_LOGTRACE */
+ /* If SHOW_LOGTRACE not defined and ifidx is DHD_DUMMY_INFO_IF,
+ * free the PKT here itself
+ */
+#ifdef DHD_USE_STATIC_CTRLBUF
+ PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
+#else
+ PKTFREE(dhdp->osh, pktbuf, FALSE);
+#endif /* DHD_USE_STATIC_CTRLBUF */
+#endif /* SHOW_LOGTRACE */
+ continue;
+ }
+#ifdef DHD_WAKE_STATUS
+#ifdef BCMDBUS
+ wcp = NULL;
+#else
+ pkt_wake = dhd_bus_get_bus_wake(dhdp);
+ wcp = dhd_bus_get_wakecount(dhdp);
+#endif /* BCMDBUS */
+ if (wcp == NULL) {
+ /* If wakeinfo count buffer is null do not update wake count values */
+ pkt_wake = 0;
+ }
+#endif /* DHD_WAKE_STATUS */
+
+ eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
+#ifdef DHD_AWDL
+ if (dhdp->awdl_llc_enabled &&
+ dhdp->awdl_ifidx && ifidx == dhdp->awdl_ifidx) {
+ if (ntoh16(eh->ether_type) < ETHER_TYPE_MIN) {
+ dhd_awdl_llc_to_eth_hdr(dhdp, eh, pktbuf);
+ }
+ }
+#endif /* DHD_AWDL */
+
+ if (dhd->pub.tput_data.tput_test_running &&
+ dhd->pub.tput_data.direction == TPUT_DIR_RX &&
+ ntoh16(eh->ether_type) == ETHER_TYPE_IP) {
+ dhd_tput_test_rx(dhdp, pktbuf);
+ PKTFREE(dhd->pub.osh, pktbuf, FALSE);
+ continue;
+ }
+
+ if (ifidx >= DHD_MAX_IFS) {
+ DHD_ERROR(("%s: ifidx(%d) Out of bound. drop packet\n",
+ __FUNCTION__, ifidx));
+ if (ntoh16(eh->ether_type) == ETHER_TYPE_BRCM) {
+#ifdef DHD_USE_STATIC_CTRLBUF
+ PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
+#else
+ PKTFREE(dhdp->osh, pktbuf, FALSE);
+#endif /* DHD_USE_STATIC_CTRLBUF */
+ } else {
+ PKTCFREE(dhdp->osh, pktbuf, FALSE);
+ }
+ continue;
+ }
+
+ ifp = dhd->iflist[ifidx];
+ if (ifp == NULL) {
+ DHD_ERROR_RLMT(("%s: ifp is NULL. drop packet\n",
+ __FUNCTION__));
+ if (ntoh16(eh->ether_type) == ETHER_TYPE_BRCM) {
+#ifdef DHD_USE_STATIC_CTRLBUF
+ PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
+#else
+ PKTFREE(dhdp->osh, pktbuf, FALSE);
+#endif /* DHD_USE_STATIC_CTRLBUF */
+ } else {
+ PKTCFREE(dhdp->osh, pktbuf, FALSE);
+ }
+ continue;
+ }
+
+ /* Dropping only data packets before registering net device to avoid kernel panic */
+#ifndef PROP_TXSTATUS_VSDB
+ if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED) &&
+ (ntoh16(eh->ether_type) != ETHER_TYPE_BRCM))
+#else
+ if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED || !dhd->pub.up) &&
+ (ntoh16(eh->ether_type) != ETHER_TYPE_BRCM))
+#endif /* PROP_TXSTATUS_VSDB */
+ {
+ DHD_ERROR(("%s: net device is NOT registered yet. drop packet\n",
+ __FUNCTION__));
+ PKTCFREE(dhdp->osh, pktbuf, FALSE);
+ continue;
+ }
+
+#ifdef PROP_TXSTATUS
+ if (dhd_wlfc_is_header_only_pkt(dhdp, pktbuf)) {
+ /* WLFC may send header only packet when
+ there is an urgent message but no packet to
+ piggy-back on
+ */
+ PKTCFREE(dhdp->osh, pktbuf, FALSE);
+ continue;
+ }
+#endif
+#ifdef DHD_L2_FILTER
+ /* If block_ping is enabled drop the ping packet */
+ if (ifp->block_ping) {
+ if (bcm_l2_filter_block_ping(dhdp->osh, pktbuf) == BCME_OK) {
+ PKTCFREE(dhdp->osh, pktbuf, FALSE);
+ continue;
+ }
+ }
+ if (ifp->grat_arp && DHD_IF_ROLE_STA(dhdp, ifidx)) {
+ if (bcm_l2_filter_gratuitous_arp(dhdp->osh, pktbuf) == BCME_OK) {
+ PKTCFREE(dhdp->osh, pktbuf, FALSE);
+ continue;
+ }
+ }
+ if (ifp->parp_enable && DHD_IF_ROLE_AP(dhdp, ifidx)) {
+ int ret = dhd_l2_filter_pkt_handle(dhdp, ifidx, pktbuf, FALSE);
+
+ /* Drop the packets if l2 filter has processed it already
+ * otherwise continue with the normal path
+ */
+ if (ret == BCME_OK) {
+ PKTCFREE(dhdp->osh, pktbuf, TRUE);
+ continue;
+ }
+ }
+ if (ifp->block_tdls) {
+ if (bcm_l2_filter_block_tdls(dhdp->osh, pktbuf) == BCME_OK) {
+ PKTCFREE(dhdp->osh, pktbuf, FALSE);
+ continue;
+ }
+ }
+#endif /* DHD_L2_FILTER */
+
+#ifdef DHD_MCAST_REGEN
+ DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
+ if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
+ ASSERT(if_flow_lkup);
+
+ interface_role = if_flow_lkup[ifidx].role;
+ DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+
+ if (ifp->mcast_regen_bss_enable && (interface_role != WLC_E_IF_ROLE_WDS) &&
+ !DHD_IF_ROLE_AP(dhdp, ifidx) &&
+ ETHER_ISUCAST(eh->ether_dhost)) {
+ if (dhd_mcast_reverse_translation(eh) == BCME_OK) {
+#ifdef DHD_PSTA
+ /* Change bsscfg to primary bsscfg for unicast-multicast packets */
+ if ((dhd_get_psta_mode(dhdp) == DHD_MODE_PSTA) ||
+ (dhd_get_psta_mode(dhdp) == DHD_MODE_PSR)) {
+ if (ifidx != 0) {
+ /* Let the primary in PSTA interface handle this
+ * frame after unicast to Multicast conversion
+ */
+ ifp = dhd_get_ifp(dhdp, 0);
+ ASSERT(ifp);
+ }
+ }
+#endif /* DHD_PSTA */
+ }
+ }
+#endif /* DHD_MCAST_REGEN */
+
+#ifdef DHD_WMF
+ /* WMF processing for multicast packets */
+ if (ifp->wmf.wmf_enable && (ETHER_ISMULTI(eh->ether_dhost))) {
+ dhd_sta_t *sta;
+ int ret;
+
+ sta = dhd_find_sta(dhdp, ifidx, (void *)eh->ether_shost);
+ ret = dhd_wmf_packets_handle(dhdp, pktbuf, sta, ifidx, 1);
+ switch (ret) {
+ case WMF_TAKEN:
+ /* The packet is taken by WMF. Continue to next iteration */
+ continue;
+ case WMF_DROP:
+ /* Packet DROP decision by WMF. Toss it */
+ DHD_ERROR(("%s: WMF decides to drop packet\n",
+ __FUNCTION__));
+ PKTCFREE(dhdp->osh, pktbuf, FALSE);
+ continue;
+ default:
+ /* Continue the transmit path */
+ break;
+ }
+ }
+#endif /* DHD_WMF */
+
+#ifdef DHDTCPSYNC_FLOOD_BLK
+ if (dhd_tcpdata_get_flag(dhdp, pktbuf) == FLAG_SYNC) {
+ int delta_sec;
+ int delta_sync;
+ int sync_per_sec;
+ u64 curr_time = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC);
+ ifp->tsync_rcvd ++;
+ delta_sync = ifp->tsync_rcvd - ifp->tsyncack_txed;
+ delta_sec = curr_time - ifp->last_sync;
+ if (delta_sec > 1) {
+ sync_per_sec = delta_sync/delta_sec;
+ if (sync_per_sec > TCP_SYNC_FLOOD_LIMIT) {
+ schedule_work(&ifp->blk_tsfl_work);
+ DHD_ERROR(("ifx %d TCP SYNC Flood attack suspected! "
+ "sync recvied %d pkt/sec \n",
+ ifidx, sync_per_sec));
+ ifp->tsync_per_sec = sync_per_sec;
+ }
+ dhd_reset_tcpsync_info_by_ifp(ifp);
+ }
+
+ }
+#endif /* DHDTCPSYNC_FLOOD_BLK */
+
+#ifdef DHDTCPACK_SUPPRESS
+ dhd_tcpdata_info_get(dhdp, pktbuf);
+#endif
+ skb = PKTTONATIVE(dhdp->osh, pktbuf);
+
+ ASSERT(ifp);
+ skb->dev = ifp->net;
+#ifdef DHD_WET
+ /* wet related packet proto manipulation should be done in DHD
+ * since dongle doesn't have complete payload
+ */
+ if (WET_ENABLED(&dhd->pub) && (dhd_wet_recv_proc(dhd->pub.wet_info,
+ pktbuf) < 0)) {
+ DHD_INFO(("%s:%s: wet recv proc failed\n",
+ __FUNCTION__, dhd_ifname(dhdp, ifidx)));
+ }
+#endif /* DHD_WET */
+
+#ifdef DHD_PSTA
+ if (PSR_ENABLED(dhdp) &&
+#ifdef BCM_ROUTER_DHD
+ !(ifp->primsta_dwds) &&
+#endif /* BCM_ROUTER_DHD */
+ (dhd_psta_proc(dhdp, ifidx, &pktbuf, FALSE) < 0)) {
+ DHD_ERROR(("%s:%s: psta recv proc failed\n", __FUNCTION__,
+ dhd_ifname(dhdp, ifidx)));
+ }
+#endif /* DHD_PSTA */
+
+#if defined(BCM_ROUTER_DHD)
+ /* XXX Use WOFA for both dhdap and dhdap-atlas router. */
+ /* XXX dhd_sendpkt verify pkt accounting (TO/FRM NATIVE) and PKTCFREE */
+
+ if (DHD_IF_ROLE_AP(dhdp, ifidx) && (!ifp->ap_isolate)) {
+ eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
+ if (ETHER_ISUCAST(eh->ether_dhost)) {
+ if (dhd_find_sta(dhdp, ifidx, (void *)eh->ether_dhost)) {
+ dhd_sendpkt(dhdp, ifidx, pktbuf);
+ continue;
+ }
+ } else {
+ void *npkt;
+#if defined(HNDCTF)
+ if (PKTISCHAINED(pktbuf)) { /* XXX WAR */
+ DHD_ERROR(("Error: %s():%d Chained non unicast pkt<%p>\n",
+ __FUNCTION__, __LINE__, pktbuf));
+ PKTFRMNATIVE(dhdp->osh, pktbuf);
+ PKTCFREE(dhdp->osh, pktbuf, FALSE);
+ continue;
+ }
+#endif /* HNDCTF */
+ if ((ntoh16(eh->ether_type) != ETHER_TYPE_IAPP_L2_UPDATE) &&
+ ((npkt = PKTDUP(dhdp->osh, pktbuf)) != NULL))
+ dhd_sendpkt(dhdp, ifidx, npkt);
+ }
+ }
+
+#if defined(HNDCTF)
+ /* try cut thru' before sending up */
+ if (dhd_ctf_forward(dhd, skb, &pnext) == BCME_OK) {
+ continue;
+ }
+#endif /* HNDCTF */
+
+#else /* !BCM_ROUTER_DHD */
+#ifdef PCIE_FULL_DONGLE
+ if ((DHD_IF_ROLE_AP(dhdp, ifidx) || DHD_IF_ROLE_P2PGO(dhdp, ifidx)) &&
+ (!ifp->ap_isolate)) {
+ eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
+ if (ETHER_ISUCAST(eh->ether_dhost)) {
+ if (dhd_find_sta(dhdp, ifidx, (void *)eh->ether_dhost)) {
+ dhd_sendpkt(dhdp, ifidx, pktbuf);
+ continue;
+ }
+ } else {
+ if ((ntoh16(eh->ether_type) != ETHER_TYPE_IAPP_L2_UPDATE)) {
+ void *npktbuf = NULL;
+ /*
+ * If host_sfhllc_supported enabled, do skb_copy as SFHLLC
+ * header will be inserted during Tx, due to which network
+ * stack will not decode the Rx packet.
+ * Else PKTDUP(skb_clone) is enough.
+ */
+ if (dhdp->host_sfhllc_supported) {
+ npktbuf = skb_copy(skb, GFP_ATOMIC);
+ } else {
+ npktbuf = PKTDUP(dhdp->osh, pktbuf);
+ }
+ if (npktbuf != NULL) {
+ dhd_sendpkt(dhdp, ifidx, npktbuf);
+ }
+ }
+ }
+ }
+#endif /* PCIE_FULL_DONGLE */
+#endif /* BCM_ROUTER_DHD */
+#ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
+ if (IS_STA_IFACE(ndev_to_wdev(ifp->net)) &&
+ (ifp->recv_reassoc_evt == TRUE) && (ifp->post_roam_evt == FALSE) &&
+ (dhd_is_4way_msg((char *)(skb->data)) == EAPOL_4WAY_M1)) {
+ DHD_ERROR(("%s: Reassoc is in progress. "
+ "Drop EAPOL M1 frame\n", __FUNCTION__));
+ PKTFREE(dhdp->osh, pktbuf, FALSE);
+ continue;
+ }
+#endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
+#ifdef WLEASYMESH
+ if ((dhdp->conf->fw_type == FW_TYPE_EZMESH) &&
+ (ntoh16(eh->ether_type) != ETHER_TYPE_BRCM)) {
+ uint16 * da = (uint16 *)(eh->ether_dhost);
+ ASSERT(ISALIGNED(da, 2));
+
+ /* XXX: Special handling for 1905 messages
+ * if DA matches with configured 1905 AL MAC addresses
+ * bypass fwder and foward it to linux stack
+ */
+ if (ntoh16(eh->ether_type) == ETHER_TYPE_1905_1) {
+ if (!eacmp(da, ifp->_1905_al_ucast) || !eacmp(da, ifp->_1905_al_mcast)) {
+ //skb->fwr_flood = 0;
+ } else {
+ //skb->fwr_flood = 1;
+ }
+ }
+ }
+#endif /* WLEASYMESH */
+ /* Get the protocol, maintain skb around eth_type_trans().
+ * The main reason for this hack is a limitation of
+ * Linux 2.4, where 'eth_type_trans' uses 'net->hard_header_len'
+ * instead of ETH_HLEN to perform the skb_pull inside. To avoid
+ * copying the packet coming from the network stack when adding
+ * BDC, hardware header etc., during network interface registration
+ * we set 'net->hard_header_len' to ETH_HLEN plus the extra space
+ * required for BDC, hardware header etc., and not just ETH_HLEN
+ */
+ eth = skb->data;
+ len = skb->len;
+ dump_data = skb->data;
+ protocol = (skb->data[12] << 8) | skb->data[13];
+
+ if (protocol == ETHER_TYPE_802_1X) {
+ DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_EAPOL_FRAME_RECEIVED);
+#if defined(WL_CFG80211) && defined(WL_WPS_SYNC)
+ wl_handle_wps_states(ifp->net, dump_data, len, FALSE);
+#endif /* WL_CFG80211 && WL_WPS_SYNC */
+#ifdef DHD_4WAYM4_FAIL_DISCONNECT
+ if (dhd_is_4way_msg((uint8 *)(skb->data)) == EAPOL_4WAY_M3) {
+ OSL_ATOMIC_SET(dhdp->osh, &ifp->m4state, M3_RXED);
+ }
+#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
+#ifdef EAPOL_RESEND
+ wl_ext_release_eapol_txpkt(dhdp, ifidx, TRUE);
+#endif /* EAPOL_RESEND */
+ }
+ dhd_dump_pkt(dhdp, ifidx, dump_data, len, FALSE, NULL, NULL);
+
+#if defined(DHD_WAKE_STATUS) && defined(DHD_WAKEPKT_DUMP)
+ if (pkt_wake) {
+ dhd_prhex("[wakepkt_dump]", (char*)dump_data, MIN(len, 64), DHD_ERROR_VAL);
+ DHD_ERROR(("config check in_suspend: %d ", dhdp->in_suspend));
+#ifdef ARP_OFFLOAD_SUPPORT
+ DHD_ERROR(("arp hmac_update:%d \n", dhdp->hmac_updated));
+#endif /* ARP_OFFLOAD_SUPPORT */
+ }
+#endif /* DHD_WAKE_STATUS && DHD_WAKEPKT_DUMP */
+
+#ifdef BCMINTERNAL
+ if (dhd->pub.loopback) {
+ struct ether_header *local_eh = (struct ether_header *)eth;
+ if (ntoh16(local_eh->ether_type) == ETHER_TYPE_IP) {
+ uint8 *myp = (uint8 *)local_eh;
+ struct ipv4_hdr *iph = (struct ipv4_hdr *)(myp + ETHER_HDR_LEN);
+ uint16 iplen = (iph->version_ihl & 0xf) * sizeof(uint32);
+ if (iph->prot == 1) {
+ uint8 *icmph = (uint8 *)iph + iplen;
+ if (icmph[0] == 8) {
+ uint8 temp_addr[ETHER_ADDR_LEN];
+ uint8 temp_ip[IPV4_ADDR_LEN];
+ /* Ether header flip */
+ memcpy(temp_addr, local_eh->ether_dhost,
+ ETHER_ADDR_LEN);
+ memcpy(local_eh->ether_dhost,
+ local_eh->ether_shost, ETHER_ADDR_LEN);
+ memcpy(local_eh->ether_shost, temp_addr,
+ ETHER_ADDR_LEN);
+
+ /* IP header flip */
+ memcpy(temp_ip, iph->src_ip, IPV4_ADDR_LEN);
+ memcpy(iph->src_ip, iph->dst_ip, IPV4_ADDR_LEN);
+ memcpy(iph->dst_ip, temp_ip, IPV4_ADDR_LEN);
+
+ /* ICMP header flip */
+ icmph[0] = 0;
+ }
+ } else if (iph->prot == 17) {
+ uint8 *udph = (uint8 *)iph + iplen;
+ uint16 destport = ntoh16(*((uint16 *)udph + 1));
+ if (destport == 8888) {
+ uint8 temp_addr[ETHER_ADDR_LEN];
+ uint8 temp_ip[IPV4_ADDR_LEN];
+ /* Ether header flip */
+ memcpy(temp_addr, local_eh->ether_dhost,
+ ETHER_ADDR_LEN);
+ memcpy(local_eh->ether_dhost,
+ local_eh->ether_shost, ETHER_ADDR_LEN);
+ memcpy(local_eh->ether_shost, temp_addr,
+ ETHER_ADDR_LEN);
+
+ /* IP header flip */
+ memcpy(temp_ip, iph->src_ip, IPV4_ADDR_LEN);
+ memcpy(iph->src_ip, iph->dst_ip, IPV4_ADDR_LEN);
+ memcpy(iph->dst_ip, temp_ip, IPV4_ADDR_LEN);
+
+ /* Reset UDP checksum to */
+ *((uint16 *)udph + 3) = 0;
+ }
+ }
+ }
+ }
+#endif /* BCMINTERNAL */
+ skb->protocol = eth_type_trans(skb, skb->dev);
+
+ if (skb->pkt_type == PACKET_MULTICAST) {
+ dhd->pub.rx_multicast++;
+ ifp->stats.multicast++;
+ }
+
+ skb->data = eth;
+ skb->len = len;
+
+ /* TODO: XXX: re-look into dropped packets. */
+ DHD_DBG_PKT_MON_RX(dhdp, skb);
+ /* Strip header, count, deliver upward */
+ skb_pull(skb, ETH_HLEN);
+
+#ifdef ENABLE_WAKEUP_PKT_DUMP
+ if (dhd_mmc_wake) {
+ DHD_INFO(("wake_pkt %s(%d)\n", __FUNCTION__, __LINE__));
+ if (DHD_INFO_ON()) {
+ prhex("wake_pkt", (char*) eth, MIN(len, 48));
+ }
+ update_wake_pkt_info(skb);
+#ifdef CONFIG_IRQ_HISTORY
+ add_irq_history(0, "WIFI");
+#endif
+ dhd_mmc_wake = FALSE;
+ }
+#endif /* ENABLE_WAKEUP_PKT_DUMP */
+
+ /* Process special event packets and then discard them */
+ /* XXX Decide on a better way to fit this in */
+ memset(&event, 0, sizeof(event));
+
+ if (ntoh16(skb->protocol) == ETHER_TYPE_BRCM) {
+ bcm_event_msg_u_t evu;
+ int ret_event, event_type;
+ void *pkt_data = skb_mac_header(skb);
+
+ ret_event = wl_host_event_get_data(pkt_data, len, &evu);
+
+ if (ret_event != BCME_OK) {
+ DHD_ERROR(("%s: wl_host_event_get_data err = %d\n",
+ __FUNCTION__, ret_event));
+#ifdef DHD_USE_STATIC_CTRLBUF
+ PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
+#else
+ PKTFREE(dhdp->osh, pktbuf, FALSE);
+#endif
+ continue;
+ }
+
+ memcpy(&event, &evu.event, sizeof(wl_event_msg_t));
+ event_type = ntoh32_ua((void *)&event.event_type);
+#ifdef SHOW_LOGTRACE
+ /* Event msg printing is called from dhd_rx_frame, which runs in tasklet
+ * context in case of PCIe FD; for other buses this will be from
+ * DPC context. If we get a bunch of events from the dongle, printing all
+ * of them from tasklet/DPC context, in the data path at that, is costly.
+ * Also, in newer dongle SW (4359, 4355 onwards) console prints too come as
+ * events with type WLC_E_TRACE.
+ * We'll print these console logs from workqueue context by enqueuing the SKB
+ * here; dequeuing is done in the workqueue and the SKB is freed only if
+ * logtrace_pkt_sendup is true
+ */
+ if (event_type == WLC_E_TRACE) {
+ DHD_TRACE(("%s: WLC_E_TRACE\n", __FUNCTION__));
+ dhd_event_logtrace_enqueue(dhdp, ifidx, pktbuf);
+ continue;
+ }
+#endif /* SHOW_LOGTRACE */
+
+#ifdef WL_NANHO
+ /* Process firmware NAN event by NANHO host module */
+ if (dhd_nho_evt_process(dhdp, ifidx, &event, pkt_data, len)) {
+ /* NANHO host module consumed NAN event. free pkt here. */
+#ifdef DHD_USE_STATIC_CTRLBUF
+ PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
+#else
+ PKTFREE(dhdp->osh, pktbuf, FALSE);
+#endif
+ continue;
+ }
+#endif /* WL_NANHO */
+
+ ret_event = dhd_wl_host_event(dhd, ifidx, pkt_data, len, &event, &data);
+
+ wl_event_to_host_order(&event);
+#if defined(OEM_ANDROID)
+ if (!tout_ctrl)
+ tout_ctrl = DHD_PACKET_TIMEOUT_MS;
+#endif /* OEM_ANDROID */
+
+#if (defined(OEM_ANDROID) && defined(PNO_SUPPORT))
+ if (event_type == WLC_E_PFN_NET_FOUND) {
+ /* enforce custom wake lock to guarantee that the kernel is not suspended */
+ tout_ctrl = CUSTOM_PNO_EVENT_LOCK_xTIME * DHD_PACKET_TIMEOUT_MS;
+ }
+#endif /* PNO_SUPPORT */
+ if (numpkt != 1) {
+ DHD_TRACE(("%s: Got BRCM event packet in a chained packet.\n",
+ __FUNCTION__));
+ }
+
+#ifdef DHD_WAKE_STATUS
+ if (unlikely(pkt_wake)) {
+#ifdef DHD_WAKE_EVENT_STATUS
+ if (event.event_type < WLC_E_LAST) {
+ wcp->rc_event[event.event_type]++;
+ wcp->rcwake++;
+ pkt_wake = 0;
+ }
+#endif /* DHD_WAKE_EVENT_STATUS */
+ }
+#endif /* DHD_WAKE_STATUS */
+
+ /* For a delete-virtual-interface event, wl_host_event returns a
+ * positive i/f index; do not proceed, just free the pkt.
+ */
+ if ((event_type == WLC_E_IF) && (ret_event > 0)) {
+ DHD_ERROR(("%s: interface is deleted. Free event packet\n",
+ __FUNCTION__));
+#ifdef DHD_USE_STATIC_CTRLBUF
+ PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
+#else
+ PKTFREE(dhdp->osh, pktbuf, FALSE);
+#endif
+ continue;
+ }
+
+ /*
+ * For event packets there is a possibility of ifidx getting
+ * modified, so look up the ifp once again.
+ */
+ ASSERT(ifidx < DHD_MAX_IFS && dhd->iflist[ifidx]);
+ ifp = dhd->iflist[ifidx];
+#ifndef PROP_TXSTATUS_VSDB
+ if (!(ifp && ifp->net && (ifp->net->reg_state == NETREG_REGISTERED)))
+#else
+ if (!(ifp && ifp->net && (ifp->net->reg_state == NETREG_REGISTERED) &&
+ dhd->pub.up))
+#endif /* PROP_TXSTATUS_VSDB */
+ {
+ DHD_ERROR(("%s: net device is NOT registered. drop event packet\n",
+ __FUNCTION__));
+#ifdef DHD_USE_STATIC_CTRLBUF
+ PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
+#else
+ PKTFREE(dhdp->osh, pktbuf, FALSE);
+#endif
+ continue;
+ }
+
+#ifdef SENDPROB
+ if (dhdp->wl_event_enabled ||
+ (dhdp->recv_probereq && (event.event_type == WLC_E_PROBREQ_MSG)))
+#else
+ if (dhdp->wl_event_enabled)
+#endif
+ {
+#ifdef DHD_USE_STATIC_CTRLBUF
+ /* If event bufs are allocated via static buf pool
+ * and wl events are enabled, make a copy, free the
+ * local one and send the copy up.
+ */
+ struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
+ /* Copy event and send it up */
+ PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
+ if (nskb) {
+ skb = nskb;
+ } else {
+ DHD_ERROR(("skb clone failed. dropping event.\n"));
+ continue;
+ }
+#endif /* DHD_USE_STATIC_CTRLBUF */
+ } else {
+ /* If event forwarding was not explicitly enabled, drop events */
+#ifdef DHD_USE_STATIC_CTRLBUF
+ PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
+#else
+ PKTFREE(dhdp->osh, pktbuf, FALSE);
+#endif /* DHD_USE_STATIC_CTRLBUF */
+ continue;
+ }
+ } else {
+#if defined(OEM_ANDROID)
+ tout_rx = DHD_PACKET_TIMEOUT_MS;
+#endif /* OEM_ANDROID */
+
+#ifdef PROP_TXSTATUS
+ dhd_wlfc_save_rxpath_ac_time(dhdp, (uint8)PKTPRIO(skb));
+#endif /* PROP_TXSTATUS */
+
+#ifdef DHD_WAKE_STATUS
+ if (unlikely(pkt_wake)) {
+ wcp->rxwake++;
+#ifdef DHD_WAKE_RX_STATUS
+#define ETHER_ICMP6_HEADER 20
+#define ETHER_IPV6_SADDR (ETHER_ICMP6_HEADER + 2)
+#define ETHER_IPV6_DAADR (ETHER_IPV6_SADDR + IPV6_ADDR_LEN)
+#define ETHER_ICMPV6_TYPE (ETHER_IPV6_DAADR + IPV6_ADDR_LEN)
+
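+ /* Offset arithmetic behind the macros above (IPv6 over Ethernet):
+ * ETHER_ICMP6_HEADER = 14 (Ethernet header) + 6 (Next Header field
+ * in the IPv6 header), so dump_data[ETHER_ICMP6_HEADER] is the L4
+ * protocol; the source and destination addresses start at 14 + 8
+ * and 14 + 24, and ETHER_ICMPV6_TYPE (= 14 + 40) is the first byte,
+ * i.e. the Type field, of the ICMPv6 header.
+ */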
+ if (ntoh16(skb->protocol) == ETHER_TYPE_ARP) /* ARP */
+ wcp->rx_arp++;
+ if (dump_data[0] == 0xFF) { /* Broadcast */
+ wcp->rx_bcast++;
+ } else if (dump_data[0] & 0x01) { /* Multicast */
+ wcp->rx_mcast++;
+ if (ntoh16(skb->protocol) == ETHER_TYPE_IPV6) {
+ wcp->rx_multi_ipv6++;
+ if ((skb->len > ETHER_ICMP6_HEADER) &&
+ (dump_data[ETHER_ICMP6_HEADER] == IPPROTO_ICMPV6)) {
+ wcp->rx_icmpv6++;
+ if (skb->len > ETHER_ICMPV6_TYPE) {
+ switch (dump_data[ETHER_ICMPV6_TYPE]) {
+ case NDISC_ROUTER_ADVERTISEMENT:
+ wcp->rx_icmpv6_ra++;
+ break;
+ case NDISC_NEIGHBOUR_ADVERTISEMENT:
+ wcp->rx_icmpv6_na++;
+ break;
+ case NDISC_NEIGHBOUR_SOLICITATION:
+ wcp->rx_icmpv6_ns++;
+ break;
+ }
+ }
+ }
+ } else if (dump_data[2] == 0x5E) {
+ wcp->rx_multi_ipv4++;
+ } else {
+ wcp->rx_multi_other++;
+ }
+ } else { /* Unicast */
+ wcp->rx_ucast++;
+ }
+#undef ETHER_ICMP6_HEADER
+#undef ETHER_IPV6_SADDR
+#undef ETHER_IPV6_DAADR
+#undef ETHER_ICMPV6_TYPE
+#endif /* DHD_WAKE_RX_STATUS */
+ pkt_wake = 0;
+ }
+#endif /* DHD_WAKE_STATUS */
+ }
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
+ ifp->net->last_rx = jiffies;
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) */
+
+ if (ntoh16(skb->protocol) != ETHER_TYPE_BRCM) {
+ dhdp->dstats.rx_bytes += skb->len;
+ dhdp->rx_packets++; /* Local count */
+ ifp->stats.rx_bytes += skb->len;
+ ifp->stats.rx_packets++;
+ }
+#if defined(DHD_TCP_WINSIZE_ADJUST)
+ if (dhd_use_tcp_window_size_adjust) {
+ if (ifidx == 0 && ntoh16(skb->protocol) == ETHER_TYPE_IP) {
+ dhd_adjust_tcp_winsize(dhdp->op_mode, skb);
+ }
+ }
+#endif /* DHD_TCP_WINSIZE_ADJUST */
+
+ /* XXX WL here makes sure data is 4-byte aligned? */
+ if (in_interrupt()) {
+ bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
+ __FUNCTION__, __LINE__);
+#if defined(DHD_LB_RXP)
+#ifdef ENABLE_DHD_GRO
+ /* The pktlog module clones an skb with skb_clone() and stores
+ * the pointer in its ring buffer; once the buffer is full,
+ * PKTFREE is called to remove the oldest skb. A kernel panic
+ * occurred when the pktlog module freed an rx frame that had
+ * been handed to napi_gro_receive(), so DHD must not use
+ * napi_gro_receive() for packets the pktlog module holds.
+ */
+ if (dhd_gro_enable && !skb_cloned(skb) &&
+ ntoh16(skb->protocol) != ETHER_TYPE_BRCM) {
+ napi_gro_receive(&dhd->rx_napi_struct, skb);
+ } else {
+ netif_receive_skb(skb);
+ }
+#else
+#if defined(WL_MONITOR) && defined(BCMSDIO)
+ if (dhd_monitor_enabled(dhdp, ifidx))
+ dhd_rx_mon_pkt_sdio(dhdp, skb, ifidx);
+ else
+#endif /* WL_MONITOR && BCMSDIO */
+ netif_receive_skb(skb);
+#endif /* ENABLE_DHD_GRO */
+#else /* !defined(DHD_LB_RXP) */
+ netif_rx(skb);
+#endif /* !defined(DHD_LB_RXP) */
+ } else {
+ if (dhd->rxthread_enabled) {
+ if (!skbhead)
+ skbhead = skb;
+ else
+ PKTSETNEXT(dhdp->osh, skbprev, skb);
+ skbprev = skb;
+ } else {
+
+ /* If the receive is not processed inside an ISR,
+ * the softirqd must be woken explicitly to service
+ * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
+ * by netif_rx_ni(), but in earlier kernels, we need
+ * to do it manually.
+ */
+ bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
+ __FUNCTION__, __LINE__);
+
+#if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS)
+ dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
+#endif /* BCMPCIE && DHDTCPACK_SUPPRESS */
+#if defined(DHD_LB_RXP)
+#ifdef ENABLE_DHD_GRO
+ if (dhd_gro_enable && !skb_cloned(skb) &&
+ ntoh16(skb->protocol) != ETHER_TYPE_BRCM) {
+ napi_gro_receive(&dhd->rx_napi_struct, skb);
+ } else {
+ netif_receive_skb(skb);
+ }
+#else
+ netif_receive_skb(skb);
+#endif /* ENABLE_DHD_GRO */
+#else /* !defined(DHD_LB_RXP) */
+ netif_rx_ni(skb);
+#endif /* !defined(DHD_LB_RXP) */
+ }
+ }
+ }
+
+ if (dhd->rxthread_enabled && skbhead)
+ dhd_sched_rxf(dhdp, skbhead);
+
+#if defined(OEM_ANDROID)
+ DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(dhdp, tout_rx);
+ DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhdp, tout_ctrl);
+#endif /* OEM_ANDROID */
+}
+
+void
+dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx)
+{
+ /* Nothing to do in the Linux version */
+ return;
+}
+
+void
+dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
+ struct ether_header *eh;
+ uint16 type;
+
+ if (dhdp->tput_data.tput_test_running) {
+
+ dhdp->batch_tx_pkts_cmpl++;
+
+ /* don't count the stop pkt */
+ if (success &&
+ dhdp->batch_tx_pkts_cmpl <= dhdp->batch_tx_num_pkts)
+ dhdp->tput_data.pkts_good++;
+ else if (!success)
+ dhdp->tput_data.pkts_bad++;
+
+ /* the stop packet itself is not counted in the tput test */
+ if (dhdp->batch_tx_pkts_cmpl == dhdp->batch_tx_num_pkts) {
+ dhdp->tput_stop_ts = OSL_SYSUPTIME_US();
+ dhdp->tput_data.pkts_cmpl += dhdp->batch_tx_pkts_cmpl;
+ dhdp->tput_data.num_pkts += dhdp->batch_tx_num_pkts;
+ dhd_os_tput_test_wake(dhdp);
+ }
+ }
+ /* XXX where does this stuff belong to? */
+ dhd_prot_hdrpull(dhdp, NULL, txp, NULL, NULL);
+
+ /* XXX Use packet tag when it is available to identify its type */
+
+ eh = (struct ether_header *)PKTDATA(dhdp->osh, txp);
+ type = ntoh16(eh->ether_type);
+
+ if (type == ETHER_TYPE_802_1X) {
+ atomic_dec(&dhd->pend_8021x_cnt);
+ }
+
+#ifdef PROP_TXSTATUS
+ if (dhdp->wlfc_state && (dhdp->proptxstatus_mode != WLFC_FCMODE_NONE)) {
+ dhd_if_t *ifp = dhd->iflist[DHD_PKTTAG_IF(PKTTAG(txp))];
+ uint datalen = PKTLEN(dhd->pub.osh, txp);
+ if (ifp != NULL) {
+ if (success) {
+ dhd->pub.tx_packets++;
+ ifp->stats.tx_packets++;
+ ifp->stats.tx_bytes += datalen;
+ } else {
+ ifp->stats.tx_dropped++;
+ }
+ }
+ }
+#endif
+ if (success) {
+ dhd->pub.tot_txcpl++;
+ }
+}
+
+int dhd_os_tput_test_wait(dhd_pub_t *pub, uint *condition,
+ uint timeout_ms)
+{
+ int timeout;
+
+ /* Convert the timeout from milliseconds to jiffies */
+ timeout = msecs_to_jiffies(timeout_ms);
+ pub->tput_test_done = FALSE;
+ condition = (uint *)&pub->tput_test_done;
+ timeout = wait_event_timeout(pub->tx_tput_test_wait,
+ (*condition), timeout);
+
+ return timeout;
+}
+
+int dhd_os_tput_test_wake(dhd_pub_t * pub)
+{
+ OSL_SMP_WMB();
+ pub->tput_test_done = TRUE;
+ OSL_SMP_WMB();
+ wake_up(&(pub->tx_tput_test_wait));
+ return 0;
+}
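+
+/* Sketch of the tput-test completion handshake (illustrative only): a test
+ * runner blocks in dhd_os_tput_test_wait() while dhd_txcomplete() counts
+ * completions and calls dhd_os_tput_test_wake() once the batch is done:
+ *
+ *	dhdp->batch_tx_num_pkts = n;
+ *	... queue n test frames ...
+ *	if (dhd_os_tput_test_wait(dhdp, NULL, timeout_ms) == 0)
+ *		... timed out before every completion arrived ...
+ *
+ * Note that dhd_os_tput_test_wait() ignores its condition argument and
+ * always waits on pub->tput_test_done, so NULL is acceptable here.
+ */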
+
+static struct net_device_stats *
+dhd_get_stats(struct net_device *net)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(net);
+ dhd_if_t *ifp;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (!dhd) {
+ DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__));
+ goto error;
+ }
+
+ ifp = dhd_get_ifp_by_ndev(&dhd->pub, net);
+ if (!ifp) {
+ /* return empty stats */
+ DHD_ERROR(("%s: BAD_IF\n", __FUNCTION__));
+ goto error;
+ }
+
+ if (dhd->pub.up) {
+ /* Use the protocol to get dongle stats */
+ dhd_prot_dstats(&dhd->pub);
+ }
+ return &ifp->stats;
+
+error:
+ memset(&net->stats, 0, sizeof(net->stats));
+ return &net->stats;
+}
+
+#ifndef BCMDBUS
+static int
+dhd_watchdog_thread(void *data)
+{
+ tsk_ctl_t *tsk = (tsk_ctl_t *)data;
+ dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
+ /* This thread doesn't need any user-level access,
+ * so get rid of all our resources
+ */
+ if (dhd_watchdog_prio > 0) {
+ struct sched_param param;
+ param.sched_priority = (dhd_watchdog_prio < MAX_RT_PRIO)?
+ dhd_watchdog_prio:(MAX_RT_PRIO-1);
+ setScheduler(current, SCHED_FIFO, &param);
+ }
+
+ while (1) {
+ if (down_interruptible (&tsk->sema) == 0) {
+ unsigned long flags;
+ unsigned long jiffies_at_start = jiffies;
+ unsigned long time_lapse;
+#ifdef BCMPCIE
+ DHD_OS_WD_WAKE_LOCK(&dhd->pub);
+#endif /* BCMPCIE */
+
+ SMP_RD_BARRIER_DEPENDS();
+ if (tsk->terminated) {
+#ifdef BCMPCIE
+ DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
+#endif /* BCMPCIE */
+ break;
+ }
+
+ if (dhd->pub.dongle_reset == FALSE) {
+ DHD_TIMER(("%s:\n", __FUNCTION__));
+ dhd_analyze_sock_flows(dhd, dhd_watchdog_ms);
+ dhd_bus_watchdog(&dhd->pub);
+
+#ifdef DHD_TIMESYNC
+ /* Call the timesync module watchdog */
+ dhd_timesync_watchdog(&dhd->pub);
+#endif /* DHD_TIMESYNC */
+#if defined(BCM_ROUTER_DHD) && defined(CTFPOOL)
+ /* allocate and add a new skb to the pkt pool */
+ if (CTF_ENAB(dhd->cih))
+ osl_ctfpool_replenish(dhd->pub.osh, CTFPOOL_REFILL_THRESH);
+#endif /* BCM_ROUTER_DHD && CTFPOOL */
+
+ DHD_GENERAL_LOCK(&dhd->pub, flags);
+ /* Count the tick for reference */
+ dhd->pub.tickcnt++;
+#ifdef DHD_L2_FILTER
+ dhd_l2_filter_watchdog(&dhd->pub);
+#endif /* DHD_L2_FILTER */
+ time_lapse = jiffies - jiffies_at_start;
+
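+ /* Example: with dhd_watchdog_ms == 10 and a 3 ms handler run,
+ * the timer is re-armed 7 ms out; if the handler overran the
+ * period, min() clamps the correction so the timer is armed
+ * for the next tick rather than in the past.
+ */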
+ /* Reschedule the watchdog */
+ if (dhd->wd_timer_valid) {
+ mod_timer(&dhd->timer,
+ jiffies +
+ msecs_to_jiffies(dhd_watchdog_ms) -
+ min(msecs_to_jiffies(dhd_watchdog_ms), time_lapse));
+ }
+ DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+ }
+#ifdef BCMPCIE
+ DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
+#endif /* BCMPCIE */
+ } else {
+ break;
+ }
+ }
+
+ complete_and_exit(&tsk->completed, 0);
+}
+
+static void dhd_watchdog(ulong data)
+{
+ dhd_info_t *dhd = (dhd_info_t *)data;
+ unsigned long flags;
+
+ if (dhd->pub.dongle_reset) {
+ return;
+ }
+
+ if (dhd->thr_wdt_ctl.thr_pid >= 0) {
+ up(&dhd->thr_wdt_ctl.sema);
+ return;
+ }
+
+#ifdef BCMPCIE
+ DHD_OS_WD_WAKE_LOCK(&dhd->pub);
+#endif /* BCMPCIE */
+ /* Call the bus module watchdog */
+ dhd_bus_watchdog(&dhd->pub);
+
+#ifdef DHD_TIMESYNC
+ /* Call the timesync module watchdog */
+ dhd_timesync_watchdog(&dhd->pub);
+#endif /* DHD_TIMESYNC */
+
+ DHD_GENERAL_LOCK(&dhd->pub, flags);
+ /* Count the tick for reference */
+ dhd->pub.tickcnt++;
+
+#ifdef DHD_L2_FILTER
+ dhd_l2_filter_watchdog(&dhd->pub);
+#endif /* DHD_L2_FILTER */
+ /* Reschedule the watchdog */
+ if (dhd->wd_timer_valid)
+ mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
+ DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+#ifdef BCMPCIE
+ DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
+#endif /* BCMPCIE */
+#if defined(BCM_ROUTER_DHD) && defined(CTFPOOL)
+ /* allocate and add a new skb to the pkt pool */
+ if (CTF_ENAB(dhd->cih))
+ osl_ctfpool_replenish(dhd->pub.osh, CTFPOOL_REFILL_THRESH);
+#endif /* BCM_ROUTER_DHD && CTFPOOL */
+}
+
+#ifdef DHD_PCIE_RUNTIMEPM
+static int
+dhd_rpm_state_thread(void *data)
+{
+ tsk_ctl_t *tsk = (tsk_ctl_t *)data;
+ dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
+
+ while (1) {
+ if (down_interruptible (&tsk->sema) == 0) {
+ unsigned long flags;
+ unsigned long jiffies_at_start = jiffies;
+ unsigned long time_lapse;
+
+ SMP_RD_BARRIER_DEPENDS();
+ if (tsk->terminated) {
+ break;
+ }
+
+ if (dhd->pub.dongle_reset == FALSE) {
+ DHD_TIMER(("%s:\n", __FUNCTION__));
+ if (dhd->pub.up) {
+#if defined(PCIE_OOB) || defined(PCIE_INB_DW)
+ dhd_bus_dw_deassert(&dhd->pub);
+#endif /* PCIE_OOB || PCIE_INB_DW */
+ if (dhd_get_rpm_state(&dhd->pub)) {
+ dhd_runtimepm_state(&dhd->pub);
+ }
+ }
+ DHD_GENERAL_LOCK(&dhd->pub, flags);
+ time_lapse = jiffies - jiffies_at_start;
+
+ /* Reschedule the watchdog */
+ if (dhd->rpm_timer_valid) {
+ mod_timer(&dhd->rpm_timer,
+ jiffies +
+ msecs_to_jiffies(dhd_runtimepm_ms) -
+ min(msecs_to_jiffies(dhd_runtimepm_ms),
+ time_lapse));
+ }
+ DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+ }
+ } else {
+ break;
+ }
+ }
+
+ complete_and_exit(&tsk->completed, 0);
+}
+
+static void dhd_runtimepm(ulong data)
+{
+ dhd_info_t *dhd = (dhd_info_t *)data;
+
+ if (dhd->pub.dongle_reset) {
+ return;
+ }
+
+ if (dhd->thr_rpm_ctl.thr_pid >= 0) {
+ up(&dhd->thr_rpm_ctl.sema);
+ return;
+ }
+}
+
+void dhd_runtime_pm_disable(dhd_pub_t *dhdp)
+{
+ dhd_set_rpm_state(dhdp, FALSE);
+ dhdpcie_runtime_bus_wake(dhdp, CAN_SLEEP(), __builtin_return_address(0));
+}
+
+void dhd_runtime_pm_enable(dhd_pub_t *dhdp)
+{
+ /* Enable Runtime PM except for MFG Mode */
+ if (!(dhdp->op_mode & DHD_FLAG_MFG_MODE)) {
+ if (dhd_get_idletime(dhdp)) {
+ dhd_set_rpm_state(dhdp, TRUE);
+ }
+ }
+}
+
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+#ifdef ENABLE_ADAPTIVE_SCHED
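+/* Adaptive scheduling: while the CPU runs at or below CUSTOM_CPUFREQ_THRESH,
+ * demote the calling thread to SCHED_NORMAL so a real-time DHD thread cannot
+ * monopolize a slow-clocked core; once the frequency recovers, restore
+ * SCHED_FIFO at the requested priority.
+ */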
+static void
+dhd_sched_policy(int prio)
+{
+ struct sched_param param;
+ if (cpufreq_quick_get(0) <= CUSTOM_CPUFREQ_THRESH) {
+ param.sched_priority = 0;
+ setScheduler(current, SCHED_NORMAL, &param);
+ } else {
+ if (get_scheduler_policy(current) != SCHED_FIFO) {
+ param.sched_priority = (prio < MAX_RT_PRIO)? prio : (MAX_RT_PRIO-1);
+ setScheduler(current, SCHED_FIFO, &param);
+ }
+ }
+}
+#endif /* ENABLE_ADAPTIVE_SCHED */
+#ifdef DEBUG_CPU_FREQ
+static int dhd_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
+{
+ dhd_info_t *dhd = container_of(nb, struct dhd_info, freq_trans);
+ struct cpufreq_freqs *freq = data;
+ if (dhd) {
+ if (!dhd->new_freq)
+ goto exit;
+ if (val == CPUFREQ_POSTCHANGE) {
+ DHD_ERROR(("cpu freq is changed to %u kHZ on CPU %d\n",
+ freq->new, freq->cpu));
+ *per_cpu_ptr(dhd->new_freq, freq->cpu) = freq->new;
+ }
+ }
+exit:
+ return 0;
+}
+#endif /* DEBUG_CPU_FREQ */
+
+static int
+dhd_dpc_thread(void *data)
+{
+ tsk_ctl_t *tsk = (tsk_ctl_t *)data;
+ dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
+
+ /* This thread doesn't need any user-level access,
+ * so get rid of all our resources
+ */
+ if (dhd_dpc_prio > 0)
+ {
+ struct sched_param param;
+ param.sched_priority = (dhd_dpc_prio < MAX_RT_PRIO)?dhd_dpc_prio:(MAX_RT_PRIO-1);
+ setScheduler(current, SCHED_FIFO, &param);
+ }
+
+#ifdef CUSTOM_DPC_CPUCORE
+ set_cpus_allowed_ptr(current, cpumask_of(CUSTOM_DPC_CPUCORE));
+#endif
+#ifdef CUSTOM_SET_CPUCORE
+ dhd->pub.current_dpc = current;
+#endif /* CUSTOM_SET_CPUCORE */
+ /* Run until signal received */
+ while (1) {
+ if (dhd->pub.conf->dpc_cpucore >= 0) {
+ printf("%s: set dpc_cpucore %d\n", __FUNCTION__, dhd->pub.conf->dpc_cpucore);
+ set_cpus_allowed_ptr(current, cpumask_of(dhd->pub.conf->dpc_cpucore));
+ dhd->pub.conf->dpc_cpucore = -1;
+ }
+ if (dhd->pub.conf->dhd_dpc_prio >= 0) {
+ struct sched_param param;
+ printf("%s: set dhd_dpc_prio %d\n", __FUNCTION__, dhd->pub.conf->dhd_dpc_prio);
+ param.sched_priority = (dhd->pub.conf->dhd_dpc_prio < MAX_RT_PRIO)?
+ dhd->pub.conf->dhd_dpc_prio:(MAX_RT_PRIO-1);
+ setScheduler(current, SCHED_FIFO, &param);
+ dhd->pub.conf->dhd_dpc_prio = -1;
+ }
+ if (!binary_sema_down(tsk)) {
+#ifdef ENABLE_ADAPTIVE_SCHED
+ dhd_sched_policy(dhd_dpc_prio);
+#endif /* ENABLE_ADAPTIVE_SCHED */
+ SMP_RD_BARRIER_DEPENDS();
+ if (tsk->terminated) {
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ break;
+ }
+
+ /* Call bus dpc unless it indicated down (then clean stop) */
+ if (dhd->pub.busstate != DHD_BUS_DOWN) {
+#ifdef DEBUG_DPC_THREAD_WATCHDOG
+ int resched_cnt = 0;
+#endif /* DEBUG_DPC_THREAD_WATCHDOG */
+ dhd_os_wd_timer_extend(&dhd->pub, TRUE);
+ while (dhd_bus_dpc(dhd->pub.bus)) {
+ /* process all data */
+#ifdef DEBUG_DPC_THREAD_WATCHDOG
+ resched_cnt++;
+ if (resched_cnt > MAX_RESCHED_CNT) {
+ DHD_INFO(("%s Calling msleep to"
+ "let other processes run. \n",
+ __FUNCTION__));
+ dhd->pub.dhd_bug_on = true;
+ resched_cnt = 0;
+ OSL_SLEEP(1);
+ }
+#endif /* DEBUG_DPC_THREAD_WATCHDOG */
+ }
+ dhd_os_wd_timer_extend(&dhd->pub, FALSE);
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ } else {
+ if (dhd->pub.up)
+ dhd_bus_stop(dhd->pub.bus, TRUE);
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ }
+ } else {
+ break;
+ }
+ }
+ complete_and_exit(&tsk->completed, 0);
+}
+
+static int
+dhd_rxf_thread(void *data)
+{
+ tsk_ctl_t *tsk = (tsk_ctl_t *)data;
+ dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
+#if defined(WAIT_DEQUEUE)
+#define RXF_WATCHDOG_TIME 250 /* BARK_TIME(1000) / */
+ ulong watchdogTime = OSL_SYSUPTIME(); /* msec */
+#endif
+ dhd_pub_t *pub = &dhd->pub;
+
+ /* This thread doesn't need any user-level access,
+ * so get rid of all our resources
+ */
+ if (dhd_rxf_prio > 0)
+ {
+ struct sched_param param;
+ param.sched_priority = (dhd_rxf_prio < MAX_RT_PRIO)?dhd_rxf_prio:(MAX_RT_PRIO-1);
+ setScheduler(current, SCHED_FIFO, &param);
+ }
+
+#ifdef CUSTOM_SET_CPUCORE
+ dhd->pub.current_rxf = current;
+#endif /* CUSTOM_SET_CPUCORE */
+ /* Run until signal received */
+ while (1) {
+ if (dhd->pub.conf->rxf_cpucore >= 0) {
+ printf("%s: set rxf_cpucore %d\n", __FUNCTION__, dhd->pub.conf->rxf_cpucore);
+ set_cpus_allowed_ptr(current, cpumask_of(dhd->pub.conf->rxf_cpucore));
+ dhd->pub.conf->rxf_cpucore = -1;
+ }
+ if (down_interruptible(&tsk->sema) == 0) {
+ void *skb;
+#ifdef ENABLE_ADAPTIVE_SCHED
+ dhd_sched_policy(dhd_rxf_prio);
+#endif /* ENABLE_ADAPTIVE_SCHED */
+
+ SMP_RD_BARRIER_DEPENDS();
+
+ if (tsk->terminated) {
+ DHD_OS_WAKE_UNLOCK(pub);
+ break;
+ }
+ skb = dhd_rxf_dequeue(pub);
+
+ if (skb == NULL) {
+ continue;
+ }
+ while (skb) {
+ void *skbnext = PKTNEXT(pub->osh, skb);
+ PKTSETNEXT(pub->osh, skb, NULL);
+ bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
+ __FUNCTION__, __LINE__);
+#if defined(WL_MONITOR) && defined(BCMSDIO)
+ if (dhd_monitor_enabled(pub, 0))
+ dhd_rx_mon_pkt_sdio(pub, skb, 0);
+ else
+#endif /* WL_MONITOR && BCMSDIO */
+ netif_rx_ni(skb);
+ skb = skbnext;
+ }
+#if defined(WAIT_DEQUEUE)
+ if (OSL_SYSUPTIME() - watchdogTime > RXF_WATCHDOG_TIME) {
+ OSL_SLEEP(1);
+ watchdogTime = OSL_SYSUPTIME();
+ }
+#endif
+
+ DHD_OS_WAKE_UNLOCK(pub);
+ } else {
+ break;
+ }
+ }
+ complete_and_exit(&tsk->completed, 0);
+}
+
+#ifdef BCMPCIE
+void dhd_dpc_enable(dhd_pub_t *dhdp)
+{
+#if defined(DHD_LB_RXP) || defined(DHD_LB_TXP)
+ dhd_info_t *dhd;
+
+ if (!dhdp || !dhdp->info)
+ return;
+ dhd = dhdp->info;
+#endif /* DHD_LB_RXP || DHD_LB_TXP */
+
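+ /* Note: __skb_queue_head_init() initializes only the queue head,
+ * presumably because the rx pend queue is protected by DHD's own
+ * locking, while skb_queue_head_init() also initializes the
+ * built-in spinlock used by the tx pend queue helpers.
+ */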
+#ifdef DHD_LB_RXP
+ __skb_queue_head_init(&dhd->rx_pend_queue);
+#endif /* DHD_LB_RXP */
+
+#ifdef DHD_LB_TXP
+ skb_queue_head_init(&dhd->tx_pend_queue);
+#endif /* DHD_LB_TXP */
+}
+#endif /* BCMPCIE */
+
+#ifdef BCMPCIE
+void
+dhd_dpc_kill(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd;
+
+ if (!dhdp) {
+ return;
+ }
+
+ dhd = dhdp->info;
+
+ if (!dhd) {
+ return;
+ }
+
+ if (dhd->thr_dpc_ctl.thr_pid < 0) {
+ tasklet_kill(&dhd->tasklet);
+ DHD_ERROR(("%s: tasklet disabled\n", __FUNCTION__));
+ }
+
+ cancel_delayed_work_sync(&dhd->dhd_dpc_dispatcher_work);
+#ifdef DHD_LB
+#ifdef DHD_LB_RXP
+ cancel_work_sync(&dhd->rx_napi_dispatcher_work);
+ __skb_queue_purge(&dhd->rx_pend_queue);
+#endif /* DHD_LB_RXP */
+#ifdef DHD_LB_TXP
+ cancel_work_sync(&dhd->tx_dispatcher_work);
+ skb_queue_purge(&dhd->tx_pend_queue);
+ tasklet_kill(&dhd->tx_tasklet);
+#endif /* DHD_LB_TXP */
+#endif /* DHD_LB */
+}
+
+void
+dhd_dpc_tasklet_kill(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd;
+
+ if (!dhdp) {
+ return;
+ }
+
+ dhd = dhdp->info;
+
+ if (!dhd) {
+ return;
+ }
+
+ if (dhd->thr_dpc_ctl.thr_pid < 0) {
+ tasklet_kill(&dhd->tasklet);
+ }
+}
+#endif /* BCMPCIE */
+
+static void
+dhd_dpc(ulong data)
+{
+ dhd_info_t *dhd = (dhd_info_t *)data;
+
+ int curr_cpu = get_cpu();
+ put_cpu();
+
+ /* Store current cpu as dpc_cpu */
+ atomic_set(&dhd->dpc_cpu, curr_cpu);
+
+ /* this (tasklet) can be scheduled in dhd_sched_dpc[dhd_linux.c]
+ * down below , wake lock is set,
+ * the tasklet is initialized in dhd_attach()
+ */
+ /* Call bus dpc unless it indicated down (then clean stop) */
+ if (dhd->pub.busstate != DHD_BUS_DOWN) {
+#if defined(DHD_LB_STATS) && defined(PCIE_FULL_DONGLE)
+ DHD_LB_STATS_INCR(dhd->dhd_dpc_cnt);
+#endif /* DHD_LB_STATS && PCIE_FULL_DONGLE */
+ if (dhd_bus_dpc(dhd->pub.bus)) {
+ tasklet_schedule(&dhd->tasklet);
+ }
+ } else {
+ dhd_bus_stop(dhd->pub.bus, TRUE);
+ }
+
+ /* Store as prev_dpc_cpu, which will be used in Rx load balancing for deciding candidacy */
+ atomic_set(&dhd->prev_dpc_cpu, curr_cpu);
+
+}
+
+void
+dhd_sched_dpc(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+
+ if (dhd->thr_dpc_ctl.thr_pid >= 0) {
+ DHD_OS_WAKE_LOCK(dhdp);
+ /* If the semaphore does not get up,
+ * wake unlock should be done here
+ */
+ if (!binary_sema_up(&dhd->thr_dpc_ctl)) {
+ DHD_OS_WAKE_UNLOCK(dhdp);
+ }
+ return;
+ } else {
+ tasklet_schedule(&dhd->tasklet);
+ }
+}
+#endif /* BCMDBUS */
+
+static void
+dhd_sched_rxf(dhd_pub_t *dhdp, void *skb)
+{
+ dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+#ifdef RXF_DEQUEUE_ON_BUSY
+ int ret = BCME_OK;
+ int retry = 2;
+#endif /* RXF_DEQUEUE_ON_BUSY */
+
+ DHD_OS_WAKE_LOCK(dhdp);
+
+ DHD_TRACE(("dhd_sched_rxf: Enter\n"));
+#ifdef RXF_DEQUEUE_ON_BUSY
+ do {
+ ret = dhd_rxf_enqueue(dhdp, skb);
+ if (ret == BCME_OK || ret == BCME_ERROR)
+ break;
+ else
+ OSL_SLEEP(50); /* waiting for dequeueing */
+ } while (retry-- > 0);
+
+ if (retry <= 0 && ret == BCME_BUSY) {
+ void *skbp = skb;
+
+ while (skbp) {
+ void *skbnext = PKTNEXT(dhdp->osh, skbp);
+ PKTSETNEXT(dhdp->osh, skbp, NULL);
+ bcm_object_trace_opr(skbp, BCM_OBJDBG_REMOVE,
+ __FUNCTION__, __LINE__);
+ netif_rx_ni(skbp);
+ skbp = skbnext;
+ }
+ DHD_ERROR(("send skb to kernel backlog without rxf_thread\n"));
+ } else {
+ if (dhd->thr_rxf_ctl.thr_pid >= 0) {
+ up(&dhd->thr_rxf_ctl.sema);
+ }
+ }
+#else /* RXF_DEQUEUE_ON_BUSY */
+ do {
+ if (dhd_rxf_enqueue(dhdp, skb) == BCME_OK)
+ break;
+ } while (1);
+ if (dhd->thr_rxf_ctl.thr_pid >= 0) {
+ up(&dhd->thr_rxf_ctl.sema);
+ } else {
+ DHD_OS_WAKE_UNLOCK(dhdp);
+ }
+ return;
+#endif /* RXF_DEQUEUE_ON_BUSY */
+}
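+
+/* Producer/consumer sketch (illustrative): dhd_rx_frame() chains received
+ * skbs and hands the chain to dhd_sched_rxf(), which enqueues it and kicks
+ * thr_rxf_ctl.sema; dhd_rxf_thread() then wakes, drains dhd_rxf_dequeue()
+ * and passes every skb to netif_rx_ni(). Without RXF_DEQUEUE_ON_BUSY the
+ * enqueue loop above simply retries until the queue has room.
+ */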
+
+#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
+#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
+
+#ifdef TOE
+/* Retrieve current toe component enables, which are kept as a bitmap in toe_ol iovar */
+static int
+dhd_toe_get(dhd_info_t *dhd, int ifidx, uint32 *toe_ol)
+{
+ char buf[32];
+ int ret;
+
+ ret = dhd_iovar(&dhd->pub, ifidx, "toe_ol", NULL, 0, (char *)&buf, sizeof(buf), FALSE);
+
+ if (ret < 0) {
+ if (ret == -EIO) {
+ DHD_ERROR(("%s: toe not supported by device\n", dhd_ifname(&dhd->pub,
+ ifidx)));
+ return -EOPNOTSUPP;
+ }
+
+ DHD_INFO(("%s: could not get toe_ol: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
+ return ret;
+ }
+
+ memcpy(toe_ol, buf, sizeof(uint32));
+ return 0;
+}
+
+/* Set current toe component enables in toe_ol iovar, and set toe global enable iovar */
+static int
+dhd_toe_set(dhd_info_t *dhd, int ifidx, uint32 toe_ol)
+{
+ int toe, ret;
+
+ /* Set toe_ol as requested */
+ ret = dhd_iovar(&dhd->pub, ifidx, "toe_ol", (char *)&toe_ol, sizeof(toe_ol), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: could not set toe_ol: ret=%d\n",
+ dhd_ifname(&dhd->pub, ifidx), ret));
+ return ret;
+ }
+
+ /* Enable toe globally only if any components are enabled. */
+ toe = (toe_ol != 0);
+ ret = dhd_iovar(&dhd->pub, ifidx, "toe", (char *)&toe, sizeof(toe), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: could not set toe: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
+ return ret;
+ }
+
+ return 0;
+}
+#endif /* TOE */
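+
+/* Usage sketch for the helpers above (illustrative): read the toe_ol bitmap
+ * and enable TX checksum offload only:
+ *
+ *	uint32 toe_ol;
+ *	if (dhd_toe_get(dhd, 0, &toe_ol) == 0) {
+ *		toe_ol |= TOE_TX_CSUM_OL;
+ *		toe_ol &= ~TOE_RX_CSUM_OL;
+ *		dhd_toe_set(dhd, 0, toe_ol);
+ *	}
+ *
+ * dhd_toe_set() also flips the global "toe" iovar on when any component bit
+ * is set, and off when none are.
+ */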
+
+#if defined(WL_CFG80211) && defined(NUM_SCB_MAX_PROBE)
+void dhd_set_scb_probe(dhd_pub_t *dhd)
+{
+ wl_scb_probe_t scb_probe;
+ char iovbuf[WL_EVENTING_MASK_LEN + sizeof(wl_scb_probe_t)];
+ int ret;
+
+ if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
+ return;
+ }
+
+ ret = dhd_iovar(dhd, 0, "scb_probe", NULL, 0, iovbuf, sizeof(iovbuf), FALSE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: GET max_scb_probe failed\n", __FUNCTION__));
+ }
+
+ memcpy(&scb_probe, iovbuf, sizeof(wl_scb_probe_t));
+
+ scb_probe.scb_max_probe = NUM_SCB_MAX_PROBE;
+
+ ret = dhd_iovar(dhd, 0, "scb_probe", (char *)&scb_probe, sizeof(wl_scb_probe_t), NULL, 0,
+ TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: max_scb_probe setting failed\n", __FUNCTION__));
+ return;
+ }
+}
+#endif /* WL_CFG80211 && NUM_SCB_MAX_PROBE */
+
+static void
+dhd_ethtool_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(net);
+
+ snprintf(info->driver, sizeof(info->driver), "wl");
+ snprintf(info->version, sizeof(info->version), "%lu", dhd->pub.drv_version);
+}
+
+struct ethtool_ops dhd_ethtool_ops = {
+ .get_drvinfo = dhd_ethtool_get_drvinfo
+};
+
+static int
+dhd_ethtool(dhd_info_t *dhd, void *uaddr)
+{
+ struct ethtool_drvinfo info;
+ char drvname[sizeof(info.driver)];
+ uint32 cmd;
+#ifdef TOE
+ struct ethtool_value edata;
+ uint32 toe_cmpnt, csum_dir;
+ int ret;
+#endif
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ /* all ethtool calls start with a cmd word */
+ if (copy_from_user(&cmd, uaddr, sizeof (uint32)))
+ return -EFAULT;
+
+ switch (cmd) {
+ case ETHTOOL_GDRVINFO:
+ /* Copy out any requested driver name */
+ bzero(&info.driver, sizeof(info.driver));
+ if (copy_from_user(&info, uaddr, sizeof(info)))
+ return -EFAULT;
+ if (info.driver[sizeof(info.driver) - 1] != '\0') {
+ DHD_ERROR(("%s: Exceeds the size of info.driver"
+ "truncating last byte with null\n", __FUNCTION__));
+ info.driver[sizeof(info.driver) - 1] = '\0';
+ }
+ strlcpy(drvname, info.driver, sizeof(drvname));
+
+ /* clear struct for return */
+ memset(&info, 0, sizeof(info));
+ info.cmd = cmd;
+
+ /* if dhd requested, identify ourselves */
+ if (strcmp(drvname, "?dhd") == 0) {
+ snprintf(info.driver, sizeof(info.driver), "dhd");
+ strlcpy(info.version, EPI_VERSION_STR, sizeof(info.version));
+ }
+
+ /* otherwise, require dongle to be up */
+ else if (!dhd->pub.up) {
+ DHD_ERROR(("%s: dongle is not up\n", __FUNCTION__));
+ return -ENODEV;
+ }
+
+ /* finally, report dongle driver type */
+ else if (dhd->pub.iswl)
+ snprintf(info.driver, sizeof(info.driver), "wl");
+ else
+ snprintf(info.driver, sizeof(info.driver), "xx");
+
+ snprintf(info.version, sizeof(info.version), "%lu", dhd->pub.drv_version);
+ if (copy_to_user(uaddr, &info, sizeof(info)))
+ return -EFAULT;
+ DHD_CTL(("%s: given %*s, returning %s\n", __FUNCTION__,
+ (int)sizeof(drvname), drvname, info.driver));
+ break;
+
+#ifdef TOE
+ /* Get toe offload components from dongle */
+ case ETHTOOL_GRXCSUM:
+ case ETHTOOL_GTXCSUM:
+ if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
+ return ret;
+
+ csum_dir = (cmd == ETHTOOL_GTXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
+
+ edata.cmd = cmd;
+ edata.data = (toe_cmpnt & csum_dir) ? 1 : 0;
+
+ if (copy_to_user(uaddr, &edata, sizeof(edata)))
+ return -EFAULT;
+ break;
+
+ /* Set toe offload components in dongle */
+ case ETHTOOL_SRXCSUM:
+ case ETHTOOL_STXCSUM:
+ if (copy_from_user(&edata, uaddr, sizeof(edata)))
+ return -EFAULT;
+
+ /* Read the current settings, update and write back */
+ if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
+ return ret;
+
+ csum_dir = (cmd == ETHTOOL_STXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
+
+ if (edata.data != 0)
+ toe_cmpnt |= csum_dir;
+ else
+ toe_cmpnt &= ~csum_dir;
+
+ if ((ret = dhd_toe_set(dhd, 0, toe_cmpnt)) < 0)
+ return ret;
+
+ /* If setting TX checksum mode, tell Linux the new mode */
+ if (cmd == ETHTOOL_STXCSUM) {
+ if (edata.data)
+ dhd->iflist[0]->net->features |= NETIF_F_IP_CSUM;
+ else
+ dhd->iflist[0]->net->features &= ~NETIF_F_IP_CSUM;
+ }
+
+ break;
+#endif /* TOE */
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
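+
+/* For reference: userspace reaches the handler above through the SIOCETHTOOL
+ * ioctl. On kernels without the newer feature API, `ethtool -K wlan0 tx on`
+ * maps to ETHTOOL_STXCSUM with edata.data = 1, and `ethtool -k wlan0` issues
+ * the ETHTOOL_G*CSUM queries; any command the switch does not handle returns
+ * -EOPNOTSUPP.
+ */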
+
+/* XXX function to detect that FW is dead and send Event up */
+static bool dhd_check_hang(struct net_device *net, dhd_pub_t *dhdp, int error)
+{
+#if defined(OEM_ANDROID)
+ if (!dhdp) {
+ DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
+ return FALSE;
+ }
+
+ if (!dhdp->up)
+ return FALSE;
+
+#if (!defined(BCMDBUS) && !defined(BCMPCIE))
+ if (dhdp->info->thr_dpc_ctl.thr_pid < 0) {
+ DHD_ERROR(("%s : skipped due to negative pid - unloading?\n", __FUNCTION__));
+ return FALSE;
+ }
+#endif /* !BCMDBUS && !BCMPCIE */
+
+ if ((error == -ETIMEDOUT) || (error == -EREMOTEIO) ||
+ ((dhdp->busstate == DHD_BUS_DOWN) && (!dhdp->dongle_reset))) {
+#ifdef BCMPCIE
+ DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d d3acke=%d e=%d s=%d\n",
+ __FUNCTION__, dhdp->rxcnt_timeout, dhdp->txcnt_timeout,
+ dhdp->d3ackcnt_timeout, error, dhdp->busstate));
+#else
+ DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d e=%d s=%d\n", __FUNCTION__,
+ dhdp->rxcnt_timeout, dhdp->txcnt_timeout, error, dhdp->busstate));
+#endif /* BCMPCIE */
+ if (dhdp->hang_reason == 0) {
+ if (dhdp->dongle_trap_occured) {
+ dhdp->hang_reason = HANG_REASON_DONGLE_TRAP;
+#ifdef BCMPCIE
+ } else if (dhdp->d3ackcnt_timeout) {
+ dhdp->hang_reason = dhdp->is_sched_error ?
+ HANG_REASON_D3_ACK_TIMEOUT_SCHED_ERROR :
+ HANG_REASON_D3_ACK_TIMEOUT;
+#endif /* BCMPCIE */
+ } else {
+ dhdp->hang_reason = dhdp->is_sched_error ?
+ HANG_REASON_IOCTL_RESP_TIMEOUT_SCHED_ERROR :
+ HANG_REASON_IOCTL_RESP_TIMEOUT;
+ }
+ }
+ printf("%s\n", info_string);
+ printf("MAC %pM\n", &dhdp->mac);
+ net_os_send_hang_message(net);
+ return TRUE;
+ }
+#endif /* OEM_ANDROID */
+ return FALSE;
+}
+
+#ifdef WL_MONITOR
+bool
+dhd_monitor_enabled(dhd_pub_t *dhd, int ifidx)
+{
+ return (dhd->info->monitor_type != 0);
+}
+
+#ifdef BCMSDIO
+static void
+dhd_rx_mon_pkt_sdio(dhd_pub_t *dhdp, void *pkt, int ifidx)
+{
+ dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+
+ if (!dhd->monitor_skb) {
+ if ((dhd->monitor_skb = PKTTONATIVE(dhdp->osh, pkt)) == NULL)
+ return;
+ }
+
+ if (dhd->monitor_type && dhd->monitor_dev)
+ dhd->monitor_skb->dev = dhd->monitor_dev;
+ else {
+ PKTFREE(dhdp->osh, pkt, FALSE);
+ dhd->monitor_skb = NULL;
+ return;
+ }
+
+ dhd->monitor_skb->protocol =
+ eth_type_trans(dhd->monitor_skb, dhd->monitor_skb->dev);
+ dhd->monitor_len = 0;
+
+ netif_rx_ni(dhd->monitor_skb);
+
+ dhd->monitor_skb = NULL;
+}
+#elif defined(BCMPCIE)
+
+void
+dhd_rx_mon_pkt(dhd_pub_t *dhdp, host_rxbuf_cmpl_t* msg, void *pkt, int ifidx)
+{
+ dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+#ifdef HOST_RADIOTAP_CONV
+ if (dhd->host_radiotap_conv) {
+ uint16 len = 0, offset = 0;
+ monitor_pkt_info_t pkt_info;
+
+ memcpy(&pkt_info.marker, &msg->marker, sizeof(msg->marker));
+ memcpy(&pkt_info.ts, &msg->ts, sizeof(monitor_pkt_ts_t));
+
+ if (!dhd->monitor_skb) {
+ if ((dhd->monitor_skb = dev_alloc_skb(MAX_MON_PKT_SIZE)) == NULL)
+ return;
+ }
+
+ len = bcmwifi_monitor(dhd->monitor_info, &pkt_info, PKTDATA(dhdp->osh, pkt),
+ PKTLEN(dhdp->osh, pkt), PKTDATA(dhdp->osh, dhd->monitor_skb), &offset);
+
+ if (dhd->monitor_type && dhd->monitor_dev)
+ dhd->monitor_skb->dev = dhd->monitor_dev;
+ else {
+ PKTFREE(dhdp->osh, pkt, FALSE);
+ dev_kfree_skb(dhd->monitor_skb);
+ return;
+ }
+
+ PKTFREE(dhdp->osh, pkt, FALSE);
+
+ if (!len) {
+ return;
+ }
+
+ skb_put(dhd->monitor_skb, len);
+ skb_pull(dhd->monitor_skb, offset);
+
+ dhd->monitor_skb->protocol = eth_type_trans(dhd->monitor_skb,
+ dhd->monitor_skb->dev);
+ }
+ else
+#endif /* HOST_RADIOTAP_CONV */
+ {
+ uint8 amsdu_flag = (msg->flags & BCMPCIE_PKT_FLAGS_MONITOR_MASK) >>
+ BCMPCIE_PKT_FLAGS_MONITOR_SHIFT;
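+ /* Monitor-mode AMSDU reassembly: NO_AMSDU passes the frame
+ * through as-is; FIRST_PKT allocates monitor_skb and copies in
+ * the first fragment; INTER_PKT appends and returns; LAST_PKT
+ * appends, finalizes the skb and falls through to the delivery
+ * path below.
+ */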
+ switch (amsdu_flag) {
+ case BCMPCIE_PKT_FLAGS_MONITOR_NO_AMSDU:
+ default:
+ if (!dhd->monitor_skb) {
+ if ((dhd->monitor_skb = PKTTONATIVE(dhdp->osh, pkt))
+ == NULL)
+ return;
+ }
+ if (dhd->monitor_type && dhd->monitor_dev)
+ dhd->monitor_skb->dev = dhd->monitor_dev;
+ else {
+ PKTFREE(dhdp->osh, pkt, FALSE);
+ dhd->monitor_skb = NULL;
+ return;
+ }
+ dhd->monitor_skb->protocol =
+ eth_type_trans(dhd->monitor_skb, dhd->monitor_skb->dev);
+ dhd->monitor_len = 0;
+ break;
+
+ case BCMPCIE_PKT_FLAGS_MONITOR_FIRST_PKT:
+ if (!dhd->monitor_skb) {
+ if ((dhd->monitor_skb = dev_alloc_skb(MAX_MON_PKT_SIZE))
+ == NULL)
+ return;
+ dhd->monitor_len = 0;
+ }
+ if (dhd->monitor_type && dhd->monitor_dev)
+ dhd->monitor_skb->dev = dhd->monitor_dev;
+ else {
+ PKTFREE(dhdp->osh, pkt, FALSE);
+ dev_kfree_skb(dhd->monitor_skb);
+ return;
+ }
+ memcpy(PKTDATA(dhdp->osh, dhd->monitor_skb),
+ PKTDATA(dhdp->osh, pkt), PKTLEN(dhdp->osh, pkt));
+ dhd->monitor_len = PKTLEN(dhdp->osh, pkt);
+ PKTFREE(dhdp->osh, pkt, FALSE);
+ return;
+
+ case BCMPCIE_PKT_FLAGS_MONITOR_INTER_PKT:
+ memcpy(PKTDATA(dhdp->osh, dhd->monitor_skb) + dhd->monitor_len,
+ PKTDATA(dhdp->osh, pkt), PKTLEN(dhdp->osh, pkt));
+ dhd->monitor_len += PKTLEN(dhdp->osh, pkt);
+ PKTFREE(dhdp->osh, pkt, FALSE);
+ return;
+
+ case BCMPCIE_PKT_FLAGS_MONITOR_LAST_PKT:
+ memcpy(PKTDATA(dhdp->osh, dhd->monitor_skb) + dhd->monitor_len,
+ PKTDATA(dhdp->osh, pkt), PKTLEN(dhdp->osh, pkt));
+ dhd->monitor_len += PKTLEN(dhdp->osh, pkt);
+ PKTFREE(dhdp->osh, pkt, FALSE);
+ skb_put(dhd->monitor_skb, dhd->monitor_len);
+ dhd->monitor_skb->protocol =
+ eth_type_trans(dhd->monitor_skb, dhd->monitor_skb->dev);
+ dhd->monitor_len = 0;
+ break;
+ }
+ }
+
+ if (skb_headroom(dhd->monitor_skb) < ETHER_HDR_LEN) {
+ struct sk_buff *skb2;
+
+ DHD_INFO(("%s: insufficient headroom\n",
+ dhd_ifname(&dhd->pub, ifidx)));
+
+ skb2 = skb_realloc_headroom(dhd->monitor_skb, ETHER_HDR_LEN);
+
+ dev_kfree_skb(dhd->monitor_skb);
+ if ((dhd->monitor_skb = skb2) == NULL) {
+ DHD_ERROR(("%s: skb_realloc_headroom failed\n",
+ dhd_ifname(&dhd->pub, ifidx)));
+ return;
+ }
+ }
+ PKTPUSH(dhd->pub.osh, dhd->monitor_skb, ETHER_HDR_LEN);
+
+ /* XXX WL here makes sure data is 4-byte aligned? */
+ if (in_interrupt()) {
+ bcm_object_trace_opr(dhd->monitor_skb, BCM_OBJDBG_REMOVE,
+ __FUNCTION__, __LINE__);
+ netif_rx(dhd->monitor_skb);
+ } else {
+ /* If the receive is not processed inside an ISR,
+ * the softirqd must be woken explicitly to service
+ * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
+ * by netif_rx_ni(), but in earlier kernels, we need
+ * to do it manually.
+ */
+ bcm_object_trace_opr(dhd->monitor_skb, BCM_OBJDBG_REMOVE,
+ __FUNCTION__, __LINE__);
+
+ netif_rx_ni(dhd->monitor_skb);
+ }
+
+ dhd->monitor_skb = NULL;
+}
+#endif
+
+typedef struct dhd_mon_dev_priv {
+ struct net_device_stats stats;
+} dhd_mon_dev_priv_t;
+
+#define DHD_MON_DEV_PRIV_SIZE (sizeof(dhd_mon_dev_priv_t))
+#define DHD_MON_DEV_PRIV(dev) ((dhd_mon_dev_priv_t *)DEV_PRIV(dev))
+#define DHD_MON_DEV_STATS(dev) (((dhd_mon_dev_priv_t *)DEV_PRIV(dev))->stats)
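+
+/* The monitor netdev keeps only a stats block in its private area: (sketch)
+ * alloc_etherdev(DHD_MON_DEV_PRIV_SIZE) reserves the room, and
+ * DHD_MON_DEV_STATS(dev) is returned directly from ndo_get_stats. The
+ * DEV_PRIV() accessor is assumed to be defined elsewhere in this driver.
+ */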
+
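+/* The monitor interface is receive-only: any transmit attempt is freed and
+ * reported as success.
+ */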
+static int
+dhd_monitor_start(struct sk_buff *skb, struct net_device *dev)
+{
+ PKTFREE(NULL, skb, FALSE);
+ return 0;
+}
+
+#if defined(BT_OVER_SDIO)
+
+void
+dhdsdio_bus_usr_cnt_inc(dhd_pub_t *dhdp)
+{
+ dhdp->info->bus_user_count++;
+}
+
+void
+dhdsdio_bus_usr_cnt_dec(dhd_pub_t *dhdp)
+{
+ dhdp->info->bus_user_count--;
+}
+
+/* Return values:
+ * Success: Returns 0
+ * Failure: Returns -1 or an errno code
+ */
+int
+dhd_bus_get(wlan_bt_handle_t handle, bus_owner_t owner)
+{
+ dhd_pub_t *dhdp = (dhd_pub_t *)handle;
+ dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+ int ret = 0;
+
+ mutex_lock(&dhd->bus_user_lock);
+ ++dhd->bus_user_count;
+ if (dhd->bus_user_count < 0) {
+ DHD_ERROR(("%s(): bus_user_count is negative, which is invalid\n", __FUNCTION__));
+ ret = -1;
+ goto exit;
+ }
+
+ if (dhd->bus_user_count == 1) {
+
+ dhd->pub.hang_was_sent = 0;
+
+ /* First user, turn on WL_REG, start the bus */
+ DHD_ERROR(("%s(): First user Turn On WL_REG & start the bus", __FUNCTION__));
+
+ if (!wifi_platform_set_power(dhd->adapter, TRUE, WIFI_TURNON_DELAY)) {
+ /* Enable F1 */
+ ret = dhd_bus_resume(dhdp, 0);
+ if (ret) {
+ DHD_ERROR(("%s(): Failed to enable F1, err=%d\n",
+ __FUNCTION__, ret));
+ goto exit;
+ }
+ }
+
+ /* XXX Some DHD modules (e.g. cfg80211) configures operation mode based on firmware
+ * name. This is indeed a hack but we have to make it work properly before we have
+ * a better solution
+ */
+ dhd_update_fw_nv_path(dhd);
+ /* update firmware and nvram path to sdio bus */
+ dhd_bus_update_fw_nv_path(dhd->pub.bus,
+ dhd->fw_path, dhd->nv_path);
+ /* download the firmware, Enable F2 */
+ /* TODO: Should be done only in case of FW switch */
+ ret = dhd_bus_devreset(dhdp, FALSE);
+ dhd_bus_resume(dhdp, 1);
+ if (!ret) {
+ if (dhd_sync_with_dongle(&dhd->pub) < 0) {
+ DHD_ERROR(("%s(): Sync with dongle failed!!\n", __FUNCTION__));
+ ret = -EFAULT;
+ }
+ } else {
+ DHD_ERROR(("%s(): Failed to download, err=%d\n", __FUNCTION__, ret));
+ }
+ } else {
+ DHD_ERROR(("%s(): BUS is already acquired, just increase the count %d \r\n",
+ __FUNCTION__, dhd->bus_user_count));
+ }
+exit:
+ mutex_unlock(&dhd->bus_user_lock);
+ return ret;
+}
+EXPORT_SYMBOL(dhd_bus_get);
+
+/* Return values:
+ * Success: Returns 0
+ * Failure: Returns -1 or an errno code
+ */
+int
+dhd_bus_put(wlan_bt_handle_t handle, bus_owner_t owner)
+{
+ dhd_pub_t *dhdp = (dhd_pub_t *)handle;
+ dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+ int ret = 0;
+ BCM_REFERENCE(owner);
+
+ mutex_lock(&dhd->bus_user_lock);
+ --dhd->bus_user_count;
+ if (dhd->bus_user_count < 0) {
+ DHD_ERROR(("%s(): bus_user_count is negative, which is invalid\n", __FUNCTION__));
+ dhd->bus_user_count = 0;
+ ret = -1;
+ goto exit;
+ }
+
+ if (dhd->bus_user_count == 0) {
+ /* Last user, stop the bus and turn Off WL_REG */
+ DHD_ERROR(("%s(): There are no owners left Trunf Off WL_REG & stop the bus \r\n",
+ __FUNCTION__));
+#ifdef PROP_TXSTATUS
+ if (dhd->pub.wlfc_enabled) {
+ dhd_wlfc_deinit(&dhd->pub);
+ }
+#endif /* PROP_TXSTATUS */
+#ifdef PNO_SUPPORT
+ if (dhd->pub.pno_state) {
+ dhd_pno_deinit(&dhd->pub);
+ }
+#endif /* PNO_SUPPORT */
+#ifdef RTT_SUPPORT
+ if (dhd->pub.rtt_state) {
+ dhd_rtt_deinit(&dhd->pub);
+ }
+#endif /* RTT_SUPPORT */
+ ret = dhd_bus_devreset(dhdp, TRUE);
+ if (!ret) {
+ dhd_bus_suspend(dhdp);
+ wifi_platform_set_power(dhd->adapter, FALSE, WIFI_TURNOFF_DELAY);
+ }
+ } else {
+ DHD_ERROR(("%s(): Other owners using bus, decrease the count %d \r\n",
+ __FUNCTION__, dhd->bus_user_count));
+ }
+exit:
+ mutex_unlock(&dhd->bus_user_lock);
+ return ret;
+}
+EXPORT_SYMBOL(dhd_bus_put);
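+
+/* Sharing sketch (illustrative; assumes a BT_MODULE owner id is declared
+ * alongside WLAN_MODULE in bus_owner_t): a BT stack brackets its SDIO
+ * traffic with get/put and clock enable/disable, and only the first get /
+ * last put actually powers the bus:
+ *
+ *	dhd_bus_get(handle, BT_MODULE);		first user powers WL_REG, loads FW
+ *	dhd_bus_clk_enable(handle, BT_MODULE);
+ *	... BT transfers ...
+ *	dhd_bus_clk_disable(handle, BT_MODULE);
+ *	dhd_bus_put(handle, BT_MODULE);		last user powers off
+ */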
+
+int
+dhd_net_bus_get(struct net_device *dev)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ return dhd_bus_get(&dhd->pub, WLAN_MODULE);
+}
+
+int
+dhd_net_bus_put(struct net_device *dev)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ return dhd_bus_put(&dhd->pub, WLAN_MODULE);
+}
+
+/*
+ * Function to enable the Bus Clock
+ * Returns BCME_OK on success and BCME_xxx on failure
+ *
+ * This function is not callable from non-sleepable context
+ */
+int dhd_bus_clk_enable(wlan_bt_handle_t handle, bus_owner_t owner)
+{
+ dhd_pub_t *dhdp = (dhd_pub_t *)handle;
+
+ int ret;
+
+ dhd_os_sdlock(dhdp);
+ /*
+ * The second argument is TRUE, meaning the call waits until the
+ * clocks are actually available.
+ */
+ ret = __dhdsdio_clk_enable(dhdp->bus, owner, TRUE);
+ dhd_os_sdunlock(dhdp);
+
+ return ret;
+}
+EXPORT_SYMBOL(dhd_bus_clk_enable);
+
+/*
+ * Function to disable the Bus Clock
+ * Returns BCME_OK on success and BCME_xxx on failure
+ *
+ * This function is not callable from non-sleepable context
+ */
+int dhd_bus_clk_disable(wlan_bt_handle_t handle, bus_owner_t owner)
+{
+ dhd_pub_t *dhdp = (dhd_pub_t *)handle;
+
+ int ret;
+
+ dhd_os_sdlock(dhdp);
+ /*
+ * The second argument is TRUE, meaning the call waits until the
+ * clocks are actually disabled.
+ */
+ ret = __dhdsdio_clk_disable(dhdp->bus, owner, TRUE);
+ dhd_os_sdunlock(dhdp);
+
+ return ret;
+}
+EXPORT_SYMBOL(dhd_bus_clk_disable);
+
+/*
+ * Function to reset bt_use_count counter to zero.
+ *
+ * This function is not callable from non-sleepable context
+ */
+void dhd_bus_reset_bt_use_count(wlan_bt_handle_t handle)
+{
+ dhd_pub_t *dhdp = (dhd_pub_t *)handle;
+
+ /* take the lock and reset bt use count */
+ dhd_os_sdlock(dhdp);
+ dhdsdio_reset_bt_use_count(dhdp->bus);
+ dhd_os_sdunlock(dhdp);
+}
+EXPORT_SYMBOL(dhd_bus_reset_bt_use_count);
+
+void dhd_bus_retry_hang_recovery(wlan_bt_handle_t handle)
+{
+ dhd_pub_t *dhdp = (dhd_pub_t *)handle;
+ dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
+
+ dhdp->hang_was_sent = 0;
+
+ dhd_os_send_hang_message(&dhd->pub);
+}
+EXPORT_SYMBOL(dhd_bus_retry_hang_recovery);
+
+#endif /* BT_OVER_SDIO */
+
+static int
+dhd_monitor_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ return 0;
+}
+
+static struct net_device_stats*
+dhd_monitor_get_stats(struct net_device *dev)
+{
+ return &DHD_MON_DEV_STATS(dev);
+}
+
+static const struct net_device_ops netdev_monitor_ops =
+{
+ .ndo_start_xmit = dhd_monitor_start,
+ .ndo_get_stats = dhd_monitor_get_stats,
+ .ndo_do_ioctl = dhd_monitor_ioctl
+};
+
+static void
+dhd_add_monitor_if(dhd_info_t *dhd)
+{
+ struct net_device *dev;
+ char *devname;
+#ifdef HOST_RADIOTAP_CONV
+ dhd_pub_t *dhdp = (dhd_pub_t *)&dhd->pub;
+#endif /* HOST_RADIOTAP_CONV */
+ uint32 scan_suppress = FALSE;
+ int ret = BCME_OK;
+
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
+ return;
+ }
+
+ if (dhd->monitor_dev) {
+ DHD_ERROR(("%s: monitor i/f already exists", __FUNCTION__));
+ return;
+ }
+
+ dev = alloc_etherdev(DHD_MON_DEV_PRIV_SIZE);
+ if (!dev) {
+ DHD_ERROR(("%s: alloc wlif failed\n", __FUNCTION__));
+ return;
+ }
+
+ devname = "radiotap";
+
+ snprintf(dev->name, sizeof(dev->name), "%s%u", devname, dhd->unit);
+
+#ifndef ARPHRD_IEEE80211_PRISM /* From Linux 2.4.18 */
+#define ARPHRD_IEEE80211_PRISM 802
+#endif
+
+#ifndef ARPHRD_IEEE80211_RADIOTAP
+#define ARPHRD_IEEE80211_RADIOTAP 803 /* IEEE 802.11 + radiotap header */
+#endif /* ARPHRD_IEEE80211_RADIOTAP */
+
+ dev->type = ARPHRD_IEEE80211_RADIOTAP;
+
+ dev->netdev_ops = &netdev_monitor_ops;
+
+ /* XXX: This is called from the IOCTL path, where rtnl_lock is already
+ * held, so calling register_netdev() here would deadlock; use
+ * register_netdevice() instead.
+ */
+ if (register_netdevice(dev)) {
+ DHD_ERROR(("%s, register_netdev failed for %s\n",
+ __FUNCTION__, dev->name));
+ free_netdev(dev);
+ return;
+ }
+
+ if (FW_SUPPORTED((&dhd->pub), monitor)) {
+#ifdef DHD_PCIE_RUNTIMEPM
+ /* Disable RuntimePM in monitor mode */
+ DHD_DISABLE_RUNTIME_PM(&dhd->pub);
+ DHD_ERROR(("%s : disable runtime PM in monitor mode\n", __FUNCTION__));
+#endif /* DHD_PCIE_RUNTIMEPM */
+ scan_suppress = TRUE;
+ /* Set the SCAN SUPPRESS Flag in the firmware to disable scan in Monitor mode */
+ ret = dhd_iovar(&dhd->pub, 0, "scansuppress", (char *)&scan_suppress,
+ sizeof(scan_suppress), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: scansuppress set failed, ret=%d\n", __FUNCTION__, ret));
+ }
+ }
+
+#ifdef HOST_RADIOTAP_CONV
+ bcmwifi_monitor_create(&dhd->monitor_info);
+ bcmwifi_set_corerev_major(dhd->monitor_info, dhdpcie_get_corerev_major(dhdp));
+ bcmwifi_set_corerev_minor(dhd->monitor_info, dhdpcie_get_corerev_minor(dhdp));
+#endif /* HOST_RADIOTAP_CONV */
+ dhd->monitor_dev = dev;
+}
+
+static void
+dhd_del_monitor_if(dhd_info_t *dhd)
+{
+ int ret = BCME_OK;
+ uint32 scan_suppress = FALSE;
+
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
+ return;
+ }
+
+ if (!dhd->monitor_dev) {
+ DHD_ERROR(("%s: monitor i/f doesn't exist\n", __FUNCTION__));
+ return;
+ }
+
+ if (FW_SUPPORTED((&dhd->pub), monitor)) {
+#ifdef DHD_PCIE_RUNTIMEPM
+ /* Enable RuntimePM */
+ DHD_ENABLE_RUNTIME_PM(&dhd->pub);
+ DHD_ERROR(("%s : enabled runtime PM\n", __FUNCTION__));
+#endif /* DHD_PCIE_RUNTIMEPM */
+ scan_suppress = FALSE;
+ /* Unset the SCAN SUPPRESS Flag in the firmware to enable scan */
+ ret = dhd_iovar(&dhd->pub, 0, "scansuppress", (char *)&scan_suppress,
+ sizeof(scan_suppress), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: scansuppress set failed, ret=%d\n", __FUNCTION__, ret));
+ }
+ }
+
+ if (dhd->monitor_dev) {
+ if (dhd->monitor_dev->reg_state == NETREG_UNINITIALIZED) {
+ free_netdev(dhd->monitor_dev);
+ } else {
+ if (rtnl_is_locked()) {
+ unregister_netdevice(dhd->monitor_dev);
+ } else {
+ unregister_netdev(dhd->monitor_dev);
+ }
+ }
+ dhd->monitor_dev = NULL;
+ }
+#ifdef HOST_RADIOTAP_CONV
+ if (dhd->monitor_info) {
+ bcmwifi_monitor_delete(dhd->monitor_info);
+ dhd->monitor_info = NULL;
+ }
+#endif /* HOST_RADIOTAP_CONV */
+}
+
+void
+dhd_set_monitor(dhd_pub_t *pub, int ifidx, int val)
+{
+ dhd_info_t *dhd = pub->info;
+
+ DHD_TRACE(("%s: val %d\n", __FUNCTION__, val));
+
+ dhd_net_if_lock_local(dhd);
+ if (!val) {
+ /* Delete monitor */
+ dhd_del_monitor_if(dhd);
+ } else {
+ /* Add monitor */
+ dhd_add_monitor_if(dhd);
+ }
+ dhd->monitor_type = val;
+ dhd_net_if_unlock_local(dhd);
+}
+#endif /* WL_MONITOR */
+
+#if defined(DHD_H2D_LOG_TIME_SYNC)
+/*
+ * Helper function:
+ * Used for RTE console message time syncing with Host printk
+ */
+void dhd_h2d_log_time_sync_deferred_wq_schedule(dhd_pub_t *dhdp)
+{
+ dhd_info_t *info = dhdp->info;
+
+ /* Ideally the "state" should be always TRUE */
+ dhd_deferred_schedule_work(info->dhd_deferred_wq, NULL,
+ DHD_WQ_WORK_H2D_CONSOLE_TIME_STAMP_MATCH,
+ dhd_deferred_work_rte_log_time_sync,
+ DHD_WQ_WORK_PRIORITY_LOW);
+}
+
+void
+dhd_deferred_work_rte_log_time_sync(void *handle, void *event_info, u8 event)
+{
+ dhd_info_t *dhd_info = handle;
+ dhd_pub_t *dhd;
+
+ if (event != DHD_WQ_WORK_H2D_CONSOLE_TIME_STAMP_MATCH) {
+ DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
+ return;
+ }
+
+ if (!dhd_info) {
+ DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
+ return;
+ }
+
+ dhd = &dhd_info->pub;
+
+ /*
+ * Send the IOVAR that syncs console time between host and dongle.
+ * If the IOVAR fails:
+ * 1. dhd_rte_time_sync_ms is set to 0, and
+ * 2. host/dongle console time sync will *not* happen.
+ */
+ dhd_h2d_log_time_sync(dhd);
+}
+#endif /* DHD_H2D_LOG_TIME_SYNC */
+
+int dhd_ioctl_process(dhd_pub_t *pub, int ifidx, dhd_ioctl_t *ioc, void *data_buf)
+{
+ int bcmerror = BCME_OK;
+ int buflen = 0;
+ struct net_device *net;
+
+ net = dhd_idx2net(pub, ifidx);
+ if (!net) {
+ bcmerror = BCME_BADARG;
+ /*
+ * A bad netdev pointer means DHD can't communicate with higher
+ * layers, so just return from here.
+ */
+ return bcmerror;
+ }
+
+ /* check for local dhd ioctl and handle it */
+ if (ioc->driver == DHD_IOCTL_MAGIC) {
+ if (data_buf) {
+ /* Return error if nvram size is too big */
+ if (!bcmstricmp((char *)data_buf, "vars")) {
+ DHD_ERROR(("%s: nvram len(%d) MAX_NVRAMBUF_SIZE(%d)\n",
+ __FUNCTION__, ioc->len, MAX_NVRAMBUF_SIZE));
+ if (ioc->len > MAX_NVRAMBUF_SIZE) {
+ DHD_ERROR(("%s: nvram len(%d) > MAX_NVRAMBUF_SIZE(%d)\n",
+ __FUNCTION__, ioc->len, MAX_NVRAMBUF_SIZE));
+ bcmerror = BCME_BUFTOOLONG;
+ goto done;
+ }
+ buflen = ioc->len;
+ } else if (!bcmstricmp((char *)data_buf, "dump")) {
+ buflen = MIN(ioc->len, DHD_IOCTL_MAXLEN_32K);
+ } else {
+ /* This is a DHD IOVAR, truncate buflen to DHD_IOCTL_MAXLEN */
+ buflen = MIN(ioc->len, DHD_IOCTL_MAXLEN);
+ }
+ }
+ bcmerror = dhd_ioctl((void *)pub, ioc, data_buf, buflen);
+ if (bcmerror)
+ pub->bcmerror = bcmerror;
+ goto done;
+ }
+
+ /* This is a WL IOVAR, truncate buflen to WLC_IOCTL_MAXLEN */
+ if (data_buf)
+ buflen = MIN(ioc->len, WLC_IOCTL_MAXLEN);
+
+#ifndef BCMDBUS
+ /* send to dongle (must be up, and wl). */
+ if (pub->busstate == DHD_BUS_DOWN || pub->busstate == DHD_BUS_LOAD) {
+ if ((!pub->dongle_trap_occured) && allow_delay_fwdl) {
+ int ret;
+ if (atomic_read(&exit_in_progress)) {
+ DHD_ERROR(("%s module exit in progress\n", __func__));
+ bcmerror = BCME_DONGLE_DOWN;
+ goto done;
+ }
+ ret = dhd_bus_start(pub);
+ if (ret != 0) {
+ DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
+ bcmerror = BCME_DONGLE_DOWN;
+ goto done;
+ }
+ } else {
+ bcmerror = BCME_DONGLE_DOWN;
+ goto done;
+ }
+ }
+
+ if (!pub->iswl) {
+ bcmerror = BCME_DONGLE_DOWN;
+ goto done;
+ }
+#endif /* !BCMDBUS */
+
+ /*
+ * Flush the TX queue if required for proper message serialization:
+ * Intercept WLC_SET_KEY IOCTL - serialize M4 send and set key IOCTL to
+ * prevent M4 encryption and
+ * intercept WLC_DISASSOC IOCTL - serialize WPS-DONE and WLC_DISASSOC IOCTL to
+ * prevent disassoc frame being sent before WPS-DONE frame.
+ */
+ if (ioc->cmd == WLC_SET_KEY ||
+ (ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
+ strncmp("wsec_key", data_buf, 9) == 0) ||
+ (ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
+ strncmp("bsscfg:wsec_key", data_buf, 15) == 0) ||
+ ioc->cmd == WLC_DISASSOC)
+ dhd_wait_pend8021x(net);
+
+ if ((ioc->cmd == WLC_SET_VAR || ioc->cmd == WLC_GET_VAR) &&
+ data_buf != NULL && strncmp("rpc_", data_buf, 4) == 0) {
+ bcmerror = BCME_UNSUPPORTED;
+ goto done;
+ }
+
+ /* XXX this typecast is BAD !!! */
+ bcmerror = dhd_wl_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen);
+
+#ifdef REPORT_FATAL_TIMEOUTS
+ /* ensure that the timeouts/flags are started/set after the ioctl returns success */
+ if (bcmerror == BCME_OK) {
+ if (ioc->cmd == WLC_SET_WPA_AUTH) {
+ int wpa_auth;
+
+ wpa_auth = *((int *)ioc->buf);
+ DHD_INFO(("wpa_auth:%d\n", wpa_auth));
+ if (wpa_auth != WPA_AUTH_DISABLED) {
+ /* If AP is with security then enable
+ * WLC_E_PSK_SUP event checking
+ */
+ pub->secure_join = TRUE;
+ } else {
+ /* If AP is with open then disable
+ * WLC_E_PSK_SUP event checking
+ */
+ pub->secure_join = FALSE;
+ }
+ }
+
+ if (ioc->cmd == WLC_SET_AUTH) {
+ int auth;
+ auth = *((int *)ioc->buf);
+ DHD_INFO(("Auth:%d\n", auth));
+
+ if (auth != WL_AUTH_OPEN_SYSTEM) {
+ /* If AP is with security then enable
+ * WLC_E_PSK_SUP event checking
+ */
+ pub->secure_join = TRUE;
+ } else {
+ /* If AP is with open then disable WLC_E_PSK_SUP event checking */
+ pub->secure_join = FALSE;
+ }
+ }
+
+ if (ioc->cmd == WLC_SET_SSID) {
+ bool set_ssid_rcvd = OSL_ATOMIC_READ(pub->osh, &pub->set_ssid_rcvd);
+ if ((!set_ssid_rcvd) && (!pub->secure_join)) {
+ dhd_start_join_timer(pub);
+ } else {
+ DHD_ERROR(("%s: didnot start join timer."
+ "open join, set_ssid_rcvd: %d secure_join: %d\n",
+ __FUNCTION__, set_ssid_rcvd, pub->secure_join));
+ OSL_ATOMIC_SET(pub->osh, &pub->set_ssid_rcvd, FALSE);
+ }
+ }
+
+ if (ioc->cmd == WLC_SCAN) {
+ dhd_start_scan_timer(pub, 0);
+ }
+ }
+#endif /* REPORT_FATAL_TIMEOUTS */
+
+done:
+#if defined(OEM_ANDROID)
+ dhd_check_hang(net, pub, bcmerror);
+#endif /* OEM_ANDROID */
+
+ return bcmerror;
+}
+
+#ifdef WL_NANHO
+static bool
+dhd_nho_iovar_filter(dhd_ioctl_t *ioc)
+{
+ bool forward_to_nanho = FALSE;
+
+ if ((ioc->cmd == WLC_SET_VAR) || (ioc->cmd == WLC_GET_VAR)) {
+ if ((ioc->len >= sizeof("nan")) && !strcmp(ioc->buf, "nan")) {
+ /* forward nan iovar to nanho module */
+ forward_to_nanho = TRUE;
+ } else if ((ioc->len >= sizeof("slot_bss")) && !strcmp(ioc->buf, "slot_bss")) {
+ /* forward slot_bss iovar to nanho module */
+ forward_to_nanho = TRUE;
+ }
+ }
+ return forward_to_nanho;
+}
+
+static int
+dhd_nho_ioctl_process(dhd_pub_t *pub, int ifidx, dhd_ioctl_t *ioc, void *data_buf)
+{
+ int err;
+
+ if (dhd_nho_iovar_filter(ioc)) {
+ /* forward iovar to nanho module */
+ err = bcm_nanho_iov(pub->nanhoi, ifidx, (wl_ioctl_t *)ioc);
+ } else {
+ /* all other iovars bypass nanho and are issued through the normal path */
+ err = dhd_ioctl_process(pub, ifidx, ioc, data_buf);
+ }
+ return err;
+}
+
+static int
+dhd_nho_ioctl_cb(void *drv_ctx, int ifidx, wl_ioctl_t *ioc, bool drv_lock)
+{
+ int err;
+
+ if (drv_lock) {
+ DHD_OS_WAKE_LOCK((dhd_pub_t *)drv_ctx);
+ }
+
+ err = dhd_ioctl_process((dhd_pub_t *)drv_ctx, ifidx, (dhd_ioctl_t *)ioc, ioc->buf);
+
+ if (drv_lock) {
+ DHD_OS_WAKE_UNLOCK((dhd_pub_t *)drv_ctx);
+ }
+
+ return err;
+}
+#endif /* WL_NANHO */
+
+/* XXX For the moment, local ioctls will return BCM errors */
+/* XXX Others return linux codes, need to be changed... */
+/**
+ * Called by the OS (optionally via a wrapper function).
+ * @param net Linux per dongle instance
+ * @param ifr Linux request structure
+ * @param cmd e.g. SIOCETHTOOL
+ */
+static int
+dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
+ void __user *data,
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) */
+ int cmd)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(net);
+ dhd_ioctl_t ioc;
+ int bcmerror = 0;
+ int ifidx;
+ int ret;
+ void *local_buf = NULL; /**< buffer in kernel space */
+ void __user *ioc_buf_user = NULL; /**< buffer in user space */
+ u16 buflen = 0;
+
+ if (atomic_read(&exit_in_progress)) {
+ DHD_ERROR(("%s module exit in progress\n", __func__));
+ bcmerror = BCME_DONGLE_DOWN;
+ return OSL_ERROR(bcmerror);
+ }
+
+ DHD_OS_WAKE_LOCK(&dhd->pub);
+
+#if defined(OEM_ANDROID)
+ /* Interface up check for built-in type */
+ if (!dhd_download_fw_on_driverload && dhd->pub.up == FALSE) {
+ DHD_ERROR(("%s: Interface is down \n", __FUNCTION__));
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ return OSL_ERROR(BCME_NOTUP);
+ }
+#endif /* (OEM_ANDROID) */
+
+ ifidx = dhd_net2idx(dhd, net);
+ DHD_TRACE(("%s: ifidx %d, cmd 0x%04x\n", __FUNCTION__, ifidx, cmd));
+
+#if defined(WL_STATIC_IF)
+ /* skip for static ndev when it is down */
+ if (dhd_is_static_ndev(&dhd->pub, net) && !(net->flags & IFF_UP)) {
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ return -1;
+ }
+#endif /* WL_STATIC_IF */
+
+ if (ifidx == DHD_BAD_IF) {
+ DHD_ERROR(("%s: BAD IF\n", __FUNCTION__));
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ return -1;
+ }
+
+#if defined(WL_WIRELESS_EXT)
+ /* linux wireless extensions */
+ if ((cmd >= SIOCIWFIRST) && (cmd <= SIOCIWLAST)) {
+ /* may recurse, do NOT lock */
+ ret = wl_iw_ioctl(net, ifr, cmd);
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ return ret;
+ }
+#endif /* defined(WL_WIRELESS_EXT) */
+
+ if (cmd == SIOCETHTOOL) {
+ ret = dhd_ethtool(dhd, (void*)ifr->ifr_data);
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ return ret;
+ }
+
+#if defined(OEM_ANDROID)
+ if (cmd == SIOCDEVPRIVATE+1) {
+ ret = wl_android_priv_cmd(net, ifr);
+ dhd_check_hang(net, &dhd->pub, ret);
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ return ret;
+ }
+
+#endif /* OEM_ANDROID */
+
+ if (cmd != SIOCDEVPRIVATE) {
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ return -EOPNOTSUPP;
+ }
+
+ memset(&ioc, 0, sizeof(ioc));
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
+ /* Copy the ioc control structure part of ioctl request */
+ if (copy_from_user(&ioc, data, sizeof(wl_ioctl_t))) {
+ bcmerror = BCME_BADADDR;
+ goto done;
+ }
+ /* To differentiate between wl and dhd, read 4 more bytes */
+ if ((copy_from_user(&ioc.driver, (char *)data + sizeof(wl_ioctl_t),
+ sizeof(uint)) != 0)) {
+ bcmerror = BCME_BADADDR;
+ goto done;
+ }
+#else
+#ifdef CONFIG_COMPAT
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0))
+ if (in_compat_syscall())
+#else
+ if (is_compat_task())
+#endif /* LINUX_VER >= 4.6 */
+ {
+ compat_wl_ioctl_t compat_ioc;
+ if (copy_from_user(&compat_ioc, ifr->ifr_data, sizeof(compat_wl_ioctl_t))) {
+ bcmerror = BCME_BADADDR;
+ goto done;
+ }
+ ioc.cmd = compat_ioc.cmd;
+ if (ioc.cmd & WLC_SPEC_FLAG) {
+ memset(&ioc, 0, sizeof(ioc));
+ /* Copy the ioc control structure part of ioctl request */
+ if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) {
+ bcmerror = BCME_BADADDR;
+ goto done;
+ }
+ ioc.cmd &= ~WLC_SPEC_FLAG; /* Clear the FLAG */
+
+ /* To differentiate between wl and dhd, read 4 more bytes */
+ if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t),
+ sizeof(uint)) != 0)) {
+ bcmerror = BCME_BADADDR;
+ goto done;
+ }
+
+ } else { /* ioc.cmd & WLC_SPEC_FLAG */
+ ioc.buf = compat_ptr(compat_ioc.buf);
+ ioc.len = compat_ioc.len;
+ ioc.set = compat_ioc.set;
+ ioc.used = compat_ioc.used;
+ ioc.needed = compat_ioc.needed;
+ /* To differentiate between wl and dhd, read 4 more bytes */
+ if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(compat_wl_ioctl_t),
+ sizeof(uint)) != 0)) {
+ bcmerror = BCME_BADADDR;
+ goto done;
+ }
+ } /* ioc.cmd & WLC_SPEC_FLAG */
+ } else
+#endif /* CONFIG_COMPAT */
+ {
+ /* Copy the ioc control structure part of ioctl request */
+ if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) {
+ bcmerror = BCME_BADADDR;
+ goto done;
+ }
+#ifdef CONFIG_COMPAT
+ ioc.cmd &= ~WLC_SPEC_FLAG; /* make sure the flag is clear when this isn't a compat task */
+#endif
+
+ /* To differentiate between wl and dhd, read 4 more bytes */
+ if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t),
+ sizeof(uint)) != 0)) {
+ bcmerror = BCME_BADADDR;
+ goto done;
+ }
+ }
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) */
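+
+/* Illustrative sketch (not compiled): the user-space request layout the
+ * copy-in logic above assumes. A wl_ioctl_t (or its 32-bit compat twin)
+ * is followed by one extra uint, read into ioc.driver, which lets DHD
+ * tell dhd ioctls (DHD_IOCTL_MAGIC) apart from wl ones. The struct name
+ * here is hypothetical; the real definitions live elsewhere in the driver.
+ */
+#if 0
+struct dhd_user_ioctl_layout {
+ wl_ioctl_t ioc; /* cmd/buf/len/set/used/needed */
+ uint driver; /* DHD_IOCTL_MAGIC for dhd-native ioctls */
+};
+#endif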
+
+ if (!capable(CAP_NET_ADMIN)) {
+ bcmerror = BCME_EPERM;
+ goto done;
+ }
+
+ /* Take backup of ioc.buf and restore later */
+ ioc_buf_user = ioc.buf;
+
+ if (ioc.len > 0) {
+ /*
+ * Some IOVARs in DHD require 32K of user memory, so allocate the
+ * maximum local buffer.
+ *
+ * For IOVARs which do not require 32K, dhd_ioctl_process() trims the
+ * length to DHD_IOCTL_MAXLEN (16K) so that DHD will not overflow the
+ * buffer while updating it.
+ */
+ buflen = MIN(ioc.len, DHD_IOCTL_MAXLEN_32K);
+ if (!(local_buf = MALLOC(dhd->pub.osh, buflen+1))) {
+ bcmerror = BCME_NOMEM;
+ goto done;
+ }
+
+ if (copy_from_user(local_buf, ioc.buf, buflen)) {
+ bcmerror = BCME_BADADDR;
+ goto done;
+ }
+
+ *((char *)local_buf + buflen) = '\0';
+
+ /* On some platforms, accessing the userspace memory behind
+ * ioc.buf causes a kernel panic, so to avoid that make
+ * ioc.buf point to the kernel-space buffer local_buf
+ */
+ ioc.buf = local_buf;
+ }
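+
+/* The block above is a classic bounce-buffer: cap the user length at
+ * DHD_IOCTL_MAXLEN_32K, copy into a kernel allocation one byte larger,
+ * NUL-terminate so string IOVARs cannot run past the end, then point
+ * ioc.buf at kernel memory for the duration of the call. A minimal
+ * standalone sketch of the same idea (illustrative only, not part of
+ * this driver):
+ */
+#if 0
+static void *dhd_bounce_in(osl_t *osh, void __user *ubuf, u16 *lenp, u16 cap)
+{
+ u16 len = MIN(*lenp, cap);
+ void *kbuf = MALLOC(osh, len + 1);
+
+ if (kbuf == NULL)
+ return NULL; /* caller maps this to BCME_NOMEM */
+ if (copy_from_user(kbuf, ubuf, len)) {
+ MFREE(osh, kbuf, len + 1);
+ return NULL; /* caller maps this to BCME_BADADDR */
+ }
+ ((char *)kbuf)[len] = '\0';
+ *lenp = len;
+ return kbuf;
+}
+#endif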
+
+#if defined(OEM_ANDROID)
+ /* Skip all the non DHD iovars (wl iovars) after f/w hang */
+ if (ioc.driver != DHD_IOCTL_MAGIC && dhd->pub.hang_was_sent) {
+ DHD_TRACE(("%s: HANG was sent up earlier\n", __FUNCTION__));
+ DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(&dhd->pub, DHD_EVENT_TIMEOUT_MS);
+ bcmerror = BCME_DONGLE_DOWN;
+ goto done;
+ }
+#endif /* OEM_ANDROID */
+
+#ifdef WL_NANHO
+ bcmerror = dhd_nho_ioctl_process(&dhd->pub, ifidx, &ioc, local_buf);
+#else
+ bcmerror = dhd_ioctl_process(&dhd->pub, ifidx, &ioc, local_buf);
+#endif /* WL_NANHO */
+
+ /* Restore the userspace pointer in ioc.buf */
+ ioc.buf = ioc_buf_user;
+ if (!bcmerror && buflen && local_buf && ioc.buf) {
+ if (copy_to_user(ioc.buf, local_buf, buflen))
+ bcmerror = -EFAULT;
+ }
+
+done:
+ if (local_buf)
+ MFREE(dhd->pub.osh, local_buf, buflen+1);
+
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+
+ return OSL_ERROR(bcmerror);
+}
+
+#if defined(WL_CFG80211) && defined(SUPPORT_DEEP_SLEEP)
+/* Flag to indicate whether we distinguish the power-off policy when
+ * the user sets the menu "Keep Wi-Fi on during sleep" to "Never"
+ */
+int trigger_deep_sleep = 0;
+#endif /* WL_CFG80211 && SUPPORT_DEEP_SLEEP */
+
+#ifdef FIX_CPU_MIN_CLOCK
+static int dhd_init_cpufreq_fix(dhd_info_t *dhd)
+{
+ if (dhd) {
+ mutex_init(&dhd->cpufreq_fix);
+ dhd->cpufreq_fix_status = FALSE;
+ }
+ return 0;
+}
+
+static void dhd_fix_cpu_freq(dhd_info_t *dhd)
+{
+ if (!dhd)
+ return;
+
+ mutex_lock(&dhd->cpufreq_fix);
+ if (!dhd->cpufreq_fix_status) {
+ pm_qos_add_request(&dhd->dhd_cpu_qos, PM_QOS_CPU_FREQ_MIN, 300000);
+#ifdef FIX_BUS_MIN_CLOCK
+ pm_qos_add_request(&dhd->dhd_bus_qos, PM_QOS_BUS_THROUGHPUT, 400000);
+#endif /* FIX_BUS_MIN_CLOCK */
+ DHD_ERROR(("pm_qos_add_requests called\n"));
+
+ dhd->cpufreq_fix_status = TRUE;
+ }
+ mutex_unlock(&dhd->cpufreq_fix);
+}
+
+static void dhd_rollback_cpu_freq(dhd_info_t *dhd)
+{
+ if (!dhd)
+ return;
+
+ mutex_lock(&dhd->cpufreq_fix);
+ if (dhd->cpufreq_fix_status != TRUE) {
+ mutex_unlock(&dhd->cpufreq_fix);
+ return;
+ }
+
+ pm_qos_remove_request(&dhd->dhd_cpu_qos);
+#ifdef FIX_BUS_MIN_CLOCK
+ pm_qos_remove_request(&dhd->dhd_bus_qos);
+#endif /* FIX_BUS_MIN_CLOCK */
+ DHD_ERROR(("pm_qos_add_requests called\n"));
+
+ dhd->cpufreq_fix_status = FALSE;
+ mutex_unlock(&dhd->cpufreq_fix);
+}
+#endif /* FIX_CPU_MIN_CLOCK */
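+
+/* Usage note: the pair above is driven from the interface lifecycle --
+ * dhd_open() calls dhd_init_cpufreq_fix()/dhd_fix_cpu_freq() when the
+ * firmware is in HOSTAP mode, and dhd_stop() undoes it with
+ * dhd_rollback_cpu_freq() for the same mode, so the pm_qos request
+ * cannot outlive the AP interface.
+ */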
+
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+static int
+dhd_ioctl_entry_wrapper(struct net_device *net, struct ifreq *ifr,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
+ void __user *data,
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) */
+ int cmd)
+{
+ int error;
+ dhd_info_t *dhd = DHD_DEV_INFO(net);
+
+ if (atomic_read(&dhd->pub.block_bus))
+ return -EHOSTDOWN;
+
+ if (pm_runtime_get_sync(dhd_bus_to_dev(dhd->pub.bus)) < 0)
+ return BCME_ERROR;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
+ error = dhd_ioctl_entry(net, ifr, data, cmd);
+#else
+ error = dhd_ioctl_entry(net, ifr, cmd);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) */
+
+ pm_runtime_mark_last_busy(dhd_bus_to_dev(dhd->pub.bus));
+ pm_runtime_put_autosuspend(dhd_bus_to_dev(dhd->pub.bus));
+
+ return error;
+}
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+
+#ifdef CONFIG_HAS_WAKELOCK
+#define dhd_wake_lock_unlock_destroy(wlock) \
+{ \
+ if (dhd_wake_lock_active(wlock)) { \
+ dhd_wake_unlock(wlock); \
+ } \
+ dhd_wake_lock_destroy(wlock); \
+}
+#endif /* CONFIG_HAS_WAKELOCK */
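+
+/* Hedged usage sketch for the helper above (illustrative only; the
+ * wakelock field name is hypothetical): release an active wakelock
+ * before destroying it, so destroy never runs on a held lock.
+ */
+#if 0
+ dhd_wake_lock_unlock_destroy(&dhd->wl_wifi); /* hypothetical wakelock field */
+#endif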
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0) && defined(DHD_TCP_LIMIT_OUTPUT)
+#define DHD_TCP_LIMIT_OUTPUT_BYTES (4 * 1024 * 1024)
+#ifndef TCP_DEFAULT_LIMIT_OUTPUT
+#define TCP_DEFAULT_LIMIT_OUTPUT (256 * 1024)
+#endif /* TCP_DEFAULT_LIMIT_OUTPUT */
+void
+dhd_ctrl_tcp_limit_output_bytes(int level)
+{
+ if (level == 0) {
+ init_net.ipv4.sysctl_tcp_limit_output_bytes = TCP_DEFAULT_LIMIT_OUTPUT;
+ } else if (level == 1) {
+ init_net.ipv4.sysctl_tcp_limit_output_bytes = DHD_TCP_LIMIT_OUTPUT_BYTES;
+ }
+}
+#endif /* LINUX_VERSION_CODE >= 4.19.0 && DHD_TCP_LIMIT_OUTPUT */
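+
+/* Note: this toggles net.ipv4.tcp_limit_output_bytes (the TSQ per-socket
+ * limit) between the 256 KB kernel default and 4 MB; a larger limit lets
+ * a single TCP flow keep a high-throughput Wi-Fi link full. Usage sketch
+ * (dhd_stop() below restores the default with level 0; callers elsewhere
+ * in the driver are assumed to pass 1 when boosting):
+ */
+#if 0
+ dhd_ctrl_tcp_limit_output_bytes(1); /* enter high-throughput mode */
+ dhd_ctrl_tcp_limit_output_bytes(0); /* restore the kernel default */
+#endif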
+
+static int
+dhd_stop(struct net_device *net)
+{
+ int ifidx = 0;
+ bool skip_reset = false;
+#ifdef WL_CFG80211
+ unsigned long flags = 0;
+#ifdef WL_STATIC_IF
+ struct bcm_cfg80211 *cfg = wl_get_cfg(net);
+#endif /* WL_STATIC_IF */
+#endif /* WL_CFG80211 */
+ dhd_info_t *dhd = DHD_DEV_INFO(net);
+
+ DHD_OS_WAKE_LOCK(&dhd->pub);
+ WL_MSG(net->name, "Enter\n");
+ dhd->pub.rxcnt_timeout = 0;
+ dhd->pub.txcnt_timeout = 0;
+
+#ifdef BCMPCIE
+ dhd->pub.d3ackcnt_timeout = 0;
+#endif /* BCMPCIE */
+
+ mutex_lock(&dhd->pub.ndev_op_sync);
+ if (dhd->pub.up == 0) {
+ goto exit;
+ }
+#if defined(DHD_HANG_SEND_UP_TEST)
+ if (dhd->pub.req_hang_type) {
+ DHD_ERROR(("%s, Clear HANG test request 0x%x\n",
+ __FUNCTION__, dhd->pub.req_hang_type));
+ dhd->pub.req_hang_type = 0;
+ }
+#endif /* DHD_HANG_SEND_UP_TEST */
+
+#if defined(WLAN_ACCEL_BOOT)
+ if (!dhd->wl_accel_force_reg_on && dhd_query_bus_erros(&dhd->pub)) {
+ DHD_ERROR(("%s: set force reg on\n", __FUNCTION__));
+ dhd->wl_accel_force_reg_on = TRUE;
+ }
+#endif /* WLAN_ACCEL_BOOT */
+
+#ifdef FIX_CPU_MIN_CLOCK
+ if (dhd_get_fw_mode(dhd) == DHD_FLAG_HOSTAP_MODE)
+ dhd_rollback_cpu_freq(dhd);
+#endif /* FIX_CPU_MIN_CLOCK */
+
+ ifidx = dhd_net2idx(dhd, net);
+ BCM_REFERENCE(ifidx);
+
+ DHD_ERROR(("%s: ######### called for ifidx=%d #########\n", __FUNCTION__, ifidx));
+
+#if defined(WL_STATIC_IF) && defined(WL_CFG80211)
+ /* If static if is operational, don't reset the chip */
+ if (wl_cfg80211_static_if_active(cfg)) {
+ WL_MSG(net->name, "static if operational. skip chip reset.\n");
+ skip_reset = true;
+ wl_cfg80211_sta_ifdown(net);
+ goto exit;
+ }
+#endif /* WL_STATIC_IF && WL_CFG80211 */
+#ifdef DHD_NOTIFY_MAC_CHANGED
+ if (dhd->pub.skip_dhd_stop) {
+ WL_MSG(net->name, "skip chip reset.\n");
+ skip_reset = true;
+#if defined(WL_CFG80211)
+ wl_cfg80211_sta_ifdown(net);
+#endif /* WL_CFG80211 */
+ goto exit;
+ }
+#endif /* DHD_NOTIFY_MAC_CHANGED */
+
+#ifdef WL_CFG80211
+ if (ifidx == 0) {
+ dhd_if_t *ifp;
+ wl_cfg80211_down(net);
+
+ DHD_ERROR(("%s: making dhdpub up FALSE\n", __FUNCTION__));
+#ifdef WL_CFG80211
+ /* Disable Runtime PM before interface down */
+ DHD_STOP_RPM_TIMER(&dhd->pub);
+
+ DHD_UP_LOCK(&dhd->pub.up_lock, flags);
+ dhd->pub.up = 0;
+ DHD_UP_UNLOCK(&dhd->pub.up_lock, flags);
+#else
+ dhd->pub.up = 0;
+#endif /* WL_CFG80211 */
+#if defined(BCMPCIE) && defined(CONFIG_ARCH_MSM)
+ dhd_bus_inform_ep_loaded_to_rc(&dhd->pub, dhd->pub.up);
+#endif /* BCMPCIE && CONFIG_ARCH_MSM */
+
+ ifp = dhd->iflist[0];
+ /*
+ * For CFG80211: Clean up all the left over virtual interfaces
+ * when the primary Interface is brought down. [ifconfig wlan0 down]
+ */
+ if (!dhd_download_fw_on_driverload) {
+ DHD_STATLOG_CTRL(&dhd->pub, ST(WLAN_POWER_OFF), ifidx, 0);
+ if ((dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) &&
+ (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)) {
+ int i;
+#ifdef DHD_4WAYM4_FAIL_DISCONNECT
+ dhd_cleanup_m4_state_work(&dhd->pub, ifidx);
+#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
+#ifdef DHD_PKTDUMP_ROAM
+ dhd_dump_pkt_clear(&dhd->pub);
+#endif /* DHD_PKTDUMP_ROAM */
+
+ dhd_net_if_lock_local(dhd);
+ for (i = 1; i < DHD_MAX_IFS; i++)
+ dhd_remove_if(&dhd->pub, i, FALSE);
+
+ if (ifp && ifp->net) {
+ dhd_if_del_sta_list(ifp);
+ }
+#ifdef ARP_OFFLOAD_SUPPORT
+ if (dhd_inetaddr_notifier_registered) {
+ dhd_inetaddr_notifier_registered = FALSE;
+ unregister_inetaddr_notifier(&dhd_inetaddr_notifier);
+ }
+#endif /* ARP_OFFLOAD_SUPPORT */
+#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
+ if (dhd_inet6addr_notifier_registered) {
+ dhd_inet6addr_notifier_registered = FALSE;
+ unregister_inet6addr_notifier(&dhd_inet6addr_notifier);
+ }
+#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
+ dhd_net_if_unlock_local(dhd);
+ }
+#if 0
+ // terence 20161024: remove this to prevent dev_close() get stuck in dhd_hang_process
+ cancel_work_sync(dhd->dhd_deferred_wq);
+#endif
+
+#ifdef SHOW_LOGTRACE
+ /* Wait till event logs work/kthread finishes */
+ dhd_cancel_logtrace_process_sync(dhd);
+#endif /* SHOW_LOGTRACE */
+
+#ifdef BTLOG
+ /* Wait till bt_log_dispatcher_work finishes */
+ cancel_work_sync(&dhd->bt_log_dispatcher_work);
+#endif /* BTLOG */
+
+#ifdef EWP_EDL
+ cancel_delayed_work_sync(&dhd->edl_dispatcher_work);
+#endif
+
+#if defined(DHD_LB_RXP)
+ __skb_queue_purge(&dhd->rx_pend_queue);
+#endif /* DHD_LB_RXP */
+
+#if defined(DHD_LB_TXP)
+ skb_queue_purge(&dhd->tx_pend_queue);
+#endif /* DHD_LB_TXP */
+ }
+#ifdef DHDTCPACK_SUPPRESS
+ dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
+#endif /* DHDTCPACK_SUPPRESS */
+#if defined(DHD_LB_RXP)
+ if (ifp && ifp->net == dhd->rx_napi_netdev) {
+ DHD_INFO(("%s napi<%p> disabled ifp->net<%p,%s>\n",
+ __FUNCTION__, &dhd->rx_napi_struct, net, net->name));
+ skb_queue_purge(&dhd->rx_napi_queue);
+ napi_disable(&dhd->rx_napi_struct);
+ netif_napi_del(&dhd->rx_napi_struct);
+ dhd->rx_napi_netdev = NULL;
+ }
+#endif /* DHD_LB_RXP */
+ }
+#endif /* WL_CFG80211 */
+
+#ifdef PROP_TXSTATUS
+ dhd_wlfc_cleanup(&dhd->pub, NULL, 0);
+#endif
+#ifdef SHOW_LOGTRACE
+ if (!dhd_download_fw_on_driverload) {
+ /* Release the skbs from queue for WLC_E_TRACE event */
+ dhd_event_logtrace_flush_queue(&dhd->pub);
+ if (dhd->dhd_state & DHD_ATTACH_LOGTRACE_INIT) {
+ if (dhd->event_data.fmts) {
+ MFREE(dhd->pub.osh, dhd->event_data.fmts,
+ dhd->event_data.fmts_size);
+ }
+ if (dhd->event_data.raw_fmts) {
+ MFREE(dhd->pub.osh, dhd->event_data.raw_fmts,
+ dhd->event_data.raw_fmts_size);
+ }
+ if (dhd->event_data.raw_sstr) {
+ MFREE(dhd->pub.osh, dhd->event_data.raw_sstr,
+ dhd->event_data.raw_sstr_size);
+ }
+ if (dhd->event_data.rom_raw_sstr) {
+ MFREE(dhd->pub.osh, dhd->event_data.rom_raw_sstr,
+ dhd->event_data.rom_raw_sstr_size);
+ }
+ dhd->dhd_state &= ~DHD_ATTACH_LOGTRACE_INIT;
+ }
+ }
+#endif /* SHOW_LOGTRACE */
+#ifdef DHD_DEBUGABILITY_LOG_DUMP_RING
+ /* Stop all ring buffer */
+ dhd_os_reset_logging(&dhd->pub);
+#endif
+#ifdef APF
+ dhd_dev_apf_delete_filter(net);
+#endif /* APF */
+
+ /* Stop the protocol module */
+ dhd_prot_stop(&dhd->pub);
+
+ OLD_MOD_DEC_USE_COUNT;
+exit:
+ if (skip_reset == false) {
+#ifdef WL_ESCAN
+ if (ifidx == 0) {
+ wl_escan_down(net);
+ }
+#endif /* WL_ESCAN */
+ if (ifidx == 0 && !dhd_download_fw_on_driverload) {
+#if defined(WLAN_ACCEL_BOOT)
+ wl_android_wifi_accel_off(net, dhd->wl_accel_force_reg_on);
+#else
+#if defined (BT_OVER_SDIO)
+ dhd_bus_put(&dhd->pub, WLAN_MODULE);
+ wl_android_set_wifi_on_flag(FALSE);
+#else
+ wl_android_wifi_off(net, TRUE);
+#ifdef WL_EXT_IAPSTA
+ wl_ext_iapsta_dettach_netdev(net, ifidx);
+#endif /* WL_EXT_IAPSTA */
+#ifdef WL_ESCAN
+ wl_escan_event_dettach(net, ifidx);
+#endif /* WL_ESCAN */
+#ifdef WL_EVENT
+ wl_ext_event_dettach_netdev(net, ifidx);
+#endif /* WL_EVENT */
+#endif /* BT_OVER_SDIO */
+#endif /* WLAN_ACCEL_BOOT */
+ }
+#ifdef SUPPORT_DEEP_SLEEP
+ else {
+ /* CSP#505233: Flags to indicate if we distinguish
+ * the power-off policy when the user sets the menu
+ * "Keep Wi-Fi on during sleep" to "Never"
+ */
+ if (trigger_deep_sleep) {
+ dhd_deepsleep(net, 1);
+ trigger_deep_sleep = 0;
+ }
+ }
+#endif /* SUPPORT_DEEP_SLEEP */
+ dhd->pub.hang_was_sent = 0;
+ dhd->pub.hang_was_pending = 0;
+
+ /* Clear country spec for built-in type driver */
+ if (!dhd_download_fw_on_driverload) {
+ dhd->pub.dhd_cspec.country_abbrev[0] = 0x00;
+ dhd->pub.dhd_cspec.rev = 0;
+ dhd->pub.dhd_cspec.ccode[0] = 0x00;
+ }
+
+#ifdef BCMDBGFS
+ dhd_dbgfs_remove();
+#endif
+ }
+
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+
+ /* Destroy wakelock */
+ if (!dhd_download_fw_on_driverload &&
+ (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) &&
+ (skip_reset == false)) {
+ DHD_OS_WAKE_LOCK_DESTROY(dhd);
+ dhd->dhd_state &= ~DHD_ATTACH_STATE_WAKELOCKS_INIT;
+ }
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0) && defined(DHD_TCP_LIMIT_OUTPUT)
+ dhd_ctrl_tcp_limit_output_bytes(0);
+#endif /* LINUX_VERSION_CODE >= 4.19.0 && DHD_TCP_LIMIT_OUTPUT */
+ WL_MSG(net->name, "Exit\n");
+
+ mutex_unlock(&dhd->pub.ndev_op_sync);
+ return 0;
+}
+
+#if defined(OEM_ANDROID) && defined(WL_CFG80211) && (defined(USE_INITIAL_2G_SCAN) || \
+ defined(USE_INITIAL_SHORT_DWELL_TIME))
+extern bool g_first_broadcast_scan;
+#endif /* OEM_ANDROID && WL_CFG80211 && (USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME) */
+
+#ifdef WL11U
+static int dhd_interworking_enable(dhd_pub_t *dhd)
+{
+ uint32 enable = true;
+ int ret = BCME_OK;
+
+ ret = dhd_iovar(dhd, 0, "interworking", (char *)&enable, sizeof(enable), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: enableing interworking failed, ret=%d\n", __FUNCTION__, ret));
+ }
+
+ return ret;
+}
+#endif /* WL11U */
+
+#if defined(WLAN_ACCEL_BOOT)
+void
+dhd_verify_firmware_mode_change(dhd_info_t *dhd)
+{
+ int current_mode = 0;
+
+ /*
+ * Check for an FW change:
+ * previous FW mode - dhd->pub.op_mode remembers the previous mode
+ * current mode - update fw/nv path, get the current FW mode from dhd->fw_path
+ */
+ dhd_update_fw_nv_path(dhd);
+#ifdef WL_MONITOR
+ DHD_INFO(("%s : check monitor mode with fw_path : %s\n", __FUNCTION__, dhd->fw_path));
+
+ if (strstr(dhd->fw_path, "_mon") != NULL) {
+ DHD_ERROR(("%s : monitor mode is enabled, set force reg on", __FUNCTION__));
+ dhd->wl_accel_force_reg_on = TRUE;
+ return;
+ } else if (dhd->pub.monitor_enable == TRUE) {
+ DHD_ERROR(("%s : monitor was enabled, changed to other fw_mode", __FUNCTION__));
+ dhd->wl_accel_force_reg_on = TRUE;
+ return;
+ }
+#endif /* WL_MONITOR */
+ current_mode = dhd_get_fw_mode(dhd);
+
+ DHD_ERROR(("%s: current_mode 0x%x, prev_opmode 0x%x", __FUNCTION__,
+ current_mode, dhd->pub.op_mode));
+
+ if (!(dhd->pub.op_mode & current_mode)) {
+ DHD_ERROR(("%s: firmware path has changed, set force reg on", __FUNCTION__));
+ dhd->wl_accel_force_reg_on = TRUE;
+ }
+}
+
+#ifndef DHD_FS_CHECK_RETRY_DELAY_MS
+#define DHD_FS_CHECK_RETRY_DELAY_MS 3000
+#endif
+
+#ifndef DHD_FS_CHECK_RETRIES
+#define DHD_FS_CHECK_RETRIES 3
+#endif
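+
+/* With the defaults above, the filesystem probe below is retried up to
+ * DHD_FS_CHECK_RETRIES (3) times at DHD_FS_CHECK_RETRY_DELAY_MS (3000 ms)
+ * intervals, i.e. roughly a 9 s worst-case wait for the vendor partition
+ * to come up before the accel-boot work gives up.
+ */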
+
+static bool
+dhd_check_filesystem_is_up(void)
+{
+ struct file *fp;
+ const char *clm = VENDOR_PATH CONFIG_BCMDHD_CLM_PATH;
+ fp = filp_open(clm, O_RDONLY, 0);
+
+ if (IS_ERR(fp)) {
+ DHD_ERROR(("%s: filp_open(%s) failed(%d) schedule wl_accel_work\n",
+ __FUNCTION__, clm, (int)IS_ERR(fp)));
+ return FALSE;
+ }
+ filp_close(fp, NULL);
+
+ return TRUE;
+}
+
+static void
+dhd_wifi_accel_on_work_cb(struct work_struct *work)
+{
+ int ret = 0;
+ struct delayed_work *dw = to_delayed_work(work);
+ struct dhd_info *dhd;
+ struct net_device *net;
+
+ /* Ignore compiler warnings due to -Werror=cast-qual */
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ dhd = container_of(dw, struct dhd_info, wl_accel_work);
+ GCC_DIAGNOSTIC_POP();
+
+ DHD_ERROR(("%s\n", __FUNCTION__));
+
+ /* Initialise force reg_on to TRUE; it will be made FALSE at the end */
+ dhd->wl_accel_force_reg_on = TRUE;
+
+ if (!dhd_check_filesystem_is_up()) {
+ if (!dhd->fs_check_retry--) {
+ DHD_ERROR(("%s: max retry reached, BACKOFF\n", __FUNCTION__));
+ return;
+ }
+ schedule_delayed_work(&dhd->wl_accel_work,
+ msecs_to_jiffies(DHD_FS_CHECK_RETRY_DELAY_MS));
+ return;
+ }
+
+ net = dhd->iflist[0]->net;
+
+ /*
+ * Keep WLAN turned on and download firmware during bootup
+ * by making g_wifi_on = FALSE
+ */
+ ret = wl_android_wifi_on(net);
+ if (ret) {
+ DHD_ERROR(("%s: wl_android_wifi_on failed(%d)\n", __FUNCTION__, ret));
+ goto fail;
+ }
+
+ /* Disable host access from dongle */
+ ret = dhd_wl_ioctl_set_intiovar(&dhd->pub, "bus:host_access", 0, WLC_SET_VAR, TRUE, 0);
+ if (ret) {
+ /* Proceed even if the iovar fails, for backward compatibility */
+ DHD_ERROR(("%s: bus:host_access(0) failed(%d)\n", __FUNCTION__, ret));
+ }
+
+ /* After bootup keep in suspend state */
+ ret = dhd_net_bus_suspend(net);
+ if (ret) {
+ DHD_ERROR(("%s: dhd_net_bus_suspend failed(%d)\n", __FUNCTION__, ret));
+ goto fail;
+ }
+
+ /* Set force reg_on to FALSE; it will be set again for the Big Hammer case */
+ dhd->wl_accel_force_reg_on = FALSE;
+
+fail:
+ /* mark wl_accel_boot_on_done for dhd_open to proceed */
+ dhd->wl_accel_boot_on_done = TRUE;
+}
+#endif /* WLAN_ACCEL_BOOT */
+
+int
+dhd_open(struct net_device *net)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(net);
+#ifdef TOE
+ uint32 toe_ol;
+#endif
+ int ifidx;
+ int32 ret = 0;
+#if defined(OOB_INTR_ONLY)
+ uint32 bus_type = -1;
+ uint32 bus_num = -1;
+ uint32 slot_num = -1;
+ wifi_adapter_info_t *adapter = NULL;
+#endif
+#if defined(WL_EXT_IAPSTA) && defined(ISAM_PREINIT)
+ int bytes_written = 0;
+#endif
+ int retry = POWERUP_MAX_RETRY;
+
+#if defined(PREVENT_REOPEN_DURING_HANG)
+ /* WAR : to prevent calling dhd_open abnormally in quick succession after hang event */
+ if (dhd->pub.hang_was_sent == 1) {
+ DHD_ERROR(("%s: HANG was sent up earlier\n", __FUNCTION__));
+ /* Force to bring down WLAN interface in case dhd_stop() is not called
+ * from the upper layer when HANG event is triggered.
+ */
+ if (!dhd_download_fw_on_driverload && dhd->pub.up == 1) {
+ DHD_ERROR(("%s: WLAN interface is not brought down\n", __FUNCTION__));
+ dhd_stop(net);
+ } else {
+ return -1;
+ }
+ }
+#endif /* PREVENT_REOPEN_DURING_HANG */
+
+ mutex_lock(&dhd->pub.ndev_op_sync);
+
+#ifdef SCAN_SUPPRESS
+ wl_ext_reset_scan_busy(&dhd->pub);
+#endif
+
+ if (dhd->pub.up == 1) {
+ /* already up */
+ WL_MSG(net->name, "Primary net_device is already up\n");
+ mutex_unlock(&dhd->pub.ndev_op_sync);
+ return BCME_OK;
+ }
+
+ if (!dhd_download_fw_on_driverload) {
+#if defined(WLAN_ACCEL_BOOT)
+ if (dhd->wl_accel_boot_on_done == FALSE) {
+#if defined(WLAN_ACCEL_SKIP_WQ_IN_ATTACH)
+ dhd_wifi_accel_on_work_cb(&dhd->wl_accel_work.work);
+#else
+ DHD_ERROR(("%s: WLAN accel boot not done yet\n", __FUNCTION__));
+ mutex_unlock(&dhd->pub.ndev_op_sync);
+ return -1;
+#endif /* WLAN_ACCEL_SKIP_WQ_IN_ATTACH */
+ }
+ if (!dhd->wl_accel_force_reg_on && dhd_query_bus_erros(&dhd->pub)) {
+ DHD_ERROR(("%s: set force reg on\n", __FUNCTION__));
+ dhd->wl_accel_force_reg_on = TRUE;
+ }
+#endif /* WLAN_ACCEL_BOOT */
+ if (!dhd_driver_init_done) {
+ DHD_ERROR(("%s: WLAN driver is not initialized\n", __FUNCTION__));
+ mutex_unlock(&dhd->pub.ndev_op_sync);
+ return -1;
+ }
+ }
+
+ WL_MSG(net->name, "Enter\n");
+ DHD_ERROR(("%s\n", dhd_version));
+ /* Init wakelock */
+ if (!dhd_download_fw_on_driverload) {
+ if (!(dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
+ DHD_OS_WAKE_LOCK_INIT(dhd);
+ dhd->dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT;
+ }
+
+#ifdef SHOW_LOGTRACE
+ skb_queue_head_init(&dhd->evt_trace_queue);
+
+ if (!(dhd->dhd_state & DHD_ATTACH_LOGTRACE_INIT)) {
+ ret = dhd_init_logstrs_array(dhd->pub.osh, &dhd->event_data);
+ if (ret == BCME_OK) {
+ dhd_init_static_strs_array(dhd->pub.osh, &dhd->event_data,
+ st_str_file_path, map_file_path);
+ dhd_init_static_strs_array(dhd->pub.osh, &dhd->event_data,
+ rom_st_str_file_path, rom_map_file_path);
+ dhd->dhd_state |= DHD_ATTACH_LOGTRACE_INIT;
+ }
+ }
+#endif /* SHOW_LOGTRACE */
+ }
+
+ DHD_OS_WAKE_LOCK(&dhd->pub);
+ dhd->pub.dongle_trap_occured = 0;
+#ifdef BT_OVER_PCIE
+ dhd->pub.dongle_trap_due_to_bt = 0;
+#endif /* BT_OVER_PCIE */
+ dhd->pub.hang_was_sent = 0;
+ dhd->pub.hang_was_pending = 0;
+ dhd->pub.hang_reason = 0;
+ dhd->pub.iovar_timeout_occured = 0;
+#ifdef PCIE_FULL_DONGLE
+ dhd->pub.d3ack_timeout_occured = 0;
+ dhd->pub.livelock_occured = 0;
+ dhd->pub.pktid_audit_failed = 0;
+#endif /* PCIE_FULL_DONGLE */
+ dhd->pub.iface_op_failed = 0;
+ dhd->pub.scan_timeout_occurred = 0;
+ dhd->pub.scan_busy_occurred = 0;
+ dhd->pub.smmu_fault_occurred = 0;
+#ifdef DHD_LOSSLESS_ROAMING
+ dhd->pub.dequeue_prec_map = ALLPRIO;
+#endif
+#ifdef DHD_GRO_ENABLE_HOST_CTRL
+ dhd->pub.permitted_gro = TRUE;
+#endif /* DHD_GRO_ENABLE_HOST_CTRL */
+#if 0
+ /*
+ * Force start if ifconfig_up gets called before START command
+ * We keep WEXT's wl_control_wl_start to provide backward compatibility
+ * This should be removed in the future
+ */
+ ret = wl_control_wl_start(net);
+ if (ret != 0) {
+ DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
+ ret = -1;
+ goto exit;
+ }
+
+#endif /* defined(OEM_ANDROID) && !defined(WL_CFG80211) */
+
+ ifidx = dhd_net2idx(dhd, net);
+ DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));
+
+ if (ifidx < 0) {
+ DHD_ERROR(("%s: Error: called with invalid IF\n", __FUNCTION__));
+ ret = -1;
+ goto exit;
+ }
+
+ if (!dhd->iflist[ifidx]) {
+ DHD_ERROR(("%s: Error: called when IF already deleted\n", __FUNCTION__));
+ ret = -1;
+ goto exit;
+ }
+
+ DHD_ERROR(("%s: ######### called for ifidx=%d #########\n", __FUNCTION__, ifidx));
+
+#if defined(WLAN_ACCEL_BOOT)
+ dhd_verify_firmware_mode_change(dhd);
+#endif /* WLAN_ACCEL_BOOT */
+
+ if (ifidx == 0) {
+ atomic_set(&dhd->pend_8021x_cnt, 0);
+ if (!dhd_download_fw_on_driverload) {
+ DHD_STATLOG_CTRL(&dhd->pub, ST(WLAN_POWER_ON), ifidx, 0);
+#ifdef WL_EVENT
+ wl_ext_event_attach_netdev(net, ifidx, dhd->iflist[ifidx]->bssidx);
+#endif /* WL_EVENT */
+#ifdef WL_ESCAN
+ wl_escan_event_attach(net, ifidx);
+#endif /* WL_ESCAN */
+#ifdef WL_EXT_IAPSTA
+ wl_ext_iapsta_attach_netdev(net, ifidx, dhd->iflist[ifidx]->bssidx);
+#endif /* WL_EXT_IAPSTA */
+#if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME)
+ g_first_broadcast_scan = TRUE;
+#endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */
+#ifdef SHOW_LOGTRACE
+ /* dhd_cancel_logtrace_process_sync is called in dhd_stop
+ * for built-in models. Need to start the logtrace kthread before
+ * calling wifi on, because once wifi is on, EDL will be in action
+ * any moment, and if the kthread is not active, FW event logs will
+ * not be available
+ */
+ if (dhd_reinit_logtrace_process(dhd) != BCME_OK) {
+ goto exit;
+ }
+#endif /* SHOW_LOGTRACE */
+#if defined(WLAN_ACCEL_BOOT)
+ ret = wl_android_wifi_accel_on(net, dhd->wl_accel_force_reg_on);
+ /* Enable wl_accel_force_reg_on if ON fails, else disable it */
+ if (ret) {
+ dhd->wl_accel_force_reg_on = TRUE;
+ } else {
+ dhd->wl_accel_force_reg_on = FALSE;
+ }
+#else
+#if defined(BT_OVER_SDIO)
+ ret = dhd_bus_get(&dhd->pub, WLAN_MODULE);
+ wl_android_set_wifi_on_flag(TRUE);
+#else
+ do {
+ dhd->pub.hang_reason = 0;
+ ret = wl_android_wifi_on(net);
+ if (!dhd->pub.hang_reason) {
+ break;
+ }
+ DHD_ERROR(("%s: hang_reason=%d, retry %d\n",
+ __FUNCTION__, dhd->pub.hang_reason, retry));
+ wl_android_wifi_off(net, TRUE);
+ } while (retry-- > 0);
+#endif /* BT_OVER_SDIO */
+#endif /* WLAN_ACCEL_BOOT */
+ if (ret != 0) {
+ DHD_ERROR(("%s : wl_android_wifi_on failed (%d)\n",
+ __FUNCTION__, ret));
+ ret = -1;
+ goto exit;
+ }
+ }
+#ifdef SUPPORT_DEEP_SLEEP
+ else {
+ /* Flags to indicate if we distinguish
+ * the power-off policy when the user sets the menu
+ * "Keep Wi-Fi on during sleep" to "Never"
+ */
+ if (trigger_deep_sleep) {
+#if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME)
+ g_first_broadcast_scan = TRUE;
+#endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */
+ dhd_deepsleep(net, 0);
+ trigger_deep_sleep = 0;
+ }
+ }
+#endif /* SUPPORT_DEEP_SLEEP */
+#ifdef FIX_CPU_MIN_CLOCK
+ if (dhd_get_fw_mode(dhd) == DHD_FLAG_HOSTAP_MODE) {
+ dhd_init_cpufreq_fix(dhd);
+ dhd_fix_cpu_freq(dhd);
+ }
+#endif /* FIX_CPU_MIN_CLOCK */
+#if defined(OOB_INTR_ONLY)
+ if (dhd->pub.conf->dpc_cpucore >= 0) {
+ dhd_bus_get_ids(dhd->pub.bus, &bus_type, &bus_num, &slot_num);
+ adapter = dhd_wifi_platform_get_adapter(bus_type, bus_num, slot_num);
+ if (adapter) {
+ printf("%s: set irq affinity hit %d\n", __FUNCTION__, dhd->pub.conf->dpc_cpucore);
+ irq_set_affinity_hint(adapter->irq_num, cpumask_of(dhd->pub.conf->dpc_cpucore));
+ }
+ }
+#endif
+
+ if (dhd->pub.busstate != DHD_BUS_DATA) {
+#ifdef BCMDBUS
+ dhd_set_path(&dhd->pub);
+ DHD_MUTEX_UNLOCK();
+ wait_event_interruptible_timeout(dhd->adapter->status_event,
+ wifi_get_adapter_status(dhd->adapter, WIFI_STATUS_FW_READY),
+ msecs_to_jiffies(DHD_FW_READY_TIMEOUT));
+ DHD_MUTEX_LOCK();
+ if ((ret = dbus_up(dhd->pub.bus)) != 0) {
+ DHD_ERROR(("%s: failed to dbus_up with code %d\n", __FUNCTION__, ret));
+ goto exit;
+ } else {
+ dhd->pub.busstate = DHD_BUS_DATA;
+ }
+ if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0) {
+ DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
+ goto exit;
+ }
+#else
+ /* try to bring up bus */
+
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+ if (pm_runtime_get_sync(dhd_bus_to_dev(dhd->pub.bus)) >= 0) {
+ ret = dhd_bus_start(&dhd->pub);
+ pm_runtime_mark_last_busy(dhd_bus_to_dev(dhd->pub.bus));
+ pm_runtime_put_autosuspend(dhd_bus_to_dev(dhd->pub.bus));
+ }
+#else
+ ret = dhd_bus_start(&dhd->pub);
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+
+ if (ret) {
+ DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
+ ret = -1;
+ goto exit;
+ }
+#endif /* !BCMDBUS */
+
+ }
+#ifdef WL_EXT_IAPSTA
+ wl_ext_iapsta_attach_name(net, ifidx);
+#endif
+
+#ifdef BT_OVER_SDIO
+ if (dhd->pub.is_bt_recovery_required) {
+ DHD_ERROR(("%s: Send Hang Notification 2 to BT\n", __FUNCTION__));
+ bcmsdh_btsdio_process_dhd_hang_notification(TRUE);
+ }
+ dhd->pub.is_bt_recovery_required = FALSE;
+#endif
+
+ /* dhd_sync_with_dongle has been called in dhd_bus_start or wl_android_wifi_on */
+ memcpy(net->dev_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
+
+#ifdef TOE
+ /* Get current TOE mode from dongle */
+ if (dhd_toe_get(dhd, ifidx, &toe_ol) >= 0 && (toe_ol & TOE_TX_CSUM_OL) != 0) {
+ dhd->iflist[ifidx]->net->features |= NETIF_F_IP_CSUM;
+ } else {
+ dhd->iflist[ifidx]->net->features &= ~NETIF_F_IP_CSUM;
+ }
+#endif /* TOE */
+
+#ifdef DHD_LB
+#ifdef ENABLE_DHD_GRO
+ dhd->iflist[ifidx]->net->features |= NETIF_F_GRO;
+#endif /* ENABLE_DHD_GRO */
+
+#ifdef HOST_SFH_LLC
+ dhd->iflist[ifidx]->net->needed_headroom = DOT11_LLC_SNAP_HDR_LEN;
+#endif
+
+#if defined(DHD_LB_RXP)
+ __skb_queue_head_init(&dhd->rx_pend_queue);
+ if (dhd->rx_napi_netdev == NULL) {
+ dhd->rx_napi_netdev = dhd->iflist[ifidx]->net;
+ memset(&dhd->rx_napi_struct, 0, sizeof(struct napi_struct));
+ netif_napi_add(dhd->rx_napi_netdev, &dhd->rx_napi_struct,
+ dhd_napi_poll, dhd_napi_weight);
+ DHD_INFO(("%s napi<%p> enabled ifp->net<%p,%s> dhd_napi_weight: %d\n",
+ __FUNCTION__, &dhd->rx_napi_struct, net,
+ net->name, dhd_napi_weight));
+ napi_enable(&dhd->rx_napi_struct);
+ DHD_INFO(("%s load balance init rx_napi_struct\n", __FUNCTION__));
+ skb_queue_head_init(&dhd->rx_napi_queue);
+ __skb_queue_head_init(&dhd->rx_process_queue);
+ } /* rx_napi_netdev == NULL */
+#endif /* DHD_LB_RXP */
+
+#if defined(DHD_LB_TXP)
+ /* Use the variant that uses locks */
+ skb_queue_head_init(&dhd->tx_pend_queue);
+#endif /* DHD_LB_TXP */
+ dhd->dhd_lb_candidacy_override = FALSE;
+#endif /* DHD_LB */
+ netdev_update_features(net);
+#ifdef DHD_PM_OVERRIDE
+ g_pm_override = FALSE;
+#endif /* DHD_PM_OVERRIDE */
+#if defined(WL_CFG80211)
+ if (unlikely(wl_cfg80211_up(net))) {
+ DHD_ERROR(("%s: failed to bring up cfg80211\n", __FUNCTION__));
+ ret = -1;
+ goto exit;
+ }
+ if (!dhd_download_fw_on_driverload) {
+#ifdef ARP_OFFLOAD_SUPPORT
+ dhd->pend_ipaddr = 0;
+ if (!dhd_inetaddr_notifier_registered) {
+ dhd_inetaddr_notifier_registered = TRUE;
+ register_inetaddr_notifier(&dhd_inetaddr_notifier);
+ }
+#endif /* ARP_OFFLOAD_SUPPORT */
+#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
+ if (!dhd_inet6addr_notifier_registered) {
+ dhd_inet6addr_notifier_registered = TRUE;
+ register_inet6addr_notifier(&dhd_inet6addr_notifier);
+ }
+#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
+ }
+
+#if defined(DHD_CONTROL_PCIE_ASPM_WIFI_TURNON)
+ dhd_bus_aspm_enable_rc_ep(dhd->pub.bus, TRUE);
+#endif /* DHD_CONTROL_PCIE_ASPM_WIFI_TURNON */
+#if defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON)
+ dhd_irq_set_affinity(&dhd->pub, cpumask_of(0));
+#endif /* DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */
+#if defined(NUM_SCB_MAX_PROBE)
+ dhd_set_scb_probe(&dhd->pub);
+#endif /* NUM_SCB_MAX_PROBE */
+#endif /* WL_CFG80211 */
+#ifdef WL_ESCAN
+ if (unlikely(wl_escan_up(net))) {
+ DHD_ERROR(("%s: failed to bring up escan\n", __FUNCTION__));
+ ret = -1;
+ goto exit;
+ }
+#endif /* WL_ESCAN */
+#if defined(ISAM_PREINIT)
+ if (!dhd_download_fw_on_driverload) {
+ if (dhd->pub.conf) {
+ wl_android_ext_priv_cmd(net, dhd->pub.conf->isam_init, 0, &bytes_written);
+ wl_android_ext_priv_cmd(net, dhd->pub.conf->isam_config, 0, &bytes_written);
+ wl_android_ext_priv_cmd(net, dhd->pub.conf->isam_enable, 0, &bytes_written);
+ }
+ }
+#endif
+ }
+
+ dhd->pub.up = 1;
+#if defined(BCMPCIE) && defined(CONFIG_ARCH_MSM)
+ dhd_bus_inform_ep_loaded_to_rc(&dhd->pub, dhd->pub.up);
+#endif /* BCMPCIE && CONFIG_ARCH_MSM */
+ DHD_START_RPM_TIMER(&dhd->pub);
+
+ if (wl_event_enable) {
+ /* For wl utility to receive events */
+ dhd->pub.wl_event_enabled = true;
+ } else {
+ dhd->pub.wl_event_enabled = false;
+ }
+
+ if (logtrace_pkt_sendup) {
+ /* For any daemon to receive logtrace */
+ dhd->pub.logtrace_pkt_sendup = true;
+ } else {
+ dhd->pub.logtrace_pkt_sendup = false;
+ }
+
+ OLD_MOD_INC_USE_COUNT;
+
+#ifdef BCMDBGFS
+ dhd_dbgfs_init(&dhd->pub);
+#endif
+
+exit:
+ mutex_unlock(&dhd->pub.ndev_op_sync);
+#if defined(ENABLE_INSMOD_NO_FW_LOAD) && defined(NO_POWER_OFF_AFTER_OPEN)
+ dhd_download_fw_on_driverload = TRUE;
+ dhd_driver_init_done = TRUE;
+#elif defined(ENABLE_INSMOD_NO_FW_LOAD) && defined(ENABLE_INSMOD_NO_POWER_OFF)
+ dhd_download_fw_on_driverload = FALSE;
+ dhd_driver_init_done = TRUE;
+#endif
+ if (ret) {
+ dhd_stop(net);
+ }
+
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+
+ WL_MSG(net->name, "Exit ret=%d\n", ret);
+ return ret;
+}
+
+/*
+ * ndo_open handler for primary ndev
+ */
+static int
+dhd_pri_open(struct net_device *net)
+{
+ s32 ret;
+
+ DHD_MUTEX_IS_LOCK_RETURN();
+ DHD_MUTEX_LOCK();
+ ret = dhd_open(net);
+ if (unlikely(ret)) {
+ DHD_ERROR(("Failed to open primary dev ret %d\n", ret));
+ DHD_MUTEX_UNLOCK();
+ return ret;
+ }
+
+ /* Allow transmit calls */
+ dhd_tx_start_queues(net);
+ WL_MSG(net->name, "tx queue started\n");
+
+#if defined(SET_RPS_CPUS)
+ dhd_rps_cpus_enable(net, TRUE);
+#endif
+
+#if defined(SET_XPS_CPUS)
+ dhd_xps_cpus_enable(net, TRUE);
+#endif
+ DHD_MUTEX_UNLOCK();
+
+ return ret;
+}
+
+/*
+ * ndo_stop handler for primary ndev
+ */
+static int
+dhd_pri_stop(struct net_device *net)
+{
+ s32 ret;
+
+ /* Set state and stop OS transmissions */
+ dhd_tx_stop_queues(net);
+ WL_MSG(net->name, "tx queue stopped\n");
+
+ ret = dhd_stop(net);
+ if (unlikely(ret)) {
+ DHD_ERROR(("dhd_stop failed: %d\n", ret));
+ return ret;
+ }
+
+ return ret;
+}
+
+#ifdef PCIE_INB_DW
+bool
+dhd_check_cfg_in_progress(dhd_pub_t *dhdp)
+{
+#if defined(WL_CFG80211)
+ return wl_cfg80211_check_in_progress(dhd_linux_get_primary_netdev(dhdp));
+#endif /* WL_CFG80211 */
+ return FALSE;
+}
+#endif
+
+#if defined(WL_STATIC_IF) && defined(WL_CFG80211)
+/*
+ * For static I/Fs, the firmware interface init
+ * is done from the IFF_UP context.
+ */
+static int
+dhd_static_if_open(struct net_device *net)
+{
+ s32 ret = 0;
+ struct bcm_cfg80211 *cfg;
+ struct net_device *primary_netdev = NULL;
+#ifdef WLEASYMESH
+ dhd_info_t *dhd = DHD_DEV_INFO(net);
+#endif /* WLEASYMESH */
+
+ DHD_MUTEX_LOCK();
+ cfg = wl_get_cfg(net);
+ primary_netdev = bcmcfg_to_prmry_ndev(cfg);
+
+ if (!wl_cfg80211_static_if(cfg, net)) {
+ WL_MSG(net->name, "non-static interface ..do nothing\n");
+ ret = BCME_OK;
+ goto done;
+ }
+
+ WL_MSG(net->name, "Enter\n");
+ /* Ensure fw is initialized. If it is already initialized,
+ * dhd_open will return success.
+ */
+#ifdef WLEASYMESH
+ WL_MSG(net->name, "switch to EasyMesh fw\n");
+ dhd->pub.conf->fw_type = FW_TYPE_EZMESH;
+ ret = dhd_stop(primary_netdev);
+ if (unlikely(ret)) {
+ printf("===>%s, Failed to close primary dev ret %d\n", __FUNCTION__, ret);
+ goto done;
+ }
+ OSL_SLEEP(1);
+#endif /* WLEASYMESH */
+ ret = dhd_open(primary_netdev);
+ if (unlikely(ret)) {
+ DHD_ERROR(("Failed to open primary dev ret %d\n", ret));
+ goto done;
+ }
+
+ ret = wl_cfg80211_static_if_open(net);
+ if (ret == BCME_OK) {
+ /* Allow transmit calls */
+ netif_start_queue(net);
+ }
+done:
+ WL_MSG(net->name, "Exit ret=%d\n", ret);
+ DHD_MUTEX_UNLOCK();
+ return ret;
+}
+
+static int
+dhd_static_if_stop(struct net_device *net)
+{
+ struct bcm_cfg80211 *cfg;
+ struct net_device *primary_netdev = NULL;
+ int ret = BCME_OK;
+ dhd_info_t *dhd = DHD_DEV_INFO(net);
+
+ WL_MSG(net->name, "Enter\n");
+
+ cfg = wl_get_cfg(net);
+ if (!wl_cfg80211_static_if(cfg, net)) {
+ DHD_TRACE(("non-static interface (%s)..do nothing \n", net->name));
+ return BCME_OK;
+ }
+#ifdef DHD_NOTIFY_MAC_CHANGED
+ if (dhd->pub.skip_dhd_stop) {
+ WL_MSG(net->name, "Exit skip stop\n");
+ return BCME_OK;
+ }
+#endif /* DHD_NOTIFY_MAC_CHANGED */
+
+ /* Ensure queue is disabled */
+ netif_tx_disable(net);
+
+ dhd_net_if_lock_local(dhd);
+ ret = wl_cfg80211_static_if_close(net);
+ dhd_net_if_unlock_local(dhd);
+
+ if (dhd->pub.up == 0) {
+ /* If fw is down, return */
+ DHD_ERROR(("fw down\n"));
+ return BCME_OK;
+ }
+ /* If the STA iface is not operational, invoke dhd_stop from this
+ * context.
+ */
+ primary_netdev = bcmcfg_to_prmry_ndev(cfg);
+#ifdef WLEASYMESH
+ if (dhd->pub.conf->fw_type == FW_TYPE_EZMESH) {
+ WL_MSG(net->name, "switch to STA fw\n");
+ dhd->pub.conf->fw_type = FW_TYPE_STA;
+ } else
+#endif /* WLEASYMESH */
+ if (!(primary_netdev->flags & IFF_UP)) {
+ ret = dhd_stop(primary_netdev);
+ } else {
+ DHD_ERROR(("Skipped dhd_stop, as sta is operational\n"));
+ }
+ WL_MSG(net->name, "Exit ret=%d\n", ret);
+
+ return ret;
+}
+#endif /* WL_STATIC_IF && WL_CFG80211 */
+
+int dhd_do_driver_init(struct net_device *net)
+{
+ dhd_info_t *dhd = NULL;
+ int ret = 0;
+
+ if (!net) {
+ DHD_ERROR(("Primary Interface not initialized \n"));
+ return -EINVAL;
+ }
+
+ DHD_MUTEX_IS_LOCK_RETURN();
+ DHD_MUTEX_LOCK();
+
+ /* && defined(OEM_ANDROID) && defined(BCMSDIO) */
+ dhd = DHD_DEV_INFO(net);
+
+ /* If driver is already initialized, do nothing
+ */
+ if (dhd->pub.busstate == DHD_BUS_DATA) {
+ DHD_TRACE(("Driver already Inititalized. Nothing to do"));
+ goto exit;
+ }
+
+ if (dhd_open(net) < 0) {
+ DHD_ERROR(("Driver Init Failed \n"));
+ ret = -1;
+ goto exit;
+ }
+
+exit:
+ DHD_MUTEX_UNLOCK();
+ return ret;
+}
+
+int
+dhd_event_ifadd(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
+{
+
+#ifdef WL_CFG80211
+ if (wl_cfg80211_notify_ifadd(dhd_linux_get_primary_netdev(&dhdinfo->pub),
+ ifevent->ifidx, name, mac, ifevent->bssidx, ifevent->role) == BCME_OK)
+ return BCME_OK;
+#endif
+
+ /* handle IF event caused by wl commands, SoftAP, WEXT and
+ * anything else. This has to be done asynchronously otherwise
+ * DPC will be blocked (and iovars will time out as DPC has no chance
+ * to read the response back)
+ */
+ if (ifevent->ifidx > 0) {
+ dhd_if_event_t *if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
+ if (if_event == NULL) {
+ DHD_ERROR(("dhd_event_ifadd: Failed MALLOC, malloced %d bytes",
+ MALLOCED(dhdinfo->pub.osh)));
+ return BCME_NOMEM;
+ }
+
+ memcpy(&if_event->event, ifevent, sizeof(if_event->event));
+ memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
+ strlcpy(if_event->name, name, sizeof(if_event->name));
+ dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event,
+ DHD_WQ_WORK_IF_ADD, dhd_ifadd_event_handler, DHD_WQ_WORK_PRIORITY_LOW);
+ }
+
+ return BCME_OK;
+}
+
+int
+dhd_event_ifdel(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
+{
+ dhd_if_event_t *if_event;
+
+#ifdef WL_CFG80211
+ if (wl_cfg80211_notify_ifdel(dhd_linux_get_primary_netdev(&dhdinfo->pub),
+ ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK)
+ return BCME_OK;
+#endif /* WL_CFG80211 */
+
+ /* handle IF event caused by wl commands, SoftAP, WEXT and
+ * anything else
+ */
+ if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
+ if (if_event == NULL) {
+ DHD_ERROR(("dhd_event_ifdel: malloc failed for if_event, malloced %d bytes",
+ MALLOCED(dhdinfo->pub.osh)));
+ return BCME_NOMEM;
+ }
+ memcpy(&if_event->event, ifevent, sizeof(if_event->event));
+ memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
+ strlcpy(if_event->name, name, sizeof(if_event->name));
+ dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event, DHD_WQ_WORK_IF_DEL,
+ dhd_ifdel_event_handler, DHD_WQ_WORK_PRIORITY_LOW);
+
+ return BCME_OK;
+}
+
+int
+dhd_event_ifchange(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
+{
+#ifdef DHD_UPDATE_INTF_MAC
+ dhd_if_event_t *if_event;
+#endif /* DHD_UPDATE_INTF_MAC */
+
+#ifdef WL_CFG80211
+ wl_cfg80211_notify_ifchange(dhd_linux_get_primary_netdev(&dhdinfo->pub),
+ ifevent->ifidx, name, mac, ifevent->bssidx);
+#endif /* WL_CFG80211 */
+
+#ifdef DHD_UPDATE_INTF_MAC
+ /* handle IF event caused by wl commands, SoftAP, WEXT, MBSS and
+ * anything else
+ */
+ if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
+ if (if_event == NULL) {
+ DHD_ERROR(("dhd_event_ifdel: malloc failed for if_event, malloced %d bytes",
+ MALLOCED(dhdinfo->pub.osh)));
+ return BCME_NOMEM;
+ }
+ memcpy(&if_event->event, ifevent, sizeof(if_event->event));
+ // construct a change event
+ if_event->event.ifidx = dhd_ifname2idx(dhdinfo, name);
+ if_event->event.opcode = WLC_E_IF_CHANGE;
+ memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
+ strncpy(if_event->name, name, IFNAMSIZ);
+ if_event->name[IFNAMSIZ - 1] = '\0';
+ dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event, DHD_WQ_WORK_IF_UPDATE,
+ dhd_ifupdate_event_handler, DHD_WQ_WORK_PRIORITY_LOW);
+#endif /* DHD_UPDATE_INTF_MAC */
+
+ return BCME_OK;
+}
+
+#ifdef WL_NATOE
+/* Handler to update natoe info and bind with new subscriptions if there is a change in config */
+static void
+dhd_natoe_ct_event_handler(void *handle, void *event_info, u8 event)
+{
+ dhd_info_t *dhd = handle;
+ wl_event_data_natoe_t *natoe = event_info;
+ dhd_nfct_info_t *nfct;
+
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
+ return;
+ }
+ nfct = dhd->pub.nfct;
+
+ if (event != DHD_WQ_WORK_NATOE_EVENT) {
+ DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
+ return;
+ }
+ if (natoe->natoe_active && natoe->sta_ip && natoe->start_port && natoe->end_port &&
+ (natoe->start_port < natoe->end_port)) {
+ /* Rebind subscriptions to start receiving notifications from groups */
+ if (dhd_ct_nl_bind(nfct, nfct->subscriptions) < 0) {
+ dhd_ct_close(nfct);
+ }
+ dhd_ct_send_dump_req(nfct);
+ } else if (!natoe->natoe_active) {
+ /* Rebind subscriptions to stop receiving notifications from groups */
+ if (dhd_ct_nl_bind(nfct, CT_NULL_SUBSCRIPTION) < 0) {
+ dhd_ct_close(nfct);
+ }
+ }
+}
+
+/* As a NATOE enable/disable event is received, we have to bind with new NL subscriptions.
+ * Scheduling a workq to switch from tasklet context, as the bind call may sleep in the handler
+ */
+int
+dhd_natoe_ct_event(dhd_pub_t *dhd, char *data)
+{
+ wl_event_data_natoe_t *event_data = (wl_event_data_natoe_t *)data;
+
+ if (dhd->nfct) {
+ wl_event_data_natoe_t *natoe = dhd->nfct->natoe_info;
+ uint8 prev_enable = natoe->natoe_active;
+
+ spin_lock_bh(&dhd->nfct_lock);
+ memcpy(natoe, event_data, sizeof(*event_data));
+ spin_unlock_bh(&dhd->nfct_lock);
+
+ if (prev_enable != event_data->natoe_active) {
+ dhd_deferred_schedule_work(dhd->info->dhd_deferred_wq,
+ (void *)natoe, DHD_WQ_WORK_NATOE_EVENT,
+ dhd_natoe_ct_event_handler, DHD_WQ_WORK_PRIORITY_LOW);
+ }
+ return BCME_OK;
+ }
+ DHD_ERROR(("%s ERROR NFCT is not enabled \n", __FUNCTION__));
+ return BCME_ERROR;
+}
+
+/* Handler to send natoe ioctl to dongle */
+static void
+dhd_natoe_ct_ioctl_handler(void *handle, void *event_info, uint8 event)
+{
+ dhd_info_t *dhd = handle;
+ dhd_ct_ioc_t *ct_ioc = event_info;
+
+ if (event != DHD_WQ_WORK_NATOE_IOCTL) {
+ DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
+ return;
+ }
+
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
+ return;
+ }
+
+ if (dhd_natoe_prep_send_exception_port_ioctl(&dhd->pub, ct_ioc) < 0) {
+ DHD_ERROR(("%s: Error in sending NATOE IOCTL \n", __FUNCTION__));
+ }
+}
+
+/* When Netlink message contains port collision info, the info must be sent to dongle FW
+ * For that we have to switch context from softirq/tasklet by scheduling workq for natoe_ct ioctl
+ */
+void
+dhd_natoe_ct_ioctl_schedule_work(dhd_pub_t *dhd, dhd_ct_ioc_t *ioc)
+{
+
+ dhd_deferred_schedule_work(dhd->info->dhd_deferred_wq, (void *)ioc,
+ DHD_WQ_WORK_NATOE_IOCTL, dhd_natoe_ct_ioctl_handler,
+ DHD_WQ_WORK_PRIORITY_HIGH);
+}
+#endif /* WL_NATOE */
+
+/* This API maps ndev to ifp inclusive of static IFs */
+static dhd_if_t *
+dhd_get_ifp_by_ndev(dhd_pub_t *dhdp, struct net_device *ndev)
+{
+ dhd_if_t *ifp = NULL;
+#ifdef WL_STATIC_IF
+ u32 ifidx = (DHD_MAX_IFS + DHD_MAX_STATIC_IFS - 1);
+#else
+ u32 ifidx = (DHD_MAX_IFS - 1);
+#endif /* WL_STATIC_IF */
+
+ dhd_info_t *dhdinfo = (dhd_info_t *)dhdp->info;
+ do {
+ ifp = dhdinfo->iflist[ifidx];
+ if (ifp && (ifp->net == ndev)) {
+ DHD_TRACE(("match found for %s. ifidx:%d\n",
+ ndev->name, ifidx));
+ return ifp;
+ }
+ } while (ifidx--);
+
+ DHD_ERROR(("no entry found for %s\n", ndev->name));
+ return NULL;
+}
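+
+/* Usage note: this reverse map is how ndev-centric entry points recover
+ * per-interface state; dhd_is_static_ndev() below and dhd_cleanup_if()
+ * later in this file both start from dhd_get_ifp_by_ndev() and bail out
+ * when it returns NULL.
+ */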
+
+bool
+dhd_is_static_ndev(dhd_pub_t *dhdp, struct net_device *ndev)
+{
+ dhd_if_t *ifp = NULL;
+
+ if (!dhdp || !ndev) {
+ DHD_ERROR(("wrong input\n"));
+ ASSERT(0);
+ return false;
+ }
+
+ ifp = dhd_get_ifp_by_ndev(dhdp, ndev);
+ return (ifp && (ifp->static_if == true));
+}
+
+#ifdef WL_STATIC_IF
+/* In some cases, while registering an I/F, the actual ifidx, bssidx and dngl_name
+ * are not known, e.g. in the static i/f case. This function lets us update them
+ * once they are known.
+ */
+s32
+dhd_update_iflist_info(dhd_pub_t *dhdp, struct net_device *ndev, int ifidx,
+ uint8 *mac, uint8 bssidx, const char *dngl_name, int if_state)
+{
+ dhd_info_t *dhdinfo = (dhd_info_t *)dhdp->info;
+ dhd_if_t *ifp, *ifp_new;
+ s32 cur_idx;
+ dhd_dev_priv_t * dev_priv;
+
+ DHD_TRACE(("[STATIC_IF] update ifinfo for state:%d ifidx:%d\n",
+ if_state, ifidx));
+
+ ASSERT(dhdinfo && (ifidx < (DHD_MAX_IFS + DHD_MAX_STATIC_IFS)));
+
+ if ((ifp = dhd_get_ifp_by_ndev(dhdp, ndev)) == NULL) {
+ return -ENODEV;
+ }
+ cur_idx = ifp->idx;
+
+ if (if_state == NDEV_STATE_OS_IF_CREATED) {
+ /* mark static if */
+ ifp->static_if = TRUE;
+ return BCME_OK;
+ }
+
+ ifp_new = dhdinfo->iflist[ifidx];
+ if (ifp_new && (ifp_new != ifp)) {
+ /* There should be only one entry for a given ifidx. */
+ DHD_ERROR(("ifp ptr already present for ifidx:%d\n", ifidx));
+ ASSERT(0);
+ dhdp->hang_reason = HANG_REASON_IFACE_ADD_FAILURE;
+ net_os_send_hang_message(ifp->net);
+ return -EINVAL;
+ }
+
+ /* For static if delete case, cleanup the if before ifidx update */
+ if ((if_state == NDEV_STATE_FW_IF_DELETED) ||
+ (if_state == NDEV_STATE_FW_IF_FAILED)) {
+ dhd_cleanup_if(ifp->net);
+ dev_priv = DHD_DEV_PRIV(ndev);
+ dev_priv->ifidx = ifidx;
+ }
+
+ /* update the iflist ifidx slot with cached info */
+ dhdinfo->iflist[ifidx] = ifp;
+ dhdinfo->iflist[cur_idx] = NULL;
+
+ /* update the values */
+ ifp->idx = ifidx;
+ ifp->bssidx = bssidx;
+
+ if (if_state == NDEV_STATE_FW_IF_CREATED) {
+ dhd_dev_priv_save(ndev, dhdinfo, ifp, ifidx);
+ /* initialize the dongle provided if name */
+ if (dngl_name) {
+ strncpy(ifp->dngl_name, dngl_name, IFNAMSIZ);
+ } else if (ndev->name[0] != '\0') {
+ strncpy(ifp->dngl_name, ndev->name, IFNAMSIZ);
+ }
+ if (mac != NULL && ifp->set_macaddress == FALSE) {
+ /* To and fro locations have same size - ETHER_ADDR_LEN */
+ (void)memcpy_s(&ifp->mac_addr, ETHER_ADDR_LEN, mac, ETHER_ADDR_LEN);
+ }
+#ifdef WL_EVENT
+ wl_ext_event_attach_netdev(ndev, ifidx, bssidx);
+#endif /* WL_EVENT */
+#ifdef WL_ESCAN
+ wl_escan_event_attach(ndev, ifidx);
+#endif /* WL_ESCAN */
+#ifdef WL_EXT_IAPSTA
+ wl_ext_iapsta_ifadding(ndev, ifidx);
+ wl_ext_iapsta_attach_netdev(ndev, ifidx, bssidx);
+ wl_ext_iapsta_attach_name(ndev, ifidx);
+#endif /* WL_EXT_IAPSTA */
+ }
+ else if (if_state == NDEV_STATE_FW_IF_DELETED) {
+#ifdef WL_EXT_IAPSTA
+ wl_ext_iapsta_dettach_netdev(ndev, cur_idx);
+#endif /* WL_EXT_IAPSTA */
+#ifdef WL_ESCAN
+ wl_escan_event_dettach(ndev, cur_idx);
+#endif /* WL_ESCAN */
+#ifdef WL_EVENT
+ wl_ext_event_dettach_netdev(ndev, cur_idx);
+#endif /* WL_EVENT */
+ }
+ DHD_INFO(("[STATIC_IF] ifp ptr updated for ifidx:%d curidx:%d if_state:%d\n",
+ ifidx, cur_idx, if_state));
+ return BCME_OK;
+}
+#endif /* WL_STATIC_IF */
+
+/* unregister and free the existing net_device interface (if any) in iflist and
+ * allocate a new one. the slot is reused. this function does NOT register the
+ * new interface to linux kernel. dhd_register_if does the job
+ */
+struct net_device*
+dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, const char *name,
+ uint8 *mac, uint8 bssidx, bool need_rtnl_lock, const char *dngl_name)
+{
+ dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
+ dhd_if_t *ifp;
+
+ ASSERT(dhdinfo && (ifidx < (DHD_MAX_IFS + DHD_MAX_STATIC_IFS)));
+ if (!dhdinfo || ifidx < 0 || ifidx >= (DHD_MAX_IFS + DHD_MAX_STATIC_IFS)) {
+ return NULL;
+ }
+
+ ifp = dhdinfo->iflist[ifidx];
+
+ if (ifp != NULL) {
+ if (ifp->net != NULL) {
+ DHD_ERROR(("%s: free existing IF %s ifidx:%d \n",
+ __FUNCTION__, ifp->net->name, ifidx));
+
+ if (ifidx == 0) {
+ /* For primary ifidx (0), there shouldn't be
+ * any netdev present already.
+ */
+ DHD_ERROR(("Primary ifidx populated already\n"));
+ ASSERT(0);
+ return NULL;
+ }
+
+ dhd_dev_priv_clear(ifp->net); /* clear net_device private */
+
+ /* in unregister_netdev case, the interface gets freed by net->destructor
+ * (which is set to free_netdev)
+ */
+#if defined(CONFIG_TIZEN)
+ net_stat_tizen_unregister(ifp->net);
+#endif /* CONFIG_TIZEN */
+ if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
+ free_netdev(ifp->net);
+ } else {
+ dhd_tx_stop_queues(ifp->net);
+ if (need_rtnl_lock)
+ unregister_netdev(ifp->net);
+ else
+ unregister_netdevice(ifp->net);
+ }
+ ifp->net = NULL;
+ }
+ } else {
+ ifp = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_t));
+ if (ifp == NULL) {
+ DHD_ERROR(("%s: OOM - dhd_if_t(%zu)\n", __FUNCTION__, sizeof(dhd_if_t)));
+ return NULL;
+ }
+ }
+
+ memset(ifp, 0, sizeof(dhd_if_t));
+ ifp->info = dhdinfo;
+ ifp->idx = ifidx;
+ ifp->bssidx = bssidx;
+#ifdef DHD_MCAST_REGEN
+ ifp->mcast_regen_bss_enable = FALSE;
+#endif
+ /* set to TRUE rx_pkt_chainable at alloc time */
+ ifp->rx_pkt_chainable = TRUE;
+
+ if (mac != NULL)
+ memcpy(&ifp->mac_addr, mac, ETHER_ADDR_LEN);
+
+ /* Allocate etherdev, including space for private structure */
+#ifdef DHD_MQ
+ if (enable_mq) {
+ ifp->net = alloc_etherdev_mq(DHD_DEV_PRIV_SIZE, MQ_MAX_QUEUES);
+ } else {
+ ifp->net = alloc_etherdev(DHD_DEV_PRIV_SIZE);
+ }
+#else
+ ifp->net = alloc_etherdev(DHD_DEV_PRIV_SIZE);
+#endif /* DHD_MQ */
+
+ if (ifp->net == NULL) {
+ DHD_ERROR(("%s: OOM - alloc_etherdev(%zu)\n", __FUNCTION__, sizeof(dhdinfo)));
+ goto fail;
+ }
+
+ /* Setup the dhd interface's netdevice private structure. */
+ dhd_dev_priv_save(ifp->net, dhdinfo, ifp, ifidx);
+
+ if (name && name[0]) {
+ strlcpy(ifp->net->name, name, IFNAMSIZ);
+ }
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 9))
+ /* as priv_destructor calls free_netdev, no need to set needs_free_netdev */
+ ifp->net->needs_free_netdev = 0;
+#ifdef WL_CFG80211
+ if (ifidx == 0)
+ ifp->net->priv_destructor = free_netdev;
+ else
+ ifp->net->priv_destructor = dhd_netdev_free;
+#else
+ ifp->net->priv_destructor = free_netdev;
+#endif /* WL_CFG80211 */
+#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 9) */
+#ifdef WL_CFG80211
+ if (ifidx == 0)
+ ifp->net->destructor = free_netdev;
+ else
+ ifp->net->destructor = dhd_netdev_free;
+#else
+ ifp->net->destructor = free_netdev;
+#endif /* WL_CFG80211 */
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 9) */
+ strlcpy(ifp->name, ifp->net->name, sizeof(ifp->name));
+ dhdinfo->iflist[ifidx] = ifp;
+
+ /* initialize the dongle-provided i/f name */
+ if (dngl_name) {
+ strlcpy(ifp->dngl_name, dngl_name, sizeof(ifp->dngl_name));
+ } else if (name) {
+ strlcpy(ifp->dngl_name, name, sizeof(ifp->dngl_name));
+ }
+
+#ifdef PCIE_FULL_DONGLE
+ /* Initialize STA info list */
+ INIT_LIST_HEAD(&ifp->sta_list);
+ DHD_IF_STA_LIST_LOCK_INIT(&ifp->sta_list_lock);
+#endif /* PCIE_FULL_DONGLE */
+
+#ifdef DHD_L2_FILTER
+ ifp->phnd_arp_table = init_l2_filter_arp_table(dhdpub->osh);
+ ifp->parp_allnode = TRUE;
+#endif /* DHD_L2_FILTER */
+
+#if (defined(BCM_ROUTER_DHD) && defined(QOS_MAP_SET))
+ ifp->qosmap_up_table = ((uint8*)MALLOCZ(dhdpub->osh, UP_TABLE_MAX));
+ ifp->qosmap_up_table_enable = FALSE;
+#endif /* BCM_ROUTER_DHD && QOS_MAP_SET */
+
+ DHD_CUMM_CTR_INIT(&ifp->cumm_ctr);
+
+#ifdef DHD_4WAYM4_FAIL_DISCONNECT
+ INIT_DELAYED_WORK(&ifp->m4state_work, dhd_m4_state_handler);
+#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
+
+#ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
+ ifp->recv_reassoc_evt = FALSE;
+ ifp->post_roam_evt = FALSE;
+#endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
+
+#ifdef DHDTCPSYNC_FLOOD_BLK
+ INIT_WORK(&ifp->blk_tsfl_work, dhd_blk_tsfl_handler);
+ dhd_reset_tcpsync_info_by_ifp(ifp);
+#endif /* DHDTCPSYNC_FLOOD_BLK */
+
+ return ifp->net;
+
+fail:
+ if (ifp != NULL) {
+ if (ifp->net != NULL) {
+#if defined(DHD_LB_RXP) && defined(PCIE_FULL_DONGLE)
+ if (ifp->net == dhdinfo->rx_napi_netdev) {
+ napi_disable(&dhdinfo->rx_napi_struct);
+ netif_napi_del(&dhdinfo->rx_napi_struct);
+ skb_queue_purge(&dhdinfo->rx_napi_queue);
+ dhdinfo->rx_napi_netdev = NULL;
+ }
+#endif /* DHD_LB_RXP && PCIE_FULL_DONGLE */
+ dhd_dev_priv_clear(ifp->net);
+ free_netdev(ifp->net);
+ ifp->net = NULL;
+ }
+ MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
+ }
+ dhdinfo->iflist[ifidx] = NULL;
+ return NULL;
+}
+
+static void
+dhd_cleanup_ifp(dhd_pub_t *dhdp, dhd_if_t *ifp)
+{
+#ifdef PCIE_FULL_DONGLE
+ s32 ifidx = 0;
+ if_flow_lkup_t *if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
+#endif /* PCIE_FULL_DONGLE */
+
+ if (ifp != NULL) {
+ if ((ifp->idx < 0) || (ifp->idx >= DHD_MAX_IFS)) {
+ DHD_ERROR(("Wrong idx:%d \n", ifp->idx));
+ ASSERT(0);
+ return;
+ }
+#ifdef DHD_L2_FILTER
+ bcm_l2_filter_arp_table_update(dhdp->osh, ifp->phnd_arp_table, TRUE,
+ NULL, FALSE, dhdp->tickcnt);
+ deinit_l2_filter_arp_table(dhdp->osh, ifp->phnd_arp_table);
+ ifp->phnd_arp_table = NULL;
+#endif /* DHD_L2_FILTER */
+
+#if (defined(BCM_ROUTER_DHD) && defined(QOS_MAP_SET))
+ MFREE(dhdp->osh, ifp->qosmap_up_table, UP_TABLE_MAX);
+ ifp->qosmap_up_table = NULL;
+ ifp->qosmap_up_table_enable = FALSE;
+#endif /* BCM_ROUTER_DHD && QOS_MAP_SET */
+
+ dhd_if_del_sta_list(ifp);
+#ifdef PCIE_FULL_DONGLE
+ /* Delete flowrings of virtual interface */
+ ifidx = ifp->idx;
+ if ((ifidx != 0) &&
+ ((if_flow_lkup != NULL) && (if_flow_lkup[ifidx].role != WLC_E_IF_ROLE_AP))) {
+ dhd_flow_rings_delete(dhdp, ifidx);
+ }
+#endif /* PCIE_FULL_DONGLE */
+ }
+}
+
+void
+dhd_cleanup_if(struct net_device *net)
+{
+ dhd_info_t *dhdinfo = DHD_DEV_INFO(net);
+ dhd_pub_t *dhdp = &dhdinfo->pub;
+ dhd_if_t *ifp;
+
+ ifp = dhd_get_ifp_by_ndev(dhdp, net);
+ if (ifp) {
+ if (ifp->idx >= DHD_MAX_IFS) {
+ DHD_ERROR(("Wrong ifidx: %p, %d\n", ifp, ifp->idx));
+ ASSERT(0);
+ return;
+ }
+ dhd_cleanup_ifp(dhdp, ifp);
+ }
+}
+
+/* unregister and free the net_device interface associated with the indexed
+ * slot, also free the slot memory and set the slot pointer to NULL
+ */
+#define DHD_TX_COMPLETION_TIMEOUT 5000
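+
+/* dhd_remove_if() below gives in-flight transmits up to this many
+ * milliseconds to drain (a wait_event_timeout on tx_paths_active)
+ * before proceeding with netdev teardown anyway.
+ */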
+int
+dhd_remove_if(dhd_pub_t *dhdpub, int ifidx, bool need_rtnl_lock)
+{
+ dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
+ dhd_if_t *ifp;
+ unsigned long flags;
+ long timeout;
+
+ ifp = dhdinfo->iflist[ifidx];
+
+ if (ifp != NULL) {
+#ifdef DHD_4WAYM4_FAIL_DISCONNECT
+ cancel_delayed_work_sync(&ifp->m4state_work);
+#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
+
+#ifdef DHDTCPSYNC_FLOOD_BLK
+ cancel_work_sync(&ifp->blk_tsfl_work);
+#endif /* DHDTCPSYNC_FLOOD_BLK */
+
+ dhd_cleanup_ifp(dhdpub, ifp);
+#ifdef WL_STATIC_IF
+ if (ifp->static_if) {
+ /* static IF will be handled in detach */
+ DHD_TRACE(("Skip del iface for static interface\n"));
+ return BCME_OK;
+ }
+#endif /* WL_STATIC_IF */
+ if (ifp->net != NULL) {
+ DHD_ERROR(("deleting interface '%s' idx %d\n", ifp->net->name, ifp->idx));
+
+ DHD_GENERAL_LOCK(dhdpub, flags);
+ ifp->del_in_progress = true;
+ DHD_GENERAL_UNLOCK(dhdpub, flags);
+
+ /* If TX is in progress, hold the if del */
+ if (DHD_IF_IS_TX_ACTIVE(ifp)) {
+ DHD_INFO(("TX in progress. Wait for it to be complete."));
+ timeout = wait_event_timeout(dhdpub->tx_completion_wait,
+ ((ifp->tx_paths_active & DHD_TX_CONTEXT_MASK) == 0),
+ msecs_to_jiffies(DHD_TX_COMPLETION_TIMEOUT));
+ if (!timeout) {
+ /* Tx completion timeout. Attempt proceeding ahead */
+ DHD_ERROR(("Tx completion timed out!\n"));
+ ASSERT(0);
+ }
+ } else {
+ DHD_TRACE(("No outstanding TX!\n"));
+ }
+ dhdinfo->iflist[ifidx] = NULL;
+ /* in unregister_netdev case, the interface gets freed by net->destructor
+ * (which is set to free_netdev)
+ */
+ if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
+ free_netdev(ifp->net);
+ } else {
+ netif_tx_disable(ifp->net);
+
+#if defined(SET_RPS_CPUS)
+ custom_rps_map_clear(ifp->net->_rx);
+#endif /* SET_RPS_CPUS */
+#if (defined(BCM_ROUTER_DHD) && defined(HNDCTF))
+ if (dhdinfo->cih)
+ ctf_dev_unregister(dhdinfo->cih, ifp->net);
+#endif /* BCM_ROUTER_DHD && HNDCTF */
+
+#if (defined(DHDTCPACK_SUPPRESS) && defined(BCMPCIE))
+ dhd_tcpack_suppress_set(dhdpub, TCPACK_SUP_OFF);
+#endif /* DHDTCPACK_SUPPRESS && BCMPCIE */
+ if (need_rtnl_lock)
+ unregister_netdev(ifp->net);
+ else
+ unregister_netdevice(ifp->net);
+#ifdef WL_EXT_IAPSTA
+ wl_ext_iapsta_dettach_netdev(ifp->net, ifidx);
+#endif /* WL_EXT_IAPSTA */
+#ifdef WL_ESCAN
+ wl_escan_event_dettach(ifp->net, ifidx);
+#endif /* WL_ESCAN */
+#ifdef WL_EVENT
+ wl_ext_event_dettach_netdev(ifp->net, ifidx);
+#endif /* WL_EVENT */
+ }
+ ifp->net = NULL;
+ DHD_GENERAL_LOCK(dhdpub, flags);
+ ifp->del_in_progress = false;
+ DHD_GENERAL_UNLOCK(dhdpub, flags);
+ }
+#ifdef DHD_WMF
+ dhd_wmf_cleanup(dhdpub, ifidx);
+#endif /* DHD_WMF */
+ DHD_CUMM_CTR_INIT(&ifp->cumm_ctr);
+
+ MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
+ ifp = NULL;
+ }
+
+ return BCME_OK;
+}
+
+#if (defined(BCM_ROUTER_DHD) && defined(QOS_MAP_SET))
+int
+dhd_set_qosmap_up_table(dhd_pub_t *dhdp, uint32 idx, bcm_tlv_t *qos_map_ie)
+{
+ dhd_info_t *dhd = dhdp->info;
+ dhd_if_t *ifp;
+
+ ASSERT(idx < DHD_MAX_IFS);
+ ifp = dhd->iflist[idx];
+
+ if (!ifp)
+ return BCME_ERROR;
+
+ wl_set_up_table(ifp->qosmap_up_table, qos_map_ie);
+ ifp->qosmap_up_table_enable = TRUE;
+
+ return BCME_OK;
+}
+#endif /* BCM_ROUTER_DHD && QOS_MAP_SET */
+
+static struct net_device_ops dhd_ops_pri = {
+ .ndo_open = dhd_pri_open,
+ .ndo_stop = dhd_pri_stop,
+ .ndo_get_stats = dhd_get_stats,
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
+ .ndo_siocdevprivate = dhd_ioctl_entry_wrapper,
+#else
+ .ndo_do_ioctl = dhd_ioctl_entry_wrapper,
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) */
+ .ndo_start_xmit = dhd_start_xmit_wrapper,
+#else
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
+ .ndo_siocdevprivate = dhd_ioctl_entry,
+#else
+ .ndo_do_ioctl = dhd_ioctl_entry,
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) */
+ .ndo_start_xmit = dhd_start_xmit,
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+ .ndo_set_mac_address = dhd_set_mac_address,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
+ .ndo_set_rx_mode = dhd_set_multicast_list,
+#else
+ .ndo_set_multicast_list = dhd_set_multicast_list,
+#endif
+#ifdef DHD_MQ
+ .ndo_select_queue = dhd_select_queue
+#endif
+};
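+
+/* The ops tables above and below select the ioctl hook by kernel version:
+ * since Linux 5.15, private (SIOCDEVPRIVATE-range) ioctls arrive via
+ * .ndo_siocdevprivate, which also passes the user data pointer, while
+ * older kernels route them through .ndo_do_ioctl. Sketch of the two
+ * handler signatures being matched (illustrative only):
+ */
+#if 0
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
+int dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr,
+ void __user *data, int cmd); /* matches .ndo_siocdevprivate */
+#else
+int dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr,
+ int cmd); /* matches .ndo_do_ioctl */
+#endif
+#endif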
+
+static struct net_device_ops dhd_ops_virt = {
+#if defined(WL_CFG80211) && defined(WL_STATIC_IF)
+ .ndo_open = dhd_static_if_open,
+ .ndo_stop = dhd_static_if_stop,
+#endif
+ .ndo_get_stats = dhd_get_stats,
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
+ .ndo_siocdevprivate = dhd_ioctl_entry_wrapper,
+#else
+ .ndo_do_ioctl = dhd_ioctl_entry_wrapper,
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) */
+ .ndo_start_xmit = dhd_start_xmit_wrapper,
+#else
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
+ .ndo_siocdevprivate = dhd_ioctl_entry,
+#else
+ .ndo_do_ioctl = dhd_ioctl_entry,
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) */
+ .ndo_start_xmit = dhd_start_xmit,
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+ .ndo_set_mac_address = dhd_set_mac_address,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
+ .ndo_set_rx_mode = dhd_set_multicast_list,
+#else
+ .ndo_set_multicast_list = dhd_set_multicast_list,
+#endif
+};
+
+#if (defined(BCM_ROUTER_DHD) && defined(HNDCTF))
+static void
+dhd_ctf_detach(ctf_t *ci, void *arg)
+{
+ dhd_info_t *dhd = (dhd_info_t *)arg;
+ dhd->cih = NULL;
+
+#ifdef CTFPOOL
+ /* free the buffers in fast pool */
+ osl_ctfpool_cleanup(dhd->pub.osh);
+#endif /* CTFPOOL */
+
+ return;
+}
+#endif /* BCM_ROUTER_DHD && HNDCTF */
+
+int
+dhd_os_write_file_posn(void *fp, unsigned long *posn, void *buf,
+ unsigned long buflen)
+{
+ loff_t wr_posn = *posn;
+
+ if (!fp || !buf || buflen == 0)
+ return -1;
+
+ if (vfs_write((struct file *)fp, buf, buflen, &wr_posn) < 0)
+ return -1;
+
+ *posn = wr_posn;
+ return 0;
+}
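+
+/*
+ * Example use (illustrative, hypothetical buffers): the caller-held
+ * position advances across calls, so two writes append back to back:
+ *
+ *	unsigned long pos = 0;
+ *	dhd_os_write_file_posn(fp, &pos, hdr, hdrlen);
+ *	dhd_os_write_file_posn(fp, &pos, body, bodylen);
+ */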
+
+#ifdef SHOW_LOGTRACE
+int
+dhd_os_read_file(void *file, char *buf, uint32 size)
+{
+ struct file *filep = (struct file *)file;
+
+ if (!file || !buf)
+ return -1;
+
+ return vfs_read(filep, buf, size, &filep->f_pos);
+}
+
+int
+dhd_os_seek_file(void *file, int64 offset)
+{
+ struct file *filep = (struct file *)file;
+ if (!file)
+ return -1;
+
+ /* offset can be negative */
+ filep->f_pos = filep->f_pos + offset;
+
+ return 0;
+}
+
+static int
+dhd_init_logstrs_array(osl_t *osh, dhd_event_log_t *temp)
+{
+ struct file *filep = NULL;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
+ struct kstat stat;
+ mm_segment_t fs;
+ int error = 0;
+#endif
+ char *raw_fmts = NULL;
+ int logstrs_size = 0;
+
+ if (control_logtrace != LOGTRACE_PARSED_FMT) {
+ DHD_ERROR_NO_HW4(("%s : turned off logstr parsing\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
+ fs = get_fs();
+ set_fs(KERNEL_DS);
+#endif
+
+ filep = filp_open(logstrs_path, O_RDONLY, 0);
+
+ if (IS_ERR(filep)) {
+ DHD_ERROR_NO_HW4(("%s: Failed to open the file %s \n", __FUNCTION__, logstrs_path));
+ goto fail;
+ }
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
+ error = vfs_stat(logstrs_path, &stat);
+ if (error) {
+ DHD_ERROR_NO_HW4(("%s: Failed to stat file %s \n", __FUNCTION__, logstrs_path));
+ goto fail;
+ }
+ logstrs_size = (int) stat.size;
+#else
+ logstrs_size = dhd_os_get_image_size(filep);
+#endif
+ if (logstrs_size <= 0) {
+ DHD_ERROR(("%s: get file size fails %d! \n", __FUNCTION__, logstrs_size));
+ goto fail;
+ }
+
+ if (temp->raw_fmts != NULL) {
+ raw_fmts = temp->raw_fmts; /* reuse already malloced raw_fmts */
+ } else {
+ raw_fmts = MALLOC(osh, logstrs_size);
+ if (raw_fmts == NULL) {
+ DHD_ERROR(("%s: Failed to allocate memory \n", __FUNCTION__));
+ goto fail;
+ }
+ }
+
+ if (vfs_read(filep, raw_fmts, logstrs_size, &filep->f_pos) != logstrs_size) {
+ DHD_ERROR_NO_HW4(("%s: Failed to read file %s\n", __FUNCTION__, logstrs_path));
+ goto fail;
+ }
+
+ if (dhd_parse_logstrs_file(osh, raw_fmts, logstrs_size, temp)
+ == BCME_OK) {
+ filp_close(filep, NULL);
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
+ set_fs(fs);
+#endif
+ return BCME_OK;
+ }
+
+fail:
+ if (raw_fmts) {
+ MFREE(osh, raw_fmts, logstrs_size);
+ }
+ if (temp->fmts != NULL) {
+ MFREE(osh, temp->fmts, temp->num_fmts * sizeof(char *));
+ }
+
+ if (!IS_ERR(filep))
+ filp_close(filep, NULL);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
+ set_fs(fs);
+#endif
+ temp->fmts = NULL;
+ temp->raw_fmts = NULL;
+
+ return BCME_ERROR;
+}
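+
+/*
+ * Note on the KERNEL_VERSION(5, 10, 0) guards used throughout this file:
+ * on older kernels, vfs_read()/vfs_write() on a kernel-space buffer needs
+ * the get_fs()/set_fs(KERNEL_DS) address-limit switch, and the saved limit
+ * must always be restored. A minimal sketch of the recurring pattern
+ * (hypothetical helper, error handling elided):
+ */
+#if 0
+static int example_read_kernel_file(const char *path, char *buf, int len)
+{
+ struct file *filep;
+ int ret = -1;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
+ mm_segment_t fs = get_fs();
+
+ set_fs(KERNEL_DS); /* allow vfs_read() on a kernel buffer */
+#endif
+ filep = filp_open(path, O_RDONLY, 0);
+ if (!IS_ERR(filep)) {
+ ret = vfs_read(filep, buf, len, &filep->f_pos);
+ filp_close(filep, NULL);
+ }
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
+ set_fs(fs); /* always restore the saved limit */
+#endif
+ return ret;
+}
+#endif /* 0 -- example only */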
+
+static int
+dhd_read_map(osl_t *osh, char *fname, uint32 *ramstart, uint32 *rodata_start,
+ uint32 *rodata_end)
+{
+ struct file *filep = NULL;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
+ mm_segment_t fs;
+#endif
+ int err = BCME_ERROR;
+
+ if (fname == NULL) {
+ DHD_ERROR(("%s: ERROR fname is NULL \n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
+ fs = get_fs();
+ set_fs(KERNEL_DS);
+#endif
+
+ filep = filp_open(fname, O_RDONLY, 0);
+ if (IS_ERR(filep)) {
+ DHD_ERROR_NO_HW4(("%s: Failed to open %s \n", __FUNCTION__, fname));
+ goto fail;
+ }
+
+ if ((err = dhd_parse_map_file(osh, filep, ramstart,
+ rodata_start, rodata_end)) < 0)
+ goto fail;
+
+fail:
+ if (!IS_ERR(filep))
+ filp_close(filep, NULL);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
+ set_fs(fs);
+#endif
+
+ return err;
+}
+#ifdef DHD_COREDUMP
+#define PC_FOUND_BIT 0x01
+#define LR_FOUND_BIT 0x02
+#define ALL_ADDR_VAL (PC_FOUND_BIT | LR_FOUND_BIT)
+#define READ_NUM_BYTES 1000
+#define DHD_FUNC_STR_LEN 80
+static int
+dhd_lookup_map(osl_t *osh, char *fname, uint32 pc, char *pc_fn,
+ uint32 lr, char *lr_fn)
+{
+ struct file *filep = NULL;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
+ mm_segment_t fs;
+#endif
+ char *raw_fmts = NULL, *raw_fmts_loc = NULL, *cptr = NULL;
+ uint32 read_size = READ_NUM_BYTES;
+ int err = BCME_ERROR;
+ uint32 addr = 0, addr1 = 0, addr2 = 0;
+ char type = '?', type1 = '?', type2 = '?';
+ char func[DHD_FUNC_STR_LEN] = "\0";
+ char func1[DHD_FUNC_STR_LEN] = "\0";
+ char func2[DHD_FUNC_STR_LEN] = "\0";
+ uint8 count = 0;
+ int num, len = 0, offset;
+
+ DHD_TRACE(("%s: fname %s pc 0x%x lr 0x%x \n",
+ __FUNCTION__, fname, pc, lr));
+ if (fname == NULL) {
+ DHD_ERROR(("%s: ERROR fname is NULL \n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ /* Allocate 1 byte more than read_size to terminate it with NULL */
+ raw_fmts = MALLOCZ(osh, read_size + 1);
+ if (raw_fmts == NULL) {
+ DHD_ERROR(("%s: Failed to allocate raw_fmts memory \n",
+ __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
+ fs = get_fs();
+ set_fs(KERNEL_DS);
+#endif
+
+ filep = filp_open(fname, O_RDONLY, 0);
+ if (IS_ERR(filep)) {
+ DHD_ERROR(("%s: Failed to open %s \n", __FUNCTION__, fname));
+ goto fail;
+ }
+
+ if (pc_fn == NULL) {
+ count |= PC_FOUND_BIT;
+ }
+ if (lr_fn == NULL) {
+ count |= LR_FOUND_BIT;
+ }
+ while (count != ALL_ADDR_VAL)
+ {
+ err = dhd_os_read_file(filep, raw_fmts, read_size);
+ if (err < 0) {
+ DHD_ERROR(("%s: map file read failed err:%d \n",
+ __FUNCTION__, err));
+ goto fail;
+ }
+
+ /* End raw_fmts with NULL as strstr expects NULL terminated
+ * strings
+ */
+ raw_fmts[read_size] = '\0';
+ raw_fmts_loc = raw_fmts;
+ offset = 0;
+
+ while ((count != ALL_ADDR_VAL) && (offset < read_size))
+ {
+ cptr = bcmstrtok(&raw_fmts_loc, "\n", 0);
+ if (cptr == NULL) {
+ DHD_TRACE(("%s: cptr is NULL, offset %d"
+ " raw_fmts_loc %s \n",
+ __FUNCTION__, offset, raw_fmts_loc));
+ break;
+ }
+ DHD_TRACE(("%s: %s \n", __FUNCTION__, cptr));
+ if ((type2 == 'A') ||
+ (type2 == 'T') ||
+ (type2 == 'W')) {
+ addr1 = addr2;
+ type1 = type2;
+ (void)memcpy_s(func1, DHD_FUNC_STR_LEN,
+ func2, DHD_FUNC_STR_LEN);
+ DHD_TRACE(("%s: %x %c %s \n",
+ __FUNCTION__, addr1, type1, func1));
+ }
+ len = strlen(cptr);
+ num = sscanf(cptr, "%x %c %79s", &addr, &type, func);
+ DHD_TRACE(("%s: num %d addr %x type %c func %s \n",
+ __FUNCTION__, num, addr, type, func));
+ if (num == 3) {
+ addr2 = addr;
+ type2 = type;
+ (void)memcpy_s(func2, DHD_FUNC_STR_LEN,
+ func, DHD_FUNC_STR_LEN);
+ }
+
+ if (!(count & PC_FOUND_BIT) &&
+ (pc >= addr1 && pc < addr2)) {
+ if ((cptr = strchr(func1, '$')) != NULL) {
+ (void)strncpy(func, cptr + 1,
+ DHD_FUNC_STR_LEN - 1);
+ } else {
+ (void)memcpy_s(func, DHD_FUNC_STR_LEN,
+ func1, DHD_FUNC_STR_LEN);
+ }
+ if ((cptr = strstr(func, "__bcmromfn"))
+ != NULL) {
+ *cptr = 0;
+ }
+ if (pc > addr1) {
+ sprintf(pc_fn, "%.68s+0x%x",
+ func, pc - addr1);
+ } else {
+ (void)memcpy_s(pc_fn, DHD_FUNC_STR_LEN,
+ func, DHD_FUNC_STR_LEN);
+ }
+ count |= PC_FOUND_BIT;
+ DHD_INFO(("%s: found addr1 %x pc %x"
+ " addr2 %x \n",
+ __FUNCTION__, addr1, pc, addr2));
+ }
+ if (!(count & LR_FOUND_BIT) &&
+ (lr >= addr1 && lr < addr2)) {
+ if ((cptr = strchr(func1, '$')) != NULL) {
+ (void)strncpy(func, cptr + 1,
+ DHD_FUNC_STR_LEN - 1);
+ } else {
+ (void)memcpy_s(func, DHD_FUNC_STR_LEN,
+ func1, DHD_FUNC_STR_LEN);
+ }
+ if ((cptr = strstr(func, "__bcmromfn"))
+ != NULL) {
+ *cptr = 0;
+ }
+ if (lr > addr1) {
+ sprintf(lr_fn, "%.68s+0x%x",
+ func, lr - addr1);
+ } else {
+ (void)memcpy_s(lr_fn, DHD_FUNC_STR_LEN,
+ func, DHD_FUNC_STR_LEN);
+ }
+ count |= LR_FOUND_BIT;
+ DHD_INFO(("%s: found addr1 %x lr %x"
+ " addr2 %x \n",
+ __FUNCTION__, addr1, lr, addr2));
+ }
+ offset += (len + 1);
+ }
+
+ if (err < (int)read_size) {
+ /*
+ * Each pass rewinds the file position by one line's worth of
+ * bytes (see the seek below), so a symbol split across two
+ * reads is never missed -- but it also means EOF is never
+ * reached exactly. A short read therefore indicates EOF;
+ * stop reading.
+ */
+ break;
+ }
+ memset(raw_fmts, 0, read_size);
+ /*
+ * Rewind by one line's worth of bytes so that a symbol and its
+ * address are still matched even if the line is split across
+ * this read and the next.
+ */
+ dhd_os_seek_file(filep, -(len + 1));
+ DHD_TRACE(("%s: seek %d \n", __FUNCTION__, -(len + 1)));
+ }
+
+fail:
+ if (!IS_ERR(filep))
+ filp_close(filep, NULL);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
+ set_fs(fs);
+#endif
+
+ if (!(count & PC_FOUND_BIT)) {
+ sprintf(pc_fn, "0x%08x", pc);
+ }
+ if (!(count & LR_FOUND_BIT)) {
+ sprintf(lr_fn, "0x%08x", lr);
+ }
+ return err;
+}
+#endif /* DHD_COREDUMP */
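+
+/*
+ * Worked example for dhd_lookup_map() above (hypothetical addresses):
+ * given two consecutive map-file lines
+ *
+ *	001e40f0 T wlc_recv
+ *	001e4a2c T wlc_sendpkt
+ *
+ * a pc of 0x1e4104 satisfies addr1 <= pc < addr2 and lies past addr1, so
+ * it resolves to "wlc_recv+0x14" via the sprintf("%.68s+0x%x") branch;
+ * a pc equal to addr1 would copy the bare symbol name instead.
+ */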
+
+static int
+dhd_init_static_strs_array(osl_t *osh, dhd_event_log_t *temp, char *str_file, char *map_file)
+{
+ struct file *filep = NULL;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
+ mm_segment_t fs;
+#endif
+ char *raw_fmts = NULL;
+ uint32 logstrs_size = 0;
+ int error = 0;
+ uint32 ramstart = 0;
+ uint32 rodata_start = 0;
+ uint32 rodata_end = 0;
+ uint32 logfilebase = 0;
+
+ error = dhd_read_map(osh, map_file, &ramstart, &rodata_start, &rodata_end);
+ if (error != BCME_OK) {
+ DHD_ERROR(("readmap Error!! \n"));
+ /* skip event log parsing in this case */
+ if (strstr(str_file, ram_file_str) != NULL) {
+ temp->raw_sstr = NULL;
+ } else if (strstr(str_file, rom_file_str) != NULL) {
+ temp->rom_raw_sstr = NULL;
+ }
+ return error;
+ }
+ DHD_ERROR(("ramstart: 0x%x, rodata_start: 0x%x, rodata_end:0x%x\n",
+ ramstart, rodata_start, rodata_end));
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
+ fs = get_fs();
+ set_fs(KERNEL_DS);
+#endif
+
+ filep = filp_open(str_file, O_RDONLY, 0);
+ if (IS_ERR(filep)) {
+ DHD_ERROR(("%s: Failed to open the file %s \n", __FUNCTION__, str_file));
+ goto fail;
+ }
+
+ /* The full file is huge; read only the required window */
+ logstrs_size = rodata_end - rodata_start;
+ logfilebase = rodata_start - ramstart;
+
+ if (logstrs_size == 0) {
+ DHD_ERROR(("%s: return as logstrs_size is 0\n", __FUNCTION__));
+ goto fail1;
+ }
+
+ if (strstr(str_file, ram_file_str) != NULL && temp->raw_sstr != NULL) {
+ raw_fmts = temp->raw_sstr; /* reuse already malloced raw_fmts */
+ } else if (strstr(str_file, rom_file_str) != NULL && temp->rom_raw_sstr != NULL) {
+ raw_fmts = temp->rom_raw_sstr; /* reuse already malloced raw_fmts */
+ } else {
+ raw_fmts = MALLOC(osh, logstrs_size);
+
+ if (raw_fmts == NULL) {
+ DHD_ERROR(("%s: Failed to allocate raw_fmts memory \n", __FUNCTION__));
+ goto fail;
+ }
+ }
+
+ error = generic_file_llseek(filep, logfilebase, SEEK_SET);
+ if (error < 0) {
+ DHD_ERROR(("%s: %s llseek failed %d \n", __FUNCTION__, str_file, error));
+ goto fail;
+ }
+
+ error = vfs_read(filep, raw_fmts, logstrs_size, (&filep->f_pos));
+ if (error != logstrs_size) {
+ DHD_ERROR(("%s: %s read failed %d \n", __FUNCTION__, str_file, error));
+ goto fail;
+ }
+
+ if (strstr(str_file, ram_file_str) != NULL) {
+ temp->raw_sstr = raw_fmts;
+ temp->raw_sstr_size = logstrs_size;
+ temp->rodata_start = rodata_start;
+ temp->rodata_end = rodata_end;
+ } else if (strstr(str_file, rom_file_str) != NULL) {
+ temp->rom_raw_sstr = raw_fmts;
+ temp->rom_raw_sstr_size = logstrs_size;
+ temp->rom_rodata_start = rodata_start;
+ temp->rom_rodata_end = rodata_end;
+ }
+
+ filp_close(filep, NULL);
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
+ set_fs(fs);
+#endif
+
+ return BCME_OK;
+
+fail:
+ if (raw_fmts) {
+ MFREE(osh, raw_fmts, logstrs_size);
+ }
+
+fail1:
+ if (!IS_ERR(filep))
+ filp_close(filep, NULL);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
+ set_fs(fs);
+#endif
+
+ if (strstr(str_file, ram_file_str) != NULL) {
+ temp->raw_sstr = NULL;
+ } else if (strstr(str_file, rom_file_str) != NULL) {
+ temp->rom_raw_sstr = NULL;
+ }
+
+ return error;
+} /* dhd_init_static_strs_array */
+
+#endif /* SHOW_LOGTRACE */
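+
+/*
+ * Worked example for the static-strings window in
+ * dhd_init_static_strs_array() (hypothetical map values): with
+ * ramstart = 0x180000, rodata_start = 0x1a0000 and rodata_end = 0x1a8000,
+ * only logstrs_size = 0x8000 bytes are read, starting at file offset
+ * logfilebase = rodata_start - ramstart = 0x20000, rather than the whole
+ * firmware image.
+ */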
+
+#ifdef BT_OVER_PCIE
+void request_bt_quiesce(bool quiesce) __attribute__ ((weak));
+void response_bt_quiesce(bool quiesce);
+
+static void (*request_bt_quiesce_ptr)(bool);
+typedef void (*response_bt_quiesce_ptr)(bool);
+
+response_bt_quiesce_ptr
+register_request_bt_quiesce(void (*fnc)(bool))
+{
+ request_bt_quiesce_ptr = fnc;
+ return response_bt_quiesce;
+}
+EXPORT_SYMBOL(register_request_bt_quiesce);
+
+void
+unregister_request_bt_quiesce(void)
+{
+ request_bt_quiesce_ptr = NULL;
+ return;
+}
+EXPORT_SYMBOL(unregister_request_bt_quiesce);
+#endif /* BT_OVER_PCIE */
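+
+/*
+ * Illustrative BT-driver-side usage of the quiesce handshake above
+ * (hypothetical caller): the BT driver registers the handler DHD invokes
+ * to request quiesce, and receives DHD's response callback in return.
+ */
+#if 0
+static void (*bt_response_fn)(bool);
+
+static void example_bt_quiesce_handler(bool quiesce)
+{
+ /* quiesce or resume BT traffic here, then acknowledge to DHD */
+ if (bt_response_fn)
+ bt_response_fn(quiesce);
+}
+
+static void example_bt_register(void)
+{
+ bt_response_fn = register_request_bt_quiesce(example_bt_quiesce_handler);
+}
+#endif /* 0 -- example only */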
+
+#ifdef DHD_ERPOM
+uint enable_erpom = 0;
+module_param(enable_erpom, int, 0);
+
+int
+dhd_wlan_power_off_handler(void *handler, unsigned char reason)
+{
+ dhd_pub_t *dhdp = (dhd_pub_t *)handler;
+ bool dongle_isolation = dhdp->dongle_isolation;
+
+ DHD_ERROR(("%s: WLAN DHD cleanup reason: %d\n", __FUNCTION__, reason));
+
+ if ((reason == BY_BT_DUE_TO_BT) || (reason == BY_BT_DUE_TO_WLAN)) {
+#if defined(DHD_FW_COREDUMP)
+ /* save core dump to a file */
+ if (dhdp->memdump_enabled) {
+#ifdef DHD_SSSR_DUMP
+ DHD_ERROR(("%s : Set collect_sssr as TRUE\n", __FUNCTION__));
+ dhdp->collect_sssr = TRUE;
+#endif /* DHD_SSSR_DUMP */
+ dhdp->memdump_type = DUMP_TYPE_DUE_TO_BT;
+ dhd_bus_mem_dump(dhdp);
+ }
+#endif /* DHD_FW_COREDUMP */
+ }
+
+ /* pause data on all the interfaces */
+ dhd_bus_stop_queue(dhdp->bus);
+
+ /* Devreset function will perform FLR again, to avoid it set dongle_isolation */
+ dhdp->dongle_isolation = TRUE;
+ dhd_bus_devreset(dhdp, 1); /* DHD structure cleanup */
+ dhdp->dongle_isolation = dongle_isolation; /* Restore the old value */
+ return 0;
+}
+
+int
+dhd_wlan_power_on_handler(void *handler, unsigned char reason)
+{
+ dhd_pub_t *dhdp = (dhd_pub_t *)handler;
+ bool dongle_isolation = dhdp->dongle_isolation;
+
+ DHD_ERROR(("%s: WLAN DHD re-init reason: %d\n", __FUNCTION__, reason));
+ /* Devreset function will perform FLR again, to avoid it set dongle_isolation */
+ dhdp->dongle_isolation = TRUE;
+ dhd_bus_devreset(dhdp, 0); /* DHD structure re-init */
+ dhdp->dongle_isolation = dongle_isolation; /* Restore the old value */
+ /* resume data on all the interfaces */
+ dhd_bus_start_queue(dhdp->bus);
+ return 0;
+
+}
+
+#endif /* DHD_ERPOM */
+
+#ifdef BCMDBUS
+uint
+dhd_get_rxsz(dhd_pub_t *pub)
+{
+ struct net_device *net = NULL;
+ dhd_info_t *dhd = NULL;
+ uint rxsz;
+
+ /* Assign rxsz for dbus_attach */
+ dhd = pub->info;
+ net = dhd->iflist[0]->net;
+ net->hard_header_len = ETH_HLEN + pub->hdrlen;
+ rxsz = DBUS_RX_BUFFER_SIZE_DHD(net);
+
+ return rxsz;
+}
+
+void
+dhd_set_path(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd = NULL;
+
+ dhd = pub->info;
+
+ /* try to download image and nvram to the dongle */
+ if (dhd_update_fw_nv_path(dhd) && dhd->pub.bus) {
+ DHD_INFO(("%s: fw %s, nv %s, conf %s\n",
+ __FUNCTION__, dhd->fw_path, dhd->nv_path, dhd->conf_path));
+ dhd_bus_update_fw_nv_path(dhd->pub.bus,
+ dhd->fw_path, dhd->nv_path, dhd->clm_path, dhd->conf_path);
+ }
+}
+#endif
+
+/** Called once for each hardware (dongle) instance that this DHD manages */
+dhd_pub_t *
+dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen
+#ifdef BCMDBUS
+ , void *data
+#endif
+)
+{
+ dhd_info_t *dhd = NULL;
+ struct net_device *net = NULL;
+ char if_name[IFNAMSIZ] = {'\0'};
+#ifdef SHOW_LOGTRACE
+ int ret;
+#endif /* SHOW_LOGTRACE */
+#ifdef DHD_ERPOM
+ pom_func_handler_t *pom_handler;
+#endif /* DHD_ERPOM */
+#if defined(BCMSDIO) || defined(BCMPCIE)
+ uint32 bus_type = -1;
+ uint32 bus_num = -1;
+ uint32 slot_num = -1;
+ wifi_adapter_info_t *adapter = NULL;
+#elif defined(BCMDBUS)
+ wifi_adapter_info_t *adapter = data;
+#endif
+
+ dhd_attach_states_t dhd_state = DHD_ATTACH_STATE_INIT;
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+#ifdef PCIE_FULL_DONGLE
+ ASSERT(sizeof(dhd_pkttag_fd_t) <= OSL_PKTTAG_SZ);
+ ASSERT(sizeof(dhd_pkttag_fr_t) <= OSL_PKTTAG_SZ);
+#endif /* PCIE_FULL_DONGLE */
+
+ /* will implement get_ids for DBUS later */
+#if defined(BCMSDIO) || defined(BCMPCIE)
+ dhd_bus_get_ids(bus, &bus_type, &bus_num, &slot_num);
+ adapter = dhd_wifi_platform_get_adapter(bus_type, bus_num, slot_num);
+#endif
+
+ /* Allocate primary dhd_info */
+ dhd = wifi_platform_prealloc(adapter, DHD_PREALLOC_DHD_INFO, sizeof(dhd_info_t));
+ if (dhd == NULL) {
+ dhd = MALLOC(osh, sizeof(dhd_info_t));
+ if (dhd == NULL) {
+ DHD_ERROR(("%s: OOM - alloc dhd_info\n", __FUNCTION__));
+ goto dhd_null_flag;
+ }
+ }
+ memset(dhd, 0, sizeof(dhd_info_t));
+ dhd_state |= DHD_ATTACH_STATE_DHD_ALLOC;
+
+ dhd->unit = dhd_found + instance_base; /* do not increment dhd_found, yet */
+
+ dhd->pub.osh = osh;
+#ifdef DUMP_IOCTL_IOV_LIST
+ dll_init(&(dhd->pub.dump_iovlist_head));
+#endif /* DUMP_IOCTL_IOV_LIST */
+
+ dhd->pub.dhd_console_ms = dhd_console_ms; /* assigns default value */
+
+ dhd->adapter = adapter;
+ dhd->pub.adapter = (void *)adapter;
+#ifdef BT_OVER_SDIO
+ dhd->pub.is_bt_recovery_required = FALSE;
+ mutex_init(&dhd->bus_user_lock);
+#endif /* BT_OVER_SDIO */
+
+ g_dhd_pub = &dhd->pub;
+
+#ifdef DHD_DEBUG
+ dll_init(&(dhd->pub.mw_list_head));
+#endif /* DHD_DEBUG */
+
+#ifdef CUSTOM_FORCE_NODFS_FLAG
+ dhd->pub.dhd_cflags |= WLAN_PLAT_NODFS_FLAG;
+ dhd->pub.force_country_change = TRUE;
+#endif /* CUSTOM_FORCE_NODFS_FLAG */
+#ifdef CUSTOM_COUNTRY_CODE
+ get_customized_country_code(dhd->adapter,
+ dhd->pub.dhd_cspec.country_abbrev, &dhd->pub.dhd_cspec,
+ dhd->pub.dhd_cflags);
+#endif /* CUSTOM_COUNTRY_CODE */
+#ifndef BCMDBUS
+ dhd->thr_dpc_ctl.thr_pid = DHD_PID_KT_TL_INVALID;
+ dhd->thr_wdt_ctl.thr_pid = DHD_PID_KT_INVALID;
+#ifdef DHD_WET
+ dhd->pub.wet_info = dhd_get_wet_info(&dhd->pub);
+#endif /* DHD_WET */
+#ifdef WL_NANHO
+ /* initialize NANHO host module */
+ if (bcm_nanho_init(&dhd->pub.nanhoi, &dhd->pub,
+ dhd_nho_ioctl_cb, dhd_nho_evt_cb, NULL) != BCME_OK) {
+ goto fail;
+ }
+#endif /* WL_NANHO */
+ /* Initialize thread based operation and lock */
+ sema_init(&dhd->sdsem, 1);
+#endif /* BCMDBUS */
+#if defined(WL_MONITOR) && defined(HOST_RADIOTAP_CONV)
+ dhd->host_radiotap_conv = FALSE;
+#endif /* WL_MONITOR */
+ dhd->pub.pcie_txs_metadata_enable = pcie_txs_metadata_enable;
+
+ /* Link to info module */
+ dhd->pub.info = dhd;
+
+ /* Link to bus module */
+ dhd->pub.bus = bus;
+ dhd->pub.hdrlen = bus_hdrlen;
+ dhd->pub.txoff = FALSE;
+#ifdef CHECK_TRAP_ROT
+ dhd->pub.check_trap_rot = TRUE;
+#else
+ dhd->pub.check_trap_rot = FALSE;
+#endif /* CHECK_TRAP_ROT */
+
+ /* dhd_conf must be attached after linking dhd to dhd->pub.info,
+ * because dhd_detach checks whether .info is NULL.
+ */
+ if (dhd_conf_attach(&dhd->pub) != 0) {
+ DHD_ERROR(("dhd_conf_attach failed\n"));
+ goto fail;
+ }
+#ifndef BCMDBUS
+ dhd_conf_reset(&dhd->pub);
+ dhd_conf_set_chiprev(&dhd->pub, dhd_bus_chip(bus), dhd_bus_chiprev(bus));
+ dhd_conf_preinit(&dhd->pub);
+#endif /* !BCMDBUS */
+
+ /* Some DHD modules (e.g. cfg80211) configure the operation mode based on the
+ * firmware name. This is admittedly a hack, but it has to keep working until
+ * a better solution is in place.
+ */
+ dhd_update_fw_nv_path(dhd);
+
+ /* Set network interface name if it was provided as module parameter */
+ if (iface_name[0]) {
+ int len;
+ char ch;
+ strlcpy(if_name, iface_name, sizeof(if_name));
+ len = strlen(if_name);
+ ch = if_name[len - 1];
+ if ((ch > '9' || ch < '0') && (len < IFNAMSIZ - 2)) {
+ strncat(if_name, "%d", sizeof(if_name) - len - 1);
+ }
+ }
+
+ /* Pass NULL as dngl_name so that the host copies if_name into the dngl_name member */
+ net = dhd_allocate_if(&dhd->pub, 0, if_name, NULL, 0, TRUE, NULL);
+ if (net == NULL) {
+ goto fail;
+ }
+ mutex_init(&dhd->pub.ndev_op_sync);
+
+ dhd_state |= DHD_ATTACH_STATE_ADD_IF;
+#ifdef DHD_L2_FILTER
+ /* initialize the l2_filter_cnt */
+ dhd->pub.l2_filter_cnt = 0;
+#endif
+ net->netdev_ops = NULL;
+
+ mutex_init(&dhd->dhd_iovar_mutex);
+ sema_init(&dhd->proto_sem, 1);
+
+#if defined(DHD_HANG_SEND_UP_TEST)
+ dhd->pub.req_hang_type = 0;
+#endif /* DHD_HANG_SEND_UP_TEST */
+
+#ifdef PROP_TXSTATUS
+ spin_lock_init(&dhd->wlfc_spinlock);
+
+ dhd->pub.skip_fc = dhd_wlfc_skip_fc;
+ dhd->pub.plat_init = dhd_wlfc_plat_init;
+ dhd->pub.plat_deinit = dhd_wlfc_plat_deinit;
+
+#ifdef DHD_WLFC_THREAD
+ init_waitqueue_head(&dhd->pub.wlfc_wqhead);
+ dhd->pub.wlfc_thread = kthread_create(dhd_wlfc_transfer_packets, &dhd->pub, "wlfc-thread");
+ if (IS_ERR(dhd->pub.wlfc_thread)) {
+ DHD_ERROR(("create wlfc thread failed\n"));
+ goto fail;
+ } else {
+ wake_up_process(dhd->pub.wlfc_thread);
+ }
+#endif /* DHD_WLFC_THREAD */
+#endif /* PROP_TXSTATUS */
+
+ /* Initialize other structure content */
+ /* XXX Some of this goes away, leftover from USB */
+ /* XXX Some could also move to bus_init()? */
+ init_waitqueue_head(&dhd->ioctl_resp_wait);
+ init_waitqueue_head(&dhd->pub.tx_tput_test_wait);
+ init_waitqueue_head(&dhd->d3ack_wait);
+#ifdef PCIE_INB_DW
+ init_waitqueue_head(&dhd->ds_exit_wait);
+#endif /* PCIE_INB_DW */
+ init_waitqueue_head(&dhd->ctrl_wait);
+ init_waitqueue_head(&dhd->dhd_bus_busy_state_wait);
+ init_waitqueue_head(&dhd->dmaxfer_wait);
+#ifdef BT_OVER_PCIE
+ init_waitqueue_head(&dhd->quiesce_wait);
+#endif /* BT_OVER_PCIE */
+ init_waitqueue_head(&dhd->pub.tx_completion_wait);
+ dhd->pub.dhd_bus_busy_state = 0;
+ /* Initialize the spinlocks */
+ spin_lock_init(&dhd->sdlock);
+ spin_lock_init(&dhd->txqlock);
+ spin_lock_init(&dhd->dhd_lock);
+ spin_lock_init(&dhd->txoff_lock);
+ spin_lock_init(&dhd->rxf_lock);
+#ifdef WLTDLS
+ spin_lock_init(&dhd->pub.tdls_lock);
+#endif /* WLTDLS */
+#if defined(RXFRAME_THREAD)
+ dhd->rxthread_enabled = TRUE;
+#endif /* defined(RXFRAME_THREAD) */
+
+#ifdef DHDTCPACK_SUPPRESS
+ spin_lock_init(&dhd->tcpack_lock);
+#endif /* DHDTCPACK_SUPPRESS */
+
+#ifdef DHD_HP2P
+ spin_lock_init(&dhd->hp2p_lock);
+#endif
+ /* Initialize Wakelock stuff */
+ spin_lock_init(&dhd->wakelock_spinlock);
+ spin_lock_init(&dhd->wakelock_evt_spinlock);
+ DHD_OS_WAKE_LOCK_INIT(dhd);
+ dhd->wakelock_counter = 0;
+ /* wakelocks prevent a system from going into a low power state */
+#ifdef CONFIG_HAS_WAKELOCK
+ // terence 20161023: do not destroy wl_wifi on wlan down, otherwise a NULL pointer dereference occurs in dhd_ioctl_entry
+ dhd_wake_lock_init(&dhd->wl_wifi, WAKE_LOCK_SUSPEND, "wlan_wake");
+ dhd_wake_lock_init(&dhd->wl_wdwake, WAKE_LOCK_SUSPEND, "wlan_wd_wake");
+#endif /* CONFIG_HAS_WAKELOCK */
+
+#if defined(OEM_ANDROID)
+ mutex_init(&dhd->dhd_net_if_mutex);
+ mutex_init(&dhd->dhd_suspend_mutex);
+#if defined(PKT_FILTER_SUPPORT) && defined(APF)
+ mutex_init(&dhd->dhd_apf_mutex);
+#endif /* PKT_FILTER_SUPPORT && APF */
+#endif /* defined(OEM_ANDROID) */
+ dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT;
+
+ /* Attach and link in the protocol */
+ if (dhd_prot_attach(&dhd->pub) != 0) {
+ DHD_ERROR(("dhd_prot_attach failed\n"));
+ goto fail;
+ }
+ dhd_state |= DHD_ATTACH_STATE_PROT_ATTACH;
+
+#ifdef DHD_TIMESYNC
+ /* attach the timesync module */
+ if (dhd_timesync_attach(&dhd->pub) != 0) {
+ DHD_ERROR(("dhd_timesync_attach failed\n"));
+ goto fail;
+ }
+ dhd_state |= DHD_ATTACH_TIMESYNC_ATTACH_DONE;
+#endif /* DHD_TIMESYNC */
+
+#ifdef WL_CFG80211
+ spin_lock_init(&dhd->pub.up_lock);
+ /* Attach and link in the cfg80211 */
+ if (unlikely(wl_cfg80211_attach(net, &dhd->pub))) {
+ DHD_ERROR(("wl_cfg80211_attach failed\n"));
+ goto fail;
+ }
+
+ dhd_monitor_init(&dhd->pub);
+ dhd_state |= DHD_ATTACH_STATE_CFG80211;
+#endif
+
+#ifdef WL_EVENT
+ if (wl_ext_event_attach(net) != 0) {
+ DHD_ERROR(("wl_ext_event_attach failed\n"));
+ goto fail;
+ }
+#endif /* WL_EVENT */
+#ifdef WL_ESCAN
+ /* Attach and link in the escan */
+ if (wl_escan_attach(net) != 0) {
+ DHD_ERROR(("wl_escan_attach failed\n"));
+ goto fail;
+ }
+#endif /* WL_ESCAN */
+#ifdef WL_EXT_IAPSTA
+ if (wl_ext_iapsta_attach(net) != 0) {
+ DHD_ERROR(("wl_ext_iapsta_attach failed\n"));
+ goto fail;
+ }
+#endif /* WL_EXT_IAPSTA */
+#ifdef WL_EXT_GENL
+ if (wl_ext_genl_init(net)) {
+ DHD_ERROR(("wl_ext_genl_init failed\n"));
+ goto fail;
+ }
+#endif
+#if defined(WL_WIRELESS_EXT)
+ /* Attach and link in the iw */
+ if (wl_iw_attach(net) != 0) {
+ DHD_ERROR(("wl_iw_attach failed\n"));
+ goto fail;
+ }
+ dhd_state |= DHD_ATTACH_STATE_WL_ATTACH;
+#endif /* defined(WL_WIRELESS_EXT) */
+
+#ifdef SHOW_LOGTRACE
+ ret = dhd_init_logstrs_array(osh, &dhd->event_data);
+ if (ret == BCME_OK) {
+ dhd_init_static_strs_array(osh, &dhd->event_data, st_str_file_path, map_file_path);
+ dhd_init_static_strs_array(osh, &dhd->event_data, rom_st_str_file_path,
+ rom_map_file_path);
+ dhd_state |= DHD_ATTACH_LOGTRACE_INIT;
+ }
+#endif /* SHOW_LOGTRACE */
+
+ /* attach debug if support */
+ if (dhd_os_dbg_attach(&dhd->pub)) {
+ DHD_ERROR(("%s debug module attach failed\n", __FUNCTION__));
+ goto fail;
+ }
+#ifdef DEBUGABILITY
+#if !defined(OEM_ANDROID) && defined(SHOW_LOGTRACE)
+ /* enable verbose ring to support dump_trace_buf */
+ dhd_os_start_logging(&dhd->pub, FW_VERBOSE_RING_NAME, 3, 0, 0, 0);
+#endif /* !OEM_ANDROID && SHOW_LOGTRACE */
+
+#if !defined(OEM_ANDROID) && defined(BTLOG)
+ /* enable bt log ring to support dump_bt_log */
+ dhd_os_start_logging(&dhd->pub, BT_LOG_RING_NAME, 3, 0, 0, 0);
+#endif /* !OEM_ANDROID && BTLOG */
+#ifdef DBG_PKT_MON
+ dhd->pub.dbg->pkt_mon_lock = osl_spin_lock_init(dhd->pub.osh);
+#ifdef DBG_PKT_MON_INIT_DEFAULT
+ dhd_os_dbg_attach_pkt_monitor(&dhd->pub);
+#endif /* DBG_PKT_MON_INIT_DEFAULT */
+#endif /* DBG_PKT_MON */
+
+#endif /* DEBUGABILITY */
+
+#ifdef DHD_MEM_STATS
+ dhd->pub.mem_stats_lock = osl_spin_lock_init(dhd->pub.osh);
+ dhd->pub.txpath_mem = 0;
+ dhd->pub.rxpath_mem = 0;
+#endif /* DHD_MEM_STATS */
+
+#if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS)
+ dhd->pub.awdl_stats_lock = osl_spin_lock_init(dhd->pub.osh);
+#endif /* DHD_AWDL && AWDL_SLOT_STATS */
+
+#ifdef DHD_STATUS_LOGGING
+ dhd->pub.statlog = dhd_attach_statlog(&dhd->pub, MAX_STATLOG_ITEM,
+ MAX_STATLOG_REQ_ITEM, STATLOG_LOGBUF_LEN);
+ if (dhd->pub.statlog == NULL) {
+ DHD_ERROR(("%s: alloc statlog failed\n", __FUNCTION__));
+ }
+#endif /* DHD_STATUS_LOGGING */
+
+#ifdef DHD_LOG_DUMP
+ dhd_log_dump_init(&dhd->pub);
+#endif /* DHD_LOG_DUMP */
+#ifdef DHD_PKTDUMP_ROAM
+ dhd_dump_pkt_init(&dhd->pub);
+#endif /* DHD_PKTDUMP_ROAM */
+#ifdef DHD_PKT_LOGGING
+ dhd_os_attach_pktlog(&dhd->pub);
+#endif /* DHD_PKT_LOGGING */
+
+#ifdef WL_CFGVENDOR_SEND_HANG_EVENT
+ dhd->pub.hang_info = MALLOCZ(osh, VENDOR_SEND_HANG_EXT_INFO_LEN);
+ if (dhd->pub.hang_info == NULL) {
+ DHD_ERROR(("%s: alloc hang_info failed\n", __FUNCTION__));
+ }
+#endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
+ if (dhd_sta_pool_init(&dhd->pub, DHD_MAX_STA) != BCME_OK) {
+ DHD_ERROR(("%s: Initializing %u sta\n", __FUNCTION__, DHD_MAX_STA));
+ goto fail;
+ }
+
+#ifdef BCM_ROUTER_DHD
+#if defined(HNDCTF)
+ dhd->cih = ctf_attach(dhd->pub.osh, "dhd", &dhd_msg_level, dhd_ctf_detach, dhd);
+ if (!dhd->cih) {
+ DHD_ERROR(("%s: ctf_attach() failed\n", __FUNCTION__));
+ }
+#ifdef CTFPOOL
+ {
+ int poolsz = RXBUFPOOLSZ;
+ if (CTF_ENAB(dhd->cih) && (osl_ctfpool_init(dhd->pub.osh,
+ poolsz, RXBUFSZ + BCMEXTRAHDROOM) < 0)) {
+ DHD_ERROR(("%s: osl_ctfpool_init() failed\n", __FUNCTION__));
+ }
+ }
+#endif /* CTFPOOL */
+#endif /* HNDCTF */
+#endif /* BCM_ROUTER_DHD */
+
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+ dhd->tx_wq = alloc_workqueue("bcmdhd-tx-wq", WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
+ if (!dhd->tx_wq) {
+ DHD_ERROR(("%s: alloc_workqueue(bcmdhd-tx-wq) failed\n", __FUNCTION__));
+ goto fail;
+ }
+ dhd->rx_wq = alloc_workqueue("bcmdhd-rx-wq", WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
+ if (!dhd->rx_wq) {
+ DHD_ERROR(("%s: alloc_workqueue(bcmdhd-rx-wq) failed\n", __FUNCTION__));
+ destroy_workqueue(dhd->tx_wq);
+ dhd->tx_wq = NULL;
+ goto fail;
+ }
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+
+#ifndef BCMDBUS
+ /* Set up the watchdog timer */
+ init_timer_compat(&dhd->timer, dhd_watchdog, dhd);
+ dhd->default_wd_interval = dhd_watchdog_ms;
+
+ if (dhd_watchdog_prio >= 0) {
+ /* Initialize watchdog thread */
+ PROC_START(dhd_watchdog_thread, dhd, &dhd->thr_wdt_ctl, 0, "dhd_watchdog_thread");
+ if (dhd->thr_wdt_ctl.thr_pid < 0) {
+ goto fail;
+ }
+
+ } else {
+ dhd->thr_wdt_ctl.thr_pid = -1;
+ }
+
+#ifdef DHD_PCIE_RUNTIMEPM
+ /* Set up the runtime PM idle-count timer */
+ init_timer_compat(&dhd->rpm_timer, dhd_runtimepm, dhd);
+ dhd->rpm_timer_valid = FALSE;
+
+ dhd->thr_rpm_ctl.thr_pid = DHD_PID_KT_INVALID;
+ PROC_START(dhd_rpm_state_thread, dhd, &dhd->thr_rpm_ctl, 0, "dhd_rpm_state_thread");
+ if (dhd->thr_rpm_ctl.thr_pid < 0) {
+ goto fail;
+ }
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+#ifdef SHOW_LOGTRACE
+ skb_queue_head_init(&dhd->evt_trace_queue);
+
+ /* Create ring proc entries */
+ dhd_dbg_ring_proc_create(&dhd->pub);
+#endif /* SHOW_LOGTRACE */
+
+#ifdef BTLOG
+ skb_queue_head_init(&dhd->bt_log_queue);
+#endif /* BTLOG */
+
+#ifdef BT_OVER_PCIE
+ mutex_init(&dhd->quiesce_flr_lock);
+ mutex_init(&dhd->quiesce_lock);
+#endif
+
+ /* Set up the bottom half handler */
+ if (dhd_dpc_prio >= 0) {
+ /* Initialize DPC thread */
+ PROC_START(dhd_dpc_thread, dhd, &dhd->thr_dpc_ctl, 0, "dhd_dpc");
+ if (dhd->thr_dpc_ctl.thr_pid < 0) {
+ goto fail;
+ }
+ } else {
+ /* use tasklet for dpc */
+ tasklet_init(&dhd->tasklet, dhd_dpc, (ulong)dhd);
+ dhd->thr_dpc_ctl.thr_pid = -1;
+ }
+
+ if (dhd->rxthread_enabled) {
+ bzero(&dhd->pub.skbbuf[0], sizeof(void *) * MAXSKBPEND);
+ /* Initialize RXF thread */
+ PROC_START(dhd_rxf_thread, dhd, &dhd->thr_rxf_ctl, 0, "dhd_rxf");
+ if (dhd->thr_rxf_ctl.thr_pid < 0) {
+ goto fail;
+ }
+ }
+#endif /* !BCMDBUS */
+
+ dhd_state |= DHD_ATTACH_STATE_THREADS_CREATED;
+
+#if defined(CONFIG_PM_SLEEP)
+ if (!dhd_pm_notifier_registered) {
+ dhd_pm_notifier_registered = TRUE;
+ dhd->pm_notifier.notifier_call = dhd_pm_callback;
+ dhd->pm_notifier.priority = 10;
+ register_pm_notifier(&dhd->pm_notifier);
+ }
+
+#endif /* CONFIG_PM_SLEEP */
+
+#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
+ dhd->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 20;
+ dhd->early_suspend.suspend = dhd_early_suspend;
+ dhd->early_suspend.resume = dhd_late_resume;
+ register_early_suspend(&dhd->early_suspend);
+ dhd_state |= DHD_ATTACH_STATE_EARLYSUSPEND_DONE;
+#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
+
+#ifdef ARP_OFFLOAD_SUPPORT
+ dhd->pend_ipaddr = 0;
+ if (!dhd_inetaddr_notifier_registered) {
+ dhd_inetaddr_notifier_registered = TRUE;
+ register_inetaddr_notifier(&dhd_inetaddr_notifier);
+ }
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
+ if (!dhd_inet6addr_notifier_registered) {
+ dhd_inet6addr_notifier_registered = TRUE;
+ register_inet6addr_notifier(&dhd_inet6addr_notifier);
+ }
+#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
+ dhd->dhd_deferred_wq = dhd_deferred_work_init((void *)dhd);
+#if defined (OEM_ANDROID)
+ INIT_WORK(&dhd->dhd_hang_process_work, dhd_hang_process);
+#endif /* OEM_ANDROID */
+#ifdef DEBUG_CPU_FREQ
+ dhd->new_freq = alloc_percpu(int);
+ dhd->freq_trans.notifier_call = dhd_cpufreq_notifier;
+ cpufreq_register_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
+#endif
+#ifdef DHDTCPACK_SUPPRESS
+#ifdef BCMSDIO
+ dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_DELAYTX);
+#elif defined(BCMPCIE)
+ /* XXX: On PCIe-based Samsung Android projects, enable TCP ACK suppression
+ * only when throughput exceeds the threshold, in line with the rps_cpus setting.
+ */
+ dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_HOLD);
+#else
+ dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
+#endif /* BCMSDIO */
+#endif /* DHDTCPACK_SUPPRESS */
+
+#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
+#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
+
+#ifdef DHD_DEBUG_PAGEALLOC
+ register_page_corrupt_cb(dhd_page_corrupt_cb, &dhd->pub);
+#endif /* DHD_DEBUG_PAGEALLOC */
+
+ INIT_DELAYED_WORK(&dhd->dhd_dpc_dispatcher_work, dhd_dpc_tasklet_dispatcher_work);
+
+#if defined(DHD_LB)
+#if defined(DHD_LB_HOST_CTRL)
+ dhd->permitted_primary_cpu = FALSE;
+#endif /* DHD_LB_HOST_CTRL */
+ dhd_lb_set_default_cpus(dhd);
+ DHD_LB_STATS_INIT(&dhd->pub);
+
+ /* Initialize the CPU Masks */
+ if (dhd_cpumasks_init(dhd) == 0) {
+ /* Now we have the current CPU maps, run through candidacy */
+ dhd_select_cpu_candidacy(dhd);
+
+ /* Register the call backs to CPU Hotplug sub-system */
+ dhd_register_cpuhp_callback(dhd);
+
+ } else {
+ /*
+ * CPU mask initialization failed, so the candidacy algorithm
+ * will not run; load balancing is still honoured, using the
+ * CPUs statically assigned to each job at init time.
+ */
+ dhd->cpu_notifier.notifier_call = NULL;
+ DHD_ERROR(("%s():dhd_cpumasks_init failed CPUs for JOB would be static\n",
+ __FUNCTION__));
+ }
+
+#ifdef DHD_LB_TXP
+#ifdef DHD_LB_TXP_DEFAULT_ENAB
+ /* Turn ON the feature by default */
+ atomic_set(&dhd->lb_txp_active, 1);
+#else
+ /* Turn OFF the feature by default */
+ atomic_set(&dhd->lb_txp_active, 0);
+#endif /* DHD_LB_TXP_DEFAULT_ENAB */
+#endif /* DHD_LB_TXP */
+
+#ifdef DHD_LB_RXP
+ /* Turn ON the feature by default */
+ atomic_set(&dhd->lb_rxp_active, 1);
+#endif /* DHD_LB_RXP */
+
+ /* Initialize the Load Balancing Tasklets and Napi object */
+#if defined(DHD_LB_RXP)
+ __skb_queue_head_init(&dhd->rx_pend_queue);
+ skb_queue_head_init(&dhd->rx_napi_queue);
+ __skb_queue_head_init(&dhd->rx_process_queue);
+ /* Initialize the work that dispatches NAPI job to a given core */
+ INIT_WORK(&dhd->rx_napi_dispatcher_work, dhd_rx_napi_dispatcher_work);
+ DHD_INFO(("%s load balance init rx_napi_queue\n", __FUNCTION__));
+ /* Initialize the work that dispatches DPC tasklet to a given core */
+#endif /* DHD_LB_RXP */
+
+#if defined(DHD_LB_TXP)
+ INIT_WORK(&dhd->tx_dispatcher_work, dhd_tx_dispatcher_work);
+ skb_queue_head_init(&dhd->tx_pend_queue);
+ /* Initialize the work that dispatches TX job to a given core */
+ tasklet_init(&dhd->tx_tasklet,
+ dhd_lb_tx_handler, (ulong)(dhd));
+ DHD_INFO(("%s load balance init tx_pend_queue\n", __FUNCTION__));
+#endif /* DHD_LB_TXP */
+
+ dhd_state |= DHD_ATTACH_STATE_LB_ATTACH_DONE;
+#endif /* DHD_LB */
+
+#if defined(DNGL_AXI_ERROR_LOGGING) && defined(DHD_USE_WQ_FOR_DNGL_AXI_ERROR)
+ INIT_WORK(&dhd->axi_error_dispatcher_work, dhd_axi_error_dispatcher_fn);
+#endif /* DNGL_AXI_ERROR_LOGGING && DHD_USE_WQ_FOR_DNGL_AXI_ERROR */
+
+#ifdef BCMDBG
+ if (dhd_macdbg_attach(&dhd->pub) != BCME_OK) {
+ DHD_ERROR(("%s: dhd_macdbg_attach fail\n", __FUNCTION__));
+ goto fail;
+ }
+#endif /* BCMDBG */
+
+#ifdef REPORT_FATAL_TIMEOUTS
+ init_dhd_timeouts(&dhd->pub);
+#endif /* REPORT_FATAL_TIMEOUTS */
+#if defined(BCMPCIE)
+ dhd->pub.extended_trap_data = MALLOCZ(osh, BCMPCIE_EXT_TRAP_DATA_MAXLEN);
+ if (dhd->pub.extended_trap_data == NULL) {
+ DHD_ERROR(("%s: Failed to alloc extended_trap_data\n", __FUNCTION__));
+ }
+#ifdef DNGL_AXI_ERROR_LOGGING
+ dhd->pub.axi_err_dump = MALLOCZ(osh, sizeof(dhd_axi_error_dump_t));
+ if (dhd->pub.axi_err_dump == NULL) {
+ DHD_ERROR(("%s: Failed to alloc axi_err_dump\n", __FUNCTION__));
+ }
+#endif /* DNGL_AXI_ERROR_LOGGING */
+#endif /* BCMPCIE */
+
+#ifdef SHOW_LOGTRACE
+ if (dhd_init_logtrace_process(dhd) != BCME_OK) {
+ goto fail;
+ }
+#endif /* SHOW_LOGTRACE */
+
+#ifdef BTLOG
+ INIT_WORK(&dhd->bt_log_dispatcher_work, dhd_bt_log_process);
+#endif /* BTLOG */
+
+#ifdef EWP_EDL
+ INIT_DELAYED_WORK(&dhd->edl_dispatcher_work, dhd_edl_process_work);
+#endif
+
+ DHD_SSSR_MEMPOOL_INIT(&dhd->pub);
+ DHD_SSSR_REG_INFO_INIT(&dhd->pub);
+
+#ifdef DHD_SDTC_ETB_DUMP
+ dhd_sdtc_etb_mempool_init(&dhd->pub);
+#endif /* DHD_SDTC_ETB_DUMP */
+
+#ifdef EWP_EDL
+ if (host_edl_support) {
+ if (DHD_EDL_MEM_INIT(&dhd->pub) != BCME_OK) {
+ host_edl_support = FALSE;
+ }
+ }
+#endif /* EWP_EDL */
+
+ dhd_init_sock_flows_buf(dhd, dhd_watchdog_ms);
+
+ (void)dhd_sysfs_init(dhd);
+
+#ifdef WL_NATOE
+ /* Open Netlink socket for NF_CONNTRACK notifications */
+ dhd->pub.nfct = dhd_ct_open(&dhd->pub, NFNL_SUBSYS_CTNETLINK | NFNL_SUBSYS_CTNETLINK_EXP,
+ CT_ALL);
+#endif /* WL_NATOE */
+#ifdef GDB_PROXY
+ dhd->pub.gdb_proxy_nodeadman = nodeadman != 0;
+#endif /* GDB_PROXY */
+ dhd_state |= DHD_ATTACH_STATE_DONE;
+ dhd->dhd_state = dhd_state;
+
+ dhd_found++;
+
+#ifdef CSI_SUPPORT
+ dhd_csi_init(&dhd->pub);
+#endif /* CSI_SUPPORT */
+
+#ifdef DHD_FW_COREDUMP
+ /* Set memdump default values */
+#ifdef CUSTOMER_HW4_DEBUG
+ dhd->pub.memdump_enabled = DUMP_DISABLED;
+#elif defined(OEM_ANDROID)
+#ifdef DHD_COREDUMP
+ dhd->pub.memdump_enabled = DUMP_MEMFILE;
+#else
+ dhd->pub.memdump_enabled = DUMP_MEMFILE_BUGON;
+#endif /* DHD_COREDUMP */
+#else
+ dhd->pub.memdump_enabled = DUMP_MEMFILE;
+#endif /* CUSTOMER_HW4_DEBUG */
+ /* Check the memdump capability */
+ dhd_get_memdump_info(&dhd->pub);
+#endif /* DHD_FW_COREDUMP */
+
+#ifdef DHD_ERPOM
+ if (enable_erpom) {
+ pom_handler = &dhd->pub.pom_wlan_handler;
+ pom_handler->func_id = WLAN_FUNC_ID;
+ pom_handler->handler = (void *)g_dhd_pub;
+ pom_handler->power_off = dhd_wlan_power_off_handler;
+ pom_handler->power_on = dhd_wlan_power_on_handler;
+
+ dhd->pub.pom_func_register = NULL;
+ dhd->pub.pom_func_deregister = NULL;
+ dhd->pub.pom_toggle_reg_on = NULL;
+
+ dhd->pub.pom_func_register = symbol_get(pom_func_register);
+ dhd->pub.pom_func_deregister = symbol_get(pom_func_deregister);
+ dhd->pub.pom_toggle_reg_on = symbol_get(pom_toggle_reg_on);
+
+ symbol_put(pom_func_register);
+ symbol_put(pom_func_deregister);
+ symbol_put(pom_toggle_reg_on);
+
+ if (!dhd->pub.pom_func_register ||
+ !dhd->pub.pom_func_deregister ||
+ !dhd->pub.pom_toggle_reg_on) {
+ DHD_ERROR(("%s, enable_erpom enabled through module parameter but "
+ "POM is not loaded\n", __FUNCTION__));
+ ASSERT(0);
+ goto fail;
+ }
+ dhd->pub.pom_func_register(pom_handler);
+ dhd->pub.enable_erpom = TRUE;
+
+ }
+#endif /* DHD_ERPOM */
+
+#ifdef DHD_DUMP_MNGR
+ dhd->pub.dump_file_manage =
+ (dhd_dump_file_manage_t *)MALLOCZ(dhd->pub.osh, sizeof(dhd_dump_file_manage_t));
+ if (unlikely(!dhd->pub.dump_file_manage)) {
+ DHD_ERROR(("%s(): could not allocate memory for - "
+ "dhd_dump_file_manage_t\n", __FUNCTION__));
+ }
+#endif /* DHD_DUMP_MNGR */
+
+#ifdef BCMINTERNAL
+#ifdef DHD_FWTRACE
+ /* Attach the fwtrace */
+ if (dhd_fwtrace_attach(&dhd->pub) != 0) {
+ DHD_ERROR(("dhd_fwtrace_attach has failed\n"));
+ goto fail;
+ }
+#endif /* DHD_FWTRACE */
+#endif /* BCMINTERNAL */
+
+#ifdef RTT_SUPPORT
+ if (dhd_rtt_attach(&dhd->pub)) {
+ DHD_ERROR(("dhd_rtt_attach has failed\n"));
+ goto fail;
+ }
+#endif /* RTT_SUPPORT */
+
+#ifdef DHD_TX_PROFILE
+ if (dhd_tx_profile_attach(&dhd->pub) != BCME_OK) {
+ DHD_ERROR(("%s:\tdhd_tx_profile_attach has failed\n", __FUNCTION__));
+ goto fail;
+ }
+#endif /* defined(DHD_TX_PROFILE) */
+
+ return &dhd->pub;
+
+fail:
+ if (dhd_state >= DHD_ATTACH_STATE_DHD_ALLOC) {
+ DHD_TRACE(("%s: Calling dhd_detach dhd_state 0x%x &dhd->pub %p\n",
+ __FUNCTION__, dhd_state, &dhd->pub));
+ dhd->dhd_state = dhd_state;
+ dhd_detach(&dhd->pub);
+ dhd_free(&dhd->pub);
+ }
+
+dhd_null_flag:
+ return NULL;
+}
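+
+/*
+ * Note: dhd_attach() tracks progress through DHD_ATTACH_STATE_* bit flags.
+ * On any failure past DHD_ATTACH_STATE_DHD_ALLOC, the fail path stores the
+ * state reached in dhd->dhd_state so that dhd_detach()/dhd_free() unwind
+ * only the stages that actually completed.
+ */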
+
+int dhd_get_fw_mode(dhd_info_t *dhdinfo)
+{
+ if (strstr(dhdinfo->fw_path, "_apsta") != NULL)
+ return DHD_FLAG_HOSTAP_MODE;
+ if (strstr(dhdinfo->fw_path, "_p2p") != NULL)
+ return DHD_FLAG_P2P_MODE;
+ if (strstr(dhdinfo->fw_path, "_ibss") != NULL)
+ return DHD_FLAG_IBSS_MODE;
+ if (strstr(dhdinfo->fw_path, "_mfg") != NULL)
+ return DHD_FLAG_MFG_MODE;
+
+ return DHD_FLAG_STA_MODE;
+}
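+
+/*
+ * Example (hypothetical path): a fw_path of
+ * "/vendor/firmware/fw_bcmdhd_apsta.bin" contains "_apsta" and so maps to
+ * DHD_FLAG_HOSTAP_MODE, while a plain "fw_bcmdhd.bin" matches none of the
+ * tags and falls through to DHD_FLAG_STA_MODE.
+ */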
+
+int dhd_bus_get_fw_mode(dhd_pub_t *dhdp)
+{
+ return dhd_get_fw_mode(dhdp->info);
+}
+
+extern char * nvram_get(const char *name);
+bool dhd_update_fw_nv_path(dhd_info_t *dhdinfo)
+{
+ int fw_len;
+ int nv_len;
+ int clm_len;
+ int conf_len;
+ const char *fw = NULL;
+ const char *nv = NULL;
+ const char *clm = NULL;
+ const char *conf = NULL;
+#ifdef DHD_UCODE_DOWNLOAD
+ int uc_len;
+ const char *uc = NULL;
+#endif /* DHD_UCODE_DOWNLOAD */
+ wifi_adapter_info_t *adapter = dhdinfo->adapter;
+ int fw_path_len = sizeof(dhdinfo->fw_path);
+ int nv_path_len = sizeof(dhdinfo->nv_path);
+
+ /* Update the firmware and nvram paths. A path may come from the adapter info
+ * or from a module parameter. The path from adapter info is used for
+ * initialization only (it won't change).
+ *
+ * The firmware_path/nvram_path module parameters may be changed by the system
+ * at run time; when one changes we need to copy it to dhdinfo->fw_path. An
+ * Android private command may also change dhdinfo->fw_path. As such we clear
+ * the path in the module parameter after copying it, and won't update the path
+ * until the module parameter is set again (first character is not '\0').
+ */
+
+ /* set default firmware and nvram path for built-in type driver */
+// if (!dhd_download_fw_on_driverload) {
+#ifdef CONFIG_BCMDHD_FW_PATH
+ fw = VENDOR_PATH CONFIG_BCMDHD_FW_PATH;
+#endif /* CONFIG_BCMDHD_FW_PATH */
+#ifdef CONFIG_BCMDHD_NVRAM_PATH
+ nv = VENDOR_PATH CONFIG_BCMDHD_NVRAM_PATH;
+#endif /* CONFIG_BCMDHD_NVRAM_PATH */
+// }
+
+ /* check if we need to initialize the path */
+ if (dhdinfo->fw_path[0] == '\0') {
+ if (adapter && adapter->fw_path && adapter->fw_path[0] != '\0')
+ fw = adapter->fw_path;
+
+ }
+ if (dhdinfo->nv_path[0] == '\0') {
+ if (adapter && adapter->nv_path && adapter->nv_path[0] != '\0')
+ nv = adapter->nv_path;
+ }
+ if (dhdinfo->clm_path[0] == '\0') {
+ if (adapter && adapter->clm_path && adapter->clm_path[0] != '\0')
+ clm = adapter->clm_path;
+ }
+ if (dhdinfo->conf_path[0] == '\0') {
+ if (adapter && adapter->conf_path && adapter->conf_path[0] != '\0')
+ conf = adapter->conf_path;
+ }
+
+ /* Use module parameter if it is valid, EVEN IF the path has not been initialized
+ *
+ * TODO: need a solution for multi-chip, can't use the same firmware for all chips
+ */
+ if (firmware_path[0] != '\0')
+ fw = firmware_path;
+
+ if (nvram_path[0] != '\0')
+ nv = nvram_path;
+ if (clm_path[0] != '\0')
+ clm = clm_path;
+ if (config_path[0] != '\0')
+ conf = config_path;
+
+#ifdef DHD_UCODE_DOWNLOAD
+ if (ucode_path[0] != '\0')
+ uc = ucode_path;
+#endif /* DHD_UCODE_DOWNLOAD */
+
+#ifdef BCM_ROUTER_DHD
+ if (!fw) {
+ char var[32];
+
+ snprintf(var, sizeof(var), "firmware_path%d", dhdinfo->unit);
+ fw = nvram_get(var);
+ }
+ if (!nv) {
+ char var[32];
+
+ snprintf(var, sizeof(var), "nvram_path%d", dhdinfo->unit);
+ nv = nvram_get(var);
+ }
+ DHD_ERROR(("dhd:%d: fw path:%s nv path:%s\n", dhdinfo->unit, fw, nv));
+#endif
+
+ if (fw && fw[0] != '\0') {
+ fw_len = strlen(fw);
+ if (fw_len >= fw_path_len) {
+ DHD_ERROR(("fw path len exceeds max len of dhdinfo->fw_path\n"));
+ return FALSE;
+ }
+ strlcpy(dhdinfo->fw_path, fw, fw_path_len);
+ }
+ if (nv && nv[0] != '\0') {
+ nv_len = strlen(nv);
+ if (nv_len >= nv_path_len) {
+ DHD_ERROR(("nvram path len exceeds max len of dhdinfo->nv_path\n"));
+ return FALSE;
+ }
+ memset(dhdinfo->nv_path, 0, nv_path_len);
+ strlcpy(dhdinfo->nv_path, nv, nv_path_len);
+#ifdef DHD_USE_SINGLE_NVRAM_FILE
+ /* Remove "_net" or "_mfg" tag from current nvram path */
+ {
+ char *nvram_tag = "nvram_";
+ char *ext_tag = ".txt";
+ char *sp_nvram = strnstr(dhdinfo->nv_path, nvram_tag, nv_path_len);
+ bool valid_buf = sp_nvram && ((uint32)(sp_nvram + strlen(nvram_tag) +
+ strlen(ext_tag) - dhdinfo->nv_path) <= nv_path_len);
+ if (valid_buf) {
+ char *sp = sp_nvram + strlen(nvram_tag) - 1;
+ uint32 padding_size = (uint32)(dhdinfo->nv_path +
+ nv_path_len - sp);
+ memset(sp, 0, padding_size);
+ strncat(dhdinfo->nv_path, ext_tag, strlen(ext_tag));
+ nv_len = strlen(dhdinfo->nv_path);
+ DHD_INFO(("%s: new nvram path = %s\n",
+ __FUNCTION__, dhdinfo->nv_path));
+ } else if (sp_nvram) {
+ DHD_ERROR(("%s: buffer space for nvram path is not enough\n",
+ __FUNCTION__));
+ return FALSE;
+ } else {
+ DHD_ERROR(("%s: Couldn't find the nvram tag. current"
+ " nvram path = %s\n", __FUNCTION__, dhdinfo->nv_path));
+ }
+ }
+#endif /* DHD_USE_SINGLE_NVRAM_FILE */
+ }
+ if (clm && clm[0] != '\0') {
+ clm_len = strlen(clm);
+ if (clm_len >= sizeof(dhdinfo->clm_path)) {
+ DHD_ERROR(("clm path len exceeds max len of dhdinfo->clm_path\n"));
+ return FALSE;
+ }
+ strncpy(dhdinfo->clm_path, clm, sizeof(dhdinfo->clm_path));
+ if (dhdinfo->clm_path[clm_len-1] == '\n')
+ dhdinfo->clm_path[clm_len-1] = '\0';
+ }
+ if (conf && conf[0] != '\0') {
+ conf_len = strlen(conf);
+ if (conf_len >= sizeof(dhdinfo->conf_path)) {
+ DHD_ERROR(("config path len exceeds max len of dhdinfo->conf_path\n"));
+ return FALSE;
+ }
+ strncpy(dhdinfo->conf_path, conf, sizeof(dhdinfo->conf_path));
+ if (dhdinfo->conf_path[conf_len-1] == '\n')
+ dhdinfo->conf_path[conf_len-1] = '\0';
+ }
+#ifdef DHD_UCODE_DOWNLOAD
+ if (uc && uc[0] != '\0') {
+ uc_len = strlen(uc);
+ if (uc_len >= sizeof(dhdinfo->uc_path)) {
+ DHD_ERROR(("uc path len exceeds max len of dhdinfo->uc_path\n"));
+ return FALSE;
+ }
+ strlcpy(dhdinfo->uc_path, uc, sizeof(dhdinfo->uc_path));
+ }
+#endif /* DHD_UCODE_DOWNLOAD */
+
+#if 0
+ /* clear the path in module parameter */
+ if (dhd_download_fw_on_driverload) {
+ firmware_path[0] = '\0';
+ nvram_path[0] = '\0';
+ clm_path[0] = '\0';
+ config_path[0] = '\0';
+ }
+#endif
+#ifdef DHD_UCODE_DOWNLOAD
+ ucode_path[0] = '\0';
+ DHD_ERROR(("ucode path: %s\n", dhdinfo->uc_path));
+#endif /* DHD_UCODE_DOWNLOAD */
+
+#ifndef BCMEMBEDIMAGE
+ /* fw_path and nv_path are not mandatory for BCMEMBEDIMAGE */
+ if (dhdinfo->fw_path[0] == '\0') {
+ DHD_ERROR(("firmware path not found\n"));
+ return FALSE;
+ }
+ if (dhdinfo->nv_path[0] == '\0') {
+ DHD_ERROR(("nvram path not found\n"));
+ return FALSE;
+ }
+#endif /* BCMEMBEDIMAGE */
+
+ return TRUE;
+}
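+
+/*
+ * Path-resolution precedence implemented above, highest first:
+ *	1. firmware_path/nvram_path/clm_path/config_path module parameters;
+ *	2. per-adapter paths from platform data (initialization only);
+ *	3. CONFIG_BCMDHD_FW_PATH / CONFIG_BCMDHD_NVRAM_PATH build defaults
+ *	   (and, for BCM_ROUTER_DHD, per-unit NVRAM variables as a fallback).
+ */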
+
+#if defined(BT_OVER_SDIO)
+extern bool dhd_update_btfw_path(dhd_info_t *dhdinfo, char* btfw_path)
+{
+ int fw_len;
+ const char *fw = NULL;
+ wifi_adapter_info_t *adapter = dhdinfo->adapter;
+
+ /* Update the BT firmware path. The path may come from the adapter info or
+ * from a module parameter. The path from adapter info is used for
+ * initialization only (it won't change).
+ *
+ * The btfw_path module parameter may be changed by the system at run time;
+ * when it changes we need to copy it to dhdinfo->btfw_path. An Android
+ * private command may also change dhdinfo->btfw_path. As such we clear the
+ * path in the module parameter after copying it, and won't update the path
+ * until the module parameter is set again (first character is not '\0').
+ */
+
+ /* set default firmware and nvram path for built-in type driver */
+ if (!dhd_download_fw_on_driverload) {
+#ifdef CONFIG_BCMDHD_BTFW_PATH
+ fw = CONFIG_BCMDHD_BTFW_PATH;
+#endif /* CONFIG_BCMDHD_BTFW_PATH */
+ }
+
+ /* check if we need to initialize the path */
+ if (dhdinfo->btfw_path[0] == '\0') {
+ if (adapter && adapter->btfw_path && adapter->btfw_path[0] != '\0')
+ fw = adapter->btfw_path;
+ }
+
+ /* Use module parameter if it is valid, EVEN IF the path has not been initialized
+ */
+ if (btfw_path[0] != '\0')
+ fw = btfw_path;
+
+ if (fw && fw[0] != '\0') {
+ fw_len = strlen(fw);
+ if (fw_len >= sizeof(dhdinfo->btfw_path)) {
+ DHD_ERROR(("fw path len exceeds max len of dhdinfo->btfw_path\n"));
+ return FALSE;
+ }
+ strlcpy(dhdinfo->btfw_path, fw, sizeof(dhdinfo->btfw_path));
+ }
+
+ /* clear the path in module parameter */
+ btfw_path[0] = '\0';
+
+ if (dhdinfo->btfw_path[0] == '\0') {
+ DHD_ERROR(("bt firmware path not found\n"));
+ return FALSE;
+ }
+
+ return TRUE;
+}
+#endif /* defined (BT_OVER_SDIO) */
+
+#ifdef CUSTOMER_HW4_DEBUG
+bool dhd_validate_chipid(dhd_pub_t *dhdp)
+{
+ uint chipid = dhd_bus_chip_id(dhdp);
+ uint config_chipid;
+
+#ifdef BCM4389_CHIP_DEF
+ config_chipid = BCM4389_CHIP_ID;
+#elif defined(BCM4375_CHIP)
+ config_chipid = BCM4375_CHIP_ID;
+#elif defined(BCM4361_CHIP)
+ config_chipid = BCM4361_CHIP_ID;
+#elif defined(BCM4359_CHIP)
+ config_chipid = BCM4359_CHIP_ID;
+#elif defined(BCM4358_CHIP)
+ config_chipid = BCM4358_CHIP_ID;
+#elif defined(BCM4354_CHIP)
+ config_chipid = BCM4354_CHIP_ID;
+#elif defined(BCM4339_CHIP)
+ config_chipid = BCM4339_CHIP_ID;
+#elif defined(BCM4335_CHIP)
+ config_chipid = BCM4335_CHIP_ID;
+#elif defined(BCM43430_CHIP)
+ config_chipid = BCM43430_CHIP_ID;
+#elif defined(BCM43018_CHIP)
+ config_chipid = BCM43018_CHIP_ID;
+#elif defined(BCM43455_CHIP)
+ config_chipid = BCM4345_CHIP_ID;
+#elif defined(BCM43454_CHIP)
+ config_chipid = BCM43454_CHIP_ID;
+#elif defined(BCM43012_CHIP_)
+ config_chipid = BCM43012_CHIP_ID;
+#elif defined(BCM43013_CHIP)
+ config_chipid = BCM43012_CHIP_ID;
+#else
+ DHD_ERROR(("%s: Unknown chip id, if you use new chipset,"
+ " please add CONFIG_BCMXXXX into the Kernel and"
+ " BCMXXXX_CHIP definition into the DHD driver\n",
+ __FUNCTION__));
+ config_chipid = 0;
+
+ return FALSE;
+#endif /* BCM4389_CHIP_DEF */
+
+#if defined(BCM4354_CHIP) && defined(SUPPORT_MULTIPLE_REVISION)
+ if (chipid == BCM4350_CHIP_ID && config_chipid == BCM4354_CHIP_ID) {
+ return TRUE;
+ }
+#endif /* BCM4354_CHIP && SUPPORT_MULTIPLE_REVISION */
+#if defined(BCM4358_CHIP) && defined(SUPPORT_MULTIPLE_REVISION)
+ if (chipid == BCM43569_CHIP_ID && config_chipid == BCM4358_CHIP_ID) {
+ return TRUE;
+ }
+#endif /* BCM4358_CHIP && SUPPORT_MULTIPLE_REVISION */
+#if defined(BCM4359_CHIP)
+ if (chipid == BCM4355_CHIP_ID && config_chipid == BCM4359_CHIP_ID) {
+ return TRUE;
+ }
+#endif /* BCM4359_CHIP */
+#if defined(BCM4361_CHIP)
+ if (chipid == BCM4347_CHIP_ID && config_chipid == BCM4361_CHIP_ID) {
+ return TRUE;
+ }
+#endif /* BCM4361_CHIP */
+
+ return config_chipid == chipid;
+}
+#endif /* CUSTOMER_HW4_DEBUG */
+
+#if defined(BT_OVER_SDIO)
+wlan_bt_handle_t dhd_bt_get_pub_hndl(void)
+{
+ DHD_ERROR(("%s: g_dhd_pub %p\n", __FUNCTION__, g_dhd_pub));
+ /* assuming that dhd_pub_t type pointer is available from a global variable */
+ return (wlan_bt_handle_t) g_dhd_pub;
+} EXPORT_SYMBOL(dhd_bt_get_pub_hndl);
+
+int dhd_download_btfw(wlan_bt_handle_t handle, char* btfw_path)
+{
+ int ret = -1;
+ dhd_pub_t *dhdp = (dhd_pub_t *)handle;
+ dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
+
+ /* Download BT firmware image to the dongle */
+ if (dhd->pub.busstate == DHD_BUS_DATA && dhd_update_btfw_path(dhd, btfw_path)) {
+ DHD_INFO(("%s: download btfw from: %s\n", __FUNCTION__, dhd->btfw_path));
+ ret = dhd_bus_download_btfw(dhd->pub.bus, dhd->pub.osh, dhd->btfw_path);
+ if (ret < 0) {
+ DHD_ERROR(("%s: failed to download btfw from: %s\n",
+ __FUNCTION__, dhd->btfw_path));
+ return ret;
+ }
+ }
+ return ret;
+} EXPORT_SYMBOL(dhd_download_btfw);
+#endif /* defined (BT_OVER_SDIO) */
+
+#ifndef BCMDBUS
+int
+dhd_bus_start(dhd_pub_t *dhdp)
+{
+ int ret = -1;
+ dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
+ unsigned long flags;
+
+#if defined(DHD_DEBUG) && defined(BCMSDIO)
+ int fw_download_start = 0, fw_download_end = 0, f2_sync_start = 0, f2_sync_end = 0;
+#endif /* DHD_DEBUG && BCMSDIO */
+ ASSERT(dhd);
+
+ DHD_TRACE(("Enter %s:\n", __FUNCTION__));
+ dhdp->memdump_type = 0;
+ dhdp->dongle_trap_occured = 0;
+#if defined(BCMPCIE)
+ if (dhdp->extended_trap_data) {
+ memset(dhdp->extended_trap_data, 0, BCMPCIE_EXT_TRAP_DATA_MAXLEN);
+ }
+#endif /* BCMPCIE */
+#ifdef DHD_SSSR_DUMP
+ /* Flag to indicate sssr dump is collected */
+ dhdp->sssr_dump_collected = 0;
+#endif /* DHD_SSSR_DUMP */
+#ifdef BT_OVER_PCIE
+ dhd->pub.dongle_trap_due_to_bt = 0;
+#endif /* BT_OVER_PCIE */
+ dhdp->iovar_timeout_occured = 0;
+#ifdef PCIE_FULL_DONGLE
+ dhdp->d3ack_timeout_occured = 0;
+ dhdp->livelock_occured = 0;
+ dhdp->pktid_audit_failed = 0;
+#endif /* PCIE_FULL_DONGLE */
+ dhd->pub.iface_op_failed = 0;
+ dhd->pub.scan_timeout_occurred = 0;
+ dhd->pub.scan_busy_occurred = 0;
+ /* Retain BH-induced errors and clear the induced error during initialization */
+ if (dhd->pub.dhd_induce_error) {
+ dhd->pub.dhd_induce_bh_error = dhd->pub.dhd_induce_error;
+ }
+ dhd->pub.dhd_induce_error = DHD_INDUCE_ERROR_CLEAR;
+#ifdef DHD_PKTTS
+ dhd->latency = 0;
+#endif
+ dhd->pub.tput_test_done = FALSE;
+
+#if defined(BCMINTERNAL) && defined(BCMPCIE)
+ {
+ /* JIRA:SW4349-436 JIRA:HW4349-302 Workaround for the 4349a0 PCIe-D11 DMA bug */
+ uint chipid = dhd_bus_chip_id(&dhd->pub);
+ uint revid = dhd_bus_chiprev_id(&dhd->pub);
+
+ if ((chipid == BCM4349_CHIP_ID) && (revid == 1)) {
+ DHD_INFO(("%s:Detected 4349 A0 enable 16MB Mem restriction Flag",
+ __FUNCTION__));
+ osl_flag_set(dhd->pub.osh, OSL_PHYS_MEM_LESS_THAN_16MB);
+ }
+ }
+#endif /* BCMINTERNAL && BCMPCIE */
+ /* try to download image and nvram to the dongle */
+ if (dhd->pub.busstate == DHD_BUS_DOWN && dhd_update_fw_nv_path(dhd)) {
+ /* Indicate FW download is not yet complete */
+ dhd->pub.fw_download_status = FW_DOWNLOAD_IN_PROGRESS;
+ DHD_INFO(("%s download fw %s, nv %s, conf %s\n",
+ __FUNCTION__, dhd->fw_path, dhd->nv_path, dhd->conf_path));
+#if defined(DHD_DEBUG) && defined(BCMSDIO)
+ fw_download_start = OSL_SYSUPTIME();
+#endif /* DHD_DEBUG && BCMSDIO */
+ ret = dhd_bus_download_firmware(dhd->pub.bus, dhd->pub.osh,
+ dhd->fw_path, dhd->nv_path, dhd->clm_path, dhd->conf_path);
+#if defined(DHD_DEBUG) && defined(BCMSDIO)
+ fw_download_end = OSL_SYSUPTIME();
+#endif /* DHD_DEBUG && BCMSDIO */
+ if (ret < 0) {
+ DHD_ERROR(("%s: failed to download firmware %s\n",
+ __FUNCTION__, dhd->fw_path));
+ return ret;
+ }
+ /* Indicate FW Download has succeeded */
+ dhd->pub.fw_download_status = FW_DOWNLOAD_DONE;
+ }
+ if (dhd->pub.busstate != DHD_BUS_LOAD) {
+ return -ENETDOWN;
+ }
+
+#ifdef BCMSDIO
+ dhd_os_sdlock(dhdp);
+#endif /* BCMSDIO */
+
+ /* Start the watchdog timer */
+ dhd->pub.tickcnt = 0;
+ dhd_os_wd_timer(&dhd->pub, dhd_watchdog_ms);
+
+ /* Bring up the bus */
+ if ((ret = dhd_bus_init(&dhd->pub, FALSE)) != 0) {
+
+ DHD_ERROR(("%s, dhd_bus_init failed %d\n", __FUNCTION__, ret));
+#ifdef BCMSDIO
+ dhd_os_sdunlock(dhdp);
+#endif /* BCMSDIO */
+ return ret;
+ }
+#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) || defined(BCMPCIE_OOB_HOST_WAKE)
+ /* Host registration for OOB interrupt */
+ if (dhd_bus_oob_intr_register(dhdp)) {
+ /* deactivate timer and wait for the handler to finish */
+#if !defined(BCMPCIE_OOB_HOST_WAKE)
+ DHD_GENERAL_LOCK(&dhd->pub, flags);
+ dhd->wd_timer_valid = FALSE;
+ DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+ del_timer_sync(&dhd->timer);
+
+#endif /* !BCMPCIE_OOB_HOST_WAKE */
+ DHD_STOP_RPM_TIMER(&dhd->pub);
+
+ DHD_ERROR(("%s Host failed to register for OOB\n", __FUNCTION__));
+ DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
+ return -ENODEV;
+ }
+
+#if defined(BCMPCIE_OOB_HOST_WAKE)
+ dhd_bus_oob_intr_set(dhdp, TRUE);
+#else
+ /* Enable oob at firmware */
+ dhd_enable_oob_intr(dhd->pub.bus, TRUE);
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+#elif defined(FORCE_WOWLAN)
+ /* Enable oob at firmware */
+ dhd_enable_oob_intr(dhd->pub.bus, TRUE);
+#endif /* OOB_INTR_ONLY || BCMSPI_ANDROID || BCMPCIE_OOB_HOST_WAKE */
+#ifdef PCIE_FULL_DONGLE
+ {
+ /* max_h2d_rings includes H2D common rings */
+ uint32 max_h2d_rings = dhd_bus_max_h2d_queues(dhd->pub.bus);
+
+ DHD_ERROR(("%s: Initializing %u h2drings\n", __FUNCTION__,
+ max_h2d_rings));
+ if ((ret = dhd_flow_rings_init(&dhd->pub, max_h2d_rings)) != BCME_OK) {
+#ifdef BCMSDIO
+ dhd_os_sdunlock(dhdp);
+#endif /* BCMSDIO */
+ return ret;
+ }
+ }
+#endif /* PCIE_FULL_DONGLE */
+
+ /* set default value for now. Will be updated again in dhd_preinit_ioctls()
+ * after querying FW
+ */
+ dhdp->event_log_max_sets = NUM_EVENT_LOG_SETS;
+ dhdp->event_log_max_sets_queried = FALSE;
+
+ dhdp->smmu_fault_occurred = 0;
+#ifdef DNGL_AXI_ERROR_LOGGING
+ dhdp->axi_error = FALSE;
+#endif /* DNGL_AXI_ERROR_LOGGING */
+
+ /* Do protocol initialization necessary for IOCTL/IOVAR */
+ ret = dhd_prot_init(&dhd->pub);
+ if (unlikely(ret) != BCME_OK) {
+ DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
+ return ret;
+ }
+
+ /* If bus is not ready, can't come up */
+ if (dhd->pub.busstate != DHD_BUS_DATA) {
+ DHD_GENERAL_LOCK(&dhd->pub, flags);
+ dhd->wd_timer_valid = FALSE;
+ DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+ del_timer_sync(&dhd->timer);
+ DHD_ERROR(("%s failed bus is not ready\n", __FUNCTION__));
+ DHD_STOP_RPM_TIMER(&dhd->pub);
+#ifdef BCMSDIO
+ dhd_os_sdunlock(dhdp);
+#endif /* BCMSDIO */
+ DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
+ return -ENODEV;
+ }
+
+#ifdef BCMSDIO
+ dhd_os_sdunlock(dhdp);
+#endif /* BCMSDIO */
+
+ /* Bus is ready, query any dongle information */
+ /* XXX Since dhd_sync_with_dongle can sleep, should module count surround it? */
+#if defined(DHD_DEBUG) && defined(BCMSDIO)
+ f2_sync_start = OSL_SYSUPTIME();
+#endif /* DHD_DEBUG && BCMSDIO */
+ if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0) {
+ DHD_GENERAL_LOCK(&dhd->pub, flags);
+ dhd->wd_timer_valid = FALSE;
+ DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+ del_timer_sync(&dhd->timer);
+ DHD_ERROR(("%s failed to sync with dongle\n", __FUNCTION__));
+ DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
+ return ret;
+ }
+
+#ifdef BT_OVER_PCIE
+ /* Enable L1SS of RC and EP */
+ dhd_bus_l1ss_enable_rc_ep(dhdp->bus, TRUE);
+#endif /* BT_OVER_PCIE */
+
+#if defined(CONFIG_ARCH_EXYNOS) && defined(BCMPCIE)
+#if !defined(CONFIG_SOC_EXYNOS8890) && !defined(SUPPORT_EXYNOS7420)
+ /* XXX: JIRA SWWLAN-139454: Added L1ss enable
+ * after firmware download completion due to link down issue
+ * JIRA SWWLAN-142236: Amendment - Changed L1ss enable point
+ */
+ DHD_ERROR(("%s: Enable L1ss EP side\n", __FUNCTION__));
+#if defined(CONFIG_SOC_GS101)
+ exynos_pcie_rc_l1ss_ctrl(1, PCIE_L1SS_CTRL_WIFI, 1);
+#else
+ exynos_pcie_l1ss_ctrl(1, PCIE_L1SS_CTRL_WIFI);
+#endif /* CONFIG_SOC_GS101 */
+#endif /* !CONFIG_SOC_EXYNOS8890 && !SUPPORT_EXYNOS7420 */
+#endif /* CONFIG_ARCH_EXYNOS && BCMPCIE */
+#if defined(DHD_DEBUG) && defined(BCMSDIO)
+ f2_sync_end = OSL_SYSUPTIME();
+ DHD_ERROR(("Time taken for FW download and F2 ready is: %d msec\n",
+ (fw_download_end - fw_download_start) + (f2_sync_end - f2_sync_start)));
+#endif /* DHD_DEBUG && BCMSDIO */
+
+#ifdef ARP_OFFLOAD_SUPPORT
+ if (dhd->pend_ipaddr) {
+#ifdef AOE_IP_ALIAS_SUPPORT
+		/* XXX Assume the pending IP address belongs to the primary interface */
+ aoe_update_host_ipv4_table(&dhd->pub, dhd->pend_ipaddr, TRUE, 0);
+#endif /* AOE_IP_ALIAS_SUPPORT */
+ dhd->pend_ipaddr = 0;
+ }
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+#if defined(BCM_ROUTER_DHD)
+ bzero(&dhd->pub.dhd_tm_dwm_tbl, sizeof(dhd_trf_mgmt_dwm_tbl_t));
+#endif /* BCM_ROUTER_DHD */
+ return 0;
+}
+#endif /* !BCMDBUS */
+
+#ifdef WLTDLS
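+/*
+ * Enable/disable TDLS and, optionally, firmware-driven automatic link
+ * setup/teardown. A minimal (hypothetical) call enabling both:
+ *
+ *   ret = _dhd_tdls_enable(&dhd->pub, TRUE, TRUE, NULL);
+ *
+ * When auto_on is set, the idle-time, RSSI and packet-count threshold
+ * iovars below control when the firmware brings a direct link up or down.
+ */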
+int _dhd_tdls_enable(dhd_pub_t *dhd, bool tdls_on, bool auto_on, struct ether_addr *mac)
+{
+ uint32 tdls = tdls_on;
+ int ret = 0;
+ uint32 tdls_auto_op = 0;
+ uint32 tdls_idle_time = CUSTOM_TDLS_IDLE_MODE_SETTING;
+ int32 tdls_rssi_high = CUSTOM_TDLS_RSSI_THRESHOLD_HIGH;
+ int32 tdls_rssi_low = CUSTOM_TDLS_RSSI_THRESHOLD_LOW;
+ uint32 tdls_pktcnt_high = CUSTOM_TDLS_PCKTCNT_THRESHOLD_HIGH;
+ uint32 tdls_pktcnt_low = CUSTOM_TDLS_PCKTCNT_THRESHOLD_LOW;
+
+ BCM_REFERENCE(mac);
+ if (!FW_SUPPORTED(dhd, tdls))
+ return BCME_ERROR;
+
+ if (dhd->tdls_enable == tdls_on)
+ goto auto_mode;
+ ret = dhd_iovar(dhd, 0, "tdls_enable", (char *)&tdls, sizeof(tdls), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: tdls %d failed %d\n", __FUNCTION__, tdls, ret));
+ goto exit;
+ }
+ dhd->tdls_enable = tdls_on;
+auto_mode:
+
+ tdls_auto_op = auto_on;
+ ret = dhd_iovar(dhd, 0, "tdls_auto_op", (char *)&tdls_auto_op, sizeof(tdls_auto_op), NULL,
+ 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: tdls_auto_op failed %d\n", __FUNCTION__, ret));
+ goto exit;
+ }
+
+ if (tdls_auto_op) {
+ ret = dhd_iovar(dhd, 0, "tdls_idle_time", (char *)&tdls_idle_time,
+ sizeof(tdls_idle_time), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: tdls_idle_time failed %d\n", __FUNCTION__, ret));
+ goto exit;
+ }
+ ret = dhd_iovar(dhd, 0, "tdls_rssi_high", (char *)&tdls_rssi_high,
+ sizeof(tdls_rssi_high), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: tdls_rssi_high failed %d\n", __FUNCTION__, ret));
+ goto exit;
+ }
+ ret = dhd_iovar(dhd, 0, "tdls_rssi_low", (char *)&tdls_rssi_low,
+ sizeof(tdls_rssi_low), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: tdls_rssi_low failed %d\n", __FUNCTION__, ret));
+ goto exit;
+ }
+ ret = dhd_iovar(dhd, 0, "tdls_trigger_pktcnt_high", (char *)&tdls_pktcnt_high,
+ sizeof(tdls_pktcnt_high), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: tdls_trigger_pktcnt_high failed %d\n", __FUNCTION__, ret));
+ goto exit;
+ }
+ ret = dhd_iovar(dhd, 0, "tdls_trigger_pktcnt_low", (char *)&tdls_pktcnt_low,
+ sizeof(tdls_pktcnt_low), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: tdls_trigger_pktcnt_low failed %d\n", __FUNCTION__, ret));
+ goto exit;
+ }
+ }
+
+exit:
+ return ret;
+}
+
+int dhd_tdls_enable(struct net_device *dev, bool tdls_on, bool auto_on, struct ether_addr *mac)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ int ret = 0;
+ if (dhd)
+ ret = _dhd_tdls_enable(&dhd->pub, tdls_on, auto_on, mac);
+ else
+ ret = BCME_ERROR;
+ return ret;
+}
+
+int
+dhd_tdls_set_mode(dhd_pub_t *dhd, bool wfd_mode)
+{
+ int ret = 0;
+ bool auto_on = false;
+ uint32 mode = wfd_mode;
+
+#ifdef ENABLE_TDLS_AUTO_MODE
+ if (wfd_mode) {
+ auto_on = false;
+ } else {
+ auto_on = true;
+ }
+#else
+ auto_on = false;
+#endif /* ENABLE_TDLS_AUTO_MODE */
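+	/* tdls_wfd_mode is applied with TDLS torn down: disable TDLS first,
+	 * set the mode, then re-enable it below.
+	 */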
+ ret = _dhd_tdls_enable(dhd, false, auto_on, NULL);
+ if (ret < 0) {
+ DHD_ERROR(("Disable tdls_auto_op failed. %d\n", ret));
+ return ret;
+ }
+
+ ret = dhd_iovar(dhd, 0, "tdls_wfd_mode", (char *)&mode, sizeof(mode), NULL, 0, TRUE);
+ if ((ret < 0) && (ret != BCME_UNSUPPORTED)) {
+		DHD_ERROR(("%s: tdls_wfd_mode failed %d\n", __FUNCTION__, ret));
+ return ret;
+ }
+
+ ret = _dhd_tdls_enable(dhd, true, auto_on, NULL);
+ if (ret < 0) {
+ DHD_ERROR(("enable tdls_auto_op failed. %d\n", ret));
+ return ret;
+ }
+
+ dhd->tdls_mode = mode;
+ return ret;
+}
+#ifdef PCIE_FULL_DONGLE
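+/* Maintain the TDLS peer table, a singly linked list protected by
+ * tdls_lock, so that per-peer flow rings can be created on connect and
+ * torn down on disconnect via dhd_flow_rings_delete_for_peer().
+ */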
+int dhd_tdls_update_peer_info(dhd_pub_t *dhdp, wl_event_msg_t *event)
+{
+ dhd_pub_t *dhd_pub = dhdp;
+ tdls_peer_node_t *cur = dhd_pub->peer_tbl.node;
+ tdls_peer_node_t *new = NULL, *prev = NULL;
+ int ifindex = dhd_ifname2idx(dhd_pub->info, event->ifname);
+ uint8 *da = (uint8 *)&event->addr.octet[0];
+ bool connect = FALSE;
+ uint32 reason = ntoh32(event->reason);
+ unsigned long flags;
+
+ /* No handling needed for peer discovered reason */
+ if (reason == WLC_E_TDLS_PEER_DISCOVERED) {
+ return BCME_ERROR;
+ }
+ if (reason == WLC_E_TDLS_PEER_CONNECTED)
+ connect = TRUE;
+ else if (reason == WLC_E_TDLS_PEER_DISCONNECTED)
+ connect = FALSE;
+ else
+ {
+ DHD_ERROR(("%s: TDLS Event reason is unknown\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+ if (ifindex == DHD_BAD_IF)
+ return BCME_ERROR;
+
+ if (connect) {
+ while (cur != NULL) {
+ if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
+				DHD_ERROR(("%s: TDLS peer already exists %d\n",
+					__FUNCTION__, __LINE__));
+ return BCME_ERROR;
+ }
+ cur = cur->next;
+ }
+
+ new = MALLOC(dhd_pub->osh, sizeof(tdls_peer_node_t));
+ if (new == NULL) {
+ DHD_ERROR(("%s: Failed to allocate memory\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+ memcpy(new->addr, da, ETHER_ADDR_LEN);
+ DHD_TDLS_LOCK(&dhdp->tdls_lock, flags);
+ new->next = dhd_pub->peer_tbl.node;
+ dhd_pub->peer_tbl.node = new;
+ dhd_pub->peer_tbl.tdls_peer_count++;
+ DHD_ERROR(("%s: Add TDLS peer, count=%d " MACDBG "\n",
+ __FUNCTION__, dhd_pub->peer_tbl.tdls_peer_count,
+ MAC2STRDBG((char *)da)));
+ DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags);
+
+ } else {
+ while (cur != NULL) {
+ if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
+ dhd_flow_rings_delete_for_peer(dhd_pub, (uint8)ifindex, da);
+ DHD_TDLS_LOCK(&dhdp->tdls_lock, flags);
+ if (prev)
+ prev->next = cur->next;
+ else
+ dhd_pub->peer_tbl.node = cur->next;
+ MFREE(dhd_pub->osh, cur, sizeof(tdls_peer_node_t));
+ dhd_pub->peer_tbl.tdls_peer_count--;
+ DHD_ERROR(("%s: Remove TDLS peer, count=%d " MACDBG "\n",
+ __FUNCTION__, dhd_pub->peer_tbl.tdls_peer_count,
+ MAC2STRDBG((char *)da)));
+ DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags);
+ return BCME_OK;
+ }
+ prev = cur;
+ cur = cur->next;
+ }
+ DHD_ERROR(("%s: TDLS Peer Entry Not found\n", __FUNCTION__));
+ }
+ return BCME_OK;
+}
+#endif /* PCIE_FULL_DONGLE */
+#endif /* BCMDBUS */
+
+bool dhd_is_concurrent_mode(dhd_pub_t *dhd)
+{
+ if (!dhd)
+ return FALSE;
+
+ if (dhd->op_mode & DHD_FLAG_CONCURR_MULTI_CHAN_MODE)
+ return TRUE;
+ else if ((dhd->op_mode & DHD_FLAG_CONCURR_SINGLE_CHAN_MODE) ==
+ DHD_FLAG_CONCURR_SINGLE_CHAN_MODE)
+ return TRUE;
+ else
+ return FALSE;
+}
+#if defined(OEM_ANDROID) && !defined(AP) && defined(WLP2P)
+/* From the Android JellyBean release onwards, concurrent mode is enabled by default and the
+ * firmware name would be fw_bcmdhd.bin. So we need to determine whether P2P is enabled in the
+ * STA firmware and accordingly enable concurrent mode (apply P2P settings). SoftAP firmware
+ * would still be named fw_bcmdhd_apsta.
+ */
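+/* Returns a bitmask of DHD_FLAG_CONCURR_* / DHD_FLAG_RSDB_MODE /
+ * DHD_FLAG_MP2P_MODE capabilities, or 0 when P2P concurrency cannot be
+ * used. For example, p2p-capable firmware that also reports "vsdb" would
+ * yield DHD_FLAG_CONCURR_SINGLE_CHAN_MODE | DHD_FLAG_CONCURR_MULTI_CHAN_MODE.
+ */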
+uint32
+dhd_get_concurrent_capabilites(dhd_pub_t *dhd)
+{
+ int32 ret = 0;
+ char buf[WLC_IOCTL_SMLEN];
+ bool mchan_supported = FALSE;
+	/* If dhd->op_mode is already set for HOSTAP or manufacturing
+	 * test mode, we will use that mode as it is
+	 */
+ if (dhd->op_mode & (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))
+ return 0;
+ if (FW_SUPPORTED(dhd, vsdb)) {
+ mchan_supported = TRUE;
+ }
+ if (!FW_SUPPORTED(dhd, p2p)) {
+ DHD_TRACE(("Chip does not support p2p\n"));
+ return 0;
+ } else {
+		/* Chip supports p2p, but check whether p2p is really implemented in the firmware */
+ memset(buf, 0, sizeof(buf));
+ ret = dhd_iovar(dhd, 0, "p2p", NULL, 0, (char *)&buf,
+ sizeof(buf), FALSE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: Get P2P failed (error=%d)\n", __FUNCTION__, ret));
+ return 0;
+ } else {
+ if (buf[0] == 1) {
+ /* By default, chip supports single chan concurrency,
+ * now lets check for mchan
+ */
+ ret = DHD_FLAG_CONCURR_SINGLE_CHAN_MODE;
+ if (mchan_supported)
+ ret |= DHD_FLAG_CONCURR_MULTI_CHAN_MODE;
+ if (FW_SUPPORTED(dhd, rsdb)) {
+ ret |= DHD_FLAG_RSDB_MODE;
+ }
+#ifdef WL_SUPPORT_MULTIP2P
+ if (FW_SUPPORTED(dhd, mp2p)) {
+ ret |= DHD_FLAG_MP2P_MODE;
+ }
+#endif /* WL_SUPPORT_MULTIP2P */
+#if defined(WL_ENABLE_P2P_IF) || defined(WL_CFG80211_P2P_DEV_IF)
+ return ret;
+#else
+ return 0;
+#endif /* WL_ENABLE_P2P_IF || WL_CFG80211_P2P_DEV_IF */
+ }
+ }
+ }
+ return 0;
+}
+#endif /* defined(OEM_ANDROID) && !defined(AP) && defined(WLP2P) */
+
+#ifdef SUPPORT_AP_POWERSAVE
+#define RXCHAIN_PWRSAVE_PPS 10
+#define RXCHAIN_PWRSAVE_QUIET_TIME 10
+#define RXCHAIN_PWRSAVE_STAS_ASSOC_CHECK 0
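+/* Tunables for the rxchain_pwrsave_* iovars: PPS is the packets-per-second
+ * threshold below which the AP rx chain may power down, QUIET_TIME is how
+ * long traffic must stay below that threshold, and STAS_ASSOC_CHECK
+ * controls whether associated STAs are checked first (meanings assumed
+ * from the iovar names).
+ */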
+int dhd_set_ap_powersave(dhd_pub_t *dhdp, int ifidx, int enable)
+{
+ int32 pps = RXCHAIN_PWRSAVE_PPS;
+ int32 quiet_time = RXCHAIN_PWRSAVE_QUIET_TIME;
+ int32 stas_assoc_check = RXCHAIN_PWRSAVE_STAS_ASSOC_CHECK;
+ int ret;
+
+ if (enable) {
+ ret = dhd_iovar(dhdp, 0, "rxchain_pwrsave_enable", (char *)&enable, sizeof(enable),
+ NULL, 0, TRUE);
+ if (ret != BCME_OK) {
+			DHD_ERROR(("Failed to enable AP power save\n"));
+ }
+ ret = dhd_iovar(dhdp, 0, "rxchain_pwrsave_pps", (char *)&pps, sizeof(pps), NULL, 0,
+ TRUE);
+ if (ret != BCME_OK) {
+			DHD_ERROR(("Failed to set pps\n"));
+ }
+ ret = dhd_iovar(dhdp, 0, "rxchain_pwrsave_quiet_time", (char *)&quiet_time,
+ sizeof(quiet_time), NULL, 0, TRUE);
+ if (ret != BCME_OK) {
+			DHD_ERROR(("Failed to set quiet time\n"));
+ }
+ ret = dhd_iovar(dhdp, 0, "rxchain_pwrsave_stas_assoc_check",
+ (char *)&stas_assoc_check, sizeof(stas_assoc_check), NULL, 0, TRUE);
+ if (ret != BCME_OK) {
+			DHD_ERROR(("Failed to set stas assoc check\n"));
+ }
+ } else {
+ ret = dhd_iovar(dhdp, 0, "rxchain_pwrsave_enable", (char *)&enable, sizeof(enable),
+ NULL, 0, TRUE);
+ if (ret != BCME_OK) {
+			DHD_ERROR(("Failed to disable AP power save\n"));
+ }
+ }
+
+ return 0;
+}
+#endif /* SUPPORT_AP_POWERSAVE */
+
+#if defined(READ_CONFIG_FROM_FILE)
+#include <linux/fs.h>
+#include <linux/ctype.h>
+
+#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
+bool PM_control = TRUE;
+
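+/*
+ * Apply one name=value pair read from the config file. Hypothetical
+ * example lines this parser accepts:
+ *
+ *   country=US/0
+ *   PM=0
+ *   band=a
+ *   roam_delta=10
+ *
+ * Unrecognized names fall through to a generic integer iovar set.
+ */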
+static int dhd_preinit_proc(dhd_pub_t *dhd, int ifidx, char *name, char *value)
+{
+ int var_int;
+ wl_country_t cspec = {{0}, -1, {0}};
+ char *revstr;
+ char *endptr = NULL;
+#ifdef ROAM_AP_ENV_DETECTION
+ int roam_env_mode = AP_ENV_INDETERMINATE;
+#endif /* ROAM_AP_ENV_DETECTION */
+
+ if (!strcmp(name, "country")) {
+ revstr = strchr(value, '/');
+#if defined(DHD_BLOB_EXISTENCE_CHECK)
+ if (dhd->is_blob) {
+ cspec.rev = 0;
+ memcpy(cspec.country_abbrev, value, WLC_CNTRY_BUF_SZ);
+ memcpy(cspec.ccode, value, WLC_CNTRY_BUF_SZ);
+ } else
+#endif /* DHD_BLOB_EXISTENCE_CHECK */
+ {
+ if (revstr) {
+ cspec.rev = strtoul(revstr + 1, &endptr, 10);
+ memcpy(cspec.country_abbrev, value, WLC_CNTRY_BUF_SZ);
+ cspec.country_abbrev[2] = '\0';
+ memcpy(cspec.ccode, cspec.country_abbrev, WLC_CNTRY_BUF_SZ);
+ } else {
+ cspec.rev = -1;
+ memcpy(cspec.country_abbrev, value, WLC_CNTRY_BUF_SZ);
+ memcpy(cspec.ccode, value, WLC_CNTRY_BUF_SZ);
+ get_customized_country_code(dhd->info->adapter,
+ (char *)&cspec.country_abbrev, &cspec);
+ }
+
+ }
+		DHD_ERROR(("config country code : %s, rev : %d\n",
+			cspec.country_abbrev, cspec.rev));
+ return dhd_iovar(dhd, 0, "country", (char*)&cspec, sizeof(cspec), NULL, 0, TRUE);
+ } else if (!strcmp(name, "roam_scan_period")) {
+ var_int = (int)simple_strtol(value, NULL, 0);
+ return dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_SCAN_PERIOD,
+ &var_int, sizeof(var_int), TRUE, 0);
+ } else if (!strcmp(name, "roam_delta")) {
+ struct {
+ int val;
+ int band;
+ } x;
+ x.val = (int)simple_strtol(value, NULL, 0);
+ /* x.band = WLC_BAND_AUTO; */
+ x.band = WLC_BAND_ALL;
+ return dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_DELTA, &x, sizeof(x), TRUE, 0);
+ } else if (!strcmp(name, "roam_trigger")) {
+ int ret = 0;
+ int roam_trigger[2];
+
+ roam_trigger[0] = (int)simple_strtol(value, NULL, 0);
+ roam_trigger[1] = WLC_BAND_ALL;
+ ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_TRIGGER, &roam_trigger,
+ sizeof(roam_trigger), TRUE, 0);
+
+#ifdef ROAM_AP_ENV_DETECTION
+ if (roam_trigger[0] == WL_AUTO_ROAM_TRIGGER) {
+ if (dhd_iovar(dhd, 0, "roam_env_detection",
+ (char *)&roam_env_mode, sizeof(roam_env_mode), NULL,
+ 0, TRUE) == BCME_OK) {
+ dhd->roam_env_detection = TRUE;
+ } else {
+ dhd->roam_env_detection = FALSE;
+ }
+ }
+#endif /* ROAM_AP_ENV_DETECTION */
+ return ret;
+ } else if (!strcmp(name, "PM")) {
+ int ret = 0;
+ var_int = (int)simple_strtol(value, NULL, 0);
+
+ ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_PM,
+ &var_int, sizeof(var_int), TRUE, 0);
+
+#if defined(DHD_PM_CONTROL_FROM_FILE) || defined(CONFIG_PM_LOCK)
+ if (var_int == 0) {
+ g_pm_control = TRUE;
+ printk("%s var_int=%d don't control PM\n", __func__, var_int);
+ } else {
+ g_pm_control = FALSE;
+ printk("%s var_int=%d do control PM\n", __func__, var_int);
+ }
+#endif
+
+ return ret;
+ }
+ else if (!strcmp(name, "band")) {
+ int ret;
+ if (!strcmp(value, "auto"))
+ var_int = WLC_BAND_AUTO;
+ else if (!strcmp(value, "a"))
+ var_int = WLC_BAND_5G;
+ else if (!strcmp(value, "b"))
+ var_int = WLC_BAND_2G;
+ else if (!strcmp(value, "all"))
+ var_int = WLC_BAND_ALL;
+ else {
+			printk(" set band value should be one of auto, a, b or all\n");
+ var_int = WLC_BAND_AUTO;
+ }
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_BAND, &var_int,
+ sizeof(var_int), TRUE, 0)) < 0)
+ printk(" set band err=%d\n", ret);
+ return ret;
+ } else if (!strcmp(name, "cur_etheraddr")) {
+ struct ether_addr ea;
+ int ret;
+
+ bcm_ether_atoe(value, &ea);
+
+ ret = memcmp(&ea.octet, dhd->mac.octet, ETHER_ADDR_LEN);
+ if (ret == 0) {
+ DHD_ERROR(("%s: Same Macaddr\n", __FUNCTION__));
+ return 0;
+ }
+
+ DHD_ERROR(("%s: Change Macaddr = %02X:%02X:%02X:%02X:%02X:%02X\n", __FUNCTION__,
+ ea.octet[0], ea.octet[1], ea.octet[2],
+ ea.octet[3], ea.octet[4], ea.octet[5]));
+
+ ret = dhd_iovar(dhd, 0, "cur_etheraddr", (char*)&ea, ETHER_ADDR_LEN, NULL, 0, TRUE);
+ if (ret < 0) {
+			DHD_ERROR(("%s: can't set MAC address, error=%d\n", __FUNCTION__, ret));
+ return ret;
+ } else {
+ memcpy(dhd->mac.octet, (void *)&ea, ETHER_ADDR_LEN);
+ return ret;
+ }
+ } else if (!strcmp(name, "lpc")) {
+ int ret = 0;
+ var_int = (int)simple_strtol(value, NULL, 0);
+ if (dhd_wl_ioctl_cmd(dhd, WLC_DOWN, NULL, 0, TRUE, 0) < 0) {
+ DHD_ERROR(("%s: wl down failed\n", __FUNCTION__));
+ }
+ ret = dhd_iovar(dhd, 0, "lpc", (char *)&var_int, sizeof(var_int), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s Set lpc failed %d\n", __FUNCTION__, ret));
+ }
+ if (dhd_wl_ioctl_cmd(dhd, WLC_UP, NULL, 0, TRUE, 0) < 0) {
+ DHD_ERROR(("%s: wl up failed\n", __FUNCTION__));
+ }
+ return ret;
+ } else if (!strcmp(name, "vht_features")) {
+ int ret = 0;
+ var_int = (int)simple_strtol(value, NULL, 0);
+
+ if (dhd_wl_ioctl_cmd(dhd, WLC_DOWN, NULL, 0, TRUE, 0) < 0) {
+ DHD_ERROR(("%s: wl down failed\n", __FUNCTION__));
+ }
+ ret = dhd_iovar(dhd, 0, "vht_features", (char *)&var_int, sizeof(var_int), NULL, 0,
+ TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s Set vht_features failed %d\n", __FUNCTION__, ret));
+ }
+ if (dhd_wl_ioctl_cmd(dhd, WLC_UP, NULL, 0, TRUE, 0) < 0) {
+ DHD_ERROR(("%s: wl up failed\n", __FUNCTION__));
+ }
+ return ret;
+ } else {
+ /* wlu_iovar_setint */
+ var_int = (int)simple_strtol(value, NULL, 0);
+
+ /* Setup timeout bcm_timeout from dhd driver 4.217.48 */
+
+ DHD_INFO(("%s:[%s]=[%d]\n", __FUNCTION__, name, var_int));
+
+ return dhd_iovar(dhd, 0, name, (char *)&var_int,
+ sizeof(var_int), NULL, 0, TRUE);
+ }
+
+ return 0;
+}
+
+static int dhd_preinit_config(dhd_pub_t *dhd, int ifidx)
+{
+ mm_segment_t old_fs;
+ struct kstat stat;
+ struct file *fp = NULL;
+	int len;	/* signed: dhd_os_get_image_block() may return a negative error */
+ char *buf = NULL, *p, *name, *value;
+ int ret = 0;
+ char *config_path;
+
+ config_path = CONFIG_BCMDHD_CONFIG_PATH;
+
+ if (!config_path)
+ {
+		printk(KERN_ERR "config_path can't be read\n");
+ return 0;
+ }
+
+ old_fs = get_fs();
+ set_fs(get_ds());
+ if ((ret = vfs_stat(config_path, &stat))) {
+ set_fs(old_fs);
+ printk(KERN_ERR "%s: Failed to get information (%d)\n",
+ config_path, ret);
+ return ret;
+ }
+ set_fs(old_fs);
+
+ if (!(buf = MALLOC(dhd->osh, stat.size + 1))) {
+ printk(KERN_ERR "Failed to allocate memory %llu bytes\n", stat.size);
+ return -ENOMEM;
+ }
+ memset(buf, 0x0, stat.size + 1);
+ printk("dhd_preinit_config : config path : %s \n", config_path);
+
+ if (!(fp = dhd_os_open_image1(dhd, config_path)) ||
+ (len = dhd_os_get_image_block(buf, stat.size, fp)) < 0)
+ goto err;
+
+ if (len != stat.size) {
+		printk("dhd_preinit_config : Error - read length mismatch, len = %d\n", len);
+ goto err;
+ }
+
+ buf[stat.size] = '\0';
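+	/* Walk the buffer token by token: each whitespace-separated token of
+	 * the form name=value is split at the '=' and handed to
+	 * dhd_preinit_proc().
+	 */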
+ for (p = buf; *p; p++) {
+ if (isspace(*p))
+ continue;
+ for (name = p++; *p && !isspace(*p); p++) {
+ if (*p == '=') {
+ *p = '\0';
+ p++;
+ for (value = p; *p && !isspace(*p); p++);
+ *p = '\0';
+ if ((ret = dhd_preinit_proc(dhd, ifidx, name, value)) < 0) {
+ printk(KERN_ERR "%s: %s=%s\n",
+ bcmerrorstr(ret), name, value);
+ }
+ break;
+ }
+ }
+ }
+ ret = 0;
+
+out:
+ if (fp)
+ dhd_os_close_image1(dhd, fp);
+ if (buf)
+ MFREE(dhd->osh, buf, stat.size+1);
+ return ret;
+
+err:
+ ret = -1;
+ goto out;
+}
+#endif /* READ_CONFIG_FROM_FILE */
+
+#ifdef WLAIBSS
+int
+dhd_preinit_aibss_ioctls(dhd_pub_t *dhd, char *iov_buf_smlen)
+{
+ int ret = BCME_OK;
+ aibss_bcn_force_config_t bcn_config;
+ uint32 aibss;
+#ifdef WLAIBSS_PS
+ uint32 aibss_ps;
+ s32 atim;
+#endif /* WLAIBSS_PS */
+ int ibss_coalesce;
+
+ aibss = 1;
+ ret = dhd_iovar(dhd, 0, "aibss", (char *)&aibss, sizeof(aibss), NULL, 0, TRUE);
+ if (ret < 0) {
+ if (ret == BCME_UNSUPPORTED) {
+			DHD_ERROR(("%s aibss, UNSUPPORTED\n", __FUNCTION__));
+ return BCME_OK;
+ } else {
+ DHD_ERROR(("%s Set aibss to %d err(%d)\n", __FUNCTION__, aibss, ret));
+ return ret;
+ }
+ }
+
+#ifdef WLAIBSS_PS
+ aibss_ps = 1;
+ ret = dhd_iovar(dhd, 0, "aibss_ps", (char *)&aibss_ps, sizeof(aibss_ps), NULL, 0, TRUE);
+ if (ret < 0) {
+		DHD_ERROR(("%s Set aibss PS to %d failed %d\n",
+			__FUNCTION__, aibss_ps, ret));
+ return ret;
+ }
+
+ atim = 10;
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ATIM,
+ (char *)&atim, sizeof(atim), TRUE, 0)) < 0) {
+ DHD_ERROR(("%s Enable custom IBSS ATIM mode failed %d\n",
+ __FUNCTION__, ret));
+ return ret;
+ }
+#endif /* WLAIBSS_PS */
+
+ memset(&bcn_config, 0, sizeof(bcn_config));
+ bcn_config.initial_min_bcn_dur = AIBSS_INITIAL_MIN_BCN_DUR;
+ bcn_config.min_bcn_dur = AIBSS_MIN_BCN_DUR;
+ bcn_config.bcn_flood_dur = AIBSS_BCN_FLOOD_DUR;
+ bcn_config.version = AIBSS_BCN_FORCE_CONFIG_VER_0;
+ bcn_config.len = sizeof(bcn_config);
+
+ ret = dhd_iovar(dhd, 0, "aibss_bcn_force_config", (char *)&bcn_config,
+ sizeof(aibss_bcn_force_config_t), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s Set aibss_bcn_force_config to %d, %d, %d failed %d\n",
+ __FUNCTION__, AIBSS_INITIAL_MIN_BCN_DUR, AIBSS_MIN_BCN_DUR,
+ AIBSS_BCN_FLOOD_DUR, ret));
+ return ret;
+ }
+
+ ibss_coalesce = IBSS_COALESCE_DEFAULT;
+ ret = dhd_iovar(dhd, 0, "ibss_coalesce_allowed", (char *)&ibss_coalesce,
+ sizeof(ibss_coalesce), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s Set ibss_coalesce_allowed failed %d\n",
+ __FUNCTION__, ret));
+ return ret;
+ }
+
+ dhd->op_mode |= DHD_FLAG_IBSS_MODE;
+ return BCME_OK;
+}
+#endif /* WLAIBSS */
+
+#if defined(WLADPS) || defined(WLADPS_PRIVATE_CMD)
+#ifdef WL_BAM
+static int
+dhd_check_adps_bad_ap(dhd_pub_t *dhd)
+{
+ struct net_device *ndev;
+ struct bcm_cfg80211 *cfg;
+ struct wl_profile *profile;
+ struct ether_addr bssid;
+
+ if (!dhd_is_associated(dhd, 0, NULL)) {
+ DHD_ERROR(("%s - not associated\n", __FUNCTION__));
+ return BCME_OK;
+ }
+
+ ndev = dhd_linux_get_primary_netdev(dhd);
+ if (!ndev) {
+ DHD_ERROR(("%s: Cannot find primary netdev\n", __FUNCTION__));
+ return -ENODEV;
+ }
+
+ cfg = wl_get_cfg(ndev);
+ if (!cfg) {
+ DHD_ERROR(("%s: Cannot find cfg\n", __FUNCTION__));
+ return -EINVAL;
+ }
+
+ profile = wl_get_profile_by_netdev(cfg, ndev);
+ memcpy(bssid.octet, profile->bssid, ETHER_ADDR_LEN);
+ if (wl_adps_bad_ap_check(cfg, &bssid)) {
+ if (wl_adps_enabled(cfg, ndev)) {
+ wl_adps_set_suspend(cfg, ndev, ADPS_SUSPEND);
+ }
+ }
+
+ return BCME_OK;
+}
+#endif /* WL_BAM */
+
+int
+dhd_enable_adps(dhd_pub_t *dhd, uint8 on)
+{
+ int i;
+ int len;
+ int ret = BCME_OK;
+
+ bcm_iov_buf_t *iov_buf = NULL;
+ wl_adps_params_v1_t *data = NULL;
+
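+	/* The "adps" set uses a bcm_iov_buf_t header immediately followed by a
+	 * wl_adps_params_v1_t payload, so allocate room for both.
+	 */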
+ len = OFFSETOF(bcm_iov_buf_t, data) + sizeof(*data);
+ iov_buf = MALLOC(dhd->osh, len);
+ if (iov_buf == NULL) {
+ DHD_ERROR(("%s - failed to allocate %d bytes for iov_buf\n", __FUNCTION__, len));
+ ret = BCME_NOMEM;
+ goto exit;
+ }
+
+ iov_buf->version = WL_ADPS_IOV_VER;
+ iov_buf->len = sizeof(*data);
+ iov_buf->id = WL_ADPS_IOV_MODE;
+
+ data = (wl_adps_params_v1_t *)iov_buf->data;
+ data->version = ADPS_SUB_IOV_VERSION_1;
+ data->length = sizeof(*data);
+ data->mode = on;
+
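+	/* The mode is applied per band; the 1-based band index here is assumed
+	 * to map 1 -> 2.4 GHz and 2 -> 5 GHz.
+	 */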
+ for (i = 1; i <= MAX_BANDS; i++) {
+ data->band = i;
+ ret = dhd_iovar(dhd, 0, "adps", (char *)iov_buf, len, NULL, 0, TRUE);
+ if (ret < 0) {
+ if (ret == BCME_UNSUPPORTED) {
+ DHD_ERROR(("%s adps, UNSUPPORTED\n", __FUNCTION__));
+ ret = BCME_OK;
+ goto exit;
+ }
+ else {
+ DHD_ERROR(("%s fail to set adps %s for band %d (%d)\n",
+ __FUNCTION__, on ? "On" : "Off", i, ret));
+ goto exit;
+ }
+ }
+ }
+
+#ifdef WL_BAM
+ if (on) {
+ dhd_check_adps_bad_ap(dhd);
+ }
+#endif /* WL_BAM */
+
+exit:
+ if (iov_buf) {
+ MFREE(dhd->osh, iov_buf, len);
+ }
+ return ret;
+}
+#endif /* WLADPS || WLADPS_PRIVATE_CMD */
+
+int
+dhd_get_preserve_log_numbers(dhd_pub_t *dhd, uint32 *logset_mask)
+{
+ wl_el_set_type_t logset_type, logset_op;
+ wl_el_set_all_type_v1_t *logset_all_type_op = NULL;
+ bool use_logset_all_type = FALSE;
+ int ret = BCME_ERROR;
+ int err = 0;
+ uint8 i = 0;
+ int el_set_all_type_len;
+
+ if (!dhd || !logset_mask)
+ return BCME_BADARG;
+
+ el_set_all_type_len = OFFSETOF(wl_el_set_all_type_v1_t, set_type) +
+ (sizeof(wl_el_set_type_v1_t) * dhd->event_log_max_sets);
+
+ logset_all_type_op = (wl_el_set_all_type_v1_t *) MALLOC(dhd->osh, el_set_all_type_len);
+ if (logset_all_type_op == NULL) {
+ DHD_ERROR(("%s: failed to allocate %d bytes for logset_all_type_op\n",
+ __FUNCTION__, el_set_all_type_len));
+ return BCME_NOMEM;
+ }
+
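+	/* On success, *logset_mask has bit i set for every preserve/chatty log
+	 * set i; e.g. a mask of 0x5 would mark sets 0 and 2 for preservation.
+	 */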
+ *logset_mask = 0;
+ memset(&logset_type, 0, sizeof(logset_type));
+ memset(&logset_op, 0, sizeof(logset_op));
+ logset_type.version = htod16(EVENT_LOG_SET_TYPE_CURRENT_VERSION);
+ logset_type.len = htod16(sizeof(wl_el_set_type_t));
+
+ /* Try with set = event_log_max_sets, if fails, use legacy event_log_set_type */
+ logset_type.set = dhd->event_log_max_sets;
+ err = dhd_iovar(dhd, 0, "event_log_set_type", (char *)&logset_type, sizeof(logset_type),
+ (char *)logset_all_type_op, el_set_all_type_len, FALSE);
+ if (err == BCME_OK) {
+		DHD_ERROR(("%s: using optimised logset_all_type query\n", __FUNCTION__));
+ use_logset_all_type = TRUE;
+ }
+
+ for (i = 0; i < dhd->event_log_max_sets; i++) {
+ if (use_logset_all_type) {
+ logset_op.type = logset_all_type_op->set_type[i].type_val;
+ } else {
+ logset_type.set = i;
+ err = dhd_iovar(dhd, 0, "event_log_set_type", (char *)&logset_type,
+ sizeof(logset_type), (char *)&logset_op, sizeof(logset_op), FALSE);
+ }
+ /* the iovar may return 'unsupported' error if a log set number is not present
+ * in the fw, so we should not return on error !
+ */
+ if (err == BCME_OK &&
+ logset_op.type == EVENT_LOG_SET_TYPE_PRSRV) {
+ *logset_mask |= 0x01u << i;
+ ret = BCME_OK;
+ DHD_ERROR(("[INIT] logset:%d is preserve/chatty\n", i));
+ }
+ }
+
+ MFREE(dhd->osh, logset_all_type_op, el_set_all_type_len);
+ return ret;
+}
+
+#ifndef OEM_ANDROID
+/* For non-android FC modular builds, override firmware preinitialized values */
+void
+dhd_override_fwprenit(dhd_pub_t * dhd)
+{
+ int ret = 0;
+
+ {
+ /* Disable bcn_li_bcn */
+ uint32 bcn_li_bcn = 0;
+ ret = dhd_iovar(dhd, 0, "bcn_li_bcn", (char *)&bcn_li_bcn,
+ sizeof(bcn_li_bcn), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: bcn_li_bcn failed:%d\n",
+ __FUNCTION__, ret));
+ }
+ }
+
+ {
+ /* Disable apsta */
+ uint32 apsta = 0;
+ ret = dhd_iovar(dhd, 0, "apsta", (char *)&apsta,
+ sizeof(apsta), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: apsta failed:%d\n",
+ __FUNCTION__, ret));
+ }
+ }
+
+ {
+ int ap_mode = 0;
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_AP, (char *)&ap_mode,
+ sizeof(ap_mode), TRUE, 0)) < 0) {
+ DHD_ERROR(("%s: set apmode failed :%d\n", __FUNCTION__, ret));
+ }
+ }
+}
+#endif /* !OEM_ANDROID */
+
+int
+dhd_get_fw_capabilities(dhd_pub_t * dhd)
+{
+
+ int ret = 0;
+ uint32 cap_buf_size = sizeof(dhd->fw_capabilities);
+ memset(dhd->fw_capabilities, 0, cap_buf_size);
+ ret = dhd_iovar(dhd, 0, "cap", NULL, 0, dhd->fw_capabilities, (cap_buf_size - 1),
+ FALSE);
+
+ if (ret < 0) {
+ DHD_ERROR(("%s: Get Capability failed (error=%d)\n",
+ __FUNCTION__, ret));
+ return ret;
+ }
+
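+	/* Shift the capability string right by one and frame it with spaces so
+	 * that whole-word lookups (e.g. searching for " p2p ") cannot match a
+	 * partial capability name; this is presumably what FW_SUPPORTED()
+	 * relies on.
+	 */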
+ memmove(&dhd->fw_capabilities[1], dhd->fw_capabilities, (cap_buf_size - 1));
+ dhd->fw_capabilities[0] = ' ';
+ dhd->fw_capabilities[cap_buf_size - 2] = ' ';
+ dhd->fw_capabilities[cap_buf_size - 1] = '\0';
+
+ return 0;
+}
+
+int
+dhd_optimised_preinit_ioctls(dhd_pub_t * dhd)
+{
+ int ret = 0;
+ /* Room for "event_msgs_ext" + '\0' + bitvec */
+ char iovbuf[WL_EVENTING_MASK_EXT_LEN + EVENTMSGS_EXT_STRUCT_SIZE + 16];
+#ifdef DHD_PKTTS
+ uint32 val = 0;
+#endif
+ uint32 event_log_max_sets = 0;
+ char* iov_buf = NULL;
+ /* XXX: Use ret2 for return check of IOVARS that might return BCME_UNSUPPORTED,
+ * based on FW build tag.
+ */
+ int ret2 = 0;
+#if defined(WL_MONITOR) && defined(HOST_RADIOTAP_CONV)
+ uint monitor = 0;
+ dhd_info_t *dhdinfo = (dhd_info_t*)dhd->info;
+#endif /* WL_MONITOR */
+#if defined(BCMSUP_4WAY_HANDSHAKE)
+ uint32 sup_wpa = 1;
+#endif /* BCMSUP_4WAY_HANDSHAKE */
+
+ uint32 frameburst = CUSTOM_FRAMEBURST_SET;
+ uint wnm_bsstrans_resp = 0;
+#ifdef DHD_BUS_MEM_ACCESS
+ uint32 enable_memuse = 1;
+#endif /* DHD_BUS_MEM_ACCESS */
+#ifdef DHD_PM_CONTROL_FROM_FILE
+ uint power_mode = PM_FAST;
+#endif /* DHD_PM_CONTROL_FROM_FILE */
+ char buf[WLC_IOCTL_SMLEN];
+ char *ptr;
+#ifdef ROAM_ENABLE
+ uint roamvar = 0;
+#ifdef ROAM_AP_ENV_DETECTION
+ int roam_env_mode = 0;
+#endif /* ROAM_AP_ENV_DETECTION */
+#endif /* ROAM_ENABLE */
+#if defined(SOFTAP)
+ uint dtim = 1;
+#endif
+/* xxx andrey tmp fix for dk8000 build error */
+ struct ether_addr p2p_ea;
+#ifdef GET_CUSTOM_MAC_ENABLE
+ struct ether_addr ea_addr;
+#endif /* GET_CUSTOM_MAC_ENABLE */
+#ifdef BCMPCIE_OOB_HOST_WAKE
+	if (unlikely(ret != BCME_OK)) {
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+ wl_wlc_version_t wlc_ver;
+
+#if defined(WBTEXT) && defined(RRM_BCNREQ_MAX_CHAN_TIME)
+ uint32 rrm_bcn_req_thrtl_win = RRM_BCNREQ_MAX_CHAN_TIME * 2;
+ uint32 rrm_bcn_req_max_off_chan_time = RRM_BCNREQ_MAX_CHAN_TIME;
+#endif /* WBTEXT && RRM_BCNREQ_MAX_CHAN_TIME */
+#ifdef PKT_FILTER_SUPPORT
+ dhd_pkt_filter_enable = TRUE;
+#ifdef APF
+ dhd->apf_set = FALSE;
+#endif /* APF */
+#endif /* PKT_FILTER_SUPPORT */
+ dhd->suspend_bcn_li_dtim = CUSTOM_SUSPEND_BCN_LI_DTIM;
+#ifdef ENABLE_MAX_DTIM_IN_SUSPEND
+ dhd->max_dtim_enable = TRUE;
+#else
+ dhd->max_dtim_enable = FALSE;
+#endif /* ENABLE_MAX_DTIM_IN_SUSPEND */
+ dhd->disable_dtim_in_suspend = FALSE;
+#ifdef CUSTOM_SET_OCLOFF
+ dhd->ocl_off = FALSE;
+#endif /* CUSTOM_SET_OCLOFF */
+#ifdef SUPPORT_SET_TID
+ dhd->tid_mode = SET_TID_OFF;
+ dhd->target_uid = 0;
+ dhd->target_tid = 0;
+#endif /* SUPPORT_SET_TID */
+ DHD_TRACE(("Enter %s\n", __FUNCTION__));
+ dhd->op_mode = 0;
+
+#ifdef ARP_OFFLOAD_SUPPORT
+	/* arpoe will be applied from the suspend context */
+ dhd->arpoe_enable = TRUE;
+ dhd->arpol_configured = FALSE;
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+ /* clear AP flags */
+#if defined(CUSTOM_COUNTRY_CODE)
+ dhd->dhd_cflags &= ~WLAN_PLAT_AP_FLAG;
+#endif /* CUSTOM_COUNTRY_CODE */
+
+#ifdef CUSTOMER_HW4_DEBUG
+ if (!dhd_validate_chipid(dhd)) {
+ DHD_ERROR(("%s: CONFIG_BCMXXX and CHIP ID(%x) is mismatched\n",
+ __FUNCTION__, dhd_bus_chip_id(dhd)));
+#ifndef SUPPORT_MULTIPLE_CHIPS
+ ret = BCME_BADARG;
+ goto done;
+#endif /* !SUPPORT_MULTIPLE_CHIPS */
+ }
+#endif /* CUSTOMER_HW4_DEBUG */
+
+ /* query for 'ver' to get version info from firmware */
+ memset(buf, 0, sizeof(buf));
+ ptr = buf;
+ ret = dhd_iovar(dhd, 0, "ver", NULL, 0, (char *)&buf, sizeof(buf), FALSE);
+ if (ret < 0)
+ DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
+ else {
+ bcmstrtok(&ptr, "\n", 0);
+ /* Print fw version info */
+ DHD_ERROR(("Firmware version = %s\n", buf));
+ strncpy(fw_version, buf, FW_VER_STR_LEN);
+ fw_version[FW_VER_STR_LEN-1] = '\0';
+#if defined(BCMSDIO) || defined(BCMPCIE)
+ dhd_set_version_info(dhd, buf);
+#endif /* BCMSDIO || BCMPCIE */
+ }
+
+ /* query for 'wlc_ver' to get version info from firmware */
+ /* memsetting to zero */
+ memset_s(&wlc_ver, sizeof(wl_wlc_version_t), 0,
+ sizeof(wl_wlc_version_t));
+ ret = dhd_iovar(dhd, 0, "wlc_ver", NULL, 0, (char *)&wlc_ver,
+ sizeof(wl_wlc_version_t), FALSE);
+ if (ret < 0)
+ DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
+ else {
+ dhd->wlc_ver_major = wlc_ver.wlc_ver_major;
+ dhd->wlc_ver_minor = wlc_ver.wlc_ver_minor;
+ }
+#ifdef BOARD_HIKEY
+ /* Set op_mode as MFG_MODE if WLTEST is present in "wl ver" */
+ if (strstr(fw_version, "WLTEST") != NULL) {
+ DHD_ERROR(("%s: wl ver has WLTEST, setting op_mode as DHD_FLAG_MFG_MODE\n",
+ __FUNCTION__));
+ op_mode = DHD_FLAG_MFG_MODE;
+ }
+#endif /* BOARD_HIKEY */
+	/* get capabilities from firmware */
+ ret = dhd_get_fw_capabilities(dhd);
+
+ if (ret < 0) {
+ DHD_ERROR(("%s: Get Capability failed (error=%d)\n",
+ __FUNCTION__, ret));
+ goto done;
+ }
+
+ if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
+ (op_mode == DHD_FLAG_MFG_MODE)) {
+ dhd->op_mode = DHD_FLAG_MFG_MODE;
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+ /* disable runtimePM by default in MFG mode. */
+ pm_runtime_disable(dhd_bus_to_dev(dhd->bus));
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+#ifdef DHD_PCIE_RUNTIMEPM
+ /* Disable RuntimePM in mfg mode */
+ DHD_DISABLE_RUNTIME_PM(dhd);
+		DHD_ERROR(("%s : Disable RuntimePM in Manufacturing Firmware\n", __FUNCTION__));
+#endif /* DHD_PCIE_RUNTIMEPM */
+		/* Check and adjust IOCTL response timeout for Manufacturing firmware */
+		dhd_os_set_ioctl_resp_timeout(MFG_IOCTL_RESP_TIMEOUT);
+		DHD_ERROR(("%s : Set IOCTL response time for Manufacturing Firmware\n",
+			__FUNCTION__));
+
+#if defined(ARP_OFFLOAD_SUPPORT)
+ dhd->arpoe_enable = FALSE;
+#endif /* ARP_OFFLOAD_SUPPORT */
+#ifdef PKT_FILTER_SUPPORT
+ dhd_pkt_filter_enable = FALSE;
+#endif /* PKT_FILTER_SUPPORT */
+#ifndef CUSTOM_SET_ANTNPM
+ if (FW_SUPPORTED(dhd, rsdb)) {
+ wl_config_t rsdb_mode;
+ memset(&rsdb_mode, 0, sizeof(rsdb_mode));
+ ret = dhd_iovar(dhd, 0, "rsdb_mode", (char *)&rsdb_mode, sizeof(rsdb_mode),
+ NULL, 0, TRUE);
+ if (ret < 0) {
+				DHD_ERROR(("%s Disabling rsdb_mode failed ret=%d\n",
+					__FUNCTION__, ret));
+ }
+ }
+#endif /* !CUSTOM_SET_ANTNPM */
+ } else {
+ uint32 concurrent_mode = 0;
+ dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
+ DHD_INFO(("%s : Set IOCTL response time.\n", __FUNCTION__));
+
+ BCM_REFERENCE(concurrent_mode);
+
+ dhd->op_mode = DHD_FLAG_STA_MODE;
+
+ BCM_REFERENCE(p2p_ea);
+#if defined(OEM_ANDROID) && !defined(AP) && defined(WLP2P)
+ if ((concurrent_mode = dhd_get_concurrent_capabilites(dhd))) {
+ dhd->op_mode |= concurrent_mode;
+ }
+
+ /* Check if we are enabling p2p */
+ if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
+ memcpy(&p2p_ea, &dhd->mac, ETHER_ADDR_LEN);
+ ETHER_SET_LOCALADDR(&p2p_ea);
+ ret = dhd_iovar(dhd, 0, "p2p_da_override", (char *)&p2p_ea, sizeof(p2p_ea),
+ NULL, 0, TRUE);
+ if (ret < 0)
+ DHD_ERROR(("%s p2p_da_override ret= %d\n", __FUNCTION__, ret));
+ else
+ DHD_INFO(("dhd_preinit_ioctls: p2p_da_override succeeded\n"));
+ }
+#endif /* defined(OEM_ANDROID) && !defined(AP) && defined(WLP2P) */
+
+ }
+
+#ifdef BCMPCIE_OOB_HOST_WAKE
+ ret = dhd_iovar(dhd, 0, "bus:hostwake_oob", NULL, 0, (char *)&hostwake_oob,
+ sizeof(hostwake_oob), FALSE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: hostwake_oob IOVAR not present, proceed\n", __FUNCTION__));
+ } else {
+ if (hostwake_oob == 0) {
+ DHD_ERROR(("%s: hostwake_oob is not enabled in the NVRAM, STOP\n",
+ __FUNCTION__));
+ ret = BCME_UNSUPPORTED;
+ goto done;
+ } else {
+ DHD_ERROR(("%s: hostwake_oob enabled\n", __FUNCTION__));
+ }
+ }
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+
+#ifdef DNGL_AXI_ERROR_LOGGING
+ ret = dhd_iovar(dhd, 0, "axierror_logbuf_addr", NULL, 0, (char *)&dhd->axierror_logbuf_addr,
+ sizeof(dhd->axierror_logbuf_addr), FALSE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: axierror_logbuf_addr IOVAR not present, proceed\n", __FUNCTION__));
+ dhd->axierror_logbuf_addr = 0;
+ } else {
+ DHD_ERROR(("%s: axierror_logbuf_addr : 0x%x\n",
+ __FUNCTION__, dhd->axierror_logbuf_addr));
+ }
+#endif /* DNGL_AXI_ERROR_LOGGING */
+
+#ifdef GET_CUSTOM_MAC_ENABLE
+ ret = wifi_platform_get_mac_addr(dhd->info->adapter, ea_addr.octet, 0);
+ if (!ret) {
+ ret = dhd_iovar(dhd, 0, "cur_etheraddr", (char *)&ea_addr, ETHER_ADDR_LEN, NULL, 0,
+ TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret));
+ ret = BCME_NOTUP;
+ goto done;
+ }
+ memcpy(dhd->mac.octet, ea_addr.octet, ETHER_ADDR_LEN);
+ } else
+#endif /* GET_CUSTOM_MAC_ENABLE */
+ {
+ /* Get the default device MAC address directly from firmware */
+ ret = dhd_iovar(dhd, 0, "cur_etheraddr", NULL, 0, (char *)&buf, sizeof(buf), FALSE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: can't get MAC address , error=%d\n", __FUNCTION__, ret));
+ ret = BCME_NOTUP;
+ goto done;
+ }
+
+ DHD_ERROR(("%s: use firmware generated mac_address "MACDBG"\n",
+ __FUNCTION__, MAC2STRDBG(&buf)));
+
+#ifdef MACADDR_PROVISION_ENFORCED
+ if (ETHER_IS_LOCALADDR(buf)) {
+ DHD_ERROR(("%s: error! not using provision mac addr!\n", __FUNCTION__));
+ ret = BCME_BADADDR;
+ goto done;
+ }
+#endif /* MACADDR_PROVISION_ENFORCED */
+
+ /* Update public MAC address after reading from Firmware */
+ memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN);
+ }
+
+ if (ETHER_ISNULLADDR(dhd->mac.octet)) {
+ DHD_ERROR(("%s: NULL MAC address during pre-init\n", __FUNCTION__));
+ ret = BCME_BADADDR;
+ goto done;
+ } else {
+ (void)memcpy_s(dhd_linux_get_primary_netdev(dhd)->perm_addr, ETHER_ADDR_LEN,
+ dhd->mac.octet, ETHER_ADDR_LEN);
+ }
+
+ if ((ret = dhd_apply_default_clm(dhd, dhd->clm_path)) < 0) {
+ DHD_ERROR(("%s: CLM set failed. Abort initialization.\n", __FUNCTION__));
+ goto done;
+ }
+
+ DHD_ERROR(("Firmware up: op_mode=0x%04x, MAC="MACDBG"\n",
+ dhd->op_mode, MAC2STRDBG(dhd->mac.octet)));
+#if defined(DHD_BLOB_EXISTENCE_CHECK)
+ if (!dhd->is_blob)
+#endif /* DHD_BLOB_EXISTENCE_CHECK */
+ {
+ /* get a ccode and revision for the country code */
+#if defined(CUSTOM_COUNTRY_CODE)
+ get_customized_country_code(dhd->info->adapter, dhd->dhd_cspec.country_abbrev,
+ &dhd->dhd_cspec, dhd->dhd_cflags);
+#else
+ get_customized_country_code(dhd->info->adapter, dhd->dhd_cspec.country_abbrev,
+ &dhd->dhd_cspec);
+#endif /* CUSTOM_COUNTRY_CODE */
+ }
+
+#if defined(RXFRAME_THREAD) && defined(RXTHREAD_ONLYSTA)
+ if (dhd->op_mode == DHD_FLAG_HOSTAP_MODE)
+ dhd->info->rxthread_enabled = FALSE;
+ else
+ dhd->info->rxthread_enabled = TRUE;
+#endif
+ /* Set Country code */
+ if (dhd->dhd_cspec.ccode[0] != 0) {
+ ret = dhd_iovar(dhd, 0, "country", (char *)&dhd->dhd_cspec, sizeof(wl_country_t),
+ NULL, 0, TRUE);
+ if (ret < 0)
+ DHD_ERROR(("%s: country code setting failed\n", __FUNCTION__));
+ }
+
+#if defined(ROAM_ENABLE)
+ BCM_REFERENCE(roamvar);
+#ifdef USE_WFA_CERT_CONF
+ if (sec_get_param_wfa_cert(dhd, SET_PARAM_ROAMOFF, &roamvar) == BCME_OK) {
+ DHD_ERROR(("%s: read roam_off param =%d\n", __FUNCTION__, roamvar));
+ }
+ /* roamvar is set to 0 by preinit fw, change only if roamvar is non-zero */
+ if (roamvar != 0) {
+		/* Disable built-in roaming to allow the ext supplicant to take care of roaming */
+ ret = dhd_iovar(dhd, 0, "roam_off", (char *)&roamvar, sizeof(roamvar), NULL, 0,
+ TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s roam_off failed %d\n", __FUNCTION__, ret));
+ }
+ }
+#endif /* USE_WFA_CERT_CONF */
+
+#ifdef ROAM_AP_ENV_DETECTION
+ /* Changed to GET iovar to read roam_env_mode */
+ dhd->roam_env_detection = FALSE;
+ ret = dhd_iovar(dhd, 0, "roam_env_detection", NULL, 0, (char *)&roam_env_mode,
+ sizeof(roam_env_mode), FALSE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: roam_env_detection IOVAR not present\n", __FUNCTION__));
+ } else {
+ if (roam_env_mode == AP_ENV_INDETERMINATE) {
+ dhd->roam_env_detection = TRUE;
+ }
+ }
+#endif /* ROAM_AP_ENV_DETECTION */
+#ifdef CONFIG_ROAM_RSSI_LIMIT
+ ret = dhd_roam_rssi_limit_set(dhd, CUSTOM_ROAMRSSI_2G, CUSTOM_ROAMRSSI_5G);
+ if (ret < 0) {
+ DHD_ERROR(("%s set roam_rssi_limit failed ret %d\n", __FUNCTION__, ret));
+ }
+#endif /* CONFIG_ROAM_RSSI_LIMIT */
+#ifdef CONFIG_ROAM_MIN_DELTA
+ ret = dhd_roam_min_delta_set(dhd, CUSTOM_ROAM_MIN_DELTA, CUSTOM_ROAM_MIN_DELTA);
+ if (ret < 0) {
+ DHD_ERROR(("%s set roam_min_delta failed ret %d\n", __FUNCTION__, ret));
+ }
+#endif /* CONFIG_ROAM_MIN_DELTA */
+#endif /* ROAM_ENABLE */
+
+#ifdef WLTDLS
+ dhd->tdls_enable = FALSE;
+	/* query tdls_enable */
+ ret = dhd_iovar(dhd, 0, "tdls_enable", NULL, 0, (char *)&dhd->tdls_enable,
+ sizeof(dhd->tdls_enable), FALSE);
+ DHD_ERROR(("%s: tdls_enable=%d ret=%d\n", __FUNCTION__, dhd->tdls_enable, ret));
+#endif /* WLTDLS */
+
+#ifdef DHD_PM_CONTROL_FROM_FILE
+#ifdef CUSTOMER_HW10
+ dhd_control_pm(dhd, &power_mode);
+#else
+ sec_control_pm(dhd, &power_mode);
+#endif /* CUSTOMER_HW10 */
+#endif /* DHD_PM_CONTROL_FROM_FILE */
+
+#ifdef MIMO_ANT_SETTING
+ dhd_sel_ant_from_file(dhd);
+#endif /* MIMO_ANT_SETTING */
+
+#if defined(OEM_ANDROID) && defined(SOFTAP)
+ if (ap_fw_loaded == TRUE) {
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_DTIMPRD, (char *)&dtim, sizeof(dtim), TRUE, 0);
+ }
+#endif /* defined(OEM_ANDROID) && defined(SOFTAP) */
+
+#if defined(KEEP_ALIVE)
+ /* Set Keep Alive : be sure to use FW with -keepalive */
+ if (!(dhd->op_mode &
+ (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))) {
+ if ((ret = dhd_keep_alive_onoff(dhd)) < 0)
+			DHD_ERROR(("%s set keepalive failed %d\n",
+ __FUNCTION__, ret));
+ }
+#endif /* defined(KEEP_ALIVE) */
+
+ ret = dhd_iovar(dhd, 0, "event_log_max_sets", NULL, 0, (char *)&event_log_max_sets,
+ sizeof(event_log_max_sets), FALSE);
+ if (ret == BCME_OK) {
+ dhd->event_log_max_sets = event_log_max_sets;
+ } else {
+ dhd->event_log_max_sets = NUM_EVENT_LOG_SETS;
+ }
+ BCM_REFERENCE(iovbuf);
+	/* Make sure max_sets is stored first, with a wmb, and only then
+	 * sets_queried; readers consume the two in the reverse order when
+	 * parsing the logsets.
+	 */
+ OSL_SMP_WMB();
+ dhd->event_log_max_sets_queried = TRUE;
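+	/* A reader that pairs this with a read barrier and observes
+	 * event_log_max_sets_queried == TRUE is then guaranteed to see the
+	 * event_log_max_sets value stored above.
+	 */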
+ DHD_ERROR(("%s: event_log_max_sets: %d ret: %d\n",
+ __FUNCTION__, dhd->event_log_max_sets, ret));
+#ifdef DHD_BUS_MEM_ACCESS
+ ret = dhd_iovar(dhd, 0, "enable_memuse", (char *)&enable_memuse,
+ sizeof(enable_memuse), iovbuf, sizeof(iovbuf), FALSE);
+ if (ret < 0) {
+		DHD_ERROR(("%s: enable_memuse failed ret=%d\n",
+ __FUNCTION__, ret));
+ } else {
+ DHD_ERROR(("%s: enable_memuse = %d\n",
+ __FUNCTION__, enable_memuse));
+ }
+#endif /* DHD_BUS_MEM_ACCESS */
+
+#ifdef USE_WFA_CERT_CONF
+#ifdef USE_WL_FRAMEBURST
+ if (sec_get_param_wfa_cert(dhd, SET_PARAM_FRAMEBURST, &frameburst) == BCME_OK) {
+ DHD_ERROR(("%s, read frameburst param=%d\n", __FUNCTION__, frameburst));
+ }
+#endif /* USE_WL_FRAMEBURST */
+ g_frameburst = frameburst;
+#endif /* USE_WFA_CERT_CONF */
+
+#ifdef DISABLE_WL_FRAMEBURST_SOFTAP
+	/* Disable framebursting for SoftAP */
+ if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
+ frameburst = 0;
+ }
+#endif /* DISABLE_WL_FRAMEBURST_SOFTAP */
+
+ BCM_REFERENCE(frameburst);
+#if defined(USE_WL_FRAMEBURST) || defined(DISABLE_WL_FRAMEBURST_SOFTAP)
+ /* frameburst is set to 1 by preinit fw, change if otherwise */
+ if (frameburst != 1) {
+ /* Set frameburst to value */
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_FAKEFRAG, (char *)&frameburst,
+ sizeof(frameburst), TRUE, 0)) < 0) {
+ DHD_INFO(("%s frameburst not supported %d\n", __FUNCTION__, ret));
+ }
+ }
+#endif /* USE_WL_FRAMEBURST || DISABLE_WL_FRAMEBURST_SOFTAP */
+
+ iov_buf = (char*)MALLOC(dhd->osh, WLC_IOCTL_SMLEN);
+ if (iov_buf == NULL) {
+ DHD_ERROR(("failed to allocate %d bytes for iov_buf\n", WLC_IOCTL_SMLEN));
+ ret = BCME_NOMEM;
+ goto done;
+ }
+
+#if defined(BCMSUP_4WAY_HANDSHAKE)
+ /* Read 4-way handshake requirements */
+ if (dhd_use_idsup == 1) {
+ ret = dhd_iovar(dhd, 0, "sup_wpa", (char *)&sup_wpa, sizeof(sup_wpa),
+ (char *)&iovbuf, sizeof(iovbuf), FALSE);
+ /* sup_wpa iovar returns NOTREADY status on some platforms using modularized
+ * in-dongle supplicant.
+ */
+ if (ret >= 0 || ret == BCME_NOTREADY)
+ dhd->fw_4way_handshake = TRUE;
+ DHD_TRACE(("4-way handshake mode is: %d\n", dhd->fw_4way_handshake));
+ }
+#endif /* BCMSUP_4WAY_HANDSHAKE */
+
+#if defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING)
+ dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_LLR_MAP);
+#endif /* defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING) */
+
+#if defined(BCMPCIE) && defined(EAPOL_PKT_PRIO)
+ dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_LLR_MAP);
+#endif /* defined(BCMPCIE) && defined(EAPOL_PKT_PRIO) */
+
+#ifdef ARP_OFFLOAD_SUPPORT
+ DHD_ERROR(("arp_enable:%d arp_ol:%d\n",
+ dhd->arpoe_enable, dhd->arpol_configured));
+#endif /* ARP_OFFLOAD_SUPPORT */
+	/*
+	 * Retaining the pktfilter setup temporarily; once fw preinit includes
+	 * this, it will be removed. Take care to skip the pktfilter check
+	 * during each pktfilter removal.
+	 */
+#ifdef PKT_FILTER_SUPPORT
+	/* Set up default definitions for pktfilter, enable in suspend */
+ dhd->pktfilter_count = 6;
+ dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = NULL;
+ if (!FW_SUPPORTED(dhd, pf6)) {
+ dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = NULL;
+ dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL;
+ } else {
+ /* Immediately pkt filter TYPE 6 Discard IPv4/IPv6 Multicast Packet */
+ dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = DISCARD_IPV4_MCAST;
+ dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = DISCARD_IPV6_MCAST;
+ }
+ /* apply APP pktfilter */
+ dhd->pktfilter[DHD_ARP_FILTER_NUM] = "105 0 0 12 0xFFFF 0x0806";
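+	/* Filter strings are of the form "<id> <polarity> <type> <offset>
+	 * <mask> <pattern>" (field naming assumed from dhd pkt_filter usage);
+	 * the ARP filter above matches EtherType 0x0806 at byte offset 12.
+	 */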
+
+#ifdef BLOCK_IPV6_PACKET
+ /* Setup filter to allow only IPv4 unicast frames */
+ dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 "
+ HEX_PREF_STR UNI_FILTER_STR ZERO_ADDR_STR ETHER_TYPE_STR IPV6_FILTER_STR
+ " "
+ HEX_PREF_STR ZERO_ADDR_STR ZERO_ADDR_STR ETHER_TYPE_STR ZERO_TYPE_STR;
+#else
+ /* Setup filter to allow only unicast */
+ dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0x01 0x00";
+#endif /* BLOCK_IPV6_PACKET */
+
+#ifdef PASS_IPV4_SUSPEND
+	/* XXX customer wants to get IPv4 multicast packets */
+ dhd->pktfilter[DHD_MDNS_FILTER_NUM] = "104 0 0 0 0xFFFFFF 0x01005E";
+#else
+ /* Add filter to pass multicastDNS packet and NOT filter out as Broadcast */
+ dhd->pktfilter[DHD_MDNS_FILTER_NUM] = NULL;
+#endif /* PASS_IPV4_SUSPEND */
+ if (FW_SUPPORTED(dhd, pf6)) {
+		/* Immediately pkt filter TYPE 6 Discard Broadcast IP packet */
+		dhd->pktfilter[DHD_IP4BCAST_DROP_FILTER_NUM] = DISCARD_IPV4_BCAST;
+		/* Immediately pkt filter TYPE 6 Discard Cisco STP packet */
+		dhd->pktfilter[DHD_LLC_STP_DROP_FILTER_NUM] = DISCARD_LLC_STP;
+		/* Immediately pkt filter TYPE 6 Discard Cisco XID protocol */
+		dhd->pktfilter[DHD_LLC_XID_DROP_FILTER_NUM] = DISCARD_LLC_XID;
+		/* Immediately pkt filter TYPE 6 Discard NETBIOS packet (port 137) */
+		dhd->pktfilter[DHD_UDPNETBIOS_DROP_FILTER_NUM] = DISCARD_UDPNETBIOS;
+ dhd->pktfilter_count = 11;
+ }
+
+#ifdef GAN_LITE_NAT_KEEPALIVE_FILTER
+ dhd->pktfilter_count = 4;
+ /* Setup filter to block broadcast and NAT Keepalive packets */
+ /* discard all broadcast packets */
+ dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0xffffff 0xffffff";
+ /* discard NAT Keepalive packets */
+ dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = "102 0 0 36 0xffffffff 0x11940009";
+ /* discard NAT Keepalive packets */
+ dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = "104 0 0 38 0xffffffff 0x11940009";
+ dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL;
+#endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */
+
+#if defined(SOFTAP)
+ if (ap_fw_loaded) {
+		/* XXX Andrey: for SOFTAP disable pkt filters (if there were any) */
+ dhd_enable_packet_filter(0, dhd);
+ }
+#endif /* defined(SOFTAP) */
+ dhd_set_packet_filter(dhd);
+#endif /* PKT_FILTER_SUPPORT */
+
+ /* query for 'clmver' to get clm version info from firmware */
+ bzero(buf, sizeof(buf));
+ ret = dhd_iovar(dhd, 0, "clmver", NULL, 0, buf, sizeof(buf), FALSE);
+ if (ret < 0)
+ DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
+ else {
+ char *ver_temp_buf = NULL;
+
+ if ((ver_temp_buf = bcmstrstr(buf, "Data:")) == NULL) {
+ DHD_ERROR(("Couldn't find \"Data:\"\n"));
+ } else {
+ ptr = (ver_temp_buf + strlen("Data:"));
+ if ((ver_temp_buf = bcmstrtok(&ptr, "\n", 0)) == NULL) {
+ DHD_ERROR(("Couldn't find New line character\n"));
+ } else {
+ bzero(clm_version, CLM_VER_STR_LEN);
+ strlcpy(clm_version, ver_temp_buf,
+ MIN(strlen(ver_temp_buf) + 1, CLM_VER_STR_LEN));
+ DHD_INFO(("CLM version = %s\n", clm_version));
+ }
+ }
+
+#if defined(CUSTOMER_HW4_DEBUG)
+ if ((ver_temp_buf = bcmstrstr(ptr, "Customization:")) == NULL) {
+ DHD_ERROR(("Couldn't find \"Customization:\"\n"));
+ } else {
+ char tokenlim;
+ ptr = (ver_temp_buf + strlen("Customization:"));
+ if ((ver_temp_buf = bcmstrtok(&ptr, "(\n", &tokenlim)) == NULL) {
+				DHD_ERROR(("Couldn't find project blob version "
+					"or new line character\n"));
+ } else if (tokenlim == '(') {
+				/* append to clm_version in place; snprintf must not
+				 * be passed its own destination as a source argument
+				 */
+				snprintf(clm_version + strlen(clm_version),
+					CLM_VER_STR_LEN - strlen(clm_version),
+					", Blob ver = Major : %s minor : ", ver_temp_buf);
+ DHD_INFO(("[INFO]CLM/Blob version = %s\n", clm_version));
+ if ((ver_temp_buf = bcmstrtok(&ptr, "\n", &tokenlim)) == NULL) {
+ DHD_ERROR(("Couldn't find New line character\n"));
+ } else {
+					snprintf(clm_version + strlen(clm_version),
+						CLM_VER_STR_LEN - strlen(clm_version),
+						"%s", ver_temp_buf);
+ DHD_INFO(("[INFO]CLM/Blob/project version = %s\n",
+ clm_version));
+
+ }
+ } else if (tokenlim == '\n') {
+				snprintf(clm_version + strlen(clm_version),
+					CLM_VER_STR_LEN - strlen(clm_version),
+					", Blob ver = Major : %s", ver_temp_buf);
+ DHD_INFO(("[INFO]CLM/Blob/project version = %s\n", clm_version));
+ }
+ }
+#endif /* CUSTOMER_HW4_DEBUG */
+ if (strlen(clm_version)) {
+ DHD_ERROR(("CLM version = %s\n", clm_version));
+ } else {
+ DHD_ERROR(("Couldn't find CLM version!\n"));
+ }
+
+ }
+
+#ifdef WRITE_WLANINFO
+ sec_save_wlinfo(fw_version, EPI_VERSION_STR, dhd->info->nv_path, clm_version);
+#endif /* WRITE_WLANINFO */
+
+#ifdef GEN_SOFTAP_INFO_FILE
+ sec_save_softap_info();
+#endif /* GEN_SOFTAP_INFO_FILE */
+
+#ifdef PNO_SUPPORT
+ if (!dhd->pno_state) {
+ dhd_pno_init(dhd);
+ }
+#endif
+
+#ifdef DHD_PKTTS
+ /* get the pkt metadata buffer length supported by FW */
+ if (dhd_wl_ioctl_get_intiovar(dhd, "bus:metadata_info", &val,
+ WLC_GET_VAR, FALSE, 0) != BCME_OK) {
+ DHD_ERROR(("%s: failed to get pkt metadata buflen, use IPC pkt TS.\n",
+ __FUNCTION__));
+ /*
+ * if iovar fails, IPC method of collecting
+ * TS should be used, hence set metadata_buflen as
+ * 0 here. This will be checked later on Tx completion
+ * to decide if IPC or metadata method of reading TS
+ * should be used
+ */
+ dhd->pkt_metadata_version = 0;
+ dhd->pkt_metadata_buflen = 0;
+ } else {
+ dhd->pkt_metadata_version = GET_METADATA_VER(val);
+ dhd->pkt_metadata_buflen = GET_METADATA_BUFLEN(val);
+ }
+
+	/* Check whether FW supports pktlat; if it does, enable the pktts_enab iovar */
+ ret = dhd_set_pktts_enab(dhd, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: Enabling pktts_enab failed, ret=%d\n", __FUNCTION__, ret));
+ }
+#endif /* DHD_PKTTS */
+
+#ifdef RTT_SUPPORT
+ if (dhd->rtt_state) {
+ ret = dhd_rtt_init(dhd);
+ if (ret < 0) {
+ DHD_ERROR(("%s failed to initialize RTT\n", __FUNCTION__));
+ }
+ }
+#endif
+
+#ifdef FILTER_IE
+ /* Failure to configure filter IE is not a fatal error, ignore it. */
+ if (FW_SUPPORTED(dhd, fie) &&
+ !(dhd->op_mode & (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))) {
+ dhd_read_from_file(dhd);
+ }
+#endif /* FILTER_IE */
+
+#ifdef NDO_CONFIG_SUPPORT
+ dhd->ndo_enable = FALSE;
+ dhd->ndo_host_ip_overflow = FALSE;
+ dhd->ndo_max_host_ip = NDO_MAX_HOST_IP_ENTRIES;
+#endif /* NDO_CONFIG_SUPPORT */
+
+ /* ND offload version supported */
+ dhd->ndo_version = dhd_ndo_get_version(dhd);
+
+ /* check dongle supports wbtext (product policy) or not */
+ dhd->wbtext_support = FALSE;
+ if (dhd_wl_ioctl_get_intiovar(dhd, "wnm_bsstrans_resp", &wnm_bsstrans_resp,
+ WLC_GET_VAR, FALSE, 0) != BCME_OK) {
+ DHD_ERROR(("failed to get wnm_bsstrans_resp\n"));
+ }
+ dhd->wbtext_policy = wnm_bsstrans_resp;
+ if (dhd->wbtext_policy == WL_BSSTRANS_POLICY_PRODUCT_WBTEXT) {
+ dhd->wbtext_support = TRUE;
+ }
+#ifndef WBTEXT
+ /* driver can turn off wbtext feature through makefile */
+ if (dhd->wbtext_support) {
+ if (dhd_wl_ioctl_set_intiovar(dhd, "wnm_bsstrans_resp",
+ WL_BSSTRANS_POLICY_ROAM_ALWAYS,
+ WLC_SET_VAR, FALSE, 0) != BCME_OK) {
+ DHD_ERROR(("failed to disable WBTEXT\n"));
+ }
+ }
+#endif /* !WBTEXT */
+
+#ifdef DHD_NON_DMA_M2M_CORRUPTION
+ /* check pcie non dma loopback */
+ if (dhd->op_mode == DHD_FLAG_MFG_MODE &&
+ (dhd_bus_dmaxfer_lpbk(dhd, M2M_NON_DMA_LPBK) < 0)) {
+ goto done;
+ }
+#endif /* DHD_NON_DMA_M2M_CORRUPTION */
+
+#ifdef CUSTOM_ASSOC_TIMEOUT
+ /* set recreate_bi_timeout to increase assoc timeout :
+ * 20 * 100TU * 1024 / 1000 = 2 secs
+ * (beacon wait time = recreate_bi_timeout * beacon_period * 1024 / 1000)
+ */
+ if (dhd_wl_ioctl_set_intiovar(dhd, "recreate_bi_timeout",
+ CUSTOM_ASSOC_TIMEOUT,
+ WLC_SET_VAR, TRUE, 0) != BCME_OK) {
+ DHD_ERROR(("failed to set assoc timeout\n"));
+ }
+#endif /* CUSTOM_ASSOC_TIMEOUT */
+
+ BCM_REFERENCE(ret2);
+#if defined(WBTEXT) && defined(RRM_BCNREQ_MAX_CHAN_TIME)
+ if (dhd_iovar(dhd, 0, "rrm_bcn_req_thrtl_win",
+ (char *)&rrm_bcn_req_thrtl_win, sizeof(rrm_bcn_req_thrtl_win),
+ NULL, 0, TRUE) < 0) {
+ DHD_ERROR(("failed to set RRM BCN request thrtl_win\n"));
+ }
+ if (dhd_iovar(dhd, 0, "rrm_bcn_req_max_off_chan_time",
+ (char *)&rrm_bcn_req_max_off_chan_time, sizeof(rrm_bcn_req_max_off_chan_time),
+ NULL, 0, TRUE) < 0) {
+ DHD_ERROR(("failed to set RRM BCN Request max_off_chan_time\n"));
+ }
+#endif /* WBTEXT && RRM_BCNREQ_MAX_CHAN_TIME */
+#ifdef WL_MONITOR
+#ifdef HOST_RADIOTAP_CONV
+	/* The 'wl monitor' IOVAR is issued to check whether the FW supports
+	 * radiotap conversion. This is indicated through the MSB (1<<31) bit,
+	 * based on which host radiotap conversion is enabled or disabled:
+	 * 0 - host supports radiotap conversion,
+	 * 1 - FW supports radiotap conversion.
+	 */
+ bcm_mkiovar("monitor", (char *)&monitor, sizeof(monitor), iovbuf, sizeof(iovbuf));
+ if ((ret2 = dhd_wl_ioctl_cmd(dhd, WLC_GET_MONITOR, iovbuf,
+ sizeof(iovbuf), FALSE, 0)) == 0) {
+ memcpy(&monitor, iovbuf, sizeof(monitor));
+ dhdinfo->host_radiotap_conv = (monitor & HOST_RADIOTAP_CONV_BIT) ? TRUE : FALSE;
+ } else {
+ DHD_ERROR(("%s Failed to get monitor mode, err %d\n",
+ __FUNCTION__, ret2));
+ }
+#endif /* HOST_RADIOTAP_CONV */
+ if (FW_SUPPORTED(dhd, monitor)) {
+ dhd->monitor_enable = TRUE;
+ DHD_ERROR(("%s: Monitor mode is enabled in FW cap\n", __FUNCTION__));
+ } else {
+ dhd->monitor_enable = FALSE;
+ DHD_ERROR(("%s: Monitor mode is not enabled in FW cap\n", __FUNCTION__));
+ }
+#endif /* WL_MONITOR */
+
+ /* store the preserve log set numbers */
+ if (dhd_get_preserve_log_numbers(dhd, &dhd->logset_prsrv_mask)
+ != BCME_OK) {
+ DHD_ERROR(("%s: Failed to get preserve log # !\n", __FUNCTION__));
+ }
+
+#ifdef CONFIG_SILENT_ROAM
+ dhd->sroam_turn_on = TRUE;
+ dhd->sroamed = FALSE;
+#endif /* CONFIG_SILENT_ROAM */
+
+#ifndef OEM_ANDROID
+	/* For non-android FC modular builds, override firmware preinitialized values */
+ dhd_override_fwprenit(dhd);
+#endif /* !OEM_ANDROID */
+ dhd_set_bandlock(dhd);
+
+done:
+ if (iov_buf) {
+ MFREE(dhd->osh, iov_buf, WLC_IOCTL_SMLEN);
+ }
+ return ret;
+}
+
+int
+dhd_legacy_preinit_ioctls(dhd_pub_t *dhd)
+{
+ int ret = 0;
+ /* Room for "event_msgs_ext" + '\0' + bitvec */
+ char iovbuf[WL_EVENTING_MASK_EXT_LEN + EVENTMSGS_EXT_STRUCT_SIZE + 16];
+ char *mask;
+ uint32 buf_key_b4_m4 = 1;
+#ifdef DHD_PKTTS
+ uint32 val = 0;
+#endif
+ uint8 msglen;
+ eventmsgs_ext_t *eventmask_msg = NULL;
+ uint32 event_log_max_sets = 0;
+ char* iov_buf = NULL;
+ /* XXX: Use ret2 for return check of IOVARS that might return BCME_UNSUPPORTED,
+ * based on FW build tag.
+ */
+ int ret2 = 0;
+ uint32 wnm_cap = 0;
+#if defined(WL_MONITOR) && defined(HOST_RADIOTAP_CONV)
+ uint monitor = 0;
+ dhd_info_t *dhdinfo = (dhd_info_t*)dhd->info;
+#endif /* WL_MONITOR */
+#if defined(BCMSUP_4WAY_HANDSHAKE)
+ uint32 sup_wpa = 1;
+#endif /* BCMSUP_4WAY_HANDSHAKE */
+#if defined(CUSTOM_AMPDU_BA_WSIZE) || (defined(WLAIBSS) && \
+ defined(CUSTOM_IBSS_AMPDU_BA_WSIZE))
+ uint32 ampdu_ba_wsize = 0;
+#endif /* CUSTOM_AMPDU_BA_WSIZE ||(WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE) */
+#if defined(CUSTOM_AMPDU_MPDU)
+ int32 ampdu_mpdu = 0;
+#endif
+#if defined(CUSTOM_AMPDU_RELEASE)
+ int32 ampdu_release = 0;
+#endif
+#if defined(CUSTOM_AMSDU_AGGSF)
+ int32 amsdu_aggsf = 0;
+#endif
+
+#if defined(BCMSDIO) || defined(BCMDBUS)
+#ifdef PROP_TXSTATUS
+ int wlfc_enable = TRUE;
+#ifndef DISABLE_11N
+ uint32 hostreorder = 1;
+ uint wl_down = 1;
+#endif /* DISABLE_11N */
+#endif /* PROP_TXSTATUS */
+#endif /* defined(BCMSDIO) || defined(BCMDBUS) */
+
+#ifndef PCIE_FULL_DONGLE
+ uint32 wl_ap_isolate;
+#endif /* PCIE_FULL_DONGLE */
+ uint32 frameburst = CUSTOM_FRAMEBURST_SET;
+ uint wnm_bsstrans_resp = 0;
+#ifdef SUPPORT_SET_CAC
+ uint32 cac = 1;
+#endif /* SUPPORT_SET_CAC */
+#ifdef DHD_BUS_MEM_ACCESS
+ uint32 enable_memuse = 1;
+#endif /* DHD_BUS_MEM_ACCESS */
+#if defined(SUPPORT_2G_VHT) || defined(SUPPORT_5G_1024QAM_VHT)
+ uint32 vht_features = 0; /* init to 0, will be set based on each support */
+#endif /* SUPPORT_2G_VHT || SUPPORT_5G_1024QAM_VHT */
+
+#ifdef OEM_ANDROID
+#ifdef DHD_ENABLE_LPC
+ uint32 lpc = 1;
+#endif /* DHD_ENABLE_LPC */
+ uint power_mode = PM_FAST;
+#if defined(BCMSDIO)
+ uint32 dongle_align = DHD_SDALIGN;
+ uint32 glom = CUSTOM_GLOM_SETTING;
+#endif /* defined(BCMSDIO) */
+ uint bcn_timeout = CUSTOM_BCN_TIMEOUT;
+ uint scancache_enab = TRUE;
+#ifdef ENABLE_BCN_LI_BCN_WAKEUP
+ uint32 bcn_li_bcn = 1;
+#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
+ uint retry_max = CUSTOM_ASSOC_RETRY_MAX;
+ int scan_assoc_time = DHD_SCAN_ASSOC_ACTIVE_TIME;
+ int scan_unassoc_time = DHD_SCAN_UNASSOC_ACTIVE_TIME;
+ int scan_passive_time = DHD_SCAN_PASSIVE_TIME;
+ char buf[WLC_IOCTL_SMLEN];
+ char *ptr;
+ uint32 listen_interval = CUSTOM_LISTEN_INTERVAL; /* Default Listen Interval in Beacons */
+#if defined(DHD_8021X_DUMP) && defined(SHOW_LOGTRACE)
+ wl_el_tag_params_t *el_tag = NULL;
+#endif /* DHD_8021X_DUMP */
+#ifdef DHD_RANDMAC_LOGGING
+ uint privacy_mask = 0;
+#endif /* DHD_RANDMAC_LOGGING */
+#ifdef ROAM_ENABLE
+ uint roamvar = 0;
+ int roam_trigger[2] = {CUSTOM_ROAM_TRIGGER_SETTING, WLC_BAND_ALL};
+ int roam_scan_period[2] = {10, WLC_BAND_ALL};
+ int roam_delta[2] = {CUSTOM_ROAM_DELTA_SETTING, WLC_BAND_ALL};
+#ifdef ROAM_AP_ENV_DETECTION
+ int roam_env_mode = AP_ENV_INDETERMINATE;
+#endif /* ROAM_AP_ENV_DETECTION */
+#ifdef FULL_ROAMING_SCAN_PERIOD_60_SEC
+ int roam_fullscan_period = 60;
+#else /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
+ int roam_fullscan_period = 120;
+#endif /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
+#ifdef DISABLE_BCNLOSS_ROAM
+ uint roam_bcnloss_off = 1;
+#endif /* DISABLE_BCNLOSS_ROAM */
+#else
+#ifdef DISABLE_BUILTIN_ROAM
+ uint roamvar = 1;
+#endif /* DISABLE_BUILTIN_ROAM */
+#endif /* ROAM_ENABLE */
+
+#if defined(SOFTAP)
+ uint dtim = 1;
+#endif
+/* xxx andrey tmp fix for dk8000 build error */
+#if (defined(AP) && !defined(WLP2P)) || (!defined(AP) && defined(WL_CFG80211))
+ struct ether_addr p2p_ea;
+#endif
+#ifdef BCMCCX
+ uint32 ccx = 1;
+#endif
+#ifdef SOFTAP_UAPSD_OFF
+ uint32 wme_apsd = 0;
+#endif /* SOFTAP_UAPSD_OFF */
+#if (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC)
+ uint32 apsta = 1; /* Enable APSTA mode */
+#elif defined(SOFTAP_AND_GC)
+ uint32 apsta = 0;
+ int ap_mode = 1;
+#endif /* (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC) */
+#ifdef GET_CUSTOM_MAC_ENABLE
+ struct ether_addr ea_addr;
+ char hw_ether[62];
+#endif /* GET_CUSTOM_MAC_ENABLE */
+#ifdef OKC_SUPPORT
+ uint32 okc = 1;
+#endif
+
+#ifdef DISABLE_11N
+ uint32 nmode = 0;
+#endif /* DISABLE_11N */
+
+#if defined(DISABLE_11AC)
+ uint32 vhtmode = 0;
+#endif /* DISABLE_11AC */
+#ifdef USE_WL_TXBF
+ uint32 txbf = 1;
+#endif /* USE_WL_TXBF */
+#ifdef DISABLE_TXBFR
+ uint32 txbf_bfr_cap = 0;
+#endif /* DISABLE_TXBFR */
+#ifdef AMPDU_VO_ENABLE
+ /* XXX: Enabling VO AMPDU to reduce FER */
+ struct ampdu_tid_control tid;
+#endif
+#if defined(PROP_TXSTATUS)
+#ifdef USE_WFA_CERT_CONF
+ uint32 proptx = 0;
+#endif /* USE_WFA_CERT_CONF */
+#endif /* PROP_TXSTATUS */
+#ifdef DHD_SET_FW_HIGHSPEED
+ uint32 ack_ratio = 250;
+ uint32 ack_ratio_depth = 64;
+#endif /* DHD_SET_FW_HIGHSPEED */
+#ifdef DISABLE_11N_PROPRIETARY_RATES
+ uint32 ht_features = 0;
+#endif /* DISABLE_11N_PROPRIETARY_RATES */
+#ifdef CUSTOM_PSPRETEND_THR
+ uint32 pspretend_thr = CUSTOM_PSPRETEND_THR;
+#endif
+#ifdef CUSTOM_EVENT_PM_WAKE
+ uint32 pm_awake_thresh = CUSTOM_EVENT_PM_WAKE;
+#endif /* CUSTOM_EVENT_PM_WAKE */
+#ifdef DISABLE_PRUNED_SCAN
+ uint32 scan_features = 0;
+#endif /* DISABLE_PRUNED_SCAN */
+#ifdef BCMPCIE_OOB_HOST_WAKE
+ uint32 hostwake_oob = 0;
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+#ifdef EVENT_LOG_RATE_HC
+ /* threshold number of lines per second */
+#define EVENT_LOG_RATE_HC_THRESHOLD 1000
+ uint32 event_log_rate_hc = EVENT_LOG_RATE_HC_THRESHOLD;
+#endif /* EVENT_LOG_RATE_HC */
+#if defined(WBTEXT) && defined(WBTEXT_BTMDELTA)
+ uint32 btmdelta = WBTEXT_BTMDELTA;
+#endif /* WBTEXT && WBTEXT_BTMDELTA */
+#if defined(WBTEXT) && defined(RRM_BCNREQ_MAX_CHAN_TIME)
+ uint32 rrm_bcn_req_thrtl_win = RRM_BCNREQ_MAX_CHAN_TIME * 2;
+ uint32 rrm_bcn_req_max_off_chan_time = RRM_BCNREQ_MAX_CHAN_TIME;
+#endif /* WBTEXT && RRM_BCNREQ_MAX_CHAN_TIME */
+#endif /* OEM_ANDROID */
+
+ BCM_REFERENCE(iovbuf);
+ DHD_TRACE(("Enter %s\n", __FUNCTION__));
+
+#ifdef ARP_OFFLOAD_SUPPORT
+	/* arpoe will be applied from the suspend context */
+ dhd->arpoe_enable = TRUE;
+ dhd->arpol_configured = FALSE;
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+#ifdef OEM_ANDROID
+#ifdef PKT_FILTER_SUPPORT
+ dhd_pkt_filter_enable = TRUE;
+#ifdef APF
+ dhd->apf_set = FALSE;
+#endif /* APF */
+#endif /* PKT_FILTER_SUPPORT */
+ dhd->suspend_bcn_li_dtim = CUSTOM_SUSPEND_BCN_LI_DTIM;
+#ifdef ENABLE_MAX_DTIM_IN_SUSPEND
+ dhd->max_dtim_enable = TRUE;
+#else
+ dhd->max_dtim_enable = FALSE;
+#endif /* ENABLE_MAX_DTIM_IN_SUSPEND */
+ dhd->disable_dtim_in_suspend = FALSE;
+#ifdef CUSTOM_SET_OCLOFF
+ dhd->ocl_off = FALSE;
+#endif /* CUSTOM_SET_OCLOFF */
+#ifdef SUPPORT_SET_TID
+ dhd->tid_mode = SET_TID_OFF;
+ dhd->target_uid = 0;
+ dhd->target_tid = 0;
+#endif /* SUPPORT_SET_TID */
+#ifdef DHDTCPACK_SUPPRESS
+ dhd_tcpack_suppress_set(dhd, dhd->conf->tcpack_sup_mode);
+#endif
+ dhd->op_mode = 0;
+
+ /* clear AP flags */
+#if defined(CUSTOM_COUNTRY_CODE)
+ dhd->dhd_cflags &= ~WLAN_PLAT_AP_FLAG;
+#endif /* CUSTOM_COUNTRY_CODE */
+
+#ifdef CUSTOMER_HW4_DEBUG
+ if (!dhd_validate_chipid(dhd)) {
+		DHD_ERROR(("%s: CONFIG_BCMXXX and CHIP ID(%x) are mismatched\n",
+ __FUNCTION__, dhd_bus_chip_id(dhd)));
+#ifndef SUPPORT_MULTIPLE_CHIPS
+ ret = BCME_BADARG;
+ goto done;
+#endif /* !SUPPORT_MULTIPLE_CHIPS */
+ }
+#endif /* CUSTOMER_HW4_DEBUG */
+
+ /* query for 'ver' to get version info from firmware */
+ memset(buf, 0, sizeof(buf));
+ ptr = buf;
+ ret = dhd_iovar(dhd, 0, "ver", NULL, 0, (char *)&buf, sizeof(buf), FALSE);
+ if (ret < 0)
+ DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
+ else {
+ bcmstrtok(&ptr, "\n", 0);
+ /* Print fw version info */
+ strncpy(fw_version, buf, FW_VER_STR_LEN);
+ fw_version[FW_VER_STR_LEN-1] = '\0';
+ }
+
+#ifdef BOARD_HIKEY
+ /* Set op_mode as MFG_MODE if WLTEST is present in "wl ver" */
+ if (strstr(fw_version, "WLTEST") != NULL) {
+ DHD_ERROR(("%s: wl ver has WLTEST, setting op_mode as DHD_FLAG_MFG_MODE\n",
+ __FUNCTION__));
+ op_mode = DHD_FLAG_MFG_MODE;
+ }
+#endif /* BOARD_HIKEY */
+
+ if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
+ (op_mode == DHD_FLAG_MFG_MODE)) {
+ dhd->op_mode = DHD_FLAG_MFG_MODE;
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+ /* disable runtimePM by default in MFG mode. */
+ pm_runtime_disable(dhd_bus_to_dev(dhd->bus));
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+#ifdef DHD_PCIE_RUNTIMEPM
+	/* Disable RuntimePM in mfg mode */
+	DHD_DISABLE_RUNTIME_PM(dhd);
+	DHD_ERROR(("%s : Disable RuntimePM in Manufacturing Firmware\n", __FUNCTION__));
+#endif /* DHD_PCIE_RUNTIMEPM */
+	/* Check and adjust IOCTL response timeout for Manufacturing firmware */
+	dhd_os_set_ioctl_resp_timeout(MFG_IOCTL_RESP_TIMEOUT);
+	DHD_ERROR(("%s : Set IOCTL response time for Manufacturing Firmware\n",
+		__FUNCTION__));
+ } else {
+ dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
+ DHD_INFO(("%s : Set IOCTL response time.\n", __FUNCTION__));
+ }
+#ifdef BCMPCIE_OOB_HOST_WAKE
+ ret = dhd_iovar(dhd, 0, "bus:hostwake_oob", NULL, 0, (char *)&hostwake_oob,
+ sizeof(hostwake_oob), FALSE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: hostwake_oob IOVAR not present, proceed\n", __FUNCTION__));
+ } else {
+ if (hostwake_oob == 0) {
+ DHD_ERROR(("%s: hostwake_oob is not enabled in the NVRAM, STOP\n",
+ __FUNCTION__));
+ ret = BCME_UNSUPPORTED;
+ goto done;
+ } else {
+ DHD_ERROR(("%s: hostwake_oob enabled\n", __FUNCTION__));
+ }
+ }
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+
+#ifdef DNGL_AXI_ERROR_LOGGING
+ ret = dhd_iovar(dhd, 0, "axierror_logbuf_addr", NULL, 0, (char *)&dhd->axierror_logbuf_addr,
+ sizeof(dhd->axierror_logbuf_addr), FALSE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: axierror_logbuf_addr IOVAR not present, proceed\n", __FUNCTION__));
+ dhd->axierror_logbuf_addr = 0;
+ } else {
+ DHD_ERROR(("%s: axierror_logbuf_addr : 0x%x\n",
+ __FUNCTION__, dhd->axierror_logbuf_addr));
+ }
+#endif /* DNGL_AXI_ERROR_LOGGING */
+
+#ifdef EVENT_LOG_RATE_HC
+ ret = dhd_iovar(dhd, 0, "event_log_rate_hc", (char *)&event_log_rate_hc,
+ sizeof(event_log_rate_hc), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s event_log_rate_hc set failed %d\n", __FUNCTION__, ret));
+ } else {
+ DHD_ERROR(("%s event_log_rate_hc set with threshold:%d\n", __FUNCTION__,
+ event_log_rate_hc));
+ }
+#endif /* EVENT_LOG_RATE_HC */
+
+#ifdef GET_CUSTOM_MAC_ENABLE
+ memset(hw_ether, 0, sizeof(hw_ether));
+ ret = wifi_platform_get_mac_addr(dhd->info->adapter, hw_ether, 0);
+#ifdef GET_CUSTOM_MAC_FROM_CONFIG
+ if (!memcmp(&ether_null, &dhd->conf->hw_ether, ETHER_ADDR_LEN)) {
+ ret = 0;
+ } else
+#endif
+ if (!ret) {
+ memset(buf, 0, sizeof(buf));
+#ifdef GET_CUSTOM_MAC_FROM_CONFIG
+ memcpy(hw_ether, &dhd->conf->hw_ether, sizeof(dhd->conf->hw_ether));
+#endif
+ bcopy(hw_ether, ea_addr.octet, sizeof(struct ether_addr));
+ bcm_mkiovar("cur_etheraddr", (void *)&ea_addr, ETHER_ADDR_LEN, buf, sizeof(buf));
+ ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
+ if (ret < 0) {
+ memset(buf, 0, sizeof(buf));
+ bcm_mkiovar("hw_ether", hw_ether, sizeof(hw_ether), buf, sizeof(buf));
+ ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
+ if (ret) {
+ DHD_ERROR(("%s: can't set MAC address MAC="MACDBG", error=%d\n",
+ __FUNCTION__, MAC2STRDBG(hw_ether), ret));
+ prhex("MACPAD", &hw_ether[ETHER_ADDR_LEN], sizeof(hw_ether)-ETHER_ADDR_LEN);
+ ret = BCME_NOTUP;
+ goto done;
+ }
+ }
+ } else {
+ DHD_ERROR(("%s: can't get custom MAC address, ret=%d\n", __FUNCTION__, ret));
+ ret = BCME_NOTUP;
+ goto done;
+ }
+#endif /* GET_CUSTOM_MAC_ENABLE */
+ /* Get the default device MAC address directly from firmware */
+ ret = dhd_iovar(dhd, 0, "cur_etheraddr", NULL, 0, (char *)&buf, sizeof(buf), FALSE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: can't get MAC address , error=%d\n", __FUNCTION__, ret));
+ ret = BCME_NOTUP;
+ goto done;
+ }
+
+ DHD_ERROR(("%s: use firmware generated mac_address "MACDBG"\n",
+ __FUNCTION__, MAC2STRDBG(&buf)));
+
+#ifdef MACADDR_PROVISION_ENFORCED
+ if (ETHER_IS_LOCALADDR(buf)) {
+ DHD_ERROR(("%s: error! not using provision mac addr!\n", __FUNCTION__));
+ ret = BCME_BADADDR;
+ goto done;
+ }
+#endif /* MACADDR_PROVISION_ENFORCED */
+
+ /* Update public MAC address after reading from Firmware */
+ memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN);
+
+ if (ETHER_ISNULLADDR(dhd->mac.octet)) {
+ DHD_ERROR(("%s: NULL MAC address during pre-init\n", __FUNCTION__));
+ ret = BCME_BADADDR;
+ goto done;
+ } else {
+ (void)memcpy_s(dhd_linux_get_primary_netdev(dhd)->perm_addr, ETHER_ADDR_LEN,
+ dhd->mac.octet, ETHER_ADDR_LEN);
+ }
+#if defined(WL_STA_ASSOC_RAND) && defined(WL_STA_INIT_RAND)
+	/* Set cur_etheraddr of the primary interface to a randomized address to
+	 * ensure that any action frame transmission happens using the randomized
+	 * macaddr; the primary netdev->perm_addr will hold the original factory
+	 * MAC.
+	 */
+ {
+ if ((ret = dhd_update_rand_mac_addr(dhd)) < 0) {
+ DHD_ERROR(("%s: failed to set macaddress\n", __FUNCTION__));
+ goto done;
+ }
+ }
+#endif /* WL_STA_ASSOC_RAND && WL_STA_INIT_RAND */
+
+ if ((ret = dhd_apply_default_clm(dhd, dhd->clm_path)) < 0) {
+ DHD_ERROR(("%s: CLM set failed. Abort initialization.\n", __FUNCTION__));
+ goto done;
+ }
+
+	/* get capabilities from firmware */
+ {
+ uint32 cap_buf_size = sizeof(dhd->fw_capabilities);
+ memset(dhd->fw_capabilities, 0, cap_buf_size);
+ ret = dhd_iovar(dhd, 0, "cap", NULL, 0, dhd->fw_capabilities, (cap_buf_size - 1),
+ FALSE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: Get Capability failed (error=%d)\n",
+ __FUNCTION__, ret));
+ return 0;
+ }
+
+ memmove(&dhd->fw_capabilities[1], dhd->fw_capabilities, (cap_buf_size - 1));
+ dhd->fw_capabilities[0] = ' ';
+ dhd->fw_capabilities[cap_buf_size - 2] = ' ';
+ dhd->fw_capabilities[cap_buf_size - 1] = '\0';
+ }
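+	/* XXX: the shift-and-pad above brackets the capability string with a
+	 * leading and a trailing space, presumably so that space-delimited
+	 * lookups (e.g. searching for " ap " in fw_capabilities) cannot
+	 * falsely match inside a longer token such as "apsta".
+	 */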
+
+ if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_HOSTAP_MODE) ||
+ (op_mode == DHD_FLAG_HOSTAP_MODE)) {
+#ifdef SET_RANDOM_MAC_SOFTAP
+ uint rand_mac;
+#endif /* SET_RANDOM_MAC_SOFTAP */
+ dhd->op_mode = DHD_FLAG_HOSTAP_MODE;
+#ifdef PKT_FILTER_SUPPORT
+ if (dhd_conf_get_insuspend(dhd, AP_FILTER_IN_SUSPEND))
+ dhd_pkt_filter_enable = TRUE;
+ else
+ dhd_pkt_filter_enable = FALSE;
+#endif
+#ifdef SET_RANDOM_MAC_SOFTAP
+ SRANDOM32((uint)jiffies);
+ rand_mac = RANDOM32();
+ iovbuf[0] = (unsigned char)(vendor_oui >> 16) | 0x02; /* local admin bit */
+ iovbuf[1] = (unsigned char)(vendor_oui >> 8);
+ iovbuf[2] = (unsigned char)vendor_oui;
+ iovbuf[3] = (unsigned char)(rand_mac & 0x0F) | 0xF0;
+ iovbuf[4] = (unsigned char)(rand_mac >> 8);
+ iovbuf[5] = (unsigned char)(rand_mac >> 16);
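+		/* Resulting layout (illustrative): the 0x02 bit in octet 0 marks the
+		 * address as locally administered, so with a vendor_oui of, say,
+		 * 0x001018 the SoftAP MAC becomes 02:10:18:Fx:xx:xx, where the x
+		 * nibbles come from rand_mac and octet 3 is forced into the
+		 * 0xF0-0xFF range by the OR with 0xF0 above.
+		 */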
+
+ ret = dhd_iovar(dhd, 0, "cur_etheraddr", (char *)&iovbuf, ETHER_ADDR_LEN, NULL, 0,
+ TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret));
+ } else
+ memcpy(dhd->mac.octet, iovbuf, ETHER_ADDR_LEN);
+#endif /* SET_RANDOM_MAC_SOFTAP */
+#ifdef USE_DYNAMIC_F2_BLKSIZE
+ dhdsdio_func_blocksize(dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
+#endif /* USE_DYNAMIC_F2_BLKSIZE */
+#ifdef SUPPORT_AP_POWERSAVE
+ dhd_set_ap_powersave(dhd, 0, TRUE);
+#endif /* SUPPORT_AP_POWERSAVE */
+#ifdef SOFTAP_UAPSD_OFF
+ ret = dhd_iovar(dhd, 0, "wme_apsd", (char *)&wme_apsd, sizeof(wme_apsd), NULL, 0,
+ TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: set wme_apsd 0 fail (error=%d)\n",
+ __FUNCTION__, ret));
+ }
+#endif /* SOFTAP_UAPSD_OFF */
+
+ /* set AP flag for specific country code of SOFTAP */
+#if defined(CUSTOM_COUNTRY_CODE)
+ dhd->dhd_cflags |= WLAN_PLAT_AP_FLAG | WLAN_PLAT_NODFS_FLAG;
+#endif /* CUSTOM_COUNTRY_CODE */
+ } else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
+ (op_mode == DHD_FLAG_MFG_MODE)) {
+#if defined(ARP_OFFLOAD_SUPPORT)
+ dhd->arpoe_enable = FALSE;
+#endif /* ARP_OFFLOAD_SUPPORT */
+#ifdef PKT_FILTER_SUPPORT
+ dhd_pkt_filter_enable = FALSE;
+#endif /* PKT_FILTER_SUPPORT */
+ dhd->op_mode = DHD_FLAG_MFG_MODE;
+#ifdef USE_DYNAMIC_F2_BLKSIZE
+	/* XXX The 'wl counters' command triggers SDIO bus error
+	 * if F2 block size is greater than 128 bytes using 4354A1
+	 * manufacturing firmware. To avoid this problem, F2 block
+	 * size is set to 128 bytes only for DHD_FLAG_MFG_MODE.
+	 * There is no problem for other chipsets since big data
+	 * transactions through the SDIO bus do not happen during
+	 * manufacturing test.
+	 */
+ dhdsdio_func_blocksize(dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
+#endif /* USE_DYNAMIC_F2_BLKSIZE */
+#ifndef CUSTOM_SET_ANTNPM
+ if (FW_SUPPORTED(dhd, rsdb)) {
+ wl_config_t rsdb_mode;
+ memset(&rsdb_mode, 0, sizeof(rsdb_mode));
+ ret = dhd_iovar(dhd, 0, "rsdb_mode", (char *)&rsdb_mode, sizeof(rsdb_mode),
+ NULL, 0, TRUE);
+ if (ret < 0) {
+				DHD_ERROR(("%s Disabling rsdb_mode failed, ret=%d\n",
+ __FUNCTION__, ret));
+ }
+ }
+#endif /* !CUSTOM_SET_ANTNPM */
+ } else {
+ uint32 concurrent_mode = 0;
+ if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_P2P_MODE) ||
+ (op_mode == DHD_FLAG_P2P_MODE)) {
+#ifdef PKT_FILTER_SUPPORT
+ dhd_pkt_filter_enable = FALSE;
+#endif
+ dhd->op_mode = DHD_FLAG_P2P_MODE;
+ } else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_IBSS_MODE) ||
+ (op_mode == DHD_FLAG_IBSS_MODE)) {
+ dhd->op_mode = DHD_FLAG_IBSS_MODE;
+ } else
+ dhd->op_mode = DHD_FLAG_STA_MODE;
+#if defined(OEM_ANDROID) && !defined(AP) && defined(WLP2P)
+ if (dhd->op_mode != DHD_FLAG_IBSS_MODE &&
+ (concurrent_mode = dhd_get_concurrent_capabilites(dhd))) {
+ dhd->op_mode |= concurrent_mode;
+ }
+
+ /* Check if we are enabling p2p */
+ if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
+ ret = dhd_iovar(dhd, 0, "apsta", (char *)&apsta, sizeof(apsta), NULL, 0,
+ TRUE);
+ if (ret < 0)
+ DHD_ERROR(("%s APSTA for P2P failed ret= %d\n", __FUNCTION__, ret));
+
+#if defined(SOFTAP_AND_GC)
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_AP,
+ (char *)&ap_mode, sizeof(ap_mode), TRUE, 0)) < 0) {
+ DHD_ERROR(("%s WLC_SET_AP failed %d\n", __FUNCTION__, ret));
+ }
+#endif
+ memcpy(&p2p_ea, &dhd->mac, ETHER_ADDR_LEN);
+ ETHER_SET_LOCALADDR(&p2p_ea);
+ ret = dhd_iovar(dhd, 0, "p2p_da_override", (char *)&p2p_ea, sizeof(p2p_ea),
+ NULL, 0, TRUE);
+ if (ret < 0)
+ DHD_ERROR(("%s p2p_da_override ret= %d\n", __FUNCTION__, ret));
+ else
+ DHD_INFO(("dhd_preinit_ioctls: p2p_da_override succeeded\n"));
+ }
+#else
+ (void)concurrent_mode;
+#endif /* defined(OEM_ANDROID) && !defined(AP) && defined(WLP2P) */
+ }
+
+#ifdef DISABLE_PRUNED_SCAN
+ if (FW_SUPPORTED(dhd, rsdb)) {
+ ret = dhd_iovar(dhd, 0, "scan_features", (char *)&scan_features,
+ sizeof(scan_features), iovbuf, sizeof(iovbuf), FALSE);
+ if (ret < 0) {
+ if (ret == BCME_UNSUPPORTED) {
+ DHD_ERROR(("%s get scan_features, UNSUPPORTED\n",
+ __FUNCTION__));
+ } else {
+ DHD_ERROR(("%s get scan_features err(%d)\n",
+ __FUNCTION__, ret));
+ }
+
+ } else {
+ memcpy(&scan_features, iovbuf, 4);
+ scan_features &= ~RSDB_SCAN_DOWNGRADED_CH_PRUNE_ROAM;
+ ret = dhd_iovar(dhd, 0, "scan_features", (char *)&scan_features,
+ sizeof(scan_features), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s set scan_features err(%d)\n",
+ __FUNCTION__, ret));
+ }
+ }
+ }
+#endif /* DISABLE_PRUNED_SCAN */
+
+ DHD_ERROR(("Firmware up: op_mode=0x%04x, MAC="MACDBG"\n",
+ dhd->op_mode, MAC2STRDBG(dhd->mac.octet)));
+#if defined(DHD_BLOB_EXISTENCE_CHECK)
+ if (!dhd->is_blob)
+#endif /* DHD_BLOB_EXISTENCE_CHECK */
+ {
+ /* get a ccode and revision for the country code */
+#if defined(CUSTOM_COUNTRY_CODE)
+ get_customized_country_code(dhd->info->adapter, dhd->dhd_cspec.country_abbrev,
+ &dhd->dhd_cspec, dhd->dhd_cflags);
+#else
+ get_customized_country_code(dhd->info->adapter, dhd->dhd_cspec.country_abbrev,
+ &dhd->dhd_cspec);
+#endif /* CUSTOM_COUNTRY_CODE */
+ }
+
+#if defined(RXFRAME_THREAD) && defined(RXTHREAD_ONLYSTA)
+ if (dhd->op_mode == DHD_FLAG_HOSTAP_MODE)
+ dhd->info->rxthread_enabled = FALSE;
+ else
+ dhd->info->rxthread_enabled = TRUE;
+#endif
+ /* Set Country code */
+ if (dhd->dhd_cspec.ccode[0] != 0) {
+ ret = dhd_iovar(dhd, 0, "country", (char *)&dhd->dhd_cspec, sizeof(wl_country_t),
+ NULL, 0, TRUE);
+ if (ret < 0)
+ DHD_ERROR(("%s: country code setting failed\n", __FUNCTION__));
+ }
+
+#if defined(DISABLE_11AC)
+ ret = dhd_iovar(dhd, 0, "vhtmode", (char *)&vhtmode, sizeof(vhtmode), NULL, 0, TRUE);
+ if (ret < 0)
+ DHD_ERROR(("%s wl vhtmode 0 failed %d\n", __FUNCTION__, ret));
+#endif /* DISABLE_11AC */
+
+ /* Set Listen Interval */
+ ret = dhd_iovar(dhd, 0, "assoc_listen", (char *)&listen_interval, sizeof(listen_interval),
+ NULL, 0, TRUE);
+ if (ret < 0)
+ DHD_ERROR(("%s assoc_listen failed %d\n", __FUNCTION__, ret));
+
+#if defined(ROAM_ENABLE) || defined(DISABLE_BUILTIN_ROAM)
+#ifdef USE_WFA_CERT_CONF
+ if (sec_get_param_wfa_cert(dhd, SET_PARAM_ROAMOFF, &roamvar) == BCME_OK) {
+ DHD_ERROR(("%s: read roam_off param =%d\n", __FUNCTION__, roamvar));
+ }
+#endif /* USE_WFA_CERT_CONF */
+	/* Disable built-in roaming to allow the external supplicant to take care of roaming */
+ ret = dhd_iovar(dhd, 0, "roam_off", (char *)&roamvar, sizeof(roamvar), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s roam_off failed %d\n", __FUNCTION__, ret));
+ }
+#endif /* ROAM_ENABLE || DISABLE_BUILTIN_ROAM */
+#if defined(ROAM_ENABLE)
+#ifdef DISABLE_BCNLOSS_ROAM
+ ret = dhd_iovar(dhd, 0, "roam_bcnloss_off", (char *)&roam_bcnloss_off,
+ sizeof(roam_bcnloss_off), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s roam_bcnloss_off failed %d\n", __FUNCTION__, ret));
+ }
+#endif /* DISABLE_BCNLOSS_ROAM */
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_TRIGGER, roam_trigger,
+ sizeof(roam_trigger), TRUE, 0)) < 0)
+ DHD_ERROR(("%s: roam trigger set failed %d\n", __FUNCTION__, ret));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_SCAN_PERIOD, roam_scan_period,
+ sizeof(roam_scan_period), TRUE, 0)) < 0)
+ DHD_ERROR(("%s: roam scan period set failed %d\n", __FUNCTION__, ret));
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_DELTA, roam_delta,
+		sizeof(roam_delta), TRUE, 0)) < 0)
+		DHD_ERROR(("%s: roam delta set failed %d\n", __FUNCTION__, ret));
+ ret = dhd_iovar(dhd, 0, "fullroamperiod", (char *)&roam_fullscan_period,
+ sizeof(roam_fullscan_period), NULL, 0, TRUE);
+ if (ret < 0)
+ DHD_ERROR(("%s: roam fullscan period set failed %d\n", __FUNCTION__, ret));
+#ifdef ROAM_AP_ENV_DETECTION
+ if (roam_trigger[0] == WL_AUTO_ROAM_TRIGGER) {
+ if (dhd_iovar(dhd, 0, "roam_env_detection", (char *)&roam_env_mode,
+ sizeof(roam_env_mode), NULL, 0, TRUE) == BCME_OK)
+ dhd->roam_env_detection = TRUE;
+ else
+ dhd->roam_env_detection = FALSE;
+ }
+#endif /* ROAM_AP_ENV_DETECTION */
+#ifdef CONFIG_ROAM_RSSI_LIMIT
+ ret = dhd_roam_rssi_limit_set(dhd, CUSTOM_ROAMRSSI_2G, CUSTOM_ROAMRSSI_5G);
+ if (ret < 0) {
+ DHD_ERROR(("%s set roam_rssi_limit failed ret %d\n", __FUNCTION__, ret));
+ }
+#endif /* CONFIG_ROAM_RSSI_LIMIT */
+#ifdef CONFIG_ROAM_MIN_DELTA
+ ret = dhd_roam_min_delta_set(dhd, CUSTOM_ROAM_MIN_DELTA, CUSTOM_ROAM_MIN_DELTA);
+ if (ret < 0) {
+ DHD_ERROR(("%s set roam_min_delta failed ret %d\n", __FUNCTION__, ret));
+ }
+#endif /* CONFIG_ROAM_MIN_DELTA */
+#endif /* ROAM_ENABLE */
+
+#ifdef CUSTOM_EVENT_PM_WAKE
+ /* XXX need to check time value */
+ ret = dhd_iovar(dhd, 0, "const_awake_thresh", (char *)&pm_awake_thresh,
+ sizeof(pm_awake_thresh), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s set const_awake_thresh failed %d\n", __FUNCTION__, ret));
+ }
+#endif /* CUSTOM_EVENT_PM_WAKE */
+#ifdef OKC_SUPPORT
+ dhd_iovar(dhd, 0, "okc_enable", (char *)&okc, sizeof(okc), NULL, 0, TRUE);
+#endif
+#ifdef BCMCCX
+ dhd_iovar(dhd, 0, "ccx_enable", (char *)&ccx, sizeof(ccx), NULL, 0, TRUE);
+#endif /* BCMCCX */
+
+#ifdef WLTDLS
+ dhd->tdls_enable = FALSE;
+ dhd_tdls_set_mode(dhd, false);
+#endif /* WLTDLS */
+
+#ifdef DHD_ENABLE_LPC
+ /* Set lpc 1 */
+ ret = dhd_iovar(dhd, 0, "lpc", (char *)&lpc, sizeof(lpc), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s Set lpc failed %d\n", __FUNCTION__, ret));
+
+ if (ret == BCME_NOTDOWN) {
+ uint wl_down = 1;
+ ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN,
+ (char *)&wl_down, sizeof(wl_down), TRUE, 0);
+ DHD_ERROR(("%s lpc fail WL_DOWN : %d, lpc = %d\n", __FUNCTION__, ret, lpc));
+
+ ret = dhd_iovar(dhd, 0, "lpc", (char *)&lpc, sizeof(lpc), NULL, 0, TRUE);
+ DHD_ERROR(("%s Set lpc ret --> %d\n", __FUNCTION__, ret));
+ }
+ }
+#endif /* DHD_ENABLE_LPC */
+
+#ifdef WLADPS
+ if (dhd->op_mode & DHD_FLAG_STA_MODE) {
+ if ((ret = dhd_enable_adps(dhd, ADPS_ENABLE)) != BCME_OK &&
+ (ret != BCME_UNSUPPORTED)) {
+ DHD_ERROR(("%s dhd_enable_adps failed %d\n",
+ __FUNCTION__, ret));
+ }
+ }
+#endif /* WLADPS */
+
+#ifdef DHD_PM_CONTROL_FROM_FILE
+#ifdef CUSTOMER_HW10
+ dhd_control_pm(dhd, &power_mode);
+#else
+ sec_control_pm(dhd, &power_mode);
+#endif /* CUSTOMER_HW10 */
+#else
+ /* Set PowerSave mode */
+ (void) dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, sizeof(power_mode), TRUE, 0);
+#endif /* DHD_PM_CONTROL_FROM_FILE */
+
+#if defined(BCMSDIO)
+ /* Match Host and Dongle rx alignment */
+ ret = dhd_iovar(dhd, 0, "bus:txglomalign", (char *)&dongle_align, sizeof(dongle_align),
+ NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s set bus:txglomalign failed %d\n", __FUNCTION__, ret));
+ }
+
+#ifdef USE_WFA_CERT_CONF
+ if (sec_get_param_wfa_cert(dhd, SET_PARAM_BUS_TXGLOM_MODE, &glom) == BCME_OK) {
+ DHD_ERROR(("%s, read txglom param =%d\n", __FUNCTION__, glom));
+ }
+#endif /* USE_WFA_CERT_CONF */
+ if (glom != DEFAULT_GLOM_VALUE) {
+ DHD_INFO(("%s set glom=0x%X\n", __FUNCTION__, glom));
+ ret = dhd_iovar(dhd, 0, "bus:txglom", (char *)&glom, sizeof(glom), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s set bus:txglom failed %d\n", __FUNCTION__, ret));
+ }
+ }
+#endif /* defined(BCMSDIO) */
+
+	/* Set up a timeout to report link down when beacons are lost and roaming is off */
+ ret = dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout, sizeof(bcn_timeout),
+ NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s set bcn_timeout failed %d\n", __FUNCTION__, ret));
+ }
+
+	/* Set up assoc_retry_max so the dongle retries association to the target AP */
+ ret = dhd_iovar(dhd, 0, "assoc_retry_max", (char *)&retry_max, sizeof(retry_max),
+ NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s set assoc_retry_max failed %d\n", __FUNCTION__, ret));
+ }
+
+#if defined(AP) && !defined(WLP2P)
+ ret = dhd_iovar(dhd, 0, "apsta", (char *)&apsta, sizeof(apsta), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s set apsta failed %d\n", __FUNCTION__, ret));
+ }
+
+#endif /* defined(AP) && !defined(WLP2P) */
+
+#ifdef MIMO_ANT_SETTING
+ dhd_sel_ant_from_file(dhd);
+#endif /* MIMO_ANT_SETTING */
+
+#if defined(OEM_ANDROID) && defined(SOFTAP)
+ if (ap_fw_loaded == TRUE) {
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_DTIMPRD, (char *)&dtim, sizeof(dtim), TRUE, 0);
+ }
+#endif /* defined(OEM_ANDROID) && defined(SOFTAP) */
+
+#if defined(KEEP_ALIVE)
+ {
+		/* Set Keep Alive: be sure to use FW with -keepalive */
+ int res;
+
+#if defined(OEM_ANDROID) && defined(SOFTAP)
+ if (ap_fw_loaded == FALSE)
+#endif /* defined(OEM_ANDROID) && defined(SOFTAP) */
+ if (!(dhd->op_mode &
+ (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))) {
+ if ((res = dhd_keep_alive_onoff(dhd)) < 0)
+				DHD_ERROR(("%s set keepalive failed %d\n",
+ __FUNCTION__, res));
+ }
+ }
+#endif /* defined(KEEP_ALIVE) */
+
+#ifdef USE_WL_TXBF
+ ret = dhd_iovar(dhd, 0, "txbf", (char *)&txbf, sizeof(txbf), NULL, 0, TRUE);
+ if (ret < 0)
+ DHD_ERROR(("%s Set txbf failed %d\n", __FUNCTION__, ret));
+
+#endif /* USE_WL_TXBF */
+
+ ret = dhd_iovar(dhd, 0, "scancache", (char *)&scancache_enab, sizeof(scancache_enab), NULL,
+ 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s Set scancache failed %d\n", __FUNCTION__, ret));
+ }
+
+#else /* OEM_ANDROID */
+ if ((ret = dhd_apply_default_clm(dhd, clm_path)) < 0) {
+ DHD_ERROR(("%s: CLM set failed. Abort initialization.\n", __FUNCTION__));
+ goto done;
+ }
+
+#if defined(KEEP_ALIVE)
+ if (!(dhd->op_mode &
+ (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))) {
+ if ((ret = dhd_keep_alive_onoff(dhd)) < 0)
+			DHD_ERROR(("%s set keepalive failed %d\n",
+ __FUNCTION__, ret));
+ }
+#endif
+
+	/* get capabilities from firmware */
+ memset(dhd->fw_capabilities, 0, sizeof(dhd->fw_capabilities));
+ ret = dhd_iovar(dhd, 0, "cap", NULL, 0, dhd->fw_capabilities, sizeof(dhd->fw_capabilities),
+ FALSE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: Get Capability failed (error=%d)\n",
+ __FUNCTION__, ret));
+ goto done;
+ }
+#endif /* OEM_ANDROID */
+
+ ret = dhd_iovar(dhd, 0, "event_log_max_sets", NULL, 0, (char *)&event_log_max_sets,
+ sizeof(event_log_max_sets), FALSE);
+ if (ret == BCME_OK) {
+ dhd->event_log_max_sets = event_log_max_sets;
+ } else {
+ dhd->event_log_max_sets = NUM_EVENT_LOG_SETS;
+ }
+	/* Make sure max_sets is set first with wmb and then sets_queried;
+	 * these will be used when parsing the logsets in reverse order.
+	 */
+ OSL_SMP_WMB();
+ dhd->event_log_max_sets_queried = TRUE;
+ DHD_ERROR(("%s: event_log_max_sets: %d ret: %d\n",
+ __FUNCTION__, dhd->event_log_max_sets, ret));
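+	/* XXX: the OSL_SMP_WMB() above orders the two stores, so a reader that
+	 * sees event_log_max_sets_queried == TRUE also sees the final
+	 * event_log_max_sets value, assuming the reader pairs this with a
+	 * matching read barrier before consuming max_sets.
+	 */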
+#ifdef DHD_BUS_MEM_ACCESS
+ ret = dhd_iovar(dhd, 0, "enable_memuse", (char *)&enable_memuse,
+ sizeof(enable_memuse), iovbuf, sizeof(iovbuf), FALSE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: enable_memuse is failed ret=%d\n",
+ __FUNCTION__, ret));
+ } else {
+ DHD_ERROR(("%s: enable_memuse = %d\n",
+ __FUNCTION__, enable_memuse));
+ }
+#endif /* DHD_BUS_MEM_ACCESS */
+
+#ifdef DISABLE_TXBFR
+ ret = dhd_iovar(dhd, 0, "txbf_bfr_cap", (char *)&txbf_bfr_cap, sizeof(txbf_bfr_cap), NULL,
+ 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s Clear txbf_bfr_cap failed %d\n", __FUNCTION__, ret));
+ }
+#endif /* DISABLE_TXBFR */
+
+#ifdef USE_WFA_CERT_CONF
+#ifdef USE_WL_FRAMEBURST
+ if (sec_get_param_wfa_cert(dhd, SET_PARAM_FRAMEBURST, &frameburst) == BCME_OK) {
+ DHD_ERROR(("%s, read frameburst param=%d\n", __FUNCTION__, frameburst));
+ }
+#endif /* USE_WL_FRAMEBURST */
+ g_frameburst = frameburst;
+#endif /* USE_WFA_CERT_CONF */
+#ifdef DISABLE_WL_FRAMEBURST_SOFTAP
+	/* Disable framebursting for SoftAP */
+ if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
+ frameburst = 0;
+ }
+#endif /* DISABLE_WL_FRAMEBURST_SOFTAP */
+ /* Set frameburst to value */
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_FAKEFRAG, (char *)&frameburst,
+ sizeof(frameburst), TRUE, 0)) < 0) {
+ DHD_INFO(("%s frameburst not supported %d\n", __FUNCTION__, ret));
+ }
+#ifdef DHD_SET_FW_HIGHSPEED
+ /* Set ack_ratio */
+ ret = dhd_iovar(dhd, 0, "ack_ratio", (char *)&ack_ratio, sizeof(ack_ratio), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s Set ack_ratio failed %d\n", __FUNCTION__, ret));
+ }
+
+ /* Set ack_ratio_depth */
+ ret = dhd_iovar(dhd, 0, "ack_ratio_depth", (char *)&ack_ratio_depth,
+ sizeof(ack_ratio_depth), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s Set ack_ratio_depth failed %d\n", __FUNCTION__, ret));
+ }
+#endif /* DHD_SET_FW_HIGHSPEED */
+
+ iov_buf = (char*)MALLOC(dhd->osh, WLC_IOCTL_SMLEN);
+ if (iov_buf == NULL) {
+ DHD_ERROR(("failed to allocate %d bytes for iov_buf\n", WLC_IOCTL_SMLEN));
+ ret = BCME_NOMEM;
+ goto done;
+ }
+
+ BCM_REFERENCE(ret2);
+
+#ifdef WLAIBSS
+ /* Apply AIBSS configurations */
+ if ((ret = dhd_preinit_aibss_ioctls(dhd, iov_buf)) != BCME_OK) {
+ DHD_ERROR(("%s dhd_preinit_aibss_ioctls failed %d\n",
+ __FUNCTION__, ret));
+ goto done;
+ }
+#endif /* WLAIBSS */
+
+#if defined(CUSTOM_AMPDU_BA_WSIZE) || (defined(WLAIBSS) && \
+ defined(CUSTOM_IBSS_AMPDU_BA_WSIZE))
+ /* Set ampdu ba wsize to 64 or 16 */
+#ifdef CUSTOM_AMPDU_BA_WSIZE
+ ampdu_ba_wsize = CUSTOM_AMPDU_BA_WSIZE;
+#endif
+#if defined(WLAIBSS) && defined(CUSTOM_IBSS_AMPDU_BA_WSIZE)
+ if (dhd->op_mode == DHD_FLAG_IBSS_MODE)
+ ampdu_ba_wsize = CUSTOM_IBSS_AMPDU_BA_WSIZE;
+#endif /* WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE */
+ if (ampdu_ba_wsize != 0) {
+ ret = dhd_iovar(dhd, 0, "ampdu_ba_wsize", (char *)&ampdu_ba_wsize,
+ sizeof(ampdu_ba_wsize), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s Set ampdu_ba_wsize to %d failed %d\n",
+ __FUNCTION__, ampdu_ba_wsize, ret));
+ }
+ }
+#endif /* CUSTOM_AMPDU_BA_WSIZE || (WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE) */
+
+#if defined(CUSTOM_AMPDU_MPDU)
+ ampdu_mpdu = CUSTOM_AMPDU_MPDU;
+ if (ampdu_mpdu != 0 && (ampdu_mpdu <= ampdu_ba_wsize)) {
+ ret = dhd_iovar(dhd, 0, "ampdu_mpdu", (char *)&ampdu_mpdu, sizeof(ampdu_mpdu),
+ NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s Set ampdu_mpdu to %d failed %d\n",
+ __FUNCTION__, CUSTOM_AMPDU_MPDU, ret));
+ }
+ }
+#endif /* CUSTOM_AMPDU_MPDU */
+
+#if defined(CUSTOM_AMPDU_RELEASE)
+ ampdu_release = CUSTOM_AMPDU_RELEASE;
+ if (ampdu_release != 0 && (ampdu_release <= ampdu_ba_wsize)) {
+ ret = dhd_iovar(dhd, 0, "ampdu_release", (char *)&ampdu_release,
+ sizeof(ampdu_release), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s Set ampdu_release to %d failed %d\n",
+ __FUNCTION__, CUSTOM_AMPDU_RELEASE, ret));
+ }
+ }
+#endif /* CUSTOM_AMPDU_RELEASE */
+
+#if defined(CUSTOM_AMSDU_AGGSF)
+ amsdu_aggsf = CUSTOM_AMSDU_AGGSF;
+ if (amsdu_aggsf != 0) {
+ ret = dhd_iovar(dhd, 0, "amsdu_aggsf", (char *)&amsdu_aggsf, sizeof(amsdu_aggsf),
+ NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s Set amsdu_aggsf to %d failed %d\n",
+ __FUNCTION__, CUSTOM_AMSDU_AGGSF, ret));
+ }
+ }
+#endif /* CUSTOM_AMSDU_AGGSF */
+
+#if defined(BCMSUP_4WAY_HANDSHAKE)
+ /* Read 4-way handshake requirements */
+ if (dhd_use_idsup == 1) {
+ ret = dhd_iovar(dhd, 0, "sup_wpa", (char *)&sup_wpa, sizeof(sup_wpa),
+ (char *)&iovbuf, sizeof(iovbuf), FALSE);
+ /* sup_wpa iovar returns NOTREADY status on some platforms using modularized
+ * in-dongle supplicant.
+ */
+ if (ret >= 0 || ret == BCME_NOTREADY)
+ dhd->fw_4way_handshake = TRUE;
+ DHD_TRACE(("4-way handshake mode is: %d\n", dhd->fw_4way_handshake));
+ }
+#endif /* BCMSUP_4WAY_HANDSHAKE */
+#if defined(SUPPORT_2G_VHT) || defined(SUPPORT_5G_1024QAM_VHT)
+ ret = dhd_iovar(dhd, 0, "vht_features", (char *)&vht_features, sizeof(vht_features),
+ (char *)&vht_features, sizeof(vht_features), FALSE);
+ if (ret < 0) {
+ DHD_ERROR(("%s vht_features get failed %d\n", __FUNCTION__, ret));
+ vht_features = 0;
+ } else {
+#ifdef SUPPORT_2G_VHT
+ vht_features |= 0x3; /* 2G support */
+#endif /* SUPPORT_2G_VHT */
+#ifdef SUPPORT_5G_1024QAM_VHT
+ vht_features |= 0x6; /* 5G 1024 QAM support */
+#endif /* SUPPORT_5G_1024QAM_VHT */
+ }
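+	/* Mask arithmetic: 0x3 (bits 0-1) and 0x6 (bits 1-2) overlap in bit 1,
+	 * so a build with both SUPPORT_2G_VHT and SUPPORT_5G_1024QAM_VHT ends
+	 * up writing vht_features = 0x7; the bit meanings are as given in the
+	 * comments above.
+	 */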
+ if (vht_features) {
+ ret = dhd_iovar(dhd, 0, "vht_features", (char *)&vht_features, sizeof(vht_features),
+ NULL, 0, TRUE);
+ if (ret < 0) {
+ if (ret == BCME_NOTDOWN) {
+ uint wl_down = 1;
+ ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN,
+ (char *)&wl_down, sizeof(wl_down), TRUE, 0);
+ DHD_ERROR(("%s vht_features fail WL_DOWN : %d,"
+ " vht_features = 0x%x\n",
+ __FUNCTION__, ret, vht_features));
+
+ ret = dhd_iovar(dhd, 0, "vht_features", (char *)&vht_features,
+ sizeof(vht_features), NULL, 0, TRUE);
+
+ DHD_ERROR(("%s vht_features set. ret --> %d\n", __FUNCTION__, ret));
+ }
+ if (ret != BCME_BADOPTION) {
+ DHD_ERROR(("%s vht_features set failed %d\n", __FUNCTION__, ret));
+ } else {
+ DHD_INFO(("%s vht_features ret(%d) - need to check BANDLOCK\n",
+ __FUNCTION__, ret));
+ }
+ }
+ }
+#endif /* SUPPORT_2G_VHT || SUPPORT_5G_1024QAM_VHT */
+#ifdef DISABLE_11N_PROPRIETARY_RATES
+ ret = dhd_iovar(dhd, 0, "ht_features", (char *)&ht_features, sizeof(ht_features), NULL, 0,
+ TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s ht_features set failed %d\n", __FUNCTION__, ret));
+ }
+#endif /* DISABLE_11N_PROPRIETARY_RATES */
+#if defined(DISABLE_HE_ENAB) || defined(CUSTOM_CONTROL_HE_ENAB)
+#if defined(DISABLE_HE_ENAB)
+ /* XXX DISABLE_HE_ENAB has higher priority than CUSTOM_CONTROL_HE_ENAB */
+ control_he_enab = 0;
+#endif /* DISABLE_HE_ENAB */
+ dhd_control_he_enab(dhd, control_he_enab);
+#endif /* DISABLE_HE_ENAB || CUSTOM_CONTROL_HE_ENAB */
+
+#ifdef CUSTOM_PSPRETEND_THR
+	/* Set the ps-pretend threshold used in AP mode */
+ ret = dhd_iovar(dhd, 0, "pspretend_threshold", (char *)&pspretend_thr,
+ sizeof(pspretend_thr), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s pspretend_threshold for HostAPD failed %d\n",
+ __FUNCTION__, ret));
+ }
+#endif
+
+	/* XXX Enable firmware key buffering before sending 4-way M4 */
+ ret = dhd_iovar(dhd, 0, "buf_key_b4_m4", (char *)&buf_key_b4_m4, sizeof(buf_key_b4_m4),
+ NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s buf_key_b4_m4 set failed %d\n", __FUNCTION__, ret));
+ }
+#ifdef SUPPORT_SET_CAC
+ ret = dhd_iovar(dhd, 0, "cac", (char *)&cac, sizeof(cac), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s Failed to set cac to %d, %d\n", __FUNCTION__, cac, ret));
+ }
+#endif /* SUPPORT_SET_CAC */
+	/* make up the event mask ext message iovar for events above 128 */
+ msglen = WL_EVENTING_MASK_EXT_LEN + EVENTMSGS_EXT_STRUCT_SIZE;
+ eventmask_msg = (eventmsgs_ext_t*)MALLOC(dhd->osh, msglen);
+ if (eventmask_msg == NULL) {
+ DHD_ERROR(("failed to allocate %d bytes for event_msg_ext\n", msglen));
+ ret = BCME_NOMEM;
+ goto done;
+ }
+ bzero(eventmask_msg, msglen);
+ eventmask_msg->ver = EVENTMSGS_VER;
+ eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY;
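+	/* ROUNDUP(WLC_E_LAST, NBBY)/NBBY is the number of bytes needed to hold
+	 * one bit per event; with NBBY == 8 and, for example, a WLC_E_LAST of
+	 * 160, this works out to a 20-byte mask.
+	 */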
+
+ /* Read event_msgs_ext mask */
+ ret = dhd_iovar(dhd, 0, "event_msgs_ext", (char *)eventmask_msg, msglen, iov_buf,
+ WLC_IOCTL_SMLEN, FALSE);
+
+ /* event_msgs_ext must be supported */
+ if (ret != BCME_OK) {
+ DHD_ERROR(("%s read event mask ext failed %d\n", __FUNCTION__, ret));
+ goto done;
+ }
+
+ bcopy(iov_buf, eventmask_msg, msglen);
+	/* point at the event mask carried inside the returned message */
+ mask = eventmask_msg->mask;
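+	/* Read-modify-write: the current mask read back above is updated bit by
+	 * bit below and then written back with EVENTMSGS_SET_MASK, so firmware
+	 * defaults outside the bits touched here are preserved.
+	 */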
+
+ /* Setup event_msgs */
+ setbit(mask, WLC_E_SET_SSID);
+ setbit(mask, WLC_E_PRUNE);
+ setbit(mask, WLC_E_AUTH);
+ setbit(mask, WLC_E_AUTH_IND);
+ setbit(mask, WLC_E_ASSOC);
+ setbit(mask, WLC_E_REASSOC);
+ setbit(mask, WLC_E_REASSOC_IND);
+ if (!(dhd->op_mode & DHD_FLAG_IBSS_MODE))
+ setbit(mask, WLC_E_DEAUTH);
+ setbit(mask, WLC_E_DEAUTH_IND);
+ setbit(mask, WLC_E_DISASSOC_IND);
+ setbit(mask, WLC_E_DISASSOC);
+ setbit(mask, WLC_E_JOIN);
+ setbit(mask, WLC_E_START);
+ setbit(mask, WLC_E_ASSOC_IND);
+ setbit(mask, WLC_E_PSK_SUP);
+ setbit(mask, WLC_E_LINK);
+ setbit(mask, WLC_E_MIC_ERROR);
+ setbit(mask, WLC_E_ASSOC_REQ_IE);
+ setbit(mask, WLC_E_ASSOC_RESP_IE);
+#ifdef LIMIT_BORROW
+ setbit(mask, WLC_E_ALLOW_CREDIT_BORROW);
+#endif
+#ifndef WL_CFG80211
+ setbit(mask, WLC_E_PMKID_CACHE);
+// setbit(mask, WLC_E_TXFAIL); // terence 20181106: remove unnecessary event
+#endif
+ setbit(mask, WLC_E_JOIN_START);
+// setbit(mask, WLC_E_SCAN_COMPLETE); // terence 20150628: remove redundant event
+#ifdef DHD_DEBUG
+ setbit(mask, WLC_E_SCAN_CONFIRM_IND);
+#endif
+#ifdef PNO_SUPPORT
+ setbit(mask, WLC_E_PFN_NET_FOUND);
+ setbit(mask, WLC_E_PFN_BEST_BATCHING);
+ setbit(mask, WLC_E_PFN_BSSID_NET_FOUND);
+ setbit(mask, WLC_E_PFN_BSSID_NET_LOST);
+#endif /* PNO_SUPPORT */
+ /* enable dongle roaming event */
+#ifdef WL_CFG80211
+#if !defined(ROAM_EVT_DISABLE)
+ setbit(mask, WLC_E_ROAM);
+#endif /* !ROAM_EVT_DISABLE */
+ setbit(mask, WLC_E_BSSID);
+#endif /* WL_CFG80211 */
+#ifdef BCMCCX
+ setbit(mask, WLC_E_ADDTS_IND);
+ setbit(mask, WLC_E_DELTS_IND);
+#endif /* BCMCCX */
+#ifdef WLTDLS
+ setbit(mask, WLC_E_TDLS_PEER_EVENT);
+#endif /* WLTDLS */
+#ifdef WL_ESCAN
+ setbit(mask, WLC_E_ESCAN_RESULT);
+#endif /* WL_ESCAN */
+#ifdef CSI_SUPPORT
+ setbit(mask, WLC_E_CSI);
+#endif /* CSI_SUPPORT */
+#ifdef RTT_SUPPORT
+ setbit(mask, WLC_E_PROXD);
+#endif /* RTT_SUPPORT */
+#if !defined(WL_CFG80211) && !defined(OEM_ANDROID)
+ setbit(mask, WLC_E_ESCAN_RESULT);
+#endif
+#ifdef WL_CFG80211
+ setbit(mask, WLC_E_ESCAN_RESULT);
+ setbit(mask, WLC_E_AP_STARTED);
+ setbit(mask, WLC_E_ACTION_FRAME_RX);
+ if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
+ setbit(mask, WLC_E_P2P_DISC_LISTEN_COMPLETE);
+ }
+#endif /* WL_CFG80211 */
+#ifdef WLAIBSS
+ setbit(mask, WLC_E_AIBSS_TXFAIL);
+#endif /* WLAIBSS */
+
+#if defined(SHOW_LOGTRACE) && defined(LOGTRACE_FROM_FILE)
+ if (dhd_logtrace_from_file(dhd)) {
+ setbit(mask, WLC_E_TRACE);
+ } else {
+ clrbit(mask, WLC_E_TRACE);
+ }
+#elif defined(SHOW_LOGTRACE)
+ setbit(mask, WLC_E_TRACE);
+#else
+ clrbit(mask, WLC_E_TRACE);
+#endif /* defined(SHOW_LOGTRACE) && defined(LOGTRACE_FROM_FILE) */
+
+ setbit(mask, WLC_E_CSA_COMPLETE_IND);
+#ifdef DHD_WMF
+ setbit(mask, WLC_E_PSTA_PRIMARY_INTF_IND);
+#endif
+#ifdef CUSTOM_EVENT_PM_WAKE
+ setbit(mask, WLC_E_EXCESS_PM_WAKE_EVENT);
+#endif /* CUSTOM_EVENT_PM_WAKE */
+#ifdef DHD_LOSSLESS_ROAMING
+ setbit(mask, WLC_E_ROAM_PREP);
+#endif
+ /* nan events */
+ setbit(mask, WLC_E_NAN);
+#if defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING)
+ dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_LLR_MAP);
+#endif /* defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING) */
+
+#if defined(BCMPCIE) && defined(EAPOL_PKT_PRIO)
+ dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_LLR_MAP);
+#endif /* defined(BCMPCIE) && defined(EAPOL_PKT_PRIO) */
+
+#ifdef RSSI_MONITOR_SUPPORT
+ setbit(mask, WLC_E_RSSI_LQM);
+#endif /* RSSI_MONITOR_SUPPORT */
+#ifdef GSCAN_SUPPORT
+ setbit(mask, WLC_E_PFN_GSCAN_FULL_RESULT);
+ setbit(mask, WLC_E_PFN_SCAN_COMPLETE);
+ setbit(mask, WLC_E_PFN_SSID_EXT);
+ setbit(mask, WLC_E_ROAM_EXP_EVENT);
+#endif /* GSCAN_SUPPORT */
+ setbit(mask, WLC_E_RSSI_LQM);
+#ifdef BT_WIFI_HANDOVER
+ setbit(mask, WLC_E_BT_WIFI_HANDOVER_REQ);
+#endif /* BT_WIFI_HANDOVER */
+#ifdef DBG_PKT_MON
+ setbit(mask, WLC_E_ROAM_PREP);
+#endif /* DBG_PKT_MON */
+#ifdef WL_NATOE
+ setbit(mask, WLC_E_NATOE_NFCT);
+#endif /* WL_NATOE */
+#ifdef BCM_ROUTER_DHD
+ setbit(mask, WLC_E_DPSTA_INTF_IND);
+#endif /* BCM_ROUTER_DHD */
+ setbit(mask, WLC_E_SLOTTED_BSS_PEER_OP);
+#ifdef WL_BCNRECV
+ setbit(mask, WLC_E_BCNRECV_ABORTED);
+#endif /* WL_BCNRECV */
+#ifdef WL_MBO
+ setbit(mask, WLC_E_MBO);
+#endif /* WL_MBO */
+#ifdef WL_CLIENT_SAE
+ setbit(mask, WLC_E_JOIN_START);
+#endif /* WL_CLIENT_SAE */
+#ifdef WL_CAC_TS
+ setbit(mask, WLC_E_ADDTS_IND);
+ setbit(mask, WLC_E_DELTS_IND);
+#endif /* WL_CAC_TS */
+#ifdef CUSTOMER_HW6
+ setbit(mask, WLC_E_COUNTRY_CODE_CHANGED);
+#endif /* CUSTOMER_HW6 */
+
+ /* Write updated Event mask */
+ eventmask_msg->ver = EVENTMSGS_VER;
+ eventmask_msg->command = EVENTMSGS_SET_MASK;
+ eventmask_msg->len = WL_EVENTING_MASK_EXT_LEN;
+ ret = dhd_iovar(dhd, 0, "event_msgs_ext", (char *)eventmask_msg, msglen, NULL, 0,
+ TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s write event mask ext failed %d\n", __FUNCTION__, ret));
+ goto done;
+ }
+
+#if defined(DHD_8021X_DUMP) && defined(SHOW_LOGTRACE)
+ /* Enabling event log trace for EAP events */
+ el_tag = (wl_el_tag_params_t *)MALLOC(dhd->osh, sizeof(wl_el_tag_params_t));
+ if (el_tag == NULL) {
+		DHD_ERROR(("failed to allocate %d bytes for el_tag\n",
+			(int)sizeof(wl_el_tag_params_t)));
+ ret = BCME_NOMEM;
+ goto done;
+ }
+ el_tag->tag = EVENT_LOG_TAG_4WAYHANDSHAKE;
+ el_tag->set = 1;
+ el_tag->flags = EVENT_LOG_TAG_FLAG_LOG;
+ ret = dhd_iovar(dhd, 0, "event_log_tag_control", (char *)el_tag, sizeof(*el_tag), NULL,
+ 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s set event_log_tag_control fail %d\n", __FUNCTION__, ret));
+ }
+#endif /* DHD_8021X_DUMP */
+#ifdef DHD_RANDMAC_LOGGING
+ if (FW_SUPPORTED((dhd), event_log)) {
+ if (dhd_iovar(dhd, 0, "privacy_mask", (char *)&privacy_mask, sizeof(privacy_mask),
+ NULL, 0, TRUE) < 0) {
+ DHD_ERROR(("failed to set privacy mask\n"));
+ }
+ } else {
+		/* Don't enable the feature, to prevent macaddr printing in clear text */
+ DHD_ERROR(("skip privacy_mask set. event_log not enabled\n"));
+ }
+#endif /* DHD_RANDMAC_LOGGING */
+
+#ifdef OEM_ANDROID
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_CHANNEL_TIME, (char *)&scan_assoc_time,
+ sizeof(scan_assoc_time), TRUE, 0);
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_UNASSOC_TIME, (char *)&scan_unassoc_time,
+ sizeof(scan_unassoc_time), TRUE, 0);
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_PASSIVE_TIME, (char *)&scan_passive_time,
+ sizeof(scan_passive_time), TRUE, 0);
+
+#ifdef ARP_OFFLOAD_SUPPORT
+ DHD_ERROR(("arp_enable:%d arp_ol:%d\n",
+ dhd->arpoe_enable, dhd->arpol_configured));
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+#ifdef PKT_FILTER_SUPPORT
+	/* Set up default definitions for pktfilter, enabled in suspend */
+ if (dhd_master_mode) {
+ dhd->pktfilter_count = 6;
+ dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = NULL;
+ if (!FW_SUPPORTED(dhd, pf6)) {
+ dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = NULL;
+ dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL;
+ } else {
+			/* Pkt filter TYPE 6: immediately discard IPv4/IPv6 multicast packets */
+ dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = DISCARD_IPV4_MCAST;
+ dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = DISCARD_IPV6_MCAST;
+ }
+ /* apply APP pktfilter */
+ dhd->pktfilter[DHD_ARP_FILTER_NUM] = "105 0 0 12 0xFFFF 0x0806";
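+		/* Filter string layout, as consumed by the pkt_filter parser:
+		 * "<id> <polarity> <type> <offset> <bitmask> <pattern>". The ARP
+		 * rule above matches offset 12 (the EtherType field) against
+		 * 0x0806 with a 16-bit mask; the field names here are inferred
+		 * from the existing rules.
+		 */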
+
+#ifdef BLOCK_IPV6_PACKET
+ /* Setup filter to allow only IPv4 unicast frames */
+ dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 "
+ HEX_PREF_STR UNI_FILTER_STR ZERO_ADDR_STR ETHER_TYPE_STR IPV6_FILTER_STR
+ " "
+ HEX_PREF_STR ZERO_ADDR_STR ZERO_ADDR_STR ETHER_TYPE_STR ZERO_TYPE_STR;
+#else
+ /* Setup filter to allow only unicast */
+ dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0x01 0x00";
+#endif /* BLOCK_IPV6_PACKET */
+
+#ifdef PASS_IPV4_SUSPEND
+		/* XXX customer wants to get IPv4 multicast packets */
+ dhd->pktfilter[DHD_MDNS_FILTER_NUM] = "104 0 0 0 0xFFFFFF 0x01005E";
+#else
+		/* Add a filter to pass multicast DNS packets instead of filtering
+		 * them out as broadcast
+		 */
+ dhd->pktfilter[DHD_MDNS_FILTER_NUM] = NULL;
+#endif /* PASS_IPV4_SUSPEND */
+ if (FW_SUPPORTED(dhd, pf6)) {
+			/* Immediately pkt filter TYPE 6: discard broadcast IP packets */
+ dhd->pktfilter[DHD_IP4BCAST_DROP_FILTER_NUM] = DISCARD_IPV4_BCAST;
+ dhd->pktfilter_count = 8;
+ }
+
+#ifdef GAN_LITE_NAT_KEEPALIVE_FILTER
+ dhd->pktfilter_count = 4;
+ /* Setup filter to block broadcast and NAT Keepalive packets */
+ /* discard all broadcast packets */
+ dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0xffffff 0xffffff";
+ /* discard NAT Keepalive packets */
+ dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = "102 0 0 36 0xffffffff 0x11940009";
+ /* discard NAT Keepalive packets */
+ dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = "104 0 0 38 0xffffffff 0x11940009";
+ dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL;
+#endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */
+ } else
+ dhd_conf_discard_pkt_filter(dhd);
+ dhd_conf_add_pkt_filter(dhd);
+
+#if defined(SOFTAP)
+ if (ap_fw_loaded) {
+		/* XXX Andrey: for SOFTAP, disable pkt filters (if there were any) */
+ dhd_enable_packet_filter(0, dhd);
+ }
+#endif /* defined(SOFTAP) */
+ dhd_set_packet_filter(dhd);
+#endif /* PKT_FILTER_SUPPORT */
+#ifdef DISABLE_11N
+ ret = dhd_iovar(dhd, 0, "nmode", (char *)&nmode, sizeof(nmode), NULL, 0, TRUE);
+ if (ret < 0)
+ DHD_ERROR(("%s wl nmode 0 failed %d\n", __FUNCTION__, ret));
+#endif /* DISABLE_11N */
+
+#ifdef ENABLE_BCN_LI_BCN_WAKEUP
+ ret = dhd_iovar(dhd, 0, "bcn_li_bcn", (char *)&bcn_li_bcn, sizeof(bcn_li_bcn),
+ NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: set bcn_li_bcn failed %d\n", __FUNCTION__, ret));
+ }
+#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
+#ifdef AMPDU_VO_ENABLE
+ /* XXX: Enabling VO AMPDU to reduce FER */
+ tid.tid = PRIO_8021D_VO; /* Enable TID(6) for voice */
+ tid.enable = TRUE;
+ ret = dhd_iovar(dhd, 0, "ampdu_tid", (char *)&tid, sizeof(tid), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s ampdu_tid %d\n", __FUNCTION__, ret));
+ }
+
+	tid.tid = PRIO_8021D_NC; /* Enable TID(7) for network control */
+ tid.enable = TRUE;
+ ret = dhd_iovar(dhd, 0, "ampdu_tid", (char *)&tid, sizeof(tid), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s ampdu_tid %d\n", __FUNCTION__, ret));
+ }
+#endif
+#if defined(SOFTAP_TPUT_ENHANCE)
+ if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
+#if defined(BCMSDIO)
+ dhd_bus_setidletime(dhd, (int)100);
+#endif /* BCMSDIO */
+#ifdef DHDTCPACK_SUPPRESS
+ dhd_tcpack_suppress_set(dhd, TCPACK_SUP_OFF);
+#endif
+#if defined(DHD_TCP_WINSIZE_ADJUST)
+ dhd_use_tcp_window_size_adjust = TRUE;
+#endif
+
+#if defined(BCMSDIO)
+ memset(buf, 0, sizeof(buf));
+ ret = dhd_iovar(dhd, 0, "bus:txglom_auto_control", NULL, 0, buf, sizeof(buf),
+ FALSE);
+ if (ret < 0) {
+ glom = 0;
+ ret = dhd_iovar(dhd, 0, "bus:txglom", (char *)&glom, sizeof(glom),
+ NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s bus:txglom failed %d\n", __FUNCTION__, ret));
+ }
+ } else {
+ if (buf[0] == 0) {
+ glom = 1;
+ ret = dhd_iovar(dhd, 0, "bus:txglom_auto_control", (char *)&glom,
+ sizeof(glom), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s bus:txglom_auto_control failed %d\n",
+ __FUNCTION__, ret));
+ }
+ }
+ }
+#endif /* BCMSDIO */
+ }
+#endif /* SOFTAP_TPUT_ENHANCE */
+ /* query for 'clmver' to get clm version info from firmware */
+ bzero(buf, sizeof(buf));
+ ret = dhd_iovar(dhd, 0, "clmver", NULL, 0, buf, sizeof(buf), FALSE);
+ if (ret < 0)
+ DHD_ERROR(("%s clmver failed %d\n", __FUNCTION__, ret));
+ else {
+ char *ver_temp_buf = NULL, *ver_date_buf = NULL;
+ int len;
+
+ if ((ver_temp_buf = bcmstrstr(buf, "Data:")) == NULL) {
+ DHD_ERROR(("Couldn't find \"Data:\"\n"));
+ } else {
+ ver_date_buf = bcmstrstr(buf, "Creation:");
+ ptr = (ver_temp_buf + strlen("Data:"));
+ if ((ver_temp_buf = bcmstrtok(&ptr, "\n", 0)) == NULL) {
+ DHD_ERROR(("Couldn't find New line character\n"));
+ } else {
+ memset(clm_version, 0, CLM_VER_STR_LEN);
+ len = snprintf(clm_version, CLM_VER_STR_LEN - 1, "%s", ver_temp_buf);
+ if (ver_date_buf) {
+ ptr = (ver_date_buf + strlen("Creation:"));
+ ver_date_buf = bcmstrtok(&ptr, "\n", 0);
+ if (ver_date_buf)
+ snprintf(clm_version+len, CLM_VER_STR_LEN-1-len,
+ " (%s)", ver_date_buf);
+ }
+ DHD_INFO(("CLM version = %s\n", clm_version));
+ }
+ }
+
+#if defined(CUSTOMER_HW4_DEBUG)
+ if ((ver_temp_buf = bcmstrstr(ptr, "Customization:")) == NULL) {
+ DHD_ERROR(("Couldn't find \"Customization:\"\n"));
+ } else {
+ char tokenlim;
+ ptr = (ver_temp_buf + strlen("Customization:"));
+ if ((ver_temp_buf = bcmstrtok(&ptr, "(\n", &tokenlim)) == NULL) {
+				DHD_ERROR(("Couldn't find project blob version "
+					"or new line character\n"));
+			} else if (tokenlim == '(') {
+				len = strlen(clm_version);
+				snprintf(clm_version + len, CLM_VER_STR_LEN - 1 - len,
+					", Blob ver = Major : %s minor : ", ver_temp_buf);
+				DHD_INFO(("[INFO]CLM/Blob version = %s\n", clm_version));
+				if ((ver_temp_buf = bcmstrtok(&ptr, "\n", &tokenlim)) == NULL) {
+					DHD_ERROR(("Couldn't find new line character\n"));
+				} else {
+					len = strlen(clm_version);
+					snprintf(clm_version + len, CLM_VER_STR_LEN - 1 - len,
+						"%s", ver_temp_buf);
+					DHD_INFO(("[INFO]CLM/Blob/project version = %s\n",
+						clm_version));
+				}
+			} else if (tokenlim == '\n') {
+				len = strlen(clm_version);
+				snprintf(clm_version + len, CLM_VER_STR_LEN - 1 - len,
+					", Blob ver = Major : %s", ver_temp_buf);
+				DHD_INFO(("[INFO]CLM/Blob/project version = %s\n", clm_version));
+			}
+ }
+#endif /* CUSTOMER_HW4_DEBUG */
+ if (strlen(clm_version)) {
+ DHD_INFO(("CLM version = %s\n", clm_version));
+ } else {
+ DHD_ERROR(("Couldn't find CLM version!\n"));
+ }
+ }
+ dhd_set_version_info(dhd, fw_version);
+
+#ifdef WRITE_WLANINFO
+ sec_save_wlinfo(fw_version, EPI_VERSION_STR, dhd->info->nv_path, clm_version);
+#endif /* WRITE_WLANINFO */
+
+#endif /* defined(OEM_ANDROID) */
+#ifdef GEN_SOFTAP_INFO_FILE
+ sec_save_softap_info();
+#endif /* GEN_SOFTAP_INFO_FILE */
+
+#if defined(BCMSDIO)
+ dhd_txglom_enable(dhd, dhd->conf->bus_rxglom);
+#endif /* defined(BCMSDIO) */
+
+#if defined(BCMSDIO) || defined(BCMDBUS)
+#ifdef PROP_TXSTATUS
+ if (disable_proptx ||
+#ifdef PROP_TXSTATUS_VSDB
+	/* with a VSDB firmware build, enable WLFC only in HOSTAP/IBSS mode */
+ (dhd->op_mode != DHD_FLAG_HOSTAP_MODE &&
+ dhd->op_mode != DHD_FLAG_IBSS_MODE) ||
+#endif /* PROP_TXSTATUS_VSDB */
+ FALSE) {
+ wlfc_enable = FALSE;
+ }
+ ret = dhd_conf_get_disable_proptx(dhd);
+	if (ret == 0) {
+ disable_proptx = 0;
+ wlfc_enable = TRUE;
+ } else if (ret >= 1) {
+ disable_proptx = 1;
+ wlfc_enable = FALSE;
+ /* terence 20161229: we should set ampdu_hostreorder=0 when disable_proptx=1 */
+ hostreorder = 0;
+ }
+
+#if defined(PROP_TXSTATUS)
+#ifdef USE_WFA_CERT_CONF
+ if (sec_get_param_wfa_cert(dhd, SET_PARAM_PROPTX, &proptx) == BCME_OK) {
+ DHD_ERROR(("%s , read proptx param=%d\n", __FUNCTION__, proptx));
+ wlfc_enable = proptx;
+ }
+#endif /* USE_WFA_CERT_CONF */
+#endif /* PROP_TXSTATUS */
+
+#ifndef DISABLE_11N
+ ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, (char *)&wl_down, sizeof(wl_down), TRUE, 0);
+ ret2 = dhd_iovar(dhd, 0, "ampdu_hostreorder", (char *)&hostreorder, sizeof(hostreorder),
+ NULL, 0, TRUE);
+ if (ret2 < 0) {
+ DHD_ERROR(("%s wl ampdu_hostreorder failed %d\n", __FUNCTION__, ret2));
+ if (ret2 != BCME_UNSUPPORTED)
+ ret = ret2;
+
+ if (ret == BCME_NOTDOWN) {
+ uint wl_down = 1;
+ ret2 = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, (char *)&wl_down,
+ sizeof(wl_down), TRUE, 0);
+ DHD_ERROR(("%s ampdu_hostreorder fail WL_DOWN : %d, hostreorder :%d\n",
+ __FUNCTION__, ret2, hostreorder));
+
+ ret2 = dhd_iovar(dhd, 0, "ampdu_hostreorder", (char *)&hostreorder,
+ sizeof(hostreorder), NULL, 0, TRUE);
+ DHD_ERROR(("%s wl ampdu_hostreorder. ret --> %d\n", __FUNCTION__, ret2));
+ if (ret2 != BCME_UNSUPPORTED)
+ ret = ret2;
+ }
+ if (ret2 != BCME_OK)
+ hostreorder = 0;
+ }
+#endif /* DISABLE_11N */
+
+#ifdef READ_CONFIG_FROM_FILE
+ dhd_preinit_config(dhd, 0);
+#endif /* READ_CONFIG_FROM_FILE */
+
+ if (wlfc_enable) {
+ dhd_wlfc_init(dhd);
+ /* terence 20161229: enable ampdu_hostreorder if tlv enabled */
+ dhd_conf_set_intiovar(dhd, 0, WLC_SET_VAR, "ampdu_hostreorder", 1, 0, TRUE);
+ }
+#ifndef DISABLE_11N
+ else if (hostreorder)
+ dhd_wlfc_hostreorder_init(dhd);
+#endif /* DISABLE_11N */
+#else
+ /* terence 20161229: disable ampdu_hostreorder if PROP_TXSTATUS not defined */
+	printf("%s: PROP_TXSTATUS not defined\n", __FUNCTION__);
+ dhd_conf_set_intiovar(dhd, 0, WLC_SET_VAR, "ampdu_hostreorder", 0, 0, TRUE);
+#endif /* PROP_TXSTATUS */
+#endif /* BCMSDIO || BCMDBUS */
+#ifndef PCIE_FULL_DONGLE
+	/* Without PCIE_FULL_DONGLE, DHD needs all packets delivered to the host
+	 * to handle intra-BSS forwarding
+	 */
+ if (FW_SUPPORTED(dhd, ap)) {
+ wl_ap_isolate = AP_ISOLATE_SENDUP_ALL;
+ ret = dhd_iovar(dhd, 0, "ap_isolate", (char *)&wl_ap_isolate, sizeof(wl_ap_isolate),
+ NULL, 0, TRUE);
+ if (ret < 0)
+ DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
+ }
+#endif /* PCIE_FULL_DONGLE */
+#ifdef PNO_SUPPORT
+ if (!dhd->pno_state) {
+ dhd_pno_init(dhd);
+ }
+#endif
+
+#ifdef DHD_PKTTS
+ /* get the pkt metadata buffer length supported by FW */
+ if (dhd_wl_ioctl_get_intiovar(dhd, "bus:metadata_info", &val,
+ WLC_GET_VAR, FALSE, 0) != BCME_OK) {
+ DHD_ERROR(("%s: failed to get pkt metadata buflen, use IPC pkt TS.\n",
+ __FUNCTION__));
+		/*
+		 * If the iovar fails, the IPC method of collecting
+		 * timestamps should be used, so set metadata_buflen to
+		 * 0 here. This is checked later on Tx completion to
+		 * decide whether the IPC or the metadata method of
+		 * reading timestamps should be used.
+		 */
+ dhd->pkt_metadata_version = 0;
+ dhd->pkt_metadata_buflen = 0;
+ } else {
+ dhd->pkt_metadata_version = GET_METADATA_VER(val);
+ dhd->pkt_metadata_buflen = GET_METADATA_BUFLEN(val);
+ }
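+	/* XXX: the returned val packs both fields; GET_METADATA_VER() and
+	 * GET_METADATA_BUFLEN() unpack the metadata version and the per-packet
+	 * metadata buffer length respectively (the exact bit layout is defined
+	 * by those macros).
+	 */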
+
+ /* Check FW supports pktlat, if supports enable pktts_enab iovar */
+ ret = dhd_set_pktts_enab(dhd, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: Enabling pktts_enab failed, ret=%d\n", __FUNCTION__, ret));
+ }
+#endif /* DHD_PKTTS */
+
+#ifdef RTT_SUPPORT
+ if (dhd->rtt_state) {
+ ret = dhd_rtt_init(dhd);
+ if (ret < 0) {
+ DHD_ERROR(("%s failed to initialize RTT\n", __FUNCTION__));
+ }
+ }
+#endif
+#ifdef FILTER_IE
+ /* Failure to configure filter IE is not a fatal error, ignore it. */
+ if (FW_SUPPORTED(dhd, fie) &&
+ !(dhd->op_mode & (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))) {
+ dhd_read_from_file(dhd);
+ }
+#endif /* FILTER_IE */
+#ifdef WL11U
+ dhd_interworking_enable(dhd);
+#endif /* WL11U */
+
+#ifdef NDO_CONFIG_SUPPORT
+ dhd->ndo_enable = FALSE;
+ dhd->ndo_host_ip_overflow = FALSE;
+ dhd->ndo_max_host_ip = NDO_MAX_HOST_IP_ENTRIES;
+#endif /* NDO_CONFIG_SUPPORT */
+
+ /* ND offload version supported */
+ dhd->ndo_version = dhd_ndo_get_version(dhd);
+ if (dhd->ndo_version > 0) {
+ DHD_INFO(("%s: ndo version %d\n", __FUNCTION__, dhd->ndo_version));
+
+#ifdef NDO_CONFIG_SUPPORT
+ /* enable Unsolicited NA filter */
+ ret = dhd_ndo_unsolicited_na_filter_enable(dhd, 1);
+ if (ret < 0) {
+ DHD_ERROR(("%s failed to enable Unsolicited NA filter\n", __FUNCTION__));
+ }
+#endif /* NDO_CONFIG_SUPPORT */
+ }
+
+	/* check whether the dongle supports wbtext (product policy) */
+ dhd->wbtext_support = FALSE;
+ if (dhd_wl_ioctl_get_intiovar(dhd, "wnm_bsstrans_resp", &wnm_bsstrans_resp,
+ WLC_GET_VAR, FALSE, 0) != BCME_OK) {
+ DHD_ERROR(("failed to get wnm_bsstrans_resp\n"));
+ }
+ dhd->wbtext_policy = wnm_bsstrans_resp;
+ if (dhd->wbtext_policy == WL_BSSTRANS_POLICY_PRODUCT_WBTEXT) {
+ dhd->wbtext_support = TRUE;
+ }
+#ifndef WBTEXT
+ /* driver can turn off wbtext feature through makefile */
+ if (dhd->wbtext_support) {
+ if (dhd_wl_ioctl_set_intiovar(dhd, "wnm_bsstrans_resp",
+ WL_BSSTRANS_POLICY_ROAM_ALWAYS,
+ WLC_SET_VAR, FALSE, 0) != BCME_OK) {
+ DHD_ERROR(("failed to disable WBTEXT\n"));
+ }
+ }
+#endif /* !WBTEXT */
+
+#ifdef DHD_NON_DMA_M2M_CORRUPTION
+ /* check pcie non dma loopback */
+ if (dhd->op_mode == DHD_FLAG_MFG_MODE &&
+ (dhd_bus_dmaxfer_lpbk(dhd, M2M_NON_DMA_LPBK) < 0)) {
+ goto done;
+ }
+#endif /* DHD_NON_DMA_M2M_CORRUPTION */
+
+ /* WNM capabilities */
+ wnm_cap = 0
+#ifdef WL11U
+ | WL_WNM_BSSTRANS | WL_WNM_NOTIF
+#endif
+#ifdef WBTEXT
+ | WL_WNM_BSSTRANS | WL_WNM_MAXIDLE
+#endif
+ ;
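+	/* XXX: WL_WNM_BSSTRANS appears in both the WL11U and WBTEXT arms above;
+	 * OR-ing it twice is harmless, so a build with both defined yields
+	 * WL_WNM_BSSTRANS | WL_WNM_NOTIF | WL_WNM_MAXIDLE.
+	 */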
+#if defined(WL_MBO) && defined(WL_OCE)
+ if (FW_SUPPORTED(dhd, estm)) {
+ wnm_cap |= WL_WNM_ESTM;
+ }
+#endif /* WL_MBO && WL_OCE */
+ if (dhd_iovar(dhd, 0, "wnm", (char *)&wnm_cap, sizeof(wnm_cap), NULL, 0, TRUE) < 0) {
+ DHD_ERROR(("failed to set WNM capabilities\n"));
+ }
+
+#ifdef CUSTOM_ASSOC_TIMEOUT
+	/* set recreate_bi_timeout to increase assoc timeout:
+ * 20 * 100TU * 1024 / 1000 = 2 secs
+ * (beacon wait time = recreate_bi_timeout * beacon_period * 1024 / 1000)
+ */
+ if (dhd_wl_ioctl_set_intiovar(dhd, "recreate_bi_timeout",
+ CUSTOM_ASSOC_TIMEOUT,
+ WLC_SET_VAR, TRUE, 0) != BCME_OK) {
+ DHD_ERROR(("failed to set assoc timeout\n"));
+ }
+#endif /* CUSTOM_ASSOC_TIMEOUT */
+
+#if defined(WBTEXT) && defined(WBTEXT_BTMDELTA)
+ if (dhd_iovar(dhd, 0, "wnm_btmdelta", (char *)&btmdelta, sizeof(btmdelta),
+ NULL, 0, TRUE) < 0) {
+ DHD_ERROR(("failed to set BTM delta\n"));
+ }
+#endif /* WBTEXT && WBTEXT_BTMDELTA */
+#if defined(WBTEXT) && defined(RRM_BCNREQ_MAX_CHAN_TIME)
+ if (dhd_iovar(dhd, 0, "rrm_bcn_req_thrtl_win",
+ (char *)&rrm_bcn_req_thrtl_win, sizeof(rrm_bcn_req_thrtl_win),
+ NULL, 0, TRUE) < 0) {
+ DHD_ERROR(("failed to set RRM BCN request thrtl_win\n"));
+ }
+ if (dhd_iovar(dhd, 0, "rrm_bcn_req_max_off_chan_time",
+ (char *)&rrm_bcn_req_max_off_chan_time, sizeof(rrm_bcn_req_max_off_chan_time),
+ NULL, 0, TRUE) < 0) {
+ DHD_ERROR(("failed to set RRM BCN Request max_off_chan_time\n"));
+ }
+#endif /* WBTEXT && RRM_BCNREQ_MAX_CHAN_TIME */
+
+#ifdef WL_MONITOR
+#ifdef HOST_RADIOTAP_CONV
+	/* The 'monitor' IOVAR is issued to check whether the FW supports radiotap
+	 * conversion. This is indicated through the MSB (1<<31), based on which
+	 * host radiotap conversion will be enabled or disabled.
+ * 0 - Host supports Radiotap conversion.
+ * 1 - FW supports Radiotap conversion.
+ */
+ bcm_mkiovar("monitor", (char *)&monitor, sizeof(monitor), iovbuf, sizeof(iovbuf));
+ if ((ret2 = dhd_wl_ioctl_cmd(dhd, WLC_GET_MONITOR, iovbuf,
+ sizeof(iovbuf), FALSE, 0)) == 0) {
+ memcpy(&monitor, iovbuf, sizeof(monitor));
+ dhdinfo->host_radiotap_conv = (monitor & HOST_RADIOTAP_CONV_BIT) ? TRUE : FALSE;
+ } else {
+ DHD_ERROR(("%s Failed to get monitor mode, err %d\n",
+ __FUNCTION__, ret2));
+ }
+#endif /* HOST_RADIOTAP_CONV */
+ if (FW_SUPPORTED(dhd, monitor)) {
+ dhd->monitor_enable = TRUE;
+ DHD_ERROR(("%s: Monitor mode is enabled in FW cap\n", __FUNCTION__));
+ } else {
+ dhd->monitor_enable = FALSE;
+ DHD_ERROR(("%s: Monitor mode is not enabled in FW cap\n", __FUNCTION__));
+ }
+#endif /* WL_MONITOR */
+
+ /* store the preserve log set numbers */
+ if (dhd_get_preserve_log_numbers(dhd, &dhd->logset_prsrv_mask)
+ != BCME_OK) {
+ DHD_ERROR(("%s: Failed to get preserve log # !\n", __FUNCTION__));
+ }
+
+ if (FW_SUPPORTED(dhd, ecounters) && enable_ecounter) {
+ dhd_ecounter_configure(dhd, TRUE);
+ }
+
+#ifdef CONFIG_SILENT_ROAM
+ dhd->sroam_turn_on = TRUE;
+ dhd->sroamed = FALSE;
+#endif /* CONFIG_SILENT_ROAM */
+ dhd_set_bandlock(dhd);
+
+ dhd_conf_postinit_ioctls(dhd);
+done:
+
+ if (eventmask_msg) {
+ MFREE(dhd->osh, eventmask_msg, msglen);
+ }
+ if (iov_buf) {
+ MFREE(dhd->osh, iov_buf, WLC_IOCTL_SMLEN);
+ }
+#if defined(DHD_8021X_DUMP) && defined(SHOW_LOGTRACE)
+ if (el_tag) {
+ MFREE(dhd->osh, el_tag, sizeof(wl_el_tag_params_t));
+ }
+#endif /* DHD_8021X_DUMP */
+ return ret;
+}
+
+/* Default: enable preinit optimisation */
+#define DHD_PREINIT_OPTIMISATION
+
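+/* A null-payload GET of "preinit_status" below acts purely as a capability
+ * probe: BCME_OK selects the optimised preinit path, BCME_UNSUPPORTED falls
+ * back to the legacy path, and any other error aborts initialisation.
+ */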
+int
+dhd_preinit_ioctls(dhd_pub_t *dhd)
+{
+ int ret = 0;
+
+#ifdef DHD_PREINIT_OPTIMISATION
+ int preinit_status = 0;
+ ret = dhd_iovar(dhd, 0, "preinit_status", NULL, 0, (char *)&preinit_status,
+ sizeof(preinit_status), FALSE);
+
+ if (ret == BCME_OK) {
+ DHD_ERROR(("%s: preinit_status IOVAR present, use optimised preinit\n",
+ __FUNCTION__));
+ dhd->fw_preinit = TRUE;
+ ret = dhd_optimised_preinit_ioctls(dhd);
+ } else if (ret == BCME_UNSUPPORTED) {
+ DHD_ERROR(("%s: preinit_status IOVAR not supported, use legacy preinit\n",
+ __FUNCTION__));
+ dhd->fw_preinit = FALSE;
+ ret = dhd_legacy_preinit_ioctls(dhd);
+ } else {
+ DHD_ERROR(("%s: preinit_status IOVAR returned err(%d), ABORT\n",
+ __FUNCTION__, ret));
+ }
+#else
+ dhd->fw_preinit = FALSE;
+ ret = dhd_legacy_preinit_ioctls(dhd);
+#endif /* DHD_PREINIT_OPTIMISATION */
+ return ret;
+}
+
+int
+dhd_getiovar(dhd_pub_t *pub, int ifidx, char *name, char *cmd_buf,
+ uint cmd_len, char **resptr, uint resp_len)
+{
+ int len = resp_len;
+ int ret;
+ char *buf = *resptr;
+ wl_ioctl_t ioc;
+ if (resp_len > WLC_IOCTL_MAXLEN)
+ return BCME_BADARG;
+
+ memset(buf, 0, resp_len);
+
+ ret = bcm_mkiovar(name, cmd_buf, cmd_len, buf, len);
+ if (ret == 0) {
+ return BCME_BUFTOOSHORT;
+ }
+
+ memset(&ioc, 0, sizeof(ioc));
+
+ ioc.cmd = WLC_GET_VAR;
+ ioc.buf = buf;
+ ioc.len = len;
+ ioc.set = 0;
+
+ ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
+
+ return ret;
+}
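+
+/* A minimal, compiled-out sketch of a dhd_getiovar() call. The caller owns
+ * the response buffer and passes it via resptr; the buffer name, the example
+ * function and the "ver" iovar used here are illustrative assumptions.
+ */
+#if 0
+static void
+dhd_getiovar_example(dhd_pub_t *pub)
+{
+	char resp_buf[WLC_IOCTL_SMLEN];
+	char *resp = resp_buf;
+
+	/* query the "ver" iovar on the primary interface (ifidx 0) */
+	if (dhd_getiovar(pub, 0, "ver", NULL, 0, &resp, sizeof(resp_buf)) == BCME_OK) {
+		/* resp_buf now holds the NUL-terminated response */
+	}
+}
+#endif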
+
+int dhd_change_mtu(dhd_pub_t *dhdp, int new_mtu, int ifidx)
+{
+ struct dhd_info *dhd = dhdp->info;
+ struct net_device *dev = NULL;
+
+ ASSERT(dhd && dhd->iflist[ifidx]);
+ dev = dhd->iflist[ifidx]->net;
+ ASSERT(dev);
+
+#ifndef DHD_TPUT_PATCH
+ if (netif_running(dev)) {
+ DHD_ERROR(("%s: Must be down to change its MTU\n", dev->name));
+ return BCME_NOTDOWN;
+ }
+#endif
+
+#define DHD_MIN_MTU 1500
+#define DHD_MAX_MTU 1752
+
+ if ((new_mtu < DHD_MIN_MTU) || (new_mtu > DHD_MAX_MTU)) {
+ DHD_ERROR(("%s: MTU size %d is invalid.\n", __FUNCTION__, new_mtu));
+ return BCME_BADARG;
+ }
+
+ dev->mtu = new_mtu;
+ return 0;
+}
+
+#if defined(WL_CFG80211) && defined(DHD_FILE_DUMP_EVENT) && defined(DHD_FW_COREDUMP)
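+/* Handshake with the HAL dump path: mark the bus busy with IN_HALDUMP, send
+ * the urgent event, then block on the busbusy waitqueue until the HAL clears
+ * the bit; on timeout the bit is force-cleared at the end as a fallback.
+ */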
+static int dhd_wait_for_file_dump(dhd_pub_t *dhdp)
+{
+ int ret = BCME_OK;
+ struct net_device *primary_ndev;
+ struct bcm_cfg80211 *cfg;
+ unsigned long flags = 0;
+ primary_ndev = dhd_linux_get_primary_netdev(dhdp);
+
+ if (!primary_ndev) {
+ DHD_ERROR(("%s: Cannot find primary netdev\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+ cfg = wl_get_cfg(primary_ndev);
+
+ if (!cfg) {
+ DHD_ERROR(("%s: Cannot find cfg\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ DHD_GENERAL_LOCK(dhdp, flags);
+ if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
+ DHD_BUS_BUSY_CLEAR_IN_HALDUMP(dhdp);
+ dhd_os_busbusy_wake(dhdp);
+ DHD_GENERAL_UNLOCK(dhdp, flags);
+ DHD_ERROR(("%s: bus is down! can't collect log dump. \n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+ DHD_BUS_BUSY_SET_IN_HALDUMP(dhdp);
+ DHD_GENERAL_UNLOCK(dhdp, flags);
+
+ DHD_OS_WAKE_LOCK(dhdp);
+	/* send the urgent event only if the HAL has started; if not, clear the dump state here */
+ if (wl_cfg80211_is_hal_started(cfg)) {
+ int timeleft = 0;
+
+ DHD_ERROR(("[DUMP] %s: HAL started. send urgent event\n", __FUNCTION__));
+ dhd_dbg_send_urgent_evt(dhdp, NULL, 0);
+
+ DHD_ERROR(("%s: wait to clear dhd_bus_busy_state: 0x%x\n",
+ __FUNCTION__, dhdp->dhd_bus_busy_state));
+ timeleft = dhd_os_busbusy_wait_bitmask(dhdp,
+ &dhdp->dhd_bus_busy_state, DHD_BUS_BUSY_IN_HALDUMP, 0);
+ if ((dhdp->dhd_bus_busy_state & DHD_BUS_BUSY_IN_HALDUMP) != 0) {
+ DHD_ERROR(("%s: Timed out(%d) dhd_bus_busy_state=0x%x\n",
+ __FUNCTION__, timeleft, dhdp->dhd_bus_busy_state));
+ ret = BCME_ERROR;
+ }
+ } else {
+ DHD_ERROR(("[DUMP] %s: HAL Not started. skip urgent event\n", __FUNCTION__));
+ ret = BCME_ERROR;
+ }
+
+ DHD_OS_WAKE_UNLOCK(dhdp);
+ /* In case of dhd_os_busbusy_wait_bitmask() timeout,
+ * hal dump bit will not be cleared. Hence clearing it here.
+ */
+ DHD_GENERAL_LOCK(dhdp, flags);
+ DHD_BUS_BUSY_CLEAR_IN_HALDUMP(dhdp);
+ dhd_os_busbusy_wake(dhdp);
+ DHD_GENERAL_UNLOCK(dhdp, flags);
+
+ return ret;
+}
+#endif /* WL_CFG80211 && DHD_FILE_DUMP_EVENT && DHD_FW_COREDUMP */
+
+#ifdef ARP_OFFLOAD_SUPPORT
+/* add or remove AOE host IP(s) (up to 8 IPs on the interface) */
+/* XXX the add operation is more efficient */
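+/* For example, if the table read back from the dongle is {A, 0, B}, adding
+ * address C re-programs the dongle with {A, C, B}; removing A re-programs it
+ * with {C, B}, since cleared slots are simply skipped on the write-back.
+ */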
+void
+aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx)
+{
+ u32 ipv4_buf[MAX_IPV4_ENTRIES]; /* temp save for AOE host_ip table */
+ int i;
+ int ret;
+
+ bzero(ipv4_buf, sizeof(ipv4_buf));
+
+ /* display what we've got */
+ ret = dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx);
+ DHD_ARPOE(("%s: hostip table read from Dongle:\n", __FUNCTION__));
+#ifdef AOE_DBG
+ dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
+#endif
+	/* now that we have saved the host_ip table, clear it in the dongle AOE */
+ dhd_aoe_hostip_clr(dhd_pub, idx);
+
+ if (ret) {
+ DHD_ERROR(("%s failed\n", __FUNCTION__));
+ return;
+ }
+
+ for (i = 0; i < MAX_IPV4_ENTRIES; i++) {
+ if (add && (ipv4_buf[i] == 0)) {
+ ipv4_buf[i] = ipa;
+ add = FALSE; /* added ipa to local table */
+ DHD_ARPOE(("%s: Saved new IP in temp arp_hostip[%d]\n",
+ __FUNCTION__, i));
+ } else if (ipv4_buf[i] == ipa) {
+ ipv4_buf[i] = 0;
+ DHD_ARPOE(("%s: removed IP:%x from temp table %d\n",
+ __FUNCTION__, ipa, i));
+ }
+
+ if (ipv4_buf[i] != 0) {
+ /* add back host_ip entries from our local cache */
+ dhd_arp_offload_add_ip(dhd_pub, ipv4_buf[i], idx);
+ DHD_ARPOE(("%s: added IP:%x to dongle arp_hostip[%d]\n\n",
+ __FUNCTION__, ipv4_buf[i], i));
+ }
+ }
+#ifdef AOE_DBG
+ /* see the resulting hostip table */
+ dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx);
+ DHD_ARPOE(("%s: read back arp_hostip table:\n", __FUNCTION__));
+ dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
+#endif
+}
+
+/* XXX this function is only for IP address */
+/*
+ * Notification mechanism from kernel to our driver. This function is called by the Linux kernel
+ * whenever there is an event related to an IP address.
+ * ptr : kernel-provided pointer to the IP address that has changed
+ */
+static int dhd_inetaddr_notifier_call(struct notifier_block *this,
+ unsigned long event,
+ void *ptr)
+{
+ struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
+
+ dhd_info_t *dhd;
+ dhd_pub_t *dhd_pub;
+ int idx;
+
+ if (!ifa || !(ifa->ifa_dev->dev))
+ return NOTIFY_DONE;
+
+	/* Filter out notifications meant for non-Broadcom devices */
+ if ((ifa->ifa_dev->dev->netdev_ops != &dhd_ops_pri) &&
+ (ifa->ifa_dev->dev->netdev_ops != &dhd_ops_virt)) {
+#if defined(WL_ENABLE_P2P_IF)
+ if (!wl_cfgp2p_is_ifops(ifa->ifa_dev->dev->netdev_ops))
+#endif /* WL_ENABLE_P2P_IF */
+ return NOTIFY_DONE;
+ }
+
+ dhd = DHD_DEV_INFO(ifa->ifa_dev->dev);
+ if (!dhd)
+ return NOTIFY_DONE;
+
+ dhd_pub = &dhd->pub;
+
+ if (!dhd_pub->arpoe_enable) {
+ DHD_ERROR(("arpoe_enable not set"));
+ return NOTIFY_DONE;
+ }
+
+ if (dhd_pub->arp_version == 1) {
+ idx = 0;
+ } else {
+ for (idx = 0; idx < DHD_MAX_IFS; idx++) {
+ if (dhd->iflist[idx] && dhd->iflist[idx]->net == ifa->ifa_dev->dev)
+ break;
+ }
+ if (idx < DHD_MAX_IFS)
+ DHD_TRACE(("ifidx : %p %s %d\n", dhd->iflist[idx]->net,
+ dhd->iflist[idx]->name, dhd->iflist[idx]->idx));
+ else {
+ DHD_ERROR(("Cannot find ifidx for(%s) set to 0\n", ifa->ifa_label));
+ idx = 0;
+ }
+ }
+
+ switch (event) {
+ case NETDEV_UP:
+ DHD_ARPOE(("%s: [%s] Up IP: 0x%x\n",
+ __FUNCTION__, ifa->ifa_label, ifa->ifa_address));
+
+ /*
+			 * Skip if the bus is not in a state to transport the IOVAR,
+			 * or the dongle is not ready.
+ */
+ if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(&dhd->pub) ||
+ dhd->pub.busstate == DHD_BUS_LOAD) {
+ DHD_ERROR(("%s: bus not ready, exit NETDEV_UP : %d\n",
+ __FUNCTION__, dhd->pub.busstate));
+ if (dhd->pend_ipaddr) {
+ DHD_ERROR(("%s: overwrite pending ipaddr: 0x%x\n",
+ __FUNCTION__, dhd->pend_ipaddr));
+ }
+ dhd->pend_ipaddr = ifa->ifa_address;
+ break;
+ }
+
+#ifdef AOE_IP_ALIAS_SUPPORT
+		/* XXX HOSTAPD will be returned first */
+ DHD_ARPOE(("%s:add aliased IP to AOE hostip cache\n",
+ __FUNCTION__));
+ aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, TRUE, idx);
+#endif /* AOE_IP_ALIAS_SUPPORT */
+ dhd_conf_set_garp(dhd_pub, idx, ifa->ifa_address, TRUE);
+ break;
+
+ case NETDEV_DOWN:
+ DHD_ARPOE(("%s: [%s] Down IP: 0x%x\n",
+ __FUNCTION__, ifa->ifa_label, ifa->ifa_address));
+ dhd->pend_ipaddr = 0;
+#ifdef AOE_IP_ALIAS_SUPPORT
+		/* XXX HOSTAPD will be returned first */
+ DHD_ARPOE(("%s:interface is down, AOE clr all for this if\n",
+ __FUNCTION__));
+ if ((dhd_pub->op_mode & DHD_FLAG_HOSTAP_MODE) ||
+ (ifa->ifa_dev->dev != dhd_linux_get_primary_netdev(dhd_pub))) {
+ aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, FALSE, idx);
+ } else
+#endif /* AOE_IP_ALIAS_SUPPORT */
+ {
+ /* XXX clear ALL arp and hostip tables */
+ dhd_aoe_hostip_clr(&dhd->pub, idx);
+ dhd_aoe_arp_clr(&dhd->pub, idx);
+ }
+ dhd_conf_set_garp(dhd_pub, idx, ifa->ifa_address, FALSE);
+ break;
+
+ default:
+ DHD_ARPOE(("%s: do noting for [%s] Event: %lu\n",
+ __func__, ifa->ifa_label, event));
+ break;
+ }
+ return NOTIFY_DONE;
+}
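+
+/* A minimal sketch of how this callback is hooked up; the notifier_block
+ * name matches the one unregistered in dhd_detach(), but the snippet is
+ * illustrative only:
+ *
+ *	static struct notifier_block dhd_inetaddr_notifier = {
+ *		.notifier_call = dhd_inetaddr_notifier_call,
+ *	};
+ *	register_inetaddr_notifier(&dhd_inetaddr_notifier);
+ */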
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
+/* Neighbor Discovery Offload: deferred handler */
+static void
+dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event)
+{
+ struct ipv6_work_info_t *ndo_work = (struct ipv6_work_info_t *)event_data;
+ dhd_info_t *dhd = (dhd_info_t *)dhd_info;
+ dhd_pub_t *dhdp;
+ int ret;
+
+ if (!dhd) {
+ DHD_ERROR(("%s: invalid dhd_info\n", __FUNCTION__));
+ goto done;
+ }
+ dhdp = &dhd->pub;
+
+ if (event != DHD_WQ_WORK_IPV6_NDO) {
+ DHD_ERROR(("%s: unexpected event\n", __FUNCTION__));
+ goto done;
+ }
+
+ if (!ndo_work) {
+ DHD_ERROR(("%s: ipv6 work info is not initialized\n", __FUNCTION__));
+ return;
+ }
+
+ switch (ndo_work->event) {
+ case NETDEV_UP:
+#ifndef NDO_CONFIG_SUPPORT
+ DHD_TRACE(("%s: Enable NDO \n ", __FUNCTION__));
+ ret = dhd_ndo_enable(dhdp, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: Enabling NDO Failed %d\n", __FUNCTION__, ret));
+ }
+#endif /* !NDO_CONFIG_SUPPORT */
+ DHD_TRACE(("%s: Add a host ip for NDO\n", __FUNCTION__));
+ if (dhdp->ndo_version > 0) {
+ /* inet6 addr notifier called only for unicast address */
+ ret = dhd_ndo_add_ip_with_type(dhdp, &ndo_work->ipv6_addr[0],
+ WL_ND_IPV6_ADDR_TYPE_UNICAST, ndo_work->if_idx);
+ } else {
+ ret = dhd_ndo_add_ip(dhdp, &ndo_work->ipv6_addr[0],
+ ndo_work->if_idx);
+ }
+ if (ret < 0) {
+ DHD_ERROR(("%s: Adding a host ip for NDO failed %d\n",
+ __FUNCTION__, ret));
+ }
+ break;
+ case NETDEV_DOWN:
+ if (dhdp->ndo_version > 0) {
+ DHD_TRACE(("%s: Remove a host ip for NDO\n", __FUNCTION__));
+ ret = dhd_ndo_remove_ip_by_addr(dhdp,
+ &ndo_work->ipv6_addr[0], ndo_work->if_idx);
+ } else {
+ DHD_TRACE(("%s: Clear host ip table for NDO \n", __FUNCTION__));
+ ret = dhd_ndo_remove_ip(dhdp, ndo_work->if_idx);
+ }
+ if (ret < 0) {
+ DHD_ERROR(("%s: Removing host ip for NDO failed %d\n",
+ __FUNCTION__, ret));
+ goto done;
+ }
+#ifdef NDO_CONFIG_SUPPORT
+ if (dhdp->ndo_host_ip_overflow) {
+ ret = dhd_dev_ndo_update_inet6addr(
+ dhd_idx2net(dhdp, ndo_work->if_idx));
+ if ((ret < 0) && (ret != BCME_NORESOURCE)) {
+ DHD_ERROR(("%s: Updating host ip for NDO failed %d\n",
+ __FUNCTION__, ret));
+ goto done;
+ }
+ }
+#else /* !NDO_CONFIG_SUPPORT */
+ DHD_TRACE(("%s: Disable NDO\n ", __FUNCTION__));
+ ret = dhd_ndo_enable(dhdp, FALSE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: disabling NDO Failed %d\n", __FUNCTION__, ret));
+ goto done;
+ }
+#endif /* NDO_CONFIG_SUPPORT */
+ break;
+
+ default:
+ DHD_ERROR(("%s: unknown notifier event \n", __FUNCTION__));
+ break;
+ }
+done:
+
+	/* free ndo_work, allocated when the work was scheduled */
+ if (ndo_work) {
+ kfree(ndo_work);
+ }
+
+ return;
+} /* dhd_inet6_work_handler */
+
+/*
+ * Neighbor Discovery Offload: Called when an interface
+ * is assigned with ipv6 address.
+ * Handles only primary interface
+ */
+int dhd_inet6addr_notifier_call(struct notifier_block *this, unsigned long event, void *ptr)
+{
+ dhd_info_t *dhd;
+ dhd_pub_t *dhdp;
+ struct inet6_ifaddr *inet6_ifa = ptr;
+ struct ipv6_work_info_t *ndo_info;
+ int idx;
+
+	/* Filter out notifications meant for non-Broadcom devices */
+ if (inet6_ifa->idev->dev->netdev_ops != &dhd_ops_pri) {
+ return NOTIFY_DONE;
+ }
+
+ dhd = DHD_DEV_INFO(inet6_ifa->idev->dev);
+ if (!dhd) {
+ return NOTIFY_DONE;
+ }
+ dhdp = &dhd->pub;
+
+ /* Supports only primary interface */
+ idx = dhd_net2idx(dhd, inet6_ifa->idev->dev);
+ if (idx != 0) {
+ return NOTIFY_DONE;
+ }
+
+ /* FW capability */
+ if (!FW_SUPPORTED(dhdp, ndoe)) {
+ return NOTIFY_DONE;
+ }
+
+ ndo_info = (struct ipv6_work_info_t *)kzalloc(sizeof(struct ipv6_work_info_t), GFP_ATOMIC);
+ if (!ndo_info) {
+ DHD_ERROR(("%s: ipv6 work alloc failed\n", __FUNCTION__));
+ return NOTIFY_DONE;
+ }
+
+ /* fill up ndo_info */
+ ndo_info->event = event;
+ ndo_info->if_idx = idx;
+ memcpy(ndo_info->ipv6_addr, &inet6_ifa->addr, IPV6_ADDR_LEN);
+
+ /* defer the work to thread as it may block kernel */
+ dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)ndo_info, DHD_WQ_WORK_IPV6_NDO,
+ dhd_inet6_work_handler, DHD_WQ_WORK_PRIORITY_LOW);
+ return NOTIFY_DONE;
+}
+#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
+
+/* Network attach to be invoked from the bus probe handlers */
+int
+dhd_attach_net(dhd_pub_t *dhdp, bool need_rtnl_lock)
+{
+ struct net_device *primary_ndev;
+#ifdef GET_CUSTOM_MAC_ENABLE
+ char hw_ether[62];
+#endif /* GET_CUSTOM_MAC_ENABLE */
+#if defined(GET_CUSTOM_MAC_ENABLE) || defined(GET_OTP_MAC_ENABLE)
+ int ret = BCME_ERROR;
+#endif /* GET_CUSTOM_MAC_ENABLE || GET_OTP_MAC_ENABLE */
+
+ BCM_REFERENCE(primary_ndev);
+
+#ifdef GET_CUSTOM_MAC_ENABLE
+ ret = wifi_platform_get_mac_addr(dhdp->adapter, hw_ether, 0);
+ if (!ret)
+ bcopy(hw_ether, dhdp->mac.octet, ETHER_ADDR_LEN);
+#endif /* GET_CUSTOM_MAC_ENABLE */
+
+#ifdef GET_OTP_MAC_ENABLE
+ if (ret && memcmp(&ether_null, &dhdp->conf->otp_mac, ETHER_ADDR_LEN))
+ bcopy(&dhdp->conf->otp_mac, &dhdp->mac, ETHER_ADDR_LEN);
+#endif /* GET_OTP_MAC_ENABLE */
+
+ /* Register primary net device */
+ if (dhd_register_if(dhdp, 0, need_rtnl_lock) != 0) {
+ return BCME_ERROR;
+ }
+
+#if defined(WL_CFG80211)
+ primary_ndev = dhd_linux_get_primary_netdev(dhdp);
+ if (wl_cfg80211_net_attach(primary_ndev) < 0) {
+ /* fail the init */
+ dhd_remove_if(dhdp, 0, TRUE);
+ return BCME_ERROR;
+ }
+#endif /* WL_CFG80211 */
+ return BCME_OK;
+}
+
+int
+dhd_register_if(dhd_pub_t *dhdp, int ifidx, bool need_rtnl_lock)
+{
+ dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+ dhd_if_t *ifp;
+ struct net_device *net = NULL;
+ int err = 0;
+ uint8 temp_addr[ETHER_ADDR_LEN] = { 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33 };
+
+ DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));
+
+ if (dhd == NULL || dhd->iflist[ifidx] == NULL) {
+ DHD_ERROR(("%s: Invalid Interface\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ ASSERT(dhd && dhd->iflist[ifidx]);
+ ifp = dhd->iflist[ifidx];
+ net = ifp->net;
+ ASSERT(net && (ifp->idx == ifidx));
+
+ ASSERT(!net->netdev_ops);
+ net->netdev_ops = &dhd_ops_virt;
+
+ /* Ok, link into the network layer... */
+ if (ifidx == 0) {
+ /*
+ * device functions for the primary interface only
+ */
+ net->netdev_ops = &dhd_ops_pri;
+ if (!ETHER_ISNULLADDR(dhd->pub.mac.octet))
+ memcpy(temp_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
+ } else {
+ /*
+ * We have to use the primary MAC for virtual interfaces
+ */
+ memcpy(temp_addr, ifp->mac_addr, ETHER_ADDR_LEN);
+#if defined(OEM_ANDROID)
+ /*
+ * Android sets the locally administered bit to indicate that this is a
+ * portable hotspot. This will not work in simultaneous AP/STA mode,
+		 * nor with P2P. We need to set the Dongle's MAC address, and then use that.
+ */
+ if (!memcmp(temp_addr, dhd->iflist[0]->mac_addr,
+ ETHER_ADDR_LEN)) {
+ DHD_ERROR(("%s interface [%s]: set locally administered bit in MAC\n",
+ __func__, net->name));
+ temp_addr[0] |= 0x02;
+ }
+#endif /* defined(OEM_ANDROID) */
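+		/* e.g. a primary MAC of 00:90:4c:11:22:33 yields
+		 * 02:90:4c:11:22:33 for the virtual interface
+		 */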
+ }
+
+ net->hard_header_len = ETH_HLEN + dhd->pub.hdrlen;
+#ifdef HOST_SFH_LLC
+ net->needed_headroom += DOT11_LLC_SNAP_HDR_LEN;
+#endif
+
+#ifdef DHD_AWDL
+ if (dhdp->awdl_ifidx &&
+ ifidx == dhdp->awdl_ifidx) {
+		/* A total of 30 bytes are required for the
+		 * ethernet + AWDL LLC header. Of these, 14
+		 * bytes of ethernet header are already present
+		 * in the skb handed over by the stack, so an
+		 * additional 16 bytes of headroom need to be
+		 * reserved. If the host SFH LLC feature is in
+		 * use, 8 of those 16 bytes were already
+		 * reserved above, hence reserving only another
+		 * 8 bytes is enough. If the host SFH LLC
+		 * feature is not used, all 16 bytes must be
+		 * reserved here.
+		 */
+ net->needed_headroom += DOT11_LLC_SNAP_HDR_LEN;
+#ifndef HOST_SFH_LLC
+ net->needed_headroom += DOT11_LLC_SNAP_HDR_LEN;
+#endif /* HOST_SFH_LLC */
+ }
+#endif /* DHD_AWDL */
+
+ net->ethtool_ops = &dhd_ethtool_ops;
+
+#if defined(WL_WIRELESS_EXT)
+#if WIRELESS_EXT < 19
+ net->get_wireless_stats = dhd_get_wireless_stats;
+#endif /* WIRELESS_EXT < 19 */
+#if WIRELESS_EXT > 12
+ net->wireless_handlers = &wl_iw_handler_def;
+#endif /* WIRELESS_EXT > 12 */
+#endif /* defined(WL_WIRELESS_EXT) */
+
+ /* XXX Set up an MTU change notifier as per linux/notifier.h? */
+ dhd->pub.rxsz = DBUS_RX_BUFFER_SIZE_DHD(net);
+
+#ifdef WLMESH
+ if (ifidx >= 2 && dhdp->conf->fw_type == FW_TYPE_MESH) {
+ temp_addr[4] ^= 0x80;
+ temp_addr[4] += ifidx;
+ temp_addr[5] += ifidx;
+ }
+#endif
+ /*
+ * XXX Linux 2.6.25 does not like a blank MAC address, so use a
+ * dummy address until the interface is brought up.
+ */
+ memcpy(net->dev_addr, temp_addr, ETHER_ADDR_LEN);
+
+ if (ifidx == 0)
+ printf("%s\n", dhd_version);
+ else {
+#ifdef WL_EXT_IAPSTA
+ wl_ext_iapsta_update_net_device(net, ifidx);
+#endif /* WL_EXT_IAPSTA */
+ if (dhd->pub.up == 1) {
+ if (_dhd_set_mac_address(dhd, ifidx, net->dev_addr, FALSE) == 0)
+ DHD_INFO(("%s: MACID is overwritten\n", __FUNCTION__));
+ else
+ DHD_ERROR(("%s: _dhd_set_mac_address() failed\n", __FUNCTION__));
+ }
+ }
+
+ if (need_rtnl_lock)
+ err = register_netdev(net);
+ else {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 12, 0)) && defined(WL_CFG80211)
+ err = cfg80211_register_netdevice(net);
+#else
+ err = register_netdevice(net);
+#endif
+ }
+
+ if (err != 0) {
+ DHD_ERROR(("couldn't register the net device [%s], err %d\n", net->name, err));
+ goto fail;
+ }
+
+#if (defined(BCM_ROUTER_DHD) && defined(HNDCTF))
+ if ((ctf_dev_register(dhd->cih, net, FALSE) != BCME_OK) ||
+ (ctf_enable(dhd->cih, net, TRUE, &dhd->brc_hot) != BCME_OK)) {
+ DHD_ERROR(("%s:%d: ctf_dev_register/ctf_enable failed for interface %d\n",
+ __FUNCTION__, __LINE__, ifidx));
+ goto fail;
+ }
+#endif /* BCM_ROUTER_DHD && HNDCTF */
+
+#ifdef WL_EVENT
+ wl_ext_event_attach_netdev(net, ifidx, ifp->bssidx);
+#endif /* WL_EVENT */
+#ifdef WL_ESCAN
+ wl_escan_event_attach(net, ifidx);
+#endif /* WL_ESCAN */
+#ifdef WL_EXT_IAPSTA
+ wl_ext_iapsta_attach_netdev(net, ifidx, ifp->bssidx);
+ wl_ext_iapsta_attach_name(net, ifidx);
+#endif /* WL_EXT_IAPSTA */
+
+#if defined(CONFIG_TIZEN)
+ net_stat_tizen_register(net);
+#endif /* CONFIG_TIZEN */
+
+ printf("Register interface [%s] MAC: "MACDBG"\n\n", net->name,
+#if defined(CUSTOMER_HW4_DEBUG)
+ MAC2STRDBG(dhd->pub.mac.octet));
+#else
+ MAC2STRDBG(net->dev_addr));
+#endif /* CUSTOMER_HW4_DEBUG */
+
+#if defined(OEM_ANDROID) && (defined(BCMPCIE) || defined(BCMLXSDMMC) || defined(BCMDBUS))
+ if (ifidx == 0) {
+#if defined(BCMLXSDMMC) && !defined(DHD_PRELOAD)
+ up(&dhd_registration_sem);
+#endif /* BCMLXSDMMC */
+ if (!dhd_download_fw_on_driverload) {
+#ifdef WL_CFG80211
+ wl_terminate_event_handler(net);
+#endif /* WL_CFG80211 */
+#if defined(DHD_LB_RXP)
+ __skb_queue_purge(&dhd->rx_pend_queue);
+#endif /* DHD_LB_RXP */
+
+#if defined(DHD_LB_TXP)
+ skb_queue_purge(&dhd->tx_pend_queue);
+#endif /* DHD_LB_TXP */
+
+#ifdef SHOW_LOGTRACE
+ /* Release the skbs from queue for WLC_E_TRACE event */
+ dhd_event_logtrace_flush_queue(dhdp);
+#endif /* SHOW_LOGTRACE */
+
+#if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS)
+ dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF);
+#endif /* BCMPCIE && DHDTCPACK_SUPPRESS */
+
+#if defined(WLAN_ACCEL_BOOT)
+ dhd->fs_check_retry = DHD_FS_CHECK_RETRIES;
+ dhd->wl_accel_boot_on_done = FALSE;
+ INIT_DELAYED_WORK(&dhd->wl_accel_work, dhd_wifi_accel_on_work_cb);
+#if !defined(WLAN_ACCEL_SKIP_WQ_IN_ATTACH)
+ /* If the WLAN_ACCEL_SKIP_WQ_IN_ATTACH feature is enabled,
+ * the dhd_wifi_accel_on_work_cb() is called in dhd_open()
+ * to skip dongle firmware downloading during insmod and dhd_attach.
+ */
+ schedule_delayed_work(&dhd->wl_accel_work,
+ msecs_to_jiffies(DHD_FS_CHECK_RETRY_DELAY_MS));
+#endif /* !defined(WLAN_ACCEL_SKIP_WQ_IN_ATTACH) */
+#else
+ /* Turn off Wifi after boot up */
+#if defined (BT_OVER_SDIO)
+ dhd_bus_put(&dhd->pub, WLAN_MODULE);
+ wl_android_set_wifi_on_flag(FALSE);
+#else
+ wl_android_wifi_off(net, TRUE);
+#endif /* BT_OVER_SDIO */
+#endif /* WLAN_ACCEL_BOOT */
+
+ }
+ }
+#endif /* OEM_ANDROID && (BCMPCIE || BCMLXSDMMC || BCMDBUS) */
+#if defined(GDB_PROXY) && defined(PCIE_FULL_DONGLE) && defined(BCMINTERNAL)
+ gdb_proxy_fs_try_create(ifp->info, net->name);
+#endif /* defined(GDB_PROXY) && defined(PCIE_FULL_DONGLE) && defined(BCMINTERNAL) */
+ return 0;
+
+fail:
+ net->netdev_ops = NULL;
+ return err;
+}
+
+void
+dhd_bus_detach(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (dhdp) {
+ dhd = (dhd_info_t *)dhdp->info;
+ if (dhd) {
+
+ /*
+			 * In the case of the Android cfg80211 driver, the bus is down in dhd_stop;
+			 * calling stop again will cause SD read/write errors.
+ */
+ if (dhd->pub.busstate != DHD_BUS_DOWN && dhd_download_fw_on_driverload) {
+ /* Stop the protocol module */
+ dhd_prot_stop(&dhd->pub);
+
+ /* Stop the bus module */
+#ifdef BCMDBUS
+ /* Force Dongle terminated */
+ if (dhd_wl_ioctl_cmd(dhdp, WLC_TERMINATED, NULL, 0, TRUE, 0) < 0)
+ DHD_ERROR(("%s Setting WLC_TERMINATED failed\n",
+ __FUNCTION__));
+ dbus_stop(dhd->pub.bus);
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
+ dhd->pub.busstate = DHD_BUS_DOWN;
+#else
+ dhd_bus_stop(dhd->pub.bus, TRUE);
+#endif /* BCMDBUS */
+ }
+
+#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) || defined(BCMPCIE_OOB_HOST_WAKE)
+ dhd_bus_oob_intr_unregister(dhdp);
+#endif /* OOB_INTR_ONLY || BCMSPI_ANDROID || BCMPCIE_OOB_HOST_WAKE */
+ }
+ }
+}
+
+void dhd_detach(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd;
+ unsigned long flags;
+ int timer_valid = FALSE;
+ struct net_device *dev = NULL;
+ dhd_if_t *ifp;
+#ifdef WL_CFG80211
+ struct bcm_cfg80211 *cfg = NULL;
+#endif
+ if (!dhdp)
+ return;
+
+ dhd = (dhd_info_t *)dhdp->info;
+ if (!dhd)
+ return;
+
+#if defined(GDB_PROXY) && defined(PCIE_FULL_DONGLE) && defined(BCMINTERNAL)
+ gdb_proxy_fs_remove(dhd);
+#endif /* defined(GDB_PROXY) && defined(PCIE_FULL_DONGLE) && defined(BCMINTERNAL) */
+
+ /* primary interface 0 */
+ ifp = dhd->iflist[0];
+ if (ifp && ifp->net) {
+ dev = ifp->net;
+ }
+
+ if (dev) {
+ rtnl_lock();
+#if defined(WL_CFG80211) && defined(WL_STATIC_IF)
+ if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) {
+ wl_cfg80211_static_if_dev_close(dev);
+ }
+#endif /* WL_CFG80211 && WL_STATIC_IF */
+ if (dev->flags & IFF_UP) {
+ /* If IFF_UP is still up, it indicates that
+ * "ifconfig wlan0 down" hasn't been called.
+ * So invoke dev_close explicitly here to
+ * bring down the interface.
+ */
+ DHD_TRACE(("IFF_UP flag is up. Enforcing dev_close from detach \n"));
+ dev_close(dev);
+ }
+ rtnl_unlock();
+ }
+
+ DHD_TRACE(("%s: Enter state 0x%x\n", __FUNCTION__, dhd->dhd_state));
+
+	/* XXX kernel panic issue at first boot-up: rmmod without bringing the
+	 * interface down generates an unnecessary hang event.
+	 */
+ DHD_ERROR(("%s: making dhdpub up FALSE\n", __FUNCTION__));
+ dhd->pub.up = 0;
+ if (!(dhd->dhd_state & DHD_ATTACH_STATE_DONE)) {
+ /* Give sufficient time for threads to start running in case
+ * dhd_attach() has failed
+ */
+ OSL_SLEEP(100);
+ }
+#ifdef DHD_WET
+ dhd_free_wet_info(&dhd->pub, dhd->pub.wet_info);
+#endif /* DHD_WET */
+#ifdef WL_NANHO
+ /* deinit NANHO host module */
+ bcm_nanho_deinit(dhd->pub.nanhoi);
+#endif /* WL_NANHO */
+#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
+#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
+
+#ifdef PROP_TXSTATUS
+#ifdef DHD_WLFC_THREAD
+ if (dhd->pub.wlfc_thread) {
+ kthread_stop(dhd->pub.wlfc_thread);
+ dhdp->wlfc_thread_go = TRUE;
+ wake_up_interruptible(&dhdp->wlfc_wqhead);
+ }
+ dhd->pub.wlfc_thread = NULL;
+#endif /* DHD_WLFC_THREAD */
+#endif /* PROP_TXSTATUS */
+
+#ifdef DHD_TIMESYNC
+ if (dhd->dhd_state & DHD_ATTACH_TIMESYNC_ATTACH_DONE) {
+ dhd_timesync_detach(dhdp);
+ }
+#endif /* DHD_TIMESYNC */
+
+ if (dhd->dhd_state & DHD_ATTACH_STATE_PROT_ATTACH) {
+
+#if defined(OEM_ANDROID) || !defined(BCMSDIO)
+ dhd_bus_detach(dhdp);
+#endif /* OEM_ANDROID || !BCMSDIO */
+#ifdef OEM_ANDROID
+#ifdef BCMPCIE
+ if (is_reboot == SYS_RESTART) {
+ extern bcmdhd_wifi_platdata_t *dhd_wifi_platdata;
+ if (dhd_wifi_platdata && !dhdp->dongle_reset) {
+ dhdpcie_bus_stop_host_dev(dhdp->bus);
+ wifi_platform_set_power(dhd_wifi_platdata->adapters,
+ FALSE, WIFI_TURNOFF_DELAY);
+ }
+ }
+#endif /* BCMPCIE */
+#endif /* OEM_ANDROID */
+#ifndef PCIE_FULL_DONGLE
+#if defined(OEM_ANDROID) || !defined(BCMSDIO)
+ if (dhdp->prot)
+ dhd_prot_detach(dhdp);
+#endif /* OEM_ANDROID || !BCMSDIO */
+#endif /* !PCIE_FULL_DONGLE */
+ }
+
+#ifdef ARP_OFFLOAD_SUPPORT
+ if (dhd_inetaddr_notifier_registered) {
+ dhd_inetaddr_notifier_registered = FALSE;
+ unregister_inetaddr_notifier(&dhd_inetaddr_notifier);
+ }
+#endif /* ARP_OFFLOAD_SUPPORT */
+#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
+ if (dhd_inet6addr_notifier_registered) {
+ dhd_inet6addr_notifier_registered = FALSE;
+ unregister_inet6addr_notifier(&dhd_inet6addr_notifier);
+ }
+#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
+#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
+ if (dhd->dhd_state & DHD_ATTACH_STATE_EARLYSUSPEND_DONE) {
+ if (dhd->early_suspend.suspend)
+ unregister_early_suspend(&dhd->early_suspend);
+ }
+#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
+
+#if defined(WL_WIRELESS_EXT)
+ if (dhd->dhd_state & DHD_ATTACH_STATE_WL_ATTACH) {
+		/* Detach and unlink from the iw */
+ wl_iw_detach(dev);
+ }
+#endif /* defined(WL_WIRELESS_EXT) */
+#ifdef WL_EXT_GENL
+ wl_ext_genl_deinit(dev);
+#endif
+#ifdef WL_EXT_IAPSTA
+ wl_ext_iapsta_dettach(dev);
+#endif /* WL_EXT_IAPSTA */
+#ifdef WL_ESCAN
+ wl_escan_detach(dev);
+#endif /* WL_ESCAN */
+#ifdef WL_EVENT
+ wl_ext_event_dettach(dhdp);
+#endif /* WL_EVENT */
+
+ /* delete all interfaces, start with virtual */
+ if (dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) {
+ int i = 1;
+
+ /* Cleanup virtual interfaces */
+ dhd_net_if_lock_local(dhd);
+ for (i = 1; i < DHD_MAX_IFS; i++) {
+ if (dhd->iflist[i]) {
+ dhd_remove_if(&dhd->pub, i, TRUE);
+ }
+ }
+ dhd_net_if_unlock_local(dhd);
+
+ /* 'ifp' indicates primary interface 0, clean it up. */
+ if (ifp && ifp->net) {
+#if (defined(BCM_ROUTER_DHD) && defined(HNDCTF))
+ if (dhd->cih)
+ ctf_dev_unregister(dhd->cih, ifp->net);
+#endif /* BCM_ROUTER_DHD && HNDCTF */
+
+#ifdef WL_CFG80211
+ cfg = wl_get_cfg(ifp->net);
+#endif
+ /* in unregister_netdev case, the interface gets freed by net->destructor
+ * (which is set to free_netdev)
+ */
+ if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
+ free_netdev(ifp->net);
+ } else {
+#ifdef SET_RPS_CPUS
+ custom_rps_map_clear(ifp->net->_rx);
+#endif /* SET_RPS_CPUS */
+ netif_tx_disable(ifp->net);
+ unregister_netdev(ifp->net);
+ }
+#ifdef PCIE_FULL_DONGLE
+ ifp->net = DHD_NET_DEV_NULL;
+#else
+ ifp->net = NULL;
+#endif /* PCIE_FULL_DONGLE */
+#if defined(BCMSDIO) && !defined(OEM_ANDROID)
+ dhd_bus_detach(dhdp);
+
+ if (dhdp->prot)
+ dhd_prot_detach(dhdp);
+#endif /* BCMSDIO && !OEM_ANDROID */
+
+#ifdef DHD_WMF
+ dhd_wmf_cleanup(dhdp, 0);
+#endif /* DHD_WMF */
+#ifdef DHD_L2_FILTER
+ bcm_l2_filter_arp_table_update(dhdp->osh, ifp->phnd_arp_table, TRUE,
+ NULL, FALSE, dhdp->tickcnt);
+ deinit_l2_filter_arp_table(dhdp->osh, ifp->phnd_arp_table);
+ ifp->phnd_arp_table = NULL;
+#endif /* DHD_L2_FILTER */
+
+#if (defined(BCM_ROUTER_DHD) && defined(QOS_MAP_SET))
+ MFREE(dhdp->osh, ifp->qosmap_up_table, UP_TABLE_MAX);
+ ifp->qosmap_up_table_enable = FALSE;
+#endif /* BCM_ROUTER_DHD && QOS_MAP_SET */
+
+ dhd_if_del_sta_list(ifp);
+
+ MFREE(dhd->pub.osh, ifp, sizeof(*ifp));
+ ifp = NULL;
+#ifdef WL_CFG80211
+ if (cfg && cfg->wdev)
+ cfg->wdev->netdev = NULL;
+#endif
+ }
+ }
+
+ /* Clear the watchdog timer */
+ DHD_GENERAL_LOCK(&dhd->pub, flags);
+ timer_valid = dhd->wd_timer_valid;
+ dhd->wd_timer_valid = FALSE;
+ DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+ if (timer_valid)
+ del_timer_sync(&dhd->timer);
+ DHD_STOP_RPM_TIMER(&dhd->pub);
+
+#ifdef BCMDBUS
+ tasklet_kill(&dhd->tasklet);
+#else
+ if (dhd->dhd_state & DHD_ATTACH_STATE_THREADS_CREATED) {
+#ifdef DHD_PCIE_RUNTIMEPM
+ if (dhd->thr_rpm_ctl.thr_pid >= 0) {
+ PROC_STOP(&dhd->thr_rpm_ctl);
+ }
+#endif /* DHD_PCIE_RUNTIMEPM */
+ if (dhd->thr_wdt_ctl.thr_pid >= 0) {
+ PROC_STOP(&dhd->thr_wdt_ctl);
+ }
+
+ if (dhd->rxthread_enabled && dhd->thr_rxf_ctl.thr_pid >= 0) {
+ PROC_STOP(&dhd->thr_rxf_ctl);
+ }
+
+ if (dhd->thr_dpc_ctl.thr_pid >= 0) {
+ PROC_STOP(&dhd->thr_dpc_ctl);
+ } else
+ {
+ tasklet_kill(&dhd->tasklet);
+ }
+ }
+#endif /* BCMDBUS */
+
+#ifdef WL_NATOE
+ if (dhd->pub.nfct) {
+ dhd_ct_close(dhd->pub.nfct);
+ }
+#endif /* WL_NATOE */
+
+ cancel_delayed_work_sync(&dhd->dhd_dpc_dispatcher_work);
+#ifdef DHD_LB
+ if (dhd->dhd_state & DHD_ATTACH_STATE_LB_ATTACH_DONE) {
+ /* Clear the flag first to avoid calling the cpu notifier */
+ dhd->dhd_state &= ~DHD_ATTACH_STATE_LB_ATTACH_DONE;
+
+ /* Kill the Load Balancing Tasklets */
+#ifdef DHD_LB_RXP
+ cancel_work_sync(&dhd->rx_napi_dispatcher_work);
+ __skb_queue_purge(&dhd->rx_pend_queue);
+#endif /* DHD_LB_RXP */
+#ifdef DHD_LB_TXP
+ cancel_work_sync(&dhd->tx_dispatcher_work);
+ tasklet_kill(&dhd->tx_tasklet);
+ __skb_queue_purge(&dhd->tx_pend_queue);
+#endif /* DHD_LB_TXP */
+
+ /* Unregister from CPU Hotplug framework */
+ dhd_unregister_cpuhp_callback(dhd);
+
+ dhd_cpumasks_deinit(dhd);
+ DHD_LB_STATS_DEINIT(&dhd->pub);
+ }
+#endif /* DHD_LB */
+
+#ifdef CSI_SUPPORT
+ dhd_csi_deinit(dhdp);
+#endif /* CSI_SUPPORT */
+
+#if defined(DNGL_AXI_ERROR_LOGGING) && defined(DHD_USE_WQ_FOR_DNGL_AXI_ERROR)
+ cancel_work_sync(&dhd->axi_error_dispatcher_work);
+#endif /* DNGL_AXI_ERROR_LOGGING && DHD_USE_WQ_FOR_DNGL_AXI_ERROR */
+
+ DHD_SSSR_REG_INFO_DEINIT(&dhd->pub);
+ DHD_SSSR_MEMPOOL_DEINIT(&dhd->pub);
+
+#ifdef DHD_SDTC_ETB_DUMP
+ dhd_sdtc_etb_mempool_deinit(&dhd->pub);
+#endif /* DHD_SDTC_ETB_DUMP */
+
+#ifdef EWP_EDL
+ if (host_edl_support) {
+ DHD_EDL_MEM_DEINIT(dhdp);
+ host_edl_support = FALSE;
+ }
+#endif /* EWP_EDL */
+
+#ifdef WL_CFG80211
+ if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) {
+ if (!cfg) {
+ DHD_ERROR(("cfg NULL!\n"));
+ ASSERT(0);
+ } else {
+ wl_cfg80211_detach(cfg);
+ dhd_monitor_uninit();
+ }
+ }
+#endif
+
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+ destroy_workqueue(dhd->tx_wq);
+ dhd->tx_wq = NULL;
+ destroy_workqueue(dhd->rx_wq);
+ dhd->rx_wq = NULL;
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+#ifdef DEBUGABILITY
+ if (dhdp->dbg) {
+#ifdef DBG_PKT_MON
+ dhd_os_dbg_detach_pkt_monitor(dhdp);
+ osl_spin_lock_deinit(dhd->pub.osh, dhd->pub.dbg->pkt_mon_lock);
+#endif /* DBG_PKT_MON */
+ }
+#endif /* DEBUGABILITY */
+ if (dhdp->dbg) {
+ dhd_os_dbg_detach(dhdp);
+ }
+#ifdef DHD_MEM_STATS
+ osl_spin_lock_deinit(dhd->pub.osh, dhd->pub.mem_stats_lock);
+#endif /* DHD_MEM_STATS */
+
+#if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS)
+ osl_spin_lock_deinit(dhd->pub.osh, dhd->pub.awdl_stats_lock);
+#endif /* DHD_AWDL && AWDL_SLOT_STATS */
+#ifdef DHD_PKT_LOGGING
+ dhd_os_detach_pktlog(dhdp);
+#endif /* DHD_PKT_LOGGING */
+#ifdef DHD_STATUS_LOGGING
+ dhd_detach_statlog(dhdp);
+#endif /* DHD_STATUS_LOGGING */
+#ifdef DHD_PKTDUMP_ROAM
+ dhd_dump_pkt_deinit(dhdp);
+#endif /* DHD_PKTDUMP_ROAM */
+#ifdef WL_CFGVENDOR_SEND_HANG_EVENT
+ if (dhd->pub.hang_info) {
+ MFREE(dhd->pub.osh, dhd->pub.hang_info, VENDOR_SEND_HANG_EXT_INFO_LEN);
+ }
+#endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
+#ifdef SHOW_LOGTRACE
+ /* Release the skbs from queue for WLC_E_TRACE event */
+ dhd_event_logtrace_flush_queue(dhdp);
+
+ /* Wait till event logtrace context finishes */
+ dhd_cancel_logtrace_process_sync(dhd);
+
+ /* Remove ring proc entries */
+ dhd_dbg_ring_proc_destroy(&dhd->pub);
+
+ if (dhd->dhd_state & DHD_ATTACH_LOGTRACE_INIT) {
+ if (dhd->event_data.fmts) {
+ MFREE(dhd->pub.osh, dhd->event_data.fmts,
+ dhd->event_data.fmts_size);
+ }
+ if (dhd->event_data.raw_fmts) {
+ MFREE(dhd->pub.osh, dhd->event_data.raw_fmts,
+ dhd->event_data.raw_fmts_size);
+ }
+ if (dhd->event_data.raw_sstr) {
+ MFREE(dhd->pub.osh, dhd->event_data.raw_sstr,
+ dhd->event_data.raw_sstr_size);
+ }
+ if (dhd->event_data.rom_raw_sstr) {
+ MFREE(dhd->pub.osh, dhd->event_data.rom_raw_sstr,
+ dhd->event_data.rom_raw_sstr_size);
+ }
+ dhd->dhd_state &= ~DHD_ATTACH_LOGTRACE_INIT;
+ }
+#endif /* SHOW_LOGTRACE */
+#ifdef BTLOG
+ skb_queue_purge(&dhd->bt_log_queue);
+#endif /* BTLOG */
+#ifdef PNO_SUPPORT
+ if (dhdp->pno_state)
+ dhd_pno_deinit(dhdp);
+#endif
+#ifdef RTT_SUPPORT
+ if (dhdp->rtt_state) {
+ dhd_rtt_detach(dhdp);
+ }
+#endif
+#if defined(CONFIG_PM_SLEEP)
+ if (dhd_pm_notifier_registered) {
+ unregister_pm_notifier(&dhd->pm_notifier);
+ dhd_pm_notifier_registered = FALSE;
+ }
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef DEBUG_CPU_FREQ
+ if (dhd->new_freq)
+ free_percpu(dhd->new_freq);
+ dhd->new_freq = NULL;
+ cpufreq_unregister_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
+#endif
+ DHD_TRACE(("wd wakelock count:%d\n", dhd->wakelock_wd_counter));
+#ifdef CONFIG_HAS_WAKELOCK
+ dhd->wakelock_wd_counter = 0;
+ dhd_wake_lock_unlock_destroy(&dhd->wl_wdwake);
+	// terence 20161023: do not destroy wl_wifi while the WLAN is down, or a NULL pointer dereference will occur in dhd_ioctl_entry
+ dhd_wake_lock_unlock_destroy(&dhd->wl_wifi);
+#endif /* CONFIG_HAS_WAKELOCK */
+ if (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) {
+ DHD_OS_WAKE_LOCK_DESTROY(dhd);
+ }
+
+#ifdef DHDTCPACK_SUPPRESS
+ /* This will free all MEM allocated for TCPACK SUPPRESS */
+ dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
+#endif /* DHDTCPACK_SUPPRESS */
+
+#ifdef PCIE_FULL_DONGLE
+ dhd_flow_rings_deinit(dhdp);
+ if (dhdp->prot)
+ dhd_prot_detach(dhdp);
+#endif
+
+#if defined(WLTDLS) && defined(PCIE_FULL_DONGLE)
+ dhd_free_tdls_peer_list(dhdp);
+#endif
+
+#if (defined(BCM_ROUTER_DHD) && defined(HNDCTF))
+	/* Release the CTF pool ONLY after the prot layer is detached and
+	 * pkts, possibly from the fast ctfpool, are freed into the ctfpool/kernel
+ */
+#ifdef CTFPOOL
+ /* free the buffers in fast pool */
+ osl_ctfpool_cleanup(dhd->pub.osh);
+#endif /* CTFPOOL */
+
+ /* free ctf resources */
+ if (dhd->cih)
+ ctf_detach(dhd->cih);
+#endif /* BCM_ROUTER_DHD && HNDCTF */
+#ifdef BCMDBG
+ dhd_macdbg_detach(dhdp);
+#endif /* BCMDBG */
+
+#ifdef DUMP_IOCTL_IOV_LIST
+ dhd_iov_li_delete(dhdp, &(dhdp->dump_iovlist_head));
+#endif /* DUMP_IOCTL_IOV_LIST */
+#ifdef DHD_DEBUG
+	/* memory waste feature list deletion */
+ dhd_mw_list_delete(dhdp, &(dhdp->mw_list_head));
+#endif /* DHD_DEBUG */
+#ifdef WL_MONITOR
+ dhd_del_monitor_if(dhd);
+#endif /* WL_MONITOR */
+
+#ifdef DHD_ERPOM
+ if (dhdp->enable_erpom) {
+ dhdp->pom_func_deregister(&dhdp->pom_wlan_handler);
+ }
+#endif /* DHD_ERPOM */
+
+ cancel_work_sync(&dhd->dhd_hang_process_work);
+
+	/* Prefer adding de-init code above this comment unless necessary.
+	 * The idea is to cancel the work queue, sysfs and flags at the end.
+ */
+ dhd_deferred_work_deinit(dhd->dhd_deferred_wq);
+ dhd->dhd_deferred_wq = NULL;
+
+ /* log dump related buffers should be freed after wq is purged */
+#ifdef DHD_LOG_DUMP
+ dhd_log_dump_deinit(&dhd->pub);
+#endif /* DHD_LOG_DUMP */
+#if defined(BCMPCIE)
+ if (dhdp->extended_trap_data)
+ {
+ MFREE(dhdp->osh, dhdp->extended_trap_data, BCMPCIE_EXT_TRAP_DATA_MAXLEN);
+ dhdp->extended_trap_data = NULL;
+ }
+#ifdef DNGL_AXI_ERROR_LOGGING
+ if (dhdp->axi_err_dump)
+ {
+ MFREE(dhdp->osh, dhdp->axi_err_dump, sizeof(dhd_axi_error_dump_t));
+ dhdp->axi_err_dump = NULL;
+ }
+#endif /* DNGL_AXI_ERROR_LOGGING */
+#endif /* BCMPCIE */
+
+#ifdef BTLOG
+ /* Wait till bt_log_dispatcher_work finishes */
+ cancel_work_sync(&dhd->bt_log_dispatcher_work);
+#endif /* BTLOG */
+
+#ifdef EWP_EDL
+ cancel_delayed_work_sync(&dhd->edl_dispatcher_work);
+#endif
+
+ (void)dhd_deinit_sock_flows_buf(dhd);
+
+#ifdef DHD_DUMP_MNGR
+ if (dhd->pub.dump_file_manage) {
+ MFREE(dhd->pub.osh, dhd->pub.dump_file_manage,
+ sizeof(dhd_dump_file_manage_t));
+ }
+#endif /* DHD_DUMP_MNGR */
+
+ dhd_sysfs_exit(dhd);
+ dhd->pub.fw_download_status = FW_UNLOADED;
+
+#if defined(BT_OVER_SDIO)
+ mutex_destroy(&dhd->bus_user_lock);
+#endif /* BT_OVER_SDIO */
+
+#ifdef BCMINTERNAL
+#ifdef DHD_FWTRACE
+ (void) dhd_fwtrace_detach(dhdp);
+#endif /* DHD_FWTRACE */
+#endif /* BCMINTERNAL */
+
+#ifdef DHD_TX_PROFILE
+ (void)dhd_tx_profile_detach(dhdp);
+#endif /* defined(DHD_TX_PROFILE) */
+ dhd_conf_detach(dhdp);
+
+} /* dhd_detach */
+
+void
+dhd_free(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd;
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (dhdp) {
+ int i;
+ for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) {
+ if (dhdp->reorder_bufs[i]) {
+ reorder_info_t *ptr;
+ uint32 buf_size = sizeof(struct reorder_info);
+
+ ptr = dhdp->reorder_bufs[i];
+
+ buf_size += ((ptr->max_idx + 1) * sizeof(void*));
+ DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
+ i, ptr->max_idx, buf_size));
+
+ MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size);
+ }
+ }
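+		/* each buffer freed above is a reorder_info header plus
+		 * (max_idx + 1) packet pointers, mirroring its allocation
+		 */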
+
+ dhd_sta_pool_fini(dhdp, DHD_MAX_STA);
+
+ dhd = (dhd_info_t *)dhdp->info;
+ if (dhdp->soc_ram) {
+#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
+ DHD_OS_PREFREE(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
+#else
+ if (is_vmalloc_addr(dhdp->soc_ram)) {
+ VMFREE(dhdp->osh, dhdp->soc_ram, dhdp->soc_ram_length);
+ }
+ else {
+ MFREE(dhdp->osh, dhdp->soc_ram, dhdp->soc_ram_length);
+ }
+#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
+ dhdp->soc_ram = NULL;
+ }
+#ifdef CACHE_FW_IMAGES
+ if (dhdp->cached_fw) {
+ MFREE(dhdp->osh, dhdp->cached_fw, dhdp->bus->ramsize);
+ }
+
+ if (dhdp->cached_nvram) {
+ MFREE(dhdp->osh, dhdp->cached_nvram, MAX_NVRAMBUF_SIZE);
+ }
+#endif
+ if (dhd != NULL) {
+#ifdef REPORT_FATAL_TIMEOUTS
+ deinit_dhd_timeouts(&dhd->pub);
+#endif /* REPORT_FATAL_TIMEOUTS */
+
+			/* If the pointer was allocated by dhd_os_prealloc then avoid MFREE */
+ if (dhd != (dhd_info_t *)dhd_os_prealloc(dhdp,
+ DHD_PREALLOC_DHD_INFO, 0, FALSE))
+ MFREE(dhd->pub.osh, dhd, sizeof(*dhd));
+ dhd = NULL;
+ }
+ }
+}
+
+void
+dhd_clear(dhd_pub_t *dhdp)
+{
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (dhdp) {
+ int i;
+#ifdef DHDTCPACK_SUPPRESS
+ /* Clean up timer/data structure for any remaining/pending packet or timer. */
+ dhd_tcpack_info_tbl_clean(dhdp);
+#endif /* DHDTCPACK_SUPPRESS */
+ for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) {
+ if (dhdp->reorder_bufs[i]) {
+ reorder_info_t *ptr;
+ uint32 buf_size = sizeof(struct reorder_info);
+
+ ptr = dhdp->reorder_bufs[i];
+
+ buf_size += ((ptr->max_idx + 1) * sizeof(void*));
+ DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
+ i, ptr->max_idx, buf_size));
+
+ MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size);
+ }
+ }
+
+ dhd_sta_pool_clear(dhdp, DHD_MAX_STA);
+
+ if (dhdp->soc_ram) {
+#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
+ DHD_OS_PREFREE(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
+#else
+ if (is_vmalloc_addr(dhdp->soc_ram)) {
+ VMFREE(dhdp->osh, dhdp->soc_ram, dhdp->soc_ram_length);
+ }
+ else {
+ MFREE(dhdp->osh, dhdp->soc_ram, dhdp->soc_ram_length);
+ }
+#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
+ dhdp->soc_ram = NULL;
+ }
+ }
+}
+
+static void
+dhd_module_cleanup(void)
+{
+ printf("%s: Enter\n", __FUNCTION__);
+
+ dhd_bus_unregister();
+
+#if defined(OEM_ANDROID)
+ wl_android_exit();
+#endif /* OEM_ANDROID */
+
+ dhd_wifi_platform_unregister_drv();
+
+#ifdef CUSTOMER_HW_AMLOGIC
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
+ wifi_teardown_dt();
+#endif
+#endif
+ printf("%s: Exit\n", __FUNCTION__);
+}
+
+static void __exit
+dhd_module_exit(void)
+{
+ atomic_set(&exit_in_progress, 1);
+#ifdef DHD_BUZZZ_LOG_ENABLED
+ dhd_buzzz_detach();
+#endif /* DHD_BUZZZ_LOG_ENABLED */
+ dhd_module_cleanup();
+ unregister_reboot_notifier(&dhd_reboot_notifier);
+ dhd_destroy_to_notifier_skt();
+#ifdef DHD_PKTTS
+ dhd_destroy_to_notifier_ts();
+#endif /* DHD_PKTTS */
+}
+
+static int
+_dhd_module_init(void)
+{
+ int err;
+ int retry = POWERUP_MAX_RETRY;
+
+ printk(KERN_ERR PERCENT_S DHD_LOG_PREFIXS "%s: in %s\n",
+ PRINTF_SYSTEM_TIME, __FUNCTION__, dhd_version);
+ if (ANDROID_VERSION > 0)
+ printf("ANDROID_VERSION = %d\n", ANDROID_VERSION);
+#ifdef CUSTOMER_HW_AMLOGIC
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
+ if (wifi_setup_dt()) {
+ printf("wifi_dt : fail to setup dt\n");
+ }
+#endif
+#endif
+
+#ifdef DHD_BUZZZ_LOG_ENABLED
+ dhd_buzzz_attach();
+#endif /* DHD_BUZZZ_LOG_ENABLED */
+
+#if defined(BCM_ROUTER_DHD)
+ { /* XXX Should we maintain nvram budget/thresholds per 5G|2G radio? */
+ char * var;
+ if ((var = getvar(NULL, "dhd_queue_budget")) != NULL) {
+ dhd_queue_budget = bcm_strtoul(var, NULL, 0);
+ }
+ DHD_ERROR(("dhd_queue_budget = %d\n", dhd_queue_budget));
+
+ if ((var = getvar(NULL, "dhd_sta_threshold")) != NULL) {
+ dhd_sta_threshold = bcm_strtoul(var, NULL, 0);
+ }
+ DHD_ERROR(("dhd_sta_threshold = %d\n", dhd_sta_threshold));
+
+ if ((var = getvar(NULL, "dhd_if_threshold")) != NULL) {
+ dhd_if_threshold = bcm_strtoul(var, NULL, 0);
+ }
+ DHD_ERROR(("dhd_if_threshold = %d\n", dhd_if_threshold));
+ }
+#endif /* BCM_ROUTER_DHD */
+
+ if (firmware_path[0] != '\0') {
+ strlcpy(fw_bak_path, firmware_path, sizeof(fw_bak_path));
+ }
+
+ if (nvram_path[0] != '\0') {
+ strlcpy(nv_bak_path, nvram_path, sizeof(nv_bak_path));
+ }
+
+ do {
+ err = dhd_wifi_platform_register_drv();
+ if (!err) {
+ register_reboot_notifier(&dhd_reboot_notifier);
+ break;
+ } else {
+ DHD_ERROR(("%s: Failed to load the driver, try cnt %d\n",
+ __FUNCTION__, retry));
+ strlcpy(firmware_path, fw_bak_path, sizeof(firmware_path));
+ strlcpy(nvram_path, nv_bak_path, sizeof(nvram_path));
+ }
+ } while (retry--);
+
+ dhd_create_to_notifier_skt();
+
+#ifdef DHD_PKTTS
+ dhd_create_to_notifier_ts();
+#endif /* DHD_PKTTS */
+
+ if (err) {
+#ifdef CUSTOMER_HW_AMLOGIC
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
+ wifi_teardown_dt();
+#endif
+#endif
+ DHD_ERROR(("%s: Failed to load driver max retry reached**\n", __FUNCTION__));
+ } else {
+ if (!dhd_download_fw_on_driverload) {
+ dhd_driver_init_done = TRUE;
+ }
+ }
+
+ printf("%s: Exit err=%d\n", __FUNCTION__, err);
+ return err;
+}
+
+static int __init
+dhd_module_init(void)
+{
+ int err;
+
+ err = _dhd_module_init();
+#ifdef DHD_SUPPORT_HDM
+ if (err && !dhd_download_fw_on_driverload) {
+ dhd_hdm_wlan_sysfs_init();
+ }
+#endif /* DHD_SUPPORT_HDM */
+ return err;
+
+}
+
+#ifdef DHD_SUPPORT_HDM
+bool hdm_trigger_init = FALSE;
+struct delayed_work hdm_sysfs_wq;
+
+int
+dhd_module_init_hdm(void)
+{
+ int err = 0;
+
+ hdm_trigger_init = TRUE;
+
+ if (dhd_driver_init_done) {
+ DHD_INFO(("%s : Module is already inited\n", __FUNCTION__));
+ return err;
+ }
+
+ err = _dhd_module_init();
+
+	/* remove the sysfs file after the module has loaded properly */
+ if (!err && !dhd_download_fw_on_driverload) {
+ INIT_DELAYED_WORK(&hdm_sysfs_wq, dhd_hdm_wlan_sysfs_deinit);
+ schedule_delayed_work(&hdm_sysfs_wq, msecs_to_jiffies(SYSFS_DEINIT_MS));
+ }
+
+ hdm_trigger_init = FALSE;
+ return err;
+}
+#endif /* DHD_SUPPORT_HDM */
+
+static int
+dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused)
+{
+ DHD_TRACE(("%s: code = %ld\n", __FUNCTION__, code));
+ if (code == SYS_RESTART) {
+#ifdef OEM_ANDROID
+#ifdef BCMPCIE
+ is_reboot = code;
+#endif /* BCMPCIE */
+#else
+ dhd_module_cleanup();
+#endif /* OEM_ANDROID */
+ }
+ return NOTIFY_DONE;
+}
+
+#if defined(CONFIG_DEFERRED_INITCALLS) && !defined(EXYNOS_PCIE_MODULE_PATCH)
+/* XXX To decrease the device boot time, the deferred_module_init() macro can
+ * be used. The detailed principle and implementation of deferred_module_init()
+ * can be found at http://elinux.org/Deferred_Initcalls
+ * To enable this feature for a module build, another deferred_module_init()
+ * definition needs to be added to include/linux/init.h in the Linux kernel:
+ * #define deferred_module_init(fn) module_init(fn)
+ */
+#if defined(CONFIG_ARCH_MSM) || defined(CONFIG_ARCH_EXYNOS)
+deferred_module_init_sync(dhd_module_init);
+#else
+deferred_module_init(dhd_module_init);
+#endif /* CONFIG_ARCH_MSM || CONFIG_ARCH_EXYNOS */
+#elif defined(USE_LATE_INITCALL_SYNC)
+late_initcall_sync(dhd_module_init);
+#else
+late_initcall(dhd_module_init);
+#endif /* USE_LATE_INITCALL_SYNC */
+
+module_exit(dhd_module_exit);
+
+/*
+ * OS-specific functions required to implement the DHD driver in an OS-independent way
+ */
+int
+dhd_os_proto_block(dhd_pub_t *pub)
+{
+ dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+
+ if (dhd) {
+ down(&dhd->proto_sem);
+
+ return 1;
+ }
+
+ return 0;
+}
+
+int
+dhd_os_proto_unblock(dhd_pub_t *pub)
+{
+ dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+
+ if (dhd) {
+ up(&dhd->proto_sem);
+ return 1;
+ }
+
+ return 0;
+}
+
+void
+dhd_os_dhdiovar_lock(dhd_pub_t *pub)
+{
+ dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+
+ if (dhd) {
+ mutex_lock(&dhd->dhd_iovar_mutex);
+ }
+}
+
+void
+dhd_os_dhdiovar_unlock(dhd_pub_t *pub)
+{
+ dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+
+ if (dhd) {
+ mutex_unlock(&dhd->dhd_iovar_mutex);
+ }
+}
+
+void
+dhd_os_logdump_lock(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd = NULL;
+
+ if (!pub)
+ return;
+
+ dhd = (dhd_info_t *)(pub->info);
+
+ if (dhd) {
+ mutex_lock(&dhd->logdump_lock);
+ }
+}
+
+void
+dhd_os_logdump_unlock(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd = NULL;
+
+ if (!pub)
+ return;
+
+ dhd = (dhd_info_t *)(pub->info);
+
+ if (dhd) {
+ mutex_unlock(&dhd->logdump_lock);
+ }
+}
+
+unsigned long
+dhd_os_dbgring_lock(void *lock)
+{
+ if (!lock)
+ return 0;
+
+ mutex_lock((struct mutex *)lock);
+
+ return 0;
+}
+
+void
+dhd_os_dbgring_unlock(void *lock, unsigned long flags)
+{
+ BCM_REFERENCE(flags);
+
+ if (!lock)
+ return;
+
+ mutex_unlock((struct mutex *)lock);
+}
+
+unsigned int
+dhd_os_get_ioctl_resp_timeout(void)
+{
+ return ((unsigned int)dhd_ioctl_timeout_msec);
+}
+
+void
+dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec)
+{
+ dhd_ioctl_timeout_msec = (int)timeout_msec;
+}
+
+int
+dhd_os_ioctl_resp_wait(dhd_pub_t *pub, uint *condition)
+{
+ dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+ int timeout;
+
+	/* Convert timeout in milliseconds to jiffies */
+ timeout = msecs_to_jiffies(dhd_ioctl_timeout_msec);
+
+#ifdef BCMQT_HW
+ DHD_ERROR(("%s, Timeout wait until %d mins (%d ms) in QT mode\n",
+ __FUNCTION__, (dhd_ioctl_timeout_msec / (60 * 1000)), dhd_ioctl_timeout_msec));
+#endif /* BCMQT_HW */
+
+ timeout = wait_event_timeout(dhd->ioctl_resp_wait, (*condition), timeout);
+
+ return timeout;
+}
+
+int
+dhd_os_ioctl_resp_wake(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+ wake_up(&dhd->ioctl_resp_wait);
+ return 0;
+}
+
+int
+dhd_os_d3ack_wait(dhd_pub_t *pub, uint *condition)
+{
+ dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+ int timeout;
+
+	/* Convert timeout in milliseconds to jiffies */
+ timeout = msecs_to_jiffies(D3_ACK_RESP_TIMEOUT);
+#ifdef BCMSLTGT
+ timeout *= htclkratio;
+#endif /* BCMSLTGT */
+
+ timeout = wait_event_timeout(dhd->d3ack_wait, (*condition), timeout);
+
+ return timeout;
+}
+
+#ifdef PCIE_INB_DW
+int
+dhd_os_ds_exit_wait(dhd_pub_t *pub, uint *condition)
+{
+ dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+ int timeout;
+
+	/* Convert timeout in milliseconds to jiffies */
+ timeout = msecs_to_jiffies(ds_exit_timeout_msec);
+#ifdef BCMSLTGT
+ timeout *= htclkratio;
+#endif /* BCMSLTGT */
+
+ timeout = wait_event_timeout(dhd->ds_exit_wait, (*condition), timeout);
+
+ return timeout;
+}
+
+int
+dhd_os_ds_exit_wake(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+ wake_up_all(&dhd->ds_exit_wait);
+ return 0;
+}
+
+#endif /* PCIE_INB_DW */
+
+int
+dhd_os_d3ack_wake(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+ wake_up(&dhd->d3ack_wait);
+ return 0;
+}
+
+int
+dhd_os_busbusy_wait_negation(dhd_pub_t *pub, uint *condition)
+{
+ dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+ int timeout;
+
+	/* Wait for bus usage contexts to gracefully exit within some timeout value.
+	 * Set the timeout slightly higher than dhd_ioctl_timeout_msec,
+	 * so that the IOCTL timeout is not affected.
+ */
+	/* Convert timeout in milliseconds to jiffies */
+ timeout = msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT);
+
+ timeout = wait_event_timeout(dhd->dhd_bus_busy_state_wait, !(*condition), timeout);
+
+ return timeout;
+}
+
+/*
+ * Wait until the condition *var == condition is met.
+ * Returns 0 if the @condition evaluated to false after the timeout elapsed
+ * Returns 1 if the @condition evaluated to true
+ */
+int
+dhd_os_busbusy_wait_condition(dhd_pub_t *pub, uint *var, uint condition)
+{
+ dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+ int timeout;
+
+	/* Convert timeout in milliseconds to jiffies */
+ timeout = msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT);
+
+ timeout = wait_event_timeout(dhd->dhd_bus_busy_state_wait, (*var == condition), timeout);
+
+ return timeout;
+}
+
+/*
+ * Wait until the '(*var & bitmask) == condition' is met.
+ * Returns 0 if the @condition evaluated to false after the timeout elapsed
+ * Returns 1 if the @condition evaluated to true
+ */
+int
+dhd_os_busbusy_wait_bitmask(dhd_pub_t *pub, uint *var,
+ uint bitmask, uint condition)
+{
+ dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+ int timeout;
+
+	/* Convert timeout in milliseconds to jiffies */
+ timeout = msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT);
+
+ timeout = wait_event_timeout(dhd->dhd_bus_busy_state_wait,
+ ((*var & bitmask) == condition), timeout);
+
+ return timeout;
+}
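+
+/* Typical use (see dhd_wait_for_file_dump() above): block until the
+ * DHD_BUS_BUSY_IN_HALDUMP bit drops out of dhd_bus_busy_state:
+ *
+ *	dhd_os_busbusy_wait_bitmask(dhdp, &dhdp->dhd_bus_busy_state,
+ *		DHD_BUS_BUSY_IN_HALDUMP, 0);
+ */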
+
+int
+dhd_os_dmaxfer_wait(dhd_pub_t *pub, uint *condition)
+{
+ int ret = 0;
+ dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+ int timeout;
+
+ timeout = msecs_to_jiffies(IOCTL_DMAXFER_TIMEOUT);
+
+ ret = wait_event_timeout(dhd->dmaxfer_wait, (*condition), timeout);
+
+ return ret;
+
+}
+
+int
+dhd_os_dmaxfer_wake(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+ wake_up(&dhd->dmaxfer_wait);
+ return 0;
+}
+
+void
+dhd_os_tx_completion_wake(dhd_pub_t *dhd)
+{
+	/* Call wmb() to make sure the event value is updated before waking up the waiter */
+ OSL_SMP_WMB();
+ wake_up(&dhd->tx_completion_wait);
+}
+
+/* Fix compilation error for FC11 */
+INLINE int
+dhd_os_busbusy_wake(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	/* Call wmb() to make sure the event value is updated before waking up the waiter */
+ OSL_SMP_WMB();
+ wake_up(&dhd->dhd_bus_busy_state_wait);
+ return 0;
+}
+
+void
+dhd_os_wd_timer_extend(void *bus, bool extend)
+{
+#ifndef BCMDBUS
+ dhd_pub_t *pub = bus;
+ dhd_info_t *dhd = (dhd_info_t *)pub->info;
+
+ if (extend)
+ dhd_os_wd_timer(bus, WATCHDOG_EXTEND_INTERVAL);
+ else
+ dhd_os_wd_timer(bus, dhd->default_wd_interval);
+#endif /* !BCMDBUS */
+}
+
+void
+dhd_os_wd_timer(void *bus, uint wdtick)
+{
+#ifndef BCMDBUS
+ dhd_pub_t *pub = bus;
+ dhd_info_t *dhd = (dhd_info_t *)pub->info;
+ unsigned long flags;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd NULL\n", __FUNCTION__));
+ return;
+ }
+
+ DHD_GENERAL_LOCK(pub, flags);
+
+ /* don't start the wd until fw is loaded */
+ if (pub->busstate == DHD_BUS_DOWN) {
+ DHD_GENERAL_UNLOCK(pub, flags);
+#ifdef BCMSDIO
+ if (!wdtick) {
+ DHD_OS_WD_WAKE_UNLOCK(pub);
+ }
+#endif /* BCMSDIO */
+ return;
+ }
+
+ /* Totally stop the timer */
+ if (!wdtick && dhd->wd_timer_valid == TRUE) {
+ dhd->wd_timer_valid = FALSE;
+ DHD_GENERAL_UNLOCK(pub, flags);
+ del_timer_sync(&dhd->timer);
+#ifdef BCMSDIO
+ DHD_OS_WD_WAKE_UNLOCK(pub);
+#endif /* BCMSDIO */
+ return;
+ }
+
+ if (wdtick) {
+#ifdef BCMSDIO
+ DHD_OS_WD_WAKE_LOCK(pub);
+ dhd_watchdog_ms = (uint)wdtick;
+#endif /* BCMSDIO */
+		/* Re-arm the timer with the last watchdog period */
+ mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
+ dhd->wd_timer_valid = TRUE;
+ }
+ DHD_GENERAL_UNLOCK(pub, flags);
+#endif /* BCMDBUS */
+}
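+
+/* Callers arm the watchdog with a non-zero tick and stop it with zero, e.g.
+ *
+ *	dhd_os_wd_timer(bus, dhd_watchdog_ms);	// (re)arm at the current period
+ *	dhd_os_wd_timer(bus, 0);		// stop (on SDIO this also drops the WD wakelock)
+ */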
+
+#ifdef DHD_PCIE_RUNTIMEPM
+void
+dhd_os_runtimepm_timer(void *bus, uint tick)
+{
+ dhd_pub_t *pub = bus;
+ dhd_info_t *dhd = (dhd_info_t *)pub->info;
+ unsigned long flags;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ DHD_GENERAL_LOCK(pub, flags);
+
+ /* don't start the RPM until fw is loaded */
+ if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(pub)) {
+ DHD_GENERAL_UNLOCK(pub, flags);
+ return;
+ }
+
+ /* If tick is non-zero, the request is to start the timer */
+ if (tick) {
+		/* Start the timer only if it's not already running */
+ if (dhd->rpm_timer_valid == FALSE) {
+ mod_timer(&dhd->rpm_timer, jiffies + msecs_to_jiffies(dhd_runtimepm_ms));
+ dhd->rpm_timer_valid = TRUE;
+ DHD_ERROR(("DHD Runtime PM Timer ON\n"));
+ }
+ } else {
+ /* tick is zero, we have to stop the timer */
+		/* Stop the timer only if it's running, otherwise we don't have to do anything */
+ if (dhd->rpm_timer_valid == TRUE) {
+ dhd->rpm_timer_valid = FALSE;
+ DHD_GENERAL_UNLOCK(pub, flags);
+ del_timer_sync(&dhd->rpm_timer);
+ DHD_ERROR(("DHD Runtime PM Timer OFF \n"));
+ /* we have already released the lock, so just go to exit */
+ goto exit;
+ }
+ }
+
+ DHD_GENERAL_UNLOCK(pub, flags);
+exit:
+ return;
+
+}
+
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+void *
+dhd_os_open_image1(dhd_pub_t *pub, char *filename)
+{
+ struct file *fp;
+ int size;
+
+ fp = filp_open(filename, O_RDONLY, 0);
+ /*
+ * 2.6.11 (FC4) supports filp_open() but later revs don't?
+ * Alternative:
+ * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
+ * ???
+ */
+ if (IS_ERR(fp)) {
+ fp = NULL;
+ goto err;
+ }
+
+ if (!S_ISREG(file_inode(fp)->i_mode)) {
+ DHD_ERROR(("%s: %s is not regular file\n", __FUNCTION__, filename));
+ fp = NULL;
+ goto err;
+ }
+
+ size = i_size_read(file_inode(fp));
+ if (size <= 0) {
+ DHD_ERROR(("%s: %s file size invalid %d\n", __FUNCTION__, filename, size));
+ fp = NULL;
+ goto err;
+ }
+
+ DHD_ERROR(("%s: %s (%d bytes) open success\n", __FUNCTION__, filename, size));
+
+err:
+ return fp;
+}
+
+int
+dhd_os_get_image_block(char *buf, int len, void *image)
+{
+ struct file *fp = (struct file *)image;
+ int rdlen;
+ int size;
+
+ if (!image) {
+ return 0;
+ }
+
+ size = i_size_read(file_inode(fp));
+ rdlen = kernel_read_compat(fp, fp->f_pos, buf, MIN(len, size));
+
+ if (len >= size && size != rdlen) {
+ return -EIO;
+ }
+
+ if (rdlen > 0) {
+ fp->f_pos += rdlen;
+ }
+
+ return rdlen;
+}
+
+#if defined(BT_OVER_SDIO)
+int
+dhd_os_gets_image(dhd_pub_t *pub, char *str, int len, void *image)
+{
+ struct file *fp = (struct file *)image;
+ int rd_len;
+ uint str_len = 0;
+ char *str_end = NULL;
+
+ if (!image)
+ return 0;
+
+ rd_len = kernel_read_compat(fp, fp->f_pos, str, len);
+ str_end = strnchr(str, len, '\n');
+ if (str_end == NULL) {
+ goto err;
+ }
+ str_len = (uint)(str_end - str);
+
+ /* Advance file pointer past the string length */
+ fp->f_pos += str_len + 1;
+ bzero(str_end, rd_len - str_len);
+
+err:
+ return str_len;
+}
+#endif /* defined (BT_OVER_SDIO) */
+
+int
+dhd_os_get_image_size(void *image)
+{
+ struct file *fp = (struct file *)image;
+ int size;
+ if (!image) {
+ return 0;
+ }
+
+ size = i_size_read(file_inode(fp));
+
+ return size;
+}
+
+void
+dhd_os_close_image1(dhd_pub_t *pub, void *image)
+{
+ if (image) {
+ filp_close((struct file *)image, NULL);
+ }
+}
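+
+/* A compiled-out sketch of the open/read/close cycle these helpers
+ * implement; the block size and function name are illustrative assumptions.
+ */
+#if 0
+static void
+dhd_image_read_example(dhd_pub_t *pub, char *path)
+{
+	char block[2048];
+	void *image = dhd_os_open_image1(pub, path);
+	int len;
+
+	if (image == NULL)
+		return;
+	/* dhd_os_get_image_block() advances f_pos, so repeated calls stream
+	 * through the file until 0 (EOF) or a negative error is returned
+	 */
+	while ((len = dhd_os_get_image_block(block, sizeof(block), image)) > 0) {
+		/* consume 'len' bytes from 'block' */
+	}
+	dhd_os_close_image1(pub, image);
+}
+#endif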
+
+void
+dhd_os_sdlock(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd;
+
+ dhd = (dhd_info_t *)(pub->info);
+
+#ifdef BCMDBUS
+ spin_lock_bh(&dhd->sdlock);
+#else
+ if (dhd_dpc_prio >= 0)
+ down(&dhd->sdsem);
+ else
+ spin_lock_bh(&dhd->sdlock);
+#endif /* !BCMDBUS */
+}
+
+void
+dhd_os_sdunlock(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd;
+
+ dhd = (dhd_info_t *)(pub->info);
+
+#ifdef BCMDBUS
+ spin_unlock_bh(&dhd->sdlock);
+#else
+ if (dhd_dpc_prio >= 0)
+ up(&dhd->sdsem);
+ else
+ spin_unlock_bh(&dhd->sdlock);
+#endif /* !BCMDBUS */
+}
+
+void
+dhd_os_sdlock_txq(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd;
+
+ dhd = (dhd_info_t *)(pub->info);
+#ifdef BCMDBUS
+ spin_lock_irqsave(&dhd->txqlock, dhd->txqlock_flags);
+#else
+ spin_lock_bh(&dhd->txqlock);
+#endif /* BCMDBUS */
+}
+
+void
+dhd_os_sdunlock_txq(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd;
+
+ dhd = (dhd_info_t *)(pub->info);
+#ifdef BCMDBUS
+ spin_unlock_irqrestore(&dhd->txqlock, dhd->txqlock_flags);
+#else
+ spin_unlock_bh(&dhd->txqlock);
+#endif /* BCMDBUS */
+}
+
+unsigned long
+dhd_os_sdlock_txoff(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd;
+ unsigned long flags = 0;
+
+ dhd = (dhd_info_t *)(pub->info);
+ spin_lock_irqsave(&dhd->txoff_lock, flags);
+
+ return flags;
+}
+
+void
+dhd_os_sdunlock_txoff(dhd_pub_t *pub, unsigned long flags)
+{
+ dhd_info_t *dhd;
+
+ dhd = (dhd_info_t *)(pub->info);
+ spin_unlock_irqrestore(&dhd->txoff_lock, flags);
+}
+
+void
+dhd_os_sdlock_rxq(dhd_pub_t *pub)
+{
+}
+
+void
+dhd_os_sdunlock_rxq(dhd_pub_t *pub)
+{
+}
+
+static void
+dhd_os_rxflock(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd;
+
+ dhd = (dhd_info_t *)(pub->info);
+ spin_lock_bh(&dhd->rxf_lock);
+
+}
+
+static void
+dhd_os_rxfunlock(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd;
+
+ dhd = (dhd_info_t *)(pub->info);
+ spin_unlock_bh(&dhd->rxf_lock);
+}
+
+#ifdef DHDTCPACK_SUPPRESS
+unsigned long
+dhd_os_tcpacklock(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd;
+ unsigned long flags = 0;
+
+ dhd = (dhd_info_t *)(pub->info);
+
+ if (dhd) {
+#ifdef BCMSDIO
+ spin_lock_bh(&dhd->tcpack_lock);
+#else
+ flags = osl_spin_lock(&dhd->tcpack_lock);
+#endif /* BCMSDIO */
+ }
+
+ return flags;
+}
+
+void
+dhd_os_tcpackunlock(dhd_pub_t *pub, unsigned long flags)
+{
+ dhd_info_t *dhd;
+
+#ifdef BCMSDIO
+ BCM_REFERENCE(flags);
+#endif /* BCMSDIO */
+
+ dhd = (dhd_info_t *)(pub->info);
+
+ if (dhd) {
+#ifdef BCMSDIO
+ spin_unlock_bh(&dhd->tcpack_lock);
+#else
+ osl_spin_unlock(&dhd->tcpack_lock, flags);
+#endif /* BCMSDIO */
+ }
+}
+#endif /* DHDTCPACK_SUPPRESS */
+
+uint8* dhd_os_prealloc(dhd_pub_t *dhdpub, int section, uint size, bool kmalloc_if_fail)
+{
+ uint8* buf;
+ gfp_t flags = CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC;
+
+ buf = (uint8*)wifi_platform_prealloc(dhdpub->info->adapter, section, size);
+ if (buf == NULL && kmalloc_if_fail)
+ buf = kmalloc(size, flags);
+
+ return buf;
+}
+
+void dhd_os_prefree(dhd_pub_t *dhdpub, void *addr, uint size)
+{
+}
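+/* Usage sketch (illustrative; 'section' is a placeholder section id): memory
+ * returned by wifi_platform_prealloc() is owned by the platform layer, which
+ * is why dhd_os_prefree() is empty:
+ *
+ *	uint8 *buf = dhd_os_prealloc(dhdp, section, size, FALSE);
+ *	...
+ *	dhd_os_prefree(dhdp, buf, size);	-- no-op for preallocated memory
+ *
+ * Note that a buffer obtained through the kmalloc_if_fail fallback is not
+ * released here either; such a caller must kfree() it itself.
+ */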
+
+#if defined(WL_WIRELESS_EXT)
+struct iw_statistics *
+dhd_get_wireless_stats(struct net_device *dev)
+{
+ int res = 0;
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+ if (!dhd->pub.up) {
+ return NULL;
+ }
+
+ if (!(dev->flags & IFF_UP)) {
+ return NULL;
+ }
+
+ res = wl_iw_get_wireless_stats(dev, &dhd->iw.wstats);
+
+ if (res == 0)
+ return &dhd->iw.wstats;
+ else
+ return NULL;
+}
+#endif /* defined(WL_WIRELESS_EXT) */
+
+static int
+dhd_wl_host_event(dhd_info_t *dhd, int ifidx, void *pktdata, uint16 pktlen,
+ wl_event_msg_t *event, void **data)
+{
+ int bcmerror = 0;
+#ifdef WL_CFG80211
+ unsigned long flags = 0;
+#endif /* WL_CFG80211 */
+ ASSERT(dhd != NULL);
+
+#ifdef SHOW_LOGTRACE
+ bcmerror = wl_process_host_event(&dhd->pub, &ifidx, pktdata, pktlen, event, data,
+ &dhd->event_data);
+#else
+ bcmerror = wl_process_host_event(&dhd->pub, &ifidx, pktdata, pktlen, event, data,
+ NULL);
+#endif /* SHOW_LOGTRACE */
+ if (unlikely(bcmerror != BCME_OK)) {
+ return bcmerror;
+ }
+
+ if (ntoh32(event->event_type) == WLC_E_IF) {
+ /* WLC_E_IF event types are consumed by wl_process_host_event.
+ * For ifadd/del ops, the netdev ptr may not be valid at this
+ * point, so return before invoking the cfg80211/wext handlers.
+ */
+ return BCME_OK;
+ }
+
+#ifdef WL_EVENT
+ wl_ext_event_send(dhd->pub.event_params, event, *data);
+#endif
+
+#ifdef WL_CFG80211
+ if (dhd->iflist[ifidx]->net) {
+ DHD_UP_LOCK(&dhd->pub.up_lock, flags);
+ if (dhd->pub.up) {
+ wl_cfg80211_event(dhd->iflist[ifidx]->net, event, *data);
+ }
+ DHD_UP_UNLOCK(&dhd->pub.up_lock, flags);
+ }
+#endif /* defined(WL_CFG80211) */
+
+ return (bcmerror);
+}
+
+/* send up locally generated event */
+void
+dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data)
+{
+ switch (ntoh32(event->event_type)) {
+ /* Handle error case or further events here */
+ default:
+ break;
+ }
+}
+
+#ifdef LOG_INTO_TCPDUMP
+void
+dhd_sendup_log(dhd_pub_t *dhdp, void *data, int data_len)
+{
+ struct sk_buff *p, *skb;
+ uint32 pktlen;
+ int len;
+ dhd_if_t *ifp;
+ dhd_info_t *dhd;
+ uchar *skb_data;
+ int ifidx = 0;
+ struct ether_header eth;
+
+ pktlen = sizeof(eth) + data_len;
+ dhd = dhdp->info;
+
+ if ((p = PKTGET(dhdp->osh, pktlen, FALSE))) {
+ ASSERT(ISALIGNED((uintptr)PKTDATA(dhdp->osh, p), sizeof(uint32)));
+
+ bcopy(&dhdp->mac, &eth.ether_dhost, ETHER_ADDR_LEN);
+ bcopy(&dhdp->mac, &eth.ether_shost, ETHER_ADDR_LEN);
+ ETHER_TOGGLE_LOCALADDR(&eth.ether_shost);
+ eth.ether_type = hton16(ETHER_TYPE_BRCM);
+
+ bcopy((void *)&eth, PKTDATA(dhdp->osh, p), sizeof(eth));
+ bcopy(data, PKTDATA(dhdp->osh, p) + sizeof(eth), data_len);
+ skb = PKTTONATIVE(dhdp->osh, p);
+ skb_data = skb->data;
+ len = skb->len;
+
+ ifidx = dhd_ifname2idx(dhd, "wlan0");
+ ifp = dhd->iflist[ifidx];
+ if (ifp == NULL)
+ ifp = dhd->iflist[0];
+
+ ASSERT(ifp);
+ skb->dev = ifp->net;
+ skb->protocol = eth_type_trans(skb, skb->dev);
+ skb->data = skb_data;
+ skb->len = len;
+
+ /* Strip header, count, deliver upward */
+ skb_pull(skb, ETH_HLEN);
+
+ bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
+ __FUNCTION__, __LINE__);
+ /* Send the packet */
+ if (in_interrupt()) {
+ netif_rx(skb);
+ } else {
+ netif_rx_ni(skb);
+ }
+ } else {
+ /* Could not allocate an sk_buff */
+ DHD_ERROR(("%s: unable to alloc sk_buff\n", __FUNCTION__));
+ }
+}
+#endif /* LOG_INTO_TCPDUMP */
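+/* Frame layout built by dhd_sendup_log() above (for reference):
+ *
+ *	[ struct ether_header | log payload (data_len bytes) ]
+ *	  dhost = shost = device MAC (local bit toggled on shost)
+ *	  ether_type = ETHER_TYPE_BRCM
+ *
+ * eth_type_trans()/skb_pull() then strip the header, so a sniffer on the
+ * wlan interface sees the payload as a Broadcom-typed frame.
+ */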
+
+void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar)
+{
+#if defined(BCMSDIO)
+ struct dhd_info *dhdinfo = dhd->info;
+
+ int timeout = msecs_to_jiffies(IOCTL_RESP_TIMEOUT);
+
+ dhd_os_sdunlock(dhd);
+ wait_event_timeout(dhdinfo->ctrl_wait, (*lockvar == FALSE), timeout);
+ dhd_os_sdlock(dhd);
+#endif /* defined(BCMSDIO) */
+ return;
+} /* dhd_wait_for_event */
+
+void dhd_wait_event_wakeup(dhd_pub_t *dhd)
+{
+#if defined(BCMSDIO)
+ struct dhd_info *dhdinfo = dhd->info;
+ if (waitqueue_active(&dhdinfo->ctrl_wait))
+ wake_up(&dhdinfo->ctrl_wait);
+#endif
+ return;
+}
+
+#if defined(BCMSDIO) || defined(BCMPCIE) || defined(BCMDBUS)
+int
+dhd_net_bus_devreset(struct net_device *dev, uint8 flag)
+{
+ int ret;
+
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+ if (pm_runtime_get_sync(dhd_bus_to_dev(dhd->pub.bus)) < 0)
+ return BCME_ERROR;
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+
+ if (flag == TRUE) {
+#ifndef WL_CFG80211
+ /* Issue wl down command for non-cfg before resetting the chip */
+ if (dhd_wl_ioctl_cmd(&dhd->pub, WLC_DOWN, NULL, 0, TRUE, 0) < 0) {
+ DHD_TRACE(("%s: wl down failed\n", __FUNCTION__));
+ }
+#endif /* !WL_CFG80211 */
+#ifdef PROP_TXSTATUS
+ if (dhd->pub.wlfc_enabled) {
+ dhd_wlfc_deinit(&dhd->pub);
+ }
+#endif /* PROP_TXSTATUS */
+#ifdef PNO_SUPPORT
+ if (dhd->pub.pno_state) {
+ dhd_pno_deinit(&dhd->pub);
+ }
+#endif
+#ifdef RTT_SUPPORT
+ if (dhd->pub.rtt_state) {
+ dhd_rtt_deinit(&dhd->pub);
+ }
+#endif /* RTT_SUPPORT */
+
+ DHD_SSSR_DUMP_DEINIT(&dhd->pub);
+#ifdef DHD_SDTC_ETB_DUMP
+ if (dhd->pub.sdtc_etb_inited) {
+ dhd_sdtc_etb_deinit(&dhd->pub);
+ }
+#endif /* DHD_SDTC_ETB_DUMP */
+/*
+ * XXX Detach only if the module was not attached by default at dhd_attach.
+ * If it was attached by default, keep it until dhd_detach so that the
+ * module is not detached on every wifi on/off cycle.
+ */
+#if defined(DBG_PKT_MON) && !defined(DBG_PKT_MON_INIT_DEFAULT)
+ dhd_os_dbg_detach_pkt_monitor(&dhd->pub);
+#endif /* DBG_PKT_MON && !DBG_PKT_MON_INIT_DEFAULT */
+ }
+
+#ifdef BCMSDIO
+ /* XXX Some DHD modules (e.g. cfg80211) configure the operation mode based on the
+ * firmware name. This is indeed a hack, but we have to make it work properly
+ * until we have a better solution.
+ */
+ if (!flag) {
+ dhd_update_fw_nv_path(dhd);
+ /* update firmware and nvram path to sdio bus */
+ dhd_bus_update_fw_nv_path(dhd->pub.bus,
+ dhd->fw_path, dhd->nv_path, dhd->clm_path, dhd->conf_path);
+ }
+#endif /* BCMSDIO */
+#if defined(CONFIG_ARCH_EXYNOS) && defined(BCMPCIE)
+#if !defined(CONFIG_SOC_EXYNOS8890) && !defined(SUPPORT_EXYNOS7420)
+ /* XXX: JIRA SWWLAN-139454: Added L1ss enable
+ * after firmware download completion due to link down issue
+ * JIRA SWWLAN-142236: Amendment - Changed L1ss enable point
+ */
+ DHD_ERROR(("%s Disable L1ss EP side\n", __FUNCTION__));
+ if (flag == FALSE && dhd->pub.busstate == DHD_BUS_DOWN) {
+#if defined(CONFIG_SOC_GS101)
+ exynos_pcie_rc_l1ss_ctrl(0, PCIE_L1SS_CTRL_WIFI, 1);
+#else
+ exynos_pcie_l1ss_ctrl(0, PCIE_L1SS_CTRL_WIFI);
+#endif /* CONFIG_SOC_GS101 */
+ }
+#endif /* !CONFIG_SOC_EXYNOS8890 && !defined(SUPPORT_EXYNOS7420) */
+#endif /* CONFIG_ARCH_EXYNOS && BCMPCIE */
+
+ ret = dhd_bus_devreset(&dhd->pub, flag);
+
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+ pm_runtime_mark_last_busy(dhd_bus_to_dev(dhd->pub.bus));
+ pm_runtime_put_autosuspend(dhd_bus_to_dev(dhd->pub.bus));
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+
+ if (flag) {
+ /* Clear some flags for recovery logic */
+ dhd->pub.dongle_trap_occured = 0;
+#ifdef BT_OVER_PCIE
+ dhd->pub.dongle_trap_due_to_bt = 0;
+#endif /* BT_OVER_PCIE */
+ dhd->pub.iovar_timeout_occured = 0;
+#ifdef PCIE_FULL_DONGLE
+ dhd->pub.d3ack_timeout_occured = 0;
+ dhd->pub.livelock_occured = 0;
+ dhd->pub.pktid_audit_failed = 0;
+#endif /* PCIE_FULL_DONGLE */
+ dhd->pub.smmu_fault_occurred = 0;
+ dhd->pub.iface_op_failed = 0;
+ dhd->pub.scan_timeout_occurred = 0;
+ dhd->pub.scan_busy_occurred = 0;
+ }
+
+ if (ret) {
+ DHD_ERROR(("%s: dhd_bus_devreset: %d\n", __FUNCTION__, ret));
+ }
+
+ return ret;
+}
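+/* Usage sketch (illustrative): flag == TRUE brings the bus/dongle down and
+ * flag == FALSE brings it back up, so a Wi-Fi off/on cycle driven from the
+ * network layer is roughly:
+ *
+ *	dhd_net_bus_devreset(dev, TRUE);	-- down: deinit wlfc/pno/rtt, reset chip
+ *	-- power-cycle the chip, update firmware/nvram paths --
+ *	dhd_net_bus_devreset(dev, FALSE);	-- up: redownload firmware, restart bus
+ */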
+
+#if defined(BCMSDIO) || defined(BCMPCIE)
+int
+dhd_net_bus_suspend(struct net_device *dev)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ return dhd_bus_suspend(&dhd->pub);
+}
+
+int
+dhd_net_bus_resume(struct net_device *dev, uint8 stage)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ return dhd_bus_resume(&dhd->pub, stage);
+}
+
+#endif /* BCMSDIO || BCMPCIE */
+#endif /* BCMSDIO || BCMPCIE || BCMDBUS */
+
+int net_os_set_suspend_disable(struct net_device *dev, int val)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ int ret = 0;
+
+ if (dhd) {
+ ret = dhd->pub.suspend_disable_flag;
+ dhd->pub.suspend_disable_flag = val;
+ }
+ return ret;
+}
+
+int net_os_set_suspend(struct net_device *dev, int val, int force)
+{
+ int ret = 0;
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+ if (dhd && (dhd->pub.conf->suspend_mode == EARLY_SUSPEND ||
+ dhd->pub.conf->suspend_mode == SUSPEND_MODE_2)) {
+ if (dhd->pub.conf->suspend_mode == EARLY_SUSPEND && !val)
+ dhd_conf_set_suspend_resume(&dhd->pub, val);
+#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
+ ret = dhd_set_suspend(val, &dhd->pub);
+#else
+ ret = dhd_suspend_resume_helper(dhd, val, force);
+#endif
+#ifdef WL_CFG80211
+ wl_cfg80211_update_power_mode(dev);
+#endif
+ if (dhd->pub.conf->suspend_mode == EARLY_SUSPEND && val)
+ dhd_conf_set_suspend_resume(&dhd->pub, val);
+ }
+ return ret;
+}
+
+int net_os_set_suspend_bcn_li_dtim(struct net_device *dev, int val)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+ if (dhd) {
+ DHD_ERROR(("%s: Set bcn_li_dtim in suspend %d\n",
+ __FUNCTION__, val));
+ dhd->pub.suspend_bcn_li_dtim = val;
+ }
+
+ return 0;
+}
+
+int net_os_set_max_dtim_enable(struct net_device *dev, int val)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+ if (dhd) {
+ DHD_ERROR(("%s: use MAX bcn_li_dtim in suspend %s\n",
+ __FUNCTION__, (val ? "Enable" : "Disable")));
+ if (val) {
+ dhd->pub.max_dtim_enable = TRUE;
+ } else {
+ dhd->pub.max_dtim_enable = FALSE;
+ }
+ } else {
+ return -1;
+ }
+
+ return 0;
+}
+
+#ifdef DISABLE_DTIM_IN_SUSPEND
+int net_os_set_disable_dtim_in_suspend(struct net_device *dev, int val)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+ if (dhd) {
+ DHD_ERROR(("%s: Disable bcn_li_dtim in suspend %s\n",
+ __FUNCTION__, (val ? "Enable" : "Disable")));
+ if (val) {
+ dhd->pub.disable_dtim_in_suspend = TRUE;
+ } else {
+ dhd->pub.disable_dtim_in_suspend = FALSE;
+ }
+ } else {
+ return BCME_ERROR;
+ }
+
+ return BCME_OK;
+}
+#endif /* DISABLE_DTIM_IN_SUSPEND */
+
+#ifdef PKT_FILTER_SUPPORT
+int net_os_rxfilter_add_remove(struct net_device *dev, int add_remove, int num)
+{
+ int ret = 0;
+
+#ifndef GAN_LITE_NAT_KEEPALIVE_FILTER
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+ if (!dhd_master_mode)
+ add_remove = !add_remove;
+ DHD_ERROR(("%s: add_remove = %d, num = %d\n", __FUNCTION__, add_remove, num));
+ if (!dhd || (num == DHD_UNICAST_FILTER_NUM)) {
+ return 0;
+ }
+
+#ifdef BLOCK_IPV6_PACKET
+ /* the customer wants to block all IPv6 packets */
+ if (num == DHD_MULTICAST6_FILTER_NUM) {
+ return 0;
+ }
+#endif /* BLOCK_IPV6_PACKET */
+
+ if (num >= dhd->pub.pktfilter_count) {
+ return -EINVAL;
+ }
+
+ ret = dhd_packet_filter_add_remove(&dhd->pub, add_remove, num);
+#endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */
+
+ return ret;
+}
+
+/* XXX RB:4238 Change net_os_set_packet_filter() function name to net_os_enable_packet_filter().
+ * The previous code did 'set' and 'enable' in one function; from now on the
+ * two features are separate:
+ * - set : net_os_rxfilter_add_remove() -> dhd_set_packet_filter() -> dhd_pktfilter_offload_set()
+ * - enable : net_os_enable_packet_filter() -> dhd_enable_packet_filter()
+ * -> dhd_pktfilter_offload_enable()
+ */
+int dhd_os_enable_packet_filter(dhd_pub_t *dhdp, int val)
+{
+ int ret = 0;
+
+ /* Packet filtering is applied only if we are still in early-suspend and
+ * need either to turn it ON or turn it OFF.
+ * We can always turn it OFF in early-suspend, but we turn it
+ * back ON only if suspend_disable_flag was not set.
+ */
+ if (dhdp && dhdp->up) {
+ if (dhdp->in_suspend) {
+ if (!val || (val && !dhdp->suspend_disable_flag))
+ dhd_enable_packet_filter(val, dhdp);
+ }
+ }
+ return ret;
+}
+
+/* Function to enable/disable packet filtering for a network device */
+int net_os_enable_packet_filter(struct net_device *dev, int val)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+ DHD_ERROR(("%s: val = %d\n", __FUNCTION__, val));
+ return dhd_os_enable_packet_filter(&dhd->pub, val);
+}
+#endif /* PKT_FILTER_SUPPORT */
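+/* Usage sketch (illustrative) of the set/enable split described above:
+ *
+ *	net_os_rxfilter_add_remove(dev, TRUE, num);	-- install a filter slot
+ *	net_os_enable_packet_filter(dev, 1);		-- entering early suspend
+ *	net_os_enable_packet_filter(dev, 0);		-- leaving suspend
+ */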
+
+int
+dhd_dev_init_ioctl(struct net_device *dev)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ int ret;
+
+ ret = dhd_sync_with_dongle(&dhd->pub);
+
+ return ret;
+}
+
+int
+dhd_dev_get_feature_set(struct net_device *dev)
+{
+ dhd_info_t *ptr = *(dhd_info_t **)netdev_priv(dev);
+ dhd_pub_t *dhd = (&ptr->pub);
+ int feature_set = 0;
+
+ /* TDLS capability or others can be missed because of initialization order */
+ if (dhd_get_fw_capabilities(dhd) < 0) {
+ DHD_ERROR(("Capabilities recheck failed\n"));
+ }
+
+ if (FW_SUPPORTED(dhd, sta))
+ feature_set |= WIFI_FEATURE_INFRA;
+ if (FW_SUPPORTED(dhd, dualband))
+ feature_set |= WIFI_FEATURE_INFRA_5G;
+ if (FW_SUPPORTED(dhd, p2p))
+ feature_set |= WIFI_FEATURE_P2P;
+ if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE)
+ feature_set |= WIFI_FEATURE_SOFT_AP;
+ if (FW_SUPPORTED(dhd, tdls))
+ feature_set |= WIFI_FEATURE_TDLS;
+ if (FW_SUPPORTED(dhd, vsdb))
+ feature_set |= WIFI_FEATURE_TDLS_OFFCHANNEL;
+ if (FW_SUPPORTED(dhd, nan)) {
+ feature_set |= WIFI_FEATURE_NAN;
+ /* NAN is essential for d2d rtt */
+ if (FW_SUPPORTED(dhd, rttd2d))
+ feature_set |= WIFI_FEATURE_D2D_RTT;
+ }
+#ifdef RTT_SUPPORT
+ if (dhd->rtt_supported) {
+ feature_set |= WIFI_FEATURE_D2D_RTT;
+ feature_set |= WIFI_FEATURE_D2AP_RTT;
+ }
+#endif /* RTT_SUPPORT */
+#ifdef LINKSTAT_SUPPORT
+ feature_set |= WIFI_FEATURE_LINKSTAT;
+#endif /* LINKSTAT_SUPPORT */
+
+#ifdef CUSTOMER_HW_AMLOGIC
+ feature_set |= WIFI_FEATURE_SET_LATENCY_MODE;
+#endif
+
+#if defined(PNO_SUPPORT) && !defined(DISABLE_ANDROID_PNO)
+ if (dhd_is_pno_supported(dhd)) {
+ feature_set |= WIFI_FEATURE_PNO;
+#ifdef BATCH_SCAN
+ /* Deprecated */
+ feature_set |= WIFI_FEATURE_BATCH_SCAN;
+#endif /* BATCH_SCAN */
+#ifdef GSCAN_SUPPORT
+ /* terence 20171115: remove to get GTS PASS
+ * com.google.android.gts.wifi.WifiHostTest#testWifiScannerBatchTimestamp
+ */
+// feature_set |= WIFI_FEATURE_GSCAN;
+// feature_set |= WIFI_FEATURE_HAL_EPNO;
+#endif /* GSCAN_SUPPORT */
+ }
+#endif /* PNO_SUPPORT && !DISABLE_ANDROID_PNO */
+#ifdef RSSI_MONITOR_SUPPORT
+ if (FW_SUPPORTED(dhd, rssi_mon)) {
+ feature_set |= WIFI_FEATURE_RSSI_MONITOR;
+ }
+#endif /* RSSI_MONITOR_SUPPORT */
+#ifdef WL11U
+ feature_set |= WIFI_FEATURE_HOTSPOT;
+#endif /* WL11U */
+#ifdef KEEP_ALIVE
+ feature_set |= WIFI_FEATURE_MKEEP_ALIVE;
+#endif /* KEEP_ALIVE */
+#ifdef NDO_CONFIG_SUPPORT
+ feature_set |= WIFI_FEATURE_CONFIG_NDO;
+#endif /* NDO_CONFIG_SUPPORT */
+#ifdef SUPPORT_RANDOM_MAC_SCAN
+ feature_set |= WIFI_FEATURE_SCAN_RAND;
+#endif /* SUPPORT_RANDOM_MAC_SCAN */
+#ifdef FILTER_IE
+ if (FW_SUPPORTED(dhd, fie)) {
+ feature_set |= WIFI_FEATURE_FILTER_IE;
+ }
+#endif /* FILTER_IE */
+#ifdef ROAMEXP_SUPPORT
+ feature_set |= WIFI_FEATURE_CONTROL_ROAMING;
+#endif /* ROAMEXP_SUPPORT */
+#ifdef WL_LATENCY_MODE
+ feature_set |= WIFI_FEATURE_SET_LATENCY_MODE;
+#endif /* WL_LATENCY_MODE */
+#ifdef WL_P2P_RAND
+ feature_set |= WIFI_FEATURE_P2P_RAND_MAC;
+#endif /* WL_P2P_RAND */
+#ifdef WL_SAR_TX_POWER
+ feature_set |= WIFI_FEATURE_SET_TX_POWER_LIMIT;
+ feature_set |= WIFI_FEATURE_USE_BODY_HEAD_SAR;
+#endif /* WL_SAR_TX_POWER */
+#ifdef WL_STATIC_IF
+ feature_set |= WIFI_FEATURE_AP_STA;
+#endif /* WL_STATIC_IF */
+ return feature_set;
+}
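+/* Example (illustrative): the return value is a WIFI_FEATURE_* bitmask, so a
+ * caller tests capabilities with simple mask operations:
+ *
+ *	int fs = dhd_dev_get_feature_set(dev);
+ *	if (fs & WIFI_FEATURE_P2P) {
+ *		-- advertise P2P support to the framework --
+ *	}
+ */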
+
+int
+dhd_dev_get_feature_set_matrix(struct net_device *dev, int num)
+{
+ int feature_set_full;
+ int ret = 0;
+
+ feature_set_full = dhd_dev_get_feature_set(dev);
+
+ /* Common feature set for all interfaces */
+ ret = (feature_set_full & WIFI_FEATURE_INFRA) |
+ (feature_set_full & WIFI_FEATURE_INFRA_5G) |
+ (feature_set_full & WIFI_FEATURE_D2D_RTT) |
+ (feature_set_full & WIFI_FEATURE_D2AP_RTT) |
+ (feature_set_full & WIFI_FEATURE_RSSI_MONITOR) |
+ (feature_set_full & WIFI_FEATURE_EPR);
+
+ /* Specific feature group for each interface */
+ switch (num) {
+ case 0:
+ ret |= (feature_set_full & WIFI_FEATURE_P2P) |
+ /* Not supported yet */
+ /* (feature_set_full & WIFI_FEATURE_NAN) | */
+ (feature_set_full & WIFI_FEATURE_TDLS) |
+ (feature_set_full & WIFI_FEATURE_PNO) |
+ (feature_set_full & WIFI_FEATURE_HAL_EPNO) |
+ (feature_set_full & WIFI_FEATURE_BATCH_SCAN) |
+ (feature_set_full & WIFI_FEATURE_GSCAN) |
+ (feature_set_full & WIFI_FEATURE_HOTSPOT) |
+ (feature_set_full & WIFI_FEATURE_ADDITIONAL_STA);
+ break;
+
+ case 1:
+ ret |= (feature_set_full & WIFI_FEATURE_P2P);
+ /* Not yet verified NAN with P2P */
+ /* (feature_set_full & WIFI_FEATURE_NAN) | */
+ break;
+
+ case 2:
+ ret |= (feature_set_full & WIFI_FEATURE_NAN) |
+ (feature_set_full & WIFI_FEATURE_TDLS) |
+ (feature_set_full & WIFI_FEATURE_TDLS_OFFCHANNEL);
+ break;
+
+ default:
+ ret = WIFI_FEATURE_INVALID;
+ DHD_ERROR(("%s: Out of index(%d) for get feature set\n", __FUNCTION__, num));
+ break;
+ }
+
+ return ret;
+}
+
+#ifdef CUSTOM_FORCE_NODFS_FLAG
+int
+dhd_dev_set_nodfs(struct net_device *dev, u32 nodfs)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+ if (nodfs)
+ dhd->pub.dhd_cflags |= WLAN_PLAT_NODFS_FLAG;
+ else
+ dhd->pub.dhd_cflags &= ~WLAN_PLAT_NODFS_FLAG;
+ dhd->pub.force_country_change = TRUE;
+ return 0;
+}
+#endif /* CUSTOM_FORCE_NODFS_FLAG */
+
+#ifdef NDO_CONFIG_SUPPORT
+int
+dhd_dev_ndo_cfg(struct net_device *dev, u8 enable)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ dhd_pub_t *dhdp = &dhd->pub;
+ int ret = 0;
+
+ if (enable) {
+ /* enable ND offload feature (will be enabled in FW on suspend) */
+ dhdp->ndo_enable = TRUE;
+
+ /* Update changes of anycast address & DAD failed address */
+ ret = dhd_dev_ndo_update_inet6addr(dev);
+ if ((ret < 0) && (ret != BCME_NORESOURCE)) {
+ DHD_ERROR(("%s: failed to update host ip addr: %d\n", __FUNCTION__, ret));
+ return ret;
+ }
+ } else {
+ /* disable ND offload feature */
+ dhdp->ndo_enable = FALSE;
+
+ /* disable ND offload in FW */
+ ret = dhd_ndo_enable(dhdp, FALSE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: failed to disable NDO: %d\n", __FUNCTION__, ret));
+ }
+ }
+ return ret;
+}
+
+static int
+dhd_dev_ndo_get_valid_inet6addr_count(struct inet6_dev *inet6)
+{
+ struct inet6_ifaddr *ifa;
+ struct ifacaddr6 *acaddr = NULL;
+ int addr_count = 0;
+
+ /* lock */
+ read_lock_bh(&inet6->lock);
+
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ /* Count valid unicast address */
+ list_for_each_entry(ifa, &inet6->addr_list, if_list) {
+ GCC_DIAGNOSTIC_POP();
+ if ((ifa->flags & IFA_F_DADFAILED) == 0) {
+ addr_count++;
+ }
+ }
+
+ /* Count anycast address */
+ acaddr = inet6->ac_list;
+ while (acaddr) {
+ addr_count++;
+ acaddr = acaddr->aca_next;
+ }
+
+ /* unlock */
+ read_unlock_bh(&inet6->lock);
+
+ return addr_count;
+}
+
+int
+dhd_dev_ndo_update_inet6addr(struct net_device *dev)
+{
+ dhd_info_t *dhd;
+ dhd_pub_t *dhdp;
+ struct inet6_dev *inet6;
+ struct inet6_ifaddr *ifa;
+ struct ifacaddr6 *acaddr = NULL;
+ struct in6_addr *ipv6_addr = NULL;
+ int cnt, i;
+ int ret = BCME_OK;
+
+ /*
+ * This function evaluates the host IP addresses in struct inet6_dev:
+ * unicast addresses in inet6_dev->addr_list,
+ * anycast addresses in inet6_dev->ac_list.
+ * While evaluating inet6_dev, read_lock_bh() is required to prevent
+ * access to a NULL (freed) pointer.
+ */
+
+ if (dev) {
+ inet6 = dev->ip6_ptr;
+ if (!inet6) {
+ DHD_ERROR(("%s: Invalid inet6_dev\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ dhd = DHD_DEV_INFO(dev);
+ if (!dhd) {
+ DHD_ERROR(("%s: Invalid dhd_info\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+ dhdp = &dhd->pub;
+
+ if (dhd_net2idx(dhd, dev) != 0) {
+ DHD_ERROR(("%s: Not primary interface\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+ } else {
+ DHD_ERROR(("%s: Invalid net_device\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ /* Check host IP overflow */
+ cnt = dhd_dev_ndo_get_valid_inet6addr_count(inet6);
+ if (cnt > dhdp->ndo_max_host_ip) {
+ if (!dhdp->ndo_host_ip_overflow) {
+ dhdp->ndo_host_ip_overflow = TRUE;
+ /* Disable ND offload in FW */
+ DHD_INFO(("%s: Host IP overflow, disable NDO\n", __FUNCTION__));
+ ret = dhd_ndo_enable(dhdp, FALSE);
+ }
+
+ return ret;
+ }
+
+ /*
+ * Allocate an ipv6 addr buffer to store the addresses to be added/removed.
+ * The driver needs to lock inet6_dev while accessing the structure, but
+ * it cannot issue an ioctl while inet6_dev is locked since that requires
+ * scheduling; hence, copy the addresses to the buffer and do the ioctl
+ * after unlocking.
+ */
+ ipv6_addr = (struct in6_addr *)MALLOC(dhdp->osh,
+ sizeof(struct in6_addr) * dhdp->ndo_max_host_ip);
+ if (!ipv6_addr) {
+ DHD_ERROR(("%s: failed to alloc ipv6 addr buffer\n", __FUNCTION__));
+ return BCME_NOMEM;
+ }
+
+ /* Find DAD failed unicast address to be removed */
+ cnt = 0;
+ read_lock_bh(&inet6->lock);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ list_for_each_entry(ifa, &inet6->addr_list, if_list) {
+ GCC_DIAGNOSTIC_POP();
+ /* DAD failed unicast address */
+ if ((ifa->flags & IFA_F_DADFAILED) &&
+ (cnt < dhdp->ndo_max_host_ip)) {
+ memcpy(&ipv6_addr[cnt], &ifa->addr, sizeof(struct in6_addr));
+ cnt++;
+ }
+ }
+ read_unlock_bh(&inet6->lock);
+
+ /* Remove DAD failed unicast address */
+ for (i = 0; i < cnt; i++) {
+ DHD_INFO(("%s: Remove DAD failed addr\n", __FUNCTION__));
+ ret = dhd_ndo_remove_ip_by_addr(dhdp, (char *)&ipv6_addr[i], 0);
+ if (ret < 0) {
+ goto done;
+ }
+ }
+
+ /* Remove all anycast address */
+ ret = dhd_ndo_remove_ip_by_type(dhdp, WL_ND_IPV6_ADDR_TYPE_ANYCAST, 0);
+ if (ret < 0) {
+ goto done;
+ }
+
+ /*
+ * if ND offload was disabled due to host ip overflow,
+ * attempt to add valid unicast address.
+ */
+ if (dhdp->ndo_host_ip_overflow) {
+ /* Find valid unicast address */
+ cnt = 0;
+ read_lock_bh(&inet6->lock);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ list_for_each_entry(ifa, &inet6->addr_list, if_list) {
+ GCC_DIAGNOSTIC_POP();
+ /* valid unicast address */
+ if (!(ifa->flags & IFA_F_DADFAILED) &&
+ (cnt < dhdp->ndo_max_host_ip)) {
+ memcpy(&ipv6_addr[cnt], &ifa->addr,
+ sizeof(struct in6_addr));
+ cnt++;
+ }
+ }
+ read_unlock_bh(&inet6->lock);
+
+ /* Add valid unicast address */
+ for (i = 0; i < cnt; i++) {
+ ret = dhd_ndo_add_ip_with_type(dhdp,
+ (char *)&ipv6_addr[i], WL_ND_IPV6_ADDR_TYPE_UNICAST, 0);
+ if (ret < 0) {
+ goto done;
+ }
+ }
+ }
+
+ /* Find anycast address */
+ cnt = 0;
+ read_lock_bh(&inet6->lock);
+ acaddr = inet6->ac_list;
+ while (acaddr) {
+ if (cnt < dhdp->ndo_max_host_ip) {
+ memcpy(&ipv6_addr[cnt], &acaddr->aca_addr, sizeof(struct in6_addr));
+ cnt++;
+ }
+ acaddr = acaddr->aca_next;
+ }
+ read_unlock_bh(&inet6->lock);
+
+ /* Add anycast address */
+ for (i = 0; i < cnt; i++) {
+ ret = dhd_ndo_add_ip_with_type(dhdp,
+ (char *)&ipv6_addr[i], WL_ND_IPV6_ADDR_TYPE_ANYCAST, 0);
+ if (ret < 0) {
+ goto done;
+ }
+ }
+
+ /* Now all host IP addresses have been added successfully */
+ if (dhdp->ndo_host_ip_overflow) {
+ dhdp->ndo_host_ip_overflow = FALSE;
+ if (dhdp->in_suspend) {
+ /* driver is in (early) suspend state, need to enable ND offload in FW */
+ DHD_INFO(("%s: enable NDO\n", __FUNCTION__));
+ ret = dhd_ndo_enable(dhdp, TRUE);
+ }
+ }
+
+done:
+ if (ipv6_addr) {
+ MFREE(dhdp->osh, ipv6_addr, sizeof(struct in6_addr) * dhdp->ndo_max_host_ip);
+ }
+
+ return ret;
+}
+
+#endif /* NDO_CONFIG_SUPPORT */
+
+#ifdef PNO_SUPPORT
+/* Linux wrapper to call common dhd_pno_stop_for_ssid */
+int
+dhd_dev_pno_stop_for_ssid(struct net_device *dev)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+ return (dhd_pno_stop_for_ssid(&dhd->pub));
+}
+/* Linux wrapper to call common dhd_pno_set_for_ssid */
+int
+dhd_dev_pno_set_for_ssid(struct net_device *dev, wlc_ssid_ext_t* ssids_local, int nssid,
+ uint16 scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+ return (dhd_pno_set_for_ssid(&dhd->pub, ssids_local, nssid, scan_fr,
+ pno_repeat, pno_freq_expo_max, channel_list, nchan));
+}
+
+/* Linux wrapper to call common dhd_pno_enable */
+int
+dhd_dev_pno_enable(struct net_device *dev, int enable)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+ return (dhd_pno_enable(&dhd->pub, enable));
+}
+
+/* Linux wrapper to call common dhd_pno_set_for_hotlist */
+int
+dhd_dev_pno_set_for_hotlist(struct net_device *dev, wl_pfn_bssid_t *p_pfn_bssid,
+ struct dhd_pno_hotlist_params *hotlist_params)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ return (dhd_pno_set_for_hotlist(&dhd->pub, p_pfn_bssid, hotlist_params));
+}
+/* Linux wrapper to call common dhd_dev_pno_stop_for_batch */
+int
+dhd_dev_pno_stop_for_batch(struct net_device *dev)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ return (dhd_pno_stop_for_batch(&dhd->pub));
+}
+
+/* Linux wrapper to call common dhd_dev_pno_set_for_batch */
+int
+dhd_dev_pno_set_for_batch(struct net_device *dev, struct dhd_pno_batch_params *batch_params)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ return (dhd_pno_set_for_batch(&dhd->pub, batch_params));
+}
+
+/* Linux wrapper to call common dhd_dev_pno_get_for_batch */
+int
+dhd_dev_pno_get_for_batch(struct net_device *dev, char *buf, int bufsize)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ return (dhd_pno_get_for_batch(&dhd->pub, buf, bufsize, PNO_STATUS_NORMAL));
+}
+#endif /* PNO_SUPPORT */
+
+#if defined(OEM_ANDROID) && defined(PNO_SUPPORT)
+#ifdef GSCAN_SUPPORT
+bool
+dhd_dev_is_legacy_pno_enabled(struct net_device *dev)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ return (dhd_is_legacy_pno_enabled(&dhd->pub));
+}
+
+int
+dhd_dev_set_epno(struct net_device *dev)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ if (!dhd) {
+ return BCME_ERROR;
+ }
+ return dhd_pno_set_epno(&dhd->pub);
+}
+int
+dhd_dev_flush_fw_epno(struct net_device *dev)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ if (!dhd) {
+ return BCME_ERROR;
+ }
+ return dhd_pno_flush_fw_epno(&dhd->pub);
+}
+
+/* Linux wrapper to call common dhd_pno_set_cfg_gscan */
+int
+dhd_dev_pno_set_cfg_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type,
+ void *buf, bool flush)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ return (dhd_pno_set_cfg_gscan(&dhd->pub, type, buf, flush));
+}
+
+/* Linux wrapper to call common dhd_pno_get_gscan */
+void *
+dhd_dev_pno_get_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type,
+ void *info, uint32 *len)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ return (dhd_pno_get_gscan(&dhd->pub, type, info, len));
+}
+
+/* Linux wrapper to call common dhd_wait_batch_results_complete */
+int
+dhd_dev_wait_batch_results_complete(struct net_device *dev)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ return (dhd_wait_batch_results_complete(&dhd->pub));
+}
+
+/* Linux wrapper to call common dhd_pno_lock_batch_results */
+int
+dhd_dev_pno_lock_access_batch_results(struct net_device *dev)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ return (dhd_pno_lock_batch_results(&dhd->pub));
+}
+/* Linux wrapper to call common dhd_pno_unlock_batch_results */
+void
+dhd_dev_pno_unlock_access_batch_results(struct net_device *dev)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ return (dhd_pno_unlock_batch_results(&dhd->pub));
+}
+
+/* Linux wrapper to call common dhd_pno_initiate_gscan_request */
+int
+dhd_dev_pno_run_gscan(struct net_device *dev, bool run, bool flush)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ return (dhd_pno_initiate_gscan_request(&dhd->pub, run, flush));
+}
+
+/* Linux wrapper to call common dhd_pno_enable_full_scan_result */
+int
+dhd_dev_pno_enable_full_scan_result(struct net_device *dev, bool real_time_flag)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ return (dhd_pno_enable_full_scan_result(&dhd->pub, real_time_flag));
+}
+
+/* Linux wrapper to call common dhd_handle_hotlist_scan_evt */
+void *
+dhd_dev_hotlist_scan_event(struct net_device *dev,
+ const void *data, int *send_evt_bytes, hotlist_type_t type, u32 *buf_len)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ return (dhd_handle_hotlist_scan_evt(&dhd->pub, data, send_evt_bytes, type, buf_len));
+}
+
+/* Linux wrapper to call common dhd_process_full_gscan_result */
+void *
+dhd_dev_process_full_gscan_result(struct net_device *dev,
+const void *data, uint32 len, int *send_evt_bytes)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ return (dhd_process_full_gscan_result(&dhd->pub, data, len, send_evt_bytes));
+}
+
+void
+dhd_dev_gscan_hotlist_cache_cleanup(struct net_device *dev, hotlist_type_t type)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ dhd_gscan_hotlist_cache_cleanup(&dhd->pub, type);
+
+ return;
+}
+
+int
+dhd_dev_gscan_batch_cache_cleanup(struct net_device *dev)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ return (dhd_gscan_batch_cache_cleanup(&dhd->pub));
+}
+
+/* Linux wrapper to call common dhd_retreive_batch_scan_results */
+int
+dhd_dev_retrieve_batch_scan(struct net_device *dev)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ return (dhd_retreive_batch_scan_results(&dhd->pub));
+}
+
+/* Linux wrapper to call common dhd_pno_process_epno_result */
+void * dhd_dev_process_epno_result(struct net_device *dev,
+ const void *data, uint32 event, int *send_evt_bytes)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ return (dhd_pno_process_epno_result(&dhd->pub, data, event, send_evt_bytes));
+}
+
+int
+dhd_dev_set_lazy_roam_cfg(struct net_device *dev,
+ wlc_roam_exp_params_t *roam_param)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ wl_roam_exp_cfg_t roam_exp_cfg;
+ int err;
+
+ if (!roam_param) {
+ return BCME_BADARG;
+ }
+
+ DHD_INFO(("a_band_boost_thr %d a_band_penalty_thr %d\n",
+ roam_param->a_band_boost_threshold, roam_param->a_band_penalty_threshold));
+ DHD_INFO(("a_band_boost_factor %d a_band_penalty_factor %d cur_bssid_boost %d\n",
+ roam_param->a_band_boost_factor, roam_param->a_band_penalty_factor,
+ roam_param->cur_bssid_boost));
+ DHD_INFO(("alert_roam_trigger_thr %d a_band_max_boost %d\n",
+ roam_param->alert_roam_trigger_threshold, roam_param->a_band_max_boost));
+
+ memcpy(&roam_exp_cfg.params, roam_param, sizeof(*roam_param));
+ roam_exp_cfg.version = ROAM_EXP_CFG_VERSION;
+ roam_exp_cfg.flags = ROAM_EXP_CFG_PRESENT;
+ if (dhd->pub.lazy_roam_enable) {
+ roam_exp_cfg.flags |= ROAM_EXP_ENABLE_FLAG;
+ }
+ err = dhd_iovar(&dhd->pub, 0, "roam_exp_params",
+ (char *)&roam_exp_cfg, sizeof(roam_exp_cfg), NULL, 0,
+ TRUE);
+ if (err < 0) {
+ DHD_ERROR(("%s : Failed to execute roam_exp_params %d\n", __FUNCTION__, err));
+ }
+ return err;
+}
+
+int
+dhd_dev_lazy_roam_enable(struct net_device *dev, uint32 enable)
+{
+ int err;
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ wl_roam_exp_cfg_t roam_exp_cfg;
+
+ memset(&roam_exp_cfg, 0, sizeof(roam_exp_cfg));
+ roam_exp_cfg.version = ROAM_EXP_CFG_VERSION;
+ if (enable) {
+ roam_exp_cfg.flags = ROAM_EXP_ENABLE_FLAG;
+ }
+
+ err = dhd_iovar(&dhd->pub, 0, "roam_exp_params",
+ (char *)&roam_exp_cfg, sizeof(roam_exp_cfg), NULL, 0,
+ TRUE);
+ if (err < 0) {
+ DHD_ERROR(("%s : Failed to execute roam_exp_params %d\n", __FUNCTION__, err));
+ } else {
+ dhd->pub.lazy_roam_enable = (enable != 0);
+ }
+ return err;
+}
+
+int
+dhd_dev_set_lazy_roam_bssid_pref(struct net_device *dev,
+ wl_bssid_pref_cfg_t *bssid_pref, uint32 flush)
+{
+ int err;
+ uint len;
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ bssid_pref->version = BSSID_PREF_LIST_VERSION;
+ /* By default programming bssid pref flushes out old values */
+ bssid_pref->flags = (flush && !bssid_pref->count) ? ROAM_EXP_CLEAR_BSSID_PREF : 0;
+ len = sizeof(wl_bssid_pref_cfg_t);
+ if (bssid_pref->count) {
+ len += (bssid_pref->count - 1) * sizeof(wl_bssid_pref_list_t);
+ }
+ err = dhd_iovar(&dhd->pub, 0, "roam_exp_bssid_pref",
+ (char *)bssid_pref, len, NULL, 0, TRUE);
+ if (err != BCME_OK) {
+ DHD_ERROR(("%s : Failed to execute roam_exp_bssid_pref %d\n", __FUNCTION__, err));
+ }
+ return err;
+}
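+/* Note on the length computation above (informational): wl_bssid_pref_cfg_t
+ * already embeds one wl_bssid_pref_list_t element, so for 'count' entries the
+ * iovar payload is:
+ *
+ *	len = sizeof(wl_bssid_pref_cfg_t)
+ *	    + (count - 1) * sizeof(wl_bssid_pref_list_t);
+ *
+ * e.g. count == 3 sends the fixed header plus two additional list elements.
+ */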
+#endif /* GSCAN_SUPPORT */
+
+#if defined(GSCAN_SUPPORT) || defined(ROAMEXP_SUPPORT)
+int
+dhd_dev_set_blacklist_bssid(struct net_device *dev, maclist_t *blacklist,
+ uint32 len, uint32 flush)
+{
+ int err;
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ int macmode;
+
+ if (blacklist) {
+ err = dhd_wl_ioctl_cmd(&(dhd->pub), WLC_SET_MACLIST, (char *)blacklist,
+ len, TRUE, 0);
+ if (err != BCME_OK) {
+ DHD_ERROR(("%s : WLC_SET_MACLIST failed %d\n", __FUNCTION__, err));
+ return err;
+ }
+ }
+ /* By default programming blacklist flushes out old values */
+ macmode = (flush && !blacklist) ? WLC_MACMODE_DISABLED : WLC_MACMODE_DENY;
+ err = dhd_wl_ioctl_cmd(&(dhd->pub), WLC_SET_MACMODE, (char *)&macmode,
+ sizeof(macmode), TRUE, 0);
+ if (err != BCME_OK) {
+ DHD_ERROR(("%s : WLC_SET_MACMODE failed %d\n", __FUNCTION__, err));
+ }
+ return err;
+}
+
+int
+dhd_dev_set_whitelist_ssid(struct net_device *dev, wl_ssid_whitelist_t *ssid_whitelist,
+ uint32 len, uint32 flush)
+{
+ int err;
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ wl_ssid_whitelist_t whitelist_ssid_flush;
+
+ if (!ssid_whitelist) {
+ if (flush) {
+ ssid_whitelist = &whitelist_ssid_flush;
+ ssid_whitelist->ssid_count = 0;
+ } else {
+ DHD_ERROR(("%s : Nothing to do here\n", __FUNCTION__));
+ return BCME_BADARG;
+ }
+ }
+ ssid_whitelist->version = SSID_WHITELIST_VERSION;
+ ssid_whitelist->flags = flush ? ROAM_EXP_CLEAR_SSID_WHITELIST : 0;
+ err = dhd_iovar(&dhd->pub, 0, "roam_exp_ssid_whitelist", (char *)ssid_whitelist, len, NULL,
+ 0, TRUE);
+ if (err != BCME_OK) {
+ if (err == BCME_UNSUPPORTED) {
+ DHD_ERROR(("%s : roam_exp_bssid_pref, UNSUPPORTED \n", __FUNCTION__));
+ } else {
+ DHD_ERROR(("%s : Failed to execute roam_exp_bssid_pref %d\n",
+ __FUNCTION__, err));
+ }
+ }
+ return err;
+}
+#endif /* GSCAN_SUPPORT || ROAMEXP_SUPPORT */
+#endif /* defined(OEM_ANDROID) && defined(PNO_SUPPORT) */
+
+#ifdef RSSI_MONITOR_SUPPORT
+int
+dhd_dev_set_rssi_monitor_cfg(struct net_device *dev, int start,
+ int8 max_rssi, int8 min_rssi)
+{
+ int err;
+ wl_rssi_monitor_cfg_t rssi_monitor;
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ rssi_monitor.version = RSSI_MONITOR_VERSION;
+ rssi_monitor.max_rssi = max_rssi;
+ rssi_monitor.min_rssi = min_rssi;
+ rssi_monitor.flags = start ? 0 : RSSI_MONITOR_STOP;
+ err = dhd_iovar(&dhd->pub, 0, "rssi_monitor", (char *)&rssi_monitor, sizeof(rssi_monitor),
+ NULL, 0, TRUE);
+ if (err < 0 && err != BCME_UNSUPPORTED) {
+ DHD_ERROR(("%s : Failed to execute rssi_monitor %d\n", __FUNCTION__, err));
+ }
+ return err;
+}
+#endif /* RSSI_MONITOR_SUPPORT */
+
+#ifdef DHDTCPACK_SUPPRESS
+int
+dhd_dev_set_tcpack_sup_mode_cfg(struct net_device *dev, uint8 enable)
+{
+ int err;
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ err = dhd_tcpack_suppress_set(&dhd->pub, enable);
+ if (err != BCME_OK) {
+ DHD_ERROR(("%s : Failed to set tcpack_suppress mode: %d\n", __FUNCTION__, err));
+ }
+ return err;
+}
+#endif /* DHDTCPACK_SUPPRESS */
+
+int
+dhd_dev_cfg_rand_mac_oui(struct net_device *dev, uint8 *oui)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ dhd_pub_t *dhdp = &dhd->pub;
+
+ if (!dhdp || !oui) {
+ DHD_ERROR(("NULL POINTER : %s\n",
+ __FUNCTION__));
+ return BCME_ERROR;
+ }
+ if (ETHER_ISMULTI(oui)) {
+ DHD_ERROR(("Expected unicast OUI\n"));
+ return BCME_ERROR;
+ } else {
+ uint8 *rand_mac_oui = dhdp->rand_mac_oui;
+ memcpy(rand_mac_oui, oui, DOT11_OUI_LEN);
+ DHD_ERROR(("Random MAC OUI to be used - "MACOUIDBG"\n",
+ MACOUI2STRDBG(rand_mac_oui)));
+ }
+ return BCME_OK;
+}
+
+int
+dhd_set_rand_mac_oui(dhd_pub_t *dhd)
+{
+ int err;
+ wl_pfn_macaddr_cfg_t wl_cfg;
+ uint8 *rand_mac_oui = dhd->rand_mac_oui;
+
+ memset(&wl_cfg.macaddr, 0, ETHER_ADDR_LEN);
+ memcpy(&wl_cfg.macaddr, rand_mac_oui, DOT11_OUI_LEN);
+ wl_cfg.version = WL_PFN_MACADDR_CFG_VER;
+ if (ETHER_ISNULLADDR(&wl_cfg.macaddr)) {
+ wl_cfg.flags = 0;
+ } else {
+ wl_cfg.flags = (WL_PFN_MAC_OUI_ONLY_MASK | WL_PFN_SET_MAC_UNASSOC_MASK);
+ }
+
+ DHD_ERROR(("Setting rand mac oui to FW - "MACOUIDBG"\n",
+ MACOUI2STRDBG(rand_mac_oui)));
+
+ err = dhd_iovar(dhd, 0, "pfn_macaddr", (char *)&wl_cfg, sizeof(wl_cfg), NULL, 0, TRUE);
+ if (err < 0) {
+ DHD_ERROR(("%s : failed to execute pfn_macaddr %d\n", __FUNCTION__, err));
+ }
+ return err;
+}
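+/* Layout note (informational; the randomization itself is assumed firmware
+ * behavior): only the first DOT11_OUI_LEN (3) bytes of the PFN MAC address
+ * are taken from the configured OUI; with WL_PFN_MAC_OUI_ONLY_MASK set, the
+ * remaining bytes are left for the firmware to randomize per scan:
+ *
+ *	macaddr = [ OUI0 OUI1 OUI2 | xx xx xx ]
+ *	            from rand_mac_oui   randomized
+ */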
+
+#if defined(RTT_SUPPORT) && defined(WL_CFG80211)
+/* Linux wrapper to call common dhd_pno_set_cfg_gscan */
+int
+dhd_dev_rtt_set_cfg(struct net_device *dev, void *buf)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ return (dhd_rtt_set_cfg(&dhd->pub, buf));
+}
+
+int
+dhd_dev_rtt_cancel_cfg(struct net_device *dev, struct ether_addr *mac_list, int mac_cnt)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ return (dhd_rtt_stop(&dhd->pub, mac_list, mac_cnt));
+}
+
+int
+dhd_dev_rtt_register_noti_callback(struct net_device *dev, void *ctx, dhd_rtt_compl_noti_fn noti_fn)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ return (dhd_rtt_register_noti_callback(&dhd->pub, ctx, noti_fn));
+}
+
+int
+dhd_dev_rtt_unregister_noti_callback(struct net_device *dev, dhd_rtt_compl_noti_fn noti_fn)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ return (dhd_rtt_unregister_noti_callback(&dhd->pub, noti_fn));
+}
+
+int
+dhd_dev_rtt_capability(struct net_device *dev, rtt_capabilities_t *capa)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ return (dhd_rtt_capability(&dhd->pub, capa));
+}
+
+int
+dhd_dev_rtt_avail_channel(struct net_device *dev, wifi_channel_info *channel_info)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ return (dhd_rtt_avail_channel(&dhd->pub, channel_info));
+}
+
+int
+dhd_dev_rtt_enable_responder(struct net_device *dev, wifi_channel_info *channel_info)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ return (dhd_rtt_enable_responder(&dhd->pub, channel_info));
+}
+
+int dhd_dev_rtt_cancel_responder(struct net_device *dev)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ return (dhd_rtt_cancel_responder(&dhd->pub));
+}
+
+#endif /* RTT_SUPPORT && WL_CFG80211 */
+#if defined(PKT_FILTER_SUPPORT) && defined(APF)
+static void _dhd_apf_lock_local(dhd_info_t *dhd)
+{
+ if (dhd) {
+ mutex_lock(&dhd->dhd_apf_mutex);
+ }
+}
+
+static void _dhd_apf_unlock_local(dhd_info_t *dhd)
+{
+ if (dhd) {
+ mutex_unlock(&dhd->dhd_apf_mutex);
+ }
+}
+
+static int
+__dhd_apf_add_filter(struct net_device *ndev, uint32 filter_id,
+ u8* program, uint32 program_len)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(ndev);
+ dhd_pub_t *dhdp = &dhd->pub;
+ wl_pkt_filter_t * pkt_filterp;
+ wl_apf_program_t *apf_program;
+ char *buf;
+ u32 cmd_len, buf_len;
+ int ifidx, ret;
+ char cmd[] = "pkt_filter_add";
+
+ ifidx = dhd_net2idx(dhd, ndev);
+ if (ifidx == DHD_BAD_IF) {
+ DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
+ return -ENODEV;
+ }
+
+ cmd_len = sizeof(cmd);
+
+ /* Reject the request if program_len exceeds the maximum supported size
+ * or if the program pointer is NULL.
+ */
+ if ((program_len > WL_APF_PROGRAM_MAX_SIZE) || (program == NULL)) {
+ DHD_ERROR(("%s Invalid program_len: %d, program: %pK\n",
+ __FUNCTION__, program_len, program));
+ return -EINVAL;
+ }
+ buf_len = cmd_len + WL_PKT_FILTER_FIXED_LEN +
+ WL_APF_PROGRAM_FIXED_LEN + program_len;
+
+ buf = MALLOCZ(dhdp->osh, buf_len);
+ if (unlikely(!buf)) {
+ DHD_ERROR(("%s: MALLOC failure, %d bytes\n", __FUNCTION__, buf_len));
+ return -ENOMEM;
+ }
+
+ memcpy(buf, cmd, cmd_len);
+
+ pkt_filterp = (wl_pkt_filter_t *) (buf + cmd_len);
+ pkt_filterp->id = htod32(filter_id);
+ pkt_filterp->negate_match = htod32(FALSE);
+ pkt_filterp->type = htod32(WL_PKT_FILTER_TYPE_APF_MATCH);
+
+ apf_program = &pkt_filterp->u.apf_program;
+ apf_program->version = htod16(WL_APF_INTERNAL_VERSION);
+ apf_program->instr_len = htod16(program_len);
+ memcpy(apf_program->instrs, program, program_len);
+
+ ret = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, buf, buf_len, TRUE, ifidx);
+ if (unlikely(ret)) {
+ DHD_ERROR(("%s: failed to add APF filter, id=%d, ret=%d\n",
+ __FUNCTION__, filter_id, ret));
+ }
+
+ if (buf) {
+ MFREE(dhdp->osh, buf, buf_len);
+ }
+ return ret;
+}
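+/* Buffer layout assembled by __dhd_apf_add_filter() above (for reference):
+ *
+ *	[ "pkt_filter_add\0" | wl_pkt_filter_t header | APF program ]
+ *	  cmd_len bytes        WL_PKT_FILTER_FIXED_LEN  program_len
+ *	                       + WL_APF_PROGRAM_FIXED_LEN
+ *
+ * i.e. the iovar name, the generic filter header (id/negate/type) and the
+ * APF instructions travel to the firmware in a single WLC_SET_VAR ioctl.
+ */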
+
+static int
+__dhd_apf_config_filter(struct net_device *ndev, uint32 filter_id,
+ uint32 mode, uint32 enable)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(ndev);
+ dhd_pub_t *dhdp = &dhd->pub;
+ wl_pkt_filter_enable_t * pkt_filterp;
+ char *buf;
+ u32 cmd_len, buf_len;
+ int ifidx, ret;
+ char cmd[] = "pkt_filter_enable";
+
+ ifidx = dhd_net2idx(dhd, ndev);
+ if (ifidx == DHD_BAD_IF) {
+ DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
+ return -ENODEV;
+ }
+
+ cmd_len = sizeof(cmd);
+ buf_len = cmd_len + sizeof(*pkt_filterp);
+
+ buf = MALLOCZ(dhdp->osh, buf_len);
+ if (unlikely(!buf)) {
+ DHD_ERROR(("%s: MALLOC failure, %d bytes\n", __FUNCTION__, buf_len));
+ return -ENOMEM;
+ }
+
+ memcpy(buf, cmd, cmd_len);
+
+ pkt_filterp = (wl_pkt_filter_enable_t *) (buf + cmd_len);
+ pkt_filterp->id = htod32(filter_id);
+ pkt_filterp->enable = htod32(enable);
+
+ ret = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, buf, buf_len, TRUE, ifidx);
+ if (unlikely(ret)) {
+ DHD_ERROR(("%s: failed to enable APF filter, id=%d, ret=%d\n",
+ __FUNCTION__, filter_id, ret));
+ goto exit;
+ }
+
+ ret = dhd_wl_ioctl_set_intiovar(dhdp, "pkt_filter_mode", dhd_master_mode,
+ WLC_SET_VAR, TRUE, ifidx);
+ if (unlikely(ret)) {
+ DHD_ERROR(("%s: failed to set APF filter mode, id=%d, ret=%d\n",
+ __FUNCTION__, filter_id, ret));
+ }
+
+exit:
+ if (buf) {
+ MFREE(dhdp->osh, buf, buf_len);
+ }
+ return ret;
+}
+
+static int
+__dhd_apf_delete_filter(struct net_device *ndev, uint32 filter_id)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(ndev);
+ dhd_pub_t *dhdp = &dhd->pub;
+ int ifidx, ret;
+
+ ifidx = dhd_net2idx(dhd, ndev);
+ if (ifidx == DHD_BAD_IF) {
+ DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
+ return -ENODEV;
+ }
+
+ ret = dhd_wl_ioctl_set_intiovar(dhdp, "pkt_filter_delete",
+ htod32(filter_id), WLC_SET_VAR, TRUE, ifidx);
+ if (unlikely(ret)) {
+ DHD_ERROR(("%s: failed to delete APF filter, id=%d, ret=%d\n",
+ __FUNCTION__, filter_id, ret));
+ }
+
+ return ret;
+}
+
+void dhd_apf_lock(struct net_device *dev)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ _dhd_apf_lock_local(dhd);
+}
+
+void dhd_apf_unlock(struct net_device *dev)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ _dhd_apf_unlock_local(dhd);
+}
+
+int
+dhd_dev_apf_get_version(struct net_device *ndev, uint32 *version)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(ndev);
+ dhd_pub_t *dhdp = &dhd->pub;
+ int ifidx, ret;
+
+ if (!FW_SUPPORTED(dhdp, apf)) {
+ DHD_ERROR(("%s: firmware doesn't support APF\n", __FUNCTION__));
+
+ /*
+ * Notify Android framework that APF is not supported by setting
+ * version as zero.
+ */
+ *version = 0;
+ return BCME_OK;
+ }
+
+ ifidx = dhd_net2idx(dhd, ndev);
+ if (ifidx == DHD_BAD_IF) {
+ DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
+ return -ENODEV;
+ }
+
+ ret = dhd_wl_ioctl_get_intiovar(dhdp, "apf_ver", version,
+ WLC_GET_VAR, FALSE, ifidx);
+ if (unlikely(ret)) {
+ DHD_ERROR(("%s: failed to get APF version, ret=%d\n",
+ __FUNCTION__, ret));
+ }
+
+ return ret;
+}
+
+int
+dhd_dev_apf_get_max_len(struct net_device *ndev, uint32 *max_len)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(ndev);
+ dhd_pub_t *dhdp = &dhd->pub;
+ int ifidx, ret;
+
+ if (!FW_SUPPORTED(dhdp, apf)) {
+ DHD_ERROR(("%s: firmware doesn't support APF\n", __FUNCTION__));
+ *max_len = 0;
+ return BCME_OK;
+ }
+
+ ifidx = dhd_net2idx(dhd, ndev);
+ if (ifidx == DHD_BAD_IF) {
+ DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
+ return -ENODEV;
+ }
+
+ ret = dhd_wl_ioctl_get_intiovar(dhdp, "apf_size_limit", max_len,
+ WLC_GET_VAR, FALSE, ifidx);
+ if (unlikely(ret)) {
+ DHD_ERROR(("%s: failed to get APF size limit, ret=%d\n",
+ __FUNCTION__, ret));
+ }
+
+ return ret;
+}
+
+int
+dhd_dev_apf_add_filter(struct net_device *ndev, u8* program,
+ uint32 program_len)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(ndev);
+ dhd_pub_t *dhdp = &dhd->pub;
+ int ret;
+
+ DHD_APF_LOCK(ndev);
+
+ /* delete, if filter already exists */
+ if (dhdp->apf_set) {
+ ret = __dhd_apf_delete_filter(ndev, PKT_FILTER_APF_ID);
+ if (unlikely(ret)) {
+ goto exit;
+ }
+ dhdp->apf_set = FALSE;
+ }
+
+ ret = __dhd_apf_add_filter(ndev, PKT_FILTER_APF_ID, program, program_len);
+ if (ret) {
+ goto exit;
+ }
+ dhdp->apf_set = TRUE;
+
+ if (dhdp->in_suspend && dhdp->apf_set && !(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
+ /* Driver is still in (early) suspend state, enable APF filter back */
+ ret = __dhd_apf_config_filter(ndev, PKT_FILTER_APF_ID,
+ PKT_FILTER_MODE_FORWARD_ON_MATCH, TRUE);
+ }
+exit:
+ DHD_APF_UNLOCK(ndev);
+
+ return ret;
+}
+
+int
+dhd_dev_apf_enable_filter(struct net_device *ndev)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(ndev);
+ dhd_pub_t *dhdp = &dhd->pub;
+ int ret = 0;
+ bool nan_dp_active = false;
+
+ DHD_APF_LOCK(ndev);
+#ifdef WL_NAN
+ nan_dp_active = wl_cfgnan_is_dp_active(ndev);
+#endif /* WL_NAN */
+ if (dhdp->apf_set && (!(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE) &&
+ !nan_dp_active)) {
+ ret = __dhd_apf_config_filter(ndev, PKT_FILTER_APF_ID,
+ PKT_FILTER_MODE_FORWARD_ON_MATCH, TRUE);
+ }
+
+ DHD_APF_UNLOCK(ndev);
+
+ return ret;
+}
+
+int
+dhd_dev_apf_disable_filter(struct net_device *ndev)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(ndev);
+ dhd_pub_t *dhdp = &dhd->pub;
+ int ret = 0;
+
+ DHD_APF_LOCK(ndev);
+
+ if (dhdp->apf_set) {
+ ret = __dhd_apf_config_filter(ndev, PKT_FILTER_APF_ID,
+ PKT_FILTER_MODE_FORWARD_ON_MATCH, FALSE);
+ }
+
+ DHD_APF_UNLOCK(ndev);
+
+ return ret;
+}
+
+int
+dhd_dev_apf_delete_filter(struct net_device *ndev)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(ndev);
+ dhd_pub_t *dhdp = &dhd->pub;
+ int ret = 0;
+
+ DHD_APF_LOCK(ndev);
+
+ if (dhdp->apf_set) {
+ ret = __dhd_apf_delete_filter(ndev, PKT_FILTER_APF_ID);
+ if (!ret) {
+ dhdp->apf_set = FALSE;
+ }
+ }
+
+ DHD_APF_UNLOCK(ndev);
+
+ return ret;
+}
+#endif /* PKT_FILTER_SUPPORT && APF */
+
+#if defined(OEM_ANDROID)
+static void dhd_hang_process(struct work_struct *work_data)
+{
+ struct net_device *dev;
+#ifdef IFACE_HANG_FORCE_DEV_CLOSE
+ struct net_device *ndev;
+ uint8 i = 0;
+#endif /* IFACE_HANG_FORCE_DEV_CLOSE */
+ struct dhd_info *dhd;
+ /* Ignore compiler warnings due to -Werror=cast-qual */
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ dhd = container_of(work_data, dhd_info_t, dhd_hang_process_work);
+ GCC_DIAGNOSTIC_POP();
+
+ if (!dhd || !dhd->iflist[0])
+ return;
+ dev = dhd->iflist[0]->net;
+
+ if (dev) {
+#if defined(WL_WIRELESS_EXT)
+ wl_iw_send_priv_event(dev, "HANG");
+#endif
+#if defined(WL_CFG80211)
+ wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED);
+#endif
+ }
+#ifdef IFACE_HANG_FORCE_DEV_CLOSE
+ /*
+ * For HW2, dev_close needs to be done so the upper layer can recover
+ * after a hang. For Interposer, skip dev_close so that dhd iovars can
+ * be used to take a socram dump after the crash; also skip for HW4, as
+ * its handling of the hang event is different.
+ */
+
+ rtnl_lock();
+ for (i = 0; i < DHD_MAX_IFS; i++) {
+ ndev = dhd->iflist[i] ? dhd->iflist[i]->net : NULL;
+ if (ndev && (ndev->flags & IFF_UP)) {
+ DHD_ERROR(("ndev->name : %s dev close\n",
+ ndev->name));
+#ifdef ENABLE_INSMOD_NO_FW_LOAD
+ dhd_download_fw_on_driverload = FALSE;
+#endif
+ dev_close(ndev);
+ }
+ }
+ rtnl_unlock();
+#endif /* IFACE_HANG_FORCE_DEV_CLOSE */
+}
+
+#if defined(CONFIG_ARCH_EXYNOS) && defined(BCMPCIE)
+extern dhd_pub_t *link_recovery;
+void dhd_host_recover_link(void)
+{
+ DHD_ERROR(("****** %s ******\n", __FUNCTION__));
+ link_recovery->hang_reason = HANG_REASON_PCIE_LINK_DOWN_RC_DETECT;
+ dhd_bus_set_linkdown(link_recovery, TRUE);
+ dhd_os_send_hang_message(link_recovery);
+}
+EXPORT_SYMBOL(dhd_host_recover_link);
+#endif /* CONFIG_ARCH_EXYNOS && BCMPCIE */
+
+#ifdef DHD_DETECT_CONSECUTIVE_MFG_HANG
+#define MAX_CONSECUTIVE_MFG_HANG_COUNT 2
+#endif /* DHD_DETECT_CONSECUTIVE_MFG_HANG */
+int dhd_os_send_hang_message(dhd_pub_t *dhdp)
+{
+ int ret = 0;
+ dhd_info_t *dhd_info = NULL;
+#ifdef WL_CFG80211
+ struct net_device *primary_ndev;
+ struct bcm_cfg80211 *cfg;
+#endif /* WL_CFG80211 */
+
+ if (!dhdp) {
+ DHD_ERROR(("%s: dhdp is null\n", __FUNCTION__));
+ return -EINVAL;
+ }
+
+ dhd_info = (dhd_info_t *)dhdp->info;
+ BCM_REFERENCE(dhd_info);
+
+#if defined(WLAN_ACCEL_BOOT)
+ if (!dhd_info->wl_accel_force_reg_on) {
+ DHD_ERROR(("%s: set force reg on\n", __FUNCTION__));
+ dhd_info->wl_accel_force_reg_on = TRUE;
+ }
+#endif /* WLAN_ACCEL_BOOT */
+
+ if (!dhdp->hang_report) {
+ DHD_ERROR(("%s: hang_report is disabled\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+#if defined(WL_CFG80211) && defined(DHD_FILE_DUMP_EVENT)
+ if (dhd_info->scheduled_memdump) {
+ DHD_ERROR_RLMT(("[DUMP]:%s, memdump in progress. return\n", __FUNCTION__));
+ dhdp->hang_was_pending = 1;
+ return BCME_OK;
+ }
+#endif /* WL_CFG80211 && DHD_FILE_DUMP_EVENT */
+
+#ifdef WL_CFG80211
+ primary_ndev = dhd_linux_get_primary_netdev(dhdp);
+ if (!primary_ndev) {
+ DHD_ERROR(("%s: Cannot find primary netdev\n", __FUNCTION__));
+ return -ENODEV;
+ }
+ cfg = wl_get_cfg(primary_ndev);
+ if (!cfg) {
+ DHD_ERROR(("%s: Cannot find cfg\n", __FUNCTION__));
+ return -EINVAL;
+ }
+
+ /* Skip sending HANG event to framework if driver is not ready */
+ if (!wl_get_drv_status(cfg, READY, primary_ndev)) {
+ DHD_ERROR(("%s: device is not ready\n", __FUNCTION__));
+ return -ENODEV;
+ }
+#endif /* WL_CFG80211 */
+
+#if defined(DHD_HANG_SEND_UP_TEST)
+ if (dhdp->req_hang_type) {
+ DHD_ERROR(("%s, Clear HANG test request 0x%x\n",
+ __FUNCTION__, dhdp->req_hang_type));
+ dhdp->req_hang_type = 0;
+ }
+#endif /* DHD_HANG_SEND_UP_TEST */
+
+ if (!dhdp->hang_was_sent) {
+#ifdef DHD_DETECT_CONSECUTIVE_MFG_HANG
+ if (dhdp->op_mode & DHD_FLAG_MFG_MODE) {
+ dhdp->hang_count++;
+ if (dhdp->hang_count >= MAX_CONSECUTIVE_MFG_HANG_COUNT) {
+ DHD_ERROR(("%s, Consecutive hang from Dongle :%u\n",
+ __FUNCTION__, dhdp->hang_count));
+ BUG_ON(1);
+ }
+ }
+#endif /* DHD_DETECT_CONSECUTIVE_MFG_HANG */
+#ifdef DHD_DEBUG_UART
+ /* If the PCIe link is broken, execute the debug UART application
+ * to gather ramdump data from the dongle via UART
+ */
+ if (!dhdp->info->duart_execute) {
+ dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
+ (void *)dhdp, DHD_WQ_WORK_DEBUG_UART_DUMP,
+ dhd_debug_uart_exec_rd, DHD_WQ_WORK_PRIORITY_HIGH);
+ }
+#endif /* DHD_DEBUG_UART */
+ dhdp->hang_was_sent = 1;
+#ifdef BT_OVER_SDIO
+ dhdp->is_bt_recovery_required = TRUE;
+#endif
+ schedule_work(&dhdp->info->dhd_hang_process_work);
+ DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d s=%d\n", __FUNCTION__,
+ dhdp->rxcnt_timeout, dhdp->txcnt_timeout, dhdp->busstate));
+ printf("%s\n", info_string);
+ printf("MAC %pM\n", &dhdp->mac);
+ }
+ return ret;
+}
+
+int net_os_send_hang_message(struct net_device *dev)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ int ret = 0;
+
+ if (dhd) {
+ /* Report FW problem when enabled */
+ if (dhd->pub.hang_report) {
+#ifdef BT_OVER_SDIO
+ if (netif_running(dev)) {
+#endif /* BT_OVER_SDIO */
+ ret = dhd_os_send_hang_message(&dhd->pub);
+#ifdef BT_OVER_SDIO
+ }
+ DHD_ERROR(("%s: HANG -> Reset BT\n", __FUNCTION__));
+ bcmsdh_btsdio_process_dhd_hang_notification(!netif_running(dev));
+#endif /* BT_OVER_SDIO */
+ } else {
+ DHD_ERROR(("%s: FW HANG ignored (for testing purpose) and not sent up\n",
+ __FUNCTION__));
+ }
+ }
+ return ret;
+}
+
+int net_os_send_hang_message_reason(struct net_device *dev, const char *string_num)
+{
+ dhd_info_t *dhd = NULL;
+ dhd_pub_t *dhdp = NULL;
+ int reason;
+
+ dhd = DHD_DEV_INFO(dev);
+ if (dhd) {
+ dhdp = &dhd->pub;
+ }
+
+ if (!dhd || !dhdp) {
+ return 0;
+ }
+
+ reason = bcm_strtoul(string_num, NULL, 0);
+ DHD_INFO(("%s: Enter, reason=0x%x\n", __FUNCTION__, reason));
+
+ if ((reason <= HANG_REASON_MASK) || (reason >= HANG_REASON_MAX)) {
+ reason = 0;
+ }
+
+ dhdp->hang_reason = reason;
+
+ return net_os_send_hang_message(dev);
+}
+#endif /* OEM_ANDROID */
+
+int dhd_net_wifi_platform_set_power(struct net_device *dev, bool on, unsigned long delay_msec)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ return wifi_platform_set_power(dhd->adapter, on, delay_msec);
+}
+
+int dhd_wifi_platform_set_power(dhd_pub_t *pub, bool on)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ unsigned long delay_msec = on ? WIFI_TURNON_DELAY : WIFI_TURNOFF_DELAY;
+ return wifi_platform_set_power(dhd->adapter, on, delay_msec);
+}
+
+bool dhd_force_country_change(struct net_device *dev)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+ if (dhd && dhd->pub.up)
+ return dhd->pub.force_country_change;
+ return FALSE;
+}
+
+void dhd_get_customized_country_code(struct net_device *dev, char *country_iso_code,
+ wl_country_t *cspec)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+#if defined(DHD_BLOB_EXISTENCE_CHECK)
+ if (!dhd->pub.is_blob)
+#endif /* DHD_BLOB_EXISTENCE_CHECK */
+ {
+#if defined(CUSTOM_COUNTRY_CODE)
+ get_customized_country_code(dhd->adapter, country_iso_code, cspec,
+ dhd->pub.dhd_cflags);
+#else
+ get_customized_country_code(dhd->adapter, country_iso_code, cspec);
+#endif /* CUSTOM_COUNTRY_CODE */
+ }
+#if defined(DHD_BLOB_EXISTENCE_CHECK) && !defined(CUSTOM_COUNTRY_CODE)
+ else {
+ /* Replace the ccode to XZ if ccode is undefined country */
+ if (strncmp(country_iso_code, "", WLC_CNTRY_BUF_SZ) == 0) {
+ strlcpy(country_iso_code, "XZ", WLC_CNTRY_BUF_SZ);
+ strlcpy(cspec->country_abbrev, country_iso_code, WLC_CNTRY_BUF_SZ);
+ strlcpy(cspec->ccode, country_iso_code, WLC_CNTRY_BUF_SZ);
+ DHD_ERROR(("%s: ccode change to %s\n", __FUNCTION__, country_iso_code));
+ }
+ }
+#endif /* DHD_BLOB_EXISTENCE_CHECK && !CUSTOM_COUNTRY_CODE */
+
+#ifdef KEEP_JP_REGREV
+/* XXX Needed per customer request */
+ if (strncmp(country_iso_code, "JP", 3) == 0) {
+#if defined(DHD_BLOB_EXISTENCE_CHECK)
+ if (dhd->pub.is_blob) {
+ if (strncmp(dhd->pub.vars_ccode, "J1", 3) == 0) {
+ memcpy(cspec->ccode, dhd->pub.vars_ccode,
+ sizeof(dhd->pub.vars_ccode));
+ }
+ } else
+#endif /* DHD_BLOB_EXISTENCE_CHECK */
+ {
+ if (strncmp(dhd->pub.vars_ccode, "JP", 3) == 0) {
+ cspec->rev = dhd->pub.vars_regrev;
+ }
+ }
+ }
+#endif /* KEEP_JP_REGREV */
+ BCM_REFERENCE(dhd);
+}
+
+void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec, bool notify)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+#ifdef WL_CFG80211
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+#endif
+
+ if (dhd && dhd->pub.up) {
+ memcpy(&dhd->pub.dhd_cspec, cspec, sizeof(wl_country_t));
+#ifdef WL_CFG80211
+ wl_update_wiphybands(cfg, notify);
+#endif
+ }
+}
+
+void dhd_bus_band_set(struct net_device *dev, uint band)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+#ifdef WL_CFG80211
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+#endif
+ if (dhd && dhd->pub.up) {
+#ifdef WL_CFG80211
+ wl_update_wiphybands(cfg, true);
+#endif
+ }
+}
+
+int dhd_net_set_fw_path(struct net_device *dev, char *fw)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+ if (!fw || fw[0] == '\0')
+ return -EINVAL;
+
+ strlcpy(dhd->fw_path, fw, sizeof(dhd->fw_path));
+
+#if defined(OEM_ANDROID) && defined(SOFTAP)
+ if (strstr(fw, "apsta") != NULL) {
+ DHD_INFO(("GOT APSTA FIRMWARE\n"));
+ ap_fw_loaded = TRUE;
+ } else {
+ DHD_INFO(("GOT STA FIRMWARE\n"));
+ ap_fw_loaded = FALSE;
+ }
+#endif /* defined(OEM_ANDROID) && defined(SOFTAP) */
+ return 0;
+}
+
+void dhd_net_if_lock(struct net_device *dev)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ dhd_net_if_lock_local(dhd);
+}
+
+void dhd_net_if_unlock(struct net_device *dev)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ dhd_net_if_unlock_local(dhd);
+}
+
+static void dhd_net_if_lock_local(dhd_info_t *dhd)
+{
+#if defined(OEM_ANDROID)
+ if (dhd)
+ mutex_lock(&dhd->dhd_net_if_mutex);
+#endif
+}
+
+static void dhd_net_if_unlock_local(dhd_info_t *dhd)
+{
+#if defined(OEM_ANDROID)
+ if (dhd)
+ mutex_unlock(&dhd->dhd_net_if_mutex);
+#endif
+}
+
+static void dhd_suspend_lock(dhd_pub_t *pub)
+{
+#if defined(OEM_ANDROID)
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ if (dhd)
+ mutex_lock(&dhd->dhd_suspend_mutex);
+#endif
+}
+
+static void dhd_suspend_unlock(dhd_pub_t *pub)
+{
+#if defined(OEM_ANDROID)
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ if (dhd)
+ mutex_unlock(&dhd->dhd_suspend_mutex);
+#endif
+}
+
+unsigned long dhd_os_general_spin_lock(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ unsigned long flags = 0;
+
+ if (dhd) {
+ flags = osl_spin_lock(&dhd->dhd_lock);
+ }
+
+ return flags;
+}
+
+void dhd_os_general_spin_unlock(dhd_pub_t *pub, unsigned long flags)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+ if (dhd) {
+ osl_spin_unlock(&dhd->dhd_lock, flags);
+ }
+}
+
+void *
+dhd_os_dbgring_lock_init(osl_t *osh)
+{
+ struct mutex *mtx = NULL;
+
+ mtx = MALLOCZ(osh, sizeof(*mtx));
+ if (mtx)
+ mutex_init(mtx);
+
+ return mtx;
+}
+
+void
+dhd_os_dbgring_lock_deinit(osl_t *osh, void *mtx)
+{
+ if (mtx) {
+ mutex_destroy(mtx);
+ MFREE(osh, mtx, sizeof(struct mutex));
+ }
+}
+
+static int
+dhd_get_pend_8021x_cnt(dhd_info_t *dhd)
+{
+ return (atomic_read(&dhd->pend_8021x_cnt));
+}
+
+#define MAX_WAIT_FOR_8021X_TX 100
+
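+/* Wait for all pending 802.1X (EAPOL) frames to be transmitted, sleeping
+ * ~10 ms per iteration for at most MAX_WAIT_FOR_8021X_TX iterations (about
+ * one second) before force-clearing the counter and logging a timeout.
+ */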
+int
+dhd_wait_pend8021x(struct net_device *dev)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ int timeout = msecs_to_jiffies(10);
+ int ntimes = MAX_WAIT_FOR_8021X_TX;
+ int pend = dhd_get_pend_8021x_cnt(dhd);
+
+ while (ntimes && pend) {
+ if (pend) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(timeout);
+ set_current_state(TASK_RUNNING);
+ ntimes--;
+ }
+ pend = dhd_get_pend_8021x_cnt(dhd);
+ }
+ if (ntimes == 0) {
+ atomic_set(&dhd->pend_8021x_cnt, 0);
+ WL_MSG(dev->name, "TIMEOUT\n");
+ }
+ return pend;
+}
+
+#if defined(BCM_ROUTER_DHD) || defined(DHD_DEBUG)
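+/* Kernel-space file write helper. On kernels before 5.10 the address limit
+ * is widened with set_fs(KERNEL_DS) so vfs_write() accepts a kernel buffer;
+ * set_fs() was removed in 5.10, so those calls are compiled out there.
+ */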
+int write_file(const char * file_name, uint32 flags, uint8 *buf, int size)
+{
+ int ret = 0;
+ struct file *fp = NULL;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
+ mm_segment_t old_fs;
+#endif
+ loff_t pos = 0;
+
+ /* change to KERNEL_DS address limit */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+#endif
+
+ /* open file to write */
+ fp = filp_open(file_name, flags, 0664);
+ if (IS_ERR(fp)) {
+ DHD_ERROR(("open file error, err = %ld\n", PTR_ERR(fp)));
+ goto exit;
+ }
+
+ /* Write buf to file */
+ ret = vfs_write(fp, buf, size, &pos);
+ if (ret < 0) {
+ DHD_ERROR(("write file error, err = %d\n", ret));
+ goto exit;
+ }
+
+ /* Sync file from filesystem to physical media */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
+ ret = vfs_fsync(fp, 0);
+#else
+ ret = vfs_fsync(fp, fp->f_path.dentry, 0);
+#endif
+ if (ret < 0) {
+ DHD_ERROR(("sync file error, error = %d\n", ret));
+ goto exit;
+ }
+ ret = BCME_OK;
+
+exit:
+ /* close file before return */
+ if (!IS_ERR(fp))
+ filp_close(fp, current->files);
+
+ /* restore previous address limit */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
+ set_fs(old_fs);
+#endif
+
+ return ret;
+}
+#endif /* BCM_ROUTER_DHD || DHD_DEBUG */
+
+#ifdef DHD_DEBUG
+static void
+dhd_convert_memdump_type_to_str(uint32 type, char *buf, size_t buf_len, int substr_type)
+{
+ char *type_str = NULL;
+
+ switch (type) {
+ case DUMP_TYPE_RESUMED_ON_TIMEOUT:
+ type_str = "resumed_on_timeout";
+ break;
+ case DUMP_TYPE_D3_ACK_TIMEOUT:
+ type_str = "D3_ACK_timeout";
+ break;
+ case DUMP_TYPE_DONGLE_TRAP:
+ type_str = "Dongle_Trap";
+ break;
+ case DUMP_TYPE_MEMORY_CORRUPTION:
+ type_str = "Memory_Corruption";
+ break;
+ case DUMP_TYPE_PKTID_AUDIT_FAILURE:
+ type_str = "PKTID_AUDIT_Fail";
+ break;
+ case DUMP_TYPE_PKTID_INVALID:
+ type_str = "PKTID_INVALID";
+ break;
+ case DUMP_TYPE_SCAN_TIMEOUT:
+ type_str = "SCAN_timeout";
+ break;
+ case DUMP_TYPE_SCAN_BUSY:
+ type_str = "SCAN_Busy";
+ break;
+ case DUMP_TYPE_BY_SYSDUMP:
+ if (substr_type == CMD_UNWANTED) {
+ type_str = "BY_SYSDUMP_FORUSER_unwanted";
+ } else if (substr_type == CMD_DISCONNECTED) {
+ type_str = "BY_SYSDUMP_FORUSER_disconnected";
+ } else {
+ type_str = "BY_SYSDUMP_FORUSER";
+ }
+ break;
+ case DUMP_TYPE_BY_LIVELOCK:
+ type_str = "BY_LIVELOCK";
+ break;
+ case DUMP_TYPE_AP_LINKUP_FAILURE:
+ type_str = "BY_AP_LINK_FAILURE";
+ break;
+ case DUMP_TYPE_AP_ABNORMAL_ACCESS:
+ type_str = "INVALID_ACCESS";
+ break;
+ case DUMP_TYPE_RESUMED_ON_TIMEOUT_RX:
+ type_str = "ERROR_RX_TIMED_OUT";
+ break;
+ case DUMP_TYPE_RESUMED_ON_TIMEOUT_TX:
+ type_str = "ERROR_TX_TIMED_OUT";
+ break;
+ case DUMP_TYPE_CFG_VENDOR_TRIGGERED:
+ type_str = "CFG_VENDOR_TRIGGERED";
+ break;
+ case DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR:
+ type_str = "BY_INVALID_RING_RDWR";
+ break;
+ case DUMP_TYPE_IFACE_OP_FAILURE:
+ type_str = "BY_IFACE_OP_FAILURE";
+ break;
+ case DUMP_TYPE_TRANS_ID_MISMATCH:
+ type_str = "BY_TRANS_ID_MISMATCH";
+ break;
+#ifdef DEBUG_DNGL_INIT_FAIL
+ case DUMP_TYPE_DONGLE_INIT_FAILURE:
+ type_str = "DONGLE_INIT_FAIL";
+ break;
+#endif /* DEBUG_DNGL_INIT_FAIL */
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+ case DUMP_TYPE_READ_SHM_FAIL:
+ type_str = "READ_SHM_FAIL";
+ break;
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+ case DUMP_TYPE_DONGLE_HOST_EVENT:
+ type_str = "BY_DONGLE_HOST_EVENT";
+ break;
+ case DUMP_TYPE_SMMU_FAULT:
+ type_str = "SMMU_FAULT";
+ break;
+#ifdef DHD_ERPOM
+ case DUMP_TYPE_DUE_TO_BT:
+ type_str = "DUE_TO_BT";
+ break;
+#endif /* DHD_ERPOM */
+ case DUMP_TYPE_BY_USER:
+ type_str = "BY_USER";
+ break;
+ case DUMP_TYPE_LOGSET_BEYOND_RANGE:
+ type_str = "LOGSET_BEYOND_RANGE";
+ break;
+ case DUMP_TYPE_CTO_RECOVERY:
+ type_str = "CTO_RECOVERY";
+ break;
+ case DUMP_TYPE_SEQUENTIAL_PRIVCMD_ERROR:
+ type_str = "SEQUENTIAL_PRIVCMD_ERROR";
+ break;
+ case DUMP_TYPE_PROXD_TIMEOUT:
+ type_str = "PROXD_TIMEOUT";
+ break;
+ case DUMP_TYPE_INBAND_DEVICE_WAKE_FAILURE:
+ type_str = "INBAND_DEVICE_WAKE_FAILURE";
+ break;
+ case DUMP_TYPE_PKTID_POOL_DEPLETED:
+ type_str = "PKTID_POOL_DEPLETED";
+ break;
+ case DUMP_TYPE_ESCAN_SYNCID_MISMATCH:
+ type_str = "ESCAN_SYNCID_MISMATCH";
+ break;
+ case DUMP_TYPE_INVALID_SHINFO_NRFRAGS:
+ type_str = "INVALID_SHINFO_NRFRAGS";
+ break;
+ default:
+ type_str = "Unknown_type";
+ break;
+ }
+
+ strlcpy(buf, type_str, buf_len);
+}
+
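+/* Build the dump file path as
+ * DHD_COMMON_DUMP_PATH<fname>_<memdump_type>_<debug_dump_time>, the same
+ * naming scheme used by write_dump_to_file() below.
+ */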
+void
+dhd_get_memdump_filename(struct net_device *ndev, char *memdump_path, int len, char *fname)
+{
+ char memdump_type[DHD_MEMDUMP_TYPE_STR_LEN];
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(ndev);
+ dhd_pub_t *dhdp = &dhd->pub;
+
+ /* Init file name */
+ memset(memdump_path, 0, len);
+ memset(memdump_type, 0, DHD_MEMDUMP_TYPE_STR_LEN);
+ dhd_convert_memdump_type_to_str(dhdp->memdump_type, memdump_type, DHD_MEMDUMP_TYPE_STR_LEN,
+ dhdp->debug_dump_subcmd);
+ clear_debug_dump_time(dhdp->debug_dump_time_str);
+ get_debug_dump_time(dhdp->debug_dump_time_str);
+ snprintf(memdump_path, len, "%s%s_%s_" "%s",
+ DHD_COMMON_DUMP_PATH, fname, memdump_type, dhdp->debug_dump_time_str);
+
+ if (strstr(fname, "sssr_dump")) {
+ DHD_SSSR_PRINT_FILEPATH(dhdp, memdump_path);
+ } else {
+ DHD_ERROR(("%s: file_path = %s%s\n", __FUNCTION__,
+ memdump_path, FILE_NAME_HAL_TAG));
+ }
+}
+
+int
+write_dump_to_file(dhd_pub_t *dhd, uint8 *buf, int size, char *fname)
+{
+ int ret = 0;
+ char memdump_path[DHD_MEMDUMP_PATH_STR_LEN];
+ char memdump_type[DHD_MEMDUMP_TYPE_STR_LEN];
+ uint32 file_mode;
+
+ /* Init file name */
+ memset(memdump_path, 0, DHD_MEMDUMP_PATH_STR_LEN);
+ memset(memdump_type, 0, DHD_MEMDUMP_TYPE_STR_LEN);
+ dhd_convert_memdump_type_to_str(dhd->memdump_type, memdump_type, DHD_MEMDUMP_TYPE_STR_LEN,
+ dhd->debug_dump_subcmd);
+ clear_debug_dump_time(dhd->debug_dump_time_str);
+ get_debug_dump_time(dhd->debug_dump_time_str);
+
+ snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_" "%s",
+ DHD_COMMON_DUMP_PATH, fname, memdump_type, dhd->debug_dump_time_str);
+#ifdef CUSTOMER_HW4_DEBUG
+ file_mode = O_CREAT | O_WRONLY | O_SYNC;
+#elif defined(CUSTOMER_HW2) || defined(BOARD_HIKEY)
+ file_mode = O_CREAT | O_WRONLY | O_SYNC;
+#elif defined(OEM_ANDROID) && defined(__ARM_ARCH_7A__)
+ file_mode = O_CREAT | O_WRONLY;
+#elif defined(OEM_ANDROID)
+ /* O_SYNC is required for Brix Android: BUG_ON is called immediately
+ * after collecting the socram dump, so the write must go straight to
+ * the file rather than sitting in the page cache.
+ */
+ file_mode = O_CREAT | O_WRONLY | O_SYNC;
+ {
+ struct file *fp = filp_open(memdump_path, file_mode, 0664);
+ /* Check if it is live Brix image having /installmedia, else use /data */
+ if (IS_ERR(fp)) {
+ DHD_ERROR(("open file %s, try /data/\n", memdump_path));
+ snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_" "%s",
+ "/data/", fname, memdump_type, dhd->debug_dump_time_str);
+ } else {
+ filp_close(fp, NULL);
+ }
+ }
+#else
+ file_mode = O_CREAT | O_WRONLY;
+#endif /* CUSTOMER_HW4_DEBUG */
+
+ /* print SOCRAM dump file path */
+ DHD_ERROR(("%s: file_path = %s\n", __FUNCTION__, memdump_path));
+
+#ifdef DHD_LOG_DUMP
+ dhd_print_buf_addr(dhd, "write_dump_to_file", buf, size);
+#endif /* DHD_LOG_DUMP */
+
+ /* Write file */
+ ret = write_file(memdump_path, file_mode, buf, size);
+
+#ifdef DHD_DUMP_MNGR
+ if (ret == BCME_OK) {
+ dhd_dump_file_manage_enqueue(dhd, memdump_path, fname);
+ }
+#endif /* DHD_DUMP_MNGR */
+
+ return ret;
+}
+#endif /* DHD_DEBUG */
+
+int dhd_os_wake_lock_timeout(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ unsigned long flags;
+ int ret = 0;
+
+ if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
+ DHD_WAKE_SPIN_LOCK(&dhd->wakelock_spinlock, flags);
+ ret = dhd->wakelock_rx_timeout_enable > dhd->wakelock_ctrl_timeout_enable ?
+ dhd->wakelock_rx_timeout_enable : dhd->wakelock_ctrl_timeout_enable;
+#ifdef CONFIG_HAS_WAKELOCK
+ if (dhd->wakelock_rx_timeout_enable)
+ dhd_wake_lock_timeout(&dhd->wl_rxwake,
+ msecs_to_jiffies(dhd->wakelock_rx_timeout_enable));
+ if (dhd->wakelock_ctrl_timeout_enable)
+ dhd_wake_lock_timeout(&dhd->wl_ctrlwake,
+ msecs_to_jiffies(dhd->wakelock_ctrl_timeout_enable));
+#endif
+ dhd->wakelock_rx_timeout_enable = 0;
+ dhd->wakelock_ctrl_timeout_enable = 0;
+ DHD_WAKE_SPIN_UNLOCK(&dhd->wakelock_spinlock, flags);
+ }
+ return ret;
+}
+
+int net_os_wake_lock_timeout(struct net_device *dev)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ int ret = 0;
+
+ if (dhd)
+ ret = dhd_os_wake_lock_timeout(&dhd->pub);
+ return ret;
+}
+
+int dhd_os_wake_lock_rx_timeout_enable(dhd_pub_t *pub, int val)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ unsigned long flags;
+
+ if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
+ DHD_WAKE_SPIN_LOCK(&dhd->wakelock_spinlock, flags);
+ if (val > dhd->wakelock_rx_timeout_enable)
+ dhd->wakelock_rx_timeout_enable = val;
+ DHD_WAKE_SPIN_UNLOCK(&dhd->wakelock_spinlock, flags);
+ }
+ return 0;
+}
+
+int dhd_os_wake_lock_ctrl_timeout_enable(dhd_pub_t *pub, int val)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ unsigned long flags;
+
+ if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
+ DHD_WAKE_SPIN_LOCK(&dhd->wakelock_spinlock, flags);
+ if (val > dhd->wakelock_ctrl_timeout_enable)
+ dhd->wakelock_ctrl_timeout_enable = val;
+ DHD_WAKE_SPIN_UNLOCK(&dhd->wakelock_spinlock, flags);
+ }
+ return 0;
+}
+
+int dhd_os_wake_lock_ctrl_timeout_cancel(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ unsigned long flags;
+
+ if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
+ DHD_WAKE_SPIN_LOCK(&dhd->wakelock_spinlock, flags);
+ dhd->wakelock_ctrl_timeout_enable = 0;
+#ifdef CONFIG_HAS_WAKELOCK
+ if (dhd_wake_lock_active(&dhd->wl_ctrlwake))
+ dhd_wake_unlock(&dhd->wl_ctrlwake);
+#endif
+ DHD_WAKE_SPIN_UNLOCK(&dhd->wakelock_spinlock, flags);
+ }
+ return 0;
+}
+
+int net_os_wake_lock_rx_timeout_enable(struct net_device *dev, int val)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ int ret = 0;
+
+ if (dhd)
+ ret = dhd_os_wake_lock_rx_timeout_enable(&dhd->pub, val);
+ return ret;
+}
+
+int net_os_wake_lock_ctrl_timeout_enable(struct net_device *dev, int val)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ int ret = 0;
+
+ if (dhd)
+ ret = dhd_os_wake_lock_ctrl_timeout_enable(&dhd->pub, val);
+ return ret;
+}
+
+#if defined(DHD_TRACE_WAKE_LOCK)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+#include <linux/hashtable.h>
+#else
+#include <linux/hash.h>
+#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+/* Define a hash table with 2^5 = 32 buckets */
+DEFINE_HASHTABLE(wklock_history, 5);
+#else
+/* Define a hash table with 2^5 = 32 buckets */
+struct hlist_head wklock_history[32] = { [0 ... 31] = HLIST_HEAD_INIT };
+#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
+
+atomic_t trace_wklock_onoff;
+typedef enum dhd_wklock_type {
+ DHD_WAKE_LOCK,
+ DHD_WAKE_UNLOCK,
+ DHD_WAIVE_LOCK,
+ DHD_RESTORE_LOCK
+} dhd_wklock_t;
+
+struct wk_trace_record {
+ unsigned long addr; /* Address of the instruction */
+ dhd_wklock_t lock_type; /* lock_type */
+ unsigned long long counter; /* counter information */
+ struct hlist_node wklock_node; /* hash node */
+};
+
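+/* Look up a trace record by caller address. On 3.7+ kernels the generic
+ * hashtable API searches only the bucket the address hashes to; the pre-3.7
+ * fallback computes the bucket index by hand with hash_long().
+ */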
+static struct wk_trace_record *find_wklock_entry(unsigned long addr)
+{
+ struct wk_trace_record *wklock_info;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ hash_for_each_possible(wklock_history, wklock_info, wklock_node, addr)
+#else
+ struct hlist_node *entry;
+ int index = hash_long(addr, ilog2(ARRAY_SIZE(wklock_history)));
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ hlist_for_each_entry(wklock_info, entry, &wklock_history[index], wklock_node)
+#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
+ {
+ GCC_DIAGNOSTIC_POP();
+ if (wklock_info->addr == addr) {
+ return wklock_info;
+ }
+ }
+ return NULL;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+#define HASH_ADD(hashtable, node, key) \
+ do { \
+ hash_add(hashtable, node, key); \
+ } while (0)
+#else
+#define HASH_ADD(hashtable, node, key) \
+ do { \
+ int index = hash_long(key, ilog2(ARRAY_SIZE(hashtable))); \
+ hlist_add_head(node, &hashtable[index]); \
+ } while (0)
+#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
+
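+/* STORE_WKLOCK_RECORD logs one wakelock event keyed by the caller's return
+ * address (__builtin_return_address(0)). Lock/unlock events increment the
+ * per-caller counter; waive/restore events snapshot wakelock_counter instead.
+ * All call sites below invoke it with wakelock_spinlock held.
+ */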
+#define STORE_WKLOCK_RECORD(wklock_type) \
+ do { \
+ struct wk_trace_record *wklock_info = NULL; \
+ unsigned long func_addr = (unsigned long)__builtin_return_address(0); \
+ wklock_info = find_wklock_entry(func_addr); \
+ if (wklock_info) { \
+ if (wklock_type == DHD_WAIVE_LOCK || wklock_type == DHD_RESTORE_LOCK) { \
+ wklock_info->counter = dhd->wakelock_counter; \
+ } else { \
+ wklock_info->counter++; \
+ } \
+ } else { \
+ wklock_info = kzalloc(sizeof(*wklock_info), GFP_ATOMIC); \
+ if (!wklock_info) {\
+ printk("Can't allocate wk_trace_record \n"); \
+ } else { \
+ wklock_info->addr = func_addr; \
+ wklock_info->lock_type = wklock_type; \
+ if (wklock_type == DHD_WAIVE_LOCK || \
+ wklock_type == DHD_RESTORE_LOCK) { \
+ wklock_info->counter = dhd->wakelock_counter; \
+ } else { \
+ wklock_info->counter++; \
+ } \
+ HASH_ADD(wklock_history, &wklock_info->wklock_node, func_addr); \
+ } \
+ } \
+ } while (0)
+
+static inline void dhd_wk_lock_rec_dump(void)
+{
+ int bkt;
+ struct wk_trace_record *wklock_info;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ hash_for_each(wklock_history, bkt, wklock_info, wklock_node)
+#else
+ struct hlist_node *entry = NULL;
+ int max_index = ARRAY_SIZE(wklock_history);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ for (bkt = 0; bkt < max_index; bkt++)
+ hlist_for_each_entry(wklock_info, entry, &wklock_history[bkt], wklock_node)
+#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
+ {
+ GCC_DIAGNOSTIC_POP();
+ switch (wklock_info->lock_type) {
+ case DHD_WAKE_LOCK:
+ DHD_ERROR(("wakelock lock : %pS lock_counter : %llu \n",
+ (void *)wklock_info->addr, wklock_info->counter));
+ break;
+ case DHD_WAKE_UNLOCK:
+ DHD_ERROR(("wakelock unlock : %pS,"
+ " unlock_counter : %llu \n",
+ (void *)wklock_info->addr, wklock_info->counter));
+ break;
+ case DHD_WAIVE_LOCK:
+ DHD_ERROR(("wakelock waive : %pS before_waive : %llu \n",
+ (void *)wklock_info->addr, wklock_info->counter));
+ break;
+ case DHD_RESTORE_LOCK:
+ DHD_ERROR(("wakelock restore : %pS, after_waive : %llu \n",
+ (void *)wklock_info->addr, wklock_info->counter));
+ break;
+ }
+ }
+}
+
+static void dhd_wk_lock_trace_init(struct dhd_info *dhd)
+{
+ unsigned long flags;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
+ int i;
+#endif /* KERNEL_VER < KERNEL_VERSION(3, 7, 0) */
+
+ DHD_WAKE_SPIN_LOCK(&dhd->wakelock_spinlock, flags);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+ hash_init(wklock_history);
+#else
+ for (i = 0; i < ARRAY_SIZE(wklock_history); i++)
+ INIT_HLIST_HEAD(&wklock_history[i]);
+#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
+ DHD_WAKE_SPIN_UNLOCK(&dhd->wakelock_spinlock, flags);
+ atomic_set(&trace_wklock_onoff, 1);
+}
+
+static void dhd_wk_lock_trace_deinit(struct dhd_info *dhd)
+{
+ int bkt;
+ struct wk_trace_record *wklock_info;
+ struct hlist_node *tmp;
+ unsigned long flags;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
+ struct hlist_node *entry = NULL;
+ int max_index = ARRAY_SIZE(wklock_history);
+#endif /* KERNEL_VER < KERNEL_VERSION(3, 7, 0) */
+
+ DHD_WAKE_SPIN_LOCK(&dhd->wakelock_spinlock, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+ hash_for_each_safe(wklock_history, bkt, tmp, wklock_info, wklock_node)
+#else
+ for (bkt = 0; bkt < max_index; bkt++)
+ hlist_for_each_entry_safe(wklock_info, entry, tmp,
+ &wklock_history[bkt], wklock_node)
+#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
+ {
+ GCC_DIAGNOSTIC_POP();
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+ hash_del(&wklock_info->wklock_node);
+#else
+ hlist_del_init(&wklock_info->wklock_node);
+#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
+ kfree(wklock_info);
+ }
+ DHD_WAKE_SPIN_UNLOCK(&dhd->wakelock_spinlock, flags);
+}
+
+void dhd_wk_lock_stats_dump(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
+ unsigned long flags;
+
+ DHD_ERROR(("DHD Printing wl_wake Lock/Unlock Record \r\n"));
+ DHD_WAKE_SPIN_LOCK(&dhd->wakelock_spinlock, flags);
+ dhd_wk_lock_rec_dump();
+ DHD_WAKE_SPIN_UNLOCK(&dhd->wakelock_spinlock, flags);
+
+}
+#else
+#define STORE_WKLOCK_RECORD(wklock_type)
+#endif /* DHD_TRACE_WAKE_LOCK */
+
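+/* Reference-counted host wakelock: only the 0 -> 1 transition takes the OS
+ * wakelock (or asks the SDIO bus layer to stay awake); nested callers just
+ * bump wakelock_counter. dhd_os_wake_unlock() mirrors this on the 1 -> 0
+ * transition.
+ */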
+int dhd_os_wake_lock(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ unsigned long flags;
+ int ret = 0;
+
+ if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
+ DHD_WAKE_SPIN_LOCK(&dhd->wakelock_spinlock, flags);
+ if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
+#ifdef CONFIG_HAS_WAKELOCK
+ dhd_wake_lock(&dhd->wl_wifi);
+#elif defined(BCMSDIO)
+ dhd_bus_dev_pm_stay_awake(pub);
+#endif
+ }
+#ifdef DHD_TRACE_WAKE_LOCK
+ if (atomic_read(&trace_wklock_onoff)) {
+ STORE_WKLOCK_RECORD(DHD_WAKE_LOCK);
+ }
+#endif /* DHD_TRACE_WAKE_LOCK */
+ dhd->wakelock_counter++;
+ ret = dhd->wakelock_counter;
+ DHD_WAKE_SPIN_UNLOCK(&dhd->wakelock_spinlock, flags);
+ }
+
+ return ret;
+}
+
+void dhd_event_wake_lock(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+ if (dhd) {
+#ifdef CONFIG_HAS_WAKELOCK
+ dhd_wake_lock(&dhd->wl_evtwake);
+#elif defined(BCMSDIO)
+ dhd_bus_dev_pm_stay_awake(pub);
+#endif
+ }
+}
+
+void
+dhd_pm_wake_lock_timeout(dhd_pub_t *pub, int val)
+{
+#ifdef CONFIG_HAS_WAKELOCK
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+ if (dhd) {
+ dhd_wake_lock_timeout(&dhd->wl_pmwake, msecs_to_jiffies(val));
+ }
+#endif /* CONFIG_HAS_WAKE_LOCK */
+}
+
+void
+dhd_txfl_wake_lock_timeout(dhd_pub_t *pub, int val)
+{
+#ifdef CONFIG_HAS_WAKELOCK
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+ if (dhd) {
+ dhd_wake_lock_timeout(&dhd->wl_txflwake, msecs_to_jiffies(val));
+ }
+#endif /* CONFIG_HAS_WAKE_LOCK */
+}
+
+void
+dhd_nan_wake_lock_timeout(dhd_pub_t *pub, int val)
+{
+#ifdef CONFIG_HAS_WAKELOCK
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+ if (dhd) {
+ dhd_wake_lock_timeout(&dhd->wl_nanwake, msecs_to_jiffies(val));
+ }
+#endif /* CONFIG_HAS_WAKE_LOCK */
+}
+
+int net_os_wake_lock(struct net_device *dev)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ int ret = 0;
+
+ if (dhd)
+ ret = dhd_os_wake_lock(&dhd->pub);
+ return ret;
+}
+
+int dhd_os_wake_unlock(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ unsigned long flags;
+ int ret = 0;
+
+ dhd_os_wake_lock_timeout(pub);
+ if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
+ DHD_WAKE_SPIN_LOCK(&dhd->wakelock_spinlock, flags);
+
+ if (dhd->wakelock_counter > 0) {
+ dhd->wakelock_counter--;
+#ifdef DHD_TRACE_WAKE_LOCK
+ if (atomic_read(&trace_wklock_onoff)) {
+ STORE_WKLOCK_RECORD(DHD_WAKE_UNLOCK);
+ }
+#endif /* DHD_TRACE_WAKE_LOCK */
+ if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
+#ifdef CONFIG_HAS_WAKELOCK
+ dhd_wake_unlock(&dhd->wl_wifi);
+#elif defined(BCMSDIO)
+ dhd_bus_dev_pm_relax(pub);
+#endif
+ }
+ ret = dhd->wakelock_counter;
+ }
+ DHD_WAKE_SPIN_UNLOCK(&dhd->wakelock_spinlock, flags);
+ }
+ return ret;
+}
+
+void dhd_event_wake_unlock(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+ if (dhd) {
+#ifdef CONFIG_HAS_WAKELOCK
+ dhd_wake_unlock(&dhd->wl_evtwake);
+#elif defined(BCMSDIO)
+ dhd_bus_dev_pm_relax(pub);
+#endif
+ }
+}
+
+void dhd_pm_wake_unlock(dhd_pub_t *pub)
+{
+#ifdef CONFIG_HAS_WAKELOCK
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+ if (dhd) {
+ /* if wl_pmwake is active, unlock it */
+ if (dhd_wake_lock_active(&dhd->wl_pmwake)) {
+ dhd_wake_unlock(&dhd->wl_pmwake);
+ }
+ }
+#endif /* CONFIG_HAS_WAKELOCK */
+}
+
+void dhd_txfl_wake_unlock(dhd_pub_t *pub)
+{
+#ifdef CONFIG_HAS_WAKELOCK
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+ if (dhd) {
+ /* if wl_txflwake is active, unlock it */
+ if (dhd_wake_lock_active(&dhd->wl_txflwake)) {
+ dhd_wake_unlock(&dhd->wl_txflwake);
+ }
+ }
+#endif /* CONFIG_HAS_WAKELOCK */
+}
+
+void dhd_nan_wake_unlock(dhd_pub_t *pub)
+{
+#ifdef CONFIG_HAS_WAKELOCK
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+ if (dhd) {
+ /* if wl_nanwake is active, unlock it */
+ if (dhd_wake_lock_active(&dhd->wl_nanwake)) {
+ dhd_wake_unlock(&dhd->wl_nanwake);
+ }
+ }
+#endif /* CONFIG_HAS_WAKELOCK */
+}
+
+int dhd_os_check_wakelock(dhd_pub_t *pub)
+{
+#if defined(CONFIG_HAS_WAKELOCK) || defined(BCMSDIO)
+#if defined(CONFIG_HAS_WAKELOCK)
+ int l1, l2;
+ int c, lock_active;
+#endif /* CONFIG_HAS_WAKELOCK */
+ dhd_info_t *dhd;
+
+ if (!pub)
+ return 0;
+ dhd = (dhd_info_t *)(pub->info);
+ if (!dhd) {
+ return 0;
+ }
+#endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */
+
+#ifdef CONFIG_HAS_WAKELOCK
+ c = dhd->wakelock_counter;
+ l1 = dhd_wake_lock_active(&dhd->wl_wifi);
+ l2 = dhd_wake_lock_active(&dhd->wl_wdwake);
+ lock_active = (l1 || l2);
+ /* Indicate to the SD Host to avoid going to suspend if internal locks are up */
+ if (lock_active) {
+ DHD_ERROR(("%s wakelock c-%d wl-%d wd-%d\n",
+ __FUNCTION__, c, l1, l2));
+ return 1;
+ }
+#elif defined(BCMSDIO)
+ if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub)) {
+ DHD_ERROR(("%s wakelock c-%d\n", __FUNCTION__, dhd->wakelock_counter));
+ return 1;
+ }
+#endif
+ return 0;
+}
+
+int
+dhd_os_check_wakelock_all(dhd_pub_t *pub)
+{
+#if defined(CONFIG_HAS_WAKELOCK) || defined(BCMSDIO)
+#if defined(CONFIG_HAS_WAKELOCK)
+ int l1, l2, l3, l4, l7, l8, l9, l10;
+ int l5 = 0, l6 = 0;
+ int c, lock_active;
+#endif /* CONFIG_HAS_WAKELOCK */
+ dhd_info_t *dhd;
+
+ if (!pub) {
+ return 0;
+ }
+ if (pub->up == 0) {
+ DHD_ERROR(("%s: skip as down in progress\n", __FUNCTION__));
+ return 0;
+ }
+ dhd = (dhd_info_t *)(pub->info);
+ if (!dhd) {
+ return 0;
+ }
+#endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */
+
+#ifdef CONFIG_HAS_WAKELOCK
+ c = dhd->wakelock_counter;
+ l1 = dhd_wake_lock_active(&dhd->wl_wifi);
+ l2 = dhd_wake_lock_active(&dhd->wl_wdwake);
+ l3 = dhd_wake_lock_active(&dhd->wl_rxwake);
+ l4 = dhd_wake_lock_active(&dhd->wl_ctrlwake);
+ l7 = dhd_wake_lock_active(&dhd->wl_evtwake);
+#ifdef BCMPCIE_OOB_HOST_WAKE
+ l5 = dhd_wake_lock_active(&dhd->wl_intrwake);
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+#ifdef DHD_USE_SCAN_WAKELOCK
+ l6 = dhd_wake_lock_active(&dhd->wl_scanwake);
+#endif /* DHD_USE_SCAN_WAKELOCK */
+ l8 = dhd_wake_lock_active(&dhd->wl_pmwake);
+ l9 = dhd_wake_lock_active(&dhd->wl_txflwake);
+ l10 = dhd_wake_lock_active(&dhd->wl_nanwake);
+ lock_active = (l1 || l2 || l3 || l4 || l5 || l6 || l7 || l8 || l9 || l10);
+
+ /* Indicate to the Host to avoid going to suspend if internal locks are up */
+ if (lock_active) {
+ DHD_ERROR(("%s wakelock c-%d wl-%d wd-%d rx-%d "
+ "ctl-%d intr-%d scan-%d evt-%d, pm-%d, txfl-%d nan-%d\n",
+ __FUNCTION__, c, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10));
+ return 1;
+ }
+#elif defined(BCMSDIO)
+ if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub)) {
+ DHD_ERROR(("%s wakelock c-%d\n", __FUNCTION__, dhd->wakelock_counter));
+ return 1;
+ }
+#endif /* defined(BCMSDIO) */
+ return 0;
+}
+
+int net_os_wake_unlock(struct net_device *dev)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ int ret = 0;
+
+ if (dhd)
+ ret = dhd_os_wake_unlock(&dhd->pub);
+ return ret;
+}
+
+int dhd_os_wd_wake_lock(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ unsigned long flags;
+ int ret = 0;
+
+ if (dhd) {
+ DHD_WAKE_SPIN_LOCK(&dhd->wakelock_spinlock, flags);
+ if (dhd->wakelock_wd_counter == 0 && !dhd->waive_wakelock) {
+#ifdef CONFIG_HAS_WAKELOCK
+ /* first use of wakelock_wd_counter: take the watchdog wakelock now */
+ dhd_wake_lock(&dhd->wl_wdwake);
+#endif
+ }
+ dhd->wakelock_wd_counter++;
+ ret = dhd->wakelock_wd_counter;
+ DHD_WAKE_SPIN_UNLOCK(&dhd->wakelock_spinlock, flags);
+ }
+ return ret;
+}
+
+int dhd_os_wd_wake_unlock(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ unsigned long flags;
+ int ret = 0;
+
+ if (dhd) {
+ DHD_WAKE_SPIN_LOCK(&dhd->wakelock_spinlock, flags);
+ if (dhd->wakelock_wd_counter > 0) {
+ dhd->wakelock_wd_counter = 0;
+ if (!dhd->waive_wakelock) {
+#ifdef CONFIG_HAS_WAKELOCK
+ dhd_wake_unlock(&dhd->wl_wdwake);
+#endif
+ }
+ }
+ DHD_WAKE_SPIN_UNLOCK(&dhd->wakelock_spinlock, flags);
+ }
+ return ret;
+}
+
+#ifdef BCMPCIE_OOB_HOST_WAKE
+void
+dhd_os_oob_irq_wake_lock_timeout(dhd_pub_t *pub, int val)
+{
+#ifdef CONFIG_HAS_WAKELOCK
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+ if (dhd) {
+ dhd_wake_lock_timeout(&dhd->wl_intrwake, msecs_to_jiffies(val));
+ }
+#endif /* CONFIG_HAS_WAKELOCK */
+}
+
+void
+dhd_os_oob_irq_wake_unlock(dhd_pub_t *pub)
+{
+#ifdef CONFIG_HAS_WAKELOCK
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+ if (dhd) {
+ /* if wl_intrwake is active, unlock it */
+ if (dhd_wake_lock_active(&dhd->wl_intrwake)) {
+ dhd_wake_unlock(&dhd->wl_intrwake);
+ }
+ }
+#endif /* CONFIG_HAS_WAKELOCK */
+}
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+
+#ifdef DHD_USE_SCAN_WAKELOCK
+void
+dhd_os_scan_wake_lock_timeout(dhd_pub_t *pub, int val)
+{
+#ifdef CONFIG_HAS_WAKELOCK
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+ if (dhd) {
+ dhd_wake_lock_timeout(&dhd->wl_scanwake, msecs_to_jiffies(val));
+ }
+#endif /* CONFIG_HAS_WAKELOCK */
+}
+
+void
+dhd_os_scan_wake_unlock(dhd_pub_t *pub)
+{
+#ifdef CONFIG_HAS_WAKELOCK
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+ if (dhd) {
+ /* if wl_scanwake is active, unlock it */
+ if (dhd_wake_lock_active(&dhd->wl_scanwake)) {
+ dhd_wake_unlock(&dhd->wl_scanwake);
+ }
+ }
+#endif /* CONFIG_HAS_WAKELOCK */
+}
+#endif /* DHD_USE_SCAN_WAKELOCK */
+
+/* Waive wakelocks for operations such as IOVARs in the suspend path; must be
+ * closed by a paired call to dhd_os_wake_lock_restore(). Returns the current
+ * watchdog wakelock counter.
+ */
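+/* Typical suspend-path usage (sketch):
+ *
+ *   dhd_os_wake_lock_waive(pub);
+ *   ... issue IOVARs that would otherwise bump wakelock_counter ...
+ *   dhd_os_wake_lock_restore(pub);
+ */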
+int dhd_os_wake_lock_waive(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ unsigned long flags;
+ int ret = 0;
+
+ if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
+ DHD_WAKE_SPIN_LOCK(&dhd->wakelock_spinlock, flags);
+
+ /* dhd_os_wake_lock_waive/dhd_os_wake_lock_restore must be paired */
+ if (dhd->waive_wakelock == FALSE) {
+#ifdef DHD_TRACE_WAKE_LOCK
+ if (atomic_read(&trace_wklock_onoff)) {
+ STORE_WKLOCK_RECORD(DHD_WAIVE_LOCK);
+ }
+#endif /* DHD_TRACE_WAKE_LOCK */
+ /* record current lock status */
+ dhd->wakelock_before_waive = dhd->wakelock_counter;
+ dhd->waive_wakelock = TRUE;
+ }
+ ret = dhd->wakelock_wd_counter;
+ DHD_WAKE_SPIN_UNLOCK(&dhd->wakelock_spinlock, flags);
+ }
+ return ret;
+}
+
+int dhd_os_wake_lock_restore(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ unsigned long flags;
+ int ret = 0;
+
+ if (!dhd)
+ return 0;
+ if ((dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) == 0)
+ return 0;
+
+ DHD_WAKE_SPIN_LOCK(&dhd->wakelock_spinlock, flags);
+
+ /* dhd_os_wake_lock_waive/dhd_os_wake_lock_restore must be paired */
+ if (!dhd->waive_wakelock)
+ goto exit;
+
+ dhd->waive_wakelock = FALSE;
+ /* If another context acquired a wakelock between waive and restore, make it
+ * up here by calling dhd_wake_lock or pm_stay_awake; if one was released in
+ * between, balance it by calling dhd_wake_unlock or pm_relax.
+ */
+#ifdef DHD_TRACE_WAKE_LOCK
+ if (atomic_read(&trace_wklock_onoff)) {
+ STORE_WKLOCK_RECORD(DHD_RESTORE_LOCK);
+ }
+#endif /* DHD_TRACE_WAKE_LOCK */
+
+ if (dhd->wakelock_before_waive == 0 && dhd->wakelock_counter > 0) {
+#ifdef CONFIG_HAS_WAKELOCK
+ dhd_wake_lock(&dhd->wl_wifi);
+#elif defined(BCMSDIO)
+ dhd_bus_dev_pm_stay_awake(&dhd->pub);
+#endif
+ } else if (dhd->wakelock_before_waive > 0 && dhd->wakelock_counter == 0) {
+#ifdef CONFIG_HAS_WAKELOCK
+ dhd_wake_unlock(&dhd->wl_wifi);
+#elif defined(BCMSDIO)
+ dhd_bus_dev_pm_relax(&dhd->pub);
+#endif
+ }
+ dhd->wakelock_before_waive = 0;
+exit:
+ ret = dhd->wakelock_wd_counter;
+ DHD_WAKE_SPIN_UNLOCK(&dhd->wakelock_spinlock, flags);
+ return ret;
+}
+
+void dhd_os_wake_lock_init(struct dhd_info *dhd)
+{
+ DHD_TRACE(("%s: initialize wake_lock_counters\n", __FUNCTION__));
+ dhd->wakelock_counter = 0;
+ dhd->wakelock_rx_timeout_enable = 0;
+ dhd->wakelock_ctrl_timeout_enable = 0;
+ /* wakelocks prevent the system from entering a low-power state */
+#ifdef CONFIG_HAS_WAKELOCK
+ // terence 20161023: do not destroy wl_wifi on wlan down, otherwise dhd_ioctl_entry may dereference a NULL pointer
+ dhd_wake_lock_init(&dhd->wl_rxwake, WAKE_LOCK_SUSPEND, "wlan_rx_wake");
+ dhd_wake_lock_init(&dhd->wl_ctrlwake, WAKE_LOCK_SUSPEND, "wlan_ctrl_wake");
+ dhd_wake_lock_init(&dhd->wl_evtwake, WAKE_LOCK_SUSPEND, "wlan_evt_wake");
+ dhd_wake_lock_init(&dhd->wl_pmwake, WAKE_LOCK_SUSPEND, "wlan_pm_wake");
+ dhd_wake_lock_init(&dhd->wl_txflwake, WAKE_LOCK_SUSPEND, "wlan_txfl_wake");
+#ifdef BCMPCIE_OOB_HOST_WAKE
+ dhd_wake_lock_init(&dhd->wl_intrwake, WAKE_LOCK_SUSPEND, "wlan_oob_irq_wake");
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+#ifdef DHD_USE_SCAN_WAKELOCK
+ dhd_wake_lock_init(&dhd->wl_scanwake, WAKE_LOCK_SUSPEND, "wlan_scan_wake");
+#endif /* DHD_USE_SCAN_WAKELOCK */
+ dhd_wake_lock_init(&dhd->wl_nanwake, WAKE_LOCK_SUSPEND, "wlan_nan_wake");
+#endif /* CONFIG_HAS_WAKELOCK */
+#ifdef DHD_TRACE_WAKE_LOCK
+ dhd_wk_lock_trace_init(dhd);
+#endif /* DHD_TRACE_WAKE_LOCK */
+}
+
+void dhd_os_wake_lock_destroy(struct dhd_info *dhd)
+{
+ DHD_TRACE(("%s: deinit wake_lock_counters\n", __FUNCTION__));
+#ifdef CONFIG_HAS_WAKELOCK
+ dhd->wakelock_counter = 0;
+ dhd->wakelock_rx_timeout_enable = 0;
+ dhd->wakelock_ctrl_timeout_enable = 0;
+ // terence 20161023: do not destroy wl_wifi on wlan down, otherwise dhd_ioctl_entry may dereference a NULL pointer
+ dhd_wake_lock_unlock_destroy(&dhd->wl_rxwake);
+ dhd_wake_lock_unlock_destroy(&dhd->wl_ctrlwake);
+ dhd_wake_lock_unlock_destroy(&dhd->wl_evtwake);
+ dhd_wake_lock_unlock_destroy(&dhd->wl_pmwake);
+ dhd_wake_lock_unlock_destroy(&dhd->wl_txflwake);
+#ifdef BCMPCIE_OOB_HOST_WAKE
+ dhd_wake_lock_unlock_destroy(&dhd->wl_intrwake);
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+#ifdef DHD_USE_SCAN_WAKELOCK
+ dhd_wake_lock_unlock_destroy(&dhd->wl_scanwake);
+#endif /* DHD_USE_SCAN_WAKELOCK */
+ dhd_wake_lock_unlock_destroy(&dhd->wl_nanwake);
+#ifdef DHD_TRACE_WAKE_LOCK
+ dhd_wk_lock_trace_deinit(dhd);
+#endif /* DHD_TRACE_WAKE_LOCK */
+#else /* !CONFIG_HAS_WAKELOCK */
+ if (dhd->wakelock_counter > 0) {
+ DHD_ERROR(("%s: wake lock count=%d\n",
+ __FUNCTION__, dhd->wakelock_counter));
+ while (dhd_os_wake_unlock(&dhd->pub));
+ }
+#endif /* CONFIG_HAS_WAKELOCK */
+}
+
+bool dhd_os_check_if_up(dhd_pub_t *pub)
+{
+ if (!pub)
+ return FALSE;
+ return pub->up;
+}
+
+/* function to collect firmware, chip id and chip version info */
+void dhd_set_version_info(dhd_pub_t *dhdp, char *fw)
+{
+ int i;
+
+ i = snprintf(info_string, sizeof(info_string),
+ " Driver: %s\n%s Firmware: %s\n%s CLM: %s ",
+ EPI_VERSION_STR,
+ DHD_LOG_PREFIXS, fw,
+ DHD_LOG_PREFIXS, clm_version);
+ printf("%s\n", info_string);
+
+ if (!dhdp)
+ return;
+
+ i = snprintf(&info_string[i], sizeof(info_string) - i,
+ "\n Chip: %x Rev %x", dhd_conf_get_chip(dhdp),
+ dhd_conf_get_chiprev(dhdp));
+}
+
+int dhd_ioctl_entry_local(struct net_device *net, wl_ioctl_t *ioc, int cmd)
+{
+ int ifidx;
+ int ret = 0;
+ dhd_info_t *dhd = NULL;
+
+ if (!net || !DEV_PRIV(net)) {
+ DHD_ERROR(("%s invalid parameter net %p dev_priv %p\n",
+ __FUNCTION__, net, DEV_PRIV(net)));
+ return -EINVAL;
+ }
+
+ dhd = DHD_DEV_INFO(net);
+ if (!dhd)
+ return -EINVAL;
+
+ ifidx = dhd_net2idx(dhd, net);
+ if (ifidx == DHD_BAD_IF) {
+ DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
+ return -ENODEV;
+ }
+
+ DHD_OS_WAKE_LOCK(&dhd->pub);
+
+ ret = dhd_wl_ioctl(&dhd->pub, ifidx, ioc, ioc->buf, ioc->len);
+ dhd_check_hang(net, &dhd->pub, ret);
+
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+
+ return ret;
+}
+
+bool dhd_os_check_hang(dhd_pub_t *dhdp, int ifidx, int ret)
+{
+ struct net_device *net;
+
+ net = dhd_idx2net(dhdp, ifidx);
+ if (!net) {
+ DHD_ERROR(("%s : Invalid index : %d\n", __FUNCTION__, ifidx));
+ return -EINVAL;
+ }
+
+ return dhd_check_hang(net, dhdp, ret);
+}
+
+/* Return the driver instance (unit) number */
+int dhd_get_instance(dhd_pub_t *dhdp)
+{
+ return dhdp->info->unit;
+}
+
+#if defined(WL_CFG80211) && defined(SUPPORT_DEEP_SLEEP)
+#define MAX_TRY_CNT 5 /* Number of tries to disable deepsleep */
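+/* flag == 1: disable MPC, then force firmware deepsleep. flag == 0: clear
+ * deepsleep, polling the iovar up to MAX_TRY_CNT times until it reads back
+ * 0, then re-enable MPC.
+ */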
+int dhd_deepsleep(struct net_device *dev, int flag)
+{
+ char iovbuf[20];
+ uint powervar = 0;
+ dhd_info_t *dhd;
+ dhd_pub_t *dhdp;
+ int cnt = 0;
+ int ret = 0;
+
+ dhd = DHD_DEV_INFO(dev);
+ dhdp = &dhd->pub;
+
+ switch (flag) {
+ case 1 : /* Deepsleep on */
+ DHD_ERROR(("[WiFi] Deepsleep On\n"));
+ /* give some time to sysioc_work before deepsleep */
+ OSL_SLEEP(200);
+#ifdef PKT_FILTER_SUPPORT
+ /* disable pkt filter */
+ dhd_enable_packet_filter(0, dhdp);
+#endif /* PKT_FILTER_SUPPORT */
+ /* Disable MPC */
+ powervar = 0;
+ ret = dhd_iovar(dhdp, 0, "mpc", (char *)&powervar, sizeof(powervar), NULL,
+ 0, TRUE);
+ if (ret) {
+ DHD_ERROR(("%s: mpc failed:%d\n", __FUNCTION__, ret));
+ }
+ /* Enable Deepsleep */
+ powervar = 1;
+ ret = dhd_iovar(dhdp, 0, "deepsleep", (char *)&powervar, sizeof(powervar),
+ NULL, 0, TRUE);
+ if (ret) {
+ DHD_ERROR(("%s: deepsleep failed:%d\n", __FUNCTION__, ret));
+ }
+ break;
+
+ case 0: /* Deepsleep Off */
+ DHD_ERROR(("[WiFi] Deepsleep Off\n"));
+
+ /* Disable Deepsleep */
+ for (cnt = 0; cnt < MAX_TRY_CNT; cnt++) {
+ powervar = 0;
+ ret = dhd_iovar(dhdp, 0, "deepsleep", (char *)&powervar,
+ sizeof(powervar), NULL, 0, TRUE);
+ if (ret) {
+ DHD_ERROR(("%s: deepsleep failed:%d\n", __FUNCTION__, ret));
+ }
+
+ ret = dhd_iovar(dhdp, 0, "deepsleep", (char *)&powervar,
+ sizeof(powervar), iovbuf, sizeof(iovbuf), FALSE);
+ if (ret < 0) {
+ DHD_ERROR(("the error of dhd deepsleep status"
+ " ret value :%d\n", ret));
+ } else {
+ if (!(*(int *)iovbuf)) {
+ DHD_ERROR(("deepsleep mode is 0,"
+ " count: %d\n", cnt));
+ break;
+ }
+ }
+ }
+
+ /* Enable MPC */
+ powervar = 1;
+ ret = dhd_iovar(dhdp, 0, "mpc", (char *)&powervar, sizeof(powervar),
+ NULL, 0, TRUE);
+ if (ret) {
+ DHD_ERROR(("%s: mpc failed:%d\n", __FUNCTION__, ret));
+ }
+ break;
+ }
+
+ return 0;
+}
+#endif /* WL_CFG80211 && SUPPORT_DEEP_SLEEP */
+
+#ifdef PROP_TXSTATUS
+
+void dhd_wlfc_plat_init(void *dhd)
+{
+#ifdef USE_DYNAMIC_F2_BLKSIZE
+ dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
+#endif /* USE_DYNAMIC_F2_BLKSIZE */
+ return;
+}
+
+void dhd_wlfc_plat_deinit(void *dhd)
+{
+#ifdef USE_DYNAMIC_F2_BLKSIZE
+ dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, sd_f2_blocksize);
+#endif /* USE_DYNAMIC_F2_BLKSIZE */
+ return;
+}
+
+bool dhd_wlfc_skip_fc(void * dhdp, uint8 idx)
+{
+#ifdef SKIP_WLFC_ON_CONCURRENT
+
+#ifdef WL_CFG80211
+ struct net_device * net = dhd_idx2net((dhd_pub_t *)dhdp, idx);
+ if (net)
+ /* enable flow control in vsdb mode */
+ return !(wl_cfg80211_is_concurrent_mode(net));
+#else
+ return TRUE; /* skip flow control */
+#endif /* WL_CFG80211 */
+
+#else
+ return FALSE;
+#endif /* SKIP_WLFC_ON_CONCURRENT */
+ return FALSE;
+}
+#endif /* PROP_TXSTATUS */
+
+#ifdef BCMDBGFS
+#include <linux/debugfs.h>
+
+typedef struct dhd_dbgfs {
+ struct dentry *debugfs_dir;
+ struct dentry *debugfs_mem;
+ dhd_pub_t *dhdp;
+ uint32 size;
+} dhd_dbgfs_t;
+
+dhd_dbgfs_t g_dbgfs;
+
+extern uint32 dhd_readregl(void *bp, uint32 addr);
+extern uint32 dhd_writeregl(void *bp, uint32 addr, uint32 data);
+
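+/* debugfs backend for the "dhd/mem" node: reads and writes are funnelled
+ * through dhd_readregl()/dhd_writeregl() as 4-byte-aligned backplane
+ * accesses at the current file offset.
+ */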
+static int
+dhd_dbg_state_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t
+dhd_dbg_state_read(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ ssize_t rval;
+ uint32 tmp;
+ loff_t pos = *ppos;
+ size_t ret;
+
+ if (pos < 0)
+ return -EINVAL;
+ if (pos >= g_dbgfs.size || !count)
+ return 0;
+ if (count > g_dbgfs.size - pos)
+ count = g_dbgfs.size - pos;
+
+ /* XXX: The user can request any length they want, but they are getting 4 bytes */
+ /* Basically enforce aligned 4 byte reads. It's up to the user to work out the details */
+ tmp = dhd_readregl(g_dbgfs.dhdp->bus, file->f_pos & (~3));
+
+ ret = copy_to_user(ubuf, &tmp, 4);
+ if (ret == count)
+ return -EFAULT;
+
+ count -= ret;
+ *ppos = pos + count;
+ rval = count;
+
+ return rval;
+}
+
+static ssize_t
+dhd_debugfs_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos)
+{
+ loff_t pos = *ppos;
+ size_t ret;
+ uint32 buf;
+
+ if (pos < 0)
+ return -EINVAL;
+ if (pos >= g_dbgfs.size || !count)
+ return 0;
+ if (count > g_dbgfs.size - pos)
+ count = g_dbgfs.size - pos;
+
+ ret = copy_from_user(&buf, ubuf, sizeof(uint32));
+ if (ret == count)
+ return -EFAULT;
+
+ /* XXX: The user can request any length they want, but they are getting 4 bytes */
+ /* Basically enforce aligned 4 byte writes. It's up to the user to work out the details */
+ dhd_writeregl(g_dbgfs.dhdp->bus, file->f_pos & (~3), buf);
+
+ return count;
+}
+
+loff_t
+dhd_debugfs_lseek(struct file *file, loff_t off, int whence)
+{
+ loff_t pos = -1;
+
+ switch (whence) {
+ case 0:
+ pos = off;
+ break;
+ case 1:
+ pos = file->f_pos + off;
+ break;
+ case 2:
+ pos = g_dbgfs.size - off;
+ }
+ return (pos < 0 || pos > g_dbgfs.size) ? -EINVAL : (file->f_pos = pos);
+}
+
+static const struct file_operations dhd_dbg_state_ops = {
+ .read = dhd_dbg_state_read,
+ .write = dhd_debugfs_write,
+ .open = dhd_dbg_state_open,
+ .llseek = dhd_debugfs_lseek
+};
+
+static void dhd_dbgfs_create(void)
+{
+ if (g_dbgfs.debugfs_dir) {
+ g_dbgfs.debugfs_mem = debugfs_create_file("mem", 0644, g_dbgfs.debugfs_dir,
+ NULL, &dhd_dbg_state_ops);
+ }
+}
+
+void dhd_dbgfs_init(dhd_pub_t *dhdp)
+{
+ g_dbgfs.dhdp = dhdp;
+ g_dbgfs.size = 0x20000000; /* Allow access to various cores regs */
+
+ g_dbgfs.debugfs_dir = debugfs_create_dir("dhd", 0);
+ if (IS_ERR(g_dbgfs.debugfs_dir)) {
+ g_dbgfs.debugfs_dir = NULL;
+ return;
+ }
+
+ dhd_dbgfs_create();
+
+ return;
+}
+
+void dhd_dbgfs_remove(void)
+{
+ debugfs_remove(g_dbgfs.debugfs_mem);
+ debugfs_remove(g_dbgfs.debugfs_dir);
+
+ bzero((unsigned char *) &g_dbgfs, sizeof(g_dbgfs));
+}
+#endif /* BCMDBGFS */
+
+#ifdef CUSTOM_SET_CPUCORE
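+/* Pin the DPC and RXF threads to their dedicated cores (set == TRUE) or back
+ * to PRIMARY_CPUCORE (set == FALSE), retrying up to MAX_RETRY_SET_CPUCORE
+ * times. Only takes effect while a VHT80 channel is active.
+ */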
+void dhd_set_cpucore(dhd_pub_t *dhd, int set)
+{
+ int e_dpc = 0, e_rxf = 0, retry_set = 0;
+
+ if (!(dhd->chan_isvht80)) {
+ DHD_ERROR(("%s: chan_status(%d) cpucore!!!\n", __FUNCTION__, dhd->chan_isvht80));
+ return;
+ }
+
+ if (DPC_CPUCORE) {
+ do {
+ if (set == TRUE) {
+ e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
+ cpumask_of(DPC_CPUCORE));
+ } else {
+ e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
+ cpumask_of(PRIMARY_CPUCORE));
+ }
+ if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
+ DHD_ERROR(("%s: dpc(%d) invalid cpu!\n", __FUNCTION__, e_dpc));
+ return;
+ }
+ if (e_dpc < 0)
+ OSL_SLEEP(1);
+ } while (e_dpc < 0);
+ }
+ if (RXF_CPUCORE) {
+ do {
+ if (set == TRUE) {
+ e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
+ cpumask_of(RXF_CPUCORE));
+ } else {
+ e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
+ cpumask_of(PRIMARY_CPUCORE));
+ }
+ if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
+ DHD_ERROR(("%s: rxf(%d) invalid cpu!\n", __FUNCTION__, e_rxf));
+ return;
+ }
+ if (e_rxf < 0)
+ OSL_SLEEP(1);
+ } while (e_rxf < 0);
+ }
+ DHD_TRACE(("%s: set(%d) cpucore success!\n", __FUNCTION__, set));
+
+ return;
+}
+#endif /* CUSTOM_SET_CPUCORE */
+#if defined(DHD_TCP_WINSIZE_ADJUST)
+static int dhd_port_list_match(int port)
+{
+ int i;
+ for (i = 0; i < MAX_TARGET_PORTS; i++) {
+ if (target_ports[i] == port)
+ return 1;
+ }
+ return 0;
+}
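+/* In hostap mode, scale up an undersized advertised TCP receive window by
+ * WIN_SIZE_SCALE_FACTOR for matched destination ports, patching the TCP
+ * checksum incrementally (in the spirit of RFC 1624) rather than
+ * recomputing it over the segment.
+ */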
+static void dhd_adjust_tcp_winsize(int op_mode, struct sk_buff *skb)
+{
+ struct iphdr *ipheader;
+ struct tcphdr *tcpheader;
+ uint16 win_size;
+ int32 incremental_checksum;
+
+ if (!(op_mode & DHD_FLAG_HOSTAP_MODE))
+ return;
+ if (skb == NULL || skb->data == NULL)
+ return;
+
+ ipheader = (struct iphdr*)(skb->data);
+
+ if (ipheader->protocol == IPPROTO_TCP) {
+ tcpheader = (struct tcphdr*) skb_pull(skb, (ipheader->ihl)<<2);
+ if (tcpheader) {
+ win_size = ntoh16(tcpheader->window);
+ if (win_size < MIN_TCP_WIN_SIZE &&
+ dhd_port_list_match(ntoh16(tcpheader->dest))) {
+ incremental_checksum = ntoh16(tcpheader->check);
+ incremental_checksum += win_size - win_size*WIN_SIZE_SCALE_FACTOR;
+ if (incremental_checksum < 0)
+ --incremental_checksum;
+ tcpheader->window = hton16(win_size*WIN_SIZE_SCALE_FACTOR);
+ tcpheader->check = hton16((unsigned short)incremental_checksum);
+ }
+ }
+ skb_push(skb, (ipheader->ihl)<<2);
+ }
+}
+#endif /* DHD_TCP_WINSIZE_ADJUST */
+
+#ifdef DHD_MCAST_REGEN
+/* Get interface specific ap_isolate configuration */
+int dhd_get_mcast_regen_bss_enable(dhd_pub_t *dhdp, uint32 idx)
+{
+ dhd_info_t *dhd = dhdp->info;
+ dhd_if_t *ifp;
+
+ ASSERT(idx < DHD_MAX_IFS);
+
+ ifp = dhd->iflist[idx];
+
+ return ifp->mcast_regen_bss_enable;
+}
+
+/* Set interface specific mcast_regen configuration */
+int dhd_set_mcast_regen_bss_enable(dhd_pub_t *dhdp, uint32 idx, int val)
+{
+ dhd_info_t *dhd = dhdp->info;
+ dhd_if_t *ifp;
+
+ ASSERT(idx < DHD_MAX_IFS);
+
+ ifp = dhd->iflist[idx];
+
+ ifp->mcast_regen_bss_enable = val;
+
+ /* Disable rx_pkt_chain feature for interface, if mcast_regen feature
+ * is enabled
+ */
+ dhd_update_rx_pkt_chainable_state(dhdp, idx);
+ return BCME_OK;
+}
+#endif /* DHD_MCAST_REGEN */
+
+/* Get interface specific ap_isolate configuration */
+int dhd_get_ap_isolate(dhd_pub_t *dhdp, uint32 idx)
+{
+ dhd_info_t *dhd = dhdp->info;
+ dhd_if_t *ifp;
+
+ ASSERT(idx < DHD_MAX_IFS);
+
+ ifp = dhd->iflist[idx];
+
+ return ifp->ap_isolate;
+}
+
+/* Set interface specific ap_isolate configuration */
+int dhd_set_ap_isolate(dhd_pub_t *dhdp, uint32 idx, int val)
+{
+ dhd_info_t *dhd = dhdp->info;
+ dhd_if_t *ifp;
+
+ ASSERT(idx < DHD_MAX_IFS);
+
+ ifp = dhd->iflist[idx];
+
+ if (ifp)
+ ifp->ap_isolate = val;
+
+ return 0;
+}
+
+#ifdef DHD_RND_DEBUG
+/*
+ * XXX The filename used to store .rnd.(in/out) is defined per platform.
+ * - The default path on CUSTOMER_HW4 devices is PLATFORM_PATH".rnd"
+ * - The Brix platform takes the default path "/installmedia/.rnd"
+ * New platforms can add their ifdefs accordingly below.
+ */
+
+#ifdef CUSTOMER_HW4_DEBUG
+#define RNDINFO PLATFORM_PATH".rnd"
+#elif defined(CUSTOMER_HW2) || defined(BOARD_HIKEY)
+#define RNDINFO "/data/misc/wifi/.rnd"
+#elif defined(OEM_ANDROID) && defined(__ARM_ARCH_7A__)
+#define RNDINFO "/data/misc/wifi/.rnd"
+#elif defined(OEM_ANDROID)
+#define RNDINFO_LIVE "/installmedia/.rnd"
+#define RNDINFO_INST "/data/.rnd"
+#define RNDINFO RNDINFO_LIVE
+#else /* FC19 and Others */
+#define RNDINFO "/root/.rnd"
+#endif /* CUSTOMER_HW4_DEBUG */
+
+#define RND_IN RNDINFO".in"
+#define RND_OUT RNDINFO".out"
+
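+/* Load previously saved entropy from the .rnd.in file. The on-disk layout
+ * is the 32-bit rnd_len followed by rnd_len opaque bytes, mirroring what
+ * dhd_dump_rnd_info() writes to .rnd.out below.
+ */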
+int
+dhd_get_rnd_info(dhd_pub_t *dhd)
+{
+ struct file *fp = NULL;
+ int ret = BCME_ERROR;
+ char *filepath = RND_IN;
+ uint32 file_mode = O_RDONLY;
+ mm_segment_t old_fs;
+ loff_t pos = 0;
+
+ /* Open the RND input file for reading */
+ fp = filp_open(filepath, file_mode, 0);
+ if (IS_ERR(fp)) {
+ DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
+#if defined(CONFIG_X86) && defined(OEM_ANDROID)
+ /* Check if it is Live Brix Image */
+ if (bcmstrstr(filepath, RNDINFO_LIVE)) {
+ goto err1;
+ }
+ /* Try if it is Installed Brix Image */
+ filepath = RNDINFO_INST".in";
+ DHD_ERROR(("%s: Try File [%s]\n", __FUNCTION__, filepath));
+ fp = filp_open(filepath, file_mode, 0);
+ if (IS_ERR(fp)) {
+ DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
+ goto err1;
+ }
+#else /* Non Brix Android platform */
+ goto err1;
+#endif /* CONFIG_X86 && OEM_ANDROID */
+ }
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+
+ /* Read the stored entropy length */
+ ret = vfs_read(fp, (char *)&dhd->rnd_len, sizeof(dhd->rnd_len), &pos);
+ if (ret < 0) {
+ DHD_ERROR(("%s: rnd_len read error, ret=%d\n", __FUNCTION__, ret));
+ goto err2;
+ }
+
+ dhd->rnd_buf = MALLOCZ(dhd->osh, dhd->rnd_len);
+ if (!dhd->rnd_buf) {
+ DHD_ERROR(("%s: MALLOC failed\n", __FUNCTION__));
+ goto err2;
+ }
+
+ ret = vfs_read(fp, (char *)dhd->rnd_buf, dhd->rnd_len, &pos);
+ if (ret < 0) {
+ DHD_ERROR(("%s: rnd_buf read error, ret=%d\n", __FUNCTION__, ret));
+ goto err3;
+ }
+
+ set_fs(old_fs);
+ filp_close(fp, NULL);
+
+ DHD_ERROR(("%s: RND read from %s\n", __FUNCTION__, filepath));
+ return BCME_OK;
+
+err3:
+ MFREE(dhd->osh, dhd->rnd_buf, dhd->rnd_len);
+ dhd->rnd_buf = NULL;
+err2:
+ set_fs(old_fs);
+ filp_close(fp, NULL);
+err1:
+ return BCME_ERROR;
+}
+
+int
+dhd_dump_rnd_info(dhd_pub_t *dhd, uint8 *rnd_buf, uint32 rnd_len)
+{
+ struct file *fp = NULL;
+ int ret = BCME_OK;
+ char *filepath = RND_OUT;
+ uint32 file_mode = O_CREAT | O_WRONLY | O_SYNC;
+ mm_segment_t old_fs;
+ loff_t pos = 0;
+
+ /* Open the RND output file for writing */
+ fp = filp_open(filepath, file_mode, 0664);
+ if (IS_ERR(fp)) {
+ DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
+#if defined(CONFIG_X86) && defined(OEM_ANDROID)
+ /* Check if it is Live Brix Image */
+ if (bcmstrstr(filepath, RNDINFO_LIVE)) {
+ goto err1;
+ }
+ /* Try if it is Installed Brix Image */
+ filepath = RNDINFO_INST".out";
+ DHD_ERROR(("%s: Try File [%s]\n", __FUNCTION__, filepath));
+ fp = filp_open(filepath, file_mode, 0664);
+ if (IS_ERR(fp)) {
+ DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
+ goto err1;
+ }
+#else /* Non Brix Android platform */
+ goto err1;
+#endif /* CONFIG_X86 && OEM_ANDROID */
+ }
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+
+ /* Write the entropy length, then the entropy buffer */
+ ret = vfs_write(fp, (char *)&rnd_len, sizeof(rnd_len), &pos);
+ if (ret < 0) {
+ DHD_ERROR(("%s: rnd_len write error, ret=%d\n", __FUNCTION__, ret));
+ goto err2;
+ }
+
+ ret = vfs_write(fp, (char *)rnd_buf, rnd_len, &pos);
+ if (ret < 0) {
+ DHD_ERROR(("%s: rnd_buf write error, ret=%d\n", __FUNCTION__, ret));
+ goto err2;
+ }
+
+ set_fs(old_fs);
+ filp_close(fp, NULL);
+ DHD_ERROR(("%s: RND written to %s\n", __FUNCTION__, filepath));
+ return BCME_OK;
+
+err2:
+ set_fs(old_fs);
+ filp_close(fp, NULL);
+err1:
+ return BCME_ERROR;
+
+}
+#endif /* DHD_RND_DEBUG */
+
+#ifdef DHD_FW_COREDUMP
+void dhd_schedule_memdump(dhd_pub_t *dhdp, uint8 *buf, uint32 size)
+{
+ dhd_dump_t *dump = NULL;
+ unsigned long flags = 0;
+ dhd_info_t *dhd_info = NULL;
+#if defined(DHD_LOG_DUMP) && !defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
+ log_dump_type_t type = DLD_BUF_TYPE_ALL;
+#endif /* DHD_LOG_DUMP && !DHD_DUMP_FILE_WRITE_FROM_KERNEL */
+
+ dhd_info = (dhd_info_t *)dhdp->info;
+ dump = (dhd_dump_t *)MALLOC(dhdp->osh, sizeof(dhd_dump_t));
+ if (dump == NULL) {
+ DHD_ERROR(("%s: dhd dump memory allocation failed\n", __FUNCTION__));
+ return;
+ }
+ dump->buf = buf;
+ dump->bufsize = size;
+#ifdef BCMPCIE
+ dhd_get_hscb_info(dhdp, (void*)(&dump->hscb_buf),
+ (uint32 *)(&dump->hscb_bufsize));
+#else
+ dump->hscb_bufsize = 0;
+#endif /* BCMPCIE */
+
+#ifdef DHD_LOG_DUMP
+ dhd_print_buf_addr(dhdp, "memdump", buf, size);
+#if !defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
+ /* Print out buffer information */
+ dhd_log_dump_buf_addr(dhdp, &type);
+#endif /* !DHD_DUMP_FILE_WRITE_FROM_KERNEL */
+#endif /* DHD_LOG_DUMP */
+
+ if (dhdp->memdump_enabled == DUMP_MEMONLY) {
+ BUG_ON(1);
+ }
+
+ if ((dhdp->memdump_type == DUMP_TYPE_DONGLE_INIT_FAILURE) ||
+ (dhdp->memdump_type == DUMP_TYPE_DUE_TO_BT) ||
+ (dhdp->memdump_type == DUMP_TYPE_SMMU_FAULT))
+ {
+ dhd_info->scheduled_memdump = FALSE;
+ dhd_mem_dump((void *)dhdp->info, (void *)dump, 0);
+ /* No need to collect debug dump for init failure */
+ if (dhdp->memdump_type == DUMP_TYPE_DONGLE_INIT_FAILURE) {
+ return;
+ }
+#ifdef DHD_LOG_DUMP
+ {
+ log_dump_type_t *flush_type = NULL;
+ /* for dongle init fail cases, 'dhd_mem_dump' does
+ * not call 'dhd_log_dump', so call it here.
+ */
+ flush_type = MALLOCZ(dhdp->osh,
+ sizeof(log_dump_type_t));
+ if (flush_type) {
+ *flush_type = DLD_BUF_TYPE_ALL;
+ DHD_ERROR(("%s: calling log dump.. \n", __FUNCTION__));
+ dhd_log_dump(dhdp->info, flush_type, 0);
+ }
+ }
+#endif /* DHD_LOG_DUMP */
+ return;
+ }
+
+ dhd_info->scheduled_memdump = TRUE;
+
+ /* bus busy bit for mem dump will be cleared in mem dump
+ * work item context, after mem dump file is written
+ */
+ DHD_GENERAL_LOCK(dhdp, flags);
+ DHD_BUS_BUSY_SET_IN_MEMDUMP(dhdp);
+ DHD_GENERAL_UNLOCK(dhdp, flags);
+ DHD_ERROR(("%s: scheduling mem dump.. \n", __FUNCTION__));
+ dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dump,
+ DHD_WQ_WORK_SOC_RAM_DUMP, dhd_mem_dump, DHD_WQ_WORK_PRIORITY_HIGH);
+}
+
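+/* Deferred-work handler for the SoC RAM dump: collects optional SDTC/SSSR
+ * dumps, hands the buffer to the HAL (or writes it from the kernel), flushes
+ * log buffers, and finally clears the mem-dump bus-busy state and any
+ * pending HANG event.
+ */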
+static void
+dhd_mem_dump(void *handle, void *event_info, u8 event)
+{
+ dhd_info_t *dhd = handle;
+ dhd_pub_t *dhdp = NULL;
+ unsigned long flags = 0;
+
+#if defined(WL_CFG80211) && defined(DHD_FILE_DUMP_EVENT)
+ int ret = 0;
+#endif /* WL_CFG80211 && DHD_FILE_DUMP_EVENT */
+ dhd_dump_t *dump = NULL;
+#ifdef DHD_COREDUMP
+ char pc_fn[DHD_FUNC_STR_LEN] = "\0";
+ char lr_fn[DHD_FUNC_STR_LEN] = "\0";
+ char *map_path = VENDOR_PATH CONFIG_BCMDHD_MAP_PATH;
+ trap_t *tr;
+#endif /* DHD_COREDUMP */
+
+ DHD_ERROR(("%s: ENTER \n", __FUNCTION__));
+
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ dhdp = &dhd->pub;
+ if (!dhdp) {
+ DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ DHD_GENERAL_LOCK(dhdp, flags);
+ if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
+ DHD_GENERAL_UNLOCK(dhdp, flags);
+ DHD_ERROR(("%s: bus is down! can't collect mem dump. \n", __FUNCTION__));
+ goto exit;
+ }
+ DHD_GENERAL_UNLOCK(dhdp, flags);
+
+ dump = (dhd_dump_t *)event_info;
+ if (!dump) {
+ DHD_ERROR(("%s: dump is NULL\n", __FUNCTION__));
+ goto exit;
+ }
+
+#ifdef DHD_SDTC_ETB_DUMP
+ if (dhdp->collect_sdtc) {
+ dhd_sdtc_etb_dump(dhdp);
+ dhdp->collect_sdtc = FALSE;
+ }
+#endif /* DHD_SDTC_ETB_DUMP */
+
+#ifdef DHD_SSSR_DUMP
+ DHD_ERROR(("%s: sssr_enab=%d dhdp->sssr_inited=%d dhdp->collect_sssr=%d\n",
+ __FUNCTION__, sssr_enab, dhdp->sssr_inited, dhdp->collect_sssr));
+ if (sssr_enab && dhdp->sssr_inited && dhdp->collect_sssr) {
+ if (fis_enab && dhdp->sssr_reg_info->rev3.fis_enab) {
+ int bcmerror = dhd_bus_fis_trigger(dhdp);
+
+ if (bcmerror == BCME_OK) {
+ dhd_bus_fis_dump(dhdp);
+ } else {
+ DHD_ERROR(("%s: FIS trigger failed: %d\n",
+ __FUNCTION__, bcmerror));
+ }
+ } else {
+ DHD_ERROR(("%s: FIS not enabled (%d:%d), collect legacy sssr\n",
+ __FUNCTION__, fis_enab, dhdp->sssr_reg_info->rev3.fis_enab));
+ dhdpcie_sssr_dump(dhdp);
+ }
+ }
+ dhdp->collect_sssr = FALSE;
+#endif /* DHD_SSSR_DUMP */
+
+#if defined(WL_CFG80211) && defined(DHD_FILE_DUMP_EVENT)
+ ret = dhd_wait_for_file_dump(dhdp);
+#ifdef BOARD_HIKEY
+ /* For Hikey do force kernel write of socram if HAL dump fails */
+ if (ret) {
+ if (write_dump_to_file(&dhd->pub, dump->buf, dump->bufsize, "mem_dump")) {
+ DHD_ERROR(("%s: writing SoC_RAM dump to the file failed\n", __FUNCTION__));
+ }
+ }
+#endif /* BOARD_HIKEY */
+#endif /* WL_CFG80211 && DHD_FILE_DUMP_EVENT */
+
+#ifdef DHD_COREDUMP
+ memset_s(dhdp->memdump_str, DHD_MEMDUMP_LONGSTR_LEN, 0, DHD_MEMDUMP_LONGSTR_LEN);
+ dhd_convert_memdump_type_to_str(dhdp->memdump_type, dhdp->memdump_str,
+ DHD_MEMDUMP_LONGSTR_LEN, dhdp->debug_dump_subcmd);
+ if (dhdp->memdump_type == DUMP_TYPE_DONGLE_TRAP &&
+ dhdp->dongle_trap_occured == TRUE) {
+ tr = &dhdp->last_trap_info;
+ dhd_lookup_map(dhdp->osh, map_path,
+ ltoh32(tr->epc), pc_fn, ltoh32(tr->r14), lr_fn);
+ sprintf(&dhdp->memdump_str[strlen(dhdp->memdump_str)], "_%.79s_%.79s",
+ pc_fn, lr_fn);
+ }
+ DHD_ERROR(("%s: dump reason: %s\n", __FUNCTION__, dhdp->memdump_str));
+ if (wifi_platform_set_coredump(dhd->adapter, dump->buf, dump->bufsize, dhdp->memdump_str)) {
+ DHD_ERROR(("%s: writing SoC_RAM dump failed\n", __FUNCTION__));
+#ifdef DHD_DEBUG_UART
+ dhd->pub.memdump_success = FALSE;
+#endif /* DHD_DEBUG_UART */
+ }
+#endif /* DHD_COREDUMP */
+
+ /*
+ * If the kernel does not have file-write access enabled, skip writing
+ * dumps to files; the dumps are instead pushed to the HAL layer, which
+ * writes them out.
+ */
+#ifdef DHD_DUMP_FILE_WRITE_FROM_KERNEL
+
+#ifdef D2H_MINIDUMP
+ /* dump minidump */
+ if (dhd_bus_is_minidump_enabled(dhdp)) {
+ dhd_d2h_minidump(&dhd->pub);
+ } else {
+ DHD_ERROR(("minidump is not enabled\n"));
+ }
+#endif /* D2H_MINIDUMP */
+
+ if (write_dump_to_file(&dhd->pub, dump->buf, dump->bufsize, "mem_dump")) {
+ DHD_ERROR(("%s: writing SoC_RAM dump to the file failed\n", __FUNCTION__));
+#ifdef DHD_DEBUG_UART
+ dhd->pub.memdump_success = FALSE;
+#endif /* DHD_DEBUG_UART */
+ }
+
+ if (dump->hscb_buf && dump->hscb_bufsize) {
+ if (write_dump_to_file(&dhd->pub, dump->hscb_buf,
+ dump->hscb_bufsize, "mem_dump_hscb")) {
+ DHD_ERROR(("%s: writing HSCB dump to the file failed\n", __FUNCTION__));
+#ifdef DHD_DEBUG_UART
+ dhd->pub.memdump_success = FALSE;
+#endif /* DHD_DEBUG_UART */
+ }
+ }
+
+#ifndef DHD_PKT_LOGGING
+ clear_debug_dump_time(dhdp->debug_dump_time_str);
+#endif /* !DHD_PKT_LOGGING */
+
+ /* directly call dhd_log_dump for debug_dump collection from the mem_dump work queue
+ * context, no need to schedule another work queue for log dump. In case of
+ * user initiated DEBUG_DUMP wpa_cli command (DUMP_TYPE_BY_SYSDUMP),
+ * cfg layer is itself scheduling the log_dump work queue.
+ * that path is not disturbed. If 'dhd_mem_dump' is called directly then we will not
+ * collect debug_dump as it may be called from non-sleepable context.
+ */
+#ifdef DHD_LOG_DUMP
+ if (dhd->scheduled_memdump &&
+ dhdp->memdump_type != DUMP_TYPE_BY_SYSDUMP) {
+ log_dump_type_t *flush_type = MALLOCZ(dhdp->osh,
+ sizeof(log_dump_type_t));
+ if (flush_type) {
+ *flush_type = DLD_BUF_TYPE_ALL;
+ DHD_ERROR(("%s: calling log dump.. \n", __FUNCTION__));
+ dhd_log_dump(dhd, flush_type, 0);
+ }
+ }
+#endif /* DHD_LOG_DUMP */
+
+ /* Before calling BUG_ON, wait for other logs to be dumped. We cannot
+ * wait when dhd_mem_dump() is called directly, as that may not be a
+ * sleepable context.
+ */
+ if (dhd->scheduled_memdump) {
+ uint bitmask = 0;
+ int timeleft = 0;
+#ifdef DHD_SSSR_DUMP
+ bitmask |= DHD_BUS_BUSY_IN_SSSRDUMP;
+#endif
+ if (bitmask != 0) {
+ DHD_ERROR(("%s: wait to clear dhd_bus_busy_state: 0x%x\n",
+ __FUNCTION__, dhdp->dhd_bus_busy_state));
+ timeleft = dhd_os_busbusy_wait_bitmask(dhdp,
+ &dhdp->dhd_bus_busy_state, bitmask, 0);
+ if ((timeleft == 0) || (timeleft == 1)) {
+ DHD_ERROR(("%s: Timed out dhd_bus_busy_state=0x%x\n",
+ __FUNCTION__, dhdp->dhd_bus_busy_state));
+ }
+ }
+ }
+#endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL */
+
+ if (dhd->pub.memdump_enabled == DUMP_MEMFILE_BUGON &&
+#ifdef WLAN_ACCEL_BOOT
+ /* BUG_ON only if wlan accel boot up is done */
+ dhd->wl_accel_boot_on_done == TRUE &&
+#endif /* WLAN_ACCEL_BOOT */
+#ifdef DHD_LOG_DUMP
+ dhd->pub.memdump_type != DUMP_TYPE_BY_SYSDUMP &&
+#endif /* DHD_LOG_DUMP */
+ dhd->pub.memdump_type != DUMP_TYPE_BY_USER &&
+#ifdef DHD_DEBUG_UART
+ dhd->pub.memdump_success == TRUE &&
+#endif /* DHD_DEBUG_UART */
+#ifdef DNGL_EVENT_SUPPORT
+ dhd->pub.memdump_type != DUMP_TYPE_DONGLE_HOST_EVENT &&
+#endif /* DNGL_EVENT_SUPPORT */
+ dhd->pub.memdump_type != DUMP_TYPE_CFG_VENDOR_TRIGGERED) {
+#ifdef SHOW_LOGTRACE
+ /* Wait till logtrace context is flushed */
+ dhd_flush_logtrace_process(dhd);
+#endif /* SHOW_LOGTRACE */
+
+#ifdef BTLOG
+ /* Wait till bt_log_dispatcher_work finishes */
+ cancel_work_sync(&dhd->bt_log_dispatcher_work);
+#endif /* BTLOG */
+
+#ifdef EWP_EDL
+ cancel_delayed_work_sync(&dhd->edl_dispatcher_work);
+#endif
+
+ printf("%s\n", info_string);
+ printf("MAC %pM\n", &dhdp->mac);
+ DHD_ERROR(("%s: call BUG_ON \n", __FUNCTION__));
+// BUG_ON(1);
+ }
+
+exit:
+ if (dump) {
+ MFREE(dhd->pub.osh, dump, sizeof(dhd_dump_t));
+ }
+ DHD_GENERAL_LOCK(dhdp, flags);
+ DHD_BUS_BUSY_CLEAR_IN_MEMDUMP(&dhd->pub);
+ dhd_os_busbusy_wake(dhdp);
+ DHD_GENERAL_UNLOCK(dhdp, flags);
+ dhd->scheduled_memdump = FALSE;
+
+#ifdef OEM_ANDROID
+ if (dhdp->hang_was_pending) {
+ DHD_ERROR(("%s: Send pending HANG event...\n", __FUNCTION__));
+ dhd_os_send_hang_message(dhdp);
+ dhdp->hang_was_pending = 0;
+ }
+#endif /* OEM_ANDROID */
+ DHD_ERROR(("%s: EXIT \n", __FUNCTION__));
+
+ return;
+}
+#endif /* DHD_FW_COREDUMP */
+
+#ifdef D2H_MINIDUMP
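+/* Write the device-to-host (D2H) minidump buffer, if the protocol layer
+ * allocated one, to a file named "d2h_minidump".
+ */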
+void
+dhd_d2h_minidump(dhd_pub_t *dhdp)
+{
+ char d2h_minidump[128];
+ dhd_dma_buf_t *minidump_buf;
+
+ minidump_buf = dhd_prot_get_minidump_buf(dhdp);
+ if (minidump_buf->va == NULL) {
+ DHD_ERROR(("%s: minidump_buf is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ /* Init file name */
+ memset(d2h_minidump, 0, sizeof(d2h_minidump));
+ snprintf(d2h_minidump, sizeof(d2h_minidump), "%s", "d2h_minidump");
+
+ if (write_dump_to_file(dhdp, (uint8 *)minidump_buf->va, minidump_buf->len, d2h_minidump)) {
+ DHD_ERROR(("%s: failed to dump d2h_minidump to file\n", __FUNCTION__));
+ }
+}
+#endif /* D2H_MINIDUMP */
+
+#ifdef DHD_SSSR_DUMP
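+/* Return the size in bytes of the DIG/VASIP save-restore buffer for the
+ * SSSR register info revision currently in use, or 0 when no such buffer
+ * is described. Note: an unknown revision returns BCME_UNSUPPORTED (a
+ * negative bcmerror code) implicitly converted to uint.
+ */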
+uint
+dhd_sssr_dig_buf_size(dhd_pub_t *dhdp)
+{
+ uint dig_buf_size = 0;
+
+	/* SSSR register information structures v0 and v1 share most fields except dig_mem */
+ switch (dhdp->sssr_reg_info->rev2.version) {
+ case SSSR_REG_INFO_VER_3:
+ /* intentional fall through */
+ case SSSR_REG_INFO_VER_2 :
+ if ((dhdp->sssr_reg_info->rev2.length >
+ OFFSETOF(sssr_reg_info_v2_t, dig_mem_info)) &&
+ dhdp->sssr_reg_info->rev2.dig_mem_info.dig_sr_size) {
+ dig_buf_size = dhdp->sssr_reg_info->rev2.dig_mem_info.dig_sr_size;
+ }
+ break;
+ case SSSR_REG_INFO_VER_1 :
+ if (dhdp->sssr_reg_info->rev1.vasip_regs.vasip_sr_size) {
+ dig_buf_size = dhdp->sssr_reg_info->rev1.vasip_regs.vasip_sr_size;
+ } else if ((dhdp->sssr_reg_info->rev1.length >
+ OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) &&
+ dhdp->sssr_reg_info->rev1.dig_mem_info.dig_sr_size) {
+ dig_buf_size = dhdp->sssr_reg_info->rev1.dig_mem_info.dig_sr_size;
+ }
+ break;
+ case SSSR_REG_INFO_VER_0 :
+ if (dhdp->sssr_reg_info->rev0.vasip_regs.vasip_sr_size) {
+ dig_buf_size = dhdp->sssr_reg_info->rev0.vasip_regs.vasip_sr_size;
+ }
+ break;
+ default :
+ DHD_ERROR(("invalid sssr_reg_ver"));
+ return BCME_UNSUPPORTED;
+ }
+
+ return dig_buf_size;
+}
+
+uint
+dhd_sssr_dig_buf_addr(dhd_pub_t *dhdp)
+{
+ uint dig_buf_addr = 0;
+
+	/* SSSR register information structures v0 and v1 share most fields except dig_mem */
+ switch (dhdp->sssr_reg_info->rev2.version) {
+ case SSSR_REG_INFO_VER_3 :
+ /* intentional fall through */
+ case SSSR_REG_INFO_VER_2 :
+ if ((dhdp->sssr_reg_info->rev2.length >
+ OFFSETOF(sssr_reg_info_v2_t, dig_mem_info)) &&
+ dhdp->sssr_reg_info->rev2.dig_mem_info.dig_sr_size) {
+ dig_buf_addr = dhdp->sssr_reg_info->rev2.dig_mem_info.dig_sr_addr;
+ }
+ break;
+ case SSSR_REG_INFO_VER_1 :
+ if (dhdp->sssr_reg_info->rev1.vasip_regs.vasip_sr_size) {
+ dig_buf_addr = dhdp->sssr_reg_info->rev1.vasip_regs.vasip_sr_addr;
+ } else if ((dhdp->sssr_reg_info->rev1.length >
+ OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) &&
+ dhdp->sssr_reg_info->rev1.dig_mem_info.dig_sr_size) {
+ dig_buf_addr = dhdp->sssr_reg_info->rev1.dig_mem_info.dig_sr_addr;
+ }
+ break;
+ case SSSR_REG_INFO_VER_0 :
+ if (dhdp->sssr_reg_info->rev0.vasip_regs.vasip_sr_size) {
+ dig_buf_addr = dhdp->sssr_reg_info->rev0.vasip_regs.vasip_sr_addr;
+ }
+ break;
+ default :
+ DHD_ERROR(("invalid sssr_reg_ver"));
+ return BCME_UNSUPPORTED;
+ }
+
+ return dig_buf_addr;
+}
+
+uint
+dhd_sssr_mac_buf_size(dhd_pub_t *dhdp, uint8 core_idx)
+{
+ uint mac_buf_size = 0;
+ uint8 num_d11cores;
+
+ num_d11cores = dhd_d11_slices_num_get(dhdp);
+
+	/* The mac_regs array is present in all SSSR register info revisions */
+ if (core_idx < num_d11cores) {
+ switch (dhdp->sssr_reg_info->rev2.version) {
+ case SSSR_REG_INFO_VER_3 :
+ /* intentional fall through */
+ case SSSR_REG_INFO_VER_2 :
+ mac_buf_size = dhdp->sssr_reg_info->rev2.mac_regs[core_idx].sr_size;
+ break;
+ case SSSR_REG_INFO_VER_1 :
+ mac_buf_size = dhdp->sssr_reg_info->rev1.mac_regs[core_idx].sr_size;
+ break;
+ case SSSR_REG_INFO_VER_0 :
+ mac_buf_size = dhdp->sssr_reg_info->rev0.mac_regs[core_idx].sr_size;
+ break;
+ default :
+ DHD_ERROR(("invalid sssr_reg_ver"));
+ return BCME_UNSUPPORTED;
+ }
+ }
+
+ return mac_buf_size;
+}
+
+uint
+dhd_sssr_mac_xmtaddress(dhd_pub_t *dhdp, uint8 core_idx)
+{
+ uint xmtaddress = 0;
+ uint8 num_d11cores;
+
+ num_d11cores = dhd_d11_slices_num_get(dhdp);
+
+	/* The mac_regs array is present in all SSSR register info revisions */
+ if (core_idx < num_d11cores) {
+ switch (dhdp->sssr_reg_info->rev2.version) {
+ case SSSR_REG_INFO_VER_3 :
+ /* intentional fall through */
+ case SSSR_REG_INFO_VER_2 :
+ xmtaddress = dhdp->sssr_reg_info->rev2.
+ mac_regs[core_idx].base_regs.xmtaddress;
+ break;
+ case SSSR_REG_INFO_VER_1 :
+ xmtaddress = dhdp->sssr_reg_info->rev1.
+ mac_regs[core_idx].base_regs.xmtaddress;
+ break;
+ case SSSR_REG_INFO_VER_0 :
+ xmtaddress = dhdp->sssr_reg_info->rev0.
+ mac_regs[core_idx].base_regs.xmtaddress;
+ break;
+ default :
+ DHD_ERROR(("invalid sssr_reg_ver"));
+ return BCME_UNSUPPORTED;
+ }
+ }
+
+ return xmtaddress;
+}
+
+uint
+dhd_sssr_mac_xmtdata(dhd_pub_t *dhdp, uint8 core_idx)
+{
+ uint xmtdata = 0;
+ uint8 num_d11cores;
+
+ num_d11cores = dhd_d11_slices_num_get(dhdp);
+
+	/* The mac_regs array is present in all SSSR register info revisions */
+ if (core_idx < num_d11cores) {
+ switch (dhdp->sssr_reg_info->rev2.version) {
+ case SSSR_REG_INFO_VER_3 :
+ /* intentional fall through */
+ case SSSR_REG_INFO_VER_2 :
+ xmtdata = dhdp->sssr_reg_info->rev2.
+ mac_regs[core_idx].base_regs.xmtdata;
+ break;
+ case SSSR_REG_INFO_VER_1 :
+ xmtdata = dhdp->sssr_reg_info->rev1.
+ mac_regs[core_idx].base_regs.xmtdata;
+ break;
+ case SSSR_REG_INFO_VER_0 :
+ xmtdata = dhdp->sssr_reg_info->rev0.
+ mac_regs[core_idx].base_regs.xmtdata;
+ break;
+ default :
+ DHD_ERROR(("invalid sssr_reg_ver"));
+ return BCME_UNSUPPORTED;
+ }
+ }
+
+ return xmtdata;
+}
+
+#ifdef DHD_SSSR_DUMP_BEFORE_SR
+int
+dhd_sssr_dump_dig_buf_before(void *dev, const void *user_buf, uint32 len)
+{
+ dhd_info_t *dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
+ dhd_pub_t *dhdp = &dhd_info->pub;
+ int pos = 0, ret = BCME_ERROR;
+ uint dig_buf_size = 0;
+
+ dig_buf_size = dhd_sssr_dig_buf_size(dhdp);
+
+ if (dhdp->sssr_dig_buf_before && (dhdp->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
+ ret = dhd_export_debug_data((char *)dhdp->sssr_dig_buf_before,
+ NULL, user_buf, dig_buf_size, &pos);
+ }
+ return ret;
+}
+
+int
+dhd_sssr_dump_d11_buf_before(void *dev, const void *user_buf, uint32 len, int core)
+{
+ dhd_info_t *dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
+ dhd_pub_t *dhdp = &dhd_info->pub;
+ int pos = 0, ret = BCME_ERROR;
+
+ if (dhdp->sssr_d11_before[core] &&
+ dhdp->sssr_d11_outofreset[core] &&
+ (dhdp->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
+ ret = dhd_export_debug_data((char *)dhdp->sssr_d11_before[core],
+ NULL, user_buf, len, &pos);
+ }
+ return ret;
+}
+#endif /* DHD_SSSR_DUMP_BEFORE_SR */
+
+int
+dhd_sssr_dump_dig_buf_after(void *dev, const void *user_buf, uint32 len)
+{
+ dhd_info_t *dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
+ dhd_pub_t *dhdp = &dhd_info->pub;
+ int pos = 0, ret = BCME_ERROR;
+ uint dig_buf_size = 0;
+
+ dig_buf_size = dhd_sssr_dig_buf_size(dhdp);
+
+ if (dhdp->sssr_dig_buf_after) {
+ ret = dhd_export_debug_data((char *)dhdp->sssr_dig_buf_after,
+ NULL, user_buf, dig_buf_size, &pos);
+ }
+ return ret;
+}
+
+int
+dhd_sssr_dump_d11_buf_after(void *dev, const void *user_buf, uint32 len, int core)
+{
+ dhd_info_t *dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
+ dhd_pub_t *dhdp = &dhd_info->pub;
+ int pos = 0, ret = BCME_ERROR;
+
+ if (dhdp->sssr_d11_after[core] &&
+ dhdp->sssr_d11_outofreset[core]) {
+ ret = dhd_export_debug_data((char *)dhdp->sssr_d11_after[core],
+ NULL, user_buf, len, &pos);
+ }
+ return ret;
+}
+
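+/* Write every collected SSSR buffer (the per-D11-core before/after SR
+ * snapshots and the DIG/VASIP buffer) out to files, holding the SSSRDUMP
+ * bus-busy flag so the bus cannot be torn down mid-dump.
+ */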
+void
+dhd_sssr_dump_to_file(dhd_info_t* dhdinfo)
+{
+ dhd_info_t *dhd = dhdinfo;
+ dhd_pub_t *dhdp;
+ int i;
+#ifdef DHD_SSSR_DUMP_BEFORE_SR
+ char before_sr_dump[128];
+#endif /* DHD_SSSR_DUMP_BEFORE_SR */
+ char after_sr_dump[128];
+ unsigned long flags = 0;
+ uint dig_buf_size = 0;
+ uint8 num_d11cores = 0;
+ uint d11_buf_size = 0;
+
+ DHD_ERROR(("%s: ENTER \n", __FUNCTION__));
+
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ dhdp = &dhd->pub;
+
+ DHD_GENERAL_LOCK(dhdp, flags);
+ DHD_BUS_BUSY_SET_IN_SSSRDUMP(dhdp);
+ if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
+ DHD_GENERAL_UNLOCK(dhdp, flags);
+ DHD_ERROR(("%s: bus is down! can't collect sssr dump. \n", __FUNCTION__));
+ goto exit;
+ }
+ DHD_GENERAL_UNLOCK(dhdp, flags);
+
+ num_d11cores = dhd_d11_slices_num_get(dhdp);
+
+ for (i = 0; i < num_d11cores; i++) {
+ /* Init file name */
+#ifdef DHD_SSSR_DUMP_BEFORE_SR
+ memset(before_sr_dump, 0, sizeof(before_sr_dump));
+#endif /* DHD_SSSR_DUMP_BEFORE_SR */
+ memset(after_sr_dump, 0, sizeof(after_sr_dump));
+
+#ifdef DHD_SSSR_DUMP_BEFORE_SR
+ snprintf(before_sr_dump, sizeof(before_sr_dump), "%s_%d_%s",
+ "sssr_dump_core", i, "before_SR");
+#endif /* DHD_SSSR_DUMP_BEFORE_SR */
+ snprintf(after_sr_dump, sizeof(after_sr_dump), "%s_%d_%s",
+ "sssr_dump_core", i, "after_SR");
+
+ d11_buf_size = dhd_sssr_mac_buf_size(dhdp, i);
+
+#ifdef DHD_SSSR_DUMP_BEFORE_SR
+ if (dhdp->sssr_d11_before[i] && dhdp->sssr_d11_outofreset[i] &&
+ (dhdp->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
+ if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_d11_before[i],
+ d11_buf_size, before_sr_dump)) {
+ DHD_ERROR(("%s: writing SSSR MAIN dump before to the file failed\n",
+ __FUNCTION__));
+ }
+ }
+#endif /* DHD_SSSR_DUMP_BEFORE_SR */
+
+ if (dhdp->sssr_d11_after[i] && dhdp->sssr_d11_outofreset[i]) {
+ if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_d11_after[i],
+ d11_buf_size, after_sr_dump)) {
+ DHD_ERROR(("%s: writing SSSR AUX dump after to the file failed\n",
+ __FUNCTION__));
+ }
+ }
+ }
+
+ dig_buf_size = dhd_sssr_dig_buf_size(dhdp);
+
+#ifdef DHD_SSSR_DUMP_BEFORE_SR
+ if (dhdp->sssr_dig_buf_before && (dhdp->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
+ if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_dig_buf_before,
+ dig_buf_size, "sssr_dump_dig_before_SR")) {
+ DHD_ERROR(("%s: writing SSSR Dig dump before to the file failed\n",
+ __FUNCTION__));
+ }
+ }
+#endif /* DHD_SSSR_DUMP_BEFORE_SR */
+
+ if (dhdp->sssr_dig_buf_after) {
+ if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_dig_buf_after,
+ dig_buf_size, "sssr_dump_dig_after_SR")) {
+ DHD_ERROR(("%s: writing SSSR Dig VASIP dump after to the file failed\n",
+ __FUNCTION__));
+ }
+ }
+
+exit:
+ DHD_GENERAL_LOCK(dhdp, flags);
+ DHD_BUS_BUSY_CLEAR_IN_SSSRDUMP(dhdp);
+ dhd_os_busbusy_wake(dhdp);
+ DHD_GENERAL_UNLOCK(dhdp, flags);
+}
+
+void
+dhd_write_sssr_dump(dhd_pub_t *dhdp, uint32 dump_mode)
+{
+#if defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
+ dhdp->sssr_dump_mode = dump_mode;
+#endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL */
+
+	/*
+	 * If the kernel does not have file write access enabled,
+	 * then skip writing dumps to files.
+	 * The dumps will be pushed to the HAL layer, which will
+	 * write them into files.
+	 */
+#if !defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
+ return;
+#else
+	/*
+	 * dhd_mem_dump -> dhd_sssr_dump -> dhd_write_sssr_dump
+	 * Without workqueue -
+	 * DUMP_TYPE_DONGLE_INIT_FAILURE/DUMP_TYPE_DUE_TO_BT/DUMP_TYPE_SMMU_FAULT
+	 * : these are called from their own handlers, not from interrupt context.
+	 * With workqueue - all other DUMP_TYPEs: dhd_mem_dump is called from a
+	 * workqueue, so there is no need to dump SSSR in another workqueue.
+	 */
+ DHD_ERROR(("%s: writing sssr dump to file... \n", __FUNCTION__));
+ dhd_sssr_dump_to_file(dhdp->info);
+#endif /* !DHD_DUMP_FILE_WRITE_FROM_KERNEL */
+}
+#endif /* DHD_SSSR_DUMP */
+
+#ifdef DHD_SDTC_ETB_DUMP
+void
+dhd_sdtc_etb_dump(dhd_pub_t *dhd)
+{
+ etb_info_t etb_info;
+ uint8 *sdtc_etb_dump;
+ uint8 *sdtc_etb_mempool;
+ uint etb_dump_len;
+ int ret = 0;
+
+ if (!dhd->sdtc_etb_inited) {
+ DHD_ERROR(("%s, SDTC ETB dump not supported\n", __FUNCTION__));
+ return;
+ }
+
+ bzero(&etb_info, sizeof(etb_info));
+
+ if ((ret = dhd_bus_get_etb_info(dhd, dhd->etb_addr_info.etbinfo_addr, &etb_info))) {
+ DHD_ERROR(("%s: failed to get etb info %d\n", __FUNCTION__, ret));
+ return;
+ }
+
+ if (etb_info.read_bytes == 0) {
+ DHD_ERROR(("%s ETB is of zero size. Hence donot collect SDTC ETB\n", __FUNCTION__));
+ return;
+ }
+
+ DHD_ERROR(("%s etb_info ver:%d len:%d rwp:%d etb_full:%d etb:addr:0x%x, len:%d\n",
+ __FUNCTION__, etb_info.version, etb_info.len,
+ etb_info.read_write_p, etb_info.etb_full,
+ etb_info.addr, etb_info.read_bytes));
+
+ /*
+ * etb mempool format = etb_info + etb
+ */
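+	/*
+	 * Layout sketch of the mempool as assembled below:
+	 *
+	 *   +------------------+---------------------------------+
+	 *   | etb_info_t copy  | raw ETB data (read_bytes bytes) |
+	 *   +------------------+---------------------------------+
+	 *   ^ sdtc_etb_mempool   ^ sdtc_etb_dump
+	 */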
+ etb_dump_len = etb_info.read_bytes + sizeof(etb_info);
+ if (etb_dump_len > DHD_SDTC_ETB_MEMPOOL_SIZE) {
+ DHD_ERROR(("%s etb_dump_len: %d is more than the alloced %d.Hence cannot collect\n",
+ __FUNCTION__, etb_dump_len, DHD_SDTC_ETB_MEMPOOL_SIZE));
+ return;
+ }
+ sdtc_etb_mempool = dhd->sdtc_etb_mempool;
+ memcpy(sdtc_etb_mempool, &etb_info, sizeof(etb_info));
+ sdtc_etb_dump = sdtc_etb_mempool + sizeof(etb_info);
+ if ((ret = dhd_bus_get_sdtc_etb(dhd, sdtc_etb_dump, etb_info.addr, etb_info.read_bytes))) {
+ DHD_ERROR(("%s: error to get SDTC ETB ret: %d\n", __FUNCTION__, ret));
+ return;
+ }
+
+ if (write_dump_to_file(dhd, (uint8 *)sdtc_etb_mempool,
+ etb_dump_len, "sdtc_etb_dump")) {
+ DHD_ERROR(("%s: failed to dump sdtc_etb to file\n",
+ __FUNCTION__));
+ }
+}
+#endif /* DHD_SDTC_ETB_DUMP */
+
+#ifdef DHD_LOG_DUMP
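+/* Deferred-work handler for DHD_WQ_WORK_DHD_LOG_DUMP: flushes the firmware
+ * preserve logs (when WL_CFG80211 is defined) and writes the requested log
+ * dump buffers to file under the logdump mutex. 'event_info' carries the
+ * log_dump_type_t selector, which is freed by do_dhd_log_dump().
+ */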
+static void
+dhd_log_dump(void *handle, void *event_info, u8 event)
+{
+ dhd_info_t *dhd = handle;
+ log_dump_type_t *type = (log_dump_type_t *)event_info;
+
+ if (!dhd || !type) {
+ DHD_ERROR(("%s: dhd/type is NULL\n", __FUNCTION__));
+ return;
+ }
+
+#ifdef WL_CFG80211
+ /* flush the fw preserve logs */
+ wl_flush_fw_log_buffer(dhd_linux_get_primary_netdev(&dhd->pub),
+ FW_LOGSET_MASK_ALL);
+#endif
+
+	/* There are currently three possible contexts from which
+	 * a log dump can be scheduled:
+	 * 1. TRAP
+	 * 2. supplicant DEBUG_DUMP private driver command
+	 * 3. HEALTH CHECK event
+	 * The concise debug info buffer is a shared resource,
+	 * and when a trap is one of the contexts, both of the
+	 * scheduled work queues need to run because trap data is
+	 * essential for debugging. Hence a mutex lock is acquired
+	 * before calling do_dhd_log_dump().
+	 */
+ DHD_ERROR(("%s: calling log dump.. \n", __FUNCTION__));
+ dhd_os_logdump_lock(&dhd->pub);
+ DHD_OS_WAKE_LOCK(&dhd->pub);
+ if (do_dhd_log_dump(&dhd->pub, type) != BCME_OK) {
+ DHD_ERROR(("%s: writing debug dump to the file failed\n", __FUNCTION__));
+ }
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ dhd_os_logdump_unlock(&dhd->pub);
+}
+
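+/* Schedule an asynchronous log dump on the deferred work queue. A minimal
+ * caller sketch (mirroring the pattern used by dhd_mem_dump above) -- the
+ * selector must be heap-allocated from the osh pool, since the deferred
+ * handler releases it with MFREE once the dump completes:
+ *
+ *     log_dump_type_t *flush_type = MALLOCZ(dhdp->osh, sizeof(*flush_type));
+ *     if (flush_type) {
+ *         *flush_type = DLD_BUF_TYPE_ALL;
+ *         dhd_schedule_log_dump(dhdp, flush_type);
+ *     }
+ */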
+void dhd_schedule_log_dump(dhd_pub_t *dhdp, void *type)
+{
+ DHD_ERROR(("%s: scheduling log dump.. \n", __FUNCTION__));
+
+ dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
+ type, DHD_WQ_WORK_DHD_LOG_DUMP,
+ dhd_log_dump, DHD_WQ_WORK_PRIORITY_HIGH);
+}
+
+static void
+dhd_print_buf_addr(dhd_pub_t *dhdp, char *name, void *buf, unsigned int size)
+{
+#ifdef DHD_FW_COREDUMP
+ if ((dhdp->memdump_enabled == DUMP_MEMONLY) ||
+ (dhdp->memdump_enabled == DUMP_MEMFILE_BUGON) ||
+ (dhdp->memdump_type == DUMP_TYPE_SMMU_FAULT) ||
+#ifdef DHD_DETECT_CONSECUTIVE_MFG_HANG
+ (dhdp->op_mode & DHD_FLAG_MFG_MODE &&
+ (dhdp->hang_count >= MAX_CONSECUTIVE_MFG_HANG_COUNT-1)) ||
+#endif /* DHD_DETECT_CONSECUTIVE_MFG_HANG */
+ FALSE)
+#else
+ if (dhdp->memdump_type == DUMP_TYPE_SMMU_FAULT)
+#endif
+ {
+#if defined(CONFIG_ARM64)
+ DHD_ERROR(("-------- %s: buf(va)=%llx, buf(pa)=%llx, bufsize=%d\n",
+ name, (uint64)buf, (uint64)__virt_to_phys((ulong)buf), size));
+#elif defined(__ARM_ARCH_7A__)
+ DHD_ERROR(("-------- %s: buf(va)=%x, buf(pa)=%x, bufsize=%d\n",
+ name, (uint32)buf, (uint32)__virt_to_phys((ulong)buf), size));
+#endif /* __ARM_ARCH_7A__ */
+ }
+}
+
+static void
+dhd_log_dump_buf_addr(dhd_pub_t *dhdp, log_dump_type_t *type)
+{
+ int i;
+ unsigned long wr_size = 0;
+ struct dhd_log_dump_buf *dld_buf = &g_dld_buf[0];
+ size_t log_size = 0;
+ char buf_name[DHD_PRINT_BUF_NAME_LEN];
+ dhd_dbg_ring_t *ring = NULL;
+
+ BCM_REFERENCE(ring);
+
+ for (i = 0; i < DLD_BUFFER_NUM; i++) {
+ dld_buf = &g_dld_buf[i];
+ log_size = (unsigned long)dld_buf->max -
+ (unsigned long)dld_buf->buffer;
+ if (dld_buf->wraparound) {
+ wr_size = log_size;
+ } else {
+ wr_size = (unsigned long)dld_buf->present -
+ (unsigned long)dld_buf->front;
+ }
+ scnprintf(buf_name, sizeof(buf_name), "dlb_buf[%d]", i);
+ dhd_print_buf_addr(dhdp, buf_name, dld_buf, dld_buf_size[i]);
+ scnprintf(buf_name, sizeof(buf_name), "dlb_buf[%d] buffer", i);
+ dhd_print_buf_addr(dhdp, buf_name, dld_buf->buffer, wr_size);
+ scnprintf(buf_name, sizeof(buf_name), "dlb_buf[%d] present", i);
+ dhd_print_buf_addr(dhdp, buf_name, dld_buf->present, wr_size);
+ scnprintf(buf_name, sizeof(buf_name), "dlb_buf[%d] front", i);
+ dhd_print_buf_addr(dhdp, buf_name, dld_buf->front, wr_size);
+ }
+
+#ifdef DEBUGABILITY_ECNTRS_LOGGING
+ /* periodic flushing of ecounters is NOT supported */
+ if (*type == DLD_BUF_TYPE_ALL &&
+ logdump_ecntr_enable &&
+ dhdp->ecntr_dbg_ring) {
+
+ ring = (dhd_dbg_ring_t *)dhdp->ecntr_dbg_ring;
+ dhd_print_buf_addr(dhdp, "ecntr_dbg_ring", ring, LOG_DUMP_ECNTRS_MAX_BUFSIZE);
+ dhd_print_buf_addr(dhdp, "ecntr_dbg_ring ring_buf", ring->ring_buf,
+ LOG_DUMP_ECNTRS_MAX_BUFSIZE);
+ }
+#endif /* DEBUGABILITY_ECNTRS_LOGGING */
+
+#if defined(BCMPCIE)
+ if (dhdp->dongle_trap_occured && dhdp->extended_trap_data) {
+ dhd_print_buf_addr(dhdp, "extended_trap_data", dhdp->extended_trap_data,
+ BCMPCIE_EXT_TRAP_DATA_MAXLEN);
+ }
+#endif /* BCMPCIE */
+
+#if defined(DHD_FW_COREDUMP) && defined(DNGL_EVENT_SUPPORT)
+ /* if health check event was received */
+ if (dhdp->memdump_type == DUMP_TYPE_DONGLE_HOST_EVENT) {
+ dhd_print_buf_addr(dhdp, "health_chk_event_data", dhdp->health_chk_event_data,
+ HEALTH_CHK_BUF_SIZE);
+ }
+#endif /* DHD_FW_COREDUMP && DNGL_EVENT_SUPPORT */
+
+ /* append the concise debug information */
+ if (dhdp->concise_dbg_buf) {
+ dhd_print_buf_addr(dhdp, "concise_dbg_buf", dhdp->concise_dbg_buf,
+ CONCISE_DUMP_BUFLEN);
+ }
+}
+
+#ifdef CUSTOMER_HW4_DEBUG
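+/* Print a (possibly large) buffer to the kernel log in NUL-terminated
+ * chunks of at most DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE bytes.
+ */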
+static void
+dhd_log_dump_print_to_kmsg(char *bufptr, unsigned long len)
+{
+ char tmp_buf[DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE + 1];
+ char *end = NULL;
+ unsigned long plen = 0;
+
+ if (!bufptr || !len)
+ return;
+
+ memset(tmp_buf, 0, DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE);
+ end = bufptr + len;
+ while (bufptr < end) {
+ if ((bufptr + DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE) < end) {
+ memcpy(tmp_buf, bufptr, DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE);
+ tmp_buf[DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE] = '\0';
+ printf("%s", tmp_buf);
+ bufptr += DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE;
+ } else {
+ plen = (unsigned long)end - (unsigned long)bufptr;
+ memcpy(tmp_buf, bufptr, plen);
+ tmp_buf[plen] = '\0';
+ printf("%s", tmp_buf);
+ bufptr += plen;
+ }
+ }
+}
+
+static void
+dhd_log_dump_print_tail(dhd_pub_t *dhdp,
+ struct dhd_log_dump_buf *dld_buf,
+ uint tail_len)
+{
+ char *flush_ptr1 = NULL, *flush_ptr2 = NULL;
+ unsigned long len_flush1 = 0, len_flush2 = 0;
+ unsigned long flags = 0;
+
+ /* need to hold the lock before accessing 'present' and 'remain' ptrs */
+ DHD_LOG_DUMP_BUF_LOCK(&dld_buf->lock, flags);
+ flush_ptr1 = dld_buf->present - tail_len;
+ if (flush_ptr1 >= dld_buf->front) {
+ /* tail content is within the buffer */
+ flush_ptr2 = NULL;
+ len_flush1 = tail_len;
+ } else if (dld_buf->wraparound) {
+ /* tail content spans the buffer length i.e, wrap around */
+ flush_ptr1 = dld_buf->front;
+ len_flush1 = (unsigned long)dld_buf->present - (unsigned long)flush_ptr1;
+ len_flush2 = (unsigned long)tail_len - len_flush1;
+ flush_ptr2 = (char *)((unsigned long)dld_buf->max -
+ (unsigned long)len_flush2);
+ } else {
+ /* amt of logs in buffer is less than tail size */
+ flush_ptr1 = dld_buf->front;
+ flush_ptr2 = NULL;
+ len_flush1 = (unsigned long)dld_buf->present - (unsigned long)dld_buf->front;
+ }
+ DHD_LOG_DUMP_BUF_UNLOCK(&dld_buf->lock, flags);
+
+ printf("\n================= LOG_DUMP tail =================\n");
+ if (flush_ptr2) {
+ dhd_log_dump_print_to_kmsg(flush_ptr2, len_flush2);
+ }
+ dhd_log_dump_print_to_kmsg(flush_ptr1, len_flush1);
+ printf("\n===================================================\n");
+}
+#endif /* CUSTOMER_HW4_DEBUG */
+
+#ifdef DHD_SSSR_DUMP
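+/* Fill 'arr_len' with the length of each SSSR section that will be exported
+ * to the HAL; the indices follow the SSSR_C<n>_D11_BEFORE/AFTER and
+ * SSSR_DIG_BEFORE/AFTER enum order.
+ */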
+int
+dhdpcie_sssr_dump_get_before_after_len(dhd_pub_t *dhd, uint32 *arr_len)
+{
+ int i = 0;
+ uint dig_buf_size = 0;
+
+ DHD_ERROR(("%s\n", __FUNCTION__));
+
+ /* core 0 */
+ i = 0;
+#ifdef DHD_SSSR_DUMP_BEFORE_SR
+ if (dhd->sssr_d11_before[i] && dhd->sssr_d11_outofreset[i] &&
+ (dhd->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
+
+ arr_len[SSSR_C0_D11_BEFORE] = dhd_sssr_mac_buf_size(dhd, i);
+ DHD_ERROR(("%s: arr_len[SSSR_C0_D11_BEFORE] : %d\n", __FUNCTION__,
+ arr_len[SSSR_C0_D11_BEFORE]));
+#ifdef DHD_LOG_DUMP
+ dhd_print_buf_addr(dhd, "SSSR_C0_D11_BEFORE",
+ dhd->sssr_d11_before[i], arr_len[SSSR_C0_D11_BEFORE]);
+#endif /* DHD_LOG_DUMP */
+ }
+#endif /* DHD_SSSR_DUMP_BEFORE_SR */
+ if (dhd->sssr_d11_after[i] && dhd->sssr_d11_outofreset[i]) {
+ arr_len[SSSR_C0_D11_AFTER] = dhd_sssr_mac_buf_size(dhd, i);
+ DHD_ERROR(("%s: arr_len[SSSR_C0_D11_AFTER] : %d\n", __FUNCTION__,
+ arr_len[SSSR_C0_D11_AFTER]));
+#ifdef DHD_LOG_DUMP
+ dhd_print_buf_addr(dhd, "SSSR_C0_D11_AFTER",
+ dhd->sssr_d11_after[i], arr_len[SSSR_C0_D11_AFTER]);
+#endif /* DHD_LOG_DUMP */
+ }
+
+ /* core 1 */
+ i = 1;
+#ifdef DHD_SSSR_DUMP_BEFORE_SR
+ if (dhd->sssr_d11_before[i] && dhd->sssr_d11_outofreset[i] &&
+ (dhd->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
+ arr_len[SSSR_C1_D11_BEFORE] = dhd_sssr_mac_buf_size(dhd, i);
+ DHD_ERROR(("%s: arr_len[SSSR_C1_D11_BEFORE] : %d\n", __FUNCTION__,
+ arr_len[SSSR_C1_D11_BEFORE]));
+#ifdef DHD_LOG_DUMP
+ dhd_print_buf_addr(dhd, "SSSR_C1_D11_BEFORE",
+ dhd->sssr_d11_before[i], arr_len[SSSR_C1_D11_BEFORE]);
+#endif /* DHD_LOG_DUMP */
+ }
+#endif /* DHD_SSSR_DUMP_BEFORE_SR */
+ if (dhd->sssr_d11_after[i] && dhd->sssr_d11_outofreset[i]) {
+ arr_len[SSSR_C1_D11_AFTER] = dhd_sssr_mac_buf_size(dhd, i);
+ DHD_ERROR(("%s: arr_len[SSSR_C1_D11_AFTER] : %d\n", __FUNCTION__,
+ arr_len[SSSR_C1_D11_AFTER]));
+#ifdef DHD_LOG_DUMP
+ dhd_print_buf_addr(dhd, "SSSR_C1_D11_AFTER",
+ dhd->sssr_d11_after[i], arr_len[SSSR_C1_D11_AFTER]);
+#endif /* DHD_LOG_DUMP */
+ }
+
+ /* core 2 scan core */
+ if (dhd->sssr_reg_info->rev2.version >= SSSR_REG_INFO_VER_2) {
+ i = 2;
+#ifdef DHD_SSSR_DUMP_BEFORE_SR
+ if (dhd->sssr_d11_before[i] && dhd->sssr_d11_outofreset[i] &&
+ (dhd->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
+ arr_len[SSSR_C2_D11_BEFORE] = dhd_sssr_mac_buf_size(dhd, i);
+ DHD_ERROR(("%s: arr_len[SSSR_C2_D11_BEFORE] : %d\n", __FUNCTION__,
+ arr_len[SSSR_C2_D11_BEFORE]));
+#ifdef DHD_LOG_DUMP
+ dhd_print_buf_addr(dhd, "SSSR_C2_D11_BEFORE",
+ dhd->sssr_d11_before[i], arr_len[SSSR_C2_D11_BEFORE]);
+#endif /* DHD_LOG_DUMP */
+ }
+#endif /* DHD_SSSR_DUMP_BEFORE_SR */
+ if (dhd->sssr_d11_after[i] && dhd->sssr_d11_outofreset[i]) {
+ arr_len[SSSR_C2_D11_AFTER] = dhd_sssr_mac_buf_size(dhd, i);
+ DHD_ERROR(("%s: arr_len[SSSR_C2_D11_AFTER] : %d\n", __FUNCTION__,
+ arr_len[SSSR_C2_D11_AFTER]));
+#ifdef DHD_LOG_DUMP
+ dhd_print_buf_addr(dhd, "SSSR_C2_D11_AFTER",
+ dhd->sssr_d11_after[i], arr_len[SSSR_C2_D11_AFTER]);
+#endif /* DHD_LOG_DUMP */
+ }
+ }
+
+ /* DIG core or VASIP */
+ dig_buf_size = dhd_sssr_dig_buf_size(dhd);
+#ifdef DHD_SSSR_DUMP_BEFORE_SR
+ arr_len[SSSR_DIG_BEFORE] = (dhd->sssr_dig_buf_before) ? dig_buf_size : 0;
+ DHD_ERROR(("%s: arr_len[SSSR_DIG_BEFORE] : %d\n", __FUNCTION__,
+ arr_len[SSSR_DIG_BEFORE]));
+#ifdef DHD_LOG_DUMP
+ if (dhd->sssr_dig_buf_before) {
+ dhd_print_buf_addr(dhd, "SSSR_DIG_BEFORE",
+ dhd->sssr_dig_buf_before, arr_len[SSSR_DIG_BEFORE]);
+ }
+#endif /* DHD_LOG_DUMP */
+#endif /* DHD_SSSR_DUMP_BEFORE_SR */
+
+ arr_len[SSSR_DIG_AFTER] = (dhd->sssr_dig_buf_after) ? dig_buf_size : 0;
+ DHD_ERROR(("%s: arr_len[SSSR_DIG_AFTER] : %d\n", __FUNCTION__,
+ arr_len[SSSR_DIG_AFTER]));
+#ifdef DHD_LOG_DUMP
+ if (dhd->sssr_dig_buf_after) {
+ dhd_print_buf_addr(dhd, "SSSR_DIG_AFTER",
+ dhd->sssr_dig_buf_after, arr_len[SSSR_DIG_AFTER]);
+ }
+#endif /* DHD_LOG_DUMP */
+
+ return BCME_OK;
+}
+
+void
+dhd_nla_put_sssr_dump_len(void *ndev, uint32 *arr_len)
+{
+ dhd_info_t *dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
+ dhd_pub_t *dhdp = &dhd_info->pub;
+
+ if (dhdp->sssr_dump_collected) {
+ dhdpcie_sssr_dump_get_before_after_len(dhdp, arr_len);
+ }
+}
+#endif /* DHD_SSSR_DUMP */
+
+uint32
+dhd_get_time_str_len(void)
+{
+ char *ts = NULL, time_str[128];
+
+ ts = dhd_log_dump_get_timestamp();
+ snprintf(time_str, sizeof(time_str),
+ "\n\n ========== LOG DUMP TAKEN AT : %s =========\n", ts);
+ return strlen(time_str);
+}
+
+#if defined(BCMPCIE)
+uint32
+dhd_get_ext_trap_len(void *ndev, dhd_pub_t *dhdp)
+{
+ int length = 0;
+ log_dump_section_hdr_t sec_hdr;
+ dhd_info_t *dhd_info;
+
+ if (ndev) {
+ dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
+ dhdp = &dhd_info->pub;
+ }
+
+ if (!dhdp)
+ return length;
+
+ if (dhdp->extended_trap_data) {
+ length = (strlen(EXT_TRAP_LOG_HDR)
+ + sizeof(sec_hdr) + BCMPCIE_EXT_TRAP_DATA_MAXLEN);
+ }
+ return length;
+}
+#endif /* BCMPCIE */
+
+#if defined(DHD_FW_COREDUMP) && defined(DNGL_EVENT_SUPPORT)
+uint32
+dhd_get_health_chk_len(void *ndev, dhd_pub_t *dhdp)
+{
+ int length = 0;
+ log_dump_section_hdr_t sec_hdr;
+ dhd_info_t *dhd_info;
+
+ if (ndev) {
+ dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
+ dhdp = &dhd_info->pub;
+ }
+
+ if (!dhdp)
+ return length;
+
+ if (dhdp->memdump_type == DUMP_TYPE_DONGLE_HOST_EVENT) {
+ length = (strlen(HEALTH_CHK_LOG_HDR)
+ + sizeof(sec_hdr) + HEALTH_CHK_BUF_SIZE);
+ }
+ return length;
+}
+#endif /* DHD_FW_COREDUMP && DNGL_EVENT_SUPPORT */
+
+uint32
+dhd_get_dhd_dump_len(void *ndev, dhd_pub_t *dhdp)
+{
+ uint32 length = 0;
+ log_dump_section_hdr_t sec_hdr;
+ dhd_info_t *dhd_info;
+ int remain_len = 0;
+
+ if (ndev) {
+ dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
+ dhdp = &dhd_info->pub;
+ }
+
+ if (!dhdp)
+ return length;
+
+ if (dhdp->concise_dbg_buf) {
+ remain_len = dhd_dump(dhdp, (char *)dhdp->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
+ if (remain_len <= 0 || remain_len >= CONCISE_DUMP_BUFLEN) {
+ DHD_ERROR(("%s: error getting concise debug info ! remain_len: %d\n",
+ __FUNCTION__, remain_len));
+ return length;
+ }
+
+ length += (uint32)(CONCISE_DUMP_BUFLEN - remain_len);
+ }
+
+ length += (uint32)(strlen(DHD_DUMP_LOG_HDR) + sizeof(sec_hdr));
+ return length;
+}
+
+uint32
+dhd_get_cookie_log_len(void *ndev, dhd_pub_t *dhdp)
+{
+ int length = 0;
+ dhd_info_t *dhd_info;
+
+ if (ndev) {
+ dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
+ dhdp = &dhd_info->pub;
+ }
+
+ if (!dhdp)
+ return length;
+
+ if (dhdp->logdump_cookie && dhd_logdump_cookie_count(dhdp) > 0) {
+ length = dhd_log_dump_cookie_len(dhdp);
+ }
+ return length;
+}
+
+#ifdef DHD_DUMP_PCIE_RINGS
+uint32
+dhd_get_flowring_len(void *ndev, dhd_pub_t *dhdp)
+{
+ uint32 length = 0;
+ log_dump_section_hdr_t sec_hdr;
+ dhd_info_t *dhd_info;
+ uint16 h2d_flowrings_total;
+ int remain_len = 0;
+
+ if (ndev) {
+ dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
+ dhdp = &dhd_info->pub;
+ }
+
+ if (!dhdp)
+ return length;
+
+ if (dhdp->concise_dbg_buf) {
+ remain_len = dhd_dump(dhdp, (char *)dhdp->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
+ if (remain_len <= 0 || remain_len >= CONCISE_DUMP_BUFLEN) {
+ DHD_ERROR(("%s: error getting concise debug info ! remain_len: %d\n",
+ __FUNCTION__, remain_len));
+ return length;
+ }
+
+ length += (uint32)(CONCISE_DUMP_BUFLEN - remain_len);
+ }
+
+ length += (uint32) strlen(FLOWRING_DUMP_HDR);
+ length += (uint32) sizeof(sec_hdr);
+ h2d_flowrings_total = dhd_get_max_flow_rings(dhdp);
+ length += ((D2HRING_TXCMPLT_ITEMSIZE * D2HRING_TXCMPLT_MAX_ITEM)
+ + (H2DRING_RXPOST_ITEMSIZE * H2DRING_RXPOST_MAX_ITEM)
+ + (D2HRING_RXCMPLT_ITEMSIZE * D2HRING_RXCMPLT_MAX_ITEM)
+ + (H2DRING_CTRL_SUB_ITEMSIZE * H2DRING_CTRL_SUB_MAX_ITEM)
+ + (D2HRING_CTRL_CMPLT_ITEMSIZE * D2HRING_CTRL_CMPLT_MAX_ITEM)
+#ifdef EWP_EDL
+ + (D2HRING_EDL_HDR_SIZE * D2HRING_EDL_MAX_ITEM));
+#else
+ + (H2DRING_INFO_BUFPOST_ITEMSIZE * H2DRING_DYNAMIC_INFO_MAX_ITEM)
+ + (D2HRING_INFO_BUFCMPLT_ITEMSIZE * D2HRING_DYNAMIC_INFO_MAX_ITEM));
+#endif /* EWP_EDL */
+
+#if defined(DHD_HTPUT_TUNABLES)
+ /* flowring lengths are different for HTPUT rings, handle accordingly */
+ length += ((H2DRING_TXPOST_ITEMSIZE * dhd_prot_get_h2d_htput_max_txpost(dhdp) *
+ HTPUT_TOTAL_FLOW_RINGS) +
+ (H2DRING_TXPOST_ITEMSIZE * dhd_prot_get_h2d_max_txpost(dhdp) *
+ (h2d_flowrings_total - HTPUT_TOTAL_FLOW_RINGS)));
+#else
+ length += (H2DRING_TXPOST_ITEMSIZE * dhd_prot_get_h2d_max_txpost(dhdp) *
+ h2d_flowrings_total);
+#endif /* DHD_HTPUT_TUNABLES */
+
+ return length;
+}
+#endif /* DHD_DUMP_PCIE_RINGS */
+
+#ifdef EWP_ECNTRS_LOGGING
+uint32
+dhd_get_ecntrs_len(void *ndev, dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd_info;
+ log_dump_section_hdr_t sec_hdr;
+ int length = 0;
+ dhd_dbg_ring_t *ring;
+
+ if (ndev) {
+ dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
+ dhdp = &dhd_info->pub;
+ }
+
+ if (!dhdp)
+ return length;
+
+ if (logdump_ecntr_enable && dhdp->ecntr_dbg_ring) {
+ ring = (dhd_dbg_ring_t *)dhdp->ecntr_dbg_ring;
+ length = ring->ring_size + strlen(ECNTRS_LOG_HDR) + sizeof(sec_hdr);
+ }
+ return length;
+}
+#endif /* EWP_ECNTRS_LOGGING */
+
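+/* Each section written into the debug_dump file is laid out as
+ *   <ASCII header string><log_dump_section_hdr_t><payload of sec_hdr.length bytes>
+ * The length accounting below subtracts the header string and the section
+ * header from 'len' so that sec_hdr.length describes only the payload.
+ */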
+int
+dhd_get_dld_log_dump(void *dev, dhd_pub_t *dhdp, const void *user_buf,
+ void *fp, uint32 len, int type, void *pos)
+{
+ int ret = BCME_OK;
+ struct dhd_log_dump_buf *dld_buf;
+ log_dump_section_hdr_t sec_hdr;
+ dhd_info_t *dhd_info;
+
+ dld_buf = &g_dld_buf[type];
+
+ if (dev) {
+ dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
+ dhdp = &dhd_info->pub;
+ } else if (!dhdp) {
+ return BCME_ERROR;
+ }
+
+ DHD_ERROR(("%s: ENTER \n", __FUNCTION__));
+
+ dhd_init_sec_hdr(&sec_hdr);
+
+ /* write the section header first */
+ ret = dhd_export_debug_data(dld_hdrs[type].hdr_str, fp, user_buf,
+ strlen(dld_hdrs[type].hdr_str), pos);
+ if (ret < 0)
+ goto exit;
+ len -= (uint32)strlen(dld_hdrs[type].hdr_str);
+ len -= (uint32)sizeof(sec_hdr);
+ sec_hdr.type = dld_hdrs[type].sec_type;
+ sec_hdr.length = len;
+ ret = dhd_export_debug_data((char *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), pos);
+ if (ret < 0)
+ goto exit;
+ ret = dhd_export_debug_data(dld_buf->buffer, fp, user_buf, len, pos);
+ if (ret < 0)
+ goto exit;
+
+exit:
+ return ret;
+}
+
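+/* Flush any outstanding firmware event/log work (ETD preserve logs on trap,
+ * logtrace/EDL work items) so the log dump that follows captures them.
+ * Returns BCME_OK even when the PCIe link is down, so that callers can
+ * still dump whatever is already buffered on the host.
+ */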
+static int
+dhd_log_flush(dhd_pub_t *dhdp, log_dump_type_t *type)
+{
+ unsigned long flags = 0;
+#ifdef EWP_EDL
+ int i = 0;
+#endif /* EWP_EDL */
+ dhd_info_t *dhd_info = NULL;
+
+ BCM_REFERENCE(dhd_info);
+
+	/* If dhdp is NULL, it is extremely unlikely that a log dump will be
+	 * scheduled, so not freeing 'type' here is OK; even if we wanted to
+	 * free 'type' we could not, since 'dhdp->osh' is unavailable
+	 * when dhdp is NULL.
+	 */
+ if (!dhdp || !type) {
+ if (dhdp) {
+ DHD_GENERAL_LOCK(dhdp, flags);
+ DHD_BUS_BUSY_CLEAR_IN_LOGDUMP(dhdp);
+ dhd_os_busbusy_wake(dhdp);
+ DHD_GENERAL_UNLOCK(dhdp, flags);
+ }
+ return BCME_ERROR;
+ }
+
+#if defined(BCMPCIE)
+ if (dhd_bus_get_linkdown(dhdp)) {
+		/* As the link is down, do not collect any data over PCIe.
+		 * Also return BCME_OK to the caller, so that the caller can
+		 * dump all the outstanding data to the file.
+		 */
+ return BCME_OK;
+ }
+#endif /* BCMPCIE */
+
+ dhd_info = (dhd_info_t *)dhdp->info;
+ /* in case of trap get preserve logs from ETD */
+#if defined(BCMPCIE) && defined(EWP_ETD_PRSRV_LOGS)
+ if (dhdp->dongle_trap_occured &&
+ dhdp->extended_trap_data) {
+ dhdpcie_get_etd_preserve_logs(dhdp, (uint8 *)dhdp->extended_trap_data,
+ &dhd_info->event_data);
+ }
+#endif /* BCMPCIE */
+
+ /* flush the event work items to get any fw events/logs
+ * flush_work is a blocking call
+ */
+#ifdef SHOW_LOGTRACE
+#ifdef EWP_EDL
+ if (dhd_info->pub.dongle_edl_support) {
+ /* wait till existing edl items are processed */
+ dhd_flush_logtrace_process(dhd_info);
+ /* dhd_flush_logtrace_process will ensure the work items in the ring
+ * (EDL ring) from rd to wr are processed. But if wr had
+ * wrapped around, only the work items from rd to ring-end are processed.
+ * So to ensure that the work items at the
+ * beginning of ring are also processed in the wrap around case, call
+ * it twice
+ */
+ for (i = 0; i < 2; i++) {
+ /* blocks till the edl items are processed */
+ dhd_flush_logtrace_process(dhd_info);
+ }
+ } else {
+ dhd_flush_logtrace_process(dhd_info);
+ }
+#else
+ dhd_flush_logtrace_process(dhd_info);
+#endif /* EWP_EDL */
+#endif /* SHOW_LOGTRACE */
+
+#ifdef CUSTOMER_HW4_DEBUG
+ /* print last 'x' KB of preserve buffer data to kmsg console
+ * this is to address cases where debug_dump is not
+ * available for debugging
+ */
+ dhd_log_dump_print_tail(dhdp,
+ &g_dld_buf[DLD_BUF_TYPE_PRESERVE], logdump_prsrv_tailsize);
+#endif /* CUSTOMER_HW4_DEBUG */
+ return BCME_OK;
+}
+
+int
+dhd_get_debug_dump_file_name(void *dev, dhd_pub_t *dhdp, char *dump_path, int size)
+{
+ int ret;
+ int len = 0;
+ dhd_info_t *dhd_info;
+
+ if (dev) {
+ dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
+ dhdp = &dhd_info->pub;
+ }
+
+ if (!dhdp)
+ return BCME_ERROR;
+
+ memset(dump_path, 0, size);
+
+ ret = snprintf(dump_path, size, "%s",
+ DHD_COMMON_DUMP_PATH DHD_DEBUG_DUMP_TYPE);
+ len += ret;
+
+ /* Keep the same timestamp across different dump logs */
+ if (!dhdp->logdump_periodic_flush) {
+ struct rtc_time tm;
+ clear_debug_dump_time(dhdp->debug_dump_time_str);
+ get_debug_dump_time(dhdp->debug_dump_time_str);
+ sscanf(dhdp->debug_dump_time_str, DHD_LOG_DUMP_TS_FMT_YYMMDDHHMMSS,
+ &tm.tm_year, &tm.tm_mon, &tm.tm_mday,
+ &tm.tm_hour, &tm.tm_min, &tm.tm_sec);
+ ret = snprintf(dump_path + len, size - len, "_" DHD_LOG_DUMP_TS_FMT_YYMMDDHHMMSS,
+ tm.tm_year, tm.tm_mon, tm.tm_mday,
+ tm.tm_hour, tm.tm_min, tm.tm_sec);
+ len += ret;
+ }
+
+ ret = 0;
+ switch (dhdp->debug_dump_subcmd) {
+ case CMD_UNWANTED:
+ ret = snprintf(dump_path + len, size - len, "%s", DHD_DUMP_SUBSTR_UNWANTED);
+ break;
+ case CMD_DISCONNECTED:
+ ret = snprintf(dump_path + len, size - len, "%s", DHD_DUMP_SUBSTR_DISCONNECTED);
+ break;
+ default:
+ break;
+ }
+ len += ret;
+
+ return BCME_OK;
+}
+
+uint32
+dhd_get_dld_len(int log_type)
+{
+ unsigned long wr_size = 0;
+ unsigned long buf_size = 0;
+ unsigned long flags = 0;
+ struct dhd_log_dump_buf *dld_buf;
+ log_dump_section_hdr_t sec_hdr;
+
+ /* calculate the length of the log */
+ dld_buf = &g_dld_buf[log_type];
+ buf_size = (unsigned long)dld_buf->max -
+ (unsigned long)dld_buf->buffer;
+
+ if (dld_buf->wraparound) {
+ wr_size = buf_size;
+ } else {
+ /* need to hold the lock before accessing 'present' and 'remain' ptrs */
+ DHD_LOG_DUMP_BUF_LOCK(&dld_buf->lock, flags);
+ wr_size = (unsigned long)dld_buf->present -
+ (unsigned long)dld_buf->front;
+ DHD_LOG_DUMP_BUF_UNLOCK(&dld_buf->lock, flags);
+ }
+ return (wr_size + sizeof(sec_hdr) + strlen(dld_hdrs[log_type].hdr_str));
+}
+
+static void
+dhd_get_time_str(dhd_pub_t *dhdp, char *time_str, int size)
+{
+ char *ts = NULL;
+ memset(time_str, 0, size);
+ ts = dhd_log_dump_get_timestamp();
+ snprintf(time_str, size,
+ "\n\n ========== LOG DUMP TAKEN AT : %s =========\n", ts);
+}
+
+int
+dhd_print_time_str(const void *user_buf, void *fp, uint32 len, void *pos)
+{
+ char *ts = NULL;
+ int ret = 0;
+ char time_str[128];
+
+ memset_s(time_str, sizeof(time_str), 0, sizeof(time_str));
+ ts = dhd_log_dump_get_timestamp();
+ snprintf(time_str, sizeof(time_str),
+ "\n\n ========== LOG DUMP TAKEN AT : %s =========\n", ts);
+
+ /* write the timestamp hdr to the file first */
+ ret = dhd_export_debug_data(time_str, fp, user_buf, strlen(time_str), pos);
+ if (ret < 0) {
+ DHD_ERROR(("write file error, err = %d\n", ret));
+ }
+ return ret;
+}
+
+#if defined(DHD_FW_COREDUMP) && defined(DNGL_EVENT_SUPPORT)
+int
+dhd_print_health_chk_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
+ void *fp, uint32 len, void *pos)
+{
+ int ret = BCME_OK;
+ log_dump_section_hdr_t sec_hdr;
+ dhd_info_t *dhd_info;
+
+ if (dev) {
+ dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
+ dhdp = &dhd_info->pub;
+ }
+
+ if (!dhdp)
+ return BCME_ERROR;
+
+ dhd_init_sec_hdr(&sec_hdr);
+
+ if (dhdp->memdump_type == DUMP_TYPE_DONGLE_HOST_EVENT) {
+ /* write the section header first */
+ ret = dhd_export_debug_data(HEALTH_CHK_LOG_HDR, fp, user_buf,
+ strlen(HEALTH_CHK_LOG_HDR), pos);
+ if (ret < 0)
+ goto exit;
+
+ len -= (uint32)strlen(HEALTH_CHK_LOG_HDR);
+ sec_hdr.type = LOG_DUMP_SECTION_HEALTH_CHK;
+ sec_hdr.length = HEALTH_CHK_BUF_SIZE;
+ ret = dhd_export_debug_data((char *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), pos);
+ if (ret < 0)
+ goto exit;
+
+ len -= (uint32)sizeof(sec_hdr);
+ /* write the log */
+ ret = dhd_export_debug_data((char *)dhdp->health_chk_event_data, fp,
+ user_buf, len, pos);
+ if (ret < 0)
+ goto exit;
+ }
+exit:
+ return ret;
+}
+#endif /* DHD_FW_COREDUMP && DNGL_EVENT_SUPPORT */
+
+#if defined(BCMPCIE)
+int
+dhd_print_ext_trap_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
+ void *fp, uint32 len, void *pos)
+{
+ int ret = BCME_OK;
+ log_dump_section_hdr_t sec_hdr;
+ dhd_info_t *dhd_info;
+
+ if (dev) {
+ dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
+ dhdp = &dhd_info->pub;
+ }
+
+ if (!dhdp)
+ return BCME_ERROR;
+
+ dhd_init_sec_hdr(&sec_hdr);
+
+ /* append extended trap data to the file in case of traps */
+ if (dhdp->dongle_trap_occured &&
+ dhdp->extended_trap_data) {
+ /* write the section header first */
+ ret = dhd_export_debug_data(EXT_TRAP_LOG_HDR, fp, user_buf,
+ strlen(EXT_TRAP_LOG_HDR), pos);
+ if (ret < 0)
+ goto exit;
+
+ len -= (uint32)strlen(EXT_TRAP_LOG_HDR);
+ sec_hdr.type = LOG_DUMP_SECTION_EXT_TRAP;
+ sec_hdr.length = BCMPCIE_EXT_TRAP_DATA_MAXLEN;
+ ret = dhd_export_debug_data((uint8 *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), pos);
+ if (ret < 0)
+ goto exit;
+
+ len -= (uint32)sizeof(sec_hdr);
+ /* write the log */
+ ret = dhd_export_debug_data((uint8 *)dhdp->extended_trap_data, fp,
+ user_buf, len, pos);
+ if (ret < 0)
+ goto exit;
+ }
+exit:
+ return ret;
+}
+#endif /* BCMPCIE */
+
+int
+dhd_print_dump_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
+ void *fp, uint32 len, void *pos)
+{
+ int ret = BCME_OK;
+ log_dump_section_hdr_t sec_hdr;
+ dhd_info_t *dhd_info;
+
+ if (dev) {
+ dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
+ dhdp = &dhd_info->pub;
+ }
+
+ if (!dhdp)
+ return BCME_ERROR;
+
+ dhd_init_sec_hdr(&sec_hdr);
+
+ ret = dhd_export_debug_data(DHD_DUMP_LOG_HDR, fp, user_buf, strlen(DHD_DUMP_LOG_HDR), pos);
+ if (ret < 0)
+ goto exit;
+
+ len -= (uint32)strlen(DHD_DUMP_LOG_HDR);
+ sec_hdr.type = LOG_DUMP_SECTION_DHD_DUMP;
+ sec_hdr.length = len;
+ ret = dhd_export_debug_data((char *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), pos);
+ if (ret < 0)
+ goto exit;
+
+ len -= (uint32)sizeof(sec_hdr);
+
+ if (dhdp->concise_dbg_buf) {
+ dhd_dump(dhdp, (char *)dhdp->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
+ ret = dhd_export_debug_data(dhdp->concise_dbg_buf, fp, user_buf, len, pos);
+ if (ret < 0)
+ goto exit;
+ }
+
+exit:
+ return ret;
+}
+
+int
+dhd_print_cookie_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
+ void *fp, uint32 len, void *pos)
+{
+ int ret = BCME_OK;
+ dhd_info_t *dhd_info;
+
+ if (dev) {
+ dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
+ dhdp = &dhd_info->pub;
+ }
+
+ if (!dhdp)
+ return BCME_ERROR;
+
+ if (dhdp->logdump_cookie && dhd_logdump_cookie_count(dhdp) > 0) {
+ ret = dhd_log_dump_cookie_to_file(dhdp, fp, user_buf, (unsigned long *)pos);
+ }
+ return ret;
+}
+
+#ifdef DHD_DUMP_PCIE_RINGS
+int
+dhd_print_flowring_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
+ void *fp, uint32 len, void *pos)
+{
+ log_dump_section_hdr_t sec_hdr;
+ int ret = BCME_OK;
+ int remain_len = 0;
+ dhd_info_t *dhd_info;
+
+ if (dev) {
+ dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
+ dhdp = &dhd_info->pub;
+ }
+
+ if (!dhdp)
+ return BCME_ERROR;
+
+ dhd_init_sec_hdr(&sec_hdr);
+
+ remain_len = dhd_dump(dhdp, (char *)dhdp->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
+ if (remain_len <= 0 || remain_len >= CONCISE_DUMP_BUFLEN) {
+ DHD_ERROR(("%s: error getting concise debug info !\n",
+ __FUNCTION__));
+ return BCME_ERROR;
+ }
+ memset(dhdp->concise_dbg_buf, 0, CONCISE_DUMP_BUFLEN);
+
+ /* write the section header first */
+ ret = dhd_export_debug_data(FLOWRING_DUMP_HDR, fp, user_buf,
+ strlen(FLOWRING_DUMP_HDR), pos);
+ if (ret < 0)
+ goto exit;
+
+ /* Write the ring summary */
+ ret = dhd_export_debug_data(dhdp->concise_dbg_buf, fp, user_buf,
+ (CONCISE_DUMP_BUFLEN - remain_len), pos);
+ if (ret < 0)
+ goto exit;
+
+ sec_hdr.type = LOG_DUMP_SECTION_FLOWRING;
+ sec_hdr.length = len;
+ ret = dhd_export_debug_data((char *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), pos);
+ if (ret < 0)
+ goto exit;
+
+ /* write the log */
+ ret = dhd_d2h_h2d_ring_dump(dhdp, fp, user_buf, (unsigned long *)pos, TRUE);
+ if (ret < 0)
+ goto exit;
+
+exit:
+ return ret;
+}
+#endif /* DHD_DUMP_PCIE_RINGS */
+
+#ifdef EWP_ECNTRS_LOGGING
+int
+dhd_print_ecntrs_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
+ void *fp, uint32 len, void *pos)
+{
+ log_dump_section_hdr_t sec_hdr;
+ int ret = BCME_OK;
+ dhd_info_t *dhd_info;
+
+ if (dev) {
+ dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
+ dhdp = &dhd_info->pub;
+ }
+
+ if (!dhdp)
+ return BCME_ERROR;
+
+ dhd_init_sec_hdr(&sec_hdr);
+
+ if (logdump_ecntr_enable &&
+ dhdp->ecntr_dbg_ring) {
+ sec_hdr.type = LOG_DUMP_SECTION_ECNTRS;
+ ret = dhd_dump_debug_ring(dhdp, dhdp->ecntr_dbg_ring,
+ user_buf, &sec_hdr, ECNTRS_LOG_HDR, len, LOG_DUMP_SECTION_ECNTRS);
+ }
+ return ret;
+}
+#endif /* EWP_ECNTRS_LOGGING */
+
+#ifdef EWP_RTT_LOGGING
+int
+dhd_print_rtt_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
+ void *fp, uint32 len, void *pos)
+{
+ log_dump_section_hdr_t sec_hdr;
+ int ret = BCME_OK;
+ dhd_info_t *dhd_info;
+
+ if (dev) {
+ dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
+ dhdp = &dhd_info->pub;
+ }
+
+ if (!dhdp)
+ return BCME_ERROR;
+
+ dhd_init_sec_hdr(&sec_hdr);
+
+ if (logdump_rtt_enable && dhdp->rtt_dbg_ring) {
+ ret = dhd_dump_debug_ring(dhdp, dhdp->rtt_dbg_ring,
+ user_buf, &sec_hdr, RTT_LOG_HDR, len, LOG_DUMP_SECTION_RTT);
+ }
+ return ret;
+}
+#endif /* EWP_RTT_LOGGING */
+
+#ifdef DHD_STATUS_LOGGING
+int
+dhd_print_status_log_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
+ void *fp, uint32 len, void *pos)
+{
+ dhd_info_t *dhd_info;
+
+ if (dev) {
+ dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
+ dhdp = &dhd_info->pub;
+ }
+
+ if (!dhdp) {
+ return BCME_ERROR;
+ }
+
+ return dhd_statlog_write_logdump(dhdp, user_buf, fp, len, pos);
+}
+
+uint32
+dhd_get_status_log_len(void *ndev, dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd_info;
+ uint32 length = 0;
+
+ if (ndev) {
+ dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
+ dhdp = &dhd_info->pub;
+ }
+
+ if (dhdp) {
+ length = dhd_statlog_get_logbuf_len(dhdp);
+ }
+
+ return length;
+}
+#endif /* DHD_STATUS_LOGGING */
+
+void
+dhd_init_sec_hdr(log_dump_section_hdr_t *sec_hdr)
+{
+ /* prep the section header */
+ memset(sec_hdr, 0, sizeof(*sec_hdr));
+ sec_hdr->magic = LOG_DUMP_MAGIC;
+ sec_hdr->timestamp = local_clock();
+}
+
+/* Must hold 'dhd_os_logdump_lock' before calling this function ! */
+static int
+do_dhd_log_dump(dhd_pub_t *dhdp, log_dump_type_t *type)
+{
+ int ret = 0, i = 0;
+ struct file *fp = NULL;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
+ mm_segment_t old_fs;
+ struct kstat stat;
+#endif
+ loff_t pos = 0;
+ char dump_path[128];
+ uint32 file_mode;
+ unsigned long flags = 0;
+ size_t log_size = 0;
+ size_t fspace_remain = 0;
+ char time_str[128];
+ unsigned int len = 0;
+ log_dump_section_hdr_t sec_hdr;
+ uint32 file_size = 0;
+
+ DHD_ERROR(("%s: ENTER \n", __FUNCTION__));
+
+ DHD_GENERAL_LOCK(dhdp, flags);
+ if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
+ DHD_GENERAL_UNLOCK(dhdp, flags);
+ DHD_ERROR(("%s: bus is down! can't collect log dump. \n", __FUNCTION__));
+ goto exit1;
+ }
+ DHD_BUS_BUSY_SET_IN_LOGDUMP(dhdp);
+ DHD_GENERAL_UNLOCK(dhdp, flags);
+
+ if ((ret = dhd_log_flush(dhdp, type)) < 0) {
+ goto exit1;
+ }
+ /* change to KERNEL_DS address limit */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+#endif
+
+ dhd_get_debug_dump_file_name(NULL, dhdp, dump_path, sizeof(dump_path));
+
+ DHD_ERROR(("debug_dump_path = %s\n", dump_path));
+ DHD_ERROR(("DHD version: %s\n", dhd_version));
+ DHD_ERROR(("F/W version: %s\n", fw_version));
+
+ dhd_log_dump_buf_addr(dhdp, type);
+
+ dhd_get_time_str(dhdp, time_str, 128);
+
+ /* if this is the first time after dhd is loaded,
+ * or, if periodic flush is disabled, clear the log file
+ */
+ if (!dhdp->logdump_periodic_flush || dhdp->last_file_posn == 0)
+ file_mode = O_CREAT | O_WRONLY | O_SYNC | O_TRUNC;
+ else
+ file_mode = O_CREAT | O_RDWR | O_SYNC;
+
+ fp = filp_open(dump_path, file_mode, 0664);
+ if (IS_ERR(fp)) {
+		/* On an Android-installed image, try the '/data' directory */
+#if defined(CONFIG_X86) && defined(OEM_ANDROID)
+ DHD_ERROR(("%s: File open error on Installed android image, trying /data...\n",
+ __FUNCTION__));
+ snprintf(dump_path, sizeof(dump_path), "/data/" DHD_DEBUG_DUMP_TYPE);
+ if (!dhdp->logdump_periodic_flush) {
+ snprintf(dump_path + strlen(dump_path),
+ sizeof(dump_path) - strlen(dump_path),
+ "_%s", dhdp->debug_dump_time_str);
+ }
+ fp = filp_open(dump_path, file_mode, 0664);
+ if (IS_ERR(fp)) {
+ ret = PTR_ERR(fp);
+ DHD_ERROR(("open file error, err = %d\n", ret));
+ goto exit2;
+ }
+ DHD_ERROR(("debug_dump_path = %s\n", dump_path));
+#else
+ ret = PTR_ERR(fp);
+ DHD_ERROR(("open file error, err = %d\n", ret));
+ goto exit2;
+#endif /* CONFIG_X86 && OEM_ANDROID */
+ }
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
+ ret = vfs_stat(dump_path, &stat);
+ if (ret < 0) {
+ DHD_ERROR(("file stat error, err = %d\n", ret));
+ goto exit2;
+ }
+ file_size = stat.size;
+#else
+ file_size = dhd_os_get_image_size(fp);
+ if (file_size <= 0) {
+ DHD_ERROR(("%s: get file size fails ! %d\n", __FUNCTION__, file_size));
+ goto exit2;
+ }
+#endif
+
+	/* if someone else has changed the file */
+ if (dhdp->last_file_posn != 0 &&
+ file_size < dhdp->last_file_posn) {
+ dhdp->last_file_posn = 0;
+ }
+
+ /* XXX: periodic flush is disabled by default, if enabled
+ * only periodic flushing of 'GENERAL' log dump buffer
+ * is supported, its not recommended to turn on periodic
+ * flushing, except for developer unit test.
+ */
+ if (dhdp->logdump_periodic_flush) {
+ log_size = strlen(time_str) + strlen(DHD_DUMP_LOG_HDR) + sizeof(sec_hdr);
+ /* calculate the amount of space required to dump all logs */
+ for (i = 0; i < DLD_BUFFER_NUM; ++i) {
+ if (*type != DLD_BUF_TYPE_ALL && i != *type)
+ continue;
+
+ if (g_dld_buf[i].wraparound) {
+ log_size += (unsigned long)g_dld_buf[i].max
+ - (unsigned long)g_dld_buf[i].buffer;
+ } else {
+ DHD_LOG_DUMP_BUF_LOCK(&g_dld_buf[i].lock, flags);
+ log_size += (unsigned long)g_dld_buf[i].present -
+ (unsigned long)g_dld_buf[i].front;
+ DHD_LOG_DUMP_BUF_UNLOCK(&g_dld_buf[i].lock, flags);
+ }
+ log_size += strlen(dld_hdrs[i].hdr_str) + sizeof(sec_hdr);
+
+ if (*type != DLD_BUF_TYPE_ALL && i == *type)
+ break;
+ }
+
+ ret = generic_file_llseek(fp, dhdp->last_file_posn, SEEK_CUR);
+ if (ret < 0) {
+ DHD_ERROR(("file seek last posn error ! err = %d \n", ret));
+ goto exit2;
+ }
+ pos = fp->f_pos;
+
+ /* if the max file size is reached, wrap around to beginning of the file
+ * we're treating the file as a large ring buffer
+ */
+ fspace_remain = logdump_max_filesize - pos;
+ if (log_size > fspace_remain) {
+ fp->f_pos -= pos;
+ pos = fp->f_pos;
+ }
+ }
+
+ dhd_print_time_str(0, fp, len, &pos);
+
+ for (i = 0; i < DLD_BUFFER_NUM; ++i) {
+
+ if (*type != DLD_BUF_TYPE_ALL && i != *type)
+ continue;
+
+ len = dhd_get_dld_len(i);
+ dhd_get_dld_log_dump(NULL, dhdp, 0, fp, len, i, &pos);
+ if (*type != DLD_BUF_TYPE_ALL)
+ break;
+ }
+
+#ifdef EWP_ECNTRS_LOGGING
+ if (*type == DLD_BUF_TYPE_ALL &&
+ logdump_ecntr_enable &&
+ dhdp->ecntr_dbg_ring) {
+ dhd_log_dump_ring_to_file(dhdp, dhdp->ecntr_dbg_ring,
+ fp, (unsigned long *)&pos,
+ &sec_hdr, ECNTRS_LOG_HDR, LOG_DUMP_SECTION_ECNTRS);
+ }
+#endif /* EWP_ECNTRS_LOGGING */
+
+#ifdef DHD_STATUS_LOGGING
+ if (dhdp->statlog) {
+ /* write the statlog */
+ len = dhd_get_status_log_len(NULL, dhdp);
+ if (len) {
+ if (dhd_print_status_log_data(NULL, dhdp, 0, fp,
+ len, &pos) < 0) {
+ goto exit2;
+ }
+ }
+ }
+#endif /* DHD_STATUS_LOGGING */
+
+#ifdef DHD_STATUS_LOGGING
+ if (dhdp->statlog) {
+ dhd_print_buf_addr(dhdp, "statlog_logbuf", dhd_statlog_get_logbuf(dhdp),
+ dhd_statlog_get_logbuf_len(dhdp));
+ }
+#endif /* DHD_STATUS_LOGGING */
+
+#ifdef EWP_RTT_LOGGING
+ if (*type == DLD_BUF_TYPE_ALL &&
+ logdump_rtt_enable &&
+ dhdp->rtt_dbg_ring) {
+ dhd_log_dump_ring_to_file(dhdp, dhdp->rtt_dbg_ring,
+ fp, (unsigned long *)&pos,
+ &sec_hdr, RTT_LOG_HDR, LOG_DUMP_SECTION_RTT);
+ }
+#endif /* EWP_RTT_LOGGING */
+
+#ifdef EWP_BCM_TRACE
+ if (*type == DLD_BUF_TYPE_ALL &&
+ dhdp->bcm_trace_dbg_ring) {
+ dhd_log_dump_ring_to_file(dhdp, dhdp->bcm_trace_dbg_ring,
+ fp, (unsigned long *)&pos,
+ &sec_hdr, BCM_TRACE_LOG_HDR, LOG_DUMP_SECTION_BCM_TRACE);
+ }
+#endif /* EWP_BCM_TRACE */
+
+#ifdef BCMPCIE
+ len = dhd_get_ext_trap_len(NULL, dhdp);
+ if (len) {
+ if (dhd_print_ext_trap_data(NULL, dhdp, 0, fp, len, &pos) < 0)
+ goto exit2;
+ }
+#endif /* BCMPCIE */
+
+#if defined(DHD_FW_COREDUMP) && defined (DNGL_EVENT_SUPPORT)
+ len = dhd_get_health_chk_len(NULL, dhdp);
+ if (len) {
+ if (dhd_print_health_chk_data(NULL, dhdp, 0, fp, len, &pos) < 0)
+ goto exit2;
+ }
+#endif /* DHD_FW_COREDUMP && DNGL_EVENT_SUPPORT */
+
+ len = dhd_get_dhd_dump_len(NULL, dhdp);
+ if (len) {
+ if (dhd_print_dump_data(NULL, dhdp, 0, fp, len, &pos) < 0)
+ goto exit2;
+ }
+
+ len = dhd_get_cookie_log_len(NULL, dhdp);
+ if (len) {
+ if (dhd_print_cookie_data(NULL, dhdp, 0, fp, len, &pos) < 0)
+ goto exit2;
+ }
+
+#ifdef DHD_DUMP_PCIE_RINGS
+ len = dhd_get_flowring_len(NULL, dhdp);
+ if (len) {
+ if (dhd_print_flowring_data(NULL, dhdp, 0, fp, len, &pos) < 0)
+ goto exit2;
+ }
+#endif
+
+ if (dhdp->logdump_periodic_flush) {
+ /* store the last position written to in the file for future use */
+ dhdp->last_file_posn = pos;
+ }
+
+exit2:
+ if (!IS_ERR(fp) && fp != NULL) {
+ filp_close(fp, NULL);
+ DHD_ERROR(("%s: Finished writing log dump to file - '%s' \n",
+ __FUNCTION__, dump_path));
+ }
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
+ set_fs(old_fs);
+#endif
+exit1:
+ if (type) {
+ MFREE(dhdp->osh, type, sizeof(*type));
+ }
+ DHD_GENERAL_LOCK(dhdp, flags);
+ DHD_BUS_BUSY_CLEAR_IN_LOGDUMP(dhdp);
+ dhd_os_busbusy_wake(dhdp);
+ DHD_GENERAL_UNLOCK(dhdp, flags);
+
+#ifdef DHD_DUMP_MNGR
+ if (ret >= 0) {
+ dhd_dump_file_manage_enqueue(dhdp, dump_path, DHD_DEBUG_DUMP_TYPE);
+ }
+#endif /* DHD_DUMP_MNGR */
+
+ return (ret < 0) ? BCME_ERROR : BCME_OK;
+}
+#endif /* DHD_LOG_DUMP */
+
+/* This function writes data to the file pointed to by fp, OR
+ * copies data to the user buffer sent by the upper layer (HAL).
+ */
+int
+dhd_export_debug_data(void *mem_buf, void *fp, const void *user_buf, uint32 buf_len, void *pos)
+{
+ int ret = BCME_OK;
+
+ if (fp) {
+ ret = vfs_write(fp, mem_buf, buf_len, (loff_t *)pos);
+ if (ret < 0) {
+ DHD_ERROR(("write file error, err = %d\n", ret));
+ goto exit;
+ }
+ } else {
+#ifdef CONFIG_COMPAT
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0))
+ if (in_compat_syscall())
+#else
+ if (is_compat_task())
+#endif /* LINUX_VER >= 4.6 */
+ {
+ void * usr_ptr = compat_ptr((uintptr_t) user_buf);
+ ret = copy_to_user((void *)((uintptr_t)usr_ptr + (*(int *)pos)),
+ mem_buf, buf_len);
+ if (ret) {
+ DHD_ERROR(("failed to copy into user buffer : %d\n", ret));
+ goto exit;
+ }
+ }
+ else
+#endif /* CONFIG_COMPAT */
+ {
+ ret = copy_to_user((void *)((uintptr_t)user_buf + (*(int *)pos)),
+ mem_buf, buf_len);
+ if (ret) {
+ DHD_ERROR(("failed to copy into user buffer : %d\n", ret));
+ goto exit;
+ }
+ }
+ (*(int *)pos) += buf_len;
+ }
+exit:
+ return ret;
+}
+
+#ifdef BCM_ROUTER_DHD
+void dhd_schedule_trap_log_dump(dhd_pub_t *dhdp,
+ uint8 *buf, uint32 size)
+{
+ dhd_write_file_t *wf = NULL;
+ wf = (dhd_write_file_t *)MALLOC(dhdp->osh, sizeof(dhd_write_file_t));
+ if (wf == NULL) {
+ DHD_ERROR(("%s: dhd write file memory allocation failed\n", __FUNCTION__));
+ return;
+ }
+ snprintf(wf->file_path, sizeof(wf->file_path), "%s", "/tmp/failed_if.txt");
+ wf->file_flags = O_CREAT | O_WRONLY | O_SYNC;
+ wf->buf = buf;
+ wf->bufsize = size;
+ dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)wf,
+ DHD_WQ_WORK_INFORM_DHD_MON, dhd_inform_dhd_monitor_handler,
+ DHD_WQ_WORK_PRIORITY_HIGH);
+}
+
+/* Returns the task_struct of the userspace process running with the given name */
+static struct task_struct *
+_get_task_info(const char *pname)
+{
+ struct task_struct *task;
+ if (!pname)
+ return NULL;
+
+ for_each_process(task) {
+ if (strcmp(pname, task->comm) == 0)
+ return task;
+ }
+
+ return NULL;
+}
+
+#define DHD_MONITOR_NS "dhd_monitor"
+extern void emergency_restart(void);
+
+static void
+dhd_inform_dhd_monitor_handler(void *handle, void *event_info, u8 event)
+{
+ dhd_info_t *dhd = handle;
+ dhd_write_file_t *wf = event_info;
+ struct task_struct *monitor_task;
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
+ return;
+ }
+ if (!event_info) {
+ DHD_ERROR(("%s: File info is NULL\n", __FUNCTION__));
+ return;
+ }
+ if (!wf->buf) {
+ DHD_ERROR(("%s: Unable to get failed interface name\n", __FUNCTION__));
+ goto exit;
+ }
+ if (write_file(wf->file_path, wf->file_flags, wf->buf, wf->bufsize)) {
+ DHD_ERROR(("%s: writing to the file failed\n", __FUNCTION__));
+ }
+exit:
+ MFREE(dhd->pub.osh, wf, sizeof(dhd_write_file_t));
+
+ /* check if dhd_monitor is running */
+ monitor_task = _get_task_info(DHD_MONITOR_NS);
+ if (monitor_task == NULL) {
+ /* If dhd_monitor is not running, handle recovery from here */
+
+ char *val = nvram_get("watchdog");
+ if (val && bcm_atoi(val)) {
+ /* watchdog enabled, so reboot */
+ DHD_ERROR(("%s: Dongle(wl%d) trap detected. Restarting the system\n",
+ __FUNCTION__, dhd->unit));
+
+ mdelay(1000);
+ emergency_restart();
+ while (1)
+ cpu_relax();
+ } else {
+ DHD_ERROR(("%s: Dongle(wl%d) trap detected. No watchdog.\n",
+ __FUNCTION__, dhd->unit));
+ }
+
+ return;
+ }
+
+ /* If monitor daemon is running, let's signal the monitor for recovery */
+ DHD_ERROR(("%s: Dongle(wl%d) trap detected. Send signal to dhd_monitor.\n",
+ __FUNCTION__, dhd->unit));
+
+ send_sig_info(SIGUSR1, (void *)1L, monitor_task);
+}
+#endif /* BCM_ROUTER_DHD */
+
+#ifdef BCMDBG
+#define DUMPMAC_BUF_SZ (128 * 1024)
+#define DUMPMAC_FILENAME_SZ 32
+
+static void
+_dhd_schedule_macdbg_dump(void *handle, void *event_info, u8 event)
+{
+ dhd_info_t *dhd = handle;
+ dhd_pub_t *dhdp = &dhd->pub;
+#ifndef BCM_ROUTER_DHD
+ char *dumpbuf = NULL;
+ int dumpbuf_len = 0;
+ uint16 dump_signature;
+ char dumpfilename[DUMPMAC_FILENAME_SZ] = {0, };
+#endif /* BCM_ROUTER_DHD */
+
+ ASSERT(event == DHD_WQ_WORK_MACDBG);
+ BCM_REFERENCE(event_info);
+
+ DHD_ERROR(("%s: Dongle(wl%d) macreg dump scheduled\n",
+ __FUNCTION__, dhd->unit));
+
+ DHD_OS_WAKE_LOCK(dhdp);
+
+ /* Make sure dongle stops running to avoid race condition in reading mac registers */
+ (void) dhd_wl_ioctl_set_intiovar(dhdp, "bus:disconnect", 99, WLC_SET_VAR, TRUE, 0);
+
+ /* In router, skip macregs dump as dhd_monitor will dump them */
+#ifndef BCM_ROUTER_DHD
+ dumpbuf = (char *)MALLOCZ(dhdp->osh, DUMPMAC_BUF_SZ);
+ if (dumpbuf) {
+ /* Write macdump to a file */
+
+ /* Get dump file signature */
+ dump_signature = (uint16)OSL_RAND();
+
+ /* PSMr */
+ if (dhd_macdbg_dumpmac(dhdp, dumpbuf, DUMPMAC_BUF_SZ,
+ &dumpbuf_len, FALSE) == BCME_OK) {
+ snprintf(dumpfilename, DUMPMAC_FILENAME_SZ,
+ "/tmp/d11reg_dump_%04X.txt", dump_signature);
+ DHD_ERROR(("%s: PSMr macreg dump to %s\n", __FUNCTION__, dumpfilename));
+ /* Write to a file */
+ if (write_file(dumpfilename, (O_CREAT | O_WRONLY | O_SYNC),
+ dumpbuf, dumpbuf_len)) {
+ DHD_ERROR(("%s: writing mac dump to the file failed\n",
+ __FUNCTION__));
+ }
+ memset(dumpbuf, 0, DUMPMAC_BUF_SZ);
+ memset(dumpfilename, 0, DUMPMAC_FILENAME_SZ);
+ dumpbuf_len = 0;
+ }
+
+ /* PSMx */
+ if (dhd_macdbg_dumpmac(dhdp, dumpbuf, DUMPMAC_BUF_SZ,
+ &dumpbuf_len, TRUE) == BCME_OK) {
+ snprintf(dumpfilename, DUMPMAC_FILENAME_SZ,
+ "/tmp/d11regx_dump_%04X.txt", dump_signature);
+ DHD_ERROR(("%s: PSMx macreg dump to %s\n", __FUNCTION__, dumpfilename));
+ /* Write to a file */
+ if (write_file(dumpfilename, (O_CREAT | O_WRONLY | O_SYNC),
+ dumpbuf, dumpbuf_len)) {
+ DHD_ERROR(("%s: writing mac dump to the file failed\n",
+ __FUNCTION__));
+ }
+ memset(dumpbuf, 0, DUMPMAC_BUF_SZ);
+ memset(dumpfilename, 0, DUMPMAC_FILENAME_SZ);
+ dumpbuf_len = 0;
+ }
+
+ /* SVMP */
+ if (dhd_macdbg_dumpsvmp(dhdp, dumpbuf, DUMPMAC_BUF_SZ,
+ &dumpbuf_len) == BCME_OK) {
+ snprintf(dumpfilename, DUMPMAC_FILENAME_SZ,
+ "/tmp/svmp_dump_%04X.txt", dump_signature);
+ DHD_ERROR(("%s: SVMP mems dump to %s\n", __FUNCTION__, dumpfilename));
+ /* Write to a file */
+ if (write_file(dumpfilename, (O_CREAT | O_WRONLY | O_SYNC),
+ dumpbuf, dumpbuf_len)) {
+ DHD_ERROR(("%s: writing svmp dump to the file failed\n",
+ __FUNCTION__));
+ }
+ memset(dumpbuf, 0, DUMPMAC_BUF_SZ);
+ memset(dumpfilename, 0, DUMPMAC_FILENAME_SZ);
+ dumpbuf_len = 0;
+ }
+
+ MFREE(dhdp->osh, dumpbuf, DUMPMAC_BUF_SZ);
+ } else {
+ DHD_ERROR(("%s: print macdump\n", __FUNCTION__));
+		/* Just print the dumps */
+ (void) dhd_macdbg_dumpmac(dhdp, NULL, 0, NULL, FALSE); /* PSMr */
+ (void) dhd_macdbg_dumpmac(dhdp, NULL, 0, NULL, TRUE); /* PSMx */
+ (void) dhd_macdbg_dumpsvmp(dhdp, NULL, 0, NULL);
+ }
+#endif /* BCM_ROUTER_DHD */
+
+ DHD_OS_WAKE_UNLOCK(dhdp);
+ dhd_deferred_work_set_skip(dhd->dhd_deferred_wq,
+ DHD_WQ_WORK_MACDBG, FALSE);
+}
+
+void
+dhd_schedule_macdbg_dump(dhd_pub_t *dhdp)
+{
+ DHD_ERROR(("%s: Dongle(wl%d) schedule macreg dump\n",
+ __FUNCTION__, dhdp->info->unit));
+
+ dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, NULL,
+ DHD_WQ_WORK_MACDBG, _dhd_schedule_macdbg_dump, DHD_WQ_WORK_PRIORITY_LOW);
+ dhd_deferred_work_set_skip(dhdp->info->dhd_deferred_wq,
+ DHD_WQ_WORK_MACDBG, TRUE);
+}
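+
+/* Scheduling note (illustrative): the skip flag armed above collapses
+ * duplicate MACDBG work items, and the handler re-arms scheduling by
+ * clearing the flag once the dump completes, so a typical trap path is:
+ *
+ *   dhd_schedule_macdbg_dump(dhdp);  // queue work; further requests skipped
+ *   ...                              // _dhd_schedule_macdbg_dump() runs,
+ *                                    // writes dumps, clears the skip flag
+ */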
+#endif /* BCMDBG */
+
+/*
+ * This call is to get the memdump size so that
+ * halutil can allocate a buffer of that size in user space.
+ */
+int
+dhd_os_socram_dump(struct net_device *dev, uint32 *dump_size)
+{
+ int ret = BCME_OK;
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ dhd_pub_t *dhdp = &dhd->pub;
+
+ if (dhdp->busstate == DHD_BUS_DOWN) {
+ DHD_ERROR(("%s: bus is down\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhdp)) {
+ DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state, so skip\n",
+ __FUNCTION__, dhdp->busstate, dhdp->dhd_bus_busy_state));
+ return BCME_ERROR;
+ }
+#ifdef DHD_PCIE_RUNTIMEPM
+ dhdpcie_runtime_bus_wake(dhdp, TRUE, __builtin_return_address(0));
+#endif /* DHD_PCIE_RUNTIMEPM */
+ ret = dhd_common_socram_dump(dhdp);
+ if (ret == BCME_OK) {
+ *dump_size = dhdp->soc_ram_length;
+ }
+ return ret;
+}
+
+/*
+ * This is to get the actual memdump after getting the memdump size
+ */
+int
+dhd_os_get_socram_dump(struct net_device *dev, char **buf, uint32 *size)
+{
+ int ret = BCME_OK;
+ int orig_len = 0;
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ dhd_pub_t *dhdp = &dhd->pub;
+ if (buf == NULL)
+ return BCME_ERROR;
+ orig_len = *size;
+ if (dhdp->soc_ram) {
+ if (orig_len >= dhdp->soc_ram_length) {
+ *buf = dhdp->soc_ram;
+ *size = dhdp->soc_ram_length;
+ } else {
+ ret = BCME_BUFTOOSHORT;
+			DHD_ERROR(("The buffer is too short to save"
+				" the memory dump (%d bytes needed)\n", dhdp->soc_ram_length));
+ }
+ } else {
+ DHD_ERROR(("socram_dump is not ready to get\n"));
+ ret = BCME_NOTREADY;
+ }
+ return ret;
+}
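+
+/* Usage sketch (illustrative): the two calls above are meant to be used as
+ * a pair: first query the dump size, then fetch a pointer to a dump of at
+ * least that size, e.g.:
+ *
+ *   uint32 sz = 0, len;
+ *   char *ram = NULL;
+ *   if (dhd_os_socram_dump(ndev, &sz) == BCME_OK) {
+ *       len = sz;  // capacity the caller can accept
+ *       if (dhd_os_get_socram_dump(ndev, &ram, &len) == BCME_OK) {
+ *           // ram points at dhdp->soc_ram; len is the actual dump length
+ *       }
+ *   }
+ */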
+
+#ifdef EWP_RTT_LOGGING
+uint32
+dhd_get_rtt_len(void *ndev, dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd_info;
+ log_dump_section_hdr_t sec_hdr;
+ int length = 0;
+ dhd_dbg_ring_t *ring;
+
+ if (ndev) {
+ dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
+ dhdp = &dhd_info->pub;
+ }
+
+ if (!dhdp)
+ return length;
+
+ if (logdump_rtt_enable && dhdp->rtt_dbg_ring) {
+ ring = (dhd_dbg_ring_t *)dhdp->rtt_dbg_ring;
+ length = ring->ring_size + strlen(RTT_LOG_HDR) + sizeof(sec_hdr);
+ }
+ return length;
+}
+#endif /* EWP_RTT_LOGGING */
+
+int
+dhd_os_get_version(struct net_device *dev, bool dhd_ver, char **buf, uint32 size)
+{
+ char *fw_str;
+
+ if (size == 0)
+ return BCME_BADARG;
+
+ fw_str = strstr(info_string, "Firmware: ");
+ if (fw_str == NULL) {
+ return BCME_ERROR;
+ }
+
+ bzero(*buf, size);
+ if (dhd_ver) {
+ strlcpy(*buf, dhd_version, size);
+ } else {
+ strlcpy(*buf, fw_str, size);
+ }
+ return BCME_OK;
+}
+
+#ifdef DHD_PKT_LOGGING
+int
+dhd_os_get_pktlog_dump(void *dev, const void *user_buf, uint32 len)
+{
+ int ret = BCME_OK;
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ dhd_pub_t *dhdp = &dhd->pub;
+ if (user_buf == NULL) {
+ DHD_ERROR(("%s(): user buffer is NULL\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ ret = dhd_pktlog_dump_write_memory(dhdp, user_buf, len);
+ if (ret < 0) {
+ DHD_ERROR(("%s(): fail to dump pktlog, err = %d\n", __FUNCTION__, ret));
+ return ret;
+ }
+ return ret;
+}
+
+uint32
+dhd_os_get_pktlog_dump_size(struct net_device *dev)
+{
+ uint32 size = 0;
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ dhd_pub_t *dhdp = &dhd->pub;
+
+ size = dhd_pktlog_get_dump_length(dhdp);
+ if (size == 0) {
+		DHD_ERROR(("%s(): failed to get pktlog size, err = %u\n", __FUNCTION__, size));
+ }
+ return size;
+}
+
+void
+dhd_os_get_pktlogdump_filename(struct net_device *dev, char *dump_path, int len)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ dhd_pub_t *dhdp = &dhd->pub;
+
+ dhd_pktlog_get_filename(dhdp, dump_path, len);
+}
+#endif /* DHD_PKT_LOGGING */
+#ifdef DNGL_AXI_ERROR_LOGGING
+int
+dhd_os_get_axi_error_dump(void *dev, const void *user_buf, uint32 len)
+{
+ int ret = BCME_OK;
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ dhd_pub_t *dhdp = &dhd->pub;
+ loff_t pos = 0;
+ if (user_buf == NULL) {
+ DHD_ERROR(("%s(): user buffer is NULL\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ ret = dhd_export_debug_data((char *)dhdp->axi_err_dump,
+ NULL, user_buf, sizeof(dhd_axi_error_dump_t), &pos);
+
+ if (ret < 0) {
+ DHD_ERROR(("%s(): fail to dump pktlog, err = %d\n", __FUNCTION__, ret));
+ return ret;
+ }
+ return ret;
+}
+
+int
+dhd_os_get_axi_error_dump_size(struct net_device *dev)
+{
+ int size = -1;
+
+ size = sizeof(dhd_axi_error_dump_t);
+ if (size < 0) {
+ DHD_ERROR(("%s(): fail to get axi error size, err = %d\n", __FUNCTION__, size));
+ }
+ return size;
+}
+
+void
+dhd_os_get_axi_error_filename(struct net_device *dev, char *dump_path, int len)
+{
+ snprintf(dump_path, len, "%s",
+ DHD_COMMON_DUMP_PATH DHD_DUMP_AXI_ERROR_FILENAME);
+}
+#endif /* DNGL_AXI_ERROR_LOGGING */
+#ifdef DHD_WMF
+/* Returns interface specific WMF configuration */
+dhd_wmf_t* dhd_wmf_conf(dhd_pub_t *dhdp, uint32 idx)
+{
+ dhd_info_t *dhd = dhdp->info;
+ dhd_if_t *ifp;
+
+ ASSERT(idx < DHD_MAX_IFS);
+
+ ifp = dhd->iflist[idx];
+ return &ifp->wmf;
+}
+#endif /* DHD_WMF */
+
+#if defined(BCM_ROUTER_DHD)
+void traffic_mgmt_pkt_set_prio(dhd_pub_t *dhdp, void * pktbuf)
+{
+ struct ether_header *eh;
+ struct ethervlan_header *evh;
+ uint8 *pktdata, *ip_body;
+ uint8 dwm_filter;
+ uint8 tos_tc = 0;
+ uint8 dscp = 0;
+ pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
+ eh = (struct ether_header *) pktdata;
+ ip_body = NULL;
+
+ if (dhdp->dhd_tm_dwm_tbl.dhd_dwm_enabled) {
+ if (eh->ether_type == hton16(ETHER_TYPE_8021Q)) {
+ evh = (struct ethervlan_header *)eh;
+ if ((evh->ether_type == hton16(ETHER_TYPE_IP)) ||
+ (evh->ether_type == hton16(ETHER_TYPE_IPV6))) {
+ ip_body = pktdata + sizeof(struct ethervlan_header);
+ }
+ } else if ((eh->ether_type == hton16(ETHER_TYPE_IP)) ||
+ (eh->ether_type == hton16(ETHER_TYPE_IPV6))) {
+ ip_body = pktdata + sizeof(struct ether_header);
+ }
+ if (ip_body) {
+ tos_tc = IP_TOS46(ip_body);
+ dscp = tos_tc >> IPV4_TOS_DSCP_SHIFT;
+ }
+
+ if (dscp < DHD_DWM_TBL_SIZE) {
+ dwm_filter = dhdp->dhd_tm_dwm_tbl.dhd_dwm_tbl[dscp];
+ if (DHD_TRF_MGMT_DWM_IS_FILTER_SET(dwm_filter)) {
+ PKTSETPRIO(pktbuf, DHD_TRF_MGMT_DWM_PRIO(dwm_filter));
+ }
+ }
+ }
+}
+#endif /* BCM_ROUTER_DHD */
+
+bool dhd_sta_associated(dhd_pub_t *dhdp, uint32 bssidx, uint8 *mac)
+{
+ return dhd_find_sta(dhdp, bssidx, mac) ? TRUE : FALSE;
+}
+
+#ifdef DHD_L2_FILTER
+arp_table_t*
+dhd_get_ifp_arp_table_handle(dhd_pub_t *dhdp, uint32 bssidx)
+{
+ dhd_info_t *dhd = dhdp->info;
+ dhd_if_t *ifp;
+
+ ASSERT(bssidx < DHD_MAX_IFS);
+
+ ifp = dhd->iflist[bssidx];
+ return ifp->phnd_arp_table;
+}
+
+int dhd_get_parp_status(dhd_pub_t *dhdp, uint32 idx)
+{
+ dhd_info_t *dhd = dhdp->info;
+ dhd_if_t *ifp;
+
+ ASSERT(idx < DHD_MAX_IFS);
+
+ ifp = dhd->iflist[idx];
+
+ if (ifp)
+ return ifp->parp_enable;
+ else
+ return FALSE;
+}
+
+/* Set interface specific proxy arp configuration */
+int dhd_set_parp_status(dhd_pub_t *dhdp, uint32 idx, int val)
+{
+ dhd_info_t *dhd = dhdp->info;
+ dhd_if_t *ifp;
+ ASSERT(idx < DHD_MAX_IFS);
+ ifp = dhd->iflist[idx];
+
+ if (!ifp)
+ return BCME_ERROR;
+
+ /* At present all 3 variables are being
+ * handled at once
+ */
+ ifp->parp_enable = val;
+ ifp->parp_discard = val;
+ ifp->parp_allnode = val;
+
+ /* Flush ARP entries when disabled */
+ if (val == FALSE) {
+ bcm_l2_filter_arp_table_update(dhdp->osh, ifp->phnd_arp_table, TRUE, NULL,
+ FALSE, dhdp->tickcnt);
+ }
+ return BCME_OK;
+}
+
+bool dhd_parp_discard_is_enabled(dhd_pub_t *dhdp, uint32 idx)
+{
+ dhd_info_t *dhd = dhdp->info;
+ dhd_if_t *ifp;
+
+ ASSERT(idx < DHD_MAX_IFS);
+
+ ifp = dhd->iflist[idx];
+
+ ASSERT(ifp);
+ return ifp->parp_discard;
+}
+
+bool
+dhd_parp_allnode_is_enabled(dhd_pub_t *dhdp, uint32 idx)
+{
+ dhd_info_t *dhd = dhdp->info;
+ dhd_if_t *ifp;
+
+ ASSERT(idx < DHD_MAX_IFS);
+
+ ifp = dhd->iflist[idx];
+
+ ASSERT(ifp);
+
+ return ifp->parp_allnode;
+}
+
+int dhd_get_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx)
+{
+ dhd_info_t *dhd = dhdp->info;
+ dhd_if_t *ifp;
+
+ ASSERT(idx < DHD_MAX_IFS);
+
+ ifp = dhd->iflist[idx];
+
+ ASSERT(ifp);
+
+ return ifp->dhcp_unicast;
+}
+
+int dhd_set_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx, int val)
+{
+ dhd_info_t *dhd = dhdp->info;
+ dhd_if_t *ifp;
+ ASSERT(idx < DHD_MAX_IFS);
+ ifp = dhd->iflist[idx];
+
+ ASSERT(ifp);
+
+ ifp->dhcp_unicast = val;
+ return BCME_OK;
+}
+
+int dhd_get_block_ping_status(dhd_pub_t *dhdp, uint32 idx)
+{
+ dhd_info_t *dhd = dhdp->info;
+ dhd_if_t *ifp;
+
+ ASSERT(idx < DHD_MAX_IFS);
+
+ ifp = dhd->iflist[idx];
+
+ ASSERT(ifp);
+
+ return ifp->block_ping;
+}
+
+int dhd_set_block_ping_status(dhd_pub_t *dhdp, uint32 idx, int val)
+{
+ dhd_info_t *dhd = dhdp->info;
+ dhd_if_t *ifp;
+ ASSERT(idx < DHD_MAX_IFS);
+ ifp = dhd->iflist[idx];
+
+ ASSERT(ifp);
+
+ ifp->block_ping = val;
+ /* Disable rx_pkt_chain feature for interface if block_ping option is
+ * enabled
+ */
+ dhd_update_rx_pkt_chainable_state(dhdp, idx);
+ return BCME_OK;
+}
+
+int dhd_get_grat_arp_status(dhd_pub_t *dhdp, uint32 idx)
+{
+ dhd_info_t *dhd = dhdp->info;
+ dhd_if_t *ifp;
+
+ ASSERT(idx < DHD_MAX_IFS);
+
+ ifp = dhd->iflist[idx];
+
+ ASSERT(ifp);
+
+ return ifp->grat_arp;
+}
+
+int dhd_set_grat_arp_status(dhd_pub_t *dhdp, uint32 idx, int val)
+{
+ dhd_info_t *dhd = dhdp->info;
+ dhd_if_t *ifp;
+ ASSERT(idx < DHD_MAX_IFS);
+ ifp = dhd->iflist[idx];
+
+ ASSERT(ifp);
+
+ ifp->grat_arp = val;
+
+ return BCME_OK;
+}
+
+int dhd_get_block_tdls_status(dhd_pub_t *dhdp, uint32 idx)
+{
+ dhd_info_t *dhd = dhdp->info;
+ dhd_if_t *ifp;
+
+ ASSERT(idx < DHD_MAX_IFS);
+
+ ifp = dhd->iflist[idx];
+
+ ASSERT(ifp);
+
+ return ifp->block_tdls;
+}
+
+int dhd_set_block_tdls_status(dhd_pub_t *dhdp, uint32 idx, int val)
+{
+ dhd_info_t *dhd = dhdp->info;
+ dhd_if_t *ifp;
+ ASSERT(idx < DHD_MAX_IFS);
+ ifp = dhd->iflist[idx];
+
+ ASSERT(ifp);
+
+ ifp->block_tdls = val;
+
+ return BCME_OK;
+}
+#endif /* DHD_L2_FILTER */
+
+#if defined(SET_XPS_CPUS)
+int dhd_xps_cpus_enable(struct net_device *net, int enable)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(net);
+ dhd_if_t *ifp;
+ int ifidx;
+ char * XPS_CPU_SETBUF;
+
+ ifidx = dhd_net2idx(dhd, net);
+ if (ifidx == DHD_BAD_IF) {
+ DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
+ return -ENODEV;
+ }
+
+ if (!dhd->pub.conf->xps_cpus)
+ return -ENODEV;
+
+ if (ifidx == PRIMARY_INF) {
+ if (dhd->pub.op_mode == DHD_FLAG_IBSS_MODE) {
+ DHD_INFO(("%s : set for IBSS.\n", __FUNCTION__));
+ XPS_CPU_SETBUF = RPS_CPUS_MASK_IBSS;
+ } else {
+ DHD_INFO(("%s : set for BSS.\n", __FUNCTION__));
+ XPS_CPU_SETBUF = RPS_CPUS_MASK;
+ }
+ } else if (ifidx == VIRTUAL_INF) {
+ DHD_INFO(("%s : set for P2P.\n", __FUNCTION__));
+ XPS_CPU_SETBUF = RPS_CPUS_MASK_P2P;
+ } else {
+ DHD_ERROR(("%s : Invalid index : %d.\n", __FUNCTION__, ifidx));
+ return -EINVAL;
+ }
+
+ ifp = dhd->iflist[ifidx];
+ if (ifp) {
+ if (enable) {
+ DHD_INFO(("%s : set xps_cpus as [%s]\n", __FUNCTION__, XPS_CPU_SETBUF));
+ custom_xps_map_set(ifp->net, XPS_CPU_SETBUF, strlen(XPS_CPU_SETBUF));
+ } else {
+ custom_xps_map_clear(ifp->net);
+ }
+ } else {
+ DHD_ERROR(("%s : ifp is NULL!!\n", __FUNCTION__));
+ return -ENODEV;
+ }
+ return BCME_OK;
+}
+
+int custom_xps_map_set(struct net_device *net, char *buf, size_t len)
+{
+ cpumask_var_t mask;
+ int err;
+
+ DHD_INFO(("%s : Entered.\n", __FUNCTION__));
+
+ if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
+ DHD_ERROR(("%s : alloc_cpumask_var fail.\n", __FUNCTION__));
+ return -ENOMEM;
+ }
+
+ err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
+ if (err) {
+ free_cpumask_var(mask);
+ DHD_ERROR(("%s : bitmap_parse fail.\n", __FUNCTION__));
+ return err;
+ }
+
+ err = netif_set_xps_queue(net, mask, 0);
+
+ free_cpumask_var(mask);
+
+ if (0 == err)
+ WL_MSG(net->name, "Done. mapping cpu\n");
+
+ return err;
+}
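+
+/* Mask format note (illustrative): bitmap_parse() above accepts the same
+ * hex cpumask string as /sys/class/net/<if>/queues/tx-0/xps_cpus, so e.g.
+ * (hypothetical calls; the driver actually passes RPS_CPUS_MASK*):
+ *
+ *   custom_xps_map_set(net, "f", 1);  // CPUs 0-3
+ *   custom_xps_map_set(net, "2", 1);  // CPU 1 only
+ */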
+
+void custom_xps_map_clear(struct net_device *net)
+{
+ struct xps_dev_maps *dev_maps;
+
+ DHD_INFO(("%s : Entered.\n", __FUNCTION__));
+
+ rcu_read_lock();
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0))
+ dev_maps = rcu_dereference(net->xps_cpus_map);
+#else
+ dev_maps = rcu_dereference(net->xps_maps);
+#endif
+ rcu_read_unlock();
+
+ if (dev_maps) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0))
+ RCU_INIT_POINTER(net->xps_cpus_map, NULL);
+#else
+ RCU_INIT_POINTER(net->xps_maps, NULL);
+#endif
+ kfree_rcu(dev_maps, rcu);
+ DHD_INFO(("%s : xps_cpus map clear.\n", __FUNCTION__));
+ }
+}
+#endif // endif
+
+#if defined(SET_RPS_CPUS)
+int dhd_rps_cpus_enable(struct net_device *net, int enable)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(net);
+ dhd_if_t *ifp;
+ int ifidx;
+ char * RPS_CPU_SETBUF;
+
+ ifidx = dhd_net2idx(dhd, net);
+ if (ifidx == DHD_BAD_IF) {
+ DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
+ return -ENODEV;
+ }
+
+ if (!dhd->pub.conf->rps_cpus)
+ return -ENODEV;
+
+ if (ifidx == PRIMARY_INF) {
+ if (dhd->pub.op_mode == DHD_FLAG_IBSS_MODE) {
+ DHD_INFO(("%s : set for IBSS.\n", __FUNCTION__));
+ RPS_CPU_SETBUF = RPS_CPUS_MASK_IBSS;
+ } else {
+ DHD_INFO(("%s : set for BSS.\n", __FUNCTION__));
+ RPS_CPU_SETBUF = RPS_CPUS_MASK;
+ }
+ } else if (ifidx == VIRTUAL_INF) {
+ DHD_INFO(("%s : set for P2P.\n", __FUNCTION__));
+ RPS_CPU_SETBUF = RPS_CPUS_MASK_P2P;
+ } else {
+ DHD_ERROR(("%s : Invalid index : %d.\n", __FUNCTION__, ifidx));
+ return -EINVAL;
+ }
+
+ ifp = dhd->iflist[ifidx];
+ if (ifp) {
+ if (enable) {
+ DHD_INFO(("%s : set rps_cpus as [%s]\n", __FUNCTION__, RPS_CPU_SETBUF));
+ custom_rps_map_set(ifp->net->_rx, RPS_CPU_SETBUF, strlen(RPS_CPU_SETBUF));
+ } else {
+ custom_rps_map_clear(ifp->net->_rx);
+ }
+ } else {
+ DHD_ERROR(("%s : ifp is NULL!!\n", __FUNCTION__));
+ return -ENODEV;
+ }
+ return BCME_OK;
+}
+
+int custom_rps_map_set(struct netdev_rx_queue *queue, char *buf, size_t len)
+{
+ struct rps_map *old_map, *map;
+ cpumask_var_t mask;
+ int err, cpu, i;
+ static DEFINE_SPINLOCK(rps_map_lock);
+
+ DHD_INFO(("%s : Entered.\n", __FUNCTION__));
+
+ if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
+ DHD_ERROR(("%s : alloc_cpumask_var fail.\n", __FUNCTION__));
+ return -ENOMEM;
+ }
+
+ err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
+ if (err) {
+ free_cpumask_var(mask);
+ DHD_ERROR(("%s : bitmap_parse fail.\n", __FUNCTION__));
+ return err;
+ }
+
+ map = kzalloc(max_t(unsigned int,
+ RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
+ GFP_KERNEL);
+ if (!map) {
+ free_cpumask_var(mask);
+ DHD_ERROR(("%s : map malloc fail.\n", __FUNCTION__));
+ return -ENOMEM;
+ }
+
+ i = 0;
+ for_each_cpu(cpu, mask) {
+ map->cpus[i++] = cpu;
+ }
+
+ if (i) {
+ map->len = i;
+ } else {
+ kfree(map);
+ map = NULL;
+ free_cpumask_var(mask);
+ DHD_ERROR(("%s : mapping cpu fail.\n", __FUNCTION__));
+ return -1;
+ }
+
+ spin_lock(&rps_map_lock);
+ old_map = rcu_dereference_protected(queue->rps_map,
+ lockdep_is_held(&rps_map_lock));
+ rcu_assign_pointer(queue->rps_map, map);
+ spin_unlock(&rps_map_lock);
+
+ if (map) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 2, 0))
+ static_key_slow_inc(&rps_needed.key);
+#else
+ static_key_slow_inc(&rps_needed);
+#endif
+ }
+ if (old_map) {
+ kfree_rcu(old_map, rcu);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 2, 0))
+ static_key_slow_dec(&rps_needed.key);
+#else
+ static_key_slow_dec(&rps_needed);
+#endif
+ }
+ free_cpumask_var(mask);
+
+	DHD_INFO(("%s : Done. mapping cpu number : %d\n", __FUNCTION__, map->len));
+ return map->len;
+}
+
+void custom_rps_map_clear(struct netdev_rx_queue *queue)
+{
+ struct rps_map *map;
+
+ DHD_INFO(("%s : Entered.\n", __FUNCTION__));
+
+ map = rcu_dereference_protected(queue->rps_map, 1);
+ if (map) {
+ RCU_INIT_POINTER(queue->rps_map, NULL);
+ kfree_rcu(map, rcu);
+ DHD_INFO(("%s : rps_cpus map clear.\n", __FUNCTION__));
+ }
+}
+#endif // endif
+
+#ifdef DHD_BUZZZ_LOG_ENABLED
+
+static int
+dhd_buzzz_thread(void *data)
+{
+ tsk_ctl_t *tsk = (tsk_ctl_t *)data;
+
+ DAEMONIZE("dhd_buzzz");
+
+ /* signal: thread has started */
+ complete(&tsk->completed);
+
+ /* Run until signal received */
+ while (1) {
+ if (down_interruptible(&tsk->sema) == 0) {
+ if (tsk->terminated) {
+ break;
+ }
+ printk("%s: start to dump...\n", __FUNCTION__);
+ dhd_buzzz_dump();
+ } else {
+ break;
+ }
+ }
+ complete_and_exit(&tsk->completed, 0);
+}
+
+void* dhd_os_create_buzzz_thread(void)
+{
+ tsk_ctl_t *thr_buzzz_ctl = NULL;
+
+ thr_buzzz_ctl = kmalloc(sizeof(tsk_ctl_t), GFP_KERNEL);
+ if (!thr_buzzz_ctl) {
+ return NULL;
+ }
+
+ PROC_START(dhd_buzzz_thread, NULL, thr_buzzz_ctl, 0, "dhd_buzzz");
+
+ return (void *)thr_buzzz_ctl;
+}
+
+void dhd_os_destroy_buzzz_thread(void *thr_hdl)
+{
+ tsk_ctl_t *thr_buzzz_ctl = (tsk_ctl_t *)thr_hdl;
+
+ if (!thr_buzzz_ctl) {
+ return;
+ }
+
+ PROC_STOP(thr_buzzz_ctl);
+ kfree(thr_buzzz_ctl);
+}
+
+void dhd_os_sched_buzzz_thread(void *thr_hdl)
+{
+ tsk_ctl_t *thr_buzzz_ctl = (tsk_ctl_t *)thr_hdl;
+
+ if (!thr_buzzz_ctl) {
+ return;
+ }
+
+ if (thr_buzzz_ctl->thr_pid >= 0) {
+ up(&thr_buzzz_ctl->sema);
+ }
+}
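+
+/* Lifecycle sketch (illustrative): the three helpers above are intended to
+ * be used together from the driver attach/detach paths, e.g.:
+ *
+ *   void *h = dhd_os_create_buzzz_thread();  // spawn the "dhd_buzzz" kthread
+ *   dhd_os_sched_buzzz_thread(h);            // kick a dump via the semaphore
+ *   dhd_os_destroy_buzzz_thread(h);          // stop the thread and free h
+ */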
+#endif /* DHD_BUZZZ_LOG_ENABLED */
+
+#ifdef DHD_DEBUG_PAGEALLOC
+/* XXX Additional kernel implementation is needed to use this function at
+ * the top of the check_poison_mem() function in the mm/debug-pagealloc.c file.
+ * Please check whether the code below is implemented in your Linux kernel first.
+ *
+ * - mm/debug-pagealloc.c
+ *
+ * // for DHD_DEBUG_PAGEALLOC
+ * typedef void (*page_corrupt_cb_t)(void *handle, void *addr_corrupt, uint addr_len);
+ * page_corrupt_cb_t corrupt_cb = NULL;
+ * void *corrupt_cb_handle = NULL;
+ *
+ * void register_page_corrupt_cb(page_corrupt_cb_t cb, void *handle)
+ * {
+ * corrupt_cb = cb;
+ * corrupt_cb_handle = handle;
+ * }
+ * EXPORT_SYMBOL(register_page_corrupt_cb);
+ *
+ * extern void dhd_page_corrupt_cb(void *handle, void *addr_corrupt, size_t len);
+ *
+ * static void check_poison_mem(unsigned char *mem, size_t bytes)
+ * {
+ * ......
+ *
+ * if (!__ratelimit(&ratelimit))
+ * return;
+ * else if (start == end && single_bit_flip(*start, PAGE_POISON))
+ * printk(KERN_ERR "pagealloc: single bit error\n");
+ * else
+ * printk(KERN_ERR "pagealloc: memory corruption\n");
+ *
+ * print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1, start,
+ * end - start + 1, 1);
+ *
+ * // for DHD_DEBUG_PAGEALLOC
+ * dhd_page_corrupt_cb(corrupt_cb_handle, start, end - start + 1);
+ *
+ * dump_stack();
+ * }
+ *
+ */
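+
+/* Registration sketch (illustrative): with the kernel hook above in place,
+ * the driver side would register its callback once a dhd_pub_t is available,
+ * along the lines of:
+ *
+ *   extern void register_page_corrupt_cb(page_corrupt_cb_t cb, void *handle);
+ *   register_page_corrupt_cb(dhd_page_corrupt_cb, dhdp);
+ *
+ * so that check_poison_mem() lands in dhd_page_corrupt_cb() below.
+ */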
+
+void
+dhd_page_corrupt_cb(void *handle, void *addr_corrupt, size_t len)
+{
+ dhd_pub_t *dhdp = (dhd_pub_t *)handle;
+
+ DHD_ERROR(("%s: Got dhd_page_corrupt_cb 0x%p %d\n",
+ __FUNCTION__, addr_corrupt, (uint32)len));
+
+ DHD_OS_WAKE_LOCK(dhdp);
+ prhex("Page Corruption:", addr_corrupt, len);
+ dhd_dump_to_kernelog(dhdp);
+#if defined(BCMPCIE) && defined(DHD_FW_COREDUMP)
+ /* Load the dongle side dump to host memory and then BUG_ON() */
+ dhdp->memdump_enabled = DUMP_MEMONLY;
+ dhdp->memdump_type = DUMP_TYPE_MEMORY_CORRUPTION;
+ dhd_bus_mem_dump(dhdp);
+#endif /* BCMPCIE && DHD_FW_COREDUMP */
+ DHD_OS_WAKE_UNLOCK(dhdp);
+}
+EXPORT_SYMBOL(dhd_page_corrupt_cb);
+#endif /* DHD_DEBUG_PAGEALLOC */
+
+#if defined(BCMPCIE) && defined(DHD_PKTID_AUDIT_ENABLED)
+void
+dhd_pktid_error_handler(dhd_pub_t *dhdp)
+{
+ DHD_ERROR(("%s: Got Pkt Id Audit failure \n", __FUNCTION__));
+ DHD_OS_WAKE_LOCK(dhdp);
+ dhd_dump_to_kernelog(dhdp);
+#ifdef DHD_FW_COREDUMP
+ /* Load the dongle side dump to host memory */
+ if (dhdp->memdump_enabled == DUMP_DISABLED) {
+ dhdp->memdump_enabled = DUMP_MEMFILE;
+ }
+ dhdp->memdump_type = DUMP_TYPE_PKTID_AUDIT_FAILURE;
+ dhd_bus_mem_dump(dhdp);
+#endif /* DHD_FW_COREDUMP */
+#ifdef OEM_ANDROID
+ /* XXX Send HANG event to Android Framework for recovery */
+ dhdp->hang_reason = HANG_REASON_PCIE_PKTID_ERROR;
+ dhd_os_check_hang(dhdp, 0, -EREMOTEIO);
+#endif /* OEM_ANDROID */
+ DHD_OS_WAKE_UNLOCK(dhdp);
+}
+#endif /* BCMPCIE && DHD_PKTID_AUDIT_ENABLED */
+
+struct net_device *
+dhd_linux_get_primary_netdev(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd = dhdp->info;
+
+ if (dhd->iflist[0] && dhd->iflist[0]->net)
+ return dhd->iflist[0]->net;
+ else
+ return NULL;
+}
+
+#ifdef DHD_PKTTS
+/**
+ * dhd_msgbuf_get_ipv6_id - return ipv6 identification number
+ * return 0 in case of error
+ *
+ * @pkt: packet pointer
+ */
+uint
+dhd_msgbuf_get_ipv6_id(void *pkt)
+{
+ struct frag_hdr _frag;
+ const struct sk_buff *skb;
+ const struct frag_hdr *fh;
+ unsigned int offset = 0;
+ int err;
+
+ skb = (struct sk_buff *)pkt;
+ err = ipv6_find_hdr(skb, &offset, NEXTHDR_FRAGMENT, NULL, NULL);
+ if (err < 0) {
+ return 0;
+ }
+
+ fh = skb_header_pointer(skb, offset, sizeof(_frag), &_frag);
+ if (fh == NULL) {
+ return 0;
+ }
+
+ return ntohl(fh->identification);
+}
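+
+/* Note (illustrative): ipv6_find_hdr() walks the IPv6 extension-header
+ * chain, so a non-zero id is only available for fragmented traffic; for
+ * unfragmented packets (no NEXTHDR_FRAGMENT header) the function returns 0,
+ * which callers are assumed to treat as "no id available" when correlating
+ * packets for timestamping under DHD_PKTTS.
+ */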
+
+/**
+ * dhd_create_to_notifier_ts - create BCM_NL_TS netlink socket
+ *
+ * @void:
+ */
+int
+dhd_create_to_notifier_ts(void)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
+ /* Kernel 3.6 onwards this API accepts only 3 arguments. */
+ nl_to_ts = netlink_kernel_create(&init_net, BCM_NL_TS, &dhd_netlink_ts);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) */
+ if (!nl_to_ts) {
+ DHD_ERROR(("Error creating ts socket.\n"));
+ return -1;
+ }
+ DHD_INFO(("nl_to socket created successfully...\n"));
+ return 0;
+}
+
+/**
+ * dhd_destroy_to_notifier_ts - destroy BCM_NL_TS netlink socket
+ *
+ * @void:
+ */
+void
+dhd_destroy_to_notifier_ts(void)
+{
+ DHD_INFO(("Destroying nl_to_ts socket\n"));
+ if (nl_to_ts) {
+ netlink_kernel_release(nl_to_ts);
+ nl_to_ts = NULL;
+ }
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
+/**
+ * dhd_recv_msg_from_ts - called when a message is received on the BCM_NL_TS
+ * netlink socket; records the PID of the app currently using this socket
+ *
+ * @skb: rx packet socket buffer
+ */
+static void
+dhd_recv_msg_from_ts(struct sk_buff *skb)
+{
+ sender_pid_ts = ((struct nlmsghdr *)(skb->data))->nlmsg_pid;
+ DHD_INFO(("DHD Daemon Started, PID:%d\n", sender_pid_ts));
+}
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) */
+
+/**
+ * dhd_send_msg_to_ts - send data to BCM_NL_TS netlink socket
+ *
+ * @skb: socket buffer (unused)
+ * @data: output data
+ * @size: size of output data
+ */
+int
+dhd_send_msg_to_ts(struct sk_buff *skb, void *data, int size)
+{
+ struct nlmsghdr *nlh;
+ struct sk_buff *skb_out = NULL;
+ int ret = BCME_ERROR;
+
+ BCM_REFERENCE(skb);
+ if (sender_pid_ts == 0) {
+ goto err;
+ }
+
+ if ((skb_out = nlmsg_new(size, GFP_ATOMIC)) == NULL) {
+ DHD_ERROR(("%s: skb alloc failed\n", __FUNCTION__));
+ goto err;
+ }
+
+ nlh = nlmsg_put(skb_out, 0, 0, NLMSG_DONE, size, 0);
+ if (nlh == NULL) {
+ DHD_ERROR(("%s: nlmsg_put failed\n", __FUNCTION__));
+ goto err;
+ }
+
+ NETLINK_CB(skb_out).dst_group = 0; /* Unicast */
+ (void)memcpy_s(nlmsg_data(nlh), size, (char *)data, size);
+
+ if ((ret = nlmsg_unicast(nl_to_ts, skb_out, sender_pid_ts)) < 0) {
+ DHD_ERROR(("Error sending message, ret:%d\n", ret));
+		/* skb is already freed inside nlmsg_unicast() on error */
+		/* explicitly set skb_out to NULL to avoid a double free */
+ skb_out = NULL;
+ goto err;
+ }
+ return BCME_OK;
+
+err:
+ if (skb_out) {
+ nlmsg_free(skb_out);
+ }
+ return ret;
+}
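+
+/* Userspace counterpart sketch (illustrative, not shipped with the driver):
+ * a daemon registers its PID simply by sending any netlink message on the
+ * BCM_NL_TS protocol; dhd_recv_msg_from_ts() records nlmsg_pid, after which
+ * dhd_send_msg_to_ts() unicasts timestamp records back to that PID:
+ *
+ *   int fd = socket(AF_NETLINK, SOCK_RAW, BCM_NL_TS);
+ *   struct sockaddr_nl sa = { .nl_family = AF_NETLINK, .nl_pid = getpid() };
+ *   bind(fd, (struct sockaddr *)&sa, sizeof(sa));
+ *   // send one (empty) nlmsg so the kernel learns our PID, then recv() loop
+ */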
+#endif /* DHD_PKTTS */
+
+static int
+dhd_create_to_notifier_skt(void)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+ /* Kernel 3.7 onwards this API accepts only 3 arguments. */
+ /* Kernel version 3.6 is a special case which accepts 4 arguments */
+ nl_to_event_sk = netlink_kernel_create(&init_net, BCM_NL_USER, &dhd_netlink_cfg);
+#elif (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
+ /* Kernel version 3.5 and below use this old API format */
+ nl_to_event_sk = netlink_kernel_create(&init_net, BCM_NL_USER, 0,
+ dhd_process_daemon_msg, NULL, THIS_MODULE);
+#else
+ nl_to_event_sk = netlink_kernel_create(&init_net, BCM_NL_USER, THIS_MODULE,
+ &dhd_netlink_cfg);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) */
+ if (!nl_to_event_sk)
+ {
+ printf("Error creating socket.\n");
+ return -1;
+ }
+ DHD_INFO(("nl_to socket created successfully...\n"));
+ return 0;
+}
+
+void
+dhd_destroy_to_notifier_skt(void)
+{
+ DHD_INFO(("Destroying nl_to socket\n"));
+ netlink_kernel_release(nl_to_event_sk);
+}
+
+static void
+dhd_recv_msg_from_daemon(struct sk_buff *skb)
+{
+ struct nlmsghdr *nlh;
+ bcm_to_info_t *cmd;
+
+ nlh = (struct nlmsghdr *)skb->data;
+ cmd = (bcm_to_info_t *)nlmsg_data(nlh);
+ if ((cmd->magic == BCM_TO_MAGIC) && (cmd->reason == REASON_DAEMON_STARTED)) {
+ sender_pid = ((struct nlmsghdr *)(skb->data))->nlmsg_pid;
+ DHD_INFO(("DHD Daemon Started\n"));
+ }
+}
+
+int
+dhd_send_msg_to_daemon(struct sk_buff *skb, void *data, int size)
+{
+ struct nlmsghdr *nlh;
+ struct sk_buff *skb_out;
+ int ret = BCME_ERROR;
+
+ BCM_REFERENCE(skb);
+ if (sender_pid == 0) {
+ DHD_INFO(("Invalid PID 0\n"));
+ skb_out = NULL;
+ goto err;
+ }
+
+ if ((skb_out = nlmsg_new(size, 0)) == NULL) {
+ DHD_ERROR(("%s: skb alloc failed\n", __FUNCTION__));
+ ret = BCME_NOMEM;
+ goto err;
+ }
+ nlh = nlmsg_put(skb_out, 0, 0, NLMSG_DONE, size, 0);
+ if (nlh == NULL) {
+ DHD_ERROR(("%s: nlmsg_put failed\n", __FUNCTION__));
+ goto err;
+ }
+ NETLINK_CB(skb_out).dst_group = 0; /* Unicast */
+ (void)memcpy_s(nlmsg_data(nlh), size, (char *)data, size);
+
+ if ((ret = nlmsg_unicast(nl_to_event_sk, skb_out, sender_pid)) < 0) {
+ DHD_ERROR(("Error sending message, ret:%d\n", ret));
+		/* skb is already freed inside nlmsg_unicast() on error */
+		/* explicitly set skb_out to NULL to avoid a double free */
+ skb_out = NULL;
+ goto err;
+ }
+ return BCME_OK;
+err:
+ if (skb_out) {
+ nlmsg_free(skb_out);
+ }
+ return ret;
+}
+
+static void
+dhd_process_daemon_msg(struct sk_buff *skb)
+{
+ bcm_to_info_t to_info;
+
+ to_info.magic = BCM_TO_MAGIC;
+ to_info.reason = REASON_DAEMON_STARTED;
+ to_info.trap = NO_TRAP;
+
+ dhd_recv_msg_from_daemon(skb);
+ dhd_send_msg_to_daemon(skb, &to_info, sizeof(to_info));
+}
+
+#ifdef REPORT_FATAL_TIMEOUTS
+static void
+dhd_send_trap_to_fw(dhd_pub_t * pub, int reason, int trap)
+{
+ bcm_to_info_t to_info;
+
+ to_info.magic = BCM_TO_MAGIC;
+ to_info.reason = reason;
+ to_info.trap = trap;
+
+ DHD_ERROR(("Sending Event reason:%d trap:%d\n", reason, trap));
+ dhd_send_msg_to_daemon(NULL, (void *)&to_info, sizeof(bcm_to_info_t));
+}
+
+void
+dhd_send_trap_to_fw_for_timeout(dhd_pub_t * pub, timeout_reasons_t reason)
+{
+ int to_reason;
+ int trap = NO_TRAP;
+ switch (reason) {
+ case DHD_REASON_COMMAND_TO:
+ to_reason = REASON_COMMAND_TO;
+ trap = DO_TRAP;
+ break;
+ case DHD_REASON_JOIN_TO:
+ to_reason = REASON_JOIN_TO;
+ trap = DO_TRAP;
+ break;
+ case DHD_REASON_SCAN_TO:
+ to_reason = REASON_SCAN_TO;
+ trap = DO_TRAP;
+ break;
+ case DHD_REASON_OQS_TO:
+ to_reason = REASON_OQS_TO;
+ trap = DO_TRAP;
+ break;
+ default:
+ to_reason = REASON_UNKOWN;
+ }
+ dhd_send_trap_to_fw(pub, to_reason, trap);
+}
+#endif /* REPORT_FATAL_TIMEOUTS */
+
+char*
+dhd_dbg_get_system_timestamp(void)
+{
+ static char timebuf[DEBUG_DUMP_TIME_BUF_LEN];
+ struct osl_timespec tv;
+ unsigned long local_time;
+ struct rtc_time tm;
+
+ memset_s(timebuf, DEBUG_DUMP_TIME_BUF_LEN, 0, DEBUG_DUMP_TIME_BUF_LEN);
+ osl_do_gettimeofday(&tv);
+ local_time = (u32)(tv.tv_sec - (sys_tz.tz_minuteswest * 60));
+ rtc_time_to_tm(local_time, &tm);
+ scnprintf(timebuf, DEBUG_DUMP_TIME_BUF_LEN,
+ "%02d:%02d:%02d.%06lu",
+ tm.tm_hour, tm.tm_min, tm.tm_sec, tv.tv_usec);
+ return timebuf;
+}
+
+char*
+dhd_log_dump_get_timestamp(void)
+{
+ static char buf[32];
+ u64 ts_nsec;
+ unsigned long rem_nsec;
+
+ ts_nsec = local_clock();
+ rem_nsec = DIV_AND_MOD_U64_BY_U32(ts_nsec, NSEC_PER_SEC);
+ snprintf(buf, sizeof(buf), "%5lu.%06lu",
+ (unsigned long)ts_nsec, rem_nsec / NSEC_PER_USEC);
+
+ return buf;
+}
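+
+/* Output format note (illustrative): the two helpers above produce strings
+ * such as "13:05:42.123456" (wall clock, dhd_dbg_get_system_timestamp) and
+ * " 1234.567890" (seconds since boot via local_clock(),
+ * dhd_log_dump_get_timestamp). Both return pointers to static buffers, so
+ * each string must be consumed before the next call.
+ */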
+
+#ifdef DHD_LOG_DUMP
+bool
+dhd_log_dump_ecntr_enabled(void)
+{
+ return (bool)logdump_ecntr_enable;
+}
+
+bool
+dhd_log_dump_rtt_enabled(void)
+{
+ return (bool)logdump_rtt_enable;
+}
+
+void
+dhd_log_dump_init(dhd_pub_t *dhd)
+{
+ struct dhd_log_dump_buf *dld_buf, *dld_buf_special;
+ int i = 0;
+ uint8 *prealloc_buf = NULL, *bufptr = NULL;
+#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
+ int prealloc_idx = DHD_PREALLOC_DHD_LOG_DUMP_BUF;
+#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
+ int ret;
+ dhd_dbg_ring_t *ring = NULL;
+ unsigned long flags = 0;
+ dhd_info_t *dhd_info = dhd->info;
+#if defined(EWP_ECNTRS_LOGGING)
+ void *cookie_buf = NULL;
+#endif
+
+ BCM_REFERENCE(ret);
+ BCM_REFERENCE(ring);
+ BCM_REFERENCE(flags);
+
+ /* sanity check */
+ if (logdump_prsrv_tailsize <= 0 ||
+ logdump_prsrv_tailsize > DHD_LOG_DUMP_MAX_TAIL_FLUSH_SIZE) {
+ logdump_prsrv_tailsize = DHD_LOG_DUMP_MAX_TAIL_FLUSH_SIZE;
+ }
+ /* now adjust the preserve log flush size based on the
+ * kernel printk log buffer size
+ */
+#ifdef CONFIG_LOG_BUF_SHIFT
+ DHD_ERROR(("%s: kernel log buf size = %uKB; logdump_prsrv_tailsize = %uKB;"
+ " limit prsrv tail size to = %uKB\n",
+ __FUNCTION__, (1 << CONFIG_LOG_BUF_SHIFT)/1024,
+ logdump_prsrv_tailsize/1024, LOG_DUMP_KERNEL_TAIL_FLUSH_SIZE/1024));
+
+ if (logdump_prsrv_tailsize > LOG_DUMP_KERNEL_TAIL_FLUSH_SIZE) {
+ logdump_prsrv_tailsize = LOG_DUMP_KERNEL_TAIL_FLUSH_SIZE;
+ }
+#else
+ DHD_ERROR(("%s: logdump_prsrv_tailsize = %uKB \n",
+		__FUNCTION__, logdump_prsrv_tailsize/1024));
+#endif /* CONFIG_LOG_BUF_SHIFT */
+
+ mutex_init(&dhd_info->logdump_lock);
+ /* initialize log dump buf structures */
+ memset(g_dld_buf, 0, sizeof(struct dhd_log_dump_buf) * DLD_BUFFER_NUM);
+
+ /* set the log dump buffer size based on the module_param */
+ if (logdump_max_bufsize > LOG_DUMP_GENERAL_MAX_BUFSIZE ||
+ logdump_max_bufsize <= 0)
+ dld_buf_size[DLD_BUF_TYPE_GENERAL] = LOG_DUMP_GENERAL_MAX_BUFSIZE;
+ else
+ dld_buf_size[DLD_BUF_TYPE_GENERAL] = logdump_max_bufsize;
+
+ /* pre-alloc the memory for the log buffers & 'special' buffer */
+ dld_buf_special = &g_dld_buf[DLD_BUF_TYPE_SPECIAL];
+#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
+ prealloc_buf = DHD_OS_PREALLOC(dhd, prealloc_idx++, LOG_DUMP_TOTAL_BUFSIZE);
+ dld_buf_special->buffer = DHD_OS_PREALLOC(dhd, prealloc_idx++,
+ dld_buf_size[DLD_BUF_TYPE_SPECIAL]);
+#else
+ prealloc_buf = MALLOCZ(dhd->osh, LOG_DUMP_TOTAL_BUFSIZE);
+ dld_buf_special->buffer = MALLOCZ(dhd->osh, dld_buf_size[DLD_BUF_TYPE_SPECIAL]);
+#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
+
+ if (!prealloc_buf) {
+ DHD_ERROR(("Failed to allocate memory for log buffers\n"));
+ goto fail;
+ }
+ if (!dld_buf_special->buffer) {
+ DHD_ERROR(("Failed to allocate memory for special buffer\n"));
+ goto fail;
+ }
+#ifdef BCMINTERNAL
+ DHD_ERROR(("prealloc_buf:%p dld_buf_special->buffer:%p\n",
+ prealloc_buf, dld_buf_special->buffer));
+#endif /* BCMINTERNAL */
+
+ bufptr = prealloc_buf;
+ for (i = 0; i < DLD_BUFFER_NUM; i++) {
+ dld_buf = &g_dld_buf[i];
+ dld_buf->dhd_pub = dhd;
+ spin_lock_init(&dld_buf->lock);
+ dld_buf->wraparound = 0;
+ if (i != DLD_BUF_TYPE_SPECIAL) {
+ dld_buf->buffer = bufptr;
+ dld_buf->max = (unsigned long)dld_buf->buffer + dld_buf_size[i];
+ bufptr = (uint8 *)dld_buf->max;
+ } else {
+ dld_buf->max = (unsigned long)dld_buf->buffer + dld_buf_size[i];
+ }
+ dld_buf->present = dld_buf->front = dld_buf->buffer;
+ dld_buf->remain = dld_buf_size[i];
+ dld_buf->enable = 1;
+ }
+
+ /* now use the rest of the pre-alloc'd memory for other rings */
+#ifdef EWP_ECNTRS_LOGGING
+ dhd->ecntr_dbg_ring = dhd_dbg_ring_alloc_init(dhd,
+ ECNTR_RING_ID, ECNTR_RING_NAME,
+ LOG_DUMP_ECNTRS_MAX_BUFSIZE,
+ bufptr, TRUE);
+ if (!dhd->ecntr_dbg_ring) {
+ DHD_ERROR(("%s: unable to init ecounters dbg ring !\n",
+ __FUNCTION__));
+ goto fail;
+ }
+ bufptr += LOG_DUMP_ECNTRS_MAX_BUFSIZE;
+#endif /* EWP_ECNTRS_LOGGING */
+
+#ifdef EWP_RTT_LOGGING
+ dhd->rtt_dbg_ring = dhd_dbg_ring_alloc_init(dhd,
+ RTT_RING_ID, RTT_RING_NAME,
+ LOG_DUMP_RTT_MAX_BUFSIZE,
+ bufptr, TRUE);
+ if (!dhd->rtt_dbg_ring) {
+ DHD_ERROR(("%s: unable to init rtt dbg ring !\n",
+ __FUNCTION__));
+ goto fail;
+ }
+ bufptr += LOG_DUMP_RTT_MAX_BUFSIZE;
+#endif /* EWP_RTT_LOGGING */
+
+#ifdef EWP_BCM_TRACE
+ dhd->bcm_trace_dbg_ring = dhd_dbg_ring_alloc_init(dhd,
+ BCM_TRACE_RING_ID, BCM_TRACE_RING_NAME,
+ LOG_DUMP_BCM_TRACE_MAX_BUFSIZE,
+ bufptr, TRUE);
+ if (!dhd->bcm_trace_dbg_ring) {
+ DHD_ERROR(("%s: unable to init bcm trace dbg ring !\n",
+ __FUNCTION__));
+ goto fail;
+ }
+ bufptr += LOG_DUMP_BCM_TRACE_MAX_BUFSIZE;
+#endif /* EWP_BCM_TRACE */
+
+	/* The concise buffer is used as an intermediate buffer for two purposes:
+	 * a) to temporarily hold ecounters records before
+	 * writing them to a file
+	 * b) to store 'dhd dump' data before writing it to a file
+ * It should have a size equal to
+ * MAX(largest possible ecntr record, 'dhd dump' data size)
+ */
+ dhd->concise_dbg_buf = MALLOC(dhd->osh, CONCISE_DUMP_BUFLEN);
+ if (!dhd->concise_dbg_buf) {
+ DHD_ERROR(("%s: unable to alloc mem for concise debug info !\n",
+ __FUNCTION__));
+ goto fail;
+ }
+
+#if defined(DHD_EVENT_LOG_FILTER)
+ /* XXX init filter last, because filter use buffer which alloced by log dump */
+ ret = dhd_event_log_filter_init(dhd,
+ bufptr,
+ LOG_DUMP_FILTER_MAX_BUFSIZE);
+ if (ret != BCME_OK) {
+ goto fail;
+ }
+#endif /* DHD_EVENT_LOG_FILTER */
+
+#if defined(EWP_ECNTRS_LOGGING)
+ cookie_buf = MALLOC(dhd->osh, LOG_DUMP_COOKIE_BUFSIZE);
+ if (!cookie_buf) {
+ DHD_ERROR(("%s: unable to alloc mem for logdump cookie buffer\n",
+ __FUNCTION__));
+ goto fail;
+ }
+
+ ret = dhd_logdump_cookie_init(dhd, cookie_buf, LOG_DUMP_COOKIE_BUFSIZE);
+ if (ret != BCME_OK) {
+ MFREE(dhd->osh, cookie_buf, LOG_DUMP_COOKIE_BUFSIZE);
+ goto fail;
+ }
+#endif /* EWP_ECNTRS_LOGGING */
+ return;
+
+fail:
+
+#if defined(DHD_EVENT_LOG_FILTER)
+ /* XXX deinit filter first, because filter use buffer which alloced by log dump */
+ if (dhd->event_log_filter) {
+ dhd_event_log_filter_deinit(dhd);
+ }
+#endif /* DHD_EVENT_LOG_FILTER */
+
+ if (dhd->concise_dbg_buf) {
+ MFREE(dhd->osh, dhd->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
+ }
+
+#ifdef EWP_ECNTRS_LOGGING
+ if (dhd->logdump_cookie) {
+ dhd_logdump_cookie_deinit(dhd);
+ MFREE(dhd->osh, dhd->logdump_cookie, LOG_DUMP_COOKIE_BUFSIZE);
+ dhd->logdump_cookie = NULL;
+ }
+#endif /* EWP_ECNTRS_LOGGING */
+
+#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
+ if (prealloc_buf) {
+ DHD_OS_PREFREE(dhd, prealloc_buf, LOG_DUMP_TOTAL_BUFSIZE);
+ }
+ if (dld_buf_special->buffer) {
+ DHD_OS_PREFREE(dhd, dld_buf_special->buffer,
+ dld_buf_size[DLD_BUF_TYPE_SPECIAL]);
+ }
+#else
+ if (prealloc_buf) {
+ MFREE(dhd->osh, prealloc_buf, LOG_DUMP_TOTAL_BUFSIZE);
+ }
+ if (dld_buf_special->buffer) {
+ MFREE(dhd->osh, dld_buf_special->buffer,
+ dld_buf_size[DLD_BUF_TYPE_SPECIAL]);
+ }
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+ for (i = 0; i < DLD_BUFFER_NUM; i++) {
+ dld_buf = &g_dld_buf[i];
+ dld_buf->enable = 0;
+ dld_buf->buffer = NULL;
+ }
+ mutex_destroy(&dhd_info->logdump_lock);
+}
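+
+/* Memory layout sketch (illustrative): on success, dhd_log_dump_init() carves
+ * the single LOG_DUMP_TOTAL_BUFSIZE allocation by advancing bufptr, with the
+ * 'special' buffer allocated separately:
+ *
+ *   prealloc_buf: | dld bufs (all but special) | ecntr ring | rtt ring |
+ *                 | bcm trace ring | event-log filter |
+ *
+ * The ring sections exist only when the corresponding EWP_ or filter
+ * feature is compiled in.
+ */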
+
+void
+dhd_log_dump_deinit(dhd_pub_t *dhd)
+{
+ struct dhd_log_dump_buf *dld_buf = NULL, *dld_buf_special = NULL;
+ int i = 0;
+ dhd_info_t *dhd_info = dhd->info;
+ dhd_dbg_ring_t *ring = NULL;
+
+ BCM_REFERENCE(ring);
+
+ if (dhd->concise_dbg_buf) {
+ MFREE(dhd->osh, dhd->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
+ dhd->concise_dbg_buf = NULL;
+ }
+
+#ifdef EWP_ECNTRS_LOGGING
+ if (dhd->logdump_cookie) {
+ dhd_logdump_cookie_deinit(dhd);
+ MFREE(dhd->osh, dhd->logdump_cookie, LOG_DUMP_COOKIE_BUFSIZE);
+ dhd->logdump_cookie = NULL;
+ }
+
+ if (dhd->ecntr_dbg_ring) {
+ dhd_dbg_ring_dealloc_deinit(&dhd->ecntr_dbg_ring, dhd);
+ }
+#endif /* EWP_ECNTRS_LOGGING */
+
+#ifdef EWP_RTT_LOGGING
+ if (dhd->rtt_dbg_ring) {
+ dhd_dbg_ring_dealloc_deinit(&dhd->rtt_dbg_ring, dhd);
+ }
+#endif /* EWP_RTT_LOGGING */
+
+#ifdef EWP_BCM_TRACE
+ if (dhd->bcm_trace_dbg_ring) {
+ dhd_dbg_ring_dealloc_deinit(&dhd->bcm_trace_dbg_ring, dhd);
+ }
+#endif /* EWP_BCM_TRACE */
+
+ /* 'general' buffer points to start of the pre-alloc'd memory */
+ dld_buf = &g_dld_buf[DLD_BUF_TYPE_GENERAL];
+ dld_buf_special = &g_dld_buf[DLD_BUF_TYPE_SPECIAL];
+#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
+ if (dld_buf->buffer) {
+ DHD_OS_PREFREE(dhd, dld_buf->buffer, LOG_DUMP_TOTAL_BUFSIZE);
+ }
+ if (dld_buf_special->buffer) {
+ DHD_OS_PREFREE(dhd, dld_buf_special->buffer,
+ dld_buf_size[DLD_BUF_TYPE_SPECIAL]);
+ }
+#else
+ if (dld_buf->buffer) {
+ MFREE(dhd->osh, dld_buf->buffer, LOG_DUMP_TOTAL_BUFSIZE);
+ }
+ if (dld_buf_special->buffer) {
+ MFREE(dhd->osh, dld_buf_special->buffer,
+ dld_buf_size[DLD_BUF_TYPE_SPECIAL]);
+ }
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+ for (i = 0; i < DLD_BUFFER_NUM; i++) {
+ dld_buf = &g_dld_buf[i];
+ dld_buf->enable = 0;
+ dld_buf->buffer = NULL;
+ }
+ mutex_destroy(&dhd_info->logdump_lock);
+}
+
+void
+dhd_log_dump_write(int type, char *binary_data,
+ int binary_len, const char *fmt, ...)
+{
+ int len = 0;
+ char tmp_buf[DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE] = {0, };
+ va_list args;
+ unsigned long flags = 0;
+ struct dhd_log_dump_buf *dld_buf = NULL;
+ bool flush_log = FALSE;
+
+ if (type < 0 || type >= DLD_BUFFER_NUM) {
+ DHD_INFO(("%s: Unsupported DHD_LOG_DUMP_BUF_TYPE(%d).\n",
+ __FUNCTION__, type));
+ return;
+ }
+
+ dld_buf = &g_dld_buf[type];
+ if (dld_buf->enable != 1) {
+ return;
+ }
+
+ va_start(args, fmt);
+ len = vsnprintf(tmp_buf, DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE, fmt, args);
+	/* A non-C99-compliant vsnprintf returns -1 on truncation,
+	 * a C99-compliant one returns len >= DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE
+ */
+ va_end(args);
+ if (len < 0) {
+ return;
+ }
+
+ if (len >= DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE) {
+ len = DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE - 1;
+ tmp_buf[len] = '\0';
+ }
+
+ /* make a critical section to eliminate race conditions */
+ DHD_LOG_DUMP_BUF_LOCK(&dld_buf->lock, flags);
+ if (dld_buf->remain < len) {
+ dld_buf->wraparound = 1;
+ dld_buf->present = dld_buf->front;
+ dld_buf->remain = dld_buf_size[type];
+ /* if wrap around happens, flush the ring buffer to the file */
+ flush_log = TRUE;
+ }
+
+ memcpy(dld_buf->present, tmp_buf, len);
+ dld_buf->remain -= len;
+ dld_buf->present += len;
+ DHD_LOG_DUMP_BUF_UNLOCK(&dld_buf->lock, flags);
+
+ /* double check invalid memory operation */
+ ASSERT((unsigned long)dld_buf->present <= dld_buf->max);
+
+ if (dld_buf->dhd_pub) {
+ dhd_pub_t *dhdp = (dhd_pub_t *)dld_buf->dhd_pub;
+ dhdp->logdump_periodic_flush =
+ logdump_periodic_flush;
+ if (logdump_periodic_flush && flush_log) {
+ log_dump_type_t *flush_type = MALLOCZ(dhdp->osh,
+ sizeof(log_dump_type_t));
+ if (flush_type) {
+ *flush_type = type;
+ dhd_schedule_log_dump(dld_buf->dhd_pub, flush_type);
+ }
+ }
+ }
+}
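+
+/* Wraparound sketch (illustrative): each dld buffer is a ring over a flat
+ * region. If remain < len, 'present' snaps back to 'front', 'remain' resets
+ * to the full buffer size and, when periodic flush is enabled, the buffer is
+ * scheduled to be flushed to the dump file before being overwritten.
+ */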
+
+#ifdef DHD_DEBUGABILITY_LOG_DUMP_RING
+extern struct dhd_dbg_ring_buf g_ring_buf;
+void
+dhd_dbg_ring_write(int type, char *binary_data,
+ int binary_len, const char *fmt, ...)
+{
+ int len = 0;
+ va_list args;
+ struct dhd_dbg_ring_buf *ring_buf = NULL;
+ char tmp_buf[DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE] = {0, };
+
+ ring_buf = &g_ring_buf;
+
+ va_start(args, fmt);
+ len = vsnprintf(tmp_buf, DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE, fmt, args);
+	/* A non-C99-compliant vsnprintf returns -1 on truncation,
+	 * a C99-compliant one returns len >= DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE
+ */
+ va_end(args);
+ if (len < 0) {
+ return;
+ }
+
+ if (len >= DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE) {
+ len = DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE - 1;
+ tmp_buf[len] = '\0';
+ }
+
+ if (ring_buf->dhd_pub) {
+ dhd_pub_t *dhdp = (dhd_pub_t *)ring_buf->dhd_pub;
+ if (type == DRIVER_LOG_RING_ID || type == FW_VERBOSE_RING_ID ||
+ type == ROAM_STATS_RING_ID) {
+ if (DBG_RING_ACTIVE(dhdp, type)) {
+ dhd_os_push_push_ring_data(dhdp, type,
+ tmp_buf, strlen(tmp_buf));
+ return;
+ }
+ }
+ }
+ return;
+}
+#endif /* DHD_DEBUGABILITY_LOG_DUMP_RING */
+#endif /* DHD_LOG_DUMP */
+
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+void
+dhd_flush_rx_tx_wq(dhd_pub_t *dhdp)
+{
+ dhd_info_t * dhd;
+
+ if (dhdp) {
+ dhd = dhdp->info;
+ if (dhd) {
+ flush_workqueue(dhd->tx_wq);
+ flush_workqueue(dhd->rx_wq);
+ }
+ }
+
+ return;
+}
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+
+#ifdef DHD_DEBUG_UART
+bool
+dhd_debug_uart_is_running(struct net_device *dev)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+ if (dhd->duart_execute) {
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+static void
+dhd_debug_uart_exec_rd(void *handle, void *event_info, u8 event)
+{
+ dhd_pub_t *dhdp = handle;
+ dhd_debug_uart_exec(dhdp, "rd");
+}
+
+static void
+dhd_debug_uart_exec(dhd_pub_t *dhdp, char *cmd)
+{
+ int ret;
+
+ char *argv[] = {DHD_DEBUG_UART_EXEC_PATH, cmd, NULL};
+ char *envp[] = {"HOME=/", "TERM=linux", "PATH=/sbin:/system/bin", NULL};
+
+#ifdef DHD_FW_COREDUMP
+ if (dhdp->memdump_enabled == DUMP_MEMFILE_BUGON)
+#endif
+ {
+ if (dhdp->hang_reason == HANG_REASON_PCIE_LINK_DOWN_RC_DETECT ||
+ dhdp->hang_reason == HANG_REASON_PCIE_LINK_DOWN_EP_DETECT ||
+#ifdef DHD_FW_COREDUMP
+ dhdp->memdump_success == FALSE ||
+#endif
+ FALSE) {
+ dhdp->info->duart_execute = TRUE;
+ DHD_ERROR(("DHD: %s - execute %s %s\n",
+ __FUNCTION__, DHD_DEBUG_UART_EXEC_PATH, cmd));
+ ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
+ DHD_ERROR(("DHD: %s - %s %s ret = %d\n",
+ __FUNCTION__, DHD_DEBUG_UART_EXEC_PATH, cmd, ret));
+ dhdp->info->duart_execute = FALSE;
+
+#ifdef DHD_LOG_DUMP
+ if (dhdp->memdump_type != DUMP_TYPE_BY_SYSDUMP)
+#endif
+ {
+ BUG_ON(1);
+ }
+ }
+ }
+}
+#endif /* DHD_DEBUG_UART */
+
+#if defined(DHD_BLOB_EXISTENCE_CHECK)
+void
+dhd_set_blob_support(dhd_pub_t *dhdp, char *fw_path)
+{
+ struct file *fp;
+ char *filepath = VENDOR_PATH CONFIG_BCMDHD_CLM_PATH;
+
+ fp = filp_open(filepath, O_RDONLY, 0);
+ if (IS_ERR(fp)) {
+ DHD_ERROR(("%s: ----- blob file doesn't exist (%s) -----\n", __FUNCTION__,
+ filepath));
+ dhdp->is_blob = FALSE;
+ } else {
+ DHD_ERROR(("%s: ----- blob file exists (%s) -----\n", __FUNCTION__, filepath));
+ dhdp->is_blob = TRUE;
+#if defined(CONCATE_BLOB)
+ strncat(fw_path, "_blob", strlen("_blob"));
+#else
+ BCM_REFERENCE(fw_path);
+#endif /* CONCATE_BLOB */
+ filp_close(fp, NULL);
+ }
+}
+#endif /* DHD_BLOB_EXISTENCE_CHECK */
+
+#if defined(PCIE_FULL_DONGLE)
+/** test / loopback */
+void
+dmaxfer_free_dmaaddr_handler(void *handle, void *event_info, u8 event)
+{
+ dmaxref_mem_map_t *dmmap = (dmaxref_mem_map_t *)event_info;
+ dhd_info_t *dhd_info = (dhd_info_t *)handle;
+
+ if (event != DHD_WQ_WORK_DMA_LB_MEM_REL) {
+ DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
+ return;
+ }
+ if (dhd_info == NULL) {
+ DHD_ERROR(("%s: invalid dhd_info\n", __FUNCTION__));
+ return;
+ }
+ if (dmmap == NULL) {
+ DHD_ERROR(("%s: dmmap is null\n", __FUNCTION__));
+ return;
+ }
+ dmaxfer_free_prev_dmaaddr(&dhd_info->pub, dmmap);
+}
+
+void
+dhd_schedule_dmaxfer_free(dhd_pub_t *dhdp, dmaxref_mem_map_t *dmmap)
+{
+ dhd_info_t *dhd_info = dhdp->info;
+
+ dhd_deferred_schedule_work(dhd_info->dhd_deferred_wq, (void *)dmmap,
+ DHD_WQ_WORK_DMA_LB_MEM_REL, dmaxfer_free_dmaaddr_handler, DHD_WQ_WORK_PRIORITY_LOW);
+}
+#endif /* PCIE_FULL_DONGLE */
+/* ---------------------------- End of sysfs implementation ------------------------------------- */
+#ifdef SET_PCIE_IRQ_CPU_CORE
+void
+dhd_set_irq_cpucore(dhd_pub_t *dhdp, int affinity_cmd)
+{
+ unsigned int pcie_irq = 0;
+#if defined(DHD_LB) && defined(DHD_LB_HOST_CTRL)
+ struct dhd_info *dhd = NULL;
+#endif /* DHD_LB && DHD_LB_HOST_CTRL */
+
+ if (!dhdp) {
+ DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ if (!dhdp->bus) {
+ DHD_ERROR(("%s : dhd->bus is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ if (affinity_cmd < DHD_AFFINITY_OFF || affinity_cmd > DHD_AFFINITY_LAST) {
+		DHD_ERROR(("Wrong affinity cmd:%d, %s\n", affinity_cmd, __FUNCTION__));
+ return;
+ }
+
+ DHD_ERROR(("Enter %s, PCIe affinity cmd=0x%x\n", __FUNCTION__, affinity_cmd));
+
+ if (dhdpcie_get_pcieirq(dhdp->bus, &pcie_irq)) {
+ DHD_ERROR(("%s : Can't get interrupt number\n", __FUNCTION__));
+ return;
+ }
+
+#if defined(DHD_LB) && defined(DHD_LB_HOST_CTRL)
+ dhd = dhdp->info;
+
+ if (affinity_cmd == DHD_AFFINITY_OFF) {
+ dhd->permitted_primary_cpu = FALSE;
+ } else if (affinity_cmd == DHD_AFFINITY_TPUT_150MBPS ||
+ affinity_cmd == DHD_AFFINITY_TPUT_300MBPS) {
+ dhd->permitted_primary_cpu = TRUE;
+ }
+ dhd_select_cpu_candidacy(dhd);
+ /*
+	 * NAPI must be disabled and re-enabled to move NET_RX to the NAPI CPU core
+	 * during Rx traffic.
+	 * NET_RX does not move to the NAPI CPU core while the napi polling
+	 * function is called continuously.
+ */
+ napi_disable(&dhd->rx_napi_struct);
+ napi_enable(&dhd->rx_napi_struct);
+#endif /* DHD_LB && DHD_LB_HOST_CTRL */
+
+ /*
+	irq_set_affinity() assigns the PCIe interrupt to a dedicated CPU core.
+	If the dedicated CPU core is not online,
+	the PCIe interrupt is scheduled on CPU core 0.
+ */
+#if defined(CONFIG_ARCH_SM8150) || defined(CONFIG_ARCH_KONA)
+ /* For SDM platform */
+ switch (affinity_cmd) {
+ case DHD_AFFINITY_OFF:
+#if defined(DHD_LB) && defined(DHD_LB_HOST_CTRL)
+ irq_set_affinity_hint(pcie_irq, dhdp->info->cpumask_secondary);
+ irq_set_affinity(pcie_irq, dhdp->info->cpumask_secondary);
+#endif /* DHD_LB && DHD_LB_HOST_CTRL */
+ break;
+ case DHD_AFFINITY_TPUT_150MBPS:
+ case DHD_AFFINITY_TPUT_300MBPS:
+ irq_set_affinity_hint(pcie_irq, dhdp->info->cpumask_primary);
+ irq_set_affinity(pcie_irq, dhdp->info->cpumask_primary);
+ break;
+ default:
+ DHD_ERROR(("%s, Unknown PCIe affinity cmd=0x%x\n",
+ __FUNCTION__, affinity_cmd));
+ }
+#elif defined(CONFIG_SOC_EXYNOS9810) || defined(CONFIG_SOC_EXYNOS9820) || \
+ defined(CONFIG_SOC_EXYNOS9830)
+ /* For Exynos platform */
+ switch (affinity_cmd) {
+ case DHD_AFFINITY_OFF:
+#if defined(DHD_LB) && defined(DHD_LB_HOST_CTRL)
+ irq_set_affinity(pcie_irq, dhdp->info->cpumask_secondary);
+#endif /* DHD_LB && DHD_LB_HOST_CTRL */
+ break;
+ case DHD_AFFINITY_TPUT_150MBPS:
+ irq_set_affinity(pcie_irq, dhdp->info->cpumask_primary);
+ break;
+ case DHD_AFFINITY_TPUT_300MBPS:
+ DHD_ERROR(("%s, PCIe IRQ:%u set Core %d\n",
+ __FUNCTION__, pcie_irq, PCIE_IRQ_CPU_CORE));
+ irq_set_affinity(pcie_irq, cpumask_of(PCIE_IRQ_CPU_CORE));
+ break;
+ default:
+ DHD_ERROR(("%s, Unknown PCIe affinity cmd=0x%x\n",
+ __FUNCTION__, affinity_cmd));
+ }
+#else /* For Undefined platform */
+ DHD_ERROR(("%s, Unknown PCIe affinity cmd=0x%x\n",
+ __FUNCTION__, affinity_cmd));
+#endif /* End of Platform define */
+
+}
+#endif /* SET_PCIE_IRQ_CPU_CORE */
+
+int
+dhd_write_file(const char *filepath, char *buf, int buf_len)
+{
+ struct file *fp = NULL;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
+ mm_segment_t old_fs;
+#endif
+ int ret = 0;
+
+ /* change to KERNEL_DS address limit */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+#endif
+
+ /* File is always created. */
+ fp = filp_open(filepath, O_RDWR | O_CREAT, 0664);
+ if (IS_ERR(fp)) {
+ DHD_ERROR(("%s: Couldn't open file '%s' err %ld\n",
+ __FUNCTION__, filepath, PTR_ERR(fp)));
+ ret = BCME_ERROR;
+ } else {
+ if (fp->f_mode & FMODE_WRITE) {
+ ret = vfs_write(fp, buf, buf_len, &fp->f_pos);
+ if (ret < 0) {
+ DHD_ERROR(("%s: Couldn't write file '%s'\n",
+ __FUNCTION__, filepath));
+ ret = BCME_ERROR;
+ } else {
+ ret = BCME_OK;
+ }
+ }
+ filp_close(fp, NULL);
+ }
+
+ /* restore previous address limit */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
+ set_fs(old_fs);
+#endif
+
+ return ret;
+}
+
+int
+dhd_read_file(const char *filepath, char *buf, int buf_len)
+{
+ struct file *fp = NULL;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
+ mm_segment_t old_fs;
+#endif
+ int ret;
+
+ /* change to KERNEL_DS address limit */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+#endif
+
+ fp = filp_open(filepath, O_RDONLY, 0);
+ if (IS_ERR(fp)) {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
+ set_fs(old_fs);
+#endif
+ DHD_ERROR(("%s: File %s doesn't exist\n", __FUNCTION__, filepath));
+ return BCME_ERROR;
+ }
+
+ ret = kernel_read_compat(fp, 0, buf, buf_len);
+ filp_close(fp, NULL);
+
+ /* restore previous address limit */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
+ set_fs(old_fs);
+#endif
+
+	/* kernel_read_compat() returns the number of bytes read */
+	if (ret > 0) {
+		/* Read succeeded */
+ ret = 0;
+ } else {
+ DHD_ERROR(("%s: Couldn't read the file %s, ret=%d\n",
+ __FUNCTION__, filepath, ret));
+ ret = BCME_ERROR;
+ }
+
+ return ret;
+}
+
+int
+dhd_write_file_and_check(const char *filepath, char *buf, int buf_len)
+{
+ int ret;
+
+ ret = dhd_write_file(filepath, buf, buf_len);
+ if (ret < 0) {
+ return ret;
+ }
+
+ /* Read the file again and check if the file size is not zero */
+ memset(buf, 0, buf_len);
+ ret = dhd_read_file(filepath, buf, buf_len);
+
+ return ret;
+}
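+
+/* Usage sketch (illustrative): dhd_write_file_and_check() is a
+ * write-then-verify helper, so a caller persisting a small string would do
+ * e.g. (the path is hypothetical):
+ *
+ *   char val[8] = "1";
+ *   if (dhd_write_file_and_check("/data/.wifiver.info", val, sizeof(val)) < 0)
+ *       DHD_ERROR(("persist failed\n"));
+ *
+ * Note that buf is clobbered by the read-back verification.
+ */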
+
+#ifdef FILTER_IE
+int dhd_read_from_file(dhd_pub_t *dhd)
+{
+ int ret = 0, nread = 0;
+ void *fd;
+ uint8 *buf;
+ NULL_CHECK(dhd, "dhd is NULL", ret);
+
+ buf = MALLOCZ(dhd->osh, FILE_BLOCK_READ_SIZE);
+ if (!buf) {
+		DHD_ERROR(("error: failed to allocate buf.\n"));
+ return BCME_NOMEM;
+ }
+
+ /* open file to read */
+ fd = dhd_os_open_image1(dhd, FILTER_IE_PATH);
+ if (!fd) {
+		DHD_ERROR(("No filter file (not an error), filter path %s\n", FILTER_IE_PATH));
+ ret = BCME_EPERM;
+ goto exit;
+ }
+ nread = dhd_os_get_image_block(buf, (FILE_BLOCK_READ_SIZE - 1), fd);
+ if (nread > 0) {
+ buf[nread] = '\0';
+ if ((ret = dhd_parse_filter_ie(dhd, buf)) < 0) {
+ DHD_ERROR(("error: failed to parse filter ie\n"));
+ }
+ } else {
+		DHD_ERROR(("error: zero-length file, failed to read\n"));
+ ret = BCME_ERROR;
+ }
+ dhd_os_close_image1(dhd, fd);
+exit:
+ if (buf) {
+ MFREE(dhd->osh, buf, FILE_BLOCK_READ_SIZE);
+ }
+ return ret;
+}
+
+int dhd_get_filter_ie_count(dhd_pub_t *dhdp, uint8* buf)
+{
+ uint8* pstr = buf;
+ int element_count = 0;
+
+ if (buf == NULL) {
+ return BCME_ERROR;
+ }
+
+ while (*pstr != '\0') {
+ if (*pstr == '\n') {
+ element_count++;
+ }
+ pstr++;
+ }
+ /*
+	 * A newline character must not be present after the last line,
+	 * so count the last line explicitly.
+ */
+ element_count++;
+
+ return element_count;
+}
+
+int dhd_parse_oui(dhd_pub_t *dhd, uint8 *inbuf, uint8 *oui, int len)
+{
+ uint8 i, j, msb, lsb, oui_len = 0;
+ /*
+ * OUI can vary from 3 bytes to 5 bytes.
+	 * While reading from the file as ASCII input it can
+	 * take a maximum size of 14 bytes and a minimum size of
+	 * 8 bytes including ":"
+ * Example 5byte OUI <AB:DE:BE:CD:FA>
+ * Example 3byte OUI <AB:DC:EF>
+ */
+
+ if ((inbuf == NULL) || (len < 8) || (len > 14)) {
+ DHD_ERROR(("error: failed to parse OUI \n"));
+ return BCME_ERROR;
+ }
+
+ for (j = 0, i = 0; i < len; i += 3, ++j) {
+ if (!bcm_isxdigit(inbuf[i]) || !bcm_isxdigit(inbuf[i + 1])) {
+ DHD_ERROR(("error: invalid OUI format \n"));
+ return BCME_ERROR;
+ }
+ msb = inbuf[i] > '9' ? bcm_toupper(inbuf[i]) - 'A' + 10 : inbuf[i] - '0';
+ lsb = inbuf[i + 1] > '9' ? bcm_toupper(inbuf[i + 1]) -
+ 'A' + 10 : inbuf[i + 1] - '0';
+ oui[j] = (msb << 4) | lsb;
+ }
+	/* Size of the OUI; it can be 3, 4, or 5 bytes */
+ oui_len = j;
+
+ return oui_len;
+}
+
+int dhd_check_valid_ie(dhd_pub_t *dhdp, uint8* buf, int len)
+{
+ int i = 0;
+
+ while (i < len) {
+ if (!bcm_isdigit(buf[i])) {
+ DHD_ERROR(("error: non digit value found in filter_ie \n"));
+ return BCME_ERROR;
+ }
+ i++;
+ }
+ if (bcm_atoi((char*)buf) > 255) {
+ DHD_ERROR(("error: element id cannot be greater than 255 \n"));
+ return BCME_ERROR;
+ }
+
+ return BCME_OK;
+}
+
+int dhd_parse_filter_ie(dhd_pub_t *dhd, uint8 *buf)
+{
+ int element_count = 0, i = 0, oui_size = 0, ret = 0;
+ uint16 bufsize, buf_space_left, id = 0, len = 0;
+ uint16 filter_iovsize, all_tlvsize;
+ wl_filter_ie_tlv_t *p_ie_tlv = NULL;
+ wl_filter_ie_iov_v1_t *p_filter_iov = (wl_filter_ie_iov_v1_t *) NULL;
+ char *token = NULL, *ele_token = NULL, *oui_token = NULL, *type = NULL;
+ uint8 data[20];
+
+ element_count = dhd_get_filter_ie_count(dhd, buf);
+ DHD_INFO(("total element count %d \n", element_count));
+ /* Calculate the whole buffer size */
+ filter_iovsize = sizeof(wl_filter_ie_iov_v1_t) + FILTER_IE_BUFSZ;
+ p_filter_iov = MALLOCZ(dhd->osh, filter_iovsize);
+
+ if (p_filter_iov == NULL) {
+ DHD_ERROR(("error: failed to allocate %d bytes of memory\n", filter_iovsize));
+ return BCME_ERROR;
+ }
+
+ /* setup filter iovar header */
+ p_filter_iov->version = WL_FILTER_IE_VERSION;
+ p_filter_iov->len = filter_iovsize;
+ p_filter_iov->fixed_length = p_filter_iov->len - FILTER_IE_BUFSZ;
+ p_filter_iov->pktflag = FC_PROBE_REQ;
+ p_filter_iov->option = WL_FILTER_IE_CHECK_SUB_OPTION;
+ /* setup TLVs */
+ bufsize = filter_iovsize - WL_FILTER_IE_IOV_HDR_SIZE; /* adjust available size for TLVs */
+ p_ie_tlv = (wl_filter_ie_tlv_t *)&p_filter_iov->tlvs[0];
+ buf_space_left = bufsize;
+
+ while ((i < element_count) && (buf != NULL)) {
+ len = 0;
+ /* token contains one line of input data */
+ token = bcmstrtok((char**)&buf, "\n", NULL);
+ if (token == NULL) {
+ break;
+ }
+ if ((ele_token = bcmstrstr(token, ",")) == NULL) {
+ /* only element id is present */
+ if (dhd_check_valid_ie(dhd, token, strlen(token)) == BCME_ERROR) {
+ DHD_ERROR(("error: Invalid element id \n"));
+ ret = BCME_ERROR;
+ goto exit;
+ }
+ id = bcm_atoi((char*)token);
+ data[len++] = WL_FILTER_IE_SET;
+ } else {
+ /* oui is present */
+ ele_token = bcmstrtok(&token, ",", NULL);
+ if ((ele_token == NULL) || (dhd_check_valid_ie(dhd, ele_token,
+ strlen(ele_token)) == BCME_ERROR)) {
+ DHD_ERROR(("error: Invalid element id \n"));
+ ret = BCME_ERROR;
+ goto exit;
+ }
+ id = bcm_atoi((char*)ele_token);
+ data[len++] = WL_FILTER_IE_SET;
+ if ((oui_token = bcmstrstr(token, ",")) == NULL) {
+ oui_size = dhd_parse_oui(dhd, token, &(data[len]), strlen(token));
+ if (oui_size == BCME_ERROR) {
+ DHD_ERROR(("error: Invalid OUI \n"));
+ ret = BCME_ERROR;
+ goto exit;
+ }
+ len += oui_size;
+ } else {
+ /* type is present */
+ oui_token = bcmstrtok(&token, ",", NULL);
+ if ((oui_token == NULL) || ((oui_size =
+ dhd_parse_oui(dhd, oui_token,
+ &(data[len]), strlen(oui_token))) == BCME_ERROR)) {
+ DHD_ERROR(("error: Invalid OUI \n"));
+ ret = BCME_ERROR;
+ goto exit;
+ }
+ len += oui_size;
+ if ((type = bcmstrstr(token, ",")) == NULL) {
+ if (dhd_check_valid_ie(dhd, token,
+ strlen(token)) == BCME_ERROR) {
+ DHD_ERROR(("error: Invalid type \n"));
+ ret = BCME_ERROR;
+ goto exit;
+ }
+ data[len++] = bcm_atoi((char*)token);
+ } else {
+ /* subtype is present */
+ type = bcmstrtok(&token, ",", NULL);
+ if ((type == NULL) || (dhd_check_valid_ie(dhd, type,
+ strlen(type)) == BCME_ERROR)) {
+ DHD_ERROR(("error: Invalid type \n"));
+ ret = BCME_ERROR;
+ goto exit;
+ }
+ data[len++] = bcm_atoi((char*)type);
+ /* subtype is last element */
+ if ((token == NULL) || (*token == '\0') ||
+ (dhd_check_valid_ie(dhd, token,
+ strlen(token)) == BCME_ERROR)) {
+ DHD_ERROR(("error: Invalid subtype \n"));
+ ret = BCME_ERROR;
+ goto exit;
+ }
+ data[len++] = bcm_atoi((char*)token);
+ }
+ }
+ }
+ ret = bcm_pack_xtlv_entry((uint8 **)&p_ie_tlv,
+ &buf_space_left, id, len, data, BCM_XTLV_OPTION_ALIGN32);
+ if (ret != BCME_OK) {
+ DHD_ERROR(("%s : bcm_pack_xtlv_entry() failed ,"
+ "status=%d\n", __FUNCTION__, ret));
+ goto exit;
+ }
+ i++;
+ }
+ if (i == 0) {
+ /* file is empty or first line is blank */
+ DHD_ERROR(("error: filter_ie file is empty or first line is blank \n"));
+ ret = BCME_ERROR;
+ goto exit;
+ }
+ /* update the iov header, set len to include all TLVs + header */
+ all_tlvsize = (bufsize - buf_space_left);
+ p_filter_iov->len = htol16(all_tlvsize + WL_FILTER_IE_IOV_HDR_SIZE);
+ ret = dhd_iovar(dhd, 0, "filter_ie", (void *)p_filter_iov,
+ p_filter_iov->len, NULL, 0, TRUE);
+ if (ret != BCME_OK) {
+ DHD_ERROR(("error: IOVAR failed, status=%d\n", ret));
+ }
+exit:
+ /* clean up */
+ if (p_filter_iov) {
+ MFREE(dhd->osh, p_filter_iov, filter_iovsize);
+ }
+ return ret;
+}
+#endif /* FILTER_IE */
+#ifdef DHD_WAKE_STATUS
+wake_counts_t*
+dhd_get_wakecount(dhd_pub_t *dhdp)
+{
+#ifdef BCMDBUS
+ return NULL;
+#else
+ return dhd_bus_get_wakecount(dhdp);
+#endif /* BCMDBUS */
+}
+#endif /* DHD_WAKE_STATUS */
+
+int
+dhd_get_random_bytes(uint8 *buf, uint len)
+{
+#ifdef BCMPCIE
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0))
+ int rndlen = get_random_bytes_arch(buf, len);
+ if (rndlen != len) {
+ bzero(buf, len);
+ get_random_bytes(buf, len);
+ }
+#else
+ get_random_bytes_arch(buf, len);
+#endif
+#endif /* BCMPCIE */
+ return BCME_OK;
+}
+
+#if defined(DHD_HANG_SEND_UP_TEST)
+void
+dhd_make_hang_with_reason(struct net_device *dev, const char *string_num)
+{
+ dhd_info_t *dhd = NULL;
+ dhd_pub_t *dhdp = NULL;
+ uint reason = HANG_REASON_MAX;
+ uint32 fw_test_code = 0;
+ dhd = DHD_DEV_INFO(dev);
+
+ if (dhd) {
+ dhdp = &dhd->pub;
+ }
+
+ if (!dhd || !dhdp) {
+ return;
+ }
+
+ reason = (uint) bcm_strtoul(string_num, NULL, 0);
+ DHD_ERROR(("Enter %s, reason=0x%x\n", __FUNCTION__, reason));
+
+ if (reason == 0) {
+ if (dhdp->req_hang_type) {
+ DHD_ERROR(("%s, Clear HANG test request 0x%x\n",
+ __FUNCTION__, dhdp->req_hang_type));
+ dhdp->req_hang_type = 0;
+ return;
+ } else {
+ DHD_ERROR(("%s, No requested HANG test\n", __FUNCTION__));
+ return;
+ }
+ } else if ((reason <= HANG_REASON_MASK) || (reason >= HANG_REASON_MAX)) {
+ DHD_ERROR(("Invalid HANG request, reason 0x%x\n", reason));
+ return;
+ }
+
+ if (dhdp->req_hang_type != 0) {
+ DHD_ERROR(("Already HANG requested for test\n"));
+ return;
+ }
+
+ switch (reason) {
+ case HANG_REASON_IOCTL_RESP_TIMEOUT:
+ DHD_ERROR(("Make HANG!!!: IOCTL response timeout(0x%x)\n", reason));
+ dhdp->req_hang_type = reason;
+ fw_test_code = 102; /* resumed on timeout */
+ (void) dhd_wl_ioctl_set_intiovar(dhdp, "bus:disconnect", fw_test_code,
+ WLC_SET_VAR, TRUE, 0);
+ break;
+ case HANG_REASON_DONGLE_TRAP:
+ DHD_ERROR(("Make HANG!!!: Dongle trap (0x%x)\n", reason));
+ dhdp->req_hang_type = reason;
+ fw_test_code = 99; /* dongle trap */
+ (void) dhd_wl_ioctl_set_intiovar(dhdp, "bus:disconnect", fw_test_code,
+ WLC_SET_VAR, TRUE, 0);
+ break;
+ case HANG_REASON_D3_ACK_TIMEOUT:
+ DHD_ERROR(("Make HANG!!!: D3 ACK timeout (0x%x)\n", reason));
+ dhdp->req_hang_type = reason;
+ break;
+ case HANG_REASON_BUS_DOWN:
+ DHD_ERROR(("Make HANG!!!: BUS down(0x%x)\n", reason));
+ dhdp->req_hang_type = reason;
+ break;
+ case HANG_REASON_PCIE_LINK_DOWN_RC_DETECT:
+ case HANG_REASON_PCIE_LINK_DOWN_EP_DETECT:
+ case HANG_REASON_MSGBUF_LIVELOCK:
+ dhdp->req_hang_type = 0;
+ DHD_ERROR(("Does not support requested HANG(0x%x)\n", reason));
+ break;
+ case HANG_REASON_IFACE_DEL_FAILURE:
+ dhdp->req_hang_type = 0;
+ DHD_ERROR(("Does not support requested HANG(0x%x)\n", reason));
+ break;
+ case HANG_REASON_HT_AVAIL_ERROR:
+ dhdp->req_hang_type = 0;
+ DHD_ERROR(("PCIe does not support requested HANG(0x%x)\n", reason));
+ break;
+ case HANG_REASON_PCIE_RC_LINK_UP_FAIL:
+ DHD_ERROR(("Make HANG!!!:Link Up(0x%x)\n", reason));
+ dhdp->req_hang_type = reason;
+ break;
+ default:
+ dhdp->req_hang_type = 0;
+ DHD_ERROR(("Unknown HANG request (0x%x)\n", reason));
+ break;
+ }
+}
+#endif /* DHD_HANG_SEND_UP_TEST */
+
+#ifdef BT_OVER_PCIE
+#define BT_QUIESCE TRUE
+#define BT_RESUME FALSE
+#define BT_QUIESCE_RESPONSE_TIMEOUT 4000
+
+int
+dhd_request_bt_quiesce(dhd_pub_t *dhdp)
+{
+ dhd_info_t * dhd = (dhd_info_t *)(dhdp->info);
+ long timeout = BT_QUIESCE_RESPONSE_TIMEOUT;
+
+ if (request_bt_quiesce_ptr == NULL) {
+ DHD_ERROR(("%s: BT not loaded\n", __FUNCTION__));
+ return BCME_OK;
+ }
+
+ mutex_lock(&dhd->quiesce_lock);
+ DHD_ERROR(("%s: start quiesce_state = %d\n", __FUNCTION__, dhd->dhd_quiesce_state));
+ if (dhd->dhd_quiesce_state != DHD_QUIESCE_INIT) {
+ DHD_ERROR(("%s: start quiesce_state = %d\n", __FUNCTION__, dhd->dhd_quiesce_state));
+ mutex_unlock(&dhd->quiesce_lock);
+ return BCME_ERROR;
+ }
+ dhd->dhd_quiesce_state = REQUEST_BT_QUIESCE;
+ request_bt_quiesce_ptr(BT_QUIESCE);
+
+ timeout = wait_event_timeout(dhd->quiesce_wait,
+ (dhd->dhd_quiesce_state == RESPONSE_BT_QUIESCE), timeout);
+
+ DHD_ERROR(("%s: after wait quiesce_state = %d\n", __FUNCTION__, dhd->dhd_quiesce_state));
+
+ mutex_unlock(&dhd->quiesce_lock);
+ if (!timeout) {
+ DHD_ERROR(("%s: timeout quiesce_state = %d\n",
+ __FUNCTION__, dhd->dhd_quiesce_state));
+ return BCME_BUSY;
+ }
+ return BCME_OK;
+}
+
+int
+dhd_request_bt_resume(dhd_pub_t *dhdp)
+{
+ dhd_info_t * dhd = (dhd_info_t *)(dhdp->info);
+ long timeout = BT_QUIESCE_RESPONSE_TIMEOUT;
+
+ if (request_bt_quiesce_ptr == NULL) {
+ DHD_ERROR(("%s: BT not loaded\n", __FUNCTION__));
+ return BCME_OK;
+ }
+
+ DHD_ERROR(("%s: start quiesce_state = %d\n", __FUNCTION__, dhd->dhd_quiesce_state));
+ mutex_lock(&dhd->quiesce_lock);
+ if (dhd->dhd_quiesce_state != RESPONSE_BT_QUIESCE) {
+ mutex_unlock(&dhd->quiesce_lock);
+ return BCME_ERROR;
+ }
+ dhd->dhd_quiesce_state = REQUEST_BT_RESUME;
+ request_bt_quiesce_ptr(BT_RESUME);
+
+ timeout = wait_event_timeout(dhd->quiesce_wait,
+ (dhd->dhd_quiesce_state == RESPONSE_BT_RESUME), timeout);
+
+ DHD_ERROR(("%s: after wait quiesce_state = %d\n", __FUNCTION__, dhd->dhd_quiesce_state));
+
+ dhd->dhd_quiesce_state = DHD_QUIESCE_INIT;
+ mutex_unlock(&dhd->quiesce_lock);
+ if (!timeout) {
+ DHD_ERROR(("%s: timeout quiesce_state = %d\n",
+ __FUNCTION__, dhd->dhd_quiesce_state));
+ return BCME_BUSY;
+ }
+ return BCME_OK;
+}
+
+void
+response_bt_quiesce(bool quiesce)
+{
+ dhd_pub_t *dhdp = g_dhd_pub;
+ dhd_info_t * dhd = (dhd_info_t *)(dhdp->info);
+ if (quiesce == BT_QUIESCE) {
+ if (dhd->dhd_quiesce_state == REQUEST_BT_QUIESCE) {
+ dhd->dhd_quiesce_state = RESPONSE_BT_QUIESCE;
+ wake_up(&dhd->quiesce_wait);
+ return;
+ }
+ } else if (quiesce == BT_RESUME) {
+ if (dhd->dhd_quiesce_state == REQUEST_BT_RESUME) {
+ dhd->dhd_quiesce_state = RESPONSE_BT_RESUME;
+ wake_up(&dhd->quiesce_wait);
+ return;
+ }
+ }
+ DHD_ERROR(("%s: Wrong Queisce Response=%d in State=%d\n",
+ __FUNCTION__, quiesce, dhd->dhd_quiesce_state));
+ return;
+}
+
+int
+dhd_bus_perform_flr_with_quiesce(dhd_pub_t *dhdp, struct dhd_bus *bus,
+ bool init_deinit_path)
+{
+ int ret;
+ dhd_info_t * dhd = (dhd_info_t *)(dhdp->info);
+ bool dongle_isolation = dhdp->dongle_isolation;
+ mutex_lock(&dhd->quiesce_flr_lock);
+ dhd->dhd_quiesce_state = DHD_QUIESCE_INIT;
+
+ /* pause data on all the interfaces */
+ dhd_bus_stop_queue(dhdp->bus);
+
+ /* Since we are about to do FLR, advertise that a bus down is in progress
+ * to other bus user contexts like Tx, Rx, IOVAR, WD etc.
+ */
+ dhdpcie_advertise_bus_cleanup(dhdp);
+
+#ifdef BT_OVER_PCIE
+ /* Disable L1SS of RC and EP.
+ * L1SS is enabled again in dhd_bus_start if dhd_sync_with_dongle succeeds.
+ */
+ dhd_bus_l1ss_enable_rc_ep(dhdp->bus, FALSE);
+#endif /* BT_OVER_PCIE */
+
+ if (dhd_bus_force_bt_quiesce_enabled(dhdp->bus)) {
+ DHD_ERROR(("%s: Request Quiesce\n", __FUNCTION__));
+ /* Request BT quiesce right before F0 FLR to minimise latency */
+ ret = dhd_request_bt_quiesce(dhdp); /* Handle return value */
+ if (ret != BCME_OK) {
+ DHD_ERROR(("%s: Error(%d) in Request Quiesce\n", __FUNCTION__, ret));
+ /* TODO: plugin API for Toggle REGON Here */
+ mutex_unlock(&dhd->quiesce_flr_lock);
+ return ret;
+ }
+ }
+
+ dhd_bus_pcie_pwr_req_reload_war(dhdp->bus);
+
+ DHD_ERROR(("%s: Perform FLR\n", __FUNCTION__));
+
+ ret = dhd_bus_perform_flr(dhdp->bus, dhd_bus_get_flr_force_fail(dhdp->bus));
+ if (ret != BCME_OK) {
+ DHD_ERROR(("%s: Error(%d) in Performing FLR\n", __FUNCTION__, ret));
+ /* TODO: Ensure that BT Host Driver is out of Quiesce state before REGON
+ * Either by sending an unquiesce message Here OR as a part of ON/OFF API.
+ */
+ /* TODO: plugin API for Toggle REGON Here */
+ mutex_unlock(&dhd->quiesce_flr_lock);
+ return ret;
+ }
+
+ if (dhd_bus_force_bt_quiesce_enabled(dhdp->bus)) {
+ DHD_ERROR(("%s: Request Resume\n", __FUNCTION__));
+ /* Resume BT right after F0 FLR to minimise latency */
+ ret = dhd_request_bt_resume(dhdp); /* Handle return value */
+ if (ret != BCME_OK) {
+ DHD_ERROR(("%s: Error(%d) in Request Resume\n", __FUNCTION__, ret));
+ /* TODO: plugin API for Toggle REGON Here */
+ mutex_unlock(&dhd->quiesce_flr_lock);
+ return ret;
+ }
+ }
+
+ /* Devreset would perform FLR again; set dongle_isolation to avoid it */
+ dhdp->dongle_isolation = TRUE;
+
+ DHD_ERROR(("%s: Devreset ON\n", __FUNCTION__));
+ dhd_bus_devreset(dhdp, 1); /* DHD structure cleanup */
+
+ DHD_ERROR(("%s: Devreset OFF\n", __FUNCTION__));
+ dhd_bus_devreset(dhdp, 0); /* DHD structure re-init */
+
+ dhdp->dongle_isolation = dongle_isolation; /* Restore the old value */
+
+ /* resume data on all the interfaces */
+ dhd_bus_start_queue(dhdp->bus);
+ mutex_unlock(&dhd->quiesce_flr_lock);
+
+ DHD_ERROR(("%s: done\n", __FUNCTION__));
+ return BCME_DNGL_DEVRESET;
+}
+#endif /* BT_OVER_PCIE */
+
+#ifdef DHD_TX_PROFILE
+static int
+process_layer2_headers(uint8 **p, int *plen, uint16 *type, bool is_host_sfhllc)
+{
+ int err = BCME_OK;
+
+ if (*type < ETHER_TYPE_MIN) {
+ struct dot3_mac_llc_snap_header *sh = (struct dot3_mac_llc_snap_header *)*p;
+
+ if (bcmp(&sh->dsap, llc_snap_hdr, SNAP_HDR_LEN) == 0) {
+ *type = ntoh16(sh->type);
+ if (*type == ETHER_TYPE_8021Q ||
+ (is_host_sfhllc && *type != ETHER_TYPE_8021Q)) {
+ *p += sizeof(struct dot3_mac_llc_snap_header);
+ if ((*plen -= sizeof(struct dot3_mac_llc_snap_header)) <= 0) {
+ err = BCME_ERROR;
+ }
+ }
+ else {
+ struct dot3_mac_llc_snapvlan_header *svh = (struct
+ dot3_mac_llc_snapvlan_header *)*p;
+
+ *type = ntoh16(svh->ether_type);
+ *p += sizeof(struct dot3_mac_llc_snapvlan_header);
+ if ((*plen -= sizeof(struct dot3_mac_llc_snapvlan_header)) <= 0) {
+ err = BCME_ERROR;
+ }
+ }
+ }
+ else {
+ err = BCME_ERROR;
+ }
+ }
+ else {
+ if (*type == ETHER_TYPE_8021Q) {
+ struct ethervlan_header *evh = (struct ethervlan_header *)*p;
+
+ *type = ntoh16(evh->ether_type);
+ *p += ETHERVLAN_HDR_LEN;
+ if ((*plen -= ETHERVLAN_HDR_LEN) <= 0) {
+ err = BCME_ERROR;
+ }
+ }
+ else {
+ *p += ETHER_HDR_LEN;
+ if ((*plen -= ETHER_HDR_LEN) <= 0) {
+ err = BCME_ERROR;
+ }
+ }
+ }
+
+ return err;
+}
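+/* Example of the header walk above (illustrative): for an 802.1Q-tagged
+ * Ethernet II frame, *type enters as ETHER_TYPE_8021Q, the VLAN header is
+ * skipped, and *type leaves as the encapsulated ether type (e.g.
+ * ETHER_TYPE_IP), with *p advanced and *plen reduced by ETHERVLAN_HDR_LEN.
+ */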
+
+static int
+process_layer3_headers(uint8 **p, int plen, uint16 *type)
+{
+ int err = BCME_OK;
+
+ if (*type == ETHER_TYPE_IP) {
+ struct ipv4_hdr *iph = (struct ipv4_hdr *)*p;
+ uint16 len = IPV4_HLEN(iph);
+ if ((plen -= len) <= 0) {
+ err = BCME_ERROR;
+ } else if (IP_VER(iph) == IP_VER_4 && len >= IPV4_MIN_HEADER_LEN) {
+ *type = IPV4_PROT(iph);
+ *p += len;
+ } else {
+ err = BCME_ERROR;
+ }
+ } else if (*type == ETHER_TYPE_IPV6) {
+ struct ipv6_hdr *ip6h = (struct ipv6_hdr *)*p;
+ if ((plen -= IPV6_MIN_HLEN) <= 0) {
+ err = BCME_ERROR;
+ } else if (IP_VER(ip6h) == IP_VER_6) {
+ *type = IPV6_PROT(ip6h);
+ *p += IPV6_MIN_HLEN;
+ if (IPV6_EXTHDR(*type)) {
+ uint8 proto_6 = 0;
+ int32 exth_len = ipv6_exthdr_len(*p, &proto_6);
+ if (exth_len < 0 || ((plen -= exth_len) <= 0)) {
+ err = BCME_ERROR;
+ } else {
+ *type = proto_6;
+ *p += exth_len;
+ }
+ }
+ } else {
+ err = BCME_ERROR;
+ }
+ }
+
+ return err;
+}
+
+bool
+dhd_protocol_matches_profile(uint8 *p, int plen, const dhd_tx_profile_protocol_t
+ *proto, bool is_host_sfhllc)
+{
+ struct ether_header *eh = NULL;
+ bool result = FALSE;
+ uint16 type = 0, ether_type = 0;
+
+ ASSERT(proto != NULL);
+ ASSERT(p != NULL);
+
+ if (plen <= 0) {
+ result = FALSE;
+ } else {
+ eh = (struct ether_header *)p;
+ type = ntoh16(eh->ether_type);
+ if (type < ETHER_TYPE_MIN && is_host_sfhllc) {
+ struct dot3_mac_llc_snap_header *dot3 =
+ (struct dot3_mac_llc_snap_header *)p;
+ ether_type = ntoh16(dot3->type);
+ } else {
+ ether_type = type;
+ }
+
+ if (proto->layer == DHD_TX_PROFILE_DATA_LINK_LAYER &&
+ proto->protocol_number == ether_type) {
+ result = TRUE;
+ } else if (process_layer2_headers(&p, &plen, &type, is_host_sfhllc) != BCME_OK) {
+ /* pass 'type' instead of 'ether_type' to process_layer2_headers
+ * because process_layer2_headers will take care of extraction
+ * of protocol types if llc snap header is present, based on
+ * the condition (type < ETHER_TYPE_MIN)
+ */
+ result = FALSE;
+ } else if (proto->layer == DHD_TX_PROFILE_DATA_LINK_LAYER) {
+ result = proto->protocol_number == type;
+ } else if (proto->layer != DHD_TX_PROFILE_NETWORK_LAYER) {
+ result = FALSE;
+ } else if (process_layer3_headers(&p, plen, &type) != BCME_OK) {
+ result = FALSE;
+ } else if (proto->protocol_number == type) {
+ /* L4, only check TCP/UDP case */
+ if ((type == IP_PROT_TCP) || (type == IP_PROT_UDP)) {
+ /* src/dst port are the first two uint16 fields in both tcp/udp
+ * hdr
+ */
+ struct bcmudp_hdr *hdr = (struct bcmudp_hdr *)p;
+
+ /* note that a src_port or dest_port of zero counts as a match
+ */
+ result = ((proto->src_port == 0) || (proto->src_port ==
+ ntoh16(hdr->src_port))) && ((proto->dest_port == 0) ||
+ (proto->dest_port == ntoh16(hdr->dst_port)));
+ } else {
+ /* at this point we know we are dealing with layer 3, and we
+ * know we are not dealing with TCP or UDP; this is considered a
+ * match
+ */
+ result = TRUE;
+ }
+ }
+ }
+
+ return result;
+}
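+/* Example of a profile match (illustrative values): a profile with
+ * layer = DHD_TX_PROFILE_NETWORK_LAYER, protocol_number = IP_PROT_UDP,
+ * src_port = 0 and dest_port = 53 matches any UDP packet destined to
+ * port 53, since a src_port/dest_port of zero acts as a wildcard above.
+ */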
+#endif /* defined(DHD_TX_PROFILE) */
+
+#ifdef DHD_TIMESYNC
+void
+BCMFASTPATH(dhd_parse_proto)(uint8 *pktdata, dhd_pkt_parse_t *parse)
+{
+ uint8 *pkt = NULL;
+ struct iphdr *iph = NULL;
+ struct ether_header *eh = (struct ether_header *)pktdata;
+
+ if (ntoh16(eh->ether_type) < ETHER_TYPE_MIN) {
+ pkt = (uint8 *)&pktdata[ETHER_HDR_LEN + DOT11_LLC_SNAP_HDR_LEN];
+ } else {
+ pkt = (uint8 *)&pktdata[ETHER_HDR_LEN];
+ }
+
+ iph = (struct iphdr *)pkt;
+
+ parse->proto = IP_PROT_RESERVED;
+ parse->t1 = 0;
+ parse->t2 = 0;
+
+ /* check IP header */
+ if ((IPV4_HLEN(iph) != IPV4_HLEN_MIN) || (IP_VER(iph) != IP_VER_4)) {
+ return;
+ }
+
+ if (iph->protocol == IP_PROT_ICMP) {
+ struct icmphdr *icmph;
+
+ parse->proto = iph->protocol;
+ icmph = (struct icmphdr *)((uint8 *)pkt + sizeof(struct iphdr));
+
+ if ((icmph->type == ICMP_ECHO) || (icmph->type == ICMP_ECHOREPLY)) {
+ parse->t1 = icmph->type;
+ parse->t2 = ntoh16(icmph->un.echo.sequence);
+ } else {
+ parse->t1 = icmph->type;
+ parse->t2 = icmph->code;
+ }
+ } else {
+ parse->proto = iph->protocol;
+ }
+
+ return;
+}
+#endif /* DHD_TIMESYNC */
+
+#ifdef BCMPCIE
+#define KIRQ_PRINT_BUF_LEN 256
+
+void
+dhd_print_kirqstats(dhd_pub_t *dhd, unsigned int irq_num)
+{
+ unsigned long flags = 0;
+ struct irq_desc *desc;
+ int i; /* cpu iterator */
+ struct bcmstrbuf strbuf;
+ char tmp_buf[KIRQ_PRINT_BUF_LEN];
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28))
+ desc = irq_to_desc(irq_num);
+ if (!desc) {
+ DHD_ERROR(("%s : irqdesc is not found \n", __FUNCTION__));
+ return;
+ }
+ bcm_binit(&strbuf, tmp_buf, KIRQ_PRINT_BUF_LEN);
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ bcm_bprintf(&strbuf, "dhd irq %u:", irq_num);
+ for_each_online_cpu(i)
+ bcm_bprintf(&strbuf, "%10u ",
+ desc->kstat_irqs ? *per_cpu_ptr(desc->kstat_irqs, i) : 0);
+ if (desc->irq_data.chip) {
+ if (desc->irq_data.chip->name)
+ bcm_bprintf(&strbuf, " %8s", desc->irq_data.chip->name);
+ else
+ bcm_bprintf(&strbuf, " %8s", "-");
+ } else {
+ bcm_bprintf(&strbuf, " %8s", "None");
+ }
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0))
+ if (desc->irq_data.domain)
+ bcm_bprintf(&strbuf, " %d", (int)desc->irq_data.hwirq);
+#ifdef CONFIG_GENERIC_IRQ_SHOW_LEVEL
+ bcm_bprintf(&strbuf, " %-8s", irqd_is_level_type(&desc->irq_data) ? "Level" : "Edge");
+#endif
+#endif /* LINUX VERSION >= 3.1.0 */
+
+ if (desc->name)
+ bcm_bprintf(&strbuf, "-%-8s", desc->name);
+
+ DHD_ERROR(("%s\n", strbuf.origbuf));
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+#endif /* LINUX VERSION >= 2.6.28 */
+}
+#endif /* BCMPCIE */
+
+void
+dhd_show_kirqstats(dhd_pub_t *dhd)
+{
+ unsigned int irq = -1;
+#ifdef BCMPCIE
+ dhdpcie_get_pcieirq(dhd->bus, &irq);
+#endif /* BCMPCIE */
+#ifdef BCMSDIO
+ irq = ((wifi_adapter_info_t *)dhd->info->adapter)->irq_num;
+#endif /* BCMSDIO */
+ if (irq != -1) {
+#ifdef BCMPCIE
+ DHD_ERROR(("DUMP data kernel irq stats : \n"));
+ dhd_print_kirqstats(dhd, irq);
+#endif /* BCMPCIE */
+#ifdef BCMSDIO
+ DHD_ERROR(("DUMP data/host wakeup kernel irq stats : \n"));
+#endif /* BCMSDIO */
+ }
+#ifdef BCMPCIE_OOB_HOST_WAKE
+ irq = dhd_bus_get_oob_irq_num(dhd);
+ if (irq) {
+ DHD_ERROR(("DUMP PCIE host wakeup kernel irq stats : \n"));
+ dhd_print_kirqstats(dhd, irq);
+ }
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+}
+
+void
+dhd_print_tasklet_status(dhd_pub_t *dhd)
+{
+ dhd_info_t *dhdinfo;
+
+ if (!dhd) {
+ DHD_ERROR(("%s : DHD is null\n", __FUNCTION__));
+ return;
+ }
+
+ dhdinfo = dhd->info;
+
+ if (!dhdinfo) {
+ DHD_ERROR(("%s : DHD INFO is null \n", __FUNCTION__));
+ return;
+ }
+
+ DHD_ERROR(("DHD Tasklet status : 0x%lx\n", dhdinfo->tasklet.state));
+}
+
+#if defined(DHD_MQ) && defined(DHD_MQ_STATS)
+void
+dhd_mqstats_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
+{
+ dhd_info_t *dhd = NULL;
+ int i = 0, j = 0;
+
+ if (!dhdp || !strbuf)
+ return;
+
+ dhd = dhdp->info;
+ bcm_bprintf(strbuf, "\nMQ STATS:\n=========\n");
+
+ bcm_bprintf(strbuf, "\nTx packet arrival AC histogram:\n");
+ bcm_bprintf(strbuf, "AC_BE \tAC_BK \tAC_VI \tAC_VO\n");
+ bcm_bprintf(strbuf, "----- \t----- \t----- \t-----\n");
+ for (i = 0; i < AC_COUNT; i++)
+ bcm_bprintf(strbuf, "%-10d\t", dhd->pktcnt_per_ac[i]);
+
+ bcm_bprintf(strbuf, "\n\nTx packet arrival Q-AC histogram:\n");
+ bcm_bprintf(strbuf, "\tAC_BE \tAC_BK \tAC_VI \tAC_VO\n");
+ bcm_bprintf(strbuf, "\t----- \t----- \t----- \t-----");
+ for (i = 0; i < MQ_MAX_QUEUES; i++) {
+ bcm_bprintf(strbuf, "\nQ%d\t", i);
+ for (j = 0; j < AC_COUNT; j++)
+ bcm_bprintf(strbuf, "%-8d\t", dhd->pktcnt_qac_histo[i][j]);
+ }
+
+ bcm_bprintf(strbuf, "\n\nTx Q-CPU scheduling histogram:\n");
+ bcm_bprintf(strbuf, "\t");
+ for (i = 0; i < nr_cpu_ids; i++)
+ bcm_bprintf(strbuf, "CPU%d \t", i);
+ for (i = 0; i < MQ_MAX_QUEUES; i++) {
+ bcm_bprintf(strbuf, "\nQ%d\t", i);
+ for (j = 0; j < nr_cpu_ids; j++)
+ bcm_bprintf(strbuf, "%-8d\t", dhd->cpu_qstats[i][j]);
+ }
+ bcm_bprintf(strbuf, "\n");
+}
+#endif /* DHD_MQ && DHD_MQ_STATS */
+
+#if defined(GDB_PROXY) && defined(PCIE_FULL_DONGLE) && defined(BCMINTERNAL)
+/* Procfs that provides to GDB Proxy asynchronous access to "sbreg", "membytes",
+ * "gdb_proxy_probe", "gdb_proxy_stop_count" iovars.
+ * Procfs is comprised of the root directory,
+ * /proc/dhd_gdb_proxy_<dev_name> (here <dev_name> is like 'eth0',
+ * etc.) that contains files: "sbreg", "membytes", "gdb_proxy_probe",
+ * "gdb_proxy_stop_count". These files are to be used to access respective
+ * iovars. Difference from iovar is that access to these files is not blocked
+ * by current iovar processing (i.e. file might be accessed while wl iovar is
+ * stuck on breakpoint inside firmware)
+ * Setting address for "membytes" and "sbreg" files is performed by means of
+ * seek position
+ * For now "membytes" and "sbreg" may only be used to read/write 1, 2 or 4
+ * bytes - this may be expanded later.
+ * For now "gdb_proxy_probe" only returns current Proxy ID, but does not set
+ * a new one (unlike iovar that may do both things)
+ */
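+/* Usage sketch (assuming an interface named eth0; the address is
+ * illustrative): the seek position selects the firmware address, so a
+ * 4-byte read at 0x800000 through "membytes" could look like
+ *
+ *   dd if=/proc/dhd_gdb_proxy_eth0/membytes bs=4 count=1 skip=$((0x800000 / 4))
+ *
+ * (dd seeks in units of bs, hence the division by 4.)
+ */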
+
+/* Size of firmware address space */
+#define GDB_PROXY_FS_MEM_SIZE ((loff_t)1 << 32)
+
+/* Common part of 'llseek' routine for all files */
+static loff_t
+gdb_proxy_fs_llseek(struct file *fp, loff_t off, int whence, loff_t file_len)
+{
+ loff_t pos = -1;
+
+ switch (whence) {
+ case SEEK_SET:
+ pos = off;
+ break;
+ case SEEK_CUR:
+ pos = fp->f_pos + off;
+ break;
+ case SEEK_END:
+ pos = file_len - off;
+ break;
+ }
+ if ((pos < 0) || (pos > file_len)) {
+ return -EINVAL;
+ }
+ fp->f_pos = pos;
+ return pos;
+}
+
+/* Common read/write procedure for "gdb_proxy_probe" and "gdb_proxy_stop_count"
+ * procfs files
+ * fp: file descriptor
+ * user_buffer_in: userspace buffer address for write operation, NULL for read
+ * operation
+ * user_buffer_out: userspace buffer address for read operation, NULL for write
+ * operation
+ * count: maximum number of bytes to read/write
+ * position: seek position incremented by length of data read/written
+ * iovar: name of iovar being accessed
+ * iovar_data_buf: intermediate buffer to store iovar data
+ * iovar_data_len: length of data, corresponded to iovar
+ * read_params: NULL or address of input parameter for iovar read
+ * read_plen: 0 or length of input parameter for iovar read
+ * Returns number of bytes read/written or error code
+ */
+static ssize_t
+gdb_proxy_fs_iovar_data_op(struct file *fp, const char __user *user_buffer_in,
+ char __user *user_buffer_out, size_t count, loff_t *position,
+ const char *iovar, void *iovar_data_buf, size_t iovar_data_len,
+ void *read_params, size_t read_plen)
+{
+ dhd_info_t *dhd = (dhd_info_t *)PDE_DATA(file_inode(fp));
+ int err;
+ if (count == 0) {
+ return 0;
+ }
+ /* If the position is beyond the data length, read nothing */
+ if ((*position < 0) || (*position >= (loff_t)iovar_data_len)) {
+ return 0;
+ }
+ /* If the buffer end is past the structure length, truncate the count */
+ if ((*position + count) > (loff_t)iovar_data_len) {
+ count = (size_t)((loff_t)iovar_data_len - *position);
+ }
+ if (user_buffer_in) {
+ /* SET operation */
+ /* Read/modify/write if not whole-buffer-operation */
+ if ((*position != 0) || (count < iovar_data_len)) {
+ err = dhd_bus_iovar_op(&(dhd->pub), iovar,
+ (char *)read_params, (uint)read_plen,
+ (char *)iovar_data_buf, (uint)iovar_data_len, IOV_GET);
+ if (err) {
+ return -EPERM;
+ }
+ }
+ if (copy_from_user((char *)iovar_data_buf + (uint)*position, user_buffer_in, count))
+ {
+ return -EPERM;
+ }
+ /* These params/plen of NULL/0 are a 'legal fiction', imposed by a
+ * strange assert in dhd_bus_iovar_op(). After this assert, arg/arglen
+ * is copied to params/plen - and even used inside the iovar handler!
+ */
+ err = dhd_bus_iovar_op(&(dhd->pub), iovar, NULL, 0,
+ (char *)iovar_data_buf, (uint)iovar_data_len, IOV_SET);
+ } else {
+ /* GET operation */
+ err = dhd_bus_iovar_op(&(dhd->pub), iovar, (char *)read_params, (uint)read_plen,
+ (char *)iovar_data_buf, (uint)iovar_data_len, IOV_GET);
+ }
+ if (err) {
+ return -EPERM;
+ }
+ if (user_buffer_out) {
+ if (copy_to_user(user_buffer_out, (char *)iovar_data_buf + (uint)*position, count))
+ {
+ return -EPERM;
+ }
+ }
+ *position += count;
+ return count;
+}
+
+/* Read for "gdb_proxy_probe" procfs file */
+static ssize_t
+gdb_proxy_fs_probe_read(struct file *fp, char __user *user_buffer, size_t count,
+ loff_t *position)
+{
+ uint32 proxy_id = 0;
+ dhd_gdb_proxy_probe_data_t probe_data;
+ return gdb_proxy_fs_iovar_data_op(fp, NULL, user_buffer, count, position, "gdb_proxy_probe",
+ &probe_data, sizeof(probe_data), &proxy_id, sizeof(proxy_id));
+}
+
+/* Seek for "gdb_proxy_probe" file */
+static loff_t
+gdb_proxy_fs_probe_llseek(struct file *fp, loff_t off, int whence)
+{
+ return gdb_proxy_fs_llseek(fp, off, whence, sizeof(dhd_gdb_proxy_probe_data_t));
+}
+
+/* File operations for "gdb_proxy_probe" procfs file */
+static const struct file_operations
+gdb_proxy_fs_probe_file_ops = {
+ .read = gdb_proxy_fs_probe_read,
+ .llseek = gdb_proxy_fs_probe_llseek,
+};
+
+/* Read for "gdb_proxy_stop_count" procfs file */
+static ssize_t
+gdb_proxy_fs_stop_count_read(struct file *fp, char __user *user_buffer, size_t count,
+ loff_t *position)
+{
+ uint32 stop_count;
+ return gdb_proxy_fs_iovar_data_op(fp, NULL, user_buffer, count, position,
+ "gdb_proxy_stop_count", &stop_count, sizeof(stop_count), NULL, 0);
+}
+
+/* Write for "gdb_proxy_stop_count" procfs file */
+static ssize_t
+gdb_proxy_fs_stop_count_write(struct file *fp, const char __user *user_buffer, size_t count,
+ loff_t *position)
+{
+ uint32 stop_count;
+ return gdb_proxy_fs_iovar_data_op(fp, user_buffer, NULL, count, position,
+ "gdb_proxy_stop_count", &stop_count, sizeof(stop_count), NULL, 0);
+}
+
+/* Seek for "gdb_proxy_stop_count" file */
+static loff_t
+gdb_proxy_fs_stop_count_llseek(struct file *fp, loff_t off, int whence)
+{
+ return gdb_proxy_fs_llseek(fp, off, whence, sizeof(uint32));
+}
+
+/* File operations for "gdb_proxy_stop_count" procfs file */
+static const struct file_operations
+gdb_proxy_fs_stop_count_file_ops = {
+ .read = gdb_proxy_fs_stop_count_read,
+ .write = gdb_proxy_fs_stop_count_write,
+ .llseek = gdb_proxy_fs_stop_count_llseek,
+};
+
+/* Common read/write procedure for "membytes" and "sbreg" procfs files
+ * fp: file descriptor
+ * buffer_in: userspace buffer address for write operation, NULL for read
+ * operation
+ * buffer_out: userspace buffer address for read operation, NULL for write
+ * operation
+ * count: maximum number of bytes to read/write
+ * position: seek position (interpreted as memory address in firmware address
+ * space),
+ * incremented by length of data read/written
+ * iovar: name of iovar being accessed
+ * address_first: TRUE if address shall be packed first, FALSE if width
+ * Returns number of bytes read/written or error code
+ */
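+/* Illustration of the buf[] layout packed below (derived from this routine,
+ * not from the iovar ABI documentation):
+ *   "membytes" (address_first == TRUE):  buf[0] = address, buf[1] = width
+ *   "sbreg"    (address_first == FALSE): buf[0] = width,   buf[1] = address
+ * buf[2] carries the value for SET operations.
+ */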
+static ssize_t
+gdb_proxy_fs_iovar_mem_op(struct file *fp, const char __user *user_buffer_in,
+ char __user *user_buffer_out, size_t count, loff_t *position,
+ const char *iovar, bool address_first)
+{
+ dhd_info_t *dhd = (dhd_info_t *)PDE_DATA(file_inode(fp));
+ uint32 buf[3];
+ int err;
+ if (count == 0) {
+ return 0;
+ }
+ if ((count > sizeof(uint32)) || (count & (count - 1))) {
+ return -EINVAL;
+ }
+ buf[address_first ? 0 : 1] = (uint32)(*position);
+ buf[address_first ? 1 : 0] = (uint32)count;
+ if (user_buffer_in) {
+ /* SET operation */
+ if (copy_from_user(&buf[2], user_buffer_in, count)) {
+ return -EPERM;
+ }
+ /* These params/plen of NULL/0 are a 'legal fiction', imposed by a
+ * strange assert in dhd_bus_iovar_op(). After this assert, arg/arglen
+ * is copied to params/plen - and even used inside the iovar handler!
+ */
+ err = dhd_bus_iovar_op(&(dhd->pub), iovar, NULL, 0, (char *)buf, sizeof(*buf) * 3,
+ IOV_SET);
+ } else {
+ /* GET operation */
+ /* This arglen of 8 bytes (where 4 would suffice) is due to a strange
+ * requirement that the minimum arglen be 8, hardcoded into the
+ * "membytes" iovar definition
+ */
+ err = dhd_bus_iovar_op(&(dhd->pub), iovar, (char *)buf, sizeof(*buf) * 2,
+ (char *)buf, sizeof(*buf) * 2, IOV_GET);
+ }
+ if (err) {
+ return -EPERM;
+ }
+ *position += count;
+ if (user_buffer_out) {
+ if (copy_to_user(user_buffer_out, &buf[0], count)) {
+ return -EPERM;
+ }
+ }
+ return count;
+}
+
+/* Common seek procedure for "membytes" and "sbreg" procfs files */
+static loff_t
+gdb_proxy_fs_memory_llseek(struct file *fp, loff_t off, int whence)
+{
+ return gdb_proxy_fs_llseek(fp, off, whence, GDB_PROXY_FS_MEM_SIZE);
+}
+
+/* Read for "membytes" procfs file */
+static ssize_t
+gdb_proxy_fs_membytes_read(struct file *fp, char __user *user_buffer, size_t count,
+ loff_t *position)
+{
+ return gdb_proxy_fs_iovar_mem_op(fp, NULL, user_buffer, count, position, "membytes", TRUE);
+}
+
+/* Write for "membytes" procfs file */
+static ssize_t
+gdb_proxy_fs_membytes_write(struct file *fp, const char __user *user_buffer, size_t count,
+ loff_t *position)
+{
+ return gdb_proxy_fs_iovar_mem_op(fp, user_buffer, NULL, count, position, "membytes", TRUE);
+}
+
+/* File operations for "membytes" procfs file */
+static const struct file_operations
+gdb_proxy_fs_membytes_file_ops = {
+ .read = gdb_proxy_fs_membytes_read,
+ .write = gdb_proxy_fs_membytes_write,
+ .llseek = gdb_proxy_fs_memory_llseek,
+};
+
+/* Read for "sbreg" procfs file */
+static ssize_t
+gdb_proxy_fs_sbreg_read(struct file *fp, char __user *user_buffer, size_t count, loff_t *position)
+{
+ return gdb_proxy_fs_iovar_mem_op(fp, NULL, user_buffer, count, position, "sbreg", FALSE);
+}
+
+/* Write for "sbreg" procfs file */
+static ssize_t
+gdb_proxy_fs_sbreg_write(struct file *fp, const char __user *user_buffer, size_t count,
+ loff_t *position)
+{
+ return gdb_proxy_fs_iovar_mem_op(fp, user_buffer, NULL, count, position, "sbreg", FALSE);
+}
+
+/* File operations for "sbreg" procfs file */
+static const struct file_operations
+gdb_proxy_fs_sbreg_file_ops = {
+ .read = gdb_proxy_fs_sbreg_read,
+ .write = gdb_proxy_fs_sbreg_write,
+ .llseek = gdb_proxy_fs_memory_llseek,
+};
+
+/* If GDB Proxy procfs files set not yet created for given dhd instance - creates it */
+static void
+gdb_proxy_fs_try_create(dhd_info_t *dhd, const char *dev_name)
+{
+ char dir_name[sizeof(dhd->gdb_proxy_fs_root_name)] = "dhd_gdb_proxy_";
+ struct proc_dir_entry *root_dentry;
+ int i;
+ static const struct {
+ const char *file_name;
+ const struct file_operations *fops;
+ } fileinfos[] = {
+ {"gdb_proxy_probe", &gdb_proxy_fs_probe_file_ops},
+ {"gdb_proxy_stop_count", &gdb_proxy_fs_stop_count_file_ops},
+ {"membytes", &gdb_proxy_fs_membytes_file_ops},
+ {"sbreg", &gdb_proxy_fs_sbreg_file_ops},
+ };
+ if (!dev_name || !*dev_name || dhd->gdb_proxy_fs_root) {
+ return;
+ }
+ strlcat_s(dir_name, dev_name, sizeof(dir_name));
+ dir_name[sizeof(dir_name) - 1] = 0;
+ root_dentry = proc_mkdir(dir_name, NULL);
+ if ((root_dentry == NULL) || IS_ERR(root_dentry)) {
+ return;
+ }
+ for (i = 0; i < ARRAYSIZE(fileinfos); ++i) {
+ struct proc_dir_entry *file_dentry = proc_create_data(fileinfos[i].file_name,
+ S_IRUGO | (fileinfos[i].fops->write ? S_IWUGO : 0), root_dentry,
+ fileinfos[i].fops, dhd);
+ if ((file_dentry == NULL) || IS_ERR(file_dentry)) {
+ goto fail;
+ }
+ }
+ dhd->gdb_proxy_fs_root = root_dentry;
+ memcpy_s(dhd->gdb_proxy_fs_root_name, sizeof(dhd->gdb_proxy_fs_root_name),
+ dir_name, sizeof(dhd->gdb_proxy_fs_root_name));
+ return;
+fail:
+ if (root_dentry) {
+ remove_proc_subtree(dir_name, NULL);
+ }
+}
+
+/* If GDB Proxy procfs files set created for given dhd instance - removes it */
+static void
+gdb_proxy_fs_remove(dhd_info_t *dhd)
+{
+ if (dhd->gdb_proxy_fs_root) {
+ remove_proc_subtree(dhd->gdb_proxy_fs_root_name, NULL);
+ dhd->gdb_proxy_fs_root = NULL;
+ bzero(dhd->gdb_proxy_fs_root_name, sizeof(dhd->gdb_proxy_fs_root_name));
+ }
+}
+#endif /* defined(GDB_PROXY) && defined(PCIE_FULL_DONGLE) && defined(BCMINTERNAL) */
+
+#ifdef DHD_MAP_LOGGING
+/* Will be called from SMMU fault handler */
+void
+dhd_smmu_fault_handler(uint32 axid, ulong fault_addr)
+{
+ dhd_pub_t *dhdp = (dhd_pub_t *)g_dhd_pub;
+ uint32 irq = (uint32)-1;
+
+ DHD_ERROR(("%s: Trigger SMMU Fault\n", __FUNCTION__));
+ DHD_ERROR(("%s: axid:0x%x, fault_addr:0x%lx", __FUNCTION__, axid, fault_addr));
+ dhdp->smmu_fault_occurred = TRUE;
+#ifdef DNGL_AXI_ERROR_LOGGING
+ dhdp->axi_error = TRUE;
+ dhdp->axi_err_dump->axid = axid;
+ dhdp->axi_err_dump->fault_address = fault_addr;
+#endif /* DNGL_AXI_ERROR_LOGGING */
+
+ /* Disable PCIe IRQ */
+ dhdpcie_get_pcieirq(dhdp->bus, &irq);
+ if (irq != (uint32)-1) {
+ disable_irq_nosync(irq);
+ }
+
+ /* Take debug information first */
+ DHD_OS_WAKE_LOCK(dhdp);
+ dhd_prot_smmu_fault_dump(dhdp);
+ DHD_OS_WAKE_UNLOCK(dhdp);
+
+ /* Take AXI information if possible */
+#ifdef DNGL_AXI_ERROR_LOGGING
+#ifdef DHD_USE_WQ_FOR_DNGL_AXI_ERROR
+ dhd_axi_error_dispatch(dhdp);
+#else
+ dhd_axi_error(dhdp);
+#endif /* DHD_USE_WQ_FOR_DNGL_AXI_ERROR */
+#endif /* DNGL_AXI_ERROR_LOGGING */
+}
+EXPORT_SYMBOL(dhd_smmu_fault_handler);
+#endif /* DHD_MAP_LOGGING */
+
+#ifdef DHD_PKTTS
+/* Get pktts flow configuration */
+int
+dhd_get_pktts_flow(dhd_pub_t *dhdp, void *arg, int len)
+{
+ dhd_info_t *dhd = dhdp->info;
+
+ if (!arg || len <= (sizeof(pktts_flow_t) * PKTTS_CONFIG_MAX)) {
+ return BCME_BADARG;
+ }
+
+ return memcpy_s(arg, len, &dhd->config[0], sizeof(dhd->config));
+}
+
+/* Set pktts flow configuration */
+int
+dhd_set_pktts_flow(dhd_pub_t *dhdp, void *params, int plen)
+{
+ dhd_info_t *dhd = dhdp->info;
+ pktts_flow_t *config;
+ uint32 checksum = 0;
+ int ret = BCME_OK;
+ uint32 temp;
+ uint32 idx = PKTTS_CONFIG_MAX;
+ uint32 num_config = 0;
+
+ if (plen < sizeof(*config)) {
+ DHD_ERROR(("dhd_set_pktts_flow: invalid buffer length (%d)\n", plen));
+ return BCME_BADLEN;
+ }
+
+ config = (pktts_flow_t *)params;
+
+ temp = htonl(config->src_ip);
+ checksum ^= bcm_compute_xor32((volatile uint32 *)&temp,
+ sizeof(temp) / sizeof(uint32));
+ temp = htonl(config->dst_ip);
+ checksum ^= bcm_compute_xor32((volatile uint32 *)&temp,
+ sizeof(temp) / sizeof(uint32));
+
+ temp = (hton16(config->dst_port) << 16) | hton16(config->src_port);
+ checksum ^= bcm_compute_xor32((volatile uint32 *)&temp,
+ sizeof(temp) / sizeof(uint32));
+ temp = config->proto;
+ checksum ^= bcm_compute_xor32((volatile uint32 *)&temp,
+ sizeof(temp) / sizeof(uint32));
+
+ /* Look for checksum match: for delete or update */
+ dhd_match_pktts_flow(dhdp, checksum, &idx, &num_config);
+
+ /* no matching config */
+ if (idx == PKTTS_CONFIG_MAX) {
+ if (config->pkt_offset == PKTTS_OFFSET_INVALID) {
+ /* no matching config found for deletion */
+ return BCME_NOTFOUND;
+ }
+
+ /* look for free config space */
+ for (idx = 0; idx < PKTTS_CONFIG_MAX; idx++) {
+ if (dhd->config[idx].chksum == 0) {
+ break;
+ }
+ }
+
+ if (idx == PKTTS_CONFIG_MAX) {
+ /* no config space left */
+ return BCME_NORESOURCE;
+ }
+ }
+
+ if (config->pkt_offset == PKTTS_OFFSET_INVALID) {
+ /* delete the config when pkt_offset is invalid */
+ memset(&dhd->config[idx], 0, sizeof(dhd->config[idx]));
+ } else {
+ ret = memcpy_s(&dhd->config[idx], sizeof(dhd->config[idx]),
+ config, sizeof(*config));
+ if (ret == BCME_OK) {
+ dhd->config[idx].chksum = checksum;
+ }
+ }
+
+ return ret;
+}
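+/* Note on the flow key above: the five-tuple (src_ip, dst_ip, src_port,
+ * dst_port, proto) is folded into one uint32 by XOR-ing the network-order
+ * words, roughly:
+ *   chksum = htonl(src_ip) ^ htonl(dst_ip)
+ *          ^ ((hton16(dst_port) << 16) | hton16(src_port)) ^ proto
+ * A chksum of zero therefore marks an unused config slot, which is why
+ * the free-slot scan above tests config[idx].chksum == 0.
+ */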
+
+/**
+ * dhd_match_pktts_flow - return the pktts config matching the given checksum
+ *
+ * @dhdp: pointer to dhd_pub object
+ * @checksum: five-tuple checksum
+ * @idx: returns the index of the matching pktts config
+ * @num_config: returns the number of valid pktts configs
+ */
+pktts_flow_t *
+dhd_match_pktts_flow(dhd_pub_t *dhdp, uint32 checksum, uint32 *idx, uint32 *num_config)
+{
+ dhd_info_t *dhd = dhdp->info;
+ pktts_flow_t *flow = NULL;
+ uint8 i;
+
+ for (i = 0; i < PKTTS_CONFIG_MAX; i++) {
+ if (dhd->config[i].chksum) {
+ (*num_config)++;
+ }
+
+ if (checksum && (dhd->config[i].chksum == checksum)) {
+ flow = &dhd->config[i];
+ break;
+ }
+ }
+
+ /* update matching config index */
+ if (idx) {
+ *idx = i;
+ }
+
+ /* continue counting the remaining valid configs */
+ for (; i < PKTTS_CONFIG_MAX; i++) {
+ if (dhd->config[i].chksum) {
+ (*num_config)++;
+ }
+ }
+
+ return flow;
+}
+
+/* Get pktts enable configuration */
+int dhd_get_pktts_enab(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd = dhdp->info;
+
+ return dhd->latency;
+}
+
+/* Set pktts enable configuration */
+int dhd_set_pktts_enab(dhd_pub_t *dhdp, bool val)
+{
+ dhd_info_t *dhd = dhdp->info;
+ uint32 var_int = val;
+ int ret = BCME_OK;
+ uint power_val;
+
+ /* check FW supports pktlat_ipc or pktlat_meta */
+ if (!FW_SUPPORTED(dhdp, pktlat_ipc) && !FW_SUPPORTED(dhdp, pktlat_meta)) {
+ BCM_REFERENCE(power_val);
+ DHD_INFO(("Chip does not support pktlat\n"));
+ return ret;
+ }
+ power_val = 0;
+ /* Disabling mpc and PM mode for pktlat */
+ ret = dhd_iovar(dhdp, 0, "mpc", (char *)&power_val, sizeof(power_val), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: Unable to set mpc 0, ret=%d\n", __FUNCTION__, ret));
+ return ret;
+ }
+ power_val = PM_OFF;
+ ret = dhd_wl_ioctl_cmd(dhdp, WLC_SET_PM, (char *)&power_val, sizeof(power_val),
+ TRUE, 0);
+ if (ret < 0) {
+ DHD_ERROR(("%s: Unable to set PM 0, ret=%d\n", __FUNCTION__, ret));
+ return ret;
+ }
+
+ ret = dhd_iovar(dhdp, 0, "pktts_enab", (char *)&var_int, sizeof(var_int), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: enabling pktts_enab failed, ret=%d\n", __FUNCTION__, ret));
+ return ret;
+ }
+
+ dhd->latency = val;
+
+ return 0;
+}
+#endif /* DHD_PKTTS */
+
+#ifdef DHD_ERPOM
+static void
+dhd_error_recovery(void *handle, void *event_info, u8 event)
+{
+ dhd_info_t *dhd = handle;
+ dhd_pub_t *dhdp;
+ int ret = 0;
+
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ dhdp = &dhd->pub;
+
+ if (!(dhd->dhd_state & DHD_ATTACH_STATE_DONE)) {
+ DHD_ERROR(("%s: init not completed, cannot initiate recovery\n",
+ __FUNCTION__));
+ return;
+ }
+
+#ifdef BT_OVER_PCIE
+ if (dhdp->dongle_trap_due_to_bt) {
+ DHD_ERROR(("WLAN trapped due to BT, toggle REG_ON\n"));
+ /* toggle REG_ON */
+ dhdp->pom_toggle_reg_on(WLAN_FUNC_ID, BY_WLAN_DUE_TO_BT);
+ return;
+ }
+#endif /* BT_OVER_PCIE */
+
+ ret = dhd_bus_perform_flr_with_quiesce(dhdp, dhdp->bus, FALSE);
+ if (ret != BCME_DNGL_DEVRESET) {
+ DHD_ERROR(("%s: dhd_bus_perform_flr_with_quiesce failed with ret: %d,"
+ "toggle REG_ON\n", __FUNCTION__, ret));
+ /* toggle REG_ON */
+ dhdp->pom_toggle_reg_on(WLAN_FUNC_ID, BY_WLAN_DUE_TO_WLAN);
+ return;
+ }
+}
+
+void
+dhd_schedule_reset(dhd_pub_t *dhdp)
+{
+ if (dhdp->enable_erpom) {
+ dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, NULL,
+ DHD_WQ_WORK_ERROR_RECOVERY, dhd_error_recovery, DHD_WQ_WORK_PRIORITY_HIGH);
+ }
+}
+#endif /* DHD_ERPOM */
+
+#ifdef DHD_PKT_LOGGING
+int
+dhd_pktlog_debug_dump(dhd_pub_t *dhdp)
+{
+ struct net_device *primary_ndev;
+ struct bcm_cfg80211 *cfg;
+ unsigned long flags = 0;
+
+ primary_ndev = dhd_linux_get_primary_netdev(dhdp);
+ if (!primary_ndev) {
+ DHD_ERROR(("%s: Cannot find primary netdev\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ cfg = wl_get_cfg(primary_ndev);
+ if (!cfg) {
+ DHD_ERROR(("%s: Cannot find cfg\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ DHD_GENERAL_LOCK(dhdp, flags);
+ if (DHD_BUS_BUSY_CHECK_IN_HALDUMP(dhdp)) {
+ DHD_GENERAL_UNLOCK(dhdp, flags);
+ DHD_ERROR(("%s: HAL dump is already triggered \n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ DHD_BUS_BUSY_SET_IN_HALDUMP(dhdp);
+ DHD_GENERAL_UNLOCK(dhdp, flags);
+ DHD_OS_WAKE_LOCK(dhdp);
+
+ if (wl_cfg80211_is_hal_started(cfg)) {
+ dhdp->pktlog_debug = TRUE;
+ dhd_dbg_send_urgent_evt(dhdp, NULL, 0);
+ } else {
+ DHD_ERROR(("[DUMP] %s: HAL Not started. skip urgent event\n", __FUNCTION__));
+ }
+ DHD_OS_WAKE_UNLOCK(dhdp);
+ /* In case of dhd_os_busbusy_wait_bitmask() timeout,
+ * hal dump bit will not be cleared. Hence clearing it here.
+ */
+ DHD_GENERAL_LOCK(dhdp, flags);
+ DHD_BUS_BUSY_CLEAR_IN_HALDUMP(dhdp);
+ dhd_os_busbusy_wake(dhdp);
+ DHD_GENERAL_UNLOCK(dhdp, flags);
+
+ return BCME_OK;
+}
+
+void
+dhd_pktlog_dump(void *handle, void *event_info, u8 event)
+{
+ dhd_info_t *dhd = handle;
+
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ if (dhd_pktlog_dump_write_file(&dhd->pub)) {
+ DHD_ERROR(("%s: writing pktlog dump file failed\n", __FUNCTION__));
+ return;
+ }
+}
+
+void
+dhd_schedule_pktlog_dump(dhd_pub_t *dhdp)
+{
+ dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
+ (void*)NULL, DHD_WQ_WORK_PKTLOG_DUMP,
+ dhd_pktlog_dump, DHD_WQ_WORK_PRIORITY_HIGH);
+}
+#endif /* DHD_PKT_LOGGING */
+
+#ifdef DHDTCPSYNC_FLOOD_BLK
+static void dhd_blk_tsfl_handler(struct work_struct * work)
+{
+ dhd_if_t *ifp = NULL;
+ dhd_pub_t *dhdp = NULL;
+ /* Ignore compiler warnings due to -Werror=cast-qual */
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ ifp = container_of(work, dhd_if_t, blk_tsfl_work);
+ GCC_DIAGNOSTIC_POP();
+
+ if (ifp) {
+ dhdp = &ifp->info->pub;
+ if (dhdp) {
+ if ((dhdp->op_mode & DHD_FLAG_P2P_GO_MODE)||
+ (dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
+ DHD_ERROR(("Disassoc due to TCP SYNC FLOOD ATTACK\n"));
+ wl_cfg80211_del_all_sta(ifp->net, WLAN_REASON_UNSPECIFIED);
+ } else if ((dhdp->op_mode & DHD_FLAG_P2P_GC_MODE)||
+ (dhdp->op_mode & DHD_FLAG_STA_MODE)) {
+ DHD_ERROR(("Diconnect due to TCP SYNC FLOOD ATTACK\n"));
+ wl_cfg80211_disassoc(ifp->net, WLAN_REASON_UNSPECIFIED);
+ }
+ ifp->disconnect_tsync_flood = TRUE;
+ }
+ }
+}
+void dhd_reset_tcpsync_info_by_ifp(dhd_if_t *ifp)
+{
+ ifp->tsync_rcvd = 0;
+ ifp->tsyncack_txed = 0;
+ ifp->last_sync = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC);
+}
+void dhd_reset_tcpsync_info_by_dev(struct net_device *dev)
+{
+ dhd_if_t *ifp = NULL;
+ if (dev) {
+ ifp = DHD_DEV_IFP(dev);
+ }
+ if (ifp) {
+ ifp->tsync_rcvd = 0;
+ ifp->tsyncack_txed = 0;
+ ifp->last_sync = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC);
+ ifp->tsync_per_sec = 0;
+ ifp->disconnect_tsync_flood = FALSE;
+ }
+}
+#endif /* DHDTCPSYNC_FLOOD_BLK */
+
+#ifdef DHD_4WAYM4_FAIL_DISCONNECT
+static void dhd_m4_state_handler(struct work_struct *work)
+{
+ dhd_if_t *ifp = NULL;
+ /* Ignore compiler warnings due to -Werror=cast-qual */
+ struct delayed_work *dw = to_delayed_work(work);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ ifp = container_of(dw, dhd_if_t, m4state_work);
+ GCC_DIAGNOSTIC_POP();
+
+ if (ifp && ifp->net &&
+ (OSL_ATOMIC_READ(ifp->info->pub->osh, &ifp->m4state) == M4_TXFAILED)) {
+ DHD_ERROR(("Disassoc for 4WAY_HANDSHAKE_TIMEOUT at %s\n",
+ ifp->net->name));
+ wl_cfg80211_disassoc(ifp->net, WLAN_REASON_4WAY_HANDSHAKE_TIMEOUT);
+ }
+}
+
+void
+dhd_eap_txcomplete(dhd_pub_t *dhdp, void *txp, bool success, int ifidx)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
+ struct ether_header *eh;
+ uint16 type;
+
+ if (!success) {
+ /* XXX where does this stuff belong to? */
+ dhd_prot_hdrpull(dhdp, NULL, txp, NULL, NULL);
+
+ /* XXX Use packet tag when it is available to identify its type */
+ eh = (struct ether_header *)PKTDATA(dhdp->osh, txp);
+ type = ntoh16(eh->ether_type);
+ if (type == ETHER_TYPE_802_1X) {
+ if (dhd_is_4way_msg((uint8 *)eh) == EAPOL_4WAY_M4) {
+ dhd_if_t *ifp = NULL;
+ ifp = dhd->iflist[ifidx];
+ if (!ifp || !ifp->net) {
+ return;
+ }
+
+ DHD_INFO(("%s: M4 TX failed on %d.\n",
+ __FUNCTION__, ifidx));
+
+ OSL_ATOMIC_SET(dhdp->osh, &ifp->m4state, M4_TXFAILED);
+ schedule_delayed_work(&ifp->m4state_work,
+ msecs_to_jiffies(MAX_4WAY_TIMEOUT_MS));
+ }
+ }
+ }
+}
+
+void
+dhd_cleanup_m4_state_work(dhd_pub_t *dhdp, int ifidx)
+{
+ dhd_info_t *dhdinfo;
+ dhd_if_t *ifp;
+
+ if ((ifidx < 0) || (ifidx >= DHD_MAX_IFS)) {
+ DHD_ERROR(("%s: invalid ifidx %d\n", __FUNCTION__, ifidx));
+ return;
+ }
+
+ dhdinfo = (dhd_info_t *)(dhdp->info);
+ if (!dhdinfo) {
+ DHD_ERROR(("%s: dhdinfo is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ ifp = dhdinfo->iflist[ifidx];
+ if (ifp) {
+ cancel_delayed_work_sync(&ifp->m4state_work);
+ }
+}
+#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
+
+#ifdef BIGDATA_SOFTAP
+void dhd_schedule_gather_ap_stadata(void *bcm_cfg, void *ndev, const wl_event_msg_t *e)
+{
+ struct bcm_cfg80211 *cfg;
+ dhd_pub_t *dhdp;
+ ap_sta_wq_data_t *p_wq_data;
+
+ if (!bcm_cfg || !ndev || !e) {
+ WL_ERR(("bcm_cfg=%p ndev=%p e=%p\n", bcm_cfg, ndev, e));
+ return;
+ }
+
+ cfg = (struct bcm_cfg80211 *)bcm_cfg;
+ dhdp = (dhd_pub_t *)cfg->pub;
+
+ if (!dhdp || !cfg->ap_sta_info) {
+ WL_ERR(("dhdp=%p ap_sta_info=%p\n", dhdp, cfg->ap_sta_info));
+ return;
+ }
+
+ p_wq_data = (ap_sta_wq_data_t *)MALLOCZ(dhdp->osh, sizeof(ap_sta_wq_data_t));
+ if (unlikely(!p_wq_data)) {
+ DHD_ERROR(("%s(): could not allocate memory for - "
+ "ap_sta_wq_data_t\n", __FUNCTION__));
+ return;
+ }
+
+ mutex_lock(&cfg->ap_sta_info->wq_data_sync);
+
+ memcpy(&p_wq_data->e, e, sizeof(wl_event_msg_t));
+ p_wq_data->dhdp = dhdp;
+ p_wq_data->bcm_cfg = cfg;
+ p_wq_data->ndev = (struct net_device *)ndev;
+
+ mutex_unlock(&cfg->ap_sta_info->wq_data_sync);
+
+ dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
+ p_wq_data, DHD_WQ_WORK_GET_BIGDATA_AP,
+ wl_gather_ap_stadata, DHD_WQ_WORK_PRIORITY_HIGH);
+
+}
+#endif /* BIGDATA_SOFTAP */
+
+void
+get_debug_dump_time(char *str)
+{
+ struct osl_timespec curtime;
+ unsigned long local_time;
+ struct rtc_time tm;
+
+ if (!strlen(str)) {
+ osl_do_gettimeofday(&curtime);
+ local_time = (u32)(curtime.tv_sec -
+ (sys_tz.tz_minuteswest * DHD_LOG_DUMP_TS_MULTIPLIER_VALUE));
+ rtc_time_to_tm(local_time, &tm);
+ snprintf(str, DEBUG_DUMP_TIME_BUF_LEN, DHD_LOG_DUMP_TS_FMT_YYMMDDHHMMSSMSMS,
+ tm.tm_year - 100, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min,
+ tm.tm_sec, (int)(curtime.tv_usec/NSEC_PER_USEC));
+ }
+}
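+/* A note on the formatting above (derived from the arguments passed in):
+ * DHD_LOG_DUMP_TS_FMT_YYMMDDHHMMSSMSMS receives a two-digit year
+ * (tm_year - 100), month, day, hour, minute, second and milliseconds
+ * (tv_usec / NSEC_PER_USEC), i.e. a local timestamp down to milliseconds;
+ * the exact separators depend on the format macro definition.
+ */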
+
+void
+clear_debug_dump_time(char *str)
+{
+ memset(str, 0, DEBUG_DUMP_TIME_BUF_LEN);
+}
+#if defined(WL_CFGVENDOR_SEND_HANG_EVENT) || defined(DHD_PKT_LOGGING)
+void
+copy_debug_dump_time(char *dest, char *src)
+{
+ memcpy(dest, src, DEBUG_DUMP_TIME_BUF_LEN);
+}
+#endif /* WL_CFGVENDOR_SEND_HANG_EVENT || DHD_PKT_LOGGING */
+
+/*
+ * DHD RING
+ */
+#define DHD_RING_ERR_INTERNAL(fmt, ...) DHD_ERROR(("EWPF-" fmt, ##__VA_ARGS__))
+#define DHD_RING_TRACE_INTERNAL(fmt, ...) DHD_INFO(("EWPF-" fmt, ##__VA_ARGS__))
+
+#define DHD_RING_ERR(x) DHD_RING_ERR_INTERNAL x
+#define DHD_RING_TRACE(x) DHD_RING_TRACE_INTERNAL x
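+/* The extra indirection above means callers pass a parenthesized argument
+ * list, matching the DHD_ERROR()/DHD_INFO() convention, e.g.:
+ *   DHD_RING_ERR(("RING SIZE IS TOO SMALL\n"));
+ */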
+
+#define DHD_RING_MAGIC 0x20170910
+#define DHD_RING_IDX_INVALID 0xffffffff
+
+#define DHD_RING_SYNC_LOCK_INIT(osh) osl_spin_lock_init(osh)
+#define DHD_RING_SYNC_LOCK_DEINIT(osh, lock) osl_spin_lock_deinit(osh, lock)
+#define DHD_RING_SYNC_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define DHD_RING_SYNC_UNLOCK(lock, flags) osl_spin_unlock(lock, flags)
+
+typedef struct {
+ uint32 elem_size;
+ uint32 elem_cnt;
+ uint32 write_idx; /* next write index, -1 : not started */
+ uint32 read_idx; /* next read index, -1 : not started */
+
+ /* protected elements during serialization */
+ int lock_idx; /* start index of the locked region; these elements are not overwritten */
+ int lock_count; /* number of locked elements, starting from lock_idx */
+
+ /* saved data elements */
+ void *elem;
+} dhd_fixed_ring_info_t;
+
+typedef struct {
+ uint32 elem_size;
+ uint32 elem_cnt;
+ uint32 idx; /* -1 : not started */
+ uint32 rsvd; /* reserved for future use */
+
+ /* protected elements during serialization */
+ atomic_t ring_locked;
+ /* set once the ring wraps and starts overwriting old entries */
+ uint32 ring_overwrited;
+
+ /* saved data elements */
+ void *elem;
+} dhd_singleidx_ring_info_t;
+
+typedef struct {
+ uint32 magic;
+ uint32 type;
+ void *ring_sync; /* spinlock for sync */
+ union {
+ dhd_fixed_ring_info_t fixed;
+ dhd_singleidx_ring_info_t single;
+ };
+} dhd_ring_info_t;
+
+uint32
+dhd_ring_get_hdr_size(void)
+{
+ return sizeof(dhd_ring_info_t);
+}
+
+void *
+dhd_ring_init(dhd_pub_t *dhdp, uint8 *buf, uint32 buf_size, uint32 elem_size,
+ uint32 elem_cnt, uint32 type)
+{
+ dhd_ring_info_t *ret_ring;
+
+ if (!buf) {
+ DHD_RING_ERR(("NO RING BUFFER\n"));
+ return NULL;
+ }
+
+ if (buf_size < dhd_ring_get_hdr_size() + elem_size * elem_cnt) {
+ DHD_RING_ERR(("RING SIZE IS TOO SMALL\n"));
+ return NULL;
+ }
+
+ if (type != DHD_RING_TYPE_FIXED && type != DHD_RING_TYPE_SINGLE_IDX) {
+ DHD_RING_ERR(("UNSUPPORTED RING TYPE\n"));
+ return NULL;
+ }
+
+ ret_ring = (dhd_ring_info_t *)buf;
+ ret_ring->type = type;
+ ret_ring->ring_sync = (void *)DHD_RING_SYNC_LOCK_INIT(dhdp->osh);
+ ret_ring->magic = DHD_RING_MAGIC;
+
+ if (type == DHD_RING_TYPE_FIXED) {
+ ret_ring->fixed.read_idx = DHD_RING_IDX_INVALID;
+ ret_ring->fixed.write_idx = DHD_RING_IDX_INVALID;
+ ret_ring->fixed.lock_idx = DHD_RING_IDX_INVALID;
+ ret_ring->fixed.elem = buf + sizeof(dhd_ring_info_t);
+ ret_ring->fixed.elem_size = elem_size;
+ ret_ring->fixed.elem_cnt = elem_cnt;
+ } else {
+ ret_ring->single.idx = DHD_RING_IDX_INVALID;
+ atomic_set(&ret_ring->single.ring_locked, 0);
+ ret_ring->single.ring_overwrited = 0;
+ ret_ring->single.rsvd = 0;
+ ret_ring->single.elem = buf + sizeof(dhd_ring_info_t);
+ ret_ring->single.elem_size = elem_size;
+ ret_ring->single.elem_cnt = elem_cnt;
+ }
+
+ return ret_ring;
+}
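+/* Typical usage sketch (element type and count are illustrative; only the
+ * accessors visible in this file are shown):
+ *
+ *   uint32 bufsz = dhd_ring_get_hdr_size() + sizeof(my_elem_t) * ELEM_CNT;
+ *   void *ring = dhd_ring_init(dhdp, buf, bufsz, sizeof(my_elem_t),
+ *       ELEM_CNT, DHD_RING_TYPE_FIXED);
+ *   ...
+ *   my_elem_t *oldest = (my_elem_t *)dhd_ring_get_first(ring);
+ *   dhd_ring_free_first(ring);
+ *   ...
+ *   dhd_ring_deinit(dhdp, ring);
+ */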
+
+void
+dhd_ring_deinit(dhd_pub_t *dhdp, void *_ring)
+{
+ dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
+ if (!ring) {
+ return;
+ }
+
+ if (ring->magic != DHD_RING_MAGIC) {
+ return;
+ }
+
+ if (ring->type != DHD_RING_TYPE_FIXED &&
+ ring->type != DHD_RING_TYPE_SINGLE_IDX) {
+ return;
+ }
+
+ DHD_RING_SYNC_LOCK_DEINIT(dhdp->osh, ring->ring_sync);
+ ring->ring_sync = NULL;
+ if (ring->type == DHD_RING_TYPE_FIXED) {
+ dhd_fixed_ring_info_t *fixed = &ring->fixed;
+ memset(fixed->elem, 0, fixed->elem_size * fixed->elem_cnt);
+ fixed->elem_size = fixed->elem_cnt = 0;
+ } else {
+ dhd_singleidx_ring_info_t *single = &ring->single;
+ memset(single->elem, 0, single->elem_size * single->elem_cnt);
+ single->elem_size = single->elem_cnt = 0;
+ }
+ ring->type = 0;
+ ring->magic = 0;
+}
+
+static inline uint32
+__dhd_ring_ptr2idx(void *ring, void *ptr, char *sig, uint32 type)
+{
+ uint32 diff;
+ uint32 ret_idx = (uint32)DHD_RING_IDX_INVALID;
+ uint32 elem_size, elem_cnt;
+ void *elem;
+
+ if (type == DHD_RING_TYPE_FIXED) {
+ dhd_fixed_ring_info_t *fixed = (dhd_fixed_ring_info_t *)ring;
+ elem_size = fixed->elem_size;
+ elem_cnt = fixed->elem_cnt;
+ elem = fixed->elem;
+ } else if (type == DHD_RING_TYPE_SINGLE_IDX) {
+ dhd_singleidx_ring_info_t *single = (dhd_singleidx_ring_info_t *)ring;
+ elem_size = single->elem_size;
+ elem_cnt = single->elem_cnt;
+ elem = single->elem;
+ } else {
+ DHD_RING_ERR(("UNSUPPORTED RING TYPE %d\n", type));
+ return ret_idx;
+ }
+
+ if (ptr < elem) {
+ DHD_RING_ERR(("INVALID POINTER %s:%p, ring->elem:%p\n", sig, ptr, elem));
+ return ret_idx;
+ }
+ diff = (uint32)((uint8 *)ptr - (uint8 *)elem);
+ if (diff % elem_size != 0) {
+ DHD_RING_ERR(("INVALID POINTER %s:%p, ring->elem:%p\n", sig, ptr, elem));
+ return ret_idx;
+ }
+ ret_idx = diff / elem_size;
+ if (ret_idx >= elem_cnt) {
+ DHD_RING_ERR(("INVALID POINTER max:%d cur:%d\n", elem_cnt, ret_idx));
+ }
+ return ret_idx;
+}
+
+/* Sub functions for fixed ring */
+/* get counts between two indexes of ring buffer (internal only) */
+static inline int
+__dhd_fixed_ring_get_count(dhd_fixed_ring_info_t *ring, int start, int end)
+{
+ if (start == DHD_RING_IDX_INVALID || end == DHD_RING_IDX_INVALID) {
+ return 0;
+ }
+
+ return (ring->elem_cnt + end - start) % ring->elem_cnt + 1;
+}
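+/* Worked example for the count formula above: with elem_cnt == 8,
+ * start == 6 and end == 1, (8 + 1 - 6) % 8 + 1 == 4, i.e. occupied
+ * slots 6, 7, 0 and 1 (both endpoints inclusive).
+ */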
+
+static inline int
+__dhd_fixed_ring_get_cur_size(dhd_fixed_ring_info_t *ring)
+{
+ return __dhd_fixed_ring_get_count(ring, ring->read_idx, ring->write_idx);
+}
+
+static inline void *
+__dhd_fixed_ring_get_first(dhd_fixed_ring_info_t *ring)
+{
+ if (ring->read_idx == DHD_RING_IDX_INVALID) {
+ return NULL;
+ }
+ return (uint8 *)ring->elem + (ring->elem_size * ring->read_idx);
+}
+
+static inline void
+__dhd_fixed_ring_free_first(dhd_fixed_ring_info_t *ring)
+{
+ uint32 next_idx;
+
+ if (ring->read_idx == DHD_RING_IDX_INVALID) {
+ DHD_RING_ERR(("EMPTY RING\n"));
+ return;
+ }
+
+ next_idx = (ring->read_idx + 1) % ring->elem_cnt;
+ if (ring->read_idx == ring->write_idx) {
+ /* Become empty */
+ ring->read_idx = ring->write_idx = DHD_RING_IDX_INVALID;
+ return;
+ }
+
+ ring->read_idx = next_idx;
+ return;
+}
+
+static inline void *
+__dhd_fixed_ring_get_last(dhd_fixed_ring_info_t *ring)
+{
+ if (ring->read_idx == DHD_RING_IDX_INVALID) {
+ return NULL;
+ }
+ return (uint8 *)ring->elem + (ring->elem_size * ring->write_idx);
+}
+
+static inline void *
+__dhd_fixed_ring_get_empty(dhd_fixed_ring_info_t *ring)
+{
+ uint32 tmp_idx;
+
+ if (ring->read_idx == DHD_RING_IDX_INVALID) {
+ ring->read_idx = ring->write_idx = 0;
+ return (uint8 *)ring->elem;
+ }
+
+ /* check next index is not locked */
+ tmp_idx = (ring->write_idx + 1) % ring->elem_cnt;
+ if (ring->lock_idx == tmp_idx) {
+ return NULL;
+ }
+
+ ring->write_idx = tmp_idx;
+ if (ring->write_idx == ring->read_idx) {
+ /* ring is full, drop the oldest record */
+ ring->read_idx = (ring->read_idx + 1) % ring->elem_cnt;
+
+ }
+ return (uint8 *)ring->elem + (ring->elem_size * ring->write_idx);
+}
+
+static inline void *
+__dhd_fixed_ring_get_next(dhd_fixed_ring_info_t *ring, void *prev, uint32 type)
+{
+ uint32 cur_idx;
+
+ if (ring->read_idx == DHD_RING_IDX_INVALID) {
+ DHD_RING_ERR(("EMPTY RING\n"));
+ return NULL;
+ }
+
+ cur_idx = __dhd_ring_ptr2idx(ring, prev, "NEXT", type);
+ if (cur_idx >= ring->elem_cnt) {
+ return NULL;
+ }
+
+ if (cur_idx == ring->write_idx) {
+ /* no more new record */
+ return NULL;
+ }
+
+ cur_idx = (cur_idx + 1) % ring->elem_cnt;
+ return (uint8 *)ring->elem + ring->elem_size * cur_idx;
+}
+
+static inline void *
+__dhd_fixed_ring_get_prev(dhd_fixed_ring_info_t *ring, void *prev, uint32 type)
+{
+ uint32 cur_idx;
+
+ if (ring->read_idx == DHD_RING_IDX_INVALID) {
+ DHD_RING_ERR(("EMPTY RING\n"));
+ return NULL;
+ }
+ cur_idx = __dhd_ring_ptr2idx(ring, prev, "PREV", type);
+ if (cur_idx >= ring->elem_cnt) {
+ return NULL;
+ }
+ if (cur_idx == ring->read_idx) {
+ /* no more new record */
+ return NULL;
+ }
+
+ cur_idx = (cur_idx + ring->elem_cnt - 1) % ring->elem_cnt;
+ return (uint8 *)ring->elem + ring->elem_size * cur_idx;
+}
+
+static inline void
+__dhd_fixed_ring_lock(dhd_fixed_ring_info_t *ring, void *first_ptr, void *last_ptr, uint32 type)
+{
+ uint32 first_idx;
+ uint32 last_idx;
+ uint32 ring_filled_cnt;
+ uint32 tmp_cnt;
+
+ if (ring->read_idx == DHD_RING_IDX_INVALID) {
+ DHD_RING_ERR(("EMPTY RING\n"));
+ return;
+ }
+
+ if (first_ptr) {
+ first_idx = __dhd_ring_ptr2idx(ring, first_ptr, "LCK FIRST", type);
+ if (first_idx >= ring->elem_cnt) {
+ return;
+ }
+ } else {
+ first_idx = ring->read_idx;
+ }
+
+ if (last_ptr) {
+ last_idx = __dhd_ring_ptr2idx(ring, last_ptr, "LCK LAST", type);
+ if (last_idx >= ring->elem_cnt) {
+ return;
+ }
+ } else {
+ last_idx = ring->write_idx;
+ }
+
+ ring_filled_cnt = __dhd_fixed_ring_get_count(ring, ring->read_idx, ring->write_idx);
+ tmp_cnt = __dhd_fixed_ring_get_count(ring, ring->read_idx, first_idx);
+ if (tmp_cnt > ring_filled_cnt) {
+ DHD_RING_ERR(("LOCK FIRST IS TO EMPTY ELEM: write: %d read: %d cur:%d\n",
+ ring->write_idx, ring->read_idx, first_idx));
+ return;
+ }
+
+ tmp_cnt = __dhd_fixed_ring_get_count(ring, ring->read_idx, last_idx);
+ if (tmp_cnt > ring_filled_cnt) {
+ DHD_RING_ERR(("LOCK LAST IS TO EMPTY ELEM: write: %d read: %d cur:%d\n",
+ ring->write_idx, ring->read_idx, last_idx));
+ return;
+ }
+
+ ring->lock_idx = first_idx;
+ ring->lock_count = __dhd_fixed_ring_get_count(ring, first_idx, last_idx);
+ return;
+}
+
+static inline void
+__dhd_fixed_ring_lock_free(dhd_fixed_ring_info_t *ring)
+{
+ if (ring->read_idx == DHD_RING_IDX_INVALID) {
+ DHD_RING_ERR(("EMPTY RING\n"));
+ return;
+ }
+
+ ring->lock_idx = DHD_RING_IDX_INVALID;
+ ring->lock_count = 0;
+ return;
+}
+
+static inline void *
+__dhd_fixed_ring_lock_get_first(dhd_fixed_ring_info_t *ring)
+{
+ if (ring->read_idx == DHD_RING_IDX_INVALID) {
+ DHD_RING_ERR(("EMPTY RING\n"));
+ return NULL;
+ }
+ if (ring->lock_idx == DHD_RING_IDX_INVALID) {
+ DHD_RING_ERR(("NO LOCK POINT\n"));
+ return NULL;
+ }
+ return (uint8 *)ring->elem + ring->elem_size * ring->lock_idx;
+}
+
+static inline void *
+__dhd_fixed_ring_lock_get_last(dhd_fixed_ring_info_t *ring)
+{
+ int lock_last_idx;
+ if (ring->read_idx == DHD_RING_IDX_INVALID) {
+ DHD_RING_ERR(("EMPTY RING\n"));
+ return NULL;
+ }
+ if (ring->lock_idx == DHD_RING_IDX_INVALID) {
+ DHD_RING_ERR(("NO LOCK POINT\n"));
+ return NULL;
+ }
+
+ lock_last_idx = (ring->lock_idx + ring->lock_count - 1) % ring->elem_cnt;
+ return (uint8 *)ring->elem + ring->elem_size * lock_last_idx;
+}
+
+static inline int
+__dhd_fixed_ring_lock_get_count(dhd_fixed_ring_info_t *ring)
+{
+ if (ring->read_idx == DHD_RING_IDX_INVALID) {
+ DHD_RING_ERR(("EMPTY RING\n"));
+ return BCME_ERROR;
+ }
+ if (ring->lock_idx == DHD_RING_IDX_INVALID) {
+ DHD_RING_ERR(("NO LOCK POINT\n"));
+ return BCME_ERROR;
+ }
+ return ring->lock_count;
+}
+
+static inline void
+__dhd_fixed_ring_lock_free_first(dhd_fixed_ring_info_t *ring)
+{
+ if (ring->read_idx == DHD_RING_IDX_INVALID) {
+ DHD_RING_ERR(("EMPTY RING\n"));
+ return;
+ }
+ if (ring->lock_idx == DHD_RING_IDX_INVALID) {
+ DHD_RING_ERR(("NO LOCK POINT\n"));
+ return;
+ }
+
+ ring->lock_count--;
+ if (ring->lock_count <= 0) {
+ ring->lock_idx = DHD_RING_IDX_INVALID;
+ } else {
+ ring->lock_idx = (ring->lock_idx + 1) % ring->elem_cnt;
+ }
+ return;
+}
+
+static inline void
+__dhd_fixed_ring_set_read_idx(dhd_fixed_ring_info_t *ring, uint32 idx)
+{
+ ring->read_idx = idx;
+}
+
+static inline void
+__dhd_fixed_ring_set_write_idx(dhd_fixed_ring_info_t *ring, uint32 idx)
+{
+ ring->write_idx = idx;
+}
+
+static inline uint32
+__dhd_fixed_ring_get_read_idx(dhd_fixed_ring_info_t *ring)
+{
+ return ring->read_idx;
+}
+
+static inline uint32
+__dhd_fixed_ring_get_write_idx(dhd_fixed_ring_info_t *ring)
+{
+ return ring->write_idx;
+}
+
+/* Sub functions for single index ring */
+static inline void *
+__dhd_singleidx_ring_get_first(dhd_singleidx_ring_info_t *ring)
+{
+ uint32 tmp_idx = 0;
+
+ if (ring->idx == DHD_RING_IDX_INVALID) {
+ return NULL;
+ }
+
+ if (ring->ring_overwrited) {
+ tmp_idx = (ring->idx + 1) % ring->elem_cnt;
+ }
+
+ return (uint8 *)ring->elem + (ring->elem_size * tmp_idx);
+}
+
+static inline void *
+__dhd_singleidx_ring_get_last(dhd_singleidx_ring_info_t *ring)
+{
+ if (ring->idx == DHD_RING_IDX_INVALID) {
+ return NULL;
+ }
+
+ return (uint8 *)ring->elem + (ring->elem_size * ring->idx);
+}
+
+static inline void *
+__dhd_singleidx_ring_get_empty(dhd_singleidx_ring_info_t *ring)
+{
+ if (ring->idx == DHD_RING_IDX_INVALID) {
+ ring->idx = 0;
+ return (uint8 *)ring->elem;
+ }
+
+ /* check the lock is held */
+ if (atomic_read(&ring->ring_locked)) {
+ return NULL;
+ }
+
+ /* check the index rollover */
+ if (!ring->ring_overwrited && ring->idx == (ring->elem_cnt - 1)) {
+ ring->ring_overwrited = 1;
+ }
+
+ ring->idx = (ring->idx + 1) % ring->elem_cnt;
+
+ return (uint8 *)ring->elem + (ring->elem_size * ring->idx);
+}
+
+static inline void *
+__dhd_singleidx_ring_get_next(dhd_singleidx_ring_info_t *ring, void *prev, uint32 type)
+{
+ uint32 cur_idx;
+
+ if (ring->idx == DHD_RING_IDX_INVALID) {
+ DHD_RING_ERR(("EMPTY RING\n"));
+ return NULL;
+ }
+
+ cur_idx = __dhd_ring_ptr2idx(ring, prev, "NEXT", type);
+ if (cur_idx >= ring->elem_cnt) {
+ return NULL;
+ }
+
+ if (cur_idx == ring->idx) {
+ /* no more new record */
+ return NULL;
+ }
+
+ cur_idx = (cur_idx + 1) % ring->elem_cnt;
+
+ return (uint8 *)ring->elem + ring->elem_size * cur_idx;
+}
+
+static inline void *
+__dhd_singleidx_ring_get_prev(dhd_singleidx_ring_info_t *ring, void *prev, uint32 type)
+{
+ uint32 cur_idx;
+
+ if (ring->idx == DHD_RING_IDX_INVALID) {
+ DHD_RING_ERR(("EMPTY RING\n"));
+ return NULL;
+ }
+ cur_idx = __dhd_ring_ptr2idx(ring, prev, "PREV", type);
+ if (cur_idx >= ring->elem_cnt) {
+ return NULL;
+ }
+
+ if (!ring->ring_overwrited && cur_idx == 0) {
+ /* no more new record */
+ return NULL;
+ }
+
+ cur_idx = (cur_idx + ring->elem_cnt - 1) % ring->elem_cnt;
+ if (ring->ring_overwrited && cur_idx == ring->idx) {
+ /* no more new record */
+ return NULL;
+ }
+
+ return (uint8 *)ring->elem + ring->elem_size * cur_idx;
+}
+
+static inline void
+__dhd_singleidx_ring_whole_lock(dhd_singleidx_ring_info_t *ring)
+{
+ if (!atomic_read(&ring->ring_locked)) {
+ atomic_set(&ring->ring_locked, 1);
+ }
+}
+
+static inline void
+__dhd_singleidx_ring_whole_unlock(dhd_singleidx_ring_info_t *ring)
+{
+ if (atomic_read(&ring->ring_locked)) {
+ atomic_set(&ring->ring_locked, 0);
+ }
+}
+
+/* Get first element : oldest element */
+void *
+dhd_ring_get_first(void *_ring)
+{
+ dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
+ void *ret = NULL;
+ unsigned long flags;
+
+ if (!ring || ring->magic != DHD_RING_MAGIC) {
+ DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
+ return NULL;
+ }
+
+ DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
+ if (ring->type == DHD_RING_TYPE_FIXED) {
+ ret = __dhd_fixed_ring_get_first(&ring->fixed);
+ }
+ if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
+ ret = __dhd_singleidx_ring_get_first(&ring->single);
+ }
+ DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
+ return ret;
+}
+
+/* Free first element : oldest element */
+void
+dhd_ring_free_first(void *_ring)
+{
+ dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
+ unsigned long flags;
+
+ if (!ring || ring->magic != DHD_RING_MAGIC) {
+ DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
+ return;
+ }
+
+ DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
+ if (ring->type == DHD_RING_TYPE_FIXED) {
+ __dhd_fixed_ring_free_first(&ring->fixed);
+ }
+ DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
+}
+
+void
+dhd_ring_set_read_idx(void *_ring, uint32 read_idx)
+{
+ dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
+ unsigned long flags;
+
+ if (!ring || ring->magic != DHD_RING_MAGIC) {
+ DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
+ return;
+ }
+
+ DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
+ if (ring->type == DHD_RING_TYPE_FIXED) {
+ __dhd_fixed_ring_set_read_idx(&ring->fixed, read_idx);
+ }
+ DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
+}
+
+void
+dhd_ring_set_write_idx(void *_ring, uint32 write_idx)
+{
+ dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
+ unsigned long flags;
+
+ if (!ring || ring->magic != DHD_RING_MAGIC) {
+ DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
+ return;
+ }
+
+ DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
+ if (ring->type == DHD_RING_TYPE_FIXED) {
+ __dhd_fixed_ring_set_write_idx(&ring->fixed, write_idx);
+ }
+ DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
+}
+
+uint32
+dhd_ring_get_read_idx(void *_ring)
+{
+ dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
+ uint32 read_idx = DHD_RING_IDX_INVALID;
+ unsigned long flags;
+
+ if (!ring || ring->magic != DHD_RING_MAGIC) {
+ DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
+ return read_idx;
+ }
+
+ DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
+ if (ring->type == DHD_RING_TYPE_FIXED) {
+ read_idx = __dhd_fixed_ring_get_read_idx(&ring->fixed);
+ }
+ DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
+
+ return read_idx;
+}
+
+uint32
+dhd_ring_get_write_idx(void *_ring)
+{
+ dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
+ uint32 write_idx = DHD_RING_IDX_INVALID;
+ unsigned long flags;
+
+ if (!ring || ring->magic != DHD_RING_MAGIC) {
+ DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
+ return write_idx;
+ }
+
+ DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
+ if (ring->type == DHD_RING_TYPE_FIXED) {
+ write_idx = __dhd_fixed_ring_get_write_idx(&ring->fixed);
+ }
+ DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
+
+ return write_idx;
+}
+
+/* Get latest element */
+void *
+dhd_ring_get_last(void *_ring)
+{
+ dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
+ void *ret = NULL;
+ unsigned long flags;
+
+ if (!ring || ring->magic != DHD_RING_MAGIC) {
+ DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
+ return NULL;
+ }
+
+ DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
+ if (ring->type == DHD_RING_TYPE_FIXED) {
+ ret = __dhd_fixed_ring_get_last(&ring->fixed);
+ }
+ if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
+ ret = __dhd_singleidx_ring_get_last(&ring->single);
+ }
+ DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
+ return ret;
+}
+
+/* Get the next slot that can be written to.
+ * Unread elements may be overwritten;
+ * returns NULL if the next slot is locked.
+ */
+void *
+dhd_ring_get_empty(void *_ring)
+{
+ dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
+ void *ret = NULL;
+ unsigned long flags;
+
+ if (!ring || ring->magic != DHD_RING_MAGIC) {
+ DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
+ return NULL;
+ }
+
+ DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
+ if (ring->type == DHD_RING_TYPE_FIXED) {
+ ret = __dhd_fixed_ring_get_empty(&ring->fixed);
+ }
+ if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
+ ret = __dhd_singleidx_ring_get_empty(&ring->single);
+ }
+ DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
+ return ret;
+}
+
+void *
+dhd_ring_get_next(void *_ring, void *cur)
+{
+ dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
+ void *ret = NULL;
+ unsigned long flags;
+
+ if (!ring || ring->magic != DHD_RING_MAGIC) {
+ DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
+ return NULL;
+ }
+
+ DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
+ if (ring->type == DHD_RING_TYPE_FIXED) {
+ ret = __dhd_fixed_ring_get_next(&ring->fixed, cur, ring->type);
+ }
+ if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
+ ret = __dhd_singleidx_ring_get_next(&ring->single, cur, ring->type);
+ }
+ DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
+ return ret;
+}
+
+void *
+dhd_ring_get_prev(void *_ring, void *cur)
+{
+ dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
+ void *ret = NULL;
+ unsigned long flags;
+
+ if (!ring || ring->magic != DHD_RING_MAGIC) {
+ DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
+ return NULL;
+ }
+
+ DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
+ if (ring->type == DHD_RING_TYPE_FIXED) {
+ ret = __dhd_fixed_ring_get_prev(&ring->fixed, cur, ring->type);
+ }
+ if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
+ ret = __dhd_singleidx_ring_get_prev(&ring->single, cur, ring->type);
+ }
+ DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
+ return ret;
+}
+
+int
+dhd_ring_get_cur_size(void *_ring)
+{
+ dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
+ int cnt = 0;
+ unsigned long flags;
+
+ if (!ring || ring->magic != DHD_RING_MAGIC) {
+ DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
+ return cnt;
+ }
+
+ DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
+ if (ring->type == DHD_RING_TYPE_FIXED) {
+ cnt = __dhd_fixed_ring_get_cur_size(&ring->fixed);
+ }
+ DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
+ return cnt;
+}
+
+/* protect the elements between first_ptr and last_ptr */
+void
+dhd_ring_lock(void *_ring, void *first_ptr, void *last_ptr)
+{
+ dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
+ unsigned long flags;
+
+ if (!ring || ring->magic != DHD_RING_MAGIC) {
+ DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
+ return;
+ }
+
+ DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
+ if (ring->type == DHD_RING_TYPE_FIXED) {
+ __dhd_fixed_ring_lock(&ring->fixed, first_ptr, last_ptr, ring->type);
+ }
+ DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
+}
+
+/* free all lock */
+void
+dhd_ring_lock_free(void *_ring)
+{
+ dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
+ unsigned long flags;
+
+ if (!ring || ring->magic != DHD_RING_MAGIC) {
+ DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
+ return;
+ }
+
+ DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
+ if (ring->type == DHD_RING_TYPE_FIXED) {
+ __dhd_fixed_ring_lock_free(&ring->fixed);
+ }
+ DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
+}
+
+void *
+dhd_ring_lock_get_first(void *_ring)
+{
+ dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
+ void *ret = NULL;
+ unsigned long flags;
+
+ if (!ring || ring->magic != DHD_RING_MAGIC) {
+ DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
+ return NULL;
+ }
+
+ DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
+ if (ring->type == DHD_RING_TYPE_FIXED) {
+ ret = __dhd_fixed_ring_lock_get_first(&ring->fixed);
+ }
+ DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
+ return ret;
+}
+
+void *
+dhd_ring_lock_get_last(void *_ring)
+{
+ dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
+ void *ret = NULL;
+ unsigned long flags;
+
+ if (!ring || ring->magic != DHD_RING_MAGIC) {
+ DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
+ return NULL;
+ }
+
+ DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
+ if (ring->type == DHD_RING_TYPE_FIXED) {
+ ret = __dhd_fixed_ring_lock_get_last(&ring->fixed);
+ }
+ DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
+ return ret;
+}
+
+int
+dhd_ring_lock_get_count(void *_ring)
+{
+ dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
+ int ret = BCME_ERROR;
+ unsigned long flags;
+
+ if (!ring || ring->magic != DHD_RING_MAGIC) {
+ DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
+ return ret;
+ }
+
+ DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
+ if (ring->type == DHD_RING_TYPE_FIXED) {
+ ret = __dhd_fixed_ring_lock_get_count(&ring->fixed);
+ }
+ DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
+ return ret;
+}
+
+/* free first locked element */
+void
+dhd_ring_lock_free_first(void *_ring)
+{
+ dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
+ unsigned long flags;
+
+ if (!ring || ring->magic != DHD_RING_MAGIC) {
+ DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
+ return;
+ }
+
+ DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
+ if (ring->type == DHD_RING_TYPE_FIXED) {
+ __dhd_fixed_ring_lock_free_first(&ring->fixed);
+ }
+ DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
+}
+
+void
+dhd_ring_whole_lock(void *_ring)
+{
+ dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
+ unsigned long flags;
+
+ if (!ring || ring->magic != DHD_RING_MAGIC) {
+ DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
+ return;
+ }
+
+ DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
+ if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
+ __dhd_singleidx_ring_whole_lock(&ring->single);
+ }
+ DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
+}
+
+void
+dhd_ring_whole_unlock(void *_ring)
+{
+ dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
+ unsigned long flags;
+
+ if (!ring || ring->magic != DHD_RING_MAGIC) {
+ DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
+ return;
+ }
+
+ DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
+ if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
+ __dhd_singleidx_ring_whole_unlock(&ring->single);
+ }
+ DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
+}
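+
+/* A minimal usage sketch for the dhd_ring API above (illustrative only;
+ * "my_elem_t" is a hypothetical element type, and the ring handle is assumed
+ * to have been created elsewhere with a matching elem_size/elem_cnt):
+ *
+ *	my_elem_t *slot, *iter;
+ *
+ *	// producer: grab the next writable slot (may overwrite unread entries)
+ *	slot = (my_elem_t *)dhd_ring_get_empty(ring);
+ *
+ *	// consumer: iterate from the oldest to the newest element
+ *	for (iter = (my_elem_t *)dhd_ring_get_first(ring); iter;
+ *		iter = (my_elem_t *)dhd_ring_get_next(ring, iter)) {
+ *		// process *iter
+ *	}
+ */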
+/* END of DHD RING */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0))
+#define DHD_VFS_INODE(dir) (dir->d_inode)
+#else
+#define DHD_VFS_INODE(dir) d_inode(dir)
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0) */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0))
+#define DHD_VFS_UNLINK(dir, b, c) vfs_unlink(DHD_VFS_INODE(dir), b)
+#else
+#define DHD_VFS_UNLINK(dir, b, c) vfs_unlink(DHD_VFS_INODE(dir), b, c)
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0) */
+
+#if ((defined DHD_DUMP_MNGR) || (defined DNGL_AXI_ERROR_LOGGING))
+int
+dhd_file_delete(char *path)
+{
+ struct path file_path;
+ int err;
+ struct dentry *dir;
+
+ err = kern_path(path, 0, &file_path);
+
+ if (err < 0) {
+ DHD_ERROR(("Failed to get kern-path delete file: %s error: %d\n", path, err));
+ return err;
+ }
+ if (
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
+ !d_is_file(file_path.dentry) ||
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(4, 1, 0))
+ d_really_is_negative(file_path.dentry) ||
+#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(4, 1, 0) */
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0) */
+ FALSE)
+ {
+ err = -EINVAL;
+ } else {
+ dir = dget_parent(file_path.dentry);
+
+ if (!IS_ERR(dir)) {
+ err = DHD_VFS_UNLINK(dir, file_path.dentry, NULL);
+ dput(dir);
+ } else {
+ err = PTR_ERR(dir);
+ }
+ }
+
+ path_put(&file_path);
+
+ if (err < 0) {
+ DHD_ERROR(("Failed to delete file: %s error: %d\n", path, err));
+ }
+
+ return err;
+}
+#endif /* DHD_DUMP_MNGR || DNGL_AXI_ERROR_LOGGING */
+
+#ifdef DHD_DUMP_MNGR
+static int
+dhd_dump_file_manage_idx(dhd_dump_file_manage_t *fm_ptr, char *fname)
+{
+ int i;
+ int fm_idx = -1;
+
+ for (i = 0; i < DHD_DUMP_TYPE_COUNT_MAX; i++) {
+ /* XXX the dump file manager enqueues type names into the first
+ * empty slot, so an empty slot can never appear in the middle.
+ */
+ if (strlen(fm_ptr->elems[i].type_name) == 0) {
+ fm_idx = i;
+ break;
+ }
+ if (!(strncmp(fname, fm_ptr->elems[i].type_name, strlen(fname)))) {
+ fm_idx = i;
+ break;
+ }
+ }
+
+ if (fm_idx == -1) {
+ return fm_idx;
+ }
+
+ if (strlen(fm_ptr->elems[fm_idx].type_name) == 0) {
+ strncpy(fm_ptr->elems[fm_idx].type_name, fname, DHD_DUMP_TYPE_NAME_SIZE);
+ fm_ptr->elems[fm_idx].type_name[DHD_DUMP_TYPE_NAME_SIZE - 1] = '\0';
+ fm_ptr->elems[fm_idx].file_idx = 0;
+ }
+
+ return fm_idx;
+}
+
+/*
+ * dhd_dump_file_manage_enqueue - enqueue a dump file path
+ * and delete the oldest file if the file count is at its maximum.
+ */
+void
+dhd_dump_file_manage_enqueue(dhd_pub_t *dhd, char *dump_path, char *fname)
+{
+ int fm_idx;
+ int fp_idx;
+ dhd_dump_file_manage_t *fm_ptr;
+ DFM_elem_t *elem;
+
+ if (!dhd || !dhd->dump_file_manage) {
+ DHD_ERROR(("%s(): dhdp=%p dump_file_manage=%p\n",
+ __FUNCTION__, dhd, (dhd ? dhd->dump_file_manage : NULL)));
+ return;
+ }
+
+ fm_ptr = dhd->dump_file_manage;
+
+ /* find file_manage idx */
+ DHD_INFO(("%s(): fname: %s dump_path: %s\n", __FUNCTION__, fname, dump_path));
+ if ((fm_idx = dhd_dump_file_manage_idx(fm_ptr, fname)) < 0) {
+ DHD_ERROR(("%s(): Out of file manager entries, fname: %s\n",
+ __FUNCTION__, fname));
+ return;
+ }
+
+ elem = &fm_ptr->elems[fm_idx];
+ fp_idx = elem->file_idx;
+ DHD_INFO(("%s(): fm_idx: %d fp_idx: %d path: %s\n",
+ __FUNCTION__, fm_idx, fp_idx, elem->file_path[fp_idx]));
+
+ /* delete oldest file */
+ if (strlen(elem->file_path[fp_idx]) != 0) {
+ if (dhd_file_delete(elem->file_path[fp_idx]) < 0) {
+ DHD_ERROR(("%s(): Failed to delete file: %s\n",
+ __FUNCTION__, elem->file_path[fp_idx]));
+ } else {
+ DHD_ERROR(("%s(): Successed to delete file: %s\n",
+ __FUNCTION__, elem->file_path[fp_idx]));
+ }
+ }
+
+ /* save dump file path */
+ strncpy(elem->file_path[fp_idx], dump_path, DHD_DUMP_FILE_PATH_SIZE);
+ elem->file_path[fp_idx][DHD_DUMP_FILE_PATH_SIZE - 1] = '\0';
+
+ /* change file index to next file index */
+ elem->file_idx = (elem->file_idx + 1) % DHD_DUMP_FILE_COUNT_MAX;
+}
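+
+/* For example (hypothetical paths): enqueueing "/data/mem_dump_01.txt" with
+ * fname "mem_dump" reuses the "mem_dump" slot; once DHD_DUMP_FILE_COUNT_MAX
+ * paths have been stored for that type, the oldest stored file is deleted
+ * before the new path overwrites its slot.
+ */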
+#endif /* DHD_DUMP_MNGR */
+
+#ifdef DHD_HP2P
+unsigned long
+dhd_os_hp2plock(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd;
+ unsigned long flags = 0;
+
+ dhd = (dhd_info_t *)(pub->info);
+
+ if (dhd) {
+ flags = osl_spin_lock(&dhd->hp2p_lock);
+ }
+
+ return flags;
+}
+
+void
+dhd_os_hp2punlock(dhd_pub_t *pub, unsigned long flags)
+{
+ dhd_info_t *dhd;
+
+ dhd = (dhd_info_t *)(pub->info);
+
+ if (dhd) {
+ osl_spin_unlock(&dhd->hp2p_lock, flags);
+ }
+}
+#endif /* DHD_HP2P */
+#ifdef DNGL_AXI_ERROR_LOGGING
+static void
+dhd_axi_error_dump(void *handle, void *event_info, u8 event)
+{
+ dhd_info_t *dhd = (dhd_info_t *)handle;
+ dhd_pub_t *dhdp = NULL;
+
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
+ goto exit;
+ }
+
+ dhdp = &dhd->pub;
+ if (!dhdp) {
+ DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
+ goto exit;
+ }
+
+ /**
+ * First save the axi error information to a file,
+ * because a panic should happen right after this.
+ * After dhd resets, dhd reads the file and runs hang event
+ * processing to send the stored axi error to the Bigdata server.
+ */
+ if (dhdp->axi_err_dump->etd_axi_error_v1.version != HND_EXT_TRAP_AXIERROR_VERSION_1) {
+ DHD_ERROR(("%s: Invalid AXI version: 0x%x\n",
+ __FUNCTION__, dhdp->axi_err_dump->etd_axi_error_v1.version));
+ }
+
+ DHD_OS_WAKE_LOCK(dhdp);
+#ifdef DHD_FW_COREDUMP
+#ifdef DHD_SSSR_DUMP
+ DHD_ERROR(("%s : Set collect_sssr as TRUE\n", __FUNCTION__));
+ dhdp->collect_sssr = TRUE;
+#endif /* DHD_SSSR_DUMP */
+ DHD_ERROR(("%s: scheduling mem dump.. \n", __FUNCTION__));
+ dhd_schedule_memdump(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
+#endif /* DHD_FW_COREDUMP */
+ DHD_OS_WAKE_UNLOCK(dhdp);
+
+exit:
+ /* Trigger kernel panic after taking necessary dumps */
+ BUG_ON(1);
+}
+
+void dhd_schedule_axi_error_dump(dhd_pub_t *dhdp, void *type)
+{
+ DHD_ERROR(("%s: scheduling axi_error_dump.. \n", __FUNCTION__));
+ dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
+ type, DHD_WQ_WORK_AXI_ERROR_DUMP,
+ dhd_axi_error_dump, DHD_WQ_WORK_PRIORITY_HIGH);
+}
+#endif /* DNGL_AXI_ERROR_LOGGING */
+
+#ifdef SUPPORT_SET_TID
+/*
+ * Set a custom TID value for UDP frames based on the UID value.
+ * This is triggered by the android private command below:
+ * DRIVER SET_TID <Mode:uint8> <Target UID:uint32> <Custom TID:uint8>
+ * Mode 0 (SET_TID_OFF)          : Disable changing TID
+ * Mode 1 (SET_TID_ALL_UDP)      : Change TID for all UDP frames
+ * Mode 2 (SET_TID_BASED_ON_UID) : Change TID for UDP frames based on target UID
+ */
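+/* Example (hypothetical values): "DRIVER SET_TID 2 10010 5" retags
+ * best-effort UDP frames from UID 10010 with TID 5.
+ */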
+void
+dhd_set_tid_based_on_uid(dhd_pub_t *dhdp, void *pkt)
+{
+ struct ether_header *eh = NULL;
+ struct sock *sk = NULL;
+ uint8 *pktdata = NULL;
+ uint8 *ip_hdr = NULL;
+ uint8 cur_prio;
+ uint8 prio;
+ uint32 uid;
+
+ if (dhdp->tid_mode == SET_TID_OFF) {
+ return;
+ }
+
+ pktdata = (uint8 *)PKTDATA(dhdp->osh, pkt);
+ eh = (struct ether_header *) pktdata;
+ ip_hdr = (uint8 *)eh + ETHER_HDR_LEN;
+
+ if (IPV4_PROT(ip_hdr) != IP_PROT_UDP) {
+ return;
+ }
+
+ cur_prio = PKTPRIO(pkt);
+ prio = dhdp->target_tid;
+ uid = dhdp->target_uid;
+
+ if ((cur_prio == prio) ||
+ (cur_prio != PRIO_8021D_BE)) {
+ return;
+ }
+
+ sk = ((struct sk_buff*)(pkt))->sk;
+
+ if ((dhdp->tid_mode == SET_TID_ALL_UDP) ||
+ (sk && (uid == __kuid_val(sock_i_uid(sk))))) {
+ PKTSETPRIO(pkt, prio);
+ }
+}
+#endif /* SUPPORT_SET_TID */
+
+#ifdef BCMPCIE
+static void
+dhd_cto_recovery_handler(void *handle, void *event_info, u8 event)
+{
+ dhd_info_t *dhd = handle;
+ dhd_pub_t *dhdp = NULL;
+
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
+ BUG_ON(1);
+ return;
+ }
+
+ dhdp = &dhd->pub;
+ if (dhdp->dhd_induce_error == DHD_INDUCE_BH_CBP_HANG) {
+ DHD_ERROR(("%s: skip cto recovery for DHD_INDUCE_BH_CBP_HANG\n",
+ __FUNCTION__));
+ return;
+ }
+ dhdpcie_cto_recovery_handler(dhdp);
+}
+
+void
+dhd_schedule_cto_recovery(dhd_pub_t *dhdp)
+{
+ DHD_ERROR(("%s: scheduling cto recovery.. \n", __FUNCTION__));
+ dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
+ NULL, DHD_WQ_WORK_CTO_RECOVERY,
+ dhd_cto_recovery_handler, DHD_WQ_WORK_PRIORITY_HIGH);
+}
+#endif /* BCMPCIE */
+
+#ifdef DHD_WIFI_SHUTDOWN
+void wifi_plat_dev_drv_shutdown(struct platform_device *pdev)
+{
+ dhd_pub_t *dhd_pub = NULL;
+ dhd_info_t *dhd_info = NULL;
+ dhd_if_t *dhd_if = NULL;
+
+ DHD_ERROR(("%s enter\n", __FUNCTION__));
+ dhd_pub = g_dhd_pub;
+
+ if (dhd_os_check_if_up(dhd_pub)) {
+ dhd_info = (dhd_info_t *)dhd_pub->info;
+ dhd_if = dhd_info->iflist[0];
+ ASSERT(dhd_if);
+ ASSERT(dhd_if->net);
+ if (dhd_if && dhd_if->net) {
+ dhd_stop(dhd_if->net);
+ }
+ }
+}
+#endif /* DHD_WIFI_SHUTDOWN */
+#ifdef WL_AUTO_QOS
+void
+dhd_wl_sock_qos_set_status(dhd_pub_t *dhdp, unsigned long on_off)
+{
+ dhd_sock_qos_set_status(dhdp->info, on_off);
+}
+#endif /* WL_AUTO_QOS */
+
+#ifdef DHD_CFG80211_SUSPEND_RESUME
+void
+dhd_cfg80211_suspend(dhd_pub_t *dhdp)
+{
+ struct net_device *net = dhd_idx2net((dhd_pub_t *)dhdp, 0);
+ struct bcm_cfg80211 *cfg = wl_get_cfg(net);
+ wl_cfg80211_suspend(cfg);
+}
+
+void
+dhd_cfg80211_resume(dhd_pub_t *dhdp)
+{
+ struct net_device * net = dhd_idx2net((dhd_pub_t *)dhdp, 0);
+ struct bcm_cfg80211 *cfg = wl_get_cfg(net);
+ wl_cfg80211_resume(cfg);
+}
+#endif /* DHD_CFG80211_SUSPEND_RESUME */
+
+void
+dhd_generate_rand_mac_addr(struct ether_addr *ea_addr)
+{
+ RANDOM_BYTES(ea_addr->octet, ETHER_ADDR_LEN);
+ /* restore mcast and local admin bits to 0 and 1 */
+ ETHER_SET_UNICAST(ea_addr->octet);
+ ETHER_SET_LOCALADDR(ea_addr->octet);
+ DHD_ERROR(("%s:generated new MAC="MACDBG" \n",
+ __FUNCTION__, MAC2STRDBG(ea_addr->octet)));
+ return;
+}
+
+void *
+dhd_get_roam_evt(dhd_pub_t *dhdp)
+{
+#if defined(DHD_PUB_ROAM_EVT)
+ return (void *)&(dhdp->roam_evt);
+#else
+ return NULL;
+#endif /* DHD_PUB_ROAM_EVT */
+}
+
+/* BANDLOCK_FILE is for Hikey only, and BANDLOCK takes priority over BANDLOCK_FILE */
+static void dhd_set_bandlock(dhd_pub_t * dhd)
+{
+#if defined(BANDLOCK)
+ int band = BANDLOCK;
+ if (dhd_wl_ioctl_cmd(dhd, WLC_SET_BAND, &band, sizeof(band), TRUE, 0) < 0) {
+ DHD_ERROR(("%s: set band(%d) error\n", __FUNCTION__, band));
+ }
+#elif defined(BANDLOCK_FILE)
+ int band;
+ char val[2] = {0, 0};
+ if (dhd_read_file(PATH_BANDLOCK_INFO, (char *)val, sizeof(char)) == BCME_OK) {
+ band = bcm_atoi(val);
+ if (dhd_wl_ioctl_cmd(dhd, WLC_SET_BAND, &band, sizeof(band), TRUE, 0) < 0) {
+ DHD_ERROR(("%s: set band(%d) error\n", __FUNCTION__, band));
+ }
+ }
+#endif /* BANDLOCK */
+}
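+
+/* Sketch: with BANDLOCK_FILE defined, a PATH_BANDLOCK_INFO file whose first
+ * byte is e.g. "1" is parsed by bcm_atoi() and handed to WLC_SET_BAND; band
+ * numbering follows the firmware's WLC_BAND_* convention.
+ */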
+
+#ifdef PCIE_FULL_DONGLE
+/* API to delete the flowrings and stations
+ * corresponding to the interface (ndev)
+ */
+void
+dhd_net_del_flowrings_sta(dhd_pub_t *dhd, struct net_device *ndev)
+{
+ dhd_if_t *ifp = NULL;
+
+ ifp = dhd_get_ifp_by_ndev(dhd, ndev);
+ if (ifp == NULL) {
+ DHD_ERROR(("DHD Iface Info corresponding to %s not found\n", ndev->name));
+ return;
+ }
+
+ /* For now this is called only in the iface delete path.
+ * Add reason codes if this API needs to be reused in any other paths.
+ */
+ DHD_ERROR(("%s:Clean up IFACE idx %d due to interface delete\n",
+ __FUNCTION__, ifp->idx));
+
+ dhd_del_all_sta(dhd, ifp->idx);
+ dhd_flow_rings_delete(dhd, ifp->idx);
+}
+#endif /* PCIE_FULL_DONGLE */
+
+#ifndef BCMDBUS
+static void
+dhd_deferred_socram_dump(void *handle, void *event_info, u8 event)
+{
+ dhd_pub_t *dhdp = (dhd_pub_t *)event_info;
+ DHD_ERROR(("%s ... scheduled to collect memdump over bus\n", __FUNCTION__));
+ dhd_socram_dump(dhdp->bus);
+}
+
+int
+dhd_schedule_socram_dump(dhd_pub_t *dhdp)
+{
+ int ret = 0;
+ ret = dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dhdp,
+ DHD_WQ_WORK_SOC_RAM_COLLECT, dhd_deferred_socram_dump, DHD_WQ_WORK_PRIORITY_HIGH);
+ return ret;
+}
+#endif /* !BCMDBUS */
+
+void *dhd_get_pub(struct net_device *dev)
+{
+ dhd_info_t *dhdinfo = *(dhd_info_t **)netdev_priv(dev);
+ if (dhdinfo)
+ return (void *)&dhdinfo->pub;
+ else {
+ printf("%s: null dhdinfo\n", __FUNCTION__);
+ return NULL;
+ }
+}
+
+void *dhd_get_conf(struct net_device *dev)
+{
+ dhd_info_t *dhdinfo = *(dhd_info_t **)netdev_priv(dev);
+ if (dhdinfo)
+ return (void *)dhdinfo->pub.conf;
+ else {
+ printf("%s: null dhdinfo\n", __FUNCTION__);
+ return NULL;
+ }
+}
+
+bool dhd_os_wd_timer_enabled(void *bus)
+{
+ dhd_pub_t *pub = bus;
+ dhd_info_t *dhd = (dhd_info_t *)pub->info;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd NULL\n", __FUNCTION__));
+ return FALSE;
+ }
+ return dhd->wd_timer_valid;
+}
+
+#if defined(WLDWDS) && defined(FOURADDR_AUTO_BRG)
+/* Automatically add/delete an interface to/from the bridge device that the primary dev is in */
+static void dhd_bridge_dev_set(dhd_info_t *dhd, int ifidx, struct net_device *dev)
+{
+ struct net_device *primary_ndev = NULL, *br_dev = NULL;
+ int cmd;
+ struct ifreq ifr;
+
+ /* add new interface to bridge dev */
+ if (dev) {
+ int found = 0, i;
+ DHD_ERROR(("bssidx %d\n", dhd->pub.info->iflist[ifidx]->bssidx));
+ for (i = 0 ; i < ifidx; i++) {
+ DHD_ERROR(("bssidx %d %d\n", i, dhd->pub.info->iflist[i]->bssidx));
+ /* search the primary interface */
+ if (dhd->pub.info->iflist[i]->bssidx == dhd->pub.info->iflist[ifidx]->bssidx) {
+ primary_ndev = dhd->pub.info->iflist[i]->net;
+ DHD_ERROR(("%dst is primary dev %s\n", i, primary_ndev->name));
+ found = 1;
+ break;
+ }
+ }
+ if (found == 0) {
+ DHD_ERROR(("Can not find primary dev %s\n", dev->name));
+ return;
+ }
+ cmd = SIOCBRADDIF;
+ ifr.ifr_ifindex = dev->ifindex;
+ } else { /* del interface from bridge dev */
+ primary_ndev = dhd->pub.info->iflist[ifidx]->net;
+ cmd = SIOCBRDELIF;
+ ifr.ifr_ifindex = primary_ndev->ifindex;
+ }
+ /* if primary net device is bridged */
+ if (primary_ndev->priv_flags & IFF_BRIDGE_PORT) {
+ rtnl_lock();
+ /* get bridge device */
+ br_dev = netdev_master_upper_dev_get(primary_ndev);
+ if (br_dev) {
+ const struct net_device_ops *ops = br_dev->netdev_ops;
+ DHD_ERROR(("br %s pri %s\n", br_dev->name, primary_ndev->name));
+ if (ops) {
+ if (cmd == SIOCBRADDIF) {
+ DHD_ERROR(("br call ndo_add_slave\n"));
+ ops->ndo_add_slave(br_dev, dev);
+ /* Also bring wds0.x interface up automatically */
+ dev_change_flags(dev, dev->flags | IFF_UP);
+ }
+ else {
+ DHD_ERROR(("br call ndo_del_slave\n"));
+ ops->ndo_del_slave(br_dev, primary_ndev);
+ }
+ }
+ }
+ else {
+ DHD_ERROR(("no br dev\n"));
+ }
+ rtnl_unlock();
+ }
+ else {
+ DHD_ERROR(("device %s is not bridged\n", primary_ndev->name));
+ }
+}
+#endif /* defined(WLDWDS) && defined(FOURADDR_AUTO_BRG) */
diff --git a/bcmdhd.101.10.361.x/dhd_linux.h b/bcmdhd.101.10.361.x/dhd_linux.h
new file mode 100755
index 0000000..531505d
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_linux.h
@@ -0,0 +1,523 @@
+/*
+ * DHD Linux header file (dhd_linux exports for cfg80211 and other components)
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+/* wifi platform functions for power, interrupt and pre-alloc, either
+ * from Android-like platform device data or Broadcom wifi platform
+ * device data.
+ */
+#ifndef __DHD_LINUX_H__
+#define __DHD_LINUX_H__
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#ifdef DHD_WMF
+#include <dhd_wmf_linux.h>
+#endif
+/* Linux wireless extension support */
+#if defined(WL_WIRELESS_EXT)
+#include <wl_iw.h>
+#endif /* defined(WL_WIRELESS_EXT) */
+#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
+#include <linux/earlysuspend.h>
+#endif /* defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) */
+
+#ifdef BCMPCIE
+#include <bcmmsgbuf.h>
+#endif /* BCMPCIE */
+
+#ifdef PCIE_FULL_DONGLE
+#include <etd.h>
+#endif /* PCIE_FULL_DONGLE */
+
+#ifdef WL_MONITOR
+#ifdef HOST_RADIOTAP_CONV
+#include <bcmwifi_monitor.h>
+#else
+#define MAX_RADIOTAP_SIZE 256 /* Maximum size to hold HE Radiotap header format */
+#define MAX_MON_PKT_SIZE (4096 + MAX_RADIOTAP_SIZE)
+#endif /* HOST_RADIOTAP_CONV */
+#endif /* WL_MONITOR */
+
+/* dongle status */
+enum wifi_adapter_status {
+ WIFI_STATUS_POWER_ON = 0,
+ WIFI_STATUS_ATTACH,
+ WIFI_STATUS_FW_READY,
+ WIFI_STATUS_DETTACH
+};
+#define wifi_chk_adapter_status(adapter, stat) (test_bit(stat, &(adapter)->status))
+#define wifi_get_adapter_status(adapter, stat) (test_bit(stat, &(adapter)->status))
+#define wifi_set_adapter_status(adapter, stat) (set_bit(stat, &(adapter)->status))
+#define wifi_clr_adapter_status(adapter, stat) (clear_bit(stat, &(adapter)->status))
+#define wifi_chg_adapter_status(adapter, stat) (change_bit(stat, &(adapter)->status))
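+/* Illustrative usage: wifi_set_adapter_status(adapter, WIFI_STATUS_FW_READY)
+ * marks firmware download as complete, and
+ * wifi_chk_adapter_status(adapter, WIFI_STATUS_FW_READY) tests the same bit.
+ */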
+
+#define DHD_REGISTRATION_TIMEOUT 12000 /* msec : allowed time to finish dhd registration */
+#define DHD_FW_READY_TIMEOUT 5000 /* msec : allowed time to finish fw download */
+
+typedef struct wifi_adapter_info {
+ const char *name;
+ uint irq_num;
+ uint intr_flags;
+ const char *fw_path;
+ const char *nv_path;
+ const char *clm_path;
+ const char *conf_path;
+ void *wifi_plat_data; /* wifi ctrl func, for backward compatibility */
+ uint bus_type;
+ uint bus_num;
+ uint slot_num;
+ int index;
+ int gpio_wl_reg_on;
+#ifdef CUSTOMER_OOB
+ int gpio_wl_host_wake;
+#endif
+ wait_queue_head_t status_event;
+ unsigned long status;
+#if defined (BT_OVER_SDIO)
+ const char *btfw_path;
+#endif /* defined (BT_OVER_SDIO) */
+#if defined(BCMSDIO)
+ struct sdio_func *sdio_func;
+#endif /* BCMSDIO */
+#if defined(BCMPCIE)
+ struct pci_dev *pci_dev;
+ struct pci_saved_state *pci_saved_state;
+#endif /* BCMPCIE */
+} wifi_adapter_info_t;
+
+#if defined(CONFIG_WIFI_CONTROL_FUNC) || defined(CUSTOMER_HW4)
+#include <linux/wlan_plat.h>
+#else
+#include <dhd_plat.h>
+#endif /* CONFIG_WIFI_CONTROL_FUNC */
+
+typedef struct bcmdhd_wifi_platdata {
+ uint num_adapters;
+ wifi_adapter_info_t *adapters;
+} bcmdhd_wifi_platdata_t;
+
+/** Per-STA params. A list of dhd_sta objects is managed in dhd_if */
+typedef struct dhd_sta {
+ cumm_ctr_t cumm_ctr; /* cumulative queue length of child flowrings */
+ uint16 flowid[NUMPRIO]; /* allocated flow ring ids (by priority) */
+ void * ifp; /* associated dhd_if */
+ struct ether_addr ea; /* station's ethernet mac address */
+ struct list_head list; /* link into dhd_if::sta_list */
+ int idx; /* index of self in dhd_pub::sta_pool[] */
+ int ifidx; /* index of interface in dhd */
+#ifdef DHD_WMF
+ struct dhd_sta *psta_prim; /* primary index of psta interface */
+#endif /* DHD_WMF */
+} dhd_sta_t;
+typedef dhd_sta_t dhd_sta_pool_t;
+
+#ifdef DHD_4WAYM4_FAIL_DISCONNECT
+typedef enum {
+ NONE_4WAY,
+ M1_4WAY,
+ M2_4WAY,
+ M3_4WAY,
+ M4_4WAY
+} msg_4way_t;
+typedef enum {
+ M3_RXED,
+ M4_TXFAILED
+} msg_4way_state_t;
+#define MAX_4WAY_TIMEOUT_MS 2000
+#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
+
+#if defined(DHD_LB)
+/* Dynamic CPU selection for load balancing. */
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/notifier.h>
+#include <linux/workqueue.h>
+#include <asm/atomic.h>
+
+/* FIXME: Make this a module param or a sysfs. */
+#if !defined(DHD_LB_PRIMARY_CPUS)
+#define DHD_LB_PRIMARY_CPUS 0x0 /* Big CPU coreids mask */
+#endif
+#if !defined(DHD_LB_SECONDARY_CPUS)
+#define DHD_LB_SECONDARY_CPUS 0xFE /* Little CPU coreids mask */
+#endif
+
+#define HIST_BIN_SIZE 9
+
+#if defined(DHD_LB_TXP)
+/* Pkttag not compatible with PROP_TXSTATUS or WLFC */
+typedef struct dhd_tx_lb_pkttag_fr {
+ struct net_device *net;
+ int ifidx;
+} dhd_tx_lb_pkttag_fr_t;
+
+#define DHD_LB_TX_PKTTAG_SET_NETDEV(tag, netdevp) ((tag)->net = netdevp)
+#define DHD_LB_TX_PKTTAG_NETDEV(tag) ((tag)->net)
+
+#define DHD_LB_TX_PKTTAG_SET_IFIDX(tag, ifidx) ((tag)->ifidx = ifidx)
+#define DHD_LB_TX_PKTTAG_IFIDX(tag) ((tag)->ifidx)
+#endif /* DHD_LB_TXP */
+#endif /* DHD_LB */
+
+#define FILE_DUMP_MAX_WAIT_TIME 4000
+
+#ifdef IL_BIGENDIAN
+#include <bcmendian.h>
+#define htod32(i) (bcmswap32(i))
+#define htod16(i) (bcmswap16(i))
+#define dtoh32(i) (bcmswap32(i))
+#define dtoh16(i) (bcmswap16(i))
+#define htodchanspec(i) htod16(i)
+#define dtohchanspec(i) dtoh16(i)
+#else
+#define htod32(i) (i)
+#define htod16(i) (i)
+#define dtoh32(i) (i)
+#define dtoh16(i) (i)
+#define htodchanspec(i) (i)
+#define dtohchanspec(i) (i)
+#endif /* IL_BIGENDIAN */
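+
+/* Note: on a little-endian host htod32(x) is the identity; with IL_BIGENDIAN
+ * defined it byte-swaps, so a dongle-order field round-trips as
+ * dtoh32(htod32(x)) == x in either configuration.
+ */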
+
+#if defined(DHD_TCP_WINSIZE_ADJUST)
+#define MIN_TCP_WIN_SIZE 18000
+#define WIN_SIZE_SCALE_FACTOR 2
+#define MAX_TARGET_PORTS 5
+#endif /* DHD_TCP_WINSIZE_ADJUST */
+
+#ifdef BLOCK_IPV6_PACKET
+#define HEX_PREF_STR "0x"
+#define UNI_FILTER_STR "010000000000"
+#define ZERO_ADDR_STR "000000000000"
+#define ETHER_TYPE_STR "0000"
+#define IPV6_FILTER_STR "20"
+#define ZERO_TYPE_STR "00"
+#endif /* BLOCK_IPV6_PACKET */
+
+#if defined(OEM_ANDROID) && defined(SOFTAP)
+extern bool ap_cfg_running;
+extern bool ap_fw_loaded;
+#endif
+
+#if defined(OEM_ANDROID) && defined(BCMPCIE)
+extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd, int *dtim_period, int *bcn_interval);
+#else
+extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd);
+#endif /* OEM_ANDROID && BCMPCIE */
+
+#ifdef CUSTOMER_HW4
+#ifdef MIMO_ANT_SETTING
+#ifdef DHD_EXPORT_CNTL_FILE
+extern unsigned long antsel;
+#endif /* DHD_EXPORT_CNTL_FILE */
+extern int dhd_sel_ant_from_file(dhd_pub_t *dhd);
+#endif /* MIMO_ANT_SETTING */
+#ifdef WRITE_WLANINFO
+#define MAX_VERSION_LEN 512
+#ifdef DHD_EXPORT_CNTL_FILE
+extern char version_info[MAX_VERSION_LEN];
+#endif /* DHD_EXPORT_CNTL_FILE */
+extern uint32 sec_save_wlinfo(char *firm_ver, char *dhd_ver, char *nvram_p, char *clm_ver);
+#endif /* WRITE_WLANINFO */
+#ifdef LOGTRACE_FROM_FILE
+extern int dhd_logtrace_from_file(dhd_pub_t *dhd);
+#ifdef DHD_EXPORT_CNTL_FILE
+extern unsigned long logtrace_val;
+#endif /* DHD_EXPORT_CNTL_FILE */
+#endif /* LOGTRACE_FROM_FILE */
+#ifdef GEN_SOFTAP_INFO_FILE
+#define SOFTAP_INFO_BUF_SZ 512
+#ifdef DHD_EXPORT_CNTL_FILE
+extern char softapinfostr[SOFTAP_INFO_BUF_SZ];
+#endif /* DHD_EXPORT_CNTL_FILE */
+extern uint32 sec_save_softap_info(void);
+#endif /* GEN_SOFTAP_INFO_FILE */
+#endif /* CUSTOMER_HW4 */
+
+#ifdef DHD_SEND_HANG_PRIVCMD_ERRORS
+extern uint32 report_hang_privcmd_err;
+#endif /* DHD_SEND_HANG_PRIVCMD_ERRORS */
+
+#if defined(SOFTAP_TPUT_ENHANCE)
+extern void dhd_bus_setidletime(dhd_pub_t *dhdp, int idle_time);
+extern void dhd_bus_getidletime(dhd_pub_t *dhdp, int* idle_time);
+#endif /* SOFTAP_TPUT_ENHANCE */
+
+#if defined(BCM_ROUTER_DHD)
+void traffic_mgmt_pkt_set_prio(dhd_pub_t *dhdp, void * pktbuf);
+#endif /* BCM_ROUTER_DHD */
+
+#ifdef DHD_LOG_DUMP
+/* 0: DLD_BUF_TYPE_GENERAL, 1: DLD_BUF_TYPE_PRESERVE,
+ * 2: DLD_BUF_TYPE_SPECIAL
+ */
+#define DLD_BUFFER_NUM 3
+
+#ifndef CUSTOM_LOG_DUMP_BUFSIZE_MB
+#define CUSTOM_LOG_DUMP_BUFSIZE_MB 4 /* DHD_LOG_DUMP_BUF_SIZE 4 MB static memory in kernel */
+#endif /* CUSTOM_LOG_DUMP_BUFSIZE_MB */
+
+#define LOG_DUMP_TOTAL_BUFSIZE (1024 * 1024 * CUSTOM_LOG_DUMP_BUFSIZE_MB)
+
+/*
+ * Below are different sections that use the prealloced buffer
+ * and sum of the sizes of these should not cross LOG_DUMP_TOTAL_BUFSIZE
+ */
+#ifdef EWP_BCM_TRACE
+#define LOG_DUMP_GENERAL_MAX_BUFSIZE (192 * 1024 * CUSTOM_LOG_DUMP_BUFSIZE_MB)
+#define LOG_DUMP_BCM_TRACE_MAX_BUFSIZE (64 * 1024 * CUSTOM_LOG_DUMP_BUFSIZE_MB)
+#else
+#define LOG_DUMP_GENERAL_MAX_BUFSIZE (256 * 1024 * CUSTOM_LOG_DUMP_BUFSIZE_MB)
+#define LOG_DUMP_BCM_TRACE_MAX_BUFSIZE 0
+#endif /* EWP_BCM_TRACE */
+#define LOG_DUMP_PRESERVE_MAX_BUFSIZE (128 * 1024 * CUSTOM_LOG_DUMP_BUFSIZE_MB)
+#define LOG_DUMP_ECNTRS_MAX_BUFSIZE (256 * 1024 * CUSTOM_LOG_DUMP_BUFSIZE_MB)
+#define LOG_DUMP_RTT_MAX_BUFSIZE (256 * 1024 * CUSTOM_LOG_DUMP_BUFSIZE_MB)
+#define LOG_DUMP_FILTER_MAX_BUFSIZE (128 * 1024 * CUSTOM_LOG_DUMP_BUFSIZE_MB)
+
+#if LOG_DUMP_TOTAL_BUFSIZE < \
+ (LOG_DUMP_GENERAL_MAX_BUFSIZE + LOG_DUMP_PRESERVE_MAX_BUFSIZE + \
+ LOG_DUMP_ECNTRS_MAX_BUFSIZE + LOG_DUMP_RTT_MAX_BUFSIZE + \
+ LOG_DUMP_BCM_TRACE_MAX_BUFSIZE + LOG_DUMP_FILTER_MAX_BUFSIZE)
+#error "LOG_DUMP_TOTAL_BUFSIZE is lesser than sum of all rings"
+#endif
+
+/* Special buffer is allocated separately in prealloc */
+#define LOG_DUMP_SPECIAL_MAX_BUFSIZE (8 * 1024)
+
+#define LOG_DUMP_MAX_FILESIZE (8 * 1024 * 1024) /* 8 MB default */
+
+#ifdef CONFIG_LOG_BUF_SHIFT
+/* 15% of the kernel log buf size; for example, if the klog buf size is 512KB,
+ * 15% of 512KB ~= 80KB
+ */
+#define LOG_DUMP_KERNEL_TAIL_FLUSH_SIZE \
+ (15 * ((1 << CONFIG_LOG_BUF_SHIFT)/100))
+#endif /* CONFIG_LOG_BUF_SHIFT */
+
+#define LOG_DUMP_COOKIE_BUFSIZE 1024u
+typedef struct {
+ char *hdr_str;
+ log_dump_section_type_t sec_type;
+} dld_hdr_t;
+
+#define DHD_PRINT_BUF_NAME_LEN 30
+void dhd_get_debug_dump_len(void *handle, struct sk_buff *skb, void *event_info, u8 event);
+void cfgvendor_log_dump_len(dhd_pub_t *dhdp, log_dump_type_t *type, struct sk_buff *skb);
+#endif /* DHD_LOG_DUMP */
+
+typedef struct dhd_if_event {
+ struct list_head list;
+ wl_event_data_if_t event;
+ char name[IFNAMSIZ+1];
+ uint8 mac[ETHER_ADDR_LEN];
+} dhd_if_event_t;
+
+/* Interface control information */
+typedef struct dhd_if {
+ struct dhd_info *info; /* back pointer to dhd_info */
+ /* OS/stack specifics */
+ struct net_device *net;
+ int idx; /* iface idx in dongle */
+ uint subunit; /* subunit */
+ uint8 mac_addr[ETHER_ADDR_LEN]; /* assigned MAC address */
+ bool set_macaddress;
+ bool set_multicast;
+ uint8 bssidx; /* bsscfg index for the interface */
+ bool attached; /* Delayed attachment when unset */
+ bool txflowcontrol; /* Per interface flow control indicator */
+ char name[IFNAMSIZ+1]; /* linux interface name */
+ char dngl_name[IFNAMSIZ+1]; /* corresponding dongle interface name */
+ struct net_device_stats stats;
+#ifdef DHD_WMF
+ dhd_wmf_t wmf; /* per bsscfg wmf setting */
+ bool wmf_psta_disable; /* enable/disable MC pkt to each mac
+ * of MC group behind PSTA
+ */
+#endif /* DHD_WMF */
+#ifdef PCIE_FULL_DONGLE
+ struct list_head sta_list; /* sll of associated stations */
+ spinlock_t sta_list_lock; /* lock for manipulating sll */
+#endif /* PCIE_FULL_DONGLE */
+ uint32 ap_isolate; /* ap-isolation settings */
+#ifdef DHD_L2_FILTER
+ bool parp_enable;
+ bool parp_discard;
+ bool parp_allnode;
+ arp_table_t *phnd_arp_table;
+ /* for Per BSS modification */
+ bool dhcp_unicast;
+ bool block_ping;
+ bool grat_arp;
+ bool block_tdls;
+#endif /* DHD_L2_FILTER */
+#if (defined(BCM_ROUTER_DHD) && defined(QOS_MAP_SET))
+ uint8 *qosmap_up_table; /* user priority table, size is UP_TABLE_MAX */
+ bool qosmap_up_table_enable; /* flag set only when app want to set additional UP */
+#endif /* BCM_ROUTER_DHD && QOS_MAP_SET */
+#ifdef DHD_MCAST_REGEN
+ bool mcast_regen_bss_enable;
+#endif
+ bool rx_pkt_chainable; /* set all rx packets to chainable config by default */
+ cumm_ctr_t cumm_ctr; /* cumulative queue length of child flowrings */
+#ifdef BCM_ROUTER_DHD
+ bool primsta_dwds; /* DWDS status of primary sta interface */
+#endif /* BCM_ROUTER_DHD */
+ uint8 tx_paths_active;
+ bool del_in_progress;
+ bool static_if; /* used to avoid some operations on static_if */
+#ifdef DHD_4WAYM4_FAIL_DISCONNECT
+ struct delayed_work m4state_work;
+ atomic_t m4state;
+#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
+#ifdef DHDTCPSYNC_FLOOD_BLK
+ uint32 tsync_rcvd;
+ uint32 tsyncack_txed;
+ u64 last_sync;
+ struct work_struct blk_tsfl_work;
+ uint32 tsync_per_sec;
+ bool disconnect_tsync_flood;
+#endif /* DHDTCPSYNC_FLOOD_BLK */
+#ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
+ bool recv_reassoc_evt;
+ bool post_roam_evt;
+#endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
+#ifdef WLEASYMESH
+ uint8 _1905_al_ucast[ETHER_ADDR_LEN];
+ uint8 _1905_al_mcast[ETHER_ADDR_LEN];
+#endif /* WLEASYMESH */
+} dhd_if_t;
+
+struct ipv6_work_info_t {
+ uint8 if_idx;
+ char ipv6_addr[IPV6_ADDR_LEN];
+ unsigned long event;
+};
+
+typedef struct dhd_dump {
+ uint8 *buf;
+ int bufsize;
+ uint8 *hscb_buf;
+ int hscb_bufsize;
+} dhd_dump_t;
+#ifdef DNGL_AXI_ERROR_LOGGING
+typedef struct dhd_axi_error_dump {
+ ulong fault_address;
+ uint32 axid;
+ struct hnd_ext_trap_axi_error_v1 etd_axi_error_v1;
+} dhd_axi_error_dump_t;
+#endif /* DNGL_AXI_ERROR_LOGGING */
+#ifdef BCM_ROUTER_DHD
+typedef struct dhd_write_file {
+ char file_path[64];
+ uint32 file_flags;
+ uint8 *buf;
+ int bufsize;
+} dhd_write_file_t;
+#endif
+
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+struct dhd_rx_tx_work {
+ struct work_struct work;
+ struct sk_buff *skb;
+ struct net_device *net;
+ struct dhd_pub *pub;
+};
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+
+#ifdef FILTER_IE
+#define FILTER_IE_PATH "/vendor/etc/wifi/filter_ie"
+#define FILTER_IE_BUFSZ 1024 /* ioc buffsize for FILTER_IE */
+#define FILE_BLOCK_READ_SIZE 256
+#define WL_FILTER_IE_IOV_HDR_SIZE OFFSETOF(wl_filter_ie_iov_v1_t, tlvs)
+#endif /* FILTER_IE */
+
+#define NULL_CHECK(p, s, err) \
+ do { \
+ if (!(p)) { \
+ printk("NULL POINTER (%s) : %s\n", __FUNCTION__, (s)); \
+ err = BCME_ERROR; \
+ return err; \
+ } \
+ } while (0)
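+
+/* Typical use (sketch): the caller declares "int err;" and bails out early:
+ *	NULL_CHECK(dhdp, "dhdp", err);
+ * which logs the tag and returns BCME_ERROR when the pointer is NULL.
+ */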
+
+int dhd_wifi_platform_register_drv(void);
+void dhd_wifi_platform_unregister_drv(void);
+wifi_adapter_info_t* dhd_wifi_platform_attach_adapter(uint32 bus_type,
+ uint32 bus_num, uint32 slot_num, unsigned long status);
+wifi_adapter_info_t* dhd_wifi_platform_get_adapter(uint32 bus_type, uint32 bus_num,
+ uint32 slot_num);
+int wifi_platform_set_power(wifi_adapter_info_t *adapter, bool on, unsigned long msec);
+int wifi_platform_bus_enumerate(wifi_adapter_info_t *adapter, bool device_present);
+int wifi_platform_get_irq_number(wifi_adapter_info_t *adapter, unsigned long *irq_flags_ptr);
+int wifi_platform_get_mac_addr(wifi_adapter_info_t *adapter, unsigned char *buf, int ifidx);
+#ifdef DHD_COREDUMP
+int wifi_platform_set_coredump(wifi_adapter_info_t *adapter, const char *buf, int buf_len,
+ const char *info);
+#endif /* DHD_COREDUMP */
+#ifdef CUSTOM_COUNTRY_CODE
+void *wifi_platform_get_country_code(wifi_adapter_info_t *adapter, char *ccode,
+ u32 flags);
+#else
+void *wifi_platform_get_country_code(wifi_adapter_info_t *adapter, char *ccode);
+#endif /* CUSTOM_COUNTRY_CODE */
+void* wifi_platform_prealloc(wifi_adapter_info_t *adapter, int section, unsigned long size);
+void* wifi_platform_get_prealloc_func_ptr(wifi_adapter_info_t *adapter);
+
+int dhd_get_fw_mode(struct dhd_info *dhdinfo);
+bool dhd_update_fw_nv_path(struct dhd_info *dhdinfo);
+#ifdef BCM_ROUTER_DHD
+void dhd_update_dpsta_interface_for_sta(dhd_pub_t* dhdp, int ifidx, void* event_data);
+#endif /* BCM_ROUTER_DHD */
+#ifdef DHD_WMF
+dhd_wmf_t* dhd_wmf_conf(dhd_pub_t *dhdp, uint32 idx);
+int dhd_get_wmf_psta_disable(dhd_pub_t *dhdp, uint32 idx);
+int dhd_set_wmf_psta_disable(dhd_pub_t *dhdp, uint32 idx, int val);
+void dhd_update_psta_interface_for_sta(dhd_pub_t *dhdp, char* ifname,
+ void* mac_addr, void* event_data);
+#endif /* DHD_WMF */
+
+#if defined (BT_OVER_SDIO)
+int dhd_net_bus_get(struct net_device *dev);
+int dhd_net_bus_put(struct net_device *dev);
+#endif /* BT_OVER_SDIO */
+#if defined(WLADPS) || defined(WLADPS_PRIVATE_CMD)
+#define ADPS_ENABLE 1
+#define ADPS_DISABLE 0
+
+int dhd_enable_adps(dhd_pub_t *dhd, uint8 on);
+#endif /* WLADPS || WLADPS_PRIVATE_CMD */
+#ifdef DHDTCPSYNC_FLOOD_BLK
+extern void dhd_reset_tcpsync_info_by_ifp(dhd_if_t *ifp);
+extern void dhd_reset_tcpsync_info_by_dev(struct net_device *dev);
+#endif /* DHDTCPSYNC_FLOOD_BLK */
+#ifdef PCIE_FULL_DONGLE
+extern void dhd_net_del_flowrings_sta(dhd_pub_t * dhd, struct net_device * ndev);
+#endif /* PCIE_FULL_DONGLE */
+int dhd_get_fw_capabilities(dhd_pub_t * dhd);
+#endif /* __DHD_LINUX_H__ */
diff --git a/bcmdhd.101.10.361.x/dhd_linux_exportfs.c b/bcmdhd.101.10.361.x/dhd_linux_exportfs.c
new file mode 100755
index 0000000..ef5b0cc
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_linux_exportfs.c
@@ -0,0 +1,2994 @@
+/*
+ * Broadcom Dongle Host Driver (DHD), Linux-specific network interface
+ * Basically selected code segments from usb-cdc.c and usb-rndis.c
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+#include <linux/kobject.h>
+#include <linux/proc_fs.h>
+#include <linux/sysfs.h>
+#include <osl.h>
+#include <dhd.h>
+#include <dhd_dbg.h>
+#include <dhd_linux_priv.h>
+#if defined(DHD_ADPS_BAM_EXPORT) && defined(WL_BAM)
+#include <wl_bam.h>
+#endif /* DHD_ADPS_BAM_EXPORT && WL_BAM */
+#ifdef PWRSTATS_SYSFS
+#include <wldev_common.h>
+#endif /* PWRSTATS_SYSFS */
+#ifdef WL_CFG80211
+#include <wl_cfg80211.h>
+#endif /* WL_CFG80211 */
+#ifdef CSI_SUPPORT
+#include <dhd_csi.h>
+#endif /* CSI_SUPPORT */
+
+#ifdef SHOW_LOGTRACE
+extern dhd_pub_t* g_dhd_pub;
+static int dhd_ring_proc_open(struct inode *inode, struct file *file);
+ssize_t dhd_ring_proc_read(struct file *file, char __user *buffer, size_t tt, loff_t *loff);
+
+static const struct file_operations dhd_ring_proc_fops = {
+ .open = dhd_ring_proc_open,
+ .read = dhd_ring_proc_read,
+ .release = single_release,
+};
+
+static int
+dhd_ring_proc_open(struct inode *inode, struct file *file)
+{
+ int ret = BCME_ERROR;
+ if (inode) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
+ ret = single_open(file, 0, PDE_DATA(inode));
+#else
+ /* This feature is not supported for lower kernel versions */
+ ret = single_open(file, 0, NULL);
+#endif
+ } else {
+ DHD_ERROR(("%s: inode is NULL\n", __FUNCTION__));
+ }
+ return ret;
+}
+
+ssize_t
+dhd_ring_proc_read(struct file *file, char __user *buffer, size_t tt, loff_t *loff)
+{
+ trace_buf_info_t *trace_buf_info;
+ int ret = BCME_ERROR;
+ dhd_dbg_ring_t *ring = (dhd_dbg_ring_t *)((struct seq_file *)(file->private_data))->private;
+
+ if (ring == NULL) {
+ DHD_ERROR(("%s: ring is NULL\n", __FUNCTION__));
+ return ret;
+ }
+
+ ASSERT(g_dhd_pub);
+
+ trace_buf_info = (trace_buf_info_t *)MALLOCZ(g_dhd_pub->osh, sizeof(trace_buf_info_t));
+ if (trace_buf_info) {
+ dhd_dbg_read_ring_into_trace_buf(ring, trace_buf_info);
+ if (copy_to_user(buffer, (void*)trace_buf_info->buf, MIN(trace_buf_info->size, tt)))
+ {
+ ret = -EFAULT;
+ goto exit;
+ }
+ if (trace_buf_info->availability == BUF_NOT_AVAILABLE)
+ ret = BUF_NOT_AVAILABLE;
+ else
+ ret = trace_buf_info->size;
+ } else {
+ DHD_ERROR(("Memory allocation failed\n"));
+ }
+
+exit:
+ if (trace_buf_info) {
+ MFREE(g_dhd_pub->osh, trace_buf_info, sizeof(trace_buf_info_t));
+ }
+ return ret;
+}
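+
+/* Userspace can read these entries directly (e.g. "cat /proc/dhd_trace");
+ * each read drains at most one trace_buf_info_t worth of ring data and
+ * returns BUF_NOT_AVAILABLE once the ring has no more data, per the
+ * availability flag above.
+ */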
+
+void
+dhd_dbg_ring_proc_create(dhd_pub_t *dhdp)
+{
+#ifdef DEBUGABILITY
+ dhd_dbg_ring_t *dbg_verbose_ring = NULL;
+
+ dbg_verbose_ring = dhd_dbg_get_ring_from_ring_id(dhdp, FW_VERBOSE_RING_ID);
+ if (dbg_verbose_ring) {
+ if (!proc_create_data("dhd_trace", S_IRUSR, NULL, &dhd_ring_proc_fops,
+ dbg_verbose_ring)) {
+ DHD_ERROR(("Failed to create /proc/dhd_trace procfs interface\n"));
+ } else {
+ DHD_ERROR(("Created /proc/dhd_trace procfs interface\n"));
+ }
+ } else {
+ DHD_ERROR(("dbg_verbose_ring is NULL, /proc/dhd_trace not created\n"));
+ }
+#endif /* DEBUGABILITY */
+
+#ifdef EWP_ECNTRS_LOGGING
+ if (!proc_create_data("dhd_ecounters", S_IRUSR, NULL, &dhd_ring_proc_fops,
+ dhdp->ecntr_dbg_ring)) {
+ DHD_ERROR(("Failed to create /proc/dhd_ecounters procfs interface\n"));
+ } else {
+ DHD_ERROR(("Created /proc/dhd_ecounters procfs interface\n"));
+ }
+#endif /* EWP_ECNTRS_LOGGING */
+
+#ifdef EWP_RTT_LOGGING
+ if (!proc_create_data("dhd_rtt", S_IRUSR, NULL, &dhd_ring_proc_fops,
+ dhdp->rtt_dbg_ring)) {
+ DHD_ERROR(("Failed to create /proc/dhd_rtt procfs interface\n"));
+ } else {
+ DHD_ERROR(("Created /proc/dhd_rtt procfs interface\n"));
+ }
+#endif /* EWP_RTT_LOGGING */
+}
+
+void
+dhd_dbg_ring_proc_destroy(dhd_pub_t *dhdp)
+{
+#ifdef DEBUGABILITY
+ remove_proc_entry("dhd_trace", NULL);
+#endif /* DEBUGABILITY */
+
+#ifdef EWP_ECNTRS_LOGGING
+ remove_proc_entry("dhd_ecounters", NULL);
+#endif /* EWP_ECNTRS_LOGGING */
+
+#ifdef EWP_RTT_LOGGING
+ remove_proc_entry("dhd_rtt", NULL);
+#endif /* EWP_RTT_LOGGING */
+
+}
+#endif /* SHOW_LOGTRACE */
+
+/* ----------------------------------------------------------------------------
+ * Infrastructure code for sysfs interface support for DHD
+ *
+ * What is sysfs interface?
+ * https://www.kernel.org/doc/Documentation/filesystems/sysfs.txt
+ *
+ * Why sysfs interface?
+ * This is the standard Linux way of changing/configuring run-time parameters
+ * of a driver. We can use this interface to control "linux"-specific driver
+ * parameters.
+ *
+ * -----------------------------------------------------------------------------
+ */
+
+#if defined(DHD_TRACE_WAKE_LOCK)
+extern atomic_t trace_wklock_onoff;
+
+/* Function to show the history buffer */
+static ssize_t
+show_wklock_trace(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+ dhd_info_t *dhd = (dhd_info_t *)dev;
+
+ buf[ret] = '\n';
+ buf[ret+1] = 0;
+
+ dhd_wk_lock_stats_dump(&dhd->pub);
+ return ret+1;
+}
+
+/* Function to enable/disable wakelock trace */
+static ssize_t
+wklock_trace_onoff(struct dhd_info *dev, const char *buf, size_t count)
+{
+ unsigned long onoff;
+ dhd_info_t *dhd = (dhd_info_t *)dev;
+ BCM_REFERENCE(dhd);
+
+ onoff = bcm_strtoul(buf, NULL, 10);
+ if (onoff != 0 && onoff != 1) {
+ return -EINVAL;
+ }
+
+ atomic_set(&trace_wklock_onoff, onoff);
+ if (atomic_read(&trace_wklock_onoff)) {
+ DHD_ERROR(("ENABLE WAKLOCK TRACE\n"));
+ } else {
+ DHD_ERROR(("DISABLE WAKELOCK TRACE\n"));
+ }
+
+ return (ssize_t)(onoff+1);
+}
+#endif /* DHD_TRACE_WAKE_LOCK */
+
+#ifdef DHD_LOG_DUMP
+extern int logdump_periodic_flush;
+extern int logdump_ecntr_enable;
+static ssize_t
+show_logdump_periodic_flush(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+ unsigned long val;
+
+ val = logdump_periodic_flush;
+ ret = scnprintf(buf, PAGE_SIZE - 1, "%lu \n", val);
+ return ret;
+}
+
+static ssize_t
+logdump_periodic_flush_onoff(struct dhd_info *dev, const char *buf, size_t count)
+{
+ unsigned long val;
+
+ val = bcm_strtoul(buf, NULL, 10);
+ if (val != 0 && val != 1) {
+ return -EINVAL;
+ }
+ logdump_periodic_flush = val;
+ return count;
+}
+
+static ssize_t
+show_logdump_ecntr(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+ unsigned long val;
+
+ val = logdump_ecntr_enable;
+ ret = scnprintf(buf, PAGE_SIZE - 1, "%lu \n", val);
+ return ret;
+}
+
+static ssize_t
+logdump_ecntr_onoff(struct dhd_info *dev, const char *buf, size_t count)
+{
+ unsigned long val;
+
+ val = bcm_strtoul(buf, NULL, 10);
+ if (val != 0 && val != 1) {
+ return -EINVAL;
+ }
+ logdump_ecntr_enable = val;
+ return count;
+}
+
+#endif /* DHD_LOG_DUMP */
+
+extern uint enable_ecounter;
+static ssize_t
+show_enable_ecounter(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+ unsigned long onoff;
+
+ onoff = enable_ecounter;
+ ret = scnprintf(buf, PAGE_SIZE - 1, "%lu \n",
+ onoff);
+ return ret;
+}
+
+static ssize_t
+ecounter_onoff(struct dhd_info *dev, const char *buf, size_t count)
+{
+ unsigned long onoff;
+ dhd_info_t *dhd = (dhd_info_t *)dev;
+ dhd_pub_t *dhdp;
+
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
+ return count;
+ }
+ dhdp = &dhd->pub;
+ if (!FW_SUPPORTED(dhdp, ecounters)) {
+ DHD_ERROR(("%s: ecounters not supported by FW\n", __FUNCTION__));
+ return count;
+ }
+
+ onoff = bcm_strtoul(buf, NULL, 10);
+ if (onoff != 0 && onoff != 1) {
+ return -EINVAL;
+ }
+
+ if (enable_ecounter == onoff) {
+ DHD_ERROR(("%s: ecounters already %d\n", __FUNCTION__, enable_ecounter));
+ return count;
+ }
+
+ enable_ecounter = onoff;
+ dhd_ecounter_configure(dhdp, enable_ecounter);
+
+ return count;
+}
+
+#if defined(DHD_QOS_ON_SOCK_FLOW)
+#include <dhd_linux_sock_qos.h>
+
+static ssize_t
+show_sock_qos_onoff(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+ unsigned long onoff;
+ dhd_info_t *dhd = (dhd_info_t *)dev;
+
+ onoff = dhd_sock_qos_get_status(dhd);
+ ret = scnprintf(buf, PAGE_SIZE - 1, "%lu \n",
+ onoff);
+ return ret;
+}
+
+static ssize_t
+update_sock_qos_onoff(struct dhd_info *dev, const char *buf, size_t count)
+{
+ unsigned long onoff;
+ dhd_info_t *dhd = (dhd_info_t *)dev;
+
+ onoff = bcm_strtoul(buf, NULL, 10);
+ if (onoff != 0 && onoff != 1) {
+ return -EINVAL;
+ }
+
+ dhd_sock_qos_set_status(dhd, onoff);
+
+ return count;
+}
+
+static ssize_t
+show_sock_qos_upgrade(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+ unsigned long onoff;
+ dhd_info_t *dhd = (dhd_info_t *)dev;
+
+ onoff = dhd_sock_qos_get_force_upgrade(dhd);
+ ret = scnprintf(buf, PAGE_SIZE - 1, "%lu \n",
+ onoff);
+ return ret;
+}
+
+static ssize_t
+update_sock_qos_upgrade(struct dhd_info *dev, const char *buf, size_t count)
+{
+ unsigned long onoff;
+ dhd_info_t *dhd = (dhd_info_t *)dev;
+
+ onoff = bcm_strtoul(buf, NULL, 10);
+ if (onoff != 0 && onoff != 1) {
+ return -EINVAL;
+ }
+
+ dhd_sock_qos_set_force_upgrade(dhd, onoff);
+
+ return count;
+}
+
+static ssize_t
+show_sock_qos_numfl_upgrd_thresh(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+ int upgrade_thresh;
+ dhd_info_t *dhd = (dhd_info_t *)dev;
+
+ upgrade_thresh = dhd_sock_qos_get_numfl_upgrd_thresh(dhd);
+ ret = scnprintf(buf, PAGE_SIZE - 1, "%d \n",
+ upgrade_thresh);
+ return ret;
+}
+
+static ssize_t
+update_sock_qos_numfl_upgrd_thresh(struct dhd_info *dev, const char *buf, size_t count)
+{
+ int upgrade_thresh;
+ dhd_info_t *dhd = (dhd_info_t *)dev;
+
+ sscanf(buf, "%d", &upgrade_thresh);
+ if (upgrade_thresh < 0) {
+ return -EINVAL;
+ }
+
+ dhd_sock_qos_set_numfl_upgrd_thresh(dhd, upgrade_thresh);
+
+ return count;
+}
+
+static ssize_t
+show_sock_qos_avgpktsize_thresh(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+ unsigned long avgpktsize_low, avgpktsize_high;
+ dhd_info_t *dhd = (dhd_info_t *)dev;
+
+ dhd_sock_qos_get_avgpktsize_thresh(dhd, &avgpktsize_low, &avgpktsize_high);
+ ret = scnprintf(buf, PAGE_SIZE - 1, "%lu %lu\n",
+ avgpktsize_low, avgpktsize_high);
+
+ return ret;
+}
+
+static ssize_t
+update_sock_qos_avgpktsize_thresh(struct dhd_info *dev, const char *buf, size_t count)
+{
+ unsigned long avgpktsize_low, avgpktsize_high;
+ dhd_info_t *dhd = (dhd_info_t *)dev;
+
+ sscanf(buf, "%lu %lu", &avgpktsize_low, &avgpktsize_high);
+
+ dhd_sock_qos_set_avgpktsize_thresh(dhd, avgpktsize_low, avgpktsize_high);
+
+ return count;
+}
+
+static ssize_t
+show_sock_qos_numpkts_thresh(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+ unsigned long numpkts_low, numpkts_high;
+ dhd_info_t *dhd = (dhd_info_t *)dev;
+
+ dhd_sock_qos_get_numpkts_thresh(dhd, &numpkts_low, &numpkts_high);
+ ret = scnprintf(buf, PAGE_SIZE - 1, "%lu %lu\n",
+ numpkts_low, numpkts_high);
+
+ return ret;
+}
+
+static ssize_t
+update_sock_qos_numpkts_thresh(struct dhd_info *dev, const char *buf, size_t count)
+{
+ unsigned long numpkts_low, numpkts_high;
+ dhd_info_t *dhd = (dhd_info_t *)dev;
+
+ sscanf(buf, "%lu %lu", &numpkts_low, &numpkts_high);
+
+ dhd_sock_qos_set_numpkts_thresh(dhd, numpkts_low, numpkts_high);
+
+ return count;
+}
+
+static ssize_t
+show_sock_qos_detectcnt_thresh(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+ unsigned char detectcnt_inc, detectcnt_dec;
+ dhd_info_t *dhd = (dhd_info_t *)dev;
+
+ dhd_sock_qos_get_detectcnt_thresh(dhd, &detectcnt_inc, &detectcnt_dec);
+ ret = scnprintf(buf, PAGE_SIZE - 1, "%d %d\n",
+ detectcnt_inc, detectcnt_dec);
+
+ return ret;
+}
+
+static ssize_t
+update_sock_qos_detectcnt_thresh(struct dhd_info *dev, const char *buf, size_t count)
+{
+ unsigned int detectcnt_inc, detectcnt_dec;
+ dhd_info_t *dhd = (dhd_info_t *)dev;
+
+ sscanf(buf, "%u %u", &detectcnt_inc, &detectcnt_dec);
+
+ dhd_sock_qos_set_detectcnt_thresh(dhd, detectcnt_inc, detectcnt_dec);
+
+ return count;
+}
+
+static ssize_t
+show_sock_qos_detectcnt_upgrd_thresh(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+ unsigned int detectcnt_upgrd_thresh;
+ dhd_info_t *dhd = (dhd_info_t *)dev;
+
+ detectcnt_upgrd_thresh = dhd_sock_qos_get_detectcnt_upgrd_thresh(dhd);
+ ret = scnprintf(buf, PAGE_SIZE - 1, "%d \n", detectcnt_upgrd_thresh);
+
+ return ret;
+}
+
+static ssize_t
+update_sock_qos_detectcnt_upgrd_thresh(struct dhd_info *dev, const char *buf, size_t count)
+{
+ unsigned int detectcnt_upgrd_thresh;
+ dhd_info_t *dhd = (dhd_info_t *)dev;
+
+ sscanf(buf, "%u", &detectcnt_upgrd_thresh);
+
+ dhd_sock_qos_set_detectcnt_upgrd_thresh(dhd, detectcnt_upgrd_thresh);
+
+ return count;
+}
+
+static ssize_t
+show_sock_qos_maxfl(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+ unsigned int maxfl;
+ dhd_info_t *dhd = (dhd_info_t *)dev;
+
+ maxfl = dhd_sock_qos_get_maxfl(dhd);
+ ret = scnprintf(buf, PAGE_SIZE - 1, "%u \n", maxfl);
+
+ return ret;
+}
+
+static ssize_t
+update_sock_qos_maxfl(struct dhd_info *dev, const char *buf, size_t count)
+{
+ unsigned int maxfl;
+ dhd_info_t *dhd = (dhd_info_t *)dev;
+
+ sscanf(buf, "%u", &maxfl);
+
+ dhd_sock_qos_set_maxfl(dhd, maxfl);
+
+ return count;
+}
+
+static ssize_t
+show_sock_qos_stats(struct dhd_info *dev, char *buf)
+{
+ dhd_info_t *dhd = (dhd_info_t *)dev;
+
+ dhd_sock_qos_show_stats(dhd, buf, PAGE_SIZE);
+
+ return PAGE_SIZE - 1;
+}
+
+static ssize_t
+clear_sock_qos_stats(struct dhd_info *dev, const char *buf, size_t count)
+{
+ unsigned long clear;
+ dhd_info_t *dhd = (dhd_info_t *)dev;
+
+ clear = bcm_strtoul(buf, NULL, 10);
+
+ sscanf(buf, "%lu", &clear);
+ if (clear != 0) {
+ return -EINVAL;
+ }
+
+ dhd_sock_qos_clear_stats(dhd);
+
+ return count;
+}
+
+#ifdef DHD_QOS_ON_SOCK_FLOW_UT
+
+/*
+ * test_id sub_id Description
+ * ------ ------ -----------
+ * 1 0 psk_qos->sk_fl
+ * The number of free sk_fl entries in the Table is exhausted
+ * and more sockets are still getting created
+ *
+ * 1 1 psk_qos->sk_fl
+ * is full for more than 'x' seconds; there are a lot of periodic
+ * flows, but none of them is detected for upgrade for more
+ * than 'x' seconds
+ *
+ * 2 Force upgrade the socket flows to reach skfl_upgrade_thresh
+ * check the behaviour
+ *
+ * Downgrade one of the sk_fls and check if the 'next' pending
+ * sk_fl is getting upgraded. The sk_fl getting upgraded
+ * should follow FIFO scheme.
+ *
+ * 3 Upgrade a socket flow ... after some time downgrade the
+ * same and check if the sk_fl is actually getting downgraded
+ * Keep switching the behavior every 'x' seconds and observe
+ * the switches
+ */
+static ssize_t
+do_sock_qos_unit_test(struct dhd_info *dev, const char *buf, size_t count)
+{
+ unsigned int test_id = 0;
+ unsigned int sub_id = 0;
+ dhd_info_t *dhd = (dhd_info_t *)dev;
+ int ret;
+
+ BCM_REFERENCE(dhd);
+
+ ret = sscanf(buf, "%d %d", &test_id, &sub_id);
+ if (ret < 1) {
+ return -EINVAL;
+ }
+
+ return count;
+}
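+/*
+ * Example, following the table above (unit-test builds only):
+ *   echo "1 0" > /sys/wifi/sock_qos_unit_test
+ * selects test_id 1, sub_id 0 (the sk_fl table-exhaustion case); sub_id
+ * may be omitted since only the first token is mandatory.
+ */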
+
+#endif /* DHD_QOS_ON_SOCK_FLOW_UT */
+#endif /* DHD_QOS_ON_SOCK_FLOW */
+
+#ifdef DHD_SSSR_DUMP
+static ssize_t
+show_sssr_enab(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+ unsigned long onoff;
+
+ onoff = sssr_enab;
+ ret = scnprintf(buf, PAGE_SIZE - 1, "%lu \n",
+ onoff);
+ return ret;
+}
+
+static ssize_t
+set_sssr_enab(struct dhd_info *dev, const char *buf, size_t count)
+{
+ unsigned long onoff;
+
+ onoff = bcm_strtoul(buf, NULL, 10);
+
+ sscanf(buf, "%lu", &onoff);
+ if (onoff != 0 && onoff != 1) {
+ return -EINVAL;
+ }
+
+ sssr_enab = (uint)onoff;
+
+ return count;
+}
+
+static ssize_t
+show_fis_enab(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+ unsigned long onoff;
+
+ onoff = fis_enab;
+ ret = scnprintf(buf, PAGE_SIZE - 1, "%lu \n",
+ onoff);
+ return ret;
+}
+
+static ssize_t
+set_fis_enab(struct dhd_info *dev, const char *buf, size_t count)
+{
+ unsigned long onoff;
+
+ onoff = bcm_strtoul(buf, NULL, 10);
+
+ sscanf(buf, "%lu", &onoff);
+ if (onoff != 0 && onoff != 1) {
+ return -EINVAL;
+ }
+
+ fis_enab = (uint)onoff;
+
+ return count;
+}
+#endif /* DHD_SSSR_DUMP */
+
+#define FMT_BUFSZ 32
+extern char firmware_path[];
+
+static ssize_t
+show_firmware_path(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+ ret = scnprintf(buf, PAGE_SIZE - 1, "%s\n", firmware_path);
+
+ return ret;
+}
+
+static ssize_t
+store_firmware_path(struct dhd_info *dev, const char *buf, size_t count)
+{
+ char fmt_spec[FMT_BUFSZ] = "";
+
+ if ((int)strlen(buf) >= MOD_PARAM_PATHLEN) {
+ return -EINVAL;
+ }
+
+ snprintf(fmt_spec, FMT_BUFSZ, "%%%ds", MOD_PARAM_PATHLEN - 1);
+ sscanf(buf, fmt_spec, firmware_path);
+
+ return count;
+}
+
+extern char nvram_path[];
+
+static ssize_t
+show_nvram_path(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+ ret = scnprintf(buf, PAGE_SIZE - 1, "%s\n", nvram_path);
+
+ return ret;
+}
+
+static ssize_t
+store_nvram_path(struct dhd_info *dev, const char *buf, size_t count)
+{
+ char fmt_spec[FMT_BUFSZ] = "";
+
+ if ((int)strlen(buf) >= MOD_PARAM_PATHLEN) {
+ return -EINVAL;
+ }
+
+ snprintf(fmt_spec, FMT_BUFSZ, "%%%ds", MOD_PARAM_PATHLEN - 1);
+ sscanf(buf, fmt_spec, nvram_path);
+
+ return count;
+}
+
+#ifdef PWRSTATS_SYSFS
+typedef struct wl_pwrstats_sysfs {
+ uint64 current_ts;
+ uint64 pm_cnt;
+ uint64 pm_dur;
+ uint64 pm_last_entry_us;
+ uint64 awake_cnt;
+ uint64 awake_dur;
+ uint64 awake_last_entry_us;
+ uint64 l0_cnt;
+ uint64 l0_dur_us;
+ uint64 l1_cnt;
+ uint64 l1_dur_us;
+ uint64 l1_1_cnt;
+ uint64 l1_1_dur_us;
+ uint64 l1_2_cnt;
+ uint64 l1_2_dur_us;
+ uint64 l2_cnt;
+ uint64 l2_dur_us;
+} wl_pwrstats_sysfs_t;
+
+uint64 last_delta = 0;
+wl_pwrstats_sysfs_t accumstats = {0, };
+wl_pwrstats_sysfs_t laststats = {0, };
+static const char pwrstr_cnt[] = "count:";
+static const char pwrstr_dur[] = "duration_usec:";
+static const char pwrstr_ts[] = "last_entry_timestamp_usec:";
+
+void update_pwrstats_cum(uint64 *accum, uint64 *last, uint64 *now, bool force)
+{
+ if (accum) { /* accumulation case, ex; counts, duration */
+ if (*now < *last) {
+ if (force || ((*last - *now) > USEC_PER_MSEC)) {
+ /* not to update accum for pm_dur/awake_dur case */
+ *accum += *now;
+ *last = *now;
+ }
+ } else {
+ *accum += (*now - *last);
+ *last = *now;
+ }
+ } else if (*now != 0) { /* last entry timestamp case */
+ *last = *now + last_delta;
+ }
+}
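+/*
+ * Worked example of the accumulation above: if the firmware counter was
+ * last seen at 100 and now reads 130, the delta of 30 is added to *accum.
+ * If 'now' is smaller than 'last' (firmware counter reset), the new
+ * absolute value is folded into *accum instead, except in the non-forced
+ * pm_dur/awake_dur case where a sub-millisecond step back is ignored.
+ * With accum == NULL the value is treated as a last-entry timestamp and
+ * only rebased by last_delta.
+ */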
+
+static const uint16 pwrstats_req_type[] = {
+ WL_PWRSTATS_TYPE_PCIE,
+ WL_PWRSTATS_TYPE_PM_ACCUMUL
+};
+/* parenthesized so the macros expand safely inside larger expressions */
+#define PWRSTATS_REQ_TYPE_NUM (sizeof(pwrstats_req_type) / sizeof(uint16))
+#define PWRSTATS_IOV_BUF_LEN (OFFSETOF(wl_pwrstats_t, data) \
+ + sizeof(uint32) * PWRSTATS_REQ_TYPE_NUM \
+ + sizeof(wl_pwr_pcie_stats_t) \
+ + sizeof(wl_pwr_pm_accum_stats_v1_t) \
+ + (uint)strlen("pwrstats") + 1)
+
+static ssize_t
+show_pwrstats_path(struct dhd_info *dev, char *buf)
+{
+ int err = 0;
+ void *p_data = NULL;
+ ssize_t ret = 0;
+ dhd_info_t *dhd = (dhd_info_t *)dev;
+ struct net_device *ndev = dhd_linux_get_primary_netdev(&dhd->pub);
+ char *iovar_buf = NULL;
+ wl_pwrstats_query_t *p_query = NULL;
+ wl_pwrstats_sysfs_t pwrstats_sysfs = {0, };
+ wl_pwrstats_t *pwrstats;
+ uint len, query_len = 0, taglen, i;
+ uint16 type;
+ uint64 ts_sec, ts_usec, time_delta;
+
+ ASSERT(g_dhd_pub);
+
+ len = PWRSTATS_IOV_BUF_LEN;
+ iovar_buf = (char *)MALLOCZ(g_dhd_pub->osh, len);
+ if (iovar_buf == NULL) {
+ DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__));
+ goto done;
+ }
+
+ /* Alloc req buffer; keep its size in query_len since 'len' is reused below */
+ query_len = OFFSETOF(wl_pwrstats_query_t, type) +
+ PWRSTATS_REQ_TYPE_NUM * sizeof(uint16);
+ p_query = (wl_pwrstats_query_t *)MALLOCZ(g_dhd_pub->osh, query_len);
+ if (p_query == NULL) {
+ DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__));
+ goto done;
+ }
+
+ /* Build a list of types */
+ p_query->length = PWRSTATS_REQ_TYPE_NUM;
+ for (i = 0; i < PWRSTATS_REQ_TYPE_NUM; i++) {
+ p_query->type[i] = pwrstats_req_type[i];
+ }
+
+ /* Query with desired type list */
+ err = wldev_iovar_getbuf(ndev, "pwrstats", p_query, len,
+ iovar_buf, PWRSTATS_IOV_BUF_LEN, NULL);
+ if (err != BCME_OK) {
+ DHD_ERROR(("error (%d) - size = %zu\n", err, sizeof(wl_pwrstats_t)));
+ goto done;
+ }
+
+ /* Check version */
+ pwrstats = (wl_pwrstats_t *) iovar_buf;
+ if (dtoh16(pwrstats->version) != WL_PWRSTATS_VERSION) {
+ DHD_ERROR(("PWRSTATS Version mismatch\n"));
+ goto done;
+ }
+
+ /* Parse TLVs */
+ len = dtoh16(pwrstats->length) - WL_PWR_STATS_HDRLEN;
+ p_data = pwrstats->data;
+ do {
+ type = dtoh16(((uint16*)p_data)[0]);
+ taglen = dtoh16(((uint16*)p_data)[1]);
+
+ if ((taglen < BCM_XTLV_HDR_SIZE) || (taglen > len)) {
+ DHD_ERROR(("Bad len %d for tag %d, remaining len %d\n",
+ taglen, type, len));
+ goto done;
+ }
+
+ if (taglen & 0xF000) {
+ DHD_ERROR(("Resrved bits in len %d for tag %d, remaining len %d\n",
+ taglen, type, len));
+ goto done;
+ }
+
+ switch (type) {
+ case WL_PWRSTATS_TYPE_PCIE:
+ {
+ wl_pwr_pcie_stats_t *stats =
+ (wl_pwr_pcie_stats_t *)p_data;
+
+ if (taglen < sizeof(wl_pwr_pcie_stats_t)) {
+ DHD_ERROR(("Short len for %d: %d < %d\n",
+ type, taglen, (int)sizeof(wl_pwr_pcie_stats_t)));
+ goto done;
+ }
+
+ if (dtoh32(stats->pcie.l0_cnt) == 0) {
+ DHD_ERROR(("link stats are not supported for this pcie core\n"));
+ }
+
+ pwrstats_sysfs.l0_cnt = dtoh32(stats->pcie.l0_cnt);
+ pwrstats_sysfs.l0_dur_us = dtoh32(stats->pcie.l0_usecs);
+ pwrstats_sysfs.l1_cnt = dtoh32(stats->pcie.l1_cnt);
+ pwrstats_sysfs.l1_dur_us = dtoh32(stats->pcie.l1_usecs);
+ pwrstats_sysfs.l1_1_cnt = dtoh32(stats->pcie.l1_1_cnt);
+ pwrstats_sysfs.l1_1_dur_us = dtoh32(stats->pcie.l1_1_usecs);
+ pwrstats_sysfs.l1_2_cnt = dtoh32(stats->pcie.l1_2_cnt);
+ pwrstats_sysfs.l1_2_dur_us = dtoh32(stats->pcie.l1_2_usecs);
+ pwrstats_sysfs.l2_cnt = dtoh32(stats->pcie.l2_cnt);
+ pwrstats_sysfs.l2_dur_us = dtoh32(stats->pcie.l2_usecs);
+ }
+ break;
+
+ case WL_PWRSTATS_TYPE_PM_ACCUMUL:
+ {
+ wl_pwr_pm_accum_stats_v1_t *stats =
+ (wl_pwr_pm_accum_stats_v1_t *)p_data;
+
+ if (taglen < sizeof(wl_pwr_pm_accum_stats_v1_t)) {
+ DHD_ERROR(("Short len for %d: %d < %d\n", type,
+ taglen, (int)sizeof(wl_pwr_pm_accum_stats_v1_t)));
+ goto done;
+ }
+
+ pwrstats_sysfs.current_ts =
+ dtoh64(stats->accum_data.current_ts);
+ pwrstats_sysfs.pm_cnt =
+ dtoh64(stats->accum_data.pm_cnt);
+ pwrstats_sysfs.pm_dur =
+ dtoh64(stats->accum_data.pm_dur);
+ pwrstats_sysfs.pm_last_entry_us =
+ dtoh64(stats->accum_data.pm_last_entry_us);
+ pwrstats_sysfs.awake_cnt =
+ dtoh64(stats->accum_data.awake_cnt);
+ pwrstats_sysfs.awake_dur =
+ dtoh64(stats->accum_data.awake_dur);
+ pwrstats_sysfs.awake_last_entry_us =
+ dtoh64(stats->accum_data.awake_last_entry_us);
+ }
+ break;
+
+ default:
+ DHD_ERROR(("Skipping uknown %d-byte tag %d\n", taglen, type));
+ break;
+ }
+
+ /* Adjust length to account for padding, but don't exceed total len */
+ taglen = (ROUNDUP(taglen, 4) > len) ? len : ROUNDUP(taglen, 4);
+ len -= taglen;
+ *(uint8**)&p_data += taglen;
+ } while (len >= BCM_XTLV_HDR_SIZE);
+
+ OSL_GET_LOCALTIME(&ts_sec, &ts_usec);
+ time_delta = ts_sec * USEC_PER_SEC + ts_usec - pwrstats_sysfs.current_ts;
+ if ((time_delta > last_delta) &&
+ ((time_delta - last_delta) > USEC_PER_SEC)) {
+ last_delta = time_delta;
+ }
+
+ update_pwrstats_cum(&accumstats.awake_cnt, &laststats.awake_cnt,
+ &pwrstats_sysfs.awake_cnt, TRUE);
+ update_pwrstats_cum(&accumstats.awake_dur, &laststats.awake_dur,
+ &pwrstats_sysfs.awake_dur, FALSE);
+ update_pwrstats_cum(&accumstats.pm_cnt, &laststats.pm_cnt, &pwrstats_sysfs.pm_cnt,
+ TRUE);
+ update_pwrstats_cum(&accumstats.pm_dur, &laststats.pm_dur, &pwrstats_sysfs.pm_dur,
+ FALSE);
+ update_pwrstats_cum(NULL, &laststats.awake_last_entry_us,
+ &pwrstats_sysfs.awake_last_entry_us, TRUE);
+ update_pwrstats_cum(NULL, &laststats.pm_last_entry_us,
+ &pwrstats_sysfs.pm_last_entry_us, TRUE);
+
+ ret += scnprintf(buf, PAGE_SIZE - 1, "AWAKE:\n");
+ ret += scnprintf(buf + ret, PAGE_SIZE - 1 - ret, "%s 0x%0llx\n", pwrstr_cnt,
+ accumstats.awake_cnt);
+ ret += scnprintf(buf + ret, PAGE_SIZE - 1 - ret, "%s 0x%0llx\n", pwrstr_dur,
+ accumstats.awake_dur);
+ ret += scnprintf(buf + ret, PAGE_SIZE - 1 - ret, "%s 0x%0llx\n", pwrstr_ts,
+ laststats.awake_last_entry_us);
+ ret += scnprintf(buf + ret, PAGE_SIZE - 1 - ret, "ASLEEP:\n");
+ ret += scnprintf(buf + ret, PAGE_SIZE - 1 - ret, "%s 0x%0llx\n", pwrstr_cnt,
+ accumstats.pm_cnt);
+ ret += scnprintf(buf + ret, PAGE_SIZE - 1 - ret, "%s 0x%0llx\n", pwrstr_dur,
+ accumstats.pm_dur);
+ ret += scnprintf(buf + ret, PAGE_SIZE - 1 - ret, "%s 0x%0llx\n", pwrstr_ts,
+ laststats.pm_last_entry_us);
+
+ update_pwrstats_cum(&accumstats.l0_cnt, &laststats.l0_cnt, &pwrstats_sysfs.l0_cnt,
+ TRUE);
+ update_pwrstats_cum(&accumstats.l0_dur_us, &laststats.l0_dur_us,
+ &pwrstats_sysfs.l0_dur_us, TRUE);
+ update_pwrstats_cum(&accumstats.l1_cnt, &laststats.l1_cnt, &pwrstats_sysfs.l1_cnt,
+ TRUE);
+ update_pwrstats_cum(&accumstats.l1_dur_us, &laststats.l1_dur_us,
+ &pwrstats_sysfs.l1_dur_us, TRUE);
+ update_pwrstats_cum(&accumstats.l1_1_cnt, &laststats.l1_1_cnt,
+ &pwrstats_sysfs.l1_1_cnt, TRUE);
+ update_pwrstats_cum(&accumstats.l1_1_dur_us, &laststats.l1_1_dur_us,
+ &pwrstats_sysfs.l1_1_dur_us, TRUE);
+ update_pwrstats_cum(&accumstats.l1_2_cnt, &laststats.l1_2_cnt,
+ &pwrstats_sysfs.l1_2_cnt, TRUE);
+ update_pwrstats_cum(&accumstats.l1_2_dur_us, &laststats.l1_2_dur_us,
+ &pwrstats_sysfs.l1_2_dur_us, TRUE);
+ update_pwrstats_cum(&accumstats.l2_cnt, &laststats.l2_cnt, &pwrstats_sysfs.l2_cnt,
+ TRUE);
+ update_pwrstats_cum(&accumstats.l2_dur_us, &laststats.l2_dur_us,
+ &pwrstats_sysfs.l2_dur_us, TRUE);
+
+ ret += scnprintf(buf + ret, PAGE_SIZE - 1 - ret, "L0:\n");
+ ret += scnprintf(buf + ret, PAGE_SIZE - 1 - ret, "%s 0x%0llx\n", pwrstr_cnt,
+ accumstats.l0_cnt);
+ ret += scnprintf(buf + ret, PAGE_SIZE - 1 - ret, "%s 0x%0llx\n", pwrstr_dur,
+ accumstats.l0_dur_us);
+ ret += scnprintf(buf + ret, PAGE_SIZE - 1 - ret, "L1:\n");
+ ret += scnprintf(buf + ret, PAGE_SIZE - 1 - ret, "%s 0x%0llx\n", pwrstr_cnt,
+ accumstats.l1_cnt);
+ ret += scnprintf(buf + ret, PAGE_SIZE - 1 - ret, "%s 0x%0llx\n", pwrstr_dur,
+ accumstats.l1_dur_us);
+ ret += scnprintf(buf + ret, PAGE_SIZE - 1 - ret, "L1_1:\n");
+ ret += scnprintf(buf + ret, PAGE_SIZE - 1 - ret, "%s 0x%0llx\n", pwrstr_cnt,
+ accumstats.l1_1_cnt);
+ ret += scnprintf(buf + ret, PAGE_SIZE - 1 - ret, "%s 0x%0llx\n", pwrstr_dur,
+ accumstats.l1_1_dur_us);
+ ret += scnprintf(buf + ret, PAGE_SIZE - 1 - ret, "L1_2:\n");
+ ret += scnprintf(buf + ret, PAGE_SIZE - 1 - ret, "%s 0x%0llx\n", pwrstr_cnt,
+ accumstats.l1_2_cnt);
+ ret += scnprintf(buf + ret, PAGE_SIZE - 1 - ret, "%s 0x%0llx\n", pwrstr_dur,
+ accumstats.l1_2_dur_us);
+ ret += scnprintf(buf + ret, PAGE_SIZE - 1 - ret, "L2:\n");
+ ret += scnprintf(buf + ret, PAGE_SIZE - 1 - ret, "%s 0x%0llx\n", pwrstr_cnt,
+ accumstats.l2_cnt);
+ ret += scnprintf(buf + ret, PAGE_SIZE - 1 - ret, "%s 0x%0llx\n", pwrstr_dur,
+ accumstats.l2_dur_us);
+
+done:
+ if (p_query) {
+ /* free with the original allocation size; 'len' was consumed by the TLV parse */
+ MFREE(g_dhd_pub->osh, p_query, query_len);
+ }
+ if (iovar_buf) {
+ MFREE(g_dhd_pub->osh, iovar_buf, PWRSTATS_IOV_BUF_LEN);
+ }
+
+ return ret;
+}
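+/*
+ * Sample of the text produced by show_pwrstats_path() (values are
+ * illustrative, not measured):
+ *   AWAKE:
+ *   count: 0x2a
+ *   duration_usec: 0x1d4c0
+ *   last_entry_timestamp_usec: 0x5f5e100
+ *   ASLEEP:
+ *   ...
+ * followed by L0/L1/L1_1/L1_2/L2 PCIe link-state blocks in the same
+ * count/duration format.
+ */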
+#endif /* PWRSTATS_SYSFS */
+
+/*
+ * Generic Attribute Structure for DHD.
+ * To add a new sysfs entry under /sys/bcm-dhd/, instantiate an object of
+ * type dhd_attr, populate it with the required show/store functions
+ * (e.g. dhd_attr_cpumask_primary) and add the object to the
+ * default_file_attrs[] array, which gets registered to the kobject of
+ * dhd (named bcm-dhd).
+ */
+
+struct dhd_attr {
+ struct attribute attr;
+ ssize_t(*show)(struct dhd_info *, char *);
+ ssize_t(*store)(struct dhd_info *, const char *, size_t count);
+};
+
+#if defined(DHD_TRACE_WAKE_LOCK)
+static struct dhd_attr dhd_attr_wklock =
+ __ATTR(wklock_trace, 0660, show_wklock_trace, wklock_trace_onoff);
+#endif /* defined(DHD_TRACE_WAKE_LOCK) */
+
+#ifdef DHD_LOG_DUMP
+static struct dhd_attr dhd_attr_logdump_periodic_flush =
+ __ATTR(logdump_periodic_flush, 0660, show_logdump_periodic_flush,
+ logdump_periodic_flush_onoff);
+static struct dhd_attr dhd_attr_logdump_ecntr =
+ __ATTR(logdump_ecntr_enable, 0660, show_logdump_ecntr,
+ logdump_ecntr_onoff);
+#endif /* DHD_LOG_DUMP */
+
+static struct dhd_attr dhd_attr_ecounters =
+ __ATTR(ecounters, 0660, show_enable_ecounter, ecounter_onoff);
+
+#if defined(DHD_QOS_ON_SOCK_FLOW)
+static struct dhd_attr dhd_attr_sock_qos_onoff =
+ __ATTR(sock_qos_onoff, 0660, show_sock_qos_onoff, update_sock_qos_onoff);
+
+static struct dhd_attr dhd_attr_sock_qos_stats =
+ __ATTR(sock_qos_stats, 0660, show_sock_qos_stats, clear_sock_qos_stats);
+
+static struct dhd_attr dhd_attr_sock_qos_upgrade =
+ __ATTR(sock_qos_upgrade, 0660, show_sock_qos_upgrade, update_sock_qos_upgrade);
+
+static struct dhd_attr dhd_attr_sock_qos_numfl_upgrd_thresh =
+ __ATTR(sock_qos_numfl_upgrd_thresh, 0660, show_sock_qos_numfl_upgrd_thresh,
+ update_sock_qos_numfl_upgrd_thresh);
+
+static struct dhd_attr dhd_attr_sock_qos_avgpktsize_thresh =
+ __ATTR(sock_qos_avgpktsize_thresh, 0660, show_sock_qos_avgpktsize_thresh,
+ update_sock_qos_avgpktsize_thresh);
+
+static struct dhd_attr dhd_attr_sock_qos_numpkts_thresh =
+ __ATTR(sock_qos_numpkts_thresh, 0660, show_sock_qos_numpkts_thresh,
+ update_sock_qos_numpkts_thresh);
+
+static struct dhd_attr dhd_attr_sock_qos_detectcnt_thresh =
+ __ATTR(sock_qos_detectcnt_thresh, 0660, show_sock_qos_detectcnt_thresh,
+ update_sock_qos_detectcnt_thresh);
+
+static struct dhd_attr dhd_attr_sock_qos_detectcnt_upgrd_thresh =
+ __ATTR(sock_qos_detectcnt_upgrd_thresh, 0660, show_sock_qos_detectcnt_upgrd_thresh,
+ update_sock_qos_detectcnt_upgrd_thresh);
+
+static struct dhd_attr dhd_attr_sock_qos_maxfl =
+ __ATTR(sock_qos_maxfl, 0660, show_sock_qos_maxfl,
+ update_sock_qos_maxfl);
+#if defined(DHD_QOS_ON_SOCK_FLOW_UT)
+static struct dhd_attr dhd_attr_sock_qos_unit_test =
+ __ATTR(sock_qos_unit_test, 0660, NULL, do_sock_qos_unit_test);
+#endif
+#endif /* DHD_QOS_ON_SOCK_FLOW */
+
+#ifdef DHD_SSSR_DUMP
+static struct dhd_attr dhd_attr_sssr_enab =
+ __ATTR(sssr_enab, 0660, show_sssr_enab, set_sssr_enab);
+static struct dhd_attr dhd_attr_fis_enab =
+ __ATTR(fis_enab, 0660, show_fis_enab, set_fis_enab);
+#endif /* DHD_SSSR_DUMP */
+
+static struct dhd_attr dhd_attr_firmware_path =
+ __ATTR(firmware_path, 0660, show_firmware_path, store_firmware_path);
+
+static struct dhd_attr dhd_attr_nvram_path =
+ __ATTR(nvram_path, 0660, show_nvram_path, store_nvram_path);
+
+#ifdef PWRSTATS_SYSFS
+static struct dhd_attr dhd_attr_pwrstats_path =
+ __ATTR(power_stats, 0660, show_pwrstats_path, NULL);
+#endif /* PWRSTATS_SYSFS */
+
+#define to_dhd(k) container_of(k, struct dhd_info, dhd_kobj)
+#define to_attr(a) container_of(a, struct dhd_attr, attr)
+
+#ifdef DHD_MAC_ADDR_EXPORT
+struct ether_addr sysfs_mac_addr;
+static ssize_t
+show_mac_addr(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+
+ ret = scnprintf(buf, PAGE_SIZE - 1, MACF,
+ (uint32)sysfs_mac_addr.octet[0], (uint32)sysfs_mac_addr.octet[1],
+ (uint32)sysfs_mac_addr.octet[2], (uint32)sysfs_mac_addr.octet[3],
+ (uint32)sysfs_mac_addr.octet[4], (uint32)sysfs_mac_addr.octet[5]);
+
+ return ret;
+}
+
+static ssize_t
+set_mac_addr(struct dhd_info *dev, const char *buf, size_t count)
+{
+ if (!bcm_ether_atoe(buf, &sysfs_mac_addr)) {
+ DHD_ERROR(("Invalid Mac Address \n"));
+ return -EINVAL;
+ }
+
+ DHD_ERROR(("Mac Address set with "MACDBG"\n", MAC2STRDBG(&sysfs_mac_addr)));
+
+ return count;
+}
+
+static struct dhd_attr dhd_attr_macaddr =
+ __ATTR(mac_addr, 0660, show_mac_addr, set_mac_addr);
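+/*
+ * Example: the address is written as a colon-separated string, which is
+ * parsed by bcm_ether_atoe(), e.g. (path illustrative)
+ *   echo "00:90:4c:11:22:33" > /sys/wifi/mac_addr
+ */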
+#endif /* DHD_MAC_ADDR_EXPORT */
+
+#ifdef DHD_FW_COREDUMP
+/*
+ * XXX The filename to store memdump is defined for each platform.
+ * - The default path of CUSTOMER_HW4 device is "PLATFORM_PATH/.memdump.info"
+ * - Brix platform will take default path "/installmedia/.memdump.info"
+ * New platforms can add their ifdefs accordingly below.
+ */
+
+#ifdef CONFIG_X86
+#if defined(OEM_ANDROID)
+#define MEMDUMPINFO_LIVE PLATFORM_PATH".memdump.info"
+#define MEMDUMPINFO_INST "/data/.memdump.info"
+#define MEMDUMPINFO MEMDUMPINFO_LIVE
+#else /* FC19 and Others */
+#define MEMDUMPINFO PLATFORM_PATH".memdump.info"
+#endif /* OEM_ANDROID */
+#else /* For non x86 platforms */
+#define MEMDUMPINFO PLATFORM_PATH".memdump.info"
+#endif /* CONFIG_X86 */
+
+uint32
+get_mem_val_from_file(void)
+{
+ struct file *fp = NULL;
+ uint32 mem_val = DUMP_MEMFILE_MAX;
+ char *p_mem_val = NULL;
+ char *filepath = MEMDUMPINFO;
+ int ret = 0;
+
+ /* Read memdump info from the file */
+ fp = filp_open(filepath, O_RDONLY, 0);
+ if (IS_ERR(fp)) {
+ DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
+#if defined(CONFIG_X86) && defined(OEM_ANDROID)
+ /* Check if it is Live Brix Image */
+ if (strcmp(filepath, MEMDUMPINFO_LIVE) != 0) {
+ goto done;
+ }
+ /* Try if it is Installed Brix Image */
+ filepath = MEMDUMPINFO_INST;
+ DHD_ERROR(("%s: Try File [%s]\n", __FUNCTION__, filepath));
+ fp = filp_open(filepath, O_RDONLY, 0);
+ if (IS_ERR(fp)) {
+ DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
+ goto done;
+ }
+#else /* Non Brix Android platform */
+ goto done;
+#endif /* CONFIG_X86 && OEM_ANDROID */
+ }
+
+ /* Handle success case */
+ ret = kernel_read_compat(fp, 0, (char *)&mem_val, sizeof(uint32));
+ if (ret < 0) {
+ DHD_ERROR(("%s: File read error, ret=%d\n", __FUNCTION__, ret));
+ filp_close(fp, NULL);
+ goto done;
+ }
+
+ p_mem_val = (char*)&mem_val;
+ p_mem_val[sizeof(uint32) - 1] = '\0';
+ mem_val = bcm_atoi(p_mem_val);
+
+ filp_close(fp, NULL);
+
+done:
+ return mem_val;
+}
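+/*
+ * The .memdump.info file is expected to carry the mode as a short ASCII
+ * number: only the first sizeof(uint32) bytes are read and the string is
+ * truncated to three characters before bcm_atoi(). E.g. a file containing
+ * "2" selects mode 2.
+ */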
+
+void dhd_get_memdump_info(dhd_pub_t *dhd)
+{
+#ifndef DHD_EXPORT_CNTL_FILE
+ uint32 mem_val = DUMP_MEMFILE_MAX;
+
+ mem_val = get_mem_val_from_file();
+ if (mem_val != DUMP_MEMFILE_MAX)
+ dhd->memdump_enabled = mem_val;
+#ifdef DHD_INIT_DEFAULT_MEMDUMP
+ if (mem_val == 0 || mem_val == DUMP_MEMFILE_MAX)
+ mem_val = DUMP_MEMFILE_BUGON;
+#endif /* DHD_INIT_DEFAULT_MEMDUMP */
+#else
+#ifdef DHD_INIT_DEFAULT_MEMDUMP
+ if (dhd->memdump_enabled == 0 || dhd->memdump_enabled == DUMP_MEMFILE_MAX)
+ dhd->memdump_enabled = DUMP_MEMFILE;
+#endif /* DHD_INIT_DEFAULT_MEMDUMP */
+#endif /* !DHD_EXPORT_CNTL_FILE */
+#ifdef BCMQT
+ /* In the QT environment, collecting a memdump on FW TRAP or IOVAR
+ * timeout takes long enough to make the system unresponsive, so it is
+ * disabled here. If needed, a memdump can still be collected through
+ * the 'dhd upload' command.
+ */
+ dhd->memdump_enabled = DUMP_DISABLED;
+#endif
+#ifdef DHD_DETECT_CONSECUTIVE_MFG_HANG
+ /* override the memdump_enabled value so a one-time trap does not halt the system */
+ if (dhd_bus_get_fw_mode(dhd) == DHD_FLAG_MFG_MODE &&
+ (dhd->memdump_enabled == DUMP_MEMONLY ||
+ dhd->memdump_enabled == DUMP_MEMFILE_BUGON)) {
+ dhd->memdump_enabled = DUMP_MEMFILE;
+ DHD_ERROR(("%s : Override memdump_value to %d\n",
+ __FUNCTION__, dhd->memdump_enabled));
+ }
+#endif /* DHD_DETECT_CONSECUTIVE_MFG_HANG */
+ DHD_ERROR(("%s: MEMDUMP ENABLED = %u\n", __FUNCTION__, dhd->memdump_enabled));
+}
+
+#ifdef DHD_EXPORT_CNTL_FILE
+static ssize_t
+show_memdump_info(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+ dhd_pub_t *dhdp;
+
+ if (!dev) {
+ DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
+ return ret;
+ }
+
+ dhdp = &dev->pub;
+ ret = scnprintf(buf, PAGE_SIZE -1, "%u\n", dhdp->memdump_enabled);
+ return ret;
+}
+
+static ssize_t
+set_memdump_info(struct dhd_info *dev, const char *buf, size_t count)
+{
+ unsigned long memval;
+ dhd_pub_t *dhdp;
+
+ if (!dev) {
+ DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
+ return count;
+ }
+ dhdp = &dev->pub;
+
+ memval = bcm_strtoul(buf, NULL, 10);
+ sscanf(buf, "%lu", &memval);
+
+ dhdp->memdump_enabled = (uint32)memval;
+
+ DHD_ERROR(("%s: MEMDUMP ENABLED = %u\n", __FUNCTION__, dhdp->memdump_enabled));
+ return count;
+}
+
+static struct dhd_attr dhd_attr_memdump =
+ __ATTR(memdump, 0660, show_memdump_info, set_memdump_info);
+#endif /* DHD_EXPORT_CNTL_FILE */
+#endif /* DHD_FW_COREDUMP */
+
+#ifdef BCMASSERT_LOG
+/*
+ * XXX The filename to store assert type is defined for each platform.
+ * New platforms can add their ifdefs accordingly below.
+ */
+#define ASSERTINFO PLATFORM_PATH".assert.info"
+
+int
+get_assert_val_from_file(void)
+{
+ struct file *fp = NULL;
+ char *filepath = ASSERTINFO;
+ char *p_mem_val = NULL;
+ int mem_val = -1;
+
+ /*
+ * Read assert info from the file
+ * 0: Trigger Kernel crash by panic()
+ * 1: Print out the logs and don't trigger Kernel panic. (default)
+ * 2: Trigger Kernel crash by BUG()
+ * File doesn't exist: Keep default value (1).
+ */
+ fp = filp_open(filepath, O_RDONLY, 0);
+ if (IS_ERR(fp)) {
+ DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
+ } else {
+ int ret = kernel_read_compat(fp, 0, (char *)&mem_val, sizeof(uint32));
+ if (ret < 0) {
+ DHD_ERROR(("%s: File read error, ret=%d\n", __FUNCTION__, ret));
+ } else {
+ p_mem_val = (char *)&mem_val;
+ p_mem_val[sizeof(uint32) - 1] = '\0';
+ mem_val = bcm_atoi(p_mem_val);
+ DHD_ERROR(("%s: ASSERT ENABLED = %d\n", __FUNCTION__, mem_val));
+ }
+ filp_close(fp, NULL);
+ }
+
+#ifdef CUSTOMER_HW4_DEBUG
+ mem_val = (mem_val >= 0) ? mem_val : 1;
+#else
+ mem_val = (mem_val >= 0) ? mem_val : 0;
+#endif /* CUSTOMER_HW4_DEBUG */
+ return mem_val;
+}
+
+void dhd_get_assert_info(dhd_pub_t *dhd)
+{
+#ifndef DHD_EXPORT_CNTL_FILE
+ int mem_val = -1;
+
+ mem_val = get_assert_val_from_file();
+
+ g_assert_type = mem_val;
+#endif /* !DHD_EXPORT_CNTL_FILE */
+}
+
+#ifdef DHD_EXPORT_CNTL_FILE
+static ssize_t
+show_assert_info(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+
+ if (!dev) {
+ DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
+ return ret;
+ }
+
+ ret = scnprintf(buf, PAGE_SIZE -1, "%d\n", g_assert_type);
+ return ret;
+}
+
+static ssize_t
+set_assert_info(struct dhd_info *dev, const char *buf, size_t count)
+{
+ unsigned long assert_val;
+
+ assert_val = bcm_strtoul(buf, NULL, 10);
+ sscanf(buf, "%lu", &assert_val);
+
+ g_assert_type = (uint32)assert_val;
+
+ DHD_ERROR(("%s: ASSERT ENABLED = %lu\n", __FUNCTION__, assert_val));
+ return count;
+}
+
+static struct dhd_attr dhd_attr_assert =
+ __ATTR(assert, 0660, show_assert_info, set_assert_info);
+#endif /* DHD_EXPORT_CNTL_FILE */
+#endif /* BCMASSERT_LOG */
+
+#ifdef DHD_EXPORT_CNTL_FILE
+#if defined(WRITE_WLANINFO)
+static ssize_t
+show_wifiver_info(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+
+ ret = scnprintf(buf, PAGE_SIZE -1, "%s", version_info);
+ return ret;
+}
+
+static ssize_t
+set_wifiver_info(struct dhd_info *dev, const char *buf, size_t count)
+{
+ DHD_ERROR(("Do not set version info\n"));
+ return -EINVAL;
+}
+
+static struct dhd_attr dhd_attr_wifiver =
+ __ATTR(wifiver, 0660, show_wifiver_info, set_wifiver_info);
+#endif /* WRITE_WLANINFO */
+
+#if defined(USE_CID_CHECK) || defined(USE_DIRECT_VID_TAG)
+char cidinfostr[MAX_VNAME_LEN];
+
+static ssize_t
+show_cid_info(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+
+#ifdef USE_DIRECT_VID_TAG
+ ret = scnprintf(buf, PAGE_SIZE -1, "%x%x", cidinfostr[VENDOR_OFF], cidinfostr[MD_REV_OFF]);
+#endif /* USE_DIRECT_VID_TAG */
+#ifdef USE_CID_CHECK
+ ret = scnprintf(buf, PAGE_SIZE -1, "%s", cidinfostr);
+#endif /* USE_CID_CHECK */
+ return ret;
+}
+
+static ssize_t
+set_cid_info(struct dhd_info *dev, const char *buf, size_t count)
+{
+#ifdef USE_DIRECT_VID_TAG
+ uint32 stored_vid = 0, md_rev = 0, vendor = 0;
+ uint32 vendor_mask = 0x00FF;
+
+ stored_vid = bcm_strtoul(buf, NULL, 16);
+
+ DHD_ERROR(("%s : stored_vid : 0x%x\n", __FUNCTION__, stored_vid));
+ md_rev = stored_vid & vendor_mask;
+ vendor = stored_vid >> 8;
+
+ memset(cidinfostr, 0, sizeof(cidinfostr));
+
+ cidinfostr[MD_REV_OFF] = (char)md_rev;
+ cidinfostr[VENDOR_OFF] = (char)vendor;
+ DHD_INFO(("CID string %x%x\n", cidinfostr[VENDOR_OFF], cidinfostr[MD_REV_OFF]));
+#endif /* USE_DIRECT_VID_TAG */
+#ifdef USE_CID_CHECK
+ int len = strlen(buf) + 1;
+ int maxstrsz;
+ maxstrsz = MAX_VNAME_LEN;
+
+ scnprintf(cidinfostr, ((len > maxstrsz) ? maxstrsz : len), "%s", buf);
+ DHD_INFO(("%s : CID info string\n", cidinfostr));
+#endif /* USE_CID_CHECK */
+ return count;
+}
+
+static struct dhd_attr dhd_attr_cidinfo =
+ __ATTR(cid, 0660, show_cid_info, set_cid_info);
+#endif /* USE_CID_CHECK || USE_DIRECT_VID_TAG */
+
+#if defined(GEN_SOFTAP_INFO_FILE)
+char softapinfostr[SOFTAP_INFO_BUF_SZ];
+static ssize_t
+show_softap_info(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+
+ ret = scnprintf(buf, PAGE_SIZE -1, "%s", softapinfostr);
+ return ret;
+}
+
+static ssize_t
+set_softap_info(struct dhd_info *dev, const char *buf, size_t count)
+{
+ DHD_ERROR(("Do not set sofap related info\n"));
+ return -EINVAL;
+}
+
+static struct dhd_attr dhd_attr_softapinfo =
+ __ATTR(softap, 0660, show_softap_info, set_softap_info);
+#endif /* GEN_SOFTAP_INFO_FILE */
+
+#if defined(MIMO_ANT_SETTING)
+unsigned long antsel;
+
+static ssize_t
+show_ant_info(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+
+ ret = scnprintf(buf, PAGE_SIZE -1, "%lu\n", antsel);
+ return ret;
+}
+
+static ssize_t
+set_ant_info(struct dhd_info *dev, const char *buf, size_t count)
+{
+ unsigned long ant_val;
+
+ ant_val = bcm_strtoul(buf, NULL, 10);
+ sscanf(buf, "%lu", &ant_val);
+
+ /*
+ * Check value
+ * 0 - Not set, handle same as file not exist
+ */
+ if (ant_val > 3) {
+ DHD_ERROR(("[WIFI_SEC] %s: Set Invalid value %lu \n",
+ __FUNCTION__, ant_val));
+ return -EINVAL;
+ }
+
+ antsel = ant_val;
+ DHD_ERROR(("[WIFI_SEC] %s: Set Antinfo val = %lu \n", __FUNCTION__, antsel));
+ return count;
+}
+
+static struct dhd_attr dhd_attr_antinfo =
+ __ATTR(ant, 0660, show_ant_info, set_ant_info);
+#endif /* MIMO_ANT_SETTING */
+
+#ifdef DHD_PM_CONTROL_FROM_FILE
+extern uint32 pmmode_val;
+static ssize_t
+show_pm_info(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+
+ if (pmmode_val == 0xFF) {
+ ret = scnprintf(buf, PAGE_SIZE -1, "PM mode is not set\n");
+ } else {
+ ret = scnprintf(buf, PAGE_SIZE -1, "%u\n", pmmode_val);
+ }
+ return ret;
+}
+
+static ssize_t
+set_pm_info(struct dhd_info *dev, const char *buf, size_t count)
+{
+ unsigned long pm_val;
+
+ pm_val = bcm_strtoul(buf, NULL, 10);
+ sscanf(buf, "%lu", &pm_val);
+
+ if (pm_val > 2) {
+ DHD_ERROR(("[WIFI_SEC] %s: Set Invalid value %lu \n",
+ __FUNCTION__, pm_val));
+ return -EINVAL;
+ }
+
+ pmmode_val = (uint32)pm_val;
+ DHD_ERROR(("[WIFI_SEC] %s: Set pminfo val = %u\n", __FUNCTION__, pmmode_val));
+ return count;
+}
+
+static struct dhd_attr dhd_attr_pminfo =
+ __ATTR(pm, 0660, show_pm_info, set_pm_info);
+#endif /* DHD_PM_CONTROL_FROM_FILE */
+
+#ifdef LOGTRACE_FROM_FILE
+unsigned long logtrace_val = 1;
+
+static ssize_t
+show_logtrace_info(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+
+ ret = scnprintf(buf, PAGE_SIZE -1, "%lu\n", logtrace_val);
+ return ret;
+}
+
+static ssize_t
+set_logtrace_info(struct dhd_info *dev, const char *buf, size_t count)
+{
+ unsigned long onoff;
+
+ onoff = bcm_strtoul(buf, NULL, 10);
+ sscanf(buf, "%lu", &onoff);
+
+ if (onoff > 2) {
+ DHD_ERROR(("[WIFI_SEC] %s: Set Invalid value %lu \n",
+ __FUNCTION__, onoff));
+ return -EINVAL;
+ }
+
+ logtrace_val = onoff;
+ DHD_ERROR(("[WIFI_SEC] %s: LOGTRACE On/Off from sysfs = %lu\n",
+ __FUNCTION__, logtrace_val));
+ return count;
+}
+
+static struct dhd_attr dhd_attr_logtraceinfo =
+ __ATTR(logtrace, 0660, show_logtrace_info, set_logtrace_info);
+#endif /* LOGTRACE_FROM_FILE */
+
+#ifdef USE_WFA_CERT_CONF
+#ifdef BCMSDIO
+uint32 bus_txglom = VALUENOTSET;
+
+static ssize_t
+show_bustxglom(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+
+ if (bus_txglom == VALUENOTSET) {
+ ret = scnprintf(buf, PAGE_SIZE - 1, "%s\n", "bustxglom not set from sysfs");
+ } else {
+ ret = scnprintf(buf, PAGE_SIZE -1, "%u\n", bus_txglom);
+ }
+ return ret;
+}
+
+static ssize_t
+set_bustxglom(struct dhd_info *dev, const char *buf, size_t count)
+{
+ uint32 onoff;
+
+ onoff = (uint32)bcm_atoi(buf);
+ sscanf(buf, "%u", &onoff);
+
+ if (onoff > 2) {
+ DHD_ERROR(("[WIFI_SEC] %s: Set Invalid value %u \n",
+ __FUNCTION__, onoff));
+ return -EINVAL;
+ }
+
+ bus_txglom = onoff;
+ DHD_ERROR(("[WIFI_SEC] %s: BUS TXGLOM On/Off from sysfs = %u\n",
+ __FUNCTION__, bus_txglom));
+ return count;
+}
+
+static struct dhd_attr dhd_attr_bustxglom =
+ __ATTR(bustxglom, 0660, show_bustxglom, set_bustxglom);
+#endif /* BCMSDIO */
+
+#if defined(ROAM_ENABLE) || defined(DISABLE_BUILTIN_ROAM)
+uint32 roam_off = VALUENOTSET;
+
+static ssize_t
+show_roamoff(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+
+ if (roam_off == VALUENOTSET) {
+ ret = scnprintf(buf, PAGE_SIZE -1, "%s\n", "roam_off not set from sysfs");
+ } else {
+ ret = scnprintf(buf, PAGE_SIZE -1, "%u\n", roam_off);
+ }
+ return ret;
+}
+
+static ssize_t
+set_roamoff(struct dhd_info *dev, const char *buf, size_t count)
+{
+ uint32 onoff;
+
+ onoff = bcm_atoi(buf);
+ sscanf(buf, "%u", &onoff);
+
+ if (onoff > 2) {
+ DHD_ERROR(("[WIFI_SEC] %s: Set Invalid value %u \n",
+ __FUNCTION__, onoff));
+ return -EINVAL;
+ }
+
+ roam_off = onoff;
+ DHD_ERROR(("[WIFI_SEC] %s: ROAM On/Off from sysfs = %u\n",
+ __FUNCTION__, roam_off));
+ return count;
+}
+
+static struct dhd_attr dhd_attr_roamoff =
+ __ATTR(roamoff, 0660, show_roamoff, set_roamoff);
+#endif /* ROAM_ENABLE || DISABLE_BUILTIN_ROAM */
+
+#ifdef USE_WL_FRAMEBURST
+uint32 frameburst = VALUENOTSET;
+
+static ssize_t
+show_frameburst(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+
+ if (frameburst == VALUENOTSET) {
+ ret = scnprintf(buf, PAGE_SIZE -1, "%s\n", "frameburst not set from sysfs");
+ } else {
+ ret = scnprintf(buf, PAGE_SIZE -1, "%u\n", frameburst);
+ }
+ return ret;
+}
+
+static ssize_t
+set_frameburst(struct dhd_info *dev, const char *buf, size_t count)
+{
+ uint32 onoff;
+
+ onoff = bcm_atoi(buf);
+ sscanf(buf, "%u", &onoff);
+
+ if (onoff > 2) {
+ DHD_ERROR(("[WIFI_SEC] %s: Set Invalid value %u \n",
+ __FUNCTION__, onoff));
+ return -EINVAL;
+ }
+
+ frameburst = onoff;
+ DHD_ERROR(("[WIFI_SEC] %s: FRAMEBURST On/Off from sysfs = %u\n",
+ __FUNCTION__, frameburst));
+ return count;
+}
+
+static struct dhd_attr dhd_attr_frameburst =
+ __ATTR(frameburst, 0660, show_frameburst, set_frameburst);
+#endif /* USE_WL_FRAMEBURST */
+
+#ifdef USE_WL_TXBF
+uint32 txbf = VALUENOTSET;
+
+static ssize_t
+show_txbf(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+
+ if (txbf == VALUENOTSET) {
+ ret = scnprintf(buf, PAGE_SIZE -1, "%s\n", "txbf not set from sysfs");
+ } else {
+ ret = scnprintf(buf, PAGE_SIZE -1, "%u\n", txbf);
+ }
+ return ret;
+}
+
+static ssize_t
+set_txbf(struct dhd_info *dev, const char *buf, size_t count)
+{
+ uint32 onoff;
+
+ onoff = bcm_atoi(buf);
+ sscanf(buf, "%u", &onoff);
+
+ if (onoff > 2) {
+ DHD_ERROR(("[WIFI_SEC] %s: Set Invalid value %u \n",
+ __FUNCTION__, onoff));
+ return -EINVAL;
+ }
+
+ txbf = onoff;
+ DHD_ERROR(("[WIFI_SEC] %s: FRAMEBURST On/Off from sysfs = %u\n",
+ __FUNCTION__, txbf));
+ return count;
+}
+
+static struct dhd_attr dhd_attr_txbf =
+ __ATTR(txbf, 0660, show_txbf, set_txbf);
+#endif /* USE_WL_TXBF */
+
+#ifdef PROP_TXSTATUS
+uint32 proptx = VALUENOTSET;
+
+static ssize_t
+show_proptx(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+
+ if (proptx == VALUENOTSET) {
+ ret = scnprintf(buf, PAGE_SIZE -1, "%s\n", "proptx not set from sysfs");
+ } else {
+ ret = scnprintf(buf, PAGE_SIZE -1, "%u\n", proptx);
+ }
+ return ret;
+}
+
+static ssize_t
+set_proptx(struct dhd_info *dev, const char *buf, size_t count)
+{
+ uint32 onoff;
+
+ onoff = bcm_strtoul(buf, NULL, 10);
+ sscanf(buf, "%u", &onoff);
+
+ if (onoff > 2) {
+ DHD_ERROR(("[WIFI_SEC] %s: Set Invalid value %u \n",
+ __FUNCTION__, onoff));
+ return -EINVAL;
+ }
+
+ proptx = onoff;
+ DHD_ERROR(("[WIFI_SEC] %s: FRAMEBURST On/Off from sysfs = %u\n",
+ __FUNCTION__, txbf));
+ return count;
+}
+
+static struct dhd_attr dhd_attr_proptx =
+ __ATTR(proptx, 0660, show_proptx, set_proptx);
+
+#endif /* PROP_TXSTATUS */
+#endif /* USE_WFA_CERT_CONF */
+#endif /* DHD_EXPORT_CNTL_FILE */
+
+#if defined(DHD_ADPS_BAM_EXPORT) && defined(WL_BAM)
+#define BAD_AP_MAC_ADDR_ELEMENT_NUM 6
+wl_bad_ap_mngr_t *g_bad_ap_mngr = NULL;
+
+static ssize_t
+show_adps_bam_list(struct dhd_info *dev, char *buf)
+{
+ int offset = 0;
+ ssize_t ret = 0;
+
+ wl_bad_ap_info_t *bad_ap;
+ wl_bad_ap_info_entry_t *entry;
+
+ if (g_bad_ap_mngr == NULL)
+ return ret;
+
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ list_for_each_entry(entry, &g_bad_ap_mngr->list, list) {
+ bad_ap = &entry->bad_ap;
+
+ ret = scnprintf(buf + offset, PAGE_SIZE - 1, MACF"\n",
+ bad_ap->bssid.octet[0], bad_ap->bssid.octet[1],
+ bad_ap->bssid.octet[2], bad_ap->bssid.octet[3],
+ bad_ap->bssid.octet[4], bad_ap->bssid.octet[5]);
+
+ offset += ret;
+ }
+ GCC_DIAGNOSTIC_POP();
+
+ return offset;
+}
+
+static ssize_t
+store_adps_bam_list(struct dhd_info *dev, const char *buf, size_t count)
+{
+ int ret;
+ size_t len;
+ int offset;
+ char tmp[128];
+ wl_bad_ap_info_t bad_ap;
+
+ if (g_bad_ap_mngr == NULL)
+ return count;
+
+ len = count;
+ offset = 0;
+ do {
+ uint32 mac[BAD_AP_MAC_ADDR_ELEMENT_NUM] = {0};
+ int i;
+
+ /* scan into uint32 temporaries: "%x" stores a full integer, so
+ * scanning directly into the one-byte octet fields would overrun
+ */
+ ret = sscanf(buf + offset, MACF"\n",
+ &mac[0], &mac[1], &mac[2],
+ &mac[3], &mac[4], &mac[5]);
+ if (ret != BAD_AP_MAC_ADDR_ELEMENT_NUM) {
+ DHD_ERROR(("%s - fail to parse bad ap data\n", __FUNCTION__));
+ return -EINVAL;
+ }
+ for (i = 0; i < BAD_AP_MAC_ADDR_ELEMENT_NUM; i++) {
+ bad_ap.bssid.octet[i] = (uint8)mac[i];
+ }
+
+ ret = wl_bad_ap_mngr_add(g_bad_ap_mngr, &bad_ap);
+ if (ret < 0)
+ return ret;
+
+ ret = snprintf(tmp, ARRAYSIZE(tmp), MACF"\n",
+ bad_ap.bssid.octet[0], bad_ap.bssid.octet[1],
+ bad_ap.bssid.octet[2], bad_ap.bssid.octet[3],
+ bad_ap.bssid.octet[4], bad_ap.bssid.octet[5]);
+ if (ret < 0) {
+ DHD_ERROR(("%s - fail to get bad ap data length(%d)\n", __FUNCTION__, ret));
+ return ret;
+ }
+
+ len -= ret;
+ offset += ret;
+ } while (len > 0);
+
+ return count;
+}
+
+static struct dhd_attr dhd_attr_adps_bam =
+ __ATTR(bad_ap_list, 0660, show_adps_bam_list, store_adps_bam_list);
+#endif /* DHD_ADPS_BAM_EXPORT && WL_BAM */
+
+#ifdef DHD_SEND_HANG_PRIVCMD_ERRORS
+uint32 report_hang_privcmd_err = 1;
+
+static ssize_t
+show_hang_privcmd_err(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+
+ ret = scnprintf(buf, PAGE_SIZE - 1, "%u\n", report_hang_privcmd_err);
+ return ret;
+}
+
+static ssize_t
+set_hang_privcmd_err(struct dhd_info *dev, const char *buf, size_t count)
+{
+ uint32 val;
+
+ val = bcm_atoi(buf);
+ sscanf(buf, "%u", &val);
+
+ report_hang_privcmd_err = val ? 1 : 0;
+ DHD_INFO(("%s: Set report HANG for private cmd error: %d\n",
+ __FUNCTION__, report_hang_privcmd_err));
+ return count;
+}
+
+static struct dhd_attr dhd_attr_hang_privcmd_err =
+ __ATTR(hang_privcmd_err, 0660, show_hang_privcmd_err, set_hang_privcmd_err);
+#endif /* DHD_SEND_HANG_PRIVCMD_ERRORS */
+
+#if defined(SHOW_LOGTRACE)
+static ssize_t
+show_control_logtrace(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+
+ ret = scnprintf(buf, PAGE_SIZE - 1, "%d\n", control_logtrace);
+ return ret;
+}
+
+static ssize_t
+set_control_logtrace(struct dhd_info *dev, const char *buf, size_t count)
+{
+ uint32 val;
+
+ val = bcm_atoi(buf);
+
+ control_logtrace = val;
+ DHD_ERROR(("%s: Set control logtrace: %d\n", __FUNCTION__, control_logtrace));
+ return count;
+}
+
+static struct dhd_attr dhd_attr_control_logtrace =
+__ATTR(control_logtrace, 0660, show_control_logtrace, set_control_logtrace);
+#endif /* SHOW_LOGTRACE */
+
+#if defined(DISABLE_HE_ENAB) || defined(CUSTOM_CONTROL_HE_ENAB)
+uint8 control_he_enab = 1;
+#endif /* DISABLE_HE_ENAB || CUSTOM_CONTROL_HE_ENAB */
+
+#if defined(CUSTOM_CONTROL_HE_ENAB)
+static ssize_t
+show_control_he_enab(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+
+ ret = scnprintf(buf, PAGE_SIZE - 1, "%d\n", control_he_enab);
+ return ret;
+}
+
+static ssize_t
+set_control_he_enab(struct dhd_info *dev, const char *buf, size_t count)
+{
+ uint32 val;
+
+ val = bcm_atoi(buf);
+
+ control_he_enab = val ? 1 : 0;
+ DHD_ERROR(("%s: Set control he enab: %d\n", __FUNCTION__, control_he_enab));
+ return count;
+}
+
+static struct dhd_attr dhd_attr_control_he_enab=
+__ATTR(control_he_enab, 0660, show_control_he_enab, set_control_he_enab);
+#endif /* CUSTOM_CONTROL_HE_ENAB */
+
+#if defined(WLAN_ACCEL_BOOT)
+static ssize_t
+show_wl_accel_force_reg_on(struct dhd_info *dhd, char *buf)
+{
+ ssize_t ret = 0;
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
+ return ret;
+ }
+
+ ret = scnprintf(buf, PAGE_SIZE - 1, "%d\n", dhd->wl_accel_force_reg_on);
+ return ret;
+}
+
+static ssize_t
+set_wl_accel_force_reg_on(struct dhd_info *dhd, const char *buf, size_t count)
+{
+ uint32 val;
+
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
+ return count;
+ }
+
+ val = bcm_atoi(buf);
+
+ dhd->wl_accel_force_reg_on = val ? 1 : 0;
+ DHD_ERROR(("%s: wl_accel_force_reg_on: %d\n", __FUNCTION__, dhd->wl_accel_force_reg_on));
+ return count;
+}
+
+static struct dhd_attr dhd_attr_wl_accel_force_reg_on=
+__ATTR(wl_accel_force_reg_on, 0660, show_wl_accel_force_reg_on, set_wl_accel_force_reg_on);
+#endif /* WLAN_ACCEL_BOOT */
+
+#if defined(AGG_H2D_DB)
+extern bool agg_h2d_db_enab;
+extern uint32 agg_h2d_db_timeout;
+extern uint32 agg_h2d_db_inflight_thresh;
+
+static ssize_t
+show_agg_h2d_db_enab(struct dhd_info *dhd, char *buf)
+{
+ ssize_t ret = 0;
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
+ return ret;
+ }
+
+ ret = scnprintf(buf, PAGE_SIZE - 1, "%d\n", agg_h2d_db_enab);
+ return ret;
+}
+
+static ssize_t
+set_agg_h2d_db_enab(struct dhd_info *dhd, const char *buf, size_t count)
+{
+ uint32 val;
+
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
+ return count;
+ }
+
+ val = bcm_atoi(buf);
+
+ agg_h2d_db_enab = val ? TRUE : FALSE;
+ DHD_ERROR(("%s: agg_h2d_db_timeout: %d\n", __FUNCTION__, agg_h2d_db_enab));
+ return count;
+}
+
+static struct dhd_attr dhd_attr_agg_h2d_db_enab =
+__ATTR(agg_h2d_db_enab, 0660, show_agg_h2d_db_enab, set_agg_h2d_db_enab);
+
+static ssize_t
+show_agg_h2d_db_inflight_thresh(struct dhd_info *dhd, char *buf)
+{
+ ssize_t ret = 0;
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
+ return ret;
+ }
+
+ ret = scnprintf(buf, PAGE_SIZE - 1, "%d\n", agg_h2d_db_inflight_thresh);
+ return ret;
+}
+
+static ssize_t
+set_agg_h2d_db_inflight_thresh(struct dhd_info *dhd, const char *buf, size_t count)
+{
+ uint32 val;
+
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
+ return count;
+ }
+
+ val = bcm_atoi(buf);
+
+ agg_h2d_db_inflight_thresh = val;
+ DHD_ERROR(("%s: agg_h2d_db_timeout: %d\n", __FUNCTION__, agg_h2d_db_inflight_thresh));
+ return count;
+}
+
+static struct dhd_attr dhd_attr_agg_h2d_db_inflight_thresh =
+__ATTR(agg_h2d_db_inflight_thresh, 0660, show_agg_h2d_db_inflight_thresh,
+ set_agg_h2d_db_inflight_thresh);
+
+static ssize_t
+show_agg_h2d_db_timeout(struct dhd_info *dhd, char *buf)
+{
+ ssize_t ret = 0;
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
+ return ret;
+ }
+
+ ret = scnprintf(buf, PAGE_SIZE - 1, "%d\n", agg_h2d_db_timeout);
+ return ret;
+}
+
+static ssize_t
+set_agg_h2d_db_timeout(struct dhd_info *dhd, const char *buf, size_t count)
+{
+ uint32 val;
+
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
+ return count;
+ }
+
+ val = bcm_atoi(buf);
+
+ agg_h2d_db_timeout = val;
+ DHD_ERROR(("%s: agg_h2d_db_timeout: %d\n", __FUNCTION__, agg_h2d_db_timeout));
+ return count;
+}
+
+static struct dhd_attr dhd_attr_agg_h2d_db_timeout =
+__ATTR(agg_h2d_db_timeout, 0660, show_agg_h2d_db_timeout, set_agg_h2d_db_timeout);
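+/*
+ * Example tuning session for the aggregated H2D doorbell knobs (paths and
+ * values illustrative; the units are defined by the bus layer):
+ *   echo 1 > /sys/wifi/agg_h2d_db_enab
+ *   echo 50 > /sys/wifi/agg_h2d_db_inflight_thresh
+ *   echo 200 > /sys/wifi/agg_h2d_db_timeout
+ */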
+#endif /* AGG_H2D_DB */
+/*
+ * Dumps the lock and other state information useful for debugging.
+ */
+static ssize_t
+dhd_debug_dump_stateinfo(struct dhd_info *dhd, char *buf)
+{
+ u32 buf_size = PAGE_SIZE - 1;
+ u8 *ptr = buf;
+ ssize_t len = 0;
+
+ len += scnprintf(ptr, buf_size, "[DHD]\nlock info:\n");
+#ifdef BT_OVER_SDIO
+ len += scnprintf((ptr+len), (buf_size-len), "bus_user_lock:\n",
+ mutex_is_locked(&dhd->bus_user_lock));
+#endif /* BT_OVER_SDIO */
+
+#ifdef WL_CFG80211
+ len += wl_cfg80211_debug_data_dump(dhd_linux_get_primary_netdev(&dhd->pub),
+ (ptr + len), (buf_size - len));
+#endif /* WL_CFG80211 */
+
+ /* Ensure buffer ends with null char */
+ buf[len] = '\0';
+ return len + 1;
+}
+static struct dhd_attr dhd_attr_dhd_debug_data =
+__ATTR(dump_stateinfo, 0660, dhd_debug_dump_stateinfo, NULL);
+
+#ifdef WL_CFG80211
+#define _S(x) #x
+#define S(x) _S(x)
+#define SUBLOGLEVEL 20
+#define SUBLOGLEVELZ ((SUBLOGLEVEL) + (1))
+static const struct {
+ u32 log_level;
+ char *sublogname;
+} sublogname_map[] = {
+ {WL_DBG_ERR, "ERR"},
+ {WL_DBG_INFO, "INFO"},
+ {WL_DBG_DBG, "DBG"},
+ {WL_DBG_SCAN, "SCAN"},
+ {WL_DBG_TRACE, "TRACE"},
+ {WL_DBG_P2P_ACTION, "P2PACTION"}
+};
+
+/**
+* Format : echo "SCAN:1 DBG:1" > /sys/wifi/wl_dbg_level
+* to turn on SCAN and DBG log.
+* To turn off SCAN partially, echo "SCAN:0" > /sys/wifi/wl_dbg_level
+* To see current setting of debug level,
+* cat /sys/wifi/wl_dbg_level
+*/
+static ssize_t
+show_wl_debug_level(struct dhd_info *dhd, char *buf)
+{
+ char *param;
+ char tbuf[SUBLOGLEVELZ * ARRAYSIZE(sublogname_map)];
+ uint i;
+ ssize_t ret = 0;
+
+ bzero(tbuf, sizeof(tbuf));
+ param = &tbuf[0];
+ for (i = 0; i < ARRAYSIZE(sublogname_map); i++) {
+ param += snprintf(param, sizeof(tbuf) - 1, "%s:%d ",
+ sublogname_map[i].sublogname,
+ (wl_dbg_level & sublogname_map[i].log_level) ? 1 : 0);
+ }
+ ret = scnprintf(buf, PAGE_SIZE - 1, "%s \n", tbuf);
+ return ret;
+}
+
+static ssize_t
+set_wl_debug_level(struct dhd_info *dhd, const char *buf, size_t count)
+{
+ char tbuf[SUBLOGLEVELZ * ARRAYSIZE(sublogname_map)], sublog[SUBLOGLEVELZ];
+ char *params, *token, *colon;
+ uint i, tokens, log_on = 0;
+ size_t minsize = min_t(size_t, (sizeof(tbuf) - 1), count);
+
+ bzero(tbuf, sizeof(tbuf));
+ bzero(sublog, sizeof(sublog));
+ strlcpy(tbuf, buf, minsize);
+
+ DHD_INFO(("current wl_dbg_level %d \n", wl_dbg_level));
+
+ tbuf[minsize] = '\0';
+ params = &tbuf[0];
+ colon = strchr(params, '\n');
+ if (colon != NULL)
+ *colon = '\0';
+ while ((token = strsep(&params, " ")) != NULL) {
+ bzero(sublog, sizeof(sublog));
+ if (token == NULL || !*token)
+ break;
+ colon = strchr(token, ':');
+ if (colon != NULL) {
+ *colon = ' ';
+ }
+ tokens = sscanf(token, "%"S(SUBLOGLEVEL)"s %u", sublog, &log_on);
+ if (colon != NULL)
+ *colon = ':';
+
+ if (tokens == 2) {
+ for (i = 0; i < ARRAYSIZE(sublogname_map); i++) {
+ if (!strncmp(sublog, sublogname_map[i].sublogname,
+ strlen(sublogname_map[i].sublogname))) {
+ if (log_on)
+ wl_dbg_level |=
+ (sublogname_map[i].log_level);
+ else
+ wl_dbg_level &=
+ ~(sublogname_map[i].log_level);
+ }
+ }
+ } else
+ WL_ERR(("%s: can't parse '%s' as a "
+ "SUBMODULE:LEVEL (%d tokens)\n",
+ tbuf, token, tokens));
+
+ }
+ DHD_INFO(("changed wl_dbg_level %d \n", wl_dbg_level));
+ return count;
+}
+
+static struct dhd_attr dhd_attr_wl_dbg_level =
+__ATTR(wl_dbg_level, 0660, show_wl_debug_level, set_wl_debug_level);
+#endif /* WL_CFG80211 */
+
+/* Attribute object that gets registered with "wifi" kobject tree */
+static struct attribute *default_file_attrs[] = {
+#ifdef DHD_MAC_ADDR_EXPORT
+ &dhd_attr_macaddr.attr,
+#endif /* DHD_MAC_ADDR_EXPORT */
+#ifdef DHD_EXPORT_CNTL_FILE
+#ifdef DHD_FW_COREDUMP
+ &dhd_attr_memdump.attr,
+#endif /* DHD_FW_COREDUMP */
+#ifdef BCMASSERT_LOG
+ &dhd_attr_assert.attr,
+#endif /* BCMASSERT_LOG */
+#ifdef WRITE_WLANINFO
+ &dhd_attr_wifiver.attr,
+#endif /* WRITE_WLANINFO */
+#if defined(USE_CID_CHECK) || defined(USE_DIRECT_VID_TAG)
+ &dhd_attr_cidinfo.attr,
+#endif /* USE_CID_CHECK || USE_DIRECT_VID_TAG */
+#ifdef GEN_SOFTAP_INFO_FILE
+ &dhd_attr_softapinfo.attr,
+#endif /* GEN_SOFTAP_INFO_FILE */
+#ifdef MIMO_ANT_SETTING
+ &dhd_attr_antinfo.attr,
+#endif /* MIMO_ANT_SETTING */
+#ifdef DHD_PM_CONTROL_FROM_FILE
+ &dhd_attr_pminfo.attr,
+#endif /* DHD_PM_CONTROL_FROM_FILE */
+#ifdef LOGTRACE_FROM_FILE
+ &dhd_attr_logtraceinfo.attr,
+#endif /* LOGTRACE_FROM_FILE */
+#ifdef USE_WFA_CERT_CONF
+#ifdef BCMSDIO
+ &dhd_attr_bustxglom.attr,
+#endif /* BCMSDIO */
+ &dhd_attr_roamoff.attr,
+#ifdef USE_WL_FRAMEBURST
+ &dhd_attr_frameburst.attr,
+#endif /* USE_WL_FRAMEBURST */
+#ifdef USE_WL_TXBF
+ &dhd_attr_txbf.attr,
+#endif /* USE_WL_TXBF */
+#ifdef PROP_TXSTATUS
+ &dhd_attr_proptx.attr,
+#endif /* PROP_TXSTATUS */
+#endif /* USE_WFA_CERT_CONF */
+#endif /* DHD_EXPORT_CNTL_FILE */
+#if defined(DHD_ADPS_BAM_EXPORT) && defined(WL_BAM)
+ &dhd_attr_adps_bam.attr,
+#endif /* DHD_ADPS_BAM_EXPORT && WL_BAM */
+#ifdef DHD_SEND_HANG_PRIVCMD_ERRORS
+ &dhd_attr_hang_privcmd_err.attr,
+#endif /* DHD_SEND_HANG_PRIVCMD_ERRORS */
+#if defined(SHOW_LOGTRACE)
+ &dhd_attr_control_logtrace.attr,
+#endif /* SHOW_LOGTRACE */
+#if defined(DHD_TRACE_WAKE_LOCK)
+ &dhd_attr_wklock.attr,
+#endif
+#ifdef DHD_LOG_DUMP
+ &dhd_attr_logdump_periodic_flush.attr,
+ &dhd_attr_logdump_ecntr.attr,
+#endif
+ &dhd_attr_ecounters.attr,
+#ifdef DHD_QOS_ON_SOCK_FLOW
+ &dhd_attr_sock_qos_onoff.attr,
+ &dhd_attr_sock_qos_stats.attr,
+ &dhd_attr_sock_qos_upgrade.attr,
+ &dhd_attr_sock_qos_numfl_upgrd_thresh.attr,
+ &dhd_attr_sock_qos_avgpktsize_thresh.attr,
+ &dhd_attr_sock_qos_numpkts_thresh.attr,
+ &dhd_attr_sock_qos_detectcnt_thresh.attr,
+ &dhd_attr_sock_qos_detectcnt_upgrd_thresh.attr,
+ &dhd_attr_sock_qos_maxfl.attr,
+#ifdef DHD_QOS_ON_SOCK_FLOW_UT
+ &dhd_attr_sock_qos_unit_test.attr,
+#endif /* DHD_QOS_ON_SOCK_FLOW_UT */
+#endif /* DHD_QOS_ON_SOCK_FLOW */
+#ifdef DHD_SSSR_DUMP
+ &dhd_attr_sssr_enab.attr,
+ &dhd_attr_fis_enab.attr,
+#endif /* DHD_SSSR_DUMP */
+ &dhd_attr_firmware_path.attr,
+ &dhd_attr_nvram_path.attr,
+#if defined(CUSTOM_CONTROL_HE_ENAB)
+ &dhd_attr_control_he_enab.attr,
+#endif /* CUSTOM_CONTROL_HE_ENAB */
+#if defined(WLAN_ACCEL_BOOT)
+ &dhd_attr_wl_accel_force_reg_on.attr,
+#endif /* WLAN_ACCEL_BOOT */
+#ifdef PWRSTATS_SYSFS
+ &dhd_attr_pwrstats_path.attr,
+#endif /* PWRSTATS_SYSFS */
+#if defined(WL_CFG80211)
+ &dhd_attr_wl_dbg_level.attr,
+#endif /* WL_CFG80211 */
+ &dhd_attr_dhd_debug_data.attr,
+#if defined(AGG_H2D_DB)
+ &dhd_attr_agg_h2d_db_enab.attr,
+ &dhd_attr_agg_h2d_db_inflight_thresh.attr,
+ &dhd_attr_agg_h2d_db_timeout.attr,
+#endif /* AGG_H2D_DB */
+ NULL
+};
+
+/*
+ * wifi kobject show function; the "attr" argument specifies which
+ * node under "sys/wifi" the show function is called for.
+ */
+static ssize_t dhd_show(struct kobject *kobj, struct attribute *attr, char *buf)
+{
+ dhd_info_t *dhd;
+ struct dhd_attr *d_attr;
+ int ret;
+
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ dhd = to_dhd(kobj);
+ d_attr = to_attr(attr);
+ GCC_DIAGNOSTIC_POP();
+
+ if (d_attr->show)
+ ret = d_attr->show(dhd, buf);
+ else
+ ret = -EIO;
+
+ return ret;
+}
+
+/*
+ * wifi kobject store function; the "attr" argument specifies which
+ * node under "sys/wifi" the store function is called for.
+ */
+static ssize_t dhd_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
+{
+ dhd_info_t *dhd;
+ struct dhd_attr *d_attr;
+ int ret;
+
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ dhd = to_dhd(kobj);
+ d_attr = to_attr(attr);
+ GCC_DIAGNOSTIC_POP();
+
+ if (d_attr->store)
+ ret = d_attr->store(dhd, buf, count);
+ else
+ ret = -EIO;
+
+ return ret;
+}
+
+static struct sysfs_ops dhd_sysfs_ops = {
+ .show = dhd_show,
+ .store = dhd_store,
+};
+
+static struct kobj_type dhd_ktype = {
+ .sysfs_ops = &dhd_sysfs_ops,
+ .default_attrs = default_file_attrs,
+};
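+/*
+ * A minimal sketch of how dhd_ktype is typically wired up (the actual
+ * registration lives elsewhere in this file; names are illustrative):
+ *
+ *   ret = kobject_init_and_add(&dhd->dhd_kobj, &dhd_ktype, NULL, "wifi");
+ *   if (ret)
+ *       kobject_put(&dhd->dhd_kobj);
+ *
+ * Once registered, every entry of default_file_attrs[] appears as a file
+ * under the kobject and is routed through dhd_show()/dhd_store() above.
+ */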
+
+#ifdef CSI_SUPPORT
+/* Function to show current ccode */
+static ssize_t read_csi_data(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count)
+{
+ dhd_info_t *dhd = to_dhd(kobj);
+ int n = 0;
+
+ n = dhd_csi_dump_list(&dhd->pub, buf);
+ DHD_INFO(("Dump data to file, size %d\n", n));
+ dhd_csi_clean_list(&dhd->pub);
+
+ return n;
+}
+
+static struct bin_attribute dhd_attr_csi = {
+ .attr = { .name = "csi" BUS_TYPE,
+ .mode = 0660, },
+ .size = MAX_CSI_FILESZ,
+ .read = read_csi_data,
+};
+#endif /* CSI_SUPPORT */
+
+/*
+ * sysfs for dhd_lb
+ */
+#ifdef DHD_LB
+#if defined(DHD_LB_TXP)
+static ssize_t
+show_lbtxp(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+ unsigned long onoff;
+ dhd_info_t *dhd = (dhd_info_t *)dev;
+
+ onoff = atomic_read(&dhd->lb_txp_active);
+ ret = scnprintf(buf, PAGE_SIZE - 1, "%lu \n",
+ onoff);
+ return ret;
+}
+
+static ssize_t
+lbtxp_onoff(struct dhd_info *dev, const char *buf, size_t count)
+{
+ unsigned long onoff;
+ dhd_info_t *dhd = (dhd_info_t *)dev;
+ int i;
+
+ onoff = bcm_strtoul(buf, NULL, 10);
+
+ sscanf(buf, "%lu", &onoff);
+ if (onoff != 0 && onoff != 1) {
+ return -EINVAL;
+ }
+ atomic_set(&dhd->lb_txp_active, onoff);
+
+ /* Since the scheme is changed clear the counters */
+ for (i = 0; i < NR_CPUS; i++) {
+ DHD_LB_STATS_CLR(dhd->txp_percpu_run_cnt[i]);
+ DHD_LB_STATS_CLR(dhd->tx_start_percpu_run_cnt[i]);
+ }
+
+ return count;
+}
+
+static struct dhd_attr dhd_attr_lbtxp =
+ __ATTR(lbtxp, 0660, show_lbtxp, lbtxp_onoff);
+#endif /* DHD_LB_TXP */
+
+#if defined(DHD_LB_RXP)
+static ssize_t
+show_lbrxp(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+ unsigned long onoff;
+ dhd_info_t *dhd = (dhd_info_t *)dev;
+
+ onoff = atomic_read(&dhd->lb_rxp_active);
+ ret = scnprintf(buf, PAGE_SIZE - 1, "%lu \n",
+ onoff);
+ return ret;
+}
+
+static ssize_t
+lbrxp_onoff(struct dhd_info *dev, const char *buf, size_t count)
+{
+ unsigned long onoff;
+ dhd_info_t *dhd = (dhd_info_t *)dev;
+
+ onoff = bcm_strtoul(buf, NULL, 10);
+
+ sscanf(buf, "%lu", &onoff);
+ if (onoff != 0 && onoff != 1) {
+ return -EINVAL;
+ }
+ atomic_set(&dhd->lb_rxp_active, onoff);
+
+ return count;
+}
+static struct dhd_attr dhd_attr_lbrxp =
+ __ATTR(lbrxp, 0660, show_lbrxp, lbrxp_onoff);
+
+static ssize_t
+get_lb_rxp_stop_thr(struct dhd_info *dev, char *buf)
+{
+ dhd_info_t *dhd = (dhd_info_t *)dev;
+ dhd_pub_t *dhdp;
+ ssize_t ret = 0;
+
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
+ return -EINVAL;
+ }
+ dhdp = &dhd->pub;
+
+ ret = scnprintf(buf, PAGE_SIZE - 1, "%u \n",
+ (dhdp->lb_rxp_stop_thr / D2HRING_RXCMPLT_MAX_ITEM));
+ return ret;
+}
+
+#define ONE_GB (1024 * 1024 * 1024)
+
+static ssize_t
+set_lb_rxp_stop_thr(struct dhd_info *dev, const char *buf, size_t count)
+{
+ dhd_info_t *dhd = (dhd_info_t *)dev;
+ dhd_pub_t *dhdp;
+ uint32 lb_rxp_stop_thr;
+
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
+ return -EINVAL;
+ }
+ dhdp = &dhd->pub;
+
+ lb_rxp_stop_thr = bcm_strtoul(buf, NULL, 10);
+ sscanf(buf, "%u", &lb_rxp_stop_thr);
+
+ /* disable lb_rxp flow ctrl */
+ if (lb_rxp_stop_thr == 0) {
+ dhdp->lb_rxp_stop_thr = 0;
+ dhdp->lb_rxp_strt_thr = 0;
+ atomic_set(&dhd->pub.lb_rxp_flow_ctrl, FALSE);
+ return count;
+ }
+ /* 1. by the time lb_rxp_stop_thr comes into the picture,
+ * the DHD RX path should not consume more than 1GB
+ * 2. lb_rxp_stop_thr should always be greater than dhdp->lb_rxp_strt_thr
+ */
+ if (((lb_rxp_stop_thr *
+ D2HRING_RXCMPLT_MAX_ITEM *
+ dhd_prot_get_rxbufpost_sz(dhdp)) > ONE_GB) ||
+ (lb_rxp_stop_thr <= (dhdp->lb_rxp_strt_thr / D2HRING_RXCMPLT_MAX_ITEM))) {
+ return -EINVAL;
+ }
+
+ dhdp->lb_rxp_stop_thr = (D2HRING_RXCMPLT_MAX_ITEM * lb_rxp_stop_thr);
+ return count;
+}
+
+static struct dhd_attr dhd_attr_lb_rxp_stop_thr =
+ __ATTR(lbrxp_stop_thr, 0660, get_lb_rxp_stop_thr, set_lb_rxp_stop_thr);
+
+static ssize_t
+get_lb_rxp_strt_thr(struct dhd_info *dev, char *buf)
+{
+ dhd_info_t *dhd = (dhd_info_t *)dev;
+ dhd_pub_t *dhdp;
+ ssize_t ret = 0;
+
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
+ return -EINVAL;
+ }
+ dhdp = &dhd->pub;
+
+ ret = scnprintf(buf, PAGE_SIZE - 1, "%u \n",
+ (dhdp->lb_rxp_strt_thr / D2HRING_RXCMPLT_MAX_ITEM));
+ return ret;
+}
+
+static ssize_t
+set_lb_rxp_strt_thr(struct dhd_info *dev, const char *buf, size_t count)
+{
+ dhd_info_t *dhd = (dhd_info_t *)dev;
+ dhd_pub_t *dhdp;
+ uint32 lb_rxp_strt_thr;
+
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
+ return -EINVAL;
+ }
+ dhdp = &dhd->pub;
+
+ lb_rxp_strt_thr = bcm_strtoul(buf, NULL, 10);
+ sscanf(buf, "%u", &lb_rxp_strt_thr);
+
+ /* disable lb_rxp flow ctrl */
+ if (lb_rxp_strt_thr == 0) {
+ dhdp->lb_rxp_strt_thr = 0;
+ dhdp->lb_rxp_stop_thr = 0;
+ atomic_set(&dhd->pub.lb_rxp_flow_ctrl, FALSE);
+ return count;
+ }
+ /* must be less than dhdp->lb_rxp_stop_thr (the zero case returned above) */
+ if (lb_rxp_strt_thr >= (dhdp->lb_rxp_stop_thr / D2HRING_RXCMPLT_MAX_ITEM)) {
+ return -EINVAL;
+ }
+ dhdp->lb_rxp_strt_thr = (D2HRING_RXCMPLT_MAX_ITEM * lb_rxp_strt_thr);
+ return count;
+}
+static struct dhd_attr dhd_attr_lb_rxp_strt_thr =
+ __ATTR(lbrxp_strt_thr, 0660, get_lb_rxp_strt_thr, set_lb_rxp_strt_thr);
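+/*
+ * Worked example for the two thresholds above: values are written in
+ * units of D2HRING_RXCMPLT_MAX_ITEM ring entries, e.g.
+ *   echo 200 > /sys/wifi/lbrxp_stop_thr
+ *   echo 50 > /sys/wifi/lbrxp_strt_thr
+ * stops RX load-balancing intake at 200 rings' worth of pending work and
+ * restarts it at 50; writing 0 to either node disables the flow control.
+ */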
+
+#endif /* DHD_LB_RXP */
+
+static ssize_t
+show_candidacy_override(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+
+ ret = scnprintf(buf, PAGE_SIZE - 1,
+ "%d\n", (int)dev->dhd_lb_candidacy_override);
+ return ret;
+}
+
+static ssize_t
+set_candidacy_override(struct dhd_info *dev, const char *buf, size_t count)
+{
+ int val = 0;
+ val = bcm_atoi(buf);
+
+ if (val > 0) {
+ dev->dhd_lb_candidacy_override = TRUE;
+ } else {
+ dev->dhd_lb_candidacy_override = FALSE;
+ }
+
+ DHD_ERROR(("set dhd_lb_candidacy_override %d\n", dev->dhd_lb_candidacy_override));
+ return count;
+}
+
+static struct dhd_attr dhd_candidacy_override =
+__ATTR(candidacy_override, 0660, show_candidacy_override, set_candidacy_override);
+
+static ssize_t
+show_primary_mask(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+
+ ret = scnprintf(buf, PAGE_SIZE - 1,
+ "%02lx\n", *cpumask_bits(dev->cpumask_primary));
+ return ret;
+}
+
+static ssize_t
+set_primary_mask(struct dhd_info *dev, const char *buf, size_t count)
+{
+ int ret;
+
+ cpumask_var_t primary_mask;
+
+ if (!alloc_cpumask_var(&primary_mask, GFP_KERNEL)) {
+ DHD_ERROR(("Can't allocate cpumask vars\n"));
+ return count;
+ }
+
+ cpumask_clear(primary_mask);
+ ret = cpumask_parse(buf, primary_mask);
+ if (ret < 0) {
+ DHD_ERROR(("Setting cpumask failed ret = %d\n", ret));
+ free_cpumask_var(primary_mask);
+ return count;
+ }
+
+ cpumask_clear(dev->cpumask_primary);
+ cpumask_or(dev->cpumask_primary, dev->cpumask_primary, primary_mask);
+ free_cpumask_var(primary_mask);
+
+ DHD_ERROR(("set cpumask results cpumask_primary 0x%2lx\n",
+ *cpumask_bits(dev->cpumask_primary)));
+
+ dhd_select_cpu_candidacy(dev);
+ return count;
+}
+
+static struct dhd_attr dhd_primary_mask =
+__ATTR(primary_mask, 0660, show_primary_mask, set_primary_mask);
+
+static ssize_t
+show_secondary_mask(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+
+ ret = scnprintf(buf, PAGE_SIZE - 1,
+ "%02lx\n", *cpumask_bits(dev->cpumask_secondary));
+ return ret;
+}
+
+static ssize_t
+set_secondary_mask(struct dhd_info *dev, const char *buf, size_t count)
+{
+ int ret;
+
+ cpumask_var_t secondary_mask;
+
+ if (!alloc_cpumask_var(&secondary_mask, GFP_KERNEL)) {
+ DHD_ERROR(("Can't allocate cpumask vars\n"));
+ return count;
+ }
+
+ cpumask_clear(secondary_mask);
+ ret = cpumask_parse(buf, secondary_mask);
+ if (ret < 0) {
+ DHD_ERROR(("Setting cpumask failed ret = %d\n", ret));
+ free_cpumask_var(secondary_mask);
+ return count;
+ }
+
+ cpumask_clear(dev->cpumask_secondary);
+ cpumask_or(dev->cpumask_secondary, dev->cpumask_secondary, secondary_mask);
+ free_cpumask_var(secondary_mask);
+
+ DHD_ERROR(("set cpumask results cpumask_secondary 0x%2lx\n",
+ *cpumask_bits(dev->cpumask_secondary)));
+
+ dhd_select_cpu_candidacy(dev);
+
+ return count;
+}
+
+static struct dhd_attr dhd_secondary_mask =
+__ATTR(secondary_mask, 0660, show_secondary_mask, set_secondary_mask);
+
+static ssize_t
+show_rx_cpu(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+
+ ret = scnprintf(buf, PAGE_SIZE - 1, "%d\n", atomic_read(&dev->rx_napi_cpu));
+ return ret;
+}
+
+static ssize_t
+set_rx_cpu(struct dhd_info *dev, const char *buf, size_t count)
+{
+ uint32 val;
+
+ if (!dev->dhd_lb_candidacy_override) {
+ DHD_ERROR(("dhd_lb_candidacy_override is required %d\n",
+ dev->dhd_lb_candidacy_override));
+ return count;
+ }
+
+ val = (uint32)bcm_atoi(buf);
+ if (val >= nr_cpu_ids)
+ {
+ DHD_ERROR(("%s : can't set a value beyond the number of cpus, val = %u\n",
+ __FUNCTION__, val));
+ return count;
+ }
+
+ atomic_set(&dev->rx_napi_cpu, val);
+ DHD_ERROR(("%s: rx_napi_cpu = %d\n", __FUNCTION__, atomic_read(&dev->rx_napi_cpu)));
+ return count;
+}
+
+static struct dhd_attr dhd_rx_cpu =
+__ATTR(rx_cpu, 0660, show_rx_cpu, set_rx_cpu);
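+/*
+ * Illustrative usage (path assumed, as in the lbrxp note above): pinning
+ * the RX NAPI job requires the candidacy override to be enabled first:
+ *   echo 1 > /sys/wifi/lb/candidacy_override
+ *   echo 3 > /sys/wifi/lb/rx_cpu
+ */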
+
+static ssize_t
+show_tx_cpu(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+
+ ret = scnprintf(buf, PAGE_SIZE - 1, "%d\n", atomic_read(&dev->tx_cpu));
+ return ret;
+}
+
+static ssize_t
+set_tx_cpu(struct dhd_info *dev, const char *buf, size_t count)
+{
+ uint32 val;
+
+ if (!dev->dhd_lb_candidacy_override) {
+ DHD_ERROR(("dhd_lb_candidacy_override is required %d\n",
+ dev->dhd_lb_candidacy_override));
+ return count;
+ }
+
+ val = (uint32)bcm_atoi(buf);
+ if (val >= nr_cpu_ids)
+ {
+ DHD_ERROR(("%s : can't set a value beyond the number of cpus, val = %u\n",
+ __FUNCTION__, val));
+ return count;
+ }
+
+ atomic_set(&dev->tx_cpu, val);
+ DHD_ERROR(("%s: tx_cpu = %d\n", __FUNCTION__, atomic_read(&dev->tx_cpu)));
+ return count;
+}
+
+static struct dhd_attr dhd_tx_cpu =
+__ATTR(tx_cpu, 0660, show_tx_cpu, set_tx_cpu);
+
+static struct attribute *debug_lb_attrs[] = {
+#if defined(DHD_LB_TXP)
+ &dhd_attr_lbtxp.attr,
+#endif /* DHD_LB_TXP */
+#if defined(DHD_LB_RXP)
+ &dhd_attr_lbrxp.attr,
+ &dhd_attr_lb_rxp_stop_thr.attr,
+ &dhd_attr_lb_rxp_strt_thr.attr,
+#endif /* DHD_LB_RXP */
+ &dhd_candidacy_override.attr,
+ &dhd_primary_mask.attr,
+ &dhd_secondary_mask.attr,
+ &dhd_rx_cpu.attr,
+ &dhd_tx_cpu.attr,
+ NULL
+};
+
+#define to_dhd_lb(k) container_of(k, struct dhd_info, dhd_lb_kobj)
+
+/*
+ * wifi/lb kobject show function; the "attr" argument specifies which
+ * node under "sys/wifi/lb" the show function is called for.
+ */
+static ssize_t dhd_lb_show(struct kobject *kobj, struct attribute *attr, char *buf)
+{
+ dhd_info_t *dhd;
+ struct dhd_attr *d_attr;
+ int ret;
+
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ dhd = to_dhd_lb(kobj);
+ d_attr = to_attr(attr);
+ GCC_DIAGNOSTIC_POP();
+
+ if (d_attr->show)
+ ret = d_attr->show(dhd, buf);
+ else
+ ret = -EIO;
+
+ return ret;
+}
+
+/*
+ * wifi/lb kobject store function; the "attr" argument specifies which
+ * node under "sys/wifi/lb" the store function is called for.
+ */
+static ssize_t dhd_lb_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
+{
+ dhd_info_t *dhd;
+ struct dhd_attr *d_attr;
+ int ret;
+
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ dhd = to_dhd_lb(kobj);
+ d_attr = to_attr(attr);
+ GCC_DIAGNOSTIC_POP();
+
+ if (d_attr->store)
+ ret = d_attr->store(dhd, buf, count);
+ else
+ ret = -EIO;
+
+ return ret;
+}
+
+static struct sysfs_ops dhd_sysfs_lb_ops = {
+ .show = dhd_lb_show,
+ .store = dhd_lb_store,
+};
+
+static struct kobj_type dhd_lb_ktype = {
+ .sysfs_ops = &dhd_sysfs_lb_ops,
+ .default_attrs = debug_lb_attrs,
+};
+#endif /* DHD_LB */
+
+/* Create a kobject and attach to sysfs interface */
+int dhd_sysfs_init(dhd_info_t *dhd)
+{
+ int ret = -1;
+
+ if (dhd == NULL) {
+ DHD_ERROR(("%s(): dhd is NULL \r\n", __FUNCTION__));
+ return ret;
+ }
+
+ /* Initialize the kobject */
+ ret = kobject_init_and_add(&dhd->dhd_kobj, &dhd_ktype, NULL, "wifi" BUS_TYPE);
+ if (ret) {
+ kobject_put(&dhd->dhd_kobj);
+ DHD_ERROR(("%s(): Unable to allocate kobject \r\n", __FUNCTION__));
+ return ret;
+ }
+
+#ifdef CSI_SUPPORT
+ ret = sysfs_create_bin_file(&dhd->dhd_kobj, &dhd_attr_csi);
+ if (ret) {
+ DHD_ERROR(("%s: can't create %s\n", __FUNCTION__, dhd_attr_csi.attr.name));
+ kobject_put(&dhd->dhd_kobj);
+ return ret;
+ }
+#endif /* CSI_SUPPORT */
+
+ /*
+ * We are always responsible for sending the uevent that the kobject
+ * was added to the system.
+ */
+ kobject_uevent(&dhd->dhd_kobj, KOBJ_ADD);
+
+#ifdef DHD_LB
+ ret = kobject_init_and_add(&dhd->dhd_lb_kobj,
+ &dhd_lb_ktype, &dhd->dhd_kobj, "lb");
+ if (ret) {
+ kobject_put(&dhd->dhd_lb_kobj);
+ DHD_ERROR(("%s(): Unable to allocate kobject \r\n", __FUNCTION__));
+ return ret;
+ }
+
+ kobject_uevent(&dhd->dhd_lb_kobj, KOBJ_ADD);
+#endif /* DHD_LB */
+
+ return ret;
+}
+
+/* Done with the kobject and detach the sysfs interface */
+void dhd_sysfs_exit(dhd_info_t *dhd)
+{
+ if (dhd == NULL) {
+ DHD_ERROR(("%s(): dhd is NULL \r\n", __FUNCTION__));
+ return;
+ }
+
+#ifdef DHD_LB
+ kobject_put(&dhd->dhd_lb_kobj);
+#endif /* DHD_LB */
+
+ /* Release the kobject */
+ if (dhd->dhd_kobj.state_initialized)
+ kobject_put(&dhd->dhd_kobj);
+}
+
+#ifdef DHD_SUPPORT_HDM
+static ssize_t
+hdm_load_module(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ int val = bcm_atoi(buf);
+
+ if (val == 1) {
+ DHD_ERROR(("%s : Load module from the hdm %d\n", __FUNCTION__, val));
+ dhd_module_init_hdm();
+ } else {
+ DHD_ERROR(("Module load triggered with invalid value : %d\n", val));
+ }
+
+ return count;
+}
+
+static struct kobj_attribute hdm_wlan_attr =
+ __ATTR(hdm_wlan_loader, 0660, NULL, hdm_load_module);
+
+void
+dhd_hdm_wlan_sysfs_init(void)
+{
+ DHD_ERROR(("export hdm_wlan_loader\n"));
+ if (sysfs_create_file(kernel_kobj, &hdm_wlan_attr.attr)) {
+ DHD_ERROR(("export hdm_load failed\n"));
+ }
+}
+
+void
+dhd_hdm_wlan_sysfs_deinit(struct work_struct *work)
+{
+ sysfs_remove_file(kernel_kobj, &hdm_wlan_attr.attr);
+}
+#endif /* DHD_SUPPORT_HDM */
diff --git a/bcmdhd.101.10.361.x/dhd_linux_lb.c b/bcmdhd.101.10.361.x/dhd_linux_lb.c
new file mode 100755
index 0000000..69b3081
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_linux_lb.c
@@ -0,0 +1,1402 @@
+/*
+ * Broadcom Dongle Host Driver (DHD), Linux-specific network interface
+ * Basically selected code segments from usb-cdc.c and usb-rndis.c
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+#include <dhd_linux_priv.h>
+
+extern dhd_pub_t* g_dhd_pub;
+
+#if defined(DHD_LB)
+
+#ifdef DHD_LB_STATS
+#define DHD_NUM_NAPI_LATENCY_ROWS (17u)
+#define DHD_NAPI_LATENCY_SIZE (sizeof(uint64) * DHD_NUM_NAPI_LATENCY_ROWS)
+#endif /* DHD_LB_STATS */
+
+#ifdef DHD_REPLACE_LOG_INFO_TO_TRACE
+#define DHD_LB_INFO DHD_TRACE
+#else
+#define DHD_LB_INFO DHD_INFO
+#endif /* DHD_REPLACE_LOG_INFO_TO_TRACE */
+
+void
+dhd_lb_set_default_cpus(dhd_info_t *dhd)
+{
+ /* Default CPU allocation for the jobs */
+ atomic_set(&dhd->rx_napi_cpu, 1);
+ atomic_set(&dhd->tx_cpu, 2);
+ atomic_set(&dhd->net_tx_cpu, 0);
+ atomic_set(&dhd->dpc_cpu, 0);
+}
+
+void
+dhd_cpumasks_deinit(dhd_info_t *dhd)
+{
+ free_cpumask_var(dhd->cpumask_curr_avail);
+ free_cpumask_var(dhd->cpumask_primary);
+ free_cpumask_var(dhd->cpumask_primary_new);
+ free_cpumask_var(dhd->cpumask_secondary);
+ free_cpumask_var(dhd->cpumask_secondary_new);
+}
+
+int
+dhd_cpumasks_init(dhd_info_t *dhd)
+{
+ int id;
+ uint32 cpus, num_cpus = num_possible_cpus();
+ int ret = 0;
+
+ DHD_ERROR(("%s CPU masks primary(big)=0x%x secondary(little)=0x%x\n", __FUNCTION__,
+ DHD_LB_PRIMARY_CPUS, DHD_LB_SECONDARY_CPUS));
+
+ /* FIXME: If one alloc fails we must free_cpumask_var the previous */
+ if (!alloc_cpumask_var(&dhd->cpumask_curr_avail, GFP_KERNEL) ||
+ !alloc_cpumask_var(&dhd->cpumask_primary, GFP_KERNEL) ||
+ !alloc_cpumask_var(&dhd->cpumask_primary_new, GFP_KERNEL) ||
+ !alloc_cpumask_var(&dhd->cpumask_secondary, GFP_KERNEL) ||
+ !alloc_cpumask_var(&dhd->cpumask_secondary_new, GFP_KERNEL)) {
+ DHD_ERROR(("%s Failed to init cpumasks\n", __FUNCTION__));
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ cpumask_copy(dhd->cpumask_curr_avail, cpu_online_mask);
+ cpumask_clear(dhd->cpumask_primary);
+ cpumask_clear(dhd->cpumask_secondary);
+
+ if (num_cpus > 32) {
+ DHD_ERROR(("%s max supported cpus is 32, %d is too many\n", __FUNCTION__, num_cpus));
+ ASSERT(0);
+ }
+
+ cpus = DHD_LB_PRIMARY_CPUS;
+ for (id = 0; id < num_cpus; id++) {
+ if (isset(&cpus, id))
+ cpumask_set_cpu(id, dhd->cpumask_primary);
+ }
+
+ cpus = DHD_LB_SECONDARY_CPUS;
+ for (id = 0; id < num_cpus; id++) {
+ if (isset(&cpus, id))
+ cpumask_set_cpu(id, dhd->cpumask_secondary);
+ }
+
+ return ret;
+fail:
+ dhd_cpumasks_deinit(dhd);
+ return ret;
+}
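+/*
+ * Example with a hypothetical build config: DHD_LB_PRIMARY_CPUS 0xF0 and
+ * DHD_LB_SECONDARY_CPUS 0x0E on an 8-core system put CPUs 4-7 in the
+ * primary (big) set and CPUs 1-3 in the secondary (little) set, leaving
+ * CPU0 for default/boot work.
+ */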
+
+/*
+ * The CPU Candidacy Algorithm
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * The available CPUs for selection are divided into two groups
+ * Primary Set - A CPU mask that carries the First Choice CPUs
+ * Secondary Set - A CPU mask that carries the Second Choice CPUs.
+ *
+ * There are two types of jobs that need to be assigned to the CPUs,
+ * from one of the above mentioned CPU groups:
+ * 1) Rx packet processing - napi_cpu
+ * 2) Tx packet processing - tx_cpu
+ *
+ * To begin with, napi_cpu is on CPU0. Whenever a CPU goes
+ * on-line/off-line the CPU candidacy algorithm is triggered. The
+ * algorithm tries to pick the first available non-boot CPU (not CPU0)
+ * for napi_cpu.
+ */
+void dhd_select_cpu_candidacy(dhd_info_t *dhd)
+{
+ uint32 primary_available_cpus; /* count of primary available cpus */
+ uint32 secondary_available_cpus; /* count of secondary available cpus */
+ uint32 napi_cpu = 0; /* cpu selected for napi rx processing */
+ uint32 tx_cpu = 0; /* cpu selected for tx processing job */
+ uint32 dpc_cpu = atomic_read(&dhd->dpc_cpu);
+ uint32 net_tx_cpu = atomic_read(&dhd->net_tx_cpu);
+
+ cpumask_clear(dhd->cpumask_primary_new);
+ cpumask_clear(dhd->cpumask_secondary_new);
+
+ /*
+ * Now select from the primary mask. Even if a Job is
+ * already running on a CPU in secondary group, we still move
+ * to primary CPU. So no conditional checks.
+ */
+ cpumask_and(dhd->cpumask_primary_new, dhd->cpumask_primary,
+ dhd->cpumask_curr_avail);
+
+ cpumask_and(dhd->cpumask_secondary_new, dhd->cpumask_secondary,
+ dhd->cpumask_curr_avail);
+
+ /* Clear DPC cpu from new masks so that dpc cpu is not chosen for LB */
+ cpumask_clear_cpu(dpc_cpu, dhd->cpumask_primary_new);
+ cpumask_clear_cpu(dpc_cpu, dhd->cpumask_secondary_new);
+
+ /* Clear net_tx_cpu from new masks so that same is not chosen for LB */
+ cpumask_clear_cpu(net_tx_cpu, dhd->cpumask_primary_new);
+ cpumask_clear_cpu(net_tx_cpu, dhd->cpumask_secondary_new);
+
+ primary_available_cpus = cpumask_weight(dhd->cpumask_primary_new);
+
+#if defined(DHD_LB_HOST_CTRL)
+ /* Do not use primary cpus if DHD received an affinity-off cmd
+ * from the framework
+ */
+ if (primary_available_cpus > 0 && dhd->permitted_primary_cpu)
+#else
+ if (primary_available_cpus > 0)
+#endif /* DHD_LB_HOST_CTRL */
+ {
+ napi_cpu = cpumask_first(dhd->cpumask_primary_new);
+
+ /* If no further CPU is available,
+ * cpumask_next returns >= nr_cpu_ids
+ */
+ tx_cpu = cpumask_next(napi_cpu, dhd->cpumask_primary_new);
+ if (tx_cpu >= nr_cpu_ids)
+ tx_cpu = 0;
+ }
+
+ DHD_INFO(("%s After primary CPU check napi_cpu %d tx_cpu %d\n",
+ __FUNCTION__, napi_cpu, tx_cpu));
+
+ /* -- Now check for the CPUs from the secondary mask -- */
+ secondary_available_cpus = cpumask_weight(dhd->cpumask_secondary_new);
+
+ DHD_INFO(("%s Available secondary cpus %d nr_cpu_ids %d\n",
+ __FUNCTION__, secondary_available_cpus, nr_cpu_ids));
+
+ if (secondary_available_cpus > 0) {
+ /* At this point if napi_cpu is unassigned it means no CPU
+ * is online from Primary Group
+ */
+#if defined(DHD_LB_TXP_LITTLE_CORE_CTRL)
+ /* Clear tx_cpu, so that it can be picked from little core */
+ tx_cpu = 0;
+#endif /* DHD_LB_TXP_LITTLE_CORE_CTRL */
+ if (napi_cpu == 0) {
+ napi_cpu = cpumask_first(dhd->cpumask_secondary_new);
+ tx_cpu = cpumask_next(napi_cpu, dhd->cpumask_secondary_new);
+ } else if (tx_cpu == 0) {
+ tx_cpu = cpumask_first(dhd->cpumask_secondary_new);
+ }
+
+ /* If no CPU was available for tx processing, choose CPU 0 */
+ if (tx_cpu >= nr_cpu_ids)
+ tx_cpu = 0;
+ }
+
+ if ((primary_available_cpus == 0) &&
+ (secondary_available_cpus == 0)) {
+ /* No CPUs available from primary or secondary mask */
+ napi_cpu = 1;
+ tx_cpu = 2;
+ }
+
+ DHD_INFO(("%s After secondary CPU check napi_cpu %d tx_cpu %d\n",
+ __FUNCTION__, napi_cpu, tx_cpu));
+
+ ASSERT(napi_cpu < nr_cpu_ids);
+ ASSERT(tx_cpu < nr_cpu_ids);
+
+ atomic_set(&dhd->rx_napi_cpu, napi_cpu);
+ atomic_set(&dhd->tx_cpu, tx_cpu);
+
+ return;
+}
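+/*
+ * Worked example (using the hypothetical masks above): primary = {4,5,6,7},
+ * secondary = {1,2,3}, dpc_cpu = 0, net_tx_cpu = 0, all CPUs online.
+ * After masking out the dpc/net_tx CPUs, napi_cpu = cpumask_first(primary)
+ * = 4 and tx_cpu = cpumask_next(4, primary) = 5. If CPUs 4-7 go offline,
+ * the secondary set is used instead: napi_cpu = 1, tx_cpu = 2.
+ */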
+
+/*
+ * Function to handle CPU Hotplug notifications.
+ * One of the task it does is to trigger the CPU Candidacy algorithm
+ * for load balancing.
+ */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+
+int dhd_cpu_startup_callback(unsigned int cpu)
+{
+ dhd_info_t *dhd = g_dhd_pub->info;
+
+ DHD_INFO(("%s(): cpu:%d\r\n", __FUNCTION__, cpu));
+ DHD_LB_STATS_INCR(dhd->cpu_online_cnt[cpu]);
+ cpumask_set_cpu(cpu, dhd->cpumask_curr_avail);
+ dhd_select_cpu_candidacy(dhd);
+
+ return 0;
+}
+
+int dhd_cpu_teardown_callback(unsigned int cpu)
+{
+ dhd_info_t *dhd = g_dhd_pub->info;
+
+ DHD_INFO(("%s(): cpu:%d\r\n", __FUNCTION__, cpu));
+ DHD_LB_STATS_INCR(dhd->cpu_offline_cnt[cpu]);
+ cpumask_clear_cpu(cpu, dhd->cpumask_curr_avail);
+ dhd_select_cpu_candidacy(dhd);
+
+ return 0;
+}
+#else
+int
+dhd_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
+{
+ unsigned long int cpu = (unsigned long int)hcpu;
+ dhd_info_t *dhd;
+
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ dhd = container_of(nfb, dhd_info_t, cpu_notifier);
+ GCC_DIAGNOSTIC_POP();
+
+ if (!dhd || !(dhd->dhd_state & DHD_ATTACH_STATE_LB_ATTACH_DONE)) {
+ DHD_INFO(("%s(): LB data is not initialized yet.\n",
+ __FUNCTION__));
+ return NOTIFY_BAD;
+ }
+
+ /* XXX: Do we need other action types ? */
+ switch (action)
+ {
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+ DHD_LB_STATS_INCR(dhd->cpu_online_cnt[cpu]);
+ cpumask_set_cpu(cpu, dhd->cpumask_curr_avail);
+ dhd_select_cpu_candidacy(dhd);
+ break;
+
+ case CPU_DOWN_PREPARE:
+ case CPU_DOWN_PREPARE_FROZEN:
+ DHD_LB_STATS_INCR(dhd->cpu_offline_cnt[cpu]);
+ cpumask_clear_cpu(cpu, dhd->cpumask_curr_avail);
+ dhd_select_cpu_candidacy(dhd);
+ break;
+ default:
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+#endif /* LINUX_VERSION_CODE < 4.10.0 */
+
+int dhd_register_cpuhp_callback(dhd_info_t *dhd)
+{
+ int cpuhp_ret = 0;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+ cpuhp_ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "dhd",
+ dhd_cpu_startup_callback, dhd_cpu_teardown_callback);
+
+ if (cpuhp_ret < 0) {
+ DHD_ERROR(("%s(): cpuhp_setup_state failed %d RX LB won't happen \r\n",
+ __FUNCTION__, cpuhp_ret));
+ }
+#else
+ /*
+ * If we are able to initialize CPU masks, lets register to the
+ * CPU Hotplug framework to change the CPU for each job dynamically
+ * using candidacy algorithm.
+ */
+ dhd->cpu_notifier.notifier_call = dhd_cpu_callback;
+ register_hotcpu_notifier(&dhd->cpu_notifier); /* Register a callback */
+#endif /* LINUX_VERSION_CODE < 4.10.0 */
+ return cpuhp_ret;
+}
+
+int dhd_unregister_cpuhp_callback(dhd_info_t *dhd)
+{
+ int ret = 0;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+ /* Don't want to call tear down while unregistering */
+ cpuhp_remove_state_nocalls(CPUHP_AP_ONLINE_DYN);
+#else
+ if (dhd->cpu_notifier.notifier_call != NULL) {
+ unregister_cpu_notifier(&dhd->cpu_notifier);
+ }
+#endif
+ return ret;
+}
+
+#if defined(DHD_LB_STATS)
+void dhd_lb_stats_reset(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd;
+ int i, j, num_cpus = num_possible_cpus();
+
+ if (dhdp == NULL) {
+ DHD_ERROR(("%s dhd pub pointer is NULL \n",
+ __FUNCTION__));
+ return;
+ }
+
+ dhd = dhdp->info;
+ if (dhd == NULL) {
+ DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
+ return;
+ }
+
+ DHD_LB_STATS_CLR(dhd->dhd_dpc_cnt);
+ DHD_LB_STATS_CLR(dhd->napi_sched_cnt);
+
+ /* reset NAPI latency stats */
+ if (dhd->napi_latency) {
+ bzero(dhd->napi_latency, DHD_NAPI_LATENCY_SIZE);
+ }
+ /* reset NAPI per cpu stats */
+ if (dhd->napi_percpu_run_cnt) {
+ for (i = 0; i < num_cpus; i++) {
+ DHD_LB_STATS_CLR(dhd->napi_percpu_run_cnt[i]);
+ }
+ }
+
+ DHD_LB_STATS_CLR(dhd->rxc_sched_cnt);
+
+ if (dhd->rxc_percpu_run_cnt) {
+ for (i = 0; i < num_cpus; i++) {
+ DHD_LB_STATS_CLR(dhd->rxc_percpu_run_cnt[i]);
+ }
+ }
+
+ DHD_LB_STATS_CLR(dhd->txc_sched_cnt);
+
+ if (dhd->txc_percpu_run_cnt) {
+ for (i = 0; i < num_cpus; i++) {
+ DHD_LB_STATS_CLR(dhd->txc_percpu_run_cnt[i]);
+ }
+ }
+
+ if (dhd->txp_percpu_run_cnt) {
+ for (i = 0; i < num_cpus; i++) {
+ DHD_LB_STATS_CLR(dhd->txp_percpu_run_cnt[i]);
+ }
+ }
+
+ if (dhd->tx_start_percpu_run_cnt) {
+ for (i = 0; i < num_cpus; i++) {
+ DHD_LB_STATS_CLR(dhd->tx_start_percpu_run_cnt[i]);
+ }
+ }
+
+ for (j = 0; j < HIST_BIN_SIZE; j++) {
+ for (i = 0; i < num_cpus; i++) {
+ DHD_LB_STATS_CLR(dhd->napi_rx_hist[j][i]);
+ }
+ }
+
+ dhd->pub.lb_rxp_strt_thr_hitcnt = 0;
+ dhd->pub.lb_rxp_stop_thr_hitcnt = 0;
+
+ dhd->pub.lb_rxp_napi_sched_cnt = 0;
+ dhd->pub.lb_rxp_napi_complete_cnt = 0;
+ return;
+}
+
+void dhd_lb_stats_init(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd;
+ int i, j, num_cpus = num_possible_cpus();
+ int alloc_size = sizeof(uint32) * num_cpus;
+
+ if (dhdp == NULL) {
+ DHD_ERROR(("%s(): Invalid argument, dhd pub pointer is NULL \n",
+ __FUNCTION__));
+ return;
+ }
+
+ dhd = dhdp->info;
+ if (dhd == NULL) {
+ DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
+ return;
+ }
+
+ DHD_LB_STATS_CLR(dhd->dhd_dpc_cnt);
+ DHD_LB_STATS_CLR(dhd->napi_sched_cnt);
+
+ /* NAPI latency stats */
+ dhd->napi_latency = (uint64 *)MALLOCZ(dhdp->osh, DHD_NAPI_LATENCY_SIZE);
+ /* NAPI per cpu stats */
+ dhd->napi_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
+ if (!dhd->napi_percpu_run_cnt) {
+ DHD_ERROR(("%s(): napi_percpu_run_cnt malloc failed \n",
+ __FUNCTION__));
+ return;
+ }
+ for (i = 0; i < num_cpus; i++)
+ DHD_LB_STATS_CLR(dhd->napi_percpu_run_cnt[i]);
+
+ DHD_LB_STATS_CLR(dhd->rxc_sched_cnt);
+
+ dhd->rxc_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
+ if (!dhd->rxc_percpu_run_cnt) {
+ DHD_ERROR(("%s(): rxc_percpu_run_cnt malloc failed \n",
+ __FUNCTION__));
+ return;
+ }
+ for (i = 0; i < num_cpus; i++)
+ DHD_LB_STATS_CLR(dhd->rxc_percpu_run_cnt[i]);
+
+ DHD_LB_STATS_CLR(dhd->txc_sched_cnt);
+
+ dhd->txc_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
+ if (!dhd->txc_percpu_run_cnt) {
+ DHD_ERROR(("%s(): txc_percpu_run_cnt malloc failed \n",
+ __FUNCTION__));
+ return;
+ }
+ for (i = 0; i < num_cpus; i++)
+ DHD_LB_STATS_CLR(dhd->txc_percpu_run_cnt[i]);
+
+ dhd->cpu_online_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
+ if (!dhd->cpu_online_cnt) {
+ DHD_ERROR(("%s(): cpu_online_cnt malloc failed \n",
+ __FUNCTION__));
+ return;
+ }
+ for (i = 0; i < num_cpus; i++)
+ DHD_LB_STATS_CLR(dhd->cpu_online_cnt[i]);
+
+ dhd->cpu_offline_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
+ if (!dhd->cpu_offline_cnt) {
+ DHD_ERROR(("%s(): cpu_offline_cnt malloc failed \n",
+ __FUNCTION__));
+ return;
+ }
+ for (i = 0; i < num_cpus; i++)
+ DHD_LB_STATS_CLR(dhd->cpu_offline_cnt[i]);
+
+ dhd->txp_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
+ if (!dhd->txp_percpu_run_cnt) {
+ DHD_ERROR(("%s(): txp_percpu_run_cnt malloc failed \n",
+ __FUNCTION__));
+ return;
+ }
+ for (i = 0; i < num_cpus; i++)
+ DHD_LB_STATS_CLR(dhd->txp_percpu_run_cnt[i]);
+
+ dhd->tx_start_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
+ if (!dhd->tx_start_percpu_run_cnt) {
+ DHD_ERROR(("%s(): tx_start_percpu_run_cnt malloc failed \n",
+ __FUNCTION__));
+ return;
+ }
+ for (i = 0; i < num_cpus; i++)
+ DHD_LB_STATS_CLR(dhd->tx_start_percpu_run_cnt[i]);
+
+ for (j = 0; j < HIST_BIN_SIZE; j++) {
+ dhd->napi_rx_hist[j] = (uint32 *)MALLOC(dhdp->osh, alloc_size);
+ if (!dhd->napi_rx_hist[j]) {
+ DHD_ERROR(("%s(): dhd->napi_rx_hist[%d] malloc failed \n",
+ __FUNCTION__, j));
+ return;
+ }
+ for (i = 0; i < num_cpus; i++) {
+ DHD_LB_STATS_CLR(dhd->napi_rx_hist[j][i]);
+ }
+ }
+
+ dhd->pub.lb_rxp_strt_thr_hitcnt = 0;
+ dhd->pub.lb_rxp_stop_thr_hitcnt = 0;
+
+ dhd->pub.lb_rxp_napi_sched_cnt = 0;
+ dhd->pub.lb_rxp_napi_complete_cnt = 0;
+ return;
+}
+
+void dhd_lb_stats_deinit(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd;
+ int j, num_cpus = num_possible_cpus();
+ int alloc_size = sizeof(uint32) * num_cpus;
+
+ if (dhdp == NULL) {
+ DHD_ERROR(("%s(): Invalid argument, dhd pub pointer is NULL \n",
+ __FUNCTION__));
+ return;
+ }
+
+ dhd = dhdp->info;
+ if (dhd == NULL) {
+ DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
+ return;
+ }
+
+ if (dhd->napi_percpu_run_cnt) {
+ MFREE(dhdp->osh, dhd->napi_percpu_run_cnt, alloc_size);
+ }
+ if (dhd->rxc_percpu_run_cnt) {
+ MFREE(dhdp->osh, dhd->rxc_percpu_run_cnt, alloc_size);
+ }
+ if (dhd->txc_percpu_run_cnt) {
+ MFREE(dhdp->osh, dhd->txc_percpu_run_cnt, alloc_size);
+ }
+ if (dhd->cpu_online_cnt) {
+ MFREE(dhdp->osh, dhd->cpu_online_cnt, alloc_size);
+ }
+ if (dhd->cpu_offline_cnt) {
+ MFREE(dhdp->osh, dhd->cpu_offline_cnt, alloc_size);
+ }
+
+ if (dhd->txp_percpu_run_cnt) {
+ MFREE(dhdp->osh, dhd->txp_percpu_run_cnt, alloc_size);
+ }
+ if (dhd->tx_start_percpu_run_cnt) {
+ MFREE(dhdp->osh, dhd->tx_start_percpu_run_cnt, alloc_size);
+ }
+ if (dhd->napi_latency) {
+ MFREE(dhdp->osh, dhd->napi_latency, DHD_NAPI_LATENCY_SIZE);
+ }
+
+ for (j = 0; j < HIST_BIN_SIZE; j++) {
+ if (dhd->napi_rx_hist[j]) {
+ MFREE(dhdp->osh, dhd->napi_rx_hist[j], alloc_size);
+ }
+ }
+
+ return;
+}
+
+void dhd_lb_stats_dump_napi_latency(dhd_pub_t *dhdp,
+ struct bcmstrbuf *strbuf, uint64 *napi_latency)
+{
+ uint32 i;
+
+ bcm_bprintf(strbuf, "napi-latency(us): \t count\n");
+ for (i = 0; i < DHD_NUM_NAPI_LATENCY_ROWS; i++) {
+ bcm_bprintf(strbuf, "%16u: \t %llu\n", 1U<<i, napi_latency[i]);
+ }
+}
+
+void dhd_lb_stats_dump_histo(dhd_pub_t *dhdp,
+ struct bcmstrbuf *strbuf, uint32 **hist)
+{
+ int i, j;
+ uint32 *per_cpu_total;
+ uint32 total = 0;
+ uint32 num_cpus = num_possible_cpus();
+
+ per_cpu_total = (uint32 *)MALLOC(dhdp->osh, sizeof(uint32) * num_cpus);
+ if (!per_cpu_total) {
+ DHD_ERROR(("%s(): per_cpu_total malloc failed \n", __FUNCTION__));
+ return;
+ }
+ bzero(per_cpu_total, sizeof(uint32) * num_cpus);
+
+ bcm_bprintf(strbuf, "CPU: \t\t");
+ for (i = 0; i < num_cpus; i++)
+ bcm_bprintf(strbuf, "%d\t", i);
+ bcm_bprintf(strbuf, "\nBin\n");
+
+ for (i = 0; i < HIST_BIN_SIZE; i++) {
+ bcm_bprintf(strbuf, "%d:\t\t", 1<<i);
+ for (j = 0; j < num_cpus; j++) {
+ bcm_bprintf(strbuf, "%d\t", hist[i][j]);
+ }
+ bcm_bprintf(strbuf, "\n");
+ }
+ bcm_bprintf(strbuf, "Per CPU Total \t");
+ total = 0;
+ for (i = 0; i < num_cpus; i++) {
+ for (j = 0; j < HIST_BIN_SIZE; j++) {
+ per_cpu_total[i] += (hist[j][i] * (1<<j));
+ }
+ bcm_bprintf(strbuf, "%d\t", per_cpu_total[i]);
+ total += per_cpu_total[i];
+ }
+ bcm_bprintf(strbuf, "\nTotal\t\t%d \n", total);
+
+ if (per_cpu_total) {
+ MFREE(dhdp->osh, per_cpu_total, sizeof(uint32) * num_cpus);
+ }
+ return;
+}
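+/*
+ * Reading the table above (illustrative numbers): hist[j][i] counts how
+ * many times CPU i processed a burst whose size rounds up to 2^j packets
+ * (see dhd_lb_stats_update_histo below), so the per-CPU total is an
+ * upper-bound packet estimate. E.g. 3 hits in bin 2 (4 pkts) and 1 hit in
+ * bin 3 (8 pkts) give 3*4 + 1*8 = 20 packets for that CPU.
+ */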
+
+void dhd_lb_stats_dump_cpu_array(struct bcmstrbuf *strbuf, uint32 *p)
+{
+ int i, num_cpus = num_possible_cpus();
+
+ bcm_bprintf(strbuf, "CPU: \t\t");
+ for (i = 0; i < num_cpus; i++)
+ bcm_bprintf(strbuf, "%d\t", i);
+ bcm_bprintf(strbuf, "\n");
+
+ bcm_bprintf(strbuf, "Val: \t\t");
+ for (i = 0; i < num_cpus; i++)
+ bcm_bprintf(strbuf, "%u\t", *(p+i));
+ bcm_bprintf(strbuf, "\n");
+ return;
+}
+
+#ifdef DHD_MEM_STATS
+uint64 dhd_lb_mem_usage(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
+{
+ dhd_info_t *dhd;
+ uint16 rxbufpost_sz;
+ uint16 rx_post_active = 0;
+ uint16 rx_cmpl_active = 0;
+ uint64 rx_path_memory_usage = 0;
+
+ if (dhdp == NULL || strbuf == NULL) {
+ DHD_ERROR(("%s(): Invalid argument dhdp %p strbuf %p \n",
+ __FUNCTION__, dhdp, strbuf));
+ return 0;
+ }
+
+ dhd = dhdp->info;
+ if (dhd == NULL) {
+ DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
+ return 0;
+ }
+ rxbufpost_sz = dhd_prot_get_rxbufpost_sz(dhdp);
+ if (rxbufpost_sz == 0) {
+ rxbufpost_sz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
+ }
+ rx_path_memory_usage = rxbufpost_sz * (skb_queue_len(&dhd->rx_pend_queue) +
+ skb_queue_len(&dhd->rx_napi_queue) +
+ skb_queue_len(&dhd->rx_process_queue));
+ rx_post_active = dhd_prot_get_h2d_rx_post_active(dhdp);
+ if (rx_post_active != 0) {
+ rx_path_memory_usage += (rxbufpost_sz * rx_post_active);
+ }
+
+ rx_cmpl_active = dhd_prot_get_d2h_rx_cpln_active(dhdp);
+ if (rx_cmpl_active != 0) {
+ rx_path_memory_usage += (rxbufpost_sz * rx_cmpl_active);
+ }
+
+ dhdp->rxpath_mem = rx_path_memory_usage;
+ bcm_bprintf(strbuf, "\nrxbufpost_sz: %d rx_post_active: %d rx_cmpl_active: %d "
+ "pend_queue_len: %d napi_queue_len: %d process_queue_len: %d\n",
+ rxbufpost_sz, rx_post_active, rx_cmpl_active,
+ skb_queue_len(&dhd->rx_pend_queue),
+ skb_queue_len(&dhd->rx_napi_queue), skb_queue_len(&dhd->rx_process_queue));
+ bcm_bprintf(strbuf, "DHD rx-path memory_usage: %llu bytes %llu KB\n",
+ rx_path_memory_usage, (rx_path_memory_usage / 1024));
+ return rx_path_memory_usage;
+}
+#endif /* DHD_MEM_STATS */
+
+void dhd_lb_stats_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
+{
+ dhd_info_t *dhd;
+
+ if (dhdp == NULL || strbuf == NULL) {
+ DHD_ERROR(("%s(): Invalid argument dhdp %p strbuf %p \n",
+ __FUNCTION__, dhdp, strbuf));
+ return;
+ }
+
+ dhd = dhdp->info;
+ if (dhd == NULL) {
+ DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
+ return;
+ }
+
+ bcm_bprintf(strbuf, "\ncpu_online_cnt:\n");
+ dhd_lb_stats_dump_cpu_array(strbuf, dhd->cpu_online_cnt);
+
+ bcm_bprintf(strbuf, "\ncpu_offline_cnt:\n");
+ dhd_lb_stats_dump_cpu_array(strbuf, dhd->cpu_offline_cnt);
+
+ bcm_bprintf(strbuf, "\nsched_cnt: dhd_dpc %u napi %u rxc %u txc %u\n",
+ dhd->dhd_dpc_cnt, dhd->napi_sched_cnt, dhd->rxc_sched_cnt,
+ dhd->txc_sched_cnt);
+
+ bcm_bprintf(strbuf, "\nCPUs: dpc_cpu %u napi_cpu %u net_tx_cpu %u tx_cpu %u\n",
+ atomic_read(&dhd->dpc_cpu),
+ atomic_read(&dhd->rx_napi_cpu),
+ atomic_read(&dhd->net_tx_cpu),
+ atomic_read(&dhd->tx_cpu));
+
+#ifdef DHD_LB_RXP
+ bcm_bprintf(strbuf, "\nnapi_percpu_run_cnt:\n");
+ dhd_lb_stats_dump_cpu_array(strbuf, dhd->napi_percpu_run_cnt);
+ bcm_bprintf(strbuf, "\nNAPI Packets Received Histogram:\n");
+ dhd_lb_stats_dump_histo(dhdp, strbuf, dhd->napi_rx_hist);
+ bcm_bprintf(strbuf, "\nNAPI poll latency stats, i.e. from napi schedule to napi execution\n");
+ dhd_lb_stats_dump_napi_latency(dhdp, strbuf, dhd->napi_latency);
+#endif /* DHD_LB_RXP */
+
+#ifdef DHD_LB_TXP
+ bcm_bprintf(strbuf, "\ntxp_percpu_run_cnt:\n");
+ dhd_lb_stats_dump_cpu_array(strbuf, dhd->txp_percpu_run_cnt);
+
+ bcm_bprintf(strbuf, "\ntx_start_percpu_run_cnt:\n");
+ dhd_lb_stats_dump_cpu_array(strbuf, dhd->tx_start_percpu_run_cnt);
+#endif /* DHD_LB_TXP */
+}
+
+void dhd_lb_stats_update_napi_latency(uint64 *bin, uint32 latency)
+{
+ uint64 *p;
+ uint32 bin_power;
+ bin_power = next_larger_power2(latency);
+
+ switch (bin_power) {
+ case 1: p = bin + 0; break;
+ case 2: p = bin + 1; break;
+ case 4: p = bin + 2; break;
+ case 8: p = bin + 3; break;
+ case 16: p = bin + 4; break;
+ case 32: p = bin + 5; break;
+ case 64: p = bin + 6; break;
+ case 128: p = bin + 7; break;
+ case 256: p = bin + 8; break;
+ case 512: p = bin + 9; break;
+ case 1024: p = bin + 10; break;
+ case 2048: p = bin + 11; break;
+ case 4096: p = bin + 12; break;
+ case 8192: p = bin + 13; break;
+ case 16384: p = bin + 14; break;
+ case 32768: p = bin + 15; break;
+ default : p = bin + 16; break;
+ }
+ ASSERT((p - bin) < DHD_NUM_NAPI_LATENCY_ROWS);
+ *p = *p + 1;
+ return;
+}
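+/*
+ * Example: a schedule-to-poll latency of 300 us has
+ * next_larger_power2(300) == 512 and lands in bin + 9, printed on the
+ * "512" row of the dump above; larger latencies that exceed the last
+ * switch case fall into the final overflow row (bin + 16).
+ */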
+
+void dhd_lb_stats_update_histo(uint32 **bin, uint32 count, uint32 cpu)
+{
+ uint32 bin_power;
+ uint32 *p;
+ bin_power = next_larger_power2(count);
+
+ switch (bin_power) {
+ case 1: p = bin[0] + cpu; break;
+ case 2: p = bin[1] + cpu; break;
+ case 4: p = bin[2] + cpu; break;
+ case 8: p = bin[3] + cpu; break;
+ case 16: p = bin[4] + cpu; break;
+ case 32: p = bin[5] + cpu; break;
+ case 64: p = bin[6] + cpu; break;
+ case 128: p = bin[7] + cpu; break;
+ default : p = bin[8] + cpu; break;
+ }
+
+ *p = *p + 1;
+ return;
+}
+
+void dhd_lb_stats_update_napi_histo(dhd_pub_t *dhdp, uint32 count)
+{
+ int cpu;
+ dhd_info_t *dhd = dhdp->info;
+
+ cpu = get_cpu();
+ put_cpu();
+ dhd_lb_stats_update_histo(dhd->napi_rx_hist, count, cpu);
+
+ return;
+}
+
+void dhd_lb_stats_update_txc_histo(dhd_pub_t *dhdp, uint32 count)
+{
+ int cpu;
+ dhd_info_t *dhd = dhdp->info;
+
+ cpu = get_cpu();
+ put_cpu();
+ dhd_lb_stats_update_histo(dhd->txc_hist, count, cpu);
+
+ return;
+}
+
+void dhd_lb_stats_update_rxc_histo(dhd_pub_t *dhdp, uint32 count)
+{
+ int cpu;
+ dhd_info_t *dhd = dhdp->info;
+
+ cpu = get_cpu();
+ put_cpu();
+ dhd_lb_stats_update_histo(dhd->rxc_hist, count, cpu);
+
+ return;
+}
+
+void dhd_lb_stats_txc_percpu_cnt_incr(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd = dhdp->info;
+ DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txc_percpu_run_cnt);
+}
+
+void dhd_lb_stats_rxc_percpu_cnt_incr(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd = dhdp->info;
+ DHD_LB_STATS_PERCPU_ARR_INCR(dhd->rxc_percpu_run_cnt);
+}
+#endif /* DHD_LB_STATS */
+
+/**
+ * dhd_tasklet_schedule - Function that runs in IPI context of the destination
+ * CPU and schedules a tasklet.
+ * @tasklet: opaque pointer to the tasklet
+ */
+INLINE void
+dhd_tasklet_schedule(void *tasklet)
+{
+ tasklet_schedule((struct tasklet_struct *)tasklet);
+}
+
+/**
+ * dhd_work_schedule_on - Executes the passed work in a given CPU
+ * @work: work to be scheduled
+ * @on_cpu: cpu core id
+ *
+ * If the requested cpu is online, then an IPI is sent to this cpu via the
+ * schedule_work_on and the work function
+ * will be invoked to schedule the specified work on the requested CPU.
+ */
+
+INLINE void
+dhd_work_schedule_on(struct work_struct *work, int on_cpu)
+{
+ schedule_work_on(on_cpu, work);
+}
+
+INLINE void
+dhd_delayed_work_schedule_on(struct delayed_work *dwork, int on_cpu, ulong delay)
+{
+ schedule_delayed_work_on(on_cpu, dwork, delay);
+}
+
+#if defined(DHD_LB_TXP)
+void dhd_tx_dispatcher_work(struct work_struct * work)
+{
+ struct dhd_info *dhd;
+
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ dhd = container_of(work, struct dhd_info, tx_dispatcher_work);
+ GCC_DIAGNOSTIC_POP();
+
+ dhd_tasklet_schedule(&dhd->tx_tasklet);
+}
+
+/**
+ * dhd_lb_tx_dispatch - load balance by dispatching the tx_tasklet
+ * on another cpu. The tx_tasklet will take care of actually putting
+ * the skbs into appropriate flow ring and ringing H2D interrupt
+ *
+ * @dhdp: pointer to dhd_pub object
+ */
+void
+dhd_lb_tx_dispatch(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd = dhdp->info;
+ int curr_cpu;
+ int tx_cpu;
+ int prev_net_tx_cpu;
+
+ /*
+ * Get cpu will disable pre-ermption and will not allow any cpu to go offline
+ * and call put_cpu() only after scheduling rx_napi_dispatcher_work.
+ */
+ curr_cpu = get_cpu();
+
+ /* Record the CPU in which the TX request from Network stack came */
+ prev_net_tx_cpu = atomic_read(&dhd->net_tx_cpu);
+ atomic_set(&dhd->net_tx_cpu, curr_cpu);
+
+ tx_cpu = atomic_read(&dhd->tx_cpu);
+
+ /*
+ * Avoid cpu candidacy, if override is set via sysfs for changing cpu mannually
+ */
+ if (dhd->dhd_lb_candidacy_override) {
+ if (!cpu_online(tx_cpu)) {
+ tx_cpu = curr_cpu;
+ }
+ } else {
+ /*
+ * Now if the NET TX has scheduled in the same CPU
+ * that is chosen for Tx processing
+ * OR scheduled on different cpu than previously it was scheduled,
+ * OR if tx_cpu is offline,
+ * Call cpu candidacy algorithm to recompute tx_cpu.
+ */
+ if ((curr_cpu == tx_cpu) || (curr_cpu != prev_net_tx_cpu) ||
+ !cpu_online(tx_cpu)) {
+ /* Re compute LB CPUs */
+ dhd_select_cpu_candidacy(dhd);
+ /* Use updated tx cpu */
+ tx_cpu = atomic_read(&dhd->tx_cpu);
+ }
+ }
+ /*
+ * Schedule tx_dispatcher_work to on the cpu which
+ * in turn will schedule tx_tasklet.
+ */
+ dhd_work_schedule_on(&dhd->tx_dispatcher_work, tx_cpu);
+
+ put_cpu();
+}
+#endif /* DHD_LB_TXP */
+
+#if defined(DHD_LB_RXP)
+
+/**
+ * dhd_napi_poll - Load balance napi poll function to process received
+ * packets and send up the network stack using netif_receive_skb()
+ *
+ * @napi: napi object in which context this poll function is invoked
+ * @budget: number of packets to be processed.
+ *
+ * Fetch the dhd_info given the rx_napi_struct. Move all packets from the
+ * rx_napi_queue into a local rx_process_queue (lock and queue move and unlock).
+ * Dequeue each packet from head of rx_process_queue, fetch the ifid from the
+ * packet tag and sendup.
+ */
+int
+dhd_napi_poll(struct napi_struct *napi, int budget)
+{
+ int ifid;
+ const int pkt_count = 1;
+ const int chan = 0;
+ struct sk_buff * skb;
+ unsigned long flags;
+ struct dhd_info *dhd;
+ int processed = 0;
+ int dpc_cpu;
+#ifdef DHD_LB_STATS
+ uint32 napi_latency;
+#endif /* DHD_LB_STATS */
+
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ dhd = container_of(napi, struct dhd_info, rx_napi_struct);
+ GCC_DIAGNOSTIC_POP();
+
+#ifdef DHD_LB_STATS
+ napi_latency = (uint32)(OSL_SYSUPTIME_US() - dhd->napi_schedule_time);
+ dhd_lb_stats_update_napi_latency(dhd->napi_latency, napi_latency);
+#endif /* DHD_LB_STATS */
+ DHD_LB_INFO(("%s napi_queue<%d> budget<%d>\n",
+ __FUNCTION__, skb_queue_len(&dhd->rx_napi_queue), budget));
+
+ /*
+ * Extract the entire rx_napi_queue into another rx_process_queue
+ * and process only 'budget' number of skbs from rx_process_queue.
+ * If there are more items to be processed, the napi poll is rescheduled.
+ * During the next iteration, the next set of skbs from
+ * rx_napi_queue is extracted and attached to the tail of rx_process_queue,
+ * and again 'budget' skbs are processed from rx_process_queue.
+ * If there are fewer than 'budget' skbs in rx_process_queue,
+ * call napi_complete to stop rescheduling napi poll.
+ */
+ DHD_RX_NAPI_QUEUE_LOCK(&dhd->rx_napi_queue.lock, flags);
+ skb_queue_splice_tail_init(&dhd->rx_napi_queue, &dhd->rx_process_queue);
+ DHD_RX_NAPI_QUEUE_UNLOCK(&dhd->rx_napi_queue.lock, flags);
+
+ while ((processed < budget) && (skb = __skb_dequeue(&dhd->rx_process_queue)) != NULL) {
+ OSL_PREFETCH(skb->data);
+
+ ifid = DHD_PKTTAG_IFID((dhd_pkttag_fr_t *)PKTTAG(skb));
+
+ DHD_LB_INFO(("%s dhd_rx_frame pkt<%p> ifid<%d>\n",
+ __FUNCTION__, skb, ifid));
+
+ dhd_rx_frame(&dhd->pub, ifid, skb, pkt_count, chan);
+ processed++;
+ }
+
+ if (atomic_read(&dhd->pub.lb_rxp_flow_ctrl) &&
+ (dhd_lb_rxp_process_qlen(&dhd->pub) <= dhd->pub.lb_rxp_strt_thr)) {
+ /*
+ * If the dpc CPU is online Schedule dhd_dpc_dispatcher_work on the dpc cpu which
+ * in turn will schedule dpc tasklet. Else schedule dpc takslet.
+ */
+ get_cpu();
+ dpc_cpu = atomic_read(&dhd->dpc_cpu);
+ if (!cpu_online(dpc_cpu)) {
+ dhd_tasklet_schedule(&dhd->tasklet);
+ } else {
+ dhd_delayed_work_schedule_on(&dhd->dhd_dpc_dispatcher_work, dpc_cpu, 0);
+ }
+ put_cpu();
+ }
+ DHD_LB_STATS_UPDATE_NAPI_HISTO(&dhd->pub, processed);
+
+ DHD_LB_INFO(("%s processed %d\n", __FUNCTION__, processed));
+
+ /*
+ * Signal napi complete only when no more packets are processed and
+ * none are left in the enqueued queue.
+ */
+ if ((processed == 0) && (skb_queue_len(&dhd->rx_napi_queue) == 0)) {
+ napi_complete(napi);
+#ifdef DHD_LB_STATS
+ dhd->pub.lb_rxp_napi_complete_cnt++;
+#endif /* DHD_LB_STATS */
+ DHD_GENERAL_LOCK(&dhd->pub, flags);
+ DHD_BUS_BUSY_CLEAR_IN_NAPI(&dhd->pub);
+ DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+ return 0;
+ }
+
+#ifdef DHD_LB_STATS
+ dhd->napi_schedule_time = OSL_SYSUPTIME_US();
+#endif /* DHD_LB_STATS */
+
+ /* Return budget so that it gets rescheduled immediately */
+ return budget;
+}
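+/*
+ * Note on the NAPI contract (standard kernel semantics, not specific to
+ * this change): returning 0 after napi_complete() tells the core polling
+ * is done, while returning the full budget keeps this napi instance on
+ * the softnet poll list so dhd_napi_poll() runs again without needing a
+ * fresh __napi_schedule().
+ */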
+
+/**
+ * dhd_napi_schedule - Place the napi struct into the current cpus softnet napi
+ * poll list. This function may be invoked via the smp_call_function_single
+ * from a remote CPU.
+ *
+ * This function will essentially invoke __raise_softirq_irqoff(NET_RX_SOFTIRQ)
+ * after the napi_struct is added to the softnet data's poll_list
+ *
+ * @info: pointer to a dhd_info struct
+ */
+static void
+dhd_napi_schedule(void *info)
+{
+ dhd_info_t *dhd = (dhd_info_t *)info;
+ unsigned long flags;
+
+ DHD_INFO(("%s rx_napi_struct<%p> on cpu<%d>\n",
+ __FUNCTION__, &dhd->rx_napi_struct, atomic_read(&dhd->rx_napi_cpu)));
+
+ /* add napi_struct to softnet data poll list and raise NET_RX_SOFTIRQ */
+ if (napi_schedule_prep(&dhd->rx_napi_struct)) {
+
+ /*
+ * Set busbusystate in NAPI, which will be cleared after
+ * napi_complete from napi_poll context
+ */
+ DHD_GENERAL_LOCK(&dhd->pub, flags);
+ DHD_BUS_BUSY_SET_IN_NAPI(&dhd->pub);
+ DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+
+#ifdef DHD_LB_STATS
+ dhd->napi_schedule_time = OSL_SYSUPTIME_US();
+ dhd->pub.lb_rxp_napi_sched_cnt++;
+#endif /* DHD_LB_STATS */
+ __napi_schedule(&dhd->rx_napi_struct);
+#ifdef WAKEUP_KSOFTIRQD_POST_NAPI_SCHEDULE
+ raise_softirq(NET_RX_SOFTIRQ);
+#endif /* WAKEUP_KSOFTIRQD_POST_NAPI_SCHEDULE */
+ }
+
+ /*
+ * If the rx_napi_struct was already running, then we let it complete
+ * processing all its packets. The rx_napi_struct may only run on one
+ * core at a time, to avoid out-of-order handling.
+ */
+}
+
+/**
+ * dhd_napi_schedule_on - API to schedule on a desired CPU core a NET_RX_SOFTIRQ
+ * action after placing the dhd's rx_process napi object in the remote CPU's
+ * softnet data's poll_list.
+ *
+ * @dhd: dhd_info which has the rx_process napi object
+ * @on_cpu: desired remote CPU id
+ */
+static INLINE int
+dhd_napi_schedule_on(dhd_info_t *dhd, int on_cpu)
+{
+ int wait = 0; /* asynchronous IPI */
+ DHD_INFO(("%s dhd<%p> napi<%p> on_cpu<%d>\n",
+ __FUNCTION__, dhd, &dhd->rx_napi_struct, on_cpu));
+
+ if (smp_call_function_single(on_cpu, dhd_napi_schedule, dhd, wait)) {
+ DHD_ERROR(("%s smp_call_function_single on_cpu<%d> failed\n",
+ __FUNCTION__, on_cpu));
+ }
+
+ DHD_LB_STATS_INCR(dhd->napi_sched_cnt);
+
+ return 0;
+}
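+/*
+ * Sketch of the IPI flow (standard smp_call_function_single semantics):
+ * with wait == 0 the call is asynchronous, so dhd_napi_schedule() runs in
+ * IPI context on on_cpu while the caller returns immediately; the napi
+ * poll itself then runs later in NET_RX_SOFTIRQ context on on_cpu.
+ */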
+
+/*
+ * Call get_online_cpus/put_online_cpus around dhd_napi_schedule_on
+ * Why should we do this?
+ * The candidacy algorithm is run from the call back function
+ * registered to CPU hotplug notifier. This call back happens from Worker
+ * context. The dhd_napi_schedule_on is also from worker context.
+ * Note that both of this can run on two different CPUs at the same time.
+ * So we can possibly have a window where a given CPUn is being brought
+ * down from CPUm while we try to run a function on CPUn.
+ * To prevent this, it is better to execute the whole SMP function call
+ * under get_online_cpus.
+ * This ensures that the hotplug mechanism does not kick in
+ * until we are done dealing with online CPUs.
+ * If the hotplug worker is already running, no worries because the
+ * candidacy algo would then reflect the same in dhd->rx_napi_cpu.
+ *
+ * The below mentioned code structure is proposed in
+ * https://www.kernel.org/doc/Documentation/cpu-hotplug.txt
+ * for the question
+ * Q: I need to ensure that a particular cpu is not removed when there is some
+ * work specific to this cpu is in progress
+ *
+ * According to the documentation calling get_online_cpus is NOT required, if
+ * we are running from tasklet context. Since dhd_rx_napi_dispatcher_work can
+ * run from Work Queue context we have to call these functions
+ */
+void dhd_rx_napi_dispatcher_work(struct work_struct * work)
+{
+ struct dhd_info *dhd;
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ dhd = container_of(work, struct dhd_info, rx_napi_dispatcher_work);
+ GCC_DIAGNOSTIC_POP();
+
+ dhd_napi_schedule(dhd);
+}
+
+/**
+ * dhd_lb_rx_napi_dispatch - load balance by dispatching the rx_napi_struct
+ * to run on another CPU. The rx_napi_struct's poll function will retrieve all
+ * the packets enqueued into the rx_napi_queue and sendup.
+ * The producer's rx packet queue is appended to the rx_napi_queue before
+ * dispatching the rx_napi_struct.
+ */
+void
+dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp)
+{
+ unsigned long flags;
+ dhd_info_t *dhd = dhdp->info;
+ int curr_cpu;
+ int rx_napi_cpu;
+ int prev_dpc_cpu;
+
+ if (dhd->rx_napi_netdev == NULL) {
+ DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ DHD_LB_INFO(("%s append napi_queue<%d> pend_queue<%d>\n", __FUNCTION__,
+ skb_queue_len(&dhd->rx_napi_queue), skb_queue_len(&dhd->rx_pend_queue)));
+
+ /* append the producer's queue of packets to the napi's rx process queue */
+ DHD_RX_NAPI_QUEUE_LOCK(&dhd->rx_napi_queue.lock, flags);
+ skb_queue_splice_tail_init(&dhd->rx_pend_queue, &dhd->rx_napi_queue);
+ DHD_RX_NAPI_QUEUE_UNLOCK(&dhd->rx_napi_queue.lock, flags);
+
+ /* If sysfs lb_rxp_active is not set, schedule on current cpu */
+ if (!atomic_read(&dhd->lb_rxp_active))
+ {
+ dhd_napi_schedule(dhd);
+ return;
+ }
+
+ /*
+ * Get cpu will disable pre-ermption and will not allow any cpu to go offline
+ * and call put_cpu() only after scheduling rx_napi_dispatcher_work.
+ */
+ curr_cpu = get_cpu();
+
+ prev_dpc_cpu = atomic_read(&dhd->prev_dpc_cpu);
+
+ rx_napi_cpu = atomic_read(&dhd->rx_napi_cpu);
+
+ /*
+ * Avoid cpu candidacy, if override is set via sysfs for changing cpu mannually
+ */
+ if (dhd->dhd_lb_candidacy_override) {
+ if (!cpu_online(rx_napi_cpu)) {
+ rx_napi_cpu = curr_cpu;
+ }
+ } else {
+ /*
+ * Now if the DPC has scheduled in the same CPU
+ * that is chosen for Rx napi processing
+ * OR scheduled on different cpu than previously it was scheduled,
+ * OR if rx_napi_cpu is offline,
+ * Call cpu candidacy algorithm to recompute napi_cpu.
+ */
+ if ((curr_cpu == rx_napi_cpu) || (curr_cpu != prev_dpc_cpu) ||
+ !cpu_online(rx_napi_cpu)) {
+ /* Re compute LB CPUs */
+ dhd_select_cpu_candidacy(dhd);
+ /* Use updated napi cpu */
+ rx_napi_cpu = atomic_read(&dhd->rx_napi_cpu);
+ }
+ }
+
+ DHD_LB_INFO(("%s : schedule to curr_cpu : %d, rx_napi_cpu : %d\n",
+ __FUNCTION__, curr_cpu, rx_napi_cpu));
+ dhd_work_schedule_on(&dhd->rx_napi_dispatcher_work, rx_napi_cpu);
+ DHD_LB_STATS_INCR(dhd->napi_sched_cnt);
+
+ put_cpu();
+}
+
+/**
+ * dhd_lb_rx_pkt_enqueue - Enqueue the packet into the producer's queue
+ */
+void
+dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx)
+{
+ dhd_info_t *dhd = dhdp->info;
+
+ DHD_LB_INFO(("%s enqueue pkt<%p> ifidx<%d> pend_queue<%d>\n", __FUNCTION__,
+ pkt, ifidx, skb_queue_len(&dhd->rx_pend_queue)));
+ DHD_PKTTAG_SET_IFID((dhd_pkttag_fr_t *)PKTTAG(pkt), ifidx);
+ __skb_queue_tail(&dhd->rx_pend_queue, pkt);
+ DHD_LB_STATS_PERCPU_ARR_INCR(dhd->napi_percpu_run_cnt);
+}
+
+unsigned long
+dhd_read_lb_rxp(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd = dhdp->info;
+ return atomic_read(&dhd->lb_rxp_active);
+}
+
+uint32
+dhd_lb_rxp_process_qlen(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd = dhdp->info;
+ return skb_queue_len(&dhd->rx_process_queue);
+}
+#endif /* DHD_LB_RXP */
+
+#if defined(DHD_LB_TXP)
+int
+BCMFASTPATH(dhd_lb_sendpkt)(dhd_info_t *dhd, struct net_device *net,
+ int ifidx, void *skb)
+{
+ DHD_LB_STATS_PERCPU_ARR_INCR(dhd->tx_start_percpu_run_cnt);
+
+ /* If the feature is disabled run-time do TX from here */
+ if (atomic_read(&dhd->lb_txp_active) == 0) {
+ DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txp_percpu_run_cnt);
+ return __dhd_sendpkt(&dhd->pub, ifidx, skb);
+ }
+
+ /* Store the address of net device and interface index in the Packet tag */
+ DHD_LB_TX_PKTTAG_SET_NETDEV((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb), net);
+ DHD_LB_TX_PKTTAG_SET_IFIDX((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb), ifidx);
+
+ /* Enqueue the skb into tx_pend_queue */
+ skb_queue_tail(&dhd->tx_pend_queue, skb);
+
+ DHD_TRACE(("%s(): Added skb %p for netdev %p \r\n", __FUNCTION__, skb, net));
+
+ /* Dispatch the Tx job to be processed by the tx_tasklet */
+ dhd_lb_tx_dispatch(&dhd->pub);
+
+ return NETDEV_TX_OK;
+}
+#endif /* DHD_LB_TXP */
+
+#ifdef DHD_LB_TXP
+#define DHD_LB_TXBOUND 64
+/*
+ * Function that performs the TX processing on a given CPU
+ */
+bool
+dhd_lb_tx_process(dhd_info_t *dhd)
+{
+ struct sk_buff *skb;
+ int cnt = 0;
+ struct net_device *net;
+ int ifidx;
+ bool resched = FALSE;
+
+ DHD_TRACE(("%s(): TX Processing \r\n", __FUNCTION__));
+ if (dhd == NULL) {
+ DHD_ERROR((" Null pointer DHD \r\n"));
+ return resched;
+ }
+
+ BCM_REFERENCE(net);
+
+ DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txp_percpu_run_cnt);
+
+ /* Base Loop to perform the actual Tx */
+ do {
+ skb = skb_dequeue(&dhd->tx_pend_queue);
+ if (skb == NULL) {
+ DHD_TRACE(("Dequeued a Null Packet \r\n"));
+ break;
+ }
+ cnt++;
+
+ net = DHD_LB_TX_PKTTAG_NETDEV((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb));
+ ifidx = DHD_LB_TX_PKTTAG_IFIDX((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb));
+
+ DHD_TRACE(("Processing skb %p for net %p index %d \r\n", skb,
+ net, ifidx));
+
+ __dhd_sendpkt(&dhd->pub, ifidx, skb);
+
+ if (cnt >= DHD_LB_TXBOUND) {
+ resched = TRUE;
+ break;
+ }
+
+ } while (1);
+
+ DHD_LB_INFO(("%s(): Processed %d packets \r\n", __FUNCTION__, cnt));
+
+ return resched;
+}
+
+void
+dhd_lb_tx_handler(unsigned long data)
+{
+ dhd_info_t *dhd = (dhd_info_t *)data;
+
+ if (dhd_lb_tx_process(dhd)) {
+ dhd_tasklet_schedule(&dhd->tx_tasklet);
+ }
+}
+
+#endif /* DHD_LB_TXP */
+#endif /* DHD_LB */
+
+#if defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON)
+void
+dhd_irq_set_affinity(dhd_pub_t *dhdp, const struct cpumask *cpumask)
+{
+ unsigned int irq = (unsigned int)-1;
+ int err = BCME_OK;
+
+ if (!dhdp) {
+ DHD_ERROR(("%s : dhdp is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ if (!dhdp->bus) {
+ DHD_ERROR(("%s : bus is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ DHD_ERROR(("%s : irq set affinity cpu:0x%lx\n",
+ __FUNCTION__, *cpumask_bits(cpumask)));
+
+ dhdpcie_get_pcieirq(dhdp->bus, &irq);
+#ifdef BCMDHD_MODULAR
+ err = irq_set_affinity_hint(irq, cpumask);
+#else
+ err = irq_set_affinity(irq, cpumask);
+#endif /* BCMDHD_MODULAR */
+ if (err)
+ DHD_ERROR(("%s : irq set affinity is failed cpu:0x%lx\n",
+ __FUNCTION__, *cpumask_bits(cpumask)));
+}
+#endif /* DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */
diff --git a/bcmdhd.101.10.361.x/dhd_linux_pktdump.c b/bcmdhd.101.10.361.x/dhd_linux_pktdump.c
new file mode 100755
index 0000000..66e0b44
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_linux_pktdump.c
@@ -0,0 +1,1578 @@
+/*
+ * Packet dump helper functions
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+#include <typedefs.h>
+#include <ethernet.h>
+#include <bcmutils.h>
+#include <bcmevent.h>
+#include <bcmendian.h>
+#include <bcmtlv.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_dbg.h>
+#include <bcmip.h>
+#include <bcmudp.h>
+#include <bcmdhcp.h>
+#include <bcmarp.h>
+#include <bcmicmp.h>
+#include <dhd_linux_pktdump.h>
+#include <dhd_config.h>
+#include <wl_android.h>
+
+#define DHD_PKTDUMP(arg) printf arg
+#define DHD_PKTDUMP_MEM(arg) printf arg
+#define PACKED_STRUCT __attribute__ ((packed))
+
+#define EAPOL_HDR_LEN 4
+
+/* EAPOL types */
+#define EAP_PACKET 0
+#define EAPOL_START 1
+#define EAPOL_LOGOFF 2
+#define EAPOL_KEY 3
+#define EAPOL_ASF 4
+
+/* EAPOL-Key types */
+#define EAPOL_RC4_KEY 1
+#define EAPOL_WPA2_KEY 2 /* 802.11i/WPA2 */
+#define EAPOL_WPA_KEY 254 /* WPA */
+
+/* EAPOL-Key header field size */
+#define AKW_BLOCK_LEN 8
+#define WPA_KEY_REPLAY_LEN 8
+#define WPA_KEY_NONCE_LEN 32
+#define WPA_KEY_IV_LEN 16
+#define WPA_KEY_RSC_LEN 8
+#define WPA_KEY_ID_LEN 8
+#define WPA_KEY_MIC_LEN 16
+#define WPA_MAX_KEY_SIZE 32
+#define WPA_KEY_DATA_LEN (WPA_MAX_KEY_SIZE + AKW_BLOCK_LEN)
+
+/* Key information bit */
+#define KEYINFO_TYPE_MASK (1 << 3)
+#define KEYINFO_INSTALL_MASK (1 << 6)
+#define KEYINFO_KEYACK_MASK (1 << 7)
+#define KEYINFO_KEYMIC_MASK (1 << 8)
+#define KEYINFO_SECURE_MASK (1 << 9)
+#define KEYINFO_ERROR_MASK (1 << 10)
+#define KEYINFO_REQ_MASK (1 << 11)
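+/*
+ * How these bits classify the WPA2 4-way handshake (see dhd_is_4way_msg):
+ * M1 = ACK set, MIC/SECURE clear; M2 = MIC set, ACK/SECURE clear;
+ * M3 = ACK + MIC + SECURE set; M4 = MIC + SECURE set, ACK clear.
+ * For example, a typical M1 key_info of 0x008a has KEYINFO_TYPE_MASK
+ * (pairwise) and KEYINFO_KEYACK_MASK set.
+ */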
+
+/* EAP Code */
+#define EAP_CODE_REQUEST 1 /* Request */
+#define EAP_CODE_RESPONSE 2 /* Response */
+#define EAP_CODE_SUCCESS 3 /* Success */
+#define EAP_CODE_FAILURE 4 /* Failure */
+
+/* EAP Type */
+#define EAP_TYPE_RSVD 0 /* Reserved */
+#define EAP_TYPE_IDENT 1 /* Identity */
+#define EAP_TYPE_NOTI 2 /* Notification */
+#define EAP_TYPE_TLS 13 /* EAP-TLS */
+#define EAP_TYPE_LEAP 17 /* Cisco-LEAP */
+#define EAP_TYPE_TTLS 21 /* EAP-TTLS */
+#define EAP_TYPE_AKA 23 /* EAP-AKA */
+#define EAP_TYPE_PEAP 25 /* EAP-PEAP */
+#define EAP_TYPE_FAST 43 /* EAP-FAST */
+#define EAP_TYPE_PSK 47 /* EAP-PSK */
+#define EAP_TYPE_AKAP 50 /* EAP-AKA' */
+#define EAP_TYPE_EXP 254 /* Reserved for Expanded Type */
+
+/* WSC */
+#define EAP_HDR_LEN 5
+#define EAP_WSC_NONCE_OFFSET 10
+#define EAP_WSC_DATA_OFFSET (OFFSETOF(eap_wsc_fmt_t, data))
+#define EAP_WSC_MIN_DATA_LEN ((EAP_HDR_LEN) + (EAP_WSC_DATA_OFFSET))
+#define WFA_VID "\x00\x37\x2A" /* WFA SMI code */
+#define WFA_VID_LEN 3 /* WFA VID length */
+#define WFA_VTYPE 1u /* WFA Vendor type */
+
+/* WSC opcode */
+#define WSC_OPCODE_UPNP 0
+#define WSC_OPCODE_START 1
+#define WSC_OPCODE_ACK 2
+#define WSC_OPCODE_NACK 3
+#define WSC_OPCODE_MSG 4
+#define WSC_OPCODE_DONE 5
+#define WSC_OPCODE_FRAG_ACK 6
+
+/* WSC flag */
+#define WSC_FLAG_MF 1 /* more fragments */
+#define WSC_FLAG_LF 2 /* length field */
+
+/* WSC message code */
+#define WSC_ATTR_MSG 0x1022
+#define WSC_MSG_M1 0x04
+#define WSC_MSG_M2 0x05
+#define WSC_MSG_M3 0x07
+#define WSC_MSG_M4 0x08
+#define WSC_MSG_M5 0x09
+#define WSC_MSG_M6 0x0A
+#define WSC_MSG_M7 0x0B
+#define WSC_MSG_M8 0x0C
+
+/* Debug prints */
+typedef enum pkt_cnt_type {
+ PKT_CNT_TYPE_INVALID = 0,
+ PKT_CNT_TYPE_ARP = 1,
+ PKT_CNT_TYPE_DNS = 2,
+ PKT_CNT_TYPE_MAX = 3
+} pkt_cnt_type_t;
+
+typedef struct pkt_cnt {
+ uint32 tx_cnt;
+ uint32 tx_err_cnt;
+ uint32 rx_cnt;
+} pkt_cnt_t;
+
+typedef struct pkt_cnt_log {
+ bool enabled;
+ uint16 reason;
+ timer_list_compat_t pktcnt_timer;
+ pkt_cnt_t arp_cnt;
+ pkt_cnt_t dns_cnt;
+} pkt_cnts_log_t;
+
+#define PKT_CNT_TIMER_INTERNVAL_MS 5000 /* packet count timeout(ms) */
+#define PKT_CNT_RSN_VALID(rsn) \
+ (((rsn) > (PKT_CNT_RSN_INVALID)) && ((rsn) < (PKT_CNT_RSN_MAX)))
+
+#ifdef DHD_PKTDUMP_ROAM
+static const char pkt_cnt_msg[][20] = {
+ "INVALID",
+ "ROAM_SUCCESS",
+ "GROUP_KEY_UPDATE",
+ "CONNECT_SUCCESS",
+ "INVALID"
+};
+#endif
+
+static const char tx_pktfate[][30] = {
+ "TX_PKT_FATE_ACKED", /* 0: WLFC_CTL_PKTFLAG_DISCARD */
+ "TX_PKT_FATE_FW_QUEUED", /* 1: WLFC_CTL_PKTFLAG_D11SUPPRESS */
+ "TX_PKT_FATE_FW_QUEUED", /* 2: WLFC_CTL_PKTFLAG_WLSUPPRESS */
+ "TX_PKT_FATE_FW_DROP_INVALID", /* 3: WLFC_CTL_PKTFLAG_TOSSED_BYWLC */
+ "TX_PKT_FATE_SENT", /* 4: WLFC_CTL_PKTFLAG_DISCARD_NOACK */
+ "TX_PKT_FATE_FW_DROP_OTHER", /* 5: WLFC_CTL_PKTFLAG_SUPPRESS_ACKED */
+ "TX_PKT_FATE_FW_DROP_EXPTIME", /* 6: WLFC_CTL_PKTFLAG_EXPIRED */
+ "TX_PKT_FATE_FW_DROP_OTHER", /* 7: WLFC_CTL_PKTFLAG_DROPPED */
+ "TX_PKT_FATE_FW_PKT_FREE", /* 8: WLFC_CTL_PKTFLAG_MKTFREE */
+};
+
+#define DBGREPLAY " Replay Counter: %02x%02x%02x%02x%02x%02x%02x%02x"
+#define REPLAY_FMT(key) ((const eapol_key_hdr_t *)(key))->replay[0], \
+ ((const eapol_key_hdr_t *)(key))->replay[1], \
+ ((const eapol_key_hdr_t *)(key))->replay[2], \
+ ((const eapol_key_hdr_t *)(key))->replay[3], \
+ ((const eapol_key_hdr_t *)(key))->replay[4], \
+ ((const eapol_key_hdr_t *)(key))->replay[5], \
+ ((const eapol_key_hdr_t *)(key))->replay[6], \
+ ((const eapol_key_hdr_t *)(key))->replay[7]
+#define TXFATE_FMT " TX_PKTHASH:0x%X TX_PKT_FATE:%s"
+#define TX_PKTHASH(pkthash) ((pkthash) ? (*pkthash) : (0))
+#define TX_FATE_STR(fate) (((*fate) <= (WLFC_CTL_PKTFLAG_MKTFREE)) ? \
+ (tx_pktfate[(*fate)]) : "TX_PKT_FATE_FW_DROP_OTHER")
+#define TX_FATE(fate) ((fate) ? (TX_FATE_STR(fate)) : "N/A")
+#define TX_FATE_ACKED(fate) ((fate) ? ((*fate) == (WLFC_CTL_PKTFLAG_DISCARD)) : (0))
+
+#define EAP_PRINT(x, args...) \
+ do { \
+ if (dump_msg_level & DUMP_EAPOL_VAL) { \
+ if (tx) { \
+ DHD_PKTDUMP(("[%s] 802_1X " x " [TX] : (%s) %s (%s)"TXFATE_FMT"\n", \
+ ifname, ## args, \
+ tx?seabuf:deabuf, tx?"->":"<-", tx?deabuf:seabuf, \
+ TX_PKTHASH(pkthash), TX_FATE(pktfate))); \
+ } else { \
+ DHD_PKTDUMP(("[%s] 802_1X " x " [RX] : (%s) %s (%s)\n", \
+ ifname, ## args, \
+ tx?seabuf:deabuf, tx?"->":"<-", tx?deabuf:seabuf)); \
+ } \
+ } \
+ } while (0)
+
+#define EAP_PRINT_REPLAY(x, args...) \
+ do { \
+ if (dump_msg_level & DUMP_EAPOL_VAL) { \
+ if (tx) { \
+ DHD_PKTDUMP(("[%s] 802_1X " x " [TX] : (%s) %s (%s)"DBGREPLAY TXFATE_FMT"\n", \
+ ifname, ## args, \
+ tx?seabuf:deabuf, tx?"->":"<-", tx?deabuf:seabuf, \
+ REPLAY_FMT(eap_key), TX_PKTHASH(pkthash), TX_FATE(pktfate))); \
+ } else { \
+ DHD_PKTDUMP(("[%s] 802_1X " x " [RX] : (%s) %s (%s)"DBGREPLAY"\n", \
+ ifname, ## args, \
+ tx?seabuf:deabuf, tx?"->":"<-", tx?deabuf:seabuf, \
+ REPLAY_FMT(eap_key))); \
+ } \
+ } \
+ } while (0)
+
+#define EAP_PRINT_OTHER(x, args...) \
+ do { \
+ if (dump_msg_level & DUMP_EAPOL_VAL) { \
+ if (tx) { \
+ DHD_PKTDUMP(("[%s] 802_1X " x " [TX] : (%s) %s (%s) " \
+ "ver %d, type %d"TXFATE_FMT"\n", \
+ ifname, ## args, \
+ tx?seabuf:deabuf, tx?"->":"<-", tx?deabuf:seabuf, \
+ eapol_hdr->version, eapol_hdr->type, \
+ TX_PKTHASH(pkthash), TX_FATE(pktfate))); \
+ } else { \
+ DHD_PKTDUMP(("[%s] 802_1X " x " [RX] : (%s) %s (%s) " \
+ "ver %d, type %d\n", \
+ ifname, ## args, \
+ tx?seabuf:deabuf, tx?"->":"<-", tx?deabuf:seabuf, \
+ eapol_hdr->version, eapol_hdr->type)); \
+ } \
+ } \
+ } while (0)
+
+#define EAP_PRINT_OTHER_4WAY(x, args...) \
+ do { \
+ if (dump_msg_level & DUMP_EAPOL_VAL) { \
+ if (tx) { \
+ DHD_PKTDUMP(("[%s] 802_1X " x " [TX] : (%s) %s (%s) " \
+ "ver %d type %d keytype %d keyinfo 0x%02X"TXFATE_FMT"\n", \
+ ifname, ## args, \
+ tx?seabuf:deabuf, tx?"->":"<-", tx?deabuf:seabuf, \
+ eapol_hdr->version, eapol_hdr->type, eap_key->type, \
+ (uint32)hton16(eap_key->key_info), \
+ TX_PKTHASH(pkthash), TX_FATE(pktfate))); \
+ } else { \
+ DHD_PKTDUMP(("[%s] 802_1X " x " [RX] : (%s) %s (%s) " \
+ "ver %d type %d keytype %d keyinfo 0x%02X\n", \
+ ifname, ## args, \
+ tx?seabuf:deabuf, tx?"->":"<-", tx?deabuf:seabuf, \
+ eapol_hdr->version, eapol_hdr->type, eap_key->type, \
+ (uint32)hton16(eap_key->key_info))); \
+ } \
+ } \
+ } while (0)
+
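+/*
+ * Example output (editorial sketch): with DUMP_EAPOL_VAL set in
+ * dump_msg_level, EAP_PRINT("EAPOL Packet, 4-way handshake, M1") on RX
+ * emits roughly:
+ *
+ *   [wlan0] 802_1X EAPOL Packet, 4-way handshake, M1 [RX] : (<da>) <- (<sa>)
+ *
+ * and the TX variant appends the packet hash and fate via TXFATE_FMT.
+ */
+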
+#define UDP_PORT_DNS 53 /* UDP DNS port */
+
+/* EAPOL header */
+typedef struct eapol_header {
+ struct ether_header eth; /* 802.3/Ethernet header */
+ uint8 version; /* EAPOL protocol version */
+ uint8 type; /* EAPOL type */
+ uint16 length; /* Length of body */
+ uint8 body[1]; /* Body (optional) */
+} PACKED_STRUCT eapol_header_t;
+
+/* EAP header */
+typedef struct eap_header_fmt {
+ uint8 code;
+ uint8 id;
+ uint16 len;
+ uint8 type;
+ uint8 data[1];
+} PACKED_STRUCT eap_header_fmt_t;
+
+/* WSC EAP format */
+typedef struct eap_wsc_fmt {
+ uint8 oui[3];
+ uint32 ouitype;
+ uint8 opcode;
+ uint8 flags;
+ uint8 data[1];
+} PACKED_STRUCT eap_wsc_fmt_t;
+
+/* EAPOL-Key */
+typedef struct eapol_key_hdr {
+ uint8 type; /* Key Descriptor Type */
+ uint16 key_info; /* Key Information (unaligned) */
+ uint16 key_len; /* Key Length (unaligned) */
+ uint8 replay[WPA_KEY_REPLAY_LEN]; /* Replay Counter */
+ uint8 nonce[WPA_KEY_NONCE_LEN]; /* Nonce */
+ uint8 iv[WPA_KEY_IV_LEN]; /* Key IV */
+ uint8 rsc[WPA_KEY_RSC_LEN]; /* Key RSC */
+ uint8 id[WPA_KEY_ID_LEN]; /* WPA:Key ID, 802.11i/WPA2: Reserved */
+ uint8 mic[WPA_KEY_MIC_LEN]; /* Key MIC */
+ uint16 data_len; /* Key Data Length */
+ uint8 data[WPA_KEY_DATA_LEN]; /* Key data */
+} PACKED_STRUCT eapol_key_hdr_t;
+
+typedef struct hdr_fmt {
+ struct ipv4_hdr iph;
+ struct bcmudp_hdr udph;
+} PACKED_STRUCT hdr_fmt_t;
+
+msg_eapol_t
+dhd_is_4way_msg(uint8 *pktdata)
+{
+ eapol_header_t *eapol_hdr;
+ eapol_key_hdr_t *eap_key;
+ msg_eapol_t type = EAPOL_OTHER;
+ bool pair, ack, mic, kerr, req, sec, install;
+ uint16 key_info;
+
+ if (!pktdata) {
+ DHD_PKTDUMP(("%s: pktdata is NULL\n", __FUNCTION__));
+ return type;
+ }
+
+ eapol_hdr = (eapol_header_t *)pktdata;
+ eap_key = (eapol_key_hdr_t *)(eapol_hdr->body);
+ if (eap_key->type != EAPOL_WPA2_KEY && eap_key->type != EAPOL_WPA_KEY) {
+ return type;
+ }
+
+ key_info = hton16(eap_key->key_info);
+ pair = !!(key_info & KEYINFO_TYPE_MASK);
+ ack = !!(key_info & KEYINFO_KEYACK_MASK);
+ mic = !!(key_info & KEYINFO_KEYMIC_MASK);
+ kerr = !!(key_info & KEYINFO_ERROR_MASK);
+ req = !!(key_info & KEYINFO_REQ_MASK);
+ sec = !!(key_info & KEYINFO_SECURE_MASK);
+ install = !!(key_info & KEYINFO_INSTALL_MASK);
+
+ if (eap_key->type == EAPOL_WPA2_KEY) {
+ if (pair && !install && ack && !mic && !sec && !kerr && !req) {
+ type = EAPOL_4WAY_M1;
+ } else if (pair && !install && !ack && mic && !sec && !kerr && !req) {
+ type = EAPOL_4WAY_M2;
+ } else if (pair && ack && mic && sec && !kerr && !req) {
+ type = EAPOL_4WAY_M3;
+ } else if (pair && !install && !ack && mic && sec && !req && !kerr) {
+ type = EAPOL_4WAY_M4;
+ } else if (!pair && !install && ack && mic && sec && !req && !kerr) {
+ type = EAPOL_GROUPKEY_M1;
+ } else if (!pair && !install && !ack && mic && sec && !req && !kerr) {
+ type = EAPOL_GROUPKEY_M2;
+ } else {
+ type = EAPOL_OTHER;
+ if (dump_msg_level & DUMP_EAPOL_VAL) {
+ printf("WPA2: key_info=0x%x, pair=%d, ack=%d, mic=%d, sec=%d, kerr=%d, req=%d\n",
+ key_info, pair, ack, mic, sec, kerr, req);
+ }
+ }
+ }
+ else if (eap_key->type == EAPOL_WPA_KEY) {
+ if (pair && !install && ack && !mic && !sec && !kerr && !req) {
+ type = EAPOL_4WAY_M1;
+ } else if (pair && !install && !ack && mic && !sec && !kerr && !req && eap_key->data_len) {
+ type = EAPOL_4WAY_M2;
+ } else if (pair && install && ack && mic && !sec && !kerr && !req) {
+ type = EAPOL_4WAY_M3;
+ } else if (pair && !install && !ack && mic && !sec && !req && !kerr) {
+ type = EAPOL_4WAY_M4;
+ } else if (!pair && !install && ack && mic && sec && !req && !kerr) {
+ type = EAPOL_GROUPKEY_M1;
+ } else if (!pair && !install && !ack && mic && sec && !req && !kerr) {
+ type = EAPOL_GROUPKEY_M2;
+ } else {
+ type = EAPOL_OTHER;
+ if (dump_msg_level & DUMP_EAPOL_VAL) {
+ printf("WPA: key_info=0x%x, pair=%d, ack=%d, mic=%d, sec=%d, kerr=%d, req=%d\n",
+ key_info, pair, ack, mic, sec, kerr, req);
+ }
+ }
+ }
+ else {
+ type = EAPOL_OTHER;
+ if (dump_msg_level & DUMP_EAPOL_VAL) {
+ printf("OTHER: key_info=0x%x, pair=%d, ack=%d, mic=%d, sec=%d, kerr=%d, req=%d\n",
+ key_info, pair, ack, mic, sec, kerr, req);
+ }
+ }
+
+ return type;
+}
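+
+/*
+ * Worked example (editorial sketch): for a WPA2 M1 the key_info field
+ * typically reads 0x008a, i.e.
+ *
+ *   pair (KEYINFO_TYPE_MASK)   = 1  pairwise key
+ *   ack  (KEYINFO_KEYACK_MASK) = 1  authenticator expects a reply
+ *   mic, sec, install, kerr, req = 0
+ *
+ * which matches the first WPA2 branch above and yields EAPOL_4WAY_M1.
+ */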
+
+void
+dhd_dump_pkt(dhd_pub_t *dhdp, int ifidx, uint8 *pktdata, uint32 pktlen,
+ bool tx, uint32 *pkthash, uint16 *pktfate)
+{
+ struct ether_header *eh;
+ uint16 ether_type;
+
+ if (!pktdata || pktlen < ETHER_HDR_LEN) {
+ return;
+ }
+
+#if defined(BCMPCIE) && defined(DHD_PKT_LOGGING)
+ if (tx && !pkthash && !pktfate) {
+ return;
+ }
+#endif /* BCMPCIE && DHD_PKT_LOGGING */
+
+ eh = (struct ether_header *)pktdata;
+ ether_type = ntoh16(eh->ether_type);
+ if (ether_type == ETHER_TYPE_802_1X) {
+ dhd_dump_eapol_message(dhdp, ifidx, pktdata, pktlen,
+ tx, pkthash, pktfate);
+ }
+ if (ether_type == ETHER_TYPE_IP) {
+ if (dhd_check_dhcp(pktdata)) {
+ dhd_dhcp_dump(dhdp, ifidx, pktdata, tx, pkthash, pktfate);
+ } else if (dhd_check_icmp(pktdata)) {
+ dhd_icmp_dump(dhdp, ifidx, pktdata, tx, pkthash, pktfate);
+ } else if (dhd_check_dns(pktdata)) {
+ dhd_dns_dump(dhdp, ifidx, pktdata, tx, pkthash, pktfate);
+ }
+ }
+ if (ether_type == ETHER_TYPE_ARP) {
+ if (dhd_check_arp(pktdata, ether_type)) {
+ dhd_arp_dump(dhdp, ifidx, pktdata, tx, pkthash, pktfate);
+ }
+ }
+ dhd_trx_pkt_dump(dhdp, ifidx, pktdata, pktlen, tx);
+}
+
+#ifdef DHD_PKTDUMP_ROAM
+static void
+dhd_dump_pkt_cnts_inc(dhd_pub_t *dhdp, bool tx, uint16 *pktfate, uint16 pkttype)
+{
+ pkt_cnts_log_t *pktcnts;
+ pkt_cnt_t *cnt;
+
+ if (!dhdp) {
+ DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ pktcnts = (pkt_cnts_log_t *)(dhdp->pktcnts);
+ if (!pktcnts) {
+ DHD_ERROR(("%s: pktcnts is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ if (!pktcnts->enabled || (tx && !pktfate)) {
+ return;
+ }
+
+ if (pkttype == PKT_CNT_TYPE_ARP) {
+ cnt = (pkt_cnt_t *)&pktcnts->arp_cnt;
+ } else if (pkttype == PKT_CNT_TYPE_DNS) {
+ cnt = (pkt_cnt_t *)&pktcnts->dns_cnt;
+ } else {
+ /* invalid packet type */
+ return;
+ }
+
+ if (tx) {
+ TX_FATE_ACKED(pktfate) ? cnt->tx_cnt++ : cnt->tx_err_cnt++;
+ } else {
+ cnt->rx_cnt++;
+ }
+}
+
+static void
+dhd_dump_pkt_timer(unsigned long data)
+{
+ dhd_pub_t *dhdp = (dhd_pub_t *)data;
+ pkt_cnts_log_t *pktcnts = (pkt_cnts_log_t *)(dhdp->pktcnts);
+
+ pktcnts->enabled = FALSE;
+
+ /* print out the packet counter value */
+ DHD_PKTDUMP(("============= PACKET COUNT SUMMARY ============\n"));
+ DHD_PKTDUMP(("- Reason: %s\n", pkt_cnt_msg[pktcnts->reason]));
+ DHD_PKTDUMP(("- Duration: %d msec(s)\n", PKT_CNT_TIMER_INTERNVAL_MS));
+ DHD_PKTDUMP(("- ARP PACKETS: tx_success:%d tx_fail:%d rx_cnt:%d\n",
+ pktcnts->arp_cnt.tx_cnt, pktcnts->arp_cnt.tx_err_cnt,
+ pktcnts->arp_cnt.rx_cnt));
+ DHD_PKTDUMP(("- DNS PACKETS: tx_success:%d tx_fail:%d rx_cnt:%d\n",
+ pktcnts->dns_cnt.tx_cnt, pktcnts->dns_cnt.tx_err_cnt,
+ pktcnts->dns_cnt.rx_cnt));
+ DHD_PKTDUMP(("============= END OF COUNT SUMMARY ============\n"));
+}
+
+void
+dhd_dump_mod_pkt_timer(dhd_pub_t *dhdp, uint16 rsn)
+{
+ pkt_cnts_log_t *pktcnts;
+
+ if (!dhdp || !dhdp->pktcnts) {
+ DHD_ERROR(("%s: dhdp or dhdp->pktcnts is NULL\n",
+ __FUNCTION__));
+ return;
+ }
+
+ if (!PKT_CNT_RSN_VALID(rsn)) {
+ DHD_ERROR(("%s: invalid reason code %d\n",
+ __FUNCTION__, rsn));
+ return;
+ }
+
+ pktcnts = (pkt_cnts_log_t *)(dhdp->pktcnts);
+ if (timer_pending(&pktcnts->pktcnt_timer)) {
+ del_timer_sync(&pktcnts->pktcnt_timer);
+ }
+
+ bzero(&pktcnts->arp_cnt, sizeof(pkt_cnt_t));
+ bzero(&pktcnts->dns_cnt, sizeof(pkt_cnt_t));
+ pktcnts->reason = rsn;
+ pktcnts->enabled = TRUE;
+ mod_timer(&pktcnts->pktcnt_timer,
+ jiffies + msecs_to_jiffies(PKT_CNT_TIMER_INTERVAL_MS));
+ DHD_PKTDUMP(("%s: Arm the pktcnt timer. reason=%d\n",
+ __FUNCTION__, rsn));
+}
+
+void
+dhd_dump_pkt_init(dhd_pub_t *dhdp)
+{
+ pkt_cnts_log_t *pktcnts;
+
+ if (!dhdp) {
+ DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ pktcnts = (pkt_cnts_log_t *)MALLOCZ(dhdp->osh, sizeof(pkt_cnts_log_t));
+ if (!pktcnts) {
+ DHD_ERROR(("%s: failed to allocate memory for pktcnts\n",
+ __FUNCTION__));
+ return;
+ }
+
+ /* init timers */
+ init_timer_compat(&pktcnts->pktcnt_timer, dhd_dump_pkt_timer, dhdp);
+ dhdp->pktcnts = pktcnts;
+}
+
+void
+dhd_dump_pkt_deinit(dhd_pub_t *dhdp)
+{
+ pkt_cnts_log_t *pktcnts;
+
+ if (!dhdp || !dhdp->pktcnts) {
+ DHD_ERROR(("%s: dhdp or pktcnts is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ pktcnts = (pkt_cnts_log_t *)(dhdp->pktcnts);
+ pktcnts->enabled = FALSE;
+ del_timer_sync(&pktcnts->pktcnt_timer);
+ MFREE(dhdp->osh, dhdp->pktcnts, sizeof(pkt_cnts_log_t));
+ dhdp->pktcnts = NULL;
+}
+
+void
+dhd_dump_pkt_clear(dhd_pub_t *dhdp)
+{
+ pkt_cnts_log_t *pktcnts;
+
+ if (!dhdp || !dhdp->pktcnts) {
+ DHD_ERROR(("%s: dhdp or pktcnts is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ pktcnts = (pkt_cnts_log_t *)(dhdp->pktcnts);
+ pktcnts->enabled = FALSE;
+ del_timer_sync(&pktcnts->pktcnt_timer);
+ pktcnts->reason = 0;
+ bzero(&pktcnts->arp_cnt, sizeof(pkt_cnt_t));
+ bzero(&pktcnts->dns_cnt, sizeof(pkt_cnt_t));
+}
+
+bool
+dhd_dump_pkt_enabled(dhd_pub_t *dhdp)
+{
+ pkt_cnts_log_t *pktcnts;
+
+ if (!dhdp || !dhdp->pktcnts) {
+ return FALSE;
+ }
+
+ pktcnts = (pkt_cnts_log_t *)(dhdp->pktcnts);
+
+ return pktcnts->enabled;
+}
+#else
+static INLINE void
+dhd_dump_pkt_cnts_inc(dhd_pub_t *dhdp, bool tx, uint16 *pktfate, uint16 pkttype) { }
+static INLINE bool
+dhd_dump_pkt_enabled(dhd_pub_t *dhdp) { return FALSE; }
+#endif /* DHD_PKTDUMP_ROAM */
+
+#ifdef DHD_8021X_DUMP
+static void
+dhd_dump_wsc_message(dhd_pub_t *dhd, int ifidx, uint8 *pktdata,
+ uint32 pktlen, bool tx, uint32 *pkthash, uint16 *pktfate)
+{
+ eapol_header_t *eapol_hdr;
+ eap_header_fmt_t *eap_hdr;
+ eap_wsc_fmt_t *eap_wsc;
+ char *ifname;
+ uint16 eap_len;
+ bool cond;
+ char seabuf[ETHER_ADDR_STR_LEN]="";
+ char deabuf[ETHER_ADDR_STR_LEN]="";
+
+ if (!pktdata) {
+ DHD_ERROR(("%s: pktdata is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ if (pktlen < (ETHER_HDR_LEN + EAPOL_HDR_LEN)) {
+ DHD_ERROR(("%s: invalid pkt length\n", __FUNCTION__));
+ return;
+ }
+
+ bcm_ether_ntoa((struct ether_addr *)pktdata, deabuf);
+ bcm_ether_ntoa((struct ether_addr *)(pktdata+6), seabuf);
+
+ eapol_hdr = (eapol_header_t *)pktdata;
+ eap_hdr = (eap_header_fmt_t *)(eapol_hdr->body);
+ if (eap_hdr->type != EAP_TYPE_EXP) {
+ return;
+ }
+
+ eap_len = ntoh16(eap_hdr->len);
+ if (eap_len < EAP_WSC_MIN_DATA_LEN) {
+ return;
+ }
+
+ eap_wsc = (eap_wsc_fmt_t *)(eap_hdr->data);
+ if (bcmp(eap_wsc->oui, (const uint8 *)WFA_VID, WFA_VID_LEN) ||
+ (ntoh32(eap_wsc->ouitype) != WFA_VTYPE)) {
+ return;
+ }
+
+ if (eap_wsc->flags) {
+ return;
+ }
+
+ ifname = dhd_ifname(dhd, ifidx);
+ cond = (tx && pktfate) ? FALSE : TRUE;
+
+ if (eap_wsc->opcode == WSC_OPCODE_MSG) {
+ const uint8 *tlv_buf = (const uint8 *)(eap_wsc->data);
+ const uint8 *msg;
+ uint16 msglen;
+ uint16 wsc_data_len = (uint16)(eap_len - EAP_HDR_LEN - EAP_WSC_DATA_OFFSET);
+ bcm_xtlv_opts_t opt = BCM_XTLV_OPTION_IDBE | BCM_XTLV_OPTION_LENBE;
+
+ msg = bcm_get_data_from_xtlv_buf(tlv_buf, wsc_data_len,
+ WSC_ATTR_MSG, &msglen, opt);
+ if (msg && msglen) {
+ switch (*msg) {
+ case WSC_MSG_M1:
+#ifdef WL_EXT_IAPSTA
+ wl_ext_update_conn_state(dhd, ifidx, CONN_STATE_WPS_M1);
+#endif
+ DHD_STATLOG_DATA(dhd, ST(WPS_M1), ifidx, tx, cond);
+ EAP_PRINT("EAP Packet, WPS M1");
+ break;
+ case WSC_MSG_M2:
+#ifdef WL_EXT_IAPSTA
+ wl_ext_update_conn_state(dhd, ifidx, CONN_STATE_WPS_M2);
+#endif
+ DHD_STATLOG_DATA(dhd, ST(WPS_M2), ifidx, tx, cond);
+ EAP_PRINT("EAP Packet, WPS M2");
+ break;
+ case WSC_MSG_M3:
+#ifdef WL_EXT_IAPSTA
+ wl_ext_update_conn_state(dhd, ifidx, CONN_STATE_WPS_M3);
+#endif
+ DHD_STATLOG_DATA(dhd, ST(WPS_M3), ifidx, tx, cond);
+ EAP_PRINT("EAP Packet, WPS M3");
+ break;
+ case WSC_MSG_M4:
+#ifdef WL_EXT_IAPSTA
+ wl_ext_update_conn_state(dhd, ifidx, CONN_STATE_WPS_M4);
+#endif
+ DHD_STATLOG_DATA(dhd, ST(WPS_M4), ifidx, tx, cond);
+ EAP_PRINT("EAP Packet, WPS M4");
+ break;
+ case WSC_MSG_M5:
+#ifdef WL_EXT_IAPSTA
+ wl_ext_update_conn_state(dhd, ifidx, CONN_STATE_WPS_M5);
+#endif
+ DHD_STATLOG_DATA(dhd, ST(WPS_M5), ifidx, tx, cond);
+ EAP_PRINT("EAP Packet, WPS M5");
+ break;
+ case WSC_MSG_M6:
+#ifdef WL_EXT_IAPSTA
+ wl_ext_update_conn_state(dhd, ifidx, CONN_STATE_WPS_M6);
+#endif
+ DHD_STATLOG_DATA(dhd, ST(WPS_M6), ifidx, tx, cond);
+ EAP_PRINT("EAP Packet, WPS M6");
+ break;
+ case WSC_MSG_M7:
+#ifdef WL_EXT_IAPSTA
+ wl_ext_update_conn_state(dhd, ifidx, CONN_STATE_WPS_M7);
+#endif
+ DHD_STATLOG_DATA(dhd, ST(WPS_M7), ifidx, tx, cond);
+ EAP_PRINT("EAP Packet, WPS M7");
+ break;
+ case WSC_MSG_M8:
+#ifdef WL_EXT_IAPSTA
+ wl_ext_update_conn_state(dhd, ifidx, CONN_STATE_WPS_M8);
+#endif
+ DHD_STATLOG_DATA(dhd, ST(WPS_M8), ifidx, tx, cond);
+ EAP_PRINT("EAP Packet, WPS M8");
+ break;
+ default:
+ EAP_PRINT("EAP Packet, WPS MSG TYPE %d", *msg);
+ break;
+ }
+ }
+ } else if (eap_wsc->opcode == WSC_OPCODE_START) {
+#ifdef WL_EXT_IAPSTA
+ wl_ext_update_conn_state(dhd, ifidx, CONN_STATE_WSC_START);
+#endif
+ DHD_STATLOG_DATA(dhd, ST(WSC_START), ifidx, tx, cond);
+ EAP_PRINT("EAP Packet, WSC Start");
+ } else if (eap_wsc->opcode == WSC_OPCODE_DONE) {
+#ifdef WL_EXT_IAPSTA
+ wl_ext_update_conn_state(dhd, ifidx, CONN_STATE_WSC_DONE);
+#endif
+ DHD_STATLOG_DATA(dhd, ST(WSC_DONE), ifidx, tx, cond);
+ EAP_PRINT("EAP Packet, WSC Done");
+ } else {
+ EAP_PRINT("EAP Packet, WSC opcode=%d", eap_wsc->opcode);
+ }
+}
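+
+/*
+ * Editorial sketch: the WSC payload is a big-endian TLV stream; the
+ * lookup above extracts attribute WSC_ATTR_MSG, whose single value byte
+ * selects the switch arm. Assuming the WPS-spec encoding (Message Type
+ * attribute 0x1022, M1 = 0x04), a matching fragment looks like:
+ *
+ *   10 22 00 01 04  -> attribute 0x1022, length 1, value 0x04 (WSC_MSG_M1)
+ */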
+
+static void
+dhd_dump_eap_packet(dhd_pub_t *dhd, int ifidx, uint8 *pktdata,
+ uint32 pktlen, bool tx, uint32 *pkthash, uint16 *pktfate)
+{
+ eapol_header_t *eapol_hdr;
+ eap_header_fmt_t *eap_hdr;
+ char *ifname;
+ bool cond;
+ char seabuf[ETHER_ADDR_STR_LEN]="";
+ char deabuf[ETHER_ADDR_STR_LEN]="";
+
+ if (!pktdata) {
+ DHD_PKTDUMP(("%s: pktdata is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ bcm_ether_ntoa((struct ether_addr *)pktdata, deabuf);
+ bcm_ether_ntoa((struct ether_addr *)(pktdata+6), seabuf);
+
+ eapol_hdr = (eapol_header_t *)pktdata;
+ eap_hdr = (eap_header_fmt_t *)(eapol_hdr->body);
+ ifname = dhd_ifname(dhd, ifidx);
+ cond = (tx && pktfate) ? FALSE : TRUE;
+
+ if (eap_hdr->code == EAP_CODE_REQUEST ||
+ eap_hdr->code == EAP_CODE_RESPONSE) {
+ bool isreq = (eap_hdr->code == EAP_CODE_REQUEST);
+ switch (eap_hdr->type) {
+ case EAP_TYPE_IDENT:
+ if (isreq) {
+#ifdef WL_EXT_IAPSTA
+ wl_ext_update_conn_state(dhd, ifidx, CONN_STATE_REQID);
+#endif
+ DHD_STATLOG_DATA(dhd, ST(EAP_REQ_IDENTITY), ifidx, tx, cond);
+ EAP_PRINT("EAP Packet, Request, Identity");
+ } else {
+#ifdef WL_EXT_IAPSTA
+ wl_ext_update_conn_state(dhd, ifidx, CONN_STATE_RSPID);
+#endif
+ DHD_STATLOG_DATA(dhd, ST(EAP_RESP_IDENTITY), ifidx, tx, cond);
+ EAP_PRINT("EAP Packet, Response, Identity");
+ }
+ break;
+ case EAP_TYPE_TLS:
+ if (isreq) {
+ DHD_STATLOG_DATA(dhd, ST(EAP_REQ_TLS), ifidx, tx, cond);
+ EAP_PRINT("EAP Packet, Request, TLS");
+ } else {
+ DHD_STATLOG_DATA(dhd, ST(EAP_RESP_TLS), ifidx, tx, cond);
+ EAP_PRINT("EAP Packet, Response, TLS");
+ }
+ break;
+ case EAP_TYPE_LEAP:
+ if (isreq) {
+ DHD_STATLOG_DATA(dhd, ST(EAP_REQ_LEAP), ifidx, tx, cond);
+ EAP_PRINT("EAP Packet, Request, LEAP");
+ } else {
+ DHD_STATLOG_DATA(dhd, ST(EAP_RESP_LEAP), ifidx, tx, cond);
+ EAP_PRINT("EAP Packet, Response, LEAP");
+ }
+ break;
+ case EAP_TYPE_TTLS:
+ if (isreq) {
+ DHD_STATLOG_DATA(dhd, ST(EAP_REQ_TTLS), ifidx, tx, cond);
+ EAP_PRINT("EAP Packet, Request, TTLS");
+ } else {
+ DHD_STATLOG_DATA(dhd, ST(EAP_RESP_TTLS), ifidx, tx, cond);
+ EAP_PRINT("EAP Packet, Response, TTLS");
+ }
+ break;
+ case EAP_TYPE_AKA:
+ if (isreq) {
+ DHD_STATLOG_DATA(dhd, ST(EAP_REQ_AKA), ifidx, tx, cond);
+ EAP_PRINT("EAP Packet, Request, AKA");
+ } else {
+ DHD_STATLOG_DATA(dhd, ST(EAP_RESP_AKA), ifidx, tx, cond);
+ EAP_PRINT("EAP Packet, Response, AKA");
+ }
+ break;
+ case EAP_TYPE_PEAP:
+ if (isreq) {
+ DHD_STATLOG_DATA(dhd, ST(EAP_REQ_PEAP), ifidx, tx, cond);
+ EAP_PRINT("EAP Packet, Request, PEAP");
+ } else {
+ DHD_STATLOG_DATA(dhd, ST(EAP_RESP_PEAP), ifidx, tx, cond);
+ EAP_PRINT("EAP Packet, Response, PEAP");
+ }
+ break;
+ case EAP_TYPE_FAST:
+ if (isreq) {
+ DHD_STATLOG_DATA(dhd, ST(EAP_REQ_FAST), ifidx, tx, cond);
+ EAP_PRINT("EAP Packet, Request, FAST");
+ } else {
+ DHD_STATLOG_DATA(dhd, ST(EAP_RESP_FAST), ifidx, tx, cond);
+ EAP_PRINT("EAP Packet, Response, FAST");
+ }
+ break;
+ case EAP_TYPE_PSK:
+ if (isreq) {
+ DHD_STATLOG_DATA(dhd, ST(EAP_REQ_PSK), ifidx, tx, cond);
+ EAP_PRINT("EAP Packet, Request, PSK");
+ } else {
+ DHD_STATLOG_DATA(dhd, ST(EAP_RESP_PSK), ifidx, tx, cond);
+ EAP_PRINT("EAP Packet, Response, PSK");
+ }
+ break;
+ case EAP_TYPE_AKAP:
+ if (isreq) {
+ DHD_STATLOG_DATA(dhd, ST(EAP_REQ_AKAP), ifidx, tx, cond);
+ EAP_PRINT("EAP Packet, Request, AKAP");
+ } else {
+ DHD_STATLOG_DATA(dhd, ST(EAP_RESP_AKAP), ifidx, tx, cond);
+ EAP_PRINT("EAP Packet, Response, AKAP");
+ }
+ break;
+ case EAP_TYPE_EXP:
+ dhd_dump_wsc_message(dhd, ifidx, pktdata, pktlen, tx,
+ pkthash, pktfate);
+ break;
+ default:
+ EAP_PRINT("EAP Packet, EAP TYPE %d", eap_hdr->type);
+ break;
+ }
+ } else if (eap_hdr->code == EAP_CODE_SUCCESS) {
+ DHD_STATLOG_DATA(dhd, ST(EAP_SUCCESS), ifidx, tx, cond);
+ EAP_PRINT("EAP Packet, Success");
+ } else if (eap_hdr->code == EAP_CODE_FAILURE) {
+ DHD_STATLOG_DATA(dhd, ST(EAP_FAILURE), ifidx, tx, cond);
+ EAP_PRINT("EAP Packet, Failure");
+ } else {
+ EAP_PRINT("EAP Packet, EAP CODE %d", eap_hdr->code);
+ }
+}
+
+static void
+dhd_dump_eapol_4way_message(dhd_pub_t *dhd, int ifidx, uint8 *pktdata, bool tx,
+ uint32 *pkthash, uint16 *pktfate)
+{
+ eapol_header_t *eapol_hdr;
+ eapol_key_hdr_t *eap_key;
+ msg_eapol_t type;
+ char *ifname;
+ bool cond;
+ char seabuf[ETHER_ADDR_STR_LEN]="";
+ char deabuf[ETHER_ADDR_STR_LEN]="";
+
+ if (!pktdata) {
+ DHD_PKTDUMP(("%s: pktdata is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ bcm_ether_ntoa((struct ether_addr *)pktdata, deabuf);
+ bcm_ether_ntoa((struct ether_addr *)(pktdata+6), seabuf);
+
+ type = dhd_is_4way_msg(pktdata);
+ ifname = dhd_ifname(dhd, ifidx);
+ eapol_hdr = (eapol_header_t *)pktdata;
+ eap_key = (eapol_key_hdr_t *)(eapol_hdr->body);
+ cond = (tx && pktfate) ? FALSE : TRUE;
+
+ if (eap_key->type != EAPOL_WPA2_KEY && eap_key->type != EAPOL_WPA_KEY) {
+ EAP_PRINT_OTHER("NON EAPOL_WPA2_KEY %d", eap_key->type);
+ return;
+ }
+
+ switch (type) {
+ case EAPOL_4WAY_M1:
+#ifdef WL_EXT_IAPSTA
+ wl_ext_update_conn_state(dhd, ifidx, CONN_STATE_4WAY_M1);
+#endif
+ DHD_STATLOG_DATA(dhd, ST(EAPOL_M1), ifidx, tx, cond);
+ EAP_PRINT("EAPOL Packet, 4-way handshake, M1");
+ break;
+ case EAPOL_4WAY_M2:
+#ifdef WL_EXT_IAPSTA
+ wl_ext_update_conn_state(dhd, ifidx, CONN_STATE_4WAY_M2);
+#endif
+ DHD_STATLOG_DATA(dhd, ST(EAPOL_M2), ifidx, tx, cond);
+ EAP_PRINT("EAPOL Packet, 4-way handshake, M2");
+ break;
+ case EAPOL_4WAY_M3:
+#ifdef WL_EXT_IAPSTA
+ wl_ext_update_conn_state(dhd, ifidx, CONN_STATE_4WAY_M3);
+#endif
+ DHD_STATLOG_DATA(dhd, ST(EAPOL_M3), ifidx, tx, cond);
+ EAP_PRINT("EAPOL Packet, 4-way handshake, M3");
+ break;
+ case EAPOL_4WAY_M4:
+#ifdef WL_EXT_IAPSTA
+ wl_ext_update_conn_state(dhd, ifidx, CONN_STATE_4WAY_M4);
+#endif
+ DHD_STATLOG_DATA(dhd, ST(EAPOL_M4), ifidx, tx, cond);
+ EAP_PRINT("EAPOL Packet, 4-way handshake, M4");
+ break;
+ case EAPOL_GROUPKEY_M1:
+#ifdef WL_EXT_IAPSTA
+ wl_ext_update_conn_state(dhd, ifidx, CONN_STATE_GROUPKEY_M1);
+#endif
+ DHD_STATLOG_DATA(dhd, ST(EAPOL_GROUPKEY_M1), ifidx, tx, cond);
+ EAP_PRINT_REPLAY("EAPOL Packet, GROUP Key handshake, M1");
+ break;
+ case EAPOL_GROUPKEY_M2:
+#ifdef WL_EXT_IAPSTA
+ wl_ext_update_conn_state(dhd, ifidx, CONN_STATE_GROUPKEY_M2);
+#endif
+ DHD_STATLOG_DATA(dhd, ST(EAPOL_GROUPKEY_M2), ifidx, tx, cond);
+ EAP_PRINT_REPLAY("EAPOL Packet, GROUP Key handshake, M2");
+ if (ifidx == 0 && tx && pktfate) {
+ dhd_dump_mod_pkt_timer(dhd, PKT_CNT_RSN_GRPKEY_UP);
+ }
+ break;
+ default:
+ DHD_STATLOG_DATA(dhd, ST(8021X_OTHER), ifidx, tx, cond);
+ EAP_PRINT_OTHER("OTHER 4WAY type=%d", type);
+ break;
+ }
+}
+
+void
+dhd_dump_eapol_message(dhd_pub_t *dhd, int ifidx, uint8 *pktdata,
+ uint32 pktlen, bool tx, uint32 *pkthash, uint16 *pktfate)
+{
+ char *ifname;
+ eapol_header_t *eapol_hdr = (eapol_header_t *)pktdata;
+ bool cond;
+ char seabuf[ETHER_ADDR_STR_LEN]="";
+ char deabuf[ETHER_ADDR_STR_LEN]="";
+
+ if (!pktdata) {
+ DHD_ERROR(("%s: pktdata is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ bcm_ether_ntoa((struct ether_addr *)pktdata, deabuf);
+ bcm_ether_ntoa((struct ether_addr *)(pktdata+6), seabuf);
+
+ eapol_hdr = (eapol_header_t *)pktdata;
+ ifname = dhd_ifname(dhd, ifidx);
+ cond = (tx && pktfate) ? FALSE : TRUE;
+
+ if (eapol_hdr->type == EAP_PACKET) {
+ dhd_dump_eap_packet(dhd, ifidx, pktdata, pktlen, tx,
+ pkthash, pktfate);
+ } else if (eapol_hdr->type == EAPOL_START) {
+ DHD_STATLOG_DATA(dhd, ST(EAPOL_START), ifidx, tx, cond);
+ EAP_PRINT("EAP Packet, EAPOL-Start");
+ } else if (eapol_hdr->type == EAPOL_KEY) {
+ dhd_dump_eapol_4way_message(dhd, ifidx, pktdata, tx,
+ pkthash, pktfate);
+ } else {
+ DHD_STATLOG_DATA(dhd, ST(8021X_OTHER), ifidx, tx, cond);
+ EAP_PRINT_OTHER("OTHER 8021X");
+ }
+}
+#endif /* DHD_8021X_DUMP */
+
+bool
+dhd_check_ip_prot(uint8 *pktdata, uint16 ether_type)
+{
+ hdr_fmt_t *b = (hdr_fmt_t *)&pktdata[ETHER_HDR_LEN];
+ struct ipv4_hdr *iph = &b->iph;
+
+ /* check IP header */
+ if ((ether_type != ETHER_TYPE_IP) ||
+ (IPV4_HLEN(iph) < IPV4_HLEN_MIN) ||
+ (IP_VER(iph) != IP_VER_4)) {
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+bool
+dhd_check_dhcp(uint8 *pktdata)
+{
+ hdr_fmt_t *b = (hdr_fmt_t *)&pktdata[ETHER_HDR_LEN];
+ struct ipv4_hdr *iph = &b->iph;
+
+ if (IPV4_PROT(iph) != IP_PROT_UDP) {
+ return FALSE;
+ }
+
+ /* check UDP port for bootp (67, 68) */
+ if (b->udph.src_port != htons(DHCP_PORT_SERVER) &&
+ b->udph.src_port != htons(DHCP_PORT_CLIENT) &&
+ b->udph.dst_port != htons(DHCP_PORT_SERVER) &&
+ b->udph.dst_port != htons(DHCP_PORT_CLIENT)) {
+ return FALSE;
+ }
+
+ /* check header length */
+ if (ntohs(iph->tot_len) < ntohs(b->udph.len) + sizeof(struct bcmudp_hdr)) {
+ return FALSE;
+ }
+ return TRUE;
+}
+
+#ifdef DHD_DHCP_DUMP
+#define BOOTP_CHADDR_LEN 16
+#define BOOTP_SNAME_LEN 64
+#define BOOTP_FILE_LEN 128
+#define BOOTP_MIN_DHCP_OPT_LEN 312
+#define BOOTP_MAGIC_COOKIE_LEN 4
+
+#define DHCP_MSGTYPE_DISCOVER 1
+#define DHCP_MSGTYPE_OFFER 2
+#define DHCP_MSGTYPE_REQUEST 3
+#define DHCP_MSGTYPE_DECLINE 4
+#define DHCP_MSGTYPE_ACK 5
+#define DHCP_MSGTYPE_NAK 6
+#define DHCP_MSGTYPE_RELEASE 7
+#define DHCP_MSGTYPE_INFORM 8
+
+#define DHCP_PRINT(str) \
+ do { \
+ if (tx) { \
+ DHD_PKTDUMP(("[%s] " str " %8s[%8s] [TX] : %s(%s) %s %s(%s)"TXFATE_FMT"\n", \
+ ifname, typestr, opstr, tx?sabuf:dabuf, tx?seabuf:deabuf, \
+ tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf, \
+ TX_PKTHASH(pkthash), TX_FATE(pktfate))); \
+ } else { \
+ DHD_PKTDUMP(("[%s] " str " %8s[%8s] [RX] : %s(%s) %s %s(%s)\n", \
+ ifname, typestr, opstr, tx?sabuf:dabuf, tx?seabuf:deabuf, \
+ tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf)); \
+ } \
+ } while (0)
+
+typedef struct bootp_fmt {
+ struct ipv4_hdr iph;
+ struct bcmudp_hdr udph;
+ uint8 op;
+ uint8 htype;
+ uint8 hlen;
+ uint8 hops;
+ uint32 transaction_id;
+ uint16 secs;
+ uint16 flags;
+ uint32 client_ip;
+ uint32 assigned_ip;
+ uint32 server_ip;
+ uint32 relay_ip;
+ uint8 hw_address[BOOTP_CHADDR_LEN];
+ uint8 server_name[BOOTP_SNAME_LEN];
+ uint8 file_name[BOOTP_FILE_LEN];
+ uint8 options[BOOTP_MIN_DHCP_OPT_LEN];
+} PACKED_STRUCT bootp_fmt_t;
+
+static const uint8 bootp_magic_cookie[4] = { 99, 130, 83, 99 };
+static char dhcp_ops[][10] = {
+ "NA", "REQUEST", "REPLY"
+};
+static char dhcp_types[][10] = {
+ "NA", "DISCOVER", "OFFER", "REQUEST", "DECLINE", "ACK", "NAK", "RELEASE", "INFORM"
+};
+
+#ifdef DHD_STATUS_LOGGING
+static const int dhcp_types_stat[9] = {
+ ST(INVALID), ST(DHCP_DISCOVER), ST(DHCP_OFFER), ST(DHCP_REQUEST),
+ ST(DHCP_DECLINE), ST(DHCP_ACK), ST(DHCP_NAK), ST(DHCP_RELEASE),
+ ST(DHCP_INFORM)
+};
+#endif /* DHD_STATUS_LOGGING */
+
+void
+dhd_dhcp_dump(dhd_pub_t *dhdp, int ifidx, uint8 *pktdata, bool tx,
+ uint32 *pkthash, uint16 *pktfate)
+{
+ bootp_fmt_t *b = (bootp_fmt_t *)&pktdata[ETHER_HDR_LEN];
+ struct ipv4_hdr *iph = &b->iph;
+ uint8 *ptr, *opt, *end = (uint8 *) b + ntohs(b->iph.tot_len);
+ int dhcp_type = 0, len, opt_len;
+ char *ifname = NULL, *typestr = NULL, *opstr = NULL;
+ bool cond;
+ char sabuf[20]="", dabuf[20]="";
+ char seabuf[ETHER_ADDR_STR_LEN]="";
+ char deabuf[ETHER_ADDR_STR_LEN]="";
+
+ if (!(dump_msg_level & DUMP_DHCP_VAL))
+ return;
+ bcm_ip_ntoa((struct ipv4_addr *)iph->src_ip, sabuf);
+ bcm_ip_ntoa((struct ipv4_addr *)iph->dst_ip, dabuf);
+ bcm_ether_ntoa((struct ether_addr *)pktdata, deabuf);
+ bcm_ether_ntoa((struct ether_addr *)(pktdata+6), seabuf);
+
+ ifname = dhd_ifname(dhdp, ifidx);
+ cond = (tx && pktfate) ? FALSE : TRUE;
+ len = ntohs(b->udph.len) - sizeof(struct bcmudp_hdr);
+ opt_len = len - (sizeof(*b) - sizeof(struct ipv4_hdr) -
+ sizeof(struct bcmudp_hdr) - sizeof(b->options));
+
+ /* parse bootp options */
+ if (opt_len >= BOOTP_MAGIC_COOKIE_LEN &&
+ !memcmp(b->options, bootp_magic_cookie, BOOTP_MAGIC_COOKIE_LEN)) {
+ ptr = &b->options[BOOTP_MAGIC_COOKIE_LEN];
+ while (ptr < end && *ptr != 0xff) {
+ opt = ptr++;
+ if (*opt == 0) {
+ continue;
+ }
+ ptr += *ptr + 1;
+ if (ptr >= end) {
+ break;
+ }
+ if (*opt == DHCP_OPT_MSGTYPE) {
+ if (opt[1]) {
+ dhcp_type = opt[2];
+ typestr = dhcp_types[dhcp_type];
+ opstr = dhcp_ops[b->op];
+ DHD_STATLOG_DATA(dhdp, dhcp_types_stat[dhcp_type],
+ ifidx, tx, cond);
+ DHCP_PRINT("DHCP");
+ break;
+ }
+ }
+ }
+ }
+}
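+
+/*
+ * Editorial sketch of the option walk above: DHCP options are TLVs
+ * following the 4-byte magic cookie 99.130.83.99, e.g.
+ *
+ *   35 01 05  -> option 53 (DHCP_OPT_MSGTYPE), length 1, value 5 (ACK)
+ *   ff        -> end-of-options marker
+ *
+ * opt[1] holds the option length and opt[2] the first value byte, which
+ * is why dhcp_type is read from opt[2].
+ */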
+#endif /* DHD_DHCP_DUMP */
+
+bool
+dhd_check_icmp(uint8 *pktdata)
+{
+ uint8 *pkt = (uint8 *)&pktdata[ETHER_HDR_LEN];
+ struct ipv4_hdr *iph = (struct ipv4_hdr *)pkt;
+
+ if (IPV4_PROT(iph) != IP_PROT_ICMP) {
+ return FALSE;
+ }
+
+ /* check header length */
+ if (ntohs(iph->tot_len) - IPV4_HLEN(iph) < sizeof(struct bcmicmp_hdr)) {
+ return FALSE;
+ }
+ return TRUE;
+}
+
+#ifdef DHD_ICMP_DUMP
+#define ICMP_TYPE_DEST_UNREACH 3
+#define ICMP_ECHO_SEQ_OFFSET 6
+#define ICMP_ECHO_SEQ(h) (*(uint16 *)((uint8 *)(h) + (ICMP_ECHO_SEQ_OFFSET)))
+#define ICMP_PING_PRINT(str) \
+ do { \
+ if (tx) { \
+ DHD_PKTDUMP_MEM(("[%s] "str " [TX] : %s(%s) %s %s(%s) SEQNUM=%d" \
+ TXFATE_FMT"\n", ifname, tx?sabuf:dabuf, tx?seabuf:deabuf, \
+ tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf, seqnum, \
+ TX_PKTHASH(pkthash), TX_FATE(pktfate))); \
+ } else { \
+ DHD_PKTDUMP_MEM(("[%s] "str " [RX] : %s(%s) %s %s(%s) SEQNUM=%d\n", \
+ ifname, tx?sabuf:dabuf, tx?seabuf:deabuf, \
+ tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf, seqnum)); \
+ } \
+ } while (0)
+
+#define ICMP_PRINT(str) \
+ do { \
+ if (tx) { \
+ DHD_PKTDUMP_MEM(("[%s] "str " [TX] : %s(%s) %s %s(%s) TYPE=%d, CODE=%d" \
+ TXFATE_FMT "\n", ifname, tx?sabuf:dabuf, tx?seabuf:deabuf, \
+ tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf, type, code, \
+ TX_PKTHASH(pkthash), TX_FATE(pktfate))); \
+ } else { \
+ DHD_PKTDUMP_MEM(("[%s] "str " [RX] : %s(%s) %s %s(%s) TYPE=%d," \
+ " CODE=%d\n", ifname, tx?sabuf:dabuf, tx?seabuf:deabuf, \
+ tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf, type, code)); \
+ } \
+ } while (0)
+
+void
+dhd_icmp_dump(dhd_pub_t *dhdp, int ifidx, uint8 *pktdata, bool tx,
+ uint32 *pkthash, uint16 *pktfate)
+{
+ uint8 *pkt = (uint8 *)&pktdata[ETHER_HDR_LEN];
+ struct ipv4_hdr *iph = (struct ipv4_hdr *)pkt;
+ struct bcmicmp_hdr *icmph;
+ char *ifname;
+ bool cond;
+ uint16 seqnum, type, code;
+ char sabuf[20]="", dabuf[20]="";
+ char seabuf[ETHER_ADDR_STR_LEN]="";
+ char deabuf[ETHER_ADDR_STR_LEN]="";
+
+ if (!(dump_msg_level & DUMP_ICMP_VAL))
+ return;
+
+ ifname = dhd_ifname(dhdp, ifidx);
+ cond = (tx && pktfate) ? FALSE : TRUE;
+ icmph = (struct bcmicmp_hdr *)((uint8 *)pkt + sizeof(struct ipv4_hdr));
+ seqnum = 0;
+ type = icmph->type;
+ code = icmph->code;
+ bcm_ip_ntoa((struct ipv4_addr *)iph->src_ip, sabuf);
+ bcm_ip_ntoa((struct ipv4_addr *)iph->dst_ip, dabuf);
+ bcm_ether_ntoa((struct ether_addr *)pktdata, deabuf);
+ bcm_ether_ntoa((struct ether_addr *)(pktdata+6), seabuf);
+ if (type == ICMP_TYPE_ECHO_REQUEST) {
+ seqnum = ntoh16(ICMP_ECHO_SEQ(icmph));
+ DHD_STATLOG_DATA(dhdp, ST(ICMP_PING_REQ), ifidx, tx, cond);
+ ICMP_PING_PRINT("PING REQUEST");
+ } else if (type == ICMP_TYPE_ECHO_REPLY) {
+ seqnum = ntoh16(ICMP_ECHO_SEQ(icmph));
+ DHD_STATLOG_DATA(dhdp, ST(ICMP_PING_RESP), ifidx, tx, cond);
+ ICMP_PING_PRINT("PING REPLY ");
+ } else if (type == ICMP_TYPE_DEST_UNREACH) {
+ DHD_STATLOG_DATA(dhdp, ST(ICMP_DEST_UNREACH), ifidx, tx, cond);
+ ICMP_PRINT("ICMP DEST UNREACH");
+ } else {
+ DHD_STATLOG_DATA(dhdp, ST(ICMP_OTHER), ifidx, tx, cond);
+ ICMP_PRINT("ICMP OTHER");
+ }
+}
+#endif /* DHD_ICMP_DUMP */
+
+bool
+dhd_check_arp(uint8 *pktdata, uint16 ether_type)
+{
+ uint8 *pkt = (uint8 *)&pktdata[ETHER_HDR_LEN];
+ struct bcmarp *arph = (struct bcmarp *)pkt;
+
+ /* validation check */
+ if ((ether_type != ETHER_TYPE_ARP) ||
+ (arph->htype != hton16(HTYPE_ETHERNET)) ||
+ (arph->hlen != ETHER_ADDR_LEN) ||
+ (arph->plen != 4)) {
+ return FALSE;
+ }
+ return TRUE;
+}
+
+#ifdef DHD_ARP_DUMP
+#ifdef BOARD_HIKEY
+/* On Hikey, continuous ARP prints can keep the DPC from being
+ * scheduled, so rate-limit the prints.
+ */
+#define DHD_PKTDUMP_ARP DHD_ERROR_RLMT
+#else
+#define DHD_PKTDUMP_ARP DHD_PKTDUMP
+#endif /* BOARD_HIKEY */
+
+#define ARP_PRINT(str) \
+ do { \
+ if (tx) { \
+ if (dump_enabled && pktfate && !TX_FATE_ACKED(pktfate)) { \
+ DHD_PKTDUMP(("[%s] "str " [TX] : %s(%s) %s %s(%s)"TXFATE_FMT"\n", \
+ ifname, tx?sabuf:dabuf, tx?seabuf:deabuf, \
+ tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf, \
+ TX_PKTHASH(pkthash), TX_FATE(pktfate))); \
+ } else { \
+ DHD_PKTDUMP_MEM(("[%s] "str " [TX] : %s(%s) %s %s(%s)"TXFATE_FMT"\n", \
+ ifname, tx?sabuf:dabuf, tx?seabuf:deabuf, \
+ tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf, \
+ TX_PKTHASH(pkthash), TX_FATE(pktfate))); \
+ } \
+ } else { \
+ DHD_PKTDUMP_MEM(("[%s] "str " [RX] : %s(%s) %s %s(%s)\n", \
+ ifname, tx?sabuf:dabuf, tx?seabuf:deabuf, \
+ tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf)); \
+ } \
+ } while (0)
+
+#define ARP_PRINT_OTHER(str) \
+ do { \
+ if (tx) { \
+ if (dump_enabled && pktfate && !TX_FATE_ACKED(pktfate)) { \
+ DHD_PKTDUMP(("[%s] "str " [TX] : %s(%s) %s %s(%s) op_code=%d" \
+ TXFATE_FMT "\n", ifname, tx?sabuf:dabuf, tx?seabuf:deabuf, \
+ tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf, opcode, \
+ TX_PKTHASH(pkthash), TX_FATE(pktfate))); \
+ } else { \
+ DHD_PKTDUMP_MEM(("[%s] "str " [TX] : %s(%s) %s %s(%s) op_code=%d" \
+ TXFATE_FMT "\n", ifname, tx?sabuf:dabuf, tx?seabuf:deabuf, \
+ tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf, opcode, \
+ TX_PKTHASH(pkthash), TX_FATE(pktfate))); \
+ } \
+ } else { \
+ DHD_PKTDUMP_MEM(("[%s] "str " [RX] : %s(%s) %s %s(%s) op_code=%d\n", \
+ ifname, tx?sabuf:dabuf, tx?seabuf:deabuf, \
+ tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf, opcode)); \
+ } \
+ } while (0)
+
+void
+dhd_arp_dump(dhd_pub_t *dhdp, int ifidx, uint8 *pktdata, bool tx,
+ uint32 *pkthash, uint16 *pktfate)
+{
+ uint8 *pkt = (uint8 *)&pktdata[ETHER_HDR_LEN];
+ struct bcmarp *arph = (struct bcmarp *)pkt;
+ char *ifname;
+ uint16 opcode;
+ bool cond, dump_enabled;
+ char sabuf[20]="", dabuf[20]="";
+ char seabuf[ETHER_ADDR_STR_LEN]="";
+ char deabuf[ETHER_ADDR_STR_LEN]="";
+
+ if (!(dump_msg_level & DUMP_ARP_VAL))
+ return;
+
+ ifname = dhd_ifname(dhdp, ifidx);
+ opcode = ntoh16(arph->oper);
+ cond = (tx && pktfate) ? FALSE : TRUE;
+ dump_enabled = dhd_dump_pkt_enabled(dhdp);
+ bcm_ip_ntoa((struct ipv4_addr *)arph->src_ip, sabuf);
+ bcm_ip_ntoa((struct ipv4_addr *)arph->dst_ip, dabuf);
+ bcm_ether_ntoa((struct ether_addr *)arph->dst_eth, deabuf);
+ bcm_ether_ntoa((struct ether_addr *)arph->src_eth, seabuf);
+ if (opcode == ARP_OPC_REQUEST) {
+ DHD_STATLOG_DATA(dhdp, ST(ARP_REQ), ifidx, tx, cond);
+ ARP_PRINT("ARP REQUEST ");
+ } else if (opcode == ARP_OPC_REPLY) {
+ DHD_STATLOG_DATA(dhdp, ST(ARP_RESP), ifidx, tx, cond);
+ ARP_PRINT("ARP RESPONSE");
+ } else {
+ ARP_PRINT_OTHER("ARP OTHER");
+ }
+
+ if (ifidx == 0) {
+ dhd_dump_pkt_cnts_inc(dhdp, tx, pktfate, PKT_CNT_TYPE_ARP);
+ }
+}
+#endif /* DHD_ARP_DUMP */
+
+bool
+dhd_check_dns(uint8 *pktdata)
+{
+ hdr_fmt_t *dnsh = (hdr_fmt_t *)&pktdata[ETHER_HDR_LEN];
+ struct ipv4_hdr *iph = &dnsh->iph;
+
+ if (IPV4_PROT(iph) != IP_PROT_UDP) {
+ return FALSE;
+ }
+
+ /* check UDP port for DNS */
+ if (dnsh->udph.src_port != hton16(UDP_PORT_DNS) &&
+ dnsh->udph.dst_port != hton16(UDP_PORT_DNS)) {
+ return FALSE;
+ }
+
+ /* check header length */
+ if (ntoh16(iph->tot_len) < (ntoh16(dnsh->udph.len) +
+ sizeof(struct bcmudp_hdr))) {
+ return FALSE;
+ }
+ return TRUE;
+}
+
+#ifdef DHD_DNS_DUMP
+typedef struct dns_fmt {
+ struct ipv4_hdr iph;
+ struct bcmudp_hdr udph;
+ uint16 id;
+ uint16 flags;
+ uint16 qdcount;
+ uint16 ancount;
+ uint16 nscount;
+ uint16 arcount;
+} PACKED_STRUCT dns_fmt_t;
+
+#define DNS_QR_LOC 15
+#define DNS_OPCODE_LOC 11
+#define DNS_RCODE_LOC 0
+#define DNS_QR_MASK ((0x1) << (DNS_QR_LOC))
+#define DNS_OPCODE_MASK ((0xF) << (DNS_OPCODE_LOC))
+#define DNS_RCODE_MASK ((0xF) << (DNS_RCODE_LOC))
+#define GET_DNS_QR(flags) (((flags) & (DNS_QR_MASK)) >> (DNS_QR_LOC))
+#define GET_DNS_OPCODE(flags) (((flags) & (DNS_OPCODE_MASK)) >> (DNS_OPCODE_LOC))
+#define GET_DNS_RCODE(flags) (((flags) & (DNS_RCODE_MASK)) >> (DNS_RCODE_LOC))
+#define DNS_UNASSIGNED_OPCODE(flags) ((GET_DNS_OPCODE(flags) >= (6)))
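+
+/*
+ * Worked example (editorial): response flags 0x8183 decode as
+ *
+ *   GET_DNS_QR(0x8183)     = 1  (response)
+ *   GET_DNS_OPCODE(0x8183) = 0  (QUERY)
+ *   GET_DNS_RCODE(0x8183)  = 3  (NXDOMAIN)
+ */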
+
+static const char dns_opcode_types[][11] = {
+ "QUERY", "IQUERY", "STATUS", "UNASSIGNED", "NOTIFY", "UPDATE"
+};
+
+#define DNSOPCODE(op) \
+ (DNS_UNASSIGNED_OPCODE(flags) ? "UNASSIGNED" : dns_opcode_types[op])
+
+#define DNS_REQ_PRINT(str) \
+ do { \
+ if (tx) { \
+ if (dump_enabled && pktfate && !TX_FATE_ACKED(pktfate)) { \
+ DHD_PKTDUMP(("[%s] " str " [TX] : %s(%s) %s %s(%s) ID:0x%04X OPCODE:%s" \
+ TXFATE_FMT "\n", ifname, tx?sabuf:dabuf, tx?seabuf:deabuf, \
+ tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf, \
+ id, DNSOPCODE(opcode), TX_PKTHASH(pkthash), TX_FATE(pktfate))); \
+ } else { \
+ DHD_PKTDUMP_MEM(("[%s] " str " [TX] : %s(%s) %s %s(%s) ID:0x%04X OPCODE:%s" \
+ TXFATE_FMT "\n", ifname, tx?sabuf:dabuf, tx?seabuf:deabuf, \
+ tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf, \
+ id, DNSOPCODE(opcode), TX_PKTHASH(pkthash), TX_FATE(pktfate))); \
+ } \
+ } else { \
+ DHD_PKTDUMP_MEM(("[%s] " str " [RX] : %s(%s) %s %s(%s) ID:0x%04X OPCODE:%s\n", \
+ ifname, tx?sabuf:dabuf, tx?seabuf:deabuf, tx?"->":"<-", \
+ tx?dabuf:sabuf, tx?deabuf:seabuf, id, DNSOPCODE(opcode))); \
+ } \
+ } while (0)
+
+#define DNS_RESP_PRINT(str) \
+ do { \
+ if (tx) { \
+ if (dump_enabled && pktfate && !TX_FATE_ACKED(pktfate)) { \
+ DHD_PKTDUMP(("[%s] " str " [TX] : %s(%s) %s %s(%s) ID:0x%04X OPCODE:%s RCODE:%d" \
+ TXFATE_FMT "\n", ifname, tx?sabuf:dabuf, tx?seabuf:deabuf, \
+ tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf, id, DNSOPCODE(opcode), \
+ GET_DNS_RCODE(flags), TX_PKTHASH(pkthash), TX_FATE(pktfate))); \
+ } else { \
+ DHD_PKTDUMP_MEM(("[%s] " str " [TX] : %s(%s) %s %s(%s) ID:0x%04X OPCODE:%s RCODE:%d" \
+ TXFATE_FMT "\n", ifname, tx?sabuf:dabuf, tx?seabuf:deabuf, \
+ tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf, id, DNSOPCODE(opcode), \
+ GET_DNS_RCODE(flags), TX_PKTHASH(pkthash), TX_FATE(pktfate))); \
+ } \
+ } else { \
+ DHD_PKTDUMP_MEM(("[%s] " str " [RX] : %s(%s) %s %s(%s) ID:0x%04X OPCODE:%s RCODE:%d\n", \
+ ifname, tx?sabuf:dabuf, tx?seabuf:deabuf, \
+ tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf, \
+ id, DNSOPCODE(opcode), GET_DNS_RCODE(flags))); \
+ } \
+ } while (0)
+
+void
+dhd_dns_dump(dhd_pub_t *dhdp, int ifidx, uint8 *pktdata, bool tx,
+ uint32 *pkthash, uint16 *pktfate)
+{
+ dns_fmt_t *dnsh = (dns_fmt_t *)&pktdata[ETHER_HDR_LEN];
+ struct ipv4_hdr *iph = &dnsh->iph;
+ uint16 flags, opcode, id;
+ char *ifname;
+ bool cond, dump_enabled;
+ char sabuf[20]="", dabuf[20]="";
+ char seabuf[ETHER_ADDR_STR_LEN]="";
+ char deabuf[ETHER_ADDR_STR_LEN]="";
+
+ if (!(dump_msg_level & DUMP_DNS_VAL))
+ return;
+
+ ifname = dhd_ifname(dhdp, ifidx);
+ cond = (tx && pktfate) ? FALSE : TRUE;
+ dump_enabled = dhd_dump_pkt_enabled(dhdp);
+ flags = hton16(dnsh->flags);
+ opcode = GET_DNS_OPCODE(flags);
+ id = hton16(dnsh->id);
+ bcm_ip_ntoa((struct ipv4_addr *)iph->src_ip, sabuf);
+ bcm_ip_ntoa((struct ipv4_addr *)iph->dst_ip, dabuf);
+ bcm_ether_ntoa((struct ether_addr *)pktdata, deabuf);
+ bcm_ether_ntoa((struct ether_addr *)(pktdata+6), seabuf);
+ if (GET_DNS_QR(flags)) {
+ /* Response */
+ DHD_STATLOG_DATA(dhdp, ST(DNS_RESP), ifidx, tx, cond);
+ DNS_RESP_PRINT("DNS RESPONSE");
+ } else {
+ /* Request */
+ DHD_STATLOG_DATA(dhdp, ST(DNS_QUERY), ifidx, tx, cond);
+ DNS_REQ_PRINT("DNS REQUEST");
+ }
+
+ if (ifidx == 0) {
+ dhd_dump_pkt_cnts_inc(dhdp, tx, pktfate, PKT_CNT_TYPE_DNS);
+ }
+}
+#endif /* DHD_DNS_DUMP */
+
+#ifdef DHD_TRX_DUMP
+void
+dhd_trx_pkt_dump(dhd_pub_t *dhdp, int ifidx, uint8 *pktdata, uint32 pktlen, bool tx)
+{
+ struct ether_header *eh;
+ uint16 protocol;
+ char *pkttype = "UNKNOWN";
+
+ if (!(dump_msg_level & DUMP_TRX_VAL))
+ return;
+
+ if (!dhdp) {
+ DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ if (!pktdata) {
+ DHD_ERROR(("%s: pktdata is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ eh = (struct ether_header *)pktdata;
+ protocol = hton16(eh->ether_type);
+ BCM_REFERENCE(pktlen);
+
+ switch (protocol) {
+ case ETHER_TYPE_IP:
+ pkttype = "IP";
+ break;
+ case ETHER_TYPE_ARP:
+ pkttype = "ARP";
+ break;
+ case ETHER_TYPE_BRCM:
+ pkttype = "BRCM";
+ break;
+ case ETHER_TYPE_802_1X:
+ pkttype = "802.1X";
+ break;
+ case ETHER_TYPE_WAI:
+ pkttype = "WAPI";
+ break;
+ default:
+ break;
+ }
+
+ if (protocol != ETHER_TYPE_BRCM) {
+ if (pktdata[0] == 0xFF) {
+ DHD_PKTDUMP(("[%s] %s BROADCAST DUMP - %s\n",
+ dhd_ifname(dhdp, ifidx), tx?"TX":"RX", pkttype));
+ } else if (pktdata[0] & 1) {
+ DHD_PKTDUMP(("[%s] %s MULTICAST DUMP " MACDBG " - %s\n",
+ dhd_ifname(dhdp, ifidx), tx?"TX":"RX", MAC2STRDBG(pktdata), pkttype));
+ } else {
+ DHD_PKTDUMP(("[%s] %s DUMP - %s\n",
+ dhd_ifname(dhdp, ifidx), tx?"TX":"RX", pkttype));
+ }
+#ifdef DHD_RX_FULL_DUMP
+ prhex("Data", pktdata, pktlen);
+#endif /* DHD_RX_FULL_DUMP */
+ }
+ else {
+ DHD_PKTDUMP(("[%s] %s DUMP - %s\n",
+ dhd_ifname(dhdp, ifidx), tx?"TX":"RX", pkttype));
+ }
+}
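+
+/*
+ * Editorial note: the multicast test above relies on the IEEE 802 group
+ * bit, the least-significant bit of the first destination octet, e.g.
+ *
+ *   01:00:5e:00:00:fb -> pktdata[0] & 1 == 1  (multicast)
+ *   ff:ff:ff:ff:ff:ff -> pktdata[0] == 0xFF   (matched first, broadcast)
+ */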
+#endif /* DHD_TRX_DUMP */
diff --git a/bcmdhd.101.10.361.x/dhd_linux_pktdump.h b/bcmdhd.101.10.361.x/dhd_linux_pktdump.h
new file mode 100755
index 0000000..7d7ce72
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_linux_pktdump.h
@@ -0,0 +1,132 @@
+/*
+ * Header file for the Packet dump helper functions
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+#ifndef __DHD_LINUX_PKTDUMP_H_
+#define __DHD_LINUX_PKTDUMP_H_
+
+#include <typedefs.h>
+#include <dhd.h>
+
+typedef enum {
+ EAPOL_OTHER = 0,
+ EAPOL_4WAY_M1,
+ EAPOL_4WAY_M2,
+ EAPOL_4WAY_M3,
+ EAPOL_4WAY_M4,
+ EAPOL_GROUPKEY_M1,
+ EAPOL_GROUPKEY_M2
+} msg_eapol_t;
+
+typedef enum pkt_cnt_rsn {
+ PKT_CNT_RSN_INVALID = 0,
+ PKT_CNT_RSN_ROAM = 1,
+ PKT_CNT_RSN_GRPKEY_UP = 2,
+ PKT_CNT_RSN_CONNECT = 3,
+ PKT_CNT_RSN_MAX = 4
+} pkt_cnt_rsn_t;
+
+enum pkt_type {
+ PKT_TYPE_DATA = 0,
+ PKT_TYPE_DHCP = 1,
+ PKT_TYPE_ICMP = 2,
+ PKT_TYPE_DNS = 3,
+ PKT_TYPE_ARP = 4,
+ PKT_TYPE_EAP = 5
+};
+
+extern msg_eapol_t dhd_is_4way_msg(uint8 *pktdata);
+extern void dhd_dump_pkt(dhd_pub_t *dhd, int ifidx, uint8 *pktdata,
+ uint32 pktlen, bool tx, uint32 *pkthash, uint16 *pktfate);
+#ifdef DHD_PKTDUMP_ROAM
+extern void dhd_dump_mod_pkt_timer(dhd_pub_t *dhdp, uint16 rsn);
+extern void dhd_dump_pkt_init(dhd_pub_t *dhdp);
+extern void dhd_dump_pkt_deinit(dhd_pub_t *dhdp);
+extern void dhd_dump_pkt_clear(dhd_pub_t *dhdp);
+#else
+static INLINE void dhd_dump_mod_pkt_timer(dhd_pub_t *dhdp, uint16 rsn) { }
+static INLINE void dhd_dump_pkt_init(dhd_pub_t *dhdp) { }
+static INLINE void dhd_dump_pkt_deinit(dhd_pub_t *dhdp) { }
+static INLINE void dhd_dump_pkt_clear(dhd_pub_t *dhdp) { }
+#endif /* DHD_PKTDUMP_ROAM */
+
+/* TX/RX packet dump */
+#ifdef DHD_TRX_DUMP
+extern void dhd_trx_pkt_dump(dhd_pub_t *dhdp, int ifidx,
+ uint8 *pktdata, uint32 pktlen, bool tx);
+#else
+static INLINE void dhd_trx_pkt_dump(dhd_pub_t *dhdp, int ifidx,
+ uint8 *pktdata, uint32 pktlen, bool tx) { }
+#endif /* DHD_TRX_DUMP */
+
+/* DHCP packet dump */
+#ifdef DHD_DHCP_DUMP
+extern void dhd_dhcp_dump(dhd_pub_t *dhdp, int ifidx, uint8 *pktdata, bool tx,
+ uint32 *pkthash, uint16 *pktfate);
+#else
+static INLINE void dhd_dhcp_dump(dhd_pub_t *dhdp, int ifidx,
+ uint8 *pktdata, bool tx, uint32 *pkthash, uint16 *pktfate) { }
+#endif /* DHD_DHCP_DUMP */
+
+/* DNS packet dump */
+#ifdef DHD_DNS_DUMP
+extern void dhd_dns_dump(dhd_pub_t *dhdp, int ifidx, uint8 *pktdata, bool tx,
+ uint32 *pkthash, uint16 *pktfate);
+#else
+static INLINE void dhd_dns_dump(dhd_pub_t *dhdp, int ifidx,
+ uint8 *pktdata, bool tx, uint32 *pkthash, uint16 *pktfate) { }
+#endif /* DHD_DNS_DUMP */
+
+/* ICMP packet dump */
+#ifdef DHD_ICMP_DUMP
+extern void dhd_icmp_dump(dhd_pub_t *dhdp, int ifidx, uint8 *pktdata, bool tx,
+ uint32 *pkthash, uint16 *pktfate);
+#else
+static INLINE void dhd_icmp_dump(dhd_pub_t *dhdp, int ifidx,
+ uint8 *pktdata, bool tx, uint32 *pkthash, uint16 *pktfate) { }
+#endif /* DHD_ICMP_DUMP */
+
+/* ARP packet dump */
+#ifdef DHD_ARP_DUMP
+extern void dhd_arp_dump(dhd_pub_t *dhdp, int ifidx, uint8 *pktdata, bool tx,
+ uint32 *pkthash, uint16 *pktfate);
+#else
+static INLINE void dhd_arp_dump(dhd_pub_t *dhdp, int ifidx,
+ uint8 *pktdata, bool tx, uint32 *pkthash, uint16 *pktfate) { }
+#endif /* DHD_ARP_DUMP */
+
+/* 802.1X packet dump */
+#ifdef DHD_8021X_DUMP
+extern void dhd_dump_eapol_message(dhd_pub_t *dhd, int ifidx,
+ uint8 *pktdata, uint32 pktlen, bool tx, uint32 *pkthash, uint16 *pktfate);
+#else
+static INLINE void dhd_dump_eapol_message(dhd_pub_t *dhd, int ifidx,
+ uint8 *pktdata, uint32 pktlen, bool tx, uint32 *pkthash, uint16 *pktfate) { }
+#endif /* DHD_8021X_DUMP */
+extern bool dhd_check_ip_prot(uint8 *pktdata, uint16 ether_type);
+extern bool dhd_check_arp(uint8 *pktdata, uint16 ether_type);
+extern bool dhd_check_dhcp(uint8 *pktdata);
+extern bool dhd_check_icmp(uint8 *pktdata);
+extern bool dhd_check_dns(uint8 *pktdata);
+#endif /* __DHD_LINUX_PKTDUMP_H_ */
diff --git a/bcmdhd.101.10.361.x/dhd_linux_platdev.c b/bcmdhd.101.10.361.x/dhd_linux_platdev.c
new file mode 100755
index 0000000..8410748
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_linux_platdev.c
@@ -0,0 +1,1108 @@
+/*
+ * Linux platform device for DHD WLAN adapter
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+#include <typedefs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <bcmutils.h>
+#include <linux_osl.h>
+#include <dhd_dbg.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_bus.h>
+#include <dhd_linux.h>
+#if defined(OEM_ANDROID)
+#include <wl_android.h>
+#endif
+#if defined(CONFIG_WIFI_CONTROL_FUNC) || defined(CUSTOMER_HW4)
+#include <linux/wlan_plat.h>
+#else
+#include <dhd_plat.h>
+#endif /* CONFIG_WIFI_CONTROL_FUNC || CUSTOMER_HW4 */
+#ifdef BCMDBUS
+#include <dbus.h>
+#endif
+#ifdef CONFIG_DTS
+#include <linux/regulator/consumer.h>
+#include <linux/of_gpio.h>
+#endif /* CONFIG_DTS */
+
+#if defined(CUSTOMER_HW)
+extern int dhd_wlan_init_plat_data(wifi_adapter_info_t *adapter);
+extern void dhd_wlan_deinit_plat_data(wifi_adapter_info_t *adapter);
+#endif /* CUSTOMER_HW */
+
+#define WIFI_PLAT_NAME "bcmdhd_wlan"
+#define WIFI_PLAT_NAME2 "bcm4329_wlan"
+#define WIFI_PLAT_EXT "bcmdhd_wifi_platform"
+
+#ifdef DHD_WIFI_SHUTDOWN
+extern void wifi_plat_dev_drv_shutdown(struct platform_device *pdev);
+#endif
+
+#ifdef CONFIG_DTS
+struct regulator *wifi_regulator = NULL;
+#endif /* CONFIG_DTS */
+
+bool cfg_multichip = FALSE;
+bcmdhd_wifi_platdata_t *dhd_wifi_platdata = NULL;
+static int wifi_plat_dev_probe_ret = 0;
+static bool is_power_on = FALSE;
+/* XXX Some Qualcomm-based CUSTOMER_HW4 platforms use the platform
+ * device structure even when the kernel uses the device tree. The
+ * CONFIG_ARCH_MSM condition is therefore temporarily retained to
+ * support this case.
+ */
+#if !defined(CONFIG_DTS)
+#if defined(DHD_OF_SUPPORT)
+static bool dts_enabled = TRUE;
+extern struct resource dhd_wlan_resources;
+extern struct wifi_platform_data dhd_wlan_control;
+#else
+static bool dts_enabled = FALSE;
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wmissing-field-initializers"
+#endif
+struct resource dhd_wlan_resources = {0};
+struct wifi_platform_data dhd_wlan_control = {0};
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+#endif /* DHD_OF_SUPPORT */
+#endif /* !defined(CONFIG_DTS) */
+
+static int dhd_wifi_platform_load(void);
+
+extern void* wl_cfg80211_get_dhdp(struct net_device *dev);
+
+#ifdef BCMDHD_MODULAR
+//extern int dhd_wlan_init(void);
+//extern int dhd_wlan_deinit(void);
+#ifdef WBRC
+extern int wbrc_init(void);
+extern void wbrc_exit(void);
+#endif /* WBRC */
+#endif /* BCMDHD_MODULAR */
+
+#ifdef ENABLE_4335BT_WAR
+extern int bcm_bt_lock(int cookie);
+extern void bcm_bt_unlock(int cookie);
+static int lock_cookie_wifi = 'W' | 'i'<<8 | 'F'<<16 | 'i'<<24; /* cookie is "WiFi" */
+#endif /* ENABLE_4335BT_WAR */
+
+#ifdef BCM4335_XTAL_WAR
+extern bool check_bcm4335_rev(void);
+#endif /* BCM4335_XTAL_WAR */
+
+wifi_adapter_info_t* dhd_wifi_platform_attach_adapter(uint32 bus_type,
+ uint32 bus_num, uint32 slot_num, unsigned long status)
+{
+ int i;
+
+ if (dhd_wifi_platdata == NULL)
+ return NULL;
+
+ for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) {
+ wifi_adapter_info_t *adapter = &dhd_wifi_platdata->adapters[i];
+ if ((adapter->bus_type == -1 || adapter->bus_type == bus_type) &&
+ (adapter->bus_num == -1 || adapter->bus_num == bus_num) &&
+ (adapter->slot_num == -1 || adapter->slot_num == slot_num)
+#if defined(ENABLE_INSMOD_NO_FW_LOAD)
+ && (wifi_chk_adapter_status(adapter, status))
+#endif
+ ) {
+ DHD_ERROR(("attach adapter info '%s'\n", adapter->name));
+ return adapter;
+ }
+ }
+ return NULL;
+}
+
+wifi_adapter_info_t* dhd_wifi_platform_get_adapter(uint32 bus_type, uint32 bus_num, uint32 slot_num)
+{
+ int i;
+
+ if (dhd_wifi_platdata == NULL)
+ return NULL;
+
+ for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) {
+ wifi_adapter_info_t *adapter = &dhd_wifi_platdata->adapters[i];
+ if ((adapter->bus_type == -1 || adapter->bus_type == bus_type) &&
+ (adapter->bus_num == -1 || adapter->bus_num == bus_num) &&
+ (adapter->slot_num == -1 || adapter->slot_num == slot_num)) {
+ DHD_TRACE(("found adapter info '%s'\n", adapter->name));
+ return adapter;
+ }
+ }
+ return NULL;
+}
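+
+/*
+ * Editorial note: -1 fields act as wildcards in the match above, so the
+ * generic adapter registered with bus_type/bus_num/slot_num of -1
+ * matches any enumerated device when no exact entry exists.
+ */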
+
+void* wifi_platform_prealloc(wifi_adapter_info_t *adapter, int section, unsigned long size)
+{
+ void *alloc_ptr = NULL;
+ struct wifi_platform_data *plat_data;
+
+ if (!adapter || !adapter->wifi_plat_data)
+ return NULL;
+ plat_data = adapter->wifi_plat_data;
+ if (plat_data->mem_prealloc) {
+#ifdef BCMDHD_MDRIVER
+ alloc_ptr = plat_data->mem_prealloc(adapter->bus_type, adapter->index, section, size);
+#else
+ alloc_ptr = plat_data->mem_prealloc(section, size);
+#endif
+ if (alloc_ptr) {
+ DHD_INFO(("success alloc section %d\n", section));
+ if (size != 0L)
+ bzero(alloc_ptr, size);
+ return alloc_ptr;
+ }
+ } else
+ return NULL;
+
+ DHD_ERROR(("%s: failed to alloc static mem section %d\n", __FUNCTION__, section));
+ return NULL;
+}
+
+void* wifi_platform_get_prealloc_func_ptr(wifi_adapter_info_t *adapter)
+{
+ struct wifi_platform_data *plat_data;
+
+ if (!adapter || !adapter->wifi_plat_data)
+ return NULL;
+ plat_data = adapter->wifi_plat_data;
+ return plat_data->mem_prealloc;
+}
+
+int wifi_platform_get_irq_number(wifi_adapter_info_t *adapter, unsigned long *irq_flags_ptr)
+{
+ if (adapter == NULL)
+ return -1;
+ if (irq_flags_ptr)
+ *irq_flags_ptr = adapter->intr_flags;
+ return adapter->irq_num;
+}
+
+int wifi_platform_set_power(wifi_adapter_info_t *adapter, bool on, unsigned long msec)
+{
+ int err = 0;
+#ifndef CONFIG_DTS
+ struct wifi_platform_data *plat_data;
+#endif
+#ifdef BT_OVER_SDIO
+ if (is_power_on == on) {
+ return -EINVAL;
+ }
+#endif /* BT_OVER_SDIO */
+ if (on) {
+ wifi_set_adapter_status(adapter, WIFI_STATUS_POWER_ON);
+ } else {
+ wifi_clr_adapter_status(adapter, WIFI_STATUS_POWER_ON);
+ }
+#ifdef CONFIG_DTS
+ if (on) {
+ printf("======== PULL WL_REG_ON HIGH! ========\n");
+ err = regulator_enable(wifi_regulator);
+ is_power_on = TRUE;
+ }
+ else {
+ printf("======== PULL WL_REG_ON LOW! ========\n");
+ err = regulator_disable(wifi_regulator);
+ is_power_on = FALSE;
+ }
+ if (err < 0) {
+ DHD_ERROR(("%s: regulator enable/disable failed", __FUNCTION__));
+ goto fail;
+ }
+#else
+ if (!adapter || !adapter->wifi_plat_data) {
+ err = -EINVAL;
+ goto fail;
+ }
+ plat_data = adapter->wifi_plat_data;
+
+ DHD_ERROR(("%s = %d, delay: %lu msec\n", __FUNCTION__, on, msec));
+ if (plat_data->set_power) {
+#ifdef ENABLE_4335BT_WAR
+ if (on) {
+ printk("WiFi: trying to acquire BT lock\n");
+ if (bcm_bt_lock(lock_cookie_wifi) != 0)
+ printk("** WiFi: timeout in acquiring bt lock**\n");
+ printk("%s: btlock acquired\n", __FUNCTION__);
+ }
+ else {
+ /* For an exceptional case, release btlock */
+ bcm_bt_unlock(lock_cookie_wifi);
+ }
+#endif /* ENABLE_4335BT_WAR */
+
+ err = plat_data->set_power(on, adapter);
+ }
+
+ if (msec && !err)
+ OSL_SLEEP(msec);
+
+ if (on && !err)
+ is_power_on = TRUE;
+ else
+ is_power_on = FALSE;
+
+#endif /* CONFIG_DTS */
+
+ return err;
+fail:
+ if (on) {
+ wifi_clr_adapter_status(adapter, WIFI_STATUS_POWER_ON);
+ } else {
+ wifi_set_adapter_status(adapter, WIFI_STATUS_POWER_ON);
+ }
+ return err;
+}
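+
+/*
+ * Usage sketch (editorial; WIFI_TURNON_DELAY is assumed to be the usual
+ * DHD power-up delay constant): a typical power-up from the bus layer is
+ *
+ *   wifi_platform_set_power(adapter, TRUE, WIFI_TURNON_DELAY);
+ *   wifi_platform_bus_enumerate(adapter, TRUE);
+ *
+ * mirroring the power-down order in wifi_plat_dev_drv_remove() below.
+ */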
+
+int wifi_platform_bus_enumerate(wifi_adapter_info_t *adapter, bool device_present)
+{
+ int err = 0;
+ struct wifi_platform_data *plat_data;
+
+ if (!adapter || !adapter->wifi_plat_data)
+ return -EINVAL;
+ plat_data = adapter->wifi_plat_data;
+
+ DHD_ERROR(("%s device present %d\n", __FUNCTION__, device_present));
+ if (plat_data->set_carddetect) {
+ err = plat_data->set_carddetect(device_present);
+ }
+ return err;
+
+}
+
+int wifi_platform_get_mac_addr(wifi_adapter_info_t *adapter, unsigned char *buf,
+ int ifidx)
+{
+ struct wifi_platform_data *plat_data;
+
+ DHD_ERROR(("%s\n", __FUNCTION__));
+ if (!buf || !adapter || !adapter->wifi_plat_data)
+ return -EINVAL;
+ plat_data = adapter->wifi_plat_data;
+ if (plat_data->get_mac_addr) {
+ return plat_data->get_mac_addr(buf, ifidx);
+ }
+ return -EOPNOTSUPP;
+}
+
+#ifdef DHD_COREDUMP
+int wifi_platform_set_coredump(wifi_adapter_info_t *adapter, const char *buf,
+ int buf_len, const char *info)
+{
+ struct wifi_platform_data *plat_data;
+
+ DHD_ERROR(("%s\n", __FUNCTION__));
+ if (!buf || !adapter || !adapter->wifi_plat_data)
+ return -EINVAL;
+ plat_data = adapter->wifi_plat_data;
+ if (plat_data->set_coredump) {
+ return plat_data->set_coredump(buf, buf_len, info);
+ }
+ return -EOPNOTSUPP;
+}
+#endif /* DHD_COREDUMP */
+
+void *
+#ifdef CUSTOM_COUNTRY_CODE
+wifi_platform_get_country_code(wifi_adapter_info_t *adapter, char *ccode, u32 flags)
+#else
+wifi_platform_get_country_code(wifi_adapter_info_t *adapter, char *ccode)
+#endif /* CUSTOM_COUNTRY_CODE */
+{
+ /* get_country_code was added after 2.6.39 */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
+ struct wifi_platform_data *plat_data;
+
+ if (!ccode || !adapter || !adapter->wifi_plat_data)
+ return NULL;
+ plat_data = adapter->wifi_plat_data;
+
+ DHD_TRACE(("%s\n", __FUNCTION__));
+ if (plat_data->get_country_code) {
+#ifdef CUSTOM_FORCE_NODFS_FLAG
+ return plat_data->get_country_code(ccode, flags);
+#else
+ return plat_data->get_country_code(ccode);
+#endif /* CUSTOM_FORCE_NODFS_FLAG */
+ }
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) */
+
+ return NULL;
+}
+
+#ifndef CUSTOMER_HW
+static int wifi_plat_dev_drv_probe(struct platform_device *pdev)
+{
+ struct resource *resource;
+ wifi_adapter_info_t *adapter;
+#if defined(CONFIG_DTS) && defined(CUSTOMER_OOB)
+ int irq, gpio;
+#endif /* CONFIG_DTS */
+
+ /* Android style wifi platform data device ("bcmdhd_wlan" or "bcm4329_wlan")
+ * is kept for backward compatibility and supports only 1 adapter
+ */
+ ASSERT(dhd_wifi_platdata != NULL);
+ ASSERT(dhd_wifi_platdata->num_adapters == 1);
+ adapter = &dhd_wifi_platdata->adapters[0];
+#if defined(CONFIG_WIFI_CONTROL_FUNC)
+ adapter->wifi_plat_data = (struct wifi_platform_data *)(pdev->dev.platform_data);
+#else
+ adapter->wifi_plat_data = (void *)&dhd_wlan_control;
+#endif
+
+ resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "bcmdhd_wlan_irq");
+ if (resource == NULL)
+ resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "bcm4329_wlan_irq");
+ if (resource) {
+ adapter->irq_num = resource->start;
+ adapter->intr_flags = resource->flags & IRQF_TRIGGER_MASK;
+#ifdef DHD_ISR_NO_SUSPEND
+ adapter->intr_flags |= IRQF_NO_SUSPEND;
+#endif
+ }
+
+#ifdef CONFIG_DTS
+ wifi_regulator = regulator_get(&pdev->dev, "wlreg_on");
+ if (wifi_regulator == NULL) {
+ DHD_ERROR(("%s regulator is null\n", __FUNCTION__));
+ return -1;
+ }
+
+#if defined(CUSTOMER_OOB)
+ /* This is to get the irq for the OOB */
+ gpio = of_get_gpio(pdev->dev.of_node, 0);
+
+ if (gpio < 0) {
+ DHD_ERROR(("%s gpio information is incorrect\n", __FUNCTION__));
+ return -1;
+ }
+ irq = gpio_to_irq(gpio);
+ if (irq < 0) {
+ DHD_ERROR(("%s irq information is incorrect\n", __FUNCTION__));
+ return -1;
+ }
+ adapter->irq_num = irq;
+
+ /* need to change the flags according to our requirement */
+#ifdef HW_OOB
+ adapter->intr_flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL |
+ IORESOURCE_IRQ_SHAREABLE;
+#else
+ adapter->intr_flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE |
+ IORESOURCE_IRQ_SHAREABLE;
+#endif
+#endif
+#endif /* CONFIG_DTS */
+
+ wifi_plat_dev_probe_ret = dhd_wifi_platform_load();
+ return wifi_plat_dev_probe_ret;
+}
+
+static int wifi_plat_dev_drv_remove(struct platform_device *pdev)
+{
+ wifi_adapter_info_t *adapter;
+
+ /* Android style wifi platform data device ("bcmdhd_wlan" or "bcm4329_wlan")
+ * is kept for backward compatibility and supports only 1 adapter
+ */
+ ASSERT(dhd_wifi_platdata != NULL);
+ ASSERT(dhd_wifi_platdata->num_adapters == 1);
+ adapter = &dhd_wifi_platdata->adapters[0];
+ if (is_power_on) {
+#ifdef BCMPCIE
+ wifi_platform_bus_enumerate(adapter, FALSE);
+ wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY);
+#else
+ wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY);
+ wifi_platform_bus_enumerate(adapter, FALSE);
+#endif /* BCMPCIE */
+ }
+
+#ifdef CONFIG_DTS
+ regulator_put(wifi_regulator);
+#endif /* CONFIG_DTS */
+ return 0;
+}
+
+static int wifi_plat_dev_drv_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ DHD_TRACE(("##> %s\n", __FUNCTION__));
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 39)) && defined(OOB_INTR_ONLY) && \
+ defined(BCMSDIO)
+ bcmsdh_oob_intr_set(0);
+#endif /* KERNEL <= 2.6.39 && OOB_INTR_ONLY && BCMSDIO */
+ return 0;
+}
+
+static int wifi_plat_dev_drv_resume(struct platform_device *pdev)
+{
+ DHD_TRACE(("##> %s\n", __FUNCTION__));
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 39)) && defined(OOB_INTR_ONLY) && \
+ defined(BCMSDIO)
+ if (dhd_os_check_if_up(wl_cfg80211_get_dhdp()))
+ bcmsdh_oob_intr_set(1);
+#endif /* KERNEL <= 2.6.39 && OOB_INTR_ONLY && BCMSDIO */
+ return 0;
+}
+
+#ifdef CONFIG_DTS
+static const struct of_device_id wifi_device_dt_match[] = {
+ { .compatible = "android,bcmdhd_wlan", },
+ {},
+};
+#endif /* CONFIG_DTS */
+
+static struct platform_driver wifi_platform_dev_driver = {
+ .probe = wifi_plat_dev_drv_probe,
+ .remove = wifi_plat_dev_drv_remove,
+ .suspend = wifi_plat_dev_drv_suspend,
+ .resume = wifi_plat_dev_drv_resume,
+#ifdef DHD_WIFI_SHUTDOWN
+ .shutdown = wifi_plat_dev_drv_shutdown,
+#endif /* DHD_WIFI_SHUTDOWN */
+ .driver = {
+ .name = WIFI_PLAT_NAME,
+#ifdef CONFIG_DTS
+ .of_match_table = wifi_device_dt_match,
+#endif /* CONFIG_DTS */
+ }
+};
+
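+/* A second driver instance is registered under the legacy device name
+ * WIFI_PLAT_NAME2 (presumably the "bcm4329_wlan" name mentioned in the
+ * probe comment above) so that older board files which still declare the
+ * legacy platform device keep working; wifi_ctrlfunc_register_drv() below
+ * probes for both names.
+ */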
+static struct platform_driver wifi_platform_dev_driver_legacy = {
+ .probe = wifi_plat_dev_drv_probe,
+ .remove = wifi_plat_dev_drv_remove,
+ .suspend = wifi_plat_dev_drv_suspend,
+ .resume = wifi_plat_dev_drv_resume,
+#ifdef DHD_WIFI_SHUTDOWN
+ .shutdown = wifi_plat_dev_drv_shutdown,
+#endif /* DHD_WIFI_SHUTDOWN */
+ .driver = {
+ .name = WIFI_PLAT_NAME2,
+ }
+};
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0)
+static int wifi_platdev_match(struct device *dev, const void *data)
+#else
+static int wifi_platdev_match(struct device *dev, void *data)
+#endif /* LINUX_VER >= 5.3.0 */
+{
+ char *name = (char*)data;
+ const struct platform_device *pdev;
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ pdev = to_platform_device(dev);
+ GCC_DIAGNOSTIC_POP();
+
+ if (strcmp(pdev->name, name) == 0) {
+ DHD_ERROR(("found wifi platform device %s\n", name));
+ return TRUE;
+ }
+
+ return FALSE;
+}
+#endif /* !CUSTOMER_HW */
+
+static int wifi_ctrlfunc_register_drv(void)
+{
+ wifi_adapter_info_t *adapter;
+
+#ifndef CUSTOMER_HW
+ int err = 0;
+ struct device *dev1, *dev2;
+ dev1 = bus_find_device(&platform_bus_type, NULL, WIFI_PLAT_NAME, wifi_platdev_match);
+ dev2 = bus_find_device(&platform_bus_type, NULL, WIFI_PLAT_NAME2, wifi_platdev_match);
+#endif
+
+#ifdef BCMDHD_MODULAR
+// dhd_wlan_init();
+#ifdef WBRC
+ wbrc_init();
+#endif /* WBRC */
+#endif /* BCMDHD_MODULAR */
+
+#if !defined(CONFIG_DTS) && !defined(CUSTOMER_HW)
+ if (!dts_enabled) {
+ if (dev1 == NULL && dev2 == NULL) {
+ DHD_ERROR(("no wifi platform data, skip\n"));
+ return -ENXIO;
+ }
+ }
+#endif /* !CONFIG_DTS && !CUSTOMER_HW */
+
+	/* multi-chip support not enabled, build a single adapter info for
+ * DHD (either SDIO, USB or PCIe)
+ */
+ adapter = kzalloc(sizeof(wifi_adapter_info_t), GFP_KERNEL);
+ if (adapter == NULL) {
+		DHD_ERROR(("%s: adapter alloc failed\n", __FUNCTION__));
+ return -ENOMEM;
+ }
+ adapter->name = "DHD generic adapter";
+ adapter->index = -1;
+#ifdef BCMDHD_MDRIVER
+#ifdef BCMSDIO
+ adapter->index = 0;
+#elif defined(BCMPCIE)
+ adapter->index = 1;
+#elif defined(BCMDBUS)
+ adapter->index = 2;
+#endif /* BCMSDIO / BCMPCIE / BCMDBUS */
+#endif /* BCMDHD_MDRIVER */
+ adapter->bus_type = -1;
+ adapter->bus_num = -1;
+ adapter->slot_num = -1;
+ adapter->irq_num = -1;
+	is_power_on = FALSE;
+	wifi_plat_dev_probe_ret = 0;
+	dhd_wifi_platdata = kzalloc(sizeof(bcmdhd_wifi_platdata_t), GFP_KERNEL);
+	if (dhd_wifi_platdata == NULL) {
+		DHD_ERROR(("%s: platdata alloc failed\n", __FUNCTION__));
+		kfree(adapter);
+		return -ENOMEM;
+	}
+	dhd_wifi_platdata->num_adapters = 1;
+	dhd_wifi_platdata->adapters = adapter;
+ init_waitqueue_head(&adapter->status_event);
+
+#ifndef CUSTOMER_HW
+ if (dev1) {
+ err = platform_driver_register(&wifi_platform_dev_driver);
+ if (err) {
+ DHD_ERROR(("%s: failed to register wifi ctrl func driver\n",
+ __FUNCTION__));
+ return err;
+ }
+ }
+ if (dev2) {
+ err = platform_driver_register(&wifi_platform_dev_driver_legacy);
+ if (err) {
+ DHD_ERROR(("%s: failed to register wifi ctrl func legacy driver\n",
+ __FUNCTION__));
+ return err;
+ }
+ }
+#endif
+
+#if !defined(CONFIG_DTS)
+ if (dts_enabled) {
+ adapter->wifi_plat_data = (void *)&dhd_wlan_control;
+#ifdef CUSTOMER_HW
+ wifi_plat_dev_probe_ret = dhd_wlan_init_plat_data(adapter);
+ if (wifi_plat_dev_probe_ret)
+ return wifi_plat_dev_probe_ret;
+#endif
+#ifdef DHD_ISR_NO_SUSPEND
+ adapter->intr_flags |= IRQF_NO_SUSPEND;
+#endif
+ wifi_plat_dev_probe_ret = dhd_wifi_platform_load();
+ }
+#endif /* !defined(CONFIG_DTS) */
+
+#if defined(CONFIG_DTS) && !defined(CUSTOMER_HW)
+ wifi_plat_dev_probe_ret = platform_driver_register(&wifi_platform_dev_driver);
+#endif /* CONFIG_DTS */
+
+	/* return the probe function's return value if registration succeeded */
+ return wifi_plat_dev_probe_ret;
+}
+
+void wifi_ctrlfunc_unregister_drv(void)
+{
+#ifndef CONFIG_DTS
+ wifi_adapter_info_t *adapter = NULL;
+#endif
+
+#if defined(CONFIG_DTS) && !defined(CUSTOMER_HW)
+ DHD_ERROR(("unregister wifi platform drivers\n"));
+ platform_driver_unregister(&wifi_platform_dev_driver);
+#else
+#ifndef CUSTOMER_HW
+ struct device *dev1, *dev2;
+ dev1 = bus_find_device(&platform_bus_type, NULL, WIFI_PLAT_NAME, wifi_platdev_match);
+ dev2 = bus_find_device(&platform_bus_type, NULL, WIFI_PLAT_NAME2, wifi_platdev_match);
+ if (!dts_enabled)
+ if (dev1 == NULL && dev2 == NULL)
+ return;
+#endif
+ DHD_ERROR(("unregister wifi platform drivers\n"));
+#ifndef CUSTOMER_HW
+ if (dev1)
+ platform_driver_unregister(&wifi_platform_dev_driver);
+ if (dev2)
+ platform_driver_unregister(&wifi_platform_dev_driver_legacy);
+#endif
+ if (dts_enabled) {
+ adapter = &dhd_wifi_platdata->adapters[0];
+ if (is_power_on) {
+ wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY);
+ }
+ wifi_platform_bus_enumerate(adapter, FALSE);
+ }
+#ifdef BCMDHD_MODULAR
+// dhd_wlan_deinit();
+#ifdef WBRC
+ wbrc_exit();
+#endif /* WBRC */
+#endif /* BCMDHD_MODULAR */
+
+#endif /* CONFIG_DTS && !CUSTOMER_HW */
+
+#if defined(CUSTOMER_HW)
+ dhd_wlan_deinit_plat_data(adapter);
+#endif
+
+ kfree(dhd_wifi_platdata->adapters);
+ dhd_wifi_platdata->adapters = NULL;
+ dhd_wifi_platdata->num_adapters = 0;
+ kfree(dhd_wifi_platdata);
+ dhd_wifi_platdata = NULL;
+}
+
+#ifndef CUSTOMER_HW
+static int bcmdhd_wifi_plat_dev_drv_probe(struct platform_device *pdev)
+{
+ dhd_wifi_platdata = (bcmdhd_wifi_platdata_t *)(pdev->dev.platform_data);
+
+ return dhd_wifi_platform_load();
+}
+
+static int bcmdhd_wifi_plat_dev_drv_remove(struct platform_device *pdev)
+{
+ int i;
+ wifi_adapter_info_t *adapter;
+ ASSERT(dhd_wifi_platdata != NULL);
+
+ /* power down all adapters */
+ for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) {
+ adapter = &dhd_wifi_platdata->adapters[i];
+ wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY);
+ wifi_platform_bus_enumerate(adapter, FALSE);
+ }
+ return 0;
+}
+
+static struct platform_driver dhd_wifi_platform_dev_driver = {
+ .probe = bcmdhd_wifi_plat_dev_drv_probe,
+ .remove = bcmdhd_wifi_plat_dev_drv_remove,
+ .driver = {
+ .name = WIFI_PLAT_EXT,
+ }
+};
+#endif
+
+int dhd_wifi_platform_register_drv(void)
+{
+ int err = 0;
+#ifndef CUSTOMER_HW
+ struct device *dev;
+
+ /* register Broadcom wifi platform data driver if multi-chip is enabled,
+ * otherwise use Android style wifi platform data (aka wifi control function)
+ * if it exists
+ *
+ * to support multi-chip DHD, Broadcom wifi platform data device must
+ * be added in kernel early boot (e.g. board config file).
+ */
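+	/* Illustrative board-file sketch (the device and platform_data names
+	 * here are assumptions for the example, not taken from this tree):
+	 *
+	 *   static struct platform_device bcm_wifi_ext_dev = {
+	 *       .name = WIFI_PLAT_EXT,
+	 *       .id = -1,
+	 *       .dev = { .platform_data = &board_wifi_platdata, },
+	 *   };
+	 *   platform_device_register(&bcm_wifi_ext_dev);
+	 */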
+ if (cfg_multichip) {
+ dev = bus_find_device(&platform_bus_type, NULL, WIFI_PLAT_EXT, wifi_platdev_match);
+ if (dev == NULL) {
+ DHD_ERROR(("bcmdhd wifi platform data device not found!!\n"));
+ return -ENXIO;
+ }
+ err = platform_driver_register(&dhd_wifi_platform_dev_driver);
+ } else
+#endif
+ {
+ err = wifi_ctrlfunc_register_drv();
+
+ /* no wifi ctrl func either, load bus directly and ignore this error */
+ if (err) {
+ if (err == -ENXIO) {
+ /* wifi ctrl function does not exist */
+ err = dhd_wifi_platform_load();
+ } else {
+ /* unregister driver due to initialization failure */
+ wifi_ctrlfunc_unregister_drv();
+ }
+ }
+ }
+
+ return err;
+}
+
+#ifdef BCMPCIE
+static int dhd_wifi_platform_load_pcie(void)
+{
+ int err = 0;
+ int i;
+ wifi_adapter_info_t *adapter;
+
+ BCM_REFERENCE(i);
+ BCM_REFERENCE(adapter);
+
+ if (dhd_wifi_platdata == NULL) {
+ /* XXX For x86 Bringup PC or BRIX */
+ err = dhd_bus_register();
+ } else {
+#ifdef DHD_SUPPORT_HDM
+ if (dhd_download_fw_on_driverload || hdm_trigger_init) {
+#else
+ if (dhd_download_fw_on_driverload) {
+#endif /* DHD_SUPPORT_HDM */
+ /* power up all adapters */
+ for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) {
+ int retry = POWERUP_MAX_RETRY;
+ adapter = &dhd_wifi_platdata->adapters[i];
+
+ DHD_ERROR(("Power-up adapter '%s'\n", adapter->name));
+ DHD_INFO((" - irq %d [flags %d], firmware: %s, nvram: %s\n",
+ adapter->irq_num, adapter->intr_flags, adapter->fw_path,
+ adapter->nv_path));
+ DHD_INFO((" - bus type %d, bus num %d, slot num %d\n\n",
+ adapter->bus_type, adapter->bus_num, adapter->slot_num));
+
+ do {
+ err = wifi_platform_set_power(adapter,
+ TRUE, WIFI_TURNON_DELAY);
+ if (err) {
+ DHD_ERROR(("failed to power up %s,"
+ " %d retry left\n",
+ adapter->name, retry));
+					/* WL_REG_ON state unknown, power off forcibly */
+ wifi_platform_set_power(adapter,
+ FALSE, WIFI_TURNOFF_DELAY);
+ continue;
+ } else {
+ err = wifi_platform_bus_enumerate(adapter, TRUE);
+ if (err) {
+ DHD_ERROR(("failed to enumerate bus %s, "
+ "%d retry left\n",
+ adapter->name, retry));
+ wifi_platform_set_power(adapter, FALSE,
+ WIFI_TURNOFF_DELAY);
+ } else {
+ break;
+ }
+ }
+ } while (retry--);
+
+ if (retry < 0) {
+ DHD_ERROR(("failed to power up %s, max retry reached**\n",
+ adapter->name));
+ return -ENODEV;
+ }
+ }
+ }
+
+ err = dhd_bus_register();
+
+ if (err) {
+ DHD_ERROR(("%s: pcie_register_driver failed\n", __FUNCTION__));
+ if (dhd_download_fw_on_driverload) {
+ /* power down all adapters */
+ for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) {
+ adapter = &dhd_wifi_platdata->adapters[i];
+ wifi_platform_bus_enumerate(adapter, FALSE);
+ wifi_platform_set_power(adapter,
+ FALSE, WIFI_TURNOFF_DELAY);
+ }
+ }
+ }
+ }
+
+ return err;
+}
+#else
+static int dhd_wifi_platform_load_pcie(void)
+{
+ return 0;
+}
+#endif /* BCMPCIE */
+
+void dhd_wifi_platform_unregister_drv(void)
+{
+#ifndef CUSTOMER_HW
+ if (cfg_multichip)
+ platform_driver_unregister(&dhd_wifi_platform_dev_driver);
+ else
+#endif
+ wifi_ctrlfunc_unregister_drv();
+}
+
+extern int dhd_watchdog_prio;
+extern int dhd_dpc_prio;
+extern uint dhd_deferred_tx;
+#if defined(OEM_ANDROID) && (defined(BCMLXSDMMC) || defined(BCMDBUS))
+extern struct semaphore dhd_registration_sem;
+#endif /* OEM_ANDROID && (BCMLXSDMMC || BCMDBUS) */
+
+#ifdef BCMSDIO
+static int dhd_wifi_platform_load_sdio(void)
+{
+ int i;
+ int err = 0;
+ wifi_adapter_info_t *adapter;
+
+ BCM_REFERENCE(i);
+ BCM_REFERENCE(adapter);
+ /* Sanity check on the module parameters
+ * - Both watchdog and DPC as tasklets are ok
+ * - If both watchdog and DPC are threads, TX must be deferred
+ */
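+	/* i.e. (illustrative):
+	 *   dhd_watchdog_prio < 0  && dhd_dpc_prio < 0                      -> OK (tasklets)
+	 *   dhd_watchdog_prio >= 0 && dhd_dpc_prio >= 0 && dhd_deferred_tx  -> OK (threads)
+	 *   any other combination                                           -> -EINVAL
+	 */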
+ if (!(dhd_watchdog_prio < 0 && dhd_dpc_prio < 0) &&
+ !(dhd_watchdog_prio >= 0 && dhd_dpc_prio >= 0 && dhd_deferred_tx))
+ return -EINVAL;
+
+#if defined(OEM_ANDROID) && defined(BCMLXSDMMC) && !defined(DHD_PRELOAD)
+ sema_init(&dhd_registration_sem, 0);
+#endif
+
+ if (dhd_wifi_platdata == NULL) {
+ DHD_ERROR(("DHD wifi platform data is required for Android build\n"));
+ DHD_ERROR(("DHD registering bus directly\n"));
+ /* x86 bring-up PC needs no power-up operations */
+ err = dhd_bus_register();
+ return err;
+ }
+
+#if defined(OEM_ANDROID) && defined(BCMLXSDMMC) && !defined(DHD_PRELOAD)
+ /* power up all adapters */
+ for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) {
+ bool chip_up = FALSE;
+ int retry = POWERUP_MAX_RETRY;
+ struct semaphore dhd_chipup_sem;
+
+ adapter = &dhd_wifi_platdata->adapters[i];
+
+ DHD_ERROR(("Power-up adapter '%s'\n", adapter->name));
+ DHD_INFO((" - irq %d [flags %d], firmware: %s, nvram: %s\n",
+ adapter->irq_num, adapter->intr_flags, adapter->fw_path, adapter->nv_path));
+ DHD_INFO((" - bus type %d, bus num %d, slot num %d\n\n",
+ adapter->bus_type, adapter->bus_num, adapter->slot_num));
+
+ do {
+#ifndef CUSTOMER_HW_AMLOGIC
+ sema_init(&dhd_chipup_sem, 0);
+ err = dhd_bus_reg_sdio_notify(&dhd_chipup_sem);
+ if (err) {
+ DHD_ERROR(("%s dhd_bus_reg_sdio_notify fail(%d)\n\n",
+ __FUNCTION__, err));
+ return err;
+ }
+#endif
+ err = wifi_platform_set_power(adapter, TRUE, WIFI_TURNON_DELAY);
+ if (err) {
+ DHD_ERROR(("%s: wifi pwr on error ! \n", __FUNCTION__));
+ dhd_bus_unreg_sdio_notify();
+				/* WL_REG_ON state unknown, power off forcibly */
+ wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY);
+ continue;
+ } else {
+ wifi_platform_bus_enumerate(adapter, TRUE);
+ }
+#ifdef CUSTOMER_HW_AMLOGIC
+ sema_init(&dhd_chipup_sem, 0);
+ err = dhd_bus_reg_sdio_notify(&dhd_chipup_sem);
+ if (err) {
+ DHD_ERROR(("%s dhd_bus_reg_sdio_notify fail(%d)\n\n",
+ __FUNCTION__, err));
+ return err;
+ }
+#endif
+
+ if (down_timeout(&dhd_chipup_sem, msecs_to_jiffies(POWERUP_WAIT_MS)) == 0) {
+ dhd_bus_unreg_sdio_notify();
+ chip_up = TRUE;
+ break;
+ }
+
+ DHD_ERROR(("failed to power up %s, %d retry left\n", adapter->name, retry));
+ dhd_bus_unreg_sdio_notify();
+ wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY);
+ wifi_platform_bus_enumerate(adapter, FALSE);
+ } while (retry--);
+
+ if (!chip_up) {
+ DHD_ERROR(("failed to power up %s, max retry reached**\n", adapter->name));
+ return -ENODEV;
+ }
+
+ }
+
+ err = dhd_bus_register();
+
+ if (err) {
+ DHD_ERROR(("%s: sdio_register_driver failed\n", __FUNCTION__));
+ goto fail;
+ }
+
+ /*
+	 * Wait until the MMC sdio_register_driver callback has been called
+	 * and the driver attach has completed. This synchronizes the exit
+	 * from dhd insmod with the kernel MMC sdio device callback registration.
+ */
+ err = down_timeout(&dhd_registration_sem, msecs_to_jiffies(DHD_REGISTRATION_TIMEOUT));
+ if (err) {
+ DHD_ERROR(("%s: sdio_register_driver timeout or error \n", __FUNCTION__));
+ dhd_bus_unregister();
+ goto fail;
+ }
+
+ return err;
+
+fail:
+ /* power down all adapters */
+ for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) {
+ adapter = &dhd_wifi_platdata->adapters[i];
+ wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY);
+ wifi_platform_bus_enumerate(adapter, FALSE);
+ }
+#else
+ /* x86 bring-up PC needs no power-up operations */
+ err = dhd_bus_register();
+#endif /* OEM_ANDROID && BCMLXSDMMC && !DHD_PRELOAD */
+
+ return err;
+}
+#else /* BCMSDIO */
+static int dhd_wifi_platform_load_sdio(void)
+{
+ return 0;
+}
+#endif /* BCMSDIO */
+
+#ifdef BCMDBUS
+static int dhd_wifi_platform_load_usb(void)
+{
+ int err = 0;
+#if !defined(DHD_PRELOAD)
+ wifi_adapter_info_t *adapter;
+ s32 timeout = -1;
+ int i;
+ enum wifi_adapter_status wait_status;
+#endif
+
+#if !defined(DHD_PRELOAD)
+ /* power down all adapters */
+ for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) {
+ adapter = &dhd_wifi_platdata->adapters[i];
+		err = wifi_platform_set_power(adapter, FALSE, 0);
+		if (err) {
+ DHD_ERROR(("failed to wifi_platform_set_power on %s\n", adapter->name));
+ goto exit;
+ }
+ }
+ OSL_SLEEP(200);
+#endif
+
+ err = dhd_bus_register();
+ if (err) {
+ DHD_ERROR(("%s: usb_register failed\n", __FUNCTION__));
+ goto exit;
+ }
+
+#if !defined(DHD_PRELOAD)
+ /* power up all adapters */
+ for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) {
+ adapter = &dhd_wifi_platdata->adapters[i];
+ DHD_ERROR(("Power-up adapter '%s'\n", adapter->name));
+ DHD_INFO((" - irq %d [flags %d], firmware: %s, nvram: %s\n",
+ adapter->irq_num, adapter->intr_flags, adapter->fw_path, adapter->nv_path));
+ DHD_INFO((" - bus type %d, bus num %d, slot num %d\n\n",
+ adapter->bus_type, adapter->bus_num, adapter->slot_num));
+ err = wifi_platform_set_power(adapter, TRUE, WIFI_TURNON_DELAY);
+ if (err) {
+ DHD_ERROR(("failed to wifi_platform_set_power on %s\n", adapter->name));
+ goto fail;
+ }
+ if (dhd_download_fw_on_driverload)
+ wait_status = WIFI_STATUS_ATTACH;
+ else
+ wait_status = WIFI_STATUS_DETTACH;
+ timeout = wait_event_interruptible_timeout(adapter->status_event,
+ wifi_get_adapter_status(adapter, wait_status),
+ msecs_to_jiffies(DHD_REGISTRATION_TIMEOUT));
+ if (timeout <= 0) {
+ err = -1;
+ DHD_ERROR(("%s: usb_register_driver timeout\n", __FUNCTION__));
+ goto fail;
+ }
+ }
+#endif
+
+exit:
+ return err;
+
+#if !defined(DHD_PRELOAD)
+fail:
+ dhd_bus_unregister();
+ /* power down all adapters */
+ for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) {
+ adapter = &dhd_wifi_platdata->adapters[i];
+ wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY);
+ }
+
+ return err;
+#endif
+}
+#else /* BCMDBUS */
+static int dhd_wifi_platform_load_usb(void)
+{
+ return 0;
+}
+#endif /* BCMDBUS */
+
+static int dhd_wifi_platform_load(void)
+{
+ int err = 0;
+ printf("%s: Enter\n", __FUNCTION__);
+
+#if defined(OEM_ANDROID)
+ wl_android_init();
+#endif /* OEM_ANDROID */
+
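+	/* Try each bus backend in turn; the stub versions compiled out by
+	 * BCMDBUS/BCMSDIO/BCMPCIE simply return 0, so only the bus that is
+	 * actually enabled at build time does any work here.
+	 */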
+ if ((err = dhd_wifi_platform_load_usb()))
+ goto end;
+ else if ((err = dhd_wifi_platform_load_sdio()))
+ goto end;
+ else
+ err = dhd_wifi_platform_load_pcie();
+
+end:
+#if defined(OEM_ANDROID)
+ if (err)
+ wl_android_exit();
+#if !defined(MULTIPLE_SUPPLICANT)
+ else
+ wl_android_post_init();
+#endif
+#endif /* OEM_ANDROID */
+
+ return err;
+}
diff --git a/bcmdhd.101.10.361.x/dhd_linux_priv.h b/bcmdhd.101.10.361.x/dhd_linux_priv.h
new file mode 100755
index 0000000..1eeb27e
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_linux_priv.h
@@ -0,0 +1,518 @@
+/*
+ * DHD Linux header file - contains private structure definition of the Linux specific layer
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+#ifndef __DHD_LINUX_PRIV_H__
+#define __DHD_LINUX_PRIV_H__
+
+#include <osl.h>
+
+#ifdef SHOW_LOGTRACE
+#include <linux/syscalls.h>
+#include <event_log.h>
+#endif /* SHOW_LOGTRACE */
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+#endif /* CONFIG_COMPAT */
+#ifdef CONFIG_HAS_WAKELOCK
+#include <linux/pm_wakeup.h>
+#endif /* CONFIG_HAS_WAKELOCK */
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_dbg.h>
+#include <dhd_debug.h>
+#include <dhd_linux.h>
+#include <dhd_bus.h>
+
+#ifdef PCIE_FULL_DONGLE
+#include <bcmmsgbuf.h>
+#include <dhd_flowring.h>
+#endif /* PCIE_FULL_DONGLE */
+
+#ifdef DHD_QOS_ON_SOCK_FLOW
+struct dhd_sock_qos_info;
+#endif /* DHD_QOS_ON_SOCK_FLOW */
+
+/*
+ * Do not include this header except from dhd_linux.c and dhd_linux_sysfs.c.
+ * Local private structure (extension of pub)
+ */
+typedef struct dhd_info {
+#if defined(WL_WIRELESS_EXT)
+ wl_iw_t iw; /* wireless extensions state (must be first) */
+#endif /* defined(WL_WIRELESS_EXT) */
+ dhd_pub_t pub;
+ /* for supporting multiple interfaces.
+ * static_ifs hold the net ifaces without valid FW IF
+ */
+ dhd_if_t *iflist[DHD_MAX_IFS + DHD_MAX_STATIC_IFS];
+ wifi_adapter_info_t *adapter; /* adapter information, interrupt, fw path etc. */
+ char fw_path[PATH_MAX]; /* path to firmware image */
+ char nv_path[PATH_MAX]; /* path to nvram vars file */
+ char clm_path[PATH_MAX]; /* path to clm vars file */
+ char conf_path[PATH_MAX]; /* path to config vars file */
+#ifdef DHD_UCODE_DOWNLOAD
+ char uc_path[PATH_MAX]; /* path to ucode image */
+#endif /* DHD_UCODE_DOWNLOAD */
+
+ /* serialize dhd iovars */
+ struct mutex dhd_iovar_mutex;
+
+ struct semaphore proto_sem;
+#ifdef PROP_TXSTATUS
+ spinlock_t wlfc_spinlock;
+
+#ifdef BCMDBUS
+ ulong wlfc_lock_flags;
+ ulong wlfc_pub_lock_flags;
+#endif /* BCMDBUS */
+#endif /* PROP_TXSTATUS */
+ wait_queue_head_t ioctl_resp_wait;
+ wait_queue_head_t d3ack_wait;
+ wait_queue_head_t dhd_bus_busy_state_wait;
+ wait_queue_head_t dmaxfer_wait;
+#ifdef BT_OVER_PCIE
+ wait_queue_head_t quiesce_wait;
+#endif /* BT_OVER_PCIE */
+ uint32 default_wd_interval;
+
+ timer_list_compat_t timer;
+ bool wd_timer_valid;
+#ifdef DHD_PCIE_RUNTIMEPM
+ timer_list_compat_t rpm_timer;
+ bool rpm_timer_valid;
+ tsk_ctl_t thr_rpm_ctl;
+#endif /* DHD_PCIE_RUNTIMEPM */
+ struct tasklet_struct tasklet;
+ spinlock_t sdlock;
+ spinlock_t txqlock;
+ spinlock_t dhd_lock;
+ spinlock_t txoff_lock;
+#ifdef BCMDBUS
+ ulong txqlock_flags;
+#endif /* BCMDBUS */
+
+#ifndef BCMDBUS
+ struct semaphore sdsem;
+ tsk_ctl_t thr_dpc_ctl;
+ tsk_ctl_t thr_wdt_ctl;
+#endif /* BCMDBUS */
+
+ tsk_ctl_t thr_rxf_ctl;
+ spinlock_t rxf_lock;
+ bool rxthread_enabled;
+
+ /* Wakelocks */
+#if defined(CONFIG_HAS_WAKELOCK)
+ struct wakeup_source wl_wifi; /* Wifi wakelock */
+ struct wakeup_source wl_rxwake; /* Wifi rx wakelock */
+ struct wakeup_source wl_ctrlwake; /* Wifi ctrl wakelock */
+ struct wakeup_source wl_wdwake; /* Wifi wd wakelock */
+ struct wakeup_source wl_evtwake; /* Wifi event wakelock */
+ struct wakeup_source wl_pmwake; /* Wifi pm handler wakelock */
+ struct wakeup_source wl_txflwake; /* Wifi tx flow wakelock */
+#ifdef BCMPCIE_OOB_HOST_WAKE
+ struct wakeup_source wl_intrwake; /* Host wakeup wakelock */
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+#ifdef DHD_USE_SCAN_WAKELOCK
+ struct wakeup_source wl_scanwake; /* Wifi scan wakelock */
+#endif /* DHD_USE_SCAN_WAKELOCK */
+ struct wakeup_source wl_nanwake; /* NAN wakelock */
+#endif /* CONFIG_HAS_WAKELOCK */
+
+#if defined(OEM_ANDROID)
+ /* net_device interface lock, prevent race conditions among net_dev interface
+ * calls and wifi_on or wifi_off
+ */
+ struct mutex dhd_net_if_mutex;
+ struct mutex dhd_suspend_mutex;
+#if defined(PKT_FILTER_SUPPORT) && defined(APF)
+ struct mutex dhd_apf_mutex;
+#endif /* PKT_FILTER_SUPPORT && APF */
+#endif /* OEM_ANDROID */
+ spinlock_t wakelock_spinlock;
+ spinlock_t wakelock_evt_spinlock;
+ uint32 wakelock_counter;
+ int wakelock_wd_counter;
+ int wakelock_rx_timeout_enable;
+ int wakelock_ctrl_timeout_enable;
+ bool waive_wakelock;
+ uint32 wakelock_before_waive;
+
+ /* Thread to issue ioctl for multicast */
+ wait_queue_head_t ctrl_wait;
+ atomic_t pend_8021x_cnt;
+ dhd_attach_states_t dhd_state;
+#ifdef SHOW_LOGTRACE
+ dhd_event_log_t event_data;
+#endif /* SHOW_LOGTRACE */
+
+#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
+ struct early_suspend early_suspend;
+#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
+
+#ifdef ARP_OFFLOAD_SUPPORT
+ u32 pend_ipaddr;
+#endif /* ARP_OFFLOAD_SUPPORT */
+#ifdef DHDTCPACK_SUPPRESS
+ spinlock_t tcpack_lock;
+#endif /* DHDTCPACK_SUPPRESS */
+#ifdef FIX_CPU_MIN_CLOCK
+ bool cpufreq_fix_status;
+ struct mutex cpufreq_fix;
+ struct pm_qos_request dhd_cpu_qos;
+#ifdef FIX_BUS_MIN_CLOCK
+ struct pm_qos_request dhd_bus_qos;
+#endif /* FIX_BUS_MIN_CLOCK */
+#endif /* FIX_CPU_MIN_CLOCK */
+ void *dhd_deferred_wq;
+#if (defined(BCM_ROUTER_DHD) && defined(HNDCTF))
+ ctf_t *cih; /* ctf instance handle */
+ ctf_brc_hot_t *brc_hot; /* hot ctf bridge cache entry */
+#endif /* BCM_ROUTER_DHD && HNDCTF */
+#ifdef DEBUG_CPU_FREQ
+ struct notifier_block freq_trans;
+ int __percpu *new_freq;
+#endif
+ unsigned int unit;
+ struct notifier_block pm_notifier;
+#ifdef DHD_PSTA
+ uint32 psta_mode; /* PSTA or PSR */
+#endif /* DHD_PSTA */
+#ifdef DHD_WET
+ uint32 wet_mode;
+#endif /* DHD_WET */
+#ifdef DHD_DEBUG
+ dhd_dump_t *dump;
+ timer_list_compat_t join_timer;
+ u32 join_timeout_val;
+ bool join_timer_active;
+ uint scan_time_count;
+ timer_list_compat_t scan_timer;
+ bool scan_timer_active;
+#endif
+ struct delayed_work dhd_dpc_dispatcher_work;
+
+ /* CPU on which the DHD DPC is running */
+ atomic_t dpc_cpu;
+ atomic_t prev_dpc_cpu;
+#if defined(DHD_LB)
+#if defined(DHD_LB_HOST_CTRL)
+ bool permitted_primary_cpu;
+#endif /* DHD_LB_HOST_CTRL */
+ /* CPU Load Balance dynamic CPU selection */
+
+	/* Variable that tracks the current CPUs available for candidacy */
+ cpumask_var_t cpumask_curr_avail;
+
+ /* Primary and secondary CPU mask */
+ cpumask_var_t cpumask_primary, cpumask_secondary; /* configuration */
+ cpumask_var_t cpumask_primary_new, cpumask_secondary_new; /* temp */
+
+ struct notifier_block cpu_notifier;
+
+ /* Napi struct for handling rx packet sendup. Packets are removed from
+ * H2D RxCompl ring and placed into rx_pend_queue. rx_pend_queue is then
+ * appended to rx_napi_queue (w/ lock) and the rx_napi_struct is scheduled
+ * to run to rx_napi_cpu.
+ */
+ struct sk_buff_head rx_pend_queue ____cacheline_aligned;
+ struct sk_buff_head rx_napi_queue ____cacheline_aligned;
+ struct sk_buff_head rx_process_queue ____cacheline_aligned;
+ struct napi_struct rx_napi_struct ____cacheline_aligned;
+ atomic_t rx_napi_cpu; /* cpu on which the napi is dispatched */
+ struct net_device *rx_napi_netdev; /* netdev of primary interface */
+
+ struct work_struct rx_napi_dispatcher_work;
+ struct work_struct tx_compl_dispatcher_work;
+ struct work_struct tx_dispatcher_work;
+ struct work_struct rx_compl_dispatcher_work;
+
+ /* Number of times DPC Tasklet ran */
+ uint32 dhd_dpc_cnt;
+ /* Number of times NAPI processing got scheduled */
+ uint32 napi_sched_cnt;
+ /* NAPI latency stats */
+ uint64 *napi_latency;
+ uint64 napi_schedule_time;
+ /* Number of times NAPI processing ran on each available core */
+ uint32 *napi_percpu_run_cnt;
+ /* Number of times RX Completions got scheduled */
+ uint32 rxc_sched_cnt;
+ /* Number of times RX Completion ran on each available core */
+ uint32 *rxc_percpu_run_cnt;
+ /* Number of times TX Completions got scheduled */
+ uint32 txc_sched_cnt;
+ /* Number of times TX Completions ran on each available core */
+ uint32 *txc_percpu_run_cnt;
+ /* CPU status */
+ /* Number of times each CPU came online */
+ uint32 *cpu_online_cnt;
+ /* Number of times each CPU went offline */
+ uint32 *cpu_offline_cnt;
+
+ /* Number of times TX processing run on each core */
+ uint32 *txp_percpu_run_cnt;
+ /* Number of times TX start run on each core */
+ uint32 *tx_start_percpu_run_cnt;
+
+ /* Tx load balancing */
+
+ /* TODO: Need to see if batch processing is really required in case of TX
+ * processing. In case of RX the Dongle can send a bunch of rx completions,
+ * hence we took a 3 queue approach
+ * enque - adds the skbs to rx_pend_queue
+ * dispatch - uses a lock and adds the list of skbs from pend queue to
+ * napi queue
+ * napi processing - copies the pend_queue into a local queue and works
+ * on it.
+	 * But for TX it is going to be 1 skb at a time, so we are just thinking
+	 * of using only one queue and using the lock-supported skb queue functions
+	 * to add and process it. If it is inefficient we'll revisit the queue
+	 * design.
+ */
+
+ /* When the NET_TX tries to send a TX packet put it into tx_pend_queue */
+ /* struct sk_buff_head tx_pend_queue ____cacheline_aligned; */
+ /*
+	 * From the tasklet that actually sends out data,
+	 * copy the list tx_pend_queue into tx_active_queue. Thereby the
+	 * spinlock is needed only to perform the copy; the rest of the code,
+	 * i.e. constructing tx_pend_queue and processing tx_active_queue,
+	 * can be lockless. The concept is borrowed as-is from RX processing.
+ */
+ /* struct sk_buff_head tx_active_queue ____cacheline_aligned; */
+
+ /* Control TXP in runtime, enable by default */
+ atomic_t lb_txp_active;
+
+ /* Control RXP in runtime, enable by default */
+ atomic_t lb_rxp_active;
+
+ /*
+	 * When the NET_TX tries to send a TX packet, put it into tx_pend_queue.
+	 * For now, the processing tasklet will also directly operate on this
+	 * queue.
+ */
+ struct sk_buff_head tx_pend_queue ____cacheline_aligned;
+
+	/* CPU on which the DHD Tx is happening */
+ atomic_t tx_cpu;
+
+ /* CPU on which the Network stack is calling the DHD's xmit function */
+ atomic_t net_tx_cpu;
+
+ /* Tasklet context from which the DHD's TX processing happens */
+ struct tasklet_struct tx_tasklet;
+
+ /*
+ * Consumer Histogram - NAPI RX Packet processing
+ * -----------------------------------------------
+	 * On each CPU, when the NAPI RX packet processing callback is invoked,
+	 * how many packets were processed is captured in this data structure.
+	 * Now it is difficult to capture the "exact" number of packets processed.
+	 * So considering the packet counter to be a 32 bit one, we have a
+	 * bucket with 8 bins (2^1, 2^2 ... 2^8). The "number" of packets
+	 * processed is rounded off to the next power of 2 and put in the
+	 * appropriate "bin"; the value in the bin gets incremented.
+	 * For example, assume that on CPU 1 NAPI Rx runs 3 times
+ * and the packet count processed is as follows (assume the bin counters are 0)
+ * iteration 1 - 10 (the bin counter 2^4 increments to 1)
+ * iteration 2 - 30 (the bin counter 2^5 increments to 1)
+ * iteration 3 - 15 (the bin counter 2^4 increments by 1 to become 2)
+ */
+ uint32 *napi_rx_hist[HIST_BIN_SIZE];
+ uint32 *txc_hist[HIST_BIN_SIZE];
+ uint32 *rxc_hist[HIST_BIN_SIZE];
+ struct kobject dhd_lb_kobj;
+ bool dhd_lb_candidacy_override;
+#endif /* DHD_LB */
+#if defined(DNGL_AXI_ERROR_LOGGING) && defined(DHD_USE_WQ_FOR_DNGL_AXI_ERROR)
+ struct work_struct axi_error_dispatcher_work;
+#endif /* DNGL_AXI_ERROR_LOGGING && DHD_USE_WQ_FOR_DNGL_AXI_ERROR */
+#ifdef SHOW_LOGTRACE
+#ifdef DHD_USE_KTHREAD_FOR_LOGTRACE
+ tsk_ctl_t thr_logtrace_ctl;
+#else
+ struct delayed_work event_log_dispatcher_work;
+#endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */
+#endif /* SHOW_LOGTRACE */
+
+#ifdef BTLOG
+ struct work_struct bt_log_dispatcher_work;
+#endif /* BTLOG */
+#ifdef EWP_EDL
+ struct delayed_work edl_dispatcher_work;
+#endif
+#if defined(WLAN_ACCEL_BOOT)
+ int fs_check_retry;
+ struct delayed_work wl_accel_work;
+ bool wl_accel_force_reg_on;
+ bool wl_accel_boot_on_done;
+#endif
+#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
+#if defined(BCMDBUS)
+ struct task_struct *fw_download_task;
+ struct semaphore fw_download_lock;
+#endif /* BCMDBUS */
+#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
+ struct kobject dhd_kobj;
+ timer_list_compat_t timesync_timer;
+#if defined(BT_OVER_SDIO)
+ char btfw_path[PATH_MAX];
+#endif /* defined (BT_OVER_SDIO) */
+#ifdef WL_MONITOR
+ struct net_device *monitor_dev; /* monitor pseudo device */
+ struct sk_buff *monitor_skb;
+ uint monitor_len;
+	uint monitor_type; /* monitor pseudo device type */
+#ifdef HOST_RADIOTAP_CONV
+ monitor_info_t *monitor_info;
+ uint host_radiotap_conv;
+#endif /* HOST_RADIOTAP_CONV */
+#endif /* WL_MONITOR */
+#if defined (BT_OVER_SDIO)
+ struct mutex bus_user_lock; /* lock for sdio bus apis shared between WLAN & BT */
+ int bus_user_count; /* User counts of sdio bus shared between WLAN & BT */
+#endif /* BT_OVER_SDIO */
+#ifdef SHOW_LOGTRACE
+ struct sk_buff_head evt_trace_queue ____cacheline_aligned;
+#endif
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+ struct workqueue_struct *tx_wq;
+ struct workqueue_struct *rx_wq;
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+#ifdef BTLOG
+ struct sk_buff_head bt_log_queue ____cacheline_aligned;
+#endif /* BTLOG */
+#ifdef PCIE_INB_DW
+ wait_queue_head_t ds_exit_wait;
+#endif /* PCIE_INB_DW */
+#ifdef DHD_DEBUG_UART
+ bool duart_execute;
+#endif /* DHD_DEBUG_UART */
+#ifdef BT_OVER_PCIE
+ struct mutex quiesce_flr_lock;
+ struct mutex quiesce_lock;
+ enum dhd_bus_quiesce_state dhd_quiesce_state;
+#endif /* BT_OVER_PCIE */
+ struct mutex logdump_lock;
+#if defined(GDB_PROXY) && defined(PCIE_FULL_DONGLE) && defined(BCMINTERNAL)
+ /* Root directory for GDB Proxy's (proc)fs files, used by first (default) interface */
+ struct proc_dir_entry *gdb_proxy_fs_root;
+ /* Name of procfs root directory */
+ char gdb_proxy_fs_root_name[100];
+#endif /* defined(GDB_PROXY) && defined(PCIE_FULL_DONGLE) && defined(BCMINTERNAL) */
+#if defined(DHD_MQ) && defined(DHD_MQ_STATS)
+ uint64 pktcnt_qac_histo[MQ_MAX_QUEUES][AC_COUNT];
+ uint64 pktcnt_per_ac[AC_COUNT];
+ uint64 cpu_qstats[MQ_MAX_QUEUES][MQ_MAX_CPUS];
+#endif /* DHD_MQ && DHD_MQ_STATS */
+ /* indicates mem_dump was scheduled as work queue or called directly */
+ bool scheduled_memdump;
+#ifdef DHD_PKTTS
+ bool latency; /* pktts enab flag */
+ pktts_flow_t config[PKTTS_CONFIG_MAX]; /* pktts user config */
+#endif /* DHD_PKTTS */
+ struct work_struct dhd_hang_process_work;
+#ifdef DHD_HP2P
+ spinlock_t hp2p_lock;
+#endif /* DHD_HP2P */
+#ifdef DHD_QOS_ON_SOCK_FLOW
+ struct dhd_sock_qos_info *psk_qos;
+#endif
+} dhd_info_t;
+
+#ifdef WL_MONITOR
+#define MONPKT_EXTRA_LEN 48u
+#endif /* WL_MONITOR */
+
+extern int dhd_sysfs_init(dhd_info_t *dhd);
+extern void dhd_sysfs_exit(dhd_info_t *dhd);
+extern void dhd_dbg_ring_proc_create(dhd_pub_t *dhdp);
+extern void dhd_dbg_ring_proc_destroy(dhd_pub_t *dhdp);
+
+int __dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf);
+
+void dhd_dpc_tasklet_dispatcher_work(struct work_struct * work);
+#if defined(DHD_LB)
+#if defined(DHD_LB_TXP)
+int dhd_lb_sendpkt(dhd_info_t *dhd, struct net_device *net, int ifidx, void *skb);
+void dhd_tx_dispatcher_work(struct work_struct * work);
+void dhd_tx_dispatcher_fn(dhd_pub_t *dhdp);
+void dhd_lb_tx_dispatch(dhd_pub_t *dhdp);
+void dhd_lb_tx_handler(unsigned long data);
+#endif /* DHD_LB_TXP */
+
+#if defined(DHD_LB_RXP)
+int dhd_napi_poll(struct napi_struct *napi, int budget);
+void dhd_rx_napi_dispatcher_work(struct work_struct * work);
+void dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp);
+void dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx);
+unsigned long dhd_read_lb_rxp(dhd_pub_t *dhdp);
+#endif /* DHD_LB_RXP */
+
+void dhd_lb_set_default_cpus(dhd_info_t *dhd);
+void dhd_cpumasks_deinit(dhd_info_t *dhd);
+int dhd_cpumasks_init(dhd_info_t *dhd);
+
+void dhd_select_cpu_candidacy(dhd_info_t *dhd);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+int dhd_cpu_startup_callback(unsigned int cpu);
+int dhd_cpu_teardown_callback(unsigned int cpu);
+#else
+int dhd_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu);
+#endif /* LINUX_VERSION_CODE < 4.10.0 */
+
+int dhd_register_cpuhp_callback(dhd_info_t *dhd);
+int dhd_unregister_cpuhp_callback(dhd_info_t *dhd);
+#endif /* DHD_LB */
+
+#if defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON)
+void dhd_irq_set_affinity(dhd_pub_t *dhdp, const struct cpumask *cpumask);
+#endif /* DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */
+#ifdef DHD_SSSR_DUMP
+extern uint sssr_enab;
+extern uint fis_enab;
+#endif /* DHD_SSSR_DUMP */
+
+#ifdef CONFIG_HAS_WAKELOCK
+enum {
+ WAKE_LOCK_SUSPEND, /* Prevent suspend */
+ WAKE_LOCK_TYPE_COUNT
+};
+#define dhd_wake_lock_init(wakeup_source, type, name) wakeup_source_add(wakeup_source)
+#define dhd_wake_lock_destroy(wakeup_source) wakeup_source_remove(wakeup_source)
+#define dhd_wake_lock(wakeup_source) __pm_stay_awake(wakeup_source)
+#define dhd_wake_unlock(wakeup_source) __pm_relax(wakeup_source)
+#define dhd_wake_lock_active(wakeup_source) ((wakeup_source)->active)
+#define dhd_wake_lock_timeout(wakeup_source, timeout) \
+ __pm_wakeup_event(wakeup_source, jiffies_to_msecs(timeout))
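+
+/* Illustrative usage of the wrappers above (the wakeup_source and name
+ * below are assumptions for the example, not driver code):
+ *
+ *   struct wakeup_source ws;
+ *   dhd_wake_lock_init(&ws, WAKE_LOCK_SUSPEND, "dhd_example");
+ *   dhd_wake_lock(&ws);        maps to __pm_stay_awake(&ws)
+ *   ... work that must not race with suspend ...
+ *   dhd_wake_unlock(&ws);      maps to __pm_relax(&ws)
+ *   dhd_wake_lock_destroy(&ws);
+ */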
+#endif /* CONFIG_HAS_WAKELOCK */
+
+#endif /* __DHD_LINUX_PRIV_H__ */
diff --git a/bcmdhd.101.10.361.x/dhd_linux_sched.c b/bcmdhd.101.10.361.x/dhd_linux_sched.c
new file mode 100755
index 0000000..3d6786a
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_linux_sched.c
@@ -0,0 +1,47 @@
+/*
+ * Expose some of the kernel scheduler routines
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <typedefs.h>
+#include <linuxver.h>
+
+int setScheduler(struct task_struct *p, int policy, struct sched_param *param)
+{
+ int rc = 0;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0))
+ sched_set_fifo_low(p);
+#else
+ rc = sched_setscheduler(p, policy, param);
+#endif
+ return rc;
+}
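+
+/* Example (illustrative, not called from this file): raise a thread to
+ * low FIFO priority. On kernels >= 5.9 the policy/param arguments are
+ * ignored and sched_set_fifo_low() is applied instead.
+ *
+ *   struct sched_param param = { .sched_priority = 1 };
+ *   setScheduler(current, SCHED_FIFO, &param);
+ */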
+
+int get_scheduler_policy(struct task_struct *p)
+{
+	return p->policy;
+}
diff --git a/bcmdhd.101.10.361.x/dhd_linux_sock_qos.c b/bcmdhd.101.10.361.x/dhd_linux_sock_qos.c
new file mode 100755
index 0000000..3348d89
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_linux_sock_qos.c
@@ -0,0 +1,1034 @@
+/*
+ * Source file for DHD QOS on Socket Flow.
+ *
+ * Defines a socket flow and maintains a table of socket flows
+ * for further analysis in order to upgrade the QOS of the flow.
+
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ *
+ */
+
+#include <dhd_linux_priv.h>
+#include <dhd_dbg.h>
+#include <bcmstdlib_s.h>
+#include <bcmendian.h>
+#include <dhd_linux_sock_qos.h>
+#include <dhd_qos_algo.h>
+#include <dhd.h>
+
+#include <net/sock.h>
+#include <linux/sock_diag.h>
+#include <linux/netlink.h>
+#include <linux/list.h>
+#include <linux/sched.h>
+#include <linux/math64.h>
+#include <linux/pkt_sched.h>
+#include <linux_pkt.h>
+#include <net/tcp.h>
+
+/* Maximum number of Socket Flows supported */
+#define MAX_SOCK_FLOW (1024UL)
+
+#define SOCK_FLOW_UPGRADE_THRESHOLD (3)
+/*
+ * Mark a Socket Flow as inactive and free the resources
+ * if there is no packet received for SOCK_IDLE_THRESHOLD_MS
+ * of time. Note that this parameter is in milliseconds.
+ */
+#define SOCK_IDLE_THRESHOLD_MS (2000UL)
+
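+/* CS7 is DSCP 56 (0b111000); with DSCP in the upper six bits of the IPv4
+ * TOS byte that is 56 << 2 = 0xE0, hence the value below.
+ */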
+#define DSCP_TOS_CS7 0xE0u
+
+extern uint dhd_watchdog_ms;
+
+/* Defines Socket Flow */
+struct dhd_sock_flow_info
+{
+ /* Unique identifiers */
+ struct sock *sk;
+ unsigned long ino;
+
+ /* statistics */
+ qos_stat_t stats;
+ u64 last_pkt_ns;
+ kuid_t uid;
+
+ /* Elements related to upgrade management */
+
+ /* 0 - No upgrade
+ * 1 - Upgrade
+ */
+ unsigned int cur_up_state;
+ unsigned int rcm_up_state;
+ unsigned int bus_flow_id;
+
+ /* TODO:
+ * Handling Out Of Order during upgrade
+ * Once an upgrade is decided we cannot handover the skb to
+ * FW in the upgraded Flow Ring ... it will create Out of Order Packets.
+ * Instead we can have a output_q per socket flow. Once the upgrade is
+ * decided, we can start adding skbs to the output_q. The last 'skb' given
+ * to the actual Flow ring should be remembered in 'last_skb_orig_fl'.
+ * Once we get a Tx completion for last_skb_orig_fl we can flush the
+ * contents of output_q to the 'upgraded flowring'. In this solution,
+ * we should also handle the case where output_q hits the watermark
+ * before the completion for 'last_skb_orig_fl' is received. If this condition
+	 * happens, stop worrying about OOO and flush the contents of output_q.
+	 * Probably the last_skb_orig_fl is not sent out due to latency in the
+ * existing flow ... the actual problem we are trying to solve.
+ */
+
+ /* Management elements */
+ struct list_head list;
+ unsigned int in_use;
+};
+
+typedef enum _frameburst_state
+{
+ FRMBRST_DISABLED = 0,
+ FRMBRST_ENABLED = 1
+} frameburst_state_t;
+
+/* Sock QOS Module Structure */
+typedef struct dhd_sock_qos_info
+{
+ /* Table of Socket Flows */
+ struct dhd_sock_flow_info *sk_fl;
+ /* maximum number for socket flows supported */
+ uint32 max_sock_fl;
+
+ /* TODO: need to make it per flow later on */
+ /* global qos algo parameters */
+ qos_algo_params_t qos_params;
+ /* List in which active Socket Flows live */
+ struct list_head sk_fl_list_head;
+ void *list_lock;
+
+ /* Time interval a socket flow resource is moved out of the active list */
+ uint32 sock_idle_thresh;
+ /*
+ * Keep track of number of flows upgraded.
+	 * If it reaches a threshold we should stop upgrading.
+ * This is to avoid the problem where we overwhelm
+ * the Dongle with upgraded traffic.
+ */
+ int num_skfl_upgraded;
+ int skfl_upgrade_thresh;
+
+ /* flag that is set to true when the first flow is upgraded
+ * so that FW frameburst is disabled, and set to false
+ * when no more flows are in upgraded state, so that
+ * FW frameburst is re-enabled
+ */
+ bool upgrade_active;
+ /* fw frameburst state */
+ frameburst_state_t frmbrst_state;
+
+ atomic_t on_off;
+ atomic_t force_upgrade;
+
+ /* required for enabling/disabling watchdog timer at runtime */
+ uint watchdog_ms;
+} dhd_sock_qos_info_t;
+
+#define SK_FL_LIST_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define SK_FL_LIST_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
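+
+/* Typical usage (mirrors the list traversals later in this file):
+ *
+ *   unsigned long flags;
+ *   SK_FL_LIST_LOCK(psk_qos->list_lock, flags);
+ *   ... walk psk_qos->sk_fl_list_head ...
+ *   SK_FL_LIST_UNLOCK(psk_qos->list_lock, flags);
+ */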
+
+int
+dhd_init_sock_flows_buf(dhd_info_t *dhd, uint watchdog_ms)
+{
+ unsigned long sz;
+ unsigned int i;
+ struct dhd_sock_flow_info *sk_fl = NULL;
+ int val = 0, ret = 0;
+
+ if (dhd == NULL)
+ return BCME_BADARG;
+
+ dhd->psk_qos = MALLOCZ(dhd->pub.osh, sizeof(dhd_sock_qos_info_t));
+ if (dhd->psk_qos == NULL) {
+ DHD_ERROR(("%s(): Failed to alloc psk_qos ! \n", __FUNCTION__));
+ return BCME_NOMEM;
+ }
+ dhd->psk_qos->max_sock_fl = MAX_SOCK_FLOW;
+ sz = sizeof(struct dhd_sock_flow_info) * MAX_SOCK_FLOW;
+ dhd->psk_qos->sk_fl = MALLOCZ(dhd->pub.osh, sz);
+ if (dhd->psk_qos->sk_fl == NULL) {
+		DHD_ERROR(("%s(): Failed to allocate sk_fl \r\n", __FUNCTION__));
+ return BCME_NOMEM;
+ }
+
+ sk_fl = dhd->psk_qos->sk_fl;
+ for (i = 0; i < MAX_SOCK_FLOW; i++, sk_fl++) {
+ sk_fl->in_use = 0;
+ }
+
+ dhd->psk_qos->sock_idle_thresh = SOCK_IDLE_THRESHOLD_MS;
+
+ dhd->psk_qos->skfl_upgrade_thresh = SOCK_FLOW_UPGRADE_THRESHOLD;
+
+ INIT_LIST_HEAD(&dhd->psk_qos->sk_fl_list_head);
+ dhd->psk_qos->list_lock = osl_spin_lock_init(dhd->pub.osh);
+
+ dhd->psk_qos->watchdog_ms = watchdog_ms;
+ /* feature is DISABLED by default */
+ dhd_sock_qos_set_status(dhd, 0);
+
+ qos_algo_params_init(&dhd->psk_qos->qos_params);
+
+ dhd->psk_qos->frmbrst_state = FRMBRST_ENABLED;
+	/* Read the initial state of frameburst from FW; we cannot
+	 * assume that it will always be enabled by default.
+	 * We cache the FW frameburst state on the host and update the
+	 * cache every time we change it from the host during a QoS upgrade.
+	 * This decision is taken because firing an iovar every time
+	 * to query the FW frameburst state, before deciding whether to
+	 * change it from the host, is sub-optimal,
+ * especially in the Tx path.
+ */
+ ret = dhd_wl_ioctl_cmd(&dhd->pub, WLC_SET_FAKEFRAG, (char *)&val,
+ sizeof(val), FALSE, 0);
+ if (ret != BCME_OK) {
+ DHD_ERROR(("%s: get fw frameburst failed,"
+ " err=%d\n", __FUNCTION__, ret));
+ } else {
+ DHD_INFO(("%s:fw frameburst = %d", __FUNCTION__, val));
+ dhd->psk_qos->frmbrst_state =
+ (val == 1) ? FRMBRST_ENABLED : FRMBRST_DISABLED;
+ }
+ return BCME_OK;
+}
+
+int
+dhd_deinit_sock_flows_buf(dhd_info_t *dhd)
+{
+ if (dhd == NULL)
+ return BCME_BADARG;
+
+ if (dhd->psk_qos->sk_fl) {
+ MFREE(dhd->pub.osh, dhd->psk_qos->sk_fl,
+ sizeof(struct dhd_sock_flow_info) * MAX_SOCK_FLOW);
+ dhd->psk_qos->sk_fl = NULL;
+ }
+
+ osl_spin_lock_deinit(dhd->pub.osh, dhd->psk_qos->list_lock);
+ MFREE(dhd->pub.osh, dhd->psk_qos, sizeof(dhd_sock_qos_info_t));
+ dhd->psk_qos = NULL;
+
+ return BCME_OK;
+}
+
+/* Caller should hold list_lock */
+static inline struct dhd_sock_flow_info *
+__dhd_find_sock_stream_info(dhd_sock_qos_info_t *psk_qos, unsigned long ino)
+{
+ struct dhd_sock_flow_info *sk_fl = NULL;
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+ list_for_each_entry(sk_fl, &psk_qos->sk_fl_list_head,
+ list) {
+ if (sk_fl && (sk_fl->ino == ino)) {
+ return sk_fl;
+ }
+ } /* end of list iteration */
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+ /* If control comes here, the ino is not found */
+ DHD_INFO(("%s(): ino:%lu not found \r\n", __FUNCTION__, ino));
+
+ return NULL;
+}
+
+static struct dhd_sock_flow_info *
+dhd_alloc_sock_stream_info(dhd_sock_qos_info_t *psk_qos)
+{
+ struct dhd_sock_flow_info *sk_fl = psk_qos->sk_fl;
+ int i;
+
+ for (i = 0; i < psk_qos->max_sock_fl; i++, sk_fl++) {
+ if (sk_fl->in_use == 0) {
+ DHD_ERROR(("%s: Use sk_fl %p \r\n", __FUNCTION__, sk_fl));
+ return sk_fl;
+ }
+ }
+ DHD_INFO(("No Free Socket Stream info \r\n"));
+ return NULL;
+}
+
+/* Caller should hold list_lock */
+static inline void
+__dhd_free_sock_stream_info(dhd_sock_qos_info_t *psk_qos,
+ struct dhd_sock_flow_info *sk_fl)
+{
+ /*
+ * If the socket flow getting freed is an upgraded socket flow,
+ * we can upgrade one more flow.
+ */
+ if (sk_fl->cur_up_state == 1) {
+ --psk_qos->num_skfl_upgraded;
+ ASSERT(psk_qos->num_skfl_upgraded >= 0);
+ }
+
+ /* Remove the flow from active list */
+ list_del(&sk_fl->list);
+
+ DHD_ERROR(("%s(): Cleaning Socket Flow ino:%lu psk_qos->num_skfl_upgraded=%d\r\n",
+ __FUNCTION__, sk_fl->ino, psk_qos->num_skfl_upgraded));
+
+ /* Clear its content */
+ memset_s(sk_fl, sizeof(*sk_fl), 0, sizeof(*sk_fl));
+
+ return;
+}
+
+static void
+dhd_clean_idle_sock_streams(dhd_sock_qos_info_t *psk_qos)
+{
+ struct dhd_sock_flow_info *sk_fl = NULL, *next = NULL;
+ u64 now;
+ u64 diff;
+ unsigned long flags = 0;
+ now = local_clock();
+
+ SK_FL_LIST_LOCK(psk_qos->list_lock, flags);
+
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+ list_for_each_entry_safe(sk_fl, next, &psk_qos->sk_fl_list_head, list) {
+ if (sk_fl) {
+
+ if (sk_fl->in_use == 0) {
+ DHD_ERROR_RLMT(("%s:Something wrong,"
+ " a free sk_fl living in active stream\n",
+ __FUNCTION__));
+ DHD_ERROR_RLMT(("sk_fl:%p sk:%p ino:%lu \r\n",
+ sk_fl, sk_fl->sk, sk_fl->ino));
+ continue;
+ }
+
+ /* XXX: TODO: need to investigate properly in future.
+ * it is observed that in some hosts (FC25), the
+			 * current timestamp is less than the previous timestamp,
+			 * leading to false cleanups
+ */
+ if (now <= sk_fl->last_pkt_ns)
+ continue;
+
+ diff = now - sk_fl->last_pkt_ns;
+
+ /* Convert diff which is in ns to ms */
+ diff = div64_u64(diff, 1000000UL);
+ if (diff >= psk_qos->sock_idle_thresh) {
+ DHD_ERROR(("sk_fl->sk:%p sk_fl->i_no:%lu \r\n",
+ sk_fl->sk, sk_fl->ino));
+ if (sk_fl->cur_up_state == 1 &&
+ psk_qos->num_skfl_upgraded == 1) {
+ psk_qos->upgrade_active = FALSE;
+ }
+ __dhd_free_sock_stream_info(psk_qos, sk_fl);
+ }
+ }
+ } /* end of list iteration */
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+ SK_FL_LIST_UNLOCK(psk_qos->list_lock, flags);
+
+}
+
+static inline int
+__dhd_upgrade_sock_flow(dhd_info_t *dhd,
+ struct dhd_sock_flow_info *sk_fl,
+ struct sk_buff *skb)
+{
+ dhd_sock_qos_info_t *psk_qos = dhd->psk_qos;
+#ifdef DHD_HP2P
+ dhd_pub_t *dhdp = &dhd->pub;
+#endif
+ uint8 *pktdat = NULL;
+ struct ether_header *eh = NULL;
+ struct iphdr *iph = NULL;
+
+ /* Before upgrading a flow,
+ * Check the bound to control the number of flows getting upgraded
+ */
+ if ((sk_fl->rcm_up_state == 1) && (sk_fl->cur_up_state == 0)) {
+ if (psk_qos->num_skfl_upgraded >= psk_qos->skfl_upgrade_thresh) {
+ DHD_ERROR_RLMT(("%s(): Thresh hit num_skfl_upgraded:%d"
+ "skfl_upgrade_thresh:%d \r\n",
+ __FUNCTION__, psk_qos->num_skfl_upgraded,
+ psk_qos->skfl_upgrade_thresh));
+ return BCME_ERROR;
+ } else {
+ if (psk_qos->num_skfl_upgraded == 0) {
+ /* if no flows upgraded till now, and this is the
+ * first flow to be upgraded,
+ * then disable frameburst in FW.
+ * The actual iovar to disable frameburst cannot
+ * be fired here because Tx can happen in atomic context
+ * and dhd_iovar can sleep due to proto_block lock being
+ * held. Instead the flag is checked from
+				 * 'dhd_analyze_sock_flows' which executes in non-atomic context
+ * and the iovar is fired from there
+ */
+ DHD_TRACE(("%s: disable frameburst ..", __FUNCTION__));
+ psk_qos->upgrade_active = TRUE;
+ }
+ ++psk_qos->num_skfl_upgraded;
+ DHD_ERROR_RLMT(("%s(): upgrade flow sk_fl %p,"
+ "num_skfl_upgraded:%d skfl_upgrade_thresh:%d \r\n",
+ __FUNCTION__, sk_fl, psk_qos->num_skfl_upgraded,
+ psk_qos->skfl_upgrade_thresh));
+ }
+ }
+
+ /* Upgrade the skb */
+#ifdef DHD_HP2P
+ if (dhdp->hp2p_capable)
+ skb->priority = TC_PRIO_CONTROL;
+ else
+ skb->priority = TC_PRIO_INTERACTIVE;
+#else
+ skb->priority = TC_PRIO_INTERACTIVE;
+#endif /* DHD_HP2P */
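+	/* Descriptive note: in linux/pkt_sched.h TC_PRIO_INTERACTIVE is 6 and
+	 * TC_PRIO_CONTROL is 7, i.e. the flow is moved into the highest
+	 * priority bands the kernel defines.
+	 */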
+
+ pktdat = PKTDATA(dhd->pub.osh, skb);
+ eh = (struct ether_header *) pktdat;
+ if (pktdat && (eh->ether_type == hton16(ETHER_TYPE_IP))) {
+ /* 'upgrade' DSCP also, else it is observed that on
+ * AP side if DSCP value is not in sync with L2 prio
+ * then out of order packets are observed
+ */
+ iph = (struct iphdr *)(pktdat + sizeof(struct ether_header));
+ iph->tos = DSCP_TOS_CS7;
+ /* re-compute ip hdr checksum
+ * NOTE: this takes around 1us, need to profile more
+ * accurately to get the number of cpu cycles it takes
+ * in order to get a better idea of the impact of
+ * re computing ip hdr chksum in data path
+ */
+ ip_send_check(iph);
+ }
+
+ /* Mark the Flow as 'upgraded' */
+ if (sk_fl->cur_up_state == 0)
+ sk_fl->cur_up_state = 1;
+
+ return BCME_OK;
+}
+
+static inline int
+__dhd_downgrade_sock_flow(dhd_info_t *dhd,
+ struct dhd_sock_flow_info *sk_fl,
+ struct sk_buff *skb)
+{
+ dhd_sock_qos_info_t *psk_qos = dhd->psk_qos;
+
+ if ((sk_fl->rcm_up_state == 0) && (sk_fl->cur_up_state == 1)) {
+ /* sanity check */
+ ASSERT(psk_qos->num_skfl_upgraded > 0);
+ if (psk_qos->num_skfl_upgraded <= 0) {
+ DHD_ERROR_RLMT(("%s(): FATAL ! no upgraded flows !\n",
+ __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ if (psk_qos->num_skfl_upgraded == 1) {
+ /* if this is the
+ * last flow to be downgraded,
+ * then re-enable frameburst in FW.
+ * The actual iovar to enable frameburst cannot
+ * be fired here because Tx can happen in atomic context
+ * and dhd_iovar can sleep due to proto_block lock being
+ * held. Instead the flag is checked from
+			 * 'dhd_analyze_sock_flows' which executes in non-atomic context
+ * and the iovar is fired from there
+ */
+ DHD_TRACE(("%s: enable frameburst ..", __FUNCTION__));
+ psk_qos->upgrade_active = FALSE;
+ }
+ --psk_qos->num_skfl_upgraded;
+ DHD_ERROR_RLMT(("%s(): downgrade flow sk_fl %p,"
+ "num_skfl_upgraded:%d \r\n",
+ __FUNCTION__, sk_fl, psk_qos->num_skfl_upgraded));
+ }
+
+ /* Mark the Flow as 'downgraded' */
+ if (sk_fl->cur_up_state == 1)
+ sk_fl->cur_up_state = 0;
+
+ return BCME_OK;
+}
+
+/*
+ * Update the stats of a Socket flow.
+ * Create a new flow if need be.
+ * If a socket flow has been recommended for upgrade, do so.
+ */
+void
+dhd_update_sock_flows(dhd_info_t *dhd, struct sk_buff *skb)
+{
+ struct sock *sk = NULL;
+ unsigned long ino = 0;
+ struct dhd_sock_flow_info *sk_fl = NULL;
+ dhd_sock_qos_info_t *psk_qos = NULL;
+ unsigned long flags = 0;
+ uint8 prio;
+
+ BCM_REFERENCE(prio);
+
+ if ((dhd == NULL) || (skb == NULL)) {
+ DHD_ERROR_RLMT(("%s: Invalid args \n", __FUNCTION__));
+ return;
+ }
+
+ /* If the Feature is disabled, return */
+ if (dhd_sock_qos_get_status(dhd) == 0)
+ return;
+
+ psk_qos = dhd->psk_qos;
+ sk = (struct sock *)PKTSOCK(dhd->pub.osh, skb);
+
+ /* TODO:
+	 * Sometimes sk is NULL; what does that mean?
+	 * Is it a broadcast packet generated by the network stack?
+ */
+ if (sk == NULL) {
+ return;
+ }
+ ino = sock_i_ino(sk);
+
+ /* TODO:
+ * List Lock need not be held for allocating sock stream .. optimize
+ */
+ SK_FL_LIST_LOCK(psk_qos->list_lock, flags);
+
+ sk_fl = __dhd_find_sock_stream_info(psk_qos, ino);
+ if (sk_fl == NULL) {
+ /* Allocate new sock stream */
+ sk_fl = dhd_alloc_sock_stream_info(psk_qos);
+ if (sk_fl == NULL) {
+ SK_FL_LIST_UNLOCK(psk_qos->list_lock, flags);
+ goto done;
+ }
+ else {
+ /* SK Flow elements updated first time */
+ sk_fl->in_use = 1;
+ sk_fl->sk = sk;
+ sk_fl->ino = ino;
+ /* TODO: Seeing a Kernel Warning ... check */
+ /* sk_fl->uid = sock_i_uid(sk); */
+ sk_fl->cur_up_state = 0;
+ list_add_tail(&sk_fl->list, &psk_qos->sk_fl_list_head);
+ DHD_ERROR(("%s(): skb %p sk %p sk_fl %p ino %lu"
+ " prio 0x%x \r\n", __FUNCTION__, skb,
+ sk, sk_fl, ino, skb->priority));
+ } /* end of new sk flow allocation */
+ } /* end of case when sk flow is found */
+
+ sk_fl->stats.tx_pkts++;
+ sk_fl->stats.tx_bytes += skb->len;
+ sk_fl->last_pkt_ns = local_clock();
+
+ SK_FL_LIST_UNLOCK(psk_qos->list_lock, flags);
+
+ if (sk_fl->rcm_up_state == 1) {
+ __dhd_upgrade_sock_flow(dhd, sk_fl, skb);
+ } else {
+ __dhd_downgrade_sock_flow(dhd, sk_fl, skb);
+ }
+
+ prio = PKTPRIO(skb);
+ DHD_INFO(("%s(): skb:%p skb->priority 0x%x prio %d sk_fl %p\r\n", __FUNCTION__, skb,
+ skb->priority, prio, sk_fl));
+done:
+ return;
+}
+
+static int
+dhd_change_frameburst_state(frameburst_state_t newstate, dhd_info_t *dhd)
+{
+ int ret = 0, val = 0;
+ dhd_sock_qos_info_t *psk_qos = NULL;
+
+ if (!dhd)
+ return BCME_BADARG;
+ if (!dhd->psk_qos)
+ return BCME_BADARG;
+
+ psk_qos = dhd->psk_qos;
+
+	/* Check the cached frameburst state on the host
+	 * instead of querying the FW frameburst state.
+	 * This decision is taken because firing an iovar every time
+	 * to query the FW frameburst state, before deciding whether to
+	 * change it, is sub-optimal,
+ * especially in the Tx path.
+ */
+ if (psk_qos->frmbrst_state == newstate)
+ return BCME_BADOPTION;
+
+ val = (newstate == FRMBRST_ENABLED) ? 1 : 0;
+ ret = dhd_wl_ioctl_cmd(&dhd->pub, WLC_SET_FAKEFRAG, (char *)&val,
+ sizeof(val), TRUE, 0);
+ if (ret != BCME_OK) {
+ DHD_ERROR_RLMT(("%s: set frameburst=%d failed,"
+ " err=%d\n", __FUNCTION__, val, ret));
+ } else {
+ /* change the state */
+ DHD_INFO(("%s: set frameburst=%d\n", __FUNCTION__, val));
+ psk_qos->frmbrst_state = newstate;
+ }
+
+ return ret;
+}
+
+void dhd_analyze_sock_flows(dhd_info_t *dhd, uint32 watchdog_ms)
+{
+ struct dhd_sock_flow_info *sk_fl = NULL;
+ dhd_sock_qos_info_t *psk_qos = NULL;
+ unsigned long flags = 0;
+
+ if (dhd == NULL) {
+ DHD_ERROR_RLMT(("%s: Bad argument \r\n", __FUNCTION__));
+ return;
+ }
+
+ /* Check whether the feature is disabled */
+ if (dhd_sock_qos_get_status(dhd) == 0)
+ return;
+
+ psk_qos = dhd->psk_qos;
+
+ dhd_clean_idle_sock_streams(dhd->psk_qos);
+
+ /* TODO: Plug in the QoS Algorithm here */
+ SK_FL_LIST_LOCK(psk_qos->list_lock, flags);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+ list_for_each_entry(sk_fl, &psk_qos->sk_fl_list_head, list) {
+
+ sk_fl->rcm_up_state = dhd_qos_algo(dhd, &sk_fl->stats, &psk_qos->qos_params);
+
+ /* TODO: Handle downgrades */
+
+ /* update sk_flow previous elements on every sampling interval */
+ sk_fl->stats.tx_pkts_prev = sk_fl->stats.tx_pkts;
+ sk_fl->stats.tx_bytes_prev = sk_fl->stats.tx_bytes;
+
+ /* TODO: Handle the condition where num_skfl_upgraded reaches the threshold */
+
+ /* TODO: Handle the condition where we upgrade all the socket flows
+ * of the uid on which one flow is detected to be upgraded.
+ */
+
+ } /* end of list iteration */
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+ SK_FL_LIST_UNLOCK(psk_qos->list_lock, flags);
+
+ /* disable frameburst in FW on the first flow upgraded */
+ if (psk_qos->upgrade_active) {
+ dhd_change_frameburst_state(FRMBRST_DISABLED, dhd);
+ } else {
+ /* if no upgraded flows remain, either after cleanup,
+ * or after a downgrade,
+ * then re-enable frameburst in FW
+ */
+ dhd_change_frameburst_state(FRMBRST_ENABLED, dhd);
+ }
+
+ return;
+}
+
+void dhd_sock_qos_update_bus_flowid(dhd_info_t *dhd, void *pktbuf,
+ uint32 bus_flow_id)
+{
+ BCM_REFERENCE(dhd);
+ BCM_REFERENCE(pktbuf);
+ BCM_REFERENCE(bus_flow_id);
+ return;
+}
+
+/* ================= Sysfs interface support functions ======================== */
+
+unsigned long dhd_sock_qos_get_status(dhd_info_t *dhd)
+{
+ if (dhd == NULL)
+ return 0;
+
+ return (atomic_read(&dhd->psk_qos->on_off));
+}
+
+void dhd_sock_qos_set_status(dhd_info_t *dhd, unsigned long on_off)
+{
+ if (dhd == NULL)
+ return;
+
+ atomic_set(&dhd->psk_qos->on_off, on_off);
+ if (on_off) {
+ dhd_watchdog_ms = QOS_SAMPLING_INTVL_MS;
+ /* enable watchdog to monitor the socket flows */
+ dhd_os_wd_timer(&dhd->pub, QOS_SAMPLING_INTVL_MS);
+ } else {
+ dhd_watchdog_ms = dhd->psk_qos->watchdog_ms;
+ /* disable watchdog or set it back to the original value */
+ dhd_os_wd_timer(&dhd->pub, dhd->psk_qos->watchdog_ms);
+ }
+ return;
+}
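+
+/*
+ * Usage sketch: enabling via a sysfs store handler re-arms the DHD
+ * watchdog at the 100 ms QoS sampling interval so that
+ * dhd_analyze_sock_flows() runs periodically; disabling restores the
+ * original watchdog period saved in psk_qos->watchdog_ms:
+ *
+ *	dhd_sock_qos_set_status(dhd, 1);
+ *	...
+ *	dhd_sock_qos_set_status(dhd, 0);
+ */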
+
+ssize_t dhd_sock_qos_show_stats(dhd_info_t *dhd, char *buf,
+ ssize_t sz)
+{
+ dhd_sock_qos_info_t *psk_qos = NULL;
+ struct dhd_sock_flow_info *sk_fl = NULL;
+ unsigned long flags = 0;
+ ssize_t ret = 0;
+ char *p = buf;
+
+ /* TODO: should be the actual record length */
+ unsigned long rec_len = 100;
+
+ if (dhd == NULL)
+ return -1;
+
+ psk_qos = dhd->psk_qos;
+
+ ret += scnprintf(p, sz-ret-1, "\nino\t sk\t\t\t tx_pkts\t tx_bytes\t"
+ "last_pkt_ns\r\n");
+ p = buf + ret;
+
+ SK_FL_LIST_LOCK(psk_qos->list_lock, flags);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+ list_for_each_entry(sk_fl, &psk_qos->sk_fl_list_head, list) {
+ /* Protect the buffer from overrun */
+ if (ret + rec_len >= sz)
+ break;
+
+ ret += scnprintf(p, sz-ret-1, "%lu\t %p\t %lu\t %lu\t %llu\t \r\n",
+ sk_fl->ino, sk_fl->sk, sk_fl->stats.tx_pkts, sk_fl->stats.tx_bytes,
+ sk_fl->last_pkt_ns);
+
+ p = buf + ret;
+
+ }
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+ SK_FL_LIST_UNLOCK(psk_qos->list_lock, flags);
+
+ return ret + 1;
+}
+
+void dhd_sock_qos_clear_stats(dhd_info_t *dhd)
+{
+ dhd_sock_qos_info_t *psk_qos = NULL;
+ struct dhd_sock_flow_info *sk_fl = NULL;
+ unsigned long flags = 0;
+
+ if (dhd == NULL)
+ return;
+
+ psk_qos = dhd->psk_qos;
+
+ SK_FL_LIST_LOCK(psk_qos->list_lock, flags);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+ list_for_each_entry(sk_fl, &psk_qos->sk_fl_list_head, list) {
+ sk_fl->stats.tx_pkts = 0;
+ sk_fl->stats.tx_bytes = 0;
+ sk_fl->stats.tx_pkts_prev = 0;
+ sk_fl->stats.tx_bytes_prev = 0;
+ sk_fl->last_pkt_ns = 0;
+ }
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+ SK_FL_LIST_UNLOCK(psk_qos->list_lock, flags);
+
+ return;
+}
+
+unsigned long dhd_sock_qos_get_force_upgrade(dhd_info_t *dhd)
+{
+ if (dhd == NULL)
+ return 0;
+
+ return (atomic_read(&dhd->psk_qos->force_upgrade));
+}
+
+void dhd_sock_qos_set_force_upgrade(dhd_info_t *dhd, unsigned long force_upgrade)
+{
+ if (dhd == NULL)
+ return;
+
+ atomic_set(&dhd->psk_qos->force_upgrade, force_upgrade);
+ return;
+}
+
+int dhd_sock_qos_get_numfl_upgrd_thresh(dhd_info_t *dhd)
+{
+ if (dhd == NULL)
+ return 0;
+
+ return dhd->psk_qos->skfl_upgrade_thresh;
+}
+
+void dhd_sock_qos_set_numfl_upgrd_thresh(dhd_info_t *dhd,
+ int upgrade_thresh)
+{
+ if (dhd == NULL)
+ return;
+
+ dhd->psk_qos->skfl_upgrade_thresh = upgrade_thresh;
+ return;
+}
+
+void dhd_sock_qos_get_avgpktsize_thresh(dhd_info_t *dhd,
+ unsigned long *avgpktsize_low,
+ unsigned long *avgpktsize_high)
+{
+ qos_algo_params_t *pqos_params = NULL;
+
+ if (dhd == NULL || avgpktsize_low == NULL ||
+ avgpktsize_high == NULL) {
+ return;
+ }
+
+ pqos_params = QOS_PARAMS(dhd);
+ *avgpktsize_low = pqos_params->avg_pkt_size_low_thresh;
+ *avgpktsize_high = pqos_params->avg_pkt_size_high_thresh;
+ return;
+}
+
+void dhd_sock_qos_set_avgpktsize_thresh(dhd_info_t *dhd,
+ unsigned long avgpktsize_low,
+ unsigned long avgpktsize_high)
+{
+ qos_algo_params_t *pqos_params = NULL;
+
+ if (dhd == NULL)
+ return;
+
+ pqos_params = QOS_PARAMS(dhd);
+ pqos_params->avg_pkt_size_low_thresh = avgpktsize_low;
+ pqos_params->avg_pkt_size_high_thresh = avgpktsize_high;
+ return;
+}
+
+void dhd_sock_qos_get_numpkts_thresh(dhd_info_t *dhd,
+ unsigned long *numpkts_low,
+ unsigned long *numpkts_high)
+{
+ qos_algo_params_t *pqos_params = NULL;
+
+ if (dhd == NULL || numpkts_low == NULL ||
+ numpkts_high == NULL) {
+ return;
+ }
+
+ pqos_params = QOS_PARAMS(dhd);
+ *numpkts_low = pqos_params->num_pkts_low_thresh;
+ *numpkts_high = pqos_params->num_pkts_high_thresh;
+}
+
+void dhd_sock_qos_set_numpkts_thresh(dhd_info_t *dhd,
+ unsigned long numpkts_low,
+ unsigned long numpkts_high)
+{
+ qos_algo_params_t *pqos_params = NULL;
+
+ if (dhd == NULL)
+ return;
+ pqos_params = QOS_PARAMS(dhd);
+ pqos_params->num_pkts_low_thresh = numpkts_low;
+ pqos_params->num_pkts_high_thresh = numpkts_high;
+ return;
+}
+
+void dhd_sock_qos_get_detectcnt_thresh(dhd_info_t *dhd,
+ unsigned char *detectcnt_inc,
+ unsigned char *detectcnt_dec)
+{
+ qos_algo_params_t *pqos_params = NULL;
+
+ if (dhd == NULL || detectcnt_inc == NULL ||
+ detectcnt_dec == NULL) {
+ return;
+ }
+
+ pqos_params = QOS_PARAMS(dhd);
+ *detectcnt_inc = pqos_params->detect_cnt_inc_thresh;
+ *detectcnt_dec = pqos_params->detect_cnt_dec_thresh;
+}
+
+void dhd_sock_qos_set_detectcnt_thresh(dhd_info_t *dhd,
+ unsigned char detectcnt_inc,
+ unsigned char detectcnt_dec)
+{
+ qos_algo_params_t *pqos_params = NULL;
+
+ if (dhd == NULL)
+ return;
+
+ pqos_params = QOS_PARAMS(dhd);
+ pqos_params->detect_cnt_inc_thresh = detectcnt_inc;
+ pqos_params->detect_cnt_dec_thresh = detectcnt_dec;
+ return;
+}
+
+int dhd_sock_qos_get_detectcnt_upgrd_thresh(dhd_info_t *dhd)
+{
+ qos_algo_params_t *pqos_params = NULL;
+
+ if (dhd == NULL)
+ return 0;
+
+ pqos_params = QOS_PARAMS(dhd);
+ return pqos_params->detect_cnt_upgrade_thresh;
+}
+
+void dhd_sock_qos_set_detectcnt_upgrd_thresh(dhd_info_t *dhd,
+ unsigned char detect_upgrd_thresh)
+{
+ qos_algo_params_t *pqos_params = NULL;
+
+ if (dhd == NULL)
+ return;
+
+ pqos_params = QOS_PARAMS(dhd);
+ pqos_params->detect_cnt_upgrade_thresh = detect_upgrd_thresh;
+}
+
+int dhd_sock_qos_get_maxfl(dhd_info_t *dhd)
+{
+ if (dhd == NULL)
+ return 0;
+
+ return dhd->psk_qos->max_sock_fl;
+}
+
+void dhd_sock_qos_set_maxfl(dhd_info_t *dhd,
+ unsigned int maxfl)
+{
+ if (dhd == NULL)
+ return;
+
+ dhd->psk_qos->max_sock_fl = maxfl;
+}
+/* ================= End of Sysfs interface support functions ======================== */
+
+/* ================= QOS Algorithm ======================== */
+
+/*
+ * Operates on a flow and returns 1 for upgrade and 0 for no upgrade.
+ * Has the potential of moving into a separate file.
+ * Takes the dhd pointer too, in case it has to access platform
+ * functions like MALLOC that take dhd->pub.osh as an argument.
+ */
+int dhd_qos_algo(dhd_info_t *dhd, qos_stat_t *qos, qos_algo_params_t *pqos_params)
+{
+ unsigned long tx_bytes, tx_pkts, tx_avg_pkt_size;
+
+ if (!dhd || !qos || !pqos_params) {
+ return 0;
+ }
+
+ /* if the user has set the sysfs variable to force upgrade */
+ if (atomic_read(&dhd->psk_qos->force_upgrade) == 1) {
+ return 1;
+ }
+
+ DHD_TRACE(("%s(): avgpktsize_thrsh %lu:%lu; "
+ "numpkts_thrs %lu:%lu; detectcnt_thrs %d:%d;"
+ " detectcnt_upgrd_thrs %d\n", __FUNCTION__,
+ pqos_params->avg_pkt_size_low_thresh,
+ pqos_params->avg_pkt_size_high_thresh,
+ pqos_params->num_pkts_low_thresh,
+ pqos_params->num_pkts_high_thresh,
+ pqos_params->detect_cnt_inc_thresh,
+ pqos_params->detect_cnt_dec_thresh,
+ pqos_params->detect_cnt_upgrade_thresh));
+
+ tx_bytes = qos->tx_bytes - qos->tx_bytes_prev;
+ tx_pkts = qos->tx_pkts - qos->tx_pkts_prev;
+ if ((tx_bytes == 0) || (tx_pkts == 0)) {
+ return 0;
+ }
+
+ tx_avg_pkt_size = tx_bytes / tx_pkts;
+
+ if ((tx_avg_pkt_size > pqos_params->avg_pkt_size_low_thresh) &&
+ (tx_avg_pkt_size < pqos_params->avg_pkt_size_high_thresh) &&
+ (tx_pkts > pqos_params->num_pkts_low_thresh) &&
+ (tx_pkts < pqos_params->num_pkts_high_thresh)) {
+ if (qos->lowlat_detect_count < pqos_params->detect_cnt_inc_thresh) {
+ qos->lowlat_detect_count++;
+ }
+ } else if (qos->lowlat_detect_count > pqos_params->detect_cnt_dec_thresh) {
+ qos->lowlat_detect_count--;
+ }
+
+ if (qos->lowlat_detect_count > pqos_params->detect_cnt_upgrade_thresh) {
+ qos->lowlat_flow = TRUE;
+ } else if (qos->lowlat_detect_count == 0) {
+ qos->lowlat_flow = FALSE;
+ }
+
+ DHD_TRACE(("%s(): TX:%lu:%lu:%lu, PUBG:%d::%d\n",
+ __FUNCTION__, tx_avg_pkt_size, tx_bytes, tx_pkts,
+ qos->lowlat_detect_count, qos->lowlat_flow));
+
+ return (qos->lowlat_flow == TRUE) ? 1 : 0;
+}
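+
+/*
+ * Worked example (illustrative numbers; the actual LOWLAT_* defaults may
+ * differ): suppose the low-latency window is 50..500 bytes of average
+ * packet size and 10..200 packets per sampling interval. A game-style
+ * flow sending ~100 packets of ~120 bytes per interval matches the
+ * window on every sample, so lowlat_detect_count ramps up by one per
+ * interval until it exceeds detect_cnt_upgrade_thresh and the flow is
+ * reported for upgrade. When the pattern stops matching, the counter
+ * decays one step per interval and the flow is only downgraded once the
+ * counter drains to zero, which gives the detector hysteresis against
+ * bursty traffic.
+ */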
+
+int qos_algo_params_init(qos_algo_params_t *pqos_params)
+{
+ if (!pqos_params)
+ return BCME_BADARG;
+
+ memset(pqos_params, 0, sizeof(*pqos_params));
+ pqos_params->avg_pkt_size_low_thresh = LOWLAT_AVG_PKT_SIZE_LOW;
+ pqos_params->avg_pkt_size_high_thresh = LOWLAT_AVG_PKT_SIZE_HIGH;
+ pqos_params->num_pkts_low_thresh = LOWLAT_NUM_PKTS_LOW;
+ pqos_params->num_pkts_high_thresh = LOWLAT_NUM_PKTS_HIGH;
+ pqos_params->detect_cnt_inc_thresh = LOWLAT_DETECT_CNT_INC_THRESH;
+ pqos_params->detect_cnt_dec_thresh = LOWLAT_DETECT_CNT_DEC_THRESH;
+ pqos_params->detect_cnt_upgrade_thresh = LOWLAT_DETECT_CNT_UPGRADE_THRESH;
+
+ return BCME_OK;
+}
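+
+/*
+ * Tuning sketch (hypothetical values): the defaults installed above can
+ * be overridden at runtime through the sysfs setter helpers, e.g.
+ *
+ *	dhd_sock_qos_set_avgpktsize_thresh(dhd, 64, 1024);
+ *	dhd_sock_qos_set_numpkts_thresh(dhd, 5, 250);
+ *	dhd_sock_qos_set_detectcnt_thresh(dhd, 3, 0);
+ *	dhd_sock_qos_set_detectcnt_upgrd_thresh(dhd, 5);
+ */
+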
+/* ================= End of QOS Algorithm ======================== */
diff --git a/bcmdhd.101.10.361.x/dhd_linux_sock_qos.h b/bcmdhd.101.10.361.x/dhd_linux_sock_qos.h
new file mode 100755
index 0000000..641469d
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_linux_sock_qos.h
@@ -0,0 +1,118 @@
+/*
+ * Header file for DHD TPA (Traffic Pattern Analyzer)
+ *
+ * Provides type definitions and function prototypes to call into
+ * DHD's QOS on Socket Flow module.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ *
+ */
+
+#ifndef _DHD_LINUX_TPA_H_
+#define _DHD_LINUX_TPA_H_
+
+struct dhd_sock_flow_info;
+
+#if defined(DHD_QOS_ON_SOCK_FLOW)
+#define QOS_SAMPLING_INTVL_MS 100
+/* Feature enabled: original implementation */
+int dhd_init_sock_flows_buf(dhd_info_t *dhd, uint watchdog_ms);
+int dhd_deinit_sock_flows_buf(dhd_info_t *dhd);
+void dhd_update_sock_flows(dhd_info_t *dhd, struct sk_buff *skb);
+void dhd_analyze_sock_flows(dhd_info_t *dhd, uint32 watchdog_ms);
+
+/* sysfs call backs */
+unsigned long dhd_sock_qos_get_status(dhd_info_t *dhd);
+void dhd_sock_qos_set_status(dhd_info_t *dhd, unsigned long on_off);
+ssize_t dhd_sock_qos_show_stats(dhd_info_t *dhd, char *buf, ssize_t sz);
+void dhd_sock_qos_clear_stats(dhd_info_t *dhd);
+unsigned long dhd_sock_qos_get_force_upgrade(dhd_info_t *dhd);
+void dhd_sock_qos_set_force_upgrade(dhd_info_t *dhd, unsigned long force_upgrade);
+int dhd_sock_qos_get_numfl_upgrd_thresh(dhd_info_t *dhd);
+void dhd_sock_qos_set_numfl_upgrd_thresh(dhd_info_t *dhd, int upgrade_thresh);
+void dhd_sock_qos_get_avgpktsize_thresh(dhd_info_t *dhd,
+ unsigned long *avgpktsize_low,
+ unsigned long *avgpktsize_high);
+void dhd_sock_qos_set_avgpktsize_thresh(dhd_info_t *dhd,
+ unsigned long avgpktsize_low,
+ unsigned long avgpktsize_high);
+void dhd_sock_qos_get_numpkts_thresh(dhd_info_t *dhd,
+ unsigned long *numpkts_low,
+ unsigned long *numpkts_high);
+void dhd_sock_qos_set_numpkts_thresh(dhd_info_t *dhd,
+ unsigned long numpkts_low,
+ unsigned long numpkts_high);
+void dhd_sock_qos_get_detectcnt_thresh(dhd_info_t *dhd,
+ unsigned char *detectcnt_inc,
+ unsigned char *detectcnt_dec);
+void dhd_sock_qos_set_detectcnt_thresh(dhd_info_t *dhd,
+ unsigned char detectcnt_inc,
+ unsigned char detectcnt_dec);
+int dhd_sock_qos_get_detectcnt_upgrd_thresh(dhd_info_t *dhd);
+void dhd_sock_qos_set_detectcnt_upgrd_thresh(dhd_info_t *dhd,
+ unsigned char detect_upgrd_thresh);
+int dhd_sock_qos_get_maxfl(dhd_info_t *dhd);
+void dhd_sock_qos_set_maxfl(dhd_info_t *dhd, unsigned int maxfl);
+
+/* Update from Bus Layer */
+void dhd_sock_qos_update_bus_flowid(dhd_info_t *dhd, void *pktbuf,
+ uint32 bus_flow_id);
+
+#else
+/* Feature disabled: dummy implementations */
+
+inline int dhd_init_sock_flows_buf(dhd_info_t *dhd, uint watchdog_ms)
+{
+ BCM_REFERENCE(dhd);
+ return BCME_UNSUPPORTED;
+}
+
+inline int dhd_deinit_sock_flows_buf(dhd_info_t *dhd)
+{
+ BCM_REFERENCE(dhd);
+ return BCME_UNSUPPORTED;
+}
+
+inline void dhd_update_sock_flows(dhd_info_t *dhd, struct sk_buff *skb)
+{
+ BCM_REFERENCE(dhd);
+ BCM_REFERENCE(skb);
+ return;
+}
+
+inline void dhd_analyze_sock_flows(dhd_info_t *dhd, uint32 watchdog_ms)
+{
+ BCM_REFERENCE(dhd);
+ BCM_REFERENCE(watchdog_ms);
+ return;
+}
+
+inline void dhd_sock_qos_update_bus_flowid(dhd_info_t *dhd, void *pktbuf,
+ uint32 bus_flow_id)
+{
+ BCM_REFERENCE(dhd);
+ BCM_REFERENCE(pktbuf);
+ BCM_REFERENCE(bus_flow_id);
+}
+#endif /* DHD_QOS_ON_SOCK_FLOW */
+
+#endif /* _DHD_LINUX_TPA_H_ */
diff --git a/bcmdhd.101.10.361.x/dhd_linux_wq.c b/bcmdhd.101.10.361.x/dhd_linux_wq.c
new file mode 100755
index 0000000..4bfe72b
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_linux_wq.c
@@ -0,0 +1,413 @@
+/*
+ * Broadcom Dongle Host Driver (DHD), Generic work queue framework
+ * Generic interface to handle dhd deferred work events
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/fcntl.h>
+#include <linux/fs.h>
+#include <linux/ip.h>
+#include <linux/kfifo.h>
+
+#include <linuxver.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <bcmdevs.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_dbg.h>
+#include <dhd_linux_wq.h>
+
+/*
+ * XXX: always make sure that the size of this structure is a power
+ * of 2 (2^n); if any new variable has to be added, adjust the
+ * padding accordingly.
+ */
+typedef struct dhd_deferred_event {
+ u8 event; /* holds the event */
+ void *event_data; /* holds event specific data */
+ event_handler_t event_handler;
+ unsigned long pad; /* for memory alignment to power of 2 */
+} dhd_deferred_event_t;
+
+#define DEFRD_EVT_SIZE (sizeof(dhd_deferred_event_t))
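+
+/*
+ * A compile-time guard (sketch, not in the original sources) would catch
+ * violations of the power-of-2 requirement stated above, e.g. from the
+ * init path:
+ *
+ *	BUILD_BUG_ON(DEFRD_EVT_SIZE & (DEFRD_EVT_SIZE - 1));
+ */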
+
+/*
+ * Work events may occur simultaneously.
+ * The FIFOs hold up to 64 low-priority and 16 high-priority events.
+ */
+#define DHD_PRIO_WORK_FIFO_SIZE (16 * DEFRD_EVT_SIZE)
+#define DHD_WORK_FIFO_SIZE (64 * DEFRD_EVT_SIZE)
+
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 32))
+#define kfifo_avail(fifo) (fifo->size - kfifo_len(fifo))
+#endif /* (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 32)) */
+
+#define DHD_FIFO_HAS_FREE_SPACE(fifo) \
+ ((fifo) && (kfifo_avail(fifo) >= DEFRD_EVT_SIZE))
+#define DHD_FIFO_HAS_ENOUGH_DATA(fifo) \
+ ((fifo) && (kfifo_len(fifo) >= DEFRD_EVT_SIZE))
+
+struct dhd_deferred_wq {
+ struct work_struct deferred_work; /* should be the first member */
+
+ struct kfifo *prio_fifo;
+ struct kfifo *work_fifo;
+ u8 *prio_fifo_buf;
+ u8 *work_fifo_buf;
+ spinlock_t work_lock;
+ void *dhd_info; /* review: is this required? */
+ u32 event_skip_mask;
+};
+
+static inline struct kfifo*
+dhd_kfifo_init(u8 *buf, int size, spinlock_t *lock)
+{
+ struct kfifo *fifo;
+ gfp_t flags = CAN_SLEEP()? GFP_KERNEL : GFP_ATOMIC;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33))
+ fifo = kfifo_init(buf, size, flags, lock);
+#else
+ fifo = (struct kfifo *)kzalloc(sizeof(struct kfifo), flags);
+ if (!fifo) {
+ return NULL;
+ }
+ kfifo_init(fifo, buf, size);
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) */
+ return fifo;
+}
+
+static inline void
+dhd_kfifo_free(struct kfifo *fifo)
+{
+ kfifo_free(fifo);
+ kfree(fifo);
+}
+
+/* deferred work functions */
+static void dhd_deferred_work_handler(struct work_struct *data);
+
+void*
+dhd_deferred_work_init(void *dhd_info)
+{
+ struct dhd_deferred_wq *work = NULL;
+ u8* buf;
+ unsigned long fifo_size = 0;
+ gfp_t flags = CAN_SLEEP()? GFP_KERNEL : GFP_ATOMIC;
+
+ if (!dhd_info) {
+ DHD_ERROR(("%s: dhd info not initialized\n", __FUNCTION__));
+ goto return_null;
+ }
+
+ work = (struct dhd_deferred_wq *)kzalloc(sizeof(struct dhd_deferred_wq),
+ flags);
+ if (!work) {
+ DHD_ERROR(("%s: work queue creation failed\n", __FUNCTION__));
+ goto return_null;
+ }
+
+ INIT_WORK((struct work_struct *)work, dhd_deferred_work_handler);
+
+ /* initialize event fifo */
+ spin_lock_init(&work->work_lock);
+
+ /* allocate buffer to hold prio events */
+ fifo_size = DHD_PRIO_WORK_FIFO_SIZE;
+ fifo_size = is_power_of_2(fifo_size) ? fifo_size :
+ roundup_pow_of_two(fifo_size);
+ buf = (u8*)kzalloc(fifo_size, flags);
+ if (!buf) {
+ DHD_ERROR(("%s: prio work fifo allocation failed\n",
+ __FUNCTION__));
+ goto return_null;
+ }
+
+ /* Initialize prio event fifo */
+ work->prio_fifo = dhd_kfifo_init(buf, fifo_size, &work->work_lock);
+ if (!work->prio_fifo) {
+ kfree(buf);
+ goto return_null;
+ }
+
+ /* allocate buffer to hold work events */
+ fifo_size = DHD_WORK_FIFO_SIZE;
+ fifo_size = is_power_of_2(fifo_size) ? fifo_size :
+ roundup_pow_of_two(fifo_size);
+ buf = (u8*)kzalloc(fifo_size, flags);
+ if (!buf) {
+ DHD_ERROR(("%s: work fifo allocation failed\n", __FUNCTION__));
+ goto return_null;
+ }
+
+ /* Initialize event fifo */
+ work->work_fifo = dhd_kfifo_init(buf, fifo_size, &work->work_lock);
+ if (!work->work_fifo) {
+ kfree(buf);
+ goto return_null;
+ }
+
+ work->dhd_info = dhd_info;
+ work->event_skip_mask = 0;
+ DHD_ERROR(("%s: work queue initialized\n", __FUNCTION__));
+ return work;
+
+return_null:
+ if (work) {
+ dhd_deferred_work_deinit(work);
+ }
+
+ return NULL;
+}
+
+void
+dhd_deferred_work_deinit(void *work)
+{
+ struct dhd_deferred_wq *deferred_work = work;
+
+ if (!deferred_work) {
+ DHD_ERROR(("%s: deferred work has been freed already\n",
+ __FUNCTION__));
+ return;
+ }
+
+ /* cancel the deferred work handling */
+ cancel_work_sync((struct work_struct *)deferred_work);
+
+ /*
+ * free work event fifo.
+ * kfifo_free frees locally allocated fifo buffer
+ */
+ if (deferred_work->prio_fifo) {
+ dhd_kfifo_free(deferred_work->prio_fifo);
+ }
+
+ if (deferred_work->work_fifo) {
+ dhd_kfifo_free(deferred_work->work_fifo);
+ }
+
+ kfree(deferred_work);
+}
+
+/* select kfifo according to priority */
+static inline struct kfifo *
+dhd_deferred_work_select_kfifo(struct dhd_deferred_wq *deferred_wq,
+ u8 priority)
+{
+ if (priority == DHD_WQ_WORK_PRIORITY_HIGH) {
+ return deferred_wq->prio_fifo;
+ } else if (priority == DHD_WQ_WORK_PRIORITY_LOW) {
+ return deferred_wq->work_fifo;
+ } else {
+ return NULL;
+ }
+}
+
+/*
+ * Prepares the event, queues it on the fifo matching its priority,
+ * and schedules the deferred work handler.
+ */
+int
+dhd_deferred_schedule_work(void *workq, void *event_data, u8 event,
+ event_handler_t event_handler, u8 priority)
+{
+ struct dhd_deferred_wq *deferred_wq = (struct dhd_deferred_wq *)workq;
+ struct kfifo *fifo;
+ dhd_deferred_event_t deferred_event;
+ int bytes_copied = 0;
+
+ if (!deferred_wq) {
+ DHD_ERROR(("%s: work queue not initialized\n", __FUNCTION__));
+ ASSERT(0);
+ return DHD_WQ_STS_UNINITIALIZED;
+ }
+
+ if (!event || (event >= DHD_MAX_WQ_EVENTS)) {
+ DHD_ERROR(("%s: unknown event, event=%d\n", __FUNCTION__,
+ event));
+ return DHD_WQ_STS_UNKNOWN_EVENT;
+ }
+
+ if (!priority || (priority >= DHD_WQ_MAX_PRIORITY)) {
+ DHD_ERROR(("%s: unknown priority, priority=%d\n",
+ __FUNCTION__, priority));
+ return DHD_WQ_STS_UNKNOWN_PRIORITY;
+ }
+
+ if ((deferred_wq->event_skip_mask & (1 << event))) {
+ DHD_ERROR(("%s: Skip event requested. Mask = 0x%x\n",
+ __FUNCTION__, deferred_wq->event_skip_mask));
+ return DHD_WQ_STS_EVENT_SKIPPED;
+ }
+
+ /*
+ * The default element size is 1 byte, which can be changed
+ * using kfifo_esize(). Older kernels (FC11) don't support
+ * changing the element size, so for compatibility changing
+ * it is not preferred.
+ */
+ ASSERT(kfifo_esize(deferred_wq->prio_fifo) == 1);
+ ASSERT(kfifo_esize(deferred_wq->work_fifo) == 1);
+
+ deferred_event.event = event;
+ deferred_event.event_data = event_data;
+ deferred_event.event_handler = event_handler;
+
+ fifo = dhd_deferred_work_select_kfifo(deferred_wq, priority);
+ if (DHD_FIFO_HAS_FREE_SPACE(fifo)) {
+ bytes_copied = kfifo_in_spinlocked(fifo, &deferred_event,
+ DEFRD_EVT_SIZE, &deferred_wq->work_lock);
+ }
+ if (bytes_copied != DEFRD_EVT_SIZE) {
+ DHD_ERROR(("%s: failed to schedule deferred work, "
+ "priority=%d, bytes_copied=%d\n", __FUNCTION__,
+ priority, bytes_copied));
+ return DHD_WQ_STS_SCHED_FAILED;
+ }
+ schedule_work((struct work_struct *)deferred_wq);
+ return DHD_WQ_STS_OK;
+}
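+
+/*
+ * Illustrative caller (sketch; the handler body and the dhd_deferred_wq
+ * field name are assumptions): a context that must not sleep, such as an
+ * ISR or timer callback, defers the real work to process context:
+ *
+ *	static void example_hang_handler(void *handle, void *event_data,
+ *		u8 event)
+ *	{
+ *		... runs later from the shared system workqueue ...
+ *	}
+ *
+ *	dhd_deferred_schedule_work(dhd->dhd_deferred_wq, NULL,
+ *		DHD_WQ_WORK_HANG_MSG, example_hang_handler,
+ *		DHD_WQ_WORK_PRIORITY_HIGH);
+ */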
+
+static bool
+dhd_get_scheduled_work(struct dhd_deferred_wq *deferred_wq,
+ dhd_deferred_event_t *event)
+{
+ int bytes_copied = 0;
+
+ if (!deferred_wq) {
+ DHD_ERROR(("%s: work queue not initialized\n", __FUNCTION__));
+ return FALSE;
+ }
+
+ /*
+ * The default element size is 1 byte, which can be changed
+ * using kfifo_esize(). Older kernels (FC11) don't support
+ * changing the element size, so for compatibility changing
+ * it is not preferred.
+ */
+ ASSERT(kfifo_esize(deferred_wq->prio_fifo) == 1);
+ ASSERT(kfifo_esize(deferred_wq->work_fifo) == 1);
+
+ /* handle priority work */
+ if (DHD_FIFO_HAS_ENOUGH_DATA(deferred_wq->prio_fifo)) {
+ bytes_copied = kfifo_out_spinlocked(deferred_wq->prio_fifo,
+ event, DEFRD_EVT_SIZE, &deferred_wq->work_lock);
+ }
+
+ /* handle normal work if priority work doesn't have enough data */
+ if ((bytes_copied != DEFRD_EVT_SIZE) &&
+ DHD_FIFO_HAS_ENOUGH_DATA(deferred_wq->work_fifo)) {
+ bytes_copied = kfifo_out_spinlocked(deferred_wq->work_fifo,
+ event, DEFRD_EVT_SIZE, &deferred_wq->work_lock);
+ }
+
+ return (bytes_copied == DEFRD_EVT_SIZE);
+}
+
+static inline void
+dhd_deferred_dump_work_event(dhd_deferred_event_t *work_event)
+{
+ if (!work_event) {
+ DHD_ERROR(("%s: work_event is null\n", __FUNCTION__));
+ return;
+ }
+
+ DHD_ERROR(("%s: work_event->event = %d\n", __FUNCTION__,
+ work_event->event));
+ DHD_ERROR(("%s: work_event->event_data = %p\n", __FUNCTION__,
+ work_event->event_data));
+ DHD_ERROR(("%s: work_event->event_handler = %p\n", __FUNCTION__,
+ work_event->event_handler));
+}
+
+/*
+ * Called when work is scheduled
+ */
+static void
+dhd_deferred_work_handler(struct work_struct *work)
+{
+ struct dhd_deferred_wq *deferred_work = (struct dhd_deferred_wq *)work;
+ dhd_deferred_event_t work_event;
+
+ if (!deferred_work) {
+ DHD_ERROR(("%s: work queue not initialized\n", __FUNCTION__));
+ return;
+ }
+
+ do {
+ if (!dhd_get_scheduled_work(deferred_work, &work_event)) {
+ DHD_TRACE(("%s: no event to handle\n", __FUNCTION__));
+ break;
+ }
+
+ if (work_event.event >= DHD_MAX_WQ_EVENTS) {
+ DHD_ERROR(("%s: unknown event\n", __FUNCTION__));
+ dhd_deferred_dump_work_event(&work_event);
+ ASSERT(work_event.event < DHD_MAX_WQ_EVENTS);
+ continue;
+ }
+
+ /*
+ * XXX: don't do NULL check for 'work_event.event_data'
+ * as for some events like DHD_WQ_WORK_DHD_LOG_DUMP the
+ * event data is always NULL even though rest of the
+ * event parameters are valid
+ */
+
+ if (work_event.event_handler) {
+ work_event.event_handler(deferred_work->dhd_info,
+ work_event.event_data, work_event.event);
+ } else {
+ DHD_ERROR(("%s: event handler is null\n",
+ __FUNCTION__));
+ dhd_deferred_dump_work_event(&work_event);
+ ASSERT(work_event.event_handler != NULL);
+ }
+ } while (1);
+
+ return;
+}
+
+void
+dhd_deferred_work_set_skip(void *work, u8 event, bool set)
+{
+ struct dhd_deferred_wq *deferred_wq = (struct dhd_deferred_wq *)work;
+
+ if (!deferred_wq || !event || (event >= DHD_MAX_WQ_EVENTS)) {
+ DHD_ERROR(("%s: Invalid!!\n", __FUNCTION__));
+ return;
+ }
+
+ if (set) {
+ /* Set */
+ deferred_wq->event_skip_mask |= (1 << event);
+ } else {
+ /* Clear */
+ deferred_wq->event_skip_mask &= ~(1 << event);
+ }
+}
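+
+/*
+ * Usage sketch: suppress one event type while servicing it would be
+ * unsafe (for example across a bus teardown), then re-allow it:
+ *
+ *	dhd_deferred_work_set_skip(wq, DHD_WQ_WORK_DHD_LOG_DUMP, TRUE);
+ *	... critical section ...
+ *	dhd_deferred_work_set_skip(wq, DHD_WQ_WORK_DHD_LOG_DUMP, FALSE);
+ */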
diff --git a/bcmdhd.101.10.361.x/dhd_linux_wq.h b/bcmdhd.101.10.361.x/dhd_linux_wq.h
new file mode 100755
index 0000000..42c5a88
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_linux_wq.h
@@ -0,0 +1,89 @@
+/*
+ * Broadcom Dongle Host Driver (DHD), Generic work queue framework
+ * Generic interface to handle dhd deferred work events
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+#ifndef _dhd_linux_wq_h_
+#define _dhd_linux_wq_h_
+/*
+ * Work event definitions
+ */
+enum _wq_event {
+ DHD_WQ_WORK_IF_ADD = 1,
+ DHD_WQ_WORK_IF_DEL,
+ DHD_WQ_WORK_SET_MAC,
+ DHD_WQ_WORK_SET_MCAST_LIST,
+ DHD_WQ_WORK_IPV6_NDO,
+ DHD_WQ_WORK_HANG_MSG,
+ DHD_WQ_WORK_DHD_LOG_DUMP,
+ DHD_WQ_WORK_PKTLOG_DUMP,
+ DHD_WQ_WORK_INFORM_DHD_MON,
+ DHD_WQ_WORK_EVENT_LOGTRACE,
+ DHD_WQ_WORK_DMA_LB_MEM_REL,
+ DHD_WQ_WORK_NATOE_EVENT,
+ DHD_WQ_WORK_NATOE_IOCTL,
+ DHD_WQ_WORK_MACDBG,
+ DHD_WQ_WORK_DEBUG_UART_DUMP,
+ DHD_WQ_WORK_GET_BIGDATA_AP,
+ DHD_WQ_WORK_SOC_RAM_DUMP,
+ DHD_WQ_WORK_SOC_RAM_COLLECT,
+#ifdef DHD_ERPOM
+ DHD_WQ_WORK_ERROR_RECOVERY,
+#endif /* DHD_ERPOM */
+ DHD_WQ_WORK_H2D_CONSOLE_TIME_STAMP_MATCH,
+ DHD_WQ_WORK_AXI_ERROR_DUMP,
+ DHD_WQ_WORK_CTO_RECOVERY,
+#ifdef DHD_UPDATE_INTF_MAC
+ DHD_WQ_WORK_IF_UPDATE,
+#endif /* DHD_UPDATE_INTF_MAC */
+ DHD_MAX_WQ_EVENTS
+};
+
+/*
+ * Work event priority
+ */
+enum wq_priority {
+ DHD_WQ_WORK_PRIORITY_LOW = 1,
+ DHD_WQ_WORK_PRIORITY_HIGH,
+ DHD_WQ_MAX_PRIORITY
+};
+
+/*
+ * Error definitions
+ */
+#define DHD_WQ_STS_OK 0
+#define DHD_WQ_STS_FAILED -1 /* General failure */
+#define DHD_WQ_STS_UNINITIALIZED -2
+#define DHD_WQ_STS_SCHED_FAILED -3
+#define DHD_WQ_STS_UNKNOWN_EVENT -4
+#define DHD_WQ_STS_UNKNOWN_PRIORITY -5
+#define DHD_WQ_STS_EVENT_SKIPPED -6
+
+typedef void (*event_handler_t)(void *handle, void *event_data, u8 event);
+
+void *dhd_deferred_work_init(void *dhd);
+void dhd_deferred_work_deinit(void *workq);
+int dhd_deferred_schedule_work(void *workq, void *event_data, u8 event,
+ event_handler_t evt_handler, u8 priority);
+void dhd_deferred_work_set_skip(void *work, u8 event, bool set);
+#endif /* _dhd_linux_wq_h_ */
diff --git a/bcmdhd.101.10.361.x/dhd_macdbg.c b/bcmdhd.101.10.361.x/dhd_macdbg.c
new file mode 100755
index 0000000..dd145df
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_macdbg.c
@@ -0,0 +1,746 @@
+/* D11 macdbg functions for Broadcom 802.11abgn
+ * Networking Adapter Device Drivers.
+ *
+ * Broadcom Proprietary and Confidential. Copyright (C) 2020,
+ * All Rights Reserved.
+ *
+ * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom;
+ * the contents of this file may not be disclosed to third parties,
+ * copied or duplicated in any form, in whole or in part, without
+ * the prior written permission of Broadcom.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Proprietary:>>
+ *
+ * $Id: dhd_macdbg.c 670412 2016-11-15 20:01:18Z shinuk $
+ */
+
+#ifdef BCMDBG
+#include <typedefs.h>
+#include <osl.h>
+
+#include <bcmutils.h>
+#include <dhd_dbg.h>
+#include <dhd_macdbg.h>
+#include "d11reglist_proto.h"
+#include "dhdioctl.h"
+#include <sdiovar.h>
+
+#ifdef BCMDBUS
+#include <dbus.h>
+#define BUS_IOVAR_OP(a, b, c, d, e, f, g) dbus_iovar_op(a->dbus, b, c, d, e, f, g)
+#else
+#include <dhd_bus.h>
+#define BUS_IOVAR_OP dhd_bus_iovar_op
+#endif
+
+typedef struct _macdbg_info_t {
+ dhd_pub_t *dhdp;
+ d11regs_list_t *pd11regs;
+ uint16 d11regs_sz;
+ d11regs_list_t *pd11regs_x;
+ uint16 d11regsx_sz;
+ svmp_list_t *psvmpmems;
+ uint16 svmpmems_sz;
+} macdbg_info_t;
+
+#define SVMPLIST_HARDCODE
+
+int
+dhd_macdbg_attach(dhd_pub_t *dhdp)
+{
+ macdbg_info_t *macdbg_info = MALLOCZ(dhdp->osh, sizeof(*macdbg_info));
+#ifdef SVMPLIST_HARDCODE
+ svmp_list_t svmpmems[] = {
+ {0x20000, 256},
+ {0x21e10, 16},
+ {0x20300, 16},
+ {0x20700, 16},
+ {0x20b00, 16},
+ {0x20be0, 16},
+ {0x20bff, 16},
+ {0xc000, 32},
+ {0xe000, 32},
+ {0x10000, 0x8000},
+ {0x18000, 0x8000}
+ };
+#endif /* SVMPLIST_HARDCODE */
+
+ if (macdbg_info == NULL) {
+ return BCME_NOMEM;
+ }
+ dhdp->macdbg_info = macdbg_info;
+ macdbg_info->dhdp = dhdp;
+
+#ifdef SVMPLIST_HARDCODE
+ macdbg_info->psvmpmems = MALLOCZ(dhdp->osh, sizeof(svmpmems));
+ if (macdbg_info->psvmpmems == NULL) {
+ return BCME_NOMEM;
+ }
+
+ macdbg_info->svmpmems_sz = ARRAYSIZE(svmpmems);
+ memcpy(macdbg_info->psvmpmems, svmpmems, sizeof(svmpmems));
+
+ DHD_ERROR(("%s: psvmpmems %p svmpmems_sz %d\n",
+ __FUNCTION__, macdbg_info->psvmpmems, macdbg_info->svmpmems_sz));
+#endif
+ return BCME_OK;
+}
+
+void
+dhd_macdbg_detach(dhd_pub_t *dhdp)
+{
+ macdbg_info_t *macdbg_info = dhdp->macdbg_info;
+ ASSERT(macdbg_info);
+
+ if (macdbg_info->pd11regs) {
+ ASSERT(macdbg_info->d11regs_sz > 0);
+ MFREE(dhdp->osh, macdbg_info->pd11regs,
+ (macdbg_info->d11regs_sz * sizeof(macdbg_info->pd11regs[0])));
+ macdbg_info->d11regs_sz = 0;
+ }
+ if (macdbg_info->pd11regs_x) {
+ ASSERT(macdbg_info->d11regsx_sz > 0);
+ MFREE(dhdp->osh, macdbg_info->pd11regs_x,
+ (macdbg_info->d11regsx_sz * sizeof(macdbg_info->pd11regs_x[0])));
+ macdbg_info->d11regsx_sz = 0;
+ }
+ if (macdbg_info->psvmpmems) {
+ ASSERT(macdbg_info->svmpmems_sz > 0);
+ MFREE(dhdp->osh, macdbg_info->psvmpmems,
+ (macdbg_info->svmpmems_sz * sizeof(macdbg_info->psvmpmems[0])));
+ macdbg_info->svmpmems_sz = 0;
+ }
+ MFREE(dhdp->osh, macdbg_info, sizeof(*macdbg_info));
+}
+
+void
+dhd_macdbg_event_handler(dhd_pub_t *dhdp, uint32 reason,
+ uint8 *event_data, uint32 datalen)
+{
+ d11regs_list_t *pd11regs;
+ macdbg_info_t *macdbg_info = dhdp->macdbg_info;
+ uint d11regs_sz;
+
+ DHD_TRACE(("%s: reason %d datalen %d\n", __FUNCTION__, reason, datalen));
+ switch (reason) {
+ case WLC_E_MACDBG_LIST_PSMX:
+ /* Fall through */
+ case WLC_E_MACDBG_LIST_PSM:
+ pd11regs = MALLOCZ(dhdp->osh, datalen);
+ if (pd11regs == NULL) {
+ DHD_ERROR(("%s: NOMEM for len %d\n", __FUNCTION__, datalen));
+ return;
+ }
+ memcpy(pd11regs, event_data, datalen);
+ d11regs_sz = datalen / sizeof(pd11regs[0]);
+ DHD_ERROR(("%s: d11regs %p d11regs_sz %d\n",
+ __FUNCTION__, pd11regs, d11regs_sz));
+ if (reason == WLC_E_MACDBG_LIST_PSM) {
+ macdbg_info->pd11regs = pd11regs;
+ macdbg_info->d11regs_sz = (uint16)d11regs_sz;
+ } else {
+ macdbg_info->pd11regs_x = pd11regs;
+ macdbg_info->d11regsx_sz = (uint16)d11regs_sz;
+ }
+ break;
+ case WLC_E_MACDBG_REGALL:
+#ifdef LINUX
+ /* Schedule to work queue as this context could be ISR */
+ dhd_schedule_macdbg_dump(dhdp);
+#else
+ /* Dump PSMr */
+ (void) dhd_macdbg_dumpmac(dhdp, NULL, 0, NULL, FALSE);
+ /* Dump PSMx */
+ (void) dhd_macdbg_dumpmac(dhdp, NULL, 0, NULL, TRUE);
+ /* Dump SVMP mems */
+ (void) dhd_macdbg_dumpsvmp(dhdp, NULL, 0, NULL);
+#endif
+ break;
+ default:
+ DHD_ERROR(("%s: Unknown reason %d\n",
+ __FUNCTION__, reason));
+ }
+ return;
+}
+
+static uint16
+_dhd_get_ihr16(macdbg_info_t *macdbg_info, uint16 addr, struct bcmstrbuf *b, bool verbose)
+{
+ sdreg_t sdreg;
+ uint16 val;
+
+ sdreg.func = 2;
+ sdreg.offset = (0x1000 | addr);
+ BUS_IOVAR_OP(macdbg_info->dhdp, "sbreg",
+ &sdreg, sizeof(sdreg), &val, sizeof(val), IOV_GET);
+ if (verbose) {
+ if (b) {
+ bcm_bprintf(b, "DEBUG: IHR16: read 0x%08x, size 2, value 0x%04x\n",
+ (addr + 0x18001000), val);
+ } else {
+ printf("DEBUG: IHR16: read 0x%08x, size 2, value 0x%04x\n",
+ (addr + 0x18001000), val);
+ }
+ }
+ return val;
+}
+
+static uint32
+_dhd_get_ihr32(macdbg_info_t *macdbg_info, uint16 addr, struct bcmstrbuf *b, bool verbose)
+{
+ sdreg_t sdreg;
+ uint32 val;
+
+ sdreg.func = 4;
+ sdreg.offset = (0x1000 | addr);
+ BUS_IOVAR_OP(macdbg_info->dhdp, "sbreg",
+ &sdreg, sizeof(sdreg), &val, sizeof(val), IOV_GET);
+ if (verbose) {
+ if (b) {
+ bcm_bprintf(b, "DEBUG: IHR32: read 0x%08x, size 4, value 0x%08x\n",
+ (addr + 0x18001000), val);
+ } else {
+ printf("DEBUG: IHR32: read 0x%08x, size 4, value 0x%08x\n",
+ (addr + 0x18001000), val);
+ }
+ }
+ return val;
+}
+
+static void
+_dhd_set_ihr16(macdbg_info_t *macdbg_info, uint16 addr, uint16 val,
+ struct bcmstrbuf *b, bool verbose)
+{
+ sdreg_t sdreg;
+
+ sdreg.func = 2;
+ sdreg.offset = (0x1000 | addr);
+ sdreg.value = val;
+
+ if (verbose) {
+ if (b) {
+ bcm_bprintf(b, "DEBUG: IHR16: write 0x%08x, size 2, value 0x%04x\n",
+ (addr + 0x18001000), val);
+ } else {
+ printf("DEBUG: IHR16: write 0x%08x, size 2, value 0x%04x\n",
+ (addr + 0x18001000), val);
+ }
+ }
+ BUS_IOVAR_OP(macdbg_info->dhdp, "sbreg",
+ NULL, 0, &sdreg, sizeof(sdreg), IOV_SET);
+}
+
+static void
+_dhd_set_ihr32(macdbg_info_t *macdbg_info, uint16 addr, uint32 val,
+ struct bcmstrbuf *b, bool verbose)
+{
+ sdreg_t sdreg;
+
+ sdreg.func = 4;
+ sdreg.offset = (0x1000 | addr);
+ sdreg.value = val;
+
+ if (verbose) {
+ if (b) {
+ bcm_bprintf(b, "DEBUG: IHR32: write 0x%08x, size 4, value 0x%08x\n",
+ (addr + 0x18001000), val);
+ } else {
+ printf("DEBUG: IHR32: write 0x%08x, size 4, value 0x%08x\n",
+ (addr + 0x18001000), val);
+ }
+ }
+ BUS_IOVAR_OP(macdbg_info->dhdp, "sbreg",
+ NULL, 0, &sdreg, sizeof(sdreg), IOV_SET);
+}
+
+static uint32
+_dhd_get_d11obj32(macdbg_info_t *macdbg_info, uint16 objaddr, uint32 sel,
+ struct bcmstrbuf *b, bool verbose)
+{
+ uint32 val;
+ sdreg_t sdreg;
+ sdreg.func = 4; /* 4 bytes by default */
+ sdreg.offset = 0x1160;
+
+ if (objaddr == 0xffff) {
+ if (verbose) {
+ goto objaddr_read;
+ } else {
+ goto objdata_read;
+ }
+ }
+
+ if (objaddr & 0x3) {
+ printf("%s: ERROR! Invalid addr 0x%x\n", __FUNCTION__, objaddr);
+ }
+
+ sdreg.value = (sel | (objaddr >> 2));
+
+ if (verbose) {
+ if (b) {
+ bcm_bprintf(b, "DEBUG: %s: Indirect: write 0x%08x, size %d, value 0x%08x\n",
+ (sel & 0x00020000) ? "SCR":"SHM",
+ (sdreg.offset + 0x18000000), sdreg.func, sdreg.value);
+ } else {
+ printf("DEBUG: %s: Indirect: write 0x%08x, size %d, value 0x%08x\n",
+ (sel & 0x00020000) ? "SCR":"SHM",
+ (sdreg.offset + 0x18000000), sdreg.func, sdreg.value);
+ }
+ }
+ BUS_IOVAR_OP(macdbg_info->dhdp, "sbreg",
+ NULL, 0, &sdreg, sizeof(sdreg), IOV_SET);
+
+objaddr_read:
+ /* Give some time to obj addr register */
+ BUS_IOVAR_OP(macdbg_info->dhdp, "sbreg",
+ &sdreg, sizeof(sdreg), &val, sizeof(val), IOV_GET);
+ if (verbose) {
+ if (b) {
+ bcm_bprintf(b, "DEBUG: %s: Indirect: Read 0x%08x, size %d, value 0x%08x\n",
+ (sel & 0x00020000) ? "SCR":"SHM",
+ (sdreg.offset + 0x18000000), sdreg.func, val);
+ } else {
+ printf("DEBUG: %s: Indirect: Read 0x%08x, size %d, value 0x%08x\n",
+ (sel & 0x00020000) ? "SCR":"SHM",
+ (sdreg.offset + 0x18000000), sdreg.func, val);
+ }
+ }
+
+objdata_read:
+ sdreg.offset = 0x1164;
+ BUS_IOVAR_OP(macdbg_info->dhdp, "sbreg",
+ &sdreg, sizeof(sdreg), &val, sizeof(val), IOV_GET);
+ if (verbose) {
+ if (b) {
+ bcm_bprintf(b, "DEBUG: %s: Indirect: Read 0x%08x, size %d, value 0x%04x\n",
+ (sel & 0x00020000) ? "SCR":"SHM",
+ (sdreg.offset + 0x18000000), sdreg.func, val);
+ } else {
+ printf("DEBUG: %s: Indirect: Read 0x%08x, size %d, value 0x%04x\n",
+ (sel & 0x00020000) ? "SCR":"SHM",
+ (sdreg.offset + 0x18000000), sdreg.func, val);
+ }
+ }
+ return val;
+}
+
+static uint16
+_dhd_get_d11obj16(macdbg_info_t *macdbg_info, uint16 objaddr,
+ uint32 sel, d11obj_cache_t *obj_cache, struct bcmstrbuf *b, bool verbose)
+{
+ uint32 val;
+ if (obj_cache && obj_cache->cache_valid && ((obj_cache->sel ^ sel) & (0xffffff)) == 0) {
+ if (obj_cache->addr32 == (objaddr & ~0x3)) {
+ /* XXX: Same objaddr read as the previous one */
+ if (verbose) {
+ if (b) {
+ bcm_bprintf(b, "DEBUG: %s: Read cache value: "
+ "addr32 0x%04x, sel 0x%08x, value 0x%08x\n",
+ (sel & 0x00020000) ? "SCR":"SHM",
+ obj_cache->addr32, obj_cache->sel, obj_cache->val);
+ } else {
+ printf("DEBUG: %s: Read cache value: "
+ "addr32 0x%04x, sel 0x%08x, value 0x%08x\n",
+ (sel & 0x00020000) ? "SCR":"SHM",
+ obj_cache->addr32, obj_cache->sel, obj_cache->val);
+ }
+ }
+ val = obj_cache->val;
+ goto exit;
+ } else if ((obj_cache->sel & 0x02000000) &&
+ (obj_cache->addr32 + 4 == (objaddr & ~0x3))) {
+ /* XXX: objaddr is auto incrementing, so just read objdata */
+ if (verbose) {
+ if (b) {
+ bcm_bprintf(b, "DEBUG: %s: Read objdata only: "
+ "addr32 0x%04x, sel 0x%08x, value 0x%08x\n",
+ (sel & 0x00020000) ? "SCR":"SHM",
+ obj_cache->addr32, obj_cache->sel, obj_cache->val);
+ } else {
+ printf("DEBUG: %s: Read objdata only: "
+ "addr32 0x%04x, sel 0x%08x, value 0x%08x\n",
+ (sel & 0x00020000) ? "SCR":"SHM",
+ obj_cache->addr32, obj_cache->sel, obj_cache->val);
+ }
+ }
+ val = _dhd_get_d11obj32(macdbg_info, 0xffff, sel, b, verbose);
+ goto exit;
+ }
+ }
+ val = _dhd_get_d11obj32(macdbg_info, (objaddr & ~0x2), sel, b, verbose);
+exit:
+ if (obj_cache) {
+ obj_cache->addr32 = (objaddr & ~0x3);
+ obj_cache->sel = sel;
+ obj_cache->val = val;
+ obj_cache->cache_valid = TRUE;
+ }
+ return (uint16)((objaddr & 0x2) ? (val >> 16) : val);
+}
+
+static int
+_dhd_print_d11reg(macdbg_info_t *macdbg_info, int idx, int type, uint16 addr, struct bcmstrbuf *b,
+ d11obj_cache_t *obj_cache, bool verbose)
+{
+ const char *regname[D11REG_TYPE_MAX] = D11REGTYPENAME;
+ uint32 val;
+
+ if (type == D11REG_TYPE_IHR32) {
+ if ((addr & 0x3)) {
+ printf("%s: ERROR! Invalid addr 0x%x\n", __FUNCTION__, addr);
+ addr &= ~0x3;
+ }
+ val = _dhd_get_ihr32(macdbg_info, addr, b, verbose);
+ if (b) {
+ bcm_bprintf(b, "%-3d %s 0x%-4x = 0x%-8x\n",
+ idx, regname[type], addr, val);
+ } else {
+ printf("%-3d %s 0x%-4x = 0x%-8x\n",
+ idx, regname[type], addr, val);
+ }
+ } else {
+ switch (type) {
+ case D11REG_TYPE_IHR16: {
+ if ((addr & 0x1)) {
+ printf("%s: ERROR! Invalid addr 0x%x\n", __FUNCTION__, addr);
+ addr &= ~0x1;
+ }
+ val = _dhd_get_ihr16(macdbg_info, addr, b, verbose);
+ break;
+ }
+ case D11REG_TYPE_IHRX16:
+ val = _dhd_get_d11obj16(macdbg_info, (addr - 0x400) << 1, 0x020b0000,
+ obj_cache, b, verbose);
+ break;
+ case D11REG_TYPE_SCR:
+ val = _dhd_get_d11obj16(macdbg_info, addr << 2, 0x02020000,
+ obj_cache, b, verbose);
+ break;
+ case D11REG_TYPE_SCRX:
+ val = _dhd_get_d11obj16(macdbg_info, addr << 2, 0x020a0000,
+ obj_cache, b, verbose);
+ break;
+ case D11REG_TYPE_SHM:
+ val = _dhd_get_d11obj16(macdbg_info, addr, 0x02010000,
+ obj_cache, b, verbose);
+ break;
+ case D11REG_TYPE_SHMX:
+ val = _dhd_get_d11obj16(macdbg_info, addr, 0x02090000,
+ obj_cache, b, verbose);
+ break;
+ default:
+ printf("Unrecognized type %d!\n", type);
+ return 0;
+ }
+ if (b) {
+ bcm_bprintf(b, "%-3d %s 0x%-4x = 0x%-4x\n",
+ idx, regname[type], addr, val);
+ } else {
+ printf("%-3d %s 0x%-4x = 0x%-4x\n",
+ idx, regname[type], addr, val);
+ }
+ }
+ return 1;
+}
+
+static int
+_dhd_print_d11regs(macdbg_info_t *macdbg_info, d11regs_list_t *pregs,
+ int start_idx, struct bcmstrbuf *b, bool verbose)
+{
+ uint16 addr;
+ int idx = 0;
+ d11obj_cache_t obj_cache = {0, 0, 0, FALSE};
+
+ addr = pregs->addr;
+ if (pregs->type >= D11REG_TYPE_MAX) {
+ printf("%s: wrong type %d\n", __FUNCTION__, pregs->type);
+ return 0;
+ }
+ if (pregs->bitmap) {
+ while (pregs->bitmap) {
+ if (pregs->bitmap && (pregs->bitmap & 0x1)) {
+ _dhd_print_d11reg(macdbg_info, (idx + start_idx), pregs->type,
+ addr, b, &obj_cache, verbose);
+ idx++;
+ }
+ pregs->bitmap = pregs->bitmap >> 1;
+ addr += pregs->step;
+ }
+ } else {
+ for (; idx < pregs->cnt; idx++) {
+ _dhd_print_d11reg(macdbg_info, (idx + start_idx), pregs->type,
+ addr, b, &obj_cache, verbose);
+ addr += pregs->step;
+ }
+ }
+ return idx;
+}
+
+static int
+_dhd_pd11regs_bylist(macdbg_info_t *macdbg_info, d11regs_list_t *reglist,
+ uint16 reglist_sz, struct bcmstrbuf *b)
+{
+ uint i, idx = 0;
+
+ if (reglist != NULL && reglist_sz > 0) {
+ for (i = 0; i < reglist_sz; i++) {
+ DHD_TRACE(("%s %d %p %d\n", __FUNCTION__, __LINE__,
+ &reglist[i], reglist_sz));
+ idx += _dhd_print_d11regs(macdbg_info, &reglist[i], idx, b, FALSE);
+ }
+ }
+ return idx;
+}
+
+int
+dhd_macdbg_dumpmac(dhd_pub_t *dhdp, char *buf, int buflen,
+ int *outbuflen, bool dump_x)
+{
+ macdbg_info_t *macdbg_info = dhdp->macdbg_info;
+ struct bcmstrbuf *b = NULL;
+ struct bcmstrbuf bcmstrbuf;
+ uint cnt = 0;
+
+ DHD_TRACE(("%s %d %p %d %p %d %p %d\n", __FUNCTION__, __LINE__,
+ buf, buflen, macdbg_info->pd11regs, macdbg_info->d11regs_sz,
+ macdbg_info->pd11regs_x, macdbg_info->d11regsx_sz));
+
+ if (buf && buflen > 0) {
+ bcm_binit(&bcmstrbuf, buf, buflen);
+ b = &bcmstrbuf;
+ }
+ if (!dump_x) {
+ /* Dump PSMr */
+ cnt += _dhd_pd11regs_bylist(macdbg_info, macdbg_info->pd11regs,
+ macdbg_info->d11regs_sz, b);
+ } else {
+ /* Dump PSMx */
+ cnt += _dhd_pd11regs_bylist(macdbg_info, macdbg_info->pd11regs_x,
+ macdbg_info->d11regsx_sz, b);
+ }
+
+ if (b && outbuflen) {
+ if ((uint)buflen > BCMSTRBUF_LEN(b)) {
+ *outbuflen = buflen - BCMSTRBUF_LEN(b);
+ } else {
+ DHD_ERROR(("%s: buflen insufficient!\n", __FUNCTION__));
+ *outbuflen = buflen;
+ /* Do not return buftooshort to allow printing macregs we have got */
+ }
+ }
+
+ return ((cnt > 0) ? BCME_OK : BCME_UNSUPPORTED);
+}
+
+int
+dhd_macdbg_pd11regs(dhd_pub_t *dhdp, char *params, int plen, char *buf, int buflen)
+{
+ macdbg_info_t *macdbg_info = dhdp->macdbg_info;
+ dhd_pd11regs_param *pd11regs = (void *)params;
+ dhd_pd11regs_buf *pd11regs_buf = (void *)buf;
+ uint16 start_idx;
+ bool verbose;
+ d11regs_list_t reglist;
+ struct bcmstrbuf *b = NULL;
+ struct bcmstrbuf bcmstrbuf;
+
+ start_idx = pd11regs->start_idx;
+ verbose = pd11regs->verbose;
+ memcpy(&reglist, pd11regs->plist, sizeof(reglist));
+ memset(buf, '\0', buflen);
+ bcm_binit(&bcmstrbuf, (char *)(pd11regs_buf->pbuf),
+ (buflen - OFFSETOF(dhd_pd11regs_buf, pbuf)));
+ b = &bcmstrbuf;
+ pd11regs_buf->idx = (uint16)_dhd_print_d11regs(macdbg_info, &reglist,
+ start_idx, b, verbose);
+
+ return ((pd11regs_buf->idx > 0) ? BCME_OK : BCME_ERROR);
+}
+
+int
+dhd_macdbg_reglist(dhd_pub_t *dhdp, char *buf, int buflen)
+{
+ int err, desc_idx = 0;
+ dhd_maclist_t *maclist = (dhd_maclist_t *)buf;
+ macdbg_info_t *macdbg_info = dhdp->macdbg_info;
+ void *xtlvbuf_p = maclist->plist;
+ uint16 xtlvbuflen = (uint16)buflen;
+ xtlv_desc_t xtlv_desc[] = {
+ {0, 0, NULL},
+ {0, 0, NULL},
+ {0, 0, NULL},
+ {0, 0, NULL}
+ };
+
+ if (!macdbg_info->pd11regs) {
+ err = BCME_NOTFOUND;
+ goto exit;
+ }
+ ASSERT(macdbg_info->d11regs_sz > 0);
+ xtlv_desc[desc_idx].type = DHD_MACLIST_XTLV_R;
+ xtlv_desc[desc_idx].len =
+ macdbg_info->d11regs_sz * (uint16)sizeof(*(macdbg_info->pd11regs));
+ xtlv_desc[desc_idx].ptr = macdbg_info->pd11regs;
+ desc_idx++;
+
+ if (macdbg_info->pd11regs_x) {
+ ASSERT(macdbg_info->d11regsx_sz);
+ xtlv_desc[desc_idx].type = DHD_MACLIST_XTLV_X;
+ xtlv_desc[desc_idx].len = macdbg_info->d11regsx_sz *
+ (uint16)sizeof(*(macdbg_info->pd11regs_x));
+ xtlv_desc[desc_idx].ptr = macdbg_info->pd11regs_x;
+ desc_idx++;
+ }
+
+ if (macdbg_info->psvmpmems) {
+ ASSERT(macdbg_info->svmpmems_sz);
+ xtlv_desc[desc_idx].type = DHD_SVMPLIST_XTLV;
+ xtlv_desc[desc_idx].len = macdbg_info->svmpmems_sz *
+ (uint16)sizeof(*(macdbg_info->psvmpmems));
+ xtlv_desc[desc_idx].ptr = macdbg_info->psvmpmems;
+ desc_idx++;
+ }
+
+ err = bcm_pack_xtlv_buf_from_mem((uint8 **)&xtlvbuf_p, &xtlvbuflen,
+ xtlv_desc, BCM_XTLV_OPTION_ALIGN32);
+
+ maclist->version = 0; /* No version control for now anyway */
+ maclist->bytes_len = (buflen - xtlvbuflen);
+
+exit:
+ return err;
+}
+
+static int
+_dhd_print_svmps(macdbg_info_t *macdbg_info, svmp_list_t *psvmp,
+ int start_idx, struct bcmstrbuf *b, bool verbose)
+{
+ int idx;
+ uint32 addr, mem_id, offset, prev_mem_id, prev_offset;
+ uint16 cnt, val;
+
+ BCM_REFERENCE(start_idx);
+
+ /* Set tbl ID and tbl offset. */
+ _dhd_set_ihr32(macdbg_info, 0x3fc, 0x30000d, b, verbose);
+ _dhd_set_ihr32(macdbg_info, 0x3fc, 0x8000000e, b, verbose);
+
+ addr = psvmp->addr;
+ cnt = psvmp->cnt;
+
+ /* Invalidate previous mem_id and offset */
+ prev_mem_id = (uint32)(-1);
+ prev_offset = (uint32)(-1);
+
+ for (idx = 0; idx < cnt; idx++, addr++) {
+ mem_id = (addr >> 15);
+ offset = (addr & 0x7fff) >> 1;
+
+ if (mem_id != prev_mem_id) {
+ /* Set mem_id */
+ _dhd_set_ihr32(macdbg_info, 0x3fc, ((mem_id & 0xffff0000) | 0x10),
+ b, verbose);
+ _dhd_set_ihr32(macdbg_info, 0x3fc, ((mem_id << 16) | 0xf),
+ b, verbose);
+ }
+
+ if (offset != prev_offset) {
+ /* XXX: Is this needed?
+ * _dhd_set_ihr32(macdbg_info, 0x3fc, 0x30000d, b, verbose);
+ */
+ /* svmp offset */
+ _dhd_set_ihr32(macdbg_info, 0x3fc, ((offset << 16) | 0xe),
+ b, verbose);
+ }
+ /* Read hi or lo */
+ _dhd_set_ihr16(macdbg_info, 0x3fc, ((addr & 0x1) ? 0x10 : 0xf), b, verbose);
+ val = _dhd_get_ihr16(macdbg_info, 0x3fe, b, verbose);
+ if (b) {
+ bcm_bprintf(b, "0x%-4x 0x%-4x\n",
+ addr, val);
+
+ } else {
+ printf("0x%-4x 0x%-4x\n",
+ addr, val);
+ }
+ prev_mem_id = mem_id;
+ prev_offset = offset;
+ }
+ return idx;
+}
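+
+/*
+ * Address decoding example: for SVMP addr 0x21e10 the loop above derives
+ * mem_id 0x4 (addr >> 15) and offset 0xf08 ((addr & 0x7fff) >> 1); bit 0
+ * of addr then selects the low (0) or high (1) 16-bit half of the 32-bit
+ * SVMP word read back through the 0x3fc/0x3fe register pair.
+ */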
+
+static int
+_dhd_psvmps_bylist(macdbg_info_t *macdbg_info, svmp_list_t *svmplist,
+ uint16 svmplist_sz, struct bcmstrbuf *b)
+{
+ uint i, idx = 0;
+
+ if (svmplist != NULL && svmplist_sz > 0) {
+ for (i = 0; i < svmplist_sz; i++) {
+ DHD_TRACE(("%s %d %p %d\n", __FUNCTION__, __LINE__,
+ &svmplist[i], svmplist_sz));
+ idx += _dhd_print_svmps(macdbg_info, &svmplist[i], idx, b, FALSE);
+ }
+ }
+ return idx;
+}
+
+int
+dhd_macdbg_dumpsvmp(dhd_pub_t *dhdp, char *buf, int buflen,
+ int *outbuflen)
+{
+ macdbg_info_t *macdbg_info = dhdp->macdbg_info;
+ struct bcmstrbuf *b = NULL;
+ struct bcmstrbuf bcmstrbuf;
+ uint cnt = 0;
+
+ DHD_TRACE(("%s %d %p %d %p %d\n", __FUNCTION__, __LINE__,
+ buf, buflen, macdbg_info->psvmpmems, macdbg_info->svmpmems_sz));
+
+ if (buf && buflen > 0) {
+ bcm_binit(&bcmstrbuf, buf, buflen);
+ b = &bcmstrbuf;
+ }
+ cnt = _dhd_psvmps_bylist(macdbg_info, macdbg_info->psvmpmems,
+ macdbg_info->svmpmems_sz, b);
+
+ if (b && outbuflen) {
+ if ((uint)buflen > BCMSTRBUF_LEN(b)) {
+ *outbuflen = buflen - BCMSTRBUF_LEN(b);
+ } else {
+ DHD_ERROR(("%s: buflen insufficient!\n", __FUNCTION__));
+ *outbuflen = buflen;
+ /* Do not return buftooshort to allow printing macregs we have got */
+ }
+ }
+
+ return ((cnt > 0) ? BCME_OK : BCME_UNSUPPORTED);
+}
+
+int
+dhd_macdbg_psvmpmems(dhd_pub_t *dhdp, char *params, int plen, char *buf, int buflen)
+{
+ macdbg_info_t *macdbg_info = dhdp->macdbg_info;
+ dhd_pd11regs_param *pd11regs = (void *)params;
+ dhd_pd11regs_buf *pd11regs_buf = (void *)buf;
+ uint16 start_idx;
+ bool verbose;
+ svmp_list_t reglist;
+ struct bcmstrbuf *b = NULL;
+ struct bcmstrbuf bcmstrbuf;
+
+ start_idx = pd11regs->start_idx;
+ verbose = pd11regs->verbose;
+ memcpy(&reglist, pd11regs->plist, sizeof(reglist));
+ memset(buf, '\0', buflen);
+ bcm_binit(&bcmstrbuf, (char *)(pd11regs_buf->pbuf),
+ (buflen - OFFSETOF(dhd_pd11regs_buf, pbuf)));
+ b = &bcmstrbuf;
+ pd11regs_buf->idx = (uint16)_dhd_print_svmps(macdbg_info, &reglist,
+ start_idx, b, verbose);
+
+ return ((pd11regs_buf->idx > 0) ? BCME_OK : BCME_ERROR);
+}
+
+#endif /* BCMDBG */
diff --git a/bcmdhd.101.10.361.x/dhd_macdbg.h b/bcmdhd.101.10.361.x/dhd_macdbg.h
new file mode 100755
index 0000000..2175137
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_macdbg.h
@@ -0,0 +1,34 @@
+/* D11 macdbg function prototypes for Broadcom 802.11abgn
+ * Networking Adapter Device Drivers.
+ *
+ * Broadcom Proprietary and Confidential. Copyright (C) 2020,
+ * All Rights Reserved.
+ *
+ * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom;
+ * the contents of this file may not be disclosed to third parties,
+ * copied or duplicated in any form, in whole or in part, without
+ * the prior written permission of Broadcom.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Proprietary:>>
+ *
+ * $Id: dhd_macdbg.h 649388 2016-07-15 22:54:42Z shinuk $
+ */
+
+#ifndef _dhd_macdbg_h_
+#define _dhd_macdbg_h_
+#ifdef BCMDBG
+#include <dngl_stats.h>
+#include <dhd.h>
+
+extern int dhd_macdbg_attach(dhd_pub_t *dhdp);
+extern void dhd_macdbg_detach(dhd_pub_t *dhdp);
+extern void dhd_macdbg_event_handler(dhd_pub_t *dhdp, uint32 reason,
+ uint8 *event_data, uint32 datalen);
+extern int dhd_macdbg_dumpmac(dhd_pub_t *dhdp, char *buf, int buflen, int *outbuflen, bool dump_x);
+extern int dhd_macdbg_pd11regs(dhd_pub_t *dhdp, char *params, int plen, char *buf, int buflen);
+extern int dhd_macdbg_reglist(dhd_pub_t *dhdp, char *buf, int buflen);
+extern int dhd_macdbg_dumpsvmp(dhd_pub_t *dhdp, char *buf, int buflen, int *outbuflen);
+extern int dhd_macdbg_psvmpmems(dhd_pub_t *dhdp, char *params, int plen, char *buf, int buflen);
+#endif /* BCMDBG */
+#endif /* _dhd_macdbg_h_ */
diff --git a/bcmdhd.101.10.361.x/dhd_mschdbg.c b/bcmdhd.101.10.361.x/dhd_mschdbg.c
new file mode 100755
index 0000000..5865b6d
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_mschdbg.c
@@ -0,0 +1,796 @@
+/*
+ * DHD debugability support
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_mschdbg.c 639872 2016-05-25 05:39:30Z sjadhav $
+ */
+#ifdef SHOW_LOGTRACE
+#include <typedefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_dbg.h>
+#include <dhd_debug.h>
+#include <dhd_mschdbg.h>
+
+#include <event_log.h>
+#include <event_trace.h>
+#include <msgtrace.h>
+
+static const char *head_log = "";
+#define MSCH_EVENT_HEAD(space) \
+ do { \
+ MSCH_EVENT(("%s_E: ", head_log)); \
+ if (space > 0) { \
+ int ii; \
+ for (ii = 0; ii < space; ii += 4) MSCH_EVENT((" ")); \
+ } \
+ } while (0)
+
+#ifdef DHD_EFI
+#define MSCH_EVENT(args) \
+do { \
+ if (dhd_msg_level & DHD_EVENT_VAL) { \
+ DHD_LOG_DUMP_WRITE_FW("[%s]: ", dhd_log_dump_get_timestamp()); \
+ DHD_LOG_DUMP_WRITE_FW args; \
+ } \
+} while (0)
+#else
+#define MSCH_EVENT(args) do {if (dhd_msg_level & DHD_EVENT_VAL) printf args;} while (0)
+#endif /* DHD_EFI */
+
+static uint64 solt_start_time[4], req_start_time[4], profiler_start_time[4];
+static uint32 solt_chanspec[4] = {0, }, req_start[4] = {0, };
+static bool lastMessages = FALSE;
+
+#define US_PRE_SEC 1000000
+#define DATA_UNIT_FOR_LOG_CNT 4
+
+static void dhd_mschdbg_us_to_sec(uint32 time_h, uint32 time_l, uint32 *sec, uint32 *remain)
+{
+ uint64 cur_time = ((uint64)(ntoh32(time_h)) << 32) | ntoh32(time_l);
+ uint64 r, u = 0;
+
+ r = cur_time;
+ while (time_h != 0) {
+ u += (uint64)((0xffffffff / US_PRE_SEC)) * time_h;
+ r = cur_time - u * US_PRE_SEC;
+ time_h = (uint32)(r >> 32);
+ }
+
+ *sec = (uint32)(u + ((uint32)(r) / US_PRE_SEC));
+ *remain = (uint32)(r) % US_PRE_SEC;
+}
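+
+/*
+ * Worked example: time_h = 1, time_l = 0 is 2^32 us = 4294967296 us,
+ * so the function yields sec = 4294 and remain = 967296.
+ */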
+
+static char *dhd_mschdbg_display_time(uint32 time_h, uint32 time_l)
+{
+ static char display_time[32];
+ uint32 s, ss;
+
+ if (time_h == 0xffffffff && time_l == 0xffffffff) {
+ snprintf(display_time, 31, "-1");
+ } else {
+ dhd_mschdbg_us_to_sec(time_h, time_l, &s, &ss);
+ snprintf(display_time, 31, "%d.%06d", s, ss);
+ }
+ return display_time;
+}
+
+static void
+dhd_mschdbg_chanspec_list(int sp, char *data, uint16 ptr, uint16 chanspec_cnt)
+{
+ int i, cnt = (int)ntoh16(chanspec_cnt);
+ uint16 *chanspec_list = (uint16 *)(data + ntoh16(ptr));
+ char buf[CHANSPEC_STR_LEN];
+ chanspec_t c;
+
+ MSCH_EVENT_HEAD(sp);
+ MSCH_EVENT(("<chanspec_list>:"));
+ for (i = 0; i < cnt; i++) {
+ c = (chanspec_t)ntoh16(chanspec_list[i]);
+ MSCH_EVENT((" %s", wf_chspec_ntoa(c, buf)));
+ }
+ MSCH_EVENT(("\n"));
+}
+
+static void
+dhd_mschdbg_elem_list(int sp, char *title, char *data, uint16 ptr, uint16 list_cnt)
+{
+ int i, cnt = (int)ntoh16(list_cnt);
+ uint32 *list = (uint32 *)(data + ntoh16(ptr));
+
+ MSCH_EVENT_HEAD(sp);
+ MSCH_EVENT(("%s_list: ", title));
+ for (i = 0; i < cnt; i++) {
+ MSCH_EVENT(("0x%08x->", ntoh32(list[i])));
+ }
+ MSCH_EVENT(("null\n"));
+}
+
+static void
+dhd_mschdbg_req_param_profiler_event_data(int sp, int ver, char *data, uint16 ptr)
+{
+ int sn = sp + 4;
+ msch_req_param_profiler_event_data_t *p =
+ (msch_req_param_profiler_event_data_t *)(data + ntoh16(ptr));
+ uint32 type, flags;
+
+ MSCH_EVENT_HEAD(sp);
+ MSCH_EVENT(("<request parameters>\n"));
+ MSCH_EVENT_HEAD(sn);
+ MSCH_EVENT(("req_type: "));
+
+ type = p->req_type;
+ if (type < 4) {
+ char *req_type[] = {"fixed", "start-flexible", "duration-flexible",
+ "both-flexible"};
+ MSCH_EVENT(("%s", req_type[type]));
+ }
+ else
+ MSCH_EVENT(("unknown(%d)", type));
+
+ flags = ntoh16(p->flags);
+ if (flags & WL_MSCH_REQ_FLAGS_CHAN_CONTIGUOUS)
+ MSCH_EVENT((", CHAN_CONTIGUOUS"));
+ if (flags & WL_MSCH_REQ_FLAGS_MERGE_CONT_SLOTS)
+ MSCH_EVENT((", MERGE_CONT_SLOTS"));
+ if (flags & WL_MSCH_REQ_FLAGS_PREMTABLE)
+ MSCH_EVENT((", PREMTABLE"));
+ if (flags & WL_MSCH_REQ_FLAGS_PREMT_CURTS)
+ MSCH_EVENT((", PREMT_CURTS"));
+ if (flags & WL_MSCH_REQ_FLAGS_PREMT_IMMEDIATE)
+ MSCH_EVENT((", PREMT_IMMEDIATE"));
+ MSCH_EVENT((", priority: %d\n", p->priority));
+
+ MSCH_EVENT_HEAD(sn);
+ MSCH_EVENT(("start-time: %s, duration: %d(us), interval: %d(us)\n",
+ dhd_mschdbg_display_time(p->start_time_h, p->start_time_l),
+ ntoh32(p->duration), ntoh32(p->interval)));
+
+ if (type == WL_MSCH_RT_DUR_FLEX) {
+ MSCH_EVENT_HEAD(sn);
+ MSCH_EVENT(("dur_flex: %d(us)\n", ntoh32(p->flex.dur_flex)));
+ } else if (type == WL_MSCH_RT_BOTH_FLEX) {
+ MSCH_EVENT_HEAD(sn);
+ MSCH_EVENT(("min_dur: %d(us), max_away_dur: %d(us)\n",
+ ntoh32(p->flex.bf.min_dur), ntoh32(p->flex.bf.max_away_dur)));
+
+ MSCH_EVENT_HEAD(sn);
+ MSCH_EVENT(("hi_prio_time: %s, hi_prio_interval: %d(us)\n",
+ dhd_mschdbg_display_time(p->flex.bf.hi_prio_time_h,
+ p->flex.bf.hi_prio_time_l),
+ ntoh32(p->flex.bf.hi_prio_interval)));
+ }
+}
+
+static void
+dhd_mschdbg_timeslot_profiler_event_data(int sp, int ver, char *title, char *data,
+ uint16 ptr, bool empty)
+{
+ int s, sn = sp + 4;
+ msch_timeslot_profiler_event_data_t *p =
+ (msch_timeslot_profiler_event_data_t *)(data + ntoh16(ptr));
+ char *state[] = {"NONE", "CHN_SW", "ONCHAN_FIRE", "OFF_CHN_PREP",
+ "OFF_CHN_DONE", "TS_COMPLETE"};
+
+ MSCH_EVENT_HEAD(sp);
+ MSCH_EVENT(("<%s timeslot>: ", title));
+ if (empty) {
+ MSCH_EVENT((" null\n"));
+ return;
+ }
+ else
+ MSCH_EVENT(("0x%08x\n", ntoh32(p->p_timeslot)));
+
+ s = (int)(ntoh32(p->state));
+ if (s < 0 || s > 5) s = 0;
+
+ MSCH_EVENT_HEAD(sn);
+ MSCH_EVENT(("id: %d, state[%d]: %s, chan_ctxt: [0x%08x]\n",
+ ntoh32(p->timeslot_id), ntoh32(p->state), state[s], ntoh32(p->p_chan_ctxt)));
+
+ MSCH_EVENT_HEAD(sn);
+ MSCH_EVENT(("fire_time: %s",
+ dhd_mschdbg_display_time(p->fire_time_h, p->fire_time_l)));
+
+ MSCH_EVENT((", pre_start_time: %s",
+ dhd_mschdbg_display_time(p->pre_start_time_h, p->pre_start_time_l)));
+
+ MSCH_EVENT((", end_time: %s",
+ dhd_mschdbg_display_time(p->end_time_h, p->end_time_l)));
+
+ MSCH_EVENT((", sch_dur: %s\n",
+ dhd_mschdbg_display_time(p->sch_dur_h, p->sch_dur_l)));
+}
+
+static void
+dhd_mschdbg_req_timing_profiler_event_data(int sp, int ver, char *title, char *data,
+ uint16 ptr, bool empty)
+{
+ int sn = sp + 4;
+ msch_req_timing_profiler_event_data_t *p =
+ (msch_req_timing_profiler_event_data_t *)(data + ntoh16(ptr));
+ uint32 type;
+
+ MSCH_EVENT_HEAD(sp);
+ MSCH_EVENT(("<%s req_timing>: ", title));
+ if (empty) {
+ MSCH_EVENT((" null\n"));
+ return;
+ }
+ else
+ MSCH_EVENT(("0x%08x (prev 0x%08x, next 0x%08x)\n",
+ ntoh32(p->p_req_timing), ntoh32(p->p_prev), ntoh32(p->p_next)));
+
+ MSCH_EVENT_HEAD(sn);
+ MSCH_EVENT(("flags:"));
+ type = ntoh16(p->flags);
+ if ((type & 0x7f) == 0)
+ MSCH_EVENT((" NONE"));
+ else {
+ if (type & WL_MSCH_RC_FLAGS_ONCHAN_FIRE)
+ MSCH_EVENT((" ONCHAN_FIRE"));
+ if (type & WL_MSCH_RC_FLAGS_START_FIRE_DONE)
+ MSCH_EVENT((" START_FIRE"));
+ if (type & WL_MSCH_RC_FLAGS_END_FIRE_DONE)
+ MSCH_EVENT((" END_FIRE"));
+ if (type & WL_MSCH_RC_FLAGS_ONFIRE_DONE)
+ MSCH_EVENT((" ONFIRE_DONE"));
+ if (type & WL_MSCH_RC_FLAGS_SPLIT_SLOT_START)
+ MSCH_EVENT((" SPLIT_SLOT_START"));
+ if (type & WL_MSCH_RC_FLAGS_SPLIT_SLOT_END)
+ MSCH_EVENT((" SPLIT_SLOT_END"));
+ if (type & WL_MSCH_RC_FLAGS_PRE_ONFIRE_DONE)
+ MSCH_EVENT((" PRE_ONFIRE_DONE"));
+ }
+ MSCH_EVENT(("\n"));
+
+ MSCH_EVENT_HEAD(sn);
+ MSCH_EVENT(("pre_start_time: %s",
+ dhd_mschdbg_display_time(p->pre_start_time_h, p->pre_start_time_l)));
+
+ MSCH_EVENT((", start_time: %s",
+ dhd_mschdbg_display_time(p->start_time_h, p->start_time_l)));
+
+ MSCH_EVENT((", end_time: %s\n",
+ dhd_mschdbg_display_time(p->end_time_h, p->end_time_l)));
+
+ if (p->p_timeslot && (p->timeslot_ptr == 0)) {
+ MSCH_EVENT_HEAD(sn);
+ MSCH_EVENT(("<%s timeslot>: 0x%08x\n", title, ntoh32(p->p_timeslot)));
+ } else
+ dhd_mschdbg_timeslot_profiler_event_data(sn, ver, title, data, p->timeslot_ptr,
+ (p->timeslot_ptr == 0));
+}
+
+static void
+dhd_mschdbg_chan_ctxt_profiler_event_data(int sp, int ver, char *data, uint16 ptr, bool empty)
+{
+ int sn = sp + 4;
+ msch_chan_ctxt_profiler_event_data_t *p =
+ (msch_chan_ctxt_profiler_event_data_t *)(data + ntoh16(ptr));
+ chanspec_t c;
+ char buf[CHANSPEC_STR_LEN];
+
+ MSCH_EVENT_HEAD(sp);
+ MSCH_EVENT(("<chan_ctxt>: "));
+ if (empty) {
+ MSCH_EVENT((" null\n"));
+ return;
+ }
+ else
+ MSCH_EVENT(("0x%08x (prev 0x%08x, next 0x%08x)\n",
+ ntoh32(p->p_chan_ctxt), ntoh32(p->p_prev), ntoh32(p->p_next)));
+
+ c = (chanspec_t)ntoh16(p->chanspec);
+
+ MSCH_EVENT_HEAD(sn);
+ MSCH_EVENT(("channel: %s, bf_sch_pending: %s, bf_skipped: %d\n",
+ wf_chspec_ntoa(c, buf), p->bf_sch_pending? "TRUE" : "FALSE",
+ ntoh32(p->bf_skipped_count)));
+
+ MSCH_EVENT_HEAD(sn);
+ MSCH_EVENT(("bf_link: prev 0x%08x, next 0x%08x\n",
+ ntoh32(p->bf_link_prev), ntoh32(p->bf_link_next)));
+
+ MSCH_EVENT_HEAD(sn);
+ MSCH_EVENT(("onchan_time: %s",
+ dhd_mschdbg_display_time(p->onchan_time_h, p->onchan_time_l)));
+ MSCH_EVENT((", actual_onchan_dur: %s",
+ dhd_mschdbg_display_time(p->actual_onchan_dur_h, p->actual_onchan_dur_l)));
+ MSCH_EVENT((", pend_onchan_dur: %s\n",
+ dhd_mschdbg_display_time(p->pend_onchan_dur_h, p->pend_onchan_dur_l)));
+
+ dhd_mschdbg_elem_list(sn, "req_entity", data, p->req_entity_list_ptr,
+ p->req_entity_list_cnt);
+ dhd_mschdbg_elem_list(sn, "bf_entity", data, p->bf_entity_list_ptr,
+ p->bf_entity_list_cnt);
+}
+
+static void
+dhd_mschdbg_req_entity_profiler_event_data(int sp, int ver, char *data, uint16 ptr, bool empty)
+{
+ int sn = sp + 4;
+ msch_req_entity_profiler_event_data_t *p =
+ (msch_req_entity_profiler_event_data_t *)(data + ntoh16(ptr));
+ char buf[CHANSPEC_STR_LEN];
+ chanspec_t c;
+ uint32 flags;
+
+ MSCH_EVENT_HEAD(sp);
+ MSCH_EVENT(("<req_entity>: "));
+ if (empty) {
+ MSCH_EVENT((" null\n"));
+ return;
+ }
+ else
+ MSCH_EVENT(("0x%08x (prev 0x%08x, next 0x%08x)\n",
+ ntoh32(p->p_req_entity), ntoh32(p->req_hdl_link_prev),
+ ntoh32(p->req_hdl_link_next)));
+
+ MSCH_EVENT_HEAD(sn);
+ MSCH_EVENT(("req_hdl: [0x%08x]\n", ntoh32(p->p_req_hdl)));
+
+ MSCH_EVENT_HEAD(sn);
+ MSCH_EVENT(("chan_ctxt_link: prev 0x%08x, next 0x%08x\n",
+ ntoh32(p->chan_ctxt_link_prev), ntoh32(p->chan_ctxt_link_next)));
+ MSCH_EVENT_HEAD(sn);
+ MSCH_EVENT(("rt_specific_link: prev 0x%08x, next 0x%08x\n",
+ ntoh32(p->rt_specific_link_prev), ntoh32(p->rt_specific_link_next)));
+ MSCH_EVENT_HEAD(sn);
+ MSCH_EVENT(("start_fixed_link: prev 0x%08x, next 0x%08x\n",
+ ntoh32(p->start_fixed_link_prev), ntoh32(p->start_fixed_link_next)));
+ MSCH_EVENT_HEAD(sn);
+ MSCH_EVENT(("both_flex_list: prev 0x%08x, next 0x%08x\n",
+ ntoh32(p->both_flex_list_prev), ntoh32(p->both_flex_list_next)));
+
+ c = (chanspec_t)ntoh16(p->chanspec);
+ MSCH_EVENT_HEAD(sn);
+ if (ver >= 2) {
+ MSCH_EVENT(("channel: %s, onchan Id %d, current chan Id %d, priority %d",
+ wf_chspec_ntoa(c, buf), ntoh16(p->onchan_chn_idx), ntoh16(p->cur_chn_idx),
+ ntoh16(p->priority)));
+ flags = ntoh32(p->flags);
+ if (flags & WL_MSCH_ENTITY_FLAG_MULTI_INSTANCE)
+ MSCH_EVENT((" : MULTI_INSTANCE\n"));
+ else
+ MSCH_EVENT(("\n"));
+ MSCH_EVENT_HEAD(sn);
+ MSCH_EVENT(("actual_start_time: %s, ",
+ dhd_mschdbg_display_time(p->actual_start_time_h, p->actual_start_time_l)));
+ MSCH_EVENT(("curts_fire_time: %s, ",
+ dhd_mschdbg_display_time(p->curts_fire_time_h, p->curts_fire_time_l)));
+ } else {
+ MSCH_EVENT(("channel: %s, priority %d, ", wf_chspec_ntoa(c, buf),
+ ntoh16(p->priority)));
+ }
+ MSCH_EVENT(("bf_last_serv_time: %s\n",
+ dhd_mschdbg_display_time(p->bf_last_serv_time_h, p->bf_last_serv_time_l)));
+
+ dhd_mschdbg_req_timing_profiler_event_data(sn, ver, "current", data, p->cur_slot_ptr,
+ (p->cur_slot_ptr == 0));
+ dhd_mschdbg_req_timing_profiler_event_data(sn, ver, "pending", data, p->pend_slot_ptr,
+ (p->pend_slot_ptr == 0));
+
+ if (p->p_chan_ctxt && (p->chan_ctxt_ptr == 0)) {
+ MSCH_EVENT_HEAD(sn);
+ MSCH_EVENT(("<chan_ctxt>: 0x%08x\n", ntoh32(p->p_chan_ctxt)));
+ }
+ else
+ dhd_mschdbg_chan_ctxt_profiler_event_data(sn, ver, data, p->chan_ctxt_ptr,
+ (p->chan_ctxt_ptr == 0));
+}
+
+static void
+dhd_mschdbg_req_handle_profiler_event_data(int sp, int ver, char *data, uint16 ptr, bool empty)
+{
+ int sn = sp + 4;
+ msch_req_handle_profiler_event_data_t *p =
+ (msch_req_handle_profiler_event_data_t *)(data + ntoh16(ptr));
+ uint32 flags;
+
+ MSCH_EVENT_HEAD(sp);
+ MSCH_EVENT(("<req_handle>: "));
+ if (empty) {
+ MSCH_EVENT((" null\n"));
+ return;
+ }
+ else
+ MSCH_EVENT(("0x%08x (prev 0x%08x, next 0x%08x)\n",
+ ntoh32(p->p_req_handle), ntoh32(p->p_prev), ntoh32(p->p_next)));
+
+ dhd_mschdbg_elem_list(sn, "req_entity", data, p->req_entity_list_ptr,
+ p->req_entity_list_cnt);
+ MSCH_EVENT_HEAD(sn);
+ MSCH_EVENT(("cb_func: [0x%08x], cb_func: [0x%08x]",
+ ntoh32(p->cb_func), ntoh32(p->cb_ctxt)));
+ if (ver < 2) {
+ MSCH_EVENT((", chan_cnt: %d", ntoh16(p->chan_cnt)));
+ }
+ flags = ntoh32(p->flags);
+ if (flags & WL_MSCH_REQ_HDL_FLAGS_NEW_REQ)
+ MSCH_EVENT((", NEW_REQ"));
+ MSCH_EVENT(("\n"));
+
+ dhd_mschdbg_req_param_profiler_event_data(sn, ver, data, p->req_param_ptr);
+
+ if (ver >= 2) {
+ MSCH_EVENT_HEAD(sn);
+ MSCH_EVENT(("req_time: %s\n",
+ dhd_mschdbg_display_time(p->req_time_h, p->req_time_l)));
+ MSCH_EVENT_HEAD(sn);
+ MSCH_EVENT(("chan_cnt: %d, chan idx %d, last chan idx %d\n",
+ ntoh16(p->chan_cnt), ntoh16(p->chan_idx), ntoh16(p->last_chan_idx)));
+ if (p->chanspec_list && p->chanspec_cnt) {
+ dhd_mschdbg_chanspec_list(sn, data, p->chanspec_list, p->chanspec_cnt);
+ }
+ }
+}
+
+static void
+dhd_mschdbg_profiler_profiler_event_data(int sp, int ver, char *data, uint16 ptr)
+{
+ msch_profiler_profiler_event_data_t *p =
+ (msch_profiler_profiler_event_data_t *)(data + ntoh16(ptr));
+ uint32 flags;
+
+ MSCH_EVENT_HEAD(sp);
+ MSCH_EVENT(("free list: req_hdl 0x%08x, req_entity 0x%08x,"
+ " chan_ctxt 0x%08x, chanspec 0x%08x\n",
+ ntoh32(p->free_req_hdl_list), ntoh32(p->free_req_entity_list),
+ ntoh32(p->free_chan_ctxt_list), ntoh32(p->free_chanspec_list)));
+
+ MSCH_EVENT_HEAD(sp);
+ MSCH_EVENT(("alloc count: chanspec %d, req_entity %d, req_hdl %d, "
+ "chan_ctxt %d, timeslot %d\n",
+ ntoh16(p->msch_chanspec_alloc_cnt), ntoh16(p->msch_req_entity_alloc_cnt),
+ ntoh16(p->msch_req_hdl_alloc_cnt), ntoh16(p->msch_chan_ctxt_alloc_cnt),
+ ntoh16(p->msch_timeslot_alloc_cnt)));
+
+ dhd_mschdbg_elem_list(sp, "req_hdl", data, p->msch_req_hdl_list_ptr,
+ p->msch_req_hdl_list_cnt);
+ dhd_mschdbg_elem_list(sp, "chan_ctxt", data, p->msch_chan_ctxt_list_ptr,
+ p->msch_chan_ctxt_list_cnt);
+ dhd_mschdbg_elem_list(sp, "req_timing", data, p->msch_req_timing_list_ptr,
+ p->msch_req_timing_list_cnt);
+ dhd_mschdbg_elem_list(sp, "start_fixed", data, p->msch_start_fixed_list_ptr,
+ p->msch_start_fixed_list_cnt);
+ dhd_mschdbg_elem_list(sp, "both_flex_req_entity", data,
+ p->msch_both_flex_req_entity_list_ptr,
+ p->msch_both_flex_req_entity_list_cnt);
+ dhd_mschdbg_elem_list(sp, "start_flex", data, p->msch_start_flex_list_ptr,
+ p->msch_start_flex_list_cnt);
+ dhd_mschdbg_elem_list(sp, "both_flex", data, p->msch_both_flex_list_ptr,
+ p->msch_both_flex_list_cnt);
+
+ if (p->p_cur_msch_timeslot && (p->cur_msch_timeslot_ptr == 0)) {
+ MSCH_EVENT_HEAD(sp);
+ MSCH_EVENT(("<cur_msch timeslot>: 0x%08x\n",
+ ntoh32(p->p_cur_msch_timeslot)));
+ } else
+ dhd_mschdbg_timeslot_profiler_event_data(sp, ver, "cur_msch", data,
+ p->cur_msch_timeslot_ptr, (p->cur_msch_timeslot_ptr == 0));
+
+ if (p->p_next_timeslot && (p->next_timeslot_ptr == 0)) {
+ MSCH_EVENT_HEAD(sp);
+ MSCH_EVENT(("<next timeslot>: 0x%08x\n",
+ ntoh32(p->p_next_timeslot)));
+ } else
+ dhd_mschdbg_timeslot_profiler_event_data(sp, ver, "next", data,
+ p->next_timeslot_ptr, (p->next_timeslot_ptr == 0));
+
+ MSCH_EVENT_HEAD(sp);
+ MSCH_EVENT(("ts_id: %d, ", ntoh32(p->ts_id)));
+ flags = ntoh32(p->flags);
+ if (flags & WL_MSCH_STATE_IN_TIEMR_CTXT)
+ MSCH_EVENT(("IN_TIEMR_CTXT, "));
+ if (flags & WL_MSCH_STATE_SCHD_PENDING)
+ MSCH_EVENT(("SCHD_PENDING, "));
+ MSCH_EVENT(("slotskip_flags: %d, cur_armed_timeslot: 0x%08x\n",
+ (ver >= 2)? ntoh32(p->slotskip_flag) : 0, ntoh32(p->cur_armed_timeslot)));
+ MSCH_EVENT_HEAD(sp);
+ MSCH_EVENT(("flex_list_cnt: %d, service_interval: %d, "
+ "max_lo_prio_interval: %d\n",
+ ntoh16(p->flex_list_cnt), ntoh32(p->service_interval),
+ ntoh32(p->max_lo_prio_interval)));
+}
+
+static void dhd_mschdbg_dump_data(dhd_pub_t *dhdp, void *raw_event_ptr, int type,
+ char *data, int len)
+{
+ uint64 t = 0, tt = 0;
+ uint32 s = 0, ss = 0;
+ int wlc_index, ver;
+
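+	/* The 32-bit 'type' word packs the profiler version, the wl unit
+	 * (wlc) index and the event type; peel each field off before
+	 * dispatching on the event type below.
+	 */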
+ ver = (type & WL_MSCH_PROFILER_VER_MASK) >> WL_MSCH_PROFILER_VER_SHIFT;
+ wlc_index = (type & WL_MSCH_PROFILER_WLINDEX_MASK) >> WL_MSCH_PROFILER_WLINDEX_SHIFT;
+ if (wlc_index >= 4)
+ return;
+
+ type &= WL_MSCH_PROFILER_TYPE_MASK;
+ if (type <= WL_MSCH_PROFILER_PROFILE_END) {
+ msch_profiler_event_data_t *pevent = (msch_profiler_event_data_t *)data;
+ tt = ((uint64)(ntoh32(pevent->time_hi)) << 32) | ntoh32(pevent->time_lo);
+ dhd_mschdbg_us_to_sec(pevent->time_hi, pevent->time_lo, &s, &ss);
+ }
+
+ if (lastMessages && (type != WL_MSCH_PROFILER_MESSAGE) &&
+ (type != WL_MSCH_PROFILER_EVENT_LOG)) {
+ MSCH_EVENT_HEAD(0);
+ MSCH_EVENT(("\n"));
+ lastMessages = FALSE;
+ }
+
+ switch (type) {
+ case WL_MSCH_PROFILER_START:
+ MSCH_EVENT_HEAD(0);
+ MSCH_EVENT(("%06d.%06d START\n", s, ss));
+ break;
+
+ case WL_MSCH_PROFILER_EXIT:
+ MSCH_EVENT_HEAD(0);
+ MSCH_EVENT(("%06d.%06d EXIT\n", s, ss));
+ break;
+
+ case WL_MSCH_PROFILER_REQ:
+ {
+ msch_req_profiler_event_data_t *p = (msch_req_profiler_event_data_t *)data;
+ MSCH_EVENT_HEAD(0);
+ MSCH_EVENT(("\n"));
+ MSCH_EVENT_HEAD(0);
+ MSCH_EVENT(("===============================\n"));
+ MSCH_EVENT_HEAD(0);
+ MSCH_EVENT(("%06d.%06d [wl%d] REGISTER:\n", s, ss, wlc_index));
+ dhd_mschdbg_req_param_profiler_event_data(4, ver, data, p->req_param_ptr);
+ dhd_mschdbg_chanspec_list(4, data, p->chanspec_ptr, p->chanspec_cnt);
+ MSCH_EVENT_HEAD(0);
+ MSCH_EVENT(("===============================\n"));
+ MSCH_EVENT_HEAD(0);
+ MSCH_EVENT(("\n"));
+ }
+ break;
+
+ case WL_MSCH_PROFILER_CALLBACK:
+ {
+ msch_callback_profiler_event_data_t *p =
+ (msch_callback_profiler_event_data_t *)data;
+ char buf[CHANSPEC_STR_LEN];
+ chanspec_t chanspec;
+ uint16 cbtype;
+
+ MSCH_EVENT_HEAD(0);
+ MSCH_EVENT(("%06d.%06d [wl%d] CALLBACK: ", s, ss, wlc_index));
+ chanspec = (chanspec_t)ntoh16(p->chanspec);
+ MSCH_EVENT(("req_hdl[0x%08x], channel %s --",
+ ntoh32(p->p_req_hdl), wf_chspec_ntoa(chanspec, buf)));
+
+ cbtype = ntoh16(p->type);
+ if (cbtype & WL_MSCH_CT_ON_CHAN)
+ MSCH_EVENT((" ON_CHAN"));
+ if (cbtype & WL_MSCH_CT_OFF_CHAN)
+ MSCH_EVENT((" OFF_CHAN"));
+ if (cbtype & WL_MSCH_CT_REQ_START)
+ MSCH_EVENT((" REQ_START"));
+ if (cbtype & WL_MSCH_CT_REQ_END)
+ MSCH_EVENT((" REQ_END"));
+ if (cbtype & WL_MSCH_CT_SLOT_START)
+ MSCH_EVENT((" SLOT_START"));
+ if (cbtype & WL_MSCH_CT_SLOT_SKIP)
+ MSCH_EVENT((" SLOT_SKIP"));
+ if (cbtype & WL_MSCH_CT_SLOT_END)
+ MSCH_EVENT((" SLOT_END"));
+ if (cbtype & WL_MSCH_CT_OFF_CHAN_DONE)
+ MSCH_EVENT((" OFF_CHAN_DONE"));
+ if (cbtype & WL_MSCH_CT_PARTIAL)
+ MSCH_EVENT((" PARTIAL"));
+ if (cbtype & WL_MSCH_CT_PRE_ONCHAN)
+ MSCH_EVENT((" PRE_ONCHAN"));
+ if (cbtype & WL_MSCH_CT_PRE_REQ_START)
+ MSCH_EVENT((" PRE_REQ_START"));
+
+ if (cbtype & WL_MSCH_CT_REQ_START) {
+ req_start[wlc_index] = 1;
+ req_start_time[wlc_index] = tt;
+ } else if (cbtype & WL_MSCH_CT_REQ_END) {
+ if (req_start[wlc_index]) {
+ MSCH_EVENT((" : REQ duration %d",
+ (uint32)(tt - req_start_time[wlc_index])));
+ req_start[wlc_index] = 0;
+ }
+ }
+
+ if (cbtype & WL_MSCH_CT_SLOT_START) {
+			slot_chanspec[wlc_index] = p->chanspec;
+			slot_start_time[wlc_index] = tt;
+		} else if (cbtype & WL_MSCH_CT_SLOT_END) {
+			if (p->chanspec == slot_chanspec[wlc_index]) {
+				MSCH_EVENT((" : SLOT duration %d",
+					(uint32)(tt - slot_start_time[wlc_index])));
+				slot_chanspec[wlc_index] = 0;
+ }
+ }
+ MSCH_EVENT(("\n"));
+
+ if (cbtype & (WL_MSCH_CT_ON_CHAN | WL_MSCH_CT_SLOT_SKIP)) {
+ MSCH_EVENT_HEAD(4);
+ if (cbtype & WL_MSCH_CT_ON_CHAN) {
+ MSCH_EVENT(("ID %d onchan idx %d cur_chan_seq_start %s ",
+ ntoh32(p->timeslot_id), ntoh32(p->onchan_idx),
+ dhd_mschdbg_display_time(p->cur_chan_seq_start_time_h,
+ p->cur_chan_seq_start_time_l)));
+ }
+ t = ((uint64)(ntoh32(p->start_time_h)) << 32) |
+ ntoh32(p->start_time_l);
+ MSCH_EVENT(("start %s ",
+ dhd_mschdbg_display_time(p->start_time_h,
+ p->start_time_l)));
+ tt = ((uint64)(ntoh32(p->end_time_h)) << 32) | ntoh32(p->end_time_l);
+ MSCH_EVENT(("end %s duration %d\n",
+ dhd_mschdbg_display_time(p->end_time_h, p->end_time_l),
+ (p->end_time_h == 0xffffffff && p->end_time_l == 0xffffffff)?
+ -1 : (int)(tt - t)));
+ }
+
+ }
+ break;
+
+ case WL_MSCH_PROFILER_EVENT_LOG:
+ {
+ while (len >= (int)WL_MSCH_EVENT_LOG_HEAD_SIZE) {
+ msch_event_log_profiler_event_data_t *p =
+ (msch_event_log_profiler_event_data_t *)data;
+ /* TODO: How to parse MSCH if extended event tag is present ??? */
+ prcd_event_log_hdr_t hdr;
+ int size = WL_MSCH_EVENT_LOG_HEAD_SIZE + p->hdr.count * sizeof(uint32);
+ if (len < size || size > sizeof(msch_event_log_profiler_event_data_t)) {
+ break;
+ }
+ data += size;
+ len -= size;
+ dhd_mschdbg_us_to_sec(p->time_hi, p->time_lo, &s, &ss);
+ MSCH_EVENT_HEAD(0);
+ MSCH_EVENT(("%06d.%06d [wl%d]: ", s, ss, p->hdr.tag));
+ bzero(&hdr, sizeof(hdr));
+ hdr.tag = EVENT_LOG_TAG_MSCHPROFILE;
+ hdr.count = p->hdr.count + 1;
+ /* exclude LSB 2 bits which indicate binary/non-binary data */
+ hdr.fmt_num = ntoh16(p->hdr.fmt_num) >> 2;
+ hdr.fmt_num_raw = ntoh16(p->hdr.fmt_num);
+ if (ntoh16(p->hdr.fmt_num) == DHD_OW_BI_RAW_EVENT_LOG_FMT) {
+ hdr.binary_payload = TRUE;
+ }
+ dhd_dbg_verboselog_printf(dhdp, &hdr, raw_event_ptr, p->data, 0, 0);
+ }
+ lastMessages = TRUE;
+ break;
+ }
+
+ case WL_MSCH_PROFILER_MESSAGE:
+ {
+ msch_message_profiler_event_data_t *p = (msch_message_profiler_event_data_t *)data;
+ MSCH_EVENT_HEAD(0);
+ MSCH_EVENT(("%06d.%06d [wl%d]: %s", s, ss, wlc_index, p->message));
+ lastMessages = TRUE;
+ break;
+ }
+
+ case WL_MSCH_PROFILER_PROFILE_START:
+ profiler_start_time[wlc_index] = tt;
+ MSCH_EVENT_HEAD(0);
+ MSCH_EVENT(("-------------------------------\n"));
+ MSCH_EVENT_HEAD(0);
+ MSCH_EVENT(("%06d.%06d [wl%d] PROFILE DATA:\n", s, ss, wlc_index));
+ dhd_mschdbg_profiler_profiler_event_data(4, ver, data, 0);
+ break;
+
+ case WL_MSCH_PROFILER_PROFILE_END:
+ MSCH_EVENT_HEAD(0);
+ MSCH_EVENT(("%06d.%06d [wl%d] PROFILE END: take time %d\n", s, ss,
+ wlc_index, (uint32)(tt - profiler_start_time[wlc_index])));
+ MSCH_EVENT_HEAD(0);
+ MSCH_EVENT(("-------------------------------\n"));
+ MSCH_EVENT_HEAD(0);
+ MSCH_EVENT(("\n"));
+ break;
+
+ case WL_MSCH_PROFILER_REQ_HANDLE:
+ dhd_mschdbg_req_handle_profiler_event_data(4, ver, data, 0, FALSE);
+ break;
+
+ case WL_MSCH_PROFILER_REQ_ENTITY:
+ dhd_mschdbg_req_entity_profiler_event_data(4, ver, data, 0, FALSE);
+ break;
+
+ case WL_MSCH_PROFILER_CHAN_CTXT:
+ dhd_mschdbg_chan_ctxt_profiler_event_data(4, ver, data, 0, FALSE);
+ break;
+
+ case WL_MSCH_PROFILER_REQ_TIMING:
+ dhd_mschdbg_req_timing_profiler_event_data(4, ver, "msch", data, 0, FALSE);
+ break;
+
+ default:
+ MSCH_EVENT_HEAD(0);
+ MSCH_EVENT(("[wl%d] ERROR: unsupported EVENT reason code:%d; ",
+ wlc_index, type));
+ break;
+ }
+}
+
+void
+wl_mschdbg_event_handler(dhd_pub_t *dhdp, void *raw_event_ptr, int type, void *data, int len)
+{
+ head_log = "MSCH";
+ dhd_mschdbg_dump_data(dhdp, raw_event_ptr, type, (char *)data, len);
+}
+
+void
+wl_mschdbg_verboselog_handler(dhd_pub_t *dhdp, void *raw_event_ptr, prcd_event_log_hdr_t *plog_hdr,
+ uint32 *log_ptr)
+{
+ uint32 log_pyld_len;
+ head_log = "CONSOLE";
+
+ if (plog_hdr->count == 0) {
+ return;
+ }
+ log_pyld_len = (plog_hdr->count - 1) * DATA_UNIT_FOR_LOG_CNT;
+
+ if (plog_hdr->tag == EVENT_LOG_TAG_MSCHPROFILE) {
+ msch_event_log_profiler_event_data_t *p =
+ (msch_event_log_profiler_event_data_t *)log_ptr;
+ /* TODO: How to parse MSCH if extended event tag is present ??? */
+ prcd_event_log_hdr_t hdr;
+ uint32 s, ss;
+
+ if (log_pyld_len < OFFSETOF(msch_event_log_profiler_event_data_t, data) ||
+ log_pyld_len > sizeof(msch_event_log_profiler_event_data_t)) {
+ return;
+ }
+
+ dhd_mschdbg_us_to_sec(p->time_hi, p->time_lo, &s, &ss);
+ MSCH_EVENT_HEAD(0);
+ MSCH_EVENT(("%06d.%06d [wl%d]: ", s, ss, p->hdr.tag));
+ bzero(&hdr, sizeof(hdr));
+ hdr.tag = EVENT_LOG_TAG_MSCHPROFILE;
+ hdr.count = p->hdr.count + 1;
+ /* exclude LSB 2 bits which indicate binary/non-binary data */
+ hdr.fmt_num = ntoh16(p->hdr.fmt_num) >> 2;
+ hdr.fmt_num_raw = ntoh16(p->hdr.fmt_num);
+ if (ntoh16(p->hdr.fmt_num) == DHD_OW_BI_RAW_EVENT_LOG_FMT) {
+ hdr.binary_payload = TRUE;
+ }
+ dhd_dbg_verboselog_printf(dhdp, &hdr, raw_event_ptr, p->data, 0, 0);
+ } else {
+ msch_collect_tlv_t *p = (msch_collect_tlv_t *)log_ptr;
+ int type = ntoh16(p->type);
+ int len = ntoh16(p->size);
+
+ if (log_pyld_len < OFFSETOF(msch_collect_tlv_t, value) + len) {
+ return;
+ }
+
+ dhd_mschdbg_dump_data(dhdp, raw_event_ptr, type, p->value, len);
+ }
+}
+#endif /* SHOW_LOGTRACE */
diff --git a/bcmdhd.101.10.361.x/dhd_mschdbg.h b/bcmdhd.101.10.361.x/dhd_mschdbg.h
new file mode 100755
index 0000000..12ba3ab
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_mschdbg.h
@@ -0,0 +1,36 @@
+/*
+ * DHD debugability header file
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_mschdbg.h 571265 2015-07-14 20:50:18Z eccopark $
+ */
+
+#ifndef _dhd_mschdbg_h_
+#define _dhd_mschdbg_h_
+
+#ifdef SHOW_LOGTRACE
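+/* Entry points from the DHD log-trace path: wl_mschdbg_event_handler()
+ * consumes a complete MSCH profiler event, while wl_mschdbg_verboselog_handler()
+ * parses MSCH records embedded in the verbose event-log stream.
+ */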
+extern void wl_mschdbg_event_handler(dhd_pub_t *dhdp, void *raw_event_ptr, int type,
+ void *data, int len);
+extern void wl_mschdbg_verboselog_handler(dhd_pub_t *dhdp, void *raw_event_ptr,
+ prcd_event_log_hdr_t *plog_hdr, uint32 *log_ptr);
+#endif /* SHOW_LOGTRACE */
+
+#endif /* _dhd_mschdbg_h_ */
diff --git a/bcmdhd.101.10.361.x/dhd_msgbuf.c b/bcmdhd.101.10.361.x/dhd_msgbuf.c
new file mode 100755
index 0000000..12eb4e0
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_msgbuf.c
@@ -0,0 +1,15512 @@
+/**
+ * @file definition of host message ring functionality
+ * Provides type definitions and function prototypes used to link the
+ * DHD OS, bus, and protocol modules.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+/** XXX Twiki: [PCIeFullDongleArchitecture] */
+
+#include <typedefs.h>
+#include <osl.h>
+
+#include <bcmutils.h>
+#include <bcmmsgbuf.h>
+#include <bcmendian.h>
+#include <bcmstdlib_s.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_proto.h>
+
+#ifdef BCMDBUS
+#include <dbus.h>
+#else
+#include <dhd_bus.h>
+#endif /* BCMDBUS */
+
+#include <dhd_dbg.h>
+#include <siutils.h>
+#include <dhd_debug.h>
+#ifdef EXT_STA
+#include <wlc_cfg.h>
+#include <wlc_pub.h>
+#include <wl_port_if.h>
+#endif /* EXT_STA */
+
+#include <dhd_flowring.h>
+
+#include <pcie_core.h>
+#include <bcmpcie.h>
+#include <dhd_pcie.h>
+#ifdef DHD_TIMESYNC
+#include <dhd_timesync.h>
+#endif /* DHD_TIMESYNC */
+#ifdef DHD_PKTTS
+#include <bcmudp.h>
+#include <bcmtcp.h>
+#endif /* DHD_PKTTS */
+#include <dhd_config.h>
+
+#if defined(DHD_LB)
+#if !defined(LINUX) && !defined(linux) && !defined(OEM_ANDROID)
+#error "DHD Loadbalancing only supported on LINUX | OEM_ANDROID"
+#endif /* !LINUX && !OEM_ANDROID */
+#include <linux/cpu.h>
+#include <bcm_ring.h>
+#define DHD_LB_WORKQ_SZ (8192)
+#define DHD_LB_WORKQ_SYNC (16)
+#define DHD_LB_WORK_SCHED (DHD_LB_WORKQ_SYNC * 2)
+#endif /* DHD_LB */
+
+#include <etd.h>
+#include <hnd_debug.h>
+#include <bcmtlv.h>
+#include <hnd_armtrap.h>
+#include <dnglevent.h>
+
+#ifdef DHD_PKT_LOGGING
+#include <dhd_pktlog.h>
+#include <dhd_linux_pktdump.h>
+#endif /* DHD_PKT_LOGGING */
+#ifdef DHD_EWPR_VER2
+#include <dhd_bitpack.h>
+#endif /* DHD_EWPR_VER2 */
+
+extern char dhd_version[];
+extern char fw_version[];
+
+/**
+ * Host configures a soft doorbell for d2h rings, by specifying a 32bit host
+ * address where a value must be written. Host may also enable interrupt
+ * coalescing on this soft doorbell.
+ * Use Case: Hosts with network processors may register with the dongle the
+ * network processor's thread wakeup register and a value corresponding to the
+ * core/thread context. Dongle will issue a write transaction <address,value>
+ * to the PCIE RC, which the host will need to route to the mapped register
+ * space.
+ */
+/* #define DHD_D2H_SOFT_DOORBELL_SUPPORT */
+
+/* Dependency Check */
+#if defined(IOCTLRESP_USE_CONSTMEM) && defined(DHD_USE_STATIC_CTRLBUF)
+#error "DHD_USE_STATIC_CTRLBUF is NOT working with DHD_USE_OSLPKT_FOR_RESPBUF"
+#endif /* IOCTLRESP_USE_CONSTMEM && DHD_USE_STATIC_CTRLBUF */
+
+#define RETRIES 2 /* # of retries to retrieve matching ioctl response */
+
+#if defined(DHD_HTPUT_TUNABLES)
+#define DEFAULT_RX_BUFFERS_TO_POST 1024
+#define RX_BUF_BURST 64 /* Rx buffers for MSDU Data */
+#define RXBUFPOST_THRESHOLD 64 /* Rxbuf post threshold */
+#else
+#define DEFAULT_RX_BUFFERS_TO_POST 256
+#define RX_BUF_BURST 32 /* Rx buffers for MSDU Data */
+#define RXBUFPOST_THRESHOLD 32 /* Rxbuf post threshold */
+#endif /* DHD_HTPUT_TUNABLES */
+
+/* Read index update Magic sequence */
+#define DHD_DMA_INDX_SEQ_H2D_DB_MAGIC 0xDDDDDDDAu
+#define DHD_RDPTR_UPDATE_H2D_DB_MAGIC(ring) (0xDD000000 | (ring->idx << 16u) | ring->rd)
+/* Write index update Magic sequence */
+#define DHD_WRPTR_UPDATE_H2D_DB_MAGIC(ring) (0xFF000000 | (ring->idx << 16u) | ring->wr)
+#define DHD_AGGR_H2D_DB_MAGIC 0xFFFFFFFAu
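+/*
+ * Worked example (illustrative values): for a ring with idx 5 whose read
+ * index has advanced to 0x12, DHD_RDPTR_UPDATE_H2D_DB_MAGIC(ring) yields
+ * 0xDD000000 | (5 << 16) | 0x12 = 0xDD050012; the 0xFF-prefixed variant
+ * encodes the write index the same way.
+ */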
+
+#define DHD_STOP_QUEUE_THRESHOLD 200
+#define DHD_START_QUEUE_THRESHOLD 100
+
+#define RX_DMA_OFFSET 8 /* Mem2mem DMA inserts an extra 8 bytes */
+#define IOCT_RETBUF_SIZE (RX_DMA_OFFSET + WLC_IOCTL_MAXLEN)
+
+/* flags for ioctl pending status */
+#define MSGBUF_IOCTL_ACK_PENDING (1<<0)
+#define MSGBUF_IOCTL_RESP_PENDING (1<<1)
+
+#define DHD_IOCTL_REQ_PKTBUFSZ 2048
+#define MSGBUF_IOCTL_MAX_RQSTLEN (DHD_IOCTL_REQ_PKTBUFSZ - H2DRING_CTRL_SUB_ITEMSIZE)
+
+/**
+ * XXX: DMA_ALIGN_LEN use is overloaded:
+ * - as align bits: in DMA_ALLOC_CONSISTENT 1 << 4
+ * - in ensuring that a buffer's va is 4 Byte aligned
+ * - in rounding up a buffer length to 4 Bytes.
+ */
+#define DMA_ALIGN_LEN 4
+
+#define DMA_D2H_SCRATCH_BUF_LEN 8
+#define DMA_XFER_LEN_LIMIT 0x400000
+
+#ifdef BCM_HOST_BUF
+#ifndef DMA_HOST_BUFFER_LEN
+#define DMA_HOST_BUFFER_LEN 0x200000
+#endif
+#endif /* BCM_HOST_BUF */
+
+#if defined(CUSTOMER_HW_AMLOGIC) && defined(USE_AML_PCIE_TEE_MEM)
+#define DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ 4096
+#else
+#define DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ 8192
+#endif
+
+#define DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D 1
+#define DHD_FLOWRING_MAX_EVENTBUF_POST 32
+#define DHD_FLOWRING_MAX_IOCTLRESPBUF_POST 8
+#define DHD_H2D_INFORING_MAX_BUF_POST 32
+#ifdef BTLOG
+#define DHD_H2D_BTLOGRING_MAX_BUF_POST 32
+#endif /* BTLOG */
+#define DHD_MAX_TSBUF_POST 8
+
+#define DHD_PROT_FUNCS 43
+
+/* Length of buffer in host for bus throughput measurement */
+#define DHD_BUS_TPUT_BUF_LEN 2048
+
+#define TXP_FLUSH_NITEMS
+
+/* optimization to write "n" tx items at a time to ring */
+#define TXP_FLUSH_MAX_ITEMS_FLUSH_CNT 48
+
+#define RING_NAME_MAX_LENGTH 24
+#define CTRLSUB_HOSTTS_MEESAGE_SIZE 1024
+/* Give room before ioctl_trans_id rolls over. */
+#define BUFFER_BEFORE_ROLLOVER 300
+
+/* 512K memory + 32K registers */
+#define SNAPSHOT_UPLOAD_BUF_SIZE ((512 + 32) * 1024)
+
+struct msgbuf_ring; /* ring context for common and flow rings */
+
+#ifdef DHD_HMAPTEST
+/* 5 * DMA_CONSISTENT_ALIGN as different tests use up to the 4th page */
+#define HMAP_SANDBOX_BUFFER_LEN (DMA_CONSISTENT_ALIGN * 5) /* for a 4k page this is 20K */
+/**
+ * For the D11 DMA HMAPTEST the states are as follows:
+ * the iovar sets the ACTIVE state;
+ * the next TXPOST / RXPOST sets the POSTED state;
+ * on TXCPL / RXCPL, a POSTED state plus a pktid match frees the buffer and
+ * moves the state to INACTIVE.
+ * This ensures that only one buffer per iovar is replaced from the sandbox area.
+ */
+#define HMAPTEST_D11_TX_INACTIVE 0
+#define HMAPTEST_D11_TX_ACTIVE 1
+#define HMAPTEST_D11_TX_POSTED 2
+
+#define HMAPTEST_D11_RX_INACTIVE 0
+#define HMAPTEST_D11_RX_ACTIVE 1
+#define HMAPTEST_D11_RX_POSTED 2
+#endif /* DHD_HMAPTEST */
+
+#define PCIE_DMA_LOOPBACK 0
+#define D11_DMA_LOOPBACK 1
+#define BMC_DMA_LOOPBACK 2
+
+/**
+ * PCIE D2H DMA Complete Sync Modes
+ *
+ * Firmware may interrupt the host before the D2H Mem2Mem DMA completes into
+ * host system memory. A WAR using one of the following four approaches is
+ * needed:
+ * 1. Dongle places a modulo-253 seqnum in the last word of each D2H message.
+ * 2. XOR Checksum, with epoch# in each work item: the dongle builds an XOR
+ *    checksum and writes it in the last word of each work item; each work
+ *    item carries a seqnum = sequence number % 253.
+ * 3. Read Barrier: Dongle does a host memory read access prior to posting an
+ *    interrupt, ensuring that the D2H data transfer indeed completed.
+ * 4. Dongle DMA's all indices after producing items in the D2H ring, flushing
+ *    ring contents before the indices.
+ *
+ * Host does not sync for DMA to complete with option #3 or #4, and a noop sync
+ * callback (see dhd_prot_d2h_sync_none) may be bound.
+ *
+ * Dongle advertises its host side sync mechanism requirements.
+ */
+
+#define PCIE_D2H_SYNC_WAIT_TRIES (512U)
+#define PCIE_D2H_SYNC_NUM_OF_STEPS (5U)
+#define PCIE_D2H_SYNC_DELAY (100UL) /* in terms of usecs */
+
+#ifdef DHD_REPLACE_LOG_INFO_TO_TRACE
+#define DHD_MSGBUF_INFO DHD_TRACE
+#else
+#define DHD_MSGBUF_INFO DHD_INFO
+#endif /* DHD_REPLACE_LOG_INFO_TO_TRACE */
+
+/**
+ * Custom callback attached based upon D2H DMA Sync mode advertised by dongle.
+ *
+ * On success: return cmn_msg_hdr_t::msg_type
+ * On failure: return 0 (invalid msg_type)
+ */
+typedef uint8 (* d2h_sync_cb_t)(dhd_pub_t *dhd, struct msgbuf_ring *ring,
+ volatile cmn_msg_hdr_t *msg, int msglen);
+
+/**
+ * Custom callback attached based upon D2H DMA Sync mode advertised by dongle.
+ * For EDL messages.
+ *
+ * On success: return cmn_msg_hdr_t::msg_type
+ * On failure: return 0 (invalid msg_type)
+ */
+#ifdef EWP_EDL
+typedef int (* d2h_edl_sync_cb_t)(dhd_pub_t *dhd, struct msgbuf_ring *ring,
+ volatile cmn_msg_hdr_t *msg);
+#endif /* EWP_EDL */
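+
+/*
+ * Minimal sketch of sync approach #1 above (illustration only, compiled
+ * out; the function name is hypothetical). It assumes the dongle's
+ * modulo-253 seqnum lands in the last 32-bit word of the work item, and
+ * simply spins until that marker appears, proving the Mem2Mem DMA has
+ * landed. The real callback bound to d2h_sync_cb additionally staggers
+ * its delays across PCIE_D2H_SYNC_NUM_OF_STEPS.
+ */
+#if 0
+static uint8
+dhd_prot_d2h_sync_seqnum_sketch(dhd_pub_t *dhd, struct msgbuf_ring *ring,
+	volatile cmn_msg_hdr_t *msg, int msglen)
+{
+	uint32 expected = ring->seqnum % 253; /* per-item epoch */
+	volatile uint32 *marker = (volatile uint32 *)
+		((volatile uint8 *)msg + msglen - sizeof(uint32));
+	uint32 tries;
+
+	BCM_REFERENCE(dhd);
+	for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
+		if (*marker == expected) {
+			ring->seqnum++; /* expect the next item's seqnum */
+			return msg->msg_type; /* valid work item */
+		}
+		OSL_DELAY(PCIE_D2H_SYNC_DELAY); /* give the DMA time to land */
+	}
+	return 0; /* invalid msg_type: DMA did not complete in time */
+}
+#endif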
+
+/*
+ * +----------------------------------------------------------------------------
+ *
+ * RingIds and FlowId are not equivalent as ringids include D2H rings whereas
+ * flowids do not.
+ *
+ * Dongle advertises the max H2D rings, as max_sub_queues = 'N', which includes
+ * the H2D common rings as well as the (N-BCMPCIE_H2D_COMMON_MSGRINGS) flowrings
+ *
+ * Here is a sample mapping for (based on PCIE Full Dongle Rev5) where,
+ * BCMPCIE_H2D_COMMON_MSGRINGS = 2, i.e. 2 H2D common rings,
+ * BCMPCIE_COMMON_MSGRINGS = 5, i.e. include 3 D2H common rings.
+ *
+ * H2D Control Submit RingId = 0 FlowId = 0 reserved never allocated
+ * H2D RxPost Submit RingId = 1 FlowId = 1 reserved never allocated
+ *
+ * D2H Control Complete RingId = 2
+ * D2H Transmit Complete RingId = 3
+ * D2H Receive Complete RingId = 4
+ *
+ * H2D TxPost FLOWRING RingId = 5 FlowId = 2 (1st flowring)
+ * H2D TxPost FLOWRING RingId = 6 FlowId = 3 (2nd flowring)
+ * H2D TxPost FLOWRING RingId = N + 2 FlowId = N - 1 (last flowring)
+ *
+ * When TxPost FlowId(s) are allocated, the FlowIds [0..FLOWID_RESERVED) are
+ * unused, where FLOWID_RESERVED is BCMPCIE_H2D_COMMON_MSGRINGS.
+ *
+ * Example: when a system supports 4 bc/mc and 128 uc flowrings, with
+ * BCMPCIE_H2D_COMMON_MSGRINGS = 2 and BCMPCIE_COMMON_MSGRINGS = 5, the
+ * FlowId values would be in the range [2..133] and the corresponding
+ * RingId values would be in the range [5..136].
+ *
+ * The flowid allocator may choose to allocate flowids:
+ * bc/mc (per virtual interface) in one consecutive range [2..(2+VIFS))
+ * X# of uc flowids in consecutive ranges (per station Id), where X is the
+ * packet's access category (e.g. 4 uc flowids per station).
+ *
+ * CAUTION:
+ * When DMA indices array feature is used, RingId=5, corresponding to the 0th
+ * FLOWRING, will actually use the FlowId as index into the H2D DMA index,
+ * since the FlowId truly represents the index in the H2D DMA indices array.
+ *
+ * Likewise, in the D2H direction, the RingId - BCMPCIE_H2D_COMMON_MSGRINGS,
+ * will represent the index in the D2H DMA indices array.
+ *
+ * +----------------------------------------------------------------------------
+ */
+
+/* First TxPost Flowring Id */
+#define DHD_FLOWRING_START_FLOWID BCMPCIE_H2D_COMMON_MSGRINGS
+
+/* Determine whether a ringid belongs to a TxPost flowring */
+#define DHD_IS_FLOWRING(ringid, max_flow_rings) \
+ ((ringid) >= BCMPCIE_COMMON_MSGRINGS && \
+ (ringid) < ((max_flow_rings) + BCMPCIE_COMMON_MSGRINGS))
+
+/* Convert a H2D TxPost FlowId to a MsgBuf RingId */
+#define DHD_FLOWID_TO_RINGID(flowid) \
+ (BCMPCIE_COMMON_MSGRINGS + ((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS))
+
+/* Convert a MsgBuf RingId to a H2D TxPost FlowId */
+#define DHD_RINGID_TO_FLOWID(ringid) \
+ (BCMPCIE_H2D_COMMON_MSGRINGS + ((ringid) - BCMPCIE_COMMON_MSGRINGS))
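+
+/*
+ * Example (sample mapping above, BCMPCIE_H2D_COMMON_MSGRINGS = 2 and
+ * BCMPCIE_COMMON_MSGRINGS = 5): DHD_FLOWID_TO_RINGID(2) = 5 + (2 - 2) = 5,
+ * DHD_FLOWID_TO_RINGID(3) = 6; conversely, DHD_RINGID_TO_FLOWID(6) =
+ * 2 + (6 - 5) = 3.
+ */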
+
+/* Convert a H2D MsgBuf RingId to an offset index into the H2D DMA indices array
+ * This may be used for the H2D DMA WR index array or H2D DMA RD index array or
+ * any array of H2D rings.
+ */
+#define DHD_H2D_RING_OFFSET(ringid) \
+ (((ringid) >= BCMPCIE_COMMON_MSGRINGS) ? DHD_RINGID_TO_FLOWID(ringid) : (ringid))
+
+/* Convert a H2D MsgBuf Flowring Id to an offset index into the H2D DMA indices array
+ * This may be used for IFRM.
+ */
+#define DHD_H2D_FRM_FLOW_RING_OFFSET(ringid) \
+ ((ringid) - BCMPCIE_COMMON_MSGRINGS)
+
+/* Convert a D2H MsgBuf RingId to an offset index into the D2H DMA indices array
+ * This may be used for the D2H DMA WR index array or D2H DMA RD index array or
+ * any array of D2H rings.
+ * d2h debug ring is located at the end, i.e. after all the tx flow rings and h2d debug ring
+ * max_h2d_rings: total number of h2d rings
+ */
+#define DHD_D2H_RING_OFFSET(ringid, max_h2d_rings) \
+ ((ringid) > (max_h2d_rings) ? \
+ ((ringid) - max_h2d_rings) : \
+ ((ringid) - BCMPCIE_H2D_COMMON_MSGRINGS))
+
+/* Convert a D2H DMA Indices Offset to a RingId */
+#define DHD_D2H_RINGID(offset) \
+ ((offset) + BCMPCIE_H2D_COMMON_MSGRINGS)
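+
+/*
+ * Example (sample mapping above): the D2H common rings with RingIds 2, 3
+ * and 4 map to D2H DMA index array offsets 0, 1 and 2 respectively, since
+ * none exceeds max_h2d_rings; DHD_D2H_RINGID() reverses that mapping.
+ */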
+
+/* XXX: The ringid, flowid and dma indices array index idiosyncrasy is error
+ * prone. While a simplification is possible, the backward compatibility
+ * requirement (DHD should operate with any PCIE rev version of firmware)
+ * limits what may be accomplished.
+ *
+ * At the minimum, implementation should use macros for any conversions
+ * facilitating introduction of future PCIE FD revs that need more "common" or
+ * other dynamic rings.
+ */
+
+/* XXX: Presently there is no need for maintaining both a dmah and a secdmah */
+#define DHD_DMAH_NULL ((void*)NULL)
+
+/*
+ * Pad a DMA-able buffer by an additional cacheline. If the end of the DMA-able
+ * buffer does not occupy the entire cacheline, and another object is placed
+ * following the DMA-able buffer, data corruption may occur when the DMA-able
+ * buffer is DMA'ed into (e.g. D2H direction) and HW cache coherency
+ * is not available.
+ */
+#if defined(L1_CACHE_BYTES)
+#define DHD_DMA_PAD (L1_CACHE_BYTES)
+#else
+#define DHD_DMA_PAD (128)
+#endif
+
+/*
+ * +----------------------------------------------------------------------------
+ * Flowring Pool
+ *
+ * Unlike common rings, which are attached very early on (dhd_prot_attach),
+ * flowrings are dynamically instantiated. Moreover, flowrings may require a
+ * larger DMA-able buffer. To avoid issues with fragmented cache coherent
+ * DMA-able memory, a pre-allocated pool of msgbuf_ring_t is allocated once.
+ * The DMA-able buffers are attached to these pre-allocated msgbuf_ring.
+ *
+ * Each DMA-able buffer may be allocated independently, or may be carved out
+ * of a single large contiguous region that is registered with the protocol
+ * layer into flowrings_dma_buf. On a 64bit platform, this contiguous region
+ * may not span 0x00000000FFFFFFFF (avoid dongle side 64bit ptr arithmetic).
+ *
+ * No flowring pool action is performed in dhd_prot_attach(), as the number
+ * of h2d rings is not yet known.
+ *
+ * In dhd_prot_init(), the dongle advertised number of h2d rings is used to
+ * determine the number of flowrings required, and a pool of msgbuf_rings are
+ * allocated and a DMA-able buffer (carved or allocated) is attached.
+ * See: dhd_prot_flowrings_pool_attach()
+ *
+ * A flowring msgbuf_ring object may be fetched from this pool during flowring
+ * creation, using the flowid. Likewise, flowrings may be freed back into the
+ * pool on flowring deletion.
+ * See: dhd_prot_flowrings_pool_fetch(), dhd_prot_flowrings_pool_release()
+ *
+ * In dhd_prot_detach(), the flowring pool is detached. The DMA-able buffers
+ * are detached (returned back to the carved region or freed), and the pool of
+ * msgbuf_ring and any objects allocated against it are freed.
+ * See: dhd_prot_flowrings_pool_detach()
+ *
+ * In dhd_prot_reset(), the flowring pool is simply reset by returning it to a
+ * state as-if upon an attach. All DMA-able buffers are retained.
+ * Following a dhd_prot_reset(), in a subsequent dhd_prot_init(), the flowring
+ * pool attach will notice that the pool persists and continue to use it. This
+ * will avoid the case of a fragmented DMA-able region.
+ *
+ * +----------------------------------------------------------------------------
+ */
+
+/* Conversion of a flowid to a flowring pool index */
+#define DHD_FLOWRINGS_POOL_OFFSET(flowid) \
+ ((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS)
+
+/* Fetch the msgbuf_ring_t from the flowring pool given a flowid */
+#define DHD_RING_IN_FLOWRINGS_POOL(prot, flowid) \
+ (msgbuf_ring_t*)((prot)->h2d_flowrings_pool) + \
+ DHD_FLOWRINGS_POOL_OFFSET(flowid)
+
+/* Traverse each flowring in the flowring pool, assigning ring and flowid */
+#define FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, total_flowrings) \
+ for ((flowid) = DHD_FLOWRING_START_FLOWID, \
+ (ring) = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid); \
+ (flowid) < ((total_flowrings) + DHD_FLOWRING_START_FLOWID); \
+ (ring)++, (flowid)++)
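+
+/*
+ * Usage sketch for the pool iterator above (hypothetical caller): walk
+ * every preallocated flowring, e.g. to reset those already initialized.
+ *
+ *	msgbuf_ring_t *ring;
+ *	uint16 flowid;
+ *	FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, max_flow_rings) {
+ *		if (ring->inited)
+ *			dhd_prot_ring_reset(dhd, ring);
+ *	}
+ */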
+
+/* Used in loopback tests */
+typedef struct dhd_dmaxfer {
+ dhd_dma_buf_t srcmem;
+ dhd_dma_buf_t dstmem;
+ uint32 srcdelay;
+ uint32 destdelay;
+ uint32 len;
+ bool in_progress;
+ uint64 start_usec;
+ uint64 time_taken;
+ uint32 d11_lpbk;
+ int status;
+} dhd_dmaxfer_t;
+
+#ifdef DHD_HMAPTEST
+/* Used in HMAP test */
+typedef struct dhd_hmaptest {
+ dhd_dma_buf_t mem;
+ uint32 len;
+ bool in_progress;
+ uint32 is_write;
+ uint32 accesstype;
+ uint64 start_usec;
+ uint32 offset;
+} dhd_hmaptest_t;
+#endif /* DHD_HMAPTEST */
+/**
+ * msgbuf_ring : This object manages the host side ring that includes a DMA-able
+ * buffer, the WR and RD indices, ring parameters such as the max number of
+ * items and the length of each item, and other miscellaneous runtime state.
+ * A msgbuf_ring may be used to represent a H2D or D2H common ring or a
+ * H2D TxPost ring as specified in the PCIE FullDongle Spec.
+ * Ring parameters are conveyed to the dongle, which maintains its own peer end
+ * ring state. Depending on whether the DMA Indices feature is supported, the
+ * host will update the WR/RD index in the DMA indices array in host memory or
+ * directly in dongle memory.
+ */
+typedef struct msgbuf_ring {
+ bool inited;
+ uint16 idx; /* ring id */
+ uint16 rd; /* read index */
+ uint16 curr_rd; /* read index for debug */
+ uint16 wr; /* write index */
+ uint16 max_items; /* maximum number of items in ring */
+ uint16 item_len; /* length of each item in the ring */
+ sh_addr_t base_addr; /* LITTLE ENDIAN formatted: base address */
+ dhd_dma_buf_t dma_buf; /* DMA-able buffer: pa, va, len, dmah, secdma */
+ uint32 seqnum; /* next expected item's sequence number */
+#ifdef TXP_FLUSH_NITEMS
+ void *start_addr;
+ /* # of messages on ring not yet announced to dongle */
+ uint16 pend_items_count;
+#ifdef AGG_H2D_DB
+ osl_atomic_t inflight;
+#endif /* AGG_H2D_DB */
+#endif /* TXP_FLUSH_NITEMS */
+
+ uint8 ring_type;
+ uint8 n_completion_ids;
+ bool create_pending;
+ uint16 create_req_id;
+ uint8 current_phase;
+ uint16 compeltion_ring_ids[MAX_COMPLETION_RING_IDS_ASSOCIATED];
+ uchar name[RING_NAME_MAX_LENGTH];
+ uint32 ring_mem_allocated;
+ void *ring_lock;
+} msgbuf_ring_t;
+
+#define DHD_RING_BGN_VA(ring) ((ring)->dma_buf.va)
+#define DHD_RING_END_VA(ring) \
+ ((uint8 *)(DHD_RING_BGN_VA((ring))) + \
+ (((ring)->max_items - 1) * (ring)->item_len))
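+
+/*
+ * Illustrative helper, compiled out (not part of the driver): with rd/wr
+ * indices in [0, max_items) and wr == rd meaning an empty ring, the number
+ * of occupied slots is the wrapped distance from rd to wr.
+ */
+#if 0
+static INLINE uint16
+msgbuf_ring_occupancy(const msgbuf_ring_t *ring)
+{
+	return (ring->wr >= ring->rd) ?
+		(ring->wr - ring->rd) :
+		(uint16)((ring->max_items - ring->rd) + ring->wr);
+}
+#endif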
+
+#if defined(BCMINTERNAL) && defined(DHD_DBG_DUMP)
+#define MAX_IOCTL_TRACE_SIZE 50
+#define MAX_IOCTL_BUF_SIZE 64
+typedef struct _dhd_ioctl_trace_t {
+ uint32 cmd;
+ uint16 transid;
+ char ioctl_buf[MAX_IOCTL_BUF_SIZE];
+ uint64 timestamp;
+} dhd_ioctl_trace_t;
+#endif /* defined(BCMINTERNAL) && defined(DHD_DBG_DUMP) */
+
+#ifdef DHD_PKTTS
+struct pktts_fwtx_v1 {
+ uint32 ts[PKTTS_MAX_FWTX];
+};
+
+struct pktts_fwtx_v2 {
+ uint32 ts[PKTTS_MAX_FWTX];
+ uint32 ut[PKTTS_MAX_UCTX];
+ uint32 uc[PKTTS_MAX_UCCNT];
+};
+
+static void dhd_msgbuf_send_msg_tx_ts(dhd_pub_t *dhd, void *pkt,
+ void *fw_ts, uint16 version);
+static void dhd_msgbuf_send_msg_rx_ts(dhd_pub_t *dhd, void *pkt,
+ uint fwr1, uint fwr2);
+#endif /* DHD_PKTTS */
+
+#if (defined(BCM_ROUTER_DHD) && defined(HNDCTF))
+/** D2H WLAN Rx Packet Chaining context */
+typedef struct rxchain_info {
+ uint pkt_count;
+ uint ifidx;
+ void *pkthead;
+ void *pkttail;
+ uint8 *h_da; /* pointer to da of chain head */
+ uint8 *h_sa; /* pointer to sa of chain head */
+ uint8 h_prio; /* prio of chain head */
+} rxchain_info_t;
+#endif /* BCM_ROUTER_DHD && HNDCTF */
+
+/* This can be overridden by a module parameter defined in dhd_linux.c
+ * or by the dhd iovar h2d_max_txpost.
+ */
+int h2d_max_txpost = H2DRING_TXPOST_MAX_ITEM;
+#if defined(DHD_HTPUT_TUNABLES)
+int h2d_htput_max_txpost = H2DRING_HTPUT_TXPOST_MAX_ITEM;
+#endif /* DHD_HTPUT_TUNABLES */
+
+#ifdef AGG_H2D_DB
+bool agg_h2d_db_enab = TRUE;
+
+#define AGG_H2D_DB_TIMEOUT_USEC (1000u) /* 1 msec */
+uint32 agg_h2d_db_timeout = AGG_H2D_DB_TIMEOUT_USEC;
+
+#ifndef AGG_H2D_DB_INFLIGHT_THRESH
+/* Keep inflight threshold same as txp_threshold */
+#define AGG_H2D_DB_INFLIGHT_THRESH TXP_FLUSH_MAX_ITEMS_FLUSH_CNT
+#endif /* !AGG_H2D_DB_INFLIGHT_THRESH */
+
+uint32 agg_h2d_db_inflight_thresh = AGG_H2D_DB_INFLIGHT_THRESH;
+
+#define DHD_NUM_INFLIGHT_HISTO_ROWS (14u)
+#define DHD_INFLIGHT_HISTO_SIZE (sizeof(uint64) * DHD_NUM_INFLIGHT_HISTO_ROWS)
+
+typedef struct _agg_h2d_db_info {
+ void *dhd;
+ struct hrtimer timer;
+ bool init;
+ uint32 direct_db_cnt;
+ uint32 timer_db_cnt;
+ uint64 *inflight_histo;
+} agg_h2d_db_info_t;
+#endif /* AGG_H2D_DB */
+
+/** DHD protocol handle. Is an opaque type to other DHD software layers. */
+typedef struct dhd_prot {
+ osl_t *osh; /* OSL handle */
+ uint16 rxbufpost_sz;
+ uint16 rxbufpost;
+ uint16 max_rxbufpost;
+ uint32 tot_rxbufpost;
+ uint32 tot_rxcpl;
+ uint16 max_eventbufpost;
+ uint16 max_ioctlrespbufpost;
+ uint16 max_tsbufpost;
+ uint16 max_infobufpost;
+ uint16 infobufpost;
+ uint16 cur_event_bufs_posted;
+ uint16 cur_ioctlresp_bufs_posted;
+ uint16 cur_ts_bufs_posted;
+
+ /* Flow control mechanism based on active transmits pending */
+ osl_atomic_t active_tx_count; /* increments/decrements on every packet tx/tx_status */
+ uint16 h2d_max_txpost;
+#if defined(DHD_HTPUT_TUNABLES)
+ uint16 h2d_htput_max_txpost;
+#endif /* DHD_HTPUT_TUNABLES */
+ uint16 txp_threshold; /* optimization to write "n" tx items at a time to ring */
+
+ /* MsgBuf Ring info: has a dhd_dma_buf that is dynamically allocated */
+ msgbuf_ring_t h2dring_ctrl_subn; /* H2D ctrl message submission ring */
+ msgbuf_ring_t h2dring_rxp_subn; /* H2D RxBuf post ring */
+ msgbuf_ring_t d2hring_ctrl_cpln; /* D2H ctrl completion ring */
+ msgbuf_ring_t d2hring_tx_cpln; /* D2H Tx complete message ring */
+ msgbuf_ring_t d2hring_rx_cpln; /* D2H Rx complete message ring */
+ msgbuf_ring_t *h2dring_info_subn; /* H2D info submission ring */
+ msgbuf_ring_t *d2hring_info_cpln; /* D2H info completion ring */
+ msgbuf_ring_t *d2hring_edl; /* D2H Enhanced Debug Lane (EDL) ring */
+
+	msgbuf_ring_t *h2d_flowrings_pool; /* Pool of preallocated flowrings */
+ dhd_dma_buf_t flowrings_dma_buf; /* Contiguous DMA buffer for flowrings */
+ uint16 h2d_rings_total; /* total H2D (common rings + flowrings) */
+
+ uint32 rx_dataoffset;
+
+ dhd_mb_ring_t mb_ring_fn; /* called when dongle needs to be notified of new msg */
+ dhd_mb_ring_2_t mb_2_ring_fn; /* called when dongle needs to be notified of new msg */
+
+ /* ioctl related resources */
+ uint8 ioctl_state;
+ int16 ioctl_status; /* status returned from dongle */
+ uint16 ioctl_resplen;
+ dhd_ioctl_recieved_status_t ioctl_received;
+ uint curr_ioctl_cmd;
+ dhd_dma_buf_t retbuf; /* For holding ioctl response */
+ dhd_dma_buf_t ioctbuf; /* For holding ioctl request */
+
+ dhd_dma_buf_t d2h_dma_scratch_buf; /* For holding d2h scratch */
+
+ /* DMA-able arrays for holding WR and RD indices */
+ uint32 rw_index_sz; /* Size of a RD or WR index in dongle */
+ dhd_dma_buf_t h2d_dma_indx_wr_buf; /* Array of H2D WR indices */
+ dhd_dma_buf_t h2d_dma_indx_rd_buf; /* Array of H2D RD indices */
+ dhd_dma_buf_t d2h_dma_indx_wr_buf; /* Array of D2H WR indices */
+ dhd_dma_buf_t d2h_dma_indx_rd_buf; /* Array of D2H RD indices */
+ dhd_dma_buf_t h2d_ifrm_indx_wr_buf; /* Array of H2D WR indices for ifrm */
+
+ dhd_dma_buf_t host_bus_throughput_buf; /* bus throughput measure buffer */
+
+ dhd_dma_buf_t *flowring_buf; /* pool of flow ring buf */
+#ifdef DHD_DMA_INDICES_SEQNUM
+ char *h2d_dma_indx_rd_copy_buf; /* Local copy of H2D WR indices array */
+ char *d2h_dma_indx_wr_copy_buf; /* Local copy of D2H WR indices array */
+ uint32 h2d_dma_indx_rd_copy_bufsz; /* H2D WR indices array size */
+ uint32 d2h_dma_indx_wr_copy_bufsz; /* D2H WR indices array size */
+	uint32 host_seqnum; /* Sequence number for D2H DMA Indices sync */
+#endif /* DHD_DMA_INDICES_SEQNUM */
+ uint32 flowring_num;
+
+ d2h_sync_cb_t d2h_sync_cb; /* Sync on D2H DMA done: SEQNUM or XORCSUM */
+#ifdef EWP_EDL
+ d2h_edl_sync_cb_t d2h_edl_sync_cb; /* Sync on EDL D2H DMA done: SEQNUM or XORCSUM */
+#endif /* EWP_EDL */
+ ulong d2h_sync_wait_max; /* max number of wait loops to receive one msg */
+ ulong d2h_sync_wait_tot; /* total wait loops */
+
+ dhd_dmaxfer_t dmaxfer; /* for test/DMA loopback */
+
+ uint16 ioctl_seq_no;
+ uint16 data_seq_no; /* XXX this field is obsolete */
+ uint16 ioctl_trans_id;
+ void *pktid_ctrl_map; /* a pktid maps to a packet and its metadata */
+ void *pktid_rx_map; /* pktid map for rx path */
+ void *pktid_tx_map; /* pktid map for tx path */
+ bool metadata_dbg;
+ void *pktid_map_handle_ioctl;
+#ifdef DHD_MAP_PKTID_LOGGING
+ void *pktid_dma_map; /* pktid map for DMA MAP */
+ void *pktid_dma_unmap; /* pktid map for DMA UNMAP */
+#endif /* DHD_MAP_PKTID_LOGGING */
+ uint32 pktid_depleted_cnt; /* pktid depleted count */
+ /* netif tx queue stop count */
+ uint8 pktid_txq_stop_cnt;
+ /* netif tx queue start count */
+ uint8 pktid_txq_start_cnt;
+ uint64 ioctl_fillup_time; /* timestamp for ioctl fillup */
+ uint64 ioctl_ack_time; /* timestamp for ioctl ack */
+ uint64 ioctl_cmplt_time; /* timestamp for ioctl completion */
+
+ /* Applications/utilities can read tx and rx metadata using IOVARs */
+ uint16 rx_metadata_offset;
+ uint16 tx_metadata_offset;
+
+#if (defined(BCM_ROUTER_DHD) && defined(HNDCTF))
+ rxchain_info_t rxchain; /* chain of rx packets */
+#endif
+
+#if defined(DHD_D2H_SOFT_DOORBELL_SUPPORT)
+ /* Host's soft doorbell configuration */
+ bcmpcie_soft_doorbell_t soft_doorbell[BCMPCIE_D2H_COMMON_MSGRINGS];
+#endif /* DHD_D2H_SOFT_DOORBELL_SUPPORT */
+
+ /* Work Queues to be used by the producer and the consumer, and threshold
+ * when the WRITE index must be synced to consumer's workq
+ */
+ dhd_dma_buf_t fw_trap_buf; /* firmware trap buffer */
+
+	uint32 host_ipc_version; /* Host supported IPC rev */
+	uint32 device_ipc_version; /* FW supported IPC rev */
+	uint32 active_ipc_version; /* IPC rev negotiated and in use */
+#if defined(BCMINTERNAL) && defined(DHD_DBG_DUMP)
+ dhd_ioctl_trace_t ioctl_trace[MAX_IOCTL_TRACE_SIZE];
+ uint32 ioctl_trace_count;
+#endif /* defined(BCMINTERNAL) && defined(DHD_DBG_DUMP) */
+ dhd_dma_buf_t hostts_req_buf; /* For holding host timestamp request buf */
+ bool hostts_req_buf_inuse;
+ bool rx_ts_log_enabled;
+ bool tx_ts_log_enabled;
+#ifdef BTLOG
+ msgbuf_ring_t *h2dring_btlog_subn; /* H2D btlog submission ring */
+ msgbuf_ring_t *d2hring_btlog_cpln; /* D2H btlog completion ring */
+ uint16 btlogbufpost;
+ uint16 max_btlogbufpost;
+#endif /* BTLOG */
+#ifdef DHD_HMAPTEST
+ uint32 hmaptest_rx_active;
+ uint32 hmaptest_rx_pktid;
+ char *hmap_rx_buf_va;
+ dmaaddr_t hmap_rx_buf_pa;
+ uint32 hmap_rx_buf_len;
+
+ uint32 hmaptest_tx_active;
+ uint32 hmaptest_tx_pktid;
+ char *hmap_tx_buf_va;
+ dmaaddr_t hmap_tx_buf_pa;
+ uint32 hmap_tx_buf_len;
+ dhd_hmaptest_t hmaptest; /* for hmaptest */
+ bool hmap_enabled; /* TRUE = hmap is enabled */
+#endif /* DHD_HMAPTEST */
+#ifdef SNAPSHOT_UPLOAD
+ dhd_dma_buf_t snapshot_upload_buf; /* snapshot upload buffer */
+ uint32 snapshot_upload_len; /* snapshot uploaded len */
+	uint8 snapshot_type; /* snapshot upload type */
+ bool snapshot_cmpl_pending; /* snapshot completion pending */
+#endif /* SNAPSHOT_UPLOAD */
+ bool no_retry;
+ bool no_aggr;
+ bool fixed_rate;
+ dhd_dma_buf_t host_scb_buf; /* scb host offload buffer */
+#ifdef DHD_HP2P
+ msgbuf_ring_t *d2hring_hp2p_txcpl; /* D2H HPP Tx completion ring */
+ msgbuf_ring_t *d2hring_hp2p_rxcpl; /* D2H HPP Rx completion ring */
+#endif /* DHD_HP2P */
+ bool no_tx_resource;
+ uint32 txcpl_db_cnt;
+#ifdef AGG_H2D_DB
+ agg_h2d_db_info_t agg_h2d_db_info;
+#endif /* AGG_H2D_DB */
+ uint64 tx_h2d_db_cnt;
+} dhd_prot_t;
+
+#ifdef DHD_EWPR_VER2
+#define HANG_INFO_BASE64_BUFFER_SIZE 640
+#endif
+
+#ifdef DHD_DUMP_PCIE_RINGS
+static
+int dhd_ring_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file,
+ const void *user_buf, unsigned long *file_posn);
+#ifdef EWP_EDL
+static
+int dhd_edl_ring_hdr_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file, const void *user_buf,
+ unsigned long *file_posn);
+#endif /* EWP_EDL */
+#endif /* DHD_DUMP_PCIE_RINGS */
+extern bool dhd_timesync_delay_post_bufs(dhd_pub_t *dhdp);
+extern void dhd_schedule_dmaxfer_free(dhd_pub_t* dhdp, dmaxref_mem_map_t *dmmap);
+/* Convert a dmaaddr_t to a base_addr with htol operations */
+static INLINE void dhd_base_addr_htolpa(sh_addr_t *base_addr, dmaaddr_t pa);
+
+/* APIs for managing a DMA-able buffer */
+static int dhd_dma_buf_audit(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
+static void dhd_dma_buf_reset(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
+
+/* msgbuf ring management */
+static int dhd_prot_allocate_bufs(dhd_pub_t *dhd, dhd_prot_t *prot);
+static int dhd_prot_ring_attach(dhd_pub_t *dhd, msgbuf_ring_t *ring,
+ const char *name, uint16 max_items, uint16 len_item, uint16 ringid);
+static void dhd_prot_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring);
+static void dhd_prot_ring_reset(dhd_pub_t *dhd, msgbuf_ring_t *ring);
+static void dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t *ring);
+static void dhd_prot_process_fw_timestamp(dhd_pub_t *dhd, void* buf);
+
+/* Pool of pre-allocated msgbuf_ring_t with DMA-able buffers for Flowrings */
+static int dhd_prot_flowrings_pool_attach(dhd_pub_t *dhd);
+static void dhd_prot_flowrings_pool_reset(dhd_pub_t *dhd);
+static void dhd_prot_flowrings_pool_detach(dhd_pub_t *dhd);
+
+/* Fetch and Release a flowring msgbuf_ring from flowring pool */
+static msgbuf_ring_t *dhd_prot_flowrings_pool_fetch(dhd_pub_t *dhd,
+ uint16 flowid);
+/* see also dhd_prot_flowrings_pool_release() in dhd_prot.h */
+
+/* Producer: Allocate space in a msgbuf ring */
+static void* dhd_prot_alloc_ring_space(dhd_pub_t *dhd, msgbuf_ring_t *ring,
+ uint16 nitems, uint16 *alloced, bool exactly_nitems);
+static void* dhd_prot_get_ring_space(msgbuf_ring_t *ring, uint16 nitems,
+ uint16 *alloced, bool exactly_nitems);
+
+/* Consumer: Determine the location where the next message may be consumed */
+static uint8* dhd_prot_get_read_addr(dhd_pub_t *dhd, msgbuf_ring_t *ring,
+ uint32 *available_len);
+
+/* Producer (WR index update) or Consumer (RD index update) indication */
+static void dhd_prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t *ring,
+ void *p, uint16 len);
+
+#ifdef AGG_H2D_DB
+static void dhd_prot_agg_db_ring_write(dhd_pub_t *dhd, msgbuf_ring_t * ring,
+ void* p, uint16 len);
+static void dhd_prot_aggregate_db_ring_door_bell(dhd_pub_t *dhd, uint16 flowid, bool ring_db);
+static void dhd_prot_txdata_aggr_db_write_flush(dhd_pub_t *dhd, uint16 flowid);
+#endif /* AGG_H2D_DB */
+static void dhd_prot_ring_doorbell(dhd_pub_t *dhd, uint32 value);
+static void dhd_prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t *ring);
+
+static INLINE int dhd_prot_dma_indx_alloc(dhd_pub_t *dhd, uint8 type,
+ dhd_dma_buf_t *dma_buf, uint32 bufsz);
+
+/* Set/Get a RD or WR index in the array of indices */
+/* See also: dhd_prot_dma_indx_init() */
+void dhd_prot_dma_indx_set(dhd_pub_t *dhd, uint16 new_index, uint8 type,
+ uint16 ringid);
+static uint16 dhd_prot_dma_indx_get(dhd_pub_t *dhd, uint8 type, uint16 ringid);
+
+/* Locate a packet given a pktid */
+static INLINE void *dhd_prot_packet_get(dhd_pub_t *dhd, uint32 pktid, uint8 pkttype,
+ bool free_pktid);
+/* Locate a packet given a PktId and free it. */
+static INLINE void dhd_prot_packet_free(dhd_pub_t *dhd, void *pkt, uint8 pkttype, bool send);
+
+static int dhd_msgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd,
+ void *buf, uint len, uint8 action);
+static int dhd_msgbuf_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd,
+ void *buf, uint len, uint8 action);
+static int dhd_msgbuf_wait_ioctl_cmplt(dhd_pub_t *dhd, uint32 len, void *buf);
+static int dhd_fillup_ioct_reqst(dhd_pub_t *dhd, uint16 len, uint cmd,
+ void *buf, int ifidx);
+
+/* Post buffers for Rx, control ioctl response and events */
+static uint16 dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, uint8 msgid, uint32 max_to_post);
+static void dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *pub);
+static void dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *pub);
+static void dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd, bool use_rsv_pktid);
+static int dhd_prot_rxbuf_post(dhd_pub_t *dhd, uint16 count, bool use_rsv_pktid);
+static int dhd_msgbuf_rxbuf_post_ts_bufs(dhd_pub_t *pub);
+
+static void dhd_prot_return_rxbuf(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint32 pktid, uint32 rxcnt);
+
+#if defined(BCMINTERNAL) && defined(DHD_DBG_DUMP)
+static void dhd_prot_ioctl_trace(dhd_pub_t *dhd, ioctl_req_msg_t *ioct_rqst, uchar *buf, int len);
+static void dhd_prot_ioctl_dump(dhd_prot_t *prot, struct bcmstrbuf *strbuf);
+#endif /* defined(BCMINTERNAL) && defined(DHD_DBG_DUMP) */
+
+/* D2H Message handling */
+static int dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8 *buf, uint32 len);
+
+/* D2H Message handlers */
+static void dhd_prot_noop(dhd_pub_t *dhd, void *msg);
+static void dhd_prot_txstatus_process(dhd_pub_t *dhd, void *msg);
+static void dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void *msg);
+static void dhd_prot_ioctack_process(dhd_pub_t *dhd, void *msg);
+static void dhd_prot_ringstatus_process(dhd_pub_t *dhd, void *msg);
+static void dhd_prot_genstatus_process(dhd_pub_t *dhd, void *msg);
+static void dhd_prot_event_process(dhd_pub_t *dhd, void *msg);
+
+/* Loopback test with dongle */
+static void dmaxfer_free_dmaaddr(dhd_pub_t *dhd, dhd_dmaxfer_t *dma);
+static int dmaxfer_prepare_dmaaddr(dhd_pub_t *dhd, uint len, uint srcdelay,
+ uint destdelay, dhd_dmaxfer_t *dma);
+static void dhd_msgbuf_dmaxfer_process(dhd_pub_t *dhd, void *msg);
+
+/* Flowring management communication with dongle */
+static void dhd_prot_flow_ring_create_response_process(dhd_pub_t *dhd, void *msg);
+static void dhd_prot_flow_ring_delete_response_process(dhd_pub_t *dhd, void *msg);
+static void dhd_prot_flow_ring_flush_response_process(dhd_pub_t *dhd, void *msg);
+static void dhd_prot_process_flow_ring_resume_response(dhd_pub_t *dhd, void* msg);
+static void dhd_prot_process_flow_ring_suspend_response(dhd_pub_t *dhd, void* msg);
+
+/* Monitor Mode */
+#ifdef WL_MONITOR
+extern bool dhd_monitor_enabled(dhd_pub_t *dhd, int ifidx);
+extern void dhd_rx_mon_pkt(dhd_pub_t *dhdp, host_rxbuf_cmpl_t* msg, void *pkt, int ifidx);
+#endif /* WL_MONITOR */
+
+/* Configure a soft doorbell per D2H ring */
+static void dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd_pub_t *dhd);
+static void dhd_prot_process_d2h_ring_config_complete(dhd_pub_t *dhd, void *msg);
+static void dhd_prot_process_d2h_ring_create_complete(dhd_pub_t *dhd, void *buf);
+#if !defined(BCM_ROUTER_DHD)
+static void dhd_prot_process_h2d_ring_create_complete(dhd_pub_t *dhd, void *buf);
+static void dhd_prot_process_infobuf_complete(dhd_pub_t *dhd, void* buf);
+#endif /* !BCM_ROUTER_DHD */
+static void dhd_prot_process_d2h_mb_data(dhd_pub_t *dhd, void* buf);
+static void dhd_prot_detach_info_rings(dhd_pub_t *dhd);
+#ifdef BTLOG
+static void dhd_prot_process_btlog_complete(dhd_pub_t *dhd, void* buf);
+static void dhd_prot_detach_btlog_rings(dhd_pub_t *dhd);
+#endif /* BTLOG */
+#ifdef DHD_HP2P
+static void dhd_prot_detach_hp2p_rings(dhd_pub_t *dhd);
+#endif /* DHD_HP2P */
+#ifdef EWP_EDL
+static void dhd_prot_detach_edl_rings(dhd_pub_t *dhd);
+#endif
+static void dhd_prot_process_d2h_host_ts_complete(dhd_pub_t *dhd, void* buf);
+static void dhd_prot_process_snapshot_complete(dhd_pub_t *dhd, void *buf);
+
+#ifdef DHD_TIMESYNC
+extern void dhd_parse_proto(uint8 *pktdata, dhd_pkt_parse_t *parse);
+#endif
+
+#ifdef DHD_FLOW_RING_STATUS_TRACE
+void dhd_dump_bus_flow_ring_status_isr_trace(dhd_bus_t *bus, struct bcmstrbuf *strbuf);
+void dhd_dump_bus_flow_ring_status_dpc_trace(dhd_bus_t *bus, struct bcmstrbuf *strbuf);
+#endif /* DHD_FLOW_RING_STATUS_TRACE */
+
+#ifdef DHD_TX_PROFILE
+extern bool dhd_protocol_matches_profile(uint8 *p, int plen, const
+ dhd_tx_profile_protocol_t *proto, bool is_host_sfhllc);
+#endif /* defined(DHD_TX_PROFILE) */
+
+#ifdef DHD_HP2P
+static void dhd_update_hp2p_rxstats(dhd_pub_t *dhd, host_rxbuf_cmpl_t *rxstatus);
+static void dhd_update_hp2p_txstats(dhd_pub_t *dhd, host_txbuf_cmpl_t *txstatus);
+static void dhd_calc_hp2p_burst(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint16 flowid);
+static void dhd_update_hp2p_txdesc(dhd_pub_t *dhd, host_txbuf_post_t *txdesc);
+#endif
+typedef void (*dhd_msgbuf_func_t)(dhd_pub_t *dhd, void *msg);
+
+/** callback functions for messages generated by the dongle */
+#define MSG_TYPE_INVALID 0
+
+static dhd_msgbuf_func_t table_lookup[DHD_PROT_FUNCS] = {
+ dhd_prot_noop, /* 0 is MSG_TYPE_INVALID */
+ dhd_prot_genstatus_process, /* MSG_TYPE_GEN_STATUS */
+ dhd_prot_ringstatus_process, /* MSG_TYPE_RING_STATUS */
+ NULL,
+ dhd_prot_flow_ring_create_response_process, /* MSG_TYPE_FLOW_RING_CREATE_CMPLT */
+ NULL,
+ dhd_prot_flow_ring_delete_response_process, /* MSG_TYPE_FLOW_RING_DELETE_CMPLT */
+ NULL,
+ dhd_prot_flow_ring_flush_response_process, /* MSG_TYPE_FLOW_RING_FLUSH_CMPLT */
+ NULL,
+ dhd_prot_ioctack_process, /* MSG_TYPE_IOCTLPTR_REQ_ACK */
+ NULL,
+ dhd_prot_ioctcmplt_process, /* MSG_TYPE_IOCTL_CMPLT */
+ NULL,
+ dhd_prot_event_process, /* MSG_TYPE_WL_EVENT */
+ NULL,
+ dhd_prot_txstatus_process, /* MSG_TYPE_TX_STATUS */
+ NULL,
+ NULL, /* MSG_TYPE_RX_CMPLT use dedicated handler */
+ NULL,
+ dhd_msgbuf_dmaxfer_process, /* MSG_TYPE_LPBK_DMAXFER_CMPLT */
+ NULL, /* MSG_TYPE_FLOW_RING_RESUME */
+ dhd_prot_process_flow_ring_resume_response, /* MSG_TYPE_FLOW_RING_RESUME_CMPLT */
+ NULL, /* MSG_TYPE_FLOW_RING_SUSPEND */
+ dhd_prot_process_flow_ring_suspend_response, /* MSG_TYPE_FLOW_RING_SUSPEND_CMPLT */
+ NULL, /* MSG_TYPE_INFO_BUF_POST */
+#if defined(BCM_ROUTER_DHD)
+ NULL, /* MSG_TYPE_INFO_BUF_CMPLT */
+#else
+ dhd_prot_process_infobuf_complete, /* MSG_TYPE_INFO_BUF_CMPLT */
+#endif /* BCM_ROUTER_DHD */
+ NULL, /* MSG_TYPE_H2D_RING_CREATE */
+ NULL, /* MSG_TYPE_D2H_RING_CREATE */
+#if defined(BCM_ROUTER_DHD)
+ NULL, /* MSG_TYPE_H2D_RING_CREATE_CMPLT */
+#else
+ dhd_prot_process_h2d_ring_create_complete, /* MSG_TYPE_H2D_RING_CREATE_CMPLT */
+#endif /* BCM_ROUTER_DHD */
+ dhd_prot_process_d2h_ring_create_complete, /* MSG_TYPE_D2H_RING_CREATE_CMPLT */
+ NULL, /* MSG_TYPE_H2D_RING_CONFIG */
+ NULL, /* MSG_TYPE_D2H_RING_CONFIG */
+ NULL, /* MSG_TYPE_H2D_RING_CONFIG_CMPLT */
+ dhd_prot_process_d2h_ring_config_complete, /* MSG_TYPE_D2H_RING_CONFIG_CMPLT */
+ NULL, /* MSG_TYPE_H2D_MAILBOX_DATA */
+ dhd_prot_process_d2h_mb_data, /* MSG_TYPE_D2H_MAILBOX_DATA */
+ NULL, /* MSG_TYPE_TIMSTAMP_BUFPOST */
+ NULL, /* MSG_TYPE_HOSTTIMSTAMP */
+ dhd_prot_process_d2h_host_ts_complete, /* MSG_TYPE_HOSTTIMSTAMP_CMPLT */
+ dhd_prot_process_fw_timestamp, /* MSG_TYPE_FIRMWARE_TIMESTAMP */
+ NULL, /* MSG_TYPE_SNAPSHOT_UPLOAD */
+ dhd_prot_process_snapshot_complete, /* MSG_TYPE_SNAPSHOT_CMPLT */
+};
+
+#if (defined(BCM_ROUTER_DHD) && defined(HNDCTF))
+/* Related to router CPU mapping per radio core */
+#define DHD_RX_CHAINING
+#endif /* BCM_ROUTER_DHD && HNDCTF */
+
+#ifdef DHD_RX_CHAINING
+
+#define PKT_CTF_CHAINABLE(dhd, ifidx, evh, prio, h_sa, h_da, h_prio) \
+ (dhd_wet_chainable(dhd) && \
+ dhd_rx_pkt_chainable((dhd), (ifidx)) && \
+ !ETHER_ISNULLDEST(((struct ether_header *)(evh))->ether_dhost) && \
+ !ETHER_ISMULTI(((struct ether_header *)(evh))->ether_dhost) && \
+ !eacmp((h_da), ((struct ether_header *)(evh))->ether_dhost) && \
+ !eacmp((h_sa), ((struct ether_header *)(evh))->ether_shost) && \
+ ((h_prio) == (prio)) && (dhd_ctf_hotbrc_check((dhd), (evh), (ifidx))) && \
+ ((((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IP)) || \
+ (((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IPV6))))
+
+static INLINE void dhd_rxchain_reset(rxchain_info_t *rxchain);
+static void dhd_rxchain_frame(dhd_pub_t *dhd, void *pkt, uint ifidx);
+static void dhd_rxchain_commit(dhd_pub_t *dhd);
+
+#define DHD_PKT_CTF_MAX_CHAIN_LEN 64
+
+#endif /* DHD_RX_CHAINING */
+
+#ifdef DHD_EFI
+#define DHD_LPBKDTDUMP_ON() (1)
+#else
+#define DHD_LPBKDTDUMP_ON() (dhd_msg_level & DHD_LPBKDTDUMP_VAL)
+#endif
+
+static void dhd_prot_h2d_sync_init(dhd_pub_t *dhd);
+
+#ifdef D2H_MINIDUMP
+dhd_dma_buf_t *
+dhd_prot_get_minidump_buf(dhd_pub_t *dhd)
+{
+ return &dhd->prot->fw_trap_buf;
+}
+#endif /* D2H_MINIDUMP */
+
+uint16
+dhd_prot_get_rxbufpost_sz(dhd_pub_t *dhd)
+{
+ return dhd->prot->rxbufpost_sz;
+}
+
+uint16
+dhd_prot_get_h2d_rx_post_active(dhd_pub_t *dhd)
+{
+ dhd_prot_t *prot = dhd->prot;
+ msgbuf_ring_t *flow_ring = &prot->h2dring_rxp_subn;
+ uint16 rd, wr;
+
+ /* Since wr is owned by host in h2d direction, directly read wr */
+ wr = flow_ring->wr;
+
+ if (dhd->dma_d2h_ring_upd_support) {
+ rd = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, flow_ring->idx);
+ } else {
+ dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, flow_ring->idx);
+ }
+ return NTXPACTIVE(rd, wr, flow_ring->max_items);
+}
+
+uint16
+dhd_prot_get_d2h_rx_cpln_active(dhd_pub_t *dhd)
+{
+ dhd_prot_t *prot = dhd->prot;
+ msgbuf_ring_t *flow_ring = &prot->d2hring_rx_cpln;
+ uint16 rd, wr;
+
+ if (dhd->dma_d2h_ring_upd_support) {
+ wr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, flow_ring->idx);
+ } else {
+ dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, flow_ring->idx);
+ }
+
+ /* Since rd is owned by host in d2h direction, directly read rd */
+ rd = flow_ring->rd;
+
+ return NTXPACTIVE(rd, wr, flow_ring->max_items);
+}
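+
+/*
+ * Both helpers above count in-flight work items as the ring distance from
+ * rd to wr. Illustrative example: with max_items 256, rd 250 and wr 4,
+ * NTXPACTIVE yields (256 - 250) + 4 = 10 outstanding items.
+ */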
+
+bool
+dhd_prot_is_cmpl_ring_empty(dhd_pub_t *dhd, void *prot_info)
+{
+ msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)prot_info;
+ uint16 rd, wr;
+ bool ret;
+
+ if (dhd->dma_d2h_ring_upd_support) {
+ wr = flow_ring->wr;
+ } else {
+ dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, flow_ring->idx);
+ }
+ if (dhd->dma_h2d_ring_upd_support) {
+ rd = flow_ring->rd;
+ } else {
+ dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, flow_ring->idx);
+ }
+ ret = (wr == rd) ? TRUE : FALSE;
+ return ret;
+}
+
+void
+dhd_prot_dump_ring_ptrs(void *prot_info)
+{
+ msgbuf_ring_t *ring = (msgbuf_ring_t *)prot_info;
+ DHD_ERROR(("%s curr_rd: %d rd: %d wr: %d \n", __FUNCTION__,
+ ring->curr_rd, ring->rd, ring->wr));
+}
+
+uint16
+dhd_prot_get_h2d_max_txpost(dhd_pub_t *dhd)
+{
+ return (uint16)h2d_max_txpost;
+}
+
+void
+dhd_prot_set_h2d_max_txpost(dhd_pub_t *dhd, uint16 max_txpost)
+{
+ h2d_max_txpost = max_txpost;
+}
+
+#if defined(DHD_HTPUT_TUNABLES)
+uint16
+dhd_prot_get_h2d_htput_max_txpost(dhd_pub_t *dhd)
+{
+ return (uint16)h2d_htput_max_txpost;
+}
+
+void
+dhd_prot_set_h2d_htput_max_txpost(dhd_pub_t *dhd, uint16 htput_max_txpost)
+{
+ h2d_htput_max_txpost = htput_max_txpost;
+}
+
+#endif /* DHD_HTPUT_TUNABLES */
+
+/**
+ * D2H DMA to completion callback handlers. Based on the mode advertised by the
+ * dongle through the PCIE shared region, the appropriate callback will be
+ * registered in the proto layer to be invoked prior to processing any message
+ * from a D2H DMA ring. If the dongle uses a read barrier or another mode that
+ * does not require host participation, then a noop callback handler will be
+ * bound that simply returns the msg_type.
+ */
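+/*
+ * Illustrative dispatch flow (a sketch, not a verbatim call site): the bound
+ * sync callback validates a work item and returns its msg_type, which then
+ * indexes the D2H handler table above:
+ *
+ *   uint8 msg_type = prot->d2h_sync_cb(dhd, ring, msg, ring->item_len);
+ *   if (msg_type < DHD_PROT_FUNCS && table_lookup[msg_type] != NULL)
+ *       table_lookup[msg_type](dhd, (void *)msg);
+ */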
+static void dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, uint32 msg_seqnum, msgbuf_ring_t *ring,
+ uint32 tries, volatile uchar *msg, int msglen);
+static uint8 dhd_prot_d2h_sync_seqnum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
+ volatile cmn_msg_hdr_t *msg, int msglen);
+static uint8 dhd_prot_d2h_sync_xorcsum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
+ volatile cmn_msg_hdr_t *msg, int msglen);
+static uint8 dhd_prot_d2h_sync_none(dhd_pub_t *dhd, msgbuf_ring_t *ring,
+ volatile cmn_msg_hdr_t *msg, int msglen);
+static void dhd_prot_d2h_sync_init(dhd_pub_t *dhd);
+static int dhd_send_d2h_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create,
+ uint16 ring_type, uint32 id);
+static int dhd_send_h2d_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create,
+ uint8 type, uint32 id);
+
+/**
+ * dhd_prot_d2h_sync_livelock - when the host determines that a DMA transfer has
+ * not completed after the maximum number of retries, a livelock condition has
+ * occurred. The host averts the livelock by dropping this message and moving to
+ * the next. The dropped message can lead to a packet leak, or to something more
+ * disastrous if it happens to be a control response.
+ * Here we log the condition; one may choose to reboot the dongle.
+ */
+static void
+dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, uint32 msg_seqnum, msgbuf_ring_t *ring, uint32 tries,
+ volatile uchar *msg, int msglen)
+{
+ uint32 ring_seqnum = ring->seqnum;
+
+ if (dhd_query_bus_erros(dhd)) {
+ return;
+ }
+
+ DHD_ERROR((
+ "LIVELOCK DHD<%p> ring<%s> msg_seqnum<%u> ring_seqnum<%u:%u> tries<%u> max<%lu>"
+ " tot<%lu> dma_buf va<%p> msg<%p> curr_rd<%d> rd<%d> wr<%d>\n",
+ dhd, ring->name, msg_seqnum, ring_seqnum, ring_seqnum % D2H_EPOCH_MODULO, tries,
+ dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot,
+ ring->dma_buf.va, msg, ring->curr_rd, ring->rd, ring->wr));
+
+ dhd_prhex("D2H MsgBuf Failure", msg, msglen, DHD_ERROR_VAL);
+
+ /* Try to resume if already suspended or suspend in progress */
+#ifdef DHD_PCIE_RUNTIMEPM
+ dhdpcie_runtime_bus_wake(dhd, CAN_SLEEP(), __builtin_return_address(0));
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+ /* Skip if still in suspended or suspend in progress */
+ if (DHD_BUS_CHECK_SUSPEND_OR_ANY_SUSPEND_IN_PROGRESS(dhd)) {
+ DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state, so skip\n",
+ __FUNCTION__, dhd->busstate, dhd->dhd_bus_busy_state));
+ goto exit;
+ }
+
+ dhd_bus_dump_console_buffer(dhd->bus);
+ dhd_prot_debug_info_print(dhd);
+
+#ifdef DHD_FW_COREDUMP
+ if (dhd->memdump_enabled) {
+ /* collect core dump */
+ dhd->memdump_type = DUMP_TYPE_BY_LIVELOCK;
+ dhd_bus_mem_dump(dhd);
+ }
+#endif /* DHD_FW_COREDUMP */
+
+exit:
+ dhd_schedule_reset(dhd);
+
+#ifdef OEM_ANDROID
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
+ dhd->bus->no_cfg_restore = 1;
+#endif /* CONFIG_ARCH_MSM */
+ /* XXX Trigger HANG event for recovery */
+ dhd->hang_reason = HANG_REASON_MSGBUF_LIVELOCK;
+ dhd_os_send_hang_message(dhd);
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+#endif /* OEM_ANDROID */
+ dhd->livelock_occured = TRUE;
+}
+
+/**
+ * dhd_prot_d2h_sync_seqnum - Sync on a D2H DMA completion using the SEQNUM
+ * mode. Sequence number is always in the last word of a message.
+ */
+static uint8
+BCMFASTPATH(dhd_prot_d2h_sync_seqnum)(dhd_pub_t *dhd, msgbuf_ring_t *ring,
+ volatile cmn_msg_hdr_t *msg, int msglen)
+{
+ uint32 tries;
+ uint32 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO;
+ int num_words = msglen / sizeof(uint32); /* num of 32bit words */
+ volatile uint32 *marker = (volatile uint32 *)msg + (num_words - 1); /* last word */
+ dhd_prot_t *prot = dhd->prot;
+ uint32 msg_seqnum;
+ uint32 step = 0;
+ uint32 delay = PCIE_D2H_SYNC_DELAY;
+ uint32 total_tries = 0;
+
+ ASSERT(msglen == ring->item_len);
+
+ BCM_REFERENCE(delay);
+ /*
+ * For retries we use a stepper algorithm. Every time the Dongle
+ * comes out of the D3 cold state, the first D2H mem2mem DMA takes
+ * longer to complete, leading to livelock issues.
+ *
+ * Case 1 - Apart from the Host CPU, some other bus master is
+ * accessing the DDR port, probably a page close to the ring,
+ * so PCIE does not get a chance to update the memory.
+ * Solution - Increase the number of tries.
+ *
+ * Case 2 - The 50usec breathing time given by the Host CPU is not
+ * sufficient for the PCIe RC to start its work.
+ * Solution - Increase the delay in a stepper fashion, so that no
+ * unwanted extra delay is introduced in normal conditions.
+ */
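+ /*
+ * Illustration (assuming PCIE_D2H_SYNC_DELAY is 50usec): step 1 pauses
+ * 50usec between polls, step 2 pauses 100usec, and so on, so the
+ * per-poll pause grows linearly with the step, while a first-try
+ * completion jumps straight to dma_completed with no extra delay.
+ */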
+ for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) {
+ for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
+ msg_seqnum = *marker;
+ if (ltoh32(msg_seqnum) == ring_seqnum) { /* dma up to last word done */
+ ring->seqnum++; /* next expected sequence number */
+ /* Check for the LIVELOCK induce flag, which is set by firing
+ * a dhd iovar to induce a LIVELOCK error. If the flag is set,
+ * MSG_TYPE_INVALID is returned, which results in a LIVELOCK error.
+ */
+ if (dhd->dhd_induce_error != DHD_INDUCE_LIVELOCK) {
+ goto dma_completed;
+ }
+ }
+
+ total_tries = (uint32)(((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries);
+
+ if (total_tries > prot->d2h_sync_wait_max)
+ prot->d2h_sync_wait_max = total_tries;
+
+ OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
+ OSL_CPU_RELAX(); /* CPU relax for msg_seqnum value to update */
+ OSL_DELAY(delay * step); /* Add stepper delay */
+
+ } /* for PCIE_D2H_SYNC_WAIT_TRIES */
+ } /* for PCIE_D2H_SYNC_NUM_OF_STEPS */
+
+ dhd_prot_d2h_sync_livelock(dhd, msg_seqnum, ring, total_tries,
+ (volatile uchar *) msg, msglen);
+
+ ring->seqnum++; /* skip this message ... leak of a pktid */
+ return MSG_TYPE_INVALID; /* invalid msg_type 0 -> noop callback */
+
+dma_completed:
+
+ prot->d2h_sync_wait_tot += tries;
+ return msg->msg_type;
+}
+
+/**
+ * dhd_prot_d2h_sync_xorcsum - Sync on a D2H DMA completion using the XORCSUM
+ * mode. The xorcsum is placed in the last word of a message. Dongle will also
+ * place a seqnum in the epoch field of the cmn_msg_hdr.
+ */
+static uint8
+BCMFASTPATH(dhd_prot_d2h_sync_xorcsum)(dhd_pub_t *dhd, msgbuf_ring_t *ring,
+ volatile cmn_msg_hdr_t *msg, int msglen)
+{
+ uint32 tries;
+ uint32 prot_checksum = 0; /* computed checksum */
+ int num_words = msglen / sizeof(uint32); /* num of 32bit words */
+ uint8 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO;
+ dhd_prot_t *prot = dhd->prot;
+ uint32 step = 0;
+ uint32 delay = PCIE_D2H_SYNC_DELAY;
+ uint32 total_tries = 0;
+
+ ASSERT(msglen == ring->item_len);
+
+ BCM_REFERENCE(delay);
+ /*
+ * For retries we use a stepper algorithm. Every time the Dongle
+ * comes out of the D3 cold state, the first D2H mem2mem DMA takes
+ * longer to complete, leading to livelock issues.
+ *
+ * Case 1 - Apart from the Host CPU, some other bus master is
+ * accessing the DDR port, probably a page close to the ring,
+ * so PCIE does not get a chance to update the memory.
+ * Solution - Increase the number of tries.
+ *
+ * Case 2 - The 50usec breathing time given by the Host CPU is not
+ * sufficient for the PCIe RC to start its work.
+ * Solution - Increase the delay in a stepper fashion, so that no
+ * unwanted extra delay is introduced in normal conditions.
+ */
+ for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) {
+ for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
+ /* First verify whether the seqnum has been updated;
+ * only then check the xorcsum.
+ * Once both seqnum and xorcsum are correct, the
+ * complete message has arrived.
+ */
+ if (msg->epoch == ring_seqnum) {
+ prot_checksum = bcm_compute_xor32((volatile uint32 *)msg,
+ num_words);
+ if (prot_checksum == 0U) { /* checksum is OK */
+ ring->seqnum++; /* next expected sequence number */
+ /* Check for the LIVELOCK induce flag, which is set by firing
+ * a dhd iovar to induce a LIVELOCK error. If the flag is set,
+ * MSG_TYPE_INVALID is returned, which results in a
+ * LIVELOCK error.
+ */
+ if (dhd->dhd_induce_error != DHD_INDUCE_LIVELOCK) {
+ goto dma_completed;
+ }
+ }
+ }
+
+ total_tries = ((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries;
+
+ if (total_tries > prot->d2h_sync_wait_max)
+ prot->d2h_sync_wait_max = total_tries;
+
+ OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
+ OSL_CPU_RELAX(); /* CPU relax for msg_seqnum value to update */
+ OSL_DELAY(delay * step); /* Add stepper delay */
+
+ } /* for PCIE_D2H_SYNC_WAIT_TRIES */
+ } /* for PCIE_D2H_SYNC_NUM_OF_STEPS */
+
+ DHD_ERROR(("%s: prot_checksum = 0x%x\n", __FUNCTION__, prot_checksum));
+ dhd_prot_d2h_sync_livelock(dhd, msg->epoch, ring, total_tries,
+ (volatile uchar *) msg, msglen);
+
+ ring->seqnum++; /* skip this message ... leak of a pktid */
+ return MSG_TYPE_INVALID; /* invalid msg_type 0 -> noop callback */
+
+dma_completed:
+
+ prot->d2h_sync_wait_tot += tries;
+ return msg->msg_type;
+}
+
+/**
+ * dhd_prot_d2h_sync_none - The dongle ensures that the DMA will complete, so
+ * the host need not sync. This noop sync handler will be bound when the dongle
+ * advertises that neither the SEQNUM nor XORCSUM mode of DMA sync is required.
+ */
+static uint8
+BCMFASTPATH(dhd_prot_d2h_sync_none)(dhd_pub_t *dhd, msgbuf_ring_t *ring,
+ volatile cmn_msg_hdr_t *msg, int msglen)
+{
+ /* Check for the LIVELOCK induce flag, which is set by firing
+ * a dhd iovar to induce a LIVELOCK error. If the flag is set,
+ * MSG_TYPE_INVALID is returned, which results in a LIVELOCK error.
+ */
+ if (dhd->dhd_induce_error == DHD_INDUCE_LIVELOCK) {
+ DHD_ERROR(("%s: Inducing livelock\n", __FUNCTION__));
+ return MSG_TYPE_INVALID;
+ } else {
+ return msg->msg_type;
+ }
+}
+
+#ifdef EWP_EDL
+/**
+ * dhd_prot_d2h_sync_edl - Sync on a D2H DMA completion by validating the cmn_msg_hdr_t
+ * header values at both the beginning and end of the payload.
+ * The cmn_msg_hdr_t is placed at the start and end of the payload
+ * in each work item in the EDL ring.
+ * Dongle will place a seqnum inside the cmn_msg_hdr_t 'epoch' field
+ * and the length of the payload in the 'request_id' field.
+ * Structure of each work item in the EDL ring:
+ * | cmn_msg_hdr_t | payload (var len) | cmn_msg_hdr_t |
+ * NOTE: calculating an xorcsum over the entire payload (max length 1648 bytes)
+ * was felt to be too costly on the dongle side, taking up too many ARM cycles,
+ * hence the xorcsum sync method is not used for the EDL ring.
+ */
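+/*
+ * Example layout (illustrative sizes): for a work item carrying a 16 byte
+ * payload, i.e. request_id = 16:
+ *   offset 0                          : cmn_msg_hdr_t (header)
+ *   offset sizeof(cmn_msg_hdr_t)      : 16 byte payload
+ *   offset sizeof(cmn_msg_hdr_t) + 16 : cmn_msg_hdr_t (trailer)
+ * The trailer must echo the header's epoch, msg_type and request_id for the
+ * work item to be accepted.
+ */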
+static int
+BCMFASTPATH(dhd_prot_d2h_sync_edl)(dhd_pub_t *dhd, msgbuf_ring_t *ring,
+ volatile cmn_msg_hdr_t *msg)
+{
+ uint32 tries;
+ int msglen = 0, len = 0;
+ uint32 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO;
+ dhd_prot_t *prot = dhd->prot;
+ uint32 step = 0;
+ uint32 delay = PCIE_D2H_SYNC_DELAY;
+ uint32 total_tries = 0;
+ volatile cmn_msg_hdr_t *trailer = NULL;
+ volatile uint8 *buf = NULL;
+ bool valid_msg = FALSE;
+
+ BCM_REFERENCE(delay);
+ /*
+ * For retries we use a stepper algorithm. Every time the Dongle
+ * comes out of the D3 cold state, the first D2H mem2mem DMA takes
+ * longer to complete, leading to livelock issues.
+ *
+ * Case 1 - Apart from the Host CPU, some other bus master is
+ * accessing the DDR port, probably a page close to the ring,
+ * so PCIE does not get a chance to update the memory.
+ * Solution - Increase the number of tries.
+ *
+ * Case 2 - The 50usec breathing time given by the Host CPU is not
+ * sufficient for the PCIe RC to start its work.
+ * Solution - Increase the delay in a stepper fashion, so that no
+ * unwanted extra delay is introduced in normal conditions.
+ */
+ for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) {
+ for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
+ /* First verify if the seqnumber has been updated,
+ * if yes, only then validate the header and trailer.
+ * Once seqnum, header and trailer have been validated, it means
+ * that the complete message has arrived.
+ */
+ valid_msg = FALSE;
+ if (msg->epoch == ring_seqnum &&
+ msg->msg_type == MSG_TYPE_INFO_PYLD &&
+ msg->request_id > 0 &&
+ msg->request_id <= ring->item_len) {
+ /* proceed to check trailer only if header is valid */
+ buf = (volatile uint8 *)msg;
+ msglen = sizeof(cmn_msg_hdr_t) + msg->request_id;
+ buf += msglen;
+ if (msglen + sizeof(cmn_msg_hdr_t) <= ring->item_len) {
+ trailer = (volatile cmn_msg_hdr_t *)buf;
+ valid_msg = (trailer->epoch == ring_seqnum) &&
+ (trailer->msg_type == msg->msg_type) &&
+ (trailer->request_id == msg->request_id);
+ if (!valid_msg) {
+ DHD_TRACE(("%s:invalid trailer! seqnum=%u;reqid=%u"
+ " expected, seqnum=%u; reqid=%u. Retrying... \n",
+ __FUNCTION__, trailer->epoch, trailer->request_id,
+ msg->epoch, msg->request_id));
+ }
+ } else {
+ DHD_TRACE(("%s: invalid payload length (%u)! Retrying.. \n",
+ __FUNCTION__, msg->request_id));
+ }
+
+ if (valid_msg) {
+ /* data is OK */
+ ring->seqnum++; /* next expected sequence number */
+ if (dhd->dhd_induce_error != DHD_INDUCE_LIVELOCK) {
+ goto dma_completed;
+ }
+ }
+ } else {
+ DHD_TRACE(("%s: wrong hdr, seqnum expected %u, got %u."
+ " msg_type=0x%x, request_id=%u."
+ " Retrying...\n",
+ __FUNCTION__, ring_seqnum, msg->epoch,
+ msg->msg_type, msg->request_id));
+ }
+
+ total_tries = ((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries;
+
+ if (total_tries > prot->d2h_sync_wait_max)
+ prot->d2h_sync_wait_max = total_tries;
+
+ OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
+#if !(defined(BCM_ROUTER_DHD) && defined(BCM_GMAC3))
+ OSL_CPU_RELAX(); /* CPU relax for msg_seqnum value to update */
+ OSL_DELAY(delay * step); /* Add stepper delay */
+#endif /* !(defined(BCM_ROUTER_DHD) && defined(BCM_GMAC3)) */
+
+ } /* for PCIE_D2H_SYNC_WAIT_TRIES */
+ } /* for PCIE_D2H_SYNC_NUM_OF_STEPS */
+
+ DHD_ERROR(("%s: EDL header check fails !\n", __FUNCTION__));
+ DHD_ERROR(("%s: header: seqnum=%u; expected-seqnum=%u"
+ " msgtype=0x%x; expected-msgtype=0x%x"
+ " length=%u; expected-max-length=%u", __FUNCTION__,
+ msg->epoch, ring_seqnum, msg->msg_type, MSG_TYPE_INFO_PYLD,
+ msg->request_id, ring->item_len));
+ dhd_prhex("msg header bytes: ", (volatile uchar *)msg, sizeof(*msg), DHD_ERROR_VAL);
+ if (trailer && msglen > 0 &&
+ (msglen + sizeof(cmn_msg_hdr_t)) <= ring->item_len) {
+ DHD_ERROR(("%s: trailer: seqnum=%u; expected-seqnum=%u"
+ " msgtype=0x%x; expected-msgtype=0x%x"
+ " length=%u; expected-length=%u", __FUNCTION__,
+ trailer->epoch, ring_seqnum, trailer->msg_type, MSG_TYPE_INFO_PYLD,
+ trailer->request_id, msg->request_id));
+ dhd_prhex("msg trailer bytes: ", (volatile uchar *)trailer,
+ sizeof(*trailer), DHD_ERROR_VAL);
+ }
+
+ if ((msglen + sizeof(cmn_msg_hdr_t)) <= ring->item_len)
+ len = msglen + sizeof(cmn_msg_hdr_t);
+ else
+ len = ring->item_len;
+
+ dhd_prot_d2h_sync_livelock(dhd, msg->epoch, ring, total_tries,
+ (volatile uchar *) msg, len);
+
+ ring->seqnum++; /* skip this message */
+ return BCME_ERROR; /* sync failure; caller drops this work item */
+
+dma_completed:
+ DHD_TRACE(("%s: EDL header check pass, seqnum=%u; reqid=%u\n", __FUNCTION__,
+ msg->epoch, msg->request_id));
+
+ prot->d2h_sync_wait_tot += tries;
+ return BCME_OK;
+}
+
+/**
+ * dhd_prot_d2h_sync_edl_none - The dongle ensures that the DMA will complete,
+ * so the host need not sync. This noop sync handler will be bound when the
+ * dongle advertises that neither the SEQNUM nor XORCSUM mode of DMA sync is
+ * required.
+ */
+static int
+BCMFASTPATH(dhd_prot_d2h_sync_edl_none)(dhd_pub_t *dhd, msgbuf_ring_t *ring,
+ volatile cmn_msg_hdr_t *msg)
+{
+ /* Check for the LIVELOCK induce flag, which is set by firing
+ * a dhd iovar to induce a LIVELOCK error. If the flag is set,
+ * BCME_ERROR is returned, which results in a LIVELOCK error.
+ */
+ if (dhd->dhd_induce_error == DHD_INDUCE_LIVELOCK) {
+ DHD_ERROR(("%s: Inducing livelock\n", __FUNCTION__));
+ return BCME_ERROR;
+ } else {
+ if (msg->msg_type == MSG_TYPE_INFO_PYLD)
+ return BCME_OK;
+ else
+ return msg->msg_type;
+ }
+}
+#endif /* EWP_EDL */
+
+INLINE void
+dhd_wakeup_ioctl_event(dhd_pub_t *dhd, dhd_ioctl_recieved_status_t reason)
+{
+ /* To synchronize with the previous memory operations call wmb() */
+ OSL_SMP_WMB();
+ dhd->prot->ioctl_received = reason;
+ /* Call another wmb() to make sure the event value update is visible before waking up the waiter */
+ OSL_SMP_WMB();
+ dhd_os_ioctl_resp_wake(dhd);
+}
+
+/**
+ * dhd_prot_d2h_sync_init - Set up the host side DMA sync mode based on what
+ * the dongle advertises.
+ */
+static void
+dhd_prot_d2h_sync_init(dhd_pub_t *dhd)
+{
+ dhd_prot_t *prot = dhd->prot;
+ prot->d2h_sync_wait_max = 0UL;
+ prot->d2h_sync_wait_tot = 0UL;
+
+ prot->d2hring_ctrl_cpln.seqnum = D2H_EPOCH_INIT_VAL;
+ prot->d2hring_ctrl_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
+
+ prot->d2hring_tx_cpln.seqnum = D2H_EPOCH_INIT_VAL;
+ prot->d2hring_tx_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
+
+ prot->d2hring_rx_cpln.seqnum = D2H_EPOCH_INIT_VAL;
+ prot->d2hring_rx_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
+
+ if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM) {
+ prot->d2h_sync_cb = dhd_prot_d2h_sync_seqnum;
+#ifdef EWP_EDL
+ prot->d2h_edl_sync_cb = dhd_prot_d2h_sync_edl;
+#endif /* EWP_EDL */
+ DHD_ERROR(("%s(): D2H sync mechanism is SEQNUM \r\n", __FUNCTION__));
+ } else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM) {
+ prot->d2h_sync_cb = dhd_prot_d2h_sync_xorcsum;
+#ifdef EWP_EDL
+ prot->d2h_edl_sync_cb = dhd_prot_d2h_sync_edl;
+#endif /* EWP_EDL */
+ DHD_ERROR(("%s(): D2H sync mechanism is XORCSUM \r\n", __FUNCTION__));
+ } else {
+ prot->d2h_sync_cb = dhd_prot_d2h_sync_none;
+#ifdef EWP_EDL
+ prot->d2h_edl_sync_cb = dhd_prot_d2h_sync_edl_none;
+#endif /* EWP_EDL */
+ DHD_ERROR(("%s(): D2H sync mechanism is NONE \r\n", __FUNCTION__));
+ }
+}
+
+/**
+ * dhd_prot_h2d_sync_init - Per H2D common ring, setup the msgbuf ring seqnum
+ */
+static void
+dhd_prot_h2d_sync_init(dhd_pub_t *dhd)
+{
+ dhd_prot_t *prot = dhd->prot;
+ prot->h2dring_rxp_subn.seqnum = H2D_EPOCH_INIT_VAL;
+
+ prot->h2dring_rxp_subn.current_phase = 0;
+
+ prot->h2dring_ctrl_subn.seqnum = H2D_EPOCH_INIT_VAL;
+ prot->h2dring_ctrl_subn.current_phase = 0;
+}
+
+/* +----------------- End of PCIE DHD H2D DMA SYNC ------------------------+ */
+
+/*
+ * +---------------------------------------------------------------------------+
+ * PCIE DMA-able buffer. Sets up a dhd_dma_buf_t object, which includes the
+ * virtual and physical addresses, the buffer length and the DMA handler.
+ * A secdma handler is also included in the dhd_dma_buf object.
+ * +---------------------------------------------------------------------------+
+ */
+
+static INLINE void
+dhd_base_addr_htolpa(sh_addr_t *base_addr, dmaaddr_t pa)
+{
+ base_addr->low_addr = htol32(PHYSADDRLO(pa));
+ base_addr->high_addr = htol32(PHYSADDRHI(pa));
+}
+
+/**
+ * dhd_dma_buf_audit - Audit a DHD DMA buffer: address, alignment, length and
+ * dongle 32bit-arithmetic checks.
+ */
+static int
+dhd_dma_buf_audit(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
+{
+ uint32 pa_lowaddr, end; /* dongle uses 32bit ptr arithmetic */
+ ASSERT(dma_buf);
+ pa_lowaddr = PHYSADDRLO(dma_buf->pa);
+ ASSERT(PHYSADDRLO(dma_buf->pa) || PHYSADDRHI(dma_buf->pa));
+ ASSERT(ISALIGNED(pa_lowaddr, DMA_ALIGN_LEN));
+ ASSERT(dma_buf->len != 0);
+
+ /* test 32bit offset arithmetic over dma buffer for loss of carry-over */
+ end = (pa_lowaddr + dma_buf->len); /* end address */
+
+ if ((end & 0xFFFFFFFF) < (pa_lowaddr & 0xFFFFFFFF)) { /* exclude carryover */
+ DHD_ERROR(("%s: dma_buf %x len %d spans dongle 32bit ptr arithmetic\n",
+ __FUNCTION__, pa_lowaddr, dma_buf->len));
+ return BCME_ERROR;
+ }
+
+ return BCME_OK;
+}
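+
+/*
+ * Worked example of the carry-over check above (illustrative values): a
+ * buffer at pa_lowaddr 0xFFFFF000 with len 0x2000 would end at 0x100001000;
+ * its low 32 bits (0x1000) are below pa_lowaddr, so the buffer spans a
+ * dongle 32bit boundary and is rejected with BCME_ERROR.
+ */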
+
+/**
+ * dhd_dma_buf_alloc - Allocate a cache coherent DMA-able buffer.
+ * Returns BCME_OK (0) on success,
+ * or a non-zero negative error value on failure.
+ */
+int
+dhd_dma_buf_alloc(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf, uint32 buf_len)
+{
+ uint32 dma_pad = 0;
+ osl_t *osh = dhd->osh;
+ uint16 dma_align = DMA_ALIGN_LEN;
+ uint32 rem = 0;
+
+ ASSERT(dma_buf != NULL);
+ ASSERT(dma_buf->va == NULL);
+ ASSERT(dma_buf->len == 0);
+
+ /* Pad the buffer length to align to cacheline size. */
+ rem = (buf_len % DHD_DMA_PAD);
+ dma_pad = rem ? (DHD_DMA_PAD - rem) : 0;
+
+ dma_buf->va = DMA_ALLOC_CONSISTENT(osh, buf_len + dma_pad,
+ dma_align, &dma_buf->_alloced, &dma_buf->pa, &dma_buf->dmah);
+
+ if (dma_buf->va == NULL) {
+ DHD_ERROR(("%s: buf_len %d, no memory available\n",
+ __FUNCTION__, buf_len));
+ return BCME_NOMEM;
+ }
+
+ dma_buf->len = buf_len; /* not including padded len */
+
+ if (dhd_dma_buf_audit(dhd, dma_buf) != BCME_OK) { /* audit dma buf */
+ dhd_dma_buf_free(dhd, dma_buf);
+ return BCME_ERROR;
+ }
+
+ dhd_dma_buf_reset(dhd, dma_buf); /* zero out and cache flush */
+
+ return BCME_OK;
+}
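+
+/*
+ * Padding example (assuming DHD_DMA_PAD is 128 bytes): a request for 1000
+ * bytes is allocated as 1000 + 24 = 1024 bytes so that the allocation ends
+ * on a cacheline boundary, while dma_buf->len still reports the requested
+ * 1000 bytes.
+ */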
+
+/**
+ * dhd_dma_buf_reset - Reset a cache coherent DMA-able buffer.
+ */
+static void
+dhd_dma_buf_reset(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
+{
+ if ((dma_buf == NULL) || (dma_buf->va == NULL))
+ return;
+
+ (void)dhd_dma_buf_audit(dhd, dma_buf);
+
+ /* Zero out the entire buffer and cache flush */
+ memset((void*)dma_buf->va, 0, dma_buf->len);
+ OSL_CACHE_FLUSH((void *)dma_buf->va, dma_buf->len);
+}
+
+void
+dhd_local_buf_reset(char *buf, uint32 len)
+{
+ /* Zero out the entire buffer and cache flush */
+ memset((void*)buf, 0, len);
+ OSL_CACHE_FLUSH((void *)buf, len);
+}
+
+/**
+ * dhd_dma_buf_free - Free a DMA-able buffer that was previously allocated using
+ * dhd_dma_buf_alloc().
+ */
+void
+dhd_dma_buf_free(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
+{
+ osl_t *osh = dhd->osh;
+
+ ASSERT(dma_buf);
+
+ if (dma_buf->va == NULL)
+ return; /* Allow for free invocation, when alloc failed */
+
+ /* DEBUG: dhd_dma_buf_reset(dhd, dma_buf) */
+ (void)dhd_dma_buf_audit(dhd, dma_buf);
+
+ /* dma buffer may have been padded at allocation */
+ DMA_FREE_CONSISTENT(osh, dma_buf->va, dma_buf->_alloced,
+ dma_buf->pa, dma_buf->dmah);
+
+ memset(dma_buf, 0, sizeof(dhd_dma_buf_t));
+}
+
+/**
+ * dhd_dma_buf_init - Initialize a dhd_dma_buf with specified values.
+ * Do not use dhd_dma_buf_init to zero out a dhd_dma_buf_t object. Use memset 0.
+ */
+void
+dhd_dma_buf_init(dhd_pub_t *dhd, void *dhd_dma_buf,
+ void *va, uint32 len, dmaaddr_t pa, void *dmah, void *secdma)
+{
+ dhd_dma_buf_t *dma_buf;
+ ASSERT(dhd_dma_buf);
+ dma_buf = (dhd_dma_buf_t *)dhd_dma_buf;
+ dma_buf->va = va;
+ dma_buf->len = len;
+ dma_buf->pa = pa;
+ dma_buf->dmah = dmah;
+ dma_buf->secdma = secdma;
+
+ /* Audit user defined configuration */
+ (void)dhd_dma_buf_audit(dhd, dma_buf);
+}
+
+/* +------------------ End of PCIE DHD DMA BUF ADT ------------------------+ */
+
+/*
+ * +---------------------------------------------------------------------------+
+ * DHD_MAP_PKTID_LOGGING
+ * Logs PKTID and DMA map/unmap information for debugging SMMU fault issues
+ * on customer platforms.
+ * +---------------------------------------------------------------------------+
+ */
+
+#ifdef DHD_MAP_PKTID_LOGGING
+typedef struct dhd_pktid_log_item {
+ dmaaddr_t pa; /* DMA bus address */
+ uint64 ts_nsec; /* Timestamp: nsec */
+ uint32 size; /* DMA map/unmap size */
+ uint32 pktid; /* Packet ID */
+ uint8 pkttype; /* Packet Type */
+ uint8 rsvd[7]; /* Reserved for future use */
+} dhd_pktid_log_item_t;
+
+typedef struct dhd_pktid_log {
+ uint32 items; /* number of total items */
+ uint32 index; /* index of pktid_log_item */
+ dhd_pktid_log_item_t map[0]; /* metadata storage */
+} dhd_pktid_log_t;
+
+typedef void * dhd_pktid_log_handle_t; /* opaque handle to pktid log */
+
+#define MAX_PKTID_LOG (2048)
+#define DHD_PKTID_LOG_ITEM_SZ (sizeof(dhd_pktid_log_item_t))
+#define DHD_PKTID_LOG_SZ(items) (uint32)((sizeof(dhd_pktid_log_t)) + \
+ ((DHD_PKTID_LOG_ITEM_SZ) * (items)))
+
+#define DHD_PKTID_LOG_INIT(dhd, hdl) dhd_pktid_logging_init((dhd), (hdl))
+#define DHD_PKTID_LOG_FINI(dhd, hdl) dhd_pktid_logging_fini((dhd), (hdl))
+#define DHD_PKTID_LOG(dhd, hdl, pa, pktid, len, pkttype) \
+ dhd_pktid_logging((dhd), (hdl), (pa), (pktid), (len), (pkttype))
+#define DHD_PKTID_LOG_DUMP(dhd) dhd_pktid_logging_dump((dhd))
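+
+/*
+ * Sizing note (illustrative; DHD_PKTID_LOG_ITEM_SZ depends on the dmaaddr_t
+ * width): with MAX_PKTID_LOG = 2048 entries, one log occupies
+ * sizeof(dhd_pktid_log_t) + 2048 * DHD_PKTID_LOG_ITEM_SZ bytes in a single
+ * MALLOCZ allocation, and is written as a circular buffer whose index
+ * wraps modulo log->items.
+ */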
+
+static dhd_pktid_log_handle_t *
+dhd_pktid_logging_init(dhd_pub_t *dhd, uint32 num_items)
+{
+ dhd_pktid_log_t *log;
+ uint32 log_size;
+
+ log_size = DHD_PKTID_LOG_SZ(num_items);
+ log = (dhd_pktid_log_t *)MALLOCZ(dhd->osh, log_size);
+ if (log == NULL) {
+ DHD_ERROR(("%s: MALLOC failed for size %d\n",
+ __FUNCTION__, log_size));
+ return (dhd_pktid_log_handle_t *)NULL;
+ }
+
+ log->items = num_items;
+ log->index = 0;
+
+ return (dhd_pktid_log_handle_t *)log; /* opaque handle */
+}
+
+static void
+dhd_pktid_logging_fini(dhd_pub_t *dhd, dhd_pktid_log_handle_t *handle)
+{
+ dhd_pktid_log_t *log;
+ uint32 log_size;
+
+ if (handle == NULL) {
+ DHD_ERROR(("%s: handle is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ log = (dhd_pktid_log_t *)handle;
+ log_size = DHD_PKTID_LOG_SZ(log->items);
+ MFREE(dhd->osh, handle, log_size);
+}
+
+static void
+dhd_pktid_logging(dhd_pub_t *dhd, dhd_pktid_log_handle_t *handle, dmaaddr_t pa,
+ uint32 pktid, uint32 len, uint8 pkttype)
+{
+ dhd_pktid_log_t *log;
+ uint32 idx;
+
+ if (handle == NULL) {
+ DHD_ERROR(("%s: handle is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ log = (dhd_pktid_log_t *)handle;
+ idx = log->index;
+ log->map[idx].ts_nsec = OSL_LOCALTIME_NS();
+ log->map[idx].pa = pa;
+ log->map[idx].pktid = pktid;
+ log->map[idx].size = len;
+ log->map[idx].pkttype = pkttype;
+ log->index = (idx + 1) % (log->items); /* update index */
+}
+
+void
+dhd_pktid_logging_dump(dhd_pub_t *dhd)
+{
+ dhd_prot_t *prot = dhd->prot;
+ dhd_pktid_log_t *map_log, *unmap_log;
+ uint64 ts_sec, ts_usec;
+
+ if (prot == NULL) {
+ DHD_ERROR(("%s: prot is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ map_log = (dhd_pktid_log_t *)(prot->pktid_dma_map);
+ unmap_log = (dhd_pktid_log_t *)(prot->pktid_dma_unmap);
+ OSL_GET_LOCALTIME(&ts_sec, &ts_usec);
+ if (map_log && unmap_log) {
+ DHD_ERROR(("%s: map_idx=%d unmap_idx=%d "
+ "current time=[%5lu.%06lu]\n", __FUNCTION__,
+ map_log->index, unmap_log->index,
+ (unsigned long)ts_sec, (unsigned long)ts_usec));
+ DHD_ERROR(("%s: pktid_map_log(pa)=0x%llx size=%d, "
+ "pktid_unmap_log(pa)=0x%llx size=%d\n", __FUNCTION__,
+ (uint64)__virt_to_phys((ulong)(map_log->map)),
+ (uint32)(DHD_PKTID_LOG_ITEM_SZ * map_log->items),
+ (uint64)__virt_to_phys((ulong)(unmap_log->map)),
+ (uint32)(DHD_PKTID_LOG_ITEM_SZ * unmap_log->items)));
+ }
+}
+#endif /* DHD_MAP_PKTID_LOGGING */
+
+/* +----------------- End of DHD_MAP_PKTID_LOGGING -----------------------+ */
+
+/*
+ * +---------------------------------------------------------------------------+
+ * PktId Map: Provides a native packet pointer to unique 32bit PktId mapping.
+ * Main purpose is to save memory on the dongle, has other purposes as well.
+ * The packet id map, also includes storage for some packet parameters that
+ * may be saved. A native packet pointer along with the parameters may be saved
+ * and a unique 32bit pkt id will be returned. Later, the saved packet pointer
+ * and the metadata may be retrieved using the previously allocated packet id.
+ * +---------------------------------------------------------------------------+
+ */
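+/*
+ * Usage sketch (illustrative only; real call sites live in the tx/rx/ctrl
+ * paths and use the DHD_NATIVE_TO_PKTID / DHD_PKTID_TO_NATIVE macros
+ * defined below):
+ *
+ *   pktid = DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, DMA_TX, dmah,
+ *                               secdma, PKTTYPE_DATA_TX);
+ *   ... hand pktid to the dongle; later, on completion ...
+ *   pkt = DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len, dmah,
+ *                             secdma, PKTTYPE_DATA_TX);
+ */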
+#define DHD_PCIE_PKTID
+
+/* On Router, the pktptr serves as a pktid. */
+#if defined(BCM_ROUTER_DHD) && !defined(BCA_HNDROUTER)
+#undef DHD_PCIE_PKTID /* Comment this undef, to reenable PKTIDMAP */
+#endif /* BCM_ROUTER_DHD && !BCA_HNDROUTER */
+
+#if defined(BCM_ROUTER_DHD) && defined(DHD_PCIE_PKTID)
+#undef MAX_TX_PKTID
+#define MAX_TX_PKTID ((36 * 1024) - 1) /* Extend for 64 clients support. */
+#endif /* BCM_ROUTER_DHD && DHD_PCIE_PKTID */
+
+/* XXX: PROP_TXSTATUS: WLFC defines a private pkttag layout.
+ * Hence the dma parameters cannot be stored in the pkttag, and the pktidmap
+ * locker is required.
+ */
+#if defined(PROP_TXSTATUS) && !defined(DHD_PCIE_PKTID)
+#error "PKTIDMAP must be supported with PROP_TXSTATUS/WLFC"
+#endif
+
+/* Enum for marking the buffer color based on usage */
+typedef enum dhd_pkttype {
+ PKTTYPE_DATA_TX = 0,
+ PKTTYPE_DATA_RX,
+ PKTTYPE_IOCTL_RX,
+ PKTTYPE_EVENT_RX,
+ PKTTYPE_INFO_RX,
+ /* no check in dhd_prot_packet_free; covers the case where a pktid is reserved but no space is available */
+ PKTTYPE_NO_CHECK,
+ PKTTYPE_TSBUF_RX
+} dhd_pkttype_t;
+
+#define DHD_PKTID_MIN_AVAIL_COUNT 512U
+#define DHD_PKTID_DEPLETED_MAX_COUNT (DHD_PKTID_MIN_AVAIL_COUNT * 2U)
+#define DHD_PKTID_INVALID (0U)
+#define DHD_IOCTL_REQ_PKTID (0xFFFE)
+#define DHD_FAKE_PKTID (0xFACE)
+#define DHD_H2D_DBGRING_REQ_PKTID 0xFFFD
+#define DHD_D2H_DBGRING_REQ_PKTID 0xFFFC
+#define DHD_H2D_HOSTTS_REQ_PKTID 0xFFFB
+#define DHD_H2D_BTLOGRING_REQ_PKTID 0xFFFA
+#define DHD_D2H_BTLOGRING_REQ_PKTID 0xFFF9
+#define DHD_H2D_SNAPSHOT_UPLOAD_REQ_PKTID 0xFFF8
+#ifdef DHD_HP2P
+#define DHD_D2H_HPPRING_TXREQ_PKTID 0xFFF7
+#define DHD_D2H_HPPRING_RXREQ_PKTID 0xFFF6
+#endif /* DHD_HP2P */
+
+#define IS_FLOWRING(ring) \
+ ((strncmp(ring->name, "h2dflr", sizeof("h2dflr"))) == (0))
+
+typedef void * dhd_pktid_map_handle_t; /* opaque handle to a pktid map */
+
+/* Construct a packet id mapping table, returning an opaque map handle */
+static dhd_pktid_map_handle_t *dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items);
+
+/* Destroy a packet id mapping table, freeing all packets active in the table */
+static void dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map);
+
+#define DHD_NATIVE_TO_PKTID_INIT(dhd, items) dhd_pktid_map_init((dhd), (items))
+#define DHD_NATIVE_TO_PKTID_RESET(dhd, map) dhd_pktid_map_reset((dhd), (map))
+#define DHD_NATIVE_TO_PKTID_FINI(dhd, map) dhd_pktid_map_fini((dhd), (map))
+#define DHD_NATIVE_TO_PKTID_FINI_IOCTL(osh, map) dhd_pktid_map_fini_ioctl((osh), (map))
+
+#if defined(DHD_PCIE_PKTID)
+#if defined(NDIS) || defined(DHD_EFI)
+/* XXX: for NDIS, using consistent memory instead of buffer from PKTGET for
+ * up to 8K ioctl response
+ */
+#define IOCTLRESP_USE_CONSTMEM
+static void free_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf);
+static int alloc_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf);
+#endif /* NDIS || DHD_EFI */
+
+/* Determine number of pktids that are available */
+static INLINE uint32 dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t *handle);
+
+/* Allocate a unique pktid against which a pkt and some metadata is saved */
+static INLINE uint32 dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
+ void *pkt, dhd_pkttype_t pkttype);
+static INLINE void dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
+ void *pkt, uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dma,
+ void *dmah, void *secdma, dhd_pkttype_t pkttype);
+static uint32 dhd_pktid_map_alloc(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map,
+ void *pkt, dmaaddr_t pa, uint32 len, uint8 dma,
+ void *dmah, void *secdma, dhd_pkttype_t pkttype);
+
+/* Return an allocated pktid, retrieving previously saved pkt and metadata */
+static void *dhd_pktid_map_free(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map,
+ uint32 id, dmaaddr_t *pa, uint32 *len, void **dmah,
+ void **secdma, dhd_pkttype_t pkttype, bool rsv_locker);
+
+#ifdef DHD_PKTTS
+/* Store the Metadata buffer to the locker */
+static INLINE void
+dhd_pktid_map_save_metadata(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *mpkt,
+ dmaaddr_t mpkt_pa,
+ uint16 mpkt_len,
+ void *dmah,
+ uint32 nkey);
+
+/* Return the Metadata buffer from the locker */
+static void * dhd_pktid_map_retreive_metadata(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
+ dmaaddr_t *pmpkt_pa, uint32 *pmpkt_len, void **pdmah, uint32 nkey);
+#endif /* DHD_PKTTS */
+
+/*
+ * DHD_PKTID_AUDIT_ENABLED: Audit of PktIds in DHD for duplicate alloc and frees
+ *
+ * DHD_PKTID_AUDIT_MAP: Audit the LIFO or FIFO PktIdMap allocator
+ * DHD_PKTID_AUDIT_RING: Audit the pktid during producer/consumer ring operation
+ *
+ * CAUTION: When DHD_PKTID_AUDIT_ENABLED is defined,
+ * either DHD_PKTID_AUDIT_MAP or DHD_PKTID_AUDIT_RING may be selected.
+ */
+#if defined(DHD_PKTID_AUDIT_ENABLED)
+#define USE_DHD_PKTID_AUDIT_LOCK 1
+/* Audit the pktidmap allocator */
+/* #define DHD_PKTID_AUDIT_MAP */
+
+/* Audit the pktid during production/consumption of workitems */
+#define DHD_PKTID_AUDIT_RING
+
+#if defined(DHD_PKTID_AUDIT_MAP) && defined(DHD_PKTID_AUDIT_RING)
+#error "May only enabled audit of MAP or RING, at a time."
+#endif /* DHD_PKTID_AUDIT_MAP && DHD_PKTID_AUDIT_RING */
+
+#define DHD_DUPLICATE_ALLOC 1
+#define DHD_DUPLICATE_FREE 2
+#define DHD_TEST_IS_ALLOC 3
+#define DHD_TEST_IS_FREE 4
+
+typedef enum dhd_pktid_map_type {
+ DHD_PKTID_MAP_TYPE_CTRL = 1,
+ DHD_PKTID_MAP_TYPE_TX,
+ DHD_PKTID_MAP_TYPE_RX,
+ DHD_PKTID_MAP_TYPE_UNKNOWN
+} dhd_pktid_map_type_t;
+
+#ifdef USE_DHD_PKTID_AUDIT_LOCK
+#define DHD_PKTID_AUDIT_LOCK_INIT(osh) osl_spin_lock_init(osh)
+#define DHD_PKTID_AUDIT_LOCK_DEINIT(osh, lock) osl_spin_lock_deinit(osh, lock)
+#define DHD_PKTID_AUDIT_LOCK(lock) osl_spin_lock(lock)
+#define DHD_PKTID_AUDIT_UNLOCK(lock, flags) osl_spin_unlock(lock, flags)
+#else
+#define DHD_PKTID_AUDIT_LOCK_INIT(osh) (void *)(1)
+#define DHD_PKTID_AUDIT_LOCK_DEINIT(osh, lock) do { /* noop */ } while (0)
+#define DHD_PKTID_AUDIT_LOCK(lock) 0
+#define DHD_PKTID_AUDIT_UNLOCK(lock, flags) do { /* noop */ } while (0)
+#endif /* !USE_DHD_PKTID_AUDIT_LOCK */
+
+#endif /* DHD_PKTID_AUDIT_ENABLED */
+
+#define USE_DHD_PKTID_LOCK 1
+
+#ifdef USE_DHD_PKTID_LOCK
+#define DHD_PKTID_LOCK_INIT(osh) osl_spin_lock_init(osh)
+#define DHD_PKTID_LOCK_DEINIT(osh, lock) osl_spin_lock_deinit(osh, lock)
+#define DHD_PKTID_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define DHD_PKTID_UNLOCK(lock, flags) osl_spin_unlock(lock, flags)
+#else
+#define DHD_PKTID_LOCK_INIT(osh) (void *)(1)
+#define DHD_PKTID_LOCK_DEINIT(osh, lock) \
+ do { \
+ BCM_REFERENCE(osh); \
+ BCM_REFERENCE(lock); \
+ } while (0)
+#define DHD_PKTID_LOCK(lock) 0
+#define DHD_PKTID_UNLOCK(lock, flags) \
+ do { \
+ BCM_REFERENCE(lock); \
+ BCM_REFERENCE(flags); \
+ } while (0)
+#endif /* !USE_DHD_PKTID_LOCK */
+
+typedef enum dhd_locker_state {
+ LOCKER_IS_FREE,
+ LOCKER_IS_BUSY,
+ LOCKER_IS_RSVD
+} dhd_locker_state_t;
+
+/* Packet metadata saved in packet id mapper */
+
+typedef struct dhd_pktid_item {
+ dhd_locker_state_t state; /* tag a locker to be free, busy or reserved */
+ uint8 dir; /* dma map direction (Tx=flush or Rx=invalidate) */
+ dhd_pkttype_t pkttype; /* pktlists are maintained based on pkttype */
+ uint16 len; /* length of mapped packet's buffer */
+ void *pkt; /* opaque native pointer to a packet */
+ dmaaddr_t pa; /* physical address of mapped packet's buffer */
+ void *dmah; /* handle to OS specific DMA map */
+ void *secdma;
+#ifdef DHD_PKTTS
+ void *mpkt; /* VA of Metadata */
+ dmaaddr_t mpkt_pa; /* PA of Metadata */
+ uint16 mpkt_len; /* Length of Metadata */
+#endif /* DHD_PKTTS */
+} dhd_pktid_item_t;
+
+typedef uint32 dhd_pktid_key_t;
+
+typedef struct dhd_pktid_map {
+ uint32 items; /* total items in map */
+ uint32 avail; /* total available items */
+ int failures; /* lockers unavailable count */
+ /* Spinlock to protect dhd_pktid_map in process/tasklet context */
+ void *pktid_lock; /* Used when USE_DHD_PKTID_LOCK is defined */
+
+#if defined(DHD_PKTID_AUDIT_ENABLED)
+ void *pktid_audit_lock;
+ struct bcm_mwbmap *pktid_audit; /* multi word bitmap based audit */
+#endif /* DHD_PKTID_AUDIT_ENABLED */
+ dhd_pktid_key_t *keys; /* map_items +1 unique pkt ids */
+ dhd_pktid_item_t lockers[0]; /* metadata storage */
+} dhd_pktid_map_t;
+
+/*
+ * PktId (Locker) #0 is never allocated and is considered invalid.
+ *
+ * On request for a pktid, a value DHD_PKTID_INVALID must be treated as a
+ * depleted pktid pool and must not be used by the caller.
+ *
+ * Likewise, a caller must never free a pktid of value DHD_PKTID_INVALID.
+ */
+
+#define DHD_PKTID_FREE_LOCKER (FALSE)
+#define DHD_PKTID_RSV_LOCKER (TRUE)
+
+#define DHD_PKTID_ITEM_SZ (sizeof(dhd_pktid_item_t))
+#define DHD_PKIDMAP_ITEMS(items) (items)
+#define DHD_PKTID_MAP_SZ(items) (sizeof(dhd_pktid_map_t) + \
+ (DHD_PKTID_ITEM_SZ * ((items) + 1)))
+#define DHD_PKTIDMAP_KEYS_SZ(items) (sizeof(dhd_pktid_key_t) * ((items) + 1))
+
+#define DHD_NATIVE_TO_PKTID_RESET_IOCTL(dhd, map) dhd_pktid_map_reset_ioctl((dhd), (map))
+
+/* Convert a packet to a pktid, and save pkt pointer in busy locker */
+#define DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt, pkttype) \
+ dhd_pktid_map_reserve((dhd), (map), (pkt), (pkttype))
+/* Reuse a previously reserved locker to save packet params */
+#define DHD_NATIVE_TO_PKTID_SAVE(dhd, map, pkt, nkey, pa, len, dir, dmah, secdma, pkttype) \
+ dhd_pktid_map_save((dhd), (map), (void *)(pkt), (nkey), (pa), (uint32)(len), \
+ (uint8)(dir), (void *)(dmah), (void *)(secdma), \
+ (dhd_pkttype_t)(pkttype))
+/* Convert a packet to a pktid, and save packet params in locker */
+#define DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, dir, dmah, secdma, pkttype) \
+ dhd_pktid_map_alloc((dhd), (map), (void *)(pkt), (pa), (uint32)(len), \
+ (uint8)(dir), (void *)(dmah), (void *)(secdma), \
+ (dhd_pkttype_t)(pkttype))
+
+/* Convert pktid to a packet, and free the locker */
+#define DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \
+ dhd_pktid_map_free((dhd), (map), (uint32)(pktid), \
+ (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \
+ (void **)&(secdma), (dhd_pkttype_t)(pkttype), DHD_PKTID_FREE_LOCKER)
+
+/* Convert the pktid to a packet, empty locker, but keep it reserved */
+#define DHD_PKTID_TO_NATIVE_RSV(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \
+ dhd_pktid_map_free((dhd), (map), (uint32)(pktid), \
+ (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \
+ (void **)&(secdma), (dhd_pkttype_t)(pkttype), DHD_PKTID_RSV_LOCKER)
+
+#ifdef DHD_PKTTS
+#define DHD_PKTID_SAVE_METADATA(dhd, map, mpkt, mpkt_pa, mpkt_len, dmah, nkey) \
+ dhd_pktid_map_save_metadata(dhd, map, mpkt, mpkt_pa, mpkt_len, dmah, nkey)
+
+#define DHD_PKTID_RETREIVE_METADATA(dhd, map, mpkt_pa, mpkt_len, dmah, nkey) \
+ dhd_pktid_map_retreive_metadata(dhd, map, (dmaaddr_t *)&mpkt_pa, (uint32 *)&mpkt_len, \
+ (void **) &dmah, nkey)
+#endif /* DHD_PKTTS */
+
+#define DHD_PKTID_AVAIL(map) dhd_pktid_map_avail_cnt(map)
+
+#if defined(DHD_PKTID_AUDIT_ENABLED)
+
+static int
+dhd_get_pktid_map_type(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map)
+{
+ dhd_prot_t *prot = dhd->prot;
+ int pktid_map_type;
+
+ if (pktid_map == prot->pktid_ctrl_map) {
+ pktid_map_type = DHD_PKTID_MAP_TYPE_CTRL;
+ } else if (pktid_map == prot->pktid_tx_map) {
+ pktid_map_type = DHD_PKTID_MAP_TYPE_TX;
+ } else if (pktid_map == prot->pktid_rx_map) {
+ pktid_map_type = DHD_PKTID_MAP_TYPE_RX;
+ } else {
+ pktid_map_type = DHD_PKTID_MAP_TYPE_UNKNOWN;
+ }
+
+ return pktid_map_type;
+}
+
+/**
+ * __dhd_pktid_audit - Use the mwbmap to audit the validity of a pktid.
+ */
+static int
+__dhd_pktid_audit(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map, uint32 pktid,
+ const int test_for, const char *errmsg)
+{
+#define DHD_PKT_AUDIT_STR "ERROR: %16s Host PktId Audit: "
+ struct bcm_mwbmap *handle;
+ uint32 flags;
+ bool ignore_audit;
+ int error = BCME_OK;
+
+ if (pktid_map == (dhd_pktid_map_t *)NULL) {
+ DHD_ERROR((DHD_PKT_AUDIT_STR "Pkt id map NULL\n", errmsg));
+ return BCME_OK;
+ }
+
+ flags = DHD_PKTID_AUDIT_LOCK(pktid_map->pktid_audit_lock);
+
+ handle = pktid_map->pktid_audit;
+ if (handle == (struct bcm_mwbmap *)NULL) {
+ DHD_ERROR((DHD_PKT_AUDIT_STR "Handle NULL\n", errmsg));
+ goto out;
+ }
+
+ /* Exclude special pktids from audit */
+ ignore_audit = (pktid == DHD_IOCTL_REQ_PKTID) || (pktid == DHD_FAKE_PKTID);
+ if (ignore_audit) {
+ goto out;
+ }
+
+ if ((pktid == DHD_PKTID_INVALID) || (pktid > pktid_map->items)) {
+ DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> invalid\n", errmsg, pktid));
+ error = BCME_ERROR;
+ goto out;
+ }
+
+ /* Perform audit */
+ switch (test_for) {
+ case DHD_DUPLICATE_ALLOC:
+ if (!bcm_mwbmap_isfree(handle, pktid)) {
+ DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> alloc duplicate\n",
+ errmsg, pktid));
+ error = BCME_ERROR;
+ } else {
+ bcm_mwbmap_force(handle, pktid);
+ }
+ break;
+
+ case DHD_DUPLICATE_FREE:
+ if (bcm_mwbmap_isfree(handle, pktid)) {
+ DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> free duplicate\n",
+ errmsg, pktid));
+ error = BCME_ERROR;
+ } else {
+ bcm_mwbmap_free(handle, pktid);
+ }
+ break;
+
+ case DHD_TEST_IS_ALLOC:
+ if (bcm_mwbmap_isfree(handle, pktid)) {
+ DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> is not allocated\n",
+ errmsg, pktid));
+ error = BCME_ERROR;
+ }
+ break;
+
+ case DHD_TEST_IS_FREE:
+ if (!bcm_mwbmap_isfree(handle, pktid)) {
+ DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> is not free",
+ errmsg, pktid));
+ error = BCME_ERROR;
+ }
+ break;
+
+ default:
+ DHD_ERROR(("%s: Invalid test case: %d\n", __FUNCTION__, test_for));
+ error = BCME_ERROR;
+ break;
+ }
+
+out:
+ DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags);
+
+ if (error != BCME_OK) {
+ dhd->pktid_audit_failed = TRUE;
+ }
+
+ return error;
+}
+
+static int
+dhd_pktid_audit(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map, uint32 pktid,
+ const int test_for, const char *errmsg)
+{
+ int ret = BCME_OK;
+ ret = __dhd_pktid_audit(dhd, pktid_map, pktid, test_for, errmsg);
+ if (ret == BCME_ERROR) {
+ DHD_ERROR(("%s: Got Pkt Id Audit failure: PKTID<%d> PKTID MAP TYPE<%d>\n",
+ __FUNCTION__, pktid, dhd_get_pktid_map_type(dhd, pktid_map)));
+ dhd_pktid_error_handler(dhd);
+#ifdef DHD_MAP_PKTID_LOGGING
+ DHD_PKTID_LOG_DUMP(dhd);
+#endif /* DHD_MAP_PKTID_LOGGING */
+ }
+
+ return ret;
+}
+
+#define DHD_PKTID_AUDIT(dhdp, map, pktid, test_for) \
+ dhd_pktid_audit((dhdp), (dhd_pktid_map_t *)(map), (pktid), (test_for), __FUNCTION__)
+
+static int
+dhd_pktid_audit_ring_debug(dhd_pub_t *dhdp, dhd_pktid_map_t *map, uint32 pktid,
+ const int test_for, void *msg, uint32 msg_len, const char *func)
+{
+ int ret = BCME_OK;
+
+ if (dhd_query_bus_erros(dhdp)) {
+ return BCME_ERROR;
+ }
+
+ ret = __dhd_pktid_audit(dhdp, map, pktid, test_for, func);
+ if (ret == BCME_ERROR) {
+ DHD_ERROR(("%s: Got Pkt Id Audit failure: PKTID<%d> PKTID MAP TYPE<%d>\n",
+ __FUNCTION__, pktid, dhd_get_pktid_map_type(dhdp, map)));
+ prhex(func, (uchar *)msg, msg_len);
+ dhd_pktid_error_handler(dhdp);
+#ifdef DHD_MAP_PKTID_LOGGING
+ DHD_PKTID_LOG_DUMP(dhdp);
+#endif /* DHD_MAP_PKTID_LOGGING */
+ }
+ return ret;
+}
+#define DHD_PKTID_AUDIT_RING_DEBUG(dhdp, map, pktid, test_for, msg, msg_len) \
+ dhd_pktid_audit_ring_debug((dhdp), (dhd_pktid_map_t *)(map), \
+ (pktid), (test_for), msg, msg_len, __FUNCTION__)
+
+#endif /* DHD_PKTID_AUDIT_ENABLED */
+
+/**
+ * +---------------------------------------------------------------------------+
+ * Packet to Packet Id mapper using a <numbered_key, locker> paradigm.
+ *
+ * dhd_pktid_map manages a set of unique Packet Ids range[1..MAX_xxx_PKTID].
+ *
+ * dhd_pktid_map_alloc() may be used to save some packet metadata, and a unique
+ * packet id is returned. This unique packet id may be used to retrieve the
+ * previously saved packet metadata, using dhd_pktid_map_free(). On invocation
+ * of dhd_pktid_map_free(), the unique packet id is essentially freed. A
+ * subsequent call to dhd_pktid_map_alloc() may reuse this packet id.
+ *
+ * Implementation Note:
+ * Convert this into a <key,locker> abstraction and place it into bcmutils!
+ * Locker abstraction should treat contents as opaque storage, and a
+ * callback should be registered to handle busy lockers on destructor.
+ *
+ * +---------------------------------------------------------------------------+
+ */
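+
+/*
+ * Locker lifecycle (sketch): a locker starts LOCKER_IS_FREE; a reserve
+ * moves it to LOCKER_IS_RSVD, a save/alloc to LOCKER_IS_BUSY, and a free
+ * returns it to LOCKER_IS_FREE (or back to LOCKER_IS_RSVD when the caller
+ * keeps the reservation via DHD_PKTID_RSV_LOCKER).
+ */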
+
+/** Allocate and initialize a mapper of num_items <numbered_key, locker> */
+
+static dhd_pktid_map_handle_t *
+dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items)
+{
+ void* osh;
+ uint32 nkey;
+ dhd_pktid_map_t *map;
+ uint32 dhd_pktid_map_sz;
+ uint32 map_items;
+ uint32 map_keys_sz;
+ osh = dhd->osh;
+
+ dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(num_items);
+
+ map = (dhd_pktid_map_t *)VMALLOC(osh, dhd_pktid_map_sz);
+ if (map == NULL) {
+ DHD_ERROR(("%s:%d: MALLOC failed for size %d\n",
+ __FUNCTION__, __LINE__, dhd_pktid_map_sz));
+ return (dhd_pktid_map_handle_t *)NULL;
+ }
+
+ map->items = num_items;
+ map->avail = num_items;
+
+ map_items = DHD_PKIDMAP_ITEMS(map->items);
+
+ map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items);
+
+ /* Initialize the lock that protects this structure */
+ map->pktid_lock = DHD_PKTID_LOCK_INIT(osh);
+ if (map->pktid_lock == NULL) {
+ DHD_ERROR(("%s:%d: Lock init failed \r\n", __FUNCTION__, __LINE__));
+ goto error;
+ }
+
+ map->keys = (dhd_pktid_key_t *)MALLOC(osh, map_keys_sz);
+ if (map->keys == NULL) {
+ DHD_ERROR(("%s:%d: MALLOC failed for map->keys size %d\n",
+ __FUNCTION__, __LINE__, map_keys_sz));
+ goto error;
+ }
+
+#if defined(DHD_PKTID_AUDIT_ENABLED)
+ /* Incarnate a hierarchical multiword bitmap for auditing pktid allocator */
+ map->pktid_audit = bcm_mwbmap_init(osh, map_items + 1);
+ if (map->pktid_audit == (struct bcm_mwbmap *)NULL) {
+ DHD_ERROR(("%s:%d: pktid_audit init failed\r\n", __FUNCTION__, __LINE__));
+ goto error;
+ } else {
+ DHD_ERROR(("%s:%d: pktid_audit init succeeded %d\n",
+ __FUNCTION__, __LINE__, map_items + 1));
+ }
+ map->pktid_audit_lock = DHD_PKTID_AUDIT_LOCK_INIT(osh);
+#endif /* DHD_PKTID_AUDIT_ENABLED */
+
+ for (nkey = 1; nkey <= map_items; nkey++) { /* locker #0 is reserved */
+ map->keys[nkey] = nkey; /* populate with unique keys */
+ map->lockers[nkey].state = LOCKER_IS_FREE;
+ map->lockers[nkey].pkt = NULL; /* bzero: redundant */
+ map->lockers[nkey].len = 0;
+ }
+
+ /* Reserve pktid #0, i.e. DHD_PKTID_INVALID to be inuse */
+ map->lockers[DHD_PKTID_INVALID].state = LOCKER_IS_BUSY; /* tag locker #0 as inuse */
+ map->lockers[DHD_PKTID_INVALID].pkt = NULL; /* bzero: redundant */
+ map->lockers[DHD_PKTID_INVALID].len = 0;
+
+#if defined(DHD_PKTID_AUDIT_ENABLED)
+ /* do not use dhd_pktid_audit() here, use bcm_mwbmap_force directly */
+ bcm_mwbmap_force(map->pktid_audit, DHD_PKTID_INVALID);
+#endif /* DHD_PKTID_AUDIT_ENABLED */
+
+ return (dhd_pktid_map_handle_t *)map; /* opaque handle */
+
+error:
+ if (map) {
+#if defined(DHD_PKTID_AUDIT_ENABLED)
+ if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
+ bcm_mwbmap_fini(osh, map->pktid_audit); /* Destruct pktid_audit */
+ map->pktid_audit = (struct bcm_mwbmap *)NULL;
+ if (map->pktid_audit_lock)
+ DHD_PKTID_AUDIT_LOCK_DEINIT(osh, map->pktid_audit_lock);
+ }
+#endif /* DHD_PKTID_AUDIT_ENABLED */
+
+ if (map->keys) {
+ MFREE(osh, map->keys, map_keys_sz);
+ }
+
+ if (map->pktid_lock) {
+ DHD_PKTID_LOCK_DEINIT(osh, map->pktid_lock);
+ }
+
+ VMFREE(osh, map, dhd_pktid_map_sz);
+ }
+ return (dhd_pktid_map_handle_t *)NULL;
+}
+
+/**
+ * Retrieve all allocated keys and free all <numbered_key, locker> pairs.
+ * Freeing implies unmapping the buffers and freeing the native packets.
+ * This could have been a callback registered with the pktid mapper.
+ */
+static void
+dhd_pktid_map_reset(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
+{
+ void *osh;
+ uint32 nkey;
+ dhd_pktid_map_t *map;
+ dhd_pktid_item_t *locker;
+ uint32 map_items;
+ unsigned long flags;
+ bool data_tx = FALSE;
+
+ map = (dhd_pktid_map_t *)handle;
+ DHD_PKTID_LOCK(map->pktid_lock, flags);
+ osh = dhd->osh;
+
+ map_items = DHD_PKIDMAP_ITEMS(map->items);
+ /* skip reserved KEY #0, and start from 1 */
+
+ for (nkey = 1; nkey <= map_items; nkey++) {
+ if (map->lockers[nkey].state == LOCKER_IS_BUSY) {
+ locker = &map->lockers[nkey];
+ locker->state = LOCKER_IS_FREE;
+ data_tx = (locker->pkttype == PKTTYPE_DATA_TX);
+ if (data_tx) {
+ OSL_ATOMIC_DEC(dhd->osh, &dhd->prot->active_tx_count);
+ }
+
+#ifdef DHD_PKTID_AUDIT_RING
+ DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* duplicate frees */
+#endif /* DHD_PKTID_AUDIT_RING */
+#ifdef DHD_MAP_PKTID_LOGGING
+ DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_unmap,
+ locker->pa, nkey, locker->len,
+ locker->pkttype);
+#endif /* DHD_MAP_PKTID_LOGGING */
+
+ DMA_UNMAP(osh, locker->pa, locker->len, locker->dir, 0, locker->dmah);
+ dhd_prot_packet_free(dhd, (ulong*)locker->pkt,
+ locker->pkttype, data_tx);
+ }
+ else {
+#ifdef DHD_PKTID_AUDIT_RING
+ DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
+#endif /* DHD_PKTID_AUDIT_RING */
+ }
+ map->keys[nkey] = nkey; /* populate with unique keys */
+ }
+
+ map->avail = map_items;
+ memset(&map->lockers[1], 0, sizeof(dhd_pktid_item_t) * map_items);
+ DHD_PKTID_UNLOCK(map->pktid_lock, flags);
+}
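+
+/*
+ * Note: after a reset, every key 1..map_items is back on the free stack and
+ * all lockers except reserved locker #0 are zeroed, leaving the map in the
+ * same state its init routine left it in.
+ */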
+
+#ifdef IOCTLRESP_USE_CONSTMEM
+/** Called in detach scenario. Releasing IOCTL buffers. */
+static void
+dhd_pktid_map_reset_ioctl(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
+{
+ uint32 nkey;
+ dhd_pktid_map_t *map;
+ dhd_pktid_item_t *locker;
+ uint32 map_items;
+ unsigned long flags;
+
+ map = (dhd_pktid_map_t *)handle;
+ DHD_PKTID_LOCK(map->pktid_lock, flags);
+
+ map_items = DHD_PKIDMAP_ITEMS(map->items);
+ /* skip reserved KEY #0, and start from 1 */
+ for (nkey = 1; nkey <= map_items; nkey++) {
+ if (map->lockers[nkey].state == LOCKER_IS_BUSY) {
+ dhd_dma_buf_t retbuf;
+
+#ifdef DHD_PKTID_AUDIT_RING
+ DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* duplicate frees */
+#endif /* DHD_PKTID_AUDIT_RING */
+
+ locker = &map->lockers[nkey];
+ retbuf.va = locker->pkt;
+ retbuf.len = locker->len;
+ retbuf.pa = locker->pa;
+ retbuf.dmah = locker->dmah;
+ retbuf.secdma = locker->secdma;
+
+ free_ioctl_return_buffer(dhd, &retbuf);
+ }
+ else {
+#ifdef DHD_PKTID_AUDIT_RING
+ DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
+#endif /* DHD_PKTID_AUDIT_RING */
+ }
+ map->keys[nkey] = nkey; /* populate with unique keys */
+ }
+
+ map->avail = map_items;
+ memset(&map->lockers[1], 0, sizeof(dhd_pktid_item_t) * map_items);
+ DHD_PKTID_UNLOCK(map->pktid_lock, flags);
+}
+#endif /* IOCTLRESP_USE_CONSTMEM */
+
+/**
+ * Free the pktid map.
+ */
+static void
+dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
+{
+ dhd_pktid_map_t *map;
+ uint32 dhd_pktid_map_sz;
+ uint32 map_keys_sz;
+
+ if (handle == NULL)
+ return;
+
+ /* Free any pending packets */
+ dhd_pktid_map_reset(dhd, handle);
+
+ map = (dhd_pktid_map_t *)handle;
+ dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items);
+ map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items);
+
+ DHD_PKTID_LOCK_DEINIT(dhd->osh, map->pktid_lock);
+
+#if defined(DHD_PKTID_AUDIT_ENABLED)
+ if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
+ bcm_mwbmap_fini(dhd->osh, map->pktid_audit); /* Destruct pktid_audit */
+ map->pktid_audit = (struct bcm_mwbmap *)NULL;
+ if (map->pktid_audit_lock) {
+ DHD_PKTID_AUDIT_LOCK_DEINIT(dhd->osh, map->pktid_audit_lock);
+ }
+ }
+#endif /* DHD_PKTID_AUDIT_ENABLED */
+ MFREE(dhd->osh, map->keys, map_keys_sz);
+ VMFREE(dhd->osh, handle, dhd_pktid_map_sz);
+}
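+
+/*
+ * Illustrative sketch, compiled out of all builds: the typical lifecycle of
+ * a pktid map. The guard macro and the "example_" function are hypothetical;
+ * the init signature is assumed to match the non-PKTID variant below.
+ */
+#ifdef DHD_PKTID_MAP_EXAMPLE
+static int
+example_pktid_map_lifecycle(dhd_pub_t *dhd)
+{
+ dhd_pktid_map_handle_t *handle;
+
+ /* One locker per outstanding packet; locker #0 stays reserved. */
+ handle = dhd_pktid_map_init(dhd, 64);
+ if (handle == NULL)
+ return BCME_NOMEM;
+
+ /* ... allocate/save/free pktids against 'handle' here ... */
+
+ /* fini resets the map (freeing pending packets) and releases it. */
+ dhd_pktid_map_fini(dhd, handle);
+ return BCME_OK;
+}
+#endif /* DHD_PKTID_MAP_EXAMPLE */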
+
+#ifdef IOCTLRESP_USE_CONSTMEM
+static void
+dhd_pktid_map_fini_ioctl(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
+{
+ dhd_pktid_map_t *map;
+ uint32 dhd_pktid_map_sz;
+ uint32 map_keys_sz;
+
+ if (handle == NULL)
+ return;
+
+ /* Free any pending packets */
+ dhd_pktid_map_reset_ioctl(dhd, handle);
+
+ map = (dhd_pktid_map_t *)handle;
+ dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items);
+ map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items);
+
+ DHD_PKTID_LOCK_DEINIT(dhd->osh, map->pktid_lock);
+
+#if defined(DHD_PKTID_AUDIT_ENABLED)
+ if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
+ bcm_mwbmap_fini(dhd->osh, map->pktid_audit); /* Destruct pktid_audit */
+ map->pktid_audit = (struct bcm_mwbmap *)NULL;
+ if (map->pktid_audit_lock) {
+ DHD_PKTID_AUDIT_LOCK_DEINIT(dhd->osh, map->pktid_audit_lock);
+ }
+ }
+#endif /* DHD_PKTID_AUDIT_ENABLED */
+
+ MFREE(dhd->osh, map->keys, map_keys_sz);
+ VMFREE(dhd->osh, handle, dhd_pktid_map_sz);
+}
+#endif /* IOCTLRESP_USE_CONSTMEM */
+
+/** Get the pktid free count */
+static INLINE uint32
+BCMFASTPATH(dhd_pktid_map_avail_cnt)(dhd_pktid_map_handle_t *handle)
+{
+ dhd_pktid_map_t *map;
+ uint32 avail;
+ unsigned long flags;
+
+ ASSERT(handle != NULL);
+ map = (dhd_pktid_map_t *)handle;
+
+ DHD_PKTID_LOCK(map->pktid_lock, flags);
+ avail = map->avail;
+ DHD_PKTID_UNLOCK(map->pktid_lock, flags);
+
+ return avail;
+}
+
+/**
+ * dhd_pktid_map_reserve - reserve a unique numbered key. Reserved locker is not
+ * yet populated. Invoke the pktid save api to populate the packet parameters
+ * into the locker. This function is not reentrant; serialization is the
+ * caller's responsibility. Caller must treat a returned value of
+ * DHD_PKTID_INVALID as a failure case, implying a depleted pool of pktids.
+ */
+static INLINE uint32
+dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
+ void *pkt, dhd_pkttype_t pkttype)
+{
+ uint32 nkey;
+ dhd_pktid_map_t *map;
+ dhd_pktid_item_t *locker;
+ unsigned long flags;
+
+ ASSERT(handle != NULL);
+ map = (dhd_pktid_map_t *)handle;
+
+ DHD_PKTID_LOCK(map->pktid_lock, flags);
+
+ if ((int)(map->avail) <= 0) { /* no more pktids to allocate */
+ map->failures++;
+ DHD_INFO(("%s:%d: failed, no free keys\n", __FUNCTION__, __LINE__));
+ DHD_PKTID_UNLOCK(map->pktid_lock, flags);
+ return DHD_PKTID_INVALID; /* failed alloc request */
+ }
+
+ ASSERT(map->avail <= map->items);
+ nkey = map->keys[map->avail]; /* fetch a free locker, pop stack */
+
+ if ((map->avail > map->items) || (nkey > map->items)) {
+ map->failures++;
+ DHD_ERROR(("%s:%d: failed to allocate a new pktid,"
+ " map->avail<%u>, nkey<%u>, pkttype<%u>\n",
+ __FUNCTION__, __LINE__, map->avail, nkey,
+ pkttype));
+ DHD_PKTID_UNLOCK(map->pktid_lock, flags);
+ return DHD_PKTID_INVALID; /* failed alloc request */
+ }
+
+ locker = &map->lockers[nkey]; /* save packet metadata in locker */
+ map->avail--;
+ locker->pkt = pkt; /* pkt is saved, other params not yet saved. */
+ locker->len = 0;
+ locker->state = LOCKER_IS_BUSY; /* reserve this locker */
+
+ DHD_PKTID_UNLOCK(map->pktid_lock, flags);
+
+ ASSERT(nkey != DHD_PKTID_INVALID);
+
+ return nkey; /* return locker's numbered key */
+}
+
+#ifdef DHD_PKTTS
+/*
+ * dhd_pktid_map_save_metadata - Save metadata information in a locker
+ * that has a reserved unique numbered key.
+ */
+static INLINE void
+dhd_pktid_map_save_metadata(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *mpkt,
+ dmaaddr_t mpkt_pa,
+ uint16 mpkt_len,
+ void *dmah,
+ uint32 nkey)
+{
+ dhd_pktid_map_t *map;
+ dhd_pktid_item_t *locker;
+ unsigned long flags;
+
+ ASSERT(handle != NULL);
+ map = (dhd_pktid_map_t *)handle;
+
+ DHD_PKTID_LOCK(map->pktid_lock, flags);
+
+ if ((nkey == DHD_PKTID_INVALID) || (nkey > DHD_PKIDMAP_ITEMS(map->items))) {
+ DHD_ERROR(("%s:%d: Error! saving invalid pktid<%u>",
+ __FUNCTION__, __LINE__, nkey));
+ DHD_PKTID_UNLOCK(map->pktid_lock, flags);
+#ifdef DHD_FW_COREDUMP
+ if (dhd->memdump_enabled) {
+ /* collect core dump */
+ dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
+ dhd_bus_mem_dump(dhd);
+ }
+#else
+ ASSERT(0);
+#endif /* DHD_FW_COREDUMP */
+ return;
+ }
+
+ locker = &map->lockers[nkey];
+
+ /*
+ * TODO: checking the locker state for BUSY will prevent
+ * us from storing meta data on an already allocated
+ * Locker. But not checking may lead to overwriting
+ * existing data.
+ */
+ locker->mpkt = mpkt;
+ locker->mpkt_pa = mpkt_pa;
+ locker->mpkt_len = mpkt_len;
+ locker->dmah = dmah;
+
+ DHD_PKTID_UNLOCK(map->pktid_lock, flags);
+}
+#endif /* DHD_PKTTS */
+
+/*
+ * dhd_pktid_map_save - Save a packet's parameters into a locker
+ * corresponding to a previously reserved unique numbered key.
+ */
+static INLINE void
+dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt,
+ uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dir, void *dmah, void *secdma,
+ dhd_pkttype_t pkttype)
+{
+ dhd_pktid_map_t *map;
+ dhd_pktid_item_t *locker;
+ unsigned long flags;
+
+ ASSERT(handle != NULL);
+ map = (dhd_pktid_map_t *)handle;
+
+ DHD_PKTID_LOCK(map->pktid_lock, flags);
+
+ if ((nkey == DHD_PKTID_INVALID) || (nkey > DHD_PKIDMAP_ITEMS(map->items))) {
+ DHD_ERROR(("%s:%d: Error! saving invalid pktid<%u> pkttype<%u>\n",
+ __FUNCTION__, __LINE__, nkey, pkttype));
+ DHD_PKTID_UNLOCK(map->pktid_lock, flags);
+#ifdef DHD_FW_COREDUMP
+ if (dhd->memdump_enabled) {
+ /* collect core dump */
+ dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
+ dhd_bus_mem_dump(dhd);
+ }
+#else
+ ASSERT(0);
+#endif /* DHD_FW_COREDUMP */
+ return;
+ }
+
+ locker = &map->lockers[nkey];
+
+ ASSERT(((locker->state == LOCKER_IS_BUSY) && (locker->pkt == pkt)) ||
+ ((locker->state == LOCKER_IS_RSVD) && (locker->pkt == NULL)));
+
+ /* store contents in locker */
+ locker->dir = dir;
+ locker->pa = pa;
+ locker->len = (uint16)len; /* 16bit len */
+ locker->dmah = dmah; /* dma mapping handle */
+ locker->secdma = secdma;
+ locker->pkttype = pkttype;
+ locker->pkt = pkt;
+ locker->state = LOCKER_IS_BUSY; /* make this locker busy */
+#ifdef DHD_MAP_PKTID_LOGGING
+ DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_map, pa, nkey, len, pkttype);
+#endif /* DHD_MAP_PKTID_LOGGING */
+ DHD_PKTID_UNLOCK(map->pktid_lock, flags);
+}
+
+/**
+ * dhd_pktid_map_alloc - Allocate a unique numbered key and save the packet
+ * contents into the corresponding locker. Return the numbered key.
+ */
+static uint32
+BCMFASTPATH(dhd_pktid_map_alloc)(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt,
+ dmaaddr_t pa, uint32 len, uint8 dir, void *dmah, void *secdma,
+ dhd_pkttype_t pkttype)
+{
+ uint32 nkey;
+
+ nkey = dhd_pktid_map_reserve(dhd, handle, pkt, pkttype);
+ if (nkey != DHD_PKTID_INVALID) {
+ dhd_pktid_map_save(dhd, handle, pkt, nkey, pa,
+ len, dir, dmah, secdma, pkttype);
+ }
+
+ return nkey;
+}
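+
+/*
+ * Sketch, compiled out of all builds (hypothetical guard and "example_"
+ * name): a caller obtaining a pktid via dhd_pktid_map_alloc() and handling
+ * pool depletion. DMA_TX is assumed as the direction for a tx packet.
+ */
+#ifdef DHD_PKTID_MAP_EXAMPLE
+static uint32
+example_pktid_alloc(dhd_pub_t *dhd, dhd_pktid_map_handle_t *h, void *pkt,
+ dmaaddr_t pa, uint32 len, void *dmah, void *secdma)
+{
+ uint32 nkey;
+
+ /* Reserve a key and save the packet parameters in one call. */
+ nkey = dhd_pktid_map_alloc(dhd, h, pkt, pa, len, DMA_TX, dmah,
+ secdma, PKTTYPE_DATA_TX);
+ if (nkey == DHD_PKTID_INVALID) {
+ /* pktid pool depleted: the caller must back off and retry */
+ }
+ return nkey;
+}
+#endif /* DHD_PKTID_MAP_EXAMPLE */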
+
+#ifdef DHD_PKTTS
+static void *
+BCMFASTPATH(dhd_pktid_map_retreive_metadata)(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
+ dmaaddr_t *pmpkt_pa,
+ uint32 *pmpkt_len,
+ void **pdmah,
+ uint32 nkey)
+{
+ dhd_pktid_map_t *map;
+ dhd_pktid_item_t *locker;
+ void *mpkt;
+ unsigned long flags;
+
+ ASSERT(handle != NULL);
+
+ map = (dhd_pktid_map_t *)handle;
+
+ DHD_PKTID_LOCK(map->pktid_lock, flags);
+
+ /* XXX PLEASE DO NOT remove this ASSERT, fix the bug in caller. */
+ if ((nkey == DHD_PKTID_INVALID) || (nkey > DHD_PKIDMAP_ITEMS(map->items))) {
+ DHD_ERROR(("%s:%d: Error! Try to free invalid pktid<%u>\n",
+ __FUNCTION__, __LINE__, nkey));
+ DHD_PKTID_UNLOCK(map->pktid_lock, flags);
+#ifdef DHD_FW_COREDUMP
+ if (dhd->memdump_enabled) {
+ /* collect core dump */
+ dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
+ dhd_bus_mem_dump(dhd);
+ }
+#else
+ ASSERT(0);
+#endif /* DHD_FW_COREDUMP */
+ return NULL;
+ }
+
+ locker = &map->lockers[nkey];
+ mpkt = locker->mpkt;
+ *pmpkt_pa = locker->mpkt_pa;
+ *pmpkt_len = locker->mpkt_len;
+ if (pdmah)
+ *pdmah = locker->dmah;
+ locker->mpkt = NULL;
+ locker->mpkt_len = 0;
+ locker->dmah = NULL;
+
+ DHD_PKTID_UNLOCK(map->pktid_lock, flags);
+ return mpkt;
+}
+#endif /* DHD_PKTTS */
+
+/**
+ * dhd_pktid_map_free - Given a numbered key, return the locker contents.
+ * dhd_pktid_map_free() is not reentrant; serialization is the caller's responsibility.
+ * Caller may not free a pktid value DHD_PKTID_INVALID or an arbitrary pktid
+ * value. Only a previously allocated pktid may be freed.
+ */
+static void *
+BCMFASTPATH(dhd_pktid_map_free)(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, uint32 nkey,
+ dmaaddr_t *pa, uint32 *len, void **dmah, void **secdma, dhd_pkttype_t pkttype,
+ bool rsv_locker)
+{
+ dhd_pktid_map_t *map;
+ dhd_pktid_item_t *locker;
+ void * pkt;
+ unsigned long long locker_addr;
+ unsigned long flags;
+
+ ASSERT(handle != NULL);
+
+ map = (dhd_pktid_map_t *)handle;
+
+ DHD_PKTID_LOCK(map->pktid_lock, flags);
+
+ /* XXX PLEASE DO NOT remove this ASSERT, fix the bug in caller. */
+ if ((nkey == DHD_PKTID_INVALID) || (nkey > DHD_PKIDMAP_ITEMS(map->items))) {
+ DHD_ERROR(("%s:%d: Error! Try to free invalid pktid<%u>, pkttype<%d>\n",
+ __FUNCTION__, __LINE__, nkey, pkttype));
+ DHD_PKTID_UNLOCK(map->pktid_lock, flags);
+#ifdef DHD_FW_COREDUMP
+ if (dhd->memdump_enabled) {
+ /* collect core dump */
+ dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
+ dhd_bus_mem_dump(dhd);
+ }
+#else
+ ASSERT(0);
+#endif /* DHD_FW_COREDUMP */
+ return NULL;
+ }
+
+ locker = &map->lockers[nkey];
+
+#if defined(DHD_PKTID_AUDIT_MAP)
+ DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* Audit duplicate FREE */
+#endif /* DHD_PKTID_AUDIT_MAP */
+
+ /* Debug check for cloned numbered key */
+ if (locker->state == LOCKER_IS_FREE) {
+ DHD_ERROR(("%s:%d: Error! freeing already freed invalid pktid<%u>\n",
+ __FUNCTION__, __LINE__, nkey));
+ DHD_PKTID_UNLOCK(map->pktid_lock, flags);
+ /* XXX PLEASE DO NOT remove this ASSERT, fix the bug in caller. */
+#ifdef DHD_FW_COREDUMP
+ if (dhd->memdump_enabled) {
+ /* collect core dump */
+ dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
+ dhd_bus_mem_dump(dhd);
+ }
+#else
+ ASSERT(0);
+#endif /* DHD_FW_COREDUMP */
+ return NULL;
+ }
+
+ /* Check the colour of the buffer, i.e. a buffer posted for TX
+ * should be freed on TX completion. Similarly, a buffer posted for
+ * IOCTL should be freed on IOCTL completion, etc.
+ */
+ if ((pkttype != PKTTYPE_NO_CHECK) && (locker->pkttype != pkttype)) {
+
+ DHD_ERROR(("%s:%d: Error! Invalid Buffer Free for pktid<%u> \n",
+ __FUNCTION__, __LINE__, nkey));
+#ifdef BCMDMA64OSL
+ PHYSADDRTOULONG(locker->pa, locker_addr);
+#else
+ locker_addr = PHYSADDRLO(locker->pa);
+#endif /* BCMDMA64OSL */
+ DHD_ERROR(("%s:%d: locker->state <%d>, locker->pkttype <%d>,"
+ "pkttype <%d> locker->pa <0x%llx> \n",
+ __FUNCTION__, __LINE__, locker->state, locker->pkttype,
+ pkttype, locker_addr));
+ DHD_PKTID_UNLOCK(map->pktid_lock, flags);
+#ifdef DHD_FW_COREDUMP
+ if (dhd->memdump_enabled) {
+ /* collect core dump */
+ dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
+ dhd_bus_mem_dump(dhd);
+ }
+#else
+ ASSERT(0);
+#endif /* DHD_FW_COREDUMP */
+ return NULL;
+ }
+
+ if (rsv_locker == DHD_PKTID_FREE_LOCKER) {
+ map->avail++;
+ map->keys[map->avail] = nkey; /* make this numbered key available */
+ locker->state = LOCKER_IS_FREE; /* open and free Locker */
+ } else {
+ /* pktid will be reused, but the locker does not have a valid pkt */
+ locker->state = LOCKER_IS_RSVD;
+ }
+
+#if defined(DHD_PKTID_AUDIT_MAP)
+ DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
+#endif /* DHD_PKTID_AUDIT_MAP */
+#ifdef DHD_MAP_PKTID_LOGGING
+ DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_unmap, locker->pa, nkey,
+ (uint32)locker->len, pkttype);
+#endif /* DHD_MAP_PKTID_LOGGING */
+
+ *pa = locker->pa; /* return contents of locker */
+ *len = (uint32)locker->len;
+ *dmah = locker->dmah;
+ *secdma = locker->secdma;
+
+ pkt = locker->pkt;
+ locker->pkt = NULL; /* Clear pkt */
+ locker->len = 0;
+
+ DHD_PKTID_UNLOCK(map->pktid_lock, flags);
+
+ return pkt;
+}
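+
+/*
+ * Sketch, compiled out of all builds (hypothetical guard and "example_"
+ * name): retrieving the locker contents on completion. Passing
+ * DHD_PKTID_FREE_LOCKER pushes the key back onto the free stack; the
+ * returned native packet is then unmapped and freed by the caller.
+ */
+#ifdef DHD_PKTID_MAP_EXAMPLE
+static void *
+example_pktid_release(dhd_pub_t *dhd, dhd_pktid_map_handle_t *h, uint32 nkey)
+{
+ dmaaddr_t pa;
+ uint32 len;
+ void *dmah, *secdma;
+
+ /* Only a previously allocated pktid may be freed here. */
+ return dhd_pktid_map_free(dhd, h, nkey, &pa, &len, &dmah, &secdma,
+ PKTTYPE_DATA_TX, DHD_PKTID_FREE_LOCKER);
+}
+#endif /* DHD_PKTID_MAP_EXAMPLE */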
+
+#else /* ! DHD_PCIE_PKTID */
+
+#ifndef linux
+#error "DHD_PCIE_PKTID has to be defined for non-linux/android platforms"
+#endif
+
+typedef struct pktlist {
+ PKT_LIST *tx_pkt_list; /* list for tx packets */
+ PKT_LIST *rx_pkt_list; /* list for rx packets */
+ PKT_LIST *ctrl_pkt_list; /* list for ioctl/event buf post */
+} pktlists_t;
+
+/*
+ * Given that each workitem only carries a 32bit pktid, only 32bit hosts may
+ * use a one-to-one mapping between a 32bit pktptr and a 32bit pktid.
+ *
+ * - When PKTIDMAP is not used, DHD_NATIVE_TO_PKTID variants will never fail.
+ * - Neither DHD_NATIVE_TO_PKTID nor DHD_PKTID_TO_NATIVE need to be protected by
+ * a lock.
+ * - Hence DHD_PKTID_INVALID is not defined when DHD_PCIE_PKTID is undefined.
+ */
+#define DHD_PKTID32(pktptr32) ((uint32)(pktptr32))
+#define DHD_PKTPTR32(pktid32) ((void *)(pktid32))
+
+static INLINE uint32 dhd_native_to_pktid(dhd_pktid_map_handle_t *map, void *pktptr32,
+ dmaaddr_t pa, uint32 dma_len, void *dmah, void *secdma,
+ dhd_pkttype_t pkttype);
+static INLINE void * dhd_pktid_to_native(dhd_pktid_map_handle_t *map, uint32 pktid32,
+ dmaaddr_t *pa, uint32 *dma_len, void **dmah, void **secdma,
+ dhd_pkttype_t pkttype);
+
+static dhd_pktid_map_handle_t *
+dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items)
+{
+ osl_t *osh = dhd->osh;
+ pktlists_t *handle = NULL;
+
+ if ((handle = (pktlists_t *) MALLOCZ(osh, sizeof(pktlists_t))) == NULL) {
+ DHD_ERROR(("%s:%d: MALLOC failed for lists allocation, size=%d\n",
+ __FUNCTION__, __LINE__, sizeof(pktlists_t)));
+ goto error_done;
+ }
+
+ if ((handle->tx_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) {
+ DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n",
+ __FUNCTION__, __LINE__, sizeof(PKT_LIST)));
+ goto error;
+ }
+
+ if ((handle->rx_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) {
+ DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n",
+ __FUNCTION__, __LINE__, sizeof(PKT_LIST)));
+ goto error;
+ }
+
+ if ((handle->ctrl_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) {
+ DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n",
+ __FUNCTION__, __LINE__, sizeof(PKT_LIST)));
+ goto error;
+ }
+
+ PKTLIST_INIT(handle->tx_pkt_list);
+ PKTLIST_INIT(handle->rx_pkt_list);
+ PKTLIST_INIT(handle->ctrl_pkt_list);
+
+ return (dhd_pktid_map_handle_t *) handle;
+
+error:
+ if (handle->ctrl_pkt_list) {
+ MFREE(osh, handle->ctrl_pkt_list, sizeof(PKT_LIST));
+ }
+
+ if (handle->rx_pkt_list) {
+ MFREE(osh, handle->rx_pkt_list, sizeof(PKT_LIST));
+ }
+
+ if (handle->tx_pkt_list) {
+ MFREE(osh, handle->tx_pkt_list, sizeof(PKT_LIST));
+ }
+
+ if (handle) {
+ MFREE(osh, handle, sizeof(pktlists_t));
+ }
+
+error_done:
+ return (dhd_pktid_map_handle_t *)NULL;
+}
+
+static void
+dhd_pktid_map_reset(dhd_pub_t *dhd, pktlists_t *handle)
+{
+ osl_t *osh = dhd->osh;
+
+ if (handle->ctrl_pkt_list) {
+ PKTLIST_FINI(handle->ctrl_pkt_list);
+ MFREE(osh, handle->ctrl_pkt_list, sizeof(PKT_LIST));
+ }
+
+ if (handle->rx_pkt_list) {
+ PKTLIST_FINI(handle->rx_pkt_list);
+ MFREE(osh, handle->rx_pkt_list, sizeof(PKT_LIST));
+ }
+
+ if (handle->tx_pkt_list) {
+ PKTLIST_FINI(handle->tx_pkt_list);
+ MFREE(osh, handle->tx_pkt_list, sizeof(PKT_LIST));
+ }
+}
+
+static void
+dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map)
+{
+ osl_t *osh = dhd->osh;
+ pktlists_t *handle = (pktlists_t *) map;
+
+ ASSERT(handle != NULL);
+ if (handle == (pktlists_t *)NULL) {
+ return;
+ }
+
+ dhd_pktid_map_reset(dhd, handle);
+
+ if (handle) {
+ MFREE(osh, handle, sizeof(pktlists_t));
+ }
+}
+
+/** Save dma parameters into the packet's pkttag and convert a pktptr to pktid */
+static INLINE uint32
+dhd_native_to_pktid(dhd_pktid_map_handle_t *map, void *pktptr32,
+ dmaaddr_t pa, uint32 dma_len, void *dmah, void *secdma,
+ dhd_pkttype_t pkttype)
+{
+ pktlists_t *handle = (pktlists_t *) map;
+ ASSERT(pktptr32 != NULL);
+ DHD_PKT_SET_DMA_LEN(pktptr32, dma_len);
+ DHD_PKT_SET_DMAH(pktptr32, dmah);
+ DHD_PKT_SET_PA(pktptr32, pa);
+ DHD_PKT_SET_SECDMA(pktptr32, secdma);
+
+ /* XXX optimize these branch conditionals */
+ if (pkttype == PKTTYPE_DATA_TX) {
+ PKTLIST_ENQ(handle->tx_pkt_list, pktptr32);
+ } else if (pkttype == PKTTYPE_DATA_RX) {
+ PKTLIST_ENQ(handle->rx_pkt_list, pktptr32);
+ } else {
+ PKTLIST_ENQ(handle->ctrl_pkt_list, pktptr32);
+ }
+
+ return DHD_PKTID32(pktptr32);
+}
+
+/** Convert a pktid to pktptr and retrieve saved dma parameters from packet */
+static INLINE void *
+dhd_pktid_to_native(dhd_pktid_map_handle_t *map, uint32 pktid32,
+ dmaaddr_t *pa, uint32 *dma_len, void **dmah, void **secdma,
+ dhd_pkttype_t pkttype)
+{
+ pktlists_t *handle = (pktlists_t *) map;
+ void *pktptr32;
+
+ ASSERT(pktid32 != 0U);
+ pktptr32 = DHD_PKTPTR32(pktid32);
+ *dma_len = DHD_PKT_GET_DMA_LEN(pktptr32);
+ *dmah = DHD_PKT_GET_DMAH(pktptr32);
+ *pa = DHD_PKT_GET_PA(pktptr32);
+ *secdma = DHD_PKT_GET_SECDMA(pktptr32);
+
+ /* XXX optimize these branch conditionals */
+ if (pkttype == PKTTYPE_DATA_TX) {
+ PKTLIST_UNLINK(handle->tx_pkt_list, pktptr32);
+ } else if (pkttype == PKTTYPE_DATA_RX) {
+ PKTLIST_UNLINK(handle->rx_pkt_list, pktptr32);
+ } else {
+ PKTLIST_UNLINK(handle->ctrl_pkt_list, pktptr32);
+ }
+
+ return pktptr32;
+}
+
+#define DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt, pkttype) DHD_PKTID32(pkt)
+
+#define DHD_NATIVE_TO_PKTID_SAVE(dhd, map, pkt, nkey, pa, len, dma_dir, dmah, secdma, pkttype) \
+ ({ BCM_REFERENCE(dhd); BCM_REFERENCE(nkey); BCM_REFERENCE(dma_dir); \
+ dhd_native_to_pktid((dhd_pktid_map_handle_t *) map, (pkt), (pa), (len), \
+ (dmah), (secdma), (dhd_pkttype_t)(pkttype)); \
+ })
+
+#define DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, dma_dir, dmah, secdma, pkttype) \
+ ({ BCM_REFERENCE(dhd); BCM_REFERENCE(dma_dir); \
+ dhd_native_to_pktid((dhd_pktid_map_handle_t *) map, (pkt), (pa), (len), \
+ (dmah), (secdma), (dhd_pkttype_t)(pkttype)); \
+ })
+
+#define DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \
+ ({ BCM_REFERENCE(dhd); BCM_REFERENCE(pkttype); \
+ dhd_pktid_to_native((dhd_pktid_map_handle_t *) map, (uint32)(pktid), \
+ (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \
+ (void **)&secdma, (dhd_pkttype_t)(pkttype)); \
+ })
+
+#define DHD_PKTID_AVAIL(map) (~0)
+
+#endif /* ! DHD_PCIE_PKTID */
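+
+/*
+ * Sketch, compiled out of all builds (hypothetical guard and "example_"
+ * name): without DHD_PCIE_PKTID the pktid is simply the 32bit packet
+ * pointer value, so DHD_PKTID32()/DHD_PKTPTR32() is a lossless round trip.
+ */
+#if defined(DHD_PKTID_MAP_EXAMPLE) && !defined(DHD_PCIE_PKTID)
+static void
+example_pktid32_roundtrip(void *pktptr32)
+{
+ uint32 pktid32 = DHD_PKTID32(pktptr32);
+
+ ASSERT(DHD_PKTPTR32(pktid32) == pktptr32); /* identity mapping */
+}
+#endif /* DHD_PKTID_MAP_EXAMPLE && !DHD_PCIE_PKTID */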
+
+/* +------------------ End of PCIE DHD PKTID MAPPER -----------------------+ */
+
+/*
+ * Allocating buffers for common rings.
+ * also allocating Buffers for hmaptest, Scratch buffer for dma rx offset,
+ * bus_throughput_measurement and snapshot upload
+ */
+static int
+dhd_prot_allocate_bufs(dhd_pub_t *dhd, dhd_prot_t *prot)
+{
+
+ /* Common Ring Allocations */
+
+ /* Ring 0: H2D Control Submission */
+ if (dhd_prot_ring_attach(dhd, &prot->h2dring_ctrl_subn, "h2dctrl",
+ H2DRING_CTRL_SUB_MAX_ITEM, H2DRING_CTRL_SUB_ITEMSIZE,
+ BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT) != BCME_OK) {
+ DHD_ERROR(("%s: dhd_prot_ring_attach H2D Ctrl Submission failed\n",
+ __FUNCTION__));
+ goto fail;
+ }
+
+ /* Ring 1: H2D Receive Buffer Post */
+ if (dhd_prot_ring_attach(dhd, &prot->h2dring_rxp_subn, "h2drxp",
+ H2DRING_RXPOST_MAX_ITEM, H2DRING_RXPOST_ITEMSIZE,
+ BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT) != BCME_OK) {
+ DHD_ERROR(("%s: dhd_prot_ring_attach H2D RxPost failed\n",
+ __FUNCTION__));
+ goto fail;
+ }
+
+ /* Ring 2: D2H Control Completion */
+ if (dhd_prot_ring_attach(dhd, &prot->d2hring_ctrl_cpln, "d2hctrl",
+ D2HRING_CTRL_CMPLT_MAX_ITEM, D2HRING_CTRL_CMPLT_ITEMSIZE,
+ BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE) != BCME_OK) {
+ DHD_ERROR(("%s: dhd_prot_ring_attach D2H Ctrl Completion failed\n",
+ __FUNCTION__));
+ goto fail;
+ }
+
+ /* Ring 3: D2H Transmit Complete */
+ if (dhd_prot_ring_attach(dhd, &prot->d2hring_tx_cpln, "d2htxcpl",
+ D2HRING_TXCMPLT_MAX_ITEM, D2HRING_TXCMPLT_ITEMSIZE,
+ BCMPCIE_D2H_MSGRING_TX_COMPLETE) != BCME_OK) {
+ DHD_ERROR(("%s: dhd_prot_ring_attach D2H Tx Completion failed\n",
+ __FUNCTION__));
+ goto fail;
+ }
+
+ /* Ring 4: D2H Receive Complete */
+ if (dhd_prot_ring_attach(dhd, &prot->d2hring_rx_cpln, "d2hrxcpl",
+ D2HRING_RXCMPLT_MAX_ITEM, D2HRING_RXCMPLT_ITEMSIZE,
+ BCMPCIE_D2H_MSGRING_RX_COMPLETE) != BCME_OK) {
+ DHD_ERROR(("%s: dhd_prot_ring_attach D2H Rx Completion failed\n",
+ __FUNCTION__));
+ goto fail;
+ }
+
+ /*
+ * Max number of flowrings is not yet known. msgbuf_ring_t with DMA-able
+ * buffers for flowrings will be instantiated, in dhd_prot_init() .
+ * See dhd_prot_flowrings_pool_attach()
+ */
+ /* ioctl response buffer */
+ if (dhd_dma_buf_alloc(dhd, &prot->retbuf, IOCT_RETBUF_SIZE)) {
+ goto fail;
+ }
+
+ /* IOCTL request buffer */
+ if (dhd_dma_buf_alloc(dhd, &prot->ioctbuf, IOCT_RETBUF_SIZE)) {
+ goto fail;
+ }
+
+ /* Host TS request buffer one buffer for now */
+ if (dhd_dma_buf_alloc(dhd, &prot->hostts_req_buf, CTRLSUB_HOSTTS_MEESAGE_SIZE)) {
+ goto fail;
+ }
+ prot->hostts_req_buf_inuse = FALSE;
+
+ /* Scratch buffer for dma rx offset */
+#ifdef BCM_HOST_BUF
+ if (dhd_dma_buf_alloc(dhd, &prot->d2h_dma_scratch_buf,
+ ROUNDUP(DMA_D2H_SCRATCH_BUF_LEN, 16) + DMA_HOST_BUFFER_LEN))
+#else
+ if (dhd_dma_buf_alloc(dhd, &prot->d2h_dma_scratch_buf, DMA_D2H_SCRATCH_BUF_LEN))
+#endif /* BCM_HOST_BUF */
+ {
+ goto fail;
+ }
+
+#ifdef DHD_HMAPTEST
+ /* Allocate buffer for hmaptest */
+ DHD_ERROR(("allocating memory for hmaptest \n"));
+ if (dhd_dma_buf_alloc(dhd, &prot->hmaptest.mem, HMAP_SANDBOX_BUFFER_LEN)) {
+ goto fail;
+ } else {
+ uint32 scratch_len;
+ uint64 scratch_lin, w1_start;
+ dmaaddr_t scratch_pa;
+
+ scratch_pa = prot->hmaptest.mem.pa;
+ scratch_len = prot->hmaptest.mem.len;
+ scratch_lin = (uint64)(PHYSADDRLO(scratch_pa) & 0xffffffff)
+ | (((uint64)PHYSADDRHI(scratch_pa) & 0xffffffff) << 32);
+ w1_start = scratch_lin + scratch_len;
+ DHD_ERROR(("hmap: NOTE Buffer alloc for HMAPTEST Start=0x%0llx len=0x%08x "
+ "End=0x%0llx\n", (uint64) scratch_lin, scratch_len, (uint64) w1_start));
+ }
+#endif /* DHD_HMAPTEST */
+
+ /* scratch buffer bus throughput measurement */
+ if (dhd_dma_buf_alloc(dhd, &prot->host_bus_throughput_buf, DHD_BUS_TPUT_BUF_LEN)) {
+ goto fail;
+ }
+
+#ifdef SNAPSHOT_UPLOAD
+ /* snapshot upload buffer */
+ if (dhd_dma_buf_alloc(dhd, &prot->snapshot_upload_buf, SNAPSHOT_UPLOAD_BUF_SIZE)) {
+ goto fail;
+ }
+#endif /* SNAPSHOT_UPLOAD */
+
+ return BCME_OK;
+
+fail:
+ return BCME_NOMEM;
+}
+
+/**
+ * The PCIE FD protocol layer is constructed in two phases:
+ * Phase 1. dhd_prot_attach()
+ * Phase 2. dhd_prot_init()
+ *
+ * dhd_prot_attach() - Allocates a dhd_prot_t object and resets all its fields.
+ * All Common rings are also attached (msgbuf_ring_t objects are allocated
+ * with DMA-able buffers).
+ * All dhd_dma_buf_t objects are also allocated here.
+ *
+ * As dhd_prot_attach is invoked before the pcie_shared object is read, any
+ * initialization of objects that requires information advertized by the dongle
+ * may not be performed here.
+ * E.g. the number of TxPost flowrings is not known at this point, nor do
+ * we know which form of D2H DMA sync mechanism is advertized by the dongle, or
+ * whether the dongle supports DMA-ing of WR/RD indices for the H2D and/or D2H
+ * rings (common + flow).
+ *
+ * dhd_prot_init() is invoked after the bus layer has fetched the information
+ * advertized by the dongle in the pcie_shared_t.
+ */
+int
+dhd_prot_attach(dhd_pub_t *dhd)
+{
+ osl_t *osh = dhd->osh;
+ dhd_prot_t *prot;
+ uint32 trap_buf_len;
+
+ /* Allocate prot structure */
+ if (!(prot = (dhd_prot_t *)DHD_OS_PREALLOC(dhd, DHD_PREALLOC_PROT,
+ sizeof(dhd_prot_t)))) {
+ DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
+ goto fail;
+ }
+ memset(prot, 0, sizeof(*prot));
+
+ prot->osh = osh;
+ dhd->prot = prot;
+
+ /* DMAing ring completes supported? FALSE by default */
+ dhd->dma_d2h_ring_upd_support = FALSE;
+ dhd->dma_h2d_ring_upd_support = FALSE;
+ dhd->dma_ring_upd_overwrite = FALSE;
+
+ dhd->idma_inited = 0;
+ dhd->ifrm_inited = 0;
+ dhd->dar_inited = 0;
+
+ if (dhd_prot_allocate_bufs(dhd, prot) != BCME_OK) {
+ goto fail;
+ }
+
+#ifdef DHD_RX_CHAINING
+ dhd_rxchain_reset(&prot->rxchain);
+#endif
+
+ prot->pktid_ctrl_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_PKTID_CTRL);
+ if (prot->pktid_ctrl_map == NULL) {
+ goto fail;
+ }
+
+ prot->pktid_rx_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_PKTID_RX);
+ if (prot->pktid_rx_map == NULL)
+ goto fail;
+
+ prot->pktid_tx_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_PKTID_TX);
+ if (prot->pktid_tx_map == NULL)
+ goto fail;
+
+#ifdef IOCTLRESP_USE_CONSTMEM
+ prot->pktid_map_handle_ioctl = DHD_NATIVE_TO_PKTID_INIT(dhd,
+ DHD_FLOWRING_MAX_IOCTLRESPBUF_POST);
+ if (prot->pktid_map_handle_ioctl == NULL) {
+ goto fail;
+ }
+#endif /* IOCTLRESP_USE_CONSTMEM */
+
+#ifdef DHD_MAP_PKTID_LOGGING
+ prot->pktid_dma_map = DHD_PKTID_LOG_INIT(dhd, MAX_PKTID_LOG);
+ if (prot->pktid_dma_map == NULL) {
+ DHD_ERROR(("%s: failed to allocate pktid_dma_map\n",
+ __FUNCTION__));
+ }
+
+ prot->pktid_dma_unmap = DHD_PKTID_LOG_INIT(dhd, MAX_PKTID_LOG);
+ if (prot->pktid_dma_unmap == NULL) {
+ DHD_ERROR(("%s: failed to allocate pktid_dma_unmap\n",
+ __FUNCTION__));
+ }
+#endif /* DHD_MAP_PKTID_LOGGING */
+
+#ifdef D2H_MINIDUMP
+ if (dhd->bus->sih->buscorerev < 71) {
+ trap_buf_len = BCMPCIE_HOST_EXT_TRAP_DBGBUF_LEN_MIN;
+ } else {
+ /* buscorerev >= 71, supports minidump of len 96KB */
+ trap_buf_len = BCMPCIE_HOST_EXT_TRAP_DBGBUF_LEN;
+ }
+#else
+ /* FW is going to DMA extended trap data,
+ * allocate buffer for the maximum extended trap data.
+ */
+ trap_buf_len = BCMPCIE_EXT_TRAP_DATA_MAXLEN;
+#endif /* D2H_MINIDUMP */
+
+ /* Initialize trap buffer */
+ if (dhd_dma_buf_alloc(dhd, &dhd->prot->fw_trap_buf, trap_buf_len)) {
+ DHD_ERROR(("%s: dhd_init_trap_buffer falied\n", __FUNCTION__));
+ goto fail;
+ }
+
+ return BCME_OK;
+
+fail:
+
+ if (prot) {
+ /* Free up all allocated memories */
+ dhd_prot_detach(dhd);
+ }
+
+ return BCME_NOMEM;
+} /* dhd_prot_attach */
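+
+/*
+ * Illustrative call order (informational comment only): dhd_prot_attach()
+ * at bus attach time, dhd_prot_init() once pcie_shared has been read,
+ * dhd_prot_reset() on a dongle soft reboot (after which dhd_prot_init()
+ * may run again), and dhd_prot_detach() at teardown.
+ */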
+
+static int
+dhd_alloc_host_scbs(dhd_pub_t *dhd)
+{
+ int ret = BCME_OK;
+ sh_addr_t base_addr;
+ dhd_prot_t *prot = dhd->prot;
+ uint32 host_scb_size = 0;
+
+ if (dhd->hscb_enable) {
+ /* read number of bytes to allocate from F/W */
+ dhd_bus_cmn_readshared(dhd->bus, &host_scb_size, HOST_SCB_ADDR, 0);
+ if (host_scb_size) {
+ /* In a fw reload scenario the buffer could have been allocated for a
+ * previous run. Check whether an existing buffer can accommodate the
+ * new firmware requirement and reuse it if possible.
+ */
+ if (prot->host_scb_buf.va) {
+ if (prot->host_scb_buf.len >= host_scb_size) {
+ prot->host_scb_buf.len = host_scb_size;
+ } else {
+ dhd_dma_buf_free(dhd, &prot->host_scb_buf);
+ }
+ }
+ /* alloc array of host scbs */
+ if (prot->host_scb_buf.va == NULL) {
+ ret = dhd_dma_buf_alloc(dhd, &prot->host_scb_buf, host_scb_size);
+ }
+ /* write host scb address to F/W */
+ if (ret == BCME_OK) {
+ dhd_base_addr_htolpa(&base_addr, prot->host_scb_buf.pa);
+ dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
+ HOST_SCB_ADDR, 0);
+ }
+ }
+ } else {
+ DHD_TRACE(("%s: Host scb not supported in F/W. \n", __FUNCTION__));
+ }
+
+ if (ret != BCME_OK) {
+ DHD_ERROR(("%s dhd_alloc_host_scbs, alloc failed: Err Code %d\n",
+ __FUNCTION__, ret));
+ }
+ return ret;
+}
+
+void
+dhd_set_host_cap(dhd_pub_t *dhd)
+{
+ uint32 data = 0;
+ dhd_prot_t *prot = dhd->prot;
+#ifdef D2H_MINIDUMP
+ uint16 host_trap_addr_len;
+#endif /* D2H_MINIDUMP */
+
+ if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6) {
+ if (dhd->h2d_phase_supported) {
+ data |= HOSTCAP_H2D_VALID_PHASE;
+ if (dhd->force_dongletrap_on_bad_h2d_phase)
+ data |= HOSTCAP_H2D_ENABLE_TRAP_ON_BADPHASE;
+ }
+ if (prot->host_ipc_version > prot->device_ipc_version)
+ prot->active_ipc_version = prot->device_ipc_version;
+ else
+ prot->active_ipc_version = prot->host_ipc_version;
+
+ data |= prot->active_ipc_version;
+
+ if (dhdpcie_bus_get_pcie_hostready_supported(dhd->bus)) {
+ DHD_INFO(("Advertise Hostready Capability\n"));
+ data |= HOSTCAP_H2D_ENABLE_HOSTRDY;
+ }
+#ifdef PCIE_INB_DW
+ if (dhdpcie_bus_get_pcie_inband_dw_supported(dhd->bus)) {
+ DHD_INFO(("Advertise Inband-DW Capability\n"));
+ data |= HOSTCAP_DS_INBAND_DW;
+ data |= HOSTCAP_DS_NO_OOB_DW;
+ dhdpcie_bus_enab_pcie_dw(dhd->bus, DEVICE_WAKE_INB);
+ if (!dhd->dma_h2d_ring_upd_support || !dhd->dma_d2h_ring_upd_support) {
+ dhd_init_dongle_ds_lock(dhd->bus);
+ dhdpcie_set_dongle_deepsleep(dhd->bus, FALSE);
+ }
+ } else
+#endif /* PCIE_INB_DW */
+#ifdef PCIE_OOB
+ if (dhdpcie_bus_get_pcie_oob_dw_supported(dhd->bus)) {
+ dhdpcie_bus_enab_pcie_dw(dhd->bus, DEVICE_WAKE_OOB);
+ } else
+#endif /* PCIE_OOB */
+ {
+ /* Disable DS altogether */
+ data |= HOSTCAP_DS_NO_OOB_DW;
+ dhdpcie_bus_enab_pcie_dw(dhd->bus, DEVICE_WAKE_NONE);
+ }
+
+ /* Indicate support for extended trap data */
+ data |= HOSTCAP_EXTENDED_TRAP_DATA;
+
+ /* Indicate support for TX status metadata */
+ if (dhd->pcie_txs_metadata_enable != 0)
+ data |= HOSTCAP_TXSTATUS_METADATA;
+
+#ifdef BTLOG
+ /* Indicate support for BT logging */
+ if (dhd->bt_logging) {
+ if (dhd->bt_logging_enabled) {
+ data |= HOSTCAP_BT_LOGGING;
+ DHD_ERROR(("BT LOGGING enabled\n"));
+ }
+ else {
+ DHD_ERROR(("BT logging upported in FW, BT LOGGING disabled\n"));
+ }
+ }
+ else {
+ DHD_ERROR(("BT LOGGING not enabled in FW !!\n"));
+ }
+#endif /* BTLOG */
+
+ /* Enable fast delete ring in firmware if supported */
+ if (dhd->fast_delete_ring_support) {
+ data |= HOSTCAP_FAST_DELETE_RING;
+ }
+
+ if (dhdpcie_bus_get_pcie_idma_supported(dhd->bus)) {
+ DHD_ERROR(("IDMA inited\n"));
+ data |= HOSTCAP_H2D_IDMA;
+ dhd->idma_inited = TRUE;
+ } else {
+ DHD_ERROR(("IDMA not enabled in FW !!\n"));
+ dhd->idma_inited = FALSE;
+ }
+
+ if (dhdpcie_bus_get_pcie_ifrm_supported(dhd->bus)) {
+ DHD_ERROR(("IFRM Inited\n"));
+ data |= HOSTCAP_H2D_IFRM;
+ dhd->ifrm_inited = TRUE;
+ dhd->dma_h2d_ring_upd_support = FALSE;
+ dhd_prot_dma_indx_free(dhd);
+ } else {
+ DHD_ERROR(("IFRM not enabled in FW !!\n"));
+ dhd->ifrm_inited = FALSE;
+ }
+
+ if (dhdpcie_bus_get_pcie_dar_supported(dhd->bus)) {
+ DHD_ERROR(("DAR doorbell Use\n"));
+ data |= HOSTCAP_H2D_DAR;
+ dhd->dar_inited = TRUE;
+ } else {
+ DHD_ERROR(("DAR not enabled in FW !!\n"));
+ dhd->dar_inited = FALSE;
+ }
+
+ /* FW Checks for HOSTCAP_UR_FW_NO_TRAP and Does not TRAP if set
+ * Radar 36403220 JIRA SWWLAN-182145
+ */
+ data |= HOSTCAP_UR_FW_NO_TRAP;
+
+#ifdef SNAPSHOT_UPLOAD
+ /* Indicate support for snapshot upload */
+ if (dhd->snapshot_upload) {
+ data |= HOSTCAP_SNAPSHOT_UPLOAD;
+ DHD_ERROR(("ALLOW SNAPSHOT UPLOAD!!\n"));
+ }
+#endif /* SNAPSHOT_UPLOAD */
+
+ if (dhd->hscb_enable) {
+ data |= HOSTCAP_HSCB;
+ }
+
+#ifdef EWP_EDL
+ if (dhd->dongle_edl_support) {
+ data |= HOSTCAP_EDL_RING;
+ DHD_ERROR(("Enable EDL host cap\n"));
+ } else {
+ DHD_ERROR(("DO NOT SET EDL host cap\n"));
+ }
+#endif /* EWP_EDL */
+
+#ifdef D2H_MINIDUMP
+ if (dhd_bus_is_minidump_enabled(dhd)) {
+ data |= HOSTCAP_EXT_TRAP_DBGBUF;
+ DHD_ERROR(("ALLOW D2H MINIDUMP!!\n"));
+ }
+#endif /* D2H_MINIDUMP */
+#ifdef DHD_HP2P
+ if (dhdpcie_bus_get_hp2p_supported(dhd->bus)) {
+ data |= HOSTCAP_PKT_TIMESTAMP;
+ data |= HOSTCAP_PKT_HP2P;
+ DHD_ERROR(("Enable HP2P in host cap\n"));
+ } else {
+ DHD_ERROR(("HP2P not enabled in host cap\n"));
+ }
+#endif /* DHD_HP2P */
+
+#ifdef DHD_DB0TS
+ if (dhd->db0ts_capable) {
+ data |= HOSTCAP_DB0_TIMESTAMP;
+ DHD_ERROR(("Enable DB0 TS in host cap\n"));
+ } else {
+ DHD_ERROR(("DB0 TS not enabled in host cap\n"));
+ }
+#endif /* DHD_DB0TS */
+ if (dhd->extdtxs_in_txcpl) {
+ DHD_ERROR(("Enable hostcap: EXTD TXS in txcpl\n"));
+ data |= HOSTCAP_PKT_TXSTATUS;
+ }
+ else {
+ DHD_ERROR(("Enable hostcap: EXTD TXS in txcpl\n"));
+ }
+
+ DHD_INFO(("%s:Active Ver:%d, Host Ver:%d, FW Ver:%d\n",
+ __FUNCTION__,
+ prot->active_ipc_version, prot->host_ipc_version,
+ prot->device_ipc_version));
+
+ dhd_bus_cmn_writeshared(dhd->bus, &data, sizeof(uint32), HOST_API_VERSION, 0);
+ dhd_bus_cmn_writeshared(dhd->bus, &prot->fw_trap_buf.pa,
+ sizeof(prot->fw_trap_buf.pa), DNGL_TO_HOST_TRAP_ADDR, 0);
+#ifdef D2H_MINIDUMP
+ if (dhd_bus_is_minidump_enabled(dhd)) {
+ /* Dongle expects the host_trap_addr_len in terms of words */
+ host_trap_addr_len = prot->fw_trap_buf.len / 4;
+ dhd_bus_cmn_writeshared(dhd->bus, &host_trap_addr_len,
+ sizeof(host_trap_addr_len), DNGL_TO_HOST_TRAP_ADDR_LEN, 0);
+ }
+#endif /* D2H_MINIDUMP */
+ }
+
+#ifdef DHD_TIMESYNC
+ dhd_timesync_notify_ipc_rev(dhd->ts, prot->active_ipc_version);
+#endif /* DHD_TIMESYNC */
+}
+
+#ifdef AGG_H2D_DB
+void dhd_agg_inflight_stats_dump(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
+{
+ uint64 *inflight_histo = dhd->prot->agg_h2d_db_info.inflight_histo;
+ uint32 i;
+ uint64 total_inflight_histo = 0;
+
+ bcm_bprintf(strbuf, "inflight: \t count\n");
+ for (i = 0; i < DHD_NUM_INFLIGHT_HISTO_ROWS; i++) {
+ bcm_bprintf(strbuf, "%16u: \t %llu\n", 1U<<i, inflight_histo[i]);
+ total_inflight_histo += inflight_histo[i];
+ }
+ bcm_bprintf(strbuf, "total_inflight_histo: %llu\n", total_inflight_histo);
+}
+
+void dhd_agg_inflights_stats_update(dhd_pub_t *dhd, uint32 inflight)
+{
+ uint64 *bin = dhd->prot->agg_h2d_db_info.inflight_histo;
+ uint64 *p;
+ uint32 bin_power;
+ bin_power = next_larger_power2(inflight);
+
+ switch (bin_power) {
+ case 1: p = bin + 0; break;
+ case 2: p = bin + 1; break;
+ case 4: p = bin + 2; break;
+ case 8: p = bin + 3; break;
+ case 16: p = bin + 4; break;
+ case 32: p = bin + 5; break;
+ case 64: p = bin + 6; break;
+ case 128: p = bin + 7; break;
+ case 256: p = bin + 8; break;
+ case 512: p = bin + 9; break;
+ case 1024: p = bin + 10; break;
+ case 2048: p = bin + 11; break;
+ case 4096: p = bin + 12; break;
+ case 8192: p = bin + 13; break;
+ default: p = bin + 13; break;
+ }
+ ASSERT((p - bin) < DHD_NUM_INFLIGHT_HISTO_ROWS);
+ *p = *p + 1;
+ return;
+}
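+
+/*
+ * Equivalent binning sketch, compiled out of all builds (hypothetical guard
+ * and "example_" name): the switch above maps next_larger_power2(inflight)
+ * to its log2, clamped to the last histogram row. This assumes the last row
+ * index is DHD_NUM_INFLIGHT_HISTO_ROWS - 1.
+ */
+#ifdef DHD_AGG_HISTO_EXAMPLE
+static uint32
+example_inflight_bin(uint32 bin_power)
+{
+ uint32 idx = 0;
+
+ while ((bin_power >>= 1) != 0)
+ idx++; /* idx = log2 of the original bin_power */
+
+ return (idx < DHD_NUM_INFLIGHT_HISTO_ROWS) ?
+ idx : (DHD_NUM_INFLIGHT_HISTO_ROWS - 1);
+}
+#endif /* DHD_AGG_HISTO_EXAMPLE */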
+
+/*
+ * dhd_msgbuf_agg_h2d_db_timer_fn:
+ * Timer callback function for ringing h2d DB.
+ * This is run in isr context (HRTIMER_MODE_REL),
+ * do not hold any spin_lock_bh().
+ * Using HRTIMER_MODE_REL_SOFT causes TPUT regressions.
+ */
+enum hrtimer_restart
+dhd_msgbuf_agg_h2d_db_timer_fn(struct hrtimer *timer)
+{
+ agg_h2d_db_info_t *agg_db_info;
+ dhd_pub_t *dhd;
+ dhd_prot_t *prot;
+ uint32 db_index;
+ uint corerev;
+
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ agg_db_info = container_of(timer, agg_h2d_db_info_t, timer);
+ GCC_DIAGNOSTIC_POP();
+
+ dhd = agg_db_info->dhd;
+ prot = dhd->prot;
+
+ prot->agg_h2d_db_info.timer_db_cnt++;
+ if (IDMA_ACTIVE(dhd)) {
+ db_index = IDMA_IDX0;
+ if (dhd->bus->sih) {
+ corerev = dhd->bus->sih->buscorerev;
+ if (corerev >= 24) {
+ db_index |= (DMA_TYPE_IDMA << DMA_TYPE_SHIFT);
+ }
+ }
+ prot->mb_2_ring_fn(dhd->bus, db_index, TRUE);
+ } else {
+ prot->mb_ring_fn(dhd->bus, DHD_AGGR_H2D_DB_MAGIC);
+ }
+
+ return HRTIMER_NORESTART;
+}
+
+void
+dhd_msgbuf_agg_h2d_db_timer_start(dhd_prot_t *prot)
+{
+ agg_h2d_db_info_t *agg_db_info = &prot->agg_h2d_db_info;
+
+ /* Queue the timer only when it is not in the queue */
+ if (!hrtimer_active(&agg_db_info->timer)) {
+ hrtimer_start(&agg_db_info->timer, ns_to_ktime(agg_h2d_db_timeout * NSEC_PER_USEC),
+ HRTIMER_MODE_REL);
+ }
+}
+
+static void
+dhd_msgbuf_agg_h2d_db_timer_init(dhd_pub_t *dhd)
+{
+ dhd_prot_t *prot = dhd->prot;
+ agg_h2d_db_info_t *agg_db_info = &prot->agg_h2d_db_info;
+
+ agg_db_info->dhd = dhd;
+ hrtimer_init(&agg_db_info->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ /* The timer function will run from ISR context, ensure no spin_lock_bh are used */
+ agg_db_info->timer.function = &dhd_msgbuf_agg_h2d_db_timer_fn;
+ agg_db_info->init = TRUE;
+ agg_db_info->timer_db_cnt = 0;
+ agg_db_info->direct_db_cnt = 0;
+ agg_db_info->inflight_histo = (uint64 *)MALLOCZ(dhd->osh, DHD_INFLIGHT_HISTO_SIZE);
+}
+
+static void
+dhd_msgbuf_agg_h2d_db_timer_reset(dhd_pub_t *dhd)
+{
+ dhd_prot_t *prot = dhd->prot;
+ agg_h2d_db_info_t *agg_db_info = &prot->agg_h2d_db_info;
+ if (agg_db_info->init) {
+ if (agg_db_info->inflight_histo) {
+ MFREE(dhd->osh, agg_db_info->inflight_histo, DHD_INFLIGHT_HISTO_SIZE);
+ }
+ hrtimer_try_to_cancel(&agg_db_info->timer);
+ agg_db_info->init = FALSE;
+ }
+}
+
+static void
+dhd_msgbuf_agg_h2d_db_timer_cancel(dhd_pub_t *dhd)
+{
+ dhd_prot_t *prot = dhd->prot;
+ agg_h2d_db_info_t *agg_db_info = &prot->agg_h2d_db_info;
+ hrtimer_try_to_cancel(&agg_db_info->timer);
+}
+#endif /* AGG_H2D_DB */
+
+void
+dhd_prot_clearcounts(dhd_pub_t *dhd)
+{
+ dhd_prot_t *prot = dhd->prot;
+#ifdef AGG_H2D_DB
+ agg_h2d_db_info_t *agg_db_info = &prot->agg_h2d_db_info;
+ if (agg_db_info->inflight_histo) {
+ memset(agg_db_info->inflight_histo, 0, DHD_INFLIGHT_HISTO_SIZE);
+ }
+ agg_db_info->direct_db_cnt = 0;
+ agg_db_info->timer_db_cnt = 0;
+#endif /* AGG_H2D_DB */
+ prot->txcpl_db_cnt = 0;
+ prot->tx_h2d_db_cnt = 0;
+}
+
+/**
+ * dhd_prot_init - second stage of dhd_prot_attach. Now that the dongle has
+ * completed it's initialization of the pcie_shared structure, we may now fetch
+ * the dongle advertized features and adjust the protocol layer accordingly.
+ *
+ * dhd_prot_init() may be invoked again after a dhd_prot_reset().
+ */
+int
+dhd_prot_init(dhd_pub_t *dhd)
+{
+ sh_addr_t base_addr;
+ dhd_prot_t *prot = dhd->prot;
+ int ret = 0;
+ uint32 idmacontrol;
+ uint32 waitcount = 0;
+ uint16 max_eventbufpost = 0;
+
+ /**
+ * A user defined value can be assigned to global variable h2d_max_txpost via
+ * 1. DHD IOVAR h2d_max_txpost, before firmware download
+ * 2. module parameter h2d_max_txpost
+ * prot->h2d_max_txpost defaults to DHD_H2DRING_TXPOST_MAX_ITEM
+ * if the user has not set a value by one of the above methods.
+ */
+ prot->h2d_max_txpost = (uint16)h2d_max_txpost;
+ DHD_ERROR(("%s:%d: h2d_max_txpost = %d\n", __FUNCTION__, __LINE__, prot->h2d_max_txpost));
+
+#if defined(DHD_HTPUT_TUNABLES)
+ prot->h2d_htput_max_txpost = (uint16)h2d_htput_max_txpost;
+ DHD_ERROR(("%s:%d: h2d_htput_max_txpost = %d\n",
+ __FUNCTION__, __LINE__, prot->h2d_htput_max_txpost));
+#endif /* DHD_HTPUT_TUNABLES */
+
+ /* Read max rx packets supported by dongle */
+ dhd_bus_cmn_readshared(dhd->bus, &prot->max_rxbufpost, MAX_HOST_RXBUFS, 0);
+ if (prot->max_rxbufpost == 0) {
+ /* This would happen if the dongle firmware is not */
+ /* using the latest shared structure template */
+ prot->max_rxbufpost = DEFAULT_RX_BUFFERS_TO_POST;
+ }
+ DHD_ERROR(("%s:%d: MAX_RXBUFPOST = %d\n", __FUNCTION__, __LINE__, prot->max_rxbufpost));
+
+ /* Initialize. bzero() would blow away the dma pointers. */
+ max_eventbufpost = (uint16)dhdpcie_get_max_eventbufpost(dhd->bus);
+ prot->max_eventbufpost = (((max_eventbufpost + DHD_FLOWRING_MAX_IOCTLRESPBUF_POST)) >=
+ H2DRING_CTRL_SUB_MAX_ITEM) ? DHD_FLOWRING_MAX_EVENTBUF_POST : max_eventbufpost;
+ prot->max_ioctlrespbufpost = DHD_FLOWRING_MAX_IOCTLRESPBUF_POST;
+ prot->max_infobufpost = DHD_H2D_INFORING_MAX_BUF_POST;
+#ifdef BTLOG
+ prot->max_btlogbufpost = DHD_H2D_BTLOGRING_MAX_BUF_POST;
+#endif /* BTLOG */
+ prot->max_tsbufpost = DHD_MAX_TSBUF_POST;
+
+ prot->cur_ioctlresp_bufs_posted = 0;
+ OSL_ATOMIC_INIT(dhd->osh, &prot->active_tx_count);
+ prot->data_seq_no = 0;
+ prot->ioctl_seq_no = 0;
+ prot->rxbufpost = 0;
+ prot->tot_rxbufpost = 0;
+ prot->tot_rxcpl = 0;
+ prot->cur_event_bufs_posted = 0;
+ prot->ioctl_state = 0;
+ prot->curr_ioctl_cmd = 0;
+ prot->cur_ts_bufs_posted = 0;
+ prot->infobufpost = 0;
+#ifdef BTLOG
+ prot->btlogbufpost = 0;
+#endif /* BTLOG */
+
+ prot->dmaxfer.srcmem.va = NULL;
+ prot->dmaxfer.dstmem.va = NULL;
+ prot->dmaxfer.in_progress = FALSE;
+
+#ifdef DHD_HMAPTEST
+ prot->hmaptest.in_progress = FALSE;
+#endif /* DHD_HMAPTEST */
+ prot->metadata_dbg = FALSE;
+ prot->rx_metadata_offset = 0;
+ prot->tx_metadata_offset = 0;
+ prot->txp_threshold = TXP_FLUSH_MAX_ITEMS_FLUSH_CNT;
+
+ /* To catch any rollover issues fast, starting with higher ioctl_trans_id */
+ prot->ioctl_trans_id = MAXBITVAL(NBITS(prot->ioctl_trans_id)) - BUFFER_BEFORE_ROLLOVER;
+ prot->ioctl_state = 0;
+ prot->ioctl_status = 0;
+ prot->ioctl_resplen = 0;
+ prot->ioctl_received = IOCTL_WAIT;
+
+ /* Initialize Common MsgBuf Rings */
+
+ prot->device_ipc_version = dhd->bus->api.fw_rev;
+ prot->host_ipc_version = PCIE_SHARED_VERSION;
+ prot->no_tx_resource = FALSE;
+
+ /* Init the host API version */
+ dhd_set_host_cap(dhd);
+
+ /* alloc and configure scb host address for dongle */
+ if ((ret = dhd_alloc_host_scbs(dhd))) {
+ return ret;
+ }
+
+ /* Register the interrupt function upfront */
+ /* remove corerev checks in data path */
+ /* do this after host/fw negotiation for DAR */
+ prot->mb_ring_fn = dhd_bus_get_mbintr_fn(dhd->bus);
+ prot->mb_2_ring_fn = dhd_bus_get_mbintr_2_fn(dhd->bus);
+
+ prot->tx_h2d_db_cnt = 0;
+#ifdef AGG_H2D_DB
+ dhd_msgbuf_agg_h2d_db_timer_init(dhd);
+#endif /* AGG_H2D_DB */
+
+ dhd->bus->_dar_war = (dhd->bus->sih->buscorerev < 64) ? TRUE : FALSE;
+
+ /* If supported by the host, indicate the memory block
+ * for completion writes / submission reads to shared space
+ */
+ if (dhd->dma_d2h_ring_upd_support) {
+ dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_indx_wr_buf.pa);
+ dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
+ D2H_DMA_INDX_WR_BUF, 0);
+ dhd_base_addr_htolpa(&base_addr, prot->h2d_dma_indx_rd_buf.pa);
+ dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
+ H2D_DMA_INDX_RD_BUF, 0);
+ }
+
+ if (dhd->dma_h2d_ring_upd_support || IDMA_ENAB(dhd)) {
+ dhd_base_addr_htolpa(&base_addr, prot->h2d_dma_indx_wr_buf.pa);
+ dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
+ H2D_DMA_INDX_WR_BUF, 0);
+ dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_indx_rd_buf.pa);
+ dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
+ D2H_DMA_INDX_RD_BUF, 0);
+ }
+
+ dhd_prot_ring_init(dhd, &prot->h2dring_ctrl_subn);
+ dhd_prot_ring_init(dhd, &prot->h2dring_rxp_subn);
+ dhd_prot_ring_init(dhd, &prot->d2hring_ctrl_cpln);
+
+ /* Make it compatible with pre-rev7 firmware */
+ if (prot->active_ipc_version < PCIE_SHARED_VERSION_7) {
+ prot->d2hring_tx_cpln.item_len =
+ D2HRING_TXCMPLT_ITEMSIZE_PREREV7;
+ prot->d2hring_rx_cpln.item_len =
+ D2HRING_RXCMPLT_ITEMSIZE_PREREV7;
+ }
+ dhd_prot_ring_init(dhd, &prot->d2hring_tx_cpln);
+ dhd_prot_ring_init(dhd, &prot->d2hring_rx_cpln);
+
+ dhd_prot_d2h_sync_init(dhd);
+
+ dhd_prot_h2d_sync_init(dhd);
+
+#ifdef PCIE_INB_DW
+ /* Set the initial DS state */
+ if (INBAND_DW_ENAB(dhd->bus)) {
+ dhdpcie_bus_set_pcie_inband_dw_state(dhd->bus,
+ DW_DEVICE_DS_ACTIVE);
+ }
+#endif /* PCIE_INB_DW */
+
+ /* init the scratch buffer */
+ dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_scratch_buf.pa);
+ dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
+ D2H_DMA_SCRATCH_BUF, 0);
+ dhd_bus_cmn_writeshared(dhd->bus, &prot->d2h_dma_scratch_buf.len,
+ sizeof(prot->d2h_dma_scratch_buf.len), D2H_DMA_SCRATCH_BUF_LEN, 0);
+#ifdef DHD_DMA_INDICES_SEQNUM
+ prot->host_seqnum = D2H_EPOCH_INIT_VAL % D2H_EPOCH_MODULO;
+#endif /* DHD_DMA_INDICES_SEQNUM */
+ /* Signal to the dongle that common ring init is complete */
+ if (dhd->hostrdy_after_init)
+ dhd_bus_hostready(dhd->bus);
+
+ /*
+ * If the DMA-able buffers for flowring needs to come from a specific
+ * contiguous memory region, then setup prot->flowrings_dma_buf here.
+ * dhd_prot_flowrings_pool_attach() will carve out DMA-able buffers from
+ * this contiguous memory region, for each of the flowrings.
+ */
+
+ /* Pre-allocate pool of msgbuf_ring for flowrings */
+ if (dhd_prot_flowrings_pool_attach(dhd) != BCME_OK) {
+ return BCME_ERROR;
+ }
+
+ dhd->ring_attached = TRUE;
+
+ /* If IFRM is enabled, wait for FW to setup the DMA channel */
+ if (IFRM_ENAB(dhd)) {
+ dhd_base_addr_htolpa(&base_addr, prot->h2d_ifrm_indx_wr_buf.pa);
+ dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
+ H2D_IFRM_INDX_WR_BUF, 0);
+ }
+
+ /* If IDMA is enabled and inited, wait for FW to setup the IDMA descriptors
+ * Waiting just before configuring doorbell
+ */
+#ifdef BCMQT
+#define IDMA_ENABLE_WAIT 100
+#else
+#define IDMA_ENABLE_WAIT 10
+#endif
+ if (IDMA_ACTIVE(dhd)) {
+ /* wait for idma_en bit in IDMAcontrol register to be set */
+ /* Loop till idma_en is not set */
+ uint buscorerev = dhd->bus->sih->buscorerev;
+ idmacontrol = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ IDMAControl(buscorerev), 0, 0);
+ while (!(idmacontrol & PCIE_IDMA_MODE_EN(buscorerev)) &&
+ (waitcount++ < IDMA_ENABLE_WAIT)) {
+
+ DHD_ERROR(("iDMA not enabled yet,waiting 1 ms c=%d IDMAControl = %08x\n",
+ waitcount, idmacontrol));
+ OSL_DELAY(1000); /* 1ms as its onetime only */
+ idmacontrol = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ IDMAControl(buscorerev), 0, 0);
+ }
+
+ if (waitcount < IDMA_ENABLE_WAIT) {
+ DHD_ERROR(("iDMA enabled PCIEControl = %08x\n", idmacontrol));
+ } else {
+ DHD_ERROR(("Error: wait for iDMA timed out wait=%d IDMAControl = %08x\n",
+ waitcount, idmacontrol));
+ return BCME_ERROR;
+ }
+ /* add delay to fix a bring-up issue */
+ OSL_SLEEP(1);
+ }
+
+ /* Host should configure soft doorbells if needed ... here */
+
+ /* Post to dongle host configured soft doorbells */
+ dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd);
+
+ dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);
+ dhd_msgbuf_rxbuf_post_event_bufs(dhd);
+
+ prot->no_retry = FALSE;
+ prot->no_aggr = FALSE;
+ prot->fixed_rate = FALSE;
+
+ /*
+ * Note that any communication with the Dongle should be added
+ * below this point. Any other host data structure initialization that
+ * needs to happen before the DPC starts executing should be done
+ * before this point.
+ * Because once we start sending H2D requests to the Dongle, the Dongle
+ * may respond immediately. So the DPC context to handle this
+ * D2H response could preempt the context in which dhd_prot_init is running.
+ * We want to ensure that all the Host part of dhd_prot_init is
+ * done before that.
+ */
+
+ /* See if info rings could be created, info rings should be created
+ * only if dongle does not support EDL
+ */
+#ifdef EWP_EDL
+ if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6 && !dhd->dongle_edl_support)
+#else
+ if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6)
+#endif /* EWP_EDL */
+ {
+ if ((ret = dhd_prot_init_info_rings(dhd)) != BCME_OK) {
+ /* For now log and proceed, further clean up action maybe necessary
+ * when we have more clarity.
+ */
+ DHD_ERROR(("%s Info rings couldn't be created: Err Code%d",
+ __FUNCTION__, ret));
+ }
+ }
+
+#ifdef EWP_EDL
+ /* Create Enhanced Debug Lane rings (EDL) if dongle supports it */
+ if (dhd->dongle_edl_support) {
+ if ((ret = dhd_prot_init_edl_rings(dhd)) != BCME_OK) {
+ DHD_ERROR(("%s EDL rings couldn't be created: Err Code%d",
+ __FUNCTION__, ret));
+ }
+ }
+#endif /* EWP_EDL */
+
+#ifdef BTLOG
+ /* create BT log rings */
+ if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_7 && dhd->bt_logging) {
+ if ((ret = dhd_prot_init_btlog_rings(dhd)) != BCME_OK) {
+ /* For now log and proceed, further clean up action maybe necessary
+ * when we have more clarity.
+ */
+ DHD_ERROR(("%s Info rings couldn't be created: Err Code%d",
+ __FUNCTION__, ret));
+ }
+ }
+#endif /* BTLOG */
+
+#ifdef DHD_HP2P
+ /* create HPP txcmpl/rxcmpl rings */
+ if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_7 && dhd->hp2p_capable) {
+ if ((ret = dhd_prot_init_hp2p_rings(dhd)) != BCME_OK) {
+ /* For now log and proceed, further clean up action maybe necessary
+ * when we have more clarity.
+ */
+ DHD_ERROR(("%s HP2P rings couldn't be created: Err Code%d",
+ __FUNCTION__, ret));
+ }
+ }
+#endif /* DHD_HP2P */
+
+#ifdef DHD_LB_RXP
+ /* default rx flow ctrl thresholds. Can be changed at run time through sysfs */
+ dhd->lb_rxp_stop_thr = (D2HRING_RXCMPLT_MAX_ITEM * LB_RXP_STOP_THR);
+ dhd->lb_rxp_strt_thr = (D2HRING_RXCMPLT_MAX_ITEM * LB_RXP_STRT_THR);
+ atomic_set(&dhd->lb_rxp_flow_ctrl, FALSE);
+#endif /* DHD_LB_RXP */
+ return BCME_OK;
+} /* dhd_prot_init */
+
+/**
+ * dhd_prot_detach - PCIE FD protocol layer destructor.
+ * Unlink, frees allocated protocol memory (including dhd_prot)
+ */
+void dhd_prot_detach(dhd_pub_t *dhd)
+{
+ dhd_prot_t *prot = dhd->prot;
+
+ /* Stop the protocol module */
+ if (prot) {
+ /* For non-android platforms, devreset will not be called,
+ * so call prot_reset here. It is harmless if called twice.
+ */
+ dhd_prot_reset(dhd);
+
+ /* free up all DMA-able buffers allocated during prot attach/init */
+
+ dhd_dma_buf_free(dhd, &prot->d2h_dma_scratch_buf);
+#ifdef DHD_HMAPTEST
+ dhd_dma_buf_free(dhd, &prot->hmaptest.mem);
+#endif /* DHD_HMAPTEST */
+ dhd_dma_buf_free(dhd, &prot->retbuf);
+ dhd_dma_buf_free(dhd, &prot->ioctbuf);
+ dhd_dma_buf_free(dhd, &prot->host_bus_throughput_buf);
+ dhd_dma_buf_free(dhd, &prot->hostts_req_buf);
+ dhd_dma_buf_free(dhd, &prot->fw_trap_buf);
+ dhd_dma_buf_free(dhd, &prot->host_scb_buf);
+#ifdef SNAPSHOT_UPLOAD
+ dhd_dma_buf_free(dhd, &prot->snapshot_upload_buf);
+#endif /* SNAPSHOT_UPLOAD */
+
+ /* DMA-able buffers for DMAing H2D/D2H WR/RD indices */
+ dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_wr_buf);
+ dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_rd_buf);
+ dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_wr_buf);
+ dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_rd_buf);
+
+ dhd_dma_buf_free(dhd, &prot->h2d_ifrm_indx_wr_buf);
+
+ /* Common MsgBuf Rings */
+ dhd_prot_ring_detach(dhd, &prot->h2dring_ctrl_subn);
+ dhd_prot_ring_detach(dhd, &prot->h2dring_rxp_subn);
+ dhd_prot_ring_detach(dhd, &prot->d2hring_ctrl_cpln);
+ dhd_prot_ring_detach(dhd, &prot->d2hring_tx_cpln);
+ dhd_prot_ring_detach(dhd, &prot->d2hring_rx_cpln);
+
+ /* Detach each DMA-able buffer and free the pool of msgbuf_ring_t */
+ dhd_prot_flowrings_pool_detach(dhd);
+
+ /* detach info rings */
+ dhd_prot_detach_info_rings(dhd);
+
+#ifdef BTLOG
+ /* detach BT log rings */
+ dhd_prot_detach_btlog_rings(dhd);
+#endif /* BTLOG */
+
+#ifdef EWP_EDL
+ dhd_prot_detach_edl_rings(dhd);
+#endif
+#ifdef DHD_HP2P
+ /* detach HPP rings */
+ dhd_prot_detach_hp2p_rings(dhd);
+#endif /* DHD_HP2P */
+
+ /* if IOCTLRESP_USE_CONSTMEM is defined IOCTL PKTs use pktid_map_handle_ioctl
+ * handler and PKT memory is allocated using alloc_ioctl_return_buffer(), Otherwise
+ * they will be part of pktid_ctrl_map handler and PKT memory is allocated using
+ * PKTGET_STATIC (if DHD_USE_STATIC_CTRLBUF is defined) OR PKGET.
+ * Similarly for freeing PKT buffers DHD_NATIVE_TO_PKTID_FINI will be used
+ * which calls PKTFREE_STATIC (if DHD_USE_STATIC_CTRLBUF is defined) OR PKFREE.
+ * Else if IOCTLRESP_USE_CONSTMEM is defined IOCTL PKTs will be freed using
+ * DHD_NATIVE_TO_PKTID_FINI_IOCTL which calls free_ioctl_return_buffer.
+ */
+ DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_ctrl_map);
+ DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_rx_map);
+ DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_tx_map);
+#ifdef IOCTLRESP_USE_CONSTMEM
+ DHD_NATIVE_TO_PKTID_FINI_IOCTL(dhd, prot->pktid_map_handle_ioctl);
+#endif
+#ifdef DHD_MAP_PKTID_LOGGING
+ DHD_PKTID_LOG_FINI(dhd, prot->pktid_dma_map);
+ DHD_PKTID_LOG_FINI(dhd, prot->pktid_dma_unmap);
+#endif /* DHD_MAP_PKTID_LOGGING */
+#ifdef DHD_DMA_INDICES_SEQNUM
+ if (prot->h2d_dma_indx_rd_copy_buf) {
+ MFREE(dhd->osh, prot->h2d_dma_indx_rd_copy_buf,
+ prot->h2d_dma_indx_rd_copy_bufsz);
+ }
+ if (prot->d2h_dma_indx_wr_copy_buf) {
+ MFREE(dhd->osh, prot->d2h_dma_indx_wr_copy_buf,
+ prot->d2h_dma_indx_wr_copy_bufsz);
+ }
+#endif /* DHD_DMA_INDICES_SEQNUM */
+ DHD_OS_PREFREE(dhd, dhd->prot, sizeof(dhd_prot_t));
+
+ dhd->prot = NULL;
+ }
+} /* dhd_prot_detach */
+
+/**
+ * dhd_prot_reset - Reset the protocol layer without freeing any objects.
+ * This may be invoked to soft reboot the dongle, without having to
+ * detach and attach the entire protocol layer.
+ *
+ * After dhd_prot_reset(), dhd_prot_init() may be invoked
+ * without going through a dhd_prot_attach() phase.
+ */
+void
+dhd_prot_reset(dhd_pub_t *dhd)
+{
+ struct dhd_prot *prot = dhd->prot;
+
+ DHD_TRACE(("%s\n", __FUNCTION__));
+
+ if (prot == NULL) {
+ return;
+ }
+
+ dhd->ring_attached = FALSE;
+
+ dhd_prot_flowrings_pool_reset(dhd);
+
+ /* Reset Common MsgBuf Rings */
+ dhd_prot_ring_reset(dhd, &prot->h2dring_ctrl_subn);
+ dhd_prot_ring_reset(dhd, &prot->h2dring_rxp_subn);
+ dhd_prot_ring_reset(dhd, &prot->d2hring_ctrl_cpln);
+ dhd_prot_ring_reset(dhd, &prot->d2hring_tx_cpln);
+ dhd_prot_ring_reset(dhd, &prot->d2hring_rx_cpln);
+
+ /* Reset info rings */
+ if (prot->h2dring_info_subn) {
+ dhd_prot_ring_reset(dhd, prot->h2dring_info_subn);
+ }
+
+ if (prot->d2hring_info_cpln) {
+ dhd_prot_ring_reset(dhd, prot->d2hring_info_cpln);
+ }
+
+#ifdef EWP_EDL
+ if (prot->d2hring_edl) {
+ dhd_prot_ring_reset(dhd, prot->d2hring_edl);
+ }
+#endif /* EWP_EDL */
+
+ /* Reset all DMA-able buffers allocated during prot attach */
+ dhd_dma_buf_reset(dhd, &prot->d2h_dma_scratch_buf);
+#ifdef DHD_HMAPTEST
+ dhd_dma_buf_reset(dhd, &prot->hmaptest.mem);
+#endif /* DHD_HMAPTEST */
+ dhd_dma_buf_reset(dhd, &prot->retbuf);
+ dhd_dma_buf_reset(dhd, &prot->ioctbuf);
+ dhd_dma_buf_reset(dhd, &prot->host_bus_throughput_buf);
+ dhd_dma_buf_reset(dhd, &prot->hostts_req_buf);
+ dhd_dma_buf_reset(dhd, &prot->fw_trap_buf);
+ dhd_dma_buf_reset(dhd, &prot->host_scb_buf);
+#ifdef SNAPSHOT_UPLOAD
+ dhd_dma_buf_reset(dhd, &prot->snapshot_upload_buf);
+#endif /* SNAPSHOT_UPLOAD */
+
+ dhd_dma_buf_reset(dhd, &prot->h2d_ifrm_indx_wr_buf);
+
+ /* Reset all DMA-able buffers for DMAing H2D/D2H WR/RD indices */
+ dhd_dma_buf_reset(dhd, &prot->h2d_dma_indx_rd_buf);
+ dhd_dma_buf_reset(dhd, &prot->h2d_dma_indx_wr_buf);
+ dhd_dma_buf_reset(dhd, &prot->d2h_dma_indx_rd_buf);
+ dhd_dma_buf_reset(dhd, &prot->d2h_dma_indx_wr_buf);
+
+#ifdef DHD_DMA_INDICES_SEQNUM
+ if (prot->d2h_dma_indx_wr_copy_buf) {
+ dhd_local_buf_reset(prot->h2d_dma_indx_rd_copy_buf,
+ prot->h2d_dma_indx_rd_copy_bufsz);
+ dhd_local_buf_reset(prot->d2h_dma_indx_wr_copy_buf,
+ prot->d2h_dma_indx_wr_copy_bufsz);
+ }
+#endif /* DHD_DMA_INDICES_SEQNUM */
+
+ /* XXX: dmaxfer src and dst? */
+
+ prot->rx_metadata_offset = 0;
+ prot->tx_metadata_offset = 0;
+
+ prot->rxbufpost = 0;
+ prot->cur_event_bufs_posted = 0;
+ prot->cur_ioctlresp_bufs_posted = 0;
+
+ OSL_ATOMIC_INIT(dhd->osh, &prot->active_tx_count);
+ prot->data_seq_no = 0;
+ prot->ioctl_seq_no = 0;
+ prot->ioctl_state = 0;
+ prot->curr_ioctl_cmd = 0;
+ prot->ioctl_received = IOCTL_WAIT;
+ /* To catch any rollover issues fast, starting with higher ioctl_trans_id */
+ prot->ioctl_trans_id = MAXBITVAL(NBITS(prot->ioctl_trans_id)) - BUFFER_BEFORE_ROLLOVER;
+ prot->txcpl_db_cnt = 0;
+
+ /* dhd_flow_rings_init is invoked from dhd_bus_start,
+  * so flowrings must be deleted when the bus is stopped
+  */
+ if (dhd->flow_rings_inited) {
+ dhd_flow_rings_deinit(dhd);
+ }
+
+#ifdef BTLOG
+ /* Reset BTlog rings */
+ if (prot->h2dring_btlog_subn) {
+ dhd_prot_ring_reset(dhd, prot->h2dring_btlog_subn);
+ }
+
+ if (prot->d2hring_btlog_cpln) {
+ dhd_prot_ring_reset(dhd, prot->d2hring_btlog_cpln);
+ }
+#endif /* BTLOG */
+#ifdef DHD_HP2P
+ if (prot->d2hring_hp2p_txcpl) {
+ dhd_prot_ring_reset(dhd, prot->d2hring_hp2p_txcpl);
+ }
+ if (prot->d2hring_hp2p_rxcpl) {
+ dhd_prot_ring_reset(dhd, prot->d2hring_hp2p_rxcpl);
+ }
+#endif /* DHD_HP2P */
+
+ /* Reset PKTID map */
+ DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_ctrl_map);
+ DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_rx_map);
+ DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_tx_map);
+#ifdef IOCTLRESP_USE_CONSTMEM
+ DHD_NATIVE_TO_PKTID_RESET_IOCTL(dhd, prot->pktid_map_handle_ioctl);
+#endif /* IOCTLRESP_USE_CONSTMEM */
+#ifdef DMAMAP_STATS
+ dhd->dma_stats.txdata = dhd->dma_stats.txdata_sz = 0;
+ dhd->dma_stats.rxdata = dhd->dma_stats.rxdata_sz = 0;
+#ifndef IOCTLRESP_USE_CONSTMEM
+ dhd->dma_stats.ioctl_rx = dhd->dma_stats.ioctl_rx_sz = 0;
+#endif /* IOCTLRESP_USE_CONSTMEM */
+ dhd->dma_stats.event_rx = dhd->dma_stats.event_rx_sz = 0;
+ dhd->dma_stats.info_rx = dhd->dma_stats.info_rx_sz = 0;
+ dhd->dma_stats.tsbuf_rx = dhd->dma_stats.tsbuf_rx_sz = 0;
+#endif /* DMAMAP_STATS */
+
+#ifdef AGG_H2D_DB
+ dhd_msgbuf_agg_h2d_db_timer_reset(dhd);
+#endif /* AGG_H2D_DB */
+
+} /* dhd_prot_reset */
+
+#if defined(DHD_LB_RXP)
+#define DHD_LB_DISPATCH_RX_PROCESS(dhdp) dhd_lb_dispatch_rx_process(dhdp)
+#else /* !DHD_LB_RXP */
+#define DHD_LB_DISPATCH_RX_PROCESS(dhdp) do { /* noop */ } while (0)
+#endif /* !DHD_LB_RXP */
+
+#if defined(DHD_LB)
+/* DHD load balancing: deferral of work to another online CPU */
+/* DHD_LB_RXP dispatchers, in dhd_linux.c */
+extern void dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp);
+extern void dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx);
+extern unsigned long dhd_read_lb_rxp(dhd_pub_t *dhdp);
+
+#if defined(DHD_LB_RXP)
+/**
+ * dhd_lb_dispatch_rx_process - load balance by dispatching Rx processing work
+ * to other CPU cores
+ */
+static INLINE void
+dhd_lb_dispatch_rx_process(dhd_pub_t *dhdp)
+{
+ dhd_lb_rx_napi_dispatch(dhdp); /* dispatch rx_process_napi */
+}
+#endif /* DHD_LB_RXP */
+#endif /* DHD_LB */
+
+void
+dhd_prot_rx_dataoffset(dhd_pub_t *dhd, uint32 rx_offset)
+{
+ dhd_prot_t *prot = dhd->prot;
+ prot->rx_dataoffset = rx_offset;
+}
+
+static int
+dhd_check_create_info_rings(dhd_pub_t *dhd)
+{
+ dhd_prot_t *prot = dhd->prot;
+ int ret = BCME_ERROR;
+ uint16 ringid;
+
+#ifdef BTLOG
+ if (dhd->submit_count_WAR) {
+ ringid = dhd->bus->max_tx_flowrings + BCMPCIE_COMMON_MSGRINGS;
+ } else
+#endif /* BTLOG */
+ {
+ /* dongle may increase max_submission_rings so keep
+ * ringid at end of dynamic rings
+ */
+ ringid = dhd->bus->max_tx_flowrings +
+ (dhd->bus->max_submission_rings - dhd->bus->max_tx_flowrings) +
+ BCMPCIE_H2D_COMMON_MSGRINGS;
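+ /* the expression above reduces to
+  * max_submission_rings + BCMPCIE_H2D_COMMON_MSGRINGS; it is left in
+  * expanded form to show that the info rings sit past both the tx
+  * flowrings and any extra submission rings the dongle advertises
+  */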
+ }
+
+ if (prot->d2hring_info_cpln) {
+ /* for d2hring re-entry case, clear inited flag */
+ prot->d2hring_info_cpln->inited = FALSE;
+ }
+
+ if (prot->h2dring_info_subn && prot->d2hring_info_cpln) {
+ return BCME_OK; /* dhd_prot_init re-entry after a dhd_prot_reset */
+ }
+
+ if (prot->h2dring_info_subn == NULL) {
+ prot->h2dring_info_subn = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
+
+ if (prot->h2dring_info_subn == NULL) {
+ DHD_ERROR(("%s: couldn't alloc memory for h2dring_info_subn\n",
+ __FUNCTION__));
+ return BCME_NOMEM;
+ }
+
+ DHD_INFO(("%s: about to create debug submit ring\n", __FUNCTION__));
+ ret = dhd_prot_ring_attach(dhd, prot->h2dring_info_subn, "h2dinfo",
+ H2DRING_DYNAMIC_INFO_MAX_ITEM, H2DRING_INFO_BUFPOST_ITEMSIZE,
+ ringid);
+ if (ret != BCME_OK) {
+ DHD_ERROR(("%s: couldn't alloc resources for dbg submit ring\n",
+ __FUNCTION__));
+ goto err;
+ }
+ }
+
+ if (prot->d2hring_info_cpln == NULL) {
+ prot->d2hring_info_cpln = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
+
+ if (prot->d2hring_info_cpln == NULL) {
+ DHD_ERROR(("%s: couldn't alloc memory for d2hring_info_cpln\n",
+ __FUNCTION__));
+ return BCME_NOMEM;
+ }
+
+ /* create the debug info completion ring next to the debug info submit
+  * ring: ringid = info submit ringid + 1
+  */
+ ringid = ringid + 1;
+
+ DHD_INFO(("%s: about to create debug cpl ring\n", __FUNCTION__));
+ ret = dhd_prot_ring_attach(dhd, prot->d2hring_info_cpln, "d2hinfo",
+ D2HRING_DYNAMIC_INFO_MAX_ITEM, D2HRING_INFO_BUFCMPLT_ITEMSIZE,
+ ringid);
+ if (ret != BCME_OK) {
+ DHD_ERROR(("%s: couldn't alloc resources for dbg cpl ring\n",
+ __FUNCTION__));
+ dhd_prot_ring_detach(dhd, prot->h2dring_info_subn);
+ goto err;
+ }
+ }
+
+ return ret;
+err:
+ MFREE(prot->osh, prot->h2dring_info_subn, sizeof(msgbuf_ring_t));
+
+ if (prot->d2hring_info_cpln) {
+ MFREE(prot->osh, prot->d2hring_info_cpln, sizeof(msgbuf_ring_t));
+ }
+ return ret;
+} /* dhd_check_create_info_rings */
+
+int
+dhd_prot_init_info_rings(dhd_pub_t *dhd)
+{
+ dhd_prot_t *prot = dhd->prot;
+ int ret = BCME_OK;
+
+ if ((ret = dhd_check_create_info_rings(dhd)) != BCME_OK) {
+ DHD_ERROR(("%s: info rings aren't created! \n",
+ __FUNCTION__));
+ return ret;
+ }
+
+ if ((prot->d2hring_info_cpln->inited) || (prot->d2hring_info_cpln->create_pending)) {
+ DHD_INFO(("Info completion ring was created!\n"));
+ return ret;
+ }
+
+ DHD_TRACE(("trying to send create d2h info ring: id %d\n", prot->d2hring_info_cpln->idx));
+ ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_info_cpln,
+ BCMPCIE_D2H_RING_TYPE_DBGBUF_CPL, DHD_D2H_DBGRING_REQ_PKTID);
+ if (ret != BCME_OK)
+ return ret;
+
+ prot->h2dring_info_subn->seqnum = H2D_EPOCH_INIT_VAL;
+ prot->h2dring_info_subn->current_phase = 0;
+ prot->d2hring_info_cpln->seqnum = D2H_EPOCH_INIT_VAL;
+ prot->d2hring_info_cpln->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
+
+ DHD_TRACE(("trying to send create h2d info ring id %d\n", prot->h2dring_info_subn->idx));
+ prot->h2dring_info_subn->n_completion_ids = 1;
+ prot->h2dring_info_subn->compeltion_ring_ids[0] = prot->d2hring_info_cpln->idx;
+
+ ret = dhd_send_h2d_ringcreate(dhd, prot->h2dring_info_subn,
+ BCMPCIE_H2D_RING_TYPE_DBGBUF_SUBMIT, DHD_H2D_DBGRING_REQ_PKTID);
+
+ /* Note that there is no way to delete a d2h or h2d ring once created,
+  * so if either create fails we cannot clean up the ring that succeeded
+  */
+ return ret;
+} /* dhd_prot_init_info_rings */
+
+static void
+dhd_prot_detach_info_rings(dhd_pub_t *dhd)
+{
+ if (dhd->prot->h2dring_info_subn) {
+ dhd_prot_ring_detach(dhd, dhd->prot->h2dring_info_subn);
+ MFREE(dhd->prot->osh, dhd->prot->h2dring_info_subn, sizeof(msgbuf_ring_t));
+ }
+ if (dhd->prot->d2hring_info_cpln) {
+ dhd_prot_ring_detach(dhd, dhd->prot->d2hring_info_cpln);
+ MFREE(dhd->prot->osh, dhd->prot->d2hring_info_cpln, sizeof(msgbuf_ring_t));
+ }
+}
+
+#ifdef DHD_HP2P
+static int
+dhd_check_create_hp2p_rings(dhd_pub_t *dhd)
+{
+ dhd_prot_t *prot = dhd->prot;
+ int ret = BCME_ERROR;
+ uint16 ringid;
+
+ /* Last 2 dynamic ring indices are used by hp2p rings */
+ ringid = dhd->bus->max_submission_rings + dhd->bus->max_completion_rings - 2;
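+ /* e.g. (hypothetical counts): with max_submission_rings = 40 and
+  * max_completion_rings = 41, the hp2p txcpl ring takes ringid 79 and
+  * the rxcpl ring created below takes ringid 80
+  */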
+
+ if (prot->d2hring_hp2p_txcpl == NULL) {
+ prot->d2hring_hp2p_txcpl = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
+
+ if (prot->d2hring_hp2p_txcpl == NULL) {
+ DHD_ERROR(("%s: couldn't alloc memory for d2hring_hp2p_txcpl\n",
+ __FUNCTION__));
+ return BCME_NOMEM;
+ }
+
+ DHD_INFO(("%s: about to create hp2p txcpl ring\n", __FUNCTION__));
+ ret = dhd_prot_ring_attach(dhd, prot->d2hring_hp2p_txcpl, "d2hhp2p_txcpl",
+ dhd_bus_get_hp2p_ring_max_size(dhd->bus, TRUE), D2HRING_TXCMPLT_ITEMSIZE,
+ ringid);
+ if (ret != BCME_OK) {
+ DHD_ERROR(("%s: couldn't alloc resources for hp2p txcpl ring\n",
+ __FUNCTION__));
+ goto err2;
+ }
+ } else {
+ /* for re-entry case, clear inited flag */
+ prot->d2hring_hp2p_txcpl->inited = FALSE;
+ }
+ if (prot->d2hring_hp2p_rxcpl == NULL) {
+ prot->d2hring_hp2p_rxcpl = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
+
+ if (prot->d2hring_hp2p_rxcpl == NULL) {
+ DHD_ERROR(("%s: couldn't alloc memory for d2hring_hp2p_rxcpl\n",
+ __FUNCTION__));
+ return BCME_NOMEM;
+ }
+
+ /* create the hp2p rx completion ring next to the hp2p tx completion
+  * ring: ringid = hp2p txcpl ringid + 1
+  */
+ ringid = ringid + 1;
+
+ DHD_INFO(("%s: about to create hp2p rxcpl ring\n", __FUNCTION__));
+ ret = dhd_prot_ring_attach(dhd, prot->d2hring_hp2p_rxcpl, "d2hhp2p_rxcpl",
+ dhd_bus_get_hp2p_ring_max_size(dhd->bus, FALSE), D2HRING_RXCMPLT_ITEMSIZE,
+ ringid);
+ if (ret != BCME_OK) {
+ DHD_ERROR(("%s: couldn't alloc resources for hp2p rxcpl ring\n",
+ __FUNCTION__));
+ goto err1;
+ }
+ } else {
+ /* for re-entry case, clear inited flag */
+ prot->d2hring_hp2p_rxcpl->inited = FALSE;
+ }
+
+ if (prot->d2hring_hp2p_rxcpl != NULL &&
+ prot->d2hring_hp2p_txcpl != NULL) {
+ /* dhd_prot_init re-entry after a dhd_prot_reset */
+ ret = BCME_OK;
+ }
+
+ return ret;
+err1:
+ MFREE(prot->osh, prot->d2hring_hp2p_rxcpl, sizeof(msgbuf_ring_t));
+ prot->d2hring_hp2p_rxcpl = NULL;
+
+err2:
+ MFREE(prot->osh, prot->d2hring_hp2p_txcpl, sizeof(msgbuf_ring_t));
+ prot->d2hring_hp2p_txcpl = NULL;
+ return ret;
+} /* dhd_check_create_hp2p_rings */
+
+int
+dhd_prot_init_hp2p_rings(dhd_pub_t *dhd)
+{
+ dhd_prot_t *prot = dhd->prot;
+ int ret = BCME_OK;
+
+ dhd->hp2p_ring_more = TRUE;
+ /* default multiflow not allowed */
+ dhd->hp2p_mf_enable = FALSE;
+
+ if ((ret = dhd_check_create_hp2p_rings(dhd)) != BCME_OK) {
+ DHD_ERROR(("%s: hp2p rings aren't created! \n",
+ __FUNCTION__));
+ return ret;
+ }
+
+ if ((prot->d2hring_hp2p_txcpl->inited) || (prot->d2hring_hp2p_txcpl->create_pending)) {
+ DHD_INFO(("hp2p tx completion ring was created!\n"));
+ return ret;
+ }
+
+ DHD_TRACE(("trying to send create d2h hp2p txcpl ring: id %d\n",
+ prot->d2hring_hp2p_txcpl->idx));
+ ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_hp2p_txcpl,
+ BCMPCIE_D2H_RING_TYPE_HPP_TX_CPL, DHD_D2H_HPPRING_TXREQ_PKTID);
+ if (ret != BCME_OK)
+ return ret;
+
+ prot->d2hring_hp2p_txcpl->seqnum = D2H_EPOCH_INIT_VAL;
+ prot->d2hring_hp2p_txcpl->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
+
+ if ((prot->d2hring_hp2p_rxcpl->inited) || (prot->d2hring_hp2p_rxcpl->create_pending)) {
+ DHD_INFO(("hp2p rx completion ring was created!\n"));
+ return ret;
+ }
+
+ DHD_TRACE(("trying to send create d2h hp2p rxcpl ring: id %d\n",
+ prot->d2hring_hp2p_rxcpl->idx));
+ ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_hp2p_rxcpl,
+ BCMPCIE_D2H_RING_TYPE_HPP_RX_CPL, DHD_D2H_HPPRING_RXREQ_PKTID);
+ if (ret != BCME_OK)
+ return ret;
+
+ prot->d2hring_hp2p_rxcpl->seqnum = D2H_EPOCH_INIT_VAL;
+ prot->d2hring_hp2p_rxcpl->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
+
+ /* Note that there is no way to delete a d2h or h2d ring once created,
+  * so if either create fails we cannot clean up the ring that succeeded
+  */
+ return BCME_OK;
+} /* dhd_prot_init_hp2p_rings */
+
+static void
+dhd_prot_detach_hp2p_rings(dhd_pub_t *dhd)
+{
+ if (dhd->prot->d2hring_hp2p_txcpl) {
+ dhd_prot_ring_detach(dhd, dhd->prot->d2hring_hp2p_txcpl);
+ MFREE(dhd->prot->osh, dhd->prot->d2hring_hp2p_txcpl, sizeof(msgbuf_ring_t));
+ dhd->prot->d2hring_hp2p_txcpl = NULL;
+ }
+ if (dhd->prot->d2hring_hp2p_rxcpl) {
+ dhd_prot_ring_detach(dhd, dhd->prot->d2hring_hp2p_rxcpl);
+ MFREE(dhd->prot->osh, dhd->prot->d2hring_hp2p_rxcpl, sizeof(msgbuf_ring_t));
+ dhd->prot->d2hring_hp2p_rxcpl = NULL;
+ }
+}
+#endif /* DHD_HP2P */
+
+#ifdef BTLOG
+static int
+dhd_check_create_btlog_rings(dhd_pub_t *dhd)
+{
+ dhd_prot_t *prot = dhd->prot;
+ int ret = BCME_ERROR;
+ uint16 ringid;
+
+ if (dhd->submit_count_WAR) {
+ ringid = dhd->bus->max_tx_flowrings + BCMPCIE_COMMON_MSGRINGS + 2;
+ } else {
+ /* ringid is one less than the ringid assigned by dhd_check_create_info_rings */
+ ringid = dhd->bus->max_tx_flowrings +
+ (dhd->bus->max_submission_rings - dhd->bus->max_tx_flowrings) +
+ BCMPCIE_H2D_COMMON_MSGRINGS - 1;
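+ /* i.e. max_submission_rings + BCMPCIE_H2D_COMMON_MSGRINGS - 1, one
+  * slot below the info submit ringid chosen in dhd_check_create_info_rings
+  */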
+ }
+
+ if (prot->d2hring_btlog_cpln) {
+ /* for re-entry case, clear inited flag */
+ prot->d2hring_btlog_cpln->inited = FALSE;
+ }
+
+ if (prot->h2dring_btlog_subn && prot->d2hring_btlog_cpln) {
+ return BCME_OK; /* dhd_prot_init re-entry after a dhd_prot_reset */
+ }
+
+ if (prot->h2dring_btlog_subn == NULL) {
+ prot->h2dring_btlog_subn = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
+
+ if (prot->h2dring_btlog_subn == NULL) {
+ DHD_ERROR(("%s: couldn't alloc memory for h2dring_btlog_subn\n",
+ __FUNCTION__));
+ return BCME_NOMEM;
+ }
+
+ DHD_INFO(("%s: about to create debug submit ring\n", __FUNCTION__));
+ ret = dhd_prot_ring_attach(dhd, prot->h2dring_btlog_subn, "h2dbtlog",
+ H2DRING_DYNAMIC_INFO_MAX_ITEM, H2DRING_INFO_BUFPOST_ITEMSIZE,
+ ringid);
+ if (ret != BCME_OK) {
+ DHD_ERROR(("%s: couldn't alloc resources for dbg submit ring\n",
+ __FUNCTION__));
+ goto err;
+ }
+ }
+
+ if (prot->d2hring_btlog_cpln == NULL) {
+ prot->d2hring_btlog_cpln = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
+
+ if (prot->d2hring_btlog_cpln == NULL) {
+ DHD_ERROR(("%s: couldn't alloc memory for d2hring_btlog_cpln\n",
+ __FUNCTION__));
+ return BCME_NOMEM;
+ }
+
+ if (dhd->submit_count_WAR) {
+ ringid = ringid + 1;
+ } else {
+ /* advance ringid past BTLOG submit ring and INFO submit and cmplt rings */
+ ringid = ringid + 3;
+ }
+
+ DHD_INFO(("%s: about to create debug cpl ring\n", __FUNCTION__));
+ ret = dhd_prot_ring_attach(dhd, prot->d2hring_btlog_cpln, "d2hbtlog",
+ D2HRING_DYNAMIC_INFO_MAX_ITEM, D2HRING_INFO_BUFCMPLT_ITEMSIZE,
+ ringid);
+ if (ret != BCME_OK) {
+ DHD_ERROR(("%s: couldn't alloc resources for dbg cpl ring\n",
+ __FUNCTION__));
+ dhd_prot_ring_detach(dhd, prot->h2dring_btlog_subn);
+ goto err;
+ }
+ }
+
+ return ret;
+err:
+ MFREE(prot->osh, prot->h2dring_btlog_subn, sizeof(msgbuf_ring_t));
+
+ if (prot->d2hring_btlog_cpln) {
+ MFREE(prot->osh, prot->d2hring_btlog_cpln, sizeof(msgbuf_ring_t));
+ }
+ return ret;
+} /* dhd_check_create_btlog_rings */
+
+int
+dhd_prot_init_btlog_rings(dhd_pub_t *dhd)
+{
+ dhd_prot_t *prot = dhd->prot;
+ int ret = BCME_OK;
+
+ if ((ret = dhd_check_create_btlog_rings(dhd)) != BCME_OK) {
+ DHD_ERROR(("%s: btlog rings aren't created! \n",
+ __FUNCTION__));
+ return ret;
+ }
+
+ if ((prot->d2hring_btlog_cpln->inited) || (prot->d2hring_btlog_cpln->create_pending)) {
+ DHD_INFO(("Info completion ring was created!\n"));
+ return ret;
+ }
+
+ DHD_ERROR(("trying to send create d2h btlog ring: id %d\n", prot->d2hring_btlog_cpln->idx));
+ ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_btlog_cpln,
+ BCMPCIE_D2H_RING_TYPE_BTLOG_CPL, DHD_D2H_BTLOGRING_REQ_PKTID);
+ if (ret != BCME_OK)
+ return ret;
+
+ prot->h2dring_btlog_subn->seqnum = H2D_EPOCH_INIT_VAL;
+ prot->h2dring_btlog_subn->current_phase = 0;
+ prot->d2hring_btlog_cpln->seqnum = D2H_EPOCH_INIT_VAL;
+ prot->d2hring_btlog_cpln->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
+
+ DHD_ERROR(("trying to send create h2d btlog ring id %d\n", prot->h2dring_btlog_subn->idx));
+ prot->h2dring_btlog_subn->n_completion_ids = 1;
+ prot->h2dring_btlog_subn->compeltion_ring_ids[0] = prot->d2hring_btlog_cpln->idx;
+
+ ret = dhd_send_h2d_ringcreate(dhd, prot->h2dring_btlog_subn,
+ BCMPCIE_H2D_RING_TYPE_BTLOG_SUBMIT, DHD_H2D_BTLOGRING_REQ_PKTID);
+
+ /* Note that there is no way to delete a d2h or h2d ring once created,
+  * so if either create fails we cannot clean up the ring that succeeded
+  */
+ return ret;
+} /* dhd_prot_init_btlog_rings */
+
+static void
+dhd_prot_detach_btlog_rings(dhd_pub_t *dhd)
+{
+ if (dhd->prot->h2dring_btlog_subn) {
+ dhd_prot_ring_detach(dhd, dhd->prot->h2dring_btlog_subn);
+ MFREE(dhd->prot->osh, dhd->prot->h2dring_btlog_subn, sizeof(msgbuf_ring_t));
+ }
+ if (dhd->prot->d2hring_btlog_cpln) {
+ dhd_prot_ring_detach(dhd, dhd->prot->d2hring_btlog_cpln);
+ MFREE(dhd->prot->osh, dhd->prot->d2hring_btlog_cpln, sizeof(msgbuf_ring_t));
+ }
+}
+#endif /* BTLOG */
+
+#ifdef EWP_EDL
+static int
+dhd_check_create_edl_rings(dhd_pub_t *dhd)
+{
+ dhd_prot_t *prot = dhd->prot;
+ int ret = BCME_ERROR;
+ uint16 ringid;
+
+#ifdef BTLOG
+ if (dhd->submit_count_WAR) {
+ ringid = dhd->bus->max_tx_flowrings + BCMPCIE_COMMON_MSGRINGS;
+ } else
+#endif /* BTLOG */
+ {
+ /* dongle may increase max_submission_rings so keep
+ * ringid at end of dynamic rings (re-use info ring cpl ring id)
+ */
+ ringid = dhd->bus->max_tx_flowrings +
+ (dhd->bus->max_submission_rings - dhd->bus->max_tx_flowrings) +
+ BCMPCIE_H2D_COMMON_MSGRINGS + 1;
+ }
+
+ if (prot->d2hring_edl) {
+ prot->d2hring_edl->inited = FALSE;
+ return BCME_OK; /* dhd_prot_init re-entry after a dhd_prot_reset */
+ }
+
+ if (prot->d2hring_edl == NULL) {
+ prot->d2hring_edl = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
+
+ if (prot->d2hring_edl == NULL) {
+ DHD_ERROR(("%s: couldn't alloc memory for d2hring_edl\n",
+ __FUNCTION__));
+ return BCME_NOMEM;
+ }
+
+ DHD_ERROR(("%s: about to create EDL ring, ringid: %u \n", __FUNCTION__,
+ ringid));
+ ret = dhd_prot_ring_attach(dhd, prot->d2hring_edl, "d2hring_edl",
+ D2HRING_EDL_MAX_ITEM, D2HRING_EDL_ITEMSIZE,
+ ringid);
+ if (ret != BCME_OK) {
+ DHD_ERROR(("%s: couldn't alloc resources for EDL ring\n",
+ __FUNCTION__));
+ goto err;
+ }
+ }
+
+ return ret;
+err:
+ MFREE(prot->osh, prot->d2hring_edl, sizeof(msgbuf_ring_t));
+ prot->d2hring_edl = NULL;
+
+ return ret;
+} /* dhd_check_create_edl_rings */
+
+int
+dhd_prot_init_edl_rings(dhd_pub_t *dhd)
+{
+ dhd_prot_t *prot = dhd->prot;
+ int ret = BCME_ERROR;
+
+ if ((ret = dhd_check_create_edl_rings(dhd)) != BCME_OK) {
+ DHD_ERROR(("%s: EDL rings aren't created! \n",
+ __FUNCTION__));
+ return ret;
+ }
+
+ if ((prot->d2hring_edl->inited) || (prot->d2hring_edl->create_pending)) {
+ DHD_INFO(("EDL completion ring was created!\n"));
+ return ret;
+ }
+
+ DHD_ERROR(("trying to send create d2h edl ring: idx %d\n", prot->d2hring_edl->idx));
+ ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_edl,
+ BCMPCIE_D2H_RING_TYPE_EDL, DHD_D2H_DBGRING_REQ_PKTID);
+ if (ret != BCME_OK)
+ return ret;
+
+ prot->d2hring_edl->seqnum = D2H_EPOCH_INIT_VAL;
+ prot->d2hring_edl->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
+
+ return BCME_OK;
+} /* dhd_prot_init_edl_rings */
+
+static void
+dhd_prot_detach_edl_rings(dhd_pub_t *dhd)
+{
+ if (dhd->prot->d2hring_edl) {
+ dhd_prot_ring_detach(dhd, dhd->prot->d2hring_edl);
+ MFREE(dhd->prot->osh, dhd->prot->d2hring_edl, sizeof(msgbuf_ring_t));
+ dhd->prot->d2hring_edl = NULL;
+ }
+}
+#endif /* EWP_EDL */
+
+/**
+ * Initialize protocol: sync w/dongle state.
+ * Sets dongle media info (iswl, drv_version, mac address).
+ */
+int dhd_sync_with_dongle(dhd_pub_t *dhd)
+{
+ int ret = 0;
+ uint len = 0;
+ wlc_rev_info_t revinfo;
+ char buf[128];
+ dhd_prot_t *prot = dhd->prot;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
+
+ /* Post ts buffer after shim layer is attached */
+ ret = dhd_msgbuf_rxbuf_post_ts_bufs(dhd);
+
+ /* query for 'wlc_ver' to get version info from firmware */
+ bzero(buf, sizeof(buf));
+ len = bcm_mkiovar("wlc_ver", NULL, 0, buf, sizeof(buf));
+ if (len == 0) {
+ DHD_ERROR(("%s failed in calling bcm_mkiovar %u\n", __FUNCTION__, len));
+ ret = BCME_ERROR;
+ goto done;
+ }
+ ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0);
+ if (ret < 0) {
+ DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
+ } else {
+ dhd->wlc_ver_major = ((wl_wlc_version_t*)buf)->wlc_ver_major;
+ dhd->wlc_ver_minor = ((wl_wlc_version_t*)buf)->wlc_ver_minor;
+ }
+
+ DHD_ERROR(("wlc_ver_major %d, wlc_ver_minor %d\n", dhd->wlc_ver_major, dhd->wlc_ver_minor));
+#ifndef OEM_ANDROID
+ /* Get the device MAC address */
+ bzero(buf, sizeof(buf));
+ strlcpy(buf, "cur_etheraddr", sizeof(buf));
+ ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0);
+ if (ret < 0) {
+ DHD_ERROR(("%s: GET iovar cur_etheraddr FAILED\n", __FUNCTION__));
+ goto done;
+ }
+ memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN);
+ if (dhd_msg_level & DHD_INFO_VAL) {
+ bcm_print_bytes("CUR_ETHERADDR ", (uchar *)buf, ETHER_ADDR_LEN);
+ }
+#endif /* OEM_ANDROID */
+
+#ifdef DHD_FW_COREDUMP
+ /* Check the memdump capability */
+ dhd_get_memdump_info(dhd);
+#endif /* DHD_FW_COREDUMP */
+#ifdef BCMASSERT_LOG
+ dhd_get_assert_info(dhd);
+#endif /* BCMASSERT_LOG */
+
+ /* Get the device rev info */
+ memset(&revinfo, 0, sizeof(revinfo));
+ ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_REVINFO, &revinfo, sizeof(revinfo), FALSE, 0);
+ if (ret < 0) {
+ DHD_ERROR(("%s: GET revinfo FAILED\n", __FUNCTION__));
+ goto done;
+ }
+ DHD_ERROR(("%s: GET_REVINFO device 0x%x, vendor 0x%x, chipnum 0x%x\n", __FUNCTION__,
+ revinfo.deviceid, revinfo.vendorid, revinfo.chipnum));
+
+ /* Get the RxBuf post size */
+ /* Use default value in case of failure */
+ prot->rxbufpost_sz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
+ memset(buf, 0, sizeof(buf));
+ len = bcm_mkiovar("rxbufpost_sz", NULL, 0, buf, sizeof(buf));
+ if (len == 0) {
+ DHD_ERROR(("%s failed in calling bcm_mkiovar %u\n", __FUNCTION__, len));
+ } else {
+ ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0);
+ if (ret < 0) {
+ DHD_ERROR(("%s: GET RxBuf post FAILED, use default %d\n",
+ __FUNCTION__, DHD_FLOWRING_RX_BUFPOST_PKTSZ));
+ } else {
+ if (memcpy_s(&(prot->rxbufpost_sz), sizeof(prot->rxbufpost_sz),
+ buf, sizeof(uint16)) != BCME_OK) {
+ DHD_ERROR(("%s: rxbufpost_sz memcpy failed\n", __FUNCTION__));
+ }
+
+ if (prot->rxbufpost_sz > DHD_FLOWRING_RX_BUFPOST_PKTSZ_MAX) {
+ DHD_ERROR(("%s: Invalid RxBuf post size : %d, default to %d\n",
+ __FUNCTION__, prot->rxbufpost_sz,
+ DHD_FLOWRING_RX_BUFPOST_PKTSZ));
+ prot->rxbufpost_sz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
+ } else {
+ DHD_ERROR(("%s: RxBuf Post : %d\n",
+ __FUNCTION__, prot->rxbufpost_sz));
+ }
+ }
+ }
+
+ /* Post buffers for packet reception */
+ dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */
+
+ DHD_SSSR_DUMP_INIT(dhd);
+
+ dhd_process_cid_mac(dhd, TRUE);
+ ret = dhd_preinit_ioctls(dhd);
+ dhd_process_cid_mac(dhd, FALSE);
+#if defined(DHD_SDTC_ETB_DUMP)
+ dhd_sdtc_etb_init(dhd);
+#endif /* DHD_SDTC_ETB_DUMP */
+#if defined(DHD_H2D_LOG_TIME_SYNC)
+#ifdef DHD_HP2P
+ if (FW_SUPPORTED(dhd, h2dlogts) || dhd->hp2p_capable)
+#else
+ if (FW_SUPPORTED(dhd, h2dlogts))
+#endif // endif
+ {
+#ifdef DHD_HP2P
+ if (dhd->hp2p_enable) {
+ dhd->dhd_rte_time_sync_ms = DHD_H2D_LOG_TIME_STAMP_MATCH / 40;
+ } else {
+ dhd->dhd_rte_time_sync_ms = DHD_H2D_LOG_TIME_STAMP_MATCH;
+ }
+#else
+ dhd->dhd_rte_time_sync_ms = DHD_H2D_LOG_TIME_STAMP_MATCH;
+#endif /* DHD_HP2P */
+ dhd->bus->dhd_rte_time_sync_count = OSL_SYSUPTIME_US();
+ /* This is during initialization. */
+ dhd_h2d_log_time_sync(dhd);
+ } else {
+ dhd->dhd_rte_time_sync_ms = 0;
+ }
+#endif /* DHD_H2D_LOG_TIME_SYNC */
+
+#ifdef HOST_SFH_LLC
+ if (FW_SUPPORTED(dhd, host_sfhllc)) {
+ dhd->host_sfhllc_supported = TRUE;
+ } else {
+ dhd->host_sfhllc_supported = FALSE;
+ }
+#endif /* HOST_SFH_LLC */
+
+ /* Always assumes wl for now */
+ dhd->iswl = TRUE;
+done:
+ return ret;
+} /* dhd_sync_with_dongle */
+
+#define DHD_DBG_SHOW_METADATA 0
+
+#if DHD_DBG_SHOW_METADATA
+static void
+BCMFASTPATH(dhd_prot_print_metadata)(dhd_pub_t *dhd, void *ptr, int len)
+{
+ uint8 tlv_t;
+ uint8 tlv_l;
+ uint8 *tlv_v = (uint8 *)ptr;
+
+ if (len <= BCMPCIE_D2H_METADATA_HDRLEN)
+ return;
+
+ len -= BCMPCIE_D2H_METADATA_HDRLEN;
+ tlv_v += BCMPCIE_D2H_METADATA_HDRLEN;
+
+ while (len > TLV_HDR_LEN) {
+ tlv_t = tlv_v[TLV_TAG_OFF];
+ tlv_l = tlv_v[TLV_LEN_OFF];
+
+ len -= TLV_HDR_LEN;
+ tlv_v += TLV_HDR_LEN;
+ if (len < tlv_l)
+ break;
+ if ((tlv_t == 0) || (tlv_t == WLFC_CTL_TYPE_FILLER))
+ break;
+
+ switch (tlv_t) {
+ case WLFC_CTL_TYPE_TXSTATUS: {
+ uint32 txs;
+ memcpy(&txs, tlv_v, sizeof(uint32));
+ if (tlv_l < (sizeof(wl_txstatus_additional_info_t) + sizeof(uint32))) {
+ printf("METADATA TX_STATUS: %08x\n", txs);
+ } else {
+ wl_txstatus_additional_info_t tx_add_info;
+ memcpy(&tx_add_info, tlv_v + sizeof(uint32),
+ sizeof(wl_txstatus_additional_info_t));
+ printf("METADATA TX_STATUS: %08x WLFCTS[%04x | %08x - %08x - %08x]"
+ " rate = %08x tries = %d - %d\n", txs,
+ tx_add_info.seq, tx_add_info.entry_ts,
+ tx_add_info.enq_ts, tx_add_info.last_ts,
+ tx_add_info.rspec, tx_add_info.rts_cnt,
+ tx_add_info.tx_cnt);
+ }
+ } break;
+
+ case WLFC_CTL_TYPE_RSSI: {
+ if (tlv_l == 1)
+ printf("METADATA RX_RSSI: rssi = %d\n", *tlv_v);
+ else
+ printf("METADATA RX_RSSI[%04x]: rssi = %d snr = %d\n",
+ (*(tlv_v + 3) << 8) | *(tlv_v + 2),
+ (int8)(*tlv_v), *(tlv_v + 1));
+ } break;
+
+ case WLFC_CTL_TYPE_FIFO_CREDITBACK:
+ bcm_print_bytes("METADATA FIFO_CREDITBACK", tlv_v, tlv_l);
+ break;
+
+ case WLFC_CTL_TYPE_TX_ENTRY_STAMP:
+ bcm_print_bytes("METADATA TX_ENTRY", tlv_v, tlv_l);
+ break;
+
+ case WLFC_CTL_TYPE_RX_STAMP: {
+ struct {
+ uint32 rspec;
+ uint32 bus_time;
+ uint32 wlan_time;
+ } rx_tmstamp;
+ memcpy(&rx_tmstamp, tlv_v, sizeof(rx_tmstamp));
+ printf("METADATA RX TIMESTMAP: WLFCTS[%08x - %08x] rate = %08x\n",
+ rx_tmstamp.wlan_time, rx_tmstamp.bus_time, rx_tmstamp.rspec);
+ } break;
+
+ case WLFC_CTL_TYPE_TRANS_ID:
+ bcm_print_bytes("METADATA TRANS_ID", tlv_v, tlv_l);
+ break;
+
+ case WLFC_CTL_TYPE_COMP_TXSTATUS:
+ bcm_print_bytes("METADATA COMP_TXSTATUS", tlv_v, tlv_l);
+ break;
+
+ default:
+ bcm_print_bytes("METADATA UNKNOWN", tlv_v, tlv_l);
+ break;
+ }
+
+ len -= tlv_l;
+ tlv_v += tlv_l;
+ }
+}
+#endif /* DHD_DBG_SHOW_METADATA */
+
+static INLINE void
+BCMFASTPATH(dhd_prot_packet_free)(dhd_pub_t *dhd, void *pkt, uint8 pkttype, bool send)
+{
+ if (pkt) {
+ if (pkttype == PKTTYPE_IOCTL_RX ||
+ pkttype == PKTTYPE_EVENT_RX ||
+ pkttype == PKTTYPE_INFO_RX ||
+ pkttype == PKTTYPE_TSBUF_RX) {
+#ifdef DHD_USE_STATIC_CTRLBUF
+ PKTFREE_STATIC(dhd->osh, pkt, send);
+#else
+ PKTFREE(dhd->osh, pkt, send);
+#endif /* DHD_USE_STATIC_CTRLBUF */
+ } else {
+ PKTFREE(dhd->osh, pkt, send);
+ }
+ }
+}
+
+/**
+ * dhd_prot_packet_get should be called only for items having pktid_ctrl_map handle
+ * and all the bottom most functions like dhd_pktid_map_free hold separate DHD_PKTID_LOCK
+ * to ensure thread safety, so no need to hold any locks for this function
+ */
+static INLINE void *
+BCMFASTPATH(dhd_prot_packet_get)(dhd_pub_t *dhd, uint32 pktid, uint8 pkttype, bool free_pktid)
+{
+ void *PKTBUF;
+ dmaaddr_t pa;
+ uint32 len;
+ void *dmah;
+ void *secdma;
+
+#ifdef DHD_PCIE_PKTID
+ if (free_pktid) {
+ PKTBUF = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_ctrl_map,
+ pktid, pa, len, dmah, secdma, pkttype);
+ } else {
+ PKTBUF = DHD_PKTID_TO_NATIVE_RSV(dhd, dhd->prot->pktid_ctrl_map,
+ pktid, pa, len, dmah, secdma, pkttype);
+ }
+#else
+ PKTBUF = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_ctrl_map, pktid, pa,
+ len, dmah, secdma, pkttype);
+#endif /* DHD_PCIE_PKTID */
+ if (PKTBUF) {
+ DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah);
+#ifdef DMAMAP_STATS
+ switch (pkttype) {
+#ifndef IOCTLRESP_USE_CONSTMEM
+ case PKTTYPE_IOCTL_RX:
+ dhd->dma_stats.ioctl_rx--;
+ dhd->dma_stats.ioctl_rx_sz -= len;
+ break;
+#endif /* IOCTLRESP_USE_CONSTMEM */
+ case PKTTYPE_EVENT_RX:
+ dhd->dma_stats.event_rx--;
+ dhd->dma_stats.event_rx_sz -= len;
+ break;
+ case PKTTYPE_INFO_RX:
+ dhd->dma_stats.info_rx--;
+ dhd->dma_stats.info_rx_sz -= len;
+ break;
+ case PKTTYPE_TSBUF_RX:
+ dhd->dma_stats.tsbuf_rx--;
+ dhd->dma_stats.tsbuf_rx_sz -= len;
+ break;
+ }
+#endif /* DMAMAP_STATS */
+ }
+
+ return PKTBUF;
+}
+
+#ifdef IOCTLRESP_USE_CONSTMEM
+static INLINE void
+BCMFASTPATH(dhd_prot_ioctl_ret_buffer_get)(dhd_pub_t *dhd, uint32 pktid, dhd_dma_buf_t *retbuf)
+{
+ memset(retbuf, 0, sizeof(dhd_dma_buf_t));
+ retbuf->va = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle_ioctl, pktid,
+ retbuf->pa, retbuf->len, retbuf->dmah, retbuf->secdma, PKTTYPE_IOCTL_RX);
+
+ return;
+}
+#endif
+
+#ifdef PCIE_INB_DW
+static int
+dhd_prot_inc_hostactive_devwake_assert(dhd_bus_t *bus)
+{
+ unsigned long flags = 0;
+
+ if (INBAND_DW_ENAB(bus)) {
+ DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
+ bus->host_active_cnt++;
+ DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
+ if (dhd_bus_set_device_wake(bus, TRUE) != BCME_OK) {
+ DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
+ bus->host_active_cnt--;
+ dhd_bus_inb_ack_pending_ds_req(bus);
+ DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
+ return BCME_ERROR;
+ }
+ }
+
+ return BCME_OK;
+}
+
+static void
+dhd_prot_dec_hostactive_ack_pending_dsreq(dhd_bus_t *bus)
+{
+ unsigned long flags = 0;
+ if (INBAND_DW_ENAB(bus)) {
+ DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
+ bus->host_active_cnt--;
+ dhd_bus_inb_ack_pending_ds_req(bus);
+ DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
+ }
+}
+#endif /* PCIE_INB_DW */
+
+static void
+BCMFASTPATH(dhd_msgbuf_rxbuf_post)(dhd_pub_t *dhd, bool use_rsv_pktid)
+{
+ dhd_prot_t *prot = dhd->prot;
+ int16 fillbufs;
+ int retcount = 0;
+
+ fillbufs = prot->max_rxbufpost - prot->rxbufpost;
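+ /* Example (hypothetical numbers, assuming RX_BUF_BURST == 32): with
+  * max_rxbufpost = 512 and rxbufpost = 100, fillbufs starts at 412 and the
+  * loop below posts 32-buffer bursts; a remainder smaller than
+  * RX_BUF_BURST is deferred until completions grow the deficit again
+  */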
+ while (fillbufs >= RX_BUF_BURST) {
+ /* Post in bursts of at most RX_BUF_BURST (32) buffers at a time */
+ fillbufs = MIN(fillbufs, RX_BUF_BURST);
+
+ /* Post buffers */
+ retcount = dhd_prot_rxbuf_post(dhd, fillbufs, use_rsv_pktid);
+
+ if (retcount > 0) {
+ prot->rxbufpost += (uint16)retcount;
+ /* how many more to post */
+ fillbufs = prot->max_rxbufpost - prot->rxbufpost;
+ } else {
+ /* Make sure we don't run loop any further */
+ fillbufs = 0;
+ }
+ }
+}
+
+/** Post 'count' rx buffers to the dongle */
+static int
+BCMFASTPATH(dhd_prot_rxbuf_post)(dhd_pub_t *dhd, uint16 count, bool use_rsv_pktid)
+{
+ void *p, **pktbuf;
+ uint8 *rxbuf_post_tmp;
+ host_rxbuf_post_t *rxbuf_post;
+ void *msg_start;
+ dmaaddr_t pa, *pktbuf_pa;
+ uint32 *pktlen;
+ uint16 i = 0, alloced = 0;
+ unsigned long flags;
+ uint32 pktid;
+ dhd_prot_t *prot = dhd->prot;
+ msgbuf_ring_t *ring = &prot->h2dring_rxp_subn;
+ void *lcl_buf;
+ uint16 lcl_buf_size;
+#ifdef BCM_ROUTER_DHD
+ uint16 pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ + BCMEXTRAHDROOM;
+#else
+ uint16 pktsz = prot->rxbufpost_sz;
+#endif /* BCM_ROUTER_DHD */
+
+#ifdef PCIE_INB_DW
+ if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
+ return BCME_ERROR;
+#endif /* PCIE_INB_DW */
+ /* allocate a local buffer to store pkt buffer va, pa and length */
+ lcl_buf_size = (sizeof(void *) + sizeof(dmaaddr_t) + sizeof(uint32)) *
+ RX_BUF_BURST;
+ lcl_buf = MALLOC(dhd->osh, lcl_buf_size);
+ if (!lcl_buf) {
+ DHD_ERROR(("%s: local scratch buffer allocation failed\n", __FUNCTION__));
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+ return 0;
+ }
+ pktbuf = lcl_buf;
+ pktbuf_pa = (dmaaddr_t *)((uint8 *)pktbuf + sizeof(void *) * RX_BUF_BURST);
+ pktlen = (uint32 *)((uint8 *)pktbuf_pa + sizeof(dmaaddr_t) * RX_BUF_BURST);
+
+ for (i = 0; i < count; i++) {
+ if ((p = PKTGET(dhd->osh, pktsz, FALSE)) == NULL) {
+ DHD_ERROR(("%s:%d: PKTGET for rxbuf failed\n", __FUNCTION__, __LINE__));
+ dhd->rx_pktgetfail++;
+ break;
+ }
+
+#ifdef BCM_ROUTER_DHD
+ /* Reserve extra headroom for router builds */
+ PKTPULL(dhd->osh, p, BCMEXTRAHDROOM);
+#endif /* BCM_ROUTER_DHD */
+ pktlen[i] = PKTLEN(dhd->osh, p);
+ pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen[i], DMA_RX, p, 0);
+
+ if (PHYSADDRISZERO(pa)) {
+ PKTFREE(dhd->osh, p, FALSE);
+ DHD_ERROR(("Invalid phyaddr 0\n"));
+ ASSERT(0);
+ break;
+ }
+#ifdef DMAMAP_STATS
+ dhd->dma_stats.rxdata++;
+ dhd->dma_stats.rxdata_sz += pktlen[i];
+#endif /* DMAMAP_STATS */
+
+ PKTPULL(dhd->osh, p, prot->rx_metadata_offset);
+ pktlen[i] = PKTLEN(dhd->osh, p);
+ pktbuf[i] = p;
+ pktbuf_pa[i] = pa;
+ }
+
+ /* only post what we have */
+ count = i;
+
+ /* grab the ring lock to allocate pktid and post on ring */
+ DHD_RING_LOCK(ring->ring_lock, flags);
+
+ /* Claim space for exactly 'count' messages, for mitigation purposes */
+ msg_start = (void *)
+ dhd_prot_alloc_ring_space(dhd, ring, count, &alloced, TRUE);
+ if (msg_start == NULL) {
+ DHD_INFO(("%s:%d: Rxbufpost Msgbuf Not available\n", __FUNCTION__, __LINE__));
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+ goto cleanup;
+ }
+ /* if msg_start != NULL, we should have alloced space for at least 1 item */
+ ASSERT(alloced > 0);
+
+ rxbuf_post_tmp = (uint8*)msg_start;
+
+ for (i = 0; i < alloced; i++) {
+ rxbuf_post = (host_rxbuf_post_t *)rxbuf_post_tmp;
+ p = pktbuf[i];
+ pa = pktbuf_pa[i];
+
+ pktid = DHD_NATIVE_TO_PKTID(dhd, dhd->prot->pktid_rx_map, p, pa,
+ pktlen[i], DMA_RX, NULL, ring->dma_buf.secdma, PKTTYPE_DATA_RX);
+#if defined(DHD_PCIE_PKTID)
+ if (pktid == DHD_PKTID_INVALID) {
+ break;
+ }
+#endif /* DHD_PCIE_PKTID */
+
+#ifdef DHD_HMAPTEST
+ if (dhd->prot->hmaptest_rx_active == HMAPTEST_D11_RX_ACTIVE) {
+ /* scratchbuf area */
+ dhd->prot->hmap_rx_buf_va = (char *)dhd->prot->hmaptest.mem.va
+ + dhd->prot->hmaptest.offset;
+
+ dhd->prot->hmap_rx_buf_len = pktlen[i] + prot->rx_metadata_offset;
+ if ((dhd->prot->hmap_rx_buf_va + dhd->prot->hmap_rx_buf_len) >
+ ((char *)dhd->prot->hmaptest.mem.va + dhd->prot->hmaptest.mem.len)) {
+ DHD_ERROR(("hmaptest: ERROR Rxpost outside HMAPTEST buffer\n"));
+ DHD_ERROR(("hmaptest: NOT Replacing Rx Buffer\n"));
+ dhd->prot->hmaptest_rx_active = HMAPTEST_D11_RX_INACTIVE;
+ dhd->prot->hmaptest.in_progress = FALSE;
+ } else {
+ pa = DMA_MAP(dhd->osh, dhd->prot->hmap_rx_buf_va,
+ dhd->prot->hmap_rx_buf_len, DMA_RX, p, 0);
+
+ dhd->prot->hmap_rx_buf_pa = pa;
+ dhd->prot->hmaptest_rx_pktid = pktid;
+ dhd->prot->hmaptest_rx_active = HMAPTEST_D11_RX_POSTED;
+ DHD_ERROR(("hmaptest: d11write rxpost scratch rxbuf pktid=0x%08x\n",
+ pktid));
+ DHD_ERROR(("hmaptest: d11write rxpost scratch rxbuf va=0x%p pa.lo=0x%08x\n",
+ dhd->prot->hmap_rx_buf_va, (uint32)PHYSADDRLO(pa)));
+ DHD_ERROR(("hmaptest: d11write rxpost orig pktdata va=0x%p pa.lo=0x%08x\n",
+ PKTDATA(dhd->osh, p), (uint32)PHYSADDRLO(pktbuf_pa[i])));
+ }
+ }
+#endif /* DHD_HMAPTEST */
+ dhd->prot->tot_rxbufpost++;
+ /* Common msg header */
+ rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_RXBUF_POST;
+ rxbuf_post->cmn_hdr.if_id = 0;
+ rxbuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
+ rxbuf_post->cmn_hdr.flags = ring->current_phase;
+ ring->seqnum++;
+ rxbuf_post->data_buf_len = htol16((uint16)pktlen[i]);
+ rxbuf_post->data_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
+ rxbuf_post->data_buf_addr.low_addr =
+ htol32(PHYSADDRLO(pa) + prot->rx_metadata_offset);
+
+ if (prot->rx_metadata_offset) {
+ rxbuf_post->metadata_buf_len = prot->rx_metadata_offset;
+ rxbuf_post->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
+ rxbuf_post->metadata_buf_addr.low_addr = htol32(PHYSADDRLO(pa));
+ } else {
+ rxbuf_post->metadata_buf_len = 0;
+ rxbuf_post->metadata_buf_addr.high_addr = 0;
+ rxbuf_post->metadata_buf_addr.low_addr = 0;
+ }
+
+#ifdef DHD_PKTID_AUDIT_RING
+ DHD_PKTID_AUDIT(dhd, prot->pktid_rx_map, pktid, DHD_DUPLICATE_ALLOC);
+#endif /* DHD_PKTID_AUDIT_RING */
+
+ rxbuf_post->cmn_hdr.request_id = htol32(pktid);
+
+ /* Move rxbuf_post_tmp to next item */
+ rxbuf_post_tmp = rxbuf_post_tmp + ring->item_len;
+#ifdef DHD_LBUF_AUDIT
+ PKTAUDIT(dhd->osh, p);
+#endif
+ }
+
+ if (i < alloced) {
+ if (ring->wr < (alloced - i))
+ ring->wr = ring->max_items - (alloced - i);
+ else
+ ring->wr -= (alloced - i);
+
+ if (ring->wr == 0) {
+ DHD_INFO(("%s: flipping the phase now\n", ring->name));
+ ring->current_phase = ring->current_phase ?
+ 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
+ }
+
+ alloced = i;
+ }
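+ /* e.g. (hypothetical failure): if 32 slots were claimed but pktid
+  * allocation failed at i = 20, wr is rolled back by 12 (modulo
+  * max_items) so only the 20 populated items are submitted below
+  */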
+
+ /* update ring's WR index and ring doorbell to dongle */
+ if (alloced > 0) {
+ dhd_prot_ring_write_complete(dhd, ring, msg_start, alloced);
+ }
+
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+
+cleanup:
+ for (i = alloced; i < count; i++) {
+ p = pktbuf[i];
+ pa = pktbuf_pa[i];
+
+ DMA_UNMAP(dhd->osh, pa, pktlen[i], DMA_RX, 0, DHD_DMAH_NULL);
+ PKTFREE(dhd->osh, p, FALSE);
+ }
+
+ MFREE(dhd->osh, lcl_buf, lcl_buf_size);
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+
+ return alloced;
+} /* dhd_prot_rxbuf_post */
+
+#if !defined(BCM_ROUTER_DHD)
+static int
+dhd_prot_infobufpost(dhd_pub_t *dhd, msgbuf_ring_t *ring)
+{
+ unsigned long flags;
+ uint32 pktid;
+ dhd_prot_t *prot = dhd->prot;
+ uint16 alloced = 0;
+ uint16 pktsz = DHD_INFOBUF_RX_BUFPOST_PKTSZ;
+ uint32 pktlen;
+ info_buf_post_msg_t *infobuf_post;
+ uint8 *infobuf_post_tmp;
+ void *p;
+ void* msg_start;
+ uint8 i = 0;
+ dmaaddr_t pa;
+ int16 count = 0;
+
+ if (ring == NULL)
+ return 0;
+
+ if (ring->inited != TRUE)
+ return 0;
+ if (ring == dhd->prot->h2dring_info_subn) {
+ if (prot->max_infobufpost == 0)
+ return 0;
+
+ count = prot->max_infobufpost - prot->infobufpost;
+ }
+#ifdef BTLOG
+ else if (ring == dhd->prot->h2dring_btlog_subn) {
+ if (prot->max_btlogbufpost == 0)
+ return 0;
+
+ pktsz = DHD_BTLOG_RX_BUFPOST_PKTSZ;
+ count = prot->max_btlogbufpost - prot->btlogbufpost;
+ }
+#endif /* BTLOG */
+ else {
+ DHD_ERROR(("Unknown ring\n"));
+ return 0;
+ }
+
+ if (count <= 0) {
+ DHD_INFO(("%s: Cannot post more than max info resp buffers\n",
+ __FUNCTION__));
+ return 0;
+ }
+
+#ifdef PCIE_INB_DW
+ if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
+ return BCME_ERROR;
+#endif /* PCIE_INB_DW */
+
+ /* grab the ring lock to allocate pktid and post on ring */
+ DHD_RING_LOCK(ring->ring_lock, flags);
+
+ /* Claim space for exactly 'count' messages, for mitigation purposes */
+ msg_start = (void *) dhd_prot_alloc_ring_space(dhd, ring, count, &alloced, FALSE);
+
+ if (msg_start == NULL) {
+ DHD_INFO(("%s:%d: infobufpost Msgbuf Not available\n", __FUNCTION__, __LINE__));
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+ return -1;
+ }
+
+ /* if msg_start != NULL, we should have alloced space for at least 1 item */
+ ASSERT(alloced > 0);
+
+ infobuf_post_tmp = (uint8*) msg_start;
+
+ /* loop through each allocated message in the host ring */
+ for (i = 0; i < alloced; i++) {
+ infobuf_post = (info_buf_post_msg_t *) infobuf_post_tmp;
+ /* Create a rx buffer */
+#ifdef DHD_USE_STATIC_CTRLBUF
+ p = PKTGET_STATIC(dhd->osh, pktsz, FALSE);
+#else
+ p = PKTGET(dhd->osh, pktsz, FALSE);
+#endif /* DHD_USE_STATIC_CTRLBUF */
+ if (p == NULL) {
+ DHD_ERROR(("%s:%d: PKTGET for infobuf failed\n", __FUNCTION__, __LINE__));
+ dhd->rx_pktgetfail++;
+ break;
+ }
+ pktlen = PKTLEN(dhd->osh, p);
+ pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0);
+ if (PHYSADDRISZERO(pa)) {
+ DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
+#ifdef DHD_USE_STATIC_CTRLBUF
+ PKTFREE_STATIC(dhd->osh, p, FALSE);
+#else
+ PKTFREE(dhd->osh, p, FALSE);
+#endif /* DHD_USE_STATIC_CTRLBUF */
+ DHD_ERROR(("Invalid phyaddr 0\n"));
+ ASSERT(0);
+ break;
+ }
+#ifdef DMAMAP_STATS
+ dhd->dma_stats.info_rx++;
+ dhd->dma_stats.info_rx_sz += pktlen;
+#endif /* DMAMAP_STATS */
+ pktlen = PKTLEN(dhd->osh, p);
+
+ /* Common msg header */
+ infobuf_post->cmn_hdr.msg_type = MSG_TYPE_INFO_BUF_POST;
+ infobuf_post->cmn_hdr.if_id = 0;
+ infobuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
+ infobuf_post->cmn_hdr.flags = ring->current_phase;
+ ring->seqnum++;
+
+ pktid = DHD_NATIVE_TO_PKTID(dhd, dhd->prot->pktid_ctrl_map, p, pa,
+ pktlen, DMA_RX, NULL, ring->dma_buf.secdma, PKTTYPE_INFO_RX);
+
+#if defined(DHD_PCIE_PKTID)
+ if (pktid == DHD_PKTID_INVALID) {
+ DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, 0);
+
+#ifdef DHD_USE_STATIC_CTRLBUF
+ PKTFREE_STATIC(dhd->osh, p, FALSE);
+#else
+ PKTFREE(dhd->osh, p, FALSE);
+#endif /* DHD_USE_STATIC_CTRLBUF */
+ DHD_ERROR_RLMT(("%s: Pktid pool depleted.\n", __FUNCTION__));
+ break;
+ }
+#endif /* DHD_PCIE_PKTID */
+
+ infobuf_post->host_buf_len = htol16((uint16)pktlen);
+ infobuf_post->host_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
+ infobuf_post->host_buf_addr.low_addr = htol32(PHYSADDRLO(pa));
+
+#ifdef DHD_PKTID_AUDIT_RING
+ DHD_PKTID_AUDIT(dhd, prot->pktid_ctrl_map, pktid, DHD_DUPLICATE_ALLOC);
+#endif /* DHD_PKTID_AUDIT_RING */
+
+ DHD_MSGBUF_INFO(("ID %d, low_addr 0x%08x, high_addr 0x%08x\n",
+ infobuf_post->cmn_hdr.request_id, infobuf_post->host_buf_addr.low_addr,
+ infobuf_post->host_buf_addr.high_addr));
+
+ infobuf_post->cmn_hdr.request_id = htol32(pktid);
+ /* Move rxbuf_post_tmp to next item */
+ infobuf_post_tmp = infobuf_post_tmp + ring->item_len;
+#ifdef DHD_LBUF_AUDIT
+ PKTAUDIT(dhd->osh, p);
+#endif
+ }
+
+ if (i < alloced) {
+ if (ring->wr < (alloced - i))
+ ring->wr = ring->max_items - (alloced - i);
+ else
+ ring->wr -= (alloced - i);
+
+ alloced = i;
+ if (alloced && ring->wr == 0) {
+ DHD_INFO(("%s: flipping the phase now\n", ring->name));
+ ring->current_phase = ring->current_phase ?
+ 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
+ }
+ }
+
+ /* Update the write pointer in TCM & ring bell */
+ if (alloced > 0) {
+ if (ring == dhd->prot->h2dring_info_subn) {
+ prot->infobufpost += alloced;
+ }
+#ifdef BTLOG
+ if (ring == dhd->prot->h2dring_btlog_subn) {
+ prot->btlogbufpost += alloced;
+ }
+#endif /* BTLOG */
+ dhd_prot_ring_write_complete(dhd, ring, msg_start, alloced);
+ }
+
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+ return alloced;
+} /* dhd_prot_infobufpost */
+#endif /* !BCM_ROUTER_DHD */
+
+#ifdef IOCTLRESP_USE_CONSTMEM
+static int
+alloc_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf)
+{
+ int err;
+ memset(retbuf, 0, sizeof(dhd_dma_buf_t));
+
+ if ((err = dhd_dma_buf_alloc(dhd, retbuf, IOCT_RETBUF_SIZE)) != BCME_OK) {
+ DHD_ERROR(("%s: dhd_dma_buf_alloc err %d\n", __FUNCTION__, err));
+ ASSERT(0);
+ return BCME_NOMEM;
+ }
+
+ return BCME_OK;
+}
+
+static void
+free_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf)
+{
+ /* retbuf (declared on stack) not fully populated ... */
+ if (retbuf->va) {
+ uint32 dma_pad;
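+ /* Reconstruct the original padded allocation size: the alloc path is
+  * assumed to have added a full DHD_DMA_PAD of slack whenever
+  * IOCT_RETBUF_SIZE is not already DHD_DMA_PAD aligned, so mirror
+  * that here before freeing
+  */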
+ dma_pad = (IOCT_RETBUF_SIZE % DHD_DMA_PAD) ? DHD_DMA_PAD : 0;
+ retbuf->len = IOCT_RETBUF_SIZE;
+ retbuf->_alloced = retbuf->len + dma_pad;
+ }
+
+ dhd_dma_buf_free(dhd, retbuf);
+ return;
+}
+#endif /* IOCTLRESP_USE_CONSTMEM */
+
+static int
+dhd_prot_rxbufpost_ctrl(dhd_pub_t *dhd, uint8 msg_type)
+{
+ void *p;
+ uint16 pktsz;
+ ioctl_resp_evt_buf_post_msg_t *rxbuf_post;
+ dmaaddr_t pa;
+ uint32 pktlen;
+ dhd_prot_t *prot = dhd->prot;
+ uint16 alloced = 0;
+ unsigned long flags;
+ dhd_dma_buf_t retbuf;
+ void *dmah = NULL;
+ uint32 pktid;
+ void *map_handle;
+ msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
+ bool non_ioctl_resp_buf = 0;
+ dhd_pkttype_t buf_type;
+
+ if (dhd->busstate == DHD_BUS_DOWN) {
+ DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__));
+ return -1;
+ }
+ memset(&retbuf, 0, sizeof(dhd_dma_buf_t));
+
+ if (msg_type == MSG_TYPE_IOCTLRESP_BUF_POST)
+ buf_type = PKTTYPE_IOCTL_RX;
+ else if (msg_type == MSG_TYPE_EVENT_BUF_POST)
+ buf_type = PKTTYPE_EVENT_RX;
+ else if (msg_type == MSG_TYPE_TIMSTAMP_BUFPOST)
+ buf_type = PKTTYPE_TSBUF_RX;
+ else {
+ DHD_ERROR(("invalid message type to be posted to Ctrl ring %d\n", msg_type));
+ /* XXX: maybe add an assert */
+ return -1;
+ }
+#ifdef PCIE_INB_DW
+ if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK) {
+ return BCME_ERROR;
+ }
+#endif /* PCIE_INB_DW */
+
+ if ((msg_type == MSG_TYPE_EVENT_BUF_POST) || (msg_type == MSG_TYPE_TIMSTAMP_BUFPOST))
+ non_ioctl_resp_buf = TRUE;
+ else
+ non_ioctl_resp_buf = FALSE;
+
+ if (non_ioctl_resp_buf) {
+ /* Allocate packet for non-ioctl response buffer post */
+ pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
+ } else {
+ /* Allocate packet for ctrl/ioctl buffer post */
+ pktsz = DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ;
+ }
+
+#ifdef IOCTLRESP_USE_CONSTMEM
+ if (!non_ioctl_resp_buf) {
+ if (alloc_ioctl_return_buffer(dhd, &retbuf) != BCME_OK) {
+ DHD_ERROR(("Could not allocate IOCTL response buffer\n"));
+ goto fail;
+ }
+ ASSERT(retbuf.len == IOCT_RETBUF_SIZE);
+ p = retbuf.va;
+ pktlen = retbuf.len;
+ pa = retbuf.pa;
+ dmah = retbuf.dmah;
+ } else
+#endif /* IOCTLRESP_USE_CONSTMEM */
+ {
+#ifdef DHD_USE_STATIC_CTRLBUF
+ p = PKTGET_STATIC(dhd->osh, pktsz, FALSE);
+#else
+ p = PKTGET(dhd->osh, pktsz, FALSE);
+#endif /* DHD_USE_STATIC_CTRLBUF */
+ if (p == NULL) {
+ DHD_ERROR(("%s:%d: PKTGET for %s buf failed\n",
+ __FUNCTION__, __LINE__, non_ioctl_resp_buf ?
+ "EVENT" : "IOCTL RESP"));
+ dhd->rx_pktgetfail++;
+ goto fail;
+ }
+
+ pktlen = PKTLEN(dhd->osh, p);
+ pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0);
+
+ if (PHYSADDRISZERO(pa)) {
+ DHD_ERROR(("Invalid physaddr 0\n"));
+ ASSERT(0);
+ goto free_pkt_return;
+ }
+
+#ifdef DMAMAP_STATS
+ switch (buf_type) {
+#ifndef IOCTLRESP_USE_CONSTMEM
+ case PKTTYPE_IOCTL_RX:
+ dhd->dma_stats.ioctl_rx++;
+ dhd->dma_stats.ioctl_rx_sz += pktlen;
+ break;
+#endif /* !IOCTLRESP_USE_CONSTMEM */
+ case PKTTYPE_EVENT_RX:
+ dhd->dma_stats.event_rx++;
+ dhd->dma_stats.event_rx_sz += pktlen;
+ break;
+ case PKTTYPE_TSBUF_RX:
+ dhd->dma_stats.tsbuf_rx++;
+ dhd->dma_stats.tsbuf_rx_sz += pktlen;
+ break;
+ default:
+ break;
+ }
+#endif /* DMAMAP_STATS */
+
+ }
+
+ /* grab the ring lock to allocate pktid and post on ring */
+ DHD_RING_LOCK(ring->ring_lock, flags);
+
+ rxbuf_post = (ioctl_resp_evt_buf_post_msg_t *)
+ dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
+
+ if (rxbuf_post == NULL) {
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+ DHD_ERROR(("%s:%d: Ctrl submit Msgbuf Not available to post buffer \n",
+ __FUNCTION__, __LINE__));
+
+#ifdef IOCTLRESP_USE_CONSTMEM
+ if (non_ioctl_resp_buf)
+#endif /* IOCTLRESP_USE_CONSTMEM */
+ {
+ DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
+ }
+ goto free_pkt_return;
+ }
+
+ /* CMN msg header */
+ rxbuf_post->cmn_hdr.msg_type = msg_type;
+
+#ifdef IOCTLRESP_USE_CONSTMEM
+ if (!non_ioctl_resp_buf) {
+ map_handle = dhd->prot->pktid_map_handle_ioctl;
+ pktid = DHD_NATIVE_TO_PKTID(dhd, map_handle, p, pa, pktlen, DMA_RX, dmah,
+ ring->dma_buf.secdma, buf_type);
+ } else
+#endif /* IOCTLRESP_USE_CONSTMEM */
+ {
+ map_handle = dhd->prot->pktid_ctrl_map;
+ pktid = DHD_NATIVE_TO_PKTID(dhd, map_handle,
+ p, pa, pktlen, DMA_RX, dmah, ring->dma_buf.secdma,
+ buf_type);
+ }
+
+ if (pktid == DHD_PKTID_INVALID) {
+ if (ring->wr == 0) {
+ ring->wr = ring->max_items - 1;
+ } else {
+ ring->wr--;
+ if (ring->wr == 0) {
+ ring->current_phase = ring->current_phase ? 0 :
+ BCMPCIE_CMNHDR_PHASE_BIT_INIT;
+ }
+ }
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+ DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
+ DHD_ERROR_RLMT(("%s: Pktid pool depleted.\n", __FUNCTION__));
+ goto free_pkt_return;
+ }
+
+#ifdef DHD_PKTID_AUDIT_RING
+ DHD_PKTID_AUDIT(dhd, map_handle, pktid, DHD_DUPLICATE_ALLOC);
+#endif /* DHD_PKTID_AUDIT_RING */
+
+ rxbuf_post->cmn_hdr.request_id = htol32(pktid);
+ rxbuf_post->cmn_hdr.if_id = 0;
+ rxbuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
+ ring->seqnum++;
+ rxbuf_post->cmn_hdr.flags = ring->current_phase;
+
+#if defined(DHD_PCIE_PKTID)
+ if (rxbuf_post->cmn_hdr.request_id == DHD_PKTID_INVALID) {
+ if (ring->wr == 0) {
+ ring->wr = ring->max_items - 1;
+ } else {
+ if (ring->wr == 0) {
+ ring->current_phase = ring->current_phase ? 0 :
+ BCMPCIE_CMNHDR_PHASE_BIT_INIT;
+ }
+ }
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+#ifdef IOCTLRESP_USE_CONSTMEM
+ if (non_ioctl_resp_buf)
+#endif /* IOCTLRESP_USE_CONSTMEM */
+ {
+ DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
+ }
+ goto free_pkt_return;
+ }
+#endif /* DHD_PCIE_PKTID */
+
+#ifndef IOCTLRESP_USE_CONSTMEM
+ rxbuf_post->host_buf_len = htol16((uint16)PKTLEN(dhd->osh, p));
+#else
+ rxbuf_post->host_buf_len = htol16((uint16)pktlen);
+#endif /* IOCTLRESP_USE_CONSTMEM */
+ rxbuf_post->host_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
+ rxbuf_post->host_buf_addr.low_addr = htol32(PHYSADDRLO(pa));
+#ifdef DHD_LBUF_AUDIT
+ if (non_ioctl_resp_buf)
+ PKTAUDIT(dhd->osh, p);
+#endif
+ /* update ring's WR index and ring doorbell to dongle */
+ dhd_prot_ring_write_complete(dhd, ring, rxbuf_post, 1);
+
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+ return 1;
+
+free_pkt_return:
+ if (!non_ioctl_resp_buf) {
+#ifdef IOCTLRESP_USE_CONSTMEM
+ free_ioctl_return_buffer(dhd, &retbuf);
+#else
+ dhd_prot_packet_free(dhd, p, buf_type, FALSE);
+#endif /* IOCTLRESP_USE_CONSTMEM */
+ } else {
+ dhd_prot_packet_free(dhd, p, buf_type, FALSE);
+ }
+
+fail:
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+ return -1;
+} /* dhd_prot_rxbufpost_ctrl */
+
+static uint16
+dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, uint8 msg_type, uint32 max_to_post)
+{
+ uint32 i = 0;
+ int32 ret_val;
+
+ DHD_MSGBUF_INFO(("max to post %d, event %d \n", max_to_post, msg_type));
+
+ if (dhd->busstate == DHD_BUS_DOWN) {
+ DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__));
+ return 0;
+ }
+
+ while (i < max_to_post) {
+ ret_val = dhd_prot_rxbufpost_ctrl(dhd, msg_type);
+ if (ret_val < 0)
+ break;
+ i++;
+ }
+ DHD_MSGBUF_INFO(("posted %d buffers of type %d\n", i, msg_type));
+ return (uint16)i;
+}
+
+static void
+dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *dhd)
+{
+ dhd_prot_t *prot = dhd->prot;
+ int max_to_post;
+
+ DHD_MSGBUF_INFO(("ioctl resp buf post\n"));
+ max_to_post = prot->max_ioctlrespbufpost - prot->cur_ioctlresp_bufs_posted;
+ if (max_to_post <= 0) {
+ DHD_INFO(("%s: Cannot post more than max IOCTL resp buffers\n",
+ __FUNCTION__));
+ return;
+ }
+ prot->cur_ioctlresp_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd,
+ MSG_TYPE_IOCTLRESP_BUF_POST, max_to_post);
+}
+
+static void
+dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *dhd)
+{
+ dhd_prot_t *prot = dhd->prot;
+ int max_to_post;
+
+ max_to_post = prot->max_eventbufpost - prot->cur_event_bufs_posted;
+ if (max_to_post <= 0) {
+ DHD_ERROR(("%s: Cannot post more than max event buffers\n",
+ __FUNCTION__));
+ return;
+ }
+ prot->cur_event_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd,
+ MSG_TYPE_EVENT_BUF_POST, max_to_post);
+}
+
+static int
+dhd_msgbuf_rxbuf_post_ts_bufs(dhd_pub_t *dhd)
+{
+#ifdef DHD_TIMESYNC
+ dhd_prot_t *prot = dhd->prot;
+ int max_to_post;
+
+ if (prot->active_ipc_version < 7) {
+ DHD_ERROR(("no ts buffers to device ipc rev is %d, needs to be atleast 7\n",
+ prot->active_ipc_version));
+ return 0;
+ }
+
+ max_to_post = prot->max_tsbufpost - prot->cur_ts_bufs_posted;
+ if (max_to_post <= 0) {
+ DHD_INFO(("%s: Cannot post more than max ts buffers\n",
+ __FUNCTION__));
+ return 0;
+ }
+
+ prot->cur_ts_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd,
+ MSG_TYPE_TIMSTAMP_BUFPOST, max_to_post);
+#endif /* DHD_TIMESYNC */
+ return 0;
+}
+
+bool
+BCMFASTPATH(dhd_prot_process_msgbuf_infocpl)(dhd_pub_t *dhd, uint bound)
+{
+ dhd_prot_t *prot = dhd->prot;
+ bool more = TRUE;
+ uint n = 0;
+ msgbuf_ring_t *ring = prot->d2hring_info_cpln;
+ unsigned long flags;
+
+ if (ring == NULL)
+ return FALSE;
+ if (ring->inited != TRUE)
+ return FALSE;
+
+ /* Process all the messages - DTOH direction */
+ while (!dhd_is_device_removed(dhd)) {
+ uint8 *msg_addr;
+ uint32 msg_len;
+
+ if (dhd->hang_was_sent) {
+ more = FALSE;
+ break;
+ }
+
+ if (dhd->smmu_fault_occurred) {
+ more = FALSE;
+ break;
+ }
+
+ DHD_RING_LOCK(ring->ring_lock, flags);
+ /* Get the message from ring */
+ msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+ if (msg_addr == NULL) {
+ more = FALSE;
+ break;
+ }
+
+ /* Prefetch data to populate the cache */
+ OSL_PREFETCH(msg_addr);
+
+ if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) {
+ DHD_ERROR(("%s: Error at process rxpl msgbuf of len %d\n",
+ __FUNCTION__, msg_len));
+ }
+
+ /* Update read pointer */
+ dhd_prot_upd_read_idx(dhd, ring);
+
+ /* After batch processing, check RX bound */
+ n += msg_len / ring->item_len;
+ if (n >= bound) {
+ break;
+ }
+ }
+
+ return more;
+}
+
+#ifdef BTLOG
+bool
+BCMFASTPATH(dhd_prot_process_msgbuf_btlogcpl)(dhd_pub_t *dhd, uint bound)
+{
+ dhd_prot_t *prot = dhd->prot;
+ bool more = TRUE;
+ uint n = 0;
+ msgbuf_ring_t *ring = prot->d2hring_btlog_cpln;
+
+ if (ring == NULL)
+ return FALSE;
+ if (ring->inited != TRUE)
+ return FALSE;
+
+ /* Process all the messages - DTOH direction */
+ while (!dhd_is_device_removed(dhd)) {
+ uint8 *msg_addr;
+ uint32 msg_len;
+
+ if (dhd_query_bus_erros(dhd)) {
+ more = FALSE;
+ break;
+ }
+
+ if (dhd->hang_was_sent) {
+ more = FALSE;
+ break;
+ }
+
+ if (dhd->smmu_fault_occurred) {
+ more = FALSE;
+ break;
+ }
+
+ /* Get the message from ring */
+ msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
+ if (msg_addr == NULL) {
+ more = FALSE;
+ break;
+ }
+
+ /* Prefetch data to populate the cache */
+ OSL_PREFETCH(msg_addr);
+
+ if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) {
+ DHD_ERROR(("%s: Error at process rxpl msgbuf of len %d\n",
+ __FUNCTION__, msg_len));
+ }
+
+ /* Update read pointer */
+ dhd_prot_upd_read_idx(dhd, ring);
+
+ /* After batch processing, check RX bound */
+ n += msg_len / ring->item_len;
+ if (n >= bound) {
+ break;
+ }
+ }
+
+ return more;
+}
+#endif /* BTLOG */
+
+#ifdef EWP_EDL
+bool
+dhd_prot_process_msgbuf_edl(dhd_pub_t *dhd)
+{
+ dhd_prot_t *prot = dhd->prot;
+ msgbuf_ring_t *ring = prot->d2hring_edl;
+ unsigned long flags = 0;
+ uint32 items = 0;
+ uint16 rd = 0;
+ uint16 depth = 0;
+
+ if (ring == NULL)
+ return FALSE;
+ if (ring->inited != TRUE)
+ return FALSE;
+ if (ring->item_len == 0) {
+ DHD_ERROR(("%s: Bad ring ! ringidx %d, item_len %d \n",
+ __FUNCTION__, ring->idx, ring->item_len));
+ return FALSE;
+ }
+
+ if (dhd_query_bus_erros(dhd)) {
+ return FALSE;
+ }
+
+ if (dhd->hang_was_sent) {
+ return FALSE;
+ }
+
+ /* in this DPC context just check if wr index has moved
+ * and schedule deferred context to actually process the
+ * work items.
+ */
+
+ /* update the write index */
+ DHD_RING_LOCK(ring->ring_lock, flags);
+ if (dhd->dma_d2h_ring_upd_support) {
+ /* DMAing write/read indices supported */
+ ring->wr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
+ } else {
+ dhd_bus_cmn_readshared(dhd->bus, &ring->wr, RING_WR_UPD, ring->idx);
+ }
+ rd = ring->rd;
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+
+ depth = ring->max_items;
+ /* check for avail space, in number of ring items */
+ items = READ_AVAIL_SPACE(ring->wr, rd, depth);
+ if (items == 0) {
+ /* no work items in edl ring */
+ return FALSE;
+ }
+ if (items > ring->max_items) {
+ DHD_ERROR(("\r\n======================= \r\n"));
+ DHD_ERROR(("%s(): ring %p, ring->name %s, ring->max_items %d, items %d \r\n",
+ __FUNCTION__, ring, ring->name, ring->max_items, items));
+ DHD_ERROR(("wr: %d, rd: %d, depth: %d \r\n",
+ ring->wr, ring->rd, depth));
+ DHD_ERROR(("dhd->busstate %d bus->wait_for_d3_ack %d \r\n",
+ dhd->busstate, dhd->bus->wait_for_d3_ack));
+ DHD_ERROR(("\r\n======================= \r\n"));
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+ if (ring->wr >= ring->max_items) {
+ dhd->bus->read_shm_fail = TRUE;
+ }
+#else
+#ifdef DHD_FW_COREDUMP
+ if (dhd->memdump_enabled) {
+ /* collect core dump */
+ dhd->memdump_type = DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR;
+ dhd_bus_mem_dump(dhd);
+
+ }
+#endif /* DHD_FW_COREDUMP */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+ dhd_schedule_reset(dhd);
+
+ return FALSE;
+ }
+
+ if (items > D2HRING_EDL_WATERMARK) {
+ DHD_ERROR_RLMT(("%s: WARNING! EDL watermark hit, num items=%u;"
+ " rd=%u; wr=%u; depth=%u;\n", __FUNCTION__, items,
+ ring->rd, ring->wr, depth));
+ }
+
+ dhd_schedule_logtrace(dhd->info);
+
+ return FALSE;
+}
+
+/*
+ * This is called either from work queue context of 'event_log_dispatcher_work' or
+ * from the kthread context of dhd_logtrace_thread
+ */
+int
+dhd_prot_process_edl_complete(dhd_pub_t *dhd, void *evt_decode_data)
+{
+ dhd_prot_t *prot = NULL;
+ msgbuf_ring_t *ring = NULL;
+ int err = 0;
+ unsigned long flags = 0;
+ cmn_msg_hdr_t *msg = NULL;
+ uint8 *msg_addr = NULL;
+ uint32 max_items_to_process = 0, n = 0;
+ uint32 num_items = 0, new_items = 0;
+ uint16 depth = 0;
+ volatile uint16 wr = 0;
+
+ if (!dhd || !dhd->prot)
+ return 0;
+
+ prot = dhd->prot;
+ ring = prot->d2hring_edl;
+
+ if (!ring || !evt_decode_data) {
+ return 0;
+ }
+
+ if (dhd->hang_was_sent) {
+ return FALSE;
+ }
+
+ DHD_RING_LOCK(ring->ring_lock, flags);
+ ring->curr_rd = ring->rd;
+ wr = ring->wr;
+ depth = ring->max_items;
+ /* check for avail space, in number of ring items
+ * Note that this will only give the # of items
+ * from rd to wr if wr>=rd, or from rd to ring end
+ * if wr < rd. So in the latter case strictly speaking
+ * not all the items are read. But this is OK, because
+ * these will be processed in the next doorbell as rd
+ * would have wrapped around. Processing in the next
+ * doorbell is acceptable since EDL only contains debug data
+ */
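+ /* Worked example (hypothetical indices): with depth = 256, rd = 250 and
+  * wr = 10, READ_AVAIL_SPACE() returns 6 (items 250..255); the remaining
+  * 10 items are picked up on the next doorbell after rd wraps to 0
+  */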
+ num_items = READ_AVAIL_SPACE(wr, ring->rd, depth);
+
+ if (num_items == 0) {
+ /* no work items in edl ring */
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+ return 0;
+ }
+
+ DHD_INFO(("%s: EDL work items [%u] available \n",
+ __FUNCTION__, num_items));
+
+ /* if space is available, calculate address to be read */
+ msg_addr = (char*)ring->dma_buf.va + (ring->rd * ring->item_len);
+
+ max_items_to_process = MIN(num_items, DHD_EVENT_LOGTRACE_BOUND);
+
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+
+ /* Prefetch data to populate the cache */
+ OSL_PREFETCH(msg_addr);
+
+ n = max_items_to_process;
+ while (n > 0) {
+ msg = (cmn_msg_hdr_t *)msg_addr;
+ /* wait for DMA of work item to complete */
+ if ((err = dhd->prot->d2h_edl_sync_cb(dhd, ring, msg)) != BCME_OK) {
+ DHD_ERROR(("%s: Error waiting for DMA to cmpl in EDL ring; err = %d\n",
+ __FUNCTION__, err));
+ }
+ /*
+ * Update the curr_rd to the current index in the ring, from where
+ * the work item is fetched. This way if the fetched work item
+ * fails in LIVELOCK, we can print the exact read index in the ring
+ * that shows up the corrupted work item.
+ */
+ if ((ring->curr_rd + 1) >= ring->max_items) {
+ ring->curr_rd = 0;
+ } else {
+ ring->curr_rd += 1;
+ }
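+		/* Note: this is effectively curr_rd = (curr_rd + 1) % max_items,
+		 * written without a modulo (presumably to avoid a division on
+		 * this path).
+		 */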
+
+ if (err != BCME_OK) {
+ return 0;
+ }
+
+ /* process the edl work item, i.e, the event log */
+ err = dhd_event_logtrace_process_edl(dhd, msg_addr, evt_decode_data);
+
+ /* Dummy sleep so that scheduler kicks in after processing any logprints */
+ OSL_SLEEP(0);
+
+ /* Prefetch data to populate the cache */
+ OSL_PREFETCH(msg_addr + ring->item_len);
+
+ msg_addr += ring->item_len;
+ --n;
+ }
+
+ DHD_RING_LOCK(ring->ring_lock, flags);
+ /* update host ring read pointer */
+ if ((ring->rd + max_items_to_process) >= ring->max_items)
+ ring->rd = 0;
+ else
+ ring->rd += max_items_to_process;
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+
+ /* Now after processing max_items_to_process update dongle rd index.
+ * The TCM rd index is updated only if bus is not
+ * in D3. Else, the rd index is updated from resume
+ * context in - 'dhdpcie_bus_suspend'
+ */
+ DHD_GENERAL_LOCK(dhd, flags);
+ if (DHD_BUS_CHECK_SUSPEND_OR_ANY_SUSPEND_IN_PROGRESS(dhd)) {
+ DHD_INFO(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
+ __FUNCTION__, dhd->busstate, dhd->dhd_bus_busy_state));
+ DHD_GENERAL_UNLOCK(dhd, flags);
+ } else {
+ DHD_GENERAL_UNLOCK(dhd, flags);
+ DHD_EDL_RING_TCM_RD_UPDATE(dhd);
+ }
+
+ /* if num_items > bound, then anyway we will reschedule and
+ * this function runs again, so that if in between the DPC has
+ * updated the wr index, then the updated wr is read. But if
+ * num_items <= bound, and if DPC executes and updates the wr index
+ * when the above while loop is running, then the updated 'wr' index
+	 * needs to be re-read here. Otherwise the event logs will not be
+	 * processed until the next time this function is scheduled.
+ */
+ if (num_items <= DHD_EVENT_LOGTRACE_BOUND) {
+		/* read the updated wr index if required and update num_items */
+ DHD_RING_LOCK(ring->ring_lock, flags);
+ if (wr != (volatile uint16)ring->wr) {
+ wr = (volatile uint16)ring->wr;
+ new_items = READ_AVAIL_SPACE(wr, ring->rd, depth);
+ DHD_INFO(("%s: new items [%u] avail in edl\n",
+ __FUNCTION__, new_items));
+ num_items += new_items;
+ }
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+ }
+
+ /* if # of items processed is less than num_items, need to re-schedule
+ * the deferred ctx
+ */
+ if (max_items_to_process < num_items) {
+ DHD_INFO(("%s: EDL bound hit / new items found, "
+ "items processed=%u; remaining=%u, "
+ "resched deferred ctx...\n",
+ __FUNCTION__, max_items_to_process,
+ num_items - max_items_to_process));
+ return (num_items - max_items_to_process);
+ }
+
+ return 0;
+
+}
+
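+/** called when DHD needs to push the EDL ring read index to dongle memory (TCM) */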
+void
+dhd_prot_edl_ring_tcm_rd_update(dhd_pub_t *dhd)
+{
+ dhd_prot_t *prot = NULL;
+ unsigned long flags = 0;
+ msgbuf_ring_t *ring = NULL;
+
+ if (!dhd)
+ return;
+
+ prot = dhd->prot;
+ if (!prot || !prot->d2hring_edl)
+ return;
+
+ ring = prot->d2hring_edl;
+ DHD_RING_LOCK(ring->ring_lock, flags);
+ dhd_prot_upd_read_idx(dhd, ring);
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+ if (dhd->dma_h2d_ring_upd_support &&
+ !IDMA_ACTIVE(dhd)) {
+ dhd_prot_ring_doorbell(dhd, DHD_RDPTR_UPDATE_H2D_DB_MAGIC(ring));
+ }
+}
+#endif /* EWP_EDL */
+
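+/** hands a received frame to the RX load balancer when enabled, else directly to the bus RX path */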
+static void
+dhd_prot_rx_frame(dhd_pub_t *dhd, void *pkt, int ifidx, uint pkt_count)
+{
+
+#ifdef DHD_LB_RXP
+ if (dhd_read_lb_rxp(dhd) == 1) {
+ dhd_lb_rx_pkt_enqueue(dhd, pkt, ifidx);
+ return;
+ }
+#endif /* DHD_LB_RXP */
+ dhd_bus_rx_frame(dhd->bus, pkt, ifidx, pkt_count);
+}
+
+#ifdef DHD_LB_RXP
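+/* RX load-balancer flow control with hysteresis: rxcpl processing stops once
+ * the pending queue grows past lb_rxp_stop_thr and resumes only after it
+ * drains below lb_rxp_strt_thr. With illustrative (not default) values
+ * stop_thr=200 and strt_thr=50, a queue length oscillating between 60 and
+ * 190 would never toggle the flow-control state.
+ */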
+static int dhd_prot_lb_rxp_flow_ctrl(dhd_pub_t *dhd)
+{
+ if ((dhd->lb_rxp_stop_thr == 0) || (dhd->lb_rxp_strt_thr == 0)) {
+		/* when either the stop or the start threshold is zero, flow ctrl is not enabled */
+ return FALSE;
+ }
+
+ if ((dhd_lb_rxp_process_qlen(dhd) >= dhd->lb_rxp_stop_thr) &&
+ (!atomic_read(&dhd->lb_rxp_flow_ctrl))) {
+ atomic_set(&dhd->lb_rxp_flow_ctrl, TRUE);
+#ifdef DHD_LB_STATS
+ dhd->lb_rxp_stop_thr_hitcnt++;
+#endif /* DHD_LB_STATS */
+ DHD_INFO(("lb_rxp_process_qlen %d lb_rxp_stop_thr %d\n",
+ dhd_lb_rxp_process_qlen(dhd), dhd->lb_rxp_stop_thr));
+ } else if ((dhd_lb_rxp_process_qlen(dhd) <= dhd->lb_rxp_strt_thr) &&
+ (atomic_read(&dhd->lb_rxp_flow_ctrl))) {
+ atomic_set(&dhd->lb_rxp_flow_ctrl, FALSE);
+#ifdef DHD_LB_STATS
+ dhd->lb_rxp_strt_thr_hitcnt++;
+#endif /* DHD_LB_STATS */
+ DHD_INFO(("lb_rxp_process_qlen %d lb_rxp_strt_thr %d\n",
+ dhd_lb_rxp_process_qlen(dhd), dhd->lb_rxp_strt_thr));
+ }
+
+ return atomic_read(&dhd->lb_rxp_flow_ctrl);
+}
+#endif /* DHD_LB_RXP */
+
+/** called when DHD needs to check for 'receive complete' messages from the dongle */
+bool
+BCMFASTPATH(dhd_prot_process_msgbuf_rxcpl)(dhd_pub_t *dhd, uint bound, int ringtype)
+{
+ bool more = FALSE;
+ uint n = 0;
+ dhd_prot_t *prot = dhd->prot;
+ msgbuf_ring_t *ring;
+ uint16 item_len;
+ host_rxbuf_cmpl_t *msg = NULL;
+ uint8 *msg_addr;
+ uint32 msg_len;
+ uint16 pkt_cnt, pkt_cnt_newidx;
+ unsigned long flags;
+ dmaaddr_t pa;
+ uint32 len;
+ void *dmah;
+ void *secdma;
+ int ifidx = 0, if_newidx = 0;
+ void *pkt, *pktqhead = NULL, *prevpkt = NULL, *pkt_newidx, *nextpkt;
+ uint32 pktid;
+ int i;
+ uint8 sync;
+
+#ifdef DHD_LB_RXP
+ /* must be the first check in this function */
+ if (dhd_prot_lb_rxp_flow_ctrl(dhd)) {
+		/* DHD is holding a lot of RX packets.
+		 * Just give the network stack a chance to consume the RX packets.
+		 */
+ return FALSE;
+ }
+#endif /* DHD_LB_RXP */
+#ifdef DHD_PCIE_RUNTIMEPM
+ /* Set rx_pending_due_to_rpm if device is not in resume state */
+ if (dhdpcie_runtime_bus_wake(dhd, FALSE, dhd_prot_process_msgbuf_rxcpl)) {
+ dhd->rx_pending_due_to_rpm = TRUE;
+ return more;
+ }
+ dhd->rx_pending_due_to_rpm = FALSE;
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+#ifdef DHD_HP2P
+ if (ringtype == DHD_HP2P_RING && prot->d2hring_hp2p_rxcpl)
+ ring = prot->d2hring_hp2p_rxcpl;
+ else
+#endif /* DHD_HP2P */
+ ring = &prot->d2hring_rx_cpln;
+ item_len = ring->item_len;
+ while (1) {
+ if (dhd_is_device_removed(dhd))
+ break;
+
+ if (dhd_query_bus_erros(dhd))
+ break;
+
+ if (dhd->hang_was_sent)
+ break;
+
+ if (dhd->smmu_fault_occurred) {
+ break;
+ }
+
+ pkt_cnt = 0;
+ pktqhead = pkt_newidx = NULL;
+ pkt_cnt_newidx = 0;
+
+ DHD_RING_LOCK(ring->ring_lock, flags);
+
+ /* Get the address of the next message to be read from ring */
+ msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
+ if (msg_addr == NULL) {
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+ break;
+ }
+
+ while (msg_len > 0) {
+ msg = (host_rxbuf_cmpl_t *)msg_addr;
+
+ /* Wait until DMA completes, then fetch msg_type */
+ sync = prot->d2h_sync_cb(dhd, ring, &msg->cmn_hdr, item_len);
+ /*
+ * Update the curr_rd to the current index in the ring, from where
+ * the work item is fetched. This way if the fetched work item
+ * fails in LIVELOCK, we can print the exact read index in the ring
+ * that shows up the corrupted work item.
+ */
+ if ((ring->curr_rd + 1) >= ring->max_items) {
+ ring->curr_rd = 0;
+ } else {
+ ring->curr_rd += 1;
+ }
+
+ if (!sync) {
+ msg_len -= item_len;
+ msg_addr += item_len;
+ continue;
+ }
+
+ pktid = ltoh32(msg->cmn_hdr.request_id);
+
+#ifdef DHD_PKTID_AUDIT_RING
+ DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_rx_map, pktid,
+ DHD_DUPLICATE_FREE, msg, D2HRING_RXCMPLT_ITEMSIZE);
+#endif /* DHD_PKTID_AUDIT_RING */
+
+ pkt = DHD_PKTID_TO_NATIVE(dhd, prot->pktid_rx_map, pktid, pa,
+ len, dmah, secdma, PKTTYPE_DATA_RX);
+ /* Sanity check of shinfo nrfrags */
+ if (!pkt || (dhd_check_shinfo_nrfrags(dhd, pkt, &pa, pktid) != BCME_OK)) {
+ msg_len -= item_len;
+ msg_addr += item_len;
+ continue;
+ }
+ dhd->prot->tot_rxcpl++;
+
+ DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah);
+
+#ifdef DMAMAP_STATS
+ dhd->dma_stats.rxdata--;
+ dhd->dma_stats.rxdata_sz -= len;
+#endif /* DMAMAP_STATS */
+#ifdef DHD_HMAPTEST
+ if ((dhd->prot->hmaptest_rx_active == HMAPTEST_D11_RX_POSTED) &&
+ (pktid == dhd->prot->hmaptest_rx_pktid)) {
+
+ uchar *ptr;
+ ptr = PKTDATA(dhd->osh, pkt) - (prot->rx_metadata_offset);
+ DMA_UNMAP(dhd->osh, dhd->prot->hmap_rx_buf_pa,
+ (uint)dhd->prot->hmap_rx_buf_len, DMA_RX, 0, dmah);
+ DHD_ERROR(("hmaptest: d11write rxcpl rcvd sc rxbuf pktid=0x%08x\n",
+ pktid));
+ DHD_ERROR(("hmaptest: d11write rxcpl r0_st=0x%08x r1_stat=0x%08x\n",
+ msg->rx_status_0, msg->rx_status_1));
+ DHD_ERROR(("hmaptest: d11write rxcpl rxbuf va=0x%p pa=0x%08x\n",
+ dhd->prot->hmap_rx_buf_va,
+ (uint32)PHYSADDRLO(dhd->prot->hmap_rx_buf_pa)));
+ DHD_ERROR(("hmaptest: d11write rxcpl pktdata va=0x%p pa=0x%08x\n",
+ PKTDATA(dhd->osh, pkt), (uint32)PHYSADDRLO(pa)));
+ memcpy(ptr, dhd->prot->hmap_rx_buf_va, dhd->prot->hmap_rx_buf_len);
+ dhd->prot->hmaptest_rx_active = HMAPTEST_D11_RX_INACTIVE;
+ dhd->prot->hmap_rx_buf_va = NULL;
+ dhd->prot->hmap_rx_buf_len = 0;
+ PHYSADDRHISET(dhd->prot->hmap_rx_buf_pa, 0);
+ PHYSADDRLOSET(dhd->prot->hmap_rx_buf_pa, 0);
+ prot->hmaptest.in_progress = FALSE;
+ }
+#endif /* DHD_HMAPTEST */
+ DHD_MSGBUF_INFO(("id 0x%04x, offset %d, len %d, idx %d, phase 0x%02x, "
+ "pktdata %p, metalen %d\n",
+ ltoh32(msg->cmn_hdr.request_id),
+ ltoh16(msg->data_offset),
+ ltoh16(msg->data_len), msg->cmn_hdr.if_id,
+ msg->cmn_hdr.flags, PKTDATA(dhd->osh, pkt),
+ ltoh16(msg->metadata_len)));
+
+ pkt_cnt++;
+ msg_len -= item_len;
+ msg_addr += item_len;
+
+#if !defined(BCM_ROUTER_DHD)
+#if DHD_DBG_SHOW_METADATA
+ if (prot->metadata_dbg && prot->rx_metadata_offset &&
+ msg->metadata_len) {
+ uchar *ptr;
+ ptr = PKTDATA(dhd->osh, pkt) - (prot->rx_metadata_offset);
+ /* header followed by data */
+ bcm_print_bytes("rxmetadata", ptr, msg->metadata_len);
+ dhd_prot_print_metadata(dhd, ptr, msg->metadata_len);
+ }
+#endif /* DHD_DBG_SHOW_METADATA */
+#endif /* !BCM_ROUTER_DHD */
+
+ /* data_offset from buf start */
+ if (ltoh16(msg->data_offset)) {
+ /* data offset given from dongle after split rx */
+ PKTPULL(dhd->osh, pkt, ltoh16(msg->data_offset));
+ }
+ else if (prot->rx_dataoffset) {
+ /* DMA RX offset updated through shared area */
+ PKTPULL(dhd->osh, pkt, prot->rx_dataoffset);
+ }
+ /* Actual length of the packet */
+ PKTSETLEN(dhd->osh, pkt, ltoh16(msg->data_len));
+#ifdef DHD_PKTTS
+ if (dhd_get_pktts_enab(dhd) == TRUE) {
+ uint fwr1 = 0, fwr2 = 0;
+
+				/* firmware marks rx_pktts.tref with 0xFFFFFFFF on errors */
+ if (ltoh32(msg->rx_pktts.tref) != 0xFFFFFFFF) {
+ fwr1 = (uint)htonl(ltoh32(msg->rx_pktts.tref));
+ fwr2 = (uint)htonl(ltoh32(msg->rx_pktts.tref) +
+ ltoh16(msg->rx_pktts.d_t2));
+
+ /* check for overflow */
+ if (ntohl(fwr2) > ntohl(fwr1)) {
+						/* send rx timestamp to netlink socket */
+ dhd_msgbuf_send_msg_rx_ts(dhd, pkt, fwr1, fwr2);
+ }
+ }
+ }
+#endif /* DHD_PKTTS */
+
+#if defined(WL_MONITOR)
+ if (dhd_monitor_enabled(dhd, ifidx)) {
+ if (msg->flags & BCMPCIE_PKT_FLAGS_FRAME_802_11) {
+ dhd_rx_mon_pkt(dhd, msg, pkt, ifidx);
+ continue;
+ } else {
+					DHD_ERROR(("Received non-802.11 packet "
+						"while monitor mode is enabled\n"));
+ }
+ }
+#endif /* WL_MONITOR */
+
+ if (!pktqhead) {
+ pktqhead = prevpkt = pkt;
+ ifidx = msg->cmn_hdr.if_id;
+ } else {
+ if (ifidx != msg->cmn_hdr.if_id) {
+ pkt_newidx = pkt;
+ if_newidx = msg->cmn_hdr.if_id;
+ pkt_cnt--;
+ pkt_cnt_newidx = 1;
+ break;
+ } else {
+ PKTSETNEXT(dhd->osh, prevpkt, pkt);
+ prevpkt = pkt;
+ }
+ }
+
+#ifdef DHD_HP2P
+ if (dhd->hp2p_capable && ring == prot->d2hring_hp2p_rxcpl) {
+#ifdef DHD_HP2P_DEBUG
+ bcm_print_bytes("Rxcpl", (uchar *)msg, sizeof(host_rxbuf_cmpl_t));
+#endif /* DHD_HP2P_DEBUG */
+ dhd_update_hp2p_rxstats(dhd, msg);
+ }
+#endif /* DHD_HP2P */
+
+#ifdef DHD_TIMESYNC
+ if (dhd->prot->rx_ts_log_enabled) {
+ dhd_pkt_parse_t parse;
+ ts_timestamp_t *ts = (ts_timestamp_t *)&msg->ts;
+
+ memset(&parse, 0, sizeof(dhd_pkt_parse_t));
+ dhd_parse_proto(PKTDATA(dhd->osh, pkt), &parse);
+
+ if (parse.proto == IP_PROT_ICMP)
+ dhd_timesync_log_rx_timestamp(dhd->ts, ifidx,
+ ts->low, ts->high, &parse);
+ }
+#endif /* DHD_TIMESYNC */
+
+#ifdef DHD_LBUF_AUDIT
+ PKTAUDIT(dhd->osh, pkt);
+#endif
+ }
+
+ /* roll back read pointer for unprocessed message */
+ if (msg_len > 0) {
+ if (ring->rd < msg_len / item_len)
+ ring->rd = ring->max_items - msg_len / item_len;
+ else
+ ring->rd -= msg_len / item_len;
+ }
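+		/* rd < (msg_len / item_len) above can only happen when the read
+		 * wrapped rd to 0 (assuming dhd_prot_get_read_addr() only hands
+		 * out contiguous items), so stepping back by the unprocessed
+		 * count lands at max_items - count.
+		 */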
+
+ /* Update read pointer */
+ dhd_prot_upd_read_idx(dhd, ring);
+
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+
+ pkt = pktqhead;
+ for (i = 0; pkt && i < pkt_cnt; i++, pkt = nextpkt) {
+ nextpkt = PKTNEXT(dhd->osh, pkt);
+ PKTSETNEXT(dhd->osh, pkt, NULL);
+#ifdef DHD_RX_CHAINING
+ dhd_rxchain_frame(dhd, pkt, ifidx);
+#else
+ dhd_prot_rx_frame(dhd, pkt, ifidx, 1);
+#endif /* DHD_RX_CHAINING */
+ }
+
+ if (pkt_newidx) {
+#ifdef DHD_RX_CHAINING
+ dhd_rxchain_frame(dhd, pkt_newidx, if_newidx);
+#else
+ dhd_prot_rx_frame(dhd, pkt_newidx, if_newidx, 1);
+#endif /* DHD_RX_CHAINING */
+ }
+
+ pkt_cnt += pkt_cnt_newidx;
+
+ /* Post another set of rxbufs to the device */
+ dhd_prot_return_rxbuf(dhd, ring, 0, pkt_cnt);
+
+#ifdef DHD_RX_CHAINING
+ dhd_rxchain_commit(dhd);
+#endif
+
+ /* After batch processing, check RX bound */
+ n += pkt_cnt;
+ if (n >= bound) {
+ more = TRUE;
+ break;
+ }
+ }
+
+ /* Call lb_dispatch only if packets are queued */
+ if (n &&
+#ifdef WL_MONITOR
+ !(dhd_monitor_enabled(dhd, ifidx)) &&
+#endif /* WL_MONITOR */
+ TRUE) {
+ DHD_LB_DISPATCH_RX_PROCESS(dhd);
+ }
+
+ return more;
+
+}
+
+/**
+ * Hands transmit packets (with a caller provided flow_id) over to dongle territory (the flow ring)
+ */
+void
+dhd_prot_update_txflowring(dhd_pub_t *dhd, uint16 flowid, void *msgring)
+{
+ msgbuf_ring_t *ring = (msgbuf_ring_t *)msgring;
+
+ if (ring == NULL) {
+ DHD_ERROR(("%s: NULL txflowring. exiting...\n", __FUNCTION__));
+ return;
+ }
+ /* Update read pointer */
+ if (dhd->dma_d2h_ring_upd_support) {
+ ring->rd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
+ }
+
+ DHD_TRACE(("ringid %d flowid %d write %d read %d \n\n",
+ ring->idx, flowid, ring->wr, ring->rd));
+
+ /* Need more logic here, but for now use it directly */
+ dhd_bus_schedule_queue(dhd->bus, flowid, TRUE); /* from queue to flowring */
+}
+
+/** called when DHD needs to check for 'transmit complete' messages from the dongle */
+bool
+BCMFASTPATH(dhd_prot_process_msgbuf_txcpl)(dhd_pub_t *dhd, uint bound, int ringtype)
+{
+ bool more = TRUE;
+ uint n = 0;
+ msgbuf_ring_t *ring;
+ unsigned long flags;
+
+#ifdef DHD_HP2P
+ if (ringtype == DHD_HP2P_RING && dhd->prot->d2hring_hp2p_txcpl)
+ ring = dhd->prot->d2hring_hp2p_txcpl;
+ else
+#endif /* DHD_HP2P */
+ ring = &dhd->prot->d2hring_tx_cpln;
+
+ /* Process all the messages - DTOH direction */
+ while (!dhd_is_device_removed(dhd)) {
+ uint8 *msg_addr;
+ uint32 msg_len;
+
+ if (dhd_query_bus_erros(dhd)) {
+ more = FALSE;
+ break;
+ }
+
+ if (dhd->hang_was_sent) {
+ more = FALSE;
+ break;
+ }
+
+ if (dhd->smmu_fault_occurred) {
+ more = FALSE;
+ break;
+ }
+
+ DHD_RING_LOCK(ring->ring_lock, flags);
+ /* Get the address of the next message to be read from ring */
+ msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+
+ if (msg_addr == NULL) {
+ more = FALSE;
+ break;
+ }
+
+ /* Prefetch data to populate the cache */
+ OSL_PREFETCH(msg_addr);
+
+ if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) {
+ DHD_ERROR(("%s: process %s msg addr %p len %d\n",
+ __FUNCTION__, ring->name, msg_addr, msg_len));
+ }
+
+ /* Write to dngl rd ptr */
+ dhd_prot_upd_read_idx(dhd, ring);
+
+ /* After batch processing, check bound */
+ n += msg_len / ring->item_len;
+ if (n >= bound) {
+ break;
+ }
+ }
+
+ if (n) {
+ /* For IDMA and HWA case, doorbell is sent along with read index update.
+ * For DMA indices case ring doorbell once n items are read to sync with dongle.
+ */
+ if (dhd->dma_h2d_ring_upd_support && !IDMA_ACTIVE(dhd)) {
+ dhd_prot_ring_doorbell(dhd, DHD_RDPTR_UPDATE_H2D_DB_MAGIC(ring));
+ dhd->prot->txcpl_db_cnt++;
+ }
+ }
+ return more;
+}
+
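+/** called when DHD needs to check the firmware trap buffer; returns the trap
+ * word when D2H_DEV_FWHALT is set, 0 otherwise
+ */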
+int
+BCMFASTPATH(dhd_prot_process_trapbuf)(dhd_pub_t *dhd)
+{
+ uint32 data;
+ dhd_dma_buf_t *trap_addr = &dhd->prot->fw_trap_buf;
+
+ /* Interrupts can come in before this struct
+ * has been initialized.
+ */
+ if (trap_addr->va == NULL) {
+ DHD_ERROR(("%s: trap_addr->va is NULL\n", __FUNCTION__));
+ return 0;
+ }
+
+ OSL_CACHE_INV((void *)trap_addr->va, sizeof(uint32));
+ data = *(uint32 *)(trap_addr->va);
+
+ if (data & D2H_DEV_FWHALT) {
+ if (dhd->db7_trap.fw_db7w_trap_inprogress) {
+ DHD_ERROR(("DB7 FW responded 0x%04x\n", data));
+ } else {
+ DHD_ERROR(("Firmware trapped and trap_data is 0x%04x\n", data));
+ }
+
+ if (data & D2H_DEV_EXT_TRAP_DATA)
+ {
+ if (dhd->extended_trap_data) {
+ OSL_CACHE_INV((void *)trap_addr->va,
+ BCMPCIE_EXT_TRAP_DATA_MAXLEN);
+ memcpy(dhd->extended_trap_data, (uint32 *)trap_addr->va,
+ BCMPCIE_EXT_TRAP_DATA_MAXLEN);
+ }
+ if (dhd->db7_trap.fw_db7w_trap_inprogress == FALSE) {
+ DHD_ERROR(("Extended trap data available\n"));
+ }
+ }
+#ifdef BT_OVER_PCIE
+ if (data & D2H_DEV_TRAP_DUE_TO_BT) {
+ DHD_ERROR(("WLAN Firmware trapped due to BT\n"));
+ dhd->dongle_trap_due_to_bt = TRUE;
+ }
+#endif /* BT_OVER_PCIE */
+ return data;
+ }
+ return 0;
+}
+
+/** called when DHD needs to check for 'ioctl complete' messages from the dongle */
+int
+BCMFASTPATH(dhd_prot_process_ctrlbuf)(dhd_pub_t *dhd)
+{
+ dhd_prot_t *prot = dhd->prot;
+ msgbuf_ring_t *ring = &prot->d2hring_ctrl_cpln;
+ unsigned long flags;
+
+ /* Process all the messages - DTOH direction */
+ while (!dhd_is_device_removed(dhd)) {
+ uint8 *msg_addr;
+ uint32 msg_len;
+
+ if (dhd_query_bus_erros(dhd)) {
+ break;
+ }
+
+ if (dhd->hang_was_sent) {
+ break;
+ }
+
+ if (dhd->smmu_fault_occurred) {
+ break;
+ }
+
+ DHD_RING_LOCK(ring->ring_lock, flags);
+ /* Get the address of the next message to be read from ring */
+ msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+
+ if (msg_addr == NULL) {
+ break;
+ }
+
+ /* Prefetch data to populate the cache */
+ OSL_PREFETCH(msg_addr);
+ if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) {
+ DHD_ERROR(("%s: process %s msg addr %p len %d\n",
+ __FUNCTION__, ring->name, msg_addr, msg_len));
+ }
+
+ /* Write to dngl rd ptr */
+ dhd_prot_upd_read_idx(dhd, ring);
+ }
+
+ return 0;
+}
+
+/**
+ * Consume messages out of the D2H ring. Ensure that the message's DMA to host
+ * memory has completed, before invoking the message handler via a table lookup
+ * of the cmn_msg_hdr::msg_type.
+ */
+static int
+BCMFASTPATH(dhd_prot_process_msgtype)(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8 *buf, uint32 len)
+{
+ uint32 buf_len = len;
+ uint16 item_len;
+ uint8 msg_type;
+ cmn_msg_hdr_t *msg = NULL;
+ int ret = BCME_OK;
+
+ ASSERT(ring);
+ item_len = ring->item_len;
+ if (item_len == 0) {
+ DHD_ERROR(("%s: ringidx %d, item_len %d buf_len %d \n",
+ __FUNCTION__, ring->idx, item_len, buf_len));
+ return BCME_ERROR;
+ }
+
+ while (buf_len > 0) {
+ if (dhd->hang_was_sent) {
+ ret = BCME_ERROR;
+ goto done;
+ }
+
+ if (dhd->smmu_fault_occurred) {
+ ret = BCME_ERROR;
+ goto done;
+ }
+
+ msg = (cmn_msg_hdr_t *)buf;
+
+ /* Wait until DMA completes, then fetch msg_type */
+ msg_type = dhd->prot->d2h_sync_cb(dhd, ring, msg, item_len);
+
+ /*
+ * Update the curr_rd to the current index in the ring, from where
+ * the work item is fetched. This way if the fetched work item
+ * fails in LIVELOCK, we can print the exact read index in the ring
+ * that shows up the corrupted work item.
+ */
+ if ((ring->curr_rd + 1) >= ring->max_items) {
+ ring->curr_rd = 0;
+ } else {
+ ring->curr_rd += 1;
+ }
+
+ /* Prefetch data to populate the cache */
+ OSL_PREFETCH(buf + item_len);
+
+ DHD_MSGBUF_INFO(("msg_type %d item_len %d buf_len %d\n",
+ msg_type, item_len, buf_len));
+
+ if (msg_type == MSG_TYPE_LOOPBACK) {
+ bcm_print_bytes("LPBK RESP: ", (uint8 *)msg, item_len);
+ DHD_ERROR((" MSG_TYPE_LOOPBACK, len %d\n", item_len));
+ }
+
+ ASSERT(msg_type < DHD_PROT_FUNCS);
+ if (msg_type >= DHD_PROT_FUNCS) {
+ DHD_ERROR(("%s: msg_type %d, item_len %d buf_len %d\n",
+ __FUNCTION__, msg_type, item_len, buf_len));
+ ret = BCME_ERROR;
+ goto done;
+ }
+
+#if !defined(BCM_ROUTER_DHD)
+ if (msg_type == MSG_TYPE_INFO_BUF_CMPLT) {
+ if (ring == dhd->prot->d2hring_info_cpln) {
+ if (!dhd->prot->infobufpost) {
+					DHD_ERROR(("no infobufs posted, "
+						"but there is a completion\n"));
+ goto done;
+ }
+ dhd->prot->infobufpost--;
+ dhd_prot_infobufpost(dhd, dhd->prot->h2dring_info_subn);
+ dhd_prot_process_infobuf_complete(dhd, buf);
+ }
+#ifdef BTLOG
+ else if (ring == dhd->prot->d2hring_btlog_cpln) {
+ info_buf_resp_t *resp = (info_buf_resp_t *)buf;
+
+ if (!dhd->prot->btlogbufpost) {
+					DHD_ERROR(("no btlog bufs posted, "
+						"but there is a completion\n"));
+ goto done;
+ }
+
+ dhd->prot->btlogbufpost--;
+ if (resp->compl_hdr.status != BCMPCIE_PKT_FLUSH) {
+ dhd_prot_infobufpost(dhd, dhd->prot->h2dring_btlog_subn);
+ }
+ dhd_prot_process_btlog_complete(dhd, buf);
+ }
+#endif /* BTLOG */
+ } else
+#endif /* !defined(BCM_ROUTER_DHD) */
+ if (table_lookup[msg_type]) {
+ table_lookup[msg_type](dhd, buf);
+ }
+
+ if (buf_len < item_len) {
+ ret = BCME_ERROR;
+ goto done;
+ }
+ buf_len = buf_len - item_len;
+ buf = buf + item_len;
+ }
+
+done:
+
+#ifdef DHD_RX_CHAINING
+ dhd_rxchain_commit(dhd);
+#endif
+
+ return ret;
+} /* dhd_prot_process_msgtype */
+
+static void
+dhd_prot_noop(dhd_pub_t *dhd, void *msg)
+{
+ return;
+}
+
+/** called on MSG_TYPE_RING_STATUS message received from dongle */
+static void
+dhd_prot_ringstatus_process(dhd_pub_t *dhd, void *msg)
+{
+ pcie_ring_status_t *ring_status = (pcie_ring_status_t *) msg;
+ uint32 request_id = ltoh32(ring_status->cmn_hdr.request_id);
+ uint16 status = ltoh16(ring_status->compl_hdr.status);
+ uint16 ring_id = ltoh16(ring_status->compl_hdr.flow_ring_id);
+
+ DHD_ERROR(("ring status: request_id %d, status 0x%04x, flow ring %d, write_idx %d \n",
+ request_id, status, ring_id, ltoh16(ring_status->write_idx)));
+
+ if (ltoh16(ring_status->compl_hdr.ring_id) != BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT)
+ return;
+ if (status == BCMPCIE_BAD_PHASE) {
+		/* bad phase reported from the dongle */
+ /* XXX: if the request is ioctl request finish the ioctl, rather than timing out */
+ DHD_ERROR(("Bad phase\n"));
+ }
+ if (status != BCMPCIE_BADOPTION)
+ return;
+
+ if (request_id == DHD_H2D_DBGRING_REQ_PKTID) {
+ /* XXX: see if the debug ring create is pending */
+ if (dhd->prot->h2dring_info_subn != NULL) {
+ if (dhd->prot->h2dring_info_subn->create_pending == TRUE) {
+ DHD_ERROR(("H2D ring create failed for info ring\n"));
+ dhd->prot->h2dring_info_subn->create_pending = FALSE;
+ }
+ else
+				DHD_ERROR(("ring create response for a ring with no create pending\n"));
+ } else {
+ DHD_ERROR(("%s info submit ring doesn't exist\n", __FUNCTION__));
+ }
+ }
+ else if (request_id == DHD_D2H_DBGRING_REQ_PKTID) {
+ /* XXX: see if the debug ring create is pending */
+ if (dhd->prot->d2hring_info_cpln != NULL) {
+ if (dhd->prot->d2hring_info_cpln->create_pending == TRUE) {
+ DHD_ERROR(("D2H ring create failed for info ring\n"));
+ dhd->prot->d2hring_info_cpln->create_pending = FALSE;
+ }
+ else
+ DHD_ERROR(("ring create ID for info ring, create not pending\n"));
+ } else {
+ DHD_ERROR(("%s info cpl ring doesn't exist\n", __FUNCTION__));
+ }
+ }
+#ifdef BTLOG
+ else if (request_id == DHD_H2D_BTLOGRING_REQ_PKTID) {
+ /* XXX: see if the debug ring create is pending */
+ if (dhd->prot->h2dring_btlog_subn != NULL) {
+ if (dhd->prot->h2dring_btlog_subn->create_pending == TRUE) {
+ DHD_ERROR(("H2D ring create failed for btlog ring\n"));
+ dhd->prot->h2dring_btlog_subn->create_pending = FALSE;
+ }
+ else
+				DHD_ERROR(("ring create response for a ring with no create pending\n"));
+ } else {
+ DHD_ERROR(("%s btlog submit ring doesn't exist\n", __FUNCTION__));
+ }
+ }
+ else if (request_id == DHD_D2H_BTLOGRING_REQ_PKTID) {
+ /* XXX: see if the debug ring create is pending */
+ if (dhd->prot->d2hring_btlog_cpln != NULL) {
+ if (dhd->prot->d2hring_btlog_cpln->create_pending == TRUE) {
+ DHD_ERROR(("D2H ring create failed for btlog ring\n"));
+ dhd->prot->d2hring_btlog_cpln->create_pending = FALSE;
+ }
+ else
+ DHD_ERROR(("ring create ID for btlog ring, create not pending\n"));
+ } else {
+ DHD_ERROR(("%s btlog cpl ring doesn't exist\n", __FUNCTION__));
+ }
+ }
+#endif /* BTLOG */
+#ifdef DHD_HP2P
+ else if (request_id == DHD_D2H_HPPRING_TXREQ_PKTID) {
+ /* XXX: see if the HPP txcmpl ring create is pending */
+ if (dhd->prot->d2hring_hp2p_txcpl != NULL) {
+ if (dhd->prot->d2hring_hp2p_txcpl->create_pending == TRUE) {
+ DHD_ERROR(("H2D ring create failed for hp2p ring\n"));
+ dhd->prot->d2hring_hp2p_txcpl->create_pending = FALSE;
+ }
+ else
+				DHD_ERROR(("ring create response for a ring with no create pending\n"));
+ } else {
+ DHD_ERROR(("%s hp2p txcmpl ring doesn't exist\n", __FUNCTION__));
+ }
+ }
+ else if (request_id == DHD_D2H_HPPRING_RXREQ_PKTID) {
+ /* XXX: see if the hp2p rxcmpl ring create is pending */
+ if (dhd->prot->d2hring_hp2p_rxcpl != NULL) {
+ if (dhd->prot->d2hring_hp2p_rxcpl->create_pending == TRUE) {
+ DHD_ERROR(("D2H ring create failed for hp2p rxcmpl ring\n"));
+ dhd->prot->d2hring_hp2p_rxcpl->create_pending = FALSE;
+ }
+ else
+ DHD_ERROR(("ring create ID for hp2p rxcmpl ring, not pending\n"));
+ } else {
+ DHD_ERROR(("%s hp2p rxcpl ring doesn't exist\n", __FUNCTION__));
+ }
+ }
+#endif /* DHD_HP2P */
+ else {
+		DHD_ERROR(("don't know how to pair with the original request\n"));
+ }
+ /* How do we track this to pair it with ??? */
+ return;
+}
+
+/** called on MSG_TYPE_GEN_STATUS ('general status') message received from dongle */
+static void
+dhd_prot_genstatus_process(dhd_pub_t *dhd, void *msg)
+{
+ pcie_gen_status_t *gen_status = (pcie_gen_status_t *)msg;
+ DHD_ERROR(("ERROR: gen status: request_id %d, STATUS 0x%04x, flow ring %d \n",
+ gen_status->cmn_hdr.request_id, gen_status->compl_hdr.status,
+ gen_status->compl_hdr.flow_ring_id));
+
+ /* How do we track this to pair it with ??? */
+ return;
+}
+
+/**
+ * Called on MSG_TYPE_IOCTLPTR_REQ_ACK ('ioctl ack') message received from dongle, meaning that the
+ * dongle received the ioctl message in dongle memory.
+ */
+static void
+dhd_prot_ioctack_process(dhd_pub_t *dhd, void *msg)
+{
+ ioctl_req_ack_msg_t *ioct_ack = (ioctl_req_ack_msg_t *)msg;
+ unsigned long flags;
+#if defined(DHD_PKTID_AUDIT_RING) && !defined(BCM_ROUTER_DHD)
+ uint32 pktid = ltoh32(ioct_ack->cmn_hdr.request_id);
+#endif /* DHD_PKTID_AUDIT_RING && !BCM_ROUTER_DHD */
+
+#if defined(DHD_PKTID_AUDIT_RING) && !defined(BCM_ROUTER_DHD)
+	/* Skip audit for DHD_IOCTL_REQ_PKTID = 0xFFFE */
+ if (pktid != DHD_IOCTL_REQ_PKTID) {
+#ifndef IOCTLRESP_USE_CONSTMEM
+ DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, pktid,
+ DHD_TEST_IS_ALLOC, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
+#else
+ DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_map_handle_ioctl, pktid,
+ DHD_TEST_IS_ALLOC, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
+#endif /* !IOCTLRESP_USE_CONSTMEM */
+ }
+#endif /* DHD_PKTID_AUDIT_RING && !BCM_ROUTER_DHD */
+
+ dhd->prot->ioctl_ack_time = OSL_LOCALTIME_NS();
+
+ DHD_GENERAL_LOCK(dhd, flags);
+ if ((dhd->prot->ioctl_state & MSGBUF_IOCTL_ACK_PENDING) &&
+ (dhd->prot->ioctl_state & MSGBUF_IOCTL_RESP_PENDING)) {
+ dhd->prot->ioctl_state &= ~MSGBUF_IOCTL_ACK_PENDING;
+ } else {
+ DHD_ERROR(("%s: received ioctl ACK with state %02x trans_id = %d\n",
+ __FUNCTION__, dhd->prot->ioctl_state, dhd->prot->ioctl_trans_id));
+ prhex("dhd_prot_ioctack_process:",
+ (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
+ }
+ DHD_GENERAL_UNLOCK(dhd, flags);
+
+ DHD_CTL(("ioctl req ack: request_id %d, status 0x%04x, flow ring %d \n",
+ ioct_ack->cmn_hdr.request_id, ioct_ack->compl_hdr.status,
+ ioct_ack->compl_hdr.flow_ring_id));
+ if (ioct_ack->compl_hdr.status != 0) {
+ DHD_ERROR(("got an error status for the ioctl request...need to handle that\n"));
+		/* FIXME: should we fail the pending IOCTL completion wait process... */
+ }
+#ifdef REPORT_FATAL_TIMEOUTS
+ else {
+ dhd_stop_bus_timer(dhd);
+ }
+#endif /* REPORT_FATAL_TIMEOUTS */
+}
+
+/** called on MSG_TYPE_IOCTL_CMPLT message received from dongle */
+static void
+dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void *msg)
+{
+ dhd_prot_t *prot = dhd->prot;
+ uint32 pkt_id, xt_id;
+ ioctl_comp_resp_msg_t *ioct_resp = (ioctl_comp_resp_msg_t *)msg;
+ void *pkt;
+ unsigned long flags;
+ dhd_dma_buf_t retbuf;
+#ifdef REPORT_FATAL_TIMEOUTS
+ uint16 dhd_xt_id;
+#endif
+
+	/* Check for the ioctl timeout induce flag, which is set by firing
+	 * a dhd iovar to induce an IOCTL timeout. If the flag is set,
+	 * return from here, which results in an IOCTL timeout.
+	 */
+ if (dhd->dhd_induce_error == DHD_INDUCE_IOCTL_TIMEOUT) {
+ DHD_ERROR(("%s: Inducing resumed on timeout\n", __FUNCTION__));
+ return;
+ }
+
+ memset(&retbuf, 0, sizeof(dhd_dma_buf_t));
+
+ pkt_id = ltoh32(ioct_resp->cmn_hdr.request_id);
+
+#if defined(DHD_PKTID_AUDIT_RING) && !defined(BCM_ROUTER_DHD)
+#ifndef IOCTLRESP_USE_CONSTMEM
+ DHD_PKTID_AUDIT_RING_DEBUG(dhd, prot->pktid_ctrl_map, pkt_id,
+ DHD_DUPLICATE_FREE, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
+#else
+ DHD_PKTID_AUDIT_RING_DEBUG(dhd, prot->pktid_map_handle_ioctl, pkt_id,
+ DHD_DUPLICATE_FREE, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
+#endif /* !IOCTLRESP_USE_CONSTMEM */
+#endif /* DHD_PKTID_AUDIT_RING && !BCM_ROUTER_DHD */
+
+ DHD_GENERAL_LOCK(dhd, flags);
+ if ((prot->ioctl_state & MSGBUF_IOCTL_ACK_PENDING) ||
+ !(prot->ioctl_state & MSGBUF_IOCTL_RESP_PENDING)) {
+ DHD_ERROR(("%s: received ioctl response with state %02x trans_id = %d\n",
+ __FUNCTION__, dhd->prot->ioctl_state, dhd->prot->ioctl_trans_id));
+ prhex("dhd_prot_ioctcmplt_process:",
+ (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
+ DHD_GENERAL_UNLOCK(dhd, flags);
+ return;
+ }
+
+ dhd->prot->ioctl_cmplt_time = OSL_LOCALTIME_NS();
+
+ /* Clear Response pending bit */
+ prot->ioctl_state &= ~MSGBUF_IOCTL_RESP_PENDING;
+ DHD_GENERAL_UNLOCK(dhd, flags);
+
+#ifndef IOCTLRESP_USE_CONSTMEM
+ pkt = dhd_prot_packet_get(dhd, pkt_id, PKTTYPE_IOCTL_RX, TRUE);
+#else
+ dhd_prot_ioctl_ret_buffer_get(dhd, pkt_id, &retbuf);
+ pkt = retbuf.va;
+#endif /* !IOCTLRESP_USE_CONSTMEM */
+ if (!pkt) {
+ DHD_ERROR(("%s: received ioctl response with NULL pkt\n", __FUNCTION__));
+ prhex("dhd_prot_ioctcmplt_process:",
+ (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
+ return;
+ }
+
+ prot->ioctl_resplen = ltoh16(ioct_resp->resp_len);
+ prot->ioctl_status = ltoh16(ioct_resp->compl_hdr.status);
+ xt_id = ltoh16(ioct_resp->trans_id);
+
+ if (xt_id != prot->ioctl_trans_id || prot->curr_ioctl_cmd != ioct_resp->cmd) {
+ DHD_ERROR(("%s: transaction id(%d %d) or cmd(%d %d) mismatch\n",
+ __FUNCTION__, xt_id, prot->ioctl_trans_id,
+ prot->curr_ioctl_cmd, ioct_resp->cmd));
+#ifdef REPORT_FATAL_TIMEOUTS
+ dhd_stop_cmd_timer(dhd);
+#endif /* REPORT_FATAL_TIMEOUTS */
+ dhd_wakeup_ioctl_event(dhd, IOCTL_RETURN_ON_ERROR);
+ dhd_prot_debug_info_print(dhd);
+#ifdef DHD_FW_COREDUMP
+ if (dhd->memdump_enabled) {
+ /* collect core dump */
+ dhd->memdump_type = DUMP_TYPE_TRANS_ID_MISMATCH;
+ dhd_bus_mem_dump(dhd);
+ }
+#else
+ ASSERT(0);
+#endif /* DHD_FW_COREDUMP */
+ dhd_schedule_reset(dhd);
+ goto exit;
+ }
+#ifdef REPORT_FATAL_TIMEOUTS
+ dhd_xt_id = dhd_get_request_id(dhd);
+ if (xt_id == dhd_xt_id) {
+ dhd_stop_cmd_timer(dhd);
+ } else {
+ DHD_ERROR(("%s: Cmd timer not stopped received xt_id %d stored xt_id %d",
+ __FUNCTION__, xt_id, dhd_xt_id));
+ }
+#endif /* REPORT_FATAL_TIMEOUTS */
+ DHD_CTL(("IOCTL_COMPLETE: req_id %x transid %d status %x resplen %d\n",
+ pkt_id, xt_id, prot->ioctl_status, prot->ioctl_resplen));
+
+ if (prot->ioctl_resplen > 0) {
+#ifndef IOCTLRESP_USE_CONSTMEM
+ bcopy(PKTDATA(dhd->osh, pkt), prot->retbuf.va, prot->ioctl_resplen);
+#else
+ bcopy(pkt, prot->retbuf.va, prot->ioctl_resplen);
+#endif /* !IOCTLRESP_USE_CONSTMEM */
+ }
+
+ /* wake up any dhd_os_ioctl_resp_wait() */
+ dhd_wakeup_ioctl_event(dhd, IOCTL_RETURN_ON_SUCCESS);
+
+exit:
+#ifndef IOCTLRESP_USE_CONSTMEM
+ dhd_prot_packet_free(dhd, pkt,
+ PKTTYPE_IOCTL_RX, FALSE);
+#else
+ free_ioctl_return_buffer(dhd, &retbuf);
+#endif /* !IOCTLRESP_USE_CONSTMEM */
+
+ /* Post another ioctl buf to the device */
+ if (prot->cur_ioctlresp_bufs_posted > 0) {
+ prot->cur_ioctlresp_bufs_posted--;
+ }
+
+ dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);
+}
+
+int
+dhd_prot_check_tx_resource(dhd_pub_t *dhd)
+{
+ return dhd->prot->no_tx_resource;
+}
+
+#ifdef DHD_PKTTS
+/**
+ * dhd_msgbuf_get_ip_info - this API finds the following (IPv4 and IPv6 are supported)
+ * 1. pointer to data portion of pkt
+ * 2. five tuple checksum of pkt
+ * = {src_ip, dst_ip, src_port, dst_port, proto}
+ * 3. ip_prec
+ *
+ * @dhdp: pointer to dhd_pub object
+ * @pkt: packet pointer
+ * @ptr: returns pointer to data portion of pkt
+ * @chksum: returns five tuple checksum of pkt
+ * @prec: returns ip precedence
+ * @tcp_seqno: returns tcp sequence number
+ * @tcp_ackno: returns tcp ack number
+ *
+ * returns packet length remaining after tcp/udp header or BCME_ERROR.
+ */
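+/* Note (illustration, not driver code): the five-tuple "checksum" computed
+ * below is a 32-bit XOR fold over the src/dst IP words, the src/dst port
+ * word and the protocol, i.e. a cheap flow hash rather than an integrity
+ * checksum.
+ */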
+static int
+dhd_msgbuf_get_ip_info(dhd_pub_t *dhdp, void *pkt, void **ptr, uint32 *chksum,
+ uint32 *prec, uint32 *tcp_seqno, uint32 *tcp_ackno)
+{
+ char *pdata;
+ uint plen;
+ uint32 type, len;
+ uint32 checksum = 0;
+ uint8 dscp_prio = 0;
+ struct bcmtcp_hdr *tcp = NULL;
+
+ pdata = PKTDATA(dhdp->osh, pkt);
+ plen = PKTLEN(dhdp->osh, pkt);
+
+ /* Ethernet header */
+ if (plen < ETHER_HDR_LEN) {
+ return BCME_ERROR;
+ }
+ type = ntoh16(((struct ether_header *)pdata)->ether_type);
+ pdata += ETHER_HDR_LEN;
+ plen -= ETHER_HDR_LEN;
+
+ if ((type == ETHER_TYPE_IP) ||
+ (type == ETHER_TYPE_IPV6)) {
+ dscp_prio = (IP_TOS46(pdata) >> IPV4_TOS_PREC_SHIFT);
+ }
+
+ /* IP header (v4 or v6) */
+ if (type == ETHER_TYPE_IP) {
+ struct ipv4_hdr *iph = (struct ipv4_hdr *)pdata;
+ if (plen <= sizeof(*iph)) {
+ return BCME_ERROR;
+ }
+
+ len = IPV4_HLEN(iph);
+ if (plen <= len || IP_VER(iph) != IP_VER_4 || len < IPV4_MIN_HEADER_LEN) {
+ return BCME_ERROR;
+ }
+
+ type = IPV4_PROT(iph);
+ pdata += len;
+ plen -= len;
+
+ checksum ^= bcm_compute_xor32((volatile uint32 *)iph->src_ip,
+ sizeof(iph->src_ip) / sizeof(uint32));
+ checksum ^= bcm_compute_xor32((volatile uint32 *)iph->dst_ip,
+ sizeof(iph->dst_ip) / sizeof(uint32));
+ } else if (type == ETHER_TYPE_IPV6) {
+ struct ipv6_hdr *ip6h = (struct ipv6_hdr *)pdata;
+
+ if (plen <= IPV6_MIN_HLEN || IP_VER(ip6h) != IP_VER_6) {
+ return BCME_ERROR;
+ }
+
+ type = IPV6_PROT(ip6h);
+ pdata += IPV6_MIN_HLEN;
+ plen -= IPV6_MIN_HLEN;
+ if (IPV6_EXTHDR(type)) {
+ uint8 proto = 0;
+ int32 exth_len = ipv6_exthdr_len(pdata, &proto);
+			/* note: do not decrement plen here; it is decremented once below */
+			if (exth_len < 0 || (plen <= (uint)exth_len)) {
+ return BCME_ERROR;
+ }
+ type = proto;
+ pdata += exth_len;
+ plen -= exth_len;
+ }
+
+ checksum ^= bcm_compute_xor32((volatile uint32 *)&ip6h->saddr,
+ sizeof(ip6h->saddr) / sizeof(uint32));
+ checksum ^= bcm_compute_xor32((volatile uint32 *)&ip6h->daddr,
+ sizeof(ip6h->saddr) / sizeof(uint32));
+ }
+
+ /* return error if not TCP or UDP */
+ if ((type != IP_PROT_UDP) && (type != IP_PROT_TCP)) {
+ return BCME_ERROR;
+ }
+
+ /* src_port and dst_port (together 32bit) */
+ checksum ^= bcm_compute_xor32((volatile uint32 *)pdata, 1);
+ checksum ^= bcm_compute_xor32((volatile uint32 *)&type, 1);
+
+ if (type == IP_PROT_TCP) {
+ tcp = (struct bcmtcp_hdr *)pdata;
+ len = TCP_HDRLEN(pdata[TCP_HLEN_OFFSET]) << 2;
+ } else { /* IP_PROT_UDP */
+ len = sizeof(struct bcmudp_hdr);
+ }
+
+ /* length check */
+ if (plen < len) {
+ return BCME_ERROR;
+ }
+
+ pdata += len;
+ plen -= len;
+
+ /* update data[0] */
+ *ptr = (void *)pdata;
+
+ /* update fivetuple checksum */
+ *chksum = checksum;
+
+ /* update ip prec */
+ *prec = dscp_prio;
+
+ /* update tcp sequence number */
+ if (tcp != NULL) {
+ *tcp_seqno = tcp->seq_num;
+ *tcp_ackno = tcp->ack_num;
+ }
+
+ return plen;
+}
+
+/**
+ * dhd_msgbuf_send_msg_tx_ts - send pktts tx timestamp to netlink socket
+ *
+ * @dhdp: pointer to dhd_pub object
+ * @pkt: packet pointer
+ * @fwts: firmware timestamp {fwt1..fwt4}
+ * @version: pktlat version supported in firmware
+ */
+static void
+dhd_msgbuf_send_msg_tx_ts(dhd_pub_t *dhdp, void *pkt, void *fw_ts, uint16 version)
+{
+ bcm_to_info_tx_ts_t to_tx_info;
+ void *ptr = NULL;
+ int dlen = 0;
+ uint32 checksum = 0;
+ uint32 prec = 0;
+ pktts_flow_t *flow = NULL;
+ uint32 flow_pkt_offset = 0;
+ uint32 num_config = 0;
+ uint32 tcp_seqno = 0;
+ uint32 tcp_ackno = 0;
+
+ dlen = dhd_msgbuf_get_ip_info(dhdp, pkt, &ptr, &checksum, &prec, &tcp_seqno, &tcp_ackno);
+
+ flow = dhd_match_pktts_flow(dhdp, checksum, NULL, &num_config);
+ if (flow) {
+ /* there is valid config for this chksum */
+ flow_pkt_offset = flow->pkt_offset;
+ } else if (num_config) {
+ /* there is valid config + no matching config for this chksum */
+ return;
+ } else {
+ /* there is no valid config. pass all to netlink */
+ }
+
+ memset(&to_tx_info, 0, sizeof(to_tx_info));
+ to_tx_info.hdr.type = BCM_TS_TX;
+ to_tx_info.hdr.flowid = checksum;
+ to_tx_info.hdr.prec = prec;
+
+ /* special case: if flow is not configured, copy tcp seqno and ackno in xbytes */
+ if (!flow && tcp_seqno) {
+ uint32 *xbytes = (uint32 *)to_tx_info.hdr.xbytes;
+
+ (void)memcpy_s(&xbytes[0], sizeof(xbytes[0]),
+ ((uint8 *)&tcp_seqno), sizeof(tcp_seqno));
+ (void)memcpy_s(&xbytes[1], sizeof(xbytes[1]),
+ ((uint8 *)&tcp_ackno), sizeof(tcp_ackno));
+ } else if ((dlen > flow_pkt_offset) &&
+ ((dlen - flow_pkt_offset) >= sizeof(to_tx_info.hdr.xbytes))) {
+ (void)memcpy_s(to_tx_info.hdr.xbytes, sizeof(to_tx_info.hdr.xbytes),
+ ((uint8 *)ptr + flow_pkt_offset), sizeof(to_tx_info.hdr.xbytes));
+ }
+
+ to_tx_info.dhdt0 = DHD_PKT_GET_QTIME(pkt);
+ to_tx_info.dhdt5 = OSL_SYSUPTIME_US();
+
+ if (version == METADATA_VER_1) {
+ struct pktts_fwtx_v1 *fwts = (struct pktts_fwtx_v1 *)fw_ts;
+
+ to_tx_info.hdr.magic = BCM_TS_MAGIC;
+
+ to_tx_info.fwts[0] = ntohl(fwts->ts[0]);
+ to_tx_info.fwts[1] = ntohl(fwts->ts[1]);
+ to_tx_info.fwts[2] = ntohl(fwts->ts[2]);
+ to_tx_info.fwts[3] = ntohl(fwts->ts[3]);
+
+ dhd_send_msg_to_ts(NULL, (void *)&to_tx_info, OFFSETOF(bcm_to_info_tx_ts_t, ucts));
+ } else if (version == METADATA_VER_2) {
+ struct pktts_fwtx_v2 *fwts = (struct pktts_fwtx_v2 *)fw_ts;
+
+ to_tx_info.hdr.magic = BCM_TS_MAGIC_V2;
+
+ to_tx_info.fwts[0] = ntohl(fwts->ts[0]);
+ to_tx_info.fwts[1] = ntohl(fwts->ts[1]);
+ to_tx_info.fwts[2] = ntohl(fwts->ts[2]);
+ to_tx_info.fwts[3] = ntohl(fwts->ts[3]);
+
+ to_tx_info.ucts[0] = ntohl(fwts->ut[0]);
+ to_tx_info.ucts[1] = ntohl(fwts->ut[1]);
+ to_tx_info.ucts[2] = ntohl(fwts->ut[2]);
+ to_tx_info.ucts[3] = ntohl(fwts->ut[3]);
+ to_tx_info.ucts[4] = ntohl(fwts->ut[4]);
+
+ to_tx_info.uccnt[0] = ntohl(fwts->uc[0]);
+ to_tx_info.uccnt[1] = ntohl(fwts->uc[1]);
+ to_tx_info.uccnt[2] = ntohl(fwts->uc[2]);
+ to_tx_info.uccnt[3] = ntohl(fwts->uc[3]);
+ to_tx_info.uccnt[4] = ntohl(fwts->uc[4]);
+ to_tx_info.uccnt[5] = ntohl(fwts->uc[5]);
+ to_tx_info.uccnt[6] = ntohl(fwts->uc[6]);
+ to_tx_info.uccnt[7] = ntohl(fwts->uc[7]);
+
+ dhd_send_msg_to_ts(NULL, (void *)&to_tx_info, sizeof(to_tx_info));
+ }
+ return;
+}
+
+/**
+ * dhd_msgbuf_send_msg_rx_ts - send pktts rx timestamp to netlink socket
+ *
+ * @dhdp: pointer to dhd_pub object
+ * @pkt: packet pointer
+ * @fwr1: firmware timestamp at probe point 1
+ * @fwr2: firmware timestamp at probe point 2
+ */
+static void
+dhd_msgbuf_send_msg_rx_ts(dhd_pub_t *dhdp, void *pkt, uint fwr1, uint fwr2)
+{
+ bcm_to_info_rx_ts_t to_rx_info;
+ void *ptr = NULL;
+ int dlen = 0;
+ uint32 checksum = 0;
+ uint32 prec = 0;
+ pktts_flow_t *flow = NULL;
+ uint32 flow_pkt_offset = 0;
+ uint32 num_config = 0;
+ uint32 tcp_seqno = 0;
+ uint32 tcp_ackno = 0;
+
+ dlen = dhd_msgbuf_get_ip_info(dhdp, pkt, &ptr, &checksum, &prec, &tcp_seqno, &tcp_ackno);
+
+ flow = dhd_match_pktts_flow(dhdp, checksum, NULL, &num_config);
+ if (flow) {
+ /* there is valid config for this chksum */
+ flow_pkt_offset = flow->pkt_offset;
+ } else if (num_config) {
+ /* there is valid config + no matching config for this chksum */
+ return;
+ } else {
+ /* there is no valid config. pass all to netlink */
+ }
+
+ memset(&to_rx_info, 0, sizeof(to_rx_info));
+ to_rx_info.hdr.magic = BCM_TS_MAGIC;
+ to_rx_info.hdr.type = BCM_TS_RX;
+ to_rx_info.hdr.flowid = checksum;
+ to_rx_info.hdr.prec = prec;
+
+ /* special case: if flow is not configured, copy tcp seqno and ackno in xbytes */
+ if (!flow && tcp_seqno) {
+ uint32 *xbytes = (uint32 *)to_rx_info.hdr.xbytes;
+
+ (void)memcpy_s(&xbytes[0], sizeof(xbytes[0]),
+ ((uint8 *)&tcp_seqno), sizeof(tcp_seqno));
+ (void)memcpy_s(&xbytes[1], sizeof(xbytes[1]),
+ ((uint8 *)&tcp_ackno), sizeof(tcp_ackno));
+ } else if ((dlen > flow_pkt_offset) &&
+ ((dlen - flow_pkt_offset) >= sizeof(to_rx_info.hdr.xbytes))) {
+ (void)memcpy_s(to_rx_info.hdr.xbytes, sizeof(to_rx_info.hdr.xbytes),
+ ((uint8 *)ptr + flow_pkt_offset), sizeof(to_rx_info.hdr.xbytes));
+ }
+
+ to_rx_info.dhdr3 = OSL_SYSUPTIME_US();
+
+ to_rx_info.fwts[0] = ntohl(fwr1);
+ to_rx_info.fwts[1] = ntohl(fwr2);
+
+ dhd_send_msg_to_ts(NULL, (void *)&to_rx_info, sizeof(to_rx_info));
+ return;
+}
+#endif /* DHD_PKTTS */
+
+/** called on MSG_TYPE_TX_STATUS message received from dongle */
+static void
+BCMFASTPATH(dhd_prot_txstatus_process)(dhd_pub_t *dhd, void *msg)
+{
+ dhd_prot_t *prot = dhd->prot;
+ host_txbuf_cmpl_t * txstatus;
+ unsigned long flags;
+ uint32 pktid;
+ void *pkt;
+ dmaaddr_t pa;
+ uint32 len;
+ void *dmah;
+ void *secdma;
+ bool pkt_fate;
+ msgbuf_ring_t *ring = &dhd->prot->d2hring_tx_cpln;
+#if defined(TX_STATUS_LATENCY_STATS)
+ flow_info_t *flow_info;
+ uint64 tx_status_latency;
+#endif /* TX_STATUS_LATENCY_STATS */
+#ifdef AGG_H2D_DB
+ msgbuf_ring_t *flow_ring;
+#endif /* AGG_H2D_DB */
+#if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS)
+ dhd_awdl_stats_t *awdl_stats;
+ if_flow_lkup_t *if_flow_lkup;
+ unsigned long awdl_stats_lock_flags;
+ uint8 ifindex;
+ uint8 role;
+#endif /* DHD_AWDL && AWDL_SLOT_STATS */
+ flow_ring_node_t *flow_ring_node;
+ uint16 flowid;
+#ifdef DHD_PKTTS
+ struct metadata_txcmpl_v1 meta_ts_v1;
+ struct metadata_txcmpl_v2 meta_ts_v2;
+ dhd_dma_buf_t meta_data_buf;
+ uint64 addr = 0;
+
+ BCM_REFERENCE(meta_ts_v1);
+ BCM_REFERENCE(meta_ts_v2);
+ BCM_REFERENCE(meta_data_buf);
+ BCM_REFERENCE(addr);
+
+ if ((dhd->memdump_type == DUMP_TYPE_PKTID_AUDIT_FAILURE) ||
+ (dhd->memdump_type == DUMP_TYPE_PKTID_INVALID)) {
+ DHD_ERROR_RLMT(("%s: return as invalid pktid detected\n", __FUNCTION__));
+ return;
+ }
+
+ memset(&meta_ts_v1, 0, sizeof(meta_ts_v1));
+ memset(&meta_ts_v2, 0, sizeof(meta_ts_v2));
+ memset(&meta_data_buf, 0, sizeof(meta_data_buf));
+#endif /* DHD_PKTTS */
+ txstatus = (host_txbuf_cmpl_t *)msg;
+
+ flowid = txstatus->compl_hdr.flow_ring_id;
+ flow_ring_node = DHD_FLOW_RING(dhd, flowid);
+#ifdef AGG_H2D_DB
+ flow_ring = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid);
+ OSL_ATOMIC_DEC(dhd->osh, &flow_ring->inflight);
+#endif /* AGG_H2D_DB */
+
+ BCM_REFERENCE(flow_ring_node);
+
+#ifdef DEVICE_TX_STUCK_DETECT
+ /**
+ * Since we got a completion message on this flowid,
+ * update tx_cmpl time stamp
+ */
+ flow_ring_node->tx_cmpl = OSL_SYSUPTIME();
+ /* update host copy of rd pointer */
+#ifdef DHD_HP2P
+ if (dhd->prot->d2hring_hp2p_txcpl &&
+ flow_ring_node->flow_info.tid == HP2P_PRIO) {
+ ring = dhd->prot->d2hring_hp2p_txcpl;
+ }
+#endif /* DHD_HP2P */
+ ring->curr_rd++;
+ if (ring->curr_rd >= ring->max_items) {
+ ring->curr_rd = 0;
+ }
+#endif /* DEVICE_TX_STUCK_DETECT */
+
+ /* locks required to protect circular buffer accesses */
+ DHD_RING_LOCK(ring->ring_lock, flags);
+ pktid = ltoh32(txstatus->cmn_hdr.request_id);
+
+ if (dhd->pcie_txs_metadata_enable > 1) {
+ /* Return metadata format (little endian):
+ * |<--- txstatus --->|<- metadatalen ->|
+ * |____|____|________|________|________|
+ * | | | | |> total delay from fetch to report (8-bit 1 = 4ms)
+ * | | | |> ucode delay from enqueue to completion (8-bit 1 = 4ms)
+ * | | |> 8-bit reserved (pre-filled with original TX status by caller)
+ * | |> delay time first fetch to the last fetch (4-bit 1 = 32ms)
+ * |> fetch count (4-bit)
+ */
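+		/* Illustrative decode with hypothetical values: tx_status 0x1200
+		 * and tx_status_ext 0x0401 would print "(1/64ms + 4/16ms)", i.e.
+		 * fetch count 1, fetch span 2*32 = 64 ms, then 1*4 = 4 ms and
+		 * 4*4 = 16 ms from the two 8-bit fields.
+		 */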
+ printf("TX status[%d] = %04x-%04x -> status = %d (%d/%dms + %d/%dms)\n", pktid,
+ ltoh16(txstatus->tx_status_ext), ltoh16(txstatus->tx_status),
+ (txstatus->tx_status & WLFC_CTL_PKTFLAG_MASK),
+ ((txstatus->tx_status >> 12) & 0xf),
+ ((txstatus->tx_status >> 8) & 0xf) * 32,
+ ((txstatus->tx_status_ext & 0xff) * 4),
+ ((txstatus->tx_status_ext >> 8) & 0xff) * 4);
+ }
+ pkt_fate = TRUE;
+
+#if defined(DHD_PKTID_AUDIT_RING) && !defined(BCM_ROUTER_DHD)
+ DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_tx_map, pktid,
+ DHD_DUPLICATE_FREE, msg, D2HRING_TXCMPLT_ITEMSIZE);
+#endif /* DHD_PKTID_AUDIT_RING && !BCM_ROUTER_DHD */
+
+ DHD_MSGBUF_INFO(("txstatus for pktid 0x%04x\n", pktid));
+ if (OSL_ATOMIC_DEC_RETURN(dhd->osh, &prot->active_tx_count) < 0) {
+ DHD_ERROR(("Extra packets are freed\n"));
+ }
+ ASSERT(pktid != 0);
+
+#ifdef DHD_HMAPTEST
+
+ if ((dhd->prot->hmaptest_tx_active == HMAPTEST_D11_TX_POSTED) &&
+ (pktid == dhd->prot->hmaptest_tx_pktid)) {
+ DHD_ERROR(("hmaptest: d11read txcpl received sc txbuf pktid=0x%08x\n", pktid));
+ DHD_ERROR(("hmaptest: d11read txcpl txstatus=0x%08x\n", txstatus->tx_status));
+ DHD_ERROR(("hmaptest: d11read txcpl sc txbuf va=0x%p pa=0x%08x\n",
+ dhd->prot->hmap_tx_buf_va, (uint32)PHYSADDRLO(dhd->prot->hmap_tx_buf_pa)));
+ dhd->prot->hmaptest_tx_active = HMAPTEST_D11_TX_INACTIVE;
+ dhd->prot->hmap_tx_buf_va = NULL;
+ dhd->prot->hmap_tx_buf_len = 0;
+ PHYSADDRHISET(dhd->prot->hmap_tx_buf_pa, 0);
+ PHYSADDRLOSET(dhd->prot->hmap_tx_buf_pa, 0);
+ prot->hmaptest.in_progress = FALSE;
+ }
+	/* the original skb is kept as is because it's going to be freed later in this path */
+#endif /* DHD_HMAPTEST */
+
+#ifdef DHD_PKTTS
+ if (dhd_get_pktts_enab(dhd) &&
+ dhd->pkt_metadata_buflen) {
+ /* Handle the Metadata first */
+ meta_data_buf.va = DHD_PKTID_RETREIVE_METADATA(dhd, dhd->prot->pktid_tx_map,
+ meta_data_buf.pa, meta_data_buf._alloced, meta_data_buf.dmah, pktid);
+ if (meta_data_buf.va) {
+ if (dhd->pkt_metadata_version == METADATA_VER_1) {
+ memcpy(&meta_ts_v1, meta_data_buf.va, sizeof(meta_ts_v1));
+ } else if (dhd->pkt_metadata_version == METADATA_VER_2) {
+ memcpy(&meta_ts_v2, meta_data_buf.va, sizeof(meta_ts_v2));
+ }
+ memcpy(&addr, &meta_data_buf.pa, sizeof(meta_data_buf.pa));
+ DHD_TRACE(("%s(): pktid %d retrieved mdata buffer %p "
+ "pa: %llx dmah: %p\r\n", __FUNCTION__,
+ pktid, meta_data_buf.va, addr,
+ meta_data_buf.dmah));
+ }
+ }
+#endif /* DHD_PKTTS */
+
+ pkt = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_tx_map, pktid,
+ pa, len, dmah, secdma, PKTTYPE_DATA_TX);
+ if (!pkt) {
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+#ifdef DHD_PKTTS
+ /*
+ * Call the free function after the Ring Lock is released.
+			 * This is because pcie_free_consistent is not supposed to be
+			 * called with interrupts disabled
+ */
+ if (meta_data_buf.va) {
+ DMA_FREE_CONSISTENT(dhd->osh, meta_data_buf.va, meta_data_buf._alloced,
+ meta_data_buf.pa, meta_data_buf.dmah);
+ }
+#endif /* DHD_PKTTS */
+ DHD_ERROR(("%s: received txstatus with NULL pkt\n", __FUNCTION__));
+ prhex("dhd_prot_txstatus_process:", (uchar *)msg, D2HRING_TXCMPLT_ITEMSIZE);
+#ifdef DHD_FW_COREDUMP
+ if (dhd->memdump_enabled) {
+ /* collect core dump */
+ dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
+ dhd_bus_mem_dump(dhd);
+ }
+#else
+ ASSERT(0);
+#endif /* DHD_FW_COREDUMP */
+ return;
+ }
+
+ if (DHD_PKTID_AVAIL(dhd->prot->pktid_tx_map) == DHD_PKTID_MIN_AVAIL_COUNT) {
+ DHD_ERROR_RLMT(("%s: start tx queue as min pktids are available\n",
+ __FUNCTION__));
+ prot->pktid_txq_stop_cnt--;
+ dhd->prot->no_tx_resource = FALSE;
+ dhd_bus_start_queue(dhd->bus);
+ }
+
+ DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah);
+
+#ifdef TX_STATUS_LATENCY_STATS
+ /* update the tx status latency for flowid */
+ flow_info = &flow_ring_node->flow_info;
+ tx_status_latency = OSL_SYSUPTIME_US() - DHD_PKT_GET_QTIME(pkt);
+#if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS)
+ if (dhd->pkt_latency > 0 &&
+ tx_status_latency > (dhd->pkt_latency)) {
+ DHD_ERROR(("Latency: %llu > %u aw_cnt: %u \n",
+ tx_status_latency, dhd->pkt_latency,
+ dhd->awdl_aw_counter));
+ }
+#endif /* defined(DHD_AWDL) && defined(AWDL_SLOT_STATS) */
+ flow_info->cum_tx_status_latency += tx_status_latency;
+ flow_info->num_tx_status++;
+#endif /* TX_STATUS_LATENCY_STATS */
+#if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS)
+ /* update the tx status latency when this AWDL slot is active */
+ if_flow_lkup = (if_flow_lkup_t *)dhd->if_flow_lkup;
+ ifindex = flow_ring_node->flow_info.ifindex;
+ role = if_flow_lkup[ifindex].role;
+ if (role == WLC_E_IF_ROLE_AWDL) {
+ awdl_stats = &dhd->awdl_stats[dhd->awdl_tx_status_slot];
+ DHD_AWDL_STATS_LOCK(dhd->awdl_stats_lock, awdl_stats_lock_flags);
+ awdl_stats->cum_tx_status_latency += tx_status_latency;
+ awdl_stats->num_tx_status++;
+ DHD_AWDL_STATS_UNLOCK(dhd->awdl_stats_lock, awdl_stats_lock_flags);
+ }
+#endif /* DHD_AWDL && AWDL_SLOT_STATS */
+
+#ifdef HOST_SFH_LLC
+ if (dhd->host_sfhllc_supported) {
+ struct ether_header eth;
+ if (!memcpy_s(&eth, sizeof(eth),
+ PKTDATA(dhd->osh, pkt), sizeof(eth))) {
+ if (dhd_8023_llc_to_ether_hdr(dhd->osh,
+ &eth, pkt) != BCME_OK) {
+ DHD_ERROR_RLMT(("%s: host sfh llc"
+					" conversion to ether failed\n",
+ __FUNCTION__));
+ }
+ }
+ }
+#endif /* HOST_SFH_LLC */
+
+#ifdef DMAMAP_STATS
+ dhd->dma_stats.txdata--;
+ dhd->dma_stats.txdata_sz -= len;
+#endif /* DMAMAP_STATS */
+ pkt_fate = dhd_dbg_process_tx_status(dhd, pkt, pktid,
+ ltoh16(txstatus->compl_hdr.status) & WLFC_CTL_PKTFLAG_MASK);
+#ifdef DHD_PKT_LOGGING
+ if (dhd->d11_tx_status) {
+ uint16 status = ltoh16(txstatus->compl_hdr.status) &
+ WLFC_CTL_PKTFLAG_MASK;
+ dhd_handle_pktdata(dhd, ltoh32(txstatus->cmn_hdr.if_id),
+ pkt, (uint8 *)PKTDATA(dhd->osh, pkt), pktid, len,
+ &status, NULL, TRUE, FALSE, TRUE);
+ }
+#endif /* DHD_PKT_LOGGING */
+#if defined(BCMPCIE) && (defined(LINUX) || defined(OEM_ANDROID) || defined(DHD_EFI))
+ dhd_txcomplete(dhd, pkt, pkt_fate);
+#ifdef DHD_4WAYM4_FAIL_DISCONNECT
+ dhd_eap_txcomplete(dhd, pkt, pkt_fate, txstatus->cmn_hdr.if_id);
+#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
+#endif /* BCMPCIE && (defined(LINUX) || defined(OEM_ANDROID)) */
+
+#ifdef DHD_PKTTS
+ if (dhd_get_pktts_enab(dhd) == TRUE) {
+ if (dhd->pkt_metadata_buflen) {
+			/* firmware marks tx_pktts.tref with 0xFFFFFFFF on errors */
+ if ((dhd->pkt_metadata_version == METADATA_VER_1) &&
+ (ltoh32(meta_ts_v1.tref) != 0xFFFFFFFF)) {
+ struct pktts_fwtx_v1 fwts;
+ fwts.ts[0] = (uint32)htonl(ltoh32(meta_ts_v1.tref));
+ fwts.ts[1] = (uint32)htonl(ltoh32(meta_ts_v1.tref) +
+ ltoh16(meta_ts_v1.d_t2));
+ fwts.ts[2] = (uint32)htonl(ltoh32(meta_ts_v1.tref) +
+ ltoh16(meta_ts_v1.d_t3));
+ fwts.ts[3] = (uint32)htonl(ltoh32(meta_ts_v1.tref) +
+ ltoh16(meta_ts_v1.d_t4));
+ /* check for overflow */
+ if (ntohl(fwts.ts[3]) > ntohl(fwts.ts[0])) {
+ /* send tx timestamp to netlink socket */
+ dhd_msgbuf_send_msg_tx_ts(dhd, pkt, &fwts,
+ dhd->pkt_metadata_version);
+ }
+ } else if ((dhd->pkt_metadata_version == METADATA_VER_2) &&
+ (ltoh32(meta_ts_v2.tref) != 0xFFFFFFFF)) {
+ struct pktts_fwtx_v2 fwts;
+ fwts.ts[0] = (uint32)htonl(ltoh32(meta_ts_v2.tref));
+ fwts.ts[1] = (uint32)htonl(ltoh32(meta_ts_v2.tref) +
+ ltoh16(meta_ts_v2.d_t2));
+ fwts.ts[2] = (uint32)htonl(ltoh32(meta_ts_v2.tref) +
+ ltoh16(meta_ts_v2.d_t3));
+ fwts.ts[3] = (uint32)htonl(ltoh32(meta_ts_v2.tref) +
+ ltoh16(meta_ts_v2.d_t4));
+
+ fwts.ut[0] = (uint32)htonl(ltoh32(meta_ts_v2.tref) +
+ ltoh16(meta_ts_v2.u_t1));
+ fwts.ut[1] = (uint32)htonl(ltoh16(meta_ts_v2.u_t2));
+ fwts.ut[2] = (uint32)htonl(ltoh16(meta_ts_v2.u_t3));
+ fwts.ut[3] = (uint32)htonl(ltoh16(meta_ts_v2.u_t4));
+ fwts.ut[4] = (uint32)htonl(ltoh32(meta_ts_v2.tref) +
+ ltoh16(meta_ts_v2.u_t5));
+
+ fwts.uc[0] = (uint32)htonl(ltoh32(meta_ts_v2.u_c1));
+ fwts.uc[1] = (uint32)htonl(ltoh32(meta_ts_v2.u_c2));
+ fwts.uc[2] = (uint32)htonl(ltoh32(meta_ts_v2.u_c3));
+ fwts.uc[3] = (uint32)htonl(ltoh32(meta_ts_v2.u_c4));
+ fwts.uc[4] = (uint32)htonl(ltoh32(meta_ts_v2.u_c5));
+ fwts.uc[5] = (uint32)htonl(ltoh32(meta_ts_v2.u_c6));
+ fwts.uc[6] = (uint32)htonl(ltoh32(meta_ts_v2.u_c7));
+ fwts.uc[7] = (uint32)htonl(ltoh32(meta_ts_v2.u_c8));
+
+ DHD_INFO(("uct1:%x uct2:%x uct3:%x uct4:%x uct5:%x\n",
+ ntohl(fwts.ut[0]), ntohl(fwts.ut[1]), ntohl(fwts.ut[2]),
+ ntohl(fwts.ut[3]), ntohl(fwts.ut[4])));
+ DHD_INFO(("ucc1:%x ucc2:%x ucc3:%x ucc4:%x"
+ " ucc5:%x ucc6:%x ucc7:%x ucc8:%x\n",
+ ntohl(fwts.uc[0]), ntohl(fwts.uc[1]), ntohl(fwts.uc[2]),
+ ntohl(fwts.uc[3]), ntohl(fwts.uc[4]), ntohl(fwts.uc[5]),
+ ntohl(fwts.uc[6]), ntohl(fwts.uc[7])));
+ /* check for overflow */
+ if (ntohl(fwts.ts[3]) > ntohl(fwts.ts[0])) {
+ /* send tx timestamp to netlink socket */
+ dhd_msgbuf_send_msg_tx_ts(dhd, pkt, &fwts,
+ dhd->pkt_metadata_version);
+ }
+ }
+ } else {
+			/* firmware marks tx_pktts.tref with 0xFFFFFFFF on errors */
+ if (ltoh32(txstatus->tx_pktts.tref) != 0xFFFFFFFF) {
+ struct pktts_fwtx_v1 fwts;
+
+ fwts.ts[0] = (uint32)htonl(ltoh32(txstatus->tx_pktts.tref));
+ fwts.ts[1] = (uint32)htonl(ltoh32(txstatus->tx_pktts.tref) +
+ ltoh16(txstatus->tx_pktts.d_t2));
+ fwts.ts[2] = (uint32)htonl(ltoh32(txstatus->tx_pktts.tref) +
+ ltoh16(txstatus->tx_pktts.d_t3));
+ fwts.ts[3] = (uint32)htonl(ltoh32(txstatus->tx_pktts.tref) +
+					ltoh16(txstatus->tx_pktts.d_t4));
+
+ /* check for overflow */
+ if (ntohl(fwts.ts[3]) > ntohl(fwts.ts[0])) {
+					/* send tx timestamp to netlink socket */
+ dhd_msgbuf_send_msg_tx_ts(dhd, pkt, &fwts, METADATA_VER_1);
+ }
+ }
+ }
+ }
+#endif /* DHD_PKTTS */
+
+#if DHD_DBG_SHOW_METADATA
+ if (dhd->prot->metadata_dbg &&
+ dhd->prot->tx_metadata_offset && txstatus->metadata_len) {
+ uchar *ptr;
+ /* The Ethernet header of TX frame was copied and removed.
+ * Here, move the data pointer forward by Ethernet header size.
+ */
+ PKTPULL(dhd->osh, pkt, ETHER_HDR_LEN);
+ ptr = PKTDATA(dhd->osh, pkt) - (dhd->prot->tx_metadata_offset);
+ bcm_print_bytes("txmetadata", ptr, txstatus->metadata_len);
+ dhd_prot_print_metadata(dhd, ptr, txstatus->metadata_len);
+ }
+#endif /* DHD_DBG_SHOW_METADATA */
+
+#ifdef DHD_HP2P
+ if (dhd->hp2p_capable && flow_ring_node->flow_info.tid == HP2P_PRIO) {
+#ifdef DHD_HP2P_DEBUG
+ bcm_print_bytes("txcpl", (uint8 *)txstatus, sizeof(host_txbuf_cmpl_t));
+#endif /* DHD_HP2P_DEBUG */
+ dhd_update_hp2p_txstats(dhd, txstatus);
+ }
+#endif /* DHD_HP2P */
+
+#ifdef DHD_TIMESYNC
+ if (dhd->prot->tx_ts_log_enabled) {
+ dhd_pkt_parse_t parse;
+ ts_timestamp_t *ts = (ts_timestamp_t *)&(txstatus->ts);
+
+ memset(&parse, 0, sizeof(parse));
+ dhd_parse_proto(PKTDATA(dhd->osh, pkt), &parse);
+
+ if (parse.proto == IP_PROT_ICMP)
+ dhd_timesync_log_tx_timestamp(dhd->ts,
+ txstatus->compl_hdr.flow_ring_id,
+ txstatus->cmn_hdr.if_id,
+ ts->low, ts->high, &parse);
+ }
+#endif /* DHD_TIMESYNC */
+
+#ifdef DHD_LBUF_AUDIT
+ PKTAUDIT(dhd->osh, pkt);
+#endif
+ DHD_FLOWRING_TXSTATUS_CNT_UPDATE(dhd->bus, txstatus->compl_hdr.flow_ring_id,
+ txstatus->tx_status);
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+#ifdef DHD_PKTTS
+ if (meta_data_buf.va) {
+ DMA_FREE_CONSISTENT(dhd->osh, meta_data_buf.va, meta_data_buf._alloced,
+ meta_data_buf.pa, meta_data_buf.dmah);
+ }
+#endif /* DHD_PKTTS */
+#ifdef DHD_MEM_STATS
+ DHD_MEM_STATS_LOCK(dhd->mem_stats_lock, flags);
+ DHD_MSGBUF_INFO(("%s txpath_mem: %llu PKTLEN: %d\n",
+ __FUNCTION__, dhd->txpath_mem, PKTLEN(dhd->osh, pkt)));
+ dhd->txpath_mem -= PKTLEN(dhd->osh, pkt);
+ DHD_MEM_STATS_UNLOCK(dhd->mem_stats_lock, flags);
+#endif /* DHD_MEM_STATS */
+ PKTFREE(dhd->osh, pkt, TRUE);
+
+ return;
+} /* dhd_prot_txstatus_process */
+
+/* FIXME: assuming that it is getting inline data related to the event data */
+/** called on MSG_TYPE_WL_EVENT message received from dongle */
+static void
+dhd_prot_event_process(dhd_pub_t *dhd, void *msg)
+{
+ wlevent_req_msg_t *evnt;
+ uint32 bufid;
+ uint16 buflen;
+ int ifidx = 0;
+ void* pkt;
+ dhd_prot_t *prot = dhd->prot;
+
+ /* Event complete header */
+ evnt = (wlevent_req_msg_t *)msg;
+ bufid = ltoh32(evnt->cmn_hdr.request_id);
+
+#if defined(DHD_PKTID_AUDIT_RING) && !defined(BCM_ROUTER_DHD)
+ DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, bufid,
+ DHD_DUPLICATE_FREE, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
+#endif /* DHD_PKTID_AUDIT_RING && !BCM_ROUTER_DHD */
+
+ buflen = ltoh16(evnt->event_data_len);
+
+ ifidx = BCMMSGBUF_API_IFIDX(&evnt->cmn_hdr);
+ /* FIXME: check the event status */
+
+ /* Post another rxbuf to the device */
+ if (prot->cur_event_bufs_posted)
+ prot->cur_event_bufs_posted--;
+ dhd_msgbuf_rxbuf_post_event_bufs(dhd);
+
+ pkt = dhd_prot_packet_get(dhd, bufid, PKTTYPE_EVENT_RX, TRUE);
+
+ if (!pkt) {
+ DHD_ERROR(("%s: pkt is NULL for pktid %d\n", __FUNCTION__, bufid));
+ return;
+ }
+
+#if !defined(BCM_ROUTER_DHD)
+ /* FIXME: make sure the length is more than dataoffset */
+ /* DMA RX offset updated through shared area */
+ if (dhd->prot->rx_dataoffset)
+ PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset);
+#endif /* !BCM_ROUTER_DHD */
+
+ PKTSETLEN(dhd->osh, pkt, buflen);
+#ifdef DHD_LBUF_AUDIT
+ PKTAUDIT(dhd->osh, pkt);
+#endif
+ dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1);
+}
+
+#if !defined(BCM_ROUTER_DHD)
+/** called on MSG_TYPE_INFO_BUF_CMPLT message received from dongle */
+static void
+BCMFASTPATH(dhd_prot_process_infobuf_complete)(dhd_pub_t *dhd, void* buf)
+{
+ info_buf_resp_t *resp;
+ uint32 pktid;
+ uint16 buflen;
+ void * pkt;
+
+ resp = (info_buf_resp_t *)buf;
+ pktid = ltoh32(resp->cmn_hdr.request_id);
+ buflen = ltoh16(resp->info_data_len);
+
+#ifdef DHD_PKTID_AUDIT_RING
+ DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, pktid,
+ DHD_DUPLICATE_FREE, buf, D2HRING_INFO_BUFCMPLT_ITEMSIZE);
+#endif /* DHD_PKTID_AUDIT_RING */
+
+ DHD_MSGBUF_INFO(("id 0x%04x, len %d, phase 0x%02x, seqnum %d, rx_dataoffset %d\n",
+ pktid, buflen, resp->cmn_hdr.flags, ltoh16(resp->seqnum),
+ dhd->prot->rx_dataoffset));
+
+ if (dhd->debug_buf_dest_support) {
+ if (resp->dest < DEBUG_BUF_DEST_MAX) {
+ dhd->debug_buf_dest_stat[resp->dest]++;
+ }
+ }
+
+ pkt = dhd_prot_packet_get(dhd, pktid, PKTTYPE_INFO_RX, TRUE);
+ if (!pkt)
+ return;
+
+#if !defined(BCM_ROUTER_DHD)
+ /* FIXME: make sure the length is more than dataoffset */
+ /* DMA RX offset updated through shared area */
+ if (dhd->prot->rx_dataoffset)
+ PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset);
+#endif /* !BCM_ROUTER_DHD */
+
+ PKTSETLEN(dhd->osh, pkt, buflen);
+#ifdef DHD_LBUF_AUDIT
+ PKTAUDIT(dhd->osh, pkt);
+#endif
+ /* info ring "debug" data, which is not an 802.3 frame, is sent up with
+ * a special ifidx of -1. This is purely internal to dhd, to get the data
+ * from here (dhd_prot_process_infobuf_complete) to
+ * dhd_linux.c:dhd_rx_frame().
+ */
+ dhd_bus_rx_frame(dhd->bus, pkt, DHD_DUMMY_INFO_IF /* ifidx HACK */, 1);
+}
+#endif /* !BCM_ROUTER_DHD */
+
+/** called on MSG_TYPE_SNAPSHOT_CMPLT message received from dongle */
+static void
+BCMFASTPATH(dhd_prot_process_snapshot_complete)(dhd_pub_t *dhd, void *buf)
+{
+#ifdef SNAPSHOT_UPLOAD
+ dhd_prot_t *prot = dhd->prot;
+ snapshot_resp_t *resp;
+ uint16 status;
+
+ resp = (snapshot_resp_t *)buf;
+
+ /* check completion status */
+ status = resp->compl_hdr.status;
+ if (status != BCMPCIE_SUCCESS) {
+ DHD_ERROR(("%s: failed: %s (%d)\n",
+ __FUNCTION__,
+ status == BCMPCIE_BT_DMA_ERR ? "DMA_ERR" :
+ status == BCMPCIE_BT_DMA_DESCR_FETCH_ERR ?
+ "DMA_DESCR_ERR" :
+ status == BCMPCIE_SNAPSHOT_ERR ? "SNAPSHOT_ERR" :
+ status == BCMPCIE_NOT_READY ? "NOT_READY" :
+ status == BCMPCIE_INVALID_DATA ? "INVALID_DATA" :
+ status == BCMPCIE_NO_RESPONSE ? "NO_RESPONSE" :
+ status == BCMPCIE_NO_CLOCK ? "NO_CLOCK" :
+ "", status));
+ }
+
+ /* length may be truncated if error occurred */
+ prot->snapshot_upload_len = ltoh32(resp->resp_len);
+ prot->snapshot_type = resp->type;
+ prot->snapshot_cmpl_pending = FALSE;
+
+ DHD_INFO(("%s id 0x%04x, phase 0x%02x, resp_len %d, type %d\n",
+ __FUNCTION__, ltoh32(resp->cmn_hdr.request_id),
+ resp->cmn_hdr.flags,
+ prot->snapshot_upload_len, prot->snapshot_type));
+#endif /* SNAPSHOT_UPLOAD */
+}
+
+#ifdef BTLOG
+/** called on MSG_TYPE_BT_LOG_CMPLT message received from dongle */
+static void
+BCMFASTPATH(dhd_prot_process_btlog_complete)(dhd_pub_t *dhd, void* buf)
+{
+ info_buf_resp_t *resp;
+ uint32 pktid;
+ uint16 buflen;
+ void * pkt;
+
+ resp = (info_buf_resp_t *)buf;
+ pktid = ltoh32(resp->cmn_hdr.request_id);
+ buflen = ltoh16(resp->info_data_len);
+
+ /* check completion status */
+ if (resp->compl_hdr.status != BCMPCIE_SUCCESS) {
+ DHD_ERROR(("%s: failed completion status %d\n",
+ __FUNCTION__, resp->compl_hdr.status));
+ return;
+ }
+
+#ifdef DHD_PKTID_AUDIT_RING
+ DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, pktid,
+ DHD_DUPLICATE_FREE, buf, D2HRING_INFO_BUFCMPLT_ITEMSIZE);
+#endif /* DHD_PKTID_AUDIT_RING */
+
+ DHD_INFO(("id 0x%04x, len %d, phase 0x%02x, seqnum %d, rx_dataoffset %d\n",
+ pktid, buflen, resp->cmn_hdr.flags, ltoh16(resp->seqnum),
+ dhd->prot->rx_dataoffset));
+
+ pkt = dhd_prot_packet_get(dhd, pktid, PKTTYPE_INFO_RX, TRUE);
+
+ if (!pkt)
+ return;
+
+#if !defined(BCM_ROUTER_DHD)
+ /* FIXME: make sure the length is more than dataoffset */
+ /* DMA RX offset updated through shared area */
+ if (dhd->prot->rx_dataoffset)
+ PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset);
+#endif /* !BCM_ROUTER_DHD */
+
+ PKTSETLEN(dhd->osh, pkt, buflen);
+ PKTSETNEXT(dhd->osh, pkt, NULL);
+
+ dhd_bus_rx_bt_log(dhd->bus, pkt);
+}
+#endif /* BTLOG */
+
+/** Stop protocol: sync w/dongle state. */
+void dhd_prot_stop(dhd_pub_t *dhd)
+{
+ ASSERT(dhd);
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+#if defined(NDIS)
+ if (dhd->prot) {
+ DHD_NATIVE_TO_PKTID_RESET(dhd, dhd->prot->pktid_ctrl_map);
+ DHD_NATIVE_TO_PKTID_RESET(dhd, dhd->prot->pktid_rx_map);
+ DHD_NATIVE_TO_PKTID_RESET(dhd, dhd->prot->pktid_tx_map);
+#if defined(IOCTLRESP_USE_CONSTMEM)
+ DHD_NATIVE_TO_PKTID_RESET_IOCTL(dhd, dhd->prot->pktid_map_handle_ioctl);
+#endif /* IOCTLRESP_USE_CONSTMEM */
+ }
+#endif /* NDIS */
+}
+
+/* Add any protocol-specific data header.
+ * Caller must reserve prot_hdrlen prepend space.
+ */
+void
+BCMFASTPATH(dhd_prot_hdrpush)(dhd_pub_t *dhd, int ifidx, void *PKTBUF)
+{
+ return;
+}
+
+uint
+dhd_prot_hdrlen(dhd_pub_t *dhd, void *PKTBUF)
+{
+ return 0;
+}
+
+#define PKTBUF pktbuf
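+/*
+ * Note: PKTBUF is a macro over the parameter name so that paths inside
+ * dhd_prot_txdata() (e.g. the BCMINTERNAL skb_copy()-to-GFP_DMA path)
+ * can swap the underlying packet pointer while the code keeps reading
+ * as "PKTBUF"; it is #undef'd again right after the routine.
+ */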
+
+/**
+ * Called when a tx ethernet packet has been dequeued from a flow queue, and has to be inserted in
+ * the corresponding flow ring.
+ */
+int
+BCMFASTPATH(dhd_prot_txdata)(dhd_pub_t *dhd, void *PKTBUF, uint8 ifidx)
+{
+ unsigned long flags;
+ dhd_prot_t *prot = dhd->prot;
+ host_txbuf_post_t *txdesc = NULL;
+ dmaaddr_t pa, meta_pa;
+ uint8 *pktdata;
+ uint32 pktlen;
+ uint32 pktid;
+ uint8 prio;
+ uint16 flowid = 0;
+ uint16 alloced = 0;
+ uint16 headroom;
+ msgbuf_ring_t *ring;
+ flow_ring_table_t *flow_ring_table;
+ flow_ring_node_t *flow_ring_node;
+#if defined(BCMINTERNAL) && defined(LINUX)
+ void *pkt_to_free = NULL;
+#endif /* BCMINTERNAL && LINUX */
+#ifdef DHD_PKTTS
+ dhd_dma_buf_t meta_data_buf;
+ uint16 meta_data_buf_len = dhd->pkt_metadata_buflen;
+ uint64 addr = 0;
+#endif /* DHD_PKTTS */
+ void *big_pktbuf = NULL;
+ uint8 dhd_udr = FALSE;
+ bool host_sfh_llc_reqd = dhd->host_sfhllc_supported;
+ bool llc_inserted = FALSE;
+
+ BCM_REFERENCE(llc_inserted);
+#ifdef PCIE_INB_DW
+ if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK) {
+ DHD_ERROR(("failed to increment hostactive_devwake\n"));
+ return BCME_ERROR;
+ }
+#endif /* PCIE_INB_DW */
+
+ if (dhd->flow_ring_table == NULL) {
+ DHD_ERROR(("dhd flow_ring_table is NULL\n"));
+ goto fail;
+ }
+
+#ifdef DHD_PCIE_PKTID
+ if (!DHD_PKTID_AVAIL(dhd->prot->pktid_tx_map)) {
+ if (dhd->prot->pktid_depleted_cnt == DHD_PKTID_DEPLETED_MAX_COUNT) {
+ DHD_ERROR(("%s: stop tx queue as pktid_depleted_cnt maxed\n",
+ __FUNCTION__));
+ prot->pktid_txq_stop_cnt++;
+ dhd_bus_stop_queue(dhd->bus);
+ dhd->prot->no_tx_resource = TRUE;
+ }
+ dhd->prot->pktid_depleted_cnt++;
+ goto fail;
+ } else {
+ dhd->prot->pktid_depleted_cnt = 0;
+ }
+#endif /* DHD_PCIE_PKTID */
+
+ if (dhd->dhd_induce_error == DHD_INDUCE_TX_BIG_PKT) {
+ if ((big_pktbuf = PKTGET(dhd->osh, DHD_FLOWRING_TX_BIG_PKT_SIZE, TRUE)) == NULL) {
+ DHD_ERROR(("%s:%d: PKTGET for txbuf failed\n", __FUNCTION__, __LINE__));
+ goto fail;
+ }
+
+ memset(PKTDATA(dhd->osh, big_pktbuf), 0xff, DHD_FLOWRING_TX_BIG_PKT_SIZE);
+ DHD_ERROR(("PKTBUF len = %d big_pktbuf len = %d\n", PKTLEN(dhd->osh, PKTBUF),
+ PKTLEN(dhd->osh, big_pktbuf)));
+ if (memcpy_s(PKTDATA(dhd->osh, big_pktbuf), DHD_FLOWRING_TX_BIG_PKT_SIZE,
+ PKTDATA(dhd->osh, PKTBUF), PKTLEN(dhd->osh, PKTBUF)) != BCME_OK) {
+ DHD_ERROR(("%s:%d: memcpy_s big_pktbuf failed\n", __FUNCTION__, __LINE__));
+ ASSERT(0);
+ }
+ }
+
+ flowid = DHD_PKT_GET_FLOWID(PKTBUF);
+ flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
+ flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid];
+
+ ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
+
+ /*
+ * XXX:
+ * JIRA SW4349-436:
+ * The copy of the TX buffer into an SKB that lives in the DMA zone is
+ * done here; previously it was done from dhd_start_xmit(). When the
+ * host pumps heavy traffic at the dongle, the queue backing the flow
+ * rings fills up and pins scarce DMA-zone memory, eventually running
+ * the host out of DMA-zone memory. With this change the backup queue
+ * keeps holding the network stack's pointers, and the copy is performed
+ * only just before the physical address is put into the flow ring.
+ */
+#if defined(BCMINTERNAL) && defined(LINUX)
+ if (osl_is_flag_set(dhd->osh, OSL_PHYS_MEM_LESS_THAN_16MB)) {
+ struct sk_buff *skb;
+ /*
+ * We are about to add the Ethernet header and send out,
+ * copy the skb here.
+ */
+ skb = skb_copy(PKTBUF, GFP_DMA);
+ if (skb == NULL) {
+ /*
+ * Memory allocation failed, the old packet can
+ * live in the queue, return BCME_NORESOURCE so
+ * the caller re-queues this packet
+ */
+ DHD_ERROR(("%s: skb_copy(DMA) failed\n", __FUNCTION__));
+ goto fail;
+ }
+
+ /*
+ * Now we have copied the SKB to GFP_DMA memory, make the
+ * rest of the code operate on this new SKB. Hold on to
+ * the original SKB. If we don't get the pkt id or flow ring
+ * space we'll free the Zone memory and return "no resource"
+ * so the caller would re-queue the original SKB.
+ */
+ pkt_to_free = PKTBUF;
+ PKTBUF = skb;
+ }
+#endif /* BCMINTERNAL && LINUX */
+
+ if (dhd->dhd_induce_error == DHD_INDUCE_TX_BIG_PKT && big_pktbuf) {
+ PKTFREE(dhd->osh, PKTBUF, TRUE);
+ PKTBUF = big_pktbuf;
+ }
+
+ DHD_RING_LOCK(ring->ring_lock, flags);
+
+ /* Create a unique 32-bit packet id */
+ pktid = DHD_NATIVE_TO_PKTID_RSV(dhd, dhd->prot->pktid_tx_map,
+ PKTBUF, PKTTYPE_DATA_TX);
+#if defined(DHD_PCIE_PKTID)
+ if (pktid == DHD_PKTID_INVALID) {
+ DHD_ERROR_RLMT(("%s: Pktid pool depleted.\n", __FUNCTION__));
+ /*
+ * If we return error here, the caller would queue the packet
+ * again. So we'll just free the skb allocated in DMA Zone.
+ * Since we have not freed the original SKB yet the caller would
+ * requeue the same.
+ */
+ goto err_no_res_pktfree;
+ }
+#endif /* DHD_PCIE_PKTID */
+
+ /* Reserve space in the circular buffer */
+ txdesc = (host_txbuf_post_t *)
+ dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
+ if (txdesc == NULL) {
+ DHD_INFO(("%s:%d: HTOD Msgbuf Not available TxCount = %d\n",
+ __FUNCTION__, __LINE__, OSL_ATOMIC_READ(dhd->osh, &prot->active_tx_count)));
+ goto err_free_pktid;
+ }
+ txdesc->flags = 0;
+
+ /* Extract the data pointer and length information */
+ pktdata = PKTDATA(dhd->osh, PKTBUF);
+ pktlen = PKTLEN(dhd->osh, PKTBUF);
+
+ /* TODO: XXX: re-look into dropped packets */
+ DHD_DBG_PKT_MON_TX(dhd, PKTBUF, pktid);
+
+ dhd_handle_pktdata(dhd, ifidx, PKTBUF, pktdata, pktid,
+ pktlen, NULL, &dhd_udr, TRUE, FALSE, TRUE);
+
+#if defined(BCMINTERNAL) && defined(LINUX)
+ /*
+ * We have got all the resources, pktid and ring space
+ * so we can safely free the original SKB here.
+ */
+ if (osl_is_flag_set(dhd->osh, OSL_PHYS_MEM_LESS_THAN_16MB))
+ PKTCFREE(dhd->osh, pkt_to_free, FALSE);
+#endif /* BCMINTERNAL && LINUX */
+
+ /* Ethernet header - contains ethertype field
+ * Copy before we cache flush packet using DMA_MAP
+ */
+ bcopy(pktdata, txdesc->txhdr, ETHER_HDR_LEN);
+
+#ifdef DHD_AWDL
+ /* The awdl ifidx always has a non-zero value once the awdl
+ * interface is created, because the primary interface (usually
+ * eth1) always has ifidx 0. A non-zero awdl ifidx therefore
+ * tells us whether the awdl interface exists.
+ */
+ if (dhd->awdl_llc_enabled &&
+ dhd->awdl_ifidx && ifidx == dhd->awdl_ifidx) {
+ if (host_sfh_llc_reqd) {
+ /* if FW supports host sfh llc insertion
+ * then BOTH sfh and llc needs to be inserted
+ * in which case the host LLC only path
+ * in FW will not be exercised - which is the
+ * objective of this feature. Hence in such a
+ * case disable awdl llc insertion
+ */
+ DHD_ERROR_RLMT(("%s: FW supports host sfh + llc, this is"
+ "is incompatible with awdl llc insertion"
+ " disable host sfh llc support in FW and try\n",
+ __FUNCTION__));
+ } else {
+ if (dhd_ether_to_awdl_llc_hdr(dhd, (struct ether_header *)pktdata,
+ PKTBUF) == BCME_OK) {
+ llc_inserted = TRUE;
+ /* in work item change ether type to len by
+ * re-copying the ether header
+ */
+ memcpy_s(txdesc->txhdr, ETHER_HDR_LEN, PKTDATA(dhd->osh, PKTBUF),
+ ETHER_HDR_LEN);
+ } else {
+ goto err_rollback_idx;
+ }
+ }
+ }
+#endif /* DHD_AWDL */
+
+#ifdef HOST_SFH_LLC
+ if (host_sfh_llc_reqd) {
+ if (dhd_ether_to_8023_hdr(dhd->osh, (struct ether_header *)pktdata,
+ PKTBUF) == BCME_OK) {
+ /* adjust the data pointer and length information */
+ pktdata = PKTDATA(dhd->osh, PKTBUF);
+ pktlen = PKTLEN(dhd->osh, PKTBUF);
+ txdesc->flags |= BCMPCIE_TXPOST_FLAGS_HOST_SFH_LLC;
+ } else {
+ goto err_rollback_idx;
+ }
+ } else
+#endif /* HOST_SFH_LLC */
+ {
+ /* Extract the ethernet header and adjust the data pointer and length */
+ pktlen = PKTLEN(dhd->osh, PKTBUF) - ETHER_HDR_LEN;
+ pktdata = PKTPULL(dhd->osh, PKTBUF, ETHER_HDR_LEN);
+ }
+
+ /* Map the data pointer to a DMA-able address */
+ pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), pktlen, DMA_TX, PKTBUF, 0);
+
+ if (PHYSADDRISZERO(pa)) {
+ DHD_ERROR(("%s: Something really bad, unless 0 is "
+ "a valid phyaddr for pa\n", __FUNCTION__));
+ ASSERT(0);
+ /* XXX if ASSERT() doesn't work like as Android platform,
+ * try to requeue the packet to the backup queue.
+ */
+ goto err_rollback_idx;
+ }
+
+#ifdef DMAMAP_STATS
+ dhd->dma_stats.txdata++;
+ dhd->dma_stats.txdata_sz += pktlen;
+#endif /* DMAMAP_STATS */
+ /* No need to lock. Save the rest of the packet's metadata */
+ DHD_NATIVE_TO_PKTID_SAVE(dhd, dhd->prot->pktid_tx_map, PKTBUF, pktid,
+ pa, pktlen, DMA_TX, NULL, ring->dma_buf.secdma, PKTTYPE_DATA_TX);
+
+#ifdef TXP_FLUSH_NITEMS
+ if (ring->pend_items_count == 0)
+ ring->start_addr = (void *)txdesc;
+ ring->pend_items_count++;
+#endif
+#ifdef DHD_HMAPTEST
+ if (dhd->prot->hmaptest_tx_active == HMAPTEST_D11_TX_ACTIVE) {
+ /* scratch area */
+ dhd->prot->hmap_tx_buf_va = (char *)dhd->prot->hmaptest.mem.va
+ + dhd->prot->hmaptest.offset;
+ /* replace pa with our pa for txbuf post only */
+ dhd->prot->hmap_tx_buf_len = pktlen;
+ if ((dhd->prot->hmap_tx_buf_va + dhd->prot->hmap_tx_buf_len) >
+ ((char *)dhd->prot->hmaptest.mem.va + dhd->prot->hmaptest.mem.len)) {
+ DHD_ERROR(("hmaptest: ERROR Txpost outside HMAPTEST buffer\n"));
+ DHD_ERROR(("hmaptest: NOT Replacing Rx Buffer\n"));
+ dhd->prot->hmaptest_tx_active = HMAPTEST_D11_TX_INACTIVE;
+ dhd->prot->hmaptest.in_progress = FALSE;
+ } else {
+ /* copy pktdata to our va */
+ memcpy(dhd->prot->hmap_tx_buf_va, PKTDATA(dhd->osh, PKTBUF), pktlen);
+ pa = DMA_MAP(dhd->osh, dhd->prot->hmap_tx_buf_va,
+ dhd->prot->hmap_tx_buf_len, DMA_TX, PKTBUF, 0);
+
+ dhd->prot->hmap_tx_buf_pa = pa;
+ /* store pktid for later mapping in txcpl */
+ dhd->prot->hmaptest_tx_pktid = pktid;
+ dhd->prot->hmaptest_tx_active = HMAPTEST_D11_TX_POSTED;
+ DHD_ERROR(("hmaptest: d11read txpost scratch txbuf pktid=0x%08x\n", pktid));
+ DHD_ERROR(("hmaptest: d11read txpost txbuf va=0x%p pa.lo=0x%08x len=%d\n",
+ dhd->prot->hmap_tx_buf_va, (uint32)PHYSADDRLO(pa), pktlen));
+ }
+ }
+#endif /* DHD_HMAPTEST */
+
+#ifdef DHD_PKTTS
+ memset(&meta_data_buf, 0, sizeof(meta_data_buf));
+ if (dhd_get_pktts_enab(dhd) &&
+ dhd->pkt_metadata_buflen) {
+ /* Allocate memory for Meta data */
+ meta_data_buf.va = DMA_ALLOC_CONSISTENT(dhd->osh, meta_data_buf_len,
+ DMA_ALIGN_LEN, &meta_data_buf._alloced,
+ &meta_data_buf.pa, &meta_data_buf.dmah);
+
+ if (meta_data_buf.va == NULL) {
+ DHD_ERROR_RLMT(("%s: dhd_dma_buf_alloc failed \r\n", __FUNCTION__));
+ DHD_ERROR_RLMT((" ... Proceeding without metadata buffer \r\n"));
+ } else {
+ DHD_PKTID_SAVE_METADATA(dhd, dhd->prot->pktid_tx_map,
+ (void *)meta_data_buf.va,
+ meta_data_buf.pa,
+ (uint16)meta_data_buf._alloced,
+ meta_data_buf.dmah,
+ pktid);
+ }
+ memcpy(&addr, &meta_data_buf.pa, sizeof(meta_data_buf.pa));
+ DHD_TRACE(("Meta data Buffer VA: %p PA: %llx dmah: %p\r\n",
+ meta_data_buf.va, addr, meta_data_buf.dmah));
+
+ txdesc->metadata_buf_addr.low = addr & (0xFFFFFFFF);
+ txdesc->metadata_buf_addr.high = (addr >> 32) & (0xFFFFFFFF);
+ txdesc->metadata_buf_len = meta_data_buf_len;
+ }
+#endif /* DHD_PKTTS */
+
+ /* Form the Tx descriptor message buffer */
+
+ /* Common message hdr */
+ txdesc->cmn_hdr.msg_type = MSG_TYPE_TX_POST;
+ txdesc->cmn_hdr.if_id = ifidx;
+ txdesc->cmn_hdr.flags = ring->current_phase;
+
+ txdesc->flags |= BCMPCIE_PKT_FLAGS_FRAME_802_3;
+ prio = (uint8)PKTPRIO(PKTBUF);
+
+#ifdef EXT_STA
+ txdesc->flags &= ~BCMPCIE_PKT_FLAGS_FRAME_EXEMPT_MASK <<
+ BCMPCIE_PKT_FLAGS_FRAME_EXEMPT_SHIFT;
+ txdesc->flags |= (WLPKTFLAG_EXEMPT_GET(WLPKTTAG(PKTBUF)) &
+ BCMPCIE_PKT_FLAGS_FRAME_EXEMPT_MASK)
+ << BCMPCIE_PKT_FLAGS_FRAME_EXEMPT_SHIFT;
+#endif
+
+ txdesc->flags |= (prio & 0x7) << BCMPCIE_PKT_FLAGS_PRIO_SHIFT;
+ txdesc->seg_cnt = 1;
+
+ txdesc->data_len = htol16((uint16) pktlen);
+ txdesc->data_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
+ txdesc->data_buf_addr.low_addr = htol32(PHYSADDRLO(pa));
+
+ if (!host_sfh_llc_reqd)
+ {
+ /* Move data pointer to keep ether header in local PKTBUF for later reference */
+ PKTPUSH(dhd->osh, PKTBUF, ETHER_HDR_LEN);
+ }
+
+ txdesc->ext_flags = 0;
+
+#ifdef DHD_TIMESYNC
+ txdesc->rate = 0;
+
+ if (!llc_inserted && dhd->prot->tx_ts_log_enabled) {
+ dhd_pkt_parse_t parse;
+
+ dhd_parse_proto(PKTDATA(dhd->osh, PKTBUF), &parse);
+
+ if (parse.proto == IP_PROT_ICMP) {
+ if (dhd->prot->no_retry)
+ txdesc->ext_flags = BCMPCIE_PKT_FLAGS_FRAME_NORETRY;
+ if (dhd->prot->no_aggr)
+ txdesc->ext_flags |= BCMPCIE_PKT_FLAGS_FRAME_NOAGGR;
+ if (dhd->prot->fixed_rate)
+ txdesc->ext_flags |= BCMPCIE_PKT_FLAGS_FRAME_UDR;
+ }
+ }
+#endif /* DHD_TIMESYNC */
+
+#ifdef DHD_SBN
+ if (dhd_udr) {
+ txdesc->ext_flags |= BCMPCIE_PKT_FLAGS_FRAME_UDR;
+ }
+#endif /* DHD_SBN */
+
+#ifdef DHD_TX_PROFILE
+ if (!llc_inserted &&
+ dhd->tx_profile_enab && dhd->num_profiles > 0)
+ {
+ uint8 offset;
+
+ for (offset = 0; offset < dhd->num_profiles; offset++) {
+ if (dhd_protocol_matches_profile((uint8 *)PKTDATA(dhd->osh, PKTBUF),
+ PKTLEN(dhd->osh, PKTBUF), &(dhd->protocol_filters[offset]),
+ host_sfh_llc_reqd)) {
+ /* mask so other reserved bits are not modified. */
+ txdesc->rate |=
+ (((uint8)dhd->protocol_filters[offset].profile_index) &
+ BCMPCIE_TXPOST_RATE_PROFILE_IDX_MASK);
+
+ /* so we can use the rate field for our purposes */
+ txdesc->rate |= BCMPCIE_TXPOST_RATE_EXT_USAGE;
+
+ break;
+ }
+ }
+ }
+#endif /* defined(DHD_TX_PROFILE) */
+
+ /* Handle Tx metadata */
+ headroom = (uint16)PKTHEADROOM(dhd->osh, PKTBUF);
+ if (prot->tx_metadata_offset && (headroom < prot->tx_metadata_offset))
+ DHD_ERROR(("No headroom for Metadata tx %d %d\n",
+ prot->tx_metadata_offset, headroom));
+
+ if (prot->tx_metadata_offset && (headroom >= prot->tx_metadata_offset)) {
+ DHD_TRACE(("Metadata in tx %d\n", prot->tx_metadata_offset));
+
+ /* Adjust the data pointer to account for meta data in DMA_MAP */
+ PKTPUSH(dhd->osh, PKTBUF, prot->tx_metadata_offset);
+
+ meta_pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF),
+ prot->tx_metadata_offset, DMA_RX, PKTBUF, 0);
+
+ if (PHYSADDRISZERO(meta_pa)) {
+ /* Unmap the data pointer to a DMA-able address */
+ DMA_UNMAP(dhd->osh, pa, pktlen, DMA_TX, 0, DHD_DMAH_NULL);
+#ifdef TXP_FLUSH_NITEMS
+ /* update pend_items_count */
+ ring->pend_items_count--;
+#endif /* TXP_FLUSH_NITEMS */
+
+ DHD_ERROR(("%s: Something really bad, unless 0 is "
+ "a valid phyaddr for meta_pa\n", __FUNCTION__));
+ ASSERT(0);
+ /* XXX if ASSERT() doesn't work like as Android platform,
+ * try to requeue the packet to the backup queue.
+ */
+ goto err_rollback_idx;
+ }
+
+ /* Adjust the data pointer back to original value */
+ PKTPULL(dhd->osh, PKTBUF, prot->tx_metadata_offset);
+
+ txdesc->metadata_buf_len = prot->tx_metadata_offset;
+ txdesc->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(meta_pa));
+ txdesc->metadata_buf_addr.low_addr = htol32(PHYSADDRLO(meta_pa));
+ } else {
+#ifdef DHD_HP2P
+ if (dhd->hp2p_capable && flow_ring_node->flow_info.tid == HP2P_PRIO) {
+ dhd_update_hp2p_txdesc(dhd, txdesc);
+ } else
+#endif /* DHD_HP2P */
+#ifdef DHD_PKTTS
+ if (!dhd_get_pktts_enab(dhd) || !dhd->pkt_metadata_buflen) {
+#else
+ if (1) {
+#endif /* DHD_PKTTS */
+ txdesc->metadata_buf_len = htol16(0);
+ txdesc->metadata_buf_addr.high_addr = 0;
+ txdesc->metadata_buf_addr.low_addr = 0;
+ }
+ }
+
+#ifdef AGG_H2D_DB
+ OSL_ATOMIC_INC(dhd->osh, &ring->inflight);
+#endif /* AGG_H2D_DB */
+
+#ifdef DHD_PKTID_AUDIT_RING
+ DHD_PKTID_AUDIT(dhd, prot->pktid_tx_map, pktid, DHD_DUPLICATE_ALLOC);
+#endif /* DHD_PKTID_AUDIT_RING */
+
+ txdesc->cmn_hdr.request_id = htol32(pktid);
+
+ DHD_TRACE(("txpost: data_len %d, pktid 0x%04x\n", txdesc->data_len,
+ txdesc->cmn_hdr.request_id));
+
+#ifdef DHD_LBUF_AUDIT
+ PKTAUDIT(dhd->osh, PKTBUF);
+#endif
+
+ /* Update the write pointer in TCM & ring bell */
+#if defined(TXP_FLUSH_NITEMS)
+#if defined(DHD_HP2P)
+ if (dhd->hp2p_capable && flow_ring_node->flow_info.tid == HP2P_PRIO) {
+ dhd_calc_hp2p_burst(dhd, ring, flowid);
+ } else
+#endif /* HP2P */
+ {
+ if ((ring->pend_items_count == prot->txp_threshold) ||
+ ((uint8 *) txdesc == (uint8 *) DHD_RING_END_VA(ring))) {
+#ifdef AGG_H2D_DB
+ if (agg_h2d_db_enab) {
+ dhd_prot_txdata_aggr_db_write_flush(dhd, flowid);
+ if ((uint8 *) txdesc == (uint8 *) DHD_RING_END_VA(ring)) {
+ dhd_prot_aggregate_db_ring_door_bell(dhd, flowid, TRUE);
+ }
+ } else
+#endif /* AGG_H2D_DB */
+ {
+ dhd_prot_txdata_write_flush(dhd, flowid);
+ }
+
+ }
+ }
+#else
+ /* update ring's WR index and ring doorbell to dongle */
+ dhd_prot_ring_write_complete(dhd, ring, txdesc, 1);
+#endif /* TXP_FLUSH_NITEMS */
+
+#ifdef TX_STATUS_LATENCY_STATS
+ /* set the time when pkt is queued to flowring */
+ DHD_PKT_SET_QTIME(PKTBUF, OSL_SYSUPTIME_US());
+#elif defined(DHD_PKTTS)
+ if (dhd_get_pktts_enab(dhd) == TRUE) {
+ /* set the time when pkt is queued to flowring */
+ DHD_PKT_SET_QTIME(PKTBUF, OSL_SYSUPTIME_US());
+ }
+#endif /* TX_STATUS_LATENCY_STATS */
+
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+
+ OSL_ATOMIC_INC(dhd->osh, &prot->active_tx_count);
+
+ /*
+ * Take a wake lock; do not sleep if we have at least one packet
+ * to finish.
+ */
+ DHD_TXFL_WAKE_LOCK_TIMEOUT(dhd, MAX_TX_TIMEOUT);
+
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+#ifdef TX_STATUS_LATENCY_STATS
+ flow_ring_node->flow_info.num_tx_pkts++;
+#endif /* TX_STATUS_LATENCY_STATS */
+ return BCME_OK;
+
+err_rollback_idx:
+ /* roll back write pointer for unprocessed message */
+ if (ring->wr == 0) {
+ ring->wr = ring->max_items - 1;
+ } else {
+ ring->wr--;
+ if (ring->wr == 0) {
+ DHD_INFO(("%s: flipping the phase now\n", ring->name));
+ ring->current_phase = ring->current_phase ?
+ 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
+ }
+ }
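+ /*
+ * Illustrative rollback (example numbers): with max_items = 1024,
+ * rolling back from wr == 0 wraps to wr = 1023; otherwise wr is simply
+ * decremented, and when the decrement lands back on slot 0 the phase
+ * bit is toggled back to undo the flip that happened when the write
+ * pointer originally wrapped, keeping the dongle's phase-based work
+ * item validation consistent.
+ */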
+
+err_free_pktid:
+#if defined(DHD_PCIE_PKTID)
+ {
+ void *dmah;
+ void *secdma;
+ /* Free up the PKTID. physaddr and pktlen will be garbage. */
+ DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_tx_map, pktid,
+ pa, pktlen, dmah, secdma, PKTTYPE_NO_CHECK);
+ }
+
+err_no_res_pktfree:
+#endif /* DHD_PCIE_PKTID */
+
+#if defined(BCMINTERNAL) && defined(LINUX)
+ if (osl_is_flag_set(dhd->osh, OSL_PHYS_MEM_LESS_THAN_16MB))
+ PKTCFREE(dhd->osh, PKTBUF, FALSE);
+#endif /* BCMINTERNAL && LINUX */
+
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+
+fail:
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+ return BCME_NORESOURCE;
+} /* dhd_prot_txdata */
+
+#ifdef AGG_H2D_DB
+static void
+dhd_prot_txdata_aggr_db_write_flush(dhd_pub_t *dhd, uint16 flowid)
+{
+ flow_ring_table_t *flow_ring_table;
+ flow_ring_node_t *flow_ring_node;
+ msgbuf_ring_t *ring;
+
+ if (dhd->flow_ring_table == NULL) {
+ return;
+ }
+
+ flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
+ flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid];
+ ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
+
+ if (ring->pend_items_count) {
+ dhd_prot_agg_db_ring_write(dhd, ring, ring->start_addr,
+ ring->pend_items_count);
+ ring->pend_items_count = 0;
+ ring->start_addr = NULL;
+ }
+
+}
+#endif /* AGG_H2D_DB */
+
+/* called with a ring_lock */
+/** optimization to write "n" tx items at a time to ring */
+void
+BCMFASTPATH(dhd_prot_txdata_write_flush)(dhd_pub_t *dhd, uint16 flowid)
+{
+#ifdef TXP_FLUSH_NITEMS
+ flow_ring_table_t *flow_ring_table;
+ flow_ring_node_t *flow_ring_node;
+ msgbuf_ring_t *ring;
+
+ if (dhd->flow_ring_table == NULL) {
+ return;
+ }
+
+ flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
+ flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid];
+ ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
+
+ if (ring->pend_items_count) {
+ /* update ring's WR index and ring doorbell to dongle */
+ dhd_prot_ring_write_complete(dhd, ring, ring->start_addr,
+ ring->pend_items_count);
+ ring->pend_items_count = 0;
+ ring->start_addr = NULL;
+ dhd->prot->tx_h2d_db_cnt++;
+ }
+#endif /* TXP_FLUSH_NITEMS */
+}
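+/*
+ * Illustrative flush behaviour (example values, not the driver's actual
+ * defaults): with txp_threshold = 48, tx work items accumulate in the
+ * flow ring without a doorbell until either 48 are pending or the item
+ * just written occupies the last slot of the ring; only then is the WR
+ * index published and the dongle doorbell rung, amortising one MMIO
+ * write over many tx posts.
+ */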
+
+#undef PKTBUF /* Only defined in the above routine */
+
+int
+BCMFASTPATH(dhd_prot_hdrpull)(dhd_pub_t *dhd, int *ifidx, void *pkt, uchar *buf, uint *len)
+{
+ return 0;
+}
+
+/** post a set of receive buffers to the dongle */
+static void
+BCMFASTPATH(dhd_prot_return_rxbuf)(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint32 pktid,
+ uint32 rxcnt)
+/* XXX function name could be more descriptive, eg dhd_prot_post_rxbufs */
+{
+ dhd_prot_t *prot = dhd->prot;
+
+ if (prot->rxbufpost >= rxcnt) {
+ prot->rxbufpost -= (uint16)rxcnt;
+ } else {
+ /* XXX: this assert has been seen to fire; it stays
+ * disabled until the issue is root-caused.
+ */
+ /* ASSERT(0); */
+ prot->rxbufpost = 0;
+ }
+
+ if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD)) {
+ dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */
+ } else if (dhd->dma_h2d_ring_upd_support && !IDMA_ACTIVE(dhd)) {
+ /* Ring DoorBell after processing the rx packets,
+ * so that dongle will sync the DMA indices.
+ */
+ dhd_prot_ring_doorbell(dhd, DHD_RDPTR_UPDATE_H2D_DB_MAGIC(ring));
+ }
+
+ return;
+}
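+/*
+ * Illustrative threshold (hypothetical values): with max_rxbufpost = 256
+ * and RXBUFPOST_THRESHOLD = 32, fresh rx buffers are posted once the
+ * outstanding count drops to 224 or below; otherwise, when host->dongle
+ * DMA index updates are enabled (and iDMA is not active), only the
+ * doorbell is rung so the dongle resyncs its DMA indices.
+ */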
+
+#ifdef DHD_HMAPTEST
+
+static void
+dhd_msgbuf_hmaptest_cmplt(dhd_pub_t *dhd)
+{
+ dhd_prot_t *prot = dhd->prot;
+ uint64 end_usec;
+ char *readbuf;
+ uint32 len = dhd->prot->hmaptest.len;
+ uint32 i;
+
+ end_usec = OSL_SYSUPTIME_US();
+ end_usec -= prot->hmaptest.start_usec;
+ DHD_ERROR(("hmaptest cmplt: %d bytes in %llu usec, %u kBps\n",
+ len, end_usec, (len * (1000 * 1000 / 1024) / (uint32)(end_usec + 1))));
+
+ prot->hmaptest.in_progress = FALSE;
+ if (prot->hmaptest.accesstype == HMAPTEST_ACCESS_M2M) {
+ DHD_ERROR(("HMAPTEST_ACCESS_M2M\n"));
+ } else if (prot->hmaptest.accesstype == HMAPTEST_ACCESS_ARM) {
+ DHD_ERROR(("HMAPTEST_ACCESS_ARM\n"));
+ } else {
+ return;
+ }
+ readbuf = (char *)dhd->prot->hmaptest.mem.va + dhd->prot->hmaptest.offset;
+ OSL_CACHE_FLUSH(dhd->prot->hmaptest.mem.va,
+ dhd->prot->hmaptest.mem.len);
+ if (prot->hmaptest.is_write) {
+ DHD_ERROR(("hmaptest cmplt: FW has written at 0x%p\n", readbuf));
+ DHD_ERROR(("hmaptest cmplt: pattern = \n"));
+ len = ALIGN_SIZE(len, (sizeof(int32)));
+ for (i = 0; i < len; i += (sizeof(int32))) {
+ DHD_ERROR(("0x%08x\n", *(int *)(readbuf + i)));
+ }
+ DHD_ERROR(("\n\n"));
+ }
+
+}
+/* Program the HMAPTEST window and window-config registers.
+ * Reference HMAP implementation for OSes that can easily leverage it:
+ * this function shows how to program and enable the HMAP windows, and
+ * can be called at init time or from the hmap iovar.
+ */
+static void
+dhdmsgbuf_set_hmaptest_windows(dhd_pub_t *dhd)
+{
+ uint32 nwindows = 0;
+ uint32 scratch_len;
+ uint64 scratch_lin, w1_start;
+ dmaaddr_t scratch_pa;
+ pcie_hmapwindow_t *hmapwindows; /* 8 windows 0-7 */
+ dhd_prot_t *prot = dhd->prot;
+ uint corerev = dhd->bus->sih->buscorerev;
+
+ scratch_pa = prot->hmaptest.mem.pa;
+ scratch_len = prot->hmaptest.mem.len;
+ scratch_lin = (uint64)(PHYSADDRLO(scratch_pa) & 0xffffffff)
+ | (((uint64)PHYSADDRHI(scratch_pa)& 0xffffffff) << 32);
+ hmapwindows = (pcie_hmapwindow_t *)((uintptr_t)PCI_HMAP_WINDOW_BASE(corerev));
+ /* windows are 4kb aligned and window length is 512 byte aligned
+ * window start ends with 0x1000 and window length ends with 0xe00
+ * make the sandbox buffer 4kb aligned and size also 4kb aligned for hmap test
+ * window0 = 0 - sandbox_start
+ * window1 = sandbox_end + 1 - 0xffffffff
+ * window2 = 0x100000000 - 0x1fffffe00
+ * window 3 is programmed only for valid test cases
+ * window3 = sandbox_start - sandbox_end
+ */
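+ /*
+ * Worked example (hypothetical addresses): for a 0x2000-byte sandbox at
+ * PA 0xA0001000,
+ * window0 = [0x0, 0xA0001000) len 0xA0001000
+ * window1 = [0xA0003000, 0x100000000) len 0x5FFFD000
+ * window2 = [0x100000000, 0x1FFFFFE00) len 0xFFFFFE00
+ * leaving the sandbox itself uncovered, so a dongle access to it should
+ * raise an HMAP violation unless window3 is also programmed for the
+ * valid test cases.
+ */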
+ w1_start = scratch_lin + scratch_len;
+ DHD_ERROR(("hmaptest: window 0 offset lower=0x%p upper=0x%p length=0x%p\n",
+ &(hmapwindows[0].baseaddr_lo), &(hmapwindows[0].baseaddr_hi),
+ &(hmapwindows[0].windowlength)));
+ DHD_ERROR(("hmaptest: window 1 offset lower=0x%p upper=0x%p length=0x%p\n",
+ &(hmapwindows[1].baseaddr_lo), &(hmapwindows[1].baseaddr_hi),
+ &(hmapwindows[1].windowlength)));
+ DHD_ERROR(("hmaptest: window 2 offset lower=0x%p upper=0x%p length=0x%p\n",
+ &(hmapwindows[2].baseaddr_lo), &(hmapwindows[2].baseaddr_hi),
+ &(hmapwindows[2].windowlength)));
+ DHD_ERROR(("hmaptest: window 3 offset lower=0x%p upper=0x%p length=0x%p\n",
+ &(hmapwindows[3].baseaddr_lo), &(hmapwindows[3].baseaddr_hi),
+ &(hmapwindows[3].windowlength)));
+ DHD_ERROR(("hmaptest: w0 base_lo=0x%08x base_hi=0x%08x len=0x%0llx\n",
+ 0, 0, (uint64) scratch_lin));
+ DHD_ERROR(("hmaptest: w1 base_lo=0x%08x base_hi=0x%08x len=0x%0llx\n",
+ (uint32)(w1_start & 0xffffffff),
+ (uint32)((w1_start >> 32) & 0xffffffff),
+ (uint64)(0x100000000 - w1_start)));
+ DHD_ERROR(("hmaptest: w2 base_lo=0x%08x base_hi=0x%08x len=0x%0llx\n",
+ 0, 1, (uint64)0xfffffe00));
+ /* setting window0 */
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ (uintptr_t)(&(hmapwindows[0].baseaddr_lo)), ~0, 0x0);
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ (uintptr_t)(&(hmapwindows[0].baseaddr_hi)), ~0, 0x0);
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ (uintptr_t)(&(hmapwindows[0].windowlength)), ~0,
+ (uint64)scratch_lin);
+ /* setting window1 */
+ w1_start = scratch_lin + scratch_len;
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ (uintptr_t)(&(hmapwindows[1].baseaddr_lo)), ~0,
+ (uint32)(w1_start & 0xffffffff));
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ (uintptr_t)(&(hmapwindows[1].baseaddr_hi)), ~0,
+ (uint32)((w1_start >> 32) & 0xffffffff));
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ (uintptr_t)(&(hmapwindows[1].windowlength)), ~0,
+ (0x100000000 - w1_start));
+ /* setting window2 */
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ (uintptr_t)(&(hmapwindows[2].baseaddr_lo)), ~0, 0x0);
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ (uintptr_t)(&(hmapwindows[2].baseaddr_hi)), ~0, 0x1);
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ (uintptr_t)(&(hmapwindows[2].windowlength)), ~0, 0xfffffe00);
+ nwindows = 3;
+ /* program only windows 0-2 with section1 +section2 */
+ /* setting window config */
+ /* set bit 8:15 in windowconfig to enable n windows in order */
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ (uint)PCI_HMAP_WINDOW_CONFIG(corerev), ~0, (nwindows << 8));
+}
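+/*
+ * Encoding note (illustrative): window_config bits 8:15 carry the number
+ * of enabled windows, so nwindows = 3 writes 0x00000300; reading the
+ * register back and applying PCI_HMAP_NWINDOWS_MASK recovers the count,
+ * which is how dhdmsgbuf_hmap() below derives the HMAP enabled status.
+ */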
+
+/* Stop HMAPTEST. This function does not check corerev;
+ * the caller must ensure the corerev check is done.
+ */
+int
+dhdmsgbuf_hmaptest_stop(dhd_pub_t *dhd)
+{
+ uint32 window_config, nwindows, i;
+ pcie_hmapwindow_t *hmapwindows; /* 8 windows 0-7 */
+ uint corerev = dhd->bus->sih->buscorerev;
+
+ hmapwindows = (pcie_hmapwindow_t *)((uintptr_t)PCI_HMAP_WINDOW_BASE(corerev));
+ dhd->prot->hmaptest.in_progress = FALSE;
+
+ /* Reference for HMAP Implementation
+ * Disable HMAP windows.
+ * As windows were programmed in bus:hmap set call
+ * disabling in hmaptest_stop.
+ */
+ DHD_ERROR(("hmap: disable hmap windows\n"));
+ window_config = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ (uint)PCI_HMAP_WINDOW_CONFIG(corerev), 0, 0);
+ nwindows = (window_config & PCI_HMAP_NWINDOWS_MASK) >> PCI_HMAP_NWINDOWS_SHIFT;
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ (uint)PCI_HMAP_WINDOW_CONFIG(corerev), ~0, 0);
+ /* clear all windows */
+ for (i = 0; i < nwindows; i++) {
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ (uintptr_t)(&(hmapwindows[i].baseaddr_lo)), ~0, 0);
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ (uintptr_t)(&(hmapwindows[i].baseaddr_hi)), ~0, 0);
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ (uintptr_t)(&(hmapwindows[i].windowlength)), ~0, 0);
+ }
+
+ return BCME_OK;
+}
+
+/* HMAP iovar intercept process */
+int
+dhdmsgbuf_hmap(dhd_pub_t *dhd, pcie_hmap_t *hmap_params, bool set)
+{
+
+ uint32 scratch_len;
+ uint64 scratch_lin, w1_start;
+ dmaaddr_t scratch_pa;
+ uint32 addr_lo, addr_hi, window_length, window_config, nwindows, i;
+ pcie_hmapwindow_t *hmapwindows; /* 8 windows 0-7 */
+
+ dhd_prot_t *prot = dhd->prot;
+ dhd_bus_t *bus = dhd->bus;
+ uint corerev = bus->sih->buscorerev;
+ scratch_pa = prot->hmaptest.mem.pa;
+ scratch_len = prot->hmaptest.mem.len;
+ scratch_lin = (uint64)(PHYSADDRLO(scratch_pa) & 0xffffffff)
+ | (((uint64)PHYSADDRHI(scratch_pa)& 0xffffffff) << 32);
+ w1_start = scratch_lin + scratch_len;
+ DHD_ERROR(("HMAP: pcicorerev = %d\n", corerev));
+
+ if (corerev < 24) {
+ DHD_ERROR(("HMAP not available on pci corerev = %d\n", corerev));
+ return BCME_UNSUPPORTED;
+ }
+ if (set) {
+ if (hmap_params->enable) {
+ dhdmsgbuf_set_hmaptest_windows(dhd);
+ } else {
+ dhdmsgbuf_hmaptest_stop(dhd); /* stop will clear all programmed windows */
+ }
+ }
+
+ OSL_CACHE_FLUSH(dhd->prot->hmaptest.mem.va,
+ dhd->prot->hmaptest.mem.len);
+
+ window_config = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ (uint)PCI_HMAP_WINDOW_CONFIG(corerev), 0, 0);
+ nwindows = (window_config & PCI_HMAP_NWINDOWS_MASK) >> PCI_HMAP_NWINDOWS_SHIFT;
+ prot->hmap_enabled = nwindows ? TRUE : FALSE;
+
+ /* getting window config */
+ /* set bit 8:15 in windowconfig to enable n windows in order */
+ DHD_ERROR(("hmap: hmap status = %s\n", (prot->hmap_enabled ? "Enabled" : "Disabled")));
+ DHD_ERROR(("hmap: window config = 0x%08x\n", window_config));
+ DHD_ERROR(("hmap: Windows\n"));
+
+ hmapwindows = (pcie_hmapwindow_t *)((uintptr_t)PCI_HMAP_WINDOW_BASE(corerev));
+ /* getting windows */
+ if (nwindows > 8)
+ return BCME_ERROR;
+ for (i = 0; i < nwindows; i++) {
+ addr_lo = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ (uintptr_t)(&(hmapwindows[i].baseaddr_lo)), 0, 0);
+ addr_hi = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ (uintptr_t)(&(hmapwindows[i].baseaddr_hi)), 0, 0);
+ window_length = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ (uintptr_t)(&(hmapwindows[i].windowlength)), 0, 0);
+
+ DHD_ERROR(("hmap: window %d address lower=0x%08x upper=0x%08x length=0x%08x\n",
+ i, addr_lo, addr_hi, window_length));
+ }
+ addr_hi = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ (uint)(PCI_HMAP_VIOLATION_ADDR_U(corerev)), 0, 0);
+ addr_lo = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ (uint)(PCI_HMAP_VIOLATION_ADDR_L(corerev)), 0, 0);
+ window_length = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ (uint)(PCI_HMAP_VIOLATION_INFO(corerev)), 0, 0);
+ DHD_ERROR(("hmap: violation regs\n"));
+ DHD_ERROR(("hmap: violationaddr_hi =0x%08x\n", addr_hi));
+ DHD_ERROR(("hmap: violationaddr_lo =0x%08x\n", addr_lo));
+ DHD_ERROR(("hmap: violation_info =0x%08x\n", window_length));
+ DHD_ERROR(("hmap: Buffer allocated for HMAPTEST Start=0x%0llx len =0x%08x End =0x%0llx\n",
+ (uint64) scratch_lin, scratch_len, (uint64) w1_start));
+
+ return BCME_OK;
+}
+
+/* hmaptest iovar processing.
+ * This iovar triggers HMAPTEST with the given params on chips that have
+ * HMAP; DHD programs the hmap window registers with host addresses here.
+ */
+int
+dhdmsgbuf_hmaptest(dhd_pub_t *dhd, pcie_hmaptest_t *hmaptest_params)
+{
+
+ dhd_prot_t *prot = dhd->prot;
+ int ret = BCME_OK;
+ uint32 offset = 0;
+ uint64 scratch_lin;
+ dhd_bus_t *bus = dhd->bus;
+ uint corerev = bus->sih->buscorerev;
+
+ if (prot->hmaptest.in_progress) {
+ DHD_ERROR(("HMAPTEST already running. Try again.\n"));
+ return BCME_BUSY;
+ }
+
+ if (corerev < 24) {
+ DHD_ERROR(("HMAP not available on pci corerev = %d\n", corerev));
+ return BCME_UNSUPPORTED;
+ }
+
+ /* set in_progress only after the corerev check, so an unsupported
+ * core does not leave the flag stuck and report BCME_BUSY forever
+ */
+ prot->hmaptest.in_progress = TRUE;
+ prot->hmaptest.accesstype = hmaptest_params->accesstype;
+ prot->hmaptest.is_write = hmaptest_params->is_write;
+ prot->hmaptest.len = hmaptest_params->xfer_len;
+ prot->hmaptest.offset = hmaptest_params->host_offset;
+ offset = prot->hmaptest.offset;
+
+ DHD_ERROR(("hmaptest: is_write =%d accesstype=%d offset =%d len=%d value=0x%08x\n",
+ prot->hmaptest.is_write, prot->hmaptest.accesstype,
+ offset, prot->hmaptest.len, hmaptest_params->value));
+
+ DHD_ERROR(("hmaptest dma_lo=0x%08x hi=0x%08x pa\n",
+ (uint32)PHYSADDRLO(prot->hmaptest.mem.pa),
+ (uint32)PHYSADDRHI(prot->hmaptest.mem.pa)));
+
+ if (prot->hmaptest.accesstype == HMAPTEST_ACCESS_D11) {
+ if (prot->hmaptest.is_write) {
+ /* if d11 is writing then post rxbuf from scratch area */
+ dhd->prot->hmaptest_rx_active = HMAPTEST_D11_RX_ACTIVE;
+ } else {
+ /* if d11 is reading then post txbuf from scratch area */
+ dhd->prot->hmaptest_tx_active = HMAPTEST_D11_TX_ACTIVE;
+ }
+
+ } else {
+ uint32 pattern = 0xdeadbeef;
+ uint32 i;
+ uint32 maxbuflen = MIN(prot->hmaptest.len, (PKTBUFSZ));
+ char *fillbuf = (char *)dhd->prot->hmaptest.mem.va
+ + offset;
+ if ((fillbuf + maxbuflen) >
+ ((char *)dhd->prot->hmaptest.mem.va + dhd->prot->hmaptest.mem.len)) {
+ DHD_ERROR(("hmaptest: M2m/ARM ERROR offset + len outside buffer\n"));
+ dhd->prot->hmaptest.in_progress = FALSE;
+ return BCME_BADARG;
+ }
+
+ if (prot->hmaptest.accesstype == HMAPTEST_ACCESS_M2M) {
+ DHD_ERROR(("HMAPTEST_ACCESS_M2M\n"));
+ } else if (prot->hmaptest.accesstype == HMAPTEST_ACCESS_ARM) {
+ DHD_ERROR(("HMAPTEST_ACCESS_ARM\n"));
+ } else {
+ prot->hmaptest.in_progress = FALSE;
+ DHD_ERROR(("hmaptest: accesstype error\n"));
+ return BCME_BADARG;
+ }
+
+ /* fill a pattern at offset */
+ maxbuflen = ALIGN_SIZE(maxbuflen, (sizeof(uint32)));
+ memset(fillbuf, 0, maxbuflen);
+ DHD_ERROR(("hmaptest: dhd write pattern at addr=0x%p\n",
+ fillbuf));
+ DHD_ERROR(("pattern = %08x, %u times",
+ pattern, (uint32)(maxbuflen / sizeof(uint32))));
+ for (i = 0; i < maxbuflen; i += sizeof(uint32)) {
+ *(uint32 *)(fillbuf + i) = pattern;
+ }
+ OSL_CACHE_FLUSH(dhd->prot->hmaptest.mem.va,
+ dhd->prot->hmaptest.mem.len);
+ DHD_ERROR(("\n\n"));
+
+ }
+
+ /*
+ * Do not calculate address from scratch buffer + offset,
+ * if user supplied absolute address
+ */
+ if (hmaptest_params->host_addr_lo || hmaptest_params->host_addr_hi) {
+ if (prot->hmaptest.accesstype == HMAPTEST_ACCESS_D11) {
+ DHD_ERROR(("hmaptest: accesstype D11 does not support absolute addr\n"));
+ prot->hmaptest.in_progress = FALSE;
+ return BCME_UNSUPPORTED;
+ }
+ } else {
+ scratch_lin = (uint64)(PHYSADDRLO(prot->hmaptest.mem.pa) & 0xffffffff)
+ | (((uint64)PHYSADDRHI(prot->hmaptest.mem.pa) & 0xffffffff) << 32);
+ scratch_lin += offset;
+ hmaptest_params->host_addr_lo = htol32((uint32)(scratch_lin & 0xffffffff));
+ hmaptest_params->host_addr_hi = htol32((uint32)((scratch_lin >> 32) & 0xffffffff));
+ }
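+ /*
+ * Example of the split above (hypothetical PA): a scratch buffer at
+ * physical address 0x123456000 with offset 0x800 yields
+ * host_addr_lo = htol32(0x23456800) and host_addr_hi = htol32(0x1),
+ * i.e. the 64-bit host address travels to the dongle as two
+ * little-endian 32-bit halves.
+ */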
+
+ DHD_INFO(("HMAPTEST Started...\n"));
+ prot->hmaptest.start_usec = OSL_SYSUPTIME_US();
+ return ret;
+
+}
+
+#endif /* DHD_HMAPTEST */
+
+/* called before an ioctl is sent to the dongle */
+static void
+dhd_prot_wlioctl_intercept(dhd_pub_t *dhd, wl_ioctl_t * ioc, void * buf)
+{
+ dhd_prot_t *prot = dhd->prot;
+ int slen = 0;
+
+ if (ioc->cmd == WLC_SET_VAR && buf != NULL && !strcmp(buf, "pcie_bus_tput")) {
+ pcie_bus_tput_params_t *tput_params;
+
+ slen = strlen("pcie_bus_tput") + 1;
+ tput_params = (pcie_bus_tput_params_t*)((char *)buf + slen);
+ bcopy(&prot->host_bus_throughput_buf.pa, &tput_params->host_buf_addr,
+ sizeof(tput_params->host_buf_addr));
+ tput_params->host_buf_len = DHD_BUS_TPUT_BUF_LEN;
+ }
+
+#ifdef DHD_HMAPTEST
+ if (buf != NULL && !strcmp(buf, "bus:hmap")) {
+ pcie_hmap_t *hmap_params;
+ slen = strlen("bus:hmap") + 1;
+ hmap_params = (pcie_hmap_t*)((char *)buf + slen);
+ dhdmsgbuf_hmap(dhd, hmap_params, (ioc->cmd == WLC_SET_VAR));
+ }
+
+ if (ioc->cmd == WLC_SET_VAR && buf != NULL && !strcmp(buf, "bus:hmaptest")) {
+ pcie_hmaptest_t *hmaptest_params;
+
+ slen = strlen("bus:hmaptest") + 1;
+ hmaptest_params = (pcie_hmaptest_t*)((char *)buf + slen);
+ dhdmsgbuf_hmaptest(dhd, hmaptest_params);
+ }
+#endif /* DHD_HMAPTEST */
+}
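+/*
+ * Buffer layout assumed by the intercepts above: a WLC_SET_VAR payload
+ * is the NUL-terminated iovar name immediately followed by its parameter
+ * block, e.g. for "bus:hmaptest":
+ *
+ * | 'b' 'u' 's' ':' 'h' 'm' 'a' 'p' 't' 'e' 's' 't' '\0' | pcie_hmaptest_t |
+ *
+ * which is why each intercept skips strlen(name) + 1 bytes before
+ * casting the remainder to its parameter structure.
+ */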
+
+/* called after an ioctl returns from dongle */
+static void
+dhd_prot_wl_ioctl_ret_intercept(dhd_pub_t *dhd, wl_ioctl_t * ioc, void * buf,
+ int ifidx, int ret, int len)
+{
+
+#ifdef DHD_HMAPTEST
+ if (ioc->cmd == WLC_SET_VAR && buf != NULL && !strcmp(buf, "bus:hmaptest")) {
+ dhd_msgbuf_hmaptest_cmplt(dhd);
+ }
+#endif /* DHD_HMAPTEST */
+
+ if (!ret && ioc->cmd == WLC_SET_VAR && buf != NULL) {
+ int slen;
+ /* Intercept the wme_dp ioctl here */
+ if (!strcmp(buf, "wme_dp")) {
+ int val = 0;
+ slen = strlen("wme_dp") + 1;
+ if (len >= (int)(slen + sizeof(int)))
+ bcopy(((char *)buf + slen), &val, sizeof(int));
+ dhd->wme_dp = (uint8) ltoh32(val);
+ }
+
+#ifdef DHD_AWDL
+ /* Intercept the awdl_peer_op ioctl here */
+ if (!strcmp(buf, "awdl_peer_op")) {
+ slen = strlen("awdl_peer_op") + 1;
+ dhd_awdl_peer_op(dhd, (uint8)ifidx, ((char *)buf + slen), len - slen);
+ }
+ /* Intercept the awdl ioctl here, delete flow rings if awdl is
+ * disabled
+ */
+ if (!strcmp(buf, "awdl")) {
+ int val = 0;
+ slen = strlen("awdl") + 1;
+ if (len >= (int)(slen + sizeof(int))) {
+ bcopy(((char *)buf + slen), &val, sizeof(int));
+ val = ltoh32(val);
+ if (val == TRUE) {
+ /**
+ * Though we update the link status when we receive
+ * WLC_E_LINK from the dongle, that is not always
+ * guaranteed, so also intercept the awdl command
+ * fired from the app to update the status.
+ */
+ dhd_update_interface_link_status(dhd, (uint8)ifidx, TRUE);
+#if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS)
+ /* reset AWDL stats data structures when AWDL is enabled */
+ dhd_clear_awdl_stats(dhd);
+#endif /* DHD_AWDL && AWDL_SLOT_STATS */
+ } else if (val == FALSE) {
+ dhd_update_interface_link_status(dhd, (uint8)ifidx, FALSE);
+ dhd_del_all_sta(dhd, (uint8)ifidx);
+ dhd_awdl_peer_op(dhd, (uint8)ifidx, NULL, 0);
+
+ }
+ }
+
+ }
+
+ /* store the awdl min extension count and presence mode values
+ * set by the user, same will be inserted in the LLC header for
+ * each tx packet on the awdl iface
+ */
+ slen = strlen("awdl_extcounts");
+ if (!strncmp(buf, "awdl_extcounts", slen)) {
+ awdl_extcount_t *extcnt = NULL;
+ slen = slen + 1;
+ if ((len - slen) >= sizeof(*extcnt)) {
+ extcnt = (awdl_extcount_t *)((char *)buf + slen);
+ dhd->awdl_minext = extcnt->minExt;
+ }
+ }
+
+ slen = strlen("awdl_presencemode");
+ if (!strncmp(buf, "awdl_presencemode", slen)) {
+ slen = slen + 1;
+ if ((len - slen) >= sizeof(uint8)) {
+ dhd->awdl_presmode = *((uint8 *)((char *)buf + slen));
+ }
+ }
+#endif /* DHD_AWDL */
+ }
+
+}
+
+#ifdef DHD_PM_CONTROL_FROM_FILE
+extern bool g_pm_control;
+#endif /* DHD_PM_CONTROL_FROM_FILE */
+
+/** Use protocol to issue ioctl to dongle. Only one ioctl may be in transit. */
+int dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len)
+{
+ int ret = -1;
+ uint8 action;
+
+ if (dhd->bus->is_linkdown) {
+ DHD_ERROR_RLMT(("%s : PCIe link is down. we have nothing to do\n", __FUNCTION__));
+ goto done;
+ }
+
+ if (dhd_query_bus_erros(dhd)) {
+ DHD_ERROR_RLMT(("%s : some BUS error. we have nothing to do\n", __FUNCTION__));
+ goto done;
+ }
+
+ if ((dhd->busstate == DHD_BUS_DOWN) || dhd->hang_was_sent) {
+ DHD_ERROR_RLMT(("%s : bus is down. we have nothing to do -"
+ " bus state: %d, sent hang: %d\n", __FUNCTION__,
+ dhd->busstate, dhd->hang_was_sent));
+ goto done;
+ }
+
+ if (dhd->busstate == DHD_BUS_SUSPEND) {
+ DHD_ERROR(("%s : bus is suspended\n", __FUNCTION__));
+ goto done;
+ }
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+#ifdef DHD_PCIE_REG_ACCESS
+#ifdef BOARD_HIKEY
+#ifndef PCIE_LNK_SPEED_GEN1
+#define PCIE_LNK_SPEED_GEN1 0x1
+#endif
+ /* BUG_ON if link speed is GEN1 in Hikey for 4389B0 */
+ if (dhd->bus->sih->buscorerev == 72) {
+ if (dhd_get_pcie_linkspeed(dhd) == PCIE_LNK_SPEED_GEN1) {
+ DHD_ERROR(("%s: ******* Link Speed is GEN1 *********\n", __FUNCTION__));
+ BUG_ON(1);
+ }
+ }
+#endif /* BOARD_HIKEY */
+#endif /* DHD_PCIE_REG_ACCESS */
+
+ if (ioc->cmd == WLC_SET_PM) {
+#ifdef DHD_PM_CONTROL_FROM_FILE
+ if (g_pm_control == TRUE) {
+ DHD_ERROR(("%s: SET PM ignored!(Requested:%d)\n",
+ __FUNCTION__, buf ? *(char *)buf : 0));
+ goto done;
+ }
+#endif /* DHD_PM_CONTROL_FROM_FILE */
+#ifdef DHD_PM_OVERRIDE
+ {
+ extern bool g_pm_override;
+ if (g_pm_override == TRUE) {
+ DHD_ERROR(("%s: PM override SET PM ignored!(Requested:%d)\n",
+ __FUNCTION__, buf ? *(char *)buf : 0));
+ goto done;
+ }
+ }
+#endif /* DHD_PM_OVERRIDE */
+ DHD_TRACE_HW4(("%s: SET PM to %d\n", __FUNCTION__, buf ? *(char *)buf : 0));
+ }
+
+ ASSERT(len <= WLC_IOCTL_MAXLEN);
+
+ if (len > WLC_IOCTL_MAXLEN)
+ goto done;
+
+ action = ioc->set;
+
+ dhd_prot_wlioctl_intercept(dhd, ioc, buf);
+
+#if defined(EXT_STA)
+ wl_dbglog_ioctl_add(ioc, len, NULL);
+#endif
+ if (action & WL_IOCTL_ACTION_SET) {
+ ret = dhd_msgbuf_set_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
+ } else {
+ ret = dhd_msgbuf_query_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
+ if (ret > 0)
+ ioc->used = ret;
+ }
+
+ /* Too many programs assume ioctl() returns 0 on success */
+ if (ret >= 0) {
+ ret = 0;
+ } else {
+#ifndef DETAIL_DEBUG_LOG_FOR_IOCTL
+ DHD_INFO(("%s: status ret value is %d \n", __FUNCTION__, ret));
+#endif /* !DETAIL_DEBUG_LOG_FOR_IOCTL */
+ dhd->dongle_error = ret;
+ }
+
+ dhd_prot_wl_ioctl_ret_intercept(dhd, ioc, buf, ifidx, ret, len);
+
+done:
+ return ret;
+
+} /* dhd_prot_ioctl */
+
+/** test / loopback */
+
+/*
+ * XXX: This will fail with the new PCIe split-header Full Dongle, which
+ * uses fixed-size messages in the control submission ring: we send the
+ * lpbk data via a control message, and the lpbk data may be larger than
+ * the single control message being committed.
+ */
+int
+dhdmsgbuf_lpbk_req(dhd_pub_t *dhd, uint len)
+{
+ unsigned long flags;
+ dhd_prot_t *prot = dhd->prot;
+ uint16 alloced = 0;
+
+ ioct_reqst_hdr_t *ioct_rqst;
+
+ uint16 hdrlen = sizeof(ioct_reqst_hdr_t);
+ uint16 msglen = len + hdrlen;
+ msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
+
+ msglen = ALIGN_SIZE(msglen, DMA_ALIGN_LEN);
+ msglen = LIMIT_TO_MAX(msglen, MSGBUF_MAX_MSG_SIZE);
+
+#ifdef PCIE_INB_DW
+ if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
+ return BCME_ERROR;
+#endif /* PCIE_INB_DW */
+
+ DHD_RING_LOCK(ring->ring_lock, flags);
+
+ ioct_rqst = (ioct_reqst_hdr_t *)
+ dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
+
+ if (ioct_rqst == NULL) {
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+ return 0;
+ }
+
+ {
+ uint8 *ptr;
+ uint16 i;
+
+ ptr = (uint8 *)ioct_rqst; /* XXX: failure!!! */
+ for (i = 0; i < msglen; i++) {
+ ptr[i] = i % 256;
+ }
+ }
+
+ /* Common msg buf hdr */
+ ioct_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
+ ring->seqnum++;
+
+ ioct_rqst->msg.msg_type = MSG_TYPE_LOOPBACK;
+ ioct_rqst->msg.if_id = 0;
+ ioct_rqst->msg.flags = ring->current_phase;
+
+ bcm_print_bytes("LPBK REQ: ", (uint8 *)ioct_rqst, msglen);
+
+ /* update ring's WR index and ring doorbell to dongle */
+ dhd_prot_ring_write_complete(dhd, ring, ioct_rqst, 1);
+
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+
+ return 0;
+}
+
+/** test / loopback */
+void dmaxfer_free_dmaaddr(dhd_pub_t *dhd, dhd_dmaxfer_t *dmaxfer)
+{
+ if (dmaxfer == NULL)
+ return;
+
+ dhd_dma_buf_free(dhd, &dmaxfer->srcmem);
+ dhd_dma_buf_free(dhd, &dmaxfer->dstmem);
+}
+
+/** test / loopback */
+int
+dhd_prepare_schedule_dmaxfer_free(dhd_pub_t *dhdp)
+{
+ dhd_prot_t *prot = dhdp->prot;
+ dhd_dmaxfer_t *dmaxfer = &prot->dmaxfer;
+ dmaxref_mem_map_t *dmap = NULL;
+
+ dmap = MALLOCZ(dhdp->osh, sizeof(dmaxref_mem_map_t));
+ if (!dmap) {
+ DHD_ERROR(("%s: dmap alloc failed\n", __FUNCTION__));
+ goto mem_alloc_fail;
+ }
+ dmap->srcmem = &(dmaxfer->srcmem);
+ dmap->dstmem = &(dmaxfer->dstmem);
+
+ DMAXFER_FREE(dhdp, dmap);
+ return BCME_OK;
+
+mem_alloc_fail:
+ if (dmap) {
+ MFREE(dhdp->osh, dmap, sizeof(dmaxref_mem_map_t));
+ }
+ return BCME_NOMEM;
+} /* dhd_prepare_schedule_dmaxfer_free */
+
+/** test / loopback */
+void
+dmaxfer_free_prev_dmaaddr(dhd_pub_t *dhdp, dmaxref_mem_map_t *dmmap)
+{
+
+ dhd_dma_buf_free(dhdp, dmmap->srcmem);
+ dhd_dma_buf_free(dhdp, dmmap->dstmem);
+
+ MFREE(dhdp->osh, dmmap, sizeof(dmaxref_mem_map_t));
+
+ dhdp->bus->dmaxfer_complete = TRUE;
+ dhd_os_dmaxfer_wake(dhdp);
+} /* dmaxfer_free_prev_dmaaddr */
+
+/** test / loopback */
+int dmaxfer_prepare_dmaaddr(dhd_pub_t *dhd, uint len,
+ uint srcdelay, uint destdelay, dhd_dmaxfer_t *dmaxfer)
+{
+ uint i = 0, j = 0;
+ if (!dmaxfer)
+ return BCME_ERROR;
+
+ /* First free up existing buffers */
+ dmaxfer_free_dmaaddr(dhd, dmaxfer);
+
+ if (dhd_dma_buf_alloc(dhd, &dmaxfer->srcmem, len)) {
+ return BCME_NOMEM;
+ }
+
+ if (dhd_dma_buf_alloc(dhd, &dmaxfer->dstmem, len + 8)) {
+ dhd_dma_buf_free(dhd, &dmaxfer->srcmem);
+ return BCME_NOMEM;
+ }
+
+ dmaxfer->len = len;
+
+ /* Populate source with a pattern like below
+ * 0x00000000
+ * 0x01010101
+ * 0x02020202
+ * 0x03030303
+ * 0x04040404
+ * 0x05050505
+ * ...
+ * 0xFFFFFFFF
+ */
+ while (i < dmaxfer->len) {
+ ((uint8*)dmaxfer->srcmem.va)[i] = j % 256;
+ i++;
+ if (i % 4 == 0) {
+ j++;
+ }
+ }
+
+ OSL_CACHE_FLUSH(dmaxfer->srcmem.va, dmaxfer->len);
+
+ dmaxfer->srcdelay = srcdelay;
+ dmaxfer->destdelay = destdelay;
+
+ return BCME_OK;
+} /* dmaxfer_prepare_dmaaddr */
+
+static void
+dhd_msgbuf_dmaxfer_process(dhd_pub_t *dhd, void *msg)
+{
+ dhd_prot_t *prot = dhd->prot;
+ uint64 end_usec;
+ pcie_dmaxfer_cmplt_t *cmplt = (pcie_dmaxfer_cmplt_t *)msg;
+ int buf_free_scheduled;
+ int err = 0;
+
+ BCM_REFERENCE(cmplt);
+ end_usec = OSL_SYSUPTIME_US();
+
+#if defined(DHD_EFI) && defined(DHD_INTR_POLL_PERIOD_DYNAMIC)
+ /* restore interrupt poll period to the previous existing value */
+ dhd_os_set_intr_poll_period(dhd->bus, dhd->cur_intr_poll_period);
+#endif /* DHD_EFI && DHD_INTR_POLL_PERIOD_DYNAMIC */
+
+ DHD_ERROR(("DMA loopback status: %d\n", cmplt->compl_hdr.status));
+ prot->dmaxfer.status = cmplt->compl_hdr.status;
+ OSL_CACHE_INV(prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
+ if (prot->dmaxfer.d11_lpbk != M2M_WRITE_TO_RAM &&
+ prot->dmaxfer.d11_lpbk != M2M_READ_FROM_RAM &&
+ prot->dmaxfer.d11_lpbk != D11_WRITE_TO_RAM &&
+ prot->dmaxfer.d11_lpbk != D11_READ_FROM_RAM) {
+ err = memcmp(prot->dmaxfer.srcmem.va,
+ prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
+ }
+ if (prot->dmaxfer.srcmem.va && prot->dmaxfer.dstmem.va) {
+ if (err ||
+ cmplt->compl_hdr.status != BCME_OK) {
+ DHD_ERROR(("DMA loopback failed\n"));
+ /* It is sometimes observed that the completion
+ * header status is OK but the memcmp fails, so
+ * always explicitly set the dmaxfer status to
+ * error when that happens.
+ */
+ prot->dmaxfer.status = BCME_ERROR;
+ prhex("XFER SRC: ",
+ prot->dmaxfer.srcmem.va, prot->dmaxfer.len);
+ prhex("XFER DST: ",
+ prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
+ }
+ else {
+ switch (prot->dmaxfer.d11_lpbk) {
+ case M2M_DMA_LPBK: {
+ DHD_ERROR(("DMA successful pcie m2m DMA loopback\n"));
+ } break;
+ case D11_LPBK: {
+ DHD_ERROR(("DMA successful with d11 loopback\n"));
+ } break;
+ case BMC_LPBK: {
+ DHD_ERROR(("DMA successful with bmc loopback\n"));
+ } break;
+ case M2M_NON_DMA_LPBK: {
+ DHD_ERROR(("DMA successful pcie m2m NON DMA loopback\n"));
+ } break;
+ case D11_HOST_MEM_LPBK: {
+ DHD_ERROR(("DMA successful d11 host mem loopback\n"));
+ } break;
+ case BMC_HOST_MEM_LPBK: {
+ DHD_ERROR(("DMA successful bmc host mem loopback\n"));
+ } break;
+ case M2M_WRITE_TO_RAM: {
+ DHD_ERROR(("DMA successful pcie m2m write to ram\n"));
+ } break;
+ case M2M_READ_FROM_RAM: {
+ DHD_ERROR(("DMA successful pcie m2m read from ram\n"));
+ prhex("XFER DST: ",
+ prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
+ } break;
+ case D11_WRITE_TO_RAM: {
+ DHD_ERROR(("DMA successful D11 write to ram\n"));
+ } break;
+ case D11_READ_FROM_RAM: {
+ DHD_ERROR(("DMA successful D11 read from ram\n"));
+ prhex("XFER DST: ",
+ prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
+ } break;
+ default: {
+ DHD_ERROR(("Invalid loopback option\n"));
+ } break;
+ }
+
+ if (DHD_LPBKDTDUMP_ON()) {
+ /* debug info print of the Tx and Rx buffers */
+ dhd_prhex("XFER SRC: ", prot->dmaxfer.srcmem.va,
+ prot->dmaxfer.len, DHD_INFO_VAL);
+ dhd_prhex("XFER DST: ", prot->dmaxfer.dstmem.va,
+ prot->dmaxfer.len, DHD_INFO_VAL);
+ }
+ }
+ }
+
+ buf_free_scheduled = dhd_prepare_schedule_dmaxfer_free(dhd);
+ end_usec -= prot->dmaxfer.start_usec;
+ if (end_usec) {
+ prot->dmaxfer.time_taken = end_usec;
+ DHD_ERROR(("DMA loopback %d bytes in %lu usec, %u kBps\n",
+ prot->dmaxfer.len, (unsigned long)end_usec,
+ (prot->dmaxfer.len * (1000 * 1000 / 1024) / (uint32)end_usec)));
+ }
+ dhd->prot->dmaxfer.in_progress = FALSE;
+
+ if (buf_free_scheduled != BCME_OK) {
+ dhd->bus->dmaxfer_complete = TRUE;
+ dhd_os_dmaxfer_wake(dhd);
+ }
+}
+
+/** Test functionality.
+ * Transfers bytes from host to dongle and to host again using DMA
+ * This function is not reentrant, as prot->dmaxfer.in_progress is not protected
+ * by a spinlock.
+ */
+int
+dhdmsgbuf_dmaxfer_req(dhd_pub_t *dhd, uint len, uint srcdelay, uint destdelay,
+ uint d11_lpbk, uint core_num, uint32 mem_addr)
+{
+ unsigned long flags;
+ int ret = BCME_OK;
+ dhd_prot_t *prot = dhd->prot;
+ pcie_dma_xfer_params_t *dmap;
+ uint32 xferlen = LIMIT_TO_MAX(len, DMA_XFER_LEN_LIMIT);
+ uint16 alloced = 0;
+ msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
+
+ /* XXX: prot->dmaxfer.in_progress is not protected by lock */
+ if (prot->dmaxfer.in_progress) {
+ DHD_ERROR(("DMA is in progress...\n"));
+ return BCME_ERROR;
+ }
+
+ if (d11_lpbk >= MAX_LPBK) {
+ DHD_ERROR(("loopback mode should be either"
+ " 0-PCIE_M2M_DMA, 1-D11, 2-BMC or 3-PCIE_M2M_NonDMA\n"));
+ return BCME_ERROR;
+ }
+
+#ifdef PCIE_INB_DW
+ if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK) {
+ return BCME_ERROR;
+ }
+#endif /* PCIE_INB_DW */
+
+ prot->dmaxfer.in_progress = TRUE;
+ if ((ret = dmaxfer_prepare_dmaaddr(dhd, xferlen, srcdelay, destdelay,
+ &prot->dmaxfer)) != BCME_OK) {
+ prot->dmaxfer.in_progress = FALSE;
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+ return ret;
+ }
+ DHD_RING_LOCK(ring->ring_lock, flags);
+ dmap = (pcie_dma_xfer_params_t *)
+ dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
+
+ if (dmap == NULL) {
+ dmaxfer_free_dmaaddr(dhd, &prot->dmaxfer);
+ prot->dmaxfer.in_progress = FALSE;
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+ return BCME_NOMEM;
+ }
+
+ /* Common msg buf hdr */
+ dmap->cmn_hdr.msg_type = MSG_TYPE_LPBK_DMAXFER;
+ dmap->cmn_hdr.request_id = htol32(DHD_FAKE_PKTID);
+ dmap->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
+ dmap->cmn_hdr.flags = ring->current_phase;
+ ring->seqnum++;
+
+ dmap->host_input_buf_addr.high = htol32(PHYSADDRHI(prot->dmaxfer.srcmem.pa));
+ dmap->host_input_buf_addr.low = htol32(PHYSADDRLO(prot->dmaxfer.srcmem.pa));
+ dmap->host_ouput_buf_addr.high = htol32(PHYSADDRHI(prot->dmaxfer.dstmem.pa));
+ dmap->host_ouput_buf_addr.low = htol32(PHYSADDRLO(prot->dmaxfer.dstmem.pa));
+ dmap->xfer_len = htol32(prot->dmaxfer.len);
+ dmap->srcdelay = htol32(prot->dmaxfer.srcdelay);
+ dmap->destdelay = htol32(prot->dmaxfer.destdelay);
+ prot->dmaxfer.d11_lpbk = d11_lpbk;
+ if (d11_lpbk == M2M_WRITE_TO_RAM) {
+ dmap->host_ouput_buf_addr.high = 0x0;
+ dmap->host_ouput_buf_addr.low = mem_addr;
+ } else if (d11_lpbk == M2M_READ_FROM_RAM) {
+ dmap->host_input_buf_addr.high = 0x0;
+ dmap->host_input_buf_addr.low = mem_addr;
+ } else if (d11_lpbk == D11_WRITE_TO_RAM) {
+ dmap->host_ouput_buf_addr.high = 0x0;
+ dmap->host_ouput_buf_addr.low = mem_addr;
+ } else if (d11_lpbk == D11_READ_FROM_RAM) {
+ dmap->host_input_buf_addr.high = 0x0;
+ dmap->host_input_buf_addr.low = mem_addr;
+ }
+ dmap->flags = (((core_num & PCIE_DMA_XFER_FLG_CORE_NUMBER_MASK)
+ << PCIE_DMA_XFER_FLG_CORE_NUMBER_SHIFT) |
+ ((prot->dmaxfer.d11_lpbk & PCIE_DMA_XFER_FLG_D11_LPBK_MASK)
+ << PCIE_DMA_XFER_FLG_D11_LPBK_SHIFT));
+ prot->dmaxfer.start_usec = OSL_SYSUPTIME_US();
+
+ /* update ring's WR index and ring doorbell to dongle */
+ dhd_prot_ring_write_complete(dhd, ring, dmap, 1);
+
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+
+ DHD_ERROR(("DMA loopback Started... on core[%d]\n", core_num));
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+
+ return BCME_OK;
+} /* dhdmsgbuf_dmaxfer_req */
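+
+/* Minimal usage sketch (hypothetical caller, not part of this driver):
+ * start a 64 KB PCIE_M2M_DMA loopback on core 0 and poll its status.
+ *
+ *	dma_xfer_info_t info;
+ *	if (dhdmsgbuf_dmaxfer_req(dhd, 65536, 0, 0, 0, 0, 0) == BCME_OK) {
+ *		do {
+ *			OSL_SLEEP(10);
+ *			dhdmsgbuf_dmaxfer_status(dhd, &info);
+ *		} while (info.status == DMA_XFER_IN_PROGRESS);
+ *	}
+ */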
+
+int
+dhdmsgbuf_dmaxfer_status(dhd_pub_t *dhd, dma_xfer_info_t *result)
+{
+ dhd_prot_t *prot = dhd->prot;
+
+ if (prot->dmaxfer.in_progress)
+ result->status = DMA_XFER_IN_PROGRESS;
+ else if (prot->dmaxfer.status == 0)
+ result->status = DMA_XFER_SUCCESS;
+ else
+ result->status = DMA_XFER_FAILED;
+
+ result->type = prot->dmaxfer.d11_lpbk;
+ result->error_code = prot->dmaxfer.status;
+ result->num_bytes = prot->dmaxfer.len;
+ result->time_taken = prot->dmaxfer.time_taken;
+ if (prot->dmaxfer.time_taken) {
+ /* throughput in kBps */
+ result->tput =
+ (prot->dmaxfer.len * (1000 * 1000 / 1024)) /
+ (uint32)prot->dmaxfer.time_taken;
+ }
+
+ return BCME_OK;
+}
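+
+/* Worked example of the throughput arithmetic above (illustrative numbers):
+ * with integer math, (1000 * 1000 / 1024) evaluates to 976, so a 65536-byte
+ * loopback completing in 4000 usec reports (65536 * 976) / 4000 = 15990 kBps.
+ * Multiplying before dividing preserves precision for short transfers, but
+ * would overflow 32-bit arithmetic for lengths above roughly 4.4 MB, so the
+ * clamped xferlen is assumed to stay below that bound.
+ */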
+
+/** Called in the process of submitting an ioctl to the dongle */
+static int
+dhd_msgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
+{
+ int ret = 0;
+ uint copylen = 0;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (dhd->bus->is_linkdown) {
+ DHD_ERROR(("%s : PCIe link is down. we have nothing to do\n",
+ __FUNCTION__));
+ return -EIO;
+ }
+
+ if (dhd->busstate == DHD_BUS_DOWN) {
+ DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
+ return -EIO;
+ }
+
+ /* don't talk to the dongle if fw is about to be reloaded */
+ if (dhd->hang_was_sent) {
+ DHD_ERROR(("%s: HANG was sent up earlier. Not talking to the chip\n",
+ __FUNCTION__));
+ return -EIO;
+ }
+
+ if (cmd == WLC_GET_VAR && buf)
+ {
+ if (!len || !*(uint8 *)buf) {
+ DHD_ERROR(("%s(): Zero length bailing\n", __FUNCTION__));
+ ret = BCME_BADARG;
+ goto done;
+ }
+
+ /* Respond "bcmerror" and "bcmerrorstr" with local cache */
+ copylen = MIN(len, BCME_STRLEN);
+
+ if ((len >= strlen("bcmerrorstr")) &&
+ (!strcmp((char *)buf, "bcmerrorstr"))) {
+ strlcpy((char *)buf, bcmerrorstr(dhd->dongle_error), copylen);
+ goto done;
+ } else if ((len >= strlen("bcmerror")) &&
+ !strcmp((char *)buf, "bcmerror")) {
+			*(uint32 *)buf = dhd->dongle_error;
+ goto done;
+ }
+ }
+
+ DHD_CTL(("query_ioctl: ACTION %d ifdix %d cmd %d len %d \n",
+ action, ifidx, cmd, len));
+#ifdef REPORT_FATAL_TIMEOUTS
+	/*
+	 * These timers must be started before sending the H2D interrupt.
+	 * Consider the scenario where the H2D interrupt fires and the dongle
+	 * responds back immediately: from the DPC we would stop the cmd and bus
+	 * timers. But the process context could have switched out, leading to
+	 * a situation where the timers are stopped before they are ever started.
+	 *
+	 * Disable preemption from the time we start the timers until we are done
+	 * sending the H2D interrupt.
+	 */
+ OSL_DISABLE_PREEMPTION(dhd->osh);
+ dhd_set_request_id(dhd, dhd->prot->ioctl_trans_id+1, cmd);
+ dhd_start_cmd_timer(dhd);
+ dhd_start_bus_timer(dhd);
+#endif /* REPORT_FATAL_TIMEOUTS */
+
+ ret = dhd_fillup_ioct_reqst(dhd, (uint16)len, cmd, buf, ifidx);
+
+#ifdef REPORT_FATAL_TIMEOUTS
+	/* If for some reason we fail to ring the doorbell, stop the timers */
+ if (ret < 0) {
+ DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed \r\n", __FUNCTION__));
+ dhd_stop_cmd_timer(dhd);
+ dhd_stop_bus_timer(dhd);
+ OSL_ENABLE_PREEMPTION(dhd->osh);
+ goto done;
+ }
+ OSL_ENABLE_PREEMPTION(dhd->osh);
+#else
+ if (ret < 0) {
+ DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed \r\n", __FUNCTION__));
+ goto done;
+ }
+#endif /* REPORT_FATAL_TIMEOUTS */
+
+ /* wait for IOCTL completion message from dongle and get first fragment */
+ ret = dhd_msgbuf_wait_ioctl_cmplt(dhd, len, buf);
+
+done:
+ return ret;
+}
+
+void
+dhd_msgbuf_iovar_timeout_dump(dhd_pub_t *dhd)
+{
+ uint32 intstatus;
+ dhd_prot_t *prot = dhd->prot;
+ dhd->rxcnt_timeout++;
+ dhd->rx_ctlerrs++;
+ DHD_ERROR(("%s: resumed on timeout rxcnt_timeout%s %d ioctl_cmd %d "
+ "trans_id %d state %d busstate=%d ioctl_received=%d\n", __FUNCTION__,
+ dhd->is_sched_error ? " due to scheduling problem" : "",
+ dhd->rxcnt_timeout, prot->curr_ioctl_cmd, prot->ioctl_trans_id,
+ prot->ioctl_state, dhd->busstate, prot->ioctl_received));
+#if defined(DHD_KERNEL_SCHED_DEBUG) && defined(DHD_FW_COREDUMP)
+	/* XXX DHD triggers a kernel panic if the resume-on-timeout occurs
+	 * due to tasklet or workqueue scheduling problems in the Linux kernel.
+	 * The customer reports that it is hard to find any clue in the
+	 * host memory dump, since the relevant tasklet or workqueue information
+	 * has already disappeared due to the latency of printing the timestamp
+	 * logs used to debug the scan timeout issue.
+	 * For this reason, the customer requested that we trigger a kernel panic
+	 * rather than take a SOCRAM dump.
+	 */
+ if (dhd->is_sched_error && dhd->memdump_enabled == DUMP_MEMFILE_BUGON) {
+ /* change g_assert_type to trigger Kernel panic */
+ g_assert_type = 2;
+ /* use ASSERT() to trigger panic */
+ ASSERT(0);
+ }
+#endif /* DHD_KERNEL_SCHED_DEBUG && DHD_FW_COREDUMP */
+
+ if (prot->curr_ioctl_cmd == WLC_SET_VAR ||
+ prot->curr_ioctl_cmd == WLC_GET_VAR) {
+ char iovbuf[32];
+ int dump_size = 128;
+ uint8 *ioctl_buf = (uint8 *)prot->ioctbuf.va;
+ memset(iovbuf, 0, sizeof(iovbuf));
+ strncpy(iovbuf, ioctl_buf, sizeof(iovbuf) - 1);
+ iovbuf[sizeof(iovbuf) - 1] = '\0';
+ DHD_ERROR(("Current IOVAR (%s): %s\n",
+ prot->curr_ioctl_cmd == WLC_SET_VAR ?
+ "WLC_SET_VAR" : "WLC_GET_VAR", iovbuf));
+ DHD_ERROR(("========== START IOCTL REQBUF DUMP ==========\n"));
+ prhex("ioctl_buf", (const u8 *) ioctl_buf, dump_size);
+ DHD_ERROR(("\n========== END IOCTL REQBUF DUMP ==========\n"));
+ }
+
+ /* Check the PCIe link status by reading intstatus register */
+ intstatus = si_corereg(dhd->bus->sih,
+ dhd->bus->sih->buscoreidx, dhd->bus->pcie_mailbox_int, 0, 0);
+ if (intstatus == (uint32)-1) {
+ DHD_ERROR(("%s : PCIe link might be down\n", __FUNCTION__));
+ dhd->bus->is_linkdown = TRUE;
+ }
+
+ dhd_bus_dump_console_buffer(dhd->bus);
+ dhd_prot_debug_info_print(dhd);
+}
+
+/**
+ * Waits for IOCTL completion message from the dongle, copies this into caller
+ * provided parameter 'buf'.
+ */
+static int
+dhd_msgbuf_wait_ioctl_cmplt(dhd_pub_t *dhd, uint32 len, void *buf)
+{
+ dhd_prot_t *prot = dhd->prot;
+ int timeleft;
+ unsigned long flags;
+ int ret = 0;
+ static uint cnt = 0;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (dhd_query_bus_erros(dhd)) {
+ ret = -EIO;
+ goto out;
+ }
+#ifdef GDB_PROXY
+ /* Loop while timeout is caused by firmware stop in GDB */
+ {
+ uint32 prev_stop_count;
+ do {
+ prev_stop_count = dhd->gdb_proxy_stop_count;
+ timeleft = dhd_os_ioctl_resp_wait(dhd, (uint *)&prot->ioctl_received);
+ } while ((timeleft == 0) && ((dhd->gdb_proxy_stop_count != prev_stop_count) ||
+ (dhd->gdb_proxy_stop_count & GDB_PROXY_STOP_MASK)));
+ }
+#else
+ timeleft = dhd_os_ioctl_resp_wait(dhd, (uint *)&prot->ioctl_received);
+#endif /* GDB_PROXY */
+
+#ifdef DHD_RECOVER_TIMEOUT
+ if (prot->ioctl_received == 0) {
+ uint32 intstatus = si_corereg(dhd->bus->sih,
+ dhd->bus->sih->buscoreidx, dhd->bus->pcie_mailbox_int, 0, 0);
+		int host_irq_disabled = dhdpcie_irq_disabled(dhd->bus);
+ if ((intstatus) && (intstatus != (uint32)-1) &&
+ (timeleft == 0) && (!dhd_query_bus_erros(dhd))) {
+ DHD_ERROR(("%s: iovar timeout trying again intstatus=%x"
+ " host_irq_disabled=%d\n",
+				__FUNCTION__, intstatus, host_irq_disabled));
+ dhd_pcie_intr_count_dump(dhd);
+ dhd_print_tasklet_status(dhd);
+ dhd_prot_process_ctrlbuf(dhd);
+ timeleft = dhd_os_ioctl_resp_wait(dhd, (uint *)&prot->ioctl_received);
+ /* Clear Interrupts */
+ dhdpcie_bus_clear_intstatus(dhd->bus);
+ }
+ }
+#endif /* DHD_RECOVER_TIMEOUT */
+
+ if (dhd->conf->ctrl_resched > 0 && timeleft == 0 && (!dhd_query_bus_erros(dhd))) {
+ cnt++;
+ if (cnt <= dhd->conf->ctrl_resched) {
+ uint buscorerev = dhd->bus->sih->buscorerev;
+ uint32 intstatus = 0, intmask = 0;
+ intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxInt(buscorerev), 0, 0);
+ intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxMask(buscorerev), 0, 0);
+ if (intstatus) {
+ DHD_ERROR(("%s: reschedule dhd_dpc, cnt=%d, intstatus=0x%x, intmask=0x%x\n",
+ __FUNCTION__, cnt, intstatus, intmask));
+ dhd->bus->intstatus = intstatus;
+ dhd->bus->ipend = TRUE;
+ dhd->bus->dpc_sched = TRUE;
+ dhd_sched_dpc(dhd);
+ timeleft = dhd_os_ioctl_resp_wait(dhd, &prot->ioctl_received);
+ }
+ }
+ } else {
+ cnt = 0;
+ }
+
+ if (timeleft == 0 && (!dhd_query_bus_erros(dhd))) {
+ if (dhd->check_trap_rot) {
+ /* check dongle trap first */
+ DHD_ERROR(("Check dongle trap in the case of iovar timeout\n"));
+ dhd_bus_checkdied(dhd->bus, NULL, 0);
+
+ if (dhd->dongle_trap_occured) {
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
+ dhd->bus->no_cfg_restore = 1;
+#endif /* CONFIG_ARCH_MSM */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+ ret = -EREMOTEIO;
+ goto out;
+ }
+ }
+ /* check if resumed on time out related to scheduling issue */
+ dhd->is_sched_error = dhd_bus_query_dpc_sched_errors(dhd);
+
+ dhd->iovar_timeout_occured = TRUE;
+ dhd_msgbuf_iovar_timeout_dump(dhd);
+
+#ifdef DHD_FW_COREDUMP
+ /* Collect socram dump */
+ if (dhd->memdump_enabled) {
+ /* collect core dump */
+ dhd->memdump_type = DUMP_TYPE_RESUMED_ON_TIMEOUT;
+ dhd_bus_mem_dump(dhd);
+ }
+#endif /* DHD_FW_COREDUMP */
+
+#ifdef DHD_EFI
+		/*
+		 * For an ioctl timeout, recovery is triggered only in the EFI case:
+		 * on Linux the dhd daemon will itself trap the FW, so triggering
+		 * recovery here would race the FLR against the daemon-initiated trap.
+		 */
+ dhd_schedule_reset(dhd);
+#endif /* DHD_EFI */
+
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
+ dhd->bus->no_cfg_restore = 1;
+#endif /* CONFIG_ARCH_MSM */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+ ret = -ETIMEDOUT;
+ goto out;
+ } else {
+ if (prot->ioctl_received != IOCTL_RETURN_ON_SUCCESS) {
+ DHD_ERROR(("%s: IOCTL failure due to ioctl_received = %d\n",
+ __FUNCTION__, prot->ioctl_received));
+ ret = -EINVAL;
+ goto out;
+ }
+ dhd->rxcnt_timeout = 0;
+ dhd->rx_ctlpkts++;
+ DHD_CTL(("%s: ioctl resp resumed, got %d\n",
+ __FUNCTION__, prot->ioctl_resplen));
+ }
+
+ if (dhd->prot->ioctl_resplen > len)
+ dhd->prot->ioctl_resplen = (uint16)len;
+ if (buf)
+ bcopy(dhd->prot->retbuf.va, buf, dhd->prot->ioctl_resplen);
+
+ ret = (int)(dhd->prot->ioctl_status);
+
+out:
+ DHD_GENERAL_LOCK(dhd, flags);
+ dhd->prot->ioctl_state = 0;
+ dhd->prot->ioctl_resplen = 0;
+ dhd->prot->ioctl_received = IOCTL_WAIT;
+ dhd->prot->curr_ioctl_cmd = 0;
+ DHD_GENERAL_UNLOCK(dhd, flags);
+
+ return ret;
+} /* dhd_msgbuf_wait_ioctl_cmplt */
+
+static int
+dhd_msgbuf_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
+{
+ int ret = 0;
+
+ DHD_TRACE(("%s: Enter \n", __FUNCTION__));
+
+ if (dhd->bus->is_linkdown) {
+ DHD_ERROR(("%s : PCIe link is down. we have nothing to do\n",
+ __FUNCTION__));
+ return -EIO;
+ }
+
+ if (dhd->busstate == DHD_BUS_DOWN) {
+ DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
+ return -EIO;
+ }
+
+ /* don't talk to the dongle if fw is about to be reloaded */
+ if (dhd->hang_was_sent) {
+ DHD_ERROR(("%s: HANG was sent up earlier. Not talking to the chip\n",
+ __FUNCTION__));
+ return -EIO;
+ }
+
+ DHD_CTL(("ACTION %d ifdix %d cmd %d len %d \n",
+ action, ifidx, cmd, len));
+
+#ifdef REPORT_FATAL_TIMEOUTS
+	/*
+	 * These timers must be started before sending the H2D interrupt.
+	 * Consider the scenario where the H2D interrupt fires and the dongle
+	 * responds back immediately: from the DPC we would stop the cmd and bus
+	 * timers. But the process context could have switched out, leading to
+	 * a situation where the timers are stopped before they are ever started.
+	 *
+	 * Disable preemption from the time we start the timers until we are done
+	 * sending the H2D interrupt.
+	 */
+ OSL_DISABLE_PREEMPTION(dhd->osh);
+ dhd_set_request_id(dhd, dhd->prot->ioctl_trans_id+1, cmd);
+ dhd_start_cmd_timer(dhd);
+ dhd_start_bus_timer(dhd);
+#endif /* REPORT_FATAL_TIMEOUTS */
+
+ /* Fill up msgbuf for ioctl req */
+ ret = dhd_fillup_ioct_reqst(dhd, (uint16)len, cmd, buf, ifidx);
+
+#ifdef REPORT_FATAL_TIMEOUTS
+	/* If for some reason we fail to ring the doorbell, stop the timers */
+ if (ret < 0) {
+ DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed \r\n", __FUNCTION__));
+ dhd_stop_cmd_timer(dhd);
+ dhd_stop_bus_timer(dhd);
+ OSL_ENABLE_PREEMPTION(dhd->osh);
+ goto done;
+ }
+
+ OSL_ENABLE_PREEMPTION(dhd->osh);
+#else
+ if (ret < 0) {
+ DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed \r\n", __FUNCTION__));
+ goto done;
+ }
+#endif /* REPORT_FATAL_TIMEOUTS */
+
+ ret = dhd_msgbuf_wait_ioctl_cmplt(dhd, len, buf);
+
+done:
+ return ret;
+}
+
+/** Called by upper DHD layer. Handles a protocol control response asynchronously. */
+int dhd_prot_ctl_complete(dhd_pub_t *dhd)
+{
+ return 0;
+}
+
+/** Called by upper DHD layer. Check for and handle local prot-specific iovar commands */
+int dhd_prot_iovar_op(dhd_pub_t *dhd, const char *name,
+ void *params, int plen, void *arg, int len, bool set)
+{
+ return BCME_UNSUPPORTED;
+}
+
+#ifdef DHD_DUMP_PCIE_RINGS
+int dhd_d2h_h2d_ring_dump(dhd_pub_t *dhd, void *file, const void *user_buf,
+ unsigned long *file_posn, bool file_write)
+{
+ dhd_prot_t *prot;
+ msgbuf_ring_t *ring;
+ int ret = 0;
+ uint16 h2d_flowrings_total;
+ uint16 flowid;
+
+ if (!(dhd) || !(dhd->prot)) {
+ goto exit;
+ }
+ prot = dhd->prot;
+
+	/* The ring dump sequence below matches the one followed by the parser. */
+ ring = &prot->h2dring_ctrl_subn;
+ if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
+ goto exit;
+
+ ring = &prot->h2dring_rxp_subn;
+ if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
+ goto exit;
+
+ ring = &prot->d2hring_ctrl_cpln;
+ if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
+ goto exit;
+
+ ring = &prot->d2hring_tx_cpln;
+ if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
+ goto exit;
+
+ ring = &prot->d2hring_rx_cpln;
+ if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
+ goto exit;
+
+ h2d_flowrings_total = dhd_get_max_flow_rings(dhd);
+ FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) {
+ if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0) {
+ goto exit;
+ }
+ }
+
+#ifdef EWP_EDL
+ if (dhd->dongle_edl_support) {
+ ring = prot->d2hring_edl;
+ if ((ret = dhd_edl_ring_hdr_write(dhd, ring, file, user_buf, file_posn)) < 0)
+ goto exit;
+ }
+ else if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6 && !dhd->dongle_edl_support)
+#else
+ if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6)
+#endif /* EWP_EDL */
+ {
+ ring = prot->h2dring_info_subn;
+ if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
+ goto exit;
+
+ ring = prot->d2hring_info_cpln;
+ if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
+ goto exit;
+ }
+
+exit:
+ return ret;
+}
+
+/* Write to file */
+static
+int dhd_ring_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file,
+ const void *user_buf, unsigned long *file_posn)
+{
+ int ret = 0;
+
+ if (ring == NULL) {
+ DHD_ERROR(("%s: Ring not initialised, failed to dump ring contents\n",
+ __FUNCTION__));
+ return BCME_ERROR;
+ }
+ if (file) {
+ ret = dhd_os_write_file_posn(file, file_posn, (char *)(ring->dma_buf.va),
+ ((unsigned long)(ring->max_items) * (ring->item_len)));
+ if (ret < 0) {
+ DHD_ERROR(("%s: write file error !\n", __FUNCTION__));
+ ret = BCME_ERROR;
+ }
+ } else if (user_buf) {
+ ret = dhd_export_debug_data((char *)(ring->dma_buf.va), NULL, user_buf,
+ ((unsigned long)(ring->max_items) * (ring->item_len)), (int *)file_posn);
+ }
+ return ret;
+}
+
+#ifdef EWP_EDL
+/* Write to file */
+static
+int dhd_edl_ring_hdr_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file, const void *user_buf,
+ unsigned long *file_posn)
+{
+ int ret = 0, nitems = 0;
+ char *buf = NULL, *ptr = NULL;
+ uint8 *msg_addr = NULL;
+ uint16 rd = 0;
+
+ if (ring == NULL) {
+ DHD_ERROR(("%s: Ring not initialised, failed to dump ring contents\n",
+ __FUNCTION__));
+ ret = BCME_ERROR;
+ goto done;
+ }
+
+ buf = MALLOCZ(dhd->osh, (D2HRING_EDL_MAX_ITEM * D2HRING_EDL_HDR_SIZE));
+ if (buf == NULL) {
+ DHD_ERROR(("%s: buffer allocation failed\n", __FUNCTION__));
+ ret = BCME_ERROR;
+ goto done;
+ }
+ ptr = buf;
+
+ for (; nitems < D2HRING_EDL_MAX_ITEM; nitems++, rd++) {
+ msg_addr = (uint8 *)ring->dma_buf.va + (rd * ring->item_len);
+ memcpy(ptr, (char *)msg_addr, D2HRING_EDL_HDR_SIZE);
+ ptr += D2HRING_EDL_HDR_SIZE;
+ }
+ if (file) {
+ ret = dhd_os_write_file_posn(file, file_posn, buf,
+ (D2HRING_EDL_HDR_SIZE * D2HRING_EDL_MAX_ITEM));
+ if (ret < 0) {
+ DHD_ERROR(("%s: write file error !\n", __FUNCTION__));
+ goto done;
+ }
+ }
+ else {
+ ret = dhd_export_debug_data(buf, NULL, user_buf,
+ (D2HRING_EDL_HDR_SIZE * D2HRING_EDL_MAX_ITEM), file_posn);
+ }
+
+done:
+ if (buf) {
+ MFREE(dhd->osh, buf, (D2HRING_EDL_MAX_ITEM * D2HRING_EDL_HDR_SIZE));
+ }
+ return ret;
+}
+#endif /* EWP_EDL */
+#endif /* DHD_DUMP_PCIE_RINGS */
+
+/** Add prot dump output to a buffer */
+void dhd_prot_dump(dhd_pub_t *dhd, struct bcmstrbuf *b)
+{
+#if defined(BCM_ROUTER_DHD)
+ bcm_bprintf(b, "DHD Router: 1GMAC HotBRC forwarding mode\n");
+#endif /* BCM_ROUTER_DHD */
+
+ if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM)
+ bcm_bprintf(b, "\nd2h_sync: SEQNUM:");
+ else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM)
+ bcm_bprintf(b, "\nd2h_sync: XORCSUM:");
+ else
+ bcm_bprintf(b, "\nd2h_sync: NONE:");
+ bcm_bprintf(b, " d2h_sync_wait max<%lu> tot<%lu>\n",
+ dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot);
+
+ bcm_bprintf(b, "\nDongle DMA Indices: h2d %d d2h %d index size %d bytes\n",
+ dhd->dma_h2d_ring_upd_support,
+ dhd->dma_d2h_ring_upd_support,
+ dhd->prot->rw_index_sz);
+ bcm_bprintf(b, "h2d_max_txpost: %d, prot->h2d_max_txpost: %d\n",
+ h2d_max_txpost, dhd->prot->h2d_max_txpost);
+#if defined(DHD_HTPUT_TUNABLES)
+ bcm_bprintf(b, "h2d_htput_max_txpost: %d, prot->h2d_htput_max_txpost: %d\n",
+ h2d_htput_max_txpost, dhd->prot->h2d_htput_max_txpost);
+#endif /* DHD_HTPUT_TUNABLES */
+ bcm_bprintf(b, "pktid_txq_start_cnt: %d\n", dhd->prot->pktid_txq_start_cnt);
+ bcm_bprintf(b, "pktid_txq_stop_cnt: %d\n", dhd->prot->pktid_txq_stop_cnt);
+ bcm_bprintf(b, "pktid_depleted_cnt: %d\n", dhd->prot->pktid_depleted_cnt);
+ bcm_bprintf(b, "txcpl_db_cnt: %d\n", dhd->prot->txcpl_db_cnt);
+#ifdef DHD_DMA_INDICES_SEQNUM
+ bcm_bprintf(b, "host_seqnum %u dngl_seqnum %u\n", dhd_prot_read_seqnum(dhd, TRUE),
+ dhd_prot_read_seqnum(dhd, FALSE));
+#endif /* DHD_DMA_INDICES_SEQNUM */
+ bcm_bprintf(b, "tx_h2d_db_cnt:%llu\n", dhd->prot->tx_h2d_db_cnt);
+#ifdef AGG_H2D_DB
+ bcm_bprintf(b, "agg_h2d_db_enab:%d agg_h2d_db_timeout:%d agg_h2d_db_inflight_thresh:%d\n",
+ agg_h2d_db_enab, agg_h2d_db_timeout, agg_h2d_db_inflight_thresh);
+ bcm_bprintf(b, "agg_h2d_db: timer_db_cnt:%d direct_db_cnt:%d\n",
+ dhd->prot->agg_h2d_db_info.timer_db_cnt, dhd->prot->agg_h2d_db_info.direct_db_cnt);
+ dhd_agg_inflight_stats_dump(dhd, b);
+#endif /* AGG_H2D_DB */
+}
+
+/* Update local copy of dongle statistics */
+void dhd_prot_dstats(dhd_pub_t *dhd)
+{
+ return;
+}
+
+/** Called by upper DHD layer */
+int dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf,
+ uint reorder_info_len, void **pkt, uint32 *free_buf_count)
+{
+ return 0;
+}
+
+/** Debug related, post a dummy message to interrupt dongle. Used to process cons commands. */
+int
+dhd_post_dummy_msg(dhd_pub_t *dhd)
+{
+ unsigned long flags;
+ hostevent_hdr_t *hevent = NULL;
+ uint16 alloced = 0;
+
+ dhd_prot_t *prot = dhd->prot;
+ msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
+
+#ifdef PCIE_INB_DW
+ if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
+ return BCME_ERROR;
+#endif /* PCIE_INB_DW */
+ DHD_RING_LOCK(ring->ring_lock, flags);
+
+ hevent = (hostevent_hdr_t *)
+ dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
+
+ if (hevent == NULL) {
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+ return -1;
+ }
+
+ /* CMN msg header */
+ hevent->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
+ ring->seqnum++;
+ hevent->msg.msg_type = MSG_TYPE_HOST_EVNT;
+ hevent->msg.if_id = 0;
+ hevent->msg.flags = ring->current_phase;
+
+ /* Event payload */
+ hevent->evnt_pyld = htol32(HOST_EVENT_CONS_CMD);
+
+	/* Since we are filling the data directly into the bufptr obtained
+	 * from the msgbuf, we can call write_complete directly.
+	 */
+ dhd_prot_ring_write_complete(dhd, ring, hevent, 1);
+
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+
+ return 0;
+}
+
+/**
+ * If exactly_nitems is true, this function will allocate space for nitems or fail
+ * If exactly_nitems is false, this function will allocate space for nitems or less
+ */
+static void *
+BCMFASTPATH(dhd_prot_alloc_ring_space)(dhd_pub_t *dhd, msgbuf_ring_t *ring,
+ uint16 nitems, uint16 * alloced, bool exactly_nitems)
+{
+ void * ret_buf;
+
+ if (nitems == 0) {
+ DHD_ERROR(("%s: nitems is 0 - ring(%s)\n", __FUNCTION__, ring->name));
+ return NULL;
+ }
+
+ /* Alloc space for nitems in the ring */
+ ret_buf = dhd_prot_get_ring_space(ring, nitems, alloced, exactly_nitems);
+
+ if (ret_buf == NULL) {
+ /* if alloc failed , invalidate cached read ptr */
+ if (dhd->dma_d2h_ring_upd_support) {
+ ring->rd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
+ } else {
+ dhd_bus_cmn_readshared(dhd->bus, &(ring->rd), RING_RD_UPD, ring->idx);
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+ /* Check if ring->rd is valid */
+ if (ring->rd >= ring->max_items) {
+ DHD_ERROR(("%s: Invalid rd idx=%d\n", ring->name, ring->rd));
+ dhd->bus->read_shm_fail = TRUE;
+ return NULL;
+ }
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+ }
+
+ /* Try allocating once more */
+ ret_buf = dhd_prot_get_ring_space(ring, nitems, alloced, exactly_nitems);
+
+ if (ret_buf == NULL) {
+ DHD_INFO(("%s: Ring space not available \n", ring->name));
+ return NULL;
+ }
+ }
+
+ if (ret_buf == HOST_RING_BASE(ring)) {
+ DHD_MSGBUF_INFO(("%s: setting the phase now\n", ring->name));
+ ring->current_phase = ring->current_phase ? 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
+ }
+
+ /* Return alloced space */
+ return ret_buf;
+}
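+
+/* Retry rationale above: the cached ring->rd can be stale because the dongle
+ * consumes items asynchronously; re-reading it (from the DMA index array or
+ * the shared ring state) may reveal space freed since the first attempt, so
+ * one more allocation is tried before giving up.
+ */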
+
+/**
+ * Non-inline ioctl request.
+ * First forms an ioctl request in the circular buffer, laid out per the
+ * ioctptr_reqst_hdr_t header. A separate request buffer is then formed, with a
+ * 4-byte common header added in front; the buf contents from the parent
+ * function are copied into the remaining section of that buffer.
+ */
+static int
+dhd_fillup_ioct_reqst(dhd_pub_t *dhd, uint16 len, uint cmd, void* buf, int ifidx)
+{
+ dhd_prot_t *prot = dhd->prot;
+ ioctl_req_msg_t *ioct_rqst;
+ void * ioct_buf; /* For ioctl payload */
+ uint16 rqstlen, resplen;
+ unsigned long flags;
+ uint16 alloced = 0;
+ msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
+#ifdef DBG_DW_CHK_PCIE_READ_LATENCY
+ ulong addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
+ ktime_t begin_time, end_time;
+ s64 diff_ns;
+#endif /* DBG_DW_CHK_PCIE_READ_LATENCY */
+
+ if (dhd_query_bus_erros(dhd)) {
+ return -EIO;
+ }
+
+ rqstlen = len;
+ resplen = len;
+
+	/* Limit the ioctl request to MSGBUF_MAX_MSG_SIZE bytes including hdrs:
+	 * an 8K allocation of the dongle buffer fails, and dhd doesn't provide
+	 * separate input and output buffer lengths, so assume the input length
+	 * can never exceed 2K.
+	 */
+ rqstlen = MIN(rqstlen, MSGBUF_IOCTL_MAX_RQSTLEN);
+
+#ifdef PCIE_INB_DW
+ if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
+ return BCME_ERROR;
+
+#ifdef DBG_DW_CHK_PCIE_READ_LATENCY
+ preempt_disable();
+ begin_time = ktime_get();
+ R_REG(dhd->osh, (volatile uint16 *)(dhd->bus->tcm + addr));
+ end_time = ktime_get();
+ preempt_enable();
+ diff_ns = ktime_to_ns(ktime_sub(end_time, begin_time));
+ /* Check if the delta is greater than 1 msec */
+ if (diff_ns > (1 * NSEC_PER_MSEC)) {
+ DHD_ERROR(("%s: found latency over 1ms (%lld ns), ds state=%d\n", __func__,
+ diff_ns, dhdpcie_bus_get_pcie_inband_dw_state(dhd->bus)));
+ }
+#endif /* DBG_DW_CHK_PCIE_READ_LATENCY */
+#endif /* PCIE_INB_DW */
+
+ DHD_RING_LOCK(ring->ring_lock, flags);
+
+ if (prot->ioctl_state) {
+ DHD_ERROR(("%s: pending ioctl %02x\n", __FUNCTION__, prot->ioctl_state));
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+ return BCME_BUSY;
+ } else {
+ prot->ioctl_state = MSGBUF_IOCTL_ACK_PENDING | MSGBUF_IOCTL_RESP_PENDING;
+ }
+
+ /* Request for cbuf space */
+ ioct_rqst = (ioctl_req_msg_t*)
+ dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
+ if (ioct_rqst == NULL) {
+ DHD_ERROR(("couldn't allocate space on msgring to send ioctl request\n"));
+ prot->ioctl_state = 0;
+ prot->curr_ioctl_cmd = 0;
+ prot->ioctl_received = IOCTL_WAIT;
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+ return -1;
+ }
+
+ /* Common msg buf hdr */
+ ioct_rqst->cmn_hdr.msg_type = MSG_TYPE_IOCTLPTR_REQ;
+ ioct_rqst->cmn_hdr.if_id = (uint8)ifidx;
+ ioct_rqst->cmn_hdr.flags = ring->current_phase;
+ ioct_rqst->cmn_hdr.request_id = htol32(DHD_IOCTL_REQ_PKTID);
+ ioct_rqst->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
+ ring->seqnum++;
+
+ ioct_rqst->cmd = htol32(cmd);
+ prot->curr_ioctl_cmd = cmd;
+ ioct_rqst->output_buf_len = htol16(resplen);
+ prot->ioctl_trans_id++;
+ ioct_rqst->trans_id = prot->ioctl_trans_id;
+
+ /* populate ioctl buffer info */
+ ioct_rqst->input_buf_len = htol16(rqstlen);
+ ioct_rqst->host_input_buf_addr.high = htol32(PHYSADDRHI(prot->ioctbuf.pa));
+ ioct_rqst->host_input_buf_addr.low = htol32(PHYSADDRLO(prot->ioctbuf.pa));
+ /* copy ioct payload */
+ ioct_buf = (void *) prot->ioctbuf.va;
+
+ prot->ioctl_fillup_time = OSL_LOCALTIME_NS();
+
+ if (buf)
+ memcpy(ioct_buf, buf, len);
+
+ OSL_CACHE_FLUSH((void *) prot->ioctbuf.va, len);
+
+ if (!ISALIGNED(ioct_buf, DMA_ALIGN_LEN))
+ DHD_ERROR(("host ioct address unaligned !!!!! \n"));
+
+ DHD_CTL(("submitted IOCTL request request_id %d, cmd %d, output_buf_len %d, tx_id %d\n",
+ ioct_rqst->cmn_hdr.request_id, cmd, ioct_rqst->output_buf_len,
+ ioct_rqst->trans_id));
+
+#if defined(BCMINTERNAL) && defined(DHD_DBG_DUMP)
+ dhd_prot_ioctl_trace(dhd, ioct_rqst, buf, len);
+#endif /* defined(BCMINTERNAL) && defined(DHD_DBG_DUMP) */
+
+ /* update ring's WR index and ring doorbell to dongle */
+ dhd_prot_ring_write_complete(dhd, ring, ioct_rqst, 1);
+
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+
+ return 0;
+} /* dhd_fillup_ioct_reqst */
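+
+/* By example (illustrative values): a WLC_GET_VAR query with a 32-byte buffer
+ * posts one ioctl_req_msg_t work item whose host_input_buf_addr points at
+ * prot->ioctbuf (holding the copied 32-byte payload), with input_buf_len and
+ * output_buf_len both 32; the completion is later matched back by trans_id
+ * when the dongle responds on the D2H control completion ring.
+ */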
+
+/**
+ * dhd_prot_ring_attach - Initialize the msgbuf_ring object and attach a
+ * DMA-able buffer to it. The ring is NOT tagged as inited until all the ring
+ * information is posted to the dongle.
+ *
+ * Invoked in dhd_prot_attach for the common rings, and in dhd_prot_init for
+ * each flowring in pool of flowrings.
+ *
+ * returns BCME_OK=0 on success
+ * returns non-zero negative error value on failure.
+ */
+static int
+dhd_prot_ring_attach(dhd_pub_t *dhd, msgbuf_ring_t *ring, const char *name,
+ uint16 max_items, uint16 item_len, uint16 ringid)
+{
+ int dma_buf_alloced = BCME_NOMEM;
+ uint32 dma_buf_len;
+ dhd_prot_t *prot = dhd->prot;
+ uint16 max_flowrings = dhd->bus->max_tx_flowrings;
+ dhd_dma_buf_t *dma_buf = NULL;
+
+ ASSERT(ring);
+ ASSERT(name);
+ ASSERT((max_items < 0xFFFF) && (item_len < 0xFFFF) && (ringid < 0xFFFF));
+
+ /* Init name */
+ strlcpy((char *)ring->name, name, sizeof(ring->name));
+
+ ring->idx = ringid;
+
+#if defined(DHD_HTPUT_TUNABLES)
+ /* Use HTPUT max items */
+ if (DHD_IS_FLOWRING(ringid, max_flowrings) &&
+ DHD_IS_FLOWID_HTPUT(dhd, DHD_RINGID_TO_FLOWID(ringid))) {
+ max_items = prot->h2d_htput_max_txpost;
+ }
+#endif /* DHD_HTPUT_TUNABLES */
+
+ dma_buf_len = max_items * item_len;
+
+ ring->max_items = max_items;
+ ring->item_len = item_len;
+
+ /* A contiguous space may be reserved for all flowrings */
+ if (DHD_IS_FLOWRING(ringid, max_flowrings) && (prot->flowrings_dma_buf.va)) {
+ /* Carve out from the contiguous DMA-able flowring buffer */
+ uint16 flowid;
+ uint32 base_offset;
+ dhd_dma_buf_t *rsv_buf = &prot->flowrings_dma_buf;
+
+ dma_buf = &ring->dma_buf;
+
+ flowid = DHD_RINGID_TO_FLOWID(ringid);
+ base_offset = (flowid - BCMPCIE_H2D_COMMON_MSGRINGS) * dma_buf_len;
+
+ ASSERT(base_offset + dma_buf_len <= rsv_buf->len);
+
+ dma_buf->len = dma_buf_len;
+ dma_buf->va = (void *)((uintptr)rsv_buf->va + base_offset);
+ PHYSADDRHISET(dma_buf->pa, PHYSADDRHI(rsv_buf->pa));
+ PHYSADDRLOSET(dma_buf->pa, PHYSADDRLO(rsv_buf->pa) + base_offset);
+
+ /* On 64bit, contiguous space may not span across 0x00000000FFFFFFFF */
+ ASSERT(PHYSADDRLO(dma_buf->pa) >= PHYSADDRLO(rsv_buf->pa));
+
+ dma_buf->dmah = rsv_buf->dmah;
+ dma_buf->secdma = rsv_buf->secdma;
+
+ (void)dhd_dma_buf_audit(dhd, &ring->dma_buf);
+ } else {
+#ifdef EWP_EDL
+ if (ring == dhd->prot->d2hring_edl) {
+ /* For EDL ring, memory is alloced during attach,
+ * so just need to copy the dma_buf to the ring's dma_buf
+ */
+ memcpy(&ring->dma_buf, &dhd->edl_ring_mem, sizeof(ring->dma_buf));
+ dma_buf = &ring->dma_buf;
+ if (dma_buf->va == NULL) {
+ return BCME_NOMEM;
+ }
+ } else
+#endif /* EWP_EDL */
+ {
+ /* Allocate a dhd_dma_buf */
+ dma_buf_alloced = dhd_dma_buf_alloc(dhd, &ring->dma_buf, dma_buf_len);
+ if (dma_buf_alloced != BCME_OK) {
+ return BCME_NOMEM;
+ }
+ }
+ }
+
+ /* CAUTION: Save ring::base_addr in little endian format! */
+ dhd_base_addr_htolpa(&ring->base_addr, ring->dma_buf.pa);
+
+ ring->ring_lock = osl_spin_lock_init(dhd->osh);
+
+ DHD_INFO(("RING_ATTACH : %s Max item %d len item %d total size %d "
+ "ring start %p buf phys addr %x:%x \n",
+ ring->name, ring->max_items, ring->item_len,
+ dma_buf_len, ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
+ ltoh32(ring->base_addr.low_addr)));
+
+ return BCME_OK;
+} /* dhd_prot_ring_attach */
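+
+/* Carve-out arithmetic above, by example (illustrative values): assuming
+ * BCMPCIE_H2D_COMMON_MSGRINGS is 2, h2d_max_txpost is 512 and the txpost
+ * item size is 48 bytes, the flowring with flowid 5 gets
+ * base_offset = (5 - 2) * (512 * 48) = 73728 bytes into the contiguous
+ * flowrings_dma_buf, and its dma_buf->pa is the reserved buffer's physical
+ * address plus that same offset.
+ */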
+
+/**
+ * dhd_prot_ring_init - Post the common ring information to dongle.
+ *
+ * Used only for common rings.
+ *
+ * The flowrings information is passed via the create flowring control message
+ * (tx_flowring_create_request_t) sent over the H2D control submission common
+ * ring.
+ */
+static void
+dhd_prot_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring)
+{
+ ring->wr = 0;
+ ring->rd = 0;
+ ring->curr_rd = 0;
+
+ /* CAUTION: ring::base_addr already in Little Endian */
+ dhd_bus_cmn_writeshared(dhd->bus, &ring->base_addr,
+ sizeof(sh_addr_t), RING_BUF_ADDR, ring->idx);
+ dhd_bus_cmn_writeshared(dhd->bus, &ring->max_items,
+ sizeof(uint16), RING_MAX_ITEMS, ring->idx);
+ dhd_bus_cmn_writeshared(dhd->bus, &ring->item_len,
+ sizeof(uint16), RING_ITEM_LEN, ring->idx);
+
+ dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr),
+ sizeof(uint16), RING_WR_UPD, ring->idx);
+ dhd_bus_cmn_writeshared(dhd->bus, &(ring->rd),
+ sizeof(uint16), RING_RD_UPD, ring->idx);
+
+ /* ring inited */
+ ring->inited = TRUE;
+
+} /* dhd_prot_ring_init */
+
+/**
+ * dhd_prot_ring_reset - bzero a ring's DMA-ble buffer and cache flush
+ * Reset WR and RD indices to 0.
+ */
+static void
+dhd_prot_ring_reset(dhd_pub_t *dhd, msgbuf_ring_t *ring)
+{
+ DHD_TRACE(("%s\n", __FUNCTION__));
+
+ dhd_dma_buf_reset(dhd, &ring->dma_buf);
+
+ ring->rd = ring->wr = 0;
+ ring->curr_rd = 0;
+ ring->inited = FALSE;
+ ring->create_pending = FALSE;
+}
+
+/**
+ * dhd_prot_ring_detach - Detach the DMA-able buffer and any other objects
+ * hanging off the msgbuf_ring.
+ */
+static void
+dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t *ring)
+{
+ dhd_prot_t *prot = dhd->prot;
+ uint16 max_flowrings = dhd->bus->max_tx_flowrings;
+ ASSERT(ring);
+
+ ring->inited = FALSE;
+ /* rd = ~0, wr = ring->rd - 1, max_items = 0, len_item = ~0 */
+
+ /* If the DMA-able buffer was carved out of a pre-reserved contiguous
+ * memory, then simply stop using it.
+ */
+ if (DHD_IS_FLOWRING(ring->idx, max_flowrings) && (prot->flowrings_dma_buf.va)) {
+ (void)dhd_dma_buf_audit(dhd, &ring->dma_buf);
+ memset(&ring->dma_buf, 0, sizeof(dhd_dma_buf_t));
+ } else {
+#ifdef EWP_EDL
+ if (ring == dhd->prot->d2hring_edl) {
+ /* For EDL ring, do not free ring mem here,
+ * it is done in dhd_detach
+ */
+ memset(&ring->dma_buf, 0, sizeof(ring->dma_buf));
+ } else
+#endif /* EWP_EDL */
+ {
+ dhd_dma_buf_free(dhd, &ring->dma_buf);
+ }
+ }
+
+ osl_spin_lock_deinit(dhd->osh, ring->ring_lock);
+
+} /* dhd_prot_ring_detach */
+
+/* Fetch number of H2D flowrings given the total number of h2d rings */
+uint16
+dhd_get_max_flow_rings(dhd_pub_t *dhd)
+{
+ if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6)
+ return dhd->bus->max_tx_flowrings;
+ else
+ return (dhd->bus->max_tx_flowrings - BCMPCIE_H2D_COMMON_MSGRINGS);
+}
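+
+/* Example: with max_tx_flowrings == 40 and BCMPCIE_H2D_COMMON_MSGRINGS == 2,
+ * a pre-rev6 firmware counts the common rings in its advertisement, so this
+ * yields 38 flowrings, while rev6+ firmware advertises flowrings directly
+ * and yields 40.
+ */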
+
+/**
+ * dhd_prot_flowrings_pool_attach - Initialize a pool of flowring msgbuf_ring_t.
+ *
+ * Allocates a pool of msgbuf_ring_t objects along with DMA-able buffers for
+ * flowrings. The dongle includes the common rings when it advertises the
+ * number of H2D rings. dhd_prot_ring_attach is invoked on each object to
+ * allocate and attach its DMA-able buffer and perform the initialization.
+ *
+ * Later dhd_prot_flowrings_pool_fetch() may be used to fetch a preallocated and
+ * initialized msgbuf_ring_t object.
+ *
+ * returns BCME_OK=0 on success
+ * returns non-zero negative error value on failure.
+ */
+static int
+dhd_prot_flowrings_pool_attach(dhd_pub_t *dhd)
+{
+ uint16 flowid;
+ msgbuf_ring_t *ring;
+ uint16 h2d_flowrings_total; /* exclude H2D common rings */
+ dhd_prot_t *prot = dhd->prot;
+ char ring_name[RING_NAME_MAX_LENGTH];
+
+ if (prot->h2d_flowrings_pool != NULL)
+		return BCME_OK; /* dhd_prot_init re-entry after a dhd_prot_reset */
+
+ ASSERT(prot->h2d_rings_total == 0);
+
+ /* h2d_rings_total includes H2D common rings: ctrl and rxbuf subn */
+ prot->h2d_rings_total = (uint16)dhd_bus_max_h2d_queues(dhd->bus);
+
+ if (prot->h2d_rings_total < BCMPCIE_H2D_COMMON_MSGRINGS) {
+ DHD_ERROR(("%s: h2d_rings_total advertized as %u\n",
+ __FUNCTION__, prot->h2d_rings_total));
+ return BCME_ERROR;
+ }
+
+ /* Subtract number of H2D common rings, to determine number of flowrings */
+ h2d_flowrings_total = dhd_get_max_flow_rings(dhd);
+
+ DHD_ERROR(("Attach flowrings pool for %d rings\n", h2d_flowrings_total));
+
+ /* Allocate pool of msgbuf_ring_t objects for all flowrings */
+ prot->h2d_flowrings_pool = (msgbuf_ring_t *)MALLOCZ(prot->osh,
+ (h2d_flowrings_total * sizeof(msgbuf_ring_t)));
+
+ if (prot->h2d_flowrings_pool == NULL) {
+ DHD_ERROR(("%s: flowrings pool for %d flowrings, alloc failure\n",
+ __FUNCTION__, h2d_flowrings_total));
+ goto fail;
+ }
+
+ /* Setup & Attach a DMA-able buffer to each flowring in the flowring pool */
+ FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) {
+ snprintf(ring_name, sizeof(ring_name), "h2dflr_%03u", flowid);
+ /* For HTPUT case max_items will be changed inside dhd_prot_ring_attach */
+ if (dhd_prot_ring_attach(dhd, ring, ring_name,
+ prot->h2d_max_txpost, H2DRING_TXPOST_ITEMSIZE,
+ DHD_FLOWID_TO_RINGID(flowid)) != BCME_OK) {
+ goto attach_fail;
+ }
+ }
+
+ return BCME_OK;
+
+attach_fail:
+ /* XXX: On a per project basis, one may decide whether to continue with
+ * "fewer" flowrings, and what value of fewer suffices.
+ */
+ dhd_prot_flowrings_pool_detach(dhd); /* Free entire pool of flowrings */
+
+fail:
+ prot->h2d_rings_total = 0;
+ return BCME_NOMEM;
+
+} /* dhd_prot_flowrings_pool_attach */
+
+/**
+ * dhd_prot_flowrings_pool_reset - Reset all msgbuf_ring_t objects in the pool.
+ * Invokes dhd_prot_ring_reset to perform the actual reset.
+ *
+ * The DMA-able buffer is not freed during reset and neither is the flowring
+ * pool freed.
+ *
+ * dhd_prot_flowrings_pool_reset will be invoked in dhd_prot_reset. Following
+ * the dhd_prot_reset, dhd_prot_init will be re-invoked, and the flowring pool
+ * from a previous flowring pool instantiation will be reused.
+ *
+ * This will avoid a fragmented DMA-able memory condition, if multiple
+ * dhd_prot_reset were invoked to reboot the dongle without a full detach/attach
+ * cycle.
+ */
+static void
+dhd_prot_flowrings_pool_reset(dhd_pub_t *dhd)
+{
+ uint16 flowid, h2d_flowrings_total;
+ msgbuf_ring_t *ring;
+ dhd_prot_t *prot = dhd->prot;
+
+ if (prot->h2d_flowrings_pool == NULL) {
+ ASSERT(prot->h2d_rings_total == 0);
+ return;
+ }
+ h2d_flowrings_total = dhd_get_max_flow_rings(dhd);
+ /* Reset each flowring in the flowring pool */
+ FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) {
+ dhd_prot_ring_reset(dhd, ring);
+ ring->inited = FALSE;
+ }
+
+	/* Flowring pool state must now be as if dhd_prot_flowrings_pool_attach had just run */
+}
+
+/**
+ * dhd_prot_flowrings_pool_detach - Free pool of msgbuf_ring along with
+ * DMA-able buffers for flowrings.
+ * dhd_prot_ring_detach is invoked to free the DMA-able buffer and perform any
+ * de-initialization of each msgbuf_ring_t.
+ */
+static void
+dhd_prot_flowrings_pool_detach(dhd_pub_t *dhd)
+{
+ int flowid;
+ msgbuf_ring_t *ring;
+ uint16 h2d_flowrings_total; /* exclude H2D common rings */
+ dhd_prot_t *prot = dhd->prot;
+
+ if (prot->h2d_flowrings_pool == NULL) {
+ ASSERT(prot->h2d_rings_total == 0);
+ return;
+ }
+
+ h2d_flowrings_total = dhd_get_max_flow_rings(dhd);
+ /* Detach the DMA-able buffer for each flowring in the flowring pool */
+ FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) {
+ dhd_prot_ring_detach(dhd, ring);
+ }
+
+ MFREE(prot->osh, prot->h2d_flowrings_pool,
+ (h2d_flowrings_total * sizeof(msgbuf_ring_t)));
+
+ prot->h2d_rings_total = 0;
+
+} /* dhd_prot_flowrings_pool_detach */
+
+/**
+ * dhd_prot_flowrings_pool_fetch - Fetch a preallocated and initialized
+ * msgbuf_ring from the flowring pool, and assign it.
+ *
+ * Unlike common rings, which uses a dhd_prot_ring_init() to pass the common
+ * ring information to the dongle, a flowring's information is passed via a
+ * flowring create control message.
+ *
+ * Only the ring state (WR, RD) index are initialized.
+ */
+static msgbuf_ring_t *
+dhd_prot_flowrings_pool_fetch(dhd_pub_t *dhd, uint16 flowid)
+{
+ msgbuf_ring_t *ring;
+ dhd_prot_t *prot = dhd->prot;
+
+ ASSERT(flowid >= DHD_FLOWRING_START_FLOWID);
+ ASSERT(flowid < prot->h2d_rings_total);
+ ASSERT(prot->h2d_flowrings_pool != NULL);
+
+ ring = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid);
+
+ /* ASSERT flow_ring->inited == FALSE */
+
+ ring->wr = 0;
+ ring->rd = 0;
+ ring->curr_rd = 0;
+ ring->inited = TRUE;
+	/**
+	 * Each time a flowring is started dynamically, current_phase is
+	 * initialized to 0 and later flips to BCMPCIE_CMNHDR_PHASE_BIT_INIT.
+	 */
+ ring->current_phase = 0;
+ return ring;
+}
+
+/**
+ * dhd_prot_flowrings_pool_release - release a previously fetched flowring's
+ * msgbuf_ring back to the flow_ring pool.
+ */
+void
+dhd_prot_flowrings_pool_release(dhd_pub_t *dhd, uint16 flowid, void *flow_ring)
+{
+ msgbuf_ring_t *ring;
+ dhd_prot_t *prot = dhd->prot;
+
+ ASSERT(flowid >= DHD_FLOWRING_START_FLOWID);
+ ASSERT(flowid < prot->h2d_rings_total);
+ ASSERT(prot->h2d_flowrings_pool != NULL);
+
+ ring = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid);
+
+ ASSERT(ring == (msgbuf_ring_t*)flow_ring);
+ /* ASSERT flow_ring->inited == TRUE */
+
+ (void)dhd_dma_buf_audit(dhd, &ring->dma_buf);
+
+ ring->wr = 0;
+ ring->rd = 0;
+ ring->inited = FALSE;
+
+ ring->curr_rd = 0;
+}
+
+#ifdef AGG_H2D_DB
+void
+dhd_prot_schedule_aggregate_h2d_db(dhd_pub_t *dhd, uint16 flowid)
+{
+ dhd_prot_t *prot = dhd->prot;
+ msgbuf_ring_t *ring;
+ uint16 inflight;
+ bool db_req = FALSE;
+ bool flush;
+
+ ring = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid);
+ flush = !!ring->pend_items_count;
+ dhd_prot_txdata_aggr_db_write_flush(dhd, flowid);
+
+ inflight = OSL_ATOMIC_READ(dhd->osh, &ring->inflight);
+ if (flush && inflight) {
+ if (inflight <= agg_h2d_db_inflight_thresh) {
+ db_req = TRUE;
+ }
+ dhd_agg_inflights_stats_update(dhd, inflight);
+ dhd_prot_aggregate_db_ring_door_bell(dhd, flowid, db_req);
+ }
+}
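+
+/* Decision sketch for the aggregated doorbell above: pending items are always
+ * flushed to the WR index, but the PCIe doorbell is rung immediately only when
+ * the in-flight count has drained to agg_h2d_db_inflight_thresh or below;
+ * otherwise the aggregation timer is left to ring it, batching several WR
+ * updates into a single doorbell.
+ */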
+#endif /* AGG_H2D_DB */
+
+/* Assumes only one index is updated at a time. FIXME: needs fixing.
+ * If exactly_nitems is true, this function allocates space for nitems or fails,
+ * except when a wrap-around is encountered, to prevent a hang-up (the last
+ * nitems of the ring buffer).
+ * If exactly_nitems is false, this function allocates space for nitems or less.
+ */
+static void *
+BCMFASTPATH(dhd_prot_get_ring_space)(msgbuf_ring_t *ring, uint16 nitems, uint16 * alloced,
+ bool exactly_nitems)
+{
+ void *ret_ptr = NULL;
+ uint16 ring_avail_cnt;
+
+ ASSERT(nitems <= ring->max_items);
+
+ ring_avail_cnt = CHECK_WRITE_SPACE(ring->rd, ring->wr, ring->max_items);
+
+ if ((ring_avail_cnt == 0) ||
+ (exactly_nitems && (ring_avail_cnt < nitems) &&
+ ((ring->max_items - ring->wr) >= nitems))) {
+ DHD_MSGBUF_INFO(("Space not available: ring %s items %d write %d read %d\n",
+ ring->name, nitems, ring->wr, ring->rd));
+ return NULL;
+ }
+ *alloced = MIN(nitems, ring_avail_cnt);
+
+ /* Return next available space */
+ ret_ptr = (char *)DHD_RING_BGN_VA(ring) + (ring->wr * ring->item_len);
+
+ /* Update write index */
+ if ((ring->wr + *alloced) == ring->max_items)
+ ring->wr = 0;
+ else if ((ring->wr + *alloced) < ring->max_items)
+ ring->wr += *alloced;
+ else {
+ /* Should never hit this */
+ ASSERT(0);
+ return NULL;
+ }
+
+ return ret_ptr;
+} /* dhd_prot_get_ring_space */
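+
+/* Illustrative example of the write-index update above (assuming
+ * CHECK_WRITE_SPACE() returns the count of contiguously writable items): for a
+ * ring with max_items = 40, rd = 5 and wr = 38, a request for 4 items with
+ * exactly_nitems == FALSE yields *alloced = 2 and advances wr to 40, which
+ * equals max_items and therefore wraps wr back to 0; the caller retries for
+ * the remaining items.
+ */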
+
+#ifdef AGG_H2D_DB
+
+static void
+dhd_prot_agg_db_ring_write(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p,
+ uint16 nitems)
+{
+ uint16 max_flowrings = dhd->bus->max_tx_flowrings;
+ unsigned long flags_bus;
+
+#ifdef DHD_FAKE_TX_STATUS
+ /* if fake tx status is enabled, we should not update
+ * dongle side rd/wr index for the tx flowring
+ * and also should not ring the doorbell
+ */
+ if (DHD_IS_FLOWRING(ring->idx, max_flowrings)) {
+ return;
+ }
+#endif /* DHD_FAKE_TX_STATUS */
+
+ DHD_BUS_LP_STATE_LOCK(dhd->bus->bus_lp_state_lock, flags_bus);
+
+ /* cache flush */
+ OSL_CACHE_FLUSH(p, ring->item_len * nitems);
+
+ if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) {
+ dhd_prot_dma_indx_set(dhd, ring->wr,
+ H2D_DMA_INDX_WR_UPD, ring->idx);
+ } else if (IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(ring->idx, max_flowrings)) {
+ dhd_prot_dma_indx_set(dhd, ring->wr,
+ H2D_IFRM_INDX_WR_UPD, ring->idx);
+ } else {
+ dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr),
+ sizeof(uint16), RING_WR_UPD, ring->idx);
+ }
+
+ DHD_BUS_LP_STATE_UNLOCK(dhd->bus->bus_lp_state_lock, flags_bus);
+}
+
+static void
+dhd_prot_aggregate_db_ring_door_bell(dhd_pub_t *dhd, uint16 flowid, bool ring_db)
+{
+ dhd_prot_t *prot = dhd->prot;
+ flow_ring_table_t *flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
+ flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid];
+ msgbuf_ring_t *ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
+ uint32 db_index;
+ uint corerev;
+
+ if (ring_db == TRUE) {
+ dhd_msgbuf_agg_h2d_db_timer_cancel(dhd);
+ prot->agg_h2d_db_info.direct_db_cnt++;
+ /* raise h2d interrupt */
+ if (IDMA_ACTIVE(dhd) || (IFRM_ACTIVE(dhd))) {
+ db_index = IDMA_IDX0;
+ /* this api is called in wl down path..in that case sih is freed already */
+ if (dhd->bus->sih) {
+ corerev = dhd->bus->sih->buscorerev;
+				/* We need to explicitly configure the type of DMA for
+				 * core rev >= 24
+				 */
+ if (corerev >= 24) {
+ db_index |= (DMA_TYPE_IDMA << DMA_TYPE_SHIFT);
+ }
+ }
+ prot->mb_2_ring_fn(dhd->bus, db_index, TRUE);
+ } else {
+ prot->mb_ring_fn(dhd->bus, DHD_WRPTR_UPDATE_H2D_DB_MAGIC(ring));
+ }
+ } else {
+ dhd_msgbuf_agg_h2d_db_timer_start(prot);
+ }
+}
+
+#endif /* AGG_H2D_DB */
+
+/**
+ * dhd_prot_ring_write_complete - Host updates the new WR index on producing
+ * new messages in a H2D ring. The messages are flushed from cache prior to
+ * posting the new WR index. The new WR index will be updated in the DMA index
+ * array or directly in the dongle's ring state memory.
+ * A PCIE doorbell will be generated to wake up the dongle.
+ * This is a non-atomic function, make sure the callers
+ * always hold appropriate locks.
+ */
+static void
+BCMFASTPATH(__dhd_prot_ring_write_complete)(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p,
+ uint16 nitems)
+{
+ dhd_prot_t *prot = dhd->prot;
+ uint32 db_index;
+ uint16 max_flowrings = dhd->bus->max_tx_flowrings;
+ uint corerev;
+
+ /* cache flush */
+ OSL_CACHE_FLUSH(p, ring->item_len * nitems);
+
+ if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) {
+ dhd_prot_dma_indx_set(dhd, ring->wr,
+ H2D_DMA_INDX_WR_UPD, ring->idx);
+ } else if (IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(ring->idx, max_flowrings)) {
+ dhd_prot_dma_indx_set(dhd, ring->wr,
+ H2D_IFRM_INDX_WR_UPD, ring->idx);
+ } else {
+ dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr),
+ sizeof(uint16), RING_WR_UPD, ring->idx);
+ }
+
+ /* raise h2d interrupt */
+ if (IDMA_ACTIVE(dhd) ||
+ (IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(ring->idx, max_flowrings))) {
+ db_index = IDMA_IDX0;
+		/* This API can be called in the wl down path, where sih is already freed */
+ if (dhd->bus->sih) {
+ corerev = dhd->bus->sih->buscorerev;
+			/* We need to explicitly configure the type of DMA for core rev >= 24 */
+ if (corerev >= 24) {
+ db_index |= (DMA_TYPE_IDMA << DMA_TYPE_SHIFT);
+ }
+ }
+ prot->mb_2_ring_fn(dhd->bus, db_index, TRUE);
+ } else {
+ prot->mb_ring_fn(dhd->bus, DHD_WRPTR_UPDATE_H2D_DB_MAGIC(ring));
+ }
+}
+
+static void
+BCMFASTPATH(dhd_prot_ring_write_complete)(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p,
+ uint16 nitems)
+{
+ unsigned long flags_bus;
+ DHD_BUS_LP_STATE_LOCK(dhd->bus->bus_lp_state_lock, flags_bus);
+ __dhd_prot_ring_write_complete(dhd, ring, p, nitems);
+ DHD_BUS_LP_STATE_UNLOCK(dhd->bus->bus_lp_state_lock, flags_bus);
+}
+
+static void
+BCMFASTPATH(dhd_prot_ring_doorbell)(dhd_pub_t *dhd, uint32 value)
+{
+ unsigned long flags_bus;
+ DHD_BUS_LP_STATE_LOCK(dhd->bus->bus_lp_state_lock, flags_bus);
+ dhd->prot->mb_ring_fn(dhd->bus, value);
+ DHD_BUS_LP_STATE_UNLOCK(dhd->bus->bus_lp_state_lock, flags_bus);
+}
+
+/**
+ * dhd_prot_ring_write_complete_mbdata - will be called from dhd_prot_h2d_mbdata_send_ctrlmsg,
+ * which will hold DHD_BUS_LP_STATE_LOCK to update WR pointer, Ring DB and also update
+ * bus_low_power_state to indicate D3_INFORM sent in the same BUS_LP_STATE_LOCK.
+ */
+static void
+BCMFASTPATH(dhd_prot_ring_write_complete_mbdata)(dhd_pub_t *dhd, msgbuf_ring_t * ring, void *p,
+ uint16 nitems, uint32 mb_data)
+{
+ unsigned long flags_bus;
+
+ DHD_BUS_LP_STATE_LOCK(dhd->bus->bus_lp_state_lock, flags_bus);
+
+ __dhd_prot_ring_write_complete(dhd, ring, p, nitems);
+
+ /* Mark D3_INFORM in the same context to skip ringing H2D DB after D3_INFORM */
+ if (mb_data == H2D_HOST_D3_INFORM) {
+ __DHD_SET_BUS_LPS_D3_INFORMED(dhd->bus);
+ }
+
+ DHD_BUS_LP_STATE_UNLOCK(dhd->bus->bus_lp_state_lock, flags_bus);
+}
+
+/**
+ * dhd_prot_upd_read_idx - Host updates the new RD index on consuming messages
+ * from a D2H ring. The new RD index will be updated in the DMA Index array or
+ * directly in dongle's ring state memory.
+ */
+static void
+dhd_prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t * ring)
+{
+ dhd_prot_t *prot = dhd->prot;
+ uint32 db_index;
+ uint corerev;
+
+	/* Update the read index: if DMA'ing of ring indices is supported, the RD
+	 * index is updated in host memory, otherwise it is written to TCM.
+	 */
+ if (IDMA_ACTIVE(dhd)) {
+ dhd_prot_dma_indx_set(dhd, ring->rd,
+ D2H_DMA_INDX_RD_UPD, ring->idx);
+ db_index = IDMA_IDX1;
+ if (dhd->bus->sih) {
+ corerev = dhd->bus->sih->buscorerev;
+			/* We need to explicitly configure the type of DMA for core rev >= 24 */
+ if (corerev >= 24) {
+ db_index |= (DMA_TYPE_IDMA << DMA_TYPE_SHIFT);
+ }
+ }
+ prot->mb_2_ring_fn(dhd->bus, db_index, FALSE);
+ } else if (dhd->dma_h2d_ring_upd_support) {
+ dhd_prot_dma_indx_set(dhd, ring->rd,
+ D2H_DMA_INDX_RD_UPD, ring->idx);
+ } else {
+ dhd_bus_cmn_writeshared(dhd->bus, &(ring->rd),
+ sizeof(uint16), RING_RD_UPD, ring->idx);
+ }
+}
+
+static int
+dhd_send_d2h_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create,
+ uint16 ring_type, uint32 req_id)
+{
+ unsigned long flags;
+ d2h_ring_create_req_t *d2h_ring;
+ uint16 alloced = 0;
+ int ret = BCME_OK;
+ uint16 max_h2d_rings = dhd->bus->max_submission_rings;
+ msgbuf_ring_t *ctrl_ring = &dhd->prot->h2dring_ctrl_subn;
+
+#ifdef PCIE_INB_DW
+ if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
+ return BCME_ERROR;
+#endif /* PCIE_INB_DW */
+ DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
+
+ DHD_TRACE(("%s trying to send D2H ring create Req\n", __FUNCTION__));
+
+ if (ring_to_create == NULL) {
+ DHD_ERROR(("%s: FATAL: ring_to_create is NULL\n", __FUNCTION__));
+ ret = BCME_ERROR;
+ goto err;
+ }
+
+ /* Request for ring buffer space */
+ d2h_ring = (d2h_ring_create_req_t *) dhd_prot_alloc_ring_space(dhd,
+ ctrl_ring, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,
+ &alloced, FALSE);
+
+ if (d2h_ring == NULL) {
+ DHD_ERROR(("%s: FATAL: No space in control ring to send D2H ring create\n",
+ __FUNCTION__));
+ ret = BCME_NOMEM;
+ goto err;
+ }
+ ring_to_create->create_req_id = (uint16)req_id;
+ ring_to_create->create_pending = TRUE;
+
+ /* Common msg buf hdr */
+ d2h_ring->msg.msg_type = MSG_TYPE_D2H_RING_CREATE;
+ d2h_ring->msg.if_id = 0;
+ d2h_ring->msg.flags = ctrl_ring->current_phase;
+ d2h_ring->msg.request_id = htol32(ring_to_create->create_req_id);
+ d2h_ring->ring_id = htol16(DHD_D2H_RING_OFFSET(ring_to_create->idx, max_h2d_rings));
+ DHD_ERROR(("%s ringid: %d idx: %d max_h2d: %d\n", __FUNCTION__, d2h_ring->ring_id,
+ ring_to_create->idx, max_h2d_rings));
+
+ d2h_ring->ring_type = ring_type;
+ d2h_ring->max_items = htol16(ring_to_create->max_items);
+ d2h_ring->len_item = htol16(ring_to_create->item_len);
+ d2h_ring->ring_ptr.low_addr = ring_to_create->base_addr.low_addr;
+ d2h_ring->ring_ptr.high_addr = ring_to_create->base_addr.high_addr;
+
+ d2h_ring->flags = 0;
+ d2h_ring->msg.epoch =
+ ctrl_ring->seqnum % H2D_EPOCH_MODULO;
+ ctrl_ring->seqnum++;
+
+#ifdef EWP_EDL
+ if (ring_type == BCMPCIE_D2H_RING_TYPE_EDL) {
+ DHD_ERROR(("%s: sending d2h EDL ring create: "
+ "\n max items=%u; len_item=%u; ring_id=%u; low_addr=0x%x; high_addr=0x%x\n",
+ __FUNCTION__, ltoh16(d2h_ring->max_items),
+ ltoh16(d2h_ring->len_item),
+ ltoh16(d2h_ring->ring_id),
+ d2h_ring->ring_ptr.low_addr,
+ d2h_ring->ring_ptr.high_addr));
+ }
+#endif /* EWP_EDL */
+
+ /* Update the flow_ring's WRITE index */
+ dhd_prot_ring_write_complete(dhd, ctrl_ring, d2h_ring,
+ DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
+
+ DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
+
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+
+ return ret;
+err:
+ DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
+
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+ return ret;
+}
+
+static int
+dhd_send_h2d_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create, uint8 ring_type, uint32 id)
+{
+ unsigned long flags;
+ h2d_ring_create_req_t *h2d_ring;
+ uint16 alloced = 0;
+ uint8 i = 0;
+ int ret = BCME_OK;
+ msgbuf_ring_t *ctrl_ring = &dhd->prot->h2dring_ctrl_subn;
+
+#ifdef PCIE_INB_DW
+ if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
+ return BCME_ERROR;
+#endif /* PCIE_INB_DW */
+ DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
+
+ DHD_TRACE(("%s trying to send H2D ring create Req\n", __FUNCTION__));
+
+ if (ring_to_create == NULL) {
+ DHD_ERROR(("%s: FATAL: ring_to_create is NULL\n", __FUNCTION__));
+ ret = BCME_ERROR;
+ goto err;
+ }
+
+ /* Request for ring buffer space */
+ h2d_ring = (h2d_ring_create_req_t *)dhd_prot_alloc_ring_space(dhd,
+ ctrl_ring, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,
+ &alloced, FALSE);
+
+ if (h2d_ring == NULL) {
+ DHD_ERROR(("%s: FATAL: No space in control ring to send H2D ring create\n",
+ __FUNCTION__));
+ ret = BCME_NOMEM;
+ goto err;
+ }
+ ring_to_create->create_req_id = (uint16)id;
+ ring_to_create->create_pending = TRUE;
+
+ /* Common msg buf hdr */
+ h2d_ring->msg.msg_type = MSG_TYPE_H2D_RING_CREATE;
+ h2d_ring->msg.if_id = 0;
+ h2d_ring->msg.request_id = htol32(ring_to_create->create_req_id);
+ h2d_ring->msg.flags = ctrl_ring->current_phase;
+ h2d_ring->ring_id = htol16(DHD_H2D_RING_OFFSET(ring_to_create->idx));
+ h2d_ring->ring_type = ring_type;
+ h2d_ring->max_items = htol16(H2DRING_DYNAMIC_INFO_MAX_ITEM);
+ h2d_ring->n_completion_ids = ring_to_create->n_completion_ids;
+ h2d_ring->len_item = htol16(H2DRING_INFO_BUFPOST_ITEMSIZE);
+ h2d_ring->ring_ptr.low_addr = ring_to_create->base_addr.low_addr;
+ h2d_ring->ring_ptr.high_addr = ring_to_create->base_addr.high_addr;
+
+ for (i = 0; i < ring_to_create->n_completion_ids; i++) {
+ h2d_ring->completion_ring_ids[i] = htol16(ring_to_create->compeltion_ring_ids[i]);
+ }
+
+ h2d_ring->flags = 0;
+ h2d_ring->msg.epoch =
+ ctrl_ring->seqnum % H2D_EPOCH_MODULO;
+ ctrl_ring->seqnum++;
+
+ /* Update the flow_ring's WRITE index */
+ dhd_prot_ring_write_complete(dhd, ctrl_ring, h2d_ring,
+ DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
+
+ DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
+
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+ return ret;
+err:
+ DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
+
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+ return ret;
+}
+
+/**
+ * dhd_prot_dma_indx_set - set a new WR or RD index in the DMA index array.
+ * Dongle will DMA the entire array (if DMA_INDX feature is enabled).
+ * See dhd_prot_dma_indx_init()
+ */
+void
+dhd_prot_dma_indx_set(dhd_pub_t *dhd, uint16 new_index, uint8 type, uint16 ringid)
+{
+ uint8 *ptr;
+ uint16 offset;
+ dhd_prot_t *prot = dhd->prot;
+ uint16 max_h2d_rings = dhd->bus->max_submission_rings;
+
+ switch (type) {
+ case H2D_DMA_INDX_WR_UPD:
+ ptr = (uint8 *)(prot->h2d_dma_indx_wr_buf.va);
+ offset = DHD_H2D_RING_OFFSET(ringid);
+ break;
+
+ case D2H_DMA_INDX_RD_UPD:
+ ptr = (uint8 *)(prot->d2h_dma_indx_rd_buf.va);
+ offset = DHD_D2H_RING_OFFSET(ringid, max_h2d_rings);
+ break;
+
+ case H2D_IFRM_INDX_WR_UPD:
+ ptr = (uint8 *)(prot->h2d_ifrm_indx_wr_buf.va);
+ offset = DHD_H2D_FRM_FLOW_RING_OFFSET(ringid);
+ break;
+
+ default:
+ DHD_ERROR(("%s: Invalid option for DMAing read/write index\n",
+ __FUNCTION__));
+ return;
+ }
+
+ ASSERT(prot->rw_index_sz != 0);
+ ptr += offset * prot->rw_index_sz;
+
+ /* XXX: Test casting ptr to uint16* for 32bit indices case on Big Endian */
+ *(uint16*)ptr = htol16(new_index);
+
+ OSL_CACHE_FLUSH((void *)ptr, prot->rw_index_sz);
+
+ DHD_TRACE(("%s: data %d type %d ringid %d ptr 0x%p offset %d\n",
+ __FUNCTION__, new_index, type, ringid, ptr, offset));
+
+} /* dhd_prot_dma_indx_set */
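+
+/* Addressing above, by example (illustrative layout): with rw_index_sz == 2
+ * and an H2D WR update for a ring whose DHD_H2D_RING_OFFSET() is 3, the new
+ * index is written at h2d_dma_indx_wr_buf.va + 3 * 2 and cache-flushed, after
+ * which the dongle DMAs the whole array and picks up the change.
+ */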
+
+/**
+ * dhd_prot_dma_indx_get - Fetch a WR or RD index from the dongle DMA-ed index
+ * array.
+ * Dongle DMAes an entire array to host memory (if the feature is enabled).
+ * See dhd_prot_dma_indx_init()
+ */
+static uint16
+dhd_prot_dma_indx_get(dhd_pub_t *dhd, uint8 type, uint16 ringid)
+{
+ uint8 *ptr;
+ uint16 data;
+ uint16 offset;
+ dhd_prot_t *prot = dhd->prot;
+ uint16 max_h2d_rings = dhd->bus->max_submission_rings;
+
+ switch (type) {
+ case H2D_DMA_INDX_WR_UPD:
+ ptr = (uint8 *)(prot->h2d_dma_indx_wr_buf.va);
+ offset = DHD_H2D_RING_OFFSET(ringid);
+ break;
+
+ case H2D_DMA_INDX_RD_UPD:
+#ifdef DHD_DMA_INDICES_SEQNUM
+ if (prot->h2d_dma_indx_rd_copy_buf) {
+ ptr = (uint8 *)(prot->h2d_dma_indx_rd_copy_buf);
+ } else
+#endif /* DHD_DMA_INDICES_SEQNUM */
+ {
+ ptr = (uint8 *)(prot->h2d_dma_indx_rd_buf.va);
+ }
+ offset = DHD_H2D_RING_OFFSET(ringid);
+ break;
+
+ case D2H_DMA_INDX_WR_UPD:
+#ifdef DHD_DMA_INDICES_SEQNUM
+ if (prot->d2h_dma_indx_wr_copy_buf) {
+ ptr = (uint8 *)(prot->d2h_dma_indx_wr_copy_buf);
+ } else
+#endif /* DHD_DMA_INDICES_SEQNUM */
+ {
+ ptr = (uint8 *)(prot->d2h_dma_indx_wr_buf.va);
+ }
+ offset = DHD_D2H_RING_OFFSET(ringid, max_h2d_rings);
+ break;
+
+ case D2H_DMA_INDX_RD_UPD:
+ ptr = (uint8 *)(prot->d2h_dma_indx_rd_buf.va);
+ offset = DHD_D2H_RING_OFFSET(ringid, max_h2d_rings);
+ break;
+
+ default:
+ DHD_ERROR(("%s: Invalid option for DMAing read/write index\n",
+ __FUNCTION__));
+ return 0;
+ }
+
+ ASSERT(prot->rw_index_sz != 0);
+ ptr += offset * prot->rw_index_sz;
+
+ OSL_CACHE_INV((void *)ptr, prot->rw_index_sz);
+
+ /* XXX: Test casting ptr to uint16* for 32bit indices case on Big Endian */
+ data = LTOH16(*((uint16*)ptr));
+
+ DHD_TRACE(("%s: data %d type %d ringid %d ptr 0x%p offset %d\n",
+ __FUNCTION__, data, type, ringid, ptr, offset));
+
+ return (data);
+
+} /* dhd_prot_dma_indx_get */
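+
+/*
+ * Editorial sketch (not driver code) of the index-array layout used by
+ * dhd_prot_dma_indx_set()/dhd_prot_dma_indx_get() above, assuming the
+ * common rw_index_sz of 2 bytes: each array holds one little-endian slot
+ * per ring, addressed as
+ *
+ *   slot = (uint8 *)buf.va + ring_offset * rw_index_sz;
+ *
+ * where ring_offset is DHD_H2D_RING_OFFSET(ringid) for H2D arrays and
+ * DHD_D2H_RING_OFFSET(ringid, max_h2d_rings) for D2H arrays.
+ */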
+
+#ifdef DHD_DMA_INDICES_SEQNUM
+void
+dhd_prot_write_host_seqnum(dhd_pub_t *dhd, uint32 seq_num)
+{
+ uint8 *ptr;
+ dhd_prot_t *prot = dhd->prot;
+
+ /* Update host sequence number in first four bytes of scratchbuf */
+ ptr = (uint8 *)(prot->d2h_dma_scratch_buf.va);
+ *(uint32*)ptr = htol32(seq_num);
+ OSL_CACHE_FLUSH((void *)ptr, prot->d2h_dma_scratch_buf.len);
+
+ DHD_TRACE(("%s: data %d ptr 0x%p\n", __FUNCTION__, seq_num, ptr));
+
+} /* dhd_prot_dma_indx_set */
+
+uint32
+dhd_prot_read_seqnum(dhd_pub_t *dhd, bool host)
+{
+ uint8 *ptr;
+ dhd_prot_t *prot = dhd->prot;
+ uint32 data;
+
+ OSL_CACHE_INV((void *)prot->d2h_dma_scratch_buf.va, prot->d2h_dma_scratch_buf.len);
+
+ /* The first four bytes of scratchbuf contain the host sequence number.
+ * The next four bytes contain the dongle sequence number.
+ */
+ if (host) {
+ ptr = (uint8 *)(prot->d2h_dma_scratch_buf.va);
+ data = LTOH32(*((uint32*)ptr));
+ } else {
+ ptr = ((uint8 *)(prot->d2h_dma_scratch_buf.va) + sizeof(uint32));
+ data = LTOH32(*((uint32*)ptr));
+ }
+ DHD_TRACE(("%s: data %d ptr 0x%p\n", __FUNCTION__, data, ptr));
+ return data;
+} /* dhd_prot_dma_indx_set */
+
+void
+dhd_prot_save_dmaidx(dhd_pub_t *dhd)
+{
+ dhd_prot_t *prot = dhd->prot;
+ uint32 dngl_seqnum;
+
+ dngl_seqnum = dhd_prot_read_seqnum(dhd, FALSE);
+
+ DHD_TRACE(("%s: host_seqnum %u dngl_seqnum %u\n", __FUNCTION__,
+ prot->host_seqnum, dngl_seqnum));
+ if (prot->d2h_dma_indx_wr_copy_buf && prot->h2d_dma_indx_rd_copy_buf) {
+ if (prot->host_seqnum == dngl_seqnum) {
+ memcpy_s(prot->d2h_dma_indx_wr_copy_buf, prot->d2h_dma_indx_wr_copy_bufsz,
+ prot->d2h_dma_indx_wr_buf.va, prot->d2h_dma_indx_wr_copy_bufsz);
+ memcpy_s(prot->h2d_dma_indx_rd_copy_buf, prot->h2d_dma_indx_rd_copy_bufsz,
+ prot->h2d_dma_indx_rd_buf.va, prot->h2d_dma_indx_rd_copy_bufsz);
+ dhd_prot_write_host_seqnum(dhd, prot->host_seqnum);
+ /* Ring DoorBell */
+ dhd_prot_ring_doorbell(dhd, DHD_DMA_INDX_SEQ_H2D_DB_MAGIC);
+ prot->host_seqnum++;
+ prot->host_seqnum %= D2H_EPOCH_MODULO;
+ }
+ }
+}
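+
+/*
+ * Editorial summary of the DHD_DMA_INDICES_SEQNUM handshake above (a
+ * sketch, not driver code): the host snapshots the DMA'd index arrays into
+ * the copy buffers only when its sequence number matches the dongle's,
+ * i.e. when the previous snapshot has been acknowledged:
+ *
+ *   if (host_seqnum == dngl_seqnum) {
+ *       copy_buf = dma_indx_buf;                  // coherent snapshot
+ *       dhd_prot_write_host_seqnum(dhd, host_seqnum);
+ *       dhd_prot_ring_doorbell(dhd, DHD_DMA_INDX_SEQ_H2D_DB_MAGIC);
+ *       host_seqnum = (host_seqnum + 1) % D2H_EPOCH_MODULO;
+ *   }
+ */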
+
+int
+dhd_prot_dma_indx_copybuf_init(dhd_pub_t *dhd, uint32 buf_sz, uint8 type)
+{
+ dhd_prot_t *prot = dhd->prot;
+
+ switch (type) {
+ case D2H_DMA_INDX_WR_BUF:
+ prot->d2h_dma_indx_wr_copy_buf = MALLOCZ(dhd->osh, buf_sz);
+ if (prot->d2h_dma_indx_wr_copy_buf == NULL) {
+ DHD_ERROR(("%s: MALLOC failed for size %d\n",
+ __FUNCTION__, buf_sz));
+ goto ret_no_mem;
+ }
+ prot->d2h_dma_indx_wr_copy_bufsz = buf_sz;
+ break;
+
+ case H2D_DMA_INDX_RD_BUF:
+ prot->h2d_dma_indx_rd_copy_buf = MALLOCZ(dhd->osh, buf_sz);
+ if (prot->h2d_dma_indx_rd_copy_buf == NULL) {
+ DHD_ERROR(("%s: MALLOC failed for size %d\n",
+ __FUNCTION__, buf_sz));
+ goto ret_no_mem;
+ }
+ prot->h2d_dma_indx_rd_copy_bufsz = buf_sz;
+ break;
+
+ default:
+ break;
+ }
+ return BCME_OK;
+ret_no_mem:
+ return BCME_NOMEM;
+
+}
+#endif /* DHD_DMA_INDICES_SEQNUM */
+
+/**
+ * An array of DMA read/write indices, containing information about host rings, can be maintained
+ * either in host memory or in device memory, depending on preprocessor options. Depending on those
+ * same options, this function is called during driver initialization. It reserves and initializes
+ * blocks of DMA'able host memory containing an array of DMA read or DMA write indices. The
+ * physical address of these host memory blocks is communicated to the dongle later on. By reading
+ * this host memory, the dongle learns about the state of the host rings.
+ */
+
+static INLINE int
+dhd_prot_dma_indx_alloc(dhd_pub_t *dhd, uint8 type,
+ dhd_dma_buf_t *dma_buf, uint32 bufsz)
+{
+ int rc;
+
+ if ((dma_buf->len == bufsz) || (dma_buf->va != NULL))
+ return BCME_OK;
+
+ rc = dhd_dma_buf_alloc(dhd, dma_buf, bufsz);
+
+ return rc;
+}
+
+int
+dhd_prot_dma_indx_init(dhd_pub_t *dhd, uint32 rw_index_sz, uint8 type, uint32 length)
+{
+ uint32 bufsz;
+ dhd_prot_t *prot = dhd->prot;
+ dhd_dma_buf_t *dma_buf;
+
+ if (prot == NULL) {
+ DHD_ERROR(("prot is not inited\n"));
+ return BCME_ERROR;
+ }
+
+ /* Dongle advertises 2B or 4B RW index size */
+ ASSERT(rw_index_sz != 0);
+ prot->rw_index_sz = rw_index_sz;
+
+ bufsz = rw_index_sz * length;
+
+ switch (type) {
+ case H2D_DMA_INDX_WR_BUF:
+ dma_buf = &prot->h2d_dma_indx_wr_buf;
+ if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
+ goto ret_no_mem;
+ DHD_ERROR(("H2D DMA WR INDX : array size %d = %d * %d\n",
+ dma_buf->len, rw_index_sz, length));
+ break;
+
+ case H2D_DMA_INDX_RD_BUF:
+ dma_buf = &prot->h2d_dma_indx_rd_buf;
+ if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
+ goto ret_no_mem;
+ DHD_ERROR(("H2D DMA RD INDX : array size %d = %d * %d\n",
+ dma_buf->len, rw_index_sz, length));
+ break;
+
+ case D2H_DMA_INDX_WR_BUF:
+ dma_buf = &prot->d2h_dma_indx_wr_buf;
+ if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
+ goto ret_no_mem;
+ DHD_ERROR(("D2H DMA WR INDX : array size %d = %d * %d\n",
+ dma_buf->len, rw_index_sz, length));
+ break;
+
+ case D2H_DMA_INDX_RD_BUF:
+ dma_buf = &prot->d2h_dma_indx_rd_buf;
+ if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
+ goto ret_no_mem;
+ DHD_ERROR(("D2H DMA RD INDX : array size %d = %d * %d\n",
+ dma_buf->len, rw_index_sz, length));
+ break;
+
+ case H2D_IFRM_INDX_WR_BUF:
+ dma_buf = &prot->h2d_ifrm_indx_wr_buf;
+ if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
+ goto ret_no_mem;
+ DHD_ERROR(("H2D IFRM WR INDX : array size %d = %d * %d\n",
+ dma_buf->len, rw_index_sz, length));
+ break;
+
+ default:
+ DHD_ERROR(("%s: Unexpected option\n", __FUNCTION__));
+ return BCME_BADOPTION;
+ }
+
+ return BCME_OK;
+
+ret_no_mem:
+ DHD_ERROR(("%s: dhd_prot_dma_indx_alloc type %d buf_sz %d failure\n",
+ __FUNCTION__, type, bufsz));
+ return BCME_NOMEM;
+
+} /* dhd_prot_dma_indx_init */
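+
+/*
+ * Hypothetical usage sketch (editorial; the real call sites live in the
+ * bus layer): during attach, one call is expected per index array, e.g.
+ *
+ *   dhd_prot_dma_indx_init(dhd, rw_index_sz, H2D_DMA_INDX_WR_BUF,
+ *       max_submission_rings);
+ *
+ * which sizes the array as rw_index_sz * length bytes, one slot per ring.
+ */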
+
+/**
+ * Called on checking for 'completion' messages from the dongle. Returns next host buffer to read
+ * from, or NULL if there are no more messages to read.
+ */
+static uint8*
+dhd_prot_get_read_addr(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint32 *available_len)
+{
+ uint16 wr;
+ uint16 rd;
+ uint16 depth;
+ uint16 items;
+ void *read_addr = NULL; /* address of next msg to be read in ring */
+ uint16 d2h_wr = 0;
+
+ DHD_TRACE(("%s: d2h_dma_indx_rd_buf %p, d2h_dma_indx_wr_buf %p\n",
+ __FUNCTION__, (uint32 *)(dhd->prot->d2h_dma_indx_rd_buf.va),
+ (uint32 *)(dhd->prot->d2h_dma_indx_wr_buf.va)));
+
+ /* Remember the read index in a variable.
+ * This is because ring->rd gets updated at the end of this function,
+ * so without this copy it would be impossible to print the exact
+ * read index from which the message was read.
+ */
+ ring->curr_rd = ring->rd;
+
+ /* update write pointer */
+ if (dhd->dma_d2h_ring_upd_support) {
+ /* DMAing write/read indices supported */
+ d2h_wr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
+ ring->wr = d2h_wr;
+ } else {
+ dhd_bus_cmn_readshared(dhd->bus, &(ring->wr), RING_WR_UPD, ring->idx);
+ }
+
+ wr = ring->wr;
+ rd = ring->rd;
+ depth = ring->max_items;
+
+ /* check for avail space, in number of ring items */
+ items = READ_AVAIL_SPACE(wr, rd, depth);
+ if (items == 0)
+ return NULL;
+
+ /*
+ * Note that there are builds where ASSERT translates to just a printk,
+ * so even if we hit this condition we would never halt. In that case
+ * dhd_prot_process_msgtype can get into a long loop.
+ */
+ if (items > ring->max_items) {
+ DHD_ERROR(("\r\n======================= \r\n"));
+ DHD_ERROR(("%s(): ring %p, ring->name %s, ring->max_items %d, items %d \r\n",
+ __FUNCTION__, ring, ring->name, ring->max_items, items));
+ DHD_ERROR(("wr: %d, rd: %d, depth: %d \r\n", wr, rd, depth));
+ DHD_ERROR(("dhd->busstate %d bus->wait_for_d3_ack %d \r\n",
+ dhd->busstate, dhd->bus->wait_for_d3_ack));
+ DHD_ERROR(("\r\n======================= \r\n"));
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+ if (wr >= ring->max_items) {
+ dhd->bus->read_shm_fail = TRUE;
+ }
+#else
+#ifdef DHD_FW_COREDUMP
+ if (dhd->memdump_enabled) {
+ /* collect core dump */
+ dhd->memdump_type = DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR;
+ dhd_bus_mem_dump(dhd);
+
+ }
+#endif /* DHD_FW_COREDUMP */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+
+ *available_len = 0;
+ dhd_schedule_reset(dhd);
+
+ return NULL;
+ }
+
+ /* if space is available, calculate address to be read */
+ read_addr = (char*)ring->dma_buf.va + (rd * ring->item_len);
+
+ /* update read pointer */
+ if ((ring->rd + items) >= ring->max_items)
+ ring->rd = 0;
+ else
+ ring->rd += items;
+
+ ASSERT(ring->rd < ring->max_items);
+
+ /* convert items to bytes : available_len must be 32bits */
+ *available_len = (uint32)(items * ring->item_len);
+
+ /* XXX Double cache invalidate for ARM with L2 cache/prefetch */
+ OSL_CACHE_INV(read_addr, *available_len);
+
+ /* return read address */
+ return read_addr;
+
+} /* dhd_prot_get_read_addr */
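+
+/*
+ * Worked example for the read logic above (editorial note, assuming
+ * READ_AVAIL_SPACE() reports only the contiguous span up to the ring end):
+ * with depth 256, rd 250 and wr 4, the first call returns 6 items
+ * (250..255) and wraps ring->rd to 0; the next call returns the remaining 4.
+ */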
+
+/**
+ * dhd_prot_h2d_mbdata_send_ctrlmsg is a non-atomic function,
+ * make sure the callers always hold appropriate locks.
+ */
+int dhd_prot_h2d_mbdata_send_ctrlmsg(dhd_pub_t *dhd, uint32 mb_data)
+{
+ h2d_mailbox_data_t *h2d_mb_data;
+ uint16 alloced = 0;
+ msgbuf_ring_t *ctrl_ring = &dhd->prot->h2dring_ctrl_subn;
+ unsigned long flags;
+ int num_post = 1;
+ int i;
+
+ DHD_MSGBUF_INFO(("%s Sending H2D MB data Req data 0x%04x\n",
+ __FUNCTION__, mb_data));
+ if (!ctrl_ring->inited) {
+ DHD_ERROR(("%s: Ctrl Submit Ring: not inited\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+#ifdef PCIE_INB_DW
+ if ((INBAND_DW_ENAB(dhd->bus)) &&
+ (dhdpcie_bus_get_pcie_inband_dw_state(dhd->bus) ==
+ DW_DEVICE_DS_DEV_SLEEP)) {
+ if (mb_data == H2D_HOST_CONS_INT) {
+ /* One additional device_wake post needed */
+ num_post = 2;
+ }
+ }
+#endif /* PCIE_INB_DW */
+
+ for (i = 0; i < num_post; i ++) {
+ DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
+ /* Request for ring buffer space */
+ h2d_mb_data = (h2d_mailbox_data_t *)dhd_prot_alloc_ring_space(dhd,
+ ctrl_ring, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,
+ &alloced, FALSE);
+
+ if (h2d_mb_data == NULL) {
+ DHD_ERROR(("%s: FATAL: No space in control ring to send H2D Mb data\n",
+ __FUNCTION__));
+ DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
+ return BCME_NOMEM;
+ }
+
+ memset(h2d_mb_data, 0, sizeof(h2d_mailbox_data_t));
+ /* Common msg buf hdr */
+ h2d_mb_data->msg.msg_type = MSG_TYPE_H2D_MAILBOX_DATA;
+ h2d_mb_data->msg.flags = ctrl_ring->current_phase;
+
+ h2d_mb_data->msg.epoch =
+ ctrl_ring->seqnum % H2D_EPOCH_MODULO;
+ ctrl_ring->seqnum++;
+
+#ifdef PCIE_INB_DW
+ /* post device_wake first */
+ if ((num_post == 2) && (i == 0)) {
+ h2d_mb_data->mail_box_data = htol32(H2DMB_DS_DEVICE_WAKE);
+ } else
+#endif /* PCIE_INB_DW */
+ {
+ h2d_mb_data->mail_box_data = htol32(mb_data);
+ }
+
+ DHD_MSGBUF_INFO(("%s Send H2D MB data Req data 0x%04x\n", __FUNCTION__, mb_data));
+
+ /* upd wrt ptr and raise interrupt */
+ dhd_prot_ring_write_complete_mbdata(dhd, ctrl_ring, h2d_mb_data,
+ DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, mb_data);
+
+ DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
+
+#ifdef PCIE_INB_DW
+ /* Add a delay if device_wake is posted */
+ if ((num_post == 2) && (i == 0)) {
+ OSL_DELAY(1000);
+ }
+#endif /* PCIE_INB_DW */
+ }
+ return 0;
+}
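+
+/*
+ * Editorial note on the PCIE_INB_DW path above: when the inband link is in
+ * deep sleep and the caller requests H2D_HOST_CONS_INT, the loop first
+ * posts H2DMB_DS_DEVICE_WAKE, waits 1 ms for the dongle to exit deep
+ * sleep, and only then posts the caller's mailbox data.
+ */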
+
+/** Creates a flow ring and informs dongle of this event */
+int
+dhd_prot_flow_ring_create(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
+{
+ tx_flowring_create_request_t *flow_create_rqst;
+ msgbuf_ring_t *flow_ring;
+ dhd_prot_t *prot = dhd->prot;
+ unsigned long flags;
+ uint16 alloced = 0;
+ msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
+ uint16 max_flowrings = dhd->bus->max_tx_flowrings;
+
+ /* Fetch a pre-initialized msgbuf_ring from the flowring pool */
+ flow_ring = dhd_prot_flowrings_pool_fetch(dhd, flow_ring_node->flowid);
+ if (flow_ring == NULL) {
+ DHD_ERROR(("%s: dhd_prot_flowrings_pool_fetch TX Flowid %d failed\n",
+ __FUNCTION__, flow_ring_node->flowid));
+ return BCME_NOMEM;
+ }
+
+#ifdef PCIE_INB_DW
+ if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
+ return BCME_ERROR;
+#endif /* PCIE_INB_DW */
+ DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
+
+ /* Request for ctrl_ring buffer space */
+ flow_create_rqst = (tx_flowring_create_request_t *)
+ dhd_prot_alloc_ring_space(dhd, ctrl_ring, 1, &alloced, FALSE);
+
+ if (flow_create_rqst == NULL) {
+ dhd_prot_flowrings_pool_release(dhd, flow_ring_node->flowid, flow_ring);
+ DHD_ERROR(("%s: Flow Create Req flowid %d - failure ring space\n",
+ __FUNCTION__, flow_ring_node->flowid));
+ DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+ return BCME_NOMEM;
+ }
+
+ flow_ring_node->prot_info = (void *)flow_ring;
+
+ /* Common msg buf hdr */
+ flow_create_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_CREATE;
+ flow_create_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
+ flow_create_rqst->msg.request_id = htol32(0); /* TBD */
+ flow_create_rqst->msg.flags = ctrl_ring->current_phase;
+
+ flow_create_rqst->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
+ ctrl_ring->seqnum++;
+
+ /* Update flow create message */
+ flow_create_rqst->tid = flow_ring_node->flow_info.tid;
+ flow_create_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
+ memcpy(flow_create_rqst->sa, flow_ring_node->flow_info.sa, sizeof(flow_create_rqst->sa));
+ memcpy(flow_create_rqst->da, flow_ring_node->flow_info.da, sizeof(flow_create_rqst->da));
+ /* CAUTION: ring::base_addr already in Little Endian */
+ flow_create_rqst->flow_ring_ptr.low_addr = flow_ring->base_addr.low_addr;
+ flow_create_rqst->flow_ring_ptr.high_addr = flow_ring->base_addr.high_addr;
+ flow_create_rqst->max_items = htol16(flow_ring->max_items);
+ flow_create_rqst->len_item = htol16(H2DRING_TXPOST_ITEMSIZE);
+ flow_create_rqst->if_flags = 0;
+
+#ifdef DHD_HP2P
+ /* Create HPP flow ring if HP2P is enabled and TID=7 and AWDL interface */
+ /* and traffic is not multicast */
+ /* Allow infra interface only if user enabled hp2p_infra_enable thru iovar */
+ if (dhd->hp2p_capable && dhd->hp2p_ring_more &&
+ flow_ring_node->flow_info.tid == HP2P_PRIO &&
+ (dhd->hp2p_infra_enable || flow_create_rqst->msg.if_id) &&
+ !ETHER_ISMULTI(flow_create_rqst->da)) {
+ flow_create_rqst->if_flags |= BCMPCIE_FLOW_RING_INTF_HP2P;
+ flow_ring_node->hp2p_ring = TRUE;
+ /* Allow multiple HP2P Flow if mf override is enabled */
+ if (!dhd->hp2p_mf_enable) {
+ dhd->hp2p_ring_more = FALSE;
+ }
+
+ DHD_ERROR(("%s: flow ring for HP2P tid = %d flowid = %d\n",
+ __FUNCTION__, flow_ring_node->flow_info.tid,
+ flow_ring_node->flowid));
+ }
+#endif /* DHD_HP2P */
+
+ /* definition for ifrm mask : bit0:d11ac core, bit1:d11ad core
+ * currently it is not used for priority. so uses solely for ifrm mask
+ */
+ if (IFRM_ACTIVE(dhd))
+ flow_create_rqst->priority_ifrmmask = (1 << IFRM_DEV_0);
+
+ DHD_ERROR(("%s: Send Flow Create Req flow ID %d for peer " MACDBG
+ " prio %d ifindex %d items %d\n", __FUNCTION__, flow_ring_node->flowid,
+ MAC2STRDBG(flow_ring_node->flow_info.da), flow_ring_node->flow_info.tid,
+ flow_ring_node->flow_info.ifindex, flow_ring->max_items));
+
+ /* Update the flow_ring's WRITE index */
+ if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) {
+ dhd_prot_dma_indx_set(dhd, flow_ring->wr,
+ H2D_DMA_INDX_WR_UPD, flow_ring->idx);
+ } else if (IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(flow_ring->idx, max_flowrings)) {
+ dhd_prot_dma_indx_set(dhd, flow_ring->wr,
+ H2D_IFRM_INDX_WR_UPD, flow_ring->idx);
+ } else {
+ dhd_bus_cmn_writeshared(dhd->bus, &(flow_ring->wr),
+ sizeof(uint16), RING_WR_UPD, flow_ring->idx);
+ }
+
+ /* update control subn ring's WR index and ring doorbell to dongle */
+ dhd_prot_ring_write_complete(dhd, ctrl_ring, flow_create_rqst, 1);
+
+ DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
+
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+ return BCME_OK;
+} /* dhd_prot_flow_ring_create */
+
+/** called on receiving MSG_TYPE_FLOW_RING_CREATE_CMPLT message from dongle */
+static void
+dhd_prot_flow_ring_create_response_process(dhd_pub_t *dhd, void *msg)
+{
+ tx_flowring_create_response_t *flow_create_resp = (tx_flowring_create_response_t *)msg;
+
+ DHD_ERROR(("%s: Flow Create Response status = %d Flow %d\n", __FUNCTION__,
+ ltoh16(flow_create_resp->cmplt.status),
+ ltoh16(flow_create_resp->cmplt.flow_ring_id)));
+
+ dhd_bus_flow_ring_create_response(dhd->bus,
+ ltoh16(flow_create_resp->cmplt.flow_ring_id),
+ ltoh16(flow_create_resp->cmplt.status));
+}
+
+#if !defined(BCM_ROUTER_DHD)
+static void
+dhd_prot_process_h2d_ring_create_complete(dhd_pub_t *dhd, void *buf)
+{
+ h2d_ring_create_response_t *resp = (h2d_ring_create_response_t *)buf;
+ DHD_INFO(("%s ring create Response status = %d ring %d, id 0x%04x\n", __FUNCTION__,
+ ltoh16(resp->cmplt.status),
+ ltoh16(resp->cmplt.ring_id),
+ ltoh32(resp->cmn_hdr.request_id)));
+ if ((ltoh32(resp->cmn_hdr.request_id) != DHD_H2D_DBGRING_REQ_PKTID) &&
+ (ltoh32(resp->cmn_hdr.request_id) != DHD_H2D_BTLOGRING_REQ_PKTID)) {
+ DHD_ERROR(("invalid request ID with h2d ring create complete\n"));
+ return;
+ }
+ if (dhd->prot->h2dring_info_subn->create_req_id == ltoh32(resp->cmn_hdr.request_id) &&
+ !dhd->prot->h2dring_info_subn->create_pending) {
+ DHD_ERROR(("info ring create status for not pending submit ring\n"));
+ }
+#ifdef BTLOG
+ if (dhd->prot->h2dring_btlog_subn &&
+ dhd->prot->h2dring_btlog_subn->create_req_id == ltoh32(resp->cmn_hdr.request_id) &&
+ !dhd->prot->h2dring_btlog_subn->create_pending) {
+ DHD_ERROR(("btlog ring create status for not pending submit ring\n"));
+ }
+#endif /* BTLOG */
+
+ if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
+ DHD_ERROR(("info/btlog ring create failed with status %d\n",
+ ltoh16(resp->cmplt.status)));
+ return;
+ }
+ if (dhd->prot->h2dring_info_subn->create_req_id == ltoh32(resp->cmn_hdr.request_id)) {
+ dhd->prot->h2dring_info_subn->create_pending = FALSE;
+ dhd->prot->h2dring_info_subn->inited = TRUE;
+ DHD_ERROR(("info buffer post after ring create\n"));
+ dhd_prot_infobufpost(dhd, dhd->prot->h2dring_info_subn);
+ }
+#ifdef BTLOG
+ if (dhd->prot->h2dring_btlog_subn &&
+ dhd->prot->h2dring_btlog_subn->create_req_id == ltoh32(resp->cmn_hdr.request_id)) {
+ dhd->prot->h2dring_btlog_subn->create_pending = FALSE;
+ dhd->prot->h2dring_btlog_subn->inited = TRUE;
+ DHD_ERROR(("btlog buffer post after ring create\n"));
+ dhd_prot_infobufpost(dhd, dhd->prot->h2dring_btlog_subn);
+ }
+#endif /* BTLOG */
+}
+#endif /* !BCM_ROUTER_DHD */
+
+static void
+dhd_prot_process_d2h_ring_create_complete(dhd_pub_t *dhd, void *buf)
+{
+ d2h_ring_create_response_t *resp = (d2h_ring_create_response_t *)buf;
+ DHD_ERROR(("%s ring create Response status = %d ring %d, id 0x%04x\n", __FUNCTION__,
+ ltoh16(resp->cmplt.status),
+ ltoh16(resp->cmplt.ring_id),
+ ltoh32(resp->cmn_hdr.request_id)));
+ if ((ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_DBGRING_REQ_PKTID) &&
+ (ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_BTLOGRING_REQ_PKTID) &&
+#ifdef DHD_HP2P
+ (ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_HPPRING_TXREQ_PKTID) &&
+ (ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_HPPRING_RXREQ_PKTID) &&
+#endif /* DHD_HP2P */
+ TRUE) {
+ DHD_ERROR(("invalid request ID with d2h ring create complete\n"));
+ return;
+ }
+ if (ltoh32(resp->cmn_hdr.request_id) == DHD_D2H_DBGRING_REQ_PKTID) {
+#ifdef EWP_EDL
+ if (!dhd->dongle_edl_support)
+#endif
+ {
+ if (!dhd->prot->d2hring_info_cpln->create_pending) {
+ DHD_ERROR(("info ring create status for not pending cpl ring\n"));
+ return;
+ }
+
+ if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
+ DHD_ERROR(("info cpl ring create failed with status %d\n",
+ ltoh16(resp->cmplt.status)));
+ return;
+ }
+ dhd->prot->d2hring_info_cpln->create_pending = FALSE;
+ dhd->prot->d2hring_info_cpln->inited = TRUE;
+ }
+#ifdef EWP_EDL
+ else {
+ if (!dhd->prot->d2hring_edl->create_pending) {
+ DHD_ERROR(("edl ring create status for not pending cpl ring\n"));
+ return;
+ }
+
+ if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
+ DHD_ERROR(("edl cpl ring create failed with status %d\n",
+ ltoh16(resp->cmplt.status)));
+ return;
+ }
+ dhd->prot->d2hring_edl->create_pending = FALSE;
+ dhd->prot->d2hring_edl->inited = TRUE;
+ }
+#endif /* EWP_EDL */
+ }
+
+#ifdef BTLOG
+ if (ltoh32(resp->cmn_hdr.request_id) == DHD_D2H_BTLOGRING_REQ_PKTID) {
+ if (!dhd->prot->d2hring_btlog_cpln->create_pending) {
+ DHD_ERROR(("btlog ring create status for not pending cpl ring\n"));
+ return;
+ }
+
+ if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
+ DHD_ERROR(("btlog cpl ring create failed with status %d\n",
+ ltoh16(resp->cmplt.status)));
+ return;
+ }
+ dhd->prot->d2hring_btlog_cpln->create_pending = FALSE;
+ dhd->prot->d2hring_btlog_cpln->inited = TRUE;
+ }
+#endif /* BTLOG */
+#ifdef DHD_HP2P
+ if (dhd->prot->d2hring_hp2p_txcpl &&
+ ltoh32(resp->cmn_hdr.request_id) == DHD_D2H_HPPRING_TXREQ_PKTID) {
+ if (!dhd->prot->d2hring_hp2p_txcpl->create_pending) {
+ DHD_ERROR(("HPP tx ring create status for not pending cpl ring\n"));
+ return;
+ }
+
+ if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
+ DHD_ERROR(("HPP tx cpl ring create failed with status %d\n",
+ ltoh16(resp->cmplt.status)));
+ return;
+ }
+ dhd->prot->d2hring_hp2p_txcpl->create_pending = FALSE;
+ dhd->prot->d2hring_hp2p_txcpl->inited = TRUE;
+ }
+ if (dhd->prot->d2hring_hp2p_rxcpl &&
+ ltoh32(resp->cmn_hdr.request_id) == DHD_D2H_HPPRING_RXREQ_PKTID) {
+ if (!dhd->prot->d2hring_hp2p_rxcpl->create_pending) {
+ DHD_ERROR(("HPP rx ring create status for not pending cpl ring\n"));
+ return;
+ }
+
+ if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
+ DHD_ERROR(("HPP rx cpl ring create failed with status %d\n",
+ ltoh16(resp->cmplt.status)));
+ return;
+ }
+ dhd->prot->d2hring_hp2p_rxcpl->create_pending = FALSE;
+ dhd->prot->d2hring_hp2p_rxcpl->inited = TRUE;
+ }
+#endif /* DHD_HP2P */
+}
+
+static void
+dhd_prot_process_d2h_mb_data(dhd_pub_t *dhd, void* buf)
+{
+ d2h_mailbox_data_t *d2h_data;
+
+ d2h_data = (d2h_mailbox_data_t *)buf;
+ DHD_MSGBUF_INFO(("%s dhd_prot_process_d2h_mb_data, 0x%04x\n", __FUNCTION__,
+ d2h_data->d2h_mailbox_data));
+ dhd_bus_handle_mb_data(dhd->bus, d2h_data->d2h_mailbox_data);
+}
+
+static void
+dhd_prot_process_d2h_host_ts_complete(dhd_pub_t *dhd, void* buf)
+{
+#ifdef DHD_TIMESYNC
+ host_timestamp_msg_cpl_t *host_ts_cpl;
+ uint32 pktid;
+ dhd_prot_t *prot = dhd->prot;
+
+ host_ts_cpl = (host_timestamp_msg_cpl_t *)buf;
+ DHD_INFO(("%s host TS cpl: status %d, req_ID: 0x%04x, xt_id %d \n", __FUNCTION__,
+ host_ts_cpl->cmplt.status, host_ts_cpl->msg.request_id, host_ts_cpl->xt_id));
+
+ pktid = ltoh32(host_ts_cpl->msg.request_id);
+ if (prot->hostts_req_buf_inuse == FALSE) {
+ DHD_ERROR(("No Pending Host TS req, but completion\n"));
+ return;
+ }
+ prot->hostts_req_buf_inuse = FALSE;
+ if (pktid != DHD_H2D_HOSTTS_REQ_PKTID) {
+ DHD_ERROR(("Host TS req CPL, but req ID different 0x%04x, exp 0x%04x\n",
+ pktid, DHD_H2D_HOSTTS_REQ_PKTID));
+ return;
+ }
+ dhd_timesync_handle_host_ts_complete(dhd->ts, host_ts_cpl->xt_id,
+ host_ts_cpl->cmplt.status);
+#else /* DHD_TIMESYNC */
+ DHD_ERROR(("Timesunc feature not compiled in but GOT HOST_TS_COMPLETE\n"));
+#endif /* DHD_TIMESYNC */
+
+}
+
+/** called on e.g. flow ring delete */
+void dhd_prot_clean_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info)
+{
+ msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)msgbuf_flow_info;
+ dhd_prot_ring_detach(dhd, flow_ring);
+ DHD_INFO(("%s Cleaning up Flow \n", __FUNCTION__));
+}
+
+void dhd_prot_print_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info, bool h2d,
+ struct bcmstrbuf *strbuf, const char * fmt)
+{
+ const char *default_fmt =
+ "TRD:%d HLRD:%d HDRD:%d TWR:%d HLWR:%d HDWR:%d BASE(VA) %p BASE(PA) %x:%x SIZE %d "
+ "WORK_ITEM_SIZE %d MAX_WORK_ITEMS %d TOTAL_SIZE %d\n";
+ msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)msgbuf_flow_info;
+ uint16 rd, wr, drd = 0, dwr = 0;
+ uint32 dma_buf_len = flow_ring->max_items * flow_ring->item_len;
+
+ if (fmt == NULL) {
+ fmt = default_fmt;
+ }
+
+ if (dhd->bus->is_linkdown) {
+ DHD_ERROR(("%s: Skip dumping flowring due to Link down\n", __FUNCTION__));
+ return;
+ }
+
+ dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, flow_ring->idx);
+ dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, flow_ring->idx);
+ if (dhd->dma_d2h_ring_upd_support) {
+ if (h2d) {
+ drd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, flow_ring->idx);
+ dwr = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_WR_UPD, flow_ring->idx);
+ } else {
+ drd = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, flow_ring->idx);
+ dwr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, flow_ring->idx);
+ }
+ }
+ bcm_bprintf(strbuf, fmt, rd, flow_ring->rd, drd, wr, flow_ring->wr, dwr,
+ flow_ring->dma_buf.va,
+ ltoh32(flow_ring->base_addr.high_addr),
+ ltoh32(flow_ring->base_addr.low_addr),
+ flow_ring->item_len, flow_ring->max_items,
+ dma_buf_len);
+}
+
+void dhd_prot_print_info(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
+{
+ dhd_prot_t *prot = dhd->prot;
+ bcm_bprintf(strbuf, "IPCrevs: Dev %d, \t Host %d, \tactive %d\n",
+ dhd->prot->device_ipc_version,
+ dhd->prot->host_ipc_version,
+ dhd->prot->active_ipc_version);
+
+ bcm_bprintf(strbuf, "max Host TS bufs to post: %d, \t posted %d \n",
+ dhd->prot->max_tsbufpost, dhd->prot->cur_ts_bufs_posted);
+ bcm_bprintf(strbuf, "max INFO bufs to post: %d, \t posted %d \n",
+ dhd->prot->max_infobufpost, dhd->prot->infobufpost);
+#ifdef BTLOG
+ bcm_bprintf(strbuf, "max BTLOG bufs to post: %d, \t posted %d \n",
+ dhd->prot->max_btlogbufpost, dhd->prot->btlogbufpost);
+#endif /* BTLOG */
+ bcm_bprintf(strbuf, "max event bufs to post: %d, \t posted %d \n",
+ dhd->prot->max_eventbufpost, dhd->prot->cur_event_bufs_posted);
+ bcm_bprintf(strbuf, "max ioctlresp bufs to post: %d, \t posted %d \n",
+ dhd->prot->max_ioctlrespbufpost, dhd->prot->cur_ioctlresp_bufs_posted);
+ bcm_bprintf(strbuf, "max RX bufs to post: %d, \t posted %d \n",
+ dhd->prot->max_rxbufpost, dhd->prot->rxbufpost);
+
+ bcm_bprintf(strbuf, "Total RX bufs posted: %d, \t RX cpl got %d \n",
+ dhd->prot->tot_rxbufpost, dhd->prot->tot_rxcpl);
+
+ bcm_bprintf(strbuf, "Total TX packets: %lu, \t TX cpl got %lu \n",
+ dhd->actual_tx_pkts, dhd->tot_txcpl);
+
+ bcm_bprintf(strbuf,
+ "%14s %18s %18s %17s %17s %14s %14s %10s\n",
+ "Type", "TRD: HLRD: HDRD", "TWR: HLWR: HDWR", "BASE(VA)", "BASE(PA)",
+ "WORK_ITEM_SIZE", "MAX_WORK_ITEMS", "TOTAL_SIZE");
+ bcm_bprintf(strbuf, "%14s", "H2DCtrlPost");
+ dhd_prot_print_flow_ring(dhd, &prot->h2dring_ctrl_subn, TRUE, strbuf,
+ " %5d:%5d:%5d %5d:%5d:%5d %17p %8x:%8x %14d %14d %10d\n");
+ bcm_bprintf(strbuf, "%14s", "D2HCtrlCpl");
+ dhd_prot_print_flow_ring(dhd, &prot->d2hring_ctrl_cpln, FALSE, strbuf,
+ " %5d:%5d:%5d %5d:%5d:%5d %17p %8x:%8x %14d %14d %10d\n");
+ bcm_bprintf(strbuf, "%14s", "H2DRxPost");
+ dhd_prot_print_flow_ring(dhd, &prot->h2dring_rxp_subn, TRUE, strbuf,
+ " %5d:%5d:%5d %5d:%5d:%5d %17p %8x:%8x %14d %14d %10d\n");
+ bcm_bprintf(strbuf, "%14s", "D2HRxCpl");
+ dhd_prot_print_flow_ring(dhd, &prot->d2hring_rx_cpln, FALSE, strbuf,
+ " %5d:%5d:%5d %5d:%5d:%5d %17p %8x:%8x %14d %14d %10d\n");
+ bcm_bprintf(strbuf, "%14s", "D2HTxCpl");
+ dhd_prot_print_flow_ring(dhd, &prot->d2hring_tx_cpln, FALSE, strbuf,
+ " %5d:%5d:%5d %5d:%5d:%5d %17p %8x:%8x %14d %14d %10d\n");
+ if (dhd->prot->h2dring_info_subn != NULL && dhd->prot->d2hring_info_cpln != NULL) {
+ bcm_bprintf(strbuf, "%14s", "H2DRingInfoSub");
+ dhd_prot_print_flow_ring(dhd, prot->h2dring_info_subn, TRUE, strbuf,
+ " %5d:%5d:%5d %5d:%5d:%5d %17p %8x:%8x %14d %14d %10d\n");
+ bcm_bprintf(strbuf, "%14s", "D2HRingInfoCpl");
+ dhd_prot_print_flow_ring(dhd, prot->d2hring_info_cpln, FALSE, strbuf,
+ " %5d:%5d:%5d %5d:%5d:%5d %17p %8x:%8x %14d %14d %10d\n");
+ }
+ if (dhd->prot->d2hring_edl != NULL) {
+ bcm_bprintf(strbuf, "%14s", "D2HRingEDL");
+ dhd_prot_print_flow_ring(dhd, prot->d2hring_edl, FALSE, strbuf,
+ " %5d:%5d:%5d %5d:%5d:%5d %17p %8x:%8x %14d %14d %10d\n");
+ }
+
+ bcm_bprintf(strbuf, "active_tx_count %d pktidmap_avail(ctrl/rx/tx) %d %d %d\n",
+ OSL_ATOMIC_READ(dhd->osh, &dhd->prot->active_tx_count),
+ DHD_PKTID_AVAIL(dhd->prot->pktid_ctrl_map),
+ DHD_PKTID_AVAIL(dhd->prot->pktid_rx_map),
+ DHD_PKTID_AVAIL(dhd->prot->pktid_tx_map));
+
+#if defined(BCMINTERNAL) && defined(DHD_DBG_DUMP)
+ dhd_prot_ioctl_dump(dhd->prot, strbuf);
+#endif /* defined(BCMINTERNAL) && defined(DHD_DBG_DUMP) */
+#ifdef DHD_MMIO_TRACE
+ dhd_dump_bus_mmio_trace(dhd->bus, strbuf);
+#endif /* DHD_MMIO_TRACE */
+ dhd_dump_bus_ds_trace(dhd->bus, strbuf);
+#ifdef DHD_FLOW_RING_STATUS_TRACE
+ dhd_dump_bus_flow_ring_status_isr_trace(dhd->bus, strbuf);
+ dhd_dump_bus_flow_ring_status_dpc_trace(dhd->bus, strbuf);
+#endif /* DHD_FLOW_RING_STATUS_TRACE */
+}
+
+int
+dhd_prot_flow_ring_delete(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
+{
+ tx_flowring_delete_request_t *flow_delete_rqst;
+ dhd_prot_t *prot = dhd->prot;
+ unsigned long flags;
+ uint16 alloced = 0;
+ msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
+
+#ifdef PCIE_INB_DW
+ if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
+ return BCME_ERROR;
+#endif /* PCIE_INB_DW */
+
+ DHD_RING_LOCK(ring->ring_lock, flags);
+
+ /* Request for ring buffer space */
+ flow_delete_rqst = (tx_flowring_delete_request_t *)
+ dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
+
+ if (flow_delete_rqst == NULL) {
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+ DHD_ERROR(("%s: Flow Delete Req - failure ring space\n", __FUNCTION__));
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+ return BCME_NOMEM;
+ }
+
+ /* Common msg buf hdr */
+ flow_delete_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_DELETE;
+ flow_delete_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
+ flow_delete_rqst->msg.request_id = htol32(0); /* TBD */
+ flow_delete_rqst->msg.flags = ring->current_phase;
+
+ flow_delete_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
+ ring->seqnum++;
+
+ /* Update Delete info */
+ flow_delete_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
+ flow_delete_rqst->reason = htol16(BCME_OK);
+
+ DHD_ERROR(("%s: Send Flow Delete Req RING ID %d for peer %pM"
+ " prio %d ifindex %d\n", __FUNCTION__, flow_ring_node->flowid,
+ flow_ring_node->flow_info.da, flow_ring_node->flow_info.tid,
+ flow_ring_node->flow_info.ifindex));
+
+ /* update ring's WR index and ring doorbell to dongle */
+ dhd_prot_ring_write_complete(dhd, ring, flow_delete_rqst, 1);
+
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+ return BCME_OK;
+}
+
+static void
+BCMFASTPATH(dhd_prot_flow_ring_fastdelete)(dhd_pub_t *dhd, uint16 flowid, uint16 rd_idx)
+{
+ flow_ring_node_t *flow_ring_node = DHD_FLOW_RING(dhd, flowid);
+ msgbuf_ring_t *ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
+ host_txbuf_cmpl_t txstatus;
+ host_txbuf_post_t *txdesc;
+ uint16 wr_idx;
+
+ DHD_INFO(("%s: FAST delete ring, flowid=%d, rd_idx=%d, wr_idx=%d\n",
+ __FUNCTION__, flowid, rd_idx, ring->wr));
+
+ memset(&txstatus, 0, sizeof(txstatus));
+ txstatus.compl_hdr.flow_ring_id = flowid;
+ txstatus.cmn_hdr.if_id = flow_ring_node->flow_info.ifindex;
+ wr_idx = ring->wr;
+
+ while (wr_idx != rd_idx) {
+ if (wr_idx)
+ wr_idx--;
+ else
+ wr_idx = ring->max_items - 1;
+ txdesc = (host_txbuf_post_t *)((char *)DHD_RING_BGN_VA(ring) +
+ (wr_idx * ring->item_len));
+ txstatus.cmn_hdr.request_id = txdesc->cmn_hdr.request_id;
+ dhd_prot_txstatus_process(dhd, &txstatus);
+ }
+}
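+
+/*
+ * Editorial example of the walk above: with max_items 8, wr 2 and rd_idx 6,
+ * wr_idx visits 1, 0, 7 and finally 6, faking a tx completion for every
+ * posted-but-unacknowledged descriptor in reverse post order.
+ */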
+
+static void
+dhd_prot_flow_ring_delete_response_process(dhd_pub_t *dhd, void *msg)
+{
+ tx_flowring_delete_response_t *flow_delete_resp = (tx_flowring_delete_response_t *)msg;
+
+ DHD_ERROR(("%s: Flow Delete Response status = %d Flow %d\n", __FUNCTION__,
+ flow_delete_resp->cmplt.status, flow_delete_resp->cmplt.flow_ring_id));
+
+ if (dhd->fast_delete_ring_support) {
+ dhd_prot_flow_ring_fastdelete(dhd, flow_delete_resp->cmplt.flow_ring_id,
+ flow_delete_resp->read_idx);
+ }
+ dhd_bus_flow_ring_delete_response(dhd->bus, flow_delete_resp->cmplt.flow_ring_id,
+ flow_delete_resp->cmplt.status);
+}
+
+static void
+dhd_prot_process_flow_ring_resume_response(dhd_pub_t *dhd, void* msg)
+{
+#ifdef IDLE_TX_FLOW_MGMT
+ tx_idle_flowring_resume_response_t *flow_resume_resp =
+ (tx_idle_flowring_resume_response_t *)msg;
+
+ DHD_ERROR(("%s Flow resume Response status = %d Flow %d\n", __FUNCTION__,
+ flow_resume_resp->cmplt.status, flow_resume_resp->cmplt.flow_ring_id));
+
+ dhd_bus_flow_ring_resume_response(dhd->bus, flow_resume_resp->cmplt.flow_ring_id,
+ flow_resume_resp->cmplt.status);
+#endif /* IDLE_TX_FLOW_MGMT */
+}
+
+static void
+dhd_prot_process_flow_ring_suspend_response(dhd_pub_t *dhd, void* msg)
+{
+#ifdef IDLE_TX_FLOW_MGMT
+ int16 status;
+ tx_idle_flowring_suspend_response_t *flow_suspend_resp =
+ (tx_idle_flowring_suspend_response_t *)msg;
+ status = flow_suspend_resp->cmplt.status;
+
+ DHD_ERROR(("%s Flow id %d suspend Response status = %d\n",
+ __FUNCTION__, flow_suspend_resp->cmplt.flow_ring_id,
+ status));
+
+ if (status != BCME_OK) {
+ DHD_ERROR(("%s Error in suspending flow rings!! "
+ "Dongle will still be polling idle rings!! Status = %d\n",
+ __FUNCTION__, status));
+ }
+#endif /* IDLE_TX_FLOW_MGMT */
+}
+
+int
+dhd_prot_flow_ring_flush(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
+{
+ tx_flowring_flush_request_t *flow_flush_rqst;
+ dhd_prot_t *prot = dhd->prot;
+ unsigned long flags;
+ uint16 alloced = 0;
+ msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
+
+#ifdef PCIE_INB_DW
+ if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
+ return BCME_ERROR;
+#endif /* PCIE_INB_DW */
+
+ DHD_RING_LOCK(ring->ring_lock, flags);
+
+ /* Request for ring buffer space */
+ flow_flush_rqst = (tx_flowring_flush_request_t *)
+ dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
+ if (flow_flush_rqst == NULL) {
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+ DHD_ERROR(("%s: Flow Flush Req - failure ring space\n", __FUNCTION__));
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+ return BCME_NOMEM;
+ }
+
+ /* Common msg buf hdr */
+ flow_flush_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_FLUSH;
+ flow_flush_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
+ flow_flush_rqst->msg.request_id = htol32(0); /* TBD */
+ flow_flush_rqst->msg.flags = ring->current_phase;
+ flow_flush_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
+ ring->seqnum++;
+
+ flow_flush_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
+ flow_flush_rqst->reason = htol16(BCME_OK);
+
+ DHD_INFO(("%s: Send Flow Flush Req\n", __FUNCTION__));
+
+ /* update ring's WR index and ring doorbell to dongle */
+ dhd_prot_ring_write_complete(dhd, ring, flow_flush_rqst, 1);
+
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+ return BCME_OK;
+} /* dhd_prot_flow_ring_flush */
+
+static void
+dhd_prot_flow_ring_flush_response_process(dhd_pub_t *dhd, void *msg)
+{
+ tx_flowring_flush_response_t *flow_flush_resp = (tx_flowring_flush_response_t *)msg;
+
+ DHD_INFO(("%s: Flow Flush Response status = %d\n", __FUNCTION__,
+ flow_flush_resp->cmplt.status));
+
+ dhd_bus_flow_ring_flush_response(dhd->bus, flow_flush_resp->cmplt.flow_ring_id,
+ flow_flush_resp->cmplt.status);
+}
+
+/**
+ * Request dongle to configure soft doorbells for D2H rings. Host populated soft
+ * doorbell information is transferred to dongle via the d2h ring config control
+ * message.
+ */
+void
+dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd_pub_t *dhd)
+{
+#if defined(DHD_D2H_SOFT_DOORBELL_SUPPORT)
+ uint16 ring_idx;
+ uint8 *msg_next;
+ void *msg_start;
+ uint16 alloced = 0;
+ unsigned long flags;
+ dhd_prot_t *prot = dhd->prot;
+ ring_config_req_t *ring_config_req;
+ bcmpcie_soft_doorbell_t *soft_doorbell;
+ msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
+ const uint16 d2h_rings = BCMPCIE_D2H_COMMON_MSGRINGS;
+
+#ifdef PCIE_INB_DW
+ if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
+ return;
+#endif /* PCIE_INB_DW */
+ /* Claim space for d2h_ring number of d2h_ring_config_req_t messages */
+ DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
+ msg_start = dhd_prot_alloc_ring_space(dhd, ctrl_ring, d2h_rings, &alloced, TRUE);
+
+ if (msg_start == NULL) {
+ DHD_ERROR(("%s Msgbuf no space for %d D2H ring config soft doorbells\n",
+ __FUNCTION__, d2h_rings));
+ DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+ return;
+ }
+
+ msg_next = (uint8*)msg_start;
+
+ for (ring_idx = 0; ring_idx < d2h_rings; ring_idx++) {
+
+ /* position the ring_config_req into the ctrl subm ring */
+ ring_config_req = (ring_config_req_t *)msg_next;
+
+ /* Common msg header */
+ ring_config_req->msg.msg_type = MSG_TYPE_D2H_RING_CONFIG;
+ ring_config_req->msg.if_id = 0;
+ ring_config_req->msg.flags = 0;
+
+ ring_config_req->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
+ ctrl_ring->seqnum++;
+
+ ring_config_req->msg.request_id = htol32(DHD_FAKE_PKTID); /* unused */
+
+ /* Ring Config subtype and d2h ring_id */
+ ring_config_req->subtype = htol16(D2H_RING_CONFIG_SUBTYPE_SOFT_DOORBELL);
+ ring_config_req->ring_id = htol16(DHD_D2H_RINGID(ring_idx));
+
+ /* Host soft doorbell configuration */
+ soft_doorbell = &prot->soft_doorbell[ring_idx];
+
+ ring_config_req->soft_doorbell.value = htol32(soft_doorbell->value);
+ ring_config_req->soft_doorbell.haddr.high =
+ htol32(soft_doorbell->haddr.high);
+ ring_config_req->soft_doorbell.haddr.low =
+ htol32(soft_doorbell->haddr.low);
+ ring_config_req->soft_doorbell.items = htol16(soft_doorbell->items);
+ ring_config_req->soft_doorbell.msecs = htol16(soft_doorbell->msecs);
+
+ DHD_INFO(("%s: Soft doorbell haddr 0x%08x 0x%08x value 0x%08x\n",
+ __FUNCTION__, ring_config_req->soft_doorbell.haddr.high,
+ ring_config_req->soft_doorbell.haddr.low,
+ ring_config_req->soft_doorbell.value));
+
+ msg_next = msg_next + ctrl_ring->item_len;
+ }
+
+ /* update control subn ring's WR index and ring doorbell to dongle */
+ dhd_prot_ring_write_complete(dhd, ctrl_ring, msg_start, d2h_rings);
+
+ DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
+
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+#endif /* DHD_D2H_SOFT_DOORBELL_SUPPORT */
+}
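+
+/*
+ * Hypothetical caller-side sketch (editorial; field names taken from the
+ * loop above): before this function runs, the host is expected to have
+ * populated prot->soft_doorbell[] for each D2H common ring, e.g.
+ *
+ *   bcmpcie_soft_doorbell_t *db = &prot->soft_doorbell[ring_idx];
+ *   db->haddr.high = ...;  db->haddr.low = ...;  // host address to write
+ *   db->value = ...;                             // value dongle writes
+ *   db->items = ...;  db->msecs = ...;           // coalescing thresholds
+ */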
+
+static void
+dhd_prot_process_d2h_ring_config_complete(dhd_pub_t *dhd, void *msg)
+{
+ DHD_INFO(("%s: Ring Config Response - status %d ringid %d\n",
+ __FUNCTION__, ltoh16(((ring_config_resp_t *)msg)->compl_hdr.status),
+ ltoh16(((ring_config_resp_t *)msg)->compl_hdr.flow_ring_id)));
+}
+
+#ifdef WL_CFGVENDOR_SEND_HANG_EVENT
+void
+copy_ext_trap_sig(dhd_pub_t *dhd, trap_t *tr)
+{
+ uint32 *ext_data = dhd->extended_trap_data;
+ hnd_ext_trap_hdr_t *hdr;
+ const bcm_tlv_t *tlv;
+
+ if (ext_data == NULL) {
+ return;
+ }
+ /* First word is original trap_data */
+ ext_data++;
+
+ /* Followed by the extended trap data header */
+ hdr = (hnd_ext_trap_hdr_t *)ext_data;
+
+ tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_SIGNATURE);
+ if (tlv) {
+ memcpy(tr, &tlv->data, sizeof(struct _trap_struct));
+ }
+}
+#define TRAP_T_NAME_OFFSET(var) {#var, OFFSETOF(trap_t, var)}
+
+typedef struct {
+ char name[HANG_INFO_TRAP_T_NAME_MAX];
+ uint32 offset;
+} hang_info_trap_t;
+
+#ifdef DHD_EWPR_VER2
+static hang_info_trap_t hang_info_trap_tbl[] = {
+ {"reason", 0},
+ {"ver", VENDOR_SEND_HANG_EXT_INFO_VER},
+ {"stype", 0},
+ TRAP_T_NAME_OFFSET(type),
+ TRAP_T_NAME_OFFSET(epc),
+ {"resrvd", 0},
+ {"resrvd", 0},
+ {"resrvd", 0},
+ {"resrvd", 0},
+ {"", 0}
+};
+#else
+static hang_info_trap_t hang_info_trap_tbl[] = {
+ {"reason", 0},
+ {"ver", VENDOR_SEND_HANG_EXT_INFO_VER},
+ {"stype", 0},
+ TRAP_T_NAME_OFFSET(type),
+ TRAP_T_NAME_OFFSET(epc),
+ TRAP_T_NAME_OFFSET(cpsr),
+ TRAP_T_NAME_OFFSET(spsr),
+ TRAP_T_NAME_OFFSET(r0),
+ TRAP_T_NAME_OFFSET(r1),
+ TRAP_T_NAME_OFFSET(r2),
+ TRAP_T_NAME_OFFSET(r3),
+ TRAP_T_NAME_OFFSET(r4),
+ TRAP_T_NAME_OFFSET(r5),
+ TRAP_T_NAME_OFFSET(r6),
+ TRAP_T_NAME_OFFSET(r7),
+ TRAP_T_NAME_OFFSET(r8),
+ TRAP_T_NAME_OFFSET(r9),
+ TRAP_T_NAME_OFFSET(r10),
+ TRAP_T_NAME_OFFSET(r11),
+ TRAP_T_NAME_OFFSET(r12),
+ TRAP_T_NAME_OFFSET(r13),
+ TRAP_T_NAME_OFFSET(r14),
+ TRAP_T_NAME_OFFSET(pc),
+ {"", 0}
+};
+#endif /* DHD_EWPR_VER2 */
+
+#define TAG_TRAP_IS_STATE(tag) \
+ ((tag == TAG_TRAP_MEMORY) || (tag == TAG_TRAP_PCIE_Q) || \
+ (tag == TAG_TRAP_WLC_STATE) || (tag == TAG_TRAP_LOG_DATA) || \
+ (tag == TAG_TRAP_CODE))
+
+static void
+copy_hang_info_head(char *dest, trap_t *src, int len, int field_name,
+ int *bytes_written, int *cnt, char *cookie)
+{
+ uint8 *ptr;
+ int remain_len;
+ int i;
+
+ ptr = (uint8 *)src;
+
+ memset(dest, 0, len);
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
+
+ /* hang reason, hang info ver */
+ for (i = 0; (i < HANG_INFO_TRAP_T_SUBTYPE_IDX) && (*cnt < HANG_FIELD_CNT_MAX);
+ i++, (*cnt)++) {
+ if (field_name) {
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
+ *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s:%c",
+ hang_info_trap_tbl[i].name, HANG_KEY_DEL);
+ }
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
+ *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%d%c",
+ hang_info_trap_tbl[i].offset, HANG_KEY_DEL);
+
+ }
+
+ if (*cnt < HANG_FIELD_CNT_MAX) {
+ if (field_name) {
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
+ *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s:%c",
+ "cookie", HANG_KEY_DEL);
+ }
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
+ *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s%c",
+ cookie, HANG_KEY_DEL);
+ (*cnt)++;
+ }
+
+ if (*cnt < HANG_FIELD_CNT_MAX) {
+ if (field_name) {
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
+ *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s:%c",
+ hang_info_trap_tbl[HANG_INFO_TRAP_T_SUBTYPE_IDX].name,
+ HANG_KEY_DEL);
+ }
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
+ *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%08x%c",
+ hang_info_trap_tbl[HANG_INFO_TRAP_T_SUBTYPE_IDX].offset,
+ HANG_KEY_DEL);
+ (*cnt)++;
+ }
+
+ if (*cnt < HANG_FIELD_CNT_MAX) {
+ if (field_name) {
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
+ *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s:%c",
+ hang_info_trap_tbl[HANG_INFO_TRAP_T_EPC_IDX].name,
+ HANG_KEY_DEL);
+ }
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
+ *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%08x%c",
+ *(uint32 *)
+ (ptr + hang_info_trap_tbl[HANG_INFO_TRAP_T_EPC_IDX].offset),
+ HANG_KEY_DEL);
+ (*cnt)++;
+ }
+#ifdef DHD_EWPR_VER2
+ /* put 0 for HG03 ~ HG06 (reserved for future use) */
+ for (i = 0; (i < HANG_INFO_BIGDATA_EXTRA_KEY) && (*cnt < HANG_FIELD_CNT_MAX);
+ i++, (*cnt)++) {
+ if (field_name) {
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
+ *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s:%c",
+ hang_info_trap_tbl[HANG_INFO_TRAP_T_EXTRA_KEY_IDX+i].name,
+ HANG_KEY_DEL);
+ }
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
+ *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%d%c",
+ hang_info_trap_tbl[HANG_INFO_TRAP_T_EXTRA_KEY_IDX+i].offset,
+ HANG_KEY_DEL);
+ }
+#endif /* DHD_EWPR_VER2 */
+}
+#ifndef DHD_EWPR_VER2
+static void
+copy_hang_info_trap_t(char *dest, trap_t *src, int len, int field_name,
+ int *bytes_written, int *cnt, char *cookie)
+{
+ uint8 *ptr;
+ int remain_len;
+ int i;
+
+ ptr = (uint8 *)src;
+
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
+
+ for (i = HANG_INFO_TRAP_T_OFFSET_IDX;
+ (hang_info_trap_tbl[i].name[0] != 0) && (*cnt < HANG_FIELD_CNT_MAX);
+ i++, (*cnt)++) {
+ if (field_name) {
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
+ *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%c%s:",
+ HANG_RAW_DEL, hang_info_trap_tbl[i].name);
+ }
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
+ *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%c%08x",
+ HANG_RAW_DEL, *(uint32 *)(ptr + hang_info_trap_tbl[i].offset));
+ }
+}
+
+static void
+copy_hang_info_stack(dhd_pub_t *dhd, char *dest, int *bytes_written, int *cnt)
+{
+ int remain_len;
+ int i = 0;
+ const uint32 *stack;
+ uint32 *ext_data = dhd->extended_trap_data;
+ hnd_ext_trap_hdr_t *hdr;
+ const bcm_tlv_t *tlv;
+ int remain_stack_cnt = 0;
+ uint32 dummy_data = 0;
+ int bigdata_key_stack_cnt = 0;
+
+ if (ext_data == NULL) {
+ return;
+ }
+ /* First word is original trap_data */
+ ext_data++;
+
+ /* Followed by the extended trap data header */
+ hdr = (hnd_ext_trap_hdr_t *)ext_data;
+
+ tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_STACK);
+
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
+
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ if (tlv) {
+ stack = (const uint32 *)tlv->data;
+ *bytes_written += scnprintf(&dest[*bytes_written], remain_len,
+ "%08x", *(uint32 *)(stack++));
+ (*cnt)++;
+ if (*cnt >= HANG_FIELD_CNT_MAX) {
+ return;
+ }
+ for (i = 1; i < (uint32)(tlv->len / sizeof(uint32)); i++, bigdata_key_stack_cnt++) {
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
+ /* Raw data for bigdata use '_' and Key data for bigdata use space */
+ *bytes_written += scnprintf(&dest[*bytes_written], remain_len,
+ "%c%08x",
+ i <= HANG_INFO_BIGDATA_KEY_STACK_CNT ? HANG_KEY_DEL : HANG_RAW_DEL,
+ *(uint32 *)(stack++));
+
+ (*cnt)++;
+ if ((*cnt >= HANG_FIELD_CNT_MAX) ||
+ (i >= HANG_FIELD_TRAP_T_STACK_CNT_MAX)) {
+ return;
+ }
+ }
+ }
+
+ remain_stack_cnt = HANG_FIELD_TRAP_T_STACK_CNT_MAX - i;
+
+ for (i = 0; i < remain_stack_cnt; i++) {
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
+ *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%c%08x",
+ HANG_RAW_DEL, dummy_data);
+ (*cnt)++;
+ if (*cnt >= HANG_FIELD_CNT_MAX) {
+ return;
+ }
+ }
+ GCC_DIAGNOSTIC_POP();
+
+}
+
+static void
+copy_hang_info_specific(dhd_pub_t *dhd, char *dest, int *bytes_written, int *cnt)
+{
+ int remain_len;
+ int i;
+ const uint32 *data;
+ uint32 *ext_data = dhd->extended_trap_data;
+ hnd_ext_trap_hdr_t *hdr;
+ const bcm_tlv_t *tlv;
+ int remain_trap_data = 0;
+ uint8 buf_u8[sizeof(uint32)] = { 0, };
+ const uint8 *p_u8;
+
+ if (ext_data == NULL) {
+ return;
+ }
+ /* First word is original trap_data */
+ ext_data++;
+
+ /* Followed by the extended trap data header */
+ hdr = (hnd_ext_trap_hdr_t *)ext_data;
+
+ tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_SIGNATURE);
+ if (tlv) {
+ /* hdr->len includes the TLV header */
+ remain_trap_data = (hdr->len - tlv->len - sizeof(uint16));
+ }
+
+ tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_STACK);
+ if (tlv) {
+ /* hdr->len includes the TLV header */
+ remain_trap_data -= (tlv->len + sizeof(uint16));
+ }
+
+ data = (const uint32 *)(hdr->data + (hdr->len - remain_trap_data));
+
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
+
+ for (i = 0; i < (uint32)(remain_trap_data / sizeof(uint32)) && *cnt < HANG_FIELD_CNT_MAX;
+ i++, (*cnt)++) {
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%c%08x",
+ HANG_RAW_DEL, *(uint32 *)(data++));
+ GCC_DIAGNOSTIC_POP();
+ }
+
+ if (*cnt >= HANG_FIELD_CNT_MAX) {
+ return;
+ }
+
+ remain_trap_data -= (sizeof(uint32) * i);
+
+ if (remain_trap_data > sizeof(buf_u8)) {
+ DHD_ERROR(("%s: resize remain_trap_data\n", __FUNCTION__));
+ remain_trap_data = sizeof(buf_u8);
+ }
+
+ if (remain_trap_data) {
+ p_u8 = (const uint8 *)data;
+ for (i = 0; i < remain_trap_data; i++) {
+ buf_u8[i] = *(const uint8 *)(p_u8++);
+ }
+
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
+ *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%c%08x",
+ HANG_RAW_DEL, ltoh32_ua(buf_u8));
+ (*cnt)++;
+ }
+}
+#endif /* DHD_EWPR_VER2 */
+
+static void
+get_hang_info_trap_subtype(dhd_pub_t *dhd, uint32 *subtype)
+{
+ uint32 i;
+ uint32 *ext_data = dhd->extended_trap_data;
+ hnd_ext_trap_hdr_t *hdr;
+ const bcm_tlv_t *tlv;
+
+ if (ext_data == NULL) {
+ return;
+ }
+
+ /* First word is original trap_data */
+ ext_data++;
+
+ /* Followed by the extended trap data header */
+ hdr = (hnd_ext_trap_hdr_t *)ext_data;
+
+ /* Dump a list of all tags found before parsing data */
+ for (i = TAG_TRAP_DEEPSLEEP; i < TAG_TRAP_LAST; i++) {
+ tlv = bcm_parse_tlvs(hdr->data, hdr->len, i);
+ if (tlv) {
+ if (!TAG_TRAP_IS_STATE(i)) {
+ *subtype = i;
+ return;
+ }
+ }
+ }
+}
+#ifdef DHD_EWPR_VER2
+static void
+copy_hang_info_etd_base64(dhd_pub_t *dhd, char *dest, int *bytes_written, int *cnt)
+{
+ int remain_len;
+ uint32 *ext_data = dhd->extended_trap_data;
+ hnd_ext_trap_hdr_t *hdr;
+ char *base64_out = NULL;
+ int base64_cnt;
+ int max_base64_len = HANG_INFO_BASE64_BUFFER_SIZE;
+
+ if (ext_data == NULL) {
+ return;
+ }
+ /* First word is original trap_data */
+ ext_data++;
+
+ /* Followed by the extended trap data header */
+ hdr = (hnd_ext_trap_hdr_t *)ext_data;
+
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
+
+ if (remain_len <= 0) {
+ DHD_ERROR(("%s: no space to put etd\n", __FUNCTION__));
+ return;
+ }
+
+ if (remain_len < max_base64_len) {
+ DHD_ERROR(("%s: change max base64 length to remain length %d\n", __FUNCTION__,
+ remain_len));
+ max_base64_len = remain_len;
+ }
+
+ base64_out = MALLOCZ(dhd->osh, HANG_INFO_BASE64_BUFFER_SIZE);
+ if (base64_out == NULL) {
+ DHD_ERROR(("%s: MALLOC failed for size %d\n",
+ __FUNCTION__, HANG_INFO_BASE64_BUFFER_SIZE));
+ return;
+ }
+
+ if (hdr->len > 0) {
+ base64_cnt = dhd_base64_encode(hdr->data, hdr->len, base64_out, max_base64_len);
+ if (base64_cnt == 0) {
+ DHD_ERROR(("%s: base64 encoding error\n", __FUNCTION__));
+ }
+ }
+
+ *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s",
+ base64_out);
+ (*cnt)++;
+ MFREE(dhd->osh, base64_out, HANG_INFO_BASE64_BUFFER_SIZE);
+}
+#endif /* DHD_EWPR_VER2 */
+
+void
+copy_hang_info_trap(dhd_pub_t *dhd)
+{
+ trap_t tr;
+ int bytes_written;
+ int trap_subtype = 0;
+
+ if (!dhd || !dhd->hang_info) {
+ DHD_ERROR(("%s dhd=%p hang_info=%p\n", __FUNCTION__,
+ dhd, (dhd ? dhd->hang_info : NULL)));
+ return;
+ }
+
+ if (!dhd->dongle_trap_occured) {
+ DHD_ERROR(("%s: dongle_trap_occured is FALSE\n", __FUNCTION__));
+ return;
+ }
+
+ memset(&tr, 0x00, sizeof(struct _trap_struct));
+
+ copy_ext_trap_sig(dhd, &tr);
+ get_hang_info_trap_subtype(dhd, &trap_subtype);
+
+ hang_info_trap_tbl[HANG_INFO_TRAP_T_REASON_IDX].offset = HANG_REASON_DONGLE_TRAP;
+ hang_info_trap_tbl[HANG_INFO_TRAP_T_SUBTYPE_IDX].offset = trap_subtype;
+
+ bytes_written = 0;
+ dhd->hang_info_cnt = 0;
+ get_debug_dump_time(dhd->debug_dump_time_hang_str);
+ copy_debug_dump_time(dhd->debug_dump_time_str, dhd->debug_dump_time_hang_str);
+
+ copy_hang_info_head(dhd->hang_info, &tr, VENDOR_SEND_HANG_EXT_INFO_LEN, FALSE,
+ &bytes_written, &dhd->hang_info_cnt, dhd->debug_dump_time_hang_str);
+
+ DHD_INFO(("hang info head cnt: %d len: %d data: %s\n",
+ dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
+
+ clear_debug_dump_time(dhd->debug_dump_time_hang_str);
+
+#ifdef DHD_EWPR_VER2
+ /* stack info & trap info are included in etd data */
+
+ /* extended trap data dump */
+ if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
+ copy_hang_info_etd_base64(dhd, dhd->hang_info, &bytes_written, &dhd->hang_info_cnt);
+ DHD_INFO(("hang info specific cnt: %d len: %d data: %s\n",
+ dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
+ }
+#else
+ if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
+ copy_hang_info_stack(dhd, dhd->hang_info, &bytes_written, &dhd->hang_info_cnt);
+ DHD_INFO(("hang info stack cnt: %d len: %d data: %s\n",
+ dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
+ }
+
+ if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
+ copy_hang_info_trap_t(dhd->hang_info, &tr, VENDOR_SEND_HANG_EXT_INFO_LEN, FALSE,
+ &bytes_written, &dhd->hang_info_cnt, dhd->debug_dump_time_hang_str);
+ DHD_INFO(("hang info trap_t cnt: %d len: %d data: %s\n",
+ dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
+ }
+
+ if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
+ copy_hang_info_specific(dhd, dhd->hang_info, &bytes_written, &dhd->hang_info_cnt);
+ DHD_INFO(("hang info specific cnt: %d len: %d data: %s\n",
+ dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
+ }
+#endif /* DHD_EWPR_VER2 */
+}
+
+void
+copy_hang_info_linkdown(dhd_pub_t *dhd)
+{
+ int bytes_written = 0;
+ int remain_len;
+
+ if (!dhd || !dhd->hang_info) {
+ DHD_ERROR(("%s dhd=%p hang_info=%p\n", __FUNCTION__,
+ dhd, (dhd ? dhd->hang_info : NULL)));
+ return;
+ }
+
+ if (!dhd->bus->is_linkdown) {
+ DHD_ERROR(("%s: link down is not happened\n", __FUNCTION__));
+ return;
+ }
+
+ dhd->hang_info_cnt = 0;
+
+ get_debug_dump_time(dhd->debug_dump_time_hang_str);
+ copy_debug_dump_time(dhd->debug_dump_time_str, dhd->debug_dump_time_hang_str);
+
+ /* hang reason code (0x8808) */
+ if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - bytes_written;
+ bytes_written += scnprintf(&dhd->hang_info[bytes_written], remain_len, "%d%c",
+ HANG_REASON_PCIE_LINK_DOWN_EP_DETECT, HANG_KEY_DEL);
+ dhd->hang_info_cnt++;
+ }
+
+ /* EWP version */
+ if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - bytes_written;
+ bytes_written += scnprintf(&dhd->hang_info[bytes_written], remain_len, "%d%c",
+ VENDOR_SEND_HANG_EXT_INFO_VER, HANG_KEY_DEL);
+ dhd->hang_info_cnt++;
+ }
+
+ /* cookie - dump time stamp */
+ if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - bytes_written;
+ bytes_written += scnprintf(&dhd->hang_info[bytes_written], remain_len, "%s%c",
+ dhd->debug_dump_time_hang_str, HANG_KEY_DEL);
+ dhd->hang_info_cnt++;
+ }
+
+ clear_debug_dump_time(dhd->debug_dump_time_hang_str);
+
+ /* dump PCIE RC registers */
+ dhd_dump_pcie_rc_regs_for_linkdown(dhd, &bytes_written);
+
+ DHD_INFO(("hang info haed cnt: %d len: %d data: %s\n",
+ dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
+
+}
+#endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
+
+int
+dhd_prot_debug_info_print(dhd_pub_t *dhd)
+{
+ dhd_prot_t *prot = dhd->prot;
+ msgbuf_ring_t *ring;
+ uint16 rd, wr, drd, dwr;
+ uint32 dma_buf_len;
+ uint64 current_time;
+ ulong ring_tcm_rd_addr; /* dongle address */
+ ulong ring_tcm_wr_addr; /* dongle address */
+
+ DHD_ERROR(("\n ------- DUMPING VERSION INFORMATION ------- \r\n"));
+ DHD_ERROR(("DHD: %s\n", dhd_version));
+ DHD_ERROR(("Firmware: %s\n", fw_version));
+
+#ifdef DHD_FW_COREDUMP
+ DHD_ERROR(("\n ------- DUMPING CONFIGURATION INFORMATION ------ \r\n"));
+ DHD_ERROR(("memdump mode: %d\n", dhd->memdump_enabled));
+#endif /* DHD_FW_COREDUMP */
+
+ DHD_ERROR(("\n ------- DUMPING PROTOCOL INFORMATION ------- \r\n"));
+ DHD_ERROR(("ICPrevs: Dev %d, Host %d, active %d\n",
+ prot->device_ipc_version,
+ prot->host_ipc_version,
+ prot->active_ipc_version));
+ DHD_ERROR(("d2h_intr_method -> %s d2h_intr_control -> %s\n",
+ dhd->bus->d2h_intr_method ? "PCIE_MSI" : "PCIE_INTX",
+ dhd->bus->d2h_intr_control ? "HOST_IRQ" : "D2H_INTMASK"));
+ DHD_ERROR(("max Host TS bufs to post: %d, posted %d\n",
+ prot->max_tsbufpost, prot->cur_ts_bufs_posted));
+ DHD_ERROR(("max INFO bufs to post: %d, posted %d\n",
+ prot->max_infobufpost, prot->infobufpost));
+ DHD_ERROR(("max event bufs to post: %d, posted %d\n",
+ prot->max_eventbufpost, prot->cur_event_bufs_posted));
+ DHD_ERROR(("max ioctlresp bufs to post: %d, posted %d\n",
+ prot->max_ioctlrespbufpost, prot->cur_ioctlresp_bufs_posted));
+ DHD_ERROR(("max RX bufs to post: %d, posted %d\n",
+ prot->max_rxbufpost, prot->rxbufpost));
+ DHD_ERROR(("h2d_max_txpost: %d, prot->h2d_max_txpost: %d\n",
+ h2d_max_txpost, prot->h2d_max_txpost));
+#if defined(DHD_HTPUT_TUNABLES)
+ DHD_ERROR(("h2d_max_txpost: %d, prot->h2d_max_txpost: %d\n",
+ h2d_htput_max_txpost, prot->h2d_htput_max_txpost));
+#endif /* DHD_HTPUT_TUNABLES */
+
+ current_time = OSL_LOCALTIME_NS();
+ DHD_ERROR(("current_time="SEC_USEC_FMT"\n", GET_SEC_USEC(current_time)));
+ DHD_ERROR(("ioctl_fillup_time="SEC_USEC_FMT
+ " ioctl_ack_time="SEC_USEC_FMT
+ " ioctl_cmplt_time="SEC_USEC_FMT"\n",
+ GET_SEC_USEC(prot->ioctl_fillup_time),
+ GET_SEC_USEC(prot->ioctl_ack_time),
+ GET_SEC_USEC(prot->ioctl_cmplt_time)));
+
+ /* Check PCIe INT registers */
+ if (!dhd_pcie_dump_int_regs(dhd)) {
+ DHD_ERROR(("%s : PCIe link might be down\n", __FUNCTION__));
+ dhd->bus->is_linkdown = TRUE;
+ }
+
+ DHD_ERROR(("\n ------- DUMPING IOCTL RING RD WR Pointers ------- \r\n"));
+
+ ring = &prot->h2dring_ctrl_subn;
+ dma_buf_len = ring->max_items * ring->item_len;
+ ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
+ ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
+ DHD_ERROR(("CtrlPost: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
+ "SIZE %d \r\n",
+ ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
+ ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr, dma_buf_len));
+ DHD_ERROR(("CtrlPost: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
+ if (dhd->dma_d2h_ring_upd_support) {
+ drd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
+ dwr = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_WR_UPD, ring->idx);
+ DHD_ERROR(("CtrlPost: From Host DMA mem: RD: %d WR %d \r\n", drd, dwr));
+ }
+ if (dhd->bus->is_linkdown) {
+ DHD_ERROR(("CtrlPost: From Shared Mem: RD and WR are invalid"
+ " due to PCIe link down\r\n"));
+ } else {
+ dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
+ dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
+ DHD_ERROR(("CtrlPost: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
+ }
+ DHD_ERROR(("CtrlPost: seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO));
+
+ ring = &prot->d2hring_ctrl_cpln;
+ dma_buf_len = ring->max_items * ring->item_len;
+ ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
+ ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
+ DHD_ERROR(("CtrlCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
+ "SIZE %d \r\n",
+ ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
+ ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr, dma_buf_len));
+ DHD_ERROR(("CtrlCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
+ if (dhd->dma_d2h_ring_upd_support) {
+ drd = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, ring->idx);
+ dwr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
+ DHD_ERROR(("CtrlCpl: From Host DMA mem: RD: %d WR %d \r\n", drd, dwr));
+ }
+ if (dhd->bus->is_linkdown) {
+ DHD_ERROR(("CtrlCpl: From Shared Mem: RD and WR are invalid"
+ " due to PCIe link down\r\n"));
+ } else {
+ dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
+ dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
+ DHD_ERROR(("CtrlCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
+ }
+ DHD_ERROR(("CtrlCpl: Expected seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO));
+
+ ring = prot->h2dring_info_subn;
+ if (ring) {
+ dma_buf_len = ring->max_items * ring->item_len;
+ ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
+ ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
+ DHD_ERROR(("InfoSub: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
+ "SIZE %d \r\n",
+ ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
+ ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
+ dma_buf_len));
+ DHD_ERROR(("InfoSub: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
+ if (dhd->dma_d2h_ring_upd_support) {
+ drd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
+ dwr = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_WR_UPD, ring->idx);
+ DHD_ERROR(("InfoSub: From Host DMA mem: RD: %d WR %d \r\n", drd, dwr));
+ }
+ if (dhd->bus->is_linkdown) {
+ DHD_ERROR(("InfoSub: From Shared Mem: RD and WR are invalid"
+ " due to PCIe link down\r\n"));
+ } else {
+ dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
+ dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
+ DHD_ERROR(("InfoSub: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
+ }
+ DHD_ERROR(("InfoSub: seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO));
+ }
+ ring = prot->d2hring_info_cpln;
+ if (ring) {
+ dma_buf_len = ring->max_items * ring->item_len;
+ ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
+ ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
+ DHD_ERROR(("InfoCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
+ "SIZE %d \r\n",
+ ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
+ ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
+ dma_buf_len));
+ DHD_ERROR(("InfoCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
+ if (dhd->dma_d2h_ring_upd_support) {
+ drd = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, ring->idx);
+ dwr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
+ DHD_ERROR(("InfoCpl: From Host DMA mem: RD: %d WR %d \r\n", drd, dwr));
+ }
+ if (dhd->bus->is_linkdown) {
+ DHD_ERROR(("InfoCpl: From Shared Mem: RD and WR are invalid"
+ " due to PCIe link down\r\n"));
+ } else {
+ dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
+ dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
+ DHD_ERROR(("InfoCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
+ }
+ DHD_ERROR(("InfoCpl: Expected seq num: %d \r\n", ring->seqnum % D2H_EPOCH_MODULO));
+ }
+#ifdef EWP_EDL
+ ring = prot->d2hring_edl;
+ if (ring) {
+ ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
+ ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
+ dma_buf_len = ring->max_items * ring->item_len;
+ DHD_ERROR(("EdlRing: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
+ "SIZE %d \r\n",
+ ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
+ ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
+ dma_buf_len));
+ DHD_ERROR(("EdlRing: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
+ if (dhd->dma_d2h_ring_upd_support) {
+ drd = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, ring->idx);
+ dwr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
+ DHD_ERROR(("EdlRing: From Host DMA mem: RD: %d WR %d \r\n", drd, dwr));
+ }
+ if (dhd->bus->is_linkdown) {
+ DHD_ERROR(("EdlRing: From Shared Mem: RD and WR are invalid"
+ " due to PCIe link down\r\n"));
+ } else {
+ dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
+ dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
+ DHD_ERROR(("EdlRing: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
+ }
+ DHD_ERROR(("EdlRing: Expected seq num: %d \r\n",
+ ring->seqnum % D2H_EPOCH_MODULO));
+ }
+#endif /* EWP_EDL */
+
+ ring = &prot->d2hring_tx_cpln;
+ if (ring) {
+ ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
+ ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
+ dma_buf_len = ring->max_items * ring->item_len;
+ DHD_ERROR(("TxCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
+ "SIZE %d \r\n",
+ ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
+ ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
+ dma_buf_len));
+ DHD_ERROR(("TxCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
+ if (dhd->dma_d2h_ring_upd_support) {
+ drd = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, ring->idx);
+ dwr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
+ DHD_ERROR(("TxCpl: From Host DMA mem: RD: %d WR %d \r\n", drd, dwr));
+ }
+ if (dhd->bus->is_linkdown) {
+ DHD_ERROR(("TxCpl: From Shared Mem: RD and WR are invalid"
+ " due to PCIe link down\r\n"));
+ } else {
+ dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
+ dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
+ DHD_ERROR(("TxCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
+ }
+ DHD_ERROR(("TxCpl: Expected seq num: %d \r\n", ring->seqnum % D2H_EPOCH_MODULO));
+ }
+
+ ring = &prot->d2hring_rx_cpln;
+ if (ring) {
+ ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
+ ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
+ dma_buf_len = ring->max_items * ring->item_len;
+ DHD_ERROR(("RxCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
+ "SIZE %d \r\n",
+ ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
+ ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
+ dma_buf_len));
+ DHD_ERROR(("RxCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
+ if (dhd->dma_d2h_ring_upd_support) {
+ drd = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, ring->idx);
+ dwr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
+ DHD_ERROR(("RxCpl: From Host DMA mem: RD: %d WR %d \r\n", drd, dwr));
+ }
+ if (dhd->bus->is_linkdown) {
+ DHD_ERROR(("RxCpl: From Shared Mem: RD and WR are invalid"
+ " due to PCIe link down\r\n"));
+ } else {
+ dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
+ dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
+ DHD_ERROR(("RxCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
+ }
+ DHD_ERROR(("RxCpl: Expected seq num: %d \r\n", ring->seqnum % D2H_EPOCH_MODULO));
+ }
+
+ ring = &prot->h2dring_rxp_subn;
+ if (ring) {
+ ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
+ ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
+ dma_buf_len = ring->max_items * ring->item_len;
+ DHD_ERROR(("RxSub: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
+ "SIZE %d \r\n",
+ ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
+ ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
+ dma_buf_len));
+ DHD_ERROR(("RxSub: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
+ if (dhd->dma_d2h_ring_upd_support) {
+ drd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
+ dwr = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_WR_UPD, ring->idx);
+ DHD_ERROR(("RxSub: From Host DMA mem: RD: %d WR %d \r\n", drd, dwr));
+ }
+ if (dhd->bus->is_linkdown) {
+ DHD_ERROR(("RxSub: From Shared Mem: RD and WR are invalid"
+ " due to PCIe link down\r\n"));
+ } else {
+ dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
+ dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
+ DHD_ERROR(("RxSub: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
+ }
+ DHD_ERROR(("RxSub: Expected seq num: %d \r\n", ring->seqnum % D2H_EPOCH_MODULO));
+ }
+
+ DHD_ERROR(("%s: cur_ioctlresp_bufs_posted %d cur_event_bufs_posted %d\n",
+ __FUNCTION__, prot->cur_ioctlresp_bufs_posted, prot->cur_event_bufs_posted));
+#ifdef DHD_LIMIT_MULTI_CLIENT_FLOWRINGS
+ DHD_ERROR(("%s: multi_client_flow_rings:%d max_multi_client_flow_rings:%d\n",
+ __FUNCTION__, dhd->multi_client_flow_rings, dhd->max_multi_client_flow_rings));
+#endif /* DHD_LIMIT_MULTI_CLIENT_FLOWRINGS */
+
+ DHD_ERROR(("pktid_txq_start_cnt: %d\n", prot->pktid_txq_start_cnt));
+ DHD_ERROR(("pktid_txq_stop_cnt: %d\n", prot->pktid_txq_stop_cnt));
+ DHD_ERROR(("pktid_depleted_cnt: %d\n", prot->pktid_depleted_cnt));
+ dhd_pcie_debug_info_dump(dhd);
+#ifdef DHD_LB_STATS
+ DHD_ERROR(("\nlb_rxp_stop_thr_hitcnt: %llu lb_rxp_strt_thr_hitcnt: %llu\n",
+ dhd->lb_rxp_stop_thr_hitcnt, dhd->lb_rxp_strt_thr_hitcnt));
+ DHD_ERROR(("\nlb_rxp_napi_sched_cnt: %llu lb_rxp_napi_complete_cnt: %llu\n",
+ dhd->lb_rxp_napi_sched_cnt, dhd->lb_rxp_napi_complete_cnt));
+#endif /* DHD_LB_STATS */
+#ifdef DHD_TIMESYNC
+ dhd_timesync_debug_info_print(dhd);
+#endif /* DHD_TIMESYNC */
+ return 0;
+}
+
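+/*
+ * Dump the host-resident DMA index buffers: the RPTR block covering the H2D
+ * common rings and flowrings, and the WPTR block covering the D2H completion
+ * rings, invalidating the cache before each read.
+ */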
+int
+dhd_prot_ringupd_dump(dhd_pub_t *dhd, struct bcmstrbuf *b)
+{
+ uint32 *ptr;
+ uint32 value;
+
+ if (dhd->prot->d2h_dma_indx_wr_buf.va) {
+ uint32 i;
+ uint32 max_h2d_queues = dhd_bus_max_h2d_queues(dhd->bus);
+
+ OSL_CACHE_INV((void *)dhd->prot->d2h_dma_indx_wr_buf.va,
+ dhd->prot->d2h_dma_indx_wr_buf.len);
+
+ ptr = (uint32 *)(dhd->prot->d2h_dma_indx_wr_buf.va);
+
+ bcm_bprintf(b, "\n max_tx_queues %d\n", max_h2d_queues);
+
+ bcm_bprintf(b, "\nRPTR block H2D common rings, 0x%4p\n", ptr);
+ value = ltoh32(*ptr);
+ bcm_bprintf(b, "\tH2D CTRL: value 0x%04x\n", value);
+ ptr++;
+ value = ltoh32(*ptr);
+ bcm_bprintf(b, "\tH2D RXPOST: value 0x%04x\n", value);
+
+ ptr++;
+ bcm_bprintf(b, "RPTR block Flow rings , 0x%4p\n", ptr);
+ for (i = BCMPCIE_H2D_COMMON_MSGRINGS; i < max_h2d_queues; i++) {
+ value = ltoh32(*ptr);
+ bcm_bprintf(b, "\tflowring ID %d: value 0x%04x\n", i, value);
+ ptr++;
+ }
+ }
+
+ if (dhd->prot->h2d_dma_indx_rd_buf.va) {
+ OSL_CACHE_INV((void *)dhd->prot->h2d_dma_indx_rd_buf.va,
+ dhd->prot->h2d_dma_indx_rd_buf.len);
+
+ ptr = (uint32 *)(dhd->prot->h2d_dma_indx_rd_buf.va);
+
+ bcm_bprintf(b, "\nWPTR block D2H common rings, 0x%4p\n", ptr);
+ value = ltoh32(*ptr);
+ bcm_bprintf(b, "\tD2H CTRLCPLT: value 0x%04x\n", value);
+ ptr++;
+ value = ltoh32(*ptr);
+ bcm_bprintf(b, "\tD2H TXCPLT: value 0x%04x\n", value);
+ ptr++;
+ value = ltoh32(*ptr);
+ bcm_bprintf(b, "\tD2H RXCPLT: value 0x%04x\n", value);
+ }
+
+ return 0;
+}
+
+uint32
+dhd_prot_metadata_dbg_set(dhd_pub_t *dhd, bool val)
+{
+ dhd_prot_t *prot = dhd->prot;
+#if DHD_DBG_SHOW_METADATA
+ prot->metadata_dbg = val;
+#endif
+ return (uint32)prot->metadata_dbg;
+}
+
+uint32
+dhd_prot_metadata_dbg_get(dhd_pub_t *dhd)
+{
+ dhd_prot_t *prot = dhd->prot;
+ return (uint32)prot->metadata_dbg;
+}
+
+uint32
+dhd_prot_metadatalen_set(dhd_pub_t *dhd, uint32 val, bool rx)
+{
+#if !(defined(BCM_ROUTER_DHD))
+ dhd_prot_t *prot = dhd->prot;
+ if (rx)
+ prot->rx_metadata_offset = (uint16)val;
+ else
+ prot->tx_metadata_offset = (uint16)val;
+#endif /* ! BCM_ROUTER_DHD */
+ return dhd_prot_metadatalen_get(dhd, rx);
+}
+
+uint32
+dhd_prot_metadatalen_get(dhd_pub_t *dhd, bool rx)
+{
+ dhd_prot_t *prot = dhd->prot;
+ if (rx)
+ return prot->rx_metadata_offset;
+ else
+ return prot->tx_metadata_offset;
+}
+
+/** optimization to write "n" tx items at a time to ring */
+uint32
+dhd_prot_txp_threshold(dhd_pub_t *dhd, bool set, uint32 val)
+{
+ dhd_prot_t *prot = dhd->prot;
+ if (set)
+ prot->txp_threshold = (uint16)val;
+ val = prot->txp_threshold;
+ return val;
+}
+
+#ifdef DHD_RX_CHAINING
+
+static INLINE void
+BCMFASTPATH(dhd_rxchain_reset)(rxchain_info_t *rxchain)
+{
+ rxchain->pkt_count = 0;
+}
+
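+/*
+ * Add one received packet to the current rx chain. Packets are chained only
+ * while they share ifidx, priority, and src/dst MAC with the chain head; a
+ * flow change, a non-chainable packet, or reaching DHD_PKT_CTF_MAX_CHAIN_LEN
+ * commits the pending chain to the network stack.
+ */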
+static void
+BCMFASTPATH(dhd_rxchain_frame)(dhd_pub_t *dhd, void *pkt, uint ifidx)
+{
+ uint8 *eh;
+ uint8 prio;
+ dhd_prot_t *prot = dhd->prot;
+ rxchain_info_t *rxchain = &prot->rxchain;
+
+ ASSERT(!PKTISCHAINED(pkt));
+ ASSERT(PKTCLINK(pkt) == NULL);
+ ASSERT(PKTCGETATTR(pkt) == 0);
+
+ eh = PKTDATA(dhd->osh, pkt);
+ prio = IP_TOS46(eh + ETHER_HDR_LEN) >> IPV4_TOS_PREC_SHIFT;
+
+ if (rxchain->pkt_count && !(PKT_CTF_CHAINABLE(dhd, ifidx, eh, prio, rxchain->h_sa,
+ rxchain->h_da, rxchain->h_prio))) {
+ /* Different flow - First release the existing chain */
+ dhd_rxchain_commit(dhd);
+ }
+
+	/* For routers with HNDCTF, link the packets using PKTSETCLINK
+	 * so that the chain can be handed off to the CTF bridge as is.
+	 */
+ if (rxchain->pkt_count == 0) {
+ /* First packet in chain */
+ rxchain->pkthead = rxchain->pkttail = pkt;
+
+ /* Keep a copy of ptr to ether_da, ether_sa and prio */
+ rxchain->h_da = ((struct ether_header *)eh)->ether_dhost;
+ rxchain->h_sa = ((struct ether_header *)eh)->ether_shost;
+ rxchain->h_prio = prio;
+ rxchain->ifidx = ifidx;
+ rxchain->pkt_count++;
+ } else {
+ /* Same flow - keep chaining */
+ PKTSETCLINK(rxchain->pkttail, pkt);
+ rxchain->pkttail = pkt;
+ rxchain->pkt_count++;
+ }
+
+ if ((dhd_rx_pkt_chainable(dhd, ifidx)) && (!ETHER_ISMULTI(rxchain->h_da)) &&
+ ((((struct ether_header *)eh)->ether_type == HTON16(ETHER_TYPE_IP)) ||
+ (((struct ether_header *)eh)->ether_type == HTON16(ETHER_TYPE_IPV6)))) {
+ PKTSETCHAINED(dhd->osh, pkt);
+ PKTCINCRCNT(rxchain->pkthead);
+ PKTCADDLEN(rxchain->pkthead, PKTLEN(dhd->osh, pkt));
+ } else {
+ dhd_rxchain_commit(dhd);
+ return;
+ }
+
+ /* If we have hit the max chain length, dispatch the chain and reset */
+ if (rxchain->pkt_count >= DHD_PKT_CTF_MAX_CHAIN_LEN) {
+ dhd_rxchain_commit(dhd);
+ }
+}
+
+static void
+BCMFASTPATH(dhd_rxchain_commit)(dhd_pub_t *dhd)
+{
+ dhd_prot_t *prot = dhd->prot;
+ rxchain_info_t *rxchain = &prot->rxchain;
+
+ if (rxchain->pkt_count == 0)
+ return;
+
+ /* Release the packets to dhd_linux */
+ dhd_bus_rx_frame(dhd->bus, rxchain->pkthead, rxchain->ifidx, rxchain->pkt_count);
+
+ /* Reset the chain */
+ dhd_rxchain_reset(rxchain);
+}
+
+#endif /* DHD_RX_CHAINING */
+
+#ifdef IDLE_TX_FLOW_MGMT
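+/*
+ * Resume an idle TX flowring: re-fetch a pre-initialized msgbuf ring from
+ * the flowring pool and send MSG_TYPE_FLOW_RING_RESUME to the dongle over
+ * the control submission ring.
+ */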
+int
+dhd_prot_flow_ring_resume(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
+{
+ tx_idle_flowring_resume_request_t *flow_resume_rqst;
+ msgbuf_ring_t *flow_ring;
+ dhd_prot_t *prot = dhd->prot;
+ unsigned long flags;
+ uint16 alloced = 0;
+ msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
+
+ /* Fetch a pre-initialized msgbuf_ring from the flowring pool */
+ flow_ring = dhd_prot_flowrings_pool_fetch(dhd, flow_ring_node->flowid);
+ if (flow_ring == NULL) {
+ DHD_ERROR(("%s: dhd_prot_flowrings_pool_fetch TX Flowid %d failed\n",
+ __FUNCTION__, flow_ring_node->flowid));
+ return BCME_NOMEM;
+ }
+
+#ifdef PCIE_INB_DW
+ if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
+ return BCME_ERROR;
+#endif /* PCIE_INB_DW */
+
+ DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
+
+ /* Request for ctrl_ring buffer space */
+ flow_resume_rqst = (tx_idle_flowring_resume_request_t *)
+ dhd_prot_alloc_ring_space(dhd, ctrl_ring, 1, &alloced, FALSE);
+
+ if (flow_resume_rqst == NULL) {
+ dhd_prot_flowrings_pool_release(dhd, flow_ring_node->flowid, flow_ring);
+ DHD_ERROR(("%s: Flow resume Req flowid %d - failure ring space\n",
+ __FUNCTION__, flow_ring_node->flowid));
+ DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+ return BCME_NOMEM;
+ }
+
+ flow_ring_node->prot_info = (void *)flow_ring;
+
+ /* Common msg buf hdr */
+ flow_resume_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_RESUME;
+ flow_resume_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
+ flow_resume_rqst->msg.request_id = htol32(0); /* TBD */
+
+ flow_resume_rqst->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
+ ctrl_ring->seqnum++;
+
+ flow_resume_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
+ DHD_ERROR(("%s Send Flow resume Req flow ID %d\n",
+ __FUNCTION__, flow_ring_node->flowid));
+
+ /* Update the flow_ring's WRITE index */
+ if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) {
+ dhd_prot_dma_indx_set(dhd, flow_ring->wr,
+ H2D_DMA_INDX_WR_UPD, flow_ring->idx);
+ } else if (IFRM_ACTIVE(dhd) && (flow_ring->idx >= BCMPCIE_H2D_MSGRING_TXFLOW_IDX_START)) {
+ dhd_prot_dma_indx_set(dhd, flow_ring->wr,
+ H2D_IFRM_INDX_WR_UPD,
+ (flow_ring->idx - BCMPCIE_H2D_MSGRING_TXFLOW_IDX_START));
+ } else {
+ dhd_bus_cmn_writeshared(dhd->bus, &(flow_ring->wr),
+ sizeof(uint16), RING_WR_UPD, flow_ring->idx);
+ }
+
+ /* update control subn ring's WR index and ring doorbell to dongle */
+ dhd_prot_ring_write_complete(dhd, ctrl_ring, flow_resume_rqst, 1);
+
+ DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
+
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+ return BCME_OK;
+} /* dhd_prot_flow_ring_resume */
+
+int
+dhd_prot_flow_ring_batch_suspend_request(dhd_pub_t *dhd, uint16 *ringid, uint16 count)
+{
+ tx_idle_flowring_suspend_request_t *flow_suspend_rqst;
+ dhd_prot_t *prot = dhd->prot;
+ unsigned long flags;
+ uint16 index;
+ uint16 alloced = 0;
+ msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
+
+#ifdef PCIE_INB_DW
+ if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
+ return BCME_ERROR;
+#endif /* PCIE_INB_DW */
+
+ DHD_RING_LOCK(ring->ring_lock, flags);
+
+ /* Request for ring buffer space */
+ flow_suspend_rqst = (tx_idle_flowring_suspend_request_t *)
+ dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
+
+ if (flow_suspend_rqst == NULL) {
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+ DHD_ERROR(("%s: Flow suspend Req - failure ring space\n", __FUNCTION__));
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+ return BCME_NOMEM;
+ }
+
+ /* Common msg buf hdr */
+ flow_suspend_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_SUSPEND;
+ /* flow_suspend_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex; */
+ flow_suspend_rqst->msg.request_id = htol32(0); /* TBD */
+
+ flow_suspend_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
+ ring->seqnum++;
+
+ /* Update flow id info */
+ for (index = 0; index < count; index++)
+ {
+ flow_suspend_rqst->ring_id[index] = ringid[index];
+ }
+ flow_suspend_rqst->num = count;
+
+ DHD_ERROR(("%s sending batch suspend!! count is %d\n", __FUNCTION__, count));
+
+ /* update ring's WR index and ring doorbell to dongle */
+ dhd_prot_ring_write_complete(dhd, ring, flow_suspend_rqst, 1);
+
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+
+ return BCME_OK;
+}
+#endif /* IDLE_TX_FLOW_MGMT */
+
+#if defined(BCMINTERNAL) && defined(DHD_DBG_DUMP)
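+/*
+ * Record one ioctl request in a circular trace of MAX_IOCTL_TRACE_SIZE
+ * entries (cmd, transaction id, timestamp, and the iovar name for
+ * get/set-var requests), dumped later by dhd_prot_ioctl_dump().
+ */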
+static void
+dhd_prot_ioctl_trace(dhd_pub_t *dhd, ioctl_req_msg_t *ioct_rqst, uchar *buf, int len)
+{
+ struct dhd_prot *prot = dhd->prot;
+ uint32 cnt = prot->ioctl_trace_count % MAX_IOCTL_TRACE_SIZE;
+
+ prot->ioctl_trace[cnt].cmd = ioct_rqst->cmd;
+ prot->ioctl_trace[cnt].transid = ioct_rqst->trans_id;
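+	/* cmd 262/263 are WLC_GET_VAR/WLC_SET_VAR; keep the iovar name buffer */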
+ if ((ioct_rqst->cmd == 262 || ioct_rqst->cmd == 263) && buf)
+ memcpy(prot->ioctl_trace[cnt].ioctl_buf, buf,
+ len > MAX_IOCTL_BUF_SIZE ? MAX_IOCTL_BUF_SIZE : len);
+ else
+ memset(prot->ioctl_trace[cnt].ioctl_buf, 0, MAX_IOCTL_BUF_SIZE);
+ prot->ioctl_trace[cnt].timestamp = OSL_SYSUPTIME_US();
+	prot->ioctl_trace_count++;
+}
+
+static void
+dhd_prot_ioctl_dump(dhd_prot_t *prot, struct bcmstrbuf *strbuf)
+{
+ int dumpsz;
+ int i;
+
+ dumpsz = prot->ioctl_trace_count < MAX_IOCTL_TRACE_SIZE ?
+ prot->ioctl_trace_count : MAX_IOCTL_TRACE_SIZE;
+ if (dumpsz == 0) {
+ bcm_bprintf(strbuf, "\nEmpty IOCTL TRACE\n");
+ return;
+ }
+ bcm_bprintf(strbuf, "----------- IOCTL TRACE --------------\n");
+ bcm_bprintf(strbuf, "Timestamp us\t\tCMD\tTransID\tIOVAR\n");
+	for (i = 0; i < dumpsz; i++) {
+ bcm_bprintf(strbuf, "%llu\t%d\t%d\t%s\n",
+ prot->ioctl_trace[i].timestamp,
+ prot->ioctl_trace[i].cmd,
+ prot->ioctl_trace[i].transid,
+ prot->ioctl_trace[i].ioctl_buf);
+ }
+}
+#endif /* defined(BCMINTERNAL) && defined(DHD_DBG_DUMP) */
+
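+/* Pretty-print the v1 PSM watchdog trap record (MAC/PSM register snapshot) */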
+static void dump_psmwd_v1(const bcm_tlv_t *tlv, struct bcmstrbuf *b)
+{
+ const hnd_ext_trap_psmwd_v1_t* psmwd = NULL;
+ uint32 i;
+ psmwd = (const hnd_ext_trap_psmwd_v1_t *)tlv;
+ for (i = 0; i < PSMDBG_REG_READ_CNT_FOR_PSMWDTRAP_V1; i++) {
+ bcm_bprintf(b, " psmdebug[%d]: 0x%x\n", i, psmwd->i32_psmdebug[i]);
+ }
+ bcm_bprintf(b, " gated clock en: 0x%x\n", psmwd->i16_0x1a8);
+ bcm_bprintf(b, " Rcv Fifo Ctrl: 0x%x\n", psmwd->i16_0x406);
+ bcm_bprintf(b, " Rx ctrl 1: 0x%x\n", psmwd->i16_0x408);
+ bcm_bprintf(b, " Rxe Status 1: 0x%x\n", psmwd->i16_0x41a);
+ bcm_bprintf(b, " Rxe Status 2: 0x%x\n", psmwd->i16_0x41c);
+ bcm_bprintf(b, " rcv wrd count 0: 0x%x\n", psmwd->i16_0x424);
+ bcm_bprintf(b, " rcv wrd count 1: 0x%x\n", psmwd->i16_0x426);
+ bcm_bprintf(b, " RCV_LFIFO_STS: 0x%x\n", psmwd->i16_0x456);
+ bcm_bprintf(b, " PSM_SLP_TMR: 0x%x\n", psmwd->i16_0x480);
+ bcm_bprintf(b, " TXE CTRL: 0x%x\n", psmwd->i16_0x500);
+ bcm_bprintf(b, " TXE Status: 0x%x\n", psmwd->i16_0x50e);
+ bcm_bprintf(b, " TXE_xmtdmabusy: 0x%x\n", psmwd->i16_0x55e);
+ bcm_bprintf(b, " TXE_XMTfifosuspflush: 0x%x\n", psmwd->i16_0x566);
+ bcm_bprintf(b, " IFS Stat: 0x%x\n", psmwd->i16_0x690);
+ bcm_bprintf(b, " IFS_MEDBUSY_CTR: 0x%x\n", psmwd->i16_0x692);
+ bcm_bprintf(b, " IFS_TX_DUR: 0x%x\n", psmwd->i16_0x694);
+	bcm_bprintf(b, " Slow_CTL: 0x%x\n", psmwd->i16_0x6a0);
+ bcm_bprintf(b, " PSM BRC: 0x%x\n", psmwd->i16_0x490);
+ bcm_bprintf(b, " TXE_AQM fifo Ready: 0x%x\n", psmwd->i16_0x838);
+ bcm_bprintf(b, " Dagg ctrl: 0x%x\n", psmwd->i16_0x8c0);
+ bcm_bprintf(b, " shm_prewds_cnt: 0x%x\n", psmwd->shm_prewds_cnt);
+ bcm_bprintf(b, " shm_txtplufl_cnt: 0x%x\n", psmwd->shm_txtplufl_cnt);
+ bcm_bprintf(b, " shm_txphyerr_cnt: 0x%x\n", psmwd->shm_txphyerr_cnt);
+}
+
+static void dump_psmwd_v2(const bcm_tlv_t *tlv, struct bcmstrbuf *b)
+{
+ const hnd_ext_trap_psmwd_t* psmwd = NULL;
+ uint32 i;
+ psmwd = (const hnd_ext_trap_psmwd_t *)tlv;
+ for (i = 0; i < PSMDBG_REG_READ_CNT_FOR_PSMWDTRAP_V2; i++) {
+ bcm_bprintf(b, " psmdebug[%d]: 0x%x\n", i, psmwd->i32_psmdebug[i]);
+ }
+
+ bcm_bprintf(b, " psm_brwk0: 0x%x\n", psmwd->i16_0x4b8);
+ bcm_bprintf(b, " psm_brwk1: 0x%x\n", psmwd->i16_0x4ba);
+ bcm_bprintf(b, " psm_brwk2: 0x%x\n", psmwd->i16_0x4bc);
+ bcm_bprintf(b, " psm_brwk3: 0x%x\n", psmwd->i16_0x4be);
+ bcm_bprintf(b, " PSM BRC_1: 0x%x\n", psmwd->i16_0x4da);
+ bcm_bprintf(b, " gated clock en: 0x%x\n", psmwd->i16_0x1a8);
+ bcm_bprintf(b, " Rcv Fifo Ctrl: 0x%x\n", psmwd->i16_0x406);
+ bcm_bprintf(b, " Rx ctrl 1: 0x%x\n", psmwd->i16_0x408);
+ bcm_bprintf(b, " Rxe Status 1: 0x%x\n", psmwd->i16_0x41a);
+ bcm_bprintf(b, " Rxe Status 2: 0x%x\n", psmwd->i16_0x41c);
+ bcm_bprintf(b, " rcv wrd count 0: 0x%x\n", psmwd->i16_0x424);
+ bcm_bprintf(b, " rcv wrd count 1: 0x%x\n", psmwd->i16_0x426);
+ bcm_bprintf(b, " RCV_LFIFO_STS: 0x%x\n", psmwd->i16_0x456);
+ bcm_bprintf(b, " PSM_SLP_TMR: 0x%x\n", psmwd->i16_0x480);
+ bcm_bprintf(b, " TXE CTRL: 0x%x\n", psmwd->i16_0x500);
+ bcm_bprintf(b, " TXE Status: 0x%x\n", psmwd->i16_0x50e);
+ bcm_bprintf(b, " TXE_xmtdmabusy: 0x%x\n", psmwd->i16_0x55e);
+ bcm_bprintf(b, " TXE_XMTfifosuspflush: 0x%x\n", psmwd->i16_0x566);
+ bcm_bprintf(b, " IFS Stat: 0x%x\n", psmwd->i16_0x690);
+ bcm_bprintf(b, " IFS_MEDBUSY_CTR: 0x%x\n", psmwd->i16_0x692);
+ bcm_bprintf(b, " IFS_TX_DUR: 0x%x\n", psmwd->i16_0x694);
+	bcm_bprintf(b, " Slow_CTL: 0x%x\n", psmwd->i16_0x6a0);
+ bcm_bprintf(b, " PSM BRC: 0x%x\n", psmwd->i16_0x490);
+ bcm_bprintf(b, " TXE_AQM fifo Ready: 0x%x\n", psmwd->i16_0x838);
+ bcm_bprintf(b, " Dagg ctrl: 0x%x\n", psmwd->i16_0x8c0);
+ bcm_bprintf(b, " shm_prewds_cnt: 0x%x\n", psmwd->shm_prewds_cnt);
+ bcm_bprintf(b, " shm_txtplufl_cnt: 0x%x\n", psmwd->shm_txtplufl_cnt);
+ bcm_bprintf(b, " shm_txphyerr_cnt: 0x%x\n", psmwd->shm_txphyerr_cnt);
+}
+
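+/* Map an extended-trap-data TLV tag to a printable name */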
+static const char* etd_trap_name(hnd_ext_tag_trap_t tag)
+{
+ switch (tag) {
+ case TAG_TRAP_SIGNATURE: return "TAG_TRAP_SIGNATURE";
+ case TAG_TRAP_STACK: return "TAG_TRAP_STACK";
+ case TAG_TRAP_MEMORY: return "TAG_TRAP_MEMORY";
+ case TAG_TRAP_DEEPSLEEP: return "TAG_TRAP_DEEPSLEEP";
+ case TAG_TRAP_PSM_WD: return "TAG_TRAP_PSM_WD";
+ case TAG_TRAP_PHY: return "TAG_TRAP_PHY";
+ case TAG_TRAP_BUS: return "TAG_TRAP_BUS";
+ case TAG_TRAP_MAC_SUSP: return "TAG_TRAP_MAC_SUSP";
+ case TAG_TRAP_BACKPLANE: return "TAG_TRAP_BACKPLANE";
+ case TAG_TRAP_PCIE_Q: return "TAG_TRAP_PCIE_Q";
+ case TAG_TRAP_WLC_STATE: return "TAG_TRAP_WLC_STATE";
+ case TAG_TRAP_MAC_WAKE: return "TAG_TRAP_MAC_WAKE";
+ case TAG_TRAP_HMAP: return "TAG_TRAP_HMAP";
+ case TAG_TRAP_PHYTXERR_THRESH: return "TAG_TRAP_PHYTXERR_THRESH";
+ case TAG_TRAP_HC_DATA: return "TAG_TRAP_HC_DATA";
+ case TAG_TRAP_LOG_DATA: return "TAG_TRAP_LOG_DATA";
+ case TAG_TRAP_CODE: return "TAG_TRAP_CODE";
+ case TAG_TRAP_MEM_BIT_FLIP: return "TAG_TRAP_MEM_BIT_FLIP";
+ case TAG_TRAP_LAST:
+ default:
+ return "Unknown";
+ }
+}
+
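+/*
+ * Decode the extended trap data delivered by the dongle: print the raw
+ * trap_data word, then walk the TLV list in the hnd_ext_trap_hdr_t and
+ * pretty-print each recognized tag (signature, stack, backplane error,
+ * heap state, PSM watchdog, PHY/MAC state, HMAP violation, etc.).
+ */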
+int dhd_prot_dump_extended_trap(dhd_pub_t *dhdp, struct bcmstrbuf *b, bool raw)
+{
+ uint32 i;
+ uint32 *ext_data;
+ hnd_ext_trap_hdr_t *hdr;
+ const bcm_tlv_t *tlv;
+ const trap_t *tr;
+ const uint32 *stack;
+ const hnd_ext_trap_bp_err_t *bpe;
+ uint32 raw_len;
+
+ ext_data = dhdp->extended_trap_data;
+
+ /* return if there is no extended trap data */
+ if (!ext_data || !(dhdp->dongle_trap_data & D2H_DEV_EXT_TRAP_DATA)) {
+ bcm_bprintf(b, "%d (0x%x)", dhdp->dongle_trap_data, dhdp->dongle_trap_data);
+ return BCME_OK;
+ }
+
+ bcm_bprintf(b, "Extended trap data\n");
+
+ /* First word is original trap_data */
+ bcm_bprintf(b, "trap_data = 0x%08x\n", *ext_data);
+ ext_data++;
+
+ /* Followed by the extended trap data header */
+ hdr = (hnd_ext_trap_hdr_t *)ext_data;
+ bcm_bprintf(b, "version: %d, len: %d\n", hdr->version, hdr->len);
+
+ /* Dump a list of all tags found before parsing data */
+ bcm_bprintf(b, "\nTags Found:\n");
+ for (i = 0; i < TAG_TRAP_LAST; i++) {
+ tlv = bcm_parse_tlvs(hdr->data, hdr->len, i);
+ if (tlv)
+ bcm_bprintf(b, "Tag: %d (%s), Length: %d\n", i, etd_trap_name(i), tlv->len);
+ }
+
+ /* XXX debug dump */
+ if (raw) {
+ raw_len = sizeof(hnd_ext_trap_hdr_t) + (hdr->len / 4) + (hdr->len % 4 ? 1 : 0);
+ for (i = 0; i < raw_len; i++)
+ {
+ bcm_bprintf(b, "0x%08x ", ext_data[i]);
+ if (i % 4 == 3)
+ bcm_bprintf(b, "\n");
+ }
+ return BCME_OK;
+ }
+
+ /* Extract the various supported TLVs from the extended trap data */
+ tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_CODE);
+ if (tlv) {
+ bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_CODE), tlv->len);
+ bcm_bprintf(b, "ETD TYPE: %d\n", tlv->data[0]);
+ }
+
+ tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_SIGNATURE);
+ if (tlv) {
+ bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_SIGNATURE), tlv->len);
+ tr = (const trap_t *)tlv->data;
+
+ bcm_bprintf(b, "TRAP %x: pc %x, lr %x, sp %x, cpsr %x, spsr %x\n",
+ tr->type, tr->pc, tr->r14, tr->r13, tr->cpsr, tr->spsr);
+ bcm_bprintf(b, " r0 %x, r1 %x, r2 %x, r3 %x, r4 %x, r5 %x, r6 %x\n",
+ tr->r0, tr->r1, tr->r2, tr->r3, tr->r4, tr->r5, tr->r6);
+ bcm_bprintf(b, " r7 %x, r8 %x, r9 %x, r10 %x, r11 %x, r12 %x\n",
+ tr->r7, tr->r8, tr->r9, tr->r10, tr->r11, tr->r12);
+ }
+
+ tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_STACK);
+ if (tlv) {
+ bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_STACK), tlv->len);
+ stack = (const uint32 *)tlv->data;
+ for (i = 0; i < (uint32)(tlv->len / 4); i++)
+ {
+ bcm_bprintf(b, " 0x%08x\n", *stack);
+ stack++;
+ }
+ }
+
+ tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_BACKPLANE);
+ if (tlv) {
+ bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_BACKPLANE), tlv->len);
+ bpe = (const hnd_ext_trap_bp_err_t *)tlv->data;
+ bcm_bprintf(b, " error: %x\n", bpe->error);
+ bcm_bprintf(b, " coreid: %x\n", bpe->coreid);
+ bcm_bprintf(b, " baseaddr: %x\n", bpe->baseaddr);
+ bcm_bprintf(b, " ioctrl: %x\n", bpe->ioctrl);
+ bcm_bprintf(b, " iostatus: %x\n", bpe->iostatus);
+ bcm_bprintf(b, " resetctrl: %x\n", bpe->resetctrl);
+ bcm_bprintf(b, " resetstatus: %x\n", bpe->resetstatus);
+ bcm_bprintf(b, " errlogctrl: %x\n", bpe->errlogctrl);
+ bcm_bprintf(b, " errlogdone: %x\n", bpe->errlogdone);
+ bcm_bprintf(b, " errlogstatus: %x\n", bpe->errlogstatus);
+ bcm_bprintf(b, " errlogaddrlo: %x\n", bpe->errlogaddrlo);
+ bcm_bprintf(b, " errlogaddrhi: %x\n", bpe->errlogaddrhi);
+ bcm_bprintf(b, " errlogid: %x\n", bpe->errlogid);
+ bcm_bprintf(b, " errloguser: %x\n", bpe->errloguser);
+ bcm_bprintf(b, " errlogflags: %x\n", bpe->errlogflags);
+ }
+
+ tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_MEMORY);
+ if (tlv) {
+ const hnd_ext_trap_heap_err_t* hme;
+
+ bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_MEMORY), tlv->len);
+ hme = (const hnd_ext_trap_heap_err_t *)tlv->data;
+ bcm_bprintf(b, " arena total: %d\n", hme->arena_total);
+ bcm_bprintf(b, " heap free: %d\n", hme->heap_free);
+ bcm_bprintf(b, " heap in use: %d\n", hme->heap_inuse);
+ bcm_bprintf(b, " mf count: %d\n", hme->mf_count);
+ bcm_bprintf(b, " stack LWM: %x\n", hme->stack_lwm);
+
+ bcm_bprintf(b, " Histogram:\n");
+ for (i = 0; i < (HEAP_HISTOGRAM_DUMP_LEN * 2); i += 2) {
+ if (hme->heap_histogm[i] == 0xfffe)
+ bcm_bprintf(b, " Others\t%d\t?\n", hme->heap_histogm[i + 1]);
+ else if (hme->heap_histogm[i] == 0xffff)
+ bcm_bprintf(b, " >= 256K\t%d\t?\n", hme->heap_histogm[i + 1]);
+ else
+ bcm_bprintf(b, " %d\t%d\t%d\n", hme->heap_histogm[i] << 2,
+ hme->heap_histogm[i + 1], (hme->heap_histogm[i] << 2)
+ * hme->heap_histogm[i + 1]);
+ }
+
+ bcm_bprintf(b, " Max free block: %d\n", hme->max_sz_free_blk[0] << 2);
+ for (i = 1; i < HEAP_MAX_SZ_BLKS_LEN; i++) {
+ bcm_bprintf(b, " Next lgst free block: %d\n", hme->max_sz_free_blk[i] << 2);
+ }
+ }
+
+ tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_PCIE_Q);
+ if (tlv) {
+ const hnd_ext_trap_pcie_mem_err_t* pqme;
+
+ bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_PCIE_Q), tlv->len);
+ pqme = (const hnd_ext_trap_pcie_mem_err_t *)tlv->data;
+ bcm_bprintf(b, " d2h queue len: %x\n", pqme->d2h_queue_len);
+ bcm_bprintf(b, " d2h req queue len: %x\n", pqme->d2h_req_queue_len);
+ }
+
+ tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_WLC_STATE);
+ if (tlv) {
+ const hnd_ext_trap_wlc_mem_err_t* wsme;
+
+ bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_WLC_STATE), tlv->len);
+ wsme = (const hnd_ext_trap_wlc_mem_err_t *)tlv->data;
+ bcm_bprintf(b, " instance: %d\n", wsme->instance);
+ bcm_bprintf(b, " associated: %d\n", wsme->associated);
+ bcm_bprintf(b, " peer count: %d\n", wsme->peer_cnt);
+ bcm_bprintf(b, " client count: %d\n", wsme->soft_ap_client_cnt);
+ bcm_bprintf(b, " TX_AC_BK_FIFO: %d\n", wsme->txqueue_len[0]);
+ bcm_bprintf(b, " TX_AC_BE_FIFO: %d\n", wsme->txqueue_len[1]);
+ bcm_bprintf(b, " TX_AC_VI_FIFO: %d\n", wsme->txqueue_len[2]);
+ bcm_bprintf(b, " TX_AC_VO_FIFO: %d\n", wsme->txqueue_len[3]);
+
+ if (tlv->len >= (sizeof(*wsme) * 2)) {
+ wsme++;
+ bcm_bprintf(b, "\n instance: %d\n", wsme->instance);
+ bcm_bprintf(b, " associated: %d\n", wsme->associated);
+ bcm_bprintf(b, " peer count: %d\n", wsme->peer_cnt);
+ bcm_bprintf(b, " client count: %d\n", wsme->soft_ap_client_cnt);
+ bcm_bprintf(b, " TX_AC_BK_FIFO: %d\n", wsme->txqueue_len[0]);
+ bcm_bprintf(b, " TX_AC_BE_FIFO: %d\n", wsme->txqueue_len[1]);
+ bcm_bprintf(b, " TX_AC_VI_FIFO: %d\n", wsme->txqueue_len[2]);
+ bcm_bprintf(b, " TX_AC_VO_FIFO: %d\n", wsme->txqueue_len[3]);
+ }
+ }
+
+ tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_PHY);
+ if (tlv) {
+ const hnd_ext_trap_phydbg_t* phydbg;
+ bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_PHY), tlv->len);
+ phydbg = (const hnd_ext_trap_phydbg_t *)tlv->data;
+ bcm_bprintf(b, " err: 0x%x\n", phydbg->err);
+ bcm_bprintf(b, " RxFeStatus: 0x%x\n", phydbg->RxFeStatus);
+ bcm_bprintf(b, " TxFIFOStatus0: 0x%x\n", phydbg->TxFIFOStatus0);
+ bcm_bprintf(b, " TxFIFOStatus1: 0x%x\n", phydbg->TxFIFOStatus1);
+ bcm_bprintf(b, " RfseqMode: 0x%x\n", phydbg->RfseqMode);
+ bcm_bprintf(b, " RfseqStatus0: 0x%x\n", phydbg->RfseqStatus0);
+ bcm_bprintf(b, " RfseqStatus1: 0x%x\n", phydbg->RfseqStatus1);
+ bcm_bprintf(b, " RfseqStatus_Ocl: 0x%x\n", phydbg->RfseqStatus_Ocl);
+ bcm_bprintf(b, " RfseqStatus_Ocl1: 0x%x\n", phydbg->RfseqStatus_Ocl1);
+ bcm_bprintf(b, " OCLControl1: 0x%x\n", phydbg->OCLControl1);
+ bcm_bprintf(b, " TxError: 0x%x\n", phydbg->TxError);
+ bcm_bprintf(b, " bphyTxError: 0x%x\n", phydbg->bphyTxError);
+ bcm_bprintf(b, " TxCCKError: 0x%x\n", phydbg->TxCCKError);
+ bcm_bprintf(b, " TxCtrlWrd0: 0x%x\n", phydbg->TxCtrlWrd0);
+ bcm_bprintf(b, " TxCtrlWrd1: 0x%x\n", phydbg->TxCtrlWrd1);
+ bcm_bprintf(b, " TxCtrlWrd2: 0x%x\n", phydbg->TxCtrlWrd2);
+ bcm_bprintf(b, " TxLsig0: 0x%x\n", phydbg->TxLsig0);
+ bcm_bprintf(b, " TxLsig1: 0x%x\n", phydbg->TxLsig1);
+ bcm_bprintf(b, " TxVhtSigA10: 0x%x\n", phydbg->TxVhtSigA10);
+ bcm_bprintf(b, " TxVhtSigA11: 0x%x\n", phydbg->TxVhtSigA11);
+ bcm_bprintf(b, " TxVhtSigA20: 0x%x\n", phydbg->TxVhtSigA20);
+ bcm_bprintf(b, " TxVhtSigA21: 0x%x\n", phydbg->TxVhtSigA21);
+ bcm_bprintf(b, " txPktLength: 0x%x\n", phydbg->txPktLength);
+ bcm_bprintf(b, " txPsdulengthCtr: 0x%x\n", phydbg->txPsdulengthCtr);
+ bcm_bprintf(b, " gpioClkControl: 0x%x\n", phydbg->gpioClkControl);
+ bcm_bprintf(b, " gpioSel: 0x%x\n", phydbg->gpioSel);
+ bcm_bprintf(b, " pktprocdebug: 0x%x\n", phydbg->pktprocdebug);
+ for (i = 0; i < 3; i++)
+ bcm_bprintf(b, " gpioOut[%d]: 0x%x\n", i, phydbg->gpioOut[i]);
+ }
+
+ tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_PSM_WD);
+ if (tlv) {
+ const hnd_ext_trap_psmwd_t* psmwd;
+
+ bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_PSM_WD), tlv->len);
+ psmwd = (const hnd_ext_trap_psmwd_t *)tlv->data;
+ bcm_bprintf(b, " version: 0x%x\n", psmwd->version);
+ bcm_bprintf(b, " maccontrol: 0x%x\n", psmwd->i32_maccontrol);
+ bcm_bprintf(b, " maccommand: 0x%x\n", psmwd->i32_maccommand);
+ bcm_bprintf(b, " macintstatus: 0x%x\n", psmwd->i32_macintstatus);
+ bcm_bprintf(b, " phydebug: 0x%x\n", psmwd->i32_phydebug);
+ bcm_bprintf(b, " clk_ctl_st: 0x%x\n", psmwd->i32_clk_ctl_st);
+ if (psmwd->version == 1) {
+ dump_psmwd_v1(tlv, b);
+ }
+ if (psmwd->version == 2) {
+ dump_psmwd_v2(tlv, b);
+ }
+ }
+	/* PHY TxErr MacDump */
+ tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_PHYTXERR_THRESH);
+ if (tlv) {
+ const hnd_ext_trap_macphytxerr_t* phytxerr = NULL;
+ bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_PHYTXERR_THRESH), tlv->len);
+ phytxerr = (const hnd_ext_trap_macphytxerr_t *)tlv->data;
+ bcm_bprintf(b, " version: 0x%x\n", phytxerr->version);
+ bcm_bprintf(b, " trap_reason: %d\n", phytxerr->trap_reason);
+ bcm_bprintf(b, " Tsf_rx_ts_0x63E: 0x%x\n", phytxerr->i16_0x63E);
+ bcm_bprintf(b, " Tsf_tx_ts_0x640: 0x%x\n", phytxerr->i16_0x640);
+ bcm_bprintf(b, " tsf_tmr_rx_end_ts_0x642: 0x%x\n", phytxerr->i16_0x642);
+ bcm_bprintf(b, " TDC_FrmLen0_0x846: 0x%x\n", phytxerr->i16_0x846);
+ bcm_bprintf(b, " TDC_FrmLen1_0x848: 0x%x\n", phytxerr->i16_0x848);
+ bcm_bprintf(b, " TDC_Txtime_0x84a: 0x%x\n", phytxerr->i16_0x84a);
+ bcm_bprintf(b, " TXE_BytCntInTxFrmLo_0xa5a: 0x%x\n", phytxerr->i16_0xa5a);
+ bcm_bprintf(b, " TXE_BytCntInTxFrmHi_0xa5c: 0x%x\n", phytxerr->i16_0xa5c);
+ bcm_bprintf(b, " TDC_VhtPsduLen0_0x856: 0x%x\n", phytxerr->i16_0x856);
+ bcm_bprintf(b, " TDC_VhtPsduLen1_0x858: 0x%x\n", phytxerr->i16_0x858);
+ bcm_bprintf(b, " PSM_BRC: 0x%x\n", phytxerr->i16_0x490);
+ bcm_bprintf(b, " PSM_BRC_1: 0x%x\n", phytxerr->i16_0x4d8);
+ bcm_bprintf(b, " shm_txerr_reason: 0x%x\n", phytxerr->shm_txerr_reason);
+ bcm_bprintf(b, " shm_pctl0: 0x%x\n", phytxerr->shm_pctl0);
+ bcm_bprintf(b, " shm_pctl1: 0x%x\n", phytxerr->shm_pctl1);
+ bcm_bprintf(b, " shm_pctl2: 0x%x\n", phytxerr->shm_pctl2);
+ bcm_bprintf(b, " shm_lsig0: 0x%x\n", phytxerr->shm_lsig0);
+ bcm_bprintf(b, " shm_lsig1: 0x%x\n", phytxerr->shm_lsig1);
+ bcm_bprintf(b, " shm_plcp0: 0x%x\n", phytxerr->shm_plcp0);
+ bcm_bprintf(b, " shm_plcp1: 0x%x\n", phytxerr->shm_plcp1);
+ bcm_bprintf(b, " shm_plcp2: 0x%x\n", phytxerr->shm_plcp2);
+ bcm_bprintf(b, " shm_vht_sigb0: 0x%x\n", phytxerr->shm_vht_sigb0);
+ bcm_bprintf(b, " shm_vht_sigb1: 0x%x\n", phytxerr->shm_vht_sigb1);
+ bcm_bprintf(b, " shm_tx_tst: 0x%x\n", phytxerr->shm_tx_tst);
+ bcm_bprintf(b, " shm_txerr_tm: 0x%x\n", phytxerr->shm_txerr_tm);
+ bcm_bprintf(b, " shm_curchannel: 0x%x\n", phytxerr->shm_curchannel);
+ bcm_bprintf(b, " shm_blk_crx_rxtsf_pos: 0x%x\n", phytxerr->shm_crx_rxtsf_pos);
+ bcm_bprintf(b, " shm_lasttx_tsf: 0x%x\n", phytxerr->shm_lasttx_tsf);
+ bcm_bprintf(b, " shm_s_rxtsftmrval: 0x%x\n", phytxerr->shm_s_rxtsftmrval);
+ bcm_bprintf(b, " Phy_0x29: 0x%x\n", phytxerr->i16_0x29);
+ bcm_bprintf(b, " Phy_0x2a: 0x%x\n", phytxerr->i16_0x2a);
+ }
+ tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_MAC_SUSP);
+ if (tlv) {
+ const hnd_ext_trap_macsusp_t* macsusp;
+ bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_MAC_SUSP), tlv->len);
+ macsusp = (const hnd_ext_trap_macsusp_t *)tlv->data;
+ bcm_bprintf(b, " version: %d\n", macsusp->version);
+ bcm_bprintf(b, " trap_reason: %d\n", macsusp->trap_reason);
+ bcm_bprintf(b, " maccontrol: 0x%x\n", macsusp->i32_maccontrol);
+ bcm_bprintf(b, " maccommand: 0x%x\n", macsusp->i32_maccommand);
+ bcm_bprintf(b, " macintstatus: 0x%x\n", macsusp->i32_macintstatus);
+ for (i = 0; i < 4; i++)
+ bcm_bprintf(b, " phydebug[%d]: 0x%x\n", i, macsusp->i32_phydebug[i]);
+ for (i = 0; i < 8; i++)
+ bcm_bprintf(b, " psmdebug[%d]: 0x%x\n", i, macsusp->i32_psmdebug[i]);
+ bcm_bprintf(b, " Rxe Status_1: 0x%x\n", macsusp->i16_0x41a);
+ bcm_bprintf(b, " Rxe Status_2: 0x%x\n", macsusp->i16_0x41c);
+ bcm_bprintf(b, " PSM BRC: 0x%x\n", macsusp->i16_0x490);
+ bcm_bprintf(b, " TXE Status: 0x%x\n", macsusp->i16_0x50e);
+ bcm_bprintf(b, " TXE xmtdmabusy: 0x%x\n", macsusp->i16_0x55e);
+ bcm_bprintf(b, " TXE XMTfifosuspflush: 0x%x\n", macsusp->i16_0x566);
+ bcm_bprintf(b, " IFS Stat: 0x%x\n", macsusp->i16_0x690);
+ bcm_bprintf(b, " IFS MEDBUSY CTR: 0x%x\n", macsusp->i16_0x692);
+ bcm_bprintf(b, " IFS TX DUR: 0x%x\n", macsusp->i16_0x694);
+ bcm_bprintf(b, " WEP CTL: 0x%x\n", macsusp->i16_0x7c0);
+ bcm_bprintf(b, " TXE AQM fifo Ready: 0x%x\n", macsusp->i16_0x838);
+ bcm_bprintf(b, " MHP status: 0x%x\n", macsusp->i16_0x880);
+ bcm_bprintf(b, " shm_prewds_cnt: 0x%x\n", macsusp->shm_prewds_cnt);
+ bcm_bprintf(b, " shm_ucode_dbgst: 0x%x\n", macsusp->shm_ucode_dbgst);
+ }
+
+ tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_MAC_WAKE);
+ if (tlv) {
+ const hnd_ext_trap_macenab_t* macwake;
+ bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_MAC_WAKE), tlv->len);
+ macwake = (const hnd_ext_trap_macenab_t *)tlv->data;
+ bcm_bprintf(b, " version: 0x%x\n", macwake->version);
+ bcm_bprintf(b, " trap_reason: 0x%x\n", macwake->trap_reason);
+ bcm_bprintf(b, " maccontrol: 0x%x\n", macwake->i32_maccontrol);
+ bcm_bprintf(b, " maccommand: 0x%x\n", macwake->i32_maccommand);
+ bcm_bprintf(b, " macintstatus: 0x%x\n", macwake->i32_macintstatus);
+ for (i = 0; i < 8; i++)
+ bcm_bprintf(b, " psmdebug[%d]: 0x%x\n", i, macwake->i32_psmdebug[i]);
+ bcm_bprintf(b, " clk_ctl_st: 0x%x\n", macwake->i32_clk_ctl_st);
+ bcm_bprintf(b, " powerctl: 0x%x\n", macwake->i32_powerctl);
+ bcm_bprintf(b, " gated clock en: 0x%x\n", macwake->i16_0x1a8);
+ bcm_bprintf(b, " PSM_SLP_TMR: 0x%x\n", macwake->i16_0x480);
+ bcm_bprintf(b, " PSM BRC: 0x%x\n", macwake->i16_0x490);
+ bcm_bprintf(b, " TSF CTL: 0x%x\n", macwake->i16_0x600);
+ bcm_bprintf(b, " IFS Stat: 0x%x\n", macwake->i16_0x690);
+ bcm_bprintf(b, " IFS_MEDBUSY_CTR: 0x%x\n", macwake->i16_0x692);
+ bcm_bprintf(b, " Slow_CTL: 0x%x\n", macwake->i16_0x6a0);
+ bcm_bprintf(b, " Slow_FRAC: 0x%x\n", macwake->i16_0x6a6);
+ bcm_bprintf(b, " fast power up delay: 0x%x\n", macwake->i16_0x6a8);
+ bcm_bprintf(b, " Slow_PER: 0x%x\n", macwake->i16_0x6aa);
+ bcm_bprintf(b, " shm_ucode_dbgst: 0x%x\n", macwake->shm_ucode_dbgst);
+ }
+
+ tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_BUS);
+ if (tlv) {
+ const bcm_dngl_pcie_hc_t* hc;
+ bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_BUS), tlv->len);
+ hc = (const bcm_dngl_pcie_hc_t *)tlv->data;
+ bcm_bprintf(b, " version: 0x%x\n", hc->version);
+ bcm_bprintf(b, " reserved: 0x%x\n", hc->reserved);
+ bcm_bprintf(b, " pcie_err_ind_type: 0x%x\n", hc->pcie_err_ind_type);
+ bcm_bprintf(b, " pcie_flag: 0x%x\n", hc->pcie_flag);
+ bcm_bprintf(b, " pcie_control_reg: 0x%x\n", hc->pcie_control_reg);
+ for (i = 0; i < HC_PCIEDEV_CONFIG_REGLIST_MAX; i++)
+ bcm_bprintf(b, " pcie_config_regs[%d]: 0x%x\n", i, hc->pcie_config_regs[i]);
+ }
+
+ tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_HMAP);
+ if (tlv) {
+ const pcie_hmapviolation_t* hmap;
+ hmap = (const pcie_hmapviolation_t *)tlv->data;
+ bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_HMAP), tlv->len);
+ bcm_bprintf(b, " HMAP Vio Addr Low: 0x%x\n", hmap->hmap_violationaddr_lo);
+ bcm_bprintf(b, " HMAP Vio Addr Hi: 0x%x\n", hmap->hmap_violationaddr_hi);
+ bcm_bprintf(b, " HMAP Vio Info: 0x%x\n", hmap->hmap_violation_info);
+ }
+
+ tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_MEM_BIT_FLIP);
+ if (tlv) {
+ const hnd_ext_trap_fb_mem_err_t* fbit;
+ bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_MEM_BIT_FLIP), tlv->len);
+ fbit = (const hnd_ext_trap_fb_mem_err_t *)tlv->data;
+ bcm_bprintf(b, " version: %d\n", fbit->version);
+ bcm_bprintf(b, " flip_bit_err_time: %d\n", fbit->flip_bit_err_time);
+ }
+
+ return BCME_OK;
+}
+
+#ifdef BCMPCIE
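+/*
+ * Send a host timestamp request (MSG_TYPE_HOSTTIMSTAMP) to the dongle: copy
+ * the caller's TLVs into the pre-allocated hostts_req_buf and post a work
+ * item on the control submission ring. Only one request may be in flight at
+ * a time.
+ */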
+int
+dhd_prot_send_host_timestamp(dhd_pub_t *dhdp, uchar *tlvs, uint16 tlv_len,
+ uint16 seqnum, uint16 xt_id)
+{
+ dhd_prot_t *prot = dhdp->prot;
+ host_timestamp_msg_t *ts_req;
+ unsigned long flags;
+ uint16 alloced = 0;
+ uchar *ts_tlv_buf;
+ msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
+
+ if ((tlvs == NULL) || (tlv_len == 0)) {
+ DHD_ERROR(("%s: argument error tlv: %p, tlv_len %d\n",
+ __FUNCTION__, tlvs, tlv_len));
+ return -1;
+ }
+
+#ifdef PCIE_INB_DW
+ if (dhd_prot_inc_hostactive_devwake_assert(dhdp->bus) != BCME_OK)
+ return BCME_ERROR;
+#endif /* PCIE_INB_DW */
+
+ DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
+
+ /* if Host TS req already pending go away */
+ if (prot->hostts_req_buf_inuse == TRUE) {
+ DHD_ERROR(("one host TS request already pending at device\n"));
+ DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhdp->bus);
+#endif
+ return -1;
+ }
+
+ /* Request for cbuf space */
+ ts_req = (host_timestamp_msg_t*)dhd_prot_alloc_ring_space(dhdp, ctrl_ring,
+ DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced, FALSE);
+ if (ts_req == NULL) {
+ DHD_ERROR(("couldn't allocate space on msgring to send host TS request\n"));
+ DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhdp->bus);
+#endif
+ return -1;
+ }
+
+ /* Common msg buf hdr */
+ ts_req->msg.msg_type = MSG_TYPE_HOSTTIMSTAMP;
+ ts_req->msg.if_id = 0;
+ ts_req->msg.flags = ctrl_ring->current_phase;
+ ts_req->msg.request_id = DHD_H2D_HOSTTS_REQ_PKTID;
+
+ ts_req->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
+ ctrl_ring->seqnum++;
+
+ ts_req->xt_id = xt_id;
+ ts_req->seqnum = seqnum;
+ /* populate TS req buffer info */
+ ts_req->input_data_len = htol16(tlv_len);
+ ts_req->host_buf_addr.high = htol32(PHYSADDRHI(prot->hostts_req_buf.pa));
+ ts_req->host_buf_addr.low = htol32(PHYSADDRLO(prot->hostts_req_buf.pa));
+ /* copy ioct payload */
+ ts_tlv_buf = (void *) prot->hostts_req_buf.va;
+ prot->hostts_req_buf_inuse = TRUE;
+ memcpy(ts_tlv_buf, tlvs, tlv_len);
+
+ OSL_CACHE_FLUSH((void *) prot->hostts_req_buf.va, tlv_len);
+
+ if (ISALIGNED(ts_tlv_buf, DMA_ALIGN_LEN) == FALSE) {
+		DHD_ERROR(("host TS req buffer address unaligned\n"));
+ }
+
+ DHD_CTL(("submitted Host TS request request_id %d, data_len %d, tx_id %d, seq %d\n",
+ ts_req->msg.request_id, ts_req->input_data_len,
+ ts_req->xt_id, ts_req->seqnum));
+
+ /* upd wrt ptr and raise interrupt */
+ dhd_prot_ring_write_complete(dhdp, ctrl_ring, ts_req,
+ DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
+
+ DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
+
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhdp->bus);
+#endif
+ return 0;
+} /* dhd_prot_send_host_timestamp */
+
+bool
+dhd_prot_data_path_tx_timestamp_logging(dhd_pub_t *dhd, bool enable, bool set)
+{
+ if (set)
+ dhd->prot->tx_ts_log_enabled = enable;
+
+ return dhd->prot->tx_ts_log_enabled;
+}
+
+bool
+dhd_prot_data_path_rx_timestamp_logging(dhd_pub_t *dhd, bool enable, bool set)
+{
+ if (set)
+ dhd->prot->rx_ts_log_enabled = enable;
+
+ return dhd->prot->rx_ts_log_enabled;
+}
+
+bool
+dhd_prot_pkt_noretry(dhd_pub_t *dhd, bool enable, bool set)
+{
+ if (set)
+ dhd->prot->no_retry = enable;
+
+ return dhd->prot->no_retry;
+}
+
+bool
+dhd_prot_pkt_noaggr(dhd_pub_t *dhd, bool enable, bool set)
+{
+ if (set)
+ dhd->prot->no_aggr = enable;
+
+ return dhd->prot->no_aggr;
+}
+
+bool
+dhd_prot_pkt_fixed_rate(dhd_pub_t *dhd, bool enable, bool set)
+{
+ if (set)
+ dhd->prot->fixed_rate = enable;
+
+ return dhd->prot->fixed_rate;
+}
+#endif /* BCMPCIE */
+
+void
+dhd_prot_dma_indx_free(dhd_pub_t *dhd)
+{
+ dhd_prot_t *prot = dhd->prot;
+
+ dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_wr_buf);
+ dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_rd_buf);
+}
+
+void
+dhd_msgbuf_delay_post_ts_bufs(dhd_pub_t *dhd)
+{
+ if (dhd->prot->max_tsbufpost > 0)
+ dhd_msgbuf_rxbuf_post_ts_bufs(dhd);
+}
+
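+/*
+ * Handle a firmware timestamp completion: reclaim the posted TS buffer by
+ * pktid, replenish the TS buffer pool, and hand the payload to the timesync
+ * module. When DHD_TIMESYNC is not built in, only an error is logged.
+ */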
+static void
+BCMFASTPATH(dhd_prot_process_fw_timestamp)(dhd_pub_t *dhd, void* buf)
+{
+#ifdef DHD_TIMESYNC
+ fw_timestamp_event_msg_t *resp;
+ uint32 pktid;
+ uint16 buflen, seqnum;
+ void * pkt;
+
+ resp = (fw_timestamp_event_msg_t *)buf;
+ pktid = ltoh32(resp->msg.request_id);
+ buflen = ltoh16(resp->buf_len);
+ seqnum = ltoh16(resp->seqnum);
+
+#if defined(DHD_PKTID_AUDIT_RING)
+ DHD_PKTID_AUDIT(dhd, dhd->prot->pktid_ctrl_map, pktid,
+ DHD_DUPLICATE_FREE);
+#endif /* DHD_PKTID_AUDIT_RING */
+
+	DHD_INFO(("id 0x%04x, len %d, phase 0x%02x, seqnum %d\n",
+		pktid, buflen, resp->msg.flags, seqnum));
+
+ if (!dhd->prot->cur_ts_bufs_posted) {
+ DHD_ERROR(("tsbuf posted are zero, but there is a completion\n"));
+ return;
+ }
+
+ dhd->prot->cur_ts_bufs_posted--;
+
+ if (!dhd_timesync_delay_post_bufs(dhd)) {
+ if (dhd->prot->max_tsbufpost > 0) {
+ dhd_msgbuf_rxbuf_post_ts_bufs(dhd);
+ }
+ }
+
+ pkt = dhd_prot_packet_get(dhd, pktid, PKTTYPE_TSBUF_RX, TRUE);
+
+ if (!pkt) {
+ DHD_ERROR(("no ts buffer associated with pktid 0x%04x\n", pktid));
+ return;
+ }
+
+ PKTSETLEN(dhd->osh, pkt, buflen);
+ dhd_timesync_handle_fw_timestamp(dhd->ts, PKTDATA(dhd->osh, pkt), buflen, seqnum);
+#ifdef DHD_USE_STATIC_CTRLBUF
+ PKTFREE_STATIC(dhd->osh, pkt, TRUE);
+#else
+ PKTFREE(dhd->osh, pkt, TRUE);
+#endif /* DHD_USE_STATIC_CTRLBUF */
+#else /* DHD_TIMESYNC */
+	DHD_ERROR(("Timesync feature not compiled in but got FW TS message\n"));
+#endif /* DHD_TIMESYNC */
+}
+
+uint16
+dhd_prot_get_ioctl_trans_id(dhd_pub_t *dhdp)
+{
+ return dhdp->prot->ioctl_trans_id;
+}
+
+#ifdef SNAPSHOT_UPLOAD
+/* send request to take snapshot */
+int
+dhd_prot_send_snapshot_request(dhd_pub_t *dhdp, uint8 snapshot_type, uint8 snapshot_param)
+{
+ dhd_prot_t *prot = dhdp->prot;
+ dhd_dma_buf_t *dma_buf = &prot->snapshot_upload_buf;
+ snapshot_upload_request_msg_t *snap_req;
+ unsigned long flags;
+ uint16 alloced = 0;
+ msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
+
+#ifdef PCIE_INB_DW
+ if (dhd_prot_inc_hostactive_devwake_assert(dhdp->bus) != BCME_OK)
+ return BCME_ERROR;
+#endif /* PCIE_INB_DW */
+
+ DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
+
+ /* Request for cbuf space */
+ snap_req = (snapshot_upload_request_msg_t *)dhd_prot_alloc_ring_space(dhdp,
+ ctrl_ring, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,
+ &alloced, FALSE);
+ if (snap_req == NULL) {
+ DHD_ERROR(("couldn't allocate space on msgring to send snapshot request\n"));
+ DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhdp->bus);
+#endif
+ return BCME_ERROR;
+ }
+
+ /* Common msg buf hdr */
+ snap_req->cmn_hdr.msg_type = MSG_TYPE_SNAPSHOT_UPLOAD;
+ snap_req->cmn_hdr.if_id = 0;
+ snap_req->cmn_hdr.flags = ctrl_ring->current_phase;
+ snap_req->cmn_hdr.request_id = DHD_H2D_SNAPSHOT_UPLOAD_REQ_PKTID;
+ snap_req->cmn_hdr.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
+ ctrl_ring->seqnum++;
+
+ /* snapshot request msg */
+ snap_req->snapshot_buf_len = htol32(dma_buf->len);
+ snap_req->snapshot_type = snapshot_type;
+ snap_req->snapshot_param = snapshot_param;
+ snap_req->host_buf_addr.high = htol32(PHYSADDRHI(dma_buf->pa));
+ snap_req->host_buf_addr.low = htol32(PHYSADDRLO(dma_buf->pa));
+
+ if (ISALIGNED(dma_buf->va, DMA_ALIGN_LEN) == FALSE) {
+		DHD_ERROR(("snapshot req buffer address unaligned\n"));
+ }
+
+ /* clear previous snapshot upload */
+ memset(dma_buf->va, 0, dma_buf->len);
+ prot->snapshot_upload_len = 0;
+ prot->snapshot_type = snapshot_type;
+ prot->snapshot_cmpl_pending = TRUE;
+
+ DHD_CTL(("submitted snapshot request request_id %d, buf_len %d, type %d, param %d\n",
+ snap_req->cmn_hdr.request_id, snap_req->snapshot_buf_len,
+ snap_req->snapshot_type, snap_req->snapshot_param));
+
+ /* upd wrt ptr and raise interrupt */
+ dhd_prot_ring_write_complete(dhdp, ctrl_ring, snap_req,
+ DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
+
+ DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
+
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhdp->bus);
+#endif
+
+ return BCME_OK;
+} /* dhd_prot_send_snapshot_request */
+
+/* get uploaded snapshot */
+int
+dhd_prot_get_snapshot(dhd_pub_t *dhdp, uint8 snapshot_type, uint32 offset,
+ uint32 dst_buf_size, uint8 *dst_buf, uint32 *dst_size, bool *is_more)
+{
+ dhd_prot_t *prot = dhdp->prot;
+ uint8 *buf = prot->snapshot_upload_buf.va;
+ uint8 *buf_end = buf + prot->snapshot_upload_len;
+ uint32 copy_size;
+
+ /* snapshot type must match */
+ if (prot->snapshot_type != snapshot_type) {
+ return BCME_DATA_NOTFOUND;
+ }
+
+ /* snapshot not completed */
+ if (prot->snapshot_cmpl_pending) {
+ return BCME_NOTREADY;
+ }
+
+ /* offset within the buffer */
+ if (buf + offset >= buf_end) {
+ return BCME_BADARG;
+ }
+
+ /* copy dst buf size or remaining size */
+ copy_size = MIN(dst_buf_size, buf_end - (buf + offset));
+ memcpy(dst_buf, buf + offset, copy_size);
+
+ /* return size and is_more */
+ *dst_size = copy_size;
+ *is_more = (offset + copy_size < prot->snapshot_upload_len) ?
+ TRUE : FALSE;
+ return BCME_OK;
+} /* dhd_prot_get_snapshot */
+
+#endif /* SNAPSHOT_UPLOAD */
+
+int dhd_get_hscb_info(dhd_pub_t *dhd, void ** va, uint32 *len)
+{
+ if (!dhd->hscb_enable) {
+ if (len) {
+ /* prevent "Operation not supported" dhd message */
+ *len = 0;
+ return BCME_OK;
+ }
+ return BCME_UNSUPPORTED;
+ }
+
+ if (va) {
+ *va = dhd->prot->host_scb_buf.va;
+ }
+ if (len) {
+ *len = dhd->prot->host_scb_buf.len;
+ }
+
+ return BCME_OK;
+}
+
+#ifdef DHD_BUS_MEM_ACCESS
+int dhd_get_hscb_buff(dhd_pub_t *dhd, uint32 offset, uint32 length, void * buff)
+{
+ if (!dhd->hscb_enable) {
+ return BCME_UNSUPPORTED;
+ }
+
+ if (dhd->prot->host_scb_buf.va == NULL ||
+ ((uint64)offset + length > (uint64)dhd->prot->host_scb_buf.len)) {
+ return BCME_BADADDR;
+ }
+
+ memcpy(buff, (char*)dhd->prot->host_scb_buf.va + offset, length);
+
+ return BCME_OK;
+}
+#endif /* DHD_BUS_MEM_ACCESS */
+
+#ifdef DHD_HP2P
+uint32
+dhd_prot_pkt_threshold(dhd_pub_t *dhd, bool set, uint32 val)
+{
+ if (set)
+ dhd->pkt_thresh = (uint16)val;
+
+ val = dhd->pkt_thresh;
+
+ return val;
+}
+
+uint32
+dhd_prot_time_threshold(dhd_pub_t *dhd, bool set, uint32 val)
+{
+ if (set)
+ dhd->time_thresh = (uint16)val;
+
+ val = dhd->time_thresh;
+
+ return val;
+}
+
+uint32
+dhd_prot_pkt_expiry(dhd_pub_t *dhd, bool set, uint32 val)
+{
+ if (set)
+ dhd->pkt_expiry = (uint16)val;
+
+ val = dhd->pkt_expiry;
+
+ return val;
+}
+
+uint8
+dhd_prot_hp2p_enable(dhd_pub_t *dhd, bool set, int enable)
+{
+ uint8 ret = 0;
+ if (set) {
+ dhd->hp2p_enable = (enable & 0xf) ? TRUE : FALSE;
+ dhd->hp2p_infra_enable = ((enable >> 4) & 0xf) ? TRUE : FALSE;
+
+ if (enable) {
+ dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_TID_MAP);
+ } else {
+ dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_AC_MAP);
+ }
+ }
+	ret = dhd->hp2p_infra_enable ? 0x1 : 0x0;
+	ret <<= 4;
+	ret |= dhd->hp2p_enable ? 0x1 : 0x0;
+
+ return ret;
+}
+
+static void
+dhd_update_hp2p_rxstats(dhd_pub_t *dhd, host_rxbuf_cmpl_t *rxstatus)
+{
+ ts_timestamp_t *ts = (ts_timestamp_t *)&rxstatus->ts;
+ hp2p_info_t *hp2p_info;
+ uint32 dur1;
+
+ hp2p_info = &dhd->hp2p_info[0];
+ dur1 = ((ts->high & 0x3FF) * HP2P_TIME_SCALE) / 100;
+
+ if (dur1 > (MAX_RX_HIST_BIN - 1)) {
+ dur1 = MAX_RX_HIST_BIN - 1;
+ DHD_INFO(("%s: 0x%x 0x%x\n",
+ __FUNCTION__, ts->low, ts->high));
+ }
+
+ hp2p_info->rx_t0[dur1 % MAX_RX_HIST_BIN]++;
+ return;
+}
+
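+/*
+ * Update the per-flow HP2P TX latency histograms: the completion timestamp
+ * packs two 10-bit durations into ts->high, each scaled by HP2P_TIME_SCALE
+ * and binned into tx_t0/tx_t1 (clamped to the last bin on overflow).
+ */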
+static void
+dhd_update_hp2p_txstats(dhd_pub_t *dhd, host_txbuf_cmpl_t *txstatus)
+{
+ ts_timestamp_t *ts = (ts_timestamp_t *)&txstatus->ts;
+ uint16 flowid = txstatus->compl_hdr.flow_ring_id;
+ uint32 hp2p_flowid, dur1, dur2;
+ hp2p_info_t *hp2p_info;
+
+ hp2p_flowid = dhd->bus->max_submission_rings -
+ dhd->bus->max_cmn_rings - flowid + 1;
+ hp2p_info = &dhd->hp2p_info[hp2p_flowid];
+ ts = (ts_timestamp_t *)&(txstatus->ts);
+
+ dur1 = ((ts->high & 0x3FF) * HP2P_TIME_SCALE) / 1000;
+ if (dur1 > (MAX_TX_HIST_BIN - 1)) {
+ dur1 = MAX_TX_HIST_BIN - 1;
+ DHD_INFO(("%s: 0x%x 0x%x\n", __FUNCTION__, ts->low, ts->high));
+ }
+ hp2p_info->tx_t0[dur1 % MAX_TX_HIST_BIN]++;
+
+ dur2 = (((ts->high >> 10) & 0x3FF) * HP2P_TIME_SCALE) / 1000;
+ if (dur2 > (MAX_TX_HIST_BIN - 1)) {
+ dur2 = MAX_TX_HIST_BIN - 1;
+ DHD_INFO(("%s: 0x%x 0x%x\n", __FUNCTION__, ts->low, ts->high));
+ }
+
+ hp2p_info->tx_t1[dur2 % MAX_TX_HIST_BIN]++;
+ return;
+}
+
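+/*
+ * hrtimer callback for HP2P burst flushing: when the timer fires before the
+ * packet threshold is reached, flush any pending TX descriptors for the flow
+ * and account the flush against the timer limit.
+ */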
+enum hrtimer_restart dhd_hp2p_write(struct hrtimer *timer)
+{
+ hp2p_info_t *hp2p_info;
+ unsigned long flags;
+ dhd_pub_t *dhdp;
+
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ hp2p_info = container_of(timer, hp2p_info_t, timer);
+ GCC_DIAGNOSTIC_POP();
+
+ dhdp = hp2p_info->dhd_pub;
+ if (!dhdp) {
+ goto done;
+ }
+
+ DHD_INFO(("%s: pend_item = %d flowid = %d\n",
+ __FUNCTION__, ((msgbuf_ring_t *)hp2p_info->ring)->pend_items_count,
+ hp2p_info->flowid));
+
+ flags = dhd_os_hp2plock(dhdp);
+
+ dhd_prot_txdata_write_flush(dhdp, hp2p_info->flowid);
+ hp2p_info->hrtimer_init = FALSE;
+ hp2p_info->num_timer_limit++;
+
+ dhd_os_hp2punlock(dhdp, flags);
+done:
+ return HRTIMER_NORESTART;
+}
+
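+/*
+ * Burst-coalescing policy for HP2P TX: flush immediately once
+ * pend_items_count reaches pkt_thresh (cancelling the pending timer),
+ * otherwise arm a one-shot hrtimer of time_thresh microseconds so a partial
+ * burst is still flushed promptly.
+ */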
+static void
+dhd_calc_hp2p_burst(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint16 flowid)
+{
+ hp2p_info_t *hp2p_info;
+ uint16 hp2p_flowid;
+
+ hp2p_flowid = dhd->bus->max_submission_rings -
+ dhd->bus->max_cmn_rings - flowid + 1;
+ hp2p_info = &dhd->hp2p_info[hp2p_flowid];
+
+ if (ring->pend_items_count == dhd->pkt_thresh) {
+ dhd_prot_txdata_write_flush(dhd, flowid);
+
+ hp2p_info->hrtimer_init = FALSE;
+ hp2p_info->ring = NULL;
+ hp2p_info->num_pkt_limit++;
+ hrtimer_cancel(&hp2p_info->timer);
+
+ DHD_INFO(("%s: cancel hrtimer for flowid = %d \n"
+ "hp2p_flowid = %d pkt_thresh = %d\n",
+ __FUNCTION__, flowid, hp2p_flowid, dhd->pkt_thresh));
+ } else {
+ if (hp2p_info->hrtimer_init == FALSE) {
+ hp2p_info->hrtimer_init = TRUE;
+ hp2p_info->flowid = flowid;
+ hp2p_info->dhd_pub = dhd;
+ hp2p_info->ring = ring;
+ hp2p_info->num_timer_start++;
+
+ hrtimer_start(&hp2p_info->timer,
+ ktime_set(0, dhd->time_thresh * 1000), HRTIMER_MODE_REL);
+
+ DHD_INFO(("%s: start hrtimer for flowid = %d hp2_flowid = %d\n",
+ __FUNCTION__, flowid, hp2p_flowid));
+ }
+ }
+ return;
+}
+
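+/*
+ * Illustrative summary of the batching above: HP2P tx submissions are
+ * flushed either when pend_items_count reaches dhd->pkt_thresh (the armed
+ * hrtimer is then cancelled), or when the hrtimer armed for
+ * dhd->time_thresh microseconds fires first and dhd_hp2p_write() flushes
+ * the ring, whichever happens first.
+ */
+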
+static void
+dhd_update_hp2p_txdesc(dhd_pub_t *dhd, host_txbuf_post_t *txdesc)
+{
+ uint64 ts;
+
+ ts = local_clock();
+ do_div(ts, 1000);
+
+ txdesc->metadata_buf_len = 0;
+ txdesc->metadata_buf_addr.high_addr = htol32((ts >> 32) & 0xFFFFFFFF);
+ txdesc->metadata_buf_addr.low_addr = htol32(ts & 0xFFFFFFFF);
+ txdesc->exp_time = dhd->pkt_expiry;
+
+ DHD_INFO(("%s: metadata_high = 0x%x metadata_low = 0x%x exp_time = %x\n",
+ __FUNCTION__, txdesc->metadata_buf_addr.high_addr,
+ txdesc->metadata_buf_addr.low_addr,
+ txdesc->exp_time));
+
+ return;
+}
+#endif /* DHD_HP2P */
+
+#ifdef DHD_MAP_LOGGING
+void
+dhd_prot_smmu_fault_dump(dhd_pub_t *dhdp)
+{
+ dhd_prot_debug_info_print(dhdp);
+ OSL_DMA_MAP_DUMP(dhdp->osh);
+#ifdef DHD_MAP_PKTID_LOGGING
+ dhd_pktid_logging_dump(dhdp);
+#endif /* DHD_MAP_PKTID_LOGGING */
+#ifdef DHD_FW_COREDUMP
+ dhdp->memdump_type = DUMP_TYPE_SMMU_FAULT;
+#ifdef DNGL_AXI_ERROR_LOGGING
+ dhdp->memdump_enabled = DUMP_MEMFILE;
+ dhd_bus_get_mem_dump(dhdp);
+#else
+ dhdp->memdump_enabled = DUMP_MEMONLY;
+ dhd_bus_mem_dump(dhdp);
+#endif /* DNGL_AXI_ERROR_LOGGING */
+#endif /* DHD_FW_COREDUMP */
+}
+#endif /* DHD_MAP_LOGGING */
+
+#ifdef DHD_FLOW_RING_STATUS_TRACE
+void
+dhd_dump_bus_flow_ring_status_trace(
+ dhd_bus_t *bus, struct bcmstrbuf *strbuf, dhd_frs_trace_t *frs_trace, int dumpsz, char *str)
+{
+ int i;
+ dhd_prot_t *prot = bus->dhd->prot;
+ uint32 isr_cnt = bus->frs_isr_count % FRS_TRACE_SIZE;
+ uint32 dpc_cnt = bus->frs_dpc_count % FRS_TRACE_SIZE;
+
+ bcm_bprintf(strbuf, "---- %s ------ isr_cnt: %d dpc_cnt %d\n",
+ str, isr_cnt, dpc_cnt);
+ bcm_bprintf(strbuf, "%s\t%s\t%s\t%s\t%s\t%s\t",
+ "Timestamp ns", "H2DCtrlPost", "D2HCtrlCpl",
+ "H2DRxPost", "D2HRxCpl", "D2HTxCpl");
+ if (prot->h2dring_info_subn != NULL && prot->d2hring_info_cpln != NULL) {
+ bcm_bprintf(strbuf, "%s\t%s\t", "H2DRingInfoPost", "D2HRingInfoCpl");
+ }
+ if (prot->d2hring_edl != NULL) {
+ bcm_bprintf(strbuf, "%s", "D2HRingEDL");
+ }
+ bcm_bprintf(strbuf, "\n");
+ for (i = 0; i < dumpsz; i++) {
+ bcm_bprintf(strbuf, "%llu\t%6u-%u\t%6u-%u\t%6u-%u\t%6u-%u\t%6u-%u\t",
+ frs_trace[i].timestamp,
+ frs_trace[i].h2d_ctrl_post_drd,
+ frs_trace[i].h2d_ctrl_post_dwr,
+ frs_trace[i].d2h_ctrl_cpln_drd,
+ frs_trace[i].d2h_ctrl_cpln_dwr,
+ frs_trace[i].h2d_rx_post_drd,
+ frs_trace[i].h2d_rx_post_dwr,
+ frs_trace[i].d2h_rx_cpln_drd,
+ frs_trace[i].d2h_rx_cpln_dwr,
+ frs_trace[i].d2h_tx_cpln_drd,
+ frs_trace[i].d2h_tx_cpln_dwr);
+ if (prot->h2dring_info_subn != NULL && prot->d2hring_info_cpln != NULL) {
+ bcm_bprintf(strbuf, "%6u-%u\t%6u-%u\t",
+ frs_trace[i].h2d_info_post_drd,
+ frs_trace[i].h2d_info_post_dwr,
+ frs_trace[i].d2h_info_cpln_drd,
+ frs_trace[i].d2h_info_cpln_dwr);
+ }
+ if (prot->d2hring_edl != NULL) {
+ bcm_bprintf(strbuf, "%6u-%u",
+ frs_trace[i].d2h_ring_edl_drd,
+ frs_trace[i].d2h_ring_edl_dwr);
+
+ }
+ bcm_bprintf(strbuf, "\n");
+ }
+ bcm_bprintf(strbuf, "--------------------------\n");
+}
+
+void
+dhd_dump_bus_flow_ring_status_isr_trace(dhd_bus_t *bus, struct bcmstrbuf *strbuf)
+{
+ int dumpsz;
+
+ dumpsz = bus->frs_isr_count < FRS_TRACE_SIZE ?
+ bus->frs_isr_count : FRS_TRACE_SIZE;
+ if (dumpsz == 0) {
+ bcm_bprintf(strbuf, "\nEMPTY ISR FLOW RING TRACE\n");
+ return;
+ }
+ dhd_dump_bus_flow_ring_status_trace(bus, strbuf, bus->frs_isr_trace,
+ dumpsz, "ISR FLOW RING TRACE DRD-DWR");
+}
+
+void
+dhd_dump_bus_flow_ring_status_dpc_trace(dhd_bus_t *bus, struct bcmstrbuf *strbuf)
+{
+ int dumpsz;
+
+ dumpsz = bus->frs_dpc_count < FRS_TRACE_SIZE ?
+ bus->frs_dpc_count : FRS_TRACE_SIZE;
+ if (dumpsz == 0) {
+ bcm_bprintf(strbuf, "\nEMPTY ISR FLOW RING TRACE\n");
+ return;
+ }
+ dhd_dump_bus_flow_ring_status_trace(bus, strbuf, bus->frs_dpc_trace,
+ dumpsz, "DPC FLOW RING TRACE DRD-DWR");
+}
+static void
+dhd_bus_flow_ring_status_trace(dhd_pub_t *dhd, dhd_frs_trace_t *frs_trace)
+{
+ dhd_prot_t *prot = dhd->prot;
+ msgbuf_ring_t *ring;
+
+ ring = &prot->h2dring_ctrl_subn;
+ frs_trace->h2d_ctrl_post_drd =
+ dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
+ frs_trace->h2d_ctrl_post_dwr =
+ dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_WR_UPD, ring->idx);
+
+ ring = &prot->d2hring_ctrl_cpln;
+ frs_trace->d2h_ctrl_cpln_drd =
+ dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, ring->idx);
+ frs_trace->d2h_ctrl_cpln_dwr =
+ dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
+
+ ring = &prot->h2dring_rxp_subn;
+ frs_trace->h2d_rx_post_drd =
+ dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
+ frs_trace->h2d_rx_post_dwr =
+ dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_WR_UPD, ring->idx);
+
+ ring = &prot->d2hring_rx_cpln;
+ frs_trace->d2h_rx_cpln_drd =
+ dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, ring->idx);
+ frs_trace->d2h_rx_cpln_dwr =
+ dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
+
+ ring = &prot->d2hring_tx_cpln;
+ frs_trace->d2h_tx_cpln_drd =
+ dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, ring->idx);
+ frs_trace->d2h_tx_cpln_dwr =
+ dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
+
+ if (prot->h2dring_info_subn != NULL && prot->d2hring_info_cpln != NULL) {
+ ring = prot->h2dring_info_subn;
+ frs_trace->h2d_info_post_drd =
+ dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
+ frs_trace->h2d_info_post_dwr =
+ dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_WR_UPD, ring->idx);
+
+ ring = prot->d2hring_info_cpln;
+ frs_trace->d2h_info_cpln_drd =
+ dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, ring->idx);
+ frs_trace->d2h_info_cpln_dwr =
+ dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
+ }
+ if (prot->d2hring_edl != NULL) {
+ ring = prot->d2hring_edl;
+ frs_trace->d2h_ring_edl_drd =
+ dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, ring->idx);
+ frs_trace->d2h_ring_edl_dwr =
+ dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
+ }
+
+}
+
+void
+dhd_bus_flow_ring_status_isr_trace(dhd_pub_t *dhd)
+{
+ uint32 cnt = dhd->bus->frs_isr_count % FRS_TRACE_SIZE;
+ dhd_frs_trace_t *frs_isr_trace = &dhd->bus->frs_isr_trace[cnt];
+ uint64 time_ns_prev = frs_isr_trace->timestamp;
+ uint64 time_ns_now = OSL_LOCALTIME_NS();
+
+ if ((time_ns_now - time_ns_prev) < 250000) { /* delta less than 250us */
+ return;
+ }
+
+ dhd_bus_flow_ring_status_trace(dhd, frs_isr_trace);
+
+ frs_isr_trace->timestamp = OSL_LOCALTIME_NS();
+ dhd->bus->frs_isr_count++;
+}
+
+void
+dhd_bus_flow_ring_status_dpc_trace(dhd_pub_t *dhd)
+{
+ uint32 cnt = dhd->bus->frs_dpc_count % FRS_TRACE_SIZE;
+ dhd_frs_trace_t *frs_dpc_trace = &dhd->bus->frs_dpc_trace[cnt];
+ uint64 time_ns_prev = frs_dpc_trace->timestamp;
+ uint64 time_ns_now = OSL_LOCALTIME_NS();
+
+ if ((time_ns_now - time_ns_prev) < 250000) { /* delta less than 250us */
+ return;
+ }
+
+ dhd_bus_flow_ring_status_trace(dhd, frs_dpc_trace);
+
+ frs_dpc_trace->timestamp = OSL_LOCALTIME_NS();
+ dhd->bus->frs_dpc_count++;
+}
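+
+/*
+ * Illustrative note: both trace hooks above snapshot the DMA read/write
+ * indices into a circular buffer of FRS_TRACE_SIZE entries, and are
+ * rate-limited so that at most one snapshot is taken per 250us per
+ * context (ISR vs. DPC).
+ */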
+#endif /* DHD_FLOW_RING_STATUS_TRACE */
diff --git a/bcmdhd.101.10.361.x/dhd_pcie.c b/bcmdhd.101.10.361.x/dhd_pcie.c
new file mode 100755
index 0000000..f69951d
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_pcie.c
@@ -0,0 +1,17674 @@
+/*
+ * DHD Bus Module for PCIE
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+/** XXX Twiki: [PCIeFullDongleArchitecture] */
+
+/* include files */
+#include <typedefs.h>
+#include <bcmutils.h>
+#include <bcmrand.h>
+#include <bcmdevs.h>
+#include <bcmdevs_legacy.h> /* need to still support chips no longer in trunk firmware */
+#include <siutils.h>
+#include <hndoobr.h>
+#include <hndsoc.h>
+#include <hndpmu.h>
+#include <etd.h>
+#include <hnd_debug.h>
+#include <sbchipc.h>
+#include <sbhndarm.h>
+#include <hnd_armtrap.h>
+#if defined(DHD_DEBUG)
+#include <hnd_cons.h>
+#endif /* defined(DHD_DEBUG) */
+#include <dngl_stats.h>
+#include <pcie_core.h>
+#include <dhd.h>
+#include <dhd_bus.h>
+#include <dhd_flowring.h>
+#include <dhd_proto.h>
+#include <dhd_dbg.h>
+#include <dhd_debug.h>
+#if defined(LINUX) || defined(linux)
+#include <dhd_daemon.h>
+#endif /* LINUX || linux */
+#include <dhdioctl.h>
+#include <sdiovar.h>
+#include <bcmmsgbuf.h>
+#include <pcicfg.h>
+#include <dhd_pcie.h>
+#include <bcmpcie.h>
+#include <bcmendian.h>
+#include <bcmstdlib_s.h>
+#ifdef DHDTCPACK_SUPPRESS
+#include <dhd_ip.h>
+#endif /* DHDTCPACK_SUPPRESS */
+#include <bcmevent.h>
+#include <dhd_config.h>
+
+#ifdef DHD_TIMESYNC
+#include <dhd_timesync.h>
+#endif /* DHD_TIMESYNC */
+
+#ifdef BCM_ROUTER_DHD
+#include <bcmnvram.h>
+#define STR_END "END\0\0"
+#define BOARDREV_PROMOTABLE_STR "0xff"
+#endif
+#if defined(BCMEMBEDIMAGE)
+#include BCMEMBEDIMAGE
+#endif /* BCMEMBEDIMAGE */
+
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+#include <linux/pm_runtime.h>
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+
+#if defined(DEBUGGER) || defined (DHD_DSCOPE)
+#include <debugger.h>
+#endif /* DEBUGGER || DHD_DSCOPE */
+
+#if defined(FW_SIGNATURE)
+#include <dngl_rtlv.h>
+#include <bcm_fwsign.h>
+#endif /* FW_SIGNATURE */
+
+#ifdef DNGL_AXI_ERROR_LOGGING
+#include <dhd_linux_wq.h>
+#include <dhd_linux.h>
+#endif /* DNGL_AXI_ERROR_LOGGING */
+
+#if defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON)
+#include <dhd_linux_priv.h>
+#endif /* DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */
+
+#define EXTENDED_PCIE_DEBUG_DUMP 1 /* Enable extended PCIe register dump */
+
+#define MEMBLOCK 2048 /* Block size used for downloading of dongle image */
+#ifdef LINUX
+#define MAX_WKLK_IDLE_CHECK 3 /* times dhd_wake_lock checked before deciding not to suspend */
+#endif /* LINUX */
+
+#define DHD_MAX_ITEMS_HPP_TXCPL_RING 512
+#define DHD_MAX_ITEMS_HPP_RXCPL_RING 512
+#define MAX_HP2P_CMPL_RINGS 2u
+
+/* XXX defines for 4378 */
+#define ARMCR4REG_CORECAP (0x4/sizeof(uint32))
+#define ARMCR4REG_MPUCTRL (0x90/sizeof(uint32))
+#define ACC_MPU_SHIFT 25
+#define ACC_MPU_MASK (0x1u << ACC_MPU_SHIFT)
+
+/* XXX Offset for 4375 work around register */
+#define REG_WORK_AROUND (0x1e4/sizeof(uint32))
+
+/* XXX defines for 43602a0 workaround JIRA CRWLARMCR4-53 */
+#define ARMCR4REG_BANKIDX (0x40/sizeof(uint32))
+#define ARMCR4REG_BANKPDA (0x4C/sizeof(uint32))
+/* Temporary WAR to fix precommit until the sync issue between trunk & precommit branch is resolved */
+
+/* CTO Prevention Recovery */
+#define CTO_TO_CLEAR_WAIT_MS 50
+#define CTO_TO_CLEAR_WAIT_MAX_CNT 200
+
+/* FLR setting */
+#define PCIE_FLR_CAPAB_BIT 28
+#define PCIE_FUNCTION_LEVEL_RESET_BIT 15
+
+#ifdef BCMQT_HW
+extern int qt_flr_reset;
+/* FLR takes longer on QT Z boards so increasing the delay by 30% */
+#define DHD_FUNCTION_LEVEL_RESET_DELAY 70u
+#define DHD_SSRESET_STATUS_RETRY_DELAY 55u
+#else
+#define DHD_FUNCTION_LEVEL_RESET_DELAY 70u /* 70 msec delay */
+#define DHD_SSRESET_STATUS_RETRY_DELAY 40u
+#endif /* BCMQT_HW */
+/*
+ * Increase SSReset de-assert time to 8ms,
+ * since re-scan takes longer on 4378B0.
+ */
+#define DHD_SSRESET_STATUS_RETRIES 200u
+
+/* Fetch address of a member in the pciedev_shared structure in dongle memory */
+#define DHD_PCIE_SHARED_MEMBER_ADDR(bus, member) \
+ (bus)->shared_addr + OFFSETOF(pciedev_shared_t, member)
+
+/* Fetch address of a member in rings_info_ptr structure in dongle memory */
+#define DHD_RING_INFO_MEMBER_ADDR(bus, member) \
+ (bus)->pcie_sh->rings_info_ptr + OFFSETOF(ring_info_t, member)
+
+/* Fetch address of a member in the ring_mem structure in dongle memory */
+#define DHD_RING_MEM_MEMBER_ADDR(bus, ringid, member) \
+ (bus)->ring_sh[ringid].ring_mem_addr + OFFSETOF(ring_mem_t, member)
+
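+/*
+ * Usage sketch for the address macros above (illustrative; 'wr' is a
+ * placeholder member name, not necessarily a real ring_mem_t field):
+ *   ulong addr = DHD_RING_MEM_MEMBER_ADDR(bus, ringid, wr);
+ *   uint16 wr_idx = dhdpcie_bus_rtcm16(bus, addr);
+ * i.e. the macros yield a dongle (TCM) address that is then accessed
+ * through the bus TCM accessors.
+ */
+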
+#ifdef DHD_REPLACE_LOG_INFO_TO_TRACE
+#define DHD_PCIE_INFO DHD_TRACE
+#else
+#define DHD_PCIE_INFO DHD_INFO
+#endif /* DHD_REPLACE_LOG_INFO_TO_TRACE */
+
+#if defined(SUPPORT_MULTIPLE_BOARD_REV)
+ extern unsigned int system_rev;
+#endif /* SUPPORT_MULTIPLE_BOARD_REV */
+
+#ifdef EWP_EDL
+extern int host_edl_support;
+#endif
+
+#ifdef BCMQT_HW
+extern int qt_dngl_timeout;
+#endif /* BCMQT_HW */
+
+/* This can be overridden by the module parameter (dma_ring_indices) defined in dhd_linux.c */
+uint dma_ring_indices = 0;
+/* This can be overridden by the module parameter (h2d_phase) defined in dhd_linux.c */
+bool h2d_phase = 0;
+/* This can be overridden by the module parameter (force_trap_bad_h2d_phase)
+ * defined in dhd_linux.c
+ */
+bool force_trap_bad_h2d_phase = 0;
+
+int dhd_dongle_ramsize;
+struct dhd_bus *g_dhd_bus = NULL;
+#ifdef DNGL_AXI_ERROR_LOGGING
+static void dhd_log_dump_axi_error(uint8 *axi_err);
+#endif /* DNGL_AXI_ERROR_LOGGING */
+
+static int dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size);
+static int dhdpcie_bus_readconsole(dhd_bus_t *bus);
+#if defined(DHD_FW_COREDUMP)
+static int dhdpcie_mem_dump(dhd_bus_t *bus);
+static int dhdpcie_get_mem_dump(dhd_bus_t *bus);
+#endif /* DHD_FW_COREDUMP */
+
+static int dhdpcie_bus_membytes(dhd_bus_t *bus, bool write, ulong address, uint8 *data, uint size);
+static int dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid,
+ const char *name, void *params,
+ uint plen, void *arg, uint len, int val_size);
+static int dhdpcie_bus_lpback_req(struct dhd_bus *bus, uint32 intval);
+static int dhdpcie_bus_dmaxfer_req(struct dhd_bus *bus,
+ uint32 len, uint32 srcdelay, uint32 destdelay,
+ uint32 d11_lpbk, uint32 core_num, uint32 wait,
+ uint32 mem_addr);
+#ifdef BCMINTERNAL
+static int dhdpcie_bus_set_tx_lpback(struct dhd_bus *bus, bool enable);
+static int dhdpcie_bus_get_tx_lpback(struct dhd_bus *bus);
+static uint64 serialized_backplane_access_64(dhd_bus_t* bus, uint addr, uint size, uint64* val,
+ bool read);
+#endif /* BCMINTERNAL */
+static uint serialized_backplane_access(dhd_bus_t* bus, uint addr, uint size, uint* val, bool read);
+static int dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter);
+static int _dhdpcie_download_firmware(struct dhd_bus *bus);
+static int dhdpcie_download_firmware(dhd_bus_t *bus, osl_t *osh);
+
+#if defined(FW_SIGNATURE)
+static int dhdpcie_bus_download_fw_signature(dhd_bus_t *bus, bool *do_write);
+static int dhdpcie_bus_download_ram_bootloader(dhd_bus_t *bus);
+static int dhdpcie_bus_write_fws_status(dhd_bus_t *bus);
+static int dhdpcie_bus_write_fws_mem_info(dhd_bus_t *bus);
+static int dhdpcie_bus_write_fwsig(dhd_bus_t *bus, char *fwsig_path, char *nvsig_path);
+static int dhdpcie_download_rtlv_end(dhd_bus_t *bus);
+static int dhdpcie_bus_save_download_info(dhd_bus_t *bus, uint32 download_addr,
+ uint32 download_size, const char *signature_fname,
+ const char *bloader_fname, uint32 bloader_download_addr);
+#endif /* FW_SIGNATURE */
+
+static int dhdpcie_bus_write_vars(dhd_bus_t *bus);
+static bool dhdpcie_bus_process_mailbox_intr(dhd_bus_t *bus, uint32 intstatus);
+static bool dhdpci_bus_read_frames(dhd_bus_t *bus);
+static int dhdpcie_readshared(dhd_bus_t *bus);
+static void dhdpcie_init_shared_addr(dhd_bus_t *bus);
+static bool dhdpcie_dongle_attach(dhd_bus_t *bus);
+static void dhdpcie_bus_dongle_setmemsize(dhd_bus_t *bus, int mem_size);
+static void dhdpcie_bus_release_dongle(dhd_bus_t *bus, osl_t *osh,
+ bool dongle_isolation, bool reset_flag);
+static void dhdpcie_bus_release_malloc(dhd_bus_t *bus, osl_t *osh);
+static int dhdpcie_downloadvars(dhd_bus_t *bus, void *arg, int len);
+static void dhdpcie_setbar1win(dhd_bus_t *bus, uint32 addr);
+static void dhd_init_bar1_switch_lock(dhd_bus_t *bus);
+static void dhd_deinit_bar1_switch_lock(dhd_bus_t *bus);
+static void dhd_init_pwr_req_lock(dhd_bus_t *bus);
+static void dhd_deinit_pwr_req_lock(dhd_bus_t *bus);
+static void dhd_init_bus_lp_state_lock(dhd_bus_t *bus);
+static void dhd_deinit_bus_lp_state_lock(dhd_bus_t *bus);
+static void dhd_init_backplane_access_lock(dhd_bus_t *bus);
+static void dhd_deinit_backplane_access_lock(dhd_bus_t *bus);
+static uint8 dhdpcie_bus_rtcm8(dhd_bus_t *bus, ulong offset);
+static void dhdpcie_bus_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data);
+static void dhdpcie_bus_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data);
+static uint16 dhdpcie_bus_rtcm16(dhd_bus_t *bus, ulong offset);
+static void dhdpcie_bus_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data);
+static uint32 dhdpcie_bus_rtcm32(dhd_bus_t *bus, ulong offset);
+#ifdef DHD_SUPPORT_64BIT
+static void dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data) __attribute__ ((used));
+static uint64 dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset) __attribute__ ((used));
+#endif /* DHD_SUPPORT_64BIT */
+static void dhdpcie_bus_cfg_set_bar0_win(dhd_bus_t *bus, uint32 data);
+static void dhdpcie_bus_reg_unmap(osl_t *osh, volatile char *addr, int size);
+static int dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b);
+static void dhdpcie_fw_trap(dhd_bus_t *bus);
+static void dhd_fillup_ring_sharedptr_info(dhd_bus_t *bus, ring_info_t *ring_info);
+static void dhdpcie_handle_mb_data(dhd_bus_t *bus);
+extern void dhd_dpc_enable(dhd_pub_t *dhdp);
+#ifdef PCIE_INB_DW
+static void dhd_bus_ds_trace(dhd_bus_t *bus, uint32 dsval,
+ bool d2h, enum dhd_bus_ds_state inbstate);
+#else
+static void dhd_bus_ds_trace(dhd_bus_t *bus, uint32 dsval, bool d2h);
+#endif /* PCIE_INB_DW */
+#ifdef DHD_MMIO_TRACE
+static void dhd_bus_mmio_trace(dhd_bus_t *bus, uint32 addr, uint32 value, bool set);
+#endif /* defined(DHD_MMIO_TRACE) */
+#if defined(LINUX) || defined(linux)
+extern void dhd_dpc_kill(dhd_pub_t *dhdp);
+#endif /* LINUX || linux */
+
+#ifdef IDLE_TX_FLOW_MGMT
+static void dhd_bus_check_idle_scan(dhd_bus_t *bus);
+static void dhd_bus_idle_scan(dhd_bus_t *bus);
+#endif /* IDLE_TX_FLOW_MGMT */
+
+#ifdef BCMEMBEDIMAGE
+static int dhdpcie_download_code_array(dhd_bus_t *bus);
+#endif /* BCMEMBEDIMAGE */
+#ifdef BCM_ROUTER_DHD
+extern char * nvram_get(const char *name);
+#endif
+#if defined(BCMEMBEDIMAGE) && defined (BCM_ROUTER_DHD)
+static void select_fd_image(
+ struct dhd_bus *bus, unsigned char **p_dlarray,
+ char **p_dlimagename, char **p_dlimagever,
+ char **p_dlimagedate, int *image_size);
+#endif /* defined(BCMEMBEDIMAGE) && defined (BCM_ROUTER_DHD) */
+
+#ifdef BCM_ROUTER_DHD
+int dbushost_initvars_flash(si_t *sih, osl_t *osh, char **base, uint len);
+#endif
+
+#ifdef EXYNOS_PCIE_DEBUG
+extern void exynos_pcie_register_dump(int ch_num);
+#endif /* EXYNOS_PCIE_DEBUG */
+
+#if defined(DHD_H2D_LOG_TIME_SYNC)
+static void dhdpci_bus_rte_log_time_sync_poll(dhd_bus_t *bus);
+#endif /* DHD_H2D_LOG_TIME_SYNC */
+
+#define PCI_VENDOR_ID_BROADCOM 0x14e4
+
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+#define MAX_D3_ACK_TIMEOUT 100
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+
+#ifdef BCMQT
+#define DHD_DEFAULT_DOORBELL_TIMEOUT 40 /* ms */
+#else
+#define DHD_DEFAULT_DOORBELL_TIMEOUT 200 /* ms */
+#endif
+#if defined(PCIE_OOB) || defined(PCIE_INB_DW)
+static uint dhd_doorbell_timeout = DHD_DEFAULT_DOORBELL_TIMEOUT;
+#endif /* PCIE_OOB || PCIE_INB_DW */
+
+static bool dhdpcie_check_firmware_compatible(uint32 f_api_version, uint32 h_api_version);
+static int dhdpcie_cto_error_recovery(struct dhd_bus *bus);
+
+static int dhdpcie_init_d11status(struct dhd_bus *bus);
+
+static int dhdpcie_wrt_rnd(struct dhd_bus *bus);
+
+#ifdef BCMINTERNAL
+#ifdef DHD_FWTRACE
+#include <dhd_fwtrace.h>
+#endif /* DHD_FWTRACE */
+#endif /* BCMINTERNAL */
+
+#ifdef DHD_HP2P
+extern enum hrtimer_restart dhd_hp2p_write(struct hrtimer *timer);
+static uint16 dhd_bus_set_hp2p_ring_max_size(struct dhd_bus *bus, bool tx, uint16 val);
+#endif
+#if defined(linux) || defined(LINUX)
+#ifdef DHD_FW_MEM_CORRUPTION
+#define NUM_PATTERNS 2
+#else
+#define NUM_PATTERNS 6
+#endif /* DHD_FW_MEM_CORRUPTION */
+static bool dhd_bus_tcm_test(struct dhd_bus *bus);
+#endif /* LINUX || linux */
+
+#if defined(FW_SIGNATURE)
+static int dhd_bus_dump_fws(dhd_bus_t *bus, struct bcmstrbuf *strbuf);
+#endif
+static void dhdpcie_pme_stat_clear(dhd_bus_t *bus);
+
+/* IOVar table */
+enum {
+ IOV_INTR = 1,
+#ifdef DHD_BUS_MEM_ACCESS
+ IOV_MEMBYTES,
+#endif /* DHD_BUS_MEM_ACCESS */
+ IOV_MEMSIZE,
+ IOV_SET_DOWNLOAD_STATE,
+ IOV_SET_DOWNLOAD_INFO,
+ IOV_DEVRESET,
+ IOV_VARS,
+ IOV_MSI_SIM,
+ IOV_PCIE_LPBK,
+ IOV_CC_NVMSHADOW,
+ IOV_RAMSIZE,
+ IOV_RAMSTART,
+ IOV_SLEEP_ALLOWED,
+#ifdef BCMINTERNAL
+ IOV_PCIE_TX_LPBK,
+#endif /* BCMINTERNAL */
+ IOV_PCIE_DMAXFER,
+ IOV_PCIE_SUSPEND,
+#ifdef DHD_PCIE_REG_ACCESS
+ IOV_PCIEREG,
+ IOV_PCIECFGREG,
+ IOV_PCIECOREREG,
+ IOV_PCIESERDESREG,
+ IOV_PCIEASPM,
+ IOV_BAR0_SECWIN_REG,
+ IOV_SBREG,
+#endif /* DHD_PCIE_REG_ACCESS */
+ IOV_DONGLEISOLATION,
+ IOV_LTRSLEEPON_UNLOOAD,
+ IOV_METADATA_DBG,
+ IOV_RX_METADATALEN,
+ IOV_TX_METADATALEN,
+ IOV_TXP_THRESHOLD,
+ IOV_BUZZZ_DUMP,
+ IOV_DUMP_RINGUPD_BLOCK,
+ IOV_DMA_RINGINDICES,
+ IOV_FORCE_FW_TRAP,
+ IOV_DB1_FOR_MB,
+ IOV_FLOW_PRIO_MAP,
+#ifdef DHD_PCIE_RUNTIMEPM
+ IOV_IDLETIME,
+#endif /* DHD_PCIE_RUNTIMEPM */
+ IOV_RXBOUND,
+ IOV_TXBOUND,
+ IOV_HANGREPORT,
+ IOV_H2D_MAILBOXDATA,
+ IOV_INFORINGS,
+ IOV_H2D_PHASE,
+ IOV_H2D_ENABLE_TRAP_BADPHASE,
+ IOV_H2D_TXPOST_MAX_ITEM,
+#if defined(DHD_HTPUT_TUNABLES)
+ IOV_H2D_HTPUT_TXPOST_MAX_ITEM,
+#endif /* DHD_HTPUT_TUNABLES */
+ IOV_TRAPDATA,
+ IOV_TRAPDATA_RAW,
+ IOV_CTO_PREVENTION,
+#ifdef PCIE_OOB
+ IOV_OOB_BT_REG_ON,
+ IOV_OOB_ENABLE,
+#endif /* PCIE_OOB */
+#ifdef DEVICE_TX_STUCK_DETECT
+ IOV_DEVICE_TX_STUCK_DETECT,
+#endif /* DEVICE_TX_STUCK_DETECT */
+ IOV_PCIE_WD_RESET,
+ IOV_DUMP_DONGLE,
+#ifdef DHD_EFI
+ IOV_WIFI_PROPERTIES,
+ IOV_CONTROL_SIGNAL,
+ IOV_OTP_DUMP,
+#ifdef BT_OVER_PCIE
+ IOV_BTOP_TEST,
+#endif
+#endif /* DHD_EFI */
+ IOV_IDMA_ENABLE,
+ IOV_IFRM_ENABLE,
+ IOV_CLEAR_RING,
+ IOV_DAR_ENABLE,
+ IOV_DHD_CAPS, /**< returns string with dhd capabilities */
+#if defined(DEBUGGER) || defined(DHD_DSCOPE)
+ IOV_GDB_SERVER, /**< starts gdb server on given interface */
+#endif /* DEBUGGER || DHD_DSCOPE */
+#if defined(GDB_PROXY)
+ IOV_GDB_PROXY_PROBE, /**< gdb proxy support presence check */
+ IOV_GDB_PROXY_STOP_COUNT, /**< gdb proxy firmware stop count */
+#endif /* GDB_PROXY */
+ IOV_INB_DW_ENABLE,
+#if defined(PCIE_OOB) || defined(PCIE_INB_DW)
+ IOV_DEEP_SLEEP,
+#endif /* PCIE_OOB || PCIE_INB_DW */
+ IOV_CTO_THRESHOLD,
+#ifdef D2H_MINIDUMP
+ IOV_MINIDUMP_OVERRIDE,
+#endif /* D2H_MINIDUMP */
+#ifdef BCMINTERNAL
+ IOV_DMA_CHAN,
+ IOV_HYBRIDFW,
+#endif /* BCMINTERNAL */
+ IOV_HSCBSIZE, /* get HSCB buffer size */
+#ifdef DHD_BUS_MEM_ACCESS
+ IOV_HSCBBYTES, /* copy HSCB buffer */
+#endif
+#ifdef BCMINTERNAL
+#ifdef DHD_FWTRACE
+ IOV_FWTRACE, /* Enable/disable firmware tracing */
+#endif /* DHD_FWTRACE */
+#endif /* BCMINTERNAL */
+ IOV_HP2P_ENABLE,
+ IOV_HP2P_PKT_THRESHOLD,
+ IOV_HP2P_TIME_THRESHOLD,
+ IOV_HP2P_PKT_EXPIRY,
+ IOV_HP2P_TXCPL_MAXITEMS,
+ IOV_HP2P_RXCPL_MAXITEMS,
+ IOV_EXTDTXS_IN_TXCPL,
+ IOV_HOSTRDY_AFTER_INIT,
+#ifdef BCMINTERNAL
+ IOV_SBREG_64,
+#endif /* BCMINTERNAL */
+ IOV_HP2P_MF_ENABLE,
+ IOV_PCIE_LAST /**< unused IOVAR */
+};
+
+const bcm_iovar_t dhdpcie_iovars[] = {
+ {"intr", IOV_INTR, 0, 0, IOVT_BOOL, 0 },
+#ifdef DHD_BUS_MEM_ACCESS
+ {"membytes", IOV_MEMBYTES, 0, 0, IOVT_BUFFER, 2 * sizeof(int) },
+#endif /* DHD_BUS_MEM_ACCESS */
+ {"memsize", IOV_MEMSIZE, 0, 0, IOVT_UINT32, 0 },
+ {"dwnldstate", IOV_SET_DOWNLOAD_STATE, 0, 0, IOVT_BOOL, 0 },
+ {"dwnldinfo", IOV_SET_DOWNLOAD_INFO, 0, 0, IOVT_BUFFER,
+ sizeof(fw_download_info_t) },
+ {"vars", IOV_VARS, 0, 0, IOVT_BUFFER, 0 },
+ {"devreset", IOV_DEVRESET, 0, 0, IOVT_UINT8, 0 },
+ {"pcie_device_trap", IOV_FORCE_FW_TRAP, 0, 0, 0, 0 },
+ {"pcie_lpbk", IOV_PCIE_LPBK, 0, 0, IOVT_UINT32, 0 },
+#ifdef BCMINTERNAL
+ {"msi_sim", IOV_MSI_SIM, 0, 0, IOVT_BOOL, 0 },
+#endif /* BCMINTERNAL */
+ {"cc_nvmshadow", IOV_CC_NVMSHADOW, 0, 0, IOVT_BUFFER, 0 },
+ {"ramsize", IOV_RAMSIZE, 0, 0, IOVT_UINT32, 0 },
+ {"ramstart", IOV_RAMSTART, 0, 0, IOVT_UINT32, 0 },
+#ifdef DHD_PCIE_REG_ACCESS
+ {"pciereg", IOV_PCIEREG, 0, 0, IOVT_BUFFER, 2 * sizeof(int32) },
+ {"pciecfgreg", IOV_PCIECFGREG, DHD_IOVF_PWRREQ_BYPASS, 0, IOVT_BUFFER, 2 * sizeof(int32) },
+ {"pciecorereg", IOV_PCIECOREREG, 0, 0, IOVT_BUFFER, 2 * sizeof(int32) },
+ {"pcieserdesreg", IOV_PCIESERDESREG, 0, 0, IOVT_BUFFER, 3 * sizeof(int32) },
+ {"bar0secwinreg", IOV_BAR0_SECWIN_REG, 0, 0, IOVT_BUFFER, sizeof(sdreg_t) },
+ {"sbreg", IOV_SBREG, 0, 0, IOVT_BUFFER, sizeof(uint8) },
+#endif /* DHD_PCIE_REG_ACCESS */
+#ifdef BCMINTERNAL
+ {"pcie_tx_lpbk", IOV_PCIE_TX_LPBK, 0, 0, IOVT_UINT32, 0 },
+#endif /* BCMINTERNAL */
+ {"pcie_dmaxfer", IOV_PCIE_DMAXFER, 0, 0, IOVT_BUFFER, sizeof(dma_xfer_info_t)},
+ {"pcie_suspend", IOV_PCIE_SUSPEND, DHD_IOVF_PWRREQ_BYPASS, 0, IOVT_UINT32, 0 },
+#ifdef PCIE_OOB
+ {"oob_bt_reg_on", IOV_OOB_BT_REG_ON, 0, 0, IOVT_UINT32, 0 },
+ {"oob_enable", IOV_OOB_ENABLE, 0, 0, IOVT_UINT32, 0 },
+#endif /* PCIE_OOB */
+ {"sleep_allowed", IOV_SLEEP_ALLOWED, 0, 0, IOVT_BOOL, 0 },
+ {"dngl_isolation", IOV_DONGLEISOLATION, 0, 0, IOVT_UINT32, 0 },
+ {"ltrsleep_on_unload", IOV_LTRSLEEPON_UNLOOAD, 0, 0, IOVT_UINT32, 0 },
+ {"dump_ringupdblk", IOV_DUMP_RINGUPD_BLOCK, 0, 0, IOVT_BUFFER, 0 },
+ {"dma_ring_indices", IOV_DMA_RINGINDICES, 0, 0, IOVT_UINT32, 0},
+ {"metadata_dbg", IOV_METADATA_DBG, 0, 0, IOVT_BOOL, 0 },
+ {"rx_metadata_len", IOV_RX_METADATALEN, 0, 0, IOVT_UINT32, 0 },
+ {"tx_metadata_len", IOV_TX_METADATALEN, 0, 0, IOVT_UINT32, 0 },
+ {"db1_for_mb", IOV_DB1_FOR_MB, 0, 0, IOVT_UINT32, 0 },
+ {"txp_thresh", IOV_TXP_THRESHOLD, 0, 0, IOVT_UINT32, 0 },
+ {"buzzz_dump", IOV_BUZZZ_DUMP, 0, 0, IOVT_UINT32, 0 },
+ {"flow_prio_map", IOV_FLOW_PRIO_MAP, 0, 0, IOVT_UINT32, 0 },
+#ifdef DHD_PCIE_RUNTIMEPM
+ {"idletime", IOV_IDLETIME, 0, 0, IOVT_INT32, 0 },
+#endif /* DHD_PCIE_RUNTIMEPM */
+ {"rxbound", IOV_RXBOUND, 0, 0, IOVT_UINT32, 0 },
+ {"txbound", IOV_TXBOUND, 0, 0, IOVT_UINT32, 0 },
+#ifdef DHD_PCIE_REG_ACCESS
+ {"aspm", IOV_PCIEASPM, 0, 0, IOVT_INT32, 0 },
+#endif /* DHD_PCIE_REG_ACCESS */
+ {"fw_hang_report", IOV_HANGREPORT, 0, 0, IOVT_BOOL, 0 },
+ {"h2d_mb_data", IOV_H2D_MAILBOXDATA, 0, 0, IOVT_UINT32, 0 },
+ {"inforings", IOV_INFORINGS, 0, 0, IOVT_UINT32, 0 },
+ {"h2d_phase", IOV_H2D_PHASE, 0, 0, IOVT_UINT32, 0 },
+ {"force_trap_bad_h2d_phase", IOV_H2D_ENABLE_TRAP_BADPHASE, 0, 0,
+ IOVT_UINT32, 0 },
+ {"h2d_max_txpost", IOV_H2D_TXPOST_MAX_ITEM, 0, 0, IOVT_UINT32, 0 },
+#if defined(DHD_HTPUT_TUNABLES)
+ {"h2d_htput_max_txpost", IOV_H2D_HTPUT_TXPOST_MAX_ITEM, 0, 0, IOVT_UINT32, 0 },
+#endif /* DHD_HTPUT_TUNABLES */
+ {"trap_data", IOV_TRAPDATA, 0, 0, IOVT_BUFFER, 0 },
+ {"trap_data_raw", IOV_TRAPDATA_RAW, 0, 0, IOVT_BUFFER, 0 },
+ {"cto_prevention", IOV_CTO_PREVENTION, 0, 0, IOVT_UINT32, 0 },
+ {"pcie_wd_reset", IOV_PCIE_WD_RESET, 0, 0, IOVT_BOOL, 0 },
+#ifdef DEVICE_TX_STUCK_DETECT
+ {"dev_tx_stuck_monitor", IOV_DEVICE_TX_STUCK_DETECT, 0, 0, IOVT_UINT32, 0 },
+#endif /* DEVICE_TX_STUCK_DETECT */
+ {"dump_dongle", IOV_DUMP_DONGLE, 0, 0, IOVT_BUFFER,
+ MAX(sizeof(dump_dongle_in_t), sizeof(dump_dongle_out_t))},
+ {"clear_ring", IOV_CLEAR_RING, 0, 0, IOVT_UINT32, 0 },
+#ifdef DHD_EFI
+ {"properties", IOV_WIFI_PROPERTIES, 0, 0, IOVT_BUFFER, 0},
+ {"otp_dump", IOV_OTP_DUMP, 0, 0, IOVT_BUFFER, 0},
+ {"control_signal", IOV_CONTROL_SIGNAL, 0, 0, IOVT_UINT32, 0},
+#ifdef BT_OVER_PCIE
+ {"btop_test", IOV_BTOP_TEST, 0, 0, IOVT_UINT32, 0},
+#endif
+#endif /* DHD_EFI */
+ {"idma_enable", IOV_IDMA_ENABLE, 0, 0, IOVT_UINT32, 0 },
+ {"ifrm_enable", IOV_IFRM_ENABLE, 0, 0, IOVT_UINT32, 0 },
+ {"dar_enable", IOV_DAR_ENABLE, 0, 0, IOVT_UINT32, 0 },
+ {"cap", IOV_DHD_CAPS, 0, 0, IOVT_BUFFER, 0},
+#if defined(DEBUGGER) || defined(DHD_DSCOPE)
+ {"gdb_server", IOV_GDB_SERVER, 0, 0, IOVT_UINT32, 0 },
+#endif /* DEBUGGER || DHD_DSCOPE */
+#if defined(GDB_PROXY)
+ {"gdb_proxy_probe", IOV_GDB_PROXY_PROBE, 0, 0, IOVT_BUFFER, 2 * sizeof(int32) },
+ {"gdb_proxy_stop_count", IOV_GDB_PROXY_STOP_COUNT, 0, 0, IOVT_UINT32, 0 },
+#endif /* GDB_PROXY */
+ {"inb_dw_enable", IOV_INB_DW_ENABLE, 0, 0, IOVT_UINT32, 0 },
+#if defined(PCIE_OOB) || defined(PCIE_INB_DW)
+ {"deep_sleep", IOV_DEEP_SLEEP, 0, 0, IOVT_UINT32, 0},
+#endif /* PCIE_OOB || PCIE_INB_DW */
+ {"cto_threshold", IOV_CTO_THRESHOLD, 0, 0, IOVT_UINT32, 0 },
+#ifdef D2H_MINIDUMP
+ {"minidump_override", IOV_MINIDUMP_OVERRIDE, 0, 0, IOVT_UINT32, 0 },
+#endif /* D2H_MINIDUMP */
+#ifdef BCMINTERNAL
+ {"dma_chan_db0", IOV_DMA_CHAN, 0, 0, IOVT_UINT32, 0 },
+ {"hybridfw", IOV_HYBRIDFW, 0, 0, IOVT_BUFFER, 0 },
+#endif /* BCMINTERNAL */
+ {"hscbsize", IOV_HSCBSIZE, 0, 0, IOVT_UINT32, 0 },
+#ifdef DHD_BUS_MEM_ACCESS
+ {"hscbbytes", IOV_HSCBBYTES, 0, 0, IOVT_BUFFER, 2 * sizeof(int32) },
+#endif
+
+#ifdef BCMINTERNAL
+#ifdef DHD_FWTRACE
+ {"fwtrace", IOV_FWTRACE, 0, 0, IOVT_UINT32, 0 },
+#endif /* DHD_FWTRACE */
+#endif /* BCMINTERNAL */
+
+#ifdef DHD_HP2P
+ {"hp2p_enable", IOV_HP2P_ENABLE, 0, 0, IOVT_UINT32, 0 },
+ {"hp2p_pkt_thresh", IOV_HP2P_PKT_THRESHOLD, 0, 0, IOVT_UINT32, 0 },
+ {"hp2p_time_thresh", IOV_HP2P_TIME_THRESHOLD, 0, 0, IOVT_UINT32, 0 },
+ {"hp2p_pkt_expiry", IOV_HP2P_PKT_EXPIRY, 0, 0, IOVT_UINT32, 0 },
+ {"hp2p_txcpl_maxitems", IOV_HP2P_TXCPL_MAXITEMS, 0, 0, IOVT_UINT32, 0 },
+ {"hp2p_rxcpl_maxitems", IOV_HP2P_RXCPL_MAXITEMS, 0, 0, IOVT_UINT32, 0 },
+#endif /* DHD_HP2P */
+ {"extdtxs_in_txcpl", IOV_EXTDTXS_IN_TXCPL, 0, 0, IOVT_UINT32, 0 },
+ {"hostrdy_after_init", IOV_HOSTRDY_AFTER_INIT, 0, 0, IOVT_UINT32, 0 },
+#ifdef BCMINTERNAL
+ {"sbreg_64", IOV_SBREG_64, 0, 0, IOVT_BUFFER, sizeof(uint8) },
+#endif /* BCMINTERNAL */
+ {"hp2p_mf_enable", IOV_HP2P_MF_ENABLE, 0, 0, IOVT_UINT32, 0 },
+ {NULL, 0, 0, 0, 0, 0 }
+};
+
+#ifdef BCMINTERNAL
+#define MSI_SIM_BUFSIZE 64
+#define PCIE_CFG_MSICAP_OFFSET 0x58
+#define PCIE_CFG_MSIADDR_LOW_OFFSET 0x5C
+#define PCIE_CFG_MSIDATA_OFFSET 0x64
+#define PCIE_CFG_MSI_GENDATA 0x5678
+#define PCIE_CFG_MSICAP_ENABLE_MSI 0x816805
+#define PCIE_CFG_MSICAP_DISABLE_MSI 0x806805
+#endif
+
+#ifdef BCMQT_HW
+#define MAX_READ_TIMEOUT (100 * 1000) /* 100 ms in dongle time */
+#elif defined(NDIS)
+#define MAX_READ_TIMEOUT (5 * 1000 * 1000)
+#else
+#define MAX_READ_TIMEOUT (2 * 1000 * 1000)
+#endif
+
+#ifndef DHD_RXBOUND
+#define DHD_RXBOUND 64
+#endif
+#ifndef DHD_TXBOUND
+#define DHD_TXBOUND 64
+#endif
+
+#define DHD_INFORING_BOUND 32
+#define DHD_BTLOGRING_BOUND 32
+
+uint dhd_rxbound = DHD_RXBOUND;
+uint dhd_txbound = DHD_TXBOUND;
+
+#if defined(DEBUGGER) || defined(DHD_DSCOPE)
+/** the GDB debugger layer will call back into this (bus) layer to read/write dongle memory */
+static struct dhd_gdb_bus_ops_s bus_ops = {
+ .read_u16 = dhdpcie_bus_rtcm16,
+ .read_u32 = dhdpcie_bus_rtcm32,
+ .write_u32 = dhdpcie_bus_wtcm32,
+};
+#endif /* DEBUGGER || DHD_DSCOPE */
+
+bool
+dhd_bus_get_flr_force_fail(struct dhd_bus *bus)
+{
+ return bus->flr_force_fail;
+}
+
+/**
+ * Register/Unregister functions are called by the main DHD entry point (e.g. module insertion) to
+ * link with the bus driver, in order to look for or await the device.
+ */
+int
+dhd_bus_register(void)
+{
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ return dhdpcie_bus_register();
+}
+
+void
+dhd_bus_unregister(void)
+{
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ dhdpcie_bus_unregister();
+ return;
+}
+
+/** returns a host virtual address */
+uint32 *
+dhdpcie_bus_reg_map(osl_t *osh, ulong addr, int size)
+{
+ return (uint32 *)REG_MAP(addr, size);
+}
+
+void
+dhdpcie_bus_reg_unmap(osl_t *osh, volatile char *addr, int size)
+{
+ REG_UNMAP(addr);
+ return;
+}
+
+/**
+ * Return the H2D doorbell register address.
+ * Use DAR registers instead of the enum register for corerev >= 23 (4347B0).
+ */
+static INLINE uint
+dhd_bus_db0_addr_get(struct dhd_bus *bus)
+{
+ uint addr = PCIH2D_MailBox;
+ uint dar_addr = DAR_PCIH2D_DB0_0(bus->sih->buscorerev);
+
+#ifdef BCMINTERNAL
+ if (bus->dma_chan == 1) {
+ addr = PCIH2D_MailBox_1;
+ dar_addr = DAR_PCIH2D_DB1_0(bus->sih->buscorerev);
+ } else if (bus->dma_chan == 2) {
+ addr = PCIH2D_MailBox_2;
+ dar_addr = DAR_PCIH2D_DB2_0(bus->sih->buscorerev);
+ }
+#endif /* BCMINTERNAL */
+
+ return ((DAR_ACTIVE(bus->dhd)) ? dar_addr : addr);
+}
+
+static INLINE uint
+dhd_bus_db0_addr_2_get(struct dhd_bus *bus)
+{
+ return ((DAR_ACTIVE(bus->dhd)) ? DAR_PCIH2D_DB2_0(bus->sih->buscorerev) : PCIH2D_MailBox_2);
+}
+
+static INLINE uint
+dhd_bus_db1_addr_get(struct dhd_bus *bus)
+{
+ return ((DAR_ACTIVE(bus->dhd)) ? DAR_PCIH2D_DB0_1(bus->sih->buscorerev) : PCIH2D_DB1);
+}
+
+static INLINE uint
+dhd_bus_db1_addr_3_get(struct dhd_bus *bus)
+{
+ return ((DAR_ACTIVE(bus->dhd)) ? DAR_PCIH2D_DB3_1(bus->sih->buscorerev) : PCIH2D_DB1_3);
+}
+
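+/*
+ * Illustrative note (assumption, not stated in this file): the DAR copies
+ * of the doorbell registers are preferred when DAR_ACTIVE() because they
+ * remain accessible in low power states where the regular enumeration-space
+ * registers may not be.
+ */
+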
+static void
+dhd_init_pwr_req_lock(dhd_bus_t *bus)
+{
+ if (!bus->pwr_req_lock) {
+ bus->pwr_req_lock = osl_spin_lock_init(bus->osh);
+ }
+}
+
+static void
+dhd_deinit_pwr_req_lock(dhd_bus_t *bus)
+{
+ if (bus->pwr_req_lock) {
+ osl_spin_lock_deinit(bus->osh, bus->pwr_req_lock);
+ bus->pwr_req_lock = NULL;
+ }
+}
+
+#ifdef PCIE_INB_DW
+void
+dhdpcie_set_dongle_deepsleep(dhd_bus_t *bus, bool val)
+{
+ ulong flags_ds;
+ if (INBAND_DW_ENAB(bus)) {
+ DHD_BUS_DONGLE_DS_LOCK(bus->dongle_ds_lock, flags_ds);
+ bus->dongle_in_deepsleep = val;
+ DHD_BUS_DONGLE_DS_UNLOCK(bus->dongle_ds_lock, flags_ds);
+ }
+}
+void
+dhd_init_dongle_ds_lock(dhd_bus_t *bus)
+{
+ if (!bus->dongle_ds_lock) {
+ bus->dongle_ds_lock = osl_spin_lock_init(bus->osh);
+ }
+}
+void
+dhd_deinit_dongle_ds_lock(dhd_bus_t *bus)
+{
+ if (bus->dongle_ds_lock) {
+ osl_spin_lock_deinit(bus->osh, bus->dongle_ds_lock);
+ bus->dongle_ds_lock = NULL;
+ }
+}
+#endif /* PCIE_INB_DW */
+
+/*
+ * WAR for SWWLAN-215055 - [4378B0] ARM fails to boot without DAR WL domain request
+ */
+static INLINE void
+dhd_bus_pcie_pwr_req_wl_domain(struct dhd_bus *bus, uint offset, bool enable)
+{
+ if (enable) {
+ si_corereg(bus->sih, bus->sih->buscoreidx, offset,
+ SRPWR_DMN1_ARMBPSD_MASK << SRPWR_REQON_SHIFT,
+ SRPWR_DMN1_ARMBPSD_MASK << SRPWR_REQON_SHIFT);
+ } else {
+ si_corereg(bus->sih, bus->sih->buscoreidx, offset,
+ SRPWR_DMN1_ARMBPSD_MASK << SRPWR_REQON_SHIFT, 0);
+ }
+}
+
+static INLINE void
+_dhd_bus_pcie_pwr_req_clear_cmn(struct dhd_bus *bus)
+{
+ uint mask;
+
+ /*
+ * If there are multiple de-asserts pending, decrement the ref and return.
+ * Only clear the power request when a single reference is pending,
+ * so the initial request is not removed unexpectedly.
+ */
+ if (bus->pwr_req_ref > 1) {
+ bus->pwr_req_ref--;
+ return;
+ }
+
+ ASSERT(bus->pwr_req_ref == 1);
+
+ if (MULTIBP_ENAB(bus->sih)) {
+ /* Common BP controlled by HW so only need to toggle WL/ARM backplane */
+ mask = SRPWR_DMN1_ARMBPSD_MASK;
+ } else {
+ mask = SRPWR_DMN0_PCIE_MASK | SRPWR_DMN1_ARMBPSD_MASK;
+ }
+
+ si_srpwr_request(bus->sih, mask, 0);
+ bus->pwr_req_ref = 0;
+}
+
+static INLINE void
+dhd_bus_pcie_pwr_req_clear(struct dhd_bus *bus)
+{
+ unsigned long flags = 0;
+
+ DHD_BUS_PWR_REQ_LOCK(bus->pwr_req_lock, flags);
+ _dhd_bus_pcie_pwr_req_clear_cmn(bus);
+ DHD_BUS_PWR_REQ_UNLOCK(bus->pwr_req_lock, flags);
+}
+
+static INLINE void
+dhd_bus_pcie_pwr_req_clear_nolock(struct dhd_bus *bus)
+{
+ _dhd_bus_pcie_pwr_req_clear_cmn(bus);
+}
+
+static INLINE void
+_dhd_bus_pcie_pwr_req_cmn(struct dhd_bus *bus)
+{
+ uint mask, val;
+
+ /* If multiple request entries, increment reference and return */
+ if (bus->pwr_req_ref > 0) {
+ bus->pwr_req_ref++;
+ return;
+ }
+
+ ASSERT(bus->pwr_req_ref == 0);
+
+ if (MULTIBP_ENAB(bus->sih)) {
+ /* Common BP controlled by HW so only need to toggle WL/ARM backplane */
+ mask = SRPWR_DMN1_ARMBPSD_MASK;
+ val = SRPWR_DMN1_ARMBPSD_MASK;
+ } else {
+ mask = SRPWR_DMN0_PCIE_MASK | SRPWR_DMN1_ARMBPSD_MASK;
+ val = SRPWR_DMN0_PCIE_MASK | SRPWR_DMN1_ARMBPSD_MASK;
+ }
+
+ si_srpwr_request(bus->sih, mask, val);
+
+ bus->pwr_req_ref = 1;
+}
+
+static INLINE void
+dhd_bus_pcie_pwr_req(struct dhd_bus *bus)
+{
+ unsigned long flags = 0;
+
+ DHD_BUS_PWR_REQ_LOCK(bus->pwr_req_lock, flags);
+ _dhd_bus_pcie_pwr_req_cmn(bus);
+ DHD_BUS_PWR_REQ_UNLOCK(bus->pwr_req_lock, flags);
+}
+
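+/*
+ * Illustrative pairing (not a code path in this file): each
+ * dhd_bus_pcie_pwr_req() is expected to be balanced by a
+ * dhd_bus_pcie_pwr_req_clear(); the SRPWR request is only dropped once
+ * the reference count returns to zero:
+ *   dhd_bus_pcie_pwr_req(bus);        ref 0 -> 1, SRPWR asserted
+ *   dhd_bus_pcie_pwr_req(bus);        ref 1 -> 2
+ *   dhd_bus_pcie_pwr_req_clear(bus);  ref 2 -> 1
+ *   dhd_bus_pcie_pwr_req_clear(bus);  ref 1 -> 0, SRPWR released
+ */
+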
+static INLINE void
+_dhd_bus_pcie_pwr_req_pd0123_cmn(struct dhd_bus *bus)
+{
+ uint mask, val;
+
+ mask = SRPWR_DMN_ALL_MASK(bus->sih);
+ val = SRPWR_DMN_ALL_MASK(bus->sih);
+
+ si_srpwr_request(bus->sih, mask, val);
+}
+
+void
+dhd_bus_pcie_pwr_req_reload_war(struct dhd_bus *bus)
+{
+ unsigned long flags = 0;
+
+ /*
+ * A few corerevs need the power domain to be active for FLR.
+ * Return if the pwr req is not applicable for this corerev.
+ */
+ if (!(PCIE_PWR_REQ_RELOAD_WAR_ENAB(bus->sih->buscorerev))) {
+ return;
+ }
+
+ DHD_BUS_PWR_REQ_LOCK(bus->pwr_req_lock, flags);
+ _dhd_bus_pcie_pwr_req_pd0123_cmn(bus);
+ DHD_BUS_PWR_REQ_UNLOCK(bus->pwr_req_lock, flags);
+}
+
+static INLINE void
+_dhd_bus_pcie_pwr_req_clear_pd0123_cmn(struct dhd_bus *bus)
+{
+ uint mask;
+
+ mask = SRPWR_DMN_ALL_MASK(bus->sih);
+
+ si_srpwr_request(bus->sih, mask, 0);
+}
+
+void
+dhd_bus_pcie_pwr_req_clear_reload_war(struct dhd_bus *bus)
+{
+ unsigned long flags = 0;
+
+ /* return if the pwr clear is not applicable for the corerev */
+ if (!(PCIE_PWR_REQ_RELOAD_WAR_ENAB(bus->sih->buscorerev))) {
+ return;
+ }
+ DHD_BUS_PWR_REQ_LOCK(bus->pwr_req_lock, flags);
+ _dhd_bus_pcie_pwr_req_clear_pd0123_cmn(bus);
+ DHD_BUS_PWR_REQ_UNLOCK(bus->pwr_req_lock, flags);
+}
+
+static INLINE void
+dhd_bus_pcie_pwr_req_nolock(struct dhd_bus *bus)
+{
+ _dhd_bus_pcie_pwr_req_cmn(bus);
+}
+
+bool
+dhdpcie_chip_support_msi(dhd_bus_t *bus)
+{
+ /* XXX For chips with buscorerev <= 14, intstatus is not
+ * cleared by the firmware. The host would either have to
+ * read and clear intstatus itself, or not enable MSI at all.
+ * Option 2 (not enabling MSI) is chosen here.
+ * MSI is also not enabled for hw4 chips.
+ */
+ DHD_INFO(("%s: buscorerev=%d chipid=0x%x\n",
+ __FUNCTION__, bus->sih->buscorerev, si_chipid(bus->sih)));
+ if (bus->sih->buscorerev <= 14 ||
+ si_chipid(bus->sih) == BCM4389_CHIP_ID ||
+ si_chipid(bus->sih) == BCM4385_CHIP_ID ||
+ si_chipid(bus->sih) == BCM4375_CHIP_ID ||
+ si_chipid(bus->sih) == BCM4376_CHIP_ID ||
+ si_chipid(bus->sih) == BCM4362_CHIP_ID ||
+ si_chipid(bus->sih) == BCM43751_CHIP_ID ||
+ si_chipid(bus->sih) == BCM43752_CHIP_ID ||
+ si_chipid(bus->sih) == BCM4361_CHIP_ID ||
+ si_chipid(bus->sih) == BCM4359_CHIP_ID) {
+ return FALSE;
+ } else {
+ return TRUE;
+ }
+}
+
+/**
+ * Called once for each hardware (dongle) instance that this DHD manages.
+ *
+ * 'regs' is the host virtual address that maps to the start of the PCIe BAR0 window. The first 4096
+ * bytes in this window are mapped to the backplane address in the PCIEBAR0Window register. The
+ * precondition is that the PCIEBAR0Window register 'points' at the PCIe core.
+ *
+ * 'tcm' is the *host* virtual address at which tcm is mapped.
+ */
+int dhdpcie_bus_attach(osl_t *osh, dhd_bus_t **bus_ptr,
+ volatile char *regs, volatile char *tcm, void *pci_dev, wifi_adapter_info_t *adapter)
+{
+ dhd_bus_t *bus = NULL;
+ int ret = BCME_OK;
+
+ DHD_TRACE(("%s: ENTER\n", __FUNCTION__));
+
+ do {
+ if (!(bus = MALLOCZ(osh, sizeof(dhd_bus_t)))) {
+ DHD_ERROR(("%s: MALLOC of dhd_bus_t failed\n", __FUNCTION__));
+ ret = BCME_NORESOURCE;
+ break;
+ }
+ bus->bus = adapter->bus_type;
+ bus->bus_num = adapter->bus_num;
+ bus->slot_num = adapter->slot_num;
+
+ bus->regs = regs;
+ bus->tcm = tcm;
+ bus->osh = osh;
+#ifndef NDIS
+ /* Save pci_dev into dhd_bus, as it may be needed in dhd_attach */
+ bus->dev = (struct pci_dev *)pci_dev;
+#endif
+#ifdef DHD_EFI
+ bus->pcie_dev = pci_dev;
+#endif
+
+ dll_init(&bus->flowring_active_list);
+#ifdef IDLE_TX_FLOW_MGMT
+ bus->active_list_last_process_ts = OSL_SYSUPTIME();
+#endif /* IDLE_TX_FLOW_MGMT */
+
+#ifdef DEVICE_TX_STUCK_DETECT
+ /* Enable the Device stuck detection feature by default */
+ bus->dev_tx_stuck_monitor = TRUE;
+ bus->device_tx_stuck_check = OSL_SYSUPTIME();
+#endif /* DEVICE_TX_STUCK_DETECT */
+
+ /* Attach pcie shared structure */
+ if (!(bus->pcie_sh = MALLOCZ(osh, sizeof(pciedev_shared_t)))) {
+ DHD_ERROR(("%s: MALLOC of bus->pcie_sh failed\n", __FUNCTION__));
+ ret = BCME_NORESOURCE;
+ break;
+ }
+
+ /* dhd_common_init(osh); */
+
+ if (dhdpcie_dongle_attach(bus)) {
+ DHD_ERROR(("%s: dhdpcie_probe_attach failed\n", __FUNCTION__));
+ ret = BCME_NOTREADY;
+ break;
+ }
+
+ /* software resources */
+ if (!(bus->dhd = dhd_attach(osh, bus, PCMSGBUF_HDRLEN))) {
+ DHD_ERROR(("%s: dhd_attach failed\n", __FUNCTION__));
+ ret = BCME_NORESOURCE;
+ break;
+ }
+#if defined(GET_OTP_MAC_ENABLE) || defined(GET_OTP_MODULE_NAME)
+ dhd_conf_get_otp(bus->dhd, bus->sih);
+#endif
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
+ bus->dhd->busstate = DHD_BUS_DOWN;
+ bus->dhd->hostrdy_after_init = TRUE;
+ bus->db1_for_mb = TRUE;
+ bus->dhd->hang_report = TRUE;
+ bus->use_mailbox = FALSE;
+ bus->use_d0_inform = FALSE;
+ bus->intr_enabled = FALSE;
+ bus->flr_force_fail = FALSE;
+ /* update the dma indices if set through module parameter. */
+ if (dma_ring_indices != 0) {
+ dhdpcie_set_dma_ring_indices(bus->dhd, dma_ring_indices);
+ }
+ /* update h2d phase support if set through module parameter */
+ bus->dhd->h2d_phase_supported = h2d_phase ? TRUE : FALSE;
+ /* update force trap on bad phase if set through module parameter */
+ bus->dhd->force_dongletrap_on_bad_h2d_phase =
+ force_trap_bad_h2d_phase ? TRUE : FALSE;
+#ifdef BTLOG
+ bus->dhd->bt_logging_enabled = TRUE;
+#endif
+#ifdef IDLE_TX_FLOW_MGMT
+ bus->enable_idle_flowring_mgmt = FALSE;
+#endif /* IDLE_TX_FLOW_MGMT */
+ bus->irq_registered = FALSE;
+
+#ifdef DHD_MSI_SUPPORT
+ bus->d2h_intr_method = enable_msi && dhdpcie_chip_support_msi(bus) ?
+ PCIE_MSI : PCIE_INTX;
+ if (bus->dhd->conf->d2h_intr_method >= 0)
+ bus->d2h_intr_method = bus->dhd->conf->d2h_intr_method;
+#else
+ bus->d2h_intr_method = PCIE_INTX;
+#endif /* DHD_MSI_SUPPORT */
+
+ /* For MSI, use host irq based control and for INTX use D2H INTMASK based control */
+ if (bus->d2h_intr_method == PCIE_MSI) {
+ bus->d2h_intr_control = PCIE_HOST_IRQ_CTRL;
+ } else {
+ bus->d2h_intr_control = PCIE_D2H_INTMASK_CTRL;
+ }
+
+#ifdef DHD_HP2P
+ bus->hp2p_txcpl_max_items = DHD_MAX_ITEMS_HPP_TXCPL_RING;
+ bus->hp2p_rxcpl_max_items = DHD_MAX_ITEMS_HPP_RXCPL_RING;
+#endif /* DHD_HP2P */
+
+ DHD_TRACE(("%s: EXIT SUCCESS\n",
+ __FUNCTION__));
+ g_dhd_bus = bus;
+ *bus_ptr = bus;
+ return ret;
+ } while (0);
+
+ DHD_TRACE(("%s: EXIT FAILURE\n", __FUNCTION__));
+#ifdef DHD_EFI
+ /* For EFI, the load still succeeds even if there is an error,
+ * so 'bus' should not be freed here; it is freed during unload.
+ */
+ if (bus) {
+ *bus_ptr = bus;
+ }
+#else
+ if (bus && bus->pcie_sh) {
+ MFREE(osh, bus->pcie_sh, sizeof(pciedev_shared_t));
+ }
+
+ if (bus) {
+ MFREE(osh, bus, sizeof(dhd_bus_t));
+ }
+#endif /* DHD_EFI */
+
+ return ret;
+}
+
+bool
+dhd_bus_skip_clm(dhd_pub_t *dhdp)
+{
+ switch (dhd_bus_chip_id(dhdp)) {
+ case BCM4369_CHIP_ID:
+ return TRUE;
+ default:
+ return FALSE;
+ }
+}
+
+uint
+dhd_bus_chip(struct dhd_bus *bus)
+{
+ ASSERT(bus->sih != NULL);
+ return bus->sih->chip;
+}
+
+uint
+dhd_bus_chiprev(struct dhd_bus *bus)
+{
+ ASSERT(bus);
+ ASSERT(bus->sih != NULL);
+ return bus->sih->chiprev;
+}
+
+void *
+dhd_bus_pub(struct dhd_bus *bus)
+{
+ return bus->dhd;
+}
+
+void *
+dhd_bus_sih(struct dhd_bus *bus)
+{
+ return (void *)bus->sih;
+}
+
+void *
+dhd_bus_txq(struct dhd_bus *bus)
+{
+ return &bus->txq;
+}
+
+/** Get Chip ID version */
+uint dhd_bus_chip_id(dhd_pub_t *dhdp)
+{
+ dhd_bus_t *bus = dhdp->bus;
+ return bus->sih->chip;
+}
+
+/** Get Chip Rev ID version */
+uint dhd_bus_chiprev_id(dhd_pub_t *dhdp)
+{
+ dhd_bus_t *bus = dhdp->bus;
+ return bus->sih->chiprev;
+}
+
+/** Get Chip Pkg ID version */
+uint dhd_bus_chippkg_id(dhd_pub_t *dhdp)
+{
+ dhd_bus_t *bus = dhdp->bus;
+ return bus->sih->chippkg;
+}
+
+int dhd_bus_get_ids(struct dhd_bus *bus, uint32 *bus_type, uint32 *bus_num, uint32 *slot_num)
+{
+ *bus_type = bus->bus;
+ *bus_num = bus->bus_num;
+ *slot_num = bus->slot_num;
+ return 0;
+}
+
+/** Conduct Loopback test */
+int
+dhd_bus_dmaxfer_lpbk(dhd_pub_t *dhdp, uint32 type)
+{
+ dma_xfer_info_t dmaxfer_lpbk;
+ int ret = BCME_OK;
+
+#define PCIE_DMAXFER_LPBK_LENGTH 4096
+ memset(&dmaxfer_lpbk, 0, sizeof(dma_xfer_info_t));
+ dmaxfer_lpbk.version = DHD_DMAXFER_VERSION;
+ dmaxfer_lpbk.length = (uint16)sizeof(dma_xfer_info_t);
+ dmaxfer_lpbk.num_bytes = PCIE_DMAXFER_LPBK_LENGTH;
+ dmaxfer_lpbk.type = type;
+ dmaxfer_lpbk.should_wait = TRUE;
+
+ ret = dhd_bus_iovar_op(dhdp, "pcie_dmaxfer", NULL, 0,
+ (char *)&dmaxfer_lpbk, sizeof(dma_xfer_info_t), IOV_SET);
+ if (ret < 0) {
+ DHD_ERROR(("failed to start PCIe Loopback Test!!! "
+ "Type:%d Reason:%d\n", type, ret));
+ return ret;
+ }
+
+ if (dmaxfer_lpbk.status != DMA_XFER_SUCCESS) {
+ DHD_ERROR(("failed to check PCIe Loopback Test!!! "
+ "Type:%d Status:%d Error code:%d\n", type,
+ dmaxfer_lpbk.status, dmaxfer_lpbk.error_code));
+ ret = BCME_ERROR;
+ } else {
+ DHD_ERROR(("successful to check PCIe Loopback Test"
+ " Type:%d\n", type));
+ }
+#undef PCIE_DMAXFER_LPBK_LENGTH
+
+ return ret;
+}
+
+/* Check if there are DPC scheduling errors */
+bool
+dhd_bus_query_dpc_sched_errors(dhd_pub_t *dhdp)
+{
+ dhd_bus_t *bus = dhdp->bus;
+ bool sched_err;
+
+ if (bus->dpc_entry_time < bus->isr_exit_time) {
+ /* Kernel doesn't schedule the DPC after processing PCIe IRQ */
+ sched_err = TRUE;
+ } else if (bus->dpc_entry_time < bus->resched_dpc_time) {
+ /* Kernel doesn't schedule the DPC after DHD tries to reschedule
+ * the DPC due to pending work items to be processed.
+ */
+ sched_err = TRUE;
+ } else {
+ sched_err = FALSE;
+ }
+
+ if (sched_err) {
+ /* print out minimum timestamp info */
+ DHD_ERROR(("isr_entry_time="SEC_USEC_FMT
+ " isr_exit_time="SEC_USEC_FMT
+ " dpc_entry_time="SEC_USEC_FMT
+ "\ndpc_exit_time="SEC_USEC_FMT
+ " isr_sched_dpc_time="SEC_USEC_FMT
+ " resched_dpc_time="SEC_USEC_FMT"\n",
+ GET_SEC_USEC(bus->isr_entry_time),
+ GET_SEC_USEC(bus->isr_exit_time),
+ GET_SEC_USEC(bus->dpc_entry_time),
+ GET_SEC_USEC(bus->dpc_exit_time),
+ GET_SEC_USEC(bus->isr_sched_dpc_time),
+ GET_SEC_USEC(bus->resched_dpc_time)));
+ }
+
+ return sched_err;
+}
+
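+/*
+ * Illustrative timeline for the checks above (monotonic ns timestamps):
+ *   isr_exit_time < dpc_entry_time    -> normal: the DPC ran after the IRQ
+ *   dpc_entry_time < isr_exit_time    -> the kernel never ran the DPC after
+ *                                        the last IRQ
+ *   dpc_entry_time < resched_dpc_time -> a requested reschedule never ran
+ */
+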
+/** Read and clear intstatus. This should be called with interrupts disabled or inside isr */
+uint32
+dhdpcie_bus_intstatus(dhd_bus_t *bus)
+{
+ uint32 intstatus = 0;
+ uint32 intmask = 0;
+
+ if (__DHD_CHK_BUS_LPS_D3_ACKED(bus)) {
+#ifdef DHD_EFI
+ DHD_INFO(("%s: trying to clear intstatus after D3 Ack\n", __FUNCTION__));
+#else
+ DHD_ERROR(("%s: trying to clear intstatus after D3 Ack\n", __FUNCTION__));
+#endif /* !DHD_EFI */
+ return intstatus;
+ }
+ /* XXX: check for PCIE Gen2 also */
+ if ((bus->sih->buscorerev == 6) || (bus->sih->buscorerev == 4) ||
+ (bus->sih->buscorerev == 2)) {
+ intstatus = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4);
+ dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, intstatus);
+ intstatus &= I_MB;
+ } else {
+ /* this is a PCIE core register... not a config register... */
+ intstatus = si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int, 0, 0);
+
+#ifdef DHD_MMIO_TRACE
+ dhd_bus_mmio_trace(bus, bus->pcie_mailbox_int, intstatus, FALSE);
+#endif /* defined(DHD_MMIO_TRACE) */
+
+ /* this is a PCIE core register... not a config register... */
+ intmask = si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_mask, 0, 0);
+ /* If the device has been removed, intstatus and intmask read back as 0xffffffff */
+ if (intstatus == (uint32)-1 || intmask == (uint32)-1) {
+ DHD_ERROR(("%s: Device is removed or Link is down.\n", __FUNCTION__));
+ DHD_ERROR(("%s: INTSTAT : 0x%x INTMASK : 0x%x.\n",
+ __FUNCTION__, intstatus, intmask));
+ bus->is_linkdown = TRUE;
+ dhd_pcie_debug_info_dump(bus->dhd);
+#ifdef CUSTOMER_HW4_DEBUG
+#if defined(OEM_ANDROID)
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
+ bus->no_cfg_restore = 1;
+#endif /* CONFIG_ARCH_MSM */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+ bus->dhd->hang_reason = HANG_REASON_PCIE_LINK_DOWN_EP_DETECT;
+#ifdef WL_CFGVENDOR_SEND_HANG_EVENT
+ copy_hang_info_linkdown(bus->dhd);
+#endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
+ dhd_os_send_hang_message(bus->dhd);
+#endif /* OEM_ANDROID */
+#endif /* CUSTOMER_HW4_DEBUG */
+ return intstatus;
+ }
+
+#ifndef DHD_READ_INTSTATUS_IN_DPC
+ intstatus &= intmask;
+#endif /* DHD_READ_INTSTATUS_IN_DPC */
+
+#ifdef DHD_MMIO_TRACE
+ dhd_bus_mmio_trace(bus, bus->pcie_mailbox_mask, intmask, FALSE);
+#endif /* defined(DHD_MMIO_TRACE) */
+
+ /* XXX: define the mask in a .h file */
+ /*
+ * The fourth argument to si_corereg is the "mask" fields of the register to update
+ * and the fifth field is the "value" to update. Now if we are interested in only
+ * few fields of the "mask" bit map, we should not be writing back what we read
+ * By doing so, we might clear/ack interrupts that are not handled yet.
+ */
+#ifdef DHD_MMIO_TRACE
+ dhd_bus_mmio_trace(bus, bus->pcie_mailbox_int, intstatus, TRUE);
+#endif /* defined(DHD_MMIO_TRACE) */
+ si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int, bus->def_intmask,
+ intstatus);
+
+ intstatus &= bus->def_intmask;
+ }
+
+ return intstatus;
+}
+
+void
+dhdpcie_cto_recovery_handler(dhd_pub_t *dhd)
+{
+ dhd_bus_t *bus = dhd->bus;
+ int ret;
+
+ /* Disable PCIe Runtime PM to avoid D3_ACK timeout.
+ */
+ DHD_DISABLE_RUNTIME_PM(dhd);
+
+ /* Sleep for 1 second so that any AXI timeout,
+ * if running on the ALP clock, is also captured
+ */
+ OSL_SLEEP(1000);
+
+ /* Reset the backplane and CTO status,
+ * after which access through PCIe is recovered.
+ */
+ ret = dhdpcie_cto_error_recovery(bus);
+ if (!ret) {
+ /* Waiting for backplane reset */
+ OSL_SLEEP(10);
+ /* Dump debug Info */
+ dhd_prot_debug_info_print(bus->dhd);
+ /* Dump console buffer */
+ dhd_bus_dump_console_buffer(bus);
+#if defined(DHD_FW_COREDUMP)
+ /* save core dump or write to a file */
+ if (!bus->is_linkdown && bus->dhd->memdump_enabled) {
+#ifdef DHD_SSSR_DUMP
+ DHD_ERROR(("%s : Set collect_sssr as TRUE\n", __FUNCTION__));
+ bus->dhd->collect_sssr = TRUE;
+#endif /* DHD_SSSR_DUMP */
+ bus->dhd->memdump_type = DUMP_TYPE_CTO_RECOVERY;
+ dhdpcie_mem_dump(bus);
+ }
+#endif /* DHD_FW_COREDUMP */
+ }
+#ifdef OEM_ANDROID
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
+ bus->no_cfg_restore = 1;
+#endif /* CONFIG_ARCH_MSM */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+ bus->is_linkdown = TRUE;
+ bus->dhd->hang_reason = HANG_REASON_PCIE_CTO_DETECT;
+ /* Send HANG event */
+ dhd_os_send_hang_message(bus->dhd);
+#endif /* OEM_ANDROID */
+}
+
+void
+dhd_bus_dump_imp_cfg_registers(struct dhd_bus *bus)
+{
+ uint32 status_cmd = dhd_pcie_config_read(bus, PCIECFGREG_STATUS_CMD, sizeof(uint32));
+ uint32 pmcsr = dhd_pcie_config_read(bus, PCIE_CFG_PMCSR, sizeof(uint32));
+ uint32 base_addr0 = dhd_pcie_config_read(bus, PCIECFGREG_BASEADDR0, sizeof(uint32));
+ uint32 base_addr1 = dhd_pcie_config_read(bus, PCIECFGREG_BASEADDR1, sizeof(uint32));
+ uint32 linkctl = dhd_pcie_config_read(bus, PCIECFGREG_LINK_STATUS_CTRL, sizeof(uint32));
+ uint32 l1ssctrl =
+ dhd_pcie_config_read(bus, PCIECFGREG_PML1_SUB_CTRL1, sizeof(uint32));
+ uint32 devctl = dhd_pcie_config_read(bus, PCIECFGREG_DEV_STATUS_CTRL, sizeof(uint32));
+ uint32 devctl2 = dhd_pcie_config_read(bus, PCIECFGGEN_DEV_STATUS_CTRL2, sizeof(uint32));
+
+ DHD_ERROR(("status_cmd(0x%x)=0x%x, pmcsr(0x%x)=0x%x "
+ "base_addr0(0x%x)=0x%x base_addr1(0x%x)=0x%x "
+ "linkctl(0x%x)=0x%x l1ssctrl(0x%x)=0x%x "
+ "devctl(0x%x)=0x%x devctl2(0x%x)=0x%x \n",
+ PCIECFGREG_STATUS_CMD, status_cmd,
+ PCIE_CFG_PMCSR, pmcsr,
+ PCIECFGREG_BASEADDR0, base_addr0,
+ PCIECFGREG_BASEADDR1, base_addr1,
+ PCIECFGREG_LINK_STATUS_CTRL, linkctl,
+ PCIECFGREG_PML1_SUB_CTRL1, l1ssctrl,
+ PCIECFGREG_DEV_STATUS_CTRL, devctl,
+ PCIECFGGEN_DEV_STATUS_CTRL2, devctl2));
+}
+
+/**
+ * Name: dhdpcie_bus_isr
+ * Parameters:
+ * 1: IN int irq -- interrupt vector
+ * 2: IN void *arg -- handle to private data structure
+ * Return value:
+ * Status (TRUE or FALSE)
+ *
+ * Description:
+ * The interrupt service routine checks the status register,
+ * disables interrupts, and queues the DPC if mailbox interrupts are raised.
+ */
+int32
+dhdpcie_bus_isr(dhd_bus_t *bus)
+{
+ uint32 intstatus = 0;
+
+ do {
+ DHD_INTR(("%s: Enter\n", __FUNCTION__));
+ /* verify argument */
+ if (!bus) {
+ DHD_LOG_MEM(("%s : bus is null pointer, exit \n", __FUNCTION__));
+ break;
+ }
+
+ if (bus->dhd->dongle_reset) {
+ DHD_LOG_MEM(("%s : dongle is reset\n", __FUNCTION__));
+ break;
+ }
+
+ if (bus->dhd->busstate == DHD_BUS_DOWN) {
+ DHD_LOG_MEM(("%s : bus is down \n", __FUNCTION__));
+ break;
+ }
+
+ /* avoid processing of interrupts until msgbuf prot is inited */
+ if (!bus->intr_enabled) {
+ DHD_INFO(("%s, not ready to receive interrupts\n", __FUNCTION__));
+ break;
+ }
+
+ if (PCIECTO_ENAB(bus)) {
+ /* read pci_intstatus */
+ intstatus = dhdpcie_bus_cfg_read_dword(bus, PCI_INT_STATUS, 4);
+
+ if (intstatus == (uint32)-1) {
+ DHD_ERROR(("%s : Invalid intstatus for cto recovery\n",
+ __FUNCTION__));
+ bus->is_linkdown = 1;
+ dhdpcie_disable_irq_nosync(bus);
+ dhd_prot_debug_info_print(bus->dhd);
+ break;
+ }
+
+ if (intstatus & PCI_CTO_INT_MASK) {
+ DHD_ERROR(("%s: ##### CTO RECOVERY REPORTED BY DONGLE "
+ "intstat=0x%x enab=%d\n", __FUNCTION__,
+ intstatus, bus->cto_enable));
+ bus->cto_triggered = 1;
+ dhd_bus_dump_imp_cfg_registers(bus);
+ /*
+ * DAR still accessible
+ */
+ dhd_bus_dump_dar_registers(bus);
+
+ /* Disable further PCIe interrupts */
+#ifndef NDIS
+ dhdpcie_disable_irq_nosync(bus); /* Disable interrupt!! */
+#endif
+ /* Stop Tx flow */
+ dhd_bus_stop_queue(bus);
+
+ /* Schedule CTO recovery */
+ dhd_schedule_cto_recovery(bus->dhd);
+
+ return TRUE;
+ }
+ }
+
+ if (bus->d2h_intr_method == PCIE_MSI &&
+ !dhd_conf_legacy_msi_chip(bus->dhd)) {
+ /* For MSI, as intstatus is cleared by firmware, no need to read */
+ goto skip_intstatus_read;
+ }
+
+#ifndef DHD_READ_INTSTATUS_IN_DPC
+ intstatus = dhdpcie_bus_intstatus(bus);
+
+ /* Check if the interrupt is ours or not */
+ if (intstatus == 0) {
+ bus->non_ours_irq_count++;
+ bus->last_non_ours_irq_time = OSL_LOCALTIME_NS();
+ break;
+ }
+
+ /* save the intstatus */
+ /* read interrupt status register!! Status bits will be cleared in DPC !! */
+ bus->intstatus = intstatus;
+
+ /* return error for 0xFFFFFFFF */
+ if (intstatus == (uint32)-1) {
+ DHD_LOG_MEM(("%s : wrong interrupt status val : 0x%x\n",
+ __FUNCTION__, intstatus));
+ bus->is_linkdown = 1;
+ dhdpcie_disable_irq_nosync(bus);
+ break;
+ }
+
+skip_intstatus_read:
+ /* Overall operation:
+ * - Mask further interrupts
+ * - Read/ack intstatus
+ * - Take action based on bits and state
+ * - Reenable interrupts (as per state)
+ */
+
+ /* Count the interrupt call */
+ bus->intrcount++;
+#endif /* DHD_READ_INTSTATUS_IN_DPC */
+
+ bus->ipend = TRUE;
+
+ bus->isr_intr_disable_count++;
+
+ if (bus->d2h_intr_control == PCIE_D2H_INTMASK_CTRL) {
+ dhdpcie_bus_intr_disable(bus); /* Disable interrupt using IntMask!! */
+ } else {
+ /* For Linux, MacOS etc. (other than NDIS), instead of disabling
+ * the dongle interrupt by clearing the IntMask, disable the
+ * interrupt directly on the host side so that the host will not
+ * receive any interrupts at all, even though the dongle raises them
+ */
+ dhdpcie_disable_irq_nosync(bus); /* Disable interrupt!! */
+ }
+
+ bus->intdis = TRUE;
+#ifdef DHD_FLOW_RING_STATUS_TRACE
+ if (bus->dhd->dma_h2d_ring_upd_support && bus->dhd->dma_d2h_ring_upd_support &&
+ (bus->dhd->ring_attached == TRUE)) {
+ dhd_bus_flow_ring_status_isr_trace(bus->dhd);
+ }
+#endif /* DHD_FLOW_RING_STATUS_TRACE */
+#if defined(PCIE_ISR_THREAD)
+
+ DHD_TRACE(("Calling dhd_bus_dpc() from %s\n", __FUNCTION__));
+ DHD_OS_WAKE_LOCK(bus->dhd);
+ while (dhd_bus_dpc(bus));
+ DHD_OS_WAKE_UNLOCK(bus->dhd);
+#else
+ bus->dpc_sched = TRUE;
+ bus->isr_sched_dpc_time = OSL_LOCALTIME_NS();
+#ifndef NDIS
+ dhd_sched_dpc(bus->dhd); /* queue DPC now!! */
+#endif /* !NDIS */
+#endif /* defined(PCIE_ISR_THREAD) */
+
+ DHD_INTR(("%s: Exit Success DPC Queued\n", __FUNCTION__));
+ return TRUE;
+
+ } while (0);
+
+ DHD_INTR(("%s: Exit Failure\n", __FUNCTION__));
+ return FALSE;
+}
+
+int
+dhdpcie_set_pwr_state(dhd_bus_t *bus, uint state)
+{
+ uint32 cur_state = 0;
+ uint32 pm_csr = 0;
+ osl_t *osh = bus->osh;
+
+ pm_csr = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32));
+ cur_state = pm_csr & PCIECFGREG_PM_CSR_STATE_MASK;
+
+ if (cur_state == state) {
+ DHD_ERROR(("%s: Already in state %u \n", __FUNCTION__, cur_state));
+ return BCME_OK;
+ }
+
+ if (state > PCIECFGREG_PM_CSR_STATE_D3_HOT)
+ return BCME_ERROR;
+
+ /* Validate the state transition:
+ * if already in a lower power state, return an error
+ */
+ if (state != PCIECFGREG_PM_CSR_STATE_D0 &&
+ cur_state <= PCIECFGREG_PM_CSR_STATE_D3_COLD &&
+ cur_state > state) {
+ DHD_ERROR(("%s: Invalid power state transition !\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ pm_csr &= ~PCIECFGREG_PM_CSR_STATE_MASK;
+ pm_csr |= state;
+
+ OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32), pm_csr);
+
+ /* need to wait for the specified mandatory pcie power transition delay time */
+ if (state == PCIECFGREG_PM_CSR_STATE_D3_HOT ||
+ cur_state == PCIECFGREG_PM_CSR_STATE_D3_HOT)
+ OSL_DELAY(DHDPCIE_PM_D3_DELAY);
+ else if (state == PCIECFGREG_PM_CSR_STATE_D2 ||
+ cur_state == PCIECFGREG_PM_CSR_STATE_D2)
+ OSL_DELAY(DHDPCIE_PM_D2_DELAY);
+
+ /* read back the power state and verify */
+ pm_csr = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32));
+ cur_state = pm_csr & PCIECFGREG_PM_CSR_STATE_MASK;
+ if (cur_state != state) {
+ DHD_ERROR(("%s: power transition failed ! Current state is %u \n",
+ __FUNCTION__, cur_state));
+ return BCME_ERROR;
+ } else {
+ DHD_ERROR(("%s: power transition to %u success \n",
+ __FUNCTION__, cur_state));
+ }
+
+ return BCME_OK;
+}
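+
+/* Illustrative usage sketch (hypothetical caller, not a path taken by the
+ * driver itself): cycling the device through D3hot and back to D0 with
+ * dhdpcie_set_pwr_state(). The function itself applies the mandatory
+ * transition delays and verifies the state by reading back PM_CSR.
+ */
+#if 0
+static int
+example_pwr_cycle(dhd_bus_t *bus)
+{
+	/* D0 -> D3hot is a legal downward transition */
+	if (dhdpcie_set_pwr_state(bus, PCIECFGREG_PM_CSR_STATE_D3_HOT) != BCME_OK) {
+		return BCME_ERROR;
+	}
+	/* Returning to D0 is always accepted by the validation above */
+	return dhdpcie_set_pwr_state(bus, PCIECFGREG_PM_CSR_STATE_D0);
+}
+#endif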
+
+int
+dhdpcie_config_check(dhd_bus_t *bus)
+{
+ uint32 i, val;
+ int ret = BCME_ERROR;
+
+ for (i = 0; i < DHDPCIE_CONFIG_CHECK_RETRY_COUNT; i++) {
+ val = OSL_PCI_READ_CONFIG(bus->osh, PCI_CFG_VID, sizeof(uint32));
+ if ((val & 0xFFFF) == VENDOR_BROADCOM) {
+ ret = BCME_OK;
+ break;
+ }
+ OSL_DELAY(DHDPCIE_CONFIG_CHECK_DELAY_MS * 1000);
+ }
+
+ return ret;
+}
+
+int
+dhdpcie_config_restore(dhd_bus_t *bus, bool restore_pmcsr)
+{
+ uint32 i;
+ osl_t *osh = bus->osh;
+
+ if (BCME_OK != dhdpcie_config_check(bus)) {
+ return BCME_ERROR;
+ }
+
+ for (i = PCI_CFG_REV >> 2; i < DHDPCIE_CONFIG_HDR_SIZE; i++) {
+ OSL_PCI_WRITE_CONFIG(osh, i << 2, sizeof(uint32), bus->saved_config.header[i]);
+ }
+ OSL_PCI_WRITE_CONFIG(osh, PCI_CFG_CMD, sizeof(uint32), bus->saved_config.header[1]);
+
+ if (restore_pmcsr)
+ OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PM_CSR,
+ sizeof(uint32), bus->saved_config.pmcsr);
+
+ OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_CAP, sizeof(uint32), bus->saved_config.msi_cap);
+ OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_ADDR_L, sizeof(uint32),
+ bus->saved_config.msi_addr0);
+ OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_ADDR_H,
+ sizeof(uint32), bus->saved_config.msi_addr1);
+ OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_DATA,
+ sizeof(uint32), bus->saved_config.msi_data);
+
+ OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_DEV_STATUS_CTRL,
+ sizeof(uint32), bus->saved_config.exp_dev_ctrl_stat);
+ OSL_PCI_WRITE_CONFIG(osh, PCIECFGGEN_DEV_STATUS_CTRL2,
+ sizeof(uint32), bus->saved_config.exp_dev_ctrl_stat2);
+ OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_LINK_STATUS_CTRL,
+ sizeof(uint32), bus->saved_config.exp_link_ctrl_stat);
+ OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_LINK_STATUS_CTRL2,
+ sizeof(uint32), bus->saved_config.exp_link_ctrl_stat2);
+
+ OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL1,
+ sizeof(uint32), bus->saved_config.l1pm0);
+ OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL2,
+ sizeof(uint32), bus->saved_config.l1pm1);
+
+ OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR0_WIN, sizeof(uint32),
+ bus->saved_config.bar0_win);
+ dhdpcie_setbar1win(bus, bus->saved_config.bar1_win);
+
+ return BCME_OK;
+}
+
+int
+dhdpcie_config_save(dhd_bus_t *bus)
+{
+ uint32 i;
+ osl_t *osh = bus->osh;
+
+ if (BCME_OK != dhdpcie_config_check(bus)) {
+ return BCME_ERROR;
+ }
+
+ for (i = 0; i < DHDPCIE_CONFIG_HDR_SIZE; i++) {
+ bus->saved_config.header[i] = OSL_PCI_READ_CONFIG(osh, i << 2, sizeof(uint32));
+ }
+
+ bus->saved_config.pmcsr = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32));
+
+ bus->saved_config.msi_cap = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_CAP,
+ sizeof(uint32));
+ bus->saved_config.msi_addr0 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_ADDR_L,
+ sizeof(uint32));
+ bus->saved_config.msi_addr1 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_ADDR_H,
+ sizeof(uint32));
+ bus->saved_config.msi_data = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_DATA,
+ sizeof(uint32));
+
+ bus->saved_config.exp_dev_ctrl_stat = OSL_PCI_READ_CONFIG(osh,
+ PCIECFGREG_DEV_STATUS_CTRL, sizeof(uint32));
+ bus->saved_config.exp_dev_ctrl_stat2 = OSL_PCI_READ_CONFIG(osh,
+ PCIECFGGEN_DEV_STATUS_CTRL2, sizeof(uint32));
+ bus->saved_config.exp_link_ctrl_stat = OSL_PCI_READ_CONFIG(osh,
+ PCIECFGREG_LINK_STATUS_CTRL, sizeof(uint32));
+ bus->saved_config.exp_link_ctrl_stat2 = OSL_PCI_READ_CONFIG(osh,
+ PCIECFGREG_LINK_STATUS_CTRL2, sizeof(uint32));
+
+ bus->saved_config.l1pm0 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL1,
+ sizeof(uint32));
+ bus->saved_config.l1pm1 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL2,
+ sizeof(uint32));
+
+ bus->saved_config.bar0_win = OSL_PCI_READ_CONFIG(osh, PCI_BAR0_WIN,
+ sizeof(uint32));
+ bus->saved_config.bar1_win = OSL_PCI_READ_CONFIG(osh, PCI_BAR1_WIN,
+ sizeof(uint32));
+
+ return BCME_OK;
+}
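+
+/* Usage note (illustrative): dhdpcie_config_save() and dhdpcie_config_restore()
+ * are meant to bracket events that wipe PCIe config space, e.g.:
+ *
+ *   dhdpcie_config_save(bus);          - snapshot header, MSI, PCIe caps, BARs
+ *   ... D3cold / FLR ...
+ *   dhdpcie_config_restore(bus, TRUE); - re-program saved values, incl. PM_CSR
+ *
+ * Note that restore replays the header starting from PCI_CFG_REV and then
+ * rewrites the command register (header[1]) afterwards, so bus-master/memory
+ * enables take effect only once the rest of the header is back in place.
+ */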
+
+#ifdef CONFIG_ARCH_EXYNOS
+dhd_pub_t *link_recovery = NULL;
+#endif /* CONFIG_ARCH_EXYNOS */
+
+static void
+dhdpcie_bus_intr_init(dhd_bus_t *bus)
+{
+ uint buscorerev = bus->sih->buscorerev;
+ bus->pcie_mailbox_int = PCIMailBoxInt(buscorerev);
+ bus->pcie_mailbox_mask = PCIMailBoxMask(buscorerev);
+ bus->d2h_mb_mask = PCIE_MB_D2H_MB_MASK(buscorerev);
+ bus->def_intmask = PCIE_MB_D2H_MB_MASK(buscorerev);
+ if (buscorerev < 64) {
+ bus->def_intmask |= PCIE_MB_TOPCIE_FN0_0 | PCIE_MB_TOPCIE_FN0_1;
+ }
+}
+
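+/* Scope note for the watchdog reset below: from buscorerev 66 onwards only
+ * PCIe function 0 is subsystem-reset; older bus cores reset all PCIe
+ * functions as well.
+ */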
+static void
+dhdpcie_cc_watchdog_reset(dhd_bus_t *bus)
+{
+ uint32 wd_en = (bus->sih->buscorerev >= 66) ? WD_SSRESET_PCIE_F0_EN :
+ (WD_SSRESET_PCIE_F0_EN | WD_SSRESET_PCIE_ALL_FN_EN);
+ pcie_watchdog_reset(bus->osh, bus->sih, WD_ENABLE_MASK, wd_en);
+}
+
+void
+dhdpcie_dongle_reset(dhd_bus_t *bus)
+{
+
+ /* if the pcie link is down, watchdog reset
+ * should not be done, as it may hang
+ */
+ if (bus->is_linkdown) {
+ return;
+ }
+
+ /* Currently BP reset using CFG reg is done only for android platforms */
+#ifdef DHD_USE_BP_RESET_SPROM
+	/* This is for architectures that do NOT control subsystem reset */
+ (void)dhd_bus_cfg_sprom_ctrl_bp_reset(bus);
+ return;
+#elif defined(DHD_USE_BP_RESET_SS_CTRL)
+	/* This is for architectures that support Subsystem Control */
+ (void)dhd_bus_cfg_ss_ctrl_bp_reset(bus);
+ return;
+#else
+
+#ifdef BCMQT_HW
+	/* FLR takes a long time on QT and is only required when testing with a
+	 * BT-included database. Fall back to watchdog reset by default and only
+	 * perform FLR if enabled through the module parameter
+	 */
+ if (qt_flr_reset && (dhd_bus_perform_flr(bus, FALSE) != BCME_UNSUPPORTED)) {
+ return;
+ }
+#else
+ /* dhd_bus_perform_flr will return BCME_UNSUPPORTED if chip is not FLR capable */
+ if (dhd_bus_perform_flr(bus, FALSE) == BCME_UNSUPPORTED)
+#endif
+ {
+ /* Legacy chipcommon watchdog reset */
+ dhdpcie_cc_watchdog_reset(bus);
+ }
+ return;
+#endif /* DHD_USE_BP_RESET_SPROM */
+}
+
+#ifdef BCMQT_HW
+/* Calculate the dongle/host clock ratio for QT so the waiting periods in the host driver can be
+ * scaled properly. The dongle uses the ALP clock by default, which can't be read directly, and
+ * the ILP and ALP clocks are scaled disproportionately in QT. So DHD must know the preset crystal
+ * frequency for the ALP clock in order to calculate the scale ratio. The logic below takes 3
+ * sources of xtal frequency, in the following priority:
+ * 1 module parameter
+ * 2 nvram "xtalfreq" line (not available for the first dongle reset)
+ * 3 hard-coded 37.4MHz
+ * If the QT simulation of a chip uses a xtal frequency other than 37.4MHz, it is strongly
+ * recommended to extend the hard-coded value on a per-chip basis or to override it with the
+ * module parameter.
+ */
+#define XTAL_FREQ_37M4 37400000u
+void dhdpcie_htclkratio_cal(dhd_bus_t *bus)
+{
+ uint cur_coreidx, pmu_idx;
+ uint32 ilp_start, ilp_tick, xtal_ratio;
+ int xtalfreq = 0;
+
+ /* If a larger than 1 htclkratio is set through module parameter, use it directly */
+ if (htclkratio > 1) {
+ goto exit;
+ }
+
+ /* Store current core id */
+ cur_coreidx = si_coreidx(bus->sih);
+ if (!si_setcore(bus->sih, PMU_CORE_ID, 0)) {
+ htclkratio = 2000;
+ goto exit;
+ }
+
+ pmu_idx = si_coreidx(bus->sih);
+
+	/* Count ILP ticks in 1 second of host domain clock */
+ ilp_start = si_corereg(bus->sih, pmu_idx, offsetof(pmuregs_t, pmutimer), 0, 0);
+ osl_sleep(1000);
+ ilp_tick = si_corereg(bus->sih, pmu_idx, offsetof(pmuregs_t, pmutimer), 0, 0);
+	/* -1 to compensate for the incomplete cycle at the beginning */
+ ilp_tick -= ilp_start - 1;
+
+ /* Get xtal vs ILP ratio from XtalFreqRatio(0x66c) */
+ xtal_ratio = si_corereg(bus->sih, pmu_idx, offsetof(pmuregs_t, pmu_xtalfreq), 0, 0);
+ xtal_ratio = (xtal_ratio & PMU_XTALFREQ_REG_ILPCTR_MASK) / 4;
+
+ /* Go back to original core */
+ si_setcoreidx(bus->sih, cur_coreidx);
+
+ /* Use module parameter if one is provided. Otherwise use default 37.4MHz */
+ if (dngl_xtalfreq) {
+ xtalfreq = dngl_xtalfreq;
+ } else {
+ xtalfreq = XTAL_FREQ_37M4;
+ }
+
+ /* htclkratio = xtalfreq / QT_XTAL_FREQ
+ * = xtalfreq / (ilp_tick * xtal_ratio)
+ */
+ htclkratio = xtalfreq / (ilp_tick * xtal_ratio);
+ bus->xtalfreq = xtalfreq;
+ bus->ilp_tick = ilp_tick;
+ bus->xtal_ratio = xtal_ratio;
+
+exit:
+ DHD_ERROR(("Dongle/Host clock ratio %u with %dHz xtal frequency\n", htclkratio, xtalfreq));
+}
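+
+/* Worked example with purely illustrative numbers: if one host-second of
+ * polling counts ilp_tick = 16 ILP ticks, and XtalFreqRatio yields
+ * xtal_ratio = 1169 (roughly ALP ticks per ILP tick), the simulated ALP
+ * clock runs at 16 * 1169 = 18704 Hz of host time. With a 37.4MHz crystal,
+ * htclkratio = 37400000 / 18704 ~= 2000, i.e. every dongle-side wait must be
+ * stretched about 2000x on QT.
+ */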
+
+/* Re-calculate htclkratio if nvram provides a different xtalfreq */
+void dhdpcie_htclkratio_recal(dhd_bus_t *bus, char *nvram, uint nvram_sz)
+{
+ char *freq_c = NULL;
+ uint len, p;
+ int xtalfreq = 0;
+
+ /* Do not re-calculate if xtalfreq is overridden by module parameter */
+ if (dngl_xtalfreq)
+ return;
+
+ /* look for "xtalfreq=xxxx" line in nvram */
+ len = strlen("xtalfreq");
+ for (p = 0; p < (nvram_sz - len) && nvram[p]; ) {
+ if ((bcmp(&nvram[p], "xtalfreq", len) == 0) && (nvram[p + len] == '=')) {
+ freq_c = &nvram[p + len + 1u];
+ break;
+ }
+ /* jump to next line */
+ while (nvram[p++]);
+ }
+
+ if (freq_c) {
+ xtalfreq = bcm_strtoul(freq_c, NULL, 0);
+ if (xtalfreq > (INT_MAX / 1000u)) {
+ DHD_ERROR(("xtalfreq %d in nvram is too big\n", xtalfreq));
+ xtalfreq = 0;
+ }
+ xtalfreq *= 1000;
+ }
+
+ /* Skip recalculation if:
+ * nvram doesn't provide "xtalfreq", or
+	 * the first calculation was skipped because of a module parameter override, or
+ * xtalfreq in nvram is the same as the one used in first calculation
+ */
+ if (xtalfreq == 0 || bus->xtalfreq == 0 || xtalfreq == bus->xtalfreq) {
+ return;
+ }
+
+	/* Print out an error message here. Even if the ratio is corrected with the nvram setting,
+	 * the dongle reset was already performed before DHD had access to NVRAM, and an
+	 * insufficient waiting period for that reset might cause unexpected behavior.
+	 */
+ DHD_ERROR(("Re-calculating htclkratio because nvram xtalfreq %dHz is different from %dHz\n",
+ xtalfreq, bus->xtalfreq));
+
+ htclkratio = xtalfreq / (bus->ilp_tick * bus->xtal_ratio);
+ bus->xtalfreq = xtalfreq;
+
+ DHD_ERROR(("Corrected dongle/Host clock ratio %u with %dHz xtal frequency\n",
+ htclkratio, xtalfreq));
+}
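+
+/* Example of the nvram line parsed above (illustrative value): a line such as
+ *
+ *   xtalfreq=37400
+ *
+ * is read as kHz and scaled by 1000, i.e. a 37.4MHz crystal. Lines in the
+ * nvram blob are NUL-separated, which is why the scan advances with
+ * "while (nvram[p++]);".
+ */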
+#endif /* BCMQT_HW */
+
+static bool
+is_bmpu_supported(dhd_bus_t *bus)
+{
+ if (BCM4378_CHIP(bus->sih->chip) ||
+ BCM4376_CHIP(bus->sih->chip) ||
+ BCM4387_CHIP(bus->sih->chip) ||
+ BCM4385_CHIP(bus->sih->chip)) {
+ return TRUE;
+ }
+ return FALSE;
+}
+
+#define CHIP_COMMON_SCR_DHD_TO_BL_ADDR(sih) (SI_ENUM_BASE(sih) + CC_SCR_DHD_TO_BL)
+#define CHIP_COMMON_SCR_BL_TO_DHD_ADDR(sih) (SI_ENUM_BASE(sih) + CC_SCR_BL_TO_DHD)
+void
+dhdpcie_bus_mpu_disable(dhd_bus_t *bus)
+{
+ volatile uint32 *cr4_regs;
+ uint val = 0;
+
+ if (is_bmpu_supported(bus) == FALSE) {
+ return;
+ }
+
+ /* reset to default values dhd_to_bl and bl_to_dhd regs */
+ (void)serialized_backplane_access(bus, CHIP_COMMON_SCR_DHD_TO_BL_ADDR(bus->sih),
+ sizeof(val), &val, FALSE);
+ (void)serialized_backplane_access(bus, CHIP_COMMON_SCR_BL_TO_DHD_ADDR(bus->sih),
+ sizeof(val), &val, FALSE);
+
+ cr4_regs = si_setcore(bus->sih, ARMCR4_CORE_ID, 0);
+ if (cr4_regs == NULL) {
+ DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
+ return;
+ }
+ if (R_REG(bus->osh, cr4_regs + ARMCR4REG_CORECAP) & ACC_MPU_MASK) {
+ /* bus mpu is supported */
+ W_REG(bus->osh, cr4_regs + ARMCR4REG_MPUCTRL, 0);
+ }
+}
+
+static bool
+dhdpcie_dongle_attach(dhd_bus_t *bus)
+{
+ osl_t *osh = bus->osh;
+ volatile void *regsva = (volatile void*)bus->regs;
+ uint16 devid;
+ uint32 val;
+ sbpcieregs_t *sbpcieregs;
+ bool dongle_reset_needed;
+ uint16 chipid;
+
+ BCM_REFERENCE(chipid);
+
+ DHD_TRACE(("%s: ENTER\n", __FUNCTION__));
+
+ /* Configure CTO Prevention functionality */
+#if defined(BCMFPGA_HW) || defined(BCMQT_HW)
+ DHD_ERROR(("Disable CTO\n"));
+ bus->cto_enable = FALSE;
+#else
+#if defined(BCMPCIE_CTO_PREVENTION)
+ chipid = dhd_get_chipid(bus);
+
+ if (BCM4349_CHIP(chipid) || BCM4350_CHIP(chipid) || BCM4345_CHIP(chipid)) {
+ DHD_ERROR(("Disable CTO\n"));
+ bus->cto_enable = FALSE;
+ } else {
+ DHD_ERROR(("Enable CTO\n"));
+ bus->cto_enable = TRUE;
+ }
+#else
+ DHD_ERROR(("Disable CTO\n"));
+ bus->cto_enable = FALSE;
+#endif /* BCMPCIE_CTO_PREVENTION */
+#endif /* BCMFPGA_HW || BCMQT_HW */
+
+ if (PCIECTO_ENAB(bus)) {
+ dhdpcie_cto_init(bus, TRUE);
+ }
+
+#ifdef CONFIG_ARCH_EXYNOS
+ link_recovery = bus->dhd;
+#endif /* CONFIG_ARCH_EXYNOS */
+
+ dhd_init_pwr_req_lock(bus);
+ dhd_init_bus_lp_state_lock(bus);
+ dhd_init_backplane_access_lock(bus);
+
+ bus->alp_only = TRUE;
+ bus->sih = NULL;
+
+ /* Checking PCIe bus status with reading configuration space */
+ val = OSL_PCI_READ_CONFIG(osh, PCI_CFG_VID, sizeof(uint32));
+ if ((val & 0xFFFF) != VENDOR_BROADCOM) {
+ DHD_ERROR(("%s : failed to read PCI configuration space!\n", __FUNCTION__));
+ goto fail;
+ }
+ devid = (val >> 16) & 0xFFFF;
+ bus->cl_devid = devid;
+
+ /* Set bar0 window to si_enum_base */
+ dhdpcie_bus_cfg_set_bar0_win(bus, si_enum_base(devid));
+
+ /*
+ * Checking PCI_SPROM_CONTROL register for preventing invalid address access
+ * due to switch address space from PCI_BUS to SI_BUS.
+ */
+ val = OSL_PCI_READ_CONFIG(osh, PCI_SPROM_CONTROL, sizeof(uint32));
+ if (val == 0xffffffff) {
+ DHD_ERROR(("%s : failed to read SPROM control register\n", __FUNCTION__));
+ goto fail;
+ }
+
+#if defined(DHD_EFI) || defined(NDIS)
+ /* Save good copy of PCIe config space */
+ if (BCME_OK != dhdpcie_config_save(bus)) {
+ DHD_ERROR(("%s : failed to save PCI configuration space!\n", __FUNCTION__));
+ goto fail;
+ }
+#endif /* DHD_EFI || NDIS */
+
+ /* si_attach() will provide an SI handle and scan the backplane */
+ if (!(bus->sih = si_attach((uint)devid, osh, regsva, PCI_BUS, bus,
+ &bus->vars, &bus->varsz))) {
+ DHD_ERROR(("%s: si_attach failed!\n", __FUNCTION__));
+ goto fail;
+ }
+
+ if (MULTIBP_ENAB(bus->sih) && (bus->sih->buscorerev >= 66)) {
+ /*
+ * HW JIRA - CRWLPCIEGEN2-672
+ * Producer Index Feature which is used by F1 gets reset on F0 FLR
+ * fixed in REV68
+ */
+ if (PCIE_ENUM_RESET_WAR_ENAB(bus->sih->buscorerev)) {
+ dhdpcie_ssreset_dis_enum_rst(bus);
+ }
+
+		/* IOV_DEVRESET could exercise si_detach()/si_attach() again, so reset
+		 * the power request refcount here:
+		 * dhdpcie_bus_release_dongle() --> si_detach()
+		 * dhdpcie_dongle_attach() --> si_attach()
+ */
+ bus->pwr_req_ref = 0;
+ }
+
+ if (MULTIBP_ENAB(bus->sih)) {
+ dhd_bus_pcie_pwr_req_nolock(bus);
+ }
+
+ /* Get info on the ARM and SOCRAM cores... */
+ /* Should really be qualified by device id */
+ if ((si_setcore(bus->sih, ARM7S_CORE_ID, 0)) ||
+ (si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) ||
+ (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) ||
+ (si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) {
+ bus->armrev = si_corerev(bus->sih);
+ bus->coreid = si_coreid(bus->sih);
+ } else {
+ DHD_ERROR(("%s: failed to find ARM core!\n", __FUNCTION__));
+ goto fail;
+ }
+
+ /* CA7 requires coherent bits on */
+ if (bus->coreid == ARMCA7_CORE_ID) {
+ val = dhdpcie_bus_cfg_read_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4);
+ dhdpcie_bus_cfg_write_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4,
+ (val | PCIE_BARCOHERENTACCEN_MASK));
+ }
+
+	/* EFI requirement - stop the driver load if FW is already running.
+	 * This needs to be done here, before pcie_watchdog_reset, because
+	 * pcie_watchdog_reset will put the ARM back into the halted state
+ */
+ if (!dhdpcie_is_arm_halted(bus)) {
+		DHD_ERROR(("%s: ARM is not halted, FW is already running! Abort.\n",
+ __FUNCTION__));
+ goto fail;
+ }
+
+ BCM_REFERENCE(dongle_reset_needed);
+
+ /* For inbuilt drivers pcie clk req will be done by RC,
+ * so do not do clkreq from dhd
+ */
+#if defined(linux) || defined(LINUX)
+ if (dhd_download_fw_on_driverload)
+#endif /* linux || LINUX */
+ {
+ /* Enable CLKREQ# */
+ dhdpcie_clkreq(bus->osh, 1, 1);
+ }
+
+ /* Calculate htclkratio only for QT, for FPGA it is fixed at 30 */
+#ifdef BCMQT_HW
+ dhdpcie_htclkratio_cal(bus);
+#endif /* BCMQT_HW */
+
+	/*
+	 * bus->dhd will be NULL if this is called from dhd_bus_attach, so reset
+	 * without checking the dongle_isolation flag; but if it is called via some
+	 * other path, like quiesce FLR, then watchdog_reset should be called based
+	 * on the dongle_isolation flag.
+	 */
+ if (bus->dhd == NULL) {
+ /* dhd_attach not yet happened, do dongle reset */
+#ifdef DHD_SKIP_DONGLE_RESET_IN_ATTACH
+ dongle_reset_needed = FALSE;
+#else
+ dongle_reset_needed = TRUE;
+#endif /* DHD_SKIP_DONGLE_RESET_IN_ATTACH */
+ } else {
+		/* Based on the dongle_isolation flag, reset the dongle */
+ dongle_reset_needed = !(bus->dhd->dongle_isolation);
+ }
+
+ /* Fix for FLR reset specific to 4397a0. Write a value 0x1E in PMU CC reg18 */
+ if (BCM4397_CHIP(dhd_get_chipid(bus)) && (bus->sih->chiprev == 0)) {
+ uint origidx = 0;
+
+ origidx = si_coreidx(bus->sih);
+ pmu_corereg(bus->sih, SI_CC_IDX, chipcontrol_addr, ~0, PMU_CHIPCTL18);
+ pmu_corereg(bus->sih, SI_CC_IDX, chipcontrol_data,
+ (PMU_CC18_WL_BOOKER_FORCEPWRDWN_EN | PMU_CC18_WL_P_CHAN_TIMER_SEL_MASK),
+ (PMU_CC18_WL_BOOKER_FORCEPWRDWN_EN |
+ ((PMU_CC18_WL_P_CHAN_TIMER_SEL_8ms << PMU_CC18_WL_P_CHAN_TIMER_SEL_OFF) &
+ PMU_CC18_WL_P_CHAN_TIMER_SEL_MASK)));
+ si_setcore(bus->sih, origidx, 0);
+ }
+
+ /*
+ * Issue dongle to reset all the cores on the chip - similar to rmmod dhd
+ * This is required to avoid spurious interrupts to the Host and bring back
+ * dongle to a sane state (on host soft-reboot / watchdog-reboot).
+ */
+ if (dongle_reset_needed) {
+ dhdpcie_dongle_reset(bus);
+ }
+
+ /* need to set the force_bt_quiesce flag here
+ * before calling dhdpcie_dongle_flr_or_pwr_toggle
+ */
+ bus->force_bt_quiesce = TRUE;
+ /*
+ * For buscorerev = 66 and after, F0 FLR should be done independent from F1.
+ * So don't need BT quiesce.
+ */
+ if (bus->sih->buscorerev >= 66) {
+ bus->force_bt_quiesce = FALSE;
+ }
+
+ dhdpcie_dongle_flr_or_pwr_toggle(bus);
+
+ dhdpcie_bus_mpu_disable(bus);
+
+ si_setcore(bus->sih, PCIE2_CORE_ID, 0);
+ sbpcieregs = (sbpcieregs_t*)(bus->regs);
+
+ /* WAR where the BAR1 window may not be sized properly */
+ W_REG(osh, &sbpcieregs->configaddr, 0x4e0);
+ val = R_REG(osh, &sbpcieregs->configdata);
+ W_REG(osh, &sbpcieregs->configdata, val);
+
+ /* if chip uses sysmem instead of tcm, typically ARM CA chips */
+ if (si_setcore(bus->sih, SYSMEM_CORE_ID, 0)) {
+ if (!(bus->orig_ramsize = si_sysmem_size(bus->sih))) {
+ DHD_ERROR(("%s: failed to find SYSMEM memory!\n", __FUNCTION__));
+ goto fail;
+ }
+ /* also populate base address */
+ switch ((uint16)bus->sih->chip) {
+ case BCM4385_CHIP_ID:
+ bus->dongle_ram_base = CA7_4385_RAM_BASE;
+ break;
+ case BCM4388_CHIP_ID:
+ case BCM4389_CHIP_ID:
+ bus->dongle_ram_base = CA7_4389_RAM_BASE;
+ break;
+#ifdef UNRELEASEDCHIP
+ case BCM4397_CHIP_ID:
+ bus->dongle_ram_base = CA7_4389_RAM_BASE;
+ break;
+#endif
+ default:
+ /* also populate base address */
+ bus->dongle_ram_base = 0x200000;
+ DHD_ERROR(("%s: WARNING: Using default ram base at 0x%x\n",
+ __FUNCTION__, bus->dongle_ram_base));
+ break;
+ }
+ } else if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
+ if (!(bus->orig_ramsize = si_socram_size(bus->sih))) {
+ DHD_ERROR(("%s: failed to find SOCRAM memory!\n", __FUNCTION__));
+ goto fail;
+ }
+ } else {
+ /* cr4 has a different way to find the RAM size from TCM's */
+ if (!(bus->orig_ramsize = si_tcm_size(bus->sih))) {
+ DHD_ERROR(("%s: failed to find CR4-TCM memory!\n", __FUNCTION__));
+ goto fail;
+ }
+ /* also populate base address */
+ switch ((uint16)bus->sih->chip) {
+ case BCM4339_CHIP_ID:
+ case BCM4335_CHIP_ID:
+ bus->dongle_ram_base = CR4_4335_RAM_BASE;
+ break;
+ case BCM4358_CHIP_ID:
+ case BCM4354_CHIP_ID:
+ case BCM43567_CHIP_ID:
+ case BCM43569_CHIP_ID:
+ case BCM4350_CHIP_ID:
+ case BCM43570_CHIP_ID:
+ bus->dongle_ram_base = CR4_4350_RAM_BASE;
+ break;
+ case BCM4360_CHIP_ID:
+ bus->dongle_ram_base = CR4_4360_RAM_BASE;
+ break;
+
+ case BCM4364_CHIP_ID:
+ bus->dongle_ram_base = CR4_4364_RAM_BASE;
+ break;
+
+ CASE_BCM4345_CHIP:
+ bus->dongle_ram_base = (bus->sih->chiprev < 6) /* changed at 4345C0 */
+ ? CR4_4345_LT_C0_RAM_BASE : CR4_4345_GE_C0_RAM_BASE;
+ break;
+ CASE_BCM43602_CHIP:
+ bus->dongle_ram_base = CR4_43602_RAM_BASE;
+ break;
+ case BCM4349_CHIP_GRPID:
+			/* RAM base changed from 4349c0 (revid=9) onwards */
+ bus->dongle_ram_base = ((bus->sih->chiprev < 9) ?
+ CR4_4349_RAM_BASE : CR4_4349_RAM_BASE_FROM_REV_9);
+ break;
+ case BCM4347_CHIP_ID:
+ case BCM4357_CHIP_ID:
+ case BCM4361_CHIP_ID:
+ bus->dongle_ram_base = CR4_4347_RAM_BASE;
+ break;
+ case BCM43751_CHIP_ID:
+ bus->dongle_ram_base = CR4_43751_RAM_BASE;
+ break;
+ case BCM43752_CHIP_ID:
+ bus->dongle_ram_base = CR4_43752_RAM_BASE;
+ break;
+ case BCM4376_CHIP_GRPID:
+ bus->dongle_ram_base = CR4_4376_RAM_BASE;
+ break;
+ case BCM4378_CHIP_GRPID:
+ bus->dongle_ram_base = CR4_4378_RAM_BASE;
+ break;
+ case BCM4362_CHIP_ID:
+ bus->dongle_ram_base = CR4_4362_RAM_BASE;
+ break;
+ case BCM4375_CHIP_ID:
+ case BCM4369_CHIP_ID:
+ bus->dongle_ram_base = CR4_4369_RAM_BASE;
+ break;
+ case BCM4377_CHIP_ID:
+ bus->dongle_ram_base = CR4_4377_RAM_BASE;
+ break;
+ case BCM4387_CHIP_GRPID:
+ bus->dongle_ram_base = CR4_4387_RAM_BASE;
+ break;
+ case BCM4385_CHIP_ID:
+ bus->dongle_ram_base = CR4_4385_RAM_BASE;
+ break;
+ default:
+ bus->dongle_ram_base = 0;
+ DHD_ERROR(("%s: WARNING: Using default ram base at 0x%x\n",
+ __FUNCTION__, bus->dongle_ram_base));
+ }
+ }
+ bus->ramsize = bus->orig_ramsize;
+ if (dhd_dongle_ramsize) {
+ dhdpcie_bus_dongle_setmemsize(bus, dhd_dongle_ramsize);
+ }
+
+ if (bus->ramsize > DONGLE_TCM_MAP_SIZE) {
+ DHD_ERROR(("%s : invalid ramsize %d(0x%x) is returned from dongle\n",
+ __FUNCTION__, bus->ramsize, bus->ramsize));
+ goto fail;
+ }
+
+ DHD_ERROR(("DHD: dongle ram size is set to %d(orig %d) at 0x%x\n",
+ bus->ramsize, bus->orig_ramsize, bus->dongle_ram_base));
+
+ dhdpcie_bar1_window_switch_enab(bus);
+
+ /* Init bar1_switch_lock only after bar1_switch_enab is inited */
+ dhd_init_bar1_switch_lock(bus);
+
+ bus->srmemsize = si_socram_srmem_size(bus->sih);
+
+ dhdpcie_bus_intr_init(bus);
+
+ /* Set the poll and/or interrupt flags */
+ bus->intr = (bool)dhd_intr;
+ if ((bus->poll = (bool)dhd_poll))
+ bus->pollrate = 1;
+#ifdef DHD_DISABLE_ASPM
+ dhd_bus_aspm_enable_rc_ep(bus, FALSE);
+#endif /* DHD_DISABLE_ASPM */
+#ifdef PCIE_OOB
+ dhdpcie_oob_init(bus);
+#endif /* PCIE_OOB */
+#ifdef PCIE_INB_DW
+ bus->inb_enabled = TRUE;
+#endif /* PCIE_INB_DW */
+#if defined(PCIE_OOB) || defined(PCIE_INB_DW)
+ bus->ds_enabled = TRUE;
+ bus->deep_sleep = TRUE;
+#endif
+
+ bus->idma_enabled = TRUE;
+ bus->ifrm_enabled = TRUE;
+#ifdef BCMINTERNAL
+ bus->dma_chan = 0;
+#endif /* BCMINTERNAL */
+
+ dhdpcie_pme_stat_clear(bus);
+
+ if (MULTIBP_ENAB(bus->sih)) {
+ dhd_bus_pcie_pwr_req_clear_nolock(bus);
+
+ /*
+		 * One-time clearing of the Common Power Domain since the HW default is set.
+		 * It needs to be after FLR because FLR resets PCIe enum back to HW defaults
+ * for 4378B0 (rev 68).
+ * On 4378A0 (rev 66), PCIe enum reset is disabled due to CRWLPCIEGEN2-672
+ */
+ si_srpwr_request(bus->sih, SRPWR_DMN0_PCIE_MASK, 0);
+
+ /*
+		 * WAR to fix ARM cold boot:
+		 * asserting the WL domain in DAR helps, but not enum
+ */
+ if (bus->sih->buscorerev >= 68) {
+ dhd_bus_pcie_pwr_req_wl_domain(bus,
+ DAR_PCIE_PWR_CTRL((bus->sih)->buscorerev), TRUE);
+ }
+ }
+
+ DHD_TRACE(("%s: EXIT: SUCCESS\n", __FUNCTION__));
+
+ return 0;
+
+fail:
+/* For EFI, even if there is an error, the load still succeeds,
+ * so si_detach should not be called here; it is called during unload.
+ */
+#ifndef DHD_EFI
+ /*
+ * As request irq is done later, till then CTO will not be detected,
+ * so unconditionally dump cfg and DAR registers.
+ */
+ dhd_bus_dump_imp_cfg_registers(bus);
+ /* Check if CTO has happened */
+ if (PCIECTO_ENAB(bus)) {
+ /* read pci_intstatus */
+ uint32 pci_intstatus =
+ dhdpcie_bus_cfg_read_dword(bus, PCI_INT_STATUS, 4);
+ if (pci_intstatus == (uint32)-1) {
+ DHD_ERROR(("%s : Invalid pci_intstatus(0x%x)\n",
+ __FUNCTION__, pci_intstatus));
+ } else if (pci_intstatus & PCI_CTO_INT_MASK) {
+ DHD_ERROR(("%s: ##### CTO REPORTED BY DONGLE "
+ "intstat=0x%x enab=%d\n", __FUNCTION__,
+ pci_intstatus, bus->cto_enable));
+ }
+ }
+ dhd_deinit_pwr_req_lock(bus);
+ dhd_deinit_bus_lp_state_lock(bus);
+ dhd_deinit_backplane_access_lock(bus);
+
+ if (bus->sih != NULL) {
+ /* Dump DAR registers only if si_attach has succeeded */
+ dhd_bus_dump_dar_registers(bus);
+ if (MULTIBP_ENAB(bus->sih)) {
+ dhd_bus_pcie_pwr_req_clear_nolock(bus);
+ }
+
+ si_detach(bus->sih);
+ bus->sih = NULL;
+ }
+
+#endif /* DHD_EFI */
+ DHD_TRACE(("%s: EXIT: FAILURE\n", __FUNCTION__));
+ return -1;
+}
+
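+/* Mask/unmask the dongle mailbox interrupt through PCIe config space
+ * (PCIIntmask). These config-space variants are only used for old PCIe core
+ * revisions (buscorerev 2/4/6, see dhdpcie_bus_intr_enable/disable below);
+ * newer cores program the mailbox mask register over the backplane instead.
+ */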
+int
+dhpcie_bus_unmask_interrupt(dhd_bus_t *bus)
+{
+ dhdpcie_bus_cfg_write_dword(bus, PCIIntmask, 4, I_MB);
+ return 0;
+}
+int
+dhpcie_bus_mask_interrupt(dhd_bus_t *bus)
+{
+ dhdpcie_bus_cfg_write_dword(bus, PCIIntmask, 4, 0x0);
+ return 0;
+}
+
+/* Non-atomic function; caller should hold the appropriate lock */
+void
+dhdpcie_bus_intr_enable(dhd_bus_t *bus)
+{
+ DHD_TRACE(("%s Enter\n", __FUNCTION__));
+ if (bus) {
+ if (bus->sih && !bus->is_linkdown) {
+			/* Skip after receiving D3 ACK */
+ if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) {
+ return;
+ }
+ if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
+ (bus->sih->buscorerev == 4)) {
+ dhpcie_bus_unmask_interrupt(bus);
+ } else {
+#ifdef DHD_MMIO_TRACE
+ dhd_bus_mmio_trace(bus, bus->pcie_mailbox_mask,
+ bus->def_intmask, TRUE);
+#endif /* defined(DHD_MMIO_TRACE) */
+ si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_mask,
+ bus->def_intmask, bus->def_intmask);
+ }
+ }
+
+#if defined(NDIS)
+ dhd_msix_message_set(bus->dhd, 0, 0, TRUE);
+#endif
+ }
+
+ DHD_TRACE(("%s Exit\n", __FUNCTION__));
+}
+
+/* Non-atomic function; caller should hold the appropriate lock */
+void
+dhdpcie_bus_intr_disable(dhd_bus_t *bus)
+{
+ DHD_TRACE(("%s Enter\n", __FUNCTION__));
+ if (bus && bus->sih && !bus->is_linkdown) {
+		/* Skip after receiving D3 ACK */
+ if (DHD_CHK_BUS_LPS_D3_ACKED(bus)) {
+ return;
+ }
+
+ if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
+ (bus->sih->buscorerev == 4)) {
+ dhpcie_bus_mask_interrupt(bus);
+ } else {
+#ifdef DHD_MMIO_TRACE
+ dhd_bus_mmio_trace(bus, bus->pcie_mailbox_mask, 0, TRUE);
+#endif /* defined(DHD_MMIO_TRACE) */
+ si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_mask,
+ bus->def_intmask, 0);
+ }
+ }
+#if defined(NDIS)
+ /*
+ * dhdpcie_bus_intr_disable may get called from
+ * dhdpcie_dongle_attach -> dhdpcie_dongle_reset
+ * with dhd = NULL during attach time. So check for bus->dhd NULL before
+ * calling dhd_msix_message_set
+ */
+ if (bus && bus->dhd) {
+ dhd_msix_message_set(bus->dhd, 0, 0, FALSE);
+ }
+#endif
+
+ DHD_TRACE(("%s Exit\n", __FUNCTION__));
+}
+
+/*
+ * dhdpcie_advertise_bus_cleanup advertises that cleanup is in progress
+ * to other bus user contexts like Tx, Rx, IOVAR, WD etc. and waits for those contexts
+ * to gracefully exit. Before marking the bus state as busy, every bus usage context checks
+ * whether the busstate is DHD_BUS_DOWN or DHD_BUS_DOWN_IN_PROGRESS; if so,
+ * it exits right there without marking dhd_bus_busy_state as BUSY.
+ */
+void
+dhdpcie_advertise_bus_cleanup(dhd_pub_t *dhdp)
+{
+ unsigned long flags;
+ int timeleft;
+
+#ifdef DHD_PCIE_RUNTIMEPM
+ dhdpcie_runtime_bus_wake(dhdp, TRUE, dhdpcie_advertise_bus_cleanup);
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+ dhdp->dhd_watchdog_ms_backup = dhd_watchdog_ms;
+ if (dhdp->dhd_watchdog_ms_backup) {
+ DHD_ERROR(("%s: Disabling wdtick before dhd deinit\n",
+ __FUNCTION__));
+ dhd_os_wd_timer(dhdp, 0);
+ }
+ if (dhdp->busstate != DHD_BUS_DOWN) {
+#ifdef DHD_DONGLE_TRAP_IN_DETACH
+ /*
+		 * For x86 platforms, rmmod/insmod fails because some power
+		 * resources are not held high.
+		 * Hence induce a DB7 trap during detach; in the FW trap handler, all
+		 * power resources are held high.
+ */
+ if (!dhd_query_bus_erros(dhdp) && dhdp->db7_trap.fw_db7w_trap) {
+ dhdp->db7_trap.fw_db7w_trap_inprogress = TRUE;
+ dhdpcie_fw_trap(dhdp->bus);
+ OSL_DELAY(100 * 1000); // wait 100 msec
+ dhdp->db7_trap.fw_db7w_trap_inprogress = FALSE;
+ } else {
+ DHD_ERROR(("%s: DB7 Not sent!!!\n",
+ __FUNCTION__));
+ }
+#endif /* DHD_DONGLE_TRAP_IN_DETACH */
+ DHD_GENERAL_LOCK(dhdp, flags);
+ dhdp->busstate = DHD_BUS_DOWN_IN_PROGRESS;
+ DHD_GENERAL_UNLOCK(dhdp, flags);
+ }
+
+ timeleft = dhd_os_busbusy_wait_negation(dhdp, &dhdp->dhd_bus_busy_state);
+#ifdef LINUX
+ if ((timeleft == 0) || (timeleft == 1))
+#else
+ if (timeleft == 0)
+#endif
+ {
+		/* XXX This condition ideally should not occur; it means some
+		 * bus usage context is not clearing its respective usage bit. Print
+		 * dhd_bus_busy_state and crash the host for further debugging.
+ */
+ DHD_ERROR(("%s : Timeout due to dhd_bus_busy_state=0x%x\n",
+ __FUNCTION__, dhdp->dhd_bus_busy_state));
+ ASSERT(0);
+ }
+
+ return;
+}
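+
+/* Illustrative sketch of the contract described above, as a hypothetical bus
+ * usage context would honor it (macro names follow the IN_WD pattern used in
+ * dhd_bus_watchdog below; the IN_TX variant here is assumed):
+ */
+#if 0
+static void
+example_bus_usage_context(dhd_pub_t *dhdp)
+{
+	unsigned long flags;
+
+	DHD_GENERAL_LOCK(dhdp, flags);
+	if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
+		/* cleanup advertised: exit without setting a busy bit */
+		DHD_GENERAL_UNLOCK(dhdp, flags);
+		return;
+	}
+	DHD_BUS_BUSY_SET_IN_TX(dhdp);	/* assumed busy bit for this example */
+	DHD_GENERAL_UNLOCK(dhdp, flags);
+	/* ... do bus work ... */
+	DHD_GENERAL_LOCK(dhdp, flags);
+	DHD_BUS_BUSY_CLEAR_IN_TX(dhdp);
+	dhd_os_busbusy_wake(dhdp);	/* lets dhd_os_busbusy_wait_negation() return */
+	DHD_GENERAL_UNLOCK(dhdp, flags);
+}
+#endif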
+
+static void
+dhdpcie_advertise_bus_remove(dhd_pub_t *dhdp)
+{
+ unsigned long flags;
+ int timeleft;
+
+ DHD_GENERAL_LOCK(dhdp, flags);
+ dhdp->busstate = DHD_BUS_REMOVE;
+ DHD_GENERAL_UNLOCK(dhdp, flags);
+
+ timeleft = dhd_os_busbusy_wait_negation(dhdp, &dhdp->dhd_bus_busy_state);
+ if ((timeleft == 0) || (timeleft == 1)) {
+ DHD_ERROR(("%s : Timeout due to dhd_bus_busy_state=0x%x\n",
+ __FUNCTION__, dhdp->dhd_bus_busy_state));
+ ASSERT(0);
+ }
+
+ return;
+}
+
+static void
+dhdpcie_bus_remove_prep(dhd_bus_t *bus)
+{
+ unsigned long flags;
+ DHD_TRACE(("%s Enter\n", __FUNCTION__));
+
+ DHD_GENERAL_LOCK(bus->dhd, flags);
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
+ bus->dhd->busstate = DHD_BUS_DOWN;
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+
+#ifdef PCIE_INB_DW
+ /* De-Initialize the lock to serialize Device Wake Inband activities */
+ if (bus->inb_lock) {
+ osl_spin_lock_deinit(bus->dhd->osh, bus->inb_lock);
+ bus->inb_lock = NULL;
+ }
+#endif
+
+ dhd_os_sdlock(bus->dhd);
+
+ if (bus->sih && !bus->dhd->dongle_isolation) {
+
+ dhd_bus_pcie_pwr_req_reload_war(bus);
+
+ /* Skip below WARs for Android as insmod fails after rmmod in Brix Android */
+#if !defined(OEM_ANDROID)
+ /* HW4347-909, Set PCIE TRefUp time to 100us for 4347/4377 */
+ if ((bus->sih->buscorerev == 19) || (bus->sih->buscorerev == 23)) {
+ pcie_set_trefup_time_100us(bus->sih);
+ }
+
+ /* disable fast lpo from 4347/4377 */
+		/* For 4378/4387/4389, do not disable fast lpo because we always enable fast lpo;
+		 * disabling it causes insmod/rmmod reload failure.
+ */
+ if ((PMUREV(bus->sih->pmurev) > 31) &&
+ !(PCIE_FASTLPO_ENABLED(bus->sih->buscorerev))) {
+ si_pmu_fast_lpo_disable(bus->sih);
+ }
+#endif /* !OEM_ANDROID */
+
+ /* if the pcie link is down, watchdog reset
+ * should not be done, as it may hang
+ */
+
+ if (!bus->is_linkdown) {
+ /* For Non-EFI modular builds, do dongle reset during rmmod */
+#ifndef DHD_EFI
+			/* For EFI-DHD this compile flag will be defined.
+			 * In EFI, depending on the BT-over-PCIe mode,
+			 * we either power toggle or do an F0 FLR
+			 * from dhdpcie_bus_release_dongle(), so there is no
+			 * need to do a dongle reset from here
+ */
+ dhdpcie_dongle_reset(bus);
+#endif /* !DHD_EFI */
+ }
+
+ bus->dhd->is_pcie_watchdog_reset = TRUE;
+ }
+
+ dhd_os_sdunlock(bus->dhd);
+
+ DHD_TRACE(("%s Exit\n", __FUNCTION__));
+}
+
+void
+dhd_init_bus_lp_state_lock(dhd_bus_t *bus)
+{
+ if (!bus->bus_lp_state_lock) {
+ bus->bus_lp_state_lock = osl_spin_lock_init(bus->osh);
+ }
+}
+
+void
+dhd_deinit_bus_lp_state_lock(dhd_bus_t *bus)
+{
+ if (bus->bus_lp_state_lock) {
+ osl_spin_lock_deinit(bus->osh, bus->bus_lp_state_lock);
+ bus->bus_lp_state_lock = NULL;
+ }
+}
+
+void
+dhd_init_backplane_access_lock(dhd_bus_t *bus)
+{
+ if (!bus->backplane_access_lock) {
+ bus->backplane_access_lock = osl_spin_lock_init(bus->osh);
+ }
+}
+
+void
+dhd_deinit_backplane_access_lock(dhd_bus_t *bus)
+{
+ if (bus->backplane_access_lock) {
+ osl_spin_lock_deinit(bus->osh, bus->backplane_access_lock);
+ bus->backplane_access_lock = NULL;
+ }
+}
+
+/** Detach and free everything */
+void
+dhdpcie_bus_release(dhd_bus_t *bus)
+{
+ bool dongle_isolation = FALSE;
+#ifdef BCMQT
+ uint buscorerev = 0;
+#endif /* BCMQT */
+ osl_t *osh = NULL;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (bus) {
+
+ osh = bus->osh;
+ ASSERT(osh);
+
+ if (bus->dhd) {
+#if defined(DEBUGGER) || defined (DHD_DSCOPE)
+ debugger_close();
+#endif /* DEBUGGER || DHD_DSCOPE */
+ dhdpcie_advertise_bus_remove(bus->dhd);
+ dongle_isolation = bus->dhd->dongle_isolation;
+ bus->dhd->is_pcie_watchdog_reset = FALSE;
+ dhdpcie_bus_remove_prep(bus);
+
+ if (bus->intr) {
+ dhdpcie_bus_intr_disable(bus);
+ dhdpcie_free_irq(bus);
+ }
+ dhd_deinit_bus_lp_state_lock(bus);
+ dhd_deinit_bar1_switch_lock(bus);
+ dhd_deinit_backplane_access_lock(bus);
+ dhd_deinit_pwr_req_lock(bus);
+#ifdef PCIE_INB_DW
+ dhd_deinit_dongle_ds_lock(bus);
+#endif /* PCIE_INB_DW */
+#ifdef BCMQT
+ if (IDMA_ACTIVE(bus->dhd)) {
+ /**
+			 * On FPGA, during the exit path, force-set the "IDMA Control Register"
+			 * to its default value 0x0. Otherwise host-dongle sync for IDMA fails
+			 * during the next IDMA initialization (without a system reboot).
+ */
+ buscorerev = bus->sih->buscorerev;
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ IDMAControl(buscorerev), ~0, 0);
+ }
+#endif /* BCMQT */
+ /**
+			 * dhdpcie_bus_release_dongle frees the bus->sih handle, which is needed to
+			 * access dongle registers.
+			 * dhd_detach will communicate with the dongle to delete flowrings, etc.,
+			 * so dhdpcie_bus_release_dongle should be called only after dhd_detach.
+ */
+ dhd_detach(bus->dhd);
+ dhdpcie_bus_release_dongle(bus, osh, dongle_isolation, TRUE);
+ dhd_free(bus->dhd);
+ bus->dhd = NULL;
+ }
+#ifdef DHD_EFI
+ else {
+ if (bus->intr) {
+ dhdpcie_bus_intr_disable(bus);
+ dhdpcie_free_irq(bus);
+ }
+ dhdpcie_bus_release_dongle(bus, osh, dongle_isolation, TRUE);
+ }
+#endif /* DHD_EFI */
+ /* unmap the regs and tcm here!! */
+ if (bus->regs) {
+ dhdpcie_bus_reg_unmap(osh, bus->regs, DONGLE_REG_MAP_SIZE);
+ bus->regs = NULL;
+ }
+ if (bus->tcm) {
+ dhdpcie_bus_reg_unmap(osh, bus->tcm, DONGLE_TCM_MAP_SIZE);
+ bus->tcm = NULL;
+ }
+
+ dhdpcie_bus_release_malloc(bus, osh);
+ /* Detach pcie shared structure */
+ if (bus->pcie_sh) {
+ MFREE(osh, bus->pcie_sh, sizeof(pciedev_shared_t));
+ }
+
+ if (bus->console.buf != NULL) {
+ MFREE(osh, bus->console.buf, bus->console.bufsize);
+ }
+
+#ifdef BCMINTERNAL
+ if (bus->msi_sim) {
+ DMA_UNMAP(osh, bus->msi_sim_phys, MSI_SIM_BUFSIZE, DMA_RX, 0, 0);
+ MFREE(osh, bus->msi_sim_addr, MSI_SIM_BUFSIZE);
+ }
+
+ /* free host fw buffer if there is any */
+ if (bus->hostfw_buf.va) {
+ DMA_FREE_CONSISTENT(osh, bus->hostfw_buf.va, bus->hostfw_buf._alloced,
+ bus->hostfw_buf.pa, bus->hostfw_buf.dmah);
+ memset(&bus->hostfw_buf, 0, sizeof(bus->hostfw_buf));
+ }
+#endif /* BCMINTERNAL */
+
+ /* Finally free bus info */
+ MFREE(osh, bus, sizeof(dhd_bus_t));
+
+ g_dhd_bus = NULL;
+ }
+
+ DHD_TRACE(("%s: Exit\n", __FUNCTION__));
+} /* dhdpcie_bus_release */
+
+void
+dhdpcie_bus_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation, bool reset_flag)
+{
+ DHD_TRACE(("%s: Enter bus->dhd %p bus->dhd->dongle_reset %d \n", __FUNCTION__,
+ bus->dhd, bus->dhd->dongle_reset));
+
+ if ((bus->dhd && bus->dhd->dongle_reset) && reset_flag) {
+ goto fail;
+ }
+
+ if (bus->is_linkdown) {
+ DHD_ERROR(("%s : Skip release dongle due to linkdown \n", __FUNCTION__));
+ goto fail;
+ }
+
+ if (bus->sih) {
+#ifdef BCMINTERNAL
+ if (bus->msi_sim) {
+ /* disable MSI */
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, configaddr), ~0, PCIE_CFG_MSICAP_OFFSET);
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, configdata), ~0,
+ PCIE_CFG_MSICAP_DISABLE_MSI);
+ }
+#endif /* BCMINTERNAL */
+
+ /*
+		 * Perform the dongle reset only if dongle isolation is not enabled.
+		 * On android platforms, dongle isolation will be enabled and
+		 * quiescing the dongle will be done using a DB7 trap.
+ */
+ if (!dongle_isolation &&
+ bus->dhd && !bus->dhd->is_pcie_watchdog_reset) {
+ dhdpcie_dongle_reset(bus);
+ }
+
+ /* Only for EFI this will be effective */
+ dhdpcie_dongle_flr_or_pwr_toggle(bus);
+
+ if (bus->ltrsleep_on_unload) {
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, u.pcie2.ltr_state), ~0, 0);
+ }
+
+ if (bus->sih->buscorerev == 13)
+ pcie_serdes_iddqdisable(bus->osh, bus->sih,
+ (sbpcieregs_t *) bus->regs);
+
+ /* For inbuilt drivers pcie clk req will be done by RC,
+ * so do not do clkreq from dhd
+ */
+#if defined(linux) || defined(LINUX)
+ if (dhd_download_fw_on_driverload)
+#endif /* linux || LINUX */
+ {
+ /* Disable CLKREQ# */
+ dhdpcie_clkreq(bus->osh, 1, 0);
+ }
+ }
+fail:
+ /* Resources should be freed */
+ if (bus->sih) {
+ si_detach(bus->sih);
+ bus->sih = NULL;
+ }
+ if (bus->vars && bus->varsz) {
+ MFREE(osh, bus->vars, bus->varsz);
+ bus->vars = NULL;
+ }
+
+ DHD_TRACE(("%s Exit\n", __FUNCTION__));
+}
+
+uint32
+dhdpcie_bus_cfg_read_dword(dhd_bus_t *bus, uint32 addr, uint32 size)
+{
+ uint32 data = OSL_PCI_READ_CONFIG(bus->osh, addr, size);
+ return data;
+}
+
+/** 32 bit config write */
+void
+dhdpcie_bus_cfg_write_dword(dhd_bus_t *bus, uint32 addr, uint32 size, uint32 data)
+{
+ OSL_PCI_WRITE_CONFIG(bus->osh, addr, size, data);
+}
+
+void
+dhdpcie_bus_cfg_set_bar0_win(dhd_bus_t *bus, uint32 data)
+{
+ OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR0_WIN, 4, data);
+}
+
+void
+dhdpcie_bus_dongle_setmemsize(struct dhd_bus *bus, int mem_size)
+{
+ int32 min_size = DONGLE_MIN_MEMSIZE;
+ /* Restrict the memsize to user specified limit */
+	DHD_ERROR(("user: Restricting the dongle ram size to %d, min accepted %d, max accepted %d\n",
+ mem_size, min_size, (int32)bus->orig_ramsize));
+ if ((mem_size > min_size) &&
+ (mem_size < (int32)bus->orig_ramsize)) {
+ bus->ramsize = mem_size;
+ } else {
+ DHD_ERROR(("%s: Invalid mem_size %d\n", __FUNCTION__, mem_size));
+ }
+}
+
+void
+dhdpcie_bus_release_malloc(dhd_bus_t *bus, osl_t *osh)
+{
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (bus->dhd && bus->dhd->dongle_reset)
+ return;
+
+ if (bus->vars && bus->varsz) {
+ MFREE(osh, bus->vars, bus->varsz);
+ }
+
+ DHD_TRACE(("%s: Exit\n", __FUNCTION__));
+ return;
+
+}
+
+/** Stop bus module: clear pending frames, disable data flow */
+void dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex)
+{
+ unsigned long flags;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (!bus->dhd)
+ return;
+
+ if (bus->dhd->busstate == DHD_BUS_DOWN) {
+ DHD_ERROR(("%s: already down by net_dev_reset\n", __FUNCTION__));
+ goto done;
+ }
+
+ DHD_STOP_RPM_TIMER(bus->dhd);
+
+ DHD_GENERAL_LOCK(bus->dhd, flags);
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
+ bus->dhd->busstate = DHD_BUS_DOWN;
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+ atomic_set(&bus->dhd->block_bus, TRUE);
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+
+ dhdpcie_bus_intr_disable(bus);
+
+ if (!bus->is_linkdown) {
+ uint32 status;
+ status = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4);
+ dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, status);
+ }
+
+#if defined(linux) || defined(LINUX)
+ if (!dhd_download_fw_on_driverload) {
+ dhd_dpc_kill(bus->dhd);
+ }
+#endif /* linux || LINUX */
+
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+ pm_runtime_disable(dhd_bus_to_dev(bus));
+ pm_runtime_set_suspended(dhd_bus_to_dev(bus));
+ pm_runtime_enable(dhd_bus_to_dev(bus));
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+
+ /* Clear rx control and wake any waiters */
+ /* XXX More important in disconnect, but no context? */
+ dhd_os_set_ioctl_resp_timeout(IOCTL_DISABLE_TIMEOUT);
+ dhd_wakeup_ioctl_event(bus->dhd, IOCTL_RETURN_ON_BUS_STOP);
+
+done:
+ return;
+}
+
+#ifdef DEVICE_TX_STUCK_DETECT
+void
+dhd_bus_send_msg_to_daemon(int reason)
+{
+ bcm_to_info_t to_info;
+
+ to_info.magic = BCM_TO_MAGIC;
+ to_info.reason = reason;
+
+ dhd_send_msg_to_daemon(NULL, (void *)&to_info, sizeof(bcm_to_info_t));
+ return;
+}
+
+#define DHD_MEMORY_SET_PATTERN 0xAA
+
+/**
+ * Scan the flow rings in the active list to check if any are stuck, and notify the application.
+ * The conditions for warn/stuck detection are:
+ * 1. Flow ring is active
+ * 2. There are packets to be consumed by the consumer (wr != rd)
+ * If 1 and 2 are true, then
+ * 3. Warn, if Tx completion is not received for a duration of DEVICE_TX_STUCK_WARN_DURATION
+ * 4. Trap FW, if Tx completion is not received for a duration of DEVICE_TX_STUCK_DURATION
+ */
+static void
+dhd_bus_device_tx_stuck_scan(dhd_bus_t *bus)
+{
+ uint32 tx_cmpl;
+ unsigned long list_lock_flags;
+ unsigned long ring_lock_flags;
+ dll_t *item, *prev;
+ flow_ring_node_t *flow_ring_node;
+ if_flow_lkup_t *if_flow_lkup = (if_flow_lkup_t *)bus->dhd->if_flow_lkup;
+ uint8 ifindex;
+#ifndef FW_HAS_AGING_LOGIC_ALL_IF
+ /**
+ * Since the aging logic is implemented only for INFRA in FW,
+ * DHD should monitor only INFRA for stuck detection.
+ */
+ uint8 role;
+#endif /* FW_HAS_AGING_LOGIC_ALL_IF */
+ bool ring_empty;
+ bool active;
+ uint8 status;
+
+ DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, list_lock_flags);
+
+ for (item = dll_tail_p(&bus->flowring_active_list);
+ !dll_end(&bus->flowring_active_list, item); item = prev) {
+
+ prev = dll_prev_p(item);
+
+ flow_ring_node = dhd_constlist_to_flowring(item);
+ ifindex = flow_ring_node->flow_info.ifindex;
+#ifndef FW_HAS_AGING_LOGIC_ALL_IF
+ role = if_flow_lkup[ifindex].role;
+ if (role != WLC_E_IF_ROLE_STA) {
+ continue;
+ }
+#endif /* FW_HAS_AGING_LOGIC_ALL_IF */
+ DHD_FLOWRING_LOCK(flow_ring_node->lock, ring_lock_flags);
+ tx_cmpl = flow_ring_node->tx_cmpl;
+ active = flow_ring_node->active;
+ status = flow_ring_node->status;
+ ring_empty = dhd_prot_is_cmpl_ring_empty(bus->dhd, flow_ring_node->prot_info);
+ DHD_FLOWRING_UNLOCK(flow_ring_node->lock, ring_lock_flags);
+ /*
+ * Need not monitor the flow ring if,
+ * 1. flow ring is empty
+ * 2. LINK is down
+ * 3. flow ring is not in FLOW_RING_STATUS_OPEN state
+ */
+ if ((ring_empty) || !(if_flow_lkup[ifindex].status) ||
+ (status != FLOW_RING_STATUS_OPEN)) {
+			/* reset counters, etc. */
+ flow_ring_node->stuck_count = 0;
+ flow_ring_node->tx_cmpl_prev = tx_cmpl;
+ continue;
+ }
+ /**
+ * DEVICE_TX_STUCK_WARN_DURATION, DEVICE_TX_STUCK_DURATION are integer
+ * representation of time, to decide if a flow is in warn state or stuck.
+ *
+ * flow_ring_node->stuck_count is an integer counter representing how long
+ * tx_cmpl is not received though there are pending packets in the ring
+ * to be consumed by the dongle for that particular flow.
+ *
+ * This method of determining time elapsed is helpful in sleep/wake scenarios.
+ * If host sleeps and wakes up, that sleep time is not considered into
+ * stuck duration.
+ */
+ if ((tx_cmpl == flow_ring_node->tx_cmpl_prev) && active) {
+
+ flow_ring_node->stuck_count++;
+
+ DHD_ERROR(("%s: flowid: %d tx_cmpl: %u tx_cmpl_prev: %u stuck_count: %d\n",
+ __func__, flow_ring_node->flowid, tx_cmpl,
+ flow_ring_node->tx_cmpl_prev, flow_ring_node->stuck_count));
+ dhd_prot_dump_ring_ptrs(flow_ring_node->prot_info);
+
+ switch (flow_ring_node->stuck_count) {
+ case DEVICE_TX_STUCK_WARN_DURATION:
+ /**
+ * Notify Device Tx Stuck Notification App about the
+ * device Tx stuck warning for this flowid.
+ * App will collect the logs required.
+ */
+ DHD_ERROR(("stuck warning for flowid: %d sent to app\n",
+ flow_ring_node->flowid));
+ dhd_bus_send_msg_to_daemon(REASON_DEVICE_TX_STUCK_WARNING);
+ break;
+ case DEVICE_TX_STUCK_DURATION:
+ /**
+ * Notify Device Tx Stuck Notification App about the
+ * device Tx stuck info for this flowid.
+ * App will collect the logs required.
+ */
+ DHD_ERROR(("stuck information for flowid: %d sent to app\n",
+ flow_ring_node->flowid));
+ dhd_bus_send_msg_to_daemon(REASON_DEVICE_TX_STUCK);
+ break;
+ default:
+ break;
+ }
+ } else {
+ flow_ring_node->tx_cmpl_prev = tx_cmpl;
+ flow_ring_node->stuck_count = 0;
+ }
+ }
+ DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, list_lock_flags);
+}
+/**
+ * schedules dhd_bus_device_tx_stuck_scan after DEVICE_TX_STUCK_CKECK_TIMEOUT,
+ * to determine if any flowid is stuck.
+ */
+static void
+dhd_bus_device_stuck_scan(dhd_bus_t *bus)
+{
+ uint32 time_stamp; /* in millisec */
+ uint32 diff;
+
+	/* Need not run the algorithm if the dongle has trapped */
+ if (bus->dhd->dongle_trap_occured) {
+ return;
+ }
+ time_stamp = OSL_SYSUPTIME();
+ diff = time_stamp - bus->device_tx_stuck_check;
+ if (diff > DEVICE_TX_STUCK_CKECK_TIMEOUT) {
+ dhd_bus_device_tx_stuck_scan(bus);
+ bus->device_tx_stuck_check = OSL_SYSUPTIME();
+ }
+ return;
+}
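+
+/* Timing illustration (example values assumed): the watchdog calls
+ * dhd_bus_device_stuck_scan() on every tick, but a ring scan only runs once
+ * per elapsed DEVICE_TX_STUCK_CKECK_TIMEOUT window. If that window were
+ * 1000ms and DEVICE_TX_STUCK_WARN_DURATION were 3, a warning would fire
+ * after ~3s of a non-empty ring with unchanged tx_cmpl. Host sleep time does
+ * not count, since stuck_count only advances when a scan actually observes
+ * the stalled ring.
+ */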
+#endif /* DEVICE_TX_STUCK_DETECT */
+
+/**
+ * Watchdog timer function.
+ * @param dhd Represents a specific hardware (dongle) instance that this DHD manages
+ */
+bool dhd_bus_watchdog(dhd_pub_t *dhd)
+{
+ unsigned long flags;
+ dhd_bus_t *bus = dhd->bus;
+
+ if (dhd_query_bus_erros(bus->dhd)) {
+ return FALSE;
+ }
+
+ DHD_GENERAL_LOCK(dhd, flags);
+ if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd) ||
+ DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhd)) {
+ DHD_GENERAL_UNLOCK(dhd, flags);
+ return FALSE;
+ }
+ DHD_BUS_BUSY_SET_IN_WD(dhd);
+ DHD_GENERAL_UNLOCK(dhd, flags);
+
+#ifdef DHD_PCIE_RUNTIMEPM
+ dhdpcie_runtime_bus_wake(dhd, TRUE, __builtin_return_address(0));
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+#ifdef BCMINTERNAL
+ if ((bus->msi_sim) && (++bus->polltick >= bus->pollrate)) {
+ uint32 val;
+ bus->polltick = 0;
+ val = *(uint32 *)bus->msi_sim_addr;
+ *(uint32 *)bus->msi_sim_addr = 0;
+ if (val) {
+ DHD_INFO(("calling dhdpcie_bus_isr 0x%04x\n", val));
+ dhdpcie_bus_isr(bus);
+ }
+ }
+#endif /* BCMINTERNAL */
+
+#if defined(DHD_EFI) && defined(DHD_INTR_POLL_PERIOD_DYNAMIC)
+ dhd_intr_poll_pkt_thresholds(dhd);
+#endif /* DHD_EFI && DHD_INTR_POLL_PERIOD_DYNAMIC */
+
+ /* Poll for console output periodically */
+ if (dhd->busstate == DHD_BUS_DATA &&
+ dhd->dhd_console_ms != 0 &&
+ DHD_CHK_BUS_NOT_IN_LPS(bus)) {
+ bus->console.count += dhd_watchdog_ms;
+ if (bus->console.count >= dhd->dhd_console_ms) {
+ bus->console.count -= dhd->dhd_console_ms;
+
+ if (MULTIBP_ENAB(bus->sih)) {
+ dhd_bus_pcie_pwr_req(bus);
+ }
+
+ /* Make sure backplane clock is on */
+ if (dhd->db7_trap.fw_db7w_trap_inprogress == FALSE) {
+ if (dhdpcie_bus_readconsole(bus) < 0) {
+ DHD_ERROR(("%s: disable dconpoll\n", __FUNCTION__));
+ dhd->dhd_console_ms = 0; /* On error, stop trying */
+ }
+ }
+
+ if (MULTIBP_ENAB(bus->sih)) {
+ dhd_bus_pcie_pwr_req_clear(bus);
+ }
+ }
+ }
+
+#ifdef DHD_READ_INTSTATUS_IN_DPC
+ if (bus->poll) {
+ bus->ipend = TRUE;
+ bus->dpc_sched = TRUE;
+ dhd_sched_dpc(bus->dhd); /* queue DPC now!! */
+ }
+#endif /* DHD_READ_INTSTATUS_IN_DPC */
+
+#ifdef DEVICE_TX_STUCK_DETECT
+ if (dhd->bus->dev_tx_stuck_monitor == TRUE) {
+ dhd_bus_device_stuck_scan(bus);
+ }
+#endif /* DEVICE_TX_STUCK_DETECT */
+
+ DHD_GENERAL_LOCK(dhd, flags);
+ DHD_BUS_BUSY_CLEAR_IN_WD(dhd);
+ dhd_os_busbusy_wake(dhd);
+ DHD_GENERAL_UNLOCK(dhd, flags);
+#if !defined(DHD_PCIE_RUNTIMEPM) && (defined(PCIE_OOB) || defined(PCIE_INB_DW))
+ dhd->bus->inb_dw_deassert_cnt += dhd_watchdog_ms;
+ if (dhd->bus->inb_dw_deassert_cnt >=
+ DHD_INB_DW_DEASSERT_MS) {
+ dhd->bus->inb_dw_deassert_cnt = 0;
+		/* Inband device wake is deasserted from the DPC context after DS_Exit is received,
+		 * but if no d2h interrupt is received at all, the DPC will not be scheduled
+		 * and inband DW will not be deasserted; hence DW is deasserted from the
+		 * watchdog thread every 250ms.
+ */
+ dhd_bus_dw_deassert(dhd);
+ }
+#endif /* !DHD_PCIE_RUNTIMEPM && PCIE_OOB || PCIE_INB_DW */
+ return TRUE;
+} /* dhd_bus_watchdog */
+
+#if defined(SUPPORT_MULTIPLE_REVISION)
+static int concate_revision_bcm4358(dhd_bus_t *bus, char *fw_path, char *nv_path)
+{
+ uint32 chiprev;
+#if defined(SUPPORT_MULTIPLE_CHIPS)
+ char chipver_tag[20] = "_4358";
+#else
+ char chipver_tag[10] = {0, };
+#endif /* SUPPORT_MULTIPLE_CHIPS */
+
+ chiprev = dhd_bus_chiprev(bus);
+ if (chiprev == 0) {
+ DHD_ERROR(("----- CHIP 4358 A0 -----\n"));
+ strcat(chipver_tag, "_a0");
+ } else if (chiprev == 1) {
+ DHD_ERROR(("----- CHIP 4358 A1 -----\n"));
+#if defined(SUPPORT_MULTIPLE_CHIPS) || defined(SUPPORT_MULTIPLE_MODULE_CIS)
+ strcat(chipver_tag, "_a1");
+#endif /* defined(SUPPORT_MULTIPLE_CHIPS) || defined(SUPPORT_MULTIPLE_MODULE_CIS) */
+ } else if (chiprev == 3) {
+ DHD_ERROR(("----- CHIP 4358 A3 -----\n"));
+#if defined(SUPPORT_MULTIPLE_CHIPS)
+ strcat(chipver_tag, "_a3");
+#endif /* SUPPORT_MULTIPLE_CHIPS */
+ } else {
+ DHD_ERROR(("----- Unknown chip version, ver=%x -----\n", chiprev));
+ }
+
+ strcat(fw_path, chipver_tag);
+
+#if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK)
+ if (chiprev == 1 || chiprev == 3) {
+ int ret = dhd_check_module_b85a();
+ if ((chiprev == 1) && (ret < 0)) {
+ memset(chipver_tag, 0x00, sizeof(chipver_tag));
+ strcat(chipver_tag, "_b85");
+ strcat(chipver_tag, "_a1");
+ }
+ }
+
+ DHD_ERROR(("%s: chipver_tag %s \n", __FUNCTION__, chipver_tag));
+#endif /* defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) */
+
+#if defined(SUPPORT_MULTIPLE_BOARD_REV)
+ if (system_rev >= 10) {
+ DHD_ERROR(("----- Board Rev [%d]-----\n", system_rev));
+ strcat(chipver_tag, "_r10");
+ }
+#endif /* SUPPORT_MULTIPLE_BOARD_REV */
+ strcat(nv_path, chipver_tag);
+
+ return 0;
+}
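+
+/* Illustrative outcome (hypothetical configuration): with chiprev 0 and none
+ * of the SUPPORT_MULTIPLE_* options set, both fw_path and nv_path simply gain
+ * "_a0". With SUPPORT_MULTIPLE_MODULE_CIS/USE_CID_CHECK, a chiprev 1 B85
+ * module rewrites the tag to "_b85_a1" before it is appended to nv_path, and
+ * SUPPORT_MULTIPLE_BOARD_REV with system_rev >= 10 further appends "_r10" to
+ * the nvram tag only.
+ */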
+
+static int concate_revision_bcm4359(dhd_bus_t *bus, char *fw_path, char *nv_path)
+{
+ uint32 chip_ver;
+ char chipver_tag[10] = {0, };
+#if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) && \
+ defined(SUPPORT_BCM4359_MIXED_MODULES)
+ int module_type = -1;
+#endif /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK && SUPPORT_BCM4359_MIXED_MODULES */
+
+ chip_ver = bus->sih->chiprev;
+ if (chip_ver == 4) {
+ DHD_ERROR(("----- CHIP 4359 B0 -----\n"));
+ strncat(chipver_tag, "_b0", strlen("_b0"));
+ } else if (chip_ver == 5) {
+ DHD_ERROR(("----- CHIP 4359 B1 -----\n"));
+ strncat(chipver_tag, "_b1", strlen("_b1"));
+ } else if (chip_ver == 9) {
+ DHD_ERROR(("----- CHIP 4359 C0 -----\n"));
+ strncat(chipver_tag, "_c0", strlen("_c0"));
+ } else {
+ DHD_ERROR(("----- Unknown chip version, ver=%x -----\n", chip_ver));
+ return -1;
+ }
+
+#if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) && \
+ defined(SUPPORT_BCM4359_MIXED_MODULES)
+ module_type = dhd_check_module_b90();
+
+ switch (module_type) {
+ case BCM4359_MODULE_TYPE_B90B:
+ strcat(fw_path, chipver_tag);
+ break;
+ case BCM4359_MODULE_TYPE_B90S:
+ default:
+ /*
+			 * If the .cid.info file does not exist,
+			 * force loading the B90S FW for initial MFG boot-up.
+ */
+ if (chip_ver == 5) {
+ strncat(fw_path, "_b90s", strlen("_b90s"));
+ }
+ strcat(fw_path, chipver_tag);
+ strcat(nv_path, chipver_tag);
+ break;
+ }
+#else /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK && SUPPORT_BCM4359_MIXED_MODULES */
+ strcat(fw_path, chipver_tag);
+ strcat(nv_path, chipver_tag);
+#endif /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK && SUPPORT_BCM4359_MIXED_MODULES */
+
+ return 0;
+}
+
+#define NVRAM_FEM_MURATA "_murata"
+static int
+concate_revision_from_cisinfo(dhd_bus_t *bus, char *fw_path, char *nv_path)
+{
+ int ret = BCME_OK;
+#if defined(SUPPORT_MIXED_MODULES)
+#if defined(USE_CID_CHECK)
+ char module_type[MAX_VNAME_LEN];
+ naming_info_t *info = NULL;
+ bool is_murata_fem = FALSE;
+
+ memset(module_type, 0, sizeof(module_type));
+
+ if (dhd_check_module_bcm(module_type,
+ MODULE_NAME_INDEX_MAX, &is_murata_fem) == BCME_OK) {
+ info = dhd_find_naming_info(bus->dhd, module_type);
+ } else {
+		/* in case .cid.info doesn't exist */
+ info = dhd_find_naming_info_by_chip_rev(bus->dhd, &is_murata_fem);
+ }
+
+#ifdef BCM4361_CHIP
+ if (bcmstrnstr(nv_path, PATH_MAX, "_murata", 7)) {
+ is_murata_fem = FALSE;
+ }
+#endif /* BCM4361_CHIP */
+
+ if (info) {
+#ifdef BCM4361_CHIP
+ if (is_murata_fem) {
+ strncat(nv_path, NVRAM_FEM_MURATA, strlen(NVRAM_FEM_MURATA));
+ }
+#endif /* BCM4361_CHIP */
+ strncat(nv_path, info->nvram_ext, strlen(info->nvram_ext));
+ strncat(fw_path, info->fw_ext, strlen(info->fw_ext));
+ } else {
+		DHD_ERROR(("%s: failed to find extension for nvram and firmware\n", __FUNCTION__));
+ ret = BCME_ERROR;
+ }
+#endif /* USE_CID_CHECK */
+#ifdef USE_DIRECT_VID_TAG
+ int revid = bus->sih->chiprev;
+ unsigned char chipstr[MAX_VID_LEN];
+
+ memset(chipstr, 0, sizeof(chipstr));
+ snprintf(chipstr, sizeof(chipstr), "_4389");
+
+ /* write chipstr/vid into nvram tag */
+ ret = concate_nvram_by_vid(bus, nv_path, chipstr);
+ /* write chiprev into FW tag */
+ if (ret == BCME_OK) {
+ if (revid == 3) {
+ strncat(fw_path, A0_REV, strlen(fw_path));
+ DHD_ERROR(("%s: fw_path : %s\n", __FUNCTION__, fw_path));
+ } else if (revid == 1) {
+ strncat(fw_path, B0_REV, strlen(fw_path));
+ DHD_ERROR(("%s: fw_path : %s\n", __FUNCTION__, fw_path));
+ } else {
+ DHD_ERROR(("%s: INVALID CHIPREV %d\n", __FUNCTION__, revid));
+ }
+ }
+#endif /* USE_DIRECT_VID_TAG */
+#else /* SUPPORT_MIXED_MODULE */
+ char chipver_tag[10] = {0, };
+
+ strcat(fw_path, chipver_tag);
+ strcat(nv_path, chipver_tag);
+#endif /* SUPPORT_MIXED_MODULE */
+
+ return ret;
+}
+
+int
+concate_revision(dhd_bus_t *bus, char *fw_path, char *nv_path)
+{
+ int res = 0;
+
+ if (!bus || !bus->sih) {
+ DHD_ERROR(("%s:Bus is Invalid\n", __FUNCTION__));
+ return -1;
+ }
+
+ if (!fw_path || !nv_path) {
+ DHD_ERROR(("fw_path or nv_path is null.\n"));
+ return res;
+ }
+
+ switch (si_chipid(bus->sih)) {
+
+ case BCM43569_CHIP_ID:
+ case BCM4358_CHIP_ID:
+ res = concate_revision_bcm4358(bus, fw_path, nv_path);
+ break;
+ case BCM4355_CHIP_ID:
+ case BCM4359_CHIP_ID:
+ res = concate_revision_bcm4359(bus, fw_path, nv_path);
+ break;
+ case BCM4361_CHIP_ID:
+ case BCM4347_CHIP_ID:
+ case BCM4375_CHIP_ID:
+ case BCM4389_CHIP_ID:
+ res = concate_revision_from_cisinfo(bus, fw_path, nv_path);
+ break;
+ default:
+ DHD_ERROR(("REVISION SPECIFIC feature is not required\n"));
+ return res;
+ }
+
+ return res;
+}
+#endif /* SUPPORT_MULTIPLE_REVISION */
+
+uint16
+dhd_get_chipid(struct dhd_bus *bus)
+{
+ if (bus && bus->sih) {
+ return (uint16)si_chipid(bus->sih);
+ } else if (bus && bus->regs) {
+ chipcregs_t *cc = (chipcregs_t *)bus->regs;
+ uint w, chipid;
+
+ /* Set bar0 window to si_enum_base */
+ dhdpcie_bus_cfg_set_bar0_win(bus, si_enum_base(0));
+
+ w = R_REG(bus->osh, &cc->chipid);
+ chipid = w & CID_ID_MASK;
+
+ return (uint16)chipid;
+ } else {
+ return 0;
+ }
+}
+
+/**
+ * Loads firmware given by caller supplied path and nvram image into PCIe dongle.
+ *
+ * BCM_REQUEST_FW specific :
+ * Given the chip type, determines the file paths to use within /lib/firmware/brcm/ containing
+ * firmware and nvm for that chip. If the download fails, retries download with a different nvm file
+ *
+ * BCMEMBEDIMAGE specific:
+ * If bus->fw_path is empty, or if the download of bus->fw_path failed, firmware contained in header
+ * file will be used instead.
+ *
+ * @return BCME_OK on success
+ */
+int
+dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh,
+ char *pfw_path, char *pnv_path,
+ char *pclm_path, char *pconf_path)
+{
+ int ret;
+
+ bus->fw_path = pfw_path;
+ bus->nv_path = pnv_path;
+ bus->dhd->clm_path = pclm_path;
+ bus->dhd->conf_path = pconf_path;
+
+#if defined(SUPPORT_MULTIPLE_REVISION)
+ if (concate_revision(bus, bus->fw_path, bus->nv_path) != 0) {
+		DHD_ERROR(("%s: failed to concatenate revision\n",
+			__FUNCTION__));
+ /* Proceed if SUPPORT_MULTIPLE_CHIPS is enabled */
+#ifndef SUPPORT_MULTIPLE_CHIPS
+ return BCME_BADARG;
+#endif /* !SUPPORT_MULTIPLE_CHIPS */
+ }
+#endif /* SUPPORT_MULTIPLE_REVISION */
+
+#if defined(DHD_BLOB_EXISTENCE_CHECK)
+ dhd_set_blob_support(bus->dhd, bus->fw_path);
+#endif /* DHD_BLOB_EXISTENCE_CHECK */
+
+ DHD_ERROR(("%s: firmware path=%s, nvram path=%s\n",
+ __FUNCTION__, bus->fw_path, bus->nv_path));
+#if defined(LINUX) || defined(linux)
+ dhdpcie_dump_resource(bus);
+#endif /* LINUX || linux */
+
+ ret = dhdpcie_download_firmware(bus, osh);
+
+ return ret;
+}
+
+void
+dhd_set_bus_params(struct dhd_bus *bus)
+{
+ if (bus->dhd->conf->dhd_poll >= 0) {
+ bus->poll = bus->dhd->conf->dhd_poll;
+ if (!bus->pollrate)
+ bus->pollrate = 1;
+ printf("%s: set polling mode %d\n", __FUNCTION__, bus->dhd->conf->dhd_poll);
+ }
+ if (bus->dhd->conf->d2h_intr_control >= 0)
+ bus->d2h_intr_control = bus->dhd->conf->d2h_intr_control;
+ printf("d2h_intr_method -> %s(%d); d2h_intr_control -> %s(%d)\n",
+ bus->d2h_intr_method ? "PCIE_MSI" : "PCIE_INTX", bus->d2h_intr_method,
+ bus->d2h_intr_control ? "HOST_IRQ" : "D2H_INTMASK", bus->d2h_intr_control);
+}
+
+/**
+ * Loads firmware given by 'bus->fw_path' into PCIe dongle.
+ *
+ * BCM_REQUEST_FW specific:
+ * Given the chip type, determines the file paths within /lib/firmware/brcm/ containing the
+ * firmware and NVRAM for that chip. If the download fails, the download is retried with a
+ * different NVRAM file.
+ *
+ * BCMEMBEDIMAGE specific:
+ * If bus->fw_path is empty, or if the download of bus->fw_path failed, the firmware contained
+ * in a header file is used instead.
+ *
+ * @return BCME_OK on success
+ */
+static int
+dhdpcie_download_firmware(struct dhd_bus *bus, osl_t *osh)
+{
+ int ret = 0;
+#if defined(BCM_REQUEST_FW)
+ uint chipid = bus->sih->chip;
+ uint revid = bus->sih->chiprev;
+ char fw_path[64] = "/lib/firmware/brcm/bcm"; /* path to firmware image */
+ char nv_path[64]; /* path to nvram vars file */
+ bus->fw_path = fw_path;
+ bus->nv_path = nv_path;
+ switch (chipid) {
+ case BCM43570_CHIP_ID:
+ bcmstrncat(fw_path, "43570", 5);
+ switch (revid) {
+ case 0:
+ bcmstrncat(fw_path, "a0", 2);
+ break;
+ case 2:
+ bcmstrncat(fw_path, "a2", 2);
+ break;
+ default:
+ DHD_ERROR(("%s: revid is not found %x\n", __FUNCTION__,
+ revid));
+ break;
+ }
+ break;
+ default:
+ DHD_ERROR(("%s: unsupported device %x\n", __FUNCTION__,
+ chipid));
+ return 0;
+ }
+ /* load board specific nvram file */
+ snprintf(bus->nv_path, sizeof(nv_path), "%s.nvm", fw_path);
+ /* load firmware */
+ snprintf(bus->fw_path, sizeof(fw_path), "%s-firmware.bin", fw_path);
+#endif /* BCM_REQUEST_FW */
+
+ DHD_OS_WAKE_LOCK(bus->dhd);
+
+ dhd_conf_set_path_params(bus->dhd, bus->fw_path, bus->nv_path);
+ dhd_set_bus_params(bus);
+
+ ret = _dhdpcie_download_firmware(bus);
+
+ DHD_OS_WAKE_UNLOCK(bus->dhd);
+ return ret;
+} /* dhdpcie_download_firmware */
+
+#ifdef BCMINTERNAL
+#define PCIE_HYBRIDFW_MAGICNUM 0x434F464Cu
+#define PCIE_HYBRIDFW_HOSTOFFSET_MASK 0xFFC00000u
+#define PCIE_HYBRIDFW_TYPE_DNGL 0u
+#define PCIE_HYBRIDFW_TYPE_HOST 1u
+#define PCIE_HYBRIDFW_TYPE_DNGLTBL 2u
+#define PCIE_HYBRIDFW_TYPE_HOSTTBL 3u
+#define SBtoPCIETranslation2 0xF0
+#define SBtoPCIETranslation2Upper 0xF4
+#define SBtoPCIETranslation3 0xF8
+#define SBtoPCIETranslation3Upper 0xFC
+#define SBtoPCIETranslation0 0x100
+#define SBtoPCIETranslation1 0x104
+#define SBtoPCIETranslation0Upper 0x10C
+#define SBtoPCIETranslation1Upper 0x110
+
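+/* For orientation: the container format the parser below expects, inferred
+ * from the code rather than from a spec. All fields are little endian and
+ * the type values are the PCIE_HYBRIDFW_TYPE_* constants above. Guarded out
+ * so it is never built.
+ */
+#if 0
+struct example_hybridfw_layout {
+	unsigned int magic;	/* PCIE_HYBRIDFW_MAGICNUM */
+	/* followed, until end of file, by blocks of the form:
+	 *   unsigned int type;          -- DNGL / HOST / DNGLTBL / HOSTTBL
+	 *   unsigned int len;           -- payload length in bytes
+	 *   unsigned char payload[len];
+	 */
+};
+#endif
+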
+/* Get length of each portion of hybrid fw binary from the header */
+static int
+dhdpcie_hybridfw_get_next_block(char * fptr, int *fsize, uint32 *type, uint32 *len)
+{
+ struct portion_hdr {
+ uint32 type;
+ uint32 len;
+ } hdr;
+ int ret;
+
+ /* read and verify header */
+ if (*fsize <= sizeof(hdr)) {
+ return BCME_BADLEN;
+ }
+
+ ret = dhd_os_get_image_block((char *)&hdr, sizeof(hdr), fptr);
+ if (ret <= 0) {
+ return BCME_ERROR;
+ }
+
+ *fsize -= sizeof(hdr);
+ *type = ltoh32(hdr.type);
+ *len = ltoh32(hdr.len);
+
+ if ((*len > (uint32)*fsize) || ((int)*len < 0)) {
+ return BCME_BADLEN;
+ }
+
+ DHD_INFO(("%s Found section %d with length %d\n", __FUNCTION__, hdr.type, hdr.len));
+
+ return BCME_OK;
+}
+
+/* Replace host offload functions' pointers */
+static int
+dhdpcie_hybridfw_ptrrpl(char *fw, uint fw_sz, uint32 *jmptbl, uint jmptbl_sz,
+ dmaaddr_t hbuf_pa, uint32 hbuf_len)
+{
+ uint32 *p_ptr;
+ uint32 host_addr;
+ int ret = BCME_OK;
+
+ if (jmptbl_sz % 4) {
+ DHD_ERROR(("%s table size %u not 4 bytes aligned\n", __FUNCTION__, jmptbl_sz));
+ return BCME_ERROR;
+ }
+
+ host_addr = PCIEDEV_ARM_ADDR(PHYSADDRLO(hbuf_pa), PCIEDEV_TRANS_WIN_HOSTMEM);
+ for (; jmptbl_sz > 0; jmptbl_sz -= 4, jmptbl++) {
+ if (*jmptbl >= fw_sz) {
+ DHD_ERROR(("%s offset %u >= fw size %u\n", __FUNCTION__, *jmptbl, fw_sz));
+ ret = BCME_ERROR;
+ break;
+ }
+ p_ptr = (uint32 *)(fw + *jmptbl);
+ *p_ptr &= ~(uint32)PCIE_HYBRIDFW_HOSTOFFSET_MASK;
+ if (*p_ptr > hbuf_len) {
+ DHD_ERROR(("%s function offset %x >= host buffer len %x\n",
+ __FUNCTION__, *p_ptr, hbuf_len));
+ ret = BCME_ERROR;
+ break;
+ }
+ *p_ptr += host_addr;
+ }
+
+ return ret;
+}
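+
+/* A minimal sketch of the patch performed above: each jump-table entry is
+ * the image offset of a 32-bit word whose upper bits (the
+ * PCIE_HYBRIDFW_HOSTOFFSET_MASK region) are cleared and rebased onto the
+ * host buffer's backplane address. Illustrative only (the mask is repeated
+ * here as a literal); guarded out so it is never built.
+ */
+#if 0
+static void example_patch_one_entry(unsigned char *fw, unsigned int offset,
+	unsigned int host_bp_addr)
+{
+	unsigned int *word = (unsigned int *)(fw + offset);
+
+	*word &= ~0xFFC00000u;	/* strip PCIE_HYBRIDFW_HOSTOFFSET_MASK */
+	*word += host_bp_addr;	/* rebase onto the host buffer window */
+}
+#endif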
+
+/* configure back plane to pcie translation window */
+static void
+dhdpcie_sbtopcie_translation_config(struct dhd_bus *bus, int bp_window, dmaaddr_t addr)
+{
+ uint32 trans_reg_offset, trans_u_reg_offset;
+
+ switch (bp_window) {
+ case PCIEDEV_TRANS_WIN_0:
+ trans_reg_offset = SBtoPCIETranslation0;
+ trans_u_reg_offset = SBtoPCIETranslation0Upper;
+ break;
+
+ case PCIEDEV_TRANS_WIN_1:
+ trans_reg_offset = SBtoPCIETranslation1;
+ trans_u_reg_offset = SBtoPCIETranslation1Upper;
+ break;
+
+ case PCIEDEV_TRANS_WIN_2:
+ trans_reg_offset = SBtoPCIETranslation2;
+ trans_u_reg_offset = SBtoPCIETranslation2Upper;
+ break;
+
+ case PCIEDEV_TRANS_WIN_3:
+ trans_reg_offset = SBtoPCIETranslation3;
+ trans_u_reg_offset = SBtoPCIETranslation3Upper;
+ break;
+
+ default:
+ DHD_ERROR(("%s Invalid bp translation window %d\n",
+ __FUNCTION__, bp_window));
+ return;
+ }
+
+ si_corereg(bus->sih, bus->sih->buscoreidx, trans_reg_offset, ~0,
+ ((PHYSADDRLO(addr) & PCIEDEV_HOSTADDR_MAP_WIN_MASK) | 0xC));
+ si_corereg(bus->sih, bus->sih->buscoreidx, trans_u_reg_offset, ~0, PHYSADDRHI(addr));
+}
+
+/**
+ * hybrid firmware download handler
+ *
+ * Parse, prepare and download a hybrid firmware
+ * - Identify a hybrid firmware
+ * - Place the host offload portion in an allocated DMA consistent buffer
+ * - Modify the host portion function pointers according to the info table
+ */
+static int
+dhdpcie_hybridfw_download(struct dhd_bus *bus, char *fp)
+{
+ uint32 magic_num;
+ int ret = BCME_OK;
+ dhd_dma_buf_t *hstfw = &bus->hostfw_buf;
+ char *dnglfw = NULL, *dngltbl = NULL, *hsttbl = NULL;
+ int dnglfw_sz = 0, dngltbl_sz = 0, hsttbl_sz = 0;
+ int fsize;
+ int offset = 0;
+ uint32 type = 0, len = 0;
+ void * ptr = NULL;
+
+ fsize = dhd_os_get_image_size(fp);
+
+ /* Verify magic number */
+ if (fsize < sizeof(magic_num)) {
+ return BCME_UNSUPPORTED;
+ }
+ ret = dhd_os_get_image_block((char *)&magic_num, sizeof(magic_num), fp);
+ if (ret <= 0) {
+ return BCME_ERROR;
+ }
+ magic_num = ltoh32(magic_num);
+ if (magic_num != PCIE_HYBRIDFW_MAGICNUM) {
+ return BCME_UNSUPPORTED;
+ }
+ fsize -= sizeof(magic_num);
+
+ do {
+ ret = dhdpcie_hybridfw_get_next_block(fp, &fsize, &type, &len);
+ if (ret != BCME_OK) {
+ break;
+ }
+
+ if (len == 0) {
+ continue;
+ }
+
+ if ((ptr = MALLOC(bus->dhd->osh, len)) == NULL) {
+ ret = BCME_NOMEM;
+ break;
+ }
+
+ len = dhd_os_get_image_block(ptr, len, fp);
+ if (len <= 0) {
+ MFREE(bus->dhd->osh, ptr, len);
+ ret = BCME_ERROR;
+ break;
+ }
+ fsize -= len;
+
+ switch (type) {
+ case PCIE_HYBRIDFW_TYPE_DNGL:
+ /* cannot have more than one RAM image block */
+ if (dnglfw_sz) {
+ MFREE(bus->dhd->osh, ptr, len);
+ ret = BCME_ERROR;
+ break;
+ }
+
+ /* RAM portion of the FW image */
+ dnglfw = ptr;
+ dnglfw_sz = len;
+
+ if ((uint32)len > bus->ramsize) {
+ ret = BCME_BADLEN;
+ break;
+ }
+ break;
+
+ case PCIE_HYBRIDFW_TYPE_HOST:
+ /* Host portion of the FW image.
+ * If a -hostmem- fw has already been loaded and its buffer can
+ * accommodate the new host portion, reuse the allocated buffer.
+ * If the existing buffer is too small, or on a freshly loaded
+ * dhd, allocate a new coherent buffer.
+ */
+ if (hstfw->va) {
+ if (hstfw->len >= len) {
+ hstfw->len = len;
+ } else {
+ DMA_FREE_CONSISTENT(bus->dhd->osh, hstfw->va,
+ hstfw->_alloced, hstfw->pa, hstfw->dmah);
+ memset(hstfw, 0, sizeof(*hstfw));
+ }
+ }
+
+ if (hstfw->va == NULL) {
+ hstfw->len = len;
+ hstfw->va = DMA_ALLOC_CONSISTENT(bus->dhd->osh, hstfw->len,
+ 4, &hstfw->_alloced, &hstfw->pa, &hstfw->dmah);
+ if (hstfw->va == NULL) {
+ MFREE(bus->dhd->osh, ptr, len);
+ ret = BCME_NOMEM;
+ break;
+ }
+ }
+
+ ret = memcpy_s(hstfw->va, hstfw->len, ptr, len);
+ MFREE(bus->dhd->osh, ptr, len);
+ break;
+
+ case PCIE_HYBRIDFW_TYPE_DNGLTBL:
+ /* cannot have more than one RAM image relocation table */
+ if (dngltbl) {
+ MFREE(bus->dhd->osh, ptr, len);
+ ret = BCME_ERROR;
+ break;
+ }
+
+ /* RAM image relocation information */
+ dngltbl = ptr;
+ dngltbl_sz = len;
+
+ /* RAM image should be included before RAM reloc info */
+ if ((dnglfw == NULL) || (hstfw->va == NULL)) {
+ ret = BCME_ERROR;
+ break;
+ }
+ /* Store the fw assumed host memory base */
+ bus->hostfw_base = *(uint32 *)(dnglfw + *(uint32 *)dngltbl);
+ bus->hostfw_base &= PCIE_HYBRIDFW_HOSTOFFSET_MASK;
+
+ DHD_INFO(("%s FW assumed host base address is %08x\n",
+ __FUNCTION__, bus->hostfw_base));
+
+ ret = dhdpcie_hybridfw_ptrrpl(dnglfw, dnglfw_sz,
+ (uint32 *)dngltbl, dngltbl_sz, hstfw->pa, hstfw->len);
+ break;
+
+ case PCIE_HYBRIDFW_TYPE_HOSTTBL:
+ /* cannot have more than one host image relocation info */
+ if (hsttbl) {
+ MFREE(bus->dhd->osh, ptr, len);
+ ret = BCME_ERROR;
+ break;
+ }
+ /* Host image relocation information */
+ hsttbl = ptr;
+ hsttbl_sz = len;
+
+ /* Host image should be included before host reloc info */
+ if (hstfw->va == NULL) {
+ ret = BCME_ERROR;
+ break;
+ }
+ ret = dhdpcie_hybridfw_ptrrpl(hstfw->va, hstfw->len,
+ (uint32 *)hsttbl, hsttbl_sz, hstfw->pa, hstfw->len);
+ break;
+
+ default:
+ ret = BCME_ERROR;
+ break;
+ }
+
+ } while (!ret && (fsize > 0));
+
+ if (ret != BCME_OK) {
+ DHD_ERROR(("%s: err:%d, fsize:%d, t:%d, l:%d\n",
+ __FUNCTION__, ret, fsize, type, len));
+ goto exit;
+ }
+
+ if (dnglfw == NULL) {
+ DHD_ERROR(("%s: Dongle image should be present in combo file\n",
+ __FUNCTION__));
+ ret = BCME_ERROR;
+ goto exit;
+ }
+
+ if (hstfw->va) {
+ OSL_CACHE_FLUSH((void *)hstfw->va, hstfw->len);
+ }
+
+ /* for CR4/CA7 store the reset instruction to be written in 0 */
+ if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0) || si_setcore(bus->sih, ARMCA7_CORE_ID, 0)) {
+ bus->resetinstr = *(((uint32*)dnglfw));
+ /* Add start of RAM address to the address given by user */
+ offset += bus->dongle_ram_base;
+ }
+
+ ret = dhdpcie_bus_membytes(bus, TRUE, offset, (uint8 *)dnglfw, dnglfw_sz);
+ if (ret) {
+ DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
+ __FUNCTION__, ret, dnglfw_sz, offset));
+ goto exit;
+ }
+
+ /* Configure the sbtopcie translation register so the ARM can access the host offload area */
+ bus->bp_base = PCIEDEV_ARM_ADDR(PHYSADDRLO(hstfw->pa), PCIEDEV_TRANS_WIN_HOSTMEM);
+ dhdpcie_sbtopcie_translation_config(bus, PCIEDEV_TRANS_WIN_HOSTMEM, hstfw->pa);
+
+ /* Check if the buffer is crossing 32MB Window */
+ if (((bus->bp_base + hstfw->len) & PCIEDEV_ARM_ADDR_SPACE) <
+ (bus->bp_base & PCIEDEV_ARM_ADDR_SPACE)) {
+ DHD_ERROR(("Host memomery crissing 32MB window."
+ " Entire hostmem block should be within continuous 32MB block"));
+ ret = BCME_ERROR;
+ goto exit;
+ }
+
+ DHD_ERROR(("%s %d bytes host offload firmware placed at pa %08x %08x\n",
+ __FUNCTION__, hstfw->len,
+ (uint)PHYSADDRHI(hstfw->pa), (uint)PHYSADDRLO(hstfw->pa)));
+
+exit:
+ if (dnglfw) {
+ MFREE(bus->dhd->osh, dnglfw, dnglfw_sz);
+ }
+
+ if (dngltbl) {
+ MFREE(bus->dhd->osh, dngltbl, dngltbl_sz);
+ }
+
+ if (hsttbl) {
+ MFREE(bus->dhd->osh, hsttbl, hsttbl_sz);
+ }
+
+ if (ret && hstfw->va) {
+ DMA_FREE_CONSISTENT(bus->dhd->osh, hstfw->va, hstfw->_alloced,
+ hstfw->pa, hstfw->dmah);
+ memset(hstfw, 0, sizeof(*hstfw));
+ }
+
+ return ret;
+}
+#endif /* BCMINTERNAL */
+
+/**
+ * Downloads a file containing firmware into dongle memory. In case of a .bea file, the DHD
+ * is updated with the event logging partitions within that file as well.
+ *
+ * @param pfw_path Path to .bin or .bea file
+ */
+static int
+dhdpcie_download_code_file(struct dhd_bus *bus, char *pfw_path)
+{
+ int bcmerror = BCME_ERROR;
+ int offset = 0;
+ int len = 0;
+ bool store_reset;
+ char *imgbuf = NULL; /**< XXX a file pointer, contradicting its name and type */
+ uint8 *memblock = NULL, *memptr = NULL;
+#ifdef CHECK_DOWNLOAD_FW
+ uint8 *memptr_tmp = NULL; // terence: check downloaded firmware is correct
+#endif
+ int offset_end = bus->ramsize;
+ uint32 file_size = 0, read_len = 0;
+
+#if defined(CACHE_FW_IMAGES)
+ int buf_offset, total_len, residual_len;
+ char * dnld_buf;
+#endif /* CACHE_FW_IMAGE */
+
+#if defined(linux) || defined(LINUX)
+#if defined(DHD_FW_MEM_CORRUPTION)
+ if (dhd_bus_get_fw_mode(bus->dhd) == DHD_FLAG_MFG_MODE) {
+ dhd_tcm_test_enable = TRUE;
+ } else {
+ dhd_tcm_test_enable = FALSE;
+ }
+#endif /* DHD_FW_MEM_CORRUPTION */
+ DHD_ERROR(("%s: dhd_tcm_test_enable %u\n", __FUNCTION__, dhd_tcm_test_enable));
+ /* TCM check */
+ if (dhd_tcm_test_enable && !dhd_bus_tcm_test(bus)) {
+ DHD_ERROR(("dhd_bus_tcm_test failed\n"));
+ bcmerror = BCME_ERROR;
+ goto err;
+ }
+#endif /* LINUX || linux */
+#ifndef DHD_EFI
+ DHD_ERROR(("%s: download firmware %s\n", __FUNCTION__, pfw_path));
+#endif /* DHD_EFI */
+
+ /* Should succeed in opening the image if it was actually supplied
+ * through a registry entry or module parameter.
+ */
+ imgbuf = dhd_os_open_image1(bus->dhd, pfw_path);
+ if (imgbuf == NULL) {
+ printf("%s: Open firmware file failed %s\n", __FUNCTION__, pfw_path);
+ goto err;
+ }
+
+ file_size = dhd_os_get_image_size(imgbuf);
+ if (!file_size) {
+ DHD_ERROR(("%s: get file size fails ! \n", __FUNCTION__));
+ goto err;
+ }
+
+#ifdef BCMINTERNAL
+ /* dhdpcie_hybridfw_download() returns BCME_UNSUPPORTED if the binary
+ * does not have a recognizable format. In that case, fall through to
+ * the regular download routine below. For BCME_OK or any other error,
+ * return and propagate the result.
+ */
+ bcmerror = dhdpcie_hybridfw_download(bus, imgbuf);
+ if (bcmerror != BCME_UNSUPPORTED) {
+ goto err;
+ }
+
+ /* Close and re-open the image file to reset the file pointer.
+ * Needed because dhdpcie_hybridfw_download() already read 4 bytes from the file.
+ */
+ dhd_os_close_image1(bus->dhd, imgbuf);
+ imgbuf = dhd_os_open_image1(bus->dhd, pfw_path);
+ if (imgbuf == NULL) {
+ goto err;
+ }
+#endif /* BCMINTERNAL */
+
+ memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN);
+ if (memblock == NULL) {
+ DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK));
+ bcmerror = BCME_NOMEM;
+ goto err;
+ }
+#ifdef CHECK_DOWNLOAD_FW
+ if (bus->dhd->conf->fwchk) {
+ memptr_tmp = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN);
+ if (memptr_tmp == NULL) {
+ DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK));
+ goto err;
+ }
+ }
+#endif
+ if ((uint32)(uintptr)memblock % DHD_SDALIGN) {
+ memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN));
+ }
+
+ /* check if CR4/CA7 */
+ store_reset = (si_setcore(bus->sih, ARMCR4_CORE_ID, 0) ||
+ si_setcore(bus->sih, ARMCA7_CORE_ID, 0));
+#if defined(CACHE_FW_IMAGES)
+ total_len = bus->ramsize;
+ dhd_os_close_image(imgbuf);
+ imgbuf = NULL;
+ buf_offset = 0;
+ bcmerror = dhd_get_download_buffer(bus->dhd, pfw_path, FW, &dnld_buf, &total_len);
+ if (bcmerror != BCME_OK) {
+ DHD_ERROR(("%s: dhd_get_download_buffer failed (%d)\n", __FUNCTION__, bcmerror));
+ goto err;
+ }
+ residual_len = total_len;
+#endif /* CACHE_FW_IMAGE */
+ /* Download image with MEMBLOCK size */
+#if defined(CACHE_FW_IMAGES)
+ while (residual_len)
+#else
+ /* Download image with MEMBLOCK size */
+ while ((len = dhd_os_get_image_block((char*)memptr, MEMBLOCK, imgbuf)))
+#endif /* CACHE_FW_IMAGE */
+ {
+#if defined(CACHE_FW_IMAGES)
+ len = MIN(residual_len, MEMBLOCK);
+ memcpy(memptr, dnld_buf + buf_offset, len);
+ residual_len -= len;
+ buf_offset += len;
+#else
+ if (len < 0) {
+ DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n", __FUNCTION__, len));
+ bcmerror = BCME_ERROR;
+ goto err;
+ }
+#endif /* CACHE_FW_IMAGE */
+
+ read_len += len;
+ if (read_len > file_size) {
+ DHD_ERROR(("%s: WARNING! reading beyond EOF, len=%d; read_len=%u;"
+ " file_size=%u truncating len to %d \n", __FUNCTION__,
+ len, read_len, file_size, (len - (read_len - file_size))));
+ len -= (read_len - file_size);
+ }
+
+ /* if address is 0, store the reset instruction to be written in 0 */
+ if (store_reset) {
+ ASSERT(offset == 0);
+ bus->resetinstr = *(((uint32*)memptr));
+ /* Add start of RAM address to the address given by user */
+ offset += bus->dongle_ram_base;
+ offset_end += offset;
+ store_reset = FALSE;
+ }
+
+ bcmerror = dhdpcie_bus_membytes(bus, TRUE, offset, (uint8 *)memptr, len);
+ if (bcmerror) {
+ DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
+ __FUNCTION__, bcmerror, MEMBLOCK, offset));
+ goto err;
+ }
+
+#ifdef CHECK_DOWNLOAD_FW
+ if (bus->dhd->conf->fwchk) {
+ bcmerror = dhdpcie_bus_membytes(bus, FALSE, offset, memptr_tmp, len);
+ if (bcmerror) {
+ DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
+ __FUNCTION__, bcmerror, MEMBLOCK, offset));
+ goto err;
+ }
+ if (memcmp(memptr_tmp, memptr, len)) {
+ DHD_ERROR(("%s: Downloaded image is corrupted at 0x%08x\n", __FUNCTION__, offset));
+ bcmerror = BCME_ERROR;
+ goto err;
+ } else
+ DHD_INFO(("%s: Download, Upload and compare succeeded.\n", __FUNCTION__));
+ }
+#endif
+ offset += MEMBLOCK;
+
+ if (offset >= offset_end) {
+ DHD_ERROR(("%s: invalid address access to %x (offset end: %x)\n",
+ __FUNCTION__, offset, offset_end));
+ bcmerror = BCME_ERROR;
+ goto err;
+ }
+
+ if (read_len >= file_size) {
+ break;
+ }
+ }
+err:
+ if (memblock) {
+ MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN);
+#ifdef CHECK_DOWNLOAD_FW
+ if (memptr_tmp)
+ MFREE(bus->dhd->osh, memptr_tmp, MEMBLOCK + DHD_SDALIGN);
+#endif
+ }
+
+ if (imgbuf) {
+ dhd_os_close_image1(bus->dhd, imgbuf);
+ }
+
+ return bcmerror;
+} /* dhdpcie_download_code_file */
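+
+/* The memptr adjustment in the function above is the usual
+ * over-allocate-and-round-up trick: allocate 'align' extra bytes, then
+ * advance the pointer to the next aligned address. A standalone sketch
+ * (assuming DHD_SDALIGN-style alignment); guarded out so it is never built.
+ */
+#if 0
+static unsigned char *example_align_ptr(unsigned char *raw, unsigned int align)
+{
+	unsigned long rem = (unsigned long)raw % align;
+
+	/* 'raw' was allocated with 'align' spare bytes, so this stays in bounds */
+	return rem ? raw + (align - rem) : raw;
+}
+#endif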
+
+#ifdef CUSTOMER_HW4_DEBUG
+#define MIN_NVRAMVARS_SIZE 128
+#endif /* CUSTOMER_HW4_DEBUG */
+
+static int
+dhdpcie_download_nvram(struct dhd_bus *bus)
+{
+ int bcmerror = BCME_ERROR;
+ uint len;
+ char * memblock = NULL;
+ char *bufp;
+ char *pnv_path;
+ bool nvram_file_exists;
+ bool nvram_uefi_exists = FALSE;
+ bool local_alloc = FALSE;
+ pnv_path = bus->nv_path;
+
+ nvram_file_exists = ((pnv_path != NULL) && (pnv_path[0] != '\0'));
+
+ /* First try UEFI */
+ len = MAX_NVRAMBUF_SIZE;
+ dhd_get_download_buffer(bus->dhd, NULL, NVRAM, &memblock, (int *)&len);
+
+ /* If UEFI empty, then read from file system */
+ if ((len <= 0) || (memblock == NULL)) {
+
+ if (nvram_file_exists) {
+ len = MAX_NVRAMBUF_SIZE;
+ dhd_get_download_buffer(bus->dhd, pnv_path, NVRAM, &memblock, (int *)&len);
+ if ((len <= 0 || len > MAX_NVRAMBUF_SIZE)) {
+ goto err;
+ }
+ }
+#ifdef BCM_ROUTER_DHD
+ else if (bus->nvram_params_len) {
+ memblock = MALLOCZ(bus->dhd->osh, MAX_NVRAMBUF_SIZE);
+ if (memblock == NULL) {
+ DHD_ERROR(("%s: Failed to allocate memory %d bytes\n",
+ __FUNCTION__, MAX_NVRAMBUF_SIZE));
+ goto err;
+ }
+ local_alloc = TRUE;
+ /* nvram is a NUL-separated string list, so strlen cannot be used */
+ len = bus->nvram_params_len;
+ ASSERT(len <= MAX_NVRAMBUF_SIZE);
+ memcpy(memblock, bus->nvram_params, len);
+ }
+#endif /* BCM_ROUTER_DHD */
+ else {
+ /* For SROM OTP no external file or UEFI required */
+ bcmerror = BCME_OK;
+ }
+ } else {
+ nvram_uefi_exists = TRUE;
+ }
+
+ DHD_ERROR(("%s: dhd_get_download_buffer len %d\n", __FUNCTION__, len));
+
+ if (len > 0 && len <= MAX_NVRAMBUF_SIZE && memblock != NULL) {
+ bufp = (char *) memblock;
+
+#ifdef DHD_EFI
+ dhd_insert_random_mac_addr(bus->dhd, bufp, &len);
+
+#endif /* DHD_EFI */
+
+#ifdef CACHE_FW_IMAGES
+ if (bus->processed_nvram_params_len) {
+ len = bus->processed_nvram_params_len;
+ }
+
+ if (!bus->processed_nvram_params_len) {
+ bufp[len] = 0;
+ if (nvram_uefi_exists || nvram_file_exists) {
+ len = process_nvram_vars(bufp, len);
+ bus->processed_nvram_params_len = len;
+ }
+ } else
+#else
+ {
+ bufp[len] = 0;
+ if (nvram_uefi_exists || nvram_file_exists) {
+ len = process_nvram_vars(bufp, len);
+ }
+ }
+#endif /* CACHE_FW_IMAGES */
+
+ DHD_ERROR(("%s: process_nvram_vars len %d\n", __FUNCTION__, len));
+#ifdef CUSTOMER_HW4_DEBUG
+ if (len < MIN_NVRAMVARS_SIZE) {
+ DHD_ERROR(("%s: invalid nvram size in process_nvram_vars \n",
+ __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto err;
+ }
+#endif /* CUSTOMER_HW4_DEBUG */
+
+ if (len % 4) {
+ len += 4 - (len % 4);
+ }
+ bufp += len;
+ *bufp++ = 0;
+ if (len)
+ bcmerror = dhdpcie_downloadvars(bus, memblock, len + 1);
+ if (bcmerror) {
+ DHD_ERROR(("%s: error downloading vars: %d\n",
+ __FUNCTION__, bcmerror));
+ }
+ }
+
+err:
+ if (memblock) {
+ if (local_alloc) {
+ MFREE(bus->dhd->osh, memblock, MAX_NVRAMBUF_SIZE);
+ } else {
+ dhd_free_download_buffer(bus->dhd, memblock, MAX_NVRAMBUF_SIZE);
+ }
+ }
+
+ return bcmerror;
+}
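+
+/* Worked example of the length fix-up above: the vars blob is rounded up to
+ * a 4-byte boundary, an extra NUL terminator is written just past the
+ * rounded length, and len + 1 bytes are downloaded. E.g. len = 10 rounds to
+ * 12, the terminator lands at offset 12, and 13 bytes go to
+ * dhdpcie_downloadvars(). Sketch only; guarded out so it is never built.
+ */
+#if 0
+static unsigned int example_nvram_download_len(char *buf, unsigned int len)
+{
+	if (len % 4)
+		len += 4 - (len % 4);	/* round up to a 4-byte multiple */
+	buf[len] = 0;			/* end-of-vars terminator */
+	return len + 1;			/* size handed to dhdpcie_downloadvars */
+}
+#endif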
+
+#if defined(BCMEMBEDIMAGE) && defined (BCM_ROUTER_DHD)
+
+#ifdef DLIMAGE_43602a1
+#define CHIPID_43602 BCM43602_CHIP_ID
+#define CHIPID_43462 BCM43462_CHIP_ID
+#define CHIPID_43522 BCM43522_CHIP_ID
+#define CHIP_43602_CHIPREV_A0 0
+#define CHIP_43602_CHIPREV_A1 1
+#define CHIP_43602_PKG_OPT 1
+#endif
+
+#define CHIPID_NONE -1
+
+struct fd_chip_image
+{
+ unsigned char *dlarray;
+ int dlimagesize;
+ char *dlimagename;
+ char *dlimagever;
+ char *dliamgedate;
+} static chip_dl_image_array[] __initdata =
+{
+#ifdef DLIMAGE_43602a1
+ {dlarray_43602a1, sizeof(dlarray_43602a1), dlimagename_43602a1,
+ dlimagever_43602a1, dlimagedate_43602a1},
+#endif
+ /* {image attributes for other chips, only if image is compiled} */
+};
+
+enum chip_image_rev
+{
+#ifdef DLIMAGE_43602a1
+ CHIP_43602_A1_CHIP_IMAGE,
+#endif
+ /* index in the above array */
+};
+
+struct chip_image_map
+{
+ uint32 chipid;
+ uint32 chiprev;
+ uint32 chippkg;
+ uint32 image_idx;
+} static chip_image_index_map_table [] __initdata =
+{
+#ifdef DLIMAGE_43602a1
+ {CHIPID_43602, CHIP_43602_CHIPREV_A1, CHIP_43602_PKG_OPT, CHIP_43602_A1_CHIP_IMAGE},
+ {CHIPID_43462, CHIP_43602_CHIPREV_A1, CHIP_43602_PKG_OPT, CHIP_43602_A1_CHIP_IMAGE},
+ {CHIPID_43522, CHIP_43602_CHIPREV_A1, CHIP_43602_PKG_OPT, CHIP_43602_A1_CHIP_IMAGE},
+#endif
+ /* {for a given chipid, chiprev, chippkg, what is the index (the above enum) */
+ {CHIPID_NONE, 0, 0, 0} /* CHIPID_NONE is -1, used to mark end of list */
+};
+
+static void __init select_fd_image(
+ struct dhd_bus *bus, unsigned char **p_dlarray,
+ char **p_dlimagename, char **p_dlimagever,
+ char **p_dlimagedate, int *image_size) {
+
+ uint32 chipid, chiprev, chippkg_opt;
+ int image_index;
+ struct chip_image_map *p_image_index;
+
+ chipid = 0;
+ image_index = -1;
+ p_image_index = &chip_image_index_map_table[0];
+ while (chipid != CHIPID_NONE) {
+ chipid = p_image_index->chipid;
+ chiprev = p_image_index->chiprev;
+ chippkg_opt = p_image_index->chippkg;
+
+ if ((chipid == bus->sih->chip) && (chiprev == bus->sih->chiprev) &&
+ (chippkg_opt == bus->sih->chippkg)) {
+ image_index = p_image_index->image_idx;
+ break;
+ }
+ p_image_index++;
+ }
+
+ if (image_index != -1) {
+ *p_dlarray = chip_dl_image_array[image_index].dlarray;
+ *p_dlimagename = chip_dl_image_array[image_index].dlimagename;
+ *p_dlimagever = chip_dl_image_array[image_index].dlimagever;
+ *p_dlimagedate = chip_dl_image_array[image_index].dliamgedate;
+ *image_size = chip_dl_image_array[image_index].dlimagesize;
+ } else {
+ *p_dlarray = 0;
+ DHD_ERROR(("####################################################################\n"
+ "# %s: Dongle image not available for chipid = 0x%x"
+ " chiprev = %d chippkg = %d\n"
+ "####################################################################\n",
+ __FUNCTION__, bus->sih->chip, bus->sih->chiprev, bus->sih->chippkg));
+ }
+}
+#endif /* defined(BCMEMBEDIMAGE) && defined (BCM_ROUTER_DHD) */
+
+#ifdef BCMEMBEDIMAGE
+int
+dhdpcie_download_code_array(struct dhd_bus *bus)
+{
+ int bcmerror = -1;
+ int offset = 0;
+ unsigned char *p_dlarray = NULL;
+ unsigned int dlarray_size = 0;
+ unsigned int downloded_len, remaining_len, len;
+ char *p_dlimagename, *p_dlimagever, *p_dlimagedate;
+ uint8 *memblock = NULL, *memptr;
+
+ downloded_len = 0;
+ remaining_len = 0;
+ len = 0;
+
+#ifdef DHD_EFI
+ p_dlarray = rtecdc_fw_arr;
+ dlarray_size = sizeof(rtecdc_fw_arr);
+#else
+#ifndef BCM_ROUTER_DHD
+ p_dlarray = dlarray;
+ dlarray_size = sizeof(dlarray);
+ p_dlimagename = dlimagename;
+ p_dlimagever = dlimagever;
+ p_dlimagedate = dlimagedate;
+#else
+ select_fd_image(bus, &p_dlarray, &p_dlimagename, &p_dlimagever,
+ &p_dlimagedate, &dlarray_size);
+#endif /* endif for BCM_ROUTER_DHD */
+#endif /* DHD_EFI */
+
+#ifndef DHD_EFI
+ if ((p_dlarray == 0) || (dlarray_size == 0) || (dlarray_size > bus->ramsize) ||
+ (p_dlimagename == 0) || (p_dlimagever == 0) || (p_dlimagedate == 0))
+ goto err;
+#endif /* DHD_EFI */
+
+ memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN);
+ if (memblock == NULL) {
+ DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK));
+ goto err;
+ }
+ if ((uint32)(uintptr)memblock % DHD_SDALIGN)
+ memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN));
+
+ while (downloded_len < dlarray_size) {
+ remaining_len = dlarray_size - downloded_len;
+ if (remaining_len >= MEMBLOCK)
+ len = MEMBLOCK;
+ else
+ len = remaining_len;
+
+ memcpy(memptr, (p_dlarray + downloded_len), len);
+ /* check if CR4/CA7 */
+ if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0) ||
+ si_setcore(bus->sih, SYSMEM_CORE_ID, 0)) {
+ /* if address is 0, store the reset instruction to be written in 0 */
+ if (offset == 0) {
+ bus->resetinstr = *(((uint32*)memptr));
+ /* Add start of RAM address to the address given by user */
+ offset += bus->dongle_ram_base;
+ }
+ }
+ bcmerror = dhdpcie_bus_membytes(bus, TRUE, offset, (uint8 *)memptr, len);
+ downloded_len += len;
+ if (bcmerror) {
+ DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
+ __FUNCTION__, bcmerror, MEMBLOCK, offset));
+ goto err;
+ }
+ offset += MEMBLOCK;
+ }
+
+#ifdef DHD_DEBUG
+ /* Upload and compare the downloaded code */
+ {
+ unsigned char *ularray = NULL;
+ unsigned int uploded_len;
+ uploded_len = 0;
+ bcmerror = -1;
+ ularray = MALLOC(bus->dhd->osh, dlarray_size);
+ if (ularray == NULL)
+ goto upload_err;
+ /* Upload image to verify downloaded contents. */
+ offset = bus->dongle_ram_base;
+ memset(ularray, 0xaa, dlarray_size);
+ while (uploded_len < dlarray_size) {
+ remaining_len = dlarray_size - uploded_len;
+ if (remaining_len >= MEMBLOCK)
+ len = MEMBLOCK;
+ else
+ len = remaining_len;
+ bcmerror = dhdpcie_bus_membytes(bus, FALSE, offset,
+ (uint8 *)(ularray + uploded_len), len);
+ if (bcmerror) {
+ DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
+ __FUNCTION__, bcmerror, MEMBLOCK, offset));
+ goto upload_err;
+ }
+
+ uploded_len += len;
+ offset += MEMBLOCK;
+ }
+#ifdef DHD_EFI
+ if (memcmp(p_dlarray, ularray, dlarray_size)) {
+ DHD_ERROR(("%s: Downloaded image is corrupted!\n", __FUNCTION__));
+ goto upload_err;
+ } else
+ DHD_ERROR(("%s: Download, Upload and compare succeeded.\n", __FUNCTION__));
+#else
+ if (memcmp(p_dlarray, ularray, dlarray_size)) {
+ DHD_ERROR(("%s: Downloaded image is corrupted (%s, %s, %s).\n",
+ __FUNCTION__, p_dlimagename, p_dlimagever, p_dlimagedate));
+ goto upload_err;
+
+ } else
+ DHD_ERROR(("%s: Download, Upload and compare succeeded (%s, %s, %s).\n",
+ __FUNCTION__, p_dlimagename, p_dlimagever, p_dlimagedate));
+#endif /* DHD_EFI */
+
+upload_err:
+ if (ularray)
+ MFREE(bus->dhd->osh, ularray, dlarray_size);
+ }
+#endif /* DHD_DEBUG */
+err:
+
+ if (memblock)
+ MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN);
+
+ return bcmerror;
+} /* dhdpcie_download_code_array */
+#endif /* BCMEMBEDIMAGE */
+
+#ifdef BCM_ROUTER_DHD
+static int
+_dhdpcie_get_nvram_params(struct dhd_bus *bus)
+{
+ int nvram_len = MAX_NVRAMBUF_SIZE;
+ int tmp_nvram_len, boardrev_str_len;
+ char *boardrev_str;
+ char *boardtype_str;
+ char *ptr;
+
+ bus->nvram_params = MALLOC(bus->dhd->osh, nvram_len);
+ if (!bus->nvram_params) {
+ DHD_ERROR(("%s: fail to get nvram buffer to download.\n", __FUNCTION__));
+ return -1;
+ }
+
+ bus->nvram_params[0] = 0;
+ ptr = bus->nvram_params;
+ /*
+ * For full dongle router platforms, we would have two dhd instances running,
+ * serving two radios, one for 5G and another for 2G. But, both dongle instances
+ * would come up as wl0, as one is not aware of the other. In order to avoid
+ * this situation, we pass the dhd instance number through nvram parameter
+ * wlunit=0 and wlunit=1 to the dongle and make sure the two dongle instances
+ * come up as wl0 and wl1.
+ */
+
+ tmp_nvram_len = strlen("wlunit=xx\n\n") + 1;
+ tmp_nvram_len =
+ snprintf(ptr, tmp_nvram_len, "wlunit=%d", dhd_get_instance(bus->dhd));
+ ptr += (tmp_nvram_len + 1); /* leave NULL */
+ tmp_nvram_len++;
+
+ if ((boardrev_str = si_getdevpathvar(bus->sih, "boardrev")) == NULL)
+ boardrev_str = nvram_get("boardrev");
+
+ boardrev_str_len = strlen("boardrev=0xXXXX") + 1;
+ boardrev_str_len = snprintf(ptr, boardrev_str_len, "boardrev=%s",
+ boardrev_str? boardrev_str : BOARDREV_PROMOTABLE_STR);
+ ptr += (boardrev_str_len + 1); /* leave NULL */
+ tmp_nvram_len += (boardrev_str_len + 1);
+
+ /* If per device boardtype is not available, use global boardtype */
+ if ((boardtype_str = si_getdevpathvar(bus->sih, "boardtype")) == NULL) {
+ if ((boardtype_str = nvram_get("boardtype")) != NULL) {
+ int boardtype_str_len = 0;
+
+ boardtype_str_len = strlen("boardtype=0xXXXX") + 1;
+ boardtype_str_len = snprintf(ptr, boardtype_str_len,
+ "boardtype=%s", boardtype_str);
+ ptr += (boardtype_str_len + 1); /* leave NULL */
+ tmp_nvram_len += (boardtype_str_len + 1);
+ }
+ }
+
+ if (dbushost_initvars_flash(bus->sih,
+ bus->osh, &ptr,
+ (nvram_len - tmp_nvram_len)) != 0) {
+ DHD_ERROR(("%s: fail to read nvram from flash.\n", __FUNCTION__));
+ }
+
+ tmp_nvram_len = (int)(ptr - bus->nvram_params);
+
+ bcopy(STR_END, ptr, sizeof(STR_END));
+ tmp_nvram_len += sizeof(STR_END);
+ bus->nvram_params_len = tmp_nvram_len;
+ return 0;
+}
+
+static void
+_dhdpcie_free_nvram_params(struct dhd_bus *bus)
+{
+ if (bus->nvram_params) {
+ MFREE(bus->dhd->osh, bus->nvram_params, MAX_NVRAMBUF_SIZE);
+ }
+}
+
+/** Handler to send a signal to the dhdmonitor process to notify of firmware traps */
+void
+dhdpcie_handle_dongle_trap(struct dhd_bus *bus)
+{
+ char *failed_if;
+
+ /* Call the bus module watchdog */
+ dhd_bus_watchdog(bus->dhd);
+
+ /* Get the failed interface name to be later used by
+ * dhd_monitor to capture the required logs
+ */
+ failed_if = dhd_ifname(bus->dhd, 0);
+ dhd_schedule_trap_log_dump(bus->dhd, (uint8 *)failed_if, strlen(failed_if));
+}
+
+#endif /* BCM_ROUTER_DHD */
+
+/**
+ * Downloads firmware file given by 'bus->fw_path' into PCIe dongle
+ *
+ * BCMEMBEDIMAGE specific:
+ * If bus->fw_path is empty, or if the download of bus->fw_path failed, the firmware contained
+ * in a header file is used instead.
+ *
+ */
+static int
+_dhdpcie_download_firmware(struct dhd_bus *bus)
+{
+ int bcmerror = -1;
+
+ bool embed = FALSE; /* download embedded firmware */
+ bool dlok = FALSE; /* download firmware succeeded */
+
+ /* Out immediately if no image to download */
+ if ((bus->fw_path == NULL) || (bus->fw_path[0] == '\0')) {
+#ifdef BCMEMBEDIMAGE
+ embed = TRUE;
+#else
+ DHD_ERROR(("%s: no fimrware file\n", __FUNCTION__));
+ return 0;
+#endif
+ }
+#ifdef BCM_ROUTER_DHD
+ if (_dhdpcie_get_nvram_params(bus) < 0) {
+ DHD_ERROR(("%s: fail to get nvram from system.\n", __FUNCTION__));
+ return 0;
+ }
+#endif
+ /* Keep arm in reset */
+ if (dhdpcie_bus_download_state(bus, TRUE)) {
+ DHD_ERROR(("%s: error placing ARM core in reset\n", __FUNCTION__));
+ goto err;
+ }
+
+ /* External image takes precedence if specified */
+ if ((bus->fw_path != NULL) && (bus->fw_path[0] != '\0')) {
+ if (dhdpcie_download_code_file(bus, bus->fw_path)) {
+ DHD_ERROR(("%s:%d dongle image file download failed\n", __FUNCTION__,
+ __LINE__));
+#ifdef BCMEMBEDIMAGE
+ embed = TRUE;
+#else
+ goto err;
+#endif
+ } else {
+ embed = FALSE;
+ dlok = TRUE;
+ }
+ }
+
+#ifdef BCMEMBEDIMAGE
+ if (embed) {
+ if (dhdpcie_download_code_array(bus)) {
+ DHD_ERROR(("%s: dongle image array download failed\n", __FUNCTION__));
+ goto err;
+ } else {
+ dlok = TRUE;
+ }
+ }
+#else
+ BCM_REFERENCE(embed);
+#endif
+ if (!dlok) {
+ DHD_ERROR(("%s:%d dongle image download failed\n", __FUNCTION__, __LINE__));
+ goto err;
+ }
+
+ /* EXAMPLE: nvram_array */
+ /* If a valid nvram_array is specified as above, it can be passed down to the dongle */
+ /* dhd_bus_set_nvram_params(bus, (char *)&nvram_array); */
+
+ /* External nvram takes precedence if specified */
+ if (dhdpcie_download_nvram(bus)) {
+ DHD_ERROR(("%s:%d dongle nvram file download failed\n", __FUNCTION__, __LINE__));
+ goto err;
+ }
+
+ /* Take arm out of reset */
+ if (dhdpcie_bus_download_state(bus, FALSE)) {
+ DHD_ERROR(("%s: error getting out of ARM core reset\n", __FUNCTION__));
+ goto err;
+ }
+
+ bcmerror = 0;
+
+err:
+#ifdef BCM_ROUTER_DHD
+ _dhdpcie_free_nvram_params(bus);
+#endif /* BCM_ROUTER_DHD */
+ return bcmerror;
+} /* _dhdpcie_download_firmware */
+
+static int
+dhdpcie_bus_readconsole(dhd_bus_t *bus)
+{
+ dhd_console_t *c = &bus->console;
+ uint8 line[CONSOLE_LINE_MAX], ch;
+ uint32 n, idx, addr;
+ int rv;
+ uint readlen = 0;
+ uint i = 0;
+
+ /* Don't do anything until FWREADY updates console address */
+ if (bus->console_addr == 0)
+ return -1;
+
+ /* Read console log struct */
+ addr = bus->console_addr + OFFSETOF(hnd_cons_t, log);
+
+ if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)&c->log, sizeof(c->log))) < 0)
+ return rv;
+
+ /* Allocate console buffer (one time only) */
+ if (c->buf == NULL) {
+ c->bufsize = ltoh32(c->log.buf_size);
+ if ((c->buf = MALLOC(bus->dhd->osh, c->bufsize)) == NULL)
+ return BCME_NOMEM;
+ DHD_PCIE_INFO(("conlog: bufsize=0x%x\n", c->bufsize));
+ }
+ idx = ltoh32(c->log.idx);
+
+ /* Protect against corrupt value */
+ if (idx > c->bufsize)
+ return BCME_ERROR;
+
+ /* Skip reading the console buffer if the index pointer has not moved */
+ if (idx == c->last)
+ return BCME_OK;
+
+ DHD_PCIE_INFO(("conlog: addr=0x%x, idx=0x%x, last=0x%x \n", c->log.buf,
+ idx, c->last));
+
+ /* Read the console buffer data into a local buffer.
+ * Only the needed portion of the buffer is read, taking care to
+ * handle wrap-around. The read ptr is 'c->last', the write ptr
+ * is 'idx'.
+ */
+ addr = ltoh32(c->log.buf);
+
+ /* wrap around case - write ptr < read ptr */
+ if (idx < c->last) {
+ /* from read ptr to end of buffer */
+ readlen = c->bufsize - c->last;
+ if ((rv = dhdpcie_bus_membytes(bus, FALSE,
+ addr + c->last, c->buf, readlen)) < 0) {
+ DHD_ERROR(("conlog: read error[1] ! \n"));
+ return rv;
+ }
+ /* from beginning of buffer to write ptr */
+ if ((rv = dhdpcie_bus_membytes(bus, FALSE,
+ addr, c->buf + readlen,
+ idx)) < 0) {
+ DHD_ERROR(("conlog: read error[2] ! \n"));
+ return rv;
+ }
+ readlen += idx;
+ } else {
+ /* non-wraparound case, write ptr > read ptr */
+ readlen = (uint)idx - c->last;
+ if ((rv = dhdpcie_bus_membytes(bus, FALSE,
+ addr + c->last, c->buf, readlen)) < 0) {
+ DHD_ERROR(("conlog: read error[3] ! \n"));
+ return rv;
+ }
+ }
+ /* update read ptr */
+ c->last = idx;
+
+ /* now output the read data from the local buffer to the host console */
+ while (i < readlen) {
+ for (n = 0; n < CONSOLE_LINE_MAX - 2 && i < readlen; n++) {
+ ch = c->buf[i];
+ ++i;
+ if (ch == '\n')
+ break;
+ line[n] = ch;
+ }
+
+ if (n > 0) {
+ if (line[n - 1] == '\r')
+ n--;
+ line[n] = 0;
+ printf("CONSOLE: %s\n", line);
+ }
+ }
+
+ return BCME_OK;
+
+} /* dhdpcie_bus_readconsole */
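+
+/* The wrap-around read above in miniature: when the write pointer has
+ * wrapped past the read pointer, copy from the read pointer to the end of
+ * the ring and then from the start of the ring up to the write pointer.
+ * Self-contained sketch over plain memory (the real code reads through
+ * dhdpcie_bus_membytes); guarded out so it is never built.
+ */
+#if 0
+static unsigned int example_ring_read(const unsigned char *ring,
+	unsigned int bufsize, unsigned int rd, unsigned int wr,
+	unsigned char *out)
+{
+	unsigned int n;
+
+	if (wr < rd) {		/* wrapped: tail piece, then head piece */
+		memcpy(out, ring + rd, bufsize - rd);
+		n = bufsize - rd;
+		memcpy(out + n, ring, wr);
+		n += wr;
+	} else {		/* contiguous: single copy */
+		memcpy(out, ring + rd, wr - rd);
+		n = wr - rd;
+	}
+	return n;		/* caller then sets rd = wr */
+}
+#endif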
+
+void
+dhd_bus_dump_console_buffer(dhd_bus_t *bus)
+{
+ uint32 n, i;
+ uint32 addr;
+ char *console_buffer = NULL;
+ uint32 console_ptr, console_size, console_index;
+ uint8 line[CONSOLE_LINE_MAX], ch;
+ int rv;
+
+ DHD_ERROR(("%s: Dump Complete Console Buffer\n", __FUNCTION__));
+
+ if (bus->is_linkdown) {
+ DHD_ERROR(("%s: Skip dump Console Buffer due to PCIe link down\n", __FUNCTION__));
+ return;
+ }
+
+ addr = bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log);
+ if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr,
+ (uint8 *)&console_ptr, sizeof(console_ptr))) < 0) {
+ goto exit;
+ }
+
+ addr = bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log.buf_size);
+ if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr,
+ (uint8 *)&console_size, sizeof(console_size))) < 0) {
+ goto exit;
+ }
+
+ addr = bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log.idx);
+ if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr,
+ (uint8 *)&console_index, sizeof(console_index))) < 0) {
+ goto exit;
+ }
+
+ console_ptr = ltoh32(console_ptr);
+ console_size = ltoh32(console_size);
+ console_index = ltoh32(console_index);
+
+ if (console_size > CONSOLE_BUFFER_MAX ||
+ !(console_buffer = MALLOC(bus->dhd->osh, console_size))) {
+ goto exit;
+ }
+
+ if ((rv = dhdpcie_bus_membytes(bus, FALSE, console_ptr,
+ (uint8 *)console_buffer, console_size)) < 0) {
+ goto exit;
+ }
+
+ for (i = 0, n = 0; i < console_size; i += n + 1) {
+ for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
+ ch = console_buffer[(console_index + i + n) % console_size];
+ if (ch == '\n')
+ break;
+ line[n] = ch;
+ }
+
+ if (n > 0) {
+ if (line[n - 1] == '\r')
+ n--;
+ line[n] = 0;
+ /* Don't use DHD_ERROR macro since we print
+ * a lot of information quickly. The macro
+ * will truncate a lot of the printfs
+ */
+
+ printf("CONSOLE: %s\n", line);
+ }
+ }
+
+exit:
+ if (console_buffer)
+ MFREE(bus->dhd->osh, console_buffer, console_size);
+ return;
+}
+
+static void
+dhdpcie_schedule_log_dump(dhd_bus_t *bus)
+{
+#if defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL) && defined(DHD_LOG_DUMP)
+ log_dump_type_t *flush_type;
+
+ /* flush_type is freed at do_dhd_log_dump function */
+ flush_type = MALLOCZ(bus->dhd->osh, sizeof(log_dump_type_t));
+ if (flush_type) {
+ *flush_type = DLD_BUF_TYPE_ALL;
+ dhd_schedule_log_dump(bus->dhd, flush_type);
+ } else {
+ DHD_ERROR(("%s Fail to malloc flush_type\n", __FUNCTION__));
+ }
+#endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL && DHD_LOG_DUMP */
+}
+
+/**
+ * Checks whether the dongle has trapped or asserted; if so, collects and prints the trap or
+ * assert information and the console buffer, and schedules the relevant dumps.
+ *
+ * @return BCME_OK on success
+ */
+static int
+dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size)
+{
+ int bcmerror = 0;
+ uint msize = 512;
+ char *mbuffer = NULL;
+ uint maxstrlen = 256;
+ char *str = NULL;
+ pciedev_shared_t *local_pciedev_shared = bus->pcie_sh;
+ struct bcmstrbuf strbuf;
+ unsigned long flags;
+ bool dongle_trap_occured = FALSE;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (DHD_NOCHECKDIED_ON()) {
+ return 0;
+ }
+
+ if (data == NULL) {
+ /*
+ * Called after a rx ctrl timeout. "data" is NULL.
+ * allocate memory to trace the trap or assert.
+ */
+ size = msize;
+ mbuffer = data = MALLOC(bus->dhd->osh, msize);
+
+ if (mbuffer == NULL) {
+ DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, msize));
+ bcmerror = BCME_NOMEM;
+ goto done2;
+ }
+ }
+
+ if ((str = MALLOC(bus->dhd->osh, maxstrlen)) == NULL) {
+ DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, maxstrlen));
+ bcmerror = BCME_NOMEM;
+ goto done2;
+ }
+ DHD_GENERAL_LOCK(bus->dhd, flags);
+ DHD_BUS_BUSY_SET_IN_CHECKDIED(bus->dhd);
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+
+ if (MULTIBP_ENAB(bus->sih)) {
+ dhd_bus_pcie_pwr_req(bus);
+ }
+ if ((bcmerror = dhdpcie_readshared(bus)) < 0) {
+ goto done1;
+ }
+
+ bcm_binit(&strbuf, data, size);
+
+ bcm_bprintf(&strbuf, "msgtrace address : 0x%08X\nconsole address : 0x%08X\n",
+ local_pciedev_shared->msgtrace_addr, local_pciedev_shared->console_addr);
+
+ if ((local_pciedev_shared->flags & PCIE_SHARED_ASSERT_BUILT) == 0) {
+ /* NOTE: Misspelled assert is intentional - DO NOT FIX.
+ * (Avoids conflict with real asserts for programmatic parsing of output.)
+ */
+ bcm_bprintf(&strbuf, "Assrt not built in dongle\n");
+ }
+
+ if ((bus->pcie_sh->flags & (PCIE_SHARED_ASSERT|PCIE_SHARED_TRAP)) == 0) {
+ /* NOTE: Misspelled assert is intentional - DO NOT FIX.
+ * (Avoids conflict with real asserts for programmatic parsing of output.)
+ */
+ bcm_bprintf(&strbuf, "No trap%s in dongle",
+ (bus->pcie_sh->flags & PCIE_SHARED_ASSERT_BUILT)
+ ?"/assrt" :"");
+ } else {
+ if (bus->pcie_sh->flags & PCIE_SHARED_ASSERT) {
+ /* Download assert */
+ bcm_bprintf(&strbuf, "Dongle assert");
+ if (bus->pcie_sh->assert_exp_addr != 0) {
+ str[0] = '\0';
+ if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
+ bus->pcie_sh->assert_exp_addr,
+ (uint8 *)str, maxstrlen)) < 0) {
+ goto done1;
+ }
+
+ str[maxstrlen - 1] = '\0';
+ bcm_bprintf(&strbuf, " expr \"%s\"", str);
+ }
+
+ if (bus->pcie_sh->assert_file_addr != 0) {
+ str[0] = '\0';
+ if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
+ bus->pcie_sh->assert_file_addr,
+ (uint8 *)str, maxstrlen)) < 0) {
+ goto done1;
+ }
+
+ str[maxstrlen - 1] = '\0';
+ bcm_bprintf(&strbuf, " file \"%s\"", str);
+ }
+
+ bcm_bprintf(&strbuf, " line %d ", bus->pcie_sh->assert_line);
+ }
+
+ if (bus->pcie_sh->flags & PCIE_SHARED_TRAP) {
+ trap_t *tr = &bus->dhd->last_trap_info;
+ dongle_trap_occured = TRUE;
+ if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
+ bus->pcie_sh->trap_addr, (uint8*)tr, sizeof(trap_t))) < 0) {
+ bus->dhd->dongle_trap_occured = TRUE;
+ goto done1;
+ }
+ dhd_bus_dump_trap_info(bus, &strbuf);
+ }
+ }
+
+ if (bus->pcie_sh->flags & (PCIE_SHARED_ASSERT | PCIE_SHARED_TRAP)) {
+ printf("%s: %s\n", __FUNCTION__, strbuf.origbuf);
+#ifdef REPORT_FATAL_TIMEOUTS
+ /**
+ * stop the timers as FW trapped
+ */
+ if (dhd_stop_scan_timer(bus->dhd, FALSE, 0)) {
+ DHD_ERROR(("dhd_stop_scan_timer failed\n"));
+ ASSERT(0);
+ }
+ if (dhd_stop_bus_timer(bus->dhd)) {
+ DHD_ERROR(("dhd_stop_bus_timer failed\n"));
+ ASSERT(0);
+ }
+ if (dhd_stop_cmd_timer(bus->dhd)) {
+ DHD_ERROR(("dhd_stop_cmd_timer failed\n"));
+ ASSERT(0);
+ }
+ if (dhd_stop_join_timer(bus->dhd)) {
+ DHD_ERROR(("dhd_stop_join_timer failed\n"));
+ ASSERT(0);
+ }
+#endif /* REPORT_FATAL_TIMEOUTS */
+
+ /* wake up IOCTL wait event */
+ dhd_wakeup_ioctl_event(bus->dhd, IOCTL_RETURN_ON_TRAP);
+
+ dhd_bus_dump_console_buffer(bus);
+ dhd_prot_debug_info_print(bus->dhd);
+
+#if defined(DHD_FW_COREDUMP)
+ /* save core dump or write to a file */
+ if (bus->dhd->memdump_enabled) {
+#ifdef DHD_SSSR_DUMP
+ DHD_ERROR(("%s : Set collect_sssr as TRUE\n", __FUNCTION__));
+ bus->dhd->collect_sssr = TRUE;
+#endif /* DHD_SSSR_DUMP */
+#ifdef DHD_SDTC_ETB_DUMP
+ DHD_ERROR(("%s : Set collect_sdtc as TRUE\n", __FUNCTION__));
+ bus->dhd->collect_sdtc = TRUE;
+#endif /* DHD_SDTC_ETB_DUMP */
+ bus->dhd->memdump_type = DUMP_TYPE_DONGLE_TRAP;
+ dhdpcie_mem_dump(bus);
+ }
+#endif /* DHD_FW_COREDUMP */
+
+ /* set the trap occurred flag only after all the memdump,
+ * logdump and sssr dump collection has been scheduled
+ */
+ if (dongle_trap_occured) {
+ bus->dhd->dongle_trap_occured = TRUE;
+ if (bus->dhd->check_trap_rot &&
+ bus->dhd->ext_trap_data_supported &&
+ bus->pcie_sh->flags2 & PCIE_SHARED2_ETD_ADDR_SUPPORT) {
+ uint32 trap_data = *(uint32 *)bus->dhd->extended_trap_data;
+ DHD_ERROR(("%s : etd data : %x\n", __FUNCTION__, trap_data));
+ if (!(trap_data & D2H_DEV_EXT_TRAP_DATA)) {
+ uint32 *ext_data = bus->dhd->extended_trap_data;
+ /* Skip the first word which is trap_data */
+ ext_data++;
+ DHD_ERROR(("Dongle trap but no etd\n"));
+ if (dhdpcie_bus_membytes(bus, FALSE,
+ local_pciedev_shared->etd_addr,
+ (uint8 *)ext_data,
+ BCMPCIE_EXT_TRAP_DATA_MAXLEN -
+ sizeof(trap_data)) < 0) {
+ DHD_ERROR(("Error to read etd from dongle\n"));
+ }
+ } else {
+ DHD_ERROR(("Dongle trap with etd\n"));
+ }
+ }
+
+ }
+
+#ifdef WL_CFGVENDOR_SEND_HANG_EVENT
+ copy_hang_info_trap(bus->dhd);
+#endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
+
+ dhd_schedule_reset(bus->dhd);
+
+#ifdef NDIS
+ /* ASSERT only if hang detection/recovery is disabled. If enabled, let the
+ * Windows HDR mechanism trigger FW download via surprise removal.
+ */
+ dhd_bus_check_died(bus);
+#endif
+
+ }
+
+done1:
+ if (bcmerror) {
+ /* dhdpcie_checkdied is invoked only when the dongle has trapped,
+ * after a PCIe link down, etc., so set dongle_trap_occured so that
+ * the log_dump logic can rely on that single flag.
+ */
+ bus->dhd->dongle_trap_occured = TRUE;
+ dhdpcie_schedule_log_dump(bus);
+ }
+ if (MULTIBP_ENAB(bus->sih)) {
+ dhd_bus_pcie_pwr_req_clear(bus);
+ }
+
+ DHD_GENERAL_LOCK(bus->dhd, flags);
+ DHD_BUS_BUSY_CLEAR_IN_CHECKDIED(bus->dhd);
+ dhd_os_busbusy_wake(bus->dhd);
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+done2:
+ if (mbuffer)
+ MFREE(bus->dhd->osh, mbuffer, msize);
+ if (str)
+ MFREE(bus->dhd->osh, str, maxstrlen);
+
+ return bcmerror;
+} /* dhdpcie_checkdied */
+
+/* Custom copy of dhdpcie_mem_dump() that can be called at interrupt level */
+void dhdpcie_mem_dump_bugcheck(dhd_bus_t *bus, uint8 *buf)
+{
+ int ret = 0;
+ int size; /* Full mem size */
+ int start; /* Start address */
+ int read_size = 0; /* Read size of each iteration */
+ uint8 *databuf = buf;
+
+ if (bus == NULL) {
+ return;
+ }
+
+ start = bus->dongle_ram_base;
+ read_size = 4;
+ /* check for dead bus */
+ {
+ uint test_word = 0;
+ ret = dhdpcie_bus_membytes(bus, FALSE, start, (uint8*)&test_word, read_size);
+ /* if read error or bus timeout */
+ if (ret || (test_word == 0xFFFFFFFF)) {
+ return;
+ }
+ }
+
+ /* Get full mem size */
+ size = bus->ramsize;
+ /* Read mem content */
+ while (size)
+ {
+ read_size = MIN(MEMBLOCK, size);
+ if ((ret = dhdpcie_bus_membytes(bus, FALSE, start, databuf, read_size))) {
+ return;
+ }
+
+ /* Decrement size and increment start address */
+ size -= read_size;
+ start += read_size;
+ databuf += read_size;
+ }
+ bus->dhd->soc_ram = buf;
+ bus->dhd->soc_ram_length = bus->ramsize;
+ return;
+}
+
+#if defined(DHD_FW_COREDUMP)
+static int
+dhdpcie_get_mem_dump(dhd_bus_t *bus)
+{
+ int ret = BCME_OK;
+ int size = 0;
+ int start = 0;
+ int read_size = 0; /* Read size of each iteration */
+ uint8 *p_buf = NULL, *databuf = NULL;
+#ifdef BOARD_HIKEY
+ unsigned long flags_bus;
+#endif /* BOARD_HIKEY */
+
+ if (!bus) {
+ DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ if (!bus->dhd) {
+ DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ size = bus->ramsize; /* Full mem size */
+ start = bus->dongle_ram_base; /* Start address */
+
+ /* Get full mem size */
+ p_buf = dhd_get_fwdump_buf(bus->dhd, size);
+ if (!p_buf) {
+ DHD_ERROR(("%s: Out of memory (%d bytes)\n",
+ __FUNCTION__, size));
+ return BCME_ERROR;
+ }
+
+ /* Read mem content */
+ DHD_TRACE_HW4(("Dump dongle memory\n"));
+ databuf = p_buf;
+
+ while (size > 0) {
+ read_size = MIN(MEMBLOCK, size);
+#ifdef BOARD_HIKEY
+ /* Hold BUS_LP_STATE_LOCK to avoid simultaneous bus access */
+ DHD_BUS_LP_STATE_LOCK(bus->bus_lp_state_lock, flags_bus);
+#endif /* BOARD_HIKEY */
+ ret = dhdpcie_bus_membytes(bus, FALSE, start, databuf, read_size);
+#ifdef BOARD_HIKEY
+ DHD_BUS_LP_STATE_UNLOCK(bus->bus_lp_state_lock, flags_bus);
+#endif /* BOARD_HIKEY */
+ if (ret) {
+ DHD_ERROR(("%s: Error membytes %d\n", __FUNCTION__, ret));
+#ifdef DHD_DEBUG_UART
+ bus->dhd->memdump_success = FALSE;
+#endif /* DHD_DEBUG_UART */
+ break;
+ }
+ DHD_TRACE(("."));
+
+ /* Decrement size and increment start address */
+ size -= read_size;
+ start += read_size;
+ databuf += read_size;
+ }
+ return ret;
+}
+
+static int
+dhdpcie_mem_dump(dhd_bus_t *bus)
+{
+ dhd_pub_t *dhdp;
+ int ret;
+ uint32 dhd_console_ms_prev = 0;
+
+ dhdp = bus->dhd;
+ if (!dhdp) {
+ DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ dhd_console_ms_prev = dhdp->dhd_console_ms;
+ if (dhd_console_ms_prev) {
+ DHD_ERROR(("%s: Disabling console msgs(0x%d) before mem dump to local buf\n",
+ __FUNCTION__, dhd_console_ms_prev));
+ dhdp->dhd_console_ms = 0;
+ }
+#ifdef EXYNOS_PCIE_DEBUG
+ exynos_pcie_register_dump(1);
+#endif /* EXYNOS_PCIE_DEBUG */
+
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+ if (bus->is_linkdown) {
+ DHD_ERROR(("%s: PCIe link is down so skip\n", __FUNCTION__));
+ /* panic only for DUMP_MEMFILE_BUGON */
+ ASSERT(bus->dhd->memdump_enabled != DUMP_MEMFILE_BUGON);
+ ret = BCME_ERROR;
+ goto exit;
+ }
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+
+ if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
+ DHD_ERROR(("%s: bus is down! can't collect mem dump. \n", __FUNCTION__));
+ ret = BCME_ERROR;
+ goto exit;
+ }
+
+ /* Induce DB7 trap for below non-trap cases */
+ switch (dhdp->memdump_type) {
+ case DUMP_TYPE_RESUMED_ON_TIMEOUT:
+ /* intentional fall through */
+ case DUMP_TYPE_D3_ACK_TIMEOUT:
+ /* intentional fall through */
+ case DUMP_TYPE_PKTID_AUDIT_FAILURE:
+ /* intentional fall through */
+ case DUMP_TYPE_PKTID_INVALID:
+ /* intentional fall through */
+ case DUMP_TYPE_SCAN_TIMEOUT:
+ /* intentional fall through */
+ case DUMP_TYPE_SCAN_BUSY:
+ /* intentional fall through */
+ case DUMP_TYPE_BY_LIVELOCK:
+ /* intentional fall through */
+ case DUMP_TYPE_IFACE_OP_FAILURE:
+ /* intentional fall through */
+ case DUMP_TYPE_PKTID_POOL_DEPLETED:
+ /* intentional fall through */
+ case DUMP_TYPE_ESCAN_SYNCID_MISMATCH:
+ /* intentional fall through */
+ case DUMP_TYPE_INVALID_SHINFO_NRFRAGS:
+ if (dhdp->db7_trap.fw_db7w_trap) {
+ /* Set fw_db7w_trap_inprogress here and clear from DPC */
+ dhdp->db7_trap.fw_db7w_trap_inprogress = TRUE;
+ dhdpcie_fw_trap(dhdp->bus);
+ OSL_DELAY(100 * 1000); // wait 100 msec
+ } else {
+ DHD_ERROR(("%s: DB7 Not supported!!!\n",
+ __FUNCTION__));
+ }
+ break;
+ default:
+ break;
+ }
+
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+ if (pm_runtime_get_sync(dhd_bus_to_dev(bus)) < 0)
+ return BCME_ERROR;
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+
+ ret = dhdpcie_get_mem_dump(bus);
+ if (ret) {
+ DHD_ERROR(("%s: failed to get mem dump, err=%d\n",
+ __FUNCTION__, ret));
+ goto exit;
+ }
+#ifdef DHD_DEBUG_UART
+ bus->dhd->memdump_success = TRUE;
+#endif /* DHD_DEBUG_UART */
+
+#ifdef BCMINTERNAL
+ /* TODO: for host offload firmware, need to modify the stack and pc/lr to point it back to
+ * the original offset so gdb can match with symbol files
+ */
+#endif
+
+ dhd_schedule_memdump(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
+ /* buf, actually soc_ram free handled in dhd_{free,clear} */
+
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+ pm_runtime_mark_last_busy(dhd_bus_to_dev(bus));
+ pm_runtime_put_autosuspend(dhd_bus_to_dev(bus));
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+
+exit:
+ if (dhd_console_ms_prev) {
+ DHD_ERROR(("%s: enable console msgs(0x%d) after collecting memdump to local buf\n",
+ __FUNCTION__, dhd_console_ms_prev));
+ dhdp->dhd_console_ms = dhd_console_ms_prev;
+ }
+ return ret;
+}
+
+int
+dhd_bus_get_mem_dump(dhd_pub_t *dhdp)
+{
+ if (!dhdp) {
+ DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ return dhdpcie_get_mem_dump(dhdp->bus);
+}
+
+int
+dhd_bus_mem_dump(dhd_pub_t *dhdp)
+{
+ dhd_bus_t *bus = dhdp->bus;
+ int ret = BCME_ERROR;
+
+ if (dhdp->busstate == DHD_BUS_DOWN) {
+ DHD_ERROR(("%s bus is down\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ /* Try to resume if already suspended or suspend in progress */
+#ifdef DHD_PCIE_RUNTIMEPM
+ dhdpcie_runtime_bus_wake(dhdp, CAN_SLEEP(), __builtin_return_address(0));
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+ /* Skip if still in suspended or suspend in progress */
+ if (DHD_BUS_CHECK_SUSPEND_OR_ANY_SUSPEND_IN_PROGRESS(dhdp)) {
+ DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state, so skip\n",
+ __FUNCTION__, dhdp->busstate, dhdp->dhd_bus_busy_state));
+ return BCME_ERROR;
+ }
+
+ DHD_OS_WAKE_LOCK(dhdp);
+ ret = dhdpcie_mem_dump(bus);
+ DHD_OS_WAKE_UNLOCK(dhdp);
+ return ret;
+}
+#endif /* DHD_FW_COREDUMP */
+
+int
+dhd_socram_dump(dhd_bus_t *bus)
+{
+#if defined(DHD_FW_COREDUMP)
+ DHD_OS_WAKE_LOCK(bus->dhd);
+ dhd_bus_mem_dump(bus->dhd);
+ DHD_OS_WAKE_UNLOCK(bus->dhd);
+ return 0;
+#else
+ return -1;
+#endif
+}
+
+/**
+ * Transfers bytes from host to dongle using pio mode.
+ * Parameter 'address' is a backplane address.
+ */
+static int
+dhdpcie_bus_membytes(dhd_bus_t *bus, bool write, ulong address, uint8 *data, uint size)
+{
+ uint dsize;
+ int detect_endian_flag = 0x01;
+ bool little_endian;
+
+ if (write && bus->is_linkdown) {
+ DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ if (MULTIBP_ENAB(bus->sih)) {
+ dhd_bus_pcie_pwr_req(bus);
+ }
+ /* Detect endianness. */
+ little_endian = *(char *)&detect_endian_flag;
+
+ /* In remap mode, adjust address beyond socram and redirect
+ * to devram at SOCDEVRAM_BP_ADDR since remap address > orig_ramsize
+ * is not backplane accessible
+ */
+
+ /* Determine initial transfer parameters */
+#ifdef DHD_SUPPORT_64BIT
+ dsize = sizeof(uint64);
+#else /* !DHD_SUPPORT_64BIT */
+ dsize = sizeof(uint32);
+#endif /* DHD_SUPPORT_64BIT */
+
+ /* Do the transfer(s) */
+ DHD_INFO(("%s: %s %d bytes in window 0x%08lx\n",
+ __FUNCTION__, (write ? "write" : "read"), size, address));
+ if (write) {
+ while (size) {
+#ifdef DHD_SUPPORT_64BIT
+ if (size >= sizeof(uint64) && little_endian && !(address % 8)) {
+ dhdpcie_bus_wtcm64(bus, address, *((uint64 *)data));
+ }
+#else /* !DHD_SUPPORT_64BIT */
+ if (size >= sizeof(uint32) && little_endian && !(address % 4)) {
+ dhdpcie_bus_wtcm32(bus, address, *((uint32*)data));
+ }
+#endif /* DHD_SUPPORT_64BIT */
+ else {
+ dsize = sizeof(uint8);
+ dhdpcie_bus_wtcm8(bus, address, *data);
+ }
+
+ /* Adjust for next transfer (if any) */
+ if ((size -= dsize)) {
+ data += dsize;
+ address += dsize;
+ }
+ }
+ } else {
+ while (size) {
+#ifdef DHD_SUPPORT_64BIT
+ if (size >= sizeof(uint64) && little_endian && !(address % 8))
+ {
+ *(uint64 *)data = dhdpcie_bus_rtcm64(bus, address);
+ }
+#else /* !DHD_SUPPORT_64BIT */
+ if (size >= sizeof(uint32) && little_endian && !(address % 4))
+ {
+ *(uint32 *)data = dhdpcie_bus_rtcm32(bus, address);
+ }
+#endif /* DHD_SUPPORT_64BIT */
+ else {
+ dsize = sizeof(uint8);
+ *data = dhdpcie_bus_rtcm8(bus, address);
+ }
+
+ /* Adjust for next transfer (if any) */
+ if ((size -= dsize) > 0) {
+ data += dsize;
+ address += dsize;
+ }
+ }
+ }
+ if (MULTIBP_ENAB(bus->sih)) {
+ dhd_bus_pcie_pwr_req_clear(bus);
+ }
+ return BCME_OK;
+} /* dhdpcie_bus_membytes */
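+
+/* The endianness probe used above, in isolation: store 0x01 in an int and
+ * read back its first byte; a 1 means the host is little endian. Wide
+ * (4/8-byte) TCM accesses are only taken when the host is little endian and
+ * the backplane address is naturally aligned; otherwise the loop falls back
+ * to byte accesses. Sketch only; guarded out so it is never built.
+ */
+#if 0
+static int example_host_is_little_endian(void)
+{
+	int probe = 0x01;
+
+	/* the first byte of 0x00000001 in memory is 1 only on little endian */
+	return *(char *)&probe;
+}
+#endif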
+
+extern bool agg_h2d_db_enab;
+/**
+ * Transfers one transmit (ethernet) packet that was queued in the (flow controlled) flow ring queue
+ * to the (non flow controlled) flow ring.
+ */
+int
+BCMFASTPATH(dhd_bus_schedule_queue)(struct dhd_bus *bus, uint16 flow_id, bool txs)
+/** XXX function name could be more descriptive, eg use 'tx' and 'flow ring' in name */
+{
+ flow_ring_node_t *flow_ring_node;
+ int ret = BCME_OK;
+#ifdef DHD_LOSSLESS_ROAMING
+ dhd_pub_t *dhdp = bus->dhd;
+#endif
+
+ DHD_PCIE_INFO(("%s: flow_id is %d\n", __FUNCTION__, flow_id));
+
+ /* ASSERT on flow_id */
+ if (flow_id >= bus->max_submission_rings) {
+ DHD_ERROR(("%s: flow_id is invalid %d, max %d\n", __FUNCTION__,
+ flow_id, bus->max_submission_rings));
+ return 0;
+ }
+
+ flow_ring_node = DHD_FLOW_RING(bus->dhd, flow_id);
+
+ if (flow_ring_node->prot_info == NULL) {
+ DHD_ERROR((" %s : invalid flow_ring_node \n", __FUNCTION__));
+ return BCME_NOTREADY;
+ }
+
+#ifdef DHD_LOSSLESS_ROAMING
+ if ((dhdp->dequeue_prec_map & (1 << flow_ring_node->flow_info.tid)) == 0) {
+ DHD_ERROR_RLMT(("%s: roam in progress, tid %d is not in precedence map 0x%x."
+ " block scheduling\n",
+ __FUNCTION__, flow_ring_node->flow_info.tid, dhdp->dequeue_prec_map));
+ return BCME_OK;
+ }
+#endif /* DHD_LOSSLESS_ROAMING */
+
+ {
+ unsigned long flags;
+ void *txp = NULL;
+ flow_queue_t *queue;
+#ifdef TPUT_MONITOR
+ int pktlen;
+#endif
+
+ queue = &flow_ring_node->queue; /* queue associated with flow ring */
+
+ DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
+
+ if (flow_ring_node->status != FLOW_RING_STATUS_OPEN) {
+ DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+ return BCME_NOTREADY;
+ }
+
+ while ((txp = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
+ if (bus->dhd->conf->orphan_move <= 1)
+ PKTORPHAN(txp, bus->dhd->conf->tsq);
+
+ /*
+ * Modifying the packet length caused P2P cert failures.
+ * Specifically on test cases where a packet of size 52 bytes
+ * was injected, the sniffer capture showed 62 bytes because of
+ * which the cert tests failed. So making the below change
+ * only Router specific.
+ */
+#if defined(BCM_ROUTER_DHD)
+ if (PKTLEN(bus->dhd->osh, txp) < (ETHER_MIN_LEN - ETHER_CRC_LEN)) {
+ PKTSETLEN(bus->dhd->osh, txp, (ETHER_MIN_LEN - ETHER_CRC_LEN));
+ }
+#endif /* BCM_ROUTER_DHD */
+
+#ifdef DHDTCPACK_SUPPRESS
+ if (bus->dhd->tcpack_sup_mode != TCPACK_SUP_HOLD) {
+ ret = dhd_tcpack_check_xmit(bus->dhd, txp);
+ if (ret != BCME_OK) {
+ DHD_ERROR(("%s: dhd_tcpack_check_xmit() error.\n",
+ __FUNCTION__));
+ }
+ }
+#endif /* DHDTCPACK_SUPPRESS */
+ /* Attempt to transfer packet over flow ring */
+ /* XXX: ifidx is wrong */
+#ifdef TPUT_MONITOR
+ pktlen = PKTLEN(OSH_NULL, txp);
+ if ((bus->dhd->conf->data_drop_mode == TXPKT_DROP) && (pktlen > 500))
+ ret = BCME_OK;
+ else
+#endif
+ ret = dhd_prot_txdata(bus->dhd, txp, flow_ring_node->flow_info.ifindex);
+ if (ret != BCME_OK) { /* may not have resources in flow ring */
+ DHD_INFO(("%s: Reinserrt %d\n", __FUNCTION__, ret));
+#ifdef AGG_H2D_DB
+ if (agg_h2d_db_enab) {
+ dhd_prot_schedule_aggregate_h2d_db(bus->dhd, flow_id);
+ } else
+#endif /* AGG_H2D_DB */
+ {
+ dhd_prot_txdata_write_flush(bus->dhd, flow_id);
+ }
+ /* reinsert at head */
+ dhd_flow_queue_reinsert(bus->dhd, queue, txp);
+ DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+
+ /* If we are able to requeue back, return success */
+ return BCME_OK;
+ }
+
+#ifdef DHD_MEM_STATS
+ DHD_MEM_STATS_LOCK(bus->dhd->mem_stats_lock, flags);
+ bus->dhd->txpath_mem += PKTLEN(bus->dhd->osh, txp);
+ DHD_PCIE_INFO(("%s txpath_mem: %llu PKTLEN: %d\n",
+ __FUNCTION__, bus->dhd->txpath_mem, PKTLEN(bus->dhd->osh, txp)));
+ DHD_MEM_STATS_UNLOCK(bus->dhd->mem_stats_lock, flags);
+#endif /* DHD_MEM_STATS */
+ }
+
+#ifdef DHD_HP2P
+ if (!flow_ring_node->hp2p_ring)
+#endif /* DHD_HP2P */
+ {
+#ifdef AGG_H2D_DB
+ if (agg_h2d_db_enab) {
+ dhd_prot_schedule_aggregate_h2d_db(bus->dhd, flow_id);
+ } else
+#endif /* AGG_H2D_DB */
+ {
+ dhd_prot_txdata_write_flush(bus->dhd, flow_id);
+ }
+ }
+
+ DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+ }
+
+ return ret;
+} /* dhd_bus_schedule_queue */
+
+/** Sends an (ethernet) data frame (in 'txp') to the dongle. Callee disposes of txp. */
+int
+BCMFASTPATH(dhd_bus_txdata)(struct dhd_bus *bus, void *txp, uint8 ifidx)
+{
+ uint16 flowid;
+#ifdef IDLE_TX_FLOW_MGMT
+ uint8 node_status;
+#endif /* IDLE_TX_FLOW_MGMT */
+ flow_queue_t *queue;
+ flow_ring_node_t *flow_ring_node;
+ unsigned long flags;
+ int ret = BCME_OK;
+ void *txp_pend = NULL;
+#if (defined(BCM_ROUTER_DHD) && defined(HNDCTF))
+ void *ntxp = NULL;
+ uint8 prio = PKTPRIO(txp);
+#endif
+
+ if (!bus->dhd->flowid_allocator) {
+ DHD_ERROR(("%s: Flow ring not intited yet \n", __FUNCTION__));
+ goto toss;
+ }
+
+ flowid = DHD_PKT_GET_FLOWID(txp);
+
+ flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
+
+ DHD_TRACE(("%s: pkt flowid %d, status %d active %d\n",
+ __FUNCTION__, flowid, flow_ring_node->status, flow_ring_node->active));
+
+ DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
+ if ((flowid > bus->dhd->max_tx_flowid) ||
+#ifdef IDLE_TX_FLOW_MGMT
+ (!flow_ring_node->active))
+#else
+ (!flow_ring_node->active) ||
+ (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING) ||
+ (flow_ring_node->status == FLOW_RING_STATUS_STA_FREEING))
+#endif /* IDLE_TX_FLOW_MGMT */
+ {
+ DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+ DHD_INFO(("%s: Dropping pkt flowid %d, status %d active %d\n",
+ __FUNCTION__, flowid, flow_ring_node->status,
+ flow_ring_node->active));
+ ret = BCME_ERROR;
+ goto toss;
+ }
+
+#ifdef IDLE_TX_FLOW_MGMT
+ node_status = flow_ring_node->status;
+
+ /* handle the different status states here */
+ switch (node_status)
+ {
+ case FLOW_RING_STATUS_OPEN:
+
+ if (bus->enable_idle_flowring_mgmt) {
+ /* Move the node to the head of active list */
+ dhd_flow_ring_move_to_active_list_head(bus, flow_ring_node);
+ }
+ break;
+
+ case FLOW_RING_STATUS_SUSPENDED:
+ DHD_INFO(("Need to Initiate TX Flow resume\n"));
+ /* Issue resume_ring request */
+ dhd_bus_flow_ring_resume_request(bus,
+ flow_ring_node);
+ break;
+
+ case FLOW_RING_STATUS_CREATE_PENDING:
+ case FLOW_RING_STATUS_RESUME_PENDING:
+ /* Don't do anything here */
+ DHD_INFO(("Waiting for Flow create/resume! status is %u\n",
+ node_status));
+ break;
+
+ case FLOW_RING_STATUS_DELETE_PENDING:
+ default:
+ DHD_ERROR(("Dropping packet!! flowid %u status is %u\n",
+ flowid, node_status));
+ /* error here!! */
+ ret = BCME_ERROR;
+ DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+ goto toss;
+ }
+ /* Now queue the packet */
+#endif /* IDLE_TX_FLOW_MGMT */
+
+ queue = &flow_ring_node->queue; /* queue associated with flow ring */
+
+#if (defined(BCM_ROUTER_DHD) && defined(HNDCTF))
+ FOREACH_CHAINED_PKT(txp, ntxp) {
+ /* Tag the packet with flowid - Remember, only the head packet */
+ /* of the chain has been tagged with the FlowID in dhd_sendpkt */
+ /* Also set the priority */
+ DHD_PKT_SET_FLOWID(txp, flowid);
+ PKTSETPRIO(txp, prio);
+
+ if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp)) != BCME_OK) {
+ txp_pend = txp;
+ PKTSETCLINK((txp), ntxp);
+ break;
+ }
+ }
+#else /* !(defined(BCM_ROUTER_DHD) && defined(HNDCTF)) */
+ if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp)) != BCME_OK)
+ txp_pend = txp;
+#endif /* defined(BCM_ROUTER_DHD) && defined(HNDCTF */
+
+ DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+
+ if (flow_ring_node->status) {
+ DHD_PCIE_INFO(("%s: Enq pkt flowid %d, status %d active %d\n",
+ __FUNCTION__, flowid, flow_ring_node->status,
+ flow_ring_node->active));
+ if (txp_pend) {
+ txp = txp_pend;
+ goto toss;
+ }
+ return BCME_OK;
+ }
+ ret = dhd_bus_schedule_queue(bus, flowid, FALSE); /* from queue to flowring */
+
+ /* If we have anything pending, try to push into q */
+ if (txp_pend) {
+ DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
+
+#if (defined(BCM_ROUTER_DHD) && defined(HNDCTF))
+ FOREACH_CHAINED_PKT(txp_pend, ntxp) {
+ /* Tag the packet with flowid and set packet priority */
+ DHD_PKT_SET_FLOWID(txp_pend, flowid);
+ PKTSETPRIO(txp_pend, prio);
+
+ if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp_pend))
+ != BCME_OK) {
+ DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+ PKTSETCLINK((txp_pend), ntxp);
+ txp = txp_pend;
+ goto toss;
+ }
+ }
+#else /* !(defined(BCM_ROUTER_DHD) && defined(HNDCTF)) */
+ if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp_pend)) != BCME_OK) {
+ DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+ txp = txp_pend;
+ goto toss;
+ }
+#endif /* defined(BCM_ROUTER_DHD) && defined(HNDCTF) */
+
+ DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+ }
+
+ return ret;
+
+toss:
+ DHD_PCIE_INFO(("%s: Toss %d\n", __FUNCTION__, ret));
+#ifdef DHD_EFI
+ /* for EFI, pass the 'send' flag as false, to avoid enqueuing the failed tx pkt
+ * into the Tx done queue
+ */
+ PKTCFREE(bus->dhd->osh, txp, FALSE);
+#else
+ PKTCFREE(bus->dhd->osh, txp, TRUE);
+#endif /* DHD_EFI */
+ return ret;
+} /* dhd_bus_txdata */
+
+void
+dhd_bus_stop_queue(struct dhd_bus *bus)
+{
+ dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON);
+}
+
+void
+dhd_bus_start_queue(struct dhd_bus *bus)
+{
+ /*
+ * The Tx queue may have been stopped because of a resource shortage,
+ * or because the bus is not in a state where it can be turned on.
+ *
+ * Note that we restart the network interface only when we have
+ * enough resources; the flag indicating that all resources are
+ * available has to be updated first.
+ */
+ if (dhd_prot_check_tx_resource(bus->dhd)) {
+ DHD_ERROR(("%s: Interface NOT started, previously stopped "
+ "due to resource shortage\n", __FUNCTION__));
+ return;
+ }
+ dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF);
+}
+
+/* Device console input function */
+int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen)
+{
+ dhd_bus_t *bus = dhd->bus;
+ uint32 addr, val;
+ int rv;
+#ifdef PCIE_INB_DW
+ unsigned long flags = 0;
+#endif /* PCIE_INB_DW */
+
+ /* Address could be zero if CONSOLE := 0 in dongle Makefile */
+ if (bus->console_addr == 0)
+ return BCME_UNSUPPORTED;
+
+ /* Don't allow input if dongle is in reset */
+ if (bus->dhd->dongle_reset) {
+ return BCME_NOTREADY;
+ }
+
+ /* Zero cbuf_index */
+ addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf_idx);
+ /* handle difference in definition of hnd_log_t in certain branches */
+ if (dhd->wlc_ver_major < 14) {
+ addr -= (uint32)sizeof(uint32);
+ }
+ val = htol32(0);
+ if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
+ goto done;
+
+ /* Write message into cbuf */
+ addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf);
+ /* handle difference in definition of hnd_log_t in certain branches */
+ if (dhd->wlc_ver_major < 14) {
+ addr -= sizeof(uint32);
+ }
+ if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)msg, msglen)) < 0)
+ goto done;
+
+ /* Write length into vcons_in */
+ addr = bus->console_addr + OFFSETOF(hnd_cons_t, vcons_in);
+ val = htol32(msglen);
+ if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
+ goto done;
+
+#ifdef PCIE_INB_DW
+ /* Use a lock to ensure this tx DEVICE_WAKE + tx H2D_HOST_CONS_INT sequence is
+ * mutually exclusive with the rx D2H_DEV_DS_ENTER_REQ + tx H2D_HOST_DS_ACK sequence.
+ */
+ DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
+#endif /* PCIE_INB_DW */
+
+ /* generate an interrupt to dongle to indicate that it needs to process cons command */
+ dhdpcie_send_mb_data(bus, H2D_HOST_CONS_INT);
+
+#ifdef PCIE_INB_DW
+ DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
+#endif /* PCIE_INB_DW */
+done:
+ return rv;
+} /* dhd_bus_console_in */
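+/*
+ * Console input flow summary (descriptive note, mirroring the code above):
+ * zero cbuf_idx, copy 'msg' into cbuf, publish 'msglen' through vcons_in,
+ * then ring the dongle with an H2D_HOST_CONS_INT mailbox interrupt so that
+ * firmware picks up and processes the console command.
+ */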
+
+/**
+ * Called on frame reception, the frame was received from the dongle on interface 'ifidx' and is
+ * contained in 'pkt'. Processes rx frame, forwards up the layer to netif.
+ */
+void
+BCMFASTPATH(dhd_bus_rx_frame)(struct dhd_bus *bus, void* pkt, int ifidx, uint pkt_count)
+{
+ dhd_rx_frame(bus->dhd, ifidx, pkt, pkt_count, 0);
+}
+
+/* Acquire/Release bar1_switch_lock only if the chip supports bar1 switching */
+#define DHD_BUS_BAR1_SWITCH_LOCK(bus, flags) \
+ ((bus)->bar1_switch_enab) ? DHD_BAR1_SWITCH_LOCK((bus)->bar1_switch_lock, flags) : \
+ BCM_REFERENCE(flags)
+
+#define DHD_BUS_BAR1_SWITCH_UNLOCK(bus, flags) \
+ ((bus)->bar1_switch_enab) ? DHD_BAR1_SWITCH_UNLOCK((bus)->bar1_switch_lock, flags) : \
+ BCM_REFERENCE(flags)
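+
+/*
+ * Note: when bus->bar1_switch_enab is FALSE (the whole dongle RAM fits in
+ * a single BAR1 window), the lock/unlock macros above reduce at runtime to
+ * a BCM_REFERENCE(flags) no-op, so the TCM accessors below pay no spinlock
+ * cost on such chips.
+ */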
+
+/* Init/Deinit bar1_switch_lock only if the chip supports bar1 switching */
+static void
+dhd_init_bar1_switch_lock(dhd_bus_t *bus)
+{
+ if (bus->bar1_switch_enab && !bus->bar1_switch_lock) {
+ bus->bar1_switch_lock = osl_spin_lock_init(bus->osh);
+ }
+}
+
+static void
+dhd_deinit_bar1_switch_lock(dhd_bus_t *bus)
+{
+ if (bus->bar1_switch_enab && bus->bar1_switch_lock) {
+ osl_spin_lock_deinit(bus->osh, bus->bar1_switch_lock);
+ bus->bar1_switch_lock = NULL;
+ }
+}
+
+/*
+ * The bpwindow for any address is the largest multiple of bar1_size that
+ * does not exceed the address. For example, if addr=0x938fff and bar1_size
+ * is 0x400000, the address falls in the window 0x800000-0xbfffff, so the
+ * bpwindow to select is 0x800000.
+ * To achieve this, mask off the low (bar1_size - 1) bits of the given addr.
+ */
+#define DHD_BUS_BAR1_BPWIN(addr, bar1_size) \
+ (uint32)((addr) & ~((bar1_size) - 1))
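+
+/*
+ * Worked example for the macro above (illustrative values from the comment):
+ * with addr = 0x938fff and bar1_size = 0x400000,
+ *   DHD_BUS_BAR1_BPWIN(0x938fff, 0x400000)
+ *       = 0x938fff & ~(0x400000 - 1)
+ *       = 0x938fff & 0xffc00000
+ *       = 0x800000
+ * and the in-window offset used for the actual access becomes
+ * 0x938fff - 0x800000 = 0x138fff (see dhdpcie_bus_chkandshift_bpoffset).
+ */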
+
+/**
+ * dhdpcie_bar1_window_switch_enab
+ *
+ * Check whether the chip requires BAR1 window switching, based on
+ * dongle_ram_base, ramsize and the mapped bar1_size, and set
+ * bus->bar1_switch_enab accordingly.
+ * @bus: dhd bus context
+ *
+ */
+void
+dhdpcie_bar1_window_switch_enab(dhd_bus_t *bus)
+{
+ uint32 ramstart = bus->dongle_ram_base;
+ uint32 ramend = bus->dongle_ram_base + bus->ramsize - 1;
+ uint32 bpwinstart = DHD_BUS_BAR1_BPWIN(ramstart, bus->bar1_size);
+ uint32 bpwinend = DHD_BUS_BAR1_BPWIN(ramend, bus->bar1_size);
+
+ bus->bar1_switch_enab = FALSE;
+
+ /*
+ * Window switch is needed to access complete BAR1
+ * if bpwinstart and bpwinend are different
+ */
+ if (bpwinstart != bpwinend) {
+ bus->bar1_switch_enab = TRUE;
+ }
+
+ DHD_ERROR(("%s: bar1_switch_enab=%d ramstart=0x%x ramend=0x%x bar1_size=0x%x\n",
+ __FUNCTION__, bus->bar1_switch_enab, ramstart, ramend, bus->bar1_size));
+}
+
+/**
+ * dhdpcie_setbar1win
+ *
+ * OS-independent function for setting the BAR1 window; it also records
+ * the new window position in bus->curr_bar1_win.
+ *
+ * @bus: dhd bus context
+ * @addr: new backplane window address for BAR1
+ */
+static void
+dhdpcie_setbar1win(dhd_bus_t *bus, uint32 addr)
+{
+ OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR1_WIN, 4, addr);
+ bus->curr_bar1_win = addr;
+}
+
+/**
+ * dhdpcie_bus_chkandshift_bpoffset
+ *
+ * Check whether the provided address is within the current BAR1 window;
+ * if not, shift the window.
+ *
+ * @bus: dhd bus context
+ * @offset: back plane address that the caller wants to access
+ *
+ * Return: new offset for access
+ */
+static ulong
+dhdpcie_bus_chkandshift_bpoffset(dhd_bus_t *bus, ulong offset)
+{
+
+ uint32 bpwin;
+#ifdef DHD_EFI
+ /* TODO: bar1_size is hardcoded for EFI. Below logic should be
+ * revisited. Also EFI platform should find bar1_size from
+ * EFI Kernel APIs
+ */
+ if (!bus->bar1_switch_enab) {
+ return offset;
+ }
+#endif /* DHD_EFI */
+ /* Determine BAR1 backplane window using window size
+ * Window address mask should be ~(size - 1)
+ */
+ bpwin = DHD_BUS_BAR1_BPWIN(offset, bus->bar1_size);
+
+ if (bpwin != bus->curr_bar1_win) {
+ DHD_PCIE_INFO(("%s: move BAR1 window curr_bar1_win=0x%x bpwin=0x%x offset=0x%lx\n",
+ __FUNCTION__, bus->curr_bar1_win, bpwin, offset));
+ /* Move BAR1 window */
+ dhdpcie_setbar1win(bus, bpwin);
+ }
+
+ return offset - bpwin;
+}
+
+/** 'offset' is a backplane address */
+void
+dhdpcie_bus_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data)
+{
+ ulong flags = 0;
+
+ if (bus->is_linkdown) {
+ DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
+ return;
+ }
+
+ DHD_BUS_BAR1_SWITCH_LOCK(bus, flags);
+
+ offset = dhdpcie_bus_chkandshift_bpoffset(bus, offset);
+
+#if defined(linux) || defined(LINUX)
+ W_REG(bus->dhd->osh, (volatile uint8 *)(bus->tcm + offset), data);
+#else
+ *(volatile uint8 *)(bus->tcm + offset) = (uint8)data;
+#endif /* linux || LINUX */
+
+ DHD_BUS_BAR1_SWITCH_UNLOCK(bus, flags);
+}
+
+void
+dhdpcie_bus_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data)
+{
+ ulong flags = 0;
+
+ if (bus->is_linkdown) {
+ DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
+ return;
+ }
+
+ DHD_BUS_BAR1_SWITCH_LOCK(bus, flags);
+
+ offset = dhdpcie_bus_chkandshift_bpoffset(bus, offset);
+
+#if defined(linux) || defined(LINUX)
+ W_REG(bus->dhd->osh, (volatile uint16 *)(bus->tcm + offset), data);
+#else
+ *(volatile uint16 *)(bus->tcm + offset) = (uint16)data;
+#endif /* linux || LINUX */
+
+ DHD_BUS_BAR1_SWITCH_UNLOCK(bus, flags);
+}
+
+void
+dhdpcie_bus_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data)
+{
+ ulong flags = 0;
+
+ if (bus->is_linkdown) {
+ DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
+ return;
+ }
+
+ DHD_BUS_BAR1_SWITCH_LOCK(bus, flags);
+
+ offset = dhdpcie_bus_chkandshift_bpoffset(bus, offset);
+
+#if defined(linux) || defined(LINUX)
+ W_REG(bus->dhd->osh, (volatile uint32 *)(bus->tcm + offset), data);
+#else
+ *(volatile uint32 *)(bus->tcm + offset) = (uint32)data;
+#endif /* linux || LINUX */
+
+ DHD_BUS_BAR1_SWITCH_UNLOCK(bus, flags);
+}
+
+#ifdef DHD_SUPPORT_64BIT
+void
+dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data)
+{
+ ulong flags = 0;
+
+ if (bus->is_linkdown) {
+ DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
+ return;
+ }
+
+ DHD_BUS_BAR1_SWITCH_LOCK(bus, flags);
+
+ offset = dhdpcie_bus_chkandshift_bpoffset(bus, offset);
+
+#if defined(linux) || defined(LINUX)
+ W_REG(bus->dhd->osh, (volatile uint64 *)(bus->tcm + offset), data);
+#else
+ *(volatile uint64 *)(bus->tcm + offset) = (uint64)data;
+#endif /* linux || LINUX */
+
+ DHD_BUS_BAR1_SWITCH_UNLOCK(bus, flags);
+}
+#endif /* DHD_SUPPORT_64BIT */
+
+uint8
+dhdpcie_bus_rtcm8(dhd_bus_t *bus, ulong offset)
+{
+ volatile uint8 data;
+ ulong flags = 0;
+
+ if (bus->is_linkdown) {
+ DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
+ data = (uint8)-1;
+ return data;
+ }
+
+ DHD_BUS_BAR1_SWITCH_LOCK(bus, flags);
+
+ offset = dhdpcie_bus_chkandshift_bpoffset(bus, offset);
+
+#if defined(linux) || defined(LINUX)
+ data = R_REG(bus->dhd->osh, (volatile uint8 *)(bus->tcm + offset));
+#else
+ data = *(volatile uint8 *)(bus->tcm + offset);
+#endif /* linux || LINUX */
+
+ DHD_BUS_BAR1_SWITCH_UNLOCK(bus, flags);
+ return data;
+}
+
+uint16
+dhdpcie_bus_rtcm16(dhd_bus_t *bus, ulong offset)
+{
+ volatile uint16 data;
+ ulong flags = 0;
+
+ if (bus->is_linkdown) {
+ DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
+ data = (uint16)-1;
+ return data;
+ }
+
+ DHD_BUS_BAR1_SWITCH_LOCK(bus, flags);
+
+ offset = dhdpcie_bus_chkandshift_bpoffset(bus, offset);
+
+#if defined(linux) || defined(LINUX)
+ data = R_REG(bus->dhd->osh, (volatile uint16 *)(bus->tcm + offset));
+#else
+ data = *(volatile uint16 *)(bus->tcm + offset);
+#endif /* linux || LINUX */
+
+ DHD_BUS_BAR1_SWITCH_UNLOCK(bus, flags);
+ return data;
+}
+
+uint32
+dhdpcie_bus_rtcm32(dhd_bus_t *bus, ulong offset)
+{
+ volatile uint32 data;
+ ulong flags = 0;
+
+ if (bus->is_linkdown) {
+ DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
+ data = (uint32)-1;
+ return data;
+ }
+
+ DHD_BUS_BAR1_SWITCH_LOCK(bus, flags);
+
+ offset = dhdpcie_bus_chkandshift_bpoffset(bus, offset);
+
+#if defined(linux) || defined(LINUX)
+ data = R_REG(bus->dhd->osh, (volatile uint32 *)(bus->tcm + offset));
+#else
+ data = *(volatile uint32 *)(bus->tcm + offset);
+#endif /* linux || LINUX */
+
+ DHD_BUS_BAR1_SWITCH_UNLOCK(bus, flags);
+ return data;
+}
+
+#ifdef DHD_SUPPORT_64BIT
+uint64
+dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset)
+{
+ volatile uint64 data;
+ ulong flags = 0;
+
+ if (bus->is_linkdown) {
+ DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
+ data = (uint64)-1;
+ return data;
+ }
+
+ DHD_BUS_BAR1_SWITCH_LOCK(bus, flags);
+
+ offset = dhdpcie_bus_chkandshift_bpoffset(bus, offset);
+
+#if defined(linux) || defined(LINUX)
+ data = R_REG(bus->dhd->osh, (volatile uint64 *)(bus->tcm + offset));
+#else
+ data = *(volatile uint64 *)(bus->tcm + offset);
+#endif /* linux || LINUX */
+
+ DHD_BUS_BAR1_SWITCH_UNLOCK(bus, flags);
+ return data;
+}
+#endif /* DHD_SUPPORT_64BIT */
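+
+/*
+ * Illustrative round trip through the TCM accessors above (a sketch only;
+ * 'addr' stands for a backplane address the caller already holds). Shared
+ * memory values are little-endian on the bus, so callers convert with
+ * HTOL16()/LTOH16(), as dhd_bus_cmn_writeshared()/dhd_bus_cmn_readshared()
+ * do:
+ *
+ *   dhdpcie_bus_wtcm16(bus, addr, HTOL16(w_idx));
+ *   r_idx = LTOH16(dhdpcie_bus_rtcm16(bus, addr));
+ */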
+
+void
+dhdpcie_update_ring_ptrs_in_tcm_with_req_pwr(dhd_bus_t *bus, void *data, uint8 type,
+ uint16 ringid, bool read, bool req_pwr)
+{
+ ulong addr;
+
+ if (type == RING_WR_UPD) {
+ addr = bus->ring_sh[ringid].ring_state_w;
+ } else if (type == RING_RD_UPD) {
+ addr = bus->ring_sh[ringid].ring_state_r;
+ } else {
+ DHD_ERROR(("%s: invalid type:%d\n", __FUNCTION__, type));
+ return;
+ }
+
+ if (req_pwr && MULTIBP_ENAB(bus->sih)) {
+ dhd_bus_pcie_pwr_req(bus);
+ }
+
+ if (read) {
+ /* Read */
+ *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr));
+ } else {
+ /* Write */
+ dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data));
+ }
+
+ if (req_pwr && MULTIBP_ENAB(bus->sih)) {
+ dhd_bus_pcie_pwr_req_clear(bus);
+ }
+}
+
+void
+dhdpcie_update_ring_ptrs_in_tcm(dhd_bus_t *bus, void *data, uint8 type, uint16 ringid,
+ bool read)
+{
+#ifdef PCIE_INB_DW
+ if (INBAND_DW_ENAB(bus)) {
+ ulong flags_ds;
+ DHD_BUS_DONGLE_DS_LOCK(bus->dongle_ds_lock, flags_ds);
+ dhdpcie_update_ring_ptrs_in_tcm_with_req_pwr(bus, data, type, ringid, read,
+ bus->dongle_in_deepsleep);
+ DHD_BUS_DONGLE_DS_UNLOCK(bus->dongle_ds_lock, flags_ds);
+ } else
+#endif /* PCIE_INB_DW */
+ {
+ /* Request power explicitly */
+ dhdpcie_update_ring_ptrs_in_tcm_with_req_pwr(bus, data, type, ringid, read, TRUE);
+ }
+}
+
+/** A snippet of dongle memory is shared between host and dongle */
+void
+dhd_bus_cmn_writeshared(dhd_bus_t *bus, void *data, uint32 len, uint8 type, uint16 ringid)
+{
+ uint64 long_data;
+ ulong addr; /* dongle address */
+
+ DHD_PCIE_INFO(("%s: writing to dongle type %d len %d\n", __FUNCTION__, type, len));
+
+ if (bus->is_linkdown) {
+ DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
+ return;
+ }
+
+ /*
+ * Use the explicit TCM ring pointer update functions when DMA indices are
+ * not enabled, as the backplane power request calls otherwise cause TPUT drops
+ */
+ if (!(bus->dhd->dma_d2h_ring_upd_support || bus->dhd->dma_h2d_ring_upd_support)) {
+ if ((type == RING_WR_UPD) || (type == RING_RD_UPD)) {
+ dhdpcie_update_ring_ptrs_in_tcm(bus, data, type, ringid, FALSE);
+ return;
+ }
+ }
+
+ if (MULTIBP_ENAB(bus->sih)) {
+ dhd_bus_pcie_pwr_req(bus);
+ }
+ switch (type) {
+ case RING_WR_UPD :
+ addr = bus->ring_sh[ringid].ring_state_w;
+ dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data));
+ break;
+
+ case RING_RD_UPD :
+ addr = bus->ring_sh[ringid].ring_state_r;
+ dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data));
+ break;
+
+ case D2H_DMA_SCRATCH_BUF:
+ addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_dma_scratch_buffer);
+ long_data = HTOL64(*(uint64 *)data);
+ dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
+ if (dhd_msg_level & DHD_INFO_VAL) {
+ prhex(__FUNCTION__, data, len);
+ }
+ break;
+
+ case D2H_DMA_SCRATCH_BUF_LEN :
+ addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_dma_scratch_buffer_len);
+ dhdpcie_bus_wtcm32(bus, addr, (uint32) HTOL32(*(uint32 *)data));
+ if (dhd_msg_level & DHD_INFO_VAL) {
+ prhex(__FUNCTION__, data, len);
+ }
+ break;
+
+ case H2D_DMA_INDX_WR_BUF:
+ long_data = HTOL64(*(uint64 *)data);
+ addr = DHD_RING_INFO_MEMBER_ADDR(bus, h2d_w_idx_hostaddr);
+ dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
+ if (dhd_msg_level & DHD_INFO_VAL) {
+ prhex(__FUNCTION__, data, len);
+ }
+ break;
+
+ case H2D_DMA_INDX_RD_BUF:
+ long_data = HTOL64(*(uint64 *)data);
+ addr = DHD_RING_INFO_MEMBER_ADDR(bus, h2d_r_idx_hostaddr);
+ dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
+ if (dhd_msg_level & DHD_INFO_VAL) {
+ prhex(__FUNCTION__, data, len);
+ }
+ break;
+
+ case D2H_DMA_INDX_WR_BUF:
+ long_data = HTOL64(*(uint64 *)data);
+ addr = DHD_RING_INFO_MEMBER_ADDR(bus, d2h_w_idx_hostaddr);
+ dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
+ if (dhd_msg_level & DHD_INFO_VAL) {
+ prhex(__FUNCTION__, data, len);
+ }
+ break;
+
+ case D2H_DMA_INDX_RD_BUF:
+ long_data = HTOL64(*(uint64 *)data);
+ addr = DHD_RING_INFO_MEMBER_ADDR(bus, d2h_r_idx_hostaddr);
+ dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
+ if (dhd_msg_level & DHD_INFO_VAL) {
+ prhex(__FUNCTION__, data, len);
+ }
+ break;
+
+ case H2D_IFRM_INDX_WR_BUF:
+ long_data = HTOL64(*(uint64 *)data);
+ addr = DHD_RING_INFO_MEMBER_ADDR(bus, ifrm_w_idx_hostaddr);
+ dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
+ if (dhd_msg_level & DHD_INFO_VAL) {
+ prhex(__FUNCTION__, data, len);
+ }
+ break;
+
+ case RING_ITEM_LEN :
+ addr = DHD_RING_MEM_MEMBER_ADDR(bus, ringid, len_items);
+ dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data));
+ break;
+
+ case RING_MAX_ITEMS :
+ addr = DHD_RING_MEM_MEMBER_ADDR(bus, ringid, max_item);
+ dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data));
+ break;
+
+ case RING_BUF_ADDR :
+ long_data = HTOL64(*(uint64 *)data);
+ addr = DHD_RING_MEM_MEMBER_ADDR(bus, ringid, base_addr);
+ dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *) &long_data, len);
+ if (dhd_msg_level & DHD_INFO_VAL) {
+ prhex(__FUNCTION__, data, len);
+ }
+ break;
+
+ case D2H_MB_DATA:
+ addr = bus->d2h_mb_data_ptr_addr;
+ dhdpcie_bus_wtcm32(bus, addr, (uint32) HTOL32(*(uint32 *)data));
+ break;
+
+ case H2D_MB_DATA:
+ addr = bus->h2d_mb_data_ptr_addr;
+ dhdpcie_bus_wtcm32(bus, addr, (uint32) HTOL32(*(uint32 *)data));
+ break;
+
+ case HOST_API_VERSION:
+ addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_cap);
+ dhdpcie_bus_wtcm32(bus, addr, (uint32) HTOL32(*(uint32 *)data));
+ break;
+
+ case DNGL_TO_HOST_TRAP_ADDR:
+ long_data = HTOL64(*(uint64 *)data);
+ addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_trap_addr);
+ dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *) &long_data, len);
+ DHD_PCIE_INFO(("Wrote trap addr:0x%x\n", (uint32) HTOL32(*(uint32 *)data)));
+ break;
+
+#ifdef D2H_MINIDUMP
+ case DNGL_TO_HOST_TRAP_ADDR_LEN:
+ addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, device_trap_debug_buffer_len);
+ dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data));
+ break;
+#endif /* D2H_MINIDUMP */
+
+ case HOST_SCB_ADDR:
+ addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_scb_addr);
+#ifdef DHD_SUPPORT_64BIT
+ dhdpcie_bus_wtcm64(bus, addr, (uint64) HTOL64(*(uint64 *)data));
+#else /* !DHD_SUPPORT_64BIT */
+ dhdpcie_bus_wtcm32(bus, addr, *((uint32*)data));
+#endif /* DHD_SUPPORT_64BIT */
+ DHD_PCIE_INFO(("Wrote host_scb_addr:0x%x\n",
+ (uint32) HTOL32(*(uint32 *)data)));
+ break;
+
+ default:
+ break;
+ }
+ if (MULTIBP_ENAB(bus->sih)) {
+ dhd_bus_pcie_pwr_req_clear(bus);
+ }
+} /* dhd_bus_cmn_writeshared */
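+
+/*
+ * Illustrative caller pattern for the routine above (a sketch; 'w_idx' is a
+ * hypothetical local): publishing a new host write index for ring 'ringid'
+ * into shared dongle memory:
+ *
+ *   uint16 w_idx = new_index;
+ *   dhd_bus_cmn_writeshared(bus, &w_idx, sizeof(w_idx), RING_WR_UPD, ringid);
+ */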
+
+/** A snippet of dongle memory is shared between host and dongle */
+void
+dhd_bus_cmn_readshared(dhd_bus_t *bus, void* data, uint8 type, uint16 ringid)
+{
+ ulong addr; /* dongle address */
+
+ /*
+ * Use the explicit TCM ring pointer update functions when DMA indices are
+ * not enabled, as the backplane power request calls otherwise cause TPUT drops
+ */
+ if (!(bus->dhd->dma_d2h_ring_upd_support || bus->dhd->dma_h2d_ring_upd_support)) {
+ if ((type == RING_WR_UPD) || (type == RING_RD_UPD)) {
+ dhdpcie_update_ring_ptrs_in_tcm(bus, data, type, ringid, TRUE);
+ return;
+ }
+ }
+
+ if (MULTIBP_ENAB(bus->sih)) {
+ dhd_bus_pcie_pwr_req(bus);
+ }
+ switch (type) {
+ case RING_WR_UPD :
+ addr = bus->ring_sh[ringid].ring_state_w;
+ *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr));
+ break;
+
+ case RING_RD_UPD :
+ addr = bus->ring_sh[ringid].ring_state_r;
+ *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr));
+ break;
+
+ case TOTAL_LFRAG_PACKET_CNT :
+ addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, total_lfrag_pkt_cnt);
+ *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr));
+ break;
+
+ case H2D_MB_DATA:
+ addr = bus->h2d_mb_data_ptr_addr;
+ *(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, addr));
+ break;
+
+ case D2H_MB_DATA:
+ addr = bus->d2h_mb_data_ptr_addr;
+ *(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, addr));
+ break;
+
+ case MAX_HOST_RXBUFS :
+ addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, max_host_rxbufs);
+ *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr));
+ break;
+
+ case HOST_SCB_ADDR:
+ addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_scb_size);
+ *(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, addr));
+ break;
+
+ default :
+ break;
+ }
+ if (MULTIBP_ENAB(bus->sih)) {
+ dhd_bus_pcie_pwr_req_clear(bus);
+ }
+}
+
+uint32 dhd_bus_get_sharedflags(dhd_bus_t *bus)
+{
+ return ((pciedev_shared_t*)bus->pcie_sh)->flags;
+}
+
+void dhd_prot_clearcounts(dhd_pub_t *dhd);
+
+void
+dhd_bus_clearcounts(dhd_pub_t *dhdp)
+{
+ dhd_prot_clearcounts(dhdp);
+}
+
+/**
+ * @param params input buffer, NULL for 'set' operation.
+ * @param plen length of 'params' buffer, 0 for 'set' operation.
+ * @param arg output buffer
+ */
+int
+dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name,
+ void *params, uint plen, void *arg, uint len, bool set)
+{
+ dhd_bus_t *bus = dhdp->bus;
+ const bcm_iovar_t *vi = NULL;
+ int bcmerror = BCME_UNSUPPORTED;
+ uint val_size;
+ uint32 actionid;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ ASSERT(name);
+ if (!name)
+ return BCME_BADARG;
+
+ /* Get MUST have return space */
+ ASSERT(set || (arg && len));
+ if (!(set || (arg && len)))
+ return BCME_BADARG;
+
+ /* Set does NOT take qualifiers */
+ ASSERT(!set || (!params && !plen));
+ if (!(!set || (!params && !plen)))
+ return BCME_BADARG;
+
+ DHD_PCIE_INFO(("%s: %s %s, len %d plen %d\n", __FUNCTION__,
+ name, (set ? "set" : "get"), len, plen));
+
+ /* Look up var locally; if not found pass to host driver */
+ if ((vi = bcm_iovar_lookup(dhdpcie_iovars, name)) == NULL) {
+ goto exit;
+ }
+
+ if (MULTIBP_ENAB(bus->sih)) {
+ if (vi->flags & DHD_IOVF_PWRREQ_BYPASS) {
+ DHD_ERROR(("%s: Bypass pwr request\n", __FUNCTION__));
+ } else {
+ dhd_bus_pcie_pwr_req(bus);
+ }
+ }
+
+ /* set up 'params' pointer in case this is a set command so that
+ * the convenience int and bool code can be common to set and get
+ */
+ if (params == NULL) {
+ params = arg;
+ plen = len;
+ }
+
+ if (vi->type == IOVT_VOID)
+ val_size = 0;
+ else if (vi->type == IOVT_BUFFER)
+ val_size = len;
+ else
+ /* all other types are integer sized */
+ val_size = sizeof(int);
+
+ actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
+ bcmerror = dhdpcie_bus_doiovar(bus, vi, actionid, name, params, plen, arg, len, val_size);
+
+exit:
+ /* In DEVRESET_QUIESCE/DEVRESET_ON, dongle re-attach re-initializes the
+ * pwr_req_ref count to 0, which would cause a pwr_req_ref count mismatch
+ * (and a hang) in the power request clear function.
+ * In this case, bypass the power request clear.
+ */
+ if (bcmerror == BCME_DNGL_DEVRESET) {
+ bcmerror = BCME_OK;
+ } else {
+ if (MULTIBP_ENAB(bus->sih)) {
+ if (vi != NULL) {
+ if (vi->flags & DHD_IOVF_PWRREQ_BYPASS) {
+ DHD_ERROR(("%s: Bypass pwr request clear\n", __FUNCTION__));
+ } else {
+ dhd_bus_pcie_pwr_req_clear(bus);
+ }
+ }
+ }
+ }
+ return bcmerror;
+} /* dhd_bus_iovar_op */
+
+#ifdef BCM_BUZZZ
+#include <bcm_buzzz.h>
+
+int
+dhd_buzzz_dump_cntrs(char *p, uint32 *core, uint32 *log,
+ const int num_counters)
+{
+ int bytes = 0;
+ uint32 ctr;
+ uint32 curr[BCM_BUZZZ_COUNTERS_MAX], prev[BCM_BUZZZ_COUNTERS_MAX];
+ uint32 delta[BCM_BUZZZ_COUNTERS_MAX];
+
+ /* Compute elapsed counter values per counter event type */
+ for (ctr = 0U; ctr < num_counters; ctr++) {
+ prev[ctr] = core[ctr];
+ curr[ctr] = *log++;
+ core[ctr] = curr[ctr]; /* saved for next log */
+
+ if (curr[ctr] < prev[ctr])
+ delta[ctr] = curr[ctr] + (~0U - prev[ctr]);
+ else
+ delta[ctr] = (curr[ctr] - prev[ctr]);
+
+ bytes += sprintf(p + bytes, "%12u ", delta[ctr]);
+ }
+
+ return bytes;
+}
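+
+/*
+ * Example of the wrap handling above (illustrative values): with
+ * prev = 0xfffffff0 and curr = 0x00000010 the counter has wrapped, so the
+ * code computes delta = curr + (~0U - prev) = 0x10 + 0xf = 0x1f.
+ */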
+
+typedef union cm3_cnts { /* export this in bcm_buzzz.h */
+ uint32 u32;
+ uint8 u8[4];
+ struct {
+ uint8 cpicnt;
+ uint8 exccnt;
+ uint8 sleepcnt;
+ uint8 lsucnt;
+ };
+} cm3_cnts_t;
+
+int
+dhd_bcm_buzzz_dump_cntrs6(char *p, uint32 *core, uint32 *log)
+{
+ int bytes = 0;
+
+ uint32 cyccnt, instrcnt;
+ cm3_cnts_t cm3_cnts;
+ uint8 foldcnt;
+
+ { /* 32bit cyccnt */
+ uint32 curr, prev, delta;
+ prev = core[0]; curr = *log++; core[0] = curr;
+ if (curr < prev)
+ delta = curr + (~0U - prev);
+ else
+ delta = (curr - prev);
+
+ bytes += sprintf(p + bytes, "%12u ", delta);
+ cyccnt = delta;
+ }
+
+ { /* Extract the 4 cnts: cpi, exc, sleep and lsu */
+ int i;
+ uint8 max8 = ~0;
+ cm3_cnts_t curr, prev, delta;
+ prev.u32 = core[1]; curr.u32 = *log++; core[1] = curr.u32;
+ for (i = 0; i < 4; i++) {
+ if (curr.u8[i] < prev.u8[i])
+ delta.u8[i] = curr.u8[i] + (max8 - prev.u8[i]);
+ else
+ delta.u8[i] = (curr.u8[i] - prev.u8[i]);
+ bytes += sprintf(p + bytes, "%4u ", delta.u8[i]);
+ }
+ cm3_cnts.u32 = delta.u32;
+ }
+
+ { /* Extract the foldcnt from arg0 */
+ uint8 curr, prev, delta, max8 = ~0;
+ bcm_buzzz_arg0_t arg0; arg0.u32 = *log;
+ prev = core[2]; curr = arg0.klog.cnt; core[2] = curr;
+ if (curr < prev)
+ delta = curr + (max8 - prev);
+ else
+ delta = (curr - prev);
+ bytes += sprintf(p + bytes, "%4u ", delta);
+ foldcnt = delta;
+ }
+
+ instrcnt = cyccnt - (cm3_cnts.u8[0] + cm3_cnts.u8[1] + cm3_cnts.u8[2]
+ + cm3_cnts.u8[3]) + foldcnt;
+ if (instrcnt > 0xFFFFFF00)
+ bytes += sprintf(p + bytes, "[%10s] ", "~");
+ else
+ bytes += sprintf(p + bytes, "[%10u] ", instrcnt);
+ return bytes;
+}
+
+int
+dhd_buzzz_dump_log(char *p, uint32 *core, uint32 *log, bcm_buzzz_t *buzzz)
+{
+ int bytes = 0;
+ bcm_buzzz_arg0_t arg0;
+ static uint8 * fmt[] = BCM_BUZZZ_FMT_STRINGS;
+
+ if (buzzz->counters == 6) {
+ bytes += dhd_bcm_buzzz_dump_cntrs6(p, core, log);
+ log += 2; /* 32bit cyccnt + (4 x 8bit) CM3 */
+ } else {
+ bytes += dhd_buzzz_dump_cntrs(p, core, log, buzzz->counters);
+ log += buzzz->counters; /* (N x 32bit) CR4=3, CA7=4 */
+ }
+
+ /* Dump the logged arguments using the registered formats */
+ arg0.u32 = *log++;
+
+ switch (arg0.klog.args) {
+ case 0:
+ bytes += sprintf(p + bytes, fmt[arg0.klog.id]);
+ break;
+ case 1:
+ {
+ uint32 arg1 = *log++;
+ bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1);
+ break;
+ }
+ case 2:
+ {
+ uint32 arg1, arg2;
+ arg1 = *log++; arg2 = *log++;
+ bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1, arg2);
+ break;
+ }
+ case 3:
+ {
+ uint32 arg1, arg2, arg3;
+ arg1 = *log++; arg2 = *log++; arg3 = *log++;
+ bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1, arg2, arg3);
+ break;
+ }
+ case 4:
+ {
+ uint32 arg1, arg2, arg3, arg4;
+ arg1 = *log++; arg2 = *log++;
+ arg3 = *log++; arg4 = *log++;
+ bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1, arg2, arg3, arg4);
+ break;
+ }
+ default:
+ printf("%s: Maximum one argument supported\n", __FUNCTION__);
+ break;
+ }
+
+ bytes += sprintf(p + bytes, "\n");
+
+ return bytes;
+}
+
+void dhd_buzzz_dump(bcm_buzzz_t *buzzz_p, void *buffer_p, char *p)
+{
+ int i;
+ uint32 total, part1, part2, log_sz, core[BCM_BUZZZ_COUNTERS_MAX];
+ void * log;
+
+ for (i = 0; i < BCM_BUZZZ_COUNTERS_MAX; i++) {
+ core[i] = 0;
+ }
+
+ log_sz = buzzz_p->log_sz;
+
+ part1 = ((uint32)buzzz_p->cur - (uint32)buzzz_p->log) / log_sz;
+
+ if (buzzz_p->wrap == TRUE) {
+ part2 = ((uint32)buzzz_p->end - (uint32)buzzz_p->cur) / log_sz;
+ total = (buzzz_p->buffer_sz - BCM_BUZZZ_LOGENTRY_MAXSZ) / log_sz;
+ } else {
+ part2 = 0U;
+ total = buzzz_p->count;
+ }
+
+ if (total == 0U) {
+ printf("%s: bcm_buzzz_dump total<%u> done\n", __FUNCTION__, total);
+ return;
+ } else {
+ printf("%s: bcm_buzzz_dump total<%u> : part2<%u> + part1<%u>\n", __FUNCTION__,
+ total, part2, part1);
+ }
+
+ if (part2) { /* with wrap */
+ log = (void*)((size_t)buffer_p + (buzzz_p->cur - buzzz_p->log));
+ while (part2--) { /* from cur to end : part2 */
+ p[0] = '\0';
+ dhd_buzzz_dump_log(p, core, (uint32 *)log, buzzz_p);
+ printf("%s", p);
+ log = (void*)((size_t)log + buzzz_p->log_sz);
+ }
+ }
+
+ log = (void*)buffer_p;
+ while (part1--) {
+ p[0] = '\0';
+ dhd_buzzz_dump_log(p, core, (uint32 *)log, buzzz_p);
+ printf("%s", p);
+ log = (void*)((size_t)log + buzzz_p->log_sz);
+ }
+
+ printf("%s: bcm_buzzz_dump done.\n", __FUNCTION__);
+}
+
+int dhd_buzzz_dump_dngl(dhd_bus_t *bus)
+{
+ bcm_buzzz_t * buzzz_p = NULL;
+ void * buffer_p = NULL;
+ char * page_p = NULL;
+ pciedev_shared_t *sh;
+ int ret = 0;
+
+ if (bus->dhd->busstate != DHD_BUS_DATA) {
+ return BCME_UNSUPPORTED;
+ }
+ if ((page_p = (char *)MALLOC(bus->dhd->osh, 4096)) == NULL) {
+ printf("%s: Page memory allocation failure\n", __FUNCTION__);
+ goto done;
+ }
+ if ((buzzz_p = MALLOC(bus->dhd->osh, sizeof(bcm_buzzz_t))) == NULL) {
+ printf("%s: BCM BUZZZ memory allocation failure\n", __FUNCTION__);
+ goto done;
+ }
+
+ ret = dhdpcie_readshared(bus);
+ if (ret < 0) {
+ DHD_ERROR(("%s :Shared area read failed \n", __FUNCTION__));
+ goto done;
+ }
+
+ sh = bus->pcie_sh;
+
+ DHD_INFO(("%s buzzz:%08x\n", __FUNCTION__, sh->buzz_dbg_ptr));
+
+ if (sh->buzz_dbg_ptr != 0U) { /* Fetch and display dongle BUZZZ Trace */
+
+ dhdpcie_bus_membytes(bus, FALSE, (ulong)sh->buzz_dbg_ptr,
+ (uint8 *)buzzz_p, sizeof(bcm_buzzz_t));
+
+ printf("BUZZZ[0x%08x]: log<0x%08x> cur<0x%08x> end<0x%08x> "
+ "count<%u> status<%u> wrap<%u>\n"
+ "cpu<0x%02X> counters<%u> group<%u> buffer_sz<%u> log_sz<%u>\n",
+ (int)sh->buzz_dbg_ptr,
+ (int)buzzz_p->log, (int)buzzz_p->cur, (int)buzzz_p->end,
+ buzzz_p->count, buzzz_p->status, buzzz_p->wrap,
+ buzzz_p->cpu_idcode, buzzz_p->counters, buzzz_p->group,
+ buzzz_p->buffer_sz, buzzz_p->log_sz);
+
+ if (buzzz_p->count == 0) {
+ printf("%s: Empty dongle BUZZZ trace\n\n", __FUNCTION__);
+ goto done;
+ }
+
+ /* Allocate memory for trace buffer and format strings */
+ buffer_p = MALLOC(bus->dhd->osh, buzzz_p->buffer_sz);
+ if (buffer_p == NULL) {
+ printf("%s: Buffer memory allocation failure\n", __FUNCTION__);
+ goto done;
+ }
+
+ /* Fetch the trace. format strings are exported via bcm_buzzz.h */
+ dhdpcie_bus_membytes(bus, FALSE, (uint32)buzzz_p->log, /* Trace */
+ (uint8 *)buffer_p, buzzz_p->buffer_sz);
+
+ /* Process and display the trace using formatted output */
+
+ {
+ int ctr;
+ for (ctr = 0; ctr < buzzz_p->counters; ctr++) {
+ printf("<Evt[%02X]> ", buzzz_p->eventid[ctr]);
+ }
+ printf("<code execution point>\n");
+ }
+
+ dhd_buzzz_dump(buzzz_p, buffer_p, page_p);
+
+ printf("%s: ----- End of dongle BCM BUZZZ Trace -----\n\n", __FUNCTION__);
+
+ MFREE(bus->dhd->osh, buffer_p, buzzz_p->buffer_sz); buffer_p = NULL;
+ }
+
+done:
+
+ if (page_p) MFREE(bus->dhd->osh, page_p, 4096);
+ /* free buffer_p before buzzz_p, since its size is read from buzzz_p */
+ if (buffer_p) MFREE(bus->dhd->osh, buffer_p, buzzz_p->buffer_sz);
+ if (buzzz_p) MFREE(bus->dhd->osh, buzzz_p, sizeof(bcm_buzzz_t));
+
+ return BCME_OK;
+}
+#endif /* BCM_BUZZZ */
+
+#define PCIE_GEN2(sih) ((BUSTYPE((sih)->bustype) == PCI_BUS) && \
+ ((sih)->buscoretype == PCIE2_CORE_ID))
+#ifdef DHD_PCIE_REG_ACCESS
+static bool
+pcie2_mdiosetblock(dhd_bus_t *bus, uint blk)
+{
+ uint mdiodata, mdioctrl, i = 0;
+ uint pcie_serdes_spinwait = 200;
+
+ mdioctrl = MDIOCTL2_DIVISOR_VAL | (0x1F << MDIOCTL2_REGADDR_SHF);
+ mdiodata = (blk << MDIODATA2_DEVADDR_SHF) | MDIODATA2_DONE;
+
+ si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_CONTROL, ~0, mdioctrl);
+ si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_WR_DATA, ~0, mdiodata);
+
+ OSL_DELAY(10);
+ /* retry till the transaction is complete */
+ while (i < pcie_serdes_spinwait) {
+ uint mdioctrl_read = si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_WR_DATA,
+ 0, 0);
+ if (!(mdioctrl_read & MDIODATA2_DONE)) {
+ break;
+ }
+ OSL_DELAY(1000);
+ i++;
+ }
+
+ if (i >= pcie_serdes_spinwait) {
+ DHD_ERROR(("pcie_mdiosetblock: timed out\n"));
+ return FALSE;
+ }
+
+ return TRUE;
+}
+#endif /* DHD_PCIE_REG_ACCESS */
+
+static void
+dhdpcie_enum_reg_init(dhd_bus_t *bus)
+{
+ /* initialize Function control register (clear bit 4) to HW init value */
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, ftn_ctrl.control), ~0,
+ PCIE_CPLCA_ENABLE | PCIE_DLY_PERST_TO_COE);
+
+ /* clear IntMask */
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, ftn_ctrl.intmask), ~0, 0);
+ /* clear IntStatus */
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, ftn_ctrl.intstatus), ~0,
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, ftn_ctrl.intstatus), 0, 0));
+
+ /* clear MSIVector */
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, ftn_ctrl.msi_vector), ~0, 0);
+ /* clear MSIIntMask */
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, ftn_ctrl.msi_intmask), ~0, 0);
+ /* clear MSIIntStatus */
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, ftn_ctrl.msi_intstatus), ~0,
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, ftn_ctrl.msi_intstatus), 0, 0));
+
+ /* clear PowerIntMask */
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, ftn_ctrl.pwr_intmask), ~0, 0);
+ /* clear PowerIntStatus */
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, ftn_ctrl.pwr_intstatus), ~0,
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, ftn_ctrl.pwr_intstatus), 0, 0));
+
+ /* clear MailboxIntMask */
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, ftn_ctrl.mbox_intmask), ~0, 0);
+ /* clear MailboxInt */
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, ftn_ctrl.mbox_intstatus), ~0,
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, ftn_ctrl.mbox_intstatus), 0, 0));
+}
+
+int
+dhd_bus_perform_flr(dhd_bus_t *bus, bool force_fail)
+{
+ uint flr_capab;
+ uint val;
+ int retry = 0;
+
+ DHD_ERROR(("******** Perform FLR ********\n"));
+
+ /* Kernel Panic for 4378Ax during traptest/devreset4 reload case:
+ * For 4378Ax, enum registers will not be reset with FLR (producer index WAR).
+ * So, the MailboxIntMask is left as 0xffff during fw boot-up,
+ * and the fw trap handling during fw boot causes Kernel Panic.
+ * Jira: SWWLAN-212578: [4378A0 PCIe DVT] :
+ * Kernel Panic seen in F0 FLR with BT Idle/Traffic/DMA
+ */
+ if (bus->sih && PCIE_ENUM_RESET_WAR_ENAB(bus->sih->buscorerev)) {
+ if (bus->pcie_mailbox_mask != 0) {
+ dhdpcie_bus_intr_disable(bus);
+ }
+ /* initialize F0 enum registers before FLR for rev66/67 */
+ dhdpcie_enum_reg_init(bus);
+ }
+
+ /* Read PCIE_CFG_DEVICE_CAPABILITY bit 28 to check FLR capability */
+ val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_DEVICE_CAPABILITY, sizeof(val));
+ flr_capab = val & (1 << PCIE_FLR_CAPAB_BIT);
+ DHD_INFO(("Read Device Capability: reg=0x%x read val=0x%x flr_capab=0x%x\n",
+ PCIE_CFG_DEVICE_CAPABILITY, val, flr_capab));
+ if (!flr_capab) {
+ DHD_ERROR(("Chip does not support FLR\n"));
+ return BCME_UNSUPPORTED;
+ }
+
+#if defined(NDIS) && defined(BT_OVER_PCIE)
+ dhd_bwm_bt_quiesce(bus);
+#endif
+
+ /* Save pcie config space */
+ DHD_INFO(("Save Pcie Config Space\n"));
+ DHD_PCIE_CONFIG_SAVE(bus);
+
+ /* Set bit 15 of PCIE_CFG_DEVICE_CONTROL */
+ DHD_INFO(("Set PCIE_FUNCTION_LEVEL_RESET_BIT(%d) of PCIE_CFG_DEVICE_CONTROL(0x%x)\n",
+ PCIE_FUNCTION_LEVEL_RESET_BIT, PCIE_CFG_DEVICE_CONTROL));
+ val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val));
+ DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val));
+ val = val | (1 << PCIE_FUNCTION_LEVEL_RESET_BIT);
+ DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val));
+ OSL_PCI_WRITE_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val), val);
+
+ /* wait for DHD_FUNCTION_LEVEL_RESET_DELAY msec */
+#ifdef BCMSLTGT
+ DHD_ERROR(("Delay of %d msec\n", DHD_FUNCTION_LEVEL_RESET_DELAY * htclkratio));
+#else
+ DHD_INFO(("Delay of %d msec\n", DHD_FUNCTION_LEVEL_RESET_DELAY));
+#endif /* BCMSLTGT */
+
+ CAN_SLEEP() ? OSL_SLEEP(DHD_FUNCTION_LEVEL_RESET_DELAY) :
+ OSL_DELAY(DHD_FUNCTION_LEVEL_RESET_DELAY * USEC_PER_MSEC);
+
+ if (force_fail) {
+ DHD_ERROR(("Set PCIE_SSRESET_DISABLE_BIT(%d) of PCIE_CFG_SUBSYSTEM_CONTROL(0x%x)\n",
+ PCIE_SSRESET_DISABLE_BIT, PCIE_CFG_SUBSYSTEM_CONTROL));
+ val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val));
+ DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL,
+ val));
+ val = val | (1 << PCIE_SSRESET_DISABLE_BIT);
+ DHD_ERROR(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL,
+ val));
+ OSL_PCI_WRITE_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val), val);
+
+ val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val));
+ DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL,
+ val));
+ }
+
+ /* Clear bit 15 of PCIE_CFG_DEVICE_CONTROL */
+ DHD_INFO(("Clear PCIE_FUNCTION_LEVEL_RESET_BIT(%d) of PCIE_CFG_DEVICE_CONTROL(0x%x)\n",
+ PCIE_FUNCTION_LEVEL_RESET_BIT, PCIE_CFG_DEVICE_CONTROL));
+ val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val));
+ DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val));
+ val = val & ~(1 << PCIE_FUNCTION_LEVEL_RESET_BIT);
+ DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val));
+ OSL_PCI_WRITE_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val), val);
+
+ /* Wait till bit 13 of PCIE_CFG_SUBSYSTEM_CONTROL is cleared */
+ DHD_INFO(("Wait till PCIE_SSRESET_STATUS_BIT(%d) of PCIE_CFG_SUBSYSTEM_CONTROL(0x%x)"
+ "is cleared\n", PCIE_SSRESET_STATUS_BIT, PCIE_CFG_SUBSYSTEM_CONTROL));
+ do {
+ val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val));
+ DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n",
+ PCIE_CFG_SUBSYSTEM_CONTROL, val));
+ val = val & (1 << PCIE_SSRESET_STATUS_BIT);
+ OSL_DELAY(DHD_SSRESET_STATUS_RETRY_DELAY);
+ } while (val && (retry++ < DHD_SSRESET_STATUS_RETRIES));
+
+ if (val) {
+ DHD_ERROR(("ERROR: reg=0x%x bit %d is not cleared\n",
+ PCIE_CFG_SUBSYSTEM_CONTROL, PCIE_SSRESET_STATUS_BIT));
+ /* User has to fire the IOVAR again, if force_fail is needed */
+ if (force_fail) {
+ bus->flr_force_fail = FALSE;
+ DHD_ERROR(("%s cleared flr_force_fail flag\n", __FUNCTION__));
+ }
+ return BCME_DONGLE_DOWN;
+ }
+
+ /* Restore pcie config space */
+ DHD_INFO(("Restore Pcie Config Space\n"));
+ DHD_PCIE_CONFIG_RESTORE(bus);
+
+#if defined(NDIS) && defined(BT_OVER_PCIE)
+ dhd_bwm_bt_resume(bus);
+#endif
+
+ DHD_ERROR(("******** FLR Succedeed ********\n"));
+
+ return BCME_OK;
+}
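+
+/*
+ * FLR sequence summary (descriptive note, as implemented above): save the
+ * PCIe config space, set PCIE_FUNCTION_LEVEL_RESET_BIT in
+ * PCIE_CFG_DEVICE_CONTROL, wait DHD_FUNCTION_LEVEL_RESET_DELAY msec, clear
+ * the bit again, poll PCIE_SSRESET_STATUS_BIT in PCIE_CFG_SUBSYSTEM_CONTROL
+ * until it clears, and finally restore the saved config space.
+ */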
+
+#define DHD_BP_RESET_ASPM_DISABLE_DELAY 500u /* usec */
+
+#define DHD_BP_RESET_STATUS_RETRY_DELAY 40u /* usec */
+#define DHD_BP_RESET_STATUS_RETRIES 50u
+
+#define PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_BIT 10
+#define PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_STATUS_BIT 12
+
+int
+dhd_bus_cfg_ss_ctrl_bp_reset(struct dhd_bus *bus)
+{
+ uint val;
+ int retry = 0;
+ int ret = BCME_OK;
+ bool reset_stat_bit;
+
+ DHD_ERROR(("******** Perform BP reset ********\n"));
+
+ /* Disable ASPM */
+ DHD_ERROR(("Disable ASPM: Clear bits(1-0) of PCIECFGREG_LINK_STATUS_CTRL(0x%x)\n",
+ PCIECFGREG_LINK_STATUS_CTRL));
+ val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val));
+ DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
+ val = val & (~PCIE_ASPM_ENAB);
+ DHD_ERROR(("write_config: reg=0x%x write val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
+ OSL_PCI_WRITE_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val), val);
+
+ /* wait for delay usec */
+ DHD_ERROR(("Delay of %d usec\n", DHD_BP_RESET_ASPM_DISABLE_DELAY));
+ OSL_DELAY(DHD_BP_RESET_ASPM_DISABLE_DELAY);
+
+ /* Set bp reset bit 10 of PCIE_CFG_SUBSYSTEM_CONTROL */
+ DHD_ERROR(("Set PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_BIT(%d)"
+ " of PCIE_CFG_SUBSYSTEM_CONTROL(0x%x)\n",
+ PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_BIT, PCIE_CFG_SUBSYSTEM_CONTROL));
+ val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val));
+ DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL, val));
+ val = val | (1 << PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_BIT);
+ DHD_ERROR(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL, val));
+ OSL_PCI_WRITE_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val), val);
+
+ /* Wait till bp reset status bit 12 of PCIE_CFG_SUBSYSTEM_CONTROL is set */
+ DHD_ERROR(("Wait till PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_STATUS_BIT(%d) of "
+ "PCIE_CFG_SUBSYSTEM_CONTROL(0x%x) is set\n",
+ PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_STATUS_BIT, PCIE_CFG_SUBSYSTEM_CONTROL));
+ do {
+ val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val));
+ DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n",
+ PCIE_CFG_SUBSYSTEM_CONTROL, val));
+ reset_stat_bit = val & (1 << PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_STATUS_BIT);
+ OSL_DELAY(DHD_BP_RESET_STATUS_RETRY_DELAY);
+ } while (!reset_stat_bit && (retry++ < DHD_BP_RESET_STATUS_RETRIES));
+
+ if (!reset_stat_bit) {
+ DHD_ERROR(("ERROR: reg=0x%x bit %d is not set\n",
+ PCIE_CFG_SUBSYSTEM_CONTROL,
+ PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_STATUS_BIT));
+ ret = BCME_ERROR;
+ goto aspm_enab;
+ }
+
+ /* Clear bp reset bit 10 of PCIE_CFG_SUBSYSTEM_CONTROL */
+ DHD_ERROR(("Clear PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_BIT(%d)"
+ " of PCIECFGREG_SPROM_CTRL(0x%x)\n",
+ PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_BIT, PCIE_CFG_SUBSYSTEM_CONTROL));
+ val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val));
+ DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL, val));
+ val = val & ~(1 << PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_BIT);
+ DHD_ERROR(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL, val));
+ OSL_PCI_WRITE_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val), val);
+
+ /* Wait till bp reset status bit 12 of PCIE_CFG_SUBSYSTEM_CONTROL is cleared */
+ DHD_ERROR(("Wait till PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_STATUS_BIT(%d) of "
+ "PCIE_CFG_SUBSYSTEM_CONTROL(0x%x) is cleared\n",
+ PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_STATUS_BIT, PCIE_CFG_SUBSYSTEM_CONTROL));
+ do {
+ val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val));
+ DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n",
+ PCIE_CFG_SUBSYSTEM_CONTROL, val));
+ reset_stat_bit = val & (1 << PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_STATUS_BIT);
+ OSL_DELAY(DHD_BP_RESET_STATUS_RETRY_DELAY);
+ } while (reset_stat_bit && (retry++ < DHD_BP_RESET_STATUS_RETRIES));
+
+ if (reset_stat_bit) {
+ DHD_ERROR(("ERROR: reg=0x%x bit %d is not cleared\n",
+ PCIE_CFG_SUBSYSTEM_CONTROL,
+ PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_STATUS_BIT));
+ ret = BCME_ERROR;
+ }
+
+aspm_enab:
+ /* Enable ASPM */
+ DHD_ERROR(("Enable ASPM: set bit 1 of PCIECFGREG_LINK_STATUS_CTRL(0x%x)\n",
+ PCIECFGREG_LINK_STATUS_CTRL));
+ val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val));
+ DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
+ val = val | (PCIE_ASPM_L1_ENAB);
+ DHD_ERROR(("write_config: reg=0x%x write val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
+ OSL_PCI_WRITE_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val), val);
+
+ if (ret) {
+ DHD_ERROR(("******** BP reset Failed ********\n"));
+ } else {
+ DHD_ERROR(("******** BP reset Succedeed ********\n"));
+ }
+
+ return ret;
+}
+
+#define PCIE_CFG_SPROM_CTRL_SB_RESET_BIT 10
+#define PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT 21
+
+int
+dhd_bus_cfg_sprom_ctrl_bp_reset(struct dhd_bus *bus)
+{
+ uint val;
+ int retry = 0;
+ uint dar_clk_ctrl_status_reg = DAR_CLK_CTRL(bus->sih->buscorerev);
+ int ret = BCME_OK;
+ bool cond;
+
+ DHD_ERROR(("******** Perform BP reset ********\n"));
+
+ /* Disable ASPM */
+ DHD_INFO(("Disable ASPM: Clear bits(1-0) of PCIECFGREG_LINK_STATUS_CTRL(0x%x)\n",
+ PCIECFGREG_LINK_STATUS_CTRL));
+ val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val));
+ DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
+ val = val & (~PCIE_ASPM_ENAB);
+ DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
+ OSL_PCI_WRITE_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val), val);
+
+ /* wait for delay usec */
+ DHD_INFO(("Delay of %d usec\n", DHD_BP_RESET_ASPM_DISABLE_DELAY));
+ OSL_DELAY(DHD_BP_RESET_ASPM_DISABLE_DELAY);
+
+ /* Set bit 10 of PCIECFGREG_SPROM_CTRL */
+ DHD_INFO(("Set PCIE_CFG_SPROM_CTRL_SB_RESET_BIT(%d) of PCIECFGREG_SPROM_CTRL(0x%x)\n",
+ PCIE_CFG_SPROM_CTRL_SB_RESET_BIT, PCIECFGREG_SPROM_CTRL));
+ val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_SPROM_CTRL, sizeof(val));
+ DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_SPROM_CTRL, val));
+ val = val | (1 << PCIE_CFG_SPROM_CTRL_SB_RESET_BIT);
+ DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIECFGREG_SPROM_CTRL, val));
+ OSL_PCI_WRITE_CONFIG(bus->osh, PCIECFGREG_SPROM_CTRL, sizeof(val), val);
+
+ /* Wait till the backplane reset is ASSERTED, i.e.
+ * bit 10 of PCIECFGREG_SPROM_CTRL is cleared.
+ * Only after this is polling bit 21 of DAR reg 0xAE0 valid;
+ * otherwise the DAR register reads a stale (previous) value.
+ */
+ DHD_INFO(("Wait till PCIE_CFG_SPROM_CTRL_SB_RESET_BIT(%d) of "
+ "PCIECFGREG_SPROM_CTRL(0x%x) is cleared\n",
+ PCIE_CFG_SPROM_CTRL_SB_RESET_BIT, PCIECFGREG_SPROM_CTRL));
+ do {
+ val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_SPROM_CTRL, sizeof(val));
+ DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_SPROM_CTRL, val));
+ cond = val & (1 << PCIE_CFG_SPROM_CTRL_SB_RESET_BIT);
+ OSL_DELAY(DHD_BP_RESET_STATUS_RETRY_DELAY);
+ } while (cond && (retry++ < DHD_BP_RESET_STATUS_RETRIES));
+
+ if (cond) {
+ DHD_ERROR(("ERROR: reg=0x%x bit %d is not cleared\n",
+ PCIECFGREG_SPROM_CTRL, PCIE_CFG_SPROM_CTRL_SB_RESET_BIT));
+ ret = BCME_ERROR;
+ goto aspm_enab;
+ }
+
+ /* Wait till bit 21 of dar_clk_ctrl_status_reg is cleared */
+ DHD_INFO(("Wait till PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT(%d) of "
+ "dar_clk_ctrl_status_reg(0x%x) is cleared\n",
+ PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT, dar_clk_ctrl_status_reg));
+ do {
+ val = si_corereg(bus->sih, bus->sih->buscoreidx,
+ dar_clk_ctrl_status_reg, 0, 0);
+ DHD_INFO(("read_dar si_corereg: reg=0x%x read val=0x%x\n",
+ dar_clk_ctrl_status_reg, val));
+ cond = val & (1 << PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT);
+ OSL_DELAY(DHD_BP_RESET_STATUS_RETRY_DELAY);
+ } while (cond && (retry++ < DHD_BP_RESET_STATUS_RETRIES));
+
+ if (cond) {
+ DHD_ERROR(("ERROR: reg=0x%x bit %d is not cleared\n",
+ dar_clk_ctrl_status_reg, PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT));
+ ret = BCME_ERROR;
+ }
+
+aspm_enab:
+ /* Enable ASPM */
+ DHD_INFO(("Enable ASPM: set bit 1 of PCIECFGREG_LINK_STATUS_CTRL(0x%x)\n",
+ PCIECFGREG_LINK_STATUS_CTRL));
+ val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val));
+ DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
+ val = val | (PCIE_ASPM_L1_ENAB);
+ DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
+ OSL_PCI_WRITE_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val), val);
+
+ DHD_ERROR(("******** BP reset Succedeed ********\n"));
+
+ return ret;
+}
+
+#if defined(LINUX) || defined(linux)
+int
+dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag)
+{
+ dhd_bus_t *bus = dhdp->bus;
+ int bcmerror = 0;
+ unsigned long flags;
+ int retry = POWERUP_MAX_RETRY;
+
+ if (flag == TRUE) { /* Turn off WLAN */
+ /* Removing Power */
+ DHD_ERROR(("%s: == Power OFF ==\n", __FUNCTION__));
+
+ /* wait for other contexts to finish -- if required, a 1 s
+ * OSL_DELAY call can be added here to give other contexts
+ * a chance to finish
+ */
+ dhdpcie_advertise_bus_cleanup(bus->dhd);
+
+ if (bus->dhd->busstate != DHD_BUS_DOWN) {
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+ atomic_set(&bus->dhd->block_bus, TRUE);
+ dhd_flush_rx_tx_wq(bus->dhd);
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+
+#ifdef BCMPCIE_OOB_HOST_WAKE
+ /* Clean up any pending host wake IRQ */
+ dhd_bus_oob_intr_set(bus->dhd, FALSE);
+ dhd_bus_oob_intr_unregister(bus->dhd);
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+ dhd_os_wd_timer(dhdp, 0);
+ dhd_bus_stop(bus, TRUE);
+ if (bus->intr) {
+ dhdpcie_bus_intr_disable(bus);
+ dhdpcie_free_irq(bus);
+ }
+ dhd_deinit_bus_lp_state_lock(bus);
+ dhd_deinit_bar1_switch_lock(bus);
+ dhd_deinit_backplane_access_lock(bus);
+ dhd_deinit_pwr_req_lock(bus);
+#ifdef PCIE_INB_DW
+ dhd_deinit_dongle_ds_lock(bus);
+#endif /* PCIE_INB_DW */
+ dhd_bus_release_dongle(bus);
+ dhdpcie_bus_free_resource(bus);
+ bcmerror = dhdpcie_bus_disable_device(bus);
+ if (bcmerror) {
+ DHD_ERROR(("%s: dhdpcie_bus_disable_device: %d\n",
+ __FUNCTION__, bcmerror));
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+ atomic_set(&bus->dhd->block_bus, FALSE);
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+ }
+ /* Clean up protocol data after the Bus Master Enable bit is cleared,
+ * so that the host can safely unmap DMA and remove the allocated
+ * buffers from the PKTID MAP. Some Application Processors with
+ * System MMU support trigger a kernel panic when they detect an
+ * access to DMA-unmapped memory from a device behind the System MMU.
+ * Since the dongle could still access DMA-unmapped memory after
+ * dhd_prot_reset(), the dhd_prot_reset() and dhd_clear() calls
+ * must come after dhdpcie_bus_disable_device().
+ */
+ dhd_prot_reset(dhdp);
+ /* XXX Reset dhd_pub_t instance to initial status
+ * for built-in type driver
+ */
+ dhd_clear(dhdp);
+
+ bcmerror = dhdpcie_bus_stop_host_dev(bus);
+ if (bcmerror) {
+ DHD_ERROR(("%s: dhdpcie_bus_stop host_dev failed: %d\n",
+ __FUNCTION__, bcmerror));
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+ atomic_set(&bus->dhd->block_bus, FALSE);
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+ goto done;
+ }
+
+ DHD_GENERAL_LOCK(bus->dhd, flags);
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
+ bus->dhd->busstate = DHD_BUS_DOWN;
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+ atomic_set(&bus->dhd->block_bus, FALSE);
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+ } else {
+ if (bus->intr) {
+ dhdpcie_free_irq(bus);
+ }
+#ifdef BCMPCIE_OOB_HOST_WAKE
+ /* Clean up any pending host wake IRQ */
+ dhd_bus_oob_intr_set(bus->dhd, FALSE);
+ dhd_bus_oob_intr_unregister(bus->dhd);
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+ dhd_dpc_kill(bus->dhd);
+ if (!bus->no_bus_init) {
+ dhd_bus_release_dongle(bus);
+ dhdpcie_bus_free_resource(bus);
+ bcmerror = dhdpcie_bus_disable_device(bus);
+ if (bcmerror) {
+ DHD_ERROR(("%s: dhdpcie_bus_disable_device: %d\n",
+ __FUNCTION__, bcmerror));
+ }
+
+ /* Clean up protocol data after the Bus Master Enable bit is
+ * cleared, so that the host can safely unmap DMA and remove the
+ * allocated buffers from the PKTID MAP. Some Application
+ * Processors with System MMU support trigger a kernel panic
+ * when they detect an access to DMA-unmapped memory from a
+ * device behind the System MMU.
+ * Since the dongle could still access DMA-unmapped memory after
+ * dhd_prot_reset(), the dhd_prot_reset() and dhd_clear() calls
+ * must come after dhdpcie_bus_disable_device().
+ */
+ dhd_prot_reset(dhdp);
+ /* XXX Reset dhd_pub_t instance to initial status
+ * for built-in type driver
+ */
+ dhd_clear(dhdp);
+ } else {
+ bus->no_bus_init = FALSE;
+ }
+
+ bcmerror = dhdpcie_bus_stop_host_dev(bus);
+ if (bcmerror) {
+ DHD_ERROR(("%s: dhdpcie_bus_stop_host_dev failed: %d\n",
+ __FUNCTION__, bcmerror));
+ goto done;
+ }
+ }
+
+ bus->dhd->dongle_reset = TRUE;
+ DHD_ERROR(("%s: WLAN OFF Done\n", __FUNCTION__));
+
+ } else { /* Turn on WLAN */
+ if (bus->dhd->busstate == DHD_BUS_DOWN) {
+ /* Powering On */
+ DHD_ERROR(("%s: == Power ON ==\n", __FUNCTION__));
+ /* PCIe RC Turn on */
+ do {
+ bcmerror = dhdpcie_bus_start_host_dev(bus);
+ if (!bcmerror) {
+ DHD_ERROR(("%s: dhdpcie_bus_start_host_dev OK\n",
+ __FUNCTION__));
+ break;
+ } else {
+ OSL_SLEEP(10);
+ }
+ } while (retry--);
+
+ if (bcmerror) {
+ DHD_ERROR(("%s: host pcie clock enable failed: %d\n",
+ __FUNCTION__, bcmerror));
+ goto done;
+ }
+#if defined(DHD_CONTROL_PCIE_ASPM_WIFI_TURNON)
+ dhd_bus_aspm_enable_rc_ep(bus, FALSE);
+#endif /* DHD_CONTROL_PCIE_ASPM_WIFI_TURNON */
+ bus->is_linkdown = 0;
+ bus->cto_triggered = 0;
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+ bus->read_shm_fail = FALSE;
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+ bcmerror = dhdpcie_bus_enable_device(bus);
+ if (bcmerror) {
+ DHD_ERROR(("%s: host configuration restore failed: %d\n",
+ __FUNCTION__, bcmerror));
+ goto done;
+ }
+
+ bcmerror = dhdpcie_bus_alloc_resource(bus);
+ if (bcmerror) {
+ DHD_ERROR(("%s: dhdpcie_bus_resource_alloc failed: %d\n",
+ __FUNCTION__, bcmerror));
+ goto done;
+ }
+#if defined(DHD_HP2P) && defined(OEM_ANDROID)
+ bus->dhd->hp2p_enable = TRUE;
+#endif
+
+#ifdef FORCE_DONGLE_RESET_IN_DEVRESET_ON
+ /*
+ * This will be enabled from phone platforms to
+ * reset dongle during Wifi ON
+ */
+ dhdpcie_dongle_reset(bus);
+#endif /* FORCE_DONGLE_RESET_IN_DEVRESET_ON */
+
+ bcmerror = dhdpcie_bus_dongle_attach(bus);
+ if (bcmerror) {
+ DHD_ERROR(("%s: dhdpcie_bus_dongle_attach failed: %d\n",
+ __FUNCTION__, bcmerror));
+ goto done;
+ }
+
+ bcmerror = dhd_bus_request_irq(bus);
+ if (bcmerror) {
+ DHD_ERROR(("%s: dhd_bus_request_irq failed: %d\n",
+ __FUNCTION__, bcmerror));
+ goto done;
+ }
+
+ bus->dhd->dongle_reset = FALSE;
+
+#if defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON)
+ dhd_irq_set_affinity(bus->dhd, cpumask_of(1));
+#endif /* DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */
+
+ bcmerror = dhd_bus_start(dhdp);
+ if (bcmerror) {
+ DHD_ERROR(("%s: dhd_bus_start: %d\n",
+ __FUNCTION__, bcmerror));
+ goto done;
+ }
+
+ /* Re-enable the watchdog, which was disabled in dhdpcie_advertise_bus_cleanup */
+ if (bus->dhd->dhd_watchdog_ms_backup) {
+ DHD_ERROR(("%s: Enabling wdtick after dhd init\n",
+ __FUNCTION__));
+ dhd_os_wd_timer(bus->dhd, bus->dhd->dhd_watchdog_ms_backup);
+ }
+ DHD_ERROR(("%s: WLAN Power On Done\n", __FUNCTION__));
+ } else {
+ DHD_ERROR(("%s: bus is not in DHD_BUS_DOWN state, nothing to do\n", __FUNCTION__));
+ goto done;
+ }
+ }
+
+done:
+ return bcmerror;
+}
+#else
+int
+dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag)
+{
+ dhd_bus_t *bus = dhdp->bus;
+ int bcmerror = 0;
+ unsigned long flags;
+
+ if (flag == TRUE) {
+ /* Removing Power */
+ if (!dhdp->dongle_reset) {
+ DHD_ERROR(("%s: == Power OFF ==\n", __FUNCTION__));
+ dhdpcie_advertise_bus_cleanup(bus->dhd);
+ dhd_os_sdlock(dhdp);
+ dhd_os_wd_timer(dhdp, 0);
+ dhd_bus_stop(bus, FALSE);
+ dhd_prot_reset(dhdp);
+
+ dhdpcie_bus_release_dongle(bus, bus->dhd->osh,
+ bus->dhd->dongle_isolation, TRUE);
+ bus->dhd->dongle_reset = TRUE;
+
+ dhd_os_sdunlock(dhdp);
+
+ DHD_GENERAL_LOCK(bus->dhd, flags);
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
+ bus->dhd->busstate = DHD_BUS_DOWN;
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+
+ DHD_ERROR(("%s: WLAN OFF Done\n", __FUNCTION__));
+ } else {
+ DHD_ERROR(("%s: Dongle is already in RESET!\n", __FUNCTION__));
+ bcmerror = BCME_DONGLE_DOWN;
+ }
+ } else {
+ /* Powering On */
+ DHD_ERROR(("%s: == Power ON ==\n", __FUNCTION__));
+
+ if (bus->dhd->dongle_reset) {
+ dhd_os_sdlock(dhdp); /* Turn on WLAN */
+
+ if (dhdpcie_dongle_attach(bus)) {
+ DHD_ERROR(("%s: dhdpcie_dongle_attach failed\n", __FUNCTION__));
+ dhd_os_sdunlock(dhdp);
+ return BCME_DONGLE_DOWN;
+ }
+ DHD_GENERAL_LOCK(bus->dhd, flags);
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
+ bus->dhd->busstate = DHD_BUS_DOWN;
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+
+ DHD_INFO(("%s: About to download firmware\n", __FUNCTION__));
+ if (dhd_bus_download_firmware(bus, bus->dhd->osh,
+ bus->fw_path, bus->nv_path) == 0) {
+
+ bcmerror = dhd_bus_init((dhd_pub_t *) bus->dhd, FALSE);
+ if (bcmerror == BCME_OK) {
+ bus->dhd->dongle_reset = FALSE;
+
+ dhd_os_wd_timer(dhdp, dhd_watchdog_ms);
+ DHD_ERROR(("%s: WLAN Power On Done\n", __FUNCTION__));
+ } else {
+ DHD_ERROR(("%s: dhd_bus_init FAILed\n", __FUNCTION__));
+ dhd_bus_stop(bus, FALSE);
+ }
+ } else {
+ DHD_ERROR(("%s: dhd_bus_download_firmware FAILed\n", __FUNCTION__));
+ bcmerror = BCME_DONGLE_DOWN;
+ }
+
+ dhd_os_sdunlock(dhdp);
+ } else {
+ bcmerror = BCME_DONGLE_DOWN;
+ DHD_ERROR(("%s called when dongle is not in reset\n", __FUNCTION__));
+ }
+ }
+ return bcmerror;
+}
+#endif /* LINUX || linux */
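+
+/* Semantics sketch (editor's note, not upstream code): in both variants of
+ * dhd_bus_devreset() above, flag == TRUE powers the dongle off (bus stopped,
+ * protocol reset, dongle released, busstate = DHD_BUS_DOWN) and flag == FALSE
+ * powers it back on (re-attach, firmware/NVRAM re-download, bus re-init).
+ * The fallback variant rejects a toggle to the state the dongle is already
+ * in with BCME_DONGLE_DOWN.
+ */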
+
+#ifdef DHD_PCIE_REG_ACCESS
+static int
+pcie2_mdioop(dhd_bus_t *bus, uint physmedia, uint regaddr, bool write, uint *val,
+ bool slave_bypass)
+{
+ uint pcie_serdes_spinwait = 200, i = 0, mdio_ctrl;
+ uint32 reg32;
+
+ pcie2_mdiosetblock(bus, physmedia);
+
+ /* enable mdio access to SERDES */
+ mdio_ctrl = MDIOCTL2_DIVISOR_VAL;
+ mdio_ctrl |= (regaddr << MDIOCTL2_REGADDR_SHF);
+
+ if (slave_bypass)
+ mdio_ctrl |= MDIOCTL2_SLAVE_BYPASS;
+
+ if (!write)
+ mdio_ctrl |= MDIOCTL2_READ;
+
+ si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_CONTROL, ~0, mdio_ctrl);
+
+ if (write) {
+ reg32 = PCIE2_MDIO_WR_DATA;
+ si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_WR_DATA, ~0,
+ *val | MDIODATA2_DONE);
+ } else
+ reg32 = PCIE2_MDIO_RD_DATA;
+
+ /* retry till the transaction is complete */
+ while (i < pcie_serdes_spinwait) {
+ uint done_val = si_corereg(bus->sih, bus->sih->buscoreidx, reg32, 0, 0);
+ if (!(done_val & MDIODATA2_DONE)) {
+ if (!write) {
+ *val = si_corereg(bus->sih, bus->sih->buscoreidx,
+ PCIE2_MDIO_RD_DATA, 0, 0);
+ *val = *val & MDIODATA2_MASK;
+ }
+ return 0;
+ }
+ OSL_DELAY(1000);
+ i++;
+ }
+ return -1;
+}
+#endif /* DHD_PCIE_REG_ACCESS */
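+
+/* Timing note (editor's sketch): pcie2_mdioop() polls for MDIODATA2_DONE to
+ * clear for up to pcie_serdes_spinwait (200) iterations with a 1000 us
+ * OSL_DELAY, i.e. roughly 200 ms worst case. It returns 0 on completion
+ * (read values are masked with MDIODATA2_MASK) and -1 on timeout.
+ */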
+
+#ifdef BCMINTERNAL
+static uint64
+serialized_backplane_access_64(dhd_bus_t *bus, uint addr, uint size, uint64 *val, bool read)
+{
+ uint64 ret;
+ unsigned long flags;
+
+ DHD_BACKPLANE_ACCESS_LOCK(bus->backplane_access_lock, flags);
+ ret = si_backplane_access_64(bus->sih, addr, size, val, read);
+ DHD_BACKPLANE_ACCESS_UNLOCK(bus->backplane_access_lock, flags);
+ return ret;
+}
+#endif /* BCMINTERNAL */
+
+static int
+dhdpcie_get_dma_ring_indices(dhd_pub_t *dhd)
+{
+ int h2d_support, d2h_support;
+
+ d2h_support = dhd->dma_d2h_ring_upd_support ? 1 : 0;
+ h2d_support = dhd->dma_h2d_ring_upd_support ? 1 : 0;
+ return (d2h_support | (h2d_support << 1));
+}
+
+int
+dhdpcie_set_dma_ring_indices(dhd_pub_t *dhd, int32 int_val)
+{
+ int bcmerror = 0;
+ /* Can change it only during initialization/FW download */
+ if (dhd->busstate == DHD_BUS_DOWN) {
+ if ((int_val > 3) || (int_val < 0)) {
+ DHD_ERROR(("Bad argument. Possible values: 0, 1, 2 & 3\n"));
+ bcmerror = BCME_BADARG;
+ } else {
+ dhd->dma_d2h_ring_upd_support = (int_val & 1) ? TRUE : FALSE;
+ dhd->dma_h2d_ring_upd_support = (int_val & 2) ? TRUE : FALSE;
+ dhd->dma_ring_upd_overwrite = TRUE;
+ }
+ } else {
+ DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
+ __FUNCTION__));
+ bcmerror = BCME_NOTDOWN;
+ }
+
+ return bcmerror;
+}
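+
+/* Encoding sketch (derived from the getter/setter above): the iovar value
+ * packs the two DMA index-update capabilities into two bits:
+ *
+ *   bit 0 -> dma_d2h_ring_upd_support
+ *   bit 1 -> dma_h2d_ring_upd_support
+ *
+ * e.g. dhdpcie_set_dma_ring_indices(dhd, 3) enables both directions, and a
+ * subsequent dhdpcie_get_dma_ring_indices(dhd) returns 3. The setter is only
+ * honoured while the bus is down (before FW download).
+ */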
+
+/* si_backplane_access() manages a shared resource - the BAR0 mapping - hence
+ * its calls shall be serialized. This wrapper function provides that
+ * serialization and shall be used everywhere instead of calling
+ * si_backplane_access() directly.
+ *
+ * The Linux DHD driver calls si_backplane_access() from three contexts:
+ * tasklet (which may call dhdpcie_sssr_dump()), iovar ("sbreg", "membytes",
+ * etc.) and procfs (used by the GDB proxy). To avoid race conditions, calls
+ * to si_backplane_access() shall be serialized. The presence of a tasklet
+ * context implies that the serialization shall be based on a spinlock, hence
+ * the Linux implementation of dhd_pcie_backplane_access_[un]lock() is
+ * spinlock-based.
+ *
+ * Other platforms may add their own implementations of
+ * dhd_pcie_backplane_access_[un]lock() as needed (e.g. the implementation
+ * may be empty if serialization is not needed).
+ */
+static uint
+serialized_backplane_access(dhd_bus_t *bus, uint addr, uint size, uint *val, bool read)
+{
+ uint ret;
+ unsigned long flags;
+ DHD_BACKPLANE_ACCESS_LOCK(bus->backplane_access_lock, flags);
+ ret = si_backplane_access(bus->sih, addr, size, val, read);
+ DHD_BACKPLANE_ACCESS_UNLOCK(bus->backplane_access_lock, flags);
+ return ret;
+}
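+
+/* Usage sketch (hypothetical address 'addr'): read one 32-bit word through
+ * the serialized wrapper rather than via si_backplane_access() directly:
+ *
+ *   uint val;
+ *   if (serialized_backplane_access(bus, addr, sizeof(uint32),
+ *           &val, TRUE) != BCME_OK) {
+ *       DHD_ERROR(("backplane read failed\n"));
+ *   }
+ */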
+
+#ifndef DHD_CAP_PLATFORM
+#define DHD_CAP_PLATFORM "x86 "
+#endif
+
+#ifndef DHD_CAP_CUSTOMER
+#define DHD_CAP_CUSTOMER "brcm "
+#endif
+
+void
+BCMRAMFN(dhd_cap_bcmstrbuf)(dhd_pub_t *dhd, struct bcmstrbuf *b)
+{
+ bcm_bprintf(b, DHD_CAP_PLATFORM);
+ bcm_bprintf(b, DHD_CAP_CUSTOMER);
+#ifdef PCIE_FULL_DONGLE
+ bcm_bprintf(b, "pcie ");
+#endif /* PCIE_FULL_DONGLE */
+ /* regaccess and memaccess will be present only for internal reference builds @brcm */
+#ifdef DHD_NO_MOG
+ bcm_bprintf(b, "internal ");
+#else
+ bcm_bprintf(b, "external ");
+#endif /* DHD_NO_MOG */
+#ifdef WLAN_ACCEL_BOOT
+ bcm_bprintf(b, "wlan-accel ");
+#endif /* WLAN_ACCEL_BOOT */
+#ifdef ENABLE_DHD_GRO
+ bcm_bprintf(b, "gro ");
+#endif /* ENABLE_DHD_GRO */
+#ifdef WBRC
+ bcm_bprintf(b, "wbrc ");
+#endif /* WBRC */
+#ifdef WL_CFG80211
+ bcm_bprintf(b, "cfg80211 ");
+#endif /* WL_CFG80211 */
+#ifdef DHD_FILE_DUMP_EVENT
+ bcm_bprintf(b, "haldump ");
+#endif /* DHD_FILE_DUMP_EVENT */
+#ifdef DHD_LB_RXP
+ bcm_bprintf(b, "lbrxp ");
+#endif /* DHD_LB_RXP */
+#ifdef DHD_LB_TXP
+#ifdef DHD_LB_TXP_DEFAULT_ENAB
+ bcm_bprintf(b, "lbtxp ");
+#endif /* DHD_LB_TXP_DEFAULT_ENAB */
+#endif /* DHD_LB_TXP */
+#ifdef DHD_HTPUT_TUNABLES
+ bcm_bprintf(b, "htput ");
+#endif /* DHD_HTPUT_TUNABLES */
+}
+
+/** Return dhd capability string */
+static char*
+dhd_cap(dhd_pub_t *dhd, char *buf, uint bufsize)
+{
+ struct bcmstrbuf b;
+
+ bcm_binit(&b, buf, bufsize);
+
+ dhd_cap_bcmstrbuf(dhd, &b);
+
+ /* this is either full or overflow. return error */
+ if (b.size <= 1)
+ return NULL;
+
+ return (buf);
+}
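+
+/* Usage sketch (editor's note; buffer size is an arbitrary assumption):
+ * dhd_cap() emits a space-separated token list (platform, customer, then
+ * feature flags such as "pcie " and "cfg80211 ") and returns NULL when the
+ * buffer fills up:
+ *
+ *   char caps[128];
+ *   if (dhd_cap(bus->dhd, caps, sizeof(caps)) == NULL)
+ *       DHD_ERROR(("capability buffer too short\n"));
+ */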
+
+/**
+ * IOVAR handler of the DHD bus layer (in this case, the PCIe bus).
+ *
+ * @param actionid e.g. IOV_SVAL(IOV_PCIEREG)
+ * @param params input buffer
+ * @param plen length in [bytes] of input buffer 'params'
+ * @param arg output buffer
+ * @param len length in [bytes] of output buffer 'arg'
+ */
+static int
+dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, const char *name,
+ void *params, uint plen, void *arg, uint len, int val_size)
+{
+ int bcmerror = 0;
+#ifdef BCMINTERNAL
+ uint64 uint64_val = 0;
+#endif /* BCMINTERNAL */
+ int32 int_val = 0;
+ int32 int_val2 = 0;
+ int32 int_val3 = 0;
+ bool bool_val = 0;
+
+ DHD_TRACE(("%s: Enter, action %d name %s params %p plen %d arg %p len %d val_size %d\n",
+ __FUNCTION__, actionid, name, params, plen, arg, len, val_size));
+
+ if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0)
+ goto exit;
+
+ if (plen >= sizeof(int_val))
+ bcopy(params, &int_val, sizeof(int_val));
+
+ if (plen >= sizeof(int_val) * 2)
+ bcopy((void*)((uintptr)params + sizeof(int_val)), &int_val2, sizeof(int_val2));
+
+ if (plen >= sizeof(int_val) * 3)
+ bcopy((void*)((uintptr)params + 2 * sizeof(int_val)), &int_val3, sizeof(int_val3));
+
+ bool_val = (int_val != 0) ? TRUE : FALSE;
+
+ /* Check if dongle is in reset. If so, only allow DEVRESET iovars */
+ if (bus->dhd->dongle_reset && !(actionid == IOV_SVAL(IOV_DEVRESET) ||
+ actionid == IOV_GVAL(IOV_DEVRESET))) {
+ bcmerror = BCME_NOTREADY;
+ goto exit;
+ }
+
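+ /* Parameter-layout sketch (editor's note): multi-word set-iovars pack
+ * consecutive int32 words into 'params'; the bcopy()s above expose them as
+ * int_val/int_val2/int_val3. For IOV_SVAL(IOV_PCIEREG) below, for example,
+ * int_val carries the configaddr offset and int_val2 the configdata value
+ * to write.
+ */
+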
+ switch (actionid) {
+
+#ifdef BCMINTERNAL
+ case IOV_SVAL(IOV_MSI_SIM):
+ /* allocate memory for MSI (Message Signaled Interrupt) window */
+ int_val = !!int_val;
+ DHD_INFO(("int_val is %d\n", int_val));
+ if (bus->msi_sim != int_val) {
+ if (int_val) {
+ /* bus->msi_addr */
+ bus->msi_sim_addr =
+ MALLOC(bus->dhd->osh, MSI_SIM_BUFSIZE);
+ if (bus->msi_sim_addr) {
+ *bus->msi_sim_addr = 0;
+ bus->msi_sim_phys = DMA_MAP(bus->dhd->osh,
+ bus->msi_sim_addr, MSI_SIM_BUFSIZE, DMA_RX, 0, 0);
+ /* program the MSI addr */
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t,
+ configaddr), ~0, PCIE_CFG_MSIDATA_OFFSET);
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t,
+ configdata), ~0, PCIE_CFG_MSI_GENDATA);
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t,
+ configaddr), ~0, PCIE_CFG_MSIADDR_LOW_OFFSET);
+ ASSERT(PHYSADDRHI(bus->msi_sim_phys) == 0);
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t,
+ configdata), ~0, (uint32)PHYSADDRLO(bus->msi_sim_phys));
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t,
+ configaddr), ~0, PCIE_CFG_MSICAP_OFFSET);
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t,
+ configdata), ~0, PCIE_CFG_MSICAP_ENABLE_MSI);
+ /* poll the MSI addr window */
+ bus->pollrate = 10;
+ }
+ DHD_INFO(("msi_sim_addr is %p\n", bus->msi_sim_addr));
+ } else {
+ /* bus->msi_addr */
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t,
+ configaddr), ~0,
+ PCIE_CFG_MSICAP_OFFSET);
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t,
+ configdata), ~0,
+ PCIE_CFG_MSICAP_DISABLE_MSI);
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t,
+ configaddr), ~0,
+ PCIE_CFG_MSIADDR_LOW_OFFSET);
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, configdata), ~0, 0);
+
+ DMA_UNMAP(bus->dhd->osh, bus->msi_sim_phys,
+ MSI_SIM_BUFSIZE, DMA_RX, 0, 0);
+ MFREE(bus->dhd->osh,
+ bus->msi_sim_addr, MSI_SIM_BUFSIZE);
+ }
+ bus->msi_sim = (bool)int_val;
+ }
+ break;
+ case IOV_GVAL(IOV_MSI_SIM):
+ bcopy(&bus->msi_sim, arg, val_size);
+ break;
+#endif /* BCMINTERNAL */
+
+ case IOV_SVAL(IOV_VARS):
+ bcmerror = dhdpcie_downloadvars(bus, arg, len);
+ break;
+#ifdef DHD_PCIE_REG_ACCESS
+ case IOV_SVAL(IOV_PCIEREG):
+ /* XXX: validate int_val ??? */
+ si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configaddr), ~0,
+ int_val);
+ si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configdata), ~0,
+ int_val2);
+ break;
+
+ case IOV_GVAL(IOV_PCIEREG):
+ /* XXX: validate int_val ??? */
+ si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configaddr), ~0,
+ int_val);
+ int_val = si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, configdata), 0, 0);
+ bcopy(&int_val, arg, sizeof(int_val));
+ break;
+
+ case IOV_SVAL(IOV_PCIECOREREG):
+ /* XXX: validate int_val ??? */
+ si_corereg(bus->sih, bus->sih->buscoreidx, int_val, ~0, int_val2);
+ break;
+ case IOV_GVAL(IOV_BAR0_SECWIN_REG):
+ {
+ sdreg_t sdreg;
+ uint32 addr, size;
+
+ bcopy(params, &sdreg, sizeof(sdreg));
+
+ addr = sdreg.offset;
+ size = sdreg.func;
+
+ if (serialized_backplane_access(bus, addr, size, (uint *)&int_val, TRUE) != BCME_OK)
+ {
+ DHD_ERROR(("Invalid size/addr combination \n"));
+ bcmerror = BCME_ERROR;
+ break;
+ }
+ bcopy(&int_val, arg, sizeof(int32));
+ break;
+ }
+
+ case IOV_SVAL(IOV_BAR0_SECWIN_REG):
+ {
+ sdreg_t sdreg;
+ uint32 addr, size;
+
+ bcopy(params, &sdreg, sizeof(sdreg));
+
+ addr = sdreg.offset;
+ size = sdreg.func;
+ if (serialized_backplane_access(bus, addr, size,
+ (uint *)(&sdreg.value), FALSE) != BCME_OK) {
+ DHD_ERROR(("Invalid size/addr combination \n"));
+ bcmerror = BCME_ERROR;
+ }
+ break;
+ }
+
+ case IOV_GVAL(IOV_SBREG):
+ {
+ sdreg_t sdreg;
+ uint32 addr, size;
+
+ bcopy(params, &sdreg, sizeof(sdreg));
+
+ addr = sdreg.offset | SI_ENUM_BASE(bus->sih);
+ size = sdreg.func;
+
+ if (serialized_backplane_access(bus, addr, size, (uint *)&int_val, TRUE) != BCME_OK)
+ {
+ DHD_ERROR(("Invalid size/addr combination \n"));
+ bcmerror = BCME_ERROR;
+ break;
+ }
+ bcopy(&int_val, arg, size);
+ break;
+ }
+
+ case IOV_SVAL(IOV_SBREG):
+ {
+ sdreg_t sdreg;
+ uint32 addr, size;
+
+ bcopy(params, &sdreg, sizeof(sdreg));
+
+ addr = sdreg.offset | SI_ENUM_BASE(bus->sih);
+ size = sdreg.func;
+ if (serialized_backplane_access(bus, addr, size,
+ (uint *)(&sdreg.value), FALSE) != BCME_OK) {
+ DHD_ERROR(("Invalid size/addr combination \n"));
+ bcmerror = BCME_ERROR;
+ }
+ break;
+ }
+
+ case IOV_GVAL(IOV_PCIESERDESREG):
+ {
+ uint val;
+ if (!PCIE_GEN2(bus->sih)) {
+ DHD_ERROR(("supported only in pcie gen2\n"));
+ bcmerror = BCME_ERROR;
+ break;
+ }
+
+ if (!pcie2_mdioop(bus, int_val, int_val2, FALSE, &val, FALSE)) {
+ bcopy(&val, arg, sizeof(int32));
+ } else {
+ DHD_ERROR(("pcie2_mdioop failed.\n"));
+ bcmerror = BCME_ERROR;
+ }
+ break;
+ }
+
+ case IOV_SVAL(IOV_PCIESERDESREG):
+ if (!PCIE_GEN2(bus->sih)) {
+ DHD_ERROR(("supported only in pcie gen2\n"));
+ bcmerror = BCME_ERROR;
+ break;
+ }
+ if (pcie2_mdioop(bus, int_val, int_val2, TRUE, (uint *)&int_val3, FALSE)) {
+ DHD_ERROR(("pcie2_mdioop failed.\n"));
+ bcmerror = BCME_ERROR;
+ }
+ break;
+ case IOV_GVAL(IOV_PCIECOREREG):
+ /* XXX: validate int_val ??? */
+ int_val = si_corereg(bus->sih, bus->sih->buscoreidx, int_val, 0, 0);
+ bcopy(&int_val, arg, sizeof(int_val));
+ break;
+
+ case IOV_SVAL(IOV_PCIECFGREG):
+ /* XXX: validate int_val ??? */
+ OSL_PCI_WRITE_CONFIG(bus->osh, int_val, 4, int_val2);
+ break;
+
+ case IOV_GVAL(IOV_PCIECFGREG):
+ /* XXX: validate int_val ??? */
+ int_val = OSL_PCI_READ_CONFIG(bus->osh, int_val, 4);
+ bcopy(&int_val, arg, sizeof(int_val));
+ break;
+#endif /* DHD_PCIE_REG_ACCESS */
+ case IOV_SVAL(IOV_PCIE_LPBK):
+ bcmerror = dhdpcie_bus_lpback_req(bus, int_val);
+ break;
+
+ case IOV_SVAL(IOV_PCIE_DMAXFER): {
+ dma_xfer_info_t *dmaxfer = (dma_xfer_info_t *)arg;
+ uint32 mem_addr;
+
+ if (!dmaxfer)
+ return BCME_BADARG;
+ if (dmaxfer->version != DHD_DMAXFER_VERSION)
+ return BCME_VERSION;
+ if (dmaxfer->length != sizeof(dma_xfer_info_t)) {
+ return BCME_BADLEN;
+ }
+
+ mem_addr = (uint32)dmaxfer->tput;
+ dmaxfer->tput = 0;
+ bcmerror = dhdpcie_bus_dmaxfer_req(bus, dmaxfer->num_bytes,
+ dmaxfer->src_delay, dmaxfer->dest_delay,
+ dmaxfer->type, dmaxfer->core_num,
+ dmaxfer->should_wait, mem_addr);
+
+ if (dmaxfer->should_wait && bcmerror >= 0) {
+ bcmerror = dhdmsgbuf_dmaxfer_status(bus->dhd, dmaxfer);
+ }
+ break;
+ }
+
+ case IOV_GVAL(IOV_PCIE_DMAXFER): {
+ dma_xfer_info_t *dmaxfer = (dma_xfer_info_t *)params;
+ if (!dmaxfer)
+ return BCME_BADARG;
+ if (dmaxfer->version != DHD_DMAXFER_VERSION)
+ return BCME_VERSION;
+ if (dmaxfer->length != sizeof(dma_xfer_info_t)) {
+ return BCME_BADLEN;
+ }
+ bcmerror = dhdmsgbuf_dmaxfer_status(bus->dhd, dmaxfer);
+ break;
+ }
+
+#ifdef BCMINTERNAL
+ case IOV_GVAL(IOV_PCIE_TX_LPBK):
+ int_val = dhdpcie_bus_get_tx_lpback(bus);
+ bcopy(&int_val, arg, val_size);
+ break;
+ case IOV_SVAL(IOV_PCIE_TX_LPBK):
+ bcmerror = dhdpcie_bus_set_tx_lpback(bus, bool_val);
+ break;
+#endif /* BCMINTERNAL */
+
+#ifdef PCIE_OOB
+ case IOV_GVAL(IOV_OOB_BT_REG_ON):
+ int_val = dhd_oob_get_bt_reg_on(bus);
+ bcopy(&int_val, arg, val_size);
+ break;
+ case IOV_SVAL(IOV_OOB_BT_REG_ON):
+ dhd_oob_set_bt_reg_on(bus, (uint8)int_val);
+ break;
+ case IOV_GVAL(IOV_OOB_ENABLE):
+ int_val = bus->oob_enabled;
+ bcopy(&int_val, arg, val_size);
+ break;
+ case IOV_SVAL(IOV_OOB_ENABLE):
+ bus->oob_enabled = (bool)int_val;
+ break;
+#endif /* PCIE_OOB */
+#ifdef PCIE_INB_DW
+ case IOV_GVAL(IOV_INB_DW_ENABLE):
+ int_val = bus->inb_enabled;
+ bcopy(&int_val, arg, val_size);
+ break;
+ case IOV_SVAL(IOV_INB_DW_ENABLE):
+ bus->inb_enabled = (bool)int_val;
+ break;
+#endif /* PCIE_INB_DW */
+#if defined(PCIE_OOB) || defined(PCIE_INB_DW)
+ case IOV_GVAL(IOV_DEEP_SLEEP):
+ int_val = bus->ds_enabled;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_DEEP_SLEEP):
+ if (int_val == 1) {
+ bus->deep_sleep = TRUE;
+ if (!bus->ds_enabled) {
+ bus->ds_enabled = TRUE;
+ /* Deassert */
+ if (dhd_bus_set_device_wake(bus, FALSE) == BCME_OK) {
+#ifdef PCIE_INB_DW
+ if (INBAND_DW_ENAB(bus)) {
+ int timeleft;
+ timeleft = dhd_os_ds_enter_wait(bus->dhd, NULL);
+ if (timeleft == 0) {
+ DHD_ERROR(("DS-ENTER timeout\n"));
+ bus->ds_enabled = FALSE;
+ break;
+ }
+ }
+#endif /* PCIE_INB_DW */
+ }
+ else {
+ DHD_ERROR(("%s: Enable Deep Sleep failed !\n",
+ __FUNCTION__));
+ bus->ds_enabled = FALSE;
+ }
+ } else {
+ DHD_ERROR(("%s: Deep Sleep already enabled !\n", __FUNCTION__));
+ }
+ }
+ else if (int_val == 0) {
+ bus->deep_sleep = FALSE;
+ if (bus->ds_enabled) {
+#if defined(DHD_EFI) && defined(DHD_INTR_POLL_PERIOD_DYNAMIC)
+ bus->dhd->cur_intr_poll_period = dhd_os_get_intr_poll_period();
+ /* for accurately measuring ds-exit latency
+ * set interrupt poll period to a lesser value
+ */
+ dhd_os_set_intr_poll_period(bus, INTR_POLL_PERIOD_CRITICAL);
+#endif /* DHD_EFI && DHD_INTR_POLL_PERIOD_DYNAMIC */
+ bus->calc_ds_exit_latency = TRUE;
+ /* Assert */
+ if (dhd_bus_set_device_wake(bus, TRUE) == BCME_OK) {
+ bus->ds_enabled = FALSE;
+ if (INBAND_DW_ENAB(bus)) {
+ if (bus->ds_exit_latency != 0) {
+ DHD_ERROR(("DS-EXIT latency = %llu us\n",
+ bus->ds_exit_latency));
+ } else {
+ DHD_ERROR(("Failed to measure DS-EXIT"
+ " latency!(Possibly a non"
+ " waitable context)\n"));
+ }
+ }
+ } else {
+ DHD_ERROR(("%s: Disable Deep Sleep failed !\n",
+ __FUNCTION__));
+ }
+ bus->calc_ds_exit_latency = FALSE;
+#if defined(DHD_EFI) && defined(DHD_INTR_POLL_PERIOD_DYNAMIC)
+ /* restore interrupt poll period to the previous existing value */
+ dhd_os_set_intr_poll_period(bus, bus->dhd->cur_intr_poll_period);
+#endif /* DHD_EFI && DHD_INTR_POLL_PERIOD_DYNAMIC */
+ } else {
+ DHD_ERROR(("%s: Deep Sleep already disabled !\n", __FUNCTION__));
+ }
+ }
+ else
+ DHD_ERROR(("%s: Invalid number, allowed only 0|1\n", __FUNCTION__));
+
+ break;
+#endif /* PCIE_OOB || PCIE_INB_DW */
+#ifdef DEVICE_TX_STUCK_DETECT
+ case IOV_GVAL(IOV_DEVICE_TX_STUCK_DETECT):
+ int_val = bus->dev_tx_stuck_monitor;
+ bcopy(&int_val, arg, val_size);
+ break;
+ case IOV_SVAL(IOV_DEVICE_TX_STUCK_DETECT):
+ bus->dev_tx_stuck_monitor = (bool)int_val;
+ break;
+#endif /* DEVICE_TX_STUCK_DETECT */
+ case IOV_GVAL(IOV_PCIE_SUSPEND):
+ int_val = (bus->dhd->busstate == DHD_BUS_SUSPEND) ? 1 : 0;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_PCIE_SUSPEND):
+ if (bool_val) { /* Suspend */
+ int ret;
+ unsigned long flags;
+
+ /*
+ * If some other context is busy, wait until they are done,
+ * before starting suspend
+ */
+ ret = dhd_os_busbusy_wait_condition(bus->dhd,
+ &bus->dhd->dhd_bus_busy_state, DHD_BUS_BUSY_IN_DHD_IOVAR);
+ if (ret == 0) {
+ DHD_ERROR(("%s: Wait timed out, dhd_bus_busy_state = 0x%x\n",
+ __FUNCTION__, bus->dhd->dhd_bus_busy_state));
+ return BCME_BUSY;
+ }
+
+ DHD_GENERAL_LOCK(bus->dhd, flags);
+ DHD_BUS_BUSY_SET_SUSPEND_IN_PROGRESS(bus->dhd);
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+ dhdpcie_bus_suspend(bus, TRUE, TRUE);
+#else
+ dhdpcie_bus_suspend(bus, TRUE);
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+
+ DHD_GENERAL_LOCK(bus->dhd, flags);
+ DHD_BUS_BUSY_CLEAR_SUSPEND_IN_PROGRESS(bus->dhd);
+ dhd_os_busbusy_wake(bus->dhd);
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+ } else { /* Resume */
+ unsigned long flags;
+ DHD_GENERAL_LOCK(bus->dhd, flags);
+ DHD_BUS_BUSY_SET_RESUME_IN_PROGRESS(bus->dhd);
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+
+ dhdpcie_bus_suspend(bus, FALSE);
+
+ DHD_GENERAL_LOCK(bus->dhd, flags);
+ DHD_BUS_BUSY_CLEAR_RESUME_IN_PROGRESS(bus->dhd);
+ dhd_os_busbusy_wake(bus->dhd);
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+ }
+ break;
+
+ case IOV_GVAL(IOV_MEMSIZE):
+ int_val = (int32)bus->ramsize;
+ bcopy(&int_val, arg, val_size);
+ break;
+#ifdef DHD_BUS_MEM_ACCESS
+ case IOV_SVAL(IOV_MEMBYTES):
+ case IOV_GVAL(IOV_MEMBYTES):
+ {
+ uint32 address; /* absolute backplane address */
+ uint size, dsize;
+ uint8 *data;
+
+ bool set = (actionid == IOV_SVAL(IOV_MEMBYTES));
+
+ ASSERT(plen >= 2*sizeof(int));
+
+ address = (uint32)int_val;
+ bcopy((char *)params + sizeof(int_val), &int_val, sizeof(int_val));
+ size = (uint)int_val;
+
+ /* Do some validation */
+ dsize = set ? plen - (2 * sizeof(int)) : len;
+ if (dsize < size) {
+ DHD_ERROR(("%s: error on %s membytes, addr 0x%08x size %d dsize %d\n",
+ __FUNCTION__, (set ? "set" : "get"), address, size, dsize));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+
+ DHD_INFO(("%s: Request to %s %d bytes at address 0x%08x\n dsize %d ", __FUNCTION__,
+ (set ? "write" : "read"), size, address, dsize));
+
+ /* check if CR4 */
+ if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0) ||
+ si_setcore(bus->sih, ARMCA7_CORE_ID, 0) ||
+ si_setcore(bus->sih, SYSMEM_CORE_ID, 0)) {
+ /* if address is 0, store the reset instruction to be written in 0 */
+ if (set && address == bus->dongle_ram_base) {
+ bus->resetinstr = *(((uint32*)params) + 2);
+ }
+ }
+
+ /* Generate the actual data pointer */
+ data = set ? (uint8*)params + 2 * sizeof(int): (uint8*)arg;
+
+ /* Call to do the transfer */
+ bcmerror = dhdpcie_bus_membytes(bus, set, address, data, size);
+
+ break;
+ }
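+
+ /* Buffer-layout sketch (from the parsing above): for a membytes set,
+ * params = [ uint32 address ][ uint32 size ][ 'size' bytes of data ];
+ * a get supplies the same two words and dhdpcie_bus_membytes() copies
+ * 'size' bytes back into 'arg'. When a set targets the dongle RAM base on
+ * a CR4/CA7/sysmem core, the first data word is also latched as
+ * bus->resetinstr.
+ */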
+#endif /* DHD_BUS_MEM_ACCESS */
+
+ /* Debug related. Dumps core registers or one of the dongle memory */
+ case IOV_GVAL(IOV_DUMP_DONGLE):
+ {
+ dump_dongle_in_t ddi = *(dump_dongle_in_t*)params;
+ dump_dongle_out_t *ddo = (dump_dongle_out_t*)arg;
+ uint32 *p = ddo->val;
+ const uint max_offset = 4096 - 1; /* one core contains max 4096/4 registers */
+
+ if (plen < sizeof(ddi) || len < sizeof(ddo)) {
+ bcmerror = BCME_BADARG;
+ break;
+ }
+
+ switch (ddi.type) {
+ case DUMP_DONGLE_COREREG:
+ ddo->n_bytes = 0;
+
+ if (si_setcoreidx(bus->sih, ddi.index) == NULL) {
+ break; // beyond last core: core enumeration ended
+ }
+
+ ddo->address = si_addrspace(bus->sih, CORE_SLAVE_PORT_0, CORE_BASE_ADDR_0);
+ ddo->address += ddi.offset; // BP address at which this dump starts
+
+ ddo->id = si_coreid(bus->sih);
+ ddo->rev = si_corerev(bus->sih);
+
+ while (ddi.offset < max_offset &&
+ sizeof(dump_dongle_out_t) + ddo->n_bytes < (uint)len) {
+ *p++ = si_corereg(bus->sih, ddi.index, ddi.offset, 0, 0);
+ ddi.offset += sizeof(uint32);
+ ddo->n_bytes += sizeof(uint32);
+ }
+ break;
+ default:
+ // TODO: implement d11 SHM/TPL dumping
+ bcmerror = BCME_BADARG;
+ break;
+ }
+ break;
+ }
+
+ /* Debug related. Returns a string with dongle capabilities */
+ case IOV_GVAL(IOV_DHD_CAPS):
+ {
+ if (dhd_cap(bus->dhd, (char*)arg, len) == NULL) {
+ bcmerror = BCME_BUFTOOSHORT;
+ }
+ break;
+ }
+
+#if defined(DEBUGGER) || defined(DHD_DSCOPE)
+ case IOV_SVAL(IOV_GDB_SERVER):
+ /* debugger_*() functions may sleep, so cannot hold spinlock */
+ if (int_val > 0) {
+ debugger_init((void *) bus, &bus_ops, int_val, SI_ENUM_BASE(bus->sih));
+ } else {
+ debugger_close();
+ }
+ break;
+#endif /* DEBUGGER || DHD_DSCOPE */
+#if defined(GDB_PROXY)
+ case IOV_GVAL(IOV_GDB_PROXY_PROBE):
+ {
+ dhd_gdb_proxy_probe_data_t ret;
+ ret.data_len = (uint32)sizeof(ret);
+ ret.magic = DHD_IOCTL_MAGIC;
+ ret.flags = 0;
+ if (bus->gdb_proxy_access_enabled) {
+ ret.flags |= DHD_GDB_PROXY_PROBE_ACCESS_ENABLED;
+ if (bus->dhd->busstate < DHD_BUS_LOAD) {
+ ret.flags |= DHD_GDB_PROXY_PROBE_FIRMWARE_NOT_RUNNING;
+ } else {
+ ret.flags |= DHD_GDB_PROXY_PROBE_FIRMWARE_RUNNING;
+ }
+ }
+ if (bus->gdb_proxy_bootloader_mode) {
+ ret.flags |= DHD_GDB_PROXY_PROBE_BOOTLOADER_MODE;
+ }
+ ret.last_id = bus->gdb_proxy_last_id;
+ if (bus->hostfw_buf.va) {
+ ret.flags |= DHD_GDB_PROXY_PROBE_HOSTMEM_CODE;
+ ret.hostmem_code_win_base =
+ (uint32)PCIEDEV_ARM_ADDR(PHYSADDRLO(bus->hostfw_buf.pa),
+ PCIEDEV_TRANS_WIN_HOSTMEM);
+ ret.hostmem_code_win_length = bus->hostfw_buf.len;
+ }
+ if (plen && int_val) {
+ bus->gdb_proxy_last_id = (uint32)int_val;
+ }
+ if (len >= sizeof(ret)) {
+ bcopy(&ret, arg, sizeof(ret));
+ bus->dhd->gdb_proxy_active = TRUE;
+ } else {
+ bcmerror = BCME_BADARG;
+ }
+ break;
+ }
+ case IOV_GVAL(IOV_GDB_PROXY_STOP_COUNT):
+ int_val = (int32)bus->dhd->gdb_proxy_stop_count;
+ bcopy(&int_val, arg, sizeof(int_val));
+ break;
+ case IOV_SVAL(IOV_GDB_PROXY_STOP_COUNT):
+ bus->dhd->gdb_proxy_stop_count = (uint32)int_val;
+ break;
+#endif /* GDB_PROXY */
+
+#ifdef BCM_BUZZZ
+ /* Dump dongle side buzzz trace to console */
+ case IOV_GVAL(IOV_BUZZZ_DUMP):
+ bcmerror = dhd_buzzz_dump_dngl(bus);
+ break;
+#endif /* BCM_BUZZZ */
+
+ case IOV_SVAL(IOV_SET_DOWNLOAD_STATE):
+ bcmerror = dhdpcie_bus_download_state(bus, bool_val);
+ break;
+
+#if defined(FW_SIGNATURE)
+ case IOV_SVAL(IOV_SET_DOWNLOAD_INFO):
+ {
+ fw_download_info_t *info = (fw_download_info_t*)params;
+ DHD_INFO(("dwnldinfo: sig=%s fw=%x,%u bl=%s,0x%x\n",
+ info->fw_signature_fname,
+ info->fw_start_addr, info->fw_size,
+ info->bootloader_fname, info->bootloader_start_addr));
+ bcmerror = dhdpcie_bus_save_download_info(bus,
+ info->fw_start_addr, info->fw_size, info->fw_signature_fname,
+ info->bootloader_fname, info->bootloader_start_addr);
+ break;
+ }
+#endif /* FW_SIGNATURE */
+
+ case IOV_GVAL(IOV_RAMSIZE):
+ int_val = (int32)bus->ramsize;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_RAMSIZE):
+ bus->ramsize = int_val;
+ bus->orig_ramsize = int_val;
+ break;
+
+ case IOV_GVAL(IOV_RAMSTART):
+ int_val = (int32)bus->dongle_ram_base;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_GVAL(IOV_CC_NVMSHADOW):
+ {
+ struct bcmstrbuf dump_b;
+
+ bcm_binit(&dump_b, arg, len);
+ bcmerror = dhdpcie_cc_nvmshadow(bus, &dump_b);
+ break;
+ }
+
+ case IOV_GVAL(IOV_SLEEP_ALLOWED):
+ bool_val = bus->sleep_allowed;
+ bcopy(&bool_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_SLEEP_ALLOWED):
+ bus->sleep_allowed = bool_val;
+ break;
+
+ case IOV_GVAL(IOV_DONGLEISOLATION):
+ int_val = bus->dhd->dongle_isolation;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_DONGLEISOLATION):
+ bus->dhd->dongle_isolation = bool_val;
+ break;
+
+ case IOV_GVAL(IOV_LTRSLEEPON_UNLOOAD):
+ int_val = bus->ltrsleep_on_unload;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_LTRSLEEPON_UNLOOAD):
+ bus->ltrsleep_on_unload = bool_val;
+ break;
+
+ case IOV_GVAL(IOV_DUMP_RINGUPD_BLOCK):
+ {
+ struct bcmstrbuf dump_b;
+ bcm_binit(&dump_b, arg, len);
+ bcmerror = dhd_prot_ringupd_dump(bus->dhd, &dump_b);
+ break;
+ }
+ case IOV_GVAL(IOV_DMA_RINGINDICES):
+ {
+ int_val = dhdpcie_get_dma_ring_indices(bus->dhd);
+ bcopy(&int_val, arg, sizeof(int_val));
+ break;
+ }
+ case IOV_SVAL(IOV_DMA_RINGINDICES):
+ bcmerror = dhdpcie_set_dma_ring_indices(bus->dhd, int_val);
+ break;
+
+ case IOV_GVAL(IOV_METADATA_DBG):
+ int_val = dhd_prot_metadata_dbg_get(bus->dhd);
+ bcopy(&int_val, arg, val_size);
+ break;
+ case IOV_SVAL(IOV_METADATA_DBG):
+ dhd_prot_metadata_dbg_set(bus->dhd, (int_val != 0));
+ break;
+
+ case IOV_GVAL(IOV_RX_METADATALEN):
+ int_val = dhd_prot_metadatalen_get(bus->dhd, TRUE);
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_RX_METADATALEN):
+#if !(defined(BCM_ROUTER_DHD))
+ if (int_val > 64) {
+ bcmerror = BCME_BUFTOOLONG;
+ break;
+ }
+ dhd_prot_metadatalen_set(bus->dhd, int_val, TRUE);
+#else
+ bcmerror = BCME_UNSUPPORTED;
+#endif /* BCM_ROUTER_DHD */
+ break;
+
+ case IOV_SVAL(IOV_TXP_THRESHOLD):
+ dhd_prot_txp_threshold(bus->dhd, TRUE, int_val);
+ break;
+
+ case IOV_GVAL(IOV_TXP_THRESHOLD):
+ int_val = dhd_prot_txp_threshold(bus->dhd, FALSE, int_val);
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_DB1_FOR_MB):
+ if (int_val)
+ bus->db1_for_mb = TRUE;
+ else
+ bus->db1_for_mb = FALSE;
+ break;
+
+ case IOV_GVAL(IOV_DB1_FOR_MB):
+ if (bus->db1_for_mb)
+ int_val = 1;
+ else
+ int_val = 0;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_GVAL(IOV_TX_METADATALEN):
+ int_val = dhd_prot_metadatalen_get(bus->dhd, FALSE);
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_TX_METADATALEN):
+#if !(defined(BCM_ROUTER_DHD))
+ if (int_val > 64) {
+ bcmerror = BCME_BUFTOOLONG;
+ break;
+ }
+ dhd_prot_metadatalen_set(bus->dhd, int_val, FALSE);
+#else
+ bcmerror = BCME_UNSUPPORTED;
+#endif /* BCM_ROUTER_DHD */
+ break;
+
+ case IOV_SVAL(IOV_DEVRESET):
+ {
+ devreset_info_t *devreset = (devreset_info_t *)arg;
+
+ if (!devreset) {
+ return BCME_BADARG;
+ }
+
+ if (devreset->length == sizeof(devreset_info_t)) {
+ if (devreset->version != DHD_DEVRESET_VERSION) {
+ return BCME_VERSION;
+ }
+ int_val = devreset->mode;
+ }
+
+ switch (int_val) {
+ case DHD_BUS_DEVRESET_ON:
+ bcmerror = dhd_bus_devreset(bus->dhd, (uint8)int_val);
+ break;
+ case DHD_BUS_DEVRESET_OFF:
+ bcmerror = dhd_bus_devreset(bus->dhd, (uint8)int_val);
+ break;
+#if !defined(NDIS)
+ case DHD_BUS_DEVRESET_FLR:
+ bcmerror = dhd_bus_perform_flr(bus, bus->flr_force_fail);
+ break;
+ case DHD_BUS_DEVRESET_FLR_FORCE_FAIL:
+ bus->flr_force_fail = TRUE;
+ break;
+#ifdef BT_OVER_PCIE
+ case DHD_BUS_DEVRESET_QUIESCE:
+ if (bus->dhd->busstate == DHD_BUS_DATA) {
+ if (bus->dhd->db7_trap.fw_db7w_trap) {
+ unsigned long flags = 0;
+
+ DHD_GENERAL_LOCK(bus->dhd, flags);
+ bus->dhd->db7_trap.fw_db7w_trap_inprogress = TRUE;
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+ dhdpcie_fw_trap(bus);
+ OSL_DELAY(100 * 1000); // wait 100 msec
+ DHD_GENERAL_LOCK(bus->dhd, flags);
+ bus->dhd->db7_trap.fw_db7w_trap_inprogress = FALSE;
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+ } else {
+ DHD_TRACE(("%s: DB7 Not supported!!!\n",
+ __FUNCTION__));
+ }
+
+ devreset->status =
+ dhd_bus_perform_flr_with_quiesce(bus->dhd, bus,
+ FALSE);
+
+ if (devreset->status == BCME_DNGL_DEVRESET) {
+ devreset->status = BCME_OK;
+ }
+ bcmerror = BCME_DNGL_DEVRESET;
+ } else {
+ DHD_ERROR(("%s: Bus is NOT up\n", __FUNCTION__));
+ bcmerror = BCME_NOTUP;
+ }
+ break;
+#endif /* BT_OVER_PCIE */
+#endif /* !defined(NDIS) */
+ default:
+ DHD_ERROR(("%s: invalid argument for devreset\n", __FUNCTION__));
+ break;
+ }
+ break;
+ }
+ case IOV_SVAL(IOV_FORCE_FW_TRAP):
+ if (bus->dhd->busstate == DHD_BUS_DATA)
+ dhdpcie_fw_trap(bus);
+ else {
+ DHD_ERROR(("%s: Bus is NOT up\n", __FUNCTION__));
+ bcmerror = BCME_NOTUP;
+ }
+ break;
+ case IOV_GVAL(IOV_FLOW_PRIO_MAP):
+ int_val = bus->dhd->flow_prio_map_type;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_FLOW_PRIO_MAP):
+ int_val = (int32)dhd_update_flow_prio_map(bus->dhd, (uint8)int_val);
+ bcopy(&int_val, arg, val_size);
+ break;
+
+#ifdef DHD_PCIE_RUNTIMEPM
+ case IOV_GVAL(IOV_IDLETIME):
+ if (!(bus->dhd->op_mode & DHD_FLAG_MFG_MODE)) {
+ int_val = bus->idletime;
+ } else {
+ int_val = 0;
+ }
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_IDLETIME):
+ if (int_val < 0) {
+ bcmerror = BCME_BADARG;
+ } else {
+ bus->idletime = int_val;
+ if (bus->idletime) {
+ DHD_ENABLE_RUNTIME_PM(bus->dhd);
+ } else {
+ DHD_DISABLE_RUNTIME_PM(bus->dhd);
+ }
+ }
+ break;
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+ case IOV_GVAL(IOV_TXBOUND):
+ int_val = (int32)dhd_txbound;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_TXBOUND):
+ dhd_txbound = (uint)int_val;
+ break;
+
+ case IOV_SVAL(IOV_H2D_MAILBOXDATA):
+ dhdpcie_send_mb_data(bus, (uint)int_val);
+ break;
+
+ case IOV_SVAL(IOV_INFORINGS):
+ dhd_prot_init_info_rings(bus->dhd);
+ break;
+
+ case IOV_SVAL(IOV_H2D_PHASE):
+ if (bus->dhd->busstate != DHD_BUS_DOWN) {
+ DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
+ __FUNCTION__));
+ bcmerror = BCME_NOTDOWN;
+ break;
+ }
+ if (int_val)
+ bus->dhd->h2d_phase_supported = TRUE;
+ else
+ bus->dhd->h2d_phase_supported = FALSE;
+ break;
+
+ case IOV_GVAL(IOV_H2D_PHASE):
+ int_val = (int32) bus->dhd->h2d_phase_supported;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_H2D_ENABLE_TRAP_BADPHASE):
+ if (bus->dhd->busstate != DHD_BUS_DOWN) {
+ DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
+ __FUNCTION__));
+ bcmerror = BCME_NOTDOWN;
+ break;
+ }
+ if (int_val)
+ bus->dhd->force_dongletrap_on_bad_h2d_phase = TRUE;
+ else
+ bus->dhd->force_dongletrap_on_bad_h2d_phase = FALSE;
+ break;
+
+ case IOV_GVAL(IOV_H2D_ENABLE_TRAP_BADPHASE):
+ int_val = (int32) bus->dhd->force_dongletrap_on_bad_h2d_phase;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_H2D_TXPOST_MAX_ITEM):
+ if (bus->dhd->busstate != DHD_BUS_DOWN) {
+ DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
+ __FUNCTION__));
+ bcmerror = BCME_NOTDOWN;
+ break;
+ }
+ dhd_prot_set_h2d_max_txpost(bus->dhd, (uint16)int_val);
+ break;
+
+ case IOV_GVAL(IOV_H2D_TXPOST_MAX_ITEM):
+ int_val = dhd_prot_get_h2d_max_txpost(bus->dhd);
+ bcopy(&int_val, arg, val_size);
+ break;
+
+#if defined(DHD_HTPUT_TUNABLES)
+ case IOV_SVAL(IOV_H2D_HTPUT_TXPOST_MAX_ITEM):
+ if (bus->dhd->busstate != DHD_BUS_DOWN) {
+ DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
+ __FUNCTION__));
+ bcmerror = BCME_NOTDOWN;
+ break;
+ }
+ dhd_prot_set_h2d_htput_max_txpost(bus->dhd, (uint16)int_val);
+ break;
+
+ case IOV_GVAL(IOV_H2D_HTPUT_TXPOST_MAX_ITEM):
+ int_val = dhd_prot_get_h2d_htput_max_txpost(bus->dhd);
+ bcopy(&int_val, arg, val_size);
+ break;
+#endif /* DHD_HTPUT_TUNABLES */
+
+ case IOV_GVAL(IOV_RXBOUND):
+ int_val = (int32)dhd_rxbound;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_RXBOUND):
+ dhd_rxbound = (uint)int_val;
+ break;
+
+ case IOV_GVAL(IOV_TRAPDATA):
+ {
+ struct bcmstrbuf dump_b;
+ bcm_binit(&dump_b, arg, len);
+ bcmerror = dhd_prot_dump_extended_trap(bus->dhd, &dump_b, FALSE);
+ break;
+ }
+
+ case IOV_GVAL(IOV_TRAPDATA_RAW):
+ {
+ struct bcmstrbuf dump_b;
+ bcm_binit(&dump_b, arg, len);
+ bcmerror = dhd_prot_dump_extended_trap(bus->dhd, &dump_b, TRUE);
+ break;
+ }
+
+#ifdef DHD_PCIE_REG_ACCESS
+ case IOV_GVAL(IOV_PCIEASPM): {
+ uint8 clkreq = 0;
+ uint32 aspm = 0;
+
+ /* this iovar hides the register details but matches the lcreg bit layout:
+ #define PCIE_CLKREQ_ENAB 0x100
+ #define PCIE_ASPM_L1_ENAB 2
+ #define PCIE_ASPM_L0s_ENAB 1
+ */
+
+ clkreq = dhdpcie_clkreq(bus->dhd->osh, 0, 0);
+ aspm = dhdpcie_lcreg(bus->dhd->osh, 0, 0);
+
+ int_val = ((clkreq & 0x1) << 8) | (aspm & PCIE_ASPM_ENAB);
+ bcopy(&int_val, arg, val_size);
+ break;
+ }
+
+ case IOV_SVAL(IOV_PCIEASPM): {
+ uint32 tmp;
+
+ tmp = dhdpcie_lcreg(bus->dhd->osh, 0, 0);
+ dhdpcie_lcreg(bus->dhd->osh, PCIE_ASPM_ENAB,
+ (tmp & ~PCIE_ASPM_ENAB) | (int_val & PCIE_ASPM_ENAB));
+
+ dhdpcie_clkreq(bus->dhd->osh, 1, ((int_val & 0x100) >> 8));
+ break;
+ }
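+
+ /* Bit-layout sketch (from the defines quoted above): the 'pcieaspm' value
+ * packs CLKREQ and ASPM state as
+ *
+ *   bit 8 (0x100) -> CLKREQ enable
+ *   bit 1 (0x2)   -> ASPM L1 enable
+ *   bit 0 (0x1)   -> ASPM L0s enable
+ *
+ * e.g. int_val = 0x102 requests CLKREQ plus L1-only ASPM.
+ */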
+#endif /* DHD_PCIE_REG_ACCESS */
+ case IOV_SVAL(IOV_HANGREPORT):
+ bus->dhd->hang_report = bool_val;
+ DHD_ERROR(("%s: Set hang_report as %d\n",
+ __FUNCTION__, bus->dhd->hang_report));
+ break;
+
+ case IOV_GVAL(IOV_HANGREPORT):
+ int_val = (int32)bus->dhd->hang_report;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_CTO_PREVENTION):
+ bcmerror = dhdpcie_cto_init(bus, bool_val);
+ break;
+
+ case IOV_GVAL(IOV_CTO_PREVENTION):
+ if (bus->sih->buscorerev < 19) {
+ bcmerror = BCME_UNSUPPORTED;
+ break;
+ }
+ int_val = (int32)bus->cto_enable;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_CTO_THRESHOLD):
+ {
+ if (bus->sih->buscorerev < 19) {
+ bcmerror = BCME_UNSUPPORTED;
+ break;
+ }
+ bus->cto_threshold = (uint32)int_val;
+ }
+ break;
+
+ case IOV_GVAL(IOV_CTO_THRESHOLD):
+ if (bus->sih->buscorerev < 19) {
+ bcmerror = BCME_UNSUPPORTED;
+ break;
+ }
+ if (bus->cto_threshold) {
+ int_val = (int32)bus->cto_threshold;
+ } else {
+ int_val = pcie_cto_to_thresh_default(bus->sih->buscorerev);
+ }
+
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_PCIE_WD_RESET):
+ if (bool_val) {
+ /* Legacy chipcommon watchdog reset */
+ dhdpcie_cc_watchdog_reset(bus);
+ }
+ break;
+
+#ifdef DHD_EFI
+ case IOV_SVAL(IOV_CONTROL_SIGNAL):
+ {
+ bcmerror = dhd_control_signal(bus, arg, len, TRUE);
+ break;
+ }
+
+ case IOV_GVAL(IOV_CONTROL_SIGNAL):
+ {
+ bcmerror = dhd_control_signal(bus, params, plen, FALSE);
+ break;
+ }
+ case IOV_GVAL(IOV_WIFI_PROPERTIES):
+ bcmerror = dhd_wifi_properties(bus, params, plen);
+ break;
+ case IOV_GVAL(IOV_OTP_DUMP):
+ bcmerror = dhd_otp_dump(bus, params, plen);
+ break;
+#if defined(BT_OVER_PCIE) && defined(BTOP_TEST)
+ case IOV_SVAL(IOV_BTOP_TEST):
+ bcmerror = dhd_btop_test(bus, arg, len);
+ break;
+#endif /* BT_OVER_PCIE && BTOP_TEST */
+#endif /* DHD_EFI */
+ case IOV_GVAL(IOV_IDMA_ENABLE):
+ int_val = bus->idma_enabled;
+ bcopy(&int_val, arg, val_size);
+ break;
+ case IOV_SVAL(IOV_IDMA_ENABLE):
+ bus->idma_enabled = (bool)int_val;
+ break;
+ case IOV_GVAL(IOV_IFRM_ENABLE):
+ int_val = bus->ifrm_enabled;
+ bcopy(&int_val, arg, val_size);
+ break;
+ case IOV_SVAL(IOV_IFRM_ENABLE):
+ bus->ifrm_enabled = (bool)int_val;
+ break;
+#ifdef BCMINTERNAL
+ case IOV_GVAL(IOV_DMA_CHAN):
+ int_val = bus->dma_chan;
+ bcopy(&int_val, arg, val_size);
+ break;
+ case IOV_SVAL(IOV_DMA_CHAN):
+ {
+ bus->dma_chan = (bool)int_val;
+ bus->pcie_mb_intr_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
+ dhd_bus_db0_addr_get(bus));
+ break;
+ }
+ case IOV_SVAL(IOV_HYBRIDFW):
+ {
+ char *fp;
+ fp = dhd_os_open_image1(bus->dhd, params);
+ if (fp == NULL) {
+ bcmerror = BCME_ERROR;
+ break;
+ }
+ bcmerror = dhdpcie_hybridfw_download(bus, fp);
+ dhd_os_close_image1(bus->dhd, fp);
+ break;
+ }
+#endif /* BCMINTERNAL */
+ case IOV_GVAL(IOV_CLEAR_RING):
+ bcopy(&int_val, arg, val_size);
+ dhd_flow_rings_flush(bus->dhd, 0);
+ break;
+ case IOV_GVAL(IOV_DAR_ENABLE):
+ int_val = bus->dar_enabled;
+ bcopy(&int_val, arg, val_size);
+ break;
+ case IOV_SVAL(IOV_DAR_ENABLE):
+ bus->dar_enabled = (bool)int_val;
+ break;
+ case IOV_GVAL(IOV_HSCBSIZE):
+ bcmerror = dhd_get_hscb_info(bus->dhd, NULL, (uint32 *)arg);
+ break;
+#ifdef DHD_BUS_MEM_ACCESS
+ case IOV_GVAL(IOV_HSCBBYTES):
+ bcmerror = dhd_get_hscb_buff(bus->dhd, int_val, int_val2, (void*)arg);
+ break;
+#endif
+#ifdef D2H_MINIDUMP
+ case IOV_GVAL(IOV_MINIDUMP_OVERRIDE):
+ int_val = bus->d2h_minidump_override;
+ bcopy(&int_val, arg, val_size);
+ break;
+ case IOV_SVAL(IOV_MINIDUMP_OVERRIDE):
+ /* Can change it only before FW download */
+ if (bus->dhd->busstate != DHD_BUS_DOWN) {
+ DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
+ __FUNCTION__));
+ bcmerror = BCME_NOTDOWN;
+ break;
+ }
+ bus->d2h_minidump_override = (bool)int_val;
+ break;
+#endif /* D2H_MINIDUMP */
+
+#ifdef BCMINTERNAL
+#ifdef DHD_FWTRACE
+ case IOV_SVAL(IOV_FWTRACE):
+ {
+ DHD_INFO(("%s: set firware tracing enable/disable %d\n",
+ __FUNCTION__, int_val));
+
+ bcmerror = handle_set_fwtrace(bus->dhd, (uint32) int_val);
+ break;
+ }
+
+ case IOV_GVAL(IOV_FWTRACE):
+ {
+ uint32 val = 0, temp_val = 0;
+ uint16 of_counter, trace_val = 0;
+ int ret;
+
+ ret = dhd_iovar(bus->dhd, 0, "dngl:fwtrace",
+ NULL, 0, (char *) &val, sizeof(val), FALSE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: get dhd_iovar has failed fwtrace, "
+ "ret=%d\n", __FUNCTION__, ret));
+ bcmerror = BCME_ERROR;
+ } else {
+ of_counter = get_fw_trace_overflow_counter(bus->dhd);
+ DHD_INFO(("overflow counter = %d \n", of_counter));
+ trace_val = val & 0xFFFF;
+ temp_val = (((uint32) temp_val | (uint32) of_counter) << 16u) | trace_val;
+ bcopy(&temp_val, arg, sizeof(temp_val));
+ }
+ break;
+ }
+#endif /* DHD_FWTRACE */
+#endif /* BCMINTERNAL */
+
+#ifdef DHD_HP2P
+ case IOV_SVAL(IOV_HP2P_ENABLE):
+ dhd_prot_hp2p_enable(bus->dhd, TRUE, int_val);
+ break;
+
+ case IOV_GVAL(IOV_HP2P_ENABLE):
+ int_val = dhd_prot_hp2p_enable(bus->dhd, FALSE, int_val);
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_HP2P_PKT_THRESHOLD):
+ dhd_prot_pkt_threshold(bus->dhd, TRUE, int_val);
+ break;
+
+ case IOV_GVAL(IOV_HP2P_PKT_THRESHOLD):
+ int_val = dhd_prot_pkt_threshold(bus->dhd, FALSE, int_val);
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_HP2P_TIME_THRESHOLD):
+ dhd_prot_time_threshold(bus->dhd, TRUE, int_val);
+ break;
+
+ case IOV_GVAL(IOV_HP2P_TIME_THRESHOLD):
+ int_val = dhd_prot_time_threshold(bus->dhd, FALSE, int_val);
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_HP2P_PKT_EXPIRY):
+ dhd_prot_pkt_expiry(bus->dhd, TRUE, int_val);
+ break;
+
+ case IOV_GVAL(IOV_HP2P_PKT_EXPIRY):
+ int_val = dhd_prot_pkt_expiry(bus->dhd, FALSE, int_val);
+ bcopy(&int_val, arg, val_size);
+ break;
+ case IOV_SVAL(IOV_HP2P_TXCPL_MAXITEMS):
+ if (bus->dhd->busstate != DHD_BUS_DOWN) {
+ return BCME_NOTDOWN;
+ }
+ dhd_bus_set_hp2p_ring_max_size(bus, TRUE, int_val);
+ break;
+
+ case IOV_GVAL(IOV_HP2P_TXCPL_MAXITEMS):
+ int_val = dhd_bus_get_hp2p_ring_max_size(bus, TRUE);
+ bcopy(&int_val, arg, val_size);
+ break;
+ case IOV_SVAL(IOV_HP2P_RXCPL_MAXITEMS):
+ if (bus->dhd->busstate != DHD_BUS_DOWN) {
+ return BCME_NOTDOWN;
+ }
+ dhd_bus_set_hp2p_ring_max_size(bus, FALSE, int_val);
+ break;
+
+ case IOV_GVAL(IOV_HP2P_RXCPL_MAXITEMS):
+ int_val = dhd_bus_get_hp2p_ring_max_size(bus, FALSE);
+ bcopy(&int_val, arg, val_size);
+ break;
+ case IOV_SVAL(IOV_HP2P_MF_ENABLE):
+ bus->dhd->hp2p_mf_enable = int_val ? TRUE : FALSE;
+ break;
+
+ case IOV_GVAL(IOV_HP2P_MF_ENABLE):
+ int_val = bus->dhd->hp2p_mf_enable ? 1 : 0;
+ bcopy(&int_val, arg, val_size);
+ break;
+#endif /* DHD_HP2P */
+ case IOV_SVAL(IOV_EXTDTXS_IN_TXCPL):
+ if (bus->dhd->busstate != DHD_BUS_DOWN) {
+ return BCME_NOTDOWN;
+ }
+ if (int_val)
+ bus->dhd->extdtxs_in_txcpl = TRUE;
+ else
+ bus->dhd->extdtxs_in_txcpl = FALSE;
+ break;
+
+ case IOV_GVAL(IOV_EXTDTXS_IN_TXCPL):
+ int_val = bus->dhd->extdtxs_in_txcpl;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_HOSTRDY_AFTER_INIT):
+ if (bus->dhd->busstate != DHD_BUS_DOWN) {
+ return BCME_NOTDOWN;
+ }
+ if (int_val)
+ bus->dhd->hostrdy_after_init = TRUE;
+ else
+ bus->dhd->hostrdy_after_init = FALSE;
+ break;
+
+ case IOV_GVAL(IOV_HOSTRDY_AFTER_INIT):
+ int_val = bus->dhd->hostrdy_after_init;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+#ifdef BCMINTERNAL
+ case IOV_GVAL(IOV_SBREG_64):
+ {
+ sdreg_64_t sdreg;
+ uint32 addr, size;
+
+ bcopy(params, &sdreg, sizeof(sdreg));
+
+ addr = sdreg.offset | SI_ENUM_BASE(bus->sih);
+ size = sdreg.func;
+
+ if (serialized_backplane_access_64(bus, addr, size,
+ &uint64_val, TRUE) != BCME_OK)
+ {
+ DHD_ERROR(("Invalid size/addr combination \n"));
+ bcmerror = BCME_ERROR;
+ break;
+ }
+ bcopy(&uint64_val, arg, size);
+ break;
+ }
+
+ case IOV_SVAL(IOV_SBREG_64):
+ {
+ sdreg_64_t sdreg;
+ uint32 addr, size;
+
+ bcopy(params, &sdreg, sizeof(sdreg));
+
+ addr = sdreg.offset | SI_ENUM_BASE(bus->sih);
+ size = sdreg.func;
+
+ if (serialized_backplane_access_64(bus, addr, size,
+ (uint64 *)(&sdreg.value), FALSE) != BCME_OK) {
+ DHD_ERROR(("Invalid size/addr combination \n"));
+ bcmerror = BCME_ERROR;
+ }
+ break;
+ }
+#endif /* BCMINTERNAL */
+
+ default:
+ bcmerror = BCME_UNSUPPORTED;
+ break;
+ }
+
+exit:
+ return bcmerror;
+} /* dhdpcie_bus_doiovar */
+
+/** Transfers bytes from host to dongle using pio mode */
+static int
+dhdpcie_bus_lpback_req(struct dhd_bus *bus, uint32 len)
+{
+ if (bus->dhd == NULL) {
+ DHD_ERROR(("%s: bus not inited\n", __FUNCTION__));
+ return 0;
+ }
+ if (bus->dhd->prot == NULL) {
+ DHD_ERROR(("%s: prot is not inited\n", __FUNCTION__));
+ return 0;
+ }
+ if (bus->dhd->busstate != DHD_BUS_DATA) {
+ DHD_ERROR(("%s: bus is not in a ready state for LPBK\n", __FUNCTION__));
+ return 0;
+ }
+ dhdmsgbuf_lpbk_req(bus->dhd, len);
+ return 0;
+}
+
+void
+dhd_bus_dump_dar_registers(struct dhd_bus *bus)
+{
+ uint32 dar_clk_ctrl_val, dar_pwr_ctrl_val, dar_intstat_val,
+ dar_errlog_val, dar_erraddr_val, dar_pcie_mbint_val;
+ uint32 dar_clk_ctrl_reg, dar_pwr_ctrl_reg, dar_intstat_reg,
+ dar_errlog_reg, dar_erraddr_reg, dar_pcie_mbint_reg;
+
+ if (bus->is_linkdown) {
+ DHD_ERROR(("%s: link is down\n", __FUNCTION__));
+ return;
+ }
+
+ if (bus->sih == NULL) {
+ DHD_ERROR(("%s: si_attach has not happened, cannot dump DAR registers\n",
+ __FUNCTION__));
+ return;
+ }
+
+ if (DAR_PWRREQ(bus)) {
+ dhd_bus_pcie_pwr_req(bus);
+ }
+
+ dar_clk_ctrl_reg = (uint32)DAR_CLK_CTRL(bus->sih->buscorerev);
+ dar_pwr_ctrl_reg = (uint32)DAR_PCIE_PWR_CTRL(bus->sih->buscorerev);
+ dar_intstat_reg = (uint32)DAR_INTSTAT(bus->sih->buscorerev);
+ dar_errlog_reg = (uint32)DAR_ERRLOG(bus->sih->buscorerev);
+ dar_erraddr_reg = (uint32)DAR_ERRADDR(bus->sih->buscorerev);
+ dar_pcie_mbint_reg = (uint32)DAR_PCIMailBoxInt(bus->sih->buscorerev);
+
+ if (bus->sih->buscorerev < 24) {
+ DHD_ERROR(("%s: DAR not supported for corerev(%d) < 24\n",
+ __FUNCTION__, bus->sih->buscorerev));
+ return;
+ }
+
+ dar_clk_ctrl_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_clk_ctrl_reg, 0, 0);
+ dar_pwr_ctrl_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_pwr_ctrl_reg, 0, 0);
+ dar_intstat_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_intstat_reg, 0, 0);
+ dar_errlog_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_errlog_reg, 0, 0);
+ dar_erraddr_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_erraddr_reg, 0, 0);
+ dar_pcie_mbint_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_pcie_mbint_reg, 0, 0);
+
+ DHD_RPM(("%s: dar_clk_ctrl(0x%x:0x%x) dar_pwr_ctrl(0x%x:0x%x) "
+ "dar_intstat(0x%x:0x%x)\n",
+ __FUNCTION__, dar_clk_ctrl_reg, dar_clk_ctrl_val,
+ dar_pwr_ctrl_reg, dar_pwr_ctrl_val, dar_intstat_reg, dar_intstat_val));
+
+ DHD_RPM(("%s: dar_errlog(0x%x:0x%x) dar_erraddr(0x%x:0x%x) "
+ "dar_pcie_mbint(0x%x:0x%x)\n",
+ __FUNCTION__, dar_errlog_reg, dar_errlog_val,
+ dar_erraddr_reg, dar_erraddr_val, dar_pcie_mbint_reg, dar_pcie_mbint_val));
+}
+
+/* Ring DoorBell1 to indicate Hostready i.e. D3 Exit */
+void
+dhd_bus_hostready(struct dhd_bus *bus)
+{
+ if (!bus->dhd->d2h_hostrdy_supported) {
+ return;
+ }
+
+ if (bus->is_linkdown) {
+ DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
+ return;
+ }
+
+ DHD_ERROR_MEM(("%s : Read PCICMD Reg: 0x%08X\n", __FUNCTION__,
+ dhd_pcie_config_read(bus, PCI_CFG_CMD, sizeof(uint32))));
+
+ dhd_bus_dump_dar_registers(bus);
+
+#ifdef DHD_MMIO_TRACE
+ dhd_bus_mmio_trace(bus, dhd_bus_db1_addr_get(bus), 0x1, TRUE);
+#endif /* defined(DHD_MMIO_TRACE) */
+ si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db1_addr_get(bus), ~0, 0x12345678);
+ bus->hostready_count ++;
+ DHD_ERROR_MEM(("%s: Ring Hostready:%d\n", __FUNCTION__, bus->hostready_count));
+}
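+
+/* Note (editor's sketch): the 0x12345678 written to doorbell 1 above is a
+ * fixed hostready pattern; what matters to the dongle is that DB1 was rung,
+ * which it takes as the D3-exit / hostready indication.
+ */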
+
+/* Clear INTSTATUS */
+void
+dhdpcie_bus_clear_intstatus(struct dhd_bus *bus)
+{
+ uint32 intstatus = 0;
+ /* Skip after receiving D3 ACK */
+ if (DHD_CHK_BUS_LPS_D3_ACKED(bus)) {
+ return;
+ }
+ /* XXX: check for PCIE Gen2 also */
+ if ((bus->sih->buscorerev == 6) || (bus->sih->buscorerev == 4) ||
+ (bus->sih->buscorerev == 2)) {
+ intstatus = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4);
+ dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, intstatus);
+ } else {
+ /* this is a PCIE core register, not a config register */
+ intstatus = si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int, 0, 0);
+ si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int, bus->def_intmask,
+ intstatus);
+ }
+}
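+
+/* Note (editor's sketch): on buscorerev 2/4/6 INTSTATUS lives in PCI config
+ * space (PCIIntstatus) and is cleared via the config read/write helpers; on
+ * newer cores it is the PCIe core mailbox-int register and is cleared through
+ * si_corereg() with def_intmask.
+ */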
+
+int
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+dhdpcie_bus_suspend(struct dhd_bus *bus, bool state, bool byint)
+#else
+dhdpcie_bus_suspend(struct dhd_bus *bus, bool state)
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+{
+ int timeleft;
+ int rc = 0;
+ unsigned long flags;
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+ int d3_read_retry = 0;
+ uint32 d2h_mb_data = 0;
+ uint32 zero = 0;
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+
+ if (bus->dhd == NULL) {
+ DHD_ERROR(("%s: bus not inited\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+ if (bus->dhd->prot == NULL) {
+ DHD_ERROR(("%s: prot is not inited\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ if (dhd_query_bus_erros(bus->dhd)) {
+ return BCME_ERROR;
+ }
+
+ DHD_GENERAL_LOCK(bus->dhd, flags);
+ if (!(bus->dhd->busstate == DHD_BUS_DATA || bus->dhd->busstate == DHD_BUS_SUSPEND)) {
+ DHD_ERROR(("%s: not in a ready state\n", __FUNCTION__));
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+ return BCME_ERROR;
+ }
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+ if (bus->dhd->dongle_reset) {
+ DHD_ERROR(("Dongle is in reset state.\n"));
+ return -EIO;
+ }
+
+ /* Check whether we are already in the requested state.
+ * state=TRUE means Suspend
+ * state=FALSE means Resume
+ */
+ if (state == TRUE && bus->dhd->busstate == DHD_BUS_SUSPEND) {
+ DHD_ERROR(("Bus is already in SUSPEND state.\n"));
+ return BCME_OK;
+ } else if (state == FALSE && bus->dhd->busstate == DHD_BUS_DATA) {
+ DHD_ERROR(("Bus is already in RESUME state.\n"));
+ return BCME_OK;
+ }
+
+ if (state) {
+#ifdef OEM_ANDROID
+ int idle_retry = 0;
+ int active;
+#endif /* OEM_ANDROID */
+
+ if (bus->is_linkdown) {
+ DHD_ERROR(("%s: PCIe link was down, state=%d\n",
+ __FUNCTION__, state));
+ return BCME_ERROR;
+ }
+
+ /* Suspend */
+ DHD_RPM(("%s: Entering suspend state\n", __FUNCTION__));
+
+ bus->dhd->dhd_watchdog_ms_backup = dhd_watchdog_ms;
+ if (bus->dhd->dhd_watchdog_ms_backup) {
+ DHD_ERROR(("%s: Disabling wdtick before going to suspend\n",
+ __FUNCTION__));
+ dhd_os_wd_timer(bus->dhd, 0);
+ }
+
+ DHD_GENERAL_LOCK(bus->dhd, flags);
+#if defined(LINUX) || defined(linux)
+ if (DHD_BUS_BUSY_CHECK_IN_TX(bus->dhd)) {
+ DHD_ERROR(("Tx request has not completed\n"));
+ bus->dhd->busstate = DHD_BUS_DATA;
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+#ifndef DHD_EFI
+ return -EBUSY;
+#else
+ return BCME_ERROR;
+#endif
+ }
+#endif /* LINUX || linux */
+
+ bus->last_suspend_start_time = OSL_LOCALTIME_NS();
+
+ /* stop all interface network queue. */
+ dhd_bus_stop_queue(bus);
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+ if (byint) {
+ DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
+ /* Clear wait_for_d3_ack before sending D3_INFORM */
+ bus->wait_for_d3_ack = 0;
+ dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM);
+
+ timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack);
+ DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
+ } else {
+ /* Clear wait_for_d3_ack before sending D3_INFORM */
+ bus->wait_for_d3_ack = 0;
+ dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM | H2D_HOST_ACK_NOINT);
+ while (!bus->wait_for_d3_ack && d3_read_retry < MAX_D3_ACK_TIMEOUT) {
+ dhdpcie_handle_mb_data(bus);
+ usleep_range(1000, 1500);
+ d3_read_retry++;
+ }
+ }
+#else
+ DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
+#ifdef DHD_TIMESYNC
+ /* disable time sync mechanism, if configured */
+ dhd_timesync_control(bus->dhd, TRUE);
+#endif /* DHD_TIMESYNC */
+
+#ifdef PCIE_INB_DW
+ /* As D3_INFORM will be sent after De-assert,
+ * skip sending DS-ACK for DS-REQ.
+ */
+ bus->skip_ds_ack = TRUE;
+#endif /* PCIE_INB_DW */
+
+#if defined(PCIE_OOB) || defined(PCIE_INB_DW)
+ dhd_bus_set_device_wake(bus, TRUE);
+#endif /* defined(PCIE_OOB) || defined(PCIE_INB_DW) */
+#ifdef PCIE_OOB
+ bus->oob_presuspend = TRUE;
+#endif
+#ifdef PCIE_INB_DW
+ /* De-assert at this point for In-band device_wake */
+ if (INBAND_DW_ENAB(bus)) {
+#ifdef DHD_EFI
+ /* during pcie suspend, irrespective of whether 'deep_sleep' is
+ * enabled or disabled, always de-assert DW. If 'deep_sleep' was disabled
+ * by user iovar, then upon resuming, DW is again asserted in the
+ * 'dhd_bus_handle_mb_data' path.
+ */
+ dhd_bus_inb_set_device_wake(bus, FALSE);
+#else
+ dhd_bus_set_device_wake(bus, FALSE);
+#endif /* DHD_EFI */
+ DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
+ dhdpcie_bus_set_pcie_inband_dw_state(bus, DW_DEVICE_HOST_SLEEP_WAIT);
+ DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
+ }
+#endif /* PCIE_INB_DW */
+ /* Clear wait_for_d3_ack before sending D3_INFORM */
+ bus->wait_for_d3_ack = 0;
+ /*
+ * Send H2D_HOST_D3_INFORM to dongle and mark bus->bus_low_power_state
+ * to DHD_BUS_D3_INFORM_SENT in dhd_prot_ring_write_complete_mbdata
+ * inside atomic context, so that no more DBs will be
+ * rung after sending D3_INFORM
+ */
+#ifdef PCIE_INB_DW
+ if (INBAND_DW_ENAB(bus)) {
+ DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
+ dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM);
+ DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
+ } else
+#endif /* PCIE_INB_DW */
+ {
+ dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM);
+ }
+
+ /* Wait for D3 ACK for D3_ACK_RESP_TIMEOUT seconds */
+
+ timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack);
+
+#ifdef DHD_RECOVER_TIMEOUT
+ /* XXX: WAR for missing D3 ACK MB interrupt */
+ if (bus->wait_for_d3_ack == 0) {
+ /* If wait_for_d3_ack was not updated because D2H MB was not received */
+ uint32 intstatus = si_corereg(bus->sih, bus->sih->buscoreidx,
+ bus->pcie_mailbox_int, 0, 0);
+ int host_irq_disabled = dhdpcie_irq_disabled(bus);
+ if ((intstatus) && (intstatus != (uint32)-1) &&
+ (timeleft == 0) && (!dhd_query_bus_erros(bus->dhd))) {
+ DHD_ERROR(("%s: D3 ACK trying again intstatus=%x"
+ " host_irq_disabled=%d\n",
+ __FUNCTION__, intstatus, host_irq_disabled));
+ dhd_pcie_intr_count_dump(bus->dhd);
+ dhd_print_tasklet_status(bus->dhd);
+ if (bus->api.fw_rev >= PCIE_SHARED_VERSION_6 &&
+ !bus->use_mailbox) {
+ dhd_prot_process_ctrlbuf(bus->dhd);
+ } else {
+ dhdpcie_handle_mb_data(bus);
+ }
+ timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack);
+ /* Clear Interrupts */
+ dhdpcie_bus_clear_intstatus(bus);
+ }
+ } /* bus->wait_for_d3_ack was 0 */
+#endif /* DHD_RECOVER_TIMEOUT */
+
+ DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+
+#ifdef OEM_ANDROID
+ /* To allow threads that got pre-empted to complete.
+ */
+ while ((active = dhd_os_check_wakelock_all(bus->dhd)) &&
+ (idle_retry < MAX_WKLK_IDLE_CHECK)) {
+ OSL_SLEEP(1);
+ idle_retry++;
+ }
+#endif /* OEM_ANDROID */
+
+ if (bus->wait_for_d3_ack) {
+ DHD_RPM(("%s: Got D3 Ack \n", __FUNCTION__));
+ /* Got D3 Ack. Suspend the bus */
+#ifdef OEM_ANDROID
+ if (active) {
+ DHD_ERROR(("%s(): Suspend failed because of wakelock, "
+ "restoring Dongle to D0\n", __FUNCTION__));
+
+ if (bus->dhd->dhd_watchdog_ms_backup) {
+ DHD_ERROR(("%s: Enabling wdtick due to wakelock active\n",
+ __FUNCTION__));
+ dhd_os_wd_timer(bus->dhd,
+ bus->dhd->dhd_watchdog_ms_backup);
+ }
+
+ /*
+ * The dongle still thinks it has to stay in D3 until it gets
+ * a D0 Inform, but we are backing off from suspend. Ensure
+ * that the dongle is brought back to D0.
+ *
+ * Bringing the dongle back from the D3 Ack state to D0 is a
+ * two-step process: the D0 Inform is sent as a MB interrupt,
+ * followed by the hostready doorbell, so both messages are
+ * sent below.
+ */
+
+ /* Clear wait_for_d3_ack to send D0_INFORM or host_ready */
+ bus->wait_for_d3_ack = 0;
+
+ DHD_SET_BUS_NOT_IN_LPS(bus);
+#ifdef PCIE_INB_DW
+ if (INBAND_DW_ENAB(bus)) {
+ DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
+					/* Since suspend has failed because a wakelock is
+					 * held, update the DS state to DW_DEVICE_HOST_WAKE_WAIT
+					 * so that the host sends the DS-ACK for the DS-REQ.
+					 */
+					DHD_ERROR(("Suspend failed because a wakelock is held, "
+					"set inband dw state to DW_DEVICE_HOST_WAKE_WAIT\n"));
+ dhdpcie_bus_set_pcie_inband_dw_state(bus,
+ DW_DEVICE_HOST_WAKE_WAIT);
+ dhd_bus_ds_trace(bus, 0, TRUE,
+ dhdpcie_bus_get_pcie_inband_dw_state(bus));
+ DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
+ }
+ bus->skip_ds_ack = FALSE;
+#endif /* PCIE_INB_DW */
+			/* For Linux, MacOS etc. (other than NDIS) enable back the dongle
+ * interrupts using intmask and host interrupts
+ * which were disabled in the dhdpcie_bus_isr()->
+ * dhd_bus_handle_d3_ack().
+ */
+ /* Enable back interrupt using Intmask!! */
+ dhdpcie_bus_intr_enable(bus);
+#ifndef NDIS /* !NDIS */
+ /* Defer enabling host irq after RPM suspend failure */
+ if (!DHD_BUS_BUSY_CHECK_RPM_SUSPEND_IN_PROGRESS(bus->dhd)) {
+ /* Enable back interrupt from Host side!! */
+ if (dhdpcie_irq_disabled(bus)) {
+ dhdpcie_enable_irq(bus);
+ bus->resume_intr_enable_count++;
+ }
+ }
+#else
+ /* Enable back the intmask which was cleared in DPC
+ * after getting D3_ACK.
+ */
+ bus->resume_intr_enable_count++;
+
+#endif /* !NDIS */
+ if (bus->use_d0_inform) {
+ DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
+ dhdpcie_send_mb_data(bus,
+ (H2D_HOST_D0_INFORM_IN_USE | H2D_HOST_D0_INFORM));
+ DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
+ }
+ /* ring doorbell 1 (hostready) */
+ dhd_bus_hostready(bus);
+
+ DHD_GENERAL_LOCK(bus->dhd, flags);
+ bus->dhd->busstate = DHD_BUS_DATA;
+ /* resume all interface network queue. */
+ dhd_bus_start_queue(bus);
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+ rc = BCME_ERROR;
+ } else {
+ /* Actual Suspend after no wakelock */
+#endif /* OEM_ANDROID */
+		/* At this time bus->bus_low_power_state will be
+		 * set to DHD_BUS_D3_ACK_RECIEVED after receiving D3_ACK
+		 * in dhd_bus_handle_d3_ack()
+		 */
+#ifdef PCIE_OOB
+ bus->oob_presuspend = FALSE;
+ if (OOB_DW_ENAB(bus)) {
+ dhd_bus_set_device_wake(bus, FALSE);
+ }
+#endif /* PCIE_OOB */
+#ifdef PCIE_OOB
+ bus->oob_presuspend = TRUE;
+#endif /* PCIE_OOB */
+#ifdef PCIE_INB_DW
+ if (INBAND_DW_ENAB(bus)) {
+ DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
+ if (dhdpcie_bus_get_pcie_inband_dw_state(bus) ==
+ DW_DEVICE_HOST_SLEEP_WAIT) {
+ dhdpcie_bus_set_pcie_inband_dw_state(bus,
+ DW_DEVICE_HOST_SLEEP);
+					/* We are already inside #ifdef PCIE_INB_DW,
+					 * so call the DW-state variant directly.
+					 */
+					dhd_bus_ds_trace(bus, 0, TRUE,
+						dhdpcie_bus_get_pcie_inband_dw_state(bus));
+ }
+ DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
+ }
+#endif /* PCIE_INB_DW */
+ if (bus->use_d0_inform &&
+ (bus->api.fw_rev < PCIE_SHARED_VERSION_6)) {
+ DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
+ dhdpcie_send_mb_data(bus, (H2D_HOST_D0_INFORM_IN_USE));
+ DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
+ }
+#ifdef PCIE_OOB
+ dhd_bus_set_device_wake(bus, FALSE);
+#endif /* PCIE_OOB */
+
+#if defined(BCMPCIE_OOB_HOST_WAKE)
+ if (bus->dhd->dhd_induce_error == DHD_INDUCE_DROP_OOB_IRQ) {
+ DHD_ERROR(("%s: Inducing DROP OOB IRQ\n", __FUNCTION__));
+ } else {
+ dhdpcie_oob_intr_set(bus, TRUE);
+ }
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+
+ DHD_GENERAL_LOCK(bus->dhd, flags);
+		/* The Host cannot process interrupts now, so disable them.
+		 * No need to disable the dongle INTR using intmask, as we are
+		 * already disabling INTRs from DPC context after
+		 * getting D3_ACK in dhd_bus_handle_d3_ack.
+		 * The code may not look symmetric between the Suspend and
+		 * Resume paths, but this is done to close down the timing window
+		 * between DPC and suspend context; bus->bus_low_power_state
+		 * will be set to DHD_BUS_D3_ACK_RECIEVED in DPC.
+		 */
+ bus->dhd->d3ackcnt_timeout = 0;
+ bus->dhd->busstate = DHD_BUS_SUSPEND;
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+#if defined(LINUX) || defined(linux)
+ dhdpcie_dump_resource(bus);
+#endif /* LINUX || linux */
+ rc = dhdpcie_pci_suspend_resume(bus, state);
+ if (!rc) {
+ bus->last_suspend_end_time = OSL_LOCALTIME_NS();
+ }
+#ifdef OEM_ANDROID
+ }
+#endif /* OEM_ANDROID */
+ } else if (timeleft == 0) { /* D3 ACK Timeout */
+#ifdef DHD_FW_COREDUMP
+ uint32 cur_memdump_mode = bus->dhd->memdump_enabled;
+#endif /* DHD_FW_COREDUMP */
+
+ /* check if the D3 ACK timeout due to scheduling issue */
+ bus->dhd->is_sched_error = !dhd_query_bus_erros(bus->dhd) &&
+ dhd_bus_query_dpc_sched_errors(bus->dhd);
+ bus->dhd->d3ack_timeout_occured = TRUE;
+ /* If the D3 Ack has timeout */
+ bus->dhd->d3ackcnt_timeout++;
+ DHD_ERROR(("%s: resumed on timeout for D3 ACK%s d3_inform_cnt %d\n",
+ __FUNCTION__, bus->dhd->is_sched_error ?
+ " due to scheduling problem" : "", bus->dhd->d3ackcnt_timeout));
+#if defined(DHD_KERNEL_SCHED_DEBUG) && defined(DHD_FW_COREDUMP)
+		/* XXX DHD triggers a Kernel panic if the resume-on-timeout occurs
+		 * due to tasklet or workqueue scheduling problems in the Linux Kernel.
+		 * The customer reports that it is hard to find any clue in the
+		 * host memory dump, since the important tasklet or workqueue
+		 * information has already disappeared due to the latency of printing
+		 * the timestamp logs for debugging the scan timeout issue.
+		 * For this reason, the customer requested that we trigger a Kernel
+		 * Panic rather than taking a SOCRAM dump.
+		 */
+ if (bus->dhd->is_sched_error && cur_memdump_mode == DUMP_MEMFILE_BUGON) {
+ /* change g_assert_type to trigger Kernel panic */
+ g_assert_type = 2;
+ /* use ASSERT() to trigger panic */
+ ASSERT(0);
+ }
+#endif /* DHD_KERNEL_SCHED_DEBUG && DHD_FW_COREDUMP */
+ DHD_SET_BUS_NOT_IN_LPS(bus);
+
+ DHD_GENERAL_LOCK(bus->dhd, flags);
+ bus->dhd->busstate = DHD_BUS_DATA;
+ /* resume all interface network queue. */
+ dhd_bus_start_queue(bus);
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+		/* XXX: avoid multiple socram dumps from a dongle trap and
+		 * invalid PCIe bus accesses due to PCIe link down
+		 */
+ if (bus->dhd->check_trap_rot) {
+ DHD_ERROR(("Check dongle trap in the case of d3 ack timeout\n"));
+ dhdpcie_checkdied(bus, NULL, 0);
+ }
+ if (bus->dhd->dongle_trap_occured) {
+#ifdef OEM_ANDROID
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
+ bus->no_cfg_restore = 1;
+#endif /* CONFIG_ARCH_MSM */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+ dhd_os_check_hang(bus->dhd, 0, -EREMOTEIO);
+#endif /* OEM_ANDROID */
+ } else if (!bus->is_linkdown &&
+ !bus->cto_triggered) {
+ uint32 intstatus = 0;
+
+ /* Check if PCIe bus status is valid */
+ intstatus = si_corereg(bus->sih, bus->sih->buscoreidx,
+ bus->pcie_mailbox_int, 0, 0);
+ if (intstatus == (uint32)-1) {
+ /* Invalidate PCIe bus status */
+ bus->is_linkdown = 1;
+ }
+
+ dhd_bus_dump_console_buffer(bus);
+ dhd_prot_debug_info_print(bus->dhd);
+#ifdef DHD_FW_COREDUMP
+ if (cur_memdump_mode) {
+ /* write core dump to file */
+ bus->dhd->memdump_type = DUMP_TYPE_D3_ACK_TIMEOUT;
+ dhdpcie_mem_dump(bus);
+ }
+#endif /* DHD_FW_COREDUMP */
+
+#ifdef NDIS
+			/* ASSERT only if hang detection/recovery is disabled.
+			 * If enabled, then let the
+			 * Windows HDR mechanism trigger FW download via surprise removal.
+			 */
+ dhd_bus_check_died(bus);
+#endif
+#ifdef OEM_ANDROID
+ DHD_ERROR(("%s: Event HANG send up due to D3_ACK timeout\n",
+ __FUNCTION__));
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
+ bus->no_cfg_restore = 1;
+#endif /* CONFIG_ARCH_MSM */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+ dhd_os_check_hang(bus->dhd, 0, -ETIMEDOUT);
+#endif /* OEM_ANDROID */
+ }
+#if defined(DHD_ERPOM) || (defined(DHD_EFI) && defined(BT_OVER_PCIE))
+ dhd_schedule_reset(bus->dhd);
+#endif /* DHD_ERPOM || DHD_EFI */
+ rc = -ETIMEDOUT;
+ }
+#ifdef PCIE_OOB
+ bus->oob_presuspend = FALSE;
+#endif /* PCIE_OOB */
+ } else {
+ /* Resume */
+ DHD_RPM(("%s: Entering resume state\n", __FUNCTION__));
+ bus->last_resume_start_time = OSL_LOCALTIME_NS();
+
+		/**
+		 * PCIE2_BAR0_CORE2_WIN gets reset after D3 cold.
+		 * si_backplane_access (the function to read/write the backplane)
+		 * updates the window (PCIE2_BAR0_CORE2_WIN) only if the
+		 * window being accessed is different from the window
+		 * pointed to by second_bar0win.
+		 * Since PCIE2_BAR0_CORE2_WIN is already reset because of D3 cold,
+		 * invalidating second_bar0win after resume updates
+		 * PCIE2_BAR0_CORE2_WIN with the right window.
+		 */
+ si_invalidate_second_bar0win(bus->sih);
+#if defined(linux) && defined(OEM_ANDROID)
+#if defined(BCMPCIE_OOB_HOST_WAKE)
+ DHD_OS_OOB_IRQ_WAKE_UNLOCK(bus->dhd);
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+#endif /* linux && OEM_ANDROID */
+#ifdef PCIE_INB_DW
+ if (INBAND_DW_ENAB(bus)) {
+ DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
+ if (dhdpcie_bus_get_pcie_inband_dw_state(bus) == DW_DEVICE_HOST_SLEEP) {
+ dhdpcie_bus_set_pcie_inband_dw_state(bus, DW_DEVICE_HOST_WAKE_WAIT);
+			/* We are already inside #ifdef PCIE_INB_DW,
+			 * so call the DW-state variant directly.
+			 */
+			dhd_bus_ds_trace(bus, 0, TRUE,
+				dhdpcie_bus_get_pcie_inband_dw_state(bus));
+ }
+ DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
+ }
+ bus->skip_ds_ack = FALSE;
+#endif /* PCIE_INB_DW */
+ rc = dhdpcie_pci_suspend_resume(bus, state);
+#if defined(LINUX) || defined(linux)
+ dhdpcie_dump_resource(bus);
+#endif /* LINUX || linux */
+
+ /* set bus_low_power_state to DHD_BUS_NO_LOW_POWER_STATE */
+ DHD_SET_BUS_NOT_IN_LPS(bus);
+
+ if (!rc && bus->dhd->busstate == DHD_BUS_SUSPEND) {
+ if (bus->use_d0_inform) {
+ DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
+ dhdpcie_send_mb_data(bus, (H2D_HOST_D0_INFORM));
+ DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
+ }
+ /* ring doorbell 1 (hostready) */
+ dhd_bus_hostready(bus);
+ }
+ DHD_GENERAL_LOCK(bus->dhd, flags);
+ bus->dhd->busstate = DHD_BUS_DATA;
+#ifdef DHD_PCIE_RUNTIMEPM
+ if (DHD_BUS_BUSY_CHECK_RPM_SUSPEND_DONE(bus->dhd)) {
+ bus->bus_wake = 1;
+ OSL_SMP_WMB();
+ wake_up(&bus->rpm_queue);
+ }
+#endif /* DHD_PCIE_RUNTIMEPM */
+#ifdef PCIE_OOB
+	/*
+	 * Assert & deassert the Device Wake. The following is the explanation for doing so.
+	 * 0) At this point,
+	 *    the Host is in suspend state, the Link is in L2/L3, the Dongle is in D3 Cold,
+	 *    and Device Wake is enabled.
+	 * 1) When the Host comes out of Suspend, it first sends PERST# on the Link.
+	 *    Seeing this, the Dongle moves from D3 Cold to the NO DS state.
+	 * 2) Now the Host OS calls the "resume" function of DHD. From here the DHD first
+	 *    asserts the Device Wake.
+	 *    By definition, when the Device Wake is asserted, the dongle FW will ensure
+	 *    that the Dongle is out of deep sleep IF the device is already in deep sleep.
+	 *    But note that now the Dongle is NOT in deep sleep and is actually in the
+	 *    NO DS state. So just driving the Device Wake high does not trigger any state
+	 *    transitions. The Host should actually "toggle" the Device Wake to ensure
+	 *    that the Dongle synchronizes with the Host and starts the state transition to D0.
+	 * 3) Note that the above explanation is applicable only when the Host comes out of
+	 *    suspend and the Dongle comes out of D3 Cold.
+	 */
+ /* This logic is not required when hostready is enabled */
+
+ if (!bus->dhd->d2h_hostrdy_supported) {
+ dhd_bus_set_device_wake(bus, TRUE);
+ OSL_DELAY(1000);
+ dhd_bus_set_device_wake(bus, FALSE);
+ }
+
+#endif /* PCIE_OOB */
+ /* resume all interface network queue. */
+ dhd_bus_start_queue(bus);
+
+		/* For Linux, MacOS etc. (other than NDIS) enable back the dongle interrupts
+ * using intmask and host interrupts
+ * which were disabled in the dhdpcie_bus_isr()->dhd_bus_handle_d3_ack().
+ */
+ dhdpcie_bus_intr_enable(bus); /* Enable back interrupt using Intmask!! */
+#ifndef NDIS /* !NDIS */
+ /* Defer enabling host interrupt until RPM resume done */
+ if (!DHD_BUS_BUSY_CHECK_RPM_RESUME_IN_PROGRESS(bus->dhd)) {
+ if (dhdpcie_irq_disabled(bus)) {
+ dhdpcie_enable_irq(bus);
+ bus->resume_intr_enable_count++;
+ }
+ }
+#else
+		/* TODO: NDIS should also use enable_irq in the future */
+ bus->resume_intr_enable_count++;
+#endif /* !NDIS */
+
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+
+#ifdef DHD_TIMESYNC
+	/* enable the time sync mechanism, if configured */
+ DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
+ dhd_timesync_control(bus->dhd, FALSE);
+ DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
+#endif /* DHD_TIMESYNC */
+
+ if (bus->dhd->dhd_watchdog_ms_backup) {
+ DHD_ERROR(("%s: Enabling wdtick after resume\n",
+ __FUNCTION__));
+ dhd_os_wd_timer(bus->dhd, bus->dhd->dhd_watchdog_ms_backup);
+ }
+
+ bus->last_resume_end_time = OSL_LOCALTIME_NS();
+
+ /* Update TCM rd index for EDL ring */
+ DHD_EDL_RING_TCM_RD_UPDATE(bus->dhd);
+
+ }
+ return rc;
+}
+
+#define BUS_SUSPEND TRUE
+#define BUS_RESUME FALSE
+int dhd_bus_suspend(dhd_pub_t *dhd)
+{
+ int ret;
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+ /* TODO: Check whether the arguments are correct */
+ ret = dhdpcie_bus_suspend(dhd->bus, TRUE, BUS_SUSPEND);
+#else
+ ret = dhdpcie_bus_suspend(dhd->bus, BUS_SUSPEND);
+#endif
+ return ret;
+}
+
+int dhd_bus_resume(dhd_pub_t *dhd, int stage)
+{
+ int ret;
+ BCM_REFERENCE(stage);
+
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+ /* TODO: Check whether the arguments are correct */
+ ret = dhdpcie_bus_suspend(dhd->bus, FALSE, BUS_RESUME);
+#else
+ ret = dhdpcie_bus_suspend(dhd->bus, BUS_RESUME);
+#endif
+ return ret;
+}
+
+uint32
+dhdpcie_force_alp(struct dhd_bus *bus, bool enable)
+{
+ ASSERT(bus && bus->sih);
+ if (enable) {
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, u.pcie2.clk_ctl_st), CCS_FORCEALP, CCS_FORCEALP);
+ } else {
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, u.pcie2.clk_ctl_st), CCS_FORCEALP, 0);
+ }
+ return 0;
+}
+
+/* set pcie l1 entry time: dhd pciereg 0x1004[22:16] */
+uint32
+dhdpcie_set_l1_entry_time(struct dhd_bus *bus, int l1_entry_time)
+{
+ uint reg_val;
+
+ ASSERT(bus && bus->sih);
+
+ si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configaddr), ~0,
+ 0x1004);
+ reg_val = si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, configdata), 0, 0);
+ reg_val = (reg_val & ~(0x7f << 16)) | ((l1_entry_time & 0x7f) << 16);
+ si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configdata), ~0,
+ reg_val);
+
+ return 0;
+}
+
+static uint32
+dhd_apply_d11_war_length(struct dhd_bus *bus, uint32 len, uint32 d11_lpbk)
+{
+ uint16 chipid = si_chipid(bus->sih);
+ /*
+ * XXX: WAR for CRWLDOT11M-3011
+ * program the DMA descriptor Buffer length as the expected frame length
+ * + 8 bytes extra for corerev 82 when buffer length % 128 is equal to 4
+ */
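+	/* Illustrative example: an expected frame length of 132 bytes
+	 * (132 % 128 == 4) is programmed as 132 + 8 = 140. Note the extra 8
+	 * bytes are added for all of the chips listed below, not only when
+	 * the modulo condition holds.
+	 */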
+ if ((chipid == BCM4375_CHIP_ID ||
+ chipid == BCM4362_CHIP_ID ||
+ chipid == BCM4377_CHIP_ID ||
+ chipid == BCM43751_CHIP_ID ||
+ chipid == BCM43752_CHIP_ID) &&
+ (d11_lpbk != M2M_DMA_LPBK && d11_lpbk != M2M_NON_DMA_LPBK)) {
+ len += 8;
+ }
+	DHD_ERROR(("%s: len %u\n", __FUNCTION__, len));
+ return len;
+}
+
+/** Transfers bytes from host to dongle and to host again using DMA */
+static int
+dhdpcie_bus_dmaxfer_req(struct dhd_bus *bus,
+ uint32 len, uint32 srcdelay, uint32 destdelay,
+ uint32 d11_lpbk, uint32 core_num, uint32 wait,
+ uint32 mem_addr)
+{
+ int ret = 0;
+
+ if (bus->dhd == NULL) {
+ DHD_ERROR(("%s: bus not inited\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+ if (bus->dhd->prot == NULL) {
+ DHD_ERROR(("%s: prot is not inited\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+ if (bus->dhd->busstate != DHD_BUS_DATA) {
+		DHD_ERROR(("%s: bus not in a ready state for LPBK\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ if (len < 5 || len > 4194296) {
+		DHD_ERROR(("%s: len %u is out of range (5-4194296)\n", __FUNCTION__, len));
+ return BCME_ERROR;
+ }
+
+#if defined(DHD_EFI) && defined(DHD_INTR_POLL_PERIOD_DYNAMIC)
+ bus->dhd->cur_intr_poll_period = dhd_os_get_intr_poll_period();
+ /* before running loopback test, set interrupt poll period to a lesser value */
+ dhd_os_set_intr_poll_period(bus, INTR_POLL_PERIOD_CRITICAL);
+#endif /* DHD_EFI && DHD_INTR_POLL_PERIOD_DYNAMIC */
+
+ len = dhd_apply_d11_war_length(bus, len, d11_lpbk);
+
+ bus->dmaxfer_complete = FALSE;
+ ret = dhdmsgbuf_dmaxfer_req(bus->dhd, len, srcdelay, destdelay,
+ d11_lpbk, core_num, mem_addr);
+ if (ret != BCME_OK || !wait) {
+ DHD_INFO(("%s: dmaxfer req returns status %u; wait = %u\n", __FUNCTION__,
+ ret, wait));
+ } else {
+ ret = dhd_os_dmaxfer_wait(bus->dhd, &bus->dmaxfer_complete);
+ if (ret < 0)
+ ret = BCME_NOTREADY;
+#if defined(DHD_EFI) && defined(DHD_INTR_POLL_PERIOD_DYNAMIC)
+ /* restore interrupt poll period to the previous existing value */
+ dhd_os_set_intr_poll_period(bus, bus->dhd->cur_intr_poll_period);
+#endif /* DHD_EFI && DHD_INTR_POLL_PERIOD_DYNAMIC */
+ }
+
+ return ret;
+
+}
+
+#ifdef BCMINTERNAL
+static int
+dhdpcie_bus_set_tx_lpback(struct dhd_bus *bus, bool enable)
+{
+ if (bus->dhd == NULL) {
+ DHD_ERROR(("bus not inited\n"));
+ return BCME_ERROR;
+ }
+ if (bus->dhd->prot == NULL) {
+ DHD_ERROR(("prot is not inited\n"));
+ return BCME_ERROR;
+ }
+ if (bus->dhd->busstate != DHD_BUS_DATA) {
+		DHD_ERROR(("bus not in a ready state for LPBK\n"));
+ return BCME_ERROR;
+ }
+ bus->dhd->loopback = enable;
+ return BCME_OK;
+}
+
+static int
+dhdpcie_bus_get_tx_lpback(struct dhd_bus *bus)
+{
+ if (bus->dhd == NULL) {
+ DHD_ERROR(("bus not inited\n"));
+ return BCME_ERROR;
+ }
+	return bus->dhd->loopback ? 1 : 0;
+}
+#endif /* BCMINTERNAL */
+
+bool
+dhd_bus_is_multibp_capable(struct dhd_bus *bus)
+{
+ return MULTIBP_CAP(bus->sih);
+}
+
+#define PCIE_REV_FOR_4378A0 66 /* dhd_bus_perform_flr_with_quiesce() causes problems */
+#define PCIE_REV_FOR_4378B0 68
+
+static int
+dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter)
+{
+ int bcmerror = 0;
+ volatile uint32 *cr4_regs;
+ bool do_flr;
+ bool do_wr_flops = TRUE;
+
+ if (!bus->sih) {
+ DHD_ERROR(("%s: NULL sih!!\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ do_flr = ((bus->sih->buscorerev != PCIE_REV_FOR_4378A0) &&
+ (bus->sih->buscorerev != PCIE_REV_FOR_4378B0));
+
+ /*
+ * Jira SWWLAN-214966: 4378B0 BToverPCIe: fails to download firmware
+ * with "insmod dhd.ko firmware_path=rtecdc.bin nvram_path=nvram.txt" format
+ * CTO is seen during autoload case.
+ * Need to assert PD1 power req during ARM out of reset.
+ * And doing FLR after this would conflict as FLR resets PCIe enum space.
+ */
+ if (MULTIBP_ENAB(bus->sih) && !do_flr) {
+ dhd_bus_pcie_pwr_req(bus);
+ }
+
+ /* To enter download state, disable ARM and reset SOCRAM.
+ * To exit download state, simply reset ARM (default is RAM boot).
+ */
+ if (enter) {
+#ifndef BCMQT /* for performance reasons, skip the FLR for QT */
+#ifdef BT_OVER_PCIE
+ if (dhd_bus_is_multibp_capable(bus) && do_flr &&
+ dhd_fw_download_status(bus->dhd) != FW_DOWNLOAD_IN_PROGRESS) {
+ /* for multi-backplane architecture, issue an FLR to reset the WLAN cores */
+ const int pwr_req_ref = bus->pwr_req_ref;
+ if (pwr_req_ref > 0) {
+ (void)dhd_bus_perform_flr_with_quiesce(bus->dhd, bus, FALSE);
+
+ /*
+ * If power has been requested prior to calling FLR, but
+ * the FLR code cleared the power request, we need to
+ * request again to get back to the status of where we were
+ * prior, otherwise there'll be a mismatch in reqs/clears
+ */
+ if (bus->pwr_req_ref < pwr_req_ref) {
+ dhd_bus_pcie_pwr_req(bus);
+ }
+ } else {
+ (void)dhd_bus_perform_flr_with_quiesce(bus->dhd, bus, FALSE);
+ }
+ }
+#endif /* BT_OVER_PCIE */
+#endif /* !BCMQT */
+
+ /* Make sure BAR1 maps to backplane address 0 */
+ dhdpcie_setbar1win(bus, 0x00000000);
+ bus->alp_only = TRUE;
+#ifdef GDB_PROXY
+ bus->gdb_proxy_access_enabled = TRUE;
+ bus->gdb_proxy_bootloader_mode = FALSE;
+#endif /* GDB_PROXY */
+
+		/* some chips (e.g. 43602) have two ARM cores; the CR4 receives the firmware. */
+ cr4_regs = si_setcore(bus->sih, ARMCR4_CORE_ID, 0);
+
+ if (cr4_regs == NULL && !(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
+ !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) &&
+ !(si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) {
+ DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
+
+ if (si_setcore(bus->sih, ARMCA7_CORE_ID, 0)) {
+ /* Halt ARM & remove reset */
+ si_core_reset(bus->sih, SICF_CPUHALT, SICF_CPUHALT);
+ if (!(si_setcore(bus->sih, SYSMEM_CORE_ID, 0))) {
+ DHD_ERROR(("%s: Failed to find SYSMEM core!\n", __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
+ si_core_reset(bus->sih, 0, 0);
+			/* Reset the last 4 bytes of RAM (to be used for the shared area) */
+ dhdpcie_init_shared_addr(bus);
+ } else if (cr4_regs == NULL) { /* no CR4 present on chip */
+ si_core_disable(bus->sih, 0);
+
+ if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
+ DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
+
+ si_core_reset(bus->sih, 0, 0);
+
+ /* Clear the top bit of memory */
+ if (bus->ramsize) {
+ uint32 zeros = 0;
+ if (dhdpcie_bus_membytes(bus, TRUE, bus->ramsize - 4,
+ (uint8*)&zeros, 4) < 0) {
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
+ }
+ } else {
+ /* For CR4,
+ * Halt ARM
+ * Remove ARM reset
+ * Read RAM base address [0x18_0000]
+ * [next] Download firmware
+ * [done at else] Populate the reset vector
+ * [done at else] Remove ARM halt
+ */
+ /* Halt ARM & remove reset */
+ si_core_reset(bus->sih, SICF_CPUHALT, SICF_CPUHALT);
+ if (BCM43602_CHIP(bus->sih->chip)) {
+ /* XXX CRWLARMCR4-53 43602a0 HW bug when banks are powered down */
+ W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKIDX, 5);
+ W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKPDA, 0);
+ W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKIDX, 7);
+ W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKPDA, 0);
+ }
+			/* Reset the last 4 bytes of RAM (to be used for the shared area) */
+ dhdpcie_init_shared_addr(bus);
+ }
+ } else {
+ if (si_setcore(bus->sih, ARMCA7_CORE_ID, 0)) {
+ /* write vars */
+ if ((bcmerror = dhdpcie_bus_write_vars(bus))) {
+ DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__));
+ goto fail;
+ }
+ /* write random numbers to sysmem for the purpose of
+ * randomizing heap address space.
+ */
+ if ((bcmerror = dhdpcie_wrt_rnd(bus)) != BCME_OK) {
+ DHD_ERROR(("%s: Failed to get random seed to write to TCM !\n",
+ __FUNCTION__));
+ goto fail;
+ }
+#ifdef BCMINTERNAL
+ if (bus->hostfw_buf.va) {
+				/* Share the location in host memory
+				 * where the pageable FW binary resides.
+				 */
+ host_page_location_info_t host_location;
+ host_location.tlv_signature =
+ htol32(BCM_HOST_PAGE_LOCATION_SIGNATURE);
+ host_location.tlv_size = htol32(sizeof(host_location)) -
+ sizeof(host_location.tlv_size) -
+ sizeof(host_location.tlv_signature);
+ host_location.binary_size = htol32(bus->hostfw_buf.len);
+ host_location.addr_hi = PHYSADDRHI(bus->hostfw_buf.pa);
+ host_location.addr_lo = PHYSADDRLO(bus->hostfw_buf.pa);
+ bus->next_tlv -= sizeof(host_location);
+ dhdpcie_bus_membytes(bus, TRUE, bus->next_tlv,
+ (uint8*)&host_location, sizeof(host_location));
+ DHD_INFO(("%s:Host page location info:"
+ " %08x-%08x Len:%x!\n",
+ __FUNCTION__, host_location.addr_hi,
+ host_location.addr_lo, host_location.binary_size));
+ }
+#ifdef DHD_FWTRACE
+ {
+ /*
+ * Send host trace buffer at time of firmware download
+ * to enable collecting full init time firmware trace
+ */
+ host_fwtrace_buf_location_info_t host_info;
+
+ if (fwtrace_init(bus->dhd) == BCME_OK) {
+ fwtrace_get_haddr(bus->dhd, &host_info.host_buf_info);
+
+ host_info.tlv_size = sizeof(host_info.host_buf_info);
+ host_info.tlv_signature =
+ htol32(BCM_HOST_FWTRACE_BUF_LOCATION_SIGNATURE);
+
+ bus->ramtop_addr -= sizeof(host_info);
+
+ dhdpcie_bus_membytes(bus, TRUE, bus->ramtop_addr,
+ (uint8*)&host_info, sizeof(host_info));
+
+ bus->next_tlv = sizeof(host_info);
+ }
+ }
+#endif /* DHD_FWTRACE */
+#endif /* BCMINTERNAL */
+
+#if defined(FW_SIGNATURE)
+ if ((bcmerror = dhdpcie_bus_download_fw_signature(bus, &do_wr_flops))
+ != BCME_OK) {
+ goto fail;
+ }
+#endif /* FW_SIGNATURE */
+
+ if (do_wr_flops) {
+ uint32 resetinstr_data;
+
+ /* switch back to arm core again */
+ if (!(si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) {
+ DHD_ERROR(("%s: Failed to find ARM CA7 core!\n",
+ __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
+
+				/*
+				 * read address 0 with the reset instruction,
+				 * to validate that it is not secured
+				 */
+ bcmerror = dhdpcie_bus_membytes(bus, FALSE, 0,
+ (uint8 *)&resetinstr_data, sizeof(resetinstr_data));
+
+ if (resetinstr_data == 0xFFFFFFFF) {
+ DHD_ERROR(("%s: **** FLOPS Vector is secured, "
+ "Signature file is missing! ***\n", __FUNCTION__));
+ bcmerror = BCME_NO_SIG_FILE;
+ goto fail;
+ }
+
+ /* write address 0 with reset instruction */
+ bcmerror = dhdpcie_bus_membytes(bus, TRUE, 0,
+ (uint8 *)&bus->resetinstr, sizeof(bus->resetinstr));
+ /* now remove reset and halt and continue to run CA7 */
+ }
+ } else if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
+ if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
+ DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
+
+ if (!si_iscoreup(bus->sih)) {
+ DHD_ERROR(("%s: SOCRAM core is down after reset?\n", __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
+
+ /* Enable remap before ARM reset but after vars.
+ * No backplane access in remap mode
+ */
+ if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0) &&
+ !si_setcore(bus->sih, SDIOD_CORE_ID, 0)) {
+ DHD_ERROR(("%s: Can't change back to SDIO core?\n", __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
+
+ /* XXX Change standby configuration here if necessary */
+
+ if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
+ !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) {
+ DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
+ } else {
+ if (BCM43602_CHIP(bus->sih->chip)) {
+ /* Firmware crashes on SOCSRAM access when core is in reset */
+ if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
+ DHD_ERROR(("%s: Failed to find SOCRAM core!\n",
+ __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
+ si_core_reset(bus->sih, 0, 0);
+ si_setcore(bus->sih, ARMCR4_CORE_ID, 0);
+ }
+
+ /* write vars */
+ if ((bcmerror = dhdpcie_bus_write_vars(bus))) {
+ DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__));
+ goto fail;
+ }
+
+ /* write a random number rTLV to TCM for the purpose of
+ * randomizing heap address space.
+ */
+ if ((bcmerror = dhdpcie_wrt_rnd(bus)) != BCME_OK) {
+ DHD_ERROR(("%s: Failed to get random seed to write to TCM !\n",
+ __FUNCTION__));
+ goto fail;
+ }
+
+#if defined(FW_SIGNATURE)
+ if ((bcmerror = dhdpcie_bus_download_fw_signature(bus, &do_wr_flops))
+ != BCME_OK) {
+ goto fail;
+ }
+#endif /* FW_SIGNATURE */
+ if (do_wr_flops) {
+ uint32 resetinstr_data;
+
+ /* switch back to arm core again */
+ if (!(si_setcore(bus->sih, ARMCR4_CORE_ID, 0))) {
+ DHD_ERROR(("%s: Failed to find ARM CR4 core!\n",
+ __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
+
+				/*
+				 * read address 0 with the reset instruction,
+				 * to validate that it is not secured
+				 */
+ bcmerror = dhdpcie_bus_membytes(bus, FALSE, 0,
+ (uint8 *)&resetinstr_data, sizeof(resetinstr_data));
+
+ if (resetinstr_data == 0xFFFFFFFF) {
+ DHD_ERROR(("%s: **** FLOPS Vector is secured, "
+ "Signature file is missing! ***\n", __FUNCTION__));
+ bcmerror = BCME_NO_SIG_FILE;
+ goto fail;
+ }
+
+ /* write address 0 with reset instruction */
+ bcmerror = dhdpcie_bus_membytes(bus, TRUE, 0,
+ (uint8 *)&bus->resetinstr, sizeof(bus->resetinstr));
+
+ if (bcmerror == BCME_OK) {
+ uint32 tmp;
+
+ bcmerror = dhdpcie_bus_membytes(bus, FALSE, 0,
+ (uint8 *)&tmp, sizeof(tmp));
+
+ if (bcmerror == BCME_OK && tmp != bus->resetinstr) {
+ DHD_ERROR(("%s: Failed to write 0x%08x to addr 0\n",
+ __FUNCTION__, bus->resetinstr));
+ DHD_ERROR(("%s: contents of addr 0 is 0x%08x\n",
+ __FUNCTION__, tmp));
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
+ }
+ /* now remove reset and halt and continue to run CR4 */
+ }
+ }
+
+ bus->arm_oor_time = OSL_LOCALTIME_NS();
+ si_core_reset(bus->sih, 0, 0);
+
+ /* Allow HT Clock now that the ARM is running. */
+ bus->alp_only = FALSE;
+
+ bus->dhd->busstate = DHD_BUS_LOAD;
+#ifdef DHD_EFI
+ /*
+ * dhdpcie_init_phase2() sets the fw_download_status as FW_DOWNLOAD_IN_PROGRESS
+ * during the first default attempt to load FW either from OTP or WIRELESS folder.
+ *
+ * After the first successful download of the FW(either from OTP or WIRELESS folder
+ * or by dhd download command) set the fw_download_status as FW_DOWNLOAD_DONE.
+ *
+ * We need to maintain these states to perform FLR in dhdpcie_bus_download_state()
+ * only after first successful download.
+ */
+ bus->dhd->fw_download_status = FW_DOWNLOAD_DONE;
+#endif /* DHD_EFI */
+ }
+
+fail:
+ /* Always return to PCIE core */
+ si_setcore(bus->sih, PCIE2_CORE_ID, 0);
+
+ if (MULTIBP_ENAB(bus->sih) && !do_flr) {
+ dhd_bus_pcie_pwr_req_clear(bus);
+ }
+
+ return bcmerror;
+} /* dhdpcie_bus_download_state */
+
+#if defined(FW_SIGNATURE)
+
+static int
+dhdpcie_bus_download_fw_signature(dhd_bus_t *bus, bool *do_write)
+{
+ int bcmerror = BCME_OK;
+
+ DHD_INFO(("FWSIG: bl=%s,%x fw=%x,%u sig=%s,%x,%u"
+ " stat=%x,%u ram=%x,%x\n",
+ bus->bootloader_filename, bus->bootloader_addr,
+ bus->fw_download_addr, bus->fw_download_len,
+ bus->fwsig_filename, bus->fwsig_download_addr,
+ bus->fwsig_download_len,
+ bus->fwstat_download_addr, bus->fwstat_download_len,
+ bus->dongle_ram_base, bus->ramtop_addr));
+
+ if (bus->fwsig_filename[0] == 0) {
+ DHD_INFO(("%s: missing signature file\n", __FUNCTION__));
+ goto exit;
+ }
+
+ /* Write RAM Bootloader to TCM if requested */
+ if ((bcmerror = dhdpcie_bus_download_ram_bootloader(bus))
+ != BCME_OK) {
+ DHD_ERROR(("%s: could not write RAM BL to TCM, err %d\n",
+ __FUNCTION__, bcmerror));
+ goto exit;
+ }
+
+ /* Write FW signature rTLV to TCM */
+ if ((bcmerror = dhdpcie_bus_write_fwsig(bus, bus->fwsig_filename,
+ NULL))) {
+ DHD_ERROR(("%s: could not write FWsig to TCM, err %d\n",
+ __FUNCTION__, bcmerror));
+ goto exit;
+ }
+
+ /* Write FW signature verification status rTLV to TCM */
+ if ((bcmerror = dhdpcie_bus_write_fws_status(bus)) != BCME_OK) {
+ DHD_ERROR(("%s: could not write FWinfo to TCM, err %d\n",
+ __FUNCTION__, bcmerror));
+ goto exit;
+ }
+
+ /* Write FW memory map rTLV to TCM */
+ if ((bcmerror = dhdpcie_bus_write_fws_mem_info(bus)) != BCME_OK) {
+ DHD_ERROR(("%s: could not write FWinfo to TCM, err %d\n",
+ __FUNCTION__, bcmerror));
+ goto exit;
+ }
+
+	/* Write an end-of-TLVs marker to TCM */
+ if ((bcmerror = dhdpcie_download_rtlv_end(bus)) != BCME_OK) {
+ DHD_ERROR(("%s: could not write rTLV-end marker to TCM, err %d\n",
+ __FUNCTION__, bcmerror));
+ goto exit;
+ }
+
+ /* In case of BL RAM, do write flops */
+ if (bus->bootloader_filename[0] != 0) {
+ *do_write = TRUE;
+ } else {
+ *do_write = FALSE;
+ }
+
+exit:
+ return bcmerror;
+}
+
+/* Download a reversed-TLV to the top of dongle RAM without overlapping any existing rTLVs */
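+/* Resulting layout in dongle RAM, addresses increasing left to right
+ * (a sketch based on the writes below; the rTLV region grows downward
+ * from ramtop_addr):
+ *
+ *   dest_addr                                old ramtop_addr
+ *   |<--- value (len, padded) --->|<- size+chksum ->|<- type ->|
+ *
+ * presumably so the dongle can walk the rTLVs down from the top of RAM.
+ */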
+static int
+dhdpcie_download_rtlv(dhd_bus_t *bus, dngl_rtlv_type_t type, dngl_rtlv_len_t len, uint8 *value)
+{
+ int bcmerror = BCME_OK;
+#ifdef DHD_DEBUG
+ uint8 *readback_buf = NULL;
+ uint32 readback_val = 0;
+#endif /* DHD_DEBUG */
+ uint32 dest_addr = 0; /* dongle RAM dest address */
+ uint32 dest_size = 0; /* dongle RAM dest size */
+ uint32 dest_raw_size = 0; /* dest size with added checksum */
+
+ /* Calculate the destination dongle RAM address and size */
+ dest_size = ROUNDUP(len, 4);
+ dest_addr = bus->ramtop_addr - sizeof(dngl_rtlv_type_t) - sizeof(dngl_rtlv_len_t)
+ - dest_size;
+ bus->ramtop_addr = dest_addr;
+
+ /* Create the rTLV size field. This consists of 2 16-bit fields:
+ * The lower 16 bits is the size. The higher 16 bits is a checksum
+ * consisting of the size with all bits reversed.
+ * +-------------+-------------+
+ * | checksum | size |
+ * +-------------+-------------+
+ * High 16 bits Low 16 bits
+ */
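+	/* Worked example: dest_size = 0x28 yields
+	 * dest_raw_size = (~0x28 << 16) | 0x28 = 0xFFD70028,
+	 * i.e. an upper half of ~0x0028 = 0xFFD7.
+	 */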
+ dest_raw_size = (~dest_size << 16) | (dest_size & 0x0000FFFF);
+
+ /* Write the value block */
+ if (dest_size > 0) {
+ bcmerror = dhdpcie_bus_membytes(bus, TRUE, dest_addr, value, dest_size);
+ if (bcmerror) {
+ DHD_ERROR(("%s: error %d on writing %d membytes to 0x%08x\n",
+ __FUNCTION__, bcmerror, dest_size, dest_addr));
+ goto exit;
+ }
+ }
+
+ /* Write the length word */
+ bcmerror = dhdpcie_bus_membytes(bus, TRUE, dest_addr + dest_size,
+ (uint8*)&dest_raw_size, sizeof(dngl_rtlv_len_t));
+
+ /* Write the type word */
+ bcmerror = dhdpcie_bus_membytes(bus, TRUE,
+ dest_addr + dest_size + sizeof(dngl_rtlv_len_t),
+ (uint8*)&type, sizeof(dngl_rtlv_type_t));
+
+#ifdef DHD_DEBUG
+ /* Read back and compare the downloaded data */
+ if (dest_size > 0) {
+ readback_buf = (uint8*)MALLOC(bus->dhd->osh, dest_size);
+ if (!readback_buf) {
+ bcmerror = BCME_NOMEM;
+ goto exit;
+ }
+ memset(readback_buf, 0xaa, dest_size);
+ bcmerror = dhdpcie_bus_membytes(bus, FALSE, dest_addr, readback_buf, dest_size);
+ if (bcmerror) {
+ DHD_ERROR(("%s: readback error %d, %d bytes from 0x%08x\n",
+ __FUNCTION__, bcmerror, dest_size, dest_addr));
+ goto exit;
+ }
+ if (memcmp(value, readback_buf, dest_size) != 0) {
+ DHD_ERROR(("%s: Downloaded data mismatch.\n", __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto exit;
+ } else {
+ DHD_ERROR(("Download and compare of TLV 0x%x succeeded"
+ " (size %u, addr %x).\n", type, dest_size, dest_addr));
+ }
+ }
+
+ /* Read back and compare the downloaded len field */
+ bcmerror = dhdpcie_bus_membytes(bus, FALSE, dest_addr + dest_size,
+ (uint8*)&readback_val, sizeof(dngl_rtlv_len_t));
+ if (!bcmerror) {
+ if (readback_val != dest_raw_size) {
+ bcmerror = BCME_BADLEN;
+ }
+ }
+ if (bcmerror) {
+ DHD_ERROR(("%s: Downloaded len error %d\n", __FUNCTION__, bcmerror));
+ goto exit;
+ }
+
+ /* Read back and compare the downloaded type field */
+ bcmerror = dhdpcie_bus_membytes(bus, FALSE,
+ dest_addr + dest_size + sizeof(dngl_rtlv_len_t),
+ (uint8*)&readback_val, sizeof(dngl_rtlv_type_t));
+ if (!bcmerror) {
+ if (readback_val != type) {
+ bcmerror = BCME_BADOPTION;
+ }
+ }
+ if (bcmerror) {
+ DHD_ERROR(("%s: Downloaded type error %d\n", __FUNCTION__, bcmerror));
+ goto exit;
+ }
+#endif /* DHD_DEBUG */
+
+ bus->ramtop_addr = dest_addr;
+
+exit:
+#ifdef DHD_DEBUG
+ if (readback_buf) {
+ MFREE(bus->dhd->osh, readback_buf, dest_size);
+ }
+#endif /* DHD_DEBUG */
+
+ return bcmerror;
+} /* dhdpcie_download_rtlv */
+
+/* Download a reversed-TLV END marker to the top of dongle RAM */
+static int
+dhdpcie_download_rtlv_end(dhd_bus_t *bus)
+{
+ return dhdpcie_download_rtlv(bus, DNGL_RTLV_TYPE_END_MARKER, 0, NULL);
+}
+
+/* Write the FW signature verification status to dongle memory */
+static int
+dhdpcie_bus_write_fws_status(dhd_bus_t *bus)
+{
+ bcm_fwsign_verif_status_t vstatus;
+ int ret;
+
+ bzero(&vstatus, sizeof(vstatus));
+
+ ret = dhdpcie_download_rtlv(bus, DNGL_RTLV_TYPE_FWSIGN_STATUS, sizeof(vstatus),
+ (uint8*)&vstatus);
+ bus->fwstat_download_addr = bus->ramtop_addr;
+ bus->fwstat_download_len = sizeof(vstatus);
+
+ return ret;
+} /* dhdpcie_bus_write_fws_status */
+
+/* Write the FW signature verification memory map to dongle memory */
+static int
+dhdpcie_bus_write_fws_mem_info(dhd_bus_t *bus)
+{
+ bcm_fwsign_mem_info_t memmap;
+ int ret;
+
+ bzero(&memmap, sizeof(memmap));
+ memmap.firmware.start = bus->fw_download_addr;
+ memmap.firmware.end = memmap.firmware.start + bus->fw_download_len;
+ memmap.heap.start = ROUNDUP(memmap.firmware.end + BL_HEAP_START_GAP_SIZE, 4);
+ memmap.heap.end = memmap.heap.start + BL_HEAP_SIZE;
+ memmap.signature.start = bus->fwsig_download_addr;
+ memmap.signature.end = memmap.signature.start + bus->fwsig_download_len;
+ memmap.vstatus.start = bus->fwstat_download_addr;
+ memmap.vstatus.end = memmap.vstatus.start + bus->fwstat_download_len;
+ DHD_INFO(("%s: mem_info: fw=%x-%x heap=%x-%x sig=%x-%x vst=%x-%x res=%x\n",
+ __FUNCTION__,
+ memmap.firmware.start, memmap.firmware.end,
+ memmap.heap.start, memmap.heap.end,
+ memmap.signature.start, memmap.signature.end,
+ memmap.vstatus.start, memmap.vstatus.end,
+ memmap.reset_vec.start));
+
+ ret = dhdpcie_download_rtlv(bus, DNGL_RTLV_TYPE_FWSIGN_MEM_MAP, sizeof(memmap),
+ (uint8*)&memmap);
+ bus->fw_memmap_download_addr = bus->ramtop_addr;
+ bus->fw_memmap_download_len = sizeof(memmap);
+
+ return ret;
+} /* dhdpcie_bus_write_fws_mem_info */
+
+/* Download a bootloader image to dongle RAM */
+static int
+dhdpcie_bus_download_ram_bootloader(dhd_bus_t *bus)
+{
+ int ret = BCME_OK;
+ uint32 dongle_ram_base_save;
+
+ DHD_INFO(("download_bloader: %s,0x%x. ramtop=0x%x\n",
+ bus->bootloader_filename, bus->bootloader_addr, bus->ramtop_addr));
+ if (bus->bootloader_filename[0] == '\0') {
+ return ret;
+ }
+
+ /* Save ram base */
+ dongle_ram_base_save = bus->dongle_ram_base;
+
+ /* Set ram base to bootloader download start address */
+ bus->dongle_ram_base = bus->bootloader_addr;
+
+ /* Download the bootloader image to TCM */
+ ret = dhdpcie_download_code_file(bus, bus->bootloader_filename);
+
+ /* Restore ram base */
+ bus->dongle_ram_base = dongle_ram_base_save;
+
+ return ret;
+} /* dhdpcie_bus_download_ram_bootloader */
+
+/* Save the FW download address and size */
+static int
+dhdpcie_bus_save_download_info(dhd_bus_t *bus, uint32 download_addr,
+ uint32 download_size, const char *signature_fname,
+ const char *bloader_fname, uint32 bloader_download_addr)
+{
+ bus->fw_download_len = download_size;
+ bus->fw_download_addr = download_addr;
+ strlcpy(bus->fwsig_filename, signature_fname, sizeof(bus->fwsig_filename));
+ strlcpy(bus->bootloader_filename, bloader_fname, sizeof(bus->bootloader_filename));
+ bus->bootloader_addr = bloader_download_addr;
+#ifdef GDB_PROXY
+	/* GDB proxy bootloader mode - set if a signature file is specified
+	 * (i.e. a bootloader is used) but no bootloader file is specified
+	 * (i.e. the ROM bootloader is used).
+	 * Bootloader mode is significant only for preattachment debugging
+	 * of chips in which the debug cell can't be initialized before the
+	 * ARM CPU starts.
+	 */
+ bus->gdb_proxy_bootloader_mode =
+ (bus->fwsig_filename[0] != 0) && (bus->bootloader_filename[0] == 0);
+#endif /* GDB_PROXY */
+ return BCME_OK;
+} /* dhdpcie_bus_save_download_info */
+
+/* Read a small binary file and write it to the specified socram dest address */
+static int
+dhdpcie_download_sig_file(dhd_bus_t *bus, char *path, uint32 type)
+{
+ int bcmerror = BCME_OK;
+ void *filep = NULL;
+ uint8 *srcbuf = NULL;
+ int srcsize = 0;
+ int len;
+ uint32 dest_size = 0; /* dongle RAM dest size */
+
+ if (path == NULL || path[0] == '\0') {
+ DHD_ERROR(("%s: no file\n", __FUNCTION__));
+ bcmerror = BCME_NOTFOUND;
+ goto exit;
+ }
+
+ /* Open file, get size */
+ filep = dhd_os_open_image1(bus->dhd, path);
+ if (filep == NULL) {
+ DHD_ERROR(("%s: error opening file %s\n", __FUNCTION__, path));
+ bcmerror = BCME_NOTFOUND;
+ goto exit;
+ }
+ srcsize = dhd_os_get_image_size(filep);
+ if (srcsize <= 0 || srcsize > MEMBLOCK) {
+ DHD_ERROR(("%s: invalid fwsig size %u\n", __FUNCTION__, srcsize));
+ bcmerror = BCME_BUFTOOSHORT;
+ goto exit;
+ }
+ dest_size = ROUNDUP(srcsize, 4);
+
+ /* Allocate src buffer, read in the entire file */
+ srcbuf = (uint8 *)MALLOCZ(bus->dhd->osh, dest_size);
+ if (!srcbuf) {
+ bcmerror = BCME_NOMEM;
+ goto exit;
+ }
+ len = dhd_os_get_image_block(srcbuf, srcsize, filep);
+ if (len != srcsize) {
+ DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n", __FUNCTION__, len));
+ bcmerror = BCME_BADLEN;
+ goto exit;
+ }
+
+ /* Write the src buffer as a rTLV to the dongle */
+ bcmerror = dhdpcie_download_rtlv(bus, type, dest_size, srcbuf);
+
+ bus->fwsig_download_addr = bus->ramtop_addr;
+ bus->fwsig_download_len = dest_size;
+
+exit:
+ if (filep) {
+ dhd_os_close_image1(bus->dhd, filep);
+ }
+ if (srcbuf) {
+ MFREE(bus->dhd->osh, srcbuf, dest_size);
+ }
+
+ return bcmerror;
+} /* dhdpcie_download_sig_file */
+
+static int
+dhdpcie_bus_write_fwsig(dhd_bus_t *bus, char *fwsig_path, char *nvsig_path)
+{
+ int bcmerror = BCME_OK;
+
+ /* Download the FW signature file to the chip */
+ bcmerror = dhdpcie_download_sig_file(bus, fwsig_path, DNGL_RTLV_TYPE_FW_SIGNATURE);
+ if (bcmerror) {
+ goto exit;
+ }
+
+exit:
+ if (bcmerror) {
+ DHD_ERROR(("%s: error %d\n", __FUNCTION__, bcmerror));
+ }
+ return bcmerror;
+} /* dhdpcie_bus_write_fwsig */
+
+/* Dump secure firmware status. */
+static int
+dhd_bus_dump_fws(dhd_bus_t *bus, struct bcmstrbuf *strbuf)
+{
+ bcm_fwsign_verif_status_t status;
+ bcm_fwsign_mem_info_t meminfo;
+ int err = BCME_OK;
+
+ bzero(&status, sizeof(status));
+ if (bus->fwstat_download_addr != 0) {
+ err = dhdpcie_bus_membytes(bus, FALSE, bus->fwstat_download_addr,
+ (uint8 *)&status, sizeof(status));
+ if (err != BCME_OK) {
+ DHD_ERROR(("%s: error %d on reading %zu membytes at 0x%08x\n",
+ __FUNCTION__, err, sizeof(status), bus->fwstat_download_addr));
+ return (err);
+ }
+ }
+
+ bzero(&meminfo, sizeof(meminfo));
+ if (bus->fw_memmap_download_addr != 0) {
+ err = dhdpcie_bus_membytes(bus, FALSE, bus->fw_memmap_download_addr,
+ (uint8 *)&meminfo, sizeof(meminfo));
+ if (err != BCME_OK) {
+ DHD_ERROR(("%s: error %d on reading %zu membytes at 0x%08x\n",
+ __FUNCTION__, err, sizeof(meminfo), bus->fw_memmap_download_addr));
+ return (err);
+ }
+ }
+
+ bcm_bprintf(strbuf, "Firmware signing\nSignature: (%08x) len (%d)\n",
+ bus->fwsig_download_addr, bus->fwsig_download_len);
+
+ bcm_bprintf(strbuf,
+ "Verification status: (%08x)\n"
+ "\tstatus: %d\n"
+ "\tstate: %u\n"
+ "\talloc_bytes: %u\n"
+ "\tmax_alloc_bytes: %u\n"
+ "\ttotal_alloc_bytes: %u\n"
+ "\ttotal_freed_bytes: %u\n"
+ "\tnum_allocs: %u\n"
+ "\tmax_allocs: %u\n"
+ "\tmax_alloc_size: %u\n"
+ "\talloc_failures: %u\n",
+ bus->fwstat_download_addr,
+ status.status,
+ status.state,
+ status.alloc_bytes,
+ status.max_alloc_bytes,
+ status.total_alloc_bytes,
+ status.total_freed_bytes,
+ status.num_allocs,
+ status.max_allocs,
+ status.max_alloc_size,
+ status.alloc_failures);
+
+ bcm_bprintf(strbuf,
+ "Memory info: (%08x)\n"
+ "\tfw %08x-%08x\n\theap %08x-%08x\n\tsig %08x-%08x\n\tvst %08x-%08x\n",
+ bus->fw_memmap_download_addr,
+ meminfo.firmware.start, meminfo.firmware.end,
+ meminfo.heap.start, meminfo.heap.end,
+ meminfo.signature.start, meminfo.signature.end,
+ meminfo.vstatus.start, meminfo.vstatus.end);
+
+ return (err);
+}
+#endif /* FW_SIGNATURE */
+
+/* Write nvram data to the top of dongle RAM, ending with a size in # of 32-bit words */
+static int
+dhdpcie_bus_write_vars(dhd_bus_t *bus)
+{
+ int bcmerror = 0;
+ uint32 varsize, phys_size;
+ uint32 varaddr;
+ uint8 *vbuffer;
+ uint32 varsizew;
+#ifdef DHD_DEBUG
+ uint8 *nvram_ularray;
+#endif /* DHD_DEBUG */
+
+	/* Even if there are no vars to be written, we still need to set the ramsize. */
+ varsize = bus->varsz ? ROUNDUP(bus->varsz, 4) : 0;
+ varaddr = (bus->ramsize - 4) - varsize;
+
+ varaddr += bus->dongle_ram_base;
+ bus->ramtop_addr = varaddr;
+
+ if (bus->vars) {
+
+ /* XXX In case the controller has trouble with odd bytes... */
+ vbuffer = (uint8 *)MALLOC(bus->dhd->osh, varsize);
+ if (!vbuffer)
+ return BCME_NOMEM;
+
+ bzero(vbuffer, varsize);
+ bcopy(bus->vars, vbuffer, bus->varsz);
+ /* Write the vars list */
+ bcmerror = dhdpcie_bus_membytes(bus, TRUE, varaddr, vbuffer, varsize);
+
+ /* Implement read back and verify later */
+#ifdef DHD_DEBUG
+ /* Verify NVRAM bytes */
+ DHD_INFO(("%s: Compare NVRAM dl & ul; varsize=%d\n", __FUNCTION__, varsize));
+ nvram_ularray = (uint8*)MALLOC(bus->dhd->osh, varsize);
+ if (!nvram_ularray) {
+ MFREE(bus->dhd->osh, vbuffer, varsize);
+ return BCME_NOMEM;
+ }
+
+ /* Upload image to verify downloaded contents. */
+ memset(nvram_ularray, 0xaa, varsize);
+
+ /* Read the vars list to temp buffer for comparison */
+ bcmerror = dhdpcie_bus_membytes(bus, FALSE, varaddr, nvram_ularray, varsize);
+ if (bcmerror) {
+ DHD_ERROR(("%s: error %d on reading %d nvram bytes at 0x%08x\n",
+ __FUNCTION__, bcmerror, varsize, varaddr));
+ }
+
+ /* Compare the org NVRAM with the one read from RAM */
+ if (memcmp(vbuffer, nvram_ularray, varsize)) {
+ DHD_ERROR(("%s: Downloaded NVRAM image is corrupted.\n", __FUNCTION__));
+ prhex("nvram file", vbuffer, varsize);
+ prhex("downloaded nvram", nvram_ularray, varsize);
+ MFREE(bus->dhd->osh, nvram_ularray, varsize);
+ MFREE(bus->dhd->osh, vbuffer, varsize);
+ return BCME_ERROR;
+ } else
+ DHD_ERROR(("%s: Download, Upload and compare of NVRAM succeeded.\n",
+ __FUNCTION__));
+
+ MFREE(bus->dhd->osh, nvram_ularray, varsize);
+#endif /* DHD_DEBUG */
+
+ MFREE(bus->dhd->osh, vbuffer, varsize);
+ }
+
+ phys_size = REMAP_ENAB(bus) ? bus->ramsize : bus->orig_ramsize;
+
+ phys_size += bus->dongle_ram_base;
+
+ /* adjust to the user specified RAM */
+ DHD_INFO(("%s: Physical memory size: %d, usable memory size: %d\n", __FUNCTION__,
+ phys_size, bus->ramsize));
+ DHD_INFO(("%s: Vars are at %d, orig varsize is %d\n", __FUNCTION__,
+ varaddr, varsize));
+ varsize = ((phys_size - 4) - varaddr);
+
+ /*
+ * Determine the length token:
+ * Varsize, converted to words, in lower 16-bits, checksum in upper 16-bits.
+ */
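+	/* Worked example: varsize = 256 bytes -> varsizew = 64 (0x40) words,
+	 * so the length token is (~0x40 << 16) | 0x40 = 0xFFBF0040.
+	 */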
+ if (bcmerror) {
+ varsizew = 0;
+ bus->nvram_csm = varsizew;
+ } else {
+ varsizew = varsize / 4;
+ varsizew = (~varsizew << 16) | (varsizew & 0x0000FFFF);
+ bus->nvram_csm = varsizew;
+ varsizew = htol32(varsizew);
+ }
+
+ DHD_INFO(("%s: New varsize is %d, length token=0x%08x\n", __FUNCTION__, varsize, varsizew));
+
+ /* Write the length token to the last word */
+ bcmerror = dhdpcie_bus_membytes(bus, TRUE, (phys_size - 4),
+ (uint8*)&varsizew, 4);
+
+ return bcmerror;
+} /* dhdpcie_bus_write_vars */
+
+int
+dhdpcie_downloadvars(dhd_bus_t *bus, void *arg, int len)
+{
+ int bcmerror = BCME_OK;
+#ifdef KEEP_JP_REGREV
+ /* XXX Needed by customer's request */
+ char *tmpbuf;
+ uint tmpidx;
+#endif /* KEEP_JP_REGREV */
+#ifdef GDB_PROXY
+ const char nodeadman_record[] = "deadman_to=0";
+#endif /* GDB_PROXY */
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (!len) {
+ bcmerror = BCME_BUFTOOSHORT;
+ goto err;
+ }
+
+ /* Free the old ones and replace with passed variables */
+ if (bus->vars)
+ MFREE(bus->dhd->osh, bus->vars, bus->varsz);
+#ifdef GDB_PROXY
+ if (bus->dhd->gdb_proxy_nodeadman) {
+ len += sizeof(nodeadman_record);
+ }
+#endif /* GDB_PROXY */
+
+ bus->vars = MALLOC(bus->dhd->osh, len);
+ bus->varsz = bus->vars ? len : 0;
+ if (bus->vars == NULL) {
+ bcmerror = BCME_NOMEM;
+ goto err;
+ }
+
+ /* Copy the passed variables, which should include the terminating double-null */
+ bcopy(arg, bus->vars, bus->varsz);
+#ifdef GDB_PROXY
+ if (bus->dhd->gdb_proxy_nodeadman &&
+ !replace_nvram_variable(bus->vars, bus->varsz, nodeadman_record, NULL))
+ {
+ bcmerror = BCME_NOMEM;
+ goto err;
+ }
+#endif /* GDB_PROXY */
+
+	/* Re-calculate htclkratio only for QT; for FPGA it is fixed at 30 */
+#ifdef BCMQT_HW
+ dhdpcie_htclkratio_recal(bus, bus->vars, bus->varsz);
+#endif
+
+#ifdef DHD_USE_SINGLE_NVRAM_FILE
+ /* XXX Change the default country code only for MFG firmware */
+ if (dhd_bus_get_fw_mode(bus->dhd) == DHD_FLAG_MFG_MODE) {
+ char *sp = NULL;
+ char *ep = NULL;
+ int i;
+ char tag[2][8] = {"ccode=", "regrev="};
+
+ /* Find ccode and regrev info */
+ for (i = 0; i < 2; i++) {
+ sp = strnstr(bus->vars, tag[i], bus->varsz);
+ if (!sp) {
+ DHD_ERROR(("%s: Could not find ccode info from the nvram %s\n",
+ __FUNCTION__, bus->nv_path));
+ bcmerror = BCME_ERROR;
+ goto err;
+ }
+ sp = strchr(sp, '=');
+ ep = strchr(sp, '\0');
+			/* We assume that the string length of both the ccode and
+			 * regrev values does not exceed WLC_CNTRY_BUF_SZ
+			 */
+ if (ep && ((ep - sp) <= WLC_CNTRY_BUF_SZ)) {
+ sp++;
+ while (*sp != '\0') {
+ DHD_INFO(("%s: parse '%s', current sp = '%c'\n",
+ __FUNCTION__, tag[i], *sp));
+ *sp++ = '0';
+ }
+ } else {
+ DHD_ERROR(("%s: Invalid parameter format when parsing for %s\n",
+ __FUNCTION__, tag[i]));
+ bcmerror = BCME_ERROR;
+ goto err;
+ }
+ }
+ }
+#endif /* DHD_USE_SINGLE_NVRAM_FILE */
+
+#ifdef KEEP_JP_REGREV
+ /* XXX Needed by customer's request */
+#ifdef DHD_USE_SINGLE_NVRAM_FILE
+ if (dhd_bus_get_fw_mode(bus->dhd) != DHD_FLAG_MFG_MODE)
+#endif /* DHD_USE_SINGLE_NVRAM_FILE */
+ {
+ char *pos = NULL;
+ tmpbuf = MALLOCZ(bus->dhd->osh, bus->varsz + 1);
+ if (tmpbuf == NULL) {
+ goto err;
+ }
+ memcpy(tmpbuf, bus->vars, bus->varsz);
+ for (tmpidx = 0; tmpidx < bus->varsz; tmpidx++) {
+ if (tmpbuf[tmpidx] == 0) {
+ tmpbuf[tmpidx] = '\n';
+ }
+ }
+ bus->dhd->vars_ccode[0] = 0;
+ bus->dhd->vars_regrev = 0;
+ if ((pos = strstr(tmpbuf, "ccode"))) {
+ sscanf(pos, "ccode=%3s\n", bus->dhd->vars_ccode);
+ }
+ if ((pos = strstr(tmpbuf, "regrev"))) {
+ sscanf(pos, "regrev=%u\n", &(bus->dhd->vars_regrev));
+ }
+ MFREE(bus->dhd->osh, tmpbuf, bus->varsz + 1);
+ }
+#endif /* KEEP_JP_REGREV */
+
+err:
+ return bcmerror;
+}
+
+/* Loop through the capability list and see if the PCIe capability exists */
+uint8
+dhdpcie_find_pci_capability(osl_t *osh, uint8 req_cap_id)
+{
+ uint8 cap_id;
+ uint8 cap_ptr = 0;
+ uint8 byte_val;
+
+ /* check for Header type 0 */
+ byte_val = read_pci_cfg_byte(PCI_CFG_HDR);
+ if ((byte_val & 0x7f) != PCI_HEADER_NORMAL) {
+ DHD_ERROR(("%s : PCI config header not normal.\n", __FUNCTION__));
+ goto end;
+ }
+
+ /* check if the capability pointer field exists */
+ byte_val = read_pci_cfg_byte(PCI_CFG_STAT);
+ if (!(byte_val & PCI_CAPPTR_PRESENT)) {
+ DHD_ERROR(("%s : PCI CAP pointer not present.\n", __FUNCTION__));
+ goto end;
+ }
+
+ cap_ptr = read_pci_cfg_byte(PCI_CFG_CAPPTR);
+ /* check if the capability pointer is 0x00 */
+ if (cap_ptr == 0x00) {
+ DHD_ERROR(("%s : PCI CAP pointer is 0x00.\n", __FUNCTION__));
+ goto end;
+ }
+
+	/* Loop through the capability list and see if the requested capability exists */
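+	/* Each capability node in config space is laid out as
+	 * [cap_ptr + 0] = capability ID and [cap_ptr + 1] = next pointer,
+	 * with a next pointer of 0x00 terminating the list (standard PCI).
+	 */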
+
+ cap_id = read_pci_cfg_byte(cap_ptr);
+
+ while (cap_id != req_cap_id) {
+ cap_ptr = read_pci_cfg_byte((cap_ptr + 1));
+ if (cap_ptr == 0x00) break;
+ cap_id = read_pci_cfg_byte(cap_ptr);
+ }
+
+end:
+ return cap_ptr;
+}
+
+void
+dhdpcie_pme_active(osl_t *osh, bool enable)
+{
+ uint8 cap_ptr;
+ uint32 pme_csr;
+
+ cap_ptr = dhdpcie_find_pci_capability(osh, PCI_CAP_POWERMGMTCAP_ID);
+
+ if (!cap_ptr) {
+ DHD_ERROR(("%s : Power Management Capability not present\n", __FUNCTION__));
+ return;
+ }
+
+ pme_csr = OSL_PCI_READ_CONFIG(osh, cap_ptr + PME_CSR_OFFSET, sizeof(uint32));
+ DHD_RPM(("%s : pme_sts_ctrl 0x%x\n", __FUNCTION__, pme_csr));
+
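+	/* PME_Status is a sticky write-1-to-clear bit (per the PCI PM spec),
+	 * so including it in the value written back clears any pending status.
+	 */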
+ pme_csr |= PME_CSR_PME_STAT;
+ if (enable) {
+ pme_csr |= PME_CSR_PME_EN;
+ } else {
+ pme_csr &= ~PME_CSR_PME_EN;
+ }
+
+ OSL_PCI_WRITE_CONFIG(osh, cap_ptr + PME_CSR_OFFSET, sizeof(uint32), pme_csr);
+}
+
+bool
+dhdpcie_pme_cap(osl_t *osh)
+{
+ uint8 cap_ptr;
+ uint32 pme_cap;
+
+ cap_ptr = dhdpcie_find_pci_capability(osh, PCI_CAP_POWERMGMTCAP_ID);
+
+ if (!cap_ptr) {
+ DHD_ERROR(("%s : Power Management Capability not present\n", __FUNCTION__));
+ return FALSE;
+ }
+
+ pme_cap = OSL_PCI_READ_CONFIG(osh, cap_ptr, sizeof(uint32));
+
+ DHD_ERROR(("%s : pme_cap 0x%x\n", __FUNCTION__, pme_cap));
+
+ return ((pme_cap & PME_CAP_PM_STATES) != 0);
+}
+
+static void
+dhdpcie_pme_stat_clear(dhd_bus_t *bus)
+{
+ uint32 pmcsr = dhd_pcie_config_read(bus, PCIE_CFG_PMCSR, sizeof(uint32));
+
+ OSL_PCI_WRITE_CONFIG(bus->osh, PCIE_CFG_PMCSR, sizeof(uint32), pmcsr | PCIE_PMCSR_PMESTAT);
+}
+
+uint32
+dhdpcie_lcreg(osl_t *osh, uint32 mask, uint32 val)
+{
+
+ uint8 pcie_cap;
+ uint8 lcreg_offset; /* PCIE capability LCreg offset in the config space */
+ uint32 reg_val;
+
+ pcie_cap = dhdpcie_find_pci_capability(osh, PCI_CAP_PCIECAP_ID);
+
+ if (!pcie_cap) {
+ DHD_ERROR(("%s : PCIe Capability not present\n", __FUNCTION__));
+ return 0;
+ }
+
+ lcreg_offset = pcie_cap + PCIE_CAP_LINKCTRL_OFFSET;
+
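+	/* A mask of 0 performs a plain read: the read-modify-write below is
+	 * skipped and the current Link Control value is returned as-is.
+	 */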
+ /* set operation */
+ if (mask) {
+ /* read */
+ reg_val = OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32));
+
+ /* modify */
+ reg_val &= ~mask;
+ reg_val |= (mask & val);
+
+ /* write */
+ OSL_PCI_WRITE_CONFIG(osh, lcreg_offset, sizeof(uint32), reg_val);
+ }
+ return OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32));
+}
+
+#if defined(NDIS)
+/* set min res mask to highest value, preventing sleep */
+void
+dhdpcie_set_pmu_min_res_mask(struct dhd_bus *bus, uint min_res_mask)
+{
+ si_pmu_set_min_res_mask(bus->sih, bus->osh, min_res_mask);
+}
+#endif /* defined(NDIS) */
+
+uint8
+dhdpcie_clkreq(osl_t *osh, uint32 mask, uint32 val)
+{
+ uint8 pcie_cap;
+ uint32 reg_val;
+ uint8 lcreg_offset; /* PCIE capability LCreg offset in the config space */
+
+ pcie_cap = dhdpcie_find_pci_capability(osh, PCI_CAP_PCIECAP_ID);
+
+ if (!pcie_cap) {
+ DHD_ERROR(("%s : PCIe Capability not present\n", __FUNCTION__));
+ return 0;
+ }
+
+ lcreg_offset = pcie_cap + PCIE_CAP_LINKCTRL_OFFSET;
+
+ reg_val = OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32));
+ /* set operation */
+ if (mask) {
+ if (val)
+ reg_val |= PCIE_CLKREQ_ENAB;
+ else
+ reg_val &= ~PCIE_CLKREQ_ENAB;
+ OSL_PCI_WRITE_CONFIG(osh, lcreg_offset, sizeof(uint32), reg_val);
+ reg_val = OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32));
+ }
+ if (reg_val & PCIE_CLKREQ_ENAB)
+ return 1;
+ else
+ return 0;
+}
+
+void dhd_dump_intr_counters(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
+{
+ dhd_bus_t *bus;
+ uint64 current_time = OSL_LOCALTIME_NS();
+
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ bus = dhd->bus;
+ if (!bus) {
+ DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ bcm_bprintf(strbuf, "\n ------- DUMPING INTR enable/disable counters-------\n");
+ bcm_bprintf(strbuf, "resume_intr_enable_count=%lu dpc_intr_enable_count=%lu\n"
+ "isr_intr_disable_count=%lu suspend_intr_disable_count=%lu\n"
+ "dpc_return_busdown_count=%lu non_ours_irq_count=%lu\n",
+ bus->resume_intr_enable_count, bus->dpc_intr_enable_count,
+ bus->isr_intr_disable_count, bus->suspend_intr_disable_count,
+ bus->dpc_return_busdown_count, bus->non_ours_irq_count);
+#ifdef BCMPCIE_OOB_HOST_WAKE
+ bcm_bprintf(strbuf, "oob_intr_count=%lu oob_intr_enable_count=%lu"
+ " oob_intr_disable_count=%lu\noob_irq_num=%d"
+ " last_oob_irq_times="SEC_USEC_FMT":"SEC_USEC_FMT
+ " last_oob_irq_enable_time="SEC_USEC_FMT"\nlast_oob_irq_disable_time="SEC_USEC_FMT
+ " oob_irq_enabled=%d oob_gpio_level=%d\n",
+ bus->oob_intr_count, bus->oob_intr_enable_count,
+ bus->oob_intr_disable_count, dhdpcie_get_oob_irq_num(bus),
+ GET_SEC_USEC(bus->last_oob_irq_isr_time),
+ GET_SEC_USEC(bus->last_oob_irq_thr_time),
+ GET_SEC_USEC(bus->last_oob_irq_enable_time),
+ GET_SEC_USEC(bus->last_oob_irq_disable_time), dhdpcie_get_oob_irq_status(bus),
+ dhdpcie_get_oob_irq_level());
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+ bcm_bprintf(strbuf, "\ncurrent_time="SEC_USEC_FMT" isr_entry_time="SEC_USEC_FMT
+ " isr_exit_time="SEC_USEC_FMT"\n"
+ "isr_sched_dpc_time="SEC_USEC_FMT" rpm_sched_dpc_time="SEC_USEC_FMT"\n"
+ " last_non_ours_irq_time="SEC_USEC_FMT" dpc_entry_time="SEC_USEC_FMT"\n"
+ "last_process_ctrlbuf_time="SEC_USEC_FMT " last_process_flowring_time="SEC_USEC_FMT
+ " last_process_txcpl_time="SEC_USEC_FMT"\nlast_process_rxcpl_time="SEC_USEC_FMT
+ " last_process_infocpl_time="SEC_USEC_FMT" last_process_edl_time="SEC_USEC_FMT
+ "\ndpc_exit_time="SEC_USEC_FMT" resched_dpc_time="SEC_USEC_FMT"\n"
+ "last_d3_inform_time="SEC_USEC_FMT"\n",
+ GET_SEC_USEC(current_time), GET_SEC_USEC(bus->isr_entry_time),
+ GET_SEC_USEC(bus->isr_exit_time), GET_SEC_USEC(bus->isr_sched_dpc_time),
+ GET_SEC_USEC(bus->rpm_sched_dpc_time),
+ GET_SEC_USEC(bus->last_non_ours_irq_time), GET_SEC_USEC(bus->dpc_entry_time),
+ GET_SEC_USEC(bus->last_process_ctrlbuf_time),
+ GET_SEC_USEC(bus->last_process_flowring_time),
+ GET_SEC_USEC(bus->last_process_txcpl_time),
+ GET_SEC_USEC(bus->last_process_rxcpl_time),
+ GET_SEC_USEC(bus->last_process_infocpl_time),
+ GET_SEC_USEC(bus->last_process_edl_time),
+ GET_SEC_USEC(bus->dpc_exit_time), GET_SEC_USEC(bus->resched_dpc_time),
+ GET_SEC_USEC(bus->last_d3_inform_time));
+
+ bcm_bprintf(strbuf, "\nlast_suspend_start_time="SEC_USEC_FMT" last_suspend_end_time="
+ SEC_USEC_FMT" last_resume_start_time="SEC_USEC_FMT" last_resume_end_time="
+ SEC_USEC_FMT"\n", GET_SEC_USEC(bus->last_suspend_start_time),
+ GET_SEC_USEC(bus->last_suspend_end_time),
+ GET_SEC_USEC(bus->last_resume_start_time),
+ GET_SEC_USEC(bus->last_resume_end_time));
+
+#if defined(SHOW_LOGTRACE) && defined(DHD_USE_KTHREAD_FOR_LOGTRACE)
+ bcm_bprintf(strbuf, "logtrace_thread_entry_time="SEC_USEC_FMT
+ " logtrace_thread_sem_down_time="SEC_USEC_FMT
+ "\nlogtrace_thread_flush_time="SEC_USEC_FMT
+ " logtrace_thread_unexpected_break_time="SEC_USEC_FMT
+ "\nlogtrace_thread_complete_time="SEC_USEC_FMT"\n",
+ GET_SEC_USEC(dhd->logtrace_thr_ts.entry_time),
+ GET_SEC_USEC(dhd->logtrace_thr_ts.sem_down_time),
+ GET_SEC_USEC(dhd->logtrace_thr_ts.flush_time),
+ GET_SEC_USEC(dhd->logtrace_thr_ts.unexpected_break_time),
+ GET_SEC_USEC(dhd->logtrace_thr_ts.complete_time));
+#endif /* SHOW_LOGTRACE && DHD_USE_KTHREAD_FOR_LOGTRACE */
+}
+
+void dhd_dump_intr_registers(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
+{
+ uint32 intstatus = 0;
+ uint32 intmask = 0;
+ uint32 d2h_db0 = 0;
+ uint32 d2h_mb_data = 0;
+
+ intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ dhd->bus->pcie_mailbox_int, 0, 0);
+#ifdef DHD_MMIO_TRACE
+ dhd_bus_mmio_trace(dhd->bus, dhd->bus->pcie_mailbox_int, intstatus, FALSE);
+#endif /* defined(DHD_MMIO_TRACE) */
+ intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ dhd->bus->pcie_mailbox_mask, 0, 0);
+#ifdef DHD_MMIO_TRACE
+ dhd_bus_mmio_trace(dhd->bus, dhd->bus->pcie_mailbox_mask, intmask, FALSE);
+#endif /* defined(DHD_MMIO_TRACE) */
+ d2h_db0 = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCID2H_MailBox, 0, 0);
+ dhd_bus_cmn_readshared(dhd->bus, &d2h_mb_data, D2H_MB_DATA, 0);
+
+ bcm_bprintf(strbuf, "intstatus=0x%x intmask=0x%x d2h_db0=0x%x\n",
+ intstatus, intmask, d2h_db0);
+ bcm_bprintf(strbuf, "d2h_mb_data=0x%x def_intmask=0x%x\n",
+ d2h_mb_data, dhd->bus->def_intmask);
+}
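+
+/*
+ * Illustrative output of dhd_dump_intr_registers() above (values are
+ * hypothetical, shown only to document the format of the two bcm_bprintf()
+ * calls):
+ *
+ *   intstatus=0x1000000 intmask=0x3000000 d2h_db0=0x0
+ *   d2h_mb_data=0x0 def_intmask=0x3000000
+ */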
+#if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS)
+void
+dhd_bus_dump_awdl_stats(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
+{
+ int i = 0;
+ dhd_awdl_stats_t *awdl_stats;
+
+ bcm_bprintf(strbuf, "---------- AWDL STATISTICS ---------\n");
+ bcm_bprintf(strbuf, "%s %10s %12s %16s %12s %16s %8s %8s %8s\n",
+ "Slot", "AvgSlotTUs", "AvgSlotTUsFW", "NumSlots",
+ "AvgTxCmpL_Us", "NumTxStatus", "Acked", "tossed", "noack");
+ for (i = 0; i < AWDL_NUM_SLOTS; i++) {
+ awdl_stats = &dhdp->awdl_stats[i];
+ bcm_bprintf(strbuf, "%4d %10llu %12llu %16llu %12llu %16llu ",
+ i,
+ awdl_stats->num_slots ?
+ DIV_U64_BY_U64(awdl_stats->cum_slot_time,
+ awdl_stats->num_slots) : 0,
+ awdl_stats->num_slots ?
+ DIV_U64_BY_U64(awdl_stats->fw_cum_slot_time,
+ awdl_stats->num_slots) : 0,
+ awdl_stats->num_slots,
+ awdl_stats->num_tx_status ?
+ DIV_U64_BY_U64(awdl_stats->cum_tx_status_latency,
+ awdl_stats->num_tx_status) : 0,
+ awdl_stats->num_tx_status);
+#ifdef BCMDBG
+ if (!dhdp->d2h_sync_mode) {
+ bcm_bprintf(strbuf, "%8d %8d %8d\n",
+ awdl_stats->tx_status[WLFC_CTL_PKTFLAG_DISCARD],
+ awdl_stats->tx_status[WLFC_CTL_PKTFLAG_TOSSED_BYWLC],
+ awdl_stats->tx_status[WLFC_CTL_PKTFLAG_DISCARD_NOACK]);
+ } else {
+ bcm_bprintf(strbuf,
+ "%8s %8s %8s\n", "NA", "NA", "NA");
+ }
+#else
+ bcm_bprintf(strbuf,
+ "%8s %8s %8s\n", "NA", "NA", "NA");
+#endif
+ }
+}
+#endif /* DHD_AWDL && AWDL_SLOT_STATS */
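+
+/*
+ * Note on the AWDL averages printed above: each AvgSlotTUs/AvgTxCmpL_Us
+ * column is a cumulative value divided by its count, with an explicit
+ * zero-count guard (same expression as used in dhd_bus_dump_awdl_stats()):
+ *
+ *	avg = num_slots ? DIV_U64_BY_U64(cum_slot_time, num_slots) : 0;
+ *
+ * so an idle slot prints 0 instead of triggering a divide-by-zero.
+ */
+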
+/** Add bus dump output to a buffer */
+void dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
+{
+ uint16 flowid;
+ int ix = 0;
+ flow_ring_node_t *flow_ring_node;
+ flow_info_t *flow_info;
+#ifdef BCMDBG
+ flow_info_t *local_flow_info;
+#endif /* BCMDBG */
+#ifdef TX_STATUS_LATENCY_STATS
+ uint8 ifindex;
+ if_flow_lkup_t *if_flow_lkup;
+ dhd_if_tx_status_latency_t if_tx_status_latency[DHD_MAX_IFS];
+#endif /* TX_STATUS_LATENCY_STATS */
+
+#if defined(FW_SIGNATURE)
+ /* Dump secure firmware status. */
+ if (dhdp->busstate <= DHD_BUS_LOAD) {
+ dhd_bus_dump_fws(dhdp->bus, strbuf);
+ }
+#endif /* FW_SIGNATURE */
+
+ if (dhdp->busstate != DHD_BUS_DATA)
+ return;
+
+#ifdef TX_STATUS_LATENCY_STATS
+ memset(if_tx_status_latency, 0, sizeof(if_tx_status_latency));
+#endif /* TX_STATUS_LATENCY_STATS */
+#ifdef DHD_WAKE_STATUS
+ bcm_bprintf(strbuf, "wake %u rxwake %u readctrlwake %u\n",
+ bcmpcie_get_total_wake(dhdp->bus), dhdp->bus->wake_counts.rxwake,
+ dhdp->bus->wake_counts.rcwake);
+#ifdef DHD_WAKE_RX_STATUS
+	bcm_bprintf(strbuf, " unicast %u multicast %u broadcast %u arp %u\n",
+ dhdp->bus->wake_counts.rx_ucast, dhdp->bus->wake_counts.rx_mcast,
+ dhdp->bus->wake_counts.rx_bcast, dhdp->bus->wake_counts.rx_arp);
+ bcm_bprintf(strbuf, " multi4 %u multi6 %u icmp6 %u multiother %u\n",
+ dhdp->bus->wake_counts.rx_multi_ipv4, dhdp->bus->wake_counts.rx_multi_ipv6,
+ dhdp->bus->wake_counts.rx_icmpv6, dhdp->bus->wake_counts.rx_multi_other);
+ bcm_bprintf(strbuf, " icmp6_ra %u, icmp6_na %u, icmp6_ns %u\n",
+ dhdp->bus->wake_counts.rx_icmpv6_ra, dhdp->bus->wake_counts.rx_icmpv6_na,
+ dhdp->bus->wake_counts.rx_icmpv6_ns);
+#endif /* DHD_WAKE_RX_STATUS */
+#ifdef DHD_WAKE_EVENT_STATUS
+ for (flowid = 0; flowid < WLC_E_LAST; flowid++)
+ if (dhdp->bus->wake_counts.rc_event[flowid] != 0)
+ bcm_bprintf(strbuf, " %s = %u\n", bcmevent_get_name(flowid),
+ dhdp->bus->wake_counts.rc_event[flowid]);
+ bcm_bprintf(strbuf, "\n");
+#endif /* DHD_WAKE_EVENT_STATUS */
+#endif /* DHD_WAKE_STATUS */
+
+ dhd_prot_print_info(dhdp, strbuf);
+ dhd_dump_intr_registers(dhdp, strbuf);
+ dhd_dump_intr_counters(dhdp, strbuf);
+ bcm_bprintf(strbuf, "h2d_mb_data_ptr_addr 0x%x, d2h_mb_data_ptr_addr 0x%x\n",
+ dhdp->bus->h2d_mb_data_ptr_addr, dhdp->bus->d2h_mb_data_ptr_addr);
+ bcm_bprintf(strbuf, "dhd cumm_ctr %d\n", DHD_CUMM_CTR_READ(&dhdp->cumm_ctr));
+#ifdef DHD_LIMIT_MULTI_CLIENT_FLOWRINGS
+ bcm_bprintf(strbuf, "multi_client_flow_rings:%d max_multi_client_flow_rings:%d\n",
+ dhdp->multi_client_flow_rings, dhdp->max_multi_client_flow_rings);
+#endif /* DHD_LIMIT_MULTI_CLIENT_FLOWRINGS */
+#if defined(DHD_HTPUT_TUNABLES)
+	bcm_bprintf(strbuf, "htput_flow_ring_start:%d total_htput:%d client_htput:%d\n",
+ dhdp->htput_flow_ring_start, HTPUT_TOTAL_FLOW_RINGS, dhdp->htput_client_flow_rings);
+#endif /* DHD_HTPUT_TUNABLES */
+ bcm_bprintf(strbuf,
+ "%4s %4s %2s %4s %17s %4s %4s %6s %10s %17s %17s %17s %17s %14s %14s %10s ",
+ "Num:", "Flow", "If", "Prio", ":Dest_MacAddress:", "Qlen", "CLen", "L2CLen",
+ " Overflows", "TRD: HLRD: HDRD", "TWR: HLWR: HDWR", "BASE(VA)", "BASE(PA)",
+ "WORK_ITEM_SIZE", "MAX_WORK_ITEMS", "TOTAL_SIZE");
+
+#ifdef TX_STATUS_LATENCY_STATS
+ /* Average Tx status/Completion Latency in micro secs */
+ bcm_bprintf(strbuf, "%16s %16s ", " NumTxPkts", " AvgTxCmpL_Us");
+#endif /* TX_STATUS_LATENCY_STATS */
+
+ bcm_bprintf(strbuf, "\n");
+
+ for (flowid = 0; flowid < dhdp->num_h2d_rings; flowid++) {
+ unsigned long flags;
+ flow_ring_node = DHD_FLOW_RING(dhdp, flowid);
+ DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
+ if (flow_ring_node->status != FLOW_RING_STATUS_OPEN) {
+ DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+ continue;
+ }
+
+ flow_info = &flow_ring_node->flow_info;
+ bcm_bprintf(strbuf,
+ "%4d %4d %2d %4d "MACDBG" %4d %4d %6d %10u ", ix++,
+ flow_ring_node->flowid, flow_info->ifindex, flow_info->tid,
+ MAC2STRDBG(flow_info->da),
+ DHD_FLOW_QUEUE_LEN(&flow_ring_node->queue),
+ DHD_CUMM_CTR_READ(DHD_FLOW_QUEUE_CLEN_PTR(&flow_ring_node->queue)),
+ DHD_CUMM_CTR_READ(DHD_FLOW_QUEUE_L2CLEN_PTR(&flow_ring_node->queue)),
+ DHD_FLOW_QUEUE_FAILURES(&flow_ring_node->queue));
+ dhd_prot_print_flow_ring(dhdp, flow_ring_node->prot_info, TRUE, strbuf,
+ "%5d:%5d:%5d %5d:%5d:%5d %17p %8x:%8x %14d %14d %10d");
+
+#ifdef TX_STATUS_LATENCY_STATS
+ bcm_bprintf(strbuf, "%16llu %16llu ",
+ flow_info->num_tx_pkts,
+ flow_info->num_tx_status ?
+ DIV_U64_BY_U64(flow_info->cum_tx_status_latency,
+ flow_info->num_tx_status) : 0);
+ ifindex = flow_info->ifindex;
+ ASSERT(ifindex < DHD_MAX_IFS);
+ if (ifindex < DHD_MAX_IFS) {
+ if_tx_status_latency[ifindex].num_tx_status += flow_info->num_tx_status;
+ if_tx_status_latency[ifindex].cum_tx_status_latency +=
+ flow_info->cum_tx_status_latency;
+ } else {
+ DHD_ERROR(("%s: Bad IF index: %d associated with flowid: %d\n",
+ __FUNCTION__, ifindex, flowid));
+ }
+#endif /* TX_STATUS_LATENCY_STATS */
+ bcm_bprintf(strbuf, "\n");
+ DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+ }
+
+#ifdef BCMDBG
+ if (!dhdp->d2h_sync_mode) {
+ ix = 0;
+ bcm_bprintf(strbuf, "\n%4s %4s %2s %10s %7s %6s %5s %5s %10s %7s %7s %7s \n",
+ "Num:", "Flow", "If", " ACKED", "D11SPRS", "WLSPRS", "TSDWL",
+ "NOACK", "SPRS_ACKED", "EXPIRED", "DROPPED", "FWFREED");
+ for (flowid = 0; flowid < dhdp->num_h2d_rings; flowid++) {
+ flow_ring_node = DHD_FLOW_RING(dhdp, flowid);
+ if (!flow_ring_node->active)
+ continue;
+
+ flow_info = &flow_ring_node->flow_info;
+ bcm_bprintf(strbuf, "%4d %4d %2d ",
+ ix++, flow_ring_node->flowid, flow_info->ifindex);
+ local_flow_info = &flow_ring_node->flow_info;
+ bcm_bprintf(strbuf, "%10d %7d %6d %5d %5d %10d %7d %7d %7d\n",
+ local_flow_info->tx_status[WLFC_CTL_PKTFLAG_DISCARD],
+ local_flow_info->tx_status[WLFC_CTL_PKTFLAG_D11SUPPRESS],
+ local_flow_info->tx_status[WLFC_CTL_PKTFLAG_WLSUPPRESS],
+ local_flow_info->tx_status[WLFC_CTL_PKTFLAG_TOSSED_BYWLC],
+ local_flow_info->tx_status[WLFC_CTL_PKTFLAG_DISCARD_NOACK],
+ local_flow_info->tx_status[WLFC_CTL_PKTFLAG_SUPPRESS_ACKED],
+ local_flow_info->tx_status[WLFC_CTL_PKTFLAG_EXPIRED],
+ local_flow_info->tx_status[WLFC_CTL_PKTFLAG_DROPPED],
+ local_flow_info->tx_status[WLFC_CTL_PKTFLAG_MKTFREE]);
+ }
+ }
+#endif /* BCMDBG */
+
+#ifdef TX_STATUS_LATENCY_STATS
+ bcm_bprintf(strbuf, "\n%s %16s %16s\n", "If", "AvgTxCmpL_Us", "NumTxStatus");
+ if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
+ for (ix = 0; ix < DHD_MAX_IFS; ix++) {
+ if (!if_flow_lkup[ix].status) {
+ continue;
+ }
+ bcm_bprintf(strbuf, "%2d %16llu %16llu\n",
+ ix,
+ if_tx_status_latency[ix].num_tx_status ?
+ DIV_U64_BY_U64(if_tx_status_latency[ix].cum_tx_status_latency,
+ if_tx_status_latency[ix].num_tx_status): 0,
+ if_tx_status_latency[ix].num_tx_status);
+ }
+#endif /* TX_STATUS_LATENCY_STATS */
+
+#ifdef DHD_HP2P
+ if (dhdp->hp2p_capable) {
+ bcm_bprintf(strbuf, "\n%s %16s %16s", "Flowid", "Tx_t0", "Tx_t1");
+
+ for (flowid = 0; flowid < MAX_HP2P_FLOWS; flowid++) {
+ hp2p_info_t *hp2p_info;
+ int bin;
+
+ hp2p_info = &dhdp->hp2p_info[flowid];
+ if (hp2p_info->num_timer_start == 0)
+ continue;
+
+ bcm_bprintf(strbuf, "\n%d", hp2p_info->flowid);
+ bcm_bprintf(strbuf, "\n%s", "Bin");
+
+ for (bin = 0; bin < MAX_TX_HIST_BIN; bin++) {
+ bcm_bprintf(strbuf, "\n%2d %20llu %16llu", bin,
+ hp2p_info->tx_t0[bin], hp2p_info->tx_t1[bin]);
+ }
+
+ bcm_bprintf(strbuf, "\n%s %16s", "Flowid", "Rx_t0");
+ bcm_bprintf(strbuf, "\n%d", hp2p_info->flowid);
+ bcm_bprintf(strbuf, "\n%s", "Bin");
+
+ for (bin = 0; bin < MAX_RX_HIST_BIN; bin++) {
+ bcm_bprintf(strbuf, "\n%d %20llu", bin,
+ hp2p_info->rx_t0[bin]);
+ }
+
+ bcm_bprintf(strbuf, "\n%s %16s %16s",
+ "Packet limit", "Timer limit", "Timer start");
+ bcm_bprintf(strbuf, "\n%llu %24llu %16llu", hp2p_info->num_pkt_limit,
+ hp2p_info->num_timer_limit, hp2p_info->num_timer_start);
+ }
+
+ bcm_bprintf(strbuf, "\n");
+ }
+#endif /* DHD_HP2P */
+
+#if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS)
+ dhd_bus_dump_awdl_stats(dhdp, strbuf);
+ dhd_clear_awdl_stats(dhdp);
+#endif /* DHD_AWDL && AWDL_SLOT_STATS */
+ bcm_bprintf(strbuf, "D3 inform cnt %d\n", dhdp->bus->d3_inform_cnt);
+ bcm_bprintf(strbuf, "D0 inform cnt %d\n", dhdp->bus->d0_inform_cnt);
+ bcm_bprintf(strbuf, "D0 inform in use cnt %d\n", dhdp->bus->d0_inform_in_use_cnt);
+ if (dhdp->d2h_hostrdy_supported) {
+ bcm_bprintf(strbuf, "hostready count:%d\n", dhdp->bus->hostready_count);
+ }
+#ifdef PCIE_INB_DW
+ /* Inband device wake counters */
+ if (INBAND_DW_ENAB(dhdp->bus)) {
+ bcm_bprintf(strbuf, "Inband device_wake assert count: %d\n",
+ dhdp->bus->inband_dw_assert_cnt);
+ bcm_bprintf(strbuf, "Inband device_wake deassert count: %d\n",
+ dhdp->bus->inband_dw_deassert_cnt);
+ bcm_bprintf(strbuf, "Inband DS-EXIT <host initiated> count: %d\n",
+ dhdp->bus->inband_ds_exit_host_cnt);
+ bcm_bprintf(strbuf, "Inband DS-EXIT <device initiated> count: %d\n",
+ dhdp->bus->inband_ds_exit_device_cnt);
+ bcm_bprintf(strbuf, "Inband DS-EXIT Timeout count: %d\n",
+ dhdp->bus->inband_ds_exit_to_cnt);
+ bcm_bprintf(strbuf, "Inband HOST_SLEEP-EXIT Timeout count: %d\n",
+ dhdp->bus->inband_host_sleep_exit_to_cnt);
+ }
+#endif /* PCIE_INB_DW */
+ bcm_bprintf(strbuf, "d2h_intr_method -> %s d2h_intr_control -> %s\n",
+ dhdp->bus->d2h_intr_method ? "PCIE_MSI" : "PCIE_INTX",
+ dhdp->bus->d2h_intr_control ? "HOST_IRQ" : "D2H_INTMASK");
+
+ bcm_bprintf(strbuf, "\n\nDB7 stats - db7_send_cnt: %d, db7_trap_cnt: %d, "
+ "max duration: %lld (%lld - %lld), db7_timing_error_cnt: %d\n",
+ dhdp->db7_trap.debug_db7_send_cnt,
+ dhdp->db7_trap.debug_db7_trap_cnt,
+ dhdp->db7_trap.debug_max_db7_dur,
+ dhdp->db7_trap.debug_max_db7_trap_time,
+ dhdp->db7_trap.debug_max_db7_send_time,
+ dhdp->db7_trap.debug_db7_timing_error_cnt);
+}
+
+#ifdef DNGL_AXI_ERROR_LOGGING
+bool
+dhd_axi_sig_match(dhd_pub_t *dhdp)
+{
+ uint32 axi_tcm_addr = dhdpcie_bus_rtcm32(dhdp->bus, dhdp->axierror_logbuf_addr);
+
+ if (dhdp->dhd_induce_error == DHD_INDUCE_DROP_AXI_SIG) {
+ DHD_ERROR(("%s: Induce AXI signature drop\n", __FUNCTION__));
+ return FALSE;
+ }
+
+ DHD_ERROR(("%s: axi_tcm_addr: 0x%x, tcm range: 0x%x ~ 0x%x\n",
+ __FUNCTION__, axi_tcm_addr, dhdp->bus->dongle_ram_base,
+ dhdp->bus->dongle_ram_base + dhdp->bus->ramsize));
+ if (axi_tcm_addr >= dhdp->bus->dongle_ram_base &&
+ axi_tcm_addr < dhdp->bus->dongle_ram_base + dhdp->bus->ramsize) {
+ uint32 axi_signature = dhdpcie_bus_rtcm32(dhdp->bus, (axi_tcm_addr +
+ OFFSETOF(hnd_ext_trap_axi_error_v1_t, signature)));
+ if (axi_signature == HND_EXT_TRAP_AXIERROR_SIGNATURE) {
+ return TRUE;
+ } else {
+ DHD_ERROR(("%s: No AXI signature: 0x%x\n",
+ __FUNCTION__, axi_signature));
+ return FALSE;
+ }
+ } else {
+ DHD_ERROR(("%s: No AXI shared tcm address debug info.\n", __FUNCTION__));
+ return FALSE;
+ }
+}
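+
+/*
+ * Example of the TCM range check in dhd_axi_sig_match(), with hypothetical
+ * values: dongle_ram_base = 0x170000 and ramsize = 0xE0000 give the valid
+ * window [0x170000, 0x250000). An axi_tcm_addr of 0x180000 passes, and the
+ * signature word at OFFSETOF(hnd_ext_trap_axi_error_v1_t, signature) is then
+ * compared against HND_EXT_TRAP_AXIERROR_SIGNATURE.
+ */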
+
+void
+dhd_axi_error(dhd_pub_t *dhdp)
+{
+ dhd_axi_error_dump_t *axi_err_dump;
+ uint8 *axi_err_buf = NULL;
+ uint8 *p_axi_err = NULL;
+ uint32 axi_logbuf_addr;
+ uint32 axi_tcm_addr;
+ int err, size;
+
+	/* XXX: On the Dongle side, if an invalid Host Address is generated for a transaction,
+	 * it results in an SMMU Fault, and the Host will not respond to the invalid transaction.
+	 * On the Dongle side, after 50 msec this results in an AXI Slave Error.
+	 * Hence introduce a delay longer than 50 msec to ensure the AXI Slave Error happens
+	 * and the Dongle collects the required information.
+	 */
+ OSL_DELAY(75000);
+
+ axi_logbuf_addr = dhdp->axierror_logbuf_addr;
+ if (!axi_logbuf_addr) {
+ DHD_ERROR(("%s: No AXI TCM address debug info.\n", __FUNCTION__));
+ goto sched_axi;
+ }
+
+ axi_err_dump = dhdp->axi_err_dump;
+ if (!axi_err_dump) {
+ goto sched_axi;
+ }
+
+ if (!dhd_axi_sig_match(dhdp)) {
+ goto sched_axi;
+ }
+
+ /* Reading AXI error data for SMMU fault */
+ DHD_ERROR(("%s: Read AXI data from TCM address\n", __FUNCTION__));
+ axi_tcm_addr = dhdpcie_bus_rtcm32(dhdp->bus, axi_logbuf_addr);
+ size = sizeof(hnd_ext_trap_axi_error_v1_t);
+ axi_err_buf = MALLOCZ(dhdp->osh, size);
+ if (axi_err_buf == NULL) {
+ DHD_ERROR(("%s: out of memory !\n", __FUNCTION__));
+ goto sched_axi;
+ }
+
+ p_axi_err = axi_err_buf;
+ err = dhdpcie_bus_membytes(dhdp->bus, FALSE, axi_tcm_addr, p_axi_err, size);
+ if (err) {
+ DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
+ __FUNCTION__, err, size, axi_tcm_addr));
+ goto sched_axi;
+ }
+
+ /* Dump data to Dmesg */
+ dhd_log_dump_axi_error(axi_err_buf);
+ err = memcpy_s(&axi_err_dump->etd_axi_error_v1, size, axi_err_buf, size);
+ if (err) {
+ DHD_ERROR(("%s: failed to copy etd axi error info, err=%d\n",
+ __FUNCTION__, err));
+ }
+
+sched_axi:
+ if (axi_err_buf) {
+ MFREE(dhdp->osh, axi_err_buf, size);
+ }
+ dhd_schedule_axi_error_dump(dhdp, NULL);
+}
+
+static void
+dhd_log_dump_axi_error(uint8 *axi_err)
+{
+ dma_dentry_v1_t dma_dentry;
+ dma_fifo_v1_t dma_fifo;
+ int i = 0, j = 0;
+
+ if (*(uint8 *)axi_err == HND_EXT_TRAP_AXIERROR_VERSION_1) {
+ hnd_ext_trap_axi_error_v1_t *axi_err_v1 = (hnd_ext_trap_axi_error_v1_t *)axi_err;
+ DHD_ERROR(("%s: signature : 0x%x\n", __FUNCTION__, axi_err_v1->signature));
+ DHD_ERROR(("%s: version : 0x%x\n", __FUNCTION__, axi_err_v1->version));
+ DHD_ERROR(("%s: length : 0x%x\n", __FUNCTION__, axi_err_v1->length));
+ DHD_ERROR(("%s: dma_fifo_valid_count : 0x%x\n",
+ __FUNCTION__, axi_err_v1->dma_fifo_valid_count));
+ DHD_ERROR(("%s: axi_errorlog_status : 0x%x\n",
+ __FUNCTION__, axi_err_v1->axi_errorlog_status));
+ DHD_ERROR(("%s: axi_errorlog_core : 0x%x\n",
+ __FUNCTION__, axi_err_v1->axi_errorlog_core));
+ DHD_ERROR(("%s: axi_errorlog_hi : 0x%x\n",
+ __FUNCTION__, axi_err_v1->axi_errorlog_hi));
+ DHD_ERROR(("%s: axi_errorlog_lo : 0x%x\n",
+ __FUNCTION__, axi_err_v1->axi_errorlog_lo));
+ DHD_ERROR(("%s: axi_errorlog_id : 0x%x\n",
+ __FUNCTION__, axi_err_v1->axi_errorlog_id));
+
+ for (i = 0; i < MAX_DMAFIFO_ENTRIES_V1; i++) {
+ dma_fifo = axi_err_v1->dma_fifo[i];
+ DHD_ERROR(("%s: valid:%d : 0x%x\n",
+ __FUNCTION__, i, dma_fifo.valid));
+ DHD_ERROR(("%s: direction:%d : 0x%x\n",
+ __FUNCTION__, i, dma_fifo.direction));
+ DHD_ERROR(("%s: index:%d : 0x%x\n",
+ __FUNCTION__, i, dma_fifo.index));
+ DHD_ERROR(("%s: dpa:%d : 0x%x\n",
+ __FUNCTION__, i, dma_fifo.dpa));
+ DHD_ERROR(("%s: desc_lo:%d : 0x%x\n",
+ __FUNCTION__, i, dma_fifo.desc_lo));
+ DHD_ERROR(("%s: desc_hi:%d : 0x%x\n",
+ __FUNCTION__, i, dma_fifo.desc_hi));
+ DHD_ERROR(("%s: din:%d : 0x%x\n",
+ __FUNCTION__, i, dma_fifo.din));
+ DHD_ERROR(("%s: dout:%d : 0x%x\n",
+ __FUNCTION__, i, dma_fifo.dout));
+ for (j = 0; j < MAX_DMAFIFO_DESC_ENTRIES_V1; j++) {
+ dma_dentry = axi_err_v1->dma_fifo[i].dentry[j];
+				DHD_ERROR(("%s: ctrl1:%d : 0x%x\n",
+					__FUNCTION__, j, dma_dentry.ctrl1));
+				DHD_ERROR(("%s: ctrl2:%d : 0x%x\n",
+					__FUNCTION__, j, dma_dentry.ctrl2));
+				DHD_ERROR(("%s: addrlo:%d : 0x%x\n",
+					__FUNCTION__, j, dma_dentry.addrlo));
+				DHD_ERROR(("%s: addrhi:%d : 0x%x\n",
+					__FUNCTION__, j, dma_dentry.addrhi));
+ }
+ }
+ }
+	} else {
+ }
+}
+#endif /* DNGL_AXI_ERROR_LOGGING */
+
+/**
+ * Brings transmit packets on all flow rings closer to the dongle, by moving (a subset) from their
+ * flow queue to their flow ring.
+ */
+static void
+dhd_update_txflowrings(dhd_pub_t *dhd)
+{
+ unsigned long flags;
+ dll_t *item, *next;
+ flow_ring_node_t *flow_ring_node;
+ struct dhd_bus *bus = dhd->bus;
+ int count = 0;
+
+ if (dhd_query_bus_erros(dhd)) {
+ return;
+ }
+
+ /* Hold flowring_list_lock to ensure no race condition while accessing the List */
+ DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
+ for (item = dll_head_p(&bus->flowring_active_list);
+ (!dhd_is_device_removed(dhd) && !dll_end(&bus->flowring_active_list, item));
+ item = next, count++) {
+ if (dhd->hang_was_sent) {
+ break;
+ }
+
+ if (count > bus->max_tx_flowrings) {
+ DHD_ERROR(("%s : overflow max flowrings\n", __FUNCTION__));
+#ifdef OEM_ANDROID
+ dhd->hang_reason = HANG_REASON_UNKNOWN;
+ dhd_os_send_hang_message(dhd);
+#endif /* OEM_ANDROID */
+ break;
+ }
+
+ next = dll_next_p(item);
+ flow_ring_node = dhd_constlist_to_flowring(item);
+
+ /* Ensure that flow_ring_node in the list is Not Null */
+ ASSERT(flow_ring_node != NULL);
+
+ /* Ensure that the flowring node has valid contents */
+ ASSERT(flow_ring_node->prot_info != NULL);
+
+ dhd_prot_update_txflowring(dhd, flow_ring_node->flowid, flow_ring_node->prot_info);
+ }
+ DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
+}
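+
+/*
+ * Note on the traversal above: the successor pointer is sampled into 'next'
+ * before the current node is handed to dhd_prot_update_txflowring(), so the
+ * walk remains valid even if processing rearranges the active list. A
+ * minimal sketch of the same safe-iteration pattern (illustrative only):
+ *
+ *	for (item = dll_head_p(&list); !dll_end(&list, item); item = next) {
+ *		next = dll_next_p(item);
+ *		process(item);
+ *	}
+ */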
+
+/** Mailbox ringbell Function */
+static void
+dhd_bus_gen_devmb_intr(struct dhd_bus *bus)
+{
+ if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
+ (bus->sih->buscorerev == 4)) {
+ DHD_ERROR(("%s: mailbox communication not supported\n", __FUNCTION__));
+ return;
+ }
+ if (bus->db1_for_mb) {
+ /* this is a pcie core register, not the config register */
+ /* XXX: make sure we are on PCIE */
+		DHD_PCIE_INFO(("%s: writing a mail box interrupt to the device,"
+			" through doorbell 1\n", __FUNCTION__));
+ if (DAR_PWRREQ(bus)) {
+ dhd_bus_pcie_pwr_req(bus);
+ }
+#ifdef DHD_MMIO_TRACE
+ dhd_bus_mmio_trace(bus, dhd_bus_db1_addr_get(bus), 0x2, TRUE);
+#endif /* defined(DHD_MMIO_TRACE) */
+ si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db1_addr_get(bus),
+ ~0, 0x12345678);
+ } else {
+ DHD_PCIE_INFO(("%s: writing a mail box interrupt to the device,"
+ " through config space\n", __FUNCTION__));
+ dhdpcie_bus_cfg_write_dword(bus, PCISBMbx, 4, (1 << 0));
+ /* XXX CRWLPCIEGEN2-182 requires double write */
+ dhdpcie_bus_cfg_write_dword(bus, PCISBMbx, 4, (1 << 0));
+ }
+}
+
+/* Upon receiving a mailbox interrupt,
+ * if H2D_FW_TRAP bit is set in mailbox location
+ * device traps
+ */
+static void
+dhdpcie_fw_trap(dhd_bus_t *bus)
+{
+ DHD_ERROR(("%s: send trap!!!\n", __FUNCTION__));
+ if (bus->dhd->db7_trap.fw_db7w_trap) {
+ uint32 addr = dhd_bus_db1_addr_3_get(bus);
+ bus->dhd->db7_trap.debug_db7_send_time = OSL_LOCALTIME_NS();
+ bus->dhd->db7_trap.debug_db7_send_cnt++;
+ si_corereg(bus->sih, bus->sih->buscoreidx, addr, ~0,
+ bus->dhd->db7_trap.db7_magic_number);
+ return;
+ }
+
+ /* Send the mailbox data and generate mailbox intr. */
+ dhdpcie_send_mb_data(bus, H2D_FW_TRAP);
+	/* For FWs that cannot interpret H2D_FW_TRAP */
+ (void)dhd_wl_ioctl_set_intiovar(bus->dhd, "bus:disconnect", 99, WLC_SET_VAR, TRUE, 0);
+}
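+
+/*
+ * Timing note for the db7 path above: debug_db7_send_time is stamped here,
+ * and debug_db7_trap_time is stamped in dhd_bus_handle_mb_data() when
+ * D2H_DEV_TRAP_HOSTDB comes back, so the db7 round-trip latency is simply
+ * trap_time - send_time. The magic number written through doorbell 1_3 is
+ * what the firmware recognizes as a host-requested trap.
+ */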
+
+#ifdef PCIE_INB_DW
+
+void
+dhd_bus_inb_ack_pending_ds_req(dhd_bus_t *bus)
+{
+ /* The DHD_BUS_INB_DW_LOCK must be held before
+ * calling this function !!
+ */
+ if ((dhdpcie_bus_get_pcie_inband_dw_state(bus) ==
+ DW_DEVICE_DS_DEV_SLEEP_PEND) &&
+ (bus->host_active_cnt == 0)) {
+ dhdpcie_bus_set_pcie_inband_dw_state(bus, DW_DEVICE_DS_DEV_SLEEP);
+ dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK);
+ }
+}
+
+int
+dhd_bus_inb_set_device_wake(struct dhd_bus *bus, bool val)
+{
+ int timeleft;
+ unsigned long flags;
+ int ret;
+
+ if (!INBAND_DW_ENAB(bus)) {
+ return BCME_ERROR;
+ }
+ if (val) {
+ DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
+
+		/*
+		 * Reset the doorbell timeout value so that the watchdog
+		 * doesn't try to deassert Device Wake while we are still
+		 * in the process of asserting it.
+		 */
+ dhd_bus_doorbell_timeout_reset(bus);
+
+ if (dhdpcie_bus_get_pcie_inband_dw_state(bus) ==
+ DW_DEVICE_DS_DEV_SLEEP) {
+ /* Clear wait_for_ds_exit */
+ bus->wait_for_ds_exit = 0;
+ if (bus->calc_ds_exit_latency) {
+ bus->ds_exit_latency = 0;
+ bus->ds_exit_ts2 = 0;
+ bus->ds_exit_ts1 = OSL_SYSUPTIME_US();
+ }
+ ret = dhdpcie_send_mb_data(bus, H2DMB_DS_DEVICE_WAKE_ASSERT);
+ if (ret != BCME_OK) {
+ DHD_ERROR(("Failed: assert Inband device_wake\n"));
+ DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
+ ret = BCME_ERROR;
+ goto exit;
+ }
+ dhdpcie_bus_set_pcie_inband_dw_state(bus,
+ DW_DEVICE_DS_DISABLED_WAIT);
+ bus->inband_dw_assert_cnt++;
+ } else if (dhdpcie_bus_get_pcie_inband_dw_state(bus) ==
+ DW_DEVICE_DS_DISABLED_WAIT) {
+ DHD_ERROR(("Inband device wake is already asserted, "
+ "waiting for DS-Exit\n"));
+ }
+ else {
+ DHD_PCIE_INFO(("Not in DS SLEEP state \n"));
+ DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
+ ret = BCME_OK;
+ goto exit;
+ }
+
+		/*
+		 * Since we are going to wait/sleep, release the lock.
+		 * The Device Wake sanity is still valid, because:
+		 * a) If another context comes in and tries to assert DS
+		 *    again and gets the lock, then since ds_state is now
+		 *    != DW_DEVICE_DS_DEV_SLEEP, that context would return
+		 *    saying it is not in DS Sleep.
+		 * b) If another context comes in and tries to de-assert
+		 *    DS and gets the lock, then since ds_state is
+		 *    != DW_DEVICE_DS_DEV_WAKE, that context would return
+		 *    too. This cannot happen anyway, since the watchdog is
+		 *    the only context that can de-assert Device Wake, and
+		 *    as the first step of asserting Device Wake we have
+		 *    pushed out the doorbell timeout.
+		 */
+
+ if (!CAN_SLEEP()) {
+ dhdpcie_bus_set_pcie_inband_dw_state(bus,
+ DW_DEVICE_DS_DEV_WAKE);
+ DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
+ /* Called from context that cannot sleep */
+ OSL_DELAY(1000);
+ } else {
+ DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
+ /* Wait for DS EXIT for DS_EXIT_TIMEOUT seconds */
+ timeleft = dhd_os_ds_exit_wait(bus->dhd, &bus->wait_for_ds_exit);
+ if (!bus->wait_for_ds_exit || timeleft == 0) {
+ DHD_ERROR(("dhd_bus_inb_set_device_wake:DS-EXIT timeout, "
+ "wait_for_ds_exit : %d\n", bus->wait_for_ds_exit));
+ bus->inband_ds_exit_to_cnt++;
+ bus->ds_exit_timeout = 0;
+#ifdef DHD_FW_COREDUMP
+ if (bus->dhd->memdump_enabled) {
+ /* collect core dump */
+ DHD_GENERAL_LOCK(bus->dhd, flags);
+ DHD_BUS_BUSY_CLEAR_RPM_SUSPEND_IN_PROGRESS(bus->dhd);
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+ bus->dhd->memdump_type =
+ DUMP_TYPE_INBAND_DEVICE_WAKE_FAILURE;
+ dhd_bus_mem_dump(bus->dhd);
+ }
+#else
+ ASSERT(0);
+#endif /* DHD_FW_COREDUMP */
+ ret = BCME_ERROR;
+ goto exit;
+ }
+ }
+ ret = BCME_OK;
+ } else {
+ DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
+ if ((dhdpcie_bus_get_pcie_inband_dw_state(bus) ==
+ DW_DEVICE_DS_DEV_WAKE)) {
+ ret = dhdpcie_send_mb_data(bus, H2DMB_DS_DEVICE_WAKE_DEASSERT);
+ if (ret != BCME_OK) {
+ DHD_ERROR(("Failed: deassert Inband device_wake\n"));
+ DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
+ goto exit;
+ }
+ dhdpcie_bus_set_pcie_inband_dw_state(bus,
+ DW_DEVICE_DS_ACTIVE);
+ bus->inband_dw_deassert_cnt++;
+ } else if ((dhdpcie_bus_get_pcie_inband_dw_state(bus) ==
+ DW_DEVICE_DS_DEV_SLEEP_PEND) &&
+ (bus->host_active_cnt == 0)) {
+ dhdpcie_bus_set_pcie_inband_dw_state(bus, DW_DEVICE_DS_DEV_SLEEP);
+ dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK);
+ }
+
+ ret = BCME_OK;
+ DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
+ }
+
+exit:
+ return ret;
+}
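+
+/*
+ * Inband device-wake state transitions driven by the function above
+ * (a simplified sketch of the states referenced in this file; the
+ * DW_DEVICE_ prefix is omitted for brevity):
+ *
+ *   DS_DEV_SLEEP      --assert DW-->            DS_DISABLED_WAIT
+ *   DS_DISABLED_WAIT  --DS-EXIT from dongle-->  DS_DEV_WAKE
+ *   DS_DEV_WAKE       --deassert DW-->          DS_ACTIVE
+ *   DS_DEV_SLEEP_PEND --host idle, DS-ACK-->    DS_DEV_SLEEP
+ */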
+#endif /* PCIE_INB_DW */
+#if defined(PCIE_OOB) || defined(PCIE_INB_DW)
+void
+dhd_bus_doorbell_timeout_reset(struct dhd_bus *bus)
+{
+ if (dhd_doorbell_timeout) {
+#ifdef DHD_PCIE_RUNTIMEPM
+ if (dhd_runtimepm_ms) {
+ dhd_timeout_start(&bus->doorbell_timer,
+ (dhd_doorbell_timeout * 1000) / dhd_runtimepm_ms);
+ }
+#else
+#ifdef BCMQT
+ uint wd_scale = 1;
+#else
+ uint wd_scale = dhd_watchdog_ms;
+#endif
+ if (dhd_watchdog_ms) {
+ dhd_timeout_start(&bus->doorbell_timer,
+ (dhd_doorbell_timeout * 1000) / wd_scale);
+ }
+#endif /* DHD_PCIE_RUNTIMEPM */
+	} else if (bus->dhd->busstate != DHD_BUS_SUSPEND) {
+ dhd_bus_set_device_wake(bus, FALSE);
+ }
+}
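+
+/*
+ * Example of the tick conversion above, with hypothetical tunables:
+ * dhd_doorbell_timeout = 20 and dhd_watchdog_ms = 10 arm the timer with
+ * (20 * 1000) / 10 = 2000 units; under DHD_PCIE_RUNTIMEPM the divisor is
+ * dhd_runtimepm_ms instead, and on BCMQT the scale is fixed at 1.
+ */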
+
+int
+dhd_bus_set_device_wake(struct dhd_bus *bus, bool val)
+{
+ if (bus->ds_enabled && bus->dhd->ring_attached) {
+#ifdef PCIE_INB_DW
+ if (INBAND_DW_ENAB(bus)) {
+ return dhd_bus_inb_set_device_wake(bus, val);
+ }
+#endif /* PCIE_INB_DW */
+#ifdef PCIE_OOB
+ if (OOB_DW_ENAB(bus)) {
+ return dhd_os_oob_set_device_wake(bus, val);
+ }
+#endif /* PCIE_OOB */
+ }
+ return BCME_OK;
+}
+
+void
+dhd_bus_dw_deassert(dhd_pub_t *dhd)
+{
+ dhd_bus_t *bus = dhd->bus;
+ unsigned long flags;
+
+ if (dhd_query_bus_erros(bus->dhd)) {
+ return;
+ }
+
+	/* If we haven't communicated with the device for a while, deassert the Device_Wake GPIO */
+ if (dhd_doorbell_timeout != 0 && bus->dhd->busstate == DHD_BUS_DATA &&
+ dhd_timeout_expired(&bus->doorbell_timer) &&
+ !dhd_query_bus_erros(bus->dhd)) {
+ DHD_GENERAL_LOCK(dhd, flags);
+ if (DHD_BUS_BUSY_CHECK_IDLE(dhd) &&
+ !DHD_CHECK_CFG_IN_PROGRESS(dhd)) {
+ DHD_BUS_BUSY_SET_IN_DS_DEASSERT(dhd);
+ DHD_GENERAL_UNLOCK(dhd, flags);
+ dhd_bus_set_device_wake(bus, FALSE);
+ DHD_GENERAL_LOCK(dhd, flags);
+ DHD_BUS_BUSY_CLEAR_IN_DS_DEASSERT(dhd);
+ dhd_os_busbusy_wake(bus->dhd);
+ DHD_GENERAL_UNLOCK(dhd, flags);
+ } else {
+ DHD_GENERAL_UNLOCK(dhd, flags);
+ }
+ }
+
+#ifdef PCIE_INB_DW
+ if (INBAND_DW_ENAB(bus)) {
+ if (bus->ds_exit_timeout) {
+			bus->ds_exit_timeout--;
+ if (bus->ds_exit_timeout == 1) {
+ DHD_ERROR(("DS-EXIT TIMEOUT\n"));
+ bus->ds_exit_timeout = 0;
+ bus->inband_ds_exit_to_cnt++;
+ }
+ }
+ if (bus->host_sleep_exit_timeout) {
+			bus->host_sleep_exit_timeout--;
+ if (bus->host_sleep_exit_timeout == 1) {
+ DHD_ERROR(("HOST_SLEEP-EXIT TIMEOUT\n"));
+ bus->host_sleep_exit_timeout = 0;
+ bus->inband_host_sleep_exit_to_cnt++;
+ }
+ }
+ }
+#endif /* PCIE_INB_DW */
+}
+#endif /* defined(PCIE_OOB) || defined(PCIE_INB_DW) */
+
+/** mailbox doorbell ring function */
+void
+dhd_bus_ringbell(struct dhd_bus *bus, uint32 value)
+{
+ /* Skip once bus enters low power state (D3_INFORM/D3_ACK) */
+ if (__DHD_CHK_BUS_IN_LPS(bus)) {
+ DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
+ __FUNCTION__, bus->bus_low_power_state));
+ return;
+ }
+
+ /* Skip in the case of link down */
+ if (bus->is_linkdown) {
+ DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
+ return;
+ }
+
+ if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
+ (bus->sih->buscorerev == 4)) {
+ si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int,
+ PCIE_INTB, PCIE_INTB);
+ } else {
+		/* this is a pcie core register, not the config register */
+		/* XXX: make sure we are on PCIE */
+ DHD_PCIE_INFO(("%s: writing a door bell to the device\n", __FUNCTION__));
+ if (IDMA_ACTIVE(bus->dhd)) {
+ if (DAR_PWRREQ(bus)) {
+ dhd_bus_pcie_pwr_req(bus);
+ }
+ si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db0_addr_2_get(bus),
+ ~0, value);
+ } else {
+ if (DAR_PWRREQ(bus)) {
+ dhd_bus_pcie_pwr_req(bus);
+ }
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ dhd_bus_db0_addr_get(bus), ~0, 0x12345678);
+ }
+ }
+}
+
+/** mailbox doorbell ring function for IDMA/IFRM using dma channel2 */
+void
+dhd_bus_ringbell_2(struct dhd_bus *bus, uint32 value, bool devwake)
+{
+	/* this is a pcie core register, not the config register */
+	/* XXX: make sure we are on PCIE */
+ /* Skip once bus enters low power state (D3_INFORM/D3_ACK) */
+ if (__DHD_CHK_BUS_IN_LPS(bus)) {
+ DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
+ __FUNCTION__, bus->bus_low_power_state));
+ return;
+ }
+
+ /* Skip in the case of link down */
+ if (bus->is_linkdown) {
+ DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
+ return;
+ }
+
+ DHD_PCIE_INFO(("writing a door bell 2 to the device\n"));
+ if (DAR_PWRREQ(bus)) {
+ dhd_bus_pcie_pwr_req(bus);
+ }
+ si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db0_addr_2_get(bus),
+ ~0, value);
+}
+
+void
+dhdpcie_bus_ringbell_fast(struct dhd_bus *bus, uint32 value)
+{
+ /* Skip once bus enters low power state (D3_INFORM/D3_ACK) */
+ if (__DHD_CHK_BUS_IN_LPS(bus)) {
+ DHD_RPM(("%s: trying to ring the doorbell after D3 inform %d\n",
+ __FUNCTION__, bus->bus_low_power_state));
+ return;
+ }
+
+ /* Skip in the case of link down */
+ if (bus->is_linkdown) {
+ DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
+ return;
+ }
+
+#if defined(PCIE_OOB) || defined(PCIE_INB_DW)
+ if (OOB_DW_ENAB(bus)) {
+ dhd_bus_set_device_wake(bus, TRUE);
+ }
+ dhd_bus_doorbell_timeout_reset(bus);
+#endif /* defined(PCIE_OOB) || defined(PCIE_INB_DW) */
+#ifdef DHD_MMIO_TRACE
+ dhd_bus_mmio_trace(bus, dhd_bus_db0_addr_get(bus), value,
+ ((value >> 24u) == 0xFF) ? TRUE : FALSE);
+#endif /* defined(DHD_MMIO_TRACE) */
+ if (DAR_PWRREQ(bus)) {
+ dhd_bus_pcie_pwr_req(bus);
+ }
+
+#ifdef DHD_DB0TS
+ if (bus->dhd->db0ts_capable) {
+ uint64 ts;
+
+ ts = local_clock();
+ do_div(ts, 1000);
+
+ value = htol32(ts & 0xFFFFFFFF);
+ DHD_INFO(("%s: usec timer = 0x%x\n", __FUNCTION__, value));
+ }
+#endif /* DHD_DB0TS */
+ W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, value);
+}
+
+void
+dhdpcie_bus_ringbell_2_fast(struct dhd_bus *bus, uint32 value, bool devwake)
+{
+ /* Skip once bus enters low power state (D3_INFORM/D3_ACK) */
+ if (__DHD_CHK_BUS_IN_LPS(bus)) {
+ DHD_RPM(("%s: trying to ring the doorbell after D3 inform %d\n",
+ __FUNCTION__, bus->bus_low_power_state));
+ return;
+ }
+
+ /* Skip in the case of link down */
+ if (bus->is_linkdown) {
+ DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
+ return;
+ }
+
+#if defined(PCIE_OOB) || defined(PCIE_INB_DW)
+ if (devwake) {
+ if (OOB_DW_ENAB(bus)) {
+ dhd_bus_set_device_wake(bus, TRUE);
+ }
+ }
+ dhd_bus_doorbell_timeout_reset(bus);
+#endif /* defined(PCIE_OOB) || defined(PCIE_INB_DW) */
+
+#ifdef DHD_MMIO_TRACE
+ dhd_bus_mmio_trace(bus, dhd_bus_db0_addr_2_get(bus), value, TRUE);
+#endif /* defined(DHD_MMIO_TRACE) */
+ if (DAR_PWRREQ(bus)) {
+ dhd_bus_pcie_pwr_req(bus);
+ }
+ W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_2_addr, value);
+}
+
+static void
+dhd_bus_ringbell_oldpcie(struct dhd_bus *bus, uint32 value)
+{
+ uint32 w;
+ /* Skip once bus enters low power state (D3_INFORM/D3_ACK) */
+ if (__DHD_CHK_BUS_IN_LPS(bus)) {
+ DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
+ __FUNCTION__, bus->bus_low_power_state));
+ return;
+ }
+
+ /* Skip in the case of link down */
+ if (bus->is_linkdown) {
+ DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
+ return;
+ }
+
+ w = (R_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr) & ~PCIE_INTB) | PCIE_INTB;
+ W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, w);
+}
+
+dhd_mb_ring_t
+dhd_bus_get_mbintr_fn(struct dhd_bus *bus)
+{
+ if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
+ (bus->sih->buscorerev == 4)) {
+ bus->pcie_mb_intr_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
+ bus->pcie_mailbox_int);
+ if (bus->pcie_mb_intr_addr) {
+ bus->pcie_mb_intr_osh = si_osh(bus->sih);
+ return dhd_bus_ringbell_oldpcie;
+ }
+ } else {
+ bus->pcie_mb_intr_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
+ dhd_bus_db0_addr_get(bus));
+ if (bus->pcie_mb_intr_addr) {
+ bus->pcie_mb_intr_osh = si_osh(bus->sih);
+ return dhdpcie_bus_ringbell_fast;
+ }
+ }
+ return dhd_bus_ringbell;
+}
+
+dhd_mb_ring_2_t
+dhd_bus_get_mbintr_2_fn(struct dhd_bus *bus)
+{
+ bus->pcie_mb_intr_2_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
+ dhd_bus_db0_addr_2_get(bus));
+ if (bus->pcie_mb_intr_2_addr) {
+ bus->pcie_mb_intr_osh = si_osh(bus->sih);
+ return dhdpcie_bus_ringbell_2_fast;
+ }
+ return dhd_bus_ringbell_2;
+}
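+
+/*
+ * Selection logic recap for the two getters above: when the doorbell
+ * register address resolves via si_corereg_addr(), the "fast" ring
+ * functions are returned, which ring the doorbell with a direct W_REG()
+ * MMIO write; otherwise the generic dhd_bus_ringbell*() fallbacks go
+ * through si_corereg(). Old PCIe cores (buscorerev 2/4/6) use the
+ * INTB-based dhd_bus_ringbell_oldpcie() instead.
+ */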
+
+bool
+BCMFASTPATH(dhd_bus_dpc)(struct dhd_bus *bus)
+{
+ bool resched = FALSE; /* Flag indicating resched wanted */
+ unsigned long flags;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ bus->dpc_entry_time = OSL_LOCALTIME_NS();
+
+ if (dhd_query_bus_erros(bus->dhd)) {
+ return 0;
+ }
+
+ DHD_GENERAL_LOCK(bus->dhd, flags);
+	/* Check only for DHD_BUS_DOWN and not for DHD_BUS_DOWN_IN_PROGRESS,
+	 * to avoid an IOCTL-resumed-on-timeout when an ioctl is waiting for a
+	 * response and rmmod is fired in parallel (which sets
+	 * DHD_BUS_DOWN_IN_PROGRESS); if we returned from here, the IOCTL
+	 * response would never be handled.
+	 */
+ if (bus->dhd->busstate == DHD_BUS_DOWN) {
+ DHD_ERROR(("%s: Bus down, ret\n", __FUNCTION__));
+ bus->intstatus = 0;
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+ bus->dpc_return_busdown_count++;
+ return 0;
+ }
+#ifdef DHD_PCIE_RUNTIMEPM
+ bus->idlecount = 0;
+#endif /* DHD_PCIE_RUNTIMEPM */
+ DHD_BUS_BUSY_SET_IN_DPC(bus->dhd);
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+
+#ifdef DHD_READ_INTSTATUS_IN_DPC
+ if (bus->ipend) {
+ bus->ipend = FALSE;
+ bus->intstatus = dhdpcie_bus_intstatus(bus);
+ /* Check if the interrupt is ours or not */
+ if (bus->intstatus == 0) {
+ goto INTR_ON;
+ }
+ bus->intrcount++;
+ }
+#endif /* DHD_READ_INTSTATUS_IN_DPC */
+
+ /* Do not process dpc after receiving D3_ACK */
+ if (DHD_CHK_BUS_LPS_D3_ACKED(bus)) {
+		DHD_ERROR(("%s: D3 Ack Received, skip dpc\n", __FUNCTION__));
+ goto exit;
+ }
+
+ resched = dhdpcie_bus_process_mailbox_intr(bus, bus->intstatus);
+ if (!resched) {
+ bus->intstatus = 0;
+#ifdef DHD_READ_INTSTATUS_IN_DPC
+INTR_ON:
+#endif /* DHD_READ_INTSTATUS_IN_DPC */
+ if (bus->d2h_intr_control == PCIE_D2H_INTMASK_CTRL) {
+ dhdpcie_bus_intr_enable(bus); /* Enable back interrupt using Intmask!! */
+ bus->dpc_intr_enable_count++;
+ } else {
+			/* For Linux, MacOS etc. (other than NDIS), re-enable the host
+			 * interrupts which were disabled in dhdpcie_bus_isr()
+			 */
+ if ((dhdpcie_irq_disabled(bus)) && (!dhd_query_bus_erros(bus->dhd))) {
+ dhdpcie_enable_irq(bus); /* Enable back interrupt!! */
+ bus->dpc_intr_enable_count++;
+ }
+ }
+ bus->dpc_exit_time = OSL_LOCALTIME_NS();
+ } else {
+ bus->resched_dpc_time = OSL_LOCALTIME_NS();
+ }
+
+ bus->dpc_sched = resched;
+#ifdef DHD_FLOW_RING_STATUS_TRACE
+ if (bus->dhd->dma_h2d_ring_upd_support && bus->dhd->dma_d2h_ring_upd_support &&
+ (bus->dhd->ring_attached == TRUE)) {
+ dhd_bus_flow_ring_status_dpc_trace(bus->dhd);
+ }
+#endif /* DHD_FLOW_RING_STATUS_TRACE */
+
+exit:
+ DHD_GENERAL_LOCK(bus->dhd, flags);
+ DHD_BUS_BUSY_CLEAR_IN_DPC(bus->dhd);
+ dhd_os_busbusy_wake(bus->dhd);
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+
+ return resched;
+
+}
+
+int
+dhdpcie_send_mb_data(dhd_bus_t *bus, uint32 h2d_mb_data)
+{
+ uint32 cur_h2d_mb_data = 0;
+
+ if (bus->is_linkdown) {
+ DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ DHD_PCIE_INFO(("%s: H2D_MB_DATA: 0x%08X\n", __FUNCTION__, h2d_mb_data));
+
+#ifdef PCIE_INB_DW
+ if (h2d_mb_data == H2D_HOST_DS_ACK) {
+ dhdpcie_set_dongle_deepsleep(bus, TRUE);
+ }
+ dhd_bus_ds_trace(bus, h2d_mb_data, FALSE, dhdpcie_bus_get_pcie_inband_dw_state(bus));
+#else
+ dhd_bus_ds_trace(bus, h2d_mb_data, FALSE);
+#endif /* PCIE_INB_DW */
+ if (bus->api.fw_rev >= PCIE_SHARED_VERSION_6 && !bus->use_mailbox) {
+		DHD_PCIE_INFO(("API rev >= 6: sending mb data as H2D Ctrl message"
+			" to dongle, 0x%04x\n", h2d_mb_data));
+ /* Prevent asserting device_wake during doorbell ring for mb data to avoid loop. */
+#ifdef PCIE_OOB
+ bus->oob_enabled = FALSE;
+#endif /* PCIE_OOB */
+ /* XXX: check the error return value here... */
+ if (dhd_prot_h2d_mbdata_send_ctrlmsg(bus->dhd, h2d_mb_data)) {
+ DHD_ERROR(("failure sending the H2D Mailbox message "
+ "to firmware\n"));
+ goto fail;
+ }
+#ifdef PCIE_OOB
+ bus->oob_enabled = TRUE;
+#endif /* PCIE_OOB */
+ goto done;
+ }
+
+ dhd_bus_cmn_readshared(bus, &cur_h2d_mb_data, H2D_MB_DATA, 0);
+
+ if (cur_h2d_mb_data != 0) {
+ uint32 i = 0;
+ DHD_PCIE_INFO(("%s: GRRRRRRR: MB transaction is already pending 0x%04x\n",
+ __FUNCTION__, cur_h2d_mb_data));
+ /* XXX: start a zero length timer to keep checking this to be zero */
+ while ((i++ < 100) && cur_h2d_mb_data) {
+ OSL_DELAY(10);
+ dhd_bus_cmn_readshared(bus, &cur_h2d_mb_data, H2D_MB_DATA, 0);
+ }
+ if (i >= 100) {
+ DHD_ERROR(("%s : waited 1ms for the dngl "
+ "to ack the previous mb transaction\n", __FUNCTION__));
+ DHD_ERROR(("%s : MB transaction is still pending 0x%04x\n",
+ __FUNCTION__, cur_h2d_mb_data));
+ }
+ }
+
+ dhd_bus_cmn_writeshared(bus, &h2d_mb_data, sizeof(uint32), H2D_MB_DATA, 0);
+ dhd_bus_gen_devmb_intr(bus);
+
+done:
+ if (h2d_mb_data == H2D_HOST_D3_INFORM) {
+ DHD_INFO_HW4(("%s: send H2D_HOST_D3_INFORM to dongle\n", __FUNCTION__));
+ bus->last_d3_inform_time = OSL_LOCALTIME_NS();
+ bus->d3_inform_cnt++;
+ }
+ if (h2d_mb_data == H2D_HOST_D0_INFORM_IN_USE) {
+ DHD_INFO_HW4(("%s: send H2D_HOST_D0_INFORM_IN_USE to dongle\n", __FUNCTION__));
+ bus->d0_inform_in_use_cnt++;
+ }
+ if (h2d_mb_data == H2D_HOST_D0_INFORM) {
+ DHD_INFO_HW4(("%s: send H2D_HOST_D0_INFORM to dongle\n", __FUNCTION__));
+ bus->d0_inform_cnt++;
+ }
+ return BCME_OK;
+fail:
+ return BCME_ERROR;
+}
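+
+/*
+ * Delivery paths taken by dhdpcie_send_mb_data() above: for API rev >=
+ * PCIE_SHARED_VERSION_6 with use_mailbox disabled, the word goes out as an
+ * H2D control message; otherwise it is written to the shared H2D_MB_DATA
+ * location and a device mailbox interrupt is generated. The
+ * pending-transaction poll budget is 100 iterations x OSL_DELAY(10), i.e.
+ * roughly 1 ms, matching the "waited 1ms" error message.
+ */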
+
+static void
+dhd_bus_handle_d3_ack(dhd_bus_t *bus)
+{
+ bus->suspend_intr_disable_count++;
+ /* Disable dongle Interrupts Immediately after D3 */
+
+	/* For Linux, MacOS etc. (other than NDIS), along with disabling the
+	 * dongle interrupt by clearing the IntMask, directly disable the
+	 * interrupt from the host side as well. Also clear the intstatus
+	 * if it is set, to avoid unnecessary interrupts after the D3 ACK.
+	 */
+ dhdpcie_bus_intr_disable(bus); /* Disable interrupt using IntMask!! */
+ dhdpcie_bus_clear_intstatus(bus);
+#ifndef NDIS /* !NDIS */
+ dhdpcie_disable_irq_nosync(bus); /* Disable host interrupt!! */
+#endif /* !NDIS */
+
+ DHD_SET_BUS_LPS_D3_ACKED(bus);
+	DHD_RPM(("%s: D3_ACK Received\n", __FUNCTION__));
+
+ if (bus->dhd->dhd_induce_error == DHD_INDUCE_D3_ACK_TIMEOUT) {
+		/* Forcefully set bus_low_power_state back to DHD_BUS_D3_INFORM_SENT */
+ DHD_ERROR(("%s: Due to d3ack induce error forcefully set "
+ "bus_low_power_state to DHD_BUS_D3_INFORM_SENT\n", __FUNCTION__));
+ DHD_SET_BUS_LPS_D3_INFORMED(bus);
+ }
+	/* Check for the D3 ACK induce flag, which is set by firing a dhd iovar to induce a
+	 * D3 ACK timeout. If the flag is set, the D3 wake is skipped, which results in a
+	 * D3 ACK timeout.
+	 */
+ if (bus->dhd->dhd_induce_error != DHD_INDUCE_D3_ACK_TIMEOUT) {
+ bus->wait_for_d3_ack = 1;
+ dhd_os_d3ack_wake(bus->dhd);
+ } else {
+ DHD_ERROR(("%s: Inducing D3 ACK timeout\n", __FUNCTION__));
+ }
+}
+
+void
+dhd_bus_handle_mb_data(dhd_bus_t *bus, uint32 d2h_mb_data)
+{
+#ifdef PCIE_INB_DW
+ unsigned long flags = 0;
+#endif /* PCIE_INB_DW */
+ if (MULTIBP_ENAB(bus->sih)) {
+ dhd_bus_pcie_pwr_req(bus);
+ }
+
+ DHD_PCIE_INFO(("D2H_MB_DATA: 0x%04x\n", d2h_mb_data));
+#ifdef PCIE_INB_DW
+ DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
+ dhd_bus_ds_trace(bus, d2h_mb_data, TRUE, dhdpcie_bus_get_pcie_inband_dw_state(bus));
+ DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
+#else
+ dhd_bus_ds_trace(bus, d2h_mb_data, TRUE);
+#endif /* PCIE_INB_DW */
+
+ if (d2h_mb_data & D2H_DEV_FWHALT) {
+ if (bus->dhd->db7_trap.fw_db7w_trap_inprogress == FALSE) {
+			DHD_ERROR(("FW trap has happened, dongle_trap_data 0x%08x\n",
+ bus->dhd->dongle_trap_data));
+ }
+
+ if (bus->dhd->dongle_trap_data & D2H_DEV_TRAP_HOSTDB) {
+ uint64 db7_dur;
+
+ bus->dhd->db7_trap.debug_db7_trap_time = OSL_LOCALTIME_NS();
+ bus->dhd->db7_trap.debug_db7_trap_cnt++;
+ db7_dur = bus->dhd->db7_trap.debug_db7_trap_time -
+ bus->dhd->db7_trap.debug_db7_send_time;
+ if (db7_dur > bus->dhd->db7_trap.debug_max_db7_dur) {
+ bus->dhd->db7_trap.debug_max_db7_send_time =
+ bus->dhd->db7_trap.debug_db7_send_time;
+ bus->dhd->db7_trap.debug_max_db7_trap_time =
+ bus->dhd->db7_trap.debug_db7_trap_time;
+ }
+ bus->dhd->db7_trap.debug_max_db7_dur =
+ MAX(bus->dhd->db7_trap.debug_max_db7_dur, db7_dur);
+ if (bus->dhd->db7_trap.fw_db7w_trap_inprogress == FALSE) {
+ bus->dhd->db7_trap.debug_db7_timing_error_cnt++;
+ }
+ } else {
+ dhdpcie_checkdied(bus, NULL, 0);
+#ifdef BCM_ROUTER_DHD
+ dhdpcie_handle_dongle_trap(bus);
+#endif
+#ifdef OEM_ANDROID
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
+ bus->no_cfg_restore = 1;
+#endif /* CONFIG_ARCH_MSM */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+ dhd_os_check_hang(bus->dhd, 0, -EREMOTEIO);
+#endif /* OEM_ANDROID */
+ }
+ if (bus->dhd->db7_trap.fw_db7w_trap_inprogress) {
+ bus->dhd->db7_trap.fw_db7w_trap_inprogress = FALSE;
+ bus->dhd->dongle_trap_occured = TRUE;
+ }
+ goto exit;
+ }
+ if (d2h_mb_data & D2H_DEV_DS_ENTER_REQ) {
+ bool ds_acked = FALSE;
+ BCM_REFERENCE(ds_acked);
+ if (__DHD_CHK_BUS_LPS_D3_ACKED(bus)) {
+			DHD_ERROR(("DS-ENTRY AFTER D3-ACK!!!!! QUITTING\n"));
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
+ bus->dhd->busstate = DHD_BUS_DOWN;
+ goto exit;
+ }
+ /* what should we do */
+ DHD_PCIE_INFO(("D2H_MB_DATA: DEEP SLEEP REQ\n"));
+#ifdef PCIE_INB_DW
+ if (INBAND_DW_ENAB(bus)) {
+ /* As per inband state machine, host should not send DS-ACK
+ * during suspend or suspend in progress, instead D3 inform will be sent.
+ */
+ if (!bus->skip_ds_ack) {
+ DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
+ if (dhdpcie_bus_get_pcie_inband_dw_state(bus)
+ == DW_DEVICE_DS_ACTIVE) {
+ dhdpcie_bus_set_pcie_inband_dw_state(bus,
+ DW_DEVICE_DS_DEV_SLEEP_PEND);
+ if (bus->host_active_cnt == 0) {
+ dhdpcie_bus_set_pcie_inband_dw_state(bus,
+ DW_DEVICE_DS_DEV_SLEEP);
+ dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK);
+ ds_acked = TRUE;
+						DHD_PCIE_INFO(("D2H_MB_DATA: sent DEEP SLEEP"
+							" ACK to DNGL\n"));
+ } else {
+ DHD_PCIE_INFO(("%s: Host is active, "
+ "skip sending DS-ACK. "
+ "host_active_cnt is %d\n",
+ __FUNCTION__, bus->host_active_cnt));
+ }
+ }
+ /* Currently DW_DEVICE_HOST_SLEEP_WAIT is set only
+ * under dhd_bus_suspend() function.
+ */
+ else if (dhdpcie_bus_get_pcie_inband_dw_state(bus)
+ == DW_DEVICE_HOST_SLEEP_WAIT) {
+ DHD_ERROR(("%s: DS-ACK not sent due to suspend "
+ "in progress\n", __FUNCTION__));
+ } else {
+ DHD_ERROR_RLMT(("%s: Failed to send DS-ACK, DS state is %d",
+ __FUNCTION__,
+ dhdpcie_bus_get_pcie_inband_dw_state(bus)));
+ }
+ DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
+ dhd_os_ds_enter_wake(bus->dhd);
+ } else {
+ DHD_PCIE_INFO(("%s: Skip DS-ACK due to "
+ "suspend in progress\n", __FUNCTION__));
+ }
+#ifdef DHD_EFI
+ if (ds_acked && !bus->ds_enabled) {
+			/* If 'deep_sleep' is disabled, then we need to assert DW again
+			 * from here once we have acked the DS_ENTER_REQ, so that the
+			 * dongle stays awake and honours the user iovar request.
+			 * Note that this code will be hit only for the pcie_suspend/resume
+			 * case with 'deep_sleep' disabled, and will not get executed in
+			 * the normal path either when 'deep_sleep' is enabled (default)
+			 * or when 'deep_sleep' is disabled, because if 'deep_sleep' is
+			 * disabled, then by definition the dongle will not send DS_ENTER_REQ
+			 * except in the case of D0 -> D3 -> D0 transitions, which is what
+			 * is being handled here.
+			 */
+ dhd_bus_inb_set_device_wake(bus, TRUE);
+ }
+#endif /* DHD_EFI */
+ } else
+#endif /* PCIE_INB_DW */
+ {
+ dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK);
+ DHD_PCIE_INFO(("D2H_MB_DATA: sent DEEP SLEEP ACK\n"));
+ }
+ }
+ if (d2h_mb_data & D2H_DEV_DS_EXIT_NOTE) {
+#ifdef PCIE_INB_DW
+ if (INBAND_DW_ENAB(bus)) {
+ if (bus->calc_ds_exit_latency) {
+ bus->ds_exit_ts2 = OSL_SYSUPTIME_US();
+ if (bus->ds_exit_ts2 > bus->ds_exit_ts1 &&
+ bus->ds_exit_ts1 != 0)
+ bus->ds_exit_latency = bus->ds_exit_ts2 - bus->ds_exit_ts1;
+ else
+ bus->ds_exit_latency = 0;
+ }
+ }
+#endif /* PCIE_INB_DW */
+ /* what should we do */
+ DHD_PCIE_INFO(("D2H_MB_DATA: DEEP SLEEP EXIT\n"));
+#ifdef PCIE_INB_DW
+ if (INBAND_DW_ENAB(bus)) {
+ DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
+ if (dhdpcie_bus_get_pcie_inband_dw_state(bus) ==
+ DW_DEVICE_DS_DISABLED_WAIT) {
+				/* Wake up only if someone is waiting in the
+				 * DW_DEVICE_DS_DISABLED_WAIT state; in this case
+				 * the waiter will change the state to
+				 * DW_DEVICE_DS_DEV_WAKE.
+				 */
+ bus->inband_ds_exit_host_cnt++;
+				/* Call wmb() to synchronize with the previous memory operations */
+ OSL_SMP_WMB();
+ bus->wait_for_ds_exit = 1;
+				/* Call another wmb() to make sure the event value is
+				 * updated before the waiter is woken up.
+				 */
+ OSL_SMP_WMB();
+ dhdpcie_bus_set_pcie_inband_dw_state(bus,
+ DW_DEVICE_DS_DEV_WAKE);
+ DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
+ dhd_os_ds_exit_wake(bus->dhd);
+ } else if (dhdpcie_bus_get_pcie_inband_dw_state(bus) ==
+ DW_DEVICE_DS_DEV_SLEEP) {
+ DHD_PCIE_INFO(("recvd unsolicited DS-EXIT"
+ " from dongle in DEV_SLEEP\n"));
+				/*
+				 * Unsolicited state change to DW_DEVICE_DS_DEV_WAKE if
+				 * D2H_DEV_DS_EXIT_NOTE is received in the DW_DEVICE_DS_DEV_SLEEP
+				 * state. This is needed when the dongle is woken by external
+				 * events like WoW, ping, etc.
+				 */
+ bus->inband_ds_exit_device_cnt++;
+ dhdpcie_bus_set_pcie_inband_dw_state(bus,
+ DW_DEVICE_DS_DEV_WAKE);
+ DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
+ } else {
+ DHD_PCIE_INFO(("D2H_MB_DATA: not in"
+ " DS_DISABLED_WAIT/DS_DEV_SLEEP\n"));
+ bus->inband_ds_exit_host_cnt++;
+ DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
+ }
+			/*
+			 * bus->deep_sleep is TRUE by default. It is set to FALSE when the
+			 * dhd iovar deep_sleep is fired with value 0 (a user request to not
+			 * enter deep sleep), so do not attempt to go to deep sleep when the
+			 * user has explicitly asked not to. bus->deep_sleep is set back to
+			 * TRUE when the dhd iovar deep_sleep is fired with value 1.
+			 */
+ if (bus->deep_sleep) {
+ dhd_bus_set_device_wake(bus, FALSE);
+ dhdpcie_set_dongle_deepsleep(bus, FALSE);
+ }
+ }
+#endif /* PCIE_INB_DW */
+ }
+ if (d2h_mb_data & D2HMB_DS_HOST_SLEEP_EXIT_ACK) {
+ /* what should we do */
+ DHD_PCIE_INFO(("D2H_MB_DATA: D0 ACK\n"));
+#ifdef PCIE_INB_DW
+ if (INBAND_DW_ENAB(bus)) {
+ DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
+ if (dhdpcie_bus_get_pcie_inband_dw_state(bus) ==
+ DW_DEVICE_HOST_WAKE_WAIT) {
+ dhdpcie_bus_set_pcie_inband_dw_state(bus, DW_DEVICE_DS_ACTIVE);
+ }
+ DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
+ }
+#endif /* PCIE_INB_DW */
+ }
+ if (d2h_mb_data & D2H_DEV_D3_ACK) {
+ /* what should we do */
+ DHD_INFO_HW4(("D2H_MB_DATA: D3 ACK\n"));
+ if (!bus->wait_for_d3_ack) {
+#if defined(DHD_HANG_SEND_UP_TEST)
+ if (bus->dhd->req_hang_type == HANG_REASON_D3_ACK_TIMEOUT) {
+ DHD_ERROR(("TEST HANG: Skip to process D3 ACK\n"));
+ } else {
+ dhd_bus_handle_d3_ack(bus);
+ }
+#else /* DHD_HANG_SEND_UP_TEST */
+ dhd_bus_handle_d3_ack(bus);
+#endif /* DHD_HANG_SEND_UP_TEST */
+ }
+ }
+
+exit:
+ if (MULTIBP_ENAB(bus->sih)) {
+ dhd_bus_pcie_pwr_req_clear(bus);
+ }
+}
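+
+/*
+ * D2H mailbox bits handled above, in the order they are checked:
+ *   D2H_DEV_FWHALT               - firmware trap (db7-timed or checkdied path)
+ *   D2H_DEV_DS_ENTER_REQ         - deep-sleep request, may be DS-ACKed
+ *   D2H_DEV_DS_EXIT_NOTE         - deep-sleep exit (solicited or unsolicited)
+ *   D2HMB_DS_HOST_SLEEP_EXIT_ACK - host-sleep exit acknowledgement
+ *   D2H_DEV_D3_ACK               - D3 acknowledgement
+ */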
+
+static void
+dhdpcie_handle_mb_data(dhd_bus_t *bus)
+{
+ uint32 d2h_mb_data = 0;
+ uint32 zero = 0;
+
+ if (MULTIBP_ENAB(bus->sih)) {
+ dhd_bus_pcie_pwr_req(bus);
+ }
+
+ if (bus->is_linkdown) {
+ DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__));
+ return;
+ }
+
+ dhd_bus_cmn_readshared(bus, &d2h_mb_data, D2H_MB_DATA, 0);
+ if (D2H_DEV_MB_INVALIDATED(d2h_mb_data)) {
+ DHD_ERROR(("%s: Invalid D2H_MB_DATA: 0x%08x\n",
+ __FUNCTION__, d2h_mb_data));
+ goto exit;
+ }
+
+ dhd_bus_cmn_writeshared(bus, &zero, sizeof(uint32), D2H_MB_DATA, 0);
+
+ DHD_INFO_HW4(("%s: D2H_MB_DATA: 0x%04x\n", __FUNCTION__, d2h_mb_data));
+ if (d2h_mb_data & D2H_DEV_FWHALT) {
+ DHD_ERROR(("FW trap has happened\n"));
+ dhdpcie_checkdied(bus, NULL, 0);
+ /* not ready yet dhd_os_ind_firmware_stall(bus->dhd); */
+#ifdef BCM_ROUTER_DHD
+ dhdpcie_handle_dongle_trap(bus);
+#endif
+ goto exit;
+ }
+ if (d2h_mb_data & D2H_DEV_DS_ENTER_REQ) {
+ /* what should we do */
+ DHD_PCIE_INFO(("%s: D2H_MB_DATA: DEEP SLEEP REQ\n", __FUNCTION__));
+ dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK);
+ DHD_PCIE_INFO(("%s: D2H_MB_DATA: sent DEEP SLEEP ACK\n", __FUNCTION__));
+ }
+ if (d2h_mb_data & D2H_DEV_DS_EXIT_NOTE) {
+ /* what should we do */
+ DHD_PCIE_INFO(("%s: D2H_MB_DATA: DEEP SLEEP EXIT\n", __FUNCTION__));
+ }
+ if (d2h_mb_data & D2H_DEV_D3_ACK) {
+ /* what should we do */
+ DHD_INFO_HW4(("%s: D2H_MB_DATA: D3 ACK\n", __FUNCTION__));
+ if (!bus->wait_for_d3_ack) {
+#if defined(DHD_HANG_SEND_UP_TEST)
+ if (bus->dhd->req_hang_type == HANG_REASON_D3_ACK_TIMEOUT) {
+ DHD_ERROR(("TEST HANG: Skip to process D3 ACK\n"));
+ } else {
+ dhd_bus_handle_d3_ack(bus);
+ }
+#else /* DHD_HANG_SEND_UP_TEST */
+ dhd_bus_handle_d3_ack(bus);
+#endif /* DHD_HANG_SEND_UP_TEST */
+ }
+ }
+
+exit:
+ if (MULTIBP_ENAB(bus->sih)) {
+ dhd_bus_pcie_pwr_req_clear(bus);
+ }
+}
+
+static void
+dhdpcie_read_handle_mb_data(dhd_bus_t *bus)
+{
+ uint32 d2h_mb_data = 0;
+ uint32 zero = 0;
+
+ if (MULTIBP_ENAB(bus->sih)) {
+ dhd_bus_pcie_pwr_req(bus);
+ }
+
+ dhd_bus_cmn_readshared(bus, &d2h_mb_data, D2H_MB_DATA, 0);
+ if (!d2h_mb_data) {
+ goto exit;
+ }
+
+ dhd_bus_cmn_writeshared(bus, &zero, sizeof(uint32), D2H_MB_DATA, 0);
+
+ dhd_bus_handle_mb_data(bus, d2h_mb_data);
+
+exit:
+ if (MULTIBP_ENAB(bus->sih)) {
+ dhd_bus_pcie_pwr_req_clear(bus);
+ }
+}
+
+#define DHD_SCHED_RETRY_DPC_DELAY_MS 100u
+
+static void
+dhd_bus_handle_intx_ahead_dma_indices(dhd_bus_t *bus)
+{
+ if (bus->d2h_intr_method == PCIE_MSI) {
+ DHD_PCIE_INFO(("%s: not required for msi\n", __FUNCTION__));
+ return;
+ }
+
+ if (bus->dhd->dma_d2h_ring_upd_support == FALSE) {
+ DHD_PCIE_INFO(("%s: not required for non-dma-indices\n", __FUNCTION__));
+ return;
+ }
+
+ if (dhd_query_bus_erros(bus->dhd)) {
+ return;
+ }
+
+#ifndef NDIS
+	/*
+	 * Skip the delayed dpc if the tasklet was scheduled from a non-ISR context.
+	 * From ISR context we disable the IRQ and enable it back at the end of the dpc;
+	 * hence, if the IRQ is not disabled, we can consider the tasklet as scheduled
+	 * from a non-ISR context.
+	 */
+ if (dhdpcie_irq_disabled(bus) == FALSE) {
+ DHD_PCIE_INFO(("%s: skip delayed dpc as tasklet is scheduled from non isr\n",
+ __FUNCTION__));
+ return;
+ }
+#endif /* NDIS */
+
+ if (DHD_CHK_BUS_LPS_D3_ACKED(bus)) {
+ DHD_PCIE_INFO(("%s: skip delayed dpc as d3 ack is received\n", __FUNCTION__));
+ return;
+ }
+
+ dhd_schedule_delayed_dpc_on_dpc_cpu(bus->dhd, DHD_SCHED_RETRY_DPC_DELAY_MS);
+ return;
+}
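+
+/*
+ * Rationale for the helper above: with INTx plus DMA'd completion indices,
+ * the legacy interrupt can reach the host before the DMA write of the
+ * updated indices is visible, so the DPC may find nothing to do. Rather
+ * than lose the event, a delayed DPC is scheduled on the DPC CPU
+ * DHD_SCHED_RETRY_DPC_DELAY_MS (100 ms) later to re-read the indices.
+ */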
+
+static bool
+dhdpcie_bus_process_mailbox_intr(dhd_bus_t *bus, uint32 intstatus)
+{
+ bool resched = FALSE;
+
+ if (MULTIBP_ENAB(bus->sih)) {
+ dhd_bus_pcie_pwr_req(bus);
+ }
+ if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
+ (bus->sih->buscorerev == 4)) {
+ /* Msg stream interrupt */
+ if (intstatus & I_BIT1) {
+ resched = dhdpci_bus_read_frames(bus);
+ } else if (intstatus & I_BIT0) {
+ /* do nothing for Now */
+ }
+ } else {
+ if (intstatus & (PCIE_MB_TOPCIE_FN0_0 | PCIE_MB_TOPCIE_FN0_1))
+ bus->api.handle_mb_data(bus);
+
+		/* The fact that we are here implies that dhdpcie_bus_intstatus()
+		 * returned a non-zero status after applying the current mask.
+		 * No further check is required; in fact bus->intstatus can be eliminated.
+		 * Both bus->intstatus and bus->intdis are shared between the isr and dpc.
+		 */
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+ if (pm_runtime_get(dhd_bus_to_dev(bus)) >= 0) {
+ resched = dhdpci_bus_read_frames(bus);
+ pm_runtime_mark_last_busy(dhd_bus_to_dev(bus));
+ pm_runtime_put_autosuspend(dhd_bus_to_dev(bus));
+ }
+#else
+ resched = dhdpci_bus_read_frames(bus);
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+ }
+
+ dhd_bus_handle_intx_ahead_dma_indices(bus);
+
+ if (MULTIBP_ENAB(bus->sih)) {
+ dhd_bus_pcie_pwr_req_clear(bus);
+ }
+ return resched;
+}
+
+#if defined(DHD_H2D_LOG_TIME_SYNC)
+static void
+dhdpci_bus_rte_log_time_sync_poll(dhd_bus_t *bus)
+{
+ unsigned long time_elapsed;
+
+ /* Poll for timeout value periodically */
+ if ((bus->dhd->busstate == DHD_BUS_DATA) &&
+ (bus->dhd->dhd_rte_time_sync_ms != 0) &&
+ DHD_CHK_BUS_NOT_IN_LPS(bus)) {
+		/*
+		 * XXX OSL_SYSUPTIME_US() overflow should not happen:
+		 * it is an unsigned 64-bit microsecond counter, which would
+		 * take about 213503982 days (~584,000 years) to overflow.
+		 */
+ time_elapsed = OSL_SYSUPTIME_US() - bus->dhd_rte_time_sync_count;
+		/* Comparison time is in milliseconds */
+ if ((time_elapsed / 1000) >= bus->dhd->dhd_rte_time_sync_ms) {
+			/*
+			 * It is fine if it has crossed the timeout value; no need to
+			 * adjust the elapsed time.
+			 */
+ bus->dhd_rte_time_sync_count += time_elapsed;
+
+			/* Schedule deferred work. The work function will send the IOVAR. */
+ dhd_h2d_log_time_sync_deferred_wq_schedule(bus->dhd);
+ }
+ }
+}
+#endif /* DHD_H2D_LOG_TIME_SYNC */
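+
+/*
+ * Example of the poll arithmetic above, with a hypothetical setting of
+ * dhd_rte_time_sync_ms = 1000: an elapsed OSL_SYSUPTIME_US() delta of
+ * 1500000 us gives 1500 ms >= 1000 ms, so the counter is advanced by the
+ * full elapsed time and the deferred IOVAR work is scheduled.
+ */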
+
+static bool
+dhdpci_bus_read_frames(dhd_bus_t *bus)
+{
+ bool more = FALSE;
+
+	/* First check if there is a FW trap */
+ if ((bus->api.fw_rev >= PCIE_SHARED_VERSION_6) &&
+ (bus->dhd->dongle_trap_data = dhd_prot_process_trapbuf(bus->dhd))) {
+#ifdef DNGL_AXI_ERROR_LOGGING
+ if (bus->dhd->axi_error) {
+ DHD_ERROR(("AXI Error happened\n"));
+ return FALSE;
+ }
+#endif /* DNGL_AXI_ERROR_LOGGING */
+ dhd_bus_handle_mb_data(bus, D2H_DEV_FWHALT);
+ return FALSE;
+ }
+
+ if (dhd_query_bus_erros(bus->dhd)) {
+		DHD_ERROR(("%s: detected bus errors. Hence do not process msg rings\n",
+			__FUNCTION__));
+ return FALSE;
+ }
+#ifdef DHD_DMA_INDICES_SEQNUM
+ dhd_prot_save_dmaidx(bus->dhd);
+#endif /* DHD_DMA_INDICES_SEQNUM */
+ /* There may be frames in both ctrl buf and data buf; check ctrl buf first */
+ dhd_prot_process_ctrlbuf(bus->dhd);
+ bus->last_process_ctrlbuf_time = OSL_LOCALTIME_NS();
+
+ /* Do not process rest of ring buf once bus enters low power state (D3_INFORM/D3_ACK) */
+ if (DHD_CHK_BUS_IN_LPS(bus)) {
+ DHD_RPM(("%s: Bus is in power save state (%d). "
+ "Skip processing rest of ring buffers.\n",
+ __FUNCTION__, bus->bus_low_power_state));
+ return FALSE;
+ }
+
+ /* update the flow ring cpls */
+ dhd_update_txflowrings(bus->dhd);
+ bus->last_process_flowring_time = OSL_LOCALTIME_NS();
+
+ /* With heavy TX traffic, we could get a lot of TxStatus
+ * so add bound
+ */
+#ifdef DHD_HP2P
+ more |= dhd_prot_process_msgbuf_txcpl(bus->dhd, dhd_txbound, DHD_HP2P_RING);
+#endif /* DHD_HP2P */
+ more |= dhd_prot_process_msgbuf_txcpl(bus->dhd, dhd_txbound, DHD_REGULAR_RING);
+ bus->last_process_txcpl_time = OSL_LOCALTIME_NS();
+
+ /* With heavy RX traffic, this routine potentially could spend some time
+ * processing RX frames without RX bound
+ */
+#ifdef DHD_HP2P
+ more |= dhd_prot_process_msgbuf_rxcpl(bus->dhd, dhd_rxbound, DHD_HP2P_RING);
+#endif /* DHD_HP2P */
+ more |= dhd_prot_process_msgbuf_rxcpl(bus->dhd, dhd_rxbound, DHD_REGULAR_RING);
+ bus->last_process_rxcpl_time = OSL_LOCALTIME_NS();
+
+ /* Process info ring completion messages */
+#ifdef EWP_EDL
+ if (!bus->dhd->dongle_edl_support)
+#endif
+ {
+ more |= dhd_prot_process_msgbuf_infocpl(bus->dhd, DHD_INFORING_BOUND);
+ bus->last_process_infocpl_time = OSL_LOCALTIME_NS();
+ }
+#ifdef EWP_EDL
+ else {
+ more |= dhd_prot_process_msgbuf_edl(bus->dhd);
+ bus->last_process_edl_time = OSL_LOCALTIME_NS();
+ }
+#endif /* EWP_EDL */
+
+#ifdef BCMINTERNAL
+#ifdef DHD_FWTRACE
+ /* Handle the firmware trace data in the logtrace kernel thread */
+ dhd_event_logtrace_enqueue_fwtrace(bus->dhd);
+#endif /* DHD_FWTRACE */
+#endif /* BCMINTERNAL */
+
+#ifdef BTLOG
+ /* Process info ring completion messages */
+ more |= dhd_prot_process_msgbuf_btlogcpl(bus->dhd, DHD_BTLOGRING_BOUND);
+#endif /* BTLOG */
+
+#ifdef IDLE_TX_FLOW_MGMT
+ if (bus->enable_idle_flowring_mgmt) {
+ /* Look for idle flow rings */
+ dhd_bus_check_idle_scan(bus);
+ }
+#endif /* IDLE_TX_FLOW_MGMT */
+
+ /* don't talk to the dongle if fw is about to be reloaded */
+ if (bus->dhd->hang_was_sent) {
+ more = FALSE;
+ }
+
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+	/* XXX : It seems that a linkdown can occur without notification;
+	 * in case reading shared memory failed, a recovery hang is needed
+	 */
+ if (bus->read_shm_fail) {
+ /* Read interrupt state once again to confirm linkdown */
+ int intstatus = si_corereg(bus->sih, bus->sih->buscoreidx,
+ bus->pcie_mailbox_int, 0, 0);
+ if (intstatus != (uint32)-1) {
+ DHD_ERROR(("%s: read SHM failed but intstatus is valid\n", __FUNCTION__));
+#ifdef DHD_FW_COREDUMP
+ if (bus->dhd->memdump_enabled) {
+ DHD_OS_WAKE_LOCK(bus->dhd);
+ bus->dhd->memdump_type = DUMP_TYPE_READ_SHM_FAIL;
+ dhd_bus_mem_dump(bus->dhd);
+ DHD_OS_WAKE_UNLOCK(bus->dhd);
+ }
+#endif /* DHD_FW_COREDUMP */
+ } else {
+ DHD_ERROR(("%s: Link is Down.\n", __FUNCTION__));
+#ifdef CONFIG_ARCH_MSM
+ bus->no_cfg_restore = 1;
+#endif /* CONFIG_ARCH_MSM */
+ bus->is_linkdown = 1;
+ }
+
+		/* XXX dhd_prot_debug_info_print() *has* to be invoked only after
+		 * bus->is_linkdown has been updated, so that the host does not
+		 * read any PCIe registers while the PCIe link is down.
+		 */
+ dhd_prot_debug_info_print(bus->dhd);
+ bus->dhd->hang_reason = HANG_REASON_PCIE_LINK_DOWN_EP_DETECT;
+#ifdef WL_CFGVENDOR_SEND_HANG_EVENT
+ copy_hang_info_linkdown(bus->dhd);
+#endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
+ dhd_os_send_hang_message(bus->dhd);
+ more = FALSE;
+ }
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+#if defined(DHD_H2D_LOG_TIME_SYNC)
+ dhdpci_bus_rte_log_time_sync_poll(bus);
+#endif /* DHD_H2D_LOG_TIME_SYNC */
+ return more;
+}
+
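+/* Sanity-check the dongle TCM: re-read the pciedev_shared_t pointer from the
+ * last word of device RAM, validate its range, and compare one field
+ * (console_addr) against the copy cached at init. A mismatch suggests the
+ * dongle has reset or the shared area has been corrupted.
+ */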
+bool
+dhdpcie_tcm_valid(dhd_bus_t *bus)
+{
+ uint32 addr = 0;
+ int rv;
+ uint32 shaddr = 0;
+ pciedev_shared_t sh;
+
+ shaddr = bus->dongle_ram_base + bus->ramsize - 4;
+
+ /* Read last word in memory to determine address of pciedev_shared structure */
+ addr = LTOH32(dhdpcie_bus_rtcm32(bus, shaddr));
+
+ if ((addr == 0) || (addr == bus->nvram_csm) || (addr < bus->dongle_ram_base) ||
+ (addr > shaddr)) {
+ DHD_ERROR(("%s: address (0x%08x) of pciedev_shared invalid addr\n",
+ __FUNCTION__, addr));
+ return FALSE;
+ }
+
+ /* Read hndrte_shared structure */
+ if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)&sh,
+ sizeof(pciedev_shared_t))) < 0) {
+ DHD_ERROR(("Failed to read PCIe shared struct with %d\n", rv));
+ return FALSE;
+ }
+
+ /* Compare any field in pciedev_shared_t */
+ if (sh.console_addr != bus->pcie_sh->console_addr) {
+ DHD_ERROR(("Contents of pciedev_shared_t structure are not matching.\n"));
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+static void
+dhdpcie_update_bus_api_revisions(uint32 firmware_api_version, uint32 host_api_version)
+{
+ snprintf(bus_api_revision, BUS_API_REV_STR_LEN, "\nBus API revisions:(FW rev%d)(DHD rev%d)",
+ firmware_api_version, host_api_version);
+ return;
+}
+
+static bool
+dhdpcie_check_firmware_compatible(uint32 firmware_api_version, uint32 host_api_version)
+{
+ bool retcode = FALSE;
+
+ DHD_INFO(("firmware api revision %d, host api revision %d\n",
+ firmware_api_version, host_api_version));
+
+ switch (firmware_api_version) {
+ case PCIE_SHARED_VERSION_7:
+ case PCIE_SHARED_VERSION_6:
+ case PCIE_SHARED_VERSION_5:
+ retcode = TRUE;
+ break;
+ default:
+ if (firmware_api_version <= host_api_version)
+ retcode = TRUE;
+ }
+ return retcode;
+}
+
+static int
+dhdpcie_readshared(dhd_bus_t *bus)
+{
+ uint32 addr = 0;
+ int rv, dma_indx_wr_buf, dma_indx_rd_buf;
+ uint32 shaddr = 0;
+ pciedev_shared_t *sh = bus->pcie_sh;
+ dhd_timeout_t tmo;
+ bool idma_en = FALSE;
+#if defined(PCIE_INB_DW)
+ bool d2h_inband_dw = FALSE;
+#endif /* defined(PCIE_INB_DW) */
+#if defined(PCIE_OOB)
+ bool d2h_no_oob_dw = FALSE;
+#endif /* defined(PCIE_OOB) */
+ uint32 timeout = MAX_READ_TIMEOUT;
+ uint32 elapsed;
+#ifndef CUSTOMER_HW4_DEBUG
+ uint32 intstatus;
+#endif /* !CUSTOMER_HW4_DEBUG */
+
+ if (MULTIBP_ENAB(bus->sih)) {
+ dhd_bus_pcie_pwr_req(bus);
+ }
+
+ shaddr = bus->dongle_ram_base + bus->ramsize - 4;
+
+#ifdef BCMSLTGT
+#ifdef BCMQT_HW
+ if (qt_dngl_timeout) {
+ timeout = qt_dngl_timeout * 1000;
+ }
+#endif /* BCMQT_HW */
+ DHD_ERROR(("%s: host timeout in QT/FPGA mode %ld ms\n",
+ __FUNCTION__, (timeout * htclkratio) / USEC_PER_MSEC));
+#endif /* BCMSLTGT */
+
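+	/* Handshake: after boot the dongle publishes the address of its
+	 * pciedev_shared_t structure in the last word of device RAM; poll that
+	 * word below until a plausible pointer (or a timeout) is seen.
+	 */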
+	/* start the readshared timeout timer (MAX_READ_TIMEOUT, nominally 5 seconds) */
+ dhd_timeout_start(&tmo, timeout);
+
+ while (((addr == 0) || (addr == bus->nvram_csm)) && !dhd_timeout_expired(&tmo)) {
+ /* Read last word in memory to determine address of pciedev_shared structure */
+ addr = LTOH32(dhdpcie_bus_rtcm32(bus, shaddr));
+#ifdef BCMINTERNAL
+#ifdef DHD_FWTRACE
+ /*
+ * FW might fill all trace buffers even before full DHD/FW initialization.
+ * poll for trace buffers to avoid circular buffer overflow.
+ */
+ process_fw_trace_data(bus->dhd);
+#endif /* DHD_FWTRACE */
+#endif /* BCMINTERNAL */
+ }
+
+ if (addr == (uint32)-1) {
+ DHD_ERROR(("%s: ##### pciedev shared address is 0xffffffff ####\n", __FUNCTION__));
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
+ bus->no_cfg_restore = 1;
+#endif /* CONFIG_ARCH_MSM */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+
+#ifdef CUSTOMER_HW4_DEBUG
+ bus->is_linkdown = 1;
+ DHD_ERROR(("%s : PCIe link might be down\n", __FUNCTION__));
+#else
+ dhd_bus_dump_imp_cfg_registers(bus);
+ dhd_bus_dump_dar_registers(bus);
+ /* Check the PCIe link status by reading intstatus register */
+ intstatus = si_corereg(bus->sih,
+ bus->sih->buscoreidx, bus->pcie_mailbox_int, 0, 0);
+ if (intstatus == (uint32)-1) {
+ DHD_ERROR(("%s : PCIe link might be down\n", __FUNCTION__));
+ bus->is_linkdown = TRUE;
+ } else {
+#if defined(DHD_FW_COREDUMP)
+ /* save core dump or write to a file */
+ if (bus->dhd->memdump_enabled) {
+ /* since dhdpcie_readshared() is invoked only during init or trap */
+ bus->dhd->memdump_type = bus->dhd->dongle_trap_data ?
+ DUMP_TYPE_DONGLE_TRAP : DUMP_TYPE_DONGLE_INIT_FAILURE;
+ dhdpcie_mem_dump(bus);
+ }
+#endif /* DHD_FW_COREDUMP */
+ }
+#endif /* CUSTOMER_HW4_DEBUG */
+ return BCME_ERROR;
+ }
+
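+	/* Validate the published pointer: it must be non-zero, must not equal
+	 * bus->nvram_csm, and must lie within dongle RAM below the handshake word.
+	 */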
+ if ((addr == 0) || (addr == bus->nvram_csm) || (addr < bus->dongle_ram_base) ||
+ (addr > shaddr)) {
+ elapsed = tmo.elapsed;
+#ifdef BCMSLTGT
+ elapsed *= htclkratio;
+#endif /* BCMSLTGT */
+ DHD_ERROR(("%s: address (0x%08x) of pciedev_shared invalid\n",
+ __FUNCTION__, addr));
+ DHD_ERROR(("%s: Waited %u usec, dongle is not ready\n", __FUNCTION__, tmo.elapsed));
+#ifdef DEBUG_DNGL_INIT_FAIL
+		if (addr != (uint32)-1) {	/* skip further PCIe reads if addr read back as all 1s */
+#ifdef CUSTOMER_HW4_DEBUG
+ bus->dhd->memdump_enabled = DUMP_MEMONLY;
+#endif /* CUSTOMER_HW4_DEBUG */
+ if (bus->dhd->memdump_enabled) {
+ bus->dhd->memdump_type = DUMP_TYPE_DONGLE_INIT_FAILURE;
+ dhdpcie_mem_dump(bus);
+ }
+ }
+#endif /* DEBUG_DNGL_INIT_FAIL */
+#if defined(NDIS)
+		/* This is a very common code path to catch f/w init failures.
+		 * Capture a socram dump.
+		 */
+ ASSERT(0);
+#endif /* defined(NDIS) */
+ return BCME_ERROR;
+ } else {
+ bus->rd_shared_pass_time = OSL_LOCALTIME_NS();
+ elapsed = tmo.elapsed;
+#ifdef BCMSLTGT
+ elapsed *= htclkratio;
+#endif /* BCMSLTGT */
+ bus->shared_addr = (ulong)addr;
+ DHD_ERROR(("### Total time ARM OOR to Readshared pass took %llu usec ###\n",
+ DIV_U64_BY_U32((bus->rd_shared_pass_time - bus->arm_oor_time),
+ NSEC_PER_USEC)));
+ DHD_ERROR(("%s: PCIe shared addr (0x%08x) read took %u usec "
+ "before dongle is ready\n", __FUNCTION__, addr, elapsed));
+ }
+
+#ifdef DHD_EFI
+ bus->dhd->pcie_readshared_done = 1;
+#endif
+ /* Read hndrte_shared structure */
+ if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)sh,
+ sizeof(pciedev_shared_t))) < 0) {
+ DHD_ERROR(("%s: Failed to read PCIe shared struct with %d\n", __FUNCTION__, rv));
+ return rv;
+ }
+
+	/* Endianness: the shared struct is little-endian as written by the dongle; convert to host byte order */
+ sh->flags = ltoh32(sh->flags);
+ sh->trap_addr = ltoh32(sh->trap_addr);
+ sh->assert_exp_addr = ltoh32(sh->assert_exp_addr);
+ sh->assert_file_addr = ltoh32(sh->assert_file_addr);
+ sh->assert_line = ltoh32(sh->assert_line);
+ sh->console_addr = ltoh32(sh->console_addr);
+ sh->msgtrace_addr = ltoh32(sh->msgtrace_addr);
+ sh->dma_rxoffset = ltoh32(sh->dma_rxoffset);
+ sh->rings_info_ptr = ltoh32(sh->rings_info_ptr);
+ sh->flags2 = ltoh32(sh->flags2);
+
+ /* load bus console address */
+ bus->console_addr = sh->console_addr;
+
+ /* Read the dma rx offset */
+ bus->dma_rxoffset = bus->pcie_sh->dma_rxoffset;
+ dhd_prot_rx_dataoffset(bus->dhd, bus->dma_rxoffset);
+
+ DHD_INFO(("%s: DMA RX offset from shared Area %d\n", __FUNCTION__, bus->dma_rxoffset));
+
+ bus->api.fw_rev = sh->flags & PCIE_SHARED_VERSION_MASK;
+ if (!(dhdpcie_check_firmware_compatible(bus->api.fw_rev, PCIE_SHARED_VERSION)))
+ {
+ DHD_ERROR(("%s: pcie_shared version %d in dhd "
+ "is older than pciedev_shared version %d in dongle\n",
+ __FUNCTION__, PCIE_SHARED_VERSION,
+ bus->api.fw_rev));
+ return BCME_ERROR;
+ }
+ dhdpcie_update_bus_api_revisions(bus->api.fw_rev, PCIE_SHARED_VERSION);
+
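+	/* Ring r/w indices are exchanged as either 2-byte or 4-byte quantities;
+	 * the dongle advertises which via PCIE_SHARED_2BYTE_INDICES, and all
+	 * index array sizing below scales with rw_index_sz.
+	 */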
+ bus->rw_index_sz = (sh->flags & PCIE_SHARED_2BYTE_INDICES) ?
+ sizeof(uint16) : sizeof(uint32);
+ DHD_INFO(("%s: Dongle advertizes %d size indices\n",
+ __FUNCTION__, bus->rw_index_sz));
+
+#ifdef IDLE_TX_FLOW_MGMT
+ if (sh->flags & PCIE_SHARED_IDLE_FLOW_RING) {
+ DHD_ERROR(("%s: FW Supports IdleFlow ring managment!\n",
+ __FUNCTION__));
+ bus->enable_idle_flowring_mgmt = TRUE;
+ }
+#endif /* IDLE_TX_FLOW_MGMT */
+
+#ifdef PCIE_OOB
+ bus->dhd->d2h_no_oob_dw = (sh->flags & PCIE_SHARED_NO_OOB_DW) ? TRUE : FALSE;
+ d2h_no_oob_dw = bus->dhd->d2h_no_oob_dw;
+#endif /* PCIE_OOB */
+
+#ifdef PCIE_INB_DW
+ bus->dhd->d2h_inband_dw = (sh->flags & PCIE_SHARED_INBAND_DS) ? TRUE : FALSE;
+ d2h_inband_dw = bus->dhd->d2h_inband_dw;
+#endif /* PCIE_INB_DW */
+
+#if defined(PCIE_INB_DW)
+ DHD_ERROR(("FW supports Inband dw ? %s\n",
+ d2h_inband_dw ? "Y":"N"));
+#endif /* defined(PCIE_INB_DW) */
+
+#if defined(PCIE_OOB)
+ DHD_ERROR(("FW supports oob dw ? %s\n",
+ d2h_no_oob_dw ? "N":"Y"));
+#endif /* defined(PCIE_OOB) */
+
+ if (IDMA_CAPABLE(bus)) {
+ if (bus->sih->buscorerev == 23) {
+#ifdef PCIE_INB_DW
+ if (bus->dhd->d2h_inband_dw)
+ {
+ idma_en = TRUE;
+ }
+#endif /* PCIE_INB_DW */
+ } else {
+ idma_en = TRUE;
+ }
+ }
+
+ if (idma_en) {
+ bus->dhd->idma_enable = (sh->flags & PCIE_SHARED_IDMA) ? TRUE : FALSE;
+ bus->dhd->ifrm_enable = (sh->flags & PCIE_SHARED_IFRM) ? TRUE : FALSE;
+ }
+
+ bus->dhd->d2h_sync_mode = sh->flags & PCIE_SHARED_D2H_SYNC_MODE_MASK;
+
+ bus->dhd->dar_enable = (sh->flags & PCIE_SHARED_DAR) ? TRUE : FALSE;
+
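+	/* With PCIE_SHARED_DMA_INDEX the dongle DMA's the ring read/write index
+	 * arrays into host memory (and picks up host updates the same way),
+	 * instead of the host accessing the indices in dongle TCM over PCIe.
+	 */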
+ /* Does the FW support DMA'ing r/w indices */
+ if (sh->flags & PCIE_SHARED_DMA_INDEX) {
+ if (!bus->dhd->dma_ring_upd_overwrite) {
+#if defined(BCM_ROUTER_DHD)
+ /* Router platform does not use IOV_DMA_RINGINDICES */
+ if (sh->flags & PCIE_SHARED_2BYTE_INDICES)
+#endif /* BCM_ROUTER_DHD */
+ {
+ if (!IFRM_ENAB(bus->dhd)) {
+ bus->dhd->dma_h2d_ring_upd_support = TRUE;
+ }
+ bus->dhd->dma_d2h_ring_upd_support = TRUE;
+ }
+ }
+
+ if (bus->dhd->dma_d2h_ring_upd_support && bus->dhd->d2h_sync_mode) {
+ DHD_ERROR(("%s: ERROR COMBO: sync (0x%x) enabled for DMA indices\n",
+ __FUNCTION__, bus->dhd->d2h_sync_mode));
+ }
+
+ DHD_INFO(("%s: Host support DMAing indices: H2D:%d - D2H:%d. FW supports it\n",
+ __FUNCTION__,
+ (bus->dhd->dma_h2d_ring_upd_support ? 1 : 0),
+ (bus->dhd->dma_d2h_ring_upd_support ? 1 : 0)));
+ } else if (!(sh->flags & PCIE_SHARED_D2H_SYNC_MODE_MASK)) {
+ DHD_ERROR(("%s FW has to support either dma indices or d2h sync\n",
+ __FUNCTION__));
+ return BCME_UNSUPPORTED;
+ } else {
+ bus->dhd->dma_h2d_ring_upd_support = FALSE;
+ bus->dhd->dma_d2h_ring_upd_support = FALSE;
+ }
+
+ /* Does the firmware support fast delete ring? */
+ if (sh->flags2 & PCIE_SHARED2_FAST_DELETE_RING) {
+ DHD_INFO(("%s: Firmware supports fast delete ring\n",
+ __FUNCTION__));
+ bus->dhd->fast_delete_ring_support = TRUE;
+ } else {
+ DHD_INFO(("%s: Firmware does not support fast delete ring\n",
+ __FUNCTION__));
+ bus->dhd->fast_delete_ring_support = FALSE;
+ }
+
+ /* get ring_info, ring_state and mb data ptrs and store the addresses in bus structure */
+ {
+ ring_info_t ring_info;
+
+ /* boundary check */
+ if ((sh->rings_info_ptr < bus->dongle_ram_base) || (sh->rings_info_ptr > shaddr)) {
+ DHD_ERROR(("%s: rings_info_ptr is invalid 0x%x, skip reading ring info\n",
+ __FUNCTION__, sh->rings_info_ptr));
+ return BCME_ERROR;
+ }
+
+ if ((rv = dhdpcie_bus_membytes(bus, FALSE, sh->rings_info_ptr,
+ (uint8 *)&ring_info, sizeof(ring_info_t))) < 0)
+ return rv;
+
+ bus->h2d_mb_data_ptr_addr = ltoh32(sh->h2d_mb_data_ptr);
+ bus->d2h_mb_data_ptr_addr = ltoh32(sh->d2h_mb_data_ptr);
+
+ if (bus->api.fw_rev >= PCIE_SHARED_VERSION_6) {
+ bus->max_tx_flowrings = ltoh16(ring_info.max_tx_flowrings);
+ bus->max_submission_rings = ltoh16(ring_info.max_submission_queues);
+ bus->max_completion_rings = ltoh16(ring_info.max_completion_rings);
+ bus->max_cmn_rings = bus->max_submission_rings - bus->max_tx_flowrings;
+ bus->api.handle_mb_data = dhdpcie_read_handle_mb_data;
+ bus->use_mailbox = sh->flags & PCIE_SHARED_USE_MAILBOX;
+ }
+ else {
+ bus->max_tx_flowrings = ltoh16(ring_info.max_tx_flowrings);
+ bus->max_submission_rings = bus->max_tx_flowrings;
+ bus->max_completion_rings = BCMPCIE_D2H_COMMON_MSGRINGS;
+ bus->max_cmn_rings = BCMPCIE_H2D_COMMON_MSGRINGS;
+ bus->api.handle_mb_data = dhdpcie_handle_mb_data;
+ bus->use_mailbox = TRUE;
+ }
+ if (bus->max_completion_rings == 0) {
+ DHD_ERROR(("dongle completion rings are invalid %d\n",
+ bus->max_completion_rings));
+ return BCME_ERROR;
+ }
+ if (bus->max_submission_rings == 0) {
+ DHD_ERROR(("dongle submission rings are invalid %d\n",
+ bus->max_submission_rings));
+ return BCME_ERROR;
+ }
+ if (bus->max_tx_flowrings == 0) {
+ DHD_ERROR(("dongle txflow rings are invalid %d\n", bus->max_tx_flowrings));
+ return BCME_ERROR;
+ }
+
+		/* If both FW and host support DMA'ing indices, allocate memory and notify FW.
+		 * The max submission ring count is read from the FW-initialized ring_info.
+ */
+ if (bus->dhd->dma_h2d_ring_upd_support || IDMA_ENAB(bus->dhd)) {
+ dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
+ H2D_DMA_INDX_WR_BUF, bus->max_submission_rings);
+ dma_indx_rd_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
+ D2H_DMA_INDX_RD_BUF, bus->max_completion_rings);
+
+ if ((dma_indx_wr_buf != BCME_OK) || (dma_indx_rd_buf != BCME_OK)) {
+ DHD_ERROR(("%s: Failed to allocate memory for dma'ing h2d indices"
+ "Host will use w/r indices in TCM\n",
+ __FUNCTION__));
+ bus->dhd->dma_h2d_ring_upd_support = FALSE;
+ bus->dhd->idma_enable = FALSE;
+ }
+ }
+
+ if (bus->dhd->dma_d2h_ring_upd_support) {
+ dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
+ D2H_DMA_INDX_WR_BUF, bus->max_completion_rings);
+ dma_indx_rd_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
+ H2D_DMA_INDX_RD_BUF, bus->max_submission_rings);
+
+ if ((dma_indx_wr_buf != BCME_OK) || (dma_indx_rd_buf != BCME_OK)) {
+ DHD_ERROR(("%s: Failed to allocate memory for dma'ing d2h indices"
+ "Host will use w/r indices in TCM\n",
+ __FUNCTION__));
+ bus->dhd->dma_d2h_ring_upd_support = FALSE;
+ }
+ }
+#ifdef DHD_DMA_INDICES_SEQNUM
+ if (bus->dhd->dma_d2h_ring_upd_support) {
+ uint32 bufsz = bus->rw_index_sz * bus->max_completion_rings;
+ if (dhd_prot_dma_indx_copybuf_init(bus->dhd, bufsz, D2H_DMA_INDX_WR_BUF)
+ != BCME_OK) {
+ return BCME_NOMEM;
+ }
+ bufsz = bus->rw_index_sz * bus->max_submission_rings;
+ if (dhd_prot_dma_indx_copybuf_init(bus->dhd, bufsz, H2D_DMA_INDX_RD_BUF)
+ != BCME_OK) {
+ return BCME_NOMEM;
+ }
+ }
+#endif /* DHD_DMA_INDICES_SEQNUM */
+ if (IFRM_ENAB(bus->dhd)) {
+ dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
+ H2D_IFRM_INDX_WR_BUF, bus->max_tx_flowrings);
+
+ if (dma_indx_wr_buf != BCME_OK) {
+ DHD_ERROR(("%s: Failed to alloc memory for Implicit DMA\n",
+ __FUNCTION__));
+ bus->dhd->ifrm_enable = FALSE;
+ }
+ }
+
+ /* read ringmem and ringstate ptrs from shared area and store in host variables */
+ dhd_fillup_ring_sharedptr_info(bus, &ring_info);
+ if (dhd_msg_level & DHD_INFO_VAL) {
+ bcm_print_bytes("ring_info_raw", (uchar *)&ring_info, sizeof(ring_info_t));
+ }
+ DHD_INFO(("%s: ring_info\n", __FUNCTION__));
+
+ DHD_ERROR(("%s: max H2D queues %d\n",
+ __FUNCTION__, ltoh16(ring_info.max_tx_flowrings)));
+
+ DHD_INFO(("mail box address\n"));
+ DHD_INFO(("%s: h2d_mb_data_ptr_addr 0x%04x\n",
+ __FUNCTION__, bus->h2d_mb_data_ptr_addr));
+ DHD_INFO(("%s: d2h_mb_data_ptr_addr 0x%04x\n",
+ __FUNCTION__, bus->d2h_mb_data_ptr_addr));
+ }
+
+ DHD_INFO(("%s: d2h_sync_mode 0x%08x\n",
+ __FUNCTION__, bus->dhd->d2h_sync_mode));
+
+ bus->dhd->d2h_hostrdy_supported =
+ ((sh->flags & PCIE_SHARED_HOSTRDY_SUPPORT) == PCIE_SHARED_HOSTRDY_SUPPORT);
+
+ bus->dhd->ext_trap_data_supported =
+ ((sh->flags2 & PCIE_SHARED2_EXTENDED_TRAP_DATA) == PCIE_SHARED2_EXTENDED_TRAP_DATA);
+
+ if ((sh->flags2 & PCIE_SHARED2_TXSTATUS_METADATA) == 0)
+ bus->dhd->pcie_txs_metadata_enable = 0;
+
+ if (sh->flags2 & PCIE_SHARED2_TRAP_ON_HOST_DB7) {
+ memset(&bus->dhd->db7_trap, 0, sizeof(bus->dhd->db7_trap));
+ bus->dhd->db7_trap.fw_db7w_trap = 1;
+ /* add an option to let the user select ?? */
+ bus->dhd->db7_trap.db7_magic_number = PCIE_DB7_MAGIC_NUMBER_DPC_TRAP;
+ }
+
+#ifdef BTLOG
+ bus->dhd->bt_logging = (sh->flags2 & PCIE_SHARED2_BT_LOGGING) ? TRUE : FALSE;
+	/* XXX: A WAR is needed for dongles with BTLOG to stay backwards compatible with existing
+	 * DHD. The issue is that existing DHD does not compute the INFO cmpl ringid correctly once
+	 * a BTLOG dongle increases max_submission_rings, resulting in overwriting a ring in the
+	 * dongle. When the dongle enables submit_count_WAR, it implies that the submit ring count
+	 * has been incremented in the dongle but will not be reflected in max_submission_rings.
+	 */
+ bus->dhd->submit_count_WAR = (sh->flags2 & PCIE_SHARED2_SUBMIT_COUNT_WAR) ? TRUE : FALSE;
+ DHD_ERROR(("FW supports BT logging ? %s \n", bus->dhd->bt_logging ? "Y" : "N"));
+#endif /* BTLOG */
+
+#ifdef SNAPSHOT_UPLOAD
+ bus->dhd->snapshot_upload = (sh->flags2 & PCIE_SHARED2_SNAPSHOT_UPLOAD) ? TRUE : FALSE;
+ DHD_ERROR(("FW supports snapshot upload ? %s \n", bus->dhd->snapshot_upload ? "Y" : "N"));
+#endif /* SNAPSHOT_UPLOAD */
+
+#ifdef D2H_MINIDUMP
+ bus->d2h_minidump = (sh->flags2 & PCIE_SHARED2_FW_SMALL_MEMDUMP) ? TRUE : FALSE;
+ DHD_ERROR(("FW supports minidump ? %s \n", bus->d2h_minidump ? "Y" : "N"));
+ if (bus->d2h_minidump_override) {
+ bus->d2h_minidump = FALSE;
+ }
+ DHD_ERROR(("d2h_minidump: %d d2h_minidump_override: %d\n",
+ bus->d2h_minidump, bus->d2h_minidump_override));
+#endif /* D2H_MINIDUMP */
+
+ bus->dhd->hscb_enable =
+ (sh->flags2 & PCIE_SHARED2_HSCB) == PCIE_SHARED2_HSCB;
+
+#ifdef EWP_EDL
+ if (host_edl_support) {
+ bus->dhd->dongle_edl_support = (sh->flags2 & PCIE_SHARED2_EDL_RING) ? TRUE : FALSE;
+ DHD_ERROR(("Dongle EDL support: %u\n", bus->dhd->dongle_edl_support));
+ }
+#endif /* EWP_EDL */
+
+ bus->dhd->debug_buf_dest_support =
+ (sh->flags2 & PCIE_SHARED2_DEBUG_BUF_DEST) ? TRUE : FALSE;
+ DHD_ERROR(("FW supports debug buf dest ? %s \n",
+ bus->dhd->debug_buf_dest_support ? "Y" : "N"));
+
+#ifdef DHD_HP2P
+ if (bus->dhd->hp2p_enable) {
+ bus->dhd->hp2p_ts_capable =
+ (sh->flags2 & PCIE_SHARED2_PKT_TIMESTAMP) == PCIE_SHARED2_PKT_TIMESTAMP;
+ bus->dhd->hp2p_capable =
+ (sh->flags2 & PCIE_SHARED2_HP2P) == PCIE_SHARED2_HP2P;
+ bus->dhd->hp2p_capable |= bus->dhd->hp2p_ts_capable;
+
+ DHD_ERROR(("FW supports HP2P ? %s\n",
+ bus->dhd->hp2p_capable ? "Y" : "N"));
+
+ if (bus->dhd->hp2p_capable) {
+ bus->dhd->pkt_thresh = HP2P_PKT_THRESH;
+ bus->dhd->pkt_expiry = HP2P_PKT_EXPIRY;
+ bus->dhd->time_thresh = HP2P_TIME_THRESH;
+ for (addr = 0; addr < MAX_HP2P_FLOWS; addr++) {
+ hp2p_info_t *hp2p_info = &bus->dhd->hp2p_info[addr];
+ hp2p_info->hrtimer_init = FALSE;
+ hrtimer_init(&hp2p_info->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hp2p_info->timer.function = &dhd_hp2p_write;
+ }
+ }
+ }
+#endif /* DHD_HP2P */
+
+#ifdef DHD_DB0TS
+ bus->dhd->db0ts_capable =
+ (sh->flags & PCIE_SHARED_TIMESTAMP_DB0) == PCIE_SHARED_TIMESTAMP_DB0;
+#endif /* DHD_DB0TS */
+
+ if (MULTIBP_ENAB(bus->sih)) {
+ dhd_bus_pcie_pwr_req_clear(bus);
+
+ /*
+ * WAR to fix ARM cold boot;
+ * De-assert WL domain in DAR
+ */
+ if (bus->sih->buscorerev >= 68) {
+ dhd_bus_pcie_pwr_req_wl_domain(bus,
+ DAR_PCIE_PWR_CTRL((bus->sih)->buscorerev), FALSE);
+ }
+ }
+ return BCME_OK;
+} /* dhdpcie_readshared */
+
+/** Read ring mem and ring state ptr info from shared memory area in device memory */
+static void
+dhd_fillup_ring_sharedptr_info(dhd_bus_t *bus, ring_info_t *ring_info)
+{
+ uint16 i = 0;
+ uint16 j = 0;
+ uint32 tcm_memloc;
+ uint32 d2h_w_idx_ptr, d2h_r_idx_ptr, h2d_w_idx_ptr, h2d_r_idx_ptr;
+ uint16 max_tx_flowrings = bus->max_tx_flowrings;
+
+ /* Ring mem ptr info */
+	/* Allocated in the order
+ H2D_MSGRING_CONTROL_SUBMIT 0
+ H2D_MSGRING_RXPOST_SUBMIT 1
+ D2H_MSGRING_CONTROL_COMPLETE 2
+ D2H_MSGRING_TX_COMPLETE 3
+ D2H_MSGRING_RX_COMPLETE 4
+ */
+
+ {
+ /* ringmemptr holds start of the mem block address space */
+ tcm_memloc = ltoh32(ring_info->ringmem_ptr);
+
+ /* Find out ringmem ptr for each ring common ring */
+ for (i = 0; i <= BCMPCIE_COMMON_MSGRING_MAX_ID; i++) {
+ bus->ring_sh[i].ring_mem_addr = tcm_memloc;
+ /* Update mem block */
+ tcm_memloc = tcm_memloc + sizeof(ring_mem_t);
+ DHD_INFO(("%s: ring id %d ring mem addr 0x%04x \n", __FUNCTION__,
+ i, bus->ring_sh[i].ring_mem_addr));
+ }
+ }
+
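+	/* The d2h/h2d write and read index arrays are laid out contiguously with
+	 * one rw_index_sz entry per ring, so each pointer below simply advances
+	 * by rw_index_sz as rings are assigned.
+	 */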
+ /* Ring state mem ptr info */
+ {
+ d2h_w_idx_ptr = ltoh32(ring_info->d2h_w_idx_ptr);
+ d2h_r_idx_ptr = ltoh32(ring_info->d2h_r_idx_ptr);
+ h2d_w_idx_ptr = ltoh32(ring_info->h2d_w_idx_ptr);
+ h2d_r_idx_ptr = ltoh32(ring_info->h2d_r_idx_ptr);
+
+ /* Store h2d common ring write/read pointers */
+ for (i = 0; i < BCMPCIE_H2D_COMMON_MSGRINGS; i++) {
+ bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr;
+ bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr;
+
+ /* update mem block */
+ h2d_w_idx_ptr = h2d_w_idx_ptr + bus->rw_index_sz;
+ h2d_r_idx_ptr = h2d_r_idx_ptr + bus->rw_index_sz;
+
+ DHD_INFO(("%s: h2d w/r : idx %d write %x read %x \n", __FUNCTION__, i,
+ bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r));
+ }
+
+ /* Store d2h common ring write/read pointers */
+ for (j = 0; j < BCMPCIE_D2H_COMMON_MSGRINGS; j++, i++) {
+ bus->ring_sh[i].ring_state_w = d2h_w_idx_ptr;
+ bus->ring_sh[i].ring_state_r = d2h_r_idx_ptr;
+
+ /* update mem block */
+ d2h_w_idx_ptr = d2h_w_idx_ptr + bus->rw_index_sz;
+ d2h_r_idx_ptr = d2h_r_idx_ptr + bus->rw_index_sz;
+
+ DHD_INFO(("%s: d2h w/r : idx %d write %x read %x \n", __FUNCTION__, i,
+ bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r));
+ }
+
+ /* Store txflow ring write/read pointers */
+ if (bus->api.fw_rev < PCIE_SHARED_VERSION_6) {
+ max_tx_flowrings -= BCMPCIE_H2D_COMMON_MSGRINGS;
+ } else {
+ /* Account for Debug info h2d ring located after the last tx flow ring */
+ max_tx_flowrings = max_tx_flowrings + 1;
+ }
+ for (j = 0; j < max_tx_flowrings; i++, j++)
+ {
+ bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr;
+ bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr;
+
+ /* update mem block */
+ h2d_w_idx_ptr = h2d_w_idx_ptr + bus->rw_index_sz;
+ h2d_r_idx_ptr = h2d_r_idx_ptr + bus->rw_index_sz;
+
+ DHD_INFO(("%s: FLOW Rings h2d w/r : idx %d write %x read %x \n",
+ __FUNCTION__, i,
+ bus->ring_sh[i].ring_state_w,
+ bus->ring_sh[i].ring_state_r));
+ }
+#ifdef DHD_HP2P
+ /* store wr/rd pointers for debug info completion or EDL ring and hp2p rings */
+ for (j = 0; j <= MAX_HP2P_CMPL_RINGS; i++, j++) {
+ bus->ring_sh[i].ring_state_w = d2h_w_idx_ptr;
+ bus->ring_sh[i].ring_state_r = d2h_r_idx_ptr;
+ d2h_w_idx_ptr = d2h_w_idx_ptr + bus->rw_index_sz;
+ d2h_r_idx_ptr = d2h_r_idx_ptr + bus->rw_index_sz;
+ DHD_INFO(("d2h w/r : idx %d write %x read %x \n", i,
+ bus->ring_sh[i].ring_state_w,
+ bus->ring_sh[i].ring_state_r));
+ }
+#else
+ /* store wr/rd pointers for debug info completion or EDL ring */
+ bus->ring_sh[i].ring_state_w = d2h_w_idx_ptr;
+ bus->ring_sh[i].ring_state_r = d2h_r_idx_ptr;
+ d2h_w_idx_ptr = d2h_w_idx_ptr + bus->rw_index_sz;
+ d2h_r_idx_ptr = d2h_r_idx_ptr + bus->rw_index_sz;
+ DHD_INFO(("d2h w/r : idx %d write %x read %x \n", i,
+ bus->ring_sh[i].ring_state_w,
+ bus->ring_sh[i].ring_state_r));
+#endif /* DHD_HP2P */
+ }
+} /* dhd_fillup_ring_sharedptr_info */
+
+/**
+ * Initialize bus module: prepare for communication with the dongle. Called after downloading
+ * firmware into the dongle.
+ */
+int dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex)
+{
+ dhd_bus_t *bus = dhdp->bus;
+ int ret = 0;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ ASSERT(bus->dhd);
+ if (!bus->dhd)
+ return 0;
+
+ dhd_bus_pcie_pwr_req_clear_reload_war(bus);
+
+ if (MULTIBP_ENAB(bus->sih)) {
+ dhd_bus_pcie_pwr_req(bus);
+ }
+
+ /* Configure AER registers to log the TLP header */
+ dhd_bus_aer_config(bus);
+
+ /* Make sure we're talking to the core. */
+ bus->reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0);
+ ASSERT(bus->reg != NULL);
+
+	/* before opening up the bus for data transfer, check if the shared area is intact */
+ ret = dhdpcie_readshared(bus);
+ if (ret < 0) {
+ DHD_ERROR(("%s :Shared area read failed \n", __FUNCTION__));
+ goto exit;
+ }
+
+ /* Make sure we're talking to the core. */
+ bus->reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0);
+ ASSERT(bus->reg != NULL);
+
+ /* Set bus state according to enable result */
+ dhdp->busstate = DHD_BUS_DATA;
+ DHD_SET_BUS_NOT_IN_LPS(bus);
+ dhdp->dhd_bus_busy_state = 0;
+
+ /* D11 status via PCIe completion header */
+ if ((ret = dhdpcie_init_d11status(bus)) < 0) {
+ goto exit;
+ }
+
+#if defined(OEM_ANDROID) || defined(LINUX)
+ if (!dhd_download_fw_on_driverload)
+ dhd_dpc_enable(bus->dhd);
+#endif /* OEM_ANDROID || LINUX */
+ /* Enable the interrupt after device is up */
+ dhdpcie_bus_intr_enable(bus);
+
+ DHD_ERROR(("%s: Enabling bus->intr_enabled\n", __FUNCTION__));
+ bus->intr_enabled = TRUE;
+
+ /* XXX These need to change w/API updates */
+ /* bcmsdh_intr_unmask(bus->sdh); */
+#ifdef DHD_PCIE_RUNTIMEPM
+ bus->idlecount = 0;
+ bus->idletime = (int32)MAX_IDLE_COUNT;
+ init_waitqueue_head(&bus->rpm_queue);
+ mutex_init(&bus->pm_lock);
+#else
+ bus->idletime = 0;
+#endif /* DHD_PCIE_RUNTIMEPM */
+#ifdef PCIE_INB_DW
+ bus->skip_ds_ack = FALSE;
+ /* Initialize the lock to serialize Device Wake Inband activities */
+ if (!bus->inb_lock) {
+ bus->inb_lock = osl_spin_lock_init(bus->dhd->osh);
+ }
+#endif
+
+ /* XXX Temp errnum workaround: return ok, caller checks bus state */
+
+	/* Make use_d0_inform TRUE for Rev 5 and below for backward compatibility */
+ if (bus->api.fw_rev < PCIE_SHARED_VERSION_6) {
+ bus->use_d0_inform = TRUE;
+ } else {
+ bus->use_d0_inform = FALSE;
+ }
+
+ bus->hostready_count = 0;
+
+exit:
+ if (MULTIBP_ENAB(bus->sih)) {
+ dhd_bus_pcie_pwr_req_clear(bus);
+ }
+ return ret;
+}
+
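+/* Zero the handshake word (last word of dongle RAM) so that a stale shared-area
+ * pointer from a previous run is not mistaken for a freshly published one.
+ */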
+static void
+dhdpcie_init_shared_addr(dhd_bus_t *bus)
+{
+ uint32 addr = 0;
+ uint32 val = 0;
+ addr = bus->dongle_ram_base + bus->ramsize - 4;
+#ifdef DHD_PCIE_RUNTIMEPM
+ dhdpcie_runtime_bus_wake(bus->dhd, TRUE, __builtin_return_address(0));
+#endif /* DHD_PCIE_RUNTIMEPM */
+ dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val));
+}
+
+int
+dhdpcie_chipmatch(uint16 vendor, uint16 device)
+{
+ if (vendor != PCI_VENDOR_ID_BROADCOM) {
+#ifndef DHD_EFI
+ DHD_ERROR(("%s: Unsupported vendor %x device %x\n", __FUNCTION__,
+ vendor, device));
+#endif /* DHD_EFI */
+ return (-ENODEV);
+ }
+
+ switch (device) {
+ case BCM4345_CHIP_ID:
+ case BCM43454_CHIP_ID:
+ case BCM43455_CHIP_ID:
+ case BCM43457_CHIP_ID:
+ case BCM43458_CHIP_ID:
+ case BCM4350_D11AC_ID:
+ case BCM4350_D11AC2G_ID:
+ case BCM4350_D11AC5G_ID:
+ case BCM4350_CHIP_ID:
+ case BCM4354_D11AC_ID:
+ case BCM4354_D11AC2G_ID:
+ case BCM4354_D11AC5G_ID:
+ case BCM4354_CHIP_ID:
+ case BCM4356_D11AC_ID:
+ case BCM4356_D11AC2G_ID:
+ case BCM4356_D11AC5G_ID:
+ case BCM4356_CHIP_ID:
+ case BCM4371_D11AC_ID:
+ case BCM4371_D11AC2G_ID:
+ case BCM4371_D11AC5G_ID:
+ case BCM4371_CHIP_ID:
+ case BCM4345_D11AC_ID:
+ case BCM4345_D11AC2G_ID:
+ case BCM4345_D11AC5G_ID:
+ case BCM43452_D11AC_ID:
+ case BCM43452_D11AC2G_ID:
+ case BCM43452_D11AC5G_ID:
+ case BCM4335_D11AC_ID:
+ case BCM4335_D11AC2G_ID:
+ case BCM4335_D11AC5G_ID:
+ case BCM4335_CHIP_ID:
+ case BCM43602_D11AC_ID:
+ case BCM43602_D11AC2G_ID:
+ case BCM43602_D11AC5G_ID:
+ case BCM43602_CHIP_ID:
+ case BCM43569_D11AC_ID:
+ case BCM43569_D11AC2G_ID:
+ case BCM43569_D11AC5G_ID:
+ case BCM43569_CHIP_ID:
+ /* XXX: For 4358, BCM4358_CHIP_ID is not checked intentionally as
+ * this is not a real chip id, but propagated from the OTP.
+ */
+ case BCM4358_D11AC_ID:
+ case BCM4358_D11AC2G_ID:
+ case BCM4358_D11AC5G_ID:
+ case BCM4349_D11AC_ID:
+ case BCM4349_D11AC2G_ID:
+ case BCM4349_D11AC5G_ID:
+ case BCM4355_D11AC_ID:
+ case BCM4355_D11AC2G_ID:
+ case BCM4355_D11AC5G_ID:
+ case BCM4355_CHIP_ID:
+ /* XXX: BCM4359_CHIP_ID is not checked intentionally as this is
+		 * not a real chip id, but propagated from the OTP.
+ */
+ case BCM4359_D11AC_ID:
+ case BCM4359_D11AC2G_ID:
+ case BCM4359_D11AC5G_ID:
+ case BCM43596_D11AC_ID:
+ case BCM43596_D11AC2G_ID:
+ case BCM43596_D11AC5G_ID:
+ case BCM43597_D11AC_ID:
+ case BCM43597_D11AC2G_ID:
+ case BCM43597_D11AC5G_ID:
+ case BCM4364_D11AC_ID:
+ case BCM4364_D11AC2G_ID:
+ case BCM4364_D11AC5G_ID:
+ case BCM4364_CHIP_ID:
+ case BCM4361_D11AC_ID:
+ case BCM4361_D11AC2G_ID:
+ case BCM4361_D11AC5G_ID:
+ case BCM4361_CHIP_ID:
+ case BCM4347_D11AC_ID:
+ case BCM4347_D11AC2G_ID:
+ case BCM4347_D11AC5G_ID:
+ case BCM4347_CHIP_ID:
+ case BCM4369_D11AX_ID:
+ case BCM4369_D11AX2G_ID:
+ case BCM4369_D11AX5G_ID:
+ case BCM4369_CHIP_ID:
+ case BCM4376_D11AX_ID:
+ case BCM4376_D11AX2G_ID:
+ case BCM4376_D11AX5G_ID:
+ case BCM4376_CHIP_ID:
+ case BCM4377_M_D11AX_ID:
+ case BCM4377_D11AX_ID:
+ case BCM4377_D11AX2G_ID:
+ case BCM4377_D11AX5G_ID:
+ case BCM4377_CHIP_ID:
+ case BCM4378_D11AX_ID:
+ case BCM4378_D11AX2G_ID:
+ case BCM4378_D11AX5G_ID:
+ case BCM4378_CHIP_ID:
+ case BCM4387_D11AX_ID:
+ case BCM4387_CHIP_ID:
+ case BCM4362_D11AX_ID:
+ case BCM4362_D11AX2G_ID:
+ case BCM4362_D11AX5G_ID:
+ case BCM4362_CHIP_ID:
+ case BCM4375_D11AX_ID:
+ case BCM4375_D11AX2G_ID:
+ case BCM4375_D11AX5G_ID:
+ case BCM4375_CHIP_ID:
+ case BCM43751_D11AX_ID:
+ case BCM43751_D11AX2G_ID:
+ case BCM43751_D11AX5G_ID:
+ case BCM43751_CHIP_ID:
+ case BCM43752_D11AX_ID:
+ case BCM43752_D11AX2G_ID:
+ case BCM43752_D11AX5G_ID:
+ case BCM43752_CHIP_ID:
+ case BCM4388_CHIP_ID:
+ case BCM4388_D11AX_ID:
+ case BCM4389_CHIP_ID:
+ case BCM4389_D11AX_ID:
+ case BCM4385_D11AX_ID:
+ case BCM4385_CHIP_ID:
+
+#ifdef UNRELEASEDCHIP
+ case BCM4397_CHIP_ID:
+ case BCM4397_D11AX_ID:
+#endif
+ return 0;
+ default:
+#ifndef DHD_EFI
+ DHD_ERROR(("%s: Unsupported vendor %x device %x\n",
+ __FUNCTION__, vendor, device));
+#endif
+ return (-ENODEV);
+ }
+} /* dhdpcie_chipmatch */
+
+/**
+ * Name: dhdpcie_cc_nvmshadow
+ *
+ * Description:
+ * A shadow of OTP/SPROM exists in ChipCommon Region
+ * between 0x800 and 0xBFF (Backplane Addr. 0x1800_0800 and 0x1800_0BFF).
+ * Strapping option (SPROM vs. OTP), presence of OTP/SPROM and its size
+ * can also be read from ChipCommon Registers.
+ */
+/* XXX So far tested with 4345 and 4350 (Hence the checks in the function.) */
+static int
+dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b)
+{
+ uint16 dump_offset = 0;
+ uint32 dump_size = 0, otp_size = 0, sprom_size = 0;
+
+ /* Table for 65nm OTP Size (in bits) */
+ int otp_size_65nm[8] = {0, 2048, 4096, 8192, 4096, 6144, 512, 1024};
+
+ volatile uint16 *nvm_shadow;
+
+ uint cur_coreid;
+ uint chipc_corerev;
+ chipcregs_t *chipcregs;
+
+ /* Save the current core */
+ cur_coreid = si_coreid(bus->sih);
+ /* Switch to ChipC */
+ chipcregs = (chipcregs_t *)si_setcore(bus->sih, CC_CORE_ID, 0);
+ ASSERT(chipcregs != NULL);
+
+ chipc_corerev = si_corerev(bus->sih);
+
+ /* Check ChipcommonCore Rev */
+ if (chipc_corerev < 44) {
+ DHD_ERROR(("%s: ChipcommonCore Rev %d < 44\n", __FUNCTION__, chipc_corerev));
+ return BCME_UNSUPPORTED;
+ }
+
+ /* Check ChipID */
+ if (((uint16)bus->sih->chip != BCM4350_CHIP_ID) && !BCM4345_CHIP((uint16)bus->sih->chip) &&
+ ((uint16)bus->sih->chip != BCM4355_CHIP_ID) &&
+ ((uint16)bus->sih->chip != BCM4364_CHIP_ID)) {
+ DHD_ERROR(("%s: cc_nvmdump cmd. supported for Olympic chips"
+ "4350/4345/4355/4364 only\n", __FUNCTION__));
+ return BCME_UNSUPPORTED;
+ }
+
+ /* Check if SRC_PRESENT in SpromCtrl(0x190 in ChipCommon Regs) is set */
+ if (chipcregs->sromcontrol & SRC_PRESENT) {
+		/* SPROM Size: 1Kbits (0x0), 4Kbits (0x1), 16Kbits (0x2) */
+ sprom_size = (1 << (2 * ((chipcregs->sromcontrol & SRC_SIZE_MASK)
+ >> SRC_SIZE_SHIFT))) * 1024;
+ bcm_bprintf(b, "\nSPROM Present (Size %d bits)\n", sprom_size);
+ }
+
+ /* XXX Check if OTP exists. 2 possible approaches:
+ * 1) Check if OtpPresent in SpromCtrl (0x190 in ChipCommon Regs) is set OR
+ * 2) Check if OtpSize > 0
+ */
+ if (chipcregs->sromcontrol & SRC_OTPPRESENT) {
+ bcm_bprintf(b, "\nOTP Present");
+
+ if (((chipcregs->otplayout & OTPL_WRAP_TYPE_MASK) >> OTPL_WRAP_TYPE_SHIFT)
+ == OTPL_WRAP_TYPE_40NM) {
+ /* 40nm OTP: Size = (OtpSize + 1) * 1024 bits */
+ /* Chipcommon rev51 is a variation on rev45 and does not support
+ * the latest OTP configuration.
+ */
+ if (chipc_corerev != 51 && chipc_corerev >= 49) {
+ otp_size = (((chipcregs->otplayout & OTPL_ROW_SIZE_MASK)
+ >> OTPL_ROW_SIZE_SHIFT) + 1) * 1024;
+ bcm_bprintf(b, "(Size %d bits)\n", otp_size);
+ } else {
+ otp_size = (((chipcregs->capabilities & CC_CAP_OTPSIZE)
+ >> CC_CAP_OTPSIZE_SHIFT) + 1) * 1024;
+ bcm_bprintf(b, "(Size %d bits)\n", otp_size);
+ }
+ } else {
+ /* This part is untested since newer chips have 40nm OTP */
+ /* Chipcommon rev51 is a variation on rev45 and does not support
+ * the latest OTP configuration.
+ */
+ if (chipc_corerev != 51 && chipc_corerev >= 49) {
+ otp_size = otp_size_65nm[(chipcregs->otplayout & OTPL_ROW_SIZE_MASK)
+ >> OTPL_ROW_SIZE_SHIFT];
+ bcm_bprintf(b, "(Size %d bits)\n", otp_size);
+ } else {
+ otp_size = otp_size_65nm[(chipcregs->capabilities & CC_CAP_OTPSIZE)
+ >> CC_CAP_OTPSIZE_SHIFT];
+ bcm_bprintf(b, "(Size %d bits)\n", otp_size);
+ DHD_INFO(("%s: 65nm/130nm OTP Size not tested. \n",
+ __FUNCTION__));
+ }
+ }
+ }
+
+ /* Chipcommon rev51 is a variation on rev45 and does not support
+ * the latest OTP configuration.
+ */
+ if (chipc_corerev != 51 && chipc_corerev >= 49) {
+ if (((chipcregs->sromcontrol & SRC_PRESENT) == 0) &&
+ ((chipcregs->otplayout & OTPL_ROW_SIZE_MASK) == 0)) {
+ DHD_ERROR(("%s: SPROM and OTP could not be found "
+ "sromcontrol = %x, otplayout = %x \n",
+ __FUNCTION__, chipcregs->sromcontrol, chipcregs->otplayout));
+ return BCME_NOTFOUND;
+ }
+ } else {
+ if (((chipcregs->sromcontrol & SRC_PRESENT) == 0) &&
+ ((chipcregs->capabilities & CC_CAP_OTPSIZE) == 0)) {
+ DHD_ERROR(("%s: SPROM and OTP could not be found "
+ "sromcontrol = %x, capablities = %x \n",
+ __FUNCTION__, chipcregs->sromcontrol, chipcregs->capabilities));
+ return BCME_NOTFOUND;
+ }
+ }
+
+ /* Check the strapping option in SpromCtrl: Set = OTP otherwise SPROM */
+ if ((!(chipcregs->sromcontrol & SRC_PRESENT) || (chipcregs->sromcontrol & SRC_OTPSEL)) &&
+ (chipcregs->sromcontrol & SRC_OTPPRESENT)) {
+
+ bcm_bprintf(b, "OTP Strap selected.\n"
+ "\nOTP Shadow in ChipCommon:\n");
+
+		dump_size = otp_size / 16; /* 16-bit words */
+
+ } else if (((chipcregs->sromcontrol & SRC_OTPSEL) == 0) &&
+ (chipcregs->sromcontrol & SRC_PRESENT)) {
+
+ bcm_bprintf(b, "SPROM Strap selected\n"
+ "\nSPROM Shadow in ChipCommon:\n");
+
+ /* If SPROM > 8K only 8Kbits is mapped to ChipCommon (0x800 - 0xBFF) */
+ /* dump_size in 16bit words */
+ dump_size = sprom_size > 8 ? (8 * 1024) / 16 : sprom_size / 16;
+ } else {
+ DHD_ERROR(("%s: NVM Shadow does not exist in ChipCommon\n",
+ __FUNCTION__));
+ return BCME_NOTFOUND;
+ }
+
+ if (bus->regs == NULL) {
+ DHD_ERROR(("ChipCommon Regs. not initialized\n"));
+ return BCME_NOTREADY;
+ } else {
+ bcm_bprintf(b, "\n OffSet:");
+
+ /* Chipcommon rev51 is a variation on rev45 and does not support
+ * the latest OTP configuration.
+ */
+ if (chipc_corerev != 51 && chipc_corerev >= 49) {
+			/* ChipCommon can map only 8Kbits; for ccrev >= 49 the OTP
+			 * size is around 12Kbits, so use the GCI core instead.
+			 */
+ nvm_shadow = (volatile uint16 *)si_setcore(bus->sih, GCI_CORE_ID, 0);
+ } else {
+ /* Point to the SPROM/OTP shadow in ChipCommon */
+ nvm_shadow = chipcregs->sromotp;
+ }
+
+ if (nvm_shadow == NULL) {
+ DHD_ERROR(("%s: NVM Shadow is not intialized\n", __FUNCTION__));
+ return BCME_NOTFOUND;
+ }
+
+ /*
+ * Read 16 bits / iteration.
+ * dump_size & dump_offset in 16-bit words
+ */
+ while (dump_offset < dump_size) {
+ if (dump_offset % 2 == 0)
+ /* Print the offset in the shadow space in Bytes */
+ bcm_bprintf(b, "\n 0x%04x", dump_offset * 2);
+
+ bcm_bprintf(b, "\t0x%04x", *(nvm_shadow + dump_offset));
+ dump_offset += 0x1;
+ }
+ }
+
+ /* Switch back to the original core */
+ si_setcore(bus->sih, cur_coreid, 0);
+
+ return BCME_OK;
+} /* dhdpcie_cc_nvmshadow */
+
+/** Flow rings are dynamically created and destroyed */
+void dhd_bus_clean_flow_ring(dhd_bus_t *bus, void *node)
+{
+ void *pkt;
+ flow_queue_t *queue;
+ flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)node;
+ unsigned long flags;
+
+ queue = &flow_ring_node->queue;
+
+#ifdef DHDTCPACK_SUPPRESS
+ /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
+ * when there is a newly coming packet from network stack.
+ */
+ dhd_tcpack_info_tbl_clean(bus->dhd);
+#endif /* DHDTCPACK_SUPPRESS */
+
+#ifdef DHD_HP2P
+ if (flow_ring_node->hp2p_ring) {
+ if (!bus->dhd->hp2p_ring_more) {
+ bus->dhd->hp2p_ring_more = TRUE;
+ }
+ flow_ring_node->hp2p_ring = FALSE;
+ }
+#endif /* DHD_HP2P */
+
+ /* clean up BUS level info */
+ DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
+
+ /* Flush all pending packets in the queue, if any */
+ while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
+ PKTFREE(bus->dhd->osh, pkt, TRUE);
+ }
+ ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
+
+ /* Reinitialise flowring's queue */
+ dhd_flow_queue_reinit(bus->dhd, queue, bus->dhd->conf->flow_ring_queue_threshold);
+ flow_ring_node->status = FLOW_RING_STATUS_CLOSED;
+ flow_ring_node->active = FALSE;
+
+ DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+
+ /* Hold flowring_list_lock to ensure no race condition while accessing the List */
+ DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
+ dll_delete(&flow_ring_node->list);
+ dll_init(&flow_ring_node->list);
+ DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
+
+ /* Release the flowring object back into the pool */
+ dhd_prot_flowrings_pool_release(bus->dhd,
+ flow_ring_node->flowid, flow_ring_node->prot_info);
+
+ /* Free the flowid back to the flowid allocator */
+ dhd_flowid_free(bus->dhd, flow_ring_node->flow_info.ifindex,
+ flow_ring_node->flowid);
+}
+
+/**
+ * Allocate a flow ring buffer, initialize the ring, and send a message to the
+ * device about flow ring creation.
+ */
+int
+dhd_bus_flow_ring_create_request(dhd_bus_t *bus, void *arg)
+{
+ flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)arg;
+
+ DHD_PCIE_INFO(("%s :Flow create\n", __FUNCTION__));
+
+ /* Send Msg to device about flow ring creation */
+ if (dhd_prot_flow_ring_create(bus->dhd, flow_ring_node) != BCME_OK)
+ return BCME_NOMEM;
+
+ return BCME_OK;
+}
+
+/** Handle response from dongle on a 'flow ring create' request */
+void
+dhd_bus_flow_ring_create_response(dhd_bus_t *bus, uint16 flowid, int32 status)
+{
+ flow_ring_node_t *flow_ring_node;
+ unsigned long flags;
+
+ DHD_PCIE_INFO(("%s :Flow Response %d \n", __FUNCTION__, flowid));
+
+ /* Boundary check of the flowid */
+ if (flowid > bus->dhd->max_tx_flowid) {
+ DHD_ERROR(("%s: flowid is invalid %d, max id %d\n", __FUNCTION__,
+ flowid, bus->dhd->max_tx_flowid));
+ return;
+ }
+
+ flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
+ if (!flow_ring_node) {
+ DHD_ERROR(("%s: flow_ring_node is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ ASSERT(flow_ring_node->flowid == flowid);
+ if (flow_ring_node->flowid != flowid) {
+ DHD_ERROR(("%s: flowid %d is different from the flowid "
+ "of the flow_ring_node %d\n", __FUNCTION__, flowid,
+ flow_ring_node->flowid));
+ return;
+ }
+
+ if (status != BCME_OK) {
+ DHD_ERROR(("%s Flow create Response failure error status = %d \n",
+ __FUNCTION__, status));
+ /* Call Flow clean up */
+ dhd_bus_clean_flow_ring(bus, flow_ring_node);
+ return;
+ }
+
+ DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
+ flow_ring_node->status = FLOW_RING_STATUS_OPEN;
+ DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+
+	/* Now add the flow ring node to the active list.
+	 * Note that the code adding the newly created node to the active list
+	 * used to live in dhd_flowid_lookup. But after the node is added to
+	 * the active list, its contents are still being filled in by
+	 * dhd_prot_flow_ring_create. If a D2H interrupt arrives after the node
+	 * is on the active list but before it is fully populated, the bottom
+	 * half calls dhd_update_txflowrings, which walks the active flow ring
+	 * list and operates on its nodes. Since dhd_prot_flow_ring_create has
+	 * not finished yet, the contents of flow_ring_node can still be NULL,
+	 * leading to crashes. Hence the flow_ring_node should be added to the
+	 * active list only after it is truly created, i.e. after the create
+	 * response message has been received.
+	 */
+ DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
+ dll_prepend(&bus->flowring_active_list, &flow_ring_node->list);
+ DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
+
+ dhd_bus_schedule_queue(bus, flowid, FALSE); /* from queue to flowring */
+
+ return;
+}
+
+int
+dhd_bus_flow_ring_delete_request(dhd_bus_t *bus, void *arg)
+{
+ void * pkt;
+ flow_queue_t *queue;
+ flow_ring_node_t *flow_ring_node;
+ unsigned long flags;
+
+ DHD_PCIE_INFO(("%s :Flow Delete\n", __FUNCTION__));
+
+ flow_ring_node = (flow_ring_node_t *)arg;
+
+#ifdef DHDTCPACK_SUPPRESS
+ /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
+ * when there is a newly coming packet from network stack.
+ */
+ dhd_tcpack_info_tbl_clean(bus->dhd);
+#endif /* DHDTCPACK_SUPPRESS */
+ DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
+ if (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING) {
+ DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+ DHD_ERROR(("%s :Delete Pending flowid %u\n", __FUNCTION__, flow_ring_node->flowid));
+ return BCME_ERROR;
+ }
+ flow_ring_node->status = FLOW_RING_STATUS_DELETE_PENDING;
+
+ queue = &flow_ring_node->queue; /* queue associated with flow ring */
+
+ /* Flush all pending packets in the queue, if any */
+ while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
+ PKTFREE(bus->dhd->osh, pkt, TRUE);
+ }
+ ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
+
+ DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+
+ /* Send Msg to device about flow ring deletion */
+ dhd_prot_flow_ring_delete(bus->dhd, flow_ring_node);
+
+ return BCME_OK;
+}
+
+void
+dhd_bus_flow_ring_delete_response(dhd_bus_t *bus, uint16 flowid, uint32 status)
+{
+ flow_ring_node_t *flow_ring_node;
+
+ DHD_PCIE_INFO(("%s :Flow Delete Response %d \n", __FUNCTION__, flowid));
+
+ /* Boundary check of the flowid */
+ if (flowid > bus->dhd->max_tx_flowid) {
+ DHD_ERROR(("%s: flowid is invalid %d, max id %d\n", __FUNCTION__,
+ flowid, bus->dhd->max_tx_flowid));
+ return;
+ }
+
+ flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
+ if (!flow_ring_node) {
+ DHD_ERROR(("%s: flow_ring_node is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ ASSERT(flow_ring_node->flowid == flowid);
+ if (flow_ring_node->flowid != flowid) {
+ DHD_ERROR(("%s: flowid %d is different from the flowid "
+ "of the flow_ring_node %d\n", __FUNCTION__, flowid,
+ flow_ring_node->flowid));
+ return;
+ }
+
+ if (status != BCME_OK) {
+ DHD_ERROR(("%s Flow Delete Response failure error status = %d \n",
+ __FUNCTION__, status));
+ return;
+ }
+ /* Call Flow clean up */
+ dhd_bus_clean_flow_ring(bus, flow_ring_node);
+
+	return;
+}
+
+int dhd_bus_flow_ring_flush_request(dhd_bus_t *bus, void *arg)
+{
+ void *pkt;
+ flow_queue_t *queue;
+ flow_ring_node_t *flow_ring_node;
+ unsigned long flags;
+
+ DHD_PCIE_INFO(("%s :Flow Flush\n", __FUNCTION__));
+
+ flow_ring_node = (flow_ring_node_t *)arg;
+
+ DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
+ queue = &flow_ring_node->queue; /* queue associated with flow ring */
+ /* Flow ring status will be set back to FLOW_RING_STATUS_OPEN
+ * once flow ring flush response is received for this flowring node.
+ */
+ flow_ring_node->status = FLOW_RING_STATUS_FLUSH_PENDING;
+
+#ifdef DHDTCPACK_SUPPRESS
+ /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
+ * when there is a newly coming packet from network stack.
+ */
+ dhd_tcpack_info_tbl_clean(bus->dhd);
+#endif /* DHDTCPACK_SUPPRESS */
+
+ /* Flush all pending packets in the queue, if any */
+ while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
+ PKTFREE(bus->dhd->osh, pkt, TRUE);
+ }
+ ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
+
+ DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+
+ /* Send Msg to device about flow ring flush */
+ dhd_prot_flow_ring_flush(bus->dhd, flow_ring_node);
+
+ return BCME_OK;
+}
+
+void
+dhd_bus_flow_ring_flush_response(dhd_bus_t *bus, uint16 flowid, uint32 status)
+{
+ flow_ring_node_t *flow_ring_node;
+
+ if (status != BCME_OK) {
+ DHD_ERROR(("%s Flow flush Response failure error status = %d \n",
+ __FUNCTION__, status));
+ return;
+ }
+
+ /* Boundary check of the flowid */
+ if (flowid > bus->dhd->max_tx_flowid) {
+ DHD_ERROR(("%s: flowid is invalid %d, max id %d\n", __FUNCTION__,
+ flowid, bus->dhd->max_tx_flowid));
+ return;
+ }
+
+ flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
+ if (!flow_ring_node) {
+ DHD_ERROR(("%s: flow_ring_node is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ ASSERT(flow_ring_node->flowid == flowid);
+ if (flow_ring_node->flowid != flowid) {
+ DHD_ERROR(("%s: flowid %d is different from the flowid "
+ "of the flow_ring_node %d\n", __FUNCTION__, flowid,
+ flow_ring_node->flowid));
+ return;
+ }
+
+ flow_ring_node->status = FLOW_RING_STATUS_OPEN;
+ return;
+}
+
+uint32
+dhd_bus_max_h2d_queues(struct dhd_bus *bus)
+{
+ return bus->max_submission_rings;
+}
+
+/* To be symmetric with SDIO */
+void
+dhd_bus_pktq_flush(dhd_pub_t *dhdp)
+{
+ return;
+}
+
+void
+dhd_bus_set_linkdown(dhd_pub_t *dhdp, bool val)
+{
+ dhdp->bus->is_linkdown = val;
+}
+
+int
+dhd_bus_get_linkdown(dhd_pub_t *dhdp)
+{
+ return dhdp->bus->is_linkdown;
+}
+
+int
+dhd_bus_get_cto(dhd_pub_t *dhdp)
+{
+ return dhdp->bus->cto_triggered;
+}
+
+#ifdef IDLE_TX_FLOW_MGMT
+/* resume request */
+int
+dhd_bus_flow_ring_resume_request(dhd_bus_t *bus, void *arg)
+{
+ flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)arg;
+
+ DHD_ERROR(("%s :Flow Resume Request flow id %u\n", __FUNCTION__, flow_ring_node->flowid));
+
+ flow_ring_node->status = FLOW_RING_STATUS_RESUME_PENDING;
+
+ /* Send Msg to device about flow ring resume */
+ dhd_prot_flow_ring_resume(bus->dhd, flow_ring_node);
+
+ return BCME_OK;
+}
+
+/* add the node back to active flowring */
+void
+dhd_bus_flow_ring_resume_response(dhd_bus_t *bus, uint16 flowid, int32 status)
+{
+ flow_ring_node_t *flow_ring_node;
+
+ DHD_TRACE(("%s :flowid %d \n", __FUNCTION__, flowid));
+
+ flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
+ ASSERT(flow_ring_node->flowid == flowid);
+
+ if (status != BCME_OK) {
+ DHD_ERROR(("%s Error Status = %d \n",
+ __FUNCTION__, status));
+ return;
+ }
+
+ DHD_TRACE(("%s :Number of pkts queued in FlowId:%d is -> %u!!\n",
+ __FUNCTION__, flow_ring_node->flowid, flow_ring_node->queue.len));
+
+ flow_ring_node->status = FLOW_RING_STATUS_OPEN;
+
+ dhd_bus_schedule_queue(bus, flowid, FALSE);
+ return;
+}
+
+/* scan the flow rings in active list for idle time out */
+void
+dhd_bus_check_idle_scan(dhd_bus_t *bus)
+{
+ uint64 time_stamp; /* in millisec */
+ uint64 diff;
+
+ time_stamp = OSL_SYSUPTIME();
+ diff = time_stamp - bus->active_list_last_process_ts;
+
+ if (diff > IDLE_FLOW_LIST_TIMEOUT) {
+ dhd_bus_idle_scan(bus);
+ bus->active_list_last_process_ts = OSL_SYSUPTIME();
+ }
+
+ return;
+}
+
+/* scan the nodes in active list till it finds a non idle node */
+void
+dhd_bus_idle_scan(dhd_bus_t *bus)
+{
+ dll_t *item, *prev;
+ flow_ring_node_t *flow_ring_node;
+ uint64 time_stamp, diff;
+ unsigned long flags;
+ uint16 ringid[MAX_SUSPEND_REQ];
+ uint16 count = 0;
+
+ time_stamp = OSL_SYSUPTIME();
+ DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
+
+ for (item = dll_tail_p(&bus->flowring_active_list);
+ !dll_end(&bus->flowring_active_list, item); item = prev) {
+ prev = dll_prev_p(item);
+
+ flow_ring_node = dhd_constlist_to_flowring(item);
+
+ if (flow_ring_node->flowid == (bus->max_submission_rings - 1))
+ continue;
+
+ if (flow_ring_node->status != FLOW_RING_STATUS_OPEN) {
+ /* Takes care of deleting zombie rings */
+ /* delete from the active list */
+ DHD_INFO(("deleting flow id %u from active list\n",
+ flow_ring_node->flowid));
+ __dhd_flow_ring_delete_from_active_list(bus, flow_ring_node);
+ continue;
+ }
+
+ diff = time_stamp - flow_ring_node->last_active_ts;
+
+ if ((diff > IDLE_FLOW_RING_TIMEOUT) && !(flow_ring_node->queue.len)) {
+ DHD_ERROR(("\nSuspending flowid %d\n", flow_ring_node->flowid));
+ /* delete from the active list */
+ __dhd_flow_ring_delete_from_active_list(bus, flow_ring_node);
+ flow_ring_node->status = FLOW_RING_STATUS_SUSPENDED;
+ ringid[count] = flow_ring_node->flowid;
+ count++;
+ if (count == MAX_SUSPEND_REQ) {
+ /* create a batch message now!! */
+ dhd_prot_flow_ring_batch_suspend_request(bus->dhd, ringid, count);
+ count = 0;
+ }
+
+ } else {
+
+ /* No more scanning, break from here! */
+ break;
+ }
+ }
+
+ if (count) {
+ dhd_prot_flow_ring_batch_suspend_request(bus->dhd, ringid, count);
+ }
+
+ DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
+
+ return;
+}
+
+void dhd_flow_ring_move_to_active_list_head(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node)
+{
+ unsigned long flags;
+ dll_t* list;
+
+ DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
+ /* check if the node is already at head, otherwise delete it and prepend */
+ list = dll_head_p(&bus->flowring_active_list);
+ if (&flow_ring_node->list != list) {
+ dll_delete(&flow_ring_node->list);
+ dll_prepend(&bus->flowring_active_list, &flow_ring_node->list);
+ }
+
+ /* update flow ring timestamp */
+ flow_ring_node->last_active_ts = OSL_SYSUPTIME();
+
+ DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
+
+ return;
+}
+
+void dhd_flow_ring_add_to_active_list(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node)
+{
+ unsigned long flags;
+
+ DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
+
+ dll_prepend(&bus->flowring_active_list, &flow_ring_node->list);
+ /* update flow ring timestamp */
+ flow_ring_node->last_active_ts = OSL_SYSUPTIME();
+
+ DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
+
+ return;
+}
+
+void __dhd_flow_ring_delete_from_active_list(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node)
+{
+ dll_delete(&flow_ring_node->list);
+}
+
+void dhd_flow_ring_delete_from_active_list(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node)
+{
+ unsigned long flags;
+
+ DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
+
+ __dhd_flow_ring_delete_from_active_list(bus, flow_ring_node);
+
+ DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
+
+ return;
+}
+#endif /* IDLE_TX_FLOW_MGMT */
+
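+/* Thin wrappers that forward generic bus hooks to the PCIe host-device layer */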
+#if defined(LINUX) || defined(linux)
+int
+dhdpcie_bus_start_host_dev(struct dhd_bus *bus)
+{
+ return dhdpcie_start_host_dev(bus);
+}
+
+int
+dhdpcie_bus_stop_host_dev(struct dhd_bus *bus)
+{
+ return dhdpcie_stop_host_dev(bus);
+}
+
+int
+dhdpcie_bus_disable_device(struct dhd_bus *bus)
+{
+ return dhdpcie_disable_device(bus);
+}
+
+int
+dhdpcie_bus_enable_device(struct dhd_bus *bus)
+{
+ return dhdpcie_enable_device(bus);
+}
+
+int
+dhdpcie_bus_alloc_resource(struct dhd_bus *bus)
+{
+ return dhdpcie_alloc_resource(bus);
+}
+
+void
+dhdpcie_bus_free_resource(struct dhd_bus *bus)
+{
+ dhdpcie_free_resource(bus);
+}
+
+int
+dhd_bus_request_irq(struct dhd_bus *bus)
+{
+ return dhdpcie_bus_request_irq(bus);
+}
+
+bool
+dhdpcie_bus_dongle_attach(struct dhd_bus *bus)
+{
+ return dhdpcie_dongle_attach(bus);
+}
+
+int
+dhd_bus_release_dongle(struct dhd_bus *bus)
+{
+ bool dongle_isolation;
+ osl_t *osh;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (bus) {
+ osh = bus->osh;
+ ASSERT(osh);
+
+ if (bus->dhd) {
+#if defined(DEBUGGER) || defined (DHD_DSCOPE)
+ debugger_close();
+#endif /* DEBUGGER || DHD_DSCOPE */
+
+ dongle_isolation = bus->dhd->dongle_isolation;
+ dhdpcie_bus_release_dongle(bus, osh, dongle_isolation, TRUE);
+ }
+ }
+
+ return 0;
+}
+#endif /* LINUX || linux */
+
+int
+dhdpcie_cto_cfg_init(struct dhd_bus *bus, bool enable)
+{
+ if (enable) {
+ dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 4,
+ PCI_CTO_INT_MASK | PCI_SBIM_MASK_SERR);
+ } else {
+ dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 4, 0);
+ }
+ return 0;
+}
+
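+/* Enable/disable CTO (completion timeout) detection: program the timeout
+ * threshold and clock-check count into the PCIe core ctoctrl register and
+ * mask/unmask the matching config-space interrupt via dhdpcie_cto_cfg_init().
+ */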
+int
+dhdpcie_cto_init(struct dhd_bus *bus, bool enable)
+{
+ volatile void *regsva = (volatile void *)bus->regs;
+ uint32 val;
+ uint16 chipid = dhd_get_chipid(bus);
+ uint32 ctoctrl;
+
+ bus->cto_enable = enable;
+
+ dhdpcie_cto_cfg_init(bus, enable);
+
+ if (enable) {
+ if (bus->cto_threshold == 0) {
+ if ((chipid == BCM4387_CHIP_ID) ||
+ (chipid == BCM4388_CHIP_ID) ||
+ (chipid == BCM4389_CHIP_ID)) {
+ bus->cto_threshold = PCIE_CTO_TO_THRESH_DEFAULT_REV69;
+ } else {
+ bus->cto_threshold = PCIE_CTO_TO_THRESH_DEFAULT;
+ }
+ }
+ val = ((bus->cto_threshold << PCIE_CTO_TO_THRESHOLD_SHIFT) &
+ PCIE_CTO_TO_THRESHHOLD_MASK) |
+ ((PCIE_CTO_CLKCHKCNT_VAL << PCIE_CTO_CLKCHKCNT_SHIFT) &
+ PCIE_CTO_CLKCHKCNT_MASK) |
+ PCIE_CTO_ENAB_MASK;
+
+ pcie_corereg(bus->osh, regsva, OFFSETOF(sbpcieregs_t, ctoctrl), ~0, val);
+ } else {
+ pcie_corereg(bus->osh, regsva, OFFSETOF(sbpcieregs_t, ctoctrl), ~0, 0);
+ }
+
+ ctoctrl = pcie_corereg(bus->osh, regsva, OFFSETOF(sbpcieregs_t, ctoctrl), 0, 0);
+
+ DHD_ERROR(("%s: ctoctrl(0x%x) enable/disable %d for chipid(0x%x)\n",
+ __FUNCTION__, ctoctrl, bus->cto_enable, chipid));
+
+ return 0;
+}
+
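+/* CTO recovery sequence: mask the CTO interrupt, dump DAR state while it is
+ * still reachable, assert the backplane reset via SPROM control, poll the DAR
+ * error log until the timeout error clears, then clear the interrupt status
+ * and de-assert the backplane reset.
+ */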
+static int
+dhdpcie_cto_error_recovery(struct dhd_bus *bus)
+{
+ uint32 pci_intmask, err_status;
+ uint8 i = 0;
+ uint32 val;
+
+ pci_intmask = dhdpcie_bus_cfg_read_dword(bus, PCI_INT_MASK, 4);
+ dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 4, pci_intmask & ~PCI_CTO_INT_MASK);
+
+ DHD_OS_WAKE_LOCK(bus->dhd);
+
+ DHD_ERROR(("--- CTO Triggered --- %d\n", bus->pwr_req_ref));
+
+ /*
+ * DAR still accessible
+ */
+ dhd_bus_dump_dar_registers(bus);
+
+ /* reset backplane */
+ val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4);
+ dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, val | SPROM_CFG_TO_SB_RST);
+
+ /* clear timeout error */
+ while (1) {
+ err_status = si_corereg(bus->sih, bus->sih->buscoreidx,
+ DAR_ERRLOG(bus->sih->buscorerev),
+ 0, 0);
+ if (err_status & PCIE_CTO_ERR_MASK) {
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ DAR_ERRLOG(bus->sih->buscorerev),
+ ~0, PCIE_CTO_ERR_MASK);
+ } else {
+ break;
+ }
+ OSL_DELAY(CTO_TO_CLEAR_WAIT_MS * 1000);
+ i++;
+ if (i > CTO_TO_CLEAR_WAIT_MAX_CNT) {
+ DHD_ERROR(("cto recovery fail\n"));
+
+ DHD_OS_WAKE_UNLOCK(bus->dhd);
+ return BCME_ERROR;
+ }
+ }
+
+ /* clear interrupt status */
+ dhdpcie_bus_cfg_write_dword(bus, PCI_INT_STATUS, 4, PCI_CTO_INT_MASK);
+
+ /* Halt ARM & remove reset */
+	/* TBD: we could halt the ARM here if needed */
+
+ /* reset SPROM_CFG_TO_SB_RST */
+ val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4);
+
+ DHD_ERROR(("cto recovery reset 0x%x:SPROM_CFG_TO_SB_RST(0x%x) 0x%x\n",
+ PCI_SPROM_CONTROL, SPROM_CFG_TO_SB_RST, val));
+ dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, val & ~SPROM_CFG_TO_SB_RST);
+
+ val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4);
+ DHD_ERROR(("cto recovery success 0x%x:SPROM_CFG_TO_SB_RST(0x%x) 0x%x\n",
+ PCI_SPROM_CONTROL, SPROM_CFG_TO_SB_RST, val));
+
+ DHD_OS_WAKE_UNLOCK(bus->dhd);
+
+ return BCME_OK;
+}
+
+void
+dhdpcie_ssreset_dis_enum_rst(struct dhd_bus *bus)
+{
+ uint32 val;
+
+ val = dhdpcie_bus_cfg_read_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4);
+ dhdpcie_bus_cfg_write_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4,
+ val | (0x1 << PCIE_SSRESET_DIS_ENUM_RST_BIT));
+}
+
+#if defined(DBG_PKT_MON) || defined(DHD_PKT_LOGGING)
+/*
+ * XXX: WAR: tell the dongle that the driver supports receiving d11 tx_status
+ * through the unused status field of the PCIe completion header, if the
+ * dongle also supports the same WAR.
+ */
+static int
+dhdpcie_init_d11status(struct dhd_bus *bus)
+{
+ uint32 addr;
+ uint32 flags2;
+ int ret = 0;
+
+ if (bus->pcie_sh->flags2 & PCIE_SHARED2_D2H_D11_TX_STATUS) {
+ flags2 = bus->pcie_sh->flags2;
+ addr = bus->shared_addr + OFFSETOF(pciedev_shared_t, flags2);
+ flags2 |= PCIE_SHARED2_H2D_D11_TX_STATUS;
+ ret = dhdpcie_bus_membytes(bus, TRUE, addr,
+ (uint8 *)&flags2, sizeof(flags2));
+ if (ret < 0) {
+ DHD_ERROR(("%s: update flag bit (H2D_D11_TX_STATUS) failed\n",
+ __FUNCTION__));
+ return ret;
+ }
+ bus->pcie_sh->flags2 = flags2;
+ bus->dhd->d11_tx_status = TRUE;
+ }
+ return ret;
+}
+
+#else
+static int
+dhdpcie_init_d11status(struct dhd_bus *bus)
+{
+ return 0;
+}
+#endif /* DBG_PKT_MON || DHD_PKT_LOGGING */
+
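+/* The maximum event buffer pool size appears to be encoded as a two-bit field
+ * in flags2 at PCIE_SHARED_EVENT_BUF_POOL_MAX_POS (0 = low, 1 = medium,
+ * 2 = high, 3 = highest); note the bit patterns below are tested in order,
+ * so the first match wins.
+ */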
+int
+dhdpcie_get_max_eventbufpost(struct dhd_bus *bus)
+{
+ int evt_buf_pool = EVENT_BUF_POOL_LOW;
+ if (bus->pcie_sh->flags2 & (0x1 << PCIE_SHARED_EVENT_BUF_POOL_MAX_POS)) {
+ evt_buf_pool = EVENT_BUF_POOL_MEDIUM;
+ } else if (bus->pcie_sh->flags2 & (0x2 << PCIE_SHARED_EVENT_BUF_POOL_MAX_POS)) {
+ evt_buf_pool = EVENT_BUF_POOL_HIGH;
+ } else if (bus->pcie_sh->flags2 & (0x3 << PCIE_SHARED_EVENT_BUF_POOL_MAX_POS)) {
+ evt_buf_pool = EVENT_BUF_POOL_HIGHEST;
+ }
+ return evt_buf_pool;
+}
+
+int
+dhd_bus_oob_intr_register(dhd_pub_t *dhdp)
+{
+ int err = 0;
+#ifdef BCMPCIE_OOB_HOST_WAKE
+ err = dhdpcie_oob_intr_register(dhdp->bus);
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+ return err;
+}
+
+void
+dhd_bus_oob_intr_unregister(dhd_pub_t *dhdp)
+{
+#ifdef BCMPCIE_OOB_HOST_WAKE
+ dhdpcie_oob_intr_unregister(dhdp->bus);
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+}
+
+void
+dhd_bus_oob_intr_set(dhd_pub_t *dhdp, bool enable)
+{
+#ifdef BCMPCIE_OOB_HOST_WAKE
+ dhdpcie_oob_intr_set(dhdp->bus, enable);
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+}
+
+int
+dhd_bus_get_oob_irq_num(dhd_pub_t *dhdp)
+{
+ int irq_num = 0;
+#ifdef BCMPCIE_OOB_HOST_WAKE
+ irq_num = dhdpcie_get_oob_irq_num(dhdp->bus);
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+ return irq_num;
+}
+
+#ifdef BCMDBG
+void
+dhd_bus_flow_ring_cnt_update(dhd_bus_t *bus, uint16 flowid, uint32 txstatus)
+{
+ flow_ring_node_t *flow_ring_node;
+#if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS)
+ dhd_awdl_stats_t *awdl_stats;
+ if_flow_lkup_t *if_flow_lkup;
+ unsigned long flags;
+ uint8 ifindex;
+ uint8 role;
+#endif /* DHD_AWDL && AWDL_SLOT_STATS */
+ /* If we have d2h sync enabled due to marker overloading, we cannot update this. */
+ if (bus->dhd->d2h_sync_mode)
+ return;
+ if (txstatus >= DHD_MAX_TX_STATUS_MSGS) {
+ /*
+ * XXX: changed DHD_ERROR to DHD_INFO
+ * There are flood of messages with P2P FW
+ * It is being root-caused.
+ */
+ DHD_INFO(("%s Unknown txtstatus = %d \n",
+ __FUNCTION__, txstatus));
+ return;
+ }
+ flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
+ ASSERT(flow_ring_node->flowid == flowid);
+ flow_ring_node->flow_info.tx_status[txstatus]++;
+#if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS)
+ if_flow_lkup = (if_flow_lkup_t *)bus->dhd->if_flow_lkup;
+ ifindex = flow_ring_node->flow_info.ifindex;
+ role = if_flow_lkup[ifindex].role;
+ if (role == WLC_E_IF_ROLE_AWDL) {
+ DHD_AWDL_STATS_LOCK(bus->dhd->awdl_stats_lock, flags);
+ awdl_stats = &bus->dhd->awdl_stats[bus->dhd->awdl_tx_status_slot];
+ awdl_stats->tx_status[txstatus]++;
+ DHD_AWDL_STATS_UNLOCK(bus->dhd->awdl_stats_lock, flags);
+ }
+#endif /* DHD_AWDL && AWDL_SLOT_STATS */
+ return;
+}
+#endif /* BCMDBG */
+
+bool
+dhdpcie_bus_get_pcie_hostready_supported(dhd_bus_t *bus)
+{
+ return bus->dhd->d2h_hostrdy_supported;
+}
+
+void
+dhd_pcie_dump_core_regs(dhd_pub_t * pub, uint32 index, uint32 first_addr, uint32 last_addr)
+{
+ dhd_bus_t *bus = pub->bus;
+ uint32 coreoffset = index << 12;
+ uint32 core_addr = SI_ENUM_BASE(bus->sih) + coreoffset;
+ uint32 value;
+
+ while (first_addr <= last_addr) {
+ core_addr = SI_ENUM_BASE(bus->sih) + coreoffset + first_addr;
+ if (serialized_backplane_access(bus, core_addr, 4, &value, TRUE) != BCME_OK) {
+ DHD_ERROR(("Invalid size/addr combination\n"));
+ } else {
+ DHD_ERROR(("[0x%08x]: 0x%08x\n", core_addr, value));
+ }
+ first_addr = first_addr + 4;
+ }
+}
+
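+/* Capability predicates: each feature (iDMA, IFRM, DAR, ...) is reported as
+ * supported only when both the host-side enable flag on the bus and the
+ * corresponding dongle-negotiated flag in dhd are set.
+ */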
+bool
+dhdpcie_bus_get_pcie_idma_supported(dhd_bus_t *bus)
+{
+ if (!bus->dhd) {
+ return FALSE;
+ } else if (bus->idma_enabled) {
+ return bus->dhd->idma_enable;
+ } else {
+ return FALSE;
+ }
+}
+
+bool
+dhdpcie_bus_get_pcie_ifrm_supported(dhd_bus_t *bus)
+{
+ if (!bus->dhd) {
+ return FALSE;
+ } else if (bus->ifrm_enabled) {
+ return bus->dhd->ifrm_enable;
+ } else {
+ return FALSE;
+ }
+}
+
+bool
+dhdpcie_bus_get_pcie_dar_supported(dhd_bus_t *bus)
+{
+ if (!bus->dhd) {
+ return FALSE;
+ } else if (bus->dar_enabled) {
+ return bus->dhd->dar_enable;
+ } else {
+ return FALSE;
+ }
+}
+
+#ifdef DHD_HP2P
+bool
+dhdpcie_bus_get_hp2p_supported(dhd_bus_t *bus)
+{
+ if (!bus->dhd) {
+ return FALSE;
+ } else if (bus->dhd->hp2p_enable) {
+ return bus->dhd->hp2p_capable;
+ } else {
+ return FALSE;
+ }
+}
+#endif /* DHD_HP2P */
+
+#ifdef PCIE_OOB
+bool
+dhdpcie_bus_get_pcie_oob_dw_supported(dhd_bus_t *bus)
+{
+ if (!bus->dhd)
+ return FALSE;
+ if (bus->oob_enabled) {
+ return !bus->dhd->d2h_no_oob_dw;
+ } else {
+ return FALSE;
+ }
+}
+#endif /* PCIE_OOB */
+
+void
+dhdpcie_bus_enab_pcie_dw(dhd_bus_t *bus, uint8 dw_option)
+{
+ DHD_ERROR(("ENABLING DW:%d\n", dw_option));
+ bus->dw_option = dw_option;
+}
+
+#ifdef PCIE_INB_DW
+bool
+dhdpcie_bus_get_pcie_inband_dw_supported(dhd_bus_t *bus)
+{
+ if (!bus->dhd)
+ return FALSE;
+ if (bus->inb_enabled) {
+ return bus->dhd->d2h_inband_dw;
+ } else {
+ return FALSE;
+ }
+}
+
+void
+dhdpcie_bus_set_pcie_inband_dw_state(dhd_bus_t *bus, enum dhd_bus_ds_state state)
+{
+ if (!INBAND_DW_ENAB(bus))
+ return;
+
+ DHD_PCIE_INFO(("%s:%d\n", __FUNCTION__, state));
+ bus->dhd->ds_state = state;
+ if (state == DW_DEVICE_DS_DISABLED_WAIT || state == DW_DEVICE_DS_D3_INFORM_WAIT) {
+ bus->ds_exit_timeout = 100;
+ }
+ if (state == DW_DEVICE_HOST_WAKE_WAIT) {
+ bus->host_sleep_exit_timeout = 100;
+ }
+ if (state == DW_DEVICE_DS_DEV_WAKE) {
+ bus->ds_exit_timeout = 0;
+ }
+ if (state == DW_DEVICE_DS_ACTIVE) {
+ bus->host_sleep_exit_timeout = 0;
+ }
+}
+
+enum dhd_bus_ds_state
+dhdpcie_bus_get_pcie_inband_dw_state(dhd_bus_t *bus)
+{
+ if (!INBAND_DW_ENAB(bus))
+ return DW_DEVICE_DS_INVALID;
+ return bus->dhd->ds_state;
+}
+#endif /* PCIE_INB_DW */
+
+#ifdef DHD_MMIO_TRACE
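+/* Record one MMIO access in a circular trace of MAX_MMIO_TRACE_SIZE entries.
+ * Repeated accesses to the same ring index that arrive within
+ * MIN_MMIO_TRACE_TIME microseconds overwrite the previous entry instead of
+ * consuming a new slot, so bursts do not flush older history.
+ */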
+static void
+dhd_bus_mmio_trace(dhd_bus_t *bus, uint32 addr, uint32 value, bool set)
+{
+ uint32 cnt = bus->mmio_trace_count % MAX_MMIO_TRACE_SIZE;
+ uint64 ts_cur = OSL_LOCALTIME_NS();
+ uint32 tmp_cnt;
+
+ tmp_cnt = (bus->mmio_trace_count) ? ((bus->mmio_trace_count - 1)
+ % MAX_MMIO_TRACE_SIZE) : cnt;
+
+ if (((DIV_U64_BY_U64(ts_cur, NSEC_PER_USEC) -
+ DIV_U64_BY_U64(bus->mmio_trace[tmp_cnt].timestamp, NSEC_PER_USEC))
+ > MIN_MMIO_TRACE_TIME) || (bus->mmio_trace[tmp_cnt].value !=
+ (value & DHD_RING_IDX))) {
+ bus->mmio_trace_count++;
+ } else {
+ cnt = tmp_cnt;
+ }
+ bus->mmio_trace[cnt].timestamp = ts_cur;
+ bus->mmio_trace[cnt].addr = addr;
+ bus->mmio_trace[cnt].set = set;
+ bus->mmio_trace[cnt].value = value;
+}
+
+void
+dhd_dump_bus_mmio_trace(dhd_bus_t *bus, struct bcmstrbuf *strbuf)
+{
+ int dumpsz;
+ int i;
+
+ dumpsz = bus->mmio_trace_count < MAX_MMIO_TRACE_SIZE ?
+ bus->mmio_trace_count : MAX_MMIO_TRACE_SIZE;
+ if (dumpsz == 0) {
+ bcm_bprintf(strbuf, "\nEmpty MMIO TRACE\n");
+ return;
+ }
+ bcm_bprintf(strbuf, "---- MMIO TRACE ------\n");
+ bcm_bprintf(strbuf, "Decoding value field, Ex: 0xFF2C00E4, 0xFF->WR/0XDD->RD "
+ "0x2C->Ringid 0x00E4->RD/WR Value\n");
+ bcm_bprintf(strbuf, "Timestamp ns\t\tAddr\t\tW/R\tValue\n");
+ for (i = 0; i < dumpsz; i++) {
+ bcm_bprintf(strbuf, SEC_USEC_FMT"\t0x%08x\t%s\t0x%08x\n",
+ GET_SEC_USEC(bus->mmio_trace[i].timestamp),
+ bus->mmio_trace[i].addr,
+ bus->mmio_trace[i].set ? "W" : "R",
+ bus->mmio_trace[i].value);
+ }
+}
+#endif /* DHD_MMIO_TRACE */
+
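+/* Append one device/host deep-sleep protocol event (direction, mailbox value
+ * and, with PCIE_INB_DW, the inband state) to a circular trace of
+ * MAX_DS_TRACE_SIZE entries, dumped later by dhd_dump_bus_ds_trace().
+ */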
+static void
+#ifdef PCIE_INB_DW
+dhd_bus_ds_trace(dhd_bus_t *bus, uint32 dsval, bool d2h, enum dhd_bus_ds_state inbstate)
+#else
+dhd_bus_ds_trace(dhd_bus_t *bus, uint32 dsval, bool d2h)
+#endif /* PCIE_INB_DW */
+{
+ uint32 cnt = bus->ds_trace_count % MAX_DS_TRACE_SIZE;
+
+ bus->ds_trace[cnt].timestamp = OSL_LOCALTIME_NS();
+ bus->ds_trace[cnt].d2h = d2h;
+ bus->ds_trace[cnt].dsval = dsval;
+#ifdef PCIE_INB_DW
+ bus->ds_trace[cnt].inbstate = inbstate;
+#endif /* PCIE_INB_DW */
+ bus->ds_trace_count++;
+}
+
+#ifdef PCIE_INB_DW
+const char *
+dhd_convert_dsval(uint32 val, bool d2h)
+{
+ if (d2h) {
+ switch (val) {
+ case D2H_DEV_D3_ACK:
+ return "D2H_DEV_D3_ACK";
+ case D2H_DEV_DS_ENTER_REQ:
+ return "D2H_DEV_DS_ENTER_REQ";
+ case D2H_DEV_DS_EXIT_NOTE:
+ return "D2H_DEV_DS_EXIT_NOTE";
+ case D2H_DEV_FWHALT:
+ return "D2H_DEV_FWHALT";
+ case D2HMB_DS_HOST_SLEEP_EXIT_ACK:
+ return "D2HMB_DS_HOST_SLEEP_EXIT_ACK";
+ default:
+ return "INVALID";
+ }
+ } else {
+ switch (val) {
+ case H2DMB_DS_DEVICE_WAKE_DEASSERT:
+ return "H2DMB_DS_DEVICE_WAKE_DEASSERT";
+ case H2DMB_DS_DEVICE_WAKE_ASSERT:
+ return "H2DMB_DS_DEVICE_WAKE_ASSERT";
+ case H2D_HOST_D3_INFORM:
+ return "H2D_HOST_D3_INFORM";
+ case H2D_HOST_DS_ACK:
+ return "H2D_HOST_DS_ACK";
+ case H2D_HOST_DS_NAK:
+ return "H2D_HOST_DS_NAK";
+ case H2D_HOST_CONS_INT:
+ return "H2D_HOST_CONS_INT";
+ case H2D_FW_TRAP:
+ return "H2D_FW_TRAP";
+ default:
+ return "INVALID";
+ }
+ }
+}
+
+const char *
+dhd_convert_inb_state_names(enum dhd_bus_ds_state inbstate)
+{
+ switch (inbstate) {
+ case DW_DEVICE_DS_DEV_SLEEP:
+ return "DW_DEVICE_DS_DEV_SLEEP";
+ case DW_DEVICE_DS_DISABLED_WAIT:
+ return "DW_DEVICE_DS_DISABLED_WAIT";
+ case DW_DEVICE_DS_DEV_WAKE:
+ return "DW_DEVICE_DS_DEV_WAKE";
+ case DW_DEVICE_DS_ACTIVE:
+ return "DW_DEVICE_DS_ACTIVE";
+ case DW_DEVICE_HOST_SLEEP_WAIT:
+ return "DW_DEVICE_HOST_SLEEP_WAIT";
+ case DW_DEVICE_HOST_SLEEP:
+ return "DW_DEVICE_HOST_SLEEP";
+ case DW_DEVICE_HOST_WAKE_WAIT:
+ return "DW_DEVICE_HOST_WAKE_WAIT";
+ case DW_DEVICE_DS_D3_INFORM_WAIT:
+ return "DW_DEVICE_DS_D3_INFORM_WAIT";
+ default:
+ return "INVALID";
+ }
+}
+#endif /* PCIE_INB_DW */
+
+void
+dhd_dump_bus_ds_trace(dhd_bus_t *bus, struct bcmstrbuf *strbuf)
+{
+ int dumpsz;
+ int i;
+
+ dumpsz = bus->ds_trace_count < MAX_DS_TRACE_SIZE ?
+ bus->ds_trace_count : MAX_DS_TRACE_SIZE;
+ if (dumpsz == 0) {
+ bcm_bprintf(strbuf, "\nEmpty DS TRACE\n");
+ return;
+ }
+ bcm_bprintf(strbuf, "---- DS TRACE ------\n");
+#ifdef PCIE_INB_DW
+ bcm_bprintf(strbuf, "%s\t\t%s\t%-30s\t\t%s\n",
+ "Timestamp us", "Dir", "Value", "Inband-State");
+ for (i = 0; i < dumpsz; i++) {
+ bcm_bprintf(strbuf, "%llu\t%s\t%-30s\t\t%s\n",
+ bus->ds_trace[i].timestamp,
+ bus->ds_trace[i].d2h ? "D2H":"H2D",
+ dhd_convert_dsval(bus->ds_trace[i].dsval, bus->ds_trace[i].d2h),
+ dhd_convert_inb_state_names(bus->ds_trace[i].inbstate));
+ }
+#else
+ bcm_bprintf(strbuf, "Timestamp us\t\tDir\tValue\n");
+ for (i = 0; i < dumpsz; i++) {
+ bcm_bprintf(strbuf, "%llu\t%s\t%d\n",
+ bus->ds_trace[i].timestamp,
+ bus->ds_trace[i].d2h ? "D2H":"H2D",
+ bus->ds_trace[i].dsval);
+ }
+#endif /* PCIE_INB_DW */
+ bcm_bprintf(strbuf, "--------------------------\n");
+}
+
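+/* Pretty-print the last firmware trap record: exception type, epc/cpsr/spsr,
+ * sp (r13), lr (r14), return pc and r0-r12, each converted from the dongle's
+ * little-endian layout with ltoh32().
+ */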
+void
+dhd_bus_dump_trap_info(dhd_bus_t *bus, struct bcmstrbuf *strbuf)
+{
+ trap_t *tr = &bus->dhd->last_trap_info;
+ bcm_bprintf(strbuf,
+ "\nTRAP type 0x%x @ epc 0x%x, cpsr 0x%x, spsr 0x%x, sp 0x%x,"
+ " lp 0x%x, rpc 0x%x"
+ "\nTrap offset 0x%x, r0 0x%x, r1 0x%x, r2 0x%x, r3 0x%x, "
+ "r4 0x%x, r5 0x%x, r6 0x%x, r7 0x%x, r8 0x%x, r9 0x%x, "
+ "r10 0x%x, r11 0x%x, r12 0x%x\n\n",
+ ltoh32(tr->type), ltoh32(tr->epc), ltoh32(tr->cpsr), ltoh32(tr->spsr),
+ ltoh32(tr->r13), ltoh32(tr->r14), ltoh32(tr->pc),
+ ltoh32(bus->pcie_sh->trap_addr),
+ ltoh32(tr->r0), ltoh32(tr->r1), ltoh32(tr->r2), ltoh32(tr->r3),
+ ltoh32(tr->r4), ltoh32(tr->r5), ltoh32(tr->r6), ltoh32(tr->r7),
+ ltoh32(tr->r8), ltoh32(tr->r9), ltoh32(tr->r10),
+ ltoh32(tr->r11), ltoh32(tr->r12));
+}
+
+int
+dhd_bus_readwrite_bp_addr(dhd_pub_t *dhdp, uint addr, uint size, uint* data, bool read)
+{
+ int bcmerror = 0;
+ struct dhd_bus *bus = dhdp->bus;
+
+ if (serialized_backplane_access(bus, addr, size, data, read) != BCME_OK) {
+ DHD_ERROR(("Invalid size/addr combination \n"));
+ bcmerror = BCME_ERROR;
+ }
+
+ return bcmerror;
+}
+
+int
+dhd_get_idletime(dhd_pub_t *dhd)
+{
+ return dhd->bus->idletime;
+}
+
+bool
+dhd_get_rpm_state(dhd_pub_t *dhd)
+{
+ return dhd->bus->rpm_enabled;
+}
+
+void
+dhd_set_rpm_state(dhd_pub_t *dhd, bool state)
+{
+ DHD_RPM(("%s: %d\n", __FUNCTION__, state));
+ dhd->bus->rpm_enabled = state;
+}
+
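+/* Single-register backplane accessor used throughout the SSSR code: one
+ * serialized 32-bit read or write (selected by 'read') preceded by a short
+ * delay, logging every access so register dumps land in the kernel log.
+ */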
+static INLINE void
+dhd_sbreg_op(dhd_pub_t *dhd, uint addr, uint *val, bool read)
+{
+ OSL_DELAY(1);
+ if (serialized_backplane_access(dhd->bus, addr, sizeof(uint), val, read) != BCME_OK) {
+ DHD_ERROR(("sbreg: Invalid uint addr: 0x%x \n", addr));
+ } else {
+ DHD_ERROR(("sbreg: addr:0x%x val:0x%x read:%d\n", addr, *val, read));
+ }
+ return;
+}
+
+#ifdef DHD_SSSR_DUMP
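+/* Dump a MAC save/restore FIFO: reset the address register to offset 0, then
+ * read the data register fifo_size/4 times; the FIFO offset is presumed to
+ * auto-increment on each 32-bit read of the data register.
+ */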
+static int
+dhdpcie_get_sssr_fifo_dump(dhd_pub_t *dhd, uint *buf, uint fifo_size,
+ uint addr_reg, uint data_reg)
+{
+ uint addr;
+ uint val = 0;
+ int i;
+
+ DHD_ERROR(("%s\n", __FUNCTION__));
+
+ if (!buf) {
+ DHD_ERROR(("%s: buf is NULL\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ if (!fifo_size) {
+ DHD_ERROR(("%s: fifo_size is 0\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ /* Set the base address offset to 0 */
+ addr = addr_reg;
+ val = 0;
+ dhd_sbreg_op(dhd, addr, &val, FALSE);
+
+ addr = data_reg;
+ /* Read 4 bytes at once and loop for fifo_size / 4 */
+ for (i = 0; i < fifo_size / 4; i++) {
+ if (serialized_backplane_access(dhd->bus, addr,
+ sizeof(uint), &val, TRUE) != BCME_OK) {
+ DHD_ERROR(("%s: error in serialized_backplane_access\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+ buf[i] = val;
+ OSL_DELAY(1);
+ }
+ return BCME_OK;
+}
+
+static int
+dhdpcie_get_sssr_dig_dump(dhd_pub_t *dhd, uint *buf, uint fifo_size,
+ uint addr_reg)
+{
+ uint addr;
+ uint val = 0;
+ int i;
+ si_t *sih = dhd->bus->sih;
+ bool vasip_enab, dig_mem_check;
+ uint32 ioctrl_addr = 0;
+
+ DHD_ERROR(("%s addr_reg=0x%x size=0x%x\n", __FUNCTION__, addr_reg, fifo_size));
+
+ if (!buf) {
+ DHD_ERROR(("%s: buf is NULL\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ if (!fifo_size) {
+ DHD_ERROR(("%s: fifo_size is 0\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ vasip_enab = FALSE;
+ dig_mem_check = FALSE;
+ /* SSSR register information structures v0 and v1 share most fields except dig_mem */
+ switch (dhd->sssr_reg_info->rev2.version) {
+ case SSSR_REG_INFO_VER_3 :
+ /* intentional fall through */
+ case SSSR_REG_INFO_VER_2 :
+ if ((dhd->sssr_reg_info->rev2.length > OFFSETOF(sssr_reg_info_v2_t,
+ dig_mem_info)) && dhd->sssr_reg_info->rev2.dig_mem_info.dig_sr_size) {
+ dig_mem_check = TRUE;
+ }
+ break;
+ case SSSR_REG_INFO_VER_1 :
+ if (dhd->sssr_reg_info->rev1.vasip_regs.vasip_sr_size) {
+ vasip_enab = TRUE;
+ } else if ((dhd->sssr_reg_info->rev1.length > OFFSETOF(sssr_reg_info_v1_t,
+ dig_mem_info)) && dhd->sssr_reg_info->rev1.
+ dig_mem_info.dig_sr_size) {
+ dig_mem_check = TRUE;
+ }
+ ioctrl_addr = dhd->sssr_reg_info->rev1.vasip_regs.wrapper_regs.ioctrl;
+ break;
+ case SSSR_REG_INFO_VER_0 :
+ if (dhd->sssr_reg_info->rev0.vasip_regs.vasip_sr_size) {
+ vasip_enab = TRUE;
+ }
+ ioctrl_addr = dhd->sssr_reg_info->rev0.vasip_regs.wrapper_regs.ioctrl;
+ break;
+ default :
+ DHD_ERROR(("invalid sssr_reg_ver"));
+ return BCME_UNSUPPORTED;
+ }
+ if (addr_reg) {
+ DHD_ERROR(("dig_mem_check=%d vasip_enab=%d\n", dig_mem_check, vasip_enab));
+ if (!vasip_enab && dig_mem_check) {
+ int err = dhdpcie_bus_membytes(dhd->bus, FALSE, addr_reg, (uint8 *)buf,
+ fifo_size);
+ if (err != BCME_OK) {
+ DHD_ERROR(("%s: Error reading dig dump from dongle !\n",
+ __FUNCTION__));
+ }
+ } else {
+ /* Check if vasip clk is disabled, if yes enable it */
+ addr = ioctrl_addr;
+ dhd_sbreg_op(dhd, addr, &val, TRUE);
+ if (!val) {
+ val = 1;
+ dhd_sbreg_op(dhd, addr, &val, FALSE);
+ }
+
+ addr = addr_reg;
+ /* Read 4 bytes at once and loop for fifo_size / 4 */
+ for (i = 0; i < fifo_size / 4; i++, addr += 4) {
+ if (serialized_backplane_access(dhd->bus, addr, sizeof(uint),
+ &val, TRUE) != BCME_OK) {
+ DHD_ERROR(("%s: Invalid uint addr: 0x%x \n", __FUNCTION__,
+ addr));
+ return BCME_ERROR;
+ }
+ buf[i] = val;
+ OSL_DELAY(1);
+ }
+ }
+ } else {
+ uint cur_coreid;
+ uint chipc_corerev;
+ chipcregs_t *chipcregs;
+
+ /* Save the current core */
+ cur_coreid = si_coreid(sih);
+
+ /* Switch to ChipC */
+ chipcregs = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+ if (!chipcregs) {
+ DHD_ERROR(("%s: si_setcore returns NULL for core id %u \n",
+ __FUNCTION__, CC_CORE_ID));
+ return BCME_ERROR;
+ }
+
+ chipc_corerev = si_corerev(sih);
+
+ if ((chipc_corerev == 64) || (chipc_corerev == 65)) {
+ W_REG(si_osh(sih), &chipcregs->sr_memrw_addr, 0);
+
+ /* Read 4 bytes at once and loop for fifo_size / 4 */
+ for (i = 0; i < fifo_size / 4; i++) {
+ buf[i] = R_REG(si_osh(sih), &chipcregs->sr_memrw_data);
+ OSL_DELAY(1);
+ }
+ }
+
+ /* Switch back to the original core */
+ si_setcore(sih, cur_coreid, 0);
+ }
+
+ return BCME_OK;
+}
+
+#if defined(BCMPCIE) && defined(EWP_ETD_PRSRV_LOGS)
+void
+dhdpcie_get_etd_preserve_logs(dhd_pub_t *dhd,
+ uint8 *ext_trap_data, void *event_decode_data)
+{
+ hnd_ext_trap_hdr_t *hdr = NULL;
+ bcm_tlv_t *tlv;
+ eventlog_trapdata_info_t *etd_evtlog = NULL;
+ eventlog_trap_buf_info_t *evtlog_buf_arr = NULL;
+ uint arr_size = 0;
+ int i = 0;
+ int err = 0;
+ uint32 seqnum = 0;
+
+ if (!ext_trap_data || !event_decode_data || !dhd)
+ return;
+
+ if (!dhd->concise_dbg_buf)
+ return;
+
+ /* First word is original trap_data, skip */
+ ext_trap_data += sizeof(uint32);
+
+ hdr = (hnd_ext_trap_hdr_t *)ext_trap_data;
+ tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_LOG_DATA);
+ if (tlv) {
+ uint32 baseaddr = 0;
+ uint32 endaddr = dhd->bus->dongle_ram_base + dhd->bus->ramsize - 4;
+
+ etd_evtlog = (eventlog_trapdata_info_t *)tlv->data;
+ DHD_ERROR(("%s: etd_evtlog tlv found, num_elements=%x; "
+ "seq_num=%x; log_arr_addr=%x\n", __FUNCTION__,
+ (etd_evtlog->num_elements),
+ ntoh32(etd_evtlog->seq_num), (etd_evtlog->log_arr_addr)));
+ if (!etd_evtlog->num_elements ||
+ etd_evtlog->num_elements > MAX_EVENTLOG_BUFFERS) {
+ DHD_ERROR(("%s: ETD has bad 'num_elements' !\n", __FUNCTION__));
+ return;
+ }
+ if (!etd_evtlog->log_arr_addr) {
+ DHD_ERROR(("%s: ETD has bad 'log_arr_addr' !\n", __FUNCTION__));
+ return;
+ }
+
+ arr_size = (uint32)sizeof(*evtlog_buf_arr) * (etd_evtlog->num_elements);
+ evtlog_buf_arr = MALLOCZ(dhd->osh, arr_size);
+ if (!evtlog_buf_arr) {
+ DHD_ERROR(("%s: out of memory !\n", __FUNCTION__));
+ return;
+ }
+
+ /* boundary check */
+ baseaddr = etd_evtlog->log_arr_addr;
+ if ((baseaddr < dhd->bus->dongle_ram_base) ||
+ ((baseaddr + arr_size) > endaddr)) {
+ DHD_ERROR(("%s: Error reading invalid address\n",
+ __FUNCTION__));
+ goto err;
+ }
+
+ /* read the eventlog_trap_buf_info_t array from dongle memory */
+ err = dhdpcie_bus_membytes(dhd->bus, FALSE,
+ (ulong)(etd_evtlog->log_arr_addr),
+ (uint8 *)evtlog_buf_arr, arr_size);
+ if (err != BCME_OK) {
+ DHD_ERROR(("%s: Error reading event log array from dongle !\n",
+ __FUNCTION__));
+ goto err;
+ }
+ /* ntoh conversion is required only for seq_num: event logs coming
+ * from the info ring are sent by the dongle in network byte order,
+ * and ETD follows the same convention
+ */
+ seqnum = ntoh32(etd_evtlog->seq_num);
+ memset(dhd->concise_dbg_buf, 0, CONCISE_DUMP_BUFLEN);
+ for (i = 0; i < (etd_evtlog->num_elements); ++i) {
+ /* boundary check */
+ baseaddr = evtlog_buf_arr[i].buf_addr;
+ if ((baseaddr < dhd->bus->dongle_ram_base) ||
+ ((baseaddr + evtlog_buf_arr[i].len) > endaddr)) {
+ DHD_ERROR(("%s: Error reading invalid address\n",
+ __FUNCTION__));
+ goto err;
+ }
+ /* read each individual event log buf from dongle memory */
+ err = dhdpcie_bus_membytes(dhd->bus, FALSE,
+ ((ulong)evtlog_buf_arr[i].buf_addr),
+ dhd->concise_dbg_buf, (evtlog_buf_arr[i].len));
+ if (err != BCME_OK) {
+ DHD_ERROR(("%s: Error reading event log buffer from dongle !\n",
+ __FUNCTION__));
+ goto err;
+ }
+ dhd_dbg_msgtrace_log_parser(dhd, dhd->concise_dbg_buf,
+ event_decode_data, (evtlog_buf_arr[i].len),
+ FALSE, hton32(seqnum));
+ ++seqnum;
+ }
+err:
+ MFREE(dhd->osh, evtlog_buf_arr, arr_size);
+ } else {
+ DHD_ERROR(("%s: Error getting trap log data in ETD !\n", __FUNCTION__));
+ }
+}
+#endif /* BCMPCIE && EWP_ETD_PRSRV_LOGS */
+
+static uint32
+dhdpcie_resume_chipcommon_powerctrl(dhd_pub_t *dhd, uint32 reg_val)
+{
+ uint addr;
+ uint val = 0;
+ uint powerctrl_mask;
+
+ DHD_ERROR(("%s\n", __FUNCTION__));
+
+ /* SSSR register information structures v0 and v1 share most fields except dig_mem */
+ switch (dhd->sssr_reg_info->rev2.version) {
+ case SSSR_REG_INFO_VER_3 :
+ /* intentional fall through */
+ case SSSR_REG_INFO_VER_2 :
+ addr = dhd->sssr_reg_info->rev2.chipcommon_regs.base_regs.powerctrl;
+ powerctrl_mask = dhd->sssr_reg_info->rev2.
+ chipcommon_regs.base_regs.powerctrl_mask;
+ break;
+ case SSSR_REG_INFO_VER_1 :
+ case SSSR_REG_INFO_VER_0 :
+ addr = dhd->sssr_reg_info->rev1.chipcommon_regs.base_regs.powerctrl;
+ powerctrl_mask = dhd->sssr_reg_info->rev1.
+ chipcommon_regs.base_regs.powerctrl_mask;
+ break;
+ default :
+ DHD_ERROR(("invalid sssr_reg_ver"));
+ return BCME_UNSUPPORTED;
+ }
+
+ /* conditionally clear bits [11:8] of PowerCtrl */
+ dhd_sbreg_op(dhd, addr, &val, TRUE);
+
+ if (!(val & powerctrl_mask)) {
+ dhd_sbreg_op(dhd, addr, &reg_val, FALSE);
+ }
+ return BCME_OK;
+}
+
+static uint32
+dhdpcie_suspend_chipcommon_powerctrl(dhd_pub_t *dhd)
+{
+ uint addr;
+ uint val = 0, reg_val = 0;
+ uint powerctrl_mask;
+
+ DHD_ERROR(("%s\n", __FUNCTION__));
+
+ /* SSSR register information structures v0 and v1 share most fields except dig_mem */
+ switch (dhd->sssr_reg_info->rev2.version) {
+ case SSSR_REG_INFO_VER_3 :
+ /* intentional fall through */
+ case SSSR_REG_INFO_VER_2 :
+ addr = dhd->sssr_reg_info->rev2.chipcommon_regs.base_regs.powerctrl;
+ powerctrl_mask = dhd->sssr_reg_info->rev2.
+ chipcommon_regs.base_regs.powerctrl_mask;
+ break;
+ case SSSR_REG_INFO_VER_1 :
+ case SSSR_REG_INFO_VER_0 :
+ addr = dhd->sssr_reg_info->rev1.chipcommon_regs.base_regs.powerctrl;
+ powerctrl_mask = dhd->sssr_reg_info->rev1.
+ chipcommon_regs.base_regs.powerctrl_mask;
+ break;
+ default :
+ DHD_ERROR(("invalid sssr_reg_ver"));
+ return BCME_UNSUPPORTED;
+ }
+
+ /* conditionally clear bits [11:8] of PowerCtrl */
+ dhd_sbreg_op(dhd, addr, &reg_val, TRUE);
+ if (reg_val & powerctrl_mask) {
+ val = 0;
+ dhd_sbreg_op(dhd, addr, &val, FALSE);
+ }
+ return reg_val;
+}
+
+static int
+dhdpcie_clear_intmask_and_timer(dhd_pub_t *dhd)
+{
+ uint addr;
+ uint val;
+ uint32 cc_intmask, pmuintmask0, pmuintmask1, resreqtimer, macresreqtimer,
+ macresreqtimer1, vasip_sr_size = 0;
+
+ DHD_ERROR(("%s\n", __FUNCTION__));
+
+ /* SSSR register information structures v0 and v1 share most fields except dig_mem */
+ switch (dhd->sssr_reg_info->rev2.version) {
+ case SSSR_REG_INFO_VER_3 :
+ /* intentional fall through */
+ case SSSR_REG_INFO_VER_2 :
+ cc_intmask = dhd->sssr_reg_info->rev2.chipcommon_regs.base_regs.intmask;
+ pmuintmask0 = dhd->sssr_reg_info->rev2.pmu_regs.base_regs.pmuintmask0;
+ pmuintmask1 = dhd->sssr_reg_info->rev2.pmu_regs.base_regs.pmuintmask1;
+ resreqtimer = dhd->sssr_reg_info->rev2.pmu_regs.base_regs.resreqtimer;
+ macresreqtimer = dhd->sssr_reg_info->rev2.pmu_regs.base_regs.macresreqtimer;
+ macresreqtimer1 = dhd->sssr_reg_info->rev2.
+ pmu_regs.base_regs.macresreqtimer1;
+ break;
+ case SSSR_REG_INFO_VER_1 :
+ case SSSR_REG_INFO_VER_0 :
+ cc_intmask = dhd->sssr_reg_info->rev1.chipcommon_regs.base_regs.intmask;
+ pmuintmask0 = dhd->sssr_reg_info->rev1.pmu_regs.base_regs.pmuintmask0;
+ pmuintmask1 = dhd->sssr_reg_info->rev1.pmu_regs.base_regs.pmuintmask1;
+ resreqtimer = dhd->sssr_reg_info->rev1.pmu_regs.base_regs.resreqtimer;
+ macresreqtimer = dhd->sssr_reg_info->rev1.pmu_regs.base_regs.macresreqtimer;
+ macresreqtimer1 = dhd->sssr_reg_info->rev1.
+ pmu_regs.base_regs.macresreqtimer1;
+ vasip_sr_size = dhd->sssr_reg_info->rev1.vasip_regs.vasip_sr_size;
+ break;
+ default :
+ DHD_ERROR(("invalid sssr_reg_ver"));
+ return BCME_UNSUPPORTED;
+ }
+
+ /* clear chipcommon intmask */
+ val = 0x0;
+ dhd_sbreg_op(dhd, cc_intmask, &val, FALSE);
+
+ /* clear PMUIntMask0 */
+ val = 0x0;
+ dhd_sbreg_op(dhd, pmuintmask0, &val, FALSE);
+
+ /* clear PMUIntMask1 */
+ val = 0x0;
+ dhd_sbreg_op(dhd, pmuintmask1, &val, FALSE);
+
+ /* clear res_req_timer */
+ val = 0x0;
+ dhd_sbreg_op(dhd, resreqtimer, &val, FALSE);
+
+ /* clear macresreqtimer */
+ val = 0x0;
+ dhd_sbreg_op(dhd, macresreqtimer, &val, FALSE);
+
+ /* clear macresreqtimer1 */
+ val = 0x0;
+ dhd_sbreg_op(dhd, macresreqtimer1, &val, FALSE);
+
+ /* clear VasipClkEn */
+ if (vasip_sr_size) {
+ addr = dhd->sssr_reg_info->rev1.vasip_regs.wrapper_regs.ioctrl;
+ val = 0x0;
+ dhd_sbreg_op(dhd, addr, &val, FALSE);
+ }
+
+ return BCME_OK;
+}
+
+static void
+dhdpcie_update_d11_status_from_trapdata(dhd_pub_t *dhd)
+{
+#define TRAP_DATA_MAIN_CORE_BIT_MASK (1 << 1)
+#define TRAP_DATA_AUX_CORE_BIT_MASK (1 << 4)
+ uint trap_data_mask[MAX_NUM_D11CORES] =
+ {TRAP_DATA_MAIN_CORE_BIT_MASK, TRAP_DATA_AUX_CORE_BIT_MASK};
+ int i;
+ /* Apply only for 4375 chip */
+ if (dhd_bus_chip_id(dhd) == BCM4375_CHIP_ID) {
+ for (i = 0; i < MAX_NUM_D11CORES; i++) {
+ if (dhd->sssr_d11_outofreset[i] &&
+ (dhd->dongle_trap_data & trap_data_mask[i])) {
+ dhd->sssr_d11_outofreset[i] = TRUE;
+ } else {
+ dhd->sssr_d11_outofreset[i] = FALSE;
+ }
+ DHD_ERROR(("%s: sssr_d11_outofreset[%d] : %d after AND with "
+ "trap_data:0x%x-0x%x\n",
+ __FUNCTION__, i, dhd->sssr_d11_outofreset[i],
+ dhd->dongle_trap_data, trap_data_mask[i]));
+ }
+ }
+}
+
+static int
+dhdpcie_d11_check_outofreset(dhd_pub_t *dhd)
+{
+ int i;
+ uint addr = 0;
+ uint val = 0;
+ uint8 num_d11cores;
+
+ DHD_ERROR(("%s\n", __FUNCTION__));
+
+ num_d11cores = dhd_d11_slices_num_get(dhd);
+
+ for (i = 0; i < num_d11cores; i++) {
+ /* Check if bit 0 of resetctrl is cleared */
+ /* SSSR register information structures v0 and
+ * v1 share most fields except dig_mem
+ */
+ switch (dhd->sssr_reg_info->rev2.version) {
+ case SSSR_REG_INFO_VER_3 :
+ /* intentional fall through */
+ case SSSR_REG_INFO_VER_2 :
+ addr = dhd->sssr_reg_info->rev2.
+ mac_regs[i].wrapper_regs.resetctrl;
+ break;
+ case SSSR_REG_INFO_VER_1 :
+ case SSSR_REG_INFO_VER_0 :
+ addr = dhd->sssr_reg_info->rev1.
+ mac_regs[i].wrapper_regs.resetctrl;
+ break;
+ default :
+ DHD_ERROR(("invalid sssr_reg_ver"));
+ return BCME_UNSUPPORTED;
+ }
+ if (!addr) {
+ DHD_ERROR(("%s: skipping for core[%d] as 'addr' is NULL\n",
+ __FUNCTION__, i));
+ continue;
+ }
+ dhd_sbreg_op(dhd, addr, &val, TRUE);
+ if (!(val & 1)) {
+ dhd->sssr_d11_outofreset[i] = TRUE;
+ } else {
+ dhd->sssr_d11_outofreset[i] = FALSE;
+ }
+ DHD_ERROR(("%s: sssr_d11_outofreset[%d] : %d\n",
+ __FUNCTION__, i, dhd->sssr_d11_outofreset[i]));
+ }
+ /* XXX Temporary WAR for 4375 to handle AXI errors on bad core
+ * to not collect SSSR dump for the core whose bit is not set in trap_data.
+ * It will be reverted once AXI errors are fixed
+ */
+ dhdpcie_update_d11_status_from_trapdata(dhd);
+
+ return BCME_OK;
+}
+
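+/* For each D11 core that is out of reset, drop any outstanding backplane
+ * clock request: if the wrapper's extrsrcreq/itopoobb status is non-zero,
+ * write the per-revision clockcontrolstatus value to clear it.
+ */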
+static int
+dhdpcie_d11_clear_clk_req(dhd_pub_t *dhd)
+{
+ int i;
+ uint val = 0;
+ uint8 num_d11cores;
+ uint32 clockrequeststatus, clockcontrolstatus, clockcontrolstatus_val;
+
+ DHD_ERROR(("%s\n", __FUNCTION__));
+
+ num_d11cores = dhd_d11_slices_num_get(dhd);
+
+ for (i = 0; i < num_d11cores; i++) {
+ if (dhd->sssr_d11_outofreset[i]) {
+ /* clear request clk only if itopoobb/extrsrcreqs is non zero */
+ /* SSSR register information structures v0 and
+ * v1 share most fields except dig_mem
+ */
+ switch (dhd->sssr_reg_info->rev2.version) {
+ case SSSR_REG_INFO_VER_3 :
+ /* intentional fall through */
+ case SSSR_REG_INFO_VER_2 :
+ clockrequeststatus = dhd->sssr_reg_info->rev2.
+ mac_regs[i].wrapper_regs.extrsrcreq;
+ clockcontrolstatus = dhd->sssr_reg_info->rev2.
+ mac_regs[i].base_regs.clockcontrolstatus;
+ clockcontrolstatus_val = dhd->sssr_reg_info->rev2.
+ mac_regs[i].base_regs.clockcontrolstatus_val;
+ break;
+ case SSSR_REG_INFO_VER_1 :
+ case SSSR_REG_INFO_VER_0 :
+ clockrequeststatus = dhd->sssr_reg_info->rev1.
+ mac_regs[i].wrapper_regs.itopoobb;
+ clockcontrolstatus = dhd->sssr_reg_info->rev1.
+ mac_regs[i].base_regs.clockcontrolstatus;
+ clockcontrolstatus_val = dhd->sssr_reg_info->rev1.
+ mac_regs[i].base_regs.clockcontrolstatus_val;
+ break;
+ default :
+ DHD_ERROR(("invalid sssr_reg_ver"));
+ return BCME_UNSUPPORTED;
+ }
+ dhd_sbreg_op(dhd, clockrequeststatus, &val, TRUE);
+ if (val != 0) {
+ /* clear clockcontrolstatus */
+ dhd_sbreg_op(dhd, clockcontrolstatus,
+ &clockcontrolstatus_val, FALSE);
+ }
+ }
+ }
+ return BCME_OK;
+}
+
+static int
+dhdpcie_arm_clear_clk_req(dhd_pub_t *dhd)
+{
+ uint val = 0;
+ uint cfgval = 0;
+ uint32 resetctrl, clockrequeststatus, clockcontrolstatus, clockcontrolstatus_val;
+
+ DHD_ERROR(("%s\n", __FUNCTION__));
+
+ /* SSSR register information structures v0 and v1 share most fields except dig_mem */
+ switch (dhd->sssr_reg_info->rev2.version) {
+ case SSSR_REG_INFO_VER_3 :
+ /* intentional fall through */
+ case SSSR_REG_INFO_VER_2 :
+ resetctrl = dhd->sssr_reg_info->rev2.
+ arm_regs.wrapper_regs.resetctrl;
+ clockrequeststatus = dhd->sssr_reg_info->rev2.
+ arm_regs.wrapper_regs.extrsrcreq;
+ clockcontrolstatus = dhd->sssr_reg_info->rev2.
+ arm_regs.base_regs.clockcontrolstatus;
+ clockcontrolstatus_val = dhd->sssr_reg_info->rev2.
+ arm_regs.base_regs.clockcontrolstatus_val;
+ break;
+ case SSSR_REG_INFO_VER_1 :
+ case SSSR_REG_INFO_VER_0 :
+ resetctrl = dhd->sssr_reg_info->rev1.
+ arm_regs.wrapper_regs.resetctrl;
+ clockrequeststatus = dhd->sssr_reg_info->rev1.
+ arm_regs.wrapper_regs.itopoobb;
+ clockcontrolstatus = dhd->sssr_reg_info->rev1.
+ arm_regs.base_regs.clockcontrolstatus;
+ clockcontrolstatus_val = dhd->sssr_reg_info->rev1.
+ arm_regs.base_regs.clockcontrolstatus_val;
+ break;
+ default :
+ DHD_ERROR(("invalid sssr_reg_ver"));
+ return BCME_UNSUPPORTED;
+ }
+
+ /* Check if bit 0 of resetctrl is cleared */
+ dhd_sbreg_op(dhd, resetctrl, &val, TRUE);
+ if (!(val & 1)) {
+ /* clear request clk only if itopoobb/extrsrcreqs is non zero */
+ dhd_sbreg_op(dhd, clockrequeststatus, &val, TRUE);
+ if (val != 0) {
+ /* clear clockcontrolstatus */
+ dhd_sbreg_op(dhd, clockcontrolstatus, &clockcontrolstatus_val, FALSE);
+ }
+
+ if (MULTIBP_ENAB(dhd->bus->sih)) {
+ /* Clear coherent bits for CA7 because CPU is halted */
+ if (dhd->bus->coreid == ARMCA7_CORE_ID) {
+ cfgval = dhdpcie_bus_cfg_read_dword(dhd->bus,
+ PCIE_CFG_SUBSYSTEM_CONTROL, 4);
+ dhdpcie_bus_cfg_write_dword(dhd->bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4,
+ (cfgval & ~PCIE_BARCOHERENTACCEN_MASK));
+ }
+
+ /* Just halt ARM but do not reset the core */
+ resetctrl &= ~(SI_CORE_SIZE - 1);
+ resetctrl += OFFSETOF(aidmp_t, ioctrl);
+
+ dhd_sbreg_op(dhd, resetctrl, &val, TRUE);
+ val |= SICF_CPUHALT;
+ dhd_sbreg_op(dhd, resetctrl, &val, FALSE);
+ }
+ }
+
+ return BCME_OK;
+}
+
+static int
+dhdpcie_arm_resume_clk_req(dhd_pub_t *dhd)
+{
+ uint val = 0;
+ uint32 resetctrl;
+
+ DHD_ERROR(("%s\n", __FUNCTION__));
+
+ /* SSSR register information structures v0 and v1 share most fields except dig_mem */
+ switch (dhd->sssr_reg_info->rev2.version) {
+ case SSSR_REG_INFO_VER_3 :
+ /* intentional fall through */
+ case SSSR_REG_INFO_VER_2 :
+ resetctrl = dhd->sssr_reg_info->rev2.
+ arm_regs.wrapper_regs.resetctrl;
+ break;
+ case SSSR_REG_INFO_VER_1 :
+ case SSSR_REG_INFO_VER_0 :
+ resetctrl = dhd->sssr_reg_info->rev1.
+ arm_regs.wrapper_regs.resetctrl;
+ break;
+ default :
+ DHD_ERROR(("invalid sssr_reg_ver"));
+ return BCME_UNSUPPORTED;
+ }
+
+ /* Check if bit 0 of resetctrl is cleared */
+ dhd_sbreg_op(dhd, resetctrl, &val, TRUE);
+ if (!(val & 1)) {
+ if (MULTIBP_ENAB(dhd->bus->sih) && (dhd->bus->coreid != ARMCA7_CORE_ID)) {
+ /* Take ARM out of halt but do not reset core */
+ resetctrl &= ~(SI_CORE_SIZE - 1);
+ resetctrl += OFFSETOF(aidmp_t, ioctrl);
+
+ dhd_sbreg_op(dhd, resetctrl, &val, TRUE);
+ val &= ~SICF_CPUHALT;
+ dhd_sbreg_op(dhd, resetctrl, &val, FALSE);
+ dhd_sbreg_op(dhd, resetctrl, &val, TRUE);
+ }
+ }
+
+ return BCME_OK;
+}
+
+static int
+dhdpcie_pcie_clear_clk_req(dhd_pub_t *dhd)
+{
+ uint val = 0;
+ uint32 clockrequeststatus, clockcontrolstatus_addr, clockcontrolstatus_val;
+
+ DHD_ERROR(("%s\n", __FUNCTION__));
+
+ /* SSSR register information structures v0 and v1 share most fields except dig_mem */
+ switch (dhd->sssr_reg_info->rev2.version) {
+ case SSSR_REG_INFO_VER_3 :
+ /* intentional fall through */
+ case SSSR_REG_INFO_VER_2 :
+ clockrequeststatus = dhd->sssr_reg_info->rev2.
+ pcie_regs.wrapper_regs.extrsrcreq;
+ clockcontrolstatus_addr = dhd->sssr_reg_info->rev2.
+ pcie_regs.base_regs.clockcontrolstatus;
+ clockcontrolstatus_val = dhd->sssr_reg_info->rev2.
+ pcie_regs.base_regs.clockcontrolstatus_val;
+ break;
+ case SSSR_REG_INFO_VER_1 :
+ case SSSR_REG_INFO_VER_0 :
+ clockrequeststatus = dhd->sssr_reg_info->rev1.
+ pcie_regs.wrapper_regs.itopoobb;
+ clockcontrolstatus_addr = dhd->sssr_reg_info->rev1.
+ pcie_regs.base_regs.clockcontrolstatus;
+ clockcontrolstatus_val = dhd->sssr_reg_info->rev1.
+ pcie_regs.base_regs.clockcontrolstatus_val;
+ break;
+ default :
+ DHD_ERROR(("invalid sssr_reg_ver"));
+ return BCME_UNSUPPORTED;
+ }
+
+ /* clear request clk only if itopoobb/extrsrcreqs is non zero */
+ dhd_sbreg_op(dhd, clockrequeststatus, &val, TRUE);
+ if (val) {
+ /* clear clockcontrolstatus */
+ dhd_sbreg_op(dhd, clockcontrolstatus_addr, &clockcontrolstatus_val, FALSE);
+ }
+ return BCME_OK;
+}
+
+static int
+dhdpcie_pcie_send_ltrsleep(dhd_pub_t *dhd)
+{
+ uint addr;
+ uint val = 0;
+
+ DHD_ERROR(("%s\n", __FUNCTION__));
+
+ /* SSSR register information structures v0 and v1 share most fields except dig_mem */
+ switch (dhd->sssr_reg_info->rev2.version) {
+ case SSSR_REG_INFO_VER_3 :
+ /* intentional fall through */
+ case SSSR_REG_INFO_VER_2 :
+ addr = dhd->sssr_reg_info->rev2.pcie_regs.base_regs.ltrstate;
+ break;
+ case SSSR_REG_INFO_VER_1 :
+ case SSSR_REG_INFO_VER_0 :
+ addr = dhd->sssr_reg_info->rev1.pcie_regs.base_regs.ltrstate;
+ break;
+ default :
+ DHD_ERROR(("invalid sssr_reg_ver"));
+ return BCME_UNSUPPORTED;
+ }
+
+ val = LTR_ACTIVE;
+ dhd_sbreg_op(dhd, addr, &val, FALSE);
+
+ val = LTR_SLEEP;
+ dhd_sbreg_op(dhd, addr, &val, FALSE);
+
+ return BCME_OK;
+}
+
+static int
+dhdpcie_clear_clk_req(dhd_pub_t *dhd)
+{
+ DHD_ERROR(("%s\n", __FUNCTION__));
+
+ dhdpcie_arm_clear_clk_req(dhd);
+
+ dhdpcie_d11_clear_clk_req(dhd);
+
+ dhdpcie_pcie_clear_clk_req(dhd);
+
+ return BCME_OK;
+}
+
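+/* Re-initialize each previously out-of-reset D11 core with the canned
+ * ioctrl_resetseq_val[0..4] sequence: assert resetctrl, apply the first two
+ * ioctrl values, de-assert resetctrl, then apply the remaining three.
+ */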
+static int
+dhdpcie_bring_d11_outofreset(dhd_pub_t *dhd)
+{
+ int i;
+ uint val = 0;
+ uint8 num_d11cores;
+ uint32 resetctrl_addr, ioctrl_addr, ioctrl_resetseq_val0, ioctrl_resetseq_val1,
+ ioctrl_resetseq_val2, ioctrl_resetseq_val3, ioctrl_resetseq_val4;
+
+ DHD_ERROR(("%s\n", __FUNCTION__));
+
+ num_d11cores = dhd_d11_slices_num_get(dhd);
+
+ for (i = 0; i < num_d11cores; i++) {
+ if (dhd->sssr_d11_outofreset[i]) {
+ /* SSSR register information structures v0 and v1 share
+ * most fields except dig_mem
+ */
+ switch (dhd->sssr_reg_info->rev2.version) {
+ case SSSR_REG_INFO_VER_3 :
+ /* intentional fall through */
+ case SSSR_REG_INFO_VER_2 :
+ resetctrl_addr = dhd->sssr_reg_info->rev2.mac_regs[i].
+ wrapper_regs.resetctrl;
+ ioctrl_addr = dhd->sssr_reg_info->rev2.mac_regs[i].
+ wrapper_regs.ioctrl;
+ ioctrl_resetseq_val0 = dhd->sssr_reg_info->rev2.
+ mac_regs[0].wrapper_regs.ioctrl_resetseq_val[0];
+ ioctrl_resetseq_val1 = dhd->sssr_reg_info->rev2.
+ mac_regs[0].wrapper_regs.ioctrl_resetseq_val[1];
+ ioctrl_resetseq_val2 = dhd->sssr_reg_info->rev2.
+ mac_regs[0].wrapper_regs.ioctrl_resetseq_val[2];
+ ioctrl_resetseq_val3 = dhd->sssr_reg_info->rev2.
+ mac_regs[0].wrapper_regs.ioctrl_resetseq_val[3];
+ ioctrl_resetseq_val4 = dhd->sssr_reg_info->rev2.
+ mac_regs[0].wrapper_regs.ioctrl_resetseq_val[4];
+ break;
+ case SSSR_REG_INFO_VER_1 :
+ case SSSR_REG_INFO_VER_0 :
+ resetctrl_addr = dhd->sssr_reg_info->rev1.mac_regs[i].
+ wrapper_regs.resetctrl;
+ ioctrl_addr = dhd->sssr_reg_info->rev1.mac_regs[i].
+ wrapper_regs.ioctrl;
+ ioctrl_resetseq_val0 = dhd->sssr_reg_info->rev1.
+ mac_regs[0].wrapper_regs.ioctrl_resetseq_val[0];
+ ioctrl_resetseq_val1 = dhd->sssr_reg_info->rev1.
+ mac_regs[0].wrapper_regs.ioctrl_resetseq_val[1];
+ ioctrl_resetseq_val2 = dhd->sssr_reg_info->rev1.
+ mac_regs[0].wrapper_regs.ioctrl_resetseq_val[2];
+ ioctrl_resetseq_val3 = dhd->sssr_reg_info->rev1.
+ mac_regs[0].wrapper_regs.ioctrl_resetseq_val[3];
+ ioctrl_resetseq_val4 = dhd->sssr_reg_info->rev1.
+ mac_regs[0].wrapper_regs.ioctrl_resetseq_val[4];
+ break;
+ default :
+ DHD_ERROR(("invalid sssr_reg_ver"));
+ return BCME_UNSUPPORTED;
+ }
+ /* disable core by setting bit 0 */
+ val = 1;
+ dhd_sbreg_op(dhd, resetctrl_addr, &val, FALSE);
+ OSL_DELAY(6000);
+
+ dhd_sbreg_op(dhd, ioctrl_addr, &ioctrl_resetseq_val0, FALSE);
+
+ dhd_sbreg_op(dhd, ioctrl_addr, &ioctrl_resetseq_val1, FALSE);
+
+ /* enable core by clearing bit 0 */
+ val = 0;
+ dhd_sbreg_op(dhd, resetctrl_addr, &val, FALSE);
+
+ dhd_sbreg_op(dhd, ioctrl_addr, &ioctrl_resetseq_val2, FALSE);
+
+ dhd_sbreg_op(dhd, ioctrl_addr, &ioctrl_resetseq_val3, FALSE);
+
+ dhd_sbreg_op(dhd, ioctrl_addr, &ioctrl_resetseq_val4, FALSE);
+ }
+ }
+ return BCME_OK;
+}
+
+#ifdef DHD_SSSR_DUMP_BEFORE_SR
+static int
+dhdpcie_sssr_dump_get_before_sr(dhd_pub_t *dhd)
+{
+ int i;
+ uint32 sr_size, xmtaddress, xmtdata, dig_buf_size, dig_buf_addr;
+ uint8 num_d11cores;
+
+ DHD_ERROR(("%s\n", __FUNCTION__));
+
+ num_d11cores = dhd_d11_slices_num_get(dhd);
+
+ for (i = 0; i < num_d11cores; i++) {
+ if (dhd->sssr_d11_outofreset[i]) {
+ sr_size = dhd_sssr_mac_buf_size(dhd, i);
+ xmtaddress = dhd_sssr_mac_xmtaddress(dhd, i);
+ xmtdata = dhd_sssr_mac_xmtdata(dhd, i);
+ dhdpcie_get_sssr_fifo_dump(dhd, dhd->sssr_d11_before[i],
+ sr_size, xmtaddress, xmtdata);
+ }
+ }
+
+ dig_buf_size = dhd_sssr_dig_buf_size(dhd);
+ dig_buf_addr = dhd_sssr_dig_buf_addr(dhd);
+ if (dig_buf_size) {
+ dhdpcie_get_sssr_dig_dump(dhd, dhd->sssr_dig_buf_before,
+ dig_buf_size, dig_buf_addr);
+ }
+
+ return BCME_OK;
+}
+#endif /* DHD_SSSR_DUMP_BEFORE_SR */
+
+static int
+dhdpcie_sssr_dump_get_after_sr(dhd_pub_t *dhd)
+{
+ int i;
+ uint32 sr_size, xmtaddress, xmtdata, dig_buf_size, dig_buf_addr;
+ uint8 num_d11cores;
+
+ DHD_ERROR(("%s\n", __FUNCTION__));
+
+ num_d11cores = dhd_d11_slices_num_get(dhd);
+
+ for (i = 0; i < num_d11cores; i++) {
+ if (dhd->sssr_d11_outofreset[i]) {
+ sr_size = dhd_sssr_mac_buf_size(dhd, i);
+ xmtaddress = dhd_sssr_mac_xmtaddress(dhd, i);
+ xmtdata = dhd_sssr_mac_xmtdata(dhd, i);
+ dhdpcie_get_sssr_fifo_dump(dhd, dhd->sssr_d11_after[i],
+ sr_size, xmtaddress, xmtdata);
+ }
+ }
+
+ dig_buf_size = dhd_sssr_dig_buf_size(dhd);
+ dig_buf_addr = dhd_sssr_dig_buf_addr(dhd);
+
+ if (dig_buf_size) {
+ dhdpcie_get_sssr_dig_dump(dhd, dhd->sssr_dig_buf_after, dig_buf_size, dig_buf_addr);
+ }
+
+ return BCME_OK;
+}
+
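+/* Top-level SSSR collection sequence: check which D11 cores are out of
+ * reset, optionally snapshot the FIFOs before save/restore, quiesce the chip
+ * (interrupt masks, PMU timers, clock requests, chipcommon powerctrl, LTR
+ * sleep), toggle the WL power domain, then restore state and snapshot the
+ * FIFOs again.
+ */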
+int
+dhdpcie_sssr_dump(dhd_pub_t *dhd)
+{
+ uint32 powerctrl_val;
+
+ if (!dhd->sssr_inited) {
+ DHD_ERROR(("%s: SSSR not inited\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ if (dhd->bus->is_linkdown) {
+ DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ DHD_ERROR(("%s: Before WL down (powerctl: pcie:0x%x chipc:0x%x) "
+ "PMU rctl:0x%x res_state:0x%x\n", __FUNCTION__,
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ OFFSETOF(chipcregs_t, powerctl), 0, 0),
+ si_corereg(dhd->bus->sih, 0, OFFSETOF(chipcregs_t, powerctl), 0, 0),
+ PMU_REG(dhd->bus->sih, retention_ctl, 0, 0),
+ PMU_REG(dhd->bus->sih, res_state, 0, 0)));
+
+ dhdpcie_d11_check_outofreset(dhd);
+
+#ifdef DHD_SSSR_DUMP_BEFORE_SR
+ DHD_ERROR(("%s: Collecting Dump before SR\n", __FUNCTION__));
+ if (dhdpcie_sssr_dump_get_before_sr(dhd) != BCME_OK) {
+ DHD_ERROR(("%s: dhdpcie_sssr_dump_get_before_sr failed\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+#endif /* DHD_SSSR_DUMP_BEFORE_SR */
+
+ dhdpcie_clear_intmask_and_timer(dhd);
+ dhdpcie_clear_clk_req(dhd);
+ powerctrl_val = dhdpcie_suspend_chipcommon_powerctrl(dhd);
+ dhdpcie_pcie_send_ltrsleep(dhd);
+
+ if (MULTIBP_ENAB(dhd->bus->sih)) {
+ dhd_bus_pcie_pwr_req_wl_domain(dhd->bus, OFFSETOF(chipcregs_t, powerctl), FALSE);
+ }
+
+ /* Wait for some time before Restore */
+ OSL_DELAY(6000);
+
+ DHD_ERROR(("%s: After WL down (powerctl: pcie:0x%x chipc:0x%x) "
+ "PMU rctl:0x%x res_state:0x%x\n", __FUNCTION__,
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ OFFSETOF(chipcregs_t, powerctl), 0, 0),
+ si_corereg(dhd->bus->sih, 0, OFFSETOF(chipcregs_t, powerctl), 0, 0),
+ PMU_REG(dhd->bus->sih, retention_ctl, 0, 0),
+ PMU_REG(dhd->bus->sih, res_state, 0, 0)));
+
+ if (MULTIBP_ENAB(dhd->bus->sih)) {
+ dhd_bus_pcie_pwr_req_wl_domain(dhd->bus, OFFSETOF(chipcregs_t, powerctl), TRUE);
+ /* Add delay for WL domain to power up */
+ OSL_DELAY(15000);
+
+ DHD_ERROR(("%s: After WL up again (powerctl: pcie:0x%x chipc:0x%x) "
+ "PMU rctl:0x%x res_state:0x%x\n", __FUNCTION__,
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ OFFSETOF(chipcregs_t, powerctl), 0, 0),
+ si_corereg(dhd->bus->sih, 0, OFFSETOF(chipcregs_t, powerctl), 0, 0),
+ PMU_REG(dhd->bus->sih, retention_ctl, 0, 0),
+ PMU_REG(dhd->bus->sih, res_state, 0, 0)));
+ }
+
+ dhdpcie_resume_chipcommon_powerctrl(dhd, powerctrl_val);
+ dhdpcie_arm_resume_clk_req(dhd);
+ dhdpcie_bring_d11_outofreset(dhd);
+
+ DHD_ERROR(("%s: Collecting Dump after SR\n", __FUNCTION__));
+ if (dhdpcie_sssr_dump_get_after_sr(dhd) != BCME_OK) {
+ DHD_ERROR(("%s: dhdpcie_sssr_dump_get_after_sr failed\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+ dhd->sssr_dump_collected = TRUE;
+ dhd_write_sssr_dump(dhd, SSSR_DUMP_MODE_SSSR);
+
+ return BCME_OK;
+}
+
+#define PCIE_CFG_DSTATE_MASK 0x11u
+
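+/* Trigger a FIS capture: force all PMU power switches on, save PCIe config
+ * space, kick DAR_FIS_CTRL, then recover the link (REG_ON toggle on Android
+ * built-in platforms), restore config space and force the EP back to D0 if
+ * it is found in a lower D-state.
+ */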
+static int
+dhdpcie_fis_trigger(dhd_pub_t *dhd)
+{
+ uint32 fis_ctrl_status;
+ uint32 cfg_status_cmd;
+ uint32 cfg_pmcsr;
+
+ if (!dhd->sssr_inited) {
+ DHD_ERROR(("%s: SSSR not inited\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ if (dhd->bus->is_linkdown) {
+ DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+#ifdef DHD_PCIE_RUNTIMEPM
+ /* Bring back to D0 */
+ dhdpcie_runtime_bus_wake(dhd, CAN_SLEEP(), __builtin_return_address(0));
+ /* Stop RPM timer so that even INB DW DEASSERT should not happen */
+ DHD_STOP_RPM_TIMER(dhd);
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+ /* Set fis_triggered flag to ignore link down callback from RC */
+ dhd->fis_triggered = TRUE;
+
+ /* Set FIS PwrswForceOnAll */
+ PMU_REG(dhd->bus->sih, fis_ctrl_status, PMU_FIS_FORCEON_ALL_MASK, PMU_FIS_FORCEON_ALL_MASK);
+
+ fis_ctrl_status = PMU_REG(dhd->bus->sih, fis_ctrl_status, 0, 0);
+
+ DHD_ERROR(("%s: fis_ctrl_status=0x%x\n", __FUNCTION__, fis_ctrl_status));
+
+ cfg_status_cmd = dhd_pcie_config_read(dhd->bus, PCIECFGREG_STATUS_CMD, sizeof(uint32));
+ cfg_pmcsr = dhd_pcie_config_read(dhd->bus, PCIE_CFG_PMCSR, sizeof(uint32));
+ DHD_ERROR(("before save: Status Command(0x%x)=0x%x PCIE_CFG_PMCSR(0x%x)=0x%x\n",
+ PCIECFGREG_STATUS_CMD, cfg_status_cmd, PCIE_CFG_PMCSR, cfg_pmcsr));
+
+ DHD_PCIE_CONFIG_SAVE(dhd->bus);
+
+ /* Trigger FIS */
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ DAR_FIS_CTRL(dhd->bus->sih->buscorerev), ~0, DAR_FIS_START_MASK);
+ OSL_DELAY(100 * 1000);
+
+#ifdef OEM_ANDROID
+ /*
+ * On Android built-in platforms a REG ON/OFF cycle is needed
+ * to restore the pcie link.
+ * dhd_download_fw_on_driverload will be FALSE for built-in.
+ */
+ if (!dhd_download_fw_on_driverload) {
+ DHD_ERROR(("%s: Toggle REG_ON and restore config space\n", __FUNCTION__));
+ dhdpcie_bus_stop_host_dev(dhd->bus);
+ dhd_wifi_platform_set_power(dhd, FALSE);
+ dhd_wifi_platform_set_power(dhd, TRUE);
+ dhdpcie_bus_start_host_dev(dhd->bus);
+ /* Restore inited pcie cfg from pci_load_saved_state */
+ dhdpcie_bus_enable_device(dhd->bus);
+ }
+#endif /* OEM_ANDROID */
+
+ cfg_status_cmd = dhd_pcie_config_read(dhd->bus, PCIECFGREG_STATUS_CMD, sizeof(uint32));
+ cfg_pmcsr = dhd_pcie_config_read(dhd->bus, PCIE_CFG_PMCSR, sizeof(uint32));
+ DHD_ERROR(("after regon-restore: Status Command(0x%x)=0x%x PCIE_CFG_PMCSR(0x%x)=0x%x\n",
+ PCIECFGREG_STATUS_CMD, cfg_status_cmd, PCIE_CFG_PMCSR, cfg_pmcsr));
+
+ /* To-Do: below is debug code, remove this if EP is in D0 after REG-ON restore */
+ DHD_PCIE_CONFIG_RESTORE(dhd->bus);
+
+ cfg_status_cmd = dhd_pcie_config_read(dhd->bus, PCIECFGREG_STATUS_CMD, sizeof(uint32));
+ cfg_pmcsr = dhd_pcie_config_read(dhd->bus, PCIE_CFG_PMCSR, sizeof(uint32));
+ DHD_ERROR(("after normal-restore: Status Command(0x%x)=0x%x PCIE_CFG_PMCSR(0x%x)=0x%x\n",
+ PCIECFGREG_STATUS_CMD, cfg_status_cmd, PCIE_CFG_PMCSR, cfg_pmcsr));
+
+ /*
+ * To-Do: below is debug code, remove this if EP is in D0 after REG-ON restore
+ * in both MSM and LSI RCs
+ */
+ if ((cfg_pmcsr & PCIE_CFG_DSTATE_MASK) != 0) {
+ int ret = dhdpcie_set_master_and_d0_pwrstate(dhd->bus);
+ if (ret != BCME_OK) {
+ DHD_ERROR(("%s: Setting D0 failed, ABORT FIS collection\n", __FUNCTION__));
+ return ret;
+ }
+ cfg_status_cmd =
+ dhd_pcie_config_read(dhd->bus, PCIECFGREG_STATUS_CMD, sizeof(uint32));
+ cfg_pmcsr = dhd_pcie_config_read(dhd->bus, PCIE_CFG_PMCSR, sizeof(uint32));
+ DHD_ERROR(("after force-d0: Status Command(0x%x)=0x%x PCIE_CFG_PMCSR(0x%x)=0x%x\n",
+ PCIECFGREG_STATUS_CMD, cfg_status_cmd, PCIE_CFG_PMCSR, cfg_pmcsr));
+ }
+
+ /* Clear fis_triggered as REG OFF/ON recovered link */
+ dhd->fis_triggered = FALSE;
+
+ return BCME_OK;
+}
+
+int
+dhd_bus_fis_trigger(dhd_pub_t *dhd)
+{
+ return dhdpcie_fis_trigger(dhd);
+}
+
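+/* Reset the HWA block using the canned hwa_resetseq_val sequence from the
+ * v3 SSSR register info: ioctrl, resetctrl (assert/de-assert), ioctrl, then
+ * the four clock registers (clkenable, clkgatingenable, clkext,
+ * clkctlstatus). Available only from SSSR_REG_INFO_VER_3 onwards.
+ */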
+static int
+dhdpcie_reset_hwa(dhd_pub_t *dhd)
+{
+ int ret;
+ sssr_reg_info_cmn_t *sssr_reg_info_cmn = dhd->sssr_reg_info;
+ sssr_reg_info_v3_t *sssr_reg_info = (sssr_reg_info_v3_t *)&sssr_reg_info_cmn->rev3;
+
+ /* HWA wrapper registers */
+ uint32 ioctrl, resetctrl;
+ /* HWA base registers */
+ uint32 clkenable, clkgatingenable, clkext, clkctlstatus;
+ uint32 hwa_resetseq_val[SSSR_HWA_RESET_SEQ_STEPS];
+ int i = 0;
+
+ if (sssr_reg_info->version < SSSR_REG_INFO_VER_3) {
+ DHD_ERROR(("%s: not supported for version:%d\n",
+ __FUNCTION__, sssr_reg_info->version));
+ return BCME_UNSUPPORTED;
+ }
+
+ if (sssr_reg_info->hwa_regs.base_regs.clkenable == 0) {
+ DHD_ERROR(("%s: hwa regs are not set\n", __FUNCTION__));
+ return BCME_UNSUPPORTED;
+ }
+
+ DHD_ERROR(("%s: version:%d\n", __FUNCTION__, sssr_reg_info->version));
+
+ ioctrl = sssr_reg_info->hwa_regs.wrapper_regs.ioctrl;
+ resetctrl = sssr_reg_info->hwa_regs.wrapper_regs.resetctrl;
+
+ clkenable = sssr_reg_info->hwa_regs.base_regs.clkenable;
+ clkgatingenable = sssr_reg_info->hwa_regs.base_regs.clkgatingenable;
+ clkext = sssr_reg_info->hwa_regs.base_regs.clkext;
+ clkctlstatus = sssr_reg_info->hwa_regs.base_regs.clkctlstatus;
+
+ ret = memcpy_s(hwa_resetseq_val, sizeof(hwa_resetseq_val),
+ sssr_reg_info->hwa_regs.hwa_resetseq_val,
+ sizeof(sssr_reg_info->hwa_regs.hwa_resetseq_val));
+ if (ret) {
+ DHD_ERROR(("%s: hwa_resetseq_val memcpy_s failed: %d\n",
+ __FUNCTION__, ret));
+ return ret;
+ }
+
+ dhd_sbreg_op(dhd, ioctrl, &hwa_resetseq_val[i++], FALSE);
+ dhd_sbreg_op(dhd, resetctrl, &hwa_resetseq_val[i++], FALSE);
+ dhd_sbreg_op(dhd, resetctrl, &hwa_resetseq_val[i++], FALSE);
+ dhd_sbreg_op(dhd, ioctrl, &hwa_resetseq_val[i++], FALSE);
+
+ dhd_sbreg_op(dhd, clkenable, &hwa_resetseq_val[i++], FALSE);
+ dhd_sbreg_op(dhd, clkgatingenable, &hwa_resetseq_val[i++], FALSE);
+ dhd_sbreg_op(dhd, clkext, &hwa_resetseq_val[i++], FALSE);
+ dhd_sbreg_op(dhd, clkctlstatus, &hwa_resetseq_val[i++], FALSE);
+
+ return BCME_OK;
+}
+
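+/* Collect the state preserved by a FIS capture: raise all PMU resources,
+ * bring every D11 core out of reset, clear the FIS-done status, reset the
+ * HWA block, then reuse the regular after-SR dump path.
+ */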
+static int
+dhdpcie_fis_dump(dhd_pub_t *dhd)
+{
+ int i;
+ uint8 num_d11cores;
+
+ DHD_ERROR(("%s\n", __FUNCTION__));
+
+ if (!dhd->sssr_inited) {
+ DHD_ERROR(("%s: SSSR not inited\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ if (dhd->bus->is_linkdown) {
+ DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ /* bring up all pmu resources */
+ PMU_REG(dhd->bus->sih, min_res_mask, ~0,
+ PMU_REG(dhd->bus->sih, max_res_mask, 0, 0));
+ OSL_DELAY(10 * 1000);
+
+ num_d11cores = dhd_d11_slices_num_get(dhd);
+
+ for (i = 0; i < num_d11cores; i++) {
+ dhd->sssr_d11_outofreset[i] = TRUE;
+ }
+
+ dhdpcie_bring_d11_outofreset(dhd);
+ OSL_DELAY(6000);
+
+ /* clear FIS Done */
+ PMU_REG(dhd->bus->sih, fis_ctrl_status, PMU_CLEAR_FIS_DONE_MASK, PMU_CLEAR_FIS_DONE_MASK);
+
+ if (dhdpcie_reset_hwa(dhd) != BCME_OK) {
+ DHD_ERROR(("%s: dhdpcie_reset_hwa failed\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ dhdpcie_d11_check_outofreset(dhd);
+
+ DHD_ERROR(("%s: Collecting Dump after SR\n", __FUNCTION__));
+ if (dhdpcie_sssr_dump_get_after_sr(dhd) != BCME_OK) {
+ DHD_ERROR(("%s: dhdpcie_sssr_dump_get_after_sr failed\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+ dhd->sssr_dump_collected = TRUE;
+ dhd_write_sssr_dump(dhd, SSSR_DUMP_MODE_FIS);
+
+ return BCME_OK;
+}
+
+int
+dhd_bus_fis_dump(dhd_pub_t *dhd)
+{
+ return dhdpcie_fis_dump(dhd);
+}
+#endif /* DHD_SSSR_DUMP */
+
+#ifdef DHD_SDTC_ETB_DUMP
+int
+dhd_bus_get_etb_info(dhd_pub_t *dhd, uint32 etbinfo_addr, etb_info_t *etb_info)
+{
+ int ret = 0;
+
+ if ((ret = dhdpcie_bus_membytes(dhd->bus, FALSE, etbinfo_addr,
+ (unsigned char *)etb_info, sizeof(*etb_info)))) {
+ DHD_ERROR(("%s: Read Error membytes %d\n", __FUNCTION__, ret));
+ return BCME_ERROR;
+ }
+
+ return BCME_OK;
+}
+
+int
+dhd_bus_get_sdtc_etb(dhd_pub_t *dhd, uint8 *sdtc_etb_mempool, uint addr, uint read_bytes)
+{
+ int ret = 0;
+
+ if ((ret = dhdpcie_bus_membytes(dhd->bus, FALSE, addr,
+ (unsigned char *)sdtc_etb_mempool, read_bytes))) {
+ DHD_ERROR(("%s: Read Error membytes %d\n", __FUNCTION__, ret));
+ return BCME_ERROR;
+ }
+ return BCME_OK;
+}
+#endif /* DHD_SDTC_ETB_DUMP */
+
+#ifdef BTLOG
+void
+BCMFASTPATH(dhd_bus_rx_bt_log)(struct dhd_bus *bus, void* pkt)
+{
+ dhd_rx_bt_log(bus->dhd, pkt);
+}
+#endif /* BTLOG */
+
+#ifdef DHD_WAKE_STATUS
+wake_counts_t*
+dhd_bus_get_wakecount(dhd_pub_t *dhd)
+{
+ return &dhd->bus->wake_counts;
+}
+int
+dhd_bus_get_bus_wake(dhd_pub_t *dhd)
+{
+ return bcmpcie_set_get_wake(dhd->bus, 0);
+}
+#endif /* DHD_WAKE_STATUS */
+
+/* Writes random number(s) to the TCM. On initialization the firmware reads
+ * this location to fetch the random number and uses it to randomize the heap
+ * address space layout.
+ */
+static int
+dhdpcie_wrt_rnd(struct dhd_bus *bus)
+{
+ bcm_rand_metadata_t rnd_data;
+ uint8 rand_buf[BCM_ENTROPY_HOST_NBYTES];
+ uint32 count = BCM_ENTROPY_HOST_NBYTES;
+ int ret = 0;
+ uint32 addr = bus->dongle_ram_base + (bus->ramsize - BCM_NVRAM_OFFSET_TCM) -
+ ((bus->nvram_csm & 0xffff) * BCM_NVRAM_IMG_COMPRS_FACTOR + sizeof(rnd_data));
+
+ memset(rand_buf, 0, BCM_ENTROPY_HOST_NBYTES);
+ rnd_data.signature = htol32(BCM_NVRAM_RNG_SIGNATURE);
+ rnd_data.count = htol32(count);
+ /* write the metadata about random number */
+ dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&rnd_data, sizeof(rnd_data));
+ /* scale back by number of random number counts */
+ addr -= count;
+
+ bus->ramtop_addr = addr;
+
+#ifdef DHD_RND_DEBUG
+ bus->dhd->rnd_buf = NULL;
+ /* get random contents from file */
+ ret = dhd_get_rnd_info(bus->dhd);
+ if (bus->dhd->rnd_buf) {
+ /* write file contents to TCM */
+ DHD_ERROR(("%s: use stored .rnd.in content\n", __FUNCTION__));
+ dhdpcie_bus_membytes(bus, TRUE, addr, bus->dhd->rnd_buf, bus->dhd->rnd_len);
+
+ /* Dump random content to out file */
+ dhd_dump_rnd_info(bus->dhd, bus->dhd->rnd_buf, bus->dhd->rnd_len);
+
+ /* bus->dhd->rnd_buf is allocated in dhd_get_rnd_info, free here */
+ MFREE(bus->dhd->osh, bus->dhd->rnd_buf, bus->dhd->rnd_len);
+ bus->dhd->rnd_buf = NULL;
+ return BCME_OK;
+ }
+#endif /* DHD_RND_DEBUG */
+
+ /* Now write the random number(s) */
+ ret = dhd_get_random_bytes(rand_buf, count);
+ if (ret != BCME_OK) {
+ return ret;
+ }
+ dhdpcie_bus_membytes(bus, TRUE, addr, rand_buf, count);
+
+#ifdef DHD_RND_DEBUG
+ /* Dump random content to out file */
+ dhd_dump_rnd_info(bus->dhd, rand_buf, count);
+#endif /* DHD_RND_DEBUG */
+
+ bus->next_tlv = addr;
+
+ return BCME_OK;
+}
+
+#ifdef D2H_MINIDUMP
+bool
+dhd_bus_is_minidump_enabled(dhd_pub_t *dhdp)
+{
+ return dhdp->bus->d2h_minidump;
+}
+#endif /* D2H_MINIDUMP */
+
+void
+dhd_pcie_intr_count_dump(dhd_pub_t *dhd)
+{
+ struct dhd_bus *bus = dhd->bus;
+ uint64 current_time;
+
+ DHD_ERROR(("\n ------- DUMPING INTR enable/disable counters ------- \r\n"));
+ DHD_ERROR(("resume_intr_enable_count=%lu dpc_intr_enable_count=%lu\n",
+ bus->resume_intr_enable_count, bus->dpc_intr_enable_count));
+ DHD_ERROR(("isr_intr_disable_count=%lu suspend_intr_disable_count=%lu\n",
+ bus->isr_intr_disable_count, bus->suspend_intr_disable_count));
+#ifdef BCMPCIE_OOB_HOST_WAKE
+ DHD_ERROR(("oob_intr_count=%lu oob_intr_enable_count=%lu oob_intr_disable_count=%lu\n",
+ bus->oob_intr_count, bus->oob_intr_enable_count,
+ bus->oob_intr_disable_count));
+ DHD_ERROR(("oob_irq_num=%d last_oob_irq_times="SEC_USEC_FMT":"SEC_USEC_FMT"\n",
+ dhdpcie_get_oob_irq_num(bus),
+ GET_SEC_USEC(bus->last_oob_irq_isr_time),
+ GET_SEC_USEC(bus->last_oob_irq_thr_time)));
+ DHD_ERROR(("last_oob_irq_enable_time="SEC_USEC_FMT
+ " last_oob_irq_disable_time="SEC_USEC_FMT"\n",
+ GET_SEC_USEC(bus->last_oob_irq_enable_time),
+ GET_SEC_USEC(bus->last_oob_irq_disable_time)));
+ DHD_ERROR(("oob_irq_enabled=%d oob_gpio_level=%d\n",
+ dhdpcie_get_oob_irq_status(bus),
+ dhdpcie_get_oob_irq_level()));
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+ DHD_ERROR(("dpc_return_busdown_count=%lu non_ours_irq_count=%lu\n",
+ bus->dpc_return_busdown_count, bus->non_ours_irq_count));
+
+ current_time = OSL_LOCALTIME_NS();
+ DHD_ERROR(("\ncurrent_time="SEC_USEC_FMT"\n",
+ GET_SEC_USEC(current_time)));
+ DHD_ERROR(("isr_entry_time="SEC_USEC_FMT
+ " isr_exit_time="SEC_USEC_FMT"\n",
+ GET_SEC_USEC(bus->isr_entry_time),
+ GET_SEC_USEC(bus->isr_exit_time)));
+ DHD_ERROR(("isr_sched_dpc_time="SEC_USEC_FMT
+ " rpm_sched_dpc_time="SEC_USEC_FMT
+ " last_non_ours_irq_time="SEC_USEC_FMT"\n",
+ GET_SEC_USEC(bus->isr_sched_dpc_time),
+ GET_SEC_USEC(bus->rpm_sched_dpc_time),
+ GET_SEC_USEC(bus->last_non_ours_irq_time)));
+ DHD_ERROR(("dpc_entry_time="SEC_USEC_FMT
+ " last_process_ctrlbuf_time="SEC_USEC_FMT"\n",
+ GET_SEC_USEC(bus->dpc_entry_time),
+ GET_SEC_USEC(bus->last_process_ctrlbuf_time)));
+ DHD_ERROR(("last_process_flowring_time="SEC_USEC_FMT
+ " last_process_txcpl_time="SEC_USEC_FMT"\n",
+ GET_SEC_USEC(bus->last_process_flowring_time),
+ GET_SEC_USEC(bus->last_process_txcpl_time)));
+ DHD_ERROR(("last_process_rxcpl_time="SEC_USEC_FMT
+ " last_process_infocpl_time="SEC_USEC_FMT
+ " last_process_edl_time="SEC_USEC_FMT"\n",
+ GET_SEC_USEC(bus->last_process_rxcpl_time),
+ GET_SEC_USEC(bus->last_process_infocpl_time),
+ GET_SEC_USEC(bus->last_process_edl_time)));
+ DHD_ERROR(("dpc_exit_time="SEC_USEC_FMT
+ " resched_dpc_time="SEC_USEC_FMT"\n",
+ GET_SEC_USEC(bus->dpc_exit_time),
+ GET_SEC_USEC(bus->resched_dpc_time)));
+ DHD_ERROR(("last_d3_inform_time="SEC_USEC_FMT"\n",
+ GET_SEC_USEC(bus->last_d3_inform_time)));
+
+ DHD_ERROR(("\nlast_suspend_start_time="SEC_USEC_FMT
+ " last_suspend_end_time="SEC_USEC_FMT"\n",
+ GET_SEC_USEC(bus->last_suspend_start_time),
+ GET_SEC_USEC(bus->last_suspend_end_time)));
+ DHD_ERROR(("last_resume_start_time="SEC_USEC_FMT
+ " last_resume_end_time="SEC_USEC_FMT"\n",
+ GET_SEC_USEC(bus->last_resume_start_time),
+ GET_SEC_USEC(bus->last_resume_end_time)));
+
+#if defined(SHOW_LOGTRACE) && defined(DHD_USE_KTHREAD_FOR_LOGTRACE)
+ DHD_ERROR(("logtrace_thread_entry_time="SEC_USEC_FMT
+ " logtrace_thread_sem_down_time="SEC_USEC_FMT
+ "\nlogtrace_thread_flush_time="SEC_USEC_FMT
+ " logtrace_thread_unexpected_break_time="SEC_USEC_FMT
+ "\nlogtrace_thread_complete_time="SEC_USEC_FMT"\n",
+ GET_SEC_USEC(dhd->logtrace_thr_ts.entry_time),
+ GET_SEC_USEC(dhd->logtrace_thr_ts.sem_down_time),
+ GET_SEC_USEC(dhd->logtrace_thr_ts.flush_time),
+ GET_SEC_USEC(dhd->logtrace_thr_ts.unexpected_break_time),
+ GET_SEC_USEC(dhd->logtrace_thr_ts.complete_time)));
+#endif /* SHOW_LOGTRACE && DHD_USE_KTHREAD_FOR_LOGTRACE */
+}
+
+void
+dhd_bus_intr_count_dump(dhd_pub_t *dhd)
+{
+ dhd_pcie_intr_count_dump(dhd);
+}
+
+int
+dhd_pcie_dump_wrapper_regs(dhd_pub_t *dhd)
+{
+ uint32 save_idx, val;
+ si_t *sih = dhd->bus->sih;
+ uint32 oob_base, oob_base1;
+ uint32 wrapper_dump_list[] = {
+ AI_OOBSELOUTA30, AI_OOBSELOUTA74, AI_OOBSELOUTB30, AI_OOBSELOUTB74,
+ AI_OOBSELOUTC30, AI_OOBSELOUTC74, AI_OOBSELOUTD30, AI_OOBSELOUTD74,
+ AI_RESETSTATUS, AI_RESETCTRL,
+ AI_ITIPOOBA, AI_ITIPOOBB, AI_ITIPOOBC, AI_ITIPOOBD,
+ AI_ITIPOOBAOUT, AI_ITIPOOBBOUT, AI_ITIPOOBCOUT, AI_ITIPOOBDOUT
+ };
+ uint32 i;
+ hndoobr_reg_t *reg;
+ cr4regs_t *cr4regs;
+ ca7regs_t *ca7regs;
+
+ save_idx = si_coreidx(sih);
+
+ DHD_ERROR(("%s: Master wrapper Reg\n", __FUNCTION__));
+
+ if (si_setcore(sih, PCIE2_CORE_ID, 0) != NULL) {
+ for (i = 0; i < (uint32)sizeof(wrapper_dump_list) / 4; i++) {
+ val = si_wrapperreg(sih, wrapper_dump_list[i], 0, 0);
+ DHD_ERROR(("sbreg: addr:0x%x val:0x%x\n", wrapper_dump_list[i], val));
+ }
+ }
+
+ if ((cr4regs = si_setcore(sih, ARMCR4_CORE_ID, 0)) != NULL) {
+ DHD_ERROR(("%s: ARM CR4 wrapper Reg\n", __FUNCTION__));
+ for (i = 0; i < (uint32)sizeof(wrapper_dump_list) / 4; i++) {
+ val = si_wrapperreg(sih, wrapper_dump_list[i], 0, 0);
+ DHD_ERROR(("sbreg: addr:0x%x val:0x%x\n", wrapper_dump_list[i], val));
+ }
+ DHD_ERROR(("%s: ARM CR4 core Reg\n", __FUNCTION__));
+ val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, corecontrol));
+ DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, corecontrol), val));
+ val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, corecapabilities));
+ DHD_ERROR(("reg:0x%x val:0x%x\n",
+ (uint)OFFSETOF(cr4regs_t, corecapabilities), val));
+ val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, corestatus));
+ DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, corestatus), val));
+ val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, nmiisrst));
+ DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, nmiisrst), val));
+ val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, nmimask));
+ DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, nmimask), val));
+ val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, isrmask));
+ DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, isrmask), val));
+ val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, swintreg));
+ DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, swintreg), val));
+ val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, intstatus));
+ DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, intstatus), val));
+ val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, cyclecnt));
+ DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, cyclecnt), val));
+ val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, inttimer));
+ DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, inttimer), val));
+ val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, clk_ctl_st));
+ DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, clk_ctl_st), val));
+ val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, powerctl));
+ DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, powerctl), val));
+ }
+ /* XXX: Dumping the CA7 registers currently causes a CTO, so it is temporarily disabled */
+ BCM_REFERENCE(ca7regs);
+#ifdef NOT_YET
+ if ((ca7regs = si_setcore(sih, ARMCA7_CORE_ID, 0)) != NULL) {
+ DHD_ERROR(("%s: ARM CA7 core Reg\n", __FUNCTION__));
+ val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, corecontrol));
+ DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, corecontrol), val));
+ val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, corecapabilities));
+ DHD_ERROR(("reg:0x%x val:0x%x\n",
+ (uint)OFFSETOF(ca7regs_t, corecapabilities), val));
+ val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, corestatus));
+ DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, corestatus), val));
+ val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, tracecontrol));
+ DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, tracecontrol), val));
+ val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, clk_ctl_st));
+ DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, clk_ctl_st), val));
+ val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, powerctl));
+ DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, powerctl), val));
+ }
+#endif /* NOT_YET */
+
+ DHD_ERROR(("%s: OOBR Reg\n", __FUNCTION__));
+
+ oob_base = si_oobr_baseaddr(sih, FALSE);
+ oob_base1 = si_oobr_baseaddr(sih, TRUE);
+ if (oob_base) {
+ dhd_sbreg_op(dhd, oob_base + OOB_STATUSA, &val, TRUE);
+ dhd_sbreg_op(dhd, oob_base + OOB_STATUSB, &val, TRUE);
+ dhd_sbreg_op(dhd, oob_base + OOB_STATUSC, &val, TRUE);
+ dhd_sbreg_op(dhd, oob_base + OOB_STATUSD, &val, TRUE);
+ } else if ((reg = si_setcore(sih, HND_OOBR_CORE_ID, 0)) != NULL) {
+ for (i = 0; i < 4; i++) {
+ val = R_REG(dhd->osh, &reg->intstatus[i]);
+ DHD_ERROR(("reg: addr:%p val:0x%x\n", &reg->intstatus[i], val));
+ }
+ }
+
+ if (oob_base1) {
+ DHD_ERROR(("%s: Second OOBR Reg\n", __FUNCTION__));
+
+ dhd_sbreg_op(dhd, oob_base1 + OOB_STATUSA, &val, TRUE);
+ dhd_sbreg_op(dhd, oob_base1 + OOB_STATUSB, &val, TRUE);
+ dhd_sbreg_op(dhd, oob_base1 + OOB_STATUSC, &val, TRUE);
+ dhd_sbreg_op(dhd, oob_base1 + OOB_STATUSD, &val, TRUE);
+ }
+
+ si_setcoreidx(dhd->bus->sih, save_idx);
+
+ return 0;
+}
+
+static void
+dhdpcie_hw_war_regdump(dhd_bus_t *bus)
+{
+ uint32 save_idx, val;
+ volatile uint32 *reg;
+
+ save_idx = si_coreidx(bus->sih);
+ if ((reg = si_setcore(bus->sih, CC_CORE_ID, 0)) != NULL) {
+ val = R_REG(bus->osh, reg + REG_WORK_AROUND);
+ DHD_ERROR(("CC HW_WAR :0x%x\n", val));
+ }
+
+ if ((reg = si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) != NULL) {
+ val = R_REG(bus->osh, reg + REG_WORK_AROUND);
+ DHD_ERROR(("ARM HW_WAR:0x%x\n", val));
+ }
+
+ if ((reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0)) != NULL) {
+ val = R_REG(bus->osh, reg + REG_WORK_AROUND);
+ DHD_ERROR(("PCIE HW_WAR :0x%x\n", val));
+ }
+ si_setcoreidx(bus->sih, save_idx);
+
+ val = PMU_REG_NEW(bus->sih, min_res_mask, 0, 0);
+ DHD_ERROR(("MINRESMASK :0x%x\n", val));
+}
+
+int
+dhd_pcie_dma_info_dump(dhd_pub_t *dhd)
+{
+ if (dhd->bus->is_linkdown) {
+ DHD_ERROR(("\n ------- SKIP DUMPING DMA Registers "
+ "due to PCIe link down ------- \r\n"));
+ return 0;
+ }
+
+ DHD_ERROR(("\n ------- DUMPING DMA Registers ------- \r\n"));
+
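+ /*
+ * Each block below reads one DMA engine register set (Ctrl, Ptr, AddrLow,
+ * AddrHigh, Status0, Status1) from the PCIe core at fixed offsets:
+ * H2D TX @ 0x200, H2D RX @ 0x220, D2H TX @ 0x240, D2H RX @ 0x260.
+ */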
+ /* HostToDev */
+ DHD_ERROR(("HostToDev TX: XmtCtrl=0x%08x XmtPtr=0x%08x\n",
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x200, 0, 0),
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x204, 0, 0)));
+ DHD_ERROR((" : XmtAddrLow=0x%08x XmtAddrHigh=0x%08x\n",
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x208, 0, 0),
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x20C, 0, 0)));
+ DHD_ERROR((" : XmtStatus0=0x%08x XmtStatus1=0x%08x\n",
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x210, 0, 0),
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x214, 0, 0)));
+
+ DHD_ERROR(("HostToDev RX: RcvCtrl=0x%08x RcvPtr=0x%08x\n",
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x220, 0, 0),
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x224, 0, 0)));
+ DHD_ERROR((" : RcvAddrLow=0x%08x RcvAddrHigh=0x%08x\n",
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x228, 0, 0),
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x22C, 0, 0)));
+ DHD_ERROR((" : RcvStatus0=0x%08x RcvStatus1=0x%08x\n",
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x230, 0, 0),
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x234, 0, 0)));
+
+ /* DevToHost */
+ DHD_ERROR(("DevToHost TX: XmtCtrl=0x%08x XmtPtr=0x%08x\n",
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x240, 0, 0),
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x244, 0, 0)));
+ DHD_ERROR((" : XmtAddrLow=0x%08x XmtAddrHigh=0x%08x\n",
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x248, 0, 0),
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x24C, 0, 0)));
+ DHD_ERROR((" : XmtStatus0=0x%08x XmtStatus1=0x%08x\n",
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x250, 0, 0),
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x254, 0, 0)));
+
+ DHD_ERROR(("DevToHost RX: RcvCtrl=0x%08x RcvPtr=0x%08x\n",
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x260, 0, 0),
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x264, 0, 0)));
+ DHD_ERROR((" : RcvAddrLow=0x%08x RcvAddrHigh=0x%08x\n",
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x268, 0, 0),
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x26C, 0, 0)));
+ DHD_ERROR((" : RcvStatus0=0x%08x RcvStatus1=0x%08x\n",
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x270, 0, 0),
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x274, 0, 0)));
+
+ return 0;
+}
+
+bool
+dhd_pcie_dump_int_regs(dhd_pub_t *dhd)
+{
+ uint32 intstatus = 0;
+ uint32 intmask = 0;
+ uint32 d2h_db0 = 0;
+ uint32 d2h_mb_data = 0;
+
+ DHD_ERROR(("\n ------- DUMPING INTR Status and Masks ------- \r\n"));
+ intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ dhd->bus->pcie_mailbox_int, 0, 0);
+ if (intstatus == (uint32)-1) {
+ DHD_ERROR(("intstatus=0x%x \n", intstatus));
+ return FALSE;
+ }
+
+ intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ dhd->bus->pcie_mailbox_mask, 0, 0);
+ if (intmask == (uint32)-1) {
+ DHD_ERROR(("intstatus=0x%x intmask=0x%x \n", intstatus, intmask));
+ return FALSE;
+ }
+
+ d2h_db0 = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ PCID2H_MailBox, 0, 0);
+ if (d2h_db0 == (uint32)-1) {
+ DHD_ERROR(("intstatus=0x%x intmask=0x%x d2h_db0=0x%x\n",
+ intstatus, intmask, d2h_db0));
+ return FALSE;
+ }
+
+ DHD_ERROR(("intstatus=0x%x intmask=0x%x d2h_db0=0x%x\n",
+ intstatus, intmask, d2h_db0));
+ dhd_bus_cmn_readshared(dhd->bus, &d2h_mb_data, D2H_MB_DATA, 0);
+ DHD_ERROR(("d2h_mb_data=0x%x def_intmask=0x%x \r\n", d2h_mb_data,
+ dhd->bus->def_intmask));
+
+ return TRUE;
+}
+
+void
+dhd_pcie_dump_rc_conf_space_cap(dhd_pub_t *dhd)
+{
+ DHD_ERROR(("\n ------- DUMPING PCIE RC config space Registers ------- \r\n"));
+ DHD_ERROR(("Pcie RC Uncorrectable Error Status Val=0x%x\n",
+ dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
+ PCIE_EXTCAP_AER_UCERR_OFFSET, TRUE, FALSE, 0)));
+#ifdef EXTENDED_PCIE_DEBUG_DUMP
+ DHD_ERROR(("hdrlog0 =0x%08x hdrlog1 =0x%08x hdrlog2 =0x%08x hdrlog3 =0x%08x\n",
+ dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
+ PCIE_EXTCAP_ERR_HEADER_LOG_0, TRUE, FALSE, 0),
+ dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
+ PCIE_EXTCAP_ERR_HEADER_LOG_1, TRUE, FALSE, 0),
+ dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
+ PCIE_EXTCAP_ERR_HEADER_LOG_2, TRUE, FALSE, 0),
+ dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
+ PCIE_EXTCAP_ERR_HEADER_LOG_3, TRUE, FALSE, 0)));
+#endif /* EXTENDED_PCIE_DEBUG_DUMP */
+}
+
+#ifdef WL_CFGVENDOR_SEND_HANG_EVENT
+#define MAX_RC_REG_INFO_VAL 8
+#define PCIE_EXTCAP_ERR_HD_SZ 4
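+/* Each field below is appended to dhd->hang_info as 8 hex digits plus a
+ * HANG_KEY_DEL delimiter (the raw header-log words use HANG_RAW_DEL),
+ * bounded by HANG_FIELD_CNT_MAX fields in total.
+ */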
+void
+dhd_dump_pcie_rc_regs_for_linkdown(dhd_pub_t *dhd, int *bytes_written)
+{
+ int i;
+ int remain_len;
+
+ /* dump link control & status */
+ if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
+ *bytes_written += scnprintf(&dhd->hang_info[*bytes_written], remain_len, "%08x%c",
+ dhdpcie_rc_access_cap(dhd->bus, PCIE_CAP_ID_EXP,
+ PCIE_CAP_LINKCTRL_OFFSET, FALSE, FALSE, 0), HANG_KEY_DEL);
+ dhd->hang_info_cnt++;
+ }
+
+ /* dump device control & status */
+ if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
+ *bytes_written += scnprintf(&dhd->hang_info[*bytes_written], remain_len, "%08x%c",
+ dhdpcie_rc_access_cap(dhd->bus, PCIE_CAP_ID_EXP,
+ PCIE_CAP_DEVCTRL_OFFSET, FALSE, FALSE, 0), HANG_KEY_DEL);
+ dhd->hang_info_cnt++;
+ }
+
+ /* dump uncorrectable error */
+ if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
+ *bytes_written += scnprintf(&dhd->hang_info[*bytes_written], remain_len, "%08x%c",
+ dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
+ PCIE_EXTCAP_AER_UCERR_OFFSET, TRUE, FALSE, 0), HANG_KEY_DEL);
+ dhd->hang_info_cnt++;
+ }
+
+ /* dump correctable error */
+ if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
+ *bytes_written += scnprintf(&dhd->hang_info[*bytes_written], remain_len, "%08x%c",
+ dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
+ /* XXX: use definition in linux/pcie_regs.h */
+ PCI_ERR_COR_STATUS, TRUE, FALSE, 0), HANG_KEY_DEL);
+ dhd->hang_info_cnt++;
+ }
+
+ /* HG05/06 reserved */
+ if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
+ *bytes_written += scnprintf(&dhd->hang_info[*bytes_written], remain_len, "%08x%c",
+ 0, HANG_KEY_DEL);
+ dhd->hang_info_cnt++;
+ }
+
+ if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
+ *bytes_written += scnprintf(&dhd->hang_info[*bytes_written], remain_len, "%08x%c",
+ 0, HANG_KEY_DEL);
+ dhd->hang_info_cnt++;
+ }
+
+ /* dump error header log in RAW */
+ for (i = 0; i < PCIE_EXTCAP_ERR_HD_SZ; i++) {
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
+ *bytes_written += scnprintf(&dhd->hang_info[*bytes_written], remain_len,
+ "%c%08x", HANG_RAW_DEL, dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
+ PCIE_EXTCAP_ERR_HEADER_LOG_0 + i * PCIE_EXTCAP_ERR_HD_SZ,
+ TRUE, FALSE, 0));
+ }
+ dhd->hang_info_cnt++;
+}
+#endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
+
+int
+dhd_pcie_debug_info_dump(dhd_pub_t *dhd)
+{
+ int host_irq_disabled;
+
+ DHD_ERROR(("bus->bus_low_power_state = %d\n", dhd->bus->bus_low_power_state));
+ host_irq_disabled = dhdpcie_irq_disabled(dhd->bus);
+ DHD_ERROR(("host pcie_irq disabled = %d\n", host_irq_disabled));
+ dhd_print_tasklet_status(dhd);
+ dhd_pcie_intr_count_dump(dhd);
+
+#if defined(LINUX) || defined(linux)
+ DHD_ERROR(("\n ------- DUMPING PCIE EP Resouce Info ------- \r\n"));
+ dhdpcie_dump_resource(dhd->bus);
+#endif /* LINUX || linux */
+
+ dhd_pcie_dump_rc_conf_space_cap(dhd);
+
+ DHD_ERROR(("RootPort PCIe linkcap=0x%08x\n",
+ dhd_debug_get_rc_linkcap(dhd->bus)));
+#ifdef CUSTOMER_HW4_DEBUG
+ if (dhd->bus->is_linkdown) {
+ DHD_ERROR(("Skip dumping the PCIe Config and Core registers. "
+ "link may be DOWN\n"));
+ return 0;
+ }
+#endif /* CUSTOMER_HW4_DEBUG */
+ DHD_ERROR(("\n ------- DUMPING PCIE EP config space Registers ------- \r\n"));
+ /* XXX: hwnbu-twiki.sj.broadcom.com/bin/view/Mwgroup/CurrentPcieGen2ProgramGuide */
+ dhd_bus_dump_imp_cfg_registers(dhd->bus);
+#ifdef EXTENDED_PCIE_DEBUG_DUMP
+ DHD_ERROR(("Pcie EP Uncorrectable Error Status Val=0x%x\n",
+ dhdpcie_ep_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
+ PCIE_EXTCAP_AER_UCERR_OFFSET, TRUE, FALSE, 0)));
+ DHD_ERROR(("hdrlog0(0x%x)=0x%08x hdrlog1(0x%x)=0x%08x hdrlog2(0x%x)=0x%08x "
+ "hdrlog3(0x%x)=0x%08x\n", PCI_TLP_HDR_LOG1,
+ dhd_pcie_config_read(dhd->bus, PCI_TLP_HDR_LOG1, sizeof(uint32)),
+ PCI_TLP_HDR_LOG2,
+ dhd_pcie_config_read(dhd->bus, PCI_TLP_HDR_LOG2, sizeof(uint32)),
+ PCI_TLP_HDR_LOG3,
+ dhd_pcie_config_read(dhd->bus, PCI_TLP_HDR_LOG3, sizeof(uint32)),
+ PCI_TLP_HDR_LOG4,
+ dhd_pcie_config_read(dhd->bus, PCI_TLP_HDR_LOG4, sizeof(uint32))));
+ if (dhd->bus->sih->buscorerev >= 24) {
+ DHD_ERROR(("DeviceStatusControl(0x%x)=0x%x SubsystemControl(0x%x)=0x%x "
+ "L1SSControl2(0x%x)=0x%x\n", PCIECFGREG_DEV_STATUS_CTRL,
+ dhd_pcie_config_read(dhd->bus, PCIECFGREG_DEV_STATUS_CTRL,
+ sizeof(uint32)), PCIE_CFG_SUBSYSTEM_CONTROL,
+ dhd_pcie_config_read(dhd->bus, PCIE_CFG_SUBSYSTEM_CONTROL,
+ sizeof(uint32)), PCIECFGREG_PML1_SUB_CTRL2,
+ dhd_pcie_config_read(dhd->bus, PCIECFGREG_PML1_SUB_CTRL2,
+ sizeof(uint32))));
+ dhd_bus_dump_dar_registers(dhd->bus);
+ }
+#endif /* EXTENDED_PCIE_DEBUG_DUMP */
+
+ if (dhd->bus->is_linkdown) {
+ DHD_ERROR(("Skip dumping the PCIe Core registers. link may be DOWN\n"));
+ return 0;
+ }
+
+ if (MULTIBP_ENAB(dhd->bus->sih)) {
+ dhd_bus_pcie_pwr_req(dhd->bus);
+ }
+
+ DHD_ERROR(("\n ------- DUMPING PCIE core Registers ------- \r\n"));
+ /* XXX: hwnbu-twiki.sj.broadcom.com/twiki/pub/Mwgroup/
+ * CurrentPcieGen2ProgramGuide/pcie_ep.htm
+ */
+
+ DHD_ERROR(("ClkReq0(0x%x)=0x%x ClkReq1(0x%x)=0x%x ClkReq2(0x%x)=0x%x "
+ "ClkReq3(0x%x)=0x%x\n", PCIECFGREG_PHY_DBG_CLKREQ0,
+ dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ0),
+ PCIECFGREG_PHY_DBG_CLKREQ1,
+ dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ1),
+ PCIECFGREG_PHY_DBG_CLKREQ2,
+ dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ2),
+ PCIECFGREG_PHY_DBG_CLKREQ3,
+ dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ3)));
+
+#ifdef EXTENDED_PCIE_DEBUG_DUMP
+ if (dhd->bus->sih->buscorerev >= 24) {
+
+ DHD_ERROR(("ltssm_hist_0(0x%x)=0x%x ltssm_hist_1(0x%x)=0x%x "
+ "ltssm_hist_2(0x%x)=0x%x "
+ "ltssm_hist_3(0x%x)=0x%x\n", PCIECFGREG_PHY_LTSSM_HIST_0,
+ dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_0),
+ PCIECFGREG_PHY_LTSSM_HIST_1,
+ dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_1),
+ PCIECFGREG_PHY_LTSSM_HIST_2,
+ dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_2),
+ PCIECFGREG_PHY_LTSSM_HIST_3,
+ dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_3)));
+
+ DHD_ERROR(("trefup(0x%x)=0x%x trefup_ext(0x%x)=0x%x\n",
+ PCIECFGREG_TREFUP,
+ dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_TREFUP),
+ PCIECFGREG_TREFUP_EXT,
+ dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_TREFUP_EXT)));
+ DHD_ERROR(("errlog(0x%x)=0x%x errlog_addr(0x%x)=0x%x "
+ "Function_Intstatus(0x%x)=0x%x "
+ "Function_Intmask(0x%x)=0x%x Power_Intstatus(0x%x)=0x%x "
+ "Power_Intmask(0x%x)=0x%x\n",
+ PCIE_CORE_REG_ERRLOG,
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ PCIE_CORE_REG_ERRLOG, 0, 0),
+ PCIE_CORE_REG_ERR_ADDR,
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ PCIE_CORE_REG_ERR_ADDR, 0, 0),
+ PCIFunctionIntstatus(dhd->bus->sih->buscorerev),
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ PCIFunctionIntstatus(dhd->bus->sih->buscorerev), 0, 0),
+ PCIFunctionIntmask(dhd->bus->sih->buscorerev),
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ PCIFunctionIntmask(dhd->bus->sih->buscorerev), 0, 0),
+ PCIPowerIntstatus(dhd->bus->sih->buscorerev),
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ PCIPowerIntstatus(dhd->bus->sih->buscorerev), 0, 0),
+ PCIPowerIntmask(dhd->bus->sih->buscorerev),
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ PCIPowerIntmask(dhd->bus->sih->buscorerev), 0, 0)));
+ DHD_ERROR(("err_hdrlog1(0x%x)=0x%x err_hdrlog2(0x%x)=0x%x "
+ "err_hdrlog3(0x%x)=0x%x err_hdrlog4(0x%x)=0x%x\n",
+ (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg1),
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg1), 0, 0),
+ (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg2),
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg2), 0, 0),
+ (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg3),
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg3), 0, 0),
+ (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg4),
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg4), 0, 0)));
+ DHD_ERROR(("err_code(0x%x)=0x%x\n",
+ (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_code_logreg),
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, u.pcie2.err_code_logreg), 0, 0)));
+
+ dhd_pcie_dump_wrapper_regs(dhd);
+ dhdpcie_hw_war_regdump(dhd->bus);
+ }
+#endif /* EXTENDED_PCIE_DEBUG_DUMP */
+
+ dhd_pcie_dma_info_dump(dhd);
+
+ if (MULTIBP_ENAB(dhd->bus->sih)) {
+ dhd_bus_pcie_pwr_req_clear(dhd->bus);
+ }
+
+ return 0;
+}
+
+bool
+dhd_bus_force_bt_quiesce_enabled(struct dhd_bus *bus)
+{
+ return bus->force_bt_quiesce;
+}
+#ifdef BCMINTERNAL
+#ifdef DHD_FWTRACE
+uint32 dhd_bus_get_bp_base(dhd_pub_t *dhdp)
+{
+ return (dhdp->bus->bp_base);
+}
+#endif /* DHD_FWTRACE */
+#endif /* BCMINTERNAL */
+#ifdef DHD_HP2P
+uint16
+dhd_bus_get_hp2p_ring_max_size(struct dhd_bus *bus, bool tx)
+{
+ if (tx)
+ return bus->hp2p_txcpl_max_items;
+ else
+ return bus->hp2p_rxcpl_max_items;
+}
+
+static uint16
+dhd_bus_set_hp2p_ring_max_size(struct dhd_bus *bus, bool tx, uint16 val)
+{
+ if (tx)
+ bus->hp2p_txcpl_max_items = val;
+ else
+ bus->hp2p_rxcpl_max_items = val;
+ return val;
+}
+#endif /* DHD_HP2P */
+
+uint8
+dhd_d11_slices_num_get(dhd_pub_t *dhdp)
+{
+ return si_scan_core_present(dhdp->bus->sih) ?
+ MAX_NUM_D11_CORES_WITH_SCAN : MAX_NUM_D11CORES;
+}
+
+#if defined(linux) || defined(LINUX)
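+/* Write/read-back test of dongle RAM: each pattern in init_val[] is written
+ * across the full RAM in MEMBLOCK-sized chunks and verified by comparison.
+ */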
+static bool
+dhd_bus_tcm_test(struct dhd_bus *bus)
+{
+ int ret = 0;
+ int size; /* Full mem size */
+ int start; /* Start address */
+ int read_size = 0; /* Read size of each iteration */
+ int num = 0;
+ uint8 *read_buf, *write_buf;
+ uint8 init_val[NUM_PATTERNS] = {
+ 0xFFu, /* 11111111 */
+ 0x00u, /* 00000000 */
+#if !defined(DHD_FW_MEM_CORRUPTION)
+ 0x77u, /* 01110111 */
+ 0x22u, /* 00100010 */
+ 0x27u, /* 00100111 */
+ 0x72u, /* 01110010 */
+#endif /* !DHD_FW_MEM_CORRUPTION */
+ };
+
+ if (!bus) {
+ DHD_ERROR(("%s: bus is NULL !\n", __FUNCTION__));
+ return FALSE;
+ }
+
+ read_buf = MALLOCZ(bus->dhd->osh, MEMBLOCK);
+
+ if (!read_buf) {
+ DHD_ERROR(("%s: MALLOC of read_buf failed\n", __FUNCTION__));
+ return FALSE;
+ }
+
+ write_buf = MALLOCZ(bus->dhd->osh, MEMBLOCK);
+
+ if (!write_buf) {
+ MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
+ DHD_ERROR(("%s: MALLOC of write_buf failed\n", __FUNCTION__));
+ return FALSE;
+ }
+
+ DHD_ERROR(("%s: start %x, size: %x\n", __FUNCTION__, bus->dongle_ram_base, bus->ramsize));
+ DHD_ERROR(("%s: memblock size %d, #pattern %d\n", __FUNCTION__, MEMBLOCK, NUM_PATTERNS));
+
+ while (num < NUM_PATTERNS) {
+ start = bus->dongle_ram_base;
+ /* Get full mem size */
+ size = bus->ramsize;
+
+ memset(write_buf, init_val[num], MEMBLOCK);
+ while (size > 0) {
+ read_size = MIN(MEMBLOCK, size);
+ memset(read_buf, 0, read_size);
+
+ /* Write */
+ if ((ret = dhdpcie_bus_membytes(bus, TRUE, start, write_buf, read_size))) {
+ DHD_ERROR(("%s: Write Error membytes %d\n", __FUNCTION__, ret));
+ MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
+ MFREE(bus->dhd->osh, write_buf, MEMBLOCK);
+ return FALSE;
+ }
+
+ /* Read */
+ if ((ret = dhdpcie_bus_membytes(bus, FALSE, start, read_buf, read_size))) {
+ DHD_ERROR(("%s: Read Error membytes %d\n", __FUNCTION__, ret));
+ MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
+ MFREE(bus->dhd->osh, write_buf, MEMBLOCK);
+ return FALSE;
+ }
+
+ /* Compare */
+ if (memcmp(read_buf, write_buf, read_size)) {
+ DHD_ERROR(("%s: Mismatch at %x, iter : %d\n",
+ __FUNCTION__, start, num));
+ prhex("Readbuf", read_buf, read_size);
+ prhex("Writebuf", write_buf, read_size);
+ MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
+ MFREE(bus->dhd->osh, write_buf, MEMBLOCK);
+ return FALSE;
+ }
+
+ /* Decrement size and increment start address */
+ size -= read_size;
+ start += read_size;
+ }
+ num++;
+ }
+
+ MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
+ MFREE(bus->dhd->osh, write_buf, MEMBLOCK);
+
+ DHD_ERROR(("%s: Success iter : %d\n", __FUNCTION__, num));
+ return TRUE;
+}
+#endif /* LINUX || linux */
+
+#define PCI_CFG_LINK_SPEED_SHIFT 16
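+/* The value returned below is the Current Link Speed field of the PCIe Link
+ * Status register; per the PCIe spec: 1 = 2.5 GT/s (Gen1), 2 = 5.0 GT/s (Gen2),
+ * 3 = 8.0 GT/s (Gen3), 4 = 16.0 GT/s (Gen4).
+ */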
+int
+dhd_get_pcie_linkspeed(dhd_pub_t *dhd)
+{
+ uint32 pcie_lnkst;
+ uint32 pcie_lnkspeed;
+ pcie_lnkst = OSL_PCI_READ_CONFIG(dhd->osh, PCIECFGREG_LINK_STATUS_CTRL,
+ sizeof(pcie_lnkst));
+
+ pcie_lnkspeed = (pcie_lnkst >> PCI_CFG_LINK_SPEED_SHIFT) & PCI_LINK_SPEED_MASK;
+ DHD_INFO(("%s: Link speed: %d\n", __FUNCTION__, pcie_lnkspeed));
+ return pcie_lnkspeed;
+}
+
+int
+dhd_bus_checkdied(struct dhd_bus *bus, char *data, uint size)
+{
+ return dhdpcie_checkdied(bus, data, size);
+}
+
+/* The common backplane can be hung by putting the APB2 bridge in reset */
+void
+dhdpcie_induce_cbp_hang(dhd_pub_t *dhd)
+{
+ uint32 addr, val;
+ uint32 apb2_wrapper_reg = 0x18106000;
+ uint32 apb2_reset_ctrl_offset = 0x800;
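+ /* Writing 1 to the wrapper's reset control register holds the APB2 bridge
+ * in reset, so subsequent common-backplane accesses hang; this is
+ * intentional fault injection for exercising recovery paths.
+ */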
+ addr = apb2_wrapper_reg + apb2_reset_ctrl_offset;
+ val = 1;
+ dhd_sbreg_op(dhd, addr, &val, FALSE);
+}
diff --git a/bcmdhd.101.10.361.x/dhd_pcie.h b/bcmdhd.101.10.361.x/dhd_pcie.h
new file mode 100755
index 0000000..e18bc2b
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_pcie.h
@@ -0,0 +1,1048 @@
+/*
+ * Linux DHD Bus Module for PCIE
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+#ifndef dhd_pcie_h
+#define dhd_pcie_h
+
+#include <bcmpcie.h>
+#include <hnd_cons.h>
+#include <dhd_linux.h>
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
+#ifdef CONFIG_PCI_MSM
+#include <linux/msm_pcie.h>
+#else
+#include <mach/msm_pcie.h>
+#endif /* CONFIG_PCI_MSM */
+#endif /* CONFIG_ARCH_MSM */
+#ifdef CONFIG_ARCH_EXYNOS
+#ifndef SUPPORT_EXYNOS7420
+#include <linux/exynos-pci-noti.h>
+extern int exynos_pcie_register_event(struct exynos_pcie_register_event *reg);
+extern int exynos_pcie_deregister_event(struct exynos_pcie_register_event *reg);
+#endif /* !SUPPORT_EXYNOS7420 */
+#endif /* CONFIG_ARCH_EXYNOS */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+
+#ifdef DHD_PCIE_RUNTIMEPM
+#include <linux/mutex.h>
+#include <linux/wait.h>
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+/* defines */
+#define PCIE_SHARED_VERSION PCIE_SHARED_VERSION_7
+
+#define PCMSGBUF_HDRLEN 0
+#define DONGLE_REG_MAP_SIZE (32 * 1024)
+#define DONGLE_TCM_MAP_SIZE (4096 * 1024)
+#define DONGLE_MIN_MEMSIZE (128 * 1024)
+#ifdef DHD_DEBUG
+#define DHD_PCIE_SUCCESS 0
+#define DHD_PCIE_FAILURE 1
+#endif /* DHD_DEBUG */
+#define REMAP_ENAB(bus) ((bus)->remap)
+#define REMAP_ISADDR(bus, a) (((a) >= ((bus)->orig_ramsize)) && ((a) < ((bus)->ramsize)))
+
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
+#define struct_pcie_notify struct msm_pcie_notify
+#define struct_pcie_register_event struct msm_pcie_register_event
+#endif /* CONFIG_ARCH_MSM */
+#ifdef CONFIG_ARCH_EXYNOS
+#ifndef SUPPORT_EXYNOS7420
+#define struct_pcie_notify struct exynos_pcie_notify
+#define struct_pcie_register_event struct exynos_pcie_register_event
+#endif /* !SUPPORT_EXYNOS7420 */
+#endif /* CONFIG_ARCH_EXYNOS */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+
+#define MAX_DHD_TX_FLOWS 320
+
+/* user defined data structures */
+/* Device console log buffer state */
+#define CONSOLE_LINE_MAX 192u
+#define CONSOLE_BUFFER_MAX (8 * 1024)
+
+#ifdef IDLE_TX_FLOW_MGMT
+#define IDLE_FLOW_LIST_TIMEOUT 5000
+#define IDLE_FLOW_RING_TIMEOUT 5000
+#endif /* IDLE_TX_FLOW_MGMT */
+
+#ifdef DEVICE_TX_STUCK_DETECT
+#define DEVICE_TX_STUCK_CKECK_TIMEOUT 1000 /* 1 sec */
+#define DEVICE_TX_STUCK_TIMEOUT 10000 /* 10 secs */
+#define DEVICE_TX_STUCK_WARN_DURATION (DEVICE_TX_STUCK_TIMEOUT / DEVICE_TX_STUCK_CKECK_TIMEOUT)
+#define DEVICE_TX_STUCK_DURATION (DEVICE_TX_STUCK_WARN_DURATION * 2)
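+/* With the defaults above, a warning fires after ~10 one-second checks (10s)
+ * and the device is declared stuck after ~20 checks (20s).
+ */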
+#endif /* DEVICE_TX_STUCK_DETECT */
+
+/* implicit DMA for h2d wr and d2h rd indice from Host memory to TCM */
+#define IDMA_ENAB(dhd) ((dhd) && (dhd)->idma_enable)
+#define IDMA_ACTIVE(dhd) ((dhd) && ((dhd)->idma_enable) && ((dhd)->idma_inited))
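+/* The *_ENAB() macros here check only the enable flag; the *_ACTIVE()
+ * variants additionally require that the corresponding init has completed.
+ */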
+
+#define IDMA_CAPABLE(bus) (((bus)->sih->buscorerev == 19) || ((bus)->sih->buscorerev >= 23))
+
+/* IFRM (Implicit Flow Ring Manager) enable and inited */
+#define IFRM_ENAB(dhd) ((dhd) && (dhd)->ifrm_enable)
+#define IFRM_ACTIVE(dhd) ((dhd) && ((dhd)->ifrm_enable) && ((dhd)->ifrm_inited))
+
+/* DAR registers use for h2d doorbell */
+#define DAR_ENAB(dhd) ((dhd) && (dhd)->dar_enable)
+#define DAR_ACTIVE(dhd) ((dhd) && ((dhd)->dar_enable) && ((dhd)->dar_inited))
+
+/* DAR WAR for revs < 64 */
+#define DAR_PWRREQ(bus) (((bus)->_dar_war) && DAR_ACTIVE((bus)->dhd))
+
+/* PCIE CTO Prevention and Recovery */
+#define PCIECTO_ENAB(bus) ((bus)->cto_enable)
+
+/* Implicit DMA index usage :
+ * Index 0 for h2d write index transfer
+ * Index 1 for d2h read index transfer
+ */
+#define IDMA_IDX0 0
+#define IDMA_IDX1 1
+#define IDMA_IDX2 2
+#define IDMA_IDX3 3
+#define DMA_TYPE_SHIFT 4
+#define DMA_TYPE_IDMA 1
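+/* Illustration (assumed encoding, not taken from this file): a combined
+ * type/index value would be composed as
+ * ((DMA_TYPE_IDMA << DMA_TYPE_SHIFT) | IDMA_IDX0).
+ */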
+
+#define DHDPCIE_CONFIG_HDR_SIZE 16
+#define DHDPCIE_CONFIG_CHECK_DELAY_MS 10 /* 10ms */
+#define DHDPCIE_CONFIG_CHECK_RETRY_COUNT 20
+#define DHDPCIE_DONGLE_PWR_TOGGLE_DELAY 1000 /* 1ms in units of us */
+#define DHDPCIE_PM_D3_DELAY 200000 /* 200ms in units of us */
+#define DHDPCIE_PM_D2_DELAY 200 /* 200us */
+
+typedef struct dhd_console {
+ uint count; /* Poll interval msec counter */
+ uint log_addr; /* Log struct address (fixed) */
+ hnd_log_t log; /* Log struct (host copy) */
+ uint bufsize; /* Size of log buffer */
+ uint8 *buf; /* Log buffer (host copy) */
+ uint last; /* Last buffer read index */
+} dhd_console_t;
+
+typedef struct ring_sh_info {
+ uint32 ring_mem_addr;
+ uint32 ring_state_w;
+ uint32 ring_state_r;
+ pcie_hwa_db_index_t ring_hwa_db_idx; /* HWA DB index value per ring */
+} ring_sh_info_t;
+#define MAX_DS_TRACE_SIZE 50
+#ifdef DHD_MMIO_TRACE
+#define MAX_MMIO_TRACE_SIZE 256
+/* At least 250us must elapse before a new entry is added */
+#define MIN_MMIO_TRACE_TIME 250
+#define DHD_RING_IDX 0x00FF0000
+typedef struct _dhd_mmio_trace_t {
+ uint64 timestamp;
+ uint32 addr;
+ uint32 value;
+ bool set;
+} dhd_mmio_trace_t;
+#endif /* defined(DHD_MMIO_TRACE) */
+typedef struct _dhd_ds_trace_t {
+ uint64 timestamp;
+ bool d2h;
+ uint32 dsval;
+#ifdef PCIE_INB_DW
+ enum dhd_bus_ds_state inbstate;
+#endif /* PCIE_INB_DW */
+} dhd_ds_trace_t;
+
+#define DEVICE_WAKE_NONE 0
+#define DEVICE_WAKE_OOB 1
+#define DEVICE_WAKE_INB 2
+
+#define INBAND_DW_ENAB(bus) ((bus)->dw_option == DEVICE_WAKE_INB)
+#define OOB_DW_ENAB(bus) ((bus)->dw_option == DEVICE_WAKE_OOB)
+#define NO_DW_ENAB(bus) ((bus)->dw_option == DEVICE_WAKE_NONE)
+
+#define PCIE_PWR_REQ_RELOAD_WAR_ENAB(buscorerev) \
+ ((buscorerev == 66) || (buscorerev == 67) || (buscorerev == 68) || \
+ (buscorerev == 70) || (buscorerev == 72))
+
+#define PCIE_FASTLPO_ENABLED(buscorerev) \
+ ((buscorerev == 66) || (buscorerev == 67) || (buscorerev == 68) || \
+ (buscorerev == 70) || (buscorerev == 72))
+
+/*
+ * HW JIRA - CRWLPCIEGEN2-672
+ * The Producer Index feature used by F1 gets reset on an F0 FLR;
+ * fixed in rev 68.
+ */
+#define PCIE_ENUM_RESET_WAR_ENAB(buscorerev) \
+ ((buscorerev == 66) || (buscorerev == 67))
+
+struct dhd_bus;
+
+struct dhd_pcie_rev {
+ uint8 fw_rev;
+ void (*handle_mb_data)(struct dhd_bus *);
+};
+
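+/* Snapshot of EP config space captured by dhdpcie_config_save() and replayed
+ * by dhdpcie_config_restore() (declared further below), e.g. across link
+ * recovery or power transitions.
+ */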
+typedef struct dhdpcie_config_save
+{
+ uint32 header[DHDPCIE_CONFIG_HDR_SIZE];
+ /* pmcsr save */
+ uint32 pmcsr;
+ /* express save */
+ uint32 exp_dev_ctrl_stat;
+ uint32 exp_link_ctrl_stat;
+ uint32 exp_dev_ctrl_stat2;
+ uint32 exp_link_ctrl_stat2;
+ /* msi save */
+ uint32 msi_cap;
+ uint32 msi_addr0;
+ uint32 msi_addr1;
+ uint32 msi_data;
+ /* l1pm save */
+ uint32 l1pm0;
+ uint32 l1pm1;
+ /* ltr save */
+ uint32 ltr;
+ /* aer save */
+ uint32 aer_caps_ctrl; /* 0x18 */
+ uint32 aer_severity; /* 0x0C */
+ uint32 aer_umask; /* 0x08 */
+ uint32 aer_cmask; /* 0x14 */
+ uint32 aer_root_cmd; /* 0x2c */
+ /* BAR0 and BAR1 windows */
+ uint32 bar0_win;
+ uint32 bar1_win;
+} dhdpcie_config_save_t;
+
+/* The level of bus communication with the dongle */
+enum dhd_bus_low_power_state {
+ DHD_BUS_NO_LOW_POWER_STATE, /* Not in low power state */
+ DHD_BUS_D3_INFORM_SENT, /* D3 INFORM sent */
+ DHD_BUS_D3_ACK_RECIEVED, /* D3 ACK received */
+};
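+/* Typical flow, as implied by the set/check helpers further below: suspend
+ * sends D3 INFORM (-> DHD_BUS_D3_INFORM_SENT), the dongle acks it
+ * (-> DHD_BUS_D3_ACK_RECIEVED), and resume returns the bus to
+ * DHD_BUS_NO_LOW_POWER_STATE.
+ */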
+
+#ifdef DHD_FLOW_RING_STATUS_TRACE
+#define FRS_TRACE_SIZE 32 /* frs - flow_ring_status */
+typedef struct _dhd_flow_ring_status_trace_t {
+ uint64 timestamp;
+ uint16 h2d_ctrl_post_drd;
+ uint16 h2d_ctrl_post_dwr;
+ uint16 d2h_ctrl_cpln_drd;
+ uint16 d2h_ctrl_cpln_dwr;
+ uint16 h2d_rx_post_drd;
+ uint16 h2d_rx_post_dwr;
+ uint16 d2h_rx_cpln_drd;
+ uint16 d2h_rx_cpln_dwr;
+ uint16 d2h_tx_cpln_drd;
+ uint16 d2h_tx_cpln_dwr;
+ uint16 h2d_info_post_drd;
+ uint16 h2d_info_post_dwr;
+ uint16 d2h_info_cpln_drd;
+ uint16 d2h_info_cpln_dwr;
+ uint16 d2h_ring_edl_drd;
+ uint16 d2h_ring_edl_dwr;
+} dhd_frs_trace_t;
+#endif /* DHD_FLOW_RING_STATUS_TRACE */
+
+/** Instantiated once for each hardware (dongle) instance that this DHD manages */
+typedef struct dhd_bus {
+ dhd_pub_t *dhd; /**< pointer to per hardware (dongle) unique instance */
+#if !defined(NDIS)
+ struct pci_dev *rc_dev; /* pci RC device handle */
+ struct pci_dev *dev; /* pci device handle */
+#endif /* !defined(NDIS) */
+#ifdef DHD_EFI
+ void *pcie_dev;
+#endif
+ dll_t flowring_active_list; /* constructed list of tx flowring queues */
+#ifdef IDLE_TX_FLOW_MGMT
+ uint64 active_list_last_process_ts;
+ /* stores the timestamp of active list processing */
+#endif /* IDLE_TX_FLOW_MGMT */
+
+#ifdef DEVICE_TX_STUCK_DETECT
+ /* Flag to enable/disable device tx stuck monitor by DHD IOVAR dev_tx_stuck_monitor */
+ uint32 dev_tx_stuck_monitor;
+ /* Stores the timestamp (msec) of the last device Tx stuck check */
+ uint32 device_tx_stuck_check;
+#endif /* DEVICE_TX_STUCK_DETECT */
+
+ si_t *sih; /* Handle for SI calls */
+ char *vars; /* Variables (from CIS and/or other) */
+ uint varsz; /* Size of variables buffer */
+ uint32 sbaddr; /* Current SB window pointer (-1, invalid) */
+ sbpcieregs_t *reg; /* Registers for PCIE core */
+
+ uint armrev; /* CPU core revision */
+ uint coreid; /* CPU core id */
+ uint ramrev; /* SOCRAM core revision */
+ uint32 ramsize; /* Size of RAM in SOCRAM (bytes) */
+ uint32 orig_ramsize; /* Size of RAM in SOCRAM (bytes) */
+ uint32 srmemsize; /* Size of SRMEM */
+
+ uint32 bus; /* gSPI or SDIO bus */
+ uint32 bus_num; /* bus number */
+ uint32 slot_num; /* slot ID */
+ uint32 intstatus; /* Intstatus bits (events) pending */
+ bool dpc_sched; /* Indicates DPC schedule (intrpt rcvd) */
+ bool fcstate; /* State of dongle flow-control */
+
+ uint16 cl_devid; /* cached devid for dhdsdio_probe_attach() */
+ char *fw_path; /* module_param: path to firmware image */
+ char *nv_path; /* module_param: path to nvram vars file */
+#ifdef CACHE_FW_IMAGES
+ int processed_nvram_params_len; /* Modified len of NVRAM info */
+#endif
+
+#ifdef BCM_ROUTER_DHD
+ char *nvram_params; /* user specified nvram params. */
+ int nvram_params_len;
+#endif /* BCM_ROUTER_DHD */
+
+ struct pktq txq; /* Queue length used for flow-control */
+
+ bool intr; /* Use interrupts */
+ bool poll; /* Use polling */
+ bool ipend; /* Device interrupt is pending */
+ bool intdis; /* Interrupts disabled by isr */
+ uint intrcount; /* Count of device interrupt callbacks */
+ uint lastintrs; /* Count as of last watchdog timer */
+
+ dhd_console_t console; /* Console output polling support */
+ uint console_addr; /* Console address from shared struct */
+
+ bool alp_only; /* Don't use HT clock (ALP only) */
+
+ bool remap; /* Contiguous 1MB RAM: 512K socram + 512K devram
+ * Available with socram rev 16
+ * Remap region not DMA-able
+ */
+ uint32 resetinstr;
+ uint32 dongle_ram_base;
+ uint32 next_tlv; /* Holds location of next available TLV */
+ ulong shared_addr;
+ pciedev_shared_t *pcie_sh;
+ uint32 dma_rxoffset;
+ volatile char *regs; /* pci device memory va */
+ volatile char *tcm; /* pci device memory va */
+ uint32 bar1_size; /* pci device memory size */
+ uint32 curr_bar1_win; /* current PCIEBar1Window setting */
+ osl_t *osh;
+ uint32 nvram_csm; /* Nvram checksum */
+#ifdef BCMINTERNAL
+ bool msi_sim;
+ uchar *msi_sim_addr;
+ dmaaddr_t msi_sim_phys;
+ dhd_dma_buf_t hostfw_buf; /* Host offload firmware buffer */
+ uint32 hostfw_base; /* FW assumed base of host offload mem */
+ uint32 bp_base; /* adjusted bp base of host offload mem */
+#endif /* BCMINTERNAL */
+ uint16 pollrate;
+ uint16 polltick;
+
+ volatile uint32 *pcie_mb_intr_addr;
+ volatile uint32 *pcie_mb_intr_2_addr;
+ void *pcie_mb_intr_osh;
+ bool sleep_allowed;
+
+ wake_counts_t wake_counts;
+
+ /* version 3 shared struct related info start */
+ ring_sh_info_t ring_sh[BCMPCIE_COMMON_MSGRINGS + MAX_DHD_TX_FLOWS];
+
+ uint8 h2d_ring_count;
+ uint8 d2h_ring_count;
+ uint32 ringmem_ptr;
+ uint32 ring_state_ptr;
+
+ uint32 d2h_dma_scratch_buffer_mem_addr;
+
+ uint32 h2d_mb_data_ptr_addr;
+ uint32 d2h_mb_data_ptr_addr;
+ /* version 3 shared struct related info end */
+
+ uint32 def_intmask;
+ uint32 d2h_mb_mask;
+ uint32 pcie_mailbox_mask;
+ uint32 pcie_mailbox_int;
+ bool ltrsleep_on_unload;
+ uint wait_for_d3_ack;
+ uint16 max_tx_flowrings;
+ uint16 max_submission_rings;
+ uint16 max_completion_rings;
+ uint16 max_cmn_rings;
+ uint32 rw_index_sz;
+ uint32 hwa_db_index_sz;
+ bool db1_for_mb;
+
+ dhd_timeout_t doorbell_timer;
+ bool device_wake_state;
+#ifdef PCIE_OOB
+ bool oob_enabled;
+#endif /* PCIE_OOB */
+ bool irq_registered;
+ bool d2h_intr_method;
+ bool d2h_intr_control;
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#if defined(CONFIG_ARCH_MSM) || (defined(CONFIG_ARCH_EXYNOS) && \
+ !defined(SUPPORT_EXYNOS7420))
+#ifdef CONFIG_ARCH_MSM
+ uint8 no_cfg_restore;
+#endif /* CONFIG_ARCH_MSM */
+ struct_pcie_register_event pcie_event;
+#endif /* CONFIG_ARCH_MSM || CONFIG_ARCH_EXYNOS && !SUPPORT_EXYNOS7420 */
+ bool read_shm_fail;
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+ int32 idletime; /* Control for activity timeout */
+ bool rpm_enabled;
+#ifdef DHD_PCIE_RUNTIMEPM
+ int32 idlecount; /* Activity timeout counter */
+ int32 bus_wake; /* For wake up the bus */
+ bool runtime_resume_done; /* For check runtime suspend end */
+ struct mutex pm_lock; /* Synchronize for system PM & runtime PM */
+ wait_queue_head_t rpm_queue; /* wait-queue for bus wake up */
+#endif /* DHD_PCIE_RUNTIMEPM */
+ uint32 d3_inform_cnt;
+ uint32 d0_inform_cnt;
+ uint32 d0_inform_in_use_cnt;
+ uint8 force_suspend;
+ uint8 is_linkdown;
+ uint8 no_bus_init;
+#ifdef IDLE_TX_FLOW_MGMT
+ bool enable_idle_flowring_mgmt;
+#endif /* IDLE_TX_FLOW_MGMT */
+ struct dhd_pcie_rev api;
+ bool use_mailbox;
+ bool use_d0_inform;
+ void *bus_lp_state_lock;
+ void *pwr_req_lock;
+ bool dongle_in_deepsleep;
+ void *dongle_ds_lock;
+ bool bar1_switch_enab;
+ void *bar1_switch_lock;
+ void *backplane_access_lock;
+ enum dhd_bus_low_power_state bus_low_power_state;
+#ifdef DHD_FLOW_RING_STATUS_TRACE
+ dhd_frs_trace_t frs_isr_trace[FRS_TRACE_SIZE]; /* frs - flow_ring_status */
+ dhd_frs_trace_t frs_dpc_trace[FRS_TRACE_SIZE]; /* frs - flow_ring_status */
+ uint32 frs_isr_count;
+ uint32 frs_dpc_count;
+#endif /* DHD_FLOW_RING_STATUS_TRACE */
+#ifdef DHD_MMIO_TRACE
+ dhd_mmio_trace_t mmio_trace[MAX_MMIO_TRACE_SIZE];
+ uint32 mmio_trace_count;
+#endif /* defined(DHD_MMIO_TRACE) */
+ dhd_ds_trace_t ds_trace[MAX_DS_TRACE_SIZE];
+ uint32 ds_trace_count;
+ uint32 hostready_count; /* Number of hostready issued */
+#if defined(PCIE_OOB) || defined (BCMPCIE_OOB_HOST_WAKE)
+ bool oob_presuspend;
+#endif /* PCIE_OOB || BCMPCIE_OOB_HOST_WAKE */
+ dhdpcie_config_save_t saved_config;
+ ulong resume_intr_enable_count;
+ ulong dpc_intr_enable_count;
+ ulong isr_intr_disable_count;
+ ulong suspend_intr_disable_count;
+ ulong dpc_return_busdown_count;
+ ulong non_ours_irq_count;
+#ifdef BCMPCIE_OOB_HOST_WAKE
+ ulong oob_intr_count;
+ ulong oob_intr_enable_count;
+ ulong oob_intr_disable_count;
+ uint64 last_oob_irq_isr_time;
+ uint64 last_oob_irq_thr_time;
+ uint64 last_oob_irq_enable_time;
+ uint64 last_oob_irq_disable_time;
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+ uint64 isr_entry_time;
+ uint64 isr_exit_time;
+ uint64 isr_sched_dpc_time;
+ uint64 rpm_sched_dpc_time;
+ uint64 dpc_entry_time;
+ uint64 dpc_exit_time;
+ uint64 resched_dpc_time;
+ uint64 last_d3_inform_time;
+ uint64 last_process_ctrlbuf_time;
+ uint64 last_process_flowring_time;
+ uint64 last_process_txcpl_time;
+ uint64 last_process_rxcpl_time;
+ uint64 last_process_infocpl_time;
+ uint64 last_process_edl_time;
+ uint64 last_suspend_start_time;
+ uint64 last_suspend_end_time;
+ uint64 last_resume_start_time;
+ uint64 last_resume_end_time;
+ uint64 last_non_ours_irq_time;
+ bool hwa_enabled;
+ bool idma_enabled;
+ bool ifrm_enabled;
+ bool dar_enabled;
+ uint32 dmaxfer_complete;
+ uint8 dw_option;
+#ifdef PCIE_INB_DW
+ bool inb_enabled;
+ uint32 ds_exit_timeout;
+ uint32 host_sleep_exit_timeout;
+ uint wait_for_ds_exit;
+ uint32 inband_dw_assert_cnt; /* # of inband device_wake assert */
+ uint32 inband_dw_deassert_cnt; /* # of inband device_wake deassert */
+ uint32 inband_ds_exit_host_cnt; /* # of DS-EXIT , host initiated */
+ uint32 inband_ds_exit_device_cnt; /* # of DS-EXIT , device initiated */
+ uint32 inband_ds_exit_to_cnt; /* # of DS-EXIT timeout */
+ uint32 inband_host_sleep_exit_to_cnt; /* # of Host_Sleep exit timeout */
+ void *inb_lock; /* Lock to serialize in band device wake activity */
+ /* # of contexts in the host which currently want a FW transaction */
+ uint32 host_active_cnt;
+ bool skip_ds_ack; /* Skip DS-ACK while suspend is in progress */
+#endif /* PCIE_INB_DW */
+#if defined(PCIE_OOB) || defined(PCIE_INB_DW)
+ bool ds_enabled;
+#endif
+#ifdef DHD_PCIE_RUNTIMEPM
+ bool chk_pm; /* To avoid counting of wake up from Runtime PM */
+#endif /* DHD_PCIE_RUNTIMEPM */
+#if defined(PCIE_INB_DW)
+ bool calc_ds_exit_latency;
+ bool deep_sleep; /* Indicates deep_sleep set or unset by the DHD IOVAR deep_sleep */
+ uint64 ds_exit_latency;
+ uint64 ds_exit_ts1;
+ uint64 ds_exit_ts2;
+#endif /* PCIE_INB_DW */
+ bool _dar_war;
+#ifdef GDB_PROXY
+ /* True if firmware loaded and backplane accessible */
+ bool gdb_proxy_access_enabled;
+ /* ID set by last "gdb_proxy_probe" iovar */
+ uint32 gdb_proxy_last_id;
+ /* True if firmware was started in bootloader mode */
+ bool gdb_proxy_bootloader_mode;
+#endif /* GDB_PROXY */
+ uint8 dma_chan;
+
+ bool cto_enable; /* enable PCIE CTO Prevention and recovery */
+ uint32 cto_threshold; /* PCIE CTO timeout threshold */
+ bool cto_triggered; /* CTO is triggered */
+ bool intr_enabled; /* ready to receive interrupts from dongle */
+ int pwr_req_ref;
+ bool flr_force_fail; /* user intends to simulate flr force fail */
+
+ /* Information used to compose the memory map and to write the memory map,
+ * FW, and FW signature to dongle RAM.
+ * This information is used by the bootloader.
+ */
+ uint32 ramtop_addr; /* Dongle address of unused space at top of RAM */
+ uint32 fw_download_addr; /* Dongle address of FW download */
+ uint32 fw_download_len; /* Length in bytes of FW download */
+ uint32 fwsig_download_addr; /* Dongle address of FW signature download */
+ uint32 fwsig_download_len; /* Length in bytes of FW signature download */
+ uint32 fwstat_download_addr; /* Dongle address of FWS status download */
+ uint32 fwstat_download_len; /* Length in bytes of FWS status download */
+ uint32 fw_memmap_download_addr; /* Dongle address of FWS memory-info download */
+ uint32 fw_memmap_download_len; /* Length in bytes of FWS memory-info download */
+
+ char fwsig_filename[DHD_FILENAME_MAX]; /* Name of FW signature file */
+ char bootloader_filename[DHD_FILENAME_MAX]; /* Name of bootloader image file */
+ uint32 bootloader_addr; /* Dongle address of bootloader download */
+ bool force_bt_quiesce; /* send bt_quiesce command to BT driver. */
+ bool rc_ep_aspm_cap; /* RC and EP ASPM capable */
+ bool rc_ep_l1ss_cap; /* RC and EP L1SS capable */
+#if defined(DHD_H2D_LOG_TIME_SYNC)
+ ulong dhd_rte_time_sync_count; /* OSL_SYSUPTIME_US() */
+#endif /* DHD_H2D_LOG_TIME_SYNC */
+#ifdef D2H_MINIDUMP
+ bool d2h_minidump; /* This flag will be set if Host and FW handshake to collect minidump */
+ bool d2h_minidump_override; /* Force disable minidump through dhd IOVAR */
+#endif /* D2H_MINIDUMP */
+#ifdef BCMSLTGT
+ int xtalfreq; /* Xtal frequency used for htclkratio calculation */
+ uint32 ilp_tick; /* ILP ticks per second read from pmutimer */
+ uint32 xtal_ratio; /* xtal ticks per 4 ILP ticks read from pmu_xtalfreq */
+#endif /* BCMSLTGT */
+#ifdef BT_OVER_PCIE
+ /* whether the chip is in BT over PCIE mode or not */
+ bool btop_mode;
+#endif /* BT_OVER_PCIE */
+ uint16 hp2p_txcpl_max_items;
+ uint16 hp2p_rxcpl_max_items;
+ /* PCIE coherent status */
+ uint32 coherent_state;
+ uint32 inb_dw_deassert_cnt;
+ uint64 arm_oor_time;
+ uint64 rd_shared_pass_time;
+ uint32 hwa_mem_base;
+ uint32 hwa_mem_size;
+} dhd_bus_t;
+
+#ifdef DHD_MSI_SUPPORT
+extern uint enable_msi;
+#endif /* DHD_MSI_SUPPORT */
+
+enum {
+ PCIE_INTX = 0,
+ PCIE_MSI = 1
+};
+
+enum {
+ PCIE_D2H_INTMASK_CTRL = 0,
+ PCIE_HOST_IRQ_CTRL = 1
+};
+
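+/* Locking convention: the "__"-prefixed helpers below assume the caller
+ * already holds bus_lp_state_lock; the unprefixed wrappers acquire and
+ * release it around the same check or state change.
+ */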
+static INLINE bool
+__dhd_check_bus_in_lps(dhd_bus_t *bus)
+{
+ bool ret = (bus->bus_low_power_state == DHD_BUS_D3_INFORM_SENT) ||
+ (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED);
+ return ret;
+}
+
+static INLINE bool
+dhd_check_bus_in_lps(dhd_bus_t *bus)
+{
+ unsigned long flags_bus;
+ bool ret;
+ DHD_BUS_LP_STATE_LOCK(bus->bus_lp_state_lock, flags_bus);
+ ret = __dhd_check_bus_in_lps(bus);
+ DHD_BUS_LP_STATE_UNLOCK(bus->bus_lp_state_lock, flags_bus);
+ return ret;
+}
+
+static INLINE bool
+__dhd_check_bus_lps_d3_acked(dhd_bus_t *bus)
+{
+ bool ret = (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED);
+ return ret;
+}
+
+static INLINE bool
+dhd_check_bus_lps_d3_acked(dhd_bus_t *bus)
+{
+ unsigned long flags_bus;
+ bool ret;
+ DHD_BUS_LP_STATE_LOCK(bus->bus_lp_state_lock, flags_bus);
+ ret = __dhd_check_bus_lps_d3_acked(bus);
+ DHD_BUS_LP_STATE_UNLOCK(bus->bus_lp_state_lock, flags_bus);
+ return ret;
+}
+
+static INLINE void
+__dhd_set_bus_not_in_lps(dhd_bus_t *bus)
+{
+ bus->bus_low_power_state = DHD_BUS_NO_LOW_POWER_STATE;
+ return;
+}
+
+static INLINE void
+dhd_set_bus_not_in_lps(dhd_bus_t *bus)
+{
+ unsigned long flags_bus;
+ DHD_BUS_LP_STATE_LOCK(bus->bus_lp_state_lock, flags_bus);
+ __dhd_set_bus_not_in_lps(bus);
+ DHD_BUS_LP_STATE_UNLOCK(bus->bus_lp_state_lock, flags_bus);
+ return;
+}
+
+static INLINE void
+__dhd_set_bus_lps_d3_informed(dhd_bus_t *bus)
+{
+ bus->bus_low_power_state = DHD_BUS_D3_INFORM_SENT;
+ return;
+}
+
+static INLINE void
+dhd_set_bus_lps_d3_informed(dhd_bus_t *bus)
+{
+ unsigned long flags_bus;
+ DHD_BUS_LP_STATE_LOCK(bus->bus_lp_state_lock, flags_bus);
+ __dhd_set_bus_lps_d3_informed(bus);
+ DHD_BUS_LP_STATE_UNLOCK(bus->bus_lp_state_lock, flags_bus);
+ return;
+}
+
+static INLINE void
+__dhd_set_bus_lps_d3_acked(dhd_bus_t *bus)
+{
+ bus->bus_low_power_state = DHD_BUS_D3_ACK_RECIEVED;
+ return;
+}
+
+static INLINE void
+dhd_set_bus_lps_d3_acked(dhd_bus_t *bus)
+{
+ unsigned long flags_bus;
+ DHD_BUS_LP_STATE_LOCK(bus->bus_lp_state_lock, flags_bus);
+ __dhd_set_bus_lps_d3_acked(bus);
+ DHD_BUS_LP_STATE_UNLOCK(bus->bus_lp_state_lock, flags_bus);
+ return;
+}
+
+/* check routines */
+#define DHD_CHK_BUS_IN_LPS(bus) dhd_check_bus_in_lps(bus)
+#define __DHD_CHK_BUS_IN_LPS(bus) __dhd_check_bus_in_lps(bus)
+
+#define DHD_CHK_BUS_NOT_IN_LPS(bus) !(DHD_CHK_BUS_IN_LPS(bus))
+#define __DHD_CHK_BUS_NOT_IN_LPS(bus) !(__DHD_CHK_BUS_IN_LPS(bus))
+
+#define DHD_CHK_BUS_LPS_D3_INFORMED(bus) DHD_CHK_BUS_IN_LPS(bus)
+#define __DHD_CHK_BUS_LPS_D3_INFORMED(bus) __DHD_CHK_BUS_IN_LPS(bus)
+
+#define DHD_CHK_BUS_LPS_D3_ACKED(bus) dhd_check_bus_lps_d3_acked(bus)
+#define __DHD_CHK_BUS_LPS_D3_ACKED(bus) __dhd_check_bus_lps_d3_acked(bus)
+
+/* set routines */
+#define DHD_SET_BUS_NOT_IN_LPS(bus) dhd_set_bus_not_in_lps(bus)
+#define __DHD_SET_BUS_NOT_IN_LPS(bus) __dhd_set_bus_not_in_lps(bus)
+
+#define DHD_SET_BUS_LPS_D3_INFORMED(bus) dhd_set_bus_lps_d3_informed(bus)
+#define __DHD_SET_BUS_LPS_D3_INFORMED(bus) __dhd_set_bus_lps_d3_informed(bus)
+
+#define DHD_SET_BUS_LPS_D3_ACKED(bus) dhd_set_bus_lps_d3_acked(bus)
+#define __DHD_SET_BUS_LPS_D3_ACKED(bus) __dhd_set_bus_lps_d3_acked(bus)
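+/* Illustrative use (not from this file): a transmit path might bail out
+ * early with
+ *	if (DHD_CHK_BUS_IN_LPS(bus))
+ *		return BCME_ERROR;
+ * so that no doorbell is rung while the link is in, or entering, D3.
+ */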
+
+/* function declarations */
+
+extern uint32* dhdpcie_bus_reg_map(osl_t *osh, ulong addr, int size);
+extern int dhdpcie_bus_register(void);
+extern void dhdpcie_bus_unregister(void);
+extern bool dhdpcie_chipmatch(uint16 vendor, uint16 device);
+
+extern int dhdpcie_bus_attach(osl_t *osh, dhd_bus_t **bus_ptr,
+ volatile char *regs, volatile char *tcm, void *pci_dev, wifi_adapter_info_t *adapter);
+extern uint32 dhdpcie_bus_cfg_read_dword(struct dhd_bus *bus, uint32 addr, uint32 size);
+extern void dhdpcie_bus_cfg_write_dword(struct dhd_bus *bus, uint32 addr, uint32 size, uint32 data);
+extern void dhdpcie_bus_intr_enable(struct dhd_bus *bus);
+extern void dhdpcie_bus_intr_disable(struct dhd_bus *bus);
+extern int dhpcie_bus_mask_interrupt(dhd_bus_t *bus);
+extern void dhdpcie_bus_release(struct dhd_bus *bus);
+extern int32 dhdpcie_bus_isr(struct dhd_bus *bus);
+extern void dhdpcie_free_irq(dhd_bus_t *bus);
+extern void dhdpcie_bus_ringbell_fast(struct dhd_bus *bus, uint32 value);
+extern void dhdpcie_bus_ringbell_2_fast(struct dhd_bus *bus, uint32 value, bool devwake);
+extern void dhdpcie_dongle_reset(dhd_bus_t *bus);
+extern int dhd_bus_cfg_sprom_ctrl_bp_reset(struct dhd_bus *bus);
+extern int dhd_bus_cfg_ss_ctrl_bp_reset(struct dhd_bus *bus);
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+extern int dhdpcie_bus_suspend(struct dhd_bus *bus, bool state, bool byint);
+#else
+extern int dhdpcie_bus_suspend(struct dhd_bus *bus, bool state);
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+extern int dhdpcie_pci_suspend_resume(struct dhd_bus *bus, bool state);
+extern uint32 dhdpcie_force_alp(struct dhd_bus *bus, bool enable);
+extern uint32 dhdpcie_set_l1_entry_time(struct dhd_bus *bus, int force_l1_entry_time);
+extern bool dhdpcie_tcm_valid(dhd_bus_t *bus);
+extern void dhdpcie_pme_active(osl_t *osh, bool enable);
+extern bool dhdpcie_pme_cap(osl_t *osh);
+extern uint32 dhdpcie_lcreg(osl_t *osh, uint32 mask, uint32 val);
+extern void dhdpcie_set_pmu_min_res_mask(struct dhd_bus *bus, uint min_res_mask);
+extern uint8 dhdpcie_clkreq(osl_t *osh, uint32 mask, uint32 val);
+extern int dhdpcie_disable_irq(dhd_bus_t *bus);
+extern int dhdpcie_disable_irq_nosync(dhd_bus_t *bus);
+extern int dhdpcie_enable_irq(dhd_bus_t *bus);
+
+extern void dhd_bus_dump_dar_registers(struct dhd_bus *bus);
+
+#if defined(linux) || defined(LINUX)
+extern uint32 dhdpcie_rc_config_read(dhd_bus_t *bus, uint offset);
+extern uint32 dhdpcie_rc_access_cap(dhd_bus_t *bus, int cap, uint offset, bool is_ext,
+ bool is_write, uint32 writeval);
+extern uint32 dhdpcie_ep_access_cap(dhd_bus_t *bus, int cap, uint offset, bool is_ext,
+ bool is_write, uint32 writeval);
+extern uint32 dhd_debug_get_rc_linkcap(dhd_bus_t *bus);
+#else
+static INLINE uint32 dhdpcie_rc_config_read(dhd_bus_t *bus, uint offset) { return 0;}
+static INLINE uint32 dhdpcie_rc_access_cap(dhd_bus_t *bus, int cap, uint offset, bool is_ext,
+ bool is_write, uint32 writeval) { return -1;}
+static INLINE uint32 dhdpcie_ep_access_cap(dhd_bus_t *bus, int cap, uint offset, bool is_ext,
+ bool is_write, uint32 writeval) { return -1;}
+static INLINE uint32 dhd_debug_get_rc_linkcap(dhd_bus_t *bus) { return -1;}
+#endif
+#if defined(linux) || defined(LINUX)
+extern int dhdpcie_start_host_dev(dhd_bus_t *bus);
+extern int dhdpcie_stop_host_dev(dhd_bus_t *bus);
+extern int dhdpcie_disable_device(dhd_bus_t *bus);
+extern int dhdpcie_alloc_resource(dhd_bus_t *bus);
+extern void dhdpcie_free_resource(dhd_bus_t *bus);
+extern void dhdpcie_dump_resource(dhd_bus_t *bus);
+extern int dhdpcie_bus_request_irq(struct dhd_bus *bus);
+void dhdpcie_os_setbar1win(dhd_bus_t *bus, uint32 addr);
+void dhdpcie_os_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data);
+uint8 dhdpcie_os_rtcm8(dhd_bus_t *bus, ulong offset);
+void dhdpcie_os_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data);
+uint16 dhdpcie_os_rtcm16(dhd_bus_t *bus, ulong offset);
+void dhdpcie_os_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data);
+uint32 dhdpcie_os_rtcm32(dhd_bus_t *bus, ulong offset);
+#ifdef DHD_SUPPORT_64BIT
+void dhdpcie_os_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data);
+uint64 dhdpcie_os_rtcm64(dhd_bus_t *bus, ulong offset);
+#endif
+#endif /* LINUX || linux */
+
+#if defined(linux) || defined(LINUX) || defined(DHD_EFI)
+extern int dhdpcie_enable_device(dhd_bus_t *bus);
+#endif
+
+#ifdef BCMPCIE_OOB_HOST_WAKE
+extern int dhdpcie_oob_intr_register(dhd_bus_t *bus);
+extern void dhdpcie_oob_intr_unregister(dhd_bus_t *bus);
+extern void dhdpcie_oob_intr_set(dhd_bus_t *bus, bool enable);
+extern int dhdpcie_get_oob_irq_num(struct dhd_bus *bus);
+extern int dhdpcie_get_oob_irq_status(struct dhd_bus *bus);
+extern int dhdpcie_get_oob_irq_level(void);
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+#ifdef PCIE_OOB
+extern void dhd_oob_set_bt_reg_on(struct dhd_bus *bus, bool val);
+extern int dhd_oob_get_bt_reg_on(struct dhd_bus *bus);
+extern void dhdpcie_oob_init(dhd_bus_t *bus);
+extern int dhd_os_oob_set_device_wake(struct dhd_bus *bus, bool val);
+extern void dhd_os_ib_set_device_wake(struct dhd_bus *bus, bool val);
+#endif /* PCIE_OOB */
+#if defined(PCIE_OOB) || defined(PCIE_INB_DW)
+extern void dhd_bus_doorbell_timeout_reset(struct dhd_bus *bus);
+#endif /* defined(PCIE_OOB) || defined(PCIE_INB_DW) */
+
+#if defined(linux) || defined(LINUX)
+/* XXX: SWWLAN-82173 Force the PCIe RC into D3cold during system PM
+ * exynos_pcie_pm_suspend : RC enters suspend status & asserts PERST
+ * exynos_pcie_pm_resume : de-asserts PERST & RC resumes
+ */
+#if defined(CONFIG_ARCH_EXYNOS)
+#define EXYNOS_PCIE_VENDOR_ID 0x144d
+#if defined(CONFIG_MACH_UNIVERSAL7420) || defined(CONFIG_SOC_EXYNOS7420)
+#define EXYNOS_PCIE_DEVICE_ID 0xa575
+#define EXYNOS_PCIE_CH_NUM 1
+#elif defined(CONFIG_SOC_EXYNOS8890)
+#define EXYNOS_PCIE_DEVICE_ID 0xa544
+#define EXYNOS_PCIE_CH_NUM 0
+#elif defined(CONFIG_SOC_EXYNOS8895) || defined(CONFIG_SOC_EXYNOS9810) || \
+ defined(CONFIG_SOC_EXYNOS9820) || defined(CONFIG_SOC_EXYNOS9830) || \
+ defined(CONFIG_SOC_EXYNOS2100) || defined(CONFIG_SOC_EXYNOS1000) || \
+ defined(CONFIG_SOC_GS101)
+#define EXYNOS_PCIE_DEVICE_ID 0xecec
+#define EXYNOS_PCIE_CH_NUM 0
+#else
+#error "Not supported platform"
+#endif /* CONFIG_SOC_EXYNOSXXXX & CONFIG_MACH_UNIVERSALXXXX */
+extern void exynos_pcie_pm_suspend(int ch_num);
+extern void exynos_pcie_pm_resume(int ch_num);
+#endif /* CONFIG_ARCH_EXYNOS */
+
+#if defined(CONFIG_ARCH_MSM)
+#define MSM_PCIE_VENDOR_ID 0x17cb
+#if defined(CONFIG_ARCH_APQ8084)
+#define MSM_PCIE_DEVICE_ID 0x0101
+#elif defined(CONFIG_ARCH_MSM8994)
+#define MSM_PCIE_DEVICE_ID 0x0300
+#elif defined(CONFIG_ARCH_MSM8996)
+#define MSM_PCIE_DEVICE_ID 0x0104
+#elif defined(CONFIG_ARCH_MSM8998)
+#define MSM_PCIE_DEVICE_ID 0x0105
+#elif defined(CONFIG_ARCH_SDM845) || defined(CONFIG_ARCH_SM8150) || \
+ defined(CONFIG_ARCH_KONA) || defined(CONFIG_ARCH_LAHAINA)
+#define MSM_PCIE_DEVICE_ID 0x0106
+#else
+#error "Not supported platform"
+#endif
+#endif /* CONFIG_ARCH_MSM */
+
+#if defined(CONFIG_X86)
+#define X86_PCIE_VENDOR_ID 0x8086
+#define X86_PCIE_DEVICE_ID 0x9c1a
+#endif /* CONFIG_X86 */
+
+#if defined(CONFIG_ARCH_TEGRA)
+#define TEGRA_PCIE_VENDOR_ID 0x14e4
+#define TEGRA_PCIE_DEVICE_ID 0x4347
+#endif /* CONFIG_ARCH_TEGRA */
+
+#if defined(BOARD_HIKEY)
+#define HIKEY_PCIE_VENDOR_ID 0x19e5
+#define HIKEY_PCIE_DEVICE_ID 0x3660
+#endif /* BOARD_HIKEY */
+
+#define DUMMY_PCIE_VENDOR_ID 0xffff
+#define DUMMY_PCIE_DEVICE_ID 0xffff
+
+#if defined(CONFIG_ARCH_EXYNOS)
+#define PCIE_RC_VENDOR_ID EXYNOS_PCIE_VENDOR_ID
+#define PCIE_RC_DEVICE_ID EXYNOS_PCIE_DEVICE_ID
+#elif defined(CONFIG_ARCH_MSM)
+#define PCIE_RC_VENDOR_ID MSM_PCIE_VENDOR_ID
+#define PCIE_RC_DEVICE_ID MSM_PCIE_DEVICE_ID
+#elif defined(CONFIG_X86)
+#define PCIE_RC_VENDOR_ID X86_PCIE_VENDOR_ID
+#define PCIE_RC_DEVICE_ID X86_PCIE_DEVICE_ID
+#elif defined(CONFIG_ARCH_TEGRA)
+#define PCIE_RC_VENDOR_ID TEGRA_PCIE_VENDOR_ID
+#define PCIE_RC_DEVICE_ID TEGRA_PCIE_DEVICE_ID
+#elif defined(BOARD_HIKEY)
+#define PCIE_RC_VENDOR_ID HIKEY_PCIE_VENDOR_ID
+#define PCIE_RC_DEVICE_ID HIKEY_PCIE_DEVICE_ID
+#else
+/* Use dummy vendor and device IDs */
+#define PCIE_RC_VENDOR_ID DUMMY_PCIE_VENDOR_ID
+#define PCIE_RC_DEVICE_ID DUMMY_PCIE_DEVICE_ID
+#endif /* CONFIG_ARCH_EXYNOS */
+#endif /* linux || LINUX */
+
+#define DHD_REGULAR_RING 0
+#define DHD_HP2P_RING 1
+
+#ifdef CONFIG_ARCH_TEGRA
+extern int tegra_pcie_pm_suspend(void);
+extern int tegra_pcie_pm_resume(void);
+#endif /* CONFIG_ARCH_TEGRA */
+
+extern int dhd_buzzz_dump_dngl(dhd_bus_t *bus);
+#ifdef IDLE_TX_FLOW_MGMT
+extern int dhd_bus_flow_ring_resume_request(struct dhd_bus *bus, void *arg);
+extern void dhd_bus_flow_ring_resume_response(struct dhd_bus *bus, uint16 flowid, int32 status);
+extern int dhd_bus_flow_ring_suspend_request(struct dhd_bus *bus, void *arg);
+extern void dhd_bus_flow_ring_suspend_response(struct dhd_bus *bus, uint16 flowid, uint32 status);
+extern void dhd_flow_ring_move_to_active_list_head(struct dhd_bus *bus,
+ flow_ring_node_t *flow_ring_node);
+extern void dhd_flow_ring_add_to_active_list(struct dhd_bus *bus,
+ flow_ring_node_t *flow_ring_node);
+extern void dhd_flow_ring_delete_from_active_list(struct dhd_bus *bus,
+ flow_ring_node_t *flow_ring_node);
+extern void __dhd_flow_ring_delete_from_active_list(struct dhd_bus *bus,
+ flow_ring_node_t *flow_ring_node);
+#endif /* IDLE_TX_FLOW_MGMT */
+
+extern int dhdpcie_send_mb_data(dhd_bus_t *bus, uint32 h2d_mb_data);
+
+#ifdef DHD_WAKE_STATUS
+int bcmpcie_get_total_wake(struct dhd_bus *bus);
+int bcmpcie_set_get_wake(struct dhd_bus *bus, int flag);
+#endif /* DHD_WAKE_STATUS */
+#ifdef DHD_MMIO_TRACE
+extern void dhd_dump_bus_mmio_trace(dhd_bus_t *bus, struct bcmstrbuf *strbuf);
+#endif /* defined(DHD_MMIO_TRACE) */
+extern void dhd_dump_bus_ds_trace(dhd_bus_t *bus, struct bcmstrbuf *strbuf);
+extern bool dhdpcie_bus_get_pcie_hostready_supported(dhd_bus_t *bus);
+extern void dhd_bus_hostready(struct dhd_bus *bus);
+#ifdef PCIE_OOB
+extern bool dhdpcie_bus_get_pcie_oob_dw_supported(dhd_bus_t *bus);
+#endif /* PCIE_OOB */
+#ifdef PCIE_INB_DW
+extern bool dhdpcie_bus_get_pcie_inband_dw_supported(dhd_bus_t *bus);
+extern void dhdpcie_bus_set_pcie_inband_dw_state(dhd_bus_t *bus,
+ enum dhd_bus_ds_state state);
+extern enum dhd_bus_ds_state dhdpcie_bus_get_pcie_inband_dw_state(dhd_bus_t *bus);
+extern const char * dhd_convert_inb_state_names(enum dhd_bus_ds_state inbstate);
+extern const char * dhd_convert_dsval(uint32 val, bool d2h);
+extern int dhd_bus_inb_set_device_wake(struct dhd_bus *bus, bool val);
+extern void dhd_bus_inb_ack_pending_ds_req(dhd_bus_t *bus);
+#endif /* PCIE_INB_DW */
+extern void dhdpcie_bus_enab_pcie_dw(dhd_bus_t *bus, uint8 dw_option);
+#if defined(LINUX) || defined(linux)
+extern int dhdpcie_irq_disabled(struct dhd_bus *bus);
+extern int dhdpcie_set_master_and_d0_pwrstate(struct dhd_bus *bus);
+#else
+static INLINE int dhdpcie_irq_disabled(struct dhd_bus *bus) { return BCME_ERROR; }
+static INLINE int dhdpcie_set_master_and_d0_pwrstate(struct dhd_bus *bus)
+{ return BCME_ERROR; }
+#endif /* defined(LINUX) || defined(linux) */
+
+#ifdef DHD_EFI
+extern bool dhdpcie_is_arm_halted(struct dhd_bus *bus);
+extern int dhd_os_wifi_platform_set_power(uint32 value);
+extern void dhdpcie_dongle_pwr_toggle(dhd_bus_t *bus);
+void dhdpcie_dongle_flr_or_pwr_toggle(dhd_bus_t *bus);
+int dhd_control_signal(dhd_bus_t *bus, char *arg, int len, int set);
+extern int dhd_wifi_properties(struct dhd_bus *bus, char *arg, int len);
+extern int dhd_otp_dump(dhd_bus_t *bus, char *arg, int len);
+extern int dhdpcie_deinit_phase1(dhd_bus_t *bus);
+int dhdpcie_disable_intr_poll(dhd_bus_t *bus);
+int dhdpcie_enable_intr_poll(dhd_bus_t *bus);
+#ifdef BT_OVER_PCIE
+int dhd_btop_test(dhd_bus_t *bus, char *arg, int len);
+#endif /* BT_OVER_PCIE */
+#else
+static INLINE bool dhdpcie_is_arm_halted(struct dhd_bus *bus) {return TRUE;}
+static INLINE int dhd_os_wifi_platform_set_power(uint32 value) {return BCME_OK; }
+static INLINE void
+dhdpcie_dongle_flr_or_pwr_toggle(dhd_bus_t *bus)
+{ return; }
+#endif /* DHD_EFI */
+
+int dhdpcie_config_check(dhd_bus_t *bus);
+int dhdpcie_config_restore(dhd_bus_t *bus, bool restore_pmcsr);
+int dhdpcie_config_save(dhd_bus_t *bus);
+int dhdpcie_set_pwr_state(dhd_bus_t *bus, uint state);
+
+extern bool dhdpcie_bus_get_pcie_hwa_supported(dhd_bus_t *bus);
+extern bool dhdpcie_bus_get_pcie_idma_supported(dhd_bus_t *bus);
+extern bool dhdpcie_bus_get_pcie_ifrm_supported(dhd_bus_t *bus);
+extern bool dhdpcie_bus_get_pcie_dar_supported(dhd_bus_t *bus);
+extern bool dhdpcie_bus_get_hp2p_supported(dhd_bus_t *bus);
+
+static INLINE uint32
+dhd_pcie_config_read(dhd_bus_t *bus, uint offset, uint size)
+{
+	/* For 4375 and chips prior to it */
+ if (bus->sih && bus->sih->buscorerev <= 64) {
+ OSL_DELAY(100);
+ }
+ return OSL_PCI_READ_CONFIG(bus->osh, offset, size);
+}
+
+static INLINE uint32
+dhd_pcie_corereg_read(si_t *sih, uint val)
+{
+	/* For 4375 and chips prior to it */
+ if (sih->buscorerev <= 64) {
+ OSL_DELAY(100);
+ }
+ si_corereg(sih, sih->buscoreidx, OFFSETOF(sbpcieregs_t, configaddr), ~0, val);
+ return si_corereg(sih, sih->buscoreidx, OFFSETOF(sbpcieregs_t, configdata), 0, 0);
+}
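+
+/*
+ * Illustrative usage sketch (commented out, not part of the driver): how a
+ * caller might use dhd_pcie_config_read() above to dump a config register.
+ * The helper name dhd_pcie_example_dump_pmcsr is hypothetical; PCIE_CFG_PMCSR
+ * is the same offset used by the suspend/resume paths.
+ */
+#if 0
+static INLINE void
+dhd_pcie_example_dump_pmcsr(dhd_bus_t *bus)
+{
+	/* the wrapper inserts the 100us settling delay for buscorerev <= 64 */
+	uint32 pmcsr = dhd_pcie_config_read(bus, PCIE_CFG_PMCSR, sizeof(uint32));
+
+	DHD_ERROR(("PMCSR: 0x%x\n", pmcsr));
+}
+#endif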
+
+extern int dhdpcie_get_fwpath_otp(dhd_bus_t *bus, char *fw_path, char *nv_path,
+ char *clm_path, char *txcap_path);
+
+extern int dhd_pcie_debug_info_dump(dhd_pub_t *dhd);
+extern void dhd_pcie_intr_count_dump(dhd_pub_t *dhd);
+extern void dhdpcie_bus_clear_intstatus(dhd_bus_t *bus);
+#ifdef DHD_HP2P
+extern uint16 dhd_bus_get_hp2p_ring_max_size(dhd_bus_t *bus, bool tx);
+#endif
+
+#if defined(DHD_EFI)
+extern wifi_properties_t *dhd_get_props(dhd_bus_t *bus);
+#endif
+
+#if defined(DHD_EFI) || defined(NDIS)
+extern int dhd_get_platform(dhd_pub_t* dhd, char *progname);
+extern bool dhdpcie_is_chip_supported(uint32 chipid, int *idx);
+extern bool dhdpcie_is_sflash_chip(uint32 chipid);
+#endif
+
+extern int dhd_get_pcie_linkspeed(dhd_pub_t *dhd);
+extern void dhdpcie_bar1_window_switch_enab(dhd_bus_t *bus);
+
+#ifdef PCIE_INB_DW
+extern void dhdpcie_set_dongle_deepsleep(dhd_bus_t *bus, bool val);
+extern void dhd_init_dongle_ds_lock(dhd_bus_t *bus);
+extern void dhd_deinit_dongle_ds_lock(dhd_bus_t *bus);
+#endif /* PCIE_INB_DW */
+
+#endif /* dhd_pcie_h */
diff --git a/bcmdhd.101.10.361.x/dhd_pcie_linux.c b/bcmdhd.101.10.361.x/dhd_pcie_linux.c
new file mode 100755
index 0000000..2a18c7c
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_pcie_linux.c
@@ -0,0 +1,3379 @@
+/*
+ * Linux DHD Bus Module for PCIE
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+/* include files */
+#include <typedefs.h>
+#include <bcmutils.h>
+#include <bcmdevs.h>
+#include <bcmdevs_legacy.h> /* need to still support chips no longer in trunk firmware */
+#include <siutils.h>
+#include <hndsoc.h>
+#include <hndpmu.h>
+#include <sbchipc.h>
+#if defined(DHD_DEBUG)
+#include <hnd_armtrap.h>
+#include <hnd_cons.h>
+#endif /* defined(DHD_DEBUG) */
+#include <dngl_stats.h>
+#include <pcie_core.h>
+#include <dhd.h>
+#include <dhd_bus.h>
+#include <dhd_proto.h>
+#include <dhd_dbg.h>
+#include <dhdioctl.h>
+#include <bcmmsgbuf.h>
+#include <pcicfg.h>
+#include <dhd_pcie.h>
+#include <dhd_linux.h>
+#ifdef OEM_ANDROID
+#ifdef CONFIG_ARCH_MSM
+#if defined(CONFIG_PCI_MSM) || defined(CONFIG_ARCH_MSM8996)
+#include <linux/msm_pcie.h>
+#else
+#include <mach/msm_pcie.h>
+#endif /* CONFIG_PCI_MSM */
+#endif /* CONFIG_ARCH_MSM */
+#endif /* OEM_ANDROID */
+
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+#include <linux/pm_runtime.h>
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+
+#if defined(CONFIG_SOC_EXYNOS9810) || defined(CONFIG_SOC_EXYNOS9820) || \
+ defined(CONFIG_SOC_EXYNOS9830) || defined(CONFIG_SOC_EXYNOS2100) || \
+ defined(CONFIG_SOC_EXYNOS1000) || defined(CONFIG_SOC_GS101)
+#include <linux/exynos-pci-ctrl.h>
+#endif /* CONFIG_SOC_EXYNOS9810 || CONFIG_SOC_EXYNOS9820 ||
+ * CONFIG_SOC_EXYNOS9830 || CONFIG_SOC_EXYNOS2100 ||
+ * CONFIG_SOC_EXYNOS1000 || CONFIG_SOC_GS101
+ */
+
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+#ifndef AUTO_SUSPEND_TIMEOUT
+#define AUTO_SUSPEND_TIMEOUT 1000
+#endif /* AUTO_SUSPEND_TIMEOUT */
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+
+#ifdef DHD_PCIE_RUNTIMEPM
+#define RPM_WAKE_UP_TIMEOUT 10000 /* ms */
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+#include <linux/irq.h>
+#ifdef USE_SMMU_ARCH_MSM
+#include <asm/dma-iommu.h>
+#include <linux/iommu.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#endif /* USE_SMMU_ARCH_MSM */
+#include <dhd_config.h>
+
+#ifdef PCIE_OOB
+#include "ftdi_sio_external.h"
+#endif /* PCIE_OOB */
+
+#define PCI_CFG_RETRY 10 /* PR15065: retry count for pci cfg accesses */
+#define OS_HANDLE_MAGIC 0x1234abcd /* Magic # to recognize osh */
+#define BCM_MEM_FILENAME_LEN 24 /* Mem. filename length */
+
+#ifdef PCIE_OOB
+#define HOST_WAKE 4 /* GPIO_0 (HOST_WAKE) - Output from WLAN */
+#define DEVICE_WAKE 5 /* GPIO_1 (DEVICE_WAKE) - Input to WLAN */
+#define BIT_WL_REG_ON 6
+#define BIT_BT_REG_ON 7
+
+int gpio_handle_val = 0;
+unsigned char gpio_port = 0;
+unsigned char gpio_direction = 0;
+#define OOB_PORT "ttyUSB0"
+#endif /* PCIE_OOB */
+
+#ifndef BCMPCI_DEV_ID
+#define BCMPCI_DEV_ID PCI_ANY_ID
+#endif
+
+#ifdef FORCE_TPOWERON
+extern uint32 tpoweron_scale;
+#endif /* FORCE_TPOWERON */
+/* user defined data structures */
+
+typedef bool (*dhdpcie_cb_fn_t)(void *);
+
+typedef struct dhdpcie_info
+{
+ dhd_bus_t *bus;
+ osl_t *osh;
+ struct pci_dev *dev; /* pci device handle */
+	volatile char *regs;	/* pci device BAR0 mapped va */
+	volatile char *tcm;	/* pci device BAR1 mapped va */
+	uint32 bar1_size;	/* pci device BAR1 mapped size */
+ struct pcos_info *pcos_info;
+ uint16 last_intrstatus; /* to cache intrstatus */
+ int irq;
+ char pciname[32];
+ struct pci_saved_state* default_state;
+ struct pci_saved_state* state;
+#ifdef BCMPCIE_OOB_HOST_WAKE
+ void *os_cxt; /* Pointer to per-OS private data */
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+#ifdef DHD_WAKE_STATUS
+ spinlock_t pkt_wake_lock;
+ unsigned int total_wake_count;
+ int pkt_wake;
+ int wake_irq;
+#endif /* DHD_WAKE_STATUS */
+#ifdef USE_SMMU_ARCH_MSM
+ void *smmu_cxt;
+#endif /* USE_SMMU_ARCH_MSM */
+} dhdpcie_info_t;
+
+struct pcos_info {
+ dhdpcie_info_t *pc;
+ spinlock_t lock;
+ wait_queue_head_t intr_wait_queue;
+ timer_list_compat_t tuning_timer;
+ int tuning_timer_exp;
+ atomic_t timer_enab;
+ struct tasklet_struct tuning_tasklet;
+};
+
+#ifdef BCMPCIE_OOB_HOST_WAKE
+typedef struct dhdpcie_os_info {
+ int oob_irq_num; /* valid when hardware or software oob in use */
+ unsigned long oob_irq_flags; /* valid when hardware or software oob in use */
+ bool oob_irq_registered;
+ bool oob_irq_enabled;
+ bool oob_irq_wake_enabled;
+ spinlock_t oob_irq_spinlock;
+ void *dev; /* handle to the underlying device */
+} dhdpcie_os_info_t;
+static irqreturn_t wlan_oob_irq(int irq, void *data);
+#ifdef CUSTOMER_HW2
+extern struct brcm_pcie_wake brcm_pcie_wake;
+#endif /* CUSTOMER_HW2 */
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+
+#ifdef USE_SMMU_ARCH_MSM
+typedef struct dhdpcie_smmu_info {
+ struct dma_iommu_mapping *smmu_mapping;
+ dma_addr_t smmu_iova_start;
+ size_t smmu_iova_len;
+} dhdpcie_smmu_info_t;
+#endif /* USE_SMMU_ARCH_MSM */
+
+/* function declarations */
+static int __devinit
+dhdpcie_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+static void __devexit
+dhdpcie_pci_remove(struct pci_dev *pdev);
+static int dhdpcie_init(struct pci_dev *pdev);
+static irqreturn_t dhdpcie_isr(int irq, void *arg);
+/* OS Routine functions for PCI suspend/resume */
+
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+static int dhdpcie_set_suspend_resume(struct pci_dev *dev, bool state, bool byint);
+#else
+static int dhdpcie_set_suspend_resume(dhd_bus_t *bus, bool state);
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+static int dhdpcie_resume_host_dev(dhd_bus_t *bus);
+static int dhdpcie_suspend_host_dev(dhd_bus_t *bus);
+static int dhdpcie_resume_dev(struct pci_dev *dev);
+static int dhdpcie_suspend_dev(struct pci_dev *dev);
+#ifdef DHD_PCIE_RUNTIMEPM
+static int dhdpcie_pm_suspend(struct device *dev);
+static int dhdpcie_pm_prepare(struct device *dev);
+static int dhdpcie_pm_resume(struct device *dev);
+static void dhdpcie_pm_complete(struct device *dev);
+#else
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+static int dhdpcie_pm_system_suspend_noirq(struct device * dev);
+static int dhdpcie_pm_system_resume_noirq(struct device * dev);
+#else
+static int dhdpcie_pci_suspend(struct pci_dev *dev, pm_message_t state);
+static int dhdpcie_pci_resume(struct pci_dev *dev);
+#if defined(BT_OVER_PCIE)
+static int dhdpcie_pci_resume_early(struct pci_dev *dev);
+#endif /* BT_OVER_PCIE */
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+static int dhdpcie_pm_runtime_suspend(struct device * dev);
+static int dhdpcie_pm_runtime_resume(struct device * dev);
+static int dhdpcie_pm_system_suspend_noirq(struct device * dev);
+static int dhdpcie_pm_system_resume_noirq(struct device * dev);
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+
+#ifdef SUPPORT_EXYNOS7420
+void exynos_pcie_pm_suspend(int ch_num) {}
+void exynos_pcie_pm_resume(int ch_num) {}
+#endif /* SUPPORT_EXYNOS7420 */
+
+static void dhdpcie_config_save_restore_coherent(dhd_bus_t *bus, bool state);
+
+uint32
+dhdpcie_access_cap(struct pci_dev *pdev, int cap, uint offset, bool is_ext, bool is_write,
+ uint32 writeval);
+
+static struct pci_device_id dhdpcie_pci_devid[] __devinitdata = {
+ { vendor: VENDOR_BROADCOM,
+ device: BCMPCI_DEV_ID,
+ subvendor: PCI_ANY_ID,
+ subdevice: PCI_ANY_ID,
+ class: PCI_CLASS_NETWORK_OTHER << 8,
+ class_mask: 0xffff00,
+ driver_data: 0,
+ },
+ { 0, 0, 0, 0, 0, 0, 0}
+};
+MODULE_DEVICE_TABLE(pci, dhdpcie_pci_devid);
+
+/* Power Management Hooks */
+#ifdef DHD_PCIE_RUNTIMEPM
+static const struct dev_pm_ops dhd_pcie_pm_ops = {
+ .prepare = dhdpcie_pm_prepare,
+ .suspend = dhdpcie_pm_suspend,
+ .resume = dhdpcie_pm_resume,
+ .complete = dhdpcie_pm_complete,
+};
+#endif /* DHD_PCIE_RUNTIMEPM */
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+static const struct dev_pm_ops dhdpcie_pm_ops = {
+ SET_RUNTIME_PM_OPS(dhdpcie_pm_runtime_suspend, dhdpcie_pm_runtime_resume, NULL)
+ .suspend_noirq = dhdpcie_pm_system_suspend_noirq,
+ .resume_noirq = dhdpcie_pm_system_resume_noirq
+};
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+
+static struct pci_driver dhdpcie_driver = {
+ node: {&dhdpcie_driver.node, &dhdpcie_driver.node},
+ name: "pcieh",
+ id_table: dhdpcie_pci_devid,
+ probe: dhdpcie_pci_probe,
+ remove: dhdpcie_pci_remove,
+#if defined (DHD_PCIE_RUNTIMEPM) || defined (DHD_PCIE_NATIVE_RUNTIMEPM)
+ .driver.pm = &dhd_pcie_pm_ops,
+#else
+ suspend: dhdpcie_pci_suspend,
+ resume: dhdpcie_pci_resume,
+#if defined(BT_OVER_PCIE)
+ resume_early: dhdpcie_pci_resume_early,
+#endif /* BT_OVER_PCIE */
+#endif /* DHD_PCIE_RUNTIMEPM || DHD_PCIE_NATIVE_RUNTIMEPM */
+};
+
+int dhdpcie_init_succeeded = FALSE;
+
+#ifdef USE_SMMU_ARCH_MSM
+static int dhdpcie_smmu_init(struct pci_dev *pdev, void *smmu_cxt)
+{
+ struct dma_iommu_mapping *mapping;
+ struct device_node *root_node = NULL;
+ dhdpcie_smmu_info_t *smmu_info = (dhdpcie_smmu_info_t *)smmu_cxt;
+ int smmu_iova_address[2];
+ char *wlan_node = "android,bcmdhd_wlan";
+ char *wlan_smmu_node = "wlan-smmu-iova-address";
+ int atomic_ctx = 1;
+ int s1_bypass = 1;
+ int ret = 0;
+
+ DHD_ERROR(("%s: SMMU initialize\n", __FUNCTION__));
+
+ root_node = of_find_compatible_node(NULL, NULL, wlan_node);
+ if (!root_node) {
+ WARN(1, "failed to get device node of BRCM WLAN\n");
+ return -ENODEV;
+ }
+
+ if (of_property_read_u32_array(root_node, wlan_smmu_node,
+ smmu_iova_address, 2) == 0) {
+ DHD_ERROR(("%s : get SMMU start address 0x%x, size 0x%x\n",
+ __FUNCTION__, smmu_iova_address[0], smmu_iova_address[1]));
+ smmu_info->smmu_iova_start = smmu_iova_address[0];
+ smmu_info->smmu_iova_len = smmu_iova_address[1];
+ } else {
+ printf("%s : can't get smmu iova address property\n",
+ __FUNCTION__);
+ return -ENODEV;
+ }
+
+ if (smmu_info->smmu_iova_len <= 0) {
+ DHD_ERROR(("%s: Invalid smmu iova len %d\n",
+ __FUNCTION__, (int)smmu_info->smmu_iova_len));
+ return -EINVAL;
+ }
+
+ DHD_ERROR(("%s : SMMU init start\n", __FUNCTION__));
+
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) ||
+ pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ DHD_ERROR(("%s: DMA set 64bit mask failed.\n", __FUNCTION__));
+ return -EINVAL;
+ }
+
+ mapping = arm_iommu_create_mapping(&platform_bus_type,
+ smmu_info->smmu_iova_start, smmu_info->smmu_iova_len);
+ if (IS_ERR(mapping)) {
+ DHD_ERROR(("%s: create mapping failed, err = %d\n",
+ __FUNCTION__, ret));
+ ret = PTR_ERR(mapping);
+ goto map_fail;
+ }
+
+ ret = iommu_domain_set_attr(mapping->domain,
+ DOMAIN_ATTR_ATOMIC, &atomic_ctx);
+ if (ret) {
+ DHD_ERROR(("%s: set atomic_ctx attribute failed, err = %d\n",
+ __FUNCTION__, ret));
+ goto set_attr_fail;
+ }
+
+ ret = iommu_domain_set_attr(mapping->domain,
+ DOMAIN_ATTR_S1_BYPASS, &s1_bypass);
+ if (ret < 0) {
+ DHD_ERROR(("%s: set s1_bypass attribute failed, err = %d\n",
+ __FUNCTION__, ret));
+ goto set_attr_fail;
+ }
+
+ ret = arm_iommu_attach_device(&pdev->dev, mapping);
+ if (ret) {
+ DHD_ERROR(("%s: attach device failed, err = %d\n",
+ __FUNCTION__, ret));
+ goto attach_fail;
+ }
+
+ smmu_info->smmu_mapping = mapping;
+
+ return ret;
+
+attach_fail:
+set_attr_fail:
+ arm_iommu_release_mapping(mapping);
+map_fail:
+ return ret;
+}
+
+static void dhdpcie_smmu_remove(struct pci_dev *pdev, void *smmu_cxt)
+{
+ dhdpcie_smmu_info_t *smmu_info;
+
+ if (!smmu_cxt) {
+ return;
+ }
+
+ smmu_info = (dhdpcie_smmu_info_t *)smmu_cxt;
+ if (smmu_info->smmu_mapping) {
+ arm_iommu_detach_device(&pdev->dev);
+ arm_iommu_release_mapping(smmu_info->smmu_mapping);
+ smmu_info->smmu_mapping = NULL;
+ }
+}
+#endif /* USE_SMMU_ARCH_MSM */
+
+#ifdef FORCE_TPOWERON
+static void
+dhd_bus_get_tpoweron(dhd_bus_t *bus)
+{
+ uint32 tpoweron_rc;
+ uint32 tpoweron_ep;
+
+ tpoweron_rc = dhdpcie_rc_access_cap(bus, PCIE_EXTCAP_ID_L1SS,
+ PCIE_EXTCAP_L1SS_CONTROL2_OFFSET, TRUE, FALSE, 0);
+ tpoweron_ep = dhdpcie_ep_access_cap(bus, PCIE_EXTCAP_ID_L1SS,
+ PCIE_EXTCAP_L1SS_CONTROL2_OFFSET, TRUE, FALSE, 0);
+ DHD_ERROR(("%s: tpoweron_rc:0x%x tpoweron_ep:0x%x\n",
+ __FUNCTION__, tpoweron_rc, tpoweron_ep));
+}
+
+static void
+dhd_bus_set_tpoweron(dhd_bus_t *bus, uint16 tpoweron)
+{
+ dhd_bus_get_tpoweron(bus);
+ /* Set the tpoweron */
+ DHD_ERROR(("%s tpoweron: 0x%x\n", __FUNCTION__, tpoweron));
+ dhdpcie_rc_access_cap(bus, PCIE_EXTCAP_ID_L1SS,
+ PCIE_EXTCAP_L1SS_CONTROL2_OFFSET, TRUE, TRUE, tpoweron);
+ dhdpcie_ep_access_cap(bus, PCIE_EXTCAP_ID_L1SS,
+ PCIE_EXTCAP_L1SS_CONTROL2_OFFSET, TRUE, TRUE, tpoweron);
+
+ dhd_bus_get_tpoweron(bus);
+}
+
+static bool
+dhdpcie_chip_req_forced_tpoweron(dhd_bus_t *bus)
+{
+	/*
+	 * On Fire's reference platform, coming out of L1.2,
+	 * there is a constant delay of 45us between CLKREQ# and stable REFCLK.
+	 * Due to this delay, with tPowerOn < 50us
+	 * there is a chance that the refclk sense triggers on noise.
+	 *
+	 * Whichever chip needs a forced tPowerOn of 50us should be listed below.
+	 */
+ if (si_chipid(bus->sih) == BCM4377_CHIP_ID) {
+ return TRUE;
+ }
+ return FALSE;
+}
+#endif /* FORCE_TPOWERON */
+
+#ifdef BT_OVER_PCIE
+int dhd_bus_pwr_off(dhd_pub_t *dhdp, int reason)
+{
+ DHD_ERROR(("%s: WARNING ! function not implemented in %s\n",
+ __FUNCTION__, __FILE__));
+ return BCME_OK;
+}
+
+int dhd_bus_pwr_on(dhd_pub_t *dhdp, int reason)
+{
+ DHD_ERROR(("%s: WARNING ! function not implemented in %s\n",
+ __FUNCTION__, __FILE__));
+ return BCME_OK;
+}
+
+int dhd_bus_pwr_toggle(dhd_pub_t *dhdp, int reason)
+{
+ DHD_ERROR(("%s: WARNING ! function not implemented in %s\n",
+ __FUNCTION__, __FILE__));
+ return BCME_OK;
+}
+
+bool dhdpcie_is_btop_chip(struct dhd_bus *bus)
+{
+ DHD_ERROR(("%s: WARNING ! function not implemented in %s\n",
+ __FUNCTION__, __FILE__));
+ return FALSE;
+}
+
+int dhdpcie_redownload_fw(dhd_pub_t *dhdp)
+{
+ DHD_ERROR(("%s: WARNING ! function not implemented in %s\n",
+ __FUNCTION__, __FILE__));
+ return BCME_OK;
+}
+#endif /* BT_OVER_PCIE */
+
+static bool
+dhd_bus_aspm_enable_dev(dhd_bus_t *bus, struct pci_dev *dev, bool enable)
+{
+ uint32 linkctrl_before;
+ uint32 linkctrl_after = 0;
+ uint8 linkctrl_asm;
+ char *device;
+
+ device = (dev == bus->dev) ? "EP" : "RC";
+
+ linkctrl_before = dhdpcie_access_cap(dev, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET,
+ FALSE, FALSE, 0);
+ linkctrl_asm = (linkctrl_before & PCIE_ASPM_CTRL_MASK);
+
+ if (enable) {
+ if (linkctrl_asm == PCIE_ASPM_L1_ENAB) {
+ DHD_ERROR(("%s: %s already enabled linkctrl: 0x%x\n",
+ __FUNCTION__, device, linkctrl_before));
+ return FALSE;
+ }
+ /* Enable only L1 ASPM (bit 1) */
+ dhdpcie_access_cap(dev, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET, FALSE,
+ TRUE, (linkctrl_before | PCIE_ASPM_L1_ENAB));
+ } else {
+ if (linkctrl_asm == 0) {
+ DHD_ERROR(("%s: %s already disabled linkctrl: 0x%x\n",
+ __FUNCTION__, device, linkctrl_before));
+ return FALSE;
+ }
+ /* Disable complete ASPM (bit 1 and bit 0) */
+ dhdpcie_access_cap(dev, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET, FALSE,
+ TRUE, (linkctrl_before & (~PCIE_ASPM_ENAB)));
+ }
+
+ linkctrl_after = dhdpcie_access_cap(dev, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET,
+ FALSE, FALSE, 0);
+ DHD_ERROR(("%s: %s %s, linkctrl_before: 0x%x linkctrl_after: 0x%x\n",
+ __FUNCTION__, device, (enable ? "ENABLE " : "DISABLE"),
+ linkctrl_before, linkctrl_after));
+
+ return TRUE;
+}
+
+static bool
+dhd_bus_is_rc_ep_aspm_capable(dhd_bus_t *bus)
+{
+ uint32 rc_aspm_cap;
+ uint32 ep_aspm_cap;
+
+ /* RC ASPM capability */
+ rc_aspm_cap = dhdpcie_access_cap(bus->rc_dev, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET,
+ FALSE, FALSE, 0);
+ if (rc_aspm_cap == BCME_ERROR) {
+ DHD_ERROR(("%s RC is not ASPM capable\n", __FUNCTION__));
+ return FALSE;
+ }
+
+ /* EP ASPM capability */
+ ep_aspm_cap = dhdpcie_access_cap(bus->dev, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET,
+ FALSE, FALSE, 0);
+ if (ep_aspm_cap == BCME_ERROR) {
+ DHD_ERROR(("%s EP is not ASPM capable\n", __FUNCTION__));
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+bool
+dhd_bus_aspm_enable_rc_ep(dhd_bus_t *bus, bool enable)
+{
+ bool ret;
+
+ if (!bus->rc_ep_aspm_cap) {
+ DHD_ERROR(("%s: NOT ASPM CAPABLE rc_ep_aspm_cap: %d\n",
+ __FUNCTION__, bus->rc_ep_aspm_cap));
+ return FALSE;
+ }
+
+ if (enable) {
+ /* Enable only L1 ASPM first RC then EP */
+ ret = dhd_bus_aspm_enable_dev(bus, bus->rc_dev, enable);
+ ret = dhd_bus_aspm_enable_dev(bus, bus->dev, enable);
+ } else {
+ /* Disable complete ASPM first EP then RC */
+ ret = dhd_bus_aspm_enable_dev(bus, bus->dev, enable);
+ ret = dhd_bus_aspm_enable_dev(bus, bus->rc_dev, enable);
+ }
+
+ return ret;
+}
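+
+/*
+ * Illustrative usage sketch (commented out, not part of the driver): the
+ * intended pattern is to disable ASPM around config-space work and re-enable
+ * it only if this call actually changed the state, mirroring what
+ * dhd_bus_l1ss_enable_rc_ep() does below. The helper name is hypothetical.
+ */
+#if 0
+static void
+dhd_bus_example_aspm_guard(dhd_bus_t *bus)
+{
+	/* returns TRUE only if ASPM was enabled and has now been disabled */
+	bool aspm_was_disabled = dhd_bus_aspm_enable_rc_ep(bus, FALSE);
+
+	/* ... perform config-space work with ASPM off ... */
+
+	if (aspm_was_disabled == TRUE) {
+		dhd_bus_aspm_enable_rc_ep(bus, TRUE);
+	}
+}
+#endif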
+
+static void
+dhd_bus_l1ss_enable_dev(dhd_bus_t *bus, struct pci_dev *dev, bool enable)
+{
+ uint32 l1ssctrl_before;
+ uint32 l1ssctrl_after = 0;
+ uint8 l1ss_ep;
+ char *device;
+
+ device = (dev == bus->dev) ? "EP" : "RC";
+
+	/* Extended Capability Reg */
+ l1ssctrl_before = dhdpcie_access_cap(dev, PCIE_EXTCAP_ID_L1SS,
+ PCIE_EXTCAP_L1SS_CONTROL_OFFSET, TRUE, FALSE, 0);
+ l1ss_ep = (l1ssctrl_before & PCIE_EXT_L1SS_MASK);
+
+ if (enable) {
+ if (l1ss_ep == PCIE_EXT_L1SS_ENAB) {
+ DHD_ERROR(("%s: %s already enabled, l1ssctrl: 0x%x\n",
+ __FUNCTION__, device, l1ssctrl_before));
+ return;
+ }
+ dhdpcie_access_cap(dev, PCIE_EXTCAP_ID_L1SS, PCIE_EXTCAP_L1SS_CONTROL_OFFSET,
+ TRUE, TRUE, (l1ssctrl_before | PCIE_EXT_L1SS_ENAB));
+ } else {
+ if (l1ss_ep == 0) {
+ DHD_ERROR(("%s: %s already disabled, l1ssctrl: 0x%x\n",
+ __FUNCTION__, device, l1ssctrl_before));
+ return;
+ }
+ dhdpcie_access_cap(dev, PCIE_EXTCAP_ID_L1SS, PCIE_EXTCAP_L1SS_CONTROL_OFFSET,
+ TRUE, TRUE, (l1ssctrl_before & (~PCIE_EXT_L1SS_ENAB)));
+ }
+ l1ssctrl_after = dhdpcie_access_cap(dev, PCIE_EXTCAP_ID_L1SS,
+ PCIE_EXTCAP_L1SS_CONTROL_OFFSET, TRUE, FALSE, 0);
+ DHD_ERROR(("%s: %s %s, l1ssctrl_before: 0x%x l1ssctrl_after: 0x%x\n",
+ __FUNCTION__, device, (enable ? "ENABLE " : "DISABLE"),
+ l1ssctrl_before, l1ssctrl_after));
+}
+
+static bool
+dhd_bus_is_rc_ep_l1ss_capable(dhd_bus_t *bus)
+{
+ uint32 rc_l1ss_cap;
+ uint32 ep_l1ss_cap;
+
+	/* RC Extended Capability */
+ rc_l1ss_cap = dhdpcie_access_cap(bus->rc_dev, PCIE_EXTCAP_ID_L1SS,
+ PCIE_EXTCAP_L1SS_CONTROL_OFFSET, TRUE, FALSE, 0);
+ if (rc_l1ss_cap == BCME_ERROR) {
+ DHD_ERROR(("%s RC is not l1ss capable\n", __FUNCTION__));
+ return FALSE;
+ }
+
+	/* EP Extended Capability */
+ ep_l1ss_cap = dhdpcie_access_cap(bus->dev, PCIE_EXTCAP_ID_L1SS,
+ PCIE_EXTCAP_L1SS_CONTROL_OFFSET, TRUE, FALSE, 0);
+ if (ep_l1ss_cap == BCME_ERROR) {
+ DHD_ERROR(("%s EP is not l1ss capable\n", __FUNCTION__));
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+void
+dhd_bus_l1ss_enable_rc_ep(dhd_bus_t *bus, bool enable)
+{
+ bool ret;
+
+ if ((!bus->rc_ep_aspm_cap) || (!bus->rc_ep_l1ss_cap)) {
+ DHD_ERROR(("%s: NOT L1SS CAPABLE rc_ep_aspm_cap: %d rc_ep_l1ss_cap: %d\n",
+ __FUNCTION__, bus->rc_ep_aspm_cap, bus->rc_ep_l1ss_cap));
+ return;
+ }
+
+ /* Disable ASPM of RC and EP */
+ ret = dhd_bus_aspm_enable_rc_ep(bus, FALSE);
+
+ if (enable) {
+ /* Enable RC then EP */
+ dhd_bus_l1ss_enable_dev(bus, bus->rc_dev, enable);
+ dhd_bus_l1ss_enable_dev(bus, bus->dev, enable);
+ } else {
+ /* Disable EP then RC */
+ dhd_bus_l1ss_enable_dev(bus, bus->dev, enable);
+ dhd_bus_l1ss_enable_dev(bus, bus->rc_dev, enable);
+ }
+
+	/* Re-enable ASPM of RC and EP only if this API disabled it */
+ if (ret == TRUE) {
+ dhd_bus_aspm_enable_rc_ep(bus, TRUE);
+ }
+}
+
+void
+dhd_bus_aer_config(dhd_bus_t *bus)
+{
+ uint32 val;
+
+ DHD_ERROR(("%s: Configure AER registers for EP\n", __FUNCTION__));
+ val = dhdpcie_ep_access_cap(bus, PCIE_ADVERRREP_CAPID,
+ PCIE_ADV_CORR_ERR_MASK_OFFSET, TRUE, FALSE, 0);
+ if (val != (uint32)-1) {
+ val &= ~CORR_ERR_AE;
+ dhdpcie_ep_access_cap(bus, PCIE_ADVERRREP_CAPID,
+ PCIE_ADV_CORR_ERR_MASK_OFFSET, TRUE, TRUE, val);
+ } else {
+ DHD_ERROR(("%s: Invalid EP's PCIE_ADV_CORR_ERR_MASK: 0x%x\n",
+ __FUNCTION__, val));
+ }
+
+ DHD_ERROR(("%s: Configure AER registers for RC\n", __FUNCTION__));
+ val = dhdpcie_rc_access_cap(bus, PCIE_ADVERRREP_CAPID,
+ PCIE_ADV_CORR_ERR_MASK_OFFSET, TRUE, FALSE, 0);
+ if (val != (uint32)-1) {
+ val &= ~CORR_ERR_AE;
+ dhdpcie_rc_access_cap(bus, PCIE_ADVERRREP_CAPID,
+ PCIE_ADV_CORR_ERR_MASK_OFFSET, TRUE, TRUE, val);
+ } else {
+ DHD_ERROR(("%s: Invalid RC's PCIE_ADV_CORR_ERR_MASK: 0x%x\n",
+ __FUNCTION__, val));
+ }
+}
+
+#ifdef DHD_PCIE_RUNTIMEPM
+static int dhdpcie_pm_suspend(struct device *dev)
+{
+ int ret = 0;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ dhdpcie_info_t *pch = pci_get_drvdata(pdev);
+ dhd_bus_t *bus = NULL;
+ unsigned long flags;
+ int msglevel = dhd_msg_level;
+
+ printf("%s: Enter\n", __FUNCTION__);
+ if (pch) {
+ bus = pch->bus;
+ }
+ if (!bus) {
+ return ret;
+ }
+
+ DHD_GENERAL_LOCK(bus->dhd, flags);
+ if (!DHD_BUS_BUSY_CHECK_IDLE(bus->dhd)) {
+ DHD_ERROR(("%s: Bus not IDLE!! dhd_bus_busy_state = 0x%x\n",
+ __FUNCTION__, bus->dhd->dhd_bus_busy_state));
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+ return -EBUSY;
+ }
+ DHD_BUS_BUSY_SET_SUSPEND_IN_PROGRESS(bus->dhd);
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+
+ dhd_msg_level |= DHD_RPM_VAL;
+ if (bus->dhd->up)
+ ret = dhdpcie_set_suspend_resume(bus, TRUE);
+
+ DHD_GENERAL_LOCK(bus->dhd, flags);
+ DHD_BUS_BUSY_CLEAR_SUSPEND_IN_PROGRESS(bus->dhd);
+ dhd_os_busbusy_wake(bus->dhd);
+ dhd_msg_level = msglevel;
+ printf("%s: Exit ret=%d\n", __FUNCTION__, ret);
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+
+ return ret;
+}
+
+static int dhdpcie_pm_prepare(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ dhdpcie_info_t *pch = pci_get_drvdata(pdev);
+ dhd_bus_t *bus = NULL;
+
+ if (!pch || !pch->bus) {
+ return 0;
+ }
+
+ bus = pch->bus;
+ DHD_DISABLE_RUNTIME_PM(bus->dhd);
+ bus->chk_pm = TRUE;
+
+ return 0;
+}
+
+static int dhdpcie_pm_resume(struct device *dev)
+{
+ int ret = 0;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ dhdpcie_info_t *pch = pci_get_drvdata(pdev);
+ dhd_bus_t *bus = NULL;
+ unsigned long flags;
+ int msglevel = dhd_msg_level;
+
+ printf("%s: Enter\n", __FUNCTION__);
+ if (pch) {
+ bus = pch->bus;
+ }
+ if (!bus) {
+ return ret;
+ }
+
+ DHD_GENERAL_LOCK(bus->dhd, flags);
+ DHD_BUS_BUSY_SET_RESUME_IN_PROGRESS(bus->dhd);
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+
+ dhd_msg_level |= DHD_RPM_VAL;
+ if (bus->dhd->up)
+ ret = dhdpcie_set_suspend_resume(bus, FALSE);
+
+ DHD_GENERAL_LOCK(bus->dhd, flags);
+ DHD_BUS_BUSY_CLEAR_RESUME_IN_PROGRESS(bus->dhd);
+ dhd_os_busbusy_wake(bus->dhd);
+ dhd_msg_level = msglevel;
+ printf("%s: Exit ret=%d\n", __FUNCTION__, ret);
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+
+ return ret;
+}
+
+static void dhdpcie_pm_complete(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ dhdpcie_info_t *pch = pci_get_drvdata(pdev);
+ dhd_bus_t *bus = NULL;
+
+ if (!pch || !pch->bus) {
+ return;
+ }
+
+ bus = pch->bus;
+ DHD_ENABLE_RUNTIME_PM(bus->dhd);
+ bus->chk_pm = FALSE;
+
+ return;
+}
+#else
+static int dhdpcie_pci_suspend(struct pci_dev * pdev, pm_message_t state)
+{
+ int ret = 0;
+ dhdpcie_info_t *pch = pci_get_drvdata(pdev);
+ dhd_bus_t *bus = NULL;
+ unsigned long flags;
+ uint32 i = 0;
+
+ printf("%s: Enter\n", __FUNCTION__);
+ if (pch) {
+ bus = pch->bus;
+ }
+ if (!bus) {
+ return ret;
+ }
+
+ BCM_REFERENCE(state);
+
+ DHD_GENERAL_LOCK(bus->dhd, flags);
+ if (!DHD_BUS_BUSY_CHECK_IDLE(bus->dhd)) {
+ DHD_ERROR(("%s: Bus not IDLE!! dhd_bus_busy_state = 0x%x\n",
+ __FUNCTION__, bus->dhd->dhd_bus_busy_state));
+
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+ OSL_DELAY(1000);
+ /* retry till the transaction is complete */
+ while (i < 100) {
+ OSL_DELAY(1000);
+ i++;
+
+ DHD_GENERAL_LOCK(bus->dhd, flags);
+ if (DHD_BUS_BUSY_CHECK_IDLE(bus->dhd)) {
+ DHD_ERROR(("%s: Bus enter IDLE!! after %d ms\n",
+ __FUNCTION__, i));
+ break;
+ }
+ if (i != 100) {
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+ }
+ }
+ if (!DHD_BUS_BUSY_CHECK_IDLE(bus->dhd)) {
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+ DHD_ERROR(("%s: Bus not IDLE!! Failed after %d ms, "
+ "dhd_bus_busy_state = 0x%x\n",
+ __FUNCTION__, i, bus->dhd->dhd_bus_busy_state));
+ return -EBUSY;
+ }
+ }
+ DHD_BUS_BUSY_SET_SUSPEND_IN_PROGRESS(bus->dhd);
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+
+#ifdef DHD_CFG80211_SUSPEND_RESUME
+ dhd_cfg80211_suspend(bus->dhd);
+#endif /* DHD_CFG80211_SUSPEND_RESUME */
+
+ if (!bus->dhd->dongle_reset)
+ ret = dhdpcie_set_suspend_resume(bus, TRUE);
+
+ DHD_GENERAL_LOCK(bus->dhd, flags);
+ DHD_BUS_BUSY_CLEAR_SUSPEND_IN_PROGRESS(bus->dhd);
+ dhd_os_busbusy_wake(bus->dhd);
+ printf("%s: Exit ret=%d\n", __FUNCTION__, ret);
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+
+ return ret;
+}
+
+#if defined(BT_OVER_PCIE)
+static int dhdpcie_pci_resume_early(struct pci_dev *pdev)
+{
+ int ret = 0;
+ dhdpcie_info_t *pch = pci_get_drvdata(pdev);
+ dhd_bus_t *bus = NULL;
+ uint32 pmcsr;
+
+ if (pch) {
+ bus = pch->bus;
+ }
+ if (!bus) {
+ return ret;
+ }
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 9))
+	/* On fc30 (linux ver 5.0.9),
+	 * PMEStat of PMCSR (cfg reg) is cleared by the kernel before this callback.
+	 * So we use SwPme of FunctionControl (enum reg) instead of PMEStat,
+	 * avoiding a kernel change.
+	 */
+ if (bus->sih->buscorerev >= 64) {
+ uint32 ftnctrl;
+ volatile void *regsva = (volatile void *)bus->regs;
+
+ ftnctrl = pcie_corereg(bus->osh, regsva,
+ OFFSETOF(sbpcieregs_t, ftn_ctrl.control), 0, 0);
+ pmcsr = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_PMCSR, sizeof(pmcsr));
+
+ DHD_ERROR(("%s(): pmcsr is 0x%x, ftnctrl is 0x%8x \r\n",
+ __FUNCTION__, pmcsr, ftnctrl));
+ if (ftnctrl & PCIE_FTN_SWPME_MASK) {
+ DHD_ERROR(("%s(): Wakeup due to WLAN \r\n", __FUNCTION__));
+ }
+ } else
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 9)) */
+ {
+ pmcsr = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_PMCSR, sizeof(pmcsr));
+
+ DHD_ERROR(("%s(): pmcsr is 0x%x \r\n", __FUNCTION__, pmcsr));
+ if (pmcsr & PCIE_PMCSR_PMESTAT) {
+ DHD_ERROR(("%s(): Wakeup due to WLAN \r\n", __FUNCTION__));
+ }
+ }
+
+	/*
+	 * TODO: Add code to take advantage of what is read from pmcsr
+	 */
+
+ return ret;
+}
+#endif /* BT_OVER_PCIE */
+
+static int dhdpcie_pci_resume(struct pci_dev *pdev)
+{
+ int ret = 0;
+ dhdpcie_info_t *pch = pci_get_drvdata(pdev);
+ dhd_bus_t *bus = NULL;
+ unsigned long flags;
+
+ printf("%s: Enter\n", __FUNCTION__);
+ if (pch) {
+ bus = pch->bus;
+ }
+ if (!bus) {
+ return ret;
+ }
+
+ DHD_GENERAL_LOCK(bus->dhd, flags);
+ DHD_BUS_BUSY_SET_RESUME_IN_PROGRESS(bus->dhd);
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+
+ if (!bus->dhd->dongle_reset)
+ ret = dhdpcie_set_suspend_resume(bus, FALSE);
+
+ DHD_GENERAL_LOCK(bus->dhd, flags);
+ DHD_BUS_BUSY_CLEAR_RESUME_IN_PROGRESS(bus->dhd);
+ dhd_os_busbusy_wake(bus->dhd);
+ printf("%s: Exit ret=%d\n", __FUNCTION__, ret);
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+
+#ifdef DHD_CFG80211_SUSPEND_RESUME
+ dhd_cfg80211_resume(bus->dhd);
+#endif /* DHD_CFG80211_SUSPEND_RESUME */
+ return ret;
+}
+
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+static int
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+dhdpcie_set_suspend_resume(dhd_bus_t *bus, bool state, bool byint)
+#else
+dhdpcie_set_suspend_resume(dhd_bus_t *bus, bool state)
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+{
+ int ret = 0;
+
+ ASSERT(bus && !bus->dhd->dongle_reset);
+
+#ifdef DHD_PCIE_RUNTIMEPM
+ /* if wakelock is held during suspend, return failed */
+ if (state == TRUE && dhd_os_check_wakelock_all(bus->dhd)) {
+ return -EBUSY;
+ }
+ mutex_lock(&bus->pm_lock);
+#endif /* DHD_PCIE_RUNTIMEPM */
+ DHD_RPM(("%s: Enter state=%d\n", __FUNCTION__, state));
+
+	/* When firmware is not loaded, do only the PCI bus suspend/resume */
+ if (bus->dhd->busstate == DHD_BUS_DOWN) {
+ ret = dhdpcie_pci_suspend_resume(bus, state);
+#ifdef DHD_PCIE_RUNTIMEPM
+ mutex_unlock(&bus->pm_lock);
+#endif /* DHD_PCIE_RUNTIMEPM */
+ return ret;
+ }
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+ ret = dhdpcie_bus_suspend(bus, state, byint);
+#else
+ ret = dhdpcie_bus_suspend(bus, state);
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0) && defined(DHD_TCP_LIMIT_OUTPUT)
+ if (ret == BCME_OK) {
+		/*
+		 * net.ipv4.tcp_limit_output_bytes is used for all ipv4 sockets,
+		 * so it is returned to its original value when there is no traffic (suspend)
+		 */
+ if (state == TRUE) {
+ dhd_ctrl_tcp_limit_output_bytes(0);
+ } else {
+ dhd_ctrl_tcp_limit_output_bytes(1);
+ }
+ }
+#endif /* LINUX_VERSION_CODE > 4.19.0 && DHD_TCP_LIMIT_OUTPUT */
+ DHD_RPM(("%s: Exit ret=%d\n", __FUNCTION__, ret));
+
+#ifdef DHD_PCIE_RUNTIMEPM
+ mutex_unlock(&bus->pm_lock);
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+ return ret;
+}
+
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+static int dhdpcie_pm_runtime_suspend(struct device * dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ dhdpcie_info_t *pch = pci_get_drvdata(pdev);
+ dhd_bus_t *bus = NULL;
+ int ret = 0;
+
+ if (!pch)
+ return -EBUSY;
+
+ bus = pch->bus;
+
+ DHD_RPM(("%s Enter\n", __FUNCTION__));
+
+ if (atomic_read(&bus->dhd->block_bus))
+ return -EHOSTDOWN;
+
+ dhd_netif_stop_queue(bus);
+ atomic_set(&bus->dhd->block_bus, TRUE);
+
+ if (dhdpcie_set_suspend_resume(pdev, TRUE, TRUE)) {
+ pm_runtime_mark_last_busy(dev);
+ ret = -EAGAIN;
+ }
+
+ atomic_set(&bus->dhd->block_bus, FALSE);
+ dhd_bus_start_queue(bus);
+
+ return ret;
+}
+
+static int dhdpcie_pm_runtime_resume(struct device * dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ dhdpcie_info_t *pch = pci_get_drvdata(pdev);
+ dhd_bus_t *bus = pch->bus;
+
+ DHD_RPM(("%s Enter\n", __FUNCTION__));
+
+ if (atomic_read(&bus->dhd->block_bus))
+ return -EHOSTDOWN;
+
+ if (dhdpcie_set_suspend_resume(pdev, FALSE, TRUE))
+ return -EAGAIN;
+
+ return 0;
+}
+
+static int dhdpcie_pm_system_suspend_noirq(struct device * dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ dhdpcie_info_t *pch = pci_get_drvdata(pdev);
+ dhd_bus_t *bus = NULL;
+ int ret;
+
+ DHD_RPM(("%s Enter\n", __FUNCTION__));
+
+ if (!pch)
+ return -EBUSY;
+
+ bus = pch->bus;
+
+ if (atomic_read(&bus->dhd->block_bus))
+ return -EHOSTDOWN;
+
+ dhd_netif_stop_queue(bus);
+ atomic_set(&bus->dhd->block_bus, TRUE);
+
+ ret = dhdpcie_set_suspend_resume(pdev, TRUE, FALSE);
+
+ if (ret) {
+ dhd_bus_start_queue(bus);
+ atomic_set(&bus->dhd->block_bus, FALSE);
+ }
+
+ return ret;
+}
+
+static int dhdpcie_pm_system_resume_noirq(struct device * dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ dhdpcie_info_t *pch = pci_get_drvdata(pdev);
+ dhd_bus_t *bus = NULL;
+ int ret;
+
+ if (!pch)
+ return -EBUSY;
+
+ bus = pch->bus;
+
+ DHD_RPM(("%s Enter\n", __FUNCTION__));
+
+ ret = dhdpcie_set_suspend_resume(pdev, FALSE, FALSE);
+
+ atomic_set(&bus->dhd->block_bus, FALSE);
+ dhd_bus_start_queue(bus);
+ pm_runtime_mark_last_busy(dhd_bus_to_dev(bus));
+
+ return ret;
+}
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+
+#if defined(OEM_ANDROID) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+extern void dhd_dpc_tasklet_kill(dhd_pub_t *dhdp);
+#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
+
+static void
+dhdpcie_suspend_dump_cfgregs(struct dhd_bus *bus, char *suspend_state)
+{
+ DHD_RPM(("%s: BaseAddress0(0x%x)=0x%x, "
+ "BaseAddress1(0x%x)=0x%x PCIE_CFG_PMCSR(0x%x)=0x%x "
+ "PCI_BAR1_WIN(0x%x)=(0x%x)\n",
+ suspend_state,
+ PCIECFGREG_BASEADDR0,
+ dhd_pcie_config_read(bus,
+ PCIECFGREG_BASEADDR0, sizeof(uint32)),
+ PCIECFGREG_BASEADDR1,
+ dhd_pcie_config_read(bus,
+ PCIECFGREG_BASEADDR1, sizeof(uint32)),
+ PCIE_CFG_PMCSR,
+ dhd_pcie_config_read(bus,
+ PCIE_CFG_PMCSR, sizeof(uint32)),
+ PCI_BAR1_WIN,
+ dhd_pcie_config_read(bus,
+ PCI_BAR1_WIN, sizeof(uint32))));
+}
+
+static int dhdpcie_suspend_dev(struct pci_dev *dev)
+{
+ int ret;
+ dhdpcie_info_t *pch = pci_get_drvdata(dev);
+ dhd_bus_t *bus = pch->bus;
+
+#if defined(OEM_ANDROID) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+ if (bus->is_linkdown) {
+ DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
+ DHD_RPM(("%s: Enter\n", __FUNCTION__));
+#if defined(CONFIG_SOC_EXYNOS9810) || defined(CONFIG_SOC_EXYNOS9820) || \
+ defined(CONFIG_SOC_EXYNOS9830) || defined(CONFIG_SOC_EXYNOS2100) || \
+ defined(CONFIG_SOC_EXYNOS1000)
+ DHD_ERROR(("%s: Disable L1ss EP side\n", __FUNCTION__));
+ exynos_pcie_l1ss_ctrl(0, PCIE_L1SS_CTRL_WIFI);
+#endif /* CONFIG_SOC_EXYNOS9810 || CONFIG_SOC_EXYNOS9820 ||
+ * CONFIG_SOC_EXYNOS9830 || CONFIG_SOC_EXYNOS2100 ||
+ * CONFIG_SOC_EXYNOS1000
+ */
+#if defined(CONFIG_SOC_GS101)
+ DHD_ERROR(("%s: Disable L1ss EP side\n", __FUNCTION__));
+ exynos_pcie_rc_l1ss_ctrl(0, PCIE_L1SS_CTRL_WIFI, 1);
+#endif /* CONFIG_SOC_GS101 */
+
+ dhdpcie_suspend_dump_cfgregs(bus, "BEFORE_EP_SUSPEND");
+#if defined(OEM_ANDROID) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+ dhd_dpc_tasklet_kill(bus->dhd);
+#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
+ pci_save_state(dev);
+#if defined(OEM_ANDROID) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+ pch->state = pci_store_saved_state(dev);
+#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
+ pci_enable_wake(dev, PCI_D0, TRUE);
+ if (pci_is_enabled(dev))
+ pci_disable_device(dev);
+
+ ret = pci_set_power_state(dev, PCI_D3hot);
+ if (ret) {
+ DHD_ERROR(("%s: pci_set_power_state error %d\n",
+ __FUNCTION__, ret));
+ }
+#ifdef OEM_ANDROID
+// dev->state_saved = FALSE;
+#endif /* OEM_ANDROID */
+ dhdpcie_suspend_dump_cfgregs(bus, "AFTER_EP_SUSPEND");
+ return ret;
+}
+
+#ifdef DHD_WAKE_STATUS
+int bcmpcie_get_total_wake(struct dhd_bus *bus)
+{
+ dhdpcie_info_t *pch = pci_get_drvdata(bus->dev);
+
+ return pch->total_wake_count;
+}
+
+int bcmpcie_set_get_wake(struct dhd_bus *bus, int flag)
+{
+ dhdpcie_info_t *pch = pci_get_drvdata(bus->dev);
+ unsigned long flags;
+ int ret;
+
+ DHD_PKT_WAKE_LOCK(&pch->pkt_wake_lock, flags);
+
+ ret = pch->pkt_wake;
+ pch->total_wake_count += flag;
+ pch->pkt_wake = flag;
+
+ DHD_PKT_WAKE_UNLOCK(&pch->pkt_wake_lock, flags);
+ return ret;
+}
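+
+/*
+ * Illustrative usage sketch (commented out, not part of the driver): on a
+ * wake interrupt a caller could account the wake with
+ * bcmpcie_set_get_wake(bus, 1), which also returns the previous pkt_wake
+ * flag, and read the running total with bcmpcie_get_total_wake(). The
+ * helper name is hypothetical.
+ */
+#if 0
+static void
+bcmpcie_example_account_wake(struct dhd_bus *bus)
+{
+	int prev_pkt_wake = bcmpcie_set_get_wake(bus, 1);
+
+	DHD_ERROR(("prev pkt_wake %d, total wakes %d\n",
+		prev_pkt_wake, bcmpcie_get_total_wake(bus)));
+}
+#endif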
+#endif /* DHD_WAKE_STATUS */
+
+static int dhdpcie_resume_dev(struct pci_dev *dev)
+{
+ int err = 0;
+ dhdpcie_info_t *pch = pci_get_drvdata(dev);
+#if defined(OEM_ANDROID) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+ pci_load_and_free_saved_state(dev, &pch->state);
+#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
+ DHD_RPM(("%s: Enter\n", __FUNCTION__));
+#ifdef OEM_ANDROID
+// dev->state_saved = TRUE;
+#endif /* OEM_ANDROID */
+ pci_restore_state(dev);
+
+	/* Restore the current BAR1 window */
+ OSL_PCI_WRITE_CONFIG(pch->bus->osh, PCI_BAR1_WIN, 4, pch->bus->curr_bar1_win);
+
+#ifdef FORCE_TPOWERON
+ if (dhdpcie_chip_req_forced_tpoweron(pch->bus)) {
+ dhd_bus_set_tpoweron(pch->bus, tpoweron_scale);
+ }
+#endif /* FORCE_TPOWERON */
+ err = pci_enable_device(dev);
+ if (err) {
+ printf("%s:pci_enable_device error %d \n", __FUNCTION__, err);
+ goto out;
+ }
+ pci_set_master(dev);
+ err = pci_set_power_state(dev, PCI_D0);
+ if (err) {
+ printf("%s:pci_set_power_state error %d \n", __FUNCTION__, err);
+ goto out;
+ }
+ BCM_REFERENCE(pch);
+ dhdpcie_suspend_dump_cfgregs(pch->bus, "AFTER_EP_RESUME");
+#if defined(CONFIG_SOC_EXYNOS9810) || defined(CONFIG_SOC_EXYNOS9820) || \
+ defined(CONFIG_SOC_EXYNOS9830) || defined(CONFIG_SOC_EXYNOS2100) || \
+ defined(CONFIG_SOC_EXYNOS1000)
+ DHD_ERROR(("%s: Enable L1ss EP side\n", __FUNCTION__));
+ exynos_pcie_l1ss_ctrl(1, PCIE_L1SS_CTRL_WIFI);
+#endif /* CONFIG_SOC_EXYNOS9810 || CONFIG_SOC_EXYNOS9820 ||
+ * CONFIG_SOC_EXYNOS9830 || CONFIG_SOC_EXYNOS2100 ||
+ * CONFIG_SOC_EXYNOS1000
+ */
+#if defined(CONFIG_SOC_GS101)
+ DHD_ERROR(("%s: Enable L1ss EP side\n", __FUNCTION__));
+ exynos_pcie_rc_l1ss_ctrl(1, PCIE_L1SS_CTRL_WIFI, 1);
+#endif /* CONFIG_SOC_GS101 */
+
+out:
+ return err;
+}
+
+static int dhdpcie_resume_host_dev(dhd_bus_t *bus)
+{
+ int bcmerror = 0;
+
+ bcmerror = dhdpcie_start_host_dev(bus);
+ if (bcmerror < 0) {
+ DHD_ERROR(("%s: PCIe RC resume failed!!! (%d)\n",
+ __FUNCTION__, bcmerror));
+ bus->is_linkdown = 1;
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
+ bus->no_cfg_restore = 1;
+#endif /* CONFIG_ARCH_MSM */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+ }
+
+ return bcmerror;
+}
+
+static int dhdpcie_suspend_host_dev(dhd_bus_t *bus)
+{
+ int bcmerror = 0;
+#ifdef CONFIG_ARCH_EXYNOS
+	/*
+	 * XXX : SWWLAN-82173, SWWLAN-82183 WAR for SS PCIe RC.
+	 * SS PCIe RC/EP is a 1:1 mapping using different channels
+	 * (RC0 - LTE, RC1 - WiFi); RC0 and RC1 work independently.
+	 */
+
+ if (bus->rc_dev) {
+ pci_save_state(bus->rc_dev);
+ } else {
+ DHD_ERROR(("%s: RC %x:%x handle is NULL\n",
+ __FUNCTION__, PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID));
+ }
+#endif /* CONFIG_ARCH_EXYNOS */
+ bcmerror = dhdpcie_stop_host_dev(bus);
+ return bcmerror;
+}
+
+int
+dhdpcie_set_master_and_d0_pwrstate(dhd_bus_t *bus)
+{
+ int err;
+ pci_set_master(bus->dev);
+ err = pci_set_power_state(bus->dev, PCI_D0);
+ if (err) {
+ DHD_ERROR(("%s: pci_set_power_state error %d \n", __FUNCTION__, err));
+ }
+ return err;
+}
+
+uint32
+dhdpcie_rc_config_read(dhd_bus_t *bus, uint offset)
+{
+	uint val = -1; /* Initialise to 0xffffffff */
+ if (bus->rc_dev) {
+ pci_read_config_dword(bus->rc_dev, offset, &val);
+ OSL_DELAY(100);
+ } else {
+ DHD_ERROR(("%s: RC %x:%x handle is NULL\n",
+ __FUNCTION__, PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID));
+ }
+ DHD_ERROR(("%s: RC %x:%x offset 0x%x val 0x%x\n",
+ __FUNCTION__, PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID, offset, val));
+ return (val);
+}
+
+/*
+ * Reads/writes the value of a capability register
+ * from the given CAP_ID section of the given PCI device
+ *
+ * Arguments
+ * @pdev PCI device handle
+ * @cap Capability or Extended Capability ID to get
+ * @offset offset of the register to read
+ * @is_ext TRUE if @cap is given for an Extended Capability
+ * @is_write set to TRUE to indicate a write
+ * @writeval value to write
+ *
+ * Return Value
+ * Returns 0xffffffff on error
+ * on write success returns BCME_OK (0)
+ * on read success returns the value of the register requested
+ * Note: caller should ensure a valid capability ID and Ext. Capability ID.
+ */
+
+uint32
+dhdpcie_access_cap(struct pci_dev *pdev, int cap, uint offset, bool is_ext, bool is_write,
+ uint32 writeval)
+{
+ int cap_ptr = 0;
+ uint32 ret = -1;
+ uint32 readval;
+
+ if (!(pdev)) {
+ DHD_ERROR(("%s: pdev is NULL\n", __FUNCTION__));
+ return ret;
+ }
+
+ /* Find Capability offset */
+ if (is_ext) {
+		/* removing max EXT_CAP_ID check as
+		 * the linux kernel definition's max value is not yet updated per the spec
+		 */
+ cap_ptr = pci_find_ext_capability(pdev, cap);
+
+ } else {
+		/* removing max PCI_CAP_ID_MAX check as
+		 * previous kernel versions don't have this definition
+		 */
+ cap_ptr = pci_find_capability(pdev, cap);
+ }
+
+ /* Return if capability with given ID not found */
+ if (cap_ptr == 0) {
+ DHD_ERROR(("%s: PCI Cap(0x%02x) not supported.\n",
+ __FUNCTION__, cap));
+ return BCME_ERROR;
+ }
+
+ if (is_write) {
+ pci_write_config_dword(pdev, (cap_ptr + offset), writeval);
+ ret = BCME_OK;
+
+ } else {
+
+ pci_read_config_dword(pdev, (cap_ptr + offset), &readval);
+ ret = readval;
+ }
+
+ return ret;
+}
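+
+/*
+ * Illustrative usage sketch (commented out, not part of the driver): reading
+ * and writing the EP's Link Control register through dhdpcie_access_cap().
+ * The helper name is hypothetical; the dhdpcie_rc_access_cap()/
+ * dhdpcie_ep_access_cap() wrappers below are the preferred entry points.
+ */
+#if 0
+static void
+dhdpcie_example_cap_rw(dhd_bus_t *bus)
+{
+	/* read: standard capability (is_ext = FALSE), is_write = FALSE */
+	uint32 linkctrl = dhdpcie_access_cap(bus->dev, PCIE_CAP_ID_EXP,
+		PCIE_CAP_LINKCTRL_OFFSET, FALSE, FALSE, 0);
+
+	if (linkctrl == (uint32)BCME_ERROR) {
+		return; /* capability not present */
+	}
+
+	/* write the value back unchanged: is_write = TRUE, returns BCME_OK */
+	dhdpcie_access_cap(bus->dev, PCIE_CAP_ID_EXP,
+		PCIE_CAP_LINKCTRL_OFFSET, FALSE, TRUE, linkctrl);
+}
+#endif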
+
+uint32
+dhdpcie_rc_access_cap(dhd_bus_t *bus, int cap, uint offset, bool is_ext, bool is_write,
+ uint32 writeval)
+{
+ if (!(bus->rc_dev)) {
+ DHD_ERROR(("%s: RC %x:%x handle is NULL\n",
+ __FUNCTION__, PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID));
+ return BCME_ERROR;
+ }
+
+ return dhdpcie_access_cap(bus->rc_dev, cap, offset, is_ext, is_write, writeval);
+}
+
+uint32
+dhdpcie_ep_access_cap(dhd_bus_t *bus, int cap, uint offset, bool is_ext, bool is_write,
+ uint32 writeval)
+{
+ if (!(bus->dev)) {
+ DHD_ERROR(("%s: EP handle is NULL\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ return dhdpcie_access_cap(bus->dev, cap, offset, is_ext, is_write, writeval);
+}
+
+/* API wrapper to read the Root Port link capability.
+ * Returns 2 = GEN2, 1 = GEN1, BCME_ERROR if the link capability is not found
+ */
+
+uint32 dhd_debug_get_rc_linkcap(dhd_bus_t *bus)
+{
+	uint32 linkcap;
+
+	linkcap = dhdpcie_rc_access_cap(bus, PCIE_CAP_ID_EXP,
+		PCIE_CAP_LINKCAP_OFFSET, FALSE, FALSE, 0);
+	if (linkcap == (uint32)BCME_ERROR) {
+		/* capability read failed; propagate the error instead of masking it */
+		return BCME_ERROR;
+	}
+	linkcap &= PCIE_CAP_LINKCAP_LNKSPEED_MASK;
+	return linkcap;
+}
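+
+/*
+ * Illustrative usage sketch (commented out, not part of the driver):
+ * decoding the value returned by dhd_debug_get_rc_linkcap(). The helper
+ * name is hypothetical.
+ */
+#if 0
+static void
+dhd_example_print_rc_gen(dhd_bus_t *bus)
+{
+	uint32 linkcap = dhd_debug_get_rc_linkcap(bus);
+
+	if (linkcap == (uint32)BCME_ERROR) {
+		DHD_ERROR(("RC link capability not found\n"));
+	} else {
+		/* link speed field: 1 = GEN1 (2.5 GT/s), 2 = GEN2 (5 GT/s) */
+		DHD_ERROR(("RC link speed field: %u\n", linkcap));
+	}
+}
+#endif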
+
+static void dhdpcie_config_save_restore_coherent(dhd_bus_t *bus, bool state)
+{
+ if (bus->coreid == ARMCA7_CORE_ID) {
+ if (state) {
+ /* Sleep */
+ bus->coherent_state = dhdpcie_bus_cfg_read_dword(bus,
+ PCIE_CFG_SUBSYSTEM_CONTROL, 4) & PCIE_BARCOHERENTACCEN_MASK;
+ } else {
+ uint32 val = (dhdpcie_bus_cfg_read_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL,
+ 4) & ~PCIE_BARCOHERENTACCEN_MASK) | bus->coherent_state;
+ dhdpcie_bus_cfg_write_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4, val);
+ }
+ }
+}
+
+int dhdpcie_pci_suspend_resume(dhd_bus_t *bus, bool state)
+{
+ int rc;
+
+ struct pci_dev *dev = bus->dev;
+
+ if (state) {
+ dhdpcie_config_save_restore_coherent(bus, state);
+#if !defined(BCMPCIE_OOB_HOST_WAKE) && !defined(PCIE_OOB)
+ dhdpcie_pme_active(bus->osh, state);
+#endif /* !BCMPCIE_OOB_HOST_WAKE && !PCIE_OOB */
+ rc = dhdpcie_suspend_dev(dev);
+ if (!rc) {
+ dhdpcie_suspend_host_dev(bus);
+ }
+ } else {
+ rc = dhdpcie_resume_host_dev(bus);
+ if (!rc) {
+ rc = dhdpcie_resume_dev(dev);
+ if (PCIECTO_ENAB(bus)) {
+ /* reinit CTO configuration
+ * because cfg space got reset at D3 (PERST)
+ */
+ dhdpcie_cto_cfg_init(bus, TRUE);
+ }
+ if (PCIE_ENUM_RESET_WAR_ENAB(bus->sih->buscorerev)) {
+ dhdpcie_ssreset_dis_enum_rst(bus);
+ }
+#if !defined(BCMPCIE_OOB_HOST_WAKE) && !defined(PCIE_OOB)
+ dhdpcie_pme_active(bus->osh, state);
+#endif /* !BCMPCIE_OOB_HOST_WAKE && !PCIE_OOB */
+ }
+ dhdpcie_config_save_restore_coherent(bus, state);
+#if defined(OEM_ANDROID)
+#if defined(DHD_HANG_SEND_UP_TEST)
+ if (bus->is_linkdown ||
+ bus->dhd->req_hang_type == HANG_REASON_PCIE_RC_LINK_UP_FAIL)
+#else /* DHD_HANG_SEND_UP_TEST */
+ if (bus->is_linkdown)
+#endif /* DHD_HANG_SEND_UP_TEST */
+ {
+ bus->dhd->hang_reason = HANG_REASON_PCIE_RC_LINK_UP_FAIL;
+ dhd_os_send_hang_message(bus->dhd);
+ }
+#endif /* OEM_ANDROID */
+ }
+ return rc;
+}
+
+static int dhdpcie_device_scan(struct device *dev, void *data)
+{
+ struct pci_dev *pcidev;
+ int *cnt = data;
+
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ pcidev = container_of(dev, struct pci_dev, dev);
+ GCC_DIAGNOSTIC_POP();
+
+ if (pcidev->vendor != 0x14e4)
+ return 0;
+
+ DHD_INFO(("Found Broadcom PCI device 0x%04x\n", pcidev->device));
+ *cnt += 1;
+ if (pcidev->driver && strcmp(pcidev->driver->name, dhdpcie_driver.name))
+ DHD_ERROR(("Broadcom PCI Device 0x%04x has allocated with driver %s\n",
+ pcidev->device, pcidev->driver->name));
+
+ return 0;
+}
+
+int
+dhdpcie_bus_register(void)
+{
+ int error = 0;
+
+ if (!(error = pci_register_driver(&dhdpcie_driver))) {
+ bus_for_each_dev(dhdpcie_driver.driver.bus, NULL, &error, dhdpcie_device_scan);
+ if (!error) {
+ DHD_ERROR(("No Broadcom PCI device enumerated!\n"));
+#ifdef DHD_PRELOAD
+ return 0;
+#endif
+ } else if (!dhdpcie_init_succeeded) {
+ DHD_ERROR(("%s: dhdpcie initialize failed.\n", __FUNCTION__));
+ } else {
+ return 0;
+ }
+
+ pci_unregister_driver(&dhdpcie_driver);
+ error = BCME_ERROR;
+ }
+
+ return error;
+}
+
+void
+dhdpcie_bus_unregister(void)
+{
+ pci_unregister_driver(&dhdpcie_driver);
+}
+
+int __devinit
+dhdpcie_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ int err = 0;
+ DHD_MUTEX_LOCK();
+
+ if (dhdpcie_chipmatch (pdev->vendor, pdev->device)) {
+ DHD_ERROR(("%s: chipmatch failed!!\n", __FUNCTION__));
+ err = -ENODEV;
+ goto exit;
+ }
+
+ printf("PCI_PROBE: bus %X, slot %X,vendor %X, device %X"
+ "(good PCI location)\n", pdev->bus->number,
+ PCI_SLOT(pdev->devfn), pdev->vendor, pdev->device);
+
+ if (dhdpcie_init_succeeded == TRUE) {
+ DHD_ERROR(("%s(): === Driver Already attached to a BRCM device === \r\n",
+ __FUNCTION__));
+ err = -ENODEV;
+ goto exit;
+ }
+
+ if (dhdpcie_init (pdev)) {
+ DHD_ERROR(("%s: PCIe Enumeration failed\n", __FUNCTION__));
+ err = -ENODEV;
+ goto exit;
+ }
+
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+	/*
+	 * Since the MSM PCIe RC device usage count is already incremented by 2
+	 * even before dhdpcie_pci_probe() is called, we must call
+	 * pm_runtime_put_noidle() twice to make the count start from zero.
+	 */
+
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+
+#ifdef BCMPCIE_DISABLE_ASYNC_SUSPEND
+ /* disable async suspend */
+ device_disable_async_suspend(&pdev->dev);
+#endif /* BCMPCIE_DISABLE_ASYNC_SUSPEND */
+
+ DHD_TRACE(("%s: PCIe Enumeration done!!\n", __FUNCTION__));
+exit:
+ DHD_MUTEX_UNLOCK();
+ return err;
+}
+
+int
+dhdpcie_detach(dhdpcie_info_t *pch)
+{
+ if (pch) {
+#if defined(OEM_ANDROID) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+ if (!dhd_download_fw_on_driverload) {
+ pci_load_and_free_saved_state(pch->dev, &pch->default_state);
+ }
+#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
+ MFREE(pch->osh, pch, sizeof(dhdpcie_info_t));
+ }
+ return 0;
+}
+
+void __devexit
+dhdpcie_pci_remove(struct pci_dev *pdev)
+{
+ osl_t *osh = NULL;
+ dhdpcie_info_t *pch = NULL;
+ dhd_bus_t *bus = NULL;
+
+ DHD_TRACE(("%s Enter\n", __FUNCTION__));
+
+ DHD_MUTEX_LOCK();
+
+ pch = pci_get_drvdata(pdev);
+ bus = pch->bus;
+ osh = pch->osh;
+
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_get_noresume(&pdev->dev);
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+
+ if (bus) {
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
+ msm_pcie_deregister_event(&bus->pcie_event);
+#endif /* CONFIG_ARCH_MSM */
+#ifdef CONFIG_ARCH_EXYNOS
+ exynos_pcie_deregister_event(&bus->pcie_event);
+#endif /* CONFIG_ARCH_EXYNOS */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+
+ bus->rc_dev = NULL;
+
+ dhdpcie_bus_release(bus);
+ }
+
+	/*
+	 * For a module-type driver,
+	 * the configuration space needs to be backed up before rmmod.
+	 * Since the originally backed-up configuration space won't be restored
+	 * if state_saved = false, back up the configuration space again
+	 * so that state_saved = true.
+	 */
+ pci_save_state(pdev);
+
+ if (pci_is_enabled(pdev))
+ pci_disable_device(pdev);
+#ifdef BCMPCIE_OOB_HOST_WAKE
+ /* pcie os info detach */
+ MFREE(osh, pch->os_cxt, sizeof(dhdpcie_os_info_t));
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+#ifdef USE_SMMU_ARCH_MSM
+ /* smmu info detach */
+ dhdpcie_smmu_remove(pdev, pch->smmu_cxt);
+ MFREE(osh, pch->smmu_cxt, sizeof(dhdpcie_smmu_info_t));
+#endif /* USE_SMMU_ARCH_MSM */
+ /* pcie info detach */
+ dhdpcie_detach(pch);
+ /* osl detach */
+ osl_detach(osh);
+
+#if defined(BCMPCIE_OOB_HOST_WAKE) && defined(CUSTOMER_HW2) && \
+ defined(CONFIG_ARCH_APQ8084)
+ brcm_pcie_wake.wake_irq = NULL;
+ brcm_pcie_wake.data = NULL;
+#endif /* BCMPCIE_OOB_HOST_WAKE && CUSTOMER_HW2 && CONFIG_ARCH_APQ8084 */
+
+ dhdpcie_init_succeeded = FALSE;
+
+ DHD_MUTEX_UNLOCK();
+
+ DHD_TRACE(("%s Exit\n", __FUNCTION__));
+
+ return;
+}
+
+/* Enable Linux Msi */
+int
+dhdpcie_enable_msi(struct pci_dev *pdev, unsigned int min_vecs, unsigned int max_vecs)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
+ return pci_alloc_irq_vectors(pdev, min_vecs, max_vecs, PCI_IRQ_MSI);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
+ return pci_enable_msi_range(pdev, min_vecs, max_vecs);
+#else
+ return pci_enable_msi_block(pdev, max_vecs);
+#endif
+}
+
+/* Disable Linux Msi */
+void
+dhdpcie_disable_msi(struct pci_dev *pdev)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
+	pci_free_irq_vectors(pdev);
+#else
+	pci_disable_msi(pdev);
+#endif
+ return;
+}
+
+/* Request Linux irq */
+int
+dhdpcie_request_irq(dhdpcie_info_t *dhdpcie_info)
+{
+ dhd_bus_t *bus = dhdpcie_info->bus;
+ struct pci_dev *pdev = dhdpcie_info->bus->dev;
+ int host_irq_disabled;
+
+ if (!bus->irq_registered) {
+ snprintf(dhdpcie_info->pciname, sizeof(dhdpcie_info->pciname),
+ "dhdpcie:%s", pci_name(pdev));
+
+ if (bus->d2h_intr_method == PCIE_MSI) {
+ if (dhdpcie_enable_msi(pdev, 1, 1) < 0) {
+ DHD_ERROR(("%s: dhdpcie_enable_msi() failed\n", __FUNCTION__));
+ dhdpcie_disable_msi(pdev);
+ bus->d2h_intr_method = PCIE_INTX;
+ }
+ }
+
+ if (bus->d2h_intr_method == PCIE_MSI)
+ printf("%s: MSI enabled\n", __FUNCTION__);
+ else
+ printf("%s: INTx enabled\n", __FUNCTION__);
+
+ if (request_irq(pdev->irq, dhdpcie_isr, IRQF_SHARED,
+ dhdpcie_info->pciname, bus) < 0) {
+ DHD_ERROR(("%s: request_irq() failed\n", __FUNCTION__));
+ if (bus->d2h_intr_method == PCIE_MSI) {
+ dhdpcie_disable_msi(pdev);
+ }
+ return -1;
+ }
+ else {
+ bus->irq_registered = TRUE;
+ }
+ } else {
+ DHD_ERROR(("%s: PCI IRQ is already registered\n", __FUNCTION__));
+ }
+
+ host_irq_disabled = dhdpcie_irq_disabled(bus);
+ if (host_irq_disabled) {
+ DHD_ERROR(("%s: PCIe IRQ was disabled(%d), so, enabled it again\n",
+ __FUNCTION__, host_irq_disabled));
+ dhdpcie_enable_irq(bus);
+ }
+
+ DHD_TRACE(("%s %s\n", __FUNCTION__, dhdpcie_info->pciname));
+
+ return 0; /* SUCCESS */
+}
+
+/**
+ * dhdpcie_get_pcieirq - return pcie irq number to linux-dhd
+ */
+int
+dhdpcie_get_pcieirq(struct dhd_bus *bus, unsigned int *irq)
+{
+ struct pci_dev *pdev = bus->dev;
+
+ if (!pdev) {
+ DHD_ERROR(("%s : bus->dev is NULL\n", __FUNCTION__));
+ return -ENODEV;
+ }
+
+ *irq = pdev->irq;
+
+ return 0; /* SUCCESS */
+}
+
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+#define PRINTF_RESOURCE "0x%016llx"
+#else
+#define PRINTF_RESOURCE "0x%08x"
+#endif
+
+#ifdef EXYNOS_PCIE_MODULE_PATCH
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+extern struct pci_saved_state *bcm_pcie_default_state;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
+#endif /* EXYNOS_MODULE_PATCH */
+
+/*
+ * Name: dhdpcie_get_resource
+ *
+ * Parameters:
+ * 1: dhdpcie_info_t *dhdpcie_info -- Linux-specific PCIe info structure
+ *    (carries the pci device handle and the mapped resources)
+ *
+ * Return value:
+ * int - 0 on success, -1 on failure
+ *
+ * Description:
+ * Access PCI configuration space, retrieve the PCI-allocated resources,
+ * and update them in the resource structure.
+ */
+int dhdpcie_get_resource(dhdpcie_info_t *dhdpcie_info)
+{
+ phys_addr_t bar0_addr, bar1_addr;
+ ulong bar1_size;
+ struct pci_dev *pdev = NULL;
+ pdev = dhdpcie_info->dev;
+#ifdef EXYNOS_PCIE_MODULE_PATCH
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+ if (bcm_pcie_default_state) {
+ pci_load_saved_state(pdev, bcm_pcie_default_state);
+ pci_restore_state(pdev);
+ }
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
+#endif /* EXYNOS_MODULE_PATCH */
+
+	/*
+	 * For a built-in driver, the configuration backup can't be restored
+	 * because state_saved = false at first load time.
+	 * For a module-type driver, the BAR0/BAR1 addresses can't be remapped
+	 * on a second load without restoring the configuration backup,
+	 * which remains in pci_dev since DHD didn't remove it from the bus.
+	 * pci_restore_state() restores the proper BAR0/BAR1 addresses.
+	 */
+ pci_restore_state(pdev);
+
+ do {
+ if (pci_enable_device(pdev)) {
+ printf("%s: Cannot enable PCI device\n", __FUNCTION__);
+ break;
+ }
+ pci_set_master(pdev);
+ bar0_addr = pci_resource_start(pdev, 0); /* Bar-0 mapped address */
+ bar1_addr = pci_resource_start(pdev, 2); /* Bar-1 mapped address */
+
+ /* read Bar-1 mapped memory range */
+ bar1_size = pci_resource_len(pdev, 2);
+
+ if ((bar1_size == 0) || (bar1_addr == 0)) {
+ printf("%s: BAR1 Not enabled for this device size(%ld),"
+ " addr(0x"PRINTF_RESOURCE")\n",
+ __FUNCTION__, bar1_size, bar1_addr);
+ goto err;
+ }
+
+ dhdpcie_info->regs = (volatile char *) REG_MAP(bar0_addr, DONGLE_REG_MAP_SIZE);
+ dhdpcie_info->bar1_size =
+ (bar1_size > DONGLE_TCM_MAP_SIZE) ? bar1_size : DONGLE_TCM_MAP_SIZE;
+ dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, dhdpcie_info->bar1_size);
+
+ if (!dhdpcie_info->regs || !dhdpcie_info->tcm) {
+ DHD_ERROR(("%s:ioremap() failed\n", __FUNCTION__));
+ break;
+ }
+#ifdef EXYNOS_PCIE_MODULE_PATCH
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+ if (bcm_pcie_default_state == NULL) {
+ pci_save_state(pdev);
+ bcm_pcie_default_state = pci_store_saved_state(pdev);
+ }
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
+#endif /* EXYNOS_MODULE_PATCH */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+		/* Back up the PCIe configuration so the Wi-Fi on/off process
+		 * can be used in the case of a built-in driver
+		 */
+ pci_save_state(pdev);
+ dhdpcie_info->default_state = pci_store_saved_state(pdev);
+
+ if (dhdpcie_info->default_state == NULL) {
+ DHD_ERROR(("%s pci_store_saved_state returns NULL\n",
+ __FUNCTION__));
+ REG_UNMAP(dhdpcie_info->regs);
+ REG_UNMAP(dhdpcie_info->tcm);
+ pci_disable_device(pdev);
+ break;
+ }
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
+
+ DHD_TRACE(("%s:Phys addr : reg space = %p base addr 0x"PRINTF_RESOURCE" \n",
+ __FUNCTION__, dhdpcie_info->regs, bar0_addr));
+ DHD_TRACE(("%s:Phys addr : tcm_space = %p base addr 0x"PRINTF_RESOURCE" \n",
+ __FUNCTION__, dhdpcie_info->tcm, bar1_addr));
+
+ return 0; /* SUCCESS */
+ } while (0);
+err:
+ return -1; /* FAILURE */
+}
+
+int dhdpcie_scan_resource(dhdpcie_info_t *dhdpcie_info)
+{
+ DHD_TRACE(("%s: ENTER\n", __FUNCTION__));
+
+ do {
+ /* define it here only!! */
+ if (dhdpcie_get_resource (dhdpcie_info)) {
+ DHD_ERROR(("%s: Failed to get PCI resources\n", __FUNCTION__));
+ break;
+ }
+ DHD_TRACE(("%s:Exit - SUCCESS \n",
+ __FUNCTION__));
+
+ return 0; /* SUCCESS */
+
+ } while (0);
+
+ DHD_TRACE(("%s:Exit - FAILURE \n", __FUNCTION__));
+
+ return -1; /* FAILURE */
+
+}
+
+void dhdpcie_dump_resource(dhd_bus_t *bus)
+{
+ dhdpcie_info_t *pch;
+
+ if (bus == NULL) {
+ DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ if (bus->dev == NULL) {
+ DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ pch = pci_get_drvdata(bus->dev);
+ if (pch == NULL) {
+ DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ /* BAR0 */
+ DHD_RPM(("%s: BAR0(VA): 0x%pK, BAR0(PA): "PRINTF_RESOURCE", SIZE: %d\n",
+ __FUNCTION__, pch->regs, pci_resource_start(bus->dev, 0),
+ DONGLE_REG_MAP_SIZE));
+
+ /* BAR1 */
+ DHD_RPM(("%s: BAR1(VA): 0x%pK, BAR1(PA): "PRINTF_RESOURCE", SIZE: %d\n",
+ __FUNCTION__, pch->tcm, pci_resource_start(bus->dev, 2),
+ pch->bar1_size));
+}
+
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#if defined(CONFIG_ARCH_MSM) || defined(CONFIG_ARCH_EXYNOS)
+void dhdpcie_linkdown_cb(struct_pcie_notify *noti)
+{
+ struct pci_dev *pdev = (struct pci_dev *)noti->user;
+ dhdpcie_info_t *pch = NULL;
+
+ if (pdev) {
+ pch = pci_get_drvdata(pdev);
+ if (pch) {
+ dhd_bus_t *bus = pch->bus;
+ if (bus) {
+ dhd_pub_t *dhd = bus->dhd;
+ if (dhd) {
+#ifdef CONFIG_ARCH_MSM
+ DHD_ERROR(("%s: Set no_cfg_restore flag\n",
+ __FUNCTION__));
+ bus->no_cfg_restore = 1;
+#endif /* CONFIG_ARCH_MSM */
+#ifdef DHD_SSSR_DUMP
+ if (dhd->fis_triggered) {
+ DHD_ERROR(("%s: PCIe linkdown due to FIS, Ignore\n",
+ __FUNCTION__));
+ } else
+#endif /* DHD_SSSR_DUMP */
+ {
+ DHD_ERROR(("%s: Event HANG send up "
+ "due to PCIe linkdown\n",
+ __FUNCTION__));
+ bus->is_linkdown = 1;
+ dhd->hang_reason =
+ HANG_REASON_PCIE_LINK_DOWN_RC_DETECT;
+ dhd_os_send_hang_message(dhd);
+ }
+ }
+ }
+ }
+ }
+
+}
+#endif /* CONFIG_ARCH_MSM || CONFIG_ARCH_EXYNOS */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+
+int dhdpcie_init(struct pci_dev *pdev)
+{
+
+ osl_t *osh = NULL;
+ dhd_bus_t *bus = NULL;
+ dhdpcie_info_t *dhdpcie_info = NULL;
+ wifi_adapter_info_t *adapter = NULL;
+#ifdef BCMPCIE_OOB_HOST_WAKE
+ dhdpcie_os_info_t *dhdpcie_osinfo = NULL;
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+#ifdef USE_SMMU_ARCH_MSM
+ dhdpcie_smmu_info_t *dhdpcie_smmu_info = NULL;
+#endif /* USE_SMMU_ARCH_MSM */
+ int ret = 0;
+
+ do {
+ /* osl attach */
+ if (!(osh = osl_attach(pdev, PCI_BUS, FALSE))) {
+ DHD_ERROR(("%s: osl_attach failed\n", __FUNCTION__));
+ break;
+ }
+
+ /* initialize static buffer */
+ adapter = dhd_wifi_platform_get_adapter(PCI_BUS, pdev->bus->number,
+ PCI_SLOT(pdev->devfn));
+ if (adapter != NULL) {
+ DHD_ERROR(("%s: found adapter info '%s'\n", __FUNCTION__, adapter->name));
+ adapter->bus_type = PCI_BUS;
+ adapter->bus_num = pdev->bus->number;
+ adapter->slot_num = PCI_SLOT(pdev->devfn);
+ adapter->pci_dev = pdev;
+ } else
+ DHD_ERROR(("%s: can't find adapter info for this chip\n", __FUNCTION__));
+ osl_static_mem_init(osh, adapter);
+
+		/* allocate the Linux-specific PCIe structure here */
+ if (!(dhdpcie_info = MALLOC(osh, sizeof(dhdpcie_info_t)))) {
+			DHD_ERROR(("%s: MALLOC of dhdpcie_info_t failed\n", __FUNCTION__));
+ break;
+ }
+ bzero(dhdpcie_info, sizeof(dhdpcie_info_t));
+ dhdpcie_info->osh = osh;
+ dhdpcie_info->dev = pdev;
+
+#ifdef BCMPCIE_OOB_HOST_WAKE
+		/* allocate the OS-specific structure */
+ dhdpcie_osinfo = MALLOC(osh, sizeof(dhdpcie_os_info_t));
+ if (dhdpcie_osinfo == NULL) {
+ DHD_ERROR(("%s: MALLOC of dhdpcie_os_info_t failed\n",
+ __FUNCTION__));
+ break;
+ }
+ bzero(dhdpcie_osinfo, sizeof(dhdpcie_os_info_t));
+ dhdpcie_info->os_cxt = (void *)dhdpcie_osinfo;
+
+ /* Initialize host wake IRQ */
+ spin_lock_init(&dhdpcie_osinfo->oob_irq_spinlock);
+		/* Get customer-specific host wake IRQ parameters: IRQ number and IRQ flags */
+ dhdpcie_osinfo->oob_irq_num = wifi_platform_get_irq_number(adapter,
+ &dhdpcie_osinfo->oob_irq_flags);
+ if (dhdpcie_osinfo->oob_irq_num < 0) {
+ DHD_ERROR(("%s: Host OOB irq is not defined\n", __FUNCTION__));
+ }
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+
+#ifdef USE_SMMU_ARCH_MSM
+ /* allocate private structure for using SMMU */
+ dhdpcie_smmu_info = MALLOC(osh, sizeof(dhdpcie_smmu_info_t));
+ if (dhdpcie_smmu_info == NULL) {
+ DHD_ERROR(("%s: MALLOC of dhdpcie_smmu_info_t failed\n",
+ __FUNCTION__));
+ break;
+ }
+ bzero(dhdpcie_smmu_info, sizeof(dhdpcie_smmu_info_t));
+ dhdpcie_info->smmu_cxt = (void *)dhdpcie_smmu_info;
+
+ /* Initialize smmu structure */
+ if (dhdpcie_smmu_init(pdev, dhdpcie_info->smmu_cxt) < 0) {
+ DHD_ERROR(("%s: Failed to initialize SMMU\n",
+ __FUNCTION__));
+ break;
+ }
+#endif /* USE_SMMU_ARCH_MSM */
+
+#ifdef DHD_WAKE_STATUS
+ /* Initialize pkt_wake_lock */
+ spin_lock_init(&dhdpcie_info->pkt_wake_lock);
+#endif /* DHD_WAKE_STATUS */
+
+		/* Find the PCI resources, verify the vendor and device ID,
+		 * map the BAR regions and IRQ, and update the structures.
+		 */
+ if (dhdpcie_scan_resource(dhdpcie_info)) {
+			DHD_ERROR(("%s: dhdpcie_scan_resource() failed\n", __FUNCTION__));
+
+ break;
+ }
+
+ /* Bus initialization */
+ ret = dhdpcie_bus_attach(osh, &bus, dhdpcie_info->regs, dhdpcie_info->tcm, pdev, adapter);
+ if (ret != BCME_OK) {
+ DHD_ERROR(("%s:dhdpcie_bus_attach() failed\n", __FUNCTION__));
+ break;
+ }
+
+ dhdpcie_info->bus = bus;
+ bus->bar1_size = dhdpcie_info->bar1_size;
+ bus->is_linkdown = 0;
+ bus->no_bus_init = FALSE;
+ bus->cto_triggered = 0;
+
+ bus->rc_dev = NULL;
+
+ /* Get RC Device Handle */
+ if (bus->dev->bus) {
+			/* the 'self' member of struct pci_bus is the bridge device as seen by its parent */
+ bus->rc_dev = bus->dev->bus->self;
+ if (bus->rc_dev)
+ DHD_ERROR(("%s: rc_dev from dev->bus->self (%x:%x) is %pK\n", __FUNCTION__,
+ bus->rc_dev->vendor, bus->rc_dev->device, bus->rc_dev));
+ else
+ DHD_ERROR(("%s: bus->dev->bus->self is NULL\n", __FUNCTION__));
+ } else {
+ DHD_ERROR(("%s: unable to get rc_dev as dev->bus is NULL\n", __FUNCTION__));
+ }
+
+ /* if rc_dev is still NULL, try to get from vendor/device IDs */
+ if (bus->rc_dev == NULL) {
+ bus->rc_dev = pci_get_device(PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID, NULL);
+ DHD_ERROR(("%s: rc_dev from pci_get_device (%x:%x) is %p\n", __FUNCTION__,
+ PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID, bus->rc_dev));
+ }
+
+ bus->rc_ep_aspm_cap = dhd_bus_is_rc_ep_aspm_capable(bus);
+ bus->rc_ep_l1ss_cap = dhd_bus_is_rc_ep_l1ss_capable(bus);
+ DHD_ERROR(("%s: rc_ep_aspm_cap: %d rc_ep_l1ss_cap: %d\n",
+ __FUNCTION__, bus->rc_ep_aspm_cap, bus->rc_ep_l1ss_cap));
+
+#ifdef FORCE_TPOWERON
+ if (dhdpcie_chip_req_forced_tpoweron(bus)) {
+ dhd_bus_set_tpoweron(bus, tpoweron_scale);
+ }
+#endif /* FORCE_TPOWERON */
+
+#if defined(BCMPCIE_OOB_HOST_WAKE) && defined(CUSTOMER_HW2) && \
+ defined(CONFIG_ARCH_APQ8084)
+ brcm_pcie_wake.wake_irq = wlan_oob_irq;
+ brcm_pcie_wake.data = bus;
+#endif /* BCMPCIE_OOB_HOST_WAKE && CUSTOMER_HW2 && CONFIG_ARCH_APQ8084 */
+
+#ifdef DONGLE_ENABLE_ISOLATION
+ bus->dhd->dongle_isolation = TRUE;
+#endif /* DONGLE_ENABLE_ISOLATION */
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
+ bus->pcie_event.events = MSM_PCIE_EVENT_LINKDOWN;
+ bus->pcie_event.user = pdev;
+ bus->pcie_event.mode = MSM_PCIE_TRIGGER_CALLBACK;
+ bus->pcie_event.callback = dhdpcie_linkdown_cb;
+ bus->pcie_event.options = MSM_PCIE_CONFIG_NO_RECOVERY;
+ msm_pcie_register_event(&bus->pcie_event);
+ bus->no_cfg_restore = FALSE;
+#endif /* CONFIG_ARCH_MSM */
+#ifdef CONFIG_ARCH_EXYNOS
+ bus->pcie_event.events = EXYNOS_PCIE_EVENT_LINKDOWN;
+ bus->pcie_event.user = pdev;
+ bus->pcie_event.mode = EXYNOS_PCIE_TRIGGER_CALLBACK;
+ bus->pcie_event.callback = dhdpcie_linkdown_cb;
+ exynos_pcie_register_event(&bus->pcie_event);
+#endif /* CONFIG_ARCH_EXYNOS */
+ bus->read_shm_fail = FALSE;
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+
+ if (bus->intr) {
+ /* Register interrupt callback, but mask it (not operational yet). */
+ DHD_INTR(("%s: Registering and masking interrupts\n", __FUNCTION__));
+ bus->intr_enabled = FALSE;
+ dhdpcie_bus_intr_disable(bus);
+
+ if (dhdpcie_request_irq(dhdpcie_info)) {
+ DHD_ERROR(("%s: request_irq() failed\n", __FUNCTION__));
+ break;
+ }
+ } else {
+ bus->pollrate = 1;
+ DHD_INFO(("%s: PCIe interrupt function is NOT registered "
+ "due to polling mode\n", __FUNCTION__));
+ }
+
+#if defined(BCM_REQUEST_FW)
+ if (dhd_bus_download_firmware(bus, osh, NULL, NULL) < 0) {
+ DHD_ERROR(("%s: failed to download firmware\n", __FUNCTION__));
+ }
+ bus->nv_path = NULL;
+ bus->fw_path = NULL;
+#endif /* BCM_REQUEST_FW */
+
+ /* set private data for pci_dev */
+ pci_set_drvdata(pdev, dhdpcie_info);
+
+		/* Ensure the BAR1 switch feature is enabled, if needed, before FW download */
+ dhdpcie_bar1_window_switch_enab(bus);
+
+#if defined(BCMDHD_MODULAR) && defined(INSMOD_FW_LOAD)
+ if (1)
+#else
+ if (dhd_download_fw_on_driverload)
+#endif
+ {
+ if (dhd_bus_start(bus->dhd)) {
+ DHD_ERROR(("%s: dhd_bus_start() failed\n", __FUNCTION__));
+ if (!allow_delay_fwdl)
+ break;
+ }
+ } else {
+			/* Set a random MAC address during boot time */
+ get_random_bytes(&bus->dhd->mac.octet[3], 3);
+ /* Adding BRCM OUI */
+ bus->dhd->mac.octet[0] = 0;
+ bus->dhd->mac.octet[1] = 0x90;
+ bus->dhd->mac.octet[2] = 0x4C;
+ }
+
+ /* Attach to the OS network interface */
+ DHD_TRACE(("%s(): Calling dhd_register_if() \n", __FUNCTION__));
+ if (dhd_attach_net(bus->dhd, TRUE)) {
+ DHD_ERROR(("%s(): ERROR.. dhd_register_if() failed\n", __FUNCTION__));
+ break;
+ }
+
+ dhdpcie_init_succeeded = TRUE;
+#ifdef CONFIG_ARCH_MSM
+ sec_pcie_set_use_ep_loaded(bus->rc_dev);
+#endif /* CONFIG_ARCH_MSM */
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+ pm_runtime_set_autosuspend_delay(&pdev->dev, AUTO_SUSPEND_TIMEOUT);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ atomic_set(&bus->dhd->block_bus, FALSE);
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+
+#if defined(MULTIPLE_SUPPLICANT)
+ wl_android_post_init(); // terence 20120530: fix critical section in dhd_open and dhdsdio_probe
+#endif /* MULTIPLE_SUPPLICANT */
+
+ DHD_TRACE(("%s:Exit - SUCCESS \n", __FUNCTION__));
+ return 0; /* return SUCCESS */
+
+ } while (0);
+	/* unwind the initialization in reverse order on error */
+
+ if (bus)
+ dhdpcie_bus_release(bus);
+
+#ifdef BCMPCIE_OOB_HOST_WAKE
+ if (dhdpcie_osinfo) {
+ MFREE(osh, dhdpcie_osinfo, sizeof(dhdpcie_os_info_t));
+ }
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+
+#ifdef USE_SMMU_ARCH_MSM
+ if (dhdpcie_smmu_info) {
+ MFREE(osh, dhdpcie_smmu_info, sizeof(dhdpcie_smmu_info_t));
+ dhdpcie_info->smmu_cxt = NULL;
+ }
+#endif /* USE_SMMU_ARCH_MSM */
+
+ if (dhdpcie_info)
+ dhdpcie_detach(dhdpcie_info);
+ pci_disable_device(pdev);
+ if (osh)
+ osl_detach(osh);
+ if (adapter != NULL) {
+ adapter->bus_type = -1;
+ adapter->bus_num = -1;
+ adapter->slot_num = -1;
+ }
+
+ dhdpcie_init_succeeded = FALSE;
+
+ DHD_TRACE(("%s:Exit - FAILURE \n", __FUNCTION__));
+
+ return -1; /* return FAILURE */
+}
+
+/* Free Linux irq */
+void
+dhdpcie_free_irq(dhd_bus_t *bus)
+{
+ struct pci_dev *pdev = NULL;
+
+ DHD_TRACE(("%s: freeing up the IRQ\n", __FUNCTION__));
+ if (bus) {
+ pdev = bus->dev;
+ if (bus->irq_registered) {
+#if defined(SET_PCIE_IRQ_CPU_CORE) && defined(CONFIG_ARCH_SM8150)
+ /* clean up the affinity_hint before
+ * the unregistration of PCIe irq
+ */
+ (void)irq_set_affinity_hint(pdev->irq, NULL);
+#endif /* SET_PCIE_IRQ_CPU_CORE && CONFIG_ARCH_SM8150 */
+ free_irq(pdev->irq, bus);
+ bus->irq_registered = FALSE;
+ if (bus->d2h_intr_method == PCIE_MSI) {
+ dhdpcie_disable_msi(pdev);
+ }
+ } else {
+ DHD_ERROR(("%s: PCIe IRQ is not registered\n", __FUNCTION__));
+ }
+ }
+ DHD_TRACE(("%s: Exit\n", __FUNCTION__));
+ return;
+}
+
+/*
+
+Name: dhdpcie_isr
+
+Parameters:
+
+1: IN int irq -- interrupt vector
+2: IN void *arg -- handle to private data structure
+
+Return value:
+
+irqreturn_t - IRQ_HANDLED
+
+Description:
+The interrupt service routine checks the status register, disables the
+interrupt, and queues the DPC if mailbox interrupts are raised.
+*/
+
+irqreturn_t
+dhdpcie_isr(int irq, void *arg)
+{
+ dhd_bus_t *bus = (dhd_bus_t*)arg;
+ bus->isr_entry_time = OSL_LOCALTIME_NS();
+ if (!dhdpcie_bus_isr(bus)) {
+ DHD_LOG_MEM(("%s: dhdpcie_bus_isr returns with FALSE\n", __FUNCTION__));
+ }
+ bus->isr_exit_time = OSL_LOCALTIME_NS();
+ return IRQ_HANDLED;
+}
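+
+/*
+ * A minimal sketch of how an ISR like this is typically registered. The
+ * driver's real hookup lives in dhdpcie_request_irq() (not shown here),
+ * so the flags and name below are illustrative assumptions; the dev_id
+ * matches the free_irq(pdev->irq, bus) call in dhdpcie_free_irq() above.
+ */
+#if 0
+static int
+example_hook_isr(dhdpcie_info_t *dhdpcie_info)
+{
+	struct pci_dev *pdev = dhdpcie_info->dev;
+
+	return request_irq(pdev->irq, dhdpcie_isr, IRQF_SHARED,
+		"dhdpcie", dhdpcie_info->bus);
+}
+#endif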
+
+int
+dhdpcie_disable_irq_nosync(dhd_bus_t *bus)
+{
+ struct pci_dev *dev;
+ if ((bus == NULL) || (bus->dev == NULL)) {
+ DHD_ERROR(("%s: bus or bus->dev is NULL\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ dev = bus->dev;
+ disable_irq_nosync(dev->irq);
+ return BCME_OK;
+}
+
+int
+dhdpcie_disable_irq(dhd_bus_t *bus)
+{
+ struct pci_dev *dev;
+ if ((bus == NULL) || (bus->dev == NULL)) {
+ DHD_ERROR(("%s: bus or bus->dev is NULL\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ dev = bus->dev;
+ disable_irq(dev->irq);
+ return BCME_OK;
+}
+
+int
+dhdpcie_enable_irq(dhd_bus_t *bus)
+{
+ struct pci_dev *dev;
+ if ((bus == NULL) || (bus->dev == NULL)) {
+ DHD_ERROR(("%s: bus or bus->dev is NULL\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ dev = bus->dev;
+ enable_irq(dev->irq);
+ return BCME_OK;
+}
+
+int
+dhdpcie_irq_disabled(dhd_bus_t *bus)
+{
+ struct irq_desc *desc = irq_to_desc(bus->dev->irq);
+	/* depth is zero if the IRQ is enabled */
+ return desc->depth;
+}
+
+#if defined(CONFIG_ARCH_EXYNOS)
+int pcie_ch_num = EXYNOS_PCIE_CH_NUM;
+#endif /* CONFIG_ARCH_EXYNOS */
+
+int
+dhdpcie_start_host_dev(dhd_bus_t *bus)
+{
+ int ret = 0;
+#ifdef CONFIG_ARCH_MSM
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+ int options = 0;
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+#endif /* CONFIG_ARCH_MSM */
+ DHD_TRACE(("%s Enter:\n", __FUNCTION__));
+
+ if (bus == NULL) {
+ return BCME_ERROR;
+ }
+
+ if (bus->dev == NULL) {
+ return BCME_ERROR;
+ }
+
+#ifdef CONFIG_ARCH_EXYNOS
+ exynos_pcie_pm_resume(pcie_ch_num);
+#endif /* CONFIG_ARCH_EXYNOS */
+#ifdef CONFIG_ARCH_MSM
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+ if (bus->no_cfg_restore) {
+ options = MSM_PCIE_CONFIG_NO_CFG_RESTORE;
+ }
+ ret = msm_pcie_pm_control(MSM_PCIE_RESUME, bus->dev->bus->number,
+ bus->dev, NULL, options);
+ if (bus->no_cfg_restore && !ret) {
+ msm_pcie_recover_config(bus->dev);
+ bus->no_cfg_restore = 0;
+ }
+#else
+ ret = msm_pcie_pm_control(MSM_PCIE_RESUME, bus->dev->bus->number,
+ bus->dev, NULL, 0);
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+#endif /* CONFIG_ARCH_MSM */
+#ifdef CONFIG_ARCH_TEGRA
+ ret = tegra_pcie_pm_resume();
+#endif /* CONFIG_ARCH_TEGRA */
+
+ if (ret) {
+ DHD_ERROR(("%s Failed to bring up PCIe link\n", __FUNCTION__));
+ goto done;
+ }
+
+done:
+ DHD_TRACE(("%s Exit:\n", __FUNCTION__));
+ return ret;
+}
+
+int
+dhdpcie_stop_host_dev(dhd_bus_t *bus)
+{
+ int ret = 0;
+#ifdef CONFIG_ARCH_MSM
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+ int options = 0;
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+#endif /* CONFIG_ARCH_MSM */
+
+ DHD_TRACE(("%s Enter:\n", __FUNCTION__));
+
+ if (bus == NULL) {
+ return BCME_ERROR;
+ }
+
+ if (bus->dev == NULL) {
+ return BCME_ERROR;
+ }
+
+#ifdef CONFIG_ARCH_EXYNOS
+ exynos_pcie_pm_suspend(pcie_ch_num);
+#endif /* CONFIG_ARCH_EXYNOS */
+#ifdef CONFIG_ARCH_MSM
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+ if (bus->no_cfg_restore) {
+ options = MSM_PCIE_CONFIG_NO_CFG_RESTORE | MSM_PCIE_CONFIG_LINKDOWN;
+ }
+
+ ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, bus->dev->bus->number,
+ bus->dev, NULL, options);
+#else
+ ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, bus->dev->bus->number,
+ bus->dev, NULL, 0);
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+#endif /* CONFIG_ARCH_MSM */
+#ifdef CONFIG_ARCH_TEGRA
+ ret = tegra_pcie_pm_suspend();
+#endif /* CONFIG_ARCH_TEGRA */
+ if (ret) {
+ DHD_ERROR(("Failed to stop PCIe link\n"));
+ goto done;
+ }
+done:
+ DHD_TRACE(("%s Exit:\n", __FUNCTION__));
+ return ret;
+}
+
+int
+dhdpcie_disable_device(dhd_bus_t *bus)
+{
+ DHD_TRACE(("%s Enter:\n", __FUNCTION__));
+
+ if (bus == NULL) {
+ return BCME_ERROR;
+ }
+
+ if (bus->dev == NULL) {
+ return BCME_ERROR;
+ }
+
+ if (pci_is_enabled(bus->dev))
+ pci_disable_device(bus->dev);
+
+ return 0;
+}
+
+int
+dhdpcie_enable_device(dhd_bus_t *bus)
+{
+ int ret = BCME_ERROR;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+ dhdpcie_info_t *pch;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
+
+ DHD_TRACE(("%s Enter:\n", __FUNCTION__));
+
+ if (bus == NULL) {
+ return BCME_ERROR;
+ }
+
+ if (bus->dev == NULL) {
+ return BCME_ERROR;
+ }
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+ pch = pci_get_drvdata(bus->dev);
+ if (pch == NULL) {
+ return BCME_ERROR;
+ }
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) && \
+ (LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0)) && !defined(CONFIG_SOC_EXYNOS8890)
+ /* Updated with pci_load_and_free_saved_state to compatible
+ * with Kernel version 3.14.0 to 3.18.41.
+ */
+ pci_load_and_free_saved_state(bus->dev, &pch->default_state);
+ pch->default_state = pci_store_saved_state(bus->dev);
+#else
+ pci_load_saved_state(bus->dev, pch->default_state);
+#endif /* LINUX_VERSION >= 3.14.0 && LINUX_VERSION < 3.19.0 && !CONFIG_SOC_EXYNOS8890 */
+
+ /* Check if Device ID is valid */
+ if (bus->dev->state_saved) {
+ uint32 vid, saved_vid;
+ pci_read_config_dword(bus->dev, PCI_CFG_VID, &vid);
+ saved_vid = bus->dev->saved_config_space[PCI_CFG_VID];
+ if (vid != saved_vid) {
+ DHD_ERROR(("%s: VID(0x%x) is different from saved VID(0x%x) "
+ "Skip the bus init\n", __FUNCTION__, vid, saved_vid));
+ bus->no_bus_init = TRUE;
+ /* Check if the PCIe link is down */
+ if (vid == (uint32)-1) {
+ bus->is_linkdown = 1;
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
+ bus->no_cfg_restore = TRUE;
+#endif /* CONFIG_ARCH_MSM */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+ }
+ return BCME_ERROR;
+ }
+ }
+
+ pci_restore_state(bus->dev);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) */
+
+ ret = pci_enable_device(bus->dev);
+ if (ret) {
+ pci_disable_device(bus->dev);
+ } else {
+ pci_set_master(bus->dev);
+ }
+
+ return ret;
+}
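+
+/*
+ * The VID check above uses a common PCIe convention: configuration reads
+ * on a downed link return all 1s. A standalone sketch of that test, using
+ * the standard PCI_VENDOR_ID offset (illustrative only; the driver reads
+ * a dword at its own PCI_CFG_VID offset):
+ */
+#if 0
+static bool
+example_pcie_link_is_down(struct pci_dev *pdev)
+{
+	u32 vid;
+
+	pci_read_config_dword(pdev, PCI_VENDOR_ID, &vid);
+	return (vid == (u32)-1); /* 0xffffffff => link down */
+}
+#endif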
+
+int
+dhdpcie_alloc_resource(dhd_bus_t *bus)
+{
+ dhdpcie_info_t *dhdpcie_info;
+ phys_addr_t bar0_addr, bar1_addr;
+ ulong bar1_size;
+
+ do {
+ if (bus == NULL) {
+ DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
+ break;
+ }
+
+ if (bus->dev == NULL) {
+ DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
+ break;
+ }
+
+ dhdpcie_info = pci_get_drvdata(bus->dev);
+ if (dhdpcie_info == NULL) {
+ DHD_ERROR(("%s: dhdpcie_info is NULL\n", __FUNCTION__));
+ break;
+ }
+
+ bar0_addr = pci_resource_start(bus->dev, 0); /* Bar-0 mapped address */
+ bar1_addr = pci_resource_start(bus->dev, 2); /* Bar-1 mapped address */
+
+ /* read Bar-1 mapped memory range */
+ bar1_size = pci_resource_len(bus->dev, 2);
+
+ if ((bar1_size == 0) || (bar1_addr == 0)) {
+ printf("%s: BAR1 Not enabled for this device size(%ld),"
+ " addr(0x"PRINTF_RESOURCE")\n",
+ __FUNCTION__, bar1_size, bar1_addr);
+ break;
+ }
+
+ dhdpcie_info->regs = (volatile char *) REG_MAP(bar0_addr, DONGLE_REG_MAP_SIZE);
+ if (!dhdpcie_info->regs) {
+			DHD_ERROR(("%s: ioremap() for regs failed\n", __FUNCTION__));
+ break;
+ }
+
+ bus->regs = dhdpcie_info->regs;
+ dhdpcie_info->bar1_size =
+ (bar1_size > DONGLE_TCM_MAP_SIZE) ? bar1_size : DONGLE_TCM_MAP_SIZE;
+ dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, dhdpcie_info->bar1_size);
+ if (!dhdpcie_info->tcm) {
+			DHD_ERROR(("%s: ioremap() for tcm failed\n", __FUNCTION__));
+ REG_UNMAP(dhdpcie_info->regs);
+ bus->regs = NULL;
+ break;
+ }
+
+ bus->tcm = dhdpcie_info->tcm;
+ bus->bar1_size = dhdpcie_info->bar1_size;
+
+ DHD_TRACE(("%s:Phys addr : reg space = %p base addr 0x"PRINTF_RESOURCE" \n",
+ __FUNCTION__, dhdpcie_info->regs, bar0_addr));
+ DHD_TRACE(("%s:Phys addr : tcm_space = %p base addr 0x"PRINTF_RESOURCE" \n",
+ __FUNCTION__, dhdpcie_info->tcm, bar1_addr));
+
+ return 0;
+ } while (0);
+
+ return BCME_ERROR;
+}
+
+void
+dhdpcie_free_resource(dhd_bus_t *bus)
+{
+ dhdpcie_info_t *dhdpcie_info;
+
+ if (bus == NULL) {
+ DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ if (bus->dev == NULL) {
+ DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ dhdpcie_info = pci_get_drvdata(bus->dev);
+ if (dhdpcie_info == NULL) {
+ DHD_ERROR(("%s: dhdpcie_info is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ if (bus->regs) {
+ REG_UNMAP(dhdpcie_info->regs);
+ bus->regs = NULL;
+ }
+
+ if (bus->tcm) {
+ REG_UNMAP(dhdpcie_info->tcm);
+ bus->tcm = NULL;
+ }
+}
+
+int
+dhdpcie_bus_request_irq(struct dhd_bus *bus)
+{
+ dhdpcie_info_t *dhdpcie_info;
+ int ret = 0;
+
+ if (bus == NULL) {
+ DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ if (bus->dev == NULL) {
+ DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ dhdpcie_info = pci_get_drvdata(bus->dev);
+ if (dhdpcie_info == NULL) {
+ DHD_ERROR(("%s: dhdpcie_info is NULL\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ if (bus->intr) {
+ /* Register interrupt callback, but mask it (not operational yet). */
+ DHD_INTR(("%s: Registering and masking interrupts\n", __FUNCTION__));
+ bus->intr_enabled = FALSE;
+ dhdpcie_bus_intr_disable(bus);
+ ret = dhdpcie_request_irq(dhdpcie_info);
+ if (ret) {
+ DHD_ERROR(("%s: request_irq() failed, ret=%d\n",
+ __FUNCTION__, ret));
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+#ifdef BCMPCIE_OOB_HOST_WAKE
+#ifdef CONFIG_BCMDHD_GET_OOB_STATE
+extern int dhd_get_wlan_oob_gpio(void);
+#endif /* CONFIG_BCMDHD_GET_OOB_STATE */
+
+int dhdpcie_get_oob_irq_level(void)
+{
+ int gpio_level;
+
+#ifdef CONFIG_BCMDHD_GET_OOB_STATE
+ gpio_level = dhd_get_wlan_oob_gpio();
+#else
+ gpio_level = BCME_UNSUPPORTED;
+#endif /* CONFIG_BCMDHD_GET_OOB_STATE */
+ return gpio_level;
+}
+
+int dhdpcie_get_oob_irq_status(struct dhd_bus *bus)
+{
+ dhdpcie_info_t *pch;
+ dhdpcie_os_info_t *dhdpcie_osinfo;
+
+ if (bus == NULL) {
+ DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
+ return 0;
+ }
+
+ if (bus->dev == NULL) {
+ DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
+ return 0;
+ }
+
+ pch = pci_get_drvdata(bus->dev);
+ if (pch == NULL) {
+ DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__));
+ return 0;
+ }
+
+ dhdpcie_osinfo = (dhdpcie_os_info_t *)pch->os_cxt;
+
+ return dhdpcie_osinfo ? dhdpcie_osinfo->oob_irq_enabled : 0;
+}
+
+int dhdpcie_get_oob_irq_num(struct dhd_bus *bus)
+{
+ dhdpcie_info_t *pch;
+ dhdpcie_os_info_t *dhdpcie_osinfo;
+
+ if (bus == NULL) {
+ DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
+ return 0;
+ }
+
+ if (bus->dev == NULL) {
+ DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
+ return 0;
+ }
+
+ pch = pci_get_drvdata(bus->dev);
+ if (pch == NULL) {
+ DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__));
+ return 0;
+ }
+
+ dhdpcie_osinfo = (dhdpcie_os_info_t *)pch->os_cxt;
+
+ return dhdpcie_osinfo ? dhdpcie_osinfo->oob_irq_num : 0;
+}
+
+void dhdpcie_oob_intr_set(dhd_bus_t *bus, bool enable)
+{
+ unsigned long flags;
+ dhdpcie_info_t *pch;
+ dhdpcie_os_info_t *dhdpcie_osinfo;
+
+ if (bus == NULL) {
+ DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ if (bus->dev == NULL) {
+ DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ pch = pci_get_drvdata(bus->dev);
+ if (pch == NULL) {
+ DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ dhdpcie_osinfo = (dhdpcie_os_info_t *)pch->os_cxt;
+ DHD_OOB_IRQ_LOCK(&dhdpcie_osinfo->oob_irq_spinlock, flags);
+ if ((dhdpcie_osinfo->oob_irq_enabled != enable) &&
+ (dhdpcie_osinfo->oob_irq_num > 0)) {
+ if (enable) {
+ enable_irq(dhdpcie_osinfo->oob_irq_num);
+ bus->oob_intr_enable_count++;
+ bus->last_oob_irq_enable_time = OSL_LOCALTIME_NS();
+ } else {
+ disable_irq_nosync(dhdpcie_osinfo->oob_irq_num);
+ bus->oob_intr_disable_count++;
+ bus->last_oob_irq_disable_time = OSL_LOCALTIME_NS();
+ }
+ dhdpcie_osinfo->oob_irq_enabled = enable;
+ }
+ DHD_OOB_IRQ_UNLOCK(&dhdpcie_osinfo->oob_irq_spinlock, flags);
+}
+
+#if defined(DHD_USE_SPIN_LOCK_BH) && !defined(DHD_USE_PCIE_OOB_THREADED_IRQ)
+#error "Cannot enable DHD_USE_SPIN_LOCK_BH without enabling DHD_USE_PCIE_OOB_THREADED_IRQ"
+#endif /* DHD_USE_SPIN_LOCK_BH && !DHD_USE_PCIE_OOB_THREADED_IRQ */
+
+#ifdef DHD_USE_PCIE_OOB_THREADED_IRQ
+static irqreturn_t wlan_oob_irq_isr(int irq, void *data)
+{
+ dhd_bus_t *bus = (dhd_bus_t *)data;
+ DHD_TRACE(("%s: IRQ ISR\n", __FUNCTION__));
+ bus->last_oob_irq_isr_time = OSL_LOCALTIME_NS();
+ return IRQ_WAKE_THREAD;
+}
+#endif /* DHD_USE_PCIE_OOB_THREADED_IRQ */
+
+static irqreturn_t wlan_oob_irq(int irq, void *data)
+{
+ dhd_bus_t *bus;
+ bus = (dhd_bus_t *)data;
+ dhdpcie_oob_intr_set(bus, FALSE);
+#ifdef DHD_USE_PCIE_OOB_THREADED_IRQ
+ DHD_TRACE(("%s: IRQ Thread\n", __FUNCTION__));
+ bus->last_oob_irq_thr_time = OSL_LOCALTIME_NS();
+#else
+ DHD_TRACE(("%s: IRQ ISR\n", __FUNCTION__));
+ bus->last_oob_irq_isr_time = OSL_LOCALTIME_NS();
+#endif /* DHD_USE_PCIE_OOB_THREADED_IRQ */
+
+ if (bus->dhd->up == 0) {
+		DHD_ERROR(("%s: ########### IRQ received while dhd pub up is 0 ############\n",
+ __FUNCTION__));
+ }
+
+ bus->oob_intr_count++;
+#ifdef DHD_WAKE_STATUS
+#ifdef DHD_PCIE_RUNTIMEPM
+	/* Avoid counting wakeups caused by Runtime PM */
+ if (bus->chk_pm)
+#endif /* DHD_PCIE_RUNTIMEPM */
+ {
+ bcmpcie_set_get_wake(bus, 1);
+ }
+#endif /* DHD_WAKE_STATUS */
+#ifdef DHD_PCIE_RUNTIMEPM
+ dhdpcie_runtime_bus_wake(bus->dhd, FALSE, wlan_oob_irq);
+#endif /* DHD_PCIE_RUNTIMEPM */
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+ dhd_bus_wakeup_work(bus->dhd);
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+ /* Hold wakelock if bus_low_power_state is
+ * DHD_BUS_D3_INFORM_SENT OR DHD_BUS_D3_ACK_RECIEVED
+ */
+ if (bus->dhd->up && DHD_CHK_BUS_IN_LPS(bus)) {
+ DHD_OS_OOB_IRQ_WAKE_LOCK_TIMEOUT(bus->dhd, OOB_WAKE_LOCK_TIMEOUT);
+ }
+ return IRQ_HANDLED;
+}
+
+int dhdpcie_oob_intr_register(dhd_bus_t *bus)
+{
+ int err = 0;
+ dhdpcie_info_t *pch;
+ dhdpcie_os_info_t *dhdpcie_osinfo;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+ if (bus == NULL) {
+ DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
+ return -EINVAL;
+ }
+
+ if (bus->dev == NULL) {
+ DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
+ return -EINVAL;
+ }
+
+ pch = pci_get_drvdata(bus->dev);
+ if (pch == NULL) {
+ DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__));
+ return -EINVAL;
+ }
+
+ dhdpcie_osinfo = (dhdpcie_os_info_t *)pch->os_cxt;
+ if (dhdpcie_osinfo->oob_irq_registered) {
+ DHD_ERROR(("%s: irq is already registered\n", __FUNCTION__));
+ return -EBUSY;
+ }
+
+ if (dhdpcie_osinfo->oob_irq_num > 0) {
+ printf("%s OOB irq=%d flags=0x%X\n", __FUNCTION__,
+ (int)dhdpcie_osinfo->oob_irq_num,
+ (int)dhdpcie_osinfo->oob_irq_flags);
+#ifdef DHD_USE_PCIE_OOB_THREADED_IRQ
+ err = request_threaded_irq(dhdpcie_osinfo->oob_irq_num,
+ wlan_oob_irq_isr, wlan_oob_irq,
+ dhdpcie_osinfo->oob_irq_flags, "dhdpcie_host_wake",
+ bus);
+#else
+ err = request_irq(dhdpcie_osinfo->oob_irq_num, wlan_oob_irq,
+ dhdpcie_osinfo->oob_irq_flags, "dhdpcie_host_wake",
+ bus);
+#endif /* DHD_USE_PCIE_OOB_THREADED_IRQ */
+ if (err) {
+ DHD_ERROR(("%s: request_irq failed with %d\n",
+ __FUNCTION__, err));
+ return err;
+ }
+#if defined(DISABLE_WOWLAN)
+ printf("%s: disable_irq_wake\n", __FUNCTION__);
+ dhdpcie_osinfo->oob_irq_wake_enabled = FALSE;
+#else
+ printf("%s: enable_irq_wake\n", __FUNCTION__);
+ err = enable_irq_wake(dhdpcie_osinfo->oob_irq_num);
+ if (!err) {
+ dhdpcie_osinfo->oob_irq_wake_enabled = TRUE;
+ } else
+ printf("%s: enable_irq_wake failed with %d\n", __FUNCTION__, err);
+#endif
+ dhdpcie_osinfo->oob_irq_enabled = TRUE;
+ }
+
+ dhdpcie_osinfo->oob_irq_registered = TRUE;
+
+ return 0;
+}
+
+void dhdpcie_oob_intr_unregister(dhd_bus_t *bus)
+{
+ int err = 0;
+ dhdpcie_info_t *pch;
+ dhdpcie_os_info_t *dhdpcie_osinfo;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+ if (bus == NULL) {
+ DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ if (bus->dev == NULL) {
+ DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ pch = pci_get_drvdata(bus->dev);
+ if (pch == NULL) {
+ DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ dhdpcie_osinfo = (dhdpcie_os_info_t *)pch->os_cxt;
+ if (!dhdpcie_osinfo->oob_irq_registered) {
+ DHD_ERROR(("%s: irq is not registered\n", __FUNCTION__));
+ return;
+ }
+ if (dhdpcie_osinfo->oob_irq_num > 0) {
+ if (dhdpcie_osinfo->oob_irq_wake_enabled) {
+ err = disable_irq_wake(dhdpcie_osinfo->oob_irq_num);
+ if (!err) {
+ dhdpcie_osinfo->oob_irq_wake_enabled = FALSE;
+ }
+ }
+ if (dhdpcie_osinfo->oob_irq_enabled) {
+ disable_irq(dhdpcie_osinfo->oob_irq_num);
+ dhdpcie_osinfo->oob_irq_enabled = FALSE;
+ }
+ free_irq(dhdpcie_osinfo->oob_irq_num, bus);
+ }
+ dhdpcie_osinfo->oob_irq_registered = FALSE;
+}
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+
+#ifdef PCIE_OOB
+void dhdpcie_oob_init(dhd_bus_t *bus)
+{
+ /* XXX this should be passed in as a command line parameter */
+ gpio_handle_val = get_handle(OOB_PORT);
+ if (gpio_handle_val < 0)
+ {
+ DHD_ERROR(("%s: Could not get GPIO handle.\n", __FUNCTION__));
+ ASSERT(FALSE);
+ }
+
+ gpio_direction = 0;
+ ftdi_set_bitmode(gpio_handle_val, 0, BITMODE_BITBANG);
+
+ /* Note BT core is also enabled here */
+ gpio_port = 1 << BIT_WL_REG_ON | 1 << BIT_BT_REG_ON | 1 << DEVICE_WAKE;
+ gpio_write_port(gpio_handle_val, gpio_port);
+
+ gpio_direction = 1 << BIT_WL_REG_ON | 1 << BIT_BT_REG_ON | 1 << DEVICE_WAKE;
+ ftdi_set_bitmode(gpio_handle_val, gpio_direction, BITMODE_BITBANG);
+
+ bus->oob_enabled = TRUE;
+ bus->oob_presuspend = FALSE;
+
+ /* drive the Device_Wake GPIO low on startup */
+ bus->device_wake_state = TRUE;
+ dhd_bus_set_device_wake(bus, FALSE);
+ dhd_bus_doorbell_timeout_reset(bus);
+
+}
+
+void
+dhd_oob_set_bt_reg_on(struct dhd_bus *bus, bool val)
+{
+	DHD_INFO(("Set BT_REG_ON to %d\n", val));
+ if (val)
+ {
+ gpio_port = gpio_port | (1 << BIT_BT_REG_ON);
+ gpio_write_port(gpio_handle_val, gpio_port);
+ } else {
+ gpio_port = gpio_port & (0xff ^ (1 << BIT_BT_REG_ON));
+ gpio_write_port(gpio_handle_val, gpio_port);
+ }
+}
+
+int
+dhd_oob_get_bt_reg_on(struct dhd_bus *bus)
+{
+ int ret;
+ uint8 val;
+ ret = gpio_read_port(gpio_handle_val, &val);
+
+ if (ret < 0) {
+ /* XXX handle error properly */
+ DHD_ERROR(("gpio_read_port returns %d\n", ret));
+ return ret;
+ }
+
+ if (val & (1 << BIT_BT_REG_ON))
+ {
+ ret = 1;
+ } else {
+ ret = 0;
+ }
+
+ return ret;
+}
+
+int
+dhd_os_oob_set_device_wake(struct dhd_bus *bus, bool val)
+{
+ if (bus->device_wake_state != val)
+ {
+ DHD_INFO(("Set Device_Wake to %d\n", val));
+
+ if (bus->oob_enabled && !bus->oob_presuspend)
+ {
+ if (val)
+ {
+ gpio_port = gpio_port | (1 << DEVICE_WAKE);
+ gpio_write_port_non_block(gpio_handle_val, gpio_port);
+ } else {
+ gpio_port = gpio_port & (0xff ^ (1 << DEVICE_WAKE));
+ gpio_write_port_non_block(gpio_handle_val, gpio_port);
+ }
+ }
+
+ bus->device_wake_state = val;
+ }
+ return BCME_OK;
+}
+
+INLINE void
+dhd_os_ib_set_device_wake(struct dhd_bus *bus, bool val)
+{
+	/* TODO: The in-band implementation of Device_Wake is currently not
+	 * supported; this function is left empty so it can be used to
+	 * support it later.
+	 */
+}
+#endif /* PCIE_OOB */
+
+#ifdef DHD_PCIE_RUNTIMEPM
+bool dhd_runtimepm_state(dhd_pub_t *dhd)
+{
+ dhd_bus_t *bus;
+ unsigned long flags;
+ bus = dhd->bus;
+
+ DHD_GENERAL_LOCK(dhd, flags);
+ bus->idlecount++;
+
+ DHD_TRACE(("%s : Enter \n", __FUNCTION__));
+
+ if (dhd_query_bus_erros(dhd)) {
+		/* Because of bus_error/dongle_trap etc., the driver does not
+		 * allow entering suspend; return FALSE.
+		 */
+ DHD_GENERAL_UNLOCK(dhd, flags);
+ return FALSE;
+ }
+
+ if ((bus->idletime > 0) && (bus->idlecount >= bus->idletime)) {
+ bus->idlecount = 0;
+ if (DHD_BUS_BUSY_CHECK_IDLE(dhd) && !DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd) &&
+ !DHD_CHECK_CFG_IN_PROGRESS(dhd) && !dhd_os_check_wakelock_all(bus->dhd)) {
+ DHD_RPM(("%s: DHD Idle state!! - idletime :%d, wdtick :%d \n",
+ __FUNCTION__, bus->idletime, dhd_runtimepm_ms));
+ bus->bus_wake = 0;
+ DHD_BUS_BUSY_SET_RPM_SUSPEND_IN_PROGRESS(dhd);
+ bus->runtime_resume_done = FALSE;
+ /* stop all interface network queue. */
+ dhd_bus_stop_queue(bus);
+ DHD_GENERAL_UNLOCK(dhd, flags);
+			/* If RPM suspend fails, return FALSE so it is retried */
+ if (dhdpcie_set_suspend_resume(bus, TRUE)) {
+ DHD_ERROR(("%s: exit with wakelock \n", __FUNCTION__));
+ DHD_GENERAL_LOCK(dhd, flags);
+ DHD_BUS_BUSY_CLEAR_RPM_SUSPEND_IN_PROGRESS(dhd);
+ dhd_os_busbusy_wake(bus->dhd);
+ bus->runtime_resume_done = TRUE;
+				/* Without this, the NET TX queue can get stuck */
+ dhd_bus_start_queue(bus);
+ DHD_GENERAL_UNLOCK(dhd, flags);
+ if (bus->dhd->rx_pending_due_to_rpm) {
+ /* Reschedule tasklet to process Rx frames */
+ DHD_ERROR(("%s: Schedule DPC to process pending"
+ " Rx packets\n", __FUNCTION__));
+ /* irq will be enabled at the end of dpc */
+ dhd_schedule_delayed_dpc_on_dpc_cpu(bus->dhd, 0);
+ } else {
+ /* enabling host irq deferred from system suspend */
+ if (dhdpcie_irq_disabled(bus)) {
+ dhdpcie_enable_irq(bus);
+						/* increase the interrupt count when it is enabled */
+ bus->resume_intr_enable_count++;
+ }
+ }
+ smp_wmb();
+ wake_up(&bus->rpm_queue);
+ return FALSE;
+ }
+
+ DHD_GENERAL_LOCK(dhd, flags);
+ DHD_BUS_BUSY_CLEAR_RPM_SUSPEND_IN_PROGRESS(dhd);
+ DHD_BUS_BUSY_SET_RPM_SUSPEND_DONE(dhd);
+ /* For making sure NET TX Queue active */
+ dhd_bus_start_queue(bus);
+ DHD_GENERAL_UNLOCK(dhd, flags);
+
+ wait_event(bus->rpm_queue, bus->bus_wake);
+
+ DHD_GENERAL_LOCK(dhd, flags);
+ DHD_BUS_BUSY_CLEAR_RPM_SUSPEND_DONE(dhd);
+ DHD_BUS_BUSY_SET_RPM_RESUME_IN_PROGRESS(dhd);
+ DHD_GENERAL_UNLOCK(dhd, flags);
+
+ dhdpcie_set_suspend_resume(bus, FALSE);
+
+ DHD_GENERAL_LOCK(dhd, flags);
+ DHD_BUS_BUSY_CLEAR_RPM_RESUME_IN_PROGRESS(dhd);
+ dhd_os_busbusy_wake(bus->dhd);
+ /* Inform the wake up context that Resume is over */
+ bus->runtime_resume_done = TRUE;
+ /* For making sure NET TX Queue active */
+ dhd_bus_start_queue(bus);
+ DHD_GENERAL_UNLOCK(dhd, flags);
+
+ if (bus->dhd->rx_pending_due_to_rpm) {
+ /* Reschedule tasklet to process Rx frames */
+ DHD_ERROR(("%s: Schedule DPC to process pending Rx packets\n",
+ __FUNCTION__));
+ bus->rpm_sched_dpc_time = OSL_LOCALTIME_NS();
+ dhd_sched_dpc(bus->dhd);
+ }
+
+ /* enabling host irq deferred from system suspend */
+ if (dhdpcie_irq_disabled(bus)) {
+ dhdpcie_enable_irq(bus);
+				/* increase the interrupt count when it is enabled */
+ bus->resume_intr_enable_count++;
+ }
+
+ smp_wmb();
+ wake_up(&bus->rpm_queue);
+ DHD_RPM(("%s : runtime resume ended \n", __FUNCTION__));
+ return TRUE;
+ } else {
+ DHD_GENERAL_UNLOCK(dhd, flags);
+			/* Since one of the contexts is busy (TX, IOVAR or RX),
+			 * we should not suspend
+			 */
+ DHD_ERROR(("%s : bus is active with dhd_bus_busy_state = 0x%x\n",
+ __FUNCTION__, dhd->dhd_bus_busy_state));
+ return FALSE;
+ }
+ }
+
+ DHD_GENERAL_UNLOCK(dhd, flags);
+ return FALSE;
+} /* dhd_runtimepm_state */
+
+/*
+ * dhd_runtime_bus_wake
+ * TRUE - the wake is related to the runtime PM context
+ * FALSE - it is not involved in the runtime PM context
+ */
+bool dhd_runtime_bus_wake(dhd_bus_t *bus, bool wait, void *func_addr)
+{
+ unsigned long flags;
+ bus->idlecount = 0;
+ DHD_TRACE(("%s : enter\n", __FUNCTION__));
+ if (bus->dhd->up == FALSE) {
+ DHD_INFO(("%s : dhd is not up\n", __FUNCTION__));
+ return FALSE;
+ }
+
+ DHD_GENERAL_LOCK(bus->dhd, flags);
+ if (DHD_BUS_BUSY_CHECK_RPM_ALL(bus->dhd)) {
+		/* Wake up the RPM state thread if suspend is in progress or done */
+ if (DHD_BUS_BUSY_CHECK_RPM_SUSPEND_IN_PROGRESS(bus->dhd) ||
+ DHD_BUS_BUSY_CHECK_RPM_SUSPEND_DONE(bus->dhd)) {
+ bus->bus_wake = 1;
+
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+
+ if (dhd_msg_level & DHD_RPM_VAL)
+ DHD_ERROR_RLMT(("%s: Runtime Resume is called in %pf\n", __FUNCTION__, func_addr));
+ smp_wmb();
+ wake_up(&bus->rpm_queue);
+ /* No need to wake up the RPM state thread */
+ } else if (DHD_BUS_BUSY_CHECK_RPM_RESUME_IN_PROGRESS(bus->dhd)) {
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+ }
+
+		/* If wait is TRUE, the caller waits here until resume is done */
+ if (wait) {
+ if (!wait_event_timeout(bus->rpm_queue, bus->runtime_resume_done,
+ msecs_to_jiffies(RPM_WAKE_UP_TIMEOUT))) {
+ DHD_ERROR(("%s: RPM_WAKE_UP_TIMEOUT error\n", __FUNCTION__));
+ return FALSE;
+ }
+ } else {
+ DHD_INFO(("%s: bus wakeup but no wait until resume done\n", __FUNCTION__));
+ }
+ /* If it is called from RPM context, it returns TRUE */
+ return TRUE;
+ }
+
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+
+ return FALSE;
+}
+
+bool dhdpcie_runtime_bus_wake(dhd_pub_t *dhdp, bool wait, void* func_addr)
+{
+ dhd_bus_t *bus = dhdp->bus;
+ return dhd_runtime_bus_wake(bus, wait, func_addr);
+}
+
+void dhdpcie_block_runtime_pm(dhd_pub_t *dhdp)
+{
+ dhd_bus_t *bus = dhdp->bus;
+ bus->idletime = 0;
+}
+
+bool dhdpcie_is_resume_done(dhd_pub_t *dhdp)
+{
+ dhd_bus_t *bus = dhdp->bus;
+ return bus->runtime_resume_done;
+}
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+struct device * dhd_bus_to_dev(dhd_bus_t *bus)
+{
+ struct pci_dev *pdev;
+ pdev = bus->dev;
+
+ if (pdev)
+ return &pdev->dev;
+ else
+ return NULL;
+}
+
+#ifdef DHD_FW_COREDUMP
+int
+dhd_dongle_mem_dump(void)
+{
+ if (!g_dhd_bus) {
+ DHD_ERROR(("%s: Bus is NULL\n", __FUNCTION__));
+ return -ENODEV;
+ }
+
+ dhd_bus_dump_console_buffer(g_dhd_bus);
+ dhd_prot_debug_info_print(g_dhd_bus->dhd);
+
+ g_dhd_bus->dhd->memdump_enabled = DUMP_MEMFILE_BUGON;
+ g_dhd_bus->dhd->memdump_type = DUMP_TYPE_AP_ABNORMAL_ACCESS;
+
+#ifdef DHD_PCIE_RUNTIMEPM
+ dhdpcie_runtime_bus_wake(g_dhd_bus->dhd, TRUE, __builtin_return_address(0));
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+ dhd_bus_mem_dump(g_dhd_bus->dhd);
+ return 0;
+}
+EXPORT_SYMBOL(dhd_dongle_mem_dump);
+#endif /* DHD_FW_COREDUMP */
+
+#ifdef CONFIG_ARCH_MSM
+void
+dhd_bus_inform_ep_loaded_to_rc(dhd_pub_t *dhdp, bool up)
+{
+ sec_pcie_set_ep_driver_loaded(dhdp->bus->rc_dev, up);
+}
+#endif /* CONFIG_ARCH_MSM */
+
+bool
+dhd_bus_check_driver_up(void)
+{
+ dhd_bus_t *bus;
+ dhd_pub_t *dhdp;
+ bool isup = FALSE;
+
+ bus = (dhd_bus_t *)g_dhd_bus;
+ if (!bus) {
+ DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
+ return isup;
+ }
+
+ dhdp = bus->dhd;
+ if (dhdp) {
+ isup = dhdp->up;
+ }
+
+ return isup;
+}
+EXPORT_SYMBOL(dhd_bus_check_driver_up);
diff --git a/bcmdhd.101.10.361.x/dhd_pktlog.c b/bcmdhd.101.10.361.x/dhd_pktlog.c
new file mode 100755
index 0000000..0d57344
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_pktlog.c
@@ -0,0 +1,1684 @@
+/*
+ * DHD debugability packet logging support
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+#include <typedefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <bcmstdlib_s.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_dbg.h>
+#include <dhd_pktlog.h>
+#include <dhd_wlfc.h>
+
+#ifdef DHD_COMPACT_PKT_LOG
+#include <bcmip.h>
+#include <bcmudp.h>
+#include <bcmdhcp.h>
+#include <bcmarp.h>
+#include <bcmicmp.h>
+#include <bcmtlv.h>
+#include <802.11.h>
+#include <eap.h>
+#include <eapol.h>
+#include <bcmendian.h>
+#include <bcm_l2_filter.h>
+#include <dhd_bitpack.h>
+#include <bcmipv6.h>
+#endif /* DHD_COMPACT_PKT_LOG */
+
+#ifdef DHD_PKT_LOGGING
+#ifndef strtoul
+#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
+#endif /* strtoul */
+extern int wl_pattern_atoh(char *src, char *dst);
+extern int pattern_atoh_len(char *src, char *dst, int len);
+extern wifi_tx_packet_fate __dhd_dbg_map_tx_status_to_pkt_fate(uint16 status);
+
+#ifdef DHD_COMPACT_PKT_LOG
+#define CPKT_LOG_BITS_PER_BYTE 8
+
+#define CPKT_LOG_BIT_LEN_TYPE 4
+
+#define CPKT_LOG_BIT_OFFSET_TS 0
+#define CPKT_LOG_BIT_OFFSET_DIR 5
+#define CPKT_LOG_BIT_OFFSET_TYPE 6
+#define CPKT_LOG_BIT_OFFSET_SUBTYPE 10
+#define CPKT_LOG_BIT_OFFSET_PKT_FATE 18
+
+#define CPKT_LOG_BIT_MASK_TS 0x1f
+#define CPKT_LOG_BIT_MASK_DIR 0x01
+#define CPKT_LOG_BIT_MASK_TYPE 0x0f
+#define CPKT_LOG_BIT_MASK_SUBTYPE 0xff
+#define CPKT_LOG_BIT_MASK_PKT_FATE 0x0f
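+
+/*
+ * Taken together, the offsets and masks above describe a 22-bit layout:
+ * ts(5) | dir(1) | type(4) | subtype(8) | pkt_fate(4). A sketch of how
+ * they could compose one compact entry (illustrative only; the driver's
+ * encoder goes through the dhd_bitpack helpers):
+ */
+#if 0
+static uint32
+example_cpkt_log_pack(uint32 ts_idx, uint32 dir, uint32 type,
+	uint32 subtype, uint32 fate)
+{
+	return ((ts_idx & CPKT_LOG_BIT_MASK_TS) << CPKT_LOG_BIT_OFFSET_TS) |
+		((dir & CPKT_LOG_BIT_MASK_DIR) << CPKT_LOG_BIT_OFFSET_DIR) |
+		((type & CPKT_LOG_BIT_MASK_TYPE) << CPKT_LOG_BIT_OFFSET_TYPE) |
+		((subtype & CPKT_LOG_BIT_MASK_SUBTYPE) <<
+			CPKT_LOG_BIT_OFFSET_SUBTYPE) |
+		((fate & CPKT_LOG_BIT_MASK_PKT_FATE) <<
+			CPKT_LOG_BIT_OFFSET_PKT_FATE);
+}
+#endif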
+
+#define CPKT_LOG_DNS_PORT_CLIENT 53
+#define CPKT_LOG_MDNS_PORT_CLIENT 5353
+
+#define CPKT_LOG_TYPE_DNS 0x0
+#define CPKT_LOG_TYPE_ARP 0x1
+#define CPKT_LOG_TYPE_ICMP_REQ 0x2
+#define CPKT_LOG_TYPE_ICMP_RES 0x3
+#define CPKT_LOG_TYPE_ICMP_UNREACHABLE 0x4
+#define CPKT_LOG_TYPE_DHCP 0x5
+#define CPKT_LOG_TYPE_802_1X 0x6
+#define CPKT_LOG_TYPE_ICMPv6 0x7
+#define CPKT_LOG_TYPE_OTHERS 0xf
+
+#define CPKT_LOG_802_1X_SUBTYPE_IDENTITY 0x0
+#define CPKT_LOG_802_1X_SUBTYPE_TLS 0x1
+#define CPKT_LOG_802_1X_SUBTYPE_TTLS 0x2
+#define CPKT_LOG_802_1X_SUBTYPE_PEAP 0x3
+#define CPKT_LOG_802_1X_SUBTYPE_FAST 0x4
+#define CPKT_LOG_802_1X_SUBTYPE_LEAP 0x5
+#define CPKT_LOG_802_1X_SUBTYPE_PWD 0x6
+#define CPKT_LOG_802_1X_SUBTYPE_SIM 0x7
+#define CPKT_LOG_802_1X_SUBTYPE_AKA 0x8
+#define CPKT_LOG_802_1X_SUBTYPE_AKAP 0x9
+#define CPKT_LOG_802_1X_SUBTYPE_SUCCESS 0xA
+#define CPKT_LOG_802_1X_SUBTYPE_4WAY_M1 0xB
+#define CPKT_LOG_802_1X_SUBTYPE_4WAY_M2 0xC
+#define CPKT_LOG_802_1X_SUBTYPE_4WAY_M3 0xD
+#define CPKT_LOG_802_1X_SUBTYPE_4WAY_M4 0xE
+#define CPKT_LOG_802_1X_SUBTYPE_OTHERS 0xF
+
+#define CPKT_LOG_DHCP_MAGIC_COOKIE_LEN 4
+
+#define CPKT_LOG_ICMP_TYPE_DEST_UNREACHABLE 3
+#define CPKT_LOG_ICMP_TYPE_DEST_UNREACHABLE_IPV4_OFFSET 4
+
+typedef struct dhd_cpkt_log_ts_node {
+ struct rb_node rb;
+
+ uint64 ts_diff; /* key, usec */
+ int idx;
+} dhd_cpkt_log_ts_node_t;
+
+/* Compact Packet Log Timestamp values, unit: uSec */
+const uint64 dhd_cpkt_log_tt_idx[] = {
+ 10000, 50000, 100000, 150000, 300000, 500000, 750000, 1000000, 3000000, 5000000, 7500000,
+ 10000000, 12500000, 15000000, 17500000, 20000000, 22500000, 25000000, 27500000, 30000000,
+ 32500000, 35000000, 37500000, 40000000, 50000000, 75000000, 150000000, 300000000, 400000000,
+ 500000000, 600000000
+};
+#define CPKT_LOG_TT_IDX_ARR_SZ ARRAYSIZE(dhd_cpkt_log_tt_idx)
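+
+/*
+ * One plausible reading of the table above: a packet's timestamp delta is
+ * quantized to the index of the smallest threshold that still covers it.
+ * The driver resolves this through an rb-tree of dhd_cpkt_log_ts_node_t
+ * nodes, so the linear scan below is an illustrative sketch only:
+ */
+#if 0
+static int
+example_cpkt_log_ts_idx(uint64 ts_diff_us)
+{
+	int i;
+
+	for (i = 0; i < (int)CPKT_LOG_TT_IDX_ARR_SZ; i++) {
+		if (ts_diff_us <= dhd_cpkt_log_tt_idx[i])
+			return i;
+	}
+	return (int)CPKT_LOG_TT_IDX_ARR_SZ - 1; /* clamp to largest bucket */
+}
+#endif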
+
+static int dhd_cpkt_log_init_tt(dhd_pub_t *dhdp);
+static void dhd_cpkt_log_deinit_tt(dhd_pub_t *dhdp);
+#endif /* DHD_COMPACT_PKT_LOG */
+
+int
+dhd_os_attach_pktlog(dhd_pub_t *dhdp)
+{
+ dhd_pktlog_t *pktlog;
+
+ if (!dhdp) {
+ DHD_ERROR(("%s(): dhdp is NULL\n", __FUNCTION__));
+ return -EINVAL;
+ }
+
+ pktlog = (dhd_pktlog_t *)MALLOCZ(dhdp->osh, sizeof(dhd_pktlog_t));
+ if (unlikely(!pktlog)) {
+ DHD_ERROR(("%s(): could not allocate memory for - "
+ "dhd_pktlog_t\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ dhdp->pktlog = pktlog;
+ pktlog->dhdp = dhdp;
+
+ OSL_ATOMIC_INIT(dhdp->osh, &pktlog->pktlog_status);
+
+ /* pktlog ring */
+ dhdp->pktlog->pktlog_ring = dhd_pktlog_ring_init(dhdp, MIN_PKTLOG_LEN);
+ dhdp->pktlog->pktlog_filter = dhd_pktlog_filter_init(MAX_DHD_PKTLOG_FILTER_LEN);
+#ifdef DHD_COMPACT_PKT_LOG
+ dhd_cpkt_log_init_tt(dhdp);
+#endif
+
+ DHD_ERROR(("%s(): dhd_os_attach_pktlog attach\n", __FUNCTION__));
+
+ return BCME_OK;
+}
+
+int
+dhd_os_detach_pktlog(dhd_pub_t *dhdp)
+{
+ if (!dhdp || !dhdp->pktlog) {
+ DHD_PKT_LOG(("%s(): dhdp=%p pktlog=%p\n",
+ __FUNCTION__, dhdp, (dhdp ? dhdp->pktlog : NULL)));
+ return -EINVAL;
+ }
+
+ dhd_pktlog_ring_deinit(dhdp, dhdp->pktlog->pktlog_ring);
+ dhd_pktlog_filter_deinit(dhdp->pktlog->pktlog_filter);
+#ifdef DHD_COMPACT_PKT_LOG
+ dhd_cpkt_log_deinit_tt(dhdp);
+#endif /* DHD_COMPACT_PKT_LOG */
+
+	DHD_ERROR(("%s(): pktlog detached\n", __FUNCTION__));
+
+ MFREE(dhdp->osh, dhdp->pktlog, sizeof(dhd_pktlog_t));
+
+ return BCME_OK;
+}
+
+dhd_pktlog_ring_t*
+dhd_pktlog_ring_init(dhd_pub_t *dhdp, int size)
+{
+ dhd_pktlog_ring_t *ring;
+ int i = 0;
+
+ if (!dhdp) {
+ DHD_ERROR(("%s(): dhdp is NULL\n", __FUNCTION__));
+ return NULL;
+ }
+
+ ring = (dhd_pktlog_ring_t *)MALLOCZ(dhdp->osh, sizeof(dhd_pktlog_ring_t));
+ if (unlikely(!ring)) {
+ DHD_ERROR(("%s(): could not allocate memory for - "
+ "dhd_pktlog_ring_t\n", __FUNCTION__));
+ goto fail;
+ }
+
+ dll_init(&ring->ring_info_head);
+ dll_init(&ring->ring_info_free);
+
+ ring->ring_info_mem = (dhd_pktlog_ring_info_t *)MALLOCZ(dhdp->osh,
+ sizeof(dhd_pktlog_ring_info_t) * size);
+ if (unlikely(!ring->ring_info_mem)) {
+ DHD_ERROR(("%s(): could not allocate memory for - "
+ "dhd_pktlog_ring_info_t\n", __FUNCTION__));
+ goto fail;
+ }
+
+ /* initialize free ring_info linked list */
+ for (i = 0; i < size; i++) {
+ dll_append(&ring->ring_info_free, (dll_t *)&ring->ring_info_mem[i].p_info);
+ }
+
+ OSL_ATOMIC_SET(dhdp->osh, &ring->start, TRUE);
+ ring->pktlog_minmize = FALSE;
+ ring->pktlog_len = size;
+ ring->pktcount = 0;
+ ring->dhdp = dhdp;
+ ring->pktlog_ring_lock = osl_spin_lock_init(dhdp->osh);
+
+ DHD_ERROR(("%s(): pktlog ring init success\n", __FUNCTION__));
+
+ return ring;
+fail:
+ if (ring) {
+ MFREE(dhdp->osh, ring, sizeof(dhd_pktlog_ring_t));
+ }
+
+ return NULL;
+}
+
+/* Maximum wait counts */
+#define DHD_PKTLOG_WAIT_MAXCOUNT 1000
+int
+dhd_pktlog_ring_deinit(dhd_pub_t *dhdp, dhd_pktlog_ring_t *ring)
+{
+ int ret = BCME_OK;
+ dhd_pktlog_ring_info_t *ring_info;
+ dll_t *item, *next_p;
+ int waitcounts = 0;
+
+ if (!ring) {
+ DHD_ERROR(("%s(): ring is NULL\n", __FUNCTION__));
+ return -EINVAL;
+ }
+
+ if (!ring->dhdp) {
+ DHD_ERROR(("%s(): dhdp is NULL\n", __FUNCTION__));
+ return -EINVAL;
+ }
+
+ /* stop pkt log */
+ OSL_ATOMIC_SET(dhdp->osh, &ring->start, FALSE);
+
+	/* wait until the TX/RX/TXS contexts are done, max timeout 1 second */
+ while ((waitcounts++ < DHD_PKTLOG_WAIT_MAXCOUNT)) {
+ if (!OSL_ATOMIC_READ(dhdp->osh, &dhdp->pktlog->pktlog_status))
+ break;
+ OSL_SLEEP(1);
+ }
+
+ if (waitcounts >= DHD_PKTLOG_WAIT_MAXCOUNT) {
+ DHD_ERROR(("%s(): pktlog wait timeout pktlog_status : 0x%x \n",
+ __FUNCTION__,
+ OSL_ATOMIC_READ(dhdp->osh, &dhdp->pktlog->pktlog_status)));
+ ASSERT(0);
+ return -EINVAL;
+ }
+
+ /* free ring_info->info.pkt */
+ for (item = dll_head_p(&ring->ring_info_head); !dll_end(&ring->ring_info_head, item);
+ item = next_p) {
+ next_p = dll_next_p(item);
+
+ ring_info = (dhd_pktlog_ring_info_t *)item;
+
+ if (ring_info->info.pkt) {
+ PKTFREE(ring->dhdp->osh, ring_info->info.pkt, TRUE);
+ DHD_PKT_LOG(("%s(): pkt free pos %p\n",
+ __FUNCTION__, ring_info->info.pkt));
+ }
+ }
+
+ if (ring->ring_info_mem) {
+ MFREE(ring->dhdp->osh, ring->ring_info_mem,
+ sizeof(dhd_pktlog_ring_info_t) * ring->pktlog_len);
+ }
+
+ if (ring->pktlog_ring_lock) {
+ osl_spin_lock_deinit(ring->dhdp->osh, ring->pktlog_ring_lock);
+ }
+
+ MFREE(dhdp->osh, ring, sizeof(dhd_pktlog_ring_t));
+
+ DHD_ERROR(("%s(): pktlog ring deinit\n", __FUNCTION__));
+
+ return ret;
+}
+
+/*
+ * dhd_pktlog_ring_add_pkts : add filtered packets into pktlog ring
+ * pktid : in case of RX, pktid is not used (pass DHD_INVALID_PKTID)
+ * direction : 1 - TX / 0 - RX / 2 - RX Wakeup Packet
+ */
+int
+dhd_pktlog_ring_add_pkts(dhd_pub_t *dhdp, void *pkt, void *pktdata, uint32 pktid, uint32 direction)
+{
+ dhd_pktlog_ring_info_t *pkts;
+ dhd_pktlog_ring_t *pktlog_ring;
+ dhd_pktlog_filter_t *pktlog_filter;
+ u64 ts_nsec;
+ uint32 pktlog_case = 0;
+ unsigned long rem_nsec;
+ unsigned long flags = 0;
+
+ /*
+ * dhdp, dhdp->pktlog, dhd->pktlog_ring, pktlog_ring->start
+ * are validated from the DHD_PKTLOG_TX macro
+ */
+
+ pktlog_ring = dhdp->pktlog->pktlog_ring;
+ pktlog_filter = dhdp->pktlog->pktlog_filter;
+
+ if (direction == PKT_TX) {
+ pktlog_case = PKTLOG_TXPKT_CASE;
+ } else if ((direction == PKT_RX) || (direction == PKT_WAKERX)) {
+ pktlog_case = PKTLOG_RXPKT_CASE;
+ }
+
+ if ((direction != PKT_WAKERX) &&
+ dhd_pktlog_filter_matched(pktlog_filter, pktdata, pktlog_case)
+ == FALSE) {
+ return BCME_OK;
+ }
+
+ if (direction == PKT_TX && pktid == DHD_INVALID_PKTID) {
+ DHD_ERROR(("%s : Invalid PKTID \n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ /* get free ring_info and insert to ring_info_head */
+ DHD_PKT_LOG_LOCK(pktlog_ring->pktlog_ring_lock, flags);
+ /* if free_list is empty, use the oldest ring_info */
+ if (dll_empty(&pktlog_ring->ring_info_free)) {
+ pkts = (dhd_pktlog_ring_info_t *)dll_head_p(&pktlog_ring->ring_info_head);
+ dll_delete((dll_t *)pkts);
+ /* free the oldest packet */
+ PKTFREE(pktlog_ring->dhdp->osh, pkts->info.pkt, TRUE);
+ pktlog_ring->pktcount--;
+ } else {
+ pkts = (dhd_pktlog_ring_info_t *)dll_tail_p(&pktlog_ring->ring_info_free);
+ dll_delete((dll_t *)pkts);
+ }
+
+ /* Update packet information */
+ ts_nsec = local_clock();
+ rem_nsec = do_div(ts_nsec, NSEC_PER_SEC);
+
+ pkts->info.pkt = PKTDUP(dhdp->osh, pkt);
+ pkts->info.pkt_len = PKTLEN(dhdp->osh, pkt);
+ pkts->info.driver_ts_sec = (uint32)ts_nsec;
+ pkts->info.driver_ts_usec = (uint32)(rem_nsec/NSEC_PER_USEC);
+ pkts->info.firmware_ts = 0U;
+ pkts->info.payload_type = FRAME_TYPE_ETHERNET_II;
+ pkts->info.direction = direction;
+
+ if (direction == PKT_TX) {
+ pkts->info.pkt_hash = __dhd_dbg_pkt_hash((uintptr_t)pkt, pktid);
+ pkts->tx_fate = TX_PKT_FATE_DRV_QUEUED;
+ } else if (direction == PKT_RX) {
+ pkts->info.pkt_hash = 0U;
+ pkts->rx_fate = RX_PKT_FATE_SUCCESS;
+ } else if (direction == PKT_WAKERX) {
+ pkts->info.pkt_hash = 0U;
+ pkts->rx_fate = RX_PKT_FATE_WAKE_PKT;
+ }
+
+ DHD_PKT_LOG(("%s(): pkt hash %d\n", __FUNCTION__, pkts->info.pkt_hash));
+ DHD_PKT_LOG(("%s(): sec %d usec %d\n", __FUNCTION__,
+ pkts->info.driver_ts_sec, pkts->info.driver_ts_usec));
+
+	/* insert the packet info into pktlog_ring->ring_info_head */
+ dll_append(&pktlog_ring->ring_info_head, (dll_t *)pkts);
+ pktlog_ring->pktcount++;
+ DHD_PKT_LOG_UNLOCK(pktlog_ring->pktlog_ring_lock, flags);
+ return BCME_OK;
+}
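+
+/*
+ * Typical call shapes for the directions documented above. Illustrative
+ * only: in the driver these calls are reached via the DHD_PKTLOG_TX/RX
+ * macros, which also validate dhdp and the pktlog handles.
+ */
+#if 0
+	/* TX path: a valid pktid is required */
+	dhd_pktlog_ring_add_pkts(dhdp, pkt, PKTDATA(dhdp->osh, pkt),
+		pktid, PKT_TX);
+	/* RX path: pktid is unused */
+	dhd_pktlog_ring_add_pkts(dhdp, pkt, PKTDATA(dhdp->osh, pkt),
+		DHD_INVALID_PKTID, PKT_RX);
+#endif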
+
+int
+dhd_pktlog_ring_tx_status(dhd_pub_t *dhdp, void *pkt, void *pktdata, uint32 pktid,
+ uint16 status)
+{
+ dhd_pktlog_ring_info_t *tx_pkt;
+ wifi_tx_packet_fate pkt_fate;
+ uint32 pkt_hash, temp_hash;
+ dhd_pktlog_ring_t *pktlog_ring;
+ dhd_pktlog_filter_t *pktlog_filter;
+ dll_t *item_p, *next_p;
+ unsigned long flags = 0;
+
+#ifdef BDC
+ struct bdc_header *h;
+ BCM_REFERENCE(h);
+#endif /* BDC */
+ /*
+ * dhdp, dhdp->pktlog, dhd->pktlog_ring, pktlog_ring->start
+ * are validated from the DHD_PKTLOG_TXS macro
+ */
+
+ pktlog_ring = dhdp->pktlog->pktlog_ring;
+ pktlog_filter = dhdp->pktlog->pktlog_filter;
+
+ if (dhd_pktlog_filter_matched(pktlog_filter, pktdata,
+ PKTLOG_TXSTATUS_CASE) == FALSE) {
+ return BCME_OK;
+ }
+
+ pkt_hash = __dhd_dbg_pkt_hash((uintptr_t)pkt, pktid);
+ pkt_fate = __dhd_dbg_map_tx_status_to_pkt_fate(status);
+
+	/* find the sent TX packet and add the pkt_fate info */
+ DHD_PKT_LOG_LOCK(pktlog_ring->pktlog_ring_lock, flags);
+	/* Traverse in reverse from the most recent packets */
+ for (item_p = dll_tail_p(&pktlog_ring->ring_info_head);
+ !dll_end(&pktlog_ring->ring_info_head, item_p);
+ item_p = next_p)
+ {
+ if (dll_empty(item_p)) {
+ break;
+ }
+ next_p = dll_prev_p(item_p);
+ tx_pkt = (dhd_pktlog_ring_info_t *)item_p;
+ temp_hash = tx_pkt->info.pkt_hash;
+ if (temp_hash == pkt_hash) {
+ tx_pkt->tx_fate = pkt_fate;
+#ifdef BDC
+ h = (struct bdc_header *)PKTDATA(dhdp->osh, tx_pkt->info.pkt);
+ PKTPULL(dhdp->osh, tx_pkt->info.pkt, BDC_HEADER_LEN);
+ PKTPULL(dhdp->osh, tx_pkt->info.pkt, (h->dataOffset << DHD_WORD_TO_LEN_SHIFT));
+#endif /* BDC */
+ DHD_PKT_LOG(("%s(): Found pkt hash in prev pos\n", __FUNCTION__));
+ break;
+ }
+ }
+ DHD_PKT_LOG_UNLOCK(pktlog_ring->pktlog_ring_lock, flags);
+ return BCME_OK;
+}
+
+dhd_pktlog_filter_t*
+dhd_pktlog_filter_init(int size)
+{
+ int i;
+ gfp_t kflags;
+ uint32 alloc_len;
+ dhd_pktlog_filter_t *filter;
+ dhd_pktlog_filter_info_t *filter_info = NULL;
+
+ kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+
+	/* allocate and initialize the pktmon filter */
+ alloc_len = sizeof(dhd_pktlog_filter_t);
+ filter = (dhd_pktlog_filter_t *)kzalloc(alloc_len, kflags);
+ if (unlikely(!filter)) {
+ DHD_ERROR(("%s(): could not allocate memory for - "
+ "dhd_pktlog_filter_t\n", __FUNCTION__));
+ goto fail;
+ }
+
+ alloc_len = (sizeof(dhd_pktlog_filter_info_t) * size);
+ filter_info = (dhd_pktlog_filter_info_t *)kzalloc(alloc_len, kflags);
+ if (unlikely(!filter_info)) {
+ DHD_ERROR(("%s(): could not allocate memory for - "
+ "dhd_pktlog_filter_info_t\n", __FUNCTION__));
+ goto fail;
+ }
+
+ filter->info = filter_info;
+ filter->list_cnt = 0;
+
+	for (i = 0; i < size; i++) {
+ filter->info[i].id = 0;
+ }
+
+ filter->enable = PKTLOG_TXPKT_CASE | PKTLOG_TXSTATUS_CASE | PKTLOG_RXPKT_CASE;
+
+ DHD_ERROR(("%s(): pktlog filter init success\n", __FUNCTION__));
+
+ return filter;
+fail:
+ if (filter) {
+ kfree(filter);
+ }
+
+ return NULL;
+}
+
+int
+dhd_pktlog_filter_deinit(dhd_pktlog_filter_t *filter)
+{
+ int ret = BCME_OK;
+
+ if (!filter) {
+ DHD_ERROR(("%s(): filter is NULL\n", __FUNCTION__));
+ return -EINVAL;
+ }
+
+ if (filter->info) {
+ kfree(filter->info);
+ }
+ kfree(filter);
+
+ DHD_ERROR(("%s(): pktlog filter deinit\n", __FUNCTION__));
+
+ return ret;
+}
+
+bool
+dhd_pktlog_filter_existed(dhd_pktlog_filter_t *filter, char *arg, uint32 *id)
+{
+ char filter_pattern[MAX_FILTER_PATTERN_LEN];
+ char *p;
+ int i, j;
+ int nchar;
+ int len;
+
+ if (!filter || !arg) {
+ DHD_ERROR(("%s(): filter=%p arg=%p\n", __FUNCTION__, filter, arg));
+ return TRUE;
+ }
+
+ for (i = 0; i < filter->list_cnt; i++) {
+ p = filter_pattern;
+ len = sizeof(filter_pattern);
+
+ nchar = snprintf(p, len, "%d ", filter->info[i].offset);
+ p += nchar;
+ len -= nchar;
+
+ nchar = snprintf(p, len, "0x");
+ p += nchar;
+ len -= nchar;
+
+ for (j = 0; j < filter->info[i].size_bytes; j++) {
+ nchar = snprintf(p, len, "%02x", filter->info[i].mask[j]);
+ p += nchar;
+ len -= nchar;
+ }
+
+ nchar = snprintf(p, len, " 0x");
+ p += nchar;
+ len -= nchar;
+
+ for (j = 0; j < filter->info[i].size_bytes; j++) {
+ nchar = snprintf(p, len, "%02x", filter->info[i].pattern[j]);
+ p += nchar;
+ len -= nchar;
+ }
+
+ if (strlen(arg) < strlen(filter_pattern)) {
+ continue;
+ }
+
+ DHD_PKT_LOG(("%s(): Pattern %s\n", __FUNCTION__, filter_pattern));
+
+ if (strncmp(filter_pattern, arg, strlen(filter_pattern)) == 0) {
+ *id = filter->info[i].id;
+			DHD_ERROR(("%s(): This pattern already exists\n", __FUNCTION__));
+ DHD_ERROR(("%s(): arg %s\n", __FUNCTION__, arg));
+ return TRUE;
+ }
+ }
+
+ return FALSE;
+}
+
+int
+dhd_pktlog_filter_add(dhd_pktlog_filter_t *filter, char *arg)
+{
+ int32 mask_size, pattern_size;
+ char *offset, *bitmask, *pattern;
+ uint32 id = 0;
+
+ if (!filter || !arg) {
+ DHD_ERROR(("%s(): pktlog_filter =%p arg =%p\n", __FUNCTION__, filter, arg));
+ return BCME_ERROR;
+ }
+
+ DHD_PKT_LOG(("%s(): arg %s\n", __FUNCTION__, arg));
+
+ if (dhd_pktlog_filter_existed(filter, arg, &id) == TRUE) {
+		DHD_PKT_LOG(("%s(): This pattern id %d already exists\n", __FUNCTION__, id));
+ return BCME_OK;
+ }
+
+ if (filter->list_cnt >= MAX_DHD_PKTLOG_FILTER_LEN) {
+ DHD_ERROR(("%s(): pktlog filter full\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ if ((offset = bcmstrtok(&arg, " ", 0)) == NULL) {
+ DHD_ERROR(("%s(): offset not found\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ if ((bitmask = bcmstrtok(&arg, " ", 0)) == NULL) {
+ DHD_ERROR(("%s(): bitmask not found\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ if ((pattern = bcmstrtok(&arg, " ", 0)) == NULL) {
+ DHD_ERROR(("%s(): pattern not found\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ /* parse filter bitmask */
+ mask_size = pattern_atoh_len(bitmask,
+ (char *) &filter->info[filter->list_cnt].mask[0],
+ MAX_MASK_PATTERN_FILTER_LEN);
+ if (mask_size == -1) {
+ DHD_ERROR(("Rejecting: %s\n", bitmask));
+ return BCME_ERROR;
+ }
+
+ /* parse filter pattern */
+ pattern_size = pattern_atoh_len(pattern,
+ (char *) &filter->info[filter->list_cnt].pattern[0],
+ MAX_MASK_PATTERN_FILTER_LEN);
+ if (pattern_size == -1) {
+ DHD_ERROR(("Rejecting: %s\n", pattern));
+ return BCME_ERROR;
+ }
+
+ prhex("mask", (char *)&filter->info[filter->list_cnt].mask[0],
+ mask_size);
+ prhex("pattern", (char *)&filter->info[filter->list_cnt].pattern[0],
+ pattern_size);
+
+ if (mask_size != pattern_size) {
+ DHD_ERROR(("%s(): Mask and pattern not the same size\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ filter->info[filter->list_cnt].offset = strtoul(offset, NULL, 0);
+ filter->info[filter->list_cnt].size_bytes = mask_size;
+ filter->info[filter->list_cnt].id = filter->list_cnt + 1;
+ filter->info[filter->list_cnt].enable = TRUE;
+
+ filter->list_cnt++;
+
+ return BCME_OK;
+}
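+
+/*
+ * The argument parsed above has the form "<offset> <bitmask> <pattern>",
+ * where bitmask and pattern are hex strings of equal byte length. For
+ * example, matching ARP frames by EtherType (bytes 12-13 of an Ethernet
+ * II header) could look like this. Illustrative values only; note that
+ * bcmstrtok() modifies the string, so it must be writable:
+ */
+#if 0
+	char arg[] = "12 0xffff 0x0806";
+
+	dhd_pktlog_filter_add(dhdp->pktlog->pktlog_filter, arg);
+#endif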
+
+int
+dhd_pktlog_filter_del(dhd_pktlog_filter_t *filter, char *arg)
+{
+ uint32 id = 0;
+
+ if (!filter || !arg) {
+ DHD_ERROR(("%s(): pktlog_filter =%p arg =%p\n", __FUNCTION__, filter, arg));
+ return BCME_ERROR;
+ }
+
+ DHD_PKT_LOG(("%s(): arg %s\n", __FUNCTION__, arg));
+
+ if (dhd_pktlog_filter_existed(filter, arg, &id) != TRUE) {
+		DHD_PKT_LOG(("%s(): This pattern id %d does not exist\n", __FUNCTION__, id));
+ return BCME_OK;
+ }
+
+ dhd_pktlog_filter_pull_forward(filter, id, filter->list_cnt);
+
+ filter->list_cnt--;
+
+ return BCME_OK;
+}
+
+int
+dhd_pktlog_filter_enable(dhd_pktlog_filter_t *filter, uint32 pktmon_case, uint32 enable)
+{
+ if (!filter) {
+ DHD_ERROR(("%s(): filter is NULL\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ DHD_PKT_LOG(("%s(): pktlog_case %d enable %d\n", __FUNCTION__, pktmon_case, enable));
+
+ if (enable) {
+ filter->enable |= pktmon_case;
+ } else {
+ filter->enable &= ~pktmon_case;
+ }
+
+ return BCME_OK;
+}
+
+int
+dhd_pktlog_filter_pattern_enable(dhd_pktlog_filter_t *filter, char *arg, uint32 enable)
+{
+ uint32 id = 0;
+
+ if (!filter || !arg) {
+ DHD_ERROR(("%s(): pktlog_filter =%p arg =%p\n", __FUNCTION__, filter, arg));
+ return BCME_ERROR;
+ }
+
+ if (dhd_pktlog_filter_existed(filter, arg, &id) == TRUE) {
+ if (id > 0) {
+ filter->info[id-1].enable = enable;
+ DHD_ERROR(("%s(): This pattern id %d is %s\n",
+ __FUNCTION__, id, (enable ? "enabled" : "disabled")));
+ }
+ } else {
+ DHD_ERROR(("%s(): This pattern is not existed\n", __FUNCTION__));
+ DHD_ERROR(("%s(): arg %s\n", __FUNCTION__, arg));
+ }
+
+ return BCME_OK;
+}
+
+int
+dhd_pktlog_filter_info(dhd_pktlog_filter_t *filter)
+{
+ char filter_pattern[MAX_FILTER_PATTERN_LEN];
+ char *p;
+ int i, j;
+ int nchar;
+ int len;
+
+ if (!filter) {
+ DHD_ERROR(("%s(): pktlog_filter is NULL\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ DHD_ERROR(("---- PKTLOG FILTER INFO ----\n\n"));
+
+ DHD_ERROR(("Filter list cnt %d Filter is %s\n",
+ filter->list_cnt, (filter->enable ? "enabled" : "disabled")));
+
+ for (i = 0; i < filter->list_cnt; i++) {
+ p = filter_pattern;
+ len = sizeof(filter_pattern);
+
+ nchar = snprintf(p, len, "%d ", filter->info[i].offset);
+ p += nchar;
+ len -= nchar;
+
+ nchar = snprintf(p, len, "0x");
+ p += nchar;
+ len -= nchar;
+
+ for (j = 0; j < filter->info[i].size_bytes; j++) {
+ nchar = snprintf(p, len, "%02x", filter->info[i].mask[j]);
+ p += nchar;
+ len -= nchar;
+ }
+
+ nchar = snprintf(p, len, " 0x");
+ p += nchar;
+ len -= nchar;
+
+ for (j = 0; j < filter->info[i].size_bytes; j++) {
+ nchar = snprintf(p, len, "%02x", filter->info[i].pattern[j]);
+ p += nchar;
+ len -= nchar;
+ }
+
+ DHD_ERROR(("ID:%d is %s\n",
+ filter->info[i].id, (filter->info[i].enable ? "enabled" : "disabled")));
+ DHD_ERROR(("Pattern %s\n", filter_pattern));
+ }
+
+ DHD_ERROR(("---- PKTLOG FILTER END ----\n"));
+
+ return BCME_OK;
+}
+
+bool
+dhd_pktlog_filter_matched(dhd_pktlog_filter_t *filter, char *data, uint32 pktlog_case)
+{
+ uint16 szbts; /* pattern size */
+ uint16 offset; /* pattern offset */
+ int i, j;
+ uint8 *mask = NULL; /* bitmask */
+ uint8 *pattern = NULL;
+ uint8 *pkt_offset = NULL; /* packet offset */
+ bool matched;
+
+ if (!filter || !data) {
+ DHD_PKT_LOG(("%s(): filter=%p data=%p\n",
+ __FUNCTION__, filter, data));
+ return TRUE;
+ }
+
+ if (!(pktlog_case & filter->enable)) {
+ DHD_PKT_LOG(("%s(): pktlog_case %d return TRUE filter is disabled\n",
+ __FUNCTION__, pktlog_case));
+ return TRUE;
+ }
+
+ for (i = 0; i < filter->list_cnt; i++) {
+ if (&filter->info[i] && filter->info[i].id && filter->info[i].enable) {
+ szbts = filter->info[i].size_bytes;
+ offset = filter->info[i].offset;
+ mask = &filter->info[i].mask[0];
+ pkt_offset = &data[offset];
+ pattern = &filter->info[i].pattern[0];
+
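+ /* Masked compare: byte j matches when (mask[j] & pkt[j]) == pattern[j].
+  * Worked example (illustrative): mask 0xf0, packet byte 0x45,
+  * pattern 0x40 -> (0xf0 & 0x45) == 0x40, so the byte matches.
+  */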
+ matched = TRUE;
+ for (j = 0; j < szbts; j++) {
+ if ((mask[j] & pkt_offset[j]) != pattern[j]) {
+ matched = FALSE;
+ break;
+ }
+ }
+
+ if (matched) {
+ DHD_PKT_LOG(("%s(): pktlog_filter return TRUE id %d\n",
+ __FUNCTION__, filter->info[i].id));
+ return TRUE;
+ }
+ } else {
+ DHD_PKT_LOG(("%s(): filter ino is null %p\n",
+ __FUNCTION__, &filter->info[i]));
+ }
+ }
+
+ return FALSE;
+}
+
+/* Ethernet Type MAC Header 12 bytes + Frame payload 10 bytes */
+#define PKTLOG_MINIMIZE_REPORT_LEN 22
+
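+/*
+ * Per-byte bitmask applied to the head of each frame when the minimize
+ * option is on: 0xff bits are preserved, 0x00 bits are scrubbed, so most
+ * of the MAC address bytes and the payload tail are anonymized in the dump.
+ */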
+static char pktlog_minmize_mask_table[] = {
+ 0xff, 0x00, 0x00, 0x00, 0xff, 0x0f, /* Ethernet Type MAC Header - Destination MAC Address */
+ 0xff, 0x00, 0x00, 0x00, 0xff, 0x0f, /* Ethernet Type MAC Header - Source MAC Address */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* Ethernet Type MAC Header - Ether Type - 2 bytes */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* Frame payload */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, /* keep the UDP port number bytes (0xff) */
+ 0xff, 0xff,
+};
+
+static inline void
+dhd_pktlog_minimize_report(char *pkt, uint32 frame_len,
+ void *file, const void *user_buf, void *pos)
+{
+ int i;
+ int ret = 0;
+ int table_len;
+ int report_len;
+ char *p_table;
+ char *mem_buf = NULL;
+
+ table_len = sizeof(pktlog_minmize_mask_table);
+ report_len = table_len;
+ p_table = &pktlog_minmize_mask_table[0];
+
+ if (frame_len < PKTLOG_MINIMIZE_REPORT_LEN) {
+ DHD_ERROR(("%s : frame_len is samller than min\n", __FUNCTION__));
+ return;
+ }
+
+ mem_buf = vmalloc(frame_len);
+ if (!mem_buf) {
+ DHD_ERROR(("%s : failed to alloc membuf\n", __FUNCTION__));
+ return;
+ }
+
+ bzero(mem_buf, frame_len);
+
+ if (frame_len < table_len) {
+ report_len = PKTLOG_MINIMIZE_REPORT_LEN;
+ }
+
+ for (i = 0; i < report_len; i++) {
+ mem_buf[i] = pkt[i] & p_table[i];
+ }
+
+ ret = dhd_export_debug_data(mem_buf,
+ file, user_buf, frame_len, pos);
+ if (ret < 0) {
+ DHD_ERROR(("%s : Write minimize report\n", __FUNCTION__));
+ }
+ vfree(mem_buf);
+}
+
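+/*
+ * Resize the packet log ring: the requested size is clamped to
+ * [MIN_PKTLOG_LEN, MAX_PKTLOG_LEN], the old ring is torn down and a new
+ * one allocated; the minimize flag is carried over and logging re-enabled.
+ */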
+dhd_pktlog_ring_t*
+dhd_pktlog_ring_change_size(dhd_pktlog_ring_t *ringbuf, int size)
+{
+ uint32 alloc_len;
+ uint32 pktlog_minmize;
+ dhd_pktlog_ring_t *pktlog_ring = NULL;
+ dhd_pub_t *dhdp;
+
+ if (!ringbuf) {
+ DHD_ERROR(("%s(): ringbuf is NULL\n", __FUNCTION__));
+ return NULL;
+ }
+
+ alloc_len = size;
+ if (alloc_len < MIN_PKTLOG_LEN) {
+ alloc_len = MIN_PKTLOG_LEN;
+ }
+ if (alloc_len > MAX_PKTLOG_LEN) {
+ alloc_len = MAX_PKTLOG_LEN;
+ }
+ DHD_ERROR(("ring size requested: %d alloc: %d\n", size, alloc_len));
+
+ /* backup variable */
+ pktlog_minmize = ringbuf->pktlog_minmize;
+ dhdp = ringbuf->dhdp;
+
+ /* free ring_info */
+ dhd_pktlog_ring_deinit(dhdp, ringbuf);
+
+ /* alloc ring_info */
+ pktlog_ring = dhd_pktlog_ring_init(dhdp, alloc_len);
+
+ /* restore variable */
+ if (pktlog_ring) {
+ OSL_ATOMIC_SET(dhdp->osh, &pktlog_ring->start, TRUE);
+ pktlog_ring->pktlog_minmize = pktlog_minmize;
+ }
+
+ return pktlog_ring;
+}
+
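+/*
+ * Remove filter entry del_filter_id (ids are 1-based) by shifting the
+ * entries behind it forward one slot and renumbering their ids.
+ */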
+void
+dhd_pktlog_filter_pull_forward(dhd_pktlog_filter_t *filter, uint32 del_filter_id, uint32 list_cnt)
+{
+ int ret = 0;
+ int pos = 0;
+ int move_list_cnt = 0;
+ int move_bytes = 0;
+
+ if ((del_filter_id > list_cnt) ||
+ (list_cnt > MAX_DHD_PKTLOG_FILTER_LEN)) {
+ DHD_ERROR(("Wrong id %d cnt %d tried to remove\n", del_filter_id, list_cnt));
+ return;
+ }
+
+ move_list_cnt = list_cnt - del_filter_id;
+
+ pos = del_filter_id - 1;
+ move_bytes = sizeof(dhd_pktlog_filter_info_t) * move_list_cnt;
+ if (move_list_cnt) {
+ ret = memmove_s(&filter->info[pos], move_bytes + sizeof(dhd_pktlog_filter_info_t),
+ &filter->info[pos+1], move_bytes);
+ if (ret) {
+ DHD_ERROR(("filter moving failed\n"));
+ return;
+ }
+ for (; pos < list_cnt - 1; pos++) {
+ filter->info[pos].id -= 1;
+ }
+ }
+ bzero(&filter->info[list_cnt-1], sizeof(dhd_pktlog_filter_info_t));
+}
+
+void dhd_pktlog_get_filename(dhd_pub_t *dhdp, char *dump_path, int len)
+{
+ /* Init file name */
+ bzero(dump_path, len);
+ clear_debug_dump_time(dhdp->debug_dump_time_pktlog_str);
+ get_debug_dump_time(dhdp->debug_dump_time_pktlog_str);
+
+ if (dhdp->memdump_type == DUMP_TYPE_BY_SYSDUMP) {
+ if (dhdp->debug_dump_subcmd == CMD_UNWANTED) {
+ snprintf(dump_path, len, "%s",
+ DHD_PKTLOG_DUMP_PATH DHD_PKTLOG_DUMP_TYPE
+ DHD_DUMP_SUBSTR_UNWANTED);
+ } else if (dhdp->debug_dump_subcmd == CMD_DISCONNECTED) {
+ snprintf(dump_path, len, "%s",
+ DHD_PKTLOG_DUMP_PATH DHD_PKTLOG_DUMP_TYPE
+ DHD_DUMP_SUBSTR_DISCONNECTED);
+ } else {
+ snprintf(dump_path, len, "%s",
+ DHD_PKTLOG_DUMP_PATH DHD_PKTLOG_DUMP_TYPE);
+ }
+ } else {
+ if (dhdp->pktlog_debug) {
+ snprintf(dump_path, len, "%s",
+ DHD_PKTLOG_DUMP_PATH DHD_PKTLOG_DEBUG_DUMP_TYPE);
+ } else {
+ snprintf(dump_path, len, "%s",
+ DHD_PKTLOG_DUMP_PATH DHD_PKTLOG_DUMP_TYPE);
+ }
+
+ }
+
+ snprintf(dump_path, len, "%s_%s.pcap", dump_path,
+ dhdp->debug_dump_time_pktlog_str);
+ DHD_ERROR(("%s: pktlog path = %s%s\n", __FUNCTION__, dump_path, FILE_NAME_HAL_TAG));
+ clear_debug_dump_time(dhdp->debug_dump_time_pktlog_str);
+}
+
+uint32
+dhd_pktlog_get_item_length(dhd_pktlog_ring_info_t *report_ptr)
+{
+ uint32 len = 0;
+ char buf[DHD_PKTLOG_FATE_INFO_STR_LEN];
+ int bytes_user_data = 0;
+ uint32 write_frame_len;
+ uint32 frame_len;
+
+ len += (uint32)sizeof(report_ptr->info.driver_ts_sec);
+ len += (uint32)sizeof(report_ptr->info.driver_ts_usec);
+
+ if (report_ptr->info.payload_type == FRAME_TYPE_ETHERNET_II) {
+ frame_len = (uint32)min(report_ptr->info.pkt_len, (size_t)MAX_FRAME_LEN_ETHERNET);
+ } else {
+ frame_len = (uint32)min(report_ptr->info.pkt_len, (size_t)MAX_FRAME_LEN_80211_MGMT);
+ }
+
+ bytes_user_data = sprintf(buf, "%s:%s:%02d\n", DHD_PKTLOG_FATE_INFO_FORMAT,
+ (report_ptr->tx_fate ? "Failure" : "Succeed"), report_ptr->tx_fate);
+ write_frame_len = frame_len + bytes_user_data;
+
+ /* pcap pkt head has incl_len and orig_len */
+ len += (uint32)sizeof(write_frame_len);
+ len += (uint32)sizeof(write_frame_len);
+ len += frame_len;
+ len += bytes_user_data;
+
+ return len;
+}
+
+uint32
+dhd_pktlog_get_dump_length(dhd_pub_t *dhdp)
+{
+ dhd_pktlog_ring_info_t *report_ptr;
+ dhd_pktlog_ring_t *pktlog_ring;
+ uint32 len;
+ dll_t *item_p, *next_p;
+
+ if (!dhdp || !dhdp->pktlog) {
+ DHD_PKT_LOG(("%s(): dhdp=%p pktlog=%p\n",
+ __FUNCTION__, dhdp, (dhdp ? dhdp->pktlog : NULL)));
+ return -EINVAL;
+ }
+
+ if (!dhdp->pktlog->pktlog_ring) {
+ DHD_PKT_LOG(("%s(): pktlog_ring =%p\n",
+ __FUNCTION__, dhdp->pktlog->pktlog_ring));
+ return -EINVAL;
+ }
+
+ pktlog_ring = dhdp->pktlog->pktlog_ring;
+ OSL_ATOMIC_SET(dhdp->osh, &pktlog_ring->start, FALSE);
+
+ len = sizeof(dhd_pktlog_pcap_hdr_t);
+
+ for (item_p = dll_head_p(&pktlog_ring->ring_info_head);
+ !dll_end(&pktlog_ring->ring_info_head, item_p);
+ item_p = next_p) {
+ next_p = dll_next_p(item_p);
+ report_ptr = (dhd_pktlog_ring_info_t *)item_p;
+ len += dhd_pktlog_get_item_length(report_ptr);
+ }
+ OSL_ATOMIC_SET(dhdp->osh, &pktlog_ring->start, TRUE);
+ DHD_PKT_LOG(("calcuated pkt log dump len:%d\n", len));
+
+ return len;
+}
+
+int
+dhd_pktlog_dump_write(dhd_pub_t *dhdp, void *file, const void *user_buf, uint32 size)
+{
+ dhd_pktlog_ring_info_t *report_ptr;
+ dhd_pktlog_ring_t *pktlog_ring;
+ char buf[DHD_PKTLOG_FATE_INFO_STR_LEN];
+ dhd_pktlog_pcap_hdr_t pcap_h;
+ uint32 write_frame_len;
+ uint32 frame_len;
+ ulong len;
+ int bytes_user_data = 0;
+ loff_t pos = 0;
+ int ret = BCME_OK;
+ dll_t *item_p, *next_p;
+
+ if (!dhdp || !dhdp->pktlog) {
+ DHD_PKT_LOG(("%s(): dhdp=%p pktlog=%p\n",
+ __FUNCTION__, dhdp, (dhdp ? dhdp->pktlog : NULL)));
+ return -EINVAL;
+ }
+
+ if (!dhdp->pktlog->pktlog_ring) {
+ DHD_PKT_LOG(("%s(): pktlog_ring =%p\n",
+ __FUNCTION__, dhdp->pktlog->pktlog_ring));
+ return -EINVAL;
+ }
+
+ if (file && !user_buf && (size == 0)) {
+ DHD_ERROR(("Local file pktlog dump requested\n"));
+ } else if (!file && user_buf && (size > 0)) {
+ DHD_ERROR(("HAL file pktlog dump %d bytes requested\n", size));
+ } else {
+ DHD_ERROR(("Wrong type pktlog dump requested\n"));
+ return -EINVAL;
+ }
+
+ pktlog_ring = dhdp->pktlog->pktlog_ring;
+ OSL_ATOMIC_SET(dhdp->osh, &pktlog_ring->start, FALSE);
+
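+ /* Standard libpcap global header; PKTLOG_PCAP_NETWORK_TYPE (147, in the
+  * user-defined DLT range) marks the records as a vendor-defined link type.
+  */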
+ pcap_h.magic_number = PKTLOG_PCAP_MAGIC_NUM;
+ pcap_h.version_major = PKTLOG_PCAP_MAJOR_VER;
+ pcap_h.version_minor = PKTLOG_PCAP_MINOR_VER;
+ pcap_h.thiszone = 0x0;
+ pcap_h.sigfigs = 0x0;
+ pcap_h.snaplen = PKTLOG_PCAP_SNAP_LEN;
+ pcap_h.network = PKTLOG_PCAP_NETWORK_TYPE;
+
+ ret = dhd_export_debug_data((char *)&pcap_h, file, user_buf, sizeof(pcap_h), &pos);
+ len = sizeof(pcap_h);
+
+ for (item_p = dll_head_p(&pktlog_ring->ring_info_head);
+ !dll_end(&pktlog_ring->ring_info_head, item_p);
+ item_p = next_p) {
+
+ next_p = dll_next_p(item_p);
+ report_ptr = (dhd_pktlog_ring_info_t *)item_p;
+
+ if ((file == NULL) &&
+ (len + dhd_pktlog_get_item_length(report_ptr) > size)) {
+ DHD_ERROR(("overflowed pkt logs are dropped\n"));
+ break;
+ }
+
+ ret = dhd_export_debug_data((char*)&report_ptr->info.driver_ts_sec, file,
+ user_buf, sizeof(report_ptr->info.driver_ts_sec), &pos);
+ len += sizeof(report_ptr->info.driver_ts_sec);
+
+ ret = dhd_export_debug_data((char*)&report_ptr->info.driver_ts_usec, file,
+ user_buf, sizeof(report_ptr->info.driver_ts_usec), &pos);
+ len += sizeof(report_ptr->info.driver_ts_usec);
+
+ if (report_ptr->info.payload_type == FRAME_TYPE_ETHERNET_II) {
+ frame_len = (uint32)min(report_ptr->info.pkt_len,
+ (size_t)MAX_FRAME_LEN_ETHERNET);
+
+ } else {
+ frame_len = (uint32)min(report_ptr->info.pkt_len,
+ (size_t)MAX_FRAME_LEN_80211_MGMT);
+ }
+
+ bytes_user_data = sprintf(buf, "%s:%s:%02d\n", DHD_PKTLOG_FATE_INFO_FORMAT,
+ (report_ptr->tx_fate ? "Failure" : "Succeed"), report_ptr->tx_fate);
+ write_frame_len = frame_len + bytes_user_data;
+
+ /* pcap pkt head has incl_len and orig_len */
+ ret = dhd_export_debug_data((char*)&write_frame_len, file, user_buf,
+ sizeof(write_frame_len), &pos);
+ len += sizeof(write_frame_len);
+
+ ret = dhd_export_debug_data((char*)&write_frame_len, file, user_buf,
+ sizeof(write_frame_len), &pos);
+ len += sizeof(write_frame_len);
+
+ if (pktlog_ring->pktlog_minmize) {
+ dhd_pktlog_minimize_report(PKTDATA(pktlog_ring->dhdp->osh,
+ report_ptr->info.pkt), frame_len, file, user_buf, &pos);
+ } else {
+ ret = dhd_export_debug_data(PKTDATA(pktlog_ring->dhdp->osh,
+ report_ptr->info.pkt), file, user_buf, frame_len, &pos);
+ }
+ len += frame_len;
+
+ ret = dhd_export_debug_data(buf, file, user_buf, bytes_user_data, &pos);
+ len += bytes_user_data;
+ }
+ OSL_ATOMIC_SET(dhdp->osh, &pktlog_ring->start, TRUE);
+
+ return ret;
+}
+
+int
+dhd_pktlog_dump_write_memory(dhd_pub_t *dhdp, const void *user_buf, uint32 size)
+{
+ int ret = dhd_pktlog_dump_write(dhdp, NULL, user_buf, size);
+ if (ret < 0) {
+ DHD_ERROR(("dhd_pktlog_dump_write_memory error\n"));
+ }
+ return ret;
+}
+
+int
+dhd_pktlog_dump_write_file(dhd_pub_t *dhdp)
+{
+ struct file *w_pcap_fp = NULL;
+ uint32 file_mode;
+ mm_segment_t old_fs;
+ char pktlogdump_path[128];
+ int ret = BCME_OK;
+
+ dhd_pktlog_get_filename(dhdp, pktlogdump_path, 128);
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ file_mode = O_CREAT | O_WRONLY;
+
+ w_pcap_fp = filp_open(pktlogdump_path, file_mode, 0664);
+ if (IS_ERR(w_pcap_fp)) {
+ DHD_ERROR(("%s: Couldn't open file '%s' err %ld\n",
+ __FUNCTION__, pktlogdump_path, PTR_ERR(w_pcap_fp)));
+ ret = BCME_ERROR;
+ goto fail;
+ }
+
+ ret = dhd_pktlog_dump_write(dhdp, w_pcap_fp, NULL, 0);
+ if (ret < 0) {
+ DHD_ERROR(("dhd_pktlog_dump_write error\n"));
+ goto fail;
+ }
+
+ /* Sync file from filesystem to physical media */
+ ret = vfs_fsync(w_pcap_fp, 0);
+ if (ret < 0) {
+ DHD_ERROR(("%s(): sync pcap file error, err = %d\n", __FUNCTION__, ret));
+ goto fail;
+ }
+fail:
+ if (!IS_ERR(w_pcap_fp)) {
+ filp_close(w_pcap_fp, NULL);
+ }
+
+ set_fs(old_fs);
+
+#ifdef DHD_DUMP_MNGR
+ if (ret >= 0) {
+ dhd_dump_file_manage_enqueue(dhdp, pktlogdump_path, DHD_PKTLOG_DUMP_TYPE);
+ }
+#endif /* DHD_DUMP_MNGR */
+ return ret;
+}
+
+#ifdef DHD_COMPACT_PKT_LOG
+static uint64
+dhd_cpkt_log_calc_time_diff(dhd_pktlog_ring_info_t *pkt_info, uint64 curr_ts_nsec)
+{
+ uint64 pkt_ts_nsec = (uint64)pkt_info->info.driver_ts_sec * NSEC_PER_SEC +
+ (uint64)pkt_info->info.driver_ts_usec * NSEC_PER_USEC;
+
+ return (curr_ts_nsec - pkt_ts_nsec) / NSEC_PER_USEC;
+}
+
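+/*
+ * Map a packet's age (current time minus driver timestamp, in usec) to a
+ * coarse timestamp-bucket index using the red-black tree built over
+ * dhd_cpkt_log_tt_idx[] by dhd_cpkt_log_init_tt().
+ */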
+static int
+dhd_cpkt_log_get_ts_idx(dhd_pktlog_t *pktlog, dhd_pktlog_ring_info_t *pkt_info, u64 curr_ts_nsec)
+{
+ struct rb_node *n = pktlog->cpkt_log_tt_rbt.rb_node;
+ dhd_cpkt_log_ts_node_t *node = NULL;
+
+ uint64 ts_diff = dhd_cpkt_log_calc_time_diff(pkt_info, curr_ts_nsec);
+
+ if (ts_diff > dhd_cpkt_log_tt_idx[CPKT_LOG_TT_IDX_ARR_SZ - 1])
+ return CPKT_LOG_TT_IDX_ARR_SZ;
+
+ while (n) {
+ node = rb_entry(n, dhd_cpkt_log_ts_node_t, rb);
+
+ if (ts_diff < node->ts_diff)
+ n = n->rb_left;
+ else if (ts_diff > node->ts_diff)
+ n = n->rb_right;
+ else
+ break;
+ }
+
+ if (node != NULL) {
+ if (node->idx && ts_diff < node->ts_diff)
+ return node->idx - 1;
+ return node->idx;
+ }
+
+ return BCME_NOTFOUND;
+}
+
+static int
+dhd_cpkt_log_get_direction(dhd_pktlog_ring_info_t *pkt_info)
+{
+ return pkt_info->info.direction == PKTLOG_TXPKT_CASE ? PKT_TX : PKT_RX;
+}
+
+static int
+dhd_cpkt_log_get_802_1x_subtype(eapol_header_t *eapol)
+{
+ int subtype;
+ eap_header_t *eap;
+ eapol_wpa_key_header_t *ek;
+
+ uint16 key_info;
+ int pair, ack, mic, kerr, req, sec, install;
+
+ subtype = CPKT_LOG_802_1X_SUBTYPE_OTHERS;
+ if (eapol->type != EAPOL_KEY) {
+ eap = (eap_header_t *)eapol->body;
+
+ switch (eap->type) {
+ case EAP_IDENTITY:
+ subtype = CPKT_LOG_802_1X_SUBTYPE_IDENTITY;
+ break;
+ case REALM_EAP_TLS:
+ subtype = CPKT_LOG_802_1X_SUBTYPE_TLS;
+ break;
+ case REALM_EAP_TTLS:
+ subtype = CPKT_LOG_802_1X_SUBTYPE_TTLS;
+ break;
+ case REALM_EAP_FAST:
+ subtype = CPKT_LOG_802_1X_SUBTYPE_FAST;
+ break;
+ case REALM_EAP_LEAP:
+ subtype = CPKT_LOG_802_1X_SUBTYPE_LEAP;
+ break;
+ case REALM_EAP_PSK:
+ subtype = CPKT_LOG_802_1X_SUBTYPE_PWD;
+ break;
+ case REALM_EAP_SIM:
+ subtype = CPKT_LOG_802_1X_SUBTYPE_SIM;
+ break;
+ case REALM_EAP_AKA:
+ subtype = CPKT_LOG_802_1X_SUBTYPE_AKA;
+ break;
+ case REALM_EAP_AKAP:
+ subtype = CPKT_LOG_802_1X_SUBTYPE_AKAP;
+ break;
+ default:
+ break;
+ }
+ if (eap->code == EAP_SUCCESS)
+ subtype = CPKT_LOG_802_1X_SUBTYPE_SUCCESS;
+ } else {
+ /* in case of 4 way handshake */
+ ek = (eapol_wpa_key_header_t *)(eapol->body);
+
+ if (ek->type == EAPOL_WPA2_KEY || ek->type == EAPOL_WPA_KEY) {
+ key_info = ntoh16_ua(&ek->key_info);
+
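+ /* Classify the 4-way handshake message from the key_info flags
+  * (matching the checks below): M1 = ACK only, M2 = MIC only,
+  * M3 = ACK + MIC + SECURE, M4 = MIC + SECURE.
+  */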
+ pair = 0 != (key_info & WPA_KEY_PAIRWISE);
+ ack = 0 != (key_info & WPA_KEY_ACK);
+ mic = 0 != (key_info & WPA_KEY_MIC);
+ kerr = 0 != (key_info & WPA_KEY_ERROR);
+ req = 0 != (key_info & WPA_KEY_REQ);
+ sec = 0 != (key_info & WPA_KEY_SECURE);
+ install = 0 != (key_info & WPA_KEY_INSTALL);
+
+ if (!sec && !mic && ack && !install && pair && !kerr && !req)
+ subtype = CPKT_LOG_802_1X_SUBTYPE_4WAY_M1;
+ else if (pair && !install && !ack && mic && !sec && !kerr && !req)
+ subtype = CPKT_LOG_802_1X_SUBTYPE_4WAY_M2;
+ else if (pair && ack && mic && sec && !kerr && !req)
+ subtype = CPKT_LOG_802_1X_SUBTYPE_4WAY_M3;
+ else if (pair && !install && !ack && mic && sec && !req && !kerr)
+ subtype = CPKT_LOG_802_1X_SUBTYPE_4WAY_M4;
+ }
+ }
+
+ return subtype;
+}
+
+static int
+dhd_cpkt_log_get_pkt_info(dhd_pktlog_t *pktlog, dhd_pktlog_ring_info_t *pkt_info)
+{
+ int type;
+ int subtype = 0;
+
+ uint8 prot;
+ uint16 src_port, dst_port;
+ int len, offset;
+
+ uint8 *pdata;
+ uint8 *pkt_data;
+
+ uint16 eth_type;
+ struct bcmarp *arp;
+ struct bcmicmp_hdr *icmp;
+ struct ipv4_hdr *ipv4;
+ struct ether_header *eth_hdr;
+ bcm_tlv_t *dhcp_opt;
+
+ struct ipv6_hdr *ipv6;
+ struct icmp6_hdr *icmpv6_hdr;
+
+ pkt_data = (uint8 *)PKTDATA(pktlog->dhdp->osh, pkt_info->info.pkt);
+
+ eth_hdr = (struct ether_header *)pkt_data;
+ eth_type = ntoh16(eth_hdr->ether_type);
+
+ type = CPKT_LOG_TYPE_OTHERS;
+ switch (eth_type) {
+ case ETHER_TYPE_IP:
+ if (get_pkt_ip_type(pktlog->dhdp->osh, pkt_info->info.pkt,
+ &pdata, &len, &prot) != 0) {
+ DHD_PKT_LOG(("%s: fail to get pkt ip type\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ if (prot == IP_PROT_ICMP) {
+ icmp = (struct bcmicmp_hdr *)(pdata);
+ if (!(icmp->type == ICMP_TYPE_ECHO_REQUEST ||
+ icmp->type == ICMP_TYPE_ECHO_REPLY ||
+ icmp->type == CPKT_LOG_ICMP_TYPE_DEST_UNREACHABLE)) {
+ return BCME_ERROR;
+ }
+
+ if (icmp->type == ICMP_TYPE_ECHO_REQUEST) {
+ type = CPKT_LOG_TYPE_ICMP_REQ;
+ /* Subtype = Last 8 bits of identifier */
+ subtype = ntoh16_ua(pdata + sizeof(*icmp)) & 0xFF;
+ } else if (icmp->type == ICMP_TYPE_ECHO_REPLY) {
+ type = CPKT_LOG_TYPE_ICMP_RES;
+ /* Subtype = Last 8 bits of identifier */
+ subtype = ntoh16_ua(pdata + sizeof(*icmp)) & 0xFF;
+ } else if (icmp->type == CPKT_LOG_ICMP_TYPE_DEST_UNREACHABLE) {
+ type = CPKT_LOG_TYPE_ICMP_UNREACHABLE;
+ /* Subtype = Last 8 bits of identifier */
+ ipv4 = (struct ipv4_hdr *)(pdata + sizeof(*icmp) +
+ CPKT_LOG_ICMP_TYPE_DEST_UNREACHABLE_IPV4_OFFSET);
+ subtype = ipv4->id & 0xFF;
+ }
+
+ DHD_PKT_LOG(("%s: type = ICMP(%d), subtype = %x \n",
+ __FUNCTION__, type, subtype));
+ } else if (prot == IP_PROT_UDP) {
+ if (len < UDP_HDR_LEN)
+ return BCME_ERROR;
+
+ src_port = ntoh16_ua(pdata);
+ dst_port = ntoh16_ua(pdata + UDP_DEST_PORT_OFFSET);
+
+ if (src_port == DHCP_PORT_SERVER || src_port == DHCP_PORT_CLIENT) {
+ type = CPKT_LOG_TYPE_DHCP;
+ /* Subtype = DHCP message type */
+ offset = DHCP_OPT_OFFSET + CPKT_LOG_DHCP_MAGIC_COOKIE_LEN;
+ if ((UDP_HDR_LEN + offset) >= len)
+ return BCME_ERROR;
+ len -= (UDP_HDR_LEN + offset);
+
+ dhcp_opt = bcm_parse_tlvs(pdata + UDP_HDR_LEN + offset,
+ len, DHCP_OPT_MSGTYPE);
+ if (dhcp_opt == NULL)
+ return BCME_NOTFOUND;
+ subtype = dhcp_opt->data[0];
+
+ DHD_PKT_LOG(("%s: type = DHCP(%d), subtype = %x \n",
+ __FUNCTION__, type, subtype));
+ } else if (src_port == CPKT_LOG_DNS_PORT_CLIENT ||
+ dst_port == CPKT_LOG_DNS_PORT_CLIENT ||
+ dst_port == CPKT_LOG_MDNS_PORT_CLIENT) {
+ type = CPKT_LOG_TYPE_DNS;
+ /* Subtype = Last 8 bits of DNS Transaction ID */
+ subtype = ntoh16_ua(pdata + UDP_HDR_LEN) & 0xFF;
+
+ DHD_PKT_LOG(("%s: type = DNS(%d), subtype = %x \n",
+ __FUNCTION__, type, subtype));
+ } else {
+ DHD_PKT_LOG(("%s: unsupported ports num (src:%d, dst:%d)\n",
+ __FUNCTION__, src_port, dst_port));
+ }
+ } else {
+ DHD_PKT_LOG(("%s: prot = %x\n", __FUNCTION__, prot));
+ }
+
+ break;
+ case ETHER_TYPE_ARP:
+ type = CPKT_LOG_TYPE_ARP;
+ /* Subtype = Last 8 bits of target IP address */
+ arp = (struct bcmarp *)(pkt_data + ETHER_HDR_LEN);
+ subtype = arp->dst_ip[IPV4_ADDR_LEN - 1];
+
+ DHD_PKT_LOG(("%s: type = ARP(%d), subtype = %x\n",
+ __FUNCTION__, type, subtype));
+
+ break;
+ case ETHER_TYPE_802_1X:
+ type = CPKT_LOG_TYPE_802_1X;
+ /* EAPOL for 802.3/Ethernet */
+ subtype = dhd_cpkt_log_get_802_1x_subtype((eapol_header_t *)pkt_data);
+
+ DHD_PKT_LOG(("%s: type = 802.1x(%d), subtype = %x\n",
+ __FUNCTION__, type, subtype));
+
+ break;
+ case ETHER_TYPE_IPV6:
+ ipv6 = (struct ipv6_hdr *)(pkt_data + ETHER_HDR_LEN);
+ if (ipv6->nexthdr == ICMPV6_HEADER_TYPE) {
+ type = CPKT_LOG_TYPE_ICMPv6;
+ icmpv6_hdr =
+ (struct icmp6_hdr *)(pkt_data + ETHER_HDR_LEN + sizeof(*ipv6));
+ subtype = icmpv6_hdr->icmp6_type;
+
+ DHD_PKT_LOG(("%s: type = ICMPv6(%x), subtype = %x\n",
+ __FUNCTION__, type, subtype));
+ } else {
+ DHD_ERROR(("%s: unsupported ipv6 next header\n", __FUNCTION__));
+ }
+
+ break;
+ default:
+ DHD_ERROR(("%s: Invalid eth type (%x)\n", __FUNCTION__, eth_hdr->ether_type));
+ break;
+ }
+
+ return (subtype << CPKT_LOG_BIT_LEN_TYPE) | type;
+}
+
+static int
+dhd_cpkt_log_get_pkt_fate(dhd_pktlog_ring_info_t *pktlog_info)
+{
+ return pktlog_info->fate;
+}
+
+/*
+ * dhd_cpkt_log_build: pack one packet log entry into the 22-bit compact
+ * packet log format used for big-data reporting.
+ *
+ * pkt_info: one packet entry from the packet log ring
+ * curr_ts_nsec: current time (nanoseconds)
+ * cpkt: output pointer for the resulting 22-bit compact packet log word
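+ *
+ * The 22 bits are assembled from four fields (timestamp index, direction,
+ * packet type/subtype, packet fate), each masked and shifted with the
+ * CPKT_LOG_BIT_MASK_* / CPKT_LOG_BIT_OFFSET_* constants used below.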
+ */
+static int
+dhd_cpkt_log_build(dhd_pktlog_t *pktlog, dhd_pktlog_ring_info_t *pkt_info,
+ u64 curr_ts_nsec, int *cpkt)
+{
+ int ret;
+ int mask;
+ int temp = 0;
+
+ /* Timestamp index */
+ ret = dhd_cpkt_log_get_ts_idx(pktlog, pkt_info, curr_ts_nsec);
+ if (ret < 0) {
+ DHD_ERROR(("%s: Invalid cpktlog ts, err = %d\n", __FUNCTION__, ret));
+ return ret;
+ }
+ mask = CPKT_LOG_BIT_MASK_TS;
+ temp |= ((ret & mask) << CPKT_LOG_BIT_OFFSET_TS);
+
+ /* Direction: Tx/Rx */
+ ret = dhd_cpkt_log_get_direction(pkt_info);
+ mask = CPKT_LOG_BIT_MASK_DIR;
+ temp |= ((ret & mask) << CPKT_LOG_BIT_OFFSET_DIR);
+
+ /* Info = Packet Type & Packet Subtype */
+ ret = dhd_cpkt_log_get_pkt_info(pktlog, pkt_info);
+ if (ret < 0) {
+ DHD_ERROR(("%s: Invalid cpktlog info, err = %d\n", __FUNCTION__, ret));
+ return ret;
+ }
+ mask = CPKT_LOG_BIT_MASK_SUBTYPE << CPKT_LOG_BIT_LEN_TYPE | CPKT_LOG_BIT_MASK_TYPE;
+ temp |= ((ret & mask) << CPKT_LOG_BIT_OFFSET_TYPE);
+
+ /* Packet Fate */
+ ret = dhd_cpkt_log_get_pkt_fate(pkt_info);
+ mask = CPKT_LOG_BIT_MASK_PKT_FATE;
+ temp |= ((ret & mask) << CPKT_LOG_BIT_OFFSET_PKT_FATE);
+
+ *cpkt = temp;
+
+ return BCME_OK;
+}
+
+int
+dhd_cpkt_log_proc(dhd_pub_t *dhdp, char *buf, int buf_len, int bit_offset, int req_pkt_num)
+{
+ int ret;
+ int cpkt;
+ int offset = bit_offset;
+ dll_t *item_p, *prev_p;
+
+ uint8 pkt_cnt;
+ u64 curr_ts_nsec;
+
+ dhd_pktlog_t *pktlog;
+ dhd_pktlog_ring_t *pktlog_rbuf;
+
+ if (!dhdp || !dhdp->pktlog) {
+ DHD_ERROR(("%s: dhdp or pktlog is NULL\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ if (!dhdp->pktlog->pktlog_ring) {
+ DHD_ERROR(("%s: pktlog_ring is NULL\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ DHD_PKT_LOG(("%s: start cpkt log\n", __FUNCTION__));
+
+ pktlog = dhdp->pktlog;
+ pktlog_rbuf = pktlog->pktlog_ring;
+
+ req_pkt_num = req_pkt_num > CPKT_LOG_MAX_NUM ?
+ CPKT_LOG_MAX_NUM : req_pkt_num;
+
+ pkt_cnt = 0;
+ curr_ts_nsec = local_clock();
+ for (item_p = dll_tail_p(&pktlog_rbuf->ring_info_head);
+ !dll_end(&pktlog_rbuf->ring_info_head, item_p);
+ item_p = prev_p) {
+ prev_p = dll_prev_p(item_p);
+ if (prev_p == NULL)
+ break;
+
+ ret = dhd_cpkt_log_build(pktlog, (dhd_pktlog_ring_info_t *)item_p,
+ curr_ts_nsec, &cpkt);
+ if (ret < 0)
+ continue;
+
+ offset = dhd_bit_pack(buf, buf_len, offset, cpkt, CPKT_LOG_BIT_SIZE);
+
+ pkt_cnt++;
+ if (pkt_cnt >= req_pkt_num)
+ break;
+ }
+
+ return offset;
+}
+
+static void
+dhd_cpkt_log_insert_ts(dhd_cpkt_log_ts_node_t *node, struct rb_root *root)
+{
+ struct rb_node **new = &root->rb_node, *parent = NULL;
+ u64 ts_diff = node->ts_diff;
+
+ while (*new) {
+ parent = *new;
+ if (ts_diff < rb_entry(parent, dhd_cpkt_log_ts_node_t, rb)->ts_diff)
+ new = &parent->rb_left;
+ else
+ new = &parent->rb_right;
+ }
+
+ rb_link_node(&node->rb, parent, new);
+ rb_insert_color(&node->rb, root);
+}
+
+static void
+dhd_cpkt_log_deinit_tt(dhd_pub_t *dhdp)
+{
+ struct rb_node *n;
+ dhd_pktlog_t *pktlog = dhdp->pktlog;
+
+ dhd_cpkt_log_ts_node_t *node;
+
+ while ((n = rb_first(&pktlog->cpkt_log_tt_rbt))) {
+ node = rb_entry(n, dhd_cpkt_log_ts_node_t, rb);
+ rb_erase(&node->rb, &pktlog->cpkt_log_tt_rbt);
+ MFREE(dhdp->osh, node, sizeof(*node));
+ }
+}
+
+static int
+dhd_cpkt_log_init_tt(dhd_pub_t *dhdp)
+{
+ int i;
+ int ret = BCME_OK;
+
+ dhd_pktlog_t *pktlog = dhdp->pktlog;
+
+ dhd_cpkt_log_ts_node_t *node;
+
+ for (i = 0; i < ARRAYSIZE(dhd_cpkt_log_tt_idx); i++) {
+ node = (dhd_cpkt_log_ts_node_t *)MALLOCZ(dhdp->osh, sizeof(*node));
+ if (!node) {
+ ret = BCME_NOMEM;
+ goto exit;
+ }
+ node->ts_diff = dhd_cpkt_log_tt_idx[i];
+ node->idx = i;
+
+ dhd_cpkt_log_insert_ts(node, &pktlog->cpkt_log_tt_rbt);
+ }
+
+ return BCME_OK;
+exit:
+ dhd_cpkt_log_deinit_tt(dhdp);
+
+ return ret;
+}
+#endif /* DHD_COMPACT_PKT_LOG */
+#endif /* DHD_PKT_LOGGING */
diff --git a/bcmdhd.101.10.361.x/dhd_pktlog.h b/bcmdhd.101.10.361.x/dhd_pktlog.h
new file mode 100755
index 0000000..bfa38f5
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_pktlog.h
@@ -0,0 +1,311 @@
+/*
+ * DHD debugability packet logging header file
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+#ifndef __DHD_PKTLOG_H_
+#define __DHD_PKTLOG_H_
+
+#include <dhd_debug.h>
+#include <dhd.h>
+#include <asm/atomic.h>
+#ifdef DHD_COMPACT_PKT_LOG
+#include <linux/rbtree.h>
+#endif /* DHD_COMPACT_PKT_LOG */
+
+#ifdef DHD_PKT_LOGGING
+#define DHD_PKT_LOG(args) DHD_INFO(args)
+#define DEFAULT_MULTIPLE_PKTLOG_BUF 1
+#ifndef CUSTOM_MULTIPLE_PKTLOG_BUF
+#define CUSTOM_MULTIPLE_PKTLOG_BUF DEFAULT_MULTIPLE_PKTLOG_BUF
+#endif /* CUSTOM_MULTIPLE_PKTLOG_BUF */
+#define MIN_PKTLOG_LEN (32 * 10 * 2 * CUSTOM_MULTIPLE_PKTLOG_BUF)
+#define MAX_PKTLOG_LEN (32 * 10 * 2 * 10)
+#define MAX_DHD_PKTLOG_FILTER_LEN 14
+#define MAX_MASK_PATTERN_FILTER_LEN 64
+#define PKTLOG_TXPKT_CASE 0x0001
+#define PKTLOG_TXSTATUS_CASE 0x0002
+#define PKTLOG_RXPKT_CASE 0x0004
+/* MAX_FILTER_PATTERN_LEN is buf len to print bitmask/pattern with string */
+#define MAX_FILTER_PATTERN_LEN \
+ (((MAX_MASK_PATTERN_FILTER_LEN * HD_BYTE_SIZE) + HD_PREFIX_SIZE + 1) * 2)
+#define PKTLOG_DUMP_BUF_SIZE (64 * 1024)
+
+typedef struct dhd_dbg_pktlog_info {
+ frame_type payload_type;
+ size_t pkt_len;
+ uint32 driver_ts_sec;
+ uint32 driver_ts_usec;
+ uint32 firmware_ts;
+ uint32 pkt_hash;
+ bool direction;
+ void *pkt;
+} dhd_dbg_pktlog_info_t;
+
+typedef struct dhd_pktlog_ring_info
+{
+ dll_t p_info; /* list pointer */
+ union {
+ wifi_tx_packet_fate tx_fate;
+ wifi_rx_packet_fate rx_fate;
+ uint32 fate;
+ };
+ dhd_dbg_pktlog_info_t info;
+} dhd_pktlog_ring_info_t;
+
+typedef struct dhd_pktlog_ring
+{
+ dll_t ring_info_head; /* ring_info list */
+ dll_t ring_info_free; /* ring_info free list */
+ osl_atomic_t start;
+ uint32 pktlog_minmize;
+ uint32 pktlog_len; /* size of pkts */
+ uint32 pktcount;
+ spinlock_t *pktlog_ring_lock;
+ dhd_pub_t *dhdp;
+ dhd_pktlog_ring_info_t *ring_info_mem; /* ring_info mem pointer */
+} dhd_pktlog_ring_t;
+
+typedef struct dhd_pktlog_filter_info
+{
+ uint32 id;
+ uint32 offset;
+ uint32 size_bytes; /* Size of pattern. */
+ uint32 enable;
+ uint8 mask[MAX_MASK_PATTERN_FILTER_LEN];
+ uint8 pattern[MAX_MASK_PATTERN_FILTER_LEN];
+} dhd_pktlog_filter_info_t;
+
+typedef struct dhd_pktlog_filter
+{
+ dhd_pktlog_filter_info_t *info;
+ uint32 list_cnt;
+ uint32 enable;
+} dhd_pktlog_filter_t;
+
+typedef struct dhd_pktlog
+{
+ struct dhd_pktlog_ring *pktlog_ring;
+ struct dhd_pktlog_filter *pktlog_filter;
+ osl_atomic_t pktlog_status;
+ dhd_pub_t *dhdp;
+#ifdef DHD_COMPACT_PKT_LOG
+ struct rb_root cpkt_log_tt_rbt;
+#endif /* DHD_COMPACT_PKT_LOG */
+} dhd_pktlog_t;
+
+typedef struct dhd_pktlog_pcap_hdr
+{
+ uint32 magic_number;
+ uint16 version_major;
+ uint16 version_minor;
+ uint16 thiszone;
+ uint32 sigfigs;
+ uint32 snaplen;
+ uint32 network;
+} dhd_pktlog_pcap_hdr_t;
+
+#define PKTLOG_PCAP_MAGIC_NUM 0xa1b2c3d4
+#define PKTLOG_PCAP_MAJOR_VER 0x02
+#define PKTLOG_PCAP_MINOR_VER 0x04
+#define PKTLOG_PCAP_SNAP_LEN 0x40000
+#define PKTLOG_PCAP_NETWORK_TYPE 147
+
+extern int dhd_os_attach_pktlog(dhd_pub_t *dhdp);
+extern int dhd_os_detach_pktlog(dhd_pub_t *dhdp);
+extern dhd_pktlog_ring_t* dhd_pktlog_ring_init(dhd_pub_t *dhdp, int size);
+extern int dhd_pktlog_ring_deinit(dhd_pub_t *dhdp, dhd_pktlog_ring_t *ring);
+extern int dhd_pktlog_ring_set_nextpos(dhd_pktlog_ring_t *ringbuf);
+extern int dhd_pktlog_ring_get_nextbuf(dhd_pktlog_ring_t *ringbuf, void **data);
+extern int dhd_pktlog_ring_set_prevpos(dhd_pktlog_ring_t *ringbuf);
+extern int dhd_pktlog_ring_get_prevbuf(dhd_pktlog_ring_t *ringbuf, void **data);
+extern int dhd_pktlog_ring_get_writebuf(dhd_pktlog_ring_t *ringbuf, void **data);
+extern int dhd_pktlog_ring_add_pkts(dhd_pub_t *dhdp, void *pkt, void *pktdata, uint32 pktid,
+ uint32 direction);
+extern int dhd_pktlog_ring_tx_status(dhd_pub_t *dhdp, void *pkt, void *pktdata, uint32 pktid,
+ uint16 status);
+extern dhd_pktlog_ring_t* dhd_pktlog_ring_change_size(dhd_pktlog_ring_t *ringbuf, int size);
+extern void dhd_pktlog_filter_pull_forward(dhd_pktlog_filter_t *filter,
+ uint32 del_filter_id, uint32 list_cnt);
+
+#define PKT_RX 0
+#define PKT_TX 1
+#define PKT_WAKERX 2
+#define DHD_INVALID_PKTID (0U)
+#define PKTLOG_TRANS_TX 0x01
+#define PKTLOG_TRANS_RX 0x02
+#define PKTLOG_TRANS_TXS 0x04
+
+#define PKTLOG_SET_IN_TX(dhdp) \
+{ \
+ do { \
+ OSL_ATOMIC_OR((dhdp)->osh, &(dhdp)->pktlog->pktlog_status, PKTLOG_TRANS_TX); \
+ } while (0); \
+}
+
+#define PKTLOG_SET_IN_RX(dhdp) \
+{ \
+ do { \
+ OSL_ATOMIC_OR((dhdp)->osh, &(dhdp)->pktlog->pktlog_status, PKTLOG_TRANS_RX); \
+ } while (0); \
+}
+
+#define PKTLOG_SET_IN_TXS(dhdp) \
+{ \
+ do { \
+ OSL_ATOMIC_OR((dhdp)->osh, &(dhdp)->pktlog->pktlog_status, PKTLOG_TRANS_TXS); \
+ } while (0); \
+}
+
+#define PKTLOG_CLEAR_IN_TX(dhdp) \
+{ \
+ do { \
+ OSL_ATOMIC_AND((dhdp)->osh, &(dhdp)->pktlog->pktlog_status, ~PKTLOG_TRANS_TX); \
+ } while (0); \
+}
+
+#define PKTLOG_CLEAR_IN_RX(dhdp) \
+{ \
+ do { \
+ OSL_ATOMIC_AND((dhdp)->osh, &(dhdp)->pktlog->pktlog_status, ~PKTLOG_TRANS_RX); \
+ } while (0); \
+}
+
+#define PKTLOG_CLEAR_IN_TXS(dhdp) \
+{ \
+ do { \
+ OSL_ATOMIC_AND((dhdp)->osh, &(dhdp)->pktlog->pktlog_status, ~PKTLOG_TRANS_TXS); \
+ } while (0); \
+}
+
+#define DHD_PKTLOG_TX(dhdp, pkt, pktdata, pktid) \
+{ \
+ do { \
+ if ((dhdp) && (dhdp)->pktlog && (pkt)) { \
+ PKTLOG_SET_IN_TX(dhdp); \
+ if ((dhdp)->pktlog->pktlog_ring && \
+ OSL_ATOMIC_READ((dhdp)->osh, \
+ (&(dhdp)->pktlog->pktlog_ring->start))) { \
+ dhd_pktlog_ring_add_pkts(dhdp, pkt, pktdata, pktid, PKT_TX); \
+ } \
+ PKTLOG_CLEAR_IN_TX(dhdp); \
+ } \
+ } while (0); \
+}
+
+#define DHD_PKTLOG_TXS(dhdp, pkt, pktdata, pktid, status) \
+{ \
+ do { \
+ if ((dhdp) && (dhdp)->pktlog && (pkt)) { \
+ PKTLOG_SET_IN_TXS(dhdp); \
+ if ((dhdp)->pktlog->pktlog_ring && \
+ OSL_ATOMIC_READ((dhdp)->osh, \
+ (&(dhdp)->pktlog->pktlog_ring->start))) { \
+ dhd_pktlog_ring_tx_status(dhdp, pkt, pktdata, pktid, status); \
+ } \
+ PKTLOG_CLEAR_IN_TXS(dhdp); \
+ } \
+ } while (0); \
+}
+
+#define DHD_PKTLOG_RX(dhdp, pkt, pktdata) \
+{ \
+ do { \
+ if ((dhdp) && (dhdp)->pktlog && (pkt)) { \
+ PKTLOG_SET_IN_RX(dhdp); \
+ if (ntoh16((pkt)->protocol) != ETHER_TYPE_BRCM) { \
+ if ((dhdp)->pktlog->pktlog_ring && \
+ OSL_ATOMIC_READ((dhdp)->osh, \
+ (&(dhdp)->pktlog->pktlog_ring->start))) { \
+ dhd_pktlog_ring_add_pkts(dhdp, pkt, pktdata, \
+ DHD_INVALID_PKTID, PKT_RX); \
+ } \
+ } \
+ PKTLOG_CLEAR_IN_RX(dhdp); \
+ } \
+ } while (0); \
+}
+
+#define DHD_PKTLOG_WAKERX(dhdp, pkt, pktdata) \
+{ \
+ do { \
+ if ((dhdp) && (dhdp)->pktlog && (pkt)) { \
+ PKTLOG_SET_IN_RX(dhdp); \
+ if (ntoh16((pkt)->protocol) != ETHER_TYPE_BRCM) { \
+ if ((dhdp)->pktlog->pktlog_ring && \
+ OSL_ATOMIC_READ((dhdp)->osh, \
+ (&(dhdp)->pktlog->pktlog_ring->start))) { \
+ dhd_pktlog_ring_add_pkts(dhdp, pkt, pktdata, \
+ DHD_INVALID_PKTID, PKT_WAKERX); \
+ } \
+ } \
+ PKTLOG_CLEAR_IN_RX(dhdp); \
+ } \
+ } while (0); \
+}
+
+extern dhd_pktlog_filter_t* dhd_pktlog_filter_init(int size);
+extern int dhd_pktlog_filter_deinit(dhd_pktlog_filter_t *filter);
+extern int dhd_pktlog_filter_add(dhd_pktlog_filter_t *filter, char *arg);
+extern int dhd_pktlog_filter_del(dhd_pktlog_filter_t *filter, char *arg);
+extern int dhd_pktlog_filter_enable(dhd_pktlog_filter_t *filter, uint32 pktlog_case, uint32 enable);
+extern int dhd_pktlog_filter_pattern_enable(dhd_pktlog_filter_t *filter, char *arg, uint32 enable);
+extern int dhd_pktlog_filter_info(dhd_pktlog_filter_t *filter);
+extern bool dhd_pktlog_filter_matched(dhd_pktlog_filter_t *filter, char *data, uint32 pktlog_case);
+extern bool dhd_pktlog_filter_existed(dhd_pktlog_filter_t *filter, char *arg, uint32 *id);
+
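+/*
+ * Convenience wrapper: copy a bounded pattern string into filter_pattern
+ * and register it. Illustrative use (buffer name hypothetical):
+ *   char fp[MAX_FILTER_PATTERN_LEN];
+ *   DHD_PKTLOG_FILTER_ADD("12 0xffff 0x0800", fp, dhdp);
+ */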
+#define DHD_PKTLOG_FILTER_ADD(pattern, filter_pattern, dhdp) \
+{ \
+ do { \
+ if ((strlen(pattern) + 1) < sizeof(filter_pattern)) { \
+ strncpy(filter_pattern, pattern, sizeof(filter_pattern)); \
+ dhd_pktlog_filter_add(dhdp->pktlog->pktlog_filter, filter_pattern); \
+ } \
+ } while (0); \
+}
+
+#define DHD_PKTLOG_DUMP_PATH DHD_COMMON_DUMP_PATH
+extern int dhd_pktlog_debug_dump(dhd_pub_t *dhdp);
+extern void dhd_pktlog_dump(void *handle, void *event_info, u8 event);
+extern void dhd_schedule_pktlog_dump(dhd_pub_t *dhdp);
+extern int dhd_pktlog_dump_write_memory(dhd_pub_t *dhdp, const void *user_buf, uint32 size);
+extern int dhd_pktlog_dump_write_file(dhd_pub_t *dhdp);
+
+#define DHD_PKTLOG_FATE_INFO_STR_LEN 256
+#define DHD_PKTLOG_FATE_INFO_FORMAT "BRCM_Packet_Fate"
+#define DHD_PKTLOG_DUMP_TYPE "pktlog_dump"
+#define DHD_PKTLOG_DEBUG_DUMP_TYPE "pktlog_debug_dump"
+
+extern void dhd_pktlog_get_filename(dhd_pub_t *dhdp, char *dump_path, int len);
+extern uint32 dhd_pktlog_get_item_length(dhd_pktlog_ring_info_t *report_ptr);
+extern uint32 dhd_pktlog_get_dump_length(dhd_pub_t *dhdp);
+extern uint32 __dhd_dbg_pkt_hash(uintptr_t pkt, uint32 pktid);
+
+#ifdef DHD_COMPACT_PKT_LOG
+#define CPKT_LOG_BIT_SIZE 22
+#define CPKT_LOG_MAX_NUM 80
+extern int dhd_cpkt_log_proc(dhd_pub_t *dhdp, char *buf, int buf_len,
+ int bit_offset, int req_pkt_num);
+#endif /* DHD_COMPACT_PKT_LOG */
+#endif /* DHD_PKT_LOGGING */
+#endif /* __DHD_PKTLOG_H_ */
diff --git a/bcmdhd.101.10.361.x/dhd_plat.h b/bcmdhd.101.10.361.x/dhd_plat.h
new file mode 100755
index 0000000..8c07b5b
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_plat.h
@@ -0,0 +1,58 @@
+/*
+ * DHD Linux platform header file
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+#ifndef __DHD_PLAT_H__
+#define __DHD_PLAT_H__
+
+#include <linuxver.h>
+
+#if !defined(CONFIG_WIFI_CONTROL_FUNC)
+#define WLAN_PLAT_NODFS_FLAG 0x01
+#define WLAN_PLAT_AP_FLAG 0x02
+struct wifi_platform_data {
+ int (*set_power)(int val, wifi_adapter_info_t *adapter);
+ int (*set_reset)(int val);
+ int (*set_carddetect)(int val);
+#ifdef DHD_COREDUMP
+ int (*set_coredump)(const char *buf, int buf_len, const char *info);
+#endif /* DHD_COREDUMP */
+#ifdef BCMDHD_MDRIVER
+ void *(*mem_prealloc)(uint bus_type, int index, int section, unsigned long size);
+#else
+ void *(*mem_prealloc)(int section, unsigned long size);
+#endif
+ int (*get_mac_addr)(unsigned char *buf, int ifidx);
+#ifdef BCMSDIO
+ int (*get_wake_irq)(void);
+#endif
+#ifdef CUSTOM_FORCE_NODFS_FLAG
+ void *(*get_country_code)(char *ccode, u32 flags);
+#else /* defined (CUSTOM_FORCE_NODFS_FLAG) */
+ void *(*get_country_code)(char *ccode);
+#endif
+};
+#endif /* CONFIG_WIFI_CONTROL_FUNC */
+
+#endif /* __DHD_PLAT_H__ */
diff --git a/bcmdhd.101.10.361.x/dhd_pno.c b/bcmdhd.101.10.361.x/dhd_pno.c
new file mode 100755
index 0000000..e002405
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_pno.c
@@ -0,0 +1,4871 @@
+/*
+ * Broadcom Dongle Host Driver (DHD)
+ * Preferred Network Offload and Wi-Fi Location Service (WLS) code.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#if defined(GSCAN_SUPPORT) && !defined(PNO_SUPPORT)
+#error "GSCAN needs PNO to be enabled!"
+#endif
+
+#ifdef PNO_SUPPORT
+#include <typedefs.h>
+#include <osl.h>
+
+#include <epivers.h>
+#include <bcmutils.h>
+
+#include <bcmendian.h>
+
+#ifdef OEM_ANDROID
+#include <linuxver.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/sort.h>
+#endif
+
+#include <dngl_stats.h>
+#include <wlioctl.h>
+
+#include <bcmevent.h>
+#include <dhd.h>
+#include <dhd_pno.h>
+#include <dhd_dbg.h>
+#ifdef GSCAN_SUPPORT
+#include <linux/gcd.h>
+#endif /* GSCAN_SUPPORT */
+#ifdef WL_CFG80211
+#include <wl_cfg80211.h>
+#endif /* WL_CFG80211 */
+
+#ifdef __BIG_ENDIAN
+#include <bcmendian.h>
+#define htod32(i) (bcmswap32(i))
+#define htod16(i) (bcmswap16(i))
+#define dtoh32(i) (bcmswap32(i))
+#define dtoh16(i) (bcmswap16(i))
+#define htodchanspec(i) htod16(i)
+#define dtohchanspec(i) dtoh16(i)
+#else
+#define htod32(i) (i)
+#define htod16(i) (i)
+#define dtoh32(i) (i)
+#define dtoh16(i) (i)
+#define htodchanspec(i) (i)
+#define dtohchanspec(i) (i)
+#endif /* __BIG_ENDIAN */
+
+#ifdef OEM_ANDROID
+#define NULL_CHECK(p, s, err) \
+ do { \
+ if (!(p)) { \
+ printf("NULL POINTER (%s) : %s\n", __FUNCTION__, (s)); \
+ err = BCME_ERROR; \
+ return err; \
+ } \
+ } while (0)
+#define PNO_GET_PNOSTATE(dhd) ((dhd_pno_status_info_t *)dhd->pno_state)
+
+#define PNO_BESTNET_LEN WLC_IOCTL_MEDLEN
+
+#define PNO_ON 1
+#define PNO_OFF 0
+#define CHANNEL_2G_MIN 1
+#define CHANNEL_2G_MAX 14
+#define CHANNEL_5G_MIN 34
+#define CHANNEL_5G_MAX 165
+#define IS_2G_CHANNEL(ch) ((ch >= CHANNEL_2G_MIN) && \
+ (ch <= CHANNEL_2G_MAX))
+#define IS_5G_CHANNEL(ch) ((ch >= CHANNEL_5G_MIN) && \
+ (ch <= CHANNEL_5G_MAX))
+#define MAX_NODE_CNT 5
+#define WLS_SUPPORTED(pno_state) (pno_state->wls_supported == TRUE)
+#define TIME_DIFF(timestamp1, timestamp2) (abs((uint32)(timestamp1/1000) \
+ - (uint32)(timestamp2/1000)))
+#define TIME_DIFF_MS(timestamp1, timestamp2) (abs((uint32)(timestamp1) \
+ - (uint32)(timestamp2)))
+#define TIMESPEC_TO_US(ts) (((uint64)(ts).tv_sec * USEC_PER_SEC) + \
+ (ts).tv_nsec / NSEC_PER_USEC)
+
+#define ENTRY_OVERHEAD strlen("bssid=\nssid=\nfreq=\nlevel=\nage=\ndist=\ndistSd=\n====")
+#define TIME_MIN_DIFF 5
+
+#define EVENT_DATABUF_MAXLEN (512 - sizeof(bcm_event_t))
+#define EVENT_MAX_NETCNT_V1 \
+ ((EVENT_DATABUF_MAXLEN - sizeof(wl_pfn_scanresults_v1_t)) \
+ / sizeof(wl_pfn_net_info_v1_t) + 1)
+#define EVENT_MAX_NETCNT_V2 \
+ ((EVENT_DATABUF_MAXLEN - sizeof(wl_pfn_scanresults_v2_t)) \
+ / sizeof(wl_pfn_net_info_v2_t) + 1)
+#define EVENT_MAX_NETCNT_V3 \
+ ((EVENT_DATABUF_MAXLEN - sizeof(wl_pfn_scanresults_v3_t)) \
+ / sizeof(wl_pfn_net_info_v3_t) + 1)
+
+#ifdef GSCAN_SUPPORT
+static int _dhd_pno_flush_ssid(dhd_pub_t *dhd);
+static wl_pfn_gscan_ch_bucket_cfg_t *
+dhd_pno_gscan_create_channel_list(dhd_pub_t *dhd, dhd_pno_status_info_t *pno_state,
+ uint16 *chan_list, uint32 *num_buckets, uint32 *num_buckets_to_fw);
+#endif /* GSCAN_SUPPORT */
+
+static int dhd_pno_set_legacy_pno(dhd_pub_t *dhd, uint16 scan_fr, int pno_repeat,
+ int pno_freq_expo_max, uint16 *channel_list, int nchan);
+
+static inline bool
+is_dfs(dhd_pub_t *dhd, uint16 channel)
+{
+ u32 ch;
+ s32 err;
+ u8 buf[32];
+
+ ch = wl_ch_host_to_driver(channel);
+ err = dhd_iovar(dhd, 0, "per_chan_info", (char *)&ch,
+ sizeof(u32), buf, sizeof(buf), FALSE);
+ if (unlikely(err)) {
+ DHD_ERROR(("get per chan info failed:%d\n", err));
+ return FALSE;
+ }
+ /* Check the channel flags returned by fw */
+ if (*((u32 *)buf) & WL_CHAN_PASSIVE) {
+ return TRUE;
+ }
+ return FALSE;
+}
+
+int
+dhd_pno_clean(dhd_pub_t *dhd)
+{
+ int pfn = 0;
+ int err;
+ dhd_pno_status_info_t *_pno_state;
+ NULL_CHECK(dhd, "dhd is NULL", err);
+ NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+ _pno_state = PNO_GET_PNOSTATE(dhd);
+ DHD_PNO(("%s enter\n", __FUNCTION__));
+ /* Disable PNO */
+ err = dhd_iovar(dhd, 0, "pfn", (char *)&pfn, sizeof(pfn), NULL, 0, TRUE);
+ if (err < 0) {
+ DHD_ERROR(("%s : failed to execute pfn(error : %d)\n",
+ __FUNCTION__, err));
+ goto exit;
+ }
+ _pno_state->pno_status = DHD_PNO_DISABLED;
+ err = dhd_iovar(dhd, 0, "pfnclear", NULL, 0, NULL, 0, TRUE);
+ if (err < 0) {
+ DHD_ERROR(("%s : failed to execute pfnclear(error : %d)\n",
+ __FUNCTION__, err));
+ }
+exit:
+ return err;
+}
+
+bool
+dhd_is_pno_supported(dhd_pub_t *dhd)
+{
+ dhd_pno_status_info_t *_pno_state;
+
+ if (!dhd || !dhd->pno_state) {
+ DHD_ERROR(("NULL POINTER : %s\n",
+ __FUNCTION__));
+ return FALSE;
+ }
+ _pno_state = PNO_GET_PNOSTATE(dhd);
+ return WLS_SUPPORTED(_pno_state);
+}
+
+bool
+dhd_is_legacy_pno_enabled(dhd_pub_t *dhd)
+{
+ dhd_pno_status_info_t *_pno_state;
+
+ if (!dhd || !dhd->pno_state) {
+ DHD_ERROR(("NULL POINTER : %s\n",
+ __FUNCTION__));
+ return FALSE;
+ }
+ _pno_state = PNO_GET_PNOSTATE(dhd);
+ return ((_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) != 0);
+}
+
+#ifdef GSCAN_SUPPORT
+static uint64
+convert_fw_rel_time_to_systime(struct osl_timespec *ts, uint32 fw_ts_ms)
+{
+ return ((uint64)(TIMESPEC_TO_US(*ts)) - (uint64)(fw_ts_ms * 1000));
+}
+
+static void
+dhd_pno_idx_to_ssid(struct dhd_pno_gscan_params *gscan_params,
+ dhd_epno_results_t *res, uint32 idx)
+{
+ dhd_pno_ssid_t *iter, *next;
+ int i;
+
+ /* If idx doesn't make sense */
+ if (idx >= gscan_params->epno_cfg.num_epno_ssid) {
+ DHD_ERROR(("No match, idx %d num_ssid %d\n", idx,
+ gscan_params->epno_cfg.num_epno_ssid));
+ goto exit;
+ }
+
+ if (gscan_params->epno_cfg.num_epno_ssid > 0) {
+ i = 0;
+
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ list_for_each_entry_safe(iter, next,
+ &gscan_params->epno_cfg.epno_ssid_list, list) {
+ GCC_DIAGNOSTIC_POP();
+ if (i++ == idx) {
+ memcpy(res->ssid, iter->SSID, iter->SSID_len);
+ res->ssid_len = iter->SSID_len;
+ return;
+ }
+ }
+ }
+exit:
+ /* If we are here then there was no match */
+ res->ssid[0] = '\0';
+ res->ssid_len = 0;
+ return;
+}
+
+/* Translate HAL flag bitmask to BRCM FW flag bitmask */
+void
+dhd_pno_translate_epno_fw_flags(uint32 *flags)
+{
+ uint32 in_flags, fw_flags = 0;
+ in_flags = *flags;
+
+ if (in_flags & DHD_EPNO_A_BAND_TRIG) {
+ fw_flags |= WL_PFN_SSID_A_BAND_TRIG;
+ }
+
+ if (in_flags & DHD_EPNO_BG_BAND_TRIG) {
+ fw_flags |= WL_PFN_SSID_BG_BAND_TRIG;
+ }
+
+ if (!(in_flags & DHD_EPNO_STRICT_MATCH) &&
+ !(in_flags & DHD_EPNO_HIDDEN_SSID)) {
+ fw_flags |= WL_PFN_SSID_IMPRECISE_MATCH;
+ }
+
+ if (in_flags & DHD_EPNO_SAME_NETWORK) {
+ fw_flags |= WL_PFN_SSID_SAME_NETWORK;
+ }
+
+ /* Add any hard coded flags needed */
+ fw_flags |= WL_PFN_SUPPRESS_AGING_MASK;
+ *flags = fw_flags;
+
+ return;
+}
+
+/* Translate HAL auth bitmask to BRCM FW bitmask */
+void
+dhd_pno_set_epno_auth_flag(uint32 *wpa_auth)
+{
+ switch (*wpa_auth) {
+ case DHD_PNO_AUTH_CODE_OPEN:
+ *wpa_auth = WPA_AUTH_DISABLED;
+ break;
+ case DHD_PNO_AUTH_CODE_PSK:
+ *wpa_auth = (WPA_AUTH_PSK | WPA2_AUTH_PSK);
+ break;
+ case DHD_PNO_AUTH_CODE_EAPOL:
+ *wpa_auth = ~WPA_AUTH_NONE;
+ break;
+ default:
+ DHD_ERROR(("%s: Unknown auth %d", __FUNCTION__, *wpa_auth));
+ *wpa_auth = WPA_AUTH_PFN_ANY;
+ break;
+ }
+ return;
+}
+
+/* Cleanup all results */
+static void
+dhd_gscan_clear_all_batch_results(dhd_pub_t *dhd)
+{
+ struct dhd_pno_gscan_params *gscan_params;
+ dhd_pno_status_info_t *_pno_state;
+ gscan_results_cache_t *iter;
+
+ _pno_state = PNO_GET_PNOSTATE(dhd);
+ gscan_params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan;
+ iter = gscan_params->gscan_batch_cache;
+ /* Mark everything as consumed */
+ while (iter) {
+ iter->tot_consumed = iter->tot_count;
+ iter = iter->next;
+ }
+ dhd_gscan_batch_cache_cleanup(dhd);
+ return;
+}
+
+static int
+_dhd_pno_gscan_cfg(dhd_pub_t *dhd, wl_pfn_gscan_cfg_t *pfncfg_gscan_param, int size)
+{
+ int err = BCME_OK;
+ NULL_CHECK(dhd, "dhd is NULL", err);
+
+ DHD_PNO(("%s enter\n", __FUNCTION__));
+
+ err = dhd_iovar(dhd, 0, "pfn_gscan_cfg", (char *)pfncfg_gscan_param, size, NULL, 0, TRUE);
+ if (err < 0) {
+ DHD_ERROR(("%s : failed to execute pfncfg_gscan_param\n", __FUNCTION__));
+ goto exit;
+ }
+exit:
+ return err;
+}
+
+static int
+_dhd_pno_flush_ssid(dhd_pub_t *dhd)
+{
+ int err;
+ wl_pfn_t pfn_elem;
+ memset(&pfn_elem, 0, sizeof(wl_pfn_t));
+ pfn_elem.flags = htod32(WL_PFN_FLUSH_ALL_SSIDS);
+
+ err = dhd_iovar(dhd, 0, "pfn_add", (char *)&pfn_elem, sizeof(wl_pfn_t), NULL, 0, TRUE);
+ if (err < 0) {
+ DHD_ERROR(("%s : failed to execute pfn_add\n", __FUNCTION__));
+ }
+ return err;
+}
+
+static bool
+is_batch_retrieval_complete(struct dhd_pno_gscan_params *gscan_params)
+{
+ smp_rmb();
+ return (gscan_params->get_batch_flag == GSCAN_BATCH_RETRIEVAL_COMPLETE);
+}
+#endif /* GSCAN_SUPPORT */
+
+static int
+_dhd_pno_suspend(dhd_pub_t *dhd)
+{
+ int err;
+ int suspend = 1;
+ dhd_pno_status_info_t *_pno_state;
+ NULL_CHECK(dhd, "dhd is NULL", err);
+ NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+
+ DHD_PNO(("%s enter\n", __FUNCTION__));
+ _pno_state = PNO_GET_PNOSTATE(dhd);
+ err = dhd_iovar(dhd, 0, "pfn_suspend", (char *)&suspend, sizeof(suspend), NULL, 0, TRUE);
+ if (err < 0) {
+ DHD_ERROR(("%s : failed to suspend pfn(error :%d)\n", __FUNCTION__, err));
+ goto exit;
+
+ }
+ _pno_state->pno_status = DHD_PNO_SUSPEND;
+exit:
+ return err;
+}
+
+static int
+_dhd_pno_enable(dhd_pub_t *dhd, int enable)
+{
+ int err = BCME_OK;
+ dhd_pno_status_info_t *_pno_state;
+ NULL_CHECK(dhd, "dhd is NULL", err);
+ NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+ _pno_state = PNO_GET_PNOSTATE(dhd);
+ DHD_PNO(("%s enter\n", __FUNCTION__));
+
+ if (enable & 0xfffe) {
+ DHD_ERROR(("%s invalid value\n", __FUNCTION__));
+ err = BCME_BADARG;
+ goto exit;
+ }
+ if (!dhd_support_sta_mode(dhd)) {
+ DHD_ERROR(("PNO is not allowed for non-STA mode"));
+ err = BCME_BADOPTION;
+ goto exit;
+ }
+ if (enable) {
+ if ((_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) &&
+ dhd_is_associated(dhd, 0, NULL)) {
+ DHD_ERROR(("%s Legacy PNO mode cannot be enabled "
+ "in assoc mode , ignore it\n", __FUNCTION__));
+ err = BCME_BADOPTION;
+ goto exit;
+ }
+ }
+ /* Enable/Disable PNO */
+ err = dhd_iovar(dhd, 0, "pfn", (char *)&enable, sizeof(enable), NULL, 0, TRUE);
+ if (err < 0) {
+ DHD_ERROR(("%s : failed to execute pfn_set - %d\n", __FUNCTION__, err));
+ goto exit;
+ }
+ _pno_state->pno_status = (enable)?
+ DHD_PNO_ENABLED : DHD_PNO_DISABLED;
+ if (!enable)
+ _pno_state->pno_mode = DHD_PNO_NONE_MODE;
+
+ DHD_PNO(("%s set pno as %s\n",
+ __FUNCTION__, enable ? "Enable" : "Disable"));
+exit:
+ return err;
+}
+
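+/*
+ * Program the firmware PFN engine for the requested mode; when legacy PNO
+ * is combined with batch/hotlist/gscan, the shared pfn_param fields are
+ * merged from both parameter sets before pfn_set is issued.
+ */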
+static int
+_dhd_pno_set(dhd_pub_t *dhd, const dhd_pno_params_t *pno_params, dhd_pno_mode_t mode)
+{
+ int err = BCME_OK;
+ wl_pfn_param_t pfn_param;
+ dhd_pno_params_t *_params;
+ dhd_pno_status_info_t *_pno_state;
+ bool combined_scan = FALSE;
+ DHD_PNO(("%s enter\n", __FUNCTION__));
+
+ NULL_CHECK(dhd, "dhd is NULL", err);
+ NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+ _pno_state = PNO_GET_PNOSTATE(dhd);
+
+ memset(&pfn_param, 0, sizeof(pfn_param));
+
+ /* set pfn parameters */
+ pfn_param.version = htod32(PFN_VERSION);
+ pfn_param.flags = ((PFN_LIST_ORDER << SORT_CRITERIA_BIT) |
+ (ENABLE << IMMEDIATE_SCAN_BIT) | (ENABLE << REPORT_SEPERATELY_BIT));
+ if (mode == DHD_PNO_LEGACY_MODE) {
+ /* check and set extra pno params */
+ if ((pno_params->params_legacy.pno_repeat != 0) ||
+ (pno_params->params_legacy.pno_freq_expo_max != 0)) {
+ pfn_param.flags |= htod16(ENABLE << ENABLE_ADAPTSCAN_BIT);
+ pfn_param.repeat = (uchar) (pno_params->params_legacy.pno_repeat);
+ pfn_param.exp = (uchar) (pno_params->params_legacy.pno_freq_expo_max);
+ }
+ /* set up pno scan fr */
+ if (pno_params->params_legacy.scan_fr != 0)
+ pfn_param.scan_freq = htod32(pno_params->params_legacy.scan_fr);
+ if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) {
+ DHD_PNO(("will enable combined scan with BATCHIG SCAN MODE\n"));
+ mode |= DHD_PNO_BATCH_MODE;
+ combined_scan = TRUE;
+ } else if (_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE) {
+ DHD_PNO(("will enable combined scan with HOTLIST SCAN MODE\n"));
+ mode |= DHD_PNO_HOTLIST_MODE;
+ combined_scan = TRUE;
+ }
+#ifdef GSCAN_SUPPORT
+ else if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) {
+ DHD_PNO(("will enable combined scan with GSCAN SCAN MODE\n"));
+ mode |= DHD_PNO_GSCAN_MODE;
+ }
+#endif /* GSCAN_SUPPORT */
+ }
+ if (mode & (DHD_PNO_BATCH_MODE | DHD_PNO_HOTLIST_MODE)) {
+ /* Scan frequency of 30 sec */
+ pfn_param.scan_freq = htod32(30);
+ /* slow adapt scan is off by default */
+ pfn_param.slow_freq = htod32(0);
+ /* RSSI margin of 30 dBm */
+ pfn_param.rssi_margin = htod16(PNO_RSSI_MARGIN_DBM);
+ /* Network timeout 60 sec */
+ pfn_param.lost_network_timeout = htod32(60);
+ /* best n = 2 by default */
+ pfn_param.bestn = DEFAULT_BESTN;
+ /* mscan m=0 by default, so not record best networks by default */
+ pfn_param.mscan = DEFAULT_MSCAN;
+ /* default repeat = 10 */
+ pfn_param.repeat = DEFAULT_REPEAT;
+ /* by default, maximum scan interval = 2^2
+ * scan_freq when adaptive scan is turned on
+ */
+ pfn_param.exp = DEFAULT_EXP;
+ if (mode == DHD_PNO_BATCH_MODE) {
+ /* In case of BATCH SCAN */
+ if (pno_params->params_batch.bestn)
+ pfn_param.bestn = pno_params->params_batch.bestn;
+ if (pno_params->params_batch.scan_fr)
+ pfn_param.scan_freq = htod32(pno_params->params_batch.scan_fr);
+ if (pno_params->params_batch.mscan)
+ pfn_param.mscan = pno_params->params_batch.mscan;
+ /* enable broadcast scan */
+ pfn_param.flags |= (ENABLE << ENABLE_BD_SCAN_BIT);
+ } else if (mode == DHD_PNO_HOTLIST_MODE) {
+ /* In case of HOTLIST SCAN */
+ if (pno_params->params_hotlist.scan_fr)
+ pfn_param.scan_freq = htod32(pno_params->params_hotlist.scan_fr);
+ pfn_param.bestn = 0;
+ pfn_param.repeat = 0;
+ /* enable broadcast scan */
+ pfn_param.flags |= (ENABLE << ENABLE_BD_SCAN_BIT);
+ }
+ if (combined_scan) {
+ /* Disable Adaptive Scan */
+ pfn_param.flags &= ~(htod16(ENABLE << ENABLE_ADAPTSCAN_BIT));
+ pfn_param.flags |= (ENABLE << ENABLE_BD_SCAN_BIT);
+ pfn_param.repeat = 0;
+ pfn_param.exp = 0;
+ if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) {
+ /* In case of Legacy PNO + BATCH SCAN */
+ _params = &(_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]);
+ if (_params->params_batch.bestn)
+ pfn_param.bestn = _params->params_batch.bestn;
+ if (_params->params_batch.scan_fr)
+ pfn_param.scan_freq = htod32(_params->params_batch.scan_fr);
+ if (_params->params_batch.mscan)
+ pfn_param.mscan = _params->params_batch.mscan;
+ } else if (_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE) {
+ /* In case of Legacy PNO + HOTLIST SCAN */
+ _params = &(_pno_state->pno_params_arr[INDEX_OF_HOTLIST_PARAMS]);
+ if (_params->params_hotlist.scan_fr)
+ pfn_param.scan_freq = htod32(_params->params_hotlist.scan_fr);
+ pfn_param.bestn = 0;
+ pfn_param.repeat = 0;
+ }
+ }
+ }
+#ifdef GSCAN_SUPPORT
+ if (mode & DHD_PNO_GSCAN_MODE) {
+ uint32 lost_network_timeout;
+
+ pfn_param.scan_freq = htod32(pno_params->params_gscan.scan_fr);
+ if (pno_params->params_gscan.mscan) {
+ pfn_param.bestn = pno_params->params_gscan.bestn;
+ pfn_param.mscan = pno_params->params_gscan.mscan;
+ pfn_param.flags |= (ENABLE << ENABLE_BD_SCAN_BIT);
+ }
+ /* RSSI margin of 30 dBm */
+ pfn_param.rssi_margin = htod16(PNO_RSSI_MARGIN_DBM);
+ pfn_param.repeat = 0;
+ pfn_param.exp = 0;
+ pfn_param.slow_freq = 0;
+ pfn_param.flags |= htod16(ENABLE << ENABLE_ADAPTSCAN_BIT);
+
+ if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) {
+ dhd_pno_params_t *params;
+
+ params = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]);
+
+ pfn_param.scan_freq = gcd(pno_params->params_gscan.scan_fr,
+ params->params_legacy.scan_fr);
+
+ if ((params->params_legacy.pno_repeat != 0) ||
+ (params->params_legacy.pno_freq_expo_max != 0)) {
+ pfn_param.repeat = (uchar) (params->params_legacy.pno_repeat);
+ pfn_param.exp = (uchar) (params->params_legacy.pno_freq_expo_max);
+ }
+ }
+
+ lost_network_timeout = (pno_params->params_gscan.max_ch_bucket_freq *
+ pfn_param.scan_freq *
+ pno_params->params_gscan.lost_ap_window);
+ if (lost_network_timeout) {
+ pfn_param.lost_network_timeout = htod32(MIN(lost_network_timeout,
+ GSCAN_MIN_BSSID_TIMEOUT));
+ } else {
+ pfn_param.lost_network_timeout = htod32(GSCAN_MIN_BSSID_TIMEOUT);
+ }
+ } else
+#endif /* GSCAN_SUPPORT */
+ {
+ if (pfn_param.scan_freq < htod32(PNO_SCAN_MIN_FW_SEC) ||
+ pfn_param.scan_freq > htod32(PNO_SCAN_MAX_FW_SEC)) {
+ DHD_ERROR(("%s pno freq(%d sec) is not valid \n",
+ __FUNCTION__, PNO_SCAN_MIN_FW_SEC));
+ err = BCME_BADARG;
+ goto exit;
+ }
+ }
+#if (!defined(WL_USE_RANDOMIZED_SCAN))
+ err = dhd_set_rand_mac_oui(dhd);
+	/* Ignore if the chip doesn't support the feature */
+ if (err < 0 && err != BCME_UNSUPPORTED) {
+ DHD_ERROR(("%s : failed to set random mac for PNO scan, %d\n", __FUNCTION__, err));
+ goto exit;
+ }
+#endif /* !defined(WL_USE_RANDOMIZED_SCAN) */
+#ifdef GSCAN_SUPPORT
+ if (mode == DHD_PNO_BATCH_MODE ||
+ ((mode & DHD_PNO_GSCAN_MODE) && pno_params->params_gscan.mscan))
+#else
+ if (mode == DHD_PNO_BATCH_MODE)
+#endif /* GSCAN_SUPPORT */
+ {
+ int _tmp = pfn_param.bestn;
+ /* set bestn to calculate the max mscan which firmware supports */
+ err = dhd_iovar(dhd, 0, "pfnmem", (char *)&_tmp, sizeof(_tmp), NULL, 0, TRUE);
+ if (err < 0) {
+ DHD_ERROR(("%s : failed to set pfnmem\n", __FUNCTION__));
+ goto exit;
+ }
+ /* get max mscan which the firmware supports */
+ err = dhd_iovar(dhd, 0, "pfnmem", NULL, 0, (char *)&_tmp, sizeof(_tmp), FALSE);
+ if (err < 0) {
+ DHD_ERROR(("%s : failed to get pfnmem\n", __FUNCTION__));
+ goto exit;
+ }
+ pfn_param.mscan = MIN(pfn_param.mscan, _tmp);
+ DHD_PNO((" returned mscan : %d, set bestn : %d mscan %d\n", _tmp, pfn_param.bestn,
+ pfn_param.mscan));
+ }
+ err = dhd_iovar(dhd, 0, "pfn_set", (char *)&pfn_param, sizeof(pfn_param), NULL, 0, TRUE);
+ if (err < 0) {
+ DHD_ERROR(("%s : failed to execute pfn_set %d\n", __FUNCTION__, err));
+ goto exit;
+ }
+	/* for batch scan, return mscan instead of err */
+ err = (mode == DHD_PNO_BATCH_MODE)? pfn_param.mscan : err;
+exit:
+ return err;
+}
+
+static int
+_dhd_pno_add_ssid(dhd_pub_t *dhd, struct list_head* ssid_list, int nssid)
+{
+ int err = BCME_OK;
+ int i = 0, mem_needed;
+ wl_pfn_t *pfn_elem_buf;
+ struct dhd_pno_ssid *iter, *next;
+
+ NULL_CHECK(dhd, "dhd is NULL", err);
+ if (!nssid) {
+ NULL_CHECK(ssid_list, "ssid list is NULL", err);
+ return BCME_ERROR;
+ }
+ mem_needed = (sizeof(wl_pfn_t) * nssid);
+ pfn_elem_buf = (wl_pfn_t *) MALLOCZ(dhd->osh, mem_needed);
+ if (!pfn_elem_buf) {
+ DHD_ERROR(("%s: Can't malloc %d bytes!\n", __FUNCTION__, mem_needed));
+ return BCME_NOMEM;
+ }
+
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ list_for_each_entry_safe(iter, next, ssid_list, list) {
+ GCC_DIAGNOSTIC_POP();
+ pfn_elem_buf[i].infra = htod32(1);
+ pfn_elem_buf[i].auth = htod32(DOT11_OPEN_SYSTEM);
+ pfn_elem_buf[i].wpa_auth = htod32(iter->wpa_auth);
+ pfn_elem_buf[i].flags = htod32(iter->flags);
+ if (iter->hidden)
+ pfn_elem_buf[i].flags |= htod32(ENABLE << WL_PFN_HIDDEN_BIT);
+ /* If a single RSSI threshold is defined, use that */
+#ifdef PNO_MIN_RSSI_TRIGGER
+ pfn_elem_buf[i].flags |= ((PNO_MIN_RSSI_TRIGGER & 0xFF) << WL_PFN_RSSI_SHIFT);
+#else
+ pfn_elem_buf[i].flags |= ((iter->rssi_thresh & 0xFF) << WL_PFN_RSSI_SHIFT);
+#endif /* PNO_MIN_RSSI_TRIGGER */
+ memcpy((char *)pfn_elem_buf[i].ssid.SSID, iter->SSID,
+ iter->SSID_len);
+ pfn_elem_buf[i].ssid.SSID_len = iter->SSID_len;
+ DHD_PNO(("%s size = %d hidden = %d flags = %x rssi_thresh %d\n",
+ iter->SSID, iter->SSID_len, iter->hidden,
+ iter->flags, iter->rssi_thresh));
+ if (++i >= nssid) {
+ /* shouldn't happen */
+ break;
+ }
+ }
+
+ err = dhd_iovar(dhd, 0, "pfn_add", (char *)pfn_elem_buf, mem_needed, NULL, 0, TRUE);
+ if (err < 0) {
+ DHD_ERROR(("%s : failed to execute pfn_add\n", __FUNCTION__));
+ }
+ MFREE(dhd->osh, pfn_elem_buf, mem_needed);
+ return err;
+}
+
+/* qsort compare function */
+static int
+_dhd_pno_cmpfunc(const void *a, const void *b)
+{
+ return (*(const uint16*)a - *(const uint16*)b);
+}
+
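+/* Merge two sorted channel lists into d_chan_list, dropping duplicates within
+ * and across the inputs; *nchan is set to the merged count.
+ */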
+static int
+_dhd_pno_chan_merge(uint16 *d_chan_list, int *nchan,
+ uint16 *chan_list1, int nchan1, uint16 *chan_list2, int nchan2)
+{
+ int err = BCME_OK;
+ int i = 0, j = 0, k = 0;
+ uint16 tmp;
+ NULL_CHECK(d_chan_list, "d_chan_list is NULL", err);
+ NULL_CHECK(nchan, "nchan is NULL", err);
+ NULL_CHECK(chan_list1, "chan_list1 is NULL", err);
+ NULL_CHECK(chan_list2, "chan_list2 is NULL", err);
+	/* chan_list1 and chan_list2 must already be sorted */
+ while (i < nchan1 && j < nchan2) {
+ tmp = chan_list1[i] < chan_list2[j]?
+ chan_list1[i++] : chan_list2[j++];
+ for (; i < nchan1 && chan_list1[i] == tmp; i++);
+ for (; j < nchan2 && chan_list2[j] == tmp; j++);
+ d_chan_list[k++] = tmp;
+ }
+
+ while (i < nchan1) {
+ tmp = chan_list1[i++];
+ for (; i < nchan1 && chan_list1[i] == tmp; i++);
+ d_chan_list[k++] = tmp;
+ }
+
+ while (j < nchan2) {
+ tmp = chan_list2[j++];
+ for (; j < nchan2 && chan_list2[j] == tmp; j++);
+ d_chan_list[k++] = tmp;
+
+ }
+ *nchan = k;
+ return err;
+}
+
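+/* Query the firmware's valid-channel list (WLC_GET_VALID_CHANNELS) and copy
+ * into d_chan_list only the channels matching the requested band, optionally
+ * skipping 5G DFS channels; *nchan is updated to the number of channels kept.
+ */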
+static int
+_dhd_pno_get_channels(dhd_pub_t *dhd, uint16 *d_chan_list,
+ int *nchan, uint8 band, bool skip_dfs)
+{
+ int err = BCME_OK;
+ int i, j;
+ uint32 chan_buf[WL_NUMCHANNELS + 1];
+ wl_uint32_list_t *list;
+ NULL_CHECK(dhd, "dhd is NULL", err);
+ if (*nchan) {
+ NULL_CHECK(d_chan_list, "d_chan_list is NULL", err);
+ }
+ memset(&chan_buf, 0, sizeof(chan_buf));
+ list = (wl_uint32_list_t *) (void *)chan_buf;
+ list->count = htod32(WL_NUMCHANNELS);
+ err = dhd_wl_ioctl_cmd(dhd, WLC_GET_VALID_CHANNELS, chan_buf, sizeof(chan_buf), FALSE, 0);
+ if (err < 0) {
+ DHD_ERROR(("failed to get channel list (err: %d)\n", err));
+ return err;
+ }
+ for (i = 0, j = 0; i < dtoh32(list->count) && i < *nchan; i++) {
+ if (IS_2G_CHANNEL(dtoh32(list->element[i]))) {
+ if (!(band & WLC_BAND_2G)) {
+ /* Skip, if not 2g */
+ continue;
+ }
+ /* fall through to include the channel */
+ } else if (IS_5G_CHANNEL(dtoh32(list->element[i]))) {
+ bool dfs_channel = is_dfs(dhd, dtoh32(list->element[i]));
+ if ((skip_dfs && dfs_channel) ||
+ (!(band & WLC_BAND_5G) && !dfs_channel)) {
+				/* Skip the channel if:
+				 * - DFS skipping is requested and this is a DFS channel, or
+				 * - the 5G band is not requested and this is a non-DFS
+				 *   5G channel
+				 */
+ continue;
+ }
+ /* fall through to include the channel */
+ } else {
+ /* Not in range. Bad channel */
+ DHD_ERROR(("Not in range. bad channel\n"));
+ *nchan = 0;
+ return BCME_BADCHAN;
+ }
+
+ /* Include the channel */
+ d_chan_list[j++] = (uint16) dtoh32(list->element[i]);
+ }
+ *nchan = j;
+ return err;
+}
+
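+/* Serialize the expired batch results into the text report consumed by the
+ * caller. Each AP is emitted as "key=value" lines, e.g. (illustrative values):
+ *   bssid=00:11:22:33:44:55
+ *   ssid=example-ap
+ *   freq=2412
+ *   level=-60
+ *   age=1200
+ *   dist=-1
+ *   distSd=-1
+ * terminated by AP_END_MARKER; each scan ends with SCAN_END_MARKER and the
+ * report with RESULTS_END_MARKER once all expired results are drained.
+ * Returns the number of bytes written into buf.
+ */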
+static int
+_dhd_pno_convert_format(dhd_pub_t *dhd, struct dhd_pno_batch_params *params_batch,
+ char *buf, int nbufsize)
+{
+ int err = BCME_OK;
+ int bytes_written = 0, nreadsize = 0;
+ int t_delta = 0;
+ int nleftsize = nbufsize;
+ uint8 cnt = 0;
+ char *bp = buf;
+ char eabuf[ETHER_ADDR_STR_LEN];
+#ifdef PNO_DEBUG
+ char *_base_bp;
+ char msg[150];
+#endif
+ dhd_pno_bestnet_entry_t *iter, *next;
+ dhd_pno_scan_results_t *siter, *snext;
+ dhd_pno_best_header_t *phead, *pprev;
+ NULL_CHECK(params_batch, "params_batch is NULL", err);
+ if (nbufsize > 0)
+ NULL_CHECK(buf, "buf is NULL", err);
+ /* initialize the buffer */
+ memset(buf, 0, nbufsize);
+ DHD_PNO(("%s enter \n", __FUNCTION__));
+ /* # of scans */
+ if (!params_batch->get_batch.batch_started) {
+ bp += nreadsize = snprintf(bp, nleftsize, "scancount=%d\n",
+ params_batch->get_batch.expired_tot_scan_cnt);
+ nleftsize -= nreadsize;
+ params_batch->get_batch.batch_started = TRUE;
+ }
+ DHD_PNO(("%s scancount %d\n", __FUNCTION__, params_batch->get_batch.expired_tot_scan_cnt));
+	/* walk the expired scan results, emitting entries until the buffer fills */
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ list_for_each_entry_safe(siter, snext,
+ &params_batch->get_batch.expired_scan_results_list, list) {
+ GCC_DIAGNOSTIC_POP();
+ phead = siter->bestnetheader;
+ while (phead != NULL) {
+			/* stop if the remaining space cannot hold this best-header */
+ if (nleftsize <=
+ (phead->tot_size + phead->tot_cnt * ENTRY_OVERHEAD))
+ goto exit;
+ /* increase scan count */
+ cnt++;
+ /* # best of each scan */
+ DHD_PNO(("\n<loop : %d, apcount %d>\n", cnt - 1, phead->tot_cnt));
+ /* attribute of the scan */
+ if (phead->reason & PNO_STATUS_ABORT_MASK) {
+ bp += nreadsize = snprintf(bp, nleftsize, "trunc\n");
+ nleftsize -= nreadsize;
+ }
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ list_for_each_entry_safe(iter, next,
+ &phead->entry_list, list) {
+ GCC_DIAGNOSTIC_POP();
+ t_delta = jiffies_to_msecs(jiffies - iter->recorded_time);
+#ifdef PNO_DEBUG
+ _base_bp = bp;
+ memset(msg, 0, sizeof(msg));
+#endif
+ /* BSSID info */
+ bp += nreadsize = snprintf(bp, nleftsize, "bssid=%s\n",
+ bcm_ether_ntoa((const struct ether_addr *)&iter->BSSID, eabuf));
+ nleftsize -= nreadsize;
+ /* SSID */
+ bp += nreadsize = snprintf(bp, nleftsize, "ssid=%s\n", iter->SSID);
+ nleftsize -= nreadsize;
+ /* channel */
+ bp += nreadsize = snprintf(bp, nleftsize, "freq=%d\n",
+ wl_channel_to_frequency(wf_chspec_ctlchan(iter->channel),
+ CHSPEC_BAND(iter->channel)));
+ nleftsize -= nreadsize;
+ /* RSSI */
+ bp += nreadsize = snprintf(bp, nleftsize, "level=%d\n", iter->RSSI);
+ nleftsize -= nreadsize;
+				/* add the time spent in the driver to the firmware timestamp */
+ iter->timestamp += t_delta;
+ bp += nreadsize = snprintf(bp, nleftsize,
+ "age=%d\n", iter->timestamp);
+ nleftsize -= nreadsize;
+ /* RTT0 */
+ bp += nreadsize = snprintf(bp, nleftsize, "dist=%d\n",
+ (iter->rtt0 == 0)? -1 : iter->rtt0);
+ nleftsize -= nreadsize;
+ /* RTT1 */
+ bp += nreadsize = snprintf(bp, nleftsize, "distSd=%d\n",
+ (iter->rtt0 == 0)? -1 : iter->rtt1);
+ nleftsize -= nreadsize;
+ bp += nreadsize = snprintf(bp, nleftsize, "%s", AP_END_MARKER);
+ nleftsize -= nreadsize;
+ list_del(&iter->list);
+ MFREE(dhd->osh, iter, BESTNET_ENTRY_SIZE);
+#ifdef PNO_DEBUG
+ memcpy(msg, _base_bp, bp - _base_bp);
+ DHD_PNO(("Entry : \n%s", msg));
+#endif
+ }
+ bp += nreadsize = snprintf(bp, nleftsize, "%s", SCAN_END_MARKER);
+ DHD_PNO(("%s", SCAN_END_MARKER));
+ nleftsize -= nreadsize;
+ pprev = phead;
+			/* advance to the next best-header */
+ siter->bestnetheader = phead = phead->next;
+ MFREE(dhd->osh, pprev, BEST_HEADER_SIZE);
+
+ siter->cnt_header--;
+ }
+ if (phead == NULL) {
+			/* all entries of this scan were consumed, so it is safe to delete */
+ list_del(&siter->list);
+ MFREE(dhd->osh, siter, SCAN_RESULTS_SIZE);
+ }
+ }
+exit:
+ if (cnt < params_batch->get_batch.expired_tot_scan_cnt) {
+		DHD_ERROR(("Buffer is too small to save all batch entries,"
+			" cnt : %d, remaining scan cnt : %d\n",
+			cnt, params_batch->get_batch.expired_tot_scan_cnt - cnt));
+ }
+ params_batch->get_batch.expired_tot_scan_cnt -= cnt;
+	/* set FALSE only if the linked list is empty after returning the data */
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ if (list_empty(&params_batch->get_batch.expired_scan_results_list)) {
+ GCC_DIAGNOSTIC_POP();
+ params_batch->get_batch.batch_started = FALSE;
+ bp += snprintf(bp, nleftsize, "%s", RESULTS_END_MARKER);
+ DHD_PNO(("%s", RESULTS_END_MARKER));
+ DHD_PNO(("%s : Getting the batching data is complete\n", __FUNCTION__));
+ }
+ /* return used memory in buffer */
+ bytes_written = (int32)(bp - buf);
+ return bytes_written;
+}
+
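+/* Free cached batch scan results under @head; if only_last is TRUE, only the
+ * last scan node is freed. Returns the number of scan nodes removed.
+ */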
+static int
+_dhd_pno_clear_all_batch_results(dhd_pub_t *dhd, struct list_head *head, bool only_last)
+{
+ int err = BCME_OK;
+ int removed_scan_cnt = 0;
+ dhd_pno_scan_results_t *siter, *snext;
+ dhd_pno_best_header_t *phead, *pprev;
+ dhd_pno_bestnet_entry_t *iter, *next;
+ NULL_CHECK(dhd, "dhd is NULL", err);
+ NULL_CHECK(head, "head is NULL", err);
+ NULL_CHECK(head->next, "head->next is NULL", err);
+ DHD_PNO(("%s enter\n", __FUNCTION__));
+
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ list_for_each_entry_safe(siter, snext,
+ head, list) {
+ if (only_last) {
+			/* delete only the last node */
+ if (!list_is_last(&siter->list, head)) {
+ /* skip if the one is not last */
+ continue;
+ }
+ }
+		/* delete all entries belonging to this scan node */
+ phead = siter->bestnetheader;
+ while (phead != NULL) {
+ removed_scan_cnt++;
+ list_for_each_entry_safe(iter, next,
+ &phead->entry_list, list) {
+ list_del(&iter->list);
+ MFREE(dhd->osh, iter, BESTNET_ENTRY_SIZE);
+ }
+ pprev = phead;
+ phead = phead->next;
+ MFREE(dhd->osh, pprev, BEST_HEADER_SIZE);
+ }
+ if (phead == NULL) {
+ /* it is ok to delete top node */
+ list_del(&siter->list);
+ MFREE(dhd->osh, siter, SCAN_RESULTS_SIZE);
+ }
+ }
+ GCC_DIAGNOSTIC_POP();
+ return removed_scan_cnt;
+}
+
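+/* Program the PFN channel list into firmware via the "pfn_cfg" iovar. For
+ * firmware at or above DHD_PNO_CHSPEC_SUPPORT_VER the channels are converted
+ * to 20 MHz chanspecs first.
+ */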
+static int
+_dhd_pno_cfg(dhd_pub_t *dhd, uint16 *channel_list, int nchan)
+{
+ int err = BCME_OK;
+ int i = 0;
+ wl_pfn_cfg_t pfncfg_param;
+ NULL_CHECK(dhd, "dhd is NULL", err);
+ if (nchan) {
+ if (nchan > WL_NUMCHANNELS) {
+ return BCME_RANGE;
+ }
+ DHD_PNO(("%s enter : nchan : %d\n", __FUNCTION__, nchan));
+ (void)memset_s(&pfncfg_param, sizeof(wl_pfn_cfg_t), 0, sizeof(wl_pfn_cfg_t));
+ pfncfg_param.channel_num = htod32(0);
+
+ for (i = 0; i < nchan; i++) {
+ if (dhd->wlc_ver_major >= DHD_PNO_CHSPEC_SUPPORT_VER) {
+ pfncfg_param.channel_list[i] = CH20MHZ_CHSPEC(channel_list[i]);
+ } else {
+ pfncfg_param.channel_list[i] = channel_list[i];
+ }
+ }
+ }
+
+ /* Setup default values */
+ pfncfg_param.reporttype = htod32(WL_PFN_REPORT_ALLNET);
+ pfncfg_param.channel_num = htod32(nchan);
+ err = dhd_iovar(dhd, 0, "pfn_cfg", (char *)&pfncfg_param, sizeof(pfncfg_param), NULL, 0,
+ TRUE);
+ if (err < 0) {
+ DHD_ERROR(("%s : failed to execute pfn_cfg\n", __FUNCTION__));
+ goto exit;
+ }
+exit:
+ return err;
+}
+
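+/* Reset the cached parameters of the given PNO mode (legacy/batch/hotlist),
+ * freeing any SSID/BSSID lists and cached batch results, under pno_mutex.
+ */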
+static int
+_dhd_pno_reinitialize_prof(dhd_pub_t *dhd, dhd_pno_params_t *params, dhd_pno_mode_t mode)
+{
+ int err = BCME_OK;
+ dhd_pno_status_info_t *_pno_state;
+ NULL_CHECK(dhd, "dhd is NULL\n", err);
+ NULL_CHECK(dhd->pno_state, "pno_state is NULL\n", err);
+ DHD_PNO(("%s enter\n", __FUNCTION__));
+ _pno_state = PNO_GET_PNOSTATE(dhd);
+ mutex_lock(&_pno_state->pno_mutex);
+ switch (mode) {
+ case DHD_PNO_LEGACY_MODE: {
+ struct dhd_pno_ssid *iter, *next;
+ if (params->params_legacy.nssid > 0) {
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ list_for_each_entry_safe(iter, next,
+ &params->params_legacy.ssid_list, list) {
+ GCC_DIAGNOSTIC_POP();
+ list_del(&iter->list);
+ MFREE(dhd->osh, iter, sizeof(struct dhd_pno_ssid));
+ }
+ }
+
+ params->params_legacy.nssid = 0;
+ params->params_legacy.scan_fr = 0;
+ params->params_legacy.pno_freq_expo_max = 0;
+ params->params_legacy.pno_repeat = 0;
+ params->params_legacy.nchan = 0;
+ memset(params->params_legacy.chan_list, 0,
+ sizeof(params->params_legacy.chan_list));
+ break;
+ }
+ case DHD_PNO_BATCH_MODE: {
+ params->params_batch.scan_fr = 0;
+ params->params_batch.mscan = 0;
+ params->params_batch.nchan = 0;
+ params->params_batch.rtt = 0;
+ params->params_batch.bestn = 0;
+ params->params_batch.nchan = 0;
+ params->params_batch.band = WLC_BAND_AUTO;
+ memset(params->params_batch.chan_list, 0,
+ sizeof(params->params_batch.chan_list));
+ params->params_batch.get_batch.batch_started = FALSE;
+ params->params_batch.get_batch.buf = NULL;
+ params->params_batch.get_batch.bufsize = 0;
+ params->params_batch.get_batch.reason = 0;
+ _dhd_pno_clear_all_batch_results(dhd,
+ &params->params_batch.get_batch.scan_results_list, FALSE);
+ _dhd_pno_clear_all_batch_results(dhd,
+ &params->params_batch.get_batch.expired_scan_results_list, FALSE);
+ params->params_batch.get_batch.tot_scan_cnt = 0;
+ params->params_batch.get_batch.expired_tot_scan_cnt = 0;
+ params->params_batch.get_batch.top_node_cnt = 0;
+ INIT_LIST_HEAD(&params->params_batch.get_batch.scan_results_list);
+ INIT_LIST_HEAD(&params->params_batch.get_batch.expired_scan_results_list);
+ break;
+ }
+ case DHD_PNO_HOTLIST_MODE: {
+ struct dhd_pno_bssid *iter, *next;
+ if (params->params_hotlist.nbssid > 0) {
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ list_for_each_entry_safe(iter, next,
+ &params->params_hotlist.bssid_list, list) {
+ GCC_DIAGNOSTIC_POP();
+ list_del(&iter->list);
+				MFREE(dhd->osh, iter, sizeof(struct dhd_pno_bssid));
+ }
+ }
+ params->params_hotlist.scan_fr = 0;
+ params->params_hotlist.nbssid = 0;
+ params->params_hotlist.nchan = 0;
+ params->params_batch.band = WLC_BAND_AUTO;
+ memset(params->params_hotlist.chan_list, 0,
+ sizeof(params->params_hotlist.chan_list));
+ break;
+ }
+ default:
+ DHD_ERROR(("%s : unknown mode : %d\n", __FUNCTION__, mode));
+ break;
+ }
+ mutex_unlock(&_pno_state->pno_mutex);
+ return err;
+}
+
+static int
+_dhd_pno_add_bssid(dhd_pub_t *dhd, wl_pfn_bssid_t *p_pfn_bssid, int nbssid)
+{
+ int err = BCME_OK;
+ NULL_CHECK(dhd, "dhd is NULL", err);
+ if (nbssid) {
+ NULL_CHECK(p_pfn_bssid, "bssid list is NULL", err);
+ }
+ err = dhd_iovar(dhd, 0, "pfn_add_bssid", (char *)p_pfn_bssid,
+ sizeof(wl_pfn_bssid_t) * nbssid, NULL, 0, TRUE);
+ if (err < 0) {
+		DHD_ERROR(("%s : failed to execute pfn_add_bssid\n", __FUNCTION__));
+ goto exit;
+ }
+exit:
+ return err;
+}
+
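+/* Stop legacy PNO: clear the legacy profile and mode bit, then restart any
+ * still-active mode (GSCAN, batch, or hotlist) with its cached parameters;
+ * if nothing else is running, disable PFN in firmware entirely.
+ */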
+int
+dhd_pno_stop_for_ssid(dhd_pub_t *dhd)
+{
+ int err = BCME_OK;
+ uint32 mode = 0, cnt = 0;
+ dhd_pno_status_info_t *_pno_state;
+ dhd_pno_params_t *_params = NULL;
+ wl_pfn_bssid_t *p_pfn_bssid = NULL, *tmp_bssid;
+
+	NULL_CHECK(dhd, "dhd is NULL", err);
+ NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+ _pno_state = PNO_GET_PNOSTATE(dhd);
+ if (!(_pno_state->pno_mode & DHD_PNO_LEGACY_MODE)) {
+ DHD_ERROR(("%s : LEGACY PNO MODE is not enabled\n", __FUNCTION__));
+ goto exit;
+ }
+ DHD_PNO(("%s enter\n", __FUNCTION__));
+ /* If pno mode is PNO_LEGACY_MODE clear the pno values and unset the DHD_PNO_LEGACY_MODE */
+ _params = &_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS];
+ _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_LEGACY_MODE);
+ _pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE;
+
+#ifdef GSCAN_SUPPORT
+ if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) {
+ struct dhd_pno_gscan_params *gscan_params;
+
+ _params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
+ gscan_params = &_params->params_gscan;
+ if (gscan_params->mscan) {
+ /* retrieve the batching data from firmware into host */
+ err = dhd_wait_batch_results_complete(dhd);
+ if (err != BCME_OK)
+ goto exit;
+ }
+ /* save current pno_mode before calling dhd_pno_clean */
+ mutex_lock(&_pno_state->pno_mutex);
+ mode = _pno_state->pno_mode;
+ err = dhd_pno_clean(dhd);
+ if (err < 0) {
+ DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n",
+ __FUNCTION__, err));
+ mutex_unlock(&_pno_state->pno_mutex);
+ goto exit;
+ }
+ /* restore previous pno_mode */
+ _pno_state->pno_mode = mode;
+ mutex_unlock(&_pno_state->pno_mutex);
+ /* Restart gscan */
+ err = dhd_pno_initiate_gscan_request(dhd, 1, 0);
+ goto exit;
+ }
+#endif /* GSCAN_SUPPORT */
+ /* restart Batch mode if the batch mode is on */
+ if (_pno_state->pno_mode & (DHD_PNO_BATCH_MODE | DHD_PNO_HOTLIST_MODE)) {
+ /* retrieve the batching data from firmware into host */
+ dhd_pno_get_for_batch(dhd, NULL, 0, PNO_STATUS_DISABLE);
+ /* save current pno_mode before calling dhd_pno_clean */
+ mode = _pno_state->pno_mode;
+ err = dhd_pno_clean(dhd);
+ if (err < 0) {
+ err = BCME_ERROR;
+ DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n",
+ __FUNCTION__, err));
+ goto exit;
+ }
+
+ /* restore previous pno_mode */
+ _pno_state->pno_mode = mode;
+ if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) {
+ _params = &(_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]);
+ /* restart BATCH SCAN */
+ err = dhd_pno_set_for_batch(dhd, &_params->params_batch);
+ if (err < 0) {
+ _pno_state->pno_mode &= ~DHD_PNO_BATCH_MODE;
+ DHD_ERROR(("%s : failed to restart batch scan(err: %d)\n",
+ __FUNCTION__, err));
+ goto exit;
+ }
+ } else if (_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE) {
+ /* restart HOTLIST SCAN */
+ struct dhd_pno_bssid *iter, *next;
+ _params = &(_pno_state->pno_params_arr[INDEX_OF_HOTLIST_PARAMS]);
+ p_pfn_bssid = MALLOCZ(dhd->osh, sizeof(wl_pfn_bssid_t) *
+ _params->params_hotlist.nbssid);
+ if (p_pfn_bssid == NULL) {
+ DHD_ERROR(("%s : failed to allocate wl_pfn_bssid_t array"
+ " (count: %d)",
+ __FUNCTION__, _params->params_hotlist.nbssid));
+ err = BCME_ERROR;
+ _pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE;
+ goto exit;
+ }
+ /* convert dhd_pno_bssid to wl_pfn_bssid */
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ cnt = 0;
+ tmp_bssid = p_pfn_bssid;
+ list_for_each_entry_safe(iter, next,
+ &_params->params_hotlist.bssid_list, list) {
+ GCC_DIAGNOSTIC_POP();
+ memcpy(&tmp_bssid->macaddr,
+ &iter->macaddr, ETHER_ADDR_LEN);
+ tmp_bssid->flags = iter->flags;
+ if (cnt < _params->params_hotlist.nbssid) {
+ tmp_bssid++;
+ cnt++;
+ } else {
+					DHD_ERROR(("%s: BSSID list exceeds the allocated array\n",
+						__FUNCTION__));
+ break;
+ }
+ }
+ err = dhd_pno_set_for_hotlist(dhd, p_pfn_bssid, &_params->params_hotlist);
+ if (err < 0) {
+ _pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE;
+ DHD_ERROR(("%s : failed to restart hotlist scan(err: %d)\n",
+ __FUNCTION__, err));
+ goto exit;
+ }
+ }
+ } else {
+ err = dhd_pno_clean(dhd);
+ if (err < 0) {
+ DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n",
+ __FUNCTION__, err));
+ goto exit;
+ }
+ }
+exit:
+ if (p_pfn_bssid) {
+ MFREE(dhd->osh, p_pfn_bssid, sizeof(wl_pfn_bssid_t) *
+ _params->params_hotlist.nbssid);
+ }
+ return err;
+}
+
+int
+dhd_pno_enable(dhd_pub_t *dhd, int enable)
+{
+ int err = BCME_OK;
+ NULL_CHECK(dhd, "dhd is NULL", err);
+ DHD_PNO(("%s enter\n", __FUNCTION__));
+ return (_dhd_pno_enable(dhd, enable));
+}
+
+static int
+dhd_pno_add_to_ssid_list(dhd_pub_t *dhd, struct list_head *ptr, wlc_ssid_ext_t *ssid_list,
+ int nssid, int *num_ssid_added)
+{
+ int ret = BCME_OK;
+ int i;
+ struct dhd_pno_ssid *_pno_ssid;
+
+ for (i = 0; i < nssid; i++) {
+ if (ssid_list[i].SSID_len > DOT11_MAX_SSID_LEN) {
+ DHD_ERROR(("%s : Invalid SSID length %d\n",
+ __FUNCTION__, ssid_list[i].SSID_len));
+ ret = BCME_ERROR;
+ goto exit;
+ }
+ /* Check for broadcast ssid */
+ if (!ssid_list[i].SSID_len) {
+ DHD_ERROR(("%d: Broadcast SSID is illegal for PNO setting\n", i));
+ ret = BCME_ERROR;
+ goto exit;
+ }
+ _pno_ssid = (struct dhd_pno_ssid *)MALLOCZ(dhd->osh,
+ sizeof(struct dhd_pno_ssid));
+ if (_pno_ssid == NULL) {
+ DHD_ERROR(("%s : failed to allocate struct dhd_pno_ssid\n",
+ __FUNCTION__));
+ ret = BCME_ERROR;
+ goto exit;
+ }
+ _pno_ssid->SSID_len = ssid_list[i].SSID_len;
+ _pno_ssid->hidden = ssid_list[i].hidden;
+ _pno_ssid->rssi_thresh = ssid_list[i].rssi_thresh;
+ _pno_ssid->flags = ssid_list[i].flags;
+ _pno_ssid->wpa_auth = WPA_AUTH_PFN_ANY;
+
+ memcpy(_pno_ssid->SSID, ssid_list[i].SSID, _pno_ssid->SSID_len);
+ list_add_tail(&_pno_ssid->list, ptr);
+ }
+
+exit:
+ *num_ssid_added = i;
+ return ret;
+}
+
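+/* Entry point for legacy PNO: rebuild the cached SSID list from @ssid_list and
+ * then program the scan through dhd_pno_set_legacy_pno().
+ */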
+int
+dhd_pno_set_for_ssid(dhd_pub_t *dhd, wlc_ssid_ext_t* ssid_list, int nssid,
+ uint16 scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan)
+{
+ dhd_pno_status_info_t *_pno_state;
+ dhd_pno_params_t *_params;
+ struct dhd_pno_legacy_params *params_legacy;
+ int err = BCME_OK;
+
+ if (!dhd || !dhd->pno_state) {
+ DHD_ERROR(("%s: PNO Not enabled/Not ready\n", __FUNCTION__));
+ return BCME_NOTREADY;
+ }
+
+ if (!dhd_support_sta_mode(dhd)) {
+ return BCME_BADOPTION;
+ }
+
+ _pno_state = PNO_GET_PNOSTATE(dhd);
+ _params = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]);
+ params_legacy = &(_params->params_legacy);
+ err = _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_LEGACY_MODE);
+
+ if (err < 0) {
+ DHD_ERROR(("%s : failed to reinitialize profile (err %d)\n",
+ __FUNCTION__, err));
+ return err;
+ }
+
+ INIT_LIST_HEAD(&params_legacy->ssid_list);
+
+ if (dhd_pno_add_to_ssid_list(dhd, &params_legacy->ssid_list, ssid_list,
+ nssid, &params_legacy->nssid) < 0) {
+ _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_LEGACY_MODE);
+ return BCME_ERROR;
+ }
+
+	DHD_PNO(("%s enter : nssid %d, scan_fr : %d, pno_repeat : %d, "
+		"pno_freq_expo_max : %d, nchan : %d\n", __FUNCTION__,
+		params_legacy->nssid, scan_fr, pno_repeat, pno_freq_expo_max, nchan));
+
+ return dhd_pno_set_legacy_pno(dhd, scan_fr, pno_repeat,
+ pno_freq_expo_max, channel_list, nchan);
+
+}
+
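+/* Program legacy PNO in firmware: merge channel lists with any batch/hotlist
+ * mode already running, push pfn_set/pfn_add/pfn_cfg, and enable PFN. When
+ * GSCAN is active the request is instead folded into a combined GSCAN run.
+ */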
+static int
+dhd_pno_set_legacy_pno(dhd_pub_t *dhd, uint16 scan_fr, int pno_repeat,
+ int pno_freq_expo_max, uint16 *channel_list, int nchan)
+{
+ dhd_pno_params_t *_params;
+ dhd_pno_params_t *_params2;
+ dhd_pno_status_info_t *_pno_state;
+ uint16 _chan_list[WL_NUMCHANNELS];
+ int32 tot_nchan = 0;
+ int err = BCME_OK;
+ int i, nssid;
+ int mode = 0;
+ struct list_head *ssid_list;
+
+ _pno_state = PNO_GET_PNOSTATE(dhd);
+
+ _params = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]);
+	/* if GSCAN is also on, this is handled further below */
+#ifdef GSCAN_SUPPORT
+ if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE &&
+ !(_pno_state->pno_mode & DHD_PNO_GSCAN_MODE))
+#else
+ if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE)
+#endif /* GSCAN_SUPPORT */
+ {
+		DHD_ERROR(("%s : Legacy PNO mode was already started, "
+			"disabling the previous request to start the new one\n", __FUNCTION__));
+ err = dhd_pno_stop_for_ssid(dhd);
+ if (err < 0) {
+ DHD_ERROR(("%s : failed to stop legacy PNO (err %d)\n",
+ __FUNCTION__, err));
+ return err;
+ }
+ }
+ _pno_state->pno_mode |= DHD_PNO_LEGACY_MODE;
+ (void)memset_s(_chan_list, sizeof(_chan_list),
+ 0, sizeof(_chan_list));
+ tot_nchan = MIN(nchan, WL_NUMCHANNELS);
+ if (tot_nchan > 0 && channel_list) {
+ for (i = 0; i < tot_nchan; i++)
+ _params->params_legacy.chan_list[i] = _chan_list[i] = channel_list[i];
+ }
+#ifdef GSCAN_SUPPORT
+ else {
+ /* FW scan module will include all valid channels when chan count
+ * is set to 0
+ */
+ tot_nchan = 0;
+ }
+#endif /* GSCAN_SUPPORT */
+
+ if (_pno_state->pno_mode & (DHD_PNO_BATCH_MODE | DHD_PNO_HOTLIST_MODE)) {
+		DHD_PNO(("BATCH SCAN is in progress in firmware\n"));
+ /* retrieve the batching data from firmware into host */
+ dhd_pno_get_for_batch(dhd, NULL, 0, PNO_STATUS_DISABLE);
+ /* store current pno_mode before disabling pno */
+ mode = _pno_state->pno_mode;
+ err = _dhd_pno_enable(dhd, PNO_OFF);
+ if (err < 0) {
+ DHD_ERROR(("%s : failed to disable PNO\n", __FUNCTION__));
+ goto exit;
+ }
+ /* restore the previous mode */
+ _pno_state->pno_mode = mode;
+		/* use the superset of the two modes' channel lists */
+ if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) {
+ _params2 = &(_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]);
+ if (_params2->params_batch.nchan > 0 && tot_nchan > 0) {
+ err = _dhd_pno_chan_merge(_chan_list, &tot_nchan,
+ &_params2->params_batch.chan_list[0],
+ _params2->params_batch.nchan,
+ &channel_list[0], tot_nchan);
+ if (err < 0) {
+ DHD_ERROR(("%s : failed to merge channel list"
+ " between legacy and batch\n",
+ __FUNCTION__));
+ goto exit;
+ }
+ } else {
+				DHD_PNO(("channel superset: firmware will use"
+					" all valid channels\n"));
+ }
+ } else if (_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE) {
+ _params2 = &(_pno_state->pno_params_arr[INDEX_OF_HOTLIST_PARAMS]);
+ if (_params2->params_hotlist.nchan > 0 && tot_nchan > 0) {
+ err = _dhd_pno_chan_merge(_chan_list, &tot_nchan,
+ &_params2->params_hotlist.chan_list[0],
+ _params2->params_hotlist.nchan,
+ &channel_list[0], tot_nchan);
+ if (err < 0) {
+ DHD_ERROR(("%s : failed to merge channel list"
+ " between legacy and hotlist\n",
+ __FUNCTION__));
+ goto exit;
+ }
+ }
+ }
+ }
+ _params->params_legacy.scan_fr = scan_fr;
+ _params->params_legacy.pno_repeat = pno_repeat;
+ _params->params_legacy.pno_freq_expo_max = pno_freq_expo_max;
+ _params->params_legacy.nchan = tot_nchan;
+ ssid_list = &_params->params_legacy.ssid_list;
+ nssid = _params->params_legacy.nssid;
+
+#ifdef GSCAN_SUPPORT
+ /* dhd_pno_initiate_gscan_request will handle simultaneous Legacy PNO and GSCAN */
+ if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) {
+ struct dhd_pno_gscan_params *gscan_params;
+ gscan_params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan;
+ /* ePNO and Legacy PNO do not co-exist */
+ if (gscan_params->epno_cfg.num_epno_ssid) {
+ DHD_PNO(("ePNO and Legacy PNO do not co-exist\n"));
+ err = BCME_EPERM;
+ goto exit;
+ }
+ DHD_PNO(("GSCAN mode is ON! Will restart GSCAN+Legacy PNO\n"));
+ err = dhd_pno_initiate_gscan_request(dhd, 1, 0);
+ goto exit;
+ }
+#endif /* GSCAN_SUPPORT */
+ if ((err = _dhd_pno_set(dhd, _params, DHD_PNO_LEGACY_MODE)) < 0) {
+		DHD_ERROR(("failed to execute pno_set (err %d) in firmware\n", err));
+ goto exit;
+ }
+ if ((err = _dhd_pno_add_ssid(dhd, ssid_list, nssid)) < 0) {
+ DHD_ERROR(("failed to add ssid list(err %d), %d in firmware\n", err, nssid));
+ goto exit;
+ }
+
+ if ((err = _dhd_pno_cfg(dhd, _chan_list, tot_nchan)) < 0) {
+		DHD_ERROR(("%s : failed to execute pno_cfg (err %d) in firmware\n",
+ __FUNCTION__, err));
+ goto exit;
+ }
+
+ if (_pno_state->pno_status == DHD_PNO_DISABLED) {
+ if ((err = _dhd_pno_enable(dhd, PNO_ON)) < 0)
+ DHD_ERROR(("%s : failed to enable PNO\n", __FUNCTION__));
+ }
+exit:
+	/* clear profile and mode in case of error */
+	if (err < 0) {
+		int ret;
+
+		_dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_LEGACY_MODE);
+		ret = dhd_pno_clean(dhd);
+		if (ret < 0) {
+			DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n",
+				__FUNCTION__, ret));
+		} else {
+			_pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE;
+		}
+	}
+ return err;
+}
+
+int
+dhd_pno_set_for_batch(dhd_pub_t *dhd, struct dhd_pno_batch_params *batch_params)
+{
+ int err = BCME_OK;
+ uint16 _chan_list[WL_NUMCHANNELS];
+ int rem_nchan = 0, tot_nchan = 0;
+ int mode = 0, mscan = 0;
+ dhd_pno_params_t *_params;
+ dhd_pno_params_t *_params2;
+ dhd_pno_status_info_t *_pno_state;
+ NULL_CHECK(dhd, "dhd is NULL", err);
+ NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+ NULL_CHECK(batch_params, "batch_params is NULL", err);
+ _pno_state = PNO_GET_PNOSTATE(dhd);
+ DHD_PNO(("%s enter\n", __FUNCTION__));
+ if (!dhd_support_sta_mode(dhd)) {
+ err = BCME_BADOPTION;
+ goto exit;
+ }
+ if (!WLS_SUPPORTED(_pno_state)) {
+ DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__));
+ err = BCME_UNSUPPORTED;
+ goto exit;
+ }
+ _params = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS];
+ if (!(_pno_state->pno_mode & DHD_PNO_BATCH_MODE)) {
+ _pno_state->pno_mode |= DHD_PNO_BATCH_MODE;
+ err = _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_BATCH_MODE);
+ if (err < 0) {
+ DHD_ERROR(("%s : failed to call _dhd_pno_reinitialize_prof\n",
+ __FUNCTION__));
+ goto exit;
+ }
+ } else {
+ /* batch mode is already started */
+ return -EBUSY;
+ }
+ _params->params_batch.scan_fr = batch_params->scan_fr;
+ _params->params_batch.bestn = batch_params->bestn;
+ _params->params_batch.mscan = (batch_params->mscan)?
+ batch_params->mscan : DEFAULT_BATCH_MSCAN;
+ _params->params_batch.nchan = batch_params->nchan;
+ memcpy(_params->params_batch.chan_list, batch_params->chan_list,
+ sizeof(_params->params_batch.chan_list));
+
+ memset(_chan_list, 0, sizeof(_chan_list));
+
+ rem_nchan = ARRAYSIZE(batch_params->chan_list) - batch_params->nchan;
+ if (batch_params->band == WLC_BAND_2G ||
+#ifdef WL_6G_BAND
+ batch_params->band == WLC_BAND_6G ||
+#endif /* WL_6G_BAND */
+ batch_params->band == WLC_BAND_5G) {
+		/* get a valid channel list for the requested band */
+ err = _dhd_pno_get_channels(dhd,
+ &_params->params_batch.chan_list[batch_params->nchan],
+ &rem_nchan, batch_params->band, FALSE);
+ if (err < 0) {
+ DHD_ERROR(("%s: failed to get valid channel list(band : %d)\n",
+ __FUNCTION__, batch_params->band));
+ goto exit;
+ }
+		/* update nchan: rem_nchan now holds the count of channels added */
+ _params->params_batch.nchan += rem_nchan;
+ /* need to sort channel list */
+ sort(_params->params_batch.chan_list, _params->params_batch.nchan,
+ sizeof(_params->params_batch.chan_list[0]), _dhd_pno_cmpfunc, NULL);
+ }
+#ifdef PNO_DEBUG
+	{
+		int i;
+
+		DHD_PNO(("Channel list : "));
+		for (i = 0; i < _params->params_batch.nchan; i++) {
+			DHD_PNO(("%d ", _params->params_batch.chan_list[i]));
+		}
+		DHD_PNO(("\n"));
+	}
+#endif
+ if (_params->params_batch.nchan) {
+ /* copy the channel list into local array */
+ memcpy(_chan_list, _params->params_batch.chan_list, sizeof(_chan_list));
+ tot_nchan = _params->params_batch.nchan;
+ }
+ if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) {
+		DHD_PNO(("PNO SSID scan is in progress in firmware\n"));
+ /* store current pno_mode before disabling pno */
+ mode = _pno_state->pno_mode;
+ err = _dhd_pno_enable(dhd, PNO_OFF);
+ if (err < 0) {
+ DHD_ERROR(("%s : failed to disable PNO\n", __FUNCTION__));
+ goto exit;
+ }
+ /* restore the previous mode */
+ _pno_state->pno_mode = mode;
+		/* use the superset of the two modes' channel lists */
+ _params2 = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]);
+ if (_params2->params_legacy.nchan > 0 && _params->params_batch.nchan > 0) {
+ err = _dhd_pno_chan_merge(_chan_list, &tot_nchan,
+ &_params2->params_legacy.chan_list[0],
+ _params2->params_legacy.nchan,
+ &_params->params_batch.chan_list[0], _params->params_batch.nchan);
+ if (err < 0) {
+ DHD_ERROR(("%s : failed to merge channel list"
+ " between legacy and batch\n",
+ __FUNCTION__));
+ goto exit;
+ }
+ } else {
+			DHD_PNO(("channel superset: firmware will use all valid channels\n"));
+ }
+ if ((err = _dhd_pno_add_ssid(dhd, &_params2->params_legacy.ssid_list,
+ _params2->params_legacy.nssid)) < 0) {
+ DHD_ERROR(("failed to add ssid list (err %d) in firmware\n", err));
+ goto exit;
+ }
+ }
+ if ((err = _dhd_pno_set(dhd, _params, DHD_PNO_BATCH_MODE)) < 0) {
+		DHD_ERROR(("%s : failed to execute pno_set (err %d) in firmware\n",
+ __FUNCTION__, err));
+ goto exit;
+ } else {
+ /* we need to return mscan */
+ mscan = err;
+ }
+ if (tot_nchan > 0) {
+ if ((err = _dhd_pno_cfg(dhd, _chan_list, tot_nchan)) < 0) {
+			DHD_ERROR(("%s : failed to execute pno_cfg (err %d) in firmware\n",
+ __FUNCTION__, err));
+ goto exit;
+ }
+ }
+ if (_pno_state->pno_status == DHD_PNO_DISABLED) {
+ if ((err = _dhd_pno_enable(dhd, PNO_ON)) < 0)
+ DHD_ERROR(("%s : failed to enable PNO\n", __FUNCTION__));
+ }
+exit:
+ /* clear mode in case of error */
+ if (err < 0)
+ _pno_state->pno_mode &= ~DHD_PNO_BATCH_MODE;
+ else {
+		/* return the max number of scans the firmware can batch */
+ err = mscan;
+ }
+ return err;
+}
+
+#ifdef GSCAN_SUPPORT
+
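+/* Push (or, when set == FALSE, clear) the ePNO SSID parameters in firmware
+ * through the "pfn_ssid_cfg" iovar.
+ */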
+static int
+dhd_set_epno_params(dhd_pub_t *dhd, wl_ssid_ext_params_t *params, bool set)
+{
+ wl_pfn_ssid_cfg_t cfg;
+ int err;
+ NULL_CHECK(dhd, "dhd is NULL\n", err);
+ memset(&cfg, 0, sizeof(wl_pfn_ssid_cfg_t));
+ cfg.version = WL_PFN_SSID_CFG_VERSION;
+
+ /* If asked to clear params (set == FALSE) just set the CLEAR bit */
+ if (!set)
+ cfg.flags |= WL_PFN_SSID_CFG_CLEAR;
+ else if (params)
+ memcpy(&cfg.params, params, sizeof(wl_ssid_ext_params_t));
+ err = dhd_iovar(dhd, 0, "pfn_ssid_cfg", (char *)&cfg,
+ sizeof(wl_pfn_ssid_cfg_t), NULL, 0, TRUE);
+ if (err != BCME_OK) {
+ DHD_ERROR(("%s : Failed to execute pfn_ssid_cfg %d\n", __FUNCTION__, err));
+ }
+ return err;
+}
+
+int
+dhd_pno_flush_fw_epno(dhd_pub_t *dhd)
+{
+ int err;
+
+ NULL_CHECK(dhd, "dhd is NULL\n", err);
+
+ err = dhd_set_epno_params(dhd, NULL, FALSE);
+ if (err < 0) {
+ DHD_ERROR(("failed to set ePNO params %d\n", err));
+ return err;
+ }
+ err = _dhd_pno_flush_ssid(dhd);
+ return err;
+}
+
+int
+dhd_pno_set_epno(dhd_pub_t *dhd)
+{
+ int err = BCME_OK;
+ dhd_pno_params_t *params;
+ dhd_pno_status_info_t *_pno_state;
+
+ struct dhd_pno_gscan_params *gscan_params;
+
+ NULL_CHECK(dhd, "dhd is NULL\n", err);
+ NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+ _pno_state = PNO_GET_PNOSTATE(dhd);
+ params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
+ gscan_params = &params->params_gscan;
+
+ if (gscan_params->epno_cfg.num_epno_ssid) {
+ DHD_PNO(("num_epno_ssid %d\n", gscan_params->epno_cfg.num_epno_ssid));
+ if ((err = _dhd_pno_add_ssid(dhd, &gscan_params->epno_cfg.epno_ssid_list,
+ gscan_params->epno_cfg.num_epno_ssid)) < 0) {
+ DHD_ERROR(("failed to add ssid list (err %d) to firmware\n", err));
+ return err;
+ }
+ err = dhd_set_epno_params(dhd, &gscan_params->epno_cfg.params, TRUE);
+ if (err < 0) {
+ DHD_ERROR(("failed to set ePNO params %d\n", err));
+ }
+ }
+ return err;
+}
+
+static void
+dhd_pno_reset_cfg_gscan(dhd_pub_t *dhd, dhd_pno_params_t *_params,
+ dhd_pno_status_info_t *_pno_state, uint8 flags)
+{
+ DHD_PNO(("%s enter\n", __FUNCTION__));
+
+ if (flags & GSCAN_FLUSH_SCAN_CFG) {
+ _params->params_gscan.bestn = 0;
+ _params->params_gscan.mscan = 0;
+ _params->params_gscan.buffer_threshold = GSCAN_BATCH_NO_THR_SET;
+ _params->params_gscan.scan_fr = 0;
+ _params->params_gscan.send_all_results_flag = 0;
+ memset(_params->params_gscan.channel_bucket, 0,
+ _params->params_gscan.nchannel_buckets *
+ sizeof(struct dhd_pno_gscan_channel_bucket));
+ _params->params_gscan.nchannel_buckets = 0;
+ DHD_PNO(("Flush Scan config\n"));
+ }
+ if (flags & GSCAN_FLUSH_HOTLIST_CFG) {
+ struct dhd_pno_bssid *iter, *next;
+ if (_params->params_gscan.nbssid_hotlist > 0) {
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ list_for_each_entry_safe(iter, next,
+ &_params->params_gscan.hotlist_bssid_list, list) {
+ GCC_DIAGNOSTIC_POP();
+ list_del(&iter->list);
+ MFREE(dhd->osh, iter, sizeof(struct dhd_pno_bssid));
+ }
+ }
+ _params->params_gscan.nbssid_hotlist = 0;
+ DHD_PNO(("Flush Hotlist Config\n"));
+ }
+ if (flags & GSCAN_FLUSH_EPNO_CFG) {
+ dhd_pno_ssid_t *iter, *next;
+ dhd_epno_ssid_cfg_t *epno_cfg = &_params->params_gscan.epno_cfg;
+
+ if (epno_cfg->num_epno_ssid > 0) {
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ list_for_each_entry_safe(iter, next,
+ &epno_cfg->epno_ssid_list, list) {
+ GCC_DIAGNOSTIC_POP();
+ list_del(&iter->list);
+				MFREE(dhd->osh, iter, sizeof(dhd_pno_ssid_t));
+ }
+ epno_cfg->num_epno_ssid = 0;
+ }
+ memset(&epno_cfg->params, 0, sizeof(wl_ssid_ext_params_t));
+ DHD_PNO(("Flushed ePNO Config\n"));
+ }
+
+ return;
+}
+
+int
+dhd_pno_lock_batch_results(dhd_pub_t *dhd)
+{
+ dhd_pno_status_info_t *_pno_state;
+ int err = BCME_OK;
+
+ NULL_CHECK(dhd, "dhd is NULL", err);
+ NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+ _pno_state = PNO_GET_PNOSTATE(dhd);
+ mutex_lock(&_pno_state->pno_mutex);
+ return err;
+}
+
+void
+dhd_pno_unlock_batch_results(dhd_pub_t *dhd)
+{
+ dhd_pno_status_info_t *_pno_state;
+ _pno_state = PNO_GET_PNOSTATE(dhd);
+ mutex_unlock(&_pno_state->pno_mutex);
+ return;
+}
+
+int
+dhd_wait_batch_results_complete(dhd_pub_t *dhd)
+{
+ dhd_pno_status_info_t *_pno_state;
+ dhd_pno_params_t *_params;
+ int err = BCME_OK;
+
+ NULL_CHECK(dhd, "dhd is NULL", err);
+ NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+ _pno_state = PNO_GET_PNOSTATE(dhd);
+ _params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
+
+	/* has the workqueue already finished the retrieval? */
+ if (_params->params_gscan.get_batch_flag == GSCAN_BATCH_RETRIEVAL_IN_PROGRESS) {
+ DHD_PNO(("%s: Waiting to complete retrieval..\n", __FUNCTION__));
+ wait_event_interruptible_timeout(_pno_state->batch_get_wait,
+ is_batch_retrieval_complete(&_params->params_gscan),
+ msecs_to_jiffies(GSCAN_BATCH_GET_MAX_WAIT));
+ } else { /* GSCAN_BATCH_RETRIEVAL_COMPLETE */
+ gscan_results_cache_t *iter;
+ uint16 num_results = 0;
+
+ mutex_lock(&_pno_state->pno_mutex);
+ iter = _params->params_gscan.gscan_batch_cache;
+ while (iter) {
+ num_results += iter->tot_count - iter->tot_consumed;
+ iter = iter->next;
+ }
+ mutex_unlock(&_pno_state->pno_mutex);
+
+		/* all results consumed or nothing cached:
+		 * get fresh results from FW
+		 */
+ if ((_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) && !num_results) {
+ DHD_PNO(("%s: No results cached, getting from FW..\n", __FUNCTION__));
+ err = dhd_retreive_batch_scan_results(dhd);
+ if (err == BCME_OK) {
+ wait_event_interruptible_timeout(_pno_state->batch_get_wait,
+ is_batch_retrieval_complete(&_params->params_gscan),
+ msecs_to_jiffies(GSCAN_BATCH_GET_MAX_WAIT));
+ }
+ }
+ }
+ DHD_PNO(("%s: Wait complete\n", __FUNCTION__));
+ return err;
+}
+
+int
+dhd_pno_set_cfg_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type,
+ void *buf, bool flush)
+{
+ int err = BCME_OK;
+ dhd_pno_params_t *_params;
+ int i;
+ dhd_pno_status_info_t *_pno_state;
+
+ NULL_CHECK(dhd, "dhd is NULL", err);
+ NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+
+ DHD_PNO(("%s enter\n", __FUNCTION__));
+
+ _pno_state = PNO_GET_PNOSTATE(dhd);
+ _params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
+ mutex_lock(&_pno_state->pno_mutex);
+
+ switch (type) {
+ case DHD_PNO_BATCH_SCAN_CFG_ID:
+ {
+ gscan_batch_params_t *ptr = (gscan_batch_params_t *)buf;
+ _params->params_gscan.bestn = ptr->bestn;
+ _params->params_gscan.mscan = ptr->mscan;
+ _params->params_gscan.buffer_threshold = ptr->buffer_threshold;
+ }
+ break;
+ case DHD_PNO_GEOFENCE_SCAN_CFG_ID:
+ {
+ gscan_hotlist_scan_params_t *ptr = (gscan_hotlist_scan_params_t *)buf;
+ struct dhd_pno_bssid *_pno_bssid;
+ struct bssid_t *bssid_ptr;
+ int8 flags;
+
+ if (flush) {
+ dhd_pno_reset_cfg_gscan(dhd, _params, _pno_state,
+ GSCAN_FLUSH_HOTLIST_CFG);
+ }
+
+ if (!ptr->nbssid) {
+ break;
+ }
+ if (!_params->params_gscan.nbssid_hotlist) {
+ INIT_LIST_HEAD(&_params->params_gscan.hotlist_bssid_list);
+ }
+
+ if ((_params->params_gscan.nbssid_hotlist +
+ ptr->nbssid) > PFN_SWC_MAX_NUM_APS) {
+ DHD_ERROR(("Excessive number of hotlist APs programmed %d\n",
+ (_params->params_gscan.nbssid_hotlist +
+ ptr->nbssid)));
+ err = BCME_RANGE;
+ goto exit;
+ }
+
+ for (i = 0, bssid_ptr = ptr->bssid; i < ptr->nbssid; i++, bssid_ptr++) {
+ _pno_bssid = (struct dhd_pno_bssid *)MALLOCZ(dhd->osh,
+ sizeof(struct dhd_pno_bssid));
+ if (!_pno_bssid) {
+				DHD_ERROR(("_pno_bssid is NULL, cannot allocate %zu bytes",
+					sizeof(struct dhd_pno_bssid)));
+ err = BCME_NOMEM;
+ goto exit;
+ }
+ memcpy(&_pno_bssid->macaddr, &bssid_ptr->macaddr, ETHER_ADDR_LEN);
+
+ flags = (int8) bssid_ptr->rssi_reporting_threshold;
+ _pno_bssid->flags = flags << WL_PFN_RSSI_SHIFT;
+ list_add_tail(&_pno_bssid->list,
+ &_params->params_gscan.hotlist_bssid_list);
+ }
+
+ _params->params_gscan.nbssid_hotlist += ptr->nbssid;
+ _params->params_gscan.lost_ap_window = ptr->lost_ap_window;
+ }
+ break;
+ case DHD_PNO_SCAN_CFG_ID:
+ {
+ int k;
+ uint16 band;
+ gscan_scan_params_t *ptr = (gscan_scan_params_t *)buf;
+ struct dhd_pno_gscan_channel_bucket *ch_bucket;
+
+ if (ptr->nchannel_buckets <= GSCAN_MAX_CH_BUCKETS) {
+ _params->params_gscan.nchannel_buckets = ptr->nchannel_buckets;
+
+ memcpy(_params->params_gscan.channel_bucket, ptr->channel_bucket,
+ _params->params_gscan.nchannel_buckets *
+ sizeof(struct dhd_pno_gscan_channel_bucket));
+ ch_bucket = _params->params_gscan.channel_bucket;
+
+ for (i = 0; i < ptr->nchannel_buckets; i++) {
+ band = ch_bucket[i].band;
+ for (k = 0; k < ptr->channel_bucket[i].num_channels; k++) {
+ ch_bucket[i].chan_list[k] =
+ wf_mhz2channel(ptr->channel_bucket[i].chan_list[k],
+ 0);
+ }
+ ch_bucket[i].band = 0;
+ /* HAL and DHD use different bits for 2.4G and
+ * 5G in bitmap. Hence translating it here...
+ */
+ if (band & GSCAN_BG_BAND_MASK) {
+ ch_bucket[i].band |= WLC_BAND_2G;
+ }
+ if (band & GSCAN_A_BAND_MASK) {
+ ch_bucket[i].band |= WLC_BAND_6G | WLC_BAND_5G;
+ }
+ if (band & GSCAN_DFS_MASK) {
+ ch_bucket[i].band |= GSCAN_DFS_MASK;
+ }
+ DHD_PNO(("band %d report_flag %d\n", ch_bucket[i].band,
+ ch_bucket[i].report_flag));
+ }
+
+ for (i = 0; i < ptr->nchannel_buckets; i++) {
+ ch_bucket[i].bucket_freq_multiple =
+ ch_bucket[i].bucket_freq_multiple/ptr->scan_fr;
+ ch_bucket[i].bucket_max_multiple =
+ ch_bucket[i].bucket_max_multiple/ptr->scan_fr;
+ DHD_PNO(("mult %d max_mult %d\n",
+ ch_bucket[i].bucket_freq_multiple,
+ ch_bucket[i].bucket_max_multiple));
+ }
+ _params->params_gscan.scan_fr = ptr->scan_fr;
+
+ DHD_PNO(("num_buckets %d scan_fr %d\n", ptr->nchannel_buckets,
+ _params->params_gscan.scan_fr));
+ } else {
+ err = BCME_BADARG;
+ }
+ }
+ break;
+ case DHD_PNO_EPNO_CFG_ID:
+ if (flush) {
+ dhd_pno_reset_cfg_gscan(dhd, _params, _pno_state,
+ GSCAN_FLUSH_EPNO_CFG);
+ }
+ break;
+ case DHD_PNO_EPNO_PARAMS_ID:
+ if (flush) {
+ memset(&_params->params_gscan.epno_cfg.params, 0,
+ sizeof(wl_ssid_ext_params_t));
+ }
+ if (buf) {
+ memcpy(&_params->params_gscan.epno_cfg.params, buf,
+ sizeof(wl_ssid_ext_params_t));
+ }
+ break;
+ default:
+ err = BCME_BADARG;
+ DHD_ERROR(("%s: Unrecognized cmd type - %d\n", __FUNCTION__, type));
+ break;
+ }
+exit:
+ mutex_unlock(&_pno_state->pno_mutex);
+ return err;
+
+}
+
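+/* Sanity-check a gscan request: scan_fr and the bucket count must be non-zero,
+ * and any explicit channel list must stay within the 5 GHz channel range.
+ */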
+static bool
+validate_gscan_params(struct dhd_pno_gscan_params *gscan_params)
+{
+ unsigned int i, k;
+
+	if (!gscan_params->scan_fr || !gscan_params->nchannel_buckets) {
+		DHD_ERROR(("%s : scan freq (%d) or number of channel buckets (%d) is zero\n",
+			__FUNCTION__, gscan_params->scan_fr, gscan_params->nchannel_buckets));
+ return false;
+ }
+
+ for (i = 0; i < gscan_params->nchannel_buckets; i++) {
+ if (!gscan_params->channel_bucket[i].band) {
+ for (k = 0; k < gscan_params->channel_bucket[i].num_channels; k++) {
+ if (gscan_params->channel_bucket[i].chan_list[k] > CHANNEL_5G_MAX) {
+ DHD_ERROR(("%s : Unknown channel %d\n", __FUNCTION__,
+ gscan_params->channel_bucket[i].chan_list[k]));
+ return false;
+ }
+ }
+ }
+ }
+
+ return true;
+}
+
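+/* Start (or restart) GSCAN: build the channel-bucket list, flush any previous
+ * firmware PNO state, program pfn_set/pfn_cfg/pfn_gscan_cfg plus the hotlist
+ * and ePNO configuration, and finally enable PFN.
+ */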
+static int
+dhd_pno_set_for_gscan(dhd_pub_t *dhd, struct dhd_pno_gscan_params *gscan_params)
+{
+ int err = BCME_OK;
+ int mode, i = 0;
+ uint16 _chan_list[WL_NUMCHANNELS];
+ int tot_nchan = 0;
+ int num_buckets_to_fw, tot_num_buckets, gscan_param_size;
+ dhd_pno_status_info_t *_pno_state = PNO_GET_PNOSTATE(dhd);
+ wl_pfn_gscan_ch_bucket_cfg_t *ch_bucket = NULL;
+ wl_pfn_gscan_cfg_t *pfn_gscan_cfg_t = NULL;
+ wl_pfn_bssid_t *p_pfn_bssid = NULL;
+ dhd_pno_params_t *_params;
+ bool fw_flushed = FALSE;
+
+ _params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
+
+ NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+ NULL_CHECK(gscan_params, "gscan_params is NULL", err);
+
+ DHD_PNO(("%s enter\n", __FUNCTION__));
+
+ if (!dhd_support_sta_mode(dhd)) {
+ err = BCME_BADOPTION;
+ goto exit;
+ }
+ if (!WLS_SUPPORTED(_pno_state)) {
+ DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__));
+ err = BCME_UNSUPPORTED;
+ goto exit;
+ }
+
+ if (!validate_gscan_params(gscan_params)) {
+ DHD_ERROR(("%s : Cannot start gscan - bad params\n", __FUNCTION__));
+ err = BCME_BADARG;
+ goto exit;
+ }
+
+ if (!(ch_bucket = dhd_pno_gscan_create_channel_list(dhd, _pno_state,
+ _chan_list, &tot_num_buckets, &num_buckets_to_fw))) {
+ goto exit;
+ }
+
+ mutex_lock(&_pno_state->pno_mutex);
+ /* Clear any pre-existing results in our cache
+ * not consumed by framework
+ */
+ dhd_gscan_clear_all_batch_results(dhd);
+ if (_pno_state->pno_mode & (DHD_PNO_GSCAN_MODE | DHD_PNO_LEGACY_MODE)) {
+ /* store current pno_mode before disabling pno */
+ mode = _pno_state->pno_mode;
+ err = dhd_pno_clean(dhd);
+ if (err < 0) {
+ DHD_ERROR(("%s : failed to disable PNO\n", __FUNCTION__));
+ mutex_unlock(&_pno_state->pno_mutex);
+ goto exit;
+ }
+ fw_flushed = TRUE;
+ /* restore the previous mode */
+ _pno_state->pno_mode = mode;
+ }
+ _pno_state->pno_mode |= DHD_PNO_GSCAN_MODE;
+ mutex_unlock(&_pno_state->pno_mutex);
+
+ if ((_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) &&
+ !gscan_params->epno_cfg.num_epno_ssid) {
+ struct dhd_pno_legacy_params *params_legacy;
+ params_legacy =
+ &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS].params_legacy);
+
+ if ((err = _dhd_pno_add_ssid(dhd, &params_legacy->ssid_list,
+ params_legacy->nssid)) < 0) {
+ DHD_ERROR(("failed to add ssid list (err %d) in firmware\n", err));
+ goto exit;
+ }
+ }
+
+ if ((err = _dhd_pno_set(dhd, _params, DHD_PNO_GSCAN_MODE)) < 0) {
+		DHD_ERROR(("failed to execute pno_set (err %d) in firmware\n", err));
+ goto exit;
+ }
+
+ gscan_param_size = sizeof(wl_pfn_gscan_cfg_t) +
+ (num_buckets_to_fw - 1) * sizeof(wl_pfn_gscan_ch_bucket_cfg_t);
+ pfn_gscan_cfg_t = (wl_pfn_gscan_cfg_t *) MALLOCZ(dhd->osh, gscan_param_size);
+
+ if (!pfn_gscan_cfg_t) {
+ DHD_ERROR(("%s: failed to malloc memory of size %d\n",
+ __FUNCTION__, gscan_param_size));
+ err = BCME_NOMEM;
+ goto exit;
+ }
+
+ pfn_gscan_cfg_t->version = WL_GSCAN_CFG_VERSION;
+ if (gscan_params->mscan)
+ pfn_gscan_cfg_t->buffer_threshold = gscan_params->buffer_threshold;
+ else
+ pfn_gscan_cfg_t->buffer_threshold = GSCAN_BATCH_NO_THR_SET;
+
+ pfn_gscan_cfg_t->flags =
+ (gscan_params->send_all_results_flag & GSCAN_SEND_ALL_RESULTS_MASK);
+ pfn_gscan_cfg_t->flags |= GSCAN_ALL_BUCKETS_IN_FIRST_SCAN_MASK;
+ pfn_gscan_cfg_t->count_of_channel_buckets = num_buckets_to_fw;
+ pfn_gscan_cfg_t->retry_threshold = GSCAN_RETRY_THRESHOLD;
+
+ for (i = 0; i < num_buckets_to_fw; i++) {
+ pfn_gscan_cfg_t->channel_bucket[i].bucket_end_index =
+ ch_bucket[i].bucket_end_index;
+ pfn_gscan_cfg_t->channel_bucket[i].bucket_freq_multiple =
+ ch_bucket[i].bucket_freq_multiple;
+ pfn_gscan_cfg_t->channel_bucket[i].max_freq_multiple =
+ ch_bucket[i].max_freq_multiple;
+ pfn_gscan_cfg_t->channel_bucket[i].repeat =
+ ch_bucket[i].repeat;
+ pfn_gscan_cfg_t->channel_bucket[i].flag =
+ ch_bucket[i].flag;
+ }
+
+ tot_nchan = pfn_gscan_cfg_t->channel_bucket[num_buckets_to_fw - 1].bucket_end_index + 1;
+ DHD_PNO(("Total channel num %d total ch_buckets %d ch_buckets_to_fw %d \n", tot_nchan,
+ tot_num_buckets, num_buckets_to_fw));
+
+ if ((err = _dhd_pno_cfg(dhd, _chan_list, tot_nchan)) < 0) {
+		DHD_ERROR(("%s : failed to execute pno_cfg (err %d) in firmware\n",
+ __FUNCTION__, err));
+ goto exit;
+ }
+
+ if ((err = _dhd_pno_gscan_cfg(dhd, pfn_gscan_cfg_t, gscan_param_size)) < 0) {
+ DHD_ERROR(("%s : failed to set call pno_gscan_cfg (err %d) in firmware\n",
+ __FUNCTION__, err));
+ goto exit;
+ }
+ /* Reprogram ePNO cfg from dhd cache if FW has been flushed */
+ if (fw_flushed) {
+ dhd_pno_set_epno(dhd);
+ }
+
+ if (gscan_params->nbssid_hotlist) {
+ struct dhd_pno_bssid *iter, *next;
+ wl_pfn_bssid_t *ptr;
+ p_pfn_bssid = (wl_pfn_bssid_t *)MALLOCZ(dhd->osh,
+ sizeof(wl_pfn_bssid_t) * gscan_params->nbssid_hotlist);
+ if (p_pfn_bssid == NULL) {
+			DHD_ERROR(("%s : failed to allocate wl_pfn_bssid_t array"
+				" (count: %d)",
+				__FUNCTION__, gscan_params->nbssid_hotlist));
+ err = BCME_NOMEM;
+ _pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE;
+ goto exit;
+ }
+ ptr = p_pfn_bssid;
+ /* convert dhd_pno_bssid to wl_pfn_bssid */
+ DHD_PNO(("nhotlist %d\n", gscan_params->nbssid_hotlist));
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ list_for_each_entry_safe(iter, next,
+ &gscan_params->hotlist_bssid_list, list) {
+ char buffer_hotlist[64];
+ GCC_DIAGNOSTIC_POP();
+ memcpy(&ptr->macaddr,
+ &iter->macaddr, ETHER_ADDR_LEN);
+ BCM_REFERENCE(buffer_hotlist);
+ DHD_PNO(("%s\n", bcm_ether_ntoa(&ptr->macaddr, buffer_hotlist)));
+ ptr->flags = iter->flags;
+ ptr++;
+ }
+
+ err = _dhd_pno_add_bssid(dhd, p_pfn_bssid, gscan_params->nbssid_hotlist);
+ if (err < 0) {
+ DHD_ERROR(("%s : failed to call _dhd_pno_add_bssid(err :%d)\n",
+ __FUNCTION__, err));
+ goto exit;
+ }
+ }
+
+ if ((err = _dhd_pno_enable(dhd, PNO_ON)) < 0) {
+ DHD_ERROR(("%s : failed to enable PNO err %d\n", __FUNCTION__, err));
+ }
+
+exit:
+ /* clear mode in case of error */
+ if (err < 0) {
+ int ret = dhd_pno_clean(dhd);
+
+ if (ret < 0) {
+ DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n",
+ __FUNCTION__, ret));
+ } else {
+ _pno_state->pno_mode &= ~DHD_PNO_GSCAN_MODE;
+ }
+ }
+ MFREE(dhd->osh, p_pfn_bssid,
+ sizeof(wl_pfn_bssid_t) * gscan_params->nbssid_hotlist);
+ if (pfn_gscan_cfg_t) {
+ MFREE(dhd->osh, pfn_gscan_cfg_t, gscan_param_size);
+ }
+ if (ch_bucket) {
+ MFREE(dhd->osh, ch_bucket,
+ (tot_num_buckets * sizeof(wl_pfn_gscan_ch_bucket_cfg_t)));
+ }
+ return err;
+
+}
+
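+/* Flatten the gscan channel buckets into chan_list and build the matching
+ * wl_pfn_gscan_ch_bucket_cfg_t array; when legacy PNO is running alongside,
+ * an extra bucket is appended for it and all bucket multiples are rebased on
+ * the GCD of the two scan frequencies. Returns NULL on failure.
+ */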
+static wl_pfn_gscan_ch_bucket_cfg_t *
+dhd_pno_gscan_create_channel_list(dhd_pub_t *dhd,
+ dhd_pno_status_info_t *_pno_state,
+ uint16 *chan_list,
+ uint32 *num_buckets,
+ uint32 *num_buckets_to_fw)
+{
+ int i, num_channels, err, nchan = WL_NUMCHANNELS, ch_cnt;
+ uint16 *ptr = chan_list, max;
+ wl_pfn_gscan_ch_bucket_cfg_t *ch_bucket;
+ dhd_pno_params_t *_params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
+ bool is_pno_legacy_running;
+ dhd_pno_gscan_channel_bucket_t *gscan_buckets = _params->params_gscan.channel_bucket;
+
+ /* ePNO and Legacy PNO do not co-exist */
+ is_pno_legacy_running = ((_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) &&
+ !_params->params_gscan.epno_cfg.num_epno_ssid);
+
+ if (is_pno_legacy_running)
+ *num_buckets = _params->params_gscan.nchannel_buckets + 1;
+ else
+ *num_buckets = _params->params_gscan.nchannel_buckets;
+
+ *num_buckets_to_fw = 0;
+
+ ch_bucket = (wl_pfn_gscan_ch_bucket_cfg_t *) MALLOC(dhd->osh,
+ ((*num_buckets) * sizeof(wl_pfn_gscan_ch_bucket_cfg_t)));
+
+ if (!ch_bucket) {
+ DHD_ERROR(("%s: failed to malloc memory of size %zd\n",
+ __FUNCTION__, (*num_buckets) * sizeof(wl_pfn_gscan_ch_bucket_cfg_t)));
+ *num_buckets_to_fw = *num_buckets = 0;
+ return NULL;
+ }
+
+ max = gscan_buckets[0].bucket_freq_multiple;
+ num_channels = 0;
+ /* nchan is the remaining space left in chan_list buffer
+ * So any overflow list of channels is ignored
+ */
+ for (i = 0; i < _params->params_gscan.nchannel_buckets && nchan; i++) {
+ if (!gscan_buckets[i].band) {
+ ch_cnt = MIN(gscan_buckets[i].num_channels, (uint8)nchan);
+ num_channels += ch_cnt;
+ memcpy(ptr, gscan_buckets[i].chan_list,
+ ch_cnt * sizeof(uint16));
+ ptr = ptr + ch_cnt;
+ } else {
+			/* get a valid channel list for the requested band */
+ err = _dhd_pno_get_channels(dhd, ptr,
+ &nchan, (gscan_buckets[i].band & GSCAN_ABG_BAND_MASK),
+ !(gscan_buckets[i].band & GSCAN_DFS_MASK));
+
+ if (err < 0) {
+ DHD_ERROR(("%s: failed to get valid channel list(band : %d)\n",
+ __FUNCTION__, gscan_buckets[i].band));
+ MFREE(dhd->osh, ch_bucket,
+ ((*num_buckets) * sizeof(wl_pfn_gscan_ch_bucket_cfg_t)));
+ *num_buckets_to_fw = *num_buckets = 0;
+ return NULL;
+ }
+
+ num_channels += nchan;
+ ptr = ptr + nchan;
+ }
+
+ ch_bucket[i].bucket_end_index = num_channels - 1;
+ ch_bucket[i].bucket_freq_multiple = gscan_buckets[i].bucket_freq_multiple;
+ ch_bucket[i].repeat = gscan_buckets[i].repeat;
+ ch_bucket[i].max_freq_multiple = gscan_buckets[i].bucket_max_multiple;
+ ch_bucket[i].flag = gscan_buckets[i].report_flag;
+ /* HAL and FW interpretations are opposite for this bit */
+ ch_bucket[i].flag ^= DHD_PNO_REPORT_NO_BATCH;
+ if (max < gscan_buckets[i].bucket_freq_multiple)
+ max = gscan_buckets[i].bucket_freq_multiple;
+ nchan = WL_NUMCHANNELS - num_channels;
+ *num_buckets_to_fw = *num_buckets_to_fw + 1;
+ DHD_PNO(("end_idx %d freq_mult - %d\n",
+ ch_bucket[i].bucket_end_index, ch_bucket[i].bucket_freq_multiple));
+ }
+
+ _params->params_gscan.max_ch_bucket_freq = max;
+	/* Legacy PNO may be running, in which case we need to create a legacy PNO
+	 * bucket. Use the GCD of the legacy PNO and gscan scan frequencies as the
+	 * common base frequency.
+	 */
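+	/* Worked example (illustrative numbers only): with gscan scan_fr = 20 s
+	 * and legacy scan_fr = 30 s, gcd() yields 10 s; a gscan bucket with
+	 * multiple 1 is rebased to 20/10 = 2 and the legacy bucket to 30/10 = 3,
+	 * both relative to the common 10 s base frequency.
+	 */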
+ if (is_pno_legacy_running) {
+ dhd_pno_params_t *_params1 = &_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS];
+ uint16 *legacy_chan_list = _params1->params_legacy.chan_list;
+ uint16 common_freq;
+ uint32 legacy_bucket_idx = _params->params_gscan.nchannel_buckets;
+ /* If no space is left then only gscan buckets will be sent to FW */
+ if (nchan) {
+ common_freq = gcd(_params->params_gscan.scan_fr,
+ _params1->params_legacy.scan_fr);
+ max = gscan_buckets[0].bucket_freq_multiple;
+ /* GSCAN buckets */
+ for (i = 0; i < _params->params_gscan.nchannel_buckets; i++) {
+ ch_bucket[i].bucket_freq_multiple *= _params->params_gscan.scan_fr;
+ ch_bucket[i].bucket_freq_multiple /= common_freq;
+ if (max < gscan_buckets[i].bucket_freq_multiple)
+ max = gscan_buckets[i].bucket_freq_multiple;
+ }
+ /* Legacy PNO bucket */
+ ch_bucket[legacy_bucket_idx].bucket_freq_multiple =
+ _params1->params_legacy.scan_fr;
+ ch_bucket[legacy_bucket_idx].bucket_freq_multiple /=
+ common_freq;
+ _params->params_gscan.max_ch_bucket_freq = MAX(max,
+ ch_bucket[legacy_bucket_idx].bucket_freq_multiple);
+ ch_bucket[legacy_bucket_idx].flag = CH_BUCKET_REPORT_REGULAR;
+ /* Now add channels to the legacy scan bucket */
+ for (i = 0; i < _params1->params_legacy.nchan && nchan; i++, nchan--) {
+ ptr[i] = legacy_chan_list[i];
+ num_channels++;
+ }
+ ch_bucket[legacy_bucket_idx].bucket_end_index = num_channels - 1;
+ *num_buckets_to_fw = *num_buckets_to_fw + 1;
+ DHD_PNO(("end_idx %d freq_mult - %d\n",
+ ch_bucket[legacy_bucket_idx].bucket_end_index,
+ ch_bucket[legacy_bucket_idx].bucket_freq_multiple));
+ }
+ }
+ return ch_bucket;
+}
+
+static int
+dhd_pno_stop_for_gscan(dhd_pub_t *dhd)
+{
+ int err = BCME_OK;
+ int mode;
+ dhd_pno_status_info_t *_pno_state;
+
+ _pno_state = PNO_GET_PNOSTATE(dhd);
+ DHD_PNO(("%s enter\n", __FUNCTION__));
+
+ if (!dhd_support_sta_mode(dhd)) {
+ err = BCME_BADOPTION;
+ goto exit;
+ }
+ if (!WLS_SUPPORTED(_pno_state)) {
+ DHD_ERROR(("%s : wifi location service is not supported\n",
+ __FUNCTION__));
+ err = BCME_UNSUPPORTED;
+ goto exit;
+ }
+
+ if (!(_pno_state->pno_mode & DHD_PNO_GSCAN_MODE)) {
+ DHD_ERROR(("%s : GSCAN is not enabled\n", __FUNCTION__));
+ goto exit;
+ }
+ if (_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan.mscan) {
+ /* retrieve the batching data from firmware into host */
+ err = dhd_wait_batch_results_complete(dhd);
+ if (err != BCME_OK)
+ goto exit;
+ }
+ mutex_lock(&_pno_state->pno_mutex);
+ mode = _pno_state->pno_mode & ~DHD_PNO_GSCAN_MODE;
+ err = dhd_pno_clean(dhd);
+ if (err < 0) {
+ DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n",
+ __FUNCTION__, err));
+ mutex_unlock(&_pno_state->pno_mutex);
+ return err;
+ }
+ _pno_state->pno_mode = mode;
+ mutex_unlock(&_pno_state->pno_mutex);
+
+ /* Reprogram Legacy PNO if it was running */
+ if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) {
+ struct dhd_pno_legacy_params *params_legacy;
+ uint16 chan_list[WL_NUMCHANNELS];
+
+ params_legacy = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS].params_legacy);
+ _pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE;
+
+ DHD_PNO(("Restarting Legacy PNO SSID scan...\n"));
+ memcpy(chan_list, params_legacy->chan_list,
+ (params_legacy->nchan * sizeof(uint16)));
+ err = dhd_pno_set_legacy_pno(dhd, params_legacy->scan_fr,
+ params_legacy->pno_repeat, params_legacy->pno_freq_expo_max,
+ chan_list, params_legacy->nchan);
+ if (err < 0) {
+ DHD_ERROR(("%s : failed to restart legacy PNO scan(err: %d)\n",
+ __FUNCTION__, err));
+ goto exit;
+ }
+
+ }
+
+exit:
+ return err;
+}
+
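+/*
+ * HAL entry point: run == TRUE (re)starts GSCAN with the current parameters;
+ * run == FALSE stops it, optionally flushing all stored GSCAN config first.
+ */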
+int
+dhd_pno_initiate_gscan_request(dhd_pub_t *dhd, bool run, bool flush)
+{
+ int err = BCME_OK;
+ dhd_pno_params_t *params;
+ dhd_pno_status_info_t *_pno_state;
+ struct dhd_pno_gscan_params *gscan_params;
+
+ NULL_CHECK(dhd, "dhd is NULL\n", err);
+ NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+ _pno_state = PNO_GET_PNOSTATE(dhd);
+
+ DHD_PNO(("%s enter - run %d flush %d\n", __FUNCTION__, run, flush));
+
+ params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
+ gscan_params = &params->params_gscan;
+
+ if (run) {
+ err = dhd_pno_set_for_gscan(dhd, gscan_params);
+ } else {
+ if (flush) {
+ mutex_lock(&_pno_state->pno_mutex);
+ dhd_pno_reset_cfg_gscan(dhd, params, _pno_state, GSCAN_FLUSH_ALL_CFG);
+ mutex_unlock(&_pno_state->pno_mutex);
+ }
+ /* Need to stop all gscan */
+ err = dhd_pno_stop_for_gscan(dhd);
+ }
+
+ return err;
+}
+
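+/*
+ * Toggle reporting of full scan results as they arrive. If GSCAN is already
+ * running and the flag actually changed, push a flags-only config update to
+ * the firmware.
+ */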
+int
+dhd_pno_enable_full_scan_result(dhd_pub_t *dhd, bool real_time_flag)
+{
+ int err = BCME_OK;
+ dhd_pno_params_t *params;
+ dhd_pno_status_info_t *_pno_state;
+ struct dhd_pno_gscan_params *gscan_params;
+ uint8 old_flag;
+
+ NULL_CHECK(dhd, "dhd is NULL\n", err);
+ NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+ _pno_state = PNO_GET_PNOSTATE(dhd);
+
+ DHD_PNO(("%s enter\n", __FUNCTION__));
+
+ if (!WLS_SUPPORTED(_pno_state)) {
+ DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__));
+ err = BCME_UNSUPPORTED;
+ goto exit;
+ }
+
+ params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
+ gscan_params = &params->params_gscan;
+
+ mutex_lock(&_pno_state->pno_mutex);
+
+ old_flag = gscan_params->send_all_results_flag;
+ gscan_params->send_all_results_flag = (uint8) real_time_flag;
+ if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) {
+ if (old_flag != gscan_params->send_all_results_flag) {
+ wl_pfn_gscan_cfg_t gscan_cfg;
+
+ gscan_cfg.version = WL_GSCAN_CFG_VERSION;
+ gscan_cfg.flags = (gscan_params->send_all_results_flag &
+ GSCAN_SEND_ALL_RESULTS_MASK);
+ gscan_cfg.flags |= GSCAN_CFG_FLAGS_ONLY_MASK;
+
+ if ((err = _dhd_pno_gscan_cfg(dhd, &gscan_cfg,
+ sizeof(wl_pfn_gscan_cfg_t))) < 0) {
+ DHD_ERROR(("%s : pno_gscan_cfg failed (err %d) in firmware\n",
+ __FUNCTION__, err));
+ goto exit_mutex_unlock;
+ }
+ } else {
+ DHD_PNO(("No change in flag - %d\n", old_flag));
+ }
+ } else {
+ DHD_PNO(("Gscan not started\n"));
+ }
+exit_mutex_unlock:
+ mutex_unlock(&_pno_state->pno_mutex);
+exit:
+ return err;
+}
+
+/* Clean up any consumed results.
+ * Return TRUE if all results were consumed, else FALSE.
+ */
+int dhd_gscan_batch_cache_cleanup(dhd_pub_t *dhd)
+{
+ int ret = 0;
+ dhd_pno_params_t *params;
+ struct dhd_pno_gscan_params *gscan_params;
+ dhd_pno_status_info_t *_pno_state;
+ gscan_results_cache_t *iter, *tmp;
+
+ _pno_state = PNO_GET_PNOSTATE(dhd);
+ params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
+ gscan_params = &params->params_gscan;
+ iter = gscan_params->gscan_batch_cache;
+
+ while (iter) {
+ if (iter->tot_consumed == iter->tot_count) {
+ tmp = iter->next;
+ MFREE(dhd->osh, iter,
+ ((iter->tot_count - 1) * sizeof(wifi_gscan_result_t))
+ + sizeof(gscan_results_cache_t));
+ iter = tmp;
+ } else
+ break;
+ }
+ gscan_params->gscan_batch_cache = iter;
+ ret = (iter == NULL);
+ return ret;
+}
+
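+/*
+ * Pull batched results from firmware via the "pfnlbest" iovar, looping until
+ * the firmware reports a complete set. Results are split into per-scan caches
+ * by looking for gaps between consecutive timestamps.
+ */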
+static int
+_dhd_pno_get_gscan_batch_from_fw(dhd_pub_t *dhd)
+{
+ int err = BCME_OK;
+ uint32 timestamp = 0, ts = 0, i, j, timediff;
+ dhd_pno_params_t *params;
+ dhd_pno_status_info_t *_pno_state;
+ wl_pfn_lnet_info_v1_t *plnetinfo;
+ wl_pfn_lnet_info_v2_t *plnetinfo_v2;
+ struct dhd_pno_gscan_params *gscan_params;
+ wl_pfn_lscanresults_v1_t *plbestnet_v1 = NULL;
+ wl_pfn_lscanresults_v2_t *plbestnet_v2 = NULL;
+ gscan_results_cache_t *iter, *tail;
+ wifi_gscan_result_t *result;
+ uint8 *nAPs_per_scan = NULL;
+ uint8 num_scans_in_cur_iter;
+ uint16 count;
+ uint16 fwcount;
+ uint16 fwstatus = PFN_INCOMPLETE;
+ struct osl_timespec tm_spec;
+
+ /* Static asserts in _dhd_pno_get_for_batch() below guarantee the v1 and v2
+ * net_info and subnet_info structures are compatible in size and SSID offset,
+ * allowing v1 to be safely used in the code below except for lscanresults
+ * fields themselves (status, count, offset to netinfo).
+ */
+
+ NULL_CHECK(dhd, "dhd is NULL\n", err);
+ NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+
+ _pno_state = PNO_GET_PNOSTATE(dhd);
+ params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
+ DHD_PNO(("%s enter\n", __FUNCTION__));
+
+ if (!WLS_SUPPORTED(_pno_state)) {
+ DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__));
+ err = BCME_UNSUPPORTED;
+ goto exit;
+ }
+ if (!(_pno_state->pno_mode & DHD_PNO_GSCAN_MODE)) {
+ DHD_ERROR(("%s: GSCAN is not enabled\n", __FUNCTION__));
+ goto exit;
+ }
+ gscan_params = &params->params_gscan;
+ nAPs_per_scan = (uint8 *) MALLOC(dhd->osh, gscan_params->mscan);
+
+ if (!nAPs_per_scan) {
+ DHD_ERROR(("%s :Out of memory!! Cant malloc %d bytes\n", __FUNCTION__,
+ gscan_params->mscan));
+ err = BCME_NOMEM;
+ goto exit;
+ }
+
+ plbestnet_v1 = (wl_pfn_lscanresults_v1_t *)MALLOC(dhd->osh, PNO_BESTNET_LEN);
+ if (!plbestnet_v1) {
+ DHD_ERROR(("%s :Out of memory!! Cant malloc %d bytes\n", __FUNCTION__,
+ (int)PNO_BESTNET_LEN));
+ err = BCME_NOMEM;
+ goto exit;
+ }
+ plbestnet_v2 = (wl_pfn_lscanresults_v2_t *)plbestnet_v1;
+
+ mutex_lock(&_pno_state->pno_mutex);
+
+ dhd_gscan_clear_all_batch_results(dhd);
+
+ if (!(_pno_state->pno_mode & DHD_PNO_GSCAN_MODE)) {
+ DHD_ERROR(("%s : GSCAN is not enabled\n", __FUNCTION__));
+ goto exit_mutex_unlock;
+ }
+
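+	/* Treat entries whose timestamps differ by more than half a scan
+	 * interval as belonging to different scans.
+	 */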
+ timediff = gscan_params->scan_fr * 1000;
+ timediff = timediff >> 1;
+
+	/* OK, now let's start getting results from the FW */
+ tail = gscan_params->gscan_batch_cache;
+ do {
+ err = dhd_iovar(dhd, 0, "pfnlbest", NULL, 0, (char *)plbestnet_v1, PNO_BESTNET_LEN,
+ FALSE);
+ if (err < 0) {
+ DHD_ERROR(("%s : Cannot get all the batch results, err :%d\n",
+ __FUNCTION__, err));
+ goto exit_mutex_unlock;
+ }
+ osl_get_monotonic_boottime(&tm_spec);
+
+ if (plbestnet_v1->version == PFN_LBEST_SCAN_RESULT_VERSION_V1) {
+ fwstatus = plbestnet_v1->status;
+ fwcount = plbestnet_v1->count;
+ plnetinfo = &plbestnet_v1->netinfo[0];
+
+ DHD_PNO(("ver %d, status : %d, count %d\n",
+ plbestnet_v1->version, fwstatus, fwcount));
+
+ if (fwcount == 0) {
+ DHD_PNO(("No more batch results\n"));
+ goto exit_mutex_unlock;
+ }
+ if (fwcount > BESTN_MAX) {
+ DHD_ERROR(("%s :fwcount %d is greater than BESTN_MAX %d \n",
+ __FUNCTION__, fwcount, (int)BESTN_MAX));
+ /* Process only BESTN_MAX number of results per batch */
+ fwcount = BESTN_MAX;
+ }
+ num_scans_in_cur_iter = 0;
+
+ timestamp = plnetinfo->timestamp;
+			/* find out how many scans' results we got in
+			 * this batch of FW results
+			 */
+ for (i = 0, count = 0; i < fwcount; i++, count++, plnetinfo++) {
+				/* Unlikely to happen, but just in case the results from
+				 * FW don't make sense, assume they are part of one single scan
+				 */
+ if (num_scans_in_cur_iter >= gscan_params->mscan) {
+ num_scans_in_cur_iter = 0;
+ count = fwcount;
+ break;
+ }
+ if (TIME_DIFF_MS(timestamp, plnetinfo->timestamp) > timediff) {
+ nAPs_per_scan[num_scans_in_cur_iter] = count;
+ count = 0;
+ num_scans_in_cur_iter++;
+ }
+ timestamp = plnetinfo->timestamp;
+ }
+ if (num_scans_in_cur_iter < gscan_params->mscan) {
+ nAPs_per_scan[num_scans_in_cur_iter] = count;
+ num_scans_in_cur_iter++;
+ }
+
+ DHD_PNO(("num_scans_in_cur_iter %d\n", num_scans_in_cur_iter));
+ /* reset plnetinfo to the first item for the next loop */
+ plnetinfo -= i;
+
+ for (i = 0; i < num_scans_in_cur_iter; i++) {
+ iter = (gscan_results_cache_t *)
+ MALLOCZ(dhd->osh, ((nAPs_per_scan[i] - 1) *
+ sizeof(wifi_gscan_result_t)) +
+ sizeof(gscan_results_cache_t));
+ if (!iter) {
+ DHD_ERROR(("%s :Out of memory!! Cant malloc %d bytes\n",
+ __FUNCTION__, gscan_params->mscan));
+ err = BCME_NOMEM;
+ goto exit_mutex_unlock;
+ }
+				/* Need this check because the new set of results from FW
+				 * may be a continuation of the previous set's scan results
+				 */
+ if (TIME_DIFF_MS(ts, plnetinfo->timestamp) > timediff) {
+ iter->scan_id = ++gscan_params->scan_id;
+ } else {
+ iter->scan_id = gscan_params->scan_id;
+ }
+ DHD_PNO(("scan_id %d tot_count %d \n",
+ gscan_params->scan_id, nAPs_per_scan[i]));
+ iter->tot_count = nAPs_per_scan[i];
+ iter->tot_consumed = 0;
+ iter->flag = 0;
+ if (plnetinfo->flags & PFN_PARTIAL_SCAN_MASK) {
+ DHD_PNO(("This scan is aborted\n"));
+ iter->flag = (ENABLE << PNO_STATUS_ABORT);
+ } else if (gscan_params->reason) {
+ iter->flag = (ENABLE << gscan_params->reason);
+ }
+
+ if (!tail) {
+ gscan_params->gscan_batch_cache = iter;
+ } else {
+ tail->next = iter;
+ }
+ tail = iter;
+ iter->next = NULL;
+ for (j = 0; j < nAPs_per_scan[i]; j++, plnetinfo++) {
+ result = &iter->results[j];
+
+ result->channel = wl_channel_to_frequency(
+ wf_chspec_ctlchan(plnetinfo->pfnsubnet.channel),
+ CHSPEC_BAND(plnetinfo->pfnsubnet.channel));
+ result->rssi = (int32) plnetinfo->RSSI;
+ result->beacon_period = 0;
+ result->capability = 0;
+ result->rtt = (uint64) plnetinfo->rtt0;
+ result->rtt_sd = (uint64) plnetinfo->rtt1;
+ result->ts = convert_fw_rel_time_to_systime(&tm_spec,
+ plnetinfo->timestamp);
+ ts = plnetinfo->timestamp;
+ if (plnetinfo->pfnsubnet.SSID_len > DOT11_MAX_SSID_LEN) {
+ DHD_ERROR(("%s: Invalid SSID length %d\n",
+ __FUNCTION__,
+ plnetinfo->pfnsubnet.SSID_len));
+ plnetinfo->pfnsubnet.SSID_len = DOT11_MAX_SSID_LEN;
+ }
+ (void)memcpy_s(result->ssid, DOT11_MAX_SSID_LEN,
+ plnetinfo->pfnsubnet.SSID,
+ plnetinfo->pfnsubnet.SSID_len);
+ result->ssid[plnetinfo->pfnsubnet.SSID_len] = '\0';
+ (void)memcpy_s(&result->macaddr, ETHER_ADDR_LEN,
+ &plnetinfo->pfnsubnet.BSSID, ETHER_ADDR_LEN);
+
+ DHD_PNO(("\tSSID : "));
+ DHD_PNO(("\n"));
+ DHD_PNO(("\tBSSID: "MACDBG"\n",
+ MAC2STRDBG(result->macaddr.octet)));
+ DHD_PNO(("\tchannel: %d, RSSI: %d, timestamp: %d ms\n",
+ plnetinfo->pfnsubnet.channel,
+ plnetinfo->RSSI, plnetinfo->timestamp));
+ DHD_PNO(("\tRTT0 : %d, RTT1: %d\n",
+ plnetinfo->rtt0, plnetinfo->rtt1));
+
+ }
+ }
+
+ } else if (plbestnet_v2->version == PFN_LBEST_SCAN_RESULT_VERSION_V2) {
+ fwstatus = plbestnet_v2->status;
+ fwcount = plbestnet_v2->count;
+ plnetinfo_v2 = (wl_pfn_lnet_info_v2_t*)&plbestnet_v2->netinfo[0];
+
+ DHD_PNO(("ver %d, status : %d, count %d\n",
+ plbestnet_v2->version, fwstatus, fwcount));
+
+ if (fwcount == 0) {
+ DHD_PNO(("No more batch results\n"));
+ goto exit_mutex_unlock;
+ }
+ if (fwcount > BESTN_MAX) {
+ DHD_ERROR(("%s :fwcount %d is greater than BESTN_MAX %d \n",
+ __FUNCTION__, fwcount, (int)BESTN_MAX));
+ /* Process only BESTN_MAX number of results per batch */
+ fwcount = BESTN_MAX;
+ }
+ num_scans_in_cur_iter = 0;
+
+ timestamp = plnetinfo_v2->timestamp;
+			/* find out how many scans' results we got
+			 * in this batch of FW results
+			 */
+ for (i = 0, count = 0; i < fwcount; i++, count++, plnetinfo_v2++) {
+				/* Unlikely to happen, but just in case the results from
+				 * FW don't make sense, assume they are part of one single scan
+				 */
+ if (num_scans_in_cur_iter >= gscan_params->mscan) {
+ num_scans_in_cur_iter = 0;
+ count = fwcount;
+ break;
+ }
+ if (TIME_DIFF_MS(timestamp, plnetinfo_v2->timestamp) > timediff) {
+ nAPs_per_scan[num_scans_in_cur_iter] = count;
+ count = 0;
+ num_scans_in_cur_iter++;
+ }
+ timestamp = plnetinfo_v2->timestamp;
+ }
+ if (num_scans_in_cur_iter < gscan_params->mscan) {
+ nAPs_per_scan[num_scans_in_cur_iter] = count;
+ num_scans_in_cur_iter++;
+ }
+
+ DHD_PNO(("num_scans_in_cur_iter %d\n", num_scans_in_cur_iter));
+ /* reset plnetinfo to the first item for the next loop */
+ plnetinfo_v2 -= i;
+
+ for (i = 0; i < num_scans_in_cur_iter; i++) {
+ iter = (gscan_results_cache_t *)
+ MALLOCZ(dhd->osh, ((nAPs_per_scan[i] - 1) *
+ sizeof(wifi_gscan_result_t)) +
+ sizeof(gscan_results_cache_t));
+ if (!iter) {
+ DHD_ERROR(("%s :Out of memory!! Cant malloc %d bytes\n",
+ __FUNCTION__, gscan_params->mscan));
+ err = BCME_NOMEM;
+ goto exit_mutex_unlock;
+ }
+				/* Need this check because the new set of results from FW
+				 * may be a continuation of the previous set's scan results
+				 */
+ if (TIME_DIFF_MS(ts, plnetinfo_v2->timestamp) > timediff) {
+ iter->scan_id = ++gscan_params->scan_id;
+ } else {
+ iter->scan_id = gscan_params->scan_id;
+ }
+ DHD_PNO(("scan_id %d tot_count %d ch_bucket %x\n",
+ gscan_params->scan_id, nAPs_per_scan[i],
+ plbestnet_v2->scan_ch_buckets[i]));
+ iter->tot_count = nAPs_per_scan[i];
+ iter->scan_ch_bucket = plbestnet_v2->scan_ch_buckets[i];
+ iter->tot_consumed = 0;
+ iter->flag = 0;
+ if (plnetinfo_v2->flags & PFN_PARTIAL_SCAN_MASK) {
+ DHD_PNO(("This scan is aborted\n"));
+ iter->flag = (ENABLE << PNO_STATUS_ABORT);
+ } else if (gscan_params->reason) {
+ iter->flag = (ENABLE << gscan_params->reason);
+ }
+
+ if (!tail) {
+ gscan_params->gscan_batch_cache = iter;
+ } else {
+ tail->next = iter;
+ }
+ tail = iter;
+ iter->next = NULL;
+ for (j = 0; j < nAPs_per_scan[i]; j++, plnetinfo_v2++) {
+ result = &iter->results[j];
+
+ result->channel =
+ wl_channel_to_frequency(
+ wf_chspec_ctlchan(plnetinfo_v2->pfnsubnet.channel),
+ CHSPEC_BAND(plnetinfo_v2->pfnsubnet.channel));
+ result->rssi = (int32) plnetinfo_v2->RSSI;
+ /* Info not available & not expected */
+ result->beacon_period = 0;
+ result->capability = 0;
+ result->rtt = (uint64) plnetinfo_v2->rtt0;
+ result->rtt_sd = (uint64) plnetinfo_v2->rtt1;
+ result->ts = convert_fw_rel_time_to_systime(&tm_spec,
+ plnetinfo_v2->timestamp);
+ ts = plnetinfo_v2->timestamp;
+ if (plnetinfo_v2->pfnsubnet.SSID_len > DOT11_MAX_SSID_LEN) {
+ DHD_ERROR(("%s: Invalid SSID length %d\n",
+ __FUNCTION__,
+ plnetinfo_v2->pfnsubnet.SSID_len));
+ plnetinfo_v2->pfnsubnet.SSID_len =
+ DOT11_MAX_SSID_LEN;
+ }
+ (void)memcpy_s(result->ssid, DOT11_MAX_SSID_LEN,
+ plnetinfo_v2->pfnsubnet.u.SSID,
+ plnetinfo_v2->pfnsubnet.SSID_len);
+ result->ssid[plnetinfo_v2->pfnsubnet.SSID_len] = '\0';
+ (void)memcpy_s(&result->macaddr, ETHER_ADDR_LEN,
+ &plnetinfo_v2->pfnsubnet.BSSID, ETHER_ADDR_LEN);
+
+ DHD_PNO(("\tSSID : "));
+ DHD_PNO(("\n"));
+ DHD_PNO(("\tBSSID: "MACDBG"\n",
+ MAC2STRDBG(result->macaddr.octet)));
+ DHD_PNO(("\tchannel: %d, RSSI: %d, timestamp: %d ms\n",
+ plnetinfo_v2->pfnsubnet.channel,
+ plnetinfo_v2->RSSI, plnetinfo_v2->timestamp));
+ DHD_PNO(("\tRTT0 : %d, RTT1: %d\n",
+ plnetinfo_v2->rtt0, plnetinfo_v2->rtt1));
+
+ }
+ }
+
+ } else {
+ err = BCME_VERSION;
+ DHD_ERROR(("bestnet fw version %d not supported\n",
+ plbestnet_v1->version));
+ goto exit_mutex_unlock;
+ }
+ } while (fwstatus == PFN_INCOMPLETE);
+
+exit_mutex_unlock:
+ mutex_unlock(&_pno_state->pno_mutex);
+exit:
+ params->params_gscan.get_batch_flag = GSCAN_BATCH_RETRIEVAL_COMPLETE;
+ smp_wmb();
+ wake_up_interruptible(&_pno_state->batch_get_wait);
+ if (nAPs_per_scan) {
+ MFREE(dhd->osh, nAPs_per_scan, gscan_params->mscan * sizeof(uint8));
+ }
+ if (plbestnet_v1) {
+ MFREE(dhd->osh, plbestnet_v1, PNO_BESTNET_LEN);
+ }
+ DHD_PNO(("Batch retrieval done!\n"));
+ return err;
+}
+#endif /* GSCAN_SUPPORT */
+
+#if defined (GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS)
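+/*
+ * Return the head of the batch-results cache and encode its size in *len
+ * (see the packing note below).
+ */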
+static void *
+dhd_get_gscan_batch_results(dhd_pub_t *dhd, uint32 *len)
+{
+ gscan_results_cache_t *iter, *results;
+ dhd_pno_status_info_t *_pno_state;
+ dhd_pno_params_t *_params;
+ uint16 num_scan_ids = 0, num_results = 0;
+
+ _pno_state = PNO_GET_PNOSTATE(dhd);
+ _params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
+
+ iter = results = _params->params_gscan.gscan_batch_cache;
+ while (iter) {
+ num_results += iter->tot_count - iter->tot_consumed;
+ num_scan_ids++;
+ iter = iter->next;
+ }
+
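+	/* Pack both counts for the caller: unconsumed results in the upper
+	 * 16 bits, number of scan ids in the lower 16 bits.
+	 */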
+ *len = ((num_results << 16) | (num_scan_ids));
+ return results;
+}
+
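+/*
+ * Dispatcher for GSCAN "get" requests from the HAL: capabilities, cached
+ * batch results, the valid channel list for a band, or a fresh ePNO SSID
+ * element appended to the ePNO list.
+ */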
+void *
+dhd_pno_get_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type,
+ void *info, uint32 *len)
+{
+ void *ret = NULL;
+ dhd_pno_gscan_capabilities_t *ptr;
+ dhd_pno_ssid_t *ssid_elem;
+ dhd_pno_params_t *_params;
+ dhd_epno_ssid_cfg_t *epno_cfg;
+ dhd_pno_status_info_t *_pno_state;
+
+ if (!dhd || !dhd->pno_state) {
+ DHD_ERROR(("NULL POINTER : %s\n", __FUNCTION__));
+ return NULL;
+ }
+
+ _pno_state = PNO_GET_PNOSTATE(dhd);
+ _params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
+
+ if (!len) {
+ DHD_ERROR(("%s: len is NULL\n", __FUNCTION__));
+ return NULL;
+ }
+
+ switch (type) {
+ case DHD_PNO_GET_CAPABILITIES:
+ ptr = (dhd_pno_gscan_capabilities_t *)
+ MALLOCZ(dhd->osh, sizeof(dhd_pno_gscan_capabilities_t));
+ if (!ptr)
+ break;
+ /* Hardcoding these values for now, need to get
+ * these values from FW, will change in a later check-in
+ */
+ ptr->max_scan_cache_size = GSCAN_MAX_AP_CACHE;
+ ptr->max_scan_buckets = GSCAN_MAX_CH_BUCKETS;
+ ptr->max_ap_cache_per_scan = GSCAN_MAX_AP_CACHE_PER_SCAN;
+ ptr->max_rssi_sample_size = PFN_SWC_RSSI_WINDOW_MAX;
+ ptr->max_scan_reporting_threshold = 100;
+ ptr->max_hotlist_bssids = PFN_HOTLIST_MAX_NUM_APS;
+ ptr->max_hotlist_ssids = 0;
+ ptr->max_significant_wifi_change_aps = 0;
+ ptr->max_bssid_history_entries = 0;
+ ptr->max_epno_ssid_crc32 = MAX_EPNO_SSID_NUM;
+ ptr->max_epno_hidden_ssid = MAX_EPNO_HIDDEN_SSID;
+ ptr->max_white_list_ssid = MAX_WHITELIST_SSID;
+ ret = (void *)ptr;
+ *len = sizeof(dhd_pno_gscan_capabilities_t);
+ break;
+
+ case DHD_PNO_GET_BATCH_RESULTS:
+ ret = dhd_get_gscan_batch_results(dhd, len);
+ break;
+ case DHD_PNO_GET_CHANNEL_LIST:
+ if (info) {
+ uint16 ch_list[WL_NUMCHANNELS];
+ uint32 *p, mem_needed, i;
+ int32 err, nchan = WL_NUMCHANNELS;
+ uint32 *gscan_band = (uint32 *) info;
+ uint8 band = 0;
+
+			/* No band specified? Nothing to do */
+ if ((*gscan_band & GSCAN_BAND_MASK) == 0) {
+ DHD_PNO(("No band specified\n"));
+ *len = 0;
+ break;
+ }
+
+			/* HAL and DHD use different bits for 2.4G and 5G in
+			 * the band bitmap, so translate between them here.
+			 */
+ if (*gscan_band & GSCAN_BG_BAND_MASK) {
+ band |= WLC_BAND_2G;
+ }
+ if (*gscan_band & GSCAN_A_BAND_MASK) {
+ band |=
+#ifdef WL_6G_BAND
+ WLC_BAND_6G |
+#endif /* WL_6G_BAND */
+ WLC_BAND_5G;
+ }
+
+ err = _dhd_pno_get_channels(dhd, ch_list, &nchan,
+ (band & GSCAN_ABG_BAND_MASK),
+ !(*gscan_band & GSCAN_DFS_MASK));
+
+ if (err < 0) {
+ DHD_ERROR(("%s: failed to get valid channel list\n",
+ __FUNCTION__));
+ *len = 0;
+ } else {
+ mem_needed = sizeof(uint32) * nchan;
+ p = (uint32 *)MALLOC(dhd->osh, mem_needed);
+ if (!p) {
+ DHD_ERROR(("%s: Unable to malloc %d bytes\n",
+ __FUNCTION__, mem_needed));
+ break;
+ }
+ for (i = 0; i < nchan; i++) {
+ p[i] = wl_channel_to_frequency(
+ (ch_list[i]),
+ CHSPEC_BAND(ch_list[i]));
+ }
+ ret = p;
+ *len = mem_needed;
+ }
+ } else {
+ *len = 0;
+ DHD_ERROR(("%s: info buffer is NULL\n", __FUNCTION__));
+ }
+ break;
+ case DHD_PNO_GET_NEW_EPNO_SSID_ELEM:
+ epno_cfg = &_params->params_gscan.epno_cfg;
+ if (epno_cfg->num_epno_ssid >=
+ MAX_EPNO_SSID_NUM) {
+ DHD_ERROR(("Excessive number of ePNO SSIDs programmed %d\n",
+ epno_cfg->num_epno_ssid));
+ return NULL;
+ }
+ if (!epno_cfg->num_epno_ssid) {
+ INIT_LIST_HEAD(&epno_cfg->epno_ssid_list);
+ }
+ ssid_elem = MALLOCZ(dhd->osh, sizeof(dhd_pno_ssid_t));
+ if (!ssid_elem) {
+ DHD_ERROR(("EPNO ssid: cannot alloc %zd bytes",
+ sizeof(dhd_pno_ssid_t)));
+ return NULL;
+ }
+ epno_cfg->num_epno_ssid++;
+ list_add_tail(&ssid_elem->list, &epno_cfg->epno_ssid_list);
+ ret = ssid_elem;
+ break;
+ default:
+ DHD_ERROR(("%s: Unrecognized cmd type - %d\n", __FUNCTION__, type));
+ break;
+ }
+
+ return ret;
+
+}
+#endif /* GSCAN_SUPPORT || DHD_GET_VALID_CHANNELS */
+
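+/*
+ * Legacy (non-GSCAN) batch retrieval. Drains "pfnlbest" results from the
+ * firmware into per-scan header/entry lists and, when a caller buffer is
+ * supplied, converts the cached lists into the text format the HAL expects.
+ */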
+static int
+_dhd_pno_get_for_batch(dhd_pub_t *dhd, char *buf, int bufsize, int reason)
+{
+ int err = BCME_OK;
+ int i, j;
+ uint32 timestamp = 0;
+ dhd_pno_params_t *_params = NULL;
+ dhd_pno_status_info_t *_pno_state = NULL;
+ wl_pfn_lscanresults_v1_t *plbestnet_v1 = NULL;
+ wl_pfn_lscanresults_v2_t *plbestnet_v2 = NULL;
+ wl_pfn_lnet_info_v1_t *plnetinfo;
+ wl_pfn_lnet_info_v2_t *plnetinfo_v2;
+ dhd_pno_bestnet_entry_t *pbestnet_entry;
+ dhd_pno_best_header_t *pbestnetheader = NULL;
+ dhd_pno_scan_results_t *pscan_results = NULL, *siter, *snext;
+ bool allocate_header = FALSE;
+ uint16 fwstatus = PFN_INCOMPLETE;
+ uint16 fwcount;
+
+ NULL_CHECK(dhd, "dhd is NULL", err);
+ NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+
+ /* The static asserts below guarantee the v1 and v2 net_info and subnet_info
+ * structures are compatible in size and SSID offset, allowing v1 to be safely
+ * used in the code below except for lscanresults fields themselves
+ * (status, count, offset to netinfo).
+ */
+ STATIC_ASSERT(sizeof(wl_pfn_net_info_v1_t) == sizeof(wl_pfn_net_info_v2_t));
+ STATIC_ASSERT(sizeof(wl_pfn_lnet_info_v1_t) == sizeof(wl_pfn_lnet_info_v2_t));
+ STATIC_ASSERT(sizeof(wl_pfn_subnet_info_v1_t) == sizeof(wl_pfn_subnet_info_v2_t));
+ ASSERT(OFFSETOF(wl_pfn_subnet_info_v1_t, SSID) ==
+ OFFSETOF(wl_pfn_subnet_info_v2_t, u.SSID));
+
+ DHD_PNO(("%s enter\n", __FUNCTION__));
+ _pno_state = PNO_GET_PNOSTATE(dhd);
+
+ if (!dhd_support_sta_mode(dhd)) {
+ err = BCME_BADOPTION;
+ goto exit_no_unlock;
+ }
+
+ if (!WLS_SUPPORTED(_pno_state)) {
+ DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__));
+ err = BCME_UNSUPPORTED;
+ goto exit_no_unlock;
+ }
+
+ if (!(_pno_state->pno_mode & DHD_PNO_BATCH_MODE)) {
+ DHD_ERROR(("%s: Batching SCAN mode is not enabled\n", __FUNCTION__));
+ goto exit_no_unlock;
+ }
+ mutex_lock(&_pno_state->pno_mutex);
+ _params = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS];
+ if (buf && bufsize) {
+ if (!list_empty(&_params->params_batch.get_batch.expired_scan_results_list)) {
+			/* need to check whether we have cached data or not */
+			DHD_PNO(("%s: have cached batching data in driver\n",
+ __FUNCTION__));
+ /* convert to results format */
+ goto convert_format;
+ } else {
+ /* this is a first try to get batching results */
+ if (!list_empty(&_params->params_batch.get_batch.scan_results_list)) {
+ /* move the scan_results_list to expired_scan_results_lists */
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ list_for_each_entry_safe(siter, snext,
+ &_params->params_batch.get_batch.scan_results_list, list) {
+ GCC_DIAGNOSTIC_POP();
+ list_move_tail(&siter->list,
+ &_params->params_batch.get_batch.expired_scan_results_list);
+ }
+ _params->params_batch.get_batch.top_node_cnt = 0;
+ _params->params_batch.get_batch.expired_tot_scan_cnt =
+ _params->params_batch.get_batch.tot_scan_cnt;
+ _params->params_batch.get_batch.tot_scan_cnt = 0;
+ goto convert_format;
+ }
+ }
+ }
+ /* create dhd_pno_scan_results_t whenever we got event WLC_E_PFN_BEST_BATCHING */
+ pscan_results = (dhd_pno_scan_results_t *)MALLOC(dhd->osh, SCAN_RESULTS_SIZE);
+ if (pscan_results == NULL) {
+ err = BCME_NOMEM;
+ DHD_ERROR(("failed to allocate dhd_pno_scan_results_t\n"));
+ goto exit;
+ }
+ pscan_results->bestnetheader = NULL;
+ pscan_results->cnt_header = 0;
+	/* add the element to the list while total node cnt is less than
+	 * MAX_NODE_CNT; otherwise evict the oldest node first
+	 */
+ if (_params->params_batch.get_batch.top_node_cnt < MAX_NODE_CNT) {
+ list_add(&pscan_results->list, &_params->params_batch.get_batch.scan_results_list);
+ _params->params_batch.get_batch.top_node_cnt++;
+ } else {
+ int _removed_scan_cnt;
+ /* remove oldest one and add new one */
+ DHD_PNO(("%s : Remove oldest node and add new one\n", __FUNCTION__));
+ _removed_scan_cnt = _dhd_pno_clear_all_batch_results(dhd,
+ &_params->params_batch.get_batch.scan_results_list, TRUE);
+ _params->params_batch.get_batch.tot_scan_cnt -= _removed_scan_cnt;
+ list_add(&pscan_results->list, &_params->params_batch.get_batch.scan_results_list);
+
+ }
+
+ plbestnet_v1 = (wl_pfn_lscanresults_v1_t *)MALLOC(dhd->osh, PNO_BESTNET_LEN);
+ if (!plbestnet_v1) {
+ err = BCME_NOMEM;
+ DHD_ERROR(("%s: failed to allocate buffer for bestnet", __FUNCTION__));
+ goto exit;
+ }
+
+ plbestnet_v2 = (wl_pfn_lscanresults_v2_t*)plbestnet_v1;
+
+ DHD_PNO(("%s enter\n", __FUNCTION__));
+ do {
+ err = dhd_iovar(dhd, 0, "pfnlbest", NULL, 0, (char *)plbestnet_v1, PNO_BESTNET_LEN,
+ FALSE);
+ if (err < 0) {
+ if (err == BCME_EPERM) {
+ DHD_ERROR(("we cannot get the batching data "
+ "during scanning in firmware, try again\n,"));
+ msleep(500);
+ continue;
+ } else {
+ DHD_ERROR(("%s : failed to execute pfnlbest (err :%d)\n",
+ __FUNCTION__, err));
+ goto exit;
+ }
+ }
+
+ if (plbestnet_v1->version == PFN_LBEST_SCAN_RESULT_VERSION_V1) {
+ fwstatus = plbestnet_v1->status;
+ fwcount = plbestnet_v1->count;
+ plnetinfo = &plbestnet_v1->netinfo[0];
+ if (fwcount == 0) {
+ DHD_PNO(("No more batch results\n"));
+ goto exit;
+ }
+ if (fwcount > BESTN_MAX) {
+ DHD_ERROR(("%s :fwcount %d is greater than BESTN_MAX %d \n",
+ __FUNCTION__, fwcount, (int)BESTN_MAX));
+ /* Process only BESTN_MAX number of results per batch */
+ fwcount = BESTN_MAX;
+ }
+ for (i = 0; i < fwcount; i++) {
+ pbestnet_entry = (dhd_pno_bestnet_entry_t *)
+ MALLOC(dhd->osh, BESTNET_ENTRY_SIZE);
+ if (pbestnet_entry == NULL) {
+ err = BCME_NOMEM;
+ DHD_ERROR(("failed to allocate dhd_pno_bestnet_entry\n"));
+ goto exit;
+ }
+ memset(pbestnet_entry, 0, BESTNET_ENTRY_SIZE);
+ /* record the current time */
+ pbestnet_entry->recorded_time = jiffies;
+ /* create header for the first entry */
+ allocate_header = (i == 0)? TRUE : FALSE;
+ /* check whether the new generation is started or not */
+ if (timestamp && (TIME_DIFF(timestamp, plnetinfo->timestamp)
+ > TIME_MIN_DIFF))
+ allocate_header = TRUE;
+ timestamp = plnetinfo->timestamp;
+ if (allocate_header) {
+ pbestnetheader = (dhd_pno_best_header_t *)
+ MALLOC(dhd->osh, BEST_HEADER_SIZE);
+ if (pbestnetheader == NULL) {
+ err = BCME_NOMEM;
+ if (pbestnet_entry)
+ MFREE(dhd->osh, pbestnet_entry,
+ BESTNET_ENTRY_SIZE);
+ DHD_ERROR(("failed to allocate"
+ " dhd_pno_bestnet_entry\n"));
+ goto exit;
+ }
+ /* increase total cnt of bestnet header */
+ pscan_results->cnt_header++;
+					/* clear the header before recording the reason
+					 * dhd_pno_get_for_batch was called, so the memset
+					 * does not wipe the reason
+					 */
+					memset(pbestnetheader, 0, BEST_HEADER_SIZE);
+					if (reason)
+						pbestnetheader->reason = (ENABLE << reason);
+ /* initialize the head of linked list */
+ INIT_LIST_HEAD(&(pbestnetheader->entry_list));
+					/* link the pbestnet header into the existing list */
+ if (pscan_results->bestnetheader == NULL)
+						/* first header in the list */
+ pscan_results->bestnetheader = pbestnetheader;
+ else {
+ dhd_pno_best_header_t *head =
+ pscan_results->bestnetheader;
+ pscan_results->bestnetheader = pbestnetheader;
+ pbestnetheader->next = head;
+ }
+ }
+ pbestnet_entry->channel = plnetinfo->pfnsubnet.channel;
+ pbestnet_entry->RSSI = plnetinfo->RSSI;
+ if (plnetinfo->flags & PFN_PARTIAL_SCAN_MASK) {
+					/* the partial-scan flag means this scan was
+					 * aborted by another scan
+					 */
+ DHD_PNO(("This scan is aborted\n"));
+ pbestnetheader->reason = (ENABLE << PNO_STATUS_ABORT);
+ }
+ pbestnet_entry->rtt0 = plnetinfo->rtt0;
+ pbestnet_entry->rtt1 = plnetinfo->rtt1;
+ pbestnet_entry->timestamp = plnetinfo->timestamp;
+ if (plnetinfo->pfnsubnet.SSID_len > DOT11_MAX_SSID_LEN) {
+ DHD_ERROR(("%s: Invalid SSID length"
+ " %d: trimming it to max\n",
+ __FUNCTION__, plnetinfo->pfnsubnet.SSID_len));
+ plnetinfo->pfnsubnet.SSID_len = DOT11_MAX_SSID_LEN;
+ }
+ pbestnet_entry->SSID_len = plnetinfo->pfnsubnet.SSID_len;
+ memcpy(pbestnet_entry->SSID, plnetinfo->pfnsubnet.SSID,
+ pbestnet_entry->SSID_len);
+ memcpy(&pbestnet_entry->BSSID, &plnetinfo->pfnsubnet.BSSID,
+ ETHER_ADDR_LEN);
+ /* add the element into list */
+ list_add_tail(&pbestnet_entry->list, &pbestnetheader->entry_list);
+ /* increase best entry count */
+ pbestnetheader->tot_cnt++;
+ pbestnetheader->tot_size += BESTNET_ENTRY_SIZE;
+ DHD_PNO(("Header %d\n", pscan_results->cnt_header - 1));
+ DHD_PNO(("\tSSID : "));
+ for (j = 0; j < plnetinfo->pfnsubnet.SSID_len; j++)
+ DHD_PNO(("%c", plnetinfo->pfnsubnet.SSID[j]));
+ DHD_PNO(("\n"));
+ DHD_PNO(("\tBSSID: "MACDBG"\n",
+ MAC2STRDBG(plnetinfo->pfnsubnet.BSSID.octet)));
+ DHD_PNO(("\tchannel: %d, RSSI: %d, timestamp: %d ms\n",
+ plnetinfo->pfnsubnet.channel,
+ plnetinfo->RSSI, plnetinfo->timestamp));
+ DHD_PNO(("\tRTT0 : %d, RTT1: %d\n", plnetinfo->rtt0,
+ plnetinfo->rtt1));
+ plnetinfo++;
+ }
+ } else if (plbestnet_v2->version == PFN_LBEST_SCAN_RESULT_VERSION_V2) {
+ fwstatus = plbestnet_v2->status;
+ fwcount = plbestnet_v2->count;
+ plnetinfo_v2 = (wl_pfn_lnet_info_v2_t*)&plbestnet_v2->netinfo[0];
+ if (fwcount == 0) {
+ DHD_PNO(("No more batch results\n"));
+ goto exit;
+ }
+ if (fwcount > BESTN_MAX) {
+ DHD_ERROR(("%s :fwcount %d is greater than BESTN_MAX %d \n",
+ __FUNCTION__, fwcount, (int)BESTN_MAX));
+ /* Process only BESTN_MAX number of results per batch */
+ fwcount = BESTN_MAX;
+ }
+ DHD_PNO(("ver %d, status : %d, count %d\n",
+ plbestnet_v2->version, fwstatus, fwcount));
+
+ for (i = 0; i < fwcount; i++) {
+ pbestnet_entry = (dhd_pno_bestnet_entry_t *)
+ MALLOC(dhd->osh, BESTNET_ENTRY_SIZE);
+ if (pbestnet_entry == NULL) {
+ err = BCME_NOMEM;
+ DHD_ERROR(("failed to allocate dhd_pno_bestnet_entry\n"));
+ goto exit;
+ }
+ memset(pbestnet_entry, 0, BESTNET_ENTRY_SIZE);
+ /* record the current time */
+ pbestnet_entry->recorded_time = jiffies;
+ /* create header for the first entry */
+ allocate_header = (i == 0)? TRUE : FALSE;
+ /* check whether the new generation is started or not */
+ if (timestamp && (TIME_DIFF(timestamp, plnetinfo_v2->timestamp)
+ > TIME_MIN_DIFF))
+ allocate_header = TRUE;
+ timestamp = plnetinfo_v2->timestamp;
+ if (allocate_header) {
+ pbestnetheader = (dhd_pno_best_header_t *)
+ MALLOC(dhd->osh, BEST_HEADER_SIZE);
+ if (pbestnetheader == NULL) {
+ err = BCME_NOMEM;
+ if (pbestnet_entry)
+ MFREE(dhd->osh, pbestnet_entry,
+ BESTNET_ENTRY_SIZE);
+ DHD_ERROR(("failed to allocate"
+ " dhd_pno_bestnet_entry\n"));
+ goto exit;
+ }
+ /* increase total cnt of bestnet header */
+ pscan_results->cnt_header++;
+					/* clear the header before recording the reason
+					 * dhd_pno_get_for_batch was called, so the memset
+					 * does not wipe the reason
+					 */
+					memset(pbestnetheader, 0, BEST_HEADER_SIZE);
+					if (reason)
+						pbestnetheader->reason = (ENABLE << reason);
+ /* initialize the head of linked list */
+ INIT_LIST_HEAD(&(pbestnetheader->entry_list));
+					/* link the pbestnet header into the existing list */
+ if (pscan_results->bestnetheader == NULL)
+						/* first header in the list */
+ pscan_results->bestnetheader = pbestnetheader;
+ else {
+ dhd_pno_best_header_t *head =
+ pscan_results->bestnetheader;
+ pscan_results->bestnetheader = pbestnetheader;
+ pbestnetheader->next = head;
+ }
+ }
+ /* fills the best network info */
+ pbestnet_entry->channel = plnetinfo_v2->pfnsubnet.channel;
+ pbestnet_entry->RSSI = plnetinfo_v2->RSSI;
+ if (plnetinfo_v2->flags & PFN_PARTIAL_SCAN_MASK) {
+					/* the partial-scan flag means this scan was
+					 * aborted by another scan
+					 */
+ DHD_PNO(("This scan is aborted\n"));
+ pbestnetheader->reason = (ENABLE << PNO_STATUS_ABORT);
+ }
+ pbestnet_entry->rtt0 = plnetinfo_v2->rtt0;
+ pbestnet_entry->rtt1 = plnetinfo_v2->rtt1;
+ pbestnet_entry->timestamp = plnetinfo_v2->timestamp;
+ if (plnetinfo_v2->pfnsubnet.SSID_len > DOT11_MAX_SSID_LEN) {
+ DHD_ERROR(("%s: Invalid SSID length"
+ " %d: trimming it to max\n",
+ __FUNCTION__, plnetinfo_v2->pfnsubnet.SSID_len));
+ plnetinfo_v2->pfnsubnet.SSID_len = DOT11_MAX_SSID_LEN;
+ }
+ pbestnet_entry->SSID_len = plnetinfo_v2->pfnsubnet.SSID_len;
+ memcpy(pbestnet_entry->SSID, plnetinfo_v2->pfnsubnet.u.SSID,
+ pbestnet_entry->SSID_len);
+ memcpy(&pbestnet_entry->BSSID, &plnetinfo_v2->pfnsubnet.BSSID,
+ ETHER_ADDR_LEN);
+ /* add the element into list */
+ list_add_tail(&pbestnet_entry->list, &pbestnetheader->entry_list);
+ /* increase best entry count */
+ pbestnetheader->tot_cnt++;
+ pbestnetheader->tot_size += BESTNET_ENTRY_SIZE;
+ DHD_PNO(("Header %d\n", pscan_results->cnt_header - 1));
+ DHD_PNO(("\tSSID : "));
+ for (j = 0; j < plnetinfo_v2->pfnsubnet.SSID_len; j++)
+ DHD_PNO(("%c", plnetinfo_v2->pfnsubnet.u.SSID[j]));
+ DHD_PNO(("\n"));
+ DHD_PNO(("\tBSSID: "MACDBG"\n",
+ MAC2STRDBG(plnetinfo_v2->pfnsubnet.BSSID.octet)));
+ DHD_PNO(("\tchannel: %d, RSSI: %d, timestamp: %d ms\n",
+ plnetinfo_v2->pfnsubnet.channel,
+ plnetinfo_v2->RSSI, plnetinfo_v2->timestamp));
+ DHD_PNO(("\tRTT0 : %d, RTT1: %d\n", plnetinfo_v2->rtt0,
+ plnetinfo_v2->rtt1));
+ plnetinfo_v2++;
+ }
+ } else {
+ err = BCME_VERSION;
+ DHD_ERROR(("bestnet fw version %d not supported\n",
+ plbestnet_v1->version));
+ goto exit;
+ }
+ } while (fwstatus != PFN_COMPLETE);
+
+ if (pscan_results->cnt_header == 0) {
+		/* In case we didn't get any data from the firmware,
+		 * remove the current scan_result list from get_batch.scan_results_list.
+		 */
+ DHD_PNO(("NO BATCH DATA from Firmware, Delete current SCAN RESULT LIST\n"));
+ list_del(&pscan_results->list);
+ MFREE(dhd->osh, pscan_results, SCAN_RESULTS_SIZE);
+ _params->params_batch.get_batch.top_node_cnt--;
+ } else {
+ /* increase total scan count using current scan count */
+ _params->params_batch.get_batch.tot_scan_cnt += pscan_results->cnt_header;
+ }
+
+ if (buf && bufsize) {
+ /* This is a first try to get batching results */
+ if (!list_empty(&_params->params_batch.get_batch.scan_results_list)) {
+ /* move the scan_results_list to expired_scan_results_lists */
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ list_for_each_entry_safe(siter, snext,
+ &_params->params_batch.get_batch.scan_results_list, list) {
+ GCC_DIAGNOSTIC_POP();
+ list_move_tail(&siter->list,
+ &_params->params_batch.get_batch.expired_scan_results_list);
+ }
+			/* reset global values after moving to the expired list */
+ _params->params_batch.get_batch.top_node_cnt = 0;
+ _params->params_batch.get_batch.expired_tot_scan_cnt =
+ _params->params_batch.get_batch.tot_scan_cnt;
+ _params->params_batch.get_batch.tot_scan_cnt = 0;
+ }
+convert_format:
+ err = _dhd_pno_convert_format(dhd, &_params->params_batch, buf, bufsize);
+ if (err < 0) {
+ DHD_ERROR(("failed to convert the data into upper layer format\n"));
+ goto exit;
+ }
+ }
+exit:
+ if (plbestnet_v1)
+ MFREE(dhd->osh, plbestnet_v1, PNO_BESTNET_LEN);
+ if (_params) {
+ _params->params_batch.get_batch.buf = NULL;
+ _params->params_batch.get_batch.bufsize = 0;
+ _params->params_batch.get_batch.bytes_written = err;
+ }
+ mutex_unlock(&_pno_state->pno_mutex);
+exit_no_unlock:
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0))
+ if (waitqueue_active(&_pno_state->get_batch_done)) {
+ _pno_state->batch_recvd = TRUE;
+ wake_up(&_pno_state->get_batch_done);
+ }
+#else
+ if (waitqueue_active(&_pno_state->get_batch_done.wait))
+ complete(&_pno_state->get_batch_done);
+#endif
+ return err;
+}
+
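+/*
+ * Workqueue handler: performs the actual firmware retrieval out of line, so
+ * dhd_pno_get_for_batch() callers merely schedule work and wait.
+ */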
+static void
+_dhd_pno_get_batch_handler(struct work_struct *work)
+{
+ dhd_pno_status_info_t *_pno_state;
+ dhd_pub_t *dhd;
+ struct dhd_pno_batch_params *params_batch;
+ DHD_PNO(("%s enter\n", __FUNCTION__));
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ _pno_state = container_of(work, struct dhd_pno_status_info, work);
+ GCC_DIAGNOSTIC_POP();
+
+ dhd = _pno_state->dhd;
+ if (dhd == NULL) {
+ DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__));
+ return;
+ }
+
+#ifdef GSCAN_SUPPORT
+ _dhd_pno_get_gscan_batch_from_fw(dhd);
+#endif /* GSCAN_SUPPORT */
+ if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) {
+ params_batch = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS].params_batch;
+
+ _dhd_pno_get_for_batch(dhd, params_batch->get_batch.buf,
+ params_batch->get_batch.bufsize, params_batch->get_batch.reason);
+ }
+}
+
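+/*
+ * Public batch-retrieval entry point. For GSCAN, schedule retrieval and wait
+ * on batch_get_wait; otherwise schedule the work item and block until the
+ * handler signals get_batch_done.
+ */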
+int
+dhd_pno_get_for_batch(dhd_pub_t *dhd, char *buf, int bufsize, int reason)
+{
+ int err = BCME_OK;
+ char *pbuf = buf;
+ dhd_pno_status_info_t *_pno_state;
+ struct dhd_pno_batch_params *params_batch;
+ NULL_CHECK(dhd, "dhd is NULL", err);
+ NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+ if (!dhd_support_sta_mode(dhd)) {
+ err = BCME_BADOPTION;
+ goto exit;
+ }
+ DHD_PNO(("%s enter\n", __FUNCTION__));
+ _pno_state = PNO_GET_PNOSTATE(dhd);
+
+ if (!WLS_SUPPORTED(_pno_state)) {
+ DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__));
+ err = BCME_UNSUPPORTED;
+ goto exit;
+ }
+ params_batch = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS].params_batch;
+#ifdef GSCAN_SUPPORT
+ if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) {
+ struct dhd_pno_gscan_params *gscan_params;
+ gscan_params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan;
+ gscan_params->reason = reason;
+ err = dhd_retreive_batch_scan_results(dhd);
+ if (err == BCME_OK) {
+ wait_event_interruptible_timeout(_pno_state->batch_get_wait,
+ is_batch_retrieval_complete(gscan_params),
+ msecs_to_jiffies(GSCAN_BATCH_GET_MAX_WAIT));
+ }
+ } else
+#endif
+ {
+ if (!(_pno_state->pno_mode & DHD_PNO_BATCH_MODE)) {
+ DHD_ERROR(("%s: Batching SCAN mode is not enabled\n", __FUNCTION__));
+ memset(pbuf, 0, bufsize);
+ pbuf += snprintf(pbuf, bufsize, "scancount=%d\n", 0);
+ snprintf(pbuf, bufsize, "%s", RESULTS_END_MARKER);
+ err = strlen(buf);
+ goto exit;
+ }
+ params_batch->get_batch.buf = buf;
+ params_batch->get_batch.bufsize = bufsize;
+ params_batch->get_batch.reason = reason;
+ params_batch->get_batch.bytes_written = 0;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0))
+ _pno_state->batch_recvd = FALSE;
+#endif
+ schedule_work(&_pno_state->work);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0))
+ wait_event(_pno_state->get_batch_done, _pno_state->batch_recvd);
+#else
+ wait_for_completion(&_pno_state->get_batch_done);
+#endif
+ }
+
+#ifdef GSCAN_SUPPORT
+ if (!(_pno_state->pno_mode & DHD_PNO_GSCAN_MODE))
+#endif
+ err = params_batch->get_batch.bytes_written;
+exit:
+ return err;
+}
+
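+/*
+ * Stop batch scanning. Since dhd_pno_clean() wipes all firmware PNO state,
+ * legacy PNO or hotlist scans that were sharing it must be reprogrammed.
+ */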
+int
+dhd_pno_stop_for_batch(dhd_pub_t *dhd)
+{
+ int err = BCME_OK;
+ int mode = 0;
+ int i = 0;
+ dhd_pno_status_info_t *_pno_state;
+ dhd_pno_params_t *_params;
+ wl_pfn_bssid_t *p_pfn_bssid = NULL;
+ NULL_CHECK(dhd, "dhd is NULL", err);
+ NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+ _pno_state = PNO_GET_PNOSTATE(dhd);
+ DHD_PNO(("%s enter\n", __FUNCTION__));
+ if (!dhd_support_sta_mode(dhd)) {
+ err = BCME_BADOPTION;
+ goto exit;
+ }
+ if (!WLS_SUPPORTED(_pno_state)) {
+ DHD_ERROR(("%s : wifi location service is not supported\n",
+ __FUNCTION__));
+ err = BCME_UNSUPPORTED;
+ goto exit;
+ }
+
+#ifdef GSCAN_SUPPORT
+ if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) {
+ DHD_PNO(("Gscan is ongoing, nothing to stop here\n"));
+ return err;
+ }
+#endif
+
+ if (!(_pno_state->pno_mode & DHD_PNO_BATCH_MODE)) {
+ DHD_ERROR(("%s : PNO BATCH MODE is not enabled\n", __FUNCTION__));
+ goto exit;
+ }
+ _pno_state->pno_mode &= ~DHD_PNO_BATCH_MODE;
+ if (_pno_state->pno_mode & (DHD_PNO_LEGACY_MODE | DHD_PNO_HOTLIST_MODE)) {
+ mode = _pno_state->pno_mode;
+ err = dhd_pno_clean(dhd);
+ if (err < 0) {
+ DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n",
+ __FUNCTION__, err));
+ goto exit;
+ }
+
+ _pno_state->pno_mode = mode;
+ /* restart Legacy PNO if the Legacy PNO is on */
+ if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) {
+ struct dhd_pno_legacy_params *_params_legacy;
+ _params_legacy =
+ &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS].params_legacy);
+ err = dhd_pno_set_legacy_pno(dhd, _params_legacy->scan_fr,
+ _params_legacy->pno_repeat,
+ _params_legacy->pno_freq_expo_max,
+ _params_legacy->chan_list, _params_legacy->nchan);
+ if (err < 0) {
+ DHD_ERROR(("%s : failed to restart legacy PNO scan(err: %d)\n",
+ __FUNCTION__, err));
+ goto exit;
+ }
+ } else if (_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE) {
+ struct dhd_pno_bssid *iter, *next;
+ _params = &(_pno_state->pno_params_arr[INDEX_OF_HOTLIST_PARAMS]);
+ p_pfn_bssid = (wl_pfn_bssid_t *)MALLOCZ(dhd->osh,
+ sizeof(wl_pfn_bssid_t) * _params->params_hotlist.nbssid);
+ if (p_pfn_bssid == NULL) {
+ DHD_ERROR(("%s : failed to allocate wl_pfn_bssid_t array"
+ " (count: %d)",
+ __FUNCTION__, _params->params_hotlist.nbssid));
+ err = BCME_ERROR;
+ _pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE;
+ goto exit;
+ }
+ i = 0;
+ /* convert dhd_pno_bssid to wl_pfn_bssid */
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ list_for_each_entry_safe(iter, next,
+ &_params->params_hotlist.bssid_list, list) {
+ GCC_DIAGNOSTIC_POP();
+ memcpy(&p_pfn_bssid[i].macaddr, &iter->macaddr, ETHER_ADDR_LEN);
+ p_pfn_bssid[i].flags = iter->flags;
+ i++;
+ }
+ err = dhd_pno_set_for_hotlist(dhd, p_pfn_bssid, &_params->params_hotlist);
+ if (err < 0) {
+ _pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE;
+ DHD_ERROR(("%s : failed to restart hotlist scan(err: %d)\n",
+ __FUNCTION__, err));
+ goto exit;
+ }
+ }
+ } else {
+ err = dhd_pno_clean(dhd);
+ if (err < 0) {
+ DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n",
+ __FUNCTION__, err));
+ goto exit;
+ }
+ }
+exit:
+ _params = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS];
+ _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_BATCH_MODE);
+ MFREE(dhd->osh, p_pfn_bssid,
+ sizeof(wl_pfn_bssid_t) * _params->params_hotlist.nbssid);
+ return err;
+}
+
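+/*
+ * Program a BSSID hotlist scan: build the channel list (merging with legacy
+ * PNO channels when both modes run), push the BSSID list to firmware, and
+ * keep a host-side copy for later restarts.
+ */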
+int
+dhd_pno_set_for_hotlist(dhd_pub_t *dhd, wl_pfn_bssid_t *p_pfn_bssid,
+ struct dhd_pno_hotlist_params *hotlist_params)
+{
+ int err = BCME_OK;
+ int i;
+ uint16 _chan_list[WL_NUMCHANNELS];
+ int rem_nchan = 0;
+ int tot_nchan = 0;
+ int mode = 0;
+ dhd_pno_params_t *_params;
+ dhd_pno_params_t *_params2;
+ struct dhd_pno_bssid *_pno_bssid;
+ dhd_pno_status_info_t *_pno_state;
+ NULL_CHECK(dhd, "dhd is NULL", err);
+ NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+ NULL_CHECK(hotlist_params, "hotlist_params is NULL", err);
+ NULL_CHECK(p_pfn_bssid, "p_pfn_bssid is NULL", err);
+ _pno_state = PNO_GET_PNOSTATE(dhd);
+ DHD_PNO(("%s enter\n", __FUNCTION__));
+
+ if (!dhd_support_sta_mode(dhd)) {
+ err = BCME_BADOPTION;
+ goto exit;
+ }
+ if (!WLS_SUPPORTED(_pno_state)) {
+ DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__));
+ err = BCME_UNSUPPORTED;
+ goto exit;
+ }
+ _params = &_pno_state->pno_params_arr[INDEX_OF_HOTLIST_PARAMS];
+ if (!(_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE)) {
+ _pno_state->pno_mode |= DHD_PNO_HOTLIST_MODE;
+ err = _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_HOTLIST_MODE);
+ if (err < 0) {
+ DHD_ERROR(("%s : failed to call _dhd_pno_reinitialize_prof\n",
+ __FUNCTION__));
+ goto exit;
+ }
+ }
+ _params->params_batch.nchan = hotlist_params->nchan;
+ _params->params_batch.scan_fr = hotlist_params->scan_fr;
+ if (hotlist_params->nchan)
+ memcpy(_params->params_hotlist.chan_list, hotlist_params->chan_list,
+ sizeof(_params->params_hotlist.chan_list));
+ memset(_chan_list, 0, sizeof(_chan_list));
+
+ rem_nchan = ARRAYSIZE(hotlist_params->chan_list) - hotlist_params->nchan;
+ if (hotlist_params->band == WLC_BAND_2G ||
+#ifdef WL_6G_BAND
+ hotlist_params->band == WLC_BAND_6G ||
+#endif /* WL_6G_BAND */
+ hotlist_params->band == WLC_BAND_5G) {
+		/* get a valid channel list for the requested band */
+ err = _dhd_pno_get_channels(dhd,
+ &_params->params_hotlist.chan_list[hotlist_params->nchan],
+ &rem_nchan, hotlist_params->band, FALSE);
+ if (err < 0) {
+ DHD_ERROR(("%s: failed to get valid channel list(band : %d)\n",
+ __FUNCTION__, hotlist_params->band));
+ goto exit;
+ }
+		/* now we need to update nchan because rem_nchan has the valid channel count */
+ _params->params_hotlist.nchan += rem_nchan;
+ /* need to sort channel list */
+ sort(_params->params_hotlist.chan_list, _params->params_hotlist.nchan,
+ sizeof(_params->params_hotlist.chan_list[0]), _dhd_pno_cmpfunc, NULL);
+ }
+#ifdef PNO_DEBUG
+{
+ int i;
+ DHD_PNO(("Channel list : "));
+ for (i = 0; i < _params->params_batch.nchan; i++) {
+ DHD_PNO(("%d ", _params->params_batch.chan_list[i]));
+ }
+ DHD_PNO(("\n"));
+}
+#endif
+ if (_params->params_hotlist.nchan) {
+ /* copy the channel list into local array */
+ memcpy(_chan_list, _params->params_hotlist.chan_list,
+ sizeof(_chan_list));
+ tot_nchan = _params->params_hotlist.nchan;
+ }
+ if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) {
+ DHD_PNO(("PNO SSID is on progress in firmware\n"));
+ /* store current pno_mode before disabling pno */
+ mode = _pno_state->pno_mode;
+ err = _dhd_pno_enable(dhd, PNO_OFF);
+ if (err < 0) {
+ DHD_ERROR(("%s : failed to disable PNO\n", __FUNCTION__));
+ goto exit;
+ }
+ /* restore the previous mode */
+ _pno_state->pno_mode = mode;
+		/* Use the superset of the channel lists of the two modes */
+ _params2 = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]);
+ if (_params2->params_legacy.nchan > 0 &&
+ _params->params_hotlist.nchan > 0) {
+ err = _dhd_pno_chan_merge(_chan_list, &tot_nchan,
+ &_params2->params_legacy.chan_list[0],
+ _params2->params_legacy.nchan,
+ &_params->params_hotlist.chan_list[0],
+ _params->params_hotlist.nchan);
+ if (err < 0) {
+ DHD_ERROR(("%s : failed to merge channel list"
+ "between legacy and hotlist\n",
+ __FUNCTION__));
+ goto exit;
+ }
+ }
+
+ }
+
+ INIT_LIST_HEAD(&(_params->params_hotlist.bssid_list));
+
+ err = _dhd_pno_add_bssid(dhd, p_pfn_bssid, hotlist_params->nbssid);
+ if (err < 0) {
+ DHD_ERROR(("%s : failed to call _dhd_pno_add_bssid(err :%d)\n",
+ __FUNCTION__, err));
+ goto exit;
+ }
+ if ((err = _dhd_pno_set(dhd, _params, DHD_PNO_HOTLIST_MODE)) < 0) {
+ DHD_ERROR(("%s : failed to set call pno_set (err %d) in firmware\n",
+ __FUNCTION__, err));
+ goto exit;
+ }
+ if (tot_nchan > 0) {
+ if ((err = _dhd_pno_cfg(dhd, _chan_list, tot_nchan)) < 0) {
+ DHD_ERROR(("%s : failed to set call pno_cfg (err %d) in firmware\n",
+ __FUNCTION__, err));
+ goto exit;
+ }
+ }
+ for (i = 0; i < hotlist_params->nbssid; i++) {
+ _pno_bssid = (struct dhd_pno_bssid *)MALLOCZ(dhd->osh,
+ sizeof(struct dhd_pno_bssid));
+		NULL_CHECK(_pno_bssid, "_pno_bssid is NULL", err);
+ memcpy(&_pno_bssid->macaddr, &p_pfn_bssid[i].macaddr, ETHER_ADDR_LEN);
+ _pno_bssid->flags = p_pfn_bssid[i].flags;
+ list_add_tail(&_pno_bssid->list, &_params->params_hotlist.bssid_list);
+ }
+ _params->params_hotlist.nbssid = hotlist_params->nbssid;
+ if (_pno_state->pno_status == DHD_PNO_DISABLED) {
+ if ((err = _dhd_pno_enable(dhd, PNO_ON)) < 0)
+ DHD_ERROR(("%s : failed to enable PNO\n", __FUNCTION__));
+ }
+exit:
+ /* clear mode in case of error */
+ if (err < 0)
+ _pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE;
+ return err;
+}
+
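+/*
+ * Stop hotlist scanning and, as in the batch case, reprogram legacy PNO or
+ * batch scans that shared the firmware PNO state.
+ */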
+int
+dhd_pno_stop_for_hotlist(dhd_pub_t *dhd)
+{
+ int err = BCME_OK;
+ uint32 mode = 0;
+ dhd_pno_status_info_t *_pno_state;
+ dhd_pno_params_t *_params;
+ NULL_CHECK(dhd, "dhd is NULL", err);
+ NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+ _pno_state = PNO_GET_PNOSTATE(dhd);
+
+ if (!WLS_SUPPORTED(_pno_state)) {
+ DHD_ERROR(("%s : wifi location service is not supported\n",
+ __FUNCTION__));
+ err = BCME_UNSUPPORTED;
+ goto exit;
+ }
+
+ if (!(_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE)) {
+ DHD_ERROR(("%s : Hotlist MODE is not enabled\n",
+ __FUNCTION__));
+ goto exit;
+ }
+ _pno_state->pno_mode &= ~DHD_PNO_BATCH_MODE;
+
+ if (_pno_state->pno_mode & (DHD_PNO_LEGACY_MODE | DHD_PNO_BATCH_MODE)) {
+ /* retrieve the batching data from firmware into host */
+ dhd_pno_get_for_batch(dhd, NULL, 0, PNO_STATUS_DISABLE);
+ /* save current pno_mode before calling dhd_pno_clean */
+ mode = _pno_state->pno_mode;
+ err = dhd_pno_clean(dhd);
+ if (err < 0) {
+ DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n",
+ __FUNCTION__, err));
+ goto exit;
+ }
+		/* restore previous pno mode */
+ _pno_state->pno_mode = mode;
+ if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) {
+ /* restart Legacy PNO Scan */
+ struct dhd_pno_legacy_params *_params_legacy;
+ _params_legacy =
+ &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS].params_legacy);
+ err = dhd_pno_set_legacy_pno(dhd, _params_legacy->scan_fr,
+ _params_legacy->pno_repeat, _params_legacy->pno_freq_expo_max,
+ _params_legacy->chan_list, _params_legacy->nchan);
+ if (err < 0) {
+ DHD_ERROR(("%s : failed to restart legacy PNO scan(err: %d)\n",
+ __FUNCTION__, err));
+ goto exit;
+ }
+ } else if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) {
+ /* restart Batching Scan */
+ _params = &(_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]);
+ /* restart BATCH SCAN */
+ err = dhd_pno_set_for_batch(dhd, &_params->params_batch);
+ if (err < 0) {
+ _pno_state->pno_mode &= ~DHD_PNO_BATCH_MODE;
+ DHD_ERROR(("%s : failed to restart batch scan(err: %d)\n",
+ __FUNCTION__, err));
+ goto exit;
+ }
+ }
+ } else {
+ err = dhd_pno_clean(dhd);
+ if (err < 0) {
+ DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n",
+ __FUNCTION__, err));
+ goto exit;
+ }
+ }
+exit:
+ return err;
+}
+
+#ifdef GSCAN_SUPPORT
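+/* Schedule the batch-retrieval work item unless a retrieval is already in
+ * progress.
+ */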
+int
+dhd_retreive_batch_scan_results(dhd_pub_t *dhd)
+{
+ int err = BCME_OK;
+ dhd_pno_status_info_t *_pno_state;
+ dhd_pno_params_t *_params;
+ struct dhd_pno_batch_params *params_batch;
+
+ NULL_CHECK(dhd, "dhd is NULL", err);
+ NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+ _pno_state = PNO_GET_PNOSTATE(dhd);
+ _params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
+
+ params_batch = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS].params_batch;
+ if (_params->params_gscan.get_batch_flag == GSCAN_BATCH_RETRIEVAL_COMPLETE) {
+ DHD_PNO(("Retreive batch results\n"));
+ params_batch->get_batch.buf = NULL;
+ params_batch->get_batch.bufsize = 0;
+ params_batch->get_batch.reason = PNO_STATUS_EVENT;
+ _params->params_gscan.get_batch_flag = GSCAN_BATCH_RETRIEVAL_IN_PROGRESS;
+ smp_wmb();
+ schedule_work(&_pno_state->work);
+ } else {
+ DHD_PNO(("%s : WLC_E_PFN_BEST_BATCHING retrieval"
+ "already in progress, will skip\n", __FUNCTION__));
+ err = BCME_ERROR;
+ }
+
+ return err;
+}
+
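+/* Free the cached hotlist results of the given type (found or lost). */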
+void
+dhd_gscan_hotlist_cache_cleanup(dhd_pub_t *dhd, hotlist_type_t type)
+{
+ dhd_pno_status_info_t *_pno_state = PNO_GET_PNOSTATE(dhd);
+ struct dhd_pno_gscan_params *gscan_params;
+ gscan_results_cache_t *iter, *tmp;
+
+ if (!_pno_state) {
+ return;
+ }
+ gscan_params = &(_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan);
+
+ if (type == HOTLIST_FOUND) {
+ iter = gscan_params->gscan_hotlist_found;
+ gscan_params->gscan_hotlist_found = NULL;
+ } else {
+ iter = gscan_params->gscan_hotlist_lost;
+ gscan_params->gscan_hotlist_lost = NULL;
+ }
+
+ while (iter) {
+ tmp = iter->next;
+ MFREE(dhd->osh, iter,
+ ((iter->tot_count - 1) * sizeof(wifi_gscan_result_t))
+ + sizeof(gscan_results_cache_t));
+ iter = tmp;
+ }
+
+ return;
+}
+
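+/*
+ * Validate a full-scan-result event payload and repackage it as a
+ * wifi_gscan_full_result_t (fixed fields plus raw IEs) for the HAL.
+ * Returns NULL and *size = 0 on any validation failure.
+ */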
+void *
+dhd_process_full_gscan_result(dhd_pub_t *dhd, const void *data, uint32 len, int *size)
+{
+ wl_bss_info_t *bi = NULL;
+ wl_gscan_result_t *gscan_result;
+ wifi_gscan_full_result_t *result = NULL;
+ u32 bi_length = 0;
+ uint8 channel;
+ uint32 mem_needed;
+ struct osl_timespec ts;
+ u32 bi_ie_length = 0;
+ u32 bi_ie_offset = 0;
+
+ *size = 0;
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ gscan_result = (wl_gscan_result_t *)data;
+ GCC_DIAGNOSTIC_POP();
+ if (!gscan_result) {
+ DHD_ERROR(("Invalid gscan result (NULL pointer)\n"));
+ goto exit;
+ }
+
+ if ((len < sizeof(*gscan_result)) ||
+ (len < dtoh32(gscan_result->buflen)) ||
+ (dtoh32(gscan_result->buflen) >
+ (sizeof(*gscan_result) + WL_SCAN_IE_LEN_MAX))) {
+ DHD_ERROR(("%s: invalid gscan buflen:%u\n", __FUNCTION__,
+ dtoh32(gscan_result->buflen)));
+ goto exit;
+ }
+
+ bi = &gscan_result->bss_info[0].info;
+ bi_length = dtoh32(bi->length);
+ if (bi_length != (dtoh32(gscan_result->buflen) -
+ WL_GSCAN_RESULTS_FIXED_SIZE - WL_GSCAN_INFO_FIXED_FIELD_SIZE)) {
+ DHD_ERROR(("Invalid bss_info length %d: ignoring\n", bi_length));
+ goto exit;
+ }
+ bi_ie_offset = dtoh32(bi->ie_offset);
+ bi_ie_length = dtoh32(bi->ie_length);
+ if ((bi_ie_offset + bi_ie_length) > bi_length) {
+ DHD_ERROR(("%s: Invalid ie_length:%u or ie_offset:%u\n",
+ __FUNCTION__, bi_ie_length, bi_ie_offset));
+ goto exit;
+ }
+ if (bi->SSID_len > DOT11_MAX_SSID_LEN) {
+ DHD_ERROR(("%s: Invalid SSID length:%u\n", __FUNCTION__, bi->SSID_len));
+ goto exit;
+ }
+
+	mem_needed = OFFSETOF(wifi_gscan_full_result_t, ie_data) + bi_ie_length;
+ result = (wifi_gscan_full_result_t *)MALLOC(dhd->osh, mem_needed);
+ if (!result) {
+ DHD_ERROR(("%s Cannot malloc scan result buffer %d bytes\n",
+ __FUNCTION__, mem_needed));
+ goto exit;
+ }
+
+ result->scan_ch_bucket = gscan_result->scan_ch_bucket;
+ memcpy(result->fixed.ssid, bi->SSID, bi->SSID_len);
+ result->fixed.ssid[bi->SSID_len] = '\0';
+ channel = wf_chspec_ctlchspec(bi->chanspec);
+ result->fixed.channel = wl_channel_to_frequency(channel, CHSPEC_BAND(channel));
+ result->fixed.rssi = (int32) bi->RSSI;
+ result->fixed.rtt = 0;
+ result->fixed.rtt_sd = 0;
+ osl_get_monotonic_boottime(&ts);
+ result->fixed.ts = (uint64) TIMESPEC_TO_US(ts);
+ result->fixed.beacon_period = dtoh16(bi->beacon_period);
+ result->fixed.capability = dtoh16(bi->capability);
+ result->ie_length = bi_ie_length;
+ memcpy(&result->fixed.macaddr, &bi->BSSID, ETHER_ADDR_LEN);
+ memcpy(result->ie_data, ((uint8 *)bi + bi_ie_offset), bi_ie_length);
+ *size = mem_needed;
+exit:
+ return result;
+}
+
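+/*
+ * Convert a v3 PFN scan-result event into an array of dhd_epno_results_t.
+ * A zero-length SSID means the firmware reported an index instead, which is
+ * mapped back to the configured SSID.
+ */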
+static void *
+dhd_pno_update_pfn_v3_results(dhd_pub_t *dhd, wl_pfn_scanresults_v3_t *pfn_result,
+ uint32 *mem_needed, struct dhd_pno_gscan_params *gscan_params, uint32 event)
+{
+ uint32 i;
+ uint8 ssid[DOT11_MAX_SSID_LEN + 1];
+ struct ether_addr *bssid;
+ wl_pfn_net_info_v3_t *net_info = NULL;
+ dhd_epno_results_t *results = NULL;
+
+ if ((pfn_result->count == 0) || (pfn_result->count > EVENT_MAX_NETCNT_V3)) {
+ DHD_ERROR(("%s event %d: wrong pfn v3 results count %d\n",
+ __FUNCTION__, event, pfn_result->count));
+ return NULL;
+ }
+
+ *mem_needed = sizeof(dhd_epno_results_t) * pfn_result->count;
+ results = (dhd_epno_results_t *)MALLOC(dhd->osh, (*mem_needed));
+ if (!results) {
+ DHD_ERROR(("%s: Can't malloc %d bytes for results\n", __FUNCTION__,
+ *mem_needed));
+ return NULL;
+ }
+ for (i = 0; i < pfn_result->count; i++) {
+ net_info = &pfn_result->netinfo[i];
+ results[i].rssi = net_info->RSSI;
+ results[i].channel = wl_channel_to_frequency(
+ CHSPEC_CHANNEL(net_info->pfnsubnet.chanspec),
+ CHSPEC_BAND(net_info->pfnsubnet.chanspec));
+ results[i].flags = (event == WLC_E_PFN_NET_FOUND) ?
+ WL_PFN_SSID_EXT_FOUND: WL_PFN_SSID_EXT_LOST;
+ results[i].ssid_len = min(net_info->pfnsubnet.SSID_len,
+ (uint8)DOT11_MAX_SSID_LEN);
+ bssid = &results[i].bssid;
+ (void)memcpy_s(bssid, ETHER_ADDR_LEN,
+ &net_info->pfnsubnet.BSSID, ETHER_ADDR_LEN);
+ if (!net_info->pfnsubnet.SSID_len) {
+ dhd_pno_idx_to_ssid(gscan_params, &results[i],
+ net_info->pfnsubnet.u.index);
+ } else {
+ (void)memcpy_s(results[i].ssid, DOT11_MAX_SSID_LEN,
+ net_info->pfnsubnet.u.SSID, results[i].ssid_len);
+ }
+ (void)memcpy_s(ssid, DOT11_MAX_SSID_LEN, results[i].ssid, results[i].ssid_len);
+ ssid[results[i].ssid_len] = '\0';
+ DHD_PNO(("ssid - %s bssid "MACDBG" ch %d rssi %d flags %d\n",
+ ssid, MAC2STRDBG(bssid->octet), results[i].channel,
+ results[i].rssi, results[i].flags));
+ }
+
+ return results;
+}
+
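+/*
+ * Handle WLC_E_PFN_NET_FOUND/LOST events for ePNO: pick the parser matching
+ * the firmware's scanresult version (v1/v2/v3) and return the converted
+ * result array with its size in *size.
+ */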
+void *
+dhd_pno_process_epno_result(dhd_pub_t *dhd, const void *data, uint32 event, int *size)
+{
+ dhd_epno_results_t *results = NULL;
+ dhd_pno_status_info_t *_pno_state = PNO_GET_PNOSTATE(dhd);
+ struct dhd_pno_gscan_params *gscan_params;
+ uint32 count, mem_needed = 0, i;
+ uint8 ssid[DOT11_MAX_SSID_LEN + 1];
+ struct ether_addr *bssid;
+
+ *size = 0;
+ if (!_pno_state)
+ return NULL;
+ gscan_params = &(_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan);
+
+ if (event == WLC_E_PFN_NET_FOUND || event == WLC_E_PFN_NET_LOST) {
+ wl_pfn_scanresults_v1_t *pfn_result = (wl_pfn_scanresults_v1_t *)data;
+ wl_pfn_scanresults_v2_t *pfn_result_v2 = (wl_pfn_scanresults_v2_t *)data;
+ wl_pfn_scanresults_v3_t *pfn_result_v3 = (wl_pfn_scanresults_v3_t *)data;
+ wl_pfn_net_info_v1_t *net;
+ wl_pfn_net_info_v2_t *net_v2;
+
+ if (pfn_result->version == PFN_SCANRESULT_VERSION_V1) {
+ if ((pfn_result->count == 0) || (pfn_result->count > EVENT_MAX_NETCNT_V1)) {
+ DHD_ERROR(("%s event %d: wrong pfn v1 results count %d\n",
+ __FUNCTION__, event, pfn_result->count));
+ return NULL;
+ }
+ count = pfn_result->count;
+ mem_needed = sizeof(dhd_epno_results_t) * count;
+ results = (dhd_epno_results_t *)MALLOC(dhd->osh, mem_needed);
+ if (!results) {
+ DHD_ERROR(("%s: Can't malloc %d bytes for results\n", __FUNCTION__,
+ mem_needed));
+ return NULL;
+ }
+ for (i = 0; i < count; i++) {
+ net = &pfn_result->netinfo[i];
+ results[i].rssi = net->RSSI;
+ results[i].channel = wf_channel2mhz(net->pfnsubnet.channel,
+ (net->pfnsubnet.channel <= CH_MAX_2G_CHANNEL ?
+ WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G));
+ results[i].flags = (event == WLC_E_PFN_NET_FOUND) ?
+ WL_PFN_SSID_EXT_FOUND: WL_PFN_SSID_EXT_LOST;
+ results[i].ssid_len = min(net->pfnsubnet.SSID_len,
+ (uint8)DOT11_MAX_SSID_LEN);
+ bssid = &results[i].bssid;
+ (void)memcpy_s(bssid, ETHER_ADDR_LEN,
+ &net->pfnsubnet.BSSID, ETHER_ADDR_LEN);
+ if (!net->pfnsubnet.SSID_len) {
+ DHD_ERROR(("%s: Gscan results indexing is not"
+ " supported in version 1 \n", __FUNCTION__));
+ MFREE(dhd->osh, results, mem_needed);
+ return NULL;
+ } else {
+ (void)memcpy_s(results[i].ssid, DOT11_MAX_SSID_LEN,
+ net->pfnsubnet.SSID, results[i].ssid_len);
+ }
+ (void)memcpy_s(ssid, DOT11_MAX_SSID_LEN,
+ results[i].ssid, results[i].ssid_len);
+ ssid[results[i].ssid_len] = '\0';
+ DHD_PNO(("ssid - %s bssid "MACDBG" ch %d rssi %d flags %d\n",
+ ssid, MAC2STRDBG(bssid->octet), results[i].channel,
+ results[i].rssi, results[i].flags));
+ }
+ } else if (pfn_result_v2->version == PFN_SCANRESULT_VERSION_V2) {
+ if ((pfn_result->count == 0) || (pfn_result->count > EVENT_MAX_NETCNT_V2)) {
+ DHD_ERROR(("%s event %d: wrong pfn v2 results count %d\n",
+ __FUNCTION__, event, pfn_result->count));
+ return NULL;
+ }
+ count = pfn_result_v2->count;
+ mem_needed = sizeof(dhd_epno_results_t) * count;
+ results = (dhd_epno_results_t *)MALLOC(dhd->osh, mem_needed);
+ if (!results) {
+ DHD_ERROR(("%s: Can't malloc %d bytes for results\n", __FUNCTION__,
+ mem_needed));
+ return NULL;
+ }
+ for (i = 0; i < count; i++) {
+ net_v2 = &pfn_result_v2->netinfo[i];
+ results[i].rssi = net_v2->RSSI;
+ results[i].channel = wf_channel2mhz(net_v2->pfnsubnet.channel,
+ (net_v2->pfnsubnet.channel <= CH_MAX_2G_CHANNEL ?
+ WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G));
+ results[i].flags = (event == WLC_E_PFN_NET_FOUND) ?
+ WL_PFN_SSID_EXT_FOUND: WL_PFN_SSID_EXT_LOST;
+ results[i].ssid_len = min(net_v2->pfnsubnet.SSID_len,
+ (uint8)DOT11_MAX_SSID_LEN);
+ bssid = &results[i].bssid;
+ (void)memcpy_s(bssid, ETHER_ADDR_LEN,
+ &net_v2->pfnsubnet.BSSID, ETHER_ADDR_LEN);
+ if (!net_v2->pfnsubnet.SSID_len) {
+ dhd_pno_idx_to_ssid(gscan_params, &results[i],
+ net_v2->pfnsubnet.u.index);
+ } else {
+ (void)memcpy_s(results[i].ssid, DOT11_MAX_SSID_LEN,
+ net_v2->pfnsubnet.u.SSID, results[i].ssid_len);
+ }
+ (void)memcpy_s(ssid, DOT11_MAX_SSID_LEN,
+ results[i].ssid, results[i].ssid_len);
+ ssid[results[i].ssid_len] = '\0';
+ DHD_PNO(("ssid - %s bssid "MACDBG" ch %d rssi %d flags %d\n",
+ ssid, MAC2STRDBG(bssid->octet), results[i].channel,
+ results[i].rssi, results[i].flags));
+ }
+ } else if (pfn_result_v3->version == PFN_SCANRESULT_VERSION_V3) {
+ results = dhd_pno_update_pfn_v3_results(dhd, pfn_result_v3, &mem_needed,
+ gscan_params, event);
+ if (results == NULL) {
+ return results;
+ }
+ } else {
+ DHD_ERROR(("%s event %d: Incorrect version %d , not supported\n",
+ __FUNCTION__, event, pfn_result->version));
+ return NULL;
+ }
+ }
+ *size = mem_needed;
+ return results;
+}
+
+static void *
+dhd_pno_update_hotlist_v3_results(dhd_pub_t *dhd, wl_pfn_scanresults_v3_t *pfn_result,
+ int *send_evt_bytes, hotlist_type_t type, u32 *buf_len)
+{
+ u32 malloc_size = 0, i;
+ struct osl_timespec tm_spec;
+ struct dhd_pno_gscan_params *gscan_params;
+ gscan_results_cache_t *gscan_hotlist_cache;
+ wifi_gscan_result_t *hotlist_found_array;
+ dhd_pno_status_info_t *_pno_state = PNO_GET_PNOSTATE(dhd);
+ wl_pfn_net_info_v3_t *pnetinfo = (wl_pfn_net_info_v3_t*)&pfn_result->netinfo[0];
+
+ gscan_params = &(_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan);
+
+ if (!pfn_result->count || (pfn_result->count > EVENT_MAX_NETCNT_V3)) {
+ DHD_ERROR(("%s: wrong v3 fwcount:%d\n", __FUNCTION__, pfn_result->count));
+ *send_evt_bytes = 0;
+ return NULL;
+ }
+
+ osl_get_monotonic_boottime(&tm_spec);
+ malloc_size = sizeof(gscan_results_cache_t) +
+ ((pfn_result->count - 1) * sizeof(wifi_gscan_result_t));
+ gscan_hotlist_cache =
+ (gscan_results_cache_t *)MALLOC(dhd->osh, malloc_size);
+ if (!gscan_hotlist_cache) {
+ DHD_ERROR(("%s Cannot Malloc %d bytes!!\n", __FUNCTION__, malloc_size));
+ *send_evt_bytes = 0;
+ return NULL;
+ }
+ *buf_len = malloc_size;
+ if (type == HOTLIST_FOUND) {
+ gscan_hotlist_cache->next = gscan_params->gscan_hotlist_found;
+ gscan_params->gscan_hotlist_found = gscan_hotlist_cache;
+ DHD_PNO(("%s enter, FOUND results count %d\n", __FUNCTION__, pfn_result->count));
+ } else {
+ gscan_hotlist_cache->next = gscan_params->gscan_hotlist_lost;
+ gscan_params->gscan_hotlist_lost = gscan_hotlist_cache;
+ DHD_PNO(("%s enter, LOST results count %d\n", __FUNCTION__, pfn_result->count));
+ }
+
+ gscan_hotlist_cache->tot_count = pfn_result->count;
+ gscan_hotlist_cache->tot_consumed = 0;
+ gscan_hotlist_cache->scan_ch_bucket = pfn_result->scan_ch_bucket;
+
+ for (i = 0; i < pfn_result->count; i++, pnetinfo++) {
+ hotlist_found_array = &gscan_hotlist_cache->results[i];
+ (void)memset_s(hotlist_found_array, sizeof(wifi_gscan_result_t),
+ 0, sizeof(wifi_gscan_result_t));
+ hotlist_found_array->channel = wl_channel_to_frequency(
+ CHSPEC_CHANNEL(pnetinfo->pfnsubnet.chanspec),
+ CHSPEC_BAND(pnetinfo->pfnsubnet.chanspec));
+ hotlist_found_array->rssi = (int32) pnetinfo->RSSI;
+
+ hotlist_found_array->ts =
+ convert_fw_rel_time_to_systime(&tm_spec,
+ (pnetinfo->timestamp * 1000));
+ if (pnetinfo->pfnsubnet.SSID_len > DOT11_MAX_SSID_LEN) {
+ DHD_ERROR(("Invalid SSID length %d: trimming it to max\n",
+ pnetinfo->pfnsubnet.SSID_len));
+ pnetinfo->pfnsubnet.SSID_len = DOT11_MAX_SSID_LEN;
+ }
+ (void)memcpy_s(hotlist_found_array->ssid, DOT11_MAX_SSID_LEN,
+ pnetinfo->pfnsubnet.u.SSID, pnetinfo->pfnsubnet.SSID_len);
+ hotlist_found_array->ssid[pnetinfo->pfnsubnet.SSID_len] = '\0';
+
+ (void)memcpy_s(&hotlist_found_array->macaddr, ETHER_ADDR_LEN,
+ &pnetinfo->pfnsubnet.BSSID, ETHER_ADDR_LEN);
+ DHD_PNO(("\t%s "MACDBG" rssi %d\n",
+ hotlist_found_array->ssid,
+ MAC2STRDBG(hotlist_found_array->macaddr.octet),
+ hotlist_found_array->rssi));
+ }
+
+ return gscan_hotlist_cache;
+}
+
+void *
+dhd_handle_hotlist_scan_evt(dhd_pub_t *dhd, const void *event_data,
+ int *send_evt_bytes, hotlist_type_t type, u32 *buf_len)
+{
+ void *ptr = NULL;
+ dhd_pno_status_info_t *_pno_state = PNO_GET_PNOSTATE(dhd);
+ struct dhd_pno_gscan_params *gscan_params;
+ wl_pfn_scanresults_v1_t *results_v1 = (wl_pfn_scanresults_v1_t *)event_data;
+ wl_pfn_scanresults_v2_t *results_v2 = (wl_pfn_scanresults_v2_t *)event_data;
+ wl_pfn_scanresults_v3_t *results_v3 = (wl_pfn_scanresults_v3_t *)event_data;
+ wifi_gscan_result_t *hotlist_found_array;
+ wl_pfn_net_info_v1_t *pnetinfo;
+ wl_pfn_net_info_v2_t *pnetinfo_v2;
+ gscan_results_cache_t *gscan_hotlist_cache;
+ u32 malloc_size = 0, i, total = 0;
+ struct osl_timespec tm_spec;
+ uint16 fwstatus;
+ uint16 fwcount;
+
+	/* Static asserts in _dhd_pno_get_for_batch() above guarantee that the v1 and v2
+	 * net_info and subnet_info structures are compatible in size and SSID offset,
+	 * allowing v1 to be used safely in the code below except for the scanresults
+	 * fields themselves (status, count, offset to netinfo).
+	 */
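+
+	/* For illustration, the referenced checks take roughly this form (a
+	 * sketch, not the exact statements in _dhd_pno_get_for_batch()):
+	 *
+	 *   STATIC_ASSERT(sizeof(wl_pfn_net_info_v1_t) ==
+	 *           sizeof(wl_pfn_net_info_v2_t));
+	 *   STATIC_ASSERT(OFFSETOF(wl_pfn_subnet_info_v1_t, SSID) ==
+	 *           OFFSETOF(wl_pfn_subnet_info_v2_t, u.SSID));
+	 */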
+
+ *buf_len = 0;
+ if (results_v1->version == PFN_SCANRESULTS_VERSION_V1) {
+ fwstatus = results_v1->status;
+ fwcount = results_v1->count;
+ pnetinfo = &results_v1->netinfo[0];
+
+ gscan_params = &(_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan);
+
+ if (!fwcount || (fwcount > EVENT_MAX_NETCNT_V1)) {
+ DHD_ERROR(("%s: wrong v1 fwcount:%d\n", __FUNCTION__, fwcount));
+ *send_evt_bytes = 0;
+ return ptr;
+ }
+
+ osl_get_monotonic_boottime(&tm_spec);
+ malloc_size = sizeof(gscan_results_cache_t) +
+ ((fwcount - 1) * sizeof(wifi_gscan_result_t));
+ gscan_hotlist_cache = (gscan_results_cache_t *)MALLOC(dhd->osh, malloc_size);
+ if (!gscan_hotlist_cache) {
+ DHD_ERROR(("%s Cannot Malloc %d bytes!!\n", __FUNCTION__, malloc_size));
+ *send_evt_bytes = 0;
+ return ptr;
+ }
+
+ *buf_len = malloc_size;
+ if (type == HOTLIST_FOUND) {
+ gscan_hotlist_cache->next = gscan_params->gscan_hotlist_found;
+ gscan_params->gscan_hotlist_found = gscan_hotlist_cache;
+ DHD_PNO(("%s enter, FOUND results count %d\n", __FUNCTION__, fwcount));
+ } else {
+ gscan_hotlist_cache->next = gscan_params->gscan_hotlist_lost;
+ gscan_params->gscan_hotlist_lost = gscan_hotlist_cache;
+ DHD_PNO(("%s enter, LOST results count %d\n", __FUNCTION__, fwcount));
+ }
+
+ gscan_hotlist_cache->tot_count = fwcount;
+ gscan_hotlist_cache->tot_consumed = 0;
+
+ for (i = 0; i < fwcount; i++, pnetinfo++) {
+ hotlist_found_array = &gscan_hotlist_cache->results[i];
+ memset(hotlist_found_array, 0, sizeof(wifi_gscan_result_t));
+ hotlist_found_array->channel = wf_channel2mhz(pnetinfo->pfnsubnet.channel,
+ (pnetinfo->pfnsubnet.channel <= CH_MAX_2G_CHANNEL?
+ WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G));
+ hotlist_found_array->rssi = (int32) pnetinfo->RSSI;
+
+ hotlist_found_array->ts =
+ convert_fw_rel_time_to_systime(&tm_spec,
+ (pnetinfo->timestamp * 1000));
+ if (pnetinfo->pfnsubnet.SSID_len > DOT11_MAX_SSID_LEN) {
+ DHD_ERROR(("Invalid SSID length %d: trimming it to max\n",
+ pnetinfo->pfnsubnet.SSID_len));
+ pnetinfo->pfnsubnet.SSID_len = DOT11_MAX_SSID_LEN;
+ }
+ (void)memcpy_s(hotlist_found_array->ssid, DOT11_MAX_SSID_LEN,
+ pnetinfo->pfnsubnet.SSID, pnetinfo->pfnsubnet.SSID_len);
+ hotlist_found_array->ssid[pnetinfo->pfnsubnet.SSID_len] = '\0';
+
+ (void)memcpy_s(&hotlist_found_array->macaddr, ETHER_ADDR_LEN,
+ &pnetinfo->pfnsubnet.BSSID, ETHER_ADDR_LEN);
+ DHD_PNO(("\t%s "MACDBG" rssi %d\n",
+ hotlist_found_array->ssid,
+ MAC2STRDBG(hotlist_found_array->macaddr.octet),
+ hotlist_found_array->rssi));
+ }
+ } else if (results_v2->version == PFN_SCANRESULTS_VERSION_V2) {
+ fwstatus = results_v2->status;
+ fwcount = results_v2->count;
+ pnetinfo_v2 = (wl_pfn_net_info_v2_t*)&results_v2->netinfo[0];
+
+ gscan_params = &(_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan);
+
+ if (!fwcount || (fwcount > EVENT_MAX_NETCNT_V2)) {
+ DHD_ERROR(("%s: wrong v2 fwcount:%d\n", __FUNCTION__, fwcount));
+ *send_evt_bytes = 0;
+ return ptr;
+ }
+
+ osl_get_monotonic_boottime(&tm_spec);
+ malloc_size = sizeof(gscan_results_cache_t) +
+ ((fwcount - 1) * sizeof(wifi_gscan_result_t));
+ gscan_hotlist_cache =
+ (gscan_results_cache_t *)MALLOC(dhd->osh, malloc_size);
+ if (!gscan_hotlist_cache) {
+ DHD_ERROR(("%s Cannot Malloc %d bytes!!\n", __FUNCTION__, malloc_size));
+ *send_evt_bytes = 0;
+ return ptr;
+ }
+ *buf_len = malloc_size;
+ if (type == HOTLIST_FOUND) {
+ gscan_hotlist_cache->next = gscan_params->gscan_hotlist_found;
+ gscan_params->gscan_hotlist_found = gscan_hotlist_cache;
+ DHD_PNO(("%s enter, FOUND results count %d\n", __FUNCTION__, fwcount));
+ } else {
+ gscan_hotlist_cache->next = gscan_params->gscan_hotlist_lost;
+ gscan_params->gscan_hotlist_lost = gscan_hotlist_cache;
+ DHD_PNO(("%s enter, LOST results count %d\n", __FUNCTION__, fwcount));
+ }
+
+ gscan_hotlist_cache->tot_count = fwcount;
+ gscan_hotlist_cache->tot_consumed = 0;
+ gscan_hotlist_cache->scan_ch_bucket = results_v2->scan_ch_bucket;
+
+ for (i = 0; i < fwcount; i++, pnetinfo_v2++) {
+ hotlist_found_array = &gscan_hotlist_cache->results[i];
+ memset(hotlist_found_array, 0, sizeof(wifi_gscan_result_t));
+ hotlist_found_array->channel =
+ wf_channel2mhz(pnetinfo_v2->pfnsubnet.channel,
+ (pnetinfo_v2->pfnsubnet.channel <= CH_MAX_2G_CHANNEL?
+ WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G));
+ hotlist_found_array->rssi = (int32) pnetinfo_v2->RSSI;
+
+ hotlist_found_array->ts =
+ convert_fw_rel_time_to_systime(&tm_spec,
+ (pnetinfo_v2->timestamp * 1000));
+ if (pnetinfo_v2->pfnsubnet.SSID_len > DOT11_MAX_SSID_LEN) {
+ DHD_ERROR(("Invalid SSID length %d: trimming it to max\n",
+ pnetinfo_v2->pfnsubnet.SSID_len));
+ pnetinfo_v2->pfnsubnet.SSID_len = DOT11_MAX_SSID_LEN;
+ }
+ (void)memcpy_s(hotlist_found_array->ssid, DOT11_MAX_SSID_LEN,
+ pnetinfo_v2->pfnsubnet.u.SSID, pnetinfo_v2->pfnsubnet.SSID_len);
+ hotlist_found_array->ssid[pnetinfo_v2->pfnsubnet.SSID_len] = '\0';
+
+ (void)memcpy_s(&hotlist_found_array->macaddr, ETHER_ADDR_LEN,
+ &pnetinfo_v2->pfnsubnet.BSSID, ETHER_ADDR_LEN);
+ DHD_PNO(("\t%s "MACDBG" rssi %d\n",
+ hotlist_found_array->ssid,
+ MAC2STRDBG(hotlist_found_array->macaddr.octet),
+ hotlist_found_array->rssi));
+ }
+ } else if (results_v3->version == PFN_SCANRESULTS_VERSION_V3) {
+ fwstatus = results_v3->status;
+ gscan_hotlist_cache = (gscan_results_cache_t *)dhd_pno_update_hotlist_v3_results(
+ dhd, results_v3, send_evt_bytes, type, buf_len);
+ } else {
+ DHD_ERROR(("%s: event version %d not supported\n",
+ __FUNCTION__, results_v1->version));
+ *send_evt_bytes = 0;
+ return ptr;
+ }
+ if (fwstatus == PFN_COMPLETE) {
+ ptr = (void *) gscan_hotlist_cache;
+ while (gscan_hotlist_cache) {
+ total += gscan_hotlist_cache->tot_count;
+ gscan_hotlist_cache = gscan_hotlist_cache->next;
+ }
+ *send_evt_bytes = total * sizeof(wifi_gscan_result_t);
+ }
+
+ return ptr;
+}
+#endif /* GSCAN_SUPPORT */
+
+int
+dhd_pno_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data)
+{
+ int err = BCME_OK;
+ uint event_type;
+ dhd_pno_status_info_t *_pno_state;
+ NULL_CHECK(dhd, "dhd is NULL", err);
+ NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+ _pno_state = PNO_GET_PNOSTATE(dhd);
+ if (!WLS_SUPPORTED(_pno_state)) {
+ DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__));
+ err = BCME_UNSUPPORTED;
+ goto exit;
+ }
+ event_type = ntoh32(event->event_type);
+ DHD_PNO(("%s enter : event_type :%d\n", __FUNCTION__, event_type));
+ switch (event_type) {
+ case WLC_E_PFN_BSSID_NET_FOUND:
+ case WLC_E_PFN_BSSID_NET_LOST:
+		/* How should we inform the framework of this? */
+ /* TODO : need to implement event logic using generic netlink */
+ break;
+ case WLC_E_PFN_BEST_BATCHING:
+#ifndef GSCAN_SUPPORT
+ {
+ struct dhd_pno_batch_params *params_batch;
+ params_batch = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS].params_batch;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0))
+ if (!waitqueue_active(&_pno_state->get_batch_done))
+#else
+ if (!waitqueue_active(&_pno_state->get_batch_done.wait))
+#endif
+ {
+ DHD_PNO(("%s : WLC_E_PFN_BEST_BATCHING\n", __FUNCTION__));
+ params_batch->get_batch.buf = NULL;
+ params_batch->get_batch.bufsize = 0;
+ params_batch->get_batch.reason = PNO_STATUS_EVENT;
+ schedule_work(&_pno_state->work);
+ } else
+ DHD_PNO(("%s : WLC_E_PFN_BEST_BATCHING"
+ "will skip this event\n", __FUNCTION__));
+ break;
+ }
+#else
+ break;
+#endif /* !GSCAN_SUPPORT */
+ default:
+ DHD_ERROR(("unknown event : %d\n", event_type));
+ }
+exit:
+ return err;
+}
+
+int dhd_pno_init(dhd_pub_t *dhd)
+{
+ int err = BCME_OK;
+ dhd_pno_status_info_t *_pno_state;
+ char *buf = NULL;
+ NULL_CHECK(dhd, "dhd is NULL", err);
+ DHD_PNO(("%s enter\n", __FUNCTION__));
+ UNUSED_PARAMETER(_dhd_pno_suspend);
+ if (dhd->pno_state)
+ goto exit;
+ dhd->pno_state = MALLOC(dhd->osh, sizeof(dhd_pno_status_info_t));
+ NULL_CHECK(dhd->pno_state, "failed to create dhd_pno_state", err);
+ memset(dhd->pno_state, 0, sizeof(dhd_pno_status_info_t));
+ /* need to check whether current firmware support batching and hotlist scan */
+ _pno_state = PNO_GET_PNOSTATE(dhd);
+ _pno_state->wls_supported = TRUE;
+ _pno_state->dhd = dhd;
+ mutex_init(&_pno_state->pno_mutex);
+ INIT_WORK(&_pno_state->work, _dhd_pno_get_batch_handler);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0))
+ init_waitqueue_head(&_pno_state->get_batch_done);
+#else
+ init_completion(&_pno_state->get_batch_done);
+#endif
+#ifdef GSCAN_SUPPORT
+ init_waitqueue_head(&_pno_state->batch_get_wait);
+#endif /* GSCAN_SUPPORT */
+ buf = MALLOC(dhd->osh, WLC_IOCTL_SMLEN);
+ if (!buf) {
+ DHD_ERROR((":%s buf alloc err.\n", __FUNCTION__));
+ return BCME_NOMEM;
+ }
+ err = dhd_iovar(dhd, 0, "pfnlbest", NULL, 0, buf, WLC_IOCTL_SMLEN,
+ FALSE);
+ if (err == BCME_UNSUPPORTED) {
+ _pno_state->wls_supported = FALSE;
+ DHD_ERROR(("Android Location Service, UNSUPPORTED\n"));
+ DHD_INFO(("Current firmware doesn't support"
+ " Android Location Service\n"));
+ } else {
+ DHD_ERROR(("%s: Support Android Location Service\n",
+ __FUNCTION__));
+ }
+exit:
+ MFREE(dhd->osh, buf, WLC_IOCTL_SMLEN);
+ return err;
+}
+
+int dhd_pno_deinit(dhd_pub_t *dhd)
+{
+ int err = BCME_OK;
+ dhd_pno_status_info_t *_pno_state;
+ dhd_pno_params_t *_params;
+ NULL_CHECK(dhd, "dhd is NULL", err);
+
+ DHD_PNO(("%s enter\n", __FUNCTION__));
+ _pno_state = PNO_GET_PNOSTATE(dhd);
+ NULL_CHECK(_pno_state, "pno_state is NULL", err);
+ /* may need to free legacy ssid_list */
+ if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) {
+ _params = &_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS];
+ _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_LEGACY_MODE);
+ }
+
+#ifdef GSCAN_SUPPORT
+ if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) {
+ _params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
+ mutex_lock(&_pno_state->pno_mutex);
+ dhd_pno_reset_cfg_gscan(dhd, _params, _pno_state, GSCAN_FLUSH_ALL_CFG);
+ mutex_unlock(&_pno_state->pno_mutex);
+ }
+#endif /* GSCAN_SUPPORT */
+
+ if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) {
+ _params = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS];
+ /* clear resource if the BATCH MODE is on */
+ _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_BATCH_MODE);
+ }
+ cancel_work_sync(&_pno_state->work);
+ MFREE(dhd->osh, _pno_state, sizeof(dhd_pno_status_info_t));
+ dhd->pno_state = NULL;
+ return err;
+}
+#endif /* OEM_ANDROID */
+
+#ifndef OEM_ANDROID
+#if defined(NDIS)
+#define DHD_IOVAR_BUF_SIZE 128
+int
+dhd_pno_cfg(dhd_pub_t *dhd, wl_pfn_cfg_t *pcfg)
+{
+ int ret = -1;
+ uint len = 0;
+ char iovbuf[2 * DHD_IOVAR_BUF_SIZE];
+
+ if (!dhd)
+ return ret;
+ memset(iovbuf, 0, sizeof(iovbuf));
+ if ((len =
+ bcm_mkiovar("pfn_cfg", (char *)pcfg,
+ sizeof(wl_pfn_cfg_t), iovbuf, sizeof(iovbuf))) > 0) {
+ if ((ret =
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, len, TRUE, 0)) < 0)
+ DHD_ERROR(("%s failed for error=%d\n",
+ __FUNCTION__, ret));
+ else
+ DHD_ERROR(("%s set OK\n", __FUNCTION__));
+ } else {
+ DHD_ERROR(("%s iovar failed\n", __FUNCTION__));
+ ret = -1;
+ }
+
+ return ret;
+}
+
+int
+dhd_pno_suspend(dhd_pub_t *dhd, int pfn_suspend)
+{
+ char iovbuf[DHD_IOVAR_BUF_SIZE];
+ int ret = -1;
+
+ if ((!dhd) || ((pfn_suspend != 0) && (pfn_suspend != 1))) {
+ DHD_ERROR(("%s error exit\n", __FUNCTION__));
+ return ret;
+ }
+
+ memset(iovbuf, 0, sizeof(iovbuf));
+ /* suspend/resume PNO */
+ if ((ret = bcm_mkiovar("pfn_suspend", (char *)&pfn_suspend, 4, iovbuf,
+ sizeof(iovbuf))) > 0) {
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+ sizeof(iovbuf), TRUE, 0)) < 0)
+ DHD_ERROR(("%s failed for error=%d\n", __FUNCTION__, ret));
+ else {
+ DHD_TRACE(("%s set pno to %s\n", __FUNCTION__,
+ (pfn_suspend? "suspend" : "resume")));
+ dhd->pno_suspend = pfn_suspend;
+ }
+ }
+ else {
+ DHD_ERROR(("%s failed at mkiovar, err=%d\n", __FUNCTION__, ret));
+ ret = -1;
+ }
+
+ return ret;
+}
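+
+/* Usage sketch (hypothetical caller, not part of this file): suspend PNO
+ * around a host-initiated scan, then resume it afterwards.
+ *
+ *   if (dhd_pno_suspend(dhd, 1) < 0)
+ *       DHD_ERROR(("pno suspend failed\n"));
+ *   ... run the foreground scan ...
+ *   (void)dhd_pno_suspend(dhd, 0);
+ */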
+
+int
+dhd_pno_set_add(dhd_pub_t *dhd, wl_pfn_t *netinfo, int nssid, ushort scan_fr, ushort slowscan_fr,
+ uint8 pno_repeat, uint8 pno_freq_expo_max, int16 flags)
+{
+ int err = -1;
+ char iovbuf[DHD_IOVAR_BUF_SIZE];
+ int k, i;
+ wl_pfn_param_t pfn_param;
+ wl_pfn_t pfn_element;
+ uint len = 0;
+
+ DHD_TRACE(("%s nssid=%d scan_fr=%d\n", __FUNCTION__, nssid, scan_fr));
+
+ if ((!dhd) || (!netinfo) ||
+ (nssid > MAX_PFN_LIST_COUNT) || (nssid <= 0)) {
+ DHD_ERROR(("%s error exit\n", __FUNCTION__));
+ return err;
+ }
+
+ /* Check for broadcast ssid */
+ for (k = 0; k < nssid; k++) {
+ if (!netinfo[k].ssid.SSID_len) {
+ DHD_ERROR(("%d: Broadcast SSID is ilegal for PNO setting\n", k));
+ return err;
+ }
+ }
+
+ /* clean up everything */
+ if (dhd_pno_clean(dhd) < 0) {
+ DHD_ERROR(("%s failed\n", __FUNCTION__));
+ return err;
+ }
+ memset(&pfn_param, 0, sizeof(pfn_param));
+ memset(&pfn_element, 0, sizeof(pfn_element));
+
+ /* set pfn parameters */
+ pfn_param.version = htod32(PFN_VERSION);
+	pfn_param.flags = htod16(flags | (PFN_LIST_ORDER << SORT_CRITERIA_BIT));
+
+ /* set extra pno params */
+ pfn_param.repeat = pno_repeat;
+ pfn_param.exp = pno_freq_expo_max;
+ pfn_param.slow_freq = slowscan_fr;
+
+ /* set up pno scan fr */
+ if (scan_fr > PNO_SCAN_MAX_FW_SEC) {
+ DHD_ERROR(("%s pno freq above %d sec\n", __FUNCTION__, PNO_SCAN_MAX_FW_SEC));
+ return err;
+ }
+ if (scan_fr < PNO_SCAN_MIN_FW_SEC) {
+ DHD_ERROR(("%s pno freq less %d sec\n", __FUNCTION__, PNO_SCAN_MIN_FW_SEC));
+ return err;
+ }
+ pfn_param.scan_freq = htod32(scan_fr);
+ if (slowscan_fr)
+ pfn_param.lost_network_timeout = -1; /* so no aging out */
+ memset(iovbuf, 0, sizeof(iovbuf));
+ len = bcm_mkiovar("pfn_set", (char *)&pfn_param, sizeof(pfn_param), iovbuf, sizeof(iovbuf));
+ if (!len)
+ return err;
+
+ if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, len, TRUE, 0) < 0)
+ return err;
+
+ /* set all pfn ssid */
+ for (i = 0; i < nssid; i++) {
+ pfn_element.infra = htod32(1);
+ pfn_element.auth = htod32(netinfo[i].auth);
+ pfn_element.wpa_auth = htod32(netinfo[i].wpa_auth);
+ pfn_element.wsec = htod32(netinfo[i].wsec);
+ pfn_element.flags = htod32(netinfo[i].flags);
+
+ memcpy((char *)pfn_element.ssid.SSID, netinfo[i].ssid.SSID,
+ netinfo[i].ssid.SSID_len);
+ pfn_element.ssid.SSID_len = netinfo[i].ssid.SSID_len;
+
+ if ((len =
+ bcm_mkiovar("pfn_add", (char *)&pfn_element,
+ sizeof(pfn_element), iovbuf, sizeof(iovbuf))) > 0) {
+ if ((err =
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, len, TRUE, 0)) < 0) {
+ DHD_ERROR(("%s failed for i=%d error=%d\n",
+ __FUNCTION__, i, err));
+ return err;
+ }
+ else
+ DHD_ERROR(("%s set ssid %s\n",
+ __FUNCTION__, netinfo[i].ssid.SSID));
+ }
+ else
+ DHD_ERROR(("%s: mkiovar pfn_add failed\n", __FUNCTION__));
+
+ memset(&pfn_element, 0, sizeof(pfn_element));
+ }
+
+ return err;
+}
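+
+/* Usage sketch (hypothetical values): program two SSIDs with a 60 s fast
+ * scan period and no slow period. Each wl_pfn_t entry must carry a non-zero
+ * ssid.SSID_len plus the auth/wpa_auth/wsec/flags fields consumed above.
+ *
+ *   wl_pfn_t netinfo[2];
+ *   ... fill netinfo[0] and netinfo[1] ...
+ *   err = dhd_pno_set_add(dhd, netinfo, 2, 60, 0, 0, 0, 0);
+ *   if (err == 0)
+ *       err = dhd_pno_enable(dhd, 1);
+ */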
+
+int
+dhd_pno_enable(dhd_pub_t *dhd, int pfn_enabled)
+{
+ char iovbuf[DHD_IOVAR_BUF_SIZE];
+ int ret = -1;
+
+ if ((!dhd) || ((pfn_enabled != 0) && (pfn_enabled != 1))) {
+ DHD_ERROR(("%s error exit\n", __FUNCTION__));
+ return ret;
+ }
+
+#ifndef WL_SCHED_SCAN
+ memset(iovbuf, 0, sizeof(iovbuf));
+
+ if ((pfn_enabled) && (dhd_is_associated(dhd, 0, NULL) == TRUE)) {
+ DHD_ERROR(("%s pno is NOT enable : called in assoc mode , ignore\n", __FUNCTION__));
+ return ret;
+ }
+#endif /* !WL_SCHED_SCAN */
+
+ /* make sure PNO is not suspended when it is going to be enabled */
+ if (pfn_enabled) {
+ int pfn_suspend = 0;
+ memset(iovbuf, 0, sizeof(iovbuf));
+ if ((ret = bcm_mkiovar("pfn_suspend", (char *)&pfn_suspend, 4, iovbuf,
+ sizeof(iovbuf))) > 0) {
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+ sizeof(iovbuf), TRUE, 0)) < 0) {
+ DHD_ERROR(("pfn_suspend failed for error=%d\n", __FUNCTION__, ret));
+ return ret;
+ } else {
+ DHD_TRACE(("pno resumed\n"));
+ }
+ } else {
+ return -1;
+ }
+ }
+
+ /* Enable/disable PNO */
+ if ((ret = bcm_mkiovar("pfn", (char *)&pfn_enabled, 4, iovbuf, sizeof(iovbuf))) > 0) {
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
+ iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
+ DHD_ERROR(("%s failed for error=%d\n", __FUNCTION__, ret));
+ return ret;
+ } else {
+ dhd->pno_enable = pfn_enabled;
+ DHD_TRACE(("%s set pno as %s\n",
+ __FUNCTION__, dhd->pno_enable ? "Enable" : "Disable"));
+ }
+ }
+ else DHD_ERROR(("%s failed err=%d\n", __FUNCTION__, ret));
+
+ return ret;
+}
+
+int
+dhd_pno_clean(dhd_pub_t *dhd)
+{
+ char iovbuf[DHD_IOVAR_BUF_SIZE];
+ int pfn_enabled = 0;
+ int iov_len = 0;
+ int ret;
+
+ /* Disable pfn */
+ iov_len = bcm_mkiovar("pfn", (char *)&pfn_enabled, 4, iovbuf, sizeof(iovbuf));
+ if (!iov_len) {
+ DHD_ERROR(("%s: Insufficient iovar buffer size %d \n",
+ __FUNCTION__, sizeof(iovbuf)));
+ return -1;
+ }
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, 0)) >= 0) {
+ /* clear pfn */
+ iov_len = bcm_mkiovar("pfnclear", 0, 0, iovbuf, sizeof(iovbuf));
+ if (iov_len) {
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+ iov_len, TRUE, 0)) < 0) {
+ DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
+ }
+ } else {
+ ret = -1;
+ DHD_ERROR(("%s failed code %d\n", __FUNCTION__, iov_len));
+ }
+ } else
+ DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
+
+ return ret;
+}
+#endif /* defined(NDIS) */
+#endif /* OEM_ANDROID */
+#endif /* PNO_SUPPORT */
diff --git a/bcmdhd.101.10.361.x/dhd_pno.h b/bcmdhd.101.10.361.x/dhd_pno.h
new file mode 100755
index 0000000..b2dd021
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_pno.h
@@ -0,0 +1,586 @@
+/*
+ * Header file of Broadcom Dongle Host Driver (DHD)
+ * Preferred Network Offload (PNO) and Wi-Fi Location Service (WLS) code.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef __DHD_PNO_H__
+#define __DHD_PNO_H__
+
+#if defined(OEM_ANDROID) && defined(PNO_SUPPORT)
+#define PNO_TLV_PREFIX 'S'
+#define PNO_TLV_VERSION '1'
+#define PNO_TLV_SUBTYPE_LEGACY_PNO '2'
+#define PNO_TLV_RESERVED '0'
+
+#define PNO_BATCHING_SET "SET"
+#define PNO_BATCHING_GET "GET"
+#define PNO_BATCHING_STOP "STOP"
+
+#define PNO_PARAMS_DELIMETER " "
+#define PNO_PARAM_CHANNEL_DELIMETER ","
+#define PNO_PARAM_VALUE_DELLIMETER '='
+#define PNO_PARAM_SCANFREQ "SCANFREQ"
+#define PNO_PARAM_BESTN "BESTN"
+#define PNO_PARAM_MSCAN "MSCAN"
+#define PNO_PARAM_CHANNEL "CHANNEL"
+#define PNO_PARAM_RTT "RTT"
+
+#define PNO_TLV_TYPE_SSID_IE 'S'
+#define PNO_TLV_TYPE_TIME 'T'
+#define PNO_TLV_FREQ_REPEAT 'R'
+#define PNO_TLV_FREQ_EXPO_MAX 'M'
+
+#define MAXNUM_SSID_PER_ADD 16
+#define MAXNUM_PNO_PARAMS 2
+#define PNO_TLV_COMMON_LENGTH 1
+#define DEFAULT_BATCH_MSCAN 16
+
+#define RESULTS_END_MARKER "----\n"
+#define SCAN_END_MARKER "####\n"
+#define AP_END_MARKER "====\n"
+#define PNO_RSSI_MARGIN_DBM 30
+
+#define CSCAN_COMMAND "CSCAN "
+#define CSCAN_TLV_PREFIX 'S'
+#define CSCAN_TLV_VERSION 1
+#define CSCAN_TLV_SUBVERSION 0
+#define CSCAN_TLV_TYPE_SSID_IE 'S'
+#define CSCAN_TLV_TYPE_CHANNEL_IE 'C'
+#define CSCAN_TLV_TYPE_NPROBE_IE 'N'
+#define CSCAN_TLV_TYPE_ACTIVE_IE 'A'
+#define CSCAN_TLV_TYPE_PASSIVE_IE 'P'
+#define CSCAN_TLV_TYPE_HOME_IE 'H'
+#define CSCAN_TLV_TYPE_STYPE_IE 'T'
+
+#define WL_SCAN_PARAMS_SSID_MAX 10
+#define GET_SSID "SSID="
+#define GET_CHANNEL "CH="
+#define GET_NPROBE "NPROBE="
+#define GET_ACTIVE_ASSOC_DWELL "ACTIVE="
+#define GET_PASSIVE_ASSOC_DWELL "PASSIVE="
+#define GET_HOME_DWELL "HOME="
+#define GET_SCAN_TYPE "TYPE="
+
+#if defined(GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS)
+#define GSCAN_MAX_CH_BUCKETS 8
+#define GSCAN_MAX_CHANNELS_IN_BUCKET 32
+#define GSCAN_MAX_AP_CACHE_PER_SCAN 32
+#define GSCAN_MAX_AP_CACHE 320
+#define GSCAN_BG_BAND_MASK (1 << 0)
+#define GSCAN_A_BAND_MASK (1 << 1)
+#define GSCAN_DFS_MASK (1 << 2)
+#define GSCAN_ABG_BAND_MASK (GSCAN_A_BAND_MASK | GSCAN_BG_BAND_MASK)
+#define GSCAN_BAND_MASK (GSCAN_ABG_BAND_MASK | GSCAN_DFS_MASK)
+
+#define GSCAN_FLUSH_HOTLIST_CFG (1 << 0)
+#define GSCAN_FLUSH_SIGNIFICANT_CFG (1 << 1)
+#define GSCAN_FLUSH_SCAN_CFG (1 << 2)
+#define GSCAN_FLUSH_EPNO_CFG (1 << 3)
+#define GSCAN_FLUSH_ALL_CFG (GSCAN_FLUSH_SCAN_CFG | \
+ GSCAN_FLUSH_SIGNIFICANT_CFG | \
+ GSCAN_FLUSH_HOTLIST_CFG | \
+ GSCAN_FLUSH_EPNO_CFG)
+#define DHD_EPNO_HIDDEN_SSID (1 << 0)
+#define DHD_EPNO_A_BAND_TRIG (1 << 1)
+#define DHD_EPNO_BG_BAND_TRIG (1 << 2)
+#define DHD_EPNO_STRICT_MATCH (1 << 3)
+#define DHD_EPNO_SAME_NETWORK (1 << 4)
+#define DHD_PNO_USE_SSID (DHD_EPNO_HIDDEN_SSID | DHD_EPNO_STRICT_MATCH)
+
+/* Do not change GSCAN_BATCH_RETRIEVAL_COMPLETE */
+#define GSCAN_BATCH_RETRIEVAL_COMPLETE 0
+#define GSCAN_BATCH_RETRIEVAL_IN_PROGRESS 1
+#define GSCAN_BATCH_NO_THR_SET 101
+#define GSCAN_LOST_AP_WINDOW_DEFAULT 4
+#define GSCAN_MIN_BSSID_TIMEOUT 90
+#define GSCAN_BATCH_GET_MAX_WAIT 500
+#define CHANNEL_BUCKET_EMPTY_INDEX 0xFFFF
+#define GSCAN_RETRY_THRESHOLD 3
+
+#define MAX_EPNO_SSID_NUM 64
+#endif /* GSCAN_SUPPORT || DHD_GET_VALID_CHANNELS */
+
+enum scan_status {
+ /* SCAN ABORT by other scan */
+ PNO_STATUS_ABORT,
+ /* RTT is presence or not */
+ PNO_STATUS_RTT_PRESENCE,
+ /* Disable PNO by Driver */
+ PNO_STATUS_DISABLE,
+ /* NORMAL BATCHING GET */
+ PNO_STATUS_NORMAL,
+ /* WLC_E_PFN_BEST_BATCHING */
+ PNO_STATUS_EVENT,
+ PNO_STATUS_MAX
+};
+#define PNO_STATUS_ABORT_MASK 0x0001
+#define PNO_STATUS_RTT_MASK 0x0002
+#define PNO_STATUS_DISABLE_MASK 0x0004
+#define PNO_STATUS_OOM_MASK 0x0010
+
+enum index_mode {
+ INDEX_OF_LEGACY_PARAMS,
+ INDEX_OF_BATCH_PARAMS,
+ INDEX_OF_HOTLIST_PARAMS,
+	/* GSCAN includes hotlist scan and the two do not run
+	 * independently of each other
+	 */
+ INDEX_OF_GSCAN_PARAMS = INDEX_OF_HOTLIST_PARAMS,
+ INDEX_MODE_MAX
+};
+enum dhd_pno_status {
+ DHD_PNO_DISABLED,
+ DHD_PNO_ENABLED,
+ DHD_PNO_SUSPEND
+};
+typedef struct cmd_tlv {
+ char prefix;
+ char version;
+ char subtype;
+ char reserved;
+} cmd_tlv_t;
+#if defined(GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS)
+typedef enum {
+ HOTLIST_LOST,
+ HOTLIST_FOUND
+} hotlist_type_t;
+
+typedef enum dhd_pno_gscan_cmd_cfg {
+ DHD_PNO_BATCH_SCAN_CFG_ID = 0,
+ DHD_PNO_GEOFENCE_SCAN_CFG_ID,
+ DHD_PNO_SIGNIFICANT_SCAN_CFG_ID,
+ DHD_PNO_SCAN_CFG_ID,
+ DHD_PNO_GET_CAPABILITIES,
+ DHD_PNO_GET_BATCH_RESULTS,
+ DHD_PNO_GET_CHANNEL_LIST,
+ DHD_PNO_GET_NEW_EPNO_SSID_ELEM,
+ DHD_PNO_EPNO_CFG_ID,
+ DHD_PNO_GET_AUTOJOIN_CAPABILITIES,
+ DHD_PNO_EPNO_PARAMS_ID
+} dhd_pno_gscan_cmd_cfg_t;
+
+typedef enum dhd_pno_mode {
+ /* Wi-Fi Legacy PNO Mode */
+ DHD_PNO_NONE_MODE = 0,
+ DHD_PNO_LEGACY_MODE = (1 << (0)),
+ /* Wi-Fi Android BATCH SCAN Mode */
+ DHD_PNO_BATCH_MODE = (1 << (1)),
+ /* Wi-Fi Android Hotlist SCAN Mode */
+ DHD_PNO_HOTLIST_MODE = (1 << (2)),
+ /* Wi-Fi Google Android SCAN Mode */
+ DHD_PNO_GSCAN_MODE = (1 << (3))
+} dhd_pno_mode_t;
+#else
+typedef enum dhd_pno_mode {
+ /* Wi-Fi Legacy PNO Mode */
+ DHD_PNO_NONE_MODE = 0,
+ DHD_PNO_LEGACY_MODE = (1 << (0)),
+ /* Wi-Fi Android BATCH SCAN Mode */
+ DHD_PNO_BATCH_MODE = (1 << (1)),
+ /* Wi-Fi Android Hotlist SCAN Mode */
+ DHD_PNO_HOTLIST_MODE = (1 << (2))
+} dhd_pno_mode_t;
+#endif /* GSCAN_SUPPORT || DHD_GET_VALID_CHANNELS */
+
+typedef struct dhd_pno_ssid {
+ bool hidden;
+ int8 rssi_thresh;
+ uint8 dummy;
+ uint16 SSID_len;
+ uint32 flags;
+ int32 wpa_auth;
+ uchar SSID[DOT11_MAX_SSID_LEN];
+ struct list_head list;
+} dhd_pno_ssid_t;
+
+struct dhd_pno_bssid {
+ struct ether_addr macaddr;
+ /* Bit4: suppress_lost, Bit3: suppress_found */
+ uint16 flags;
+ struct list_head list;
+};
+
+typedef struct dhd_pno_bestnet_entry {
+ struct ether_addr BSSID;
+ uint8 SSID_len;
+ uint8 SSID[DOT11_MAX_SSID_LEN];
+ int8 RSSI;
+ uint8 channel;
+ uint32 timestamp;
+ uint16 rtt0; /* distance_cm based on RTT */
+ uint16 rtt1; /* distance_cm based on sample standard deviation */
+ unsigned long recorded_time;
+ struct list_head list;
+} dhd_pno_bestnet_entry_t;
+#define BESTNET_ENTRY_SIZE (sizeof(dhd_pno_bestnet_entry_t))
+
+typedef struct dhd_pno_bestnet_header {
+ struct dhd_pno_bestnet_header *next;
+ uint8 reason;
+ uint32 tot_cnt;
+ uint32 tot_size;
+ struct list_head entry_list;
+} dhd_pno_best_header_t;
+#define BEST_HEADER_SIZE (sizeof(dhd_pno_best_header_t))
+
+typedef struct dhd_pno_scan_results {
+ dhd_pno_best_header_t *bestnetheader;
+ uint8 cnt_header;
+ struct list_head list;
+} dhd_pno_scan_results_t;
+#define SCAN_RESULTS_SIZE (sizeof(dhd_pno_scan_results_t))
+
+struct dhd_pno_get_batch_info {
+ /* info related to get batch */
+ char *buf;
+ bool batch_started;
+ uint32 tot_scan_cnt;
+ uint32 expired_tot_scan_cnt;
+ uint32 top_node_cnt;
+ uint32 bufsize;
+ uint32 bytes_written;
+ int reason;
+ struct list_head scan_results_list;
+ struct list_head expired_scan_results_list;
+};
+struct dhd_pno_legacy_params {
+ uint16 scan_fr;
+ uint16 chan_list[WL_NUMCHANNELS];
+ uint16 nchan;
+ int pno_repeat;
+ int pno_freq_expo_max;
+ int nssid;
+ struct list_head ssid_list;
+};
+struct dhd_pno_batch_params {
+ int32 scan_fr;
+ uint8 bestn;
+ uint8 mscan;
+ uint8 band;
+ uint16 chan_list[WL_NUMCHANNELS];
+ uint16 nchan;
+ uint16 rtt;
+ struct dhd_pno_get_batch_info get_batch;
+};
+struct dhd_pno_hotlist_params {
+ uint8 band;
+ int32 scan_fr;
+ uint16 chan_list[WL_NUMCHANNELS];
+ uint16 nchan;
+ uint16 nbssid;
+ struct list_head bssid_list;
+};
+
+#define DHD_PNO_CHSPEC_SUPPORT_VER 14
+
+#if defined(GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS)
+#define DHD_PNO_REPORT_NO_BATCH (1 << 2)
+
+typedef struct dhd_pno_gscan_channel_bucket {
+ uint16 bucket_freq_multiple;
+ /* band = 1 All bg band channels,
+ * band = 2 All a band channels,
+ * band = 0 chan_list channels
+ */
+ uint16 band;
+ uint8 report_flag;
+ uint8 num_channels;
+ uint16 repeat;
+ uint16 bucket_max_multiple;
+ uint16 chan_list[GSCAN_MAX_CHANNELS_IN_BUCKET];
+} dhd_pno_gscan_channel_bucket_t;
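+
+/* Illustrative configuration (example values only): one bucket scanning all
+ * 2.4 GHz channels every base period, and one scanning two fixed 5 GHz
+ * channels at three times the base period:
+ *
+ *   dhd_pno_gscan_channel_bucket_t buckets[2] = {
+ *       { .bucket_freq_multiple = 1, .band = GSCAN_BG_BAND_MASK },
+ *       { .bucket_freq_multiple = 3, .band = 0, .num_channels = 2,
+ *         .chan_list = { 36, 40 } },
+ *   };
+ */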
+
+#define DHD_PNO_AUTH_CODE_OPEN 1 /* Open */
+#define DHD_PNO_AUTH_CODE_PSK 2 /* WPA_PSK or WPA2PSK */
+#define DHD_PNO_AUTH_CODE_EAPOL 4 /* any EAPOL */
+
+#define DHD_EPNO_DEFAULT_INDEX 0xFFFFFFFF
+
+typedef struct dhd_epno_params {
+ uint8 ssid[DOT11_MAX_SSID_LEN];
+ uint8 ssid_len;
+ int8 rssi_thresh;
+ uint8 flags;
+ uint8 auth;
+	/* index required only for visible SSIDs */
+ uint32 index;
+ struct list_head list;
+} dhd_epno_params_t;
+
+typedef struct dhd_epno_results {
+ uint8 ssid[DOT11_MAX_SSID_LEN];
+ uint8 ssid_len;
+ int8 rssi;
+ uint16 channel;
+ uint16 flags;
+ struct ether_addr bssid;
+} dhd_epno_results_t;
+
+typedef struct dhd_pno_swc_evt_param {
+ uint16 results_rxed_so_far;
+ wl_pfn_significant_net_t *change_array;
+} dhd_pno_swc_evt_param_t;
+
+typedef struct wifi_gscan_result {
+ uint64 ts; /* Time of discovery */
+ char ssid[DOT11_MAX_SSID_LEN+1]; /* null terminated */
+ struct ether_addr macaddr; /* BSSID */
+ uint32 channel; /* channel frequency in MHz */
+	int32 rssi; /* in dBm */
+ uint64 rtt; /* in nanoseconds */
+ uint64 rtt_sd; /* standard deviation in rtt */
+ uint16 beacon_period; /* units are Kusec */
+ uint16 capability; /* Capability information */
+ uint32 pad;
+} wifi_gscan_result_t;
+
+typedef struct wifi_gscan_full_result {
+ wifi_gscan_result_t fixed;
+ uint32 scan_ch_bucket;
+ uint32 ie_length; /* byte length of Information Elements */
+ char ie_data[1]; /* IE data to follow */
+} wifi_gscan_full_result_t;
+
+typedef struct gscan_results_cache {
+ struct gscan_results_cache *next;
+ uint8 scan_id;
+ uint8 flag;
+ uint8 tot_count;
+ uint8 tot_consumed;
+ uint32 scan_ch_bucket;
+ wifi_gscan_result_t results[1];
+} gscan_results_cache_t;
+
+typedef struct dhd_pno_gscan_capabilities {
+ int max_scan_cache_size;
+ int max_scan_buckets;
+ int max_ap_cache_per_scan;
+ int max_rssi_sample_size;
+ int max_scan_reporting_threshold;
+ int max_hotlist_bssids;
+ int max_hotlist_ssids;
+ int max_significant_wifi_change_aps;
+ int max_bssid_history_entries;
+ int max_epno_ssid_crc32;
+ int max_epno_hidden_ssid;
+ int max_white_list_ssid;
+} dhd_pno_gscan_capabilities_t;
+
+typedef struct dhd_epno_ssid_cfg {
+ wl_ssid_ext_params_t params;
+ uint32 num_epno_ssid;
+ struct list_head epno_ssid_list;
+} dhd_epno_ssid_cfg_t;
+
+struct dhd_pno_gscan_params {
+ int32 scan_fr;
+ uint8 bestn;
+ uint8 mscan;
+ uint8 buffer_threshold;
+ uint8 swc_nbssid_threshold;
+ uint8 swc_rssi_window_size;
+ uint8 lost_ap_window;
+ uint8 nchannel_buckets;
+ uint8 reason;
+ uint8 get_batch_flag;
+ uint8 send_all_results_flag;
+ uint16 max_ch_bucket_freq;
+ gscan_results_cache_t *gscan_batch_cache;
+ gscan_results_cache_t *gscan_hotlist_found;
+	gscan_results_cache_t *gscan_hotlist_lost;
+ uint16 nbssid_significant_change;
+ uint16 nbssid_hotlist;
+ struct dhd_pno_swc_evt_param param_significant;
+ struct dhd_pno_gscan_channel_bucket channel_bucket[GSCAN_MAX_CH_BUCKETS];
+ struct list_head hotlist_bssid_list;
+ struct list_head significant_bssid_list;
+ dhd_epno_ssid_cfg_t epno_cfg;
+ uint32 scan_id;
+};
+
+typedef struct gscan_scan_params {
+ int32 scan_fr;
+ uint16 nchannel_buckets;
+ struct dhd_pno_gscan_channel_bucket channel_bucket[GSCAN_MAX_CH_BUCKETS];
+} gscan_scan_params_t;
+
+typedef struct gscan_batch_params {
+ uint8 bestn;
+ uint8 mscan;
+ uint8 buffer_threshold;
+} gscan_batch_params_t;
+
+struct bssid_t {
+ struct ether_addr macaddr;
+ int16 rssi_reporting_threshold; /* 0 -> no reporting threshold */
+};
+
+typedef struct gscan_hotlist_scan_params {
+ uint16 lost_ap_window; /* number of scans to declare LOST */
+ uint16 nbssid; /* number of bssids */
+ struct bssid_t bssid[1]; /* n bssids to follow */
+} gscan_hotlist_scan_params_t;
+
+#endif /* GSCAN_SUPPORT || DHD_GET_VALID_CHANNELS */
+
+typedef union dhd_pno_params {
+ struct dhd_pno_legacy_params params_legacy;
+ struct dhd_pno_batch_params params_batch;
+ struct dhd_pno_hotlist_params params_hotlist;
+#if defined(GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS)
+ struct dhd_pno_gscan_params params_gscan;
+#endif /* GSCAN_SUPPORT || DHD_GET_VALID_CHANNELS */
+} dhd_pno_params_t;
+
+typedef struct dhd_pno_status_info {
+ dhd_pub_t *dhd;
+ struct work_struct work;
+ struct mutex pno_mutex;
+#ifdef GSCAN_SUPPORT
+ wait_queue_head_t batch_get_wait;
+#endif /* GSCAN_SUPPORT */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0))
+ wait_queue_head_t get_batch_done;
+ bool batch_recvd;
+#else
+ struct completion get_batch_done;
+#endif
+ bool wls_supported; /* wifi location service supported or not */
+ enum dhd_pno_status pno_status;
+ enum dhd_pno_mode pno_mode;
+ dhd_pno_params_t pno_params_arr[INDEX_MODE_MAX];
+ struct list_head head_list;
+} dhd_pno_status_info_t;
+
+/* wrapper functions */
+extern int
+dhd_dev_pno_enable(struct net_device *dev, int enable);
+
+extern int
+dhd_dev_pno_stop_for_ssid(struct net_device *dev);
+
+extern int
+dhd_dev_pno_set_for_ssid(struct net_device *dev, wlc_ssid_ext_t* ssids_local, int nssid,
+ uint16 scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan);
+
+extern int
+dhd_dev_pno_set_for_batch(struct net_device *dev,
+ struct dhd_pno_batch_params *batch_params);
+
+extern int
+dhd_dev_pno_get_for_batch(struct net_device *dev, char *buf, int bufsize);
+
+extern int
+dhd_dev_pno_stop_for_batch(struct net_device *dev);
+
+extern int
+dhd_dev_pno_set_for_hotlist(struct net_device *dev, wl_pfn_bssid_t *p_pfn_bssid,
+ struct dhd_pno_hotlist_params *hotlist_params);
+extern bool dhd_dev_is_legacy_pno_enabled(struct net_device *dev);
+#if defined (GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS)
+extern void *
+dhd_dev_pno_get_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type, void *info,
+ uint32 *len);
+#endif /* GSCAN_SUPPORT || DHD_GET_VALID_CHANNELS */
+#ifdef GSCAN_SUPPORT
+extern int
+dhd_dev_pno_set_cfg_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type,
+ void *buf, bool flush);
+int dhd_dev_pno_lock_access_batch_results(struct net_device *dev);
+void dhd_dev_pno_unlock_access_batch_results(struct net_device *dev);
+extern int dhd_dev_pno_run_gscan(struct net_device *dev, bool run, bool flush);
+extern int dhd_dev_pno_enable_full_scan_result(struct net_device *dev, bool real_time);
+int dhd_retreive_batch_scan_results(dhd_pub_t *dhd);
+extern void * dhd_dev_hotlist_scan_event(struct net_device *dev,
+ const void *data, int *send_evt_bytes, hotlist_type_t type, u32 *buf_len);
+void * dhd_dev_process_full_gscan_result(struct net_device *dev,
+ const void *data, uint32 len, int *send_evt_bytes);
+extern int dhd_dev_gscan_batch_cache_cleanup(struct net_device *dev);
+extern void dhd_dev_gscan_hotlist_cache_cleanup(struct net_device *dev, hotlist_type_t type);
+extern int dhd_dev_wait_batch_results_complete(struct net_device *dev);
+extern void * dhd_dev_process_epno_result(struct net_device *dev,
+ const void *data, uint32 event, int *send_evt_bytes);
+extern int dhd_dev_set_epno(struct net_device *dev);
+extern int dhd_dev_flush_fw_epno(struct net_device *dev);
+#endif /* GSCAN_SUPPORT */
+/* dhd pno functions */
+extern int dhd_pno_stop_for_ssid(dhd_pub_t *dhd);
+extern int dhd_pno_enable(dhd_pub_t *dhd, int enable);
+extern int dhd_pno_set_for_ssid(dhd_pub_t *dhd, wlc_ssid_ext_t* ssid_list, int nssid,
+ uint16 scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan);
+
+extern int dhd_pno_set_for_batch(dhd_pub_t *dhd, struct dhd_pno_batch_params *batch_params);
+
+extern int dhd_pno_get_for_batch(dhd_pub_t *dhd, char *buf, int bufsize, int reason);
+
+extern int dhd_pno_stop_for_batch(dhd_pub_t *dhd);
+
+extern int dhd_pno_set_for_hotlist(dhd_pub_t *dhd, wl_pfn_bssid_t *p_pfn_bssid,
+ struct dhd_pno_hotlist_params *hotlist_params);
+
+extern int dhd_pno_stop_for_hotlist(dhd_pub_t *dhd);
+
+extern int dhd_pno_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data);
+extern int dhd_pno_init(dhd_pub_t *dhd);
+extern int dhd_pno_deinit(dhd_pub_t *dhd);
+extern bool dhd_is_pno_supported(dhd_pub_t *dhd);
+extern bool dhd_is_legacy_pno_enabled(dhd_pub_t *dhd);
+#if defined (GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS)
+extern void * dhd_pno_get_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type, void *info,
+ uint32 *len);
+#endif /* GSCAN_SUPPORT || DHD_GET_VALID_CHANNELS */
+#ifdef GSCAN_SUPPORT
+extern int dhd_pno_set_cfg_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type,
+ void *buf, bool flush);
+extern int dhd_pno_lock_batch_results(dhd_pub_t *dhd);
+extern void dhd_pno_unlock_batch_results(dhd_pub_t *dhd);
+extern int dhd_pno_initiate_gscan_request(dhd_pub_t *dhd, bool run, bool flush);
+extern int dhd_pno_enable_full_scan_result(dhd_pub_t *dhd, bool real_time_flag);
+extern int dhd_pno_cfg_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type, void *buf);
+extern int dhd_dev_retrieve_batch_scan(struct net_device *dev);
+extern void *dhd_handle_hotlist_scan_evt(dhd_pub_t *dhd, const void *event_data,
+ int *send_evt_bytes, hotlist_type_t type, u32 *buf_len);
+extern void *dhd_process_full_gscan_result(dhd_pub_t *dhd, const void *event_data,
+ uint32 len, int *send_evt_bytes);
+extern int dhd_gscan_batch_cache_cleanup(dhd_pub_t *dhd);
+extern void dhd_gscan_hotlist_cache_cleanup(dhd_pub_t *dhd, hotlist_type_t type);
+extern int dhd_wait_batch_results_complete(dhd_pub_t *dhd);
+extern void * dhd_pno_process_epno_result(dhd_pub_t *dhd, const void *data,
+ uint32 event, int *size);
+extern void dhd_pno_translate_epno_fw_flags(uint32 *flags);
+extern int dhd_pno_set_epno(dhd_pub_t *dhd);
+extern int dhd_pno_flush_fw_epno(dhd_pub_t *dhd);
+extern void dhd_pno_set_epno_auth_flag(uint32 *wpa_auth);
+#endif /* GSCAN_SUPPORT */
+#endif /* #if defined(OEM_ANDROID) && defined(PNO_SUPPORT) */
+
+#if defined(NDIS)
+#if defined(PNO_SUPPORT)
+extern int dhd_pno_cfg(dhd_pub_t *dhd, wl_pfn_cfg_t *pcfg);
+extern int dhd_pno_suspend(dhd_pub_t *dhd, int pfn_suspend);
+extern int dhd_pno_set_add(dhd_pub_t *dhd, wl_pfn_t *netinfo, int nssid, ushort scan_fr,
+ ushort slowscan_fr, uint8 pno_repeat, uint8 pno_freq_expo_max, int16 flags);
+extern int dhd_pno_enable(dhd_pub_t *dhd, int pfn_enabled);
+extern int dhd_pno_clean(dhd_pub_t *dhd);
+#endif /* #if defined(PNO_SUPPORT) */
+#endif /* #if defined(NDIS) */
+#endif /* __DHD_PNO_H__ */
diff --git a/bcmdhd.101.10.361.x/dhd_proto.h b/bcmdhd.101.10.361.x/dhd_proto.h
new file mode 100755
index 0000000..7f0b121
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_proto.h
@@ -0,0 +1,302 @@
+/*
+ * Header file describing the internal (inter-module) DHD interfaces.
+ *
+ * Provides type definitions and function prototypes used to link the
+ * DHD OS, bus, and protocol modules.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+#ifndef _dhd_proto_h_
+#define _dhd_proto_h_
+
+#include <dhdioctl.h>
+#include <wlioctl.h>
+#ifdef BCMPCIE
+#include <dhd_flowring.h>
+#endif
+
+#ifdef BCMINTERNAL
+#ifdef DHD_FWTRACE
+#include <bcm_fwtrace.h>
+#endif /* DHD_FWTRACE */
+#endif /* BCMINTERNAL */
+
+#define DEFAULT_IOCTL_RESP_TIMEOUT (5 * 1000) /* 5 seconds */
+#ifndef IOCTL_RESP_TIMEOUT
+#if defined(BCMQT_HW)
+#define IOCTL_RESP_TIMEOUT (600 * 1000) /* 600 sec in real time */
+#elif defined(BCMFPGA_HW)
+#define IOCTL_RESP_TIMEOUT (60 * 1000) /* 60 sec in real time */
+#else
+/* Default value in milliseconds for Production FW */
+#define IOCTL_RESP_TIMEOUT DEFAULT_IOCTL_RESP_TIMEOUT
+#endif /* BCMQT_HW */
+#endif /* IOCTL_RESP_TIMEOUT */
+
+#if defined(BCMQT_HW)
+#define IOCTL_DMAXFER_TIMEOUT (260 * 1000) /* 260 seconds */
+#elif defined(BCMFPGA_HW)
+#define IOCTL_DMAXFER_TIMEOUT (120 * 1000) /* 120 seconds */
+#else
+/* Default value in milliseconds for Production FW */
+#define IOCTL_DMAXFER_TIMEOUT (15 * 1000) /* 15 seconds for Production FW */
+#endif /* BCMQT_HW */
+
+#ifndef MFG_IOCTL_RESP_TIMEOUT
+#define MFG_IOCTL_RESP_TIMEOUT 20000 /* Default value in milliseconds for MFG FW */
+#endif /* MFG_IOCTL_RESP_TIMEOUT */
+
+#define DEFAULT_D3_ACK_RESP_TIMEOUT 2000
+#ifndef D3_ACK_RESP_TIMEOUT
+#define D3_ACK_RESP_TIMEOUT DEFAULT_D3_ACK_RESP_TIMEOUT
+#endif /* D3_ACK_RESP_TIMEOUT */
+
+#define DEFAULT_DHD_BUS_BUSY_TIMEOUT (IOCTL_RESP_TIMEOUT + 1000)
+#ifndef DHD_BUS_BUSY_TIMEOUT
+#define DHD_BUS_BUSY_TIMEOUT DEFAULT_DHD_BUS_BUSY_TIMEOUT
+#endif /* DEFAULT_DHD_BUS_BUSY_TIMEOUT */
+
+#define DS_EXIT_TIMEOUT 1000 /* In ms */
+#define DS_ENTER_TIMEOUT 1000 /* In ms */
+
+#define IOCTL_DISABLE_TIMEOUT 0
+
+/*
+ * Exported from the dhd protocol module (dhd_cdc, dhd_rndis)
+ */
+
+/* Linkage, sets prot link and updates hdrlen in pub */
+extern int dhd_prot_attach(dhd_pub_t *dhdp);
+
+/* Initializes the index block for DMA'ing indices */
+extern int dhd_prot_dma_indx_init(dhd_pub_t *dhdp, uint32 rw_index_sz,
+ uint8 type, uint32 length);
+#ifdef DHD_DMA_INDICES_SEQNUM
+extern int dhd_prot_dma_indx_copybuf_init(dhd_pub_t *dhd, uint32 buf_sz,
+ uint8 type);
+extern uint32 dhd_prot_read_seqnum(dhd_pub_t *dhd, bool host);
+extern void dhd_prot_write_host_seqnum(dhd_pub_t *dhd, uint32 seq_num);
+extern void dhd_prot_save_dmaidx(dhd_pub_t *dhd);
+#endif /* DHD_DMA_INDICES_SEQNUM */
+/* Unlink, frees allocated protocol memory (including dhd_prot) */
+extern void dhd_prot_detach(dhd_pub_t *dhdp);
+
+/* Initialize protocol: sync w/dongle state.
+ * Sets dongle media info (iswl, drv_version, mac address).
+ */
+extern int dhd_sync_with_dongle(dhd_pub_t *dhdp);
+
+/* Protocol initialization needed for IOCTL/IOVAR path */
+extern int dhd_prot_init(dhd_pub_t *dhd);
+
+/* Stop protocol: sync w/dongle state. */
+extern void dhd_prot_stop(dhd_pub_t *dhdp);
+
+/* Add any protocol-specific data header.
+ * Caller must reserve prot_hdrlen prepend space.
+ */
+extern void dhd_prot_hdrpush(dhd_pub_t *, int ifidx, void *txp);
+extern uint dhd_prot_hdrlen(dhd_pub_t *, void *txp);
+
+/* Remove any protocol-specific data header. */
+extern int dhd_prot_hdrpull(dhd_pub_t *, int *ifidx, void *rxp, uchar *buf, uint *len);
+
+/* Use protocol to issue ioctl to dongle */
+extern int dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len);
+
+/* Handles a protocol control response asynchronously */
+extern int dhd_prot_ctl_complete(dhd_pub_t *dhd);
+
+/* Check for and handle local prot-specific iovar commands */
+extern int dhd_prot_iovar_op(dhd_pub_t *dhdp, const char *name,
+ void *params, int plen, void *arg, int len, bool set);
+
+/* Add prot dump output to a buffer */
+extern void dhd_prot_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf);
+
+/* Dump extended trap data */
+extern int dhd_prot_dump_extended_trap(dhd_pub_t *dhdp, struct bcmstrbuf *b, bool raw);
+
+/* Update local copy of dongle statistics */
+extern void dhd_prot_dstats(dhd_pub_t *dhdp);
+
+extern int dhd_ioctl(dhd_pub_t * dhd_pub, dhd_ioctl_t *ioc, void * buf, uint buflen);
+
+extern int dhd_preinit_ioctls(dhd_pub_t *dhd);
+
+extern int dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf,
+ uint reorder_info_len, void **pkt, uint32 *free_buf_count);
+
+#ifdef BCMPCIE
+extern bool dhd_prot_process_msgbuf_txcpl(dhd_pub_t *dhd, uint bound, int ringtype);
+extern bool dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd, uint bound, int ringtype);
+extern bool dhd_prot_process_msgbuf_infocpl(dhd_pub_t *dhd, uint bound);
+#ifdef BTLOG
+extern bool dhd_prot_process_msgbuf_btlogcpl(dhd_pub_t *dhd, uint bound);
+#endif /* BTLOG */
+extern int dhd_prot_process_ctrlbuf(dhd_pub_t * dhd);
+extern int dhd_prot_process_trapbuf(dhd_pub_t * dhd);
+extern bool dhd_prot_dtohsplit(dhd_pub_t * dhd);
+extern int dhd_post_dummy_msg(dhd_pub_t *dhd);
+extern int dhdmsgbuf_lpbk_req(dhd_pub_t *dhd, uint len);
+extern void dhd_prot_rx_dataoffset(dhd_pub_t *dhd, uint32 offset);
+extern int dhd_prot_txdata(dhd_pub_t *dhd, void *p, uint8 ifidx);
+extern void dhd_prot_schedule_aggregate_h2d_db(dhd_pub_t *dhd, uint16 flow_id);
+extern int dhdmsgbuf_dmaxfer_req(dhd_pub_t *dhd,
+ uint len, uint srcdelay, uint destdelay, uint d11_lpbk, uint core_num,
+ uint32 mem_addr);
+extern int dhdmsgbuf_dmaxfer_status(dhd_pub_t *dhd, dma_xfer_info_t *result);
+
+extern void dhd_dma_buf_init(dhd_pub_t *dhd, void *dma_buf,
+ void *va, uint32 len, dmaaddr_t pa, void *dmah, void *secdma);
+extern void dhd_prot_flowrings_pool_release(dhd_pub_t *dhd,
+ uint16 flowid, void *msgbuf_ring);
+extern int dhd_prot_flow_ring_create(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node);
+extern int dhd_post_tx_ring_item(dhd_pub_t *dhd, void *PKTBUF, uint8 ifindex);
+extern int dhd_prot_flow_ring_delete(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node);
+extern int dhd_prot_flow_ring_flush(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node);
+extern int dhd_prot_ringupd_dump(dhd_pub_t *dhd, struct bcmstrbuf *b);
+extern uint32 dhd_prot_metadata_dbg_set(dhd_pub_t *dhd, bool val);
+extern uint32 dhd_prot_metadata_dbg_get(dhd_pub_t *dhd);
+extern uint32 dhd_prot_metadatalen_set(dhd_pub_t *dhd, uint32 val, bool rx);
+extern uint32 dhd_prot_metadatalen_get(dhd_pub_t *dhd, bool rx);
+extern void dhd_prot_print_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info, bool h2d,
+ struct bcmstrbuf *strbuf, const char * fmt);
+extern void dhd_prot_print_info(dhd_pub_t *dhd, struct bcmstrbuf *strbuf);
+extern void dhd_prot_update_txflowring(dhd_pub_t *dhdp, uint16 flow_id, void *msgring_info);
+extern void dhd_prot_txdata_write_flush(dhd_pub_t *dhd, uint16 flow_id);
+extern uint32 dhd_prot_txp_threshold(dhd_pub_t *dhd, bool set, uint32 val);
+extern void dhd_prot_reset(dhd_pub_t *dhd);
+extern uint16 dhd_get_max_flow_rings(dhd_pub_t *dhd);
+
+#ifdef IDLE_TX_FLOW_MGMT
+extern int dhd_prot_flow_ring_batch_suspend_request(dhd_pub_t *dhd, uint16 *ringid, uint16 count);
+extern int dhd_prot_flow_ring_resume(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node);
+#endif /* IDLE_TX_FLOW_MGMT */
+extern int dhd_prot_init_info_rings(dhd_pub_t *dhd);
+#ifdef BTLOG
+extern int dhd_prot_init_btlog_rings(dhd_pub_t *dhd);
+#endif /* BTLOG */
+#ifdef DHD_HP2P
+extern int dhd_prot_init_hp2p_rings(dhd_pub_t *dhd);
+#endif /* DHD_HP2P */
+extern int dhd_prot_check_tx_resource(dhd_pub_t *dhd);
+#endif /* BCMPCIE */
+
+#ifdef DHD_LB
+extern void dhd_lb_tx_compl_handler(unsigned long data);
+extern void dhd_lb_rx_compl_handler(unsigned long data);
+extern void dhd_lb_rx_process_handler(unsigned long data);
+#endif /* DHD_LB */
+extern int dhd_prot_h2d_mbdata_send_ctrlmsg(dhd_pub_t *dhd, uint32 mb_data);
+
+#ifdef BCMPCIE
+extern int dhd_prot_send_host_timestamp(dhd_pub_t *dhdp, uchar *tlv, uint16 tlv_len,
+ uint16 seq, uint16 xt_id);
+extern bool dhd_prot_data_path_tx_timestamp_logging(dhd_pub_t *dhd, bool enable, bool set);
+extern bool dhd_prot_data_path_rx_timestamp_logging(dhd_pub_t *dhd, bool enable, bool set);
+extern bool dhd_prot_pkt_noretry(dhd_pub_t *dhd, bool enable, bool set);
+extern bool dhd_prot_pkt_noaggr(dhd_pub_t *dhd, bool enable, bool set);
+extern bool dhd_prot_pkt_fixed_rate(dhd_pub_t *dhd, bool enable, bool set);
+#else /* BCMPCIE */
+#define dhd_prot_send_host_timestamp(a, b, c, d, e) 0
+#define dhd_prot_data_path_tx_timestamp_logging(a, b, c) 0
+#define dhd_prot_data_path_rx_timestamp_logging(a, b, c) 0
+#endif /* BCMPCIE */
+
+extern void dhd_prot_dma_indx_free(dhd_pub_t *dhd);
+
+#ifdef SNAPSHOT_UPLOAD
+/* send request to take snapshot */
+int dhd_prot_send_snapshot_request(dhd_pub_t *dhdp, uint8 snapshot_type, uint8 snapshot_param);
+/* get uploaded snapshot */
+int dhd_prot_get_snapshot(dhd_pub_t *dhdp, uint8 snapshot_type, uint32 offset,
+ uint32 dst_buf_size, uint8 *dst_buf, uint32 *dst_size, bool *is_more);
+#endif /* SNAPSHOT_UPLOAD */
+
+#ifdef EWP_EDL
+int dhd_prot_init_edl_rings(dhd_pub_t *dhd);
+bool dhd_prot_process_msgbuf_edl(dhd_pub_t *dhd);
+int dhd_prot_process_edl_complete(dhd_pub_t *dhd, void *evt_decode_data);
+#endif /* EWP_EDL */
+
+/* APIs for managing a DMA-able buffer */
+int dhd_dma_buf_alloc(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf, uint32 buf_len);
+void dhd_dma_buf_free(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
+void dhd_local_buf_reset(char *buf, uint32 len);
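+
+/* Usage sketch (hypothetical caller): allocate a DMA-able buffer, hand its
+ * bus address to the dongle, and release it on detach.
+ *
+ *   dhd_dma_buf_t dma_buf;
+ *   if (dhd_dma_buf_alloc(dhd, &dma_buf, 4096) != BCME_OK)
+ *       return BCME_NOMEM;
+ *   ... post dma_buf.pa / dma_buf.va to the device ...
+ *   dhd_dma_buf_free(dhd, &dma_buf);
+ */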
+
+/********************************
+ * For version-string expansion *
+ */
+#if defined(BDC)
+#define DHD_PROTOCOL "bdc"
+#elif defined(CDC)
+#define DHD_PROTOCOL "cdc"
+#else
+#define DHD_PROTOCOL "unknown"
+#endif /* proto */
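+
+/* DHD_PROTOCOL is typically folded into the driver version string; a sketch
+ * (the exact format is build-dependent):
+ *
+ *   snprintf(buf, sizeof(buf), "dhd %s (%s)", EPI_VERSION_STR, DHD_PROTOCOL);
+ */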
+
+int dhd_get_hscb_info(dhd_pub_t *dhd, void ** va, uint32 *len);
+int dhd_get_hscb_buff(dhd_pub_t *dhd, uint32 offset, uint32 length, void * buff);
+
+#ifdef BCMINTERNAL
+typedef struct host_page_location_info {
+ uint32 addr_lo;
+ uint32 addr_hi;
+ uint32 binary_size;
+ uint32 tlv_size;
+ uint32 tlv_signature;
+} host_page_location_info_t;
+#define BCM_HOST_PAGE_LOCATION_SIGNATURE 0xFEED10C5u
+
+#ifdef DHD_FWTRACE
+typedef struct host_fwtrace_buf_location_info {
+ fwtrace_hostaddr_info_t host_buf_info;
+ uint32 tlv_size;
+ uint32 tlv_signature;
+} host_fwtrace_buf_location_info_t;
+/* Host buffer info for pushing the trace info */
+#define BCM_HOST_FWTRACE_BUF_LOCATION_SIGNATURE 0xFEED10C6u
+#endif /* DHD_FWTRACE */
+#endif /* BCMINTERNAL */
+
+#ifdef DHD_HP2P
+extern uint8 dhd_prot_hp2p_enable(dhd_pub_t *dhd, bool set, int enable);
+extern uint32 dhd_prot_pkt_threshold(dhd_pub_t *dhd, bool set, uint32 val);
+extern uint32 dhd_prot_time_threshold(dhd_pub_t *dhd, bool set, uint32 val);
+extern uint32 dhd_prot_pkt_expiry(dhd_pub_t *dhd, bool set, uint32 val);
+#endif
+
+#ifdef DHD_MAP_LOGGING
+extern void dhd_prot_smmu_fault_dump(dhd_pub_t *dhdp);
+#endif /* DHD_MAP_LOGGING */
+
+extern uint16 dhd_prot_get_h2d_max_txpost(dhd_pub_t *dhd);
+extern void dhd_prot_set_h2d_max_txpost(dhd_pub_t *dhd, uint16 max_txpost);
+
+#if defined(DHD_HTPUT_TUNABLES)
+extern uint16 dhd_prot_get_h2d_htput_max_txpost(dhd_pub_t *dhd);
+extern void dhd_prot_set_h2d_htput_max_txpost(dhd_pub_t *dhd, uint16 max_txpost);
+#endif /* DHD_HTPUT_TUNABLES */
+
+#endif /* _dhd_proto_h_ */
diff --git a/bcmdhd.101.10.361.x/dhd_qos_algo.h b/bcmdhd.101.10.361.x/dhd_qos_algo.h
new file mode 100755
index 0000000..368d120
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_qos_algo.h
@@ -0,0 +1,90 @@
+/*
+ * Header file for QOS Algorithm on DHD
+ *
+ * Provides type definitions and function prototypes for the QOS Algorithm
+ * Note that this algorithm is a platform-independent layer
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+#ifndef _DHD_QOS_ALGO_H_
+#define _DHD_QOS_ALGO_H_
+
+#define LOWLAT_AVG_PKT_SIZE_LOW 50u
+#define LOWLAT_AVG_PKT_SIZE_HIGH 200u
+#define LOWLAT_NUM_PKTS_LOW 1u
+#define LOWLAT_NUM_PKTS_HIGH 8u
+#define LOWLAT_DETECT_CNT_INC_THRESH 10u
+#define LOWLAT_DETECT_CNT_DEC_THRESH 0u
+#define LOWLAT_DETECT_CNT_UPGRADE_THRESH 4u
+
+typedef struct qos_stat
+{
+ /* Statistics */
+ unsigned long tx_pkts_prev;
+ unsigned long tx_bytes_prev;
+ unsigned long tx_pkts;
+ unsigned long tx_bytes;
+
+ /* low latency flow detection algorithm counts */
+ unsigned char lowlat_detect_count;
+ bool lowlat_flow;
+} qos_stat_t;
+
+/* QoS algorithm parameters, controllable at runtime */
+typedef struct _qos_algo_params
+{
+ /* The avg Tx packet size in the sampling interval must be between
+ * these two thresholds for QoS upgrade to take place.
+ * default values = LOWLAT_AVG_PKT_SIZE_LOW, LOWLAT_AVG_PKT_SIZE_HIGH
+ */
+ unsigned long avg_pkt_size_low_thresh;
+ unsigned long avg_pkt_size_high_thresh;
+ /* The number of Tx packets in the sampling interval must be
+ * between these two thresholds for QoS upgrade to happen.
+ * default values = LOWLAT_NUM_PKTS_LOW, LOWLAT_NUM_PKTS_HIGH
+ */
+ unsigned long num_pkts_low_thresh;
+ unsigned long num_pkts_high_thresh;
+ /* If low latency traffic is detected, then the low latency count
+ * is incremented till the first threshold is hit.
+ * If traffic ceases to be low latency, then the count is
+ * decremented till the second threshold is hit.
+ * default values = LOWLAT_DETECT_CNT_INC_THRESH, LOWLAT_DETECT_CNT_DEC_THRESH
+ */
+ unsigned char detect_cnt_inc_thresh;
+ unsigned char detect_cnt_dec_thresh;
+ /* If the low latency count crosses this threshold, the flow will be upgraded.
+ * Default value = LOWLAT_DETECT_CNT_UPGRADE_THRESH
+ */
+ unsigned char detect_cnt_upgrade_thresh;
+} qos_algo_params_t;
+
+#define QOS_PARAMS(x) (&((x)->psk_qos->qos_params))
+
+/*
+ * Operates on a flow and returns 1 for upgrade and 0 for
+ * no upgrade.
+ */
+int dhd_qos_algo(dhd_info_t *dhd, qos_stat_t *qos, qos_algo_params_t *qos_params);
+int qos_algo_params_init(qos_algo_params_t *qos_params);
+#endif /* _DHD_QOS_ALGO_H_ */
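+/*
+ * Editorial sketch (not part of this patch): one minimal reading of the
+ * detection scheme the comments above describe. Per sampling interval, a
+ * flow whose average Tx packet size and packet count both fall inside the
+ * configured windows bumps lowlat_detect_count (saturating at
+ * detect_cnt_inc_thresh); otherwise the count decays toward
+ * detect_cnt_dec_thresh. The flow is flagged low latency once the count
+ * crosses detect_cnt_upgrade_thresh. The real dhd_qos_algo() lives
+ * elsewhere in the driver; the strict-vs-inclusive comparisons here are
+ * assumptions.
+ */
+#if 0 /* example only */
+static int example_lowlat_detect(qos_stat_t *qos, const qos_algo_params_t *p)
+{
+	unsigned long pkts = qos->tx_pkts - qos->tx_pkts_prev;
+	unsigned long bytes = qos->tx_bytes - qos->tx_bytes_prev;
+	unsigned long avg_sz = pkts ? (bytes / pkts) : 0;
+	bool in_window = (avg_sz > p->avg_pkt_size_low_thresh) &&
+		(avg_sz < p->avg_pkt_size_high_thresh) &&
+		(pkts > p->num_pkts_low_thresh) &&
+		(pkts < p->num_pkts_high_thresh);
+
+	if (in_window) {
+		if (qos->lowlat_detect_count < p->detect_cnt_inc_thresh)
+			qos->lowlat_detect_count++;
+	} else if (qos->lowlat_detect_count > p->detect_cnt_dec_thresh) {
+		qos->lowlat_detect_count--;
+	}
+	qos->lowlat_flow = (qos->lowlat_detect_count > p->detect_cnt_upgrade_thresh);
+
+	/* roll the window for the next sampling interval */
+	qos->tx_pkts_prev = qos->tx_pkts;
+	qos->tx_bytes_prev = qos->tx_bytes;
+	return qos->lowlat_flow ? 1 : 0;
+}
+#endif /* example only */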
diff --git a/bcmdhd.101.10.361.x/dhd_rtt.c b/bcmdhd.101.10.361.x/dhd_rtt.c
new file mode 100755
index 0000000..9f807e9
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_rtt.c
@@ -0,0 +1,4855 @@
+/*
+ * Broadcom Dongle Host Driver (DHD), RTT
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+#include <typedefs.h>
+#include <osl.h>
+
+#include <epivers.h>
+#include <bcmutils.h>
+
+#include <bcmendian.h>
+#include <linuxver.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/sort.h>
+#include <dngl_stats.h>
+#include <wlioctl.h>
+#include <bcmwifi_rspec.h>
+
+#include <bcmevent.h>
+#include <dhd.h>
+#include <dhd_rtt.h>
+#include <dhd_dbg.h>
+#include <dhd_bus.h>
+#include <wldev_common.h>
+#ifdef WL_CFG80211
+#include <wl_cfg80211.h>
+#endif /* WL_CFG80211 */
+#ifdef WL_NAN
+#include <wl_cfgnan.h>
+#endif /* WL_NAN */
+
+static DEFINE_SPINLOCK(noti_list_lock);
+#define NULL_CHECK(p, s, err) \
+ do { \
+ if (!(p)) { \
+ printf("NULL POINTER (%s) : %s\n", __FUNCTION__, (s)); \
+ err = BCME_ERROR; \
+ return err; \
+ } \
+ } while (0)
+#define DHD_RTT_CHK_SET_PARAM(param, param_cnt, targets, tlvid) \
+ do { \
+ if ((param_cnt) >= FTM_MAX_PARAMS) { \
+ DHD_RTT_ERR(("Param cnt exceeded for FTM cfg iovar\n")); \
+ err = BCME_ERROR; \
+ goto exit; \
+ } else { \
+ dhd_rtt_set_ftm_config_param((param), &(param_cnt), \
+ (targets), (tlvid)); \
+ }\
+ } while (0)
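+/*
+ * Editorial note: both macros above depend on their call site -- NULL_CHECK
+ * returns from the enclosing function, and DHD_RTT_CHK_SET_PARAM expects a
+ * local 'err' variable and an 'exit:' label to be in scope.
+ */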
+
+#define TIMESPEC_TO_US(ts) (((uint64)(ts).tv_sec * USEC_PER_SEC) + \
+ (ts).tv_nsec / NSEC_PER_USEC)
+
+#undef DHD_RTT_MEM
+#undef DHD_RTT_ERR
+#define DHD_RTT_MEM DHD_LOG_MEM
+#define DHD_RTT_ERR DHD_ERROR
+
+#define FTM_IOC_BUFSZ  2048 /* ioctl buffer size for our module (> BCM_XTLV_HDR_SIZE) */
+#define FTM_AVAIL_MAX_SLOTS 32
+#define FTM_MAX_CONFIGS 10
+#define FTM_MAX_PARAMS 20
+#define FTM_DEFAULT_SESSION 1
+#define FTM_BURST_TIMEOUT_UNIT 250 /* 250 ns */
+#define FTM_INVALID -1
+#define FTM_DEFAULT_CNT_20M 24u
+#define FTM_DEFAULT_CNT_40M 16u
+#define FTM_DEFAULT_CNT_80M 11u
+/* To handle congested environments, cap the burst duration/timeout */
+#define FTM_MAX_BURST_DUR_TMO_MS 128u
+
+/* convenience macros */
+#define FTM_TU2MICRO(_tu) ((uint64)(_tu) << 10)
+#define FTM_MICRO2TU(_tu) ((uint64)(_tu) >> 10)
+#define FTM_TU2MILLI(_tu) ((uint32)FTM_TU2MICRO(_tu) / 1000)
+#define FTM_MICRO2MILLI(_x) ((uint32)(_x) / 1000)
+#define FTM_MICRO2SEC(_x) ((uint32)(_x) / 1000000)
+#define FTM_INTVL2NSEC(_intvl) ((uint32)ftm_intvl2nsec(_intvl))
+#define FTM_INTVL2USEC(_intvl) ((uint32)ftm_intvl2usec(_intvl))
+#define FTM_INTVL2MSEC(_intvl) (FTM_INTVL2USEC(_intvl) / 1000)
+#define FTM_INTVL2SEC(_intvl) (FTM_INTVL2USEC(_intvl) / 1000000)
+#define FTM_USECIN100MILLI(_usec) ((_usec) / 100000)
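+/*
+ * Editorial note: 1 TU (time unit) is 1024 us, hence the shift by 10 in the
+ * TU conversions above; e.g. FTM_TU2MICRO(100) == 102400 us and
+ * FTM_TU2MILLI(100) == 102 ms.
+ */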
+
+/* Broadcom-specific setting for more accurate data */
+#define ENABLE_VHT_ACK
+#define CH_MIN_5G_CHANNEL 34
+
+/* CUR ETH became obsolete from this major version onwards */
+#define RTT_IOV_CUR_ETH_OBSOLETE 12
+
+/*
+ * Parallel RTT sessions are supported
+ * from this major and minor version onwards
+ */
+#define RTT_PARALLEL_SSNS_SUPPORTED_MAJ_VER 14
+#define RTT_PARALLEL_SSNS_SUPPORTED_MIN_VER 2
+
+/* PROXD TIMEOUT */
+#define DHD_RTT_TIMER_INTERVAL_MS 5000u
+#define DHD_NAN_RTT_TIMER_INTERVAL_MS 20000u
+
+#define DHD_NAN_RTT_MAX_SESSIONS 4u
+#define DHD_NAN_RTT_MAX_SESSIONS_LEGACY 1u
+
+struct rtt_noti_callback {
+ struct list_head list;
+ void *ctx;
+ dhd_rtt_compl_noti_fn noti_fn;
+};
+
+/* bitmask indicating which command group(s) a subcommand belongs to */
+typedef enum {
+ FTM_SUBCMD_FLAG_METHOD = 0x01, /* FTM method command */
+ FTM_SUBCMD_FLAG_SESSION = 0x02, /* FTM session command */
+ FTM_SUBCMD_FLAG_ALL = FTM_SUBCMD_FLAG_METHOD | FTM_SUBCMD_FLAG_SESSION
+} ftm_subcmd_flag_t;
+
+/* proxd ftm config-category definition */
+typedef enum {
+	FTM_CONFIG_CAT_GENERAL = 1,	/* general configuration */
+ FTM_CONFIG_CAT_OPTIONS = 2, /* 'config options' */
+ FTM_CONFIG_CAT_AVAIL = 3, /* 'config avail' */
+} ftm_config_category_t;
+
+typedef struct ftm_subcmd_info {
+ int16 version; /* FTM version (optional) */
+ char *name; /* cmd-name string as cmdline input */
+ wl_proxd_cmd_t cmdid; /* cmd-id */
+ bcm_xtlv_unpack_cbfn_t *handler; /* cmd response handler (optional) */
+ ftm_subcmd_flag_t cmdflag; /* CMD flag (optional) */
+} ftm_subcmd_info_t;
+
+typedef struct ftm_config_options_info {
+ uint32 flags; /* wl_proxd_flags_t/wl_proxd_session_flags_t */
+ bool enable;
+} ftm_config_options_info_t;
+
+typedef struct ftm_config_param_info {
+ uint16 tlvid; /* mapping TLV id for the item */
+ union {
+ uint32 chanspec;
+ struct ether_addr mac_addr;
+ wl_proxd_intvl_t data_intvl;
+ uint32 data32;
+ uint16 data16;
+ uint8 data8;
+ uint32 event_mask;
+ };
+} ftm_config_param_info_t;
+
+/*
+* definition for id-to-string mapping.
+* This is used to map an id (cmd-id, tlv-id, etc.) to a text string
+* for debug display or command-log display
+*/
+typedef struct ftm_strmap_entry {
+ int32 id;
+ char *text;
+} ftm_strmap_entry_t;
+
+typedef struct ftm_status_map_host_entry {
+ wl_proxd_status_t proxd_status;
+ rtt_reason_t rtt_reason;
+} ftm_status_map_host_entry_t;
+
+static uint16
+rtt_result_ver(uint16 tlvid, const uint8 *p_data);
+
+static int
+dhd_rtt_convert_results_to_host_v1(rtt_result_t *rtt_result, const uint8 *p_data,
+ uint16 tlvid, uint16 len);
+
+static int
+dhd_rtt_convert_results_to_host_v2(rtt_result_t *rtt_result, const uint8 *p_data,
+ uint16 tlvid, uint16 len);
+
+static wifi_rate_v1
+dhd_rtt_convert_rate_to_host(uint32 ratespec);
+
+#if defined(WL_CFG80211) && defined(RTT_DEBUG)
+const char *
+ftm_cmdid_to_str(uint16 cmdid);
+#endif /* WL_CFG80211 && RTT_DEBUG */
+
+#ifdef WL_CFG80211
+static int
+dhd_rtt_start(dhd_pub_t *dhd);
+static int dhd_rtt_create_failure_result(rtt_status_info_t *rtt_status,
+ struct ether_addr *addr);
+static void dhd_rtt_handle_rtt_session_end(dhd_pub_t *dhd);
+static void dhd_rtt_timeout_work(struct work_struct *work);
+static bool dhd_rtt_get_report_header(rtt_status_info_t *rtt_status,
+ rtt_results_header_t **rtt_results_header, struct ether_addr *addr);
+#ifdef WL_NAN
+static void dhd_rtt_trigger_pending_targets_on_session_end(dhd_pub_t *dhd);
+#endif /* WL_NAN */
+#endif /* WL_CFG80211 */
+static const int burst_duration_idx[] = {0, 0, 1, 2, 4, 8, 16, 32, 64, 128, 0, 0};
+
+/* ftm status mapping to host status */
+static const ftm_status_map_host_entry_t ftm_status_map_info[] = {
+ {WL_PROXD_E_INCOMPLETE, RTT_STATUS_FAILURE},
+ {WL_PROXD_E_OVERRIDDEN, RTT_STATUS_FAILURE},
+ {WL_PROXD_E_ASAP_FAILED, RTT_STATUS_FAILURE},
+ {WL_PROXD_E_NOTSTARTED, RTT_STATUS_FAIL_NOT_SCHEDULED_YET},
+ {WL_PROXD_E_INVALIDMEAS, RTT_STATUS_FAIL_INVALID_TS},
+ {WL_PROXD_E_INCAPABLE, RTT_STATUS_FAIL_NO_CAPABILITY},
+ {WL_PROXD_E_MISMATCH, RTT_STATUS_FAILURE},
+ {WL_PROXD_E_DUP_SESSION, RTT_STATUS_FAILURE},
+ {WL_PROXD_E_REMOTE_FAIL, RTT_STATUS_FAILURE},
+ {WL_PROXD_E_REMOTE_INCAPABLE, RTT_STATUS_FAILURE},
+ {WL_PROXD_E_SCHED_FAIL, RTT_STATUS_FAIL_SCHEDULE},
+ {WL_PROXD_E_PROTO, RTT_STATUS_FAIL_PROTOCOL},
+ {WL_PROXD_E_EXPIRED, RTT_STATUS_FAILURE},
+ {WL_PROXD_E_TIMEOUT, RTT_STATUS_FAIL_TM_TIMEOUT},
+ {WL_PROXD_E_NOACK, RTT_STATUS_FAIL_NO_RSP},
+ {WL_PROXD_E_DEFERRED, RTT_STATUS_FAILURE},
+ {WL_PROXD_E_INVALID_SID, RTT_STATUS_FAILURE},
+ {WL_PROXD_E_REMOTE_CANCEL, RTT_STATUS_FAILURE},
+ {WL_PROXD_E_CANCELED, RTT_STATUS_ABORTED},
+ {WL_PROXD_E_INVALID_SESSION, RTT_STATUS_FAILURE},
+ {WL_PROXD_E_BAD_STATE, RTT_STATUS_FAILURE},
+ {WL_PROXD_E_ERROR, RTT_STATUS_FAILURE},
+ {WL_PROXD_E_OK, RTT_STATUS_SUCCESS}
+};
+
+static const ftm_strmap_entry_t ftm_event_type_loginfo[] = {
+ /* wl_proxd_event_type_t, text-string */
+ { WL_PROXD_EVENT_NONE, "none" },
+ { WL_PROXD_EVENT_SESSION_CREATE, "session create" },
+ { WL_PROXD_EVENT_SESSION_START, "session start" },
+ { WL_PROXD_EVENT_FTM_REQ, "FTM req" },
+ { WL_PROXD_EVENT_BURST_START, "burst start" },
+ { WL_PROXD_EVENT_BURST_END, "burst end" },
+ { WL_PROXD_EVENT_SESSION_END, "session end" },
+ { WL_PROXD_EVENT_SESSION_RESTART, "session restart" },
+ { WL_PROXD_EVENT_BURST_RESCHED, "burst rescheduled" },
+ { WL_PROXD_EVENT_SESSION_DESTROY, "session destroy" },
+ { WL_PROXD_EVENT_RANGE_REQ, "range request" },
+ { WL_PROXD_EVENT_FTM_FRAME, "FTM frame" },
+ { WL_PROXD_EVENT_DELAY, "delay" },
+ { WL_PROXD_EVENT_VS_INITIATOR_RPT, "initiator-report " }, /* rx initiator-rpt */
+ { WL_PROXD_EVENT_RANGING, "ranging " },
+ { WL_PROXD_EVENT_COLLECT, "collect" },
+ { WL_PROXD_EVENT_MF_STATS, "mf_stats" },
+ { WL_PROXD_EVENT_START_WAIT, "start-wait"}
+};
+
+/*
+* session-state --> text string mapping
+*/
+static const ftm_strmap_entry_t ftm_session_state_value_loginfo[] = {
+ /* wl_proxd_session_state_t, text string */
+ { WL_PROXD_SESSION_STATE_CREATED, "created" },
+ { WL_PROXD_SESSION_STATE_CONFIGURED, "configured" },
+ { WL_PROXD_SESSION_STATE_STARTED, "started" },
+ { WL_PROXD_SESSION_STATE_DELAY, "delay" },
+ { WL_PROXD_SESSION_STATE_USER_WAIT, "user-wait" },
+ { WL_PROXD_SESSION_STATE_SCHED_WAIT, "sched-wait" },
+ { WL_PROXD_SESSION_STATE_BURST, "burst" },
+ { WL_PROXD_SESSION_STATE_STOPPING, "stopping" },
+ { WL_PROXD_SESSION_STATE_ENDED, "ended" },
+ { WL_PROXD_SESSION_STATE_DESTROYING, "destroying" },
+ { WL_PROXD_SESSION_STATE_NONE, "none" }
+};
+
+/*
+* status --> text string mapping
+*/
+static const ftm_strmap_entry_t ftm_status_value_loginfo[] = {
+ /* wl_proxd_status_t, text-string */
+ { WL_PROXD_E_OVERRIDDEN, "overridden" },
+ { WL_PROXD_E_ASAP_FAILED, "ASAP failed" },
+ { WL_PROXD_E_NOTSTARTED, "not started" },
+ { WL_PROXD_E_INVALIDMEAS, "invalid measurement" },
+ { WL_PROXD_E_INCAPABLE, "incapable" },
+ { WL_PROXD_E_MISMATCH, "mismatch"},
+ { WL_PROXD_E_DUP_SESSION, "dup session" },
+ { WL_PROXD_E_REMOTE_FAIL, "remote fail" },
+ { WL_PROXD_E_REMOTE_INCAPABLE, "remote incapable" },
+ { WL_PROXD_E_SCHED_FAIL, "sched failure" },
+ { WL_PROXD_E_PROTO, "protocol error" },
+ { WL_PROXD_E_EXPIRED, "expired" },
+ { WL_PROXD_E_TIMEOUT, "timeout" },
+ { WL_PROXD_E_NOACK, "no ack" },
+ { WL_PROXD_E_DEFERRED, "deferred" },
+ { WL_PROXD_E_INVALID_SID, "invalid session id" },
+ { WL_PROXD_E_REMOTE_CANCEL, "remote cancel" },
+ { WL_PROXD_E_CANCELED, "canceled" },
+ { WL_PROXD_E_INVALID_SESSION, "invalid session" },
+ { WL_PROXD_E_BAD_STATE, "bad state" },
+ { WL_PROXD_E_ERROR, "error" },
+ { WL_PROXD_E_OK, "OK" }
+};
+
+/*
+* time interval unit --> text string mapping
+*/
+static const ftm_strmap_entry_t ftm_tmu_value_loginfo[] = {
+ /* wl_proxd_tmu_t, text-string */
+ { WL_PROXD_TMU_TU, "TU" },
+ { WL_PROXD_TMU_SEC, "sec" },
+ { WL_PROXD_TMU_MILLI_SEC, "ms" },
+ { WL_PROXD_TMU_MICRO_SEC, "us" },
+ { WL_PROXD_TMU_NANO_SEC, "ns" },
+ { WL_PROXD_TMU_PICO_SEC, "ps" }
+};
+
+struct ieee_80211_mcs_rate_info {
+ uint8 constellation_bits;
+ uint8 coding_q;
+ uint8 coding_d;
+};
+
+static const struct ieee_80211_mcs_rate_info wl_mcs_info[] = {
+ { 1, 1, 2 }, /* MCS 0: MOD: BPSK, CR 1/2 */
+ { 2, 1, 2 }, /* MCS 1: MOD: QPSK, CR 1/2 */
+ { 2, 3, 4 }, /* MCS 2: MOD: QPSK, CR 3/4 */
+ { 4, 1, 2 }, /* MCS 3: MOD: 16QAM, CR 1/2 */
+ { 4, 3, 4 }, /* MCS 4: MOD: 16QAM, CR 3/4 */
+ { 6, 2, 3 }, /* MCS 5: MOD: 64QAM, CR 2/3 */
+ { 6, 3, 4 }, /* MCS 6: MOD: 64QAM, CR 3/4 */
+ { 6, 5, 6 }, /* MCS 7: MOD: 64QAM, CR 5/6 */
+ { 8, 3, 4 }, /* MCS 8: MOD: 256QAM, CR 3/4 */
+ { 8, 5, 6 } /* MCS 9: MOD: 256QAM, CR 5/6 */
+};
+
+/**
+ * Returns the rate in [Kbps] units for a caller supplied MCS/bandwidth/Nss/Sgi combination.
+ * 'mcs' : a *single* spatial stream MCS (11n or 11ac)
+ */
+uint
+rate_mcs2rate(uint mcs, uint nss, uint bw, int sgi)
+{
+ const int ksps = 250; /* kilo symbols per sec, 4 us sym */
+ const int Nsd_20MHz = 52;
+ const int Nsd_40MHz = 108;
+ const int Nsd_80MHz = 234;
+ const int Nsd_160MHz = 468;
+ uint rate;
+
+ if (mcs == 32) {
+ /* just return fixed values for mcs32 instead of trying to parametrize */
+ rate = (sgi == 0) ? 6000 : 6778;
+ } else if (mcs <= 9) {
+ /* This calculation works for 11n HT and 11ac VHT if the HT mcs values
+ * are decomposed into a base MCS = MCS % 8, and Nss = 1 + MCS / 8.
+ * That is, HT MCS 23 is a base MCS = 7, Nss = 3
+ */
+
+ /* find the number of complex numbers per symbol */
+ if (RSPEC_IS20MHZ(bw)) {
+ /* 4360 TODO: eliminate Phy const in rspec bw, then just compare
+ * as in 80 and 160 case below instead of RSPEC_IS20MHZ(bw)
+ */
+ rate = Nsd_20MHz;
+ } else if (RSPEC_IS40MHZ(bw)) {
+ /* 4360 TODO: eliminate Phy const in rspec bw, then just compare
+ * as in 80 and 160 case below instead of RSPEC_IS40MHZ(bw)
+ */
+ rate = Nsd_40MHz;
+ } else if (bw == WL_RSPEC_BW_80MHZ) {
+ rate = Nsd_80MHz;
+ } else if (bw == WL_RSPEC_BW_160MHZ) {
+ rate = Nsd_160MHz;
+ } else {
+ rate = 0;
+ }
+
+ /* multiply by bits per number from the constellation in use */
+ rate = rate * wl_mcs_info[mcs].constellation_bits;
+
+ /* adjust for the number of spatial streams */
+ rate = rate * nss;
+
+ /* adjust for the coding rate given as a quotient and divisor */
+ rate = (rate * wl_mcs_info[mcs].coding_q) / wl_mcs_info[mcs].coding_d;
+
+ /* multiply by Kilo symbols per sec to get Kbps */
+ rate = rate * ksps;
+
+ /* adjust the symbols per sec for SGI
+ * symbol duration is 4 us without SGI, and 3.6 us with SGI,
+ * so ratio is 10 / 9
+ */
+ if (sgi) {
+ /* add 4 for rounding of division by 9 */
+ rate = ((rate * 10) + 4) / 9;
+ }
+ } else {
+ rate = 0;
+ }
+
+ return rate;
+} /* wlc_rate_mcs2rate */
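+/*
+ * Editorial worked example (values follow directly from the code above):
+ * HT MCS 15 decomposes into base MCS = 15 % 8 = 7 and Nss = 1 + 15 / 8 = 2.
+ * At 40 MHz: 108 subcarriers * 6 bits (64QAM) * 2 streams * 5/6 coding
+ * = 1080 bits/symbol, * 250 ksps = 270000 Kbps; applying SGI (* 10/9)
+ * yields 300000 Kbps, i.e. the familiar 300 Mbps.
+ */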
+
+/** take a well formed ratespec_t arg and return phy rate in [Kbps] units */
+static uint32
+rate_rspec2rate(uint32 rspec)
+{
+ int rate = 0;
+
+ if (RSPEC_ISLEGACY(rspec)) {
+ rate = 500 * (rspec & WL_RSPEC_RATE_MASK);
+ } else if (RSPEC_ISHT(rspec)) {
+ uint mcs = (rspec & WL_RSPEC_RATE_MASK);
+
+ if (mcs == 32) {
+ rate = rate_mcs2rate(mcs, 1, WL_RSPEC_BW_40MHZ, RSPEC_ISSGI(rspec));
+ } else {
+ uint nss = 1 + (mcs / 8);
+ mcs = mcs % 8;
+ rate = rate_mcs2rate(mcs, nss, RSPEC_BW(rspec), RSPEC_ISSGI(rspec));
+ }
+ } else if (RSPEC_ISVHT(rspec)) {
+ uint mcs = (rspec & WL_RSPEC_VHT_MCS_MASK);
+ uint nss = (rspec & WL_RSPEC_VHT_NSS_MASK) >> WL_RSPEC_VHT_NSS_SHIFT;
+ if (mcs > 9 || nss > 8) {
+ DHD_RTT(("%s: Invalid mcs %d or nss %d\n", __FUNCTION__, mcs, nss));
+ goto exit;
+ }
+
+ rate = rate_mcs2rate(mcs, nss, RSPEC_BW(rspec), RSPEC_ISSGI(rspec));
+ } else {
+ DHD_RTT(("%s: wrong rspec:%d\n", __FUNCTION__, rspec));
+ }
+exit:
+ return rate;
+}
+
+char resp_buf[WLC_IOCTL_SMLEN];
+
+static uint64
+ftm_intvl2nsec(const wl_proxd_intvl_t *intvl)
+{
+ uint64 ret;
+ ret = intvl->intvl;
+ switch (intvl->tmu) {
+ case WL_PROXD_TMU_TU: ret = FTM_TU2MICRO(ret) * 1000; break;
+ case WL_PROXD_TMU_SEC: ret *= 1000000000; break;
+ case WL_PROXD_TMU_MILLI_SEC: ret *= 1000000; break;
+ case WL_PROXD_TMU_MICRO_SEC: ret *= 1000; break;
+ case WL_PROXD_TMU_PICO_SEC: ret = intvl->intvl / 1000; break;
+ case WL_PROXD_TMU_NANO_SEC: /* fall through */
+ default: break;
+ }
+ return ret;
+}
+uint64
+ftm_intvl2usec(const wl_proxd_intvl_t *intvl)
+{
+ uint64 ret;
+ ret = intvl->intvl;
+ switch (intvl->tmu) {
+ case WL_PROXD_TMU_TU: ret = FTM_TU2MICRO(ret); break;
+ case WL_PROXD_TMU_SEC: ret *= 1000000; break;
+ case WL_PROXD_TMU_NANO_SEC: ret = intvl->intvl / 1000; break;
+ case WL_PROXD_TMU_PICO_SEC: ret = intvl->intvl / 1000000; break;
+ case WL_PROXD_TMU_MILLI_SEC: ret *= 1000; break;
+ case WL_PROXD_TMU_MICRO_SEC: /* fall through */
+ default: break;
+ }
+ return ret;
+}
+
+/*
+* look up 'id' (as a key) in the firmware-status-to-host map table;
+* if found, return the corresponding reason code
+*/
+
+static rtt_reason_t
+ftm_get_statusmap_info(wl_proxd_status_t id, const ftm_status_map_host_entry_t *p_table,
+ uint32 num_entries)
+{
+ int i;
+ const ftm_status_map_host_entry_t *p_entry;
+	/* scan through the table till the end */
+ p_entry = p_table;
+ for (i = 0; i < (int) num_entries; i++)
+ {
+ if (p_entry->proxd_status == id) {
+ return p_entry->rtt_reason;
+ }
+ p_entry++; /* next entry */
+ }
+ return RTT_STATUS_FAILURE; /* not found */
+}
+/*
+* look up 'id' (as a key) in a table;
+* if found, return the entry pointer, otherwise return NULL
+*/
+static const ftm_strmap_entry_t*
+ftm_get_strmap_info(int32 id, const ftm_strmap_entry_t *p_table, uint32 num_entries)
+{
+ int i;
+ const ftm_strmap_entry_t *p_entry;
+
+	/* scan through the table till the end */
+ p_entry = p_table;
+ for (i = 0; i < (int) num_entries; i++)
+ {
+ if (p_entry->id == id)
+ return p_entry;
+ p_entry++; /* next entry */
+ }
+ return NULL; /* not found */
+}
+
+/*
+* map an enum to a text string for display; this function is called by:
+* For debug/trace:
+*	ftm_[cmdid|tlvid]_to_str()
+* For the TLV-output log of 'get' commands:
+*	ftm_[method|tmu|caps|status|state]_value_to_logstr()
+* Input:
+*	p_table -- points to an 'enum to string' table.
+*/
+static const char *
+ftm_map_id_to_str(int32 id, const ftm_strmap_entry_t *p_table, uint32 num_entries)
+{
+	const ftm_strmap_entry_t *p_entry = ftm_get_strmap_info(id, p_table, num_entries);
+ if (p_entry)
+ return (p_entry->text);
+
+ return "invalid";
+}
+
+#if defined(WL_CFG80211) && defined(RTT_DEBUG)
+/* define entry, e.g. { WL_PROXD_CMD_xxx, "WL_PROXD_CMD_xxx" } */
+#define DEF_STRMAP_ENTRY(id) { (id), #id }
+
+/* ftm cmd-id mapping */
+static const ftm_strmap_entry_t ftm_cmdid_map[] = {
+ /* {wl_proxd_cmd_t(WL_PROXD_CMD_xxx), "WL_PROXD_CMD_xxx" }, */
+ DEF_STRMAP_ENTRY(WL_PROXD_CMD_NONE),
+ DEF_STRMAP_ENTRY(WL_PROXD_CMD_GET_VERSION),
+ DEF_STRMAP_ENTRY(WL_PROXD_CMD_ENABLE),
+ DEF_STRMAP_ENTRY(WL_PROXD_CMD_DISABLE),
+ DEF_STRMAP_ENTRY(WL_PROXD_CMD_CONFIG),
+ DEF_STRMAP_ENTRY(WL_PROXD_CMD_START_SESSION),
+ DEF_STRMAP_ENTRY(WL_PROXD_CMD_BURST_REQUEST),
+ DEF_STRMAP_ENTRY(WL_PROXD_CMD_STOP_SESSION),
+ DEF_STRMAP_ENTRY(WL_PROXD_CMD_DELETE_SESSION),
+ DEF_STRMAP_ENTRY(WL_PROXD_CMD_GET_RESULT),
+ DEF_STRMAP_ENTRY(WL_PROXD_CMD_GET_INFO),
+ DEF_STRMAP_ENTRY(WL_PROXD_CMD_GET_STATUS),
+ DEF_STRMAP_ENTRY(WL_PROXD_CMD_GET_SESSIONS),
+ DEF_STRMAP_ENTRY(WL_PROXD_CMD_GET_COUNTERS),
+ DEF_STRMAP_ENTRY(WL_PROXD_CMD_CLEAR_COUNTERS),
+ DEF_STRMAP_ENTRY(WL_PROXD_CMD_COLLECT),
+ DEF_STRMAP_ENTRY(WL_PROXD_CMD_TUNE),
+ DEF_STRMAP_ENTRY(WL_PROXD_CMD_DUMP),
+ DEF_STRMAP_ENTRY(WL_PROXD_CMD_START_RANGING),
+ DEF_STRMAP_ENTRY(WL_PROXD_CMD_STOP_RANGING),
+ DEF_STRMAP_ENTRY(WL_PROXD_CMD_GET_RANGING_INFO),
+};
+
+/*
+* map a ftm cmd-id to a text-string for display
+*/
+const char *
+ftm_cmdid_to_str(uint16 cmdid)
+{
+ return ftm_map_id_to_str((int32) cmdid, &ftm_cmdid_map[0], ARRAYSIZE(ftm_cmdid_map));
+}
+#endif /* WL_CFG80211 && RTT_DEBUG */
+
+/*
+* convert BCME_xxx error codes into related error strings
+* note, bcmerrorstr() defined in bcmutils is for BCMDRIVER only,
+* this duplicate copy is for WL access and may need to be cleaned up later
+*/
+static const char *ftm_bcmerrorstrtable[] = BCMERRSTRINGTABLE;
+static const char *
+ftm_status_value_to_logstr(wl_proxd_status_t status)
+{
+ static char ftm_msgbuf_status_undef[32];
+ const ftm_strmap_entry_t *p_loginfo;
+ int bcmerror;
+
+ /* check if within BCME_xxx error range */
+ bcmerror = (int) status;
+ if (VALID_BCMERROR(bcmerror))
+ return ftm_bcmerrorstrtable[-bcmerror];
+
+ /* otherwise, look for 'proxd ftm status' range */
+ p_loginfo = ftm_get_strmap_info((int32) status,
+ &ftm_status_value_loginfo[0], ARRAYSIZE(ftm_status_value_loginfo));
+ if (p_loginfo)
+ return p_loginfo->text;
+
+	/* report an 'out of range' FTM-status error code */
+ memset(ftm_msgbuf_status_undef, 0, sizeof(ftm_msgbuf_status_undef));
+ snprintf(ftm_msgbuf_status_undef, sizeof(ftm_msgbuf_status_undef),
+ "Undefined status %d", status);
+ return &ftm_msgbuf_status_undef[0];
+}
+
+static const char *
+ftm_tmu_value_to_logstr(wl_proxd_tmu_t tmu)
+{
+ return ftm_map_id_to_str((int32)tmu,
+ &ftm_tmu_value_loginfo[0], ARRAYSIZE(ftm_tmu_value_loginfo));
+}
+
+static const ftm_strmap_entry_t*
+ftm_get_event_type_loginfo(wl_proxd_event_type_t event_type)
+{
+ /* look up 'event-type' from a predefined table */
+ return ftm_get_strmap_info((int32) event_type,
+ ftm_event_type_loginfo, ARRAYSIZE(ftm_event_type_loginfo));
+}
+
+static const char *
+ftm_session_state_value_to_logstr(wl_proxd_session_state_t state)
+{
+ return ftm_map_id_to_str((int32)state, &ftm_session_state_value_loginfo[0],
+ ARRAYSIZE(ftm_session_state_value_loginfo));
+}
+
+#ifdef WL_CFG80211
+/*
+* send 'proxd' iovar for all ftm get-related commands
+*/
+static int
+rtt_do_get_ioctl(dhd_pub_t *dhd, wl_proxd_iov_t *p_proxd_iov, uint16 proxd_iovsize,
+ ftm_subcmd_info_t *p_subcmd_info)
+{
+ wl_proxd_iov_t *p_iovresp = (wl_proxd_iov_t *)resp_buf;
+ int status;
+ int tlvs_len;
+ /* send getbuf proxd iovar */
+ status = dhd_getiovar(dhd, 0, "proxd", (char *)p_proxd_iov,
+ proxd_iovsize, (char **)&p_iovresp, WLC_IOCTL_SMLEN);
+ if (status != BCME_OK) {
+ DHD_RTT_ERR(("%s: failed to send getbuf proxd iovar (CMD ID : %d), status=%d\n",
+ __FUNCTION__, p_subcmd_info->cmdid, status));
+ return status;
+ }
+ if (p_subcmd_info->cmdid == WL_PROXD_CMD_GET_VERSION) {
+ p_subcmd_info->version = ltoh16(p_iovresp->version);
+ DHD_RTT(("ftm version: 0x%x\n", ltoh16(p_iovresp->version)));
+ goto exit;
+ }
+
+ tlvs_len = ltoh16(p_iovresp->len) - WL_PROXD_IOV_HDR_SIZE;
+ if (tlvs_len < 0) {
+ DHD_RTT_ERR(("%s: alert, p_iovresp->len(%d) should not be smaller than %d\n",
+ __FUNCTION__, ltoh16(p_iovresp->len), (int) WL_PROXD_IOV_HDR_SIZE));
+ tlvs_len = 0;
+ }
+
+ if (tlvs_len > 0 && p_subcmd_info->handler) {
+ /* unpack TLVs and invokes the cbfn for processing */
+ status = bcm_unpack_xtlv_buf(p_proxd_iov, (uint8 *)p_iovresp->tlvs,
+ tlvs_len, BCM_XTLV_OPTION_ALIGN32, p_subcmd_info->handler);
+ }
+exit:
+ return status;
+}
+
+static wl_proxd_iov_t *
+rtt_alloc_getset_buf(dhd_pub_t *dhd, wl_proxd_method_t method, wl_proxd_session_id_t session_id,
+ wl_proxd_cmd_t cmdid, uint16 tlvs_bufsize, uint16 *p_out_bufsize)
+{
+ uint16 proxd_iovsize;
+ wl_proxd_tlv_t *p_tlv;
+ wl_proxd_iov_t *p_proxd_iov = (wl_proxd_iov_t *) NULL;
+
+ *p_out_bufsize = 0; /* init */
+ /* calculate the whole buffer size, including one reserve-tlv entry in the header */
+ proxd_iovsize = sizeof(wl_proxd_iov_t) + tlvs_bufsize;
+
+ p_proxd_iov = (wl_proxd_iov_t *)MALLOCZ(dhd->osh, proxd_iovsize);
+ if (p_proxd_iov == NULL) {
+ DHD_RTT_ERR(("error: failed to allocate %d bytes of memory\n", proxd_iovsize));
+ return NULL;
+ }
+
+ /* setup proxd-FTM-method iovar header */
+ p_proxd_iov->version = htol16(WL_PROXD_API_VERSION);
+ p_proxd_iov->len = htol16(proxd_iovsize); /* caller may adjust it based on #of TLVs */
+ p_proxd_iov->cmd = htol16(cmdid);
+ p_proxd_iov->method = htol16(method);
+ p_proxd_iov->sid = htol16(session_id);
+
+ /* initialize the reserved/dummy-TLV in iovar header */
+ p_tlv = p_proxd_iov->tlvs;
+ p_tlv->id = htol16(WL_PROXD_TLV_ID_NONE);
+ p_tlv->len = htol16(0);
+
+ *p_out_bufsize = proxd_iovsize; /* for caller's reference */
+
+ return p_proxd_iov;
+}
+
+static int
+dhd_rtt_common_get_handler(dhd_pub_t *dhd, ftm_subcmd_info_t *p_subcmd_info,
+ wl_proxd_method_t method,
+ wl_proxd_session_id_t session_id)
+{
+ int status = BCME_OK;
+ uint16 proxd_iovsize = 0;
+ wl_proxd_iov_t *p_proxd_iov;
+#ifdef RTT_DEBUG
+ DHD_RTT(("enter %s: method=%d, session_id=%d, cmdid=%d(%s)\n",
+ __FUNCTION__, method, session_id, p_subcmd_info->cmdid,
+ ftm_cmdid_to_str(p_subcmd_info->cmdid)));
+#endif
+	/* alloc mem for the ioctl header + reserved 0 bufsize for TLVs (initialized to zero) */
+ p_proxd_iov = rtt_alloc_getset_buf(dhd, method, session_id, p_subcmd_info->cmdid,
+ 0, &proxd_iovsize);
+
+ if (p_proxd_iov == NULL)
+ return BCME_NOMEM;
+
+ status = rtt_do_get_ioctl(dhd, p_proxd_iov, proxd_iovsize, p_subcmd_info);
+
+ if (status != BCME_OK) {
+ DHD_RTT(("%s failed: status=%d\n", __FUNCTION__, status));
+ }
+ MFREE(dhd->osh, p_proxd_iov, proxd_iovsize);
+ return status;
+}
+
+/*
+* common handler for set-related proxd method commands which require no TLV as input
+* wl proxd ftm [session-id] <set-subcmd>
+* e.g.
+* wl proxd ftm enable -- to enable ftm
+* wl proxd ftm disable -- to disable ftm
+* wl proxd ftm <session-id> start -- to start a specified session
+* wl proxd ftm <session-id> stop -- to cancel a specified session;
+*	state is maintained until the session is deleted.
+* wl proxd ftm <session-id> delete -- to delete a specified session
+* wl proxd ftm [<session-id>] clear-counters -- to clear counters
+* wl proxd ftm <session-id> burst-request -- on initiator: to send burst request;
+* on target: send FTM frame
+* wl proxd ftm <session-id> collect
+* wl proxd ftm tune (TBD)
+*/
+static int
+dhd_rtt_common_set_handler(dhd_pub_t *dhd, const ftm_subcmd_info_t *p_subcmd_info,
+ wl_proxd_method_t method, wl_proxd_session_id_t session_id)
+{
+ uint16 proxd_iovsize;
+ wl_proxd_iov_t *p_proxd_iov;
+ int ret;
+
+#ifdef RTT_DEBUG
+ DHD_RTT(("enter %s: method=%d, session_id=%d, cmdid=%d(%s)\n",
+ __FUNCTION__, method, session_id, p_subcmd_info->cmdid,
+ ftm_cmdid_to_str(p_subcmd_info->cmdid)));
+#endif
+
+ /* allocate and initialize a temp buffer for 'set proxd' iovar */
+ proxd_iovsize = 0;
+ p_proxd_iov = rtt_alloc_getset_buf(dhd, method, session_id, p_subcmd_info->cmdid,
+ 0, &proxd_iovsize); /* no TLV */
+ if (p_proxd_iov == NULL)
+ return BCME_NOMEM;
+
+ /* no TLV to pack, simply issue a set-proxd iovar */
+ ret = dhd_iovar(dhd, 0, "proxd", (char *)p_proxd_iov, proxd_iovsize, NULL, 0, TRUE);
+#ifdef RTT_DEBUG
+ if (ret != BCME_OK) {
+ DHD_RTT(("error: IOVAR failed, status=%d\n", ret));
+ }
+#endif
+ /* clean up */
+ MFREE(dhd->osh, p_proxd_iov, proxd_iovsize);
+
+ return ret;
+}
+#endif /* WL_CFG80211 */
+
+/* maps the collect-event data length to the
+ * corresponding wl_proxd_collect_event_data version
+ */
+static uint
+rtt_collect_data_event_ver(uint16 len)
+{
+ if (len > sizeof(wl_proxd_collect_event_data_v3_t)) {
+ return WL_PROXD_COLLECT_EVENT_DATA_VERSION_MAX;
+ } else if (len == sizeof(wl_proxd_collect_event_data_v4_t)) {
+ return WL_PROXD_COLLECT_EVENT_DATA_VERSION_4;
+ } else if (len == sizeof(wl_proxd_collect_event_data_v3_t)) {
+ return WL_PROXD_COLLECT_EVENT_DATA_VERSION_3;
+ } else if (len == sizeof(wl_proxd_collect_event_data_v2_t)) {
+ return WL_PROXD_COLLECT_EVENT_DATA_VERSION_2;
+ } else {
+ return WL_PROXD_COLLECT_EVENT_DATA_VERSION_1;
+ }
+}
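+/*
+ * Editorial note: the version is inferred from the payload length because
+ * that is the only discriminator available at this point; the v3/v4 structs
+ * additionally embed a version field, which the display routine below
+ * cross-checks.
+ */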
+
+static void
+rtt_collect_event_data_display(uint8 ver, void *ctx, const uint8 *p_data, uint16 len)
+{
+ int i;
+ wl_proxd_collect_event_data_v1_t *p_collect_data_v1 = NULL;
+ wl_proxd_collect_event_data_v2_t *p_collect_data_v2 = NULL;
+ wl_proxd_collect_event_data_v3_t *p_collect_data_v3 = NULL;
+ wl_proxd_collect_event_data_v4_t *p_collect_data_v4 = NULL;
+
+ if (!ctx || !p_data) {
+ return;
+ }
+
+ switch (ver) {
+ case WL_PROXD_COLLECT_EVENT_DATA_VERSION_1:
+ DHD_RTT(("\tVERSION_1\n"));
+ memcpy(ctx, p_data, sizeof(wl_proxd_collect_event_data_v1_t));
+ p_collect_data_v1 = (wl_proxd_collect_event_data_v1_t *)ctx;
+ DHD_RTT(("\tH_RX\n"));
+ for (i = 0; i < K_TOF_COLLECT_H_SIZE_20MHZ; i++) {
+ p_collect_data_v1->H_RX[i] = ltoh32_ua(&p_collect_data_v1->H_RX[i]);
+ DHD_RTT(("\t%u\n", p_collect_data_v1->H_RX[i]));
+ }
+ DHD_RTT(("\n"));
+ DHD_RTT(("\tH_LB\n"));
+ for (i = 0; i < K_TOF_COLLECT_H_SIZE_20MHZ; i++) {
+ p_collect_data_v1->H_LB[i] = ltoh32_ua(&p_collect_data_v1->H_LB[i]);
+ DHD_RTT(("\t%u\n", p_collect_data_v1->H_LB[i]));
+ }
+ DHD_RTT(("\n"));
+ DHD_RTT(("\tri_rr\n"));
+ for (i = 0; i < FTM_TPK_RI_RR_LEN; i++) {
+ DHD_RTT(("\t%u\n", p_collect_data_v1->ri_rr[i]));
+ }
+ p_collect_data_v1->phy_err_mask = ltoh32_ua(&p_collect_data_v1->phy_err_mask);
+ DHD_RTT(("\tphy_err_mask=0x%x\n", p_collect_data_v1->phy_err_mask));
+ break;
+ case WL_PROXD_COLLECT_EVENT_DATA_VERSION_2:
+ memcpy(ctx, p_data, sizeof(wl_proxd_collect_event_data_v2_t));
+ p_collect_data_v2 = (wl_proxd_collect_event_data_v2_t *)ctx;
+ DHD_RTT(("\tH_RX\n"));
+ for (i = 0; i < K_TOF_COLLECT_H_SIZE_20MHZ; i++) {
+ p_collect_data_v2->H_RX[i] = ltoh32_ua(&p_collect_data_v2->H_RX[i]);
+ DHD_RTT(("\t%u\n", p_collect_data_v2->H_RX[i]));
+ }
+ DHD_RTT(("\n"));
+ DHD_RTT(("\tH_LB\n"));
+ for (i = 0; i < K_TOF_COLLECT_H_SIZE_20MHZ; i++) {
+ p_collect_data_v2->H_LB[i] = ltoh32_ua(&p_collect_data_v2->H_LB[i]);
+ DHD_RTT(("\t%u\n", p_collect_data_v2->H_LB[i]));
+ }
+ DHD_RTT(("\n"));
+ DHD_RTT(("\tri_rr\n"));
+ for (i = 0; i < FTM_TPK_RI_RR_LEN_SECURE_2_0; i++) {
+ DHD_RTT(("\t%u\n", p_collect_data_v2->ri_rr[i]));
+ }
+ p_collect_data_v2->phy_err_mask = ltoh32_ua(&p_collect_data_v2->phy_err_mask);
+ DHD_RTT(("\tphy_err_mask=0x%x\n", p_collect_data_v2->phy_err_mask));
+ break;
+ case WL_PROXD_COLLECT_EVENT_DATA_VERSION_3:
+ memcpy(ctx, p_data, sizeof(wl_proxd_collect_event_data_v3_t));
+ p_collect_data_v3 = (wl_proxd_collect_event_data_v3_t *)ctx;
+ switch (p_collect_data_v3->version) {
+ case WL_PROXD_COLLECT_EVENT_DATA_VERSION_3:
+ if (p_collect_data_v3->length !=
+ (len - OFFSETOF(wl_proxd_collect_event_data_v3_t, H_LB))) {
+ DHD_RTT(("\tversion/length mismatch\n"));
+ break;
+ }
+ DHD_RTT(("\tH_RX\n"));
+ for (i = 0; i < K_TOF_COLLECT_H_SIZE_20MHZ; i++) {
+ p_collect_data_v3->H_RX[i] =
+ ltoh32_ua(&p_collect_data_v3->H_RX[i]);
+ DHD_RTT(("\t%u\n", p_collect_data_v3->H_RX[i]));
+ }
+ DHD_RTT(("\n"));
+ DHD_RTT(("\tH_LB\n"));
+ for (i = 0; i < K_TOF_COLLECT_H_SIZE_20MHZ; i++) {
+ p_collect_data_v3->H_LB[i] =
+ ltoh32_ua(&p_collect_data_v3->H_LB[i]);
+ DHD_RTT(("\t%u\n", p_collect_data_v3->H_LB[i]));
+ }
+ DHD_RTT(("\n"));
+ DHD_RTT(("\tri_rr\n"));
+ for (i = 0; i < FTM_TPK_RI_RR_LEN_SECURE_2_0; i++) {
+ DHD_RTT(("\t%u\n", p_collect_data_v3->ri_rr[i]));
+ }
+ p_collect_data_v3->phy_err_mask =
+ ltoh32_ua(&p_collect_data_v3->phy_err_mask);
+ DHD_RTT(("\tphy_err_mask=0x%x\n", p_collect_data_v3->phy_err_mask));
+ break;
+ /* future case */
+ }
+ break;
+ case WL_PROXD_COLLECT_EVENT_DATA_VERSION_4:
+ memcpy(ctx, p_data, sizeof(wl_proxd_collect_event_data_v4_t));
+ p_collect_data_v4 = (wl_proxd_collect_event_data_v4_t *)ctx;
+ switch (p_collect_data_v4->version) {
+ case WL_PROXD_COLLECT_EVENT_DATA_VERSION_4:
+ if (p_collect_data_v4->length !=
+ (len - OFFSETOF(wl_proxd_collect_event_data_v4_t, H_LB))) {
+ DHD_RTT(("\tversion/length mismatch\n"));
+ break;
+ }
+ DHD_RTT(("\tH_RX\n"));
+ for (i = 0; i < K_TOF_COLLECT_H_SIZE_20MHZ; i++) {
+ p_collect_data_v4->H_RX[i] =
+ ltoh32_ua(&p_collect_data_v4->H_RX[i]);
+ DHD_RTT(("\t%u\n", p_collect_data_v4->H_RX[i]));
+ }
+ DHD_RTT(("\n"));
+ DHD_RTT(("\tH_LB\n"));
+ for (i = 0; i < K_TOF_COLLECT_H_SIZE_20MHZ; i++) {
+ p_collect_data_v4->H_LB[i] =
+ ltoh32_ua(&p_collect_data_v4->H_LB[i]);
+ DHD_RTT(("\t%u\n", p_collect_data_v4->H_LB[i]));
+ }
+ DHD_RTT(("\n"));
+ DHD_RTT(("\tri_rr\n"));
+ for (i = 0; i < FTM_TPK_RI_RR_LEN_SECURE_2_0_5G; i++) {
+ DHD_RTT(("\t%u\n", p_collect_data_v4->ri_rr[i]));
+ }
+ p_collect_data_v4->phy_err_mask =
+ ltoh32_ua(&p_collect_data_v4->phy_err_mask);
+ DHD_RTT(("\tphy_err_mask=0x%x\n", p_collect_data_v4->phy_err_mask));
+ break;
+ /* future case */
+ }
+ break;
+ }
+}
+
+static uint16
+rtt_result_ver(uint16 tlvid, const uint8 *p_data)
+{
+ uint16 ret = BCME_OK;
+ const wl_proxd_rtt_result_v2_t *r_v2 = NULL;
+
+ switch (tlvid) {
+ case WL_PROXD_TLV_ID_RTT_RESULT:
+ BCM_REFERENCE(p_data);
+ ret = WL_PROXD_RTT_RESULT_VERSION_1;
+ break;
+ case WL_PROXD_TLV_ID_RTT_RESULT_V2:
+ if (p_data) {
+ r_v2 = (const wl_proxd_rtt_result_v2_t *)p_data;
+ if (r_v2->version == WL_PROXD_RTT_RESULT_VERSION_2) {
+ ret = WL_PROXD_RTT_RESULT_VERSION_2;
+ }
+ }
+ break;
+ default:
+ DHD_RTT_ERR(("%s: > Unsupported TLV ID %d\n",
+ __FUNCTION__, tlvid));
+ break;
+ }
+ return ret;
+}
+
+/* pretty hex print a contiguous buffer */
+static void
+rtt_prhex(const char *msg, const uint8 *buf, uint nbytes)
+{
+ char line[128], *p;
+ int len = sizeof(line);
+ int nchar;
+ uint i;
+
+ if (msg && (msg[0] != '\0'))
+ DHD_RTT(("%s:\n", msg));
+
+ p = line;
+ for (i = 0; i < nbytes; i++) {
+ if (i % 16 == 0) {
+ nchar = snprintf(p, len, " %04d: ", i); /* line prefix */
+ p += nchar;
+ len -= nchar;
+ }
+ if (len > 0) {
+ nchar = snprintf(p, len, "%02x ", buf[i]);
+ p += nchar;
+ len -= nchar;
+ }
+
+ if (i % 16 == 15) {
+ DHD_RTT(("%s\n", line)); /* flush line */
+ p = line;
+ len = sizeof(line);
+ }
+ }
+
+ /* flush last partial line */
+ if (p != line)
+ DHD_RTT(("%s\n", line));
+}
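+/*
+ * Editorial note: given the formats above (decimal offsets, 16 bytes per
+ * line), a 20-byte buffer holding 0x00..0x13 would print as:
+ *  0000: 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f
+ *  0016: 10 11 12 13
+ */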
+
+static int
+rtt_unpack_xtlv_cbfn(void *ctx, const uint8 *p_data, uint16 tlvid, uint16 len)
+{
+ int ret = BCME_OK;
+ int i;
+ wl_proxd_ftm_session_status_t *p_data_info = NULL;
+ uint32 chan_data_entry = 0;
+ uint16 expected_rtt_result_ver = 0;
+
+ BCM_REFERENCE(p_data_info);
+
+ switch (tlvid) {
+ case WL_PROXD_TLV_ID_RTT_RESULT:
+ case WL_PROXD_TLV_ID_RTT_RESULT_V2:
+ DHD_RTT(("WL_PROXD_TLV_ID_RTT_RESULT\n"));
+ expected_rtt_result_ver = rtt_result_ver(tlvid, p_data);
+ switch (expected_rtt_result_ver) {
+ case WL_PROXD_RTT_RESULT_VERSION_1:
+ ret = dhd_rtt_convert_results_to_host_v1((rtt_result_t *)ctx,
+ p_data, tlvid, len);
+ break;
+ case WL_PROXD_RTT_RESULT_VERSION_2:
+ ret = dhd_rtt_convert_results_to_host_v2((rtt_result_t *)ctx,
+ p_data, tlvid, len);
+ break;
+ default:
+ DHD_RTT_ERR((" > Unsupported RTT_RESULT version\n"));
+ ret = BCME_UNSUPPORTED;
+ break;
+ }
+ break;
+ case WL_PROXD_TLV_ID_SESSION_STATUS:
+ DHD_RTT(("WL_PROXD_TLV_ID_SESSION_STATUS\n"));
+ memcpy(ctx, p_data, sizeof(wl_proxd_ftm_session_status_t));
+ p_data_info = (wl_proxd_ftm_session_status_t *)ctx;
+ p_data_info->sid = ltoh16_ua(&p_data_info->sid);
+ p_data_info->state = ltoh16_ua(&p_data_info->state);
+ p_data_info->status = ltoh32_ua(&p_data_info->status);
+ p_data_info->burst_num = ltoh16_ua(&p_data_info->burst_num);
+ p_data_info->core_info = ltoh16_ua(&p_data_info->core_info);
+ DHD_RTT(("\tsid=%u, state=%d, status=%d, burst_num=%u\n",
+ p_data_info->sid, p_data_info->state,
+ p_data_info->status, p_data_info->burst_num));
+ DHD_RTT(("\tnum_cores=%u, core=%u\n", (p_data_info->core_info & 0xFF),
+ (p_data_info->core_info >> 8u & 0xFF)));
+
+ break;
+ case WL_PROXD_TLV_ID_COLLECT_DATA:
+ DHD_RTT(("WL_PROXD_TLV_ID_COLLECT_DATA\n"));
+		/* we do not have a handle to wl in the context of the
+		 * xtlv callback without changing the xtlv API.
+ */
+ rtt_collect_event_data_display(
+ rtt_collect_data_event_ver(len),
+ ctx, p_data, len);
+ break;
+ case WL_PROXD_TLV_ID_COLLECT_CHAN_DATA:
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ DHD_RTT(("WL_PROXD_TLV_ID_COLLECT_CHAN_DATA\n"));
+ DHD_RTT(("\tchan est %u\n", (uint32) (len / sizeof(uint32))));
+ for (i = 0; (uint16)i < (uint16)(len/sizeof(chan_data_entry)); i++) {
+ uint32 *p = (uint32*)p_data;
+ chan_data_entry = ltoh32_ua(p + i);
+ DHD_RTT(("\t%u\n", chan_data_entry));
+ }
+ GCC_DIAGNOSTIC_POP();
+ break;
+ case WL_PROXD_TLV_ID_MF_STATS_DATA:
+ DHD_RTT(("WL_PROXD_TLV_ID_MF_STATS_DATA\n"));
+ DHD_RTT(("\tmf stats len=%u\n", len));
+ rtt_prhex("", p_data, len);
+ break;
+ default:
+ DHD_RTT_ERR(("> Unsupported TLV ID %d\n", tlvid));
+ ret = BCME_ERROR;
+ break;
+ }
+
+ return ret;
+}
+
+#ifdef WL_CFG80211
+static int
+rtt_handle_config_options(wl_proxd_session_id_t session_id, wl_proxd_tlv_t **p_tlv,
+ uint16 *p_buf_space_left, ftm_config_options_info_t *ftm_configs, int ftm_cfg_cnt)
+{
+ int ret = BCME_OK;
+ int cfg_idx = 0;
+ uint32 flags = WL_PROXD_FLAG_NONE;
+ uint32 flags_mask = WL_PROXD_FLAG_NONE;
+ uint32 new_mask; /* cmdline input */
+ ftm_config_options_info_t *p_option_info;
+ uint16 type = (session_id == WL_PROXD_SESSION_ID_GLOBAL) ?
+ WL_PROXD_TLV_ID_FLAGS_MASK : WL_PROXD_TLV_ID_SESSION_FLAGS_MASK;
+ for (cfg_idx = 0; cfg_idx < ftm_cfg_cnt; cfg_idx++) {
+ p_option_info = (ftm_configs + cfg_idx);
+ if (p_option_info != NULL) {
+ new_mask = p_option_info->flags;
+ /* update flags mask */
+ flags_mask |= new_mask;
+ if (p_option_info->enable) {
+ flags |= new_mask; /* set the bit on */
+ } else {
+ flags &= ~new_mask; /* set the bit off */
+ }
+ }
+ }
+ flags = htol32(flags);
+ flags_mask = htol32(flags_mask);
+ /* setup flags_mask TLV */
+ ret = bcm_pack_xtlv_entry((uint8 **)p_tlv, p_buf_space_left,
+ type, sizeof(uint32), (uint8 *)&flags_mask, BCM_XTLV_OPTION_ALIGN32);
+ if (ret != BCME_OK) {
+		DHD_RTT_ERR(("%s : bcm_pack_xtlv_entry() for mask flags failed, status=%d\n",
+ __FUNCTION__, ret));
+ goto exit;
+ }
+
+ type = (session_id == WL_PROXD_SESSION_ID_GLOBAL)?
+ WL_PROXD_TLV_ID_FLAGS : WL_PROXD_TLV_ID_SESSION_FLAGS;
+ /* setup flags TLV */
+ ret = bcm_pack_xtlv_entry((uint8 **)p_tlv, p_buf_space_left,
+ type, sizeof(uint32), (uint8 *)&flags, BCM_XTLV_OPTION_ALIGN32);
+ if (ret != BCME_OK) {
+#ifdef RTT_DEBUG
+		DHD_RTT(("%s: bcm_pack_xtlv_entry() for flags failed, status=%d\n",
+ __FUNCTION__, ret));
+#endif
+ }
+exit:
+ return ret;
+}
+
+static int
+rtt_handle_config_general(wl_proxd_session_id_t session_id, wl_proxd_tlv_t **p_tlv,
+ uint16 *p_buf_space_left, ftm_config_param_info_t *ftm_configs, int ftm_cfg_cnt)
+{
+ int ret = BCME_OK;
+ int cfg_idx = 0;
+ uint32 chanspec;
+ ftm_config_param_info_t *p_config_param_info;
+ void *p_src_data;
+ uint16 src_data_size; /* size of data pointed by p_src_data as 'source' */
+ for (cfg_idx = 0; cfg_idx < ftm_cfg_cnt; cfg_idx++) {
+ p_config_param_info = (ftm_configs + cfg_idx);
+ if (p_config_param_info != NULL) {
+ switch (p_config_param_info->tlvid) {
+ case WL_PROXD_TLV_ID_BSS_INDEX:
+ case WL_PROXD_TLV_ID_FTM_RETRIES:
+ case WL_PROXD_TLV_ID_FTM_REQ_RETRIES:
+ p_src_data = &p_config_param_info->data8;
+ src_data_size = sizeof(uint8);
+ break;
+ case WL_PROXD_TLV_ID_BURST_NUM_FTM: /* uint16 */
+ case WL_PROXD_TLV_ID_NUM_BURST:
+ case WL_PROXD_TLV_ID_RX_MAX_BURST:
+ p_src_data = &p_config_param_info->data16;
+ src_data_size = sizeof(uint16);
+ break;
+ case WL_PROXD_TLV_ID_TX_POWER: /* uint32 */
+ case WL_PROXD_TLV_ID_RATESPEC:
+ case WL_PROXD_TLV_ID_EVENT_MASK: /* wl_proxd_event_mask_t/uint32 */
+ case WL_PROXD_TLV_ID_DEBUG_MASK:
+ p_src_data = &p_config_param_info->data32;
+ src_data_size = sizeof(uint32);
+ break;
+ case WL_PROXD_TLV_ID_CHANSPEC: /* chanspec_t --> 32bit */
+ chanspec = p_config_param_info->chanspec;
+ p_src_data = (void *) &chanspec;
+ src_data_size = sizeof(uint32);
+ break;
+ case WL_PROXD_TLV_ID_BSSID: /* mac address */
+ case WL_PROXD_TLV_ID_PEER_MAC:
+ case WL_PROXD_TLV_ID_CUR_ETHER_ADDR:
+ p_src_data = &p_config_param_info->mac_addr;
+ src_data_size = sizeof(struct ether_addr);
+ break;
+ case WL_PROXD_TLV_ID_BURST_DURATION: /* wl_proxd_intvl_t */
+ case WL_PROXD_TLV_ID_BURST_PERIOD:
+ case WL_PROXD_TLV_ID_BURST_FTM_SEP:
+ case WL_PROXD_TLV_ID_BURST_TIMEOUT:
+ case WL_PROXD_TLV_ID_INIT_DELAY:
+ p_src_data = &p_config_param_info->data_intvl;
+ src_data_size = sizeof(wl_proxd_intvl_t);
+ break;
+ default:
+ ret = BCME_BADARG;
+ break;
+ }
+ if (ret != BCME_OK) {
+ DHD_RTT_ERR(("%s bad TLV ID : %d\n",
+ __FUNCTION__, p_config_param_info->tlvid));
+ break;
+ }
+
+ ret = bcm_pack_xtlv_entry((uint8 **) p_tlv, p_buf_space_left,
+ p_config_param_info->tlvid, src_data_size, (uint8 *)p_src_data,
+ BCM_XTLV_OPTION_ALIGN32);
+ if (ret != BCME_OK) {
+				DHD_RTT_ERR(("%s: bcm_pack_xtlv_entry() failed,"
+ " status=%d\n", __FUNCTION__, ret));
+ break;
+ }
+
+ }
+ }
+ return ret;
+}
+
+static int
+dhd_rtt_ftm_enable(dhd_pub_t *dhd, bool enable)
+{
+ ftm_subcmd_info_t subcmd_info;
+ subcmd_info.name = (enable)? "enable" : "disable";
+ subcmd_info.cmdid = (enable)? WL_PROXD_CMD_ENABLE: WL_PROXD_CMD_DISABLE;
+ subcmd_info.handler = NULL;
+ return dhd_rtt_common_set_handler(dhd, &subcmd_info,
+ WL_PROXD_METHOD_FTM, WL_PROXD_SESSION_ID_GLOBAL);
+}
+
+static int
+dhd_rtt_start_session(dhd_pub_t *dhd, wl_proxd_session_id_t session_id, bool start)
+{
+ ftm_subcmd_info_t subcmd_info;
+ subcmd_info.name = (start)? "start session" : "stop session";
+ subcmd_info.cmdid = (start)? WL_PROXD_CMD_START_SESSION: WL_PROXD_CMD_STOP_SESSION;
+ subcmd_info.handler = NULL;
+ return dhd_rtt_common_set_handler(dhd, &subcmd_info,
+ WL_PROXD_METHOD_FTM, session_id);
+}
+
+static int
+dhd_rtt_delete_session(dhd_pub_t *dhd, wl_proxd_session_id_t session_id)
+{
+ ftm_subcmd_info_t subcmd_info;
+ subcmd_info.name = "delete session";
+ subcmd_info.cmdid = WL_PROXD_CMD_DELETE_SESSION;
+ subcmd_info.handler = NULL;
+ return dhd_rtt_common_set_handler(dhd, &subcmd_info,
+ WL_PROXD_METHOD_FTM, session_id);
+}
+#ifdef WL_NAN
+int
+dhd_rtt_delete_nan_session(dhd_pub_t *dhd)
+{
+ struct net_device *dev = dhd_linux_get_primary_netdev(dhd);
+ struct wireless_dev *wdev = ndev_to_wdev(dev);
+ struct wiphy *wiphy = wdev->wiphy;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ wl_cfgnan_terminate_directed_rtt_sessions(dev, cfg);
+ return BCME_OK;
+}
+/* API to find out if the given peer MAC from FTM events
+* is a NAN peer. Based on this we handle the SESSION_END
+* event: for a NAN peer the FTM_SESSION_END event is ignored and handled
+* via the nan-ranging-cancel or nan-ranging-end event instead.
+*/
+static bool
+dhd_rtt_is_nan_peer(dhd_pub_t *dhd, struct ether_addr *peer_mac)
+{
+ struct net_device *dev = dhd_linux_get_primary_netdev(dhd);
+ struct wireless_dev *wdev = ndev_to_wdev(dev);
+ struct wiphy *wiphy = wdev->wiphy;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ nan_ranging_inst_t *ranging_inst = NULL;
+ bool ret = FALSE;
+
+ if ((wl_cfgnan_is_enabled(cfg) == FALSE) || ETHER_ISNULLADDR(peer_mac)) {
+ goto exit;
+ }
+
+ ranging_inst = wl_cfgnan_check_for_ranging(cfg, peer_mac);
+ if (ranging_inst) {
+ DHD_RTT((" RTT peer is of type NAN\n"));
+ ret = TRUE;
+ goto exit;
+ }
+exit:
+ return ret;
+}
+
+bool
+dhd_rtt_nan_is_directed_setup_in_prog(dhd_pub_t *dhd)
+{
+ bool setup_in_prog;
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+
+ setup_in_prog = rtt_status->directed_cfg.directed_setup_status.directed_na_setup_inprog;
+
+ return setup_in_prog;
+}
+
+bool
+dhd_rtt_nan_is_directed_setup_in_prog_with_peer(dhd_pub_t *dhd,
+ struct ether_addr *peer)
+{
+ bool setup_in_prog = TRUE;
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+ struct net_device *dev = dhd_linux_get_primary_netdev(dhd);
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ nan_ranging_inst_t *ranging_inst = NULL;
+
+ if (!dhd_rtt_nan_is_directed_setup_in_prog(dhd)) {
+ setup_in_prog = FALSE;
+ goto exit;
+ }
+
+ ranging_inst = wl_cfgnan_check_for_ranging(cfg, peer);
+
+ if ((ranging_inst == NULL) ||
+ (ranging_inst != rtt_status->directed_cfg.directed_setup_status.rng_inst)) {
+ setup_in_prog = FALSE;
+ }
+
+exit:
+ return setup_in_prog;
+}
+
+void
+dhd_rtt_nan_update_directed_setup_inprog(dhd_pub_t *dhd,
+ nan_ranging_inst_t *rng_inst, bool inprog)
+{
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+
+ rtt_status->directed_cfg.directed_setup_status.directed_na_setup_inprog = inprog;
+ if (inprog) {
+ ASSERT(rng_inst);
+ rtt_status->directed_cfg.directed_setup_status.rng_inst = rng_inst;
+ } else {
+ rtt_status->directed_cfg.directed_setup_status.rng_inst = NULL;
+ }
+}
+
+bool
+dhd_rtt_nan_directed_sessions_allowed(dhd_pub_t *dhd)
+{
+ int max_sessions = 0;
+ bool allowed = TRUE;
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+
+ max_sessions = rtt_status->max_nan_rtt_sessions;
+
+ if (dhd_rtt_nan_is_directed_setup_in_prog(dhd)) {
+ max_sessions--;
+ }
+
+ if (rtt_status->directed_cfg.directed_sessions_cnt >= max_sessions) {
+ allowed = FALSE;
+ }
+
+ return allowed;
+}
+
+bool
+dhd_rtt_nan_all_directed_sessions_triggered(dhd_pub_t *dhd)
+{
+ bool done;
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+
+ if ((rtt_status->cur_idx + 1) >= rtt_status->rtt_config.rtt_target_cnt) {
+ done = TRUE;
+ } else {
+ done = FALSE;
+ }
+
+ return done;
+}
+
+void
+dhd_rtt_nan_update_directed_sessions_cnt(dhd_pub_t *dhd, bool incr)
+{
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+
+ if (incr) {
+ rtt_status->directed_cfg.directed_sessions_cnt++;
+ } else {
+ rtt_status->directed_cfg.directed_sessions_cnt--;
+ }
+}
+
+static void
+dhd_rtt_event_trigger_failure(dhd_pub_t *dhd, rtt_target_info_t *rtt_target)
+{
+ wl_event_msg_t msg;
+
+ struct net_device *dev = dhd_linux_get_primary_netdev(dhd);
+ wl_proxd_event_t p_event;
+
+ bzero(&msg, sizeof(wl_event_msg_t));
+ bzero(&p_event, sizeof(p_event));
+
+ msg.event_type = hton32(WLC_E_PROXD);
+ msg.bsscfgidx = 0;
+ msg.datalen = hton32(sizeof(p_event));
+ msg.addr = rtt_target->addr;
+
+ p_event.version = htol16(WL_PROXD_API_VERSION);
+ p_event.type = htol16(WL_PROXD_EVENT_SESSION_END);
+ p_event.len = htol16(OFFSETOF(wl_proxd_event_t, tlvs));
+
+ wl_cfg80211_event(dev, &msg, &p_event);
+}
+
+static int
+dhd_rtt_nan_start_session(dhd_pub_t *dhd, rtt_target_info_t *rtt_target)
+{
+ s32 err = BCME_OK;
+ struct net_device *dev = dhd_linux_get_primary_netdev(dhd);
+ struct wireless_dev *wdev = ndev_to_wdev(dev);
+ struct wiphy *wiphy = wdev->wiphy;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ nan_ranging_inst_t *ranging_inst = NULL;
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+
+ NAN_MUTEX_LOCK();
+
+ if (!rtt_status) {
+ err = BCME_NOTENABLED;
+ goto done;
+ }
+
+ if (!wl_cfgnan_is_enabled(cfg)) { /* If nan is not enabled report error */
+ err = BCME_NOTENABLED;
+ goto done;
+ }
+
+	/* The scenarios below should be avoided by callers/schedulers */
+ if (dhd_rtt_nan_is_directed_setup_in_prog(dhd)) {
+ DHD_RTT_ERR(("dhd_rtt_nan_start_session failed, setup already in prog\n"));
+ err = BCME_ERROR;
+ ASSERT(0);
+ goto done;
+ }
+
+ if (!dhd_rtt_nan_directed_sessions_allowed(dhd)) {
+ DHD_RTT_ERR(("dhd_rtt_nan_start_session failed, already max sessions running\n"));
+ err = BCME_ERROR;
+ ASSERT(0);
+ goto done;
+ }
+
+ ranging_inst = wl_cfgnan_get_ranging_inst(cfg,
+ &rtt_target->addr, NAN_RANGING_ROLE_INITIATOR);
+ if (!ranging_inst) {
+ err = BCME_NORESOURCE;
+ goto done;
+ }
+
+ DHD_RTT(("Trigger nan based range request\n"));
+ err = wl_cfgnan_trigger_ranging(bcmcfg_to_prmry_ndev(cfg),
+ cfg, ranging_inst, NULL, NAN_RANGE_REQ_CMD, TRUE);
+ if (unlikely(err)) {
+ goto done;
+ }
+ ranging_inst->range_type = RTT_TYPE_NAN_DIRECTED;
+ ranging_inst->range_role = NAN_RANGING_ROLE_INITIATOR;
+ dhd_rtt_nan_update_directed_setup_inprog(dhd, ranging_inst, TRUE);
+
+done:
+ if (err) {
+ DHD_RTT_ERR(("Failed to issue Nan Ranging Request err %d\n", err));
+		/* Fake a session-end event, which helps in
+		 * scheduling the new target in a deferred context instead of here,
+		 * i.e., avoids scheduling work from the work item itself
+ */
+ dhd_rtt_event_trigger_failure(dhd, rtt_target);
+ }
+ NAN_MUTEX_UNLOCK();
+ return err;
+}
+#endif /* WL_NAN */
+
+static int
+dhd_rtt_ftm_config(dhd_pub_t *dhd, wl_proxd_session_id_t session_id,
+	ftm_config_category_t category, void *ftm_configs, int ftm_cfg_cnt)
+{
+ ftm_subcmd_info_t subcmd_info;
+ wl_proxd_tlv_t *p_tlv;
+	/* alloc mem for the ioctl header + reserved 0 bufsize for TLVs (initialized to zero) */
+ wl_proxd_iov_t *p_proxd_iov;
+ uint16 proxd_iovsize = 0;
+ uint16 bufsize;
+ uint16 buf_space_left;
+ uint16 all_tlvsize;
+ int ret = BCME_OK;
+
+ subcmd_info.name = "config";
+ subcmd_info.cmdid = WL_PROXD_CMD_CONFIG;
+
+ p_proxd_iov = rtt_alloc_getset_buf(dhd, WL_PROXD_METHOD_FTM, session_id, subcmd_info.cmdid,
+ FTM_IOC_BUFSZ, &proxd_iovsize);
+
+ if (p_proxd_iov == NULL) {
+ DHD_RTT_ERR(("%s : failed to allocate the iovar (size :%d)\n",
+ __FUNCTION__, FTM_IOC_BUFSZ));
+ return BCME_NOMEM;
+ }
+ /* setup TLVs */
+ bufsize = proxd_iovsize - WL_PROXD_IOV_HDR_SIZE; /* adjust available size for TLVs */
+ p_tlv = &p_proxd_iov->tlvs[0];
+ /* TLV buffer starts with a full size, will decrement for each packed TLV */
+ buf_space_left = bufsize;
+	if (category == FTM_CONFIG_CAT_OPTIONS) {
+ ret = rtt_handle_config_options(session_id, &p_tlv, &buf_space_left,
+ (ftm_config_options_info_t *)ftm_configs, ftm_cfg_cnt);
+	} else if (category == FTM_CONFIG_CAT_GENERAL) {
+ ret = rtt_handle_config_general(session_id, &p_tlv, &buf_space_left,
+ (ftm_config_param_info_t *)ftm_configs, ftm_cfg_cnt);
+ }
+ if (ret == BCME_OK) {
+ /* update the iov header, set len to include all TLVs + header */
+ all_tlvsize = (bufsize - buf_space_left);
+ p_proxd_iov->len = htol16(all_tlvsize + WL_PROXD_IOV_HDR_SIZE);
+ ret = dhd_iovar(dhd, 0, "proxd", (char *)p_proxd_iov,
+ all_tlvsize + WL_PROXD_IOV_HDR_SIZE, NULL, 0, TRUE);
+ if (ret != BCME_OK) {
+ DHD_RTT_ERR(("%s : failed to set config err %d\n", __FUNCTION__, ret));
+ }
+ }
+ /* clean up */
+ MFREE(dhd->osh, p_proxd_iov, proxd_iovsize);
+ return ret;
+}
+
+static int
+dhd_rtt_get_version(dhd_pub_t *dhd, int *out_version)
+{
+ int ret;
+ ftm_subcmd_info_t subcmd_info;
+ subcmd_info.name = "ver";
+ subcmd_info.cmdid = WL_PROXD_CMD_GET_VERSION;
+ subcmd_info.handler = NULL;
+ ret = dhd_rtt_common_get_handler(dhd, &subcmd_info,
+ WL_PROXD_METHOD_FTM, WL_PROXD_SESSION_ID_GLOBAL);
+ *out_version = (ret == BCME_OK) ? subcmd_info.version : 0;
+ return ret;
+}
+#endif /* WL_CFG80211 */
+
+chanspec_t
+dhd_rtt_convert_to_chspec(wifi_channel_info channel)
+{
+ int bw;
+ chanspec_t chanspec = 0;
+ uint8 center_chan;
+ uint8 primary_chan;
+	/* set width to 20 MHz for 2.4 GHz */
+ if (channel.center_freq >= 2400 && channel.center_freq <= 2500) {
+ channel.width = WIFI_CHAN_WIDTH_20;
+ }
+ switch (channel.width) {
+ case WIFI_CHAN_WIDTH_20:
+ bw = WL_CHANSPEC_BW_20;
+ primary_chan = wf_mhz2channel(channel.center_freq, 0);
+ chanspec = wf_channel2chspec(primary_chan, bw);
+ break;
+ case WIFI_CHAN_WIDTH_40:
+ bw = WL_CHANSPEC_BW_40;
+ primary_chan = wf_mhz2channel(channel.center_freq, 0);
+ chanspec = wf_channel2chspec(primary_chan, bw);
+ break;
+ case WIFI_CHAN_WIDTH_80:
+ bw = WL_CHANSPEC_BW_80;
+ primary_chan = wf_mhz2channel(channel.center_freq, 0);
+ center_chan = wf_mhz2channel(channel.center_freq0, 0);
+ chanspec = wf_chspec_80(center_chan, primary_chan);
+ break;
+ default:
+		DHD_RTT_ERR(("doesn't support this bandwidth: %d\n", channel.width));
+ bw = -1;
+ break;
+ }
+ return chanspec;
+}
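+/*
+ * Editorial worked example: an 80 MHz request with center_freq = 5180
+ * (primary channel 36) and center_freq0 = 5210 (center channel 42) goes
+ * through wf_chspec_80(42, 36) to build the 36/80 chanspec; any 2.4 GHz
+ * center_freq is forced to 20 MHz before conversion.
+ */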
+
+int
+dhd_rtt_idx_to_burst_duration(uint idx)
+{
+ if (idx >= ARRAY_SIZE(burst_duration_idx)) {
+ return -1;
+ }
+ return burst_duration_idx[idx];
+}
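+/*
+ * Editorial note: per the burst_duration_idx table above, e.g. index 5 maps
+ * to 8 and index 9 to 128 (the largest value, matching
+ * FTM_MAX_BURST_DUR_TMO_MS); indices 0, 1, 10 and 11 map to 0, and anything
+ * out of range returns -1.
+ */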
+
+int8
+dhd_rtt_get_cur_target_idx(dhd_pub_t *dhd)
+{
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+
+ return rtt_status->cur_idx;
+}
+
+int8
+dhd_rtt_set_next_target_idx(dhd_pub_t *dhd, int start_idx)
+{
+ int idx;
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+
+ for (idx = start_idx; idx < rtt_status->rtt_config.rtt_target_cnt; idx++) {
+ /* skip the disabled device */
+ if (rtt_status->rtt_config.target_info[idx].disable) {
+ continue;
+ } else {
+ /* set the idx to cur_idx */
+ rtt_status->cur_idx = idx;
+ break;
+ }
+ }
+
+ if (idx == rtt_status->rtt_config.rtt_target_cnt) {
+		/* All targets triggered */
+ rtt_status->cur_idx = rtt_status->rtt_config.rtt_target_cnt;
+ }
+
+ return (int8)rtt_status->cur_idx;
+}
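+/*
+ * Editorial note: on return, cur_idx is either the first enabled target at
+ * or after start_idx, or rtt_target_cnt when no enabled target remains;
+ * callers compare the returned index against rtt_target_cnt to detect the
+ * end of the list.
+ */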
+
+void
+dhd_rtt_set_target_list_mode(dhd_pub_t *dhd)
+{
+ int8 idx;
+ bool legacy = FALSE, nan = FALSE;
+ rtt_target_info_t *rtt_target;
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+
+ rtt_status->rtt_config.target_list_mode = RNG_TARGET_LIST_MODE_INVALID;
+ for (idx = rtt_status->start_idx; idx < rtt_status->rtt_config.rtt_target_cnt; idx++) {
+ rtt_target = &rtt_status->rtt_config.target_info[idx];
+ /* skip the disabled device */
+ if (rtt_target->disable) {
+ continue;
+ } else {
+ if (rtt_target->peer == RTT_PEER_NAN) {
+ nan = TRUE;
+ } else {
+ legacy = TRUE;
+ }
+ }
+ }
+
+ if ((nan == TRUE) && (legacy == TRUE)) {
+ rtt_status->rtt_config.target_list_mode = RNG_TARGET_LIST_MODE_MIX;
+ } else if (nan == TRUE) {
+ rtt_status->rtt_config.target_list_mode = RNG_TARGET_LIST_MODE_NAN;
+ } else if (legacy == TRUE) {
+ rtt_status->rtt_config.target_list_mode = RNG_TARGET_LIST_MODE_LEGACY;
+ }
+}
+
+int
+dhd_rtt_set_cfg(dhd_pub_t *dhd, rtt_config_params_t *params)
+{
+ int err = BCME_OK;
+ rtt_status_info_t *rtt_status = NULL;
+ struct net_device *dev = NULL;
+
+ NULL_CHECK(params, "params is NULL", err);
+ NULL_CHECK(dhd, "dhd is NULL", err);
+
+ dev = dhd_linux_get_primary_netdev(dhd);
+ rtt_status = GET_RTTSTATE(dhd);
+ NULL_CHECK(rtt_status, "rtt_status is NULL", err);
+ NULL_CHECK(dev, "dev is NULL", err);
+
+ DHD_RTT(("%s enter\n", __FUNCTION__));
+
+ if (!HAS_11MC_CAP(rtt_status->rtt_capa.proto)) {
+		DHD_RTT_ERR(("doesn't support RTT\n"));
+ err = BCME_ERROR;
+ goto exit;
+ }
+
+	/* check if work is being scheduled... cancel/sync if so.
+	 * A host request has higher priority, so we have to cancel any
+	 * geofence sessions in progress; for that we need to make sure the
+	 * work queue is idle and then cancel the geofence sessions
+ */
+ cancel_work_sync(&rtt_status->work);
+
+ mutex_lock(&rtt_status->rtt_mutex);
+
+ if (rtt_status->status != RTT_STOPPED) {
+ DHD_RTT_ERR(("rtt is already started, status : %d\n", rtt_status->status));
+ err = BCME_BUSY;
+ goto exit;
+ }
+ if (params->rtt_target_cnt > 0) {
+#ifdef WL_NAN
+ /* cancel ongoing geofence RTT both initiators and responders */
+ wl_cfgnan_suspend_all_geofence_rng_sessions(dev,
+ RTT_GEO_SUSPN_HOST_DIR_RTT_TRIG, 0);
+#endif /* WL_NAN */
+ } else {
+ err = BCME_BADARG;
+ goto exit;
+ }
+
+ memset(rtt_status->rtt_config.target_info, 0, TARGET_INFO_SIZE(RTT_MAX_TARGET_CNT));
+ rtt_status->rtt_config.rtt_target_cnt = params->rtt_target_cnt;
+ memcpy(rtt_status->rtt_config.target_info,
+ params->target_info, TARGET_INFO_SIZE(params->rtt_target_cnt));
+ rtt_status->status = RTT_STARTED;
+ DHD_RTT_MEM(("dhd_rtt_set_cfg: RTT Started, target_cnt = %d\n", params->rtt_target_cnt));
+
+ /* This is the starting Directed RTT index */
+ rtt_status->start_idx = dhd_rtt_set_next_target_idx(dhd, 0);
+
+ dhd_rtt_set_target_list_mode(dhd);
+
+ if (rtt_status->cur_idx < rtt_status->rtt_config.rtt_target_cnt) {
+#ifdef WL_NAN
+ if (rtt_status->rtt_config.target_list_mode == RNG_TARGET_LIST_MODE_NAN) {
+ /* reset directed cfg params */
+ dhd_rtt_nan_update_directed_setup_inprog(dhd, NULL, FALSE);
+ rtt_status->directed_cfg.directed_sessions_cnt = 0;
+
+ /*
+ * schedule proxd timeout
+ * Proxd timeout for NAN target list is scheduled as a whole,
+ * and not per target, unlike for legacy target list
+ */
+ schedule_delayed_work(&rtt_status->proxd_timeout,
+ msecs_to_jiffies(DHD_NAN_RTT_TIMER_INTERVAL_MS));
+ }
+#endif /* WL_NAN */
+ /* schedule RTT */
+ dhd_rtt_schedule_rtt_work_thread(dhd, RTT_SCHED_HOST_TRIGGER);
+ }
+exit:
+ mutex_unlock(&rtt_status->rtt_mutex);
+ return err;
+}
+
+#ifdef WL_NAN
+void
+dhd_rtt_initialize_geofence_cfg(dhd_pub_t *dhd)
+{
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+ if (!rtt_status) {
+ return;
+ }
+
+ GEOFENCE_RTT_LOCK(rtt_status);
+ memset_s(&rtt_status->geofence_cfg, sizeof(rtt_status->geofence_cfg),
+ 0, sizeof(rtt_status->geofence_cfg));
+
+ /* initialize non zero params of geofence cfg */
+ rtt_status->geofence_cfg.cur_target_idx = DHD_RTT_INVALID_TARGET_INDEX;
+ rtt_status->geofence_cfg.geofence_rtt_interval = DHD_RTT_RETRY_TIMER_INTERVAL_MS;
+ rtt_status->geofence_cfg.geofence_sessions_cnt = 0;
+
+ rtt_status->geofence_cfg.max_geofence_sessions =
+ dhd_rtt_get_max_nan_rtt_sessions_supported(dhd);
+ GEOFENCE_RTT_UNLOCK(rtt_status);
+ return;
+}
+
+#ifdef RTT_GEOFENCE_CONT
+void
+dhd_rtt_get_geofence_cont_ind(dhd_pub_t *dhd, bool* geofence_cont)
+{
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+ if (!rtt_status) {
+ return;
+ }
+ GEOFENCE_RTT_LOCK(rtt_status);
+ *geofence_cont = rtt_status->geofence_cfg.geofence_cont;
+ GEOFENCE_RTT_UNLOCK(rtt_status);
+}
+
+void
+dhd_rtt_set_geofence_cont_ind(dhd_pub_t *dhd, bool geofence_cont)
+{
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+ if (!rtt_status) {
+ return;
+ }
+ GEOFENCE_RTT_LOCK(rtt_status);
+ rtt_status->geofence_cfg.geofence_cont = geofence_cont;
+ DHD_RTT(("dhd_rtt_set_geofence_cont_override, geofence_cont = %d\n",
+ rtt_status->geofence_cfg.geofence_cont));
+ GEOFENCE_RTT_UNLOCK(rtt_status);
+}
+#endif /* RTT_GEOFENCE_CONT */
+
+#ifdef RTT_GEOFENCE_INTERVAL
+void
+dhd_rtt_set_geofence_rtt_interval(dhd_pub_t *dhd, int interval)
+{
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+ if (!rtt_status) {
+ return;
+ }
+ GEOFENCE_RTT_LOCK(rtt_status);
+ rtt_status->geofence_cfg.geofence_rtt_interval = interval;
+ DHD_RTT(("dhd_rtt_set_geofence_rtt_interval: geofence interval = %d\n",
+ rtt_status->geofence_cfg.geofence_rtt_interval));
+ GEOFENCE_RTT_UNLOCK(rtt_status);
+}
+#endif /* RTT_GEOFENCE_INTERVAL */
+
+int
+dhd_rtt_get_geofence_max_sessions(dhd_pub_t *dhd)
+{
+ int max_sessions;
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+ rtt_geofence_cfg_t* geofence_cfg = &rtt_status->geofence_cfg;
+
+ max_sessions = geofence_cfg->max_geofence_sessions;
+ if (dhd_rtt_is_geofence_setup_inprog(dhd)) {
+ /* One slot is busy with a setup in progress */
+ max_sessions -= 1;
+ }
+
+ return max_sessions;
+}
+
+/*
+ * Returns TRUE if the geofence
+ * session count is maxed out
+ */
+bool
+dhd_rtt_geofence_sessions_maxed_out(dhd_pub_t *dhd)
+{
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+ rtt_geofence_cfg_t* geofence_cfg = &rtt_status->geofence_cfg;
+ bool ret = TRUE;
+
+ if (geofence_cfg->geofence_sessions_cnt <
+ dhd_rtt_get_geofence_max_sessions(dhd)) {
+ ret = FALSE;
+ }
+
+ return ret;
+}
+
+int
+dhd_rtt_get_geofence_sessions_cnt(dhd_pub_t *dhd)
+{
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+ rtt_geofence_cfg_t* geofence_cfg = &rtt_status->geofence_cfg;
+
+ return geofence_cfg->geofence_sessions_cnt;
+}
+
+int
+dhd_rtt_update_geofence_sessions_cnt(dhd_pub_t *dhd, bool incr,
+ struct ether_addr *peer_addr)
+{
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+ rtt_geofence_cfg_t* geofence_cfg = &rtt_status->geofence_cfg;
+ int ret = BCME_OK;
+
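+ /*
+ * incr TRUE takes one session slot (fails when maxed out).
+ * On decrement, a setup still in progress with this peer is
+ * accounted via the in-progress flag rather than the counter
+ * (see dhd_rtt_get_geofence_max_sessions), so only the flag
+ * is cleared; otherwise the session count is decremented.
+ */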
+ if (incr) {
+ //ASSERT(!dhd_rtt_geofence_sessions_maxed_out(dhd));
+ if (dhd_rtt_geofence_sessions_maxed_out(dhd)) {
+ ret = BCME_ERROR;
+ goto exit;
+ }
+ geofence_cfg->geofence_sessions_cnt++;
+ } else {
+ if (peer_addr && dhd_rtt_is_geofence_setup_inprog_with_peer(dhd,
+ peer_addr)) {
+ /* Set geofence RTT in progress state to false */
+ dhd_rtt_set_geofence_setup_status(dhd, FALSE, NULL);
+ } else {
+ //ASSERT(geofence_cfg->geofence_sessions_cnt > 0);
+ if (geofence_cfg->geofence_sessions_cnt <= 0) {
+ ret = BCME_ERROR;
+ goto exit;
+ }
+ /* Decrement session count */
+ geofence_cfg->geofence_sessions_cnt--;
+ }
+ }
+ if (peer_addr) {
+ WL_INFORM_MEM(("session cnt update, upd = %d, cnt = %d, peer : "MACDBG", "
+ " ret = %d\n", incr, geofence_cfg->geofence_sessions_cnt,
+ MAC2STRDBG(peer_addr), ret));
+ } else {
+ WL_INFORM_MEM(("session cnt update, upd = %d, cnt = %d, ret = %d\n",
+ incr, geofence_cfg->geofence_sessions_cnt, ret));
+ }
+
+exit:
+ return ret;
+}
+
+int8
+dhd_rtt_get_geofence_target_cnt(dhd_pub_t *dhd)
+{
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+ if (!rtt_status) {
+ return 0;
+ }
+ return rtt_status->geofence_cfg.geofence_target_cnt;
+}
+
+/* returns geofence RTT target list Head */
+rtt_geofence_target_info_t*
+dhd_rtt_get_geofence_target_head(dhd_pub_t *dhd)
+{
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+ rtt_geofence_target_info_t* head = NULL;
+
+ if (!rtt_status) {
+ return NULL;
+ }
+
+ if (rtt_status->geofence_cfg.geofence_target_cnt) {
+ head = &rtt_status->geofence_cfg.geofence_target_info[0];
+ }
+
+ return head;
+}
+
+int8
+dhd_rtt_get_geofence_cur_target_idx(dhd_pub_t *dhd)
+{
+ int8 target_cnt = 0, cur_idx = DHD_RTT_INVALID_TARGET_INDEX;
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+
+ if (!rtt_status) {
+ goto exit;
+ }
+
+ target_cnt = rtt_status->geofence_cfg.geofence_target_cnt;
+ if (target_cnt == 0) {
+ goto exit;
+ }
+
+ cur_idx = rtt_status->geofence_cfg.cur_target_idx;
+ if (cur_idx >= target_cnt) {
+ WL_INFORM_MEM(("dhd_rtt_get_geofence_cur_target_idx: "
+ "cur_index exceeded (>=) target_cnt, cur_idx = %d, "
+ "target_cnt = %d\n", cur_idx, target_cnt));
+ ASSERT(cur_idx < target_cnt);
+ cur_idx = DHD_RTT_INVALID_TARGET_INDEX;
+ }
+
+exit:
+ return cur_idx;
+}
+
+void
+dhd_rtt_set_geofence_cur_target_idx(dhd_pub_t *dhd, int8 idx)
+{
+ int8 target_cnt = 0;
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+
+ target_cnt = rtt_status->geofence_cfg.geofence_target_cnt;
+ ASSERT(idx < target_cnt);
+ rtt_status->geofence_cfg.cur_target_idx = idx;
+ return;
+}
+
+void
+dhd_rtt_move_geofence_cur_target_idx_to_next(dhd_pub_t *dhd)
+{
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+
+ if (!rtt_status) {
+ return;
+ }
+
+ if (rtt_status->geofence_cfg.geofence_target_cnt == 0) {
+ /* Invalidate current idx if no targets */
+ rtt_status->geofence_cfg.cur_target_idx =
+ DHD_RTT_INVALID_TARGET_INDEX;
+ /* Cancel pending retry timer if any */
+ if (delayed_work_pending(&rtt_status->rtt_retry_timer)) {
+ cancel_delayed_work(&rtt_status->rtt_retry_timer);
+ }
+ return;
+ }
+ rtt_status->geofence_cfg.cur_target_idx++;
+
+ if (rtt_status->geofence_cfg.cur_target_idx >=
+ rtt_status->geofence_cfg.geofence_target_cnt) {
+ /* Reset once all targets done */
+ rtt_status->geofence_cfg.cur_target_idx = 0;
+ }
+}
+
+/* returns geofence current RTT target */
+rtt_geofence_target_info_t*
+dhd_rtt_get_geofence_current_target(dhd_pub_t *dhd)
+{
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+ rtt_geofence_target_info_t* cur_target = NULL;
+ int cur_idx = 0;
+
+ if (!rtt_status) {
+ return NULL;
+ }
+
+ cur_idx = dhd_rtt_get_geofence_cur_target_idx(dhd);
+ if (cur_idx >= 0) {
+ cur_target = &rtt_status->geofence_cfg.geofence_target_info[cur_idx];
+ }
+
+ return cur_target;
+}
+
+/* returns geofence target from list for the peer */
+rtt_geofence_target_info_t*
+dhd_rtt_get_geofence_target(dhd_pub_t *dhd, struct ether_addr* peer_addr, int8 *index)
+{
+ int8 i;
+ rtt_status_info_t *rtt_status;
+ int target_cnt;
+ rtt_geofence_target_info_t *geofence_target_info, *tgt = NULL;
+
+ rtt_status = GET_RTTSTATE(dhd);
+
+ if (!rtt_status) {
+ return NULL;
+ }
+
+ target_cnt = rtt_status->geofence_cfg.geofence_target_cnt;
+ geofence_target_info = rtt_status->geofence_cfg.geofence_target_info;
+
+ /* Loop through to find target */
+ for (i = 0; i < target_cnt; i++) {
+ if (geofence_target_info[i].valid == FALSE) {
+ break;
+ }
+ if (!memcmp(peer_addr, &geofence_target_info[i].peer_addr,
+ ETHER_ADDR_LEN)) {
+ *index = i;
+ tgt = &geofence_target_info[i];
+ }
+ }
+ if (!tgt) {
+ DHD_RTT(("dhd_rtt_get_geofence_target: Target not found in list,"
+ " MAC ADDR: "MACDBG" \n", MAC2STRDBG(peer_addr)));
+ }
+ return tgt;
+}
+
+/* add geofence target to the target list */
+int
+dhd_rtt_add_geofence_target(dhd_pub_t *dhd, rtt_geofence_target_info_t *target)
+{
+ int err = BCME_OK;
+ rtt_status_info_t *rtt_status;
+ rtt_geofence_target_info_t *geofence_target_info;
+ int8 geofence_target_cnt, index;
+
+ NULL_CHECK(dhd, "dhd is NULL", err);
+ rtt_status = GET_RTTSTATE(dhd);
+ NULL_CHECK(rtt_status, "rtt_status is NULL", err);
+
+ GEOFENCE_RTT_LOCK(rtt_status);
+
+ /* Get the geofence_target via peer addr; the index param is a dummy here */
+ geofence_target_info = dhd_rtt_get_geofence_target(dhd, &target->peer_addr, &index);
+ if (geofence_target_info) {
+ DHD_RTT(("Duplicate geofencing RTT add request dropped\n"));
+ err = BCME_OK;
+ goto exit;
+ }
+
+ geofence_target_cnt = rtt_status->geofence_cfg.geofence_target_cnt;
+ if (geofence_target_cnt >= RTT_MAX_GEOFENCE_TARGET_CNT) {
+ DHD_RTT(("Queue full, Geofencing RTT add request dropped\n"));
+ err = BCME_NORESOURCE;
+ goto exit;
+ }
+
+ /* Add Geofence RTT request and increment target count */
+ geofence_target_info = rtt_status->geofence_cfg.geofence_target_info;
+ /* src and dest buffer lengths are the same; both point to the
+ * same statically allocated data structure type
+ */
+ (void)memcpy_s(&geofence_target_info[geofence_target_cnt],
+ sizeof(geofence_target_info[geofence_target_cnt]), target,
+ sizeof(*target));
+ geofence_target_info[geofence_target_cnt].valid = TRUE;
+ rtt_status->geofence_cfg.geofence_target_cnt++;
+ if (rtt_status->geofence_cfg.geofence_target_cnt == 1) {
+ /* Adding first target */
+ rtt_status->geofence_cfg.cur_target_idx = 0;
+ }
+
+ WL_INFORM_MEM(("dhd_rtt_add_geofence_target: " MACDBG
+ ", cur_idx = %d, total cnt = %d\n", MAC2STRDBG(&target->peer_addr),
+ rtt_status->geofence_cfg.cur_target_idx,
+ rtt_status->geofence_cfg.geofence_target_cnt));
+
+exit:
+ GEOFENCE_RTT_UNLOCK(rtt_status);
+ return err;
+}
+
+/* removes geofence target from the target list */
+int
+dhd_rtt_remove_geofence_target(dhd_pub_t *dhd, struct ether_addr *peer_addr)
+{
+ int err = BCME_OK;
+ rtt_status_info_t *rtt_status;
+ rtt_geofence_target_info_t *geofence_target_info;
+ int8 geofence_target_cnt, j, index = 0;
+ struct net_device *dev;
+ struct bcm_cfg80211 *cfg;
+
+ NULL_CHECK(dhd, "dhd is NULL", err);
+ rtt_status = GET_RTTSTATE(dhd);
+ NULL_CHECK(rtt_status, "rtt_status is NULL", err);
+
+ dev = dhd_linux_get_primary_netdev(dhd);
+ cfg = wl_get_cfg(dev);
+
+ GEOFENCE_RTT_LOCK(rtt_status);
+
+ geofence_target_cnt = dhd_rtt_get_geofence_target_cnt(dhd);
+ if (geofence_target_cnt == 0) {
+ DHD_RTT(("Queue Empty, Geofencing RTT remove request dropped\n"));
+ goto exit;
+ }
+
+ /* Get the geofence_target via peer addr */
+ geofence_target_info = dhd_rtt_get_geofence_target(dhd, peer_addr, &index);
+ if (geofence_target_info == NULL) {
+ DHD_RTT(("Geofencing RTT target not found, remove request dropped\n"));
+ err = BCME_NOTFOUND;
+ goto exit;
+ }
+
+ /* left-shift all the valid entries, as we don't keep holes in the list */
+ for (j = index; j < geofence_target_cnt; j++) {
+ /*
+ * src and dest buffer lengths are the same; both point
+ * to the same statically allocated structure type
+ */
+ if ((j + 1) < geofence_target_cnt) {
+ (void)memcpy_s(&geofence_target_info[j], sizeof(geofence_target_info[j]),
+ &geofence_target_info[j + 1], sizeof(geofence_target_info[j + 1]));
+ } else {
+ /* reset the last target info */
+ bzero(&geofence_target_info[j], sizeof(rtt_geofence_target_info_t));
+ }
+ }
+
+ rtt_status->geofence_cfg.geofence_target_cnt--;
+ if (rtt_status->geofence_cfg.geofence_target_cnt == 0) {
+ rtt_status->geofence_cfg.cur_target_idx =
+ DHD_RTT_INVALID_TARGET_INDEX;
+ } else {
+ if (rtt_status->geofence_cfg.geofence_target_cnt ==
+ rtt_status->geofence_cfg.cur_target_idx) {
+ /*
+ * Wrap around to the first target again, since the
+ * removed entry was both the last target and the
+ * current target (idx)
+ */
+ rtt_status->geofence_cfg.cur_target_idx = 0;
+ }
+ wl_cfgnan_update_geofence_target_idx(cfg);
+ }
+
+ WL_INFORM_MEM(("dhd_rtt_remove_geofence_target: " MACDBG
+ ", cur_idx = %d, target_cnt = %d\n", MAC2STRDBG(peer_addr),
+ rtt_status->geofence_cfg.cur_target_idx,
+ rtt_status->geofence_cfg.geofence_target_cnt));
+
+exit:
+ GEOFENCE_RTT_UNLOCK(rtt_status);
+ return err;
+}
+
+/* deletes/empty geofence target list */
+int
+dhd_rtt_delete_geofence_target_list(dhd_pub_t *dhd)
+{
+ rtt_status_info_t *rtt_status;
+
+ int err = BCME_OK;
+
+ NULL_CHECK(dhd, "dhd is NULL", err);
+ rtt_status = GET_RTTSTATE(dhd);
+ NULL_CHECK(rtt_status, "rtt_status is NULL", err);
+ GEOFENCE_RTT_LOCK(rtt_status);
+ memset_s(&rtt_status->geofence_cfg, sizeof(rtt_geofence_cfg_t),
+ 0, sizeof(rtt_geofence_cfg_t));
+ GEOFENCE_RTT_UNLOCK(rtt_status);
+ return err;
+}
+
+rtt_geofence_setup_status_t*
+dhd_rtt_get_geofence_setup_status(dhd_pub_t *dhd)
+{
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+ rtt_geofence_cfg_t* geofence_cfg = &rtt_status->geofence_cfg;
+ rtt_geofence_setup_status_t* rng_setup_status;
+
+ rng_setup_status = &geofence_cfg->geofence_setup_status;
+ return rng_setup_status;
+}
+
+bool
+dhd_rtt_is_geofence_setup_inprog(dhd_pub_t *dhd)
+{
+ rtt_geofence_setup_status_t* rng_setup_status;
+
+ rng_setup_status = dhd_rtt_get_geofence_setup_status(dhd);
+
+ return rng_setup_status->geofence_setup_inprog;
+}
+
+bool
+dhd_rtt_is_geofence_setup_inprog_with_peer(dhd_pub_t *dhd,
+ struct ether_addr *peer_addr)
+{
+ rtt_geofence_setup_status_t* rng_setup_status;
+ struct nan_ranging_inst *rng_inst = NULL;
+ struct net_device *dev = dhd_linux_get_primary_netdev(dhd);
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ bool ret = FALSE;
+
+ rng_setup_status = dhd_rtt_get_geofence_setup_status(dhd);
+ if (rng_setup_status->geofence_setup_inprog == FALSE) {
+ goto exit;
+ }
+ rng_inst = wl_cfgnan_check_for_ranging(cfg, peer_addr);
+ if (rng_inst && (rng_inst == rng_setup_status->rng_inst)) {
+ ret = TRUE;
+ }
+
+exit:
+ return ret;
+}
+
+/*
+ * Call with inprog TRUE and the corresponding peer_addr to
+ * take the setup lock; call with inprog FALSE and peer_addr
+ * NULL to release it
+ */
+void
+dhd_rtt_set_geofence_setup_status(dhd_pub_t *dhd, bool inprog,
+ struct ether_addr *peer_addr)
+{
+ struct nan_ranging_inst *rng_inst = NULL;
+ struct net_device *dev = dhd_linux_get_primary_netdev(dhd);
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ rtt_geofence_setup_status_t* rng_setup_status;
+
+ rng_setup_status = dhd_rtt_get_geofence_setup_status(dhd);
+ rng_setup_status->geofence_setup_inprog = inprog;
+ if (inprog) {
+ ASSERT(peer_addr);
+ rng_inst = wl_cfgnan_check_for_ranging(cfg, peer_addr);
+ ASSERT(rng_inst);
+ if (rng_inst) {
+ rng_setup_status->rng_inst = rng_inst;
+ }
+ } else {
+ rng_setup_status->rng_inst = NULL;
+ }
+}
+
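+/*
+ * Schedule ranging for the current geofence target: bail out if NAN
+ * is disabled, sessions are maxed out or the target list is empty;
+ * otherwise trigger geofencing ranging via the cfg80211 layer and
+ * mark setup as in progress
+ */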
+int
+dhd_rtt_sched_geofencing_target(dhd_pub_t *dhd)
+{
+ rtt_geofence_target_info_t *geofence_target_info;
+ struct net_device *dev = dhd_linux_get_primary_netdev(dhd);
+ int ret = BCME_OK;
+ u8 rtt_invalid_reason = RTT_STATE_VALID;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+ rtt_geofence_cfg_t* geofence_cfg = &rtt_status->geofence_cfg;
+ int8 target_cnt = 0, cur_idx = DHD_RTT_INVALID_TARGET_INDEX;
+
+ NAN_MUTEX_LOCK();
+
+ if (wl_cfgnan_is_enabled(cfg) == FALSE) {
+ ret = BCME_NOTENABLED;
+ goto done;
+ }
+
+ DHD_RTT_ERR(("dhd_rtt_sched_geofencing_target: "
+ " sched_reason = %d, sessions cnt = %d, cur target idx = %d\n",
+ rtt_status->rtt_sched_reason, geofence_cfg->geofence_sessions_cnt,
+ rtt_status->geofence_cfg.cur_target_idx));
+
+ //ASSERT(!dhd_rtt_geofence_sessions_maxed_out(dhd));
+ if (dhd_rtt_geofence_sessions_maxed_out(dhd)) {
+ ret = BCME_NORESOURCE;
+ goto done;
+ }
+
+ target_cnt = dhd_rtt_get_geofence_target_cnt(dhd);
+
+ if (target_cnt == 0) {
+ DHD_RTT_MEM(("dhd_rtt_sched_geofencing_target: "
+ "No geofence targets to schedule\n"));
+ goto done;
+ }
+
+ cur_idx = dhd_rtt_get_geofence_cur_target_idx(dhd);
+ if (cur_idx == DHD_RTT_INVALID_TARGET_INDEX) {
+ /*
+ * This can be a valid scenario: cur_idx may get
+ * invalidated between the RTT thread being scheduled
+ * and the thread actually executing
+ */
+ DHD_RTT_MEM(("dhd_rtt_sched_geofencing_target: "
+ "cur idx is invalid, bail out\n"));
+ goto done;
+ }
+
+ /* Get current geofencing target */
+ geofence_target_info = dhd_rtt_get_geofence_current_target(dhd);
+ //ASSERT(geofence_target_info);
+
+ /* call the cfg API for triggering geofencing RTT */
+ if (geofence_target_info) {
+ /* check for dp/others concurrency */
+ rtt_invalid_reason = dhd_rtt_invalid_states(dev,
+ &geofence_target_info->peer_addr);
+ if ((rtt_invalid_reason != RTT_STATE_VALID) ||
+ wl_cfgnan_check_role_concurrency(cfg,
+ &geofence_target_info->peer_addr)) {
+ /* TODO: see if we can move to the next target,
+ * i.e., if the invalid state is due to a DP with the same peer
+ */
+ ret = BCME_BUSY;
+ DHD_RTT_ERR(("DRV State is not valid for RTT, "
+ "invalid_state = %d\n", rtt_invalid_reason));
+ goto done;
+ }
+
+ ret = wl_cfgnan_trigger_geofencing_ranging(dev,
+ &geofence_target_info->peer_addr);
+ if (ret == BCME_OK) {
+ dhd_rtt_set_geofence_setup_status(dhd, TRUE,
+ &geofence_target_info->peer_addr);
+ }
+ } else {
+ DHD_RTT(("No RTT target to schedule\n"));
+ ret = BCME_NOTFOUND;
+ }
+
+done:
+ NAN_MUTEX_UNLOCK();
+ return ret;
+}
+#endif /* WL_NAN */
+
+#ifdef WL_CFG80211
+#ifdef WL_NAN
+static void
+dhd_rtt_retry(dhd_pub_t *dhd)
+{
+ /* Attempt RTT for current geofence target */
+ wl_cfgnan_reset_geofence_ranging_for_cur_target(dhd,
+ RTT_SCHED_RTT_RETRY_GEOFENCE);
+}
+
+static void
+dhd_rtt_retry_work(struct work_struct *work)
+{
+ rtt_status_info_t *rtt_status = NULL;
+ dhd_pub_t *dhd = NULL;
+ struct net_device *dev = NULL;
+ struct bcm_cfg80211 *cfg = NULL;
+
+ if (!work) {
+ goto exit;
+ }
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+ rtt_status = container_of(work, rtt_status_info_t, rtt_retry_timer.work);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+
+ dhd = rtt_status->dhd;
+ if (dhd == NULL) {
+ DHD_RTT_ERR(("%s : dhd is NULL\n", __FUNCTION__));
+ goto exit;
+ }
+ dev = dhd_linux_get_primary_netdev(dhd);
+ cfg = wl_get_cfg(dev);
+
+ NAN_MUTEX_LOCK();
+ (void) dhd_rtt_retry(dhd);
+ NAN_MUTEX_UNLOCK();
+
+exit:
+ return;
+}
+#endif /* WL_NAN */
+
+/*
+ * Returns zero (0) when the RTT state is valid,
+ * i.e. RTT is applicable
+ */
+uint8
+dhd_rtt_invalid_states(struct net_device *ndev, struct ether_addr *peer_addr)
+{
+ uint8 invalid_reason = RTT_STATE_VALID;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+
+ UNUSED_PARAMETER(cfg);
+ UNUSED_PARAMETER(invalid_reason);
+
+ /* Make sure peer addr is not NULL in caller */
+ ASSERT(peer_addr);
+ /*
+ * Keep adding prohibited driver states here: only generic
+ * conditions which block all RTTs, like an NDP connection
+ */
+
+#ifdef WL_NAN
+ if (wl_cfgnan_data_dp_exists_with_peer(cfg, peer_addr)) {
+ invalid_reason = RTT_STATE_INV_REASON_NDP_EXIST;
+ DHD_RTT(("NDP in progress/connected, RTT prohibited\n"));
+ goto exit;
+ }
+#endif /* WL_NAN */
+
+ /* Remove below #defines once more exit calls come */
+#ifdef WL_NAN
+exit:
+#endif /* WL_NAN */
+ return invalid_reason;
+}
+#endif /* WL_CFG80211 */
+
+void
+dhd_rtt_schedule_rtt_work_thread(dhd_pub_t *dhd, int sched_reason)
+{
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+ if (rtt_status == NULL) {
+ ASSERT(0);
+ } else {
+ rtt_status->rtt_sched_reason = sched_reason;
+ rtt_status->rtt_sched = TRUE;
+ schedule_work(&rtt_status->work);
+ }
+ return;
+}
+
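+/*
+ * Disable RTT for the given MAC list; when all_cancel is set, flush
+ * the results cache, notify registered callbacks and tear down the
+ * FTM (and NAN) sessions
+ */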
+int
+dhd_rtt_stop(dhd_pub_t *dhd, struct ether_addr *mac_list, int mac_cnt)
+{
+ int err = BCME_OK;
+#ifdef WL_CFG80211
+ int i = 0, j = 0;
+ rtt_status_info_t *rtt_status;
+ rtt_results_header_t *entry, *next;
+ rtt_result_t *rtt_result, *next2;
+ struct rtt_noti_callback *iter;
+
+ NULL_CHECK(dhd, "dhd is NULL", err);
+ rtt_status = GET_RTTSTATE(dhd);
+ NULL_CHECK(rtt_status, "rtt_status is NULL", err);
+ if (rtt_status->status == RTT_STOPPED) {
+ DHD_RTT_ERR(("rtt is not started\n"));
+ return BCME_OK;
+ }
+ DHD_RTT(("%s enter\n", __FUNCTION__));
+ mutex_lock(&rtt_status->rtt_mutex);
+ for (i = 0; i < mac_cnt; i++) {
+ for (j = 0; j < rtt_status->rtt_config.rtt_target_cnt; j++) {
+ if (!bcmp(&mac_list[i], &rtt_status->rtt_config.target_info[j].addr,
+ ETHER_ADDR_LEN)) {
+ rtt_status->rtt_config.target_info[j].disable = TRUE;
+ }
+ }
+ }
+ if (rtt_status->all_cancel) {
+ /* cancel all of request */
+ rtt_status->status = RTT_STOPPED;
+ DHD_RTT(("current RTT process is cancelled\n"));
+ /* remove the rtt results in cache */
+ if (!list_empty(&rtt_status->rtt_results_cache)) {
+ /* Iterate rtt_results_header list */
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ list_for_each_entry_safe(entry, next,
+ &rtt_status->rtt_results_cache, list) {
+ list_del(&entry->list);
+ /* Iterate rtt_result list */
+ list_for_each_entry_safe(rtt_result, next2,
+ &entry->result_list, list) {
+ list_del(&rtt_result->list);
+ MFREE(dhd->osh, rtt_result,
+ sizeof(rtt_result_t));
+ }
+ MFREE(dhd->osh, entry, sizeof(rtt_results_header_t));
+ }
+ GCC_DIAGNOSTIC_POP();
+ }
+ /* send the rtt complete event to wake up the user process */
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ list_for_each_entry(iter, &rtt_status->noti_fn_list, list) {
+ GCC_DIAGNOSTIC_POP();
+ iter->noti_fn(iter->ctx, &rtt_status->rtt_results_cache);
+ }
+ /* reinitialize the HEAD */
+ INIT_LIST_HEAD(&rtt_status->rtt_results_cache);
+ /* clear information for rtt_config */
+ rtt_status->rtt_config.rtt_target_cnt = 0;
+ memset(rtt_status->rtt_config.target_info, 0,
+ TARGET_INFO_SIZE(RTT_MAX_TARGET_CNT));
+ rtt_status->cur_idx = 0;
+ /* Cancel pending proxd timeout work if any */
+ if (delayed_work_pending(&rtt_status->proxd_timeout)) {
+ cancel_delayed_work(&rtt_status->proxd_timeout);
+ }
+ dhd_rtt_delete_session(dhd, FTM_DEFAULT_SESSION);
+#ifdef WL_NAN
+ dhd_rtt_delete_nan_session(dhd);
+#endif /* WL_NAN */
+ dhd_rtt_ftm_enable(dhd, FALSE);
+ }
+ mutex_unlock(&rtt_status->rtt_mutex);
+#endif /* WL_CFG80211 */
+ return err;
+}
+
+#ifdef WL_CFG80211
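+/*
+ * Proxd timeout handler: with DHD_DUMP_ON_RTT_TIMEOUT this triggers a
+ * memdump for debug; otherwise it cancels the stuck session(s),
+ * creates failure results and ends the RTT session
+ */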
+static void
+dhd_rtt_timeout(dhd_pub_t *dhd)
+{
+ rtt_status_info_t *rtt_status;
+#ifndef DHD_DUMP_ON_RTT_TIMEOUT
+ rtt_target_info_t *rtt_target = NULL;
+ rtt_target_info_t *rtt_target_info = NULL;
+#ifdef WL_NAN
+ int8 idx;
+ nan_ranging_inst_t *ranging_inst = NULL;
+ int ret = BCME_OK;
+ uint32 status;
+ struct net_device *ndev = dhd_linux_get_primary_netdev(dhd);
+ struct bcm_cfg80211 *cfg = wiphy_priv(ndev->ieee80211_ptr->wiphy);
+#endif /* WL_NAN */
+#endif /* !DHD_DUMP_ON_RTT_TIMEOUT */
+
+ rtt_status = GET_RTTSTATE(dhd);
+ if (!rtt_status) {
+ DHD_RTT_ERR(("Proxd timer expired but no RTT status\n"));
+ goto exit;
+ }
+
+ if (RTT_IS_STOPPED(rtt_status)) {
+ DHD_RTT_ERR(("Proxd timer expired but no RTT Request\n"));
+ goto exit;
+ }
+
+#ifdef DHD_DUMP_ON_RTT_TIMEOUT
+ /* Dump, and Panic depending on memdump.info */
+#ifdef BCMDONGLEHOST
+ if (dhd_query_bus_erros(dhd)) {
+ goto exit;
+ }
+#ifdef DHD_FW_COREDUMP
+ if (dhd->memdump_enabled) {
+ /* Behave based on user memdump info */
+ dhd->memdump_type = DUMP_TYPE_PROXD_TIMEOUT;
+ dhd_bus_mem_dump(dhd);
+ }
+#endif /* DHD_FW_COREDUMP */
+#endif /* BCMDONGLEHOST */
+#else /* DHD_DUMP_ON_RTT_TIMEOUT */
+#ifdef WL_NAN
+ if (rtt_status->rtt_config.target_list_mode == RNG_TARGET_LIST_MODE_NAN) {
+ for (idx = rtt_status->start_idx;
+ idx < rtt_status->rtt_config.rtt_target_cnt; idx++) {
+ rtt_target = &rtt_status->rtt_config.target_info[idx];
+ if ((!rtt_target->disable) &&
+ (!dhd_rtt_get_report_header(rtt_status,
+ NULL, &rtt_target->addr))) {
+ if (wl_cfgnan_ranging_is_in_prog_for_peer(cfg, &rtt_target->addr)) {
+ ranging_inst = wl_cfgnan_check_for_ranging(cfg,
+ &rtt_target->addr);
+ ret = wl_cfgnan_cancel_ranging(ndev, cfg,
+ &ranging_inst->range_id,
+ NAN_RNG_TERM_FLAG_IMMEDIATE, &status);
+ if (unlikely(ret) || unlikely(status)) {
+ WL_ERR(("%s:nan range cancel failed ret = %d "
+ "status = %d\n", __FUNCTION__,
+ ret, status));
+ }
+ }
+ dhd_rtt_create_failure_result(rtt_status, &rtt_target->addr);
+ }
+ }
+ dhd_rtt_handle_rtt_session_end(dhd);
+ /* reset directed cfg params */
+ rtt_status->directed_cfg.directed_setup_status.rng_inst = NULL;
+ rtt_status->directed_cfg.directed_setup_status.directed_na_setup_inprog = FALSE;
+ rtt_status->directed_cfg.directed_sessions_cnt = 0;
+ } else
+#endif /* WL_NAN */
+ {
+ /* Cancel RTT for target and proceed to next target */
+ rtt_target_info = rtt_status->rtt_config.target_info;
+ if ((!rtt_target_info) ||
+ (rtt_status->cur_idx >= rtt_status->rtt_config.rtt_target_cnt)) {
+ goto exit;
+ }
+ rtt_target = &rtt_target_info[rtt_status->cur_idx];
+ WL_ERR(("Proxd timer expired for Target: "MACDBG" \n",
+ MAC2STRDBG(&rtt_target->addr)));
+ /* For Legacy RTT */
+ dhd_rtt_delete_session(dhd, FTM_DEFAULT_SESSION);
+ dhd_rtt_create_failure_result(rtt_status, &rtt_target->addr);
+ dhd_rtt_handle_rtt_session_end(dhd);
+ }
+#endif /* DHD_DUMP_ON_RTT_TIMEOUT */
+exit:
+ return;
+}
+
+static void
+dhd_rtt_timeout_work(struct work_struct *work)
+{
+ rtt_status_info_t *rtt_status = NULL;
+ dhd_pub_t *dhd = NULL;
+
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+ rtt_status = container_of(work, rtt_status_info_t, proxd_timeout.work);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+
+ dhd = rtt_status->dhd;
+ if (dhd == NULL) {
+ DHD_RTT_ERR(("%s : dhd is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ mutex_lock(&rtt_status->rtt_mutex);
+ (void) dhd_rtt_timeout(dhd);
+ mutex_unlock(&rtt_status->rtt_mutex);
+}
+
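+/* Build the ratespec TLV from the target's preamble and BW request;
+ * when either field is unsupported, fall back to the firmware
+ * default (use_default)
+ */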
+static void
+dhd_rtt_set_ftm_config_ratespec(ftm_config_param_info_t *ftm_params,
+ int *ftm_param_cnt, rtt_target_info_t *rtt_target)
+{
+ bool use_default = FALSE;
+ int nss;
+ int mcs;
+ uint32 rspec = 0;
+
+ if (!(rtt_target->bw && rtt_target->preamble)) {
+ goto exit;
+ }
+ switch (rtt_target->preamble) {
+ case RTT_PREAMBLE_LEGACY:
+ rspec |= WL_RSPEC_ENCODE_RATE; /* 11abg */
+ rspec |= WL_RATE_6M;
+ break;
+ case RTT_PREAMBLE_HT:
+ rspec |= WL_RSPEC_ENCODE_HT; /* 11n HT */
+ mcs = 0; /* default MCS 0 */
+ rspec |= mcs;
+ break;
+ case RTT_PREAMBLE_VHT:
+ rspec |= WL_RSPEC_ENCODE_VHT; /* 11ac VHT */
+ mcs = 0; /* default MCS 0 */
+ nss = 1; /* default Nss = 1 */
+ rspec |= (nss << WL_RSPEC_VHT_NSS_SHIFT) | mcs;
+ break;
+ default:
+ DHD_RTT(("doesn't support this preamble : %d\n",
+ rtt_target->preamble));
+ use_default = TRUE;
+ break;
+ }
+ switch (rtt_target->bw) {
+ case RTT_BW_20:
+ rspec |= WL_RSPEC_BW_20MHZ;
+ break;
+ case RTT_BW_40:
+ rspec |= WL_RSPEC_BW_40MHZ;
+ break;
+ case RTT_BW_80:
+ rspec |= WL_RSPEC_BW_80MHZ;
+ break;
+ default:
+ DHD_RTT(("doesn't support this BW : %d\n",
+ rtt_target->bw));
+ use_default = TRUE;
+ break;
+ }
+ if (!use_default) {
+ ftm_params[*ftm_param_cnt].data32 = htol32(rspec);
+ ftm_params[*ftm_param_cnt].tlvid =
+ WL_PROXD_TLV_ID_RATESPEC;
+ *ftm_param_cnt = *ftm_param_cnt + 1;
+ DHD_RTT((">\t ratespec : %d\n", rspec));
+ }
+
+exit:
+ return;
+}
+
+static void
+dhd_rtt_set_ftm_config_param(ftm_config_param_info_t *ftm_params,
+ int *ftm_param_cnt, rtt_target_info_t *rtt_target, uint16 tlvid)
+{
+ char eabuf[ETHER_ADDR_STR_LEN];
+ char chanbuf[CHANSPEC_STR_LEN];
+
+ switch (tlvid) {
+ case WL_PROXD_TLV_ID_CUR_ETHER_ADDR:
+ /* local mac address */
+ if (!ETHER_ISNULLADDR(rtt_target->local_addr.octet)) {
+ ftm_params[*ftm_param_cnt].mac_addr = rtt_target->local_addr;
+ ftm_params[*ftm_param_cnt].tlvid = WL_PROXD_TLV_ID_CUR_ETHER_ADDR;
+ *ftm_param_cnt = *ftm_param_cnt + 1;
+ bcm_ether_ntoa(&rtt_target->local_addr, eabuf);
+ DHD_RTT((">\t local %s\n", eabuf));
+ }
+ break;
+ case WL_PROXD_TLV_ID_PEER_MAC:
+ /* target's mac address */
+ if (!ETHER_ISNULLADDR(rtt_target->addr.octet)) {
+ ftm_params[*ftm_param_cnt].mac_addr = rtt_target->addr;
+ ftm_params[*ftm_param_cnt].tlvid = WL_PROXD_TLV_ID_PEER_MAC;
+ *ftm_param_cnt = *ftm_param_cnt + 1;
+ bcm_ether_ntoa(&rtt_target->addr, eabuf);
+ DHD_RTT((">\t target %s\n", eabuf));
+ }
+ break;
+ case WL_PROXD_TLV_ID_CHANSPEC:
+ /* target's chanspec */
+ if (rtt_target->chanspec) {
+ ftm_params[*ftm_param_cnt].chanspec =
+ htol32((uint32)rtt_target->chanspec);
+ ftm_params[*ftm_param_cnt].tlvid = WL_PROXD_TLV_ID_CHANSPEC;
+ *ftm_param_cnt = *ftm_param_cnt + 1;
+ wf_chspec_ntoa(rtt_target->chanspec, chanbuf);
+ DHD_RTT((">\t chanspec : %s\n", chanbuf));
+ }
+ break;
+ case WL_PROXD_TLV_ID_NUM_BURST:
+ /* num-burst */
+ if (rtt_target->num_burst) {
+ ftm_params[*ftm_param_cnt].data16 = htol16(rtt_target->num_burst);
+ ftm_params[*ftm_param_cnt].tlvid = WL_PROXD_TLV_ID_NUM_BURST;
+ *ftm_param_cnt = *ftm_param_cnt + 1;
+ DHD_RTT((">\t num of burst : %d\n", rtt_target->num_burst));
+ }
+ break;
+ case WL_PROXD_TLV_ID_BURST_NUM_FTM:
+ /* number of frame per burst */
+ rtt_target->num_frames_per_burst = FTM_DEFAULT_CNT_80M;
+ if (CHSPEC_IS80(rtt_target->chanspec)) {
+ rtt_target->num_frames_per_burst = FTM_DEFAULT_CNT_80M;
+ } else if (CHSPEC_IS40(rtt_target->chanspec)) {
+ rtt_target->num_frames_per_burst = FTM_DEFAULT_CNT_40M;
+ } else if (CHSPEC_IS20(rtt_target->chanspec)) {
+ rtt_target->num_frames_per_burst = FTM_DEFAULT_CNT_20M;
+ }
+ ftm_params[*ftm_param_cnt].data16 =
+ htol16(rtt_target->num_frames_per_burst);
+ ftm_params[*ftm_param_cnt].tlvid =
+ WL_PROXD_TLV_ID_BURST_NUM_FTM;
+ *ftm_param_cnt = *ftm_param_cnt + 1;
+ DHD_RTT((">\t number of frame per burst : %d\n",
+ rtt_target->num_frames_per_burst));
+ break;
+ case WL_PROXD_TLV_ID_FTM_RETRIES:
+ /* FTM retry count */
+ if (rtt_target->num_retries_per_ftm) {
+ ftm_params[*ftm_param_cnt].data8 = rtt_target->num_retries_per_ftm;
+ ftm_params[*ftm_param_cnt].tlvid = WL_PROXD_TLV_ID_FTM_RETRIES;
+ *ftm_param_cnt = *ftm_param_cnt + 1;
+ DHD_RTT((">\t retry count of FTM : %d\n",
+ rtt_target->num_retries_per_ftm));
+ }
+ break;
+ case WL_PROXD_TLV_ID_FTM_REQ_RETRIES:
+ /* FTM Request retry count */
+ if (rtt_target->num_retries_per_ftmr) {
+ ftm_params[*ftm_param_cnt].data8 = rtt_target->num_retries_per_ftmr;
+ ftm_params[*ftm_param_cnt].tlvid =
+ WL_PROXD_TLV_ID_FTM_REQ_RETRIES;
+ *ftm_param_cnt = *ftm_param_cnt + 1;
+ DHD_RTT((">\t retry count of FTM Req : %d\n",
+ rtt_target->num_retries_per_ftmr));
+ }
+ break;
+ case WL_PROXD_TLV_ID_BURST_PERIOD:
+ /* burst-period */
+ if (rtt_target->burst_period) {
+ ftm_params[*ftm_param_cnt].data_intvl.intvl =
+ htol32(rtt_target->burst_period); /* ms */
+ ftm_params[*ftm_param_cnt].data_intvl.tmu = WL_PROXD_TMU_MILLI_SEC;
+ ftm_params[*ftm_param_cnt].tlvid = WL_PROXD_TLV_ID_BURST_PERIOD;
+ *ftm_param_cnt = *ftm_param_cnt + 1;
+ DHD_RTT((">\t burst period : %d ms\n", rtt_target->burst_period));
+ }
+ break;
+ case WL_PROXD_TLV_ID_BURST_DURATION:
+ /* burst-duration */
+ rtt_target->burst_duration = FTM_MAX_BURST_DUR_TMO_MS;
+ if (rtt_target->burst_duration) {
+ ftm_params[*ftm_param_cnt].data_intvl.intvl =
+ htol32(rtt_target->burst_duration); /* ms */
+ ftm_params[*ftm_param_cnt].data_intvl.tmu = WL_PROXD_TMU_MILLI_SEC;
+ ftm_params[*ftm_param_cnt].tlvid = WL_PROXD_TLV_ID_BURST_DURATION;
+ *ftm_param_cnt = *ftm_param_cnt + 1;
+ DHD_RTT((">\t burst duration : %d ms\n",
+ rtt_target->burst_duration));
+ }
+ break;
+ case WL_PROXD_TLV_ID_BURST_TIMEOUT:
+ /* burst-timeout */
+ rtt_target->burst_timeout = FTM_MAX_BURST_DUR_TMO_MS;
+ if (rtt_target->burst_timeout) {
+ ftm_params[*ftm_param_cnt].data_intvl.intvl =
+ htol32(rtt_target->burst_timeout); /* ms */
+ ftm_params[*ftm_param_cnt].data_intvl.tmu = WL_PROXD_TMU_MILLI_SEC;
+ ftm_params[*ftm_param_cnt].tlvid = WL_PROXD_TLV_ID_BURST_TIMEOUT;
+ *ftm_param_cnt = *ftm_param_cnt + 1;
+ DHD_RTT((">\t burst timeout : %d ms\n",
+ rtt_target->burst_timeout));
+ }
+ break;
+ case WL_PROXD_TLV_ID_RATESPEC:
+ dhd_rtt_set_ftm_config_ratespec(ftm_params,
+ ftm_param_cnt, rtt_target);
+ break;
+ case WL_PROXD_TLV_ID_EVENT_MASK:
+ {
+ /* set burst end and session end in ev mask by def */
+ uint32 event_mask = ((1 << WL_PROXD_EVENT_BURST_END) |
+ (1 << WL_PROXD_EVENT_SESSION_END));
+ /* only burst end for directed nan-rtt target */
+ if (rtt_target && (rtt_target->peer == RTT_PEER_NAN)) {
+ event_mask = (1 << WL_PROXD_EVENT_BURST_END);
+ }
+ ftm_params[*ftm_param_cnt].event_mask = event_mask;
+ ftm_params[*ftm_param_cnt].tlvid = WL_PROXD_TLV_ID_EVENT_MASK;
+ *ftm_param_cnt = *ftm_param_cnt + 1;
+ }
+ break;
+ default:
+ DHD_RTT_ERR(("Invalid FTM Param Config, tlvid = %d\n", tlvid));
+ break;
+ }
+
+ return;
+}
+
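+/*
+ * Worker for one directed RTT target: validate the current target,
+ * enable FTM, push the per-target FTM config and start the session;
+ * when no directed target is pending, fall through to geofence
+ * scheduling
+ */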
+static int
+dhd_rtt_start(dhd_pub_t *dhd)
+{
+ int err = BCME_OK;
+ int err_at = 0;
+ int ftm_cfg_cnt = 0;
+ int ftm_param_cnt = 0;
+ ftm_config_options_info_t ftm_configs[FTM_MAX_CONFIGS];
+ ftm_config_param_info_t ftm_params[FTM_MAX_PARAMS];
+ rtt_target_info_t *rtt_target;
+ rtt_status_info_t *rtt_status;
+ struct net_device *dev = dhd_linux_get_primary_netdev(dhd);
+ u8 ioctl_buf[WLC_IOCTL_SMLEN];
+ u8 rtt_invalid_reason = RTT_STATE_VALID;
+ int rtt_sched_type = RTT_TYPE_INVALID;
+
+ NULL_CHECK(dhd, "dhd is NULL", err);
+
+ rtt_status = GET_RTTSTATE(dhd);
+ NULL_CHECK(rtt_status, "rtt_status is NULL", err);
+
+ DHD_RTT(("Enter %s\n", __FUNCTION__));
+
+ if (RTT_IS_STOPPED(rtt_status)) {
+ DHD_RTT(("No Directed RTT target to process, check for geofence\n"));
+ goto geofence;
+ }
+
+ if (rtt_status->cur_idx >= rtt_status->rtt_config.rtt_target_cnt) {
+ err = BCME_RANGE;
+ err_at = 1;
+ DHD_RTT(("%s : idx %d is out of range\n", __FUNCTION__, rtt_status->cur_idx));
+ if (rtt_status->flags == WL_PROXD_SESSION_FLAG_TARGET) {
+ DHD_RTT_ERR(("STA is set as Target/Responder \n"));
+ err = BCME_ERROR;
+ }
+ goto exit;
+ }
+
+ /* Get a target information */
+ rtt_target = &rtt_status->rtt_config.target_info[rtt_status->cur_idx];
+
+ DHD_RTT(("%s enter\n", __FUNCTION__));
+
+ if (ETHER_ISNULLADDR(rtt_target->addr.octet)) {
+ err = BCME_BADADDR;
+ err_at = 2;
+ DHD_RTT(("RTT Target addr is NULL\n"));
+ goto exit;
+ }
+
+ /* check for dp/others concurrency */
+ rtt_invalid_reason = dhd_rtt_invalid_states(dev, &rtt_target->addr);
+ if (rtt_invalid_reason != RTT_STATE_VALID) {
+ err = BCME_BUSY;
+ err_at = 3;
+ DHD_RTT(("DRV State is not valid for RTT\n"));
+ goto exit;
+ }
+
+ /* enable ftm */
+ err = dhd_rtt_ftm_enable(dhd, TRUE);
+ if (err) {
+ DHD_RTT_ERR(("failed to enable FTM (%d)\n", err));
+ err_at = 4;
+ goto exit;
+ }
+ rtt_status->status = RTT_ENABLED;
+
+#ifdef WL_NAN
+ if (rtt_target->peer == RTT_PEER_NAN) {
+ rtt_sched_type = RTT_TYPE_NAN_DIRECTED;
+ /* apply event mask */
+ DHD_RTT_CHK_SET_PARAM(ftm_params, ftm_param_cnt,
+ rtt_target, WL_PROXD_TLV_ID_EVENT_MASK);
+ dhd_rtt_ftm_config(dhd, 0, FTM_CONFIG_CAT_GENERAL,
+ ftm_params, ftm_param_cnt);
+ /* Ignore return value; failure is taken care of inside the API */
+ dhd_rtt_nan_start_session(dhd, rtt_target);
+ goto exit;
+ }
+#endif /* WL_NAN */
+
+ /* delete the session at the default session index */
+ err = dhd_rtt_delete_session(dhd, FTM_DEFAULT_SESSION);
+ if (err < 0 && err != BCME_NOTFOUND) {
+ DHD_RTT_ERR(("failed to delete session of FTM (%d)\n", err));
+ err_at = 5;
+ goto exit;
+ }
+
+ memset(ftm_configs, 0, sizeof(ftm_configs));
+ memset(ftm_params, 0, sizeof(ftm_params));
+
+ /* configure session 1 as the initiator */
+ if (ftm_cfg_cnt < FTM_MAX_CONFIGS) {
+ ftm_configs[ftm_cfg_cnt].enable = TRUE;
+ ftm_configs[ftm_cfg_cnt++].flags =
+ WL_PROXD_SESSION_FLAG_INITIATOR | WL_PROXD_SESSION_FLAG_RANDMAC;
+ dhd_rtt_ftm_config(dhd, FTM_DEFAULT_SESSION, FTM_CONFIG_CAT_OPTIONS,
+ ftm_configs, ftm_cfg_cnt);
+ } else {
+ DHD_RTT_ERR(("Max FTM Config Options exceeded\n"));
+ err = BCME_ERROR;
+ err_at = 6;
+ goto exit;
+ }
+
+ memset(ioctl_buf, 0, WLC_IOCTL_SMLEN);
+ /* On newer versions a random MAC is used in place of cur_etheraddr */
+ if (dhd->wlc_ver_major < RTT_IOV_CUR_ETH_OBSOLETE) {
+ err = wldev_iovar_getbuf(dev, "cur_etheraddr", NULL, 0,
+ ioctl_buf, WLC_IOCTL_SMLEN, NULL);
+ if (err) {
+ DHD_RTT_ERR(("WLC_GET_CUR_ETHERADDR failed, error %d\n", err));
+ err_at = 7;
+ goto exit;
+ }
+ memcpy(rtt_target->local_addr.octet, ioctl_buf, ETHER_ADDR_LEN);
+
+ /* local mac address */
+ DHD_RTT_CHK_SET_PARAM(ftm_params, ftm_param_cnt,
+ rtt_target, WL_PROXD_TLV_ID_CUR_ETHER_ADDR);
+ }
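+ /* each DHD_RTT_CHK_SET_PARAM below appends one TLV to
+ * ftm_params via dhd_rtt_set_ftm_config_param, but only when
+ * the corresponding target field is set
+ */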
+ /* target's mac address */
+ DHD_RTT_CHK_SET_PARAM(ftm_params, ftm_param_cnt,
+ rtt_target, WL_PROXD_TLV_ID_PEER_MAC);
+
+ /* target's chanspec */
+ DHD_RTT_CHK_SET_PARAM(ftm_params, ftm_param_cnt,
+ rtt_target, WL_PROXD_TLV_ID_CHANSPEC);
+
+ /* num-burst */
+ DHD_RTT_CHK_SET_PARAM(ftm_params, ftm_param_cnt,
+ rtt_target, WL_PROXD_TLV_ID_NUM_BURST);
+
+ /* number of frame per burst */
+ DHD_RTT_CHK_SET_PARAM(ftm_params, ftm_param_cnt,
+ rtt_target, WL_PROXD_TLV_ID_BURST_NUM_FTM);
+
+ /* FTM retry count */
+ DHD_RTT_CHK_SET_PARAM(ftm_params, ftm_param_cnt,
+ rtt_target, WL_PROXD_TLV_ID_FTM_RETRIES);
+
+ /* FTM Request retry count */
+ DHD_RTT_CHK_SET_PARAM(ftm_params, ftm_param_cnt,
+ rtt_target, WL_PROXD_TLV_ID_FTM_REQ_RETRIES);
+
+ /* burst-period */
+ DHD_RTT_CHK_SET_PARAM(ftm_params, ftm_param_cnt,
+ rtt_target, WL_PROXD_TLV_ID_BURST_PERIOD);
+
+ /* Set both duration and timeout to the MAX duration
+ * to handle congested environments, hence ignoring
+ * the user config.
+ */
+ /* burst-duration */
+ DHD_RTT_CHK_SET_PARAM(ftm_params, ftm_param_cnt,
+ rtt_target, WL_PROXD_TLV_ID_BURST_DURATION);
+
+ /* burst-timeout */
+ DHD_RTT_CHK_SET_PARAM(ftm_params, ftm_param_cnt,
+ rtt_target, WL_PROXD_TLV_ID_BURST_TIMEOUT);
+
+ DHD_RTT_CHK_SET_PARAM(ftm_params, ftm_param_cnt,
+ rtt_target, WL_PROXD_TLV_ID_RATESPEC);
+
+ /* event_mask: applicable only for legacy RTT.
+ * For NAN RTT the config happens from firmware
+ */
+ DHD_RTT_CHK_SET_PARAM(ftm_params, ftm_param_cnt,
+ rtt_target, WL_PROXD_TLV_ID_EVENT_MASK);
+
+#if !defined(WL_USE_RANDOMIZED_SCAN)
+ /* legacy rtt randmac */
+ dhd_set_rand_mac_oui(dhd);
+#endif /* !defined(WL_USE_RANDOMIZED_SCAN) */
+ dhd_rtt_ftm_config(dhd, FTM_DEFAULT_SESSION, FTM_CONFIG_CAT_GENERAL,
+ ftm_params, ftm_param_cnt);
+
+ rtt_sched_type = RTT_TYPE_LEGACY;
+ err = dhd_rtt_start_session(dhd, FTM_DEFAULT_SESSION, TRUE);
+ if (err) {
+ DHD_RTT_ERR(("failed to start session of FTM : error %d\n", err));
+ err_at = 8;
+ } else {
+ /* schedule proxd timeout */
+ schedule_delayed_work(&rtt_status->proxd_timeout,
+ msecs_to_jiffies(DHD_NAN_RTT_TIMER_INTERVAL_MS));
+
+ }
+
+ goto exit;
+geofence:
+#ifdef WL_NAN
+ /* sched geofencing rtt */
+ rtt_sched_type = RTT_TYPE_NAN_GEOFENCE;
+ if ((err = dhd_rtt_sched_geofencing_target(dhd)) != BCME_OK) {
+ DHD_RTT_ERR(("geofencing sched failed, err = %d\n", err));
+ err_at = 9;
+ }
+#endif /* WL_NAN */
+
+exit:
+ if (err) {
+ /* RTT Failed */
+ DHD_RTT_ERR(("dhd_rtt_start: Failed & RTT_STOPPED, err = %d,"
+ " err_at = %d, rtt_sched_type = %d, rtt_invalid_reason = %d\n"
+ " sched_reason = %d",
+ err, err_at, rtt_sched_type, rtt_invalid_reason,
+ rtt_status->rtt_sched_reason));
+ rtt_status->status = RTT_STOPPED;
+ /* disable FTM */
+ dhd_rtt_ftm_enable(dhd, FALSE);
+ }
+ rtt_status->rtt_sched = FALSE;
+ return err;
+}
+#endif /* WL_CFG80211 */
+
+int
+dhd_rtt_register_noti_callback(dhd_pub_t *dhd, void *ctx, dhd_rtt_compl_noti_fn noti_fn)
+{
+ int err = BCME_OK;
+ struct rtt_noti_callback *cb = NULL, *iter;
+ rtt_status_info_t *rtt_status;
+ NULL_CHECK(dhd, "dhd is NULL", err);
+ NULL_CHECK(noti_fn, "noti_fn is NULL", err);
+
+ rtt_status = GET_RTTSTATE(dhd);
+ NULL_CHECK(rtt_status, "rtt_status is NULL", err);
+ spin_lock_bh(&noti_list_lock);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ list_for_each_entry(iter, &rtt_status->noti_fn_list, list) {
+ GCC_DIAGNOSTIC_POP();
+ if (iter->noti_fn == noti_fn) {
+ goto exit;
+ }
+ }
+ cb = (struct rtt_noti_callback *)MALLOCZ(dhd->osh, sizeof(struct rtt_noti_callback));
+ if (!cb) {
+ err = -ENOMEM;
+ goto exit;
+ }
+ cb->noti_fn = noti_fn;
+ cb->ctx = ctx;
+ list_add(&cb->list, &rtt_status->noti_fn_list);
+exit:
+ spin_unlock_bh(&noti_list_lock);
+ return err;
+}
+
+int
+dhd_rtt_unregister_noti_callback(dhd_pub_t *dhd, dhd_rtt_compl_noti_fn noti_fn)
+{
+ int err = BCME_OK;
+ struct rtt_noti_callback *cb = NULL, *iter;
+ rtt_status_info_t *rtt_status;
+ NULL_CHECK(dhd, "dhd is NULL", err);
+ NULL_CHECK(noti_fn, "noti_fn is NULL", err);
+ rtt_status = GET_RTTSTATE(dhd);
+ NULL_CHECK(rtt_status, "rtt_status is NULL", err);
+ spin_lock_bh(&noti_list_lock);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ list_for_each_entry(iter, &rtt_status->noti_fn_list, list) {
+ GCC_DIAGNOSTIC_POP();
+ if (iter->noti_fn == noti_fn) {
+ cb = iter;
+ list_del(&cb->list);
+ break;
+ }
+ }
+
+ spin_unlock_bh(&noti_list_lock);
+ if (cb) {
+ MFREE(dhd->osh, cb, sizeof(struct rtt_noti_callback));
+ }
+ return err;
+}
+
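+/* Map a firmware ratespec (encoding, MCS/NSS and BW fields) to the
+ * host-facing wifi_rate_v1 representation
+ */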
+static wifi_rate_v1
+dhd_rtt_convert_rate_to_host(uint32 rspec)
+{
+ wifi_rate_v1 host_rate;
+ uint32 bandwidth;
+ memset(&host_rate, 0, sizeof(wifi_rate_v1));
+ if (RSPEC_ISLEGACY(rspec)) {
+ host_rate.preamble = 0;
+ } else if (RSPEC_ISHT(rspec)) {
+ host_rate.preamble = 2;
+ host_rate.rateMcsIdx = rspec & WL_RSPEC_RATE_MASK;
+ } else if (RSPEC_ISVHT(rspec)) {
+ host_rate.preamble = 3;
+ host_rate.rateMcsIdx = rspec & WL_RSPEC_VHT_MCS_MASK;
+ host_rate.nss = (rspec & WL_RSPEC_VHT_NSS_MASK) >> WL_RSPEC_VHT_NSS_SHIFT;
+ }
+
+ bandwidth = RSPEC_BW(rspec);
+ switch (bandwidth) {
+ case WL_RSPEC_BW_20MHZ:
+ host_rate.bw = RTT_RATE_20M;
+ break;
+ case WL_RSPEC_BW_40MHZ:
+ host_rate.bw = RTT_RATE_40M;
+ break;
+ case WL_RSPEC_BW_80MHZ:
+ host_rate.bw = RTT_RATE_80M;
+ break;
+ case WL_RSPEC_BW_160MHZ:
+ host_rate.bw = RTT_RATE_160M;
+ break;
+ default:
+ host_rate.bw = RTT_RATE_20M;
+ break;
+ }
+
+ host_rate.bitrate = rate_rspec2rate(rspec) / 100; /* 100kbps */
+ DHD_RTT(("bit rate : %d\n", host_rate.bitrate));
+ return host_rate;
+}
+
+#define FTM_FRAME_TYPES {"SETUP", "TRIGGER", "TIMESTAMP"}
+static int
+dhd_rtt_convert_results_to_host_v1(rtt_result_t *rtt_result, const uint8 *p_data,
+ uint16 tlvid, uint16 len)
+{
+ int i;
+ int err = BCME_OK;
+ char eabuf[ETHER_ADDR_STR_LEN];
+ wl_proxd_result_flags_t flags;
+ wl_proxd_session_state_t session_state;
+ wl_proxd_status_t proxd_status;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
+ struct osl_timespec ts;
+#endif /* LINUX_VER >= 2.6.39 */
+ uint32 ratespec;
+ uint32 avg_dist;
+ const wl_proxd_rtt_result_v1_t *p_data_info = NULL;
+ const wl_proxd_rtt_sample_v1_t *p_sample_avg = NULL;
+ const wl_proxd_rtt_sample_v1_t *p_sample = NULL;
+ wl_proxd_intvl_t rtt;
+ wl_proxd_intvl_t p_time;
+ uint16 num_rtt = 0, snr = 0, bitflips = 0;
+ wl_proxd_phy_error_t tof_phy_error = 0;
+ wl_proxd_phy_error_t tof_phy_tgt_error = 0;
+ wl_proxd_snr_t tof_target_snr = 0;
+ wl_proxd_bitflips_t tof_target_bitflips = 0;
+ int16 rssi = 0;
+ int32 dist = 0;
+ uint8 num_ftm = 0;
+ char *ftm_frame_types[] = FTM_FRAME_TYPES;
+ rtt_report_t *rtt_report = &(rtt_result->report);
+
+ BCM_REFERENCE(ftm_frame_types);
+ BCM_REFERENCE(dist);
+ BCM_REFERENCE(rssi);
+ BCM_REFERENCE(tof_target_bitflips);
+ BCM_REFERENCE(tof_target_snr);
+ BCM_REFERENCE(tof_phy_tgt_error);
+ BCM_REFERENCE(tof_phy_error);
+ BCM_REFERENCE(bitflips);
+ BCM_REFERENCE(snr);
+ BCM_REFERENCE(session_state);
+ BCM_REFERENCE(ftm_session_state_value_to_logstr);
+
+ NULL_CHECK(rtt_report, "rtt_report is NULL", err);
+ NULL_CHECK(p_data, "p_data is NULL", err);
+ DHD_RTT(("%s enter\n", __FUNCTION__));
+ p_data_info = (const wl_proxd_rtt_result_v1_t *) p_data;
+ /* unpack and format 'flags' for display */
+ flags = ltoh16_ua(&p_data_info->flags);
+
+ /* session state and status */
+ session_state = ltoh16_ua(&p_data_info->state);
+ proxd_status = ltoh32_ua(&p_data_info->status);
+ bcm_ether_ntoa((&(p_data_info->peer)), eabuf);
+ DHD_RTT((">\tTarget(%s) session state=%d(%s), status=%d(%s)\n",
+ eabuf,
+ session_state,
+ ftm_session_state_value_to_logstr(session_state),
+ proxd_status,
+ ftm_status_value_to_logstr(proxd_status)));
+
+ /* show avg_dist (1/256m units), burst_num */
+ avg_dist = ltoh32_ua(&p_data_info->avg_dist);
+ if (avg_dist == 0xffffffff) { /* report 'failure' case */
+ DHD_RTT((">\tavg_dist=-1m, burst_num=%d, valid_measure_cnt=%d\n",
+ ltoh16_ua(&p_data_info->burst_num),
+ p_data_info->num_valid_rtt)); /* in a session */
+ avg_dist = FTM_INVALID;
+ } else {
+ DHD_RTT((">\tavg_dist=%d.%04dm, burst_num=%d, valid_measure_cnt=%d num_ftm=%d\n",
+ avg_dist >> 8, /* 1/256m units */
+ ((avg_dist & 0xff) * 625) >> 4,
+ ltoh16_ua(&p_data_info->burst_num),
+ p_data_info->num_valid_rtt,
+ p_data_info->num_ftm)); /* in a session */
+ }
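+ /* the fractional metres above come from
+ * ((avg_dist & 0xff) * 625) >> 4, i.e. the 1/256 m remainder
+ * scaled by 10000/256 to four decimal places
+ */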
+ /* show 'avg_rtt' sample */
+ p_sample_avg = &p_data_info->avg_rtt;
+ ftm_tmu_value_to_logstr(ltoh16_ua(&p_sample_avg->rtt.tmu));
+ DHD_RTT((">\tavg_rtt sample: rssi=%d rtt=%d%s std_deviation =%d.%d ratespec=0x%08x\n",
+ (int16) ltoh16_ua(&p_sample_avg->rssi),
+ ltoh32_ua(&p_sample_avg->rtt.intvl),
+ ftm_tmu_value_to_logstr(ltoh16_ua(&p_sample_avg->rtt.tmu)),
+ ltoh16_ua(&p_data_info->sd_rtt)/10, ltoh16_ua(&p_data_info->sd_rtt)%10,
+ ltoh32_ua(&p_sample_avg->ratespec)));
+
+ /* set peer address */
+ rtt_report->addr = p_data_info->peer;
+ /* burst num */
+ rtt_report->burst_num = ltoh16_ua(&p_data_info->burst_num);
+ /* success num */
+ rtt_report->success_num = p_data_info->num_valid_rtt;
+ /* actual number of FTM supported by peer */
+ rtt_report->num_per_burst_peer = p_data_info->num_ftm;
+ rtt_report->negotiated_burst_num = p_data_info->num_ftm;
+ /* status */
+ rtt_report->status = ftm_get_statusmap_info(proxd_status,
+ &ftm_status_map_info[0], ARRAYSIZE(ftm_status_map_info));
+
+ /* rssi (0.5db) */
+ rtt_report->rssi = ABS((wl_proxd_rssi_t)ltoh16_ua(&p_data_info->avg_rtt.rssi)) * 2;
+
+ /* rx rate */
+ ratespec = ltoh32_ua(&p_data_info->avg_rtt.ratespec);
+ rtt_report->rx_rate = dhd_rtt_convert_rate_to_host(ratespec);
+ /* tx rate */
+ if (flags & WL_PROXD_RESULT_FLAG_VHTACK) {
+ rtt_report->tx_rate = dhd_rtt_convert_rate_to_host(0x2010010);
+ } else {
+ rtt_report->tx_rate = dhd_rtt_convert_rate_to_host(0xc);
+ }
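+ /* the fixed rspecs above are presumably the VHT MCS0/Nss1 ack
+ * rate (0x2010010) and the legacy 6 Mbps basic rate
+ * (0xc = 12 x 500 kbps)
+ */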
+ /* rtt_sd */
+ rtt.tmu = ltoh16_ua(&p_data_info->avg_rtt.rtt.tmu);
+ rtt.intvl = ltoh32_ua(&p_data_info->avg_rtt.rtt.intvl);
+ rtt_report->rtt = (wifi_timespan)FTM_INTVL2NSEC(&rtt) * 1000; /* nano -> pico seconds */
+ rtt_report->rtt_sd = ltoh16_ua(&p_data_info->sd_rtt); /* nano -> 0.1 nano */
+ DHD_RTT(("rtt_report->rtt : %llu\n", rtt_report->rtt));
+ DHD_RTT(("rtt_report->rssi : %d (0.5db)\n", rtt_report->rssi));
+
+ /* average distance */
+ if (avg_dist != FTM_INVALID) {
+ rtt_report->distance = (avg_dist >> 8) * 1000; /* meter -> mm */
+ rtt_report->distance += (avg_dist & 0xff) * 1000 / 256;
+ } else {
+ rtt_report->distance = FTM_INVALID;
+ }
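+ /* e.g. avg_dist = 0x180 (384, i.e. 384/256 m = 1.5 m):
+ * (384 >> 8) * 1000 = 1000 mm, plus (384 & 0xff) * 1000 / 256
+ * = 500 mm, giving 1500 mm
+ */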
+ /* time stamp */
+ /* get the time elapsed from boot time */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
+ osl_get_monotonic_boottime(&ts);
+ rtt_report->ts = (uint64)TIMESPEC_TO_US(ts);
+#endif /* LINUX_VER >= 2.6.39 */
+
+ if (proxd_status == WL_PROXD_E_REMOTE_FAIL) {
+ /* retry time after failure */
+ p_time.intvl = ltoh32_ua(&p_data_info->u.retry_after.intvl);
+ p_time.tmu = ltoh16_ua(&p_data_info->u.retry_after.tmu);
+ rtt_report->retry_after_duration = FTM_INTVL2SEC(&p_time); /* s -> s */
+ DHD_RTT((">\tretry_after: %d%s\n",
+ ltoh32_ua(&p_data_info->u.retry_after.intvl),
+ ftm_tmu_value_to_logstr(ltoh16_ua(&p_data_info->u.retry_after.tmu))));
+ } else {
+ /* burst duration */
+ p_time.intvl = ltoh32_ua(&p_data_info->u.retry_after.intvl);
+ p_time.tmu = ltoh16_ua(&p_data_info->u.retry_after.tmu);
+ rtt_report->burst_duration = FTM_INTVL2MSEC(&p_time); /* s -> ms */
+ DHD_RTT((">\tburst_duration: %d%s\n",
+ ltoh32_ua(&p_data_info->u.burst_duration.intvl),
+ ftm_tmu_value_to_logstr(ltoh16_ua(&p_data_info->u.burst_duration.tmu))));
+ DHD_RTT(("rtt_report->burst_duration : %d\n", rtt_report->burst_duration));
+ }
+
+ /* display detail if available */
+ num_rtt = ltoh16_ua(&p_data_info->num_rtt);
+ if (num_rtt > 0) {
+ DHD_RTT((">\tnum rtt: %d samples\n", num_rtt));
+ p_sample = &p_data_info->rtt[0];
+ for (i = 0; i < num_rtt; i++) {
+ snr = 0;
+ bitflips = 0;
+ tof_phy_error = 0;
+ tof_phy_tgt_error = 0;
+ tof_target_snr = 0;
+ tof_target_bitflips = 0;
+ rssi = 0;
+ dist = 0;
+ num_ftm = p_data_info->num_ftm;
+ /* FTM frames 1,4,7,11 have valid snr, rssi and bitflips */
+ if ((i % num_ftm) == 1) {
+ rssi = (wl_proxd_rssi_t) ltoh16_ua(&p_sample->rssi);
+ snr = (wl_proxd_snr_t) ltoh16_ua(&p_sample->snr);
+ bitflips = (wl_proxd_bitflips_t) ltoh16_ua(&p_sample->bitflips);
+ tof_phy_error =
+ (wl_proxd_phy_error_t)
+ ltoh32_ua(&p_sample->tof_phy_error);
+ tof_phy_tgt_error =
+ (wl_proxd_phy_error_t)
+ ltoh32_ua(&p_sample->tof_tgt_phy_error);
+ tof_target_snr =
+ (wl_proxd_snr_t)
+ ltoh16_ua(&p_sample->tof_tgt_snr);
+ tof_target_bitflips =
+ (wl_proxd_bitflips_t)
+ ltoh16_ua(&p_sample->tof_tgt_bitflips);
+ dist = ltoh32_ua(&p_sample->distance);
+ } else {
+ rssi = -1;
+ snr = 0;
+ bitflips = 0;
+ dist = 0;
+ tof_target_bitflips = 0;
+ tof_target_snr = 0;
+ tof_phy_tgt_error = 0;
+ }
+ DHD_RTT((">\t sample[%d]: id=%d rssi=%d snr=0x%x bitflips=%d"
+ " tof_phy_error %x tof_phy_tgt_error %x target_snr=0x%x"
+ " target_bitflips=%d dist=%d rtt=%d%s status %s"
+ " Type %s coreid=%d\n",
+ i, p_sample->id, rssi, snr,
+ bitflips, tof_phy_error, tof_phy_tgt_error,
+ tof_target_snr,
+ tof_target_bitflips, dist,
+ ltoh32_ua(&p_sample->rtt.intvl),
+ ftm_tmu_value_to_logstr(ltoh16_ua(&p_sample->rtt.tmu)),
+ ftm_status_value_to_logstr(ltoh32_ua(&p_sample->status)),
+ ftm_frame_types[i % num_ftm], p_sample->coreid));
+ p_sample++;
+ }
+ }
+ return err;
+}
+
+static int
+dhd_rtt_convert_results_to_host_v2(rtt_result_t *rtt_result, const uint8 *p_data,
+ uint16 tlvid, uint16 len)
+{
+ int i;
+ int err = BCME_OK;
+ char eabuf[ETHER_ADDR_STR_LEN];
+ wl_proxd_result_flags_t flags;
+ wl_proxd_session_state_t session_state;
+ wl_proxd_status_t proxd_status;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
+ struct osl_timespec ts;
+#endif /* LINUX_VER >= 2.6.39 */
+ uint32 ratespec;
+ uint32 avg_dist;
+ const wl_proxd_rtt_result_v2_t *p_data_info = NULL;
+ const wl_proxd_rtt_sample_v2_t *p_sample_avg = NULL;
+ const wl_proxd_rtt_sample_v2_t *p_sample = NULL;
+ uint16 num_rtt = 0;
+ wl_proxd_intvl_t rtt;
+ wl_proxd_intvl_t p_time;
+ uint16 snr = 0, bitflips = 0;
+ wl_proxd_phy_error_t tof_phy_error = 0;
+ wl_proxd_phy_error_t tof_phy_tgt_error = 0;
+ wl_proxd_snr_t tof_target_snr = 0;
+ wl_proxd_bitflips_t tof_target_bitflips = 0;
+ int16 rssi = 0;
+ int32 dist = 0;
+ uint32 chanspec = 0;
+ uint8 num_ftm = 0;
+ char *ftm_frame_types[] = FTM_FRAME_TYPES;
+ rtt_report_t *rtt_report = &(rtt_result->report);
+
+ BCM_REFERENCE(ftm_frame_types);
+ BCM_REFERENCE(dist);
+ BCM_REFERENCE(rssi);
+ BCM_REFERENCE(tof_target_bitflips);
+ BCM_REFERENCE(tof_target_snr);
+ BCM_REFERENCE(tof_phy_tgt_error);
+ BCM_REFERENCE(tof_phy_error);
+ BCM_REFERENCE(bitflips);
+ BCM_REFERENCE(snr);
+ BCM_REFERENCE(chanspec);
+ BCM_REFERENCE(session_state);
+ BCM_REFERENCE(ftm_session_state_value_to_logstr);
+
+ NULL_CHECK(rtt_report, "rtt_report is NULL", err);
+ NULL_CHECK(p_data, "p_data is NULL", err);
+ DHD_RTT(("%s enter\n", __FUNCTION__));
+ p_data_info = (const wl_proxd_rtt_result_v2_t *) p_data;
+ /* unpack and format 'flags' for display */
+ flags = ltoh16_ua(&p_data_info->flags);
+ /* session state and status */
+ session_state = ltoh16_ua(&p_data_info->state);
+ proxd_status = ltoh32_ua(&p_data_info->status);
+ bcm_ether_ntoa((&(p_data_info->peer)), eabuf);
+
+ if ((proxd_status != BCME_OK) || (p_data_info->num_meas == 0)) {
+ DHD_RTT_ERR((">\tTarget(%s) session state=%d(%s), status=%d(%s) "
+ "num_meas_ota %d num_valid_rtt %d result_flags %x\n",
+ eabuf, session_state,
+ ftm_session_state_value_to_logstr(session_state),
+ proxd_status, ftm_status_value_to_logstr(proxd_status),
+ p_data_info->num_meas, p_data_info->num_valid_rtt,
+ p_data_info->flags));
+ } else {
+ DHD_RTT((">\tTarget(%s) session state=%d(%s), status=%d(%s)\n",
+ eabuf, session_state,
+ ftm_session_state_value_to_logstr(session_state),
+ proxd_status, ftm_status_value_to_logstr(proxd_status)));
+ }
+ /* show avg_dist (1/256m units), burst_num */
+ avg_dist = ltoh32_ua(&p_data_info->avg_dist);
+ if (avg_dist == 0xffffffff) { /* report 'failure' case */
+ DHD_RTT((">\tavg_dist=-1m, burst_num=%d, valid_measure_cnt=%d\n",
+ ltoh16_ua(&p_data_info->burst_num),
+ p_data_info->num_valid_rtt)); /* in a session */
+ avg_dist = FTM_INVALID;
+ } else {
+ DHD_RTT((">\tavg_dist=%d.%04dm, burst_num=%d, valid_measure_cnt=%d num_ftm=%d "
+ "num_meas_ota=%d, result_flags=%x\n", avg_dist >> 8, /* 1/256m units */
+ ((avg_dist & 0xff) * 625) >> 4,
+ ltoh16_ua(&p_data_info->burst_num),
+ p_data_info->num_valid_rtt,
+ p_data_info->num_ftm, p_data_info->num_meas,
+ p_data_info->flags)); /* in a session */
+ }
+ rtt_result->rtt_detail.num_ota_meas = p_data_info->num_meas;
+ rtt_result->rtt_detail.result_flags = p_data_info->flags;
+ /* show 'avg_rtt' sample */
+ /* in v2, avg_rtt is the first element of the variable rtt[] */
+ p_sample_avg = &p_data_info->rtt[0];
+ ftm_tmu_value_to_logstr(ltoh16_ua(&p_sample_avg->rtt.tmu));
+ DHD_RTT((">\tavg_rtt sample: rssi=%d rtt=%d%s std_deviation =%d.%d"
+ "ratespec=0x%08x chanspec=0x%08x\n",
+ (int16) ltoh16_ua(&p_sample_avg->rssi),
+ ltoh32_ua(&p_sample_avg->rtt.intvl),
+ ftm_tmu_value_to_logstr(ltoh16_ua(&p_sample_avg->rtt.tmu)),
+ ltoh16_ua(&p_data_info->sd_rtt)/10, ltoh16_ua(&p_data_info->sd_rtt)%10,
+ ltoh32_ua(&p_sample_avg->ratespec),
+ ltoh32_ua(&p_sample_avg->chanspec)));
+
+ /* set peer address */
+ rtt_report->addr = p_data_info->peer;
+
+ /* burst num */
+ rtt_report->burst_num = ltoh16_ua(&p_data_info->burst_num);
+
+ /* success num */
+ rtt_report->success_num = p_data_info->num_valid_rtt;
+
+ /* num-ftm configured */
+ rtt_report->ftm_num = p_data_info->num_ftm;
+
+ /* actual number of FTM supported by peer */
+ rtt_report->num_per_burst_peer = p_data_info->num_ftm;
+ rtt_report->negotiated_burst_num = p_data_info->num_ftm;
+
+ /* status */
+ rtt_report->status = ftm_get_statusmap_info(proxd_status,
+ &ftm_status_map_info[0], ARRAYSIZE(ftm_status_map_info));
+
+ /* The framework expects the status to be SUCCESS, else all
+ * results are set to zero even if we have a partially valid
+ * result. So set the status to SUCCESS if we have a valid rtt.
+ * On burst timeout we stop the burst with a "timeout" reason,
+ * and on MSCH end we set the status to "cancel".
+ */
+ if ((proxd_status == WL_PROXD_E_TIMEOUT ||
+ proxd_status == WL_PROXD_E_CANCELED) &&
+ rtt_report->success_num) {
+ rtt_report->status = RTT_STATUS_SUCCESS;
+ }
+
+ /* rssi (0.5db) */
+ rtt_report->rssi = ABS((wl_proxd_rssi_t)ltoh16_ua(&p_sample_avg->rssi)) * 2;
+
+ /* rx rate */
+ ratespec = ltoh32_ua(&p_sample_avg->ratespec);
+ rtt_report->rx_rate = dhd_rtt_convert_rate_to_host(ratespec);
+
+ /* tx rate */
+ if (flags & WL_PROXD_RESULT_FLAG_VHTACK) {
+ rtt_report->tx_rate = dhd_rtt_convert_rate_to_host(0x2010010);
+ } else {
+ rtt_report->tx_rate = dhd_rtt_convert_rate_to_host(0xc);
+ }
+
+ /* rtt_sd */
+ rtt.tmu = ltoh16_ua(&p_sample_avg->rtt.tmu);
+ rtt.intvl = ltoh32_ua(&p_sample_avg->rtt.intvl);
+ rtt_report->rtt = (wifi_timespan)FTM_INTVL2NSEC(&rtt) * 1000; /* nano -> pico seconds */
+ rtt_report->rtt_sd = ltoh16_ua(&p_data_info->sd_rtt); /* nano -> 0.1 nano */
+ DHD_RTT(("rtt_report->rtt : %llu\n", rtt_report->rtt));
+ DHD_RTT(("rtt_report->rssi : %d (0.5db)\n", rtt_report->rssi));
+
+ /* average distance */
+ if (avg_dist != FTM_INVALID) {
+ rtt_report->distance = (avg_dist >> 8) * 1000; /* meter -> mm */
+ rtt_report->distance += (avg_dist & 0xff) * 1000 / 256;
+ /* rtt_sd is in units of 0.1 ns;
+ * the host needs distance_sd in millimetres:
+ * (0.1 * rtt_sd / 2 * 10^-9) * C * 1000
+ */
+ rtt_report->distance_sd = rtt_report->rtt_sd * 15; /* mm */
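+ /* evaluates to 15 mm per unit:
+ * 0.1 ns / 2 (one way) * ~3e8 m/s * 1000 mm/m = 15 mm
+ */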
+ } else {
+ rtt_report->distance = FTM_INVALID;
+ }
+ /* time stamp */
+ /* get the time elapsed from boot time */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
+ osl_get_monotonic_boottime(&ts);
+ rtt_report->ts = (uint64)TIMESPEC_TO_US(ts);
+#endif /* LINUX_VER >= 2.6.39 */
+
+ if (proxd_status == WL_PROXD_E_REMOTE_FAIL) {
+ /* retry time after failure */
+ p_time.intvl = ltoh32_ua(&p_data_info->u.retry_after.intvl);
+ p_time.tmu = ltoh16_ua(&p_data_info->u.retry_after.tmu);
+ rtt_report->retry_after_duration = FTM_INTVL2SEC(&p_time); /* s -> s */
+ DHD_RTT((">\tretry_after: %d%s\n",
+ ltoh32_ua(&p_data_info->u.retry_after.intvl),
+ ftm_tmu_value_to_logstr(ltoh16_ua(&p_data_info->u.retry_after.tmu))));
+ } else {
+ /* burst duration */
+ p_time.intvl = ltoh32_ua(&p_data_info->u.retry_after.intvl);
+ p_time.tmu = ltoh16_ua(&p_data_info->u.retry_after.tmu);
+ rtt_report->burst_duration = FTM_INTVL2MSEC(&p_time); /* s -> ms */
+ DHD_RTT((">\tburst_duration: %d%s\n",
+ ltoh32_ua(&p_data_info->u.burst_duration.intvl),
+ ftm_tmu_value_to_logstr(ltoh16_ua(&p_data_info->u.burst_duration.tmu))));
+ DHD_RTT(("rtt_report->burst_duration : %d\n", rtt_report->burst_duration));
+ }
+ /* display detail if available */
+ num_rtt = ltoh16_ua(&p_data_info->num_rtt);
+ if (num_rtt > 0) {
+ DHD_RTT((">\tnum rtt: %d samples\n", num_rtt));
+ p_sample = &p_data_info->rtt[1];
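+		/* rtt[0] appears to hold the burst average (p_sample_avg above);
+		 * the detailed per-frame samples start at rtt[1].
+		 */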
+ for (i = 0; i < num_rtt; i++) {
+ snr = 0;
+ bitflips = 0;
+ tof_phy_error = 0;
+ tof_phy_tgt_error = 0;
+ tof_target_snr = 0;
+ tof_target_bitflips = 0;
+ rssi = 0;
+ dist = 0;
+ num_ftm = p_data_info->num_ftm;
+ /* FTM frames 1,4,7,11 have valid snr, rssi and bitflips */
+ if ((i % num_ftm) == 1) {
+ rssi = (wl_proxd_rssi_t) ltoh16_ua(&p_sample->rssi);
+ snr = (wl_proxd_snr_t) ltoh16_ua(&p_sample->snr);
+ bitflips = (wl_proxd_bitflips_t) ltoh16_ua(&p_sample->bitflips);
+ tof_phy_error =
+ (wl_proxd_phy_error_t)
+ ltoh32_ua(&p_sample->tof_phy_error);
+ tof_phy_tgt_error =
+ (wl_proxd_phy_error_t)
+ ltoh32_ua(&p_sample->tof_tgt_phy_error);
+ tof_target_snr =
+ (wl_proxd_snr_t)
+ ltoh16_ua(&p_sample->tof_tgt_snr);
+ tof_target_bitflips =
+ (wl_proxd_bitflips_t)
+ ltoh16_ua(&p_sample->tof_tgt_bitflips);
+ dist = ltoh32_ua(&p_sample->distance);
+ chanspec = ltoh32_ua(&p_sample->chanspec);
+ } else {
+ rssi = -1;
+ snr = 0;
+ bitflips = 0;
+ dist = 0;
+ tof_target_bitflips = 0;
+ tof_target_snr = 0;
+ tof_phy_tgt_error = 0;
+ }
+ DHD_RTT((">\t sample[%d]: id=%d rssi=%d snr=0x%x bitflips=%d"
+ " tof_phy_error %x tof_phy_tgt_error %x target_snr=0x%x"
+ " target_bitflips=%d dist=%d rtt=%d%s status %s Type %s"
+ " coreid=%d chanspec=0x%08x\n",
+ i, p_sample->id, rssi, snr,
+ bitflips, tof_phy_error, tof_phy_tgt_error,
+ tof_target_snr,
+ tof_target_bitflips, dist,
+ ltoh32_ua(&p_sample->rtt.intvl),
+ ftm_tmu_value_to_logstr(ltoh16_ua(&p_sample->rtt.tmu)),
+ ftm_status_value_to_logstr(ltoh32_ua(&p_sample->status)),
+ ftm_frame_types[i % num_ftm], p_sample->coreid,
+ chanspec));
+ p_sample++;
+ }
+ }
+ return err;
+}
+#ifdef WL_CFG80211
+/* Common API for handling Session End.
+ * This API will flush out the results for a peer MAC.
+ *
+ * @For a legacy FTM session, this API is called
+ * when the legacy FTM_SESSION_END event is received.
+ * @For legacy NAN-RTT, this API is called when
+ * we are cancelling the NAN ranging session or on the
+ * NAN ranging-end event.
+ */
+
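+/* Returns TRUE once every enabled directed target from start_idx onward
+ * already has a results header cached, i.e. no target is still awaiting a report.
+ */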
+static bool
+dhd_rtt_all_directed_targets_done(dhd_pub_t *dhd)
+{
+ int8 idx;
+ bool done = TRUE;
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+
+ for (idx = rtt_status->start_idx; idx < rtt_status->rtt_config.rtt_target_cnt; idx++) {
+ if (!rtt_status->rtt_config.target_info[idx].disable) {
+ if (!dhd_rtt_get_report_header(rtt_status,
+ NULL, &rtt_status->rtt_config.target_info[idx].addr)) {
+ done = FALSE;
+ break;
+ }
+ }
+ }
+ return done;
+}
+
+static void
+dhd_rtt_handle_rtt_session_end(dhd_pub_t *dhd)
+{
+ struct rtt_noti_callback *iter;
+ rtt_results_header_t *entry, *next;
+ rtt_result_t *next2;
+ rtt_result_t *rtt_result;
+ bool all_targets_done = FALSE;
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+#ifdef WL_NAN
+ struct net_device *ndev = dhd_linux_get_primary_netdev(dhd);
+ struct bcm_cfg80211 *cfg = wiphy_priv(ndev->ieee80211_ptr->wiphy);
+#endif /* WL_NAN */
+
+ /* check if all targets results received */
+ all_targets_done = dhd_rtt_all_directed_targets_done(dhd);
+ if (all_targets_done) {
+ DHD_RTT_MEM(("RTT_STOPPED\n"));
+ rtt_status->status = RTT_STOPPED;
+ /* notify the completed information to others */
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ list_for_each_entry(iter, &rtt_status->noti_fn_list, list) {
+ iter->noti_fn(iter->ctx, &rtt_status->rtt_results_cache);
+ }
+ /* remove the rtt results in cache */
+ if (!list_empty(&rtt_status->rtt_results_cache)) {
+ /* Iterate rtt_results_header list */
+ list_for_each_entry_safe(entry, next,
+ &rtt_status->rtt_results_cache, list) {
+ list_del(&entry->list);
+ /* Iterate rtt_result list */
+ list_for_each_entry_safe(rtt_result, next2,
+ &entry->result_list, list) {
+ list_del(&rtt_result->list);
+ MFREE(dhd->osh, rtt_result,
+ sizeof(rtt_result_t));
+ }
+ MFREE(dhd->osh, entry, sizeof(rtt_results_header_t));
+ }
+ }
+ GCC_DIAGNOSTIC_POP();
+ /* reinitialize the HEAD */
+ INIT_LIST_HEAD(&rtt_status->rtt_results_cache);
+ /* clear information for rtt_config */
+ rtt_status->rtt_config.rtt_target_cnt = 0;
+ memset_s(rtt_status->rtt_config.target_info, TARGET_INFO_SIZE(RTT_MAX_TARGET_CNT),
+ 0, TARGET_INFO_SIZE(RTT_MAX_TARGET_CNT));
+ rtt_status->cur_idx = 0;
+
+ /* Cancel pending proxd timeout work if any */
+ if (delayed_work_pending(&rtt_status->proxd_timeout)) {
+ cancel_delayed_work(&rtt_status->proxd_timeout);
+ }
+#ifdef WL_NAN
+ /* Reset for Geofence */
+ wl_cfgnan_reset_geofence_ranging(cfg, NULL,
+ RTT_SCHED_RNG_RPT_DIRECTED, FALSE);
+#endif /* WL_NAN */
+ } else {
+ /* Targets still pending */
+ if (rtt_status->rtt_config.target_list_mode == RNG_TARGET_LIST_MODE_LEGACY) {
+ /* Pure legacy target list */
+
+ /* Cancel pending proxd timeout work if any */
+ if (delayed_work_pending(&rtt_status->proxd_timeout)) {
+ cancel_delayed_work(&rtt_status->proxd_timeout);
+ }
+
+ dhd_rtt_set_next_target_idx(dhd, (rtt_status->cur_idx + 1));
+ if (rtt_status->cur_idx < rtt_status->rtt_config.rtt_target_cnt) {
+ /* restart to measure RTT from next device */
+ DHD_INFO(("restart to measure rtt\n"));
+ rtt_status->rtt_sched = TRUE;
+ schedule_work(&rtt_status->work);
+ }
+ }
+#ifdef WL_NAN
+ else if (rtt_status->rtt_config.target_list_mode == RNG_TARGET_LIST_MODE_NAN) {
+ /* Pure NAN target list */
+ dhd_rtt_trigger_pending_targets_on_session_end(dhd);
+ }
+#endif /* WL_NAN */
+ }
+}
+#endif /* WL_CFG80211 */
+
+#ifdef WL_CFG80211
+static int
+dhd_rtt_create_failure_result(rtt_status_info_t *rtt_status,
+ struct ether_addr *addr)
+{
+ rtt_results_header_t *rtt_results_header = NULL;
+ rtt_target_info_t *rtt_target_info;
+ int ret = BCME_OK;
+ rtt_result_t *rtt_result;
+
+ /* allocate new header for rtt_results */
+ rtt_results_header = (rtt_results_header_t *)MALLOCZ(rtt_status->dhd->osh,
+ sizeof(rtt_results_header_t));
+ if (!rtt_results_header) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+ rtt_target_info = &rtt_status->rtt_config.target_info[rtt_status->cur_idx];
+ /* Initialize the head of list for rtt result */
+ INIT_LIST_HEAD(&rtt_results_header->result_list);
+ /* same src and dest len */
+ (void)memcpy_s(&rtt_results_header->peer_mac,
+ ETHER_ADDR_LEN, addr, ETHER_ADDR_LEN);
+ list_add_tail(&rtt_results_header->list, &rtt_status->rtt_results_cache);
+
+ /* allocate rtt_results for new results */
+ rtt_result = (rtt_result_t *)MALLOCZ(rtt_status->dhd->osh,
+ sizeof(rtt_result_t));
+ if (!rtt_result) {
+ ret = -ENOMEM;
+ /* Free rtt result header */
+ MFREE(rtt_status->dhd->osh, rtt_results_header, sizeof(rtt_results_header_t));
+ goto exit;
+ }
+ /* fill out the results from the configuration param */
+ rtt_result->report.ftm_num = rtt_target_info->num_frames_per_burst;
+ rtt_result->report.type = RTT_TWO_WAY;
+ DHD_RTT(("report->ftm_num : %d\n", rtt_result->report.ftm_num));
+ rtt_result->report_len = RTT_REPORT_SIZE;
+ rtt_result->report.status = RTT_STATUS_FAIL_NO_RSP;
+ /* same src and dest len */
+ (void)memcpy_s(&rtt_result->report.addr, ETHER_ADDR_LEN,
+ &rtt_target_info->addr, ETHER_ADDR_LEN);
+ rtt_result->report.distance = FTM_INVALID;
+ list_add_tail(&rtt_result->list, &rtt_results_header->result_list);
+ rtt_results_header->result_cnt++;
+ rtt_results_header->result_tot_len += rtt_result->report_len;
+exit:
+ return ret;
+}
+
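+/* Find the cached results header for the given peer address; returns TRUE
+ * if one exists. rtt_results_header may be NULL for a pure existence check.
+ */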
+static bool
+dhd_rtt_get_report_header(rtt_status_info_t *rtt_status,
+ rtt_results_header_t **rtt_results_header, struct ether_addr *addr)
+{
+ rtt_results_header_t *entry;
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ /* find a rtt_report_header for this mac address */
+ list_for_each_entry(entry, &rtt_status->rtt_results_cache, list) {
+ GCC_DIAGNOSTIC_POP();
+ if (!memcmp(&entry->peer_mac, addr, ETHER_ADDR_LEN)) {
+ /* found a rtt_report_header for peer_mac in the list */
+ if (rtt_results_header) {
+ *rtt_results_header = entry;
+ }
+ return TRUE;
+ }
+ }
+ return FALSE;
+}
+
+#ifdef WL_NAN
+int
+dhd_rtt_handle_nan_rtt_session_end(dhd_pub_t *dhd, struct ether_addr *peer)
+{
+ bool is_new = TRUE;
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+ mutex_lock(&rtt_status->rtt_mutex);
+ is_new = !dhd_rtt_get_report_header(rtt_status, NULL, peer);
+
+ if (is_new) { /* no FTM result..create failure result */
+ dhd_rtt_create_failure_result(rtt_status, peer);
+ }
+ DHD_RTT_MEM(("RTT Session End for NAN peer "MACDBG"\n", MAC2STRDBG(peer)));
+ dhd_rtt_handle_rtt_session_end(dhd);
+ mutex_unlock(&rtt_status->rtt_mutex);
+ return BCME_OK;
+}
+
+static bool
+dhd_rtt_is_valid_measurement(rtt_result_t *rtt_result)
+{
+ bool ret = FALSE;
+
+ if (rtt_result && (rtt_result->report.success_num != 0)) {
+ ret = TRUE;
+ }
+ return ret;
+}
+
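+/* On session end, if directed targets beyond the allowed parallel session
+ * count are still pending (excess targets), advance the target index and
+ * schedule the next ranging attempt.
+ */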
+static void
+dhd_rtt_trigger_pending_targets_on_session_end(dhd_pub_t *dhd)
+{
+ if (!(dhd_rtt_nan_is_directed_setup_in_prog(dhd)) &&
+ (!dhd_rtt_nan_all_directed_sessions_triggered(dhd)) &&
+ (!dhd_rtt_nan_directed_sessions_allowed(dhd))) {
+ /* Trigger next target from here */
+ dhd_rtt_set_next_target_idx(dhd,
+ (dhd_rtt_get_cur_target_idx(dhd) + 1));
+ dhd_rtt_schedule_rtt_work_thread(dhd, RTT_SCHED_RNG_DIR_EXCESS_TARGET);
+ }
+}
+#endif /* WL_NAN */
+#endif /* WL_CFG80211 */
+
+static int
+dhd_rtt_parse_result_event(wl_proxd_event_t *proxd_ev_data,
+ int tlvs_len, rtt_result_t *rtt_result)
+{
+ int ret = BCME_OK;
+
+	/* unpack TLVs and invoke the cbfn to print the event content TLVs */
+ ret = bcm_unpack_xtlv_buf((void *) rtt_result,
+ (uint8 *)&proxd_ev_data->tlvs[0], tlvs_len,
+ BCM_XTLV_OPTION_ALIGN32, rtt_unpack_xtlv_cbfn);
+ if (ret != BCME_OK) {
+ DHD_RTT_ERR(("%s : Failed to unpack xtlv for an event\n",
+ __FUNCTION__));
+ goto exit;
+ }
+ /* fill out the results from the configuration param */
+ rtt_result->report.type = RTT_TWO_WAY;
+ DHD_RTT(("report->ftm_num : %d\n", rtt_result->report.ftm_num));
+ rtt_result->report_len = RTT_REPORT_SIZE;
+ rtt_result->detail_len = sizeof(rtt_result->rtt_detail);
+
+exit:
+ return ret;
+}
+
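+/* Note: the braces of the "if (tlvs_len > 0)" block below intentionally span
+ * the WL_CFG80211 ifdef regions, so they only pair up when the WL_CFG80211
+ * paths are compiled in consistently.
+ */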
+static int
+dhd_rtt_handle_directed_rtt_burst_end(dhd_pub_t *dhd, struct ether_addr *peer_addr,
+ wl_proxd_event_t *proxd_ev_data, int tlvs_len, rtt_result_t *rtt_result, bool is_nan)
+{
+ int ret = BCME_OK;
+
+#ifdef WL_CFG80211
+ int err_at = 0;
+ rtt_status_info_t *rtt_status;
+ bool is_new = TRUE;
+ rtt_results_header_t *rtt_results_header = NULL;
+#endif /* WL_CFG80211 */
+
+#ifdef WL_CFG80211
+ rtt_status = GET_RTTSTATE(dhd);
+ is_new = !dhd_rtt_get_report_header(rtt_status,
+ &rtt_results_header, peer_addr);
+
+ if (tlvs_len > 0) {
+ if (is_new) {
+ /* allocate new header for rtt_results */
+ rtt_results_header = (rtt_results_header_t *)MALLOCZ(rtt_status->dhd->osh,
+ sizeof(rtt_results_header_t));
+ if (!rtt_results_header) {
+ ret = BCME_NORESOURCE;
+ err_at = 1;
+ goto exit;
+ }
+ /* Initialize the head of list for rtt result */
+ INIT_LIST_HEAD(&rtt_results_header->result_list);
+			/* same src and dest len */
+ (void)memcpy_s(&rtt_results_header->peer_mac, ETHER_ADDR_LEN,
+ peer_addr, ETHER_ADDR_LEN);
+ list_add_tail(&rtt_results_header->list, &rtt_status->rtt_results_cache);
+ }
+#endif /* WL_CFG80211 */
+
+ ret = dhd_rtt_parse_result_event(proxd_ev_data, tlvs_len, rtt_result);
+#ifdef WL_CFG80211
+ if (ret == BCME_OK) {
+ list_add_tail(&rtt_result->list, &rtt_results_header->result_list);
+ rtt_results_header->result_cnt++;
+ rtt_results_header->result_tot_len += rtt_result->report_len +
+ rtt_result->detail_len;
+ } else {
+ err_at = 2;
+ goto exit;
+ }
+ } else {
+ ret = BCME_ERROR;
+ err_at = 4;
+ goto exit;
+ }
+
+exit:
+ if (ret != BCME_OK) {
+ DHD_RTT_ERR(("dhd_rtt_handle_directed_rtt_burst_end: failed, "
+ " ret = %d, err_at = %d\n", ret, err_at));
+ if (rtt_results_header) {
+ list_del(&rtt_results_header->list);
+ MFREE(dhd->osh, rtt_results_header,
+ sizeof(rtt_results_header_t));
+ }
+ }
+#endif /* WL_CFG80211 */
+ return ret;
+}
+
+#ifdef WL_NAN
+static void
+dhd_rtt_nan_range_report(struct bcm_cfg80211 *cfg,
+ rtt_result_t *rtt_result, bool is_geofence)
+{
+ wl_nan_ev_rng_rpt_ind_t range_res;
+ int rtt_status;
+
+ UNUSED_PARAMETER(range_res);
+
+ if (!rtt_result)
+ return;
+
+ rtt_status = rtt_result->report.status;
+ bzero(&range_res, sizeof(range_res));
+ range_res.dist_mm = rtt_result->report.distance;
+	/* same src and dest len, ignoring ret val here */
+ (void)memcpy_s(&range_res.peer_m_addr, ETHER_ADDR_LEN,
+ &rtt_result->report.addr, ETHER_ADDR_LEN);
+ wl_cfgnan_process_range_report(cfg, &range_res, rtt_status);
+
+ return;
+}
+
+static int
+dhd_rtt_handle_nan_burst_end(dhd_pub_t *dhd, struct ether_addr *peer_addr,
+ wl_proxd_event_t *proxd_ev_data, int tlvs_len)
+{
+ struct net_device *ndev = NULL;
+ struct bcm_cfg80211 *cfg = NULL;
+ nan_ranging_inst_t *rng_inst = NULL;
+ rtt_status_info_t *rtt_status = NULL;
+ rtt_result_t *rtt_result = NULL;
+ bool geofence_rtt = FALSE;
+ int ret = BCME_OK;
+ rtt_result_t nan_rtt_res;
+ uint8 ftm_retry_cnt = 0;
+ int burst_status = -1;
+
+ ndev = dhd_linux_get_primary_netdev(dhd);
+ cfg = wiphy_priv(ndev->ieee80211_ptr->wiphy);
+
+ rtt_status = GET_RTTSTATE(dhd);
+ NULL_CHECK(rtt_status, "rtt_status is NULL", ret);
+ NAN_MUTEX_LOCK();
+ mutex_lock(&rtt_status->rtt_mutex);
+
+ if ((wl_cfgnan_is_enabled(cfg) == FALSE) ||
+ ETHER_ISNULLADDR(peer_addr)) {
+		DHD_RTT_ERR(("Received Burst End with NULL ether addr "
+			"or NAN disabled, nan_enable = %d\n", wl_cfgnan_is_enabled(cfg)));
+ ret = BCME_UNSUPPORTED;
+ goto exit;
+ }
+
+ rng_inst = wl_cfgnan_check_for_ranging(cfg, peer_addr);
+ if (rng_inst) {
+ geofence_rtt = (rng_inst->range_type
+ == RTT_TYPE_NAN_GEOFENCE);
+ } else {
+ DHD_RTT_ERR(("Received Burst End without Ranging Instance\n"));
+ ret = BCME_ERROR;
+ goto exit;
+ }
+
+ if (rng_inst->range_role == NAN_RANGING_ROLE_RESPONDER) {
+ ret = BCME_OK;
+ goto exit;
+ }
+
+ bzero(&nan_rtt_res, sizeof(nan_rtt_res));
+ ret = dhd_rtt_parse_result_event(proxd_ev_data, tlvs_len, &nan_rtt_res);
+ if (ret != BCME_OK) {
+ DHD_RTT_ERR(("Failed to parse RTT result %d\n", ret));
+ goto exit;
+ }
+
+ burst_status = nan_rtt_res.report.status;
+ if (nan_rtt_res.rtt_detail.num_ota_meas <= 1) {
+		/* Wait for some time (CRBs) for the FTM protocol to go through */
+ if (rng_inst->ftm_ssn_retry_count < NAN_RTT_FTM_SSN_RETRIES) {
+ rng_inst->ftm_ssn_retry_count++;
+ ftm_retry_cnt = rng_inst->ftm_ssn_retry_count;
+ ret = BCME_ERROR;
+ goto exit;
+ }
+ /* retries over...report the result as is to host */
+ }
+
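+	/* BCM_REFERENCE presumably just keeps dhd_rtt_is_valid_measurement from
+	 * tripping unused-function warnings in configs that never call it.
+	 */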
+ BCM_REFERENCE(dhd_rtt_is_valid_measurement);
+
+ if (geofence_rtt) {
+ rtt_result = &nan_rtt_res;
+ } else {
+ if (RTT_IS_STOPPED(rtt_status)) {
+ /* Ignore the Proxd event */
+ DHD_RTT((" event handler rtt is stopped \n"));
+ if (rtt_status->flags == WL_PROXD_SESSION_FLAG_TARGET) {
+ DHD_RTT(("Device is target/Responder. Recv the event. \n"));
+ } else {
+ ret = BCME_UNSUPPORTED;
+ goto exit;
+ }
+ }
+ /* allocate rtt_results for new results */
+ rtt_result = (rtt_result_t *)MALLOCZ(dhd->osh, sizeof(rtt_result_t));
+ if (!rtt_result) {
+ ret = BCME_NORESOURCE;
+ goto exit;
+ }
+ ret = dhd_rtt_handle_directed_rtt_burst_end(dhd, peer_addr,
+ proxd_ev_data, tlvs_len, rtt_result, TRUE);
+ if (ret != BCME_OK) {
+ goto exit;
+ }
+
+ }
+
+exit:
+ mutex_unlock(&rtt_status->rtt_mutex);
+ if (ret == BCME_OK) {
+ /* Nothing to do for Responder */
+ if (rng_inst->range_role == NAN_RANGING_ROLE_INITIATOR) {
+ dhd_rtt_nan_range_report(cfg, rtt_result, geofence_rtt);
+ }
+ } else {
+ DHD_RTT_ERR(("nan-rtt: Burst End handling failed err %d is_geofence %d "
+ "retry cnt %d burst status %d", ret, geofence_rtt,
+ ftm_retry_cnt, burst_status));
+ if (rtt_result && !geofence_rtt) {
+ MFREE(dhd->osh, rtt_result,
+ sizeof(rtt_result_t));
+ }
+ }
+ NAN_MUTEX_UNLOCK();
+ return ret;
+}
+#endif /* WL_NAN */
+
+int
+dhd_rtt_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data)
+{
+ int ret = BCME_OK;
+ int tlvs_len;
+ uint16 version;
+ wl_proxd_event_t *p_event;
+ wl_proxd_event_type_t event_type;
+ wl_proxd_ftm_session_status_t session_status;
+ const ftm_strmap_entry_t *p_loginfo;
+ rtt_result_t *rtt_result;
+#ifdef WL_CFG80211
+ rtt_status_info_t *rtt_status;
+ rtt_results_header_t *rtt_results_header = NULL;
+ bool is_new = TRUE;
+ rtt_target_info_t *target = NULL;
+#endif /* WL_CFG80211 */
+
+ DHD_RTT(("Enter %s \n", __FUNCTION__));
+ NULL_CHECK(dhd, "dhd is NULL", ret);
+
+ if (ntoh32_ua((void *)&event->datalen) < OFFSETOF(wl_proxd_event_t, tlvs)) {
+ DHD_RTT(("%s: wrong datalen:%d\n", __FUNCTION__,
+ ntoh32_ua((void *)&event->datalen)));
+ return -EINVAL;
+ }
+ event_type = ntoh32_ua((void *)&event->event_type);
+ if (event_type != WLC_E_PROXD) {
+ DHD_RTT_ERR((" failed event \n"));
+ return -EINVAL;
+ }
+
+ if (!event_data) {
+ DHD_RTT_ERR(("%s: event_data:NULL\n", __FUNCTION__));
+ return -EINVAL;
+ }
+ p_event = (wl_proxd_event_t *) event_data;
+ version = ltoh16(p_event->version);
+ if (version < WL_PROXD_API_VERSION) {
+ DHD_RTT_ERR(("ignore non-ftm event version = 0x%0x < WL_PROXD_API_VERSION (0x%x)\n",
+ version, WL_PROXD_API_VERSION));
+ return ret;
+ }
+
+ event_type = (wl_proxd_event_type_t) ltoh16(p_event->type);
+
+ DHD_RTT(("event_type=0x%x, ntoh16()=0x%x, ltoh16()=0x%x\n",
+ p_event->type, ntoh16(p_event->type), ltoh16(p_event->type)));
+ p_loginfo = ftm_get_event_type_loginfo(event_type);
+ if (p_loginfo == NULL) {
+ DHD_RTT_ERR(("receive an invalid FTM event %d\n", event_type));
+ ret = -EINVAL;
+ return ret; /* ignore this event */
+ }
+ /* get TLVs len, skip over event header */
+ if (ltoh16(p_event->len) < OFFSETOF(wl_proxd_event_t, tlvs)) {
+ DHD_RTT_ERR(("invalid FTM event length:%d\n", ltoh16(p_event->len)));
+ ret = -EINVAL;
+ return ret;
+ }
+ tlvs_len = ltoh16(p_event->len) - OFFSETOF(wl_proxd_event_t, tlvs);
+ DHD_RTT(("receive '%s' event: version=0x%x len=%d method=%d sid=%d tlvs_len=%d\n",
+ p_loginfo->text,
+ version,
+ ltoh16(p_event->len),
+ ltoh16(p_event->method),
+ ltoh16(p_event->sid),
+ tlvs_len));
+#ifdef WL_CFG80211
+#ifdef WL_NAN
+ if ((event_type == WL_PROXD_EVENT_BURST_END) &&
+ dhd_rtt_is_nan_peer(dhd, &event->addr)) {
+ DHD_RTT(("WL_PROXD_EVENT_BURST_END for NAN RTT\n"));
+ ret = dhd_rtt_handle_nan_burst_end(dhd, &event->addr, p_event, tlvs_len);
+ return ret;
+ }
+#endif /* WL_NAN */
+
+ rtt_status = GET_RTTSTATE(dhd);
+ NULL_CHECK(rtt_status, "rtt_status is NULL", ret);
+ mutex_lock(&rtt_status->rtt_mutex);
+
+ if (RTT_IS_STOPPED(rtt_status)) {
+ /* Ignore the Proxd event */
+ DHD_RTT((" event handler rtt is stopped \n"));
+ if (rtt_status->flags == WL_PROXD_SESSION_FLAG_TARGET) {
+ DHD_RTT(("Device is target/Responder. Recv the event. \n"));
+ } else {
+ ret = BCME_NOTREADY;
+ goto exit;
+ }
+ }
+
+ /* check current target_mac and event_mac are matching */
+ target = &rtt_status->rtt_config.target_info[rtt_status->cur_idx];
+ if (memcmp(&target->addr, &event->addr, ETHER_ADDR_LEN)) {
+ DHD_RTT(("Ignore Proxd event for the unexpected peer "MACDBG
+ " expected peer "MACDBG"\n", MAC2STRDBG(&event->addr),
+ MAC2STRDBG(&target->addr)));
+ goto exit;
+ }
+
+#endif /* WL_CFG80211 */
+
+#ifdef WL_CFG80211
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ is_new = !dhd_rtt_get_report_header(rtt_status,
+ &rtt_results_header, &event->addr);
+ GCC_DIAGNOSTIC_POP();
+#endif /* WL_CFG80211 */
+ switch (event_type) {
+ case WL_PROXD_EVENT_SESSION_CREATE:
+ DHD_RTT(("WL_PROXD_EVENT_SESSION_CREATE\n"));
+ break;
+ case WL_PROXD_EVENT_SESSION_START:
+ DHD_RTT(("WL_PROXD_EVENT_SESSION_START\n"));
+ break;
+ case WL_PROXD_EVENT_BURST_START:
+ DHD_RTT(("WL_PROXD_EVENT_BURST_START\n"));
+ break;
+ case WL_PROXD_EVENT_BURST_END:
+ DHD_RTT(("WL_PROXD_EVENT_BURST_END for Legacy RTT\n"));
+ /* allocate rtt_results for new legacy rtt results */
+ rtt_result = (rtt_result_t *)MALLOCZ(dhd->osh, sizeof(rtt_result_t));
+ if (!rtt_result) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+ ret = dhd_rtt_handle_directed_rtt_burst_end(dhd, &event->addr,
+ p_event, tlvs_len, rtt_result, FALSE);
+ if (rtt_result &&
+#ifdef WL_CFG80211
+ (ret != BCME_OK) &&
+#endif /* WL_CFG80211 */
+ TRUE) {
+			/*
+			 * For non-cfg builds, free rtt_result unconditionally,
+			 * as it is not needed any further
+			 */
+ MFREE(dhd->osh, rtt_result,
+ sizeof(rtt_result_t));
+ goto exit;
+ }
+ break;
+ case WL_PROXD_EVENT_SESSION_END:
+ DHD_RTT(("WL_PROXD_EVENT_SESSION_END\n"));
+#ifdef WL_CFG80211
+ if (!RTT_IS_ENABLED(rtt_status)) {
+ DHD_RTT(("Ignore the session end evt\n"));
+ goto exit;
+ }
+#endif /* WL_CFG80211 */
+ if (tlvs_len > 0) {
+			/* unpack TLVs and invoke the cbfn to print the event content TLVs */
+ ret = bcm_unpack_xtlv_buf((void *) &session_status,
+ (uint8 *)&p_event->tlvs[0], tlvs_len,
+ BCM_XTLV_OPTION_ALIGN32, rtt_unpack_xtlv_cbfn);
+ if (ret != BCME_OK) {
+ DHD_RTT_ERR(("%s : Failed to unpack xtlv for an event\n",
+ __FUNCTION__));
+ goto exit;
+ }
+ }
+#ifdef WL_CFG80211
+		/* In case of no result for the peer device, create a fake result for the error case */
+ if (is_new) {
+ dhd_rtt_create_failure_result(rtt_status, &event->addr);
+ }
+ DHD_RTT_MEM(("RTT Session End for Legacy peer "MACDBG"\n",
+ MAC2STRDBG(&event->addr)));
+ dhd_rtt_handle_rtt_session_end(dhd);
+#endif /* WL_CFG80211 */
+ break;
+ case WL_PROXD_EVENT_SESSION_RESTART:
+ DHD_RTT(("WL_PROXD_EVENT_SESSION_RESTART\n"));
+ break;
+ case WL_PROXD_EVENT_BURST_RESCHED:
+ DHD_RTT(("WL_PROXD_EVENT_BURST_RESCHED\n"));
+ break;
+ case WL_PROXD_EVENT_SESSION_DESTROY:
+ DHD_RTT(("WL_PROXD_EVENT_SESSION_DESTROY\n"));
+ break;
+ case WL_PROXD_EVENT_FTM_FRAME:
+ DHD_RTT(("WL_PROXD_EVENT_FTM_FRAME\n"));
+ break;
+ case WL_PROXD_EVENT_DELAY:
+ DHD_RTT(("WL_PROXD_EVENT_DELAY\n"));
+ break;
+ case WL_PROXD_EVENT_VS_INITIATOR_RPT:
+ DHD_RTT(("WL_PROXD_EVENT_VS_INITIATOR_RPT\n "));
+ break;
+ case WL_PROXD_EVENT_RANGING:
+ DHD_RTT(("WL_PROXD_EVENT_RANGING\n"));
+ break;
+ case WL_PROXD_EVENT_COLLECT:
+ DHD_RTT(("WL_PROXD_EVENT_COLLECT\n"));
+ if (tlvs_len > 0) {
+ void *buffer = NULL;
+ if (!(buffer = (void *)MALLOCZ(dhd->osh, tlvs_len))) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+			/* unpack TLVs and invoke the cbfn to print the event content TLVs */
+ ret = bcm_unpack_xtlv_buf(buffer,
+ (uint8 *)&p_event->tlvs[0], tlvs_len,
+ BCM_XTLV_OPTION_NONE, rtt_unpack_xtlv_cbfn);
+ MFREE(dhd->osh, buffer, tlvs_len);
+ if (ret != BCME_OK) {
+ DHD_RTT_ERR(("%s : Failed to unpack xtlv for event %d\n",
+ __FUNCTION__, event_type));
+ goto exit;
+ }
+ }
+ break;
+ case WL_PROXD_EVENT_MF_STATS:
+ DHD_RTT(("WL_PROXD_EVENT_MF_STATS\n"));
+ if (tlvs_len > 0) {
+ void *buffer = NULL;
+ if (!(buffer = (void *)MALLOCZ(dhd->osh, tlvs_len))) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+			/* unpack TLVs and invoke the cbfn to print the event content TLVs */
+ ret = bcm_unpack_xtlv_buf(buffer,
+ (uint8 *)&p_event->tlvs[0], tlvs_len,
+ BCM_XTLV_OPTION_NONE, rtt_unpack_xtlv_cbfn);
+ MFREE(dhd->osh, buffer, tlvs_len);
+ if (ret != BCME_OK) {
+ DHD_RTT_ERR(("%s : Failed to unpack xtlv for event %d\n",
+ __FUNCTION__, event_type));
+ goto exit;
+ }
+ }
+ break;
+
+ default:
+ DHD_RTT_ERR(("WLC_E_PROXD: not supported EVENT Type:%d\n", event_type));
+ break;
+ }
+exit:
+#ifdef WL_CFG80211
+ mutex_unlock(&rtt_status->rtt_mutex);
+#endif /* WL_CFG80211 */
+
+ return ret;
+}
+
+#ifdef WL_CFG80211
+static void
+dhd_rtt_work(struct work_struct *work)
+{
+ rtt_status_info_t *rtt_status;
+ dhd_pub_t *dhd;
+
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ rtt_status = container_of(work, rtt_status_info_t, work);
+ GCC_DIAGNOSTIC_POP();
+
+ dhd = rtt_status->dhd;
+ if (dhd == NULL) {
+ DHD_RTT_ERR(("%s : dhd is NULL\n", __FUNCTION__));
+ return;
+ }
+ (void) dhd_rtt_start(dhd);
+}
+#endif /* WL_CFG80211 */
+
+int
+dhd_rtt_capability(dhd_pub_t *dhd, rtt_capabilities_t *capa)
+{
+ rtt_status_info_t *rtt_status;
+ int err = BCME_OK;
+ NULL_CHECK(dhd, "dhd is NULL", err);
+ rtt_status = GET_RTTSTATE(dhd);
+ NULL_CHECK(rtt_status, "rtt_status is NULL", err);
+ NULL_CHECK(capa, "capa is NULL", err);
+ bzero(capa, sizeof(rtt_capabilities_t));
+
+ /* set rtt capabilities */
+ if (rtt_status->rtt_capa.proto & RTT_CAP_ONE_WAY)
+ capa->rtt_one_sided_supported = 1;
+ if (rtt_status->rtt_capa.proto & RTT_CAP_FTM_WAY)
+ capa->rtt_ftm_supported = 1;
+
+ if (rtt_status->rtt_capa.feature & RTT_FEATURE_LCI)
+ capa->lci_support = 1;
+ if (rtt_status->rtt_capa.feature & RTT_FEATURE_LCR)
+ capa->lcr_support = 1;
+ if (rtt_status->rtt_capa.feature & RTT_FEATURE_PREAMBLE)
+ capa->preamble_support = 1;
+ if (rtt_status->rtt_capa.feature & RTT_FEATURE_BW)
+ capa->bw_support = 1;
+
+	/* overwrite the boolean flags above with the actual preamble/bw bit masks */
+ capa->preamble_support = rtt_status->rtt_capa.preamble;
+ capa->bw_support = rtt_status->rtt_capa.bw;
+
+ return err;
+}
+
+#ifdef WL_CFG80211
+int
+dhd_rtt_avail_channel(dhd_pub_t *dhd, wifi_channel_info *channel_info)
+{
+ u32 chanspec = 0;
+ int err = BCME_OK;
+ chanspec_t c = 0;
+ u32 channel;
+ struct net_device *dev = dhd_linux_get_primary_netdev(dhd);
+
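+	/* Read the current chanspec from the fw and translate it into the HAL
+	 * wifi_channel_info (width plus center frequencies).
+	 */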
+ if ((err = wldev_iovar_getint(dev, "chanspec",
+ (s32 *)&chanspec)) == BCME_OK) {
+ c = (chanspec_t)dtoh32(chanspec);
+ c = wl_chspec_driver_to_host(c);
+ channel = wf_chspec_ctlchan(c);
+ DHD_RTT((" control channel is %d \n", channel));
+ if (CHSPEC_IS20(c)) {
+ channel_info->width = WIFI_CHAN_WIDTH_20;
+ DHD_RTT((" band is 20 \n"));
+ } else if (CHSPEC_IS40(c)) {
+ channel_info->width = WIFI_CHAN_WIDTH_40;
+ DHD_RTT(("band is 40 \n"));
+ } else {
+ channel_info->width = WIFI_CHAN_WIDTH_80;
+ DHD_RTT(("band is 80 \n"));
+ }
+ if (CHSPEC_IS2G(c) && (channel >= CH_MIN_2G_CHANNEL) &&
+ (channel <= CH_MAX_2G_CHANNEL)) {
+ channel_info->center_freq =
+ ieee80211_channel_to_frequency(channel, IEEE80211_BAND_2GHZ);
+ } else if (CHSPEC_IS5G(c) && channel >= CH_MIN_5G_CHANNEL) {
+ channel_info->center_freq =
+ ieee80211_channel_to_frequency(channel, IEEE80211_BAND_5GHZ);
+ }
+ if ((channel_info->width == WIFI_CHAN_WIDTH_80) ||
+ (channel_info->width == WIFI_CHAN_WIDTH_40)) {
+ channel = CHSPEC_CHANNEL(c);
+ channel_info->center_freq0 =
+ ieee80211_channel_to_frequency(channel, IEEE80211_BAND_5GHZ);
+ }
+ } else {
+ DHD_RTT_ERR(("Failed to get the chanspec \n"));
+ }
+ return err;
+}
+
+int
+dhd_rtt_enable_responder(dhd_pub_t *dhd, wifi_channel_info *channel_info)
+{
+ int err = BCME_OK;
+ char chanbuf[CHANSPEC_STR_LEN];
+ int pm = PM_OFF;
+ int ftm_cfg_cnt = 0;
+ chanspec_t chanspec;
+ wifi_channel_info channel;
+ struct net_device *dev = dhd_linux_get_primary_netdev(dhd);
+ ftm_config_options_info_t ftm_configs[FTM_MAX_CONFIGS];
+ ftm_config_param_info_t ftm_params[FTM_MAX_PARAMS];
+ rtt_status_info_t *rtt_status;
+
+ memset(&channel, 0, sizeof(channel));
+ BCM_REFERENCE(chanbuf);
+ NULL_CHECK(dhd, "dhd is NULL", err);
+ rtt_status = GET_RTTSTATE(dhd);
+ NULL_CHECK(rtt_status, "rtt_status is NULL", err);
+ if (RTT_IS_STOPPED(rtt_status)) {
+ DHD_RTT(("STA responder/Target. \n"));
+ }
+ DHD_RTT(("Enter %s \n", __FUNCTION__));
+ if (!dhd_is_associated(dhd, 0, NULL)) {
+ if (channel_info) {
+ channel.width = channel_info->width;
+ channel.center_freq = channel_info->center_freq;
+ channel.center_freq0 = channel_info->center_freq;
+		} else {
+ channel.width = WIFI_CHAN_WIDTH_80;
+ channel.center_freq = DEFAULT_FTM_FREQ;
+ channel.center_freq0 = DEFAULT_FTM_CNTR_FREQ0;
+ }
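+		/* No channel supplied: default to channel 36 (5180 MHz) at 80 MHz
+		 * width, centered on 5210 MHz (DEFAULT_FTM_CNTR_FREQ0).
+		 */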
+ chanspec = dhd_rtt_convert_to_chspec(channel);
+ DHD_RTT(("chanspec/channel set as %s for rtt.\n",
+ wf_chspec_ntoa(chanspec, chanbuf)));
+ err = wldev_iovar_setint(dev, "chanspec", chanspec);
+ if (err) {
+ DHD_RTT_ERR(("Failed to set the chanspec \n"));
+ }
+ }
+ rtt_status->pm = PM_OFF;
+ err = wldev_ioctl_get(dev, WLC_GET_PM, &rtt_status->pm, sizeof(rtt_status->pm));
+ DHD_RTT(("Current PM value read %d\n", rtt_status->pm));
+ if (err) {
+ DHD_RTT_ERR(("Failed to get the PM value \n"));
+ } else {
+ err = wldev_ioctl_set(dev, WLC_SET_PM, &pm, sizeof(pm));
+ if (err) {
+ DHD_RTT_ERR(("Failed to set the PM \n"));
+ rtt_status->pm_restore = FALSE;
+ } else {
+ rtt_status->pm_restore = TRUE;
+ }
+ }
+ if (!RTT_IS_ENABLED(rtt_status)) {
+ err = dhd_rtt_ftm_enable(dhd, TRUE);
+ if (err) {
+ DHD_RTT_ERR(("Failed to enable FTM (%d)\n", err));
+ goto exit;
+ }
+ DHD_RTT(("FTM enabled \n"));
+ }
+ rtt_status->status = RTT_ENABLED;
+ DHD_RTT(("Responder enabled \n"));
+ memset(ftm_configs, 0, sizeof(ftm_configs));
+ memset(ftm_params, 0, sizeof(ftm_params));
+ ftm_configs[ftm_cfg_cnt].enable = TRUE;
+ ftm_configs[ftm_cfg_cnt++].flags = WL_PROXD_SESSION_FLAG_TARGET;
+ rtt_status->flags = WL_PROXD_SESSION_FLAG_TARGET;
+ DHD_RTT(("Set the device as responder \n"));
+ err = dhd_rtt_ftm_config(dhd, FTM_DEFAULT_SESSION, FTM_CONFIG_CAT_OPTIONS,
+ ftm_configs, ftm_cfg_cnt);
+exit:
+ if (err) {
+ rtt_status->status = RTT_STOPPED;
+ DHD_RTT_ERR(("rtt is stopped %s \n", __FUNCTION__));
+ dhd_rtt_ftm_enable(dhd, FALSE);
+ DHD_RTT(("restoring the PM value \n"));
+ if (rtt_status->pm_restore) {
+ pm = PM_FAST;
+ err = wldev_ioctl_set(dev, WLC_SET_PM, &pm, sizeof(pm));
+ if (err) {
+ DHD_RTT_ERR(("Failed to restore PM \n"));
+ } else {
+ rtt_status->pm_restore = FALSE;
+ }
+ }
+ }
+ return err;
+}
+
+int
+dhd_rtt_cancel_responder(dhd_pub_t *dhd)
+{
+ int err = BCME_OK;
+ rtt_status_info_t *rtt_status;
+ int pm = 0;
+ struct net_device *dev = dhd_linux_get_primary_netdev(dhd);
+
+ NULL_CHECK(dhd, "dhd is NULL", err);
+ rtt_status = GET_RTTSTATE(dhd);
+ NULL_CHECK(rtt_status, "rtt_status is NULL", err);
+ DHD_RTT(("Enter %s \n", __FUNCTION__));
+ err = dhd_rtt_ftm_enable(dhd, FALSE);
+ if (err) {
+ DHD_RTT_ERR(("failed to disable FTM (%d)\n", err));
+ }
+ rtt_status->status = RTT_STOPPED;
+ if (rtt_status->pm_restore) {
+ pm = PM_FAST;
+ DHD_RTT(("pm_restore =%d \n", rtt_status->pm_restore));
+ err = wldev_ioctl_set(dev, WLC_SET_PM, &pm, sizeof(pm));
+ if (err) {
+ DHD_RTT_ERR(("Failed to restore PM \n"));
+ } else {
+ rtt_status->pm_restore = FALSE;
+ }
+ }
+ return err;
+}
+
+#ifdef WL_NAN
+static bool
+dhd_rtt_parallel_nan_rtt_sessions_supported(dhd_pub_t *dhd)
+{
+ bool supported = FALSE;
+
+ if ((dhd->wlc_ver_major > RTT_PARALLEL_SSNS_SUPPORTED_MAJ_VER) ||
+ ((dhd->wlc_ver_major == RTT_PARALLEL_SSNS_SUPPORTED_MAJ_VER) &&
+ (dhd->wlc_ver_minor >= RTT_PARALLEL_SSNS_SUPPORTED_MIN_VER))) {
+ supported = TRUE;
+ }
+
+ return supported;
+}
+
+int
+dhd_rtt_get_max_nan_rtt_sessions_supported(dhd_pub_t *dhd)
+{
+ int max_sessions = 0;
+
+	/* Older fw branches do not support parallel rtt sessions */
+ if (dhd_rtt_parallel_nan_rtt_sessions_supported(dhd)) {
+ max_sessions = DHD_NAN_RTT_MAX_SESSIONS;
+ } else {
+ max_sessions = DHD_NAN_RTT_MAX_SESSIONS_LEGACY;
+ }
+
+ return max_sessions;
+}
+#endif /* WL_NAN */
+#endif /* WL_CFG80211 */
+
+/*
+ * DHD Attach Context
+ */
+int
+dhd_rtt_attach(dhd_pub_t *dhd)
+{
+ int err = BCME_OK;
+#ifdef WL_CFG80211
+ rtt_status_info_t *rtt_status = NULL;
+
+ dhd->rtt_supported = FALSE;
+ if (dhd->rtt_state) {
+ return err;
+ }
+
+ dhd->rtt_state = (rtt_status_info_t *)MALLOCZ(dhd->osh,
+ sizeof(rtt_status_info_t));
+ if (dhd->rtt_state == NULL) {
+ err = BCME_NOMEM;
+ DHD_RTT_ERR(("%s : failed to create rtt_state\n", __FUNCTION__));
+ return err;
+ }
+ bzero(dhd->rtt_state, sizeof(rtt_status_info_t));
+ rtt_status = GET_RTTSTATE(dhd);
+ rtt_status->rtt_config.target_info =
+ (rtt_target_info_t *)MALLOCZ(dhd->osh,
+ TARGET_INFO_SIZE(RTT_MAX_TARGET_CNT));
+ if (rtt_status->rtt_config.target_info == NULL) {
+ DHD_RTT_ERR(("%s failed to allocate the target info for %d\n",
+ __FUNCTION__, RTT_MAX_TARGET_CNT));
+ err = BCME_NOMEM;
+ goto exit;
+ }
+ rtt_status->dhd = dhd;
+ mutex_init(&rtt_status->rtt_mutex);
+ mutex_init(&rtt_status->geofence_mutex);
+ INIT_LIST_HEAD(&rtt_status->noti_fn_list);
+ INIT_LIST_HEAD(&rtt_status->rtt_results_cache);
+ INIT_WORK(&rtt_status->work, dhd_rtt_work);
+ /* initialize proxd timer */
+ INIT_DELAYED_WORK(&rtt_status->proxd_timeout, dhd_rtt_timeout_work);
+#ifdef WL_NAN
+ /* initialize proxd retry timer */
+ INIT_DELAYED_WORK(&rtt_status->rtt_retry_timer, dhd_rtt_retry_work);
+#endif /* WL_NAN */
+exit:
+ if (err < 0) {
+ MFREE(dhd->osh, rtt_status->rtt_config.target_info,
+ TARGET_INFO_SIZE(RTT_MAX_TARGET_CNT));
+ MFREE(dhd->osh, dhd->rtt_state, sizeof(rtt_status_info_t));
+ }
+#endif /* WL_CFG80211 */
+ return err;
+}
+
+/*
+ * DHD Detach Context
+ */
+int
+dhd_rtt_detach(dhd_pub_t *dhd)
+{
+ int err = BCME_OK;
+
+#ifdef WL_CFG80211
+ rtt_status_info_t *rtt_status;
+
+ NULL_CHECK(dhd, "dhd is NULL", err);
+ rtt_status = GET_RTTSTATE(dhd);
+ NULL_CHECK(rtt_status, "rtt_status is NULL", err);
+
+ err = dhd_rtt_deinit(dhd);
+ if (err != BCME_OK) {
+ DHD_RTT_ERR(("dhd_rtt_deinit failed while detaching"
+ " err = %d\n", err));
+ goto exit;
+ }
+
+exit:
+ MFREE(dhd->osh, rtt_status->rtt_config.target_info,
+ TARGET_INFO_SIZE(RTT_MAX_TARGET_CNT));
+ MFREE(dhd->osh, dhd->rtt_state, sizeof(rtt_status_info_t));
+
+#endif /* WL_CFG80211 */
+
+ return err;
+}
+
+/*
+ * If Up context
+ */
+int
+dhd_rtt_init(dhd_pub_t *dhd)
+{
+ int err = BCME_OK;
+#ifdef WL_CFG80211
+ int ret;
+ int32 version;
+ rtt_status_info_t *rtt_status;
+
+ NULL_CHECK(dhd, "dhd is NULL", err);
+ rtt_status = GET_RTTSTATE(dhd);
+ NULL_CHECK(rtt_status, "rtt_status is NULL", err);
+
+ DHD_RTT_MEM(("dhd_rtt_init ENTRY\n"));
+
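+	/* FTM support is advertised only on an exact proxd API version match
+	 * between host (WL_PROXD_API_VERSION) and fw.
+	 */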
+ ret = dhd_rtt_get_version(dhd, &version);
+ if (ret == BCME_OK && (version == WL_PROXD_API_VERSION)) {
+ DHD_RTT_ERR(("%s : FTM is supported\n", __FUNCTION__));
+ dhd->rtt_supported = TRUE;
+ /* TODO : need to find a way to check rtt capability */
+ /* rtt_status->rtt_capa.proto |= RTT_CAP_ONE_WAY; */
+ rtt_status->rtt_capa.proto |= RTT_CAP_FTM_WAY;
+
+ /* indicate to set tx rate */
+ rtt_status->rtt_capa.feature |= RTT_FEATURE_LCI;
+ rtt_status->rtt_capa.feature |= RTT_FEATURE_LCR;
+ rtt_status->rtt_capa.feature |= RTT_FEATURE_PREAMBLE;
+ rtt_status->rtt_capa.preamble |= RTT_PREAMBLE_VHT;
+ rtt_status->rtt_capa.preamble |= RTT_PREAMBLE_HT;
+
+		/* indicate to set bandwidth */
+ rtt_status->rtt_capa.feature |= RTT_FEATURE_BW;
+ rtt_status->rtt_capa.bw |= RTT_BW_20;
+ rtt_status->rtt_capa.bw |= RTT_BW_40;
+ rtt_status->rtt_capa.bw |= RTT_BW_80;
+ } else {
+ if ((ret != BCME_OK) || (version == 0)) {
+ DHD_RTT_ERR(("%s : FTM is not supported\n", __FUNCTION__));
+ } else {
+ DHD_RTT_ERR(("%s : FTM version mismatch between HOST (%d) and FW (%d)\n",
+ __FUNCTION__, WL_PROXD_API_VERSION, version));
+ }
+ goto exit;
+ }
+
+#ifdef WL_NAN
+ rtt_status->max_nan_rtt_sessions = dhd_rtt_get_max_nan_rtt_sessions_supported(dhd);
+#endif /* WL_NAN */
+	/* cancel all RTT requests once we get a cancel request */
+ rtt_status->all_cancel = TRUE;
+
+exit:
+ DHD_ERROR(("dhd_rtt_init EXIT, err = %d\n", err));
+#endif /* WL_CFG80211 */
+
+ return err;
+}
+
+/*
+ * If Down context
+ */
+int
+dhd_rtt_deinit(dhd_pub_t *dhd)
+{
+ int err = BCME_OK;
+#ifdef WL_CFG80211
+ rtt_status_info_t *rtt_status;
+ rtt_results_header_t *rtt_header, *next;
+ rtt_result_t *rtt_result, *next2;
+ struct rtt_noti_callback *iter, *iter2;
+ rtt_target_info_t *rtt_target = NULL;
+
+ NULL_CHECK(dhd, "dhd is NULL", err);
+ rtt_status = GET_RTTSTATE(dhd);
+ NULL_CHECK(rtt_status, "rtt_status is NULL", err);
+
+ DHD_RTT_MEM(("dhd_rtt_deinit: ENTER\n"));
+
+#ifdef WL_NAN
+ if (delayed_work_pending(&rtt_status->rtt_retry_timer)) {
+ cancel_delayed_work_sync(&rtt_status->rtt_retry_timer);
+ }
+#endif /* WL_NAN */
+
+ if (work_pending(&rtt_status->work)) {
+ cancel_work_sync(&rtt_status->work);
+ rtt_status->rtt_sched = FALSE;
+ }
+
+ if (delayed_work_pending(&rtt_status->proxd_timeout)) {
+ cancel_delayed_work_sync(&rtt_status->proxd_timeout);
+ }
+
+ /*
+ * Cleanup attempt is required,
+ * if legacy RTT session is in progress
+ */
+ if ((!RTT_IS_STOPPED(rtt_status)) &&
+ rtt_status->rtt_config.rtt_target_cnt &&
+ (rtt_status->cur_idx < rtt_status->rtt_config.rtt_target_cnt)) {
+ /* if dhd is started and there is a target cnt */
+ rtt_target = &rtt_status->rtt_config.target_info[rtt_status->cur_idx];
+ if (rtt_target->peer == RTT_PEER_AP) {
+ DHD_RTT_MEM(("dhd_rtt_deinit: Deleting Default FTM Session\n"));
+ dhd_rtt_delete_session(dhd, FTM_DEFAULT_SESSION);
+ }
+ }
+
+ rtt_status->status = RTT_STOPPED;
+ DHD_RTT(("rtt is stopped %s \n", __FUNCTION__));
+ /* clear evt callback list */
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ if (!list_empty(&rtt_status->noti_fn_list)) {
+ list_for_each_entry_safe(iter, iter2, &rtt_status->noti_fn_list, list) {
+ list_del(&iter->list);
+ MFREE(dhd->osh, iter, sizeof(struct rtt_noti_callback));
+ }
+ }
+ /* remove the rtt results */
+ if (!list_empty(&rtt_status->rtt_results_cache)) {
+ list_for_each_entry_safe(rtt_header, next, &rtt_status->rtt_results_cache, list) {
+ list_del(&rtt_header->list);
+ list_for_each_entry_safe(rtt_result, next2,
+ &rtt_header->result_list, list) {
+ list_del(&rtt_result->list);
+ MFREE(dhd->osh, rtt_result, sizeof(rtt_result_t));
+ }
+ MFREE(dhd->osh, rtt_header, sizeof(rtt_results_header_t));
+ }
+ }
+ GCC_DIAGNOSTIC_POP();
+ DHD_RTT_MEM(("dhd_rtt_deinit: EXIT, err = %d\n", err));
+#endif /* WL_CFG80211 */
+ return err;
+}
diff --git a/bcmdhd.101.10.361.x/dhd_rtt.h b/bcmdhd.101.10.361.x/dhd_rtt.h
new file mode 100755
index 0000000..dd38122
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_rtt.h
@@ -0,0 +1,555 @@
+/*
+ * Broadcom Dongle Host Driver (DHD), RTT
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+#ifndef __DHD_RTT_H__
+#define __DHD_RTT_H__
+
+#include <dngl_stats.h>
+#include "wifi_stats.h"
+
+#define RTT_MAX_TARGET_CNT 50
+#define RTT_MAX_FRAME_CNT 25
+#define RTT_MAX_RETRY_CNT 10
+#define DEFAULT_FTM_CNT 6
+#define DEFAULT_RETRY_CNT 6
+#define DEFAULT_FTM_FREQ 5180
+#define DEFAULT_FTM_CNTR_FREQ0 5210
+#define RTT_MAX_GEOFENCE_TARGET_CNT 8
+
+#define TARGET_INFO_SIZE(count) (sizeof(rtt_target_info_t) * (count))
+
+#define TARGET_TYPE(target) ((target)->type)
+
+#define RTT_IS_ENABLED(rtt_status) (rtt_status->status == RTT_ENABLED)
+#define RTT_IS_STOPPED(rtt_status) (rtt_status->status == RTT_STOPPED)
+
+#define GEOFENCE_RTT_LOCK(rtt_status) mutex_lock(&(rtt_status)->geofence_mutex)
+#define GEOFENCE_RTT_UNLOCK(rtt_status) mutex_unlock(&(rtt_status)->geofence_mutex)
+
+#ifndef BIT
+#define BIT(x) (1 << (x))
+#endif
+
+/* DSSS, CCK and 802.11n rates in [500kbps] units */
+#define WL_MAXRATE 108 /* in 500kbps units */
+#define WL_RATE_1M 2 /* in 500kbps units */
+#define WL_RATE_2M 4 /* in 500kbps units */
+#define WL_RATE_5M5 11 /* in 500kbps units */
+#define WL_RATE_11M 22 /* in 500kbps units */
+#define WL_RATE_6M 12 /* in 500kbps units */
+#define WL_RATE_9M 18 /* in 500kbps units */
+#define WL_RATE_12M 24 /* in 500kbps units */
+#define WL_RATE_18M 36 /* in 500kbps units */
+#define WL_RATE_24M 48 /* in 500kbps units */
+#define WL_RATE_36M 72 /* in 500kbps units */
+#define WL_RATE_48M 96 /* in 500kbps units */
+#define WL_RATE_54M 108 /* in 500kbps units */
+#define GET_RTTSTATE(dhd) ((rtt_status_info_t *)dhd->rtt_state)
+
+#ifdef WL_NAN
+/* RTT Retry Timer Interval */
+/* Fix Me: Revert back once retry logic is back in place */
+#define DHD_RTT_RETRY_TIMER_INTERVAL_MS -1
+#endif /* WL_NAN */
+
+#define DHD_RTT_INVALID_TARGET_INDEX -1
+
+enum rtt_role {
+ RTT_INITIATOR = 0,
+ RTT_TARGET = 1
+};
+enum rtt_status {
+ RTT_STOPPED = 0,
+ RTT_STARTED = 1,
+ RTT_ENABLED = 2
+};
+typedef int64_t wifi_timestamp; /* In microseconds (us) */
+typedef int64_t wifi_timespan;
+typedef int32 wifi_rssi_rtt;
+
+typedef enum {
+ RTT_INVALID,
+ RTT_ONE_WAY,
+ RTT_TWO_WAY,
+ RTT_AUTO
+} rtt_type_t;
+
+/* RTT peer type */
+typedef enum {
+ RTT_PEER_AP = 0x1,
+ RTT_PEER_STA = 0x2,
+ RTT_PEER_P2P_GO = 0x3,
+ RTT_PEER_P2P_CLIENT = 0x4,
+ RTT_PEER_NAN = 0x5,
+ RTT_PEER_INVALID = 0x6
+} rtt_peer_type_t;
+
+/* Ranging status */
+typedef enum rtt_reason {
+ RTT_STATUS_SUCCESS = 0,
+ RTT_STATUS_FAILURE = 1, // general failure status
+ RTT_STATUS_FAIL_NO_RSP = 2, // target STA does not respond to request
+ RTT_STATUS_FAIL_REJECTED = 3, // request rejected. Applies to 2-sided RTT only
+ RTT_STATUS_FAIL_NOT_SCHEDULED_YET = 4,
+ RTT_STATUS_FAIL_TM_TIMEOUT = 5, // timing measurement times out
+ RTT_STATUS_FAIL_AP_ON_DIFF_CHANNEL = 6, // Target on different channel, cannot range
+ RTT_STATUS_FAIL_NO_CAPABILITY = 7, // ranging not supported
+ RTT_STATUS_ABORTED = 8, // request aborted for unknown reason
+ RTT_STATUS_FAIL_INVALID_TS = 9, // Invalid T1-T4 timestamp
+ RTT_STATUS_FAIL_PROTOCOL = 10, // 11mc protocol failed
+ RTT_STATUS_FAIL_SCHEDULE = 11, // request could not be scheduled
+ RTT_STATUS_FAIL_BUSY_TRY_LATER = 12, // responder cannot collaborate at time of request
+ RTT_STATUS_INVALID_REQ = 13, // bad request args
+	RTT_STATUS_NO_WIFI = 14, // WiFi not enabled
+	// Responder overrides param info, cannot range with new params
+	RTT_STATUS_FAIL_FTM_PARAM_OVERRIDE = 15
+} rtt_reason_t;
+
+enum {
+ RTT_CAP_ONE_WAY = BIT(0),
+ /* IEEE802.11mc */
+ RTT_CAP_FTM_WAY = BIT(1)
+};
+
+enum {
+ RTT_FEATURE_LCI = BIT(0),
+ RTT_FEATURE_LCR = BIT(1),
+ RTT_FEATURE_PREAMBLE = BIT(2),
+ RTT_FEATURE_BW = BIT(3)
+};
+
+enum {
+ RTT_PREAMBLE_LEGACY = BIT(0),
+ RTT_PREAMBLE_HT = BIT(1),
+ RTT_PREAMBLE_VHT = BIT(2)
+};
+
+enum {
+ RTT_BW_5 = BIT(0),
+ RTT_BW_10 = BIT(1),
+ RTT_BW_20 = BIT(2),
+ RTT_BW_40 = BIT(3),
+ RTT_BW_80 = BIT(4),
+ RTT_BW_160 = BIT(5)
+};
+
+enum rtt_rate_bw {
+ RTT_RATE_20M,
+ RTT_RATE_40M,
+ RTT_RATE_80M,
+ RTT_RATE_160M
+};
+
+typedef enum ranging_type {
+ RTT_TYPE_INVALID = 0,
+ RTT_TYPE_LEGACY = 1,
+ RTT_TYPE_NAN_DIRECTED = 2,
+ RTT_TYPE_NAN_GEOFENCE = 3
+} ranging_type_t;
+
+typedef enum ranging_target_list_mode {
+ RNG_TARGET_LIST_MODE_INVALID = 0,
+ RNG_TARGET_LIST_MODE_LEGACY = 1,
+ RNG_TARGET_LIST_MODE_NAN = 2,
+ RNG_TARGET_LIST_MODE_MIX = 3
+} ranging_target_list_mode_t;
+
+#define FTM_MAX_NUM_BURST_EXP 14
+#define HAS_11MC_CAP(cap) (cap & RTT_CAP_FTM_WAY)
+#define HAS_ONEWAY_CAP(cap) (cap & RTT_CAP_ONE_WAY)
+#define HAS_RTT_CAP(cap) (HAS_ONEWAY_CAP(cap) || HAS_11MC_CAP(cap))
+
+typedef struct rtt_target_info {
+ struct ether_addr addr;
+ struct ether_addr local_addr;
+ rtt_type_t type; /* rtt_type */
+ rtt_peer_type_t peer; /* peer type */
+ wifi_channel_info channel; /* channel information */
+ chanspec_t chanspec; /* chanspec for channel */
+ bool disable; /* disable for RTT measurement */
+ /*
+ * Time interval between bursts (units: 100 ms).
+ * Applies to 1-sided and 2-sided RTT multi-burst requests.
+ * Range: 0-31, 0: no preference by initiator (2-sided RTT)
+ */
+ uint32 burst_period;
+ /*
+ * Total number of RTT bursts to be executed. It will be
+ * specified in the same way as the parameter "Number of
+ * Burst Exponent" found in the FTM frame format. It
+ * applies to both: 1-sided RTT and 2-sided RTT. Valid
+ * values are 0 to 15 as defined in 802.11mc std.
+ * 0 means single shot
+ * The implication of this parameter on the maximum
+ * number of RTT results is the following:
+ * for 1-sided RTT: max num of RTT results = (2^num_burst)*(num_frames_per_burst)
+ * for 2-sided RTT: max num of RTT results = (2^num_burst)*(num_frames_per_burst - 1)
+ */
+ uint16 num_burst;
+ /*
+ * num of frames per burst.
+ * Minimum value = 1, Maximum value = 31
+ * For 2-sided this equals the number of FTM frames
+ * to be attempted in a single burst. This also
+ * equals the number of FTM frames that the
+ * initiator will request that the responder send
+ * in a single frame
+ */
+ uint32 num_frames_per_burst;
+ /*
+ * num of frames in each RTT burst
+ * for single side, measurement result num = frame number
+ * for 2 side RTT, measurement result num = frame number - 1
+ */
+	uint32 num_retries_per_ftm; /* number of retries for the RTT measurement frame */
+ /* following fields are only valid for 2 side RTT */
+ uint32 num_retries_per_ftmr;
+ uint8 LCI_request;
+ uint8 LCR_request;
+ /*
+ * Applies to 1-sided and 2-sided RTT. Valid values will
+ * be 2-11 and 15 as specified by the 802.11mc std for
+ * the FTM parameter burst duration. In a multi-burst
+ * request, if responder overrides with larger value,
+ * the initiator will return failure. In a single-burst
+ * request if responder overrides with larger value,
+	 * the initiator will send TMR_STOP to terminate RTT
+ * at the end of the burst_duration it requested.
+ */
+ uint32 burst_duration;
+ uint32 burst_timeout;
+ uint8 preamble; /* 1 - Legacy, 2 - HT, 4 - VHT */
+ uint8 bw; /* 5, 10, 20, 40, 80, 160 */
+} rtt_target_info_t;
+
+typedef struct rtt_goefence_target_info {
+ bool valid;
+ struct ether_addr peer_addr;
+} rtt_geofence_target_info_t;
+
+typedef struct rtt_config_params {
+ int8 rtt_target_cnt;
+ uint8 target_list_mode;
+ rtt_target_info_t *target_info;
+} rtt_config_params_t;
+
+typedef struct rtt_geofence_setup_status {
+ bool geofence_setup_inprog; /* Lock to serialize geofence setup */
+ struct nan_ranging_inst *rng_inst; /* Locked for this ranging instance */
+} rtt_geofence_setup_status_t;
+
+typedef struct rtt_geofence_cfg {
+ int8 geofence_target_cnt;
+ int8 cur_target_idx;
+ rtt_geofence_target_info_t geofence_target_info[RTT_MAX_GEOFENCE_TARGET_CNT];
+ int geofence_rtt_interval;
+ int max_geofence_sessions; /* Polled from FW via IOVAR Query */
+ int geofence_sessions_cnt; /* No. of Geofence/Resp Sessions running currently */
+ rtt_geofence_setup_status_t geofence_setup_status;
+#ifdef RTT_GEOFENCE_CONT
+ bool geofence_cont;
+#endif /* RTT_GEOFENCE_CONT */
+} rtt_geofence_cfg_t;
+
+typedef struct rtt_directed_setup_status {
+ bool directed_na_setup_inprog; /* Lock to serialize directed setup */
+ struct nan_ranging_inst *rng_inst; /* Locked for this ranging instance */
+} rtt_directed_setup_status_t;
+
+typedef struct rtt_directed_cfg {
+	int directed_sessions_cnt; /* No. of directed sessions running currently */
+ rtt_directed_setup_status_t directed_setup_status;
+} rtt_directed_cfg_t;
+
+/*
+ * Keep Adding more reasons
+ * going forward if needed
+ */
+enum rtt_schedule_reason {
+ RTT_SCHED_HOST_TRIGGER = 1, /* On host command for directed RTT */
+ RTT_SCHED_SUB_MATCH = 2, /* on Sub Match for svc with range req */
+ RTT_SCHED_DIR_TRIGGER_FAIL = 3, /* On failure of Directed RTT Trigger */
+ RTT_SCHED_DP_END = 4, /* ON NDP End event from fw */
+ RTT_SCHED_DP_REJECTED = 5, /* On receving reject dp event from fw */
+ RTT_SCHED_RNG_RPT_DIRECTED = 6, /* On Ranging report for directed RTT */
+ RTT_SCHED_RNG_TERM = 7, /* On Range Term Indicator */
+ RTT_SHCED_HOST_DIRECTED_TERM = 8, /* On host terminating directed RTT sessions */
+ RTT_SCHED_RNG_RPT_GEOFENCE = 9, /* On Ranging report for geofence RTT */
+ RTT_SCHED_RTT_RETRY_GEOFENCE = 10, /* On Geofence Retry */
+ RTT_SCHED_RNG_TERM_PEND_ROLE_CHANGE = 11, /* On Rng Term, while pending role change */
+ RTT_SCHED_RNG_TERM_SUB_SVC_CANCEL = 12, /* Due rng canc attempt, on sub cancel */
+ RTT_SCHED_RNG_TERM_SUB_SVC_UPD = 13, /* Due rng canc attempt, on sub update */
+ RTT_SCHED_RNG_TERM_PUB_RNG_CLEAR = 14, /* Due rng canc attempt, on pub upd/timeout */
+ RTT_SCHED_RNG_RESP_IND = 15, /* Due to rng resp ind */
+ RTT_SCHED_RNG_DIR_EXCESS_TARGET = 16 /* On ssn end, if excess dir tgt pending */
+};
+
+/*
+ * Keep Adding more invalid RTT states
+ * going forward if needed
+ */
+enum rtt_invalid_state {
+ RTT_STATE_VALID = 0, /* RTT state is valid */
+ RTT_STATE_INV_REASON_NDP_EXIST = 1 /* RTT state invalid as ndp exists */
+};
+
+typedef struct rtt_status_info {
+ dhd_pub_t *dhd;
+ int8 status; /* current status for the current entry */
+ int8 txchain; /* current device tx chain */
+ int pm; /* to save current value of pm */
+ int8 pm_restore; /* flag to reset the old value of pm */
+ int8 cur_idx; /* current entry to do RTT */
+ int8 start_idx; /* start index for RTT */
+	bool all_cancel; /* cancel all requests once we get a cancel request */
+ uint32 flags; /* indicate whether device is configured as initiator or target */
+ struct capability {
+ int32 proto :8;
+ int32 feature :8;
+ int32 preamble :8;
+ int32 bw :8;
+ } rtt_capa; /* rtt capability */
+ struct mutex rtt_mutex;
+ struct mutex geofence_mutex;
+ rtt_config_params_t rtt_config;
+ rtt_geofence_cfg_t geofence_cfg;
+ rtt_directed_cfg_t directed_cfg;
+ struct work_struct work;
+ struct list_head noti_fn_list;
+ struct list_head rtt_results_cache; /* store results for RTT */
+ int rtt_sched_reason; /* rtt_schedule_reason: what scheduled RTT */
+ struct delayed_work proxd_timeout; /* Proxd Timeout work */
+ struct delayed_work rtt_retry_timer; /* Timer for retry RTT after all targets done */
+	bool rtt_sched; /* To serialize rtt thread */
+ int max_nan_rtt_sessions; /* To be Polled from FW via IOVAR Query */
+} rtt_status_info_t;
+
+typedef struct rtt_report {
+ struct ether_addr addr;
+ unsigned int burst_num; /* # of burst inside a multi-burst request */
+ unsigned int ftm_num; /* total RTT measurement frames attempted */
+ unsigned int success_num; /* total successful RTT measurement frames */
+ uint8 num_per_burst_peer; /* max number of FTM number per burst the peer support */
+	rtt_reason_t status; /* ranging status */
+ /* in s, 11mc only, only for RTT_REASON_FAIL_BUSY_TRY_LATER, 1- 31s */
+ uint8 retry_after_duration;
+ rtt_type_t type; /* rtt type */
+ wifi_rssi_rtt rssi; /* average rssi in 0.5 dB steps e.g. 143 implies -71.5 dB */
+ wifi_rssi_rtt rssi_spread; /* rssi spread in 0.5 db steps e.g. 5 implies 2.5 spread */
+ /*
+ * 1-sided RTT: TX rate of RTT frame.
+ * 2-sided RTT: TX rate of initiator's Ack in response to FTM frame.
+ */
+ wifi_rate_v1 tx_rate;
+ /*
+ * 1-sided RTT: TX rate of Ack from other side.
+ * 2-sided RTT: TX rate of FTM frame coming from responder.
+ */
+ wifi_rate_v1 rx_rate;
+ wifi_timespan rtt; /* round trip time in 0.1 nanoseconds */
+ wifi_timespan rtt_sd; /* rtt standard deviation in 0.1 nanoseconds */
+ wifi_timespan rtt_spread; /* difference between max and min rtt times recorded */
+ int distance; /* distance in cm (optional) */
+ int distance_sd; /* standard deviation in cm (optional) */
+ int distance_spread; /* difference between max and min distance recorded (optional) */
+ wifi_timestamp ts; /* time of the measurement (in microseconds since boot) */
+	int burst_duration; /* in ms, how long the FW takes to finish one burst measurement */
+ int negotiated_burst_num; /* Number of bursts allowed by the responder */
+ bcm_tlv_t *LCI; /* LCI Report */
+ bcm_tlv_t *LCR; /* Location Civic Report */
+} rtt_report_t;
+#define RTT_REPORT_SIZE (sizeof(rtt_report_t))
+
+/* rtt_results_header to maintain rtt result list per mac address */
+typedef struct rtt_results_header {
+ struct ether_addr peer_mac;
+ uint32 result_cnt;
+ uint32 result_tot_len; /* sum of report_len of rtt_result */
+ struct list_head list;
+ struct list_head result_list;
+} rtt_results_header_t;
+struct rtt_result_detail {
+ uint8 num_ota_meas;
+ uint32 result_flags;
+};
+/* rtt_result to link all of rtt_report */
+typedef struct rtt_result {
+ struct list_head list;
+ struct rtt_report report;
+ int32 report_len; /* total length of rtt_report */
+ struct rtt_result_detail rtt_detail;
+ int32 detail_len;
+} rtt_result_t;
+
+/* RTT Capabilities */
+typedef struct rtt_capabilities {
+ uint8 rtt_one_sided_supported; /* if 1-sided rtt data collection is supported */
+ uint8 rtt_ftm_supported; /* if ftm rtt data collection is supported */
+ uint8 lci_support; /* location configuration information */
+ uint8 lcr_support; /* Civic Location */
+ uint8 preamble_support; /* bit mask indicate what preamble is supported */
+ uint8 bw_support; /* bit mask indicate what BW is supported */
+} rtt_capabilities_t;
+
+/* RTT responder information */
+typedef struct wifi_rtt_responder {
+ wifi_channel_info channel; /* channel of responder */
+ uint8 preamble; /* preamble supported by responder */
+} wifi_rtt_responder_t;
+
+typedef void (*dhd_rtt_compl_noti_fn)(void *ctx, void *rtt_data);
+/* Linux wrapper to call common dhd_rtt_set_cfg */
+int dhd_dev_rtt_set_cfg(struct net_device *dev, void *buf);
+
+int dhd_dev_rtt_cancel_cfg(struct net_device *dev, struct ether_addr *mac_list, int mac_cnt);
+
+int dhd_dev_rtt_register_noti_callback(struct net_device *dev, void *ctx,
+ dhd_rtt_compl_noti_fn noti_fn);
+
+int dhd_dev_rtt_unregister_noti_callback(struct net_device *dev, dhd_rtt_compl_noti_fn noti_fn);
+
+int dhd_dev_rtt_capability(struct net_device *dev, rtt_capabilities_t *capa);
+
+int dhd_dev_rtt_avail_channel(struct net_device *dev, wifi_channel_info *channel_info);
+
+int dhd_dev_rtt_enable_responder(struct net_device *dev, wifi_channel_info *channel_info);
+
+int dhd_dev_rtt_cancel_responder(struct net_device *dev);
+/* export to upper layer */
+chanspec_t dhd_rtt_convert_to_chspec(wifi_channel_info channel);
+
+int dhd_rtt_idx_to_burst_duration(uint idx);
+
+int dhd_rtt_set_cfg(dhd_pub_t *dhd, rtt_config_params_t *params);
+
+#ifdef WL_NAN
+void dhd_rtt_initialize_geofence_cfg(dhd_pub_t *dhd);
+#ifdef RTT_GEOFENCE_CONT
+void dhd_rtt_set_geofence_cont_ind(dhd_pub_t *dhd, bool geofence_cont);
+
+void dhd_rtt_get_geofence_cont_ind(dhd_pub_t *dhd, bool* geofence_cont);
+#endif /* RTT_GEOFENCE_CONT */
+
+#ifdef RTT_GEOFENCE_INTERVAL
+void dhd_rtt_set_geofence_rtt_interval(dhd_pub_t *dhd, int interval);
+#endif /* RTT_GEOFENCE_INTERVAL */
+
+int dhd_rtt_get_geofence_max_sessions(dhd_pub_t *dhd);
+
+bool dhd_rtt_geofence_sessions_maxed_out(dhd_pub_t *dhd);
+
+int dhd_rtt_get_geofence_sessions_cnt(dhd_pub_t *dhd);
+
+int dhd_rtt_update_geofence_sessions_cnt(dhd_pub_t *dhd, bool incr,
+ struct ether_addr *peer_addr);
+
+int8 dhd_rtt_get_geofence_target_cnt(dhd_pub_t *dhd);
+
+rtt_geofence_target_info_t* dhd_rtt_get_geofence_target_head(dhd_pub_t *dhd);
+
+rtt_geofence_target_info_t* dhd_rtt_get_geofence_current_target(dhd_pub_t *dhd);
+
+rtt_geofence_target_info_t*
+dhd_rtt_get_geofence_target(dhd_pub_t *dhd, struct ether_addr* peer_addr,
+ int8 *index);
+
+int dhd_rtt_add_geofence_target(dhd_pub_t *dhd, rtt_geofence_target_info_t *target);
+
+int dhd_rtt_remove_geofence_target(dhd_pub_t *dhd, struct ether_addr *peer_addr);
+
+int dhd_rtt_delete_geofence_target_list(dhd_pub_t *dhd);
+
+int dhd_rtt_delete_nan_session(dhd_pub_t *dhd);
+
+bool dhd_rtt_nan_is_directed_setup_in_prog(dhd_pub_t *dhd);
+
+bool dhd_rtt_nan_is_directed_setup_in_prog_with_peer(dhd_pub_t *dhd,
+ struct ether_addr *peer);
+
+void dhd_rtt_nan_update_directed_setup_inprog(dhd_pub_t *dhd,
+ struct nan_ranging_inst *rng_inst, bool inprog);
+
+bool dhd_rtt_nan_directed_sessions_allowed(dhd_pub_t *dhd);
+
+bool dhd_rtt_nan_all_directed_sessions_triggered(dhd_pub_t *dhd);
+
+void dhd_rtt_nan_update_directed_sessions_cnt(dhd_pub_t *dhd, bool incr);
+#endif /* WL_NAN */
+
+uint8 dhd_rtt_invalid_states(struct net_device *ndev, struct ether_addr *peer_addr);
+
+int8 dhd_rtt_get_cur_target_idx(dhd_pub_t *dhd);
+
+int8 dhd_rtt_set_next_target_idx(dhd_pub_t *dhd, int start_idx);
+
+void dhd_rtt_schedule_rtt_work_thread(dhd_pub_t *dhd, int sched_reason);
+
+int dhd_rtt_stop(dhd_pub_t *dhd, struct ether_addr *mac_list, int mac_cnt);
+
+int dhd_rtt_register_noti_callback(dhd_pub_t *dhd, void *ctx, dhd_rtt_compl_noti_fn noti_fn);
+
+int dhd_rtt_unregister_noti_callback(dhd_pub_t *dhd, dhd_rtt_compl_noti_fn noti_fn);
+
+int dhd_rtt_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data);
+
+int dhd_rtt_capability(dhd_pub_t *dhd, rtt_capabilities_t *capa);
+
+int dhd_rtt_avail_channel(dhd_pub_t *dhd, wifi_channel_info *channel_info);
+
+int dhd_rtt_enable_responder(dhd_pub_t *dhd, wifi_channel_info *channel_info);
+
+int dhd_rtt_cancel_responder(dhd_pub_t *dhd);
+
+int dhd_rtt_attach(dhd_pub_t *dhd);
+
+int dhd_rtt_detach(dhd_pub_t *dhd);
+
+int dhd_rtt_init(dhd_pub_t *dhd);
+
+int dhd_rtt_deinit(dhd_pub_t *dhd);
+
+#ifdef WL_CFG80211
+#ifdef WL_NAN
+int dhd_rtt_handle_nan_rtt_session_end(dhd_pub_t *dhd,
+ struct ether_addr *peer);
+
+void dhd_rtt_move_geofence_cur_target_idx_to_next(dhd_pub_t *dhd);
+
+int8 dhd_rtt_get_geofence_cur_target_idx(dhd_pub_t *dhd);
+
+void dhd_rtt_set_geofence_cur_target_idx(dhd_pub_t *dhd, int8 idx);
+
+rtt_geofence_setup_status_t* dhd_rtt_get_geofence_setup_status(dhd_pub_t *dhd);
+
+bool dhd_rtt_is_geofence_setup_inprog(dhd_pub_t *dhd);
+
+bool dhd_rtt_is_geofence_setup_inprog_with_peer(dhd_pub_t *dhd,
+ struct ether_addr *peer_addr);
+
+void dhd_rtt_set_geofence_setup_status(dhd_pub_t *dhd, bool inprog,
+ struct ether_addr *peer_addr);
+
+int dhd_rtt_get_max_nan_rtt_sessions_supported(dhd_pub_t *dhd);
+#endif /* WL_NAN */
+#endif /* WL_CFG80211 */
+
+#endif /* __DHD_RTT_H__ */
diff --git a/bcmdhd.101.10.361.x/dhd_sdio.c b/bcmdhd.101.10.361.x/dhd_sdio.c
new file mode 100755
index 0000000..6fdb0e9
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_sdio.c
@@ -0,0 +1,11777 @@
+/*
+ * DHD Bus Module for SDIO
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+#include <typedefs.h>
+#include <osl.h>
+#include <bcmsdh.h>
+
+#ifdef BCMEMBEDIMAGE
+#include BCMEMBEDIMAGE
+#endif /* BCMEMBEDIMAGE */
+
+#include <bcmdefs.h>
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <bcmdevs.h>
+#include <bcmdevs_legacy.h> /* need to still support chips no longer in trunk firmware */
+
+#include <siutils.h>
+#include <hndpmu.h>
+#include <hndsoc.h>
+#include <bcmsdpcm.h>
+#include <hnd_armtrap.h>
+#include <hnd_cons.h>
+#include <sbchipc.h>
+#include <sbhnddma.h>
+#if defined(DHD_SPROM)
+#include <bcmsrom.h>
+#endif /* defined(DHD_SPROM) */
+
+#include <sdio.h>
+#ifdef BCMSPI
+#include <spid.h>
+#endif /* BCMSPI */
+#include <sbsdio.h>
+#include <sbsdpcmdev.h>
+#include <bcmsdpcm.h>
+#include <bcmsdbus.h>
+
+#include <ethernet.h>
+#include <802.1d.h>
+#include <802.11.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_bus.h>
+#include <dhd_proto.h>
+#include <dhd_dbg.h>
+#include <dhdioctl.h>
+#include <sdiovar.h>
+#include <dhd_config.h>
+#ifdef DHD_PKTDUMP_TOFW
+#include <dhd_linux_pktdump.h>
+#endif
+#include <linux/mmc/sdio_func.h>
+#include <dhd_linux.h>
+
+#ifdef PROP_TXSTATUS
+#include <dhd_wlfc.h>
+#endif
+#ifdef DHDTCPACK_SUPPRESS
+#include <dhd_ip.h>
+#endif /* DHDTCPACK_SUPPRESS */
+
+#ifdef BT_OVER_SDIO
+#include <dhd_bt_interface.h>
+#endif /* BT_OVER_SDIO */
+
+#if defined(DEBUGGER) || defined(DHD_DSCOPE)
+#include <debugger.h>
+#endif /* DEBUGGER || DHD_DSCOPE */
+
+bool dhd_mp_halting(dhd_pub_t *dhdp);
+extern void bcmsdh_waitfor_iodrain(void *sdh);
+extern void bcmsdh_reject_ioreqs(void *sdh, bool reject);
+extern bool bcmsdh_fatal_error(void *sdh);
+static int dhdsdio_suspend(void *context);
+static int dhdsdio_resume(void *context);
+
+#ifndef DHDSDIO_MEM_DUMP_FNAME
+#define DHDSDIO_MEM_DUMP_FNAME "mem_dump"
+#endif
+
+#define QLEN (1024) /* bulk rx and tx queue lengths */
+#define FCHI (QLEN - 10)
+#define FCLOW (FCHI / 2)
+#define PRIOMASK 7 /* XXX FixMe: should come from elsewhere...
+ * MAXPRIO? PKTQ_MAX_PREC? WLC? Other?
+ */
+
+#define TXRETRIES 2 /* # of retries for tx frames */
+#define READ_FRM_CNT_RETRIES 3
+#ifndef DHD_RXBOUND
+#define DHD_RXBOUND 50 /* Default for max rx frames in one scheduling */
+#endif
+
+#ifndef DHD_TXBOUND
+#define DHD_TXBOUND 20 /* Default for max tx frames in one scheduling */
+#endif
+
+#define DHD_TXMINMAX 1 /* Max tx frames if rx still pending */
+
+#define MEMBLOCK 2048 /* Block size used for downloading of dongle image */
+#define MAX_MEMBLOCK (32 * 1024)	/* Maximum block size used for downloading of dongle image */
+
+#define MAX_DATA_BUF (64 * 1024) /* Must be large enough to hold biggest possible glom */
+#define MAX_MEM_BUF 4096
+
+#ifndef DHD_FIRSTREAD
+#define DHD_FIRSTREAD 32
+#endif
+#if !ISPOWEROF2(DHD_FIRSTREAD)
+#error DHD_FIRSTREAD is not a power of 2!
+#endif
+
+/* Total length of frame header for dongle protocol */
+#define SDPCM_HDRLEN (SDPCM_FRAMETAG_LEN + SDPCM_SWHEADER_LEN)
+#define SDPCM_HDRLEN_TXGLOM (SDPCM_HDRLEN + SDPCM_HWEXT_LEN)
+#define MAX_TX_PKTCHAIN_CNT SDPCM_MAXGLOM_SIZE
+
+#ifdef SDTEST
+#define SDPCM_RESERVE (SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + DHD_SDALIGN)
+#else
+#define SDPCM_RESERVE (SDPCM_HDRLEN + DHD_SDALIGN)
+#endif
+
+/* Space for header read, limit for data packets */
+#ifndef MAX_HDR_READ
+#define MAX_HDR_READ 32
+#endif
+#if !ISPOWEROF2(MAX_HDR_READ)
+#error MAX_HDR_READ is not a power of 2!
+#endif
+
+#define MAX_RX_DATASZ 2048 /* XXX Should be based on PKTGET limits? */
+
+/* Maximum milliseconds to wait for F2 to come up */
+#ifdef BCMQT
+#define DHD_WAIT_F2RDY 30000
+#else
+#define DHD_WAIT_F2RDY 3000
+#endif /* BCMQT */
+
+/* Maximum usec to wait for HTAVAIL to come up */
+#ifdef BCMQT
+#define DHD_WAIT_HTAVAIL 10000000
+#else
+#define DHD_WAIT_HTAVAIL 10000
+#endif /* BCMQT */
+
+/* Bump up limit on waiting for HT to account for first startup;
+ * if the image is doing a CRC calculation before programming the PMU
+ * for HT availability, it could take a couple hundred ms more, so
+ * max out at 1 second (1000000 us).
+ */
+#if (PMU_MAX_TRANSITION_DLY <= 1000000)
+#undef PMU_MAX_TRANSITION_DLY
+#ifdef NO_EXT32K
+#define PMU_MAX_TRANSITION_DLY (1000000*5)
+#else
+#define PMU_MAX_TRANSITION_DLY 1000000
+#endif
+#endif // endif
+
+/* hooks for limiting threshold custom tx num in rx processing */
+#define DEFAULT_TXINRX_THRES 0
+#ifndef CUSTOM_TXINRX_THRES
+#define CUSTOM_TXINRX_THRES DEFAULT_TXINRX_THRES
+#endif
+
+/* Value for ChipClockCSR during initial setup */
+#define DHD_INIT_CLKCTL1 (SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ)
+#define DHD_INIT_CLKCTL2 (SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP)
+
+/* Flags for SDH calls */
+#define F2SYNC (SDIO_REQ_4BYTE | SDIO_REQ_FIXED)
+/* XXX #define F2ASYNC (SDIO_REQ_4BYTE | SDIO_REQ_FIXED | SDIO_REQ_ASYNC) */
+
+/* Packet free is applied unconditionally for the sdio and sdspi buses; for the
+ * gspi bus it is conditional on a bufpool being present.
+ */
+#define PKTFREE2() if ((bus->bus != SPI_BUS) || bus->usebufpool) \
+ PKTFREE(bus->dhd->osh, pkt, FALSE);
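+/* Usage sketch (illustrative, not part of the original source): PKTFREE2()
+ * expands to a bare if-statement, so a caller that pairs it with an else
+ * branch must brace it to avoid a dangling-else, e.g.:
+ *
+ *	if (errcode) {
+ *		PKTFREE2();
+ *	} else {
+ *		... hand the packet up the stack ...
+ *	}
+ */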
+DHD_SPINWAIT_SLEEP_INIT(sdioh_spinwait_sleep);
+
+#ifdef SUPPORT_MULTIPLE_BOARD_REV_FROM_HW
+extern unsigned int system_hw_rev;
+#endif /* SUPPORT_MULTIPLE_BOARD_REV_FROM_HW */
+
+/* Device console log buffer state */
+#define CONSOLE_LINE_MAX 192
+#define CONSOLE_BUFFER_MAX 8192
+typedef struct dhd_console {
+ uint count; /* Poll interval msec counter */
+ uint log_addr; /* Log struct address (fixed) */
+ hnd_log_t log; /* Log struct (host copy) */
+ uint bufsize; /* Size of log buffer */
+ uint8 *buf; /* Log buffer (host copy) */
+ uint last; /* Last buffer read index */
+} dhd_console_t;
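+/* Polling flow (descriptive note, added for clarity): the watchdog bumps
+ * 'count' each tick; when the poll interval elapses the host re-reads the
+ * hnd_log_t at 'log_addr', copies any new dongle ring bytes into 'buf'
+ * (up to 'bufsize') and prints complete lines, with 'last' tracking the
+ * read index between polls.
+ */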
+
+#define REMAP_ENAB(bus) ((bus)->remap)
+#define REMAP_ISADDR(bus, a) (((a) >= ((bus)->orig_ramsize)) && ((a) < ((bus)->ramsize)))
+#define KSO_ENAB(bus) ((bus)->kso)
+#define SR_ENAB(bus) ((bus)->_srenab)
+#define SLPAUTO_ENAB(bus) ((SR_ENAB(bus)) && ((bus)->_slpauto))
+
+#define MIN_RSRC_SR 0x3
+#define CORE_CAPEXT_ADDR_OFFSET (0x64c)
+#define CORE_CAPEXT_SR_SUPPORTED_MASK (1 << 1)
+#define RCTL_MACPHY_DISABLE_MASK (1 << 26)
+#define RCTL_LOGIC_DISABLE_MASK (1 << 27)
+
+#define OOB_WAKEUP_ENAB(bus) ((bus)->_oobwakeup)
+#define GPIO_DEV_SRSTATE 16 /* Host gpio17 mapped to device gpio0 SR state */
+#define GPIO_DEV_SRSTATE_TIMEOUT 320000 /* 320ms */
+#define GPIO_DEV_WAKEUP 17 /* Host gpio17 mapped to device gpio1 wakeup */
+#define CC_CHIPCTRL2_GPIO1_WAKEUP (1 << 0)
+#define CC_CHIPCTRL3_SR_ENG_ENABLE (1 << 2)
+#define OVERFLOW_BLKSZ512_WM 96
+#define OVERFLOW_BLKSZ512_MES 80
+
+#define CC_PMUCC3 (0x3)
+
+#ifdef DHD_UCODE_DOWNLOAD
+/* Ucode host download related macros */
+#define UCODE_DOWNLOAD_REQUEST 0xCAFECAFE
+#define UCODE_DOWNLOAD_COMPLETE 0xABCDABCD
+#endif /* DHD_UCODE_DOWNLOAD */
+
+#if defined(BT_OVER_SDIO)
+#define BTMEM_OFFSET 0x19000000
+/* BIT0 => WLAN Power UP and BIT1=> WLAN Wake */
+#define BT2WLAN_PWRUP_WAKE 0x03
+#define BT2WLAN_PWRUP_ADDR 0x640894 /* This address is specific to 43012B0 */
+
+#define BTFW_MAX_STR_LEN 600
+#define BTFW_DOWNLOAD_BLK_SIZE (BTFW_MAX_STR_LEN/2 + 8)
+
+#define BTFW_ADDR_MODE_UNKNOWN 0
+#define BTFW_ADDR_MODE_EXTENDED 1
+#define BTFW_ADDR_MODE_SEGMENT 2
+#define BTFW_ADDR_MODE_LINEAR32 3
+
+#define BTFW_HEX_LINE_TYPE_DATA 0
+#define BTFW_HEX_LINE_TYPE_END_OF_DATA 1
+#define BTFW_HEX_LINE_TYPE_EXTENDED_SEGMENT_ADDRESS 2
+#define BTFW_HEX_LINE_TYPE_EXTENDED_ADDRESS 4
+#define BTFW_HEX_LINE_TYPE_ABSOLUTE_32BIT_ADDRESS 5
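+/* Note (added for clarity; these record-type values match the standard Intel
+ * HEX format, so the correspondence is an assumption, not from the original
+ * source). A typical data record looks like:
+ *
+ *	:10 0100 00 214601360121470136007EFE09D21901 40
+ *	 |  |    |  |                                |
+ *	 |  |    |  `-- data bytes                   `-- checksum
+ *	 |  |    `-- record type (BTFW_HEX_LINE_TYPE_DATA)
+ *	 |  `-- 16-bit address (combined with hi_addr per the address mode)
+ *	 `-- byte count
+ */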
+
+#endif /* defined (BT_OVER_SDIO) */
+
+/* Private data for SDIO bus interaction */
+typedef struct dhd_bus {
+ dhd_pub_t *dhd;
+
+ bcmsdh_info_t *sdh; /* Handle for BCMSDH calls */
+ si_t *sih; /* Handle for SI calls */
+ char *vars; /* Variables (from CIS and/or other) */
+ uint varsz; /* Size of variables buffer */
+ uint32 sbaddr; /* Current SB window pointer (-1, invalid) */
+
+ sdpcmd_regs_t *regs; /* Registers for SDIO core */
+ uint sdpcmrev; /* SDIO core revision */
+ uint armrev; /* CPU core revision */
+ uint ramrev; /* SOCRAM core revision */
+ uint32 ramsize; /* Size of RAM in SOCRAM (bytes) */
+ uint32 orig_ramsize; /* Size of RAM in SOCRAM (bytes) */
+ uint32 srmemsize; /* Size of SRMEM */
+
+ uint32 bus; /* gSPI or SDIO bus */
+ uint32 bus_num; /* bus number */
+ uint32 slot_num; /* slot ID */
+ uint32 hostintmask; /* Copy of Host Interrupt Mask */
+ uint32 intstatus; /* Intstatus bits (events) pending */
+ bool dpc_sched; /* Indicates DPC schedule (intrpt rcvd) */
+ bool fcstate; /* State of dongle flow-control */
+
+ uint16 cl_devid; /* cached devid for dhdsdio_probe_attach() */
+ char *fw_path; /* module_param: path to firmware image */
+ char *nv_path; /* module_param: path to nvram vars file */
+
+ uint blocksize; /* Block size of SDIO transfers */
+ uint roundup; /* Max roundup limit */
+
+ struct pktq txq; /* Queue length used for flow-control */
+ uint8 flowcontrol; /* per prio flow control bitmask */
+ uint8 tx_seq; /* Transmit sequence number (next) */
+ uint8 tx_max; /* Maximum transmit sequence allowed */
+
+#ifdef DYNAMIC_MAX_HDR_READ
+ uint8 *hdrbufp;
+#else
+ uint8 hdrbuf[MAX_HDR_READ + DHD_SDALIGN];
+#endif
+ uint8 *rxhdr; /* Header of current rx frame (in hdrbuf) */
+ uint16 nextlen; /* Next Read Len from last header */
+ uint8 rx_seq; /* Receive sequence number (expected) */
+ bool rxskip; /* Skip receive (awaiting NAK ACK) */
+
+ void *glomd; /* Packet containing glomming descriptor */
+ void *glom; /* Packet chain for glommed superframe */
+ uint glomerr; /* Glom packet read errors */
+
+ uint8 *rxbuf; /* Buffer for receiving control packets */
+ uint rxblen; /* Allocated length of rxbuf */
+ uint8 *rxctl; /* Aligned pointer into rxbuf */
+ uint8 *databuf; /* Buffer for receiving big glom packet */
+ uint8 *dataptr; /* Aligned pointer into databuf */
+ uint rxlen; /* Length of valid data in buffer */
+
+ uint8 sdpcm_ver; /* Bus protocol reported by dongle */
+
+ bool intr; /* Use interrupts */
+ bool poll; /* Use polling */
+ bool ipend; /* Device interrupt is pending */
+ bool intdis; /* Interrupts disabled by isr */
+ uint intrcount; /* Count of device interrupt callbacks */
+ uint lastintrs; /* Count as of last watchdog timer */
+ uint spurious; /* Count of spurious interrupts */
+ uint pollrate; /* Ticks between device polls */
+ uint polltick; /* Tick counter */
+ uint pollcnt; /* Count of active polls */
+
+ dhd_console_t console; /* Console output polling support */
+ uint console_addr; /* Console address from shared struct */
+
+ uint regfails; /* Count of R_REG/W_REG failures */
+
+ uint clkstate; /* State of sd and backplane clock(s) */
+ bool activity; /* Activity flag for clock down */
+ int32 idletime; /* Control for activity timeout */
+ int32 idlecount; /* Activity timeout counter */
+ int32 idleclock; /* How to set bus driver when idle */
+ int32 sd_divisor; /* Speed control to bus driver */
+ int32 sd_mode; /* Mode control to bus driver */
+ int32 sd_rxchain; /* If bcmsdh api accepts PKT chains */
+ bool use_rxchain; /* If dhd should use PKT chains */
+ bool sleeping; /* Is SDIO bus sleeping? */
+#if defined(LINUX) && defined(SUPPORT_P2P_GO_PS)
+ wait_queue_head_t bus_sleep;
+#endif /* LINUX && SUPPORT_P2P_GO_PS */
+ bool ctrl_wait;
+ wait_queue_head_t ctrl_tx_wait;
+ uint rxflow_mode; /* Rx flow control mode */
+ bool rxflow; /* Is rx flow control on */
+ uint prev_rxlim_hit; /* Is prev rx limit exceeded (per dpc schedule) */
+#ifdef BCMINTERNAL
+ bool _nopmu; /* No PMU (FPGA), don't try it */
+ bool clockpoll; /* Force clock polling (no chipactive interrupt) */
+#endif
+ bool alp_only; /* Don't use HT clock (ALP only) */
+	/* Field to decide if rx of control frames happens in rxbuf or lb-pool */
+ bool usebufpool;
+ int32 txinrx_thres; /* num of in-queued pkts */
+ int32 dotxinrx; /* tx first in dhdsdio_readframes */
+#ifdef BCMSDIO_RXLIM_POST
+ bool rxlim_en;
+ uint32 rxlim_addr;
+#endif /* BCMSDIO_RXLIM_POST */
+#ifdef SDTEST
+ /* external loopback */
+ bool ext_loop;
+ uint8 loopid;
+
+ /* pktgen configuration */
+ uint pktgen_freq; /* Ticks between bursts */
+ uint pktgen_count; /* Packets to send each burst */
+ uint pktgen_print; /* Bursts between count displays */
+ uint pktgen_total; /* Stop after this many */
+ uint pktgen_minlen; /* Minimum packet data len */
+ uint pktgen_maxlen; /* Maximum packet data len */
+ uint pktgen_mode; /* Configured mode: tx, rx, or echo */
+ uint pktgen_stop; /* Number of tx failures causing stop */
+
+ /* active pktgen fields */
+ uint pktgen_tick; /* Tick counter for bursts */
+ uint pktgen_ptick; /* Burst counter for printing */
+ uint pktgen_sent; /* Number of test packets generated */
+ uint pktgen_rcvd; /* Number of test packets received */
+	uint pktgen_prev_time;	/* Time at which previous stats were printed */
+ uint pktgen_prev_sent; /* Number of test packets generated when
+ * previous stats were printed
+ */
+ uint pktgen_prev_rcvd; /* Number of test packets received when
+ * previous stats were printed
+ */
+ uint pktgen_fail; /* Number of failed send attempts */
+ uint16 pktgen_len; /* Length of next packet to send */
+#define PKTGEN_RCV_IDLE (0)
+#define PKTGEN_RCV_ONGOING (1)
+ uint16 pktgen_rcv_state; /* receive state */
+ uint pktgen_rcvd_rcvsession; /* test pkts rcvd per rcv session. */
+#endif /* SDTEST */
+
+ /* Some additional counters */
+ uint tx_sderrs; /* Count of tx attempts with sd errors */
+ uint fcqueued; /* Tx packets that got queued */
+ uint rxrtx; /* Count of rtx requests (NAK to dongle) */
+	uint rx_toolong;	/* Received frames too long to handle */
+ uint rxc_errors; /* SDIO errors when reading control frames */
+ uint rx_hdrfail; /* SDIO errors on header reads */
+ uint rx_badhdr; /* Bad received headers (roosync?) */
+ uint rx_badseq; /* Mismatched rx sequence number */
+ uint fc_rcvd; /* Number of flow-control events received */
+ uint fc_xoff; /* Number which turned on flow-control */
+ uint fc_xon; /* Number which turned off flow-control */
+ uint rxglomfail; /* Failed deglom attempts */
+ uint rxglomframes; /* Number of glom frames (superframes) */
+ uint rxglompkts; /* Number of packets from glom frames */
+ uint f2rxhdrs; /* Number of header reads */
+ uint f2rxdata; /* Number of frame data reads */
+ uint f2txdata; /* Number of f2 frame writes */
+ uint f1regdata; /* Number of f1 register accesses */
+ wake_counts_t wake_counts; /* Wake up counter */
+#ifdef BCMSPI
+ bool dwordmode;
+#endif /* BCMSPI */
+#ifdef DHDENABLE_TAILPAD
+ uint tx_tailpad_chain; /* Number of tail padding by chaining pad_pkt */
+ uint tx_tailpad_pktget; /* Number of tail padding by new PKTGET */
+#endif /* DHDENABLE_TAILPAD */
+#ifdef BCMINTERNAL
+ uint tx_deferred; /* Tx calls queued while event pending */
+#endif
+ uint8 *ctrl_frame_buf;
+ uint32 ctrl_frame_len;
+ bool ctrl_frame_stat;
+#ifndef BCMSPI
+ uint32 rxint_mode; /* rx interrupt mode */
+#endif /* BCMSPI */
+ bool remap; /* Contiguous 1MB RAM: 512K socram + 512K devram
+ * Available with socram rev 16
+ * Remap region not DMA-able
+ */
+ bool kso;
+ bool _slpauto;
+ bool _oobwakeup;
+ bool _srenab;
+ bool readframes;
+ bool reqbussleep;
+ uint32 resetinstr;
+ uint32 dongle_ram_base;
+
+ void *glom_pkt_arr[SDPCM_MAXGLOM_SIZE]; /* Array of pkts for glomming */
+ uint32 txglom_cnt; /* Number of pkts in the glom array */
+ uint32 txglom_total_len; /* Total length of pkts in glom array */
+ bool txglom_enable; /* Flag to indicate whether tx glom is enabled/disabled */
+ uint32 txglomsize; /* Glom size limitation */
+#ifdef DHDENABLE_TAILPAD
+ void *pad_pkt;
+#endif /* DHDENABLE_TAILPAD */
+ uint32 dongle_trap_addr; /* device trap addr location in device memory */
+#if defined(BT_OVER_SDIO)
+ char *btfw_path; /* module_param: path to BT firmware image */
+ uint32 bt_use_count; /* Counter that tracks whether BT is using the bus */
+#endif /* defined (BT_OVER_SDIO) */
+ uint txglomframes; /* Number of tx glom frames (superframes) */
+ uint txglompkts; /* Number of packets from tx glom frames */
+#ifdef PKT_STATICS
+ struct pkt_statics tx_statics;
+#endif
+ uint8 *membuf; /* Buffer for dhdsdio_membytes */
+#ifdef CONSOLE_DPC
+ char cons_cmd[16];
+#endif
+} dhd_bus_t;
+
+/*
+ * Whenever the DHD_IDLE_IMMEDIATE condition is handled, we now also have to
+ * check whether BT is active. Instead of adding #ifdef code in all the places,
+ * a single macro check is added to the if condition that tests for
+ * DHD_IDLE_IMMEDIATE. In non-BT-over-SDIO builds, this macro always evaluates
+ * to TRUE. In builds where BT_OVER_SDIO is enabled, it expands to a check of
+ * bt_use_count: it evaluates to 1 when bt_use_count is 0 (no active BT users)
+ * and to 0 when bt_use_count is non-zero, thereby preventing the caller from
+ * executing the sleep calls.
+ */
+#ifdef BT_OVER_SDIO
+#define NO_OTHER_ACTIVE_BUS_USER(bus) (bus->bt_use_count == 0)
+#else
+#define NO_OTHER_ACTIVE_BUS_USER(bus) (1)
+#endif /* BT_OVER_SDIO */
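+/* Illustrative usage sketch (hypothetical, added for clarity): the macro is
+ * intended to sit next to the DHD_IDLE_IMMEDIATE test, e.g.:
+ *
+ *	if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched &&
+ *		NO_OTHER_ACTIVE_BUS_USER(bus)) {
+ *		... start the bus sleep sequence ...
+ *	}
+ */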
+
+/* clkstate */
+#define CLK_NONE 0
+#define CLK_SDONLY 1
+#define CLK_PENDING 2 /* Not used yet */
+#define CLK_AVAIL 3
+
+#ifdef BCMINTERNAL
+#define DHD_NOPMU(dhd) ((dhd)->_nopmu)
+#else
+#define DHD_NOPMU(dhd) (FALSE)
+#endif
+
+#if defined(BCMSDIOH_STD)
+#define BLK_64_MAXTXGLOM 20
+#endif /* BCMSDIOH_STD */
+
+#ifdef DHD_DEBUG
+static int qcount[NUMPRIO];
+static int tx_packets[NUMPRIO];
+#endif /* DHD_DEBUG */
+
+/* Deferred transmit */
+const uint dhd_deferred_tx = 1;
+
+extern uint dhd_watchdog_ms;
+#ifndef NDIS
+extern uint sd_f1_blocksize;
+#endif /* !NDIS */
+
+#ifdef BCMSPI_ANDROID
+extern uint *dhd_spi_lockcount;
+#endif /* BCMSPI_ANDROID */
+
+extern void dhd_os_wd_timer(void *bus, uint wdtick);
+int dhd_enableOOB(dhd_pub_t *dhd, bool sleep);
+
+#ifdef DHD_PM_CONTROL_FROM_FILE
+extern bool g_pm_control;
+#endif /* DHD_PM_CONTROL_FROM_FILE */
+
+/* Tx/Rx bounds */
+uint dhd_txbound;
+uint dhd_rxbound;
+uint dhd_txminmax = DHD_TXMINMAX;
+
+/* override the RAM size if possible */
+#define DONGLE_MIN_RAMSIZE (128 * 1024)
+int dhd_dongle_ramsize;
+
+uint dhd_doflow = TRUE;
+uint dhd_dpcpoll = FALSE;
+
+#ifdef linux
+module_param(dhd_doflow, uint, 0644);
+module_param(dhd_dpcpoll, uint, 0644);
+#endif
+
+static bool dhd_alignctl;
+
+static bool sd1idle;
+
+static bool retrydata;
+#define RETRYCHAN(chan) (((chan) == SDPCM_EVENT_CHANNEL) || retrydata)
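+/* Note (added for clarity): RETRYCHAN(SDPCM_EVENT_CHANNEL) is always TRUE, so
+ * event-channel frames are retried even when the 'retrydata' knob is off.
+ */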
+
+#ifndef BCMINTERNAL
+#ifdef BCMSPI
+/* At a watermark around 8 the spid hits an underflow error. */
+static uint watermark = 32;
+static uint mesbusyctrl = 0;
+#else
+static uint watermark = 8;
+static uint mesbusyctrl = 0;
+#endif /* BCMSPI */
+#ifdef DYNAMIC_MAX_HDR_READ
+uint firstread = DHD_FIRSTREAD;
+#else
+static const uint firstread = DHD_FIRSTREAD;
+#endif
+#else /* BCMINTERNAL */
+/* PR47410: low watermark to avoid F2 hang after SD clock stops */
+/* PR48178: read to (not through) bus burst to avoid F2 underflow */
+#ifdef BCMSPI
+static uint watermark = 32;
+static uint mesbusyctrl = 0;
+#else
+static uint watermark = 8;
+static uint mesbusyctrl = 0;
+#endif /* BCMSPI */
+static uint firstread = DHD_FIRSTREAD;
+/* Additional knobs in case we need them */
+static bool tstoph = FALSE;
+static bool checkfifo = FALSE;
+uint dhd_anychip = 0;
+#endif /* BCMINTERNAL */
+
+/* Retry count for register access failures */
+static const uint retry_limit = 2;
+
+/* Force even SD lengths (some host controllers mess up on odd bytes) */
+static bool forcealign;
+
+#if defined(DEBUGGER)
+static uint32 dhd_sdio_reg_read(struct dhd_bus *bus, ulong addr);
+static void dhd_sdio_reg_write(struct dhd_bus *bus, ulong addr, uint32 val);
+
+/** the debugger layer will call back into this (bus) layer to read/write dongle memory */
+static struct dhd_dbg_bus_ops_s bus_ops = {
+ .read_u16 = NULL,
+ .read_u32 = dhd_sdio_reg_read,
+ .write_u32 = dhd_sdio_reg_write,
+};
+#endif /* DEBUGGER */
+
+#define ALIGNMENT 4
+
+#if (defined(OOB_INTR_ONLY) && defined(HW_OOB)) || defined(FORCE_WOWLAN)
+extern void bcmsdh_enable_hw_oob_intr(void *sdh, bool enable);
+#endif
+
+#if defined(OOB_INTR_ONLY) && defined(SDIO_ISR_THREAD)
+#error OOB_INTR_ONLY is NOT working with SDIO_ISR_THREAD
+#endif /* defined(OOB_INTR_ONLY) && defined(SDIO_ISR_THREAD) */
+#define PKTALIGN(osh, p, len, align) \
+ do { \
+ uintptr datalign; \
+ datalign = (uintptr)PKTDATA((osh), (p)); \
+ datalign = ROUNDUP(datalign, (align)) - datalign; \
+ ASSERT(datalign < (align)); \
+ ASSERT(PKTLEN((osh), (p)) >= ((len) + datalign)); \
+ if (datalign) \
+ PKTPULL((osh), (p), (uint)datalign); \
+ PKTSETLEN((osh), (p), (len)); \
+ } while (0)
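+/* Worked example (added for clarity): with align = 4 and PKTDATA() returning
+ * 0x1002, datalign becomes ROUNDUP(0x1002, 4) - 0x1002 = 2, so the data
+ * pointer is pulled forward 2 bytes before the length is set; if the pointer
+ * is already aligned, datalign is 0 and only PKTSETLEN() takes effect.
+ */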
+
+/* Limit on rounding up frames */
+static const uint max_roundup = 512;
+
+/* Try doing readahead */
+static bool dhd_readahead;
+
+#if defined(BCMSDIOH_TXGLOM_EXT)
+bool
+dhdsdio_is_dataok(dhd_bus_t *bus)
+{
+	return (((uint8)(bus->tx_max - bus->tx_seq) - bus->dhd->conf->tx_max_offset > 1) &&
+		(((uint8)(bus->tx_max - bus->tx_seq) & 0x80) == 0));
+}
+
+uint8
+dhdsdio_get_databufcnt(dhd_bus_t *bus)
+{
+	return ((uint8)(bus->tx_max - bus->tx_seq) - 1 - bus->dhd->conf->tx_max_offset);
+}
+#endif
+
+/* To check if there's window offered */
+#if defined(BCMSDIOH_TXGLOM_EXT)
+#define DATAOK(bus) dhdsdio_is_dataok(bus)
+#else
+#define DATAOK(bus) \
+ (((uint8)(bus->tx_max - bus->tx_seq) > 1) && \
+ (((uint8)(bus->tx_max - bus->tx_seq) & 0x80) == 0))
+#endif
+
+/* To check if there's window offered for ctrl frame */
+#define TXCTLOK(bus) \
+ (((uint8)(bus->tx_max - bus->tx_seq) != 0) && \
+ (((uint8)(bus->tx_max - bus->tx_seq) & 0x80) == 0))
+
+/* Number of pkts available in dongle for data RX */
+#if defined(BCMSDIOH_TXGLOM_EXT)
+#define DATABUFCNT(bus) dhdsdio_get_databufcnt(bus)
+#else
+#define DATABUFCNT(bus) \
+ ((uint8)(bus->tx_max - bus->tx_seq) - 1)
+#endif
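+/* Worked example (added for clarity, non-TXGLOM_EXT variants): tx_max and
+ * tx_seq are 8-bit and wrap. With tx_max = 0x05, tx_seq = 0x03 the window is
+ * (uint8)(0x05 - 0x03) = 2, so DATAOK() and TXCTLOK() hold and DATABUFCNT()
+ * is 1. Across a wrap, tx_max = 0x01, tx_seq = 0xFE gives
+ * (uint8)(0x01 - 0xFE) = 3, still a valid window; the 0x80 test rejects
+ * windows where tx_seq has run ahead of tx_max.
+ */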
+
+/* Macros to get register read/write status */
+/* NOTE: these assume a local dhdsdio_bus_t *bus! */
+/* XXX Need to replace these with something better. */
+#define R_SDREG(regvar, regaddr, retryvar) \
+do { \
+ retryvar = 0; \
+ do { \
+ regvar = R_REG(bus->dhd->osh, regaddr); \
+ } while (bcmsdh_regfail(bus->sdh) && (++retryvar <= retry_limit)); \
+ if (retryvar) { \
+ bus->regfails += (retryvar-1); \
+ if (retryvar > retry_limit) { \
+			DHD_ERROR(("%s: FAILED " #regvar " READ, LINE %d\n", \
+ __FUNCTION__, __LINE__)); \
+ regvar = 0; \
+ } \
+ } \
+} while (0)
+
+#define W_SDREG(regval, regaddr, retryvar) \
+do { \
+ retryvar = 0; \
+ do { \
+ W_REG(bus->dhd->osh, regaddr, regval); \
+ } while (bcmsdh_regfail(bus->sdh) && (++retryvar <= retry_limit)); \
+ if (retryvar) { \
+ bus->regfails += (retryvar-1); \
+ if (retryvar > retry_limit) \
+ DHD_ERROR(("%s: FAILED REGISTER WRITE, LINE %d\n", \
+ __FUNCTION__, __LINE__)); \
+ } \
+} while (0)
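+/* Usage sketch (mirrors how these macros are used elsewhere in this file):
+ * both assume a local 'bus' and a caller-supplied retry counter:
+ *
+ *	uint retries = 0;
+ *	uint32 intstatus;
+ *	R_SDREG(intstatus, &bus->regs->intstatus, retries);
+ *	W_SDREG(intstatus, &bus->regs->intstatus, retries);
+ */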
+
+#define BUS_WAKE(bus) \
+ do { \
+ bus->idlecount = 0; \
+ if ((bus)->sleeping) \
+ dhdsdio_bussleep((bus), FALSE); \
+	} while (0)
+
+/*
+ * pktavail interrupts from dongle to host can be managed in 3 different ways
+ * whenever there is a packet available in the dongle to transmit to the host.
+ *
+ * Mode 0: Dongle writes the software host mailbox and host is interrupted.
+ * Mode 1: (sdiod core rev >= 4)
+ *	Device sets a new bit in the intstatus whenever there is a packet
+ *	available in the fifo. Host can't clear this specific status bit until
+ *	all the packets are read from the FIFO. No need to ack dongle intstatus.
+ * Mode 2: (sdiod core rev >= 4)
+ *	Device sets a bit in the intstatus, and host acks this by writing
+ *	one to this bit. Dongle won't generate any more packet interrupts
+ *	until host reads all the packets from the dongle and reads a zero to
+ *	determine that there are no more packets. No need to disable host ints.
+ *	Need to ack the intstatus.
+ */
+
+#define SDIO_DEVICE_HMB_RXINT 0 /* default old way */
+#define SDIO_DEVICE_RXDATAINT_MODE_0 1 /* from sdiod rev 4 */
+#define SDIO_DEVICE_RXDATAINT_MODE_1 2 /* from sdiod rev 4 */
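+/* Selection sketch (hypothetical, not the driver's actual probe logic): since
+ * modes 1 and 2 need sdiod core rev >= 4, the mode would be derived from the
+ * core revision discovered at attach time, e.g.:
+ *
+ *	bus->rxint_mode = (bus->sdpcmrev >= 4) ?
+ *		SDIO_DEVICE_RXDATAINT_MODE_0 : SDIO_DEVICE_HMB_RXINT;
+ */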
+
+#ifdef BCMSPI
+
+#define FRAME_AVAIL_MASK(bus) I_HMB_FRAME_IND
+
+#define DHD_BUS SPI_BUS
+
+/* check packet-available-interrupt in piggybacked dstatus */
+#define PKT_AVAILABLE(bus, intstatus) (bcmsdh_get_dstatus(bus->sdh) & STATUS_F2_PKT_AVAILABLE)
+
+#define HOSTINTMASK (I_HMB_FC_CHANGE | I_HMB_HOST_INT)
+
+#define GSPI_PR55150_BAILOUT \
+do { \
+ uint32 dstatussw = bcmsdh_get_dstatus((void *)bus->sdh); \
+ uint32 dstatushw = bcmsdh_cfg_read_word(bus->sdh, SDIO_FUNC_0, SPID_STATUS_REG, NULL); \
+ uint32 intstatuserr = 0; \
+ uint retries = 0; \
+ \
+ R_SDREG(intstatuserr, &bus->regs->intstatus, retries); \
+ printf("dstatussw = 0x%x, dstatushw = 0x%x, intstatus = 0x%x\n", \
+ dstatussw, dstatushw, intstatuserr); \
+ \
+ bus->nextlen = 0; \
+ *finished = TRUE; \
+} while (0)
+
+#else /* BCMSDIO */
+
+#define FRAME_AVAIL_MASK(bus) \
+ ((bus->rxint_mode == SDIO_DEVICE_HMB_RXINT) ? I_HMB_FRAME_IND : I_XMTDATA_AVAIL)
+
+#define DHD_BUS SDIO_BUS
+
+#define PKT_AVAILABLE(bus, intstatus) ((intstatus) & (FRAME_AVAIL_MASK(bus)))
+
+#define HOSTINTMASK (I_HMB_SW_MASK | I_CHIPACTIVE)
+
+#define GSPI_PR55150_BAILOUT
+
+#endif /* BCMSPI */
+
+#ifdef SDTEST
+static void dhdsdio_testrcv(dhd_bus_t *bus, void *pkt, uint seq);
+static void dhdsdio_sdtest_set(dhd_bus_t *bus, uint count);
+#endif
+
+static int dhdsdio_checkdied(dhd_bus_t *bus, char *data, uint size);
+#ifdef DHD_DEBUG
+static int dhd_serialconsole(dhd_bus_t *bus, bool get, bool enable, int *bcmerror);
+#endif /* DHD_DEBUG */
+
+#if defined(DHD_FW_COREDUMP)
+static int dhdsdio_mem_dump(dhd_bus_t *bus);
+static int dhdsdio_get_mem_dump(dhd_bus_t *bus);
+#endif /* DHD_FW_COREDUMP */
+static int dhdsdio_devcap_set(dhd_bus_t *bus, uint8 cap);
+static int dhdsdio_download_state(dhd_bus_t *bus, bool enter);
+
+static void dhdsdio_release(dhd_bus_t *bus, osl_t *osh);
+static void dhdsdio_release_malloc(dhd_bus_t *bus, osl_t *osh);
+static void dhdsdio_disconnect(void *ptr);
+static bool dhdsdio_chipmatch(uint16 chipid);
+static bool dhdsdio_probe_attach(dhd_bus_t *bus, osl_t *osh, void *sdh,
+ void * regsva, uint16 devid);
+static bool dhdsdio_probe_malloc(dhd_bus_t *bus, osl_t *osh, void *sdh);
+static bool dhdsdio_probe_init(dhd_bus_t *bus, osl_t *osh, void *sdh);
+static void dhdsdio_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation,
+ bool reset_flag);
+
+static void dhd_dongle_setramsize(struct dhd_bus *bus, int mem_size);
+static int dhd_bcmsdh_recv_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags,
+ uint8 *buf, uint nbytes,
+ void *pkt, bcmsdh_cmplt_fn_t complete, void *handle);
+static int dhd_bcmsdh_send_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags,
+ uint8 *buf, uint nbytes,
+ void *pkt, bcmsdh_cmplt_fn_t complete, void *handle, int max_retry);
+static int dhdsdio_txpkt(dhd_bus_t *bus, uint chan, void** pkts, int num_pkt, bool free_pkt);
+static int dhdsdio_txpkt_preprocess(dhd_bus_t *bus, void *pkt, int chan, int txseq,
+ int prev_chain_total_len, bool last_chained_pkt,
+ int *pad_pkt_len, void **new_pkt
+#if defined(BCMSDIOH_TXGLOM_EXT)
+ , int first_frame
+#endif
+);
+static int dhdsdio_txpkt_postprocess(dhd_bus_t *bus, void *pkt);
+
+static int dhdsdio_download_firmware(dhd_bus_t *bus, osl_t *osh, void *sdh);
+static int _dhdsdio_download_firmware(dhd_bus_t *bus);
+
+#ifdef DHD_UCODE_DOWNLOAD
+static int dhdsdio_download_ucode_file(struct dhd_bus *bus, char *ucode_path);
+#endif /* DHD_UCODE_DOWNLOAD */
+static int dhdsdio_download_code_file(dhd_bus_t *bus, char *image_path);
+static int dhdsdio_download_nvram(dhd_bus_t *bus);
+#ifdef BCMEMBEDIMAGE
+static int dhdsdio_download_code_array(dhd_bus_t *bus);
+#endif
+static int dhdsdio_bussleep(dhd_bus_t *bus, bool sleep);
+static int dhdsdio_clkctl(dhd_bus_t *bus, uint target, bool pendok);
+static uint8 dhdsdio_sleepcsr_get(dhd_bus_t *bus);
+static bool dhdsdio_dpc(dhd_bus_t *bus);
+static int dhd_bcmsdh_send_buffer(void *bus, uint8 *frame, uint16 len);
+static int dhdsdio_set_sdmode(dhd_bus_t *bus, int32 sd_mode);
+static int dhdsdio_sdclk(dhd_bus_t *bus, bool on);
+static void dhdsdio_advertise_bus_cleanup(dhd_pub_t *dhdp);
+static void dhdsdio_advertise_bus_remove(dhd_pub_t *dhdp);
+
+#if defined(BT_OVER_SDIO)
+static int extract_hex_field(char * line, uint16 start_pos, uint16 num_chars, uint16 * value);
+static int read_more_btbytes(struct dhd_bus *bus, void * file, char *line, int * addr_mode,
+ uint16 * hi_addr, uint32 * dest_addr, uint8 *data_bytes, uint32 * num_bytes);
+static int dhdsdio_download_btfw(struct dhd_bus *bus, osl_t *osh, void *sdh);
+static int _dhdsdio_download_btfw(struct dhd_bus *bus);
+#endif /* defined (BT_OVER_SDIO) */
+
+#ifdef DHD_WAKE_STATUS
+int bcmsdh_get_total_wake(bcmsdh_info_t *bcmsdh);
+int bcmsdh_set_get_wake(bcmsdh_info_t *bcmsdh, int flag);
+#endif /* DHD_WAKE_STATUS */
+
+/*
+ * PR 114233: [4335] Sdio 3.0 overflow due to spur mode PLL change
+ */
+static void
+dhdsdio_tune_fifoparam(struct dhd_bus *bus)
+{
+ int err;
+ uint8 devctl, wm, mes;
+
+ if (bus->sih->buscorerev >= 15) {
+ /* See .ppt in PR for these recommended values */
+ if (bus->blocksize == 512) {
+ wm = OVERFLOW_BLKSZ512_WM;
+ mes = OVERFLOW_BLKSZ512_MES;
+ } else {
+ mes = bus->blocksize/4;
+ wm = bus->blocksize/4;
+ }
+
+		/* XXX: Need to set the watermark here since SBSDIO_WATERMARK could be
+		 * overwritten elsewhere based on the watermark value. Refer to SWDHD-17.
+		 */
+ watermark = wm;
+ mesbusyctrl = mes;
+ } else {
+		DHD_INFO(("skip fifotune: SdioRev(%d) is lower than the minimum required ver\n",
+ bus->sih->buscorerev));
+ return;
+ }
+
+ /* Update watermark */
+ if (wm > 0) {
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_WATERMARK, wm, &err);
+
+ devctl = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+ devctl |= SBSDIO_DEVCTL_F2WM_ENAB;
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err);
+ }
+
+ /* Update MES */
+ if (mes > 0) {
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_MESBUSYCTRL,
+ (mes | SBSDIO_MESBUSYCTRL_ENAB), &err);
+ }
+
+ DHD_INFO(("Apply overflow WAR: 0x%02x 0x%02x 0x%02x\n",
+ bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err),
+ bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_WATERMARK, &err),
+ bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_MESBUSYCTRL, &err)));
+}
+
+static void
+dhd_dongle_setramsize(struct dhd_bus *bus, int mem_size)
+{
+ int32 min_size = DONGLE_MIN_RAMSIZE;
+ /* Restrict the ramsize to user specified limit */
+ DHD_ERROR(("user: Restrict the dongle ram size to %d, min accepted %d\n",
+ dhd_dongle_ramsize, min_size));
+ if ((dhd_dongle_ramsize > min_size) &&
+ (dhd_dongle_ramsize < (int32)bus->orig_ramsize))
+ bus->ramsize = dhd_dongle_ramsize;
+}
+
+static int
+dhdsdio_set_siaddr_window(dhd_bus_t *bus, uint32 address)
+{
+ int err = 0;
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW,
+ (address >> 8) & SBSDIO_SBADDRLOW_MASK, &err);
+ if (!err)
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRMID,
+ (address >> 16) & SBSDIO_SBADDRMID_MASK, &err);
+ if (!err)
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRHIGH,
+ (address >> 24) & SBSDIO_SBADDRHIGH_MASK, &err);
+ return err;
+}
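+/* Worked example (added for clarity): the 32-bit backplane address is spread
+ * across three function-1 registers as (addr >> 8), (addr >> 16) and
+ * (addr >> 24), each masked per sbsdio.h. For addr = 0x18004000 this writes
+ * SBADDRHIGH = 0x18 and SBADDRMID = 0x00, pointing the SB window at the
+ * chip's 0x18000000 enumeration space.
+ */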
+
+#ifdef BCMSPI
+static void
+dhdsdio_wkwlan(dhd_bus_t *bus, bool on)
+{
+ int err;
+ uint32 regdata;
+ bcmsdh_info_t *sdh = bus->sdh;
+
+ /* XXX: sdiod cores have SPI as a block, PCMCIA doesn't have the gspi core */
+	/* XXX: maybe we don't even need this check at all */
+ if (bus->sih->buscoretype == SDIOD_CORE_ID) {
+		/* wake up wlan function: WAKE_UP goes as ht_avail_request and alp_avail_request */
+ regdata = bcmsdh_cfg_read_word(sdh, SDIO_FUNC_0, SPID_CONFIG, NULL);
+ DHD_INFO(("F0 REG0 rd = 0x%x\n", regdata));
+
+ if (on == TRUE)
+ regdata |= WAKE_UP;
+ else
+ regdata &= ~WAKE_UP;
+
+ bcmsdh_cfg_write_word(sdh, SDIO_FUNC_0, SPID_CONFIG, regdata, &err);
+ }
+}
+#endif /* BCMSPI */
+
+#ifdef USE_OOB_GPIO1
+static int
+dhdsdio_oobwakeup_init(dhd_bus_t *bus)
+{
+ uint32 val, addr, data;
+
+ bcmsdh_gpioouten(bus->sdh, GPIO_DEV_WAKEUP);
+
+ addr = SI_ENUM_BASE(bus->sih) + OFFSETOF(chipcregs_t, chipcontrol_addr);
+ data = SI_ENUM_BASE(bus->sih) + OFFSETOF(chipcregs_t, chipcontrol_data);
+
+ /* Set device for gpio1 wakeup */
+ bcmsdh_reg_write(bus->sdh, addr, 4, 2);
+ val = bcmsdh_reg_read(bus->sdh, data, 4);
+ val |= CC_CHIPCTRL2_GPIO1_WAKEUP;
+ bcmsdh_reg_write(bus->sdh, data, 4, val);
+
+ bus->_oobwakeup = TRUE;
+
+ return 0;
+}
+#endif /* USE_OOB_GPIO1 */
+
+#ifndef BCMSPI
+/*
+ * Query if FW is in SR mode
+ */
+static bool
+dhdsdio_sr_cap(dhd_bus_t *bus)
+{
+ bool cap = FALSE;
+ uint32 core_capext, addr, data;
+
+ if (bus->sih->chip == BCM43430_CHIP_ID ||
+ bus->sih->chip == BCM43018_CHIP_ID) {
+ /* check if fw initialized sr engine */
+ addr = SI_ENUM_BASE(bus->sih) + OFFSETOF(chipcregs_t, sr_control1);
+ if (bcmsdh_reg_read(bus->sdh, addr, 4) != 0)
+ cap = TRUE;
+
+ return cap;
+ }
+ if (
+#ifdef UNRELEASEDCHIP
+ (bus->sih->chip == BCM4347_CHIP_ID) ||
+ (bus->sih->chip == BCM4357_CHIP_ID) ||
+ (bus->sih->chip == BCM4361_CHIP_ID) ||
+#endif
+ 0) {
+ core_capext = FALSE;
+ } else if ((bus->sih->chip == BCM4330_CHIP_ID) ||
+ (bus->sih->chip == BCM43362_CHIP_ID) ||
+ (BCM4347_CHIP(bus->sih->chip))) {
+ core_capext = FALSE;
+ } else if ((bus->sih->chip == BCM4335_CHIP_ID) ||
+ (bus->sih->chip == BCM4339_CHIP_ID) ||
+ BCM4345_CHIP(bus->sih->chip) ||
+ (bus->sih->chip == BCM4354_CHIP_ID) ||
+ (bus->sih->chip == BCM4358_CHIP_ID) ||
+ (bus->sih->chip == BCM43569_CHIP_ID) ||
+ (bus->sih->chip == BCM4371_CHIP_ID) ||
+ (BCM4349_CHIP(bus->sih->chip)) ||
+ (bus->sih->chip == BCM4350_CHIP_ID) ||
+ (bus->sih->chip == BCM4362_CHIP_ID) ||
+ (bus->sih->chip == BCM43012_CHIP_ID) ||
+ (bus->sih->chip == BCM43013_CHIP_ID) ||
+ (bus->sih->chip == BCM43014_CHIP_ID) ||
+ (bus->sih->chip == BCM43751_CHIP_ID) ||
+ (bus->sih->chip == BCM43752_CHIP_ID)) {
+ core_capext = TRUE;
+ } else {
+ /* XXX: For AOB, CORE_CAPEXT_ADDR is moved to PMU core */
+ core_capext = bcmsdh_reg_read(bus->sdh,
+ si_get_pmu_reg_addr(bus->sih, OFFSETOF(chipcregs_t, core_cap_ext)),
+ 4);
+
+ core_capext = (core_capext & CORE_CAPEXT_SR_SUPPORTED_MASK);
+ }
+ if (!(core_capext))
+ return FALSE;
+
+ if ((bus->sih->chip == BCM4335_CHIP_ID) ||
+ (bus->sih->chip == BCM4339_CHIP_ID) ||
+ BCM4345_CHIP(bus->sih->chip) ||
+ (bus->sih->chip == BCM4354_CHIP_ID) ||
+ (bus->sih->chip == BCM4358_CHIP_ID) ||
+ (bus->sih->chip == BCM43569_CHIP_ID) ||
+ (bus->sih->chip == BCM4371_CHIP_ID) ||
+ (bus->sih->chip == BCM4350_CHIP_ID)) {
+ uint32 enabval = 0;
+ addr = SI_ENUM_BASE(bus->sih) + OFFSETOF(chipcregs_t, chipcontrol_addr);
+ data = SI_ENUM_BASE(bus->sih) + OFFSETOF(chipcregs_t, chipcontrol_data);
+ /* XXX: assuming the dongle doesn't change chipcontrol_addr, because
+ * if that happens, the chipcontrol_data read will be wrong. So we need
+ * to make sure the dongle and host will not access chipcontrol_addr
+ * simultaneously at this point.
+ */
+ bcmsdh_reg_write(bus->sdh, addr, 4, CC_PMUCC3);
+ enabval = bcmsdh_reg_read(bus->sdh, data, 4);
+
+ if ((bus->sih->chip == BCM4350_CHIP_ID) ||
+ BCM4345_CHIP(bus->sih->chip) ||
+ (bus->sih->chip == BCM4354_CHIP_ID) ||
+ (bus->sih->chip == BCM4358_CHIP_ID) ||
+ (bus->sih->chip == BCM43569_CHIP_ID) ||
+ (bus->sih->chip == BCM4371_CHIP_ID))
+ enabval &= CC_CHIPCTRL3_SR_ENG_ENABLE;
+
+		/* XXX: not checking the CC_PMUCC3_SRCC_SR_ENG_ENAB bit [val 4]; instead,
+		 * check the whole register for a non-zero value so that the sleep
+		 * sequence can also be checked without enabling SR.
+		 */
+ if (enabval)
+ cap = TRUE;
+ } else {
+ data = bcmsdh_reg_read(bus->sdh,
+ si_get_pmu_reg_addr(bus->sih, OFFSETOF(chipcregs_t, retention_ctl)),
+ 4);
+ if ((data & (RCTL_MACPHY_DISABLE_MASK | RCTL_LOGIC_DISABLE_MASK)) == 0)
+ cap = TRUE;
+ }
+
+ return cap;
+}
+
+static int
+dhdsdio_sr_init(dhd_bus_t *bus)
+{
+ uint8 val;
+ int err = 0;
+
+ if (bus->sih->chip == BCM43012_CHIP_ID ||
+ bus->sih->chip == BCM43013_CHIP_ID ||
+ bus->sih->chip == BCM43014_CHIP_ID) {
+ val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WAKEUPCTRL, NULL);
+ val |= 1 << SBSDIO_FUNC1_WCTRL_ALPWAIT_SHIFT;
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WAKEUPCTRL,
+ 1 << SBSDIO_FUNC1_WCTRL_ALPWAIT_SHIFT, &err);
+ val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WAKEUPCTRL, NULL);
+ } else {
+ val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WAKEUPCTRL, NULL);
+ val |= 1 << SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT;
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WAKEUPCTRL,
+ 1 << SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT, &err);
+ val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WAKEUPCTRL, NULL);
+ }
+
+#ifdef USE_CMD14
+ /* Add CMD14 Support */
+ dhdsdio_devcap_set(bus,
+ (SDIOD_CCCR_BRCM_CARDCAP_CMD14_SUPPORT | SDIOD_CCCR_BRCM_CARDCAP_CMD14_EXT));
+#endif /* USE_CMD14 */
+
+ if (CHIPID(bus->sih->chip) == BCM43430_CHIP_ID ||
+ CHIPID(bus->sih->chip) == BCM43018_CHIP_ID ||
+ CHIPID(bus->sih->chip) == BCM4339_CHIP_ID ||
+ CHIPID(bus->sih->chip) == BCM4362_CHIP_ID ||
+ CHIPID(bus->sih->chip) == BCM43012_CHIP_ID ||
+ CHIPID(bus->sih->chip) == BCM43013_CHIP_ID ||
+ CHIPID(bus->sih->chip) == BCM43014_CHIP_ID ||
+ CHIPID(bus->sih->chip) == BCM43751_CHIP_ID ||
+ CHIPID(bus->sih->chip) == BCM43752_CHIP_ID)
+ dhdsdio_devcap_set(bus, SDIOD_CCCR_BRCM_CARDCAP_CMD_NODEC);
+
+ if (bus->sih->chip == BCM43012_CHIP_ID ||
+ bus->sih->chip == BCM43013_CHIP_ID ||
+ bus->sih->chip == BCM43014_CHIP_ID) {
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, SBSDIO_HT_AVAIL_REQ, &err);
+ } else {
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, SBSDIO_FORCE_HT, &err);
+ }
+ bus->_slpauto = dhd_slpauto ? TRUE : FALSE;
+
+ bus->_srenab = TRUE;
+
+ return 0;
+}
+#endif /* BCMSPI */
+
+/*
+ * FIX: Be sure KSO bit is enabled
+ * Currently it defaults to 0 but should be 1.
+ */
+static int
+dhdsdio_clk_kso_init(dhd_bus_t *bus)
+{
+ uint8 val;
+ int err = 0;
+
+ /* set flag */
+ bus->kso = TRUE;
+
+ /*
+ * Enable KeepSdioOn (KSO) bit for normal operation
+ * Default is 0 (4334A0) so set it. Fixed in B0.
+ */
+ val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, NULL);
+ if (!(val & SBSDIO_FUNC1_SLEEPCSR_KSO_MASK)) {
+ val |= (SBSDIO_FUNC1_SLEEPCSR_KSO_EN << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, val, &err);
+ if (err)
+ DHD_ERROR(("%s: SBSDIO_FUNC1_SLEEPCSR err: 0x%x\n", __FUNCTION__, err));
+ }
+
+ return 0;
+}
+
+#define KSO_DBG(x)
+/* XXX The time a KSO set takes depends on which resources are up and how many
+ * were down; the maximum is PMU_MAX_TRANSITION_DLY usec. The KSO attempt loop
+ * currently waits (KSO_WAIT_US [50 usec] + 2 SDIO operations) per attempt, up
+ * to MAX_KSO_ATTEMPTS attempts, so PMU_MAX_TRANSITION_DLY is used as the total
+ * wait time when calculating MAX_KSO_ATTEMPTS.
+ */
+#define KSO_WAIT_US 50
+#define KSO_WAIT_MS 1
+#define KSO_SLEEP_RETRY_COUNT 20
+#define KSO_WAKE_RETRY_COUNT 100
+#define ERROR_BCME_NODEVICE_MAX 1
+
+#define DEFAULT_MAX_KSO_ATTEMPTS (PMU_MAX_TRANSITION_DLY/KSO_WAIT_US)
+#ifndef CUSTOM_MAX_KSO_ATTEMPTS
+#define CUSTOM_MAX_KSO_ATTEMPTS DEFAULT_MAX_KSO_ATTEMPTS
+#endif
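+/* Worked arithmetic (added for clarity): with PMU_MAX_TRANSITION_DLY at its
+ * 1000000 us setting, DEFAULT_MAX_KSO_ATTEMPTS = 1000000 / KSO_WAIT_US(50) =
+ * 20000 attempts; the NO_EXT32K 5x setting raises this to 100000.
+ */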
+
+static int
+dhdsdio_clk_kso_enab(dhd_bus_t *bus, bool on)
+{
+ uint8 wr_val = 0, rd_val, cmp_val, bmask;
+ int err = 0;
+ int try_cnt = 0, try_max = CUSTOM_MAX_KSO_ATTEMPTS;
+ struct dhd_conf *conf = bus->dhd->conf;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0))
+ wifi_adapter_info_t *adapter = NULL;
+ uint32 bus_type = -1, bus_num = -1, slot_num = -1;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0) */
+
+ KSO_DBG(("%s> op:%s\n", __FUNCTION__, (on ? "KSO_SET" : "KSO_CLR")));
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0))
+ dhd_bus_get_ids(bus, &bus_type, &bus_num, &slot_num);
+ adapter = dhd_wifi_platform_get_adapter(bus_type, bus_num, slot_num);
+ sdio_retune_crc_disable(adapter->sdio_func);
+ if (on)
+ sdio_retune_hold_now(adapter->sdio_func);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0) */
+
+ wr_val |= (on << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
+
+ /* XXX 1st KSO write goes to AOS wake up core if device is asleep */
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, wr_val, &err);
+
+	/* In the case of the 43012 chip, the chip could go down immediately after the
+	 * KSO bit is cleared, so further reads of the KSO register could fail. Bail out
+	 * immediately after clearing the KSO bit to avoid polling it.
+	 */
+ if ((!on) && (bus->sih->chip == BCM43012_CHIP_ID ||
+ bus->sih->chip == BCM43013_CHIP_ID ||
+ bus->sih->chip == BCM43014_CHIP_ID)) {
+ goto exit;
+ }
+
+ if (on) {
+ /* XXX
+ * device WAKEUP through KSO:
+ * write bit 0 & read back until
+ * both bits 0(kso bit) & 1 (dev on status) are set
+ */
+ cmp_val = SBSDIO_FUNC1_SLEEPCSR_KSO_MASK | SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK;
+ bmask = cmp_val;
+
+#if defined(NDIS)
+		/* XXX Windows Host controller hangs if the chip still sleeps before a read.
+		 * So during a wake we write 0x1 for 5 msec to guarantee that the chip is awake.
+ */
+ for (int i = 0; i < KSO_WAKE_RETRY_COUNT; i++) {
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR,
+ wr_val, &err);
+ OSL_DELAY(KSO_WAIT_US);
+ }
+
+ rd_val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, &err);
+ if (((rd_val & bmask) != cmp_val) || err) {
+ /* Sdio Bus Failure - Bus hang */
+ DHD_ERROR(("%s> op:%s, ERROR: SDIO Bus Hang, rd_val:%x, ERR:%x \n",
+ __FUNCTION__, "KSO_SET", rd_val, err));
+ }
+#else
+ OSL_SLEEP(3);
+#endif /* defined(NDIS) */
+
+ } else {
+ /* Put device to sleep, turn off KSO */
+ cmp_val = 0;
+ /* XXX only check for bit0, bit1(devon status) may not get cleared right away */
+ bmask = SBSDIO_FUNC1_SLEEPCSR_KSO_MASK;
+ }
+#if !defined(NDIS)
+ /* XXX We can't use polling in Windows since Windows Host controller
+	 * hangs if the chip is asleep during a read or write.
+ */
+
+ if (conf->kso_try_max)
+ try_max = conf->kso_try_max;
+ do {
+		/*
+		 * XXX reliable KSO bit set/clr:
+		 * the sdiod sleep write access appears to be synced to the PMU 32 kHz clk,
+		 * so a single write attempt may fail (the same may hold for reads?).
+		 * In any case, read it back until it matches the written value.
+		 */
+ rd_val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, &err);
+ if (((rd_val & bmask) == cmp_val) && !err)
+ break;
+
+ KSO_DBG(("%s> KSO wr/rd retry:%d, ERR:%x \n", __FUNCTION__, try_cnt, err));
+
+ if (((try_cnt + 1) % KSO_SLEEP_RETRY_COUNT) == 0) {
+ OSL_SLEEP(KSO_WAIT_MS);
+ } else
+ OSL_DELAY(KSO_WAIT_US);
+
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, wr_val, &err);
+ } while (try_cnt++ < try_max);
+
+#ifdef KSO_DEBUG
+ if (try_cnt > 0 && try_cnt <= 10)
+ conf->kso_try_array[0] += 1;
+ else if (try_cnt <= 50)
+ conf->kso_try_array[1] += 1;
+ else if (try_cnt <= 100)
+ conf->kso_try_array[2] += 1;
+ else if (try_cnt <= 200)
+ conf->kso_try_array[3] += 1;
+ else if (try_cnt <= 500)
+ conf->kso_try_array[4] += 1;
+ else if (try_cnt <= 1000)
+ conf->kso_try_array[5] += 1;
+ else if (try_cnt <= 2000)
+ conf->kso_try_array[6] += 1;
+ else if (try_cnt <= 5000)
+ conf->kso_try_array[7] += 1;
+ else if (try_cnt <= 10000)
+ conf->kso_try_array[8] += 1;
+ else
+ conf->kso_try_array[9] += 1;
+#endif
+ if (try_cnt > 2)
+ KSO_DBG(("%s> op:%s, try_cnt:%d, rd_val:%x, ERR:%x \n",
+ __FUNCTION__, (on ? "KSO_SET" : "KSO_CLR"), try_cnt, rd_val, err));
+
+ if (try_cnt > try_max) {
+ DHD_ERROR(("%s> op:%s, ERROR: try_cnt:%d, rd_val:%x, ERR:%x \n",
+ __FUNCTION__, (on ? "KSO_SET" : "KSO_CLR"), try_cnt, rd_val, err));
+#ifdef KSO_DEBUG
+ {
+ int i;
+ printk(KERN_CONT DHD_LOG_PREFIXS);
+ for (i=0; i<10; i++) {
+ printk(KERN_CONT "[%d]: %d, ", i, conf->kso_try_array[i]);
+ }
+ printk("\n");
+ }
+#endif
+ }
+#endif /* !defined(NDIS) */
+
+exit:
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0))
+ if (on)
+ sdio_retune_release(adapter->sdio_func);
+ sdio_retune_crc_enable(adapter->sdio_func);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0) */
+
+ return err;
+}
+
+static int
+dhdsdio_clk_kso_iovar(dhd_bus_t *bus, bool on)
+{
+ int err = 0;
+
+ if (on == FALSE) {
+
+ BUS_WAKE(bus);
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+ DHD_ERROR(("%s: KSO disable clk: 0x%x\n", __FUNCTION__,
+ bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, &err)));
+ dhdsdio_clk_kso_enab(bus, FALSE);
+ } else {
+ DHD_ERROR(("%s: KSO enable\n", __FUNCTION__));
+
+ /* Make sure we have SD bus access */
+ if (bus->clkstate == CLK_NONE) {
+ DHD_ERROR(("%s: Request SD clk\n", __FUNCTION__));
+ dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+ }
+
+ dhdsdio_clk_kso_enab(bus, TRUE);
+
+ DHD_ERROR(("%s: sleepcsr: 0x%x\n", __FUNCTION__,
+ dhdsdio_sleepcsr_get(bus)));
+ }
+
+ bus->kso = on;
+ BCM_REFERENCE(err);
+
+ return 0;
+}
+
+static uint8
+dhdsdio_sleepcsr_get(dhd_bus_t *bus)
+{
+ int err = 0;
+ uint8 val = 0;
+
+ val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, &err);
+ /* XXX: Propagate error */
+ if (err)
+ DHD_TRACE(("Failed to read SLEEPCSR: %d\n", err));
+
+ return val;
+}
+
+uint8
+dhdsdio_devcap_get(dhd_bus_t *bus)
+{
+ return bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_BRCM_CARDCAP, NULL);
+}
+
+static int
+dhdsdio_devcap_set(dhd_bus_t *bus, uint8 cap)
+{
+ int err = 0;
+
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_BRCM_CARDCAP, cap, &err);
+ if (err)
+ DHD_ERROR(("%s: devcap set err: 0x%x\n", __FUNCTION__, err));
+
+ return 0;
+}
+
+static int
+dhdsdio_clk_devsleep_iovar(dhd_bus_t *bus, bool on)
+{
+ int err = 0, retry;
+ uint8 val;
+
+ retry = 0;
+ if (on == TRUE) {
+ /* Enter Sleep */
+
+ /* Be sure we request clk before going to sleep
+ * so we can wake-up with clk request already set
+ * else device can go back to sleep immediately
+ */
+ if (!SLPAUTO_ENAB(bus))
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+ else {
+ /* XXX: Check if Host cleared clock request
+ * XXX: With CMD14, Host does not need to explicitly toggle clock requests
+ * XXX: Just keep clock request active and use CMD14 to enter/exit sleep
+ */
+ val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+ if ((val & SBSDIO_CSR_MASK) == 0) {
+ DHD_ERROR(("%s: No clock before enter sleep:0x%x\n",
+ __FUNCTION__, val));
+
+ /* Reset clock request */
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+ SBSDIO_ALP_AVAIL_REQ, &err);
+ DHD_ERROR(("%s: clock before sleep:0x%x\n", __FUNCTION__,
+ bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, &err)));
+ }
+ }
+
+ DHD_TRACE(("%s: clk before sleep: 0x%x\n", __FUNCTION__,
+ bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, &err)));
+#ifdef USE_CMD14
+ err = bcmsdh_sleep(bus->sdh, TRUE);
+#else
+ if ((SLPAUTO_ENAB(bus)) && (bus->idleclock == DHD_IDLE_STOP)) {
+ if (sd1idle) {
+ /* Change to SD1 mode */
+ dhdsdio_set_sdmode(bus, 1);
+ }
+ }
+
+ err = dhdsdio_clk_kso_enab(bus, FALSE);
+ if (OOB_WAKEUP_ENAB(bus))
+ {
+#if !defined(NDIS)
+ err = bcmsdh_gpioout(bus->sdh, GPIO_DEV_WAKEUP, FALSE); /* GPIO_1 is off */
+#endif /* !defined(NDIS) */
+ }
+#endif /* USE_CMD14 */
+
+ if ((SLPAUTO_ENAB(bus)) && (bus->idleclock != DHD_IDLE_ACTIVE)) {
+ DHD_TRACE(("%s: Turnoff SD clk\n", __FUNCTION__));
+ /* Now remove the SD clock */
+ err = dhdsdio_sdclk(bus, FALSE);
+ }
+ } else {
+ /* Exit Sleep */
+ /* Make sure we have SD bus access */
+ if (bus->clkstate == CLK_NONE) {
+ DHD_TRACE(("%s: Request SD clk\n", __FUNCTION__));
+ dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+ }
+#ifdef USE_CMD14
+ err = bcmsdh_sleep(bus->sdh, FALSE);
+ if (SLPAUTO_ENAB(bus) && (err != 0)) {
+ /* XXX: CMD14 exit sleep is failing somehow
+ * XXX: Is Host out of sync with device?
+ * XXX: Try toggling the reverse
+ */
+ OSL_DELAY(10000);
+ DHD_TRACE(("%s: Resync device sleep\n", __FUNCTION__));
+
+ /* Toggle sleep to resync with host and device */
+ err = bcmsdh_sleep(bus->sdh, TRUE);
+ OSL_DELAY(10000);
+ err = bcmsdh_sleep(bus->sdh, FALSE);
+
+ /* XXX: Ugly hack for host-device out-of-sync while testing
+ * XXX: Need to root-cause
+ */
+ if (err) {
+ /* XXX: Host and device out-of-sync */
+ OSL_DELAY(10000);
+ DHD_ERROR(("%s: CMD14 exit failed again!\n", __FUNCTION__));
+
+ /* Toggle sleep to resync with host and device */
+ err = bcmsdh_sleep(bus->sdh, TRUE);
+ OSL_DELAY(10000);
+ err = bcmsdh_sleep(bus->sdh, FALSE);
+ if (err) {
+					/* XXX: Give up and assume it has exited sleep
+ * XXX: Device probably dead at this point
+ * XXX: So far only happens with SR
+ */
+ DHD_ERROR(("%s: CMD14 exit failed twice!\n", __FUNCTION__));
+ DHD_ERROR(("%s: FATAL: Device non-response!\n",
+ __FUNCTION__));
+ err = 0;
+ }
+ }
+ }
+#else
+ if (OOB_WAKEUP_ENAB(bus))
+ {
+#if !defined(NDIS)
+ err = bcmsdh_gpioout(bus->sdh, GPIO_DEV_WAKEUP, TRUE); /* GPIO_1 is on */
+#endif /* !defined(NDIS) */
+ }
+ /* PR 101351: sdiod_aos sleep followed by immediate wakeup
+ * before sdiod_aos takes over has a problem.
+ * Set KSO after ExitSleep.
+ */
+ do {
+ err = dhdsdio_clk_kso_enab(bus, TRUE);
+ if (err)
+ OSL_SLEEP(10);
+ } while ((err != 0) && (++retry < 3));
+
+ if (err != 0) {
+ DHD_ERROR(("ERROR: kso set failed retry: %d\n", retry));
+#ifndef BT_OVER_SDIO
+ err = 0; /* continue anyway */
+#endif /* BT_OVER_SDIO */
+ }
+
+ if ((SLPAUTO_ENAB(bus)) && (bus->idleclock == DHD_IDLE_STOP)) {
+ dhdsdio_set_sdmode(bus, bus->sd_mode);
+ }
+#endif /* !USE_CMD14 */
+
+ if (err == 0) {
+ uint8 csr;
+
+ /* Wait for device ready during transition to wake-up */
+ SPINWAIT_SLEEP(sdioh_spinwait_sleep,
+ (((csr = dhdsdio_sleepcsr_get(bus)) &
+ SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK) !=
+ (SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK)), (20000));
+
+ DHD_TRACE(("%s: ExitSleep sleepcsr: 0x%x\n", __FUNCTION__, csr));
+
+ if (!(csr & SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK)) {
+ DHD_ERROR(("%s:ERROR: ExitSleep device NOT Ready! 0x%x\n",
+ __FUNCTION__, csr));
+ err = BCME_NODEVICE;
+ }
+
+ /* PR 101351: sdiod_aos sleep followed by immediate wakeup
+ * before sdiod_aos takes over has a problem.
+ */
+ SPINWAIT_SLEEP(sdioh_spinwait_sleep,
+ (((csr = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, &err)) & SBSDIO_HT_AVAIL) !=
+ (SBSDIO_HT_AVAIL)), (DHD_WAIT_HTAVAIL));
+
+ DHD_TRACE(("%s: SBSDIO_FUNC1_CHIPCLKCSR : 0x%x\n", __FUNCTION__, csr));
+ if (!err && ((csr & SBSDIO_HT_AVAIL) != SBSDIO_HT_AVAIL)) {
+ DHD_ERROR(("%s:ERROR: device NOT Ready! 0x%x\n",
+ __FUNCTION__, csr));
+ err = BCME_NODEVICE;
+ }
+ }
+ }
+
+ /* Update if successful */
+ if (err == 0)
+ bus->kso = on ? FALSE : TRUE;
+ else {
+ DHD_ERROR(("%s: Sleep request failed: kso:%d on:%d err:%d\n",
+ __FUNCTION__, bus->kso, on, err));
+ if (!on && retry > 2)
+ bus->kso = FALSE;
+ }
+
+ return err;
+}
+
+/* Turn backplane clock on or off */
+static int
+dhdsdio_htclk(dhd_bus_t *bus, bool on, bool pendok)
+{
+#define HT_AVAIL_ERROR_MAX 10
+ static int ht_avail_error = 0;
+ int err;
+ uint8 clkctl, clkreq, devctl;
+ bcmsdh_info_t *sdh;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ clkctl = 0;
+ sdh = bus->sdh;
+
+#ifdef BCMINTERNAL
+ if (DHD_NOPMU(bus)) {
+ /* There is no PMU present, so just fake the clock state... */
+ bus->clkstate = (on ? CLK_AVAIL : CLK_SDONLY);
+ return BCME_OK;
+ }
+
+ if (bus->clockpoll)
+ pendok = FALSE;
+#endif /* BCMINTERNAL */
+
+ if (!KSO_ENAB(bus))
+ return BCME_OK;
+
+ if (SLPAUTO_ENAB(bus)) {
+ bus->clkstate = (on ? CLK_AVAIL : CLK_SDONLY);
+ return BCME_OK;
+ }
+
+ if (on) {
+ /* Request HT Avail */
+ clkreq = bus->alp_only ? SBSDIO_ALP_AVAIL_REQ : SBSDIO_HT_AVAIL_REQ;
+
+#ifdef BCMSPI
+ dhdsdio_wkwlan(bus, TRUE);
+#endif /* BCMSPI */
+
+ /* XXX Should be able to early-exit if pendok && PENDING */
+
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, clkreq, &err);
+ if (err) {
+ ht_avail_error++;
+ if (ht_avail_error < HT_AVAIL_ERROR_MAX) {
+ DHD_ERROR(("%s: HT Avail request error: %d\n", __FUNCTION__, err));
+ }
+
+#ifdef OEM_ANDROID
+ else if (ht_avail_error == HT_AVAIL_ERROR_MAX) {
+ bus->dhd->hang_reason = HANG_REASON_HT_AVAIL_ERROR;
+ dhd_os_send_hang_message(bus->dhd);
+ }
+#endif /* OEM_ANDROID */
+ return BCME_ERROR;
+ } else {
+ ht_avail_error = 0;
+ }
+
+ /* Check current status */
+ clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+ if (err) {
+ DHD_ERROR(("%s: HT Avail read error: %d\n", __FUNCTION__, err));
+ return BCME_ERROR;
+ }
+
+#if !defined(OOB_INTR_ONLY)
+ /* Go to pending and await interrupt if appropriate */
+ if (!SBSDIO_CLKAV(clkctl, bus->alp_only) && pendok) {
+ /* Allow only clock-available interrupt */
+ devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+ if (err) {
+ DHD_ERROR(("%s: Devctl access error setting CA: %d\n",
+ __FUNCTION__, err));
+ return BCME_ERROR;
+ }
+
+ devctl |= SBSDIO_DEVCTL_CA_INT_ONLY;
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err);
+ DHD_INFO(("CLKCTL: set PENDING\n"));
+ bus->clkstate = CLK_PENDING;
+ return BCME_OK;
+ } else
+#endif /* !defined (OOB_INTR_ONLY) */
+ {
+ if (bus->clkstate == CLK_PENDING) {
+ /* Cancel CA-only interrupt filter */
+ devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+ devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err);
+ }
+ }
+#ifndef BCMSDIOLITE
+ /* Otherwise, wait here (polling) for HT Avail */
+ if (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
+ SPINWAIT_SLEEP(sdioh_spinwait_sleep,
+ ((clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, &err)),
+ !SBSDIO_CLKAV(clkctl, bus->alp_only)), PMU_MAX_TRANSITION_DLY);
+ }
+ if (err) {
+ DHD_ERROR(("%s: HT Avail request error: %d\n", __FUNCTION__, err));
+ return BCME_ERROR;
+ }
+ if (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
+ DHD_ERROR(("%s: HT Avail timeout (%d): clkctl 0x%02x\n",
+ __FUNCTION__, PMU_MAX_TRANSITION_DLY, clkctl));
+ return BCME_ERROR;
+ }
+#endif /* BCMSDIOLITE */
+ /* Mark clock available */
+ bus->clkstate = CLK_AVAIL;
+ DHD_INFO(("CLKCTL: turned ON\n"));
+
+#if defined(DHD_DEBUG)
+ if (bus->alp_only == TRUE) {
+#if !defined(BCMLXSDMMC)
+ /* XXX For the SDMMC Driver stack, if DHD was unloaded,
+ * the chip is not completely reset, so in this case,
+ * the PMU may already be programmed to allow HT clock.
+ */
+ if (!SBSDIO_ALPONLY(clkctl)) {
+ DHD_ERROR(("%s: HT Clock, when ALP Only\n", __FUNCTION__));
+ }
+#endif /* !defined(BCMLXSDMMC) */
+ } else {
+ if (SBSDIO_ALPONLY(clkctl)) {
+ DHD_ERROR(("%s: HT Clock should be on.\n", __FUNCTION__));
+ }
+ }
+#endif /* defined (DHD_DEBUG) */
+
+ bus->activity = TRUE;
+#ifdef DHD_USE_IDLECOUNT
+ bus->idlecount = 0;
+#endif /* DHD_USE_IDLECOUNT */
+ } else {
+ clkreq = 0;
+
+ if (bus->clkstate == CLK_PENDING) {
+ /* Cancel CA-only interrupt filter */
+ devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+ devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err);
+ }
+
+ bus->clkstate = CLK_SDONLY;
+ if (!SR_ENAB(bus)) {
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, clkreq, &err);
+ DHD_INFO(("CLKCTL: turned OFF\n"));
+ if (err) {
+ DHD_ERROR(("%s: Failed access turning clock off: %d\n",
+ __FUNCTION__, err));
+ return BCME_ERROR;
+ }
+ }
+#ifdef BCMSPI
+ dhdsdio_wkwlan(bus, FALSE);
+#endif /* BCMSPI */
+ }
+ return BCME_OK;
+}
+
+/* Change SD1/SD4 bus mode */
+static int
+dhdsdio_set_sdmode(dhd_bus_t *bus, int32 sd_mode)
+{
+ int err;
+
+ err = bcmsdh_iovar_op(bus->sdh, "sd_mode", NULL, 0,
+ &sd_mode, sizeof(sd_mode), TRUE);
+ if (err) {
+ DHD_ERROR(("%s: error changing sd_mode: %d\n",
+ __FUNCTION__, err));
+ return BCME_ERROR;
+ }
+ return BCME_OK;
+}
+
+/* Change idle/active SD state */
+static int
+dhdsdio_sdclk(dhd_bus_t *bus, bool on)
+{
+#ifndef BCMSPI
+ int err;
+ int32 iovalue;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (on) {
+ if (bus->idleclock == DHD_IDLE_STOP) {
+ /* Turn on clock and restore mode */
+ iovalue = 1;
+ err = bcmsdh_iovar_op(bus->sdh, "sd_clock", NULL, 0,
+ &iovalue, sizeof(iovalue), TRUE);
+ if (err) {
+ DHD_ERROR(("%s: error enabling sd_clock: %d\n",
+ __FUNCTION__, err));
+ return BCME_ERROR;
+ }
+
+ } else if (bus->idleclock != DHD_IDLE_ACTIVE) {
+ /* Restore clock speed */
+ iovalue = bus->sd_divisor;
+ err = bcmsdh_iovar_op(bus->sdh, "sd_divisor", NULL, 0,
+ &iovalue, sizeof(iovalue), TRUE);
+ if (err) {
+ DHD_ERROR(("%s: error restoring sd_divisor: %d\n",
+ __FUNCTION__, err));
+ return BCME_ERROR;
+ }
+ }
+ bus->clkstate = CLK_SDONLY;
+ } else {
+ /* Stop or slow the SD clock itself */
+ if ((bus->sd_divisor == -1) || (bus->sd_mode == -1)) {
+ DHD_TRACE(("%s: can't idle clock, divisor %d mode %d\n",
+ __FUNCTION__, bus->sd_divisor, bus->sd_mode));
+ return BCME_ERROR;
+ }
+ if (bus->idleclock == DHD_IDLE_STOP) {
+ iovalue = 0;
+ err = bcmsdh_iovar_op(bus->sdh, "sd_clock", NULL, 0,
+ &iovalue, sizeof(iovalue), TRUE);
+ if (err) {
+ DHD_ERROR(("%s: error disabling sd_clock: %d\n",
+ __FUNCTION__, err));
+ return BCME_ERROR;
+ }
+ } else if (bus->idleclock != DHD_IDLE_ACTIVE) {
+ /* Set divisor to idle value */
+ iovalue = bus->idleclock;
+ err = bcmsdh_iovar_op(bus->sdh, "sd_divisor", NULL, 0,
+ &iovalue, sizeof(iovalue), TRUE);
+ if (err) {
+ DHD_ERROR(("%s: error changing sd_divisor: %d\n",
+ __FUNCTION__, err));
+ return BCME_ERROR;
+ }
+ }
+ bus->clkstate = CLK_NONE;
+ }
+#endif /* BCMSPI */
+
+ return BCME_OK;
+}
+
+/* Transition SD and backplane clock readiness */
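+/*
+ * Clock state ladder, as implemented by the switch below:
+ *   CLK_NONE -> CLK_SDONLY (SD clock up) -> CLK_AVAIL (backplane HT up);
+ * the reverse path drops the HT request first, then idles the SD clock.
+ */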
+static int
+dhdsdio_clkctl(dhd_bus_t *bus, uint target, bool pendok)
+{
+ int ret = BCME_OK;
+#ifdef DHD_DEBUG
+ uint oldstate = bus->clkstate;
+#endif /* DHD_DEBUG */
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ /* Early exit if we're already there */
+ if (bus->clkstate == target) {
+ if (target == CLK_AVAIL) {
+ dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms);
+ bus->activity = TRUE;
+#ifdef DHD_USE_IDLECOUNT
+ bus->idlecount = 0;
+#endif /* DHD_USE_IDLECOUNT */
+ }
+ return ret;
+ }
+
+ switch (target) {
+ case CLK_AVAIL:
+ /* Make sure SD clock is available */
+ if (bus->clkstate == CLK_NONE)
+ dhdsdio_sdclk(bus, TRUE);
+ /* Now request HT Avail on the backplane */
+ ret = dhdsdio_htclk(bus, TRUE, pendok);
+ if (ret == BCME_OK) {
+ dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms);
+ bus->activity = TRUE;
+#ifdef DHD_USE_IDLECOUNT
+ bus->idlecount = 0;
+#endif /* DHD_USE_IDLECOUNT */
+ }
+ break;
+
+ case CLK_SDONLY:
+
+#ifdef BT_OVER_SDIO
+ /*
+ * If the request is to switch off the backplane clock,
+ * confirm that BT is inactive before doing so.
+ * If this call came from a non-watchdog context, the watchdog
+ * will anyway switch off the clock again once nothing remains
+ * to be done and BT has finished using the bus.
+ */
+ if (bus->bt_use_count != 0) {
+ DHD_INFO(("%s(): Req CLK_SDONLY, BT is active %d not switching off \r\n",
+ __FUNCTION__, bus->bt_use_count));
+ ret = BCME_OK;
+ dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms);
+ break;
+ }
+
+ DHD_INFO(("%s(): Request CLK_NONE BT is NOT active switching off \r\n",
+ __FUNCTION__));
+#endif /* BT_OVER_SDIO */
+
+ /* Remove HT request, or bring up SD clock */
+ if (bus->clkstate == CLK_NONE)
+ ret = dhdsdio_sdclk(bus, TRUE);
+ else if (bus->clkstate == CLK_AVAIL)
+ ret = dhdsdio_htclk(bus, FALSE, FALSE);
+ else
+ DHD_ERROR(("dhdsdio_clkctl: request for %d -> %d\n",
+ bus->clkstate, target));
+ if (ret == BCME_OK) {
+ dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms);
+ }
+ break;
+
+ case CLK_NONE:
+
+#ifdef BT_OVER_SDIO
+ /*
+ * If the request is to switch off the backplane clock,
+ * confirm that BT is inactive before doing so.
+ * If this call came from a non-watchdog context, the watchdog
+ * will anyway switch off the clock again once nothing remains
+ * to be done and BT has finished using the bus.
+ */
+ if (bus->bt_use_count != 0) {
+ DHD_INFO(("%s(): Request CLK_NONE BT is active %d not switching off \r\n",
+ __FUNCTION__, bus->bt_use_count));
+ ret = BCME_OK;
+ break;
+ }
+
+ DHD_INFO(("%s(): Request CLK_NONE BT is NOT active switching off \r\n",
+ __FUNCTION__));
+#endif /* BT_OVER_SDIO */
+
+ /* Make sure to remove HT request */
+ if (bus->clkstate == CLK_AVAIL)
+ ret = dhdsdio_htclk(bus, FALSE, FALSE);
+ /* Now remove the SD clock */
+ ret = dhdsdio_sdclk(bus, FALSE);
+#ifdef DHD_DEBUG
+ if (bus->dhd->dhd_console_ms == 0)
+#endif /* DHD_DEBUG */
+ if (bus->poll == 0)
+ dhd_os_wd_timer(bus->dhd, 0);
+ break;
+ }
+#ifdef DHD_DEBUG
+ DHD_INFO(("dhdsdio_clkctl: %d -> %d\n", oldstate, bus->clkstate));
+#endif /* DHD_DEBUG */
+
+ return ret;
+}
+
+static int
+dhdsdio_bussleep(dhd_bus_t *bus, bool sleep)
+{
+ int err = 0;
+ bcmsdh_info_t *sdh = bus->sdh;
+ sdpcmd_regs_t *regs = bus->regs;
+ uint retries = 0;
+#if defined(BCMSDIOH_STD)
+ uint32 sd3_tuning_disable = FALSE;
+#endif /* BCMSDIOH_STD */
+
+ DHD_INFO(("dhdsdio_bussleep: request %s (currently %s)\n",
+ (sleep ? "SLEEP" : "WAKE"),
+ (bus->sleeping ? "SLEEP" : "WAKE")));
+
+ if (bus->dhd->hang_was_sent)
+ return BCME_ERROR;
+
+ /* Done if we're already in the requested state */
+ if (sleep == bus->sleeping)
+ return BCME_OK;
+
+ /* Going to sleep: set the alarm and turn off the lights... */
+ if (sleep) {
+ /* Don't sleep if something is pending */
+#ifdef DHD_USE_IDLECOUNT
+ if (bus->dpc_sched || bus->rxskip || pktq_n_pkts_tot(&bus->txq) ||
+ bus->readframes || bus->ctrl_frame_stat)
+#else
+ if (bus->dpc_sched || bus->rxskip || pktq_n_pkts_tot(&bus->txq))
+#endif /* DHD_USE_IDLECOUNT */
+ return BCME_BUSY;
+
+#ifdef BT_OVER_SDIO
+ /*
+ * The assumption behind this hook: the WLAN driver attempts to go to
+ * sleep either from its active contexts or from the watchdog context.
+ * At that moment, if BT is still actively using the bus, we return
+ * BCME_BUSY, but the bus->sleeping state is left unchanged, so the
+ * caller can schedule the watchdog again and retry the sleep later.
+ *
+ * If BT is the only remaining (last) user, we do not switch off the
+ * clock immediately; WLAN decides when to sleep, i.e. from the watchdog.
+ * If the watchdog then attempts to switch off the clock while another
+ * WLAN context is active, the two are serialized by the sdlock anyway.
+ */
+ if (bus->bt_use_count != 0) {
+ DHD_INFO(("%s(): Cannot sleep BT is active \r\n", __FUNCTION__));
+ return BCME_BUSY;
+ }
+#endif /* BT_OVER_SDIO */
+
+ /* XXX Is it an error to sleep when not in data state? */
+
+ if (!SLPAUTO_ENAB(bus)) {
+ /* Disable SDIO interrupts (no longer interested) */
+ bcmsdh_intr_disable(bus->sdh);
+
+ /* Make sure the controller has the bus up */
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+ /* Tell device to start using OOB wakeup */
+ W_SDREG(SMB_USE_OOB, &regs->tosbmailbox, retries);
+ if (retries > retry_limit)
+ DHD_ERROR(("CANNOT SIGNAL CHIP, WILL NOT WAKE UP!!\n"));
+
+ /* Turn off our contribution to the HT clock request */
+ dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+ SBSDIO_FORCE_HW_CLKREQ_OFF, NULL);
+
+ /* Isolate the bus */
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL,
+ SBSDIO_DEVCTL_PADS_ISO, NULL);
+ } else {
+#ifdef FORCE_SWOOB_ENABLE
+ /* Tell device to start using OOB wakeup */
+ W_SDREG(SMB_USE_OOB, &regs->tosbmailbox, retries);
+ if (retries > retry_limit)
+ DHD_ERROR(("CANNOT SIGNAL CHIP, WILL NOT WAKE UP!!\n"));
+#endif
+ /* Leave interrupts enabled since device can exit sleep and
+ * interrupt host
+ */
+ err = dhdsdio_clk_devsleep_iovar(bus, TRUE /* sleep */);
+ }
+
+ /* Change state */
+ bus->sleeping = TRUE;
+#if defined(BCMSDIOH_STD)
+ sd3_tuning_disable = TRUE;
+ err = bcmsdh_iovar_op(bus->sdh, "sd3_tuning_disable", NULL, 0,
+ &sd3_tuning_disable, sizeof(sd3_tuning_disable), TRUE);
+#endif /* BCMSDIOH_STD */
+#if defined(LINUX) && defined(SUPPORT_P2P_GO_PS)
+ wake_up(&bus->bus_sleep);
+#endif /* LINUX && SUPPORT_P2P_GO_PS */
+ /* XXX Should be able to turn off clock and power */
+ /* XXX Make sure GPIO interrupt input is enabled */
+ } else {
+ /* Waking up: bus power up is ok, set local state */
+
+ if (!SLPAUTO_ENAB(bus)) {
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, 0, &err);
+
+ /* Force pad isolation off if possible (in case power never toggled) */
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, 0, NULL);
+
+ /* XXX Make sure GPIO interrupt input is disabled */
+ /* XXX Should be able to turn on power and clock */
+
+ /* Make sure the controller has the bus up */
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+ /* Send misc interrupt to indicate OOB not needed */
+ W_SDREG(0, &regs->tosbmailboxdata, retries);
+ if (retries <= retry_limit)
+ W_SDREG(SMB_DEV_INT, &regs->tosbmailbox, retries);
+
+ if (retries > retry_limit)
+ DHD_ERROR(("CANNOT SIGNAL CHIP TO CLEAR OOB!!\n"));
+
+ /* Make sure we have SD bus access */
+ dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+
+ /* Enable interrupts again */
+ if (bus->intr && (bus->dhd->busstate == DHD_BUS_DATA)) {
+ bus->intdis = FALSE;
+ bcmsdh_intr_enable(bus->sdh);
+ }
+ } else {
+ err = dhdsdio_clk_devsleep_iovar(bus, FALSE /* wake */);
+#ifdef FORCE_SWOOB_ENABLE
+ /* Send misc interrupt to indicate OOB not needed */
+ W_SDREG(0, &regs->tosbmailboxdata, retries);
+ if (retries <= retry_limit)
+ W_SDREG(SMB_DEV_INT, &regs->tosbmailbox, retries);
+#endif
+#ifdef BT_OVER_SDIO
+ if (err < 0) {
+ struct net_device *net = NULL;
+ dhd_pub_t *dhd = bus->dhd;
+ net = dhd_idx2net(dhd, 0);
+ if (net != NULL) {
+ DHD_ERROR(("<< WIFI HANG by KSO Enabled failure\n"));
+ dhd_os_sdunlock(dhd);
+ net_os_send_hang_message(net);
+ dhd_os_sdlock(dhd);
+ } else {
+ DHD_ERROR(("<< WIFI HANG Fail because net is NULL\n"));
+ }
+ }
+#endif /* BT_OVER_SDIO */
+ }
+
+ if (err == 0) {
+ /* Change state */
+ bus->sleeping = FALSE;
+#if defined(BCMSDIOH_STD)
+ sd3_tuning_disable = FALSE;
+ err = bcmsdh_iovar_op(bus->sdh, "sd3_tuning_disable", NULL, 0,
+ &sd3_tuning_disable, sizeof(sd3_tuning_disable), TRUE);
+#endif /* BCMSDIOH_STD */
+ }
+ }
+
+ return err;
+}
+
+#ifdef BT_OVER_SDIO
+/*
+ * Call this function to get the clock running.
+ * Assumes that the caller holds the sdlock.
+ * bus - Pointer to the dhd_bus handle
+ * can_wait - TRUE if the caller can wait until the clock becomes ready
+ * FALSE if the caller cannot wait
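+ *
+ * Usage sketch (illustrative): the BT side brackets its bus traffic with
+ * __dhdsdio_clk_enable()/__dhdsdio_clk_disable(); calls nest through
+ * bus->bt_use_count, and the watchdog later idles the clock for the last user.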
+ */
+int __dhdsdio_clk_enable(struct dhd_bus *bus, bus_owner_t owner, int can_wait)
+{
+ int ret = BCME_ERROR;
+
+ BCM_REFERENCE(owner);
+
+ bus->bt_use_count++;
+
+ /*
+ * BUS_WAKE and clkctl can be called multiple times; both are stateful
+ * and perform no new configuration if the clock is already on.
+ */
+
+ /* Wake up the Dongle FW from SR */
+ BUS_WAKE(bus);
+
+ /*
+ * Make sure back plane ht clk is on
+ * CLK_AVAIL - Turn On both SD & HT clock
+ */
+ ret = dhdsdio_clkctl(bus, CLK_AVAIL, can_wait);
+
+ DHD_INFO(("%s():bt_use_count %d \r\n", __FUNCTION__,
+ bus->bt_use_count));
+ return ret;
+}
+
+/*
+ * Call this function to relinquish the clock.
+ * Assumes that the caller holds the sdlock.
+ * bus - Pointer to the dhd_bus handle
+ * can_wait - TRUE if the caller can wait until the clock becomes ready
+ * FALSE if the caller cannot wait
+ */
+int __dhdsdio_clk_disable(struct dhd_bus *bus, bus_owner_t owner, int can_wait)
+{
+ int ret = BCME_ERROR;
+
+ BCM_REFERENCE(owner);
+ BCM_REFERENCE(can_wait);
+
+ if (bus->bt_use_count == 0) {
+ DHD_ERROR(("%s(): Clocks are already turned off \r\n",
+ __FUNCTION__));
+ return ret;
+ }
+
+ bus->bt_use_count--;
+
+ /*
+ * When the SDIO bus is shared between BT and WLAN, the clock is turned
+ * off once the last user has relinquished it. WLAN is treated as the
+ * bus master (even when it is not active): the DHD watchdog runs even
+ * while WLAN is off, so the watchdog is the context that puts the bus
+ * to sleep. Refer to dhd_bus_watchdog().
+ */
+
+ ret = BCME_OK;
+ DHD_INFO(("%s():bt_use_count %d \r\n", __FUNCTION__,
+ bus->bt_use_count));
+ return ret;
+}
+
+void dhdsdio_reset_bt_use_count(struct dhd_bus *bus)
+{
+ /* reset bt use count */
+ bus->bt_use_count = 0;
+}
+#endif /* BT_OVER_SDIO */
+
+#ifdef USE_DYNAMIC_F2_BLKSIZE
+int dhdsdio_func_blocksize(dhd_pub_t *dhd, int function_num, int block_size)
+{
+ int func_blk_size = function_num;
+ int bcmerr = 0;
+ int result;
+
+ bcmerr = dhd_bus_iovar_op(dhd, "sd_blocksize", &func_blk_size,
+ sizeof(int), &result, sizeof(int), IOV_GET);
+
+ if (bcmerr != BCME_OK) {
+ DHD_ERROR(("%s: Get F%d Block size error\n", __FUNCTION__, function_num));
+ return BCME_ERROR;
+ }
+
+ if (result != block_size) {
+ DHD_TRACE_HW4(("%s: F%d Block size set from %d to %d\n",
+ __FUNCTION__, function_num, result, block_size));
+ func_blk_size = function_num << 16 | block_size;
+ bcmerr = dhd_bus_iovar_op(dhd, "sd_blocksize", NULL,
+ 0, &func_blk_size, sizeof(int32), IOV_SET);
+ if (bcmerr != BCME_OK) {
+ DHD_ERROR(("%s: Set F2 Block size error\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+ }
+
+ return BCME_OK;
+}
+#endif /* USE_DYNAMIC_F2_BLKSIZE */
+
+#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) || defined(FORCE_WOWLAN)
+void
+dhd_enable_oob_intr(struct dhd_bus *bus, bool enable)
+{
+#if defined(BCMSPI_ANDROID)
+ bcmsdh_intr_enable(bus->sdh);
+#elif defined(HW_OOB) || defined(FORCE_WOWLAN)
+ bcmsdh_enable_hw_oob_intr(bus->sdh, enable);
+#else
+ sdpcmd_regs_t *regs = bus->regs;
+ uint retries = 0;
+
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+ if (enable == TRUE) {
+
+ /* Tell device to start using OOB wakeup */
+ W_SDREG(SMB_USE_OOB, &regs->tosbmailbox, retries);
+ if (retries > retry_limit)
+ DHD_ERROR(("CANNOT SIGNAL CHIP, WILL NOT WAKE UP!!\n"));
+
+ } else {
+ /* Send misc interrupt to indicate OOB not needed */
+ W_SDREG(0, &regs->tosbmailboxdata, retries);
+ if (retries <= retry_limit)
+ W_SDREG(SMB_DEV_INT, &regs->tosbmailbox, retries);
+ }
+
+ /* Turn off our contribution to the HT clock request */
+ dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+#endif /* BCMSPI_ANDROID */
+}
+#endif /* defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) || defined(FORCE_WOWLAN) */
+
+int
+dhd_bus_txdata(struct dhd_bus *bus, void *pkt)
+{
+ int ret = BCME_ERROR;
+ osl_t *osh;
+ uint datalen, prec;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ osh = bus->dhd->osh;
+ datalen = PKTLEN(osh, pkt);
+
+#ifdef SDTEST
+ /* Push the test header if doing loopback */
+ if (bus->ext_loop) {
+ uint8* data;
+ PKTPUSH(osh, pkt, SDPCM_TEST_HDRLEN);
+ data = PKTDATA(osh, pkt);
+ *data++ = SDPCM_TEST_ECHOREQ;
+ *data++ = (uint8)bus->loopid++;
+ *data++ = (datalen >> 0);
+ *data++ = (datalen >> 8);
+ datalen += SDPCM_TEST_HDRLEN;
+ }
+#else /* SDTEST */
+ BCM_REFERENCE(datalen);
+#endif /* SDTEST */
+
+ prec = PRIO2PREC((PKTPRIO(pkt) & PRIOMASK));
+
+ /* move from dhdsdio_sendfromq(), try to orphan skb early */
+ if (bus->dhd->conf->orphan_move == 1)
+ PKTORPHAN(pkt, bus->dhd->conf->tsq);
+
+ /* Check for existing queue, current flow-control, pending event, or pending clock */
+ if (dhd_deferred_tx || bus->fcstate || pktq_n_pkts_tot(&bus->txq) || bus->dpc_sched ||
+ (!DATAOK(bus)) || (bus->flowcontrol & NBITVAL(prec)) ||
+ (bus->clkstate != CLK_AVAIL)) {
+ bool deq_ret;
+ int pkq_len = 0;
+
+ DHD_TRACE(("%s: deferring pktq len %d\n", __FUNCTION__,
+ pktq_n_pkts_tot(&bus->txq)));
+#ifdef BCMINTERNAL
+ if (!bus->fcstate)
+ bus->tx_deferred++;
+#endif /* BCMINTERNAL */
+ bus->fcqueued++;
+
+ /* Priority based enq */
+ dhd_os_sdlock_txq(bus->dhd);
+ deq_ret = dhd_prec_enq(bus->dhd, &bus->txq, pkt, prec);
+ dhd_os_sdunlock_txq(bus->dhd);
+
+ if (!deq_ret) {
+#ifdef PROP_TXSTATUS
+ if (DHD_PKTTAG_WLFCPKT(PKTTAG(pkt)) == 0)
+#endif /* PROP_TXSTATUS */
+ {
+#ifdef DHDTCPACK_SUPPRESS
+ if (dhd_tcpack_check_xmit(bus->dhd, pkt) == BCME_ERROR) {
+ DHD_ERROR(("%s %d: tcpack_suppress ERROR!!! Stop using\n",
+ __FUNCTION__, __LINE__));
+ dhd_tcpack_suppress_set(bus->dhd, TCPACK_SUP_OFF);
+ }
+#endif /* DHDTCPACK_SUPPRESS */
+ dhd_txcomplete(bus->dhd, pkt, FALSE);
+ PKTFREE(osh, pkt, TRUE); /* XXX update counter */
+ }
+ ret = BCME_NORESOURCE;
+ } else
+ ret = BCME_OK;
+
+ /* XXX Possible race since check and action are not locked? */
+ if (dhd_doflow) {
+ dhd_os_sdlock_txq(bus->dhd);
+ pkq_len = pktq_n_pkts_tot(&bus->txq);
+ dhd_os_sdunlock_txq(bus->dhd);
+ }
+ if (dhd_doflow && pkq_len >= FCHI) {
+ bool wlfc_enabled = FALSE;
+#ifdef PROP_TXSTATUS
+ wlfc_enabled = (dhd_wlfc_flowcontrol(bus->dhd, ON, FALSE) !=
+ WLFC_UNSUPPORTED);
+#endif
+ if (!wlfc_enabled && dhd_doflow) {
+ dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON);
+ }
+ }
+
+#ifdef DHD_DEBUG
+ dhd_os_sdlock_txq(bus->dhd);
+ if (pktqprec_n_pkts(&bus->txq, prec) > qcount[prec])
+ qcount[prec] = pktqprec_n_pkts(&bus->txq, prec);
+ dhd_os_sdunlock_txq(bus->dhd);
+#endif
+
+ /* Schedule DPC if needed to send queued packet(s) */
+ /* XXX Also here, since other deferral conditions may no longer hold? */
+ if (dhd_deferred_tx && !bus->dpc_sched) {
+ if (bus->dhd->conf->deferred_tx_len) {
+ if (dhd_os_wd_timer_enabled(bus->dhd) == FALSE) {
+ bus->dpc_sched = TRUE;
+ dhd_sched_dpc(bus->dhd);
+ }
+ if(pktq_n_pkts_tot(&bus->txq) >= bus->dhd->conf->deferred_tx_len &&
+ dhd_os_wd_timer_enabled(bus->dhd) == FALSE) {
+ bus->dpc_sched = TRUE;
+ dhd_sched_dpc(bus->dhd);
+ }
+ } else {
+ bus->dpc_sched = TRUE;
+ dhd_sched_dpc(bus->dhd);
+ }
+ }
+ } else {
+ int chan = SDPCM_DATA_CHANNEL;
+
+#ifdef SDTEST
+ chan = (bus->ext_loop ? SDPCM_TEST_CHANNEL : SDPCM_DATA_CHANNEL);
+#endif
+ /* Lock: we're about to use shared data/code (and SDIO) */
+ dhd_os_sdlock(bus->dhd);
+
+ /* Otherwise, send it now */
+ BUS_WAKE(bus);
+ /* Make sure back plane ht clk is on, no pending allowed */
+ dhdsdio_clkctl(bus, CLK_AVAIL, TRUE);
+
+ ret = dhdsdio_txpkt(bus, chan, &pkt, 1, TRUE);
+
+ if (ret != BCME_OK)
+ bus->dhd->tx_errors++;
+ else
+ bus->dhd->dstats.tx_bytes += datalen;
+
+ if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched &&
+ NO_OTHER_ACTIVE_BUS_USER(bus)) {
+ bus->activity = FALSE;
+ dhdsdio_bussleep(bus, TRUE);
+ dhdsdio_clkctl(bus, CLK_NONE, FALSE);
+ }
+
+ dhd_os_sdunlock(bus->dhd);
+ }
+
+ return ret;
+}
+
+/* Align the packet data pointer and packet length to an n-byte boundary and process
+ * the packet headers; a new packet may be allocated if there is not enough head
+ * and/or tail room for padding. The caller is responsible for updating the glom
+ * size in the head packet (when glom is used).
+ *
+ * pad_pkt_len: returns the length of extra padding needed from the padding packet;
+ * this parameter is used in tx glom mode only
+ *
+ * new_pkt: out, pointer to the new packet allocated due to insufficient head room
+ * for alignment padding, NULL if not needed; the caller is responsible for freeing it
+ *
+ * return: positive value - length of the packet, including head and tail padding
+ * negative value - errors
+ */
+static int dhdsdio_txpkt_preprocess(dhd_bus_t *bus, void *pkt, int chan, int txseq,
+ int prev_chain_total_len, bool last_chained_pkt,
+ int *pad_pkt_len, void **new_pkt
+#if defined(BCMSDIOH_TXGLOM_EXT)
+ , int first_frame
+#endif
+)
+{
+ osl_t *osh;
+ uint8 *frame;
+ int pkt_len;
+ int modulo;
+ int head_padding;
+ int tail_padding = 0;
+ uint32 swheader;
+ uint32 swhdr_offset;
+ bool alloc_new_pkt = FALSE;
+ uint8 sdpcm_hdrlen = bus->txglom_enable ? SDPCM_HDRLEN_TXGLOM : SDPCM_HDRLEN;
+#ifdef PKT_STATICS
+ uint16 len;
+#endif
+
+ *new_pkt = NULL;
+ osh = bus->dhd->osh;
+
+#ifdef DHDTCPACK_SUPPRESS
+ if (dhd_tcpack_check_xmit(bus->dhd, pkt) == BCME_ERROR) {
+ DHD_ERROR(("%s %d: tcpack_suppress ERROR!!! Stop using it\n",
+ __FUNCTION__, __LINE__));
+ dhd_tcpack_suppress_set(bus->dhd, TCPACK_SUP_OFF);
+ }
+#endif /* DHDTCPACK_SUPPRESS */
+
+ /* Add space for the SDPCM hardware/software headers */
+ PKTPUSH(osh, pkt, sdpcm_hdrlen);
+ ASSERT(ISALIGNED((uintptr)PKTDATA(osh, pkt), 2));
+
+ frame = (uint8*)PKTDATA(osh, pkt);
+ pkt_len = (uint16)PKTLEN(osh, pkt);
+
+#ifdef PKT_STATICS
+ len = (uint16)PKTLEN(osh, pkt);
+ switch (chan) {
+ case SDPCM_CONTROL_CHANNEL:
+ bus->tx_statics.ctrl_count++;
+ bus->tx_statics.ctrl_size += len;
+ break;
+ case SDPCM_DATA_CHANNEL:
+ bus->tx_statics.data_count++;
+ bus->tx_statics.data_size += len;
+ break;
+ case SDPCM_GLOM_CHANNEL:
+ bus->tx_statics.glom_count++;
+ bus->tx_statics.glom_size += len;
+ break;
+ case SDPCM_EVENT_CHANNEL:
+ bus->tx_statics.event_count++;
+ bus->tx_statics.event_size += len;
+ break;
+ case SDPCM_TEST_CHANNEL:
+ bus->tx_statics.test_count++;
+ bus->tx_statics.test_size += len;
+ break;
+
+ default:
+ break;
+ }
+#endif /* PKT_STATICS */
+#ifdef DHD_DEBUG
+ if (PKTPRIO(pkt) < ARRAYSIZE(tx_packets))
+ tx_packets[PKTPRIO(pkt)]++;
+#endif /* DHD_DEBUG */
+
+ /* align the data pointer, allocate a new packet if there is not enough space (new
+ * packet data pointer will be aligned thus no padding will be needed)
+ */
+ head_padding = (uintptr)frame % DHD_SDALIGN;
+ if (PKTHEADROOM(osh, pkt) < head_padding) {
+ head_padding = 0;
+ alloc_new_pkt = TRUE;
+ } else {
+ uint cur_chain_total_len;
+ int chain_tail_padding = 0;
+
+ /* All packets need to be aligned by DHD_SDALIGN */
+ modulo = (pkt_len + head_padding) % DHD_SDALIGN;
+ tail_padding = modulo > 0 ? (DHD_SDALIGN - modulo) : 0;
+
+ /* Total pkt chain length needs to be aligned by block size,
+ * unless it is a single pkt chain with total length less than one block size,
+ * which we prefer to send in byte mode.
+ *
+ * Do the chain alignment here if
+ * 1. This is the last pkt of the chain of multiple pkts or a single pkt.
+ * 2-1. This chain is of multiple pkts, or
+ * 2-2. This is a single pkt whose size is longer than one block size.
+ */
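+ /* Worked example (illustrative): with blocksize 512 and
+ * cur_chain_total_len 1300, modulo = 276 and chain_tail_padding = 236,
+ * rounding the glom up to 1536 (3 full blocks).
+ */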
+ cur_chain_total_len = prev_chain_total_len +
+ (head_padding + pkt_len + tail_padding);
+ if (last_chained_pkt && bus->blocksize != 0 &&
+ (cur_chain_total_len > (int)bus->blocksize || prev_chain_total_len > 0)) {
+ modulo = cur_chain_total_len % bus->blocksize;
+ chain_tail_padding = modulo > 0 ? (bus->blocksize - modulo) : 0;
+ }
+
+#ifdef DHDENABLE_TAILPAD
+ if (PKTTAILROOM(osh, pkt) < tail_padding) {
+ /* We don't have tail room to align by DHD_SDALIGN */
+ alloc_new_pkt = TRUE;
+ bus->tx_tailpad_pktget++;
+ } else if (PKTTAILROOM(osh, pkt) < tail_padding + chain_tail_padding) {
+ /* We have tail room for tail_padding of this pkt itself, but not for
+ * total pkt chain alignment by block size.
+ * Use the padding packet to avoid memory copy if applicable,
+ * otherwise, just allocate a new pkt.
+ */
+ if (bus->pad_pkt) {
+ *pad_pkt_len = chain_tail_padding;
+ bus->tx_tailpad_chain++;
+ } else {
+ alloc_new_pkt = TRUE;
+ bus->tx_tailpad_pktget++;
+ }
+ } else
+ /* This last pkt's tailroom is sufficient to hold both tail_padding
+ * of the pkt itself and chain_tail_padding of total pkt chain
+ */
+#endif /* DHDENABLE_TAILPAD */
+ tail_padding += chain_tail_padding;
+ }
+
+ DHD_INFO(("%s sdhdr len + orig_pkt_len %d h_pad %d t_pad %d pad_pkt_len %d\n",
+ __FUNCTION__, pkt_len, head_padding, tail_padding, *pad_pkt_len));
+
+ if (alloc_new_pkt) {
+ void *tmp_pkt;
+ int newpkt_size;
+ int cur_total_len;
+
+ ASSERT(*pad_pkt_len == 0);
+
+ DHD_INFO(("%s allocating new packet for padding\n", __FUNCTION__));
+
+ /* head pointer is aligned now, no padding needed */
+ head_padding = 0;
+
+ /* update the tail padding as it depends on the head padding: since a new packet
+ * is allocated, the head padding is no longer needed and the packet length changes
+ */
+
+ cur_total_len = prev_chain_total_len + pkt_len;
+ if (last_chained_pkt && bus->blocksize != 0 &&
+ (cur_total_len > (int)bus->blocksize || prev_chain_total_len > 0)) {
+ modulo = cur_total_len % bus->blocksize;
+ tail_padding = modulo > 0 ? (bus->blocksize - modulo) : 0;
+ } else {
+ modulo = pkt_len % DHD_SDALIGN;
+ tail_padding = modulo > 0 ? (DHD_SDALIGN - modulo) : 0;
+ }
+
+ newpkt_size = PKTLEN(osh, pkt) + bus->blocksize + DHD_SDALIGN;
+ bus->dhd->tx_realloc++;
+ tmp_pkt = PKTGET(osh, newpkt_size, TRUE);
+ if (tmp_pkt == NULL) {
+ DHD_ERROR(("failed to alloc new %d byte packet\n", newpkt_size));
+ return BCME_NOMEM;
+ }
+ PKTALIGN(osh, tmp_pkt, PKTLEN(osh, pkt), DHD_SDALIGN);
+ bcopy(PKTDATA(osh, pkt), PKTDATA(osh, tmp_pkt), PKTLEN(osh, pkt));
+ *new_pkt = tmp_pkt;
+ pkt = tmp_pkt;
+ }
+
+ if (head_padding)
+ PKTPUSH(osh, pkt, head_padding);
+
+ frame = (uint8*)PKTDATA(osh, pkt);
+ bzero(frame, head_padding + sdpcm_hdrlen);
+ pkt_len = (uint16)PKTLEN(osh, pkt);
+
+ /* the header has the following format
+ * 4-byte HW frame tag: length, ~length (for glom this is the total length)
+ *
+ * 8-byte HW extension flags (glom mode only), as follows:
+ * 2-byte packet length, excluding HW tag and padding
+ * 2-byte frame channel and frame flags (e.g. next frame following)
+ * 2-byte header length
+ * 2-byte tail padding size
+ *
+ * 8-byte SW frame tags, as follows:
+ * 4-byte flags: host tx seq, channel, data offset
+ * 4-byte flags: TBD
+ */
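+ /* Size sketch from the layout above: with tx glom the SDPCM header is
+ * 4 (HW tag) + 8 (HW extension) + 8 (SW tags) = 20 bytes; without glom
+ * it is 4 + 8 = 12 bytes, and data follows at that offset plus head padding.
+ */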
+
+ swhdr_offset = SDPCM_FRAMETAG_LEN;
+
+ /* hardware frame tag:
+ *
+ * in tx-glom mode, the dongle only checks the hardware frame tag in the first
+ * packet and treats it as the total length of the glom (including tail padding);
+ * for each packet in the glom, the packet length needs to be updated (see
+ * PKTSETLEN below)
+ *
+ * in non tx-glom mode, PKTLEN still needs to include the tail padding, as it is
+ * referred to in sdioh_request_buffer(). The tail length will be excluded in
+ * dhdsdio_txpkt_postprocess().
+ */
+#if defined(BCMSDIOH_TXGLOM_EXT)
+ if (bus->dhd->conf->txglom_bucket_size)
+ tail_padding = 0;
+#endif
+ *(uint16*)frame = (uint16)htol16(pkt_len);
+ *(((uint16*)frame) + 1) = (uint16)htol16(~pkt_len);
+ pkt_len += tail_padding;
+
+ /* hardware extension flags */
+ if (bus->txglom_enable) {
+ uint32 hwheader1;
+ uint32 hwheader2;
+#ifdef BCMSDIOH_TXGLOM_EXT
+ uint32 act_len = pkt_len - tail_padding;
+ uint32 real_pad = 0;
+ if(bus->dhd->conf->txglom_ext && !last_chained_pkt) {
+ tail_padding = 0;
+ if (first_frame == 0) {
+ // first pkt, add pad to bucket size - recv offset
+ pkt_len = bus->dhd->conf->txglom_bucket_size - TXGLOM_RECV_OFFSET;
+ } else {
+ // add pad to bucket size
+ pkt_len = bus->dhd->conf->txglom_bucket_size;
+ }
+ swhdr_offset += SDPCM_HWEXT_LEN;
+ hwheader1 = (act_len - SDPCM_FRAMETAG_LEN) | (last_chained_pkt << 24);
+ hwheader2 = (pkt_len - act_len) << 16;
+ htol32_ua_store(hwheader1, frame + SDPCM_FRAMETAG_LEN);
+ htol32_ua_store(hwheader2, frame + SDPCM_FRAMETAG_LEN + 4);
+ real_pad = pkt_len - act_len;
+
+ if (PKTTAILROOM(osh, pkt) < real_pad) {
+ DHD_INFO(("%s : insufficient tailroom %d for %d real_pad\n",
+ __func__, (int)PKTTAILROOM(osh, pkt), real_pad));
+ if (PKTPADTAILROOM(osh, pkt, real_pad)) {
+ DHD_ERROR(("CHK1: padding error size %d\n", real_pad));
+ } else
+ frame = (uint8 *)PKTDATA(osh, pkt);
+ }
+ } else
+#endif
+ {
+ swhdr_offset += SDPCM_HWEXT_LEN;
+ hwheader1 = (pkt_len - SDPCM_FRAMETAG_LEN - tail_padding) |
+ (last_chained_pkt << 24);
+ hwheader2 = (tail_padding) << 16;
+ htol32_ua_store(hwheader1, frame + SDPCM_FRAMETAG_LEN);
+ htol32_ua_store(hwheader2, frame + SDPCM_FRAMETAG_LEN + 4);
+ }
+ }
+ PKTSETLEN((osh), (pkt), (pkt_len));
+
+ /* software frame tags */
+ swheader = ((chan << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK)
+ | (txseq % SDPCM_SEQUENCE_WRAP) |
+ (((head_padding + sdpcm_hdrlen) << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK);
+ htol32_ua_store(swheader, frame + swhdr_offset);
+ htol32_ua_store(0, frame + swhdr_offset + sizeof(swheader));
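+ /* Packing example (assumes the usual sdpcm bit layout: seq in bits 0-7,
+ * channel in bits 8-11, data offset in bits 24-31): tx seq 5 on the
+ * data channel (2) with doff 20 packs as swheader 0x14000205.
+ */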
+
+ return pkt_len;
+}
+
+static int dhdsdio_txpkt_postprocess(dhd_bus_t *bus, void *pkt)
+{
+ osl_t *osh;
+ uint8 *frame;
+ int data_offset;
+ int tail_padding;
+ int swhdr_offset = SDPCM_FRAMETAG_LEN + (bus->txglom_enable ? SDPCM_HWEXT_LEN : 0);
+
+ (void)osh;
+ osh = bus->dhd->osh;
+
+ /* restore pkt buffer pointer, but keeps the header pushed by dhd_prot_hdrpush */
+ frame = (uint8*)PKTDATA(osh, pkt);
+
+ DHD_INFO(("%s PKTLEN before postprocess %d",
+ __FUNCTION__, PKTLEN(osh, pkt)));
+
+ /* PKTLEN still includes tail_padding, so exclude it.
+ * We shall have head_padding + original pkt_len for PKTLEN afterwards.
+ */
+ if (bus->txglom_enable) {
+ /* txglom pkts have tail_padding length in HW ext header */
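+ /* this mirrors the preprocess side, where hwheader2 carries
+ * tail_padding in its upper 16 bits
+ */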
+ tail_padding = ltoh32_ua(frame + SDPCM_FRAMETAG_LEN + 4) >> 16;
+ PKTSETLEN(osh, pkt, PKTLEN(osh, pkt) - tail_padding);
+ DHD_INFO((" txglom pkt: tail_padding %d PKTLEN %d\n",
+ tail_padding, PKTLEN(osh, pkt)));
+ } else {
+ /* non-txglom pkts have head_padding + original pkt length in HW frame tag.
+ * We cannot refer to this field for txglom pkts as the first pkt of the chain will
+ * have the field for the total length of the chain.
+ */
+ PKTSETLEN(osh, pkt, *(uint16*)frame);
+ DHD_INFO((" non-txglom pkt: HW frame tag len %d after PKTLEN %d\n",
+ *(uint16*)frame, PKTLEN(osh, pkt)));
+ }
+
+ data_offset = ltoh32_ua(frame + swhdr_offset);
+ data_offset = (data_offset & SDPCM_DOFFSET_MASK) >> SDPCM_DOFFSET_SHIFT;
+ /* Get rid of sdpcm header + head_padding */
+ PKTPULL(osh, pkt, data_offset);
+
+ DHD_INFO(("%s data_offset %d, PKTLEN %d\n",
+ __FUNCTION__, data_offset, PKTLEN(osh, pkt)));
+
+ return BCME_OK;
+}
+
+static int dhdsdio_txpkt(dhd_bus_t *bus, uint chan, void** pkts, int num_pkt, bool free_pkt)
+{
+ int i;
+ int ret = 0;
+ osl_t *osh;
+ bcmsdh_info_t *sdh;
+ void *pkt = NULL;
+ void *pkt_chain;
+ int total_len = 0;
+ void *head_pkt = NULL;
+ void *prev_pkt = NULL;
+ int pad_pkt_len = 0;
+ int new_pkt_num = 0;
+ void *new_pkts[MAX_TX_PKTCHAIN_CNT];
+ bool wlfc_enabled = FALSE;
+
+ if (bus->dhd->dongle_reset)
+ return BCME_NOTREADY;
+
+ if (num_pkt <= 0)
+ return BCME_BADARG;
+
+ sdh = bus->sdh;
+ osh = bus->dhd->osh;
+ /* init new_pkts[0] to make some compiler happy, not necessary as we check new_pkt_num */
+ new_pkts[0] = NULL;
+
+ for (i = 0; i < num_pkt; i++) {
+ int pkt_len;
+ bool last_pkt;
+ void *new_pkt = NULL;
+
+ pkt = pkts[i];
+ ASSERT(pkt);
+ last_pkt = (i == num_pkt - 1);
+ pkt_len = dhdsdio_txpkt_preprocess(bus, pkt, chan, bus->tx_seq + i,
+ total_len, last_pkt, &pad_pkt_len, &new_pkt
+#if defined(BCMSDIOH_TXGLOM_EXT)
+ , i
+#endif
+ );
+ if (pkt_len <= 0)
+ goto done;
+ if (new_pkt) {
+ pkt = new_pkt;
+ new_pkts[new_pkt_num++] = new_pkt;
+ }
+ total_len += pkt_len;
+
+ PKTSETNEXT(osh, pkt, NULL);
+ /* insert the packet into the list */
+ head_pkt ? PKTSETNEXT(osh, prev_pkt, pkt) : (head_pkt = pkt);
+ prev_pkt = pkt;
+
+ }
+
+ /* Update the HW frame tag (total length) in the first pkt of the glom */
+ if (bus->txglom_enable) {
+ uint8 *frame;
+
+ total_len += pad_pkt_len;
+ frame = (uint8*)PKTDATA(osh, head_pkt);
+ *(uint16*)frame = (uint16)htol16(total_len);
+ *(((uint16*)frame) + 1) = (uint16)htol16(~total_len);
+
+ }
+
+#ifdef DHDENABLE_TAILPAD
+ /* if a padding packet is needed, insert it at the end of the linked list */
+ if (pad_pkt_len) {
+ PKTSETLEN(osh, bus->pad_pkt, pad_pkt_len);
+ PKTSETNEXT(osh, pkt, bus->pad_pkt);
+ }
+#endif /* DHDENABLE_TAILPAD */
+
+ /* dhd_bcmsdh_send_buf ignores the buffer pointer if the packet
+ * parameter is not NULL; for a non packet chain we pass a NULL pkt pointer
+ * so it will take the aligned length and buffer pointer.
+ */
+ pkt_chain = PKTNEXT(osh, head_pkt) ? head_pkt : NULL;
+#ifdef TPUT_MONITOR
+ if ((bus->dhd->conf->data_drop_mode == TXPKT_DROP) && (total_len > 500))
+ ret = BCME_OK;
+ else
+#endif
+ ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+ PKTDATA(osh, head_pkt), total_len, pkt_chain, NULL, NULL, TXRETRIES);
+ if (ret == BCME_OK)
+ bus->tx_seq = (bus->tx_seq + num_pkt) % SDPCM_SEQUENCE_WRAP;
+
+ /* if a padding packet was needed, remove it from the linked list as it is not a data pkt */
+ if (pad_pkt_len && pkt)
+ PKTSETNEXT(osh, pkt, NULL);
+
+done:
+ pkt = head_pkt;
+ while (pkt) {
+ void *pkt_next = PKTNEXT(osh, pkt);
+ PKTSETNEXT(osh, pkt, NULL);
+ dhdsdio_txpkt_postprocess(bus, pkt);
+ pkt = pkt_next;
+ }
+
+ /* new packets might be allocated due to insufficient room for padding, but we
+ * still have to indicate the original packets to the upper layer
+ */
+ for (i = 0; i < num_pkt; i++) {
+ pkt = pkts[i];
+ wlfc_enabled = FALSE;
+#ifdef PROP_TXSTATUS
+ if (DHD_PKTTAG_WLFCPKT(PKTTAG(pkt))) {
+ wlfc_enabled = (dhd_wlfc_txcomplete(bus->dhd, pkt, ret == 0) !=
+ WLFC_UNSUPPORTED);
+ }
+#endif /* PROP_TXSTATUS */
+ if (!wlfc_enabled) {
+ PKTSETNEXT(osh, pkt, NULL);
+ dhd_txcomplete(bus->dhd, pkt, ret != 0);
+ if (free_pkt)
+ PKTFREE(osh, pkt, TRUE);
+ }
+ }
+
+ for (i = 0; i < new_pkt_num; i++)
+ PKTFREE(osh, new_pkts[i], TRUE);
+
+ return ret;
+}
+
+static uint
+dhdsdio_sendfromq(dhd_bus_t *bus, uint maxframes)
+{
+ uint cnt = 0;
+ uint8 tx_prec_map;
+ uint16 txpktqlen = 0;
+ uint32 intstatus = 0;
+ uint retries = 0;
+ osl_t *osh;
+ dhd_pub_t *dhd = bus->dhd;
+ sdpcmd_regs_t *regs = bus->regs;
+#if defined(DHD_LOSSLESS_ROAMING) || defined(DHD_PKTDUMP_TOFW)
+ uint8 *pktdata;
+ struct ether_header *eh;
+#ifdef BDC
+ struct bdc_header *bdc_header;
+ uint8 data_offset;
+#endif
+#endif /* DHD_LOSSLESS_ROAMING || DHD_PKTDUMP_TOFW */
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (!KSO_ENAB(bus)) {
+ DHD_ERROR(("%s: Device asleep\n", __FUNCTION__));
+ return BCME_NODEVICE;
+ }
+
+ osh = dhd->osh;
+ tx_prec_map = ~bus->flowcontrol;
+#ifdef DHD_LOSSLESS_ROAMING
+ tx_prec_map &= dhd->dequeue_prec_map;
+#endif /* DHD_LOSSLESS_ROAMING */
+ for (cnt = 0; (cnt < maxframes) && DATAOK(bus);) {
+ int i;
+ int num_pkt = 1;
+ void *pkts[MAX_TX_PKTCHAIN_CNT];
+ int prec_out;
+ uint datalen = 0;
+
+ dhd_os_sdlock_txq(bus->dhd);
+ if (bus->txglom_enable) {
+ uint32 glomlimit = (uint32)bus->txglomsize;
+#if defined(BCMSDIOH_STD)
+ if (bus->blocksize == 64) {
+ glomlimit = MIN((uint32)bus->txglomsize, BLK_64_MAXTXGLOM);
+ }
+#endif /* BCMSDIOH_STD */
+ num_pkt = MIN((uint32)DATABUFCNT(bus), glomlimit);
+ num_pkt = MIN(num_pkt, ARRAYSIZE(pkts));
+ }
+ num_pkt = MIN(num_pkt, pktq_mlen(&bus->txq, tx_prec_map));
+ for (i = 0; i < num_pkt; i++) {
+ pkts[i] = pktq_mdeq(&bus->txq, tx_prec_map, &prec_out);
+ if (!pkts[i]) {
+ DHD_ERROR(("%s: pktq_mlen non-zero when no pkt\n",
+ __FUNCTION__));
+ ASSERT(0);
+ break;
+ }
+#if defined(DHD_LOSSLESS_ROAMING) || defined(DHD_PKTDUMP_TOFW)
+ pktdata = (uint8 *)PKTDATA(osh, pkts[i]);
+#ifdef BDC
+ /* Skip BDC header */
+ bdc_header = (struct bdc_header *)pktdata;
+ data_offset = bdc_header->dataOffset;
+ pktdata += BDC_HEADER_LEN + (data_offset << 2);
+#endif
+ eh = (struct ether_header *)pktdata;
+#ifdef DHD_LOSSLESS_ROAMING
+ if (eh->ether_type == hton16(ETHER_TYPE_802_1X)) {
+ uint8 prio = (uint8)PKTPRIO(pkts[i]);
+
+ /* Restore to original priority for 802.1X packet */
+ if (prio == PRIO_8021D_NC) {
+ PKTSETPRIO(pkts[i], dhd->prio_8021x);
+#ifdef BDC
+ /* Restore to original priority in BDC header */
+ bdc_header->priority =
+ (dhd->prio_8021x & BDC_PRIORITY_MASK);
+#endif
+ }
+ }
+#endif /* DHD_LOSSLESS_ROAMING */
+#ifdef DHD_PKTDUMP_TOFW
+ dhd_dump_pkt(bus->dhd, BDC_GET_IF_IDX(bdc_header), pktdata,
+ (uint32)PKTLEN(bus->dhd->osh, pkts[i]), TRUE, NULL, NULL);
+#endif
+#endif /* DHD_LOSSLESS_ROAMING || DHD_PKTDUMP_TOFW */
+ if (!bus->dhd->conf->orphan_move)
+ PKTORPHAN(pkts[i], bus->dhd->conf->tsq);
+ datalen += PKTLEN(osh, pkts[i]);
+ }
+ dhd_os_sdunlock_txq(bus->dhd);
+
+ if (i == 0)
+ break;
+ if (dhdsdio_txpkt(bus, SDPCM_DATA_CHANNEL, pkts, i, TRUE) != BCME_OK)
+ dhd->tx_errors++;
+ else {
+ dhd->dstats.tx_bytes += datalen;
+ bus->txglomframes++;
+ bus->txglompkts += num_pkt;
+#ifdef PKT_STATICS
+ bus->tx_statics.glom_cnt_us[num_pkt-1] =
+ (bus->tx_statics.glom_cnt[num_pkt-1]*bus->tx_statics.glom_cnt_us[num_pkt-1]
+ + bcmsdh_get_spend_time(bus->sdh))/(bus->tx_statics.glom_cnt[num_pkt-1] + 1);
+#endif
+ }
+ cnt += i;
+#ifdef PKT_STATICS
+ if (num_pkt) {
+ bus->tx_statics.glom_cnt[num_pkt-1]++;
+ if (num_pkt > bus->tx_statics.glom_max)
+ bus->tx_statics.glom_max = num_pkt;
+ }
+#endif
+
+ /* In poll mode, need to check for other events */
+ if (!bus->intr && cnt)
+ {
+ /* Check device status, signal pending interrupt */
+ R_SDREG(intstatus, &regs->intstatus, retries);
+ bus->f2txdata++;
+ if (bcmsdh_regfail(bus->sdh))
+ break;
+ if (intstatus & bus->hostintmask)
+ bus->ipend = TRUE;
+ }
+
+ }
+
+ if (dhd_doflow) {
+ dhd_os_sdlock_txq(bus->dhd);
+ txpktqlen = pktq_n_pkts_tot(&bus->txq);
+ dhd_os_sdunlock_txq(bus->dhd);
+ }
+
+ /* Do flow-control if needed */
+ if (dhd->up && (dhd->busstate == DHD_BUS_DATA) && (txpktqlen < FCLOW)) {
+ bool wlfc_enabled = FALSE;
+#ifdef PROP_TXSTATUS
+ wlfc_enabled = (dhd_wlfc_flowcontrol(dhd, OFF, TRUE) != WLFC_UNSUPPORTED);
+#endif
+ if (!wlfc_enabled && dhd_doflow && dhd->txoff) {
+ dhd_txflowcontrol(dhd, ALL_INTERFACES, OFF);
+ }
+ }
+
+ return cnt;
+}
+
+static void
+dhdsdio_sendpendctl(dhd_bus_t *bus)
+{
+ bcmsdh_info_t *sdh = bus->sdh;
+ int ret;
+ uint8* frame_seq = bus->ctrl_frame_buf + SDPCM_FRAMETAG_LEN;
+
+ if (bus->txglom_enable)
+ frame_seq += SDPCM_HWEXT_LEN;
+
+ if (*frame_seq != bus->tx_seq) {
+ DHD_INFO(("%s IOCTL frame seq lag detected!"
+ " frm_seq:%d != bus->tx_seq:%d, corrected\n",
+ __FUNCTION__, *frame_seq, bus->tx_seq));
+ *frame_seq = bus->tx_seq;
+ }
+
+ ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+ (uint8 *)bus->ctrl_frame_buf, (uint32)bus->ctrl_frame_len,
+ NULL, NULL, NULL, 1);
+ if (ret == BCME_OK)
+ bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
+
+ bus->ctrl_frame_stat = FALSE;
+ dhd_wait_event_wakeup(bus->dhd);
+}
+
+int
+dhd_bus_txctl(struct dhd_bus *bus, uchar *msg, uint msglen)
+{
+ static int err_nodevice = 0;
+ uint8 *frame;
+ uint16 len;
+ uint32 swheader;
+ uint8 doff = 0;
+ int ret = -1;
+ uint8 sdpcm_hdrlen = bus->txglom_enable ? SDPCM_HDRLEN_TXGLOM : SDPCM_HDRLEN;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (bus->dhd->dongle_reset)
+ return -EIO;
+
+ /* Back up the pointer to make room for the bus header */
+ frame = msg - sdpcm_hdrlen;
+ len = (msglen += sdpcm_hdrlen);
+
+ /* Add alignment padding (optional for ctl frames) */
+ if (dhd_alignctl) {
+ if ((doff = ((uintptr)frame % DHD_SDALIGN))) {
+ frame -= doff;
+ len += doff;
+ msglen += doff;
+ bzero(frame, doff + sdpcm_hdrlen);
+ }
+ ASSERT(doff < DHD_SDALIGN);
+ }
+ doff += sdpcm_hdrlen;
+
+#ifndef BCMSPI
+ /* Round send length to next SDIO block */
+ if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
+ uint16 pad = bus->blocksize - (len % bus->blocksize);
+ if ((pad <= bus->roundup) && (pad < bus->blocksize))
+ len += pad;
+ } else if (len % DHD_SDALIGN) {
+ len += DHD_SDALIGN - (len % DHD_SDALIGN);
+ }
+#endif /* BCMSPI */
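+ /* Rounding example (illustrative): blocksize 64, len 100 -> pad 28;
+ * if bus->roundup >= 28 the frame goes out as 128 bytes (two full blocks).
+ */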
+
+ /* Satisfy length-alignment requirements */
+ if (forcealign && (len & (ALIGNMENT - 1)))
+ len = ROUNDUP(len, ALIGNMENT);
+
+ ASSERT(ISALIGNED((uintptr)frame, 2));
+
+ /* Need to lock here to protect txseq and SDIO tx calls */
+ dhd_os_sdlock(bus->dhd);
+ if (bus->dhd->conf->txctl_tmo_fix > 0 && !TXCTLOK(bus)) {
+ bus->ctrl_wait = TRUE;
+ dhd_os_sdunlock(bus->dhd);
+ wait_event_interruptible_timeout(bus->ctrl_tx_wait, TXCTLOK(bus),
+ msecs_to_jiffies(bus->dhd->conf->txctl_tmo_fix));
+ dhd_os_sdlock(bus->dhd);
+ bus->ctrl_wait = FALSE;
+ }
+
+ BUS_WAKE(bus);
+
+ /* Make sure backplane clock is on */
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+ /* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */
+ *(uint16*)frame = htol16((uint16)msglen);
+ *(((uint16*)frame) + 1) = htol16(~msglen);
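+ /* Tag example (illustrative): msglen 0x0040 stores bytes 40 00 bf ff,
+ * i.e. the length followed by its one's complement, both little-endian.
+ */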
+
+ if (bus->txglom_enable) {
+ uint32 hwheader1, hwheader2;
+ /* Software tag: channel, sequence number, data offset */
+ swheader = ((SDPCM_CONTROL_CHANNEL << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK)
+ | bus->tx_seq
+ | ((doff << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK);
+ htol32_ua_store(swheader, frame + SDPCM_FRAMETAG_LEN + SDPCM_HWEXT_LEN);
+ htol32_ua_store(0, frame + SDPCM_FRAMETAG_LEN
+ + SDPCM_HWEXT_LEN + sizeof(swheader));
+
+ hwheader1 = (msglen - SDPCM_FRAMETAG_LEN) | (1 << 24);
+ hwheader2 = (len - (msglen)) << 16;
+ htol32_ua_store(hwheader1, frame + SDPCM_FRAMETAG_LEN);
+ htol32_ua_store(hwheader2, frame + SDPCM_FRAMETAG_LEN + 4);
+
+ *(uint16*)frame = htol16(len);
+ *(((uint16*)frame) + 1) = htol16(~(len));
+ } else {
+ /* Software tag: channel, sequence number, data offset */
+ swheader = ((SDPCM_CONTROL_CHANNEL << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK)
+ | bus->tx_seq | ((doff << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK);
+ htol32_ua_store(swheader, frame + SDPCM_FRAMETAG_LEN);
+ htol32_ua_store(0, frame + SDPCM_FRAMETAG_LEN + sizeof(swheader));
+ }
+
+ if (!TXCTLOK(bus))
+ {
+ DHD_INFO(("%s: No bus credit bus->tx_max %d, bus->tx_seq %d\n",
+ __FUNCTION__, bus->tx_max, bus->tx_seq));
+ bus->ctrl_frame_stat = TRUE;
+ /* Send from dpc */
+ bus->ctrl_frame_buf = frame;
+ bus->ctrl_frame_len = len;
+
+#if defined(NDIS)
+ dhd_os_sdunlock(bus->dhd);
+ dhd_wait_for_event(bus->dhd, &bus->ctrl_frame_stat);
+ dhd_os_sdlock(bus->dhd);
+#else
+ if (!bus->dpc_sched) {
+ bus->dpc_sched = TRUE;
+ dhd_sched_dpc(bus->dhd);
+ }
+ if (bus->ctrl_frame_stat) {
+ dhd_wait_for_event(bus->dhd, &bus->ctrl_frame_stat);
+ }
+#endif /* NDIS */
+
+ if (bus->ctrl_frame_stat == FALSE) {
+ DHD_INFO(("%s: ctrl_frame_stat == FALSE\n", __FUNCTION__));
+ ret = 0;
+ } else {
+ bus->dhd->txcnt_timeout++;
+ if (!bus->dhd->hang_was_sent) {
+#ifdef CUSTOMER_HW4_DEBUG
+ /* XXX Add Debug code for find root cause from CSP:565333 */
+ uint32 status, retry = 0;
+ R_SDREG(status, &bus->regs->intstatus, retry);
+ DHD_TRACE_HW4(("%s: txcnt_timeout, INT status=0x%08X\n",
+ __FUNCTION__, status));
+ DHD_TRACE_HW4(("%s : tx_max : %d, tx_seq : %d, clkstate : %d \n",
+ __FUNCTION__, bus->tx_max, bus->tx_seq, bus->clkstate));
+#endif /* CUSTOMER_HW4_DEBUG */
+ DHD_ERROR(("%s: ctrl_frame_stat == TRUE txcnt_timeout=%d\n",
+ __FUNCTION__, bus->dhd->txcnt_timeout));
+#ifdef BCMSDIO_RXLIM_POST
+ DHD_ERROR(("%s: rxlim_en=%d, rxlim enable=%d, rxlim_addr=%d\n",
+ __FUNCTION__,
+ bus->dhd->conf->rxlim_en, bus->rxlim_en, bus->rxlim_addr));
+#endif /* BCMSDIO_RXLIM_POST */
+ }
+#ifdef DHD_FW_COREDUMP
+ /* Collect socram dump */
+ if ((bus->dhd->memdump_enabled) &&
+ (bus->dhd->txcnt_timeout >= MAX_CNTL_TX_TIMEOUT)) {
+ /* collect core dump */
+ bus->dhd->memdump_type = DUMP_TYPE_RESUMED_ON_TIMEOUT_TX;
+ dhd_os_sdunlock(bus->dhd);
+ dhd_bus_mem_dump(bus->dhd);
+ dhd_os_sdlock(bus->dhd);
+ }
+#endif /* DHD_FW_COREDUMP */
+ ret = -1;
+ bus->ctrl_frame_stat = FALSE;
+ goto done;
+ }
+ }
+
+ bus->dhd->txcnt_timeout = 0;
+ bus->ctrl_frame_stat = TRUE;
+
+ if (ret == -1) {
+#ifdef DHD_DEBUG
+ if (DHD_BYTES_ON() && DHD_CTL_ON()) {
+ prhex("Tx Frame", frame, len);
+ } else if (DHD_HDRS_ON()) {
+ prhex("TxHdr", frame, MIN(len, 16));
+ }
+#endif
+#ifdef PKT_STATICS
+ bus->tx_statics.ctrl_count++;
+ bus->tx_statics.ctrl_size += len;
+#endif
+ ret = dhd_bcmsdh_send_buffer(bus, frame, len);
+ }
+ bus->ctrl_frame_stat = FALSE;
+
+done:
+ if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched &&
+ NO_OTHER_ACTIVE_BUS_USER(bus)) {
+ bus->activity = FALSE;
+ dhdsdio_bussleep(bus, TRUE);
+ dhdsdio_clkctl(bus, CLK_NONE, FALSE);
+ }
+
+ dhd_os_sdunlock(bus->dhd);
+
+ /* XXX Need to validate return code (ranges) */
+ if (ret)
+ bus->dhd->tx_ctlerrs++;
+ else
+ bus->dhd->tx_ctlpkts++;
+
+ if (bus->dhd->txcnt_timeout >= MAX_CNTL_TX_TIMEOUT) {
+#ifdef DHD_PM_CONTROL_FROM_FILE
+ if (g_pm_control == TRUE) {
+ return -BCME_ERROR;
+ } else {
+ return -ETIMEDOUT;
+ }
+#else
+ return -ETIMEDOUT;
+#endif /* DHD_PM_CONTROL_FROM_FILE */
+ }
+ if (ret == BCME_NODEVICE)
+ err_nodevice++;
+ else
+ err_nodevice = 0;
+
+ return ret ? err_nodevice >= ERROR_BCME_NODEVICE_MAX ? -ETIMEDOUT : -EIO : 0;
+}
+
+int
+dhd_bus_rxctl(struct dhd_bus *bus, uchar *msg, uint msglen)
+{
+ int timeleft;
+ uint rxlen = 0;
+ static uint cnt = 0;
+ uint max_rxcnt;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (bus->dhd->dongle_reset)
+ return -EIO;
+
+ /* Wait until control frame is available */
+ timeleft = dhd_os_ioctl_resp_wait(bus->dhd, &bus->rxlen);
+
+ dhd_os_sdlock(bus->dhd);
+ rxlen = bus->rxlen;
+ bcopy(bus->rxctl, msg, MIN(msglen, rxlen));
+ bus->rxlen = 0;
+ dhd_os_sdunlock(bus->dhd);
+
+ if (bus->dhd->conf->ctrl_resched > 0 && !rxlen && timeleft == 0) {
+ cnt++;
+ if (cnt <= bus->dhd->conf->ctrl_resched) {
+ uint32 status, retry = 0;
+ R_SDREG(status, &bus->regs->intstatus, retry);
+ if ((status & I_HMB_HOST_INT) || PKT_AVAILABLE(bus, status)) {
+ DHD_ERROR(("%s: reschedule dhd_dpc, cnt=%d, status=0x%x\n",
+ __FUNCTION__, cnt, status));
+ bus->ipend = TRUE;
+ bus->dpc_sched = TRUE;
+ dhd_sched_dpc(bus->dhd);
+
+ /* Wait until control frame is available */
+ timeleft = dhd_os_ioctl_resp_wait(bus->dhd, &bus->rxlen);
+
+ dhd_os_sdlock(bus->dhd);
+ rxlen = bus->rxlen;
+ bcopy(bus->rxctl, msg, MIN(msglen, rxlen));
+ bus->rxlen = 0;
+ dhd_os_sdunlock(bus->dhd);
+ }
+ }
+ } else {
+ cnt = 0;
+ }
+
+ if (rxlen) {
+ DHD_CTL(("%s: resumed on rxctl frame, got %d expected %d\n",
+ __FUNCTION__, rxlen, msglen));
+ } else {
+ if (timeleft == 0) {
+#ifdef DHD_DEBUG
+ uint32 status, retry = 0;
+ R_SDREG(status, &bus->regs->intstatus, retry);
+ DHD_ERROR(("%s: resumed on timeout, INT status=0x%08X\n",
+ __FUNCTION__, status));
+#else
+ DHD_ERROR(("%s: resumed on timeout\n", __FUNCTION__));
+#endif /* DHD_DEBUG */
+ if (!bus->dhd->dongle_trap_occured) {
+#ifdef DHD_FW_COREDUMP
+ bus->dhd->memdump_type = DUMP_TYPE_RESUMED_ON_TIMEOUT;
+#endif /* DHD_FW_COREDUMP */
+ dhd_os_sdlock(bus->dhd);
+ dhdsdio_checkdied(bus, NULL, 0);
+ dhd_os_sdunlock(bus->dhd);
+ }
+ } else {
+ DHD_CTL(("%s: resumed for unknown reason?\n", __FUNCTION__));
+ if (!bus->dhd->dongle_trap_occured) {
+#ifdef DHD_FW_COREDUMP
+ bus->dhd->memdump_type = DUMP_TYPE_RESUMED_UNKNOWN;
+#endif /* DHD_FW_COREDUMP */
+ dhd_os_sdlock(bus->dhd);
+ dhdsdio_checkdied(bus, NULL, 0);
+ dhd_os_sdunlock(bus->dhd);
+ }
+ }
+#ifdef DHD_FW_COREDUMP
+ /* Dump the ram image */
+ if (bus->dhd->memdump_enabled && !bus->dhd->dongle_trap_occured)
+ dhdsdio_mem_dump(bus);
+#endif /* DHD_FW_COREDUMP */
+ }
+ if (timeleft == 0) {
+ if (rxlen == 0)
+ bus->dhd->rxcnt_timeout++;
+ DHD_ERROR(("%s: rxcnt_timeout=%d, rxlen=%d\n", __FUNCTION__,
+ bus->dhd->rxcnt_timeout, rxlen));
+#ifdef DHD_FW_COREDUMP
+ /* collect socram dump */
+ if (bus->dhd->memdump_enabled) {
+ bus->dhd->memdump_type = DUMP_TYPE_RESUMED_ON_TIMEOUT_RX;
+ dhd_bus_mem_dump(bus->dhd);
+ }
+#endif /* DHD_FW_COREDUMP */
+ } else {
+ bus->dhd->rxcnt_timeout = 0;
+ }
+
+ if (rxlen)
+ bus->dhd->rx_ctlpkts++;
+ else
+ bus->dhd->rx_ctlerrs++;
+
+ if (bus->dhd->conf->rxcnt_timeout)
+ max_rxcnt = bus->dhd->conf->rxcnt_timeout;
+ else
+ max_rxcnt = MAX_CNTL_RX_TIMEOUT;
+ if (bus->dhd->rxcnt_timeout >= max_rxcnt) {
+#ifdef DHD_PM_CONTROL_FROM_FILE
+ if (g_pm_control == TRUE) {
+ return -BCME_ERROR;
+ } else {
+ return -ETIMEDOUT;
+ }
+#else
+ return -ETIMEDOUT;
+#endif /* DHD_PM_CONTROL_FROM_FILE */
+ }
+ if (bus->dhd->dongle_trap_occured)
+ return -EREMOTEIO;
+
+ return rxlen ? (int)rxlen : -EIO; /* XXX Returns EIO error */
+}
+
+/* IOVar table */
+enum {
+ IOV_INTR = 1,
+ IOV_POLLRATE,
+ IOV_SDREG,
+ IOV_SBREG,
+ IOV_SDCIS,
+#ifdef DHD_BUS_MEM_ACCESS
+ IOV_MEMBYTES,
+#endif /* DHD_BUS_MEM_ACCESS */
+ IOV_RAMSIZE,
+ IOV_RAMSTART,
+#ifdef DHD_DEBUG
+ IOV_CHECKDIED,
+ IOV_SERIALCONS,
+#endif /* DHD_DEBUG */
+ IOV_SET_DOWNLOAD_STATE,
+ IOV_SOCRAM_STATE,
+ IOV_FORCEEVEN,
+ IOV_SDIOD_DRIVE,
+ IOV_READAHEAD,
+ IOV_SDRXCHAIN,
+ IOV_ALIGNCTL,
+ IOV_SDALIGN,
+ IOV_DEVRESET,
+ IOV_CPU,
+#if defined(USE_SDIOFIFO_IOVAR) || defined(BCMINTERNAL)
+ IOV_WATERMARK,
+ IOV_MESBUSYCTRL,
+#endif /* USE_SDIOFIFO_IOVAR || BCMINTERNAL */
+#ifdef BCMINTERNAL
+ IOV_SDRESET,
+ IOV_SDABORT,
+ IOV_FIRSTREAD,
+ IOV_TSTOPH,
+ IOV_RETRYDATA,
+ IOV_CHECKFIFO,
+ IOV_DOFLOW,
+ IOV_SDF2,
+ IOV_CLOCKPOLL,
+ IOV_MAXRAMSIZE,
+ IOV_SIALL,
+#endif /* BCMINTERNAL */
+#ifdef SDTEST
+ IOV_PKTGEN,
+ IOV_EXTLOOP,
+#endif /* SDTEST */
+ IOV_SPROM,
+ IOV_TXBOUND,
+ IOV_RXBOUND,
+ IOV_TXMINMAX,
+ IOV_IDLETIME,
+ IOV_IDLECLOCK,
+ IOV_SD1IDLE,
+ IOV_SLEEP,
+ IOV_DONGLEISOLATION,
+ IOV_KSO,
+ IOV_DEVSLEEP,
+ IOV_DEVCAP,
+ IOV_VARS,
+#ifdef SOFTAP
+ IOV_FWPATH,
+#endif
+ IOV_TXGLOMSIZE,
+ IOV_TXGLOMMODE,
+ IOV_HANGREPORT,
+ IOV_TXINRX_THRES,
+ IOV_SDIO_SUSPEND,
+#if defined(DEBUGGER) || defined(DHD_DSCOPE)
+ IOV_GDB_SERVER, /**< starts gdb server on given interface */
+#endif /* DEBUGGER || DHD_DSCOPE */
+};
+
+const bcm_iovar_t dhdsdio_iovars[] = {
+ {"intr", IOV_INTR, 0, 0, IOVT_BOOL, 0 },
+ {"sleep", IOV_SLEEP, 0, 0, IOVT_BOOL, 0 },
+ {"pollrate", IOV_POLLRATE, 0, 0, IOVT_UINT32, 0 },
+ {"idletime", IOV_IDLETIME, 0, 0, IOVT_INT32, 0 },
+ {"idleclock", IOV_IDLECLOCK, 0, 0, IOVT_INT32, 0 },
+ {"sd1idle", IOV_SD1IDLE, 0, 0, IOVT_BOOL, 0 },
+#ifdef DHD_BUS_MEM_ACCESS
+ {"membytes", IOV_MEMBYTES, 0, 0, IOVT_BUFFER, 2 * sizeof(int) },
+#endif /* DHD_BUS_MEM_ACCESS */
+ {"ramsize", IOV_RAMSIZE, 0, 0, IOVT_UINT32, 0 },
+ {"ramstart", IOV_RAMSTART, 0, 0, IOVT_UINT32, 0 },
+ {"dwnldstate", IOV_SET_DOWNLOAD_STATE, 0, 0, IOVT_BOOL, 0 },
+ {"socram_state", IOV_SOCRAM_STATE, 0, 0, IOVT_BOOL, 0 },
+ {"vars", IOV_VARS, 0, 0, IOVT_BUFFER, 0 },
+ {"sdiod_drive", IOV_SDIOD_DRIVE, 0, 0, IOVT_UINT32, 0 },
+ {"readahead", IOV_READAHEAD, 0, 0, IOVT_BOOL, 0 },
+ {"sdrxchain", IOV_SDRXCHAIN, 0, 0, IOVT_BOOL, 0 },
+ {"alignctl", IOV_ALIGNCTL, 0, 0, IOVT_BOOL, 0 },
+ {"sdalign", IOV_SDALIGN, 0, 0, IOVT_BOOL, 0 },
+ {"devreset", IOV_DEVRESET, 0, 0, IOVT_BOOL, 0 },
+#ifdef DHD_DEBUG
+ {"sdreg", IOV_SDREG, 0, 0, IOVT_BUFFER, sizeof(sdreg_t) },
+ {"sbreg", IOV_SBREG, 0, 0, IOVT_BUFFER, sizeof(sdreg_t) },
+ {"sd_cis", IOV_SDCIS, 0, 0, IOVT_BUFFER, DHD_IOCTL_MAXLEN },
+ {"forcealign", IOV_FORCEEVEN, 0, 0, IOVT_BOOL, 0 },
+ {"txbound", IOV_TXBOUND, 0, 0, IOVT_UINT32, 0 },
+ {"rxbound", IOV_RXBOUND, 0, 0, IOVT_UINT32, 0 },
+ {"txminmax", IOV_TXMINMAX, 0, 0, IOVT_UINT32, 0 },
+ {"cpu", IOV_CPU, 0, 0, IOVT_BOOL, 0 },
+#ifdef DHD_DEBUG
+ {"checkdied", IOV_CHECKDIED, 0, 0, IOVT_BUFFER, 0 },
+ {"serial", IOV_SERIALCONS, 0, 0, IOVT_UINT32, 0 },
+#endif /* DHD_DEBUG */
+#ifdef BCMINTERNAL
+ {"siregall", IOV_SIALL, 0, 0, IOVT_UINT32, 0 },
+#endif /* BCMINTERNAL */
+#endif /* DHD_DEBUG */
+#if defined(BCMINTERNAL) || defined(DHD_SPROM)
+ {"sprom", IOV_SPROM, 0, 0, IOVT_BUFFER, 2 * sizeof(int) },
+#endif /* BCMINTERNAL || DHD_SPROM */
+#ifdef SDTEST
+ {"extloop", IOV_EXTLOOP, 0, 0, IOVT_BOOL, 0 },
+ {"pktgen", IOV_PKTGEN, 0, 0, IOVT_BUFFER, sizeof(dhd_pktgen_t) },
+#endif /* SDTEST */
+#if defined(USE_SDIOFIFO_IOVAR) || defined(BCMINTERNAL)
+ {"watermark", IOV_WATERMARK, 0, 0, IOVT_UINT32, 0 },
+ {"mesbusyctrl", IOV_MESBUSYCTRL, 0, 0, IOVT_UINT32, 0 },
+#endif /* USE_SDIOFIFO_IOVAR || BCMINTERNAL */
+#ifdef BCMINTERNAL
+ {"firstread", IOV_FIRSTREAD, 0, 0, IOVT_UINT32, 0 }, /* INTERNAL */
+ {"tstoph", IOV_TSTOPH, 0, 0, IOVT_BOOL, 0 },
+ {"retrydata", IOV_RETRYDATA, 0, 0, IOVT_BOOL, 0 },
+ {"checkfifo", IOV_CHECKFIFO, 0, 0, IOVT_BOOL, 0 },
+ {"sdf2", IOV_SDF2, 0, 0, IOVT_UINT32, 0 },
+ {"sdreset", IOV_SDRESET, 0, 0, IOVT_VOID, 0 },
+ {"sdabort", IOV_SDABORT, 0, 0, IOVT_UINT32, 0 },
+ {"doflow", IOV_DOFLOW, 0, 0, IOVT_BOOL, 0 },
+ {"clockpoll", IOV_CLOCKPOLL, 0, 0, IOVT_BOOL, 0 },
+ {"maxsocram", IOV_MAXRAMSIZE, 0, 0, IOVT_UINT32, 0 },
+#ifdef DHD_DEBUG
+ {"serial", IOV_SERIALCONS, 0, 0, IOVT_UINT32, 0 },
+#endif /* DHD_DEBUG */
+#endif /* BCMINTERNAL */
+ {"devcap", IOV_DEVCAP, 0, 0, IOVT_UINT32, 0 },
+ {"dngl_isolation", IOV_DONGLEISOLATION, 0, 0, IOVT_UINT32, 0 },
+ {"kso", IOV_KSO, 0, 0, IOVT_UINT32, 0 },
+ {"devsleep", IOV_DEVSLEEP, 0, 0, IOVT_UINT32, 0 },
+#ifdef SOFTAP
+ {"fwpath", IOV_FWPATH, 0, 0, IOVT_BUFFER, 0 },
+#endif
+ {"txglomsize", IOV_TXGLOMSIZE, 0, 0, IOVT_UINT32, 0 },
+ {"fw_hang_report", IOV_HANGREPORT, 0, 0, IOVT_BOOL, 0 },
+ {"txinrx_thres", IOV_TXINRX_THRES, 0, 0, IOVT_INT32, 0 },
+ {"sdio_suspend", IOV_SDIO_SUSPEND, 0, 0, IOVT_UINT32, 0 },
+#if defined(DEBUGGER) || defined(DHD_DSCOPE)
+ {"gdb_server", IOV_GDB_SERVER, 0, 0, IOVT_UINT32, 0 },
+#endif /* DEBUGGER || DHD_DSCOPE */
+ {NULL, 0, 0, 0, 0, 0 }
+};
+
+static void
+dhd_dump_pct(struct bcmstrbuf *strbuf, char *desc, uint num, uint div)
+{
+ uint q1, q2;
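+ /* Fixed-point ratio in integer math: q1 is the integer part, q2 the
+ * first two decimal places; e.g. num 22, div 7 prints "3.14".
+ */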
+
+ if (!div) {
+ bcm_bprintf(strbuf, "%s N/A", desc);
+ } else {
+ q1 = num / div;
+ q2 = (100 * (num - (q1 * div))) / div;
+ bcm_bprintf(strbuf, "%s %d.%02d", desc, q1, q2);
+ }
+}
+
+void
+dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
+{
+ dhd_bus_t *bus = dhdp->bus;
+#if defined(DHD_WAKE_STATUS) && defined(DHD_WAKE_EVENT_STATUS)
+ int i;
+#endif
+
+ bcm_bprintf(strbuf, "Bus SDIO structure:\n");
+ bcm_bprintf(strbuf, "hostintmask 0x%08x intstatus 0x%08x sdpcm_ver %d\n",
+ bus->hostintmask, bus->intstatus, bus->sdpcm_ver);
+ bcm_bprintf(strbuf, "fcstate %d qlen %u tx_seq %d, max %d, rxskip %d rxlen %u rx_seq %d\n",
+ bus->fcstate, pktq_n_pkts_tot(&bus->txq), bus->tx_seq, bus->tx_max, bus->rxskip,
+ bus->rxlen, bus->rx_seq);
+ bcm_bprintf(strbuf, "intr %d intrcount %u lastintrs %u spurious %u\n",
+ bus->intr, bus->intrcount, bus->lastintrs, bus->spurious);
+
+#ifdef DHD_WAKE_STATUS
+ bcm_bprintf(strbuf, "wake %u rxwake %u readctrlwake %u\n",
+ bcmsdh_get_total_wake(bus->sdh), bus->wake_counts.rxwake,
+ bus->wake_counts.rcwake);
+#ifdef DHD_WAKE_RX_STATUS
+ bcm_bprintf(strbuf, " unicast %u multicast %u broadcast %u arp %u\n",
+ bus->wake_counts.rx_ucast, bus->wake_counts.rx_mcast,
+ bus->wake_counts.rx_bcast, bus->wake_counts.rx_arp);
+ bcm_bprintf(strbuf, " multi4 %u multi6 %u icmp6 %u multiother %u\n",
+ bus->wake_counts.rx_multi_ipv4, bus->wake_counts.rx_multi_ipv6,
+ bus->wake_counts.rx_icmpv6, bus->wake_counts.rx_multi_other);
+ bcm_bprintf(strbuf, " icmp6_ra %u, icmp6_na %u, icmp6_ns %u\n",
+ bus->wake_counts.rx_icmpv6_ra, bus->wake_counts.rx_icmpv6_na,
+ bus->wake_counts.rx_icmpv6_ns);
+#endif /* DHD_WAKE_RX_STATUS */
+#ifdef DHD_WAKE_EVENT_STATUS
+ for (i = 0; i < WLC_E_LAST; i++)
+ if (bus->wake_counts.rc_event[i] != 0)
+ bcm_bprintf(strbuf, " %s = %u\n", bcmevent_get_name(i),
+ bus->wake_counts.rc_event[i]);
+ bcm_bprintf(strbuf, "\n");
+#endif /* DHD_WAKE_EVENT_STATUS */
+#endif /* DHD_WAKE_STATUS */
+
+ bcm_bprintf(strbuf, "pollrate %u pollcnt %u regfails %u\n",
+ bus->pollrate, bus->pollcnt, bus->regfails);
+
+ bcm_bprintf(strbuf, "\nAdditional counters:\n");
+#ifdef DHDENABLE_TAILPAD
+ bcm_bprintf(strbuf, "tx_tailpad_chain %u tx_tailpad_pktget %u\n",
+ bus->tx_tailpad_chain, bus->tx_tailpad_pktget);
+#endif /* DHDENABLE_TAILPAD */
+ bcm_bprintf(strbuf, "tx_sderrs %u fcqueued %u rxrtx %u rx_toolong %u rxc_errors %u\n",
+ bus->tx_sderrs, bus->fcqueued, bus->rxrtx, bus->rx_toolong,
+ bus->rxc_errors);
+ bcm_bprintf(strbuf, "rx_hdrfail %u badhdr %u badseq %u\n",
+ bus->rx_hdrfail, bus->rx_badhdr, bus->rx_badseq);
+ bcm_bprintf(strbuf, "fc_rcvd %u, fc_xoff %u, fc_xon %u\n",
+ bus->fc_rcvd, bus->fc_xoff, bus->fc_xon);
+ bcm_bprintf(strbuf, "rxglomfail %u, rxglomframes %u, rxglompkts %u\n",
+ bus->rxglomfail, bus->rxglomframes, bus->rxglompkts);
+ bcm_bprintf(strbuf, "f2rx (hdrs/data) %u (%u/%u), f2tx %u f1regs %u\n",
+ (bus->f2rxhdrs + bus->f2rxdata), bus->f2rxhdrs, bus->f2rxdata,
+ bus->f2txdata, bus->f1regdata);
+ {
+ dhd_dump_pct(strbuf, "\nRx: pkts/f2rd", bus->dhd->rx_packets,
+ (bus->f2rxhdrs + bus->f2rxdata));
+ dhd_dump_pct(strbuf, ", pkts/f1sd", bus->dhd->rx_packets, bus->f1regdata);
+ dhd_dump_pct(strbuf, ", pkts/sd", bus->dhd->rx_packets,
+ (bus->f2rxhdrs + bus->f2rxdata + bus->f1regdata));
+ dhd_dump_pct(strbuf, ", pkts/int", bus->dhd->rx_packets, bus->intrcount);
+ bcm_bprintf(strbuf, "\n");
+
+ dhd_dump_pct(strbuf, "Rx: glom pct", (100 * bus->rxglompkts),
+ bus->dhd->rx_packets);
+ dhd_dump_pct(strbuf, ", pkts/glom", bus->rxglompkts, bus->rxglomframes);
+ bcm_bprintf(strbuf, "\n");
+
+ dhd_dump_pct(strbuf, "Tx: pkts/f2wr", bus->dhd->tx_packets, bus->f2txdata);
+ dhd_dump_pct(strbuf, ", pkts/f1sd", bus->dhd->tx_packets, bus->f1regdata);
+ dhd_dump_pct(strbuf, ", pkts/sd", bus->dhd->tx_packets,
+ (bus->f2txdata + bus->f1regdata));
+ dhd_dump_pct(strbuf, ", pkts/int", bus->dhd->tx_packets, bus->intrcount);
+ bcm_bprintf(strbuf, "\n");
+
+ dhd_dump_pct(strbuf, "Total: pkts/f2rw",
+ (bus->dhd->tx_packets + bus->dhd->rx_packets),
+ (bus->f2txdata + bus->f2rxhdrs + bus->f2rxdata));
+ dhd_dump_pct(strbuf, ", pkts/f1sd",
+ (bus->dhd->tx_packets + bus->dhd->rx_packets), bus->f1regdata);
+ dhd_dump_pct(strbuf, ", pkts/sd",
+ (bus->dhd->tx_packets + bus->dhd->rx_packets),
+ (bus->f2txdata + bus->f2rxhdrs + bus->f2rxdata + bus->f1regdata));
+ dhd_dump_pct(strbuf, ", pkts/int",
+ (bus->dhd->tx_packets + bus->dhd->rx_packets), bus->intrcount);
+ bcm_bprintf(strbuf, "\n\n");
+ }
+
+#ifdef SDTEST
+ /* XXX Add new stats, include pktq len */
+ if (bus->pktgen_count) {
+ bcm_bprintf(strbuf, "pktgen config and count:\n");
+ bcm_bprintf(strbuf, "freq %u count %u print %u total %u min %u len %u\n",
+ bus->pktgen_freq, bus->pktgen_count, bus->pktgen_print,
+ bus->pktgen_total, bus->pktgen_minlen, bus->pktgen_maxlen);
+ bcm_bprintf(strbuf, "send attempts %u rcvd %u fail %u\n",
+ bus->pktgen_sent, bus->pktgen_rcvd, bus->pktgen_fail);
+ }
+#endif /* SDTEST */
+#ifdef DHD_DEBUG
+ bcm_bprintf(strbuf, "dpc_sched %d host interrupt%spending\n",
+ bus->dpc_sched, (bcmsdh_intr_pending(bus->sdh) ? " " : " not "));
+ bcm_bprintf(strbuf, "blocksize %u roundup %u\n", bus->blocksize, bus->roundup);
+#endif /* DHD_DEBUG */
+ bcm_bprintf(strbuf, "clkstate %d activity %d idletime %d idlecount %d sleeping %d\n",
+ bus->clkstate, bus->activity, bus->idletime, bus->idlecount, bus->sleeping);
+#ifdef BCMINTERNAL
+ bcm_bprintf(strbuf, "tx_deferred %d, fc 0x%x\n", bus->tx_deferred, bus->flowcontrol);
+#ifdef DHD_DEBUG
+ {
+ int i;
+ bcm_bprintf(strbuf, "qcount: ");
+ for (i = 0; i < 8; i++)
+ bcm_bprintf(strbuf, " %d , ", qcount[i]);
+ bcm_bprintf(strbuf, "\n");
+ bcm_bprintf(strbuf, "tx_packets: ");
+ for (i = 0; i < 8; i++)
+ bcm_bprintf(strbuf, " %d , ", tx_packets[i]);
+ bcm_bprintf(strbuf, "\n");
+ }
+#endif /* DHD_DEBUG */
+#endif /* BCMINTERNAL */
+ dhd_dump_pct(strbuf, "Tx: glom pct", (100 * bus->txglompkts), bus->dhd->tx_packets);
+ dhd_dump_pct(strbuf, ", pkts/glom", bus->txglompkts, bus->txglomframes);
+ bcm_bprintf(strbuf, "\n");
+ bcm_bprintf(strbuf, "txglomframes %u, txglompkts %u\n", bus->txglomframes, bus->txglompkts);
+ bcm_bprintf(strbuf, "\n");
+}
+
+void
+dhd_bus_clearcounts(dhd_pub_t *dhdp)
+{
+ dhd_bus_t *bus = (dhd_bus_t *)dhdp->bus;
+
+ bus->intrcount = bus->lastintrs = bus->spurious = bus->regfails = 0;
+ bus->rxrtx = bus->rx_toolong = bus->rxc_errors = 0;
+ bus->rx_hdrfail = bus->rx_badhdr = bus->rx_badseq = 0;
+#ifdef DHDENABLE_TAILPAD
+ bus->tx_tailpad_chain = bus->tx_tailpad_pktget = 0;
+#endif /* DHDENABLE_TAILPAD */
+ bus->tx_sderrs = bus->fc_rcvd = bus->fc_xoff = bus->fc_xon = 0;
+ bus->rxglomfail = bus->rxglomframes = bus->rxglompkts = 0;
+ bus->f2rxhdrs = bus->f2rxdata = bus->f2txdata = bus->f1regdata = 0;
+#ifdef BCMINTERNAL
+ bus->tx_deferred = bus->flowcontrol = 0;
+#endif
+ bus->txglomframes = bus->txglompkts = 0;
+}
+
+#ifdef SDTEST
+static int
+dhdsdio_pktgen_get(dhd_bus_t *bus, uint8 *arg)
+{
+ dhd_pktgen_t pktgen;
+
+ pktgen.version = DHD_PKTGEN_VERSION;
+ pktgen.freq = bus->pktgen_freq;
+ pktgen.count = bus->pktgen_count;
+ pktgen.print = bus->pktgen_print;
+ pktgen.total = bus->pktgen_total;
+ pktgen.minlen = bus->pktgen_minlen;
+ pktgen.maxlen = bus->pktgen_maxlen;
+ pktgen.numsent = bus->pktgen_sent;
+ pktgen.numrcvd = bus->pktgen_rcvd;
+ pktgen.numfail = bus->pktgen_fail;
+ pktgen.mode = bus->pktgen_mode;
+ pktgen.stop = bus->pktgen_stop;
+
+ bcopy(&pktgen, arg, sizeof(pktgen));
+
+ return 0;
+}
+
+static int
+dhdsdio_pktgen_set(dhd_bus_t *bus, uint8 *arg)
+{
+ dhd_pktgen_t pktgen;
+ uint oldcnt, oldmode;
+
+ bcopy(arg, &pktgen, sizeof(pktgen));
+ if (pktgen.version != DHD_PKTGEN_VERSION)
+ return BCME_BADARG;
+
+ oldcnt = bus->pktgen_count;
+ oldmode = bus->pktgen_mode;
+
+ bus->pktgen_freq = pktgen.freq;
+ bus->pktgen_count = pktgen.count;
+ bus->pktgen_print = pktgen.print;
+ bus->pktgen_total = pktgen.total;
+ bus->pktgen_minlen = pktgen.minlen;
+ bus->pktgen_maxlen = pktgen.maxlen;
+ bus->pktgen_mode = pktgen.mode;
+ bus->pktgen_stop = pktgen.stop;
+
+ bus->pktgen_tick = bus->pktgen_ptick = 0;
+#if defined(LINUX)
+ bus->pktgen_prev_time = jiffies;
+#endif /* LINUX */
+ bus->pktgen_len = MAX(bus->pktgen_len, bus->pktgen_minlen);
+ bus->pktgen_len = MIN(bus->pktgen_len, bus->pktgen_maxlen);
+
+ /* Clear counts for a new pktgen (mode change, or was stopped) */
+ if (bus->pktgen_count && (!oldcnt || oldmode != bus->pktgen_mode)) {
+ bus->pktgen_sent = bus->pktgen_prev_sent = bus->pktgen_rcvd = 0;
+ bus->pktgen_prev_rcvd = bus->pktgen_fail = 0;
+ }
+
+ return 0;
+}
+#endif /* SDTEST */
+
+static int
+dhdsdio_membytes(dhd_bus_t *bus, bool write, uint32 address, uint8 *data, uint size)
+{
+ int bcmerror = 0;
+ uint32 sdaddr;
+ uint dsize;
+ uint8 *pdata;
+
+ /* In remap mode, adjust address beyond socram and redirect
+ * to devram at SOCDEVRAM_BP_ADDR since remap address > orig_ramsize
+ * is not backplane accessible
+ */
+ if (REMAP_ENAB(bus) && REMAP_ISADDR(bus, address)) {
+ address -= bus->orig_ramsize;
+ address += SOCDEVRAM_BP_ADDR;
+ }
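+ /* Illustrative example (values assumed, not from this change): with
+ * orig_ramsize 0x48000, a remapped address 0x4A000 becomes
+ * SOCDEVRAM_BP_ADDR + 0x2000, i.e. backplane-accessible devram.
+ */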
+
+ /* Determine initial transfer parameters */
+ sdaddr = address & SBSDIO_SB_OFT_ADDR_MASK;
+ if ((sdaddr + size) & SBSDIO_SBWINDOW_MASK)
+ dsize = (SBSDIO_SB_OFT_ADDR_LIMIT - sdaddr);
+ else
+ dsize = size;
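+ /* Sketch of the window math, assuming the usual 32 KB F1 window
+ * (SBSDIO_SB_OFT_ADDR_MASK 0x7FFF): address 0x0001A400 splits into
+ * window base 0x18000 and in-window offset sdaddr 0x2400. If
+ * sdaddr + size crosses the window, dsize is first clipped to
+ * SBSDIO_SB_OFT_ADDR_LIMIT - sdaddr and the window is advanced on
+ * each iteration below.
+ */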
+
+ /* Set the backplane window to include the start address */
+ if ((bcmerror = dhdsdio_set_siaddr_window(bus, address))) {
+ DHD_ERROR(("%s: window change failed\n", __FUNCTION__));
+ goto xfer_done;
+ }
+
+ /* Do the transfer(s) */
+ while (size) {
+ DHD_INFO(("%s: %s %d bytes at offset 0x%08x in window 0x%08x\n",
+ __FUNCTION__, (write ? "write" : "read"), dsize, sdaddr,
+ (address & SBSDIO_SBWINDOW_MASK)));
+ if (dsize <= MAX_MEM_BUF) {
+ pdata = bus->membuf;
+ if (write)
+ memcpy(bus->membuf, data, dsize);
+ } else {
+ pdata = data;
+ }
+ if ((bcmerror = bcmsdh_rwdata(bus->sdh, write, sdaddr, pdata, dsize))) {
+ DHD_ERROR(("%s: membytes transfer failed\n", __FUNCTION__));
+ break;
+ }
+ if (dsize <= MAX_MEM_BUF && !write)
+ memcpy(data, bus->membuf, dsize);
+
+ /* Adjust for next transfer (if any) */
+ if ((size -= dsize)) {
+ data += dsize;
+ address += dsize;
+ if ((bcmerror = dhdsdio_set_siaddr_window(bus, address))) {
+ DHD_ERROR(("%s: window change failed\n", __FUNCTION__));
+ break;
+ }
+ sdaddr = 0;
+ dsize = MIN(SBSDIO_SB_OFT_ADDR_LIMIT, size);
+ }
+
+ }
+
+xfer_done:
+ /* Return the window to backplane enumeration space for core access */
+ if (dhdsdio_set_siaddr_window(bus, bcmsdh_cur_sbwad(bus->sdh))) {
+ DHD_ERROR(("%s: FAILED to set window back to 0x%x\n", __FUNCTION__,
+ bcmsdh_cur_sbwad(bus->sdh)));
+ }
+
+ return bcmerror;
+}
+
+static int
+dhdsdio_readshared(dhd_bus_t *bus, sdpcm_shared_t *sh)
+{
+ uint32 addr;
+ int rv, i;
+ uint32 shaddr = 0;
+
+ if (bus->sih == NULL) {
+ if (bus->dhd && bus->dhd->dongle_reset) {
+ DHD_ERROR(("%s: Dongle is in reset state\n", __FUNCTION__));
+ return BCME_NOTREADY;
+ } else {
+ ASSERT(bus->dhd);
+ ASSERT(bus->sih);
+ DHD_ERROR(("%s: The address of sih is invalid\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+ }
+ /*
+ * If SR is not implemented in 43430 FW we should not adjust shaddr.
+ * XXX Should be removed once SR is implemented in 43430 FW.
+ */
+ if ((CHIPID(bus->sih->chip) == BCM43430_CHIP_ID ||
+ CHIPID(bus->sih->chip) == BCM43018_CHIP_ID) && !dhdsdio_sr_cap(bus))
+ bus->srmemsize = 0;
+
+ shaddr = bus->dongle_ram_base + bus->ramsize - 4;
+ i = 0;
+ do {
+ /* Read last word in memory to determine address of sdpcm_shared structure */
+ if ((rv = dhdsdio_membytes(bus, FALSE, shaddr, (uint8 *)&addr, 4)) < 0)
+ return rv;
+
+ addr = ltoh32(addr);
+
+ DHD_INFO(("sdpcm_shared address 0x%08X\n", addr));
+
+ /*
+ * Check if addr is valid: the NVRAM length token written to the
+ * last word of memory should have been overwritten with the
+ * sdpcm_shared address by the running firmware.
+ */
+ if (addr == 0 || ((~addr >> 16) & 0xffff) == (addr & 0xffff)) {
+ if ((bus->srmemsize > 0) && (i++ == 0)) {
+ shaddr -= bus->srmemsize;
+ } else {
+ DHD_ERROR(("%s: address (0x%08x) of sdpcm_shared invalid\n",
+ __FUNCTION__, addr));
+ return BCME_ERROR;
+ }
+ } else
+ break;
+ } while (i < 2);
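+ /* Why the check above works (sketch): dhdsdio_write_vars() leaves a
+ * length token T = (~w << 16) | w (w = varsize in words) in the last
+ * word of RAM, and for any such token
+ * ((~T >> 16) & 0xffff) == (T & 0xffff). A match therefore means the
+ * firmware has not yet replaced the token with the sdpcm_shared
+ * address.
+ */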
+
+ /* Read the sdpcm_shared structure */
+ if ((rv = dhdsdio_membytes(bus, FALSE, addr, (uint8 *)sh, sizeof(sdpcm_shared_t))) < 0)
+ return rv;
+
+ /* Endianness */
+ sh->flags = ltoh32(sh->flags);
+ sh->trap_addr = ltoh32(sh->trap_addr);
+ sh->assert_exp_addr = ltoh32(sh->assert_exp_addr);
+ sh->assert_file_addr = ltoh32(sh->assert_file_addr);
+ sh->assert_line = ltoh32(sh->assert_line);
+ sh->console_addr = ltoh32(sh->console_addr);
+ sh->msgtrace_addr = ltoh32(sh->msgtrace_addr);
+
+#ifdef BCMSDIO_RXLIM_POST
+ if (sh->flags & SDPCM_SHARED_RXLIM_POST) {
+ if (bus->dhd->conf->rxlim_en)
+ bus->rxlim_en = !!sh->msgtrace_addr;
+ bus->rxlim_addr = sh->msgtrace_addr;
+ DHD_INFO(("%s: rxlim_en=%d, rxlim enable=%d, rxlim_addr=%d\n",
+ __FUNCTION__,
+ bus->dhd->conf->rxlim_en, bus->rxlim_en, bus->rxlim_addr));
+ sh->flags &= ~SDPCM_SHARED_RXLIM_POST;
+ } else {
+ bus->rxlim_en = 0;
+ DHD_INFO(("%s: FW has no rx limit post support\n", __FUNCTION__));
+ }
+#endif /* BCMSDIO_RXLIM_POST */
+
+#ifdef BCMSDIO_TXSEQ_SYNC
+ if (bus->dhd->conf->txseq_sync) {
+ sh->txseq_sync_addr = ltoh32(sh->txseq_sync_addr);
+ if (sh->flags & SDPCM_SHARED_TXSEQ_SYNC) {
+ uint8 val = 0;
+ DHD_INFO(("%s: TXSEQ_SYNC enabled in fw\n", __FUNCTION__));
+ if (0 == dhdsdio_membytes(bus, FALSE, sh->txseq_sync_addr, (uint8 *)&val, 1)) {
+ if (bus->tx_seq != val) {
+ DHD_INFO(("%s: Sync tx_seq from %d to %d\n",
+ __FUNCTION__, bus->tx_seq, val));
+ bus->tx_seq = val;
+ bus->tx_max = bus->tx_seq + 4;
+ }
+ }
+ sh->flags &= ~SDPCM_SHARED_TXSEQ_SYNC;
+ } else {
+ bus->dhd->conf->txseq_sync = FALSE;
+ }
+ }
+#endif /* BCMSDIO_TXSEQ_SYNC */
+
+ /*
+ * XXX - Allow a sdpcm_shared_t version mismatch between dhd structure
+ * version 1 and firmware structure version 3.
+ * The sdpcm_shared_t structure fields used in this function are in the
+ * same positions in these two structure versions.
+ * For some chips in the FALCON release, the dhd driver is from the
+ * FALCON branch (sdpcm_shared_t structure version 1) and the firmware
+ * comes from the ROMTERM3 branch (sdpcm_shared_t structure version 3).
+ */
+ if ((sh->flags & SDPCM_SHARED_VERSION_MASK) == 3 && SDPCM_SHARED_VERSION == 1)
+ return BCME_OK;
+
+ if ((sh->flags & SDPCM_SHARED_VERSION_MASK) != SDPCM_SHARED_VERSION) {
+ DHD_ERROR(("%s: sdpcm_shared version %d in dhd "
+ "is different than sdpcm_shared version %d in dongle\n",
+ __FUNCTION__, SDPCM_SHARED_VERSION,
+ sh->flags & SDPCM_SHARED_VERSION_MASK));
+ return BCME_ERROR;
+ }
+
+ return BCME_OK;
+}
+
+#define CONSOLE_LINE_MAX 192
+
+#ifdef DHD_DEBUG
+static int
+dhdsdio_readconsole(dhd_bus_t *bus)
+{
+ dhd_console_t *c = &bus->console;
+ uint8 line[CONSOLE_LINE_MAX], ch;
+ uint32 n, idx, addr;
+ int rv;
+
+ /* Don't do anything until FWREADY updates console address */
+ if (bus->console_addr == 0)
+ return 0;
+
+ if (!KSO_ENAB(bus))
+ return 0;
+
+ /* Read console log struct */
+ addr = bus->console_addr + OFFSETOF(hnd_cons_t, log);
+ if ((rv = dhdsdio_membytes(bus, FALSE, addr, (uint8 *)&c->log, sizeof(c->log))) < 0)
+ return rv;
+
+ /* Allocate console buffer (one time only) */
+ if (c->buf == NULL) {
+ c->bufsize = ltoh32(c->log.buf_size);
+ if ((c->buf = MALLOC(bus->dhd->osh, c->bufsize)) == NULL)
+ return BCME_NOMEM;
+ }
+
+ idx = ltoh32(c->log.idx);
+
+ /* Protect against corrupt value */
+ if (idx > c->bufsize)
+ return BCME_ERROR;
+
+ /* Skip reading the console buffer if the index pointer has not moved */
+ if (idx == c->last)
+ return BCME_OK;
+
+ /* Read the console buffer */
+ /* XXX This could be optimized to read only the portion of the buffer
+ * needed, but it would also have to handle wrap-around.
+ */
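+ /* Layout note: c->buf is a ring of c->bufsize bytes and idx is the
+ * dongle's write index. The loop below advances c->last toward idx,
+ * emitting one '\n'-terminated line at a time and deferring a partial
+ * trailing line to the next call.
+ */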
+ addr = ltoh32(c->log.buf);
+ if ((rv = dhdsdio_membytes(bus, FALSE, addr, c->buf, c->bufsize)) < 0)
+ return rv;
+
+ while (c->last != idx) {
+ for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
+ if (c->last == idx) {
+ /* This would output a partial line. Instead, back up
+ * the buffer pointer and output this line next time around.
+ */
+ if (c->last >= n)
+ c->last -= n;
+ else
+ c->last = c->bufsize - n;
+ goto break2;
+ }
+ ch = c->buf[c->last];
+ c->last = (c->last + 1) % c->bufsize;
+ if (ch == '\n')
+ break;
+ line[n] = ch;
+ }
+
+ if (n > 0) {
+ if (line[n - 1] == '\r')
+ n--;
+ line[n] = 0;
+ printf("CONSOLE: %s\n", line);
+#ifdef LOG_INTO_TCPDUMP
+ dhd_sendup_log(bus->dhd, line, n);
+#endif /* LOG_INTO_TCPDUMP */
+ }
+ }
+break2:
+
+ return BCME_OK;
+}
+#endif /* DHD_DEBUG */
+
+static int
+dhdsdio_checkdied(dhd_bus_t *bus, char *data, uint size)
+{
+ int bcmerror = 0;
+ uint msize = 512;
+ char *mbuffer = NULL;
+ char *console_buffer = NULL;
+ uint maxstrlen = 256;
+ char *str = NULL;
+ sdpcm_shared_t l_sdpcm_shared;
+ struct bcmstrbuf strbuf;
+ uint32 console_ptr, console_size, console_index;
+ uint8 line[CONSOLE_LINE_MAX], ch;
+ uint32 n, i, addr;
+ int rv;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (DHD_NOCHECKDIED_ON())
+ return 0;
+
+ if (data == NULL) {
+ /*
+ * Called after an rx ctrl timeout; "data" is NULL.
+ * Allocate memory to trace the trap or assert.
+ */
+ size = msize;
+ mbuffer = data = MALLOC(bus->dhd->osh, msize);
+ if (mbuffer == NULL) {
+ DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, msize));
+ bcmerror = BCME_NOMEM;
+ goto done;
+ }
+ }
+
+ if ((str = MALLOC(bus->dhd->osh, maxstrlen)) == NULL) {
+ DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, maxstrlen));
+ bcmerror = BCME_NOMEM;
+ goto done;
+ }
+
+ if ((bcmerror = dhdsdio_readshared(bus, &l_sdpcm_shared)) < 0)
+ goto done;
+
+ bcm_binit(&strbuf, data, size);
+
+ bcm_bprintf(&strbuf, "msgtrace address : 0x%08X\nconsole address : 0x%08X\n",
+ l_sdpcm_shared.msgtrace_addr, l_sdpcm_shared.console_addr);
+
+ if ((l_sdpcm_shared.flags & SDPCM_SHARED_ASSERT_BUILT) == 0) {
+ /* NOTE: Misspelled assert is intentional - DO NOT FIX.
+ * (Avoids conflict with real asserts for programmatic parsing of output.)
+ */
+ bcm_bprintf(&strbuf, "Assrt not built in dongle\n");
+ }
+
+ if ((l_sdpcm_shared.flags & (SDPCM_SHARED_ASSERT|SDPCM_SHARED_TRAP)) == 0) {
+ /* NOTE: Misspelled assert is intentional - DO NOT FIX.
+ * (Avoids conflict with real asserts for programmatic parsing of output.)
+ */
+ bcm_bprintf(&strbuf, "No trap%s in dongle",
+ (l_sdpcm_shared.flags & SDPCM_SHARED_ASSERT_BUILT)
+ ?"/assrt" :"");
+ } else {
+ if (l_sdpcm_shared.flags & SDPCM_SHARED_ASSERT) {
+ /* Download assert */
+ bcm_bprintf(&strbuf, "Dongle assert");
+ if (l_sdpcm_shared.assert_exp_addr != 0) {
+ str[0] = '\0';
+ if ((bcmerror = dhdsdio_membytes(bus, FALSE,
+ l_sdpcm_shared.assert_exp_addr,
+ (uint8 *)str, maxstrlen)) < 0)
+ goto done;
+
+ str[maxstrlen - 1] = '\0';
+ bcm_bprintf(&strbuf, " expr \"%s\"", str);
+ }
+
+ if (l_sdpcm_shared.assert_file_addr != 0) {
+ str[0] = '\0';
+ if ((bcmerror = dhdsdio_membytes(bus, FALSE,
+ l_sdpcm_shared.assert_file_addr,
+ (uint8 *)str, maxstrlen)) < 0)
+ goto done;
+
+ str[maxstrlen - 1] = '\0';
+ bcm_bprintf(&strbuf, " file \"%s\"", str);
+ }
+
+ bcm_bprintf(&strbuf, " line %d ", l_sdpcm_shared.assert_line);
+ }
+
+ if (l_sdpcm_shared.flags & SDPCM_SHARED_TRAP) {
+ trap_t *tr = &bus->dhd->last_trap_info;
+ bus->dhd->dongle_trap_occured = TRUE;
+ if ((bcmerror = dhdsdio_membytes(bus, FALSE,
+ l_sdpcm_shared.trap_addr,
+ (uint8*)tr, sizeof(trap_t))) < 0)
+ goto done;
+
+ bus->dongle_trap_addr = ltoh32(l_sdpcm_shared.trap_addr);
+
+ dhd_bus_dump_trap_info(bus, &strbuf);
+
+ addr = l_sdpcm_shared.console_addr + OFFSETOF(hnd_cons_t, log);
+ if ((rv = dhdsdio_membytes(bus, FALSE, addr,
+ (uint8 *)&console_ptr, sizeof(console_ptr))) < 0)
+ goto printbuf;
+
+ addr = l_sdpcm_shared.console_addr + OFFSETOF(hnd_cons_t, log.buf_size);
+ if ((rv = dhdsdio_membytes(bus, FALSE, addr,
+ (uint8 *)&console_size, sizeof(console_size))) < 0)
+ goto printbuf;
+
+ addr = l_sdpcm_shared.console_addr + OFFSETOF(hnd_cons_t, log.idx);
+ if ((rv = dhdsdio_membytes(bus, FALSE, addr,
+ (uint8 *)&console_index, sizeof(console_index))) < 0)
+ goto printbuf;
+
+ console_ptr = ltoh32(console_ptr);
+ console_size = ltoh32(console_size);
+ console_index = ltoh32(console_index);
+
+ if (console_size > CONSOLE_BUFFER_MAX ||
+ !(console_buffer = MALLOC(bus->dhd->osh, console_size)))
+ goto printbuf;
+
+ if ((rv = dhdsdio_membytes(bus, FALSE, console_ptr,
+ (uint8 *)console_buffer, console_size)) < 0)
+ goto printbuf;
+
+ for (i = 0, n = 0; i < console_size; i += n + 1) {
+ for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
+ ch = console_buffer[(console_index + i + n) % console_size];
+ if (ch == '\n')
+ break;
+ line[n] = ch;
+ }
+
+ if (n > 0) {
+ if (line[n - 1] == '\r')
+ n--;
+ line[n] = 0;
+ /* Don't use DHD_ERROR macro since we print
+ * a lot of information quickly. The macro
+ * will truncate a lot of the printfs
+ */
+
+ if (dhd_msg_level & DHD_ERROR_VAL)
+ printf("CONSOLE: %s\n", line);
+ }
+ }
+ }
+ }
+
+printbuf:
+ if (l_sdpcm_shared.flags & (SDPCM_SHARED_ASSERT | SDPCM_SHARED_TRAP)) {
+ DHD_ERROR(("%s: %s\n", __FUNCTION__, strbuf.origbuf));
+ }
+
+#if defined(DHD_FW_COREDUMP)
+ if (bus->dhd->memdump_enabled && (l_sdpcm_shared.flags & SDPCM_SHARED_TRAP)) {
+ /* Mem dump to a file on device */
+ bus->dhd->memdump_type = DUMP_TYPE_DONGLE_TRAP;
+ /* XXX This sdunlock is a WAR. We tried to come up with a better
+ * solution, but with the current structure of sdlocks a better fix is
+ * unlikely for now. A re-architecture of SDIO bus locking has been
+ * put up as a cleanup activity, and a thorough code walkthrough is
+ * needed.
+ */
+ dhd_os_sdunlock(bus->dhd);
+ dhdsdio_mem_dump(bus);
+ dhd_os_sdlock(bus->dhd);
+#ifdef NDIS
+ /* Windows would like to crash and collect memory dump for analysis */
+ ASSERT(0 && "Dongle firmware died.");
+
+ /* For free (non-checked) driver builds, ASSERT will not bugcheck */
+ KeBugCheckEx(__LINE__, 0, 0, 0, 0);
+#endif
+ }
+#endif /* DHD_FW_COREDUMP */
+
+done:
+ if (mbuffer)
+ MFREE(bus->dhd->osh, mbuffer, msize);
+ if (str)
+ MFREE(bus->dhd->osh, str, maxstrlen);
+ if (console_buffer)
+ MFREE(bus->dhd->osh, console_buffer, console_size);
+
+ return bcmerror;
+}
+
+#if defined(DHD_FW_COREDUMP)
+int
+dhd_bus_mem_dump(dhd_pub_t *dhdp)
+{
+ dhd_bus_t *bus = dhdp->bus;
+ if (dhdp->busstate == DHD_BUS_SUSPEND) {
+ DHD_ERROR(("%s: Bus is suspended, so skip\n", __FUNCTION__));
+ return 0;
+ }
+ return dhdsdio_mem_dump(bus);
+}
+
+int
+dhd_bus_get_mem_dump(dhd_pub_t *dhdp)
+{
+ if (!dhdp) {
+ DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ return dhdsdio_get_mem_dump(dhdp->bus);
+}
+
+static int
+dhdsdio_get_mem_dump(dhd_bus_t *bus)
+{
+ int ret = BCME_ERROR;
+ int size = bus->ramsize; /* Full mem size */
+ uint32 start = bus->dongle_ram_base; /* Start address */
+ uint read_size = 0; /* Read size of each iteration */
+ uint8 *p_buf = NULL, *databuf = NULL;
+
+ /* Get full mem size */
+ p_buf = dhd_get_fwdump_buf(bus->dhd, size);
+ if (!p_buf) {
+ DHD_ERROR(("%s: Out of memory (%d bytes)\n",
+ __FUNCTION__, size));
+ return BCME_ERROR;
+ }
+
+ dhd_os_sdlock(bus->dhd);
+ BUS_WAKE(bus);
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+ /* Read mem content */
+ DHD_ERROR(("Dump dongle memory\n"));
+ databuf = p_buf;
+ while (size) {
+ read_size = MIN(MEMBLOCK, size);
+ ret = dhdsdio_membytes(bus, FALSE, start, databuf, read_size);
+ if (ret) {
+ DHD_ERROR(("%s: Error membytes %d\n", __FUNCTION__, ret));
+ ret = BCME_ERROR;
+ break;
+ }
+ /* Decrement size and increment start address */
+ size -= read_size;
+ start += read_size;
+ databuf += read_size;
+ }
+
+ if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched &&
+ NO_OTHER_ACTIVE_BUS_USER(bus)) {
+ bus->activity = FALSE;
+ dhdsdio_clkctl(bus, CLK_NONE, TRUE);
+ }
+
+ dhd_os_sdunlock(bus->dhd);
+
+ return ret;
+}
+
+static int
+dhdsdio_mem_dump(dhd_bus_t *bus)
+{
+ dhd_pub_t *dhdp;
+ int ret = BCME_ERROR;
+
+ dhdp = bus->dhd;
+ if (!dhdp) {
+ DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
+ return ret;
+ }
+
+ ret = dhdsdio_get_mem_dump(bus);
+ if (ret) {
+ DHD_ERROR(("%s: failed to get mem dump, err=%d\n",
+ __FUNCTION__, ret));
+ } else {
+ /* Schedule a work queue to perform the actual memdump;
+ * dhd_mem_dump() performs the job.
+ */
+ dhd_schedule_memdump(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
+ /* soc_ram free handled in dhd_{free,clear} */
+ }
+
+ return ret;
+}
+#endif /* DHD_FW_COREDUMP */
+
+int
+dhd_socram_dump(dhd_bus_t * bus)
+{
+#if defined(DHD_FW_COREDUMP)
+ return (dhdsdio_mem_dump(bus));
+#else
+ return -1;
+#endif
+}
+
+int
+dhdsdio_downloadvars(dhd_bus_t *bus, void *arg, int len)
+{
+ int bcmerror = BCME_OK;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (bus->dhd->up) {
+ bcmerror = BCME_NOTDOWN;
+ goto err;
+ }
+ if (!len) {
+ bcmerror = BCME_BUFTOOSHORT;
+ goto err;
+ }
+
+ /* Free the old ones and replace with passed variables */
+ if (bus->vars)
+ MFREE(bus->dhd->osh, bus->vars, bus->varsz);
+
+ bus->vars = MALLOC(bus->dhd->osh, len);
+ bus->varsz = bus->vars ? len : 0;
+ if (bus->vars == NULL) {
+ bcmerror = BCME_NOMEM;
+ goto err;
+ }
+
+ /* Copy the passed variables, which should include the terminating double-null */
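+ /* Illustrative layout (example values only):
+ * "boardtype=0x062b\0macaddr=00:90:4c:aa:bb:cc\0\0" -- a NUL-separated
+ * name=value list ending in a double NUL.
+ */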
+ bcopy(arg, bus->vars, bus->varsz);
+err:
+ return bcmerror;
+}
+
+#ifdef DHD_DEBUG
+static int
+dhd_serialconsole(dhd_bus_t *bus, bool set, bool enable, int *bcmerror)
+{
+ int int_val;
+ uint32 addr, data, uart_enab = 0;
+
+ addr = SI_ENUM_BASE(bus->sih) + OFFSETOF(chipcregs_t, chipcontrol_addr);
+ data = SI_ENUM_BASE(bus->sih) + OFFSETOF(chipcregs_t, chipcontrol_data);
+ *bcmerror = 0;
+
+ bcmsdh_reg_write(bus->sdh, addr, 4, 1);
+ if (bcmsdh_regfail(bus->sdh)) {
+ *bcmerror = BCME_SDIO_ERROR;
+ return -1;
+ }
+ int_val = bcmsdh_reg_read(bus->sdh, data, 4);
+ if (bcmsdh_regfail(bus->sdh)) {
+ *bcmerror = BCME_SDIO_ERROR;
+ return -1;
+ }
+
+ if (!set)
+ return (int_val & uart_enab);
+ if (enable)
+ int_val |= uart_enab;
+ else
+ int_val &= ~uart_enab;
+ bcmsdh_reg_write(bus->sdh, data, 4, int_val);
+ if (bcmsdh_regfail(bus->sdh)) {
+ *bcmerror = BCME_SDIO_ERROR;
+ return -1;
+ }
+
+ return (int_val & uart_enab);
+}
+#endif /* DHD_DEBUG */
+
+static int
+dhdsdio_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, const char *name,
+ void *params, uint plen, void *arg, uint len, uint val_size)
+{
+ int bcmerror = 0;
+ int32 int_val = 0;
+ bool bool_val = 0;
+
+ DHD_TRACE(("%s: Enter, action %d name %s params %p plen %d arg %p len %d val_size %d\n",
+ __FUNCTION__, actionid, name, params, plen, arg, len, val_size));
+
+ if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0)
+ goto exit;
+
+ if (plen >= sizeof(int_val))
+ bcopy(params, &int_val, sizeof(int_val));
+
+ bool_val = (int_val != 0) ? TRUE : FALSE;
+
+ /* Some ioctls use the bus */
+ dhd_os_sdlock(bus->dhd);
+
+ /* Check if dongle is in reset. If so, only allow DEVRESET iovars */
+ if (bus->dhd->dongle_reset && !(actionid == IOV_SVAL(IOV_DEVRESET) ||
+ actionid == IOV_GVAL(IOV_DEVRESET))) {
+ bcmerror = BCME_NOTREADY;
+ goto exit;
+ }
+
+ /*
+ * Special handling for keepSdioOn: New SDIO Wake-up Mechanism
+ */
+ if ((vi->varid == IOV_KSO) && (IOV_ISSET(actionid))) {
+ dhdsdio_clk_kso_iovar(bus, bool_val);
+ goto exit;
+ } else if ((vi->varid == IOV_DEVSLEEP) && (IOV_ISSET(actionid))) {
+#ifdef BCMINTERNAL
+ /* XXX: Temp for debugging devsleep */
+ if (int_val == 2) {
+ bus->dpc_sched = TRUE;
+ dhd_sched_dpc(bus->dhd);
+ } else if (int_val == 3) {
+ bus->_slpauto = FALSE;
+ } else if (int_val == 4) {
+ bus->_slpauto = TRUE;
+ } else if (int_val == 5) {
+ bus->kso = TRUE;
+ } else if (int_val == 6) {
+ bus->kso = FALSE;
+ } else
+#endif
+ {
+ dhdsdio_clk_devsleep_iovar(bus, bool_val);
+ if (!SLPAUTO_ENAB(bus) && (bool_val == FALSE) && (bus->ipend)) {
+ DHD_ERROR(("INT pending in devsleep 1, dpc_sched: %d\n",
+ bus->dpc_sched));
+ if (!bus->dpc_sched) {
+ bus->dpc_sched = TRUE;
+ dhd_sched_dpc(bus->dhd);
+ }
+ }
+ }
+ goto exit;
+ }
+
+ /* Handle sleep stuff before any clock mucking */
+ if (vi->varid == IOV_SLEEP) {
+ if (IOV_ISSET(actionid)) {
+ bcmerror = dhdsdio_bussleep(bus, bool_val);
+ } else {
+ int_val = (int32)bus->sleeping;
+ bcopy(&int_val, arg, val_size);
+ }
+ goto exit;
+ }
+
+ /* Request clock to allow SDIO accesses */
+ if (!bus->dhd->dongle_reset) {
+ BUS_WAKE(bus);
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+ }
+
+ switch (actionid) {
+ case IOV_GVAL(IOV_INTR):
+ int_val = (int32)bus->intr;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_INTR):
+ bus->intr = bool_val;
+ bus->intdis = FALSE;
+ if (bus->dhd->up) {
+ if (bus->intr) {
+ DHD_INTR(("%s: enable SDIO device interrupts\n", __FUNCTION__));
+ // terence 20141207: enable intdis
+ bus->intdis = TRUE;
+ bcmsdh_intr_enable(bus->sdh);
+ } else {
+ DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__));
+ bcmsdh_intr_disable(bus->sdh);
+ }
+ }
+ break;
+
+ case IOV_GVAL(IOV_POLLRATE):
+ int_val = (int32)bus->pollrate;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_POLLRATE):
+ bus->pollrate = (uint)int_val;
+ bus->poll = (bus->pollrate != 0);
+ break;
+
+ case IOV_GVAL(IOV_IDLETIME):
+ int_val = bus->idletime;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_IDLETIME):
+ if ((int_val < 0) && (int_val != DHD_IDLE_IMMEDIATE)) {
+ bcmerror = BCME_BADARG;
+ } else {
+ bus->idletime = int_val;
+ }
+ break;
+
+ case IOV_GVAL(IOV_IDLECLOCK):
+ int_val = (int32)bus->idleclock;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_IDLECLOCK):
+ bus->idleclock = int_val;
+ break;
+
+ case IOV_GVAL(IOV_SD1IDLE):
+ int_val = (int32)sd1idle;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_SD1IDLE):
+ sd1idle = bool_val;
+ break;
+
+#ifdef DHD_DEBUG
+ case IOV_GVAL(IOV_CHECKDIED):
+ bcmerror = dhdsdio_checkdied(bus, arg, len);
+ break;
+#endif /* DHD_DEBUG */
+
+#ifdef DHD_BUS_MEM_ACCESS
+ case IOV_SVAL(IOV_MEMBYTES):
+ case IOV_GVAL(IOV_MEMBYTES):
+ {
+ uint32 address;
+ uint size, dsize;
+ uint8 *data;
+
+ bool set = (actionid == IOV_SVAL(IOV_MEMBYTES));
+
+ ASSERT(plen >= 2*sizeof(int));
+
+ address = (uint32)int_val;
+ bcopy((char *)params + sizeof(int_val), &int_val, sizeof(int_val));
+ size = (uint)int_val;
+
+ /* Do some validation */
+ dsize = set ? plen - (2 * sizeof(int)) : len;
+ if (dsize < size) {
+ DHD_ERROR(("%s: error on %s membytes, addr 0x%08x size %d dsize %d\n",
+ __FUNCTION__, (set ? "set" : "get"), address, size, dsize));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+
+ DHD_INFO(("%s: Request to %s %d bytes at address 0x%08x\n", __FUNCTION__,
+ (set ? "write" : "read"), size, address));
+
+ /* check if CR4 */
+ if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
+ /*
+ * If address is start of RAM (i.e. a downloaded image),
+ * store the reset instruction to be written in 0
+ */
+ if (set && address == bus->dongle_ram_base) {
+ bus->resetinstr = *(((uint32*)params) + 2);
+ }
+ }
+
+ /* Generate the actual data pointer */
+ data = set ? (uint8*)params + 2 * sizeof(int): (uint8*)arg;
+
+ /* Call to do the transfer */
+ bcmerror = dhdsdio_membytes(bus, set, address, data, size);
+
+ break;
+ }
+#endif /* DHD_BUS_MEM_ACCESS */
+
+ case IOV_GVAL(IOV_RAMSIZE):
+ int_val = (int32)bus->ramsize;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_GVAL(IOV_RAMSTART):
+ int_val = (int32)bus->dongle_ram_base;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_GVAL(IOV_SDIOD_DRIVE):
+ int_val = (int32)dhd_sdiod_drive_strength;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_SDIOD_DRIVE):
+ dhd_sdiod_drive_strength = int_val;
+ si_sdiod_drive_strength_init(bus->sih, bus->dhd->osh, dhd_sdiod_drive_strength);
+ break;
+
+ case IOV_SVAL(IOV_SET_DOWNLOAD_STATE):
+ bcmerror = dhdsdio_download_state(bus, bool_val);
+ break;
+
+ case IOV_SVAL(IOV_SOCRAM_STATE):
+ bcmerror = dhdsdio_download_state(bus, bool_val);
+ break;
+
+ case IOV_SVAL(IOV_VARS):
+ bcmerror = dhdsdio_downloadvars(bus, arg, len);
+ break;
+
+ case IOV_GVAL(IOV_READAHEAD):
+ int_val = (int32)dhd_readahead;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_READAHEAD):
+ if (bool_val && !dhd_readahead)
+ bus->nextlen = 0;
+ dhd_readahead = bool_val;
+ break;
+
+ case IOV_GVAL(IOV_SDRXCHAIN):
+ int_val = (int32)bus->use_rxchain;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_SDRXCHAIN):
+ if (bool_val && !bus->sd_rxchain)
+ bcmerror = BCME_UNSUPPORTED;
+ else
+ bus->use_rxchain = bool_val;
+ break;
+#ifndef BCMSPI
+ case IOV_GVAL(IOV_ALIGNCTL):
+ int_val = (int32)dhd_alignctl;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_ALIGNCTL):
+ dhd_alignctl = bool_val;
+ break;
+#endif /* BCMSPI */
+
+ case IOV_GVAL(IOV_SDALIGN):
+ int_val = DHD_SDALIGN;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+#ifdef DHD_DEBUG
+ case IOV_GVAL(IOV_VARS):
+ if (bus->varsz < (uint)len)
+ bcopy(bus->vars, arg, bus->varsz);
+ else
+ bcmerror = BCME_BUFTOOSHORT;
+ break;
+#endif /* DHD_DEBUG */
+
+#ifdef DHD_DEBUG
+ /* XXX Until these return BCME ranges, make assumptions here */
+ case IOV_GVAL(IOV_SDREG):
+ {
+ sdreg_t *sd_ptr;
+ uintptr addr;
+ uint size;
+
+ sd_ptr = (sdreg_t *)params;
+
+ addr = ((uintptr)bus->regs + sd_ptr->offset);
+ size = sd_ptr->func;
+ int_val = (int32)bcmsdh_reg_read(bus->sdh, addr, size);
+ if (bcmsdh_regfail(bus->sdh))
+ bcmerror = BCME_SDIO_ERROR;
+ bcopy(&int_val, arg, sizeof(int32));
+ break;
+ }
+
+ case IOV_SVAL(IOV_SDREG):
+ {
+ sdreg_t *sd_ptr;
+ uintptr addr;
+ uint size;
+
+ sd_ptr = (sdreg_t *)params;
+
+ addr = ((uintptr)bus->regs + sd_ptr->offset);
+ size = sd_ptr->func;
+ bcmsdh_reg_write(bus->sdh, addr, size, sd_ptr->value);
+ if (bcmsdh_regfail(bus->sdh))
+ bcmerror = BCME_SDIO_ERROR;
+ break;
+ }
+
+ /* XXX Same as above */
+ /* Same as above, but offset is not backplane (not SDIO core) */
+ case IOV_GVAL(IOV_SBREG):
+ {
+ sdreg_t sdreg;
+ uint32 addr, size;
+
+ bcopy(params, &sdreg, sizeof(sdreg));
+
+ addr = SI_ENUM_BASE(bus->sih) + sdreg.offset;
+ size = sdreg.func;
+ int_val = (int32)bcmsdh_reg_read(bus->sdh, addr, size);
+ if (bcmsdh_regfail(bus->sdh))
+ bcmerror = BCME_SDIO_ERROR;
+ bcopy(&int_val, arg, sizeof(int32));
+ break;
+ }
+
+ case IOV_SVAL(IOV_SBREG):
+ {
+ sdreg_t sdreg;
+ uint32 addr, size;
+
+ bcopy(params, &sdreg, sizeof(sdreg));
+
+ addr = SI_ENUM_BASE(bus->sih) + sdreg.offset;
+ size = sdreg.func;
+ bcmsdh_reg_write(bus->sdh, addr, size, sdreg.value);
+ if (bcmsdh_regfail(bus->sdh))
+ bcmerror = BCME_SDIO_ERROR;
+ break;
+ }
+
+ case IOV_GVAL(IOV_SDCIS):
+ {
+ *(char *)arg = 0;
+
+ /* XXX Ignoring return codes, should be evident from printed results */
+ bcmstrcat(arg, "\nFunc 0\n");
+ bcmsdh_cis_read(bus->sdh, 0x10, (uint8 *)arg + strlen(arg), SBSDIO_CIS_SIZE_LIMIT);
+ bcmstrcat(arg, "\nFunc 1\n");
+ bcmsdh_cis_read(bus->sdh, 0x11, (uint8 *)arg + strlen(arg), SBSDIO_CIS_SIZE_LIMIT);
+ bcmstrcat(arg, "\nFunc 2\n");
+ bcmsdh_cis_read(bus->sdh, 0x12, (uint8 *)arg + strlen(arg), SBSDIO_CIS_SIZE_LIMIT);
+ break;
+ }
+
+ case IOV_GVAL(IOV_FORCEEVEN):
+ int_val = (int32)forcealign;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_FORCEEVEN):
+ forcealign = bool_val;
+ break;
+
+ case IOV_GVAL(IOV_TXBOUND):
+ int_val = (int32)dhd_txbound;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_TXBOUND):
+ dhd_txbound = (uint)int_val;
+ break;
+
+ case IOV_GVAL(IOV_RXBOUND):
+ int_val = (int32)dhd_rxbound;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_RXBOUND):
+ dhd_rxbound = (uint)int_val;
+ break;
+
+ case IOV_GVAL(IOV_TXMINMAX):
+ int_val = (int32)dhd_txminmax;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_TXMINMAX):
+ dhd_txminmax = (uint)int_val;
+ break;
+
+#ifdef DHD_DEBUG
+ case IOV_GVAL(IOV_SERIALCONS):
+ int_val = dhd_serialconsole(bus, FALSE, 0, &bcmerror);
+ if (bcmerror != 0)
+ break;
+
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_SERIALCONS):
+ dhd_serialconsole(bus, TRUE, bool_val, &bcmerror);
+ break;
+#endif /* DHD_DEBUG */
+
+#if defined(BCMINTERNAL) && defined(BCMDBG)
+ case IOV_SVAL(IOV_SIALL):
+ DHD_ERROR(("Dumping all the Backplane registers\n"));
+ si_viewall(bus->sih, TRUE);
+ break;
+#endif /* defined(BCMINTERNAL) && defined(BCMDBG) */
+
+#endif /* DHD_DEBUG */
+
+#if defined(DHD_SPROM)
+ case IOV_SVAL(IOV_SPROM):
+ case IOV_GVAL(IOV_SPROM):
+ {
+ uint32 offset;
+ uint size, dsize;
+
+ bool set = (actionid == IOV_SVAL(IOV_SPROM));
+
+ ASSERT(plen >= 2*sizeof(int));
+
+ offset = (uint32)int_val;
+ bcopy((char *)params + sizeof(int_val), &int_val, sizeof(int_val));
+ size = (uint)int_val;
+
+ /* Limit the size of SROM reads that may be requested by the app.
+ * gSPI has only F1 OTP visible from CC; there is no CIS in gSPI.
+ */
+ if (bus->bus == SPI_BUS)
+ size = SBSDIO_CIS_SIZE_LIMIT;
+
+ /* Do some validation */
+ dsize = set ? plen - (2 * sizeof(int)) : len;
+ if (dsize < size) {
+ DHD_ERROR(("%s: error on srom %s, addr 0x%08x size %d dsize %d\n",
+ __FUNCTION__, (set ? "write" : "read"), offset, size, dsize));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+
+ if ((offset > SROM_MAX) || ((offset + size) > SROM_MAX)) {
+ DHD_ERROR(("%s: error on srom %s, offset %d size %d exceeds limit %d\n",
+ __FUNCTION__, (set ? "write" : "read"), offset, size, SROM_MAX));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+
+ if (!set) {
+ if (!ISALIGNED((uintptr)arg, sizeof(uint16))) {
+ DHD_ERROR(("%s: srom data pointer %p not word-aligned\n",
+ __FUNCTION__, arg));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ bcmerror = srom_read(bus->sih, DHD_BUS, (void*)bus->regs, bus->dhd->osh,
+ offset, size, (uint16*)arg, FALSE);
+ GCC_DIAGNOSTIC_POP();
+
+ } else {
+ arg = (void*)((uintptr)arg + 2 * sizeof(int));
+ if (!ISALIGNED((uintptr)arg, sizeof(uint16))) {
+ DHD_ERROR(("%s: srom data pointer %p not word-aligned\n",
+ __FUNCTION__, arg));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ bcmerror = srom_write(bus->sih, DHD_BUS, (void*)bus->regs, bus->dhd->osh,
+ offset, size, (uint16*)arg);
+ GCC_DIAGNOSTIC_POP();
+ }
+ break;
+ }
+#endif /* DHD_SPROM */
+
+#ifdef SDTEST
+ case IOV_GVAL(IOV_EXTLOOP):
+ int_val = (int32)bus->ext_loop;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_EXTLOOP):
+ bus->ext_loop = bool_val;
+ break;
+
+ case IOV_GVAL(IOV_PKTGEN):
+ bcmerror = dhdsdio_pktgen_get(bus, arg);
+ break;
+
+ case IOV_SVAL(IOV_PKTGEN):
+ bcmerror = dhdsdio_pktgen_set(bus, arg);
+ break;
+#endif /* SDTEST */
+
+#if defined(USE_SDIOFIFO_IOVAR) || defined(BCMINTERNAL)
+ case IOV_GVAL(IOV_WATERMARK):
+ int_val = (int32)watermark;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_WATERMARK):
+ watermark = (uint)int_val;
+ watermark = (watermark > SBSDIO_WATERMARK_MASK) ? SBSDIO_WATERMARK_MASK : watermark;
+ DHD_ERROR(("Setting watermark as 0x%x.\n", watermark));
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_WATERMARK, (uint8)watermark, NULL);
+ break;
+
+ case IOV_GVAL(IOV_MESBUSYCTRL):
+ int_val = (int32)mesbusyctrl;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_MESBUSYCTRL):
+ mesbusyctrl = (uint)int_val;
+ mesbusyctrl = (mesbusyctrl > SBSDIO_MESBUSYCTRL_MASK)
+ ? SBSDIO_MESBUSYCTRL_MASK : mesbusyctrl;
+ DHD_ERROR(("Setting mesbusyctrl as 0x%x.\n", mesbusyctrl));
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_MESBUSYCTRL,
+ ((uint8)mesbusyctrl | 0x80), NULL);
+ break;
+#endif /* defined(USE_SDIOFIFO_IOVAR) || defined(BCMINTERNAL) */
+
+#ifdef BCMINTERNAL
+ case IOV_GVAL(IOV_FIRSTREAD):
+ int_val = (int32)firstread;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_FIRSTREAD):
+ if ((int_val < 12) || (int_val > 32)) {
+ bcmerror = BCME_BADARG;
+ break;
+ }
+ firstread = (uint)int_val;
+ break;
+
+ case IOV_GVAL(IOV_TSTOPH):
+ int_val = tstoph;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_TSTOPH):
+ if (tstoph && bus->dhd->busstate == DHD_BUS_DOWN) {
+ tstoph = bool_val;
+ bus->dhd->busstate = DHD_BUS_DATA;
+ if (bus->intr) {
+ bus->intdis = FALSE;
+ bcmsdh_intr_enable(bus->sdh);
+ }
+ } else {
+ tstoph = bool_val;
+ }
+ break;
+
+ case IOV_GVAL(IOV_RETRYDATA):
+ int_val = (int32)retrydata;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_RETRYDATA):
+ retrydata = bool_val;
+ break;
+
+ case IOV_GVAL(IOV_CHECKFIFO):
+ int_val = (int32)checkfifo;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_CHECKFIFO):
+ checkfifo = bool_val;
+ break;
+
+ case IOV_GVAL(IOV_SDF2):
+ case IOV_SVAL(IOV_SDF2):
+ {
+ uint8 *buf;
+ int ret = BCME_OK;
+
+ if (!(buf = MALLOC(bus->dhd->osh, int_val))) {
+ bcmerror = BCME_NOMEM;
+ break;
+ }
+
+ if (actionid == IOV_SVAL(IOV_SDF2)) {
+ ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(bus->sdh), SDIO_FUNC_2,
+ F2SYNC, buf, int_val, NULL, NULL, NULL, 1);
+ } else {
+ ret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(bus->sdh), SDIO_FUNC_2,
+ F2SYNC, buf, int_val, NULL, NULL, NULL);
+ }
+ if (ret != BCME_OK) {
+ bcmerror = BCME_SDIO_ERROR;
+ }
+
+ MFREE(bus->dhd->osh, buf, int_val);
+
+ break;
+ }
+
+ case IOV_SVAL(IOV_CPU):
+ /* Go to the ARM core */
+ if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
+ !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) {
+ DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ break;
+ }
+
+ /* Take the request action */
+ if (bool_val)
+ si_core_reset(bus->sih, 0, 0);
+ else
+ si_core_disable(bus->sih, 0);
+
+ if (bcmsdh_regfail(bus->sdh))
+ bcmerror = BCME_SDIO_ERROR;
+
+ /* Return to the SDIO core */
+ if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0))
+ si_setcore(bus->sih, SDIOD_CORE_ID, 0);
+ break;
+
+ case IOV_GVAL(IOV_CPU):
+ /* Go to the ARM core */
+ if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
+ !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) {
+ DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ break;
+ }
+
+ /* Get its status */
+ int_val = (int32)si_iscoreup(bus->sih);
+ bcopy(&int_val, arg, val_size);
+
+ /* Return to the SDIO core */
+ if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0))
+ si_setcore(bus->sih, SDIOD_CORE_ID, 0);
+ break;
+
+ case IOV_SVAL(IOV_SDRESET):
+ bcmsdh_reset(bus->sdh);
+ break;
+
+ case IOV_SVAL(IOV_SDABORT):
+ if (int_val == 1 || int_val == 2)
+ bcmsdh_abort(bus->sdh, int_val);
+ else
+ bcmerror = BCME_BADARG;
+ break;
+
+ case IOV_GVAL(IOV_DOFLOW):
+ int_val = (int32)dhd_doflow;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_DOFLOW):
+ dhd_doflow = bool_val;
+ /* Release flow control when turning flow control off */
+ if (!dhd_doflow)
+ dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF);
+ break;
+
+ case IOV_GVAL(IOV_CLOCKPOLL):
+ int_val = (int32)bus->clockpoll;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_GVAL(IOV_MAXRAMSIZE):
+ int_val = (int32)bus->ramsize;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_MAXRAMSIZE):
+ dhd_dongle_ramsize = int_val;
+ dhd_dongle_setramsize(bus, dhd_dongle_ramsize);
+ break;
+
+ case IOV_SVAL(IOV_CLOCKPOLL):
+ bus->clockpoll = bool_val;
+ break;
+#endif /* BCMINTERNAL */
+
+ case IOV_GVAL(IOV_DONGLEISOLATION):
+ int_val = bus->dhd->dongle_isolation;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_DONGLEISOLATION):
+ bus->dhd->dongle_isolation = bool_val;
+ break;
+
+ case IOV_SVAL(IOV_DEVRESET):
+ DHD_TRACE(("%s: Called set IOV_DEVRESET=%d dongle_reset=%d busstate=%d\n",
+ __FUNCTION__, bool_val, bus->dhd->dongle_reset,
+ bus->dhd->busstate));
+
+ ASSERT(bus->dhd->osh);
+ /* ASSERT(bus->cl_devid); */
+
+ /* must release sdlock, since devreset also acquires it */
+ dhd_os_sdunlock(bus->dhd);
+ dhd_bus_devreset(bus->dhd, (uint8)bool_val);
+ dhd_os_sdlock(bus->dhd);
+ break;
+ /*
+ * softap firmware is updated through module parameter or android private command
+ */
+
+ case IOV_GVAL(IOV_DEVRESET):
+ DHD_TRACE(("%s: Called get IOV_DEVRESET\n", __FUNCTION__));
+
+ /* Get its status */
+ int_val = (bool) bus->dhd->dongle_reset;
+ bcopy(&int_val, arg, val_size);
+
+ break;
+
+ case IOV_GVAL(IOV_KSO):
+ int_val = dhdsdio_sleepcsr_get(bus);
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_GVAL(IOV_DEVCAP):
+ int_val = dhdsdio_devcap_get(bus);
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_DEVCAP):
+ dhdsdio_devcap_set(bus, (uint8) int_val);
+ break;
+ case IOV_GVAL(IOV_TXGLOMSIZE):
+ int_val = (int32)bus->txglomsize;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_TXGLOMSIZE):
+ if (int_val > SDPCM_MAXGLOM_SIZE) {
+ bcmerror = BCME_ERROR;
+ } else {
+ bus->txglomsize = (uint)int_val;
+ }
+ break;
+ case IOV_SVAL(IOV_HANGREPORT):
+ bus->dhd->hang_report = bool_val;
+ DHD_ERROR(("%s: Set hang_report as %d\n", __FUNCTION__, bus->dhd->hang_report));
+ break;
+
+ case IOV_GVAL(IOV_HANGREPORT):
+ int_val = (int32)bus->dhd->hang_report;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_GVAL(IOV_TXINRX_THRES):
+ int_val = bus->txinrx_thres;
+ bcopy(&int_val, arg, val_size);
+ break;
+ case IOV_SVAL(IOV_TXINRX_THRES):
+ if (int_val < 0) {
+ bcmerror = BCME_BADARG;
+ } else {
+ bus->txinrx_thres = int_val;
+ }
+ break;
+
+ case IOV_GVAL(IOV_SDIO_SUSPEND):
+ int_val = (bus->dhd->busstate == DHD_BUS_SUSPEND) ? 1 : 0;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_SDIO_SUSPEND):
+ if (bool_val) { /* Suspend */
+ dhdsdio_suspend(bus);
+ }
+ else { /* Resume */
+ dhdsdio_resume(bus);
+ }
+ break;
+
+#if defined(DEBUGGER) || defined(DHD_DSCOPE)
+ case IOV_SVAL(IOV_GDB_SERVER):
+ if (bool_val == TRUE) {
+ debugger_init((void *) bus, &bus_ops, int_val, SI_ENUM_BASE(bus->sih));
+ } else {
+ debugger_close();
+ }
+ break;
+#endif /* DEBUGGER || DHD_DSCOPE */
+
+ default:
+ bcmerror = BCME_UNSUPPORTED;
+ break;
+ }
+
+exit:
+ if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched &&
+ NO_OTHER_ACTIVE_BUS_USER(bus)) {
+ bus->activity = FALSE;
+ dhdsdio_bussleep(bus, TRUE);
+ dhdsdio_clkctl(bus, CLK_NONE, FALSE);
+ }
+
+ dhd_os_sdunlock(bus->dhd);
+
+ return bcmerror;
+}
+
+static int
+dhdsdio_write_vars(dhd_bus_t *bus)
+{
+ int bcmerror = 0;
+ uint32 varsize, phys_size;
+ uint32 varaddr;
+ uint8 *vbuffer;
+ uint32 varsizew;
+#ifdef DHD_DEBUG
+ uint8 *nvram_ularray;
+#endif /* DHD_DEBUG */
+
+ /* Even if there are no vars to be written, we still need to set the ramsize. */
+ varsize = bus->varsz ? ROUNDUP(bus->varsz, 4) : 0;
+ varaddr = (bus->ramsize - 4) - varsize;
+
+ // terence 20150412: fix for nvram failing to download
+ if (bus->dhd->conf->chip == BCM43340_CHIP_ID ||
+ bus->dhd->conf->chip == BCM43341_CHIP_ID) {
+ varsize = varsize ? ROUNDUP(varsize, 64) : 0;
+ varaddr = (bus->ramsize - 64) - varsize;
+ }
+
+ varaddr += bus->dongle_ram_base;
+
+ if (bus->vars) {
+ /* XXX: WAR for PR85623 */
+ if ((bus->sih->buscoretype == SDIOD_CORE_ID) && (bus->sdpcmrev == 7)) {
+ if (((varaddr & 0x3C) == 0x3C) && (varsize > 4)) {
+ DHD_ERROR(("PR85623WAR in place\n"));
+ varsize += 4;
+ varaddr -= 4;
+ }
+ }
+
+ /* XXX In case the controller has trouble with odd bytes... */
+ vbuffer = (uint8 *)MALLOC(bus->dhd->osh, varsize);
+ if (!vbuffer)
+ return BCME_NOMEM;
+
+ bzero(vbuffer, varsize);
+ bcopy(bus->vars, vbuffer, bus->varsz);
+
+ /* Write the vars list */
+ bcmerror = dhdsdio_membytes(bus, TRUE, varaddr, vbuffer, varsize);
+ if (bcmerror) {
+ DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
+ __FUNCTION__, bcmerror, varsize, varaddr));
+ return bcmerror;
+ }
+
+#ifdef DHD_DEBUG
+ /* Verify NVRAM bytes */
+ DHD_INFO(("Compare NVRAM dl & ul; varsize=%d\n", varsize));
+ nvram_ularray = (uint8*)MALLOC(bus->dhd->osh, varsize);
+ if (!nvram_ularray) {
+ MFREE(bus->dhd->osh, vbuffer, varsize);
+ return BCME_NOMEM;
+ }
+
+ /* Upload image to verify downloaded contents. */
+ memset(nvram_ularray, 0xaa, varsize);
+
+ /* Read the vars list to temp buffer for comparison */
+ bcmerror = dhdsdio_membytes(bus, FALSE, varaddr, nvram_ularray, varsize);
+ if (bcmerror) {
+ DHD_ERROR(("%s: error %d on reading %d nvram bytes at 0x%08x\n",
+ __FUNCTION__, bcmerror, varsize, varaddr));
+ }
+ /* Compare the org NVRAM with the one read from RAM */
+ if (memcmp(vbuffer, nvram_ularray, varsize)) {
+ DHD_ERROR(("%s: Downloaded NVRAM image is corrupted.\n", __FUNCTION__));
+ } else
+ DHD_ERROR(("%s: Download, Upload and compare of NVRAM succeeded.\n",
+ __FUNCTION__));
+
+ MFREE(bus->dhd->osh, nvram_ularray, varsize);
+#endif /* DHD_DEBUG */
+
+ MFREE(bus->dhd->osh, vbuffer, varsize);
+ }
+
+#ifdef MINIME
+ phys_size = bus->ramsize;
+#else
+ phys_size = REMAP_ENAB(bus) ? bus->ramsize : bus->orig_ramsize;
+#endif
+
+ phys_size += bus->dongle_ram_base;
+
+ /* adjust to the user specified RAM */
+ DHD_INFO(("Physical memory size: %d, usable memory size: %d\n",
+ phys_size, bus->ramsize));
+ DHD_INFO(("Vars are at %d, orig varsize is %d\n",
+ varaddr, varsize));
+ varsize = ((phys_size - 4) - varaddr);
+
+ /*
+ * Determine the length token:
+ * Varsize, converted to words, in lower 16-bits, checksum in upper 16-bits.
+ */
+#ifdef DHD_DEBUG
+ if (bcmerror) {
+ varsizew = 0;
+ } else
+#endif /* DHD_DEBUG */
+ {
+ varsizew = varsize / 4;
+ varsizew = (~varsizew << 16) | (varsizew & 0x0000FFFF);
+ varsizew = htol32(varsizew);
+ }
+
+ DHD_INFO(("New varsize is %d, length token=0x%08x\n", varsize, varsizew));
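+
+ /* Worked example: varsize 0x800 gives w = 0x800 / 4 = 0x200, so the
+ * token is (~0x200 << 16) | 0x200 = 0xFDFF0200. dhdsdio_readshared()
+ * relies on this hi/lo complement to tell a leftover token apart from
+ * a real sdpcm_shared address.
+ */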
+
+ /* Write the length token to the last word */
+ bcmerror = dhdsdio_membytes(bus, TRUE, (phys_size - 4),
+ (uint8*)&varsizew, 4);
+
+ return bcmerror;
+}
+
+bool
+dhd_bus_is_multibp_capable(struct dhd_bus *bus)
+{
+ return MULTIBP_CAP(bus->sih);
+}
+
+static int
+dhdsdio_download_state(dhd_bus_t *bus, bool enter)
+{
+ uint retries;
+ int bcmerror = 0;
+ int foundcr4 = 0;
+
+ if (!bus->sih)
+ return BCME_ERROR;
+ /* To enter download state, disable ARM and reset SOCRAM.
+ * To exit download state, simply reset ARM (default is RAM boot).
+ */
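+ /* Expected call sequence (sketch of the usual download path):
+ *	dhdsdio_download_state(bus, TRUE);	// disable ARM, reset SOCRAM
+ *	...download firmware image via dhdsdio_membytes()...
+ *	dhdsdio_downloadvars(bus, vars, len);	// stage NVRAM vars
+ *	dhdsdio_download_state(bus, FALSE);	// write vars + token, reset ARM
+ */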
+ if (enter) {
+ bus->alp_only = TRUE;
+
+ if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
+ !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) {
+ if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
+ foundcr4 = 1;
+ } else {
+ DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
+ }
+
+ if (!foundcr4) {
+ si_core_disable(bus->sih, 0);
+ if (bcmsdh_regfail(bus->sdh)) {
+ bcmerror = BCME_SDIO_ERROR;
+ goto fail;
+ }
+
+ if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
+ DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
+
+ si_core_reset(bus->sih, 0, 0);
+ if (bcmsdh_regfail(bus->sdh)) {
+ DHD_ERROR(("%s: Failure trying to reset SOCRAM core?\n",
+ __FUNCTION__));
+ bcmerror = BCME_SDIO_ERROR;
+ goto fail;
+ }
+
+ if (CHIPID(bus->sih->chip) == BCM43430_CHIP_ID ||
+ CHIPID(bus->sih->chip) == BCM43018_CHIP_ID) {
+ /* Disabling Remap for SRAM_3 */
+ si_socram_set_bankpda(bus->sih, 0x3, 0x0);
+ }
+
+ /* Clear the top bit of memory */
+ if (bus->ramsize) {
+ uint32 zeros = 0;
+ if (dhdsdio_membytes(bus, TRUE, bus->ramsize - 4,
+ (uint8*)&zeros, 4) < 0) {
+ bcmerror = BCME_SDIO_ERROR;
+ goto fail;
+ }
+ }
+ } else {
+ /* For CR4,
+ * Halt ARM
+ * Remove ARM reset
+ * Read RAM base address [0x18_0000]
+ * [next] Download firmware
+ * [done at else] Populate the reset vector
+ * [done at else] Remove ARM halt
+ */
+ /* Halt ARM & remove reset */
+ si_core_reset(bus->sih, SICF_CPUHALT, SICF_CPUHALT);
+ }
+ } else {
+ if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
+ if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
+ DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
+
+ if (!si_iscoreup(bus->sih)) {
+ DHD_ERROR(("%s: SOCRAM core is down after reset?\n", __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
+
+ if ((bcmerror = dhdsdio_write_vars(bus))) {
+ DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__));
+ goto fail;
+ }
+
+#ifdef BCMSDIOLITE
+ if (!si_setcore(bus->sih, CC_CORE_ID, 0)) {
+ DHD_ERROR(("%s: Can't set to Chip Common core?\n", __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
+#else
+ if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0) &&
+ !si_setcore(bus->sih, SDIOD_CORE_ID, 0)) {
+ DHD_ERROR(("%s: Can't change back to SDIO core?\n", __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
+#endif
+ W_SDREG(0xFFFFFFFF, &bus->regs->intstatus, retries);
+
+ /* XXX Change standby configuration here if necessary */
+
+ if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
+ !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) {
+ DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
+ } else {
+ /* CR4 has no SOCRAM, but TCMs */
+ /* Write vars */
+ if ((bcmerror = dhdsdio_write_vars(bus))) {
+ DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__));
+ goto fail;
+ }
+#ifdef BCMSDIOLITE
+ if (!si_setcore(bus->sih, CC_CORE_ID, 0)) {
+ DHD_ERROR(("%s: Can't set to Chip Common core?\n", __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
+#else
+ if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0) &&
+ !si_setcore(bus->sih, SDIOD_CORE_ID, 0)) {
+ DHD_ERROR(("%s: Can't change back to SDIO core?\n", __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
+#endif
+ W_SDREG(0xFFFFFFFF, &bus->regs->intstatus, retries);
+
+ /* switch back to arm core again */
+ if (!(si_setcore(bus->sih, ARMCR4_CORE_ID, 0))) {
+ DHD_ERROR(("%s: Failed to find ARM CR4 core!\n", __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
+ /* write address 0 with reset instruction */
+ bcmerror = dhdsdio_membytes(bus, TRUE, 0,
+ (uint8 *)&bus->resetinstr, sizeof(bus->resetinstr));
+
+ if (bcmerror == BCME_OK) {
+ uint32 tmp;
+
+ /* verify write */
+ bcmerror = dhdsdio_membytes(bus, FALSE, 0,
+ (uint8 *)&tmp, sizeof(tmp));
+
+ if (bcmerror == BCME_OK && tmp != bus->resetinstr) {
+ DHD_ERROR(("%s: Failed to write 0x%08x to addr 0\n",
+ __FUNCTION__, bus->resetinstr));
+ DHD_ERROR(("%s: contents of addr 0 is 0x%08x\n",
+ __FUNCTION__, tmp));
+ bcmerror = BCME_SDIO_ERROR;
+ goto fail;
+ }
+ }
+
+ /* now remove reset and halt and continue to run CR4 */
+ }
+
+ si_core_reset(bus->sih, 0, 0);
+ if (bcmsdh_regfail(bus->sdh)) {
+ DHD_ERROR(("%s: Failure trying to reset ARM core?\n", __FUNCTION__));
+ bcmerror = BCME_SDIO_ERROR;
+ goto fail;
+ }
+
+ /* Allow HT Clock now that the ARM is running. */
+ bus->alp_only = FALSE;
+
+ bus->dhd->busstate = DHD_BUS_LOAD;
+ }
+
+fail:
+ /* Always return to SDIOD core */
+ if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0))
+ si_setcore(bus->sih, SDIOD_CORE_ID, 0);
+
+ return bcmerror;
+}
+
+int
+dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name,
+ void *params, uint plen, void *arg, uint len, bool set)
+{
+ dhd_bus_t *bus = dhdp->bus;
+ const bcm_iovar_t *vi = NULL;
+ int bcmerror = 0;
+ uint val_size;
+ uint32 actionid;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ ASSERT(name);
+
+ /* Get MUST have return space */
+ ASSERT(set || (arg && len));
+
+ /* Set does NOT take qualifiers */
+ ASSERT(!set || (!params && !plen));
+
+ /* Look up var locally; if not found pass to host driver */
+ if ((vi = bcm_iovar_lookup(dhdsdio_iovars, name)) == NULL) {
+ dhd_os_sdlock(bus->dhd);
+
+ BUS_WAKE(bus);
+
+ /* Turn on clock in case SD command needs backplane */
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+ bcmerror = bcmsdh_iovar_op(bus->sdh, name, params, plen, arg, len, set);
+
+ /* Check for bus configuration changes of interest */
+
+ /* If it was divisor change, read the new one */
+ if (set && strcmp(name, "sd_divisor") == 0) {
+ if (bcmsdh_iovar_op(bus->sdh, "sd_divisor", NULL, 0,
+ &bus->sd_divisor, sizeof(int32), FALSE) != BCME_OK) {
+ bus->sd_divisor = -1;
+ DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, name));
+ } else {
+ DHD_INFO(("%s: noted %s update, value now %d\n",
+ __FUNCTION__, name, bus->sd_divisor));
+ }
+ }
+ /* If it was a mode change, read the new one */
+ if (set && strcmp(name, "sd_mode") == 0) {
+ if (bcmsdh_iovar_op(bus->sdh, "sd_mode", NULL, 0,
+ &bus->sd_mode, sizeof(int32), FALSE) != BCME_OK) {
+ bus->sd_mode = -1;
+ DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, name));
+ } else {
+ DHD_INFO(("%s: noted %s update, value now %d\n",
+ __FUNCTION__, name, bus->sd_mode));
+ }
+ }
+ /* Similar check for blocksize change */
+ if (set && strcmp(name, "sd_blocksize") == 0) {
+ int32 fnum = 2;
+ if (bcmsdh_iovar_op(bus->sdh, "sd_blocksize", &fnum, sizeof(int32),
+ &bus->blocksize, sizeof(int32), FALSE) != BCME_OK) {
+ bus->blocksize = 0;
+ DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_blocksize"));
+ } else {
+ DHD_INFO(("%s: noted %s update, value now %d\n",
+ __FUNCTION__, "sd_blocksize", bus->blocksize));
+
+ dhdsdio_tune_fifoparam(bus);
+ }
+ }
+ bus->roundup = MIN(max_roundup, bus->blocksize);
+
+ if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched &&
+ NO_OTHER_ACTIVE_BUS_USER(bus)) {
+ bus->activity = FALSE;
+ dhdsdio_bussleep(bus, TRUE);
+ dhdsdio_clkctl(bus, CLK_NONE, FALSE);
+ }
+
+ dhd_os_sdunlock(bus->dhd);
+ goto exit;
+ }
+
+ DHD_CTL(("%s: %s %s, len %d plen %d\n", __FUNCTION__,
+ name, (set ? "set" : "get"), len, plen));
+
+ /* set up 'params' pointer in case this is a set command so that
+ * the convenience int and bool code can be common to set and get
+ */
+ if (params == NULL) {
+ params = arg;
+ plen = len;
+ }
+
+ if (vi->type == IOVT_VOID)
+ val_size = 0;
+ else if (vi->type == IOVT_BUFFER)
+ val_size = len;
+ else
+ /* all other types are integer sized */
+ val_size = sizeof(int);
+
+ actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
+ bcmerror = dhdsdio_doiovar(bus, vi, actionid, name, params, plen, arg, len, val_size);
+
+exit:
+ return bcmerror;
+}
+
+void
+dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex)
+{
+ osl_t *osh;
+ uint32 local_hostintmask;
+ uint8 saveclk;
+ uint retries;
+ int err;
+ bool wlfc_enabled = FALSE;
+ unsigned long flags;
+
+ if (!bus->dhd)
+ return;
+
+ osh = bus->dhd->osh;
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ bcmsdh_waitlockfree(bus->sdh);
+
+ if (enforce_mutex)
+ dhd_os_sdlock(bus->dhd);
+
+ if ((bus->dhd->busstate == DHD_BUS_DOWN) || bus->dhd->hang_was_sent) {
+ /* If the firmware has already hung, disable any interrupts */
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
+ bus->dhd->busstate = DHD_BUS_DOWN;
+ bus->hostintmask = 0;
+ bcmsdh_intr_disable(bus->sdh);
+ } else {
+
+ BUS_WAKE(bus);
+
+ if (KSO_ENAB(bus)) {
+
+ /* Enable clock for device interrupts */
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+ /* Disable and clear interrupts at the chip level also */
+ W_SDREG(0, &bus->regs->hostintmask, retries);
+ local_hostintmask = bus->hostintmask;
+ bus->hostintmask = 0;
+
+ /* Force clocks on backplane to be sure F2 interrupt propagates */
+ saveclk = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+ if (!err) {
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+ (saveclk | SBSDIO_FORCE_HT), &err);
+ }
+ if (err) {
+ DHD_ERROR(("%s: Failed to force clock for F2: err %d\n",
+ __FUNCTION__, err));
+ }
+
+ /* Turn off the bus (F2), free any pending packets */
+ /* XXX How to wake up any waiting processes? */
+ /* XXX New API: bcmsdh_fn_set(bus->sdh, SDIO_FUNC_2, FALSE); */
+ DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__));
+#if !defined(NDIS)
+ bcmsdh_intr_disable(bus->sdh); /* XXX bcmsdh_intr_mask(bus->sdh); */
+#endif /* !defined(NDIS) */
+#ifndef BCMSPI
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, SDIO_FUNC_ENABLE_1, NULL);
+#endif /* !BCMSPI */
+
+ /* Clear any pending interrupts now that F2 is disabled */
+ W_SDREG(local_hostintmask, &bus->regs->intstatus, retries);
+ }
+
+ /* Turn off the backplane clock (only) */
+ dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+
+ /* Change our idea of bus state */
+ DHD_LINUX_GENERAL_LOCK(bus->dhd, flags);
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
+ bus->dhd->busstate = DHD_BUS_DOWN;
+ DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags);
+ }
+
+#ifdef PROP_TXSTATUS
+ wlfc_enabled = (dhd_wlfc_cleanup_txq(bus->dhd, NULL, 0) != WLFC_UNSUPPORTED);
+#endif
+ if (!wlfc_enabled) {
+#ifdef DHDTCPACK_SUPPRESS
+ /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
+ * when there is a newly coming packet from network stack.
+ */
+ dhd_tcpack_info_tbl_clean(bus->dhd);
+#endif /* DHDTCPACK_SUPPRESS */
+ dhd_os_sdlock_txq(bus->dhd);
+ /* Clear the data packet queues */
+ pktq_flush(osh, &bus->txq, TRUE);
+ dhd_os_sdunlock_txq(bus->dhd);
+ }
+
+ /* Clear any held glomming stuff */
+ if (bus->glomd)
+ PKTFREE(osh, bus->glomd, FALSE);
+
+ if (bus->glom)
+ PKTFREE(osh, bus->glom, FALSE);
+
+ bus->glom = bus->glomd = NULL;
+
+ /* Clear rx control and wake any waiters */
+ /* XXX More important in disconnect, but no context? */
+ bus->rxlen = 0;
+ dhd_os_ioctl_resp_wake(bus->dhd);
+
+ /* Reset some F2 state stuff */
+ bus->rxskip = FALSE;
+ bus->tx_seq = bus->rx_seq = 0;
+
+ /* Initialize tx_max to a reasonable value to start transfers.
+ * It gets updated to the correct value after receiving the first
+ * packet from firmware.
+ * XXX - Need to find the right mechanism to query it from
+ * firmware when the device is coming up.
+ */
+ bus->tx_max = 4;
+
+ if (enforce_mutex)
+ dhd_os_sdunlock(bus->dhd);
+}
+
+#if defined(BCMSDIOH_TXGLOM) && defined(BCMSDIOH_STD)
+extern uint sd_txglom;
+#endif
+void
+dhd_txglom_enable(dhd_pub_t *dhdp, bool enable)
+{
+ /* Can't enable host txglom by default: some platforms have no (or
+ * poor) ADMA support, and txglom will cause kernel assertions there
+ * (e.g. the panda board).
+ */
+ dhd_bus_t *bus = dhdp->bus;
+#ifdef BCMSDIOH_TXGLOM
+ uint32 rxglom;
+ int32 ret;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+#ifdef BCMSDIOH_STD
+ if (enable)
+ enable = sd_txglom;
+#endif /* BCMSDIOH_STD */
+
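+ /* Probe for firmware support by setting the "bus:rxglom" iovar; only if
+ * the firmware accepts it is host tx glomming enabled as well.
+ */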
+ if (enable) {
+ rxglom = 1;
+ ret = dhd_iovar(dhdp, 0, "bus:rxglom", (char *)&rxglom, sizeof(rxglom), NULL, 0,
+ TRUE);
+ if (ret >= 0)
+ bus->txglom_enable = TRUE;
+ else {
+#ifdef BCMSDIOH_STD
+ sd_txglom = 0;
+#endif /* BCMSDIOH_STD */
+ bus->txglom_enable = FALSE;
+ }
+ } else
+#endif /* BCMSDIOH_TXGLOM */
+ bus->txglom_enable = FALSE;
+ printf("%s: enable %d\n", __FUNCTION__, bus->txglom_enable);
+ dhd_conf_set_txglom_params(bus->dhd, bus->txglom_enable);
+ bcmsdh_set_mode(bus->sdh, bus->dhd->conf->txglom_mode);
+}
+
+int
+dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex)
+{
+ dhd_bus_t *bus = dhdp->bus;
+ dhd_timeout_t tmo;
+ uint retries = 0;
+ uint8 ready, enable;
+ int err, ret = 0;
+#ifdef BCMSPI
+ uint32 dstatus = 0; /* gSPI device-status bits */
+#else /* BCMSPI */
+ uint8 saveclk;
+#endif /* BCMSPI */
+#if defined(SDIO_ISR_THREAD)
+ int intr_extn;
+#endif
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ ASSERT(bus->dhd);
+ if (!bus->dhd)
+ return 0;
+
+ if (enforce_mutex)
+ dhd_os_sdlock(bus->dhd);
+
+ if (bus->sih->chip == BCM43362_CHIP_ID) {
+ printf("%s: delay 100ms for BCM43362\n", __FUNCTION__);
+ OSL_DELAY(100000); // terence 20131209: delay for 43362
+ }
+
+ /* Make sure backplane clock is on, needed to generate F2 interrupt */
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+ if (bus->clkstate != CLK_AVAIL) {
+ DHD_ERROR(("%s: clock state is wrong. state = %d\n", __FUNCTION__, bus->clkstate));
+ ret = -1;
+ goto exit;
+ }
+
+#ifdef BCMSPI
+ /* Fake "ready" for SPI; wake-wlan will already have enabled F1 and F2 */
+ ready = (SDIO_FUNC_ENABLE_1 | SDIO_FUNC_ENABLE_2);
+ enable = 0;
+
+ /* Give the dongle some time to do its thing and set IOR2 */
+ dhd_timeout_start(&tmo, WAIT_F2RXFIFORDY * WAIT_F2RXFIFORDY_DELAY * 1000);
+ while (!enable && !dhd_timeout_expired(&tmo)) {
+ dstatus = bcmsdh_cfg_read_word(bus->sdh, SDIO_FUNC_0, SPID_STATUS_REG, NULL);
+ if (dstatus & STATUS_F2_RX_READY)
+ enable = TRUE;
+ }
+
+ if (enable) {
+ DHD_ERROR(("Took %u usec before dongle is ready\n", tmo.elapsed));
+ enable = ready;
+ } else {
+ DHD_ERROR(("dstatus when timed out on f2-fifo not ready = 0x%x\n", dstatus));
+ DHD_ERROR(("Waited %u usec, dongle is not ready\n", tmo.elapsed));
+ ret = -1;
+ goto exit;
+ }
+
+#else /* !BCMSPI */
+ /* Force clocks on backplane to be sure F2 interrupt propagates */
+ saveclk = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+
+ if (!err) {
+#ifndef BCMQT
+ /* QT requires HT clock */
+ if (bus->sih->chip == BCM43012_CHIP_ID ||
+ bus->sih->chip == BCM43013_CHIP_ID ||
+ bus->sih->chip == BCM43014_CHIP_ID) {
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+ (saveclk | SBSDIO_HT_AVAIL_REQ), &err);
+ } else
+#endif /* BCMQT */
+ {
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+ (saveclk | SBSDIO_FORCE_HT), &err);
+ }
+ }
+
+ if (err) {
+ DHD_ERROR(("%s: Failed to force clock for F2: err %d\n", __FUNCTION__, err));
+ ret = -1;
+ goto exit;
+ }
+
+ /* Enable function 2 (frame transfers) */
+ /* XXX New API: change to bcmsdh_fn_set(sdh, SDIO_FUNC_2, TRUE); */
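+ /* The host's SDPCM protocol version is placed in tosbmailboxdata before
+ * F2 is enabled, so the dongle can check protocol compatibility.
+ */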
+ W_SDREG((SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT),
+ &bus->regs->tosbmailboxdata, retries);
+ enable = (SDIO_FUNC_ENABLE_1 | SDIO_FUNC_ENABLE_2);
+
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, enable, NULL);
+
+ /* Give the dongle some time to do its thing and set IOR2 */
+#ifdef BCMSLTGT
+ dhd_timeout_start(&tmo, DHD_WAIT_F2RDY * 1000 * htclkratio);
+#else
+ dhd_timeout_start(&tmo, DHD_WAIT_F2RDY * 1000);
+#endif /* BCMSLTGT */
+
+ ready = 0;
+ while (ready != enable && !dhd_timeout_expired(&tmo))
+ ready = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IORDY, NULL);
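+ /* "ready" mirrors SDIOD_CCCR_IORDY: the dongle sets the per-function
+ * ready bits as each function comes up, so ready == enable means both
+ * F1 and F2 are operational.
+ */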
+
+#endif /* !BCMSPI */
+
+ DHD_ERROR(("%s: enable 0x%02x, ready 0x%02x (waited %uus)\n",
+ __FUNCTION__, enable, ready, tmo.elapsed));
+
+#if defined(SDIO_ISR_THREAD)
+ if (dhdp->conf->intr_extn) {
+ intr_extn = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_INTR_EXTN, NULL);
+ if (intr_extn & 0x1) {
+ intr_extn |= 0x2;
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_INTR_EXTN, intr_extn, NULL);
+ }
+ }
+#endif
+
+ /* XXX For simplicity, fail and await next call if F2 not ready.
+ * XXX Should really set timer to poll, and/or enable interrupt;
+ * XXX then put this process in wait until a result...
+ */
+
+ /* If F2 successfully enabled, set core and enable interrupts */
+ if (ready == enable) {
+ /* Make sure we're talking to the core. */
+#ifdef BCMSDIOLITE
+ bus->regs = si_setcore(bus->sih, CC_CORE_ID, 0);
+ ASSERT(bus->regs != NULL);
+#else
+ if (!(bus->regs = si_setcore(bus->sih, PCMCIA_CORE_ID, 0)))
+ bus->regs = si_setcore(bus->sih, SDIOD_CORE_ID, 0);
+ ASSERT(bus->regs != NULL);
+#endif
+ /* Set up the interrupt mask and enable interrupts */
+ bus->hostintmask = HOSTINTMASK;
+ /* corerev 4 could use the newer interrupt logic to detect the frames */
+#ifndef BCMSPI
+ if ((bus->sih->buscoretype == SDIOD_CORE_ID) && (bus->sdpcmrev == 4) &&
+ (bus->rxint_mode != SDIO_DEVICE_HMB_RXINT)) {
+ bus->hostintmask &= ~I_HMB_FRAME_IND;
+ bus->hostintmask |= I_XMTDATA_AVAIL;
+ }
+#endif /* BCMSPI */
+ W_SDREG(bus->hostintmask, &bus->regs->hostintmask, retries);
+
+ /* PR47410 - Lower F2 Watermark to avoid DMA Hang
+ * in F2 when SD Clock is stopped.
+ */
+ if (bus->sih->buscorerev < 15) {
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_WATERMARK,
+ (uint8)watermark, &err);
+ }
+
+ /* Set bus state according to enable result */
+ dhdp->busstate = DHD_BUS_DATA;
+
+ /* Need to set the fn2 block size to match the fn1 block size.
+ * Requests to fn2 go through fn1.
+ * faltwig has this code conditioned with #if !BCMSPI_ANDROID.
+ * It would be cleaner to use the ->sdh->block_sz[fno] instead of
+ * 64, but this layer has no access to sdh types.
+ */
+#if defined(NDIS)
+ {
+ uint8 *ptr = NULL;
+ uint16 block_sz = 64;
+ ptr = (uint8*) &block_sz;
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0,
+ (SDIOD_FBR_BASE(SDIO_FUNC_2) + SDIOD_CCCR_BLKSIZE_0),
+ *ptr++, &err);
+ if (err == BCME_OK)
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0,
+ (SDIOD_FBR_BASE(SDIO_FUNC_2) + SDIOD_CCCR_BLKSIZE_1),
+ *ptr++, &err);
+ if (err != BCME_OK) {
+ printf("%s: set block size for func 2 failed\n",
+ __FUNCTION__);
+ ret = -1;
+ goto exit;
+ }
+ }
+#endif /* NDIS */
+
+ /* XXX These need to change w/API updates */
+ /* bcmsdh_intr_unmask(bus->sdh); */
+
+ bus->intdis = FALSE;
+ if (bus->intr) {
+ DHD_INTR(("%s: enable SDIO device interrupts\n", __FUNCTION__));
+#ifndef BCMSPI_ANDROID
+ bcmsdh_intr_enable(bus->sdh);
+#endif /* !BCMSPI_ANDROID */
+ } else {
+ DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__));
+ bcmsdh_intr_disable(bus->sdh);
+ }
+
+#ifdef DEBUG_LOST_INTERRUPTS
+ {
+ uint32 intstatus;
+ bool hostpending;
+ uint8 devena, devpend;
+ uint sdr_retries = 0;
+
+ hostpending = bcmsdh_intr_pending(bus->sdh);
+ devena = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_INTEN, NULL);
+ devpend = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_INTPEND, NULL);
+
+ R_SDREG(intstatus, &bus->regs->intstatus, sdr_retries);
+ intstatus &= bus->hostintmask;
+
+ DHD_ERROR(("%s: interrupts -- host %s device ena/pend 0x%02x/0x%02x\n"
+ "intstatus 0x%08x, hostmask 0x%08x\n", __FUNCTION__,
+ (hostpending ? "PENDING" : "NOT PENDING"),
+ devena, devpend, intstatus, bus->hostintmask));
+ }
+#endif /* DEBUG_LOST_INTERRUPTS */
+ }
+
+#ifndef BCMSPI
+
+ else {
+ /* Disable F2 again */
+ enable = SDIO_FUNC_ENABLE_1;
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, enable, NULL);
+ }
+
+ if (dhdsdio_sr_cap(bus)) {
+ dhdsdio_sr_init(bus);
+ /* Mask the chip-active interrupt permanently */
+ bus->hostintmask &= ~I_CHIPACTIVE;
+ W_SDREG(bus->hostintmask, &bus->regs->hostintmask, retries);
+ DHD_INFO(("%s: disable I_CHIPACTIVE in hostintmask[0x%08x]\n",
+ __FUNCTION__, bus->hostintmask));
+ } else {
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, saveclk, &err);
+ }
+#endif /* !BCMSPI */
+
+ /* If we didn't come up, turn off backplane clock */
+ if (dhdp->busstate != DHD_BUS_DATA)
+ dhdsdio_clkctl(bus, CLK_NONE, FALSE);
+
+exit:
+ if (enforce_mutex)
+ dhd_os_sdunlock(bus->dhd);
+
+ /* XXX Temp errnum workaround: return ok, caller checks bus state */
+ return ret;
+}
+
+static void
+dhdsdio_rxfail(dhd_bus_t *bus, bool abort, bool rtx)
+{
+ bcmsdh_info_t *sdh = bus->sdh;
+ sdpcmd_regs_t *regs = bus->regs;
+ uint retries = 0;
+ uint16 lastrbc;
+ uint8 hi, lo;
+ int err;
+
+ DHD_ERROR(("%s: %sterminate frame%s\n", __FUNCTION__,
+ (abort ? "abort command, " : ""), (rtx ? ", send NAK" : "")));
+
+ if (!KSO_ENAB(bus)) {
+ DHD_ERROR(("%s: Device asleep\n", __FUNCTION__));
+ return;
+ }
+
+ if (abort) {
+ bcmsdh_abort(sdh, SDIO_FUNC_2);
+ }
+
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_FRAMECTRL, SFC_RF_TERM, &err);
+ if (err) {
+ DHD_ERROR(("%s: SBSDIO_FUNC1_FRAMECTRL cmd err\n", __FUNCTION__));
+ goto fail;
+ }
+ bus->f1regdata++;
+
+ /* Wait until the packet has been flushed (device/FIFO stable) */
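+ /* RFRAMEBCHI/RFRAMEBCLO expose the remaining frame byte count as a
+ * 16-bit value; poll until it reaches zero or the retry budget runs out.
+ */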
+ for (lastrbc = retries = 0xffff; retries > 0; retries--) {
+ hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_RFRAMEBCHI, NULL);
+ lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_RFRAMEBCLO, &err);
+ if (err) {
+ DHD_ERROR(("%s: SBSDIO_FUNC1_RFRAMEBCLO cmd err\n", __FUNCTION__));
+ goto fail;
+ }
+
+ bus->f1regdata += 2;
+
+ if ((hi == 0) && (lo == 0))
+ break;
+
+ if ((hi > (lastrbc >> 8)) && (lo > (lastrbc & 0x00ff))) {
+ DHD_ERROR(("%s: count growing: last 0x%04x now 0x%04x\n",
+ __FUNCTION__, lastrbc, ((hi << 8) + lo)));
+ }
+ lastrbc = (hi << 8) + lo;
+ }
+
+ if (!retries) {
+ DHD_ERROR(("%s: count never zeroed: last 0x%04x\n", __FUNCTION__, lastrbc));
+ } else {
+ DHD_INFO(("%s: flush took %d iterations\n", __FUNCTION__, (0xffff - retries)));
+ }
+
+ if (rtx) {
+ bus->rxrtx++;
+ W_SDREG(SMB_NAK, &regs->tosbmailbox, retries);
+ bus->f1regdata++;
+ if (retries <= retry_limit) {
+ bus->rxskip = TRUE;
+ }
+ }
+
+ /* Clear partial in any case */
+ bus->nextlen = 0;
+
+fail:
+ /* If we can't reach the device, signal failure */
+ if (err || bcmsdh_regfail(sdh)) {
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
+ bus->dhd->busstate = DHD_BUS_DOWN;
+ }
+}
+
+static void
+dhdsdio_read_control(dhd_bus_t *bus, uint8 *hdr, uint len, uint doff)
+{
+ bcmsdh_info_t *sdh = bus->sdh;
+ uint rdlen, pad;
+
+ int sdret;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ /* Control data already received in aligned rxctl */
+ if ((bus->bus == SPI_BUS) && (!bus->usebufpool))
+ goto gotpkt;
+
+ ASSERT(bus->rxbuf);
+ /* Set rxctl for frame (w/optional alignment) */
+ bus->rxctl = bus->rxbuf;
+ if (dhd_alignctl) {
+ bus->rxctl += firstread;
+ if ((pad = ((uintptr)bus->rxctl % DHD_SDALIGN)))
+ bus->rxctl += (DHD_SDALIGN - pad);
+ bus->rxctl -= firstread;
+ }
+ ASSERT(bus->rxctl >= bus->rxbuf);
+
+ /* Copy the already-read portion over */
+ bcopy(hdr, bus->rxctl, firstread);
+ if (len <= firstread)
+ goto gotpkt;
+
+ /* Copy the full data pkt in gSPI case and process ioctl. */
+ if (bus->bus == SPI_BUS) {
+ bcopy(hdr, bus->rxctl, len);
+ goto gotpkt;
+ }
+
+ /* Raise rdlen to next SDIO block to avoid tail command */
+ rdlen = len - firstread;
+ if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) {
+ pad = bus->blocksize - (rdlen % bus->blocksize);
+ if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
+ ((len + pad) < bus->dhd->maxctl))
+ rdlen += pad;
+ } else if (rdlen % DHD_SDALIGN) {
+ rdlen += DHD_SDALIGN - (rdlen % DHD_SDALIGN);
+ }
+
+ /* Satisfy length-alignment requirements */
+ if (forcealign && (rdlen & (ALIGNMENT - 1)))
+ rdlen = ROUNDUP(rdlen, ALIGNMENT);
+
+ /* Drop if the read is too big or it exceeds our maximum */
+ if ((rdlen + firstread) > bus->dhd->maxctl) {
+ DHD_ERROR(("%s: %d-byte control read exceeds %d-byte buffer\n",
+ __FUNCTION__, rdlen, bus->dhd->maxctl));
+ bus->dhd->rx_errors++;
+ dhdsdio_rxfail(bus, FALSE, FALSE);
+ goto done;
+ }
+
+ if ((len - doff) > bus->dhd->maxctl) {
+ DHD_ERROR(("%s: %d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n",
+ __FUNCTION__, len, (len - doff), bus->dhd->maxctl));
+ bus->dhd->rx_errors++; bus->rx_toolong++;
+ dhdsdio_rxfail(bus, FALSE, FALSE);
+ goto done;
+ }
+
+ /* XXX Could block readers with rxlen=0? */
+
+ /* Read remainder of frame body into the rxctl buffer */
+ sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+ (bus->rxctl + firstread), rdlen, NULL, NULL, NULL);
+ bus->f2rxdata++;
+ ASSERT(sdret != BCME_PENDING);
+
+ /* Control frame failures need retransmission */
+ if (sdret < 0) {
+ DHD_ERROR(("%s: read %d control bytes failed: %d\n", __FUNCTION__, rdlen, sdret));
+ bus->rxc_errors++; /* dhd.rx_ctlerrs is higher level */
+ dhdsdio_rxfail(bus, TRUE, TRUE);
+ goto done;
+ }
+
+gotpkt:
+
+#ifdef DHD_DEBUG
+ if (DHD_BYTES_ON() && DHD_CTL_ON()) {
+ prhex("RxCtrl", bus->rxctl, len);
+ }
+#endif
+
+ /* Point to valid data and indicate its length */
+ bus->rxctl += doff;
+ bus->rxlen = len - doff;
+
+done:
+ /* Awake any waiters */
+ dhd_os_ioctl_resp_wake(bus->dhd);
+}
+int
+dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf, uint reorder_info_len,
+ void **pkt, uint32 *pkt_count);
+
+static uint8
+dhdsdio_rxglom(dhd_bus_t *bus, uint8 rxseq)
+{
+ uint16 dlen, totlen;
+ uint8 *dptr, num = 0;
+
+ uint16 sublen, check;
+ void *pfirst, *plast, *pnext;
+ void * list_tail[DHD_MAX_IFS] = { NULL };
+ void * list_head[DHD_MAX_IFS] = { NULL };
+ uint8 idx;
+ osl_t *osh = bus->dhd->osh;
+
+ int errcode;
+ uint8 chan, seq, doff, sfdoff;
+ uint8 txmax;
+ uchar reorder_info_buf[WLHOST_REORDERDATA_TOTLEN];
+ uint reorder_info_len;
+
+ int ifidx = 0;
+ bool usechain = bus->use_rxchain;
+
+ /* If packets, issue read(s) and send up packet chain */
+ /* Return sequence numbers consumed? */
+
+ DHD_TRACE(("dhdsdio_rxglom: start: glomd %p glom %p\n", bus->glomd, bus->glom));
+
+ /* If there's a descriptor, generate the packet chain */
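+ /* A glom descriptor's payload is an array of little-endian uint16
+ * subframe lengths; one packet is preallocated per entry so the whole
+ * superframe can then be received with a single chained read.
+ */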
+ if (bus->glomd) {
+ dhd_os_sdlock_rxq(bus->dhd);
+
+ pfirst = plast = pnext = NULL;
+ dlen = (uint16)PKTLEN(osh, bus->glomd);
+ dptr = PKTDATA(osh, bus->glomd);
+ if (!dlen || (dlen & 1)) {
+ DHD_ERROR(("%s: bad glomd len (%d), ignore descriptor\n",
+ __FUNCTION__, dlen));
+ dlen = 0;
+ }
+
+ for (totlen = num = 0; dlen; num++) {
+ /* Get (and move past) next length */
+ sublen = ltoh16_ua(dptr);
+ dlen -= sizeof(uint16);
+ dptr += sizeof(uint16);
+ if ((sublen < SDPCM_HDRLEN) ||
+ ((num == 0) && (sublen < (2 * SDPCM_HDRLEN)))) {
+ DHD_ERROR(("%s: descriptor %d: bad len %d\n",
+ __FUNCTION__, num, sublen));
+ pnext = NULL;
+ break;
+ }
+ if (sublen % DHD_SDALIGN) {
+ DHD_ERROR(("%s: sublen %d not a multiple of %d\n",
+ __FUNCTION__, sublen, DHD_SDALIGN));
+ usechain = FALSE;
+ }
+ totlen += sublen;
+
+ /* For last frame, adjust read len so total is a block multiple */
+ if (!dlen) {
+ sublen += (ROUNDUP(totlen, bus->blocksize) - totlen);
+ totlen = ROUNDUP(totlen, bus->blocksize);
+ }
+
+ /* Allocate/chain packet for next subframe */
+ if ((pnext = PKTGET(osh, sublen + DHD_SDALIGN, FALSE)) == NULL) {
+ DHD_ERROR(("%s: PKTGET failed, num %d len %d\n",
+ __FUNCTION__, num, sublen));
+ break;
+ }
+ ASSERT(!PKTLINK(pnext));
+ if (!pfirst) {
+ ASSERT(!plast);
+ pfirst = plast = pnext;
+ } else {
+ ASSERT(plast);
+ PKTSETNEXT(osh, plast, pnext);
+ plast = pnext;
+ }
+
+ /* Adhere to start alignment requirements */
+ PKTALIGN(osh, pnext, sublen, DHD_SDALIGN);
+ }
+
+ /* If all allocations succeeded, save packet chain in bus structure */
+ if (pnext) {
+ DHD_GLOM(("%s: allocated %d-byte packet chain for %d subframes\n",
+ __FUNCTION__, totlen, num));
+ if (DHD_GLOM_ON() && bus->nextlen) {
+ if (totlen != bus->nextlen) {
+ DHD_GLOM(("%s: glomdesc mismatch: nextlen %d glomdesc %d "
+ "rxseq %d\n", __FUNCTION__, bus->nextlen,
+ totlen, rxseq));
+ }
+ }
+ bus->glom = pfirst;
+ pfirst = pnext = NULL;
+ } else {
+ if (pfirst)
+ PKTFREE(osh, pfirst, FALSE);
+ bus->glom = NULL;
+ num = 0;
+ }
+
+ /* Done with descriptor packet */
+ PKTFREE(osh, bus->glomd, FALSE);
+ bus->glomd = NULL;
+ bus->nextlen = 0;
+
+ dhd_os_sdunlock_rxq(bus->dhd);
+ }
+
+ /* Ok -- either we just generated a packet chain, or had one from before */
+ if (bus->glom) {
+ if (DHD_GLOM_ON()) {
+ DHD_GLOM(("%s: attempt superframe read, packet chain:\n", __FUNCTION__));
+ for (pnext = bus->glom; pnext; pnext = PKTNEXT(osh, pnext)) {
+ DHD_GLOM((" %p: %p len 0x%04x (%d)\n",
+ pnext, (uint8*)PKTDATA(osh, pnext),
+ PKTLEN(osh, pnext), PKTLEN(osh, pnext)));
+ }
+ }
+
+ pfirst = bus->glom;
+ dlen = (uint16)pkttotlen(osh, pfirst);
+
+ /* Do an SDIO read for the superframe. A configurable iovar selects
+ * whether to read directly into the chained packet, or to allocate a
+ * large packet and copy into the chain.
+ */
+ if (usechain) {
+ errcode = dhd_bcmsdh_recv_buf(bus,
+ bcmsdh_cur_sbwad(bus->sdh), SDIO_FUNC_2,
+ F2SYNC, (uint8*)PKTDATA(osh, pfirst),
+ dlen, pfirst, NULL, NULL);
+ } else if (bus->dataptr) {
+ errcode = dhd_bcmsdh_recv_buf(bus,
+ bcmsdh_cur_sbwad(bus->sdh), SDIO_FUNC_2,
+ F2SYNC, bus->dataptr,
+ dlen, NULL, NULL, NULL);
+ sublen = (uint16)pktfrombuf(osh, pfirst, 0, dlen, bus->dataptr);
+ if (sublen != dlen) {
+ DHD_ERROR(("%s: FAILED TO COPY, dlen %d sublen %d\n",
+ __FUNCTION__, dlen, sublen));
+ errcode = -1;
+ }
+ pnext = NULL;
+ BCM_REFERENCE(pnext);
+ } else {
+ DHD_ERROR(("COULDN'T ALLOC %d-BYTE GLOM, FORCE FAILURE\n", dlen));
+ errcode = -1;
+ }
+ bus->f2rxdata++;
+ ASSERT(errcode != BCME_PENDING);
+
+ /* On failure, kill the superframe, allow a couple retries */
+ if (errcode < 0) {
+ DHD_ERROR(("%s: glom read of %d bytes failed: %d\n",
+ __FUNCTION__, dlen, errcode));
+ bus->dhd->rx_errors++; /* XXX Account for rtx?? */
+
+ if (bus->glomerr++ < 3) {
+ dhdsdio_rxfail(bus, TRUE, TRUE);
+ } else {
+ bus->glomerr = 0;
+ dhdsdio_rxfail(bus, TRUE, FALSE);
+ dhd_os_sdlock_rxq(bus->dhd);
+ PKTFREE(osh, bus->glom, FALSE);
+ dhd_os_sdunlock_rxq(bus->dhd);
+ bus->rxglomfail++;
+ bus->glom = NULL;
+ }
+ return 0;
+ }
+
+#ifdef DHD_DEBUG
+ if (DHD_GLOM_ON()) {
+ prhex("SUPERFRAME", PKTDATA(osh, pfirst),
+ MIN(PKTLEN(osh, pfirst), 48));
+ }
+#endif
+
+ /* Validate the superframe header */
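+ /* In the hardware frame tag the first little-endian uint16 is the frame
+ * length and the second is its one's complement, so (uint16)~(sublen ^ check)
+ * below is nonzero exactly when the tag is corrupt.
+ */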
+ dptr = (uint8 *)PKTDATA(osh, pfirst);
+ sublen = ltoh16_ua(dptr);
+ check = ltoh16_ua(dptr + sizeof(uint16));
+
+ chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
+ seq = SDPCM_PACKET_SEQUENCE(&dptr[SDPCM_FRAMETAG_LEN]);
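+ /* nextlen is the readahead hint for the following frame, in units of
+ * 16 bytes (hence the << 4 below).
+ */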
+ bus->nextlen = dptr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
+ if ((bus->nextlen << 4) > MAX_RX_DATASZ) {
+ DHD_INFO(("%s: got frame w/nextlen too large (%d) seq %d\n",
+ __FUNCTION__, bus->nextlen, seq));
+ bus->nextlen = 0;
+ }
+ doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
+ txmax = SDPCM_WINDOW_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
+
+ errcode = 0;
+ if ((uint16)~(sublen^check)) {
+ DHD_ERROR(("%s (superframe): HW hdr error: len/check 0x%04x/0x%04x\n",
+ __FUNCTION__, sublen, check));
+ errcode = -1;
+ } else if (ROUNDUP(sublen, bus->blocksize) != dlen) {
+ DHD_ERROR(("%s (superframe): len 0x%04x, rounded 0x%04x, expect 0x%04x\n",
+ __FUNCTION__, sublen, ROUNDUP(sublen, bus->blocksize), dlen));
+ errcode = -1;
+ } else if (SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]) != SDPCM_GLOM_CHANNEL) {
+ DHD_ERROR(("%s (superframe): bad channel %d\n", __FUNCTION__,
+ SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN])));
+ errcode = -1;
+ } else if (SDPCM_GLOMDESC(&dptr[SDPCM_FRAMETAG_LEN])) {
+ DHD_ERROR(("%s (superframe): got second descriptor?\n", __FUNCTION__));
+ errcode = -1;
+ } else if ((doff < SDPCM_HDRLEN) ||
+ (doff > (PKTLEN(osh, pfirst) - SDPCM_HDRLEN))) {
+ DHD_ERROR(("%s (superframe): Bad data offset %d: HW %d pkt %d min %d\n",
+ __FUNCTION__, doff, sublen, PKTLEN(osh, pfirst),
+ SDPCM_HDRLEN));
+ errcode = -1;
+ }
+
+ /* Check sequence number of superframe SW header */
+ if (rxseq != seq) {
+ DHD_INFO(("%s: (superframe) rx_seq %d, expected %d\n",
+ __FUNCTION__, seq, rxseq));
+ bus->rx_badseq++;
+ rxseq = seq;
+ }
+
+ /* Check window for sanity */
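+ /* Sequence numbers wrap at 256, so (uint8)(txmax - tx_seq) is the
+ * advertised tx window size mod 256; a window above 0x70 is treated as
+ * a corrupt header and the previous tx_max is kept.
+ */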
+ if ((uint8)(txmax - bus->tx_seq) > 0x70) {
+ DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n",
+ __FUNCTION__, txmax, bus->tx_seq));
+ txmax = bus->tx_max;
+ }
+ bus->tx_max = txmax;
+
+ /* Remove superframe header, remember offset */
+ PKTPULL(osh, pfirst, doff);
+ sfdoff = doff;
+
+ /* Validate all the subframe headers */
+ for (num = 0, pnext = pfirst; pnext && !errcode;
+ num++, pnext = PKTNEXT(osh, pnext)) {
+ dptr = (uint8 *)PKTDATA(osh, pnext);
+ dlen = (uint16)PKTLEN(osh, pnext);
+ sublen = ltoh16_ua(dptr);
+ check = ltoh16_ua(dptr + sizeof(uint16));
+ chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
+ doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
+#ifdef DHD_DEBUG
+ if (DHD_GLOM_ON()) {
+ prhex("subframe", dptr, 32);
+ }
+#endif
+
+ if ((uint16)~(sublen^check)) {
+ DHD_ERROR(("%s (subframe %d): HW hdr error: "
+ "len/check 0x%04x/0x%04x\n",
+ __FUNCTION__, num, sublen, check));
+ errcode = -1;
+ } else if ((sublen > dlen) || (sublen < SDPCM_HDRLEN)) {
+ DHD_ERROR(("%s (subframe %d): length mismatch: "
+ "len 0x%04x, expect 0x%04x\n",
+ __FUNCTION__, num, sublen, dlen));
+ errcode = -1;
+ } else if ((chan != SDPCM_DATA_CHANNEL) &&
+ (chan != SDPCM_EVENT_CHANNEL)) {
+ DHD_ERROR(("%s (subframe %d): bad channel %d\n",
+ __FUNCTION__, num, chan));
+ errcode = -1;
+ } else if ((doff < SDPCM_HDRLEN) || (doff > sublen)) {
+ DHD_ERROR(("%s (subframe %d): Bad data offset %d: HW %d min %d\n",
+ __FUNCTION__, num, doff, sublen, SDPCM_HDRLEN));
+ errcode = -1;
+ }
+ }
+
+ if (errcode) {
+ /* Terminate frame on error, request a couple retries */
+ if (bus->glomerr++ < 3) {
+ /* Restore superframe header space */
+ PKTPUSH(osh, pfirst, sfdoff);
+ dhdsdio_rxfail(bus, TRUE, TRUE);
+ } else {
+ bus->glomerr = 0;
+ dhdsdio_rxfail(bus, TRUE, FALSE);
+ dhd_os_sdlock_rxq(bus->dhd);
+ PKTFREE(osh, bus->glom, FALSE);
+ dhd_os_sdunlock_rxq(bus->dhd);
+ bus->rxglomfail++;
+ bus->glom = NULL;
+ }
+ bus->nextlen = 0;
+ return 0;
+ }
+
+ /* Basic SD framing looks ok - process each packet (header) */
+ bus->glom = NULL;
+ plast = NULL;
+
+ dhd_os_sdlock_rxq(bus->dhd);
+ for (num = 0; pfirst; rxseq++, pfirst = pnext) {
+ pnext = PKTNEXT(osh, pfirst);
+ PKTSETNEXT(osh, pfirst, NULL);
+
+ dptr = (uint8 *)PKTDATA(osh, pfirst);
+ sublen = ltoh16_ua(dptr);
+ chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
+ seq = SDPCM_PACKET_SEQUENCE(&dptr[SDPCM_FRAMETAG_LEN]);
+ doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
+
+ DHD_GLOM(("%s: Get subframe %d, %p(%p/%d), sublen %d chan %d seq %d\n",
+ __FUNCTION__, num, pfirst, PKTDATA(osh, pfirst),
+ PKTLEN(osh, pfirst), sublen, chan, seq));
+
+ ASSERT((chan == SDPCM_DATA_CHANNEL) || (chan == SDPCM_EVENT_CHANNEL));
+
+ if (rxseq != seq) {
+ DHD_GLOM(("%s: rx_seq %d, expected %d\n",
+ __FUNCTION__, seq, rxseq));
+ bus->rx_badseq++;
+ rxseq = seq;
+ }
+
+#ifdef DHD_DEBUG
+ if (DHD_BYTES_ON() && DHD_DATA_ON()) {
+ prhex("Rx Subframe Data", dptr, dlen);
+ }
+#endif
+
+ PKTSETLEN(osh, pfirst, sublen);
+ PKTPULL(osh, pfirst, doff);
+
+ reorder_info_len = sizeof(reorder_info_buf);
+
+ if (PKTLEN(osh, pfirst) == 0) {
+ PKTFREE(bus->dhd->osh, pfirst, FALSE);
+ continue;
+ } else if (dhd_prot_hdrpull(bus->dhd, &ifidx, pfirst, reorder_info_buf,
+ &reorder_info_len) != 0) {
+ DHD_ERROR(("%s: rx protocol error\n", __FUNCTION__));
+ bus->dhd->rx_errors++;
+ PKTFREE(osh, pfirst, FALSE);
+ continue;
+ }
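+ /* The firmware may release a chain of previously buffered (reordered)
+ * packets here; splice that chain onto the per-interface list and
+ * advance pfirst to its tail so the loop continues from there.
+ */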
+ if (reorder_info_len) {
+ uint32 free_buf_count;
+ void *ppfirst;
+
+ ppfirst = pfirst;
+ /* Reordering info from the firmware */
+ dhd_process_pkt_reorder_info(bus->dhd, reorder_info_buf,
+ reorder_info_len, &ppfirst, &free_buf_count);
+
+ if (free_buf_count == 0) {
+ continue;
+ } else {
+ void *temp;
+
+ /* go to the end of the chain and attach the pnext there */
+ temp = ppfirst;
+ while (PKTNEXT(osh, temp) != NULL) {
+ temp = PKTNEXT(osh, temp);
+ }
+ pfirst = temp;
+ if (list_tail[ifidx] == NULL)
+ list_head[ifidx] = ppfirst;
+ else
+ PKTSETNEXT(osh, list_tail[ifidx], ppfirst);
+ list_tail[ifidx] = pfirst;
+ }
+
+ num += (uint8)free_buf_count;
+ } else {
+ /* this packet will go up, link back into chain and count it */
+
+ if (list_tail[ifidx] == NULL) {
+ list_head[ifidx] = list_tail[ifidx] = pfirst;
+ } else {
+ PKTSETNEXT(osh, list_tail[ifidx], pfirst);
+ list_tail[ifidx] = pfirst;
+ }
+ num++;
+ }
+#ifdef DHD_DEBUG
+ if (DHD_GLOM_ON()) {
+ DHD_GLOM(("%s subframe %d to stack, %p(%p/%d) nxt/lnk %p/%p\n",
+ __FUNCTION__, num, pfirst,
+ PKTDATA(osh, pfirst), PKTLEN(osh, pfirst),
+ PKTNEXT(osh, pfirst), PKTLINK(pfirst)));
+ prhex("", (uint8 *)PKTDATA(osh, pfirst),
+ MIN(PKTLEN(osh, pfirst), 32));
+ }
+#endif /* DHD_DEBUG */
+ }
+ dhd_os_sdunlock_rxq(bus->dhd);
+
+ for (idx = 0; idx < DHD_MAX_IFS; idx++) {
+ if (list_head[idx]) {
+ void *temp;
+ uint8 cnt = 0;
+ temp = list_head[idx];
+ do {
+ temp = PKTNEXT(osh, temp);
+ cnt++;
+ } while (temp);
+ if (cnt) {
+ dhd_os_sdunlock(bus->dhd);
+ dhd_rx_frame(bus->dhd, idx, list_head[idx], cnt, 0);
+ dhd_os_sdlock(bus->dhd);
+ }
+ }
+ }
+ bus->rxglomframes++;
+ bus->rxglompkts += num;
+ }
+ return num;
+}
+
+ /* Read available frames; returns the number of frames read and sets
+ * *finished when no more frames remain to be read.
+ */
+static uint
+dhdsdio_readframes(dhd_bus_t *bus, uint maxframes, bool *finished)
+{
+ osl_t *osh = bus->dhd->osh;
+ bcmsdh_info_t *sdh = bus->sdh;
+
+ uint16 len, check; /* Extracted hardware header fields */
+ uint8 chan, seq, doff; /* Extracted software header fields */
+ uint8 fcbits; /* Extracted fcbits from software header */
+ uint8 delta;
+
+ void *pkt; /* Packet for event or data frames */
+ uint16 pad; /* Number of pad bytes to read */
+ uint16 rdlen; /* Total number of bytes to read */
+ uint8 rxseq; /* Next sequence number to expect */
+ uint rxleft = 0; /* Remaining number of frames allowed */
+ int sdret; /* Return code from bcmsdh calls */
+ uint8 txmax; /* Maximum tx sequence offered */
+#ifdef BCMSPI
+ uint32 dstatus = 0; /* gSPI device-status bits */
+#endif /* BCMSPI */
+ bool len_consistent; /* TRUE if the readahead len disagrees with the len from the hw-hdr */
+ uint8 *rxbuf;
+ int ifidx = 0;
+ uint rxcount = 0; /* Total frames read */
+ uchar reorder_info_buf[WLHOST_REORDERDATA_TOTLEN];
+ uint reorder_info_len;
+ uint pkt_count;
+
+#if defined(DHD_DEBUG) || defined(SDTEST)
+ bool sdtest = FALSE; /* To limit message spew from test mode */
+#endif
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+ bus->readframes = TRUE;
+
+ if (!KSO_ENAB(bus)) {
+ DHD_ERROR(("%s: KSO off\n", __FUNCTION__));
+ bus->readframes = FALSE;
+ return 0;
+ }
+
+ ASSERT(maxframes);
+
+#ifdef SDTEST
+ /* Allow pktgen to override maxframes */
+ if (bus->pktgen_count && (bus->pktgen_mode == DHD_PKTGEN_RECV)) {
+ maxframes = bus->pktgen_count;
+ sdtest = TRUE;
+ }
+#endif
+
+ /* Not finished until we encounter the no-more-frames indication */
+ *finished = FALSE;
+
+#ifdef BCMSPI
+ /* Get pktlen from gSPI device F0 reg. */
+ if (bus->bus == SPI_BUS) {
+ /* Peek in dstatus bits and find out size to do rx-read. */
+ dstatus = bcmsdh_get_dstatus(bus->sdh);
+ if (dstatus == 0)
+ DHD_ERROR(("%s:ZERO spi dstatus, a case observed in PR61352 hit !!!\n",
+ __FUNCTION__));
+
+ DHD_TRACE(("Device status from regread = 0x%x\n", dstatus));
+ DHD_TRACE(("Device status from bit-reconstruction = 0x%x\n",
+ bcmsdh_get_dstatus((void *)bus->sdh)));
+
+ /* Check underflow also, WAR for PR55150 */
+ if ((dstatus & STATUS_F2_PKT_AVAILABLE) && (((dstatus & STATUS_UNDERFLOW)) == 0)) {
+ bus->nextlen = ((dstatus & STATUS_F2_PKT_LEN_MASK) >>
+ STATUS_F2_PKT_LEN_SHIFT);
+ /* A '0' size with the pkt-available interrupt is equivalent to 2048 bytes */
+ bus->nextlen = (bus->nextlen == 0) ? SPI_MAX_PKT_LEN : bus->nextlen;
+ if (bus->dwordmode)
+ bus->nextlen = bus->nextlen << 2;
+ DHD_TRACE(("Entering %s: length to be read from gSPI = %d\n",
+ __FUNCTION__, bus->nextlen));
+ } else {
+ if (dstatus & STATUS_F2_PKT_AVAILABLE)
+ DHD_ERROR(("Underflow during %s.\n", __FUNCTION__));
+ else
+ DHD_ERROR(("False pkt-available intr.\n"));
+ *finished = TRUE;
+ return (maxframes - rxleft);
+ }
+ }
+#endif /* BCMSPI */
+
+ for (rxseq = bus->rx_seq, rxleft = maxframes;
+ !bus->rxskip && rxleft && bus->dhd->busstate != DHD_BUS_DOWN;
+ rxseq++, rxleft--) {
+#ifdef DHDTCPACK_SUP_DBG
+ if (bus->dhd->tcpack_sup_mode != TCPACK_SUP_DELAYTX) {
+ if (bus->dotxinrx == FALSE)
+ DHD_ERROR(("%s %d: dotxinrx FALSE with tcpack_sub_mode %d\n",
+ __FUNCTION__, __LINE__, bus->dhd->tcpack_sup_mode));
+ }
+#ifdef DEBUG_COUNTER
+ else if (pktq_mlen(&bus->txq, ~bus->flowcontrol) > 0) {
+ tack_tbl.cnt[bus->dotxinrx ? 6 : 7]++;
+ }
+#endif /* DEBUG_COUNTER */
+#endif /* DHDTCPACK_SUP_DBG */
+ /* tx more to improve rx performance */
+ if (TXCTLOK(bus) && bus->ctrl_frame_stat && (bus->clkstate == CLK_AVAIL)) {
+ dhdsdio_sendpendctl(bus);
+ } else if (bus->dotxinrx && (bus->clkstate == CLK_AVAIL) &&
+ !bus->fcstate && DATAOK(bus) &&
+ (pktq_mlen(&bus->txq, ~bus->flowcontrol) > bus->txinrx_thres)) {
+ dhdsdio_sendfromq(bus, dhd_txbound);
+#ifdef DHDTCPACK_SUPPRESS
+ /* In TCPACK_SUP_DELAYTX mode, do txinrx only if
+ * 1. Any DATA packet to TX
+ * 2. TCPACK to TCPDATA PSH packets.
+ * in bus txq.
+ */
+ bus->dotxinrx = (bus->dhd->tcpack_sup_mode == TCPACK_SUP_DELAYTX) ?
+ FALSE : TRUE;
+#endif
+ }
+
+ /* Handle glomming separately */
+ if (bus->glom || bus->glomd) {
+ uint8 cnt;
+ DHD_GLOM(("%s: calling rxglom: glomd %p, glom %p\n",
+ __FUNCTION__, bus->glomd, bus->glom));
+ cnt = dhdsdio_rxglom(bus, rxseq);
+ DHD_GLOM(("%s: rxglom returned %d\n", __FUNCTION__, cnt));
+ rxseq += cnt - 1;
+ rxleft = (rxleft > cnt) ? (rxleft - cnt) : 1;
+ continue;
+ }
+
+ /* Try doing single read if we can */
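+ /* Readahead: the previous frame's software header advertised the next
+ * frame's length (nextlen), so the whole frame can be fetched in one
+ * transfer instead of a header read followed by a body read.
+ */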
+ if (dhd_readahead && bus->nextlen) {
+ uint16 nextlen = bus->nextlen;
+ bus->nextlen = 0;
+
+ if (bus->bus == SPI_BUS) {
+ rdlen = len = nextlen;
+ } else {
+ rdlen = len = nextlen << 4;
+
+ /* Pad read to blocksize for efficiency */
+ if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) {
+ pad = bus->blocksize - (rdlen % bus->blocksize);
+ if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
+ ((rdlen + pad + firstread) < MAX_RX_DATASZ))
+ rdlen += pad;
+ } else if (rdlen % DHD_SDALIGN) {
+ rdlen += DHD_SDALIGN - (rdlen % DHD_SDALIGN);
+ }
+ }
+
+ /* We use the bus->rxctl buffer in WinXP for initial control pkt receives.
+ * Later we use the buffer pool for data as well as control packets.
+ * This is required because dhd receives the full frame in gSPI, unlike SDIO.
+ * After the frame is received we have to distinguish whether it is a data
+ * or non-data frame.
+ */
+ /* Allocate a packet buffer */
+ dhd_os_sdlock_rxq(bus->dhd);
+ if (!(pkt = PKTGET(osh, rdlen + DHD_SDALIGN, FALSE))) {
+ if (bus->bus == SPI_BUS) {
+ bus->usebufpool = FALSE;
+ bus->rxctl = bus->rxbuf;
+ if (dhd_alignctl) {
+ bus->rxctl += firstread;
+ if ((pad = ((uintptr)bus->rxctl % DHD_SDALIGN)))
+ bus->rxctl += (DHD_SDALIGN - pad);
+ bus->rxctl -= firstread;
+ }
+ ASSERT(bus->rxctl >= bus->rxbuf);
+ rxbuf = bus->rxctl;
+ /* Read the entire frame */
+ sdret = dhd_bcmsdh_recv_buf(bus,
+ bcmsdh_cur_sbwad(sdh),
+ SDIO_FUNC_2,
+ F2SYNC, rxbuf, rdlen,
+ NULL, NULL, NULL);
+ bus->f2rxdata++;
+ ASSERT(sdret != BCME_PENDING);
+
+#ifdef BCMSPI
+ /* PR55150 WAR: Wait for next pkt-available interrupt for
+ * further processing
+ */
+ if (bcmsdh_get_dstatus((void *)bus->sdh) &
+ STATUS_UNDERFLOW) {
+ bus->nextlen = 0;
+ *finished = TRUE;
+ DHD_ERROR(("%s: read %d control bytes failed "
+ "due to spi underflow\n",
+ __FUNCTION__, rdlen));
+ /* dhd.rx_ctlerrs is higher level */
+ bus->rxc_errors++;
+ dhd_os_sdunlock_rxq(bus->dhd);
+ continue;
+ }
+#endif /* BCMSPI */
+
+ /* Control frame failures need retransmission */
+ if (sdret < 0) {
+ DHD_ERROR(("%s: read %d control bytes failed: %d\n",
+ __FUNCTION__, rdlen, sdret));
+ /* dhd.rx_ctlerrs is higher level */
+ bus->rxc_errors++;
+ dhd_os_sdunlock_rxq(bus->dhd);
+ dhdsdio_rxfail(bus, TRUE,
+ (bus->bus == SPI_BUS) ? FALSE : TRUE);
+ continue;
+ }
+ } else {
+ /* Give up on data, request rtx of events */
+ DHD_ERROR(("%s (nextlen): PKTGET failed: len %d rdlen %d "
+ "expected rxseq %d\n",
+ __FUNCTION__, len, rdlen, rxseq));
+ /* XXX Can't issue retry (NAK), frame not started. */
+ /* Just go try again w/normal header read */
+ dhd_os_sdunlock_rxq(bus->dhd);
+ continue;
+ }
+ } else {
+ if (bus->bus == SPI_BUS)
+ bus->usebufpool = TRUE;
+
+ ASSERT(!PKTLINK(pkt));
+ PKTALIGN(osh, pkt, rdlen, DHD_SDALIGN);
+ rxbuf = (uint8 *)PKTDATA(osh, pkt);
+ /* Read the entire frame */
+ sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh),
+ SDIO_FUNC_2,
+ F2SYNC, rxbuf, rdlen,
+ pkt, NULL, NULL);
+ bus->f2rxdata++;
+ ASSERT(sdret != BCME_PENDING);
+#ifdef BCMSPI
+ /* PR55150 WAR: Wait for next pkt-available interrupt for further
+ * processing
+ */
+ if (bcmsdh_get_dstatus((void *)bus->sdh) & STATUS_UNDERFLOW) {
+ bus->nextlen = 0;
+ *finished = TRUE;
+ DHD_ERROR(("%s (nextlen): read %d bytes failed due "
+ "to spi underflow\n",
+ __FUNCTION__, rdlen));
+ PKTFREE(bus->dhd->osh, pkt, FALSE);
+ bus->dhd->rx_errors++;
+ dhd_os_sdunlock_rxq(bus->dhd);
+ continue;
+ }
+#endif /* BCMSPI */
+
+ if (sdret < 0) {
+ DHD_ERROR(("%s (nextlen): read %d bytes failed: %d\n",
+ __FUNCTION__, rdlen, sdret));
+ PKTFREE(bus->dhd->osh, pkt, FALSE);
+ bus->dhd->rx_errors++; /* XXX Account for rtx?? */
+ dhd_os_sdunlock_rxq(bus->dhd);
+ /* Force retry w/normal header read. Don't attempt NAK for
+ * gSPI
+ */
+ dhdsdio_rxfail(bus, TRUE,
+ (bus->bus == SPI_BUS) ? FALSE : TRUE);
+ continue;
+ }
+ }
+ dhd_os_sdunlock_rxq(bus->dhd);
+
+ /* Now check the header */
+ bcopy(rxbuf, bus->rxhdr, SDPCM_HDRLEN);
+
+ /* Extract hardware header fields */
+ len = ltoh16_ua(bus->rxhdr);
+ check = ltoh16_ua(bus->rxhdr + sizeof(uint16));
+
+ /* All zeros means readahead info was bad */
+ if (!(len|check)) {
+ DHD_INFO(("%s (nextlen): read zeros in HW header???\n",
+ __FUNCTION__));
+ dhd_os_sdlock_rxq(bus->dhd);
+ PKTFREE2();
+ dhd_os_sdunlock_rxq(bus->dhd);
+ GSPI_PR55150_BAILOUT;
+ continue;
+ }
+
+ /* Validate check bytes */
+ if ((uint16)~(len^check)) {
+ DHD_ERROR(("%s (nextlen): HW hdr error: nextlen/len/check"
+ " 0x%04x/0x%04x/0x%04x\n", __FUNCTION__, nextlen,
+ len, check));
+ dhd_os_sdlock_rxq(bus->dhd);
+ PKTFREE2();
+ dhd_os_sdunlock_rxq(bus->dhd);
+ bus->rx_badhdr++;
+ dhdsdio_rxfail(bus, FALSE, FALSE);
+ GSPI_PR55150_BAILOUT;
+ continue;
+ }
+
+ /* Validate frame length */
+ if (len < SDPCM_HDRLEN) {
+ /* XXX Might choose to allow length 4 for signaling */
+ DHD_ERROR(("%s (nextlen): HW hdr length invalid: %d\n",
+ __FUNCTION__, len));
+ dhd_os_sdlock_rxq(bus->dhd);
+ PKTFREE2();
+ dhd_os_sdunlock_rxq(bus->dhd);
+ GSPI_PR55150_BAILOUT;
+ continue;
+ }
+
+ /* Check for consistency with readahead info */
+#ifdef BCMSPI
+ if (bus->bus == SPI_BUS) {
+ if (bus->dwordmode) {
+ uint16 spilen;
+ spilen = ROUNDUP(len, 4);
+ len_consistent = (nextlen != spilen);
+ } else
+ len_consistent = (nextlen != len);
+ } else
+#endif /* BCMSPI */
+ len_consistent = (nextlen != (ROUNDUP(len, 16) >> 4));
+ if (len_consistent) {
+ /* Mismatch, force retry w/normal header (may be >4K) */
+ DHD_ERROR(("%s (nextlen): mismatch, nextlen %d len %d rnd %d; "
+ "expected rxseq %d\n",
+ __FUNCTION__, nextlen, len, ROUNDUP(len, 16), rxseq));
+ dhd_os_sdlock_rxq(bus->dhd);
+ PKTFREE2();
+ dhd_os_sdunlock_rxq(bus->dhd);
+ dhdsdio_rxfail(bus, TRUE, (bus->bus == SPI_BUS) ? FALSE : TRUE);
+ GSPI_PR55150_BAILOUT;
+ continue;
+ }
+
+ /* Extract software header fields */
+ chan = SDPCM_PACKET_CHANNEL(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+ seq = SDPCM_PACKET_SEQUENCE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+ doff = SDPCM_DOFFSET_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+ txmax = SDPCM_WINDOW_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+
+#ifdef BCMSPI
+ /* Save the readahead length if there is one */
+ if (bus->bus == SPI_BUS) {
+ /* Use reconstructed dstatus bits and find out readahead size */
+ dstatus = bcmsdh_get_dstatus((void *)bus->sdh);
+ DHD_INFO(("Device status from bit-reconstruction = 0x%x\n",
+ bcmsdh_get_dstatus((void *)bus->sdh)));
+ if (dstatus & STATUS_F2_PKT_AVAILABLE) {
+ bus->nextlen = ((dstatus & STATUS_F2_PKT_LEN_MASK) >>
+ STATUS_F2_PKT_LEN_SHIFT);
+ bus->nextlen = (bus->nextlen == 0) ?
+ SPI_MAX_PKT_LEN : bus->nextlen;
+ if (bus->dwordmode)
+ bus->nextlen = bus->nextlen << 2;
+ DHD_INFO(("readahead len from gSPI = %d \n",
+ bus->nextlen));
+ bus->dhd->rx_readahead_cnt++;
+ } else {
+ bus->nextlen = 0;
+ *finished = TRUE;
+ }
+ } else {
+#endif /* BCMSPI */
+ bus->nextlen =
+ bus->rxhdr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
+ if ((bus->nextlen << 4) > MAX_RX_DATASZ) {
+ DHD_INFO(("%s (nextlen): got frame w/nextlen too large"
+ " (%d), seq %d\n", __FUNCTION__, bus->nextlen,
+ seq));
+ bus->nextlen = 0;
+ }
+
+ bus->dhd->rx_readahead_cnt++;
+#ifdef BCMSPI
+ }
+#endif /* BCMSPI */
+ /* Handle Flow Control */
+ fcbits = SDPCM_FCMASK_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
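+ /* fcbits newly set relative to bus->flowcontrol are XOFF events and
+ * bits newly cleared are XON events; any change bumps fc_rcvd below.
+ */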
+
+ delta = 0;
+ if (~bus->flowcontrol & fcbits) {
+ bus->fc_xoff++;
+ delta = 1;
+ }
+ if (bus->flowcontrol & ~fcbits) {
+ bus->fc_xon++;
+ delta = 1;
+ }
+
+ if (delta) {
+ bus->fc_rcvd++;
+ bus->flowcontrol = fcbits;
+ }
+
+ /* Check and update sequence number */
+ if (rxseq != seq) {
+ DHD_INFO(("%s (nextlen): rx_seq %d, expected %d\n",
+ __FUNCTION__, seq, rxseq));
+ bus->rx_badseq++;
+ rxseq = seq;
+ }
+
+ /* Check window for sanity */
+ if ((uint8)(txmax - bus->tx_seq) > 0x70) {
+#ifdef BCMSPI
+ if ((bus->bus == SPI_BUS) && !(dstatus & STATUS_F2_RX_READY)) {
+ DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n",
+ __FUNCTION__, txmax, bus->tx_seq));
+ txmax = bus->tx_seq + 2;
+ } else {
+#endif /* BCMSPI */
+ DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n",
+ __FUNCTION__, txmax, bus->tx_seq));
+ txmax = bus->tx_max;
+#ifdef BCMSPI
+ }
+#endif /* BCMSPI */
+ }
+ bus->tx_max = txmax;
+
+#ifdef DHD_DEBUG
+ if (DHD_BYTES_ON() && DHD_DATA_ON()) {
+ prhex("Rx Data", rxbuf, len);
+ } else if (DHD_HDRS_ON()) {
+ prhex("RxHdr", bus->rxhdr, SDPCM_HDRLEN);
+ }
+#endif
+
+ if (chan == SDPCM_CONTROL_CHANNEL) {
+ if (bus->bus == SPI_BUS) {
+ dhdsdio_read_control(bus, rxbuf, len, doff);
+ if (bus->usebufpool) {
+ dhd_os_sdlock_rxq(bus->dhd);
+ PKTFREE(bus->dhd->osh, pkt, FALSE);
+ dhd_os_sdunlock_rxq(bus->dhd);
+ }
+ continue;
+ } else {
+ DHD_ERROR(("%s (nextlen): readahead on control"
+ " packet %d?\n", __FUNCTION__, seq));
+ /* Force retry w/normal header read */
+ bus->nextlen = 0;
+ dhdsdio_rxfail(bus, FALSE, TRUE);
+ dhd_os_sdlock_rxq(bus->dhd);
+ PKTFREE2();
+ dhd_os_sdunlock_rxq(bus->dhd);
+ continue;
+ }
+ }
+
+ if ((bus->bus == SPI_BUS) && !bus->usebufpool) {
+ DHD_ERROR(("Received %d bytes on %d channel. Running out of "
+ "rx pktbufs or not yet malloced.\n", len, chan));
+ continue;
+ }
+
+ /* Validate data offset */
+ if ((doff < SDPCM_HDRLEN) || (doff > len)) {
+ DHD_ERROR(("%s (nextlen): bad data offset %d: HW len %d min %d\n",
+ __FUNCTION__, doff, len, SDPCM_HDRLEN));
+ dhd_os_sdlock_rxq(bus->dhd);
+ PKTFREE2();
+ dhd_os_sdunlock_rxq(bus->dhd);
+ ASSERT(0);
+ dhdsdio_rxfail(bus, FALSE, FALSE);
+ continue;
+ }
+
+ /* All done with this one -- now deliver the packet */
+ goto deliver;
+ }
+ /* gSPI frames should not be handled in fractions */
+ if (bus->bus == SPI_BUS) {
+ break;
+ }
+
+ /* Read frame header (hardware and software) */
+ sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+ bus->rxhdr, firstread, NULL, NULL, NULL);
+ bus->f2rxhdrs++;
+ ASSERT(sdret != BCME_PENDING);
+
+ if (sdret < 0) {
+ DHD_ERROR(("%s: RXHEADER FAILED: %d\n", __FUNCTION__, sdret));
+ bus->rx_hdrfail++;
+#ifdef BCMINTERNAL
+ if (tstoph) {
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
+ bus->dhd->busstate = DHD_BUS_DOWN;
+ continue;
+ }
+#endif
+ dhdsdio_rxfail(bus, TRUE, TRUE);
+ continue;
+ }
+
+#ifdef DHD_DEBUG
+ if (DHD_BYTES_ON() || DHD_HDRS_ON()) {
+ prhex("RxHdr", bus->rxhdr, SDPCM_HDRLEN);
+ }
+#endif
+
+ /* Extract hardware header fields */
+ len = ltoh16_ua(bus->rxhdr);
+ check = ltoh16_ua(bus->rxhdr + sizeof(uint16));
+
+ /* All zeros means no more frames */
+ if (!(len|check)) {
+ *finished = TRUE;
+ break;
+ }
+
+ /* Validate check bytes */
+ if ((uint16)~(len^check)) {
+ DHD_ERROR(("%s: HW hdr error: len/check 0x%04x/0x%04x\n",
+ __FUNCTION__, len, check));
+ bus->rx_badhdr++;
+ dhdsdio_rxfail(bus, FALSE, FALSE);
+ continue;
+ }
+
+ /* Validate frame length */
+ if (len < SDPCM_HDRLEN) {
+ /* XXX Might choose to allow length 4 for signaling */
+ DHD_ERROR(("%s: HW hdr length invalid: %d\n", __FUNCTION__, len));
+ continue;
+ }
+
+ /* Extract software header fields */
+ chan = SDPCM_PACKET_CHANNEL(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+ seq = SDPCM_PACKET_SEQUENCE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+ doff = SDPCM_DOFFSET_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+ txmax = SDPCM_WINDOW_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+
+ /* Validate data offset */
+ if ((doff < SDPCM_HDRLEN) || (doff > len)) {
+ DHD_ERROR(("%s: Bad data offset %d: HW len %d, min %d seq %d\n",
+ __FUNCTION__, doff, len, SDPCM_HDRLEN, seq));
+ bus->rx_badhdr++;
+ ASSERT(0);
+ dhdsdio_rxfail(bus, FALSE, FALSE);
+ continue;
+ }
+
+ /* Save the readahead length if there is one */
+ bus->nextlen = bus->rxhdr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
+ if ((bus->nextlen << 4) > MAX_RX_DATASZ) {
+ DHD_INFO(("%s (nextlen): got frame w/nextlen too large (%d), seq %d\n",
+ __FUNCTION__, bus->nextlen, seq));
+ bus->nextlen = 0;
+ }
+
+ /* Handle Flow Control */
+ fcbits = SDPCM_FCMASK_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+
+ delta = 0;
+ if (~bus->flowcontrol & fcbits) {
+ bus->fc_xoff++;
+ delta = 1;
+ }
+ if (bus->flowcontrol & ~fcbits) {
+ bus->fc_xon++;
+ delta = 1;
+ }
+
+ if (delta) {
+ bus->fc_rcvd++;
+ bus->flowcontrol = fcbits;
+ }
+
+ /* Check and update sequence number */
+ if (rxseq != seq) {
+ DHD_INFO(("%s: rx_seq %d, expected %d\n", __FUNCTION__, seq, rxseq));
+ bus->rx_badseq++;
+ rxseq = seq;
+ }
+
+ /* Check window for sanity */
+ if ((uint8)(txmax - bus->tx_seq) > 0x70) {
+ DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n",
+ __FUNCTION__, txmax, bus->tx_seq));
+ txmax = bus->tx_max;
+ }
+ bus->tx_max = txmax;
+
+ /* Call a separate function for control frames */
+ if (chan == SDPCM_CONTROL_CHANNEL) {
+ dhdsdio_read_control(bus, bus->rxhdr, len, doff);
+ continue;
+ }
+
+ ASSERT((chan == SDPCM_DATA_CHANNEL) || (chan == SDPCM_EVENT_CHANNEL) ||
+ (chan == SDPCM_TEST_CHANNEL) || (chan == SDPCM_GLOM_CHANNEL));
+
+ /* Length to read */
+ rdlen = (len > firstread) ? (len - firstread) : 0;
+
+ /* May pad read to blocksize for efficiency */
+ if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) {
+ pad = bus->blocksize - (rdlen % bus->blocksize);
+ if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
+ ((rdlen + pad + firstread) < MAX_RX_DATASZ))
+ rdlen += pad;
+ } else if (rdlen % DHD_SDALIGN) {
+ rdlen += DHD_SDALIGN - (rdlen % DHD_SDALIGN);
+ }
+
+ /* Satisfy length-alignment requirements */
+ if (forcealign && (rdlen & (ALIGNMENT - 1)))
+ rdlen = ROUNDUP(rdlen, ALIGNMENT);
+
+ if ((rdlen + firstread) > MAX_RX_DATASZ) {
+ /* Too long -- skip this frame */
+ DHD_ERROR(("%s: too long: len %d rdlen %d\n", __FUNCTION__, len, rdlen));
+ bus->dhd->rx_errors++; bus->rx_toolong++;
+ dhdsdio_rxfail(bus, FALSE, FALSE);
+ continue;
+ }
+
+ dhd_os_sdlock_rxq(bus->dhd);
+ if (!(pkt = PKTGET(osh, (rdlen + firstread + DHD_SDALIGN), FALSE))) {
+ /* Give up on data, request rtx of events */
+ DHD_ERROR(("%s: PKTGET failed: rdlen %d chan %d\n",
+ __FUNCTION__, rdlen, chan));
+ bus->dhd->rx_dropped++;
+ dhd_os_sdunlock_rxq(bus->dhd);
+ dhdsdio_rxfail(bus, FALSE, RETRYCHAN(chan));
+ continue;
+ }
+ dhd_os_sdunlock_rxq(bus->dhd);
+
+ ASSERT(!PKTLINK(pkt));
+
+ /* XXX Should check len for small packets in case we're done? */
+ /* Leave room for what we already read, and align remainder */
+ ASSERT(firstread < (PKTLEN(osh, pkt)));
+ PKTPULL(osh, pkt, firstread);
+ PKTALIGN(osh, pkt, rdlen, DHD_SDALIGN);
+
+ /* Read the remaining frame data */
+ sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+ ((uint8 *)PKTDATA(osh, pkt)), rdlen, pkt, NULL, NULL);
+ bus->f2rxdata++;
+ ASSERT(sdret != BCME_PENDING);
+
+ if (sdret < 0) {
+ DHD_ERROR(("%s: read %d %s bytes failed: %d\n", __FUNCTION__, rdlen,
+ ((chan == SDPCM_EVENT_CHANNEL) ? "event" :
+ ((chan == SDPCM_DATA_CHANNEL) ? "data" : "test")), sdret));
+ dhd_os_sdlock_rxq(bus->dhd);
+ PKTFREE(bus->dhd->osh, pkt, FALSE);
+ dhd_os_sdunlock_rxq(bus->dhd);
+ bus->dhd->rx_errors++; /* XXX Account for rtx?? */
+ dhdsdio_rxfail(bus, TRUE, RETRYCHAN(chan));
+ continue;
+ }
+
+ /* Copy the already-read portion */
+ PKTPUSH(osh, pkt, firstread);
+ bcopy(bus->rxhdr, PKTDATA(osh, pkt), firstread);
+
+#ifdef DHD_DEBUG
+ if (DHD_BYTES_ON() && DHD_DATA_ON()) {
+ prhex("Rx Data", PKTDATA(osh, pkt), len);
+ }
+#endif
+
+deliver:
+ /* Save superframe descriptor and allocate packet frame */
+ if (chan == SDPCM_GLOM_CHANNEL) {
+ if (SDPCM_GLOMDESC(&bus->rxhdr[SDPCM_FRAMETAG_LEN])) {
+ DHD_GLOM(("%s: got glom descriptor, %d bytes:\n",
+ __FUNCTION__, len));
+#ifdef DHD_DEBUG
+ if (DHD_GLOM_ON()) {
+ prhex("Glom Data", PKTDATA(osh, pkt), len);
+ }
+#endif
+ PKTSETLEN(osh, pkt, len);
+ ASSERT(doff == SDPCM_HDRLEN);
+ PKTPULL(osh, pkt, SDPCM_HDRLEN);
+ bus->glomd = pkt;
+ } else {
+ DHD_ERROR(("%s: glom superframe w/o descriptor!\n", __FUNCTION__));
+ dhdsdio_rxfail(bus, FALSE, FALSE);
+ }
+ continue;
+ }
+
+ /* Fill in packet len and prio, deliver upward */
+ PKTSETLEN(osh, pkt, len);
+ PKTPULL(osh, pkt, doff);
+
+#ifdef SDTEST
+ /* Test channel packets are processed separately */
+ if (chan == SDPCM_TEST_CHANNEL) {
+ dhdsdio_testrcv(bus, pkt, seq);
+ continue;
+ }
+#endif /* SDTEST */
+
+ if (PKTLEN(osh, pkt) == 0) {
+ dhd_os_sdlock_rxq(bus->dhd);
+ PKTFREE(bus->dhd->osh, pkt, FALSE);
+ dhd_os_sdunlock_rxq(bus->dhd);
+ continue;
+ } else if (dhd_prot_hdrpull(bus->dhd, &ifidx, pkt, reorder_info_buf,
+ &reorder_info_len) != 0) {
+ DHD_ERROR(("%s: rx protocol error\n", __FUNCTION__));
+ dhd_os_sdlock_rxq(bus->dhd);
+ PKTFREE(bus->dhd->osh, pkt, FALSE);
+ dhd_os_sdunlock_rxq(bus->dhd);
+ bus->dhd->rx_errors++;
+ continue;
+ }
+
+ if (reorder_info_len) {
+ /* Reordering info from the firmware */
+ dhd_process_pkt_reorder_info(bus->dhd, reorder_info_buf, reorder_info_len,
+ &pkt, &pkt_count);
+ if (pkt_count == 0)
+ continue;
+ } else {
+ pkt_count = 1;
+ }
+
+ /* XXX Release the lock around the rx delivery: an OS (like Windows)
+ * might call tx in the same thread context, resulting in deadlock.
+ */
+ /* Unlock during rx call */
+ dhd_os_sdunlock(bus->dhd);
+ dhd_rx_frame(bus->dhd, ifidx, pkt, pkt_count, chan);
+ dhd_os_sdlock(bus->dhd);
+#if defined(SDIO_ISR_THREAD)
+ /* terence 20150615: bussleep in the watchdog after dhd_os_sdunlock here
+ * caused the errors below, so call BUS_WAKE to wake the bus up again:
+ * dhd_bcmsdh_recv_buf: Device asleep
+ * dhdsdio_readframes: RXHEADER FAILED: -40
+ * dhdsdio_rxfail: abort command, terminate frame, send NAK
+ */
+ BUS_WAKE(bus);
+#endif
+ }
+ rxcount = maxframes - rxleft;
+#ifdef DHD_DEBUG
+ /* Message if we hit the limit */
+ if (!rxleft && !sdtest)
+ DHD_DATA(("%s: hit rx limit of %d frames\n", __FUNCTION__, maxframes));
+ else
+#endif /* DHD_DEBUG */
+ DHD_DATA(("%s: processed %d frames\n", __FUNCTION__, rxcount));
+ /* Back off rxseq if awaiting rtx, update rx_seq */
+ if (bus->rxskip)
+ rxseq--;
+ bus->rx_seq = rxseq;
+
+ if (bus->reqbussleep) {
+ dhdsdio_bussleep(bus, TRUE);
+ bus->reqbussleep = FALSE;
+ }
+ bus->readframes = FALSE;
+
+ return rxcount;
+}
+
+static uint32
+dhdsdio_hostmail(dhd_bus_t *bus, uint32 *hmbd)
+{
+ sdpcmd_regs_t *regs = bus->regs;
+ uint32 intstatus = 0;
+ uint32 hmb_data;
+ uint8 fcbits;
+ uint retries = 0;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ /* Read mailbox data and ack that we did so */
+ R_SDREG(hmb_data, &regs->tohostmailboxdata, retries);
+ if (retries <= retry_limit)
+ W_SDREG(SMB_INT_ACK, &regs->tosbmailbox, retries);
+ bus->f1regdata += 2;
+
+ /* Dongle recomposed rx frames, accept them again */
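+ /* NAKHANDLED means the dongle has processed our NAK and will retransmit
+ * from bus->rx_seq, so clear rxskip and synthesize a frame-available
+ * indication to trigger the re-read.
+ */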
+ if (hmb_data & HMB_DATA_NAKHANDLED) {
+ DHD_INFO(("Dongle reports NAK handled, expect rtx of %d\n", bus->rx_seq));
+ /* XXX ASSERT(bus->rxskip); */
+ if (!bus->rxskip) {
+ DHD_ERROR(("%s: unexpected NAKHANDLED!\n", __FUNCTION__));
+ }
+ bus->rxskip = FALSE;
+ intstatus |= FRAME_AVAIL_MASK(bus);
+ }
+
+ /*
+ * DEVREADY does not occur with gSPI.
+ */
+ if (hmb_data & (HMB_DATA_DEVREADY | HMB_DATA_FWREADY)) {
+ bus->sdpcm_ver = (hmb_data & HMB_DATA_VERSION_MASK) >> HMB_DATA_VERSION_SHIFT;
+ if (bus->sdpcm_ver != SDPCM_PROT_VERSION)
+ DHD_ERROR(("Version mismatch, dongle reports %d, expecting %d\n",
+ bus->sdpcm_ver, SDPCM_PROT_VERSION));
+ else
+ DHD_INFO(("Dongle ready, protocol version %d\n", bus->sdpcm_ver));
+#ifndef BCMSPI
+ /* make sure for the SDIO_DEVICE_RXDATAINT_MODE_1 corecontrol is proper */
+ if ((bus->sih->buscoretype == SDIOD_CORE_ID) && (bus->sdpcmrev >= 4) &&
+ (bus->rxint_mode == SDIO_DEVICE_RXDATAINT_MODE_1)) {
+ uint32 val;
+
+ val = R_REG(bus->dhd->osh, &bus->regs->corecontrol);
+ val &= ~CC_XMTDATAAVAIL_MODE;
+ val |= CC_XMTDATAAVAIL_CTRL;
+ W_REG(bus->dhd->osh, &bus->regs->corecontrol, val);
+
+ val = R_REG(bus->dhd->osh, &bus->regs->corecontrol);
+ }
+#endif /* BCMSPI */
+
+#ifdef DHD_DEBUG
+ /* Retrieve console state address now that firmware should have updated it */
+ {
+ sdpcm_shared_t shared;
+ if (dhdsdio_readshared(bus, &shared) == 0)
+ bus->console_addr = shared.console_addr;
+ }
+#endif /* DHD_DEBUG */
+ }
+
+ /*
+ * Flow control has been moved into the RX headers, and this out-of-band
+ * method isn't used any more. Leave it here to remain backward
+ * compatible with older dongles.
+ */
+ if (hmb_data & HMB_DATA_FC) {
+ fcbits = (hmb_data & HMB_DATA_FCDATA_MASK) >> HMB_DATA_FCDATA_SHIFT;
+
+ if (fcbits & ~bus->flowcontrol)
+ bus->fc_xoff++;
+ if (bus->flowcontrol & ~fcbits)
+ bus->fc_xon++;
+
+ bus->fc_rcvd++;
+ bus->flowcontrol = fcbits;
+ }
+
+ /* At least print a message if FW halted */
+ if (hmb_data & HMB_DATA_FWHALT) {
+ DHD_ERROR(("FIRMWARE HALTED\n"));
+ dhdsdio_checkdied(bus, NULL, 0);
+ }
+
+ /* Shouldn't be any others */
+ if (hmb_data & ~(HMB_DATA_DEVREADY |
+ HMB_DATA_FWHALT |
+ HMB_DATA_NAKHANDLED |
+ HMB_DATA_FC |
+ HMB_DATA_FWREADY |
+ HMB_DATA_FCDATA_MASK |
+ HMB_DATA_VERSION_MASK)) {
+ DHD_ERROR(("Unknown mailbox data content: 0x%02x\n", hmb_data));
+ }
+
+ if (hmbd) {
+ *hmbd = hmb_data;
+ }
+
+ return intstatus;
+}
+
+#ifdef BCMSDIO_INTSTATUS_WAR
+static uint32
+dhdsdio_read_intstatus_byte(dhd_bus_t *bus)
+{
+ bcmsdh_info_t *sdh = bus->sdh;
+ sdpcmd_regs_t *regs = bus->regs;
+ uint32 newstatus = 0, intstatus_byte = 0;
+ uint retries = 0;
+ int err1 = 0, err2 = 0, err3 = 0, err4 = 0;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ /* read_intr_mode:
+ * 0: word mode only (default)
+ * 1: byte mode after read word failed
+ * 2: byte mode only
+ */
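+ /* In byte mode the 32-bit little-endian intstatus register is
+ * reassembled from four single-byte reads; a byte whose read failed is
+ * left out of the result.
+ */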
+ if (bus->dhd->conf->read_intr_mode) {
+ if (bus->dhd->conf->read_intr_mode == 1) {
+ R_SDREG(newstatus, &regs->intstatus, retries);
+ if (!bcmsdh_regfail(bus->sdh)) {
+ goto exit;
+ }
+ }
+ intstatus_byte = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1,
+ ((unsigned long)&regs->intstatus & 0xffff) + 0, &err1);
+ if (!err1)
+ newstatus |= intstatus_byte;
+ intstatus_byte = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1,
+ ((unsigned long)&regs->intstatus & 0xffff) + 1, &err2) << 8;
+ if (!err2)
+ newstatus |= intstatus_byte;
+ intstatus_byte = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1,
+ ((unsigned long)&regs->intstatus & 0xffff) + 2, &err3) << 16;
+ if (!err3)
+ newstatus |= intstatus_byte;
+ intstatus_byte = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1,
+ ((unsigned long)&regs->intstatus & 0xffff) + 3, &err4) << 24;
+ if (!err4)
+ newstatus |= intstatus_byte;
+
+ if (!err1 || !err2 || !err3 || !err4)
+ sdh->regfail = FALSE;
+ }
+ else {
+ R_SDREG(newstatus, &regs->intstatus, retries);
+ }
+
+exit:
+ return newstatus;
+}
+#endif
+
+static bool
+dhdsdio_dpc(dhd_bus_t *bus)
+{
+ bcmsdh_info_t *sdh = bus->sdh;
+ sdpcmd_regs_t *regs = bus->regs;
+ uint32 intstatus, newstatus = 0;
+ uint retries = 0;
+ uint rxlimit = dhd_rxbound; /* Rx frames to read before resched */
+ uint txlimit = dhd_txbound; /* Tx frames to send before resched */
+ uint framecnt = 0; /* Temporary counter of tx/rx frames */
+ bool rxdone = TRUE; /* Flag for no more read data */
+ bool resched = FALSE; /* Flag indicating resched wanted */
+ unsigned long flags;
+#ifdef DEBUG_DPC_THREAD_WATCHDOG
+ bool is_resched_by_readframe = FALSE;
+#endif /* DEBUG_DPC_THREAD_WATCHDOG */
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ dhd_os_sdlock(bus->dhd);
+ DHD_LINUX_GENERAL_LOCK(bus->dhd, flags);
+ if (bus->dhd->busstate == DHD_BUS_DOWN) {
+ DHD_ERROR(("%s: Bus down, ret\n", __FUNCTION__));
+ bus->intstatus = 0;
+ DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags);
+ dhd_os_sdunlock(bus->dhd);
+ return 0;
+ }
+
+ DHD_BUS_BUSY_SET_IN_DPC(bus->dhd);
+ DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags);
+
+ /* Start with leftover status bits */
+ intstatus = bus->intstatus;
+
+ if (!SLPAUTO_ENAB(bus) && !KSO_ENAB(bus)) {
+ DHD_ERROR(("%s: Device asleep\n", __FUNCTION__));
+ goto exit;
+ }
+
+ /* If waiting for HTAVAIL, check status */
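+ /* While in CLK_PENDING only the clock-available interrupt is expected;
+ * once the CSR shows HT available, clear the CA_INT_ONLY restriction in
+ * DEVCTL and move to CLK_AVAIL so normal frame interrupts flow again.
+ */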
+ if (!SLPAUTO_ENAB(bus) && (bus->clkstate == CLK_PENDING)) {
+ int err;
+ uint8 clkctl, devctl = 0;
+
+#ifdef DHD_DEBUG
+ /* Check for inconsistent device control */
+ devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+ if (err) {
+ DHD_ERROR(("%s: error reading DEVCTL: %d\n", __FUNCTION__, err));
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
+ bus->dhd->busstate = DHD_BUS_DOWN;
+ } else {
+ ASSERT(devctl & SBSDIO_DEVCTL_CA_INT_ONLY);
+ }
+#endif /* DHD_DEBUG */
+
+ /* Read CSR, if clock on switch to AVAIL, else ignore */
+ clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+ if (err) {
+ DHD_ERROR(("%s: error reading CSR: %d\n", __FUNCTION__, err));
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
+ bus->dhd->busstate = DHD_BUS_DOWN;
+ }
+
+ DHD_INFO(("DPC: PENDING, devctl 0x%02x clkctl 0x%02x\n", devctl, clkctl));
+
+ if (SBSDIO_HTAV(clkctl)) {
+ devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+ if (err) {
+ DHD_ERROR(("%s: error reading DEVCTL: %d\n",
+ __FUNCTION__, err));
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
+ bus->dhd->busstate = DHD_BUS_DOWN;
+ }
+ devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err);
+ if (err) {
+ DHD_ERROR(("%s: error writing DEVCTL: %d\n",
+ __FUNCTION__, err));
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
+ bus->dhd->busstate = DHD_BUS_DOWN;
+ }
+ bus->clkstate = CLK_AVAIL;
+ } else {
+ goto clkwait;
+ }
+ }
+
+ BUS_WAKE(bus);
+
+ /* Make sure backplane clock is on */
+ dhdsdio_clkctl(bus, CLK_AVAIL, TRUE);
+ if (bus->clkstate != CLK_AVAIL)
+ goto clkwait;
+
+ /* Pending interrupt indicates new device status */
+ if (bus->ipend) {
+ bus->ipend = FALSE;
+#if defined(BT_OVER_SDIO)
+ bcmsdh_btsdio_process_f3_intr();
+#endif /* defined (BT_OVER_SDIO) */
+
+#ifdef BCMSDIO_INTSTATUS_WAR
+ newstatus = dhdsdio_read_intstatus_byte(bus);
+#else
+ R_SDREG(newstatus, &regs->intstatus, retries);
+#endif
+ bus->f1regdata++;
+ if (bcmsdh_regfail(bus->sdh))
+ newstatus = 0;
+ newstatus &= bus->hostintmask;
+ bus->fcstate = !!(newstatus & I_HMB_FC_STATE);
+ if (newstatus) {
+ bus->f1regdata++;
+#ifndef BCMSPI
+ if ((bus->rxint_mode == SDIO_DEVICE_RXDATAINT_MODE_0) &&
+ (newstatus == I_XMTDATA_AVAIL)) {
+ } else
+#endif /* BCMSPI */
+ W_SDREG(newstatus, &regs->intstatus, retries);
+ }
+ }
+
+ /* Merge new bits with previous */
+ intstatus |= newstatus;
+ bus->intstatus = 0;
+
+ /* Handle flow-control change: read new state in case our ack
+ * crossed another change interrupt. If change still set, assume
+ * FC ON for safety, let next loop through do the debounce.
+ */
+ if (intstatus & I_HMB_FC_CHANGE) {
+ intstatus &= ~I_HMB_FC_CHANGE;
+ W_SDREG(I_HMB_FC_CHANGE, &regs->intstatus, retries);
+ R_SDREG(newstatus, &regs->intstatus, retries);
+ bus->f1regdata += 2;
+ bus->fcstate = !!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE));
+ intstatus |= (newstatus & bus->hostintmask);
+ }
+
+ /* Handle host mailbox indication */
+ if (intstatus & I_HMB_HOST_INT) {
+ uint32 hmbdata = 0;
+
+ intstatus &= ~I_HMB_HOST_INT;
+ intstatus |= dhdsdio_hostmail(bus, &hmbdata);
+
+ }
+
+#ifdef DHD_UCODE_DOWNLOAD
+exit_ucode:
+#endif /* DHD_UCODE_DOWNLOAD */
+
+ /* Just being here means nothing more to do for chipactive */
+ if (intstatus & I_CHIPACTIVE) {
+ /* ASSERT(bus->clkstate == CLK_AVAIL); */
+ intstatus &= ~I_CHIPACTIVE;
+ }
+
+ /* Handle host mailbox indication */
+ if (intstatus & I_HMB_HOST_INT) {
+ intstatus &= ~I_HMB_HOST_INT;
+ intstatus |= dhdsdio_hostmail(bus, NULL);
+ }
+
+ /* Generally don't ask for these, can get CRC errors... */
+ /* XXX Besides noting the error, should we ABORT/TERM? */
+ if (intstatus & I_WR_OOSYNC) {
+ DHD_ERROR(("Dongle reports WR_OOSYNC\n"));
+ intstatus &= ~I_WR_OOSYNC;
+ }
+
+ if (intstatus & I_RD_OOSYNC) {
+ DHD_ERROR(("Dongle reports RD_OOSYNC\n"));
+ intstatus &= ~I_RD_OOSYNC;
+ }
+
+ /* XXX Should reset or something here... */
+ if (intstatus & I_SBINT) {
+ DHD_ERROR(("Dongle reports SBINT\n"));
+ intstatus &= ~I_SBINT;
+ }
+
+ /* Would be active due to wake-wlan in gSPI */
+ if (intstatus & I_CHIPACTIVE) {
+ DHD_INFO(("Dongle reports CHIPACTIVE\n"));
+ intstatus &= ~I_CHIPACTIVE;
+ }
+
+ if (intstatus & I_HMB_FC_STATE) {
+ DHD_INFO(("Dongle reports HMB_FC_STATE\n"));
+ intstatus &= ~I_HMB_FC_STATE;
+ }
+
+ /* Ignore frame indications if rxskip is set */
+ if (bus->rxskip) {
+ intstatus &= ~FRAME_AVAIL_MASK(bus);
+ }
+
+ /* On frame indication, read available frames */
+ if (PKT_AVAILABLE(bus, intstatus)) {
+#ifdef BCMINTERNAL
+ if (checkfifo) {
+ int count, regerrs = 0;
+ uint32 fifoaddr, rdptr, rdoffs, endptrs;
+ uint32 datalow[8], datahigh[8];
+ uint coretype = bus->sih->buscoretype;
+ uint corerev = bus->sdpcmrev;
+
+ /* set fifoaddr to fetch xmt fifo pointers */
+ fifoaddr = (0xB << 16);
+ W_SDREG(fifoaddr, &SDPCMFIFOREG(bus, coretype, corerev)->fifoaddr, retries);
+ if (bcmsdh_regfail(bus->sdh)) regerrs++;
+ R_SDREG(rdptr, &SDPCMFIFOREG(bus, coretype, corerev)->fifodatalow, retries);
+ if (bcmsdh_regfail(bus->sdh)) regerrs++;
+
+ /* Read the first 8 words out of the FIFO */
+ for (count = 0, rdoffs = (rdptr & 0x7F); count < 8; count++) {
+ fifoaddr = (0xA << 16) | rdoffs;
+ W_SDREG(fifoaddr,
+ &SDPCMFIFOREG(bus, coretype, corerev)->fifoaddr,
+ retries);
+ if (bcmsdh_regfail(bus->sdh)) regerrs++;
+ R_SDREG(datalow[count],
+ &SDPCMFIFOREG(bus, coretype, corerev)->fifodatalow,
+ retries);
+ if (bcmsdh_regfail(bus->sdh)) regerrs++;
+ W_SDREG(fifoaddr,
+ &SDPCMFIFOREG(bus, coretype, corerev)->fifoaddr,
+ retries);
+ if (bcmsdh_regfail(bus->sdh)) regerrs++;
+ R_SDREG(datahigh[count],
+ &SDPCMFIFOREG(bus, coretype, corerev)->fifodatahigh,
+ retries);
+ if (bcmsdh_regfail(bus->sdh)) regerrs++;
+ rdoffs = (rdoffs + 1) & 0x7F;
+ }
+
+ /* For the heck of it, read the pointers again */
+ fifoaddr = (0xB << 16);
+ W_SDREG(fifoaddr,
+ &SDPCMFIFOREG(bus, coretype, corerev)->fifoaddr, retries);
+ if (bcmsdh_regfail(bus->sdh)) regerrs++;
+ R_SDREG(endptrs,
+ &SDPCMFIFOREG(bus, coretype, corerev)->fifodatalow, retries);
+ if (bcmsdh_regfail(bus->sdh)) regerrs++;
+
+ printf("Initial read of Transmit DMA Pointers: 0x%08x\n", rdptr);
+ printf("Transmit DMA Data\n");
+ for (count = 0, rdoffs = (rdptr & 0x7F); count < 8; count++) {
+ printf("0x%08x: 0x%08x 0x%08x\n", rdoffs,
+ datahigh[count], datalow[count]);
+ rdoffs = (rdoffs + 1) & 0x7F;
+ }
+ printf("Final read of Transmit DMA Pointers: 0x%08x\n", endptrs);
+ printf("Register errors: %d\n", regerrs);
+
+ checkfifo = FALSE;
+ }
+#endif /* BCMINTERNAL */
+
+ framecnt = dhdsdio_readframes(bus, rxlimit, &rxdone);
+ if (rxdone || bus->rxskip)
+ intstatus &= ~FRAME_AVAIL_MASK(bus);
+ rxlimit -= MIN(framecnt, rxlimit);
+ }
+
+ /* Keep still-pending events for next scheduling */
+ bus->intstatus = intstatus;
+
+clkwait:
+ /* Re-enable interrupts to detect new device events (mailbox, rx frame)
+ * or clock availability. (Allows tx loop to check ipend if desired.)
+ * (Unless register access seems hosed, as we may not be able to ACK...)
+ */
+ if (bus->intr && bus->intdis && !bcmsdh_regfail(sdh) &&
+ !(bus->dhd->conf->oob_enabled_later && !bus->ctrl_frame_stat)) {
+ DHD_INTR(("%s: enable SDIO interrupts, rxdone %d framecnt %d\n",
+ __FUNCTION__, rxdone, framecnt));
+ bus->intdis = FALSE;
+#if defined(OOB_INTR_ONLY)
+ bcmsdh_oob_intr_set(bus->sdh, TRUE);
+#endif /* defined(OOB_INTR_ONLY) */
+#if !defined(NDIS)
+ bcmsdh_intr_enable(sdh);
+#endif /* !defined(NDIS) */
+#ifdef BCMSPI_ANDROID
+ if (*dhd_spi_lockcount == 0)
+ bcmsdh_oob_intr_set(bus->sdh, TRUE);
+#endif /* BCMSPI_ANDROID */
+ }
+
+#if defined(OOB_INTR_ONLY) && !defined(HW_OOB)
+ /* In case of SW-OOB (edge trigger), check the interrupt status in the
+ * dongle again after enabling the IRQ on the host, and reschedule the DPC
+ * if an interrupt is pending in the dongle; there is a chance of missing
+ * the OOB interrupt while the IRQ is disabled on the host.
+ * This is not needed with HW-OOB (level trigger).
+ */
+ R_SDREG(newstatus, &regs->intstatus, retries);
+ if (bcmsdh_regfail(bus->sdh))
+ newstatus = 0;
+ if (newstatus & bus->hostintmask) {
+ bus->ipend = TRUE;
+ resched = TRUE;
+ }
+#endif /* defined(OOB_INTR_ONLY) && !defined(HW_OOB) */
+
+#ifdef BCMSDIO_RXLIM_POST
+ if (!DATAOK(bus) && bus->rxlim_en) {
+ uint8 rxlim = 0;
+ if (0 == dhdsdio_membytes(bus, FALSE, bus->rxlim_addr, (uint8 *)&rxlim, 1)) {
+ if (bus->tx_max != rxlim) {
+ DHD_INFO(("%s: bus->tx_max/rxlim=%d/%d\n", __FUNCTION__,
+ bus->tx_max, rxlim));
+ bus->tx_max = rxlim;
+ }
+ }
+ }
+#endif /* BCMSDIO_RXLIM_POST */
+
+#ifdef PROP_TXSTATUS
+ dhd_wlfc_commit_packets(bus->dhd, (f_commitpkt_t)dhd_bus_txdata, (void *)bus, NULL, FALSE);
+#endif
+
+ if (TXCTLOK(bus) && bus->ctrl_frame_stat && (bus->clkstate == CLK_AVAIL))
+ dhdsdio_sendpendctl(bus);
+#ifdef CONSOLE_DPC
+ else if (DATAOK(bus) && strlen(bus->cons_cmd) && (bus->clkstate == CLK_AVAIL) &&
+ !bus->fcstate) {
+ dhd_bus_console_in(bus->dhd, bus->cons_cmd, strlen(bus->cons_cmd));
+ }
+#endif
+
+ /* Send queued frames (limit 1 if rx may still be pending) */
+ else if ((bus->clkstate == CLK_AVAIL) && !bus->fcstate &&
+ pktq_mlen(&bus->txq, ~bus->flowcontrol) && txlimit && DATAOK(bus)) {
+
+ if (bus->dhd->conf->dhd_txminmax < 0)
+ framecnt = rxdone ? txlimit : MIN(txlimit, DATABUFCNT(bus));
+ else
+ framecnt = rxdone ? txlimit : MIN(txlimit, bus->dhd->conf->dhd_txminmax);
+ framecnt = dhdsdio_sendfromq(bus, framecnt);
+ txlimit -= framecnt;
+ }
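+ /* A note on the framecnt selection above (explanatory only): a negative
+ * conf->dhd_txminmax means no extra clamp, so once rx is done the full
+ * txlimit may be used; while rx may still be pending, tx is bounded by
+ * DATABUFCNT() (or the configured dhd_txminmax) so pending rx frames are
+ * not starved by a long tx burst.
+ */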
+ /* Resched the DPC if ctrl cmd is pending on bus credit */
+ if (bus->ctrl_frame_stat) {
+ if (bus->dhd->conf->txctl_tmo_fix) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (!kthread_should_stop())
+ schedule_timeout(1);
+ set_current_state(TASK_RUNNING);
+ }
+ resched = TRUE;
+ }
+
+ /* Resched if events or tx frames are pending, else await next interrupt */
+ /* On failed register access, all bets are off: no resched or interrupts */
+ if ((bus->dhd->busstate == DHD_BUS_DOWN) || bcmsdh_regfail(sdh)) {
+ if ((bus->sih && bus->sih->buscorerev >= 12) && !(dhdsdio_sleepcsr_get(bus) &
+ SBSDIO_FUNC1_SLEEPCSR_KSO_MASK)) {
+ /* Bus failed because of KSO */
+ DHD_ERROR(("%s: Bus failed due to KSO\n", __FUNCTION__));
+ bus->kso = FALSE;
+ } else {
+ DHD_ERROR(("%s: failed backplane access over SDIO, halting operation\n",
+ __FUNCTION__));
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
+ bus->dhd->busstate = DHD_BUS_DOWN;
+ bus->intstatus = 0;
+ /* XXX Under certain conditions it may be reasonable to enable interrupts.
+ * E.g. if we get occasional 'bcmsdh_regfail' we should be able to continue
+ * operation. May want to make the decision to enable or not based on count
+ * of failures, so in case of bus lock up we avoid continuous interrupt.
+ */
+ }
+ } else if (bus->clkstate == CLK_PENDING) {
+ /* Awaiting I_CHIPACTIVE; don't resched */
+ } else if (bus->intstatus || bus->ipend ||
+ (!bus->fcstate && pktq_mlen(&bus->txq, ~bus->flowcontrol) && DATAOK(bus)) ||
+ PKT_AVAILABLE(bus, bus->intstatus)) { /* Read multiple frames */
+ resched = TRUE;
+ }
+
+ bus->dpc_sched = resched;
+
+ /* If we're done for now, turn off clock request. */
+ /* XXX Leave request on if just waiting for new credit? */
+ if ((bus->idletime == DHD_IDLE_IMMEDIATE) && (bus->clkstate != CLK_PENDING) &&
+ NO_OTHER_ACTIVE_BUS_USER(bus)) {
+ bus->activity = FALSE;
+ dhdsdio_bussleep(bus, TRUE);
+ dhdsdio_clkctl(bus, CLK_NONE, FALSE);
+ }
+
+exit:
+
+ if (!resched) {
+ /* Re-enable interrupts to detect new device events (mailbox, rx frame)
+ * or clock availability. (Allows tx loop to check ipend if desired.)
+ * (Unless register access seems hosed, as we may not be able to ACK...)
+ */
+ if (bus->intr && bus->intdis && !bcmsdh_regfail(sdh) &&
+ (bus->dhd->conf->oob_enabled_later && !bus->ctrl_frame_stat)) {
+ DHD_INTR(("%s: enable SDIO interrupts, rxdone %d framecnt %d\n",
+ __FUNCTION__, rxdone, framecnt));
+ bus->intdis = FALSE;
+#if defined(OOB_INTR_ONLY)
+ bcmsdh_oob_intr_set(bus->sdh, TRUE);
+#endif /* defined(OOB_INTR_ONLY) */
+ bcmsdh_intr_enable(sdh);
+ }
+ if (dhd_dpcpoll) {
+ if (dhdsdio_readframes(bus, dhd_rxbound, &rxdone) != 0) {
+ resched = TRUE;
+#ifdef DEBUG_DPC_THREAD_WATCHDOG
+ is_resched_by_readframe = TRUE;
+#endif /* DEBUG_DPC_THREAD_WATCHDOG */
+ }
+ }
+ }
+
+#ifdef TPUT_MONITOR
+ dhd_conf_tput_monitor(bus->dhd);
+#endif
+
+ if (bus->ctrl_wait && TXCTLOK(bus))
+ wake_up_interruptible(&bus->ctrl_tx_wait);
+ dhd_os_sdunlock(bus->dhd);
+#ifdef DEBUG_DPC_THREAD_WATCHDOG
+ if (bus->dhd->dhd_bug_on) {
+ DHD_INFO(("%s: resched = %d ctrl_frame_stat = %d intstatus 0x%08x"
+ " ipend = %d pktq_mlen = %d is_resched_by_readframe = %d \n",
+ __FUNCTION__, resched, bus->ctrl_frame_stat,
+ bus->intstatus, bus->ipend,
+ pktq_mlen(&bus->txq, ~bus->flowcontrol), is_resched_by_readframe));
+
+ bus->dhd->dhd_bug_on = FALSE;
+ }
+#endif /* DEBUG_DPC_THREAD_WATCHDOG */
+
+ DHD_LINUX_GENERAL_LOCK(bus->dhd, flags);
+ DHD_BUS_BUSY_CLEAR_IN_DPC(bus->dhd);
+ dhd_os_busbusy_wake(bus->dhd);
+ DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags);
+
+ return resched;
+}
+
+bool
+dhd_bus_dpc(struct dhd_bus *bus)
+{
+ bool resched;
+
+ /* Call the DPC directly. */
+ DHD_TRACE(("Calling dhdsdio_dpc() from %s\n", __FUNCTION__));
+ resched = dhdsdio_dpc(bus);
+
+ return resched;
+}
+
+void
+dhdsdio_isr(void *arg)
+{
+ dhd_bus_t *bus = (dhd_bus_t*)arg;
+ bcmsdh_info_t *sdh;
+
+ if (!bus) {
+ DHD_ERROR(("%s : bus is null pointer , exit \n", __FUNCTION__));
+ return;
+ }
+ sdh = bus->sdh;
+
+ if (bus->dhd->busstate == DHD_BUS_DOWN) {
+ DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
+ return;
+ }
+ /* XXX Overall operation:
+ * XXX - Mask further interrupts
+ * XXX - Read/ack intstatus
+ * XXX - Take action based on bits and state
+ * XXX - Reenable interrupts (as per state)
+ */
+
+ DHD_INTR(("%s: Enter\n", __FUNCTION__));
+
+ /* Count the interrupt call */
+ bus->intrcount++;
+ bus->ipend = TRUE;
+
+ /* Shouldn't get this interrupt if we're sleeping? */
+ if (!SLPAUTO_ENAB(bus)) {
+ if (bus->sleeping) {
+ DHD_ERROR(("INTERRUPT WHILE SLEEPING??\n"));
+ return;
+ } else if (!KSO_ENAB(bus)) {
+ DHD_ERROR(("ISR in devsleep 1\n"));
+ }
+ }
+
+ /* Disable additional interrupts (is this needed now)? */
+ if (bus->intr) {
+ DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__));
+ } else {
+ DHD_ERROR(("dhdsdio_isr() w/o interrupt configured!\n"));
+ }
+
+#ifdef BCMSPI_ANDROID
+ bcmsdh_oob_intr_set(bus->sdh, FALSE);
+#endif /* BCMSPI_ANDROID */
+#if !defined(NDIS)
+ bcmsdh_intr_disable(sdh); /* XXX New API: bcmsdh_intr_mask()? */
+#endif /* !defined(NDIS) */
+ bus->intdis = TRUE;
+
+#if defined(SDIO_ISR_THREAD)
+ DHD_TRACE(("Calling dhdsdio_dpc() from %s\n", __FUNCTION__));
+ DHD_OS_WAKE_LOCK(bus->dhd);
+ /* terence 20150209: the DPC should be scheduled again if dhdsdio_dpc() returns TRUE,
+ * because dhd_bus_txdata cannot schedule it anymore once dpc_sched is TRUE.
+ */
+ if (dhdsdio_dpc(bus)) {
+ bus->dpc_sched = TRUE;
+ dhd_sched_dpc(bus->dhd);
+ }
+ DHD_OS_WAKE_UNLOCK(bus->dhd);
+#else
+#if !defined(NDIS)
+ bus->dpc_sched = TRUE;
+ dhd_sched_dpc(bus->dhd);
+#endif /* !defined(NDIS) */
+#endif /* defined(SDIO_ISR_THREAD) */
+
+}
+
+#ifdef PKT_STATICS
+void
+dhd_bus_dump_txpktstatics(dhd_pub_t *dhdp)
+{
+ dhd_bus_t *bus = dhdp->bus;
+ uint32 total = 0;
+ uint i;
+
+ printf("%s: TYPE EVENT: %d pkts (size=%d) transfered\n",
+ __FUNCTION__, bus->tx_statics.event_count, bus->tx_statics.event_size);
+ printf("%s: TYPE CTRL: %d pkts (size=%d) transfered\n",
+ __FUNCTION__, bus->tx_statics.ctrl_count, bus->tx_statics.ctrl_size);
+ printf("%s: TYPE DATA: %d pkts (size=%d) transfered\n",
+ __FUNCTION__, bus->tx_statics.data_count, bus->tx_statics.data_size);
+ printf("%s: Glom size distribution:\n", __FUNCTION__);
+ for (i=0;i<bus->tx_statics.glom_max;i++) {
+ total += bus->tx_statics.glom_cnt[i];
+ }
+ printk(KERN_CONT DHD_LOG_PREFIXS);
+ for (i=0;i<bus->tx_statics.glom_max;i++) {
+ printk(KERN_CONT "%02d: %5d", i+1, bus->tx_statics.glom_cnt[i]);
+ if ((i+1)%8)
+ printk(KERN_CONT ", ");
+ else {
+ printk("\n");
+ printk(KERN_CONT DHD_LOG_PREFIXS);
+ }
+ }
+ printk("\n");
+ printk(KERN_CONT DHD_LOG_PREFIXS);
+ for (i=0;i<bus->tx_statics.glom_max;i++) {
+ printk(KERN_CONT "%02d:%5d%%", i+1, (bus->tx_statics.glom_cnt[i]*100)/total);
+ if ((i+1)%8)
+ printk(KERN_CONT ", ");
+ else {
+ printk("\n");
+ printk(KERN_CONT DHD_LOG_PREFIXS);
+ }
+ }
+ printk("\n");
+ printf("%s: Glom spend time distribution(us):\n", __FUNCTION__);
+ printk(KERN_CONT DHD_LOG_PREFIXS);
+ for (i=0;i<bus->tx_statics.glom_max;i++) {
+ printk(KERN_CONT "%02d: %5u", i+1, bus->tx_statics.glom_cnt_us[i]);
+ if ((i+1)%8)
+ printk(KERN_CONT ", ");
+ else {
+ printk("\n");
+ printk(KERN_CONT DHD_LOG_PREFIXS);
+ }
+ }
+ printk("\n");
+ if (total) {
+ printf("%s: data(%d)/glom(%d)=%d, glom_max=%d\n",
+ __FUNCTION__, bus->tx_statics.data_count, total,
+ bus->tx_statics.data_count/total, bus->tx_statics.glom_max);
+ }
+ printf("%s: TYPE RX GLOM: %d pkts (size=%d) transfered\n",
+ __FUNCTION__, bus->tx_statics.glom_count, bus->tx_statics.glom_size);
+ printf("%s: TYPE TEST: %d pkts (size=%d) transfered\n",
+ __FUNCTION__, bus->tx_statics.test_count, bus->tx_statics.test_size);
+
+#ifdef KSO_DEBUG
+ printf("%s: kso try distribution(us):\n", __FUNCTION__);
+ printk(KERN_CONT DHD_LOG_PREFIXS);
+ for (i=0; i<10; i++) {
+ printk(KERN_CONT "[%d]: %d, ", i, dhdp->conf->kso_try_array[i]);
+ }
+ printk("\n");
+#endif
+}
+
+void
+dhd_bus_clear_txpktstatics(dhd_pub_t *dhdp)
+{
+ dhd_bus_t *bus = dhdp->bus;
+ memset((uint8*) &bus->tx_statics, 0, sizeof(pkt_statics_t));
+}
+#endif
+
+#ifdef SDTEST
+static void
+dhdsdio_pktgen_init(dhd_bus_t *bus)
+{
+ /* Default to specified length, or full range */
+ if (dhd_pktgen_len) {
+ bus->pktgen_maxlen = MIN(dhd_pktgen_len, MAX_PKTGEN_LEN);
+ bus->pktgen_minlen = bus->pktgen_maxlen;
+ } else {
+ bus->pktgen_maxlen = MAX_PKTGEN_LEN;
+ bus->pktgen_minlen = 0;
+ }
+ bus->pktgen_len = (uint16)bus->pktgen_minlen;
+
+ /* Default to per-watchdog burst with 10s print time */
+ bus->pktgen_freq = 1;
+ bus->pktgen_print = dhd_watchdog_ms ? (10000 / dhd_watchdog_ms) : 0;
+ bus->pktgen_count = (dhd_pktgen * dhd_watchdog_ms + 999) / 1000;
+
+ /* Default to echo mode */
+ bus->pktgen_mode = DHD_PKTGEN_ECHO;
+ bus->pktgen_stop = 1;
+}
+
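+/* Worked example of the defaults above (a sketch, assuming dhd_watchdog_ms == 10
+ * and dhd_pktgen == 500, both hypothetical values): pktgen_print = 10000 / 10 =
+ * 1000 watchdog ticks between prints (10 s), and pktgen_count =
+ * (500 * 10 + 999) / 1000 = 5 packets per tick, i.e. the burst size is
+ * ceil(dhd_pktgen * dhd_watchdog_ms / 1000).
+ */
+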
+static void
+dhdsdio_pktgen(dhd_bus_t *bus)
+{
+ void *pkt;
+ uint8 *data;
+ uint pktcount;
+ uint fillbyte;
+ osl_t *osh = bus->dhd->osh;
+ uint16 len;
+#if defined(LINUX)
+ ulong time_lapse;
+ uint sent_pkts;
+ uint rcvd_pkts;
+#endif /* LINUX */
+
+ /* Display current count if appropriate */
+ if (bus->pktgen_print && (++bus->pktgen_ptick >= bus->pktgen_print)) {
+ bus->pktgen_ptick = 0;
+ printf("%s: send attempts %d, rcvd %d, errors %d\n",
+ __FUNCTION__, bus->pktgen_sent, bus->pktgen_rcvd, bus->pktgen_fail);
+
+#if defined(LINUX)
+ /* Print throughput stats only for constant length packet runs */
+ if (bus->pktgen_minlen == bus->pktgen_maxlen) {
+ time_lapse = jiffies - bus->pktgen_prev_time;
+ bus->pktgen_prev_time = jiffies;
+ sent_pkts = bus->pktgen_sent - bus->pktgen_prev_sent;
+ bus->pktgen_prev_sent = bus->pktgen_sent;
+ rcvd_pkts = bus->pktgen_rcvd - bus->pktgen_prev_rcvd;
+ bus->pktgen_prev_rcvd = bus->pktgen_rcvd;
+
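+ /* Units note: pktgen_len is in bytes and time_lapse in jiffies, so
+ * (pkts * bytes) / jiffies_to_msecs() gives bytes/ms == kB/s, and the
+ * final * 8 converts to kbps as printed below.
+ */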
+ printf("%s: Tx Throughput %d kbps, Rx Throughput %d kbps\n",
+ __FUNCTION__,
+ (sent_pkts * bus->pktgen_len / jiffies_to_msecs(time_lapse)) * 8,
+ (rcvd_pkts * bus->pktgen_len / jiffies_to_msecs(time_lapse)) * 8);
+ }
+#endif /* LINUX */
+ }
+
+ /* For recv mode, just make sure dongle has started sending */
+ if (bus->pktgen_mode == DHD_PKTGEN_RECV) {
+ if (bus->pktgen_rcv_state == PKTGEN_RCV_IDLE) {
+ bus->pktgen_rcv_state = PKTGEN_RCV_ONGOING;
+ dhdsdio_sdtest_set(bus, bus->pktgen_total);
+ }
+ return;
+ }
+
+ /* Otherwise, generate or request the specified number of packets */
+ for (pktcount = 0; pktcount < bus->pktgen_count; pktcount++) {
+ /* Stop if total has been reached */
+ if (bus->pktgen_total && (bus->pktgen_sent >= bus->pktgen_total)) {
+ bus->pktgen_count = 0;
+ break;
+ }
+
+ /* Allocate an appropriate-sized packet */
+ if (bus->pktgen_mode == DHD_PKTGEN_RXBURST) {
+ len = SDPCM_TEST_PKT_CNT_FLD_LEN;
+ } else {
+ len = bus->pktgen_len;
+ }
+ if (!(pkt = PKTGET(osh, (len + SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + DHD_SDALIGN),
+ TRUE))) {
+ DHD_ERROR(("%s: PKTGET failed!\n", __FUNCTION__));
+ break;
+ }
+ PKTALIGN(osh, pkt, (len + SDPCM_HDRLEN + SDPCM_TEST_HDRLEN), DHD_SDALIGN);
+ data = (uint8*)PKTDATA(osh, pkt) + SDPCM_HDRLEN;
+
+ /* Write test header cmd and extra based on mode */
+ switch (bus->pktgen_mode) {
+ case DHD_PKTGEN_ECHO:
+ *data++ = SDPCM_TEST_ECHOREQ;
+ *data++ = (uint8)bus->pktgen_sent;
+ break;
+
+ case DHD_PKTGEN_SEND:
+ *data++ = SDPCM_TEST_DISCARD;
+ *data++ = (uint8)bus->pktgen_sent;
+ break;
+
+ case DHD_PKTGEN_RXBURST:
+ *data++ = SDPCM_TEST_BURST;
+ *data++ = (uint8)bus->pktgen_count; /* Just for backward compatibility */
+ break;
+
+ default:
+ DHD_ERROR(("Unrecognized pktgen mode %d\n", bus->pktgen_mode));
+ PKTFREE(osh, pkt, TRUE);
+ bus->pktgen_count = 0;
+ return;
+ }
+
+ /* Write test header length field */
+ *data++ = (bus->pktgen_len >> 0);
+ *data++ = (bus->pktgen_len >> 8);
+
+ /* Write the frame count in a 4-byte field adjacent to the SDPCM test
+ * header for burst mode
+ */
+ if (bus->pktgen_mode == DHD_PKTGEN_RXBURST) {
+ *data++ = (uint8)(bus->pktgen_count >> 0);
+ *data++ = (uint8)(bus->pktgen_count >> 8);
+ *data++ = (uint8)(bus->pktgen_count >> 16);
+ *data++ = (uint8)(bus->pktgen_count >> 24);
+ } else {
+
+ /* Then fill in the remainder -- N/A for burst */
+ for (fillbyte = 0; fillbyte < len; fillbyte++)
+ *data++ = SDPCM_TEST_FILL(fillbyte, (uint8)bus->pktgen_sent);
+ }
+
+#ifdef DHD_DEBUG
+ if (DHD_BYTES_ON() && DHD_DATA_ON()) {
+ data = (uint8*)PKTDATA(osh, pkt) + SDPCM_HDRLEN;
+ prhex("dhdsdio_pktgen: Tx Data", data, PKTLEN(osh, pkt) - SDPCM_HDRLEN);
+ }
+#endif
+
+ /* Send it */
+ if (dhdsdio_txpkt(bus, SDPCM_TEST_CHANNEL, &pkt, 1, TRUE) != BCME_OK) {
+ bus->pktgen_fail++;
+ if (bus->pktgen_stop && bus->pktgen_stop == bus->pktgen_fail)
+ bus->pktgen_count = 0;
+ }
+ bus->pktgen_sent++;
+
+ /* Bump length if not fixed, wrap at max */
+ if (++bus->pktgen_len > bus->pktgen_maxlen)
+ bus->pktgen_len = (uint16)bus->pktgen_minlen;
+
+ /* Special case for burst mode: just send one request! */
+ if (bus->pktgen_mode == DHD_PKTGEN_RXBURST)
+ break;
+ }
+}
+
+static void
+dhdsdio_sdtest_set(dhd_bus_t *bus, uint count)
+{
+ void *pkt;
+ uint8 *data;
+ osl_t *osh = bus->dhd->osh;
+
+ /* Allocate the packet */
+ if (!(pkt = PKTGET(osh, SDPCM_HDRLEN + SDPCM_TEST_HDRLEN +
+ SDPCM_TEST_PKT_CNT_FLD_LEN + DHD_SDALIGN, TRUE))) {
+ DHD_ERROR(("%s: PKTGET failed!\n", __FUNCTION__));
+ return;
+ }
+ PKTALIGN(osh, pkt, (SDPCM_HDRLEN + SDPCM_TEST_HDRLEN +
+ SDPCM_TEST_PKT_CNT_FLD_LEN), DHD_SDALIGN);
+ data = (uint8*)PKTDATA(osh, pkt) + SDPCM_HDRLEN;
+
+ /* Fill in the test header */
+ *data++ = SDPCM_TEST_SEND;
+ *data++ = (count > 0) ? TRUE : FALSE;
+ *data++ = (bus->pktgen_maxlen >> 0);
+ *data++ = (bus->pktgen_maxlen >> 8);
+ *data++ = (uint8)(count >> 0);
+ *data++ = (uint8)(count >> 8);
+ *data++ = (uint8)(count >> 16);
+ *data++ = (uint8)(count >> 24);
+
+ /* Send it */
+ if (dhdsdio_txpkt(bus, SDPCM_TEST_CHANNEL, &pkt, 1, TRUE) != BCME_OK)
+ bus->pktgen_fail++;
+}
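+
+/* For reference, the SDPCM test header assembled above is laid out as (a
+ * sketch inferred from the writes in dhdsdio_pktgen()/dhdsdio_sdtest_set();
+ * the field names are illustrative only):
+ *
+ *   uint8  cmd;     SDPCM_TEST_ECHOREQ/DISCARD/BURST/SEND
+ *   uint8  extra;   sequence (low byte of pktgen_sent) or start/stop flag
+ *   uint16 len;     payload length, little-endian (low byte first)
+ *   uint32 count;   burst/send modes only: frame count, little-endian
+ *
+ * dhdsdio_testrcv() parses the same layout on the receive side.
+ */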
+
+static void
+dhdsdio_testrcv(dhd_bus_t *bus, void *pkt, uint seq)
+{
+ osl_t *osh = bus->dhd->osh;
+ uint8 *data;
+ uint pktlen;
+
+ uint8 cmd;
+ uint8 extra;
+ uint16 len;
+ uint16 offset;
+
+ /* Check for min length */
+ if ((pktlen = PKTLEN(osh, pkt)) < SDPCM_TEST_HDRLEN) {
+ DHD_ERROR(("dhdsdio_restrcv: toss runt frame, pktlen %d\n", pktlen));
+ PKTFREE(osh, pkt, FALSE);
+ return;
+ }
+
+ /* Extract header fields */
+ data = PKTDATA(osh, pkt);
+ cmd = *data++;
+ extra = *data++;
+ len = *data++; len += *data++ << 8;
+ DHD_TRACE(("%s:cmd:%d, xtra:%d,len:%d\n", __FUNCTION__, cmd, extra, len));
+ /* Check length for relevant commands */
+ if (cmd == SDPCM_TEST_DISCARD || cmd == SDPCM_TEST_ECHOREQ || cmd == SDPCM_TEST_ECHORSP) {
+ if (pktlen != len + SDPCM_TEST_HDRLEN) {
+ DHD_ERROR(("dhdsdio_testrcv: frame length mismatch, pktlen %d seq %d"
+ " cmd %d extra %d len %d\n", pktlen, seq, cmd, extra, len));
+ PKTFREE(osh, pkt, FALSE);
+ return;
+ }
+ }
+
+ /* Process as per command */
+ switch (cmd) {
+ case SDPCM_TEST_ECHOREQ:
+ /* Rx->Tx turnaround ok (even on NDIS w/current implementation) */
+ *(uint8 *)(PKTDATA(osh, pkt)) = SDPCM_TEST_ECHORSP;
+ if (dhdsdio_txpkt(bus, SDPCM_TEST_CHANNEL, &pkt, 1, TRUE) == BCME_OK) {
+ bus->pktgen_sent++;
+ } else {
+ bus->pktgen_fail++;
+ PKTFREE(osh, pkt, FALSE);
+ }
+ bus->pktgen_rcvd++;
+ break;
+
+ case SDPCM_TEST_ECHORSP:
+ if (bus->ext_loop) {
+ PKTFREE(osh, pkt, FALSE);
+ bus->pktgen_rcvd++;
+ break;
+ }
+
+ for (offset = 0; offset < len; offset++, data++) {
+ if (*data != SDPCM_TEST_FILL(offset, extra)) {
+ DHD_ERROR(("dhdsdio_testrcv: echo data mismatch: "
+ "offset %d (len %d) expect 0x%02x rcvd 0x%02x\n",
+ offset, len, SDPCM_TEST_FILL(offset, extra), *data));
+ break;
+ }
+ }
+ PKTFREE(osh, pkt, FALSE);
+ bus->pktgen_rcvd++;
+ break;
+
+ case SDPCM_TEST_DISCARD:
+ {
+ int i = 0;
+ uint8 *prn = data;
+ uint8 testval = extra;
+ for (i = 0; i < len; i++) {
+ if (*prn != testval) {
+ DHD_ERROR(("DIErr@Pkt#:%d,Ix:%d, expected:0x%x, got:0x%x\n",
+ bus->pktgen_rcvd_rcvsession, i, testval, *prn));
+ }
+ /* advance on every byte, not only on mismatch */
+ prn++; testval++;
+ }
+ }
+ PKTFREE(osh, pkt, FALSE);
+ bus->pktgen_rcvd++;
+ break;
+
+ case SDPCM_TEST_BURST:
+ case SDPCM_TEST_SEND:
+ default:
+ DHD_INFO(("dhdsdio_testrcv: unsupported or unknown command, pktlen %d seq %d"
+ " cmd %d extra %d len %d\n", pktlen, seq, cmd, extra, len));
+ PKTFREE(osh, pkt, FALSE);
+ break;
+ }
+
+ /* For recv mode, stop at limit (and tell dongle to stop sending) */
+ if (bus->pktgen_mode == DHD_PKTGEN_RECV) {
+ if (bus->pktgen_rcv_state != PKTGEN_RCV_IDLE) {
+ bus->pktgen_rcvd_rcvsession++;
+
+ if (bus->pktgen_total &&
+ (bus->pktgen_rcvd_rcvsession >= bus->pktgen_total)) {
+ bus->pktgen_count = 0;
+ DHD_ERROR(("Pktgen:rcv test complete!\n"));
+ bus->pktgen_rcv_state = PKTGEN_RCV_IDLE;
+ dhdsdio_sdtest_set(bus, FALSE);
+ bus->pktgen_rcvd_rcvsession = 0;
+ }
+ }
+ }
+}
+#endif /* SDTEST */
+
+int dhd_bus_oob_intr_register(dhd_pub_t *dhdp)
+{
+ int err = 0;
+
+#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID)
+ err = bcmsdh_oob_intr_register(dhdp->bus->sdh, dhdsdio_isr, dhdp->bus);
+#endif
+ return err;
+}
+
+void dhd_bus_oob_intr_unregister(dhd_pub_t *dhdp)
+{
+#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID)
+ bcmsdh_oob_intr_unregister(dhdp->bus->sdh);
+#endif
+}
+
+void dhd_bus_oob_intr_set(dhd_pub_t *dhdp, bool enable)
+{
+#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID)
+ bcmsdh_oob_intr_set(dhdp->bus->sdh, enable);
+#endif
+}
+
+int dhd_bus_get_oob_irq_num(dhd_pub_t *dhdp)
+{
+ int irq_num = 0;
+#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID)
+ irq_num = bcmsdh_get_oob_intr_num(dhdp->bus->sdh);
+#endif /* OOB_INTR_ONLY || BCMSPI_ANDROID */
+ return irq_num;
+}
+
+void dhd_bus_dev_pm_stay_awake(dhd_pub_t *dhdpub)
+{
+#ifdef LINUX
+ bcmsdh_dev_pm_stay_awake(dhdpub->bus->sdh);
+#endif
+}
+
+void dhd_bus_dev_pm_relax(dhd_pub_t *dhdpub)
+{
+#ifdef LINUX
+ bcmsdh_dev_relax(dhdpub->bus->sdh);
+#endif
+}
+
+bool dhd_bus_dev_pm_enabled(dhd_pub_t *dhdpub)
+{
+ bool enabled = FALSE;
+
+#ifdef LINUX
+ enabled = bcmsdh_dev_pm_enabled(dhdpub->bus->sdh);
+#endif
+ return enabled;
+}
+
+extern bool
+dhd_bus_watchdog(dhd_pub_t *dhdp)
+{
+ dhd_bus_t *bus;
+ unsigned long flags;
+
+ DHD_TIMER(("%s: Enter\n", __FUNCTION__));
+
+ bus = dhdp->bus;
+
+ if (bus->dhd->dongle_reset)
+ return FALSE;
+
+ if (bus->dhd->hang_was_sent) {
+ dhd_os_wd_timer(bus->dhd, 0);
+ return FALSE;
+ }
+
+ /* Ignore the timer if simulating bus down */
+ if (!SLPAUTO_ENAB(bus) && bus->sleeping)
+ return FALSE;
+
+ DHD_LINUX_GENERAL_LOCK(dhdp, flags);
+ if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp) ||
+ DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhdp)) {
+ DHD_LINUX_GENERAL_UNLOCK(dhdp, flags);
+ return FALSE;
+ }
+ DHD_BUS_BUSY_SET_IN_WD(dhdp);
+ DHD_LINUX_GENERAL_UNLOCK(dhdp, flags);
+
+ dhd_os_sdlock(bus->dhd);
+
+ /* Poll period: check device if appropriate. */
+ // terence 20160615: removed !SLPAUTO_ENAB(bus) to fix being unable to poll when SR is supported
+ if (bus->poll && (++bus->polltick >= bus->pollrate)) {
+ uint32 intstatus = 0;
+
+ /* Reset poll tick */
+ bus->polltick = 0;
+
+ /* Check device if no interrupts */
+ if (!bus->intr || (bus->intrcount == bus->lastintrs)) {
+#ifdef DEBUG_LOST_INTERRUPTS
+ uint retries = 0;
+ bool hostpending;
+ uint8 devena, devpend;
+
+ /* Make sure backplane clock is on */
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+ hostpending = bcmsdh_intr_pending(bus->sdh);
+ devena = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_INTEN, NULL);
+ devpend = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_INTPEND, NULL);
+
+ R_SDREG(intstatus, &bus->regs->intstatus, retries);
+ intstatus &= bus->hostintmask;
+
+ if (intstatus && !hostpending) {
+ DHD_ERROR(("%s: !hpend: ena 0x%02x pend 0x%02x intstatus 0x%08x\n",
+ __FUNCTION__, devena, devpend, intstatus));
+ }
+#endif /* DEBUG_LOST_INTERRUPTS */
+
+#ifndef BCMSPI
+ /* XXX Needs to be fixed for polling operation (in CE) */
+ if (!bus->dpc_sched) {
+ uint8 devpend;
+ devpend = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0,
+ SDIOD_CCCR_INTPEND, NULL);
+ intstatus = devpend & (INTR_STATUS_FUNC1 | INTR_STATUS_FUNC2);
+ }
+#else
+ if (!bus->dpc_sched) {
+ uint32 devpend;
+ devpend = bcmsdh_cfg_read_word(bus->sdh, SDIO_FUNC_0,
+ SPID_STATUS_REG, NULL);
+ intstatus = devpend & STATUS_F2_PKT_AVAILABLE;
+ }
+#endif /* !BCMSPI */
+
+ /* If there is something, make like the ISR and schedule the DPC */
+ if (intstatus) {
+ bus->pollcnt++;
+ bus->ipend = TRUE;
+ if (bus->intr) {
+ bcmsdh_intr_disable(bus->sdh);
+ }
+ bus->dpc_sched = TRUE;
+ dhd_sched_dpc(bus->dhd);
+ }
+ }
+
+ /* Update interrupt tracking */
+ bus->lastintrs = bus->intrcount;
+ }
+
+ if ((!bus->dpc_sched) && pktq_n_pkts_tot(&bus->txq)) {
+ bus->dpc_sched = TRUE;
+ dhd_sched_dpc(bus->dhd);
+ }
+
+#ifdef DHD_DEBUG
+ /* Poll for console output periodically */
+ if (dhdp->busstate == DHD_BUS_DATA && dhdp->dhd_console_ms != 0) {
+ bus->console.count += dhd_watchdog_ms;
+ if (bus->console.count >= dhdp->dhd_console_ms) {
+ bus->console.count -= dhdp->dhd_console_ms;
+ /* Make sure backplane clock is on */
+ if (SLPAUTO_ENAB(bus))
+ dhdsdio_bussleep(bus, FALSE);
+ else
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+ if (dhdsdio_readconsole(bus) < 0)
+ dhdp->dhd_console_ms = 0; /* On error, stop trying */
+ }
+ }
+#endif /* DHD_DEBUG */
+
+#ifdef SDTEST
+ /* Generate packets if configured */
+ if (bus->pktgen_count && (++bus->pktgen_tick >= bus->pktgen_freq)) {
+ /* Make sure backplane clock is on */
+ if (SLPAUTO_ENAB(bus))
+ dhdsdio_bussleep(bus, FALSE);
+ else
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+ bus->pktgen_tick = 0;
+ dhdsdio_pktgen(bus);
+ }
+#endif
+
+ /* On idle timeout clear activity flag and/or turn off clock */
+#ifdef DHD_USE_IDLECOUNT
+ if (bus->activity)
+ bus->activity = FALSE;
+ else {
+ bus->idlecount++;
+
+ /*
+ * If the condition to switch off the clock is reached, and if BT is
+ * inactive (in case of a BT_OVER_SDIO build), turn off the clock.
+ *
+ * Consider the following case: DHD is configured with
+ * 1) idletime == DHD_IDLE_IMMEDIATE
+ * 2) BT as the last user of the clock.
+ * We cannot disable the clock from __dhdsdio_clk_disable since WLAN
+ * might be using it. If WLAN is active, the clock is turned off from
+ * the respective function/context after the job is done. But if WLAN
+ * is actually inactive, the watchdog should disable the clock. So the
+ * condition check below should be bus->idletime != 0 instead of
+ * idletime == 0
+ */
+ if ((bus->idletime != 0) && (bus->idlecount >= bus->idletime) &&
+ NO_OTHER_ACTIVE_BUS_USER(bus)) {
+ DHD_TIMER(("%s: DHD Idle state!!\n", __FUNCTION__));
+ if (!bus->poll && SLPAUTO_ENAB(bus)) {
+ if (dhdsdio_bussleep(bus, TRUE) != BCME_BUSY)
+ dhd_os_wd_timer(bus->dhd, 0);
+ } else
+ dhdsdio_clkctl(bus, CLK_NONE, FALSE);
+
+ bus->idlecount = 0;
+ }
+ }
+#else
+ if ((bus->idletime != 0) && (bus->clkstate == CLK_AVAIL) &&
+ NO_OTHER_ACTIVE_BUS_USER(bus)) {
+ if (++bus->idlecount >= bus->idletime) {
+ bus->idlecount = 0;
+ if (bus->activity) {
+ bus->activity = FALSE;
+#if !defined(OEM_ANDROID) && !defined(NDIS)
+/* XXX
+ * For Android turn off clocks as soon as possible, to improve power
+ * efficiency. For non-android, extend clock-active period for voice
+ * quality reasons (see PR84690/Jira:SWWLAN-7650).
+ */
+ } else {
+#endif /* !defined(OEM_ANDROID) && !defined(NDIS) */
+ if (!bus->poll && SLPAUTO_ENAB(bus)) {
+ if (!bus->readframes)
+ dhdsdio_bussleep(bus, TRUE);
+ else
+ bus->reqbussleep = TRUE;
+ } else {
+ dhdsdio_clkctl(bus, CLK_NONE, FALSE);
+ }
+ }
+ }
+ }
+#endif /* DHD_USE_IDLECOUNT */
+
+ dhd_os_sdunlock(bus->dhd);
+
+ DHD_LINUX_GENERAL_LOCK(dhdp, flags);
+ DHD_BUS_BUSY_CLEAR_IN_WD(dhdp);
+ dhd_os_busbusy_wake(dhdp);
+ DHD_LINUX_GENERAL_UNLOCK(dhdp, flags);
+
+ return bus->ipend;
+}
+
+extern int
+dhd_bus_console_in(dhd_pub_t *dhdp, uchar *msg, uint msglen)
+{
+ dhd_bus_t *bus = dhdp->bus;
+ uint32 addr, val;
+ int rv;
+ void *pkt;
+
+#ifndef CONSOLE_DPC
+ /* Exclusive bus access */
+ dhd_os_sdlock(bus->dhd);
+#endif
+
+ /* Address could be zero if CONSOLE := 0 in dongle Makefile */
+ if (bus->console_addr == 0) {
+ rv = BCME_UNSUPPORTED;
+ goto exit;
+ }
+
+ /* Don't allow input if dongle is in reset */
+ if (bus->dhd->dongle_reset) {
+ rv = BCME_NOTREADY;
+ goto exit;
+ }
+
+#ifndef CONSOLE_DPC
+ if (!DATAOK(bus)) {
+ DHD_CTL(("%s: No bus credit bus->tx_max %d, bus->tx_seq %d, pktq_len %d\n",
+ __FUNCTION__, bus->tx_max, bus->tx_seq, pktq_n_pkts_tot(&bus->txq)));
+ rv = BCME_NOTREADY;
+ goto exit;
+ }
+
+ /* Request clock to allow SDIO accesses */
+ BUS_WAKE(bus);
+ /* No pend allowed since txpkt is called later, ht clk has to be on */
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+#endif
+
+ /* Zero cbuf_index */
+ addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf_idx);
+ /* handle difference in definition of hnd_log_t in certain branches */
+ if (dhdp->wlc_ver_major < 14) {
+ addr -= sizeof(uint32);
+ }
+ val = htol32(0);
+ if ((rv = dhdsdio_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
+ goto done;
+
+ /* Write message into cbuf */
+ addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf);
+ /* handle difference in definition of hnd_log_t in certain branches */
+ if (dhdp->wlc_ver_major < 14) {
+ addr -= sizeof(uint32);
+ }
+ if ((rv = dhdsdio_membytes(bus, TRUE, addr, (uint8 *)msg, msglen)) < 0)
+ goto done;
+
+ /* Write length into vcons_in */
+ addr = bus->console_addr + OFFSETOF(hnd_cons_t, vcons_in);
+ val = htol32(msglen);
+ if ((rv = dhdsdio_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
+ goto done;
+
+ /* Bump dongle by sending an empty packet on the event channel.
+ * sdpcm_sendup (RX) checks for virtual console input.
+ */
+ if ((pkt = PKTGET(bus->dhd->osh, 4 + SDPCM_RESERVE, TRUE)) != NULL)
+ rv = dhdsdio_txpkt(bus, SDPCM_EVENT_CHANNEL, &pkt, 1, TRUE);
+
+done:
+#ifndef CONSOLE_DPC
+ if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched &&
+ NO_OTHER_ACTIVE_BUS_USER(bus)) {
+ bus->activity = FALSE;
+ dhdsdio_bussleep(bus, TRUE);
+ dhdsdio_clkctl(bus, CLK_NONE, FALSE);
+ }
+#endif
+
+exit:
+#ifdef CONSOLE_DPC
+ memset(bus->cons_cmd, 0, sizeof(bus->cons_cmd));
+#else
+ dhd_os_sdunlock(bus->dhd);
+#endif
+ return rv;
+}
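+
+/* Summary of the virtual console handshake above (a sketch of the sequence,
+ * not a new interface): the host writes into the dongle's shared hnd_cons_t
+ * over the backplane, then nudges the dongle:
+ *   1. cbuf_idx = 0       reset the console input buffer index
+ *   2. cbuf[]   = msg     the command bytes themselves
+ *   3. vcons_in = msglen  tells the dongle that input is ready
+ *   4. send an empty packet on SDPCM_EVENT_CHANNEL so sdpcm_sendup() on the
+ *      dongle side checks for virtual console input.
+ */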
+
+#ifdef CONSOLE_DPC
+extern int
+dhd_bus_txcons(dhd_pub_t *dhdp, uchar *msg, uint msglen)
+{
+ dhd_bus_t *bus = dhdp->bus;
+ int ret = BCME_OK;
+
+ dhd_os_sdlock(bus->dhd);
+
+ /* Address could be zero if CONSOLE := 0 in dongle Makefile */
+ if (bus->console_addr == 0) {
+ ret = BCME_UNSUPPORTED;
+ goto exit;
+ }
+
+ /* Don't allow input if dongle is in reset */
+ if (bus->dhd->dongle_reset) {
+ ret = BCME_NOTREADY;
+ goto exit;
+ }
+
+ if (msglen >= sizeof(bus->cons_cmd)) {
+ DHD_ERROR(("%s: \"%s\"(%d) too long\n", __FUNCTION__, msg, msglen));
+ ret = BCME_BADARG;
+ goto exit;
+ }
+
+ if (!strlen(bus->cons_cmd)) {
+ strncpy(bus->cons_cmd, msg, sizeof(bus->cons_cmd));
+ DHD_CTL(("%s: \"%s\" delay send, tx_max %d, tx_seq %d, pktq_len %d\n",
+ __FUNCTION__, bus->cons_cmd, bus->tx_max, bus->tx_seq, pktq_n_pkts_tot(&bus->txq)));
+ if (!bus->dpc_sched) {
+ bus->dpc_sched = TRUE;
+ dhd_sched_dpc(bus->dhd);
+ }
+ } else {
+ DHD_CTL(("%s: \"%s\" is pending, tx_max %d, tx_seq %d, pktq_len %d\n",
+ __FUNCTION__, bus->cons_cmd, bus->tx_max, bus->tx_seq, pktq_n_pkts_tot(&bus->txq)));
+ ret = BCME_NOTREADY;
+ }
+
+exit:
+ dhd_os_sdunlock(bus->dhd);
+
+ return ret;
+}
+#endif
+
+#if defined(DHD_DEBUG) && !defined(BCMSDIOLITE)
+static void
+dhd_dump_cis(uint fn, uint8 *cis)
+{
+ uint byte, tag, tdata;
+ DHD_INFO(("Function %d CIS:\n", fn));
+
+ for (tdata = byte = 0; byte < SBSDIO_CIS_SIZE_LIMIT; byte++) {
+ if ((byte % 16) == 0)
+ DHD_INFO((" "));
+ DHD_INFO(("%02x ", cis[byte]));
+ if ((byte % 16) == 15)
+ DHD_INFO(("\n"));
+ if (!tdata--) {
+ tag = cis[byte];
+ if (tag == 0xff)
+ break;
+ else if (!tag)
+ tdata = 0;
+ else if ((byte + 1) < SBSDIO_CIS_SIZE_LIMIT)
+ tdata = cis[byte + 1] + 1;
+ else
+ DHD_INFO(("]"));
+ }
+ }
+ if ((byte % 16) != 15)
+ DHD_INFO(("\n"));
+}
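+
+/* The dump loop above walks standard SDIO CIS tuples: a one-byte tag, a
+ * one-byte link (length of the data that follows), then the data; tag 0xff
+ * ends the chain and tag 0x00 is a null tuple. A minimal sketch of the same
+ * walk without the hex-dump formatting:
+ *
+ *   for (i = 0; i < SBSDIO_CIS_SIZE_LIMIT && cis[i] != 0xff; )
+ *       i += (cis[i] == 0) ? 1 : 2 + cis[i + 1];
+ */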
+#endif /* DHD_DEBUG && !BCMSDIOLITE */
+
+static bool
+dhdsdio_chipmatch(uint16 chipid)
+{
+ if (chipid == BCM4330_CHIP_ID)
+ return TRUE;
+ if (chipid == BCM43362_CHIP_ID)
+ return TRUE;
+ if (chipid == BCM43340_CHIP_ID)
+ return TRUE;
+ if (chipid == BCM43341_CHIP_ID)
+ return TRUE;
+ if (chipid == BCM4334_CHIP_ID)
+ return TRUE;
+ if (chipid == BCM4324_CHIP_ID)
+ return TRUE;
+ if (chipid == BCM4335_CHIP_ID)
+ return TRUE;
+ if (chipid == BCM4339_CHIP_ID)
+ return TRUE;
+ if (BCM4345_CHIP(chipid))
+ return TRUE;
+ if (chipid == BCM4350_CHIP_ID)
+ return TRUE;
+ if (chipid == BCM4354_CHIP_ID)
+ return TRUE;
+ if (chipid == BCM4358_CHIP_ID)
+ return TRUE;
+ if (chipid == BCM43569_CHIP_ID)
+ return TRUE;
+ if (chipid == BCM4371_CHIP_ID)
+ return TRUE;
+ if (chipid == BCM43430_CHIP_ID)
+ return TRUE;
+ if (chipid == BCM43018_CHIP_ID)
+ return TRUE;
+ if (BCM4349_CHIP(chipid))
+ return TRUE;
+#ifdef UNRELEASEDCHIP
+ if ((chipid == BCM4347_CHIP_ID) ||
+ (chipid == BCM4357_CHIP_ID) ||
+ (chipid == BCM4361_CHIP_ID))
+ return TRUE;
+#endif
+ if (chipid == BCM4364_CHIP_ID)
+ return TRUE;
+
+ if (chipid == BCM43012_CHIP_ID)
+ return TRUE;
+
+ if (chipid == BCM43014_CHIP_ID)
+ return TRUE;
+
+ if (chipid == BCM43013_CHIP_ID)
+ return TRUE;
+
+ if (chipid == BCM4369_CHIP_ID)
+ return TRUE;
+
+ if (BCM4378_CHIP(chipid)) {
+ return TRUE;
+ }
+
+ if (chipid == BCM4362_CHIP_ID)
+ return TRUE;
+ if (chipid == BCM43751_CHIP_ID)
+ return TRUE;
+ if (chipid == BCM43752_CHIP_ID)
+ return TRUE;
+
+ return FALSE;
+}
+
+static void *
+dhdsdio_probe(uint16 venid, uint16 devid, uint16 bus_no, uint16 slot,
+ uint16 func, uint bustype, void *regsva, osl_t * osh, void *sdh)
+{
+ int ret;
+ dhd_bus_t *bus;
+
+ DHD_MUTEX_LOCK();
+
+ /* Init global variables at run-time, not as part of the declaration.
+ * This is required to support init/de-init of the driver. Initialization
+ * of globals as part of the declaration results in non-deterministic
+ * behavior since the value of the globals may be different on the
+ * first time that the driver is initialized vs subsequent initializations.
+ */
+ dhd_txbound = DHD_TXBOUND;
+ dhd_rxbound = DHD_RXBOUND;
+#ifdef BCMSPI
+ dhd_alignctl = FALSE;
+#else
+ dhd_alignctl = TRUE;
+#endif /* BCMSPI */
+ sd1idle = TRUE;
+ dhd_readahead = TRUE;
+ retrydata = FALSE;
+
+#ifdef DISABLE_FLOW_CONTROL
+ dhd_doflow = FALSE;
+#endif /* DISABLE_FLOW_CONTROL */
+ dhd_dongle_ramsize = 0;
+ dhd_txminmax = DHD_TXMINMAX;
+
+#ifdef BCMSPI
+ forcealign = FALSE;
+#else
+ forcealign = TRUE;
+#endif /* !BCMSPI */
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+ DHD_INFO(("%s: venid 0x%04x devid 0x%04x\n", __FUNCTION__, venid, devid));
+
+ /* We make assumptions about address window mappings */
+ ASSERT((uintptr)regsva == si_enum_base(devid));
+
+ /* BCMSDH passes venid and devid based on CIS parsing -- but low-power start
+ * means early parse could fail, so here we should get either an ID
+ * we recognize OR (-1) indicating we must request power first.
+ */
+ /* Check the Vendor ID */
+ switch (venid) {
+ case 0x0000:
+ case VENDOR_BROADCOM:
+ break;
+ default:
+ DHD_ERROR(("%s: unknown vendor: 0x%04x\n",
+ __FUNCTION__, venid));
+ goto forcereturn;
+ }
+
+ /* Check the Device ID and make sure it's one that we support */
+ switch (devid) {
+#ifdef BCMINTERNAL
+ case SDIOD_FPGA_ID:
+ DHD_INFO(("%s: found FPGA Dongle\n", __FUNCTION__));
+ break;
+#endif /* BCMINTERNAL */
+ case 0:
+ DHD_INFO(("%s: allow device id 0, will check chip internals\n",
+ __FUNCTION__));
+ break;
+
+ default:
+ DHD_ERROR(("%s: skipping 0x%04x/0x%04x, not a dongle\n",
+ __FUNCTION__, venid, devid));
+ goto forcereturn;
+ }
+
+ if (osh == NULL) {
+ DHD_ERROR(("%s: osh is NULL!\n", __FUNCTION__));
+ goto forcereturn;
+ }
+
+ /* Allocate private bus interface state */
+ if (!(bus = MALLOC(osh, sizeof(dhd_bus_t)))) {
+ DHD_ERROR(("%s: MALLOC of dhd_bus_t failed\n", __FUNCTION__));
+ goto fail;
+ }
+ bzero(bus, sizeof(dhd_bus_t));
+ bus->sdh = sdh;
+ bus->cl_devid = (uint16)devid;
+ bus->bus = DHD_BUS;
+ bus->bus_num = bus_no;
+ bus->slot_num = slot;
+ bus->tx_seq = SDPCM_SEQUENCE_WRAP - 1;
+ bus->usebufpool = FALSE; /* Use bufpool if allocated, else use locally malloced rxbuf */
+#ifdef BT_OVER_SDIO
+ bus->bt_use_count = 0;
+#endif
+
+#if defined(LINUX) && defined(SUPPORT_P2P_GO_PS)
+ init_waitqueue_head(&bus->bus_sleep);
+#endif /* LINUX && SUPPORT_P2P_GO_PS */
+ init_waitqueue_head(&bus->ctrl_tx_wait);
+
+ /* attempt to attach to the dongle */
+ if (!(dhdsdio_probe_attach(bus, osh, sdh, regsva, devid))) {
+ DHD_ERROR(("%s: dhdsdio_probe_attach failed\n", __FUNCTION__));
+ goto fail;
+ }
+
+ /* Attach to the dhd/OS/network interface */
+ if (!(bus->dhd = dhd_attach(osh, bus, SDPCM_RESERVE))) {
+ DHD_ERROR(("%s: dhd_attach failed\n", __FUNCTION__));
+ goto fail;
+ }
+
+#if defined(GET_OTP_MAC_ENABLE) || defined(GET_OTP_MODULE_NAME)
+ dhd_conf_get_otp(bus->dhd, sdh, bus->sih);
+#endif
+
+ /* Allocate buffers */
+ if (!(dhdsdio_probe_malloc(bus, osh, sdh))) {
+ DHD_ERROR(("%s: dhdsdio_probe_malloc failed\n", __FUNCTION__));
+ goto fail;
+ }
+
+ if (!(dhdsdio_probe_init(bus, osh, sdh))) {
+ DHD_ERROR(("%s: dhdsdio_probe_init failed\n", __FUNCTION__));
+ goto fail;
+ }
+
+ if (bus->intr) {
+ /* Register interrupt callback, but mask it (not operational yet). */
+ DHD_INTR(("%s: disable SDIO interrupts (not interested yet)\n", __FUNCTION__));
+ bcmsdh_intr_disable(sdh); /* XXX New API: bcmsdh_intr_mask()? */
+ if ((ret = bcmsdh_intr_reg(sdh, dhdsdio_isr, bus)) != 0) {
+ DHD_ERROR(("%s: FAILED: bcmsdh_intr_reg returned %d\n",
+ __FUNCTION__, ret));
+ goto fail;
+ }
+ DHD_INTR(("%s: registered SDIO interrupt function ok\n", __FUNCTION__));
+ } else {
+ DHD_INFO(("%s: SDIO interrupt function is NOT registered due to polling mode\n",
+ __FUNCTION__));
+ }
+
+ DHD_INFO(("%s: completed!!\n", __FUNCTION__));
+
+ /* if firmware path present try to download and bring up bus */
+ bus->dhd->hang_report = TRUE;
+#if 0 // terence 20150325: fix for WPA/WPA2 4-way handshake fail in hostapd
+#if defined(LINUX) || defined(linux)
+ if (dhd_download_fw_on_driverload) {
+#endif /* LINUX || linux */
+ if ((ret = dhd_bus_start(bus->dhd)) != 0) {
+ DHD_ERROR(("%s: dhd_bus_start failed\n", __FUNCTION__));
+#if !defined(OEM_ANDROID)
+ if (ret == BCME_NOTUP)
+#endif /* !OEM_ANDROID */
+ goto fail;
+ }
+#if defined(LINUX) || defined(linux)
+ }
+ else {
+ /* Set random MAC address during boot time */
+ get_random_bytes(&bus->dhd->mac.octet[3], 3);
+ /* Adding BRCM OUI */
+ bus->dhd->mac.octet[0] = 0;
+ bus->dhd->mac.octet[1] = 0x90;
+ bus->dhd->mac.octet[2] = 0x4C;
+ }
+#endif /* LINUX || linux */
+#endif
+#if defined(BT_OVER_SDIO)
+ /* At this point the regulators are turned on and, conditionally on the
+ * dhd_download_fw_on_driverload check, the SDIO bus is started; so
+ * increase the bus user count. This count is only decremented inside
+ * dhd_register_if() if dhd_download_fw_on_driverload is set to false,
+ * i.e. FW download during insmod is not needed; otherwise it is not
+ * decremented, so that WLAN always holds the bus until rmmod is done.
+ */
+ dhdsdio_bus_usr_cnt_inc(bus->dhd);
+#endif /* BT_OVER_SDIO */
+
+ /* Ok, have the per-port tell the stack we're open for business */
+ if (dhd_attach_net(bus->dhd, TRUE) != 0) {
+ DHD_ERROR(("%s: Net attach failed!!\n", __FUNCTION__));
+ goto fail;
+ }
+
+#ifdef BCMHOST_XTAL_PU_TIME_MOD
+ bcmsdh_reg_write(bus->sdh, 0x18000620, 2, 11);
+ bcmsdh_reg_write(bus->sdh, 0x18000628, 4, 0x00F80001);
+#endif /* BCMHOST_XTAL_PU_TIME_MOD */
+
+#if defined(MULTIPLE_SUPPLICANT)
+ wl_android_post_init(); // terence 20120530: fix critical section in dhd_open and dhdsdio_probe
+#endif /* MULTIPLE_SUPPLICANT */
+ DHD_MUTEX_UNLOCK();
+
+ return bus;
+
+fail:
+ dhdsdio_release(bus, osh);
+
+forcereturn:
+ DHD_MUTEX_UNLOCK();
+
+ return NULL;
+}
+
+static bool
+dhdsdio_probe_attach(struct dhd_bus *bus, osl_t *osh, void *sdh, void *regsva,
+ uint16 devid)
+{
+#ifndef BCMSPI
+ int err = 0;
+ uint8 clkctl = 0;
+#endif /* !BCMSPI */
+
+ bus->alp_only = TRUE;
+ bus->sih = NULL;
+
+ /* Return the window to backplane enumeration space for core access */
+ if (dhdsdio_set_siaddr_window(bus, si_enum_base(devid))) {
+ DHD_ERROR(("%s: FAILED to return to SI_ENUM_BASE\n", __FUNCTION__));
+ }
+
+#if defined(DHD_DEBUG) && !defined(CUSTOMER_HW4_DEBUG)
+ DHD_ERROR(("F1 signature read @0x18000000=0x%4x\n",
+ bcmsdh_reg_read(bus->sdh, si_enum_base(devid), 4)));
+#endif /* DHD_DEBUG && !CUSTOMER_HW4_DEBUG */
+
+#ifndef BCMSPI /* wake-wlan in gSPI will bring up the htavail/alpavail clocks. */
+
+ /* Force PLL off until si_attach() programs PLL control regs */
+
+ /* XXX Ideally should not access F1 power control regs before
+ * reading CIS and confirming device. But strapping option for
+ * low-power start requires turning on ALP before reading CIS,
+ * and at some point bcmsdh should read the CIS for the ID and
+ * not even tell us if it's some other device. At this point
+ * (see above) we should know it's us (powered on) or can't read
+ * CIS so we need to power on and try.
+ */
+
+ /* WAR for PR 39902: must force HT off until PLL programmed. */
+ /* WAR for PR43618, PR44891: don't do ALPReq until ALPAvail set */
+
+ /* XXX Replace write/read sequence with single bcmsdh_cfg_raw() call */
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, DHD_INIT_CLKCTL1, &err);
+ if (!err)
+ clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+
+ if (err || ((clkctl & ~SBSDIO_AVBITS) != DHD_INIT_CLKCTL1)) {
+ DHD_ERROR(("dhdsdio_probe: ChipClkCSR access: err %d wrote 0x%02x read 0x%02x\n",
+ err, DHD_INIT_CLKCTL1, clkctl));
+ goto fail;
+ }
+
+#endif /* !BCMSPI */
+
+#ifdef DHD_DEBUG
+ if (DHD_INFO_ON()) {
+ uint fn, numfn;
+ uint8 *cis = NULL;
+ int local_err = 0;
+
+#ifndef BCMSPI
+ numfn = bcmsdh_query_iofnum(sdh);
+ ASSERT(numfn <= SDIOD_MAX_IOFUNCS);
+
+ /* Make sure ALP is available before trying to read CIS */
+ SPINWAIT(((clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, NULL)),
+ !SBSDIO_ALPAV(clkctl)), PMU_MAX_TRANSITION_DLY);
+
+ /* Now request ALP be put on the bus */
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+ DHD_INIT_CLKCTL2, &local_err);
+ /* XXX Account for possible delay between ALP available and on active */
+ OSL_DELAY(65);
+#else
+ numfn = 0; /* internally func is hardcoded to 1 as gSPI has cis on F1 only */
+#endif /* !BCMSPI */
+#ifndef BCMSDIOLITE
+ if (!(cis = MALLOC(osh, SBSDIO_CIS_SIZE_LIMIT))) {
+ DHD_INFO(("dhdsdio_probe: cis malloc failed\n"));
+ goto fail;
+ }
+
+ for (fn = 0; fn <= numfn; fn++) {
+ bzero(cis, SBSDIO_CIS_SIZE_LIMIT);
+ if ((err = bcmsdh_cis_read(sdh, fn, cis,
+ SBSDIO_CIS_SIZE_LIMIT))) {
+ DHD_INFO(("dhdsdio_probe: fn %d cis read err %d\n",
+ fn, err));
+ break;
+ }
+ dhd_dump_cis(fn, cis);
+ }
+ MFREE(osh, cis, SBSDIO_CIS_SIZE_LIMIT);
+#else
+ BCM_REFERENCE(cis);
+ BCM_REFERENCE(fn);
+#endif /* !BCMSDIOLITE */
+ if (local_err) {
+ DHD_ERROR(("dhdsdio_probe: failure reading or parsing CIS\n"));
+ goto fail;
+ }
+ }
+#endif /* DHD_DEBUG */
+
+ /* si_attach() will provide an SI handle and scan the backplane */
+ if (!(bus->sih = si_attach((uint)devid, osh, regsva, DHD_BUS, sdh,
+ &bus->vars, &bus->varsz))) {
+ DHD_ERROR(("%s: si_attach failed!\n", __FUNCTION__));
+ goto fail;
+ }
+
+#ifdef DHD_DEBUG
+ DHD_ERROR(("F1 signature OK, socitype:0x%x chip:0x%4x rev:0x%x pkg:0x%x\n",
+ bus->sih->socitype, bus->sih->chip, bus->sih->chiprev, bus->sih->chippkg));
+#endif /* DHD_DEBUG */
+
+ /* XXX Let the layers below dhd know the chipid and chiprev for
+ * controlling sw WARs for hw PRs
+ */
+ bcmsdh_chipinfo(sdh, bus->sih->chip, bus->sih->chiprev);
+
+ if (!dhdsdio_chipmatch((uint16)bus->sih->chip)) {
+ DHD_ERROR(("%s: unsupported chip: 0x%04x\n",
+ __FUNCTION__, bus->sih->chip));
+#ifdef BCMINTERNAL
+ if (dhd_anychip)
+ DHD_ERROR(("Continuing anyway...\n"));
+ else
+#endif /* BCMINTERNAL */
+ goto fail;
+ }
+
+ if (bus->sih->buscorerev >= 12)
+ dhdsdio_clk_kso_init(bus);
+ else
+ bus->kso = TRUE;
+
+ si_sdiod_drive_strength_init(bus->sih, osh, dhd_sdiod_drive_strength);
+
+#ifdef BCMINTERNAL
+ /* Check if there is a PMU in the chip. The FPGA does not have a PMU. */
+ if (!(bus->sih->cccaps & CC_CAP_PMU)) {
+ DHD_NOPMU(bus) = 1;
+ }
+#endif /* BCMINTERNAL */
+
+ /* Get info on the ARM and SOCRAM cores... */
+ /* XXX Should really be qualified by device id */
+ if (!DHD_NOPMU(bus)) {
+ if ((si_setcore(bus->sih, ARM7S_CORE_ID, 0)) ||
+ (si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) ||
+ (si_setcore(bus->sih, ARMCR4_CORE_ID, 0))) {
+ bus->armrev = si_corerev(bus->sih);
+ } else {
+ DHD_ERROR(("%s: failed to find ARM core!\n", __FUNCTION__));
+ goto fail;
+ }
+
+ if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
+ if (!(bus->orig_ramsize = si_socram_size(bus->sih))) {
+ DHD_ERROR(("%s: failed to find SOCRAM memory!\n", __FUNCTION__));
+ goto fail;
+ }
+ } else {
+ /* cr4 has a different way to find the RAM size from TCM's */
+ if (!(bus->orig_ramsize = si_tcm_size(bus->sih))) {
+ DHD_ERROR(("%s: failed to find CR4-TCM memory!\n", __FUNCTION__));
+ goto fail;
+ }
+ /* also populate base address */
+ switch ((uint16)bus->sih->chip) {
+ case BCM4335_CHIP_ID:
+ case BCM4339_CHIP_ID:
+ bus->dongle_ram_base = CR4_4335_RAM_BASE;
+ break;
+ case BCM4350_CHIP_ID:
+ case BCM4354_CHIP_ID:
+ case BCM4358_CHIP_ID:
+ case BCM43569_CHIP_ID:
+ case BCM4371_CHIP_ID:
+ bus->dongle_ram_base = CR4_4350_RAM_BASE;
+ break;
+ case BCM4360_CHIP_ID:
+ bus->dongle_ram_base = CR4_4360_RAM_BASE;
+ break;
+ CASE_BCM4345_CHIP:
+ bus->dongle_ram_base = (bus->sih->chiprev < 6) /* from 4345C0 */
+ ? CR4_4345_LT_C0_RAM_BASE : CR4_4345_GE_C0_RAM_BASE;
+ break;
+ case BCM4349_CHIP_GRPID:
+ /* RAM based changed from 4349c0(revid=9) onwards */
+ bus->dongle_ram_base = ((bus->sih->chiprev < 9) ?
+ CR4_4349_RAM_BASE: CR4_4349_RAM_BASE_FROM_REV_9);
+ break;
+ case BCM4364_CHIP_ID:
+ bus->dongle_ram_base = CR4_4364_RAM_BASE;
+ break;
+#ifdef UNRELEASEDCHIP
+ case BCM4347_CHIP_ID:
+ case BCM4357_CHIP_ID:
+ case BCM4361_CHIP_ID:
+ bus->dongle_ram_base = CR4_4347_RAM_BASE;
+ break;
+#endif
+ case BCM4362_CHIP_ID:
+ bus->dongle_ram_base = CR4_4362_RAM_BASE;
+ break;
+ case BCM43751_CHIP_ID:
+ bus->dongle_ram_base = CR4_43751_RAM_BASE;
+ break;
+ case BCM43752_CHIP_ID:
+ bus->dongle_ram_base = CR4_43752_RAM_BASE;
+ break;
+ case BCM4369_CHIP_ID:
+ bus->dongle_ram_base = CR4_4369_RAM_BASE;
+ break;
+ case BCM4378_CHIP_GRPID:
+ bus->dongle_ram_base = CR4_4378_RAM_BASE;
+ break;
+ default:
+ bus->dongle_ram_base = 0;
+ DHD_ERROR(("%s: WARNING: Using default ram base at 0x%x\n",
+ __FUNCTION__, bus->dongle_ram_base));
+ }
+ }
+ bus->ramsize = bus->orig_ramsize;
+ if (dhd_dongle_ramsize)
+ dhd_dongle_setramsize(bus, dhd_dongle_ramsize);
+
+ DHD_ERROR(("DHD: dongle ram size is set to %d(orig %d) at 0x%x\n",
+ bus->ramsize, bus->orig_ramsize, bus->dongle_ram_base));
+
+ bus->srmemsize = si_socram_srmem_size(bus->sih);
+ }
+
+ /* ...but normally deal with the SDPCMDEV core */
+#ifdef BCMSDIOLITE
+ if (!(bus->regs = si_setcore(bus->sih, CC_CORE_ID, 0))) {
+ DHD_ERROR(("%s: failed to find Chip Common core!\n", __FUNCTION__));
+ goto fail;
+ }
+#else
+ if (!(bus->regs = si_setcore(bus->sih, PCMCIA_CORE_ID, 0)) &&
+ !(bus->regs = si_setcore(bus->sih, SDIOD_CORE_ID, 0))) {
+ DHD_ERROR(("%s: failed to find SDIODEV core!\n", __FUNCTION__));
+ goto fail;
+ }
+#endif
+ bus->sdpcmrev = si_corerev(bus->sih);
+
+ /* Set core control so an SDIO reset does a backplane reset */
+ OR_REG(osh, &bus->regs->corecontrol, CC_BPRESEN);
+#ifndef BCMSPI
+ bus->rxint_mode = SDIO_DEVICE_HMB_RXINT;
+
+ if ((bus->sih->buscoretype == SDIOD_CORE_ID) && (bus->sdpcmrev >= 4) &&
+ (bus->rxint_mode == SDIO_DEVICE_RXDATAINT_MODE_1))
+ {
+ uint32 val;
+
+ val = R_REG(osh, &bus->regs->corecontrol);
+ val &= ~CC_XMTDATAAVAIL_MODE;
+ val |= CC_XMTDATAAVAIL_CTRL;
+ W_REG(osh, &bus->regs->corecontrol, val);
+ }
+#endif /* BCMSPI */
+
+ /* XXX Tx needs priority queue, where to determine levels? */
+ /* XXX Should it try to do WLC mapping, or just pass through? */
+ pktq_init(&bus->txq, (PRIOMASK + 1), QLEN);
+
+ /* Locate an appropriately-aligned portion of hdrbuf */
+#ifndef DYNAMIC_MAX_HDR_READ
+ bus->rxhdr = (uint8 *)ROUNDUP((uintptr)&bus->hdrbuf[0], DHD_SDALIGN);
+#endif
+
+ /* Set the poll and/or interrupt flags */
+ bus->intr = (bool)dhd_intr;
+ if ((bus->poll = (bool)dhd_poll))
+ bus->pollrate = 1;
+
+ /* Setting default Glom size */
+ bus->txglomsize = SDPCM_DEFGLOM_SIZE;
+
+ return TRUE;
+
+fail:
+ if (bus->sih != NULL) {
+ si_detach(bus->sih);
+ bus->sih = NULL;
+ }
+ return FALSE;
+}
+
+static bool
+dhdsdio_probe_malloc(dhd_bus_t *bus, osl_t *osh, void *sdh)
+{
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (bus->dhd->maxctl) {
+ bus->rxblen = ROUNDUP((bus->dhd->maxctl+SDPCM_HDRLEN), ALIGNMENT) + DHD_SDALIGN;
+ if (!(bus->rxbuf = DHD_OS_PREALLOC(bus->dhd, DHD_PREALLOC_RXBUF, bus->rxblen))) {
+ DHD_ERROR(("%s: MALLOC of %d-byte rxbuf failed\n",
+ __FUNCTION__, bus->rxblen));
+ goto fail;
+ }
+ }
+ /* Allocate buffer to receive glomed packet */
+ if (!(bus->databuf = DHD_OS_PREALLOC(bus->dhd, DHD_PREALLOC_DATABUF, MAX_DATA_BUF))) {
+ DHD_ERROR(("%s: MALLOC of %d-byte databuf failed\n",
+ __FUNCTION__, MAX_DATA_BUF));
+ /* release rxbuf which was already located as above */
+ if (!bus->rxblen)
+ DHD_OS_PREFREE(bus->dhd, bus->rxbuf, bus->rxblen);
+ goto fail;
+ }
+ /* Allocate buffer to membuf */
+ bus->membuf = MALLOC(osh, MAX_MEM_BUF);
+ if (bus->membuf == NULL) {
+ DHD_ERROR(("%s: MALLOC of %d-byte membuf failed\n",
+ __FUNCTION__, MAX_MEM_BUF));
+ if (bus->databuf) {
+#ifndef CONFIG_DHD_USE_STATIC_BUF
+ MFREE(osh, bus->databuf, MAX_DATA_BUF);
+#endif
+ bus->databuf = NULL;
+ }
+ /* release rxbuf which was already located as above */
+ if (!bus->rxblen)
+ DHD_OS_PREFREE(bus->dhd, bus->rxbuf, bus->rxblen);
+ goto fail;
+ }
+ memset(bus->membuf, 0, MAX_MEM_BUF);
+
+ /* Align the buffer */
+ if ((uintptr)bus->databuf % DHD_SDALIGN)
+ bus->dataptr = bus->databuf + (DHD_SDALIGN - ((uintptr)bus->databuf % DHD_SDALIGN));
+ else
+ bus->dataptr = bus->databuf;
+
+ return TRUE;
+
+fail:
+ return FALSE;
+}
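+
+/* The databuf alignment in dhdsdio_probe_malloc() above rounds the pointer up
+ * to the next DHD_SDALIGN boundary. For example (a sketch, assuming
+ * DHD_SDALIGN == 32): a databuf at 0x1044 gives 0x1044 % 32 = 4, so
+ * dataptr = databuf + (32 - 4), i.e. 0x1060, which is 32-byte aligned; an
+ * already-aligned databuf is used unchanged.
+ */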
+
+static bool
+dhdsdio_probe_init(dhd_bus_t *bus, osl_t *osh, void *sdh)
+{
+ int32 fnum;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ bus->_srenab = FALSE;
+
+#ifdef SDTEST
+ dhdsdio_pktgen_init(bus);
+#endif /* SDTEST */
+
+#ifndef BCMSPI
+ /* Disable F2 to clear any intermediate frame state on the dongle */
+ /* XXX New API: change to bcmsdh_fn_set(sdh, SDIO_FUNC_2, FALSE); */
+ /* XXX Might write SRES instead, or reset ARM (download prep)? */
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, SDIO_FUNC_ENABLE_1, NULL);
+#endif /* !BCMSPI */
+
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
+ bus->dhd->busstate = DHD_BUS_DOWN;
+ bus->sleeping = FALSE;
+ bus->rxflow = FALSE;
+ bus->prev_rxlim_hit = 0;
+
+#ifndef BCMSPI
+ /* Done with backplane-dependent accesses, can drop clock... */
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
+#endif /* !BCMSPI */
+
+ /* ...and initialize clock/power states */
+ bus->clkstate = CLK_SDONLY;
+ bus->idletime = (int32)dhd_idletime;
+ bus->idleclock = DHD_IDLE_ACTIVE;
+
+ /* Query the SD clock speed */
+ if (bcmsdh_iovar_op(sdh, "sd_divisor", NULL, 0,
+ &bus->sd_divisor, sizeof(int32), FALSE) != BCME_OK) {
+ DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_divisor"));
+ bus->sd_divisor = -1;
+ } else {
+ DHD_INFO(("%s: Initial value for %s is %d\n",
+ __FUNCTION__, "sd_divisor", bus->sd_divisor));
+ }
+
+ /* Query the SD bus mode */
+ if (bcmsdh_iovar_op(sdh, "sd_mode", NULL, 0,
+ &bus->sd_mode, sizeof(int32), FALSE) != BCME_OK) {
+ DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_mode"));
+ bus->sd_mode = -1;
+ } else {
+ DHD_INFO(("%s: Initial value for %s is %d\n",
+ __FUNCTION__, "sd_mode", bus->sd_mode));
+ }
+
+ /* Query the F2 block size, set roundup accordingly */
+ fnum = 2;
+ if (bcmsdh_iovar_op(sdh, "sd_blocksize", &fnum, sizeof(int32),
+ &bus->blocksize, sizeof(int32), FALSE) != BCME_OK) {
+ bus->blocksize = 0;
+ DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_blocksize"));
+ } else {
+ DHD_INFO(("%s: Initial value for %s is %d\n",
+ __FUNCTION__, "sd_blocksize", bus->blocksize));
+
+ dhdsdio_tune_fifoparam(bus);
+ }
+ bus->roundup = MIN(max_roundup, bus->blocksize);
+
+#ifdef DHDENABLE_TAILPAD
+ if (bus->pad_pkt)
+ PKTFREE(osh, bus->pad_pkt, FALSE);
+ bus->pad_pkt = PKTGET(osh, SDIO_MAX_BLOCK_SIZE, FALSE);
+ if (bus->pad_pkt == NULL)
+ DHD_ERROR(("failed to allocate padding packet\n"));
+ else {
+ int alignment_offset = 0;
+ uintptr pktprt = (uintptr)PKTDATA(osh, bus->pad_pkt);
+ if (!(pktprt&1) && (pktprt = (pktprt % DHD_SDALIGN)))
+ PKTPUSH(osh, bus->pad_pkt, alignment_offset);
+ PKTSETNEXT(osh, bus->pad_pkt, NULL);
+ }
+#endif /* DHDENABLE_TAILPAD */
+
+ /* Query if bus module supports packet chaining, default to use if supported */
+ if (bcmsdh_iovar_op(sdh, "sd_rxchain", NULL, 0,
+ &bus->sd_rxchain, sizeof(int32), FALSE) != BCME_OK) {
+ bus->sd_rxchain = FALSE;
+ } else {
+ DHD_INFO(("%s: bus module (through bcmsdh API) %s chaining\n",
+ __FUNCTION__, (bus->sd_rxchain ? "supports" : "does not support")));
+ }
+ bus->use_rxchain = (bool)bus->sd_rxchain;
+ bus->txinrx_thres = CUSTOM_TXINRX_THRES;
+ /* TX first in dhdsdio_readframes() */
+ bus->dotxinrx = TRUE;
+
+#ifdef PKT_STATICS
+ dhd_bus_clear_txpktstatics(bus->dhd);
+#endif
+
+ return TRUE;
+}
+
+int
+dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh,
+ char *pfw_path, char *pnv_path,
+ char *pclm_path, char *pconf_path)
+{
+ int ret;
+
+ bus->fw_path = pfw_path;
+ bus->nv_path = pnv_path;
+ bus->dhd->clm_path = pclm_path;
+ bus->dhd->conf_path = pconf_path;
+
+ ret = dhdsdio_download_firmware(bus, osh, bus->sdh);
+
+ return ret;
+}
+
+int
+dhd_set_bus_params(struct dhd_bus *bus)
+{
+ int ret = 0;
+
+ if (bus->dhd->conf->dhd_poll >= 0) {
+ bus->poll = bus->dhd->conf->dhd_poll;
+ if (!bus->pollrate)
+ bus->pollrate = 1;
+ printf("%s: set polling mode %d\n", __FUNCTION__, bus->dhd->conf->dhd_poll);
+ }
+ if (bus->dhd->conf->use_rxchain >= 0) {
+ bus->use_rxchain = (bool)bus->dhd->conf->use_rxchain;
+ }
+ if (bus->dhd->conf->txinrx_thres >= 0) {
+ bus->txinrx_thres = bus->dhd->conf->txinrx_thres;
+ }
+ if (bus->dhd->conf->txglomsize >= 0) {
+ bus->txglomsize = bus->dhd->conf->txglomsize;
+ }
+#ifdef MINIME
+ if (bus->dhd->conf->fw_type == FW_TYPE_MINIME) {
+ bus->ramsize = bus->dhd->conf->ramsize;
+ printf("%s: set ramsize 0x%x\n", __FUNCTION__, bus->ramsize);
+ }
+#endif
+#ifdef DYNAMIC_MAX_HDR_READ
+ if (bus->dhd->conf->max_hdr_read <= 0) {
+ bus->dhd->conf->max_hdr_read = MAX_HDR_READ;
+ }
+ if (bus->hdrbufp) {
+ MFREE(bus->dhd->osh, bus->hdrbufp, bus->dhd->conf->max_hdr_read + DHD_SDALIGN);
+ }
+ bus->hdrbufp = MALLOC(bus->dhd->osh, bus->dhd->conf->max_hdr_read + DHD_SDALIGN);
+ if (bus->hdrbufp == NULL) {
+ DHD_ERROR(("%s: MALLOC of %d-byte hdrbufp failed\n",
+ __FUNCTION__, bus->dhd->conf->max_hdr_read + DHD_SDALIGN));
+ ret = -1;
+ goto exit;
+ }
+ bus->rxhdr = (uint8 *)ROUNDUP((uintptr)bus->hdrbufp, DHD_SDALIGN);
+
+exit:
+#endif
+ return ret;
+}
+
+static int
+dhdsdio_download_firmware(struct dhd_bus *bus, osl_t *osh, void *sdh)
+{
+ int ret;
+
+#if defined(SUPPORT_MULTIPLE_REVISION)
+ if (concate_revision(bus, bus->fw_path, bus->nv_path) != 0) {
+ DHD_ERROR(("%s: fail to concatnate revison \n",
+ __FUNCTION__));
+ return BCME_BADARG;
+ }
+#endif /* SUPPORT_MULTIPLE_REVISION */
+
+#if defined(DHD_BLOB_EXISTENCE_CHECK)
+ dhd_set_blob_support(bus->dhd, bus->fw_path);
+#endif /* DHD_BLOB_EXISTENCE_CHECK */
+
+ DHD_TRACE_HW4(("%s: firmware path=%s, nvram path=%s\n",
+ __FUNCTION__, bus->fw_path, bus->nv_path));
+ DHD_OS_WAKE_LOCK(bus->dhd);
+
+ dhd_conf_set_path_params(bus->dhd, bus->fw_path, bus->nv_path);
+ ret = dhd_set_bus_params(bus);
+ if (ret) {
+ goto exit;
+ }
+
+ /* Download the firmware */
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+ ret = _dhdsdio_download_firmware(bus);
+
+ dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+
+exit:
+ DHD_OS_WAKE_UNLOCK(bus->dhd);
+ return ret;
+}
+
+/* Detach and free everything */
+static void
+dhdsdio_release(dhd_bus_t *bus, osl_t *osh)
+{
+ bool dongle_isolation = FALSE;
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (bus) {
+ ASSERT(osh);
+
+ if (bus->dhd) {
+#if defined(DEBUGGER) || defined(DHD_DSCOPE)
+ debugger_close();
+#endif /* DEBUGGER || DHD_DSCOPE */
+ dongle_isolation = bus->dhd->dongle_isolation;
+ dhd_detach(bus->dhd);
+ }
+
+ /* De-register interrupt handler */
+ bcmsdh_intr_disable(bus->sdh);
+ bcmsdh_intr_dereg(bus->sdh);
+
+ if (bus->dhd) {
+ dhdsdio_release_dongle(bus, osh, dongle_isolation, TRUE);
+ dhd_free(bus->dhd);
+ bus->dhd = NULL;
+ }
+
+ dhdsdio_release_malloc(bus, osh);
+
+#ifdef DHD_DEBUG
+ if (bus->console.buf != NULL)
+ MFREE(osh, bus->console.buf, bus->console.bufsize);
+#endif
+
+#ifdef DHDENABLE_TAILPAD
+ if (bus->pad_pkt)
+ PKTFREE(osh, bus->pad_pkt, FALSE);
+#endif /* DHDENABLE_TAILPAD */
+#ifdef DYNAMIC_MAX_HDR_READ
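+		/* bus->dhd was freed above, so the configured max_hdr_read is
+		 * no longer reachable; the default MAX_HDR_READ sizing is
+		 * assumed here and may not match the size allocated in
+		 * dhd_set_bus_params() if max_hdr_read was overridden.
+		 */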
+ if (bus->hdrbufp) {
+ MFREE(osh, bus->hdrbufp, MAX_HDR_READ + DHD_SDALIGN);
+ }
+#endif
+
+ MFREE(osh, bus, sizeof(dhd_bus_t));
+ }
+
+ DHD_TRACE(("%s: Disconnected\n", __FUNCTION__));
+}
+
+static void
+dhdsdio_release_malloc(dhd_bus_t *bus, osl_t *osh)
+{
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (bus->dhd && bus->dhd->dongle_reset)
+ return;
+
+ if (bus->rxbuf) {
+#ifndef CONFIG_DHD_USE_STATIC_BUF
+ MFREE(osh, bus->rxbuf, bus->rxblen);
+#endif
+ bus->rxctl = NULL;
+ bus->rxlen = 0;
+ }
+
+ if (bus->databuf) {
+#ifndef CONFIG_DHD_USE_STATIC_BUF
+ MFREE(osh, bus->databuf, MAX_DATA_BUF);
+#endif
+ }
+
+ if (bus->membuf) {
+ MFREE(osh, bus->membuf, MAX_MEM_BUF);
+ bus->membuf = NULL;
+ }
+
+ if (bus->vars && bus->varsz) {
+ MFREE(osh, bus->vars, bus->varsz);
+ }
+
+}
+
+static void
+dhdsdio_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation, bool reset_flag)
+{
+ DHD_TRACE(("%s: Enter bus->dhd %p bus->dhd->dongle_reset %d \n", __FUNCTION__,
+ bus->dhd, bus->dhd->dongle_reset));
+
+ if ((bus->dhd && bus->dhd->dongle_reset) && reset_flag)
+ return;
+
+ if (bus->sih) {
+ /* In Win10, system will be BSOD if using "sysprep" to do OS image */
+ /* Skip this will not cause the BSOD. */
+#if !defined(BCMLXSDMMC) && !defined(NDIS)
+ /* XXX - Using the watchdog to reset the chip does not allow
+ * further SDIO communication. For the SDMMC Driver, this
+ * causes interrupt to not be de-registered properly.
+ */
+ /* XXX: dongle isolation mode is on don't reset the chip */
+ if (bus->dhd) {
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+ }
+ if (KSO_ENAB(bus) && (dongle_isolation == FALSE))
+ si_watchdog(bus->sih, 4);
+#endif /* !defined(BCMLXSDMMC) */
+ if (bus->dhd) {
+ dhdsdio_clkctl(bus, CLK_NONE, FALSE);
+ }
+ si_detach(bus->sih);
+ bus->sih = NULL;
+ if (bus->vars && bus->varsz)
+ MFREE(osh, bus->vars, bus->varsz);
+ bus->vars = NULL;
+ }
+
+ DHD_TRACE(("%s: Disconnected\n", __FUNCTION__));
+}
+
+static void
+dhdsdio_disconnect(void *ptr)
+{
+ dhd_bus_t *bus = (dhd_bus_t *)ptr;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ DHD_MUTEX_LOCK();
+ if (bus) {
+ ASSERT(bus->dhd);
+ /* Advertise bus remove during rmmod */
+ dhdsdio_advertise_bus_remove(bus->dhd);
+ dhdsdio_release(bus, bus->dhd->osh);
+ }
+ DHD_MUTEX_UNLOCK();
+
+ DHD_TRACE(("%s: Disconnected\n", __FUNCTION__));
+}
+
+static int
+dhdsdio_suspend(void *context)
+{
+ int ret = 0;
+#ifdef SUPPORT_P2P_GO_PS
+ int wait_time = 0;
+#endif /* SUPPORT_P2P_GO_PS */
+
+#if defined(LINUX)
+ dhd_bus_t *bus = (dhd_bus_t*)context;
+ unsigned long flags;
+
+ DHD_ERROR(("%s Enter\n", __FUNCTION__));
+ if (bus->dhd == NULL) {
+ DHD_ERROR(("bus not inited\n"));
+ return BCME_ERROR;
+ }
+ if (bus->dhd->prot == NULL) {
+ DHD_ERROR(("prot is not inited\n"));
+ return BCME_ERROR;
+ }
+
+ if (bus->dhd->up == FALSE) {
+ return BCME_OK;
+ }
+
+ DHD_LINUX_GENERAL_LOCK(bus->dhd, flags);
+ if (bus->dhd->busstate != DHD_BUS_DATA && bus->dhd->busstate != DHD_BUS_SUSPEND) {
+ DHD_ERROR(("not in a readystate to LPBK is not inited\n"));
+ DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags);
+ return BCME_ERROR;
+ }
+ DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags);
+ if (bus->dhd->dongle_reset) {
+ DHD_ERROR(("Dongle is in reset state.\n"));
+ return -EIO;
+ }
+
+ DHD_LINUX_GENERAL_LOCK(bus->dhd, flags);
+ /* stop all interface network queue. */
+ dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON);
+ bus->dhd->busstate = DHD_BUS_SUSPEND;
+#if defined(LINUX) || defined(linux)
+ if (DHD_BUS_BUSY_CHECK_IN_TX(bus->dhd)) {
+ DHD_ERROR(("Tx Request is not ended\n"));
+ bus->dhd->busstate = DHD_BUS_DATA;
+ /* resume all interface network queue. */
+ dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF);
+ DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags);
+ return -EBUSY;
+ }
+#endif /* LINUX || linux */
+ DHD_BUS_BUSY_SET_SUSPEND_IN_PROGRESS(bus->dhd);
+ DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags);
+
+#ifdef SUPPORT_P2P_GO_PS
+ if (bus->idletime > 0) {
+ wait_time = msecs_to_jiffies(bus->idletime * dhd_watchdog_ms);
+ }
+#endif /* SUPPORT_P2P_GO_PS */
+ ret = dhd_os_check_wakelock(bus->dhd);
+#ifdef SUPPORT_P2P_GO_PS
+ // terence 20141124: fix for suspend issue
+ if (SLPAUTO_ENAB(bus) && (!ret) && (bus->dhd->up) && (bus->dhd->op_mode != DHD_FLAG_HOSTAP_MODE)) {
+ if (wait_event_timeout(bus->bus_sleep, bus->sleeping, wait_time) == 0) {
+ if (!bus->sleeping) {
+ ret = 1;
+ }
+ }
+ }
+#endif /* SUPPORT_P2P_GO_PS */
+
+ DHD_LINUX_GENERAL_LOCK(bus->dhd, flags);
+ if (ret) {
+ bus->dhd->busstate = DHD_BUS_DATA;
+ /* resume all interface network queue. */
+ dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF);
+ }
+ DHD_BUS_BUSY_CLEAR_SUSPEND_IN_PROGRESS(bus->dhd);
+ dhd_os_busbusy_wake(bus->dhd);
+ DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags);
+
+#endif /* LINUX */
+ return ret;
+}
+
+static int
+dhdsdio_resume(void *context)
+{
+ dhd_bus_t *bus = (dhd_bus_t*)context;
+ ulong flags;
+
+ DHD_ERROR(("%s Enter\n", __FUNCTION__));
+
+ if (bus->dhd->up == FALSE) {
+ return BCME_OK;
+ }
+
+ DHD_LINUX_GENERAL_LOCK(bus->dhd, flags);
+ DHD_BUS_BUSY_SET_RESUME_IN_PROGRESS(bus->dhd);
+ DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags);
+
+#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID)
+ if (dhd_os_check_if_up(bus->dhd))
+ bcmsdh_oob_intr_set(bus->sdh, TRUE);
+#endif /* defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) */
+
+ DHD_LINUX_GENERAL_LOCK(bus->dhd, flags);
+ DHD_BUS_BUSY_CLEAR_RESUME_IN_PROGRESS(bus->dhd);
+ bus->dhd->busstate = DHD_BUS_DATA;
+ dhd_os_busbusy_wake(bus->dhd);
+ /* resume all interface network queue. */
+ dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF);
+ DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags);
+
+ return 0;
+}
+
+/* Register/Unregister functions are called by the main DHD entry
+ * point (e.g. module insertion) to link with the bus driver, in
+ * order to look for or await the device.
+ */
+
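+/* Callback dispatch table registered with bcmsdh:
+ * probe, disconnect, suspend, resume (in that order).
+ */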
+static bcmsdh_driver_t dhd_sdio = {
+ dhdsdio_probe,
+ dhdsdio_disconnect,
+ dhdsdio_suspend,
+ dhdsdio_resume
+};
+
+int
+dhd_bus_register(void)
+{
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ return bcmsdh_register(&dhd_sdio);
+}
+
+void
+dhd_bus_unregister(void)
+{
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ bcmsdh_unregister();
+}
+
+#if defined(BCMLXSDMMC)
+/* Register a dummy SDIO client driver in order to be notified of new SDIO device */
+int dhd_bus_reg_sdio_notify(void* semaphore)
+{
+ return bcmsdh_reg_sdio_notify(semaphore);
+}
+
+void dhd_bus_unreg_sdio_notify(void)
+{
+ bcmsdh_unreg_sdio_notify();
+}
+#endif /* defined(BCMLXSDMMC) */
+
+#ifdef BCMEMBEDIMAGE
+static int
+dhdsdio_download_code_array(struct dhd_bus *bus)
+{
+ int bcmerror = -1;
+ int offset = 0;
+ unsigned char *ularray = NULL;
+
+ DHD_INFO(("%s: download embedded firmware...\n", __FUNCTION__));
+
+ /* Download image */
+ while ((offset + MEMBLOCK) < sizeof(dlarray)) {
+ /* check if CR4 */
+ if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
+ /* if address is 0, store the reset instruction to be written in 0 */
+
+ if (offset == 0) {
+ bus->resetinstr = *(((uint32*)dlarray));
+ /* Add start of RAM address to the address given by user */
+ offset += bus->dongle_ram_base;
+ }
+ }
+
+ bcmerror = dhdsdio_membytes(bus, TRUE, offset,
+ (uint8 *) (dlarray + offset), MEMBLOCK);
+ if (bcmerror) {
+ DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
+ __FUNCTION__, bcmerror, MEMBLOCK, offset));
+ goto err;
+ }
+
+ offset += MEMBLOCK;
+ }
+
+ if (offset < sizeof(dlarray)) {
+ bcmerror = dhdsdio_membytes(bus, TRUE, offset,
+ (uint8 *) (dlarray + offset), sizeof(dlarray) - offset);
+ if (bcmerror) {
+ DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
+ __FUNCTION__, bcmerror, sizeof(dlarray) - offset, offset));
+ goto err;
+ }
+ }
+
+#ifdef DHD_DEBUG
+ /* Upload and compare the downloaded code */
+ {
+		ularray = MALLOC(bus->dhd->osh, bus->ramsize);
+		if (ularray == NULL) {
+			bcmerror = BCME_NOMEM;
+			goto err;
+		}
+		/* Upload image to verify downloaded contents. */
+		offset = 0;
+		memset(ularray, 0xaa, bus->ramsize);
+ while ((offset + MEMBLOCK) < sizeof(dlarray)) {
+ bcmerror = dhdsdio_membytes(bus, FALSE, offset, ularray + offset, MEMBLOCK);
+ if (bcmerror) {
+ DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
+ __FUNCTION__, bcmerror, MEMBLOCK, offset));
+ goto err;
+ }
+
+ offset += MEMBLOCK;
+ }
+
+ if (offset < sizeof(dlarray)) {
+ bcmerror = dhdsdio_membytes(bus, FALSE, offset,
+ ularray + offset, sizeof(dlarray) - offset);
+ if (bcmerror) {
+ DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
+ __FUNCTION__, bcmerror, sizeof(dlarray) - offset, offset));
+ goto err;
+ }
+ }
+
+ if (memcmp(dlarray, ularray, sizeof(dlarray))) {
+ DHD_ERROR(("%s: Downloaded image is corrupted (%s, %s, %s).\n",
+ __FUNCTION__, dlimagename, dlimagever, dlimagedate));
+ goto err;
+ } else
+ DHD_ERROR(("%s: Download, Upload and compare succeeded (%s, %s, %s).\n",
+ __FUNCTION__, dlimagename, dlimagever, dlimagedate));
+
+ }
+#endif /* DHD_DEBUG */
+
+err:
+ if (ularray)
+ MFREE(bus->dhd->osh, ularray, bus->ramsize);
+ return bcmerror;
+}
+#endif /* BCMEMBEDIMAGE */
+
+static int
+dhdsdio_download_code_file(struct dhd_bus *bus, char *pfw_path)
+{
+ int bcmerror = -1;
+ int offset = 0;
+ int len;
+ void *image = NULL;
+ uint8 *memblock = NULL, *memptr;
+#ifdef CHECK_DOWNLOAD_FW
+ uint8 *memptr_tmp = NULL; // terence: check downloaded firmware is correct
+#endif
+ uint memblock_size = MEMBLOCK;
+#ifdef DHD_DEBUG_DOWNLOADTIME
+ unsigned long initial_jiffies = 0;
+ uint firmware_sz = 0;
+#endif
+
+ DHD_INFO(("%s: download firmware %s\n", __FUNCTION__, pfw_path));
+
+ /* XXX: Should succeed in opening image if it is actually given through registry
+ * entry or in module param.
+ */
+ image = dhd_os_open_image1(bus->dhd, pfw_path);
+ if (image == NULL) {
+ printf("%s: Open firmware file failed %s\n", __FUNCTION__, pfw_path);
+ goto err;
+ }
+
+ /* Update the dongle image download block size depending on the F1 block size */
+#ifndef NDIS
+ if (sd_f1_blocksize == 512)
+ memblock_size = MAX_MEMBLOCK;
+#endif /* !NDIS */
+
+ memptr = memblock = MALLOC(bus->dhd->osh, memblock_size + DHD_SDALIGN);
+ if (memblock == NULL) {
+ DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__,
+ memblock_size));
+ goto err;
+ }
+#ifdef CHECK_DOWNLOAD_FW
+ if (bus->dhd->conf->fwchk) {
+ memptr_tmp = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN);
+ if (memptr_tmp == NULL) {
+ DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK));
+ goto err;
+ }
+ }
+#endif
+ if ((uint32)(uintptr)memblock % DHD_SDALIGN)
+ memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN));
+
+#ifdef DHD_DEBUG_DOWNLOADTIME
+ initial_jiffies = jiffies;
+#endif
+
+ /* Download image */
+ while ((len = dhd_os_get_image_block((char*)memptr, memblock_size, image))) {
+ // terence 20150412: fix for firmware failed to download
+ if (bus->dhd->conf->chip == BCM43340_CHIP_ID ||
+ bus->dhd->conf->chip == BCM43341_CHIP_ID) {
+ if (len % 64 != 0) {
+				memset(memptr+len, 0, 64 - (len%64));
+ len += (64 - len%64);
+ }
+ }
+ if (len < 0) {
+ DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n", __FUNCTION__, len));
+ bcmerror = BCME_ERROR;
+ goto err;
+ }
+ /* check if CR4 */
+ if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
+ /* if address is 0, store the reset instruction to be written in 0 */
+
+ if (offset == 0) {
+ bus->resetinstr = *(((uint32*)memptr));
+ /* Add start of RAM address to the address given by user */
+ offset += bus->dongle_ram_base;
+ }
+ }
+
+ bcmerror = dhdsdio_membytes(bus, TRUE, offset, memptr, len);
+ if (bcmerror) {
+ DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
+ __FUNCTION__, bcmerror, memblock_size, offset));
+ goto err;
+ }
+
+#ifdef CHECK_DOWNLOAD_FW
+ if (bus->dhd->conf->fwchk) {
+ bcmerror = dhdsdio_membytes(bus, FALSE, offset, memptr_tmp, len);
+ if (bcmerror) {
+ DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
+ __FUNCTION__, bcmerror, MEMBLOCK, offset));
+ goto err;
+ }
+ if (memcmp(memptr_tmp, memptr, len)) {
+ DHD_ERROR(("%s: Downloaded image is corrupted at 0x%08x\n", __FUNCTION__, offset));
+ bcmerror = BCME_ERROR;
+ goto err;
+ } else
+ DHD_INFO(("%s: Download, Upload and compare succeeded.\n", __FUNCTION__));
+ }
+#endif
+
+ offset += memblock_size;
+#ifdef DHD_DEBUG_DOWNLOADTIME
+ firmware_sz += len;
+#endif
+ }
+
+#ifdef DHD_DEBUG_DOWNLOADTIME
+ DHD_ERROR(("Firmware download time for %u bytes: %u ms\n",
+ firmware_sz, jiffies_to_msecs(jiffies - initial_jiffies)));
+#endif
+
+err:
+ if (memblock)
+ MFREE(bus->dhd->osh, memblock, memblock_size + DHD_SDALIGN);
+#ifdef CHECK_DOWNLOAD_FW
+ if (bus->dhd->conf->fwchk) {
+ if (memptr_tmp)
+ MFREE(bus->dhd->osh, memptr_tmp, MEMBLOCK + DHD_SDALIGN);
+ }
+#endif
+
+ if (image)
+ dhd_os_close_image1(bus->dhd, image);
+
+ return bcmerror;
+}
+
+#ifdef DHD_UCODE_DOWNLOAD
+/* Currently supported only for the chips in which ucode RAM is AXI addressable */
+static uint32
+dhdsdio_ucode_base(struct dhd_bus *bus)
+{
+ uint32 ucode_base = 0;
+
+ switch ((uint16)bus->sih->chip) {
+ case BCM43012_CHIP_ID:
+ case BCM43013_CHIP_ID:
+ case BCM43014_CHIP_ID:
+ ucode_base = 0xE8020000;
+ break;
+ default:
+ DHD_ERROR(("%s: Unsupported!\n", __func__));
+ break;
+ }
+
+ return ucode_base;
+}
+
+static int
+dhdsdio_download_ucode_file(struct dhd_bus *bus, char *ucode_path)
+{
+ int bcmerror = -1;
+ int offset = 0;
+ int len;
+ uint32 ucode_base;
+ void *image = NULL;
+ uint8 *memblock = NULL, *memptr;
+ uint memblock_size = MEMBLOCK;
+#ifdef DHD_DEBUG_DOWNLOADTIME
+ unsigned long initial_jiffies = 0;
+ uint firmware_sz = 0;
+#endif
+
+ DHD_INFO(("%s: download firmware %s\n", __FUNCTION__, ucode_path));
+
+ ucode_base = dhdsdio_ucode_base(bus);
+
+ /* XXX: Should succeed in opening image if it is actually given through registry
+ * entry or in module param.
+ */
+ image = dhd_os_open_image1(bus->dhd, ucode_path);
+ if (image == NULL)
+ goto err;
+
+ /* Update the dongle image download block size depending on the F1 block size */
+ if (sd_f1_blocksize == 512)
+ memblock_size = MAX_MEMBLOCK;
+
+ memptr = memblock = MALLOC(bus->dhd->osh, memblock_size + DHD_SDALIGN);
+ if (memblock == NULL) {
+ DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__,
+ memblock_size));
+ goto err;
+ }
+ if ((uint32)(uintptr)memblock % DHD_SDALIGN)
+ memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN));
+
+#ifdef DHD_DEBUG_DOWNLOADTIME
+ initial_jiffies = jiffies;
+#endif
+
+ /* Download image */
+ while ((len = dhd_os_get_image_block((char*)memptr, memblock_size, image))) {
+ if (len < 0) {
+ DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n", __FUNCTION__, len));
+ bcmerror = BCME_ERROR;
+ goto err;
+ }
+
+ bcmerror = dhdsdio_membytes(bus, TRUE, (ucode_base + offset), memptr, len);
+ if (bcmerror) {
+ DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
+ __FUNCTION__, bcmerror, memblock_size, offset));
+ goto err;
+ }
+
+ offset += memblock_size;
+#ifdef DHD_DEBUG_DOWNLOADTIME
+ firmware_sz += len;
+#endif
+ }
+
+#ifdef DHD_DEBUG_DOWNLOADTIME
+ DHD_ERROR(("ucode download time for %u bytes: %u ms\n",
+ firmware_sz, jiffies_to_msecs(jiffies - initial_jiffies)));
+#endif
+
+err:
+ if (memblock)
+ MFREE(bus->dhd->osh, memblock, memblock_size + DHD_SDALIGN);
+
+ if (image)
+ dhd_os_close_image1(bus->dhd, image);
+
+ return bcmerror;
+} /* dhdsdio_download_ucode_file */
+
+void
+dhd_bus_ucode_download(struct dhd_bus *bus)
+{
+ uint32 shaddr = 0, shdata = 0;
+
+ shaddr = bus->dongle_ram_base + bus->ramsize - 4;
+ dhdsdio_membytes(bus, FALSE, shaddr, (uint8 *)&shdata, 4);
+
+ DHD_TRACE(("%s: shdata:[0x%08x :0x%08x]\n", __func__, shaddr, shdata));
+
+ if (shdata == UCODE_DOWNLOAD_REQUEST)
+ {
+ DHD_ERROR(("%s: Received ucode download request!\n", __func__));
+
+ /* Download the ucode */
+ if (!dhd_get_ucode_path(bus->dhd)) {
+ DHD_ERROR(("%s: bus->uc_path not set!\n", __func__));
+ return;
+ }
+ dhdsdio_download_ucode_file(bus, dhd_get_ucode_path(bus->dhd));
+
+ DHD_ERROR(("%s: Ucode downloaded successfully!\n", __func__));
+
+ shdata = UCODE_DOWNLOAD_COMPLETE;
+ dhdsdio_membytes(bus, TRUE, shaddr, (uint8 *)&shdata, 4);
+ }
+}
+
+#endif /* DHD_UCODE_DOWNLOAD */
+
+static int
+dhdsdio_download_nvram(struct dhd_bus *bus)
+{
+ int bcmerror = -1;
+ uint len;
+ void * image = NULL;
+ char * memblock = NULL;
+ char *bufp;
+ char *pnv_path;
+ bool nvram_file_exists;
+
+ pnv_path = bus->nv_path;
+
+ nvram_file_exists = ((pnv_path != NULL) && (pnv_path[0] != '\0'));
+
+	/* Open the nvram file if a path was given */
+ if (nvram_file_exists) {
+ image = dhd_os_open_image1(bus->dhd, pnv_path);
+ if (image == NULL) {
+ printf("%s: Open nvram file failed %s\n", __FUNCTION__, pnv_path);
+ goto err;
+ }
+ }
+
+ memblock = MALLOC(bus->dhd->osh, MAX_NVRAMBUF_SIZE);
+ if (memblock == NULL) {
+ DHD_ERROR(("%s: Failed to allocate memory %d bytes\n",
+ __FUNCTION__, MAX_NVRAMBUF_SIZE));
+ goto err;
+ }
+
+	/* Read nvram from the file, or from UEFI when image == NULL */
+ len = dhd_os_get_image_block(memblock, MAX_NVRAMBUF_SIZE, image);
+
+ if (len > 0 && len < MAX_NVRAMBUF_SIZE) {
+ bufp = (char *)memblock;
+ bufp[len] = 0;
+ len = process_nvram_vars(bufp, len);
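+		/* Round the processed var block up to a 4-byte boundary and
+		 * append the terminating NUL the dongle parser expects; this
+		 * assumes the rounded length plus terminator still fits in
+		 * MAX_NVRAMBUF_SIZE.
+		 */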
+ if (len % 4) {
+ len += 4 - (len % 4);
+ }
+ bufp += len;
+ *bufp++ = 0;
+ if (len)
+ bcmerror = dhdsdio_downloadvars(bus, memblock, len + 1);
+ if (bcmerror) {
+ DHD_ERROR(("%s: error downloading vars: %d\n",
+ __FUNCTION__, bcmerror));
+ }
+ } else {
+ DHD_ERROR(("%s: error reading nvram file: %d\n",
+ __FUNCTION__, len));
+ bcmerror = BCME_SDIO_ERROR;
+ }
+
+err:
+ if (memblock)
+ MFREE(bus->dhd->osh, memblock, MAX_NVRAMBUF_SIZE);
+
+ if (image)
+ dhd_os_close_image1(bus->dhd, image);
+
+ return bcmerror;
+}
+
+static int
+_dhdsdio_download_firmware(struct dhd_bus *bus)
+{
+ int bcmerror = -1;
+
+ bool embed = FALSE; /* download embedded firmware */
+ bool dlok = FALSE; /* download firmware succeeded */
+
+ /* Out immediately if no image to download */
+ if ((bus->fw_path == NULL) || (bus->fw_path[0] == '\0')) {
+#ifdef BCMEMBEDIMAGE
+ embed = TRUE;
+#else
+ return bcmerror;
+#endif
+ }
+
+ /* Keep arm in reset */
+ if (dhdsdio_download_state(bus, TRUE)) {
+ DHD_ERROR(("%s: error placing ARM core in reset\n", __FUNCTION__));
+ goto err;
+ }
+
+ /* External image takes precedence if specified */
+ if ((bus->fw_path != NULL) && (bus->fw_path[0] != '\0')) {
+ if (dhdsdio_download_code_file(bus, bus->fw_path)) {
+ DHD_ERROR(("%s: dongle image file download failed\n", __FUNCTION__));
+#ifdef BCMEMBEDIMAGE
+ embed = TRUE;
+#else
+ goto err;
+#endif
+ } else {
+ embed = FALSE;
+ dlok = TRUE;
+ }
+ }
+
+#ifdef BCMEMBEDIMAGE
+ if (embed) {
+ if (dhdsdio_download_code_array(bus)) {
+ DHD_ERROR(("%s: dongle image array download failed\n", __FUNCTION__));
+ goto err;
+ } else {
+ dlok = TRUE;
+ }
+ }
+#else
+ BCM_REFERENCE(embed);
+#endif
+ if (!dlok) {
+ DHD_ERROR(("%s: dongle image download failed\n", __FUNCTION__));
+ goto err;
+ }
+
+ /* External nvram takes precedence if specified */
+ if (dhdsdio_download_nvram(bus)) {
+ DHD_ERROR(("%s: dongle nvram file download failed\n", __FUNCTION__));
+ goto err;
+ }
+
+ /* Take arm out of reset */
+ if (dhdsdio_download_state(bus, FALSE)) {
+ DHD_ERROR(("%s: error getting out of ARM core reset\n", __FUNCTION__));
+ goto err;
+ }
+
+ bcmerror = 0;
+
+err:
+ return bcmerror;
+}
+
+static int
+dhd_bcmsdh_recv_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags, uint8 *buf, uint nbytes,
+ void *pkt, bcmsdh_cmplt_fn_t complete_fn, void *handle)
+{
+ int status;
+
+ if (!KSO_ENAB(bus)) {
+ DHD_ERROR(("%s: Device asleep\n", __FUNCTION__));
+ return BCME_NODEVICE;
+ }
+
+ status = bcmsdh_recv_buf(bus->sdh, addr, fn, flags, buf, nbytes, pkt, complete_fn, handle);
+
+ return status;
+}
+
+static int
+dhd_bcmsdh_send_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags, uint8 *buf, uint nbytes,
+ void *pkt, bcmsdh_cmplt_fn_t complete_fn, void *handle, int max_retry)
+{
+ int ret;
+ int i = 0;
+ int retries = 0;
+ bcmsdh_info_t *sdh;
+
+ if (!KSO_ENAB(bus)) {
+ DHD_ERROR(("%s: Device asleep\n", __FUNCTION__));
+ return BCME_NODEVICE;
+ }
+
+ sdh = bus->sdh;
+ do {
+ ret = bcmsdh_send_buf(bus->sdh, addr, fn, flags, buf, nbytes,
+ pkt, complete_fn, handle);
+
+ bus->f2txdata++;
+ ASSERT(ret != BCME_PENDING);
+
+ if (ret == BCME_NODEVICE) {
+ DHD_ERROR(("%s: Device asleep already\n", __FUNCTION__));
+ } else if (ret < 0) {
+ /* On failure, abort the command and terminate the frame */
+ DHD_ERROR(("%s: sdio error %d, abort command and terminate frame.\n",
+ __FUNCTION__, ret));
+ bus->tx_sderrs++;
+ bus->f1regdata++;
+ bus->dhd->tx_errors++;
+ bcmsdh_abort(sdh, SDIO_FUNC_2);
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_FRAMECTRL,
+ SFC_WF_TERM, NULL);
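+			/* Poll the F1 write-frame byte counters until they
+			 * clear, confirming the aborted frame was terminated.
+			 */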
+ for (i = 0; i < READ_FRM_CNT_RETRIES; i++) {
+ uint8 hi, lo;
+ hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WFRAMEBCHI,
+ NULL);
+ lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WFRAMEBCLO,
+ NULL);
+ bus->f1regdata += 2;
+ if ((hi == 0) && (lo == 0))
+ break;
+ }
+ }
+ } while ((ret < 0) && retrydata && ++retries < max_retry);
+
+ return ret;
+}
+
+uint8
+dhd_bus_is_ioready(struct dhd_bus *bus)
+{
+ uint8 enable;
+ bcmsdh_info_t *sdh;
+ ASSERT(bus);
+ ASSERT(bus->sih != NULL);
+ enable = (SDIO_FUNC_ENABLE_1 | SDIO_FUNC_ENABLE_2);
+ sdh = bus->sdh;
+ return (enable == bcmsdh_cfg_read(sdh, SDIO_FUNC_0, SDIOD_CCCR_IORDY, NULL));
+}
+
+uint
+dhd_bus_chip(struct dhd_bus *bus)
+{
+ ASSERT(bus->sih != NULL);
+ return bus->sih->chip;
+}
+
+uint
+dhd_bus_chiprev(struct dhd_bus *bus)
+{
+ ASSERT(bus);
+ ASSERT(bus->sih != NULL);
+ return bus->sih->chiprev;
+}
+
+void *
+dhd_bus_pub(struct dhd_bus *bus)
+{
+ return bus->dhd;
+}
+
+void *
+dhd_bus_sih(struct dhd_bus *bus)
+{
+ return (void *)bus->sih;
+}
+
+void *
+dhd_bus_txq(struct dhd_bus *bus)
+{
+ return &bus->txq;
+}
+
+uint
+dhd_bus_hdrlen(struct dhd_bus *bus)
+{
+ return (bus->txglom_enable) ? SDPCM_HDRLEN_TXGLOM : SDPCM_HDRLEN;
+}
+
+void
+dhd_bus_set_dotxinrx(struct dhd_bus *bus, bool val)
+{
+ bus->dotxinrx = val;
+}
+
+/*
+ * dhdsdio_advertise_bus_cleanup advertises to the other bus-user contexts
+ * (Tx, Rx, IOVAR, watchdog, etc.) that cleanup is in progress and waits for
+ * them to exit gracefully. Before marking the bus busy, each bus-usage
+ * context checks whether busstate is DHD_BUS_DOWN or DHD_BUS_DOWN_IN_PROGRESS
+ * and, if so, bails out without setting its bit in dhd_bus_busy_state.
+ */
+static void
+dhdsdio_advertise_bus_cleanup(dhd_pub_t *dhdp)
+{
+ unsigned long flags;
+ int timeleft;
+
+ DHD_LINUX_GENERAL_LOCK(dhdp, flags);
+ dhdp->busstate = DHD_BUS_DOWN_IN_PROGRESS;
+ DHD_LINUX_GENERAL_UNLOCK(dhdp, flags);
+
+ timeleft = dhd_os_busbusy_wait_negation(dhdp, &dhdp->dhd_bus_busy_state);
+#ifdef LINUX
+ if ((timeleft == 0) || (timeleft == 1))
+#else
+ if (timeleft == 0)
+#endif
+ {
+ /* XXX This condition ideally should not occur, this means some
+ * bus usage context is not clearing the respective usage bit, print
+ * dhd_bus_busy_state and crash the host for further debugging.
+ */
+ DHD_ERROR(("%s : Timeout due to dhd_bus_busy_state=0x%x\n",
+ __FUNCTION__, dhdp->dhd_bus_busy_state));
+ ASSERT(0);
+ }
+
+ return;
+}
+
+static void
+dhdsdio_advertise_bus_remove(dhd_pub_t *dhdp)
+{
+ unsigned long flags;
+ int timeleft;
+
+ DHD_LINUX_GENERAL_LOCK(dhdp, flags);
+ dhdp->busstate = DHD_BUS_REMOVE;
+ DHD_LINUX_GENERAL_UNLOCK(dhdp, flags);
+
+ timeleft = dhd_os_busbusy_wait_negation(dhdp, &dhdp->dhd_bus_busy_state);
+ if ((timeleft == 0) || (timeleft == 1)) {
+ DHD_ERROR(("%s : Timeout due to dhd_bus_busy_state=0x%x\n",
+ __FUNCTION__, dhdp->dhd_bus_busy_state));
+ ASSERT(0);
+ }
+
+ return;
+}
+
+int
+dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag)
+{
+ int bcmerror = 0;
+ dhd_bus_t *bus;
+ unsigned long flags;
+
+ bus = dhdp->bus;
+
+ if (flag == TRUE) {
+ if (!bus->dhd->dongle_reset) {
+ DHD_ERROR(("%s: == Power OFF ==\n", __FUNCTION__));
+ dhdsdio_advertise_bus_cleanup(bus->dhd);
+ dhd_os_sdlock(dhdp);
+ dhd_os_wd_timer(dhdp, 0);
+#if defined(OEM_ANDROID)
+#if !defined(IGNORE_ETH0_DOWN)
+ /* Force flow control as protection when stop come before ifconfig_down */
+ dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON);
+#endif /* !defined(IGNORE_ETH0_DOWN) */
+#endif /* OEM_ANDROID */
+ /* Expect app to have torn down any connection before calling */
+ /* Stop the bus, disable F2 */
+ dhd_bus_stop(bus, FALSE);
+
+#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID)
+ /* Clean up any pending IRQ */
+ dhd_enable_oob_intr(bus, FALSE);
+ bcmsdh_oob_intr_set(bus->sdh, FALSE);
+ bcmsdh_oob_intr_unregister(bus->sdh);
+#endif /* defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) */
+
+ /* Clean tx/rx buffer pointers, detach from the dongle */
+ dhdsdio_release_dongle(bus, bus->dhd->osh, TRUE, TRUE);
+
+ bus->dhd->dongle_reset = TRUE;
+ DHD_ERROR(("%s: making dhdpub up FALSE\n", __FUNCTION__));
+ bus->dhd->up = FALSE;
+ dhd_txglom_enable(dhdp, FALSE);
+ dhd_os_sdunlock(dhdp);
+
+ DHD_LINUX_GENERAL_LOCK(bus->dhd, flags);
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
+ bus->dhd->busstate = DHD_BUS_DOWN;
+ DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags);
+
+ DHD_TRACE(("%s: WLAN OFF DONE\n", __FUNCTION__));
+ /* App can now remove power from device */
+ } else
+ bcmerror = BCME_SDIO_ERROR;
+ } else {
+ /* App must have restored power to device before calling */
+
+ printf("%s: == Power ON ==\n", __FUNCTION__);
+
+ if (bus->dhd->dongle_reset) {
+ /* Turn on WLAN */
+ dhd_os_sdlock(dhdp);
+ /* Reset SD client */
+ bcmsdh_reset(bus->sdh);
+
+ /* Attempt to re-attach & download */
+ if (dhdsdio_probe_attach(bus, bus->dhd->osh, bus->sdh,
+ (uint32 *)(uintptr)si_enum_base(bus->cl_devid),
+ bus->cl_devid)) {
+
+ DHD_LINUX_GENERAL_LOCK(bus->dhd, flags);
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
+ bus->dhd->busstate = DHD_BUS_DOWN;
+ DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags);
+ /* Attempt to download binary to the dongle */
+ if (dhdsdio_probe_init(bus, bus->dhd->osh, bus->sdh) &&
+ dhdsdio_download_firmware(bus, bus->dhd->osh, bus->sdh) >= 0) {
+
+ /* Re-init bus, enable F2 transfer */
+ bcmerror = dhd_bus_init((dhd_pub_t *) bus->dhd, FALSE);
+ if (bcmerror == BCME_OK) {
+#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID)
+ dhd_enable_oob_intr(bus, TRUE);
+ bcmsdh_oob_intr_register(bus->sdh,
+ dhdsdio_isr, bus);
+ bcmsdh_oob_intr_set(bus->sdh, TRUE);
+#elif defined(FORCE_WOWLAN)
+ dhd_enable_oob_intr(bus, TRUE);
+#endif /* defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) */
+
+ bus->dhd->dongle_reset = FALSE;
+ bus->dhd->up = TRUE;
+
+#if defined(OEM_ANDROID) && !defined(IGNORE_ETH0_DOWN)
+ /* Restore flow control */
+ dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF);
+#endif /* defined(OEM_ANDROID) && (!defined(IGNORE_ETH0_DOWN)) */
+ dhd_os_wd_timer(dhdp, dhd_watchdog_ms);
+
+ DHD_TRACE(("%s: WLAN ON DONE\n", __FUNCTION__));
+ } else {
+ dhd_bus_stop(bus, FALSE);
+ dhdsdio_release_dongle(bus, bus->dhd->osh,
+ TRUE, FALSE);
+ }
+ } else {
+ DHD_ERROR(("%s Failed to download binary to the dongle\n",
+ __FUNCTION__));
+ if (bus->sih != NULL) {
+ si_detach(bus->sih);
+ bus->sih = NULL;
+ }
+ bcmerror = BCME_SDIO_ERROR;
+ }
+ } else
+ bcmerror = BCME_SDIO_ERROR;
+
+ dhd_os_sdunlock(dhdp);
+ } else {
+ DHD_INFO(("%s called when dongle is not in reset\n",
+ __FUNCTION__));
+#if defined(OEM_ANDROID)
+ DHD_INFO(("Will call dhd_bus_start instead\n"));
+ dhd_bus_resume(dhdp, 1);
+#if defined(HW_OOB) || defined(FORCE_WOWLAN)
+ dhd_conf_set_hw_oob_intr(bus->sdh, bus->sih); // terence 20120615: fix for OOB initial issue
+#endif
+ if ((bcmerror = dhd_bus_start(dhdp)) != 0)
+ DHD_ERROR(("%s: dhd_bus_start fail with %d\n",
+ __FUNCTION__, bcmerror));
+#endif /* defined(OEM_ANDROID) */
+ }
+ }
+
+#ifdef PKT_STATICS
+ dhd_bus_clear_txpktstatics(dhdp);
+#endif
+ return bcmerror;
+}
+
+#if defined(LINUX)
+int dhd_bus_suspend(dhd_pub_t *dhdpub)
+{
+ return bcmsdh_stop(dhdpub->bus->sdh);
+}
+
+int dhd_bus_resume(dhd_pub_t *dhdpub, int stage)
+{
+ return bcmsdh_start(dhdpub->bus->sdh, stage);
+}
+#endif /* defined(LINUX) */
+
+/* Get Chip ID version */
+uint dhd_bus_chip_id(dhd_pub_t *dhdp)
+{
+ dhd_bus_t *bus = dhdp->bus;
+
+ if (bus && bus->sih)
+ return bus->sih->chip;
+ else
+ return 0;
+}
+
+/* Get Chip Rev ID version */
+uint dhd_bus_chiprev_id(dhd_pub_t *dhdp)
+{
+ dhd_bus_t *bus = dhdp->bus;
+
+ if (bus && bus->sih)
+ return bus->sih->chiprev;
+ else
+ return 0;
+}
+
+/* Get Chip Pkg ID version */
+uint dhd_bus_chippkg_id(dhd_pub_t *dhdp)
+{
+ dhd_bus_t *bus = dhdp->bus;
+
+	if (bus && bus->sih)
+		return bus->sih->chippkg;
+	else
+		return 0;
+}
+
+int dhd_bus_get_ids(struct dhd_bus *bus, uint32 *bus_type, uint32 *bus_num, uint32 *slot_num)
+{
+ *bus_type = bus->bus;
+ *bus_num = bus->bus_num;
+ *slot_num = bus->slot_num;
+ return 0;
+}
+
+int
+dhd_bus_membytes(dhd_pub_t *dhdp, bool set, uint32 address, uint8 *data, uint size)
+{
+ dhd_bus_t *bus;
+
+ bus = dhdp->bus;
+ return dhdsdio_membytes(bus, set, address, data, size);
+}
+
+#if defined(SUPPORT_MULTIPLE_REVISION)
+static int
+concate_revision_bcm4335(dhd_bus_t *bus, char *fw_path, char *nv_path)
+{
+ uint chipver;
+#if defined(SUPPORT_MULTIPLE_CHIPS)
+ char chipver_tag[10] = "_4335";
+#else
+ char chipver_tag[4] = {0, };
+#endif /* defined(SUPPORT_MULTIPLE_CHIPS) */
+
+ DHD_TRACE(("%s: BCM4335 Multiple Revision Check\n", __FUNCTION__));
+ if (bus->sih->chip != BCM4335_CHIP_ID) {
+ DHD_ERROR(("%s:Chip is not BCM4335\n", __FUNCTION__));
+ return -1;
+ }
+ chipver = bus->sih->chiprev;
+ DHD_ERROR(("CHIP VER = [0x%x]\n", chipver));
+ if (chipver == 0x0) {
+ DHD_ERROR(("----- CHIP bcm4335_A0 -----\n"));
+ strcat(chipver_tag, "_a0");
+ } else if (chipver == 0x1) {
+ DHD_ERROR(("----- CHIP bcm4335_B0 -----\n"));
+#if defined(SUPPORT_MULTIPLE_CHIPS)
+ strcat(chipver_tag, "_b0");
+#endif /* defined(SUPPORT_MULTIPLE_CHIPS) */
+ }
+
+ strcat(fw_path, chipver_tag);
+ strcat(nv_path, chipver_tag);
+ return 0;
+}
+
+static int
+concate_revision_bcm4339(dhd_bus_t *bus, char *fw_path, char *nv_path)
+{
+ uint chipver;
+#if defined(SUPPORT_MULTIPLE_CHIPS)
+ char chipver_tag[10] = "_4339";
+#else
+ char chipver_tag[4] = {0, };
+#endif /* defined(SUPPORT_MULTIPLE_CHIPS) */
+
+ DHD_TRACE(("%s: BCM4339 Multiple Revision Check\n", __FUNCTION__));
+ if (bus->sih->chip != BCM4339_CHIP_ID) {
+ DHD_ERROR(("%s:Chip is not BCM4339\n", __FUNCTION__));
+ return -1;
+ }
+ chipver = bus->sih->chiprev;
+ DHD_ERROR(("CHIP VER = [0x%x]\n", chipver));
+ if (chipver == 0x1) {
+ DHD_ERROR(("----- CHIP bcm4339_A0 -----\n"));
+ strcat(chipver_tag, "_a0");
+ } else {
+ DHD_ERROR(("----- CHIP bcm4339 unknown revision %d -----\n",
+ chipver));
+ }
+
+ strcat(fw_path, chipver_tag);
+ strcat(nv_path, chipver_tag);
+ return 0;
+}
+
+static int concate_revision_bcm4350(dhd_bus_t *bus, char *fw_path, char *nv_path)
+{
+ uint32 chip_ver;
+#if defined(SUPPORT_MULTIPLE_CHIPS)
+ char chipver_tag[10] = {0, };
+#else
+ char chipver_tag[4] = {0, };
+#endif /* defined(SUPPORT_MULTIPLE_CHIPS) */
+ chip_ver = bus->sih->chiprev;
+
+#if defined(SUPPORT_MULTIPLE_CHIPS)
+ if (chip_ver == 3)
+ strcat(chipver_tag, "_4354");
+ else
+ strcat(chipver_tag, "_4350");
+#endif
+
+ if (chip_ver == 3) {
+ DHD_ERROR(("----- CHIP 4354 A0 -----\n"));
+ strcat(chipver_tag, "_a0");
+ } else {
+ DHD_ERROR(("----- Unknown chip version, ver=%x -----\n", chip_ver));
+ }
+
+ strcat(fw_path, chipver_tag);
+ strcat(nv_path, chipver_tag);
+ return 0;
+}
+
+static int concate_revision_bcm4354(dhd_bus_t *bus, char *fw_path, char *nv_path)
+{
+ uint32 chip_ver;
+#if defined(SUPPORT_MULTIPLE_CHIPS)
+ char chipver_tag[10] = "_4354";
+#else
+#if !defined(CUSTOMER_HW4)
+ char chipver_tag[4] = {0, };
+#endif /* !CUSTOMER_HW4 */
+#endif /* SUPPORT_MULTIPLE_CHIPS */
+
+ chip_ver = bus->sih->chiprev;
+#if !defined(SUPPORT_MULTIPLE_CHIPS) && defined(CUSTOMER_HW4)
+ DHD_INFO(("----- CHIP 4354, ver=%x -----\n", chip_ver));
+#else
+ if (chip_ver == 1) {
+ DHD_ERROR(("----- CHIP 4354 A1 -----\n"));
+ strcat(chipver_tag, "_a1");
+ } else {
+ DHD_ERROR(("----- Unknown chip version, ver=%x -----\n", chip_ver));
+ }
+
+ strcat(fw_path, chipver_tag);
+ strcat(nv_path, chipver_tag);
+#endif /* !SUPPORT_MULTIPLE_CHIPS && CUSTOMER_HW4 */
+
+ return 0;
+}
+
+static int
+concate_revision_bcm43454(dhd_bus_t *bus, char *fw_path, char *nv_path)
+{
+ char chipver_tag[10] = {0, };
+#ifdef SUPPORT_MULTIPLE_BOARD_REV_FROM_DT
+ int base_system_rev_for_nv = 0;
+#endif /* SUPPORT_MULTIPLE_BOARD_REV_FROM_DT */
+
+ DHD_TRACE(("%s: BCM43454 Multiple Revision Check\n", __FUNCTION__));
+ if (bus->sih->chip != BCM43454_CHIP_ID) {
+ DHD_ERROR(("%s:Chip is not BCM43454!\n", __FUNCTION__));
+ return -1;
+ }
+#ifdef SUPPORT_MULTIPLE_BOARD_REV_FROM_DT
+ base_system_rev_for_nv = dhd_get_system_rev();
+ if (base_system_rev_for_nv > 0) {
+ DHD_ERROR(("----- Board Rev [%d] -----\n", base_system_rev_for_nv));
+ sprintf(chipver_tag, "_r%02d", base_system_rev_for_nv);
+ }
+#endif /* SUPPORT_MULTIPLE_BOARD_REV_FROM_DT */
+#ifdef SUPPORT_MULTIPLE_BOARD_REV_FROM_HW
+ DHD_ERROR(("----- Rev [%d] Fot MULTIPLE Board. -----\n", system_hw_rev));
+ if ((system_hw_rev >= 8) && (system_hw_rev <= 11)) {
+ DHD_ERROR(("This HW is Rev 08 ~ 11. this is For FD-HW\n"));
+ strcat(chipver_tag, "_FD");
+ }
+#endif /* SUPPORT_MULTIPLE_BOARD_REV_FROM_HW */
+
+ strcat(nv_path, chipver_tag);
+ return 0;
+}
+
+int
+concate_revision(dhd_bus_t *bus, char *fw_path, char *nv_path)
+{
+ int res = 0;
+
+ if (!bus || !bus->sih) {
+ DHD_ERROR(("%s:Bus is Invalid\n", __FUNCTION__));
+ return -1;
+ }
+
+ switch (bus->sih->chip) {
+ case BCM4335_CHIP_ID:
+ res = concate_revision_bcm4335(bus, fw_path, nv_path);
+
+ break;
+ case BCM4339_CHIP_ID:
+ res = concate_revision_bcm4339(bus, fw_path, nv_path);
+ break;
+ case BCM4350_CHIP_ID:
+ res = concate_revision_bcm4350(bus, fw_path, nv_path);
+ break;
+ case BCM4354_CHIP_ID:
+ res = concate_revision_bcm4354(bus, fw_path, nv_path);
+ break;
+ case BCM43454_CHIP_ID:
+ res = concate_revision_bcm43454(bus, fw_path, nv_path);
+ break;
+
+ /* XXX: Add New Multiple CHIP ID */
+ default:
+ DHD_ERROR(("REVISION SPECIFIC feature is not required\n"));
+ /* XXX: if revision specific feature is not required then return true always */
+ return res;
+ }
+
+ if (res == 0) {
+#ifdef BCMDBG
+ printf("dhd concatenated fw & nv:\n fw_path:%s\n"
+ " nv_path:%s\n", fw_path, nv_path);
+ printf("make sure they exist\n");
+#endif
+ }
+ return res;
+}
+#endif /* SUPPORT_MULTIPLE_REVISION */
+
+#if defined(NDIS)
+void
+dhd_bus_reject_ioreqs(dhd_pub_t *dhdp, bool reject)
+{
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ bcmsdh_reject_ioreqs(dhdp->bus->sdh, reject);
+}
+
+void
+dhd_bus_waitfor_iodrain(dhd_pub_t *dhdp)
+{
+
+
+ bcmsdh_waitfor_iodrain(dhdp->bus->sdh);
+}
+#endif /* (NDIS) */
+
+void
+dhd_bus_update_fw_nv_path(struct dhd_bus *bus, char *pfw_path, char *pnv_path,
+ char *pclm_path, char *pconf_path)
+{
+ bus->fw_path = pfw_path;
+ bus->nv_path = pnv_path;
+ bus->dhd->clm_path = pclm_path;
+ bus->dhd->conf_path = pconf_path;
+}
+
+int
+dhd_enableOOB(dhd_pub_t *dhd, bool sleep)
+{
+ dhd_bus_t *bus = dhd->bus;
+ sdpcmd_regs_t *regs = bus->regs;
+ uint retries = 0;
+
+ if (sleep) {
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+ /* Tell device to start using OOB wakeup */
+ W_SDREG(SMB_USE_OOB, &regs->tosbmailbox, retries);
+ if (retries > retry_limit) {
+ DHD_ERROR(("CANNOT SIGNAL CHIP, WILL NOT WAKE UP!!\n"));
+ return BCME_BUSY;
+ }
+ /* Turn off our contribution to the HT clock request */
+ dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+ } else {
+ /* Make sure the controller has the bus up */
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+ /* Send misc interrupt to indicate OOB not needed */
+ W_SDREG(0, &regs->tosbmailboxdata, retries);
+ if (retries <= retry_limit)
+ W_SDREG(SMB_DEV_INT, &regs->tosbmailbox, retries);
+
+ if (retries > retry_limit)
+ DHD_ERROR(("CANNOT SIGNAL CHIP TO CLEAR OOB!!\n"));
+
+ /* Make sure we have SD bus access */
+ dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+ }
+ return BCME_OK;
+}
+
+void
+dhd_bus_pktq_flush(dhd_pub_t *dhdp)
+{
+ dhd_bus_t *bus = dhdp->bus;
+ bool wlfc_enabled = FALSE;
+
+#ifdef PROP_TXSTATUS
+ wlfc_enabled = (dhd_wlfc_cleanup_txq(dhdp, NULL, 0) != WLFC_UNSUPPORTED);
+#endif
+ if (!wlfc_enabled) {
+#ifdef DHDTCPACK_SUPPRESS
+ /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
+ * when there is a newly coming packet from network stack.
+ */
+ dhd_tcpack_info_tbl_clean(bus->dhd);
+#endif /* DHDTCPACK_SUPPRESS */
+ /* Clear the data packet queues */
+ pktq_flush(dhdp->osh, &bus->txq, TRUE);
+ }
+}
+
+#ifdef BCMSDIO
+int
+dhd_sr_config(dhd_pub_t *dhd, bool on)
+{
+ dhd_bus_t *bus = dhd->bus;
+
+ if (!bus->_srenab)
+ return -1;
+
+ return dhdsdio_clk_devsleep_iovar(bus, on);
+}
+
+uint16
+dhd_get_chipid(struct dhd_bus *bus)
+{
+ if (bus && bus->sih)
+ return (uint16)bus->sih->chip;
+ else
+ return 0;
+}
+#endif /* BCMSDIO */
+
+#ifdef DEBUGGER
+static uint32
+dhd_sdio_reg_read(struct dhd_bus *bus, ulong addr)
+{
+ uint32 rval;
+
+ dhd_os_sdlock(bus->dhd);
+
+ BUS_WAKE(bus);
+
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+ rval = bcmsdh_reg_read(bus->sdh, addr, 4);
+
+ dhd_os_sdunlock(bus->dhd);
+
+ return rval;
+}
+
+static void
+dhd_sdio_reg_write(struct dhd_bus *bus, ulong addr, uint32 val)
+{
+ dhd_os_sdlock(bus->dhd);
+
+ BUS_WAKE(bus);
+
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+ bcmsdh_reg_write(bus->sdh, addr, 4, val);
+
+ dhd_os_sdunlock(bus->dhd);
+}
+
+#endif /* DEBUGGER */
+
+#if defined(SOFTAP_TPUT_ENHANCE)
+void dhd_bus_setidletime(dhd_pub_t *dhdp, int idle_time)
+{
+ if (!dhdp || !dhdp->bus) {
+ DHD_ERROR(("%s:Bus is Invalid\n", __FUNCTION__));
+ return;
+ }
+ dhdp->bus->idletime = idle_time;
+}
+
+void dhd_bus_getidletime(dhd_pub_t *dhdp, int* idle_time)
+{
+ if (!dhdp || !dhdp->bus) {
+ DHD_ERROR(("%s:Bus is Invalid\n", __FUNCTION__));
+ return;
+ }
+
+ if (!idle_time) {
+ DHD_ERROR(("%s:Arg idle_time is NULL\n", __FUNCTION__));
+ return;
+ }
+ *idle_time = dhdp->bus->idletime;
+}
+#endif /* SOFTAP_TPUT_ENHANCE */
+
+#if defined(BT_OVER_SDIO)
+uint8 dhd_bus_cfg_read(void *h, uint fun_num, uint32 addr, int *err)
+{
+ uint8 intrd;
+ dhd_pub_t *dhdp = (dhd_pub_t *)h;
+ dhd_bus_t *bus = (dhd_bus_t *)dhdp->bus;
+
+ dhd_os_sdlock(bus->dhd);
+
+ intrd = bcmsdh_cfg_read(bus->sdh, fun_num, addr, err);
+
+ dhd_os_sdunlock(bus->dhd);
+
+ return intrd;
+}
+EXPORT_SYMBOL(dhd_bus_cfg_read);
+
+void dhd_bus_cfg_write(void *h, uint fun_num, uint32 addr, uint8 val, int *err)
+{
+ dhd_pub_t *dhdp = (dhd_pub_t *)h;
+ dhd_bus_t *bus = (dhd_bus_t *)dhdp->bus;
+
+ dhd_os_sdlock(bus->dhd);
+
+ bcmsdh_cfg_write(bus->sdh, fun_num, addr, val, err);
+
+ dhd_os_sdunlock(bus->dhd);
+
+}
+EXPORT_SYMBOL(dhd_bus_cfg_write);
+
+static int
+extract_hex_field(char * line, uint16 start_pos, uint16 num_chars, uint16 * value)
+{
+	char field[8];
+
+	if (num_chars >= sizeof(field))
+		return 0;
+	/* copy exactly num_chars digits so sscanf cannot read past the field */
+	memcpy(field, line + start_pos, num_chars);
+	field[num_chars] = '\0';
+
+	return (sscanf(field, "%hX", value) == 1);
+}
+
+static int
+read_more_btbytes(struct dhd_bus *bus, void * file, char *line, int * addr_mode, uint16 * hi_addr,
+ uint32 * dest_addr, uint8 *data_bytes, uint32 * num_bytes)
+{
+ int str_len;
+ uint16 num_data_bytes, addr, data_pos, type, w, i;
+ uint32 abs_base_addr32 = 0;
+ *num_bytes = 0;
+
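+	/* The BT firmware is in Intel HEX format: each record is
+	 * ":llaaaatt<data...>cc" (length, address, type, data, checksum).
+	 * Loop until a data record yields bytes; address-extension records
+	 * only update the destination base. Checksums are not verified here.
+	 */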
+ while (!*num_bytes)
+ {
+ str_len = dhd_os_gets_image(bus->dhd, line, BTFW_MAX_STR_LEN, file);
+
+ DHD_TRACE(("%s: Len :0x%x %s\n", __FUNCTION__, str_len, line));
+
+ if (str_len == 0) {
+ break;
+ } else if (str_len > 9) {
+ extract_hex_field(line, 1, 2, &num_data_bytes);
+ extract_hex_field(line, 3, 4, &addr);
+ extract_hex_field(line, 7, 2, &type);
+
+ data_pos = 9;
+ for (i = 0; i < num_data_bytes; i++) {
+ extract_hex_field(line, data_pos, 2, &w);
+ data_bytes [i] = (uint8)(w & 0x00FF);
+ data_pos += 2;
+ }
+
+ if (type == BTFW_HEX_LINE_TYPE_EXTENDED_ADDRESS) {
+ *hi_addr = (data_bytes [0] << 8) | data_bytes [1];
+ *addr_mode = BTFW_ADDR_MODE_EXTENDED;
+ } else if (type == BTFW_HEX_LINE_TYPE_EXTENDED_SEGMENT_ADDRESS) {
+ *hi_addr = (data_bytes [0] << 8) | data_bytes [1];
+ *addr_mode = BTFW_ADDR_MODE_SEGMENT;
+ } else if (type == BTFW_HEX_LINE_TYPE_ABSOLUTE_32BIT_ADDRESS) {
+ abs_base_addr32 = (data_bytes [0] << 24) | (data_bytes [1] << 16) |
+ (data_bytes [2] << 8) | data_bytes [3];
+ *addr_mode = BTFW_ADDR_MODE_LINEAR32;
+ } else if (type == BTFW_HEX_LINE_TYPE_DATA) {
+ *dest_addr = addr;
+ if (*addr_mode == BTFW_ADDR_MODE_EXTENDED)
+ *dest_addr += (*hi_addr << 16);
+ else if (*addr_mode == BTFW_ADDR_MODE_SEGMENT)
+ *dest_addr += (*hi_addr << 4);
+ else if (*addr_mode == BTFW_ADDR_MODE_LINEAR32)
+ *dest_addr += abs_base_addr32;
+ *num_bytes = num_data_bytes;
+ }
+ }
+ }
+ return (*num_bytes > 0);
+}
+
+static int
+_dhdsdio_download_btfw(struct dhd_bus *bus)
+{
+ int bcm_error = -1;
+ void *image = NULL;
+ uint8 *mem_blk = NULL, *mem_ptr = NULL, *data_ptr = NULL;
+
+ uint32 offset_addr = 0, offset_len = 0, bytes_to_write = 0;
+
+ char *line = NULL;
+ uint32 dest_addr = 0, num_bytes;
+ uint16 hiAddress = 0;
+ uint32 start_addr, start_data, end_addr, end_data, i, index, pad,
+ bt2wlan_pwrup_adr;
+
+ int addr_mode = BTFW_ADDR_MODE_EXTENDED;
+
+ /* Out immediately if no image to download */
+ if ((bus->btfw_path == NULL) || (bus->btfw_path[0] == '\0')) {
+ return 0;
+ }
+
+ /* XXX: Should succeed in opening image if it is actually given through registry
+ * entry or in module param.
+ */
+ image = dhd_os_open_image1(bus->dhd, bus->btfw_path);
+ if (image == NULL)
+ goto err;
+
+ mem_ptr = mem_blk = MALLOC(bus->dhd->osh, BTFW_DOWNLOAD_BLK_SIZE + DHD_SDALIGN);
+ if (mem_blk == NULL) {
+ DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__,
+ BTFW_DOWNLOAD_BLK_SIZE + DHD_SDALIGN));
+ goto err;
+ }
+ if ((uint32)(uintptr)mem_blk % DHD_SDALIGN)
+ mem_ptr += (DHD_SDALIGN - ((uint32)(uintptr)mem_blk % DHD_SDALIGN));
+
+ data_ptr = MALLOC(bus->dhd->osh, BTFW_DOWNLOAD_BLK_SIZE - 8);
+ if (data_ptr == NULL) {
+ DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__,
+ BTFW_DOWNLOAD_BLK_SIZE - 8));
+ goto err;
+ }
+ /* Write to BT register to hold WLAN wake high during BT FW download */
+ bt2wlan_pwrup_adr = BTMEM_OFFSET + BT2WLAN_PWRUP_ADDR;
+ bcmsdh_reg_write(bus->sdh, bt2wlan_pwrup_adr, 4, BT2WLAN_PWRUP_WAKE);
+ /*
+ * Wait for at least 2msec for the clock to be ready/Available.
+ */
+ OSL_DELAY(2000);
+
+ line = MALLOC(bus->dhd->osh, BTFW_MAX_STR_LEN);
+ if (line == NULL) {
+ DHD_ERROR(("%s: Failed to allocate memory %d bytes\n",
+ __FUNCTION__, BTFW_MAX_STR_LEN));
+ goto err;
+ }
+ memset(line, 0, BTFW_MAX_STR_LEN);
+
+ while (read_more_btbytes (bus, image, line, &addr_mode, &hiAddress, &dest_addr,
+ data_ptr, &num_bytes)) {
+
+ DHD_TRACE(("read %d bytes at address %08X\n", num_bytes, dest_addr));
+
+ start_addr = BTMEM_OFFSET + dest_addr;
+ index = 0;
+
+ /* Make sure the start address is 4 byte aligned to avoid alignment issues
+ * with SD host controllers
+ */
+ if (!ISALIGNED(start_addr, 4)) {
+ pad = start_addr % 4;
+ start_addr = ROUNDDN(start_addr, 4);
+ start_data = bcmsdh_reg_read(bus->sdh, start_addr, 4);
+ for (i = 0; i < pad; i++, index++) {
+ mem_ptr[index] = (uint8)((uint8 *)&start_data)[i];
+ }
+ }
+ bcopy(data_ptr, &(mem_ptr[index]), num_bytes);
+ index += num_bytes;
+
+ /* Make sure the length is multiple of 4bytes to avoid alignment issues
+ * with SD host controllers
+ */
+ end_addr = start_addr + index;
+ if (!ISALIGNED(end_addr, 4)) {
+ end_addr = ROUNDDN(end_addr, 4);
+ end_data = bcmsdh_reg_read(bus->sdh, end_addr, 4);
+ for (i = (index % 4); i < 4; i++, index++) {
+ mem_ptr[index] = (uint8)((uint8 *)&end_data)[i];
+ }
+ }
+
+ offset_addr = start_addr & 0xFFF;
+ offset_len = offset_addr + index;
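+		/* Split writes that would cross a 4 KB address window into two
+		 * chunks, with a settling delay in between.
+		 */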
+ if (offset_len <= 0x1000) {
+ bcm_error = dhdsdio_membytes(bus, TRUE, start_addr, mem_ptr, index);
+ if (bcm_error) {
+ DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
+ __FUNCTION__, bcm_error, num_bytes, start_addr));
+ goto err;
+ }
+ }
+ else {
+ bytes_to_write = 0x1000 - offset_addr;
+ bcm_error = dhdsdio_membytes(bus, TRUE, start_addr, mem_ptr,
+ bytes_to_write);
+ if (bcm_error) {
+ DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
+ __FUNCTION__, bcm_error, num_bytes, start_addr));
+ goto err;
+ }
+
+ OSL_DELAY(10000);
+
+ bcm_error = dhdsdio_membytes(bus, TRUE, (start_addr + bytes_to_write),
+ (mem_ptr + bytes_to_write), (index - bytes_to_write));
+ if (bcm_error) {
+ DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
+ __FUNCTION__, bcm_error, num_bytes, start_addr));
+ goto err;
+ }
+ }
+ memset(line, 0, BTFW_MAX_STR_LEN);
+ }
+
+ bcm_error = 0;
+err:
+ if (mem_blk)
+ MFREE(bus->dhd->osh, mem_blk, BTFW_DOWNLOAD_BLK_SIZE + DHD_SDALIGN);
+
+ if (data_ptr)
+ MFREE(bus->dhd->osh, data_ptr, BTFW_DOWNLOAD_BLK_SIZE - 8);
+
+ if (line)
+ MFREE(bus->dhd->osh, line, BTFW_MAX_STR_LEN);
+
+ if (image)
+ dhd_os_close_image1(bus->dhd, image);
+
+ return bcm_error;
+}
+
+static int
+dhdsdio_download_btfw(struct dhd_bus *bus, osl_t *osh, void *sdh)
+{
+ int ret;
+
+ DHD_TRACE(("%s: btfw path=%s\n",
+ __FUNCTION__, bus->btfw_path));
+ DHD_OS_WAKE_LOCK(bus->dhd);
+ dhd_os_sdlock(bus->dhd);
+
+ /* Download the firmware */
+ ret = _dhdsdio_download_btfw(bus);
+
+ dhd_os_sdunlock(bus->dhd);
+ DHD_OS_WAKE_UNLOCK(bus->dhd);
+
+ return ret;
+}
+
+int
+dhd_bus_download_btfw(struct dhd_bus *bus, osl_t *osh,
+ char *pbtfw_path)
+{
+ int ret;
+
+ bus->btfw_path = pbtfw_path;
+
+ ret = dhdsdio_download_btfw(bus, osh, bus->sdh);
+
+ return ret;
+}
+#endif /* defined (BT_OVER_SDIO) */
+
+void
+dhd_bus_dump_trap_info(dhd_bus_t *bus, struct bcmstrbuf *strbuf)
+{
+ trap_t *tr = &bus->dhd->last_trap_info;
+
+ bcm_bprintf(strbuf,
+ "Dongle trap type 0x%x @ epc 0x%x, cpsr 0x%x, spsr 0x%x, sp 0x%x,"
+ "lp 0x%x, rpc 0x%x Trap offset 0x%x, "
+ "r0 0x%x, r1 0x%x, r2 0x%x, r3 0x%x, "
+ "r4 0x%x, r5 0x%x, r6 0x%x, r7 0x%x\n\n",
+ ltoh32(tr->type), ltoh32(tr->epc), ltoh32(tr->cpsr), ltoh32(tr->spsr),
+ ltoh32(tr->r13), ltoh32(tr->r14), ltoh32(tr->pc),
+ ltoh32(bus->dongle_trap_addr),
+ ltoh32(tr->r0), ltoh32(tr->r1), ltoh32(tr->r2), ltoh32(tr->r3),
+ ltoh32(tr->r4), ltoh32(tr->r5), ltoh32(tr->r6), ltoh32(tr->r7));
+
+}
+
+static int
+dhd_bcmsdh_send_buffer(void *bus, uint8 *frame, uint16 len)
+{
+ int ret = -1;
+
+ ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(((dhd_bus_t*)bus)->sdh),
+ SDIO_FUNC_2, F2SYNC, frame, len, NULL, NULL, NULL, TXRETRIES);
+
+ if (ret == BCME_OK)
+ ((dhd_bus_t*)bus)->tx_seq = (((dhd_bus_t*)bus)->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
+
+ return ret;
+}
+
+/* Function to set the min res mask depending on the chip ID used */
+bool
+dhd_bus_set_default_min_res_mask(struct dhd_bus *bus)
+{
+ if ((bus == NULL) || (bus->sih == NULL)) {
+ DHD_ERROR(("%s(): Invalid Arguments \r\n", __FUNCTION__));
+ return FALSE;
+ }
+
+ switch (bus->sih->chip) {
+ case BCM4339_CHIP_ID:
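+		/* Offset 0x618 from the enum base is chipcommon's PMU
+		 * min_res_mask register; the magic value keeps the resources
+		 * the 4339 needs powered.
+		 */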
+ bcmsdh_reg_write(bus->sdh, SI_ENUM_BASE(bus->sih) + 0x618, 4, 0x3fcaf377);
+ if (bcmsdh_regfail(bus->sdh)) {
+ DHD_ERROR(("%s:%d Setting min_res_mask failed\n", __FUNCTION__, __LINE__));
+ return FALSE;
+ }
+ break;
+
+ case BCM43012_CHIP_ID:
+ case BCM43013_CHIP_ID:
+ case BCM43014_CHIP_ID:
+ bcmsdh_reg_write(bus->sdh,
+ si_get_pmu_reg_addr(bus->sih, OFFSETOF(pmuregs_t, min_res_mask)),
+ 4, DEFAULT_43012_MIN_RES_MASK);
+ if (bcmsdh_regfail(bus->sdh)) {
+ DHD_ERROR(("%s:%d Setting min_res_mask failed\n", __FUNCTION__, __LINE__));
+ return FALSE;
+ }
+ break;
+
+ default:
+ DHD_ERROR(("%s: Unhandled chip id\n", __FUNCTION__));
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+/* Function to reset PMU registers */
+void
+dhd_bus_pmu_reg_reset(dhd_pub_t *dhdp)
+{
+ struct dhd_bus *bus = dhdp->bus;
+ bcmsdh_reg_write(bus->sdh, si_get_pmu_reg_addr(bus->sih,
+ OFFSETOF(pmuregs_t, swscratch)), 4, 0x0);
+ if (bcmsdh_regfail(bus->sdh)) {
+ DHD_ERROR(("%s:%d Setting min_res_mask failed\n", __FUNCTION__, __LINE__));
+ }
+}
+
+int
+dhd_bus_readwrite_bp_addr(dhd_pub_t *dhdp, uint addr, uint size, uint* data, bool read)
+{
+ int bcmerror = 0;
+ struct dhd_bus *bus = dhdp->bus;
+
+ if (read) {
+ *data = (int32)bcmsdh_reg_read(bus->sdh, addr, size);
+ } else {
+ bcmsdh_reg_write(bus->sdh, addr, size, *data);
+ }
+
+ if (bcmsdh_regfail(bus->sdh))
+ bcmerror = BCME_SDIO_ERROR;
+
+ return bcmerror;
+}
+
+int dhd_get_idletime(dhd_pub_t *dhd)
+{
+ return dhd->bus->idletime;
+}
+
+#ifdef DHD_WAKE_STATUS
+wake_counts_t*
+dhd_bus_get_wakecount(dhd_pub_t *dhd)
+{
+ if (!dhd->bus) {
+ return NULL;
+ }
+ return &dhd->bus->wake_counts;
+}
+int
+dhd_bus_get_bus_wake(dhd_pub_t *dhd)
+{
+ return bcmsdh_set_get_wake(dhd->bus->sdh, 0);
+}
+#endif /* DHD_WAKE_STATUS */
+
+int
+dhd_bus_sleep(dhd_pub_t *dhdp, bool sleep, uint32 *intstatus)
+{
+ dhd_bus_t *bus = dhdp->bus;
+ uint32 retry = 0;
+ int ret = 0;
+
+ if (bus) {
+ dhd_os_sdlock(dhdp);
+ BUS_WAKE(bus);
+ R_SDREG(*intstatus, &bus->regs->intstatus, retry);
+ if (sleep) {
+ if (SLPAUTO_ENAB(bus)) {
+ ret = dhdsdio_bussleep(bus, sleep);
+ if (ret != BCME_BUSY)
+ dhd_os_wd_timer(bus->dhd, 0);
+ } else
+ dhdsdio_clkctl(bus, CLK_NONE, FALSE);
+ }
+ dhd_os_sdunlock(dhdp);
+ } else {
+ DHD_ERROR(("bus is NULL\n"));
+ ret = -1;
+ }
+
+ return ret;
+}
diff --git a/bcmdhd.101.10.361.x/dhd_sec_feature.h b/bcmdhd.101.10.361.x/dhd_sec_feature.h
new file mode 100755
index 0000000..671a71a
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_sec_feature.h
@@ -0,0 +1,226 @@
+/*
+ * Customer HW 4 dependent file
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_sec_feature.h$
+ */
+
+/* XXX This File managed by Samsung */
+
+/*
+ * *** Description ***
+ * 1. Module vs COB
+ * If your model's WiFi HW chip is a COB type, you must add the features below,
+ * - #undef USE_CID_CHECK
+ * - #define READ_MACADDR
+ * because COB type chips have no CID or MAC address stored.
+ * You must also add the following to the defconfig file:
+ * - CONFIG_WIFI_BROADCOM_COB
+ *
+ * 2. PROJECTS
+ * To add a feature for one project only, add it in the 'PROJECTS' part.
+ *
+ * 3. Region code
+ * To add a feature only for models of a given region, use the codes below:
+ * - 100 : EUR OPEN
+ * - 101 : EUR ORG
+ * - 200 : KOR OPEN
+ * - 201 : KOR SKT
+ * - 202 : KOR KTT
+ * - 203 : KOR LGT
+ * - 300 : CHN OPEN
+ * - 400 : USA OPEN
+ * - 401 : USA ATT
+ * - 402 : USA TMO
+ * - 403 : USA VZW
+ * - 404 : USA SPR
+ * - 405 : USA USC
+ * See how they are used later in this file.
+ * You can also add more region codes.
+ */
+
+#ifndef _dhd_sec_feature_h_
+#define _dhd_sec_feature_h_
+
+#include <linuxver.h>
+
+/* For COB type feature */
+#ifdef CONFIG_WIFI_BROADCOM_COB
+#undef USE_CID_CHECK
+#define READ_MACADDR
+#endif /* CONFIG_WIFI_BROADCOM_COB */
+
+#if defined(CONFIG_MACH_UNIVERSAL7420) || defined(CONFIG_ARCH_MSM8994) || \
+ defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_SOC_EXYNOS8890)
+#define SUPPORT_MULTIPLE_MODULE_CIS
+#endif /* CONFIG_MACH_UNIVERSAL7420 || CONFIG_ARCH_MSM8994 ||
+ * CONFIG_ARCH_MSM8996 || CONFIG_SOC_EXYNOS8890
+ */
+
+#if defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_SOC_EXYNOS8890)
+#define SUPPORT_BCM4359_MIXED_MODULES
+#endif /* CONFIG_ARCH_MSM8996 || CONFIG_SOC_EXYNOS8890 */
+
+#if defined(CONFIG_ARGOS)
+#if defined(CONFIG_SPLIT_ARGOS_SET)
+#define ARGOS_IRQ_WIFI_TABLE_LABEL "WIFI TX"
+#define ARGOS_WIFI_TABLE_LABEL "WIFI RX"
+#else /* CONFIG_SPLIT_ARGOS_SET */
+#define ARGOS_IRQ_WIFI_TABLE_LABEL "WIFI"
+#define ARGOS_WIFI_TABLE_LABEL "WIFI"
+#endif /* CONFIG_SPLIT_ARGOS_SET */
+#define ARGOS_P2P_TABLE_LABEL "P2P"
+#endif /* CONFIG_ARGOS */
+
+/* PROJECTS START */
+
+#if defined(CONFIG_MACH_UNIVERSAL7420) || defined(CONFIG_SOC_EXYNOS8890) || \
+ defined(CONFIG_SOC_EXYNOS8895)
+#undef CUSTOM_SET_CPUCORE
+#define PRIMARY_CPUCORE 0
+#define DPC_CPUCORE 4
+#define RXF_CPUCORE 5
+#define TASKLET_CPUCORE 5
+#define ARGOS_CPU_SCHEDULER
+#define ARGOS_RPS_CPU_CTL
+
+#ifdef CONFIG_SOC_EXYNOS8895
+#define ARGOS_DPC_TASKLET_CTL
+#endif /* CONFIG_SOC_EXYNOS8895 */
+
+#ifdef CONFIG_MACH_UNIVERSAL7420
+#define EXYNOS_PCIE_DEBUG
+#endif /* CONFIG_MACH_UNIVERSAL7420 */
+#endif /* CONFIG_MACH_UNIVERSAL7420 || CONFIG_SOC_EXYNOS8890 || CONFIG_SOC_EXYNOS8895 */
+
+#if defined(CONFIG_SOC_EXYNOS9810) || defined(CONFIG_SOC_EXYNOS9820) || \
+ defined(CONFIG_SOC_EXYNOS9830) || defined(CONFIG_SOC_EXYNOS2100) || \
+ defined(CONFIG_SOC_EXYNOS1000)
+#define PCIE_IRQ_CPU_CORE 5
+#endif /* CONFIG_SOC_EXYNOS9810 || CONFIG_SOC_EXYNOS9820 ||
+ * CONFIG_SOC_EXYNOS9830 || CONFIG_SOC_EXYNOS2100 || CONFIG_SOC_EXYNOS1000
+ */
+
+#if defined(DHD_LB)
+#if defined(CONFIG_ARCH_SM8150) || defined(CONFIG_ARCH_KONA) || defined(CONFIG_ARCH_LAHAINA)
+#define DHD_LB_PRIMARY_CPUS (0x70)
+#define DHD_LB_SECONDARY_CPUS (0x0E)
+#elif defined(CONFIG_SOC_EXYNOS9810) || defined(CONFIG_SOC_EXYNOS9820) || \
+ defined(CONFIG_SOC_EXYNOS9830) || defined(CONFIG_SOC_EXYNOS2100) || \
+ defined(CONFIG_SOC_EXYNOS1000)
+#define DHD_LB_PRIMARY_CPUS (0x70)
+#define DHD_LB_SECONDARY_CPUS (0x0E)
+#elif defined(CONFIG_SOC_EXYNOS8890)
+/*
+ * Removed core 6~7 from NAPI CPU mask.
+ * Exynos 8890 disabled core 6~7 by default.
+ */
+#define DHD_LB_PRIMARY_CPUS (0x30)
+#define DHD_LB_SECONDARY_CPUS (0x0E)
+#elif defined(CONFIG_SOC_EXYNOS8895)
+/* using whole big core with NAPI mask */
+#define DHD_LB_PRIMARY_CPUS (0xF0)
+#define DHD_LB_SECONDARY_CPUS (0x0E)
+#elif defined(CONFIG_ARCH_MSM8998)
+#define DHD_LB_PRIMARY_CPUS (0x20)
+#define DHD_LB_SECONDARY_CPUS (0x0E)
+#elif defined(CONFIG_ARCH_MSM8996)
+#define DHD_LB_PRIMARY_CPUS (0x0C)
+#define DHD_LB_SECONDARY_CPUS (0x03)
+#else /* Default LB masks */
+/* using whole big core with NAPI mask */
+#define DHD_LB_PRIMARY_CPUS (0xF0)
+#define DHD_LB_SECONDARY_CPUS (0x0E)
+#endif /* CONFIG_SOC_EXYNOS8890 */
+#else /* !DHD_LB */
+#define ARGOS_DPC_TASKLET_CTL
+#endif /* !DHD_LB */
+
+#if defined(CONFIG_ARCH_MSM) || defined(CONFIG_SOC_EXYNOS8895) || \
+ defined(CONFIG_SOC_EXYNOS9810) || defined(CONFIG_SOC_EXYNOS9820) || \
+ defined(CONFIG_SOC_EXYNOS9830) || defined(CONFIG_SOC_EXYNOS2100) || \
+ defined(CONFIG_SOC_EXYNOS1000)
+#if defined(CONFIG_BCMDHD_PCIE)
+#define BCMPCIE_DISABLE_ASYNC_SUSPEND
+#endif /* CONFIG_BCMDHD_PCIE */
+#endif /* CONFIG_ARCH_MSM || CONFIG_SOC_EXYNOS8895 || CONFIG_SOC_EXYNOS9810 ||
+ * CONFIG_SOC_EXYNOS9820 || CONFIG_SOC_EXYNOS9830 || CONFIG_SOC_EXYNOS2100 ||
+ * CONFIG_SOC_EXYNOS1000
+ */
+/* PROJECTS END */
+
+/* REGION CODE START */
+
+#ifndef CONFIG_WLAN_REGION_CODE
+#define CONFIG_WLAN_REGION_CODE 100
+#endif /* CONFIG_WLAN_REGION_CODE */
+
+#if (CONFIG_WLAN_REGION_CODE >= 100) && (CONFIG_WLAN_REGION_CODE < 200) /* EUR */
+#if (CONFIG_WLAN_REGION_CODE == 101) /* EUR ORG */
+/* GAN LITE NAT KEEPALIVE FILTER */
+#define GAN_LITE_NAT_KEEPALIVE_FILTER
+#endif /* CONFIG_WLAN_REGION_CODE == 101 */
+#if (CONFIG_WLAN_REGION_CODE == 150) /* EUR FD(DualSIM) */
+#define SUPPORT_MULTIPLE_BOARD_REV_FROM_HW
+#endif /* CONFIG_WLAN_REGION_CODE == 150 */
+#endif /* CONFIG_WLAN_REGION_CODE >= 100 && CONFIG_WLAN_REGION_CODE < 200 */
+
+#if (CONFIG_WLAN_REGION_CODE >= 200) && (CONFIG_WLAN_REGION_CODE < 300) /* KOR */
+#undef USE_INITIAL_2G_SCAN
+#ifndef ROAM_ENABLE
+#define ROAM_ENABLE
+#endif /* ROAM_ENABLE */
+#ifndef ROAM_API
+#define ROAM_API
+#endif /* ROAM_API */
+#ifndef ROAM_CHANNEL_CACHE
+#define ROAM_CHANNEL_CACHE
+#endif /* ROAM_CHANNEL_CACHE */
+#ifndef OKC_SUPPORT
+#define OKC_SUPPORT
+#endif /* OKC_SUPPORT */
+
+#ifndef ROAM_AP_ENV_DETECTION
+#define ROAM_AP_ENV_DETECTION
+#endif /* ROAM_AP_ENV_DETECTION */
+
+#undef WRITE_MACADDR
+#ifndef READ_MACADDR
+#define READ_MACADDR
+#endif /* READ_MACADDR */
+#endif /* CONFIG_WLAN_REGION_CODE >= 200 && CONFIG_WLAN_REGION_CODE < 300 */
+
+#if (CONFIG_WLAN_REGION_CODE >= 300) && (CONFIG_WLAN_REGION_CODE < 400) /* CHN */
+#define BCMWAPI_WPI
+#define BCMWAPI_WAI
+#endif /* CONFIG_WLAN_REGION_CODE >= 300 && CONFIG_WLAN_REGION_CODE < 400 */
+
+#if (CONFIG_WLAN_REGION_CODE == 500) /* JP */
+#if defined(BCM4375_CHIP)
+#define DISABLE_HE_ENAB
+#endif /* BCM4375_CHIP */
+#endif /* CONFIG_WLAN_REGION_CODE == 500 */
+
+/* REGION CODE END */
+
+#if !defined(READ_MACADDR) && !defined(WRITE_MACADDR)
+#define GET_MAC_FROM_OTP
+#define SHOW_NVRAM_TYPE
+#endif /* !READ_MACADDR && !WRITE_MACADDR */
+
+#define WRITE_WLANINFO
+
+#endif /* _dhd_sec_feature_h_ */
diff --git a/bcmdhd.101.10.361.x/dhd_static_buf.c b/bcmdhd.101.10.361.x/dhd_static_buf.c
new file mode 100755
index 0000000..4ff7e04
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_static_buf.c
@@ -0,0 +1,657 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2019 Amlogic, Inc. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/skbuff.h>
+#include <linux/amlogic/wlan_plat.h>
+#include <linux/amlogic/dhd_buf.h>
+
+#define DHD_STATIC_VERSION_STR "101.10.361.16 (wlan=r892223-20220401-1)"
+#define STATIC_ERROR_LEVEL (1 << 0)
+#define STATIC_TRACE_LEVEL (1 << 1)
+#define STATIC_MSG_LEVEL (1 << 0)
+uint static_msg_level = STATIC_ERROR_LEVEL | STATIC_MSG_LEVEL;
+
+#define DHD_STATIC_MSG(x, args...) \
+ do { \
+ if (static_msg_level & STATIC_MSG_LEVEL) { \
+ pr_err("[dhd] STATIC-MSG) %s : " x, __func__, ## args); \
+ } \
+ } while (0)
+#define DHD_STATIC_ERROR(x, args...) \
+ do { \
+ if (static_msg_level & STATIC_ERROR_LEVEL) { \
+ pr_err("[dhd] STATIC-ERROR) %s : " x, __func__, ## args); \
+ } \
+ } while (0)
+#define DHD_STATIC_TRACE(x, args...) \
+ do { \
+ if (static_msg_level & STATIC_TRACE_LEVEL) { \
+ pr_err("[dhd] STATIC-TRACE) %s : " x, __func__, ## args); \
+ } \
+ } while (0)
+
+#define BCMDHD_SDIO
+#define BCMDHD_PCIE
+//#define BCMDHD_USB
+#define CONFIG_BCMDHD_VTS
+#define CONFIG_BCMDHD_DEBUG
+//#define BCMDHD_UNUSE_MEM
+
+#ifndef MAX_NUM_ADAPTERS
+#define MAX_NUM_ADAPTERS 1
+#endif
+
+enum dhd_prealloc_index {
+ DHD_PREALLOC_PROT = 0,
+#if defined(BCMDHD_SDIO)
+ DHD_PREALLOC_RXBUF = 1,
+ DHD_PREALLOC_DATABUF = 2,
+#endif /* BCMDHD_SDIO */
+ DHD_PREALLOC_OSL_BUF = 3,
+ DHD_PREALLOC_SKB_BUF = 4,
+ DHD_PREALLOC_WIPHY_ESCAN0 = 5,
+ DHD_PREALLOC_WIPHY_ESCAN1 = 6,
+ DHD_PREALLOC_DHD_INFO = 7,
+#if defined(BCMDHD_SDIO) || defined(BCMDHD_USB)
+ DHD_PREALLOC_DHD_WLFC_INFO = 8,
+#endif /* BCMDHD_SDIO | BCMDHD_USB */
+#ifdef BCMDHD_PCIE
+ DHD_PREALLOC_IF_FLOW_LKUP = 9,
+#endif /* BCMDHD_PCIE */
+ DHD_PREALLOC_MEMDUMP_BUF = 10,
+#if defined(CONFIG_BCMDHD_VTS) || defined(CONFIG_BCMDHD_DEBUG)
+ DHD_PREALLOC_MEMDUMP_RAM = 11,
+#endif /* CONFIG_BCMDHD_VTS | CONFIG_BCMDHD_DEBUG */
+#if defined(BCMDHD_SDIO) || defined(BCMDHD_USB)
+ DHD_PREALLOC_DHD_WLFC_HANGER = 12,
+#endif /* BCMDHD_SDIO | BCMDHD_USB */
+ DHD_PREALLOC_PKTID_MAP = 13,
+ DHD_PREALLOC_PKTID_MAP_IOCTL = 14,
+#if defined(CONFIG_BCMDHD_VTS) || defined(CONFIG_BCMDHD_DEBUG)
+ DHD_PREALLOC_DHD_LOG_DUMP_BUF = 15,
+ DHD_PREALLOC_DHD_LOG_DUMP_BUF_EX = 16,
+#endif /* CONFIG_BCMDHD_VTS | CONFIG_BCMDHD_DEBUG */
+ DHD_PREALLOC_DHD_PKTLOG_DUMP_BUF = 17,
+ DHD_PREALLOC_STAT_REPORT_BUF = 18,
+ DHD_PREALLOC_WL_ESCAN = 19,
+ DHD_PREALLOC_FW_VERBOSE_RING = 20,
+ DHD_PREALLOC_FW_EVENT_RING = 21,
+ DHD_PREALLOC_DHD_EVENT_RING = 22,
+#if defined(BCMDHD_UNUSE_MEM)
+ DHD_PREALLOC_NAN_EVENT_RING = 23,
+#endif /* BCMDHD_UNUSE_MEM */
+ DHD_PREALLOC_MAX
+};
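+
+/* These index values are the "section" IDs that the DHD core passes to
+ * bcmdhd_mem_prealloc() below.
+ */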
+
+#define STATIC_BUF_MAX_NUM 20
+#define STATIC_BUF_SIZE (PAGE_SIZE*2)
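+/* e.g. with 4 KB pages: 20 buffers x 8 KB = 160 KB for the OSL pool
+ * (DHD_PREALLOC_OSL_BUF_SIZE below)
+ */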
+
+#ifndef CUSTOM_LOG_DUMP_BUFSIZE_MB
+#define CUSTOM_LOG_DUMP_BUFSIZE_MB 4 /* DHD_LOG_DUMP_BUF_SIZE 4 MB static memory in kernel */
+#endif /* CUSTOM_LOG_DUMP_BUFSIZE_MB */
+
+#define DHD_PREALLOC_PROT_SIZE (16 * 1024)
+#define DHD_PREALLOC_RXBUF_SIZE (24 * 1024)
+#define DHD_PREALLOC_DATABUF_SIZE (64 * 1024)
+#define DHD_PREALLOC_OSL_BUF_SIZE (STATIC_BUF_MAX_NUM * STATIC_BUF_SIZE)
+#define DHD_PREALLOC_WIPHY_ESCAN0_SIZE (64 * 1024)
+#define DHD_PREALLOC_DHD_INFO_SIZE (36 * 1024)
+#if defined(CONFIG_BCMDHD_VTS) || defined(CONFIG_BCMDHD_DEBUG)
+#define DHD_PREALLOC_MEMDUMP_RAM_SIZE (1290 * 1024)
+#endif /* CONFIG_BCMDHD_VTS | CONFIG_BCMDHD_DEBUG */
+#define DHD_PREALLOC_DHD_WLFC_HANGER_SIZE (73 * 1024)
+#if defined(CONFIG_BCMDHD_VTS) || defined(CONFIG_BCMDHD_DEBUG)
+#define DHD_PREALLOC_DHD_LOG_DUMP_BUF_SIZE (1024 * 1024 * CUSTOM_LOG_DUMP_BUFSIZE_MB)
+#define DHD_PREALLOC_DHD_LOG_DUMP_BUF_EX_SIZE (8 * 1024)
+#endif /* CONFIG_BCMDHD_VTS | CONFIG_BCMDHD_DEBUG */
+#define DHD_PREALLOC_WL_ESCAN_SIZE (70 * 1024)
+#ifdef CONFIG_64BIT
+#define DHD_PREALLOC_IF_FLOW_LKUP_SIZE (20 * 1024 * 2)
+#else
+#define DHD_PREALLOC_IF_FLOW_LKUP_SIZE (20 * 1024)
+#endif
+#define FW_VERBOSE_RING_SIZE (256 * 1024)
+#define FW_EVENT_RING_SIZE (64 * 1024)
+#define DHD_EVENT_RING_SIZE (64 * 1024)
+#define NAN_EVENT_RING_SIZE (64 * 1024)
+
+#if defined(CONFIG_64BIT)
+#define WLAN_DHD_INFO_BUF_SIZE (24 * 1024)
+#define WLAN_DHD_WLFC_BUF_SIZE (64 * 1024)
+#define WLAN_DHD_IF_FLOW_LKUP_SIZE (64 * 1024)
+#else
+#define WLAN_DHD_INFO_BUF_SIZE (16 * 1024)
+#define WLAN_DHD_WLFC_BUF_SIZE (64 * 1024)
+#define WLAN_DHD_IF_FLOW_LKUP_SIZE (20 * 1024)
+#endif /* CONFIG_64BIT */
+#define WLAN_DHD_MEMDUMP_SIZE (800 * 1024)
+
+#define DHD_SKB_1PAGE_BUFSIZE (PAGE_SIZE*1)
+#define DHD_SKB_2PAGE_BUFSIZE (PAGE_SIZE*2)
+#define DHD_SKB_4PAGE_BUFSIZE (PAGE_SIZE*4)
+
+#ifdef BCMDHD_PCIE
+#define DHD_SKB_1PAGE_BUF_NUM 0
+#define DHD_SKB_2PAGE_BUF_NUM 192
+#elif defined(BCMDHD_SDIO)
+#define DHD_SKB_1PAGE_BUF_NUM 8
+#define DHD_SKB_2PAGE_BUF_NUM 8
+#endif /* BCMDHD_PCIE */
+#define DHD_SKB_4PAGE_BUF_NUM 1
+
+/* These counts must match the ones defined in linux_osl.c:
+ * WLAN_SKB_1_2PAGE_BUF_NUM => STATIC_PKT_1_2PAGE_NUM
+ * WLAN_SKB_BUF_NUM => STATIC_PKT_MAX_NUM
+ */
+#if defined(BCMDHD_SDIO) || defined(BCMDHD_PCIE)
+#define WLAN_SKB_1_2PAGE_BUF_NUM ((DHD_SKB_1PAGE_BUF_NUM) + \
+ (DHD_SKB_2PAGE_BUF_NUM))
+#define WLAN_SKB_BUF_NUM ((WLAN_SKB_1_2PAGE_BUF_NUM) + (DHD_SKB_4PAGE_BUF_NUM))
+#endif
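+
+/* With both buses defined above, the BCMDHD_PCIE branch applies:
+ * 0 + 192 one/two-page SKBs plus 1 four-page SKB, so WLAN_SKB_BUF_NUM
+ * evaluates to 193 preallocated SKBs per adapter.
+ */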
+
+void *wlan_static_prot[MAX_NUM_ADAPTERS] = {NULL};
+void *wlan_static_rxbuf[MAX_NUM_ADAPTERS] = {NULL};
+void *wlan_static_databuf[MAX_NUM_ADAPTERS] = {NULL};
+void *wlan_static_osl_buf[MAX_NUM_ADAPTERS] = {NULL};
+void *wlan_static_scan_buf0[MAX_NUM_ADAPTERS] = {NULL};
+void *wlan_static_scan_buf1[MAX_NUM_ADAPTERS] = {NULL};
+void *wlan_static_dhd_info_buf[MAX_NUM_ADAPTERS] = {NULL};
+void *wlan_static_dhd_wlfc_info_buf[MAX_NUM_ADAPTERS] = {NULL};
+void *wlan_static_if_flow_lkup[MAX_NUM_ADAPTERS] = {NULL};
+void *wlan_static_dhd_memdump_ram_buf[MAX_NUM_ADAPTERS] = {NULL};
+void *wlan_static_dhd_wlfc_hanger_buf[MAX_NUM_ADAPTERS] = {NULL};
+#if defined(CONFIG_BCMDHD_VTS) || defined(CONFIG_BCMDHD_DEBUG)
+void *wlan_static_dhd_log_dump_buf[MAX_NUM_ADAPTERS] = {NULL};
+void *wlan_static_dhd_log_dump_buf_ex[MAX_NUM_ADAPTERS] = {NULL};
+#endif /* CONFIG_BCMDHD_VTS | CONFIG_BCMDHD_DEBUG */
+void *wlan_static_wl_escan_info_buf[MAX_NUM_ADAPTERS] = {NULL};
+void *wlan_static_fw_verbose_ring_buf[MAX_NUM_ADAPTERS] = {NULL};
+void *wlan_static_fw_event_ring_buf[MAX_NUM_ADAPTERS] = {NULL};
+void *wlan_static_dhd_event_ring_buf[MAX_NUM_ADAPTERS] = {NULL};
+void *wlan_static_nan_event_ring_buf[MAX_NUM_ADAPTERS] = {NULL};
+
+#if defined(BCMDHD_SDIO) || defined(BCMDHD_PCIE)
+static struct sk_buff *wlan_static_skb[MAX_NUM_ADAPTERS][WLAN_SKB_BUF_NUM];
+#endif /* BCMDHD_SDIO | BCMDHD_PCIE */
+
+void *
+bcmdhd_mem_prealloc(
+#ifdef BCMDHD_MDRIVER
+ uint bus_type, int index,
+#endif
+ int section, unsigned long size)
+{
+#ifndef BCMDHD_MDRIVER
+ int index = 0;
+#endif
+
+#ifdef BCMDHD_MDRIVER
+	DHD_STATIC_MSG("bus_type %d, index %d, section %d, size %lu\n",
+ bus_type, index, section, size);
+#else
+	DHD_STATIC_MSG("section %d, size %lu\n", section, size);
+#endif
+
+ if (section == DHD_PREALLOC_PROT)
+ return wlan_static_prot[index];
+
+#if defined(BCMDHD_SDIO)
+ if (section == DHD_PREALLOC_RXBUF)
+ return wlan_static_rxbuf[index];
+
+ if (section == DHD_PREALLOC_DATABUF)
+ return wlan_static_databuf[index];
+#endif /* BCMDHD_SDIO */
+
+#if defined(BCMDHD_SDIO) || defined(BCMDHD_PCIE)
+ if (section == DHD_PREALLOC_SKB_BUF)
+ return wlan_static_skb[index];
+#endif /* BCMDHD_SDIO | BCMDHD_PCIE */
+
+ if (section == DHD_PREALLOC_WIPHY_ESCAN0)
+ return wlan_static_scan_buf0[index];
+
+ if (section == DHD_PREALLOC_WIPHY_ESCAN1)
+ return wlan_static_scan_buf1[index];
+
+ if (section == DHD_PREALLOC_OSL_BUF) {
+ if (size > DHD_PREALLOC_OSL_BUF_SIZE) {
+			DHD_STATIC_ERROR("request OSL_BUF(%lu) > %lu\n",
+ size, DHD_PREALLOC_OSL_BUF_SIZE);
+ return NULL;
+ }
+ return wlan_static_osl_buf[index];
+ }
+
+ if (section == DHD_PREALLOC_DHD_INFO) {
+ if (size > DHD_PREALLOC_DHD_INFO_SIZE) {
+ DHD_STATIC_ERROR("request DHD_INFO(%lu) > %d\n",
+ size, DHD_PREALLOC_DHD_INFO_SIZE);
+ return NULL;
+ }
+ return wlan_static_dhd_info_buf[index];
+ }
+#if defined(BCMDHD_SDIO) || defined(BCMDHD_USB)
+ if (section == DHD_PREALLOC_DHD_WLFC_INFO) {
+ if (size > WLAN_DHD_WLFC_BUF_SIZE) {
+ DHD_STATIC_ERROR("request DHD_WLFC_INFO(%lu) > %d\n",
+ size, WLAN_DHD_WLFC_BUF_SIZE);
+ return NULL;
+ }
+ return wlan_static_dhd_wlfc_info_buf[index];
+ }
+#endif /* BCMDHD_SDIO | BCMDHD_USB */
+#ifdef BCMDHD_PCIE
+ if (section == DHD_PREALLOC_IF_FLOW_LKUP) {
+ if (size > DHD_PREALLOC_IF_FLOW_LKUP_SIZE) {
+ DHD_STATIC_ERROR("request DHD_IF_FLOW_LKUP(%lu) > %d\n",
+ size, DHD_PREALLOC_IF_FLOW_LKUP_SIZE);
+ return NULL;
+ }
+ return wlan_static_if_flow_lkup[index];
+ }
+#endif /* BCMDHD_PCIE */
+#if defined(CONFIG_BCMDHD_VTS) || defined(CONFIG_BCMDHD_DEBUG)
+ if (section == DHD_PREALLOC_MEMDUMP_RAM) {
+ if (size > DHD_PREALLOC_MEMDUMP_RAM_SIZE) {
+ DHD_STATIC_ERROR("request DHD_PREALLOC_MEMDUMP_RAM(%lu) > %d\n",
+ size, DHD_PREALLOC_MEMDUMP_RAM_SIZE);
+ return NULL;
+ }
+ return wlan_static_dhd_memdump_ram_buf[index];
+ }
+#endif /* CONFIG_BCMDHD_VTS | CONFIG_BCMDHD_DEBUG */
+#if defined(BCMDHD_SDIO) || defined(BCMDHD_USB)
+ if (section == DHD_PREALLOC_DHD_WLFC_HANGER) {
+ if (size > DHD_PREALLOC_DHD_WLFC_HANGER_SIZE) {
+ DHD_STATIC_ERROR("request DHD_WLFC_HANGER(%lu) > %d\n",
+ size, DHD_PREALLOC_DHD_WLFC_HANGER_SIZE);
+ return NULL;
+ }
+ return wlan_static_dhd_wlfc_hanger_buf[index];
+ }
+#endif /* BCMDHD_SDIO | BCMDHD_USB */
+#if defined(CONFIG_BCMDHD_VTS) || defined(CONFIG_BCMDHD_DEBUG)
+ if (section == DHD_PREALLOC_DHD_LOG_DUMP_BUF) {
+ if (size > DHD_PREALLOC_DHD_LOG_DUMP_BUF_SIZE) {
+ DHD_STATIC_ERROR("request DHD_PREALLOC_DHD_LOG_DUMP_BUF(%lu) > %d\n",
+ size, DHD_PREALLOC_DHD_LOG_DUMP_BUF_SIZE);
+ return NULL;
+ }
+ return wlan_static_dhd_log_dump_buf[index];
+ }
+ if (section == DHD_PREALLOC_DHD_LOG_DUMP_BUF_EX) {
+ if (size > DHD_PREALLOC_DHD_LOG_DUMP_BUF_EX_SIZE) {
+ DHD_STATIC_ERROR("request DHD_PREALLOC_DHD_LOG_DUMP_BUF_EX(%lu) > %d\n",
+ size, DHD_PREALLOC_DHD_LOG_DUMP_BUF_EX_SIZE);
+ return NULL;
+ }
+ return wlan_static_dhd_log_dump_buf_ex[index];
+ }
+#endif /* CONFIG_BCMDHD_VTS | CONFIG_BCMDHD_DEBUG */
+ if (section == DHD_PREALLOC_WL_ESCAN) {
+ if (size > DHD_PREALLOC_WL_ESCAN_SIZE) {
+ DHD_STATIC_ERROR("request DHD_PREALLOC_WL_ESCAN(%lu) > %d\n",
+ size, DHD_PREALLOC_WL_ESCAN_SIZE);
+ return NULL;
+ }
+ return wlan_static_wl_escan_info_buf[index];
+ }
+ if (section == DHD_PREALLOC_FW_VERBOSE_RING) {
+ if (size > FW_VERBOSE_RING_SIZE) {
+ DHD_STATIC_ERROR("request DHD_PREALLOC_FW_VERBOSE_RING(%lu) > %d\n",
+ size, FW_VERBOSE_RING_SIZE);
+ return NULL;
+ }
+ return wlan_static_fw_verbose_ring_buf[index];
+ }
+ if (section == DHD_PREALLOC_FW_EVENT_RING) {
+ if (size > FW_EVENT_RING_SIZE) {
+ DHD_STATIC_ERROR("request DHD_PREALLOC_FW_EVENT_RING(%lu) > %d\n",
+ size, FW_EVENT_RING_SIZE);
+ return NULL;
+ }
+ return wlan_static_fw_event_ring_buf[index];
+ }
+ if (section == DHD_PREALLOC_DHD_EVENT_RING) {
+ if (size > DHD_EVENT_RING_SIZE) {
+ DHD_STATIC_ERROR("request DHD_PREALLOC_DHD_EVENT_RING(%lu) > %d\n",
+ size, DHD_EVENT_RING_SIZE);
+ return NULL;
+ }
+ return wlan_static_dhd_event_ring_buf[index];
+ }
+#if defined(BCMDHD_UNUSE_MEM)
+ if (section == DHD_PREALLOC_NAN_EVENT_RING) {
+ if (size > NAN_EVENT_RING_SIZE) {
+ DHD_STATIC_ERROR("request DHD_PREALLOC_NAN_EVENT_RING(%lu) > %d\n",
+ size, NAN_EVENT_RING_SIZE);
+ return NULL;
+ }
+ return wlan_static_nan_event_ring_buf[index];
+ }
+#endif /* BCMDHD_UNUSE_MEM */
+	if ((section < 0) || (section >= DHD_PREALLOC_MAX))
+		DHD_STATIC_ERROR("request section id(%d) is out of max index %d\n",
+			section, DHD_PREALLOC_MAX);
+
+	DHD_STATIC_ERROR("failed to alloc section %d, size=%lu\n",
+		section, size);
+
+ return NULL;
+}
+EXPORT_SYMBOL(bcmdhd_mem_prealloc);
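+
+/*
+ * Typical call site (sketch, not part of this file): the DHD core reaches
+ * this symbol through its platform prealloc hook and treats NULL as "no
+ * static buffer, fall back to dynamic allocation", e.g.
+ *
+ *	buf = bcmdhd_mem_prealloc(DHD_PREALLOC_DHD_INFO, size);
+ *	if (!buf)
+ *		buf = kmalloc(size, GFP_KERNEL);
+ *
+ * (hypothetical caller, shown only to illustrate the contract)
+ */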
+
+static void
+dhd_deinit_wlan_mem(int index)
+{
+#if defined(BCMDHD_SDIO) || defined(BCMDHD_PCIE)
+ int i;
+#endif /* BCMDHD_SDIO | BCMDHD_PCIE */
+
+ if (wlan_static_prot[index])
+ kfree(wlan_static_prot[index]);
+#if defined(BCMDHD_SDIO)
+ if (wlan_static_rxbuf[index])
+ kfree(wlan_static_rxbuf[index]);
+ if (wlan_static_databuf[index])
+ kfree(wlan_static_databuf[index]);
+#endif /* BCMDHD_SDIO */
+ if (wlan_static_osl_buf[index])
+ kfree(wlan_static_osl_buf[index]);
+ if (wlan_static_scan_buf0[index])
+ kfree(wlan_static_scan_buf0[index]);
+ if (wlan_static_scan_buf1[index])
+ kfree(wlan_static_scan_buf1[index]);
+ if (wlan_static_dhd_info_buf[index])
+ kfree(wlan_static_dhd_info_buf[index]);
+#if defined(BCMDHD_SDIO) || defined(BCMDHD_USB)
+ if (wlan_static_dhd_wlfc_info_buf[index])
+ kfree(wlan_static_dhd_wlfc_info_buf[index]);
+#endif /* BCMDHD_SDIO | BCMDHD_USB */
+#ifdef BCMDHD_PCIE
+ if (wlan_static_if_flow_lkup[index])
+ kfree(wlan_static_if_flow_lkup[index]);
+#endif /* BCMDHD_PCIE */
+#if defined(CONFIG_BCMDHD_VTS) || defined(CONFIG_BCMDHD_DEBUG)
+ if (wlan_static_dhd_memdump_ram_buf[index])
+ kfree(wlan_static_dhd_memdump_ram_buf[index]);
+#endif /* CONFIG_BCMDHD_VTS | CONFIG_BCMDHD_DEBUG */
+#if defined(BCMDHD_SDIO) || defined(BCMDHD_USB)
+ if (wlan_static_dhd_wlfc_hanger_buf[index])
+ kfree(wlan_static_dhd_wlfc_hanger_buf[index]);
+#endif /* BCMDHD_SDIO | BCMDHD_USB */
+#if defined(CONFIG_BCMDHD_VTS) || defined(CONFIG_BCMDHD_DEBUG)
+ if (wlan_static_dhd_log_dump_buf[index])
+ kfree(wlan_static_dhd_log_dump_buf[index]);
+ if (wlan_static_dhd_log_dump_buf_ex[index])
+ kfree(wlan_static_dhd_log_dump_buf_ex[index]);
+#endif /* CONFIG_BCMDHD_VTS | CONFIG_BCMDHD_DEBUG */
+ if (wlan_static_wl_escan_info_buf[index])
+ kfree(wlan_static_wl_escan_info_buf[index]);
+ if (wlan_static_fw_verbose_ring_buf[index])
+ kfree(wlan_static_fw_verbose_ring_buf[index]);
+ if (wlan_static_fw_event_ring_buf[index])
+ kfree(wlan_static_fw_event_ring_buf[index]);
+ if (wlan_static_dhd_event_ring_buf[index])
+ kfree(wlan_static_dhd_event_ring_buf[index]);
+#if defined(BCMDHD_UNUSE_MEM)
+ if (wlan_static_nan_event_ring_buf[index])
+ kfree(wlan_static_nan_event_ring_buf[index]);
+#endif /* BCMDHD_UNUSE_MEM */
+
+#if defined(BCMDHD_SDIO) || defined(BCMDHD_PCIE)
+	for (i = 0; i < WLAN_SKB_BUF_NUM; i++) {
+ if (wlan_static_skb[index][i])
+ dev_kfree_skb(wlan_static_skb[index][i]);
+ }
+#endif /* BCMDHD_SDIO | BCMDHD_PCIE */
+
+ return;
+}
+
+static int
+dhd_init_wlan_mem(int index, unsigned int all_buf)
+{
+#if defined(BCMDHD_SDIO) || defined(BCMDHD_PCIE)
+ int i;
+#endif
+ unsigned long size = 0;
+
+#if defined(BCMDHD_SDIO) || defined(BCMDHD_PCIE)
+	for (i = 0; i < WLAN_SKB_BUF_NUM; i++) {
+ wlan_static_skb[index][i] = NULL;
+ }
+
+ for (i = 0; i < DHD_SKB_1PAGE_BUF_NUM; i++) {
+ wlan_static_skb[index][i] = dev_alloc_skb(DHD_SKB_1PAGE_BUFSIZE);
+ if (!wlan_static_skb[index][i]) {
+ goto err_mem_alloc;
+ }
+ size += DHD_SKB_1PAGE_BUFSIZE;
+		DHD_STATIC_TRACE("section %d skb[%d], size=%lu\n",
+ DHD_PREALLOC_SKB_BUF, i, DHD_SKB_1PAGE_BUFSIZE);
+ }
+
+ for (i = DHD_SKB_1PAGE_BUF_NUM; i < WLAN_SKB_1_2PAGE_BUF_NUM; i++) {
+ wlan_static_skb[index][i] = dev_alloc_skb(DHD_SKB_2PAGE_BUFSIZE);
+ if (!wlan_static_skb[index][i]) {
+ goto err_mem_alloc;
+ }
+ size += DHD_SKB_2PAGE_BUFSIZE;
+		DHD_STATIC_TRACE("section %d skb[%d], size=%lu\n",
+ DHD_PREALLOC_SKB_BUF, i, DHD_SKB_2PAGE_BUFSIZE);
+ }
+#endif /* BCMDHD_SDIO | BCMDHD_PCIE */
+
+ if (all_buf == 1) {
+#if defined(BCMDHD_SDIO)
+ wlan_static_skb[index][i] = dev_alloc_skb(DHD_SKB_4PAGE_BUFSIZE);
+ if (!wlan_static_skb[index][i])
+ goto err_mem_alloc;
+ size += DHD_SKB_4PAGE_BUFSIZE;
+		DHD_STATIC_TRACE("section %d skb[%d], size=%lu\n",
+ DHD_PREALLOC_SKB_BUF, i, DHD_SKB_4PAGE_BUFSIZE);
+#endif /* BCMDHD_SDIO */
+
+ wlan_static_prot[index] = kmalloc(DHD_PREALLOC_PROT_SIZE, GFP_KERNEL);
+ if (!wlan_static_prot[index])
+ goto err_mem_alloc;
+ size += DHD_PREALLOC_PROT_SIZE;
+		DHD_STATIC_TRACE("section %d, size=%d\n",
+ DHD_PREALLOC_PROT, DHD_PREALLOC_PROT_SIZE);
+
+#if defined(BCMDHD_SDIO)
+ wlan_static_rxbuf[index] = kmalloc(DHD_PREALLOC_RXBUF_SIZE, GFP_KERNEL);
+ if (!wlan_static_rxbuf[index])
+ goto err_mem_alloc;
+ size += DHD_PREALLOC_RXBUF_SIZE;
+		DHD_STATIC_TRACE("section %d, size=%d\n",
+ DHD_PREALLOC_RXBUF, DHD_PREALLOC_RXBUF_SIZE);
+
+ wlan_static_databuf[index] = kmalloc(DHD_PREALLOC_DATABUF_SIZE, GFP_KERNEL);
+ if (!wlan_static_databuf[index])
+ goto err_mem_alloc;
+ size += DHD_PREALLOC_DATABUF_SIZE;
+		DHD_STATIC_TRACE("section %d, size=%d\n",
+ DHD_PREALLOC_DATABUF, DHD_PREALLOC_DATABUF_SIZE);
+#endif /* BCMDHD_SDIO */
+
+ wlan_static_osl_buf[index] = kmalloc(DHD_PREALLOC_OSL_BUF_SIZE, GFP_KERNEL);
+ if (!wlan_static_osl_buf[index])
+ goto err_mem_alloc;
+ size += DHD_PREALLOC_OSL_BUF_SIZE;
+		DHD_STATIC_TRACE("section %d, size=%lu\n",
+ DHD_PREALLOC_OSL_BUF, DHD_PREALLOC_OSL_BUF_SIZE);
+
+ wlan_static_scan_buf0[index] = kmalloc(DHD_PREALLOC_WIPHY_ESCAN0_SIZE, GFP_KERNEL);
+ if (!wlan_static_scan_buf0[index])
+ goto err_mem_alloc;
+ size += DHD_PREALLOC_WIPHY_ESCAN0_SIZE;
+		DHD_STATIC_TRACE("section %d, size=%d\n",
+ DHD_PREALLOC_WIPHY_ESCAN0, DHD_PREALLOC_WIPHY_ESCAN0_SIZE);
+
+ wlan_static_dhd_info_buf[index] = kmalloc(DHD_PREALLOC_DHD_INFO_SIZE, GFP_KERNEL);
+ if (!wlan_static_dhd_info_buf[index])
+ goto err_mem_alloc;
+ size += DHD_PREALLOC_DHD_INFO_SIZE;
+		DHD_STATIC_TRACE("section %d, size=%d\n",
+ DHD_PREALLOC_DHD_INFO, DHD_PREALLOC_DHD_INFO_SIZE);
+
+#if defined(BCMDHD_SDIO) || defined(BCMDHD_USB)
+ wlan_static_dhd_wlfc_info_buf[index] = kmalloc(WLAN_DHD_WLFC_BUF_SIZE, GFP_KERNEL);
+ if (!wlan_static_dhd_wlfc_info_buf[index])
+ goto err_mem_alloc;
+ size += WLAN_DHD_WLFC_BUF_SIZE;
+		DHD_STATIC_TRACE("section %d, size=%d\n",
+ DHD_PREALLOC_DHD_WLFC_INFO, WLAN_DHD_WLFC_BUF_SIZE);
+#endif /* BCMDHD_SDIO | BCMDHD_USB */
+
+#ifdef BCMDHD_PCIE
+ wlan_static_if_flow_lkup[index] = kmalloc(DHD_PREALLOC_IF_FLOW_LKUP_SIZE, GFP_KERNEL);
+ if (!wlan_static_if_flow_lkup[index])
+ goto err_mem_alloc;
+ size += DHD_PREALLOC_IF_FLOW_LKUP_SIZE;
+		DHD_STATIC_TRACE("section %d, size=%d\n",
+ DHD_PREALLOC_IF_FLOW_LKUP, DHD_PREALLOC_IF_FLOW_LKUP_SIZE);
+#endif /* BCMDHD_PCIE */
+ }
+
+#if defined(CONFIG_BCMDHD_VTS) || defined(CONFIG_BCMDHD_DEBUG)
+ wlan_static_dhd_memdump_ram_buf[index] = kmalloc(DHD_PREALLOC_MEMDUMP_RAM_SIZE, GFP_KERNEL);
+ if (!wlan_static_dhd_memdump_ram_buf[index])
+ goto err_mem_alloc;
+ size += DHD_PREALLOC_MEMDUMP_RAM_SIZE;
+	DHD_STATIC_TRACE("section %d, size=%d\n",
+ DHD_PREALLOC_MEMDUMP_RAM, DHD_PREALLOC_MEMDUMP_RAM_SIZE);
+#endif /* CONFIG_BCMDHD_VTS | CONFIG_BCMDHD_DEBUG */
+
+ if (all_buf == 1) {
+#if defined(BCMDHD_SDIO) || defined(BCMDHD_USB)
+ wlan_static_dhd_wlfc_hanger_buf[index] = kmalloc(DHD_PREALLOC_DHD_WLFC_HANGER_SIZE, GFP_KERNEL);
+ if (!wlan_static_dhd_wlfc_hanger_buf[index])
+ goto err_mem_alloc;
+ size += DHD_PREALLOC_DHD_WLFC_HANGER_SIZE;
+		DHD_STATIC_TRACE("section %d, size=%d\n",
+ DHD_PREALLOC_DHD_WLFC_HANGER, DHD_PREALLOC_DHD_WLFC_HANGER_SIZE);
+#endif /* BCMDHD_SDIO | BCMDHD_USB */
+
+#if defined(CONFIG_BCMDHD_VTS) || defined(CONFIG_BCMDHD_DEBUG)
+ wlan_static_dhd_log_dump_buf[index] = kmalloc(DHD_PREALLOC_DHD_LOG_DUMP_BUF_SIZE, GFP_KERNEL);
+ if (!wlan_static_dhd_log_dump_buf[index])
+ goto err_mem_alloc;
+ size += DHD_PREALLOC_DHD_LOG_DUMP_BUF_SIZE;
+		DHD_STATIC_TRACE("section %d, size=%d\n",
+ DHD_PREALLOC_DHD_LOG_DUMP_BUF, DHD_PREALLOC_DHD_LOG_DUMP_BUF_SIZE);
+
+ wlan_static_dhd_log_dump_buf_ex[index] = kmalloc(DHD_PREALLOC_DHD_LOG_DUMP_BUF_EX_SIZE, GFP_KERNEL);
+ if (!wlan_static_dhd_log_dump_buf_ex[index])
+ goto err_mem_alloc;
+ size += DHD_PREALLOC_DHD_LOG_DUMP_BUF_EX_SIZE;
+		DHD_STATIC_TRACE("section %d, size=%d\n",
+ DHD_PREALLOC_DHD_LOG_DUMP_BUF_EX, DHD_PREALLOC_DHD_LOG_DUMP_BUF_EX_SIZE);
+#endif /* CONFIG_BCMDHD_VTS | CONFIG_BCMDHD_DEBUG */
+
+ wlan_static_wl_escan_info_buf[index] = kmalloc(DHD_PREALLOC_WL_ESCAN_SIZE, GFP_KERNEL);
+ if (!wlan_static_wl_escan_info_buf[index])
+ goto err_mem_alloc;
+ size += DHD_PREALLOC_WL_ESCAN_SIZE;
+		DHD_STATIC_TRACE("section %d, size=%d\n",
+ DHD_PREALLOC_WL_ESCAN, DHD_PREALLOC_WL_ESCAN_SIZE);
+ }
+
+ wlan_static_fw_verbose_ring_buf[index] = kmalloc(FW_VERBOSE_RING_SIZE, GFP_KERNEL);
+ if (!wlan_static_fw_verbose_ring_buf[index])
+ goto err_mem_alloc;
+ size += FW_VERBOSE_RING_SIZE;
+	DHD_STATIC_TRACE("section %d, size=%d\n",
+ DHD_PREALLOC_FW_VERBOSE_RING, FW_VERBOSE_RING_SIZE);
+
+ if (all_buf == 1) {
+ wlan_static_fw_event_ring_buf[index] = kmalloc(FW_EVENT_RING_SIZE, GFP_KERNEL);
+ if (!wlan_static_fw_event_ring_buf[index])
+ goto err_mem_alloc;
+ size += FW_EVENT_RING_SIZE;
+		DHD_STATIC_TRACE("section %d, size=%d\n",
+ DHD_PREALLOC_FW_EVENT_RING, FW_EVENT_RING_SIZE);
+
+ wlan_static_dhd_event_ring_buf[index] = kmalloc(DHD_EVENT_RING_SIZE, GFP_KERNEL);
+ if (!wlan_static_dhd_event_ring_buf[index])
+ goto err_mem_alloc;
+ size += DHD_EVENT_RING_SIZE;
+		DHD_STATIC_TRACE("section %d, size=%d\n",
+ DHD_PREALLOC_DHD_EVENT_RING, DHD_EVENT_RING_SIZE);
+
+#if defined(BCMDHD_UNUSE_MEM)
+ wlan_static_nan_event_ring_buf[index] = kmalloc(NAN_EVENT_RING_SIZE, GFP_KERNEL);
+ if (!wlan_static_nan_event_ring_buf[index])
+ goto err_mem_alloc;
+ size += NAN_EVENT_RING_SIZE;
+		DHD_STATIC_TRACE("section %d, size=%d\n",
+ DHD_PREALLOC_NAN_EVENT_RING, NAN_EVENT_RING_SIZE);
+#endif /* BCMDHD_UNUSE_MEM */
+ }
+
+	DHD_STATIC_MSG("prealloc ok for index %d: %lu(%luK)\n", index, size, size/1024);
+ return 0;
+
+err_mem_alloc:
+ DHD_STATIC_ERROR("Failed to allocate memory for index %d\n", index);
+
+ return -ENOMEM;
+}
+
+#ifdef DHD_STATIC_IN_DRIVER
+int
+#else
+static int __init
+#endif
+bcmdhd_init_wlan_mem(unsigned int all_buf)
+{
+ int i, ret = 0;
+
+ DHD_STATIC_MSG("%s\n", DHD_STATIC_VERSION_STR);
+
+	for (i = 0; i < MAX_NUM_ADAPTERS; i++) {
+ ret = dhd_init_wlan_mem(i, all_buf);
+ if (ret)
+ break;
+ }
+
+ if (ret) {
+		for (i = 0; i < MAX_NUM_ADAPTERS; i++)
+ dhd_deinit_wlan_mem(i);
+ }
+
+	return ret;
+}
+
+#ifdef DHD_STATIC_IN_DRIVER
+void
+#else
+static void __exit
+#endif
+dhd_static_buf_exit(void)
+{
+ int i;
+
+ DHD_STATIC_MSG("Enter\n");
+
+	for (i = 0; i < MAX_NUM_ADAPTERS; i++)
+ dhd_deinit_wlan_mem(i);
+}
+
+#ifndef DHD_STATIC_IN_DRIVER
+EXPORT_SYMBOL(bcmdhd_init_wlan_mem);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("AMLOGIC");
+MODULE_DESCRIPTION("wifi static buffer driver");
+#endif
diff --git a/bcmdhd.101.10.361.x/dhd_statlog.c b/bcmdhd.101.10.361.x/dhd_statlog.c
new file mode 100755
index 0000000..b33914d
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_statlog.c
@@ -0,0 +1,1081 @@
+/*
+ * DHD debugability: Status Information Logging support
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+#include <linuxver.h>
+#include <typedefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <ethernet.h>
+#include <bcmevent.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_dbg.h>
+
+#ifdef DHD_STATUS_LOGGING
+
+#define DHD_STATLOG_ERR_INTERNAL(fmt, ...) DHD_ERROR(("STATLOG-" fmt, ##__VA_ARGS__))
+#define DHD_STATLOG_INFO_INTERNAL(fmt, ...) DHD_INFO(("STATLOG-" fmt, ##__VA_ARGS__))
+
+#define DHD_STATLOG_PRINT(x) DHD_ERROR(x)
+#define DHD_STATLOG_ERR(x) DHD_STATLOG_ERR_INTERNAL x
+#define DHD_STATLOG_INFO(x) DHD_STATLOG_INFO_INTERNAL x
+#define DHD_STATLOG_VALID(stat) (((stat) > (ST(INVALID))) && ((stat) < (ST(MAX))))
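+/* e.g. DHD_STATLOG_VALID(ST(AUTH_DONE)) is true; ST(INVALID) and ST(MAX)
+ * themselves are rejected.
+ */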
+
+dhd_statlog_handle_t *
+dhd_attach_statlog(dhd_pub_t *dhdp, uint32 num_items, uint32 bdlog_num_items, uint32 logbuf_len)
+{
+ dhd_statlog_t *statlog = NULL;
+ void *buf = NULL;
+
+ if (!dhdp) {
+ DHD_STATLOG_ERR(("%s: dhdp is NULL\n", __FUNCTION__));
+ return NULL;
+ }
+
+ statlog = (dhd_statlog_t *)VMALLOCZ(dhdp->osh, sizeof(dhd_statlog_t));
+ if (!statlog) {
+ DHD_STATLOG_ERR(("%s: failed to allocate memory for dhd_statlog_t\n",
+ __FUNCTION__));
+ return NULL;
+ }
+
+ /* allocate log buffer */
+ statlog->logbuf = (uint8 *)VMALLOCZ(dhdp->osh, logbuf_len);
+ if (!statlog->logbuf) {
+ DHD_STATLOG_ERR(("%s: failed to alloc log buffer\n", __FUNCTION__));
+ goto error;
+ }
+ statlog->logbuf_len = logbuf_len;
+
+ /* alloc ring buffer */
+ statlog->bufsize = (uint32)(dhd_ring_get_hdr_size() +
+ DHD_STATLOG_RING_SIZE(num_items));
+ buf = VMALLOCZ(dhdp->osh, statlog->bufsize);
+ if (!buf) {
+ DHD_STATLOG_ERR(("%s: failed to allocate memory for ring buffer\n",
+ __FUNCTION__));
+ goto error;
+ }
+
+ statlog->ringbuf = dhd_ring_init(dhdp, buf, statlog->bufsize,
+ DHD_STATLOG_ITEM_SIZE, num_items, DHD_RING_TYPE_SINGLE_IDX);
+ if (!statlog->ringbuf) {
+ DHD_STATLOG_ERR(("%s: failed to init ring buffer\n", __FUNCTION__));
+ VMFREE(dhdp->osh, buf, statlog->bufsize);
+ goto error;
+ }
+
+ /* alloc ring buffer for bigdata logging */
+ statlog->bdlog_bufsize = (uint32)(dhd_ring_get_hdr_size() +
+ DHD_STATLOG_RING_SIZE(bdlog_num_items));
+ buf = VMALLOCZ(dhdp->osh, statlog->bdlog_bufsize);
+ if (!buf) {
+ DHD_STATLOG_ERR(("%s: failed to allocate memory for bigdata logging buffer\n",
+ __FUNCTION__));
+ goto error;
+ }
+
+ statlog->bdlog_ringbuf = dhd_ring_init(dhdp, buf, statlog->bdlog_bufsize,
+ DHD_STATLOG_ITEM_SIZE, bdlog_num_items, DHD_RING_TYPE_SINGLE_IDX);
+ if (!statlog->bdlog_ringbuf) {
+ DHD_STATLOG_ERR(("%s: failed to init ring buffer for bigdata logging\n",
+ __FUNCTION__));
+ VMFREE(dhdp->osh, buf, statlog->bdlog_bufsize);
+ goto error;
+ }
+
+ return (dhd_statlog_handle_t *)statlog;
+
+error:
+ if (statlog->logbuf) {
+ VMFREE(dhdp->osh, statlog->logbuf, logbuf_len);
+ }
+
+ if (statlog->ringbuf) {
+ dhd_ring_deinit(dhdp, statlog->ringbuf);
+ VMFREE(dhdp->osh, statlog->ringbuf, statlog->bufsize);
+ }
+
+ if (statlog->bdlog_ringbuf) {
+ dhd_ring_deinit(dhdp, statlog->bdlog_ringbuf);
+ VMFREE(dhdp->osh, statlog->bdlog_ringbuf, statlog->bdlog_bufsize);
+ }
+
+ if (statlog) {
+ VMFREE(dhdp->osh, statlog, sizeof(dhd_statlog_t));
+ }
+
+ return NULL;
+}
+
+void
+dhd_detach_statlog(dhd_pub_t *dhdp)
+{
+ dhd_statlog_t *statlog;
+
+ if (!dhdp) {
+ DHD_STATLOG_ERR(("%s: dhdp is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ if (!dhdp->statlog) {
+ DHD_STATLOG_ERR(("%s: statlog is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ statlog = (dhd_statlog_t *)(dhdp->statlog);
+
+ if (statlog->bdlog_ringbuf) {
+ dhd_ring_deinit(dhdp, statlog->bdlog_ringbuf);
+ VMFREE(dhdp->osh, statlog->bdlog_ringbuf, statlog->bdlog_bufsize);
+ }
+
+ if (statlog->ringbuf) {
+ dhd_ring_deinit(dhdp, statlog->ringbuf);
+ VMFREE(dhdp->osh, statlog->ringbuf, statlog->bufsize);
+ }
+
+ if (statlog->logbuf) {
+ VMFREE(dhdp->osh, statlog->logbuf, statlog->logbuf_len);
+ }
+
+ VMFREE(dhdp->osh, statlog, sizeof(dhd_statlog_t));
+ dhdp->statlog = NULL;
+}
+
+static int
+dhd_statlog_ring_log(dhd_pub_t *dhdp, uint16 stat, uint8 ifidx, uint8 dir,
+ uint16 status, uint16 reason)
+{
+ dhd_statlog_t *statlog;
+ stat_elem_t *elem;
+
+ if (!dhdp || !dhdp->statlog) {
+ DHD_STATLOG_ERR(("%s: dhdp or dhdp->statlog is NULL\n",
+ __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ if (ifidx >= DHD_MAX_IFS) {
+ DHD_STATLOG_ERR(("%s: invalid ifidx %d\n", __FUNCTION__, ifidx));
+ return BCME_ERROR;
+ }
+
+ if (!DHD_STATLOG_VALID(stat)) {
+ DHD_STATLOG_ERR(("%s: invalid stat %d\n", __FUNCTION__, stat));
+ return BCME_ERROR;
+ }
+
+ statlog = (dhd_statlog_t *)(dhdp->statlog);
+ elem = (stat_elem_t *)dhd_ring_get_empty(statlog->ringbuf);
+ if (!elem) {
+ /* no available slot */
+ DHD_STATLOG_ERR(("%s: cannot allocate a new element\n",
+ __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ elem->ts_tz = OSL_SYSTZTIME_US();
+ elem->ts = OSL_LOCALTIME_NS();
+ elem->stat = stat;
+ elem->ifidx = ifidx;
+ elem->dir = dir;
+ elem->reason = reason;
+ elem->status = status;
+
+ /* Logging for the bigdata */
+ if (isset(statlog->bdmask, stat)) {
+ stat_elem_t *elem_bd;
+ elem_bd = (stat_elem_t *)dhd_ring_get_empty(statlog->bdlog_ringbuf);
+ if (!elem_bd) {
+ /* no available slot */
+ DHD_STATLOG_ERR(("%s: cannot allocate a new element for bigdata\n",
+ __FUNCTION__));
+ return BCME_ERROR;
+ }
+ bcopy(elem, elem_bd, sizeof(stat_elem_t));
+ }
+
+ return BCME_OK;
+}
+
+int
+dhd_statlog_ring_log_data(dhd_pub_t *dhdp, uint16 stat, uint8 ifidx,
+ uint8 dir, bool cond)
+{
+ return cond ? dhd_statlog_ring_log(dhdp, stat, ifidx,
+ dir ? STDIR(TX) : STDIR(RX), 0, 0) : BCME_OK;
+}
+
+int
+dhd_statlog_ring_log_data_reason(dhd_pub_t *dhdp, uint16 stat,
+ uint8 ifidx, uint8 dir, uint16 reason)
+{
+ return dhd_statlog_ring_log(dhdp, stat, ifidx,
+ dir ? STDIR(TX) : STDIR(RX), 0, reason);
+}
+
+int
+dhd_statlog_ring_log_ctrl(dhd_pub_t *dhdp, uint16 stat, uint8 ifidx, uint16 reason)
+{
+ return dhd_statlog_ring_log(dhdp, stat, ifidx, ST(DIR_TX), 0, reason);
+}
+
+int
+dhd_statlog_process_event(dhd_pub_t *dhdp, int type, uint8 ifidx,
+ uint16 status, uint16 reason, uint16 flags)
+{
+ int stat = ST(INVALID);
+ uint8 dir = STDIR(RX);
+
+ if (!dhdp || !dhdp->statlog) {
+ DHD_STATLOG_ERR(("%s: dhdp or dhdp->statlog is NULL\n",
+ __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ switch (type) {
+ case WLC_E_SET_SSID:
+ if (status == WLC_E_STATUS_SUCCESS) {
+ stat = ST(ASSOC_DONE);
+ } else if (status == WLC_E_STATUS_TIMEOUT) {
+ stat = ST(ASSOC_TIMEOUT);
+ } else if (status == WLC_E_STATUS_FAIL) {
+ stat = ST(ASSOC_FAIL);
+ } else if (status == WLC_E_STATUS_NO_ACK) {
+ stat = ST(ASSOC_NO_ACK);
+ } else if (status == WLC_E_STATUS_ABORT) {
+ stat = ST(ASSOC_ABORT);
+ } else if (status == WLC_E_STATUS_UNSOLICITED) {
+ stat = ST(ASSOC_UNSOLICITED);
+ } else if (status == WLC_E_STATUS_NO_NETWORKS) {
+ stat = ST(ASSOC_NO_NETWORKS);
+ } else {
+ stat = ST(ASSOC_OTHERS);
+ }
+ break;
+ case WLC_E_AUTH:
+ if (status == WLC_E_STATUS_SUCCESS) {
+ stat = ST(AUTH_DONE);
+ } else if (status == WLC_E_STATUS_TIMEOUT) {
+ stat = ST(AUTH_TIMEOUT);
+ } else if (status == WLC_E_STATUS_FAIL) {
+ stat = ST(AUTH_FAIL);
+ } else if (status == WLC_E_STATUS_NO_ACK) {
+ stat = ST(AUTH_NO_ACK);
+ } else {
+ stat = ST(AUTH_OTHERS);
+ }
+ dir = STDIR(TX);
+ break;
+ case WLC_E_AUTH_IND:
+ stat = ST(AUTH_DONE);
+ break;
+ case WLC_E_DEAUTH:
+ stat = ST(DEAUTH);
+ dir = STDIR(TX);
+ break;
+ case WLC_E_DEAUTH_IND:
+ stat = ST(DEAUTH);
+ break;
+ case WLC_E_DISASSOC:
+ stat = ST(DISASSOC);
+ dir = STDIR(TX);
+ break;
+ case WLC_E_LINK:
+ if (!(flags & WLC_EVENT_MSG_LINK)) {
+ stat = ST(LINKDOWN);
+ }
+ break;
+ case WLC_E_ROAM_PREP:
+ stat = ST(REASSOC_START);
+ break;
+ case WLC_E_ASSOC_REQ_IE:
+ stat = ST(ASSOC_REQ);
+ dir = STDIR(TX);
+ break;
+ case WLC_E_ASSOC_RESP_IE:
+ stat = ST(ASSOC_RESP);
+ break;
+ case WLC_E_BSSID:
+ if (status == WLC_E_STATUS_SUCCESS) {
+ stat = ST(REASSOC_DONE);
+ } else {
+ stat = ST(REASSOC_DONE_OTHERS);
+ }
+ break;
+ case WLC_E_REASSOC:
+ if (status == WLC_E_STATUS_SUCCESS) {
+ stat = ST(REASSOC_SUCCESS);
+ } else {
+ stat = ST(REASSOC_FAILURE);
+ }
+ dir = STDIR(TX);
+ break;
+ case WLC_E_ASSOC_IND:
+ stat = ST(ASSOC_REQ);
+ break;
+ default:
+ break;
+ }
+
+ /* logging interested events */
+ if (DHD_STATLOG_VALID(stat)) {
+ dhd_statlog_ring_log(dhdp, stat, ifidx, dir, status, reason);
+ }
+
+ return BCME_OK;
+}
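+
+/*
+ * Example of the mapping above: WLC_E_AUTH with WLC_E_STATUS_TIMEOUT is
+ * recorded as ST(AUTH_TIMEOUT) with direction STDIR(TX); a WLC_E_LINK
+ * event whose flags lack WLC_EVENT_MSG_LINK is recorded as ST(LINKDOWN).
+ */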
+
+uint32
+dhd_statlog_get_logbuf_len(dhd_pub_t *dhdp)
+{
+ uint32 length = 0;
+ dhd_statlog_t *statlog;
+
+ if (dhdp && dhdp->statlog) {
+ statlog = (dhd_statlog_t *)(dhdp->statlog);
+ length = statlog->logbuf_len;
+ }
+
+ return length;
+}
+
+void *
+dhd_statlog_get_logbuf(dhd_pub_t *dhdp)
+{
+ dhd_statlog_t *statlog;
+ void *ret_addr = NULL;
+
+ if (dhdp && dhdp->statlog) {
+ statlog = (dhd_statlog_t *)(dhdp->statlog);
+ ret_addr = (void *)(statlog->logbuf);
+ }
+
+ return ret_addr;
+}
+
+/*
+ * This function caps the output at MIN(buflen, DHD_STATLOG_STATSTR_BUF_LEN).
+ * When adding a case, make sure the string is shorter than
+ * DHD_STATLOG_STATSTR_BUF_LEN bytes.
+ */
+static void
+dhd_statlog_stat_name(char *buf, uint32 buflen, uint32 state, uint8 dir)
+{
+ char *stat_str = NULL;
+ bool tx = (dir == STDIR(TX));
+ uint32 max_buf_len = MIN(buflen, DHD_STATLOG_STATSTR_BUF_LEN);
+
+ switch (state) {
+ case ST(INVALID):
+ stat_str = "INVALID_STATE";
+ break;
+ case ST(WLAN_POWER_ON):
+ stat_str = "WLAN_POWER_ON";
+ break;
+ case ST(WLAN_POWER_OFF):
+ stat_str = "WLAN_POWER_OFF";
+ break;
+ case ST(ASSOC_START):
+ stat_str = "ASSOC_START";
+ break;
+ case ST(AUTH_DONE):
+ stat_str = "AUTH_DONE";
+ break;
+ case ST(ASSOC_REQ):
+ stat_str = tx ? "ASSOC_REQ" : "RX_ASSOC_REQ";
+ break;
+ case ST(ASSOC_RESP):
+ stat_str = "ASSOC_RESP";
+ break;
+ case ST(ASSOC_DONE):
+ stat_str = "ASSOC_DONE";
+ break;
+ case ST(DISASSOC_START):
+ stat_str = "DISASSOC_START";
+ break;
+ case ST(DISASSOC_INT_START):
+ stat_str = "DISASSOC_INTERNAL_START";
+ break;
+ case ST(DISASSOC_DONE):
+ stat_str = "DISASSOC_DONE";
+ break;
+ case ST(DISASSOC):
+ stat_str = tx ? "DISASSOC_EVENT" : "DISASSOC_IND_EVENT";
+ break;
+ case ST(DEAUTH):
+ stat_str = tx ? "DEAUTH_EVENT" : "DEAUTH_IND_EVENT";
+ break;
+ case ST(LINKDOWN):
+ stat_str = "LINKDOWN_EVENT";
+ break;
+ case ST(REASSOC_START):
+ stat_str = "REASSOC_START";
+ break;
+ case ST(REASSOC_INFORM):
+ stat_str = "REASSOC_INFORM";
+ break;
+ case ST(REASSOC_DONE):
+ stat_str = "REASSOC_DONE_SUCCESS";
+ break;
+ case ST(EAPOL_M1):
+ stat_str = tx ? "TX_EAPOL_M1" : "RX_EAPOL_M1";
+ break;
+ case ST(EAPOL_M2):
+ stat_str = tx ? "TX_EAPOL_M2" : "RX_EAPOL_M2";
+ break;
+ case ST(EAPOL_M3):
+ stat_str = tx ? "TX_EAPOL_M3" : "RX_EAPOL_M3";
+ break;
+ case ST(EAPOL_M4):
+ stat_str = tx ? "TX_EAPOL_M4" : "RX_EAPOL_M4";
+ break;
+ case ST(EAPOL_GROUPKEY_M1):
+ stat_str = tx ? "TX_EAPOL_GROUPKEY_M1" : "RX_EAPOL_GROUPKEY_M1";
+ break;
+ case ST(EAPOL_GROUPKEY_M2):
+ stat_str = tx ? "TX_EAPOL_GROUPKEY_M2" : "RX_EAPOL_GROUPKEY_M2";
+ break;
+ case ST(EAP_REQ_IDENTITY):
+ stat_str = tx ? "TX_EAP_REQ_IDENTITY" : "RX_EAP_REQ_IDENTITY";
+ break;
+ case ST(EAP_RESP_IDENTITY):
+ stat_str = tx ? "TX_EAP_RESP_IDENTITY" : "RX_EAP_RESP_IDENTITY";
+ break;
+ case ST(EAP_REQ_TLS):
+ stat_str = tx ? "TX_EAP_REQ_TLS" : "RX_EAP_REQ_TLS";
+ break;
+ case ST(EAP_RESP_TLS):
+ stat_str = tx ? "TX_EAP_RESP_TLS" : "RX_EAP_RESP_TLS";
+ break;
+ case ST(EAP_REQ_LEAP):
+ stat_str = tx ? "TX_EAP_REQ_LEAP" : "RX_EAP_REQ_LEAP";
+ break;
+ case ST(EAP_RESP_LEAP):
+ stat_str = tx ? "TX_EAP_RESP_LEAP" : "RX_EAP_RESP_LEAP";
+ break;
+ case ST(EAP_REQ_TTLS):
+ stat_str = tx ? "TX_EAP_REQ_TTLS" : "RX_EAP_REQ_TTLS";
+ break;
+ case ST(EAP_RESP_TTLS):
+ stat_str = tx ? "TX_EAP_RESP_TTLS" : "RX_EAP_RESP_TTLS";
+ break;
+ case ST(EAP_REQ_AKA):
+ stat_str = tx ? "TX_EAP_REQ_AKA" : "RX_EAP_REQ_AKA";
+ break;
+ case ST(EAP_RESP_AKA):
+ stat_str = tx ? "TX_EAP_RESP_AKA" : "RX_EAP_RESP_AKA";
+ break;
+ case ST(EAP_REQ_PEAP):
+ stat_str = tx ? "TX_EAP_REQ_PEAP" : "RX_EAP_REQ_PEAP";
+ break;
+ case ST(EAP_RESP_PEAP):
+ stat_str = tx ? "TX_EAP_RESP_PEAP" : "RX_EAP_RESP_PEAP";
+ break;
+ case ST(EAP_REQ_FAST):
+ stat_str = tx ? "TX_EAP_REQ_FAST" : "RX_EAP_REQ_FAST";
+ break;
+ case ST(EAP_RESP_FAST):
+ stat_str = tx ? "TX_EAP_RESP_FAST" : "RX_EAP_RESP_FAST";
+ break;
+ case ST(EAP_REQ_PSK):
+ stat_str = tx ? "TX_EAP_REQ_PSK" : "RX_EAP_REQ_PSK";
+ break;
+ case ST(EAP_RESP_PSK):
+ stat_str = tx ? "TX_EAP_RESP_PSK" : "RX_EAP_RESP_PSK";
+ break;
+ case ST(EAP_REQ_AKAP):
+ stat_str = tx ? "TX_EAP_REQ_AKAP" : "RX_EAP_REQ_AKAP";
+ break;
+ case ST(EAP_RESP_AKAP):
+ stat_str = tx ? "TX_EAP_RESP_AKAP" : "RX_EAP_RESP_AKAP";
+ break;
+ case ST(EAP_SUCCESS):
+ stat_str = tx ? "TX_EAP_SUCCESS" : "RX_EAP_SUCCESS";
+ break;
+ case ST(EAP_FAILURE):
+ stat_str = tx ? "TX_EAP_FAILURE" : "RX_EAP_FAILURE";
+ break;
+ case ST(EAPOL_START):
+ stat_str = tx ? "TX_EAPOL_START" : "RX_EAPOL_START";
+ break;
+ case ST(WSC_START):
+ stat_str = tx ? "TX_WSC_START" : "RX_WSC_START";
+ break;
+ case ST(WSC_DONE):
+ stat_str = tx ? "TX_WSC_DONE" : "RX_WSC_DONE";
+ break;
+ case ST(WPS_M1):
+ stat_str = tx ? "TX_WPS_M1" : "RX_WPS_M1";
+ break;
+ case ST(WPS_M2):
+ stat_str = tx ? "TX_WPS_M2" : "RX_WPS_M2";
+ break;
+ case ST(WPS_M3):
+ stat_str = tx ? "TX_WPS_M3" : "RX_WPS_M3";
+ break;
+ case ST(WPS_M4):
+ stat_str = tx ? "TX_WPS_M4" : "RX_WPS_M4";
+ break;
+ case ST(WPS_M5):
+ stat_str = tx ? "TX_WPS_M5" : "RX_WPS_M5";
+ break;
+ case ST(WPS_M6):
+ stat_str = tx ? "TX_WPS_M6" : "RX_WPS_M6";
+ break;
+ case ST(WPS_M7):
+ stat_str = tx ? "TX_WPS_M7" : "RX_WPS_M7";
+ break;
+ case ST(WPS_M8):
+ stat_str = tx ? "TX_WPS_M8" : "RX_WPS_M8";
+ break;
+ case ST(8021X_OTHER):
+ stat_str = tx ? "TX_OTHER_8021X" : "RX_OTHER_8021X";
+ break;
+ case ST(INSTALL_KEY):
+ stat_str = "INSTALL_KEY";
+ break;
+ case ST(DELETE_KEY):
+ stat_str = "DELETE_KEY";
+ break;
+ case ST(INSTALL_PMKSA):
+ stat_str = "INSTALL_PMKSA";
+ break;
+ case ST(INSTALL_OKC_PMK):
+ stat_str = "INSTALL_OKC_PMK";
+ break;
+ case ST(DHCP_DISCOVER):
+ stat_str = tx ? "TX_DHCP_DISCOVER" : "RX_DHCP_DISCOVER";
+ break;
+ case ST(DHCP_OFFER):
+ stat_str = tx ? "TX_DHCP_OFFER" : "RX_DHCP_OFFER";
+ break;
+ case ST(DHCP_REQUEST):
+ stat_str = tx ? "TX_DHCP_REQUEST" : "RX_DHCP_REQUEST";
+ break;
+ case ST(DHCP_DECLINE):
+ stat_str = tx ? "TX_DHCP_DECLINE" : "RX_DHCP_DECLINE";
+ break;
+ case ST(DHCP_ACK):
+ stat_str = tx ? "TX_DHCP_ACK" : "RX_DHCP_ACK";
+ break;
+ case ST(DHCP_NAK):
+ stat_str = tx ? "TX_DHCP_NAK" : "RX_DHCP_NAK";
+ break;
+ case ST(DHCP_RELEASE):
+ stat_str = tx ? "TX_DHCP_RELEASE" : "RX_DHCP_RELEASE";
+ break;
+ case ST(DHCP_INFORM):
+ stat_str = tx ? "TX_DHCP_INFORM" : "RX_DHCP_INFORM";
+ break;
+ case ST(ICMP_PING_REQ):
+ stat_str = tx ? "TX_ICMP_PING_REQ" : "RX_ICMP_PING_REQ";
+ break;
+ case ST(ICMP_PING_RESP):
+ stat_str = tx ? "TX_ICMP_PING_RESP" : "RX_ICMP_PING_RESP";
+ break;
+ case ST(ICMP_DEST_UNREACH):
+ stat_str = tx ? "TX_ICMP_DEST_UNREACH" : "RX_ICMP_DEST_UNREACH";
+ break;
+ case ST(ICMP_OTHER):
+ stat_str = tx ? "TX_ICMP_OTHER" : "RX_ICMP_OTHER";
+ break;
+ case ST(ARP_REQ):
+ stat_str = tx ? "TX_ARP_REQ" : "RX_ARP_REQ";
+ break;
+ case ST(ARP_RESP):
+ stat_str = tx ? "TX_ARP_RESP" : "RX_ARP_RESP";
+ break;
+ case ST(DNS_QUERY):
+ stat_str = tx ? "TX_DNS_QUERY" : "RX_DNS_QUERY";
+ break;
+ case ST(DNS_RESP):
+ stat_str = tx ? "TX_DNS_RESP" : "RX_DNS_RESP";
+ break;
+ case ST(REASSOC_SUCCESS):
+ stat_str = "REASSOC_SUCCESS";
+ break;
+ case ST(REASSOC_FAILURE):
+ stat_str = "REASSOC_FAILURE";
+ break;
+ case ST(AUTH_TIMEOUT):
+ stat_str = "AUTH_TIMEOUT";
+ break;
+ case ST(AUTH_FAIL):
+ stat_str = "AUTH_FAIL";
+ break;
+ case ST(AUTH_NO_ACK):
+ stat_str = "AUTH_NO_ACK";
+ break;
+ case ST(AUTH_OTHERS):
+ stat_str = "AUTH_FAIL_OTHER_STATUS";
+ break;
+ case ST(ASSOC_TIMEOUT):
+ stat_str = "ASSOC_TIMEOUT";
+ break;
+ case ST(ASSOC_FAIL):
+ stat_str = "ASSOC_FAIL";
+ break;
+ case ST(ASSOC_NO_ACK):
+ stat_str = "ASSOC_NO_ACK";
+ break;
+ case ST(ASSOC_ABORT):
+ stat_str = "ASSOC_ABORT";
+ break;
+ case ST(ASSOC_UNSOLICITED):
+ stat_str = "ASSOC_UNSOLICITED";
+ break;
+ case ST(ASSOC_NO_NETWORKS):
+ stat_str = "ASSOC_NO_NETWORKS";
+ break;
+ case ST(ASSOC_OTHERS):
+ stat_str = "ASSOC_FAIL_OTHER_STATUS";
+ break;
+ case ST(REASSOC_DONE_OTHERS):
+ stat_str = "REASSOC_DONE_OTHER_STATUS";
+ break;
+ default:
+ stat_str = "UNKNOWN_STATUS";
+ break;
+ }
+
+ strncpy(buf, stat_str, max_buf_len);
+ buf[max_buf_len - 1] = '\0';
+}
+
+static void
+dhd_statlog_get_timestamp(stat_elem_t *elem, uint64 *sec, uint64 *usec)
+{
+ uint64 ts_nsec, rem_nsec;
+
+ ts_nsec = elem->ts;
+ rem_nsec = DIV_AND_MOD_U64_BY_U32(ts_nsec, NSEC_PER_SEC);
+ *sec = ts_nsec;
+ *usec = (uint64)(rem_nsec / NSEC_PER_USEC);
+}
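+
+/*
+ * Example: elem->ts = 5123456789 ns yields *sec = 5 and
+ * *usec = 123456789 / NSEC_PER_USEC = 123456.
+ */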
+
+static void
+dhd_statlog_convert_time(stat_elem_t *elem, uint8 *buf, uint32 buflen)
+{
+#if defined(LINUX) || defined(linux)
+ struct rtc_time tm;
+ uint64 ts_sec, rem_usec;
+
+ if (!buf) {
+ DHD_STATLOG_ERR(("%s: buf is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ bzero(buf, buflen);
+ ts_sec = elem->ts_tz;
+ rem_usec = DIV_AND_MOD_U64_BY_U32(ts_sec, USEC_PER_SEC);
+
+ rtc_time_to_tm((unsigned long)ts_sec, &tm);
+ snprintf(buf, buflen, DHD_STATLOG_TZFMT_YYMMDDHHMMSSMS,
+ tm.tm_year - 100, tm.tm_mon + 1, tm.tm_mday,
+ tm.tm_hour, tm.tm_min, tm.tm_sec,
+ (uint32)(rem_usec / USEC_PER_MSEC));
+#endif /* LINUX || linux */
+}
+
+#ifdef DHD_LOG_DUMP
+static int
+dhd_statlog_dump(dhd_statlog_t *statlog, char *buf, uint32 buflen)
+{
+ stat_elem_t *elem;
+ struct bcmstrbuf b;
+ struct bcmstrbuf *strbuf = &b;
+ char stat_str[DHD_STATLOG_STATSTR_BUF_LEN];
+ char ts_str[DHD_STATLOG_TZFMT_BUF_LEN];
+ uint64 sec = 0, usec = 0;
+
+ if (!statlog) {
+ DHD_STATLOG_ERR(("%s: statlog is NULL\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ bcm_binit(strbuf, buf, buflen);
+ bzero(stat_str, sizeof(stat_str));
+ bzero(ts_str, sizeof(ts_str));
+ dhd_ring_whole_lock(statlog->ringbuf);
+ elem = (stat_elem_t *)dhd_ring_get_first(statlog->ringbuf);
+ while (elem) {
+ if (DHD_STATLOG_VALID(elem->stat)) {
+ dhd_statlog_stat_name(stat_str, sizeof(stat_str),
+ elem->stat, elem->dir);
+ dhd_statlog_get_timestamp(elem, &sec, &usec);
+ dhd_statlog_convert_time(elem, ts_str, sizeof(ts_str));
+			bcm_bprintf(strbuf, "[%s][%5lu.%06lu] stat=%s, ifidx=%d, "
+ "reason=%d, status=%d\n", ts_str, (unsigned long)sec,
+ (unsigned long)usec, stat_str, elem->ifidx,
+ elem->reason, elem->status);
+ }
+ elem = (stat_elem_t *)dhd_ring_get_next(statlog->ringbuf, (void *)elem);
+ }
+ dhd_ring_whole_unlock(statlog->ringbuf);
+
+ return (!strbuf->size ? BCME_BUFTOOSHORT : strbuf->size);
+}
+
+int
+dhd_statlog_write_logdump(dhd_pub_t *dhdp, const void *user_buf,
+ void *fp, uint32 len, unsigned long *pos)
+{
+ dhd_statlog_t *statlog;
+ log_dump_section_hdr_t sec_hdr;
+ char *buf;
+ uint32 buflen;
+ int remain_len = 0;
+ int ret = BCME_OK;
+
+ if (!dhdp || !dhdp->statlog) {
+ DHD_STATLOG_ERR(("%s: dhdp or dhdp->statlog is NULL\n",
+ __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ statlog = (dhd_statlog_t *)(dhdp->statlog);
+ if (!statlog->logbuf) {
+ DHD_STATLOG_ERR(("%s: logbuf is NULL\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ buf = statlog->logbuf;
+ buflen = statlog->logbuf_len;
+ bzero(buf, buflen);
+
+ remain_len = dhd_statlog_dump(statlog, buf, buflen);
+ if (remain_len < 0) {
+ DHD_STATLOG_ERR(("%s: failed to write stat info to buffer\n",
+ __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ DHD_STATLOG_INFO(("%s: Start to write statlog\n", __FUNCTION__));
+
+ /* write the section header first */
+ ret = dhd_export_debug_data(STATUS_LOG_HDR, fp, user_buf,
+ strlen(STATUS_LOG_HDR), pos);
+ if (ret < 0) {
+ goto exit;
+ }
+
+ dhd_init_sec_hdr(&sec_hdr);
+ sec_hdr.type = LOG_DUMP_SECTION_STATUS;
+ sec_hdr.length = buflen - remain_len;
+ ret = dhd_export_debug_data((char *)&sec_hdr, fp, user_buf,
+ sizeof(sec_hdr), pos);
+ if (ret < 0) {
+ goto exit;
+ }
+
+ /* write status log info */
+ ret = dhd_export_debug_data(buf, fp, user_buf, buflen - remain_len, pos);
+ if (ret < 0) {
+ DHD_STATLOG_ERR(("%s: failed to write stat info, err=%d\n",
+ __FUNCTION__, ret));
+ }
+
+ DHD_STATLOG_INFO(("%s: Complete to write statlog file, err=%d\n",
+ __FUNCTION__, ret));
+
+exit:
+ return ret;
+}
+#endif /* DHD_LOG_DUMP */
+
+int
+dhd_statlog_generate_bdmask(dhd_pub_t *dhdp, void *reqbuf)
+{
+ dhd_statlog_t *statlog;
+ stat_bdmask_req_t *query;
+ uint8 *req_buf;
+ uint32 req_buf_len;
+ int cnt;
+
+ if (!dhdp || !dhdp->statlog) {
+ DHD_STATLOG_ERR(("%s: dhdp or statlog is NULL\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ if (!reqbuf) {
+ DHD_STATLOG_ERR(("%s: invalid query\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ statlog = dhdp->statlog;
+ query = (stat_bdmask_req_t *)reqbuf;
+ req_buf = query->req_buf;
+ req_buf_len = query->req_buf_len;
+ if (!req_buf) {
+ DHD_STATLOG_ERR(("%s: invalid query\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ bzero(statlog->bdmask, DHD_STAT_BDMASK_SIZE);
+ for (cnt = 0; cnt < req_buf_len; cnt++) {
+ if (DHD_STATLOG_VALID(req_buf[cnt])) {
+ setbit(statlog->bdmask, req_buf[cnt]);
+ }
+ }
+
+ return BCME_OK;
+}
+
+int
+dhd_statlog_get_latest_info(dhd_pub_t *dhdp, void *reqbuf)
+{
+ dhd_statlog_t *statlog;
+ stat_query_t *query;
+ stat_elem_t *elem;
+ uint8 *req_buf, *resp_buf, *sp;
+ uint32 req_buf_len, resp_buf_len, req_num;
+ int i, remain_len, cpcnt = 0;
+ uint8 filter[DHD_STAT_BDMASK_SIZE];
+ bool query_bigdata = FALSE;
+ void *ringbuf;
+
+ if (!dhdp || !dhdp->statlog) {
+ DHD_STATLOG_ERR(("%s: dhdp or statlog is NULL\n",
+ __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ query = (stat_query_t *)reqbuf;
+ if (!query) {
+ DHD_STATLOG_ERR(("%s: invalid query\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ statlog = (dhd_statlog_t *)(dhdp->statlog);
+ req_buf = query->req_buf;
+ req_buf_len = query->req_buf_len;
+ resp_buf = query->resp_buf;
+ resp_buf_len = query->resp_buf_len;
+ req_num = MIN(query->req_num, MAX_STATLOG_REQ_ITEM);
+ if (!resp_buf) {
+ DHD_STATLOG_ERR(("%s: invalid query\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ bzero(filter, sizeof(filter));
+ if (!req_buf || !req_buf_len) {
+ query_bigdata = TRUE;
+ ringbuf = statlog->bdlog_ringbuf;
+ } else {
+ ringbuf = statlog->ringbuf;
+ /* build a filter from req_buf */
+ for (i = 0; i < req_buf_len; i++) {
+ if (DHD_STATLOG_VALID(req_buf[i])) {
+ setbit(filter, req_buf[i]);
+ }
+ }
+ }
+
+ sp = resp_buf;
+ remain_len = resp_buf_len;
+ dhd_ring_whole_lock(ringbuf);
+ elem = (stat_elem_t *)dhd_ring_get_last(ringbuf);
+ while (elem) {
+ if (query_bigdata || isset(filter, elem->stat)) {
+ /* found the status from the list of interests */
+ if (remain_len < sizeof(stat_elem_t)) {
+ dhd_ring_whole_unlock(ringbuf);
+ return BCME_BUFTOOSHORT;
+ }
+ bcopy((char *)elem, sp, sizeof(stat_elem_t));
+ sp += sizeof(stat_elem_t);
+ remain_len -= sizeof(stat_elem_t);
+ cpcnt++;
+ }
+
+ if (cpcnt >= req_num) {
+ break;
+ }
+
+ /* Proceed to next item */
+ elem = (stat_elem_t *)dhd_ring_get_prev(ringbuf, (void *)elem);
+ }
+ dhd_ring_whole_unlock(ringbuf);
+
+ return cpcnt;
+}
+
+int
+dhd_statlog_query(dhd_pub_t *dhdp, char *cmd, int total_len)
+{
+ stat_elem_t *elem = NULL;
+ stat_query_t query;
+ char *pos, *token;
+ uint8 *req_buf = NULL, *resp_buf = NULL;
+ uint32 req_buf_len = 0, resp_buf_len = 0;
+ ulong req_num, stat_num, stat;
+ char stat_str[DHD_STATLOG_STATSTR_BUF_LEN];
+ uint64 sec = 0, usec = 0;
+ int i, resp_num, err = BCME_OK;
+ char ts_str[DHD_STATLOG_TZFMT_BUF_LEN];
+
+ /*
+ * DRIVER QUERY_STAT_LOG <total req num> <stat list num> <stat list>
+	 * Note: use the default status list if the 'stat list num' is zero
+ */
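+	/* e.g. "DRIVER QUERY_STAT_LOG 10 2 17 20" returns up to the 10 most
+	 * recent EAPOL M1 (17) / M4 (20) entries (IDs from dhd_statlog.h)
+	 */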
+ pos = cmd;
+ /* drop command */
+ token = bcmstrtok(&pos, " ", NULL);
+ /* total number of request */
+ token = bcmstrtok(&pos, " ", NULL);
+ if (!token) {
+ err = BCME_BADARG;
+ goto exit;
+ }
+
+ req_num = bcm_strtoul(token, NULL, 0);
+
+ /* total number of status list */
+ token = bcmstrtok(&pos, " ", NULL);
+ if (!token) {
+ err = BCME_BADARG;
+ goto exit;
+ }
+
+ stat_num = bcm_strtoul(token, NULL, 0);
+ if (stat_num) {
+ /* create a status list */
+ req_buf_len = (uint32)(stat_num * sizeof(uint8));
+ req_buf = (uint8 *)MALLOCZ(dhdp->osh, req_buf_len);
+ if (!req_buf) {
+ DHD_STATLOG_ERR(("%s: failed to allocate request buf\n",
+ __FUNCTION__));
+ err = BCME_NOMEM;
+ goto exit;
+ }
+
+ /* parse the status list and update to the request buffer */
+ for (i = 0; i < (uint32)stat_num; i++) {
+ token = bcmstrtok(&pos, " ", NULL);
+ if (!token) {
+ err = BCME_BADARG;
+ goto exit;
+ }
+ stat = bcm_strtoul(token, NULL, 0);
+ req_buf[i] = (uint8)stat;
+ }
+ }
+
+	/* create a response list */
+ resp_buf_len = (uint32)DHD_STATLOG_RING_SIZE(req_num);
+ resp_buf = (uint8 *)MALLOCZ(dhdp->osh, resp_buf_len);
+ if (!resp_buf) {
+ DHD_STATLOG_ERR(("%s: failed to allocate response buf\n",
+ __FUNCTION__));
+ err = BCME_NOMEM;
+ goto exit;
+ }
+
+ /* create query format and query the status */
+ query.req_buf = req_buf;
+ query.req_buf_len = req_buf_len;
+ query.resp_buf = resp_buf;
+ query.resp_buf_len = resp_buf_len;
+ query.req_num = (uint32)req_num;
+ resp_num = dhd_statlog_get_latest_info(dhdp, (void *)&query);
+ if (resp_num < 0) {
+ DHD_STATLOG_ERR(("%s: failed to query the status\n", __FUNCTION__));
+ err = BCME_ERROR;
+ goto exit;
+ }
+
+ /* print out the results */
+ DHD_STATLOG_PRINT(("=============== QUERY RESULT ===============\n"));
+ if (resp_num > 0) {
+ elem = (stat_elem_t *)resp_buf;
+ for (i = 0; i < resp_num; i++) {
+ if (DHD_STATLOG_VALID(elem->stat)) {
+ dhd_statlog_stat_name(stat_str, sizeof(stat_str),
+ elem->stat, elem->dir);
+ dhd_statlog_get_timestamp(elem, &sec, &usec);
+ dhd_statlog_convert_time(elem, ts_str, sizeof(ts_str));
+				DHD_STATLOG_PRINT(("[RAWTS:%llu][%s][%5lu.%06lu] stat=%s,"
+ " ifidx=%d, reason=%d, status=%d\n", elem->ts_tz,
+ ts_str, (unsigned long)sec, (unsigned long)usec,
+ stat_str, elem->ifidx, elem->reason, elem->status));
+ }
+ elem++;
+ }
+ } else {
+ DHD_STATLOG_PRINT(("No data found\n"));
+ }
+
+exit:
+ if (resp_buf) {
+ MFREE(dhdp->osh, resp_buf, resp_buf_len);
+ }
+
+ if (req_buf) {
+ MFREE(dhdp->osh, req_buf, req_buf_len);
+ }
+
+ return err;
+}
+
+void
+dhd_statlog_dump_scr(dhd_pub_t *dhdp)
+{
+ dhd_statlog_t *statlog;
+ stat_elem_t *elem;
+ char stat_str[DHD_STATLOG_STATSTR_BUF_LEN];
+ char ts_str[DHD_STATLOG_TZFMT_BUF_LEN];
+ uint64 sec = 0, usec = 0;
+
+ if (!dhdp || !dhdp->statlog) {
+ DHD_STATLOG_ERR(("%s: dhdp or statlog is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ statlog = (dhd_statlog_t *)(dhdp->statlog);
+ bzero(stat_str, sizeof(stat_str));
+ bzero(ts_str, sizeof(ts_str));
+
+ DHD_STATLOG_PRINT(("=============== START OF CURRENT STATUS INFO ===============\n"));
+ dhd_ring_whole_lock(statlog->ringbuf);
+ elem = (stat_elem_t *)dhd_ring_get_first(statlog->ringbuf);
+ while (elem) {
+ if (DHD_STATLOG_VALID(elem->stat)) {
+ dhd_statlog_stat_name(stat_str, sizeof(stat_str),
+ elem->stat, elem->dir);
+ dhd_statlog_get_timestamp(elem, &sec, &usec);
+ dhd_statlog_convert_time(elem, ts_str, sizeof(ts_str));
+			DHD_STATLOG_PRINT(("[RAWTS:%llu][%s][%5lu.%06lu] stat=%s,"
+ " ifidx=%d, reason=%d, status=%d\n", elem->ts_tz, ts_str,
+ (unsigned long)sec, (unsigned long)usec, stat_str,
+ elem->ifidx, elem->reason, elem->status));
+ }
+ elem = (stat_elem_t *)dhd_ring_get_next(statlog->ringbuf, (void *)elem);
+ }
+ dhd_ring_whole_unlock(statlog->ringbuf);
+ DHD_STATLOG_PRINT(("=============== END OF CURRENT STATUS INFO ===============\n"));
+}
+#endif /* DHD_STATUS_LOGGING */
diff --git a/bcmdhd.101.10.361.x/dhd_statlog.h b/bcmdhd.101.10.361.x/dhd_statlog.h
new file mode 100755
index 0000000..c6dc5cf
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_statlog.h
@@ -0,0 +1,221 @@
+/*
+ * DHD debugability: Header file for the Status Information Logging
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+#ifndef __DHD_STATLOG_H_
+#define __DHD_STATLOG_H_
+
+#ifdef DHD_STATUS_LOGGING
+
+/* status element */
+typedef struct stat_elem {
+ uint16 stat; /* store status */
+ uint64 ts; /* local timestamp(ns) */
+ uint64 ts_tz; /* timestamp applied timezone(us) */
+ uint8 ifidx; /* ifidx */
+ uint8 dir; /* direction (TX/RX) */
+ uint8 reason; /* reason code from dongle */
+ uint8 status; /* status code from dongle */
+ uint8 resv[2]; /* reserved for future use */
+} stat_elem_t;
+
+/* status logging info */
+#define DHD_STAT_BDMASK_SIZE 16
+typedef struct dhd_statlog {
+ uint8 *logbuf; /* log buffer */
+ uint32 logbuf_len; /* length of the log buffer */
+ void *ringbuf; /* fixed ring buffer */
+ uint32 bufsize; /* size of ring buffer */
+ void *bdlog_ringbuf; /* fixed ring buffer for bigdata logging */
+ uint32 bdlog_bufsize; /* size of ring buffer for bigdata logging */
+ uint8 bdmask[DHD_STAT_BDMASK_SIZE]; /* bitmask for bigdata */
+} dhd_statlog_t;
+
+/* status query format */
+typedef struct stat_query {
+ uint8 *req_buf; /* request buffer to interested status */
+ uint32 req_buf_len; /* length of the request buffer */
+ uint8 *resp_buf; /* response buffer */
+ uint32 resp_buf_len; /* length of the response buffer */
+ uint32 req_num; /* total number of items to query */
+} stat_query_t;
+
+/* bitmask generation request format */
+typedef struct stat_bdmask_req {
+	uint8 *req_buf;			/* request buffer to generate bitmask */
+ uint32 req_buf_len; /* length of the request buffer */
+} stat_bdmask_req_t;
+
+typedef void * dhd_statlog_handle_t; /* opaque handle to status log */
+
+/* enums */
+#define ST(x) STATE_## x
+#define STDIR(x) STATE_DIR_## x
+
+/* status direction */
+typedef enum stat_log_dir {
+ STDIR(TX) = 1,
+ STDIR(RX) = 2,
+ STDIR(MAX) = 3
+} stat_dir_t;
+
+/* status definition */
+typedef enum stat_log_stat {
+ ST(INVALID) = 0, /* invalid status */
+ ST(WLAN_POWER_ON) = 1, /* Wi-Fi Power on */
+ ST(WLAN_POWER_OFF) = 2, /* Wi-Fi Power off */
+ ST(ASSOC_START) = 3, /* connect to the AP triggered by upper layer */
+ ST(AUTH_DONE) = 4, /* complete to authenticate with the AP */
+ ST(ASSOC_REQ) = 5, /* send or receive Assoc Req */
+ ST(ASSOC_RESP) = 6, /* send or receive Assoc Resp */
+	ST(ASSOC_DONE) = 7,		/* complete to associate with the AP */
+	ST(DISASSOC_START) = 8,	/* disconnect from the associated AP by upper layer */
+	ST(DISASSOC_INT_START) = 9,	/* initiate the disassoc by DHD */
+	ST(DISASSOC_DONE) = 10,	/* complete to disconnect from the associated AP */
+ ST(DISASSOC) = 11, /* send or receive Disassoc */
+ ST(DEAUTH) = 12, /* send or receive Deauth */
+ ST(LINKDOWN) = 13, /* receive the link down event */
+ ST(REASSOC_START) = 14, /* reassoc the candidate AP */
+ ST(REASSOC_INFORM) = 15, /* inform reassoc completion to upper layer */
+ ST(REASSOC_DONE) = 16, /* complete to reassoc */
+ ST(EAPOL_M1) = 17, /* send or receive the EAPOL M1 */
+ ST(EAPOL_M2) = 18, /* send or receive the EAPOL M2 */
+ ST(EAPOL_M3) = 19, /* send or receive the EAPOL M3 */
+ ST(EAPOL_M4) = 20, /* send or receive the EAPOL M4 */
+ ST(EAPOL_GROUPKEY_M1) = 21, /* send or receive the EAPOL Group key handshake M1 */
+ ST(EAPOL_GROUPKEY_M2) = 22, /* send or receive the EAPOL Group key handshake M2 */
+ ST(EAP_REQ_IDENTITY) = 23, /* send or receive the EAP REQ IDENTITY */
+ ST(EAP_RESP_IDENTITY) = 24, /* send or receive the EAP RESP IDENTITY */
+ ST(EAP_REQ_TLS) = 25, /* send or receive the EAP REQ TLS */
+ ST(EAP_RESP_TLS) = 26, /* send or receive the EAP RESP TLS */
+ ST(EAP_REQ_LEAP) = 27, /* send or receive the EAP REQ LEAP */
+ ST(EAP_RESP_LEAP) = 28, /* send or receive the EAP RESP LEAP */
+ ST(EAP_REQ_TTLS) = 29, /* send or receive the EAP REQ TTLS */
+ ST(EAP_RESP_TTLS) = 30, /* send or receive the EAP RESP TTLS */
+ ST(EAP_REQ_AKA) = 31, /* send or receive the EAP REQ AKA */
+ ST(EAP_RESP_AKA) = 32, /* send or receive the EAP RESP AKA */
+ ST(EAP_REQ_PEAP) = 33, /* send or receive the EAP REQ PEAP */
+ ST(EAP_RESP_PEAP) = 34, /* send or receive the EAP RESP PEAP */
+ ST(EAP_REQ_FAST) = 35, /* send or receive the EAP REQ FAST */
+ ST(EAP_RESP_FAST) = 36, /* send or receive the EAP RESP FAST */
+ ST(EAP_REQ_PSK) = 37, /* send or receive the EAP REQ PSK */
+ ST(EAP_RESP_PSK) = 38, /* send or receive the EAP RESP PSK */
+ ST(EAP_REQ_AKAP) = 39, /* send or receive the EAP REQ AKAP */
+ ST(EAP_RESP_AKAP) = 40, /* send or receive the EAP RESP AKAP */
+ ST(EAP_SUCCESS) = 41, /* send or receive the EAP SUCCESS */
+ ST(EAP_FAILURE) = 42, /* send or receive the EAP FAILURE */
+ ST(EAPOL_START) = 43, /* send or receive the EAPOL-START */
+ ST(WSC_START) = 44, /* send or receive the WSC START */
+ ST(WSC_DONE) = 45, /* send or receive the WSC DONE */
+ ST(WPS_M1) = 46, /* send or receive the WPS M1 */
+ ST(WPS_M2) = 47, /* send or receive the WPS M2 */
+ ST(WPS_M3) = 48, /* send or receive the WPS M3 */
+ ST(WPS_M4) = 49, /* send or receive the WPS M4 */
+ ST(WPS_M5) = 50, /* send or receive the WPS M5 */
+ ST(WPS_M6) = 51, /* send or receive the WPS M6 */
+ ST(WPS_M7) = 52, /* send or receive the WPS M7 */
+ ST(WPS_M8) = 53, /* send or receive the WPS M8 */
+ ST(8021X_OTHER) = 54, /* send or receive the other 8021X frames */
+ ST(INSTALL_KEY) = 55, /* install the key */
+ ST(DELETE_KEY) = 56, /* remove the key */
+ ST(INSTALL_PMKSA) = 57, /* install PMKID information */
+ ST(INSTALL_OKC_PMK) = 58, /* install PMKID information for OKC */
+ ST(DHCP_DISCOVER) = 59, /* send or recv DHCP Discover */
+ ST(DHCP_OFFER) = 60, /* send or recv DHCP Offer */
+ ST(DHCP_REQUEST) = 61, /* send or recv DHCP Request */
+ ST(DHCP_DECLINE) = 62, /* send or recv DHCP Decline */
+ ST(DHCP_ACK) = 63, /* send or recv DHCP ACK */
+ ST(DHCP_NAK) = 64, /* send or recv DHCP NACK */
+ ST(DHCP_RELEASE) = 65, /* send or recv DHCP Release */
+ ST(DHCP_INFORM) = 66, /* send or recv DHCP Inform */
+ ST(ICMP_PING_REQ) = 67, /* send or recv ICMP PING Req */
+ ST(ICMP_PING_RESP) = 68, /* send or recv ICMP PING Resp */
+ ST(ICMP_DEST_UNREACH) = 69, /* send or recv ICMP DEST UNREACH message */
+ ST(ICMP_OTHER) = 70, /* send or recv other ICMP */
+ ST(ARP_REQ) = 71, /* send or recv ARP Req */
+ ST(ARP_RESP) = 72, /* send or recv ARP Resp */
+ ST(DNS_QUERY) = 73, /* send or recv DNS Query */
+ ST(DNS_RESP) = 74, /* send or recv DNS Resp */
+ ST(REASSOC_SUCCESS) = 75, /* reassociation success */
+ ST(REASSOC_FAILURE) = 76, /* reassociation failure */
+ ST(AUTH_TIMEOUT) = 77, /* authentication timeout */
+ ST(AUTH_FAIL) = 78, /* authentication failure */
+ ST(AUTH_NO_ACK) = 79, /* authentication failure due to no ACK */
+ ST(AUTH_OTHERS) = 80, /* authentication failure with other status */
+ ST(ASSOC_TIMEOUT) = 81, /* association timeout */
+ ST(ASSOC_FAIL) = 82, /* association failure */
+ ST(ASSOC_NO_ACK) = 83, /* association failure due to no ACK */
+ ST(ASSOC_ABORT) = 84, /* association abort */
+ ST(ASSOC_UNSOLICITED) = 85, /* association unsolicited */
+ ST(ASSOC_NO_NETWORKS) = 86, /* association failure due to no networks */
+	ST(ASSOC_OTHERS) = 87,		/* association failure with other status */
+ ST(REASSOC_DONE_OTHERS) = 88, /* complete to reassoc with other reason */
+ ST(MAX) = 89 /* Max Status */
+} stat_log_stat_t;
+
+/* functions */
+extern dhd_statlog_handle_t *dhd_attach_statlog(dhd_pub_t *dhdp, uint32 num_items,
+ uint32 bdlog_num_items, uint32 logbuf_len);
+extern void dhd_detach_statlog(dhd_pub_t *dhdp);
+extern int dhd_statlog_ring_log_data(dhd_pub_t *dhdp, uint16 stat, uint8 ifidx,
+ uint8 dir, bool cond);
+extern int dhd_statlog_ring_log_data_reason(dhd_pub_t *dhdp, uint16 stat,
+ uint8 ifidx, uint8 dir, uint16 reason);
+extern int dhd_statlog_ring_log_ctrl(dhd_pub_t *dhdp, uint16 stat, uint8 ifidx,
+ uint16 reason);
+extern int dhd_statlog_process_event(dhd_pub_t *dhdp, int type, uint8 ifidx,
+ uint16 status, uint16 reason, uint16 flags);
+extern int dhd_statlog_get_latest_info(dhd_pub_t *dhdp, void *reqbuf);
+extern void dhd_statlog_dump_scr(dhd_pub_t *dhdp);
+extern int dhd_statlog_query(dhd_pub_t *dhdp, char *cmd, int total_len);
+extern uint32 dhd_statlog_get_logbuf_len(dhd_pub_t *dhdp);
+extern void *dhd_statlog_get_logbuf(dhd_pub_t *dhdp);
+extern int dhd_statlog_generate_bdmask(dhd_pub_t *dhdp, void *reqbuf);
+#ifdef DHD_LOG_DUMP
+extern int dhd_statlog_write_logdump(dhd_pub_t *dhdp, const void *user_buf,
+ void *fp, uint32 len, unsigned long *pos);
+#endif /* DHD_LOG_DUMP */
+
+/* macros */
+#define MAX_STATLOG_ITEM 512
+#define MAX_STATLOG_REQ_ITEM 32
+#define STATLOG_LOGBUF_LEN (64 * 1024)
+#define DHD_STATLOG_VERSION_V1 0x1
+#define DHD_STATLOG_VERSION DHD_STATLOG_VERSION_V1
+#define DHD_STATLOG_ITEM_SIZE (sizeof(stat_elem_t))
+#define DHD_STATLOG_RING_SIZE(items) ((items) * (DHD_STATLOG_ITEM_SIZE))
+#define DHD_STATLOG_STATSTR_BUF_LEN 32
+#define DHD_STATLOG_TZFMT_BUF_LEN 20
+#define DHD_STATLOG_TZFMT_YYMMDDHHMMSSMS "%02d%02d%02d%02d%02d%02d%04d"
+
+#define DHD_STATLOG_CTRL(dhdp, stat, ifidx, reason) \
+ dhd_statlog_ring_log_ctrl((dhdp), (stat), (ifidx), (reason))
+#define DHD_STATLOG_DATA(dhdp, stat, ifidx, dir, cond) \
+ dhd_statlog_ring_log_data((dhdp), (stat), (ifidx), (dir), (cond))
+#define DHD_STATLOG_DATA_RSN(dhdp, stat, ifidx, dir, reason) \
+ dhd_statlog_ring_log_data_reason((dhdp), (stat), (ifidx), \
+ (dir), (reason))
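+
+/*
+ * Usage sketch (hypothetical call site, assuming a valid dhd_pub_t *dhdp and
+ * DHD_STATUS_LOGGING enabled): a control-path power-on event on ifidx 0 with
+ * no reason code, and a TX-direction EAPOL M1 data event, could be logged as:
+ *
+ *	DHD_STATLOG_CTRL(dhdp, ST(WLAN_POWER_ON), 0, 0);
+ *	DHD_STATLOG_DATA(dhdp, ST(EAPOL_M1), 0, STDIR(TX), TRUE);
+ *
+ * Both expand to the dhd_statlog_ring_log_*() calls declared above.
+ */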
+
+#endif /* DHD_STATUS_LOGGING */
+#endif /* __DHD_STATLOG_H_ */
diff --git a/bcmdhd.101.10.361.x/dhd_timesync.c b/bcmdhd.101.10.361.x/dhd_timesync.c
new file mode 100755
index 0000000..618d234
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_timesync.c
@@ -0,0 +1,1239 @@
+/**
+ * @file Broadcom Dongle Host Driver (DHD), time sync protocol handler
+ *
+ * timesync messages are exchanged between the host and device to synchronize the source time
+ * for ingress and egress packets.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$:
+ */
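+
+/*
+ * Rough message flow, as implemented below (a reading aid, not a spec):
+ *
+ *   1. The firmware posts a timestamp message (BCMMSGBUF_FW_CLOCK_INFO_TAG
+ *      TLVs), handled by dhd_timesync_handle_fw_timestamp().
+ *   2. That handler arms fwts2hsts_delay_wdcount, which
+ *      dhd_timesync_watchdog() counts down on each watchdog tick.
+ *   3. When the count hits zero, the host replies with its own clock info
+ *      via dhd_timesync_send_host_clk_info().
+ */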
+
+#include <typedefs.h>
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <bcmdevs.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_bus.h>
+#include <dhd_proto.h>
+#include <dhd_dbg.h>
+#include <dhd_timesync.h>
+#include <bcmpcie.h>
+#include <bcmmsgbuf.h>
+
+extern void dhd_msgbuf_delay_post_ts_bufs(dhd_pub_t *dhd);
+
+#define MAX_FW_CLKINFO_TYPES 8
+#define MAX_SIZE_FW_CLKINFO_TYPE (MAX_FW_CLKINFO_TYPES * sizeof(ts_fw_clock_info_t))
+
+#define MAX_FW_TS_LOG_SAMPLES 64
+
+#define BCMMSGBUF_HOST_TS_BADTAG 0xF0
+
+#define DHD_DEFAULT_TIMESYNC_TIMER_VALUE 20 /* ms */
+#define DHD_DEFAULT_TIMESYNC_TIMER_VALUE_MAX 9000 /* ms */
+
+#define MAX_TS_LOG_SAMPLES_DATA 128
+#define TS_NODROP_CONFIG_TO 1
+#define TS_DROP_CONFIG_TO 5
+
+typedef struct clksrc_ts_log {
+ uchar name[4];
+ uint32 inuse;
+ ts_timestamp_srcid_t log[MAX_FW_TS_LOG_SAMPLES];
+} clksrc_ts_log_t;
+
+typedef struct clk_ts_log {
+ uint32 clk_ts_inited;
+ uint32 cur_idx;
+ uint32 seqnum[MAX_FW_TS_LOG_SAMPLES];
+ clksrc_ts_log_t ts_log[MAX_CLKSRC_ID+1];
+} clk_ts_log_t;
+
+typedef struct dhd_ts_xt_id {
+ uint16 host_timestamping_config;
+ uint16 host_clock_selection;
+ uint16 host_clk_info;
+ uint16 d2h_clk_correction;
+} dhd_ts_xt_id_t;
+
+typedef struct dhd_ts_log_ts_item {
+	uint16 flowid;		/* flow ID */
+	uint8 intf;		/* interface */
+ uint8 rsvd;
+ uint32 ts_low; /* time stamp values */
+ uint32 ts_high; /* time stamp values */
+ uint32 proto;
+ uint32 t1;
+ uint32 t2;
+} dhd_ts_log_ts_item_t;
+
+typedef struct dhd_ts_log_ts {
+ uint32 max_idx;
+ uint32 cur_idx;
+ dhd_ts_log_ts_item_t ts_log[MAX_TS_LOG_SAMPLES_DATA];
+} dhd_ts_log_ts_t;
+
+#define MAX_BUF_SIZE_HOST_CLOCK_INFO 512
+
+#define HOST_TS_CONFIG_FW_TIMESTAMP_PERIOD_DEFAULT 1000
+
+struct dhd_ts {
+ dhd_pub_t *dhdp;
+ osl_t *osh;
+ uint32 xt_id;
+ uint16 host_ts_capture_cnt;
+ uint32 fw_ts_capture_cnt;
+ uint32 fw_ts_disc_cnt;
+ uint32 h_clkid_min;
+ uint32 h_clkid_max;
+ uint32 h_tsconf_period;
+
+	/* should these be per clock source? */
+ ts_correction_m_t correction_m;
+ ts_correction_m_t correction_b;
+
+ ts_fw_clock_info_t fw_tlv[MAX_FW_CLKINFO_TYPES];
+ uint32 fw_tlv_len;
+ clk_ts_log_t fw_ts_log;
+ uchar host_ts_host_clk_info_buffer[MAX_BUF_SIZE_HOST_CLOCK_INFO];
+ bool host_ts_host_clk_info_buffer_in_use;
+ dhd_ts_xt_id_t xt_ids;
+ uint32 active_ipc_version;
+
+ uint32 fwts2hsts_delay;
+ uint32 fwts2hsts_delay_wdcount;
+ uint32 ts_watchdog_calls;
+ uint64 last_ts_watchdog_time;
+ uint32 pending_requests;
+
+ dhd_ts_log_ts_t tx_timestamps;
+ dhd_ts_log_ts_t rx_timestamps;
+ /* outside modules could stop timesync independent of the user config */
+ bool timesync_disabled;
+ uint32 host_reset_cnt;
+ bool nodrop_config;
+
+ uint32 suspend_req;
+ uint32 resume_req;
+};
+struct dhd_ts *g_dhd_ts;
+static uint32 dhd_timesync_send_D2H_clk_correction(dhd_ts_t *ts);
+static uint32 dhd_timesync_send_host_clk_info(dhd_ts_t *ts);
+static uint32 dhd_timesync_send_host_clock_selection(dhd_ts_t *ts);
+static uint32 dhd_timesync_send_host_timestamping_config(dhd_ts_t *ts, bool inject_err);
+static void dhd_timesync_ts_log_dump_item(dhd_ts_log_ts_t *tsl, struct bcmstrbuf *b);
+
+/* Check for and handle local prot-specific iovar commands */
+
+enum {
+ IOV_TS_INFO_DUMP,
+ IOV_TS_TX_TS_DUMP,
+ IOV_TS_RX_TS_DUMP,
+ IOV_TS_FW_CLKINFO_DUMP,
+ IOV_TS_HCLK_CLKID_MIN,
+ IOV_TS_HCLK_CLKID_MAX,
+ IOV_TS_HTSCONF_PERIOD,
+ IOV_TS_SEND_TSCONFIG,
+ IOV_TS_SEND_HCLK_SEL,
+ IOV_TS_SEND_HCLK_INFO,
+ IOV_TS_SEND_D2H_CRCT,
+ IOV_TS_TXS_LOG,
+ IOV_TS_RXS_LOG,
+ IOV_TS_INJECT_BAD_XTID,
+ IOV_TS_INJECT_BAD_TAG,
+ IOV_TS_FWTS2HSTS_DELAY,
+ IOV_TS_NODROP_CONFIG,
+ IOV_TS_CLEAR_LOGS,
+ IOV_TS_NO_RETRY,
+ IOV_TS_NO_AGGR,
+ IOV_TS_FIXED_RATE,
+ IOV_LAST
+};
+const bcm_iovar_t dhd_ts_iovars[] = {
+ {"ts_info_dump", IOV_TS_INFO_DUMP, 0, 0, IOVT_BUFFER, DHD_IOCTL_MAXLEN },
+ {"ts_tx_ts_dump", IOV_TS_TX_TS_DUMP, 0, 0, IOVT_BUFFER, DHD_IOCTL_MAXLEN },
+ {"ts_rx_ts_dump", IOV_TS_RX_TS_DUMP, 0, 0, IOVT_BUFFER, DHD_IOCTL_MAXLEN },
+ {"ts_fw_clkinfo_dump", IOV_TS_FW_CLKINFO_DUMP, 0, 0, IOVT_BUFFER, DHD_IOCTL_MAXLEN },
+ {"ts_hclk_clkid_min", IOV_TS_HCLK_CLKID_MIN, 0, 0, IOVT_UINT32, 0 },
+ {"ts_hclk_clkid_max", IOV_TS_HCLK_CLKID_MAX, 0, 0, IOVT_UINT32, 0 },
+ {"ts_htsconf_period", IOV_TS_HTSCONF_PERIOD, 0, 0, IOVT_UINT32, 0 },
+ {"ts_send_tsconfig", IOV_TS_SEND_TSCONFIG, 0, 0, IOVT_UINT32, 0 },
+ {"ts_send_hostclk_sel", IOV_TS_SEND_HCLK_SEL, 0, 0, IOVT_UINT32, 0 },
+ {"ts_send_hostclk_info", IOV_TS_SEND_HCLK_INFO, 0, 0, IOVT_UINT32, 0 },
+ {"ts_send_d2h_corect ", IOV_TS_SEND_D2H_CRCT, 0, 0, IOVT_UINT32, 0 },
+ {"ts_txs_log", IOV_TS_TXS_LOG, 0, 0, IOVT_UINT32, 0 },
+ {"ts_rxs_log", IOV_TS_RXS_LOG, 0, 0, IOVT_UINT32, 0 },
+
+ /* error injection cases */
+ {"ts_inject_bad_xtid", IOV_TS_INJECT_BAD_XTID, 0, 0, IOVT_UINT32, 0 },
+ {"ts_inject_bad_tag", IOV_TS_INJECT_BAD_TAG, 0, 0, IOVT_UINT32, 0 },
+ {"ts_fwts2hsts_delay", IOV_TS_FWTS2HSTS_DELAY, 0, 0, IOVT_UINT32, 0 },
+ {"ts_nodrop_config", IOV_TS_NODROP_CONFIG, 0, 0, IOVT_UINT32, 0 },
+ {"ts_clear_logs", IOV_TS_CLEAR_LOGS, 0, 0, IOVT_UINT32, 0 },
+ {"ts_set_no_retry", IOV_TS_NO_RETRY, 0, 0, IOVT_UINT32, 0 },
+ {"ts_set_no_aggr", IOV_TS_NO_AGGR, 0, 0, IOVT_UINT32, 0 },
+ {"ts_set_fixed_rate", IOV_TS_FIXED_RATE, 0, 0, IOVT_UINT32, 0 },
+
+ {NULL, 0, 0, 0, 0, 0 }
+};
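+
+/*
+ * These iovars are dispatched through dhd_timesync_iovar_op() below. A
+ * userspace sketch (the "dhd" utility invocation and interface name are
+ * illustrative assumptions, not something this file defines):
+ *
+ *	dhd -i wlan0 ts_info_dump
+ *	dhd -i wlan0 ts_htsconf_period 1000
+ */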
+
+static int dhd_ts_fw_clksrc_dump(dhd_ts_t *ts, char *buf, int buflen);
+#ifdef CONFIG_PROC_FS
+static int dhd_open_proc_ts_fw_clk_dump(struct inode *inode, struct file *file);
+ssize_t dhd_read_proc_ts_fw_clk_dump(struct file *file, char *user_buf, size_t count, loff_t *loff);
+static int dhd_open_proc_ts_tx_dump(struct inode *inode, struct file *file);
+ssize_t dhd_read_proc_ts_tx_dump(struct file *file, char *user_buf, size_t count, loff_t *loff);
+static int dhd_open_proc_ts_rx_dump(struct inode *inode, struct file *file);
+ssize_t dhd_read_proc_ts_rx_dump(struct file *file, char *user_buf, size_t count, loff_t *loff);
+
+static int
+dhd_open_proc_ts_fw_clk_dump(struct inode *inode, struct file *file)
+{
+	return single_open(file, NULL, NULL);
+}
+ssize_t
+dhd_read_proc_ts_fw_clk_dump(struct file *file, char *user_buf, size_t count, loff_t *loff)
+{
+ dhd_ts_t *ts;
+ char *buf;
+ ssize_t ret = 0;
+
+ ts = g_dhd_ts;
+ if (ts == NULL) {
+ return -EAGAIN;
+ }
+ if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(ts->dhdp)) {
+ DHD_INFO(("%s bus is in suspend or suspend in progress\n", __func__));
+ return -EAGAIN;
+ }
+ if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(ts->dhdp)) {
+ DHD_ERROR(("%s rmmod in progress\n", __func__));
+ return -ENOENT;
+ }
+ buf = kmalloc(count, GFP_KERNEL);
+ if (buf == NULL) {
+ DHD_ERROR(("%s failed to allocate buf with size %zu\n", __func__, count));
+ return -ENOMEM;
+ }
+ ret = dhd_ts_fw_clksrc_dump(ts, buf, count);
+	if (ret < 0) {
+		kfree(buf);
+		return 0;
+	}
+ ret = simple_read_from_buffer(user_buf, count, loff, buf, (count - ret));
+ kfree(buf);
+ return ret;
+}
+static int dhd_open_proc_ts_tx_dump(struct inode *inode, struct file *file)
+{
+	return single_open(file, NULL, NULL);
+}
+ssize_t
+dhd_read_proc_ts_tx_dump(struct file *file, char *user_buf, size_t count, loff_t *loff)
+{
+ dhd_ts_t *ts;
+ char *buf;
+ ssize_t ret = 0;
+ struct bcmstrbuf strbuf;
+
+ ts = g_dhd_ts;
+ if (ts == NULL) {
+ return -EAGAIN;
+ }
+ if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(ts->dhdp)) {
+ DHD_INFO(("%s bus is in suspend or suspend in progress\n", __func__));
+ return -EAGAIN;
+ }
+ if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(ts->dhdp)) {
+ DHD_ERROR(("%s rmmod in progress\n", __func__));
+ return -ENOENT;
+ }
+ buf = kmalloc(count, GFP_KERNEL);
+ if (buf == NULL) {
+ DHD_ERROR(("%s failed to allocate buf with size %zu\n", __func__, count));
+ return -ENOMEM;
+ }
+ bcm_binit(&strbuf, buf, count);
+ bcm_bprintf(&strbuf, "Tx Log dump\n");
+ dhd_timesync_ts_log_dump_item(&ts->tx_timestamps, &strbuf);
+ ret = simple_read_from_buffer(user_buf, count, loff, buf, (count - strbuf.size));
+ kfree(buf);
+ return ret;
+}
+
+static int dhd_open_proc_ts_rx_dump(struct inode *inode, struct file *file)
+{
+	return single_open(file, NULL, NULL);
+}
+
+ssize_t
+dhd_read_proc_ts_rx_dump(struct file *file, char *user_buf, size_t count, loff_t *loff)
+{
+ dhd_ts_t *ts;
+ char *buf;
+ ssize_t ret = 0;
+ struct bcmstrbuf strbuf;
+
+ ts = g_dhd_ts;
+ if (ts == NULL) {
+ return -EAGAIN;
+ }
+ if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(ts->dhdp)) {
+ DHD_INFO(("%s bus is in suspend or suspend in progress\n", __func__));
+ return -EAGAIN;
+ }
+ if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(ts->dhdp)) {
+ DHD_ERROR(("%s rmmod in progress\n", __func__));
+ return -ENOENT;
+ }
+ buf = kmalloc(count, GFP_KERNEL);
+ if (buf == NULL) {
+ DHD_ERROR(("%s failed to allocate buf with size %zu\n", __func__, count));
+ return -ENOMEM;
+ }
+ bcm_binit(&strbuf, buf, count);
+ bcm_bprintf(&strbuf, "Rx Log dump\n");
+ dhd_timesync_ts_log_dump_item(&ts->rx_timestamps, &strbuf);
+ ret = simple_read_from_buffer(user_buf, count, loff, buf, (count - strbuf.size));
+ kfree(buf);
+ return ret;
+}
+
+static const struct file_operations proc_fops_ts_fw_clk_dump = {
+ .read = dhd_read_proc_ts_fw_clk_dump,
+ .open = dhd_open_proc_ts_fw_clk_dump,
+	.release = single_release,
+};
+static const struct file_operations proc_fops_ts_tx_dump = {
+ .read = dhd_read_proc_ts_tx_dump,
+ .open = dhd_open_proc_ts_tx_dump,
+	.release = single_release,
+};
+static const struct file_operations proc_fops_ts_rx_dump = {
+ .read = dhd_read_proc_ts_rx_dump,
+ .open = dhd_open_proc_ts_rx_dump,
+	.release = single_release,
+};
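+
+/*
+ * The entries registered in dhd_timesync_attach() land directly under /proc,
+ * so the dumps can be read (root only, per S_IRUSR) with e.g.:
+ *
+ *	cat /proc/ts_fw_clk_dump
+ *	cat /proc/ts_tx_dump
+ *	cat /proc/ts_rx_dump
+ */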
+#endif /* CONFIG_PROC_FS */
+
+int
+dhd_timesync_detach(dhd_pub_t *dhdp)
+{
+ dhd_ts_t *ts;
+
+ DHD_TRACE(("%s: %d\n", __FUNCTION__, __LINE__));
+
+ if (!dhdp) {
+ return BCME_OK;
+ }
+ ts = dhdp->ts;
+#ifdef CONFIG_PROC_FS
+ remove_proc_entry("ts_fw_clk_dump", NULL);
+ remove_proc_entry("ts_tx_dump", NULL);
+ remove_proc_entry("ts_rx_dump", NULL);
+#endif /* CONFIG_PROC_FS */
+#ifndef CONFIG_DHD_USE_STATIC_BUF
+ MFREE(dhdp->osh, ts, sizeof(dhd_ts_t));
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+ g_dhd_ts = NULL;
+ dhdp->ts = NULL;
+ DHD_INFO(("Deallocated DHD TS\n"));
+ return BCME_OK;
+}
+int
+dhd_timesync_attach(dhd_pub_t *dhdp)
+{
+ dhd_ts_t *ts;
+
+ DHD_TRACE(("%s: %d\n", __FUNCTION__, __LINE__));
+ /* Allocate prot structure */
+ if (!(ts = (dhd_ts_t *)DHD_OS_PREALLOC(dhdp, DHD_PREALLOC_PROT,
+ sizeof(dhd_ts_t)))) {
+ DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
+ goto fail;
+ }
+ memset(ts, 0, sizeof(*ts));
+
+ g_dhd_ts = ts;
+ ts->osh = dhdp->osh;
+ dhdp->ts = ts;
+ ts->dhdp = dhdp;
+
+ ts->correction_m.low = 1;
+ ts->correction_m.high = 1;
+
+ ts->correction_b.low = 0;
+	ts->correction_b.high = 0;
+
+ ts->fwts2hsts_delay = DHD_DEFAULT_TIMESYNC_TIMER_VALUE;
+ ts->fwts2hsts_delay_wdcount = 0;
+
+ ts->tx_timestamps.max_idx = MAX_TS_LOG_SAMPLES_DATA;
+ ts->rx_timestamps.max_idx = MAX_TS_LOG_SAMPLES_DATA;
+
+ ts->xt_id = 1;
+
+ DHD_INFO(("allocated DHD TS\n"));
+
+#ifdef CONFIG_PROC_FS
+ if (proc_create("ts_fw_clk_dump", S_IRUSR, NULL, &proc_fops_ts_fw_clk_dump) == NULL) {
+ DHD_ERROR(("Failed to create /proc/ts_fw_clk_dump procfs interface\n"));
+ }
+ if (proc_create("ts_tx_dump", S_IRUSR, NULL, &proc_fops_ts_tx_dump) == NULL) {
+ DHD_ERROR(("Failed to create /proc/ts_tx_dump procfs interface\n"));
+ }
+ if (proc_create("ts_rx_dump", S_IRUSR, NULL, &proc_fops_ts_rx_dump) == NULL) {
+ DHD_ERROR(("Failed to create /proc/ts_rx_dump procfs interface\n"));
+ }
+#endif /* CONFIG_PROC_FS */
+
+ return BCME_OK;
+
+fail:
+ if (dhdp->ts != NULL) {
+ dhd_timesync_detach(dhdp);
+ }
+ return BCME_NOMEM;
+}
+
+static void
+dhd_timesync_ts_log_dump_item(dhd_ts_log_ts_t *tsl, struct bcmstrbuf *b)
+{
+ uint32 i = 0;
+
+ bcm_bprintf(b, "Max_idx: %d, cur_idx %d\n", tsl->max_idx, tsl->cur_idx);
+ for (i = 0; i < tsl->max_idx; i++) {
+ bcm_bprintf(b, "\t idx: %03d, (%d: %d) timestamp: 0x%08x:0x%08x "
+ " proto: %02d, t1: 0x%08x t2: 0x%08x\n",
+ i, tsl->ts_log[i].intf, tsl->ts_log[i].flowid,
+ tsl->ts_log[i].ts_high, tsl->ts_log[i].ts_low,
+ tsl->ts_log[i].proto, tsl->ts_log[i].t1,
+ tsl->ts_log[i].t2);
+ }
+}
+
+static int
+dhd_timesync_ts_log_dump(dhd_ts_t *ts, char *buf, int buflen, bool tx)
+{
+ struct bcmstrbuf b;
+ struct bcmstrbuf *strbuf = &b;
+
+ bcm_binit(strbuf, buf, buflen);
+
+ if (tx) {
+ bcm_bprintf(strbuf, "Tx Log dump\t");
+ dhd_timesync_ts_log_dump_item(&ts->tx_timestamps, strbuf);
+ }
+ else {
+ bcm_bprintf(strbuf, "Rx Log dump\n");
+ dhd_timesync_ts_log_dump_item(&ts->rx_timestamps, strbuf);
+ }
+ return BCME_OK;
+}
+
+static void
+dhd_timesync_clear_logs(dhd_ts_t *ts)
+{
+ dhd_ts_log_ts_t *tsl;
+
+ tsl = &ts->rx_timestamps;
+ tsl->cur_idx = 0;
+ memset(tsl->ts_log, 0, sizeof(dhd_ts_log_ts_item_t) *
+ MAX_TS_LOG_SAMPLES_DATA);
+
+ tsl = &ts->tx_timestamps;
+ tsl->cur_idx = 0;
+ memset(tsl->ts_log, 0, sizeof(dhd_ts_log_ts_item_t) *
+ MAX_TS_LOG_SAMPLES_DATA);
+
+ return;
+}
+
+void
+dhd_timesync_debug_info_print(dhd_pub_t *dhdp)
+{
+ dhd_ts_t *ts = dhdp->ts;
+ uint64 current_time;
+
+ if (!ts) {
+ DHD_ERROR(("%s: %d ts is NULL\n", __FUNCTION__, __LINE__));
+ return;
+ }
+
+ DHD_ERROR(("\nts info dump: active_ipc_version %d\n", ts->active_ipc_version));
+ current_time = OSL_LOCALTIME_NS();
+ DHD_ERROR(("current_time="SEC_USEC_FMT" last_ts_watchdog_time="SEC_USEC_FMT"\n",
+ GET_SEC_USEC(current_time), GET_SEC_USEC(ts->last_ts_watchdog_time)));
+ DHD_ERROR(("timesync disabled %d\n", ts->timesync_disabled));
+ DHD_ERROR(("Host TS dump cnt %d, fw TS dump cnt %d, descrepency %d\n",
+ ts->host_ts_capture_cnt, ts->fw_ts_capture_cnt, ts->fw_ts_disc_cnt));
+ DHD_ERROR(("ts_watchdog calls %d reset cnt %d\n",
+ ts->ts_watchdog_calls, ts->host_reset_cnt));
+ DHD_ERROR(("xt_ids tag/ID %d/%d, %d/%d, %d/%d, %d/%d\n",
+ BCMMSGBUF_HOST_TIMESTAMPING_CONFIG_TAG, ts->xt_ids.host_timestamping_config,
+ BCMMSGBUF_HOST_CLOCK_SELECT_TAG, ts->xt_ids.host_clock_selection,
+ BCMMSGBUF_HOST_CLOCK_INFO_TAG, ts->xt_ids.host_clk_info,
+ BCMMSGBUF_D2H_CLOCK_CORRECTION_TAG, ts->xt_ids.d2h_clk_correction));
+ DHD_ERROR(("pending requests %d suspend req %d resume req %d\n",
+ ts->pending_requests, ts->suspend_req, ts->resume_req));
+
+}
+
+static int
+dhd_timesync_dump(dhd_ts_t *ts, char *buf, int buflen)
+{
+ struct bcmstrbuf b;
+ struct bcmstrbuf *strbuf = &b;
+
+ bcm_binit(strbuf, buf, buflen);
+
+ bcm_bprintf(strbuf, "ts info dump: active_ipc_version %d\n", ts->active_ipc_version);
+ bcm_bprintf(strbuf, "timesync disabled %d\n", ts->timesync_disabled);
+ bcm_bprintf(strbuf, "Host TS dump cnt %d, fw TS dump cnt %d, descrepency %d\n",
+ ts->host_ts_capture_cnt, ts->fw_ts_capture_cnt, ts->fw_ts_disc_cnt);
+ bcm_bprintf(strbuf, "ts_watchdog calls %d reset cnt %d\n",
+ ts->ts_watchdog_calls, ts->host_reset_cnt);
+ bcm_bprintf(strbuf, "xt_ids tag/ID %d/%d, %d/%d, %d/%d, %d/%d\n",
+ BCMMSGBUF_HOST_TIMESTAMPING_CONFIG_TAG, ts->xt_ids.host_timestamping_config,
+ BCMMSGBUF_HOST_CLOCK_SELECT_TAG, ts->xt_ids.host_clock_selection,
+ BCMMSGBUF_HOST_CLOCK_INFO_TAG, ts->xt_ids.host_clk_info,
+ BCMMSGBUF_D2H_CLOCK_CORRECTION_TAG, ts->xt_ids.d2h_clk_correction);
+ bcm_bprintf(strbuf, "pending requests %d suspend req %d resume req %d\n",
+ ts->pending_requests, ts->suspend_req, ts->resume_req);
+
+ return BCME_OK;
+}
+
+static int
+dhd_timesync_doiovar(dhd_ts_t *ts, const bcm_iovar_t *vi, uint32 actionid, const char *name,
+ void *params, uint plen, void *arg, uint len, int val_size)
+{
+ int bcmerror = 0;
+ int32 int_val = 0;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+ DHD_TRACE(("%s: actionid = %d; name %s\n", __FUNCTION__, actionid, name));
+
+ if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0)
+ goto exit;
+
+ if (plen >= sizeof(int_val))
+ bcopy(params, &int_val, sizeof(int_val));
+
+ switch (actionid) {
+ case IOV_GVAL(IOV_TS_INFO_DUMP):
+ dhd_timesync_dump(ts, arg, len);
+ break;
+ case IOV_GVAL(IOV_TS_TX_TS_DUMP):
+ dhd_timesync_ts_log_dump(ts, arg, len, TRUE);
+ break;
+ case IOV_GVAL(IOV_TS_RX_TS_DUMP):
+ dhd_timesync_ts_log_dump(ts, arg, len, FALSE);
+ break;
+ case IOV_GVAL(IOV_TS_FW_CLKINFO_DUMP):
+ dhd_ts_fw_clksrc_dump(ts, arg, len);
+ break;
+ case IOV_SVAL(IOV_TS_SEND_TSCONFIG):
+ if (ts->active_ipc_version < 7) {
+ bcmerror = BCME_ERROR;
+ break;
+ }
+ bcmerror = dhd_timesync_send_host_timestamping_config(ts, FALSE);
+ break;
+ case IOV_SVAL(IOV_TS_SEND_HCLK_SEL):
+ if (ts->active_ipc_version < 7) {
+ bcmerror = BCME_ERROR;
+ break;
+ }
+ bcmerror = dhd_timesync_send_host_clock_selection(ts);
+ break;
+ case IOV_SVAL(IOV_TS_SEND_HCLK_INFO):
+ if (ts->active_ipc_version < 7) {
+ bcmerror = BCME_ERROR;
+ break;
+ }
+ bcmerror = dhd_timesync_send_host_clk_info(ts);
+ break;
+ case IOV_SVAL(IOV_TS_SEND_D2H_CRCT):
+ if (ts->active_ipc_version < 7) {
+ bcmerror = BCME_ERROR;
+ break;
+ }
+ bcmerror = dhd_timesync_send_D2H_clk_correction(ts);
+ break;
+ case IOV_SVAL(IOV_TS_INJECT_BAD_TAG):
+ if (ts->active_ipc_version < 7) {
+ bcmerror = BCME_ERROR;
+ break;
+ }
+ bcmerror = dhd_timesync_send_host_timestamping_config(ts, TRUE);
+ break;
+ case IOV_SVAL(IOV_TS_INJECT_BAD_XTID): {
+ uint16 old_xt_id;
+
+ if (ts->active_ipc_version < 7) {
+ bcmerror = BCME_ERROR;
+ break;
+ }
+ old_xt_id = ts->xt_id;
+ ts->xt_id += 10; /* will cause the error now */
+ DHD_ERROR(("generating bad XTID transaction for the device exp %d, sending %d",
+ old_xt_id, ts->xt_id));
+ bcmerror = dhd_timesync_send_host_timestamping_config(ts, FALSE);
+ ts->xt_id = old_xt_id;
+ break;
+ }
+ case IOV_GVAL(IOV_TS_FWTS2HSTS_DELAY):
+ bcopy(&ts->fwts2hsts_delay, arg, val_size);
+ break;
+ case IOV_SVAL(IOV_TS_FWTS2HSTS_DELAY):
+ if (ts->active_ipc_version < 7) {
+ bcmerror = BCME_ERROR;
+ break;
+ }
+ if (int_val > DHD_DEFAULT_TIMESYNC_TIMER_VALUE_MAX) {
+ bcmerror = BCME_RANGE;
+ break;
+ }
+ if (int_val <= DHD_DEFAULT_TIMESYNC_TIMER_VALUE) {
+ bcmerror = BCME_RANGE;
+ break;
+ }
+ ts->fwts2hsts_delay = int_val;
+ break;
+ case IOV_GVAL(IOV_TS_NODROP_CONFIG):
+ bcopy(&ts->nodrop_config, arg, val_size);
+ break;
+ case IOV_SVAL(IOV_TS_NODROP_CONFIG):
+ ts->nodrop_config = int_val;
+ break;
+ case IOV_GVAL(IOV_TS_NO_RETRY):
+ int_val = dhd_prot_pkt_noretry(ts->dhdp, 0, FALSE);
+ bcopy(&int_val, arg, val_size);
+ break;
+ case IOV_SVAL(IOV_TS_NO_RETRY):
+ dhd_prot_pkt_noretry(ts->dhdp, int_val, TRUE);
+ break;
+ case IOV_GVAL(IOV_TS_NO_AGGR):
+ int_val = dhd_prot_pkt_noaggr(ts->dhdp, 0, FALSE);
+ bcopy(&int_val, arg, val_size);
+ break;
+ case IOV_SVAL(IOV_TS_NO_AGGR):
+ dhd_prot_pkt_noaggr(ts->dhdp, int_val, TRUE);
+ break;
+ case IOV_GVAL(IOV_TS_FIXED_RATE):
+ int_val = dhd_prot_pkt_fixed_rate(ts->dhdp, 0, FALSE);
+ bcopy(&int_val, arg, val_size);
+ break;
+ case IOV_SVAL(IOV_TS_FIXED_RATE):
+ dhd_prot_pkt_fixed_rate(ts->dhdp, int_val, TRUE);
+ break;
+ case IOV_SVAL(IOV_TS_CLEAR_LOGS):
+ dhd_timesync_clear_logs(ts);
+ break;
+ case IOV_GVAL(IOV_TS_TXS_LOG):
+ int_val = dhd_prot_data_path_tx_timestamp_logging(ts->dhdp, 0, FALSE);
+ bcopy(&int_val, arg, val_size);
+ break;
+ case IOV_SVAL(IOV_TS_TXS_LOG):
+ dhd_prot_data_path_tx_timestamp_logging(ts->dhdp, int_val, TRUE);
+ break;
+ case IOV_GVAL(IOV_TS_RXS_LOG):
+ int_val = dhd_prot_data_path_rx_timestamp_logging(ts->dhdp, 0, FALSE);
+ bcopy(&int_val, arg, val_size);
+ break;
+ case IOV_SVAL(IOV_TS_RXS_LOG):
+ dhd_prot_data_path_rx_timestamp_logging(ts->dhdp, int_val, TRUE);
+ break;
+ case IOV_SVAL(IOV_TS_HTSCONF_PERIOD):
+ if (ts->active_ipc_version < 7) {
+ bcmerror = BCME_ERROR;
+ break;
+ }
+ ts->h_tsconf_period = int_val;
+ break;
+ case IOV_GVAL(IOV_TS_HTSCONF_PERIOD):
+ if (ts->active_ipc_version < 7) {
+ bcmerror = BCME_ERROR;
+ break;
+ }
+ bcopy(&ts->h_tsconf_period, arg, val_size);
+ break;
+ case IOV_SVAL(IOV_TS_HCLK_CLKID_MAX):
+ if (ts->active_ipc_version < 7) {
+ bcmerror = BCME_ERROR;
+ break;
+ }
+ ts->h_clkid_max = int_val;
+ break;
+ case IOV_GVAL(IOV_TS_HCLK_CLKID_MAX):
+ if (ts->active_ipc_version < 7) {
+ bcmerror = BCME_ERROR;
+ break;
+ }
+ bcopy(&ts->h_clkid_max, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_TS_HCLK_CLKID_MIN):
+ if (ts->active_ipc_version < 7) {
+ bcmerror = BCME_ERROR;
+ break;
+ }
+ ts->h_clkid_min = int_val;
+ break;
+ case IOV_GVAL(IOV_TS_HCLK_CLKID_MIN):
+ if (ts->active_ipc_version < 7) {
+ bcmerror = BCME_ERROR;
+ break;
+ }
+ bcopy(&ts->h_clkid_min, arg, val_size);
+ break;
+ default:
+ bcmerror = BCME_UNSUPPORTED;
+ break;
+ }
+exit:
+ DHD_TRACE(("%s: actionid %d, bcmerror %d\n", __FUNCTION__, actionid, bcmerror));
+ return bcmerror;
+}
+
+int
+dhd_timesync_iovar_op(dhd_ts_t *ts, const char *name,
+ void *params, int plen, void *arg, int len, bool set)
+{
+ int bcmerror = 0;
+ int val_size;
+ const bcm_iovar_t *vi = NULL;
+ uint32 actionid;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ ASSERT(name);
+ ASSERT(len >= 0);
+
+ /* Get MUST have return space */
+ ASSERT(set || (arg && len));
+
+ /* Set does NOT take qualifiers */
+ ASSERT(!set || (!params && !plen));
+
+ if ((vi = bcm_iovar_lookup(dhd_ts_iovars, name)) == NULL) {
+ DHD_TRACE(("%s: not ours\n", name));
+ bcmerror = BCME_UNSUPPORTED;
+ goto exit;
+ }
+
+ DHD_CTL(("%s: %s %s, len %d plen %d\n", __FUNCTION__,
+ name, (set ? "set" : "get"), len, plen));
+
+ /* set up 'params' pointer in case this is a set command so that
+ * the convenience int and bool code can be common to set and get
+ */
+ if (params == NULL) {
+ params = arg;
+ plen = len;
+ }
+
+ if (vi->type == IOVT_VOID)
+ val_size = 0;
+ else if (vi->type == IOVT_BUFFER)
+ val_size = len;
+ else
+ /* all other types are integer sized */
+ val_size = sizeof(int);
+
+ actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
+
+ bcmerror = dhd_timesync_doiovar(ts, vi, actionid, name, params, plen, arg, len, val_size);
+
+exit:
+ return bcmerror;
+}
+
+void
+dhd_timesync_handle_host_ts_complete(dhd_ts_t *ts, uint16 xt_id, uint16 status)
+{
+ if (ts == NULL) {
+ DHD_ERROR(("%s: called with ts null\n", __FUNCTION__));
+ return;
+ }
+ DHD_INFO(("Host send TS complete, for ID %d, status %d\n", xt_id, status));
+ if (xt_id == ts->xt_ids.host_clk_info) {
+ if (ts->host_ts_host_clk_info_buffer_in_use != TRUE) {
+ DHD_ERROR(("same ID as the host clock info, but buffer not in use: %d\n",
+ ts->xt_ids.host_clk_info));
+ return;
+ }
+ ts->host_ts_host_clk_info_buffer_in_use = FALSE;
+ }
+ ts->pending_requests--;
+}
+
+void
+dhd_timesync_notify_ipc_rev(dhd_ts_t *ts, uint32 ipc_rev)
+{
+ if (ts != NULL)
+ ts->active_ipc_version = ipc_rev;
+}
+
+static int
+dhd_ts_fw_clksrc_dump(dhd_ts_t *ts, char *buf, int buflen)
+{
+ struct bcmstrbuf b;
+ struct bcmstrbuf *strbuf = &b;
+ clk_ts_log_t *fw_ts_log;
+ uint32 i = 0, j = 0;
+ clksrc_ts_log_t *clk_src;
+
+ fw_ts_log = &ts->fw_ts_log;
+
+ bcm_binit(strbuf, buf, buflen);
+
+ while (i <= MAX_CLKSRC_ID) {
+ clk_src = &fw_ts_log->ts_log[i];
+ if (clk_src->inuse == FALSE) {
+ bcm_bprintf(strbuf, "clkID %d: not in use\n", i);
+ }
+ else {
+ bcm_bprintf(strbuf, "clkID %d: name %s Max idx %d, cur_idx %d\n",
+ i, clk_src->name, MAX_FW_TS_LOG_SAMPLES, fw_ts_log->cur_idx);
+ j = 0;
+ while (j < MAX_FW_TS_LOG_SAMPLES) {
+ bcm_bprintf(strbuf, "%03d: %03d: 0x%08x-0x%08x\n", j,
+ fw_ts_log->seqnum[j], clk_src->log[j].ts_high,
+ clk_src->log[j].ts_low);
+ j++;
+ }
+ }
+ i++;
+ }
+ return b.size;
+}
+
+static void
+dhd_ts_fw_clksrc_log(dhd_ts_t *ts, uchar *tlvs, uint32 tlv_len, uint32 seqnum)
+{
+ ts_fw_clock_info_t *fw_clock_info;
+ clksrc_ts_log_t *clk_src;
+ clk_ts_log_t *fw_ts_log;
+
+ fw_ts_log = &ts->fw_ts_log;
+
+ fw_ts_log->seqnum[fw_ts_log->cur_idx] = seqnum;
+ while (tlv_len) {
+ fw_clock_info = (ts_fw_clock_info_t *)tlvs;
+ clk_src = &fw_ts_log->ts_log[(fw_clock_info->ts.ts_high >> 28) & 0xF];
+ if (clk_src->inuse == FALSE) {
+ bcopy(fw_clock_info->clk_src, clk_src->name, sizeof(clk_src->name));
+ clk_src->inuse = TRUE;
+ }
+ clk_src->log[fw_ts_log->cur_idx].ts_low = fw_clock_info->ts.ts_low;
+ clk_src->log[fw_ts_log->cur_idx].ts_high = fw_clock_info->ts.ts_high;
+
+ tlvs += sizeof(*fw_clock_info);
+ tlv_len -= sizeof(*fw_clock_info);
+ }
+ fw_ts_log->cur_idx++;
+ if (fw_ts_log->cur_idx >= MAX_FW_TS_LOG_SAMPLES)
+ fw_ts_log->cur_idx = 0;
+}
+
+void
+dhd_timesync_handle_fw_timestamp(dhd_ts_t *ts, uchar *tlvs, uint32 tlv_len, uint32 seqnum)
+{
+ ts_fw_clock_info_t *fw_clock_info;
+ uint16 tag_id;
+
+ DHD_INFO(("FW sent timestamp message, tlv_len %d, seqnum %d\n", tlv_len, seqnum));
+ bcm_print_bytes("fw ts", tlvs, tlv_len);
+ /* we are expecting only one TLV type from the firmware side */
+ /* BCMMSGBUF_FW_CLOCK_INFO_TAG */
+ /* Validate the tag ID */
+ if (ts == NULL) {
+ DHD_ERROR(("%s: NULL TS \n", __FUNCTION__));
+ return;
+ }
+ if (tlvs == NULL) {
+ DHD_ERROR(("%s: NULL TLV \n", __FUNCTION__));
+ return;
+ }
+ if (tlv_len < BCM_XTLV_HDR_SIZE) {
+ DHD_ERROR(("%s: bad length %d\n", __FUNCTION__, tlv_len));
+ return;
+ }
+ if (tlv_len > MAX_SIZE_FW_CLKINFO_TYPE) {
+ DHD_ERROR(("tlv_len %d more than what is supported in Host %d\n", tlv_len,
+ (uint32)MAX_SIZE_FW_CLKINFO_TYPE));
+ return;
+ }
+ if (tlv_len % (sizeof(*fw_clock_info))) {
+ DHD_ERROR(("bad tlv_len for the packet %d, needs to be multiple of %d\n", tlv_len,
+ (uint32)(sizeof(*fw_clock_info))));
+ return;
+ }
+
+	/* validate the tag for all the included tag IDs */
+ {
+ uint32 check_len = 0;
+ uchar *tag_ptr = (uchar *)(tlvs);
+ while (check_len < tlv_len) {
+ bcopy(tag_ptr+check_len, &tag_id, sizeof(uint16));
+ DHD_INFO(("FWTS: tag_id %d, offset %d \n",
+ tag_id, check_len));
+ if (tag_id != BCMMSGBUF_FW_CLOCK_INFO_TAG) {
+ DHD_ERROR(("Fatal: invalid tag from FW in TS: %d, offset %d \n",
+ tag_id, check_len));
+ return;
+ }
+ check_len += sizeof(*fw_clock_info);
+ }
+ }
+
+ if (seqnum != (ts->fw_ts_capture_cnt + 1)) {
+ DHD_ERROR(("FW TS descrepency: out of sequence exp %d, got %d, resyncing %d\n",
+ ts->fw_ts_capture_cnt + 1, seqnum, seqnum));
+ ts->fw_ts_disc_cnt++;
+ }
+ ts->fw_ts_capture_cnt = seqnum;
+
+ /* copy it into local info */
+ bcopy(tlvs, &ts->fw_tlv[0], tlv_len);
+ ts->fw_tlv_len = tlv_len;
+
+ dhd_ts_fw_clksrc_log(ts, tlvs, tlv_len, seqnum);
+ /* launch the watchdog to send the host time stamp as per the delay programmed */
+ if (ts->fwts2hsts_delay_wdcount != 0) {
+ DHD_ERROR(("FATAL: Last Host sync is not sent out yet\n"));
+ return;
+ }
+ if (dhd_watchdog_ms == 0) {
+ DHD_ERROR(("FATAL: WATCHDOG is set to 0, timesync can't work properly \n"));
+ return;
+ }
+ /* schedule sending host time sync values to device */
+ ts->fwts2hsts_delay_wdcount = ts->fwts2hsts_delay / dhd_watchdog_ms;
+ if (ts->fwts2hsts_delay_wdcount == 0)
+ ts->fwts2hsts_delay_wdcount = 1;
+}
+
+static uint32
+dhd_timesync_send_host_timestamping_config(dhd_ts_t *ts, bool inject_err)
+{
+ ts_host_timestamping_config_t ts_config;
+ int ret_val;
+
+ if (ts->timesync_disabled) {
+ DHD_ERROR(("Timesync Disabled: Cannot send HOST TS config msg\n"));
+ return BCME_ERROR;
+ }
+ bzero(&ts_config, sizeof(ts_config));
+
+ ts_config.xtlv.id = BCMMSGBUF_HOST_TIMESTAMPING_CONFIG_TAG;
+ if (inject_err)
+ ts_config.xtlv.id = BCMMSGBUF_HOST_TS_BADTAG;
+
+ ts_config.xtlv.len = sizeof(ts_config) - sizeof(_bcm_xtlv_t);
+ ts_config.period_ms = ts->h_tsconf_period;
+
+ if (ts_config.period_ms) {
+ ts_config.flags |= FLAG_HOST_RESET;
+ ts_config.reset_cnt = ts->host_reset_cnt + 1;
+ }
+
+ if (ts->nodrop_config) {
+ ts_config.flags |= FLAG_CONFIG_NODROP;
+ ts_config.post_delay = TS_NODROP_CONFIG_TO;
+ } else {
+ ts_config.post_delay = TS_DROP_CONFIG_TO;
+ }
+
+ DHD_ERROR(("sending Host Timestamping Config: TLV (ID %d, LEN %d), period %d, seq %d\n",
+ ts_config.xtlv.id, ts_config.xtlv.len, ts_config.period_ms,
+ ts->host_ts_capture_cnt));
+ ret_val = dhd_prot_send_host_timestamp(ts->dhdp, (uchar *)&ts_config, sizeof(ts_config),
+ ts->host_ts_capture_cnt, ts->xt_id);
+ if (ret_val != 0) {
+ DHD_ERROR(("Fatal: Error sending HOST TS config msg to device: %d\n", ret_val));
+ return BCME_ERROR;
+ }
+
+ if (ts_config.period_ms) {
+ ts->host_reset_cnt++;
+ }
+
+ ts->pending_requests++;
+ ts->xt_ids.host_timestamping_config = ts->xt_id;
+ ts->xt_id++;
+ return BCME_OK;
+}
+
+static uint32
+dhd_timesync_send_host_clock_selection(dhd_ts_t *ts)
+{
+ ts_host_clock_sel_t ts_clk_sel;
+ int ret_val;
+
+ if (ts->timesync_disabled) {
+ DHD_ERROR(("Timesync Disabled: Cannot send HOST clock sel msg\n"));
+ return BCME_ERROR;
+ }
+
+ bzero(&ts_clk_sel, sizeof(ts_clk_sel));
+
+ ts_clk_sel.xtlv.id = BCMMSGBUF_HOST_CLOCK_SELECT_TAG;
+ ts_clk_sel.xtlv.len = sizeof(ts_clk_sel) - sizeof(_bcm_xtlv_t);
+ ts_clk_sel.min_clk_idx = ts->h_clkid_min;
+ ts_clk_sel.max_clk_idx = ts->h_clkid_max;
+ DHD_INFO(("sending Host ClockSel Config: TLV (ID %d, LEN %d), min %d, max %d, seq %d\n",
+ ts_clk_sel.xtlv.id, ts_clk_sel.xtlv.len, ts_clk_sel.min_clk_idx,
+ ts_clk_sel.max_clk_idx,
+ ts->host_ts_capture_cnt));
+ ret_val = dhd_prot_send_host_timestamp(ts->dhdp, (uchar *)&ts_clk_sel, sizeof(ts_clk_sel),
+ ts->host_ts_capture_cnt, ts->xt_id);
+ if (ret_val != 0) {
+ DHD_ERROR(("Fatal: Error sending HOST ClockSel msg to device: %d\n", ret_val));
+ return BCME_ERROR;
+ }
+ ts->xt_ids.host_clock_selection = ts->xt_id;
+ ts->xt_id++;
+ ts->pending_requests++;
+ return BCME_OK;
+}
+
+static uint32
+dhd_timesync_send_host_clk_info(dhd_ts_t *ts)
+{
+ ts_host_clock_info_t *host_clock_info;
+ uchar *clk_info_buffer;
+ uint32 clk_info_bufsize;
+ int ret_val;
+
+ if (ts->timesync_disabled) {
+ DHD_ERROR(("Timesync Disabled: Cannot send HOST clock config msg\n"));
+ return BCME_ERROR;
+ }
+ if (ts->host_ts_host_clk_info_buffer_in_use == TRUE) {
+ DHD_ERROR(("Host Ts Clock info buffer in Use\n"));
+ return BCME_ERROR;
+ }
+ clk_info_buffer = &ts->host_ts_host_clk_info_buffer[0];
+ clk_info_bufsize = sizeof(ts->host_ts_host_clk_info_buffer);
+
+ DHD_INFO(("clk_info_buf size %d, tlv_len %d, host clk_info_len %d\n",
+ clk_info_bufsize, ts->fw_tlv_len, (uint32)sizeof(*host_clock_info)));
+
+ if (clk_info_bufsize < sizeof(*host_clock_info)) {
+ DHD_ERROR(("clock_info_buf_size too small to fit host clock info %d, %d\n",
+ clk_info_bufsize, (uint32)sizeof(*host_clock_info)));
+ return BCME_ERROR;
+ }
+
+ host_clock_info = (ts_host_clock_info_t *)clk_info_buffer;
+ host_clock_info->xtlv.id = BCMMSGBUF_HOST_CLOCK_INFO_TAG;
+ host_clock_info->xtlv.len = sizeof(*host_clock_info) - sizeof(_bcm_xtlv_t);
+ /* OSL_GET_CYCLES */
+ host_clock_info->ticks.low = 0;
+ host_clock_info->ticks.high = 0;
+ /* OSL_SYS_UPTIME?? */
+ host_clock_info->ns.low = 0;
+ host_clock_info->ns.high = 0;
+ clk_info_buffer += (sizeof(*host_clock_info));
+ clk_info_bufsize -= sizeof(*host_clock_info);
+
+ /* copy the device clk config as that is the reference for this */
+ if (clk_info_bufsize < ts->fw_tlv_len) {
+ DHD_ERROR(("clock info buffer is small to fit dev clk info %d, %d\n",
+ clk_info_bufsize, ts->fw_tlv_len));
+ return BCME_ERROR;
+ }
+ bcopy(ts->fw_tlv, clk_info_buffer, ts->fw_tlv_len);
+ clk_info_bufsize -= ts->fw_tlv_len;
+
+ DHD_INFO(("sending Host TS msg Len %d, xt_id %d, host_ts_capture_count %d\n",
+ (uint32)(sizeof(ts->host_ts_host_clk_info_buffer) - clk_info_bufsize),
+ ts->xt_id, ts->host_ts_capture_cnt));
+
+ bcm_print_bytes("host ts", (uchar *)ts->host_ts_host_clk_info_buffer,
+ sizeof(ts->host_ts_host_clk_info_buffer) - clk_info_bufsize);
+
+ ret_val = dhd_prot_send_host_timestamp(ts->dhdp, (uchar *)ts->host_ts_host_clk_info_buffer,
+ sizeof(ts->host_ts_host_clk_info_buffer) - clk_info_bufsize,
+ ts->host_ts_capture_cnt, ts->xt_id);
+ if (ret_val != 0) {
+ DHD_ERROR(("Fatal: Error sending HOST ClockSel msg to device: %d\n", ret_val));
+ return BCME_ERROR;
+ }
+ ts->host_ts_host_clk_info_buffer_in_use = TRUE;
+ ts->xt_ids.host_clk_info = ts->xt_id;
+ ts->xt_id++;
+ ts->pending_requests++;
+ return BCME_OK;
+}
+
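+/*
+ * The correction message carries a slope/offset pair (m, b). The naming
+ * suggests the usual linear mapping host_time ~= m * device_time + b per
+ * clock source; the exact semantics are applied on the device side, so this
+ * note is an assumption rather than a spec.
+ */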
+static uint32
+dhd_timesync_send_D2H_clk_correction(dhd_ts_t *ts)
+{
+ ts_d2h_clock_correction_t ts_clk_crtion;
+ int ret_val;
+
+ if (ts->timesync_disabled) {
+ DHD_ERROR(("Timesync Disabled: Cannot send d2h clock correction msg\n"));
+ return BCME_ERROR;
+ }
+
+ bzero(&ts_clk_crtion, sizeof(ts_clk_crtion));
+
+ /* XXX: should this be sending for all the clock sources */
+
+ ts_clk_crtion.xtlv.id = BCMMSGBUF_D2H_CLOCK_CORRECTION_TAG;
+ ts_clk_crtion.xtlv.len = sizeof(ts_clk_crtion) - sizeof(_bcm_xtlv_t);
+ ts_clk_crtion.clk_id = ts->h_clkid_max;
+ ts_clk_crtion.m.low = ts->correction_m.low;
+ ts_clk_crtion.m.high = ts->correction_m.high;
+ ts_clk_crtion.b.low = ts->correction_b.low;
+ ts_clk_crtion.b.high = ts->correction_b.high;
+
+ DHD_INFO(("sending D2H Correction: ID %d, LEN %d, clkid %d, m %d:%d, b %d:%d, seq %d\n",
+ ts_clk_crtion.xtlv.id, ts_clk_crtion.xtlv.len, ts_clk_crtion.clk_id,
+ ts_clk_crtion.m.high,
+ ts_clk_crtion.m.low,
+ ts_clk_crtion.b.high,
+ ts_clk_crtion.b.low,
+ ts->host_ts_capture_cnt));
+
+ ret_val = dhd_prot_send_host_timestamp(ts->dhdp, (uchar *)&ts_clk_crtion,
+ sizeof(ts_clk_crtion), ts->host_ts_capture_cnt, ts->xt_id);
+ if (ret_val != 0) {
+ DHD_ERROR(("Fatal: Error sending HOST ClockSel msg to device: %d\n", ret_val));
+ return BCME_ERROR;
+ }
+ ts->xt_ids.d2h_clk_correction = ts->xt_id;
+ ts->xt_id++;
+ ts->pending_requests++;
+ return BCME_OK;
+}
+
+bool
+dhd_timesync_delay_post_bufs(dhd_pub_t *dhdp)
+{
+ return (dhdp->ts->fwts2hsts_delay != 0);
+}
+
+bool
+dhd_timesync_watchdog(dhd_pub_t *dhdp)
+{
+ dhd_ts_t *ts = dhdp->ts;
+
+ if (ts == NULL)
+ return FALSE;
+
+ ts->last_ts_watchdog_time = OSL_LOCALTIME_NS();
+ ts->ts_watchdog_calls++;
+
+	/* XXX: this relies on the watchdog running, which may not always hold */
+ if (ts->fwts2hsts_delay_wdcount) {
+ ts->fwts2hsts_delay_wdcount--;
+ if (ts->fwts2hsts_delay != 0 && dhdp->busstate == DHD_BUS_DATA &&
+ (ts->fwts2hsts_delay_wdcount == 0)) {
+ /* see if we need to send the host clock info */
+ dhd_timesync_send_host_clk_info(ts);
+ dhd_msgbuf_delay_post_ts_bufs(dhdp);
+ }
+ }
+ return FALSE;
+}
+
+static void
+dhd_timesync_log_timestamp_item(dhd_ts_log_ts_t *tsl, uint16 flowid, uint8 intf,
+ uint32 ts_low, uint32 ts_high, dhd_pkt_parse_t *pkt)
+{
+ tsl->ts_log[tsl->cur_idx].ts_low = ts_low;
+ tsl->ts_log[tsl->cur_idx].ts_high = ts_high;
+ tsl->ts_log[tsl->cur_idx].intf = intf;
+ tsl->ts_log[tsl->cur_idx].proto = pkt->proto;
+ tsl->ts_log[tsl->cur_idx].t1 = pkt->t1;
+ tsl->ts_log[tsl->cur_idx].t2 = pkt->t2;
+ tsl->cur_idx++;
+ if (tsl->cur_idx == tsl->max_idx)
+ tsl->cur_idx = 0;
+}
+
+void
+dhd_timesync_log_tx_timestamp(dhd_ts_t *ts, uint16 flowid, uint8 intf,
+ uint32 ts_low, uint32 ts_high, dhd_pkt_parse_t *pkt)
+{
+ if (ts != NULL) {
+ dhd_timesync_log_timestamp_item(&ts->tx_timestamps, flowid, intf,
+ ts_low, ts_high, pkt);
+ }
+}
+
+void
+dhd_timesync_log_rx_timestamp(dhd_ts_t *ts, uint8 intf, uint32 ts_low, uint32 ts_high,
+ dhd_pkt_parse_t *pkt)
+{
+ if (ts != NULL) {
+ dhd_timesync_log_timestamp_item(&ts->rx_timestamps, 0, intf,
+ ts_low, ts_high, pkt);
+ }
+}
+
+void
+dhd_timesync_control(dhd_pub_t *dhdp, bool disabled)
+{
+ dhd_ts_t *ts;
+ if (dhdp == NULL)
+ return;
+
+ ts = dhdp->ts;
+ if (ts != NULL) {
+ if (disabled) {
+ DHD_ERROR(("resetting the timesync counter, current(%d)\n",
+ ts->fw_ts_capture_cnt));
+
+ ts->fw_ts_capture_cnt = 0;
+
+ /* Suspend case: Disable timesync after the config message */
+ if ((ts->active_ipc_version >= 7) && (ts->h_tsconf_period != 0)) {
+ uint32 tsconf_period;
+
+ tsconf_period = ts->h_tsconf_period;
+ ts->h_tsconf_period = 0;
+
+ dhd_timesync_send_host_timestamping_config(ts, FALSE);
+ ts->h_tsconf_period = tsconf_period;
+ }
+ ts->timesync_disabled = TRUE;
+ ts->suspend_req++;
+ } else {
+ /* Resume case: Enable timesync before the config message */
+ DHD_ERROR(("enabling the timesync counter, current(%d)\n",
+ ts->fw_ts_capture_cnt));
+
+ ts->timesync_disabled = FALSE;
+ ts->resume_req++;
+
+ if ((ts->active_ipc_version >= 7) && (ts->h_tsconf_period != 0))
+ dhd_timesync_send_host_timestamping_config(ts, FALSE);
+ }
+ }
+ /* XXX: may be all the other internal iovar calls should check for disabled state */
+}
diff --git a/bcmdhd.101.10.361.x/dhd_timesync.h b/bcmdhd.101.10.361.x/dhd_timesync.h
new file mode 100755
index 0000000..0e3afb3
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_timesync.h
@@ -0,0 +1,68 @@
+/*
+ * Header file describing the common timesync functionality
+ *
+ * Provides type definitions and function prototypes used to handle timesync functionality.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$:
+ */
+
+#ifndef _dhd_timesync_h_
+#define _dhd_timesync_h_
+
+typedef struct dhd_ts dhd_ts_t;
+
+/* Linkage, sets "ts" link and updates hdrlen in pub */
+extern int dhd_timesync_attach(dhd_pub_t *dhdp);
+
+/* Watchdog tick handler, drives the delayed host timestamp send */
+extern bool dhd_timesync_watchdog(dhd_pub_t *dhdp);
+
+/* Unlink, frees allocated timesync memory (including dhd_ts_t) */
+extern int dhd_timesync_detach(dhd_pub_t *dhdp);
+
+/* Check for and handle local prot-specific iovar commands */
+extern int dhd_timesync_iovar_op(dhd_ts_t *ts, const char *name, void *params, int plen,
+ void *arg, int len, bool set);
+
+/* handle host time stamp completion */
+extern void dhd_timesync_handle_host_ts_complete(dhd_ts_t *ts, uint16 xt_id, uint16 status);
+
+/* handle fw time stamp event from firmware */
+extern void dhd_timesync_handle_fw_timestamp(dhd_ts_t *ts, uchar *tlv, uint32 tlv_len,
+ uint32 seqnum);
+
+/* get notified of the ipc rev */
+extern void dhd_timesync_notify_ipc_rev(dhd_ts_t *ts, uint32 ipc_rev);
+
+/* log txs timestamps */
+extern void dhd_timesync_log_tx_timestamp(dhd_ts_t *ts, uint16 flowid, uint8 intf,
+ uint32 ts_low, uint32 ts_high, dhd_pkt_parse_t *parse);
+
+/* log rx cpl timestamps */
+extern void dhd_timesync_log_rx_timestamp(dhd_ts_t *ts, uint8 intf,
+ uint32 ts_low, uint32 ts_high, dhd_pkt_parse_t *parse);
+
+/* dynamically enable/disable timesync based on the host driver suspend/resume state */
+extern void dhd_timesync_control(dhd_pub_t *dhdp, bool disabled);
+
+extern void dhd_timesync_debug_info_print(dhd_pub_t *dhdp);
+#endif /* _dhd_timesync_h_ */
diff --git a/bcmdhd.101.10.361.x/dhd_wet.c b/bcmdhd.101.10.361.x/dhd_wet.c
new file mode 100755
index 0000000..4537aef
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_wet.c
@@ -0,0 +1,1187 @@
+/**
+ * @file
+ * @brief
+ * Wireless EThernet (WET) Bridge.
+ *
+ * WET STA and WET client are interchangeable in this file and refer to
+ * addressable entities whose traffic is sent and received through this
+ * bridge, including the hosting device.
+ *
+ * Supported protocol families: IP v4.
+ *
+ * Tx: replace frames' source MAC address with wireless interface's;
+ * update the IP-MAC address mapping table entry.
+ *
+ * Rx: replace frames' destination MAC address with the one found in
+ * the IP-MAC address mapping table.
+ *
+ * All data structures defined in this file are optimized for IP v4. To
+ * support other protocol families, write protocol specific handlers.
+ * Doing so may require data structure changes to expand the various address
+ * storage fields to fit protocol specific needs; for example, IPX needs 10
+ * octets for its network address. Also one may need to define the data
+ * structures in a more generic way so that they work with all supported
+ * protocol families, for example, the wet_sta structure may be defined
+ * as follows:
+ *
+ * struct wet_sta {
+ * uint8 nal; network address length
+ * uint8 na[NETA_MAX_LEN]; network address
+ * uint8 mac[ETHER_ADDR_LEN];
+ * ...
+ * };
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+/**
+ * @file
+ * @brief
+ * XXX Twiki: [WirelessEthernet]
+ */
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <siutils.h>
+#include <wlioctl.h>
+#include <802.11.h>
+#include <ethernet.h>
+#include <vlan.h>
+#include <802.3.h>
+#include <bcmip.h>
+#include <bcmarp.h>
+#include <bcmudp.h>
+#include <bcmdhcp.h>
+#include <bcmendian.h>
+#include <dhd_dbg.h>
+#include <d11.h>
+
+#include <dhd_wet.h>
+
+/* IP/MAC address mapping entry */
+typedef struct wet_sta wet_sta_t;
+struct wet_sta {
+ /* client */
+ uint8 ip[IPV4_ADDR_LEN]; /* client IP addr */
+ struct ether_addr mac; /* client MAC addr */
+ uint8 flags[DHCP_FLAGS_LEN]; /* orig. dhcp flags */
+ /* internal */
+ wet_sta_t *next; /* free STA link */
+ wet_sta_t *next_ip; /* hash link by IP */
+ wet_sta_t *next_mac; /* hash link by MAC */
+};
+#define WET_NUMSTAS		(1 << 8)	/* max. # clients, must be a power of 2 */
+#define WET_STA_HASH_SIZE (WET_NUMSTAS/2) /* must be <= WET_NUMSTAS */
+#define WET_STA_HASH_IP(ip) ((ip)[3]&(WET_STA_HASH_SIZE-1)) /* hash by IP */
+#define WET_STA_HASH_MAC(ea) (((ea)[3]^(ea)[4]^(ea)[5])&(WET_STA_HASH_SIZE-1)) /* hash by MAC */
+#define WET_STA_HASH_UNK -1 /* Unknown hash */
+#define IP_ISMULTI(ip) (((ip) & 0xf0000000) == 0xe0000000) /* Check for multicast by IP */
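+
+/*
+ * Worked example (illustrative): WET_NUMSTAS is 256, so WET_STA_HASH_SIZE is
+ * 128. For the IPv4 address 192.168.1.77 the last octet gives
+ * WET_STA_HASH_IP(ip) = 77 & 127 = 77; for a MAC ending in ...:00:01:02,
+ * WET_STA_HASH_MAC(ea) = (0x00 ^ 0x01 ^ 0x02) & 127 = 3.
+ */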
+
+/* WET private info structure */
+struct dhd_wet_info {
+	/* pointer to dhd public info struct */
+ dhd_pub_t *pub;
+ /* Host addresses */
+ uint8 ip[IPV4_ADDR_LEN];
+ struct ether_addr mac;
+ /* STA storage, one entry per eth. client */
+ wet_sta_t sta[WET_NUMSTAS];
+ /* Free sta list */
+ wet_sta_t *stafree;
+ /* Used sta hash by IP */
+ wet_sta_t *stahash_ip[WET_STA_HASH_SIZE];
+ /* Used sta hash by MAC */
+ wet_sta_t *stahash_mac[WET_STA_HASH_SIZE];
+};
+
+/* forward declarations */
+static int wet_eth_proc(dhd_wet_info_t *weth, void *sdu,
+ uint8 *frame, int length, int send);
+static int wet_vtag_proc(dhd_wet_info_t *weth, void *sdu,
+ uint8 * eh, uint8 *vtag, int length, int send);
+static int wet_ip_proc(dhd_wet_info_t *weth, void *sdu,
+ uint8 * eh, uint8 *iph, int length, int send);
+static int wet_arp_proc(dhd_wet_info_t *weth, void *sdu,
+ uint8 *eh, uint8 *arph, int length, int send);
+static int wet_udp_proc(dhd_wet_info_t *weth,
+ uint8 *eh, uint8 *iph, uint8 *udph, int length, int send);
+static int wet_dhcpc_proc(dhd_wet_info_t *weth,
+ uint8 *eh, uint8 *iph, uint8 *udph, uint8 *dhcp, int length, int send);
+static int wet_dhcps_proc(dhd_wet_info_t *weth,
+ uint8 *eh, uint8 *iph, uint8 *udph, uint8 *dhcp, int length, int send);
+static int wet_sta_alloc(dhd_wet_info_t *weth, wet_sta_t **saddr);
+static int wet_sta_update_all(dhd_wet_info_t *weth,
+ uint8 *iaddr, struct ether_addr *eaddr, wet_sta_t **saddr);
+static int wet_sta_update_mac(dhd_wet_info_t *weth,
+ struct ether_addr *eaddr, wet_sta_t **saddr);
+static int wet_sta_remove_mac_entry(dhd_wet_info_t *weth, struct ether_addr *eaddr);
+static int wet_sta_find_ip(dhd_wet_info_t *weth,
+ uint8 *iaddr, wet_sta_t **saddr);
+static int wet_sta_find_mac(dhd_wet_info_t *weth,
+ struct ether_addr *eaddr, wet_sta_t **saddr);
+static void csum_fixup_16(uint8 *chksum,
+ uint8 *optr, int olen, uint8 *nptr, int nlen);
+
+/*
+ * Protocol handler. 'ph' points to protocol specific header,
+ * for example, it points to IP header if it is IP packet.
+ */
+typedef int (*prot_proc_t)(dhd_wet_info_t *weth, void *sdu, uint8 *eh,
+ uint8 *ph, int length, int send);
+/* Protocol handlers hash table - hash by ether type */
+typedef struct prot_hdlr prot_hdlr_t;
+struct prot_hdlr {
+ uint16 type; /* ether type */
+ prot_proc_t prot_proc;
+ prot_hdlr_t *next; /* next proto handler that has the same hash */
+};
+#define WET_PROT_HASH_SIZE (1 << 3)
+#define WET_PROT_HASH(t) ((t)[1]&(WET_PROT_HASH_SIZE-1))
+static prot_hdlr_t ept_tbl[] = {
+ /* 0 */ {HTON16(ETHER_TYPE_8021Q), wet_vtag_proc, NULL}, /* 0x8100 */
+};
+static prot_hdlr_t prot_hash[WET_PROT_HASH_SIZE] = {
+ /* 0 */ {HTON16(ETHER_TYPE_IP), wet_ip_proc, &ept_tbl[0]}, /* 0x0800 */
+ /* 1 */ {0, NULL, NULL}, /* unused */
+ /* 2 */ {0, NULL, NULL}, /* unused */
+ /* 3 */ {0, NULL, NULL}, /* unused */
+ /* 4 */ {0, NULL, NULL}, /* unused */
+ /* 5 */ {0, NULL, NULL}, /* unused */
+ /* 6 */ {HTON16(ETHER_TYPE_ARP), wet_arp_proc, NULL}, /* 0x0806 */
+ /* 7 */ {0, NULL, NULL}, /* unused */
+};
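+
+/*
+ * Bucket math (illustrative): the hash keys off the low byte of the
+ * network-order ether type, masked to 3 bits. ETHER_TYPE_IP (0x0800) and
+ * ETHER_TYPE_8021Q (0x8100) both have 0x00 there, so both land in bucket 0;
+ * that is why the 802.1Q handler hangs off prot_hash[0] via ept_tbl.
+ * ETHER_TYPE_ARP (0x0806) has 0x06, hence bucket 6.
+ */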
+
+/*
+ * IPv4 handler. 'ph' points to protocol specific header,
+ * for example, it points to UDP header if it is UDP packet.
+ */
+typedef int (*ipv4_proc_t)(dhd_wet_info_t *weth, uint8 *eh,
+ uint8 *iph, uint8 *ph, int length, int send);
+/* IPv4 handlers hash table - hash by protocol type */
+typedef struct ipv4_hdlr ipv4_hdlr_t;
+struct ipv4_hdlr {
+ uint8 type; /* protocol type */
+ ipv4_proc_t ipv4_proc;
+ ipv4_hdlr_t *next; /* next proto handler that has the same hash */
+};
+#define WET_IPV4_HASH_SIZE (1 << 1)
+#define WET_IPV4_HASH(p) ((p)&(WET_IPV4_HASH_SIZE-1))
+static ipv4_hdlr_t ipv4_hash[WET_IPV4_HASH_SIZE] = {
+ /* 0 */ {0, NULL, NULL}, /* unused */
+ /* 1 */ {IP_PROT_UDP, wet_udp_proc, NULL}, /* 0x11 */
+};
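+
+/*
+ * Same scheme with a 1-bit mask: IP_PROT_UDP (0x11) & 1 = 1, hence the UDP
+ * handler sits in bucket 1 and bucket 0 is unused.
+ */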
+
+/*
+ * UDP handler. 'ph' points to protocol specific header,
+ * for example, it points to DHCP header if it is DHCP packet.
+ */
+typedef int (*udp_proc_t)(dhd_wet_info_t *weth, uint8 *eh,
+ uint8 *iph, uint8 *udph, uint8 *ph, int length, int send);
+/* UDP handlers hash table - hash by port number */
+typedef struct udp_hdlr udp_hdlr_t;
+struct udp_hdlr {
+ uint16 port; /* udp dest. port */
+ udp_proc_t udp_proc;
+ udp_hdlr_t *next; /* next proto handler that has the same hash */
+};
+#define WET_UDP_HASH_SIZE (1 << 3)
+#define WET_UDP_HASH(p) ((p)[1]&(WET_UDP_HASH_SIZE-1))
+static udp_hdlr_t udp_hash[WET_UDP_HASH_SIZE] = {
+ /* 0 */ {0, NULL, NULL}, /* unused */
+ /* 1 */ {0, NULL, NULL}, /* unused */
+ /* 2 */ {0, NULL, NULL}, /* unused */
+ /* 3 */ {HTON16(DHCP_PORT_SERVER), wet_dhcpc_proc, NULL}, /* 0x43 */
+ /* 4 */ {HTON16(DHCP_PORT_CLIENT), wet_dhcps_proc, NULL}, /* 0x44 */
+ /* 5 */ {0, NULL, NULL}, /* unused */
+ /* 6 */ {0, NULL, NULL}, /* unused */
+ /* 7 */ {0, NULL, NULL}, /* unused */
+};
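+
+/*
+ * Port buckets (illustrative): the hash takes the low byte of the
+ * network-order destination port masked to 3 bits, so DHCP server port 67
+ * (0x0043) maps to 0x43 & 7 = 3 and DHCP client port 68 (0x0044) to
+ * 0x44 & 7 = 4, matching the two populated slots above. Frames addressed
+ * *to* the server port carry client messages, hence wet_dhcpc_proc in
+ * slot 3.
+ */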
+
+#define WETHWADDR(weth) ((weth)->pub->mac.octet)
+#define WETOSH(weth) ((weth)->pub->osh)
+
+/* special values */
+/* 802.3 llc/snap header */
+static uint8 llc_snap_hdr[SNAP_HDR_LEN] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
+static uint8 ipv4_bcast[IPV4_ADDR_LEN] = {0xff, 0xff, 0xff, 0xff}; /* IP v4 broadcast address */
+static uint8 ipv4_null[IPV4_ADDR_LEN] = {0x00, 0x00, 0x00, 0x00}; /* IP v4 NULL address */
+
+dhd_wet_info_t *
+dhd_get_wet_info(dhd_pub_t *pub)
+{
+ dhd_wet_info_t *p;
+ int i;
+ p = (dhd_wet_info_t *)MALLOCZ(pub->osh, sizeof(dhd_wet_info_t));
+ if (p == NULL) {
+		return NULL;
+ }
+ for (i = 0; i < WET_NUMSTAS - 1; i ++)
+ p->sta[i].next = &p->sta[i + 1];
+ p->stafree = &p->sta[0];
+ p->pub = pub;
+ return p;
+}
+
+void
+dhd_free_wet_info(dhd_pub_t *pub, void *wet)
+{
+ if (wet) {
+ MFREE(pub->osh, wet, sizeof(dhd_wet_info_t));
+ }
+}
+
+void dhd_set_wet_host_ipv4(dhd_pub_t *pub, void *parms, uint32 len)
+{
+ dhd_wet_info_t *p;
+ p = (dhd_wet_info_t *)pub->wet_info;
+ bcopy(parms, p->ip, len);
+}
+
+void dhd_set_wet_host_mac(dhd_pub_t *pub, void *parms, uint32 len)
+{
+ dhd_wet_info_t *p;
+ p = (dhd_wet_info_t *)pub->wet_info;
+ bcopy(parms, &p->mac, len);
+}
+/* process Ethernet frame */
+/*
+* Return:
+* = 0 if frame is done ok
+* < 0 if unable to handle the frame
+* > 0 if no further process
+*/
+static int
+BCMFASTPATH(wet_eth_proc)(dhd_wet_info_t *weth, void *sdu, uint8 *frame, int length, int send)
+{
+ uint8 *pt = frame + ETHER_TYPE_OFFSET;
+ uint16 type;
+ uint8 *ph;
+ prot_hdlr_t *phdlr;
+ /* intercept Ethernet II frame (type > 1500) */
+ if (length >= ETHER_HDR_LEN && (pt[0] > (ETHER_MAX_DATA >> 8) ||
+ (pt[0] == (ETHER_MAX_DATA >> 8) && pt[1] > (ETHER_MAX_DATA & 0xff))))
+ ;
+ /* intercept 802.3 LLC/SNAP frame (type <= 1500) */
+ else if (length >= ETHER_HDR_LEN + SNAP_HDR_LEN + ETHER_TYPE_LEN) {
+ uint8 *llc = frame + ETHER_HDR_LEN;
+ if (bcmp(llc_snap_hdr, llc, SNAP_HDR_LEN))
+ return 0;
+ pt = llc + SNAP_HDR_LEN;
+ }
+ /* frame too short, bail out */
+ else {
+ DHD_ERROR(("wet_eth_proc: %s short eth frame, ignored\n",
+ send ? "send" : "recv"));
+ return -1;
+ }
+ ph = pt + ETHER_TYPE_LEN;
+ length -= ph - frame;
+
+ /* Call protocol specific handler to process frame. */
+ type = *(uint16 *)pt;
+
+ for (phdlr = &prot_hash[WET_PROT_HASH(pt)];
+ phdlr != NULL; phdlr = phdlr->next) {
+ if (phdlr->type != type || !phdlr->prot_proc)
+ continue;
+ return (phdlr->prot_proc)(weth, sdu, frame, ph, length, send);
+ }
+
+ if (!bcmp(WETHWADDR(weth), frame + ETHER_SRC_OFFSET, ETHER_ADDR_LEN)) {
+ return 0;
+ }
+ else {
+ DHD_INFO(("%s: %s unknown type (0x%X), ignored %s\n",
+ __FUNCTION__, send ? "send" : "recv", type,
+ (type == 0xDD86) ? "IPv6":""));
+ /* ignore unsupported protocol from different mac addr than us */
+ return BCME_UNSUPPORTED;
+ }
+}
+
+/* process 8021p/Q tagged frame */
+/*
+* Return:
+* = 0 if frame is done ok
+* < 0 if unable to handle the frame
+* > 0 if no further processing is needed
+*/
+static int
+BCMFASTPATH(wet_vtag_proc)(dhd_wet_info_t *weth, void *sdu,
+ uint8 * eh, uint8 *vtag, int length, int send)
+{
+ uint16 type;
+ uint8 *pt;
+ prot_hdlr_t *phdlr;
+
+ /* check minimum length */
+ if (length < ETHERVLAN_HDR_LEN) {
+ DHD_ERROR(("wet_vtag_proc: %s short VLAN frame, ignored\n",
+ send ? "send" : "recv"));
+ return -1;
+ }
+
+ /*
+ * FIXME: check recursiveness to prevent stack from overflow
+ * in case someone sent frames 8100xxxxxxxx8100xxxxxxxx...
+ */
+
+ /* Call protocol specific handler to process frame. */
+ type = *(uint16 *)(pt = vtag + VLAN_TAG_LEN);
+
+ for (phdlr = &prot_hash[WET_PROT_HASH(pt)];
+ phdlr != NULL; phdlr = phdlr->next) {
+ if (phdlr->type != type || !phdlr->prot_proc)
+ continue;
+ return (phdlr->prot_proc)(weth, sdu, eh,
+ pt + ETHER_TYPE_LEN, length, send);
+ }
+
+ return 0;
+}
+
+/* process IP frame */
+/*
+* Return:
+* = 0 if frame is done ok
+* < 0 if unable to handle the frame
+* > 0 if no further processing is needed
+*/
+static int
+BCMFASTPATH(wet_ip_proc)(dhd_wet_info_t *weth, void *sdu,
+ uint8 *eh, uint8 *iph, int length, int send)
+{
+ uint8 type;
+ int ihl;
+ wet_sta_t *sta;
+ ipv4_hdlr_t *iphdlr;
+ uint8 *iaddr;
+ struct ether_addr *ea = NULL;
+ int ret, ea_off = 0;
+ char eabuf[ETHER_ADDR_STR_LEN];
+ BCM_REFERENCE(eabuf);
+
+ /* IPv4 only */
+ if (length < 1 || (IP_VER(iph) != IP_VER_4)) {
+ DHD_INFO(("wet_ip_proc: %s non IPv4 frame, ignored\n",
+ send ? "send" : "recv"));
+ return -1;
+ }
+
+ ihl = IPV4_HLEN(iph);
+
+ /* minimum length */
+ if (length < ihl) {
+ DHD_ERROR(("wet_ip_proc: %s short IPv4 frame, ignored\n",
+ send ? "send" : "recv"));
+ return -1;
+ }
+
+ /* protocol specific handling */
+ type = IPV4_PROT(iph);
+ for (iphdlr = &ipv4_hash[WET_IPV4_HASH(type)];
+ iphdlr; iphdlr = iphdlr->next) {
+ if (iphdlr->type != type || !iphdlr->ipv4_proc)
+ continue;
+ if ((ret = (iphdlr->ipv4_proc)(weth, eh,
+ iph, iph + ihl, length - ihl, send)))
+ return ret;
+ }
+
+ /* generic IP packet handling:
+ * replace the source MAC in the Ethernet header with the wireless
+ * interface's and keep track of the IP-MAC mapping when sending a frame.
+ */
+ if (send) {
+ uint32 iaddr_dest, iaddr_src;
+ bool wet_table_upd = TRUE;
+ iaddr = iph + IPV4_SRC_IP_OFFSET;
+ iaddr_dest = ntoh32(*((uint32 *)(iph + IPV4_DEST_IP_OFFSET)));
+ iaddr_src = ntoh32(*(uint32 *)(iaddr));
+
+ /* Do not update the knowledge base on receipt of a local
+ * IP multicast frame (null source IP)
+ */
+ if (IP_ISMULTI(iaddr_dest) && !iaddr_src) {
+ DHD_INFO(("recv multicast frame from %s.Don't update hash table\n",
+ bcm_ether_ntoa((struct ether_addr*)
+ (eh + ETHER_SRC_OFFSET), eabuf)));
+ wet_table_upd = FALSE;
+ }
+ if (wet_table_upd && wet_sta_update_all(weth, iaddr,
+ (struct ether_addr*)(eh + ETHER_SRC_OFFSET), &sta) < 0) {
+ DHD_INFO(("wet_ip_proc: unable to update STA %u.%u.%u.%u %s\n",
+ iaddr[0], iaddr[1], iaddr[2], iaddr[3],
+ bcm_ether_ntoa(
+ (struct ether_addr*)(eh + ETHER_SRC_OFFSET), eabuf)));
+ return -1;
+ }
+ ea = (struct ether_addr *)WETHWADDR(weth);
+ ea_off = ETHER_SRC_OFFSET;
+ eacopy(ea, eh + ea_off);
+ }
+ /*
+ * Replace the dest MAC in the Ethernet header with the
+ * mapping found when receiving a frame.
+ */
+ /* no action for received bcast/mcast ethernet frame */
+ else if (!ETHER_ISMULTI(eh)) {
+ iaddr = iph + IPV4_DEST_IP_OFFSET;
+ if (wet_sta_find_ip(weth, iaddr, &sta) < 0) {
+ DHD_ERROR(("wet_ip_proc: unable to find STA %u.%u.%u.%u\n",
+ iaddr[0], iaddr[1], iaddr[2], iaddr[3]));
+ return -1;
+ }
+ ea = &sta->mac;
+ ea_off = ETHER_DEST_OFFSET;
+ eacopy(ea, eh + ea_off);
+ }
+
+ return 0;
+}
+
+/* process ARP frame - ARP proxy */
+/*
+ * Return:
+ * = 0 if frame is done ok
+ * < 0 if unable to handle the frame
+ * > 0 if no further processing is needed
+ */
+static int
+BCMFASTPATH(wet_arp_proc)(dhd_wet_info_t *weth, void *sdu,
+ uint8 *eh, uint8 *arph, int length, int send)
+{
+ wet_sta_t *sta;
+ uint8 *iaddr;
+ char eabuf[ETHER_ADDR_STR_LEN];
+ BCM_REFERENCE(eabuf);
+
+ /*
+ * FIXME: validate ARP header:
+ * h/w Ethernet 2, proto IP x800, h/w addr size 6, proto addr size 4.
+ */
+
+ /*
+ * Replace the source MAC in the Ethernet header as well as the source
+ * MAC in the ARP protocol header when processing a transmitted frame.
+ */
+ if (send) {
+ iaddr = arph + ARP_SRC_IP_OFFSET;
+ if (wet_sta_update_all(weth, iaddr,
+ (struct ether_addr*)(eh + ETHER_SRC_OFFSET), &sta) < 0) {
+ DHD_INFO(("wet_arp_proc: unable to update STA %u.%u.%u.%u %s\n",
+ iaddr[0], iaddr[1], iaddr[2], iaddr[3],
+ bcm_ether_ntoa(
+ (struct ether_addr*)(eh + ETHER_SRC_OFFSET), eabuf)));
+ return -1;
+ }
+ bcopy(WETHWADDR(weth), eh + ETHER_SRC_OFFSET, ETHER_ADDR_LEN);
+ bcopy(WETHWADDR(weth), arph+ARP_SRC_ETH_OFFSET, ETHER_ADDR_LEN);
+ }
+ /*
+ * Replace the dest MAC in the Ethernet header as well as the dest
+ * MAC in the ARP protocol header when processing a received frame.
+ * Process ARP replies and unicast ARP requests.
+ */
+ else if ((*(uint16 *)(arph + ARP_OPC_OFFSET) == HTON16(ARP_OPC_REPLY)) ||
+ ((*(uint16 *)(arph + ARP_OPC_OFFSET) == HTON16(ARP_OPC_REQUEST)) &&
+ (!ETHER_ISMULTI(eh)))) {
+ iaddr = arph + ARP_TGT_IP_OFFSET;
+ if (wet_sta_find_ip(weth, iaddr, &sta) < 0) {
+ DHD_INFO(("wet_arp_proc: unable to find STA %u.%u.%u.%u\n",
+ iaddr[0], iaddr[1], iaddr[2], iaddr[3]));
+ return -1;
+ }
+ bcopy(&sta->mac, arph + ARP_TGT_ETH_OFFSET, ETHER_ADDR_LEN);
+ bcopy(&sta->mac, eh + ETHER_DEST_OFFSET, ETHER_ADDR_LEN);
+ }
+
+ return 0;
+}
+
+/* process UDP frame */
+/*
+ * Return:
+ * = 0 if frame is done ok
+ * < 0 if unable to handle the frame
+ * > 0 if no further processing is needed
+ */
+static int
+BCMFASTPATH(wet_udp_proc)(dhd_wet_info_t *weth,
+ uint8 *eh, uint8 *iph, uint8 *udph, int length, int send)
+{
+ udp_hdlr_t *udphdlr;
+ uint16 port;
+
+ /* check frame length, at least UDP_HDR_LEN */
+ if ((length -= UDP_HDR_LEN) < 0) {
+ DHD_ERROR(("wet_udp_proc: %s short UDP frame, ignored\n",
+ send ? "send" : "recv"));
+ return -1;
+ }
+
+ /*
+ * Unfortunately we must spend some time here on special
+ * processing for certain higher-layer protocols.
+ * See the individual handlers for protocol-specific details.
+ */
+ port = *(uint16 *)(udph + UDP_DEST_PORT_OFFSET);
+ for (udphdlr = &udp_hash[WET_UDP_HASH((uint8 *)&port)];
+ udphdlr; udphdlr = udphdlr->next) {
+ if (udphdlr->port != port || !udphdlr->udp_proc)
+ continue;
+ return (udphdlr->udp_proc)(weth, eh, iph, udph,
+ udph + UDP_HDR_LEN, length, send);
+ }
+
+ return 0;
+}
+
+/*
+ * DHCP is a 'complex' protocol for WET, mainly because it
+ * uses its protocol body to convey IP/MAC info. It is impossible
+ * to forward frames correctly back and forth without looking
+ * into the DHCP's body and interpreting it. See RFC2131 sect.
+ * 4.1 'Constructing and sending DHCP messages' for details
+ * of using/parsing various fields in the body.
+ *
+ * DHCP pass through:
+ *
+ * Until we can provide DHCP relay functionality we must alter
+ * the DHCP flags to broadcast so that the server replies to the
+ * broadcast address. Otherwise the DHCP server will send its
+ * replies using the DHCP client's MAC address. Such replies
+ * will not be delivered simply because:
+ *
+ * 1. The AP's bridge will not forward the replies back to
+ * this device over the wireless link because it does not
+ * know such a node exists on this link. The bridge's forwarding
+ * table on the AP holds this device's MAC address only;
+ * it knows nothing about the hosts behind this device.
+ *
+ * 2. The AP's wireless driver won't let such frames out
+ * either, even if they made their way out of the AP's bridge
+ * through the bridge's DLF broadcasting, because there is
+ * no such STA associated with the AP.
+ *
+ * 3. This device's MAC won't let such frames pass through
+ * in non-promiscuous mode even if they somehow made their
+ * way out of the AP's wireless interface.
+ *
+ * DHCP relay:
+ *
+ * Once the WET is configured with the host MAC address it can
+ * relay the host request as if it were sent from WET itself.
+ *
+ * Once the WET is configured with the host IP address it can
+ * pretend to be the host and act as a relay agent.
+ *
+ * process DHCP client frame (client to server, or server to relay agent)
+ * Return:
+ * = 0 if frame is done ok
+ * < 0 if unable to handle the frame
+ * > 0 if no further processing is needed
+ */
+static int
+BCMFASTPATH(wet_dhcpc_proc)(dhd_wet_info_t *weth,
+ uint8 *eh, uint8 *iph, uint8 *udph, uint8 *dhcp, int length, int send)
+{
+ wet_sta_t *sta;
+ uint16 flags;
+ char eabuf[ETHER_ADDR_STR_LEN];
+ uint16 port;
+ uint8 *ipv4;
+ const struct ether_addr *ether;
+ BCM_REFERENCE(eabuf);
+
+ /*
+ * FIXME: validate DHCP body:
+ * htype Ethernet 1, hlen Ethernet 6, frame length at least 242.
+ */
+
+ /* only interested in requests when sending to server */
+ if (send && *(dhcp + DHCP_TYPE_OFFSET) != DHCP_TYPE_REQUEST)
+ return 0;
+ /* only interested in replies when receiving from server as a relay agent */
+ if (!send && *(dhcp + DHCP_TYPE_OFFSET) != DHCP_TYPE_REPLY)
+ return 0;
+
+ /* send request */
+ if (send) {
+ /* find existing or alloc new IP/MAC mapping entry */
+ if (wet_sta_update_mac(weth,
+ (struct ether_addr*)(dhcp + DHCP_CHADDR_OFFSET), &sta) < 0) {
+ DHD_INFO(("wet_dhcpc_proc: unable to update STA %s\n",
+ bcm_ether_ntoa(
+ (struct ether_addr*)(dhcp + DHCP_CHADDR_OFFSET), eabuf)));
+ return -1;
+ }
+ bcopy(dhcp + DHCP_FLAGS_OFFSET, &flags, DHCP_FLAGS_LEN);
+ /* We can always relay the host's request when we know its MAC addr. */
+ if (!ETHER_ISNULLADDR(weth->mac.octet) &&
+ !bcmp(dhcp + DHCP_CHADDR_OFFSET, &weth->mac, ETHER_ADDR_LEN)) {
+ /* replace chaddr with host's MAC */
+ csum_fixup_16(udph + UDP_CHKSUM_OFFSET,
+ dhcp + DHCP_CHADDR_OFFSET, ETHER_ADDR_LEN,
+ WETHWADDR(weth), ETHER_ADDR_LEN);
+ bcopy(WETHWADDR(weth), dhcp + DHCP_CHADDR_OFFSET, ETHER_ADDR_LEN);
+ /* force reply to be unicast */
+ flags &= ~HTON16(DHCP_FLAG_BCAST);
+ }
+ /* We can relay other clients' requests when we know the host's IP addr. */
+ else if (!IPV4_ADDR_NULL(weth->ip)) {
+ /* we can only handle the first hop otherwise drop it */
+ if (!IPV4_ADDR_NULL(dhcp + DHCP_GIADDR_OFFSET)) {
+ DHD_INFO(("wet_dhcpc_proc: not first hop, ignored\n"));
+ return -1;
+ }
+ /* replace giaddr with host's IP */
+ csum_fixup_16(udph + UDP_CHKSUM_OFFSET,
+ dhcp + DHCP_GIADDR_OFFSET, IPV4_ADDR_LEN,
+ weth->ip, IPV4_ADDR_LEN);
+ bcopy(weth->ip, dhcp + DHCP_GIADDR_OFFSET, IPV4_ADDR_LEN);
+ /* force reply to be unicast */
+ flags &= ~HTON16(DHCP_FLAG_BCAST);
+ }
+ /*
+ * The request comes in when we don't know the host's MAC and/or
+ * IP address, hence we can't relay the request. We must notify
+ * the server of our addressing limitation by turning on the
+ * broadcast bit at this point, as the comments above explain.
+ */
+ else
+ flags |= HTON16(DHCP_FLAG_BCAST);
+ /* update flags */
+ bcopy(dhcp + DHCP_FLAGS_OFFSET, sta->flags, DHCP_FLAGS_LEN);
+ if (flags != *(uint16 *)sta->flags) {
+ csum_fixup_16(udph + UDP_CHKSUM_OFFSET,
+ dhcp + DHCP_FLAGS_OFFSET, DHCP_FLAGS_LEN,
+ (uint8 *)&flags, DHCP_FLAGS_LEN);
+ bcopy((uint8 *)&flags, dhcp + DHCP_FLAGS_OFFSET,
+ DHCP_FLAGS_LEN);
+ }
+ /* replace the Ethernet source MAC with ours */
+ bcopy(WETHWADDR(weth), eh + ETHER_SRC_OFFSET, ETHER_ADDR_LEN);
+ }
+ /* relay recv'd reply to its destination */
+ else if (!IPV4_ADDR_NULL(weth->ip) &&
+ !bcmp(dhcp + DHCP_GIADDR_OFFSET, weth->ip, IPV4_ADDR_LEN)) {
+ /* find IP/MAC mapping entry */
+ if (wet_sta_find_mac(weth,
+ (struct ether_addr*)(dhcp + DHCP_CHADDR_OFFSET), &sta) < 0) {
+ DHD_INFO(("wet_dhcpc_proc: unable to find STA %s\n",
+ bcm_ether_ntoa(
+ (struct ether_addr*)(dhcp + DHCP_CHADDR_OFFSET), eabuf)));
+ return -1;
+ }
+ /*
+ * XXX the following code works for the first hop only
+ */
+ /* restore the DHCP giaddr with its original */
+ csum_fixup_16(udph + UDP_CHKSUM_OFFSET,
+ dhcp + DHCP_GIADDR_OFFSET, IPV4_ADDR_LEN,
+ ipv4_null, IPV4_ADDR_LEN);
+ bcopy(ipv4_null, dhcp + DHCP_GIADDR_OFFSET, IPV4_ADDR_LEN);
+ /* restore the original client's dhcp flags */
+ if (bcmp(dhcp + DHCP_FLAGS_OFFSET, sta->flags, DHCP_FLAGS_LEN)) {
+ csum_fixup_16(udph + UDP_CHKSUM_OFFSET,
+ dhcp + DHCP_FLAGS_OFFSET, DHCP_FLAGS_LEN,
+ sta->flags, DHCP_FLAGS_LEN);
+ bcopy(sta->flags, dhcp + DHCP_FLAGS_OFFSET, DHCP_FLAGS_LEN);
+ }
+ /* replace the dest UDP port with DHCP client port */
+ port = HTON16(DHCP_PORT_CLIENT);
+ csum_fixup_16(udph + UDP_CHKSUM_OFFSET,
+ udph + UDP_DEST_PORT_OFFSET, UDP_PORT_LEN,
+ (uint8 *)&port, UDP_PORT_LEN);
+ bcopy((uint8 *)&port, udph + UDP_DEST_PORT_OFFSET, UDP_PORT_LEN);
+ /* replace the dest MAC & IP addr with the client's */
+ if (*(uint16 *)sta->flags & HTON16(DHCP_FLAG_BCAST)) {
+ ipv4 = ipv4_bcast;
+ ether = &ether_bcast;
+ }
+ else {
+ ipv4 = dhcp + DHCP_YIADDR_OFFSET;
+ ether = &sta->mac;
+ }
+ csum_fixup_16(udph + UDP_CHKSUM_OFFSET,
+ iph + IPV4_DEST_IP_OFFSET, IPV4_ADDR_LEN,
+ ipv4, IPV4_ADDR_LEN);
+ csum_fixup_16(iph + IPV4_CHKSUM_OFFSET,
+ iph + IPV4_DEST_IP_OFFSET, IPV4_ADDR_LEN,
+ ipv4, IPV4_ADDR_LEN);
+ bcopy(ipv4, iph + IPV4_DEST_IP_OFFSET, IPV4_ADDR_LEN);
+ bcopy(ether, eh + ETHER_DEST_OFFSET, ETHER_ADDR_LEN);
+ }
+ /* we should never receive a non-relayed reply, but handle it just in case */
+ else {
+ DHD_INFO(("wet_dhcpc_proc: ignore recv'd frame from %s\n",
+ bcm_ether_ntoa((struct ether_addr*)(dhcp + DHCP_CHADDR_OFFSET), eabuf)));
+ return -1;
+ }
+
+ /* no further processing! */
+ return 1;
+}
+
+/* process DHCP server frame (server to client) */
+/*
+ * Return:
+ * = 0 if frame is done ok
+ * < 0 if unable to handle the frame
+ * > 0 if no further processing is needed
+ */
+static int
+BCMFASTPATH(wet_dhcps_proc)(dhd_wet_info_t *weth,
+ uint8 *eh, uint8 *iph, uint8 *udph, uint8 *dhcp, int length, int send)
+{
+ wet_sta_t *sta;
+ char eabuf[ETHER_ADDR_STR_LEN];
+ BCM_REFERENCE(eabuf);
+
+ /*
+ * FIXME: validate DHCP body:
+ * htype Ethernet 1, hlen Ethernet 6, frame length at least 242.
+ */
+
+ /* only interested in replies when receiving from server */
+ if (send || *(dhcp + DHCP_TYPE_OFFSET) != DHCP_TYPE_REPLY)
+ return 0;
+
+ /* find IP/MAC mapping entry */
+ if (wet_sta_find_mac(weth, (struct ether_addr*)(dhcp + DHCP_CHADDR_OFFSET), &sta) < 0) {
+ DHD_INFO(("wet_dhcps_proc: unable to find STA %s\n",
+ bcm_ether_ntoa((struct ether_addr*)(dhcp + DHCP_CHADDR_OFFSET), eabuf)));
+ return -1;
+ }
+ /* relay the reply to the host when we know the host's MAC addr */
+ if (!ETHER_ISNULLADDR(weth->mac.octet) &&
+ !bcmp(dhcp + DHCP_CHADDR_OFFSET, WETHWADDR(weth), ETHER_ADDR_LEN)) {
+ csum_fixup_16(udph + UDP_CHKSUM_OFFSET,
+ dhcp + DHCP_CHADDR_OFFSET, ETHER_ADDR_LEN,
+ weth->mac.octet, ETHER_ADDR_LEN);
+ bcopy(&weth->mac, dhcp + DHCP_CHADDR_OFFSET, ETHER_ADDR_LEN);
+ }
+ /* restore the original client's dhcp flags if necessary */
+ if (bcmp(dhcp + DHCP_FLAGS_OFFSET, sta->flags, DHCP_FLAGS_LEN)) {
+ csum_fixup_16(udph + UDP_CHKSUM_OFFSET,
+ dhcp + DHCP_FLAGS_OFFSET, DHCP_FLAGS_LEN,
+ sta->flags, DHCP_FLAGS_LEN);
+ bcopy(sta->flags, dhcp + DHCP_FLAGS_OFFSET, DHCP_FLAGS_LEN);
+ }
+ /* replace the dest MAC with the client's */
+ if (*(uint16 *)sta->flags & HTON16(DHCP_FLAG_BCAST))
+ bcopy((const uint8 *)&ether_bcast, eh + ETHER_DEST_OFFSET, ETHER_ADDR_LEN);
+ else
+ bcopy(&sta->mac, eh + ETHER_DEST_OFFSET, ETHER_ADDR_LEN);
+
+ /* no further processing! */
+ return 1;
+}
+
+/* alloc IP/MAC mapping entry
+ * Returns 0 if succeeded; < 0 otherwise.
+ */
+static int
+wet_sta_alloc(dhd_wet_info_t *weth, wet_sta_t **saddr)
+{
+ wet_sta_t *sta;
+
+ /* allocate a new one */
+ if (!weth->stafree) {
+ DHD_INFO(("wet_sta_alloc: no room for another STA\n"));
+ return -1;
+ }
+ sta = weth->stafree;
+ weth->stafree = sta->next;
+
+ /* init them just in case */
+ sta->next = NULL;
+ sta->next_ip = NULL;
+ sta->next_mac = NULL;
+
+ *saddr = sta;
+ return 0;
+}
+
+/* update IP/MAC mapping entry and hash
+ * Returns 0 if succeeded; < 0 otherwise.
+ */
+static int
+BCMFASTPATH(wet_sta_update_all)(dhd_wet_info_t *weth, uint8 *iaddr, struct ether_addr *eaddr,
+ wet_sta_t **saddr)
+{
+ wet_sta_t *sta;
+ int i;
+ char eabuf[ETHER_ADDR_STR_LEN];
+ BCM_REFERENCE(eabuf);
+
+ /* find the existing one and remove it from the old IP hash link */
+ if (!wet_sta_find_mac(weth, eaddr, &sta)) {
+ i = WET_STA_HASH_IP(sta->ip);
+ if (bcmp(sta->ip, iaddr, IPV4_ADDR_LEN)) {
+ wet_sta_t *sta2, **next;
+ for (next = &weth->stahash_ip[i], sta2 = *next;
+ sta2; sta2 = sta2->next_ip) {
+ if (sta2 == sta)
+ break;
+ next = &sta2->next_ip;
+ }
+ if (sta2) {
+ *next = sta2->next_ip;
+ sta2->next_ip = NULL;
+ }
+ i = WET_STA_HASH_UNK;
+ }
+ }
+ /* allocate a new one and hash it by MAC */
+ else if (!wet_sta_alloc(weth, &sta)) {
+ i = WET_STA_HASH_MAC(eaddr->octet);
+ bcopy(eaddr, &sta->mac, ETHER_ADDR_LEN);
+ sta->next_mac = weth->stahash_mac[i];
+ weth->stahash_mac[i] = sta;
+ i = WET_STA_HASH_UNK;
+ }
+ /* bail out if we can't find or create one */
+ else {
+ DHD_INFO(("wet_sta_update_all: unable to alloc STA %u.%u.%u.%u %s\n",
+ iaddr[0], iaddr[1], iaddr[2], iaddr[3],
+ bcm_ether_ntoa(eaddr, eabuf)));
+ return -1;
+ }
+
+ /* update IP and hash by new IP */
+ if (i == WET_STA_HASH_UNK) {
+ i = WET_STA_HASH_IP(iaddr);
+ bcopy(iaddr, sta->ip, IPV4_ADDR_LEN);
+ sta->next_ip = weth->stahash_ip[i];
+ weth->stahash_ip[i] = sta;
+
+ /* start here and look for other entries with same IP address */
+ {
+ wet_sta_t *sta2, *prev;
+ prev = sta;
+ for (sta2 = sta->next_ip; sta2; sta2 = sta2->next_ip) {
+ /* does this entry have the same IP address? */
+ if (!bcmp(sta->ip, sta2->ip, IPV4_ADDR_LEN)) {
+ /* sta2 currently points to the entry we need to remove */
+ /* fix next pointers */
+ prev->next_ip = sta2->next_ip;
+ sta2->next_ip = NULL;
+ /* now find this entry in the MAC list and
+ remove it from that list too.
+ */
+ wet_sta_remove_mac_entry(weth, &sta2->mac);
+ /* entry should be completely out of the table now,
+ add it to the free list
+ */
+ memset(sta2, 0, sizeof(wet_sta_t));
+ sta2->next = weth->stafree;
+ weth->stafree = sta2;
+
+ sta2 = prev;
+ }
+ prev = sta2;
+ }
+ }
+ }
+
+ *saddr = sta;
+ return 0;
+}
+
+/* update IP/MAC mapping entry and hash */
+static int
+BCMFASTPATH(wet_sta_update_mac)(dhd_wet_info_t *weth, struct ether_addr *eaddr, wet_sta_t **saddr)
+{
+ wet_sta_t *sta;
+ int i;
+ char eabuf[ETHER_ADDR_STR_LEN];
+ BCM_REFERENCE(eabuf);
+
+ /* find the existing one */
+ if (!wet_sta_find_mac(weth, eaddr, &sta))
+ ;
+ /* allocate a new one and hash it */
+ else if (!wet_sta_alloc(weth, &sta)) {
+ i = WET_STA_HASH_MAC(eaddr->octet);
+ bcopy(eaddr, &sta->mac, ETHER_ADDR_LEN);
+ sta->next_mac = weth->stahash_mac[i];
+ weth->stahash_mac[i] = sta;
+ }
+ /* bail out if we can't find or create one */
+ else {
+ DHD_INFO(("wet_sta_update_mac: unable to alloc STA %s\n",
+ bcm_ether_ntoa(eaddr, eabuf)));
+ return -1;
+ }
+
+ *saddr = sta;
+ return 0;
+}
+
+/* Remove MAC entry from hash list
+ * NOTE: This only removes the entry matching "eaddr" from the MAC
+ * list. The caller needs to remove from the IP list and
+ * put back onto the free list to completely remove the entry
+ * from the WET table.
+ */
+static int
+BCMFASTPATH(wet_sta_remove_mac_entry)(dhd_wet_info_t *weth, struct ether_addr *eaddr)
+{
+ wet_sta_t *sta, *prev;
+ int i = WET_STA_HASH_MAC(eaddr->octet);
+ char eabuf[ETHER_ADDR_STR_LEN];
+ int found = 0;
+ BCM_REFERENCE(eabuf);
+
+ /* find the existing one */
+ for (sta = prev = weth->stahash_mac[i]; sta; sta = sta->next_mac) {
+ if (!bcmp(&sta->mac, eaddr, ETHER_ADDR_LEN)) {
+ found = 1;
+ break;
+ }
+ prev = sta;
+ }
+
+ /* bail out if we can't find */
+ if (!found) {
+ DHD_INFO(("wet_sta_remove_mac_entry: unable to find STA %s entry\n",
+ bcm_ether_ntoa(eaddr, eabuf)));
+ return -1;
+ }
+
+ /* fix the list */
+ if (prev == sta)
+ weth->stahash_mac[i] = sta->next_mac; /* removing first entry in this bucket */
+ else
+ prev->next_mac = sta->next_mac;
+
+ return 0;
+}
+
+/* find IP/MAC mapping entry by IP address
+ * Returns 0 if succeeded; < 0 otherwise.
+ */
+static int
+BCMFASTPATH(wet_sta_find_ip)(dhd_wet_info_t *weth, uint8 *iaddr, wet_sta_t **saddr)
+{
+ int i = WET_STA_HASH_IP(iaddr);
+ wet_sta_t *sta;
+
+ /* find the existing one by IP */
+ for (sta = weth->stahash_ip[i]; sta; sta = sta->next_ip) {
+ if (bcmp(sta->ip, iaddr, IPV4_ADDR_LEN))
+ continue;
+ *saddr = sta;
+ return 0;
+ }
+
+ /* sta has not been learned */
+ DHD_INFO(("wet_sta_find_ip: unable to find STA %u.%u.%u.%u\n",
+ iaddr[0], iaddr[1], iaddr[2], iaddr[3]));
+ return -1;
+}
+
+/* find IP/MAC mapping entry by MAC address
+ * Returns 0 if succeeded; < 0 otherwise.
+ */
+static int
+BCMFASTPATH(wet_sta_find_mac)(dhd_wet_info_t *weth, struct ether_addr *eaddr, wet_sta_t **saddr)
+{
+ int i = WET_STA_HASH_MAC(eaddr->octet);
+ wet_sta_t *sta;
+ char eabuf[ETHER_ADDR_STR_LEN];
+ BCM_REFERENCE(eabuf);
+
+ /* find the existing one by MAC */
+ for (sta = weth->stahash_mac[i]; sta; sta = sta->next_mac) {
+ if (bcmp(&sta->mac, eaddr, ETHER_ADDR_LEN))
+ continue;
+ *saddr = sta;
+ return 0;
+ }
+
+ /* sta has not been learned */
+ DHD_INFO(("wet_sta_find_mac: unable to find STA %s\n",
+ bcm_ether_ntoa(eaddr, eabuf)));
+ return -1;
+}
+
+/* Adjust 16 bit checksum - taken from RFC 3022.
+ *
+ * The algorithm below is applicable only for even offsets (i.e., optr
+ * below must be at an even offset from start of header) and even lengths
+ * (i.e., olen and nlen below must be even).
+ */
+static void
+BCMFASTPATH(csum_fixup_16)(uint8 *chksum, uint8 *optr, int olen, uint8 *nptr, int nlen)
+{
+ long x, old, new;
+
+ ASSERT(!((uintptr_t)optr&1) && !(olen&1));
+ ASSERT(!((uintptr_t)nptr&1) && !(nlen&1));
+
+ x = (chksum[0] << 8) + chksum[1];
+ if (!x)
+ return;
+ x = ~x & 0xFFFF;
+ while (olen) {
+ old = (optr[0] << 8) + optr[1]; optr += 2;
+ x -= old & 0xffff;
+ if (x <= 0) { x--; x &= 0xffff; }
+ olen -= 2;
+ }
+ while (nlen) {
+ new = (nptr[0] << 8) + nptr[1]; nptr += 2;
+ x += new & 0xffff;
+ if (x & 0x10000) { x++; x &= 0xffff; }
+ nlen -= 2;
+ }
+ x = ~x & 0xFFFF;
+ chksum[0] = (uint8)(x >> 8); chksum[1] = (uint8)x;
+}
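+
+/* Illustrative sketch (not compiled): a typical caller patches a 16-bit
+ * field and keeps the ones-complement checksum consistent, as in the UDP
+ * destination port rewrite in wet_dhcpc_proc above:
+ *
+ *   uint16 port = HTON16(DHCP_PORT_CLIENT);
+ *   csum_fixup_16(udph + UDP_CHKSUM_OFFSET,
+ *       udph + UDP_DEST_PORT_OFFSET, UDP_PORT_LEN,
+ *       (uint8 *)&port, UDP_PORT_LEN);
+ *   bcopy((uint8 *)&port, udph + UDP_DEST_PORT_OFFSET, UDP_PORT_LEN);
+ *
+ * The old bytes are subtracted from and the new bytes added to the folded
+ * sum, so the datagram never needs a full re-checksum.
+ */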
+
+/* Process frames in the transmit direction by replacing the source MAC
+ * with the wireless interface's, and maintain the IP-MAC address mapping table.
+ * Return:
+ * = 0 if frame is done ok;
+ * < 0 if unable to handle the frame;
+ *
+ * To keep other interfaces from seeing our changes, especially
+ * changes to a broadcast frame, which will certainly be seen by
+ * other bridged interfaces, we must copy the frame to our own
+ * buffer, modify the copy, and then send it.
+ * Return the new sdu in 'new'.
+ */
+int
+BCMFASTPATH(dhd_wet_send_proc)(void *wet, void *sdu, void **new)
+{
+ dhd_wet_info_t *weth = (dhd_wet_info_t *)wet;
+ uint8 *frame = PKTDATA(WETOSH(weth), sdu);
+ int length = PKTLEN(WETOSH(weth), sdu);
+ void *pkt = sdu;
+
+ /*
+ * FIXME: need to tell if buffer is shared and only
+ * do copy on shared buffer.
+ */
+ /*
+ * copy a broadcast/multicast frame to our own packet,
+ * otherwise we corrupt the original because we alter
+ * the frame content.
+ */
+ if (length < ETHER_HDR_LEN) {
+ DHD_ERROR(("dhd_wet_send_proc: unable to process short frame\n"));
+ return -1;
+ }
+ if (ETHER_ISMULTI(frame)) {
+ length = pkttotlen(WETOSH(weth), sdu);
+ if (!(pkt = PKTGET(WETOSH(weth), length, TRUE))) {
+ DHD_ERROR(("dhd_wet_send_proc: unable to alloc, dropped\n"));
+ return -1;
+ }
+ frame = PKTDATA(WETOSH(weth), pkt);
+ pktcopy(WETOSH(weth), sdu, 0, length, frame);
+ /* Transfer priority */
+ PKTSETPRIO(pkt, PKTPRIO(sdu));
+ PKTFREE(WETOSH(weth), sdu, TRUE);
+ PKTSETLEN(WETOSH(weth), pkt, length);
+ }
+ *new = pkt;
+
+ /* process frame */
+ return wet_eth_proc(weth, pkt, frame, length, 1) < 0 ? -1 : 0;
+}
+
+/*
+ * Process frames in receive direction by replacing destination MAC with
+ * the one found in IP MAC address mapping table.
+ * Return:
+ * = 0 if frame is done ok;
+ * < 0 if unable to handle the frame;
+ */
+int
+BCMFASTPATH(dhd_wet_recv_proc)(void *wet, void *sdu)
+{
+ dhd_wet_info_t *weth = (dhd_wet_info_t *)wet;
+ /* process frame */
+ return wet_eth_proc(weth, sdu, PKTDATA(WETOSH(weth), sdu),
+ PKTLEN(WETOSH(weth), sdu), 0) < 0 ? -1 : 0;
+}
+
+/* Delete WET Database */
+void
+dhd_wet_sta_delete_list(dhd_pub_t *dhd_pub)
+{
+ wet_sta_t *sta, *next_sta;
+ int i, j;
+ dhd_wet_info_t *weth = dhd_pub->wet_info;
+
+ for (i = 0; i < WET_STA_HASH_SIZE; i ++) {
+ for (sta = weth->stahash_mac[i]; sta; sta = next_sta) {
+ wet_sta_t *sta2, **next;
+ /* save the successor before the entry is unlinked and wiped */
+ next_sta = sta->next_mac;
+ j = WET_STA_HASH_IP(sta->ip);
+ for (next = &weth->stahash_ip[j], sta2 = *next;
+ sta2; sta2 = sta2->next_ip) {
+ if (sta2 == sta)
+ break;
+ next = &sta2->next_ip;
+ }
+ if (sta2) {
+ *next = sta2->next_ip;
+ sta2->next_ip = NULL;
+ }
+
+ wet_sta_remove_mac_entry(weth, &sta->mac);
+ memset(sta, 0, sizeof(wet_sta_t));
+ /* return the entry to the free list */
+ sta->next = weth->stafree;
+ weth->stafree = sta;
+ }
+ }
+}
+
+void
+dhd_wet_dump(dhd_pub_t *dhdp, struct bcmstrbuf *b)
+{
+ char eabuf[ETHER_ADDR_STR_LEN];
+ wet_sta_t *sta;
+ int i;
+ dhd_wet_info_t *weth = dhdp->wet_info;
+
+ bcm_bprintf(b, "Host MAC: %s\n", bcm_ether_ntoa(&weth->mac, eabuf));
+ bcm_bprintf(b, "Host IP: %u.%u.%u.%u\n",
+ weth->ip[0], weth->ip[1], weth->ip[2], weth->ip[3]);
+ bcm_bprintf(b, "Entry\tEnetAddr\t\tInetAddr\n");
+ for (i = 0; i < WET_NUMSTAS; i ++) {
+ /* FIXME: it leaves the last sta entry unfiltered, who cares! */
+ if (weth->sta[i].next)
+ continue;
+ /* format the entry dump */
+ sta = &weth->sta[i];
+ bcm_bprintf(b, "%u\t%s\t%u.%u.%u.%u\n",
+ i, bcm_ether_ntoa(&sta->mac, eabuf),
+ sta->ip[0], sta->ip[1], sta->ip[2], sta->ip[3]);
+ }
+}
diff --git a/bcmdhd.101.10.361.x/dhd_wet.h b/bcmdhd.101.10.361.x/dhd_wet.h
new file mode 100755
index 0000000..21b8429
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_wet.h
@@ -0,0 +1,60 @@
+/*
+ * Wireless Ethernet (WET) interface
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+/** XXX Twiki: [WirelessEthernet] */
+
+#ifndef _dhd_wet_h_
+#define _dhd_wet_h_
+
+#include <ethernet.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+
+#define DHD_WET_ENAB 1
+#define WET_ENABLED(dhdp) ((dhdp)->info->wet_mode == DHD_WET_ENAB)
+
+/* forward declaration */
+typedef struct dhd_wet_info dhd_wet_info_t;
+
+extern dhd_wet_info_t *dhd_get_wet_info(dhd_pub_t *pub);
+extern void dhd_free_wet_info(dhd_pub_t *pub, void *wet);
+
+/* Process frames in transmit direction */
+extern int dhd_wet_send_proc(void *weth, void *sdu, void **new);
+extern void dhd_set_wet_host_ipv4(dhd_pub_t *pub, void *parms, uint32 len);
+extern void dhd_set_wet_host_mac(dhd_pub_t *pub, void *parms, uint32 len);
+/* Process frames in receive direction */
+extern int dhd_wet_recv_proc(void *weth, void *sdu);
+extern void dhd_wet_sta_delete_list(dhd_pub_t *dhd_pub);
+
+#ifdef PLC_WET
+extern void dhd_wet_bssid_upd(dhd_wet_info_t *weth, dhd_bsscfg_t *cfg);
+#endif /* PLC_WET */
+
+int dhd_set_wet_mode(dhd_pub_t *dhdp, uint32 val);
+int dhd_get_wet_mode(dhd_pub_t *dhdp);
+extern void dhd_wet_dump(dhd_pub_t *dhdp, struct bcmstrbuf *b);
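+
+/* Illustrative call flow (a sketch only, using the names declared above):
+ * the bus transmit path is expected to route frames through WET when
+ * enabled, roughly:
+ *
+ *   if (WET_ENABLED(dhdp)) {
+ *       void *new_pkt = pktbuf;
+ *       if (dhd_wet_send_proc(dhdp->wet_info, pktbuf, &new_pkt) < 0)
+ *           return; // drop: frame could not be processed
+ *       pktbuf = new_pkt; // may be a private copy for bcast/mcast
+ *   }
+ */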
+
+#endif /* _dhd_wet_h_ */
diff --git a/bcmdhd.101.10.361.x/dhd_wlfc.c b/bcmdhd.101.10.361.x/dhd_wlfc.c
new file mode 100755
index 0000000..11283aa
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_wlfc.c
@@ -0,0 +1,4988 @@
+/*
+ * DHD PROP_TXSTATUS Module.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ *
+ */
+
+/** XXX Twiki [PropTxStatus] */
+
+#include <typedefs.h>
+#include <osl.h>
+
+#include <bcmutils.h>
+#include <bcmendian.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+
+#include <dhd_bus.h>
+
+#include <dhd_dbg.h>
+#include <dhd_config.h>
+#include <wl_android.h>
+
+#ifdef PROP_TXSTATUS /* a form of flow control between host and dongle */
+#include <wlfc_proto.h>
+#include <dhd_wlfc.h>
+#endif
+
+#ifdef DHDTCPACK_SUPPRESS
+#include <dhd_ip.h>
+#endif /* DHDTCPACK_SUPPRESS */
+
+/*
+ * wlfc naming and lock rules:
+ *
+ * 1. Private functions are named _dhd_wlfc_XXX, declared static, and perform no wlfc lock operations.
+ * 2. Public functions are named dhd_wlfc_XXX and take the wlfc lock where needed.
+ * 3. Modules outside Proptxstatus call only the public functions and perform no wlfc lock operations.
+ *
+ */
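+
+/* Illustrative sketch (hypothetical worker name, not compiled): under the
+ * convention above a public entry point looks like
+ *
+ *   int dhd_wlfc_foo(dhd_pub_t *dhd)
+ *   {
+ *       int rc;
+ *       dhd_os_wlfc_block(dhd);   // take the wlfc lock
+ *       rc = _dhd_wlfc_foo(dhd);  // static, lock-free worker
+ *       dhd_os_wlfc_unblock(dhd);
+ *       return rc;
+ *   }
+ */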
+
+#if defined (DHD_WLFC_THREAD)
+#define WLFC_THREAD_QUICK_RETRY_WAIT_MS 10 /* 10 msec */
+#define WLFC_THREAD_RETRY_WAIT_MS 10000 /* 10 sec */
+#endif /* defined (DHD_WLFC_THREAD) */
+
+#ifdef PROP_TXSTATUS
+
+#ifdef QMONITOR
+#define DHD_WLFC_QMON_COMPLETE(entry) dhd_qmon_txcomplete(&entry->qmon)
+#else
+#define DHD_WLFC_QMON_COMPLETE(entry)
+#endif /* QMONITOR */
+
+#if defined(BCMINTERNAL) && defined(OOO_DEBUG)
+
+/** for 'out of order' debug */
+static void
+_dhd_wlfc_bprint(athost_wl_status_info_t* wlfc, const char *fmt, ...)
+{
+ va_list ap, ap2;
+ int r, size;
+ uint8 *buf;
+ bool bRetry = FALSE;
+
+ if (!wlfc || !wlfc->log_buf) {
+ return;
+ }
+
+ va_start(ap, fmt);
+
+retry:
+ buf = wlfc->log_buf + wlfc->log_buf_offset;
+ size = WLFC_LOG_BUF_SIZE - 1 - wlfc->log_buf_offset;
+
+ /* use a copy of 'ap'; a va_list may not be reused after vsnprintf() consumes it */
+ va_copy(ap2, ap);
+ r = vsnprintf((char *)buf, size, fmt, ap2);
+ va_end(ap2);
+ /* A non-C99-compliant vsnprintf returns -1,
+ * a C99-compliant one returns r >= size,
+ * and bcmstdlib returns 0; handle all cases.
+ */
+ /* r == 0 also occurs when strlen(fmt) is zero,
+ * typically when "" is passed as the argument.
+ */
+ if ((r == -1) || (r >= size)) {
+ bRetry = TRUE;
+ } else {
+ wlfc->log_buf_offset += r;
+ }
+
+ if ((wlfc->log_buf_offset >= (WLFC_LOG_BUF_SIZE -1)) || bRetry) {
+ wlfc->log_buf[wlfc->log_buf_offset] = 0;
+ wlfc->log_buf_offset = 0;
+ if (!wlfc->log_buf_full) {
+ wlfc->log_buf_full = TRUE;
+ }
+
+ if (bRetry) {
+ bRetry = FALSE;
+ goto retry;
+ }
+ }
+
+ va_end(ap);
+
+ return;
+} /* _dhd_wlfc_bprint */
+
+/** for 'out of order' debug */
+static void _dhd_wlfc_print_1k_buf(uint8* buf, int size)
+{
+ /* print last 1024 bytes */
+ if (size > 1024) {
+ buf += (size - 1024);
+ }
+ printf("%s", buf);
+}
+
+/** for 'out of order' debug */
+static void
+_dhd_wlfc_print_log(athost_wl_status_info_t* wlfc)
+{
+ if (!wlfc || !wlfc->log_buf) {
+ return;
+ }
+
+ printf("%s: log_buf_full(%d), log_buf_offset(%d)\n",
+ __FUNCTION__, wlfc->log_buf_full, wlfc->log_buf_offset);
+ if (wlfc->log_buf_full) {
+ _dhd_wlfc_print_1k_buf(wlfc->log_buf + wlfc->log_buf_offset,
+ WLFC_LOG_BUF_SIZE - wlfc->log_buf_offset);
+ }
+ wlfc->log_buf[wlfc->log_buf_offset] = 0;
+ _dhd_wlfc_print_1k_buf(wlfc->log_buf, wlfc->log_buf_offset);
+ printf("\n%s: done\n", __FUNCTION__);
+
+ wlfc->log_buf_offset = 0;
+ wlfc->log_buf_full = FALSE;
+}
+
+/** for 'out of order' debug */
+static void
+_dhd_wlfc_check_send_order(athost_wl_status_info_t* wlfc, wlfc_mac_descriptor_t* entry, void* p)
+{
+ uint8 seq = WL_TXSTATUS_GET_FREERUNCTR(DHD_PKTTAG_H2DTAG(PKTTAG(p)));
+ uint8 gen = WL_TXSTATUS_GET_GENERATION(DHD_PKTTAG_H2DTAG(PKTTAG(p)));
+ uint8 prec = DHD_PKTTAG_FIFO(PKTTAG(p));
+
+ if ((entry->last_send_gen[prec] == gen) &&
+ ((uint8)(entry->last_send_seq[prec] + 1) > seq)) {
+ printf("%s: prec(%d), last(%u), p(%u)\n",
+ __FUNCTION__, prec, entry->last_send_seq[prec], seq);
+ _dhd_wlfc_print_log(wlfc);
+ }
+
+ entry->last_send_seq[prec] = seq;
+ entry->last_send_gen[prec] = gen;
+}
+
+/** for 'out of order' debug */
+static void
+_dhd_wlfc_check_complete_order(athost_wl_status_info_t* wlfc, wlfc_mac_descriptor_t* entry, void* p)
+{
+ uint8 seq = WL_TXSTATUS_GET_FREERUNCTR(DHD_PKTTAG_H2DTAG(PKTTAG(p)));
+ uint8 prec = DHD_PKTTAG_FIFO(PKTTAG(p));
+
+ entry->last_complete_seq[prec] = seq;
+}
+
+#endif /* defined(BCMINTERNAL) && defined(OOO_DEBUG) */
+
+/** reordering related */
+
+#if defined(DHD_WLFC_THREAD)
+static void
+_dhd_wlfc_thread_wakeup(dhd_pub_t *dhdp)
+{
+#if defined(LINUX)
+ dhdp->wlfc_thread_go = TRUE;
+ wake_up_interruptible(&dhdp->wlfc_wqhead);
+#endif /* LINUX */
+}
+#endif /* DHD_WLFC_THREAD */
+
+static uint16
+_dhd_wlfc_adjusted_seq(void* p, uint8 current_seq)
+{
+ uint16 seq;
+
+ if (!p) {
+ return 0xffff;
+ }
+
+ seq = WL_TXSTATUS_GET_FREERUNCTR(DHD_PKTTAG_H2DTAG(PKTTAG(p)));
+ if (seq < current_seq) {
+ /* wrap around */
+ seq += 256;
+ }
+
+ return seq;
+}
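+
+/* Illustrative example: with current_seq = 250, a packet carrying seq 3 has
+ * wrapped the 8-bit counter, so it is adjusted to 3 + 256 = 259 and still
+ * compares as later than, say, an unwrapped seq 254.
+ */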
+
+/**
+ * Enqueue a caller supplied packet on a caller supplied precedence queue, optionally reorder
+ * suppressed packets.
+ * @param[in] pq caller supplied packet queue to enqueue the packet on
+ * @param[in] prec precedence of the to-be-queued packet
+ * @param[in] p transmit packet to enqueue
+ * @param[in] qHead if TRUE, enqueue to head instead of tail. Used to maintain d11 seq order.
+ * @param[in] current_seq
+ * @param[in] reOrder reOrder on odd precedence (=suppress queue)
+ */
+static void
+_dhd_wlfc_prec_enque(struct pktq *pq, int prec, void* p, bool qHead,
+ uint8 current_seq, bool reOrder)
+{
+ struct pktq_prec *q;
+ uint16 seq, seq2;
+ void *p2, *p2_prev;
+
+ if (!p)
+ return;
+
+ ASSERT(prec >= 0 && prec < pq->num_prec);
+ ASSERT(PKTLINK(p) == NULL); /* queueing chains not allowed */
+
+ ASSERT(!pktq_full(pq));
+ ASSERT(!pktqprec_full(pq, prec));
+
+ q = &pq->q[prec];
+
+ if (q->head == NULL) {
+ /* empty queue */
+ q->head = p;
+ q->tail = p;
+ } else {
+ if (reOrder && (prec & 1)) {
+ seq = _dhd_wlfc_adjusted_seq(p, current_seq);
+ p2 = qHead ? q->head : q->tail;
+ seq2 = _dhd_wlfc_adjusted_seq(p2, current_seq);
+
+ if ((qHead &&((seq+1) > seq2)) || (!qHead && ((seq2+1) > seq))) {
+ /* need reorder */
+ p2 = q->head;
+ p2_prev = NULL;
+ seq2 = _dhd_wlfc_adjusted_seq(p2, current_seq);
+
+ while (seq > seq2) {
+ p2_prev = p2;
+ p2 = PKTLINK(p2);
+ if (!p2) {
+ break;
+ }
+ seq2 = _dhd_wlfc_adjusted_seq(p2, current_seq);
+ }
+
+ if (p2_prev == NULL) {
+ /* insert head */
+ PKTSETLINK(p, q->head);
+ q->head = p;
+ } else if (p2 == NULL) {
+ /* insert tail */
+ PKTSETLINK(p2_prev, p);
+ q->tail = p;
+ } else {
+ /* insert after p2_prev */
+ PKTSETLINK(p, PKTLINK(p2_prev));
+ PKTSETLINK(p2_prev, p);
+ }
+ goto exit;
+ }
+ }
+
+ if (qHead) {
+ PKTSETLINK(p, q->head);
+ q->head = p;
+ } else {
+ PKTSETLINK(q->tail, p);
+ q->tail = p;
+ }
+ }
+
+exit:
+
+ q->n_pkts++;
+ pq->n_pkts_tot++;
+
+ if (pq->hi_prec < prec)
+ pq->hi_prec = (uint8)prec;
+} /* _dhd_wlfc_prec_enque */
+
+/**
+ * Create a place to store all packet pointers submitted to the firmware until a status comes back,
+ * suppress or otherwise.
+ *
+ * hang-er: noun, a contrivance on which things are hung, as a hook.
+ */
+/** @deprecated soon */
+static void*
+_dhd_wlfc_hanger_create(dhd_pub_t *dhd, int max_items)
+{
+ int i;
+ wlfc_hanger_t* hanger;
+
+ /* allow only up to a specific size for now */
+ ASSERT(max_items == WLFC_HANGER_MAXITEMS);
+
+ if ((hanger = (wlfc_hanger_t*)DHD_OS_PREALLOC(dhd, DHD_PREALLOC_DHD_WLFC_HANGER,
+ WLFC_HANGER_SIZE(max_items))) == NULL) {
+ return NULL;
+ }
+ memset(hanger, 0, WLFC_HANGER_SIZE(max_items));
+ hanger->max_items = max_items;
+
+ for (i = 0; i < hanger->max_items; i++) {
+ hanger->items[i].state = WLFC_HANGER_ITEM_STATE_FREE;
+ }
+ return hanger;
+}
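+
+/* Illustrative slot lifecycle (a sketch, not compiled): before a packet is
+ * handed to the bus it is parked in a free slot until the firmware reports
+ * a tx status for it:
+ *
+ *   slot = _dhd_wlfc_hanger_get_free_slot(h);
+ *   _dhd_wlfc_hanger_pushpkt(h, pkt, slot);       // state FREE -> INUSE
+ *   ... txstatus arrives from the dongle ...
+ *   _dhd_wlfc_hanger_poppkt(h, slot, &pkt, TRUE); // state back to FREE
+ */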
+
+/** @deprecated soon */
+static int
+_dhd_wlfc_hanger_delete(dhd_pub_t *dhd, void* hanger)
+{
+ wlfc_hanger_t* h = (wlfc_hanger_t*)hanger;
+
+ if (h) {
+ DHD_OS_PREFREE(dhd, h, WLFC_HANGER_SIZE(h->max_items));
+ return BCME_OK;
+ }
+ return BCME_BADARG;
+}
+
+/** @deprecated soon */
+static uint16
+_dhd_wlfc_hanger_get_free_slot(void* hanger)
+{
+ uint32 i;
+ wlfc_hanger_t* h = (wlfc_hanger_t*)hanger;
+
+ if (h) {
+ i = h->slot_pos + 1;
+ if (i == h->max_items) {
+ i = 0;
+ }
+ while (i != h->slot_pos) {
+ if (h->items[i].state == WLFC_HANGER_ITEM_STATE_FREE) {
+ h->slot_pos = i;
+ return (uint16)i;
+ }
+ i++;
+ if (i == h->max_items)
+ i = 0;
+ }
+ h->failed_slotfind++;
+ }
+ return WLFC_HANGER_MAXITEMS;
+}
+
+/** @deprecated soon */
+static int
+_dhd_wlfc_hanger_get_genbit(void* hanger, void* pkt, uint32 slot_id, int* gen)
+{
+ int rc = BCME_OK;
+ wlfc_hanger_t* h = (wlfc_hanger_t*)hanger;
+
+ *gen = 0xff;
+
+ /* this packet was not pushed at the time it went to the firmware */
+ if (slot_id == WLFC_HANGER_MAXITEMS)
+ return BCME_NOTFOUND;
+
+ if (h) {
+ if (h->items[slot_id].state != WLFC_HANGER_ITEM_STATE_FREE) {
+ *gen = h->items[slot_id].gen;
+ }
+ else {
+ DHD_ERROR(("Error: %s():%d item not used\n",
+ __FUNCTION__, __LINE__));
+ rc = BCME_NOTFOUND;
+ }
+
+ } else {
+ rc = BCME_BADARG;
+ }
+
+ return rc;
+}
+
+/** @deprecated soon */
+static int
+_dhd_wlfc_hanger_pushpkt(void* hanger, void* pkt, uint32 slot_id)
+{
+ int rc = BCME_OK;
+ wlfc_hanger_t* h = (wlfc_hanger_t*)hanger;
+
+ if (h && (slot_id < WLFC_HANGER_MAXITEMS)) {
+ if (h->items[slot_id].state == WLFC_HANGER_ITEM_STATE_FREE) {
+ h->items[slot_id].state = WLFC_HANGER_ITEM_STATE_INUSE;
+ h->items[slot_id].pkt = pkt;
+ h->items[slot_id].pkt_state = 0;
+ h->items[slot_id].pkt_txstatus = 0;
+ h->pushed++;
+ } else {
+ h->failed_to_push++;
+ rc = BCME_NOTFOUND;
+ }
+ } else {
+ rc = BCME_BADARG;
+ }
+
+ return rc;
+}
+
+/** @deprecated soon */
+static int
+_dhd_wlfc_hanger_poppkt(void* hanger, uint32 slot_id, void** pktout, bool remove_from_hanger)
+{
+ int rc = BCME_OK;
+ wlfc_hanger_t* h = (wlfc_hanger_t*)hanger;
+
+ *pktout = NULL;
+
+ /* this packet was not pushed at the time it went to the firmware */
+ if (slot_id == WLFC_HANGER_MAXITEMS)
+ return BCME_NOTFOUND;
+
+ if (h) {
+ if (h->items[slot_id].state != WLFC_HANGER_ITEM_STATE_FREE) {
+ *pktout = h->items[slot_id].pkt;
+ if (remove_from_hanger) {
+ h->items[slot_id].state =
+ WLFC_HANGER_ITEM_STATE_FREE;
+ h->items[slot_id].pkt = NULL;
+ h->items[slot_id].gen = 0xff;
+ h->items[slot_id].identifier = 0;
+ h->popped++;
+ }
+ } else {
+ h->failed_to_pop++;
+ rc = BCME_NOTFOUND;
+ }
+ } else {
+ rc = BCME_BADARG;
+ }
+
+ return rc;
+}
+
+/** @deprecated soon */
+static int
+_dhd_wlfc_hanger_mark_suppressed(void* hanger, uint32 slot_id, uint8 gen)
+{
+ int rc = BCME_OK;
+ wlfc_hanger_t* h = (wlfc_hanger_t*)hanger;
+
+ /* this packet was not pushed at the time it went to the firmware */
+ if (slot_id == WLFC_HANGER_MAXITEMS)
+ return BCME_NOTFOUND;
+ if (h) {
+ h->items[slot_id].gen = gen;
+ if (h->items[slot_id].state == WLFC_HANGER_ITEM_STATE_INUSE) {
+ h->items[slot_id].state = WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED;
+ } else {
+ rc = BCME_BADARG;
+ }
+ } else {
+ rc = BCME_BADARG;
+ }
+
+ return rc;
+}
+
+/** remove the reference to a specific packet in the hanger */
+/** @deprecated soon */
+static bool
+_dhd_wlfc_hanger_remove_reference(wlfc_hanger_t* h, void* pkt)
+{
+ int i;
+
+ if (!h || !pkt) {
+ return FALSE;
+ }
+
+ i = WL_TXSTATUS_GET_HSLOT(DHD_PKTTAG_H2DTAG(PKTTAG(pkt)));
+
+ if ((i < h->max_items) && (pkt == h->items[i].pkt)) {
+ if (h->items[i].state == WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED) {
+ h->items[i].state = WLFC_HANGER_ITEM_STATE_FREE;
+ h->items[i].pkt = NULL;
+ h->items[i].gen = 0xff;
+ h->items[i].identifier = 0;
+ return TRUE;
+ } else {
+ DHD_ERROR(("Error: %s():%d item not suppressed\n",
+ __FUNCTION__, __LINE__));
+ }
+ }
+
+ return FALSE;
+}
+
+/** afq = At Firmware Queue, queue containing packets pending in the dongle */
+static int
+_dhd_wlfc_enque_afq(athost_wl_status_info_t* ctx, void *p)
+{
+ wlfc_mac_descriptor_t* entry;
+ uint16 entry_idx = WL_TXSTATUS_GET_HSLOT(DHD_PKTTAG_H2DTAG(PKTTAG(p)));
+ uint8 prec = DHD_PKTTAG_FIFO(PKTTAG(p));
+
+ if (entry_idx < WLFC_MAC_DESC_TABLE_SIZE)
+ entry = &ctx->destination_entries.nodes[entry_idx];
+ else if (entry_idx < (WLFC_MAC_DESC_TABLE_SIZE + WLFC_MAX_IFNUM))
+ entry = &ctx->destination_entries.interfaces[entry_idx - WLFC_MAC_DESC_TABLE_SIZE];
+ else
+ entry = &ctx->destination_entries.other;
+
+ pktq_penq(&entry->afq, prec, p);
+
+ return BCME_OK;
+}
+
+/** afq = At Firmware Queue, queue containing packets pending in the dongle */
+static int
+_dhd_wlfc_deque_afq(athost_wl_status_info_t* ctx, uint16 hslot, uint8 hcnt, uint8 prec,
+ void **pktout)
+{
+ wlfc_mac_descriptor_t *entry;
+ struct pktq *pq;
+ struct pktq_prec *q;
+ void *p, *b;
+
+ if (!ctx) {
+ DHD_ERROR(("%s: ctx(%p), pktout(%p)\n", __FUNCTION__, ctx, pktout));
+ return BCME_BADARG;
+ }
+
+ if (pktout) {
+ *pktout = NULL;
+ }
+
+ ASSERT(hslot < (WLFC_MAC_DESC_TABLE_SIZE + WLFC_MAX_IFNUM + 1));
+
+ if (hslot < WLFC_MAC_DESC_TABLE_SIZE)
+ entry = &ctx->destination_entries.nodes[hslot];
+ else if (hslot < (WLFC_MAC_DESC_TABLE_SIZE + WLFC_MAX_IFNUM))
+ entry = &ctx->destination_entries.interfaces[hslot - WLFC_MAC_DESC_TABLE_SIZE];
+ else
+ entry = &ctx->destination_entries.other;
+
+ pq = &entry->afq;
+
+ ASSERT(prec < pq->num_prec);
+
+ q = &pq->q[prec];
+
+ b = NULL;
+ p = q->head;
+
+ while (p && (hcnt != WL_TXSTATUS_GET_FREERUNCTR(DHD_PKTTAG_H2DTAG(PKTTAG(p)))))
+ {
+ b = p;
+ p = PKTLINK(p);
+ }
+
+ if (p == NULL) {
+ /* none is matched */
+ if (b) {
+ DHD_ERROR(("%s: can't find matching seq(%d)\n", __FUNCTION__, hcnt));
+ } else {
+ DHD_ERROR(("%s: queue is empty\n", __FUNCTION__));
+ }
+
+ return BCME_ERROR;
+ }
+
+ bcm_pkt_validate_chk(p, "_dhd_wlfc_deque_afq");
+
+ if (!b) {
+ /* head packet is matched */
+ if ((q->head = PKTLINK(p)) == NULL) {
+ q->tail = NULL;
+ }
+ } else {
+ /* middle packet is matched */
+ DHD_INFO(("%s: out of order, seq(%d), head_seq(%d)\n", __FUNCTION__, hcnt,
+ WL_TXSTATUS_GET_FREERUNCTR(DHD_PKTTAG_H2DTAG(PKTTAG(q->head)))));
+ ctx->stats.ooo_pkts[prec]++;
+ PKTSETLINK(b, PKTLINK(p));
+ if (PKTLINK(p) == NULL) {
+ q->tail = b;
+ }
+ }
+
+ q->n_pkts--;
+ pq->n_pkts_tot--;
+
+#ifdef WL_TXQ_STALL
+ q->dequeue_count++;
+#endif
+
+ PKTSETLINK(p, NULL);
+
+ if (pktout) {
+ *pktout = p;
+ }
+
+ return BCME_OK;
+} /* _dhd_wlfc_deque_afq */
+
+/**
+ * Flow control information piggybacks on packets, in the form of one or more TLVs. This function
+ * pushes one or more TLVs onto a packet that is going to be sent towards the dongle.
+ *
+ * @param[in] ctx
+ * @param[in/out] packet
+ * @param[in] tim_signal TRUE if parameter 'tim_bmp' is valid
+ * @param[in] tim_bmp
+ * @param[in] mac_handle
+ * @param[in] htodtag
+ * @param[in] htodseq d11 seqno for seqno reuse, only used if 'seq reuse' was agreed upon
+ * earlier between host and firmware.
+ * @param[in] skip_wlfc_hdr
+ */
+static int
+_dhd_wlfc_pushheader(athost_wl_status_info_t* ctx, void** packet, bool tim_signal,
+ uint8 tim_bmp, uint8 mac_handle, uint32 htodtag, uint16 htodseq, bool skip_wlfc_hdr)
+{
+ uint32 wl_pktinfo = 0;
+ uint8* wlh;
+ uint8 dataOffset = 0;
+ uint8 fillers;
+ uint8 tim_signal_len = 0;
+ dhd_pub_t *dhdp = (dhd_pub_t *)ctx->dhdp;
+
+ struct bdc_header *h;
+ void *p = *packet;
+
+ if (skip_wlfc_hdr)
+ goto push_bdc_hdr;
+
+ if (tim_signal) {
+ tim_signal_len = TLV_HDR_LEN + WLFC_CTL_VALUE_LEN_PENDING_TRAFFIC_BMP;
+ }
+
+ /* +2 is for Type[1] and Len[1] in TLV, plus TIM signal */
+ dataOffset = WLFC_CTL_VALUE_LEN_PKTTAG + TLV_HDR_LEN + tim_signal_len;
+ if (WLFC_GET_REUSESEQ(dhdp->wlfc_mode)) {
+ dataOffset += WLFC_CTL_VALUE_LEN_SEQ;
+ }
+
+ fillers = ROUNDUP(dataOffset, 4) - dataOffset;
+ dataOffset += fillers;
+
+ PKTPUSH(ctx->osh, p, dataOffset);
+ wlh = (uint8*) PKTDATA(ctx->osh, p);
+
+ wl_pktinfo = htol32(htodtag);
+
+ wlh[TLV_TAG_OFF] = WLFC_CTL_TYPE_PKTTAG;
+ wlh[TLV_LEN_OFF] = WLFC_CTL_VALUE_LEN_PKTTAG;
+ memcpy(&wlh[TLV_HDR_LEN] /* dst */, &wl_pktinfo, sizeof(uint32));
+
+ if (WLFC_GET_REUSESEQ(dhdp->wlfc_mode)) {
+ uint16 wl_seqinfo = htol16(htodseq);
+ wlh[TLV_LEN_OFF] += WLFC_CTL_VALUE_LEN_SEQ;
+ memcpy(&wlh[TLV_HDR_LEN + WLFC_CTL_VALUE_LEN_PKTTAG], &wl_seqinfo,
+ WLFC_CTL_VALUE_LEN_SEQ);
+ }
+
+ if (tim_signal_len) {
+ wlh[dataOffset - fillers - tim_signal_len] =
+ WLFC_CTL_TYPE_PENDING_TRAFFIC_BMP;
+ wlh[dataOffset - fillers - tim_signal_len + 1] =
+ WLFC_CTL_VALUE_LEN_PENDING_TRAFFIC_BMP;
+ wlh[dataOffset - fillers - tim_signal_len + 2] = mac_handle;
+ wlh[dataOffset - fillers - tim_signal_len + 3] = tim_bmp;
+ }
+ if (fillers)
+ memset(&wlh[dataOffset - fillers], WLFC_CTL_TYPE_FILLER, fillers);
+
+push_bdc_hdr:
+ PKTPUSH(ctx->osh, p, BDC_HEADER_LEN);
+ h = (struct bdc_header *)PKTDATA(ctx->osh, p);
+ h->flags = (BDC_PROTO_VER << BDC_FLAG_VER_SHIFT);
+ if (PKTSUMNEEDED(p))
+ h->flags |= BDC_FLAG_SUM_NEEDED;
+
+#ifdef EXT_STA
+ /* save pkt encryption exemption info for dongle */
+ h->flags &= ~BDC_FLAG_EXEMPT;
+ h->flags |= (DHD_PKTTAG_EXEMPT(PKTTAG(p)) & BDC_FLAG_EXEMPT);
+#endif /* EXT_STA */
+
+ h->priority = (PKTPRIO(p) & BDC_PRIORITY_MASK);
+ h->flags2 = 0;
+ h->dataOffset = dataOffset >> 2;
+ BDC_SET_IF_IDX(h, DHD_PKTTAG_IF(PKTTAG(p)));
+ *packet = p;
+ return BCME_OK;
+} /* _dhd_wlfc_pushheader */
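+
+/* Resulting packet layout (illustrative, assuming no TIM signal and no seq
+ * reuse): the payload is prefixed, front to back, with
+ *
+ *   [BDC header][PKTTAG TLV: type|len|htodtag][filler pad to 4B][payload]
+ *
+ * dataOffset (stored in the BDC header in 4-byte units) tells the dongle
+ * how many TLV bytes to strip to reach the payload.
+ */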
+
+/**
+ * Removes (PULLs) flow control related headers from the caller supplied packet; invoked eg
+ * when a packet is about to be freed.
+ */
+static int
+_dhd_wlfc_pullheader(athost_wl_status_info_t* ctx, void* pktbuf)
+{
+ struct bdc_header *h;
+
+ if (PKTLEN(ctx->osh, pktbuf) < BDC_HEADER_LEN) {
+ DHD_ERROR(("%s: rx data too short (%d < %d)\n", __FUNCTION__,
+ PKTLEN(ctx->osh, pktbuf), BDC_HEADER_LEN));
+ return BCME_ERROR;
+ }
+ h = (struct bdc_header *)PKTDATA(ctx->osh, pktbuf);
+
+ /* pull BDC header */
+ PKTPULL(ctx->osh, pktbuf, BDC_HEADER_LEN);
+
+ if (PKTLEN(ctx->osh, pktbuf) < (uint)(h->dataOffset << 2)) {
+ DHD_ERROR(("%s: rx data too short (%d < %d)\n", __FUNCTION__,
+ PKTLEN(ctx->osh, pktbuf), (h->dataOffset << 2)));
+ return BCME_ERROR;
+ }
+
+ /* pull wl-header */
+ PKTPULL(ctx->osh, pktbuf, (h->dataOffset << 2));
+ return BCME_OK;
+}
+
+/**
+ * @param[in/out] p packet
+ */
+static wlfc_mac_descriptor_t*
+_dhd_wlfc_find_table_entry(athost_wl_status_info_t* ctx, void* p)
+{
+ int i;
+ wlfc_mac_descriptor_t* table = ctx->destination_entries.nodes;
+ uint8 ifid = DHD_PKTTAG_IF(PKTTAG(p));
+ uint8* dstn = DHD_PKTTAG_DSTN(PKTTAG(p));
+ wlfc_mac_descriptor_t* entry = DHD_PKTTAG_ENTRY(PKTTAG(p));
+ int iftype = ctx->destination_entries.interfaces[ifid].iftype;
+
+ /* saved one exists, return it */
+ if (entry)
+ return entry;
+
+ /* Multicast destinations, STA, and P2P clients get the interface entry.
+ * STA/GC uses the per-MAC entry for TDLS destinations; TDLS destinations
+ * have their own entries.
+ */
+ if ((iftype == WLC_E_IF_ROLE_STA || ETHER_ISMULTI(dstn) ||
+ iftype == WLC_E_IF_ROLE_P2P_CLIENT) &&
+ (ctx->destination_entries.interfaces[ifid].occupied)) {
+ entry = &ctx->destination_entries.interfaces[ifid];
+ }
+
+ if (entry && ETHER_ISMULTI(dstn)) {
+ DHD_PKTTAG_SET_ENTRY(PKTTAG(p), entry);
+ return entry;
+ }
+
+ for (i = 0; i < WLFC_MAC_DESC_TABLE_SIZE; i++) {
+ if (table[i].occupied) {
+ if (table[i].interface_id == ifid) {
+ if (!memcmp(table[i].ea, dstn, ETHER_ADDR_LEN)) {
+ entry = &table[i];
+ break;
+ }
+ }
+ }
+ }
+
+ if (entry == NULL)
+ entry = &ctx->destination_entries.other;
+
+ DHD_PKTTAG_SET_ENTRY(PKTTAG(p), entry);
+
+ return entry;
+} /* _dhd_wlfc_find_table_entry */
+
+/**
+ * In case a packet must be dropped (because eg the queues are full), various tallies have to
+ * be updated. Called from several other functions.
+ * @param[in] dhdp pointer to public DHD structure
+ * @param[in] prec precedence of the packet
+ * @param[in] p the packet to be dropped
+ * @param[in] bPktInQ TRUE if packet is part of a queue
+ */
+static int
+_dhd_wlfc_prec_drop(dhd_pub_t *dhdp, int prec, void* p, bool bPktInQ)
+{
+ athost_wl_status_info_t* ctx;
+ void *pout = NULL;
+
+ ASSERT(dhdp && p);
+ if (prec < 0 || prec >= WLFC_PSQ_PREC_COUNT) {
+ ASSERT(0);
+ return BCME_BADARG;
+ }
+
+ ctx = (athost_wl_status_info_t*)dhdp->wlfc_state;
+
+ if (!WLFC_GET_AFQ(dhdp->wlfc_mode) && (prec & 1)) {
+ /* suppressed queue, need pop from hanger */
+ _dhd_wlfc_hanger_poppkt(ctx->hanger, WL_TXSTATUS_GET_HSLOT(DHD_PKTTAG_H2DTAG
+ (PKTTAG(p))), &pout, TRUE);
+ ASSERT(p == pout);
+ }
+
+ if (!(prec & 1)) {
+#ifdef DHDTCPACK_SUPPRESS
+ /* pkt in delayed q, so fake push BDC header for
+ * dhd_tcpack_check_xmit() and dhd_txcomplete().
+ */
+ _dhd_wlfc_pushheader(ctx, &p, FALSE, 0, 0, 0, 0, TRUE);
+
+ /* This packet is about to be freed, so remove it from tcp_ack_info_tbl
+ * This must be one of...
+ * 1. A pkt already in delayQ is evicted by another pkt with higher precedence
+ * in _dhd_wlfc_prec_enq_with_drop()
+ * 2. A pkt could not be enqueued to delayQ because it is full,
+ * in _dhd_wlfc_enque_delayq().
+ * 3. A pkt could not be enqueued to delayQ because it is full,
+ * in _dhd_wlfc_rollback_packet_toq().
+ */
+ if (dhd_tcpack_check_xmit(dhdp, p) == BCME_ERROR) {
+ DHD_ERROR(("%s %d: tcpack_suppress ERROR!!!"
+ " Stop using it\n",
+ __FUNCTION__, __LINE__));
+ dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF);
+ }
+#endif /* DHDTCPACK_SUPPRESS */
+ }
+
+ if (bPktInQ) {
+ ctx->pkt_cnt_in_q[DHD_PKTTAG_IF(PKTTAG(p))][prec>>1]--;
+ ctx->pkt_cnt_per_ac[prec>>1]--;
+ ctx->pkt_cnt_in_psq--;
+ }
+
+ ctx->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(p))][DHD_PKTTAG_FIFO(PKTTAG(p))]--;
+ ctx->stats.pktout++;
+ ctx->stats.drop_pkts[prec]++;
+
+ dhd_txcomplete(dhdp, p, FALSE);
+ PKTFREE(ctx->osh, p, TRUE);
+
+ return 0;
+} /* _dhd_wlfc_prec_drop */
+
+/**
+ * Called when eg the host handed a new packet over to the driver, or when the dongle reported
+ * that a packet could currently not be transmitted (=suppressed). This function enqueues a transmit
+ * packet in the host driver to be (re)transmitted at a later opportunity.
+ * @param[in] dhdp pointer to public DHD structure
+ * @param[in] qHead When TRUE, queue packet at head instead of tail, to preserve d11 sequence
+ */
+static bool
+_dhd_wlfc_prec_enq_with_drop(dhd_pub_t *dhdp, struct pktq *pq, void *pkt, int prec, bool qHead,
+ uint8 current_seq)
+{
+ void *p = NULL;
+ int eprec = -1; /* precedence to evict from */
+ athost_wl_status_info_t* ctx;
+
+ ASSERT(dhdp && pq && pkt);
+ ASSERT(prec >= 0 && prec < pq->num_prec);
+
+ ctx = (athost_wl_status_info_t*)dhdp->wlfc_state;
+
+ /* Fast case, precedence queue is not full and we are also not
+ * exceeding total queue length
+ */
+ if (!pktqprec_full(pq, prec) && !pktq_full(pq)) {
+ goto exit;
+ }
+
+ /* Determine precedence from which to evict packet, if any */
+ if (pktqprec_full(pq, prec)) {
+ eprec = prec;
+ } else if (pktq_full(pq)) {
+ p = pktq_peek_tail(pq, &eprec);
+ if (!p) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return FALSE;
+ }
+ if ((eprec > prec) || (eprec < 0)) {
+ if (!pktqprec_empty(pq, prec)) {
+ eprec = prec;
+ } else {
+ return FALSE;
+ }
+ }
+ }
+
+ /* Evict if needed */
+ if (eprec >= 0) {
+ /* Detect queueing to unconfigured precedence */
+ ASSERT(!pktqprec_empty(pq, eprec));
+ /* Evict all fragmented frames */
+ dhd_prec_drop_pkts(dhdp, pq, eprec, _dhd_wlfc_prec_drop);
+ }
+
+exit:
+ /* Enqueue */
+ _dhd_wlfc_prec_enque(pq, prec, pkt, qHead, current_seq,
+ WLFC_GET_REORDERSUPP(dhdp->wlfc_mode));
+ ctx->pkt_cnt_in_q[DHD_PKTTAG_IF(PKTTAG(pkt))][prec>>1]++;
+ ctx->pkt_cnt_per_ac[prec>>1]++;
+ ctx->pkt_cnt_in_psq++;
+
+ return TRUE;
+} /* _dhd_wlfc_prec_enq_with_drop */
+
+/**
+ * Called during eg the 'committing' of a transmit packet from the OS layer to a lower layer, in
+ * the event that this 'commit' failed.
+ */
+static int
+_dhd_wlfc_rollback_packet_toq(athost_wl_status_info_t* ctx,
+ void* p, ewlfc_packet_state_t pkt_type, uint32 hslot)
+{
+ /*
+ * put the packet back to the head of queue
+ * - suppressed packet goes back to suppress sub-queue
+ * - pull out the header, if new or delayed packet
+ *
+ * Note: hslot is used only when header removal is done.
+ */
+ wlfc_mac_descriptor_t* entry;
+ int rc = BCME_OK;
+ int prec, fifo_id;
+
+ entry = _dhd_wlfc_find_table_entry(ctx, p);
+ prec = DHD_PKTTAG_FIFO(PKTTAG(p));
+ fifo_id = prec << 1;
+ if (pkt_type == eWLFC_PKTTYPE_SUPPRESSED)
+ fifo_id += 1;
+ if (entry != NULL) {
+ /*
+ if this packet did not count against FIFO credit, it must have
+ taken a requested_credit from the firmware (for pspoll etc.)
+ */
+ if ((prec != AC_COUNT) && !DHD_PKTTAG_CREDITCHECK(PKTTAG(p)))
+ entry->requested_credit++;
+
+ if (pkt_type == eWLFC_PKTTYPE_DELAYED) {
+ /* decrement sequence count */
+ WLFC_DECR_SEQCOUNT(entry, prec);
+ /* remove header first */
+ rc = _dhd_wlfc_pullheader(ctx, p);
+ if (rc != BCME_OK) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ goto exit;
+ }
+ }
+
+ if (_dhd_wlfc_prec_enq_with_drop(ctx->dhdp, &entry->psq, p, fifo_id, TRUE,
+ WLFC_SEQCOUNT(entry, fifo_id>>1))
+ == FALSE) {
+ /* enque failed */
+ DHD_ERROR(("Error: %s():%d, fifo_id(%d)\n",
+ __FUNCTION__, __LINE__, fifo_id));
+ rc = BCME_ERROR;
+ }
+ } else {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ rc = BCME_ERROR;
+ }
+
+exit:
+ if (rc != BCME_OK) {
+ ctx->stats.rollback_failed++;
+ _dhd_wlfc_prec_drop(ctx->dhdp, fifo_id, p, FALSE);
+ } else {
+ ctx->stats.rollback++;
+ }
+
+ return rc;
+} /* _dhd_wlfc_rollback_packet_toq */
+
+/** Returns TRUE if host OS -> DHD flow control is allowed on the caller supplied interface */
+static bool
+_dhd_wlfc_allow_fc(athost_wl_status_info_t* ctx, uint8 ifid)
+{
+ int prec, ac_traffic = WLFC_NO_TRAFFIC;
+
+ for (prec = 0; prec < AC_COUNT; prec++) {
+ if (ctx->pkt_cnt_in_drv[ifid][prec] > 0) {
+ if (ac_traffic == WLFC_NO_TRAFFIC)
+ ac_traffic = prec + 1;
+ else if (ac_traffic != (prec + 1))
+ ac_traffic = WLFC_MULTI_TRAFFIC;
+ }
+ }
+
+ if (ac_traffic >= 1 && ac_traffic <= AC_COUNT) {
+ /* single AC (BE/BK/VI/VO) in queue */
+ if (ctx->allow_fc) {
+ return TRUE;
+ } else {
+ uint32 delta;
+ uint32 curr_t = OSL_SYSUPTIME();
+
+ if (ctx->fc_defer_timestamp == 0) {
+ /* first single ac scenario */
+ ctx->fc_defer_timestamp = curr_t;
+ return FALSE;
+ }
+
+ /* single AC duration, this handles wrap around, e.g. 1 - ~0 = 2. */
+ delta = curr_t - ctx->fc_defer_timestamp;
+ if (delta >= WLFC_FC_DEFER_PERIOD_MS) {
+ ctx->allow_fc = TRUE;
+ }
+ }
+ } else {
+ /* multiple ACs or BCMC in queue */
+ ctx->allow_fc = FALSE;
+ ctx->fc_defer_timestamp = 0;
+ }
+
+ return ctx->allow_fc;
+} /* _dhd_wlfc_allow_fc */
+
+/**
+ * Starts or stops the flow of transmit packets from the host OS towards the DHD, depending on
+ * low/high watermarks.
+ */
+static void
+_dhd_wlfc_flow_control_check(athost_wl_status_info_t* ctx, struct pktq* pq, uint8 if_id)
+{
+ dhd_pub_t *dhdp;
+
+ ASSERT(ctx);
+
+ dhdp = (dhd_pub_t *)ctx->dhdp;
+ ASSERT(dhdp);
+
+ if (dhdp->skip_fc && dhdp->skip_fc((void *)dhdp, if_id))
+ return;
+
+ if ((ctx->hostif_flow_state[if_id] == OFF) && !_dhd_wlfc_allow_fc(ctx, if_id))
+ return;
+
+ if ((pq->n_pkts_tot <= WLFC_FLOWCONTROL_LOWATER) && (ctx->hostif_flow_state[if_id] == ON)) {
+ /* start traffic */
+ ctx->hostif_flow_state[if_id] = OFF;
+ /*
+ WLFC_DBGMESG(("qlen:%02d, if:%02d, ->OFF, start traffic %s()\n",
+ pq->n_pkts_tot, if_id, __FUNCTION__));
+ */
+ WLFC_DBGMESG(("F"));
+
+ dhd_txflowcontrol(dhdp, if_id, OFF);
+
+ ctx->toggle_host_if = 0;
+ }
+
+ if (pq->n_pkts_tot >= WLFC_FLOWCONTROL_HIWATER && ctx->hostif_flow_state[if_id] == OFF) {
+ /* stop traffic */
+ ctx->hostif_flow_state[if_id] = ON;
+ /*
+ WLFC_DBGMESG(("qlen:%02d, if:%02d, ->ON, stop traffic %s()\n",
+ pq->n_pkts_tot, if_id, __FUNCTION__));
+ */
+ WLFC_DBGMESG(("N"));
+
+ dhd_txflowcontrol(dhdp, if_id, ON);
+
+ ctx->host_ifidx = if_id;
+ ctx->toggle_host_if = 1;
+ }
+
+ return;
+} /* _dhd_wlfc_flow_control_check */
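+
+/*
+ * The two watermarks above form a hysteresis band; a minimal sketch of
+ * the resulting state machine (thresholds are whatever
+ * WLFC_FLOWCONTROL_LOWATER/HIWATER are configured to):
+ *
+ *   if (qlen >= HIWATER && flow == OFF) flow = ON;  // stop host traffic
+ *   if (qlen <= LOWATER && flow == ON)  flow = OFF; // resume host traffic
+ *
+ * Between the watermarks the state is left unchanged, which prevents
+ * on/off thrashing while the queue length hovers near a single threshold.
+ */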
+
+/** XXX: Warning: this function directly accesses bus-transmit function */
+static int
+_dhd_wlfc_send_signalonly_packet(athost_wl_status_info_t* ctx, wlfc_mac_descriptor_t* entry,
+ uint8 ta_bmp)
+{
+ int rc = BCME_OK;
+ void* p = NULL;
+ dhd_pub_t *dhdp = (dhd_pub_t *)ctx->dhdp;
+ int dummylen = dhdp->hdrlen + 16;
+
+ if (dhdp->proptxstatus_txoff) {
+ rc = BCME_NORESOURCE;
+ return rc;
+ }
+
+ /* allocate a dummy packet */
+ p = PKTGET(ctx->osh, dummylen, TRUE);
+ if (p) {
+ PKTPULL(ctx->osh, p, dummylen);
+ DHD_PKTTAG_SET_H2DTAG(PKTTAG(p), 0);
+ _dhd_wlfc_pushheader(ctx, &p, TRUE, ta_bmp, entry->mac_handle, 0, 0, FALSE);
+ DHD_PKTTAG_SETSIGNALONLY(PKTTAG(p), 1);
+ DHD_PKTTAG_WLFCPKT_SET(PKTTAG(p), 1);
+#ifdef PROP_TXSTATUS_DEBUG
+ ctx->stats.signal_only_pkts_sent++;
+#endif
+
+#if defined(BCMPCIE)
+ /* XXX : RAHUL : Verify the ifidx */
+ rc = dhd_bus_txdata(dhdp->bus, p, ctx->host_ifidx);
+#else
+ rc = dhd_bus_txdata(dhdp->bus, p);
+#endif
+ if (rc != BCME_OK) {
+ _dhd_wlfc_pullheader(ctx, p);
+ PKTFREE(ctx->osh, p, TRUE);
+ }
+ } else {
+ DHD_ERROR(("%s: couldn't allocate new %d-byte packet\n",
+ __FUNCTION__, dummylen));
+ rc = BCME_NOMEM;
+ dhdp->tx_pktgetfail++;
+ }
+
+ return rc;
+} /* _dhd_wlfc_send_signalonly_packet */
+
+/**
+ * Called on eg receiving 'mac close' indication from dongle. Updates the per-MAC administration
+ * maintained in caller supplied parameter 'entry'.
+ *
+ * @param[in/out] entry administration about a remote MAC entity
+ * @param[in] prec precedence queue for this remote MAC entity
+ *
+ * Return value: TRUE if traffic availability changed
+ */
+static bool
+_dhd_wlfc_traffic_pending_check(athost_wl_status_info_t* ctx, wlfc_mac_descriptor_t* entry,
+ int prec)
+{
+ bool rc = FALSE;
+
+ if (entry->state == WLFC_STATE_CLOSE) {
+ if ((pktqprec_n_pkts(&entry->psq, (prec << 1)) == 0) &&
+ (pktqprec_n_pkts(&entry->psq, ((prec << 1) + 1)) == 0)) {
+ /* no packets in both 'normal' and 'suspended' queues */
+ if (entry->traffic_pending_bmp & NBITVAL(prec)) {
+ rc = TRUE;
+ entry->traffic_pending_bmp =
+ entry->traffic_pending_bmp & ~ NBITVAL(prec);
+ }
+ } else {
+ /* packets are queued in host for transmission to dongle */
+ if (!(entry->traffic_pending_bmp & NBITVAL(prec))) {
+ rc = TRUE;
+ entry->traffic_pending_bmp =
+ entry->traffic_pending_bmp | NBITVAL(prec);
+ }
+ }
+ }
+
+ if (rc) {
+ /* request a TIM update to firmware at the next piggyback opportunity */
+ if (entry->traffic_lastreported_bmp != entry->traffic_pending_bmp) {
+ entry->send_tim_signal = 1;
+ /*
+ XXX: send a header only packet from the same context.
+ --this should change to sending from a timeout or similar.
+ */
+ _dhd_wlfc_send_signalonly_packet(ctx, entry, entry->traffic_pending_bmp);
+ entry->traffic_lastreported_bmp = entry->traffic_pending_bmp;
+ entry->send_tim_signal = 0;
+ } else {
+ rc = FALSE;
+ }
+ }
+
+ return rc;
+} /* _dhd_wlfc_traffic_pending_check */
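+
+/*
+ * Sketch of the per-precedence bitmap manipulation above (NBITVAL(prec)
+ * is the single-bit mask for that precedence):
+ *
+ *   bmp |=  NBITVAL(prec);  // traffic became pending for 'prec'
+ *   bmp &= ~NBITVAL(prec);  // queues for 'prec' drained
+ *
+ * A TIM update is requested only when the resulting bitmap differs from
+ * traffic_lastreported_bmp, so the dongle is signalled on state changes,
+ * not on every packet.
+ */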
+
+/**
+ * Called on receiving a 'd11 suppressed' or 'wl suppressed' tx status from the firmware. Enqueues
+ * the packet to transmit to firmware again at a later opportunity.
+ */
+static int
+_dhd_wlfc_enque_suppressed(athost_wl_status_info_t* ctx, int prec, void* p)
+{
+ wlfc_mac_descriptor_t* entry;
+
+ entry = _dhd_wlfc_find_table_entry(ctx, p);
+ if (entry == NULL) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_NOTFOUND;
+ }
+ /*
+ - suppressed packets go to sub_queue[2*prec + 1] AND
+ - delayed packets go to sub_queue[2*prec + 0] to ensure
+ order of delivery.
+ */
+ if (_dhd_wlfc_prec_enq_with_drop(ctx->dhdp, &entry->psq, p, ((prec << 1) + 1), FALSE,
+ WLFC_SEQCOUNT(entry, prec))
+ == FALSE) {
+ ctx->stats.delayq_full_error++;
+ /* WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__)); */
+ WLFC_DBGMESG(("s"));
+ return BCME_ERROR;
+ }
+
+ /* A packet has been pushed, update traffic availability bitmap, if applicable */
+ _dhd_wlfc_traffic_pending_check(ctx, entry, prec);
+ _dhd_wlfc_flow_control_check(ctx, &entry->psq, DHD_PKTTAG_IF(PKTTAG(p)));
+ return BCME_OK;
+}
+
+/**
+ * Called when a transmit packet is about to be 'committed' from the OS layer to a lower layer
+ * towards the dongle (eg the DBUS layer). Updates wlfc administration. May modify packet.
+ *
+ * @param[in/out] ctx driver specific flow control administration
+ * @param[in/out] entry The remote MAC entity for which the packet is destined.
+ * @param[in/out] packet Packet to send. This function optionally adds TLVs to the packet.
+ * @param[in] header_needed True if packet is 'new' to flow control
+ * @param[out] slot Handle to container in which the packet was 'parked'
+ */
+static int
+_dhd_wlfc_pretx_pktprocess(athost_wl_status_info_t* ctx,
+ wlfc_mac_descriptor_t* entry, void** packet, int header_needed, uint32* slot)
+{
+ int rc = BCME_OK;
+ int hslot = WLFC_HANGER_MAXITEMS;
+ bool send_tim_update = FALSE;
+ uint32 htod = 0;
+ uint16 htodseq = 0;
+ uint8 free_ctr;
+ int gen = 0xff;
+ dhd_pub_t *dhdp = (dhd_pub_t *)ctx->dhdp;
+ void * p = *packet;
+
+ *slot = hslot;
+
+ if (entry == NULL) {
+ entry = _dhd_wlfc_find_table_entry(ctx, p);
+ }
+
+ if (entry == NULL) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_ERROR;
+ }
+
+ if (entry->send_tim_signal) {
+ /* sends a traffic indication bitmap to the dongle */
+ send_tim_update = TRUE;
+ entry->send_tim_signal = 0;
+ entry->traffic_lastreported_bmp = entry->traffic_pending_bmp;
+ }
+
+ if (header_needed) {
+ if (WLFC_GET_AFQ(dhdp->wlfc_mode)) {
+ hslot = (uint)(entry - &ctx->destination_entries.nodes[0]);
+ } else {
+ hslot = _dhd_wlfc_hanger_get_free_slot(ctx->hanger);
+ }
+ gen = entry->generation;
+ free_ctr = WLFC_SEQCOUNT(entry, DHD_PKTTAG_FIFO(PKTTAG(p)));
+#if defined(BCMINTERNAL) && defined(OOO_DEBUG)
+ _dhd_wlfc_bprint(ctx, "d%u.%u.%u-",
+ (uint8)(entry - &ctx->destination_entries.nodes[0]), gen, free_ctr);
+#endif /* defined(BCMINTERNAL) && defined(OOO_DEBUG) */
+ } else {
+ if (WLFC_GET_REUSESEQ(dhdp->wlfc_mode)) {
+ htodseq = DHD_PKTTAG_H2DSEQ(PKTTAG(p));
+ }
+
+ hslot = WL_TXSTATUS_GET_HSLOT(DHD_PKTTAG_H2DTAG(PKTTAG(p)));
+
+ if (WLFC_GET_REORDERSUPP(dhdp->wlfc_mode)) {
+ gen = entry->generation;
+ } else if (WLFC_GET_AFQ(dhdp->wlfc_mode)) {
+ gen = WL_TXSTATUS_GET_GENERATION(DHD_PKTTAG_H2DTAG(PKTTAG(p)));
+ } else {
+ _dhd_wlfc_hanger_get_genbit(ctx->hanger, p, hslot, &gen);
+ }
+
+ free_ctr = WL_TXSTATUS_GET_FREERUNCTR(DHD_PKTTAG_H2DTAG(PKTTAG(p)));
+#if defined(BCMINTERNAL) && defined(OOO_DEBUG)
+ _dhd_wlfc_bprint(ctx, "s%u.%u.%u-",
+ (uint8)(entry - &ctx->destination_entries.nodes[0]), gen, free_ctr);
+ if (WLFC_GET_REUSESEQ(dhdp->wlfc_mode)) {
+ _dhd_wlfc_bprint(ctx, "%u.%u-",
+ IS_WL_TO_REUSE_SEQ(DHD_PKTTAG_H2DSEQ(PKTTAG(p))),
+ WL_SEQ_GET_NUM(DHD_PKTTAG_H2DSEQ(PKTTAG(p))));
+ }
+#endif /* defined(BCMINTERNAL) && defined(OOO_DEBUG) */
+ /* remove old header */
+ _dhd_wlfc_pullheader(ctx, p);
+ }
+
+ if (hslot >= WLFC_HANGER_MAXITEMS) {
+ DHD_ERROR(("Error: %s():no hanger slot available\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ WL_TXSTATUS_SET_FREERUNCTR(htod, free_ctr);
+ WL_TXSTATUS_SET_HSLOT(htod, hslot);
+ WL_TXSTATUS_SET_FIFO(htod, DHD_PKTTAG_FIFO(PKTTAG(p)));
+ WL_TXSTATUS_SET_FLAGS(htod, WLFC_PKTFLAG_PKTFROMHOST);
+ WL_TXSTATUS_SET_GENERATION(htod, gen);
+ DHD_PKTTAG_SETPKTDIR(PKTTAG(p), 1);
+
+ if (!DHD_PKTTAG_CREDITCHECK(PKTTAG(p))) {
+ /*
+ Indicate that this packet is being sent in response to an
+ explicit request from the firmware side.
+ */
+ WLFC_PKTFLAG_SET_PKTREQUESTED(htod);
+ } else {
+ WLFC_PKTFLAG_CLR_PKTREQUESTED(htod);
+ }
+
+ rc = _dhd_wlfc_pushheader(ctx, &p, send_tim_update,
+ entry->traffic_lastreported_bmp, entry->mac_handle, htod, htodseq, FALSE);
+ if (rc == BCME_OK) {
+ DHD_PKTTAG_SET_H2DTAG(PKTTAG(p), htod);
+
+ if (!WLFC_GET_AFQ(dhdp->wlfc_mode)) {
+ wlfc_hanger_t *h = (wlfc_hanger_t*)(ctx->hanger);
+ if (header_needed) {
+ /*
+ a new header was created for this packet.
+ push to hanger slot and scrub q. Since bus
+ send succeeded, increment seq number as well.
+ */
+ rc = _dhd_wlfc_hanger_pushpkt(ctx->hanger, p, hslot);
+ if (rc == BCME_OK) {
+#ifdef PROP_TXSTATUS_DEBUG
+ h->items[hslot].push_time =
+ OSL_SYSUPTIME();
+#endif
+ } else {
+ DHD_ERROR(("%s() hanger_pushpkt() failed, rc: %d\n",
+ __FUNCTION__, rc));
+ }
+ } else {
+ /* clear hanger state */
+ if (h->items[hslot].pkt != p)
+ DHD_ERROR(("%s() pkt not match: cur %p, hanger pkt %p\n",
+ __FUNCTION__, p, h->items[hslot].pkt));
+ ASSERT(h->items[hslot].pkt == p);
+ bcm_object_feature_set(h->items[hslot].pkt,
+ BCM_OBJECT_FEATURE_PKT_STATE, 0);
+ h->items[hslot].pkt_state = 0;
+ h->items[hslot].pkt_txstatus = 0;
+ h->items[hslot].state = WLFC_HANGER_ITEM_STATE_INUSE;
+ }
+ }
+
+ if ((rc == BCME_OK) && header_needed) {
+ /* increment free running sequence count */
+ WLFC_INCR_SEQCOUNT(entry, DHD_PKTTAG_FIFO(PKTTAG(p)));
+ }
+ }
+ *slot = hslot;
+ *packet = p;
+ return rc;
+} /* _dhd_wlfc_pretx_pktprocess */
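+
+/*
+ * Shape of the 32-bit host-to-dongle (htod) tag assembled above. Field
+ * positions and widths are owned by the WL_TXSTATUS_* macros, so treat
+ * this as an illustrative layout rather than a normative one:
+ *
+ *   uint32 htod = 0;
+ *   WL_TXSTATUS_SET_FREERUNCTR(htod, free_ctr); // per-entry sequence no.
+ *   WL_TXSTATUS_SET_HSLOT(htod, hslot);         // hanger/AFQ slot
+ *   WL_TXSTATUS_SET_FIFO(htod, fifo_id);        // precedence/FIFO
+ *   WL_TXSTATUS_SET_GENERATION(htod, gen);      // suppress generation
+ *
+ * The same tag comes back in txstatus, which is how a completion is
+ * matched to its slot and generation.
+ */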
+
+/**
+ * A remote wireless mac may be temporarily 'closed' due to power management. Returns '1' if remote
+ * mac is in the 'open' state, otherwise '0'.
+ */
+static int
+_dhd_wlfc_is_destination_open(athost_wl_status_info_t* ctx,
+ wlfc_mac_descriptor_t* entry, int prec)
+{
+ wlfc_mac_descriptor_t* interfaces = ctx->destination_entries.interfaces;
+
+ if (entry->interface_id >= WLFC_MAX_IFNUM) {
+ ASSERT(&ctx->destination_entries.other == entry);
+ return 1;
+ }
+
+ if (interfaces[entry->interface_id].iftype ==
+ WLC_E_IF_ROLE_P2P_GO) {
+ /* - destination interface is of type P2P GO.
+ For a P2P GO interface, if the destination is OPEN but the interface is
+ CLOSEd, do not send traffic. But if the destination is CLOSEd while
+ there is destination-specific credit left, send packets anyway, because
+ the firmware is holding the destination-specific requested packets in
+ its queue.
+ */
+ /* XXX: This behavior will change once the PM1 protocol mod is complete */
+ if ((entry->state == WLFC_STATE_CLOSE) && (entry->requested_credit == 0) &&
+ (entry->requested_packet == 0)) {
+ return 0;
+ }
+ }
+
+ /* AP, p2p_go -> unicast desc entry, STA/p2p_cl -> interface desc. entry */
+ if ((((entry->state == WLFC_STATE_CLOSE) ||
+ (interfaces[entry->interface_id].state == WLFC_STATE_CLOSE)) &&
+ (entry->requested_credit == 0) &&
+ (entry->requested_packet == 0)) ||
+ (!(entry->ac_bitmap & (1 << prec)))) {
+ return 0;
+ }
+
+ return 1;
+} /* _dhd_wlfc_is_destination_open */
+
+/**
+ * Dequeues a suppressed or delayed packet from a queue
+ * @param[in/out] ctx Driver specific flow control administration
+ * @param[in] prec Precedence of queue to dequeue from
+ * @param[out] ac_credit_spent Boolean, returns 0 or 1
+ * @param[out] needs_hdr Boolean, returns 0 or 1
+ * @param[out] entry_out The remote MAC for which the packet is destined
+ * @param[in] only_no_credit If TRUE, searches only the entries with pending firmware requests instead of the active ones
+ *
+ * Return value: the dequeued packet
+ */
+
+static void*
+_dhd_wlfc_deque_delayedq(athost_wl_status_info_t* ctx, int prec,
+ uint8* ac_credit_spent, uint8* needs_hdr, wlfc_mac_descriptor_t** entry_out,
+ bool only_no_credit)
+{
+ wlfc_mac_descriptor_t* entry;
+ int total_entries;
+ void* p = NULL;
+ int i;
+ uint8 credit_spent = ((prec == AC_COUNT) && !ctx->bcmc_credit_supported) ? 0 : 1;
+ uint16 qlen;
+ bool change_entry = FALSE;
+
+ BCM_REFERENCE(qlen);
+ BCM_REFERENCE(change_entry);
+
+ *entry_out = NULL;
+ /* most cases a packet will count against FIFO credit */
+ *ac_credit_spent = credit_spent;
+
+ /* search all entries, include nodes as well as interfaces */
+ if (only_no_credit) {
+ total_entries = ctx->requested_entry_count;
+ } else {
+ total_entries = ctx->active_entry_count;
+ }
+
+ for (i = 0; i < total_entries; i++) {
+ if (only_no_credit) {
+ entry = ctx->requested_entry[i];
+ } else {
+ entry = ctx->active_entry_head;
+ }
+ ASSERT(entry);
+
+ if (entry->occupied && _dhd_wlfc_is_destination_open(ctx, entry, prec) &&
+#ifdef PROPTX_MAXCOUNT
+ (entry->transit_count < entry->transit_maxcount) &&
+#endif /* PROPTX_MAXCOUNT */
+ (entry->transit_count < WL_TXSTATUS_FREERUNCTR_MASK) &&
+ (!entry->suppressed)) {
+ *ac_credit_spent = credit_spent;
+ if (entry->state == WLFC_STATE_CLOSE) {
+ *ac_credit_spent = 0;
+ }
+
+ /* higher precedence will be picked up first,
+ * i.e. suppressed packets before delayed ones
+ */
+ p = pktq_pdeq(&entry->psq, PSQ_SUP_IDX(prec));
+ *needs_hdr = 0;
+ if (p == NULL) {
+ /* De-Q from delay Q */
+ p = pktq_pdeq(&entry->psq, PSQ_DLY_IDX(prec));
+ *needs_hdr = 1;
+ }
+
+ if (p != NULL) {
+ bcm_pkt_validate_chk(p, "_dhd_wlfc_deque_afq");
+ /* did the packet come from suppress sub-queue? */
+ if (entry->requested_credit > 0) {
+ entry->requested_credit--;
+#ifdef PROP_TXSTATUS_DEBUG
+ entry->dstncredit_sent_packets++;
+#endif
+ } else if (entry->requested_packet > 0) {
+ entry->requested_packet--;
+ DHD_PKTTAG_SETONETIMEPKTRQST(PKTTAG(p));
+ }
+
+ *entry_out = entry;
+ ctx->pkt_cnt_in_q[DHD_PKTTAG_IF(PKTTAG(p))][prec]--;
+ ctx->pkt_cnt_per_ac[prec]--;
+ ctx->pkt_cnt_in_psq--;
+#ifdef BULK_DEQUEUE
+ /* Check pkts in delayq */
+ if (entry->state == WLFC_STATE_OPEN) {
+ entry->release_count[prec]++;
+ qlen = pktq_mlen(&entry->psq,
+ (1 << PSQ_SUP_IDX(prec) | 1 << PSQ_DLY_IDX(prec)));
+
+ if (entry->release_count[prec] == ctx->max_release_count ||
+ qlen == 0) {
+ change_entry = TRUE;
+ entry->release_count[prec] = 0;
+ }
+
+ if (change_entry) {
+ /* move head */
+ ctx->active_entry_head =
+ ctx->active_entry_head->next;
+ }
+ }
+#endif /* BULK_DEQUEUE */
+ _dhd_wlfc_flow_control_check(ctx, &entry->psq,
+ DHD_PKTTAG_IF(PKTTAG(p)));
+ /*
+ * A packet has been picked up, update traffic availability bitmap,
+ * if applicable.
+ */
+ _dhd_wlfc_traffic_pending_check(ctx, entry, prec);
+ return p;
+ }
+ }
+ if (!only_no_credit) {
+ /* move head */
+ ctx->active_entry_head = ctx->active_entry_head->next;
+ }
+ }
+ return NULL;
+} /* _dhd_wlfc_deque_delayedq */
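+
+/*
+ * Dequeue order used above, per entry and precedence:
+ *
+ *   p = pktq_pdeq(&entry->psq, PSQ_SUP_IDX(prec));     // suppressed first
+ *   if (p == NULL)
+ *       p = pktq_pdeq(&entry->psq, PSQ_DLY_IDX(prec)); // then delayed
+ *
+ * Suppressed packets drain first so that retransmissions keep their
+ * original ordering relative to not-yet-sent (delayed) traffic.
+ */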
+
+/** Enqueues caller supplied packet on either a 'suppressed' or 'delayed' queue */
+static int
+_dhd_wlfc_enque_delayq(athost_wl_status_info_t* ctx, void* pktbuf, int prec)
+{
+ wlfc_mac_descriptor_t* entry;
+
+ if (pktbuf != NULL) {
+ entry = _dhd_wlfc_find_table_entry(ctx, pktbuf);
+ if (entry == NULL) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_ERROR;
+ }
+
+ /*
+ - suppressed packets go to sub_queue[2*prec + 1] AND
+ - delayed packets go to sub_queue[2*prec + 0] to ensure
+ order of delivery.
+ */
+ if (_dhd_wlfc_prec_enq_with_drop(ctx->dhdp, &entry->psq, pktbuf, (prec << 1),
+ FALSE, WLFC_SEQCOUNT(entry, prec))
+ == FALSE) {
+ WLFC_DBGMESG(("D"));
+ ctx->stats.delayq_full_error++;
+ return BCME_ERROR;
+ }
+
+#ifdef QMONITOR
+ dhd_qmon_tx(&entry->qmon);
+#endif
+
+ /* A packet has been pushed, update traffic availability bitmap, if applicable */
+ _dhd_wlfc_traffic_pending_check(ctx, entry, prec);
+ }
+
+ return BCME_OK;
+} /* _dhd_wlfc_enque_delayq */
+
+/** Returns TRUE if caller supplied packet is destined for caller supplied interface */
+static bool _dhd_wlfc_ifpkt_fn(void* p, void *p_ifid)
+{
+ if (!p || !p_ifid)
+ return FALSE;
+
+ return (DHD_PKTTAG_WLFCPKT(PKTTAG(p))&& (*((uint8 *)p_ifid) == DHD_PKTTAG_IF(PKTTAG(p))));
+}
+
+/** Returns TRUE if caller supplied packet is destined for caller supplied remote MAC */
+static bool _dhd_wlfc_entrypkt_fn(void* p, void *entry)
+{
+ if (!p || !entry)
+ return FALSE;
+
+ return (DHD_PKTTAG_WLFCPKT(PKTTAG(p))&& (entry == DHD_PKTTAG_ENTRY(PKTTAG(p))));
+}
+
+static void
+_dhd_wlfc_return_implied_credit(athost_wl_status_info_t* wlfc, void* pkt)
+{
+ dhd_pub_t *dhdp;
+ bool credit_return = FALSE;
+
+ if (!wlfc || !pkt) {
+ return;
+ }
+
+ dhdp = (dhd_pub_t *)(wlfc->dhdp);
+ if (dhdp && (dhdp->proptxstatus_mode == WLFC_FCMODE_IMPLIED_CREDIT) &&
+ DHD_PKTTAG_CREDITCHECK(PKTTAG(pkt))) {
+ int lender, credit_returned = 0;
+ uint8 fifo_id = DHD_PKTTAG_FIFO(PKTTAG(pkt));
+
+ credit_return = TRUE;
+
+ /* Note that borrower is fifo_id */
+ /* Return credits to highest priority lender first */
+ for (lender = AC_COUNT; lender >= 0; lender--) {
+ if (wlfc->credits_borrowed[fifo_id][lender] > 0) {
+ wlfc->FIFO_credit[lender]++;
+ wlfc->credits_borrowed[fifo_id][lender]--;
+ credit_returned = 1;
+ break;
+ }
+ }
+
+ if (!credit_returned) {
+ wlfc->FIFO_credit[fifo_id]++;
+ }
+ }
+
+ BCM_REFERENCE(credit_return);
+#if defined(DHD_WLFC_THREAD)
+ if (credit_return) {
+ _dhd_wlfc_thread_wakeup(dhdp);
+ }
+#endif /* defined(DHD_WLFC_THREAD) */
+}
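+
+/*
+ * credits_borrowed[][] bookkeeping illustrated: the first index is the
+ * borrower FIFO, the second the lender. Repayment walks lenders from
+ * highest priority down; e.g. if AC 3 (VO) borrowed one credit from
+ * AC 1 (BE):
+ *
+ *   // on txstatus for the VO packet:
+ *   wlfc->FIFO_credit[1]++;            // lender repaid first
+ *   wlfc->credits_borrowed[3][1]--;    // debt cleared
+ *
+ * Only when no debt remains does the credit return to the borrower's own
+ * FIFO_credit[fifo_id].
+ */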
+
+/** Removes and frees a packet from the hanger. Called during eg tx complete. */
+static void
+_dhd_wlfc_hanger_free_pkt(athost_wl_status_info_t* wlfc, uint32 slot_id, uint8 pkt_state,
+ int pkt_txstatus)
+{
+ wlfc_hanger_t* hanger;
+ wlfc_hanger_item_t* item;
+
+ if (!wlfc)
+ return;
+
+ hanger = (wlfc_hanger_t*)wlfc->hanger;
+ if (!hanger)
+ return;
+
+ if (slot_id == WLFC_HANGER_MAXITEMS)
+ return;
+
+ item = &hanger->items[slot_id];
+
+ if (item->pkt) {
+ item->pkt_state |= pkt_state;
+ if (pkt_txstatus != -1)
+ item->pkt_txstatus = (uint8)pkt_txstatus;
+ bcm_object_feature_set(item->pkt, BCM_OBJECT_FEATURE_PKT_STATE, item->pkt_state);
+ if (item->pkt_state == WLFC_HANGER_PKT_STATE_COMPLETE) {
+ void *p = NULL;
+ void *pkt = item->pkt;
+ uint8 old_state = item->state;
+ int ret = _dhd_wlfc_hanger_poppkt(wlfc->hanger, slot_id, &p, TRUE);
+ BCM_REFERENCE(ret);
+ BCM_REFERENCE(pkt);
+ ASSERT((ret == BCME_OK) && p && (pkt == p));
+ if (old_state == WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED) {
+ printf("ERROR: free a suppressed pkt %p state %d pkt_state %d\n",
+ pkt, old_state, item->pkt_state);
+ }
+ ASSERT(old_state != WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED);
+
+ /* free packet */
+ wlfc->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(p))]
+ [DHD_PKTTAG_FIFO(PKTTAG(p))]--;
+ wlfc->stats.pktout++;
+ dhd_txcomplete((dhd_pub_t *)wlfc->dhdp, p, item->pkt_txstatus);
+ PKTFREE(wlfc->osh, p, TRUE);
+ }
+ } else {
+ /* free slot */
+ if (item->state == WLFC_HANGER_ITEM_STATE_FREE)
+ DHD_ERROR(("Error: %s():%d Multiple TXSTATUS or BUSRETURNED: %d (%d)\n",
+ __FUNCTION__, __LINE__, item->pkt_state, pkt_state));
+ item->state = WLFC_HANGER_ITEM_STATE_FREE;
+ }
+} /* _dhd_wlfc_hanger_free_pkt */
+
+/** Called during eg detach() */
+static void
+_dhd_wlfc_pktq_flush(athost_wl_status_info_t* ctx, struct pktq *pq,
+ bool dir, f_processpkt_t fn, void *arg, q_type_t q_type)
+{
+ int prec;
+ dhd_pub_t *dhdp = (dhd_pub_t *)ctx->dhdp;
+
+ ASSERT(dhdp);
+
+ /* Optimize flush, if pktq len = 0, just return.
+ * pktq len of 0 means pktq's prec q's are all empty.
+ */
+ if (pq->n_pkts_tot == 0) {
+ return;
+ }
+
+ for (prec = 0; prec < pq->num_prec; prec++) {
+ struct pktq_prec *q;
+ void *p, *prev = NULL;
+
+ q = &pq->q[prec];
+ p = q->head;
+ while (p) {
+ bcm_pkt_validate_chk(p, "_dhd_wlfc_pktq_flush");
+ if (fn == NULL || (*fn)(p, arg)) {
+ bool head = (p == q->head);
+ if (head)
+ q->head = PKTLINK(p);
+ else
+ PKTSETLINK(prev, PKTLINK(p));
+ if (q_type == Q_TYPE_PSQ) {
+ if (!WLFC_GET_AFQ(dhdp->wlfc_mode) && (prec & 1)) {
+ _dhd_wlfc_hanger_remove_reference(ctx->hanger, p);
+ }
+ ctx->pkt_cnt_in_q[DHD_PKTTAG_IF(PKTTAG(p))][prec>>1]--;
+ ctx->pkt_cnt_per_ac[prec>>1]--;
+ ctx->pkt_cnt_in_psq--;
+ ctx->stats.cleanup_psq_cnt++;
+ if (!(prec & 1)) {
+ /* pkt in delayed q, so fake push BDC header for
+ * dhd_tcpack_check_xmit() and dhd_txcomplete().
+ */
+ _dhd_wlfc_pushheader(ctx, &p, FALSE, 0, 0,
+ 0, 0, TRUE);
+#ifdef DHDTCPACK_SUPPRESS
+ if (dhd_tcpack_check_xmit(dhdp, p) == BCME_ERROR) {
+ DHD_ERROR(("%s %d: tcpack_suppress ERROR!!!"
+ " Stop using it\n",
+ __FUNCTION__, __LINE__));
+ dhd_tcpack_suppress_set(dhdp,
+ TCPACK_SUP_OFF);
+ }
+#endif /* DHDTCPACK_SUPPRESS */
+ }
+ } else if (q_type == Q_TYPE_AFQ) {
+ wlfc_mac_descriptor_t* entry =
+ _dhd_wlfc_find_table_entry(ctx, p);
+ if (entry->transit_count)
+ entry->transit_count--;
+ if (entry->suppr_transit_count) {
+ entry->suppr_transit_count--;
+ if (entry->suppressed &&
+ (!entry->onbus_pkts_count) &&
+ (!entry->suppr_transit_count))
+ entry->suppressed = FALSE;
+#if defined(BCMINTERNAL) && defined(OOO_DEBUG)
+ _dhd_wlfc_bprint(ctx, "[sc]-");
+#endif /* defined(BCMINTERNAL) && defined(OOO_DEBUG) */
+ }
+ _dhd_wlfc_return_implied_credit(ctx, p);
+ ctx->stats.cleanup_fw_cnt++;
+ }
+ PKTSETLINK(p, NULL);
+ if (dir) {
+ ctx->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(p))][prec>>1]--;
+ ctx->stats.pktout++;
+ dhd_txcomplete(dhdp, p, FALSE);
+ }
+ PKTFREE(ctx->osh, p, dir);
+
+ q->n_pkts--;
+ pq->n_pkts_tot--;
+#ifdef WL_TXQ_STALL
+ q->dequeue_count++;
+#endif
+
+ p = (head ? q->head : PKTLINK(prev));
+ } else {
+ prev = p;
+ p = PKTLINK(p);
+ }
+ }
+
+ if (q->head == NULL) {
+ ASSERT(q->n_pkts == 0);
+ q->tail = NULL;
+ }
+
+ }
+
+ if (fn == NULL)
+ ASSERT(pq->n_pkts_tot == 0);
+} /* _dhd_wlfc_pktq_flush */
+
+#ifndef BCMDBUS
+/** !BCMDBUS specific function. Dequeues a packet from the caller supplied queue. */
+static void*
+_dhd_wlfc_pktq_pdeq_with_fn(struct pktq *pq, int prec, f_processpkt_t fn, void *arg)
+{
+ struct pktq_prec *q;
+ void *p, *prev = NULL;
+
+ ASSERT(prec >= 0 && prec < pq->num_prec);
+
+ q = &pq->q[prec];
+ p = q->head;
+
+ while (p) {
+ if (fn == NULL || (*fn)(p, arg)) {
+ break;
+ } else {
+ prev = p;
+ p = PKTLINK(p);
+ }
+ }
+ if (p == NULL)
+ return NULL;
+
+ bcm_pkt_validate_chk(p, "_dhd_wlfc_pktq_flush");
+
+ if (prev == NULL) {
+ if ((q->head = PKTLINK(p)) == NULL) {
+ q->tail = NULL;
+ }
+ } else {
+ PKTSETLINK(prev, PKTLINK(p));
+ if (q->tail == p) {
+ q->tail = prev;
+ }
+ }
+
+ q->n_pkts--;
+
+ pq->n_pkts_tot--;
+
+#ifdef WL_TXQ_STALL
+ q->dequeue_count++;
+#endif
+
+ PKTSETLINK(p, NULL);
+
+ return p;
+}
+
+/** !BCMDBUS specific function */
+static void
+_dhd_wlfc_cleanup_txq(dhd_pub_t *dhd, f_processpkt_t fn, void *arg)
+{
+ int prec;
+ void *pkt = NULL, *head = NULL, *tail = NULL;
+ struct pktq *txq = (struct pktq *)dhd_bus_txq(dhd->bus);
+ athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+ wlfc_hanger_t* h = (wlfc_hanger_t*)wlfc->hanger;
+ wlfc_mac_descriptor_t* entry;
+
+ dhd_os_sdlock_txq(dhd);
+ for (prec = 0; prec < txq->num_prec; prec++) {
+ while ((pkt = _dhd_wlfc_pktq_pdeq_with_fn(txq, prec, fn, arg))) {
+#ifdef DHDTCPACK_SUPPRESS
+ if (dhd_tcpack_check_xmit(dhd, pkt) == BCME_ERROR) {
+ DHD_ERROR(("%s %d: tcpack_suppress ERROR!!! Stop using it\n",
+ __FUNCTION__, __LINE__));
+ dhd_tcpack_suppress_set(dhd, TCPACK_SUP_OFF);
+ }
+#endif /* DHDTCPACK_SUPPRESS */
+ if (!head) {
+ head = pkt;
+ }
+ if (tail) {
+ PKTSETLINK(tail, pkt);
+ }
+ tail = pkt;
+ }
+ }
+ dhd_os_sdunlock_txq(dhd);
+
+ while ((pkt = head)) {
+ head = PKTLINK(pkt);
+ PKTSETLINK(pkt, NULL);
+ entry = _dhd_wlfc_find_table_entry(wlfc, pkt);
+
+ if (!WLFC_GET_AFQ(dhd->wlfc_mode) &&
+ !_dhd_wlfc_hanger_remove_reference(h, pkt)) {
+ DHD_ERROR(("%s: can't find pkt(%p) in hanger, free it anyway\n",
+ __FUNCTION__, pkt));
+ }
+ if (entry->transit_count)
+ entry->transit_count--;
+ if (entry->suppr_transit_count) {
+ entry->suppr_transit_count--;
+ if (entry->suppressed &&
+ (!entry->onbus_pkts_count) &&
+ (!entry->suppr_transit_count))
+ entry->suppressed = FALSE;
+#if defined(BCMINTERNAL) && defined(OOO_DEBUG)
+ _dhd_wlfc_bprint(wlfc, "[sc]-");
+#endif /* defined(BCMINTERNAL) && defined(OOO_DEBUG) */
+ }
+ _dhd_wlfc_return_implied_credit(wlfc, pkt);
+ wlfc->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(pkt))][DHD_PKTTAG_FIFO(PKTTAG(pkt))]--;
+ wlfc->stats.pktout++;
+ wlfc->stats.cleanup_txq_cnt++;
+ dhd_txcomplete(dhd, pkt, FALSE);
+ PKTFREE(wlfc->osh, pkt, TRUE);
+ }
+} /* _dhd_wlfc_cleanup_txq */
+
+#endif /* !BCMDBUS */
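+
+/*
+ * Note the two-phase drain pattern used above: matching packets are first
+ * unlinked from bus->txq into a private head/tail chain while the txq
+ * lock is held, and only then is each packet completed and freed with the
+ * lock dropped. A minimal sketch (chain_append/chain_pop are illustrative
+ * helpers, not driver APIs):
+ *
+ *   lock();   while ((pkt = deq(txq))) chain_append(&head, &tail, pkt);
+ *   unlock(); while ((pkt = chain_pop(&head))) complete_and_free(pkt);
+ *
+ * keeping the lock hold time independent of per-packet completion cost.
+ */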
+
+/** called during eg detach */
+void
+_dhd_wlfc_cleanup(dhd_pub_t *dhd, f_processpkt_t fn, void *arg)
+{
+ int i;
+ int total_entries;
+ athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+ wlfc_mac_descriptor_t* table;
+ wlfc_hanger_t* h = (wlfc_hanger_t*)wlfc->hanger;
+
+ wlfc->stats.cleanup_txq_cnt = 0;
+ wlfc->stats.cleanup_psq_cnt = 0;
+ wlfc->stats.cleanup_fw_cnt = 0;
+
+ /*
+ * flush sequence should be txq -> psq -> hanger/afq; the hanger has to be flushed last
+ */
+#ifndef BCMDBUS
+ /* flush bus->txq */
+ _dhd_wlfc_cleanup_txq(dhd, fn, arg);
+#endif /* BCMDBUS */
+
+ /* flush psq, search all entries, include nodes as well as interfaces */
+ total_entries = sizeof(wlfc->destination_entries)/sizeof(wlfc_mac_descriptor_t);
+ table = (wlfc_mac_descriptor_t*)&wlfc->destination_entries;
+
+ for (i = 0; i < total_entries; i++) {
+ if (table[i].occupied) {
+ /* release packets held in PSQ (both delayed and suppressed) */
+ if (table[i].psq.n_pkts_tot) {
+ WLFC_DBGMESG(("%s(): PSQ[%d].len = %d\n",
+ __FUNCTION__, i, table[i].psq.n_pkts_tot));
+ _dhd_wlfc_pktq_flush(wlfc, &table[i].psq, TRUE,
+ fn, arg, Q_TYPE_PSQ);
+ }
+
+ /* free packets held in AFQ */
+ if (WLFC_GET_AFQ(dhd->wlfc_mode) && (table[i].afq.n_pkts_tot)) {
+ _dhd_wlfc_pktq_flush(wlfc, &table[i].afq, TRUE,
+ fn, arg, Q_TYPE_AFQ);
+ }
+
+ if ((fn == NULL) && (&table[i] != &wlfc->destination_entries.other)) {
+ table[i].occupied = 0;
+ if (table[i].transit_count || table[i].suppr_transit_count) {
+ DHD_ERROR(("%s: table[%d] transit(%d), suppr_tansit(%d)\n",
+ __FUNCTION__, i,
+ table[i].transit_count,
+ table[i].suppr_transit_count));
+ }
+ }
+ }
+ }
+
+ /*
+ . flush remaining pkts held in the hanger queue, i.e. in neither bus->txq nor psq.
+ . these remaining pkts were already successfully downloaded to the dongle.
+ . a hanger slot's state cannot be set to free until a txstatus update is received.
+ */
+ if (!WLFC_GET_AFQ(dhd->wlfc_mode)) {
+ for (i = 0; i < h->max_items; i++) {
+ if ((h->items[i].state == WLFC_HANGER_ITEM_STATE_INUSE) ||
+ (h->items[i].state == WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED)) {
+ if (fn == NULL || (*fn)(h->items[i].pkt, arg)) {
+ h->items[i].state = WLFC_HANGER_ITEM_STATE_FLUSHED;
+ }
+ }
+ }
+ }
+
+ return;
+} /* _dhd_wlfc_cleanup */
+
+/** Called after eg the dongle signalled a new remote MAC that it connected with to the DHD */
+static int
+_dhd_wlfc_mac_entry_update(athost_wl_status_info_t* ctx, wlfc_mac_descriptor_t* entry,
+ uint8 action, uint8 ifid, uint8 iftype, uint8* ea,
+ f_processpkt_t fn, void *arg)
+{
+ int rc = BCME_OK;
+ uint8 i;
+
+ BCM_REFERENCE(i);
+
+#ifdef QMONITOR
+ dhd_qmon_reset(&entry->qmon);
+#endif
+
+ if ((action == eWLFC_MAC_ENTRY_ACTION_ADD) || (action == eWLFC_MAC_ENTRY_ACTION_UPDATE)) {
+ entry->occupied = 1;
+ entry->state = WLFC_STATE_OPEN;
+ entry->requested_credit = 0;
+ entry->interface_id = ifid;
+ entry->iftype = iftype;
+ entry->ac_bitmap = 0xff; /* update this when handling APSD */
+#ifdef BULK_DEQUEUE
+ for (i = 0; i < AC_COUNT + 1; i++) {
+ entry->release_count[i] = 0;
+ }
+#endif /* BULK_DEQUEUE */
+ /* for an interface entry we may not care about the MAC address */
+ if (ea != NULL)
+ memcpy(&entry->ea[0], ea, ETHER_ADDR_LEN);
+
+ if (action == eWLFC_MAC_ENTRY_ACTION_ADD) {
+ entry->suppressed = FALSE;
+ entry->transit_count = 0;
+#if defined(WL_EXT_IAPSTA) && defined(PROPTX_MAXCOUNT)
+ entry->transit_maxcount = wl_ext_get_wlfc_maxcount(ctx->dhdp, ifid);
+#endif /* PROPTX_MAXCOUNT */
+ entry->suppr_transit_count = 0;
+ entry->onbus_pkts_count = 0;
+ }
+
+ if (action == eWLFC_MAC_ENTRY_ACTION_ADD) {
+ dhd_pub_t *dhdp = (dhd_pub_t *)(ctx->dhdp);
+
+ pktq_init(&entry->psq, WLFC_PSQ_PREC_COUNT, WLFC_PSQ_LEN);
+ _dhd_wlfc_flow_control_check(ctx, &entry->psq, ifid);
+
+ if (WLFC_GET_AFQ(dhdp->wlfc_mode)) {
+ pktq_init(&entry->afq, WLFC_AFQ_PREC_COUNT, WLFC_PSQ_LEN);
+ }
+
+ if (entry->next == NULL) {
+ /* not linked to anywhere, add to tail */
+ if (ctx->active_entry_head) {
+ entry->prev = ctx->active_entry_head->prev;
+ ctx->active_entry_head->prev->next = entry;
+ ctx->active_entry_head->prev = entry;
+ entry->next = ctx->active_entry_head;
+ } else {
+ ASSERT(ctx->active_entry_count == 0);
+ entry->prev = entry->next = entry;
+ ctx->active_entry_head = entry;
+ }
+ ctx->active_entry_count++;
+ } else {
+ DHD_ERROR(("%s():%d, entry(%d)\n", __FUNCTION__, __LINE__,
+ (int)(entry - &ctx->destination_entries.nodes[0])));
+ }
+ }
+#if defined(BCMINTERNAL) && defined(OOO_DEBUG)
+ for (i = 0; i < (AC_COUNT + 1); i++) {
+ entry->last_send_seq[i] = 255;
+ entry->last_complete_seq[i] = 255;
+ }
+#endif /* defined(BCMINTERNAL) && defined(OOO_DEBUG) */
+ } else if (action == eWLFC_MAC_ENTRY_ACTION_DEL) {
+ /* When the entry is deleted, the packets queued in the entry must be
+ cleaned up. The cleanup must happen before 'occupied' is set to 0.
+ */
+ _dhd_wlfc_cleanup(ctx->dhdp, fn, arg);
+ _dhd_wlfc_flow_control_check(ctx, &entry->psq, ifid);
+
+ entry->occupied = 0;
+ entry->state = WLFC_STATE_CLOSE;
+ memset(&entry->ea[0], 0, ETHER_ADDR_LEN);
+
+ if (entry->next) {
+ /* not floating, remove from Q */
+ if (ctx->active_entry_count <= 1) {
+ /* last item */
+ ctx->active_entry_head = NULL;
+ ctx->active_entry_count = 0;
+ } else {
+ entry->prev->next = entry->next;
+ entry->next->prev = entry->prev;
+ if (entry == ctx->active_entry_head) {
+ ctx->active_entry_head = entry->next;
+ }
+ ctx->active_entry_count--;
+ }
+ entry->next = entry->prev = NULL;
+ } else {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ }
+ }
+ return rc;
+} /* _dhd_wlfc_mac_entry_update */
+
+#ifdef LIMIT_BORROW
+
+/** LIMIT_BORROW specific function */
+static int
+_dhd_wlfc_borrow_credit(athost_wl_status_info_t* ctx, int highest_lender_ac, int borrower_ac,
+ bool bBorrowAll)
+{
+ int lender_ac, borrow_limit = 0;
+ int rc = -1;
+
+ if (ctx == NULL) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return -1;
+ }
+
+ /* Borrow from lowest priority available AC (including BC/MC credits) */
+ for (lender_ac = 0; lender_ac <= highest_lender_ac; lender_ac++) {
+ if (!bBorrowAll) {
+ borrow_limit = ctx->Init_FIFO_credit[lender_ac]/WLFC_BORROW_LIMIT_RATIO;
+ } else {
+ borrow_limit = 0;
+ }
+
+ if (ctx->FIFO_credit[lender_ac] > borrow_limit) {
+ ctx->credits_borrowed[borrower_ac][lender_ac]++;
+ ctx->FIFO_credit[lender_ac]--;
+ rc = lender_ac;
+ break;
+ }
+ }
+
+ return rc;
+}
+
+/** LIMIT_BORROW specific function */
+static int _dhd_wlfc_return_credit(athost_wl_status_info_t* ctx, int lender_ac, int borrower_ac)
+{
+ if ((ctx == NULL) || (lender_ac < 0) || (lender_ac > AC_COUNT) ||
+ (borrower_ac < 0) || (borrower_ac > AC_COUNT)) {
+ DHD_ERROR(("Error: %s():%d, ctx(%p), lender_ac(%d), borrower_ac(%d)\n",
+ __FUNCTION__, __LINE__, ctx, lender_ac, borrower_ac));
+
+ return BCME_BADARG;
+ }
+
+ ctx->credits_borrowed[borrower_ac][lender_ac]--;
+ ctx->FIFO_credit[lender_ac]++;
+
+ return BCME_OK;
+}
+
+#endif /* LIMIT_BORROW */
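+
+/*
+ * Worked example for the LIMIT_BORROW path above, assuming
+ * WLFC_BORROW_LIMIT_RATIO == 4 (the actual value is defined elsewhere):
+ *
+ *   Init_FIFO_credit[lender] == 8  ->  borrow_limit = 8 / 4 = 2
+ *
+ * so a borrower may draw the lender's FIFO_credit down to, but not below,
+ * 2 credits; with bBorrowAll the floor drops to 0.
+ */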
+
+/**
+ * Called on an interface event (WLC_E_IF) indicated by firmware.
+ * @param action : eg eWLFC_MAC_ENTRY_ACTION_UPDATE or eWLFC_MAC_ENTRY_ACTION_ADD
+ */
+static int
+_dhd_wlfc_interface_entry_update(void* state,
+ uint8 action, uint8 ifid, uint8 iftype, uint8* ea)
+{
+ athost_wl_status_info_t* ctx = (athost_wl_status_info_t*)state;
+ wlfc_mac_descriptor_t* entry;
+
+ if (ifid >= WLFC_MAX_IFNUM)
+ return BCME_BADARG;
+
+ entry = &ctx->destination_entries.interfaces[ifid];
+
+ return _dhd_wlfc_mac_entry_update(ctx, entry, action, ifid, iftype, ea,
+ _dhd_wlfc_ifpkt_fn, &ifid);
+}
+
+/**
+ * Called eg on receiving a WLC_E_BCMC_CREDIT_SUPPORT event from the dongle (broadcast/multicast
+ * specific)
+ */
+static int
+_dhd_wlfc_BCMCCredit_support_update(void* state)
+{
+ athost_wl_status_info_t* ctx = (athost_wl_status_info_t*)state;
+
+ ctx->bcmc_credit_supported = TRUE;
+ return BCME_OK;
+}
+
+/** Called eg on receiving a WLC_E_FIFO_CREDIT_MAP event from the dongle */
+static int
+_dhd_wlfc_FIFOcreditmap_update(void* state, uint8* credits)
+{
+ athost_wl_status_info_t* ctx = (athost_wl_status_info_t*)state;
+ int i;
+
+ for (i = 0; i <= 4; i++) {
+ if (ctx->Init_FIFO_credit[i] != ctx->FIFO_credit[i]) {
+ DHD_ERROR(("%s: credit[i] is not returned, (%d %d)\n",
+ __FUNCTION__, ctx->Init_FIFO_credit[i], ctx->FIFO_credit[i]));
+ }
+ }
+
+ /* update the AC FIFO credit map */
+ ctx->FIFO_credit[0] += (credits[0] - ctx->Init_FIFO_credit[0]);
+ ctx->FIFO_credit[1] += (credits[1] - ctx->Init_FIFO_credit[1]);
+ ctx->FIFO_credit[2] += (credits[2] - ctx->Init_FIFO_credit[2]);
+ ctx->FIFO_credit[3] += (credits[3] - ctx->Init_FIFO_credit[3]);
+ ctx->FIFO_credit[4] += (credits[4] - ctx->Init_FIFO_credit[4]);
+
+ ctx->Init_FIFO_credit[0] = credits[0];
+ ctx->Init_FIFO_credit[1] = credits[1];
+ ctx->Init_FIFO_credit[2] = credits[2];
+ ctx->Init_FIFO_credit[3] = credits[3];
+ ctx->Init_FIFO_credit[4] = credits[4];
+
+ /* credit for ATIM FIFO is not used yet. */
+ ctx->Init_FIFO_credit[5] = ctx->FIFO_credit[5] = 0;
+
+ return BCME_OK;
+}
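+
+/*
+ * Worked example of the delta update above: suppose AC 1 started with
+ * Init_FIFO_credit[1] == 8, currently has FIFO_credit[1] == 5 (3 credits
+ * in flight), and the new map reports 10:
+ *
+ *   FIFO_credit[1] += (10 - 8);  // -> 7; the 3 in-flight credits
+ *   Init_FIFO_credit[1] = 10;    //    remain outstanding
+ *
+ * Adjusting by the delta rather than overwriting preserves credits still
+ * on loan to the dongle.
+ */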
+
+/**
+ * Called during committing of a transmit packet from the OS DHD layer to the next layer towards
+ * the dongle (eg the DBUS layer). All transmit packets flow via this function to the next layer.
+ *
+ * @param[in/out] ctx Driver specific flow control administration
+ * @param[in] ac Access Category (QoS) of called supplied packet
+ * @param[in] commit_info Contains eg the packet to send
+ * @param[in] fcommit Function pointer to transmit function of next software layer
+ * @param[in] commit_ctx Opaque context used when calling next layer
+ */
+static int
+_dhd_wlfc_handle_packet_commit(athost_wl_status_info_t* ctx, int ac,
+ dhd_wlfc_commit_info_t *commit_info, f_commitpkt_t fcommit, void* commit_ctx)
+{
+ uint32 hslot;
+ int rc;
+ dhd_pub_t *dhdp = (dhd_pub_t *)(ctx->dhdp);
+
+ /*
+ if ac_fifo_credit_spent = 0
+
+ This packet will not count against the FIFO credit.
+ To ensure the txstatus corresponding to this packet
+ does not provide an implied credit (default behavior)
+ mark the packet accordingly.
+
+ if ac_fifo_credit_spent = 1
+
+ This is a normal packet and it counts against the FIFO
+ credit count.
+ */
+ DHD_PKTTAG_SETCREDITCHECK(PKTTAG(commit_info->p), commit_info->ac_fifo_credit_spent);
+ rc = _dhd_wlfc_pretx_pktprocess(ctx, commit_info->mac_entry, &commit_info->p,
+ commit_info->needs_hdr, &hslot);
+
+#if defined(BCMINTERNAL) && defined(OOO_DEBUG)
+ _dhd_wlfc_check_send_order(ctx, commit_info->mac_entry, commit_info->p);
+#endif /* defined(BCMINTERNAL) && defined(OOO_DEBUG) */
+ if (rc == BCME_OK) {
+ rc = fcommit(commit_ctx, commit_info->p);
+ if (rc == BCME_OK) {
+ uint8 gen = WL_TXSTATUS_GET_GENERATION(
+ DHD_PKTTAG_H2DTAG(PKTTAG(commit_info->p)));
+ dhd_txpkt_log_and_dump(dhdp, commit_info->p, NULL);
+ ctx->stats.pkt2bus++;
+ if (commit_info->ac_fifo_credit_spent || (ac == AC_COUNT)) {
+ ctx->stats.send_pkts[ac]++;
+ WLFC_HOST_FIFO_CREDIT_INC_SENTCTRS(ctx, ac);
+ }
+
+ if (gen != commit_info->mac_entry->generation) {
+ /* will be suppressed back by design */
+ if (!commit_info->mac_entry->suppressed) {
+ commit_info->mac_entry->suppressed = TRUE;
+ }
+ commit_info->mac_entry->suppr_transit_count++;
+#if defined(BCMINTERNAL) && defined(OOO_DEBUG)
+ _dhd_wlfc_bprint(ctx, "[si%u]-",
+ commit_info->mac_entry->suppr_transit_count);
+#endif /* defined(BCMINTERNAL) && defined(OOO_DEBUG) */
+ }
+ commit_info->mac_entry->transit_count++;
+ commit_info->mac_entry->onbus_pkts_count++;
+ } else if (commit_info->needs_hdr) {
+ if (!WLFC_GET_AFQ(dhdp->wlfc_mode)) {
+ void *pout = NULL;
+ /* pop hanger for delayed packet */
+ _dhd_wlfc_hanger_poppkt(ctx->hanger, WL_TXSTATUS_GET_HSLOT(
+ DHD_PKTTAG_H2DTAG(PKTTAG(commit_info->p))), &pout, TRUE);
+ ASSERT(commit_info->p == pout);
+ }
+ }
+ } else {
+ ctx->stats.generic_error++;
+ }
+
+ if (rc != BCME_OK) {
+ /*
+ pretx pkt process or bus commit has failed, rollback.
+ - remove wl-header for a delayed packet
+ - save wl-header header for suppressed packets
+ - reset credit check flag
+ */
+ _dhd_wlfc_rollback_packet_toq(ctx, commit_info->p, commit_info->pkt_type, hslot);
+ DHD_PKTTAG_SETCREDITCHECK(PKTTAG(commit_info->p), 0);
+ }
+
+ return rc;
+} /* _dhd_wlfc_handle_packet_commit */
+
+/** Returns remote MAC descriptor for caller supplied MAC address */
+static uint8
+_dhd_wlfc_find_mac_desc_id_from_mac(dhd_pub_t *dhdp, uint8 *ea)
+{
+ wlfc_mac_descriptor_t* table =
+ ((athost_wl_status_info_t*)dhdp->wlfc_state)->destination_entries.nodes;
+ uint8 table_index;
+
+ if (ea != NULL) {
+ for (table_index = 0; table_index < WLFC_MAC_DESC_TABLE_SIZE; table_index++) {
+ if ((memcmp(ea, &table[table_index].ea[0], ETHER_ADDR_LEN) == 0) &&
+ table[table_index].occupied)
+ return table_index;
+ }
+ }
+ return WLFC_MAC_DESC_ID_INVALID;
+}
+
+/**
+ * Called when the host receives a WLFC_CTL_TYPE_TXSTATUS event from the dongle, indicating the
+ * status of a frame that the dongle attempted to transmit over the wireless medium.
+ */
+static int
+dhd_wlfc_suppressed_acked_update(dhd_pub_t *dhd, uint16 hslot, uint8 prec, uint8 hcnt)
+{
+ athost_wl_status_info_t* ctx;
+ wlfc_mac_descriptor_t* entry = NULL;
+ struct pktq *pq;
+ struct pktq_prec *q;
+ void *p, *b;
+
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd(%p)\n", __FUNCTION__, dhd));
+ return BCME_BADARG;
+ }
+ ctx = (athost_wl_status_info_t*)dhd->wlfc_state;
+ if (!ctx) {
+ DHD_ERROR(("%s: ctx(%p)\n", __FUNCTION__, ctx));
+ return BCME_ERROR;
+ }
+
+ ASSERT(hslot < (WLFC_MAC_DESC_TABLE_SIZE + WLFC_MAX_IFNUM + 1));
+
+ if (hslot < WLFC_MAC_DESC_TABLE_SIZE)
+ entry = &ctx->destination_entries.nodes[hslot];
+ else if (hslot < (WLFC_MAC_DESC_TABLE_SIZE + WLFC_MAX_IFNUM))
+ entry = &ctx->destination_entries.interfaces[hslot - WLFC_MAC_DESC_TABLE_SIZE];
+ else
+ entry = &ctx->destination_entries.other;
+
+ pq = &entry->psq;
+
+ ASSERT(((prec << 1) + 1) < pq->num_prec);
+
+ q = &pq->q[((prec << 1) + 1)];
+
+ b = NULL;
+ p = q->head;
+
+ while (p && (hcnt != WL_TXSTATUS_GET_FREERUNCTR(DHD_PKTTAG_H2DTAG(PKTTAG(p))))) {
+ b = p;
+ p = PKTLINK(p);
+ }
+
+ if (p == NULL) {
+ /* none is matched */
+ if (b) {
+ DHD_ERROR(("%s: can't find matching seq(%d)\n", __FUNCTION__, hcnt));
+ } else {
+ DHD_ERROR(("%s: queue is empty\n", __FUNCTION__));
+ }
+
+ return BCME_ERROR;
+ }
+
+ if (!b) {
+ /* head packet is matched */
+ if ((q->head = PKTLINK(p)) == NULL) {
+ q->tail = NULL;
+ }
+ } else {
+ /* middle packet is matched */
+ PKTSETLINK(b, PKTLINK(p));
+ if (PKTLINK(p) == NULL) {
+ q->tail = b;
+ }
+ }
+
+ q->n_pkts--;
+ pq->n_pkts_tot--;
+
+#ifdef WL_TXQ_STALL
+ q->dequeue_count++;
+#endif
+
+ ctx->pkt_cnt_in_q[DHD_PKTTAG_IF(PKTTAG(p))][prec]--;
+ ctx->pkt_cnt_per_ac[prec]--;
+
+ PKTSETLINK(p, NULL);
+
+ if (WLFC_GET_AFQ(dhd->wlfc_mode)) {
+ _dhd_wlfc_enque_afq(ctx, p);
+ } else {
+ _dhd_wlfc_hanger_pushpkt(ctx->hanger, p, hslot);
+ }
+
+ entry->transit_count++;
+
+ return BCME_OK;
+}
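+
+/*
+ * hslot-to-descriptor mapping used above; the descriptor space is laid
+ * out as nodes, then interfaces, then the catch-all entry:
+ *
+ *   [0 .. WLFC_MAC_DESC_TABLE_SIZE-1]                     -> nodes[]
+ *   [WLFC_MAC_DESC_TABLE_SIZE ..
+ *       WLFC_MAC_DESC_TABLE_SIZE + WLFC_MAX_IFNUM - 1]    -> interfaces[]
+ *   [WLFC_MAC_DESC_TABLE_SIZE + WLFC_MAX_IFNUM]           -> other
+ */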
+
+static int
+_dhd_wlfc_compressed_txstatus_update(dhd_pub_t *dhd, uint8* pkt_info, uint8 len, void** p_mac)
+{
+ uint8 status_flag_ori, status_flag;
+ uint32 status;
+ int ret = BCME_OK;
+ int remove_from_hanger_ori, remove_from_hanger = 1;
+ void* pktbuf = NULL;
+ uint8 fifo_id = 0, gen = 0, count = 0, hcnt;
+ uint16 hslot;
+ wlfc_mac_descriptor_t* entry = NULL;
+ athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+ uint16 seq = 0, seq_fromfw = 0, seq_num = 0;
+ uint16 pktfate_status;
+
+ memcpy(&status, pkt_info, sizeof(uint32));
+ status = ltoh32(status);
+ status_flag = WL_TXSTATUS_GET_FLAGS(status);
+ hcnt = WL_TXSTATUS_GET_FREERUNCTR(status);
+ hslot = WL_TXSTATUS_GET_HSLOT(status);
+ fifo_id = WL_TXSTATUS_GET_FIFO(status);
+ gen = WL_TXSTATUS_GET_GENERATION(status);
+
+ if (WLFC_GET_REUSESEQ(dhd->wlfc_mode)) {
+ memcpy(&seq, pkt_info + WLFC_CTL_VALUE_LEN_TXSTATUS, WLFC_CTL_VALUE_LEN_SEQ);
+ seq = ltoh16(seq);
+ seq_fromfw = GET_WL_HAS_ASSIGNED_SEQ(seq);
+ seq_num = WL_SEQ_GET_NUM(seq);
+ }
+
+ wlfc->stats.txstatus_in += len;
+
+ if (status_flag == WLFC_CTL_PKTFLAG_DISCARD) {
+ wlfc->stats.pkt_freed += len;
+ } else if (status_flag == WLFC_CTL_PKTFLAG_DISCARD_NOACK) {
+ wlfc->stats.pkt_freed += len;
+ } else if (status_flag == WLFC_CTL_PKTFLAG_D11SUPPRESS) {
+ wlfc->stats.d11_suppress += len;
+ remove_from_hanger = 0;
+ } else if (status_flag == WLFC_CTL_PKTFLAG_WLSUPPRESS) {
+ wlfc->stats.wl_suppress += len;
+ remove_from_hanger = 0;
+ } else if (status_flag == WLFC_CTL_PKTFLAG_TOSSED_BYWLC) {
+ wlfc->stats.wlc_tossed_pkts += len;
+ } else if (status_flag == WLFC_CTL_PKTFLAG_SUPPRESS_ACKED) {
+ wlfc->stats.pkt_freed += len;
+ } else if (status_flag == WLFC_CTL_PKTFLAG_EXPIRED) {
+ wlfc->stats.pkt_exptime += len;
+ } else if (status_flag == WLFC_CTL_PKTFLAG_DROPPED) {
+ wlfc->stats.pkt_dropped += len;
+ }
+
+ if (dhd->proptxstatus_txstatus_ignore) {
+ if (!remove_from_hanger) {
+ DHD_ERROR(("suppress txstatus: %d\n", status_flag));
+ }
+ return BCME_OK;
+ }
+
+ status_flag_ori = status_flag;
+ remove_from_hanger_ori = remove_from_hanger;
+
+ while (count < len) {
+ if (status_flag == WLFC_CTL_PKTFLAG_SUPPRESS_ACKED) {
+ dhd_wlfc_suppressed_acked_update(dhd, hslot, fifo_id, hcnt);
+ }
+ if (WLFC_GET_AFQ(dhd->wlfc_mode)) {
+ ret = _dhd_wlfc_deque_afq(wlfc, hslot, hcnt, fifo_id, &pktbuf);
+ } else {
+ status_flag = status_flag_ori;
+ remove_from_hanger = remove_from_hanger_ori;
+ ret = _dhd_wlfc_hanger_poppkt(wlfc->hanger, hslot, &pktbuf, FALSE);
+ if (!pktbuf) {
+ _dhd_wlfc_hanger_free_pkt(wlfc, hslot,
+ WLFC_HANGER_PKT_STATE_TXSTATUS, -1);
+ goto cont;
+ } else {
+ wlfc_hanger_t* h = (wlfc_hanger_t*)wlfc->hanger;
+ if (h->items[hslot].state == WLFC_HANGER_ITEM_STATE_FLUSHED) {
+ status_flag = WLFC_CTL_PKTFLAG_DISCARD;
+ remove_from_hanger = 1;
+ }
+ }
+ }
+
+ if ((ret != BCME_OK) || !pktbuf) {
+ goto cont;
+ }
+
+ bcm_pkt_validate_chk(pktbuf, "_dhd_wlfc_compressed_txstatus_update");
+
+ pktfate_status = ltoh16(status_flag_ori) & WLFC_CTL_PKTFLAG_MASK;
+ dhd_txpkt_log_and_dump(dhd, pktbuf, &pktfate_status);
+
+ /* set fifo_id to correct value because not all FW does that */
+ fifo_id = DHD_PKTTAG_FIFO(PKTTAG(pktbuf));
+
+ entry = _dhd_wlfc_find_table_entry(wlfc, pktbuf);
+
+ if (!remove_from_hanger) {
+ /* this packet was suppressed */
+ if (!entry->suppressed || (entry->generation != gen)) {
+ if (!entry->suppressed) {
+ entry->suppr_transit_count = entry->transit_count;
+ if (p_mac) {
+ *p_mac = entry;
+ }
+ } else {
+ DHD_ERROR(("gen(%d), entry->generation(%d)\n",
+ gen, entry->generation));
+ }
+ entry->suppressed = TRUE;
+
+#if defined(BCMINTERNAL) && defined(OOO_DEBUG)
+ _dhd_wlfc_bprint(wlfc, "[ss%u.%u.%u]-",
+ (uint8)(entry - &wlfc->destination_entries.nodes[0]),
+ entry->generation,
+ entry->suppr_transit_count);
+#endif /* defined(BCMINTERNAL) && defined(OOO_DEBUG) */
+ }
+ entry->generation = gen;
+#if defined(BCMINTERNAL) && defined(OOO_DEBUG)
+ if (gen == WL_TXSTATUS_GET_GENERATION(DHD_PKTTAG_H2DTAG(PKTTAG(pktbuf)))) {
+ printf("==%d.%d==\n", gen, hcnt);
+ }
+#endif /* defined(BCMINTERNAL) && defined(OOO_DEBUG) */
+ }
+
+#ifdef PROP_TXSTATUS_DEBUG
+ if (!WLFC_GET_AFQ(dhd->wlfc_mode))
+ {
+ uint32 new_t = OSL_SYSUPTIME();
+ uint32 old_t;
+ uint32 delta;
+ old_t = ((wlfc_hanger_t*)(wlfc->hanger))->items[hslot].push_time;
+
+ wlfc->stats.latency_sample_count++;
+ if (new_t > old_t)
+ delta = new_t - old_t;
+ else
+ delta = 0xffffffff + new_t - old_t;
+ wlfc->stats.total_status_latency += delta;
+ wlfc->stats.latency_most_recent = delta;
+
+ wlfc->stats.deltas[wlfc->stats.idx_delta++] = delta;
+ if (wlfc->stats.idx_delta == sizeof(wlfc->stats.deltas)/sizeof(uint32))
+ wlfc->stats.idx_delta = 0;
+ }
+#endif /* PROP_TXSTATUS_DEBUG */
+
+ /* pick up the implicit credit from this packet */
+ if (DHD_PKTTAG_CREDITCHECK(PKTTAG(pktbuf))) {
+ _dhd_wlfc_return_implied_credit(wlfc, pktbuf);
+ } else {
+ /*
+ if this packet did not count against FIFO credit, it must have
+ taken a requested_credit from the destination entry (for pspoll etc.)
+ */
+ if (!DHD_PKTTAG_ONETIMEPKTRQST(PKTTAG(pktbuf))) {
+ entry->requested_credit++;
+#if defined(DHD_WLFC_THREAD)
+ _dhd_wlfc_thread_wakeup(dhd);
+#endif /* DHD_WLFC_THREAD */
+ }
+#ifdef PROP_TXSTATUS_DEBUG
+ entry->dstncredit_acks++;
+#endif
+ }
+
+ if ((status_flag == WLFC_CTL_PKTFLAG_D11SUPPRESS) ||
+ (status_flag == WLFC_CTL_PKTFLAG_WLSUPPRESS)) {
+ /* save generation bit inside packet */
+ WL_TXSTATUS_SET_GENERATION(DHD_PKTTAG_H2DTAG(PKTTAG(pktbuf)), gen);
+
+ if (WLFC_GET_REUSESEQ(dhd->wlfc_mode)) {
+ WL_SEQ_SET_REUSE(DHD_PKTTAG_H2DSEQ(PKTTAG(pktbuf)), seq_fromfw);
+ WL_SEQ_SET_NUM(DHD_PKTTAG_H2DSEQ(PKTTAG(pktbuf)), seq_num);
+ }
+
+ ret = _dhd_wlfc_enque_suppressed(wlfc, fifo_id, pktbuf);
+ if (ret != BCME_OK) {
+#if defined(BCMINTERNAL) && defined(OOO_DEBUG)
+ _dhd_wlfc_bprint(wlfc, "f%u.%u.%u-",
+ (uint8)(entry - &wlfc->destination_entries.nodes[0]),
+ gen,
+ hcnt);
+ _dhd_wlfc_check_complete_order(wlfc, entry, pktbuf);
+#endif /* defined(BCMINTERNAL) && defined(OOO_DEBUG) */
+ /* delay q is full, drop this packet */
+ DHD_WLFC_QMON_COMPLETE(entry);
+ _dhd_wlfc_prec_drop(dhd, (fifo_id << 1) + 1, pktbuf, FALSE);
+ } else {
+ if (!WLFC_GET_AFQ(dhd->wlfc_mode)) {
+ /* Mark suppressed to avoid a double free
+ during wlfc cleanup
+ */
+ _dhd_wlfc_hanger_mark_suppressed(wlfc->hanger, hslot, gen);
+ }
+#if defined(BCMINTERNAL) && defined(OOO_DEBUG)
+ _dhd_wlfc_bprint(wlfc, "r%u.%u.%u.%u-",
+ status_flag,
+ (uint8)(entry - &wlfc->destination_entries.nodes[0]),
+ gen,
+ hcnt);
+ if (WLFC_GET_REUSESEQ(dhd->wlfc_mode)) {
+ _dhd_wlfc_bprint(wlfc, "%u.%u-", seq_fromfw, seq_num);
+ }
+
+#endif /* defined(BCMINTERNAL) && defined(OOO_DEBUG) */
+ }
+ } else {
+#if defined(BCMINTERNAL) && defined(OOO_DEBUG)
+ _dhd_wlfc_bprint(wlfc, "c%u.%u.%u.%u-",
+ status_flag,
+ (uint8)(entry - &wlfc->destination_entries.nodes[0]),
+ gen,
+ hcnt);
+ _dhd_wlfc_check_complete_order(wlfc, entry, pktbuf);
+#endif /* defined(BCMINTERNAL) && defined(OOO_DEBUG) */
+
+ DHD_WLFC_QMON_COMPLETE(entry);
+
+ if (!WLFC_GET_AFQ(dhd->wlfc_mode)) {
+ _dhd_wlfc_hanger_free_pkt(wlfc, hslot,
+ WLFC_HANGER_PKT_STATE_TXSTATUS, TRUE);
+ } else {
+ dhd_txcomplete(dhd, pktbuf, TRUE);
+ wlfc->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(pktbuf))]
+ [DHD_PKTTAG_FIFO(PKTTAG(pktbuf))]--;
+ wlfc->stats.pktout++;
+ /* free the packet */
+ PKTFREE(wlfc->osh, pktbuf, TRUE);
+ }
+ }
+ /* pkt back from firmware side */
+ if (entry->transit_count)
+ entry->transit_count--;
+ if (entry->suppr_transit_count) {
+ entry->suppr_transit_count--;
+ if (entry->suppressed &&
+ (!entry->onbus_pkts_count) &&
+ (!entry->suppr_transit_count))
+ entry->suppressed = FALSE;
+#if defined(BCMINTERNAL) && defined(OOO_DEBUG)
+ _dhd_wlfc_bprint(wlfc, "[sc]-");
+#endif /* defined(BCMINTERNAL) && defined(OOO_DEBUG) */
+ }
+
+cont:
+ hcnt = (hcnt + 1) & WL_TXSTATUS_FREERUNCTR_MASK;
+ if (!WLFC_GET_AFQ(dhd->wlfc_mode)) {
+ hslot = (hslot + 1) & WL_TXSTATUS_HSLOT_MASK;
+ }
+
+ if (WLFC_GET_REUSESEQ(dhd->wlfc_mode) && seq_fromfw) {
+ seq_num = (seq_num + 1) & WL_SEQ_NUM_MASK;
+ }
+
+ count++;
+ }
+
+ return BCME_OK;
+} /* _dhd_wlfc_compressed_txstatus_update */
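+
+/*
+ * "Compressed" txstatus illustrated: a single TLV can acknowledge 'len'
+ * consecutive packets, so the loop above derives each successor from the
+ * first status, wrapping within the relevant field masks:
+ *
+ *   hcnt  = (hcnt + 1)  & WL_TXSTATUS_FREERUNCTR_MASK;
+ *   hslot = (hslot + 1) & WL_TXSTATUS_HSLOT_MASK;  // hanger mode only
+ *   seq_num = (seq_num + 1) & WL_SEQ_NUM_MASK;     // if reusing seq
+ *
+ * e.g. len == 3 starting at hcnt == 254 completes 254, 255 and 0,
+ * assuming an 8-bit free-running counter.
+ */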
+
+/**
+ * Called when eg host receives a 'WLFC_CTL_TYPE_FIFO_CREDITBACK' event from the dongle.
+ * @param[in] credits caller supplied credit that will be added to the host credit.
+ */
+static int
+_dhd_wlfc_fifocreditback_indicate(dhd_pub_t *dhd, uint8* credits)
+{
+ int i;
+ athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+ for (i = 0; i < WLFC_CTL_VALUE_LEN_FIFO_CREDITBACK; i++) {
+#ifdef PROP_TXSTATUS_DEBUG
+ wlfc->stats.fifo_credits_back[i] += credits[i];
+#endif
+
+ /* update FIFO credits */
+ if (dhd->proptxstatus_mode == WLFC_FCMODE_EXPLICIT_CREDIT)
+ {
+ int lender; /* Note that borrower is i */
+
+ /* Return credits to highest priority lender first */
+ for (lender = AC_COUNT; (lender >= 0) && (credits[i] > 0); lender--) {
+ if (wlfc->credits_borrowed[i][lender] > 0) {
+ if (credits[i] >= wlfc->credits_borrowed[i][lender]) {
+ credits[i] -=
+ (uint8)wlfc->credits_borrowed[i][lender];
+ wlfc->FIFO_credit[lender] +=
+ wlfc->credits_borrowed[i][lender];
+ wlfc->credits_borrowed[i][lender] = 0;
+ } else {
+ wlfc->credits_borrowed[i][lender] -= credits[i];
+ wlfc->FIFO_credit[lender] += credits[i];
+ credits[i] = 0;
+ }
+ }
+ }
+
+ /* If we have more credits left over, these must belong to the AC */
+ if (credits[i] > 0) {
+ wlfc->FIFO_credit[i] += credits[i];
+ }
+
+ if (wlfc->FIFO_credit[i] > wlfc->Init_FIFO_credit[i]) {
+ wlfc->FIFO_credit[i] = wlfc->Init_FIFO_credit[i];
+ }
+ }
+ }
+
+#if defined(DHD_WLFC_THREAD)
+ _dhd_wlfc_thread_wakeup(dhd);
+#endif /* defined(DHD_WLFC_THREAD) */
+
+ return BCME_OK;
+} /* _dhd_wlfc_fifocreditback_indicate */
+
+#ifndef BCMDBUS
+/** !BCMDBUS specific function */
+static void
+_dhd_wlfc_suppress_txq(dhd_pub_t *dhd, f_processpkt_t fn, void *arg)
+{
+ athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+ wlfc_mac_descriptor_t* entry;
+ int prec;
+ void *pkt = NULL, *head = NULL, *tail = NULL;
+ struct pktq *txq = (struct pktq *)dhd_bus_txq(dhd->bus);
+ uint8 results[WLFC_CTL_VALUE_LEN_TXSTATUS+WLFC_CTL_VALUE_LEN_SEQ];
+ uint8 credits[WLFC_CTL_VALUE_LEN_FIFO_CREDITBACK] = {0};
+ uint32 htod = 0;
+ uint16 htodseq = 0;
+ bool bCreditUpdate = FALSE;
+
+ dhd_os_sdlock_txq(dhd);
+ for (prec = 0; prec < txq->num_prec; prec++) {
+ while ((pkt = _dhd_wlfc_pktq_pdeq_with_fn(txq, prec, fn, arg))) {
+ if (!head) {
+ head = pkt;
+ }
+ if (tail) {
+ PKTSETLINK(tail, pkt);
+ }
+ tail = pkt;
+ }
+ }
+ dhd_os_sdunlock_txq(dhd);
+
+ while ((pkt = head)) {
+ head = PKTLINK(pkt);
+ PKTSETLINK(pkt, NULL);
+
+ entry = _dhd_wlfc_find_table_entry(wlfc, pkt);
+ if (!entry) {
+ PKTFREE(dhd->osh, pkt, TRUE);
+ continue;
+ }
+ if (entry->onbus_pkts_count > 0) {
+ entry->onbus_pkts_count--;
+ }
+ if (entry->suppressed &&
+ (!entry->onbus_pkts_count) &&
+ (!entry->suppr_transit_count)) {
+ entry->suppressed = FALSE;
+ }
+ /* fake a suppression txstatus */
+ htod = DHD_PKTTAG_H2DTAG(PKTTAG(pkt));
+ WL_TXSTATUS_SET_FLAGS(htod, WLFC_CTL_PKTFLAG_WLSUPPRESS);
+ WL_TXSTATUS_SET_GENERATION(htod, entry->generation);
+ htod = htol32(htod);
+ memcpy(results, &htod, WLFC_CTL_VALUE_LEN_TXSTATUS);
+ if (WLFC_GET_REUSESEQ(dhd->wlfc_mode)) {
+ htodseq = DHD_PKTTAG_H2DSEQ(PKTTAG(pkt));
+ if (IS_WL_TO_REUSE_SEQ(htodseq)) {
+ SET_WL_HAS_ASSIGNED_SEQ(htodseq);
+ RESET_WL_TO_REUSE_SEQ(htodseq);
+ }
+ htodseq = htol16(htodseq);
+ memcpy(results + WLFC_CTL_VALUE_LEN_TXSTATUS, &htodseq,
+ WLFC_CTL_VALUE_LEN_SEQ);
+ }
+ if (WLFC_GET_AFQ(dhd->wlfc_mode)) {
+ _dhd_wlfc_enque_afq(wlfc, pkt);
+ }
+ _dhd_wlfc_compressed_txstatus_update(dhd, results, 1, NULL);
+
+ /* fake a fifo credit back */
+ if (DHD_PKTTAG_CREDITCHECK(PKTTAG(pkt))) {
+ credits[DHD_PKTTAG_FIFO(PKTTAG(pkt))]++;
+ bCreditUpdate = TRUE;
+ }
+ }
+
+ if (bCreditUpdate) {
+ _dhd_wlfc_fifocreditback_indicate(dhd, credits);
+ }
+} /* _dhd_wlfc_suppress_txq */
+
+#endif /* !BCMDBUS */
+
+static int
+_dhd_wlfc_dbg_senum_check(dhd_pub_t *dhd, uint8 *value)
+{
+ uint32 timestamp;
+
+ (void)dhd;
+
+ bcopy(&value[2], &timestamp, sizeof(uint32));
+ timestamp = ltoh32(timestamp);
+ DHD_INFO(("RXPKT: SEQ: %d, timestamp %d\n", value[1], timestamp));
+ return BCME_OK;
+}
+
+static int
+_dhd_wlfc_rssi_indicate(dhd_pub_t *dhd, uint8* rssi)
+{
+ (void)dhd;
+ (void)rssi;
+ return BCME_OK;
+}
+
+static void
+_dhd_wlfc_add_requested_entry(athost_wl_status_info_t* wlfc, wlfc_mac_descriptor_t* entry)
+{
+ int i;
+
+ if (!wlfc || !entry) {
+ return;
+ }
+
+ for (i = 0; i < wlfc->requested_entry_count; i++) {
+ if (entry == wlfc->requested_entry[i]) {
+ break;
+ }
+ }
+
+ if (i == wlfc->requested_entry_count) {
+ /* no match entry found */
+ ASSERT(wlfc->requested_entry_count <= (WLFC_MAC_DESC_TABLE_SIZE-1));
+ wlfc->requested_entry[wlfc->requested_entry_count++] = entry;
+ }
+}
+
+/** called on eg receiving 'mac open' event from the dongle. */
+static void
+_dhd_wlfc_remove_requested_entry(athost_wl_status_info_t* wlfc, wlfc_mac_descriptor_t* entry)
+{
+ int i;
+
+ if (!wlfc || !entry) {
+ return;
+ }
+
+ for (i = 0; i < wlfc->requested_entry_count; i++) {
+ if (entry == wlfc->requested_entry[i]) {
+ break;
+ }
+ }
+
+ if (i < wlfc->requested_entry_count) {
+ /* found */
+ ASSERT(wlfc->requested_entry_count > 0);
+ wlfc->requested_entry_count--;
+ if (i != wlfc->requested_entry_count) {
+ wlfc->requested_entry[i] =
+ wlfc->requested_entry[wlfc->requested_entry_count];
+ }
+ wlfc->requested_entry[wlfc->requested_entry_count] = NULL;
+ }
+}
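+
+/*
+ * The removal above is the classic O(1) unordered-array delete: the last
+ * element is swapped into the vacated slot. E.g. removing index 1 from
+ * [A, B, C, D] (count == 4):
+ *
+ *   requested_entry[1] = requested_entry[3];  // [A, D, C, D]
+ *   requested_entry[3] = NULL;                // [A, D, C, -]
+ *   requested_entry_count = 3;
+ *
+ * Only membership matters here, not ordering.
+ */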
+
+/** called on eg receiving a WLFC_CTL_TYPE_MACDESC_ADD TLV from the dongle */
+static int
+_dhd_wlfc_mac_table_update(dhd_pub_t *dhd, uint8* value, uint8 type)
+{
+ int rc;
+ athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+ wlfc_mac_descriptor_t* table;
+ uint8 existing_index;
+ uint8 table_index;
+ uint8 ifid;
+ uint8* ea;
+
+ WLFC_DBGMESG(("%s(), mac ["MACDBG"],%s,idx:%d,id:0x%02x\n",
+ __FUNCTION__, MAC2STRDBG(&value[2]),
+ ((type == WLFC_CTL_TYPE_MACDESC_ADD) ? "ADD":"DEL"),
+ WLFC_MAC_DESC_GET_LOOKUP_INDEX(value[0]), value[0]));
+
+ table = wlfc->destination_entries.nodes;
+ table_index = WLFC_MAC_DESC_GET_LOOKUP_INDEX(value[0]);
+ ifid = value[1];
+ ea = &value[2];
+
+ _dhd_wlfc_remove_requested_entry(wlfc, &table[table_index]);
+ if (type == WLFC_CTL_TYPE_MACDESC_ADD) {
+ existing_index = _dhd_wlfc_find_mac_desc_id_from_mac(dhd, &value[2]);
+ if ((existing_index != WLFC_MAC_DESC_ID_INVALID) &&
+ (existing_index != table_index) && table[existing_index].occupied) {
+ /*
+ there is an existing different entry, free the old one
+ and move it to new index if necessary.
+ */
+ rc = _dhd_wlfc_mac_entry_update(wlfc, &table[existing_index],
+ eWLFC_MAC_ENTRY_ACTION_DEL, table[existing_index].interface_id,
+ table[existing_index].iftype, NULL, _dhd_wlfc_entrypkt_fn,
+ &table[existing_index]);
+ }
+
+ if (!table[table_index].occupied) {
+ /* this new MAC entry does not exist, create one */
+ table[table_index].mac_handle = value[0];
+ rc = _dhd_wlfc_mac_entry_update(wlfc, &table[table_index],
+ eWLFC_MAC_ENTRY_ACTION_ADD, ifid,
+ wlfc->destination_entries.interfaces[ifid].iftype,
+ ea, NULL, NULL);
+ } else {
+ /* the space should have been empty, but it's not */
+ wlfc->stats.mac_update_failed++;
+ }
+ }
+
+ if (type == WLFC_CTL_TYPE_MACDESC_DEL) {
+ if (table[table_index].occupied) {
+ rc = _dhd_wlfc_mac_entry_update(wlfc, &table[table_index],
+ eWLFC_MAC_ENTRY_ACTION_DEL, ifid,
+ wlfc->destination_entries.interfaces[ifid].iftype,
+ ea, _dhd_wlfc_entrypkt_fn, &table[table_index]);
+ } else {
+ /* the space should have been occupied, but it's not */
+ wlfc->stats.mac_update_failed++;
+ }
+ }
+ BCM_REFERENCE(rc);
+ return BCME_OK;
+} /* _dhd_wlfc_mac_table_update */
+
+/** Called on a 'mac open' or 'mac close' event indicated by the dongle */
+static int
+_dhd_wlfc_psmode_update(dhd_pub_t *dhd, uint8* value, uint8 type)
+{
+ /* Handle PS on/off indication */
+ athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+ wlfc_mac_descriptor_t* table;
+ wlfc_mac_descriptor_t* desc; /* a table maps from mac handle to mac descriptor */
+ uint8 mac_handle = value[0];
+ int i;
+
+ table = wlfc->destination_entries.nodes;
+ desc = &table[WLFC_MAC_DESC_GET_LOOKUP_INDEX(mac_handle)];
+ if (desc->occupied) {
+#ifdef BULK_DEQUEUE
+ for (i = 0; i < AC_COUNT + 1; i++) {
+ desc->release_count[i] = 0;
+ }
+#endif /* BULK_DEQUEUE */
+ if (type == WLFC_CTL_TYPE_MAC_OPEN) {
+ desc->state = WLFC_STATE_OPEN;
+ desc->ac_bitmap = 0xff;
+ DHD_WLFC_CTRINC_MAC_OPEN(desc);
+ desc->requested_credit = 0;
+ desc->requested_packet = 0;
+ _dhd_wlfc_remove_requested_entry(wlfc, desc);
+#if defined(BCMINTERNAL) && defined(OOO_DEBUG)
+ _dhd_wlfc_bprint(wlfc, "[op%u.%u]-",
+ (uint8)(table - &wlfc->destination_entries.nodes[0]),
+ OSL_SYSUPTIME());
+#endif /* defined(BCMINTERNAL) && defined(OOO_DEBUG) */
+ } else {
+ desc->state = WLFC_STATE_CLOSE;
+ DHD_WLFC_CTRINC_MAC_CLOSE(desc);
+#if defined(BCMINTERNAL) && defined(OOO_DEBUG)
+ _dhd_wlfc_bprint(wlfc, "[cl%u.%u]-",
+ (uint8)(table - &wlfc->destination_entries.nodes[0]),
+ OSL_SYSUPTIME());
+#endif /* defined(BCMINTERNAL) && defined(OOO_DEBUG) */
+ /* Indicate to firmware if there is any traffic pending. */
+ for (i = 0; i < AC_COUNT; i++) {
+ _dhd_wlfc_traffic_pending_check(wlfc, desc, i);
+ }
+ }
+ } else {
+ wlfc->stats.psmode_update_failed++;
+ }
+
+ return BCME_OK;
+} /* _dhd_wlfc_psmode_update */
+
+/** called upon receiving 'interface open' or 'interface close' event from the dongle */
+static int
+_dhd_wlfc_interface_update(dhd_pub_t *dhd, uint8* value, uint8 type)
+{
+ /* Handle PS on/off indication */
+ athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+ wlfc_mac_descriptor_t* table;
+ uint8 if_id = value[0];
+ uint8 i;
+
+ BCM_REFERENCE(i);
+
+ if (if_id < WLFC_MAX_IFNUM) {
+ table = wlfc->destination_entries.interfaces;
+ if (table[if_id].occupied) {
+#ifdef BULK_DEQUEUE
+ for (i = 0; i < AC_COUNT + 1; i++) {
+				table[if_id].release_count[i] = 0;
+ }
+#endif /* BULK_DEQUEUE */
+ if (type == WLFC_CTL_TYPE_INTERFACE_OPEN) {
+ table[if_id].state = WLFC_STATE_OPEN;
+ /* WLFC_DBGMESG(("INTERFACE[%d] OPEN\n", if_id)); */
+#if defined(BCMINTERNAL) && defined(OOO_DEBUG)
+ _dhd_wlfc_bprint(wlfc, "[op%u.%u]-",
+					if_id,
+ OSL_SYSUPTIME());
+#endif /* defined(BCMINTERNAL) && defined(OOO_DEBUG) */
+ } else {
+ table[if_id].state = WLFC_STATE_CLOSE;
+ /* WLFC_DBGMESG(("INTERFACE[%d] CLOSE\n", if_id)); */
+#if defined(BCMINTERNAL) && defined(OOO_DEBUG)
+ _dhd_wlfc_bprint(wlfc, "[cl%u.%u]-",
+					if_id,
+ OSL_SYSUPTIME());
+#endif /* defined(BCMINTERNAL) && defined(OOO_DEBUG) */
+ }
+ return BCME_OK;
+ }
+ }
+ wlfc->stats.interface_update_failed++;
+
+ /* XXX: what is an appropriate error? */
+ return BCME_OK;
+}
+
+/** Called on receiving a WLFC_CTL_TYPE_MAC_REQUEST_CREDIT TLV from the dongle */
+static int
+_dhd_wlfc_credit_request(dhd_pub_t *dhd, uint8* value)
+{
+ athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+ wlfc_mac_descriptor_t* table;
+ wlfc_mac_descriptor_t* desc;
+ uint8 mac_handle;
+ uint8 credit;
+
+ table = wlfc->destination_entries.nodes;
+ mac_handle = value[1];
+ credit = value[0];
+
+ desc = &table[WLFC_MAC_DESC_GET_LOOKUP_INDEX(mac_handle)];
+ if (desc->occupied) {
+ desc->requested_credit = credit;
+
+		/* XXX: toggle AC prec bitmap based on received bmp, excluding the bc/mc bit */
+ desc->ac_bitmap = value[2] & (~(1<<AC_COUNT));
+ _dhd_wlfc_add_requested_entry(wlfc, desc);
+#if defined(DHD_WLFC_THREAD)
+ if (credit) {
+ _dhd_wlfc_thread_wakeup(dhd);
+ }
+#endif /* DHD_WLFC_THREAD */
+ } else {
+ wlfc->stats.credit_request_failed++;
+ }
+
+ return BCME_OK;
+}
+
+/** Called on receiving a WLFC_CTL_TYPE_MAC_REQUEST_PACKET TLV from the dongle */
+static int
+_dhd_wlfc_packet_request(dhd_pub_t *dhd, uint8* value)
+{
+ athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+ wlfc_mac_descriptor_t* table;
+ wlfc_mac_descriptor_t* desc;
+ uint8 mac_handle;
+ uint8 packet_count;
+
+ table = wlfc->destination_entries.nodes;
+ mac_handle = value[1];
+ packet_count = value[0];
+
+ desc = &table[WLFC_MAC_DESC_GET_LOOKUP_INDEX(mac_handle)];
+ if (desc->occupied) {
+ desc->requested_packet = packet_count;
+
+		/* XXX: toggle AC prec bitmap based on received bmp, excluding the bc/mc bit */
+ desc->ac_bitmap = value[2] & (~(1<<AC_COUNT));
+ _dhd_wlfc_add_requested_entry(wlfc, desc);
+#if defined(DHD_WLFC_THREAD)
+ if (packet_count) {
+ _dhd_wlfc_thread_wakeup(dhd);
+ }
+#endif /* DHD_WLFC_THREAD */
+ } else {
+ wlfc->stats.packet_request_failed++;
+ }
+
+ return BCME_OK;
+}
+
+/** Called when host receives a WLFC_CTL_TYPE_HOST_REORDER_RXPKTS TLV from the dongle */
+static void
+_dhd_wlfc_reorderinfo_indicate(uint8 *val, uint8 len, uchar *info_buf, uint *info_len)
+{
+ if (info_len) {
+		/* Check the copy length to avoid a buffer overrun. If the length
+		 * exceeds WLHOST_REORDERDATA_TOTLEN, return failure instead of
+		 * handing back an incomplete result truncated to that size.
+		 */
+ if ((info_buf) && (len <= WLHOST_REORDERDATA_TOTLEN)) {
+ bcopy(val, info_buf, len);
+ *info_len = len;
+ } else {
+ *info_len = 0;
+ }
+ }
+}
+
+/*
+ * public functions
+ */
+
+bool dhd_wlfc_is_supported(dhd_pub_t *dhd)
+{
+ bool rc = TRUE;
+
+ if (dhd == NULL) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return FALSE;
+ }
+
+ dhd_os_wlfc_block(dhd);
+
+ if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+ rc = FALSE;
+ }
+
+ dhd_os_wlfc_unblock(dhd);
+
+ return rc;
+}
+
+#ifdef BULK_DEQUEUE
+#ifndef WLFC_MAX_RELEASE_CNT
+#ifdef CUSTOM_AMPDU_MPDU
+#define WLFC_MAX_RELEASE_CNT CUSTOM_AMPDU_MPDU
+#else
+#define WLFC_MAX_RELEASE_CNT 16
+#endif /* CUSTOM_AMPDU_MPDU */
+#endif /* WLFC_MAX_RELEASE_CNT */
+#endif /* BULK_DEQUEUE */
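+
+/*
+ * Resolution sketch for the bulk-release batch size above: a build-time
+ * override wins, e.g. a (hypothetical) makefile line
+ *
+ *   DHDCFLAGS += -DWLFC_MAX_RELEASE_CNT=32
+ *
+ * otherwise CUSTOM_AMPDU_MPDU is reused when defined, with 16 as the final
+ * fallback.
+ */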
+
+int dhd_wlfc_enable(dhd_pub_t *dhd)
+{
+ int i, rc = BCME_OK;
+ athost_wl_status_info_t* wlfc;
+
+ if (dhd == NULL) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_BADARG;
+ }
+
+ dhd_os_wlfc_block(dhd);
+
+ if (!dhd->wlfc_enabled || dhd->wlfc_state) {
+ rc = BCME_OK;
+ goto exit;
+ }
+
+ /* allocate space to track txstatus propagated from firmware */
+ dhd->wlfc_state = DHD_OS_PREALLOC(dhd, DHD_PREALLOC_DHD_WLFC_INFO,
+ sizeof(athost_wl_status_info_t));
+ if (dhd->wlfc_state == NULL) {
+ rc = BCME_NOMEM;
+ goto exit;
+ }
+
+ /* initialize state space */
+ wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+ memset(wlfc, 0, sizeof(athost_wl_status_info_t));
+
+ /* remember osh & dhdp */
+ wlfc->osh = dhd->osh;
+ wlfc->dhdp = dhd;
+#ifdef BULK_DEQUEUE
+ wlfc->max_release_count = WLFC_MAX_RELEASE_CNT;
+#endif /* BULK_DEQUEUE */
+ if (!WLFC_GET_AFQ(dhd->wlfc_mode)) {
+ wlfc->hanger = _dhd_wlfc_hanger_create(dhd, WLFC_HANGER_MAXITEMS);
+ if (wlfc->hanger == NULL) {
+ DHD_OS_PREFREE(dhd, dhd->wlfc_state,
+ sizeof(athost_wl_status_info_t));
+ dhd->wlfc_state = NULL;
+ rc = BCME_NOMEM;
+ goto exit;
+ }
+ }
+
+ dhd->proptxstatus_mode = WLFC_FCMODE_EXPLICIT_CREDIT;
+ /* default to check rx pkt */
+ dhd->wlfc_rxpkt_chk = TRUE;
+#if defined(LINUX) || defined(linux)
+ if (dhd->op_mode & DHD_FLAG_IBSS_MODE) {
+ dhd->wlfc_rxpkt_chk = FALSE;
+ }
+#endif /* defined(LINUX) || defined(linux) */
+
+ /* initialize all interfaces to accept traffic */
+ for (i = 0; i < WLFC_MAX_IFNUM; i++) {
+ wlfc->hostif_flow_state[i] = OFF;
+ }
+
+ _dhd_wlfc_mac_entry_update(wlfc, &wlfc->destination_entries.other,
+ eWLFC_MAC_ENTRY_ACTION_ADD, 0xff, 0, NULL, NULL, NULL);
+
+ wlfc->allow_credit_borrow = 0;
+ wlfc->single_ac = 0;
+ wlfc->single_ac_timestamp = 0;
+
+#if defined(BCMINTERNAL) && defined(OOO_DEBUG)
+	wlfc->log_buf = MALLOC(dhd->osh, WLFC_LOG_BUF_SIZE);
+	/* guard the debug buffer against allocation failure */
+	if (wlfc->log_buf) {
+		wlfc->log_buf[WLFC_LOG_BUF_SIZE - 1] = 0;
+	}
+ wlfc->log_buf_offset = 0;
+ wlfc->log_buf_full = FALSE;
+#endif /* defined(BCMINTERNAL) && defined(OOO_DEBUG) */
+
+exit:
+ DHD_ERROR(("%s: ret=%d\n", __FUNCTION__, rc));
+ dhd_os_wlfc_unblock(dhd);
+
+ return rc;
+} /* dhd_wlfc_enable */
+
+#ifdef SUPPORT_P2P_GO_PS
+
+/**
+ * Called when the host platform enters a lower power mode, eg right before a system hibernate.
+ * SUPPORT_P2P_GO_PS specific function.
+ */
+int
+dhd_wlfc_suspend(dhd_pub_t *dhd)
+{
+ uint32 tlv = 0;
+
+ DHD_TRACE(("%s: masking wlfc events\n", __FUNCTION__));
+ if (!dhd->wlfc_enabled)
+ return -1;
+
+	/* the iovar helpers return 0 on success, non-zero on failure */
+	if (dhd_wl_ioctl_get_intiovar(dhd, "tlv", &tlv, WLC_GET_VAR, FALSE, 0))
+		return -1;
+	if ((tlv & (WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS)) == 0)
+		return 0;
+	tlv &= ~(WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS);
+	if (dhd_wl_ioctl_set_intiovar(dhd, "tlv", tlv, WLC_SET_VAR, TRUE, 0))
+		return -1;
+
+ return 0;
+}
+
+/**
+ * Called when the host platform resumes from a power management operation, eg resume after a
+ * system hibernate. SUPPORT_P2P_GO_PS specific function.
+ */
+int
+dhd_wlfc_resume(dhd_pub_t *dhd)
+{
+ uint32 tlv = 0;
+
+ DHD_TRACE(("%s: unmasking wlfc events\n", __FUNCTION__));
+ if (!dhd->wlfc_enabled)
+ return -1;
+
+	/* the iovar helpers return 0 on success, non-zero on failure */
+	if (dhd_wl_ioctl_get_intiovar(dhd, "tlv", &tlv, WLC_GET_VAR, FALSE, 0))
+		return -1;
+	if ((tlv & (WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS)) ==
+		(WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS))
+		return 0;
+	tlv |= (WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS);
+	if (dhd_wl_ioctl_set_intiovar(dhd, "tlv", tlv, WLC_SET_VAR, TRUE, 0))
+		return -1;
+
+ return 0;
+}
+
+#endif /* SUPPORT_P2P_GO_PS */
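+
+/*
+ * Bitmask sketch for the suspend/resume pair above, with a hypothetical
+ * starting value tlv == 0x3f:
+ *
+ *   suspend: tlv &= ~(WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS)
+ *   resume:  tlv |=  (WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS)
+ *
+ * Both sides read "tlv" back first and early-return when the two bits are
+ * already in the desired state, so the iovar is only rewritten on a change.
+ */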
+
+/** A flow control header was received from firmware, containing one or more TLVs */
+int
+dhd_wlfc_parse_header_info(dhd_pub_t *dhd, void* pktbuf, int tlv_hdr_len, uchar *reorder_info_buf,
+ uint *reorder_info_len)
+{
+ uint8 type, len;
+ uint8* value;
+ uint8* tmpbuf;
+ uint16 remainder = (uint16)tlv_hdr_len;
+ uint16 processed = 0;
+ athost_wl_status_info_t* wlfc = NULL;
+ void* entry;
+
+ if ((dhd == NULL) || (pktbuf == NULL)) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_BADARG;
+ }
+
+ dhd_os_wlfc_block(dhd);
+
+ if (dhd->proptxstatus_mode != WLFC_ONLY_AMPDU_HOSTREORDER) {
+ if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+ dhd_os_wlfc_unblock(dhd);
+ return WLFC_UNSUPPORTED;
+ }
+ wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+ }
+
+ tmpbuf = (uint8*)PKTDATA(dhd->osh, pktbuf);
+
+ if (remainder) {
+ while ((processed < (WLFC_MAX_PENDING_DATALEN * 2)) && (remainder > 0)) {
+ type = tmpbuf[processed];
+ if (type == WLFC_CTL_TYPE_FILLER) {
+ remainder -= 1;
+ processed += 1;
+ continue;
+ }
+
+ len = tmpbuf[processed + 1];
+ value = &tmpbuf[processed + 2];
+
+ if (remainder < (2 + len))
+ break;
+
+ remainder -= 2 + len;
+ processed += 2 + len;
+ entry = NULL;
+
+ DHD_INFO(("%s():%d type %d remainder %d processed %d\n",
+ __FUNCTION__, __LINE__, type, remainder, processed));
+
+ if (type == WLFC_CTL_TYPE_HOST_REORDER_RXPKTS)
+ _dhd_wlfc_reorderinfo_indicate(value, len, reorder_info_buf,
+ reorder_info_len);
+
+ if (wlfc == NULL) {
+ ASSERT(dhd->proptxstatus_mode == WLFC_ONLY_AMPDU_HOSTREORDER);
+
+ if (type != WLFC_CTL_TYPE_HOST_REORDER_RXPKTS &&
+ type != WLFC_CTL_TYPE_TRANS_ID)
+ DHD_INFO(("%s():%d dhd->wlfc_state is NULL yet!"
+ " type %d remainder %d processed %d\n",
+ __FUNCTION__, __LINE__, type, remainder, processed));
+ continue;
+ }
+
+ if (type == WLFC_CTL_TYPE_TXSTATUS) {
+ _dhd_wlfc_compressed_txstatus_update(dhd, value, 1, &entry);
+ } else if (type == WLFC_CTL_TYPE_COMP_TXSTATUS) {
+ uint8 compcnt_offset = WLFC_CTL_VALUE_LEN_TXSTATUS;
+
+ if (WLFC_GET_REUSESEQ(dhd->wlfc_mode)) {
+ compcnt_offset += WLFC_CTL_VALUE_LEN_SEQ;
+ }
+ _dhd_wlfc_compressed_txstatus_update(dhd, value,
+ value[compcnt_offset], &entry);
+ } else if (type == WLFC_CTL_TYPE_FIFO_CREDITBACK) {
+ _dhd_wlfc_fifocreditback_indicate(dhd, value);
+ } else if (type == WLFC_CTL_TYPE_RSSI) {
+ _dhd_wlfc_rssi_indicate(dhd, value);
+ } else if (type == WLFC_CTL_TYPE_MAC_REQUEST_CREDIT) {
+ _dhd_wlfc_credit_request(dhd, value);
+ } else if (type == WLFC_CTL_TYPE_MAC_REQUEST_PACKET) {
+ _dhd_wlfc_packet_request(dhd, value);
+ } else if ((type == WLFC_CTL_TYPE_MAC_OPEN) ||
+ (type == WLFC_CTL_TYPE_MAC_CLOSE)) {
+ _dhd_wlfc_psmode_update(dhd, value, type);
+ } else if ((type == WLFC_CTL_TYPE_MACDESC_ADD) ||
+ (type == WLFC_CTL_TYPE_MACDESC_DEL)) {
+ _dhd_wlfc_mac_table_update(dhd, value, type);
+ } else if (type == WLFC_CTL_TYPE_TRANS_ID) {
+ _dhd_wlfc_dbg_senum_check(dhd, value);
+ } else if ((type == WLFC_CTL_TYPE_INTERFACE_OPEN) ||
+ (type == WLFC_CTL_TYPE_INTERFACE_CLOSE)) {
+ _dhd_wlfc_interface_update(dhd, value, type);
+ }
+
+#ifndef BCMDBUS
+ if (entry && WLFC_GET_REORDERSUPP(dhd->wlfc_mode)) {
+ /* suppress all packets for this mac entry from bus->txq */
+ _dhd_wlfc_suppress_txq(dhd, _dhd_wlfc_entrypkt_fn, entry);
+ }
+#endif /* !BCMDBUS */
+ } /* while */
+
+ if (remainder != 0 && wlfc) {
+			/* trouble: the TLV stream did not parse cleanly */
+ wlfc->stats.tlv_parse_failed++;
+ }
+ } /* if */
+
+ if (wlfc)
+ wlfc->stats.dhd_hdrpulls++;
+
+ dhd_os_wlfc_unblock(dhd);
+ return BCME_OK;
+}
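+
+/*
+ * Framing sketch of the signal stream consumed above (the concrete type codes
+ * live in the wlfc protocol header; only the layout is shown):
+ *
+ *   +--------+  +------+-----+------------+  +------+-----+----
+ *   | FILLER |  | type | len | value[len] |  | type | len | ...
+ *   +--------+  +------+-----+------------+  +------+-----+----
+ *     1 byte      1 B    1 B    len bytes
+ *
+ * FILLER bytes are skipped one at a time; every other TLV advances the cursor
+ * by 2 + len, and a truncated trailing TLV terminates the walk.
+ */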
+
+KERNEL_THREAD_RETURN_TYPE
+dhd_wlfc_transfer_packets(void *data)
+{
+ dhd_pub_t *dhdp = (dhd_pub_t *)data;
+ int ac, single_ac = 0, rc = BCME_OK;
+ dhd_wlfc_commit_info_t commit_info;
+ athost_wl_status_info_t* ctx;
+ int bus_retry_count = 0;
+ int pkt_send = 0;
+ int pkt_send_per_ac = 0;
+
+ uint8 tx_map = 0; /* packets (send + in queue), Bitmask for 4 ACs + BC/MC */
+ uint8 rx_map = 0; /* received packets, Bitmask for 4 ACs + BC/MC */
+ uint8 packets_map = 0; /* packets in queue, Bitmask for 4 ACs + BC/MC */
+ bool no_credit = FALSE;
+
+ int lender;
+ int pkt_bound = WLFC_PACKET_BOUND;
+ int highest_lender_ac;
+
+ BCM_REFERENCE(highest_lender_ac);
+
+#if defined(DHD_WLFC_THREAD)
+	/* wait until someone wakes us up; the timeout is adjusted at run time */
+#if defined(LINUX)
+ int wait_msec = msecs_to_jiffies(0xFFFFFFFF);
+#endif /* LINUX */
+#endif /* defined(DHD_WLFC_THREAD) */
+
+#if defined(DHD_WLFC_THREAD)
+ while (1) {
+ bus_retry_count = 0;
+ pkt_send = 0;
+ tx_map = 0;
+ rx_map = 0;
+ packets_map = 0;
+#if defined(LINUX)
+ wait_msec = wait_event_interruptible_timeout(dhdp->wlfc_wqhead,
+ dhdp->wlfc_thread_go, wait_msec);
+ if (kthread_should_stop()) {
+ break;
+ }
+ dhdp->wlfc_thread_go = FALSE;
+#endif /* LINUX */
+
+ dhd_os_wlfc_block(dhdp);
+#endif /* defined(DHD_WLFC_THREAD) */
+ ctx = (athost_wl_status_info_t*)dhdp->wlfc_state;
+#if defined(DHD_WLFC_THREAD)
+ if (!ctx)
+ goto exit;
+#endif /* defined(DHD_WLFC_THREAD) */
+
+ memset(&commit_info, 0, sizeof(commit_info));
+
+	/*
+	Commit packets for regular AC traffic, higher priority first.
+	First use up the FIFO credits available to each AC; based on the
+	distribution and the credits left, borrow from other ACs as applicable.
+
+	NOTE: if the bus between the host and firmware is overwhelmed by host
+	traffic, higher priority traffic may starve the lower priority queues.
+	If that occurs often, a weighted round-robin or ucode-based scheme may
+	be needed to avoid low priority packet starvation.
+	*/
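+
+	/*
+	 * Walk-through with hypothetical credits {AC0:0, AC1:2, AC2:0, AC3:1,
+	 * BCMC:1}: the highest pending AC drains first on its own credit; once
+	 * no_credit is hit, LIMIT_BORROW builds may pull one credit from a
+	 * lender AC via _dhd_wlfc_borrow_credit() and must hand it back with
+	 * _dhd_wlfc_return_credit() if the dequeue or the bus commit fails.
+	 */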
+
+#ifdef BULK_DEQUEUE
+ pkt_bound = ctx->max_release_count;
+#endif
+
+ for (ac = AC_COUNT; ac >= 0; ac--) {
+ if (dhdp->wlfc_rxpkt_chk) {
+ /* check rx packet */
+ uint32 curr_t = OSL_SYSUPTIME(), delta;
+
+ delta = curr_t - ctx->rx_timestamp[ac];
+ if (delta < WLFC_RX_DETECTION_THRESHOLD_MS) {
+ rx_map |= (1 << ac);
+ }
+ }
+
+ if (ctx->pkt_cnt_per_ac[ac] == 0) {
+ continue;
+ }
+
+ tx_map |= (1 << ac);
+ single_ac = ac + 1;
+ pkt_send_per_ac = 0;
+ while ((FALSE == dhdp->proptxstatus_txoff) &&
+ (pkt_send_per_ac < pkt_bound)) {
+			/* packets coming out of the delayQ (rather than the
+			 * suppress queue) are fresh: they still need a header
+			 * and have no MAC entry attached yet
+			 */
+ no_credit = (ctx->FIFO_credit[ac] < 1);
+ if (dhdp->proptxstatus_credit_ignore ||
+ ((ac == AC_COUNT) && !ctx->bcmc_credit_supported)) {
+ no_credit = FALSE;
+ }
+
+ lender = -1;
+#ifdef LIMIT_BORROW
+ if (no_credit && (ac < AC_COUNT) && (tx_map >= rx_map) &&
+ dhdp->wlfc_borrow_allowed) {
+				/* try borrowing a credit from another AC */
+#ifdef BULK_DEQUEUE
+ /* Enable credit borrow from higher AC
+ * to make packet chain longer
+ */
+ highest_lender_ac = AC_COUNT;
+#else
+ highest_lender_ac = ac - 1;
+#endif /* BULK_DEQUEUE */
+ lender = _dhd_wlfc_borrow_credit(ctx, highest_lender_ac, ac, FALSE);
+ if (lender != -1) {
+ no_credit = FALSE;
+ }
+ }
+#endif /* LIMIT_BORROW */
+ commit_info.needs_hdr = 1;
+ commit_info.mac_entry = NULL;
+ commit_info.p = _dhd_wlfc_deque_delayedq(ctx, ac,
+ &(commit_info.ac_fifo_credit_spent),
+ &(commit_info.needs_hdr),
+ &(commit_info.mac_entry),
+ no_credit);
+ commit_info.pkt_type = (commit_info.needs_hdr) ? eWLFC_PKTTYPE_DELAYED :
+ eWLFC_PKTTYPE_SUPPRESSED;
+
+ if (commit_info.p == NULL) {
+#ifdef LIMIT_BORROW
+ if (lender != -1 && dhdp->wlfc_borrow_allowed) {
+ _dhd_wlfc_return_credit(ctx, lender, ac);
+ }
+#endif
+ break;
+ }
+
+ if (!dhdp->proptxstatus_credit_ignore && (lender == -1)) {
+ ASSERT(ctx->FIFO_credit[ac] >= commit_info.ac_fifo_credit_spent);
+ }
+			/* at this point we either have a credit or no credit is needed */
+ rc = _dhd_wlfc_handle_packet_commit(ctx, ac, &commit_info,
+ ctx->fcommit, ctx->commit_ctx);
+
+ /* Bus commits may fail (e.g. flow control); abort after retries */
+ if (rc == BCME_OK) {
+ pkt_send++;
+ pkt_send_per_ac++;
+ if (commit_info.ac_fifo_credit_spent && (lender == -1)) {
+ ctx->FIFO_credit[ac]--;
+ }
+#ifdef LIMIT_BORROW
+ else if (!commit_info.ac_fifo_credit_spent && (lender != -1) &&
+ dhdp->wlfc_borrow_allowed) {
+ _dhd_wlfc_return_credit(ctx, lender, ac);
+ }
+#endif
+ } else {
+#ifdef LIMIT_BORROW
+ if (lender != -1 && dhdp->wlfc_borrow_allowed) {
+ _dhd_wlfc_return_credit(ctx, lender, ac);
+ }
+#endif
+ bus_retry_count++;
+ if (bus_retry_count >= BUS_RETRIES) {
+ DHD_ERROR(("%s: bus error %d\n", __FUNCTION__, rc));
+ goto exit;
+ }
+ }
+ }
+
+ if (ctx->pkt_cnt_per_ac[ac]) {
+ packets_map |= (1 << ac);
+ }
+ }
+
+ if ((tx_map == 0) || dhdp->proptxstatus_credit_ignore) {
+		/* nothing was sent out and nothing remains in the queues */
+ rc = BCME_OK;
+ goto exit;
+ }
+
+ if (((tx_map & (tx_map - 1)) == 0) && (tx_map >= rx_map)) {
+		/* only one tx AC exists and there is no higher priority rx AC */
+ if ((single_ac == ctx->single_ac) && ctx->allow_credit_borrow) {
+ ac = single_ac - 1;
+ } else {
+ uint32 delta;
+ uint32 curr_t = OSL_SYSUPTIME();
+
+ if (single_ac != ctx->single_ac) {
+ /* new single ac traffic (first single ac or different single ac) */
+ ctx->allow_credit_borrow = 0;
+ ctx->single_ac_timestamp = curr_t;
+ ctx->single_ac = (uint8)single_ac;
+ rc = BCME_OK;
+ goto exit;
+ }
+			/* same AC traffic; check whether it has lasted long enough */
+ delta = curr_t - ctx->single_ac_timestamp;
+
+ if (delta >= WLFC_BORROW_DEFER_PERIOD_MS) {
+				/* waited long enough, borrowing is allowed now */
+ ctx->allow_credit_borrow = 1;
+ ac = single_ac - 1;
+ } else {
+ rc = BCME_OK;
+ goto exit;
+ }
+ }
+ } else {
+ /* If we have multiple AC traffic, turn off borrowing, mark time and bail out */
+ ctx->allow_credit_borrow = 0;
+ ctx->single_ac_timestamp = 0;
+ ctx->single_ac = 0;
+ rc = BCME_OK;
+ goto exit;
+ }
+
+ if (packets_map == 0) {
+ /* nothing to send, skip borrow */
+ rc = BCME_OK;
+ goto exit;
+ }
+
+	/* At this point, borrow credits for this single AC only */
+ while (FALSE == dhdp->proptxstatus_txoff) {
+#ifdef LIMIT_BORROW
+ if (dhdp->wlfc_borrow_allowed) {
+ if ((lender = _dhd_wlfc_borrow_credit(ctx, AC_COUNT, ac, TRUE)) == -1) {
+ break;
+ }
+ }
+ else
+ break;
+#endif
+ commit_info.p = _dhd_wlfc_deque_delayedq(ctx, ac,
+ &(commit_info.ac_fifo_credit_spent),
+ &(commit_info.needs_hdr),
+ &(commit_info.mac_entry),
+ FALSE);
+ if (commit_info.p == NULL) {
+			/* before borrowing, only one AC had traffic and its queue is now empty */
+#ifdef LIMIT_BORROW
+ _dhd_wlfc_return_credit(ctx, lender, ac);
+#endif
+ break;
+ }
+
+ commit_info.pkt_type = (commit_info.needs_hdr) ? eWLFC_PKTTYPE_DELAYED :
+ eWLFC_PKTTYPE_SUPPRESSED;
+
+ rc = _dhd_wlfc_handle_packet_commit(ctx, ac, &commit_info,
+ ctx->fcommit, ctx->commit_ctx);
+
+ /* Bus commits may fail (e.g. flow control); abort after retries */
+ if (rc == BCME_OK) {
+ pkt_send++;
+ if (commit_info.ac_fifo_credit_spent) {
+#ifndef LIMIT_BORROW
+ ctx->FIFO_credit[ac]--;
+#endif
+ } else {
+#ifdef LIMIT_BORROW
+ _dhd_wlfc_return_credit(ctx, lender, ac);
+#endif
+ }
+ } else {
+#ifdef LIMIT_BORROW
+ _dhd_wlfc_return_credit(ctx, lender, ac);
+#endif
+ bus_retry_count++;
+ if (bus_retry_count >= BUS_RETRIES) {
+ DHD_ERROR(("%s: bus error %d\n", __FUNCTION__, rc));
+ goto exit;
+ }
+ }
+ }
+
+ BCM_REFERENCE(pkt_send);
+
+exit:
+#if defined(DHD_WLFC_THREAD)
+ dhd_os_wlfc_unblock(dhdp);
+#if defined(LINUX)
+ if (ctx && ctx->pkt_cnt_in_psq && pkt_send) {
+ wait_msec = msecs_to_jiffies(WLFC_THREAD_QUICK_RETRY_WAIT_MS);
+ } else {
+ wait_msec = msecs_to_jiffies(WLFC_THREAD_RETRY_WAIT_MS);
+ }
+#endif /* LINUX */
+ }
+ return 0;
+#else
+ return rc;
+#endif /* defined(DHD_WLFC_THREAD) */
+}
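+
+/*
+ * Usage sketch: without DHD_WLFC_THREAD the routine above runs synchronously
+ * from dhd_wlfc_commit_packets() and returns a BCME_ status; with
+ * DHD_WLFC_THREAD the same body is the kthread loop, parked in
+ * wait_event_interruptible_timeout() and kicked via _dhd_wlfc_thread_wakeup().
+ */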
+
+/**
+ * Enqueues a transmit packet in the next layer towards the dongle, eg the DBUS layer. Called by
+ * eg dhd_sendpkt().
+ * @param[in] dhdp Pointer to public DHD structure
+ * @param[in] fcommit Pointer to transmit function of next layer
+ * @param[in] commit_ctx Opaque context used when calling next layer
+ * @param[in] pktbuf Packet to send
+ * @param[in] need_toggle_host_if If TRUE, resets flag ctx->toggle_host_if
+ */
+int
+dhd_wlfc_commit_packets(dhd_pub_t *dhdp, f_commitpkt_t fcommit,
+ struct dhd_bus *commit_ctx, void *pktbuf, bool need_toggle_host_if)
+{
+ int rc = BCME_OK;
+ athost_wl_status_info_t* ctx;
+
+#if defined(DHD_WLFC_THREAD)
+ if (!pktbuf)
+ return BCME_OK;
+#endif /* defined(DHD_WLFC_THREAD) */
+
+ if ((dhdp == NULL) || (fcommit == NULL)) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_BADARG;
+ }
+
+ dhd_os_wlfc_block(dhdp);
+
+ if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+ if (pktbuf) {
+ DHD_PKTTAG_WLFCPKT_SET(PKTTAG(pktbuf), 0);
+ }
+ rc = WLFC_UNSUPPORTED;
+ goto exit;
+ }
+
+ ctx = (athost_wl_status_info_t*)dhdp->wlfc_state;
+
+#ifdef BCMDBUS
+ if (!dhdp->up || (dhdp->busstate == DHD_BUS_DOWN)) {
+ if (pktbuf) {
+ PKTFREE(ctx->osh, pktbuf, TRUE);
+ rc = BCME_OK;
+ }
+ goto exit;
+ }
+#endif
+
+ if (dhdp->proptxstatus_module_ignore) {
+ if (pktbuf) {
+ uint32 htod = 0;
+ WL_TXSTATUS_SET_FLAGS(htod, WLFC_PKTFLAG_PKTFROMHOST);
+ _dhd_wlfc_pushheader(ctx, &pktbuf, FALSE, 0, 0, htod, 0, FALSE);
+ if (fcommit(commit_ctx, pktbuf)) {
+				/* free it on failure; otherwise it is freed in the tx complete callback */
+ PKTFREE(ctx->osh, pktbuf, TRUE);
+ }
+ rc = BCME_OK;
+ }
+ goto exit;
+ }
+
+ if (pktbuf) {
+ int ac = DHD_PKTTAG_FIFO(PKTTAG(pktbuf));
+ ASSERT(ac <= AC_COUNT);
+ DHD_PKTTAG_WLFCPKT_SET(PKTTAG(pktbuf), 1);
+		/* enqueue the packet to its respective queue */
+ rc = _dhd_wlfc_enque_delayq(ctx, pktbuf, ac);
+ if (rc) {
+ _dhd_wlfc_prec_drop(ctx->dhdp, (ac << 1), pktbuf, FALSE);
+ } else {
+ ctx->stats.pktin++;
+ ctx->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(pktbuf))][ac]++;
+ }
+ }
+
+ if (!ctx->fcommit) {
+ ctx->fcommit = fcommit;
+ } else {
+ ASSERT(ctx->fcommit == fcommit);
+ }
+ if (!ctx->commit_ctx) {
+ ctx->commit_ctx = commit_ctx;
+ } else {
+ ASSERT(ctx->commit_ctx == commit_ctx);
+ }
+
+#if defined(DHD_WLFC_THREAD)
+ _dhd_wlfc_thread_wakeup(dhdp);
+#else
+ dhd_wlfc_transfer_packets(dhdp);
+#endif /* defined(DHD_WLFC_THREAD) */
+
+exit:
+ dhd_os_wlfc_unblock(dhdp);
+ return rc;
+} /* dhd_wlfc_commit_packets */
+
+/**
+ * Called when the (lower) DBUS layer indicates completion (successful or not) of a transmit packet
+ */
+int
+dhd_wlfc_txcomplete(dhd_pub_t *dhd, void *txp, bool success)
+{
+ athost_wl_status_info_t* wlfc;
+ wlfc_mac_descriptor_t *entry;
+ void* pout = NULL;
+ int rtn = BCME_OK;
+ if ((dhd == NULL) || (txp == NULL)) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_BADARG;
+ }
+
+	bcm_pkt_validate_chk(txp, "dhd_wlfc_txcomplete");
+
+ dhd_os_wlfc_block(dhd);
+
+ if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+ rtn = WLFC_UNSUPPORTED;
+ goto EXIT;
+ }
+
+ wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+ if (DHD_PKTTAG_SIGNALONLY(PKTTAG(txp))) {
+#ifdef PROP_TXSTATUS_DEBUG
+ wlfc->stats.signal_only_pkts_freed++;
+#endif
+		/* signal-only packet: pull the wlfc header and free it */
+ _dhd_wlfc_pullheader(wlfc, txp);
+ PKTFREE(wlfc->osh, txp, TRUE);
+ goto EXIT;
+ }
+
+ entry = _dhd_wlfc_find_table_entry(wlfc, txp);
+ ASSERT(entry);
+
+ if (!success || dhd->proptxstatus_txstatus_ignore) {
+ WLFC_DBGMESG(("At: %s():%d, bus_complete() failure for %p, htod_tag:0x%08x\n",
+ __FUNCTION__, __LINE__, txp, DHD_PKTTAG_H2DTAG(PKTTAG(txp))));
+ if (!WLFC_GET_AFQ(dhd->wlfc_mode)) {
+ _dhd_wlfc_hanger_poppkt(wlfc->hanger, WL_TXSTATUS_GET_HSLOT(
+ DHD_PKTTAG_H2DTAG(PKTTAG(txp))), &pout, TRUE);
+ ASSERT(txp == pout);
+ }
+
+ /* indicate failure and free the packet */
+ dhd_txcomplete(dhd, txp, success);
+
+ /* return the credit, if necessary */
+ _dhd_wlfc_return_implied_credit(wlfc, txp);
+
+ if (entry->transit_count)
+ entry->transit_count--;
+ if (entry->suppr_transit_count)
+ entry->suppr_transit_count--;
+ wlfc->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(txp))][DHD_PKTTAG_FIFO(PKTTAG(txp))]--;
+ wlfc->stats.pktout++;
+ PKTFREE(wlfc->osh, txp, TRUE);
+ } else {
+ /* bus confirmed pkt went to firmware side */
+ if (WLFC_GET_AFQ(dhd->wlfc_mode)) {
+ _dhd_wlfc_enque_afq(wlfc, txp);
+ } else {
+ int hslot = WL_TXSTATUS_GET_HSLOT(DHD_PKTTAG_H2DTAG(PKTTAG(txp)));
+ _dhd_wlfc_hanger_free_pkt(wlfc, hslot,
+ WLFC_HANGER_PKT_STATE_BUSRETURNED, -1);
+ }
+ }
+
+ ASSERT(entry->onbus_pkts_count > 0);
+ if (entry->onbus_pkts_count > 0)
+ entry->onbus_pkts_count--;
+ if (entry->suppressed &&
+ (!entry->onbus_pkts_count) &&
+ (!entry->suppr_transit_count))
+ entry->suppressed = FALSE;
+EXIT:
+ dhd_os_wlfc_unblock(dhd);
+ return rtn;
+} /* dhd_wlfc_txcomplete */
+
+int
+dhd_wlfc_init(dhd_pub_t *dhd)
+{
+ /* enable all signals & indicate host proptxstatus logic is active */
+ uint32 tlv, mode, fw_caps;
+ int ret = 0;
+
+ if (dhd == NULL) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_BADARG;
+ }
+
+ dhd_os_wlfc_block(dhd);
+ if (dhd->wlfc_enabled) {
+ DHD_INFO(("%s():%d, Already enabled!\n", __FUNCTION__, __LINE__));
+ dhd_os_wlfc_unblock(dhd);
+ return BCME_OK;
+ }
+ dhd->wlfc_enabled = TRUE;
+ dhd_os_wlfc_unblock(dhd);
+
+ tlv = WLFC_FLAGS_RSSI_SIGNALS |
+ WLFC_FLAGS_XONXOFF_SIGNALS |
+ WLFC_FLAGS_CREDIT_STATUS_SIGNALS |
+ WLFC_FLAGS_HOST_PROPTXSTATUS_ACTIVE |
+ WLFC_FLAGS_HOST_RXRERODER_ACTIVE;
+
+ /* XXX dhd->wlfc_state = NULL; */
+ /* XXX ANDREY:may erase pointer to already created wlfc_state, PR#97824 */
+
+	/*
+	Try to enable/disable signaling by sending the "tlv" iovar. If that
+	fails, fall back to no flow control? Print a message for now.
+	*/
+
+	/* enable proptxstatus signaling by default */
+ if (!dhd_wl_ioctl_set_intiovar(dhd, "tlv", tlv, WLC_SET_VAR, TRUE, 0)) {
+		/*
+		Leaving the message for now; it should be removed once the tlv
+		situation is stable.
+		*/
+ DHD_INFO(("dhd_wlfc_init(): successfully %s bdcv2 tlv signaling, %d\n",
+ dhd->wlfc_enabled?"enabled":"disabled", tlv));
+ }
+
+ mode = 0;
+
+ /* query caps */
+ ret = dhd_wl_ioctl_get_intiovar(dhd, "wlfc_mode", &fw_caps, WLC_GET_VAR, FALSE, 0);
+
+ if (!ret) {
+ DHD_INFO(("%s: query wlfc_mode succeed, fw_caps=0x%x\n", __FUNCTION__, fw_caps));
+
+ if (WLFC_IS_OLD_DEF(fw_caps)) {
+#ifdef BCMDBUS
+ mode = WLFC_MODE_HANGER;
+#else
+			/* enable proptxstatus v2 by default */
+ mode = WLFC_MODE_AFQ;
+#endif /* BCMDBUS */
+ } else {
+ WLFC_SET_AFQ(mode, WLFC_GET_AFQ(fw_caps));
+#ifdef BCMDBUS
+ WLFC_SET_AFQ(mode, 0);
+#endif /* BCMDBUS */
+ WLFC_SET_REUSESEQ(mode, WLFC_GET_REUSESEQ(fw_caps));
+ WLFC_SET_REORDERSUPP(mode, WLFC_GET_REORDERSUPP(fw_caps));
+ }
+ ret = dhd_wl_ioctl_set_intiovar(dhd, "wlfc_mode", mode, WLC_SET_VAR, TRUE, 0);
+ }
+
+ dhd_os_wlfc_block(dhd);
+
+ dhd->wlfc_mode = 0;
+ if (ret >= 0) {
+ if (WLFC_IS_OLD_DEF(mode)) {
+ WLFC_SET_AFQ(dhd->wlfc_mode, (mode == WLFC_MODE_AFQ));
+ } else {
+ dhd->wlfc_mode = mode;
+ }
+ }
+
+ DHD_INFO(("dhd_wlfc_init(): wlfc_mode=0x%x, ret=%d\n", dhd->wlfc_mode, ret));
+#ifdef LIMIT_BORROW
+ dhd->wlfc_borrow_allowed = TRUE;
+#endif
+ dhd_os_wlfc_unblock(dhd);
+
+ if (dhd->plat_init)
+ dhd->plat_init((void *)dhd);
+
+ return BCME_OK;
+} /* dhd_wlfc_init */
+
+/** AMPDU host reorder specific function */
+int
+dhd_wlfc_hostreorder_init(dhd_pub_t *dhd)
+{
+ /* enable only ampdu hostreorder here */
+ uint32 tlv;
+
+ if (dhd == NULL) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_BADARG;
+ }
+
+ DHD_TRACE(("%s():%d Enter\n", __FUNCTION__, __LINE__));
+
+ tlv = WLFC_FLAGS_HOST_RXRERODER_ACTIVE;
+
+	/* enable proptxstatus signaling by default */
+ if (dhd_wl_ioctl_set_intiovar(dhd, "tlv", tlv, WLC_SET_VAR, TRUE, 0)) {
+ DHD_ERROR(("%s(): failed to enable/disable bdcv2 tlv signaling\n",
+ __FUNCTION__));
+ } else {
+		/*
+		Leaving the message for now; it should be removed once the tlv
+		situation is stable.
+		*/
+ DHD_ERROR(("%s(): successful bdcv2 tlv signaling, %d\n",
+ __FUNCTION__, tlv));
+ }
+
+ dhd_os_wlfc_block(dhd);
+ dhd->proptxstatus_mode = WLFC_ONLY_AMPDU_HOSTREORDER;
+ dhd_os_wlfc_unblock(dhd);
+	/* terence 20161229: enable ampdu_hostreorder if tlv enables hostreorder */
+ dhd_conf_set_intiovar(dhd, 0, WLC_SET_VAR, "ampdu_hostreorder", 1, 0, TRUE);
+
+ return BCME_OK;
+}
+
+int
+dhd_wlfc_cleanup_txq(dhd_pub_t *dhd, f_processpkt_t fn, void *arg)
+{
+ if (dhd == NULL) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_BADARG;
+ }
+
+ dhd_os_wlfc_block(dhd);
+
+ if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+ dhd_os_wlfc_unblock(dhd);
+ return WLFC_UNSUPPORTED;
+ }
+
+#ifndef BCMDBUS
+ _dhd_wlfc_cleanup_txq(dhd, fn, arg);
+#endif /* !BCMDBUS */
+
+ dhd_os_wlfc_unblock(dhd);
+
+ return BCME_OK;
+}
+
+/** release all packet resources */
+int
+dhd_wlfc_cleanup(dhd_pub_t *dhd, f_processpkt_t fn, void *arg)
+{
+ if (dhd == NULL) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_BADARG;
+ }
+
+ dhd_os_wlfc_block(dhd);
+
+ if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+ dhd_os_wlfc_unblock(dhd);
+ return WLFC_UNSUPPORTED;
+ }
+
+ _dhd_wlfc_cleanup(dhd, fn, arg);
+
+ dhd_os_wlfc_unblock(dhd);
+
+ return BCME_OK;
+}
+
+int
+dhd_wlfc_deinit(dhd_pub_t *dhd)
+{
+ /* cleanup all psq related resources */
+ athost_wl_status_info_t* wlfc;
+ uint32 tlv = 0;
+ uint32 hostreorder = 0;
+
+ if (dhd == NULL) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_BADARG;
+ }
+
+ dhd_os_wlfc_block(dhd);
+ if (!dhd->wlfc_enabled) {
+ DHD_ERROR(("%s():%d, Already disabled!\n", __FUNCTION__, __LINE__));
+ dhd_os_wlfc_unblock(dhd);
+ return BCME_OK;
+ }
+
+ dhd->wlfc_enabled = FALSE;
+ dhd_os_wlfc_unblock(dhd);
+
+ /* query ampdu hostreorder */
+ (void) dhd_wl_ioctl_get_intiovar(dhd, "ampdu_hostreorder",
+ &hostreorder, WLC_GET_VAR, FALSE, 0);
+
+ if (hostreorder) {
+ tlv = WLFC_FLAGS_HOST_RXRERODER_ACTIVE;
+		DHD_ERROR(("%s():%d, maintain HOST RXRERODER flag in tlv\n",
+ __FUNCTION__, __LINE__));
+ }
+
+	/* Disable proptxstatus signaling for deinit */
+ (void) dhd_wl_ioctl_set_intiovar(dhd, "tlv", tlv, WLC_SET_VAR, TRUE, 0);
+
+ dhd_os_wlfc_block(dhd);
+
+ if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+ dhd_os_wlfc_unblock(dhd);
+ return WLFC_UNSUPPORTED;
+ }
+
+ wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+
+ _dhd_wlfc_cleanup(dhd, NULL, NULL);
+
+ if (!WLFC_GET_AFQ(dhd->wlfc_mode)) {
+ int i;
+ wlfc_hanger_t* h = (wlfc_hanger_t*)wlfc->hanger;
+ for (i = 0; i < h->max_items; i++) {
+ if (h->items[i].state != WLFC_HANGER_ITEM_STATE_FREE) {
+ _dhd_wlfc_hanger_free_pkt(wlfc, i,
+ WLFC_HANGER_PKT_STATE_COMPLETE, TRUE);
+ }
+ }
+
+ /* delete hanger */
+ _dhd_wlfc_hanger_delete(dhd, h);
+ }
+
+#if defined(BCMINTERNAL) && defined(OOO_DEBUG)
+ if (wlfc->log_buf) {
+ MFREE(dhd->osh, wlfc->log_buf, WLFC_LOG_BUF_SIZE);
+ wlfc->log_buf_offset = 0;
+ wlfc->log_buf_full = FALSE;
+ }
+#endif /* defined(BCMINTERNAL) && defined(OOO_DEBUG) */
+
+ /* free top structure */
+ DHD_OS_PREFREE(dhd, dhd->wlfc_state,
+ sizeof(athost_wl_status_info_t));
+ dhd->wlfc_state = NULL;
+ dhd->proptxstatus_mode = hostreorder ?
+ WLFC_ONLY_AMPDU_HOSTREORDER : WLFC_FCMODE_NONE;
+
+ dhd_os_wlfc_unblock(dhd);
+
+ if (dhd->plat_deinit)
+ dhd->plat_deinit((void *)dhd);
+ return BCME_OK;
+} /* dhd_wlfc_deinit */
+
+/**
+ * Called on an interface event (WLC_E_IF) indicated by firmware
+ * @param[in] dhdp Pointer to public DHD structure
+ * @param[in] action eg eWLFC_MAC_ENTRY_ACTION_UPDATE or eWLFC_MAC_ENTRY_ACTION_ADD
+ */
+int dhd_wlfc_interface_event(dhd_pub_t *dhdp, uint8 action, uint8 ifid, uint8 iftype, uint8* ea)
+{
+ int rc;
+
+ if (dhdp == NULL) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_BADARG;
+ }
+
+ dhd_os_wlfc_block(dhdp);
+
+ if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+ dhd_os_wlfc_unblock(dhdp);
+ return WLFC_UNSUPPORTED;
+ }
+
+ rc = _dhd_wlfc_interface_entry_update(dhdp->wlfc_state, action, ifid, iftype, ea);
+
+ dhd_os_wlfc_unblock(dhdp);
+ return rc;
+}
+
+/** Called eg on receiving a WLC_E_FIFO_CREDIT_MAP event from the dongle */
+int dhd_wlfc_FIFOcreditmap_event(dhd_pub_t *dhdp, uint8* event_data)
+{
+ int rc;
+
+ if (dhdp == NULL) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_BADARG;
+ }
+
+ dhd_os_wlfc_block(dhdp);
+
+ if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+ dhd_os_wlfc_unblock(dhdp);
+ return WLFC_UNSUPPORTED;
+ }
+
+ rc = _dhd_wlfc_FIFOcreditmap_update(dhdp->wlfc_state, event_data);
+
+ dhd_os_wlfc_unblock(dhdp);
+
+ return rc;
+}
+#ifdef LIMIT_BORROW
+int dhd_wlfc_disable_credit_borrow_event(dhd_pub_t *dhdp, uint8* event_data)
+{
+ if (dhdp == NULL || event_data == NULL) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_BADARG;
+ }
+ dhd_os_wlfc_block(dhdp);
+ dhdp->wlfc_borrow_allowed = (bool)(*(uint32 *)event_data);
+ dhd_os_wlfc_unblock(dhdp);
+
+ return BCME_OK;
+}
+#endif /* LIMIT_BORROW */
+
+/**
+ * Called eg on receiving a WLC_E_BCMC_CREDIT_SUPPORT event from the dongle (broadcast/multicast
+ * specific)
+ */
+int dhd_wlfc_BCMCCredit_support_event(dhd_pub_t *dhdp)
+{
+ int rc;
+
+ if (dhdp == NULL) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_BADARG;
+ }
+
+ dhd_os_wlfc_block(dhdp);
+
+ if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+ dhd_os_wlfc_unblock(dhdp);
+ return WLFC_UNSUPPORTED;
+ }
+
+ rc = _dhd_wlfc_BCMCCredit_support_update(dhdp->wlfc_state);
+
+ dhd_os_wlfc_unblock(dhdp);
+ return rc;
+}
+
+/** debug specific function */
+int
+dhd_wlfc_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
+{
+ int i;
+ uint8* ea;
+ athost_wl_status_info_t* wlfc;
+ wlfc_hanger_t* h;
+ wlfc_mac_descriptor_t* mac_table;
+ wlfc_mac_descriptor_t* interfaces;
+ char* iftypes[] = {"STA", "AP", "WDS", "p2pGO", "p2pCL"};
+
+ if (!dhdp || !strbuf) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_BADARG;
+ }
+
+ dhd_os_wlfc_block(dhdp);
+
+ if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+ dhd_os_wlfc_unblock(dhdp);
+ return WLFC_UNSUPPORTED;
+ }
+
+ wlfc = (athost_wl_status_info_t*)dhdp->wlfc_state;
+
+ mac_table = wlfc->destination_entries.nodes;
+ interfaces = wlfc->destination_entries.interfaces;
+ bcm_bprintf(strbuf, "---- wlfc stats ----\n");
+
+ if (!WLFC_GET_AFQ(dhdp->wlfc_mode)) {
+ h = (wlfc_hanger_t*)wlfc->hanger;
+ if (h == NULL) {
+ bcm_bprintf(strbuf, "wlfc-hanger not initialized yet\n");
+ } else {
+ bcm_bprintf(strbuf, "wlfc hanger (pushed,popped,f_push,"
+ "f_pop,f_slot, pending) = (%d,%d,%d,%d,%d,%d)\n",
+ h->pushed,
+ h->popped,
+ h->failed_to_push,
+ h->failed_to_pop,
+ h->failed_slotfind,
+ (h->pushed - h->popped));
+ }
+ }
+
+ bcm_bprintf(strbuf, "wlfc fail(tlv,credit_rqst,mac_update,psmode_update), "
+ "(dq_full,rollback_fail) = (%d,%d,%d,%d), (%d,%d)\n",
+ wlfc->stats.tlv_parse_failed,
+ wlfc->stats.credit_request_failed,
+ wlfc->stats.mac_update_failed,
+ wlfc->stats.psmode_update_failed,
+ wlfc->stats.delayq_full_error,
+ wlfc->stats.rollback_failed);
+
+ bcm_bprintf(strbuf, "PKTS (init_credit,credit,sent,drop_d,drop_s,outoforder) "
+ "(AC0[%d,%d,%d,%d,%d,%d],AC1[%d,%d,%d,%d,%d,%d],AC2[%d,%d,%d,%d,%d,%d],"
+ "AC3[%d,%d,%d,%d,%d,%d],BC_MC[%d,%d,%d,%d,%d,%d])\n",
+ wlfc->Init_FIFO_credit[0], wlfc->FIFO_credit[0], wlfc->stats.send_pkts[0],
+ wlfc->stats.drop_pkts[0], wlfc->stats.drop_pkts[1], wlfc->stats.ooo_pkts[0],
+ wlfc->Init_FIFO_credit[1], wlfc->FIFO_credit[1], wlfc->stats.send_pkts[1],
+ wlfc->stats.drop_pkts[2], wlfc->stats.drop_pkts[3], wlfc->stats.ooo_pkts[1],
+ wlfc->Init_FIFO_credit[2], wlfc->FIFO_credit[2], wlfc->stats.send_pkts[2],
+ wlfc->stats.drop_pkts[4], wlfc->stats.drop_pkts[5], wlfc->stats.ooo_pkts[2],
+ wlfc->Init_FIFO_credit[3], wlfc->FIFO_credit[3], wlfc->stats.send_pkts[3],
+ wlfc->stats.drop_pkts[6], wlfc->stats.drop_pkts[7], wlfc->stats.ooo_pkts[3],
+ wlfc->Init_FIFO_credit[4], wlfc->FIFO_credit[4], wlfc->stats.send_pkts[4],
+ wlfc->stats.drop_pkts[8], wlfc->stats.drop_pkts[9], wlfc->stats.ooo_pkts[4]);
+
+ bcm_bprintf(strbuf, "\n");
+ for (i = 0; i < WLFC_MAX_IFNUM; i++) {
+ if (interfaces[i].occupied) {
+ char* iftype_desc;
+
+ if (interfaces[i].iftype > WLC_E_IF_ROLE_P2P_CLIENT)
+				iftype_desc = "<Unknown>";
+ else
+ iftype_desc = iftypes[interfaces[i].iftype];
+
+ ea = interfaces[i].ea;
+ bcm_bprintf(strbuf, "INTERFACE[%d].ea = "
+ "["MACDBG"], if:%d, type: %s "
+ "netif_flow_control:%s\n", i,
+ MAC2STRDBG(ea), interfaces[i].interface_id,
+ iftype_desc, ((wlfc->hostif_flow_state[i] == OFF)
+ ? " OFF":" ON"));
+
+ bcm_bprintf(strbuf, "INTERFACE[%d].PSQ(len,state,credit),"
+ "(trans,supp_trans,onbus)"
+ "= (%d,%s,%d),(%d,%d,%d)\n",
+ i,
+ interfaces[i].psq.n_pkts_tot,
+ ((interfaces[i].state ==
+ WLFC_STATE_OPEN) ? "OPEN":"CLOSE"),
+ interfaces[i].requested_credit,
+ interfaces[i].transit_count,
+ interfaces[i].suppr_transit_count,
+ interfaces[i].onbus_pkts_count);
+
+ bcm_bprintf(strbuf, "INTERFACE[%d].PSQ"
+ "(delay0,sup0,afq0),(delay1,sup1,afq1),(delay2,sup2,afq2),"
+ "(delay3,sup3,afq3),(delay4,sup4,afq4) = (%d,%d,%d),"
+ "(%d,%d,%d),(%d,%d,%d),(%d,%d,%d),(%d,%d,%d)\n",
+ i,
+ interfaces[i].psq.q[0].n_pkts,
+ interfaces[i].psq.q[1].n_pkts,
+ interfaces[i].afq.q[0].n_pkts,
+ interfaces[i].psq.q[2].n_pkts,
+ interfaces[i].psq.q[3].n_pkts,
+ interfaces[i].afq.q[1].n_pkts,
+ interfaces[i].psq.q[4].n_pkts,
+ interfaces[i].psq.q[5].n_pkts,
+ interfaces[i].afq.q[2].n_pkts,
+ interfaces[i].psq.q[6].n_pkts,
+ interfaces[i].psq.q[7].n_pkts,
+ interfaces[i].afq.q[3].n_pkts,
+ interfaces[i].psq.q[8].n_pkts,
+ interfaces[i].psq.q[9].n_pkts,
+ interfaces[i].afq.q[4].n_pkts);
+ }
+ }
+
+ bcm_bprintf(strbuf, "\n");
+ for (i = 0; i < WLFC_MAC_DESC_TABLE_SIZE; i++) {
+ if (mac_table[i].occupied) {
+ ea = mac_table[i].ea;
+ bcm_bprintf(strbuf, "MAC_table[%d].ea = "
+ "["MACDBG"], if:%d \n", i,
+ MAC2STRDBG(ea), mac_table[i].interface_id);
+
+ bcm_bprintf(strbuf, "MAC_table[%d].PSQ(len,state,credit),"
+ "(trans,supp_trans,onbus)"
+ "= (%d,%s,%d),(%d,%d,%d)\n",
+ i,
+ mac_table[i].psq.n_pkts_tot,
+ ((mac_table[i].state ==
+ WLFC_STATE_OPEN) ? " OPEN":"CLOSE"),
+ mac_table[i].requested_credit,
+ mac_table[i].transit_count,
+ mac_table[i].suppr_transit_count,
+ mac_table[i].onbus_pkts_count);
+#ifdef PROP_TXSTATUS_DEBUG
+ bcm_bprintf(strbuf, "MAC_table[%d]: (opened, closed) = (%d, %d)\n",
+ i, mac_table[i].opened_ct, mac_table[i].closed_ct);
+#endif
+ bcm_bprintf(strbuf, "MAC_table[%d].PSQ"
+ "(delay0,sup0,afq0),(delay1,sup1,afq1),(delay2,sup2,afq2),"
+ "(delay3,sup3,afq3),(delay4,sup4,afq4) =(%d,%d,%d),"
+ "(%d,%d,%d),(%d,%d,%d),(%d,%d,%d),(%d,%d,%d)\n",
+ i,
+ mac_table[i].psq.q[0].n_pkts,
+ mac_table[i].psq.q[1].n_pkts,
+ mac_table[i].afq.q[0].n_pkts,
+ mac_table[i].psq.q[2].n_pkts,
+ mac_table[i].psq.q[3].n_pkts,
+ mac_table[i].afq.q[1].n_pkts,
+ mac_table[i].psq.q[4].n_pkts,
+ mac_table[i].psq.q[5].n_pkts,
+ mac_table[i].afq.q[2].n_pkts,
+ mac_table[i].psq.q[6].n_pkts,
+ mac_table[i].psq.q[7].n_pkts,
+ mac_table[i].afq.q[3].n_pkts,
+ mac_table[i].psq.q[8].n_pkts,
+ mac_table[i].psq.q[9].n_pkts,
+ mac_table[i].afq.q[4].n_pkts);
+
+ }
+ }
+
+#ifdef PROP_TXSTATUS_DEBUG
+ {
+ int avg;
+ int moving_avg = 0;
+ int moving_samples;
+
+ if (wlfc->stats.latency_sample_count) {
+ moving_samples = sizeof(wlfc->stats.deltas)/sizeof(uint32);
+
+ for (i = 0; i < moving_samples; i++)
+ moving_avg += wlfc->stats.deltas[i];
+ moving_avg /= moving_samples;
+
+ avg = (100 * wlfc->stats.total_status_latency) /
+ wlfc->stats.latency_sample_count;
+ bcm_bprintf(strbuf, "txstatus latency (average, last, moving[%d]) = "
+ "(%d.%d, %03d, %03d)\n",
+ moving_samples, avg/100, (avg - (avg/100)*100),
+ wlfc->stats.latency_most_recent,
+ moving_avg);
+ }
+ }
+
+ bcm_bprintf(strbuf, "wlfc- fifo[0-5] credit stats: sent = (%d,%d,%d,%d,%d,%d), "
+ "back = (%d,%d,%d,%d,%d,%d)\n",
+ wlfc->stats.fifo_credits_sent[0],
+ wlfc->stats.fifo_credits_sent[1],
+ wlfc->stats.fifo_credits_sent[2],
+ wlfc->stats.fifo_credits_sent[3],
+ wlfc->stats.fifo_credits_sent[4],
+ wlfc->stats.fifo_credits_sent[5],
+
+ wlfc->stats.fifo_credits_back[0],
+ wlfc->stats.fifo_credits_back[1],
+ wlfc->stats.fifo_credits_back[2],
+ wlfc->stats.fifo_credits_back[3],
+ wlfc->stats.fifo_credits_back[4],
+ wlfc->stats.fifo_credits_back[5]);
+ {
+ uint32 fifo_cr_sent = 0;
+ uint32 fifo_cr_acked = 0;
+ uint32 request_cr_sent = 0;
+ uint32 request_cr_ack = 0;
+ uint32 bc_mc_cr_ack = 0;
+
+ for (i = 0; i < sizeof(wlfc->stats.fifo_credits_sent)/sizeof(uint32); i++) {
+ fifo_cr_sent += wlfc->stats.fifo_credits_sent[i];
+ }
+
+ for (i = 0; i < sizeof(wlfc->stats.fifo_credits_back)/sizeof(uint32); i++) {
+ fifo_cr_acked += wlfc->stats.fifo_credits_back[i];
+ }
+
+ for (i = 0; i < WLFC_MAC_DESC_TABLE_SIZE; i++) {
+ if (wlfc->destination_entries.nodes[i].occupied) {
+ request_cr_sent +=
+ wlfc->destination_entries.nodes[i].dstncredit_sent_packets;
+ }
+ }
+ for (i = 0; i < WLFC_MAX_IFNUM; i++) {
+ if (wlfc->destination_entries.interfaces[i].occupied) {
+ request_cr_sent +=
+ wlfc->destination_entries.interfaces[i].dstncredit_sent_packets;
+ }
+ }
+ for (i = 0; i < WLFC_MAC_DESC_TABLE_SIZE; i++) {
+ if (wlfc->destination_entries.nodes[i].occupied) {
+ request_cr_ack +=
+ wlfc->destination_entries.nodes[i].dstncredit_acks;
+ }
+ }
+ for (i = 0; i < WLFC_MAX_IFNUM; i++) {
+ if (wlfc->destination_entries.interfaces[i].occupied) {
+ request_cr_ack +=
+ wlfc->destination_entries.interfaces[i].dstncredit_acks;
+ }
+ }
+ bcm_bprintf(strbuf, "wlfc- (sent, status) => pq(%d,%d), vq(%d,%d),"
+ "other:%d, bc_mc:%d, signal-only, (sent,freed): (%d,%d)",
+ fifo_cr_sent, fifo_cr_acked,
+ request_cr_sent, request_cr_ack,
+ wlfc->destination_entries.other.dstncredit_acks,
+ bc_mc_cr_ack,
+ wlfc->stats.signal_only_pkts_sent, wlfc->stats.signal_only_pkts_freed);
+ }
+#endif /* PROP_TXSTATUS_DEBUG */
+ bcm_bprintf(strbuf, "\n");
+ bcm_bprintf(strbuf, "wlfc- pkt((in,2bus,txstats,hdrpull,out),"
+ "(dropped,hdr_only,wlc_tossed,wlc_dropped,wlc_exptime)"
+ "(freed,free_err,rollback)) = "
+ "((%d,%d,%d,%d,%d),(%d,%d,%d,%d,%d),(%d,%d,%d))\n",
+ wlfc->stats.pktin,
+ wlfc->stats.pkt2bus,
+ wlfc->stats.txstatus_in,
+ wlfc->stats.dhd_hdrpulls,
+ wlfc->stats.pktout,
+
+ wlfc->stats.pktdropped,
+ wlfc->stats.wlfc_header_only_pkt,
+ wlfc->stats.wlc_tossed_pkts,
+ wlfc->stats.pkt_dropped,
+ wlfc->stats.pkt_exptime,
+
+ wlfc->stats.pkt_freed,
+ wlfc->stats.pkt_free_err, wlfc->stats.rollback);
+
+ bcm_bprintf(strbuf, "wlfc- suppress((d11,wlc,err),enq(d11,wl,hq,mac?),retx(d11,wlc,hq)) = "
+ "((%d,%d,%d),(%d,%d,%d,%d),(%d,%d,%d))\n",
+ wlfc->stats.d11_suppress,
+ wlfc->stats.wl_suppress,
+ wlfc->stats.bad_suppress,
+
+ wlfc->stats.psq_d11sup_enq,
+ wlfc->stats.psq_wlsup_enq,
+ wlfc->stats.psq_hostq_enq,
+ wlfc->stats.mac_handle_notfound,
+
+ wlfc->stats.psq_d11sup_retx,
+ wlfc->stats.psq_wlsup_retx,
+ wlfc->stats.psq_hostq_retx);
+
+ bcm_bprintf(strbuf, "wlfc- cleanup(txq,psq,fw) = (%d,%d,%d)\n",
+ wlfc->stats.cleanup_txq_cnt,
+ wlfc->stats.cleanup_psq_cnt,
+ wlfc->stats.cleanup_fw_cnt);
+
+ bcm_bprintf(strbuf, "wlfc- generic error: %d\n", wlfc->stats.generic_error);
+
+ for (i = 0; i < WLFC_MAX_IFNUM; i++) {
+ bcm_bprintf(strbuf, "wlfc- if[%d], pkt_cnt_in_q/AC[0-4] = (%d,%d,%d,%d,%d)\n", i,
+ wlfc->pkt_cnt_in_q[i][0],
+ wlfc->pkt_cnt_in_q[i][1],
+ wlfc->pkt_cnt_in_q[i][2],
+ wlfc->pkt_cnt_in_q[i][3],
+ wlfc->pkt_cnt_in_q[i][4]);
+ }
+ bcm_bprintf(strbuf, "\n");
+
+ dhd_os_wlfc_unblock(dhdp);
+ return BCME_OK;
+} /* dhd_wlfc_dump */
+
+int dhd_wlfc_clear_counts(dhd_pub_t *dhd)
+{
+ athost_wl_status_info_t* wlfc;
+ wlfc_hanger_t* hanger;
+
+ if (dhd == NULL) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_BADARG;
+ }
+
+ dhd_os_wlfc_block(dhd);
+
+ if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+ dhd_os_wlfc_unblock(dhd);
+ return WLFC_UNSUPPORTED;
+ }
+
+ wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+
+ memset(&wlfc->stats, 0, sizeof(athost_wl_stat_counters_t));
+
+ if (!WLFC_GET_AFQ(dhd->wlfc_mode)) {
+ hanger = (wlfc_hanger_t*)wlfc->hanger;
+
+ hanger->pushed = 0;
+ hanger->popped = 0;
+ hanger->failed_slotfind = 0;
+ hanger->failed_to_pop = 0;
+ hanger->failed_to_push = 0;
+ }
+
+ dhd_os_wlfc_unblock(dhd);
+
+ return BCME_OK;
+}
+
+/** returns, via *val, whether flow control is enabled */
+int dhd_wlfc_get_enable(dhd_pub_t *dhd, bool *val)
+{
+ if (!dhd || !val) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_BADARG;
+ }
+
+ dhd_os_wlfc_block(dhd);
+
+ *val = dhd->wlfc_enabled;
+
+ dhd_os_wlfc_unblock(dhd);
+
+ return BCME_OK;
+}
+
+/** Called via an IOVAR */
+int dhd_wlfc_get_mode(dhd_pub_t *dhd, int *val)
+{
+ if (!dhd || !val) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_BADARG;
+ }
+
+ dhd_os_wlfc_block(dhd);
+
+ *val = dhd->wlfc_state ? dhd->proptxstatus_mode : 0;
+
+ dhd_os_wlfc_unblock(dhd);
+
+ return BCME_OK;
+}
+
+/** Called via an IOVAR */
+int dhd_wlfc_set_mode(dhd_pub_t *dhd, int val)
+{
+ if (!dhd) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_BADARG;
+ }
+
+ dhd_os_wlfc_block(dhd);
+
+ if (dhd->wlfc_state) {
+ dhd->proptxstatus_mode = val & 0xff;
+ }
+
+ dhd_os_wlfc_unblock(dhd);
+
+ return BCME_OK;
+}
+
+/** Called when rx frame is received from the dongle */
+bool dhd_wlfc_is_header_only_pkt(dhd_pub_t * dhd, void *pktbuf)
+{
+ athost_wl_status_info_t* wlfc;
+ bool rc = FALSE;
+
+ if (dhd == NULL) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return FALSE;
+ }
+
+ dhd_os_wlfc_block(dhd);
+
+ if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+ dhd_os_wlfc_unblock(dhd);
+ return FALSE;
+ }
+
+ wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+
+ if (PKTLEN(wlfc->osh, pktbuf) == 0) {
+ wlfc->stats.wlfc_header_only_pkt++;
+ rc = TRUE;
+ }
+
+ dhd_os_wlfc_unblock(dhd);
+
+ return rc;
+}
+
+int dhd_wlfc_flowcontrol(dhd_pub_t *dhdp, bool state, bool bAcquireLock)
+{
+ if (dhdp == NULL) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_BADARG;
+ }
+
+ if (bAcquireLock) {
+ dhd_os_wlfc_block(dhdp);
+ }
+
+ if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE) ||
+ dhdp->proptxstatus_module_ignore) {
+ if (bAcquireLock) {
+ dhd_os_wlfc_unblock(dhdp);
+ }
+ return WLFC_UNSUPPORTED;
+ }
+
+ if (state != dhdp->proptxstatus_txoff) {
+ dhdp->proptxstatus_txoff = state;
+ }
+
+ if (bAcquireLock) {
+ dhd_os_wlfc_unblock(dhdp);
+ }
+
+ return BCME_OK;
+}
+
+/** Called when eg an rx frame is received from the dongle */
+int dhd_wlfc_save_rxpath_ac_time(dhd_pub_t * dhd, uint8 prio)
+{
+ athost_wl_status_info_t* wlfc;
+ int rx_path_ac = -1;
+
+ if ((dhd == NULL) || (prio >= NUMPRIO)) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_BADARG;
+ }
+
+ dhd_os_wlfc_block(dhd);
+
+ if (!dhd->wlfc_rxpkt_chk) {
+ dhd_os_wlfc_unblock(dhd);
+ return BCME_OK;
+ }
+
+ if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+ dhd_os_wlfc_unblock(dhd);
+ return WLFC_UNSUPPORTED;
+ }
+
+ wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+
+ rx_path_ac = prio2fifo[prio];
+ wlfc->rx_timestamp[rx_path_ac] = OSL_SYSUPTIME();
+
+ dhd_os_wlfc_unblock(dhd);
+
+ return BCME_OK;
+}
+
+/** called via an IOVAR */
+int dhd_wlfc_get_module_ignore(dhd_pub_t *dhd, int *val)
+{
+ if (!dhd || !val) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_BADARG;
+ }
+
+ dhd_os_wlfc_block(dhd);
+
+ *val = dhd->proptxstatus_module_ignore;
+
+ dhd_os_wlfc_unblock(dhd);
+
+ return BCME_OK;
+}
+
+/** called via an IOVAR */
+int dhd_wlfc_set_module_ignore(dhd_pub_t *dhd, int val)
+{
+ uint32 tlv = 0;
+ bool bChanged = FALSE;
+
+ if (!dhd) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_BADARG;
+ }
+
+ dhd_os_wlfc_block(dhd);
+
+ if ((bool)val != dhd->proptxstatus_module_ignore) {
+ dhd->proptxstatus_module_ignore = (val != 0);
+ /* force txstatus_ignore sync with proptxstatus_module_ignore */
+ dhd->proptxstatus_txstatus_ignore = dhd->proptxstatus_module_ignore;
+ if (FALSE == dhd->proptxstatus_module_ignore) {
+ tlv = WLFC_FLAGS_RSSI_SIGNALS |
+ WLFC_FLAGS_XONXOFF_SIGNALS |
+ WLFC_FLAGS_CREDIT_STATUS_SIGNALS |
+ WLFC_FLAGS_HOST_PROPTXSTATUS_ACTIVE;
+ }
+ /* always enable host reorder */
+ tlv |= WLFC_FLAGS_HOST_RXRERODER_ACTIVE;
+ bChanged = TRUE;
+ }
+
+ dhd_os_wlfc_unblock(dhd);
+
+ if (bChanged) {
+		/* enable proptxstatus signaling as selected */
+ if (dhd_wl_ioctl_set_intiovar(dhd, "tlv", tlv, WLC_SET_VAR, TRUE, 0)) {
+ DHD_ERROR(("%s: failed to set bdcv2 tlv signaling to 0x%x\n",
+ __FUNCTION__, tlv));
+ } else {
+ DHD_ERROR(("%s: successfully set bdcv2 tlv signaling to 0x%x\n",
+ __FUNCTION__, tlv));
+ }
+ }
+
+#if defined(DHD_WLFC_THREAD)
+ _dhd_wlfc_thread_wakeup(dhd);
+#endif /* defined(DHD_WLFC_THREAD) */
+
+ return BCME_OK;
+}
+
+/** called via an IOVAR */
+int dhd_wlfc_get_credit_ignore(dhd_pub_t *dhd, int *val)
+{
+ if (!dhd || !val) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_BADARG;
+ }
+
+ dhd_os_wlfc_block(dhd);
+
+ *val = dhd->proptxstatus_credit_ignore;
+
+ dhd_os_wlfc_unblock(dhd);
+
+ return BCME_OK;
+}
+
+/** called via an IOVAR */
+int dhd_wlfc_set_credit_ignore(dhd_pub_t *dhd, int val)
+{
+ if (!dhd) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_BADARG;
+ }
+
+ dhd_os_wlfc_block(dhd);
+
+ dhd->proptxstatus_credit_ignore = (val != 0);
+
+ dhd_os_wlfc_unblock(dhd);
+
+ return BCME_OK;
+}
+
+/** called via an IOVAR */
+int dhd_wlfc_get_txstatus_ignore(dhd_pub_t *dhd, int *val)
+{
+ if (!dhd || !val) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_BADARG;
+ }
+
+ dhd_os_wlfc_block(dhd);
+
+ *val = dhd->proptxstatus_txstatus_ignore;
+
+ dhd_os_wlfc_unblock(dhd);
+
+ return BCME_OK;
+}
+
+/** called via an IOVAR */
+int dhd_wlfc_set_txstatus_ignore(dhd_pub_t *dhd, int val)
+{
+ if (!dhd) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_BADARG;
+ }
+
+ dhd_os_wlfc_block(dhd);
+
+ dhd->proptxstatus_txstatus_ignore = (val != 0);
+
+ dhd_os_wlfc_unblock(dhd);
+
+ return BCME_OK;
+}
+
+/** called via an IOVAR */
+int dhd_wlfc_get_rxpkt_chk(dhd_pub_t *dhd, int *val)
+{
+ if (!dhd || !val) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_BADARG;
+ }
+
+ dhd_os_wlfc_block(dhd);
+
+ *val = dhd->wlfc_rxpkt_chk;
+
+ dhd_os_wlfc_unblock(dhd);
+
+ return BCME_OK;
+}
+
+/** called via an IOVAR */
+int dhd_wlfc_set_rxpkt_chk(dhd_pub_t *dhd, int val)
+{
+ if (!dhd) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_BADARG;
+ }
+
+ dhd_os_wlfc_block(dhd);
+
+ dhd->wlfc_rxpkt_chk = (val != 0);
+
+ dhd_os_wlfc_unblock(dhd);
+
+ return BCME_OK;
+}
+
+int dhd_txpkt_log_and_dump(dhd_pub_t *dhdp, void* pkt, uint16 *pktfate_status)
+{
+	uint32 pktid;
+	uint32 pktlen;
+	uint8 *pktdata;
+#ifdef BDC
+	struct bdc_header *bdch;
+	uint32 bdc_len;
+#endif /* BDC */
+	uint8 ifidx;
+	uint8 hcnt;
+	uint8 fifo_id;
+
+	/* validate the packet before dereferencing it */
+	if (!pkt) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	pktlen = PKTLEN(dhdp->osh, pkt);
+	pktdata = PKTDATA(dhdp->osh, pkt);
+	ifidx = DHD_PKTTAG_IF(PKTTAG(pkt));
+	hcnt = WL_TXSTATUS_GET_FREERUNCTR(DHD_PKTTAG_H2DTAG(PKTTAG(pkt)));
+	fifo_id = DHD_PKTTAG_FIFO(PKTTAG(pkt));
+	pktid = (ifidx << DHD_PKTID_IF_SHIFT) | (fifo_id << DHD_PKTID_FIFO_SHIFT) | hcnt;
+#ifdef BDC
+	bdch = (struct bdc_header *)pktdata;
+	bdc_len = BDC_HEADER_LEN + (bdch->dataOffset << DHD_WORD_TO_LEN_SHIFT);
+	pktlen -= bdc_len;
+	pktdata = pktdata + bdc_len;
+#endif /* BDC */
+	dhd_handle_pktdata(dhdp, ifidx, pkt, pktdata, pktid, pktlen,
+		pktfate_status, NULL, TRUE, FALSE, TRUE);
+	return BCME_OK;
+}
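+
+/*
+ * Packing sketch for the monitor id built above (the shift constants are
+ * defined elsewhere in the driver); with hypothetical values ifidx = 1,
+ * fifo_id = 2 and hcnt = 0x3c:
+ *
+ *   pktid = (1 << DHD_PKTID_IF_SHIFT) | (2 << DHD_PKTID_FIFO_SHIFT) | 0x3c
+ *
+ * placing the interface and fifo ids in the bit positions selected by the two
+ * shifts, above the 8-bit free-running counter.
+ */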
+
+#ifdef PROPTX_MAXCOUNT
+int dhd_wlfc_update_maxcount(dhd_pub_t *dhdp, uint8 ifid, int maxcount)
+{
+ athost_wl_status_info_t* ctx;
+ int rc = 0;
+
+ if (dhdp == NULL) {
+ DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
+ return BCME_BADARG;
+ }
+
+ dhd_os_wlfc_block(dhdp);
+
+ if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+ rc = WLFC_UNSUPPORTED;
+ goto exit;
+ }
+
+ if (ifid >= WLFC_MAX_IFNUM) {
+ DHD_ERROR(("%s: bad ifid\n", __FUNCTION__));
+ rc = BCME_BADARG;
+ goto exit;
+ }
+
+ ctx = (athost_wl_status_info_t*)dhdp->wlfc_state;
+ ctx->destination_entries.interfaces[ifid].transit_maxcount = maxcount;
+exit:
+ dhd_os_wlfc_unblock(dhdp);
+ return rc;
+}
+#endif /* PROPTX_MAXCOUNT */
+#endif /* PROP_TXSTATUS */
diff --git a/bcmdhd.101.10.361.x/dhd_wlfc.h b/bcmdhd.101.10.361.x/dhd_wlfc.h
new file mode 100755
index 0000000..1089a2f
--- /dev/null
+++ b/bcmdhd.101.10.361.x/dhd_wlfc.h
@@ -0,0 +1,596 @@
+/*
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ *
+ */
+#ifndef __wlfc_host_driver_definitions_h__
+#define __wlfc_host_driver_definitions_h__
+
+#ifdef QMONITOR
+#include <dhd_qmon.h>
+#endif
+
+/* #define OOO_DEBUG */
+
+#define KERNEL_THREAD_RETURN_TYPE int
+
+typedef int (*f_commitpkt_t)(struct dhd_bus *ctx, void* p);
+typedef bool (*f_processpkt_t)(void* p, void* arg);
+
+#define WLFC_UNSUPPORTED -9999
+
+#define WLFC_NO_TRAFFIC -1
+#define WLFC_MULTI_TRAFFIC 0
+
+#define BUS_RETRIES 1 /* # of retries before aborting a bus tx operation */
+
+/** 16 bits will provide an absolute max of 65536 slots */
+#define WLFC_HANGER_MAXITEMS 3072
+
+#define WLFC_HANGER_ITEM_STATE_FREE 1
+#define WLFC_HANGER_ITEM_STATE_INUSE 2
+#define WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED 3
+#define WLFC_HANGER_ITEM_STATE_FLUSHED 4
+
+#define WLFC_HANGER_PKT_STATE_TXSTATUS 1
+#define WLFC_HANGER_PKT_STATE_BUSRETURNED 2
+#define WLFC_HANGER_PKT_STATE_COMPLETE \
+ (WLFC_HANGER_PKT_STATE_TXSTATUS | WLFC_HANGER_PKT_STATE_BUSRETURNED)
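+
+/*
+ * A hanger slot is done only once both bits above have been set, i.e.
+ * pkt_state == WLFC_HANGER_PKT_STATE_COMPLETE. The firmware txstatus and the
+ * bus TX completion are tracked independently since they may arrive in either
+ * order.
+ */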
+
+typedef enum {
+ Q_TYPE_PSQ, /**< Power Save Queue, contains both delayed and suppressed packets */
+ Q_TYPE_AFQ /**< At Firmware Queue */
+} q_type_t;
+
+typedef enum ewlfc_packet_state {
+ eWLFC_PKTTYPE_NEW, /**< unused in the code (Jan 2015) */
+ eWLFC_PKTTYPE_DELAYED, /**< packet did not enter wlfc yet */
+ eWLFC_PKTTYPE_SUPPRESSED, /**< packet entered wlfc and was suppressed by the dongle */
+ eWLFC_PKTTYPE_MAX
+} ewlfc_packet_state_t;
+
+typedef enum ewlfc_mac_entry_action {
+ eWLFC_MAC_ENTRY_ACTION_ADD,
+ eWLFC_MAC_ENTRY_ACTION_DEL,
+ eWLFC_MAC_ENTRY_ACTION_UPDATE,
+ eWLFC_MAC_ENTRY_ACTION_MAX
+} ewlfc_mac_entry_action_t;
+
+typedef struct wlfc_hanger_item {
+ uint8 state;
+ uint8 gen;
+	uint8 pkt_state; /**< bitmask of eg WLFC_HANGER_PKT_STATE_TXSTATUS */
+ uint8 pkt_txstatus;
+ uint32 identifier;
+ void* pkt;
+#ifdef PROP_TXSTATUS_DEBUG
+ uint32 push_time;
+#endif
+ struct wlfc_hanger_item *next;
+} wlfc_hanger_item_t;
+
+/** hanger contains packets that have been posted by the dhd to the dongle and are expected back */
+typedef struct wlfc_hanger {
+ int max_items;
+ uint32 pushed;
+ uint32 popped;
+ uint32 failed_to_push;
+ uint32 failed_to_pop;
+ uint32 failed_slotfind;
+ uint32 slot_pos;
+ /** XXX: items[1] should be the last element here. Do not add new elements below it. */
+ wlfc_hanger_item_t items[1];
+} wlfc_hanger_t;
+
+#define WLFC_HANGER_SIZE(n) ((sizeof(wlfc_hanger_t) - \
+ sizeof(wlfc_hanger_item_t)) + ((n)*sizeof(wlfc_hanger_item_t)))
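+
+/*
+ * Illustrative expansion: WLFC_HANGER_SIZE(3) equals sizeof(wlfc_hanger_t)
+ * plus two extra wlfc_hanger_item_t, since items[1] acts as a pre-C99
+ * flexible array member whose single built-in slot is subtracted out first.
+ */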
+
+#define WLFC_STATE_OPEN 1 /**< remote MAC is able to receive packets */
+#define WLFC_STATE_CLOSE 2 /**< remote MAC is in power save mode */
+
+#define WLFC_PSQ_PREC_COUNT ((AC_COUNT + 1) * 2) /**< 2 for each AC traffic and bc/mc */
+#define WLFC_AFQ_PREC_COUNT (AC_COUNT + 1)
+
+#define WLFC_PSQ_LEN (4096 * 8)
+
+#ifdef BCMDBUS
+#define WLFC_FLOWCONTROL_HIWATER 512
+#define WLFC_FLOWCONTROL_LOWATER (WLFC_FLOWCONTROL_HIWATER / 4)
+#else
+#define WLFC_FLOWCONTROL_HIWATER ((4096 * 8) - 256)
+#define WLFC_FLOWCONTROL_LOWATER 256
+#endif
+
+#if (WLFC_FLOWCONTROL_HIWATER >= (WLFC_PSQ_LEN - 256))
+#undef WLFC_FLOWCONTROL_HIWATER
+#define WLFC_FLOWCONTROL_HIWATER (WLFC_PSQ_LEN - 256)
+#undef WLFC_FLOWCONTROL_LOWATER
+#define WLFC_FLOWCONTROL_LOWATER (WLFC_FLOWCONTROL_HIWATER / 4)
+#endif
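+
+/*
+ * Hysteresis sketch (intent inferred from the watermark pair): host-side flow
+ * control asserts once the delay queue depth reaches WLFC_FLOWCONTROL_HIWATER
+ * and deasserts after it drains to WLFC_FLOWCONTROL_LOWATER; the guard block
+ * above re-derives both when WLFC_PSQ_LEN leaves too little headroom.
+ */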
+
+#define WLFC_LOG_BUF_SIZE (1024*1024)
+
+/** Properties related to a remote MAC entity */
+typedef struct wlfc_mac_descriptor {
+ uint8 occupied; /**< if 0, this descriptor is unused and thus can be (re)used */
+ uint8 interface_id;
+ uint8 iftype; /**< eg WLC_E_IF_ROLE_STA */
+ uint8 state; /**< eg WLFC_STATE_OPEN */
+ uint8 ac_bitmap; /**< automatic power save delivery (APSD) */
+ uint8 requested_credit;
+ uint8 requested_packet; /**< unit: [number of packets] */
+ uint8 ea[ETHER_ADDR_LEN];
+
+ /** per-(MAC,AC) seq counts for packets going to the device, plus one entry for bc/mc */
+ uint8 seq[AC_COUNT + 1];
+ uint8 generation; /**< toggles between 0 and 1 */
+ struct pktq psq; /**< contains both 'delayed' and 'suppressed' packets */
+ /** packets at firmware queue */
+ struct pktq afq;
+#if defined(BCMINTERNAL) && defined(OOO_DEBUG)
+ uint8 last_send_gen[AC_COUNT+1];
+ uint8 last_send_seq[AC_COUNT+1];
+ uint8 last_complete_seq[AC_COUNT+1];
+#endif /* defined(BCMINTERNAL) && defined(OOO_DEBUG) */
+ /** The AC pending bitmap that was reported to the fw at last change */
+ uint8 traffic_lastreported_bmp;
+ /** The new AC pending bitmap */
+ uint8 traffic_pending_bmp;
+ /** 1= send on next opportunity */
+ uint8 send_tim_signal;
+ uint8 mac_handle; /**< mac handles are assigned by the dongle */
+ /** Number of packets at dongle for this entry. */
+ int transit_count;
+ /** Number of suppressions to wait for before evicting from the delayQ */
+ int suppr_transit_count;
+ /** pkt sent to bus but no bus TX complete yet */
+ int onbus_pkts_count;
+ /** flag. TRUE when remote MAC is in suppressed state */
+ uint8 suppressed;
+
+#ifdef QMONITOR
+ dhd_qmon_t qmon;
+#endif /* QMONITOR */
+
+#ifdef PROP_TXSTATUS_DEBUG
+ uint32 dstncredit_sent_packets;
+ uint32 dstncredit_acks;
+ uint32 opened_ct;
+ uint32 closed_ct;
+#endif
+#ifdef PROPTX_MAXCOUNT
+ /** Max Number of packets at dongle for this entry. */
+ int transit_maxcount;
+#endif /* PROPTX_MAXCOUNT */
+ struct wlfc_mac_descriptor* prev;
+ struct wlfc_mac_descriptor* next;
+#ifdef BULK_DEQUEUE
+ uint16 release_count[AC_COUNT + 1];
+#endif
+} wlfc_mac_descriptor_t;
+
+/** A 'commit' is the hand over of a packet from the host OS layer to the layer below (eg DBUS) */
+typedef struct dhd_wlfc_commit_info {
+ uint8 needs_hdr;
+ uint8 ac_fifo_credit_spent;
+ ewlfc_packet_state_t pkt_type;
+ wlfc_mac_descriptor_t* mac_entry;
+ void* p;
+} dhd_wlfc_commit_info_t;
+
+#define WLFC_DECR_SEQCOUNT(entry, prec) do { \
+	if (entry->seq[(prec)] == 0) { \
+		entry->seq[(prec)] = 0xff; \
+	} else { \
+		entry->seq[(prec)]--; \
+	} \
+} while (0)
+
+#define WLFC_INCR_SEQCOUNT(entry, prec) entry->seq[(prec)]++
+#define WLFC_SEQCOUNT(entry, prec) entry->seq[(prec)]
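
    The per-(MAC,AC) sequence counters above are uint8 values that wrap at
    the 0/0xff boundary. A small illustrative sketch; the helper name is
    hypothetical, and memset/ASSERT are assumed from the surrounding
    driver headers:

        /* Illustration only: WLFC_DECR_SEQCOUNT wraps 0 back to 0xff,
         * while WLFC_INCR_SEQCOUNT relies on natural uint8 wraparound.
         */
        static void
        wlfc_seq_wrap_demo(void)
        {
        	wlfc_mac_descriptor_t d;
        	wlfc_mac_descriptor_t *entry = &d;

        	memset(&d, 0, sizeof(d));
        	WLFC_DECR_SEQCOUNT(entry, 0);	/* seq[0]: 0x00 -> 0xff */
        	WLFC_INCR_SEQCOUNT(entry, 0);	/* seq[0]: 0xff -> 0x00 */
        	ASSERT(WLFC_SEQCOUNT(entry, 0) == 0);
        }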
+
+typedef struct athost_wl_stat_counters {
+ uint32 pktin;
+ uint32 pktout;
+ uint32 pkt2bus;
+ uint32 pktdropped;
+ uint32 tlv_parse_failed;
+ uint32 rollback;
+ uint32 rollback_failed;
+ uint32 delayq_full_error;
+ uint32 credit_request_failed;
+ uint32 packet_request_failed;
+ uint32 mac_update_failed;
+ uint32 psmode_update_failed;
+ uint32 interface_update_failed;
+ uint32 wlfc_header_only_pkt;
+ uint32 txstatus_in;
+ uint32 d11_suppress;
+ uint32 wl_suppress;
+ uint32 bad_suppress;
+ uint32 pkt_dropped;
+ uint32 pkt_exptime;
+ uint32 pkt_freed;
+ uint32 pkt_free_err;
+ uint32 psq_wlsup_retx;
+ uint32 psq_wlsup_enq;
+ uint32 psq_d11sup_retx;
+ uint32 psq_d11sup_enq;
+ uint32 psq_hostq_retx;
+ uint32 psq_hostq_enq;
+ uint32 mac_handle_notfound;
+ uint32 wlc_tossed_pkts;
+ uint32 dhd_hdrpulls;
+ uint32 generic_error;
+ /* an extra one for bc/mc traffic */
+ uint32 send_pkts[AC_COUNT + 1];
+ uint32 drop_pkts[WLFC_PSQ_PREC_COUNT];
+ uint32 ooo_pkts[AC_COUNT + 1];
+#ifdef PROP_TXSTATUS_DEBUG
+ /** all pkt2bus -> txstatus latency accumulated */
+ uint32 latency_sample_count;
+ uint32 total_status_latency;
+ uint32 latency_most_recent;
+ int idx_delta;
+ uint32 deltas[10];
+ uint32 fifo_credits_sent[6];
+ uint32 fifo_credits_back[6];
+ uint32 dropped_qfull[6];
+ uint32 signal_only_pkts_sent;
+ uint32 signal_only_pkts_freed;
+#endif
+ uint32 cleanup_txq_cnt;
+ uint32 cleanup_psq_cnt;
+ uint32 cleanup_fw_cnt;
+} athost_wl_stat_counters_t;
+
+#ifdef PROP_TXSTATUS_DEBUG
+#define WLFC_HOST_FIFO_CREDIT_INC_SENTCTRS(ctx, ac) do { \
+ (ctx)->stats.fifo_credits_sent[(ac)]++;} while (0)
+#define WLFC_HOST_FIFO_CREDIT_INC_BACKCTRS(ctx, ac) do { \
+ (ctx)->stats.fifo_credits_back[(ac)]++;} while (0)
+#define WLFC_HOST_FIFO_DROPPEDCTR_INC(ctx, ac) do { \
+ (ctx)->stats.dropped_qfull[(ac)]++;} while (0)
+#else
+#define WLFC_HOST_FIFO_CREDIT_INC_SENTCTRS(ctx, ac) do {} while (0)
+#define WLFC_HOST_FIFO_CREDIT_INC_BACKCTRS(ctx, ac) do {} while (0)
+#define WLFC_HOST_FIFO_DROPPEDCTR_INC(ctx, ac) do {} while (0)
+#endif
+#define WLFC_PACKET_BOUND 10
+#define WLFC_FCMODE_NONE 0
+#define WLFC_FCMODE_IMPLIED_CREDIT 1
+#define WLFC_FCMODE_EXPLICIT_CREDIT 2
+#define WLFC_ONLY_AMPDU_HOSTREORDER 3
+
+/** Reserved credits ratio when borrowed by higher priority */
+#define WLFC_BORROW_LIMIT_RATIO 4
+
+/** How long to defer borrowing in milliseconds */
+#define WLFC_BORROW_DEFER_PERIOD_MS 100
+
+/** How long to defer flow control in milliseconds */
+#define WLFC_FC_DEFER_PERIOD_MS 200
+
+/** How long to detect occurrence per AC in milliseconds */
+#define WLFC_RX_DETECTION_THRESHOLD_MS 100
+
+/** Mask to represent available ACs (note: BC/MC is ignored) */
+#define WLFC_AC_MASK 0xF
+
+/** flow control specific information, only 1 instance during driver lifetime */
+typedef struct athost_wl_status_info {
+ uint8 last_seqid_to_wlc;
+
+ /** OSL handle */
+ osl_t *osh;
+ /** dhd public struct pointer */
+ void *dhdp;
+
+ f_commitpkt_t fcommit;
+ void* commit_ctx;
+
+ /** statistics */
+ athost_wl_stat_counters_t stats;
+
+ /** incremented on eg receiving a credit map event from the dongle */
+ int Init_FIFO_credit[AC_COUNT + 2];
+ /** the additional ones are for bc/mc and ATIM FIFO */
+ int FIFO_credit[AC_COUNT + 2];
+ /** Credit borrow counts for each FIFO from each of the other FIFOs */
+ int credits_borrowed[AC_COUNT + 2][AC_COUNT + 2];
+
+ /** packet hanger and MAC->handle lookup table */
+ void *hanger;
+
+ struct {
+ /** table for individual nodes */
+ wlfc_mac_descriptor_t nodes[WLFC_MAC_DESC_TABLE_SIZE];
+ /** table for interfaces */
+ wlfc_mac_descriptor_t interfaces[WLFC_MAX_IFNUM];
+ /** A placeholder for bc/mc and for packets the OS sends to unknown (unassociated) destinations */
+ wlfc_mac_descriptor_t other;
+ } destination_entries;
+
+ wlfc_mac_descriptor_t *active_entry_head; /**< a chain of MAC descriptors */
+ int active_entry_count;
+
+ wlfc_mac_descriptor_t *requested_entry[WLFC_MAC_DESC_TABLE_SIZE];
+ int requested_entry_count;
+
+ /* pkt counts for each interface and ac */
+ int pkt_cnt_in_q[WLFC_MAX_IFNUM][AC_COUNT+1];
+ int pkt_cnt_per_ac[AC_COUNT+1];
+ int pkt_cnt_in_drv[WLFC_MAX_IFNUM][AC_COUNT+1];
+ int pkt_cnt_in_psq;
+ uint8 allow_fc; /**< Boolean */
+ uint32 fc_defer_timestamp;
+ uint32 rx_timestamp[AC_COUNT+1];
+
+ /** ON/OFF state for flow control to the host network interface */
+ uint8 hostif_flow_state[WLFC_MAX_IFNUM];
+ uint8 host_ifidx;
+
+ /** to flow control an OS interface */
+ uint8 toggle_host_if;
+
+ /** To borrow credits */
+ uint8 allow_credit_borrow;
+
+ /** ac number for the first single ac traffic */
+ uint8 single_ac;
+
+ /** Timestamp for the first single ac traffic */
+ uint32 single_ac_timestamp;
+
+ bool bcmc_credit_supported;
+
+#if defined(BCMINTERNAL) && defined(OOO_DEBUG)
+ uint8* log_buf;
+ uint32 log_buf_offset;
+ bool log_buf_full;
+#endif /* defined(BCMINTERNAL) && defined(OOO_DEBUG) */
+
+#ifdef BULK_DEQUEUE
+ uint8 max_release_count;
+#endif /* BULK_DEQUEUE */
+} athost_wl_status_info_t;
+
+/** Please be mindful that total pkttag space is 32 octets only */
+typedef struct dhd_pkttag {
+
+#ifdef BCM_OBJECT_TRACE
+ /* if use this field, keep it at the first 4 bytes */
+ uint32 sn;
+#endif /* BCM_OBJECT_TRACE */
+
+ /**
+ b[15] - 1 = wlfc packet
+ b[14:13] - encryption exemption
+ b[12 ] - 1 = event channel
+ b[11 ] - 1 = this packet was sent in response to one time packet request,
+ do not increment credit on status for this one. [WLFC_CTL_TYPE_MAC_REQUEST_PACKET].
+ b[10 ] - 1 = signal-only-packet to firmware [i.e. nothing to piggyback on]
+ b[9 ] - 1 = packet is host->firmware (transmit direction)
+ - 0 = packet received from firmware (firmware->host)
+ b[8 ] - 1 = packet was sent due to credit_request (pspoll),
+ packet does not count against FIFO credit.
+ - 0 = normal transaction, packet counts against FIFO credit
+ b[7 ] - 1 = AP, 0 = STA
+ b[6:4] - AC FIFO number
+ b[3:0] - interface index
+ */
+ uint16 if_flags;
+
+ /**
+ * destination MAC address for this packet so that not every module needs to open the packet
+ * to find this
+ */
+ uint8 dstn_ether[ETHER_ADDR_LEN];
+
+ /** This 32-bit goes from host to device for every packet. */
+ uint32 htod_tag;
+
+ /** This 16-bit is original d11seq number for every suppressed packet. */
+ uint16 htod_seq;
+
+ /** This address is mac entry for every packet. */
+ void *entry;
+
+ /** bus specific stuff */
+ union {
+ struct {
+ void *stuff;
+ uint32 thing1;
+ uint32 thing2;
+ } sd;
+
+ /* XXX: using the USB typedef here will complicate life for anybody using dhd.h */
+ struct {
+ void *bus;
+ void *urb;
+ } usb;
+ } bus_specific;
+} dhd_pkttag_t;
+
+#define DHD_PKTTAG_WLFCPKT_MASK 0x1
+#define DHD_PKTTAG_WLFCPKT_SHIFT 15
+#define DHD_PKTTAG_WLFCPKT_SET(tag, value) ((dhd_pkttag_t*)(tag))->if_flags = \
+ (((dhd_pkttag_t*)(tag))->if_flags & \
+ ~(DHD_PKTTAG_WLFCPKT_MASK << DHD_PKTTAG_WLFCPKT_SHIFT)) | \
+ (((value) & DHD_PKTTAG_WLFCPKT_MASK) << DHD_PKTTAG_WLFCPKT_SHIFT)
+#define DHD_PKTTAG_WLFCPKT(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \
+ DHD_PKTTAG_WLFCPKT_SHIFT) & DHD_PKTTAG_WLFCPKT_MASK)
+
+#define DHD_PKTTAG_EXEMPT_MASK 0x3
+#define DHD_PKTTAG_EXEMPT_SHIFT 13
+#define DHD_PKTTAG_EXEMPT_SET(tag, value) ((dhd_pkttag_t*)(tag))->if_flags = \
+ (((dhd_pkttag_t*)(tag))->if_flags & \
+ ~(DHD_PKTTAG_EXEMPT_MASK << DHD_PKTTAG_EXEMPT_SHIFT)) | \
+ (((value) & DHD_PKTTAG_EXEMPT_MASK) << DHD_PKTTAG_EXEMPT_SHIFT)
+#define DHD_PKTTAG_EXEMPT(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \
+ DHD_PKTTAG_EXEMPT_SHIFT) & DHD_PKTTAG_EXEMPT_MASK)
+
+#define DHD_PKTTAG_EVENT_MASK 0x1
+#define DHD_PKTTAG_EVENT_SHIFT 12
+#define DHD_PKTTAG_SETEVENT(tag, event) ((dhd_pkttag_t*)(tag))->if_flags = \
+ (((dhd_pkttag_t*)(tag))->if_flags & \
+ ~(DHD_PKTTAG_EVENT_MASK << DHD_PKTTAG_EVENT_SHIFT)) | \
+ (((event) & DHD_PKTTAG_EVENT_MASK) << DHD_PKTTAG_EVENT_SHIFT)
+#define DHD_PKTTAG_EVENT(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \
+ DHD_PKTTAG_EVENT_SHIFT) & DHD_PKTTAG_EVENT_MASK)
+
+#define DHD_PKTTAG_ONETIMEPKTRQST_MASK 0x1
+#define DHD_PKTTAG_ONETIMEPKTRQST_SHIFT 11
+#define DHD_PKTTAG_SETONETIMEPKTRQST(tag) ((dhd_pkttag_t*)(tag))->if_flags = \
+ (((dhd_pkttag_t*)(tag))->if_flags & \
+ ~(DHD_PKTTAG_ONETIMEPKTRQST_MASK << DHD_PKTTAG_ONETIMEPKTRQST_SHIFT)) | \
+ (1 << DHD_PKTTAG_ONETIMEPKTRQST_SHIFT)
+#define DHD_PKTTAG_ONETIMEPKTRQST(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \
+ DHD_PKTTAG_ONETIMEPKTRQST_SHIFT) & DHD_PKTTAG_ONETIMEPKTRQST_MASK)
+
+#define DHD_PKTTAG_SIGNALONLY_MASK 0x1
+#define DHD_PKTTAG_SIGNALONLY_SHIFT 10
+#define DHD_PKTTAG_SETSIGNALONLY(tag, signalonly) ((dhd_pkttag_t*)(tag))->if_flags = \
+ (((dhd_pkttag_t*)(tag))->if_flags & \
+ ~(DHD_PKTTAG_SIGNALONLY_MASK << DHD_PKTTAG_SIGNALONLY_SHIFT)) | \
+ (((signalonly) & DHD_PKTTAG_SIGNALONLY_MASK) << DHD_PKTTAG_SIGNALONLY_SHIFT)
+#define DHD_PKTTAG_SIGNALONLY(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \
+ DHD_PKTTAG_SIGNALONLY_SHIFT) & DHD_PKTTAG_SIGNALONLY_MASK)
+
+#define DHD_PKTTAG_PKTDIR_MASK 0x1
+#define DHD_PKTTAG_PKTDIR_SHIFT 9
+#define DHD_PKTTAG_SETPKTDIR(tag, dir) ((dhd_pkttag_t*)(tag))->if_flags = \
+ (((dhd_pkttag_t*)(tag))->if_flags & \
+ ~(DHD_PKTTAG_PKTDIR_MASK << DHD_PKTTAG_PKTDIR_SHIFT)) | \
+ (((dir) & DHD_PKTTAG_PKTDIR_MASK) << DHD_PKTTAG_PKTDIR_SHIFT)
+#define DHD_PKTTAG_PKTDIR(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \
+ DHD_PKTTAG_PKTDIR_SHIFT) & DHD_PKTTAG_PKTDIR_MASK)
+
+#define DHD_PKTTAG_CREDITCHECK_MASK 0x1
+#define DHD_PKTTAG_CREDITCHECK_SHIFT 8
+#define DHD_PKTTAG_SETCREDITCHECK(tag, check) ((dhd_pkttag_t*)(tag))->if_flags = \
+ (((dhd_pkttag_t*)(tag))->if_flags & \
+ ~(DHD_PKTTAG_CREDITCHECK_MASK << DHD_PKTTAG_CREDITCHECK_SHIFT)) | \
+ (((check) & DHD_PKTTAG_CREDITCHECK_MASK) << DHD_PKTTAG_CREDITCHECK_SHIFT)
+#define DHD_PKTTAG_CREDITCHECK(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \
+ DHD_PKTTAG_CREDITCHECK_SHIFT) & DHD_PKTTAG_CREDITCHECK_MASK)
+
+#define DHD_PKTTAG_IFTYPE_MASK 0x1
+#define DHD_PKTTAG_IFTYPE_SHIFT 7
+#define DHD_PKTTAG_SETIFTYPE(tag, isAP) ((dhd_pkttag_t*)(tag))->if_flags = \
+ (((dhd_pkttag_t*)(tag))->if_flags & \
+ ~(DHD_PKTTAG_IFTYPE_MASK << DHD_PKTTAG_IFTYPE_SHIFT)) | \
+ (((isAP) & DHD_PKTTAG_IFTYPE_MASK) << DHD_PKTTAG_IFTYPE_SHIFT)
+#define DHD_PKTTAG_IFTYPE(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \
+ DHD_PKTTAG_IFTYPE_SHIFT) & DHD_PKTTAG_IFTYPE_MASK)
+
+#define DHD_PKTTAG_FIFO_MASK 0x7
+#define DHD_PKTTAG_FIFO_SHIFT 4
+#define DHD_PKTTAG_SETFIFO(tag, fifo) ((dhd_pkttag_t*)(tag))->if_flags = \
+ (((dhd_pkttag_t*)(tag))->if_flags & ~(DHD_PKTTAG_FIFO_MASK << DHD_PKTTAG_FIFO_SHIFT)) | \
+ (((fifo) & DHD_PKTTAG_FIFO_MASK) << DHD_PKTTAG_FIFO_SHIFT)
+#define DHD_PKTTAG_FIFO(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \
+ DHD_PKTTAG_FIFO_SHIFT) & DHD_PKTTAG_FIFO_MASK)
+
+#define DHD_PKTTAG_IF_MASK 0xf
+#define DHD_PKTTAG_IF_SHIFT 0
+#define DHD_PKTTAG_SETIF(tag, if) ((dhd_pkttag_t*)(tag))->if_flags = \
+ (((dhd_pkttag_t*)(tag))->if_flags & ~(DHD_PKTTAG_IF_MASK << DHD_PKTTAG_IF_SHIFT)) | \
+ (((if) & DHD_PKTTAG_IF_MASK) << DHD_PKTTAG_IF_SHIFT)
+#define DHD_PKTTAG_IF(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \
+ DHD_PKTTAG_IF_SHIFT) & DHD_PKTTAG_IF_MASK)
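
    All of the if_flags accessors above follow the same mask/shift pattern
    over the bit layout documented in dhd_pkttag. A usage sketch with
    arbitrary values; the helper name is hypothetical and a local struct
    stands in for a real packet's tag area:

        /* Sketch only: pack an AC FIFO number and interface index into
         * if_flags, then read them back via the macros above.
         */
        static void
        dhd_pkttag_flags_demo(void)
        {
        	dhd_pkttag_t tag;

        	memset(&tag, 0, sizeof(tag));
        	DHD_PKTTAG_SETFIFO(&tag, 2);	/* b[6:4] = AC FIFO 2 */
        	DHD_PKTTAG_SETIF(&tag, 1);	/* b[3:0] = interface 1 */
        	ASSERT(DHD_PKTTAG_FIFO(&tag) == 2);
        	ASSERT(DHD_PKTTAG_IF(&tag) == 1);
        }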
+
+#define DHD_PKTTAG_SETDSTN(tag, dstn_MAC_ea) memcpy(((dhd_pkttag_t*)((tag)))->dstn_ether, \
+ (dstn_MAC_ea), ETHER_ADDR_LEN)
+#define DHD_PKTTAG_DSTN(tag) ((dhd_pkttag_t*)(tag))->dstn_ether
+
+#define DHD_PKTTAG_SET_H2DTAG(tag, h2dvalue) ((dhd_pkttag_t*)(tag))->htod_tag = (h2dvalue)
+#define DHD_PKTTAG_H2DTAG(tag) (((dhd_pkttag_t*)(tag))->htod_tag)
+
+#define DHD_PKTTAG_SET_H2DSEQ(tag, seq) ((dhd_pkttag_t*)(tag))->htod_seq = (seq)
+#define DHD_PKTTAG_H2DSEQ(tag) (((dhd_pkttag_t*)(tag))->htod_seq)
+
+#define DHD_PKTTAG_SET_ENTRY(tag, entry) ((dhd_pkttag_t*)(tag))->entry = (entry)
+#define DHD_PKTTAG_ENTRY(tag) (((dhd_pkttag_t*)(tag))->entry)
+
+#define PSQ_SUP_IDX(x) ((x) * 2 + 1)
+#define PSQ_DLY_IDX(x) ((x) * 2)
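
    PSQ_DLY_IDX/PSQ_SUP_IDX map each AC, plus the bc/mc pseudo-AC, onto an
    even/odd (delayed/suppressed) precedence pair inside the psq, matching
    WLFC_PSQ_PREC_COUNT above. An illustrative sketch; the helper name is
    hypothetical and AC_COUNT == 4 is assumed:

        /* Illustration only: precedences 0..9 pair up per AC, the last
         * pair (8,9) serving bc/mc.
         */
        static void
        wlfc_psq_prec_demo(void)
        {
        	int ac;

        	for (ac = 0; ac <= AC_COUNT; ac++) {
        		printf("ac %d: delayed prec %d, suppressed prec %d\n",
        		       ac, PSQ_DLY_IDX(ac), PSQ_SUP_IDX(ac));
        	}
        	/* prints pairs (0,1) (2,3) (4,5) (6,7) (8,9) */
        }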
+
+#ifdef PROP_TXSTATUS_DEBUG
+#define DHD_WLFC_CTRINC_MAC_CLOSE(entry) do { (entry)->closed_ct++; } while (0)
+#define DHD_WLFC_CTRINC_MAC_OPEN(entry) do { (entry)->opened_ct++; } while (0)
+#else
+#define DHD_WLFC_CTRINC_MAC_CLOSE(entry) do {} while (0)
+#define DHD_WLFC_CTRINC_MAC_OPEN(entry) do {} while (0)
+#endif
+
+#ifdef BCM_OBJECT_TRACE
+#define DHD_PKTTAG_SET_SN(tag, val) ((dhd_pkttag_t*)(tag))->sn = (val)
+#define DHD_PKTTAG_SN(tag) (((dhd_pkttag_t*)(tag))->sn)
+#endif /* BCM_OBJECT_TRACE */
+
+#define DHD_PKTID_IF_SHIFT (16u)
+#define DHD_PKTID_FIFO_SHIFT (8u)
+
+/* public functions */
+int dhd_wlfc_parse_header_info(dhd_pub_t *dhd, void* pktbuf, int tlv_hdr_len,
+ uchar *reorder_info_buf, uint *reorder_info_len);
+KERNEL_THREAD_RETURN_TYPE dhd_wlfc_transfer_packets(void *data);
+int dhd_wlfc_commit_packets(dhd_pub_t *dhdp, f_commitpkt_t fcommit,
+ struct dhd_bus *commit_ctx, void *pktbuf, bool need_toggle_host_if);
+int dhd_wlfc_txcomplete(dhd_pub_t *dhd, void *txp, bool success);
+int dhd_wlfc_init(dhd_pub_t *dhd);
+#ifdef SUPPORT_P2P_GO_PS
+int dhd_wlfc_suspend(dhd_pub_t *dhd);
+int dhd_wlfc_resume(dhd_pub_t *dhd);
+#endif /* SUPPORT_P2P_GO_PS */
+int dhd_wlfc_hostreorder_init(dhd_pub_t *dhd);
+int dhd_wlfc_cleanup_txq(dhd_pub_t *dhd, f_processpkt_t fn, void *arg);
+int dhd_wlfc_cleanup(dhd_pub_t *dhd, f_processpkt_t fn, void* arg);
+int dhd_wlfc_deinit(dhd_pub_t *dhd);
+int dhd_wlfc_interface_event(dhd_pub_t *dhdp, uint8 action, uint8 ifid, uint8 iftype, uint8* ea);
+int dhd_wlfc_FIFOcreditmap_event(dhd_pub_t *dhdp, uint8* event_data);
+#ifdef LIMIT_BORROW
+int dhd_wlfc_disable_credit_borrow_event(dhd_pub_t *dhdp, uint8* event_data);
+#endif /* LIMIT_BORROW */
+int dhd_wlfc_BCMCCredit_support_event(dhd_pub_t *dhdp);
+int dhd_wlfc_enable(dhd_pub_t *dhdp);
+int dhd_wlfc_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf);
+int dhd_wlfc_clear_counts(dhd_pub_t *dhd);
+int dhd_wlfc_get_enable(dhd_pub_t *dhd, bool *val);
+int dhd_wlfc_get_mode(dhd_pub_t *dhd, int *val);
+int dhd_wlfc_set_mode(dhd_pub_t *dhd, int val);
+bool dhd_wlfc_is_supported(dhd_pub_t *dhd);
+bool dhd_wlfc_is_header_only_pkt(dhd_pub_t * dhd, void *pktbuf);
+int dhd_wlfc_flowcontrol(dhd_pub_t *dhdp, bool state, bool bAcquireLock);
+int dhd_wlfc_save_rxpath_ac_time(dhd_pub_t * dhd, uint8 prio);
+
+int dhd_wlfc_get_module_ignore(dhd_pub_t *dhd, int *val);
+int dhd_wlfc_set_module_ignore(dhd_pub_t *dhd, int val);
+int dhd_wlfc_get_credit_ignore(dhd_pub_t *dhd, int *val);
+int dhd_wlfc_set_credit_ignore(dhd_pub_t *dhd, int val);
+int dhd_wlfc_get_txstatus_ignore(dhd_pub_t *dhd, int *val);
+int dhd_wlfc_set_txstatus_ignore(dhd_pub_t *dhd, int val);
+
+int dhd_wlfc_get_rxpkt_chk(dhd_pub_t *dhd, int *val);
+int dhd_wlfc_set_rxpkt_chk(dhd_pub_t *dhd, int val);
+int dhd_txpkt_log_and_dump(dhd_pub_t *dhdp, void* pkt, uint16 *pktfate_status);
+#ifdef PROPTX_MAXCOUNT
+int dhd_wlfc_update_maxcount(dhd_pub_t *dhdp, uint8 ifid, int maxcount);
+#endif /* PROPTX_MAXCOUNT */
+
+#endif /* __wlfc_host_driver_definitions_h__ */
diff --git a/bcmdhd.101.10.361.x/frag.c b/bcmdhd.101.10.361.x/frag.c
new file mode 100755
index 0000000..e49c335
--- /dev/null
+++ b/bcmdhd.101.10.361.x/frag.c
@@ -0,0 +1,108 @@
+/*
+ * IE/TLV fragmentation/defragmentation support for
+ * Broadcom 802.11abgn Networking Device Driver
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#include <bcmutils.h>
+#include <frag.h>
+#include <802.11.h>
+
+/* Defragment a fragmented dot11 ie/tlv. If space does not permit, return the
+ * ie length needed to contain all the fragments with status BCME_BUFTOOSHORT.
+ * out_len is an in/out parameter: max length on input, used/required length
+ * on output.
+ */
+int
+bcm_tlv_dot11_defrag(const void *buf, uint buf_len, uint8 id, bool id_ext,
+ uint8 *out, uint *out_len)
+{
+ int err = BCME_OK;
+ const bcm_tlv_t *ie;
+ uint tot_len = 0;
+ uint out_left;
+
+ /* find the ie; includes validation */
+ ie = bcm_parse_tlvs_dot11(buf, buf_len, id, id_ext);
+ if (!ie) {
+ err = BCME_IE_NOTFOUND;
+ goto done;
+ }
+
+ out_left = (out && out_len) ? *out_len : 0;
+
+ /* first fragment */
+ tot_len = id_ext ? ie->len - 1 : ie->len;
+
+ /* copy out if output space permits */
+ if (out_left < tot_len) {
+ err = BCME_BUFTOOSHORT;
+ out_left = 0; /* prevent further copy */
+ } else {
+ memcpy(out, &ie->data[id_ext ? 1 : 0], tot_len);
+ out += tot_len;
+ out_left -= tot_len;
+ }
+
+ /* bail if not fragmented, or not fragmentable per 802.11 table 9-77 (11md0.1);
+ * the latter check can be introduced later
+ */
+ if (ie->len != BCM_TLV_MAX_DATA_SIZE) {
+ goto done;
+ }
+
+ /* adjust buf_len to the remaining length starting at this ie (inclusive) */
+ buf_len -= (uint)(((const uint8 *)ie - (const uint8 *)buf));
+
+ /* update length from fragments, okay if no next ie */
+ while ((ie = bcm_next_tlv(ie, &buf_len)) &&
+ (ie->id == DOT11_MNG_FRAGMENT_ID)) {
+ /* note: buf_len starts at next ie and last frag may be partial */
+ if (out_left < ie->len) {
+ err = BCME_BUFTOOSHORT;
+ out_left = 0;
+ } else {
+ memcpy(out, &ie->data[0], ie->len);
+ out += ie->len;
+ out_left -= ie->len;
+ }
+
+ tot_len += ie->len + BCM_TLV_HDR_SIZE;
+
+ /* all but last should be of max size */
+ if (ie->len < BCM_TLV_MAX_DATA_SIZE) {
+ break;
+ }
+ }
+
+done:
+ if (out_len) {
+ *out_len = tot_len;
+ }
+
+ return err;
+}
+
+int
+bcm_tlv_dot11_frag_tot_len(const void *buf, uint buf_len,
+ uint8 id, bool id_ext, uint *ie_len)
+{
+ return bcm_tlv_dot11_defrag(buf, buf_len, id, id_ext, NULL, ie_len);
+}
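
    The two entry points above suggest a two-pass calling pattern: query
    the total defragmented length, allocate, then defrag. A hedged sketch
    under that assumption, using MALLOCZ/MFREE from the driver's osl
    layer; the helper name and parameters are hypothetical:

        /* Sketch only: two-pass defrag of a possibly-fragmented IE.
         * buf/buf_len hold the IE stream; id/id_ext select the IE.
         */
        static int
        defrag_ie_demo(osl_t *osh, const void *buf, uint buf_len,
                       uint8 id, bool id_ext)
        {
        	uint need = 0;
        	int err = bcm_tlv_dot11_frag_tot_len(buf, buf_len, id, id_ext, &need);

        	/* pass 1 typically returns BCME_BUFTOOSHORT: no output buffer given */
        	if ((err == BCME_OK || err == BCME_BUFTOOSHORT) && need > 0) {
        		uint out_len = need;
        		uint8 *out = (uint8 *)MALLOCZ(osh, need);

        		if (out == NULL)
        			return BCME_NOMEM;
        		err = bcm_tlv_dot11_defrag(buf, buf_len, id, id_ext, out, &out_len);
        		/* on BCME_OK, out[0..out_len-1] holds the defragmented IE body */
        		MFREE(osh, out, need);
        	}
        	return err;
        }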
diff --git a/bcmdhd.101.10.361.x/frag.h b/bcmdhd.101.10.361.x/frag.h
new file mode 100755
index 0000000..e14edd9
--- /dev/null
+++ b/bcmdhd.101.10.361.x/frag.h
@@ -0,0 +1,32 @@
+/*
+ * IE/TLV (de)fragmentation declarations/definitions for
+ * Broadcom 802.11abgn Networking Device Driver
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ *
+ */
+
+#ifndef __FRAG_H__
+#define __FRAG_H__
+
+int bcm_tlv_dot11_frag_tot_len(const void *buf, uint buf_len,
+ uint8 id, bool id_ext, uint *ie_len);
+
+#endif /* __FRAG_H__ */
diff --git a/bcmdhd.101.10.361.x/ftdi_sio_external.h b/bcmdhd.101.10.361.x/ftdi_sio_external.h
new file mode 100755
index 0000000..8c021a1
--- /dev/null
+++ b/bcmdhd.101.10.361.x/ftdi_sio_external.h
@@ -0,0 +1,39 @@
+/*
+ * External driver API to ftdi_sio_brcm driver.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: $
+ */
+
+typedef struct usb_serial_port * gpio_handle;
+
+#define BITMODE_RESET 0x00
+#define BITMODE_BITBANG 0x01
+
+int ftdi_usb_reset(int handle);
+int ftdi_set_bitmode(int handle, unsigned char bitmask, unsigned char mode);
+int gpio_write_port(int handle, unsigned char pins);
+int gpio_write_port_non_block(int handle, unsigned char pins);
+int gpio_read_port(int handle, unsigned char *pins);
+int handle_add(gpio_handle pointer);
+int handle_remove(gpio_handle pointer);
+int get_handle(const char *dev_filename);
+gpio_handle get_pointer_by_handle(int handle);
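
    A hedged usage sketch of the GPIO surface declared above; the device
    path and the 0-on-success return convention are assumptions, as this
    header does not document them:

        /* Sketch only: open a handle, switch to bitbang mode, drive pin 0. */
        static void
        ftdi_gpio_demo(void)
        {
        	int handle = get_handle("/dev/ttyUSB0");	/* assumed device path */

        	if (handle >= 0) {
        		if (ftdi_set_bitmode(handle, 0xFF, BITMODE_BITBANG) == 0) {
        			(void)gpio_write_port(handle, 0x01);	/* raise pin 0 */
        		}
        		(void)ftdi_usb_reset(handle);	/* assumed to restore the port */
        	}
        }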
diff --git a/bcmdhd.101.10.361.x/hnd_pktpool.c b/bcmdhd.101.10.361.x/hnd_pktpool.c
new file mode 100755
index 0000000..eee518a
--- /dev/null
+++ b/bcmdhd.101.10.361.x/hnd_pktpool.c
@@ -0,0 +1,2130 @@
+/*
+ * HND generic packet pool operation primitives
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#include <typedefs.h>
+#include <osl.h>
+#include <osl_ext.h>
+#include <bcmutils.h>
+#include <wlioctl.h>
+#include <hnd_pktpool.h>
+#ifdef BCMRESVFRAGPOOL
+#include <hnd_resvpool.h>
+#endif /* BCMRESVFRAGPOOL */
+#ifdef BCMFRWDPOOLREORG
+#include <hnd_poolreorg.h>
+#endif /* BCMFRWDPOOLREORG */
+
+#if defined(DONGLEBUILD) && defined(SRMEM)
+#include <hndsrmem.h>
+#endif /* DONGLEBUILD && SRMEM */
+#if defined(DONGLEBUILD)
+#include <d11_cfg.h>
+#endif
+
+/* mutex macros for thread safe */
+#ifdef HND_PKTPOOL_THREAD_SAFE
+#define HND_PKTPOOL_MUTEX_CREATE(name, mutex) osl_ext_mutex_create(name, mutex)
+#define HND_PKTPOOL_MUTEX_DELETE(mutex) osl_ext_mutex_delete(mutex)
+#define HND_PKTPOOL_MUTEX_ACQUIRE(mutex, msec) osl_ext_mutex_acquire(mutex, msec)
+#define HND_PKTPOOL_MUTEX_RELEASE(mutex) osl_ext_mutex_release(mutex)
+#else
+#define HND_PKTPOOL_MUTEX_CREATE(name, mutex) OSL_EXT_SUCCESS
+#define HND_PKTPOOL_MUTEX_DELETE(mutex) OSL_EXT_SUCCESS
+#define HND_PKTPOOL_MUTEX_ACQUIRE(mutex, msec) OSL_EXT_SUCCESS
+#define HND_PKTPOOL_MUTEX_RELEASE(mutex) OSL_EXT_SUCCESS
+#endif
+
+/* Registry size is one larger than max pools, as slot #0 is reserved */
+#define PKTPOOLREG_RSVD_ID (0U)
+#define PKTPOOLREG_RSVD_PTR (POOLPTR(0xdeaddead))
+#define PKTPOOLREG_FREE_PTR (POOLPTR(NULL))
+
+#define PKTPOOL_REGISTRY_SET(id, pp) (pktpool_registry_set((id), (pp)))
+#define PKTPOOL_REGISTRY_CMP(id, pp) (pktpool_registry_cmp((id), (pp)))
+
+/* Tag a registry entry as free for use */
+#define PKTPOOL_REGISTRY_CLR(id) \
+ PKTPOOL_REGISTRY_SET((id), PKTPOOLREG_FREE_PTR)
+#define PKTPOOL_REGISTRY_ISCLR(id) \
+ (PKTPOOL_REGISTRY_CMP((id), PKTPOOLREG_FREE_PTR))
+
+/* Tag registry entry 0 as reserved */
+#define PKTPOOL_REGISTRY_RSV() \
+ PKTPOOL_REGISTRY_SET(PKTPOOLREG_RSVD_ID, PKTPOOLREG_RSVD_PTR)
+#define PKTPOOL_REGISTRY_ISRSVD() \
+ (PKTPOOL_REGISTRY_CMP(PKTPOOLREG_RSVD_ID, PKTPOOLREG_RSVD_PTR))
+
+/* Walk all un-reserved entries in registry */
+#define PKTPOOL_REGISTRY_FOREACH(id) \
+ for ((id) = 1U; (id) <= pktpools_max; (id)++)
+
+enum pktpool_empty_cb_state {
+ EMPTYCB_ENABLED = 0, /* Enable callback when new packets are added to pool */
+ EMPTYCB_DISABLED, /* Disable callback when new packets are added to pool */
+ EMPTYCB_SKIPPED /* Packet was added to pool when callback was disabled */
+};
+
+uint32 pktpools_max = 0U; /* maximum number of pools that may be initialized */
+pktpool_t *pktpools_registry[PKTPOOL_MAXIMUM_ID + 1]; /* Pktpool registry */
+
+/* Number of pktids currently reserved for pktpool usage.
+ * Initialized to the max pktids reserved for pktpool.
+ * pktpool_init, pktpool_fill and pktpool_refill decrement it;
+ * pktpool_reclaim, pktpool_empty and heap_pkt_release increment it.
+ */
+#ifdef DONGLEBUILD
+uint32 total_pool_pktid_count = PKTID_POOL;
+#else
+uint32 total_pool_pktid_count = 0U;
+#endif /* DONGLEBUILD */
+
+#ifdef POOL_HEAP_RECONFIG
+typedef struct pktpool_heap_cb_reg {
+ pktpool_heap_cb_t fn;
+ void *ctxt;
+ uint32 flag;
+} pktpool_heap_cb_reg_t;
+#define PKTPOOL_MAX_HEAP_CB 2
+pktpool_heap_cb_reg_t pktpool_heap_cb_reg[PKTPOOL_MAX_HEAP_CB];
+uint32 pktpool_heap_rel_active = 0U;
+
+static void hnd_pktpool_heap_pkt_release(osl_t *osh, pktpool_t *pktp, uint32 flag);
+static void hnd_pktpool_heap_pkt_retrieve(pktpool_t *pktp, uint32 flag);
+static int hnd_pktpool_heap_get_cb(uint8 handle, void *ctxt, void *pkt, uint pktsize);
+static void hnd_pktpool_lbuf_free_cb(uint8 poolid);
+static pktpool_heap_cb_reg_t *BCMRAMFN(hnd_pool_get_cb_registry)(void);
+#endif /* POOL_HEAP_RECONFIG */
+
+/* Register/Deregister a pktpool with registry during pktpool_init/deinit */
+static int pktpool_register(pktpool_t * poolptr);
+static int pktpool_deregister(pktpool_t * poolptr);
+
+/** forward declaration */
+static void pktpool_avail_notify(pktpool_t *pktp);
+
+/** accessor functions required when ROMming this file, forced into RAM */
+
+pktpool_t *
+BCMPOSTTRAPRAMFN(get_pktpools_registry)(int id)
+{
+ return pktpools_registry[id];
+}
+
+static void
+BCMRAMFN(pktpool_registry_set)(int id, pktpool_t *pp)
+{
+ pktpools_registry[id] = pp;
+}
+
+static bool
+BCMRAMFN(pktpool_registry_cmp)(int id, pktpool_t *pp)
+{
+ return pktpools_registry[id] == pp;
+}
+
+/** Constructs a pool registry to serve a maximum of total_pools */
+int
+BCMATTACHFN(pktpool_attach)(osl_t *osh, uint32 total_pools)
+{
+ uint32 poolid;
+ BCM_REFERENCE(osh);
+
+ if (pktpools_max != 0U) {
+ return BCME_ERROR;
+ }
+
+ ASSERT(total_pools <= PKTPOOL_MAXIMUM_ID);
+
+ /* Initialize registry: reserve slot#0 and tag others as free */
+ PKTPOOL_REGISTRY_RSV(); /* reserve slot#0 */
+
+ PKTPOOL_REGISTRY_FOREACH(poolid) { /* tag all unreserved entries as free */
+ PKTPOOL_REGISTRY_CLR(poolid);
+ }
+
+ pktpools_max = total_pools;
+
+ return (int)pktpools_max;
+}
+
+/** Destructs the pool registry, ascertaining that all pools were first de-inited */
+int
+BCMATTACHFN(pktpool_dettach)(osl_t *osh)
+{
+ uint32 poolid;
+ BCM_REFERENCE(osh);
+
+ if (pktpools_max == 0U) {
+ return BCME_OK;
+ }
+
+ /* Ascertain that no pools are still registered */
+ ASSERT(PKTPOOL_REGISTRY_ISRSVD()); /* assert reserved slot */
+
+ PKTPOOL_REGISTRY_FOREACH(poolid) { /* ascertain all others are free */
+ ASSERT(PKTPOOL_REGISTRY_ISCLR(poolid));
+ }
+
+ pktpools_max = 0U; /* restore boot state */
+
+ return BCME_OK;
+}
+
+/** Registers a pool in a free slot; returns the registry slot index */
+static int
+BCMATTACHFN(pktpool_register)(pktpool_t * poolptr)
+{
+ uint32 poolid;
+
+ if (pktpools_max == 0U) {
+ return PKTPOOL_INVALID_ID; /* registry has not yet been constructed */
+ }
+
+ ASSERT(pktpools_max != 0U);
+
+ /* find an empty slot in pktpools_registry */
+ PKTPOOL_REGISTRY_FOREACH(poolid) {
+ if (PKTPOOL_REGISTRY_ISCLR(poolid)) {
+ PKTPOOL_REGISTRY_SET(poolid, POOLPTR(poolptr)); /* register pool */
+ return (int)poolid; /* return pool ID */
+ }
+ } /* FOREACH */
+
+ return PKTPOOL_INVALID_ID; /* error: registry is full */
+}
+
+/** Deregisters a pktpool, given the pool pointer; tag slot as free */
+static int
+BCMATTACHFN(pktpool_deregister)(pktpool_t * poolptr)
+{
+ uint32 poolid;
+
+ ASSERT(POOLPTR(poolptr) != POOLPTR(NULL));
+
+ poolid = POOLID(poolptr);
+ ASSERT(poolid <= pktpools_max);
+
+ /* Ascertain that a previously registered poolptr is being de-registered */
+ if (PKTPOOL_REGISTRY_CMP(poolid, POOLPTR(poolptr))) {
+ PKTPOOL_REGISTRY_CLR(poolid); /* mark as free */
+ } else {
+ ASSERT(0);
+ return BCME_ERROR; /* mismatch in registry */
+ }
+
+ return BCME_OK;
+}
+
+/**
+ * pktpool_init:
+ * User provides a pktpool_t structure and specifies the number of packets to
+ * be pre-filled into the pool (n_pkts).
+ * pktpool_init first attempts to register the pool and fetch a unique poolid.
+ * If registration fails, it is considered a BCME_ERROR, caused either by the
+ * registry not having been pre-created (pktpool_attach) or by the registry
+ * being full.
+ * If registration succeeds, the requested number of packets is filled into
+ * the pool as part of initialization. If there is not enough memory to
+ * service the request, BCME_NOMEM is returned along with the count of how
+ * many packets were successfully allocated.
+ * In dongle builds, prior to memory reclamation, one should limit the number
+ * of packets allocated during pktpool_init and fill the pool up after the
+ * reclaim stage.
+ *
+ * @param n_pkts Number of packets to be pre-filled into the pool
+ * @param max_pkt_bytes The size of all packets in a pool must be the same. E.g. PKTBUFSZ.
+ * @param type e.g. 'lbuf_frag'
+ */
+int
+BCMATTACHFN(pktpool_init)(osl_t *osh,
+ pktpool_t *pktp,
+ int *n_pkts,
+ int max_pkt_bytes,
+ bool istx,
+ uint8 type,
+ bool is_heap_pool,
+ uint32 heap_pool_flag,
+ uint16 min_backup_buf)
+{
+ int i, err = BCME_OK;
+ int pktplen;
+ uint8 pktp_id;
+
+ ASSERT(pktp != NULL);
+ ASSERT(osh != NULL);
+ ASSERT(n_pkts != NULL);
+
+ pktplen = *n_pkts;
+
+ bzero(pktp, sizeof(pktpool_t));
+
+ /* assign a unique pktpool id */
+ if ((pktp_id = (uint8) pktpool_register(pktp)) == PKTPOOL_INVALID_ID) {
+ return BCME_ERROR;
+ }
+ POOLSETID(pktp, pktp_id);
+
+ pktp->inited = TRUE;
+ pktp->istx = istx ? TRUE : FALSE;
+ pktp->max_pkt_bytes = (uint16)max_pkt_bytes;
+ pktp->type = type;
+
+#ifdef POOL_HEAP_RECONFIG
+ pktp->poolheap_flag = heap_pool_flag;
+ pktp->poolheap_count = 0;
+ pktp->min_backup_buf = min_backup_buf;
+ if (is_heap_pool) {
+ if (rte_freelist_mgr_register(&pktp->mem_handle,
+ hnd_pktpool_heap_get_cb,
+ lb_get_pktalloclen(type, max_pkt_bytes),
+ pktp) != BCME_OK) {
+ return BCME_ERROR;
+ }
+ }
+ pktp->is_heap_pool = is_heap_pool;
+#endif
+ if (HND_PKTPOOL_MUTEX_CREATE("pktpool", &pktp->mutex) != OSL_EXT_SUCCESS) {
+ return BCME_ERROR;
+ }
+
+ pktp->maxlen = PKTPOOL_LEN_MAX;
+ pktplen = LIMIT_TO_MAX(pktplen, pktp->maxlen);
+
+ for (i = 0; i < pktplen; i++) {
+ void *p;
+#ifdef _RTE_
+		/* For rte builds, use PKTALLOC rather than PKTGET to avoid the same
+		 * pkts being dequeued and enqueued to the pool when allocation fails.
+		 */
+ p = PKTALLOC(osh, max_pkt_bytes, type);
+#else
+ p = PKTGET(osh, max_pkt_bytes, TRUE);
+#endif
+
+ if (p == NULL) {
+ /* Not able to allocate all requested pkts
+ * so just return what was actually allocated
+ * We can add to the pool later
+ */
+ if (pktp->freelist == NULL) /* pktpool free list is empty */
+ err = BCME_NOMEM;
+
+ goto exit;
+ }
+
+ PKTSETPOOL(osh, p, TRUE, pktp); /* Tag packet with pool ID */
+
+ PKTSETFREELIST(p, pktp->freelist); /* insert p at head of free list */
+ pktp->freelist = p;
+
+ pktp->avail++;
+
+ ASSERT(total_pool_pktid_count > 0);
+ total_pool_pktid_count--;
+
+#ifdef BCMDBG_POOL
+ pktp->dbg_q[pktp->dbg_qlen++].p = p;
+#endif
+ }
+
+exit:
+ pktp->n_pkts = pktp->avail;
+
+ *n_pkts = pktp->n_pkts; /* number of packets managed by pool */
+ return err;
+} /* pktpool_init */
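
    A hedged end-to-end sketch of the pool lifecycle using only functions
    defined in this file; 'osh' is an existing osl handle, PKTBUFSZ is the
    size referenced in the pktpool_init documentation above, lbuf_basic
    comes from the lbuf headers, and the demo names are hypothetical
    (prototypes from hnd_pktpool.h assumed in scope):

        /* Sketch only: attach the registry, init one pool, take and
         * return a packet, then tear everything down.
         */
        static pktpool_t demo_pool;

        static void
        pktpool_lifecycle_demo(osl_t *osh)
        {
        	int n_pkts = 32;	/* arbitrary prefill count */

        	if (pktpool_attach(osh, 1) <= 0)
        		return;
        	if (pktpool_init(osh, &demo_pool, &n_pkts, PKTBUFSZ, FALSE,
        	                 lbuf_basic, FALSE, 0, 0) == BCME_OK) {
        		void *p = pktpool_get_ext(&demo_pool, lbuf_basic, NULL);

        		if (p != NULL)
        			pktpool_free(&demo_pool, p);	/* back to the pool, not the heap */
        		(void)pktpool_deinit(osh, &demo_pool);
        	}
        	(void)pktpool_dettach(osh);
        }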
+
+/**
+ * pktpool_deinit:
+ * Prior to freeing a pktpool, all packets must be first freed into the pktpool.
+ * Upon pktpool_deinit, all packets in the free pool will be freed to the heap.
+ * An assert is in place to ensure that there are no packets still lingering
+ * around. Packets freed to a pool after the deinit will cause a memory
+ * corruption as the pktpool_t structure no longer exists.
+ */
+int
+BCMATTACHFN(pktpool_deinit)(osl_t *osh, pktpool_t *pktp)
+{
+ uint16 freed = 0;
+
+ ASSERT(osh != NULL);
+ ASSERT(pktp != NULL);
+
+#ifdef BCMDBG_POOL
+ {
+ int i;
+ for (i = 0; i <= pktp->n_pkts; i++) {
+ pktp->dbg_q[i].p = NULL;
+ }
+ }
+#endif
+
+ while (pktp->freelist != NULL) {
+ void * p = pktp->freelist;
+
+ pktp->freelist = PKTFREELIST(p); /* unlink head packet from free list */
+ PKTSETFREELIST(p, NULL);
+
+ PKTSETPOOL(osh, p, FALSE, NULL); /* clear pool ID tag in pkt */
+
+ total_pool_pktid_count++;
+ PKTFREE(osh, p, pktp->istx); /* free the packet */
+
+ freed++;
+ ASSERT(freed <= pktp->n_pkts);
+ }
+
+ pktp->avail -= freed;
+ ASSERT(pktp->avail == 0);
+
+ pktp->n_pkts -= freed;
+
+ pktpool_deregister(pktp); /* release previously acquired unique pool id */
+ POOLSETID(pktp, PKTPOOL_INVALID_ID);
+
+ if (HND_PKTPOOL_MUTEX_DELETE(&pktp->mutex) != OSL_EXT_SUCCESS)
+ return BCME_ERROR;
+
+ pktp->inited = FALSE;
+
+ /* Are there still pending pkts? */
+ ASSERT(pktp->n_pkts == 0);
+
+ return 0;
+}
+
+int
+pktpool_fill(osl_t *osh, pktpool_t *pktp, bool minimal)
+{
+ void *p;
+ int err = 0;
+ int n_pkts, psize, maxlen;
+
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return BCME_ERROR;
+
+#ifdef BCMRXDATAPOOL
+ ASSERT((pktp->max_pkt_bytes != 0) || (pktp->type == lbuf_rxfrag));
+#else
+ ASSERT(pktp->max_pkt_bytes != 0);
+#endif /* BCMRXDATAPOOL */
+
+ maxlen = pktp->maxlen;
+ psize = minimal ? (maxlen >> 2) : maxlen;
+ n_pkts = (int)pktp->n_pkts;
+#ifdef POOL_HEAP_RECONFIG
+	/* Packets released to the freelist mgr also count as part of the pool size */
+ n_pkts += pktp->is_heap_pool ?
+ pktp->poolheap_count : 0;
+#endif
+ for (; n_pkts < psize; n_pkts++) {
+
+#ifdef _RTE_
+		/* For rte builds, use PKTALLOC rather than PKTGET to avoid the same
+		 * pkts being dequeued and enqueued to the pool when allocation fails.
+		 * All pkts in a pool have the same length.
+		 */
+ p = PKTALLOC(osh, pktp->max_pkt_bytes, pktp->type);
+#else
+		p = PKTGET(osh, pktp->max_pkt_bytes, TRUE);
+#endif
+
+ if (p == NULL) {
+ err = BCME_NOMEM;
+ break;
+ }
+
+ if (pktpool_add(pktp, p) != BCME_OK) {
+ PKTFREE(osh, p, FALSE);
+ err = BCME_ERROR;
+ break;
+ }
+ ASSERT(total_pool_pktid_count > 0);
+ total_pool_pktid_count--;
+ }
+
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
+ return BCME_ERROR;
+
+ if (pktp->cbcnt) {
+ if (pktp->empty == FALSE)
+ pktpool_avail_notify(pktp);
+ }
+
+ return err;
+}
+
+#ifdef BCMPOOLRECLAIM
+/* Releases up to free_cnt pkts from the pool back to the heap,
+ * without deinitializing the pool.
+ */
+uint16
+pktpool_reclaim(osl_t *osh, pktpool_t *pktp, uint16 free_cnt, uint8 action)
+{
+ uint16 freed = 0;
+
+ pktpool_cb_extn_t cb = NULL;
+ void *arg = NULL;
+ void *rem_list_head = NULL;
+ void *rem_list_tail = NULL;
+ bool dont_free = FALSE;
+
+ ASSERT(osh != NULL);
+ ASSERT(pktp != NULL);
+
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) {
+ return freed;
+ }
+
+ if (pktp->avail < free_cnt) {
+ free_cnt = pktp->avail;
+ }
+
+ if (BCMSPLITRX_ENAB() && (pktp->type == lbuf_rxfrag)) {
+		/* If the pool is the shared rx frag pool, use the callback fn to reclaim
+		 * the host address and Rx cpl ID associated with the pkt.
+		 */
+ ASSERT(pktp->cbext.cb != NULL);
+
+ cb = pktp->cbext.cb;
+ arg = pktp->cbext.arg;
+
+ } else if ((pktp->type == lbuf_basic) && (pktp->rxcplidfn.cb != NULL)) {
+		/* If the pool is the shared rx pool, use the callback fn to free up
+		 * the Rx cpl ID associated with the pkt.
+		 */
+ cb = pktp->rxcplidfn.cb;
+ arg = pktp->rxcplidfn.arg;
+ }
+
+ while ((pktp->freelist != NULL) && (free_cnt)) {
+ void * p = pktp->freelist;
+
+ pktp->freelist = PKTFREELIST(p); /* unlink head packet from free list */
+ PKTSETFREELIST(p, NULL);
+
+ dont_free = FALSE;
+
+ if (action == FREE_ALL_FRAG_PKTS) {
+ /* Free lbufs which are marked as frag_free_mem */
+ if (!PKTISFRMFRAG(p)) {
+ dont_free = TRUE;
+ }
+ }
+
+ if (dont_free) {
+ if (rem_list_head == NULL) {
+ rem_list_head = p;
+ } else {
+ PKTSETFREELIST(rem_list_tail, p);
+ }
+ rem_list_tail = p;
+ continue;
+ }
+ if (cb != NULL) {
+ if (cb(pktp, arg, p, REMOVE_RXCPLID, NULL)) {
+ PKTSETFREELIST(p, pktp->freelist);
+ pktp->freelist = p;
+ break;
+ }
+ }
+
+ PKTSETPOOL(osh, p, FALSE, NULL); /* clear pool ID tag in pkt */
+
+ pktp->avail--;
+ pktp->n_pkts--;
+
+ total_pool_pktid_count++;
+ PKTFREE(osh, p, pktp->istx); /* free the packet */
+
+ freed++;
+ free_cnt--;
+ }
+
+ if (rem_list_head) {
+ PKTSETFREELIST(rem_list_tail, pktp->freelist);
+ pktp->freelist = rem_list_head;
+ }
+
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) {
+ return freed;
+ }
+
+ return freed;
+}
+#endif /* #ifdef BCMPOOLRECLAIM */
+
+/* Empties all pkts from the pool without deinitializing it.
+ * NOTE: the caller is responsible for ensuring that all pkts have been
+ * returned to the pool before calling; otherwise they are leaked!
+ */
+int
+pktpool_empty(osl_t *osh, pktpool_t *pktp)
+{
+ uint16 freed = 0;
+
+ ASSERT(osh != NULL);
+ ASSERT(pktp != NULL);
+
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return BCME_ERROR;
+
+#ifdef BCMDBG_POOL
+ {
+ int i;
+ for (i = 0; i <= pktp->n_pkts; i++) {
+ pktp->dbg_q[i].p = NULL;
+ }
+ }
+#endif
+
+ while (pktp->freelist != NULL) {
+ void * p = pktp->freelist;
+
+ pktp->freelist = PKTFREELIST(p); /* unlink head packet from free list */
+ PKTSETFREELIST(p, NULL);
+
+ PKTSETPOOL(osh, p, FALSE, NULL); /* clear pool ID tag in pkt */
+
+ total_pool_pktid_count++;
+ PKTFREE(osh, p, pktp->istx); /* free the packet */
+
+ freed++;
+ ASSERT(freed <= pktp->n_pkts);
+ }
+
+ pktp->avail -= freed;
+ ASSERT(pktp->avail == 0);
+
+ pktp->n_pkts -= freed;
+
+ ASSERT(pktp->n_pkts == 0);
+
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
+ return BCME_ERROR;
+
+ return 0;
+}
+
+int
+BCMPOSTTRAPFN(pktpool_avail)(pktpool_t *pktpool)
+{
+ int avail = pktpool->avail;
+
+ if (avail == 0) {
+ pktpool_emptycb_disable(pktpool, FALSE);
+ }
+
+ return avail;
+}
+
+static void *
+BCMPOSTTRAPFASTPATH(pktpool_deq)(pktpool_t *pktp)
+{
+ void *p = NULL;
+
+ if (pktp->avail == 0)
+ return NULL;
+
+ ASSERT_FP(pktp->freelist != NULL);
+
+ p = pktp->freelist; /* dequeue packet from head of pktpool free list */
+ pktp->freelist = PKTFREELIST(p); /* free list points to next packet */
+
+#if defined(DONGLEBUILD) && defined(SRMEM)
+ if (SRMEM_ENAB()) {
+ PKTSRMEM_INC_INUSE(p);
+ }
+#endif /* DONGLEBUILD && SRMEM */
+
+ PKTSETFREELIST(p, NULL);
+
+ pktp->avail--;
+
+ return p;
+}
+
+static void
+BCMPOSTTRAPFASTPATH(pktpool_enq)(pktpool_t *pktp, void *p)
+{
+ ASSERT_FP(p != NULL);
+
+ PKTSETFREELIST(p, pktp->freelist); /* insert at head of pktpool free list */
+ pktp->freelist = p; /* free list points to newly inserted packet */
+
+#if defined(DONGLEBUILD) && defined(SRMEM)
+ if (SRMEM_ENAB()) {
+ PKTSRMEM_DEC_INUSE(p);
+ }
+#endif /* DONGLEBUILD && SRMEM */
+
+ pktp->avail++;
+ ASSERT_FP(pktp->avail <= pktp->n_pkts);
+}
+
+/** utility for registering host addr fill function called from pciedev */
+int
+BCMATTACHFN(pktpool_hostaddr_fill_register)(pktpool_t *pktp, pktpool_cb_extn_t cb, void *arg)
+{
+
+ ASSERT(cb != NULL);
+
+ ASSERT(pktp->cbext.cb == NULL);
+ pktp->cbext.cb = cb;
+ pktp->cbext.arg = arg;
+ return 0;
+}
+
+int
+BCMATTACHFN(pktpool_rxcplid_fill_register)(pktpool_t *pktp, pktpool_cb_extn_t cb, void *arg)
+{
+
+ ASSERT(cb != NULL);
+
+ if (pktp == NULL)
+ return BCME_ERROR;
+ ASSERT(pktp->rxcplidfn.cb == NULL);
+ pktp->rxcplidfn.cb = cb;
+ pktp->rxcplidfn.arg = arg;
+ return 0;
+}
+
+/** whenever host posts rxbuffer, invoke dma_rxfill from pciedev layer */
+void
+pktpool_invoke_dmarxfill(pktpool_t *pktp)
+{
+ ASSERT(pktp->dmarxfill.cb);
+ ASSERT(pktp->dmarxfill.arg);
+
+ if (pktp->dmarxfill.cb)
+ pktp->dmarxfill.cb(pktp, pktp->dmarxfill.arg);
+}
+
+/** Registers callback functions for split rx mode */
+int
+BCMATTACHFN(pkpool_haddr_avail_register_cb)(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
+{
+
+ ASSERT(cb != NULL);
+
+ pktp->dmarxfill.cb = cb;
+ pktp->dmarxfill.arg = arg;
+
+ return 0;
+}
+
+/**
+ * Registers callback functions.
+ * No BCMATTACHFN as it is used in xdc_enable_ep which is not an attach function
+ */
+int
+pktpool_avail_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
+{
+ int err = 0;
+ int i;
+
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return BCME_ERROR;
+
+ ASSERT(cb != NULL);
+
+ for (i = 0; i < pktp->cbcnt; i++) {
+ ASSERT(pktp->cbs[i].cb != NULL);
+ if ((cb == pktp->cbs[i].cb) && (arg == pktp->cbs[i].arg)) {
+ pktp->cbs[i].refcnt++;
+ goto done;
+ }
+ }
+
+ i = pktp->cbcnt;
+ if (i == PKTPOOL_CB_MAX_AVL) {
+ err = BCME_ERROR;
+ goto done;
+ }
+
+ ASSERT(pktp->cbs[i].cb == NULL);
+ pktp->cbs[i].cb = cb;
+ pktp->cbs[i].arg = arg;
+ pktp->cbs[i].refcnt++;
+ pktp->cbcnt++;
+
+ /* force enable empty callback */
+ pktpool_emptycb_disable(pktp, FALSE);
+done:
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
+ return BCME_ERROR;
+
+ return err;
+}
+
+/* No BCMATTACHFN as it is used in a non-attach function */
+int
+pktpool_avail_deregister(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
+{
+ int err = 0;
+ int i, k;
+
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) {
+ return BCME_ERROR;
+ }
+
+ ASSERT(cb != NULL);
+
+ for (i = 0; i < pktp->cbcnt; i++) {
+ ASSERT(pktp->cbs[i].cb != NULL);
+ if ((cb == pktp->cbs[i].cb) && (arg == pktp->cbs[i].arg)) {
+ pktp->cbs[i].refcnt--;
+ if (pktp->cbs[i].refcnt) {
+ /* Still there are references to this callback */
+ goto done;
+ }
+			/* Move the remaining callbacks down to fill the hole */
+ for (k = i+1; k < pktp->cbcnt; i++, k++) {
+ pktp->cbs[i].cb = pktp->cbs[k].cb;
+ pktp->cbs[i].arg = pktp->cbs[k].arg;
+ pktp->cbs[i].refcnt = pktp->cbs[k].refcnt;
+ }
+
+ /* reset the last callback */
+ pktp->cbs[i].cb = NULL;
+ pktp->cbs[i].arg = NULL;
+ pktp->cbs[i].refcnt = 0;
+
+ pktp->cbcnt--;
+ goto done;
+ }
+ }
+
+done:
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) {
+ return BCME_ERROR;
+ }
+
+ return err;
+}
+
+/** Registers callback functions */
+int
+BCMATTACHFN(pktpool_empty_register)(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
+{
+ int err = 0;
+ int i;
+
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return BCME_ERROR;
+
+ ASSERT(cb != NULL);
+
+ i = pktp->ecbcnt;
+ if (i == PKTPOOL_CB_MAX) {
+ err = BCME_ERROR;
+ goto done;
+ }
+
+ ASSERT(pktp->ecbs[i].cb == NULL);
+ pktp->ecbs[i].cb = cb;
+ pktp->ecbs[i].arg = arg;
+ pktp->ecbcnt++;
+
+done:
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
+ return BCME_ERROR;
+
+ return err;
+}
+
+/** Calls registered callback functions */
+static int
+BCMPOSTTRAPFN(pktpool_empty_notify)(pktpool_t *pktp)
+{
+ int i;
+
+ pktp->empty = TRUE;
+ for (i = 0; i < pktp->ecbcnt; i++) {
+ ASSERT(pktp->ecbs[i].cb != NULL);
+ pktp->ecbs[i].cb(pktp, pktp->ecbs[i].arg);
+ }
+ pktp->empty = FALSE;
+
+ return 0;
+}
+
+#ifdef BCMDBG_POOL
+int
+pktpool_dbg_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
+{
+ int err = 0;
+ int i;
+
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return BCME_ERROR;
+
+ ASSERT(cb);
+
+ i = pktp->dbg_cbcnt;
+ if (i == PKTPOOL_CB_MAX) {
+ err = BCME_ERROR;
+ goto done;
+ }
+
+ ASSERT(pktp->dbg_cbs[i].cb == NULL);
+ pktp->dbg_cbs[i].cb = cb;
+ pktp->dbg_cbs[i].arg = arg;
+ pktp->dbg_cbcnt++;
+
+done:
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
+ return BCME_ERROR;
+
+ return err;
+}
+
+int pktpool_dbg_notify(pktpool_t *pktp);
+
+int
+pktpool_dbg_notify(pktpool_t *pktp)
+{
+ int i;
+
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return BCME_ERROR;
+
+ for (i = 0; i < pktp->dbg_cbcnt; i++) {
+ ASSERT(pktp->dbg_cbs[i].cb);
+ pktp->dbg_cbs[i].cb(pktp, pktp->dbg_cbs[i].arg);
+ }
+
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
+ return BCME_ERROR;
+
+ return 0;
+}
+
+int
+pktpool_dbg_dump(pktpool_t *pktp)
+{
+ int i;
+
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return BCME_ERROR;
+
+ printf("pool len=%d maxlen=%d\n", pktp->dbg_qlen, pktp->maxlen);
+ for (i = 0; i < pktp->dbg_qlen; i++) {
+ ASSERT(pktp->dbg_q[i].p);
+		printf("%d, p: %p dur:%lu us state:%d\n", i,
+		pktp->dbg_q[i].p, pktp->dbg_q[i].dur/100, PKTPOOLSTATE(pktp->dbg_q[i].p));
+ }
+
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
+ return BCME_ERROR;
+
+ return 0;
+}
+
+int
+pktpool_stats_dump(pktpool_t *pktp, pktpool_stats_t *stats)
+{
+ int i;
+ int state;
+
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return BCME_ERROR;
+
+ bzero(stats, sizeof(pktpool_stats_t));
+ for (i = 0; i < pktp->dbg_qlen; i++) {
+ ASSERT(pktp->dbg_q[i].p != NULL);
+
+ state = PKTPOOLSTATE(pktp->dbg_q[i].p);
+ switch (state) {
+ case POOL_TXENQ:
+ stats->enq++; break;
+ case POOL_TXDH:
+ stats->txdh++; break;
+ case POOL_TXD11:
+ stats->txd11++; break;
+ case POOL_RXDH:
+ stats->rxdh++; break;
+ case POOL_RXD11:
+ stats->rxd11++; break;
+ case POOL_RXFILL:
+ stats->rxfill++; break;
+ case POOL_IDLE:
+ stats->idle++; break;
+ }
+ }
+
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
+ return BCME_ERROR;
+
+ return 0;
+}
+
+int
+pktpool_start_trigger(pktpool_t *pktp, void *p)
+{
+ uint32 cycles, i;
+
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return BCME_ERROR;
+
+ if (!PKTPOOL(OSH_NULL, p))
+ goto done;
+
+ OSL_GETCYCLES(cycles);
+
+ for (i = 0; i < pktp->dbg_qlen; i++) {
+ ASSERT(pktp->dbg_q[i].p != NULL);
+
+ if (pktp->dbg_q[i].p == p) {
+ pktp->dbg_q[i].cycles = cycles;
+ break;
+ }
+ }
+
+done:
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
+ return BCME_ERROR;
+
+ return 0;
+}
+
+int pktpool_stop_trigger(pktpool_t *pktp, void *p);
+
+int
+pktpool_stop_trigger(pktpool_t *pktp, void *p)
+{
+ uint32 cycles, i;
+
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return BCME_ERROR;
+
+ if (!PKTPOOL(OSH_NULL, p))
+ goto done;
+
+ OSL_GETCYCLES(cycles);
+
+ for (i = 0; i < pktp->dbg_qlen; i++) {
+ ASSERT(pktp->dbg_q[i].p != NULL);
+
+ if (pktp->dbg_q[i].p == p) {
+ if (pktp->dbg_q[i].cycles == 0)
+ break;
+
+ if (cycles >= pktp->dbg_q[i].cycles)
+ pktp->dbg_q[i].dur = cycles - pktp->dbg_q[i].cycles;
+ else
+ pktp->dbg_q[i].dur =
+ (((uint32)-1) - pktp->dbg_q[i].cycles) + cycles + 1;
+
+ pktp->dbg_q[i].cycles = 0;
+ break;
+ }
+ }
+
+done:
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
+ return BCME_ERROR;
+
+ return 0;
+}
+#endif /* BCMDBG_POOL */
+
+int
+pktpool_avail_notify_normal(osl_t *osh, pktpool_t *pktp)
+{
+ BCM_REFERENCE(osh);
+ ASSERT(pktp);
+
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return BCME_ERROR;
+
+ pktp->availcb_excl = NULL;
+
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
+ return BCME_ERROR;
+
+ return 0;
+}
+
+int
+pktpool_avail_notify_exclusive(osl_t *osh, pktpool_t *pktp, pktpool_cb_t cb)
+{
+ int i;
+ int err;
+ BCM_REFERENCE(osh);
+
+ ASSERT(pktp);
+
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return BCME_ERROR;
+
+ ASSERT(pktp->availcb_excl == NULL);
+ for (i = 0; i < pktp->cbcnt; i++) {
+ if (cb == pktp->cbs[i].cb) {
+ pktp->availcb_excl = &pktp->cbs[i];
+ break;
+ }
+ }
+
+ if (pktp->availcb_excl == NULL)
+ err = BCME_ERROR;
+ else
+ err = 0;
+
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
+ return BCME_ERROR;
+
+ return err;
+}
+
+static void
+BCMPOSTTRAPFN(pktpool_avail_notify)(pktpool_t *pktp)
+{
+ int i, k, idx;
+
+ ASSERT(pktp);
+ pktpool_emptycb_disable(pktp, TRUE);
+
+ if (pktp->availcb_excl != NULL) {
+ pktp->availcb_excl->cb(pktp, pktp->availcb_excl->arg);
+ return;
+ }
+
+ k = pktp->cbcnt - 1;
+ for (i = 0; i < pktp->cbcnt; i++) {
+		/* Callbacks are disabled at entry to this function.
+		 * If, say, avail is 5 and the first callback consumes exactly 5
+		 * packets due to its dma rxpost setting, further callbacks would
+		 * never be notified if an avail check were present here.
+		 * So all cbs are called even when pktp->avail is zero, giving
+		 * each cb the opportunity to re-enable callbacks if its
+		 * operation is still in progress / not yet completed.
+		 */
+ if (pktp->cbtoggle)
+ idx = i;
+ else
+ idx = k--;
+
+ ASSERT(pktp->cbs[idx].cb != NULL);
+ pktp->cbs[idx].cb(pktp, pktp->cbs[idx].arg);
+ }
+
+	/* Alternate between filling from the head or the tail */
+ pktp->cbtoggle ^= 1;
+
+ return;
+}
+
+#ifdef APP_RX
+/* Update freelist and avail count for a given packet pool */
+void
+BCMFASTPATH(pktpool_update_freelist)(pktpool_t *pktp, void *p, uint pkts_consumed)
+{
+ ASSERT_FP(pktp->avail >= pkts_consumed);
+
+ pktp->freelist = p;
+ pktp->avail -= pkts_consumed;
+}
+#endif /* APP_RX */
+
+/** Gets an empty packet from the caller provided pool */
+void *
+BCMPOSTTRAPFASTPATH(pktpool_get_ext)(pktpool_t *pktp, uint8 type, uint *pktcnt)
+{
+ void *p = NULL;
+ uint pkts_requested = 1;
+#if defined(DONGLEBUILD)
+ uint pkts_avail;
+ bool rxcpl = (pktp->rxcplidfn.cb != NULL) ? TRUE : FALSE;
+#endif /* DONGLEBUILD */
+
+ if (pktcnt) {
+ pkts_requested = *pktcnt;
+ if (pkts_requested == 0) {
+ goto done;
+ }
+ }
+
+#if defined(DONGLEBUILD)
+ pkts_avail = pkts_requested;
+#endif /* DONGLEBUILD */
+
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return NULL;
+
+	/* If there are fewer packets in the pool than requested, call
+	 * pktpool_empty_notify() to reclaim more pkts.
+	 */
+ if (pktp->avail < pkts_requested) {
+ /* Notify and try to reclaim tx pkts */
+ if (pktp->ecbcnt) {
+ pktpool_empty_notify(pktp);
+ }
+
+ if (pktp->avail < pkts_requested) {
+ pktpool_emptycb_disable(pktp, FALSE);
+ if (pktp->avail == 0) {
+ goto done;
+ }
+ }
+ }
+
+#ifdef APP_RX
+ if (pktcnt) {
+ p = pktp->freelist;
+ if (pktp->avail < pkts_requested) {
+ pkts_avail = pktp->avail;
+ }
+
+ /* For rx frags in APP, we need to return only the head of freelist and
+ * the caller operates on it and updates the avail count and freelist pointer
+ * using pktpool_update_freelist().
+ */
+ if (BCMSPLITRX_ENAB() && ((type == lbuf_rxfrag) || (type == lbuf_rxdata))) {
+ *pktcnt = pkts_avail;
+ goto done;
+ }
+ } else
+#endif /* APP_RX */
+ {
+ ASSERT_FP(pkts_requested == 1);
+ p = pktpool_deq(pktp);
+ }
+
+ ASSERT_FP(p);
+
+#if defined(DONGLEBUILD)
+#ifndef APP_RX
+ if (BCMSPLITRX_ENAB() && (type == lbuf_rxfrag)) {
+		/* If the pool is the shared rx pool, use the callback fn to populate
+		 * the host address. In the APP case, the callback may use fewer
+		 * packets than it was given because of a resource crunch; the exact
+		 * number of packets used by the callback is returned via (*pktcnt)
+		 * and the pktpool freelist head is updated accordingly.
+		 */
+ ASSERT_FP(pktp->cbext.cb != NULL);
+ if (pktp->cbext.cb(pktp, pktp->cbext.arg, p, rxcpl, &pkts_avail)) {
+ pktpool_enq(pktp, p);
+ p = NULL;
+ }
+ }
+#endif /* APP_RX */
+
+ if ((type == lbuf_basic) && rxcpl) {
+		/* If the pool is the shared rx pool, use the callback fn to populate the Rx cpl ID */
+ ASSERT_FP(pktp->rxcplidfn.cb != NULL);
+ /* If rxcplblock is allocated */
+ if (pktp->rxcplidfn.cb(pktp, pktp->rxcplidfn.arg, p, TRUE, NULL)) {
+ pktpool_enq(pktp, p);
+ p = NULL;
+ }
+ }
+#endif /* DONGLEBUILD */
+
+done:
+ if ((pktp->avail == 0) && (pktp->emptycb_disable == EMPTYCB_SKIPPED)) {
+ pktp->emptycb_disable = EMPTYCB_DISABLED;
+ }
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
+ return NULL;
+
+ return p;
+}
+
+void
+BCMFASTPATH(pktpool_nfree)(pktpool_t *pktp, void *head, void *tail, uint count)
+{
+#ifdef BCMRXDATAPOOL
+ void *_head = head;
+#endif /* BCMRXDATAPOOL */
+
+ if (count > 1) {
+ pktp->avail += (count - 1);
+
+#ifdef BCMRXDATAPOOL
+ while (--count) {
+ _head = PKTLINK(_head);
+ ASSERT_FP(_head);
+ pktpool_enq(pktpool_shared_rxdata, PKTDATA(OSH_NULL, _head));
+ }
+#endif /* BCMRXDATAPOOL */
+
+ PKTSETFREELIST(tail, pktp->freelist);
+ pktp->freelist = PKTLINK(head);
+ PKTSETLINK(head, NULL);
+ }
+ pktpool_free(pktp, head);
+}
+
+void
+BCMPOSTTRAPFASTPATH(pktpool_free)(pktpool_t *pktp, void *p)
+{
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return;
+
+ ASSERT_FP(p != NULL);
+#ifdef BCMDBG_POOL
+ /* pktpool_stop_trigger(pktp, p); */
+#endif
+
+#ifdef BCMRXDATAPOOL
+ /* Free rx data buffer to rx data buffer pool */
+ if (PKT_IS_RX_PKT(OSH_NULL, p)) {
+ pktpool_t *_pktp = pktpool_shared_rxdata;
+ if (PKTISRXFRAG(OSH_NULL, p)) {
+ _pktp->cbext.cb(_pktp, _pktp->cbext.arg, p, REMOVE_RXCPLID, NULL);
+ PKTRESETRXFRAG(OSH_NULL, p);
+ }
+ pktpool_enq(pktpool_shared_rxdata, PKTDATA(OSH_NULL, p));
+ }
+#endif /* BCMRXDATAPOOL */
+
+ pktpool_enq(pktp, p);
+
+ /**
+ * Feed critical DMA with freshly freed packets, to avoid DMA starvation.
+ * If any avail callback functions are registered, send a notification
+ * that a new packet is available in the pool.
+ */
+ if (pktp->cbcnt) {
+		/* To use cpu cycles more efficiently, callbacks can be temporarily
+		 * disabled. This allows feeding on a burst basis as opposed to an
+		 * inefficient per-packet basis.
+		 */
+ if (pktp->emptycb_disable == EMPTYCB_ENABLED) {
+ /**
+ * If the call originated from pktpool_empty_notify, the just freed packet
+ * is needed in pktpool_get.
+ * Therefore don't call pktpool_avail_notify.
+ */
+ if (pktp->empty == FALSE)
+ pktpool_avail_notify(pktp);
+ } else {
+ /**
+ * The callback is temporarily disabled, log that a packet has been freed.
+ */
+ pktp->emptycb_disable = EMPTYCB_SKIPPED;
+ }
+ }
+
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
+ return;
+}
+
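+/*
+ * Note (sketch of the free path, inferred from this file): pool packets
+ * carry a pool tag set via PKTSETPOOL() in pktpool_add(), so a generic
+ * PKTFREE() on such a packet is expected to route back into pktpool_free()
+ * rather than to the heap:
+ *
+ *	PKTFREE(osh, p, FALSE);		p returns to its owning pool
+ */
+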
+/** Adds a caller provided (empty) packet to the caller provided pool */
+int
+pktpool_add(pktpool_t *pktp, void *p)
+{
+ int err = 0;
+
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return BCME_ERROR;
+
+ ASSERT(p != NULL);
+
+ if (pktp->n_pkts == pktp->maxlen) {
+ err = BCME_RANGE;
+ goto done;
+ }
+
+ /* pkts in pool have same length */
+ ASSERT(pktp->max_pkt_bytes == PKTLEN(OSH_NULL, p));
+ PKTSETPOOL(OSH_NULL, p, TRUE, pktp);
+
+ pktp->n_pkts++;
+ pktpool_enq(pktp, p);
+
+#ifdef BCMDBG_POOL
+ pktp->dbg_q[pktp->dbg_qlen++].p = p;
+#endif
+
+done:
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
+ return BCME_ERROR;
+
+ return err;
+}
+
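+/*
+ * Usage sketch (illustrative only): growing a pool with a caller-allocated
+ * packet. All pkts in a pool must have the same length, hence the
+ * max_pkt_bytes-sized allocation; 'my_pkt_alloc' is a hypothetical helper:
+ *
+ *	void *p = my_pkt_alloc(osh, pktpool_max_pkt_bytes(pktp));
+ *	if (p != NULL && pktpool_add(pktp, p) != BCME_OK)
+ *		PKTFREE(osh, p, FALSE);		pool full (BCME_RANGE) or error
+ */
+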
+/**
+ * Force pktpool_setmaxlen () into RAM as it uses a constant
+ * (PKTPOOL_LEN_MAX) that may be changed post tapeout for ROM-based chips.
+ */
+int
+BCMRAMFN(pktpool_setmaxlen)(pktpool_t *pktp, uint16 maxlen)
+{
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return BCME_ERROR;
+
+ if (maxlen > PKTPOOL_LEN_MAX)
+ maxlen = PKTPOOL_LEN_MAX;
+
+	/* If the pool is already beyond maxlen, just cap it, since we
+	 * currently do not reduce the length of a pool that has already
+	 * been allocated.
+	 */
+ pktp->maxlen = (pktp->n_pkts > maxlen) ? pktp->n_pkts : maxlen;
+
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
+ return BCME_ERROR;
+
+ return pktp->maxlen;
+}
+
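+/*
+ * Note: pktpool_setmaxlen() returns the resulting maxlen (which may be the
+ * current n_pkts if the pool already grew past the request) rather than a
+ * BCME_* status, so a positive return is not an error. Typical use, as seen
+ * later in this file:
+ *
+ *	(void)pktpool_setmaxlen(pktpool_shared, SHARED_POOL_LEN);
+ */
+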
+void
+BCMPOSTTRAPFN(pktpool_emptycb_disable)(pktpool_t *pktp, bool disable)
+{
+ bool notify = FALSE;
+ ASSERT(pktp);
+
+ /**
+ * To more efficiently use the cpu cycles, callbacks can be temporarily disabled.
+ * If callback is going to be re-enabled, check if any packet got
+ * freed and added back to the pool while callback was disabled.
+ * When this is the case do the callback now, provided that callback functions
+ * are registered and this call did not originate from pktpool_empty_notify.
+ */
+ if ((!disable) && (pktp->cbcnt) && (pktp->empty == FALSE) &&
+ (pktp->emptycb_disable == EMPTYCB_SKIPPED)) {
+ notify = TRUE;
+ }
+
+ /* Enable or temporarily disable callback when packet becomes available. */
+ if (disable) {
+ if (pktp->emptycb_disable == EMPTYCB_ENABLED) {
+			/* Mark disabled only if currently enabled. If the state is
+			 * EMPTYCB_SKIPPED, the callback is already disabled and some
+			 * pkts were freed meanwhile; keep the SKIPPED state so that
+			 * pktpool_avail_notify() is still called on re-enable.
+			 */
+ pktp->emptycb_disable = EMPTYCB_DISABLED;
+ }
+ } else {
+ pktp->emptycb_disable = EMPTYCB_ENABLED;
+ }
+ if (notify) {
+ /* pktpool_emptycb_disable() is called from pktpool_avail_notify() and
+ * pktp->cbs. To have the result of most recent call, notify after
+ * emptycb_disable is modified.
+ * This change also prevents any recursive calls of pktpool_avail_notify()
+ * from pktp->cbs if pktpool_emptycb_disable() is called from them.
+ */
+ pktpool_avail_notify(pktp);
+ }
+}
+
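+/*
+ * Usage sketch (illustrative only): batching frees with the empty callback
+ * disabled, so pktpool_avail_notify() fires at most once per burst instead
+ * of once per packet; 'next_completed_pkt' is a hypothetical source:
+ *
+ *	pktpool_emptycb_disable(pktp, TRUE);
+ *	while ((p = next_completed_pkt()) != NULL)
+ *		pktpool_free(pktp, p);
+ *	pktpool_emptycb_disable(pktp, FALSE);	notifies if any free was skipped
+ */
+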
+bool
+pktpool_emptycb_disabled(pktpool_t *pktp)
+{
+ ASSERT(pktp);
+ return pktp->emptycb_disable != EMPTYCB_ENABLED;
+}
+
+#ifdef BCMPKTPOOL
+#include <hnd_lbuf.h>
+
+pktpool_t *pktpool_shared = NULL;
+
+#ifdef BCMFRAGPOOL
+pktpool_t *pktpool_shared_lfrag = NULL;
+#ifdef BCMRESVFRAGPOOL
+pktpool_t *pktpool_resv_lfrag = NULL;
+struct resv_info *resv_pool_info = NULL;
+#endif /* BCMRESVFRAGPOOL */
+#endif /* BCMFRAGPOOL */
+
+#ifdef BCMALFRAGPOOL
+pktpool_t *pktpool_shared_alfrag = NULL;
+pktpool_t *pktpool_shared_alfrag_data = NULL;
+#endif /* BCMALFRAGPOOL */
+
+pktpool_t *pktpool_shared_rxlfrag = NULL;
+
+/* Rx data pool w/o rxfrag structure */
+pktpool_t *pktpool_shared_rxdata = NULL;
+
+static osl_t *pktpool_osh = NULL;
+
+/**
+ * Initializes several packet pools and allocates packets within those pools.
+ */
+int
+BCMATTACHFN(hnd_pktpool_init)(osl_t *osh)
+{
+ int err = BCME_OK;
+ int n, pktsz;
+ bool is_heap_pool;
+
+ BCM_REFERENCE(pktsz);
+ BCM_REFERENCE(is_heap_pool);
+
+ /* Construct a packet pool registry before initializing packet pools */
+ n = pktpool_attach(osh, PKTPOOL_MAXIMUM_ID);
+ if (n != PKTPOOL_MAXIMUM_ID) {
+ ASSERT(0);
+ err = BCME_ERROR;
+ goto error;
+ }
+
+ pktpool_shared = MALLOCZ(osh, sizeof(pktpool_t));
+ if (pktpool_shared == NULL) {
+ ASSERT(0);
+ err = BCME_NOMEM;
+ goto error;
+ }
+
+#if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED)
+ pktpool_shared_lfrag = MALLOCZ(osh, sizeof(pktpool_t));
+ if (pktpool_shared_lfrag == NULL) {
+ ASSERT(0);
+ err = BCME_NOMEM;
+ goto error;
+ }
+
+#if defined(BCMRESVFRAGPOOL) && !defined(BCMRESVFRAGPOOL_DISABLED)
+ resv_pool_info = hnd_resv_pool_alloc(osh);
+ if (resv_pool_info == NULL) {
+ err = BCME_NOMEM;
+ ASSERT(0);
+ goto error;
+ }
+ pktpool_resv_lfrag = resv_pool_info->pktp;
+ if (pktpool_resv_lfrag == NULL) {
+ err = BCME_ERROR;
+ ASSERT(0);
+ goto error;
+ }
+#endif /* RESVFRAGPOOL */
+#endif /* FRAGPOOL */
+
+#if defined(BCMALFRAGPOOL) && !defined(BCMALFRAGPOOL_DISABLED)
+ pktpool_shared_alfrag = MALLOCZ(osh, sizeof(pktpool_t));
+ if (pktpool_shared_alfrag == NULL) {
+ ASSERT(0);
+ err = BCME_NOMEM;
+ goto error;
+ }
+
+ pktpool_shared_alfrag_data = MALLOCZ(osh, sizeof(pktpool_t));
+ if (pktpool_shared_alfrag_data == NULL) {
+ ASSERT(0);
+ err = BCME_NOMEM;
+ goto error;
+ }
+#endif /* BCMALFRAGPOOL */
+
+#if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)
+ pktpool_shared_rxlfrag = MALLOCZ(osh, sizeof(pktpool_t));
+ if (pktpool_shared_rxlfrag == NULL) {
+ ASSERT(0);
+ err = BCME_NOMEM;
+ goto error;
+ }
+#endif
+
+#if defined(BCMRXDATAPOOL) && !defined(BCMRXDATAPOOL_DISABLED)
+ pktpool_shared_rxdata = MALLOCZ(osh, sizeof(pktpool_t));
+ if (pktpool_shared_rxdata == NULL) {
+ ASSERT(0);
+ err = BCME_NOMEM;
+ goto error;
+ }
+#endif
+
+ /*
+ * At this early stage, there's not enough memory to allocate all
+ * requested pkts in the shared pool. Need to add to the pool
+	 * after reclaim.
+	 *
+	 * n = NRXBUFPOST + SDPCMD_RXBUFS;
+	 *
+	 * Initialization of packet pools may fail (BCME_ERROR) if the packet pool
+	 * registry is not initialized or the registry is depleted.
+	 *
+	 * A BCME_NOMEM error only indicates that the requested number of packets
+	 * could not be filled into the pool.
+ */
+ n = 1;
+ MALLOC_SET_NOPERSIST(osh); /* Ensure subsequent allocations are non-persist */
+ if ((err = pktpool_init(osh, pktpool_shared,
+ &n, PKTBUFSZ, FALSE, lbuf_basic, FALSE, 0, 0)) != BCME_OK) {
+ ASSERT(0);
+ goto error;
+ }
+ pktpool_setmaxlen(pktpool_shared, SHARED_POOL_LEN);
+
+#if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED)
+ n = 1;
+#if (((defined(EVENTLOG_D3_PRESERVE) && !defined(EVENTLOG_D3_PRESERVE_DISABLED)) || \
+ defined(BCMPOOLRECLAIM)))
+ is_heap_pool = TRUE;
+#else
+ is_heap_pool = FALSE;
+#endif /* (( EVENTLOG_D3_PRESERVE && !EVENTLOG_D3_PRESERVE_DISABLED) || BCMPOOLRECLAIM) */
+
+ if ((err = pktpool_init(osh, pktpool_shared_lfrag, &n, PKTFRAGSZ, TRUE, lbuf_frag,
+ is_heap_pool, POOL_HEAP_FLAG_D3, SHARED_FRAG_POOL_LEN >> 3)) !=
+ BCME_OK) {
+ ASSERT(0);
+ goto error;
+ }
+ pktpool_setmaxlen(pktpool_shared_lfrag, SHARED_FRAG_POOL_LEN);
+
+#if defined(BCMRESVFRAGPOOL) && !defined(BCMRESVFRAGPOOL_DISABLED)
+ n = 0; /* IMPORTANT: DO NOT allocate any packets in resv pool */
+#ifdef RESV_POOL_HEAP
+ is_heap_pool = TRUE;
+#else
+ is_heap_pool = FALSE;
+#endif /* RESV_POOL_HEAP */
+
+ if ((err = pktpool_init(osh, pktpool_resv_lfrag, &n, PKTFRAGSZ, TRUE, lbuf_frag,
+ is_heap_pool, POOL_HEAP_FLAG_RSRVPOOL, 0)) != BCME_OK) {
+ ASSERT(0);
+ goto error;
+ }
+ pktpool_setmaxlen(pktpool_resv_lfrag, RESV_FRAG_POOL_LEN);
+#endif /* RESVFRAGPOOL */
+#endif /* BCMFRAGPOOL */
+
+#if defined(BCMALFRAGPOOL) && !defined(BCMALFRAGPOOL_DISABLED)
+ n = 1;
+ is_heap_pool = FALSE;
+
+ if ((err = pktpool_init(osh, pktpool_shared_alfrag, &n, PKTFRAGSZ, TRUE, lbuf_alfrag,
+ is_heap_pool, 0, SHARED_ALFRAG_POOL_LEN >> 3)) != BCME_OK) {
+ ASSERT(0);
+ goto error;
+ }
+ pktpool_setmaxlen(pktpool_shared_alfrag, SHARED_ALFRAG_POOL_LEN);
+
+ n = 0;
+ if ((err = pktpool_init(osh, pktpool_shared_alfrag_data, &n, TXPKTALFRAG_DATA_BUFSZ, TRUE,
+ lbuf_alfrag_data, FALSE, 0, SHARED_ALFRAG_DATA_POOL_LEN >> 3)) != BCME_OK) {
+ ASSERT(0);
+ goto error;
+ }
+ pktpool_setmaxlen(pktpool_shared_alfrag_data, SHARED_ALFRAG_DATA_POOL_LEN);
+
+#endif /* BCMALFRAGPOOL */
+
+#if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)
+#if defined(BCMRXDATAPOOL) && !defined(BCMRXDATAPOOL_DISABLED)
+ n = 1;
+ if ((err = pktpool_init(osh, pktpool_shared_rxdata, &n, RXPKTFRAGDATASZ, TRUE, lbuf_rxdata,
+ FALSE, 0, 0)) != BCME_OK) {
+ ASSERT(0);
+ goto error;
+ }
+ pktpool_setmaxlen(pktpool_shared_rxdata, SHARED_RXDATA_POOL_LEN);
+
+ pktsz = 0;
+#else
+ pktsz = RXPKTFRAGDATASZ;
+#endif /* defined(BCMRXDATAPOOL) && !defined(BCMRXDATAPOOL_DISABLED) */
+
+#ifdef RESV_POOL_HEAP
+ is_heap_pool = BCMPOOLRECLAIM_ENAB() ? TRUE : FALSE;
+#else
+ is_heap_pool = FALSE;
+#endif /* RESV_POOL_HEAP */
+
+ n = 1;
+ if ((err = pktpool_init(osh, pktpool_shared_rxlfrag, &n, pktsz, TRUE, lbuf_rxfrag,
+ is_heap_pool, POOL_HEAP_FLAG_D3, 0)) != BCME_OK) {
+ ASSERT(0);
+ goto error;
+ }
+
+ pktpool_setmaxlen(pktpool_shared_rxlfrag, SHARED_RXFRAG_POOL_LEN);
+#endif /* defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED) */
+
+#if defined(BCMFRWDPOOLREORG) && !defined(BCMFRWDPOOLREORG_DISABLED)
+ /* Attach poolreorg module */
+ if ((frwd_poolreorg_info = poolreorg_attach(osh,
+#if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED)
+ pktpool_shared_lfrag,
+#else
+ NULL,
+#endif /* defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED) */
+#if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)
+ pktpool_shared_rxlfrag,
+#else
+ NULL,
+#endif /* BCMRXFRAGPOOL */
+ pktpool_shared)) == NULL) {
+ ASSERT(0);
+ err = BCME_NOMEM;
+ goto error;
+ }
+#endif /* defined(BCMFRWDPOOLREORG) && !defined(BCMFRWDPOOLREORG_DISABLED) */
+
+ pktpool_osh = osh;
+ MALLOC_CLEAR_NOPERSIST(osh);
+
+#ifdef POOL_HEAP_RECONFIG
+ lbuf_free_cb_set(hnd_pktpool_lbuf_free_cb);
+#endif
+
+ return BCME_OK;
+
+error:
+ hnd_pktpool_deinit(osh);
+
+ return err;
+} /* hnd_pktpool_init */
+
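+/*
+ * Lifecycle sketch (inferred from the comments in this file): pools start
+ * with a single packet at attach time and are grown later:
+ *
+ *	hnd_pktpool_init(osh);			attach: registry + pools, n = 1 each
+ *	...firmware reclaim...
+ *	hnd_pktpool_refill(FALSE);		called once, after reclaim
+ *	hnd_pktpool_fill(pktpool_shared, FALSE);	called at each 'wl up'
+ */
+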
+void
+BCMATTACHFN(hnd_pktpool_deinit)(osl_t *osh)
+{
+#if defined(BCMFRWDPOOLREORG) && !defined(BCMFRWDPOOLREORG_DISABLED)
+ if (frwd_poolreorg_info != NULL) {
+ poolreorg_detach(frwd_poolreorg_info);
+ }
+#endif /* defined(BCMFRWDPOOLREORG) && !defined(BCMFRWDPOOLREORG_DISABLED) */
+
+#if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)
+ if (pktpool_shared_rxlfrag != NULL) {
+ if (pktpool_shared_rxlfrag->inited) {
+ pktpool_deinit(osh, pktpool_shared_rxlfrag);
+ }
+
+ hnd_free(pktpool_shared_rxlfrag);
+ pktpool_shared_rxlfrag = (pktpool_t *)NULL;
+ }
+#endif
+
+#if defined(BCMRXDATAPOOL) && !defined(BCMRXDATAPOOL_DISABLED)
+ if (pktpool_shared_rxdata != NULL) {
+ if (pktpool_shared_rxdata->inited) {
+ pktpool_deinit(osh, pktpool_shared_rxdata);
+ }
+
+ hnd_free(pktpool_shared_rxdata);
+ pktpool_shared_rxdata = (pktpool_t *)NULL;
+ }
+#endif
+
+#if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED)
+ if (pktpool_shared_lfrag != NULL) {
+ if (pktpool_shared_lfrag->inited) {
+ pktpool_deinit(osh, pktpool_shared_lfrag);
+ }
+ hnd_free(pktpool_shared_lfrag);
+ pktpool_shared_lfrag = (pktpool_t *)NULL;
+ }
+#endif /* BCMFRAGPOOL */
+
+#if defined(BCMALFRAGPOOL) && !defined(BCMALFRAGPOOL_DISABLED)
+ if (pktpool_shared_alfrag != NULL) {
+ if (pktpool_shared_alfrag->inited) {
+ pktpool_deinit(osh, pktpool_shared_alfrag);
+ }
+ hnd_free(pktpool_shared_alfrag);
+ pktpool_shared_alfrag = (pktpool_t *)NULL;
+ }
+
+ if (pktpool_shared_alfrag_data != NULL) {
+ if (pktpool_shared_alfrag_data->inited) {
+ pktpool_deinit(osh, pktpool_shared_alfrag_data);
+ }
+
+ hnd_free(pktpool_shared_alfrag_data);
+ pktpool_shared_alfrag_data = (pktpool_t *)NULL;
+ }
+#endif /* BCMALFRAGPOOL */
+
+#if defined(BCMRESVFRAGPOOL) && !defined(BCMRESVFRAGPOOL_DISABLED)
+ if (resv_pool_info != NULL) {
+		/* nothing separate to free; the resv pool lives inside resv_pool_info */
+		pktpool_resv_lfrag = NULL;
+ hnd_free(resv_pool_info);
+ }
+#endif /* RESVFRAGPOOL */
+
+ if (pktpool_shared != NULL) {
+ if (pktpool_shared->inited) {
+ pktpool_deinit(osh, pktpool_shared);
+ }
+
+ hnd_free(pktpool_shared);
+ pktpool_shared = (pktpool_t *)NULL;
+ }
+
+ pktpool_dettach(osh);
+
+ MALLOC_CLEAR_NOPERSIST(osh);
+}
+
+/** is called at each 'wl up' */
+int
+hnd_pktpool_fill(pktpool_t *pktpool, bool minimal)
+{
+ return (pktpool_fill(pktpool_osh, pktpool, minimal));
+}
+
+/** refills pktpools after reclaim, is called once */
+void
+hnd_pktpool_refill(bool minimal)
+{
+ if (POOL_ENAB(pktpool_shared)) {
+#if defined(SRMEM)
+ if (SRMEM_ENAB()) {
+ int maxlen = pktpool_max_pkts(pktpool_shared);
+ int n_pkts = pktpool_tot_pkts(pktpool_shared);
+
+ for (; n_pkts < maxlen; n_pkts++) {
+ void *p;
+ if ((p = PKTSRGET(pktpool_max_pkt_bytes(pktpool_shared))) == NULL)
+ break;
+ pktpool_add(pktpool_shared, p);
+ }
+ }
+#endif /* SRMEM */
+ pktpool_fill(pktpool_osh, pktpool_shared, minimal);
+ }
+/* fragpool reclaim */
+#ifdef BCMFRAGPOOL
+ if (POOL_ENAB(pktpool_shared_lfrag)) {
+ pktpool_fill(pktpool_osh, pktpool_shared_lfrag, minimal);
+ }
+#endif /* BCMFRAGPOOL */
+
+/* alfragpool reclaim */
+#ifdef BCMALFRAGPOOL
+ if (POOL_ENAB(pktpool_shared_alfrag)) {
+ pktpool_fill(pktpool_osh, pktpool_shared_alfrag, minimal);
+ }
+
+ if (POOL_ENAB(pktpool_shared_alfrag_data)) {
+ pktpool_fill(pktpool_osh, pktpool_shared_alfrag_data, minimal);
+ }
+#endif /* BCMALFRAGPOOL */
+
+/* rx fragpool reclaim */
+#ifdef BCMRXFRAGPOOL
+ if (POOL_ENAB(pktpool_shared_rxlfrag)) {
+ pktpool_fill(pktpool_osh, pktpool_shared_rxlfrag, minimal);
+ }
+#endif
+
+#ifdef BCMRXDATAPOOL
+ if (POOL_ENAB(pktpool_shared_rxdata)) {
+ pktpool_fill(pktpool_osh, pktpool_shared_rxdata, minimal);
+ }
+#endif
+
+#if defined(BCMFRAGPOOL) && defined(BCMRESVFRAGPOOL)
+ if (POOL_ENAB(pktpool_resv_lfrag)) {
+ int resv_size = (pktpool_resv_lfrag->max_pkt_bytes + LBUFFRAGSZ) *
+ pktpool_resv_lfrag->maxlen;
+ hnd_resv_pool_init(resv_pool_info, resv_size);
+ hnd_resv_pool_enable(resv_pool_info);
+ }
+#endif /* BCMRESVFRAGPOOL */
+}
+
+#ifdef POOL_HEAP_RECONFIG
+#define hnd_pktpool_release_active_set(pktp) (pktpool_heap_rel_active |= (1 << pktp->id))
+#define hnd_pktpool_release_active_reset(pktp) (pktpool_heap_rel_active &= ~(1 << pktp->id))
+/* Function enable/disable heap pool usage */
+
+void
+hnd_pktpool_heap_handle(osl_t *osh, uint32 flag, bool enable)
+{
+ int i = 0;
+ pktpool_t *pktp;
+ /*
+	 * Loop through all the registered pktpools.
+	 * Trigger retrieval of pkts from the heap back to the pool if no
+ * flags are active.
+ */
+ for (i = 1; i < PKTPOOL_MAXIMUM_ID; i++) {
+ if ((pktp = get_pktpools_registry(i)) != NULL) {
+ if ((flag == pktp->poolheap_flag) && pktp->is_heap_pool) {
+ if (enable) {
+ hnd_pktpool_heap_pkt_release(pktpool_osh, pktp, flag);
+ } else {
+ hnd_pktpool_heap_pkt_retrieve(pktp, flag);
+ }
+ }
+ }
+ }
+}
+
+/* Do memory allocation from pool heap memory */
+void *
+hnd_pktpool_freelist_alloc(uint size, uint alignbits, uint32 flag)
+{
+ int i = 0;
+ pktpool_t *pktp;
+ void *p = NULL;
+ for (i = 1; i < PKTPOOL_MAXIMUM_ID; i++) {
+ if ((pktp = get_pktpools_registry(i)) != NULL) {
+ if ((flag == pktp->poolheap_flag) && pktp->is_heap_pool) {
+ p = rte_freelist_mgr_alloc(size, alignbits, pktp->mem_handle);
+ if (p)
+ break;
+ }
+ }
+ }
+ return p;
+}
+
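+/*
+ * Usage sketch (illustrative only): borrowing memory from a heap pool's
+ * freelist; the flag must match the poolheap_flag a pool was initialized
+ * with, e.g. the POOL_HEAP_FLAG_D3 value used earlier in this file:
+ *
+ *	void *buf = hnd_pktpool_freelist_alloc(len, 0, POOL_HEAP_FLAG_D3);
+ *	if (buf == NULL)
+ *		buf = MALLOC(osh, len);		fall back to the regular heap
+ */
+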
+/* Release pkts from pool to free heap */
+static void
+hnd_pktpool_heap_pkt_release(osl_t *osh, pktpool_t *pktp, uint32 flag)
+{
+ pktpool_cb_extn_t cb = NULL;
+ void *arg = NULL;
+ int i = 0;
+ pktpool_heap_cb_reg_t *pktp_heap_cb = hnd_pool_get_cb_registry();
+
+ pktp->release_active = FALSE;
+ hnd_pktpool_release_active_reset(pktp);
+
+ if (pktp->n_pkts <= pktp->min_backup_buf)
+ return;
+ /* call module specific callbacks */
+ if (BCMSPLITRX_ENAB() && (pktp->type == lbuf_rxfrag)) {
+ /* If pool is shared rx frag pool, use call back fn to reclaim host address
+ * and Rx cpl ID associated with the pkt.
+ */
+ ASSERT(pktp->cbext.cb != NULL);
+ cb = pktp->cbext.cb;
+ arg = pktp->cbext.arg;
+ } else if ((pktp->type == lbuf_basic) && (pktp->rxcplidfn.cb != NULL)) {
+ /* If pool is shared rx pool, use call back fn to freeup Rx cpl ID
+ * associated with the pkt.
+ */
+ cb = pktp->rxcplidfn.cb;
+ arg = pktp->rxcplidfn.arg;
+ }
+
+ while (pktp->avail > pktp->min_backup_buf) {
+ void * p = pktp->freelist;
+
+ pktp->freelist = PKTFREELIST(p); /* unlink head packet from free list */
+ PKTSETFREELIST(p, NULL);
+
+ if (cb != NULL) {
+ if (cb(pktp, arg, p, REMOVE_RXCPLID, NULL)) {
+ PKTSETFREELIST(p, pktp->freelist);
+ pktp->freelist = p;
+ break;
+ }
+ }
+
+ PKTSETPOOL(osh, p, FALSE, NULL); /* clear pool ID tag in pkt */
+
+ lb_set_nofree(p);
+ total_pool_pktid_count++;
+ PKTFREE(osh, p, pktp->istx); /* free the packet */
+
+ rte_freelist_mgr_add(p, pktp->mem_handle);
+ pktp->avail--;
+ pktp->n_pkts--;
+ pktp->poolheap_count++;
+ }
+
+ /* Execute call back for upper layer which used pkt from heap */
+ for (i = 0; i < PKTPOOL_MAX_HEAP_CB; i++) {
+ if ((pktp_heap_cb[i].fn != NULL) &&
+ (flag == pktp_heap_cb[i].flag))
+ (pktp_heap_cb[i].fn)(pktp_heap_cb[i].ctxt, TRUE);
+	}
+}
+
+static pktpool_heap_cb_reg_t *
+BCMRAMFN(hnd_pool_get_cb_registry)(void)
+{
+ return pktpool_heap_cb_reg;
+}
+
+static void
+BCMFASTPATH(hnd_pktpool_lbuf_free_cb)(uint8 poolid)
+{
+ int i = 0;
+ pktpool_t *pktp;
+
+ if (poolid == PKTPOOL_INVALID_ID && pktpool_heap_rel_active) {
+ for (i = 1; i < PKTPOOL_MAXIMUM_ID; i++) {
+ if ((pktp = get_pktpools_registry(i)) != NULL) {
+ if (pktp->is_heap_pool && (pktp->release_active)) {
+ rte_freelist_mgr_release(pktp->mem_handle);
+ }
+ }
+ }
+ }
+}
+
+/* Take back pkts from free mem and refill pool */
+static void
+hnd_pktpool_heap_pkt_retrieve(pktpool_t *pktp, uint32 flag)
+{
+ int i = 0;
+ pktpool_heap_cb_reg_t *pktp_heap_cb = hnd_pool_get_cb_registry();
+ pktp->release_active = TRUE;
+ hnd_pktpool_release_active_set(pktp);
+
+ /* Execute call back for upper layer which used pkt from heap */
+ for (i = 0; i < PKTPOOL_MAX_HEAP_CB; i++) {
+ if ((pktp_heap_cb[i].fn != NULL) &&
+ (flag == pktp_heap_cb[i].flag))
+ (pktp_heap_cb[i].fn)(pktp_heap_cb[i].ctxt, FALSE);
+ }
+
+ rte_freelist_mgr_release(pktp->mem_handle);
+}
+
+/* Function to add back the pkt to pktpool */
+static int
+hnd_pktpool_heap_get_cb(uint8 handle, void *ctxt, void *pkt, uint pktsize)
+{
+ pktpool_t *pktp = (pktpool_t *)ctxt;
+ struct lbuf *lb;
+ int ret = BCME_ERROR;
+ if (pktp != NULL) {
+ if ((lb = PKTALLOC_ON_LOC(pktpool_osh, pktp->max_pkt_bytes,
+ pktp->type, pkt, pktsize)) != NULL) {
+ if ((ret = pktpool_add(pktp, lb)) == BCME_OK) {
+ pktp->poolheap_count--;
+ ASSERT(total_pool_pktid_count > 0);
+ total_pool_pktid_count--;
+ if (pktp->poolheap_count == 0) {
+ pktp->release_active = FALSE;
+ hnd_pktpool_release_active_reset(pktp);
+ }
+ if (pktp->cbcnt) {
+ if (pktp->empty == FALSE)
+ pktpool_avail_notify(pktp);
+ }
+ } else {
+ /*
+			 * pktpool_add failing indicates the pool already holds
+			 * the maximum number of pkts, so free this buffer back
+			 * to the heap
+ */
+ PKTFREE(pktpool_osh, lb, pktsize);
+ }
+ ret = BCME_OK;
+ }
+ }
+ return ret;
+}
+
+int
+hnd_pktpool_heap_register_cb(pktpool_heap_cb_t fn, void *ctxt, uint32 flag)
+{
+ int i = 0;
+ int err = BCME_ERROR;
+ pktpool_heap_cb_reg_t *pktp_heap_cb = hnd_pool_get_cb_registry();
+
+ /* Search for free entry */
+ for (i = 0; i < PKTPOOL_MAX_HEAP_CB; i++) {
+ if (pktp_heap_cb[i].fn == NULL)
+ break;
+ }
+
+ if (i < PKTPOOL_MAX_HEAP_CB) {
+ pktp_heap_cb[i].fn = fn;
+ pktp_heap_cb[i].ctxt = ctxt;
+ pktp_heap_cb[i].flag = flag;
+ err = BCME_OK;
+ }
+ return err;
+}
+
+int
+hnd_pktpool_heap_deregister_cb(pktpool_heap_cb_t fn)
+{
+ int i = 0;
+ int err = BCME_ERROR;
+ pktpool_heap_cb_reg_t *pktp_heap_cb = hnd_pool_get_cb_registry();
+
+ /* Search for matching entry */
+ for (i = 0; i < PKTPOOL_MAX_HEAP_CB; i++) {
+ if (pktp_heap_cb[i].fn == fn)
+ break;
+ }
+
+ if (i < PKTPOOL_MAX_HEAP_CB) {
+ pktp_heap_cb[i].fn = NULL;
+ err = BCME_OK;
+ }
+ return err;
+}
+
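+/*
+ * Usage sketch (illustrative only): a module that lends its memory to the
+ * pool heap registers for enable/disable notifications; 'my_heap_notify'
+ * is a hypothetical pktpool_heap_cb_t:
+ *
+ *	static void my_heap_notify(void *ctxt, bool enabled) { ... }
+ *
+ *	hnd_pktpool_heap_register_cb(my_heap_notify, ctx, POOL_HEAP_FLAG_D3);
+ *	...
+ *	hnd_pktpool_heap_deregister_cb(my_heap_notify);
+ */
+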
+uint16
+hnd_pktpool_get_min_bkup_buf(pktpool_t *pktp)
+{
+ return pktp->min_backup_buf;
+}
+#endif /* POOL_HEAP_RECONFIG */
+
+uint32
+hnd_pktpool_get_total_poolheap_count(void)
+{
+ return total_pool_pktid_count;
+}
+#endif /* BCMPKTPOOL */
diff --git a/bcmdhd.101.10.361.x/hnd_pktq.c b/bcmdhd.101.10.361.x/hnd_pktq.c
new file mode 100755
index 0000000..bd5cc81
--- /dev/null
+++ b/bcmdhd.101.10.361.x/hnd_pktq.c
@@ -0,0 +1,1548 @@
+/*
+ * HND generic pktq operation primitives
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#include <typedefs.h>
+#include <osl.h>
+#include <osl_ext.h>
+#include <bcmutils.h>
+#include <hnd_pktq.h>
+
+/* mutex macros for thread safe */
+#ifdef HND_PKTQ_THREAD_SAFE
+#define HND_PKTQ_MUTEX_CREATE(name, mutex) osl_ext_mutex_create(name, mutex)
+#define HND_PKTQ_MUTEX_DELETE(mutex) osl_ext_mutex_delete(mutex)
+#define HND_PKTQ_MUTEX_ACQUIRE(mutex, msec) osl_ext_mutex_acquire(mutex, msec)
+#define HND_PKTQ_MUTEX_RELEASE(mutex) osl_ext_mutex_release(mutex)
+#else
+#define HND_PKTQ_MUTEX_CREATE(name, mutex) OSL_EXT_SUCCESS
+#define HND_PKTQ_MUTEX_DELETE(mutex) OSL_EXT_SUCCESS
+#define HND_PKTQ_MUTEX_ACQUIRE(mutex, msec) OSL_EXT_SUCCESS
+#define HND_PKTQ_MUTEX_RELEASE(mutex) OSL_EXT_SUCCESS
+#endif /* HND_PKTQ_THREAD_SAFE */
+
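+/*
+ * Note (sketch): without HND_PKTQ_THREAD_SAFE the macros above expand to the
+ * OSL_EXT_SUCCESS constant, so guard sequences such as
+ *
+ *	if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ *		return NULL;
+ *
+ * compile away entirely, and the queues are then safe only for
+ * single-context use.
+ */
+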
+/* status during txfifo sync */
+#if defined(PROP_TXSTATUS)
+#define TXQ_PKT_DEL 0x01
+#define HEAD_PKT_FLUSHED 0xFF
+#endif /* defined(PROP_TXSTATUS) */
+/*
+ * osl multiple-precedence packet queue
+ * hi_prec is always >= the number of the highest non-empty precedence
+ */
+void *
+BCMFASTPATH(pktq_penq)(struct pktq *pq, int prec, void *p)
+{
+ struct pktq_prec *q;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return NULL;
+
+ ASSERT_FP(prec >= 0 && prec < pq->num_prec);
+ ASSERT_FP(PKTLINK(p) == NULL); /* queueing chains not allowed */
+
+ ASSERT_FP(!pktq_full(pq));
+ ASSERT_FP(!pktqprec_full(pq, prec));
+
+ q = &pq->q[prec];
+
+ if (q->head)
+ PKTSETLINK(q->tail, p);
+ else
+ q->head = p;
+
+ q->tail = p;
+ q->n_pkts++;
+
+ pq->n_pkts_tot++;
+
+ if (pq->hi_prec < prec)
+ pq->hi_prec = (uint8)prec;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+ return NULL;
+
+ return p;
+}
+
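+/*
+ * Usage sketch (illustrative only): a multi-precedence queue is set up once
+ * with pktq_init() (defined later in this file), then packets are enqueued
+ * at a precedence and dequeued highest-precedence-first:
+ *
+ *	struct pktq q;
+ *	int prec;
+ *	pktq_init(&q, 4, 64);		4 precedences, 64 pkts total
+ *	pktq_penq(&q, 2, p);		append p at precedence 2
+ *	p = pktq_deq(&q, &prec);	highest non-empty precedence first
+ */
+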
+void *
+BCMFASTPATH(spktq_enq_chain)(struct spktq *dspq, struct spktq *sspq)
+{
+ struct pktq_prec *dq;
+ struct pktq_prec *sq;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_ACQUIRE(&dspq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return NULL;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_ACQUIRE(&sspq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return NULL;
+
+ dq = &dspq->q;
+ sq = &sspq->q;
+
+ if (dq->head) {
+ PKTSETLINK(OSL_PHYS_TO_VIRT_ADDR(dq->tail), OSL_VIRT_TO_PHYS_ADDR(sq->head));
+ }
+ else {
+ dq->head = sq->head;
+ }
+
+ dq->tail = sq->tail;
+ dq->n_pkts += sq->n_pkts;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_RELEASE(&dspq->mutex) != OSL_EXT_SUCCESS)
+ return NULL;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_RELEASE(&sspq->mutex) != OSL_EXT_SUCCESS)
+ return NULL;
+
+ return dspq;
+}
+
+/*
+ * osl simple, non-priority packet queue
+ */
+void *
+BCMFASTPATH(spktq_enq)(struct spktq *spq, void *p)
+{
+ struct pktq_prec *q;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_ACQUIRE(&spq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return NULL;
+
+ ASSERT_FP(!spktq_full(spq));
+
+ PKTSETLINK(p, NULL);
+
+ q = &spq->q;
+
+ if (q->head)
+ PKTSETLINK(q->tail, p);
+ else
+ q->head = p;
+
+ q->tail = p;
+ q->n_pkts++;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_RELEASE(&spq->mutex) != OSL_EXT_SUCCESS)
+ return NULL;
+
+ return p;
+}
+
+void *
+BCMPOSTTRAPFASTPATH(pktq_penq_head)(struct pktq *pq, int prec, void *p)
+{
+ struct pktq_prec *q;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return NULL;
+
+ ASSERT_FP(prec >= 0 && prec < pq->num_prec);
+ ASSERT_FP(PKTLINK(p) == NULL); /* queueing chains not allowed */
+
+ ASSERT_FP(!pktq_full(pq));
+ ASSERT_FP(!pktqprec_full(pq, prec));
+
+ q = &pq->q[prec];
+
+ if (q->head == NULL)
+ q->tail = p;
+
+ PKTSETLINK(p, q->head);
+ q->head = p;
+ q->n_pkts++;
+
+ pq->n_pkts_tot++;
+
+ if (pq->hi_prec < prec)
+ pq->hi_prec = (uint8)prec;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+ return NULL;
+
+ return p;
+}
+
+void *
+BCMFASTPATH(spktq_enq_head)(struct spktq *spq, void *p)
+{
+ struct pktq_prec *q;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_ACQUIRE(&spq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return NULL;
+
+ ASSERT_FP(!spktq_full(spq));
+
+ PKTSETLINK(p, NULL);
+
+ q = &spq->q;
+
+ if (q->head == NULL)
+ q->tail = p;
+
+ PKTSETLINK(p, q->head);
+ q->head = p;
+ q->n_pkts++;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_RELEASE(&spq->mutex) != OSL_EXT_SUCCESS)
+ return NULL;
+
+ return p;
+}
+
+void *
+BCMFASTPATH(pktq_pdeq)(struct pktq *pq, int prec)
+{
+ struct pktq_prec *q;
+ void *p;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return NULL;
+
+ ASSERT_FP(prec >= 0 && prec < pq->num_prec);
+
+ q = &pq->q[prec];
+
+ if ((p = q->head) == NULL)
+ goto done;
+
+ if ((q->head = PKTLINK(p)) == NULL)
+ q->tail = NULL;
+
+ q->n_pkts--;
+
+ pq->n_pkts_tot--;
+
+#ifdef WL_TXQ_STALL
+ q->dequeue_count++;
+#endif
+
+ PKTSETLINK(p, NULL);
+
+done:
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+ return NULL;
+
+ return p;
+}
+
+void *
+BCMFASTPATH(spktq_deq)(struct spktq *spq)
+{
+ struct pktq_prec *q;
+ void *p;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_ACQUIRE(&spq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return NULL;
+
+ q = &spq->q;
+
+ if ((p = q->head) == NULL)
+ goto done;
+
+ if ((q->head = PKTLINK(p)) == NULL)
+ q->tail = NULL;
+
+ q->n_pkts--;
+
+#ifdef WL_TXQ_STALL
+ q->dequeue_count++;
+#endif
+
+ PKTSETLINK(p, NULL);
+
+done:
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_RELEASE(&spq->mutex) != OSL_EXT_SUCCESS)
+ return NULL;
+
+ return p;
+}
+
+void*
+BCMFASTPATH(spktq_deq_virt)(struct spktq *spq)
+{
+ struct pktq_prec *q;
+ void *p;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_ACQUIRE(&spq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return NULL;
+
+ q = &spq->q;
+
+ if ((p = q->head) == NULL)
+ goto done;
+
+ p = (void *)OSL_PHYS_TO_VIRT_ADDR(p);
+
+ if ((q->head = (void*)PKTLINK(p)) == NULL)
+ q->tail = NULL;
+
+ q->n_pkts--;
+
+#ifdef WL_TXQ_STALL
+ q->dequeue_count++;
+#endif
+
+ PKTSETLINK(p, NULL);
+
+done:
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_RELEASE(&spq->mutex) != OSL_EXT_SUCCESS)
+ return NULL;
+
+ return p;
+}
+
+void *
+BCMFASTPATH(pktq_pdeq_tail)(struct pktq *pq, int prec)
+{
+ struct pktq_prec *q;
+ void *p, *prev;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return NULL;
+
+ ASSERT_FP(prec >= 0 && prec < pq->num_prec);
+
+ q = &pq->q[prec];
+
+ if ((p = q->head) == NULL)
+ goto done;
+
+ for (prev = NULL; p != q->tail; p = PKTLINK(p))
+ prev = p;
+
+ if (prev)
+ PKTSETLINK(prev, NULL);
+ else
+ q->head = NULL;
+
+ q->tail = prev;
+ q->n_pkts--;
+
+ pq->n_pkts_tot--;
+
+#ifdef WL_TXQ_STALL
+ q->dequeue_count++;
+#endif
+done:
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+ return NULL;
+
+ return p;
+}
+
+void *
+BCMFASTPATH(spktq_deq_tail)(struct spktq *spq)
+{
+ struct pktq_prec *q;
+ void *p, *prev;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_ACQUIRE(&spq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return NULL;
+
+ q = &spq->q;
+
+ if ((p = q->head) == NULL)
+ goto done;
+
+ for (prev = NULL; p != q->tail; p = PKTLINK(p))
+ prev = p;
+
+ if (prev)
+ PKTSETLINK(prev, NULL);
+ else
+ q->head = NULL;
+
+ q->tail = prev;
+ q->n_pkts--;
+
+#ifdef WL_TXQ_STALL
+ q->dequeue_count++;
+#endif
+done:
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_RELEASE(&spq->mutex) != OSL_EXT_SUCCESS)
+ return NULL;
+
+ return p;
+}
+
+void *
+pktq_peek_tail(struct pktq *pq, int *prec_out)
+{
+ int prec;
+ void *p = NULL;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return NULL;
+
+ if (pq->n_pkts_tot == 0)
+ goto done;
+
+ for (prec = 0; prec < pq->hi_prec; prec++)
+ if (pq->q[prec].head)
+ break;
+
+ if (prec_out)
+ *prec_out = prec;
+
+ p = pq->q[prec].tail;
+
+done:
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+ return NULL;
+
+ return p;
+}
+
+/*
+ * Append spktq 'list' to the tail of pktq 'pq'
+ */
+void
+BCMFASTPATH(pktq_append)(struct pktq *pq, int prec, struct spktq *list)
+{
+ struct pktq_prec *q;
+ struct pktq_prec *list_q;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return;
+
+ list_q = &list->q;
+
+ /* empty list check */
+ if (list_q->head == NULL)
+ goto done;
+
+ ASSERT_FP(prec >= 0 && prec < pq->num_prec);
+ ASSERT_FP(PKTLINK(list_q->tail) == NULL); /* terminated list */
+
+ ASSERT_FP(!pktq_full(pq));
+ ASSERT_FP(!pktqprec_full(pq, prec));
+
+ q = &pq->q[prec];
+
+ if (q->head)
+ PKTSETLINK(q->tail, list_q->head);
+ else
+ q->head = list_q->head;
+
+ q->tail = list_q->tail;
+ q->n_pkts += list_q->n_pkts;
+ pq->n_pkts_tot += list_q->n_pkts;
+
+ if (pq->hi_prec < prec)
+ pq->hi_prec = (uint8)prec;
+
+#ifdef WL_TXQ_STALL
+ list_q->dequeue_count += list_q->n_pkts;
+#endif
+
+ list_q->head = NULL;
+ list_q->tail = NULL;
+ list_q->n_pkts = 0;
+
+done:
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+ return;
+}
+
+/*
+ * Append spktq 'list' to the tail of spktq 'spq'
+ */
+void
+BCMFASTPATH(spktq_append)(struct spktq *spq, struct spktq *list)
+{
+ struct pktq_prec *q;
+ struct pktq_prec *list_q;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_ACQUIRE(&spq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return;
+
+ list_q = &list->q;
+
+ /* empty list check */
+ if (list_q->head == NULL)
+ goto done;
+
+ ASSERT_FP(PKTLINK(list_q->tail) == NULL); /* terminated list */
+
+ ASSERT_FP(!spktq_full(spq));
+
+ q = &spq->q;
+
+ if (q->head)
+ PKTSETLINK(q->tail, list_q->head);
+ else
+ q->head = list_q->head;
+
+ q->tail = list_q->tail;
+ q->n_pkts += list_q->n_pkts;
+
+#ifdef WL_TXQ_STALL
+ list_q->dequeue_count += list_q->n_pkts;
+#endif
+
+ list_q->head = NULL;
+ list_q->tail = NULL;
+ list_q->n_pkts = 0;
+
+done:
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_RELEASE(&spq->mutex) != OSL_EXT_SUCCESS)
+ return;
+}
+
+/*
+ * Prepend spktq 'list' to the head of pktq 'pq'
+ */
+void
+BCMFASTPATH(pktq_prepend)(struct pktq *pq, int prec, struct spktq *list)
+{
+ struct pktq_prec *q;
+ struct pktq_prec *list_q;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return;
+
+ list_q = &list->q;
+
+ /* empty list check */
+ if (list_q->head == NULL)
+ goto done;
+
+ ASSERT_FP(prec >= 0 && prec < pq->num_prec);
+ ASSERT_FP(PKTLINK(list_q->tail) == NULL); /* terminated list */
+
+ ASSERT_FP(!pktq_full(pq));
+ ASSERT_FP(!pktqprec_full(pq, prec));
+
+ q = &pq->q[prec];
+
+ /* set the tail packet of list to point at the former pq head */
+ PKTSETLINK(list_q->tail, q->head);
+ /* the new q head is the head of list */
+ q->head = list_q->head;
+
+ /* If the q tail was non-null, then it stays as is.
+ * If the q tail was null, it is now the tail of list
+ */
+ if (q->tail == NULL) {
+ q->tail = list_q->tail;
+ }
+
+ q->n_pkts += list_q->n_pkts;
+ pq->n_pkts_tot += list_q->n_pkts;
+
+ if (pq->hi_prec < prec)
+ pq->hi_prec = (uint8)prec;
+
+#ifdef WL_TXQ_STALL
+ list_q->dequeue_count += list_q->n_pkts;
+#endif
+
+ list_q->head = NULL;
+ list_q->tail = NULL;
+ list_q->n_pkts = 0;
+
+done:
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+ return;
+}
+
+/*
+ * Prepend spktq 'list' to the head of spktq 'spq'
+ */
+void
+BCMFASTPATH(spktq_prepend)(struct spktq *spq, struct spktq *list)
+{
+ struct pktq_prec *q;
+ struct pktq_prec *list_q;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_ACQUIRE(&spq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return;
+
+ list_q = &list->q;
+
+ /* empty list check */
+ if (list_q->head == NULL)
+ goto done;
+
+ ASSERT_FP(PKTLINK(list_q->tail) == NULL); /* terminated list */
+
+ ASSERT_FP(!spktq_full(spq));
+
+ q = &spq->q;
+
+ /* set the tail packet of list to point at the former pq head */
+ PKTSETLINK(list_q->tail, q->head);
+ /* the new q head is the head of list */
+ q->head = list_q->head;
+
+ /* If the q tail was non-null, then it stays as is.
+ * If the q tail was null, it is now the tail of list
+ */
+ if (q->tail == NULL) {
+ q->tail = list_q->tail;
+ }
+
+ q->n_pkts += list_q->n_pkts;
+
+#ifdef WL_TXQ_STALL
+ list_q->dequeue_count += list_q->n_pkts;
+#endif
+
+ list_q->head = NULL;
+ list_q->tail = NULL;
+ list_q->n_pkts = 0;
+
+done:
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_RELEASE(&spq->mutex) != OSL_EXT_SUCCESS)
+ return;
+}
+
+void *
+BCMFASTPATH(pktq_pdeq_prev)(struct pktq *pq, int prec, void *prev_p)
+{
+ struct pktq_prec *q;
+ void *p = NULL;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return NULL;
+
+ ASSERT_FP(prec >= 0 && prec < pq->num_prec);
+
+ q = &pq->q[prec];
+
+ if (prev_p == NULL)
+ goto done;
+
+ if ((p = PKTLINK(prev_p)) == NULL)
+ goto done;
+
+ q->n_pkts--;
+
+ pq->n_pkts_tot--;
+
+#ifdef WL_TXQ_STALL
+ q->dequeue_count++;
+#endif
+ PKTSETLINK(prev_p, PKTLINK(p));
+ PKTSETLINK(p, NULL);
+
+done:
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+ return NULL;
+
+ return p;
+}
+
+void *
+BCMFASTPATH(pktq_pdeq_with_fn)(struct pktq *pq, int prec, ifpkt_cb_t fn, int arg)
+{
+ struct pktq_prec *q;
+ void *p, *prev = NULL;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return NULL;
+
+ ASSERT_FP(prec >= 0 && prec < pq->num_prec);
+
+ q = &pq->q[prec];
+ p = q->head;
+
+ while (p) {
+ if (fn == NULL || (*fn)(p, arg)) {
+ break;
+ } else {
+ prev = p;
+ p = PKTLINK(p);
+ }
+ }
+ if (p == NULL)
+ goto done;
+
+ if (prev == NULL) {
+ if ((q->head = PKTLINK(p)) == NULL) {
+ q->tail = NULL;
+ }
+ } else {
+ PKTSETLINK(prev, PKTLINK(p));
+ if (q->tail == p) {
+ q->tail = prev;
+ }
+ }
+
+ q->n_pkts--;
+
+ pq->n_pkts_tot--;
+
+#ifdef WL_TXQ_STALL
+ q->dequeue_count++;
+#endif
+ PKTSETLINK(p, NULL);
+
+done:
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+ return NULL;
+
+ return p;
+}
+
+bool
+BCMFASTPATH(pktq_pdel)(struct pktq *pq, void *pktbuf, int prec)
+{
+ bool ret = FALSE;
+ struct pktq_prec *q;
+ void *p = NULL;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return FALSE;
+
+ ASSERT_FP(prec >= 0 && prec < pq->num_prec);
+
+ /* Should this just assert pktbuf? */
+ if (!pktbuf)
+ goto done;
+
+ q = &pq->q[prec];
+
+ if (q->head == pktbuf) {
+ if ((q->head = PKTLINK(pktbuf)) == NULL)
+ q->tail = NULL;
+ } else {
+ for (p = q->head; p && PKTLINK(p) != pktbuf; p = PKTLINK(p))
+ ;
+ if (p == NULL)
+ goto done;
+
+ PKTSETLINK(p, PKTLINK(pktbuf));
+ if (q->tail == pktbuf)
+ q->tail = p;
+ }
+
+ q->n_pkts--;
+ pq->n_pkts_tot--;
+
+#ifdef WL_TXQ_STALL
+ q->dequeue_count++;
+#endif
+
+ PKTSETLINK(pktbuf, NULL);
+ ret = TRUE;
+
+done:
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+ return FALSE;
+
+ return ret;
+}
+
+static void
+_pktq_pfilter(struct pktq *pq, int prec, pktq_filter_t fltr, void* fltr_ctx,
+ defer_free_pkt_fn_t defer, void *defer_ctx)
+{
+ struct pktq_prec wq;
+ struct pktq_prec *q;
+ void *p;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return;
+
+ /* move the prec queue aside to a work queue */
+ q = &pq->q[prec];
+
+ wq = *q;
+
+ q->head = NULL;
+ q->tail = NULL;
+ q->n_pkts = 0;
+
+#ifdef WL_TXQ_STALL
+ q->dequeue_count += wq.n_pkts;
+#endif
+
+ pq->n_pkts_tot -= wq.n_pkts;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+ return;
+
+ /* start with the head of the work queue */
+ while ((p = wq.head) != NULL) {
+ /* unlink the current packet from the list */
+ wq.head = PKTLINK(p);
+ PKTSETLINK(p, NULL);
+ wq.n_pkts--;
+
+#ifdef WL_TXQ_STALL
+ wq.dequeue_count++;
+#endif
+
+ /* call the filter function on current packet */
+ ASSERT(fltr != NULL);
+ switch ((*fltr)(fltr_ctx, p)) {
+ case PKT_FILTER_NOACTION:
+ /* put this packet back */
+ pktq_penq(pq, prec, p);
+ break;
+
+ case PKT_FILTER_DELETE:
+ /* delete this packet */
+ ASSERT(defer != NULL);
+ (*defer)(defer_ctx, p);
+ break;
+
+ case PKT_FILTER_REMOVE:
+ /* pkt already removed from list */
+ break;
+
+ default:
+ ASSERT(0);
+ break;
+ }
+ }
+
+ ASSERT(wq.n_pkts == 0);
+}
+
+void
+pktq_pfilter(struct pktq *pq, int prec, pktq_filter_t fltr, void* fltr_ctx,
+ defer_free_pkt_fn_t defer, void *defer_ctx, flush_free_pkt_fn_t flush, void *flush_ctx)
+{
+ _pktq_pfilter(pq, prec, fltr, fltr_ctx, defer, defer_ctx);
+
+ ASSERT(flush != NULL);
+ (*flush)(flush_ctx);
+}
+
+void
+pktq_filter(struct pktq *pq, pktq_filter_t fltr, void* fltr_ctx,
+ defer_free_pkt_fn_t defer, void *defer_ctx, flush_free_pkt_fn_t flush, void *flush_ctx)
+{
+ bool filter = FALSE;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return;
+
+	/* Optimization: if the pktq holds no packets, just return.
+	 * A pktq len of 0 means all of the pktq's prec queues are empty.
+	 */
+ if (pq->n_pkts_tot > 0) {
+ filter = TRUE;
+ }
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+ return;
+
+ if (filter) {
+ int prec;
+
+ PKTQ_PREC_ITER(pq, prec) {
+ _pktq_pfilter(pq, prec, fltr, fltr_ctx, defer, defer_ctx);
+ }
+
+ ASSERT(flush != NULL);
+ (*flush)(flush_ctx);
+ }
+}
+
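+/*
+ * Usage sketch (illustrative only): a filter callback classifies each
+ * packet; PKT_FILTER_DELETE hands it to 'defer' and one 'flush' reclaims
+ * the batch. The result type name is assumed from the PKT_FILTER_* cases
+ * above; 'drop_marked', 'pkt_is_marked' and the free callbacks are
+ * hypothetical:
+ *
+ *	static pktq_filter_result_t
+ *	drop_marked(void *ctx, void *pkt)
+ *	{
+ *		return pkt_is_marked(pkt) ? PKT_FILTER_DELETE : PKT_FILTER_NOACTION;
+ *	}
+ *
+ *	pktq_filter(pq, drop_marked, NULL, defer_free, osh, flush_free, osh);
+ */
+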
+void
+spktq_filter(struct spktq *spq, pktq_filter_t fltr, void* fltr_ctx,
+ defer_free_pkt_fn_t defer, void *defer_ctx, flush_free_pkt_fn_t flush, void *flush_ctx)
+{
+ struct pktq_prec wq;
+ struct pktq_prec *q;
+ void *p = NULL;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_ACQUIRE(&spq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return;
+
+ q = &spq->q;
+
+	/* Optimization: if the pktq_prec holds no packets, just return. */
+ if (q->n_pkts == 0) {
+ (void)HND_PKTQ_MUTEX_RELEASE(&spq->mutex);
+ return;
+ }
+
+ wq = *q;
+
+ q->head = NULL;
+ q->tail = NULL;
+ q->n_pkts = 0;
+
+#ifdef WL_TXQ_STALL
+ q->dequeue_count += wq.n_pkts;
+#endif
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_RELEASE(&spq->mutex) != OSL_EXT_SUCCESS)
+ return;
+
+ /* start with the head of the work queue */
+
+ while ((p = wq.head) != NULL) {
+ /* unlink the current packet from the list */
+ wq.head = PKTLINK(p);
+ PKTSETLINK(p, NULL);
+ wq.n_pkts--;
+
+#ifdef WL_TXQ_STALL
+ wq.dequeue_count++;
+#endif
+
+ /* call the filter function on current packet */
+ ASSERT(fltr != NULL);
+ switch ((*fltr)(fltr_ctx, p)) {
+ case PKT_FILTER_NOACTION:
+ /* put this packet back */
+ spktq_enq(spq, p);
+ break;
+
+ case PKT_FILTER_DELETE:
+ /* delete this packet */
+ ASSERT(defer != NULL);
+ (*defer)(defer_ctx, p);
+ break;
+
+ case PKT_FILTER_REMOVE:
+ /* pkt already removed from list */
+ break;
+
+ default:
+ ASSERT(0);
+ break;
+ }
+ }
+
+ ASSERT(wq.n_pkts == 0);
+
+ ASSERT(flush != NULL);
+ (*flush)(flush_ctx);
+}
+
+bool
+pktq_init(struct pktq *pq, int num_prec, uint max_pkts)
+{
+ int prec;
+
+ ASSERT(num_prec > 0 && num_prec <= PKTQ_MAX_PREC);
+
+ /* pq is variable size; only zero out what's requested */
+ bzero(pq, OFFSETOF(struct pktq, q) + (sizeof(struct pktq_prec) * num_prec));
+
+ if (HND_PKTQ_MUTEX_CREATE("pktq", &pq->mutex) != OSL_EXT_SUCCESS)
+ return FALSE;
+
+ pq->num_prec = (uint16)num_prec;
+
+ pq->max_pkts = (uint16)max_pkts;
+
+ for (prec = 0; prec < num_prec; prec++)
+ pq->q[prec].max_pkts = pq->max_pkts;
+
+ return TRUE;
+}
+
+bool
+spktq_init(struct spktq *spq, uint max_pkts)
+{
+ bzero(spq, sizeof(struct spktq));
+
+ if (HND_PKTQ_MUTEX_CREATE("spktq", &spq->mutex) != OSL_EXT_SUCCESS)
+ return FALSE;
+
+ spq->q.max_pkts = (uint16)max_pkts;
+
+ return TRUE;
+}
+
+bool
+spktq_init_list(struct spktq *spq, uint max_pkts, void *head, void *tail, uint16 n_pkts)
+{
+ if (HND_PKTQ_MUTEX_CREATE("spktq", &spq->mutex) != OSL_EXT_SUCCESS)
+ return FALSE;
+
+ ASSERT(PKTLINK(tail) == NULL);
+ PKTSETLINK(tail, NULL);
+ spq->q.head = head;
+ spq->q.tail = tail;
+ spq->q.max_pkts = (uint16)max_pkts;
+ spq->q.n_pkts = n_pkts;
+ spq->q.stall_count = 0;
+ spq->q.dequeue_count = 0;
+
+ return TRUE;
+}
+
+bool
+pktq_deinit(struct pktq *pq)
+{
+ BCM_REFERENCE(pq);
+ if (HND_PKTQ_MUTEX_DELETE(&pq->mutex) != OSL_EXT_SUCCESS)
+ return FALSE;
+
+ return TRUE;
+}
+
+bool
+spktq_deinit(struct spktq *spq)
+{
+ BCM_REFERENCE(spq);
+ if (HND_PKTQ_MUTEX_DELETE(&spq->mutex) != OSL_EXT_SUCCESS)
+ return FALSE;
+
+ return TRUE;
+}
+
+void
+pktq_set_max_plen(struct pktq *pq, int prec, uint max_pkts)
+{
+ ASSERT(prec >= 0 && prec < pq->num_prec);
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return;
+
+ if (prec < pq->num_prec)
+ pq->q[prec].max_pkts = (uint16)max_pkts;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+ return;
+}
+
+void *
+BCMFASTPATH(pktq_deq)(struct pktq *pq, int *prec_out)
+{
+ struct pktq_prec *q;
+ void *p = NULL;
+ int prec;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return NULL;
+
+ if (pq->n_pkts_tot == 0)
+ goto done;
+
+ while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
+ pq->hi_prec--;
+
+ q = &pq->q[prec];
+
+ if ((p = q->head) == NULL)
+ goto done;
+
+ if ((q->head = PKTLINK(p)) == NULL)
+ q->tail = NULL;
+
+ q->n_pkts--;
+
+ pq->n_pkts_tot--;
+
+#ifdef WL_TXQ_STALL
+ q->dequeue_count++;
+#endif
+
+ if (prec_out)
+ *prec_out = prec;
+
+ PKTSETLINK(p, NULL);
+
+done:
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+ return NULL;
+
+ return p;
+}
+
+void *
+BCMFASTPATH(pktq_deq_tail)(struct pktq *pq, int *prec_out)
+{
+ struct pktq_prec *q;
+ void *p = NULL, *prev;
+ int prec;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return NULL;
+
+ if (pq->n_pkts_tot == 0)
+ goto done;
+
+ for (prec = 0; prec < pq->hi_prec; prec++)
+ if (pq->q[prec].head)
+ break;
+
+ q = &pq->q[prec];
+
+ if ((p = q->head) == NULL)
+ goto done;
+
+ for (prev = NULL; p != q->tail; p = PKTLINK(p))
+ prev = p;
+
+ if (prev)
+ PKTSETLINK(prev, NULL);
+ else
+ q->head = NULL;
+
+ q->tail = prev;
+ q->n_pkts--;
+
+ pq->n_pkts_tot--;
+
+#ifdef WL_TXQ_STALL
+ q->dequeue_count++;
+#endif
+
+ if (prec_out)
+ *prec_out = prec;
+
+ PKTSETLINK(p, NULL);
+
+done:
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+ return NULL;
+
+ return p;
+}
+
+void *
+pktq_peek(struct pktq *pq, int *prec_out)
+{
+ int prec;
+ void *p = NULL;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return NULL;
+
+ if (pq->n_pkts_tot == 0)
+ goto done;
+
+ while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
+ pq->hi_prec--;
+
+ if (prec_out)
+ *prec_out = prec;
+
+ p = pq->q[prec].head;
+
+done:
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+ return NULL;
+
+ return p;
+}
+
+void *
+spktq_peek(struct spktq *spq)
+{
+ void *p = NULL;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_ACQUIRE(&spq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return NULL;
+
+ if (spq->q.n_pkts == 0)
+ goto done;
+
+ p = spq->q.head;
+
+done:
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_RELEASE(&spq->mutex) != OSL_EXT_SUCCESS)
+ return NULL;
+
+ return p;
+}
+
+void
+pktq_pflush(osl_t *osh, struct pktq *pq, int prec, bool dir)
+{
+ void *p;
+
+	/* no need for mutex protection! */
+
+ /* start with the head of the list */
+ while ((p = pktq_pdeq(pq, prec)) != NULL) {
+
+ /* delete this packet */
+ PKTFREE(osh, p, dir);
+ }
+}
+
+void
+spktq_flush_ext(osl_t *osh, struct spktq *spq, bool dir,
+ void (*pktq_flush_cb)(void *ctx, void *pkt), void *pktq_flush_ctx)
+{
+ void *pkt;
+
+	/* no need for mutex protection! */
+
+ /* start with the head of the list */
+ while ((pkt = spktq_deq(spq)) != NULL) {
+ if (pktq_flush_cb != NULL) {
+ pktq_flush_cb(pktq_flush_ctx, pkt);
+ }
+ /* delete this packet */
+ PKTFREE(osh, pkt, dir);
+ }
+}
+
+typedef struct {
+ spktq_cb_t cb;
+ void *arg;
+} spktq_cbinfo_t;
+static spktq_cbinfo_t spktq_cbinfo = {NULL, NULL};
+static spktq_cbinfo_t *spktq_cbinfo_get(void);
+
+/* Accessor function forced into RAM to keep spktq_cbinfo out of shdat */
+static spktq_cbinfo_t*
+BCMRAMFN(spktq_cbinfo_get)(void)
+{
+ return (&spktq_cbinfo);
+}
+
+void
+BCMATTACHFN(spktq_free_register)(spktq_cb_t cb, void *arg)
+{
+ spktq_cbinfo_t *cbinfop = spktq_cbinfo_get();
+ cbinfop->cb = cb;
+ cbinfop->arg = arg;
+}
+
+void
+spktq_cb(void *spq)
+{
+ spktq_cbinfo_t *cbinfop = spktq_cbinfo_get();
+ if (cbinfop->cb) {
+ cbinfop->cb(cbinfop->arg, spq);
+ }
+}
+
+void
+pktq_flush(osl_t *osh, struct pktq *pq, bool dir)
+{
+ bool flush = FALSE;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return;
+
+	/* Optimization: if pktq n_pkts_tot is 0, just return.
+	 * A pktq len of 0 means all of the pktq's prec queues are empty.
+	 */
+ if (pq->n_pkts_tot > 0) {
+ flush = TRUE;
+ }
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+ return;
+
+ if (flush) {
+ int prec;
+
+ PKTQ_PREC_ITER(pq, prec) {
+ pktq_pflush(osh, pq, prec, dir);
+ }
+ }
+}
+
+/* Return sum of lengths of a specific set of precedences */
+int
+pktq_mlen(struct pktq *pq, uint prec_bmp)
+{
+ int prec, len;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return 0;
+
+ len = 0;
+
+ for (prec = 0; prec <= pq->hi_prec; prec++)
+ if (prec_bmp & (1 << prec))
+ len += pq->q[prec].n_pkts;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+ return 0;
+
+ return len;
+}
+
+/* Priority peek from a specific set of precedences */
+void *
+BCMFASTPATH(pktq_mpeek)(struct pktq *pq, uint prec_bmp, int *prec_out)
+{
+ struct pktq_prec *q;
+ void *p = NULL;
+ int prec;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return NULL;
+
+ if (pq->n_pkts_tot == 0)
+ goto done;
+
+ while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
+ pq->hi_prec--;
+
+ while ((prec_bmp & (1 << prec)) == 0 || pq->q[prec].head == NULL)
+ if (prec-- == 0)
+ goto done;
+
+ q = &pq->q[prec];
+
+ if ((p = q->head) == NULL)
+ goto done;
+
+ if (prec_out)
+ *prec_out = prec;
+
+done:
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+ return NULL;
+
+ return p;
+}
+
+/* Priority dequeue from a specific set of precedences */
+void *
+BCMPOSTTRAPFASTPATH(pktq_mdeq)(struct pktq *pq, uint prec_bmp, int *prec_out)
+{
+ struct pktq_prec *q;
+ void *p = NULL;
+ int prec;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return NULL;
+
+ if (pq->n_pkts_tot == 0)
+ goto done;
+
+ while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
+ pq->hi_prec--;
+
+ while ((pq->q[prec].head == NULL) || ((prec_bmp & (1 << prec)) == 0))
+ if (prec-- == 0)
+ goto done;
+
+ q = &pq->q[prec];
+
+ if ((p = q->head) == NULL)
+ goto done;
+
+ if ((q->head = PKTLINK(p)) == NULL)
+ q->tail = NULL;
+
+ q->n_pkts--;
+
+	// terence 20150308: fix for a non-null skb->prev pointer sent from ndo_start_xmit
+ if (q->n_pkts == 0) {
+ q->head = NULL;
+ q->tail = NULL;
+ }
+
+#ifdef WL_TXQ_STALL
+ q->dequeue_count++;
+#endif
+
+ if (prec_out)
+ *prec_out = prec;
+
+ pq->n_pkts_tot--;
+
+ PKTSETLINK(p, NULL);
+
+done:
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+ return NULL;
+
+ return p;
+}
+
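+/*
+ * Usage sketch (illustrative only): prec_bmp restricts the dequeue to a
+ * subset of precedences, e.g. only precedences 2 and 3; pktq_mlen() with
+ * the same bitmap reports how many packets such a dequeue could see:
+ *
+ *	int prec;
+ *	void *p = pktq_mdeq(pq, (1 << 2) | (1 << 3), &prec);
+ */
+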
+#ifdef HND_PKTQ_THREAD_SAFE
+int
+pktqprec_avail_pkts(struct pktq *pq, int prec)
+{
+ int ret;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return 0;
+
+ ASSERT(prec >= 0 && prec < pq->num_prec);
+
+ ret = pq->q[prec].max_pkts - pq->q[prec].n_pkts;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+ return 0;
+
+ return ret;
+}
+
+bool
+BCMFASTPATH(pktqprec_full)(struct pktq *pq, int prec)
+{
+ bool ret;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return FALSE;
+
+ ASSERT_FP(prec >= 0 && prec < pq->num_prec);
+
+ ret = pq->q[prec].n_pkts >= pq->q[prec].max_pkts;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+ return FALSE;
+
+ return ret;
+}
+
+int
+pktq_avail(struct pktq *pq)
+{
+ int ret;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return 0;
+
+ ret = pq->max_pkts - pq->n_pkts_tot;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+ return 0;
+
+ return ret;
+}
+
+int
+spktq_avail(struct spktq *spq)
+{
+ int ret;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_ACQUIRE(&spq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return 0;
+
+ ret = spq->q.max_pkts - spq->q.n_pkts;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_RELEASE(&spq->mutex) != OSL_EXT_SUCCESS)
+ return 0;
+
+ return ret;
+}
+
+bool
+pktq_full(struct pktq *pq)
+{
+ bool ret;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return FALSE;
+
+ ret = pq->n_pkts_tot >= pq->max_pkts;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+ return FALSE;
+
+ return ret;
+}
+
+bool
+spktq_full(struct spktq *spq)
+{
+ bool ret;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_ACQUIRE(&spq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return FALSE;
+
+ ret = spq->q.n_pkts >= spq->q.max_pkts;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_RELEASE(&spq->mutex) != OSL_EXT_SUCCESS)
+ return FALSE;
+
+ return ret;
+}
+
+#endif /* HND_PKTQ_THREAD_SAFE */
diff --git a/bcmdhd.101.10.361.x/hndlhl.c b/bcmdhd.101.10.361.x/hndlhl.c
new file mode 100755
index 0000000..0695e09
--- /dev/null
+++ b/bcmdhd.101.10.361.x/hndlhl.c
@@ -0,0 +1,1241 @@
+/*
+ * Misc utility routines for accessing lhl specific features
+ * of the SiliconBackplane-based Broadcom chips.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#include <hndpmu.h>
+#include <hndlhl.h>
+#include <sbchipc.h>
+#include <hndsoc.h>
+#include <bcmdevs.h>
+#include <osl.h>
+#include <sbgci.h>
+#include <siutils.h>
+#include <bcmutils.h>
+
+#define SI_LHL_EXT_WAKE_REQ_MASK_MAGIC 0x7FBBF7FF /* magic number for LHL EXT */
+
+/* PmuRev1 has a 24-bit PMU RsrcReq timer. However it pushes all other bits
+ * upward. To make the code run for all revs we use a variable to tell how
+ * many bits we need to shift.
+ */
+#define FLAGS_SHIFT 14
+#define LHL_ERROR(args) printf args
+static const char BCMATTACHDATA(rstr_rfldo3p3_cap_war)[] = "rfldo3p3_cap_war";
+static const char BCMATTACHDATA(rstr_abuck_volt_sleep)[] = "abuck_volt_sleep";
+static const char BCMATTACHDATA(rstr_cbuck_volt_sleep)[] = "cbuck_volt_sleep";
+
+void
+si_lhl_setup(si_t *sih, osl_t *osh)
+{
+ if ((CHIPID(sih->chip) == BCM43012_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM43013_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM43014_CHIP_ID)) {
+ /* Enable PMU sleep mode0 */
+#ifdef BCMQT
+ LHL_REG(sih, lhl_top_pwrseq_ctl_adr, LHL_PWRSEQ_CTL, PMU_SLEEP_MODE_0);
+#else
+ LHL_REG(sih, lhl_top_pwrseq_ctl_adr, LHL_PWRSEQ_CTL, PMU_SLEEP_MODE_2);
+#endif
+		/* Modify as per the BCM43012/LHL
+		 * "Recommended setting for various PMU Sleep Modes" page:
+		 */
+ LHL_REG(sih, lhl_top_pwrup_ctl_adr, LHL_PWRUP_CTL_MASK, LHL_PWRUP_CTL);
+ LHL_REG(sih, lhl_top_pwrup2_ctl_adr, LHL_PWRUP2_CTL_MASK, LHL_PWRUP2_CTL);
+ LHL_REG(sih, lhl_top_pwrdn_ctl_adr, LHL_PWRDN_CTL_MASK, LHL_PWRDN_SLEEP_CNT);
+ LHL_REG(sih, lhl_top_pwrdn2_ctl_adr, LHL_PWRDN2_CTL_MASK, LHL_PWRDN2_CTL);
+ }
+
+ if (!FWSIGN_ENAB() && si_hib_ext_wakeup_isenab(sih)) {
+ /*
+ * Enable wakeup on GPIO1, PCIE clkreq and perst signal,
+ * GPIO[0] is mapped to GPIO1
+ * GPIO[1] is mapped to PCIE perst
+ * GPIO[2] is mapped to PCIE clkreq
+ */
+
+ /* GPIO1 */
+ /* Clear any old interrupt status */
+ LHL_REG(sih, gpio_int_st_port_adr[0],
+ 1 << PCIE_GPIO1_GPIO_PIN, 1 << PCIE_GPIO1_GPIO_PIN);
+ /* active high level trigger */
+ LHL_REG(sih, gpio_ctrl_iocfg_p_adr[PCIE_GPIO1_GPIO_PIN], ~0,
+ 1 << GCI_GPIO_STS_WL_DIN_SELECT);
+ LHL_REG(sih, gpio_int_en_port_adr[0],
+ 1 << PCIE_GPIO1_GPIO_PIN, 1 << PCIE_GPIO1_GPIO_PIN);
+ LHL_REG(sih, gpio_int_st_port_adr[0],
+ 1 << PCIE_GPIO1_GPIO_PIN, 1 << PCIE_GPIO1_GPIO_PIN);
+ si_gci_set_functionsel(sih, 1, CC_FNSEL_SAMEASPIN);
+
+ /* PCIE perst */
+ LHL_REG(sih, gpio_int_st_port_adr[0],
+ 1 << PCIE_PERST_GPIO_PIN, 1 << PCIE_PERST_GPIO_PIN);
+ LHL_REG(sih, gpio_ctrl_iocfg_p_adr[PCIE_PERST_GPIO_PIN], ~0,
+ (1 << GCI_GPIO_STS_EDGE_TRIG_BIT |
+ 1 << GCI_GPIO_STS_WL_DIN_SELECT));
+ LHL_REG(sih, gpio_int_en_port_adr[0],
+ 1 << PCIE_PERST_GPIO_PIN, 1 << PCIE_PERST_GPIO_PIN);
+ LHL_REG(sih, gpio_int_st_port_adr[0],
+ 1 << PCIE_PERST_GPIO_PIN, 1 << PCIE_PERST_GPIO_PIN);
+
+ /* PCIE clkreq */
+ LHL_REG(sih, gpio_int_st_port_adr[0],
+ 1 << PCIE_CLKREQ_GPIO_PIN, 1 << PCIE_CLKREQ_GPIO_PIN);
+ LHL_REG(sih, gpio_ctrl_iocfg_p_adr[PCIE_CLKREQ_GPIO_PIN], ~0,
+ (1 << GCI_GPIO_STS_NEG_EDGE_TRIG_BIT) |
+ (1 << GCI_GPIO_STS_WL_DIN_SELECT));
+ LHL_REG(sih, gpio_int_en_port_adr[0],
+ 1 << PCIE_CLKREQ_GPIO_PIN, 1 << PCIE_CLKREQ_GPIO_PIN);
+ LHL_REG(sih, gpio_int_st_port_adr[0],
+ 1 << PCIE_CLKREQ_GPIO_PIN, 1 << PCIE_CLKREQ_GPIO_PIN);
+ }
+}
+
+static const uint32 lpo_opt_tab[4][2] = {
+ { LPO1_PD_EN, LHL_LPO1_SEL },
+ { LPO2_PD_EN, LHL_LPO2_SEL },
+ { OSC_32k_PD, LHL_32k_SEL},
+ { EXTLPO_BUF_PD, LHL_EXT_SEL }
+};
+
+#define LPO_EN_OFFSET 0u
+#define LPO_SEL_OFFSET 1u
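+/* Indexing sketch for lpo_opt_tab (assumes the LHL_*_ENAB values run 1..4 in
+ * table order):
+ *	lpo_opt_tab[lpo - 1][LPO_EN_OFFSET]  - power-down enable bit to clear
+ *	lpo_opt_tab[lpo - 1][LPO_SEL_OFFSET] - clock-select value to program
+ */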
+
+static int
+si_lhl_get_lpo_sel(si_t *sih, uint32 lpo)
+{
+	int sel;
+
+	/* Guard lpo == 0: lpo_opt_tab is indexed by (lpo - 1) */
+	if ((lpo != 0u) && (lpo <= LHL_EXT_SEL)) {
+		LHL_REG(sih, lhl_main_ctl_adr, lpo_opt_tab[lpo - 1u][LPO_EN_OFFSET], 0u);
+		sel = lpo_opt_tab[lpo - 1u][LPO_SEL_OFFSET];
+	} else {
+		sel = BCME_NOTFOUND;
+	}
+	return sel;
+}
+
+static void
+si_lhl_detect_lpo(si_t *sih, osl_t *osh)
+{
+ uint clk_det_cnt;
+ int timeout = 0;
+ gciregs_t *gciregs;
+ gciregs = si_setcore(sih, GCI_CORE_ID, 0);
+ ASSERT(gciregs != NULL);
+
+ LHL_REG(sih, lhl_clk_det_ctl_adr, LHL_CLK_DET_CTL_ADR_LHL_CNTR_EN, 0);
+ LHL_REG(sih, lhl_clk_det_ctl_adr,
+ LHL_CLK_DET_CTL_ADR_LHL_CNTR_CLR, LHL_CLK_DET_CTL_ADR_LHL_CNTR_CLR);
+ timeout = 0;
+ clk_det_cnt =
+ ((R_REG(osh, &gciregs->lhl_clk_det_ctl_adr) & LHL_CLK_DET_CNT) >>
+ LHL_CLK_DET_CNT_SHIFT);
+ while (clk_det_cnt != 0 && timeout <= LPO_SEL_TIMEOUT) {
+ OSL_DELAY(10);
+ clk_det_cnt =
+ ((R_REG(osh, &gciregs->lhl_clk_det_ctl_adr) & LHL_CLK_DET_CNT) >>
+ LHL_CLK_DET_CNT_SHIFT);
+ timeout++;
+ }
+
+	if (clk_det_cnt != 0) {
+		LHL_ERROR(("Clock not present: counter clear did not take effect, "
+			"timeout = %d\n", timeout));
+ ROMMABLE_ASSERT(0);
+ }
+ LHL_REG(sih, lhl_clk_det_ctl_adr, LHL_CLK_DET_CTL_ADR_LHL_CNTR_CLR, 0);
+ LHL_REG(sih, lhl_clk_det_ctl_adr, LHL_CLK_DET_CTL_ADR_LHL_CNTR_EN,
+ LHL_CLK_DET_CTL_ADR_LHL_CNTR_EN);
+ clk_det_cnt =
+ ((R_REG(osh, &gciregs->lhl_clk_det_ctl_adr) & LHL_CLK_DET_CNT) >>
+ LHL_CLK_DET_CNT_SHIFT);
+ timeout = 0;
+
+ while (clk_det_cnt <= CLK_DET_CNT_THRESH && timeout <= LPO_SEL_TIMEOUT) {
+ OSL_DELAY(10);
+ clk_det_cnt =
+ ((R_REG(osh, &gciregs->lhl_clk_det_ctl_adr) & LHL_CLK_DET_CNT) >>
+ LHL_CLK_DET_CNT_SHIFT);
+ timeout++;
+ }
+
+ if (timeout >= LPO_SEL_TIMEOUT) {
+		LHL_ERROR(("LPO is not available, timeout = %u\n", timeout));
+ ROMMABLE_ASSERT(0);
+ }
+}
+
+static void
+si_lhl_select_lpo(si_t *sih, osl_t *osh, int sel, uint32 lpo)
+{
+ uint status;
+ int timeout = 0u;
+ gciregs_t *gciregs;
+ uint32 final_clk_sel;
+ uint32 final_lpo_sel;
+ gciregs = si_setcore(sih, GCI_CORE_ID, 0);
+ ASSERT(gciregs != NULL);
+
+ LHL_REG(sih, lhl_main_ctl_adr,
+ LHL_MAIN_CTL_ADR_LHL_WLCLK_SEL, (sel) << LPO_SEL_SHIFT);
+ final_clk_sel = (R_REG(osh, &gciregs->lhl_clk_status_adr)
+ & LHL_MAIN_CTL_ADR_FINAL_CLK_SEL);
+ final_lpo_sel = (unsigned)(((1u << sel) << LPO_FINAL_SEL_SHIFT));
+
+ status = (final_clk_sel == final_lpo_sel) ? 1u : 0u;
+ timeout = 0;
+ while (!status && timeout <= LPO_SEL_TIMEOUT) {
+ OSL_DELAY(10);
+ final_clk_sel = (R_REG(osh, &gciregs->lhl_clk_status_adr)
+ & LHL_MAIN_CTL_ADR_FINAL_CLK_SEL);
+ status = (final_clk_sel == final_lpo_sel) ? 1u : 0u;
+ timeout++;
+ }
+
+ if (timeout >= LPO_SEL_TIMEOUT) {
+		LHL_ERROR(("LPO is not available, timeout = %u\n", timeout));
+ ROMMABLE_ASSERT(0);
+ }
+
+	/* For 4377, and for chiprev B0 and greater, do not power off the other LPOs */
+ if (BCM4389_CHIP(sih->chip) || BCM4378_CHIP(sih->chip) || BCM4397_CHIP(sih->chip) ||
+ BCM4388_CHIP(sih->chip) || BCM4387_CHIP(sih->chip) ||
+ (CHIPID(sih->chip) == BCM4377_CHIP_ID)) {
+		LHL_ERROR(("Not powering down other LPOs\n"));
+ } else {
+ /* Power down the rest of the LPOs */
+
+ if (lpo != LHL_EXT_LPO_ENAB) {
+ LHL_REG(sih, lhl_main_ctl_adr, EXTLPO_BUF_PD, EXTLPO_BUF_PD);
+ }
+
+ if (lpo != LHL_LPO1_ENAB) {
+ LHL_REG(sih, lhl_main_ctl_adr, LPO1_PD_EN, LPO1_PD_EN);
+ LHL_REG(sih, lhl_main_ctl_adr, LPO1_PD_SEL, LPO1_PD_SEL_VAL);
+ }
+ if (lpo != LHL_LPO2_ENAB) {
+ LHL_REG(sih, lhl_main_ctl_adr, LPO2_PD_EN, LPO2_PD_EN);
+ LHL_REG(sih, lhl_main_ctl_adr, LPO2_PD_SEL, LPO2_PD_SEL_VAL);
+ }
+ if (lpo != LHL_OSC_32k_ENAB) {
+ LHL_REG(sih, lhl_main_ctl_adr, OSC_32k_PD, OSC_32k_PD);
+ }
+ if (lpo != RADIO_LPO_ENAB) {
+ si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_06, LPO_SEL, 0);
+ }
+ }
+
+}
+
+/* To skip this function, specify an invalid "lpo_select" value in nvram */
+int
+BCMATTACHFN(si_lhl_set_lpoclk)(si_t *sih, osl_t *osh, uint32 lpo_force)
+{
+ int lhl_wlclk_sel;
+ uint32 lpo = 0;
+
+ /* Apply nvram override to lpo */
+ if (!FWSIGN_ENAB()) {
+ if ((lpo = (uint32)getintvar(NULL, "lpo_select")) == 0) {
+ if (lpo_force == LHL_LPO_AUTO) {
+ lpo = LHL_OSC_32k_ENAB;
+ } else {
+ lpo = lpo_force;
+ }
+ }
+ } else {
+ lpo = lpo_force;
+ }
+
+ lhl_wlclk_sel = si_lhl_get_lpo_sel(sih, lpo);
+
+ if (lhl_wlclk_sel < 0) {
+ return BCME_OK;
+ }
+
+ LHL_REG(sih, lhl_clk_det_ctl_adr,
+ LHL_CLK_DET_CTL_AD_CNTR_CLK_SEL, lhl_wlclk_sel);
+
+ /* Detect the desired LPO */
+ si_lhl_detect_lpo(sih, osh);
+
+ /* Select the desired LPO */
+ si_lhl_select_lpo(sih, osh, lhl_wlclk_sel, lpo);
+
+ return BCME_OK;
+}
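+
+/* Minimal usage sketch (hypothetical attach-time caller): LHL_LPO_AUTO lets
+ * the nvram "lpo_select" variable, when present and valid, pick the LPO;
+ * otherwise the internal 32kHz oscillator is selected.
+ *
+ *	(void)si_lhl_set_lpoclk(sih, osh, LHL_LPO_AUTO);
+ */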
+
+void
+BCMATTACHFN(si_lhl_timer_config)(si_t *sih, osl_t *osh, int timer_type)
+{
+ uint origidx;
+ pmuregs_t *pmu = NULL;
+
+ /* Remember original core before switch to chipc/pmu */
+ origidx = si_coreidx(sih);
+ if (AOB_ENAB(sih)) {
+ pmu = si_setcore(sih, PMU_CORE_ID, 0);
+ } else {
+ pmu = si_setcoreidx(sih, SI_CC_IDX);
+ }
+
+ ASSERT(pmu != NULL);
+
+ switch (timer_type) {
+ case LHL_MAC_TIMER:
+ /* Enable MAC Timer interrupt */
+ LHL_REG(sih, lhl_wl_mactim0_intrp_adr,
+ (LHL_WL_MACTIM_INTRP_EN | LHL_WL_MACTIM_INTRP_EDGE_TRIGGER),
+ (LHL_WL_MACTIM_INTRP_EN | LHL_WL_MACTIM_INTRP_EDGE_TRIGGER));
+
+ /* Programs bits for MACPHY_CLK_AVAIL and all its dependent bits in
+ * MacResourceReqMask0.
+ */
+ PMU_REG(sih, mac_res_req_mask, ~0, si_pmu_rsrc_macphy_clk_deps(sih, osh, 0));
+
+ /* One time init of mac_res_req_timer to enable interrupt and clock request */
+ HND_PMU_SYNC_WR(sih, pmu, pmu, osh,
+ PMUREGADDR(sih, pmu, pmu, mac_res_req_timer),
+ ((PRRT_ALP_REQ | PRRT_HQ_REQ | PRRT_INTEN) << FLAGS_SHIFT));
+
+		/*
+		 * Reset the MAC main timer in case it is still running from a
+		 * previous instance. This also clears the interrupt status.
+		 */
+ LHL_REG(sih, lhl_wl_mactim_int0_adr, LHL_WL_MACTIMER_MASK, 0x0);
+
+ if (si_pmu_get_mac_rsrc_req_tmr_cnt(sih) > 1) {
+ LHL_REG(sih, lhl_wl_mactim1_intrp_adr,
+ (LHL_WL_MACTIM_INTRP_EN | LHL_WL_MACTIM_INTRP_EDGE_TRIGGER),
+ (LHL_WL_MACTIM_INTRP_EN | LHL_WL_MACTIM_INTRP_EDGE_TRIGGER));
+
+ PMU_REG(sih, mac_res_req_mask1, ~0,
+ si_pmu_rsrc_macphy_clk_deps(sih, osh, 1));
+
+ HND_PMU_SYNC_WR(sih, pmu, pmu, osh,
+ PMUREGADDR(sih, pmu, pmu, mac_res_req_timer1),
+ ((PRRT_ALP_REQ | PRRT_HQ_REQ | PRRT_INTEN) << FLAGS_SHIFT));
+
+			/*
+			 * Reset the MAC aux timer in case it is still running from a
+			 * previous instance. This also clears the interrupt status.
+			 */
+ LHL_REG(sih, lhl_wl_mactim_int1_adr, LHL_WL_MACTIMER_MASK, 0x0);
+ }
+
+ if (si_pmu_get_mac_rsrc_req_tmr_cnt(sih) > 2) {
+ LHL_REG(sih, lhl_wl_mactim2_intrp_adr,
+ (LHL_WL_MACTIM_INTRP_EN | LHL_WL_MACTIM_INTRP_EDGE_TRIGGER),
+ (LHL_WL_MACTIM_INTRP_EN | LHL_WL_MACTIM_INTRP_EDGE_TRIGGER));
+
+ PMU_REG_NEW(sih, mac_res_req_mask2, ~0,
+ si_pmu_rsrc_macphy_clk_deps(sih, osh, 2));
+
+ HND_PMU_SYNC_WR(sih, pmu, pmu, osh,
+ PMUREGADDR(sih, pmu, pmu, mac_res_req_timer2),
+ ((PRRT_ALP_REQ | PRRT_HQ_REQ | PRRT_INTEN) << FLAGS_SHIFT));
+
+			/*
+			 * Reset the scan MAC timer in case it is still running from a
+			 * previous instance. This also clears the interrupt status.
+			 */
+ LHL_REG(sih, lhl_wl_mactim_int2_adr, LHL_WL_MACTIMER_MASK, 0x0);
+ }
+
+ break;
+
+ case LHL_ARM_TIMER:
+ /* Enable ARM Timer interrupt */
+ LHL_REG(sih, lhl_wl_armtim0_intrp_adr,
+ (LHL_WL_ARMTIM0_INTRP_EN | LHL_WL_ARMTIM0_INTRP_EDGE_TRIGGER),
+ (LHL_WL_ARMTIM0_INTRP_EN | LHL_WL_ARMTIM0_INTRP_EDGE_TRIGGER));
+
+ /* Programs bits for HT_AVAIL and all its dependent bits in ResourceReqMask0 */
+ /* Programs bits for CORE_RDY_CB and all its dependent bits in ResourceReqMask0 */
+ PMU_REG(sih, res_req_mask, ~0, (si_pmu_rsrc_ht_avail_clk_deps(sih, osh) |
+ si_pmu_rsrc_cb_ready_deps(sih, osh)));
+
+ /* One time init of res_req_timer to enable interrupt and clock request
+ * For low power request only ALP (HT_AVAIL is anyway requested by res_req_mask)
+ */
+ HND_PMU_SYNC_WR(sih, pmu, pmu, osh,
+ PMUREGADDR(sih, pmu, pmu, res_req_timer),
+ ((PRRT_ALP_REQ | PRRT_HQ_REQ | PRRT_INTEN) << FLAGS_SHIFT));
+ break;
+ }
+
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+}
+
+void
+BCMATTACHFN(si_lhl_timer_enable)(si_t *sih)
+{
+ /* Enable clks for pmu int propagation */
+ PMU_REG(sih, pmuintctrl0, PMU_INTC_ALP_REQ, PMU_INTC_ALP_REQ);
+
+ PMU_REG(sih, pmuintmask0, RSRC_INTR_MASK_TIMER_INT_0, RSRC_INTR_MASK_TIMER_INT_0);
+#ifndef BCMQT
+ LHL_REG(sih, lhl_main_ctl_adr, LHL_FAST_WRITE_EN, LHL_FAST_WRITE_EN);
+#endif /* BCMQT */
+ PMU_REG(sih, pmucontrol_ext, PCTL_EXT_USE_LHL_TIMER, PCTL_EXT_USE_LHL_TIMER);
+}
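+
+/* Expected call order (a reading of this file, not a documented contract):
+ * configure each timer type first, then enable once to route the PMU
+ * interrupt and switch the PMU over to the LHL timer.
+ *
+ *	si_lhl_timer_config(sih, osh, LHL_MAC_TIMER);
+ *	si_lhl_timer_config(sih, osh, LHL_ARM_TIMER);
+ *	si_lhl_timer_enable(sih);
+ */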
+
+void
+BCMPOSTTRAPFN(si_lhl_timer_reset)(si_t *sih, uint coreid, uint coreunit)
+{
+ switch (coreid) {
+ case D11_CORE_ID:
+ switch (coreunit) {
+ case 0: /* MAC_CORE_UNIT_0 */
+ LHL_REG(sih, lhl_wl_mactim_int0_adr, LHL_WL_MACTIMER_MASK, 0x0);
+ LHL_REG(sih, lhl_wl_mactim0_st_adr,
+ LHL_WL_MACTIMER_INT_ST_MASK, LHL_WL_MACTIMER_INT_ST_MASK);
+ break;
+ case 1: /* MAC_CORE_UNIT_1 */
+ LHL_REG(sih, lhl_wl_mactim_int1_adr, LHL_WL_MACTIMER_MASK, 0x0);
+ LHL_REG(sih, lhl_wl_mactim1_st_adr,
+ LHL_WL_MACTIMER_INT_ST_MASK, LHL_WL_MACTIMER_INT_ST_MASK);
+ break;
+ case 2: /* SCAN_CORE_UNIT */
+ LHL_REG(sih, lhl_wl_mactim_int2_adr, LHL_WL_MACTIMER_MASK, 0x0);
+ LHL_REG(sih, lhl_wl_mactim2_st_adr,
+ LHL_WL_MACTIMER_INT_ST_MASK, LHL_WL_MACTIMER_INT_ST_MASK);
+ break;
+ default:
+ LHL_ERROR(("Cannot reset lhl timer, wrong coreunit = %d\n", coreunit));
+ }
+ break;
+ case ARMCR4_CORE_ID: /* intentional fallthrough */
+ case ARMCA7_CORE_ID:
+ LHL_REG(sih, lhl_wl_armtim0_adr, LHL_WL_MACTIMER_MASK, 0x0);
+ LHL_REG(sih, lhl_wl_armtim0_st_adr,
+ LHL_WL_MACTIMER_INT_ST_MASK, LHL_WL_MACTIMER_INT_ST_MASK);
+ break;
+ default:
+ LHL_ERROR(("Cannot reset lhl timer, wrong coreid = 0x%x\n", coreid));
+ }
+}
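+
+/* Illustrative only: resetting the LHL timer of the main MAC core
+ * (coreunit 0) after a trap would mirror the D11_CORE_ID case above:
+ *
+ *	si_lhl_timer_reset(sih, D11_CORE_ID, 0);
+ */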
+
+void
+si_lhl_ilp_config(si_t *sih, osl_t *osh, uint32 ilp_period)
+{
+ gciregs_t *gciregs;
+ if ((CHIPID(sih->chip) == BCM43012_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM43013_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM43014_CHIP_ID)) {
+ gciregs = si_setcore(sih, GCI_CORE_ID, 0);
+ ASSERT(gciregs != NULL);
+ W_REG(osh, &gciregs->lhl_wl_ilp_val_adr, ilp_period);
+ }
+}
+
+lhl_reg_set_t BCMATTACHDATA(lv_sleep_mode_4369_lhl_reg_set)[] =
+{
+ /* set wl_sleep_en */
+ {LHL_REG_OFF(lhl_top_pwrseq_ctl_adr), (1 << 0), (1 << 0)},
+
+ /* set top_pwrsw_en, top_slb_en, top_iso_en */
+ {LHL_REG_OFF(lhl_top_pwrseq_ctl_adr), BCM_MASK32(5, 3), (0x0 << 3)},
+
+ /* set VMUX_asr_sel_en */
+ {LHL_REG_OFF(lhl_top_pwrseq_ctl_adr), (1 << 8), (1 << 8)},
+
+ /* lhl_lp_main_ctl_adr, disable lp_mode_en, set CSR and ASR field enables for LV mode */
+ {LHL_REG_OFF(lhl_lp_main_ctl_adr), BCM_MASK32(21, 0), 0x3F89FF},
+
+ /* lhl_lp_main_ctl1_adr, set CSR field values - CSR_adj - 0.64V and trim_adj -5mV */
+ {LHL_REG_OFF(lhl_lp_main_ctl1_adr), BCM_MASK32(23, 0), 0x9E9F97},
+
+ /* lhl_lp_main_ctl2_adr, set ASR field values - ASR_adj - 0.76V and trim_adj +5mV */
+ {LHL_REG_OFF(lhl_lp_main_ctl2_adr), BCM_MASK32(13, 0), 0x07EE},
+
+ /* lhl_lp_dn_ctl_adr, set down count for CSR fields- adj, mode, overi_dis */
+ {LHL_REG_OFF(lhl_lp_dn_ctl_adr), ~0, ((LHL4369_CSR_OVERI_DIS_DWN_CNT << 16) |
+ (LHL4369_CSR_MODE_DWN_CNT << 8) | (LHL4369_CSR_ADJ_DWN_CNT << 0))},
+
+ /* lhl_lp_up_ctl_adr, set up count for CSR fields- adj, mode, overi_dis */
+ {LHL_REG_OFF(lhl_lp_up_ctl_adr), ~0, ((LHL4369_CSR_OVERI_DIS_UP_CNT << 16) |
+ (LHL4369_CSR_MODE_UP_CNT << 8) | (LHL4369_CSR_ADJ_UP_CNT << 0))},
+
+ /* lhl_lp_dn_ctl1_adr, set down count for hpbg_chop_dis, ASR_adj, vddc_sw_dis */
+ {LHL_REG_OFF(lhl_lp_dn_ctl1_adr), ~0, ((LHL4369_VDDC_SW_DIS_DWN_CNT << 24) |
+ (LHL4369_ASR_ADJ_DWN_CNT << 16) | (LHL4369_HPBG_CHOP_DIS_DWN_CNT << 0))},
+
+ /* lhl_lp_up_ctl1_adr, set up count for hpbg_chop_dis, ASR_adj, vddc_sw_dis */
+ {LHL_REG_OFF(lhl_lp_up_ctl1_adr), ~0, ((LHL4369_VDDC_SW_DIS_UP_CNT << 24) |
+ (LHL4369_ASR_ADJ_UP_CNT << 16) | (LHL4369_HPBG_CHOP_DIS_UP_CNT << 0))},
+
+ /* lhl_lp_dn_ctl4_adr, set down count for ASR fields -
+ * clk4m_dis, lppfm_mode, mode_sel, manual_mode
+ */
+ {LHL_REG_OFF(lhl_lp_dn_ctl4_adr), ~0, ((LHL4369_ASR_MANUAL_MODE_DWN_CNT << 24) |
+ (LHL4369_ASR_MODE_SEL_DWN_CNT << 16) | (LHL4369_ASR_LPPFM_MODE_DWN_CNT << 8) |
+ (LHL4369_ASR_CLK4M_DIS_DWN_CNT << 0))},
+
+ /* lhl_lp_up_ctl4_adr, set up count for ASR fields -
+ * clk4m_dis, lppfm_mode, mode_sel, manual_mode
+ */
+ {LHL_REG_OFF(lhl_lp_up_ctl4_adr), ~0, ((LHL4369_ASR_MANUAL_MODE_UP_CNT << 24) |
+ (LHL4369_ASR_MODE_SEL_UP_CNT << 16)| (LHL4369_ASR_LPPFM_MODE_UP_CNT << 8) |
+ (LHL4369_ASR_CLK4M_DIS_UP_CNT << 0))},
+
+ /* lhl_lp_dn_ctl3_adr, set down count for hpbg_pu, srbg_ref, ASR_overi_dis,
+ * CSR_pfm_pwr_slice_en
+ */
+ {LHL_REG_OFF(lhl_lp_dn_ctl3_adr), ~0, ((LHL4369_PFM_PWR_SLICE_DWN_CNT << 24) |
+ (LHL4369_ASR_OVERI_DIS_DWN_CNT << 16) | (LHL4369_SRBG_REF_SEL_DWN_CNT << 8) |
+ (LHL4369_HPBG_PU_EN_DWN_CNT << 0))},
+
+ /* lhl_lp_up_ctl3_adr, set up count for hpbg_pu, srbg_ref, ASR_overi_dis,
+ * CSR_pfm_pwr_slice_en
+ */
+ {LHL_REG_OFF(lhl_lp_up_ctl3_adr), ~0, ((LHL4369_PFM_PWR_SLICE_UP_CNT << 24) |
+ (LHL4369_ASR_OVERI_DIS_UP_CNT << 16) | (LHL4369_SRBG_REF_SEL_UP_CNT << 8) |
+ (LHL4369_HPBG_PU_EN_UP_CNT << 0))},
+
+ /* lhl_lp_dn_ctl2_adr, set down count for CSR_trim_adj */
+ {LHL_REG_OFF(lhl_lp_dn_ctl2_adr), ~0, (LHL4369_CSR_TRIM_ADJ_DWN_CNT << 16)},
+
+ /* lhl_lp_up_ctl2_adr, set up count for CSR_trim_adj */
+ {LHL_REG_OFF(lhl_lp_up_ctl2_adr), ~0, (LHL4369_CSR_TRIM_ADJ_UP_CNT << 16)},
+
+ /* lhl_lp_dn_ctl5_adr, set down count for ASR_trim_adj */
+ {LHL_REG_OFF(lhl_lp_dn_ctl5_adr), ~0, (LHL4369_ASR_TRIM_ADJ_DWN_CNT << 0)},
+
+ /* lhl_lp_up_ctl5_adr, set down count for ASR_trim_adj */
+ {LHL_REG_OFF(lhl_lp_up_ctl5_adr), ~0, (LHL4369_ASR_TRIM_ADJ_UP_CNT << 0)},
+
+ /* Change the default down count values for the resources */
+ /* lhl_top_pwrdn_ctl_adr, set down count for top_level_sleep, iso, slb and pwrsw */
+ {LHL_REG_OFF(lhl_top_pwrdn_ctl_adr), ~0, ((LHL4369_PWRSW_EN_DWN_CNT << 24) |
+ (LHL4369_SLB_EN_DWN_CNT << 16) | (LHL4369_ISO_EN_DWN_CNT << 8))},
+
+ /* lhl_top_pwrdn2_ctl_adr, set down count for VMUX_asr_sel */
+ {LHL_REG_OFF(lhl_top_pwrdn2_ctl_adr), ~0, (LHL4369_VMUX_ASR_SEL_DWN_CNT << 16)},
+
+ /* Change the default up count values for the resources */
+ /* lhl_top_pwrup_ctl_adr, set up count for top_level_sleep, iso, slb and pwrsw */
+ {LHL_REG_OFF(lhl_top_pwrup_ctl_adr), ~0, ((LHL4369_PWRSW_EN_UP_CNT << 24) |
+ (LHL4369_SLB_EN_UP_CNT << 16) | (LHL4369_ISO_EN_UP_CNT << 8))},
+
+ /* lhl_top_pwrdn2_ctl_adr, set down count for VMUX_asr_sel */
+ {LHL_REG_OFF(lhl_top_pwrup2_ctl_adr), ~0, ((LHL4369_VMUX_ASR_SEL_UP_CNT << 16))},
+
+ /* Enable lhl interrupt */
+ {LHL_REG_OFF(gci_intmask), (1 << 30), (1 << 30)},
+
+ /* Enable LHL Wake up */
+ {LHL_REG_OFF(gci_wakemask), (1 << 30), (1 << 30)},
+
+ /* Making forceOTPpwrOn 1 */
+ {LHL_REG_OFF(otpcontrol), (1 << 16), (1 << 16)}
+};
+
+lhl_reg_set_t BCMATTACHDATA(lv_sleep_mode_4378_lhl_reg_set)[] =
+{
+ /* set wl_sleep_en */
+ {LHL_REG_OFF(lhl_top_pwrseq_ctl_adr), (1 << 0), (1 << 0)},
+
+ /* set top_pwrsw_en, top_slb_en, top_iso_en */
+ {LHL_REG_OFF(lhl_top_pwrseq_ctl_adr), BCM_MASK32(5, 3), (0x0 << 3)},
+
+ /* set VMUX_asr_sel_en */
+ {LHL_REG_OFF(lhl_top_pwrseq_ctl_adr), (1 << 8), (1 << 8)},
+
+ /* lhl_lp_main_ctl_adr, disable lp_mode_en, set CSR and ASR field enables for LV mode */
+ {LHL_REG_OFF(lhl_lp_main_ctl_adr), BCM_MASK32(21, 0), 0x3F89FF},
+
+ /* lhl_lp_main_ctl1_adr, set CSR field values - CSR_adj - 0.66V and trim_adj -5mV */
+ {LHL_REG_OFF(lhl_lp_main_ctl1_adr), BCM_MASK32(23, 0), 0x9E9F97},
+
+ /* lhl_lp_main_ctl2_adr, set ASR field values - ASR_adj - 0.76V and trim_adj +5mV */
+ {LHL_REG_OFF(lhl_lp_main_ctl2_adr), BCM_MASK32(13, 0), 0x07EE},
+
+ /* lhl_lp_dn_ctl_adr, set down count for CSR fields- adj, mode, overi_dis */
+ {LHL_REG_OFF(lhl_lp_dn_ctl_adr), ~0, ((LHL4378_CSR_OVERI_DIS_DWN_CNT << 16) |
+ (LHL4378_CSR_MODE_DWN_CNT << 8) | (LHL4378_CSR_ADJ_DWN_CNT << 0))},
+
+ /* lhl_lp_up_ctl_adr, set up count for CSR fields- adj, mode, overi_dis */
+ {LHL_REG_OFF(lhl_lp_up_ctl_adr), ~0, ((LHL4378_CSR_OVERI_DIS_UP_CNT << 16) |
+ (LHL4378_CSR_MODE_UP_CNT << 8) | (LHL4378_CSR_ADJ_UP_CNT << 0))},
+
+ /* lhl_lp_dn_ctl1_adr, set down count for hpbg_chop_dis, ASR_adj, vddc_sw_dis */
+ {LHL_REG_OFF(lhl_lp_dn_ctl1_adr), ~0, ((LHL4378_VDDC_SW_DIS_DWN_CNT << 24) |
+ (LHL4378_ASR_ADJ_DWN_CNT << 16) | (LHL4378_HPBG_CHOP_DIS_DWN_CNT << 0))},
+
+ /* lhl_lp_up_ctl1_adr, set up count for hpbg_chop_dis, ASR_adj, vddc_sw_dis */
+ {LHL_REG_OFF(lhl_lp_up_ctl1_adr), ~0, ((LHL4378_VDDC_SW_DIS_UP_CNT << 24) |
+ (LHL4378_ASR_ADJ_UP_CNT << 16) | (LHL4378_HPBG_CHOP_DIS_UP_CNT << 0))},
+
+ /* lhl_lp_dn_ctl4_adr, set down count for ASR fields -
+ * clk4m_dis, lppfm_mode, mode_sel, manual_mode
+ */
+ {LHL_REG_OFF(lhl_lp_dn_ctl4_adr), ~0, ((LHL4378_ASR_MANUAL_MODE_DWN_CNT << 24) |
+ (LHL4378_ASR_MODE_SEL_DWN_CNT << 16) | (LHL4378_ASR_LPPFM_MODE_DWN_CNT << 8) |
+ (LHL4378_ASR_CLK4M_DIS_DWN_CNT << 0))},
+
+ /* lhl_lp_up_ctl4_adr, set up count for ASR fields -
+ * clk4m_dis, lppfm_mode, mode_sel, manual_mode
+ */
+ {LHL_REG_OFF(lhl_lp_up_ctl4_adr), ~0, ((LHL4378_ASR_MANUAL_MODE_UP_CNT << 24) |
+ (LHL4378_ASR_MODE_SEL_UP_CNT << 16)| (LHL4378_ASR_LPPFM_MODE_UP_CNT << 8) |
+ (LHL4378_ASR_CLK4M_DIS_UP_CNT << 0))},
+
+ /* lhl_lp_dn_ctl3_adr, set down count for hpbg_pu, srbg_ref, ASR_overi_dis,
+ * CSR_pfm_pwr_slice_en
+ */
+ {LHL_REG_OFF(lhl_lp_dn_ctl3_adr), ~0, ((LHL4378_PFM_PWR_SLICE_DWN_CNT << 24) |
+ (LHL4378_ASR_OVERI_DIS_DWN_CNT << 16) | (LHL4378_SRBG_REF_SEL_DWN_CNT << 8) |
+ (LHL4378_HPBG_PU_EN_DWN_CNT << 0))},
+
+ /* lhl_lp_up_ctl3_adr, set up count for hpbg_pu, srbg_ref, ASR_overi_dis,
+ * CSR_pfm_pwr_slice_en
+ */
+ {LHL_REG_OFF(lhl_lp_up_ctl3_adr), ~0, ((LHL4378_PFM_PWR_SLICE_UP_CNT << 24) |
+ (LHL4378_ASR_OVERI_DIS_UP_CNT << 16) | (LHL4378_SRBG_REF_SEL_UP_CNT << 8) |
+ (LHL4378_HPBG_PU_EN_UP_CNT << 0))},
+
+ /* lhl_lp_dn_ctl2_adr, set down count for CSR_trim_adj */
+ {LHL_REG_OFF(lhl_lp_dn_ctl2_adr), LHL4378_CSR_TRIM_ADJ_CNT_MASK,
+ (LHL4378_CSR_TRIM_ADJ_DWN_CNT << LHL4378_CSR_TRIM_ADJ_CNT_SHIFT)},
+
+ /* lhl_lp_up_ctl2_adr, set up count for CSR_trim_adj */
+ {LHL_REG_OFF(lhl_lp_up_ctl2_adr), LHL4378_CSR_TRIM_ADJ_CNT_MASK,
+ (LHL4378_CSR_TRIM_ADJ_UP_CNT << LHL4378_CSR_TRIM_ADJ_CNT_SHIFT)},
+
+ /* lhl_lp_dn_ctl5_adr, set down count for ASR_trim_adj */
+ {LHL_REG_OFF(lhl_lp_dn_ctl5_adr), ~0, (LHL4378_ASR_TRIM_ADJ_DWN_CNT << 0)},
+
+ /* lhl_lp_up_ctl5_adr, set down count for ASR_trim_adj */
+ {LHL_REG_OFF(lhl_lp_up_ctl5_adr), LHL4378_ASR_TRIM_ADJ_CNT_MASK,
+ (LHL4378_ASR_TRIM_ADJ_UP_CNT << LHL4378_ASR_TRIM_ADJ_CNT_SHIFT)},
+
+ /* Change the default down count values for the resources */
+ /* lhl_top_pwrdn_ctl_adr, set down count for top_level_sleep, iso, slb and pwrsw */
+ {LHL_REG_OFF(lhl_top_pwrdn_ctl_adr), ~0, ((LHL4378_PWRSW_EN_DWN_CNT << 24) |
+ (LHL4378_SLB_EN_DWN_CNT << 16) | (LHL4378_ISO_EN_DWN_CNT << 8))},
+
+ /* lhl_top_pwrdn2_ctl_adr, set down count for VMUX_asr_sel */
+ {LHL_REG_OFF(lhl_top_pwrdn2_ctl_adr), ~0, (LHL4378_VMUX_ASR_SEL_DWN_CNT << 16)},
+
+ /* Change the default up count values for the resources */
+ /* lhl_top_pwrup_ctl_adr, set up count for top_level_sleep, iso, slb and pwrsw */
+ {LHL_REG_OFF(lhl_top_pwrup_ctl_adr), ~0, ((LHL4378_PWRSW_EN_UP_CNT << 24) |
+ (LHL4378_SLB_EN_UP_CNT << 16) | (LHL4378_ISO_EN_UP_CNT << 8))},
+
+ /* lhl_top_pwrdn2_ctl_adr, set down count for VMUX_asr_sel */
+ {LHL_REG_OFF(lhl_top_pwrup2_ctl_adr), ~0, ((LHL4378_VMUX_ASR_SEL_UP_CNT << 16))},
+
+ /* Enable lhl interrupt */
+ {LHL_REG_OFF(gci_intmask), (1 << 30), (1 << 30)},
+
+ /* Enable LHL Wake up */
+ {LHL_REG_OFF(gci_wakemask), (1 << 30), (1 << 30)},
+
+ /* Making forceOTPpwrOn 1 */
+ {LHL_REG_OFF(otpcontrol), (1 << 16), (1 << 16)}
+};
+
+lhl_reg_set_t BCMATTACHDATA(lv_sleep_mode_4387_lhl_reg_set)[] =
+{
+ {LHL_REG_OFF(lhl_top_pwrseq_ctl_adr),
+ LHL_TOP_PWRSEQ_SLEEP_ENAB_MASK |
+ LHL_TOP_PWRSEQ_MISCLDO_PU_EN_MASK |
+ LHL_TOP_PWRSEQ_SERDES_SLB_EN_MASK |
+ LHL_TOP_PWRSEQ_SERDES_CLK_DIS_EN_MASK,
+ LHL_TOP_PWRSEQ_SLEEP_ENAB_MASK |
+ LHL_TOP_PWRSEQ_MISCLDO_PU_EN_MASK |
+ LHL_TOP_PWRSEQ_SERDES_SLB_EN_MASK |
+ LHL_TOP_PWRSEQ_SERDES_CLK_DIS_EN_MASK},
+
+ /* lhl_lp_main_ctl_adr, disable lp_mode_en, set CSR and ASR field enables for LV mode */
+ {LHL_REG_OFF(lhl_lp_main_ctl_adr), BCM_MASK32(21, 0), 0x3F89FF},
+
+ /* lhl_lp_main_ctl1_adr, set CSR field values - CSR_adj - 0.64V and trim_adj -5mV */
+ {LHL_REG_OFF(lhl_lp_main_ctl1_adr), BCM_MASK32(23, 0), 0x9ED797},
+
+ /* lhl_lp_main_ctl2_adr, set ASR field values - ASR_adj - 0.64V and trim_adj +5mV */
+ {LHL_REG_OFF(lhl_lp_main_ctl2_adr), BCM_MASK32(13, 0), 0x076D},
+
+ /* lhl_lp_dn_ctl_adr, set down count for CSR fields- adj, mode, overi_dis */
+ {LHL_REG_OFF(lhl_lp_dn_ctl_adr), ~0, ((LHL4378_CSR_OVERI_DIS_DWN_CNT << 16) |
+ (LHL4378_CSR_MODE_DWN_CNT << 8) | (LHL4378_CSR_ADJ_DWN_CNT << 0))},
+
+ /* lhl_lp_up_ctl_adr, set up count for CSR fields- adj, mode, overi_dis */
+ {LHL_REG_OFF(lhl_lp_up_ctl_adr), ~0, ((LHL4378_CSR_OVERI_DIS_UP_CNT << 16) |
+ (LHL4378_CSR_MODE_UP_CNT << 8) | (LHL4378_CSR_ADJ_UP_CNT << 0))},
+
+ /* lhl_lp_dn_ctl1_adr, set down count for hpbg_chop_dis, ASR_adj, vddc_sw_dis */
+ {LHL_REG_OFF(lhl_lp_dn_ctl1_adr), ~0, ((LHL4378_VDDC_SW_DIS_DWN_CNT << 24) |
+ (LHL4378_ASR_ADJ_DWN_CNT << 16) | (LHL4378_HPBG_CHOP_DIS_DWN_CNT << 0))},
+
+ /* lhl_lp_up_ctl1_adr, set up count for hpbg_chop_dis, ASR_adj, vddc_sw_dis */
+ {LHL_REG_OFF(lhl_lp_up_ctl1_adr), ~0, ((LHL4378_VDDC_SW_DIS_UP_CNT << 24) |
+ (LHL4378_ASR_ADJ_UP_CNT << 16) | (LHL4378_HPBG_CHOP_DIS_UP_CNT << 0))},
+
+ /* lhl_lp_dn_ctl4_adr, set down count for ASR fields -
+ * clk4m_dis, lppfm_mode, mode_sel, manual_mode
+ */
+ {LHL_REG_OFF(lhl_lp_dn_ctl4_adr), ~0, ((LHL4378_ASR_MANUAL_MODE_DWN_CNT << 24) |
+ (LHL4378_ASR_MODE_SEL_DWN_CNT << 16) | (LHL4378_ASR_LPPFM_MODE_DWN_CNT << 8) |
+ (LHL4378_ASR_CLK4M_DIS_DWN_CNT << 0))},
+
+ /* lhl_lp_up_ctl4_adr, set up count for ASR fields -
+ * clk4m_dis, lppfm_mode, mode_sel, manual_mode
+ */
+ {LHL_REG_OFF(lhl_lp_up_ctl4_adr), ~0, ((LHL4378_ASR_MANUAL_MODE_UP_CNT << 24) |
+ (LHL4378_ASR_MODE_SEL_UP_CNT << 16)| (LHL4378_ASR_LPPFM_MODE_UP_CNT << 8) |
+ (LHL4378_ASR_CLK4M_DIS_UP_CNT << 0))},
+
+ /* lhl_lp_dn_ctl3_adr, set down count for hpbg_pu, srbg_ref, ASR_overi_dis,
+ * CSR_pfm_pwr_slice_en
+ */
+ {LHL_REG_OFF(lhl_lp_dn_ctl3_adr), ~0, ((LHL4378_PFM_PWR_SLICE_DWN_CNT << 24) |
+ (LHL4378_ASR_OVERI_DIS_DWN_CNT << 16) | (LHL4378_SRBG_REF_SEL_DWN_CNT << 8) |
+ (LHL4378_HPBG_PU_EN_DWN_CNT << 0))},
+
+ /* lhl_lp_up_ctl3_adr, set up count for hpbg_pu, srbg_ref, ASR_overi_dis,
+ * CSR_pfm_pwr_slice_en
+ */
+ {LHL_REG_OFF(lhl_lp_up_ctl3_adr), ~0, ((LHL4378_PFM_PWR_SLICE_UP_CNT << 24) |
+ (LHL4378_ASR_OVERI_DIS_UP_CNT << 16) | (LHL4378_SRBG_REF_SEL_UP_CNT << 8) |
+ (LHL4378_HPBG_PU_EN_UP_CNT << 0))},
+
+ /* lhl_lp_dn_ctl2_adr, set down count for CSR_trim_adj */
+ {LHL_REG_OFF(lhl_lp_dn_ctl2_adr), LHL4378_CSR_TRIM_ADJ_CNT_MASK,
+ (LHL4378_CSR_TRIM_ADJ_DWN_CNT << LHL4378_CSR_TRIM_ADJ_CNT_SHIFT)},
+
+ /* lhl_lp_up_ctl2_adr, set up count for CSR_trim_adj */
+ {LHL_REG_OFF(lhl_lp_up_ctl2_adr), LHL4378_CSR_TRIM_ADJ_CNT_MASK,
+ (LHL4378_CSR_TRIM_ADJ_UP_CNT << LHL4378_CSR_TRIM_ADJ_CNT_SHIFT)},
+
+ /* lhl_lp_dn_ctl5_adr, set down count for ASR_trim_adj */
+ {LHL_REG_OFF(lhl_lp_dn_ctl5_adr), LHL4378_ASR_TRIM_ADJ_CNT_MASK,
+ (LHL4378_ASR_TRIM_ADJ_DWN_CNT << LHL4378_ASR_TRIM_ADJ_CNT_SHIFT)},
+
+ /* lhl_lp_up_ctl5_adr, set down count for ASR_trim_adj */
+ {LHL_REG_OFF(lhl_lp_up_ctl5_adr), LHL4378_ASR_TRIM_ADJ_CNT_MASK,
+ (LHL4378_ASR_TRIM_ADJ_UP_CNT << LHL4378_ASR_TRIM_ADJ_CNT_SHIFT)},
+
+ /* Change the default down count values for the resources */
+ /* lhl_top_pwrdn_ctl_adr, set down count for top_level_sleep, iso, slb and pwrsw */
+ {LHL_REG_OFF(lhl_top_pwrdn_ctl_adr), ~0, ((LHL4378_PWRSW_EN_DWN_CNT << 24) |
+ (LHL4378_SLB_EN_DWN_CNT << 16) | (LHL4378_ISO_EN_DWN_CNT << 8))},
+
+ /* lhl_top_pwrdn2_ctl_adr, set down count for VMUX_asr_sel */
+ {LHL_REG_OFF(lhl_top_pwrdn2_ctl_adr), ~0, (LHL4387_VMUX_ASR_SEL_DWN_CNT << 16)},
+
+ /* Change the default up count values for the resources */
+ /* lhl_top_pwrup_ctl_adr, set up count for top_level_sleep, iso, slb and pwrsw */
+ {LHL_REG_OFF(lhl_top_pwrup_ctl_adr), ~0, ((LHL4378_PWRSW_EN_UP_CNT << 24) |
+ (LHL4378_SLB_EN_UP_CNT << 16) | (LHL4378_ISO_EN_UP_CNT << 8))},
+
+ /* lhl_top_pwrdn2_ctl_adr, set down count for VMUX_asr_sel */
+ {LHL_REG_OFF(lhl_top_pwrup2_ctl_adr), ~0, ((LHL4387_VMUX_ASR_SEL_UP_CNT << 16))},
+
+ /* Enable lhl interrupt */
+ {LHL_REG_OFF(gci_intmask), (1 << 30), (1 << 30)},
+
+ /* Enable LHL Wake up */
+ {LHL_REG_OFF(gci_wakemask), (1 << 30), (1 << 30)},
+
+ /* Making forceOTPpwrOn 1 */
+ {LHL_REG_OFF(otpcontrol), (1 << 16), (1 << 16)},
+
+ /* serdes_clk_dis dn=2, miscldo_pu dn=6; Also include CRWLLHL-48 WAR set bit31 */
+ {LHL_REG_OFF(lhl_top_pwrdn3_ctl_adr), ~0, 0x80040c02},
+
+ /* serdes_clk_dis dn=11, miscldo_pu dn=0 */
+ {LHL_REG_OFF(lhl_top_pwrup3_ctl_adr), ~0, 0x00160010}
+};
+
+lhl_reg_set_t BCMATTACHDATA(lv_sleep_mode_4387_lhl_reg_set_top_off)[] =
+{
+ {LHL_REG_OFF(lhl_top_pwrseq_ctl_adr),
+ LHL_TOP_PWRSEQ_SLEEP_ENAB_MASK |
+ LHL_TOP_PWRSEQ_TOP_ISO_EN_MASK |
+ LHL_TOP_PWRSEQ_TOP_SLB_EN_MASK |
+ LHL_TOP_PWRSEQ_TOP_PWRSW_EN_MASK |
+ LHL_TOP_PWRSEQ_MISCLDO_PU_EN_MASK,
+ LHL_TOP_PWRSEQ_SLEEP_ENAB_MASK |
+ LHL_TOP_PWRSEQ_TOP_ISO_EN_MASK |
+ LHL_TOP_PWRSEQ_TOP_SLB_EN_MASK |
+ LHL_TOP_PWRSEQ_TOP_PWRSW_EN_MASK |
+ LHL_TOP_PWRSEQ_MISCLDO_PU_EN_MASK},
+
+ /* lhl_lp_main_ctl_adr, disable lp_mode_en, set CSR and ASR field enables for LV mode */
+ {LHL_REG_OFF(lhl_lp_main_ctl_adr), BCM_MASK32(21, 0), 0x3F87DB},
+
+ /* lhl_lp_main_ctl1_adr, set CSR field values - CSR_adj - 0.64V and trim_adj -5mV */
+ {LHL_REG_OFF(lhl_lp_main_ctl1_adr), BCM_MASK32(23, 0), 0x9ED7B7},
+
+ /* lhl_lp_main_ctl2_adr, set ASR field values - ASR_adj - 0.64V and trim_adj +5mV */
+ {LHL_REG_OFF(lhl_lp_main_ctl2_adr), BCM_MASK32(13, 0), 0x076D},
+
+ /* lhl_lp_dn_ctl_adr, set down count for CSR fields- adj, mode, overi_dis */
+ {LHL_REG_OFF(lhl_lp_dn_ctl_adr), ~0, ((LHL4387_TO_CSR_OVERI_DIS_DWN_CNT << 16) |
+ (LHL4387_TO_CSR_MODE_DWN_CNT << 8) | (LHL4387_TO_CSR_ADJ_DWN_CNT << 0))},
+
+ /* lhl_lp_up_ctl_adr, set up count for CSR fields- adj, mode, overi_dis */
+ {LHL_REG_OFF(lhl_lp_up_ctl_adr), ~0, ((LHL4387_TO_CSR_OVERI_DIS_UP_CNT << 16) |
+ (LHL4387_TO_CSR_MODE_UP_CNT << 8) | (LHL4387_TO_CSR_ADJ_UP_CNT << 0))},
+
+ /* lhl_lp_dn_ctl1_adr, set down count for hpbg_chop_dis, lp_mode_dn_cnt,
+ * ASR_adj, vddc_sw_dis
+ */
+ {LHL_REG_OFF(lhl_lp_dn_ctl1_adr), ~0, ((LHL4387_TO_VDDC_SW_DIS_DWN_CNT << 24) |
+ (LHL4387_TO_ASR_ADJ_DWN_CNT << 16) | (LHL4387_TO_LP_MODE_DWN_CNT << 8) |
+ (LHL4387_TO_HPBG_CHOP_DIS_DWN_CNT << 0))},
+
+ /* lhl_lp_up_ctl1_adr, set up count for hpbg_chop_dis, lp_mode_dn_cnt,
+ * ASR_adj, vddc_sw_dis
+ */
+ {LHL_REG_OFF(lhl_lp_up_ctl1_adr), ~0, ((LHL4387_TO_VDDC_SW_DIS_UP_CNT << 24) |
+ (LHL4387_TO_ASR_ADJ_UP_CNT << 16) | (LHL4387_TO_LP_MODE_UP_CNT << 8) |
+ (LHL4387_TO_HPBG_CHOP_DIS_UP_CNT << 0))},
+
+ /* lhl_lp_dn_ctl4_adr, set down count for ASR fields -
+ * clk4m_dis, lppfm_mode, mode_sel, manual_mode
+ */
+ {LHL_REG_OFF(lhl_lp_dn_ctl4_adr), ~0, ((LHL4387_TO_ASR_MANUAL_MODE_DWN_CNT << 24) |
+ (LHL4387_TO_ASR_MODE_SEL_DWN_CNT << 16) | (LHL4387_TO_ASR_LPPFM_MODE_DWN_CNT << 8) |
+ (LHL4387_TO_ASR_CLK4M_DIS_DWN_CNT << 0))},
+
+ /* lhl_lp_up_ctl4_adr, set up count for ASR fields -
+ * clk4m_dis, lppfm_mode, mode_sel, manual_mode
+ */
+ {LHL_REG_OFF(lhl_lp_up_ctl4_adr), ~0, ((LHL4387_TO_ASR_MANUAL_MODE_UP_CNT << 24) |
+ (LHL4387_TO_ASR_MODE_SEL_UP_CNT << 16)| (LHL4387_TO_ASR_LPPFM_MODE_UP_CNT << 8) |
+ (LHL4387_TO_ASR_CLK4M_DIS_UP_CNT << 0))},
+
+ /* lhl_lp_dn_ctl3_adr, set down count for hpbg_pu, srbg_ref, ASR_overi_dis,
+ * CSR_pfm_pwr_slice_en
+ */
+ {LHL_REG_OFF(lhl_lp_dn_ctl3_adr), ~0, ((LHL4387_TO_PFM_PWR_SLICE_DWN_CNT << 24) |
+ (LHL4387_TO_ASR_OVERI_DIS_DWN_CNT << 16) | (LHL4387_TO_SRBG_REF_SEL_DWN_CNT << 8) |
+ (LHL4387_TO_HPBG_PU_EN_DWN_CNT << 0))},
+
+ /* lhl_lp_up_ctl3_adr, set up count for hpbg_pu, srbg_ref, ASR_overi_dis,
+ * CSR_pfm_pwr_slice_en
+ */
+ {LHL_REG_OFF(lhl_lp_up_ctl3_adr), ~0, ((LHL4387_TO_PFM_PWR_SLICE_UP_CNT << 24) |
+ (LHL4387_TO_ASR_OVERI_DIS_UP_CNT << 16) | (LHL4387_TO_SRBG_REF_SEL_UP_CNT << 8) |
+ (LHL4387_TO_HPBG_PU_EN_UP_CNT << 0))},
+
+ /* ASR_trim_adj downcount=0x3, [30:24] is default value for spmi_*io_sel */
+ {LHL_REG_OFF(lhl_lp_dn_ctl5_adr), LHL4378_ASR_TRIM_ADJ_CNT_MASK, 0x3},
+
+ /* ASR_trim_adj upcount=0x1, [30:24] is default value for spmi_*io_sel */
+ {LHL_REG_OFF(lhl_lp_up_ctl5_adr), LHL4378_ASR_TRIM_ADJ_CNT_MASK, 0x1},
+
+ /* Change the default down count values for the resources */
+ /* lhl_top_pwrdn_ctl_adr, set down count for top_level_sleep, iso, slb and pwrsw */
+ {LHL_REG_OFF(lhl_top_pwrdn_ctl_adr), ~0, ((LHL4387_TO_PWRSW_EN_DWN_CNT << 24) |
+ (LHL4387_TO_SLB_EN_DWN_CNT << 16) | (LHL4387_TO_ISO_EN_DWN_CNT << 8) |
+ (LHL4387_TO_TOP_SLP_EN_DWN_CNT))},
+
+ /* Change the default up count values for the resources */
+ /* lhl_top_pwrup_ctl_adr, set up count for top_level_sleep, iso, slb and pwrsw */
+ {LHL_REG_OFF(lhl_top_pwrup_ctl_adr), ~0, ((LHL4387_TO_PWRSW_EN_UP_CNT << 24) |
+ (LHL4387_TO_SLB_EN_UP_CNT << 16) | (LHL4387_TO_ISO_EN_UP_CNT << 8) |
+ (LHL4387_TO_TOP_SLP_EN_UP_CNT))},
+
+ /* lhl_top_pwrup2_ctl, serdes_slb_en_up_cnt=0x7 */
+ {LHL_REG_OFF(lhl_top_pwrup2_ctl_adr), LHL4378_CSR_TRIM_ADJ_CNT_MASK, 0xe0000},
+
+ /* lhl_top_pwrdn2_ctl, serdes_slb_en_dn_cnt=0x2 */
+ {LHL_REG_OFF(lhl_top_pwrdn2_ctl_adr), LHL4378_CSR_TRIM_ADJ_CNT_MASK, 0x40000},
+
+ /* Enable lhl interrupt */
+ {LHL_REG_OFF(gci_intmask), (1 << 30), (1 << 30)},
+
+ /* Enable LHL Wake up */
+ {LHL_REG_OFF(gci_wakemask), (1 << 30), (1 << 30)},
+
+ /* Making forceOTPpwrOn 1 */
+ {LHL_REG_OFF(otpcontrol), (1 << 16), (1 << 16)},
+
+ /* lhl_top_pwrup3_ctl, FLL pu power up count=0x8, miscldo pu power up count=0x0,
+ * serdes_clk_dis up count=0x7
+ */
+ {LHL_REG_OFF(lhl_top_pwrup3_ctl_adr), ~0, 0xe0010},
+
+ /* lhl_top_pwrdn3_ctl, FLL pu power up count=0x1,miscldo pu power up count=0x3,
+ * serdes_clk_dis up count=0x1
+ */
+ {LHL_REG_OFF(lhl_top_pwrdn3_ctl_adr), ~0, 0x20602}
+};
+
+lhl_reg_set_t BCMATTACHDATA(lv_sleep_mode_4389_lhl_reg_set)[] =
+{
+ /* set wl_sleep_en */
+ {LHL_REG_OFF(lhl_top_pwrseq_ctl_adr), (1 << 0), (1 << 0)},
+
+ /* set top_pwrsw_en, top_slb_en, top_iso_en */
+ {LHL_REG_OFF(lhl_top_pwrseq_ctl_adr), BCM_MASK32(5, 3), (0x0 << 3)},
+
+ /* set VMUX_asr_sel_en */
+ {LHL_REG_OFF(lhl_top_pwrseq_ctl_adr), (1 << 8), (1 << 8)},
+
+ /* lhl_lp_main_ctl_adr, disable lp_mode_en, set CSR and ASR field enables for LV mode */
+ {LHL_REG_OFF(lhl_lp_main_ctl_adr), BCM_MASK32(21, 0), 0x3F89FF},
+
+ /* lhl_lp_main_ctl1_adr, set CSR field values - CSR_adj - 0.64V and trim_adj -5mV */
+ {LHL_REG_OFF(lhl_lp_main_ctl1_adr), BCM_MASK32(23, 0), 0x9EDF97},
+
+ /* lhl_lp_main_ctl2_adr, set ASR field values - ASR_adj - 0.64V and trim_adj +5mV */
+ {LHL_REG_OFF(lhl_lp_main_ctl2_adr), BCM_MASK32(13, 0), 0x07ED},
+
+ /* lhl_lp_dn_ctl_adr, set down count for CSR fields- adj, mode, overi_dis */
+ {LHL_REG_OFF(lhl_lp_dn_ctl_adr), ~0, ((LHL4378_CSR_OVERI_DIS_DWN_CNT << 16) |
+ (LHL4378_CSR_MODE_DWN_CNT << 8) | (LHL4378_CSR_ADJ_DWN_CNT << 0))},
+
+ /* lhl_lp_up_ctl_adr, set up count for CSR fields- adj, mode, overi_dis */
+ {LHL_REG_OFF(lhl_lp_up_ctl_adr), ~0, ((LHL4378_CSR_OVERI_DIS_UP_CNT << 16) |
+ (LHL4378_CSR_MODE_UP_CNT << 8) | (LHL4378_CSR_ADJ_UP_CNT << 0))},
+
+ /* lhl_lp_dn_ctl1_adr, set down count for hpbg_chop_dis, ASR_adj, vddc_sw_dis */
+ {LHL_REG_OFF(lhl_lp_dn_ctl1_adr), ~0, ((LHL4378_VDDC_SW_DIS_DWN_CNT << 24) |
+ (LHL4378_ASR_ADJ_DWN_CNT << 16) | (LHL4378_HPBG_CHOP_DIS_DWN_CNT << 0))},
+
+ /* lhl_lp_up_ctl1_adr, set up count for hpbg_chop_dis, ASR_adj, vddc_sw_dis */
+ {LHL_REG_OFF(lhl_lp_up_ctl1_adr), ~0, ((LHL4378_VDDC_SW_DIS_UP_CNT << 24) |
+ (LHL4378_ASR_ADJ_UP_CNT << 16) | (LHL4378_HPBG_CHOP_DIS_UP_CNT << 0))},
+
+ /* lhl_lp_dn_ctl4_adr, set down count for ASR fields -
+ * clk4m_dis, lppfm_mode, mode_sel, manual_mode
+ */
+ {LHL_REG_OFF(lhl_lp_dn_ctl4_adr), ~0, ((LHL4378_ASR_MANUAL_MODE_DWN_CNT << 24) |
+ (LHL4378_ASR_MODE_SEL_DWN_CNT << 16) | (LHL4378_ASR_LPPFM_MODE_DWN_CNT << 8) |
+ (LHL4378_ASR_CLK4M_DIS_DWN_CNT << 0))},
+
+ /* lhl_lp_up_ctl4_adr, set up count for ASR fields -
+ * clk4m_dis, lppfm_mode, mode_sel, manual_mode
+ */
+ {LHL_REG_OFF(lhl_lp_up_ctl4_adr), ~0, ((LHL4378_ASR_MANUAL_MODE_UP_CNT << 24) |
+ (LHL4378_ASR_MODE_SEL_UP_CNT << 16)| (LHL4378_ASR_LPPFM_MODE_UP_CNT << 8) |
+ (LHL4378_ASR_CLK4M_DIS_UP_CNT << 0))},
+
+ /* lhl_lp_dn_ctl3_adr, set down count for hpbg_pu, srbg_ref, ASR_overi_dis,
+ * CSR_pfm_pwr_slice_en
+ */
+ {LHL_REG_OFF(lhl_lp_dn_ctl3_adr), ~0, ((LHL4378_PFM_PWR_SLICE_DWN_CNT << 24) |
+ (LHL4378_ASR_OVERI_DIS_DWN_CNT << 16) | (LHL4378_SRBG_REF_SEL_DWN_CNT << 8) |
+ (LHL4378_HPBG_PU_EN_DWN_CNT << 0))},
+
+ /* lhl_lp_up_ctl3_adr, set up count for hpbg_pu, srbg_ref, ASR_overi_dis,
+ * CSR_pfm_pwr_slice_en
+ */
+ {LHL_REG_OFF(lhl_lp_up_ctl3_adr), ~0, ((LHL4378_PFM_PWR_SLICE_UP_CNT << 24) |
+ (LHL4378_ASR_OVERI_DIS_UP_CNT << 16) | (LHL4378_SRBG_REF_SEL_UP_CNT << 8) |
+ (LHL4378_HPBG_PU_EN_UP_CNT << 0))},
+
+ /* lhl_lp_dn_ctl2_adr, set down count for CSR_trim_adj */
+ {LHL_REG_OFF(lhl_lp_dn_ctl2_adr), ~0, (LHL4378_CSR_TRIM_ADJ_DWN_CNT << 16)},
+
+ /* lhl_lp_up_ctl2_adr, set up count for CSR_trim_adj */
+ {LHL_REG_OFF(lhl_lp_up_ctl2_adr), ~0, (LHL4378_CSR_TRIM_ADJ_UP_CNT << 16)},
+
+ /* lhl_lp_dn_ctl5_adr, set down count for ASR_trim_adj */
+ {LHL_REG_OFF(lhl_lp_dn_ctl5_adr), LHL4378_ASR_TRIM_ADJ_CNT_MASK,
+ (LHL4378_ASR_TRIM_ADJ_DWN_CNT << LHL4378_ASR_TRIM_ADJ_CNT_SHIFT)},
+
+ /* lhl_lp_up_ctl5_adr, set down count for ASR_trim_adj */
+ {LHL_REG_OFF(lhl_lp_up_ctl5_adr), LHL4378_ASR_TRIM_ADJ_CNT_MASK,
+ (LHL4378_ASR_TRIM_ADJ_UP_CNT << LHL4378_ASR_TRIM_ADJ_CNT_SHIFT)},
+
+ /* Change the default down count values for the resources */
+ /* lhl_top_pwrdn_ctl_adr, set down count for top_level_sleep, iso, slb and pwrsw */
+ {LHL_REG_OFF(lhl_top_pwrdn_ctl_adr), ~0, ((LHL4378_PWRSW_EN_DWN_CNT << 24) |
+ (LHL4378_SLB_EN_DWN_CNT << 16) | (LHL4378_ISO_EN_DWN_CNT << 8))},
+
+ /* lhl_top_pwrdn2_ctl_adr, set down count for VMUX_asr_sel */
+ {LHL_REG_OFF(lhl_top_pwrdn2_ctl_adr), ~0, (LHL4387_VMUX_ASR_SEL_DWN_CNT << 16)},
+
+ /* Change the default up count values for the resources */
+ /* lhl_top_pwrup_ctl_adr, set up count for top_level_sleep, iso, slb and pwrsw */
+ {LHL_REG_OFF(lhl_top_pwrup_ctl_adr), ~0, ((LHL4378_PWRSW_EN_UP_CNT << 24) |
+ (LHL4378_SLB_EN_UP_CNT << 16) | (LHL4378_ISO_EN_UP_CNT << 8))},
+
+ /* lhl_top_pwrdn2_ctl_adr, set down count for VMUX_asr_sel */
+ {LHL_REG_OFF(lhl_top_pwrup2_ctl_adr), ~0, ((LHL4387_VMUX_ASR_SEL_UP_CNT << 16))},
+
+ /* Enable lhl interrupt */
+ {LHL_REG_OFF(gci_intmask), (1 << 30), (1 << 30)},
+
+ /* Enable LHL Wake up */
+ {LHL_REG_OFF(gci_wakemask), (1 << 30), (1 << 30)},
+
+ /* Making forceOTPpwrOn 1 */
+ {LHL_REG_OFF(otpcontrol), (1 << 16), (1 << 16)},
+
+ /* serdes_clk_dis dn=2, miscldo_pu dn=6; Also include CRWLLHL-48 WAR set bit31 */
+ {LHL_REG_OFF(lhl_top_pwrdn3_ctl_adr), ~0, 0x80040c02},
+
+ /* serdes_clk_dis dn=11, miscldo_pu dn=0 */
+ {LHL_REG_OFF(lhl_top_pwrup3_ctl_adr), ~0, 0x00160010}
+};
+
+/* LV sleep mode summary:
+ * LV mode is where both ABUCK and CBUCK are programmed to low voltages during
+ * sleep, and VMUX selects ABUCK as VDDOUT_AON. LPLDO needs to be powered off:
+ * ASR is ON, LPLDO is OFF.
+ */
+void
+BCMATTACHFN(si_set_lv_sleep_mode_lhl_config_4369)(si_t *sih)
+{
+ uint i;
+ uint coreidx = si_findcoreidx(sih, GCI_CORE_ID, 0);
+ lhl_reg_set_t *regs = lv_sleep_mode_4369_lhl_reg_set;
+
+ /* Enable LHL LV mode:
+ * lhl_top_pwrseq_ctl_adr, set wl_sleep_en, iso_en, slb_en, pwrsw_en,VMUX_asr_sel_en
+ */
+ for (i = 0; i < ARRAYSIZE(lv_sleep_mode_4369_lhl_reg_set); i++) {
+ si_corereg(sih, coreidx, regs[i].offset, regs[i].mask, regs[i].val);
+ }
+ if (getintvar(NULL, rstr_rfldo3p3_cap_war)) {
+ si_corereg(sih, coreidx, LHL_REG_OFF(lhl_lp_main_ctl1_adr),
+ BCM_MASK32(23, 0), 0x9E9F9F);
+ }
+}
+
+void
+BCMATTACHFN(si_set_lv_sleep_mode_lhl_config_4378)(si_t *sih)
+{
+ uint i;
+ uint coreidx = si_findcoreidx(sih, GCI_CORE_ID, 0);
+ lhl_reg_set_t *regs = lv_sleep_mode_4378_lhl_reg_set;
+
+ /* Enable LHL LV mode:
+ * lhl_top_pwrseq_ctl_adr, set wl_sleep_en, iso_en, slb_en, pwrsw_en,VMUX_asr_sel_en
+ */
+ for (i = 0; i < ARRAYSIZE(lv_sleep_mode_4378_lhl_reg_set); i++) {
+ si_corereg(sih, coreidx, regs[i].offset, regs[i].mask, regs[i].val);
+ }
+}
+
+void
+BCMATTACHFN(si_set_lv_sleep_mode_lhl_config_4387)(si_t *sih)
+{
+ uint i;
+ uint coreidx = si_findcoreidx(sih, GCI_CORE_ID, 0);
+ lhl_reg_set_t *regs;
+ uint32 abuck_volt_sleep, cbuck_volt_sleep;
+ uint regs_size;
+
+ if (BCMSRTOPOFF_ENAB()) {
+ regs = lv_sleep_mode_4387_lhl_reg_set_top_off;
+ regs_size = ARRAYSIZE(lv_sleep_mode_4387_lhl_reg_set_top_off);
+ } else {
+ /* Enable LHL LV mode:
+ * lhl_top_pwrseq_ctl_adr, set wl_sleep_en, iso_en, slb_en, pwrsw_en,VMUX_asr_sel_en
+ */
+ regs = lv_sleep_mode_4387_lhl_reg_set;
+ regs_size = ARRAYSIZE(lv_sleep_mode_4387_lhl_reg_set);
+ }
+
+ for (i = 0; i < regs_size; i++) {
+ si_corereg(sih, coreidx, regs[i].offset, regs[i].mask, regs[i].val);
+ }
+
+ if (getvar(NULL, rstr_cbuck_volt_sleep) != NULL) {
+ cbuck_volt_sleep = getintvar(NULL, rstr_cbuck_volt_sleep);
+ LHL_REG(sih, lhl_lp_main_ctl1_adr, LHL_CBUCK_VOLT_SLEEP_MASK,
+ (cbuck_volt_sleep << LHL_CBUCK_VOLT_SLEEP_SHIFT));
+ }
+
+ if (getvar(NULL, rstr_abuck_volt_sleep) != NULL) {
+ abuck_volt_sleep = getintvar(NULL, rstr_abuck_volt_sleep);
+ LHL_REG(sih, lhl_lp_main_ctl2_adr, LHL_ABUCK_VOLT_SLEEP_MASK,
+ (abuck_volt_sleep << LHL_ABUCK_VOLT_SLEEP_SHIFT));
+ }
+
+ if (BCMSRTOPOFF_ENAB()) {
+ /* Serdes AFE retention control enable */
+ si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_05,
+ CC_GCI_05_4387C0_AFE_RET_ENB_MASK,
+ CC_GCI_05_4387C0_AFE_RET_ENB_MASK);
+ }
+}
+
+void
+BCMATTACHFN(si_set_lv_sleep_mode_lhl_config_4389)(si_t *sih)
+{
+ uint i;
+ uint coreidx = si_findcoreidx(sih, GCI_CORE_ID, 0);
+ lhl_reg_set_t *regs = lv_sleep_mode_4389_lhl_reg_set;
+ uint32 abuck_volt_sleep, cbuck_volt_sleep;
+
+ /* Enable LHL LV mode:
+ * lhl_top_pwrseq_ctl_adr, set wl_sleep_en, iso_en, slb_en, pwrsw_en,VMUX_asr_sel_en
+ */
+ for (i = 0; i < ARRAYSIZE(lv_sleep_mode_4389_lhl_reg_set); i++) {
+ si_corereg(sih, coreidx, regs[i].offset, regs[i].mask, regs[i].val);
+ }
+
+ if (getvar(NULL, rstr_cbuck_volt_sleep) != NULL) {
+ cbuck_volt_sleep = getintvar(NULL, rstr_cbuck_volt_sleep);
+ LHL_REG(sih, lhl_lp_main_ctl1_adr, LHL_CBUCK_VOLT_SLEEP_MASK,
+ (cbuck_volt_sleep << LHL_CBUCK_VOLT_SLEEP_SHIFT));
+ }
+
+ if (getvar(NULL, rstr_abuck_volt_sleep) != NULL) {
+ abuck_volt_sleep = getintvar(NULL, rstr_abuck_volt_sleep);
+ LHL_REG(sih, lhl_lp_main_ctl2_adr, LHL_ABUCK_VOLT_SLEEP_MASK,
+ (abuck_volt_sleep << LHL_ABUCK_VOLT_SLEEP_SHIFT));
+ }
+
+ OSL_DELAY(100);
+ LHL_REG(sih, lhl_top_pwrseq_ctl_adr, ~0, 0x00000101);
+
+ /* Clear Misc_LDO override */
+ si_pmu_vreg_control(sih, PMU_VREG_5, VREG5_4387_MISCLDO_PU_MASK, 0);
+}
+
+lhl_reg_set_t BCMATTACHDATA(lv_sleep_mode_4362_lhl_reg_set)[] =
+{
+ /* set wl_sleep_en */
+ {LHL_REG_OFF(lhl_top_pwrseq_ctl_adr), (1 << 0), (1 << 0)},
+
+ /* set top_pwrsw_en, top_slb_en, top_iso_en */
+ {LHL_REG_OFF(lhl_top_pwrseq_ctl_adr), BCM_MASK32(5, 3), (0x0 << 3)},
+
+ /* set VMUX_asr_sel_en */
+ {LHL_REG_OFF(lhl_top_pwrseq_ctl_adr), (1 << 8), (1 << 8)},
+
+ /* lhl_lp_main_ctl_adr, disable lp_mode_en, set CSR and ASR field enables for LV mode */
+ {LHL_REG_OFF(lhl_lp_main_ctl_adr), BCM_MASK32(21, 0), 0x3F89FF},
+
+ /* lhl_lp_main_ctl1_adr, set CSR field values - CSR_adj - 0.66V and trim_adj -5mV */
+ {LHL_REG_OFF(lhl_lp_main_ctl1_adr), BCM_MASK32(23, 0), 0x9E9F97},
+
+ /* lhl_lp_main_ctl2_adr, set ASR field values - ASR_adj - 0.76V and trim_adj +5mV */
+ {LHL_REG_OFF(lhl_lp_main_ctl2_adr), BCM_MASK32(13, 0), 0x07EE},
+
+ /* lhl_lp_dn_ctl_adr, set down count for CSR fields- adj, mode, overi_dis */
+ {LHL_REG_OFF(lhl_lp_dn_ctl_adr), ~0, ((LHL4362_CSR_OVERI_DIS_DWN_CNT << 16) |
+ (LHL4362_CSR_MODE_DWN_CNT << 8) | (LHL4362_CSR_ADJ_DWN_CNT << 0))},
+
+ /* lhl_lp_up_ctl_adr, set up count for CSR fields- adj, mode, overi_dis */
+ {LHL_REG_OFF(lhl_lp_up_ctl_adr), ~0, ((LHL4362_CSR_OVERI_DIS_UP_CNT << 16) |
+ (LHL4362_CSR_MODE_UP_CNT << 8) | (LHL4362_CSR_ADJ_UP_CNT << 0))},
+
+ /* lhl_lp_dn_ctl1_adr, set down count for hpbg_chop_dis, ASR_adj, vddc_sw_dis */
+ {LHL_REG_OFF(lhl_lp_dn_ctl1_adr), ~0, ((LHL4362_VDDC_SW_DIS_DWN_CNT << 24) |
+ (LHL4362_ASR_ADJ_DWN_CNT << 16) | (LHL4362_HPBG_CHOP_DIS_DWN_CNT << 0))},
+
+ /* lhl_lp_up_ctl1_adr, set up count for hpbg_chop_dis, ASR_adj, vddc_sw_dis */
+ {LHL_REG_OFF(lhl_lp_up_ctl1_adr), ~0, ((LHL4362_VDDC_SW_DIS_UP_CNT << 24) |
+ (LHL4362_ASR_ADJ_UP_CNT << 16) | (LHL4362_HPBG_CHOP_DIS_UP_CNT << 0))},
+
+ /* lhl_lp_dn_ctl4_adr, set down count for ASR fields -
+ * clk4m_dis, lppfm_mode, mode_sel, manual_mode
+ */
+ {LHL_REG_OFF(lhl_lp_dn_ctl4_adr), ~0, ((LHL4362_ASR_MANUAL_MODE_DWN_CNT << 24) |
+ (LHL4362_ASR_MODE_SEL_DWN_CNT << 16) | (LHL4362_ASR_LPPFM_MODE_DWN_CNT << 8) |
+ (LHL4362_ASR_CLK4M_DIS_DWN_CNT << 0))},
+
+ /* lhl_lp_up_ctl4_adr, set up count for ASR fields -
+ * clk4m_dis, lppfm_mode, mode_sel, manual_mode
+ */
+ {LHL_REG_OFF(lhl_lp_up_ctl4_adr), ~0, ((LHL4362_ASR_MANUAL_MODE_UP_CNT << 24) |
+ (LHL4362_ASR_MODE_SEL_UP_CNT << 16)| (LHL4362_ASR_LPPFM_MODE_UP_CNT << 8) |
+ (LHL4362_ASR_CLK4M_DIS_UP_CNT << 0))},
+
+ /* lhl_lp_dn_ctl3_adr, set down count for hpbg_pu, srbg_ref, ASR_overi_dis,
+ * CSR_pfm_pwr_slice_en
+ */
+ {LHL_REG_OFF(lhl_lp_dn_ctl3_adr), ~0, ((LHL4362_PFM_PWR_SLICE_DWN_CNT << 24) |
+ (LHL4362_ASR_OVERI_DIS_DWN_CNT << 16) | (LHL4362_SRBG_REF_SEL_DWN_CNT << 8) |
+ (LHL4362_HPBG_PU_EN_DWN_CNT << 0))},
+
+ /* lhl_lp_up_ctl3_adr, set up count for hpbg_pu, srbg_ref, ASR_overi_dis,
+ * CSR_pfm_pwr_slice_en
+ */
+ {LHL_REG_OFF(lhl_lp_up_ctl3_adr), ~0, ((LHL4362_PFM_PWR_SLICE_UP_CNT << 24) |
+ (LHL4362_ASR_OVERI_DIS_UP_CNT << 16) | (LHL4362_SRBG_REF_SEL_UP_CNT << 8) |
+ (LHL4362_HPBG_PU_EN_UP_CNT << 0))},
+
+ /* lhl_lp_dn_ctl2_adr, set down count for CSR_trim_adj */
+ {LHL_REG_OFF(lhl_lp_dn_ctl2_adr), ~0, (LHL4362_CSR_TRIM_ADJ_DWN_CNT << 16)},
+
+ /* lhl_lp_up_ctl2_adr, set up count for CSR_trim_adj */
+ {LHL_REG_OFF(lhl_lp_up_ctl2_adr), ~0, (LHL4362_CSR_TRIM_ADJ_UP_CNT << 16)},
+
+ /* lhl_lp_dn_ctl5_adr, set down count for ASR_trim_adj */
+ {LHL_REG_OFF(lhl_lp_dn_ctl5_adr), ~0, (LHL4362_ASR_TRIM_ADJ_DWN_CNT << 0)},
+
+ /* lhl_lp_up_ctl5_adr, set down count for ASR_trim_adj */
+ {LHL_REG_OFF(lhl_lp_up_ctl5_adr), ~0, (LHL4362_ASR_TRIM_ADJ_UP_CNT << 0)},
+
+ /* Change the default down count values for the resources */
+ /* lhl_top_pwrdn_ctl_adr, set down count for top_level_sleep, iso, slb and pwrsw */
+ {LHL_REG_OFF(lhl_top_pwrdn_ctl_adr), ~0, ((LHL4362_PWRSW_EN_DWN_CNT << 24) |
+ (LHL4362_SLB_EN_DWN_CNT << 16) | (LHL4362_ISO_EN_DWN_CNT << 8))},
+
+ /* lhl_top_pwrdn2_ctl_adr, set down count for VMUX_asr_sel */
+ {LHL_REG_OFF(lhl_top_pwrdn2_ctl_adr), ~0, (LHL4362_VMUX_ASR_SEL_DWN_CNT << 16)},
+
+ /* Change the default up count values for the resources */
+ /* lhl_top_pwrup_ctl_adr, set up count for top_level_sleep, iso, slb and pwrsw */
+ {LHL_REG_OFF(lhl_top_pwrup_ctl_adr), ~0, ((LHL4362_PWRSW_EN_UP_CNT << 24) |
+ (LHL4362_SLB_EN_UP_CNT << 16) | (LHL4362_ISO_EN_UP_CNT << 8))},
+
+ /* lhl_top_pwrdn2_ctl_adr, set down count for VMUX_asr_sel */
+ {LHL_REG_OFF(lhl_top_pwrup2_ctl_adr), ~0, ((LHL4362_VMUX_ASR_SEL_UP_CNT << 16))},
+
+ /* Enable lhl interrupt */
+ {LHL_REG_OFF(gci_intmask), (1 << 30), (1 << 30)},
+
+ /* Enable LHL Wake up */
+ {LHL_REG_OFF(gci_wakemask), (1 << 30), (1 << 30)},
+
+ /* Making forceOTPpwrOn 1 */
+ {LHL_REG_OFF(otpcontrol), (1 << 16), (1 << 16)}
+};
+
+/* LV sleep mode summary:
+ * LV mode is where both ABUCK and CBUCK are programmed to low voltages during
+ * sleep, and VMUX selects ABUCK as VDDOUT_AON. LPLDO needs to be powered off:
+ * ASR is ON, LPLDO is OFF.
+ */
+void
+BCMATTACHFN(si_set_lv_sleep_mode_lhl_config_4362)(si_t *sih)
+{
+ uint i;
+ uint coreidx = si_findcoreidx(sih, GCI_CORE_ID, 0);
+ lhl_reg_set_t *regs = lv_sleep_mode_4362_lhl_reg_set;
+
+ /* Enable LHL LV mode:
+ * lhl_top_pwrseq_ctl_adr, set wl_sleep_en, iso_en, slb_en, pwrsw_en,VMUX_asr_sel_en
+ */
+ for (i = 0; i < ARRAYSIZE(lv_sleep_mode_4362_lhl_reg_set); i++) {
+ si_corereg(sih, coreidx, regs[i].offset, regs[i].mask, regs[i].val);
+ }
+}
+
+void
+si_lhl_mactim0_set(si_t *sih, uint32 val)
+{
+ LHL_REG(sih, lhl_wl_mactim_int0_adr, LHL_WL_MACTIMER_MASK, val);
+}
diff --git a/bcmdhd.101.10.361.x/hndmem.c b/bcmdhd.101.10.361.x/hndmem.c
new file mode 100755
index 0000000..18dbced
--- /dev/null
+++ b/bcmdhd.101.10.361.x/hndmem.c
@@ -0,0 +1,423 @@
+/*
+ * Utility routines for configuring different memories in Broadcom chips.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#include <typedefs.h>
+#include <sbchipc.h>
+#include <hndsoc.h>
+#include <bcmdevs.h>
+#include <osl.h>
+#include <sbgci.h>
+#include <siutils.h>
+#include <bcmutils.h>
+#include <hndmem.h>
+
+#define IS_MEMTYPE_VALID(mem) (((mem) >= MEM_SOCRAM) && ((mem) < MEM_MAX))
+#define IS_MEMCONFIG_VALID(cfg) (((cfg) >= PDA_CONFIG_CLEAR) && ((cfg) < PDA_CONFIG_MAX))
+
+/* Returns the number of banks in a given memory */
+int
+hndmem_num_banks(si_t *sih, int mem)
+{
+ uint32 savecore, mem_info;
+ int num_banks = 0;
+ gciregs_t *gciregs;
+ osl_t *osh = si_osh(sih);
+
+ if (!IS_MEMTYPE_VALID(mem)) {
+ goto exit;
+ }
+
+ savecore = si_coreidx(sih);
+
+ /* TODO: Check whether SOCRAM core is present or not. If not, bail out */
+ /* In future we need to add code for TCM based chips as well */
+ if (!si_setcore(sih, SOCRAM_CORE_ID, 0)) {
+ goto exit;
+ }
+
+ if (GCIREV(sih->gcirev) >= 9) {
+ gciregs = si_setcore(sih, GCI_CORE_ID, 0);
+
+ mem_info = R_REG(osh, &gciregs->wlan_mem_info);
+
+ switch (mem) {
+ case MEM_SOCRAM:
+ num_banks = (mem_info & WLAN_MEM_INFO_REG_NUMSOCRAMBANKS_MASK) >>
+ WLAN_MEM_INFO_REG_NUMSOCRAMBANKS_SHIFT;
+ break;
+ case MEM_BM:
+ num_banks = (mem_info & WLAN_MEM_INFO_REG_NUMD11MACBM_MASK) >>
+ WLAN_MEM_INFO_REG_NUMD11MACBM_SHIFT;
+ break;
+ case MEM_UCM:
+ num_banks = (mem_info & WLAN_MEM_INFO_REG_NUMD11MACUCM_MASK) >>
+ WLAN_MEM_INFO_REG_NUMD11MACUCM_SHIFT;
+ break;
+ case MEM_SHM:
+ num_banks = (mem_info & WLAN_MEM_INFO_REG_NUMD11MACSHM_MASK) >>
+ WLAN_MEM_INFO_REG_NUMD11MACSHM_SHIFT;
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+ } else {
+ /* TODO: Figure out bank information using SOCRAM registers */
+ }
+
+ si_setcoreidx(sih, savecore);
+exit:
+ return num_banks;
+}
+
+/* Returns the size of a given bank in a given memory */
+int
+hndmem_bank_size(si_t *sih, hndmem_type_t mem, int bank_num)
+{
+ uint32 savecore, bank_info, reg_data;
+ int bank_sz = 0;
+ gciregs_t *gciregs;
+ osl_t *osh = si_osh(sih);
+
+ if (!IS_MEMTYPE_VALID(mem)) {
+ goto exit;
+ }
+
+ savecore = si_coreidx(sih);
+
+ /* TODO: Check whether SOCRAM core is present or not. If not, bail out */
+ /* In future we need to add code for TCM based chips as well */
+ if (!si_setcore(sih, SOCRAM_CORE_ID, 0)) {
+ goto exit;
+ }
+
+ if (GCIREV(sih->gcirev) >= 9) {
+ gciregs = si_setcore(sih, GCI_CORE_ID, 0);
+
+ reg_data = ((mem &
+ GCI_INDIRECT_ADDRESS_REG_GPIOINDEX_MASK) <<
+ GCI_INDIRECT_ADDRESS_REG_GPIOINDEX_SHIFT) |
+ ((bank_num & GCI_INDIRECT_ADDRESS_REG_REGINDEX_MASK)
+ << GCI_INDIRECT_ADDRESS_REG_REGINDEX_SHIFT);
+ W_REG(osh, &gciregs->gci_indirect_addr, reg_data);
+
+ bank_info = R_REG(osh, &gciregs->wlan_bankxinfo);
+ bank_sz = (bank_info & WLAN_BANKXINFO_BANK_SIZE_MASK) >>
+ WLAN_BANKXINFO_BANK_SIZE_SHIFT;
+ } else {
+ /* TODO: Figure out bank size using SOCRAM registers */
+ }
+
+ si_setcoreidx(sih, savecore);
+exit:
+ return bank_sz;
+}
+
+/* Returns the start address of given memory */
+uint32
+hndmem_mem_base(si_t *sih, hndmem_type_t mem)
+{
+ uint32 savecore, base_addr = 0;
+
+	/* Currently only SOCRAM is supported in hardware */
+ if (mem != MEM_SOCRAM) {
+ goto exit;
+ }
+
+ savecore = si_coreidx(sih);
+
+	if (si_setcore(sih, SOCRAM_CORE_ID, 0)) {
+ base_addr = si_get_slaveport_addr(sih, CORE_SLAVE_PORT_1,
+ CORE_BASE_ADDR_0, SOCRAM_CORE_ID, 0);
+ } else {
+ /* TODO: Add code to get the base address of TCM */
+ base_addr = 0;
+ }
+
+ si_setcoreidx(sih, savecore);
+
+exit:
+ return base_addr;
+}
+
+#ifdef BCMDEBUG
+char *hndmem_type_str[] =
+ {
+ "SOCRAM", /* 0 */
+ "BM", /* 1 */
+ "UCM", /* 2 */
+ "SHM", /* 3 */
+ };
+
+/* Dumps the complete memory information */
+void
+hndmem_dump_meminfo_all(si_t *sih)
+{
+ int mem, bank, bank_cnt, bank_sz;
+
+ for (mem = MEM_SOCRAM; mem < MEM_MAX; mem++) {
+ bank_cnt = hndmem_num_banks(sih, mem);
+
+ printf("\nMemtype: %s\n", hndmem_type_str[mem]);
+ for (bank = 0; bank < bank_cnt; bank++) {
+ bank_sz = hndmem_bank_size(sih, mem, bank);
+ printf("Bank-%d: %d KB\n", bank, bank_sz);
+ }
+ }
+}
+#endif /* BCMDEBUG */
+
+/* Configures the Sleep PDA for a particular bank for a given memory type */
+int
+hndmem_sleeppda_bank_config(si_t *sih, hndmem_type_t mem, int bank_num,
+ hndmem_config_t config, uint32 pda)
+{
+ uint32 savecore, reg_data;
+ gciregs_t *gciregs;
+ int err = BCME_OK;
+ osl_t *osh = si_osh(sih);
+
+	/* Sleep PDA is supported only by GCI rev >= 9 */
+	if (GCIREV(sih->gcirev) < 9) {
+		err = BCME_UNSUPPORTED;
+		goto exit;
+	}
+
+	if (!IS_MEMTYPE_VALID(mem)) {
+		err = BCME_BADOPTION;
+		goto exit;
+	}
+
+	if (!IS_MEMCONFIG_VALID(config)) {
+		err = BCME_BADOPTION;
+		goto exit;
+	}
+
+	/* Save the caller's core before switching cores */
+	savecore = si_coreidx(sih);
+
+	/* TODO: Check whether SOCRAM core is present or not. If not, bail out */
+	/* In future we need to add code for TCM based chips as well */
+	if (!si_setcore(sih, SOCRAM_CORE_ID, 0)) {
+		err = BCME_UNSUPPORTED;
+		si_setcoreidx(sih, savecore);
+		goto exit;
+	}
+
+	gciregs = si_setcore(sih, GCI_CORE_ID, 0);
+
+ reg_data = ((mem &
+ GCI_INDIRECT_ADDRESS_REG_GPIOINDEX_MASK) <<
+ GCI_INDIRECT_ADDRESS_REG_GPIOINDEX_SHIFT) |
+ ((bank_num & GCI_INDIRECT_ADDRESS_REG_REGINDEX_MASK)
+ << GCI_INDIRECT_ADDRESS_REG_REGINDEX_SHIFT);
+
+ W_REG(osh, &gciregs->gci_indirect_addr, reg_data);
+
+ if (config == PDA_CONFIG_SET_PARTIAL) {
+ W_REG(osh, &gciregs->wlan_bankxsleeppda, pda);
+ W_REG(osh, &gciregs->wlan_bankxkill, 0);
+	} else if (config == PDA_CONFIG_SET_FULL) {
+ W_REG(osh, &gciregs->wlan_bankxsleeppda, WLAN_BANKX_SLEEPPDA_REG_SLEEPPDA_MASK);
+ W_REG(osh, &gciregs->wlan_bankxkill, WLAN_BANKX_PKILL_REG_SLEEPPDA_MASK);
+ } else {
+ W_REG(osh, &gciregs->wlan_bankxsleeppda, 0);
+ W_REG(osh, &gciregs->wlan_bankxkill, 0);
+ }
+
+ si_setcoreidx(sih, savecore);
+
+exit:
+ return err;
+}
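+
+/* Usage sketch (the mask value is hypothetical; its bit semantics are
+ * chip-specific): program a partial sleep PDA for SOCRAM bank 2.
+ *
+ *	int err = hndmem_sleeppda_bank_config(sih, MEM_SOCRAM, 2,
+ *		PDA_CONFIG_SET_PARTIAL, 0x0000ffff);
+ */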
+
+/* Configures the Active PDA for a particular bank for a given memory type */
+int
+hndmem_activepda_bank_config(si_t *sih, hndmem_type_t mem,
+ int bank_num, hndmem_config_t config, uint32 pda)
+{
+ uint32 savecore, reg_data;
+ gciregs_t *gciregs;
+ int err = BCME_OK;
+ osl_t *osh = si_osh(sih);
+
+ if (!IS_MEMTYPE_VALID(mem)) {
+ err = BCME_BADOPTION;
+ goto exit;
+ }
+
+ if (!IS_MEMCONFIG_VALID(config)) {
+ err = BCME_BADOPTION;
+ goto exit;
+ }
+
+ savecore = si_coreidx(sih);
+
+ /* TODO: Check whether SOCRAM core is present or not. If not, bail out */
+ /* In future we need to add code for TCM based chips as well */
+ if (!si_setcore(sih, SOCRAM_CORE_ID, 0)) {
+ err = BCME_UNSUPPORTED;
+ goto exit;
+ }
+
+ if (GCIREV(sih->gcirev) >= 9) {
+ gciregs = si_setcore(sih, GCI_CORE_ID, 0);
+
+ reg_data = ((mem &
+ GCI_INDIRECT_ADDRESS_REG_GPIOINDEX_MASK) <<
+ GCI_INDIRECT_ADDRESS_REG_GPIOINDEX_SHIFT) |
+ ((bank_num & GCI_INDIRECT_ADDRESS_REG_REGINDEX_MASK)
+ << GCI_INDIRECT_ADDRESS_REG_REGINDEX_SHIFT);
+
+ W_REG(osh, &gciregs->gci_indirect_addr, reg_data);
+
+ if (config == PDA_CONFIG_SET_PARTIAL) {
+ W_REG(osh, &gciregs->wlan_bankxactivepda, pda);
+		} else if (config == PDA_CONFIG_SET_FULL) {
+ W_REG(osh, &gciregs->wlan_bankxactivepda,
+ WLAN_BANKX_SLEEPPDA_REG_SLEEPPDA_MASK);
+ } else {
+ W_REG(osh, &gciregs->wlan_bankxactivepda, 0);
+ }
+ } else {
+ /* TODO: Configure SOCRAM PDA using SOCRAM registers */
+ err = BCME_UNSUPPORTED;
+ }
+
+ si_setcoreidx(sih, savecore);
+
+exit:
+ return err;
+}
+
+/* Configures the Sleep PDA for all the banks for a given memory type */
+int
+hndmem_sleeppda_config(si_t *sih, hndmem_type_t mem, hndmem_config_t config)
+{
+ int bank;
+ int num_banks = hndmem_num_banks(sih, mem);
+ int err = BCME_OK;
+
+ /* Sleep PDA is supported only by GCI rev >= 9 */
+ if (GCIREV(sih->gcirev) < 9) {
+ err = BCME_UNSUPPORTED;
+ goto exit;
+ }
+
+ if (!IS_MEMTYPE_VALID(mem)) {
+ err = BCME_BADOPTION;
+ goto exit;
+ }
+
+ if (!IS_MEMCONFIG_VALID(config)) {
+ err = BCME_BADOPTION;
+ goto exit;
+ }
+
+	for (bank = 0; bank < num_banks; bank++) {
+ err = hndmem_sleeppda_bank_config(sih, mem, bank, config, 0);
+ }
+
+exit:
+ return err;
+}
+
+/* Configures the Active PDA for all the banks for a given memory type */
+int
+hndmem_activepda_config(si_t *sih, hndmem_type_t mem, hndmem_config_t config)
+{
+ int bank;
+ int num_banks = hndmem_num_banks(sih, mem);
+ int err = BCME_OK;
+
+ if (!IS_MEMTYPE_VALID(mem)) {
+ err = BCME_BADOPTION;
+ goto exit;
+ }
+
+ if (!IS_MEMCONFIG_VALID(config)) {
+ err = BCME_BADOPTION;
+ goto exit;
+ }
+
+	for (bank = 0; bank < num_banks; bank++) {
+ err = hndmem_activepda_bank_config(sih, mem, bank, config, 0);
+ }
+
+exit:
+ return err;
+}
+
+/* Turn off/on all the possible banks in a given memory range.
+ * Currently this works only for SOCRAM as this is restricted by HW.
+ */
+int
+hndmem_activepda_mem_config(si_t *sih, hndmem_type_t mem, uint32 mem_start,
+ uint32 size, hndmem_config_t config)
+{
+ int bank, bank_sz, num_banks;
+ int mem_end;
+ int bank_start_addr, bank_end_addr;
+ int err = BCME_OK;
+
+	/* Bank size can be queried only for SOCRAM/TCM; it is not available
+	 * for the other memories (BM, UCM and SHM).
+	 */
+ if (mem != MEM_SOCRAM) {
+ err = BCME_UNSUPPORTED;
+ goto exit;
+ }
+
+ num_banks = hndmem_num_banks(sih, mem);
+ bank_start_addr = hndmem_mem_base(sih, mem);
+ mem_end = mem_start + size - 1;
+
+	for (bank = 0; bank < num_banks; bank++) {
+		/* Bank size is specified in the bankXinfo register in units of KB */
+ bank_sz = 1024 * hndmem_bank_size(sih, mem, bank);
+
+ bank_end_addr = bank_start_addr + bank_sz - 1;
+
+ if (config == PDA_CONFIG_SET_FULL) {
+ /* Check if the bank is completely overlapping with the given mem range */
+ if ((mem_start <= bank_start_addr) && (mem_end >= bank_end_addr)) {
+ err = hndmem_activepda_bank_config(sih, mem, bank, config, 0);
+ }
+ } else {
+			/* Check if the bank completely overlaps the given mem range */
+ if (((mem_start <= bank_start_addr) && (mem_end >= bank_end_addr)) ||
+			/* Check if the bank partially overlaps the given range */
+ ((mem_start <= bank_end_addr) && (mem_end >= bank_start_addr))) {
+ err = hndmem_activepda_bank_config(sih, mem, bank, config, 0);
+ }
+ }
+
+ bank_start_addr += bank_sz;
+ }
+
+exit:
+ return err;
+}
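+
+/* Usage sketch (the offset and size are hypothetical): power down every
+ * SOCRAM bank fully contained in a 256KB region that is no longer used.
+ *
+ *	uint32 base = hndmem_mem_base(sih, MEM_SOCRAM);
+ *	int err = hndmem_activepda_mem_config(sih, MEM_SOCRAM, base + 0x40000,
+ *		0x40000, PDA_CONFIG_SET_FULL);
+ */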
diff --git a/bcmdhd.101.10.361.x/hndpmu.c b/bcmdhd.101.10.361.x/hndpmu.c
new file mode 100755
index 0000000..7bd12e3
--- /dev/null
+++ b/bcmdhd.101.10.361.x/hndpmu.c
@@ -0,0 +1,9929 @@
+/*
+ * Misc utility routines for accessing PMU corerev specific features
+ * of the SiliconBackplane-based Broadcom chips.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+/**
+ * @file
+ * Note: this file contains PLL/FLL related functions. A chip can contain multiple PLLs/FLLs.
+ * However, in the context of this file the baseband ('BB') PLL/FLL is referred to.
+ *
+ * Throughout this code, the prefixes 'pmu1_' and 'pmu2_' are used.
+ * They refer to different revisions of the PMU (which is at revision 18 @ Apr 25, 2012)
+ * pmu1_ marks the transition from PLL to ADFLL (Digital Frequency Locked Loop). It supports
+ * fractional frequency generation. pmu2_ does not support fractional frequency generation.
+ */
+
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <siutils.h>
+#include <bcmdevs.h>
+#include <hndsoc.h>
+#include <sbchipc.h>
+#include <hndchipc.h>
+#include <hndpmu.h>
+#if defined(DONGLEBUILD)
+#include <hndcpu.h>
+#ifdef __arm__
+#include <hndarm.h>
+#endif
+#endif /* DONGLEBUILD */
+#if !defined(BCMDONGLEHOST)
+#include <bcm_math.h>
+#include <bcmotp.h>
+#ifdef BCM_OTP_API
+#include <bcm_otp_api.h>
+#endif /* BCM_OTP_API */
+#endif /* !BCMDONGLEHOST */
+#if !defined(BCMDONGLEHOST)
+#include <saverestore.h>
+#endif
+#include <hndlhl.h>
+#include <sbgci.h>
+#ifdef EVENT_LOG_COMPILE
+#include <event_log.h>
+#endif
+#include <lpflags.h>
+
+#include "siutils_priv.h"
+
+#ifdef BCM_AVS
+#include <bcm_avs.h>
+#endif
+
+#if defined(EVENT_LOG_COMPILE) && defined(BCMDBG_ERR) && defined(ERR_USE_EVENT_LOG)
+#if defined(ERR_USE_EVENT_LOG_RA)
+#define PMU_ERROR(args) EVENT_LOG_RA(EVENT_LOG_TAG_PMU_ERROR, args)
+#else
+#define PMU_ERROR(args) EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_PMU_ERROR, args)
+#endif /* ERR_USE_EVENT_LOG_RA */
+#elif defined(BCMDBG_ERR)
+#define PMU_ERROR(args) printf args
+#else
+#define PMU_ERROR(args)
+#endif /* EVENT_LOG_COMPILE && BCMDBG_ERR && ERR_USE_EVENT_LOG */
+
+#ifdef BCMDBG
+//#define BCMDBG_PMU
+#endif
+
+#ifdef BCMDBG_PMU
+#define PMU_MSG(args) printf args
+#else
+#define PMU_MSG(args)
+#endif /* BCMDBG_PMU */
+
+/* Used to check in verbose debugging messages that are not intended
+ * to be enabled except on private builds.
+ */
+#define PMU_NONE(args)
+#define flags_shift 14
+
+/** contains resource bit positions for a specific chip */
+struct rsc_per_chip {
+ uint8 ht_avail;
+ uint8 macphy_clkavail;
+ uint8 ht_start;
+ uint8 otp_pu;
+ uint8 macphy_aux_clkavail;
+ uint8 macphy_scan_clkavail;
+ uint8 cb_ready;
+ uint8 dig_ready;
+};
+
+typedef struct rsc_per_chip rsc_per_chip_t;
+
+#if defined(BCMPMU_STATS) && !defined(BCMPMU_STATS_DISABLED)
+bool _pmustatsenab = TRUE;
+#else
+bool _pmustatsenab = FALSE;
+#endif /* BCMPMU_STATS */
+
+/* 1MHz lpo enable */
+/* PLEASE USE THIS MACRO IN ATTACH PATH ONLY! */
+#if defined(BCM_FASTLPO) && !defined(BCM_FASTLPO_DISABLED)
+ #define FASTLPO_ENAB() (TRUE)
+#else
+ #define FASTLPO_ENAB() (FALSE)
+#endif
+
+/* Disable the power optimization feature */
+bool _bcm_pwr_opt_dis = FALSE;
+
+#ifdef BCMSRTOPOFF
+bool _srtopoff_enab = FALSE;
+#endif
+
+pmuregs_t *hnd_pmur = NULL; /* PMU core regs */
+
+#if !defined(BCMDONGLEHOST)
+static void si_pmu_chipcontrol_xtal_settings_4369(si_t *sih);
+static void si_pmu_chipcontrol_xtal_settings_4362(si_t *sih);
+static void si_pmu_chipcontrol_xtal_settings_4378(si_t *sih);
+
+/* PLL controls/clocks */
+static void si_pmu1_pllinit1(si_t *sih, osl_t *osh, pmuregs_t *pmu, uint32 xtal);
+static void si_pmu_pll_off(si_t *sih, osl_t *osh, pmuregs_t *pmu, uint32 *min_mask,
+ uint32 *max_mask, uint32 *clk_ctl_st);
+static void si_pmu_pll_on(si_t *sih, osl_t *osh, pmuregs_t *pmu, uint32 min_mask,
+ uint32 max_mask, uint32 clk_ctl_st);
+static void si_pmu_otp_pllcontrol(si_t *sih, osl_t *osh);
+static void si_pmu_otp_vreg_control(si_t *sih, osl_t *osh);
+static void si_pmu_otp_chipcontrol(si_t *sih, osl_t *osh);
+static uint32 si_pmu_def_alp_clock(si_t *sih, osl_t *osh);
+static bool si_pmu_update_pllcontrol(si_t *sih, osl_t *osh, uint32 xtal, bool update_required);
+static uint32 si_pmu_htclk_mask(si_t *sih);
+
+static uint32 si_pmu1_cpuclk0(si_t *sih, osl_t *osh, pmuregs_t *pmu);
+static uint32 si_pmu1_alpclk0(si_t *sih, osl_t *osh, pmuregs_t *pmu);
+
+static uint32 si_pmu1_cpuclk0_pll2(si_t *sih);
+
+/* PMU resources */
+static uint32 si_pmu_res_deps(si_t *sih, osl_t *osh, pmuregs_t *pmu, uint32 rsrcs, bool all);
+static uint si_pmu_res_uptime(si_t *sih, osl_t *osh, pmuregs_t *pmu,
+ uint8 rsrc, bool pmu_fast_trans_en);
+static void si_pmu_res_masks(si_t *sih, uint32 *pmin, uint32 *pmax);
+
+uint32 si_pmu_get_pmutime_diff(si_t *sih, osl_t *osh, pmuregs_t *pmu, uint32 *prev);
+bool si_pmu_wait_for_res_pending(si_t *sih, osl_t *osh, pmuregs_t *pmu, uint usec,
+ bool cond, uint32 *elapsed_time);
+
+#ifdef __ARM_ARCH_7A__
+static uint32 si_pmu_mem_ca7clock(si_t *sih, osl_t *osh);
+#endif
+
+static uint8 fastlpo_dis_get(void);
+static uint8 fastlpo_pcie_dis_get(void);
+
+static uint32 si_pmu_bpclk_4387(si_t *sih);
+
+static int si_pmu_openloop_cal_43012(si_t *sih, uint16 currtemp);
+
+static uint32 si_pmu_pll6val_armclk_calc(osl_t *osh, pmuregs_t *pmu, uint32 armclk, uint32 xtal,
+ bool write);
+static bool si_pmu_armpll_write_required(si_t *sih, uint32 xtal);
+
+uint8 si_pmu_pll28nm_calc_ndiv(uint32 fvco, uint32 xtal, uint32 *ndiv_int, uint32 *ndiv_frac);
+
+void si_pmu_armpll_freq_upd(si_t *sih, uint8 p1div, uint32 ndiv_int, uint32 ndiv_frac);
+void si_pmu_bbpll_freq_upd(si_t *sih, uint8 p1div, uint32 ndiv_int, uint32 ndiv_frac);
+void si_pmu_armpll_chmdiv_upd(si_t *sih, uint32 ch0_mdiv, uint32 ch1_mdiv);
+
+#ifdef BCM_LDO3P3_SOFTSTART
+static int si_pmu_ldo3p3_soft_start_get(si_t *sih, osl_t *osh, uint32 bt_or_wl, int *res);
+static int si_pmu_ldo3p3_soft_start_set(si_t *sih, osl_t *osh, uint32 bt_or_wl, uint32 slew_rate);
+#endif /* BCM_LDO3P3_SOFTSTART */
+#ifdef XTAL_BIAS_FROM_OTP
+static void si_pmu_chipcontrol_xtal_bias_from_otp(si_t *sih, uint8* flag, uint8* val);
+#ifndef BCM_OTP_API
+static void si_pmu_chipcontrol_xtal_bias_cal_done_offsets(si_t *sih, uint16* wrd_offset,
+ uint8* wrd_shift, uint8* wrd_mask);
+static void si_pmu_chipcontrol_xtal_bias_val_offsets(si_t *sih, uint16* wrd_offset,
+ uint8* wrd_shift, uint8* wrd_mask);
+#endif /* !BCM_OTP_API */
+#endif /* XTAL_BIAS_FROM_OTP */
+
+/* The PMU timer ticks once every 32us */
+#define PMU_US_STEPS (32)
+
+void *g_si_pmutmr_lock_arg = NULL;
+si_pmu_callback_t g_si_pmutmr_lock_cb = NULL, g_si_pmutmr_unlock_cb = NULL;
+
+/* FVCO frequency in [KHz] */
+#define FVCO_640 640000 /**< 640MHz */
+#define FVCO_880 880000 /**< 880MHz */
+#define FVCO_1760 1760000 /**< 1760MHz */
+#define FVCO_1440 1440000 /**< 1440MHz */
+#define FVCO_960 960000 /**< 960MHz */
+#define FVCO_960p1 960100 /**< 960.1MHz */
+#define FVCO_960010 960010 /**< 960.0098MHz */
+#define FVCO_961 961000 /**< 961MHz */
+#define FVCO_960p5 960500 /**< 960.5MHz */
+#define FVCO_963 963000 /**< 963MHz */
+#define FVCO_963p01 963010 /**< 963.01MHz */
+#define FVCO_1000 1000000 /**< 1000MHz */
+#define FVCO_1600 1600000 /**< 1600MHz */
+#define FVCO_1920 1920000 /**< 1920MHz */
+#define FVCO_1938	1938000	/**< 1938MHz */
+#define FVCO_385 385000 /**< 385MHz */
+#define FVCO_400 400000 /**< 400MHz */
+#define FVCO_720 720000 /**< 720MHz */
+#define FVCO_2880 2880000 /**< 2880 MHz */
+#define FVCO_2946 2946000 /**< 2946 MHz */
+#define FVCO_3000 3000000 /**< 3000 MHz */
+#define FVCO_3200 3200000 /**< 3200 MHz */
+#define FVCO_1002p8 1002823 /**< 1002.823MHz */
+
+/* defines to make the code more readable */
+/* But 0 is a valid resource number! */
+#define NO_SUCH_RESOURCE 0 /**< means: chip does not have such a PMU resource */
+
+/* use these defines instead of 'magic' values when writing to register pllcontrol_addr */
+#define PMU_PLL_CTRL_REG0 0
+#define PMU_PLL_CTRL_REG1 1
+#define PMU_PLL_CTRL_REG2 2
+#define PMU_PLL_CTRL_REG3 3
+#define PMU_PLL_CTRL_REG4 4
+#define PMU_PLL_CTRL_REG5 5
+#define PMU_PLL_CTRL_REG6 6
+#define PMU_PLL_CTRL_REG7 7
+#define PMU_PLL_CTRL_REG8 8
+#define PMU_PLL_CTRL_REG9 9
+#define PMU_PLL_CTRL_REG10 10
+#define PMU_PLL_CTRL_REG11 11
+#define PMU_PLL_CTRL_REG12 12
+#define PMU_PLL_CTRL_REG13 13
+#define PMU_PLL_CTRL_REG14 14
+#define PMU_PLL_CTRL_REG15 15
+
+#ifndef BCM_OTP_API
+#define OTP_XTAL_BIAS_CAL_DONE_4378_WRD_OFFSET 743
+#define OTP_XTAL_BIAS_CAL_DONE_4378_WRD_SHIFT 8
+#define OTP_XTAL_BIAS_CAL_DONE_4378_WRD_MASK 0x1
+
+#define OTP_XTAL_BIAS_VAL_4378_WRD_OFFSET 743
+#define OTP_XTAL_BIAS_VAL_4378_WRD_SHIFT 0
+#define OTP_XTAL_BIAS_VAL_4378_WRD_MASK 0xFF
+#endif /* !BCM_OTP_API */
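+
+/*
+ * Sketch (editor's addition): with the 4378 offsets above, both fields live
+ * in OTP word 743 and would be extracted roughly as
+ *
+ *   uint16 word = read_otp_word(743);  // read_otp_word() is hypothetical
+ *   uint8 cal_done = (word >> 8) & 0x1;
+ *   uint8 bias_val = word & 0xff;
+ */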
+
+/* changes the drive strength of gpio_12 and gpio_14 from 0x3 to 0x01 */
+#define GPIO_DRIVE_4378_MASK 0x3Fu
+#define GPIO_DRIVE_4378_VAL 0x09u
+
+/**
+ * The chip has one or more PLLs/FLLs (e.g. baseband PLL, USB PHY PLL). The settings of each PLL are
+ * contained within one or more 'PLL control' registers. Since the PLL hardware requires that
+ * changes for one PLL are committed at once, the PMU has a provision for 'updating' all PLL control
+ * registers at once.
+ *
+ * When software wants to change any PLL parameters, it withdraws requests for that PLL clock,
+ * updates the PLL control registers (being careful not to alter any control signals for the
+ * other PLLs), and then writes a 1 to PMUCtl.PllCtnlUpdate to commit the changes. The best
+ * usage model is to bring the PLL down first, then update the PLL control registers.
+ */
+void
+si_pmu_pllupd(si_t *sih)
+{
+ pmu_corereg(sih, SI_CC_IDX, pmucontrol,
+ PCTL_PLL_PLLCTL_UPD, PCTL_PLL_PLLCTL_UPD);
+}
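+
+/*
+ * Usage sketch (editor's addition): following the protocol described above,
+ * a caller stages a PLL control register change and then commits it:
+ *
+ *   // withdraw clock requests for the PLL first, then:
+ *   si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG6, ~0, new_val); // stage
+ *   si_pmu_pllupd(sih);                                     // commit
+ *
+ * 'new_val' is a placeholder for the desired register contents.
+ */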
+
+/* 4360_OTP_PU is used for 4352, not a typo */
+static rsc_per_chip_t rsc_4352 = {NO_SUCH_RESOURCE, NO_SUCH_RESOURCE,
+ NO_SUCH_RESOURCE, RES4360_OTP_PU, NO_SUCH_RESOURCE,
+ NO_SUCH_RESOURCE, NO_SUCH_RESOURCE, NO_SUCH_RESOURCE};
+static rsc_per_chip_t rsc_4360 = {RES4360_HT_AVAIL, NO_SUCH_RESOURCE,
+ NO_SUCH_RESOURCE, RES4360_OTP_PU, NO_SUCH_RESOURCE,
+ NO_SUCH_RESOURCE, NO_SUCH_RESOURCE, NO_SUCH_RESOURCE};
+static rsc_per_chip_t rsc_43602 = {RES43602_HT_AVAIL, RES43602_MACPHY_CLKAVAIL,
+ RES43602_HT_START, NO_SUCH_RESOURCE, NO_SUCH_RESOURCE,
+ NO_SUCH_RESOURCE, NO_SUCH_RESOURCE, NO_SUCH_RESOURCE};
+static rsc_per_chip_t rsc_43012 = {RES43012_HT_AVAIL, RES43012_MACPHY_CLK_AVAIL,
+ RES43012_HT_START, RES43012_OTP_PU, NO_SUCH_RESOURCE,
+ NO_SUCH_RESOURCE, NO_SUCH_RESOURCE, NO_SUCH_RESOURCE};
+/* Per the chip team, there is no OTP resource in the 4369 */
+static rsc_per_chip_t rsc_4369 = {RES4369_HT_AVAIL, RES4369_MACPHY_MAIN_CLK_AVAIL,
+ RES4369_HT_AVAIL, NO_SUCH_RESOURCE, RES4369_MACPHY_AUX_CLK_AVAIL,
+ NO_SUCH_RESOURCE, NO_SUCH_RESOURCE, RES4369_DIG_CORE_RDY};
+static rsc_per_chip_t rsc_4378 = {RES4378_HT_AVAIL, RES4378_MACPHY_MAIN_CLK_AVAIL,
+ RES4378_HT_AVAIL, RES4378_PMU_SLEEP, RES4378_MACPHY_AUX_CLK_AVAIL,
+ NO_SUCH_RESOURCE, RES4378_CORE_RDY_CB, RES4378_CORE_RDY_DIG};
+static rsc_per_chip_t rsc_4387 = {RES4387_HT_AVAIL, RES4387_MACPHY_CLK_MAIN,
+ RES4387_HT_AVAIL, RES4387_PMU_SLEEP, RES4387_MACPHY_CLK_AUX,
+ RES4387_MACPHY_CLK_SCAN, RES4387_CORE_RDY_CB, RES4387_CORE_RDY_DIG};
+static rsc_per_chip_t rsc_4388 = {RES4388_HT_AVAIL, RES4388_MACPHY_CLK_MAIN,
+ RES4388_HT_AVAIL, RES4388_PMU_LP, RES4388_MACPHY_CLK_AUX,
+ RES4388_MACPHY_CLK_SCAN, RES4388_CORE_RDY_CB, RES4388_CORE_RDY_DIG};
+static rsc_per_chip_t rsc_4389 = {RES4389_HT_AVAIL, RES4389_MACPHY_CLK_MAIN,
+ RES4389_HT_AVAIL, RES4389_PMU_LP, RES4389_MACPHY_CLK_AUX,
+ RES4389_MACPHY_CLK_SCAN, RES4389_CORE_RDY_CB, RES4389_CORE_RDY_DIG};
+static rsc_per_chip_t rsc_4397 = {RES4397_HT_AVAIL, RES4397_MACPHY_CLK_MAIN,
+ RES4397_HT_AVAIL, RES4397_PMU_LP, RES4397_MACPHY_CLK_AUX,
+ RES4397_MACPHY_CLK_SCAN, RES4397_CORE_RDY_CB, RES4397_CORE_RDY_DIG};
+
+static rsc_per_chip_t rsc_4362 = {RES4362_HT_AVAIL, RES4362_MACPHY_MAIN_CLK_AVAIL,
+ RES4362_HT_AVAIL, /* macphy aux clk */
+ NO_SUCH_RESOURCE, NO_SUCH_RESOURCE, NO_SUCH_RESOURCE, NO_SUCH_RESOURCE,
+ RES4362_DIG_CORE_RDY};
+
+/**
+* For each chip, location of resource bits (e.g., ht bit) in resource mask registers may differ.
+* This function abstracts the bit position of commonly used resources, thus making the rest of the
+* code in hndpmu.c cleaner.
+*/
+static rsc_per_chip_t* BCMRAMFN(si_pmu_get_rsc_positions)(si_t *sih)
+{
+ rsc_per_chip_t *rsc = NULL;
+
+ switch (CHIPID(sih->chip)) {
+ case BCM4352_CHIP_ID:
+ case BCM43526_CHIP_ID: /* usb variant of 4352 */
+ rsc = &rsc_4352;
+ break;
+ case BCM4360_CHIP_ID:
+ case BCM43460_CHIP_ID:
+ rsc = &rsc_4360;
+ break;
+ CASE_BCM43602_CHIP:
+ rsc = &rsc_43602;
+ break;
+ case BCM43012_CHIP_ID:
+ case BCM43013_CHIP_ID:
+ case BCM43014_CHIP_ID:
+ rsc = &rsc_43012;
+ break;
+ case BCM4369_CHIP_GRPID:
+ rsc = &rsc_4369;
+ break;
+ case BCM4362_CHIP_GRPID:
+ rsc = &rsc_4362;
+ break;
+ case BCM4376_CHIP_GRPID:
+ case BCM4378_CHIP_GRPID:
+ rsc = &rsc_4378;
+ break;
+ case BCM4385_CHIP_GRPID:
+ case BCM4387_CHIP_GRPID:
+ rsc = &rsc_4387;
+ break;
+ case BCM4388_CHIP_GRPID:
+ rsc = &rsc_4388;
+ break;
+ case BCM4389_CHIP_GRPID:
+ rsc = &rsc_4389;
+ break;
+ case BCM4397_CHIP_GRPID:
+ rsc = &rsc_4397;
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+
+ return rsc;
+} /* si_pmu_get_rsc_positions */
+
+static const char BCMATTACHDATA(rstr_pllD)[] = "pll%d";
+static const char BCMATTACHDATA(rstr_regD)[] = "reg%d";
+static const char BCMATTACHDATA(rstr_chipcD)[] = "chipc%d";
+static const char BCMATTACHDATA(rstr_rDt)[] = "r%dt";
+static const char BCMATTACHDATA(rstr_rDd)[] = "r%dd";
+static const char BCMATTACHDATA(rstr_Invalid_Unsupported_xtal_value_D)[] =
+ "Invalid/Unsupported xtal value %d";
+static const char BCMATTACHDATA(rstr_xtalfreq)[] = "xtalfreq";
+#if defined(SAVERESTORE) && defined(LDO3P3_MIN_RES_MASK)
+static const char BCMATTACHDATA(rstr_ldo_prot)[] = "ldo_prot";
+#endif /* SAVERESTORE && LDO3P3_MIN_RES_MASK */
+static const char BCMATTACHDATA(rstr_btldo3p3pu)[] = "btldopu";
+#if defined(BCM_FASTLPO_PMU) && !defined(BCM_FASTLPO_PMU_DISABLED)
+static const char BCMATTACHDATA(rstr_fastlpo_dis)[] = "fastlpo_dis";
+#endif /* BCM_FASTLPO_PMU */
+static const char BCMATTACHDATA(rstr_fastlpo_pcie_dis)[] = "fastlpo_pcie_dis";
+static const char BCMATTACHDATA(rstr_memlpldo_volt)[] = "memlpldo_volt";
+static const char BCMATTACHDATA(rstr_lpldo_volt)[] = "lpldo_volt";
+static const char BCMATTACHDATA(rstr_dyn_clksw_en)[] = "dyn_clksw_en";
+static const char BCMATTACHDATA(rstr_abuck_volt)[] = "abuck_volt";
+static const char BCMATTACHDATA(rstr_cbuck_volt)[] = "cbuck_volt";
+static const char BCMATTACHDATA(rstr_csrtune)[] = "csr_tune";
+
+/* OTP is checked for parameters for the PLL control registers; if found, the
+ * registers are updated accordingly.
+ */
+
+/**
+ * As a hardware bug workaround, OTP can contain variables in the form 'pll%d=%d'.
+ * If these variables are present, the corresponding PLL control register(s) are
+ * overwritten, but not yet 'updated'.
+ */
+static void
+BCMATTACHFN(si_pmu_otp_pllcontrol)(si_t *sih, osl_t *osh)
+{
+ char name[16];
+ const char *otp_val;
+ uint8 i;
+ uint32 val;
+ uint8 pll_ctrlcnt = 0;
+
+ if (FWSIGN_ENAB()) {
+ return;
+ }
+
+ if (PMUREV(sih->pmurev) >= 5) {
+ pll_ctrlcnt = (sih->pmucaps & PCAP5_PC_MASK) >> PCAP5_PC_SHIFT;
+ } else {
+ pll_ctrlcnt = (sih->pmucaps & PCAP_PC_MASK) >> PCAP_PC_SHIFT;
+ }
+
+ for (i = 0; i < pll_ctrlcnt; i++) {
+ snprintf(name, sizeof(name), rstr_pllD, i);
+ if ((otp_val = getvar(NULL, name)) == NULL)
+ continue;
+
+ val = (uint32)bcm_strtoul(otp_val, NULL, 0);
+ si_pmu_pllcontrol(sih, i, ~0, val);
+ }
+}
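+
+/*
+ * Example (editor's addition): an OTP entry such as "pll6=0x12345678" (a
+ * hypothetical value) would overwrite PLL control register 6 here; the new
+ * contents only take effect once committed via si_pmu_pllupd().
+ */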
+
+/**
+ * OTP is checked for parameters for the voltage regulator registers; if found, the
+ * registers are updated accordingly.
+ */
+static void
+BCMATTACHFN(si_pmu_otp_vreg_control)(si_t *sih, osl_t *osh)
+{
+ char name[16];
+ const char *otp_val;
+ uint8 i;
+ uint32 val;
+ uint8 vreg_ctrlcnt = 0;
+
+ if (FWSIGN_ENAB()) {
+ return;
+ }
+
+ if (PMUREV(sih->pmurev) >= 5) {
+ vreg_ctrlcnt = (sih->pmucaps & PCAP5_VC_MASK) >> PCAP5_VC_SHIFT;
+ } else {
+ vreg_ctrlcnt = (sih->pmucaps & PCAP_VC_MASK) >> PCAP_VC_SHIFT;
+ }
+
+ for (i = 0; i < vreg_ctrlcnt; i++) {
+ snprintf(name, sizeof(name), rstr_regD, i);
+ if ((otp_val = getvar(NULL, name)) == NULL)
+ continue;
+
+ val = (uint32)bcm_strtoul(otp_val, NULL, 0);
+ si_pmu_vreg_control(sih, i, ~0, val);
+ }
+}
+
+/**
+ * OTP is checked for parameters for the chip control registers; if found, the
+ * registers are updated accordingly.
+ */
+static void
+BCMATTACHFN(si_pmu_otp_chipcontrol)(si_t *sih, osl_t *osh)
+{
+ uint32 val, cc_ctrlcnt, i;
+ char name[16];
+ const char *otp_val;
+
+ if (FWSIGN_ENAB()) {
+ return;
+ }
+ if (PMUREV(sih->pmurev) >= 5) {
+ cc_ctrlcnt = (sih->pmucaps & PCAP5_CC_MASK) >> PCAP5_CC_SHIFT;
+ } else {
+ cc_ctrlcnt = (sih->pmucaps & PCAP_CC_MASK) >> PCAP_CC_SHIFT;
+ }
+
+ for (i = 0; i < cc_ctrlcnt; i++) {
+ snprintf(name, sizeof(name), rstr_chipcD, i);
+ if ((otp_val = getvar(NULL, name)) == NULL)
+ continue;
+
+ val = (uint32)bcm_strtoul(otp_val, NULL, 0);
+ si_pmu_chipcontrol(sih, i, 0xFFFFFFFF, val); /* writes to PMU chipctrl reg 'i' */
+ }
+}
+
+/**
+ * A chip contains one or more LDOs (Low Drop Out regulators). During chip bringup, it can turn out
+ * that the default (POR) voltage of a regulator is not right or optimal.
+ * This function is called only by si_pmu_swreg_init() for specific chips
+ */
+void
+si_pmu_set_ldo_voltage(si_t *sih, osl_t *osh, uint8 ldo, uint8 voltage)
+{
+ uint8 sr_cntl_shift = 0, rc_shift = 0, shift = 0, mask = 0;
+ uint8 addr = 0;
+ uint8 do_reg2 = 0, rshift2 = 0, rc_shift2 = 0, mask2 = 0, addr2 = 0;
+
+ BCM_REFERENCE(osh);
+
+ ASSERT(sih->cccaps & CC_CAP_PMU);
+
+ switch (CHIPID(sih->chip)) {
+ case BCM4360_CHIP_ID:
+ case BCM43460_CHIP_ID:
+ case BCM4352_CHIP_ID:
+ case BCM43526_CHIP_ID:
+ switch (ldo) {
+ case SET_LDO_VOLTAGE_PAREF:
+ addr = 1;
+ rc_shift = 0;
+ mask = 0xf;
+ break;
+ default:
+ ASSERT(FALSE);
+ break;
+ }
+ break;
+ CASE_BCM43602_CHIP:
+ switch (ldo) {
+ case SET_LDO_VOLTAGE_PAREF:
+ addr = 0;
+ rc_shift = 29;
+ mask = 0x7;
+ do_reg2 = 1;
+ addr2 = 1;
+ rshift2 = 3;
+ mask2 = 0x8;
+ break;
+ default:
+ ASSERT(FALSE);
+ break;
+ }
+ break;
+ default:
+ ASSERT(FALSE);
+ return;
+ }
+
+ shift = sr_cntl_shift + rc_shift;
+
+ pmu_corereg(sih, SI_CC_IDX, regcontrol_addr, /* PMU VREG register */
+ ~0, addr);
+ pmu_corereg(sih, SI_CC_IDX, regcontrol_data,
+ mask << shift, (voltage & mask) << shift);
+ if (do_reg2) {
+ /* rshift2 - right shift moves mask2 to bit 0, rc_shift2 - left shift in reg */
+ si_pmu_vreg_control(sih, addr2, (mask2 >> rshift2) << rc_shift2,
+ ((voltage & mask2) >> rshift2) << rc_shift2);
+ }
+} /* si_pmu_set_ldo_voltage */
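+
+/*
+ * Worked example (editor's addition): for the 43602 PAREF LDO above, the
+ * 4-bit voltage is split across two VREG registers. With voltage = 0xb:
+ *
+ *   VREG reg 0, bits 31:29 <- 0xb & 0x7        = 0x3
+ *   VREG reg 1, bit 0      <- (0xb & 0x8) >> 3 = 0x1
+ */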
+
+/* d11 slow to fast clock transition time in slow clock cycles */
+#define D11SCC_SLOW2FAST_TRANSITION 2
+
+/* For legacy chips only, will be discarded eventually */
+static uint16
+BCMINITFN(si_pmu_fast_pwrup_delay_legacy)(si_t *sih, osl_t *osh, pmuregs_t *pmu)
+{
+ uint pmudelay = PMU_MAX_TRANSITION_DLY;
+ uint32 ilp; /* ILP clock frequency in [Hz] */
+ rsc_per_chip_t *rsc; /* chip specific resource bit positions */
+
+ /* Should be calculated based on the PMU updown/depend tables */
+ switch (CHIPID(sih->chip)) {
+ case BCM43460_CHIP_ID:
+ case BCM43526_CHIP_ID:
+ pmudelay = 3700;
+ break;
+ case BCM4360_CHIP_ID:
+ case BCM4352_CHIP_ID:
+ if (CHIPREV(sih->chiprev) < 4) {
+ pmudelay = 1500;
+ } else {
+ pmudelay = 3000;
+ }
+ break;
+ case BCM43012_CHIP_ID:
+ case BCM43013_CHIP_ID:
+ case BCM43014_CHIP_ID:
+ pmudelay = 1500; /* In micro seconds for 43012 chip */
+ break;
+ CASE_BCM43602_CHIP:
+ rsc = si_pmu_get_rsc_positions(sih);
+ /* Retrieve time by reading it out of the hardware */
+ ilp = si_ilp_clock(sih);
+ if (ilp != 0) {
+ pmudelay = (si_pmu_res_uptime(sih, osh, pmu, rsc->macphy_clkavail, FALSE) +
+ D11SCC_SLOW2FAST_TRANSITION) * ((1000000 + ilp - 1) / ilp);
+ pmudelay = (11 * pmudelay) / 10;
+ }
+ break;
+ case BCM4362_CHIP_GRPID:
+ rsc = si_pmu_get_rsc_positions(sih);
+ /* Retrieve time by reading it out of the hardware */
+ ilp = si_ilp_clock(sih);
+ if (ilp != 0) {
+ pmudelay = si_pmu_res_uptime(sih, osh, pmu, rsc->ht_avail, FALSE) +
+ D11SCC_SLOW2FAST_TRANSITION;
+ pmudelay = (11 * pmudelay) / 10;
+ /* With the PWR SW optimization, this additional
+    time must be added to the fast power-up delay to avoid beacon loss
+ */
+ pmudelay += 600;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return (uint16)pmudelay;
+} /* si_pmu_fast_pwrup_delay_legacy */
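+
+/*
+ * Worked example (editor's addition): for the 43602 branch above, assuming
+ * the typical ~32768Hz ILP clock, the per-tick scale is
+ * (1000000 + 32768 - 1) / 32768 = 31us. An uptime of 100 ticks then gives
+ * (100 + 2) * 31 = 3162us, and the extra 10% margin yields
+ * (11 * 3162) / 10 = 3478us.
+ */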
+
+/**
+ * d11 core has a 'fastpwrup_dly' register that must be written to.
+ * This function returns d11 slow to fast clock transition time in [us] units.
+ * It does not write to the d11 core.
+ */
+uint16
+BCMINITFN(si_pmu_fast_pwrup_delay)(si_t *sih, osl_t *osh)
+{
+ uint pmudelay = PMU_MAX_TRANSITION_DLY;
+ pmuregs_t *pmu;
+ uint origidx;
+ rsc_per_chip_t *rsc; /* chip specific resource bit positions */
+ uint macunit;
+ bool pmu_fast_trans_en;
+
+ ASSERT(sih->cccaps & CC_CAP_PMU);
+
+ if (ISSIM_ENAB(sih)) {
+ pmudelay = 1000;
+ goto exit;
+ }
+
+ macunit = si_coreunit(sih);
+
+ origidx = si_coreidx(sih);
+ /* The 43602 is still supported, so an AOB check is needed;
+ * it is the only non-AOB chip supported now
+ */
+ if (AOB_ENAB(sih)) {
+ pmu = si_setcore(sih, PMU_CORE_ID, 0);
+ } else {
+ pmu = si_setcoreidx(sih, SI_CC_IDX);
+ }
+ ASSERT(pmu != NULL);
+
+ pmu_fast_trans_en = (R_REG(osh, &pmu->pmucontrol_ext) & PCTL_EXT_FAST_TRANS_ENAB) ?
+ TRUE : FALSE;
+
+ rsc = si_pmu_get_rsc_positions(sih);
+
+ switch (CHIPID(sih->chip)) {
+ case BCM4369_CHIP_GRPID:
+ case BCM4376_CHIP_GRPID:
+ case BCM4378_CHIP_GRPID:
+ case BCM4385_CHIP_GRPID:
+ if (macunit == 0) {
+ pmudelay = si_pmu_res_uptime(sih, osh, pmu,
+ rsc->macphy_clkavail, pmu_fast_trans_en);
+ } else if (macunit == 1) {
+ pmudelay = si_pmu_res_uptime(sih, osh, pmu,
+ rsc->macphy_aux_clkavail, pmu_fast_trans_en);
+ } else {
+ ASSERT(0);
+ }
+ break;
+ case BCM4387_CHIP_GRPID:
+ case BCM4388_CHIP_GRPID:
+ case BCM4389_CHIP_GRPID:
+ case BCM4397_CHIP_GRPID:
+ if (macunit == 0) {
+ pmudelay = si_pmu_res_uptime(sih, osh, pmu,
+ rsc->macphy_clkavail, pmu_fast_trans_en);
+ } else if (macunit == 1) {
+ pmudelay = si_pmu_res_uptime(sih, osh, pmu,
+ rsc->macphy_aux_clkavail, pmu_fast_trans_en);
+ } else if (macunit == 2) {
+ pmudelay = si_pmu_res_uptime(sih, osh, pmu,
+ rsc->macphy_scan_clkavail, pmu_fast_trans_en);
+ } else {
+ ASSERT(0);
+ }
+ break;
+
+ default:
+ pmudelay = si_pmu_fast_pwrup_delay_legacy(sih, osh, pmu);
+ break;
+ }
+
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+
+exit:
+ return (uint16)pmudelay;
+} /* si_pmu_fast_pwrup_delay */
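+
+/*
+ * Editor's note: in the switch above, d11 core unit 0 maps to the main
+ * MAC/PHY clock resource, unit 1 to the aux resource, and (on the
+ * 4387/4388/4389/4397 group) unit 2 to the scan resource.
+ */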
+
+/*
+ * Get fast pwrup delay for given resource
+ */
+static uint
+BCMINITFN(si_pmu_fast_pwrup_delay_rsrc)(si_t *sih, osl_t *osh, uint8 rsrc)
+{
+ uint pmudelay = PMU_MAX_TRANSITION_DLY;
+ pmuregs_t *pmu = NULL;
+ bool pmu_fast_trans_en = TRUE;
+ uint origidx;
+
+ origidx = si_coreidx(sih);
+ pmu = si_setcore(sih, PMU_CORE_ID, 0);
+ ASSERT(pmu != NULL);
+
+ pmu_fast_trans_en = (R_REG(osh, &pmu->pmucontrol_ext) & PCTL_EXT_FAST_TRANS_ENAB) ?
+ TRUE : FALSE;
+
+ pmudelay = si_pmu_res_uptime(sih, osh, pmu, rsrc, pmu_fast_trans_en);
+
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+
+ return pmudelay;
+}
+
+/*
+ * Get fast pwrup delay for given DIG_READY resource
+ */
+uint
+BCMINITFN(si_pmu_fast_pwrup_delay_dig)(si_t *sih, osl_t *osh)
+{
+ uint delay = 0;
+ rsc_per_chip_t *rsc = si_pmu_get_rsc_positions(sih);
+ ASSERT(rsc);
+
+ if (rsc) {
+ delay = si_pmu_fast_pwrup_delay_rsrc(sih, osh, rsc->dig_ready);
+ }
+ return delay;
+}
+
+/*
+ * During chip bringup, it can turn out that the 'hard wired' PMU dependencies are not fully
+ * correct, or that up/down time values can be optimized. The following data structures and arrays
+ * deal with that.
+ */
+
+/* Setup resource up/down timers */
+typedef struct {
+ uint8 resnum;
+ uint32 updown;
+} pmu_res_updown_t;
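+
+/* Editor's note (an assumption, following the usual PMU resource-timer
+ * layout): 'updown' packs the up-time in its upper 16 bits and the
+ * down-time in its lower 16 bits, both in ILP ticks, so e.g. 0x00c80022
+ * would mean 0xc8 (200) ticks up and 0x22 (34) ticks down.
+ */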
+
+#define PMU_RES_SUBSTATE_SHIFT 8
+
+/* Setup resource substate transition timer value */
+typedef struct {
+ uint8 resnum;
+ uint8 substate;
+ uint32 tmr;
+} pmu_res_subst_trans_tmr_t;
+
+/* Change resource dependencies masks */
+typedef struct {
+ uint32 res_mask; /* resources (chip specific) */
+ int8 action; /* action, e.g. RES_DEPEND_SET */
+ uint32 depend_mask; /* changes to the dependencies mask */
+ bool (*filter)(si_t *sih); /* action is taken when filter is NULL or return TRUE */
+} pmu_res_depend_t;
+
+/* Resource dependencies mask change action */
+#define RES_DEPEND_SET 0 /* Override the dependencies mask */
+#define RES_DEPEND_ADD 1 /* Add to the dependencies mask */
+#define RES_DEPEND_REMOVE -1 /* Remove from the dependencies mask */
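+
+/*
+ * Sketch (editor's addition) of how a dependency table entry is expected to
+ * be applied to each resource bit set in res_mask (the applier itself is
+ * not part of this excerpt):
+ *
+ *   if (entry->filter == NULL || entry->filter(sih)) {
+ *       switch (entry->action) {
+ *       case RES_DEPEND_SET:    depmask  = entry->depend_mask; break;
+ *       case RES_DEPEND_ADD:    depmask |= entry->depend_mask; break;
+ *       case RES_DEPEND_REMOVE: depmask &= ~entry->depend_mask; break;
+ *       }
+ *   }
+ */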
+
+/* A safe SAVE_RESTORE up/down time is used; it will be updated after open-loop cal */
+static const pmu_res_updown_t BCMATTACHDATA(bcm43012a0_res_updown_ds0)[] = {
+ {RES43012_MEMLPLDO_PU, 0x00200020},
+ {RES43012_PMU_SLEEP, 0x00a600a6},
+ {RES43012_FAST_LPO, 0x00D20000},
+ {RES43012_BTLPO_3P3, 0x007D0000},
+ {RES43012_SR_POK, 0x00c80000},
+ {RES43012_DUMMY_PWRSW, 0x01400000},
+ {RES43012_DUMMY_LDO3P3, 0x00000000},
+ {RES43012_DUMMY_BT_LDO3P3, 0x00000000},
+ {RES43012_DUMMY_RADIO, 0x00000000},
+ {RES43012_VDDB_VDDRET, 0x0020000a},
+ {RES43012_HV_LDO3P3, 0x002C0000},
+ {RES43012_XTAL_PU, 0x04000000},
+ {RES43012_SR_CLK_START, 0x00080000},
+ {RES43012_XTAL_STABLE, 0x00000000},
+ {RES43012_FCBS, 0x00000000},
+ {RES43012_CBUCK_MODE, 0x00000000},
+ {RES43012_CORE_READY, 0x00000000},
+ {RES43012_ILP_REQ, 0x00000000},
+ {RES43012_ALP_AVAIL, 0x00280008},
+ {RES43012_RADIOLDO_1P8, 0x00220000},
+ {RES43012_MINI_PMU, 0x00220000},
+ {RES43012_SR_SAVE_RESTORE, 0x02600260},
+ {RES43012_PHY_PWRSW, 0x00800005},
+ {RES43012_VDDB_CLDO, 0x0020000a},
+ {RES43012_SUBCORE_PWRSW, 0x0060000a},
+ {RES43012_SR_SLEEP, 0x00000000},
+ {RES43012_HT_START, 0x00A00000},
+ {RES43012_HT_AVAIL, 0x00000000},
+ {RES43012_MACPHY_CLK_AVAIL, 0x00000000},
+};
+
+static const pmu_res_updown_t BCMATTACHDATA(bcm4360_res_updown)[] = {
+ {RES4360_BBPLLPWRSW_PU, 0x00200001}
+};
+
+static const pmu_res_updown_t BCMATTACHDATA(bcm43602_res_updown)[] = {
+ {RES43602_SR_SAVE_RESTORE, 0x00190019},
+ {RES43602_XTAL_PU, 0x00280002},
+ {RES43602_RFLDO_PU, 0x00430005}
+};
+
+static pmu_res_depend_t BCMATTACHDATA(bcm43012a0_res_depend_ds0)[] = {
+ {0, 0, 0, NULL}
+};
+
+static pmu_res_depend_t BCMATTACHDATA(bcm43602_res_depend)[] = {
+ /* JIRA HW43602-131 : PCIe SERDES dependency problem */
+ {
+ PMURES_BIT(RES43602_SR_SUBCORE_PWRSW) | PMURES_BIT(RES43602_SR_CLK_STABLE) |
+ PMURES_BIT(RES43602_SR_SAVE_RESTORE) | PMURES_BIT(RES43602_SR_SLEEP) |
+ PMURES_BIT(RES43602_LQ_START) | PMURES_BIT(RES43602_LQ_AVAIL) |
+ PMURES_BIT(RES43602_WL_CORE_RDY) | PMURES_BIT(RES43602_ILP_REQ) |
+ PMURES_BIT(RES43602_ALP_AVAIL) | PMURES_BIT(RES43602_RFLDO_PU) |
+ PMURES_BIT(RES43602_HT_START) | PMURES_BIT(RES43602_HT_AVAIL) |
+ PMURES_BIT(RES43602_MACPHY_CLKAVAIL),
+ RES_DEPEND_ADD,
+ PMURES_BIT(RES43602_SERDES_PU),
+ NULL
+ },
+ /* set rsrc 7, 8, 9, 12, 13, 14 & 17 add (1<<10 | 1<<4 )] */
+ {
+ PMURES_BIT(RES43602_SR_CLK_START) | PMURES_BIT(RES43602_SR_PHY_PWRSW) |
+ PMURES_BIT(RES43602_SR_SUBCORE_PWRSW) | PMURES_BIT(RES43602_SR_CLK_STABLE) |
+ PMURES_BIT(RES43602_SR_SAVE_RESTORE) | PMURES_BIT(RES43602_SR_SLEEP) |
+ PMURES_BIT(RES43602_WL_CORE_RDY),
+ RES_DEPEND_ADD,
+ PMURES_BIT(RES43602_XTALLDO_PU) | PMURES_BIT(RES43602_XTAL_PU),
+ NULL
+ },
+ /* set rsrc 11 add (1<<13 | 1<<12 | 1<<9 | 1<<8 | 1<<7 )] */
+ {
+ PMURES_BIT(RES43602_PERST_OVR),
+ RES_DEPEND_ADD,
+ PMURES_BIT(RES43602_SR_CLK_START) | PMURES_BIT(RES43602_SR_PHY_PWRSW) |
+ PMURES_BIT(RES43602_SR_SUBCORE_PWRSW) | PMURES_BIT(RES43602_SR_CLK_STABLE) |
+ PMURES_BIT(RES43602_SR_SAVE_RESTORE),
+ NULL
+ },
+ /* set rsrc 19, 21, 22, 23 & 24 remove ~(1<<16 | 1<<15 )] */
+ {
+ PMURES_BIT(RES43602_ALP_AVAIL) | PMURES_BIT(RES43602_RFLDO_PU) |
+ PMURES_BIT(RES43602_HT_START) | PMURES_BIT(RES43602_HT_AVAIL) |
+ PMURES_BIT(RES43602_MACPHY_CLKAVAIL),
+ RES_DEPEND_REMOVE,
+ PMURES_BIT(RES43602_LQ_START) | PMURES_BIT(RES43602_LQ_AVAIL),
+ NULL
+ }
+};
+
+#ifndef BCM_BOOTLOADER
+/** switch off LPLDO for 12x12 package because it can cause a problem when chip is reset */
+static const pmu_res_depend_t BCMATTACHDATA(bcm43602_12x12_res_depend)[] = {
+ /* set rsrc 19, 21, 22, 23 & 24 remove ~(1<<16 | 1<<15 )] */
+ { /* resources no longer dependent on resource that is going to be removed */
+ PMURES_BIT(RES43602_LPLDO_PU) | PMURES_BIT(RES43602_REGULATOR) |
+ PMURES_BIT(RES43602_PMU_SLEEP) | PMURES_BIT(RES43602_RSVD_3) |
+ PMURES_BIT(RES43602_XTALLDO_PU) | PMURES_BIT(RES43602_SERDES_PU) |
+ PMURES_BIT(RES43602_BBPLL_PWRSW_PU) | PMURES_BIT(RES43602_SR_CLK_START) |
+ PMURES_BIT(RES43602_SR_PHY_PWRSW) | PMURES_BIT(RES43602_SR_SUBCORE_PWRSW) |
+ PMURES_BIT(RES43602_XTAL_PU) | PMURES_BIT(RES43602_PERST_OVR) |
+ PMURES_BIT(RES43602_SR_CLK_STABLE) | PMURES_BIT(RES43602_SR_SAVE_RESTORE) |
+ PMURES_BIT(RES43602_SR_SLEEP) | PMURES_BIT(RES43602_LQ_START) |
+ PMURES_BIT(RES43602_LQ_AVAIL) | PMURES_BIT(RES43602_WL_CORE_RDY) |
+ PMURES_BIT(RES43602_ILP_REQ) | PMURES_BIT(RES43602_ALP_AVAIL) |
+ PMURES_BIT(RES43602_RADIO_PU) | PMURES_BIT(RES43602_RFLDO_PU) |
+ PMURES_BIT(RES43602_HT_START) | PMURES_BIT(RES43602_HT_AVAIL) |
+ PMURES_BIT(RES43602_MACPHY_CLKAVAIL) | PMURES_BIT(RES43602_PARLDO_PU) |
+ PMURES_BIT(RES43602_RSVD_26),
+ RES_DEPEND_REMOVE,
+ /* resource that is going to be removed */
+ PMURES_BIT(RES43602_LPLDO_PU),
+ NULL
+ }
+};
+
+static const pmu_res_depend_t BCMATTACHDATA(bcm43602_res_pciewar)[] = {
+ {
+ PMURES_BIT(RES43602_PERST_OVR),
+ RES_DEPEND_ADD,
+ PMURES_BIT(RES43602_REGULATOR) |
+ PMURES_BIT(RES43602_PMU_SLEEP) |
+ PMURES_BIT(RES43602_XTALLDO_PU) |
+ PMURES_BIT(RES43602_XTAL_PU) |
+ PMURES_BIT(RES43602_RADIO_PU),
+ NULL
+ },
+ {
+ PMURES_BIT(RES43602_WL_CORE_RDY),
+ RES_DEPEND_ADD,
+ PMURES_BIT(RES43602_PERST_OVR),
+ NULL
+ },
+ {
+ PMURES_BIT(RES43602_LQ_START),
+ RES_DEPEND_ADD,
+ PMURES_BIT(RES43602_PERST_OVR),
+ NULL
+ },
+ {
+ PMURES_BIT(RES43602_LQ_AVAIL),
+ RES_DEPEND_ADD,
+ PMURES_BIT(RES43602_PERST_OVR),
+ NULL
+ },
+ {
+ PMURES_BIT(RES43602_ALP_AVAIL),
+ RES_DEPEND_ADD,
+ PMURES_BIT(RES43602_PERST_OVR),
+ NULL
+ },
+ {
+ PMURES_BIT(RES43602_HT_START),
+ RES_DEPEND_ADD,
+ PMURES_BIT(RES43602_PERST_OVR),
+ NULL
+ },
+ {
+ PMURES_BIT(RES43602_HT_AVAIL),
+ RES_DEPEND_ADD,
+ PMURES_BIT(RES43602_PERST_OVR),
+ NULL
+ },
+ {
+ PMURES_BIT(RES43602_MACPHY_CLKAVAIL),
+ RES_DEPEND_ADD,
+ PMURES_BIT(RES43602_PERST_OVR),
+ NULL
+ }
+};
+#endif /* BCM_BOOTLOADER */
+
+static const pmu_res_updown_t BCMATTACHDATA(bcm4360B1_res_updown)[] = {
+ /* Need to change elements here, should get default values for this - 4360B1 */
+ {RES4360_XTAL_PU, 0x00430002}, /* Changed for 4360B1 */
+};
+
+static pmu_res_depend_t BCMATTACHDATA(bcm4369a0_res_depend)[] = {
+ {PMURES_BIT(RES4369_DUMMY), RES_DEPEND_SET, 0x00000000, NULL},
+ {PMURES_BIT(RES4369_ABUCK), RES_DEPEND_SET, 0x00000005, NULL},
+ {PMURES_BIT(RES4369_PMU_SLEEP), RES_DEPEND_SET, 0x00000001, NULL},
+ {PMURES_BIT(RES4369_MISCLDO), RES_DEPEND_SET, 0x00000007, NULL},
+ {PMURES_BIT(RES4369_LDO3P3), RES_DEPEND_SET, 0x00000001, NULL},
+ {PMURES_BIT(RES4369_FAST_LPO_AVAIL), RES_DEPEND_SET, 0x00000005, NULL},
+ {PMURES_BIT(RES4369_XTAL_PU), RES_DEPEND_SET, 0x00000007, NULL},
+ {PMURES_BIT(RES4369_XTAL_STABLE), RES_DEPEND_SET, 0x00000047, NULL},
+ {PMURES_BIT(RES4369_PWRSW_DIG), RES_DEPEND_SET, 0x060000cf, NULL},
+ {PMURES_BIT(RES4369_SR_DIG), RES_DEPEND_SET, 0x060001cf, NULL},
+ {PMURES_BIT(RES4369_SLEEP_DIG), RES_DEPEND_SET, 0x060003cf, NULL},
+ {PMURES_BIT(RES4369_PWRSW_AUX), RES_DEPEND_SET, 0x040000cf, NULL},
+ {PMURES_BIT(RES4369_SR_AUX), RES_DEPEND_SET, 0x040008cf, NULL},
+ {PMURES_BIT(RES4369_SLEEP_AUX), RES_DEPEND_SET, 0x040018cf, NULL},
+ {PMURES_BIT(RES4369_PWRSW_MAIN), RES_DEPEND_SET, 0x040000cf, NULL},
+ {PMURES_BIT(RES4369_SR_MAIN), RES_DEPEND_SET, 0x040040cf, NULL},
+ {PMURES_BIT(RES4369_SLEEP_MAIN), RES_DEPEND_SET, 0x0400c0cf, NULL},
+ {PMURES_BIT(RES4369_DIG_CORE_RDY), RES_DEPEND_SET, 0x060007cf, NULL},
+ {PMURES_BIT(RES4369_CORE_RDY_AUX), RES_DEPEND_SET, 0x040038cf, NULL},
+ {PMURES_BIT(RES4369_ALP_AVAIL), RES_DEPEND_SET, 0x060207cf, NULL},
+ {PMURES_BIT(RES4369_RADIO_AUX_PU), RES_DEPEND_SET, 0x040438df, NULL},
+ {PMURES_BIT(RES4369_MINIPMU_AUX_PU), RES_DEPEND_SET, 0x041438df, NULL},
+ {PMURES_BIT(RES4369_CORE_RDY_MAIN), RES_DEPEND_SET, 0x0401c0cf, NULL},
+ {PMURES_BIT(RES4369_RADIO_MAIN_PU), RES_DEPEND_SET, 0x0441c0df, NULL},
+ {PMURES_BIT(RES4369_MINIPMU_MAIN_PU), RES_DEPEND_SET, 0x04c1c0df, NULL},
+ {PMURES_BIT(RES4369_PCIE_EP_PU), RES_DEPEND_SET, 0x040000cf, NULL},
+ {PMURES_BIT(RES4369_COLD_START_WAIT), RES_DEPEND_SET, 0x0000000f, NULL},
+ {PMURES_BIT(RES4369_ARMHTAVAIL), RES_DEPEND_SET, 0x060a07cf, NULL},
+ {PMURES_BIT(RES4369_HT_AVAIL), RES_DEPEND_SET, 0x060a07cf, NULL},
+ {PMURES_BIT(RES4369_MACPHY_AUX_CLK_AVAIL), RES_DEPEND_SET, 0x163e3fdf, NULL},
+ {PMURES_BIT(RES4369_MACPHY_MAIN_CLK_AVAIL), RES_DEPEND_SET, 0x17cbc7df, NULL},
+};
+
+static pmu_res_depend_t BCMATTACHDATA(bcm4369a0_res_depend_fastlpo_pcie)[] = {
+ {PMURES_BIT(RES4369_DUMMY), RES_DEPEND_SET, 0x00000000, NULL},
+ {PMURES_BIT(RES4369_ABUCK), RES_DEPEND_SET, 0x00000005, NULL},
+ {PMURES_BIT(RES4369_PMU_SLEEP), RES_DEPEND_SET, 0x00000001, NULL},
+ {PMURES_BIT(RES4369_MISCLDO), RES_DEPEND_SET, 0x00000007, NULL},
+ {PMURES_BIT(RES4369_LDO3P3), RES_DEPEND_SET, 0x00000001, NULL},
+ {PMURES_BIT(RES4369_FAST_LPO_AVAIL), RES_DEPEND_SET, 0x00000005, NULL},
+ {PMURES_BIT(RES4369_XTAL_PU), RES_DEPEND_SET, 0x00000007, NULL},
+ {PMURES_BIT(RES4369_XTAL_STABLE), RES_DEPEND_SET, 0x00000047, NULL},
+ {PMURES_BIT(RES4369_PWRSW_DIG), RES_DEPEND_SET, 0x060000ef, NULL},
+ {PMURES_BIT(RES4369_SR_DIG), RES_DEPEND_SET, 0x060001ef, NULL},
+ {PMURES_BIT(RES4369_SLEEP_DIG), RES_DEPEND_SET, 0x060003ef, NULL},
+ {PMURES_BIT(RES4369_PWRSW_AUX), RES_DEPEND_SET, 0x040000ef, NULL},
+ {PMURES_BIT(RES4369_SR_AUX), RES_DEPEND_SET, 0x040008ef, NULL},
+ {PMURES_BIT(RES4369_SLEEP_AUX), RES_DEPEND_SET, 0x040018ef, NULL},
+ {PMURES_BIT(RES4369_PWRSW_MAIN), RES_DEPEND_SET, 0x040000ef, NULL},
+ {PMURES_BIT(RES4369_SR_MAIN), RES_DEPEND_SET, 0x040040ef, NULL},
+ {PMURES_BIT(RES4369_SLEEP_MAIN), RES_DEPEND_SET, 0x0400c0ef, NULL},
+ {PMURES_BIT(RES4369_DIG_CORE_RDY), RES_DEPEND_SET, 0x060007ef, NULL},
+ {PMURES_BIT(RES4369_CORE_RDY_AUX), RES_DEPEND_SET, 0x040038ef, NULL},
+ {PMURES_BIT(RES4369_ALP_AVAIL), RES_DEPEND_SET, 0x060207ef, NULL},
+ {PMURES_BIT(RES4369_RADIO_AUX_PU), RES_DEPEND_SET, 0x040438ff, NULL},
+ {PMURES_BIT(RES4369_MINIPMU_AUX_PU), RES_DEPEND_SET, 0x041438ff, NULL},
+ {PMURES_BIT(RES4369_CORE_RDY_MAIN), RES_DEPEND_SET, 0x0401c0ef, NULL},
+ {PMURES_BIT(RES4369_RADIO_MAIN_PU), RES_DEPEND_SET, 0x0441c0ff, NULL},
+ {PMURES_BIT(RES4369_MINIPMU_MAIN_PU), RES_DEPEND_SET, 0x04c1c0ff, NULL},
+ {PMURES_BIT(RES4369_PCIE_EP_PU), RES_DEPEND_SET, 0x0400002f, NULL},
+ {PMURES_BIT(RES4369_COLD_START_WAIT), RES_DEPEND_SET, 0x0000002f, NULL},
+ {PMURES_BIT(RES4369_ARMHTAVAIL), RES_DEPEND_SET, 0x060a07ef, NULL},
+ {PMURES_BIT(RES4369_HT_AVAIL), RES_DEPEND_SET, 0x060a07ef, NULL},
+ {PMURES_BIT(RES4369_MACPHY_AUX_CLK_AVAIL), RES_DEPEND_SET, 0x163e3fff, NULL},
+ {PMURES_BIT(RES4369_MACPHY_MAIN_CLK_AVAIL), RES_DEPEND_SET, 0x17cbc7ff, NULL},
+};
+
+static const pmu_res_updown_t BCMATTACHDATA(bcm4369a0_res_updown)[] = {
+ {RES4369_DUMMY, 0x00220022},
+ {RES4369_ABUCK, 0x00c80022},
+ {RES4369_PMU_SLEEP, 0x00c80022},
+ {RES4369_MISCLDO, 0x00bd0022},
+ {RES4369_LDO3P3, 0x00bd0022},
+ {RES4369_FAST_LPO_AVAIL, 0x01500022},
+ {RES4369_XTAL_PU, 0x07d00022},
+ {RES4369_XTAL_STABLE, 0x00220022},
+ {RES4369_PWRSW_DIG, 0x02100087},
+ {RES4369_SR_DIG, 0x02000200},
+ {RES4369_SLEEP_DIG, 0x00220022},
+ {RES4369_PWRSW_AUX, 0x03900087},
+ {RES4369_SR_AUX, 0x01cc01cc},
+ {RES4369_SLEEP_AUX, 0x00220022},
+ {RES4369_PWRSW_MAIN, 0x03900087},
+ {RES4369_SR_MAIN, 0x02000200},
+ {RES4369_SLEEP_MAIN, 0x00220022},
+ {RES4369_DIG_CORE_RDY, 0x00220044},
+ {RES4369_CORE_RDY_AUX, 0x00220044},
+ {RES4369_ALP_AVAIL, 0x00220044},
+ {RES4369_RADIO_AUX_PU, 0x006e0022},
+ {RES4369_MINIPMU_AUX_PU, 0x00460022},
+ {RES4369_CORE_RDY_MAIN, 0x00220022},
+ {RES4369_RADIO_MAIN_PU, 0x006e0022},
+ {RES4369_MINIPMU_MAIN_PU, 0x00460022},
+ {RES4369_PCIE_EP_PU, 0x02100087},
+ {RES4369_COLD_START_WAIT, 0x00220022},
+ {RES4369_ARMHTAVAIL, 0x00a80022},
+ {RES4369_HT_AVAIL, RES4369_HTAVAIL_VAL},
+ {RES4369_MACPHY_AUX_CLK_AVAIL, 0x00640022},
+ {RES4369_MACPHY_MAIN_CLK_AVAIL, 0x00640022},
+};
+
+static const pmu_res_updown_t BCMATTACHDATA(bcm4369a0_res_updown_fastlpo_pmu)[] = {
+ {RES4369_DUMMY, 0x00220022},
+ {RES4369_ABUCK, 0x00c80022},
+ {RES4369_PMU_SLEEP, 0x00c80022},
+ {RES4369_MISCLDO, 0x00bd0022},
+ {RES4369_LDO3P3, 0x00bd0022},
+ {RES4369_FAST_LPO_AVAIL, 0x01500022},
+ {RES4369_XTAL_PU, 0x07d00022},
+ {RES4369_XTAL_STABLE, 0x00220022},
+ {RES4369_PWRSW_DIG, 0x02100087},
+ {RES4369_SR_DIG, 0x02000200},
+ {RES4369_SLEEP_DIG, 0x00220022},
+ {RES4369_PWRSW_AUX, 0x03900087},
+ {RES4369_SR_AUX, 0x01cc01cc},
+ {RES4369_SLEEP_AUX, 0x00220022},
+ {RES4369_PWRSW_MAIN, 0x03900087},
+ {RES4369_SR_MAIN, 0x02000200},
+ {RES4369_SLEEP_MAIN, 0x00220022},
+ {RES4369_DIG_CORE_RDY, 0x00220044},
+ {RES4369_CORE_RDY_AUX, 0x00220044},
+ {RES4369_ALP_AVAIL, 0x00220044},
+ {RES4369_RADIO_AUX_PU, 0x006e0022},
+ {RES4369_MINIPMU_AUX_PU, 0x00460022},
+ {RES4369_CORE_RDY_MAIN, 0x00220022},
+ {RES4369_RADIO_MAIN_PU, 0x006e0022},
+ {RES4369_MINIPMU_MAIN_PU, 0x00460022},
+ {RES4369_PCIE_EP_PU, 0x01200087},
+ {RES4369_COLD_START_WAIT, 0x00220022},
+ {RES4369_ARMHTAVAIL, 0x00a80022},
+ {RES4369_HT_AVAIL, RES4369_HTAVAIL_VAL},
+ {RES4369_MACPHY_AUX_CLK_AVAIL, 0x00640022},
+ {RES4369_MACPHY_MAIN_CLK_AVAIL, 0x00640022},
+};
+
+static const pmu_res_updown_t BCMATTACHDATA(bcm4369b0_res_updown)[] = {
+ {RES4369_DUMMY, 0x00220022},
+ {RES4369_ABUCK, 0x00c80022},
+ {RES4369_PMU_SLEEP, 0x00c80022},
+ {RES4369_MISCLDO, 0x00bd0022},
+ {RES4369_LDO3P3, 0x01ad0022},
+ {RES4369_FAST_LPO_AVAIL, 0x01500022},
+ {RES4369_XTAL_PU, 0x05dc0022},
+ {RES4369_XTAL_STABLE, 0x00220022},
+ {RES4369_PWRSW_DIG, 0x02100087},
+ {RES4369_SR_DIG, 0x00A000A0},
+ {RES4369_SLEEP_DIG, 0x00220022},
+ {RES4369_PWRSW_AUX, 0x03900087},
+ {RES4369_SR_AUX, 0x01400140},
+ {RES4369_SLEEP_AUX, 0x00220022},
+ {RES4369_PWRSW_MAIN, 0x03900087},
+ {RES4369_SR_MAIN, 0x01A001A0},
+ {RES4369_SLEEP_MAIN, 0x00220022},
+ {RES4369_DIG_CORE_RDY, 0x00220044},
+ {RES4369_CORE_RDY_AUX, 0x00220044},
+ {RES4369_ALP_AVAIL, 0x00220044},
+ {RES4369_RADIO_AUX_PU, 0x006e0022},
+ {RES4369_MINIPMU_AUX_PU, 0x00460022},
+ {RES4369_CORE_RDY_MAIN, 0x00220022},
+ {RES4369_RADIO_MAIN_PU, 0x006e0022},
+ {RES4369_MINIPMU_MAIN_PU, 0x00460022},
+ {RES4369_PCIE_EP_PU, 0x02100087},
+ {RES4369_COLD_START_WAIT, 0x00220022},
+ {RES4369_ARMHTAVAIL, 0x00a80022},
+ {RES4369_HT_AVAIL, RES4369_HTAVAIL_VAL},
+ {RES4369_MACPHY_AUX_CLK_AVAIL, 0x00640022},
+ {RES4369_MACPHY_MAIN_CLK_AVAIL, 0x00640022},
+};
+
+static const pmu_res_updown_t BCMATTACHDATA(bcm4369b0_res_updown_fastlpo_pmu)[] = {
+ {RES4369_DUMMY, 0x00220022},
+ {RES4369_ABUCK, 0x00c80022},
+ {RES4369_PMU_SLEEP, 0x00c80022},
+ {RES4369_MISCLDO, 0x00bd0022},
+ {RES4369_LDO3P3, 0x01ad0022},
+ {RES4369_FAST_LPO_AVAIL, 0x01500022},
+ {RES4369_XTAL_PU, 0x05dc0022},
+ {RES4369_XTAL_STABLE, 0x00220022},
+ {RES4369_PWRSW_DIG, 0x02100087},
+ {RES4369_SR_DIG, 0x02000200},
+ {RES4369_SLEEP_DIG, 0x00220022},
+ {RES4369_PWRSW_AUX, 0x03900087},
+ {RES4369_SR_AUX, 0x01cc01cc},
+ {RES4369_SLEEP_AUX, 0x00220022},
+ {RES4369_PWRSW_MAIN, 0x03900087},
+ {RES4369_SR_MAIN, 0x02000200},
+ {RES4369_SLEEP_MAIN, 0x00220022},
+ {RES4369_DIG_CORE_RDY, 0x00220044},
+ {RES4369_CORE_RDY_AUX, 0x00220044},
+ {RES4369_ALP_AVAIL, 0x00220044},
+ {RES4369_RADIO_AUX_PU, 0x006e0022},
+ {RES4369_MINIPMU_AUX_PU, 0x00460022},
+ {RES4369_CORE_RDY_MAIN, 0x00220022},
+ {RES4369_RADIO_MAIN_PU, 0x006e0022},
+ {RES4369_MINIPMU_MAIN_PU, 0x00460022},
+ {RES4369_PCIE_EP_PU, 0x01200087},
+ {RES4369_COLD_START_WAIT, 0x00220022},
+ {RES4369_ARMHTAVAIL, 0x00a80022},
+ {RES4369_HT_AVAIL, RES4369_HTAVAIL_VAL},
+ {RES4369_MACPHY_AUX_CLK_AVAIL, 0x00640022},
+ {RES4369_MACPHY_MAIN_CLK_AVAIL, 0x00640022},
+};
+
+static pmu_res_depend_t BCMATTACHDATA(bcm4362_res_depend)[] = {
+ {PMURES_BIT(RES4362_DUMMY), RES_DEPEND_SET, 0x00000000, NULL},
+ {PMURES_BIT(RES4362_ABUCK), RES_DEPEND_SET, 0x00000005, NULL},
+ {PMURES_BIT(RES4362_PMU_SLEEP), RES_DEPEND_SET, 0x00000001, NULL},
+ {PMURES_BIT(RES4362_MISCLDO_PU), RES_DEPEND_SET, 0x00000007, NULL},
+ {PMURES_BIT(RES4362_LDO3P3_PU), RES_DEPEND_SET, 0x00000005, NULL},
+ {PMURES_BIT(RES4362_FAST_LPO_AVAIL), RES_DEPEND_SET, 0x00000005, NULL},
+ {PMURES_BIT(RES4362_XTAL_PU), RES_DEPEND_SET, 0x00000007, NULL},
+ {PMURES_BIT(RES4362_XTAL_STABLE), RES_DEPEND_SET, 0x00000047, NULL},
+ {PMURES_BIT(RES4362_PWRSW_DIG), RES_DEPEND_SET, 0x060000cf, NULL},
+ {PMURES_BIT(RES4362_SR_DIG), RES_DEPEND_SET, 0x060001cf, NULL},
+ {PMURES_BIT(RES4362_SLEEP_DIG), RES_DEPEND_SET, 0x060003cf, NULL},
+ {PMURES_BIT(RES4362_PWRSW_AUX), RES_DEPEND_SET, 0x040000cf, NULL},
+ {PMURES_BIT(RES4362_SR_AUX), RES_DEPEND_SET, 0x040008cf, NULL},
+ {PMURES_BIT(RES4362_SLEEP_AUX), RES_DEPEND_SET, 0x040018cf, NULL},
+ {PMURES_BIT(RES4362_PWRSW_MAIN), RES_DEPEND_SET, 0x040000cf, NULL},
+ {PMURES_BIT(RES4362_SR_MAIN), RES_DEPEND_SET, 0x040040cf, NULL},
+ {PMURES_BIT(RES4362_SLEEP_MAIN), RES_DEPEND_SET, 0x0400c0cf, NULL},
+ {PMURES_BIT(RES4362_DIG_CORE_RDY), RES_DEPEND_SET, 0x060007cf, NULL},
+ {PMURES_BIT(RES4362_CORE_RDY_AUX), RES_DEPEND_SET, 0x040038cf, NULL},
+ {PMURES_BIT(RES4362_ALP_AVAIL), RES_DEPEND_SET, 0x060207cf, NULL},
+ {PMURES_BIT(RES4362_RADIO_AUX_PU), RES_DEPEND_SET, 0x040438df, NULL},
+ {PMURES_BIT(RES4362_MINIPMU_AUX_PU), RES_DEPEND_SET, 0x041438df, NULL},
+ {PMURES_BIT(RES4362_CORE_RDY_MAIN), RES_DEPEND_SET, 0x0401c0cf, NULL},
+ {PMURES_BIT(RES4362_RADIO_MAIN_PU), RES_DEPEND_SET, 0x0441c0df, NULL},
+ {PMURES_BIT(RES4362_MINIPMU_MAIN_PU), RES_DEPEND_SET, 0x04c1c0df, NULL},
+ {PMURES_BIT(RES4362_PCIE_EP_PU), RES_DEPEND_SET, 0x040000cf, NULL},
+ {PMURES_BIT(RES4362_COLD_START_WAIT), RES_DEPEND_SET, 0x0000000f, NULL},
+ {PMURES_BIT(RES4362_ARMHTAVAIL), RES_DEPEND_SET, 0x060a07cf, NULL},
+ {PMURES_BIT(RES4362_HT_AVAIL), RES_DEPEND_SET, 0x060a07cf, NULL},
+ {PMURES_BIT(RES4362_MACPHY_AUX_CLK_AVAIL), RES_DEPEND_SET, 0x163e3fdf, NULL},
+ {PMURES_BIT(RES4362_MACPHY_MAIN_CLK_AVAIL), RES_DEPEND_SET, 0x17cbc7df, NULL},
+};
+
+static const pmu_res_updown_t BCMATTACHDATA(bcm4362_res_updown)[] = {
+ {RES4362_DUMMY, 0x00220022},
+ {RES4362_ABUCK, 0x00c80022},
+ {RES4362_PMU_SLEEP, 0x00c80022},
+ {RES4362_MISCLDO_PU, 0x00bd0022},
+ {RES4362_LDO3P3_PU, 0x01ad0022},
+ {RES4362_FAST_LPO_AVAIL, 0x01500022},
+ {RES4362_XTAL_PU, 0x05dc0022},
+ {RES4362_XTAL_STABLE, 0x00220022},
+ {RES4362_PWRSW_DIG, 0x009000ca},
+ {RES4362_SR_DIG, 0x00A000A0},
+ {RES4362_SLEEP_DIG, 0x00220022},
+ {RES4362_PWRSW_AUX, 0x039000ca},
+ {RES4362_SR_AUX, 0x01400140},
+ {RES4362_SLEEP_AUX, 0x00220022},
+ {RES4362_PWRSW_MAIN, 0x039000ca},
+ {RES4362_SR_MAIN, 0x01a001a0},
+ {RES4362_SLEEP_MAIN, 0x00220022},
+ {RES4362_DIG_CORE_RDY, 0x00220044},
+ {RES4362_CORE_RDY_AUX, 0x00220044},
+ {RES4362_ALP_AVAIL, 0x00220044},
+ {RES4362_RADIO_AUX_PU, 0x006e0022},
+ {RES4362_MINIPMU_AUX_PU, 0x00460022},
+ {RES4362_CORE_RDY_MAIN, 0x00220022},
+ {RES4362_RADIO_MAIN_PU, 0x006e0022},
+ {RES4362_MINIPMU_MAIN_PU, 0x00460022},
+ {RES4362_PCIE_EP_PU, 0x009000ca},
+ {RES4362_COLD_START_WAIT, 0x00220022},
+ {RES4362_ARMHTAVAIL, 0x00a80022},
+ {RES4362_HT_AVAIL, 0x00a80022},
+ {RES4362_MACPHY_AUX_CLK_AVAIL, 0x00640022},
+ {RES4362_MACPHY_MAIN_CLK_AVAIL, 0x00640022},
+};
+
+static const pmu_res_updown_t BCMATTACHDATA(bcm4378b0_res_updown)[] = {
+ {RES4378_ABUCK, 0x00c80022},
+ {RES4378_PMU_SLEEP, 0x011c0022},
+ {RES4378_MISC_LDO, 0x00c80022},
+ {RES4378_XTAL_PU, 0x05dc0022},
+ {RES4378_SR_DIG, 0x00700070},
+ {RES4378_SR_AUX, 0x01800180},
+ {RES4378_SR_MAIN, 0x01a001a0},
+ {RES4378_RADIO_AUX_PU, 0x006e0022},
+ {RES4378_MINIPMU_AUX_PU, 0x00460022},
+ {RES4378_RADIO_MAIN_PU, 0x006e0022},
+ {RES4378_MINIPMU_MAIN_PU, 0x00460022},
+ {RES4378_CORE_RDY_CB, 0x00220022},
+#ifdef BCMPCIE_TREFUP_HW_SUPPORT
+ {RES4378_PWRSW_CB, 0x015e00ca},
+#endif
+ {RES4378_MACPHY_AUX_CLK_AVAIL, 0x00640022},
+ {RES4378_MACPHY_MAIN_CLK_AVAIL, 0x00640022},
+};
+
+static pmu_res_depend_t BCMATTACHDATA(bcm4378b0_res_depend)[] = {
+ {PMURES_BIT(RES4378_ABUCK), RES_DEPEND_SET, 0x00000005, NULL},
+ {PMURES_BIT(RES4378_PMU_SLEEP), RES_DEPEND_SET, 0x00000001, NULL},
+ {PMURES_BIT(RES4378_MISC_LDO), RES_DEPEND_SET, 0x00000007, NULL},
+ {PMURES_BIT(RES4378_LDO3P3_PU), RES_DEPEND_SET, 0x00000001, NULL},
+ {PMURES_BIT(RES4378_FAST_LPO_AVAIL), RES_DEPEND_SET, 0x00000005, NULL},
+ {PMURES_BIT(RES4378_XTAL_PU), RES_DEPEND_SET, 0x00000007, NULL},
+ {PMURES_BIT(RES4378_XTAL_STABLE), RES_DEPEND_SET, 0x00000047, NULL},
+ {PMURES_BIT(RES4378_PWRSW_DIG), RES_DEPEND_SET, 0x060000ef, NULL},
+ {PMURES_BIT(RES4378_SR_DIG), RES_DEPEND_SET, 0x060001ef, NULL},
+ {PMURES_BIT(RES4378_SLEEP_DIG), RES_DEPEND_SET, 0x060003ef, NULL},
+ {PMURES_BIT(RES4378_PWRSW_AUX), RES_DEPEND_SET, 0x060000ef, NULL},
+ {PMURES_BIT(RES4378_SR_AUX), RES_DEPEND_SET, 0x060008ef, NULL},
+ {PMURES_BIT(RES4378_SLEEP_AUX), RES_DEPEND_SET, 0x060018ef, NULL},
+ {PMURES_BIT(RES4378_PWRSW_MAIN), RES_DEPEND_SET, 0x060000ef, NULL},
+ {PMURES_BIT(RES4378_SR_MAIN), RES_DEPEND_SET, 0x060040ef, NULL},
+ {PMURES_BIT(RES4378_SLEEP_MAIN), RES_DEPEND_SET, 0x0600c0ef, NULL},
+ {PMURES_BIT(RES4378_CORE_RDY_DIG), RES_DEPEND_SET, 0x060007ef, NULL},
+ {PMURES_BIT(RES4378_CORE_RDY_AUX), RES_DEPEND_SET, 0x06023fef, NULL},
+ {PMURES_BIT(RES4378_ALP_AVAIL), RES_DEPEND_SET, 0x000000c7, NULL},
+ {PMURES_BIT(RES4378_RADIO_AUX_PU), RES_DEPEND_SET, 0x06063fff, NULL},
+ {PMURES_BIT(RES4378_MINIPMU_AUX_PU), RES_DEPEND_SET, 0x06163fff, NULL},
+ {PMURES_BIT(RES4378_CORE_RDY_MAIN), RES_DEPEND_SET, 0x0603c7ef, NULL},
+ {PMURES_BIT(RES4378_RADIO_MAIN_PU), RES_DEPEND_SET, 0x0643c7ff, NULL},
+ {PMURES_BIT(RES4378_MINIPMU_MAIN_PU), RES_DEPEND_SET, 0x06c3c7ff, NULL},
+#ifdef BCMPCIE_TREFUP_HW_SUPPORT
+ {PMURES_BIT(RES4378_CORE_RDY_CB), RES_DEPEND_SET, 0x0400002f, NULL},
+#else
+ {PMURES_BIT(RES4378_CORE_RDY_CB), RES_DEPEND_SET, 0x040000ef, NULL},
+#endif
+ {PMURES_BIT(RES4378_PWRSW_CB), RES_DEPEND_SET, 0x0000002f, NULL},
+ {PMURES_BIT(RES4378_ARMHTAVAIL), RES_DEPEND_SET, 0x000800c7, NULL},
+ {PMURES_BIT(RES4378_HT_AVAIL), RES_DEPEND_SET, 0x000800c7, NULL},
+ {PMURES_BIT(RES4378_MACPHY_AUX_CLK_AVAIL), RES_DEPEND_SET, 0x163e3fff, NULL},
+ {PMURES_BIT(RES4378_MACPHY_MAIN_CLK_AVAIL), RES_DEPEND_SET, 0x17cbc7ff, NULL},
+};
+
+static const pmu_res_updown_t BCMATTACHDATA(bcm4387b0_res_updown_qt)[] = {
+ {RES4387_XTAL_PU, 0x012c0033},
+ {RES4387_PWRSW_DIG, 0x38993899},
+ {RES4387_PWRSW_AUX, 0x38993899},
+ {RES4387_PWRSW_SCAN, 0x38993899},
+ {RES4387_PWRSW_MAIN, 0x38993899},
+ {RES4387_CORE_RDY_CB, 0x00960033},
+};
+
+static const pmu_res_subst_trans_tmr_t BCMATTACHDATA(bcm4387b0_res_subst_trans_tmr_qt)[] = {
+ {RES4387_PWRSW_DIG, 0, 0x38993800},
+ {RES4387_PWRSW_DIG, 1, 0x36000600},
+ {RES4387_PWRSW_DIG, 2, 0x01000002},
+
+ {RES4387_PWRSW_AUX, 0, 0x38993800},
+ {RES4387_PWRSW_AUX, 1, 0x36000600},
+ {RES4387_PWRSW_AUX, 2, 0x01000002},
+
+ {RES4387_PWRSW_SCAN, 0, 0x38993800},
+ {RES4387_PWRSW_SCAN, 1, 0x36000600},
+ {RES4387_PWRSW_SCAN, 2, 0x01000002},
+
+ {RES4387_PWRSW_MAIN, 0, 0x38993800},
+ {RES4387_PWRSW_MAIN, 1, 0x36000600},
+ {RES4387_PWRSW_MAIN, 2, 0x01000002},
+};
+
+static const pmu_res_updown_t BCMATTACHDATA(bcm4387b0_res_updown)[] = {
+ {RES4387_PMU_SLEEP, 0x00960022},
+ {RES4387_MISC_LDO, 0x00320022},
+ {RES4387_XTAL_HQ, 0x00210021},
+ {RES4387_XTAL_PU, 0x03e80033},
+ {RES4387_PWRSW_DIG, 0x04b002bc},
+ {RES4387_PWRSW_AUX, 0x060e03bc},
+ {RES4387_PWRSW_SCAN, 0x060e03bc},
+ {RES4387_PWRSW_MAIN, 0x060e03bc},
+ {RES4387_CORE_RDY_CB, 0x000a0033},
+ {RES4387_PWRSW_CB, 0x006400ca},
+};
+
+static const pmu_res_subst_trans_tmr_t BCMATTACHDATA(bcm4387b0_res_subst_trans_tmr)[] = {
+ {RES4387_PWRSW_DIG, 0, 0x04b002bc},
+ {RES4387_PWRSW_DIG, 1, 0x02500210},
+ {RES4387_PWRSW_DIG, 2, 0x00a00010},
+
+ {RES4387_PWRSW_AUX, 0, 0x060e03ac},
+ {RES4387_PWRSW_AUX, 1, 0x028a0134},
+ {RES4387_PWRSW_AUX, 2, 0x00320002},
+
+ {RES4387_PWRSW_MAIN, 0, 0x060e03b2},
+ {RES4387_PWRSW_MAIN, 1, 0x028a0134},
+ {RES4387_PWRSW_MAIN, 2, 0x00320002},
+
+ {RES4387_PWRSW_SCAN, 0, 0x060e03b2},
+ {RES4387_PWRSW_SCAN, 1, 0x028a0134},
+ {RES4387_PWRSW_SCAN, 2, 0x00320002},
+};
+
+static pmu_res_depend_t BCMATTACHDATA(bcm4387b0_res_depend)[] = {
+ {PMURES_BIT(RES4387_DUMMY), RES_DEPEND_SET, 0x0, NULL},
+ {PMURES_BIT(RES4387_RESERVED_1), RES_DEPEND_SET, 0x0, NULL},
+ {PMURES_BIT(RES4387_PMU_SLEEP), RES_DEPEND_SET, 0x1, NULL},
+ {PMURES_BIT(RES4387_MISC_LDO), RES_DEPEND_SET, 0x5, NULL},
+ {PMURES_BIT(RES4387_RESERVED_4), RES_DEPEND_SET, 0x0, NULL},
+ {PMURES_BIT(RES4387_XTAL_HQ), RES_DEPEND_SET, 0xc5, NULL},
+ {PMURES_BIT(RES4387_XTAL_PU), RES_DEPEND_SET, 0x5, NULL},
+ {PMURES_BIT(RES4387_XTAL_STABLE), RES_DEPEND_SET, 0x45, NULL},
+ {PMURES_BIT(RES4387_PWRSW_DIG), RES_DEPEND_SET, 0x060000CD, NULL},
+ {PMURES_BIT(RES4387_CORE_RDY_BTMAIN), RES_DEPEND_SET, 0xCD, NULL},
+ {PMURES_BIT(RES4387_CORE_RDY_BTSC), RES_DEPEND_SET, 0xC5, NULL},
+ {PMURES_BIT(RES4387_PWRSW_AUX), RES_DEPEND_SET, 0xCD, NULL},
+ {PMURES_BIT(RES4387_PWRSW_SCAN), RES_DEPEND_SET, 0xCD, NULL},
+ {PMURES_BIT(RES4387_CORE_RDY_SCAN), RES_DEPEND_SET, 0x060010CD, NULL},
+ {PMURES_BIT(RES4387_PWRSW_MAIN), RES_DEPEND_SET, 0xCD, NULL},
+ {PMURES_BIT(RES4387_RESERVED_15), RES_DEPEND_SET, 0x0, NULL},
+ {PMURES_BIT(RES4387_RESERVED_16), RES_DEPEND_SET, 0x0, NULL},
+ {PMURES_BIT(RES4387_CORE_RDY_DIG), RES_DEPEND_SET, 0x060001CD, NULL},
+ {PMURES_BIT(RES4387_CORE_RDY_AUX), RES_DEPEND_SET, 0x060209CD, NULL},
+ {PMURES_BIT(RES4387_ALP_AVAIL), RES_DEPEND_SET, 0xC5, NULL},
+ {PMURES_BIT(RES4387_RADIO_PU_AUX), RES_DEPEND_SET, 0x060609CD, NULL},
+ {PMURES_BIT(RES4387_RADIO_PU_SCAN), RES_DEPEND_SET, 0x060030CD, NULL},
+ {PMURES_BIT(RES4387_CORE_RDY_MAIN), RES_DEPEND_SET, 0x060241CD, NULL},
+ {PMURES_BIT(RES4387_RADIO_PU_MAIN), RES_DEPEND_SET, 0x064241CD, NULL},
+ {PMURES_BIT(RES4387_MACPHY_CLK_SCAN), RES_DEPEND_SET, 0x162830CD, NULL},
+ {PMURES_BIT(RES4387_CORE_RDY_CB), RES_DEPEND_SET, 0x0400000D, NULL},
+ {PMURES_BIT(RES4387_PWRSW_CB), RES_DEPEND_SET, 0x0000000D, NULL},
+ {PMURES_BIT(RES4387_ARMCLK_AVAIL), RES_DEPEND_SET, 0x000800CD, NULL},
+ {PMURES_BIT(RES4387_HT_AVAIL), RES_DEPEND_SET, 0x000800CD, NULL},
+ {PMURES_BIT(RES4387_MACPHY_CLK_AUX), RES_DEPEND_SET, 0x161E09ED, NULL},
+ {PMURES_BIT(RES4387_MACPHY_CLK_MAIN), RES_DEPEND_SET, 0x16CA41ED, NULL},
+};
+
+static const pmu_res_updown_t BCMATTACHDATA(bcm4387c0_res_updown_topoff)[] = {
+ {RES4387_PMU_SLEEP, 0x02000022},
+ {RES4387_MISC_LDO, 0x00320022},
+ {RES4387_SERDES_AFE_RET, 0x00010001},
+ {RES4387_XTAL_HQ, 0x00210021},
+ {RES4387_XTAL_PU, 0x03e80033},
+ {RES4387_PWRSW_DIG, 0x00d20102},
+ {RES4387_PWRSW_AUX, 0x01c201e2},
+ {RES4387_PWRSW_SCAN, 0x01020122},
+ {RES4387_PWRSW_MAIN, 0x02220242},
+ {RES4387_CORE_RDY_CB, 0x000a0033},
+ {RES4387_PWRSW_CB, 0x006400ca},
+};
+
+static const pmu_res_updown_t BCMATTACHDATA(bcm4387c0_res_updown)[] = {
+#ifdef BCM_PMU_FLL_PU_MANAGE
+ {RES4387_FAST_LPO_AVAIL, 0x00960001},
+#endif
+ {RES4387_PMU_SLEEP, 0x00960022},
+ {RES4387_MISC_LDO, 0x00320022},
+ {RES4387_XTAL_HQ, 0x00210021},
+ {RES4387_XTAL_PU, 0x03e80033},
+ {RES4387_PWRSW_DIG, 0x01320172},
+ {RES4387_PWRSW_AUX, 0x01c201e2},
+ {RES4387_PWRSW_SCAN, 0x019201b2},
+ {RES4387_PWRSW_MAIN, 0x02220242},
+ {RES4387_CORE_RDY_CB, 0x000a0033},
+ {RES4387_PWRSW_CB, 0x006400ca},
+};
+
+static const pmu_res_subst_trans_tmr_t BCMATTACHDATA(bcm4387c0_res_subst_trans_tmr)[] = {
+ {RES4387_PWRSW_DIG, 0, 0x01320142},
+ {RES4387_PWRSW_DIG, 1, 0x00e2005a},
+ {RES4387_PWRSW_DIG, 2, 0x00c20052},
+ {RES4387_PWRSW_DIG, 3, 0x00020002},
+
+ {RES4387_PWRSW_AUX, 0, 0x01c201b2},
+ {RES4387_PWRSW_AUX, 1, 0x0172005a},
+ {RES4387_PWRSW_AUX, 2, 0x01520052},
+ {RES4387_PWRSW_AUX, 3, 0x00020002},
+
+ {RES4387_PWRSW_MAIN, 0, 0x02220212},
+ {RES4387_PWRSW_MAIN, 1, 0x01d2005a},
+ {RES4387_PWRSW_MAIN, 2, 0x01b20052},
+ {RES4387_PWRSW_MAIN, 3, 0x00020002},
+
+ {RES4387_PWRSW_SCAN, 0, 0x01920182},
+ {RES4387_PWRSW_SCAN, 1, 0x0142005a},
+ {RES4387_PWRSW_SCAN, 2, 0x01220052},
+ {RES4387_PWRSW_SCAN, 3, 0x00020002},
+};
+
+static pmu_res_depend_t BCMATTACHDATA(bcm4387c0_res_depend)[] = {
+ {PMURES_BIT(RES4387_DUMMY), RES_DEPEND_SET, 0x0, NULL},
+ {PMURES_BIT(RES4387_FAST_LPO_AVAIL), RES_DEPEND_SET, 0x0, NULL},
+ {PMURES_BIT(RES4387_PMU_LP), RES_DEPEND_SET, 0x1, NULL},
+ {PMURES_BIT(RES4387_MISC_LDO), RES_DEPEND_SET, 0x5, NULL},
+ {PMURES_BIT(RES4387_SERDES_AFE_RET), RES_DEPEND_SET, 0xD, NULL},
+ {PMURES_BIT(RES4387_XTAL_HQ), RES_DEPEND_SET, 0xC5, NULL},
+ {PMURES_BIT(RES4387_XTAL_PU), RES_DEPEND_SET, 0x5, NULL},
+ {PMURES_BIT(RES4387_XTAL_STABLE), RES_DEPEND_SET, 0x45, NULL},
+ {PMURES_BIT(RES4387_PWRSW_DIG), RES_DEPEND_SET, 0x060000DD, NULL},
+ {PMURES_BIT(RES4387_CORE_RDY_BTMAIN), RES_DEPEND_SET, 0xCD, NULL},
+ {PMURES_BIT(RES4387_CORE_RDY_BTSC), RES_DEPEND_SET, 0xC5, NULL},
+ {PMURES_BIT(RES4387_PWRSW_AUX), RES_DEPEND_SET, 0xCD, NULL},
+ {PMURES_BIT(RES4387_PWRSW_SCAN), RES_DEPEND_SET, 0xCD, NULL},
+ {PMURES_BIT(RES4387_CORE_RDY_SCAN), RES_DEPEND_SET, 0x060010DD, NULL},
+ {PMURES_BIT(RES4387_PWRSW_MAIN), RES_DEPEND_SET, 0xCD, NULL},
+ {PMURES_BIT(RES4387_XTAL_PM_CLK), RES_DEPEND_SET, 0xC5, NULL},
+ {PMURES_BIT(RES4387_RESERVED_16), RES_DEPEND_SET, 0x0, NULL},
+ {PMURES_BIT(RES4387_CORE_RDY_DIG), RES_DEPEND_SET, 0x060001DD, NULL},
+ {PMURES_BIT(RES4387_CORE_RDY_AUX), RES_DEPEND_SET, 0x060209DD, NULL},
+ {PMURES_BIT(RES4387_ALP_AVAIL), RES_DEPEND_SET, 0x80C5, NULL},
+ {PMURES_BIT(RES4387_RADIO_PU_AUX), RES_DEPEND_SET, 0x060609DD, NULL},
+ {PMURES_BIT(RES4387_RADIO_PU_SCAN), RES_DEPEND_SET, 0x060030DD, NULL},
+ {PMURES_BIT(RES4387_CORE_RDY_MAIN), RES_DEPEND_SET, 0x060241DD, NULL},
+ {PMURES_BIT(RES4387_RADIO_PU_MAIN), RES_DEPEND_SET, 0x064241DD, NULL},
+ {PMURES_BIT(RES4387_MACPHY_CLK_SCAN), RES_DEPEND_SET, 0x1628B0DD, NULL},
+ {PMURES_BIT(RES4387_CORE_RDY_CB), RES_DEPEND_SET, 0x0400001D, NULL},
+ {PMURES_BIT(RES4387_PWRSW_CB), RES_DEPEND_SET, 0x0000001D, NULL},
+ {PMURES_BIT(RES4387_ARMCLK_AVAIL), RES_DEPEND_SET, 0x000880CD, NULL},
+ {PMURES_BIT(RES4387_HT_AVAIL), RES_DEPEND_SET, 0x000880CD, NULL},
+ {PMURES_BIT(RES4387_MACPHY_CLK_AUX), RES_DEPEND_SET, 0x161E89FD, NULL},
+ {PMURES_BIT(RES4387_MACPHY_CLK_MAIN), RES_DEPEND_SET, 0x16CAC1FD, NULL},
+};
+
+static const pmu_res_updown_t BCMATTACHDATA(bcm4388a0_res_updown_qt)[] = {
+ {RES4388_XTAL_PU, 0x012c0033},
+ {RES4388_PWRSW_DIG, 0x38993899},
+ {RES4388_PWRSW_AUX, 0x38993899},
+ {RES4388_PWRSW_SCAN, 0x38993899},
+ {RES4388_PWRSW_MAIN, 0x38993899},
+ {RES4388_CORE_RDY_CB, 0x00960033},
+};
+
+static const pmu_res_subst_trans_tmr_t BCMATTACHDATA(bcm4388a0_res_subst_trans_tmr_qt)[] = {
+ {RES4388_PWRSW_DIG, 0, 0x38993800},
+ {RES4388_PWRSW_DIG, 1, 0x36c00600},
+ {RES4388_PWRSW_DIG, 2, 0x360005a0},
+ {RES4388_PWRSW_DIG, 3, 0x01000002},
+
+ {RES4388_PWRSW_AUX, 0, 0x38993800},
+ {RES4388_PWRSW_AUX, 1, 0x36c00600},
+ {RES4388_PWRSW_AUX, 2, 0x360005a0},
+ {RES4388_PWRSW_AUX, 3, 0x01000002},
+
+ {RES4388_PWRSW_MAIN, 0, 0x38993800},
+ {RES4388_PWRSW_MAIN, 1, 0x36c00600},
+ {RES4388_PWRSW_MAIN, 2, 0x360005a0},
+ {RES4388_PWRSW_MAIN, 3, 0x01000002},
+
+ {RES4388_PWRSW_SCAN, 0, 0x38993800},
+ {RES4388_PWRSW_SCAN, 1, 0x33c00600},
+ {RES4388_PWRSW_SCAN, 2, 0x330005a0},
+ {RES4388_PWRSW_SCAN, 3, 0x01000002},
+};
+
+static const pmu_res_updown_t BCMATTACHDATA(bcm4388a0_res_updown)[] = {
+#ifdef BCM_PMU_FLL_PU_MANAGE
+ {RES4388_FAST_LPO_AVAIL, 0x00960001},
+#endif /* BCM_PMU_FLL_PU_MANAGE */
+ {RES4388_PMU_LP, 0x00960022},
+ {RES4388_MISC_LDO, 0x00320022},
+ {RES4388_XTAL_HQ, 0x00210021},
+ {RES4388_XTAL_PU, 0x03e80033},
+ {RES4388_PWRSW_DIG, 0x042c0349},
+ {RES4388_PWRSW_AUX, 0x0740046a},
+ {RES4388_PWRSW_SCAN, 0x03c802e8},
+ {RES4388_PWRSW_MAIN, 0x08080532},
+ {RES4388_CORE_RDY_CB, 0x000a0033},
+ {RES4388_PWRSW_CB, 0x006400ca},
+ {RES4388_MACPHY_CLK_MAIN, 0x00860022},
+};
+
+static const pmu_res_subst_trans_tmr_t BCMATTACHDATA(bcm4388a0_res_subst_trans_tmr)[] = {
+ {RES4388_PWRSW_DIG, 0, 0x0428033c},
+ {RES4388_PWRSW_DIG, 1, 0x028c0210},
+ {RES4388_PWRSW_DIG, 2, 0x01cc01b0},
+ {RES4388_PWRSW_DIG, 3, 0x00a00010},
+
+ {RES4388_PWRSW_AUX, 0, 0x0740045a},
+ {RES4388_PWRSW_AUX, 1, 0x03580202},
+ {RES4388_PWRSW_AUX, 2, 0x02f801a2},
+ {RES4388_PWRSW_AUX, 3, 0x00a00002},
+
+ {RES4388_PWRSW_MAIN, 0, 0x08080522},
+ {RES4388_PWRSW_MAIN, 1, 0x04200202},
+ {RES4388_PWRSW_MAIN, 2, 0x03c001a2},
+ {RES4388_PWRSW_MAIN, 3, 0x00a00002},
+
+ {RES4388_PWRSW_SCAN, 0, 0x03c402d8},
+ {RES4388_PWRSW_SCAN, 1, 0x02280210},
+ {RES4388_PWRSW_SCAN, 2, 0x016801b0},
+ {RES4388_PWRSW_SCAN, 3, 0x00a00010},
+};
+
+static pmu_res_depend_t BCMATTACHDATA(bcm4388a0_res_depend)[] = {
+ {PMURES_BIT(RES4388_DUMMY), RES_DEPEND_SET, 0x00000000, NULL},
+ {PMURES_BIT(RES4388_FAST_LPO_AVAIL), RES_DEPEND_SET, 0x00000000, NULL},
+ {PMURES_BIT(RES4388_PMU_LP), RES_DEPEND_SET, 0x00000001, NULL},
+ {PMURES_BIT(RES4388_MISC_LDO), RES_DEPEND_SET, 0x00000005, NULL},
+ {PMURES_BIT(RES4388_SERDES_AFE_RET), RES_DEPEND_SET, 0x0000000d, NULL},
+ {PMURES_BIT(RES4388_XTAL_HQ), RES_DEPEND_SET, 0x000000c5, NULL},
+ {PMURES_BIT(RES4388_XTAL_PU), RES_DEPEND_SET, 0x00000005, NULL},
+ {PMURES_BIT(RES4388_XTAL_STABLE), RES_DEPEND_SET, 0x00000045, NULL},
+ {PMURES_BIT(RES4388_PWRSW_DIG), RES_DEPEND_SET, 0x060000dd, NULL},
+ {PMURES_BIT(RES4388_BTMC_TOP_RDY), RES_DEPEND_SET, 0x000000cd, NULL},
+ {PMURES_BIT(RES4388_BTSC_TOP_RDY), RES_DEPEND_SET, 0x000000c5, NULL},
+ {PMURES_BIT(RES4388_PWRSW_AUX), RES_DEPEND_SET, 0x000000cd, NULL},
+ {PMURES_BIT(RES4388_PWRSW_SCAN), RES_DEPEND_SET, 0x000000cd, NULL},
+ {PMURES_BIT(RES4388_CORE_RDY_SCAN), RES_DEPEND_SET, 0x060211dd, NULL},
+ {PMURES_BIT(RES4388_PWRSW_MAIN), RES_DEPEND_SET, 0x000000cd, NULL},
+ {PMURES_BIT(RES4388_RESERVED_15), RES_DEPEND_SET, 0x00000000, NULL},
+ {PMURES_BIT(RES4388_RESERVED_16), RES_DEPEND_SET, 0x00000000, NULL},
+ {PMURES_BIT(RES4388_CORE_RDY_DIG), RES_DEPEND_SET, 0x060001dd, NULL},
+ {PMURES_BIT(RES4388_CORE_RDY_AUX), RES_DEPEND_SET, 0x060209dd, NULL},
+ {PMURES_BIT(RES4388_ALP_AVAIL), RES_DEPEND_SET, 0x000000c5, NULL},
+ {PMURES_BIT(RES4388_RADIO_PU_AUX), RES_DEPEND_SET, 0x060609dd, NULL},
+ {PMURES_BIT(RES4388_RADIO_PU_SCAN), RES_DEPEND_SET, 0x060231dd, NULL},
+ {PMURES_BIT(RES4388_CORE_RDY_MAIN), RES_DEPEND_SET, 0x060241dd, NULL},
+ {PMURES_BIT(RES4388_RADIO_PU_MAIN), RES_DEPEND_SET, 0x064241dd, NULL},
+ {PMURES_BIT(RES4388_MACPHY_CLK_SCAN), RES_DEPEND_SET, 0x162a31fd, NULL},
+ {PMURES_BIT(RES4388_CORE_RDY_CB), RES_DEPEND_SET, 0x040000dd, NULL},
+ {PMURES_BIT(RES4388_PWRSW_CB), RES_DEPEND_SET, 0x000000dd, NULL},
+ {PMURES_BIT(RES4388_ARMCLKAVAIL), RES_DEPEND_SET, 0x000800cd, NULL},
+ {PMURES_BIT(RES4388_HT_AVAIL), RES_DEPEND_SET, 0x000800cd, NULL},
+ {PMURES_BIT(RES4388_MACPHY_CLK_AUX), RES_DEPEND_SET, 0x161e09fd, NULL},
+ {PMURES_BIT(RES4388_MACPHY_CLK_MAIN), RES_DEPEND_SET, 0x16ca41fd, NULL},
+};
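+/*
+ * A worked reading of the encoding above (a sketch; resource bit positions
+ * are assumed to follow the RES4388_* ordering implied by the table):
+ * depend_mask is a bitmap over resource numbers, so the MISC_LDO row's mask
+ * of 0x00000005 decodes as
+ *
+ *   0x00000005 == PMURES_BIT(RES4388_DUMMY)    bit 0
+ *               | PMURES_BIT(RES4388_PMU_LP)   bit 2
+ *
+ * i.e. MISC_LDO may only come up once DUMMY and PMU_LP are up.
+ */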
+
+static const pmu_res_updown_t BCMATTACHDATA(bcm4389b0_res_updown_qt)[] = {
+ {RES4389_XTAL_PU, 0x012c0033},
+ {RES4389_PWRSW_DIG, 0x38993899},
+ {RES4389_PWRSW_AUX, 0x38993899},
+ {RES4389_PWRSW_SCAN, 0x38993899},
+ {RES4389_PWRSW_MAIN, 0x38993899},
+ {RES4389_CORE_RDY_CB, 0x00960033},
+};
+
+static const pmu_res_subst_trans_tmr_t BCMATTACHDATA(bcm4389b0_res_subst_trans_tmr_qt)[] = {
+ {RES4389_PWRSW_DIG, 0, 0x38993800},
+ {RES4389_PWRSW_DIG, 1, 0x36c00600},
+ {RES4389_PWRSW_DIG, 2, 0x360005a0},
+ {RES4389_PWRSW_DIG, 3, 0x01000002},
+
+ {RES4389_PWRSW_AUX, 0, 0x38993800},
+ {RES4389_PWRSW_AUX, 1, 0x36c00600},
+ {RES4389_PWRSW_AUX, 2, 0x360005a0},
+ {RES4389_PWRSW_AUX, 3, 0x01000002},
+
+ {RES4389_PWRSW_MAIN, 0, 0x38993800},
+ {RES4389_PWRSW_MAIN, 1, 0x36c00600},
+ {RES4389_PWRSW_MAIN, 2, 0x360005a0},
+ {RES4389_PWRSW_MAIN, 3, 0x01000002},
+
+ {RES4389_PWRSW_SCAN, 0, 0x38993800},
+ {RES4389_PWRSW_SCAN, 1, 0x33c00600},
+ {RES4389_PWRSW_SCAN, 2, 0x330005a0},
+ {RES4389_PWRSW_SCAN, 3, 0x01000002},
+};
+
+static const pmu_res_updown_t BCMATTACHDATA(bcm4389b0_res_updown)[] = {
+#ifdef BCM_PMU_FLL_PU_MANAGE
+ {RES4389_FAST_LPO_AVAIL, 0x001e0001},
+#endif /* BCM_PMU_FLL_PU_MANAGE */
+ {RES4389_PMU_LP, 0x00960022},
+ {RES4389_MISC_LDO, 0x00320022},
+ {RES4389_XTAL_HQ, 0x00210021},
+ {RES4389_XTAL_PU, 0x03e80033},
+ {RES4389_PWRSW_DIG, 0x042c0349},
+ {RES4389_PWRSW_AUX, 0x0740046a},
+ {RES4389_PWRSW_SCAN, 0x03c802e8},
+ {RES4389_PWRSW_MAIN, 0x08080532},
+ {RES4389_CORE_RDY_CB, 0x000a0033},
+ {RES4389_PWRSW_CB, 0x006400ca},
+ {RES4389_MACPHY_CLK_MAIN, 0x00860022},
+};
+
+static const pmu_res_subst_trans_tmr_t BCMATTACHDATA(bcm4389b0_res_subst_trans_tmr)[] = {
+ {RES4389_PWRSW_DIG, 0, 0x0428033c},
+ {RES4389_PWRSW_DIG, 1, 0x028c0210},
+ {RES4389_PWRSW_DIG, 2, 0x01cc01b0},
+ {RES4389_PWRSW_DIG, 3, 0x00a00010},
+
+ {RES4389_PWRSW_AUX, 0, 0x0740045a},
+ {RES4389_PWRSW_AUX, 1, 0x03580202},
+ {RES4389_PWRSW_AUX, 2, 0x02f801a2},
+ {RES4389_PWRSW_AUX, 3, 0x00a00002},
+
+ {RES4389_PWRSW_MAIN, 0, 0x08080522},
+ {RES4389_PWRSW_MAIN, 1, 0x04200202},
+ {RES4389_PWRSW_MAIN, 2, 0x03c001a2},
+ {RES4389_PWRSW_MAIN, 3, 0x00a00002},
+
+ {RES4389_PWRSW_SCAN, 0, 0x03c402d8},
+ {RES4389_PWRSW_SCAN, 1, 0x02280210},
+ {RES4389_PWRSW_SCAN, 2, 0x016801b0},
+ {RES4389_PWRSW_SCAN, 3, 0x00a00010},
+};
+
+static pmu_res_depend_t BCMATTACHDATA(bcm4389b0_res_depend)[] = {
+ {PMURES_BIT(RES4389_DUMMY), RES_DEPEND_SET, 0x00000000, NULL},
+ {PMURES_BIT(RES4389_FAST_LPO_AVAIL), RES_DEPEND_SET, 0x00000000, NULL},
+ {PMURES_BIT(RES4389_PMU_LP), RES_DEPEND_SET, 0x00000001, NULL},
+ {PMURES_BIT(RES4389_MISC_LDO), RES_DEPEND_SET, 0x00000005, NULL},
+ {PMURES_BIT(RES4389_SERDES_AFE_RET), RES_DEPEND_SET, 0x0000000d, NULL},
+ {PMURES_BIT(RES4389_XTAL_HQ), RES_DEPEND_SET, 0x000000c5, NULL},
+ {PMURES_BIT(RES4389_XTAL_PU), RES_DEPEND_SET, 0x00000005, NULL},
+ {PMURES_BIT(RES4389_XTAL_STABLE), RES_DEPEND_SET, 0x00000045, NULL},
+ {PMURES_BIT(RES4389_PWRSW_DIG), RES_DEPEND_SET, 0x060000dd, NULL},
+ {PMURES_BIT(RES4389_BTMC_TOP_RDY), RES_DEPEND_SET, 0x000000cd, NULL},
+ {PMURES_BIT(RES4389_BTSC_TOP_RDY), RES_DEPEND_SET, 0x000000c5, NULL},
+ {PMURES_BIT(RES4389_PWRSW_AUX), RES_DEPEND_SET, 0x000000cd, NULL},
+ {PMURES_BIT(RES4389_PWRSW_SCAN), RES_DEPEND_SET, 0x000000cd, NULL},
+ {PMURES_BIT(RES4389_CORE_RDY_SCAN), RES_DEPEND_SET, 0x060211dd, NULL},
+ {PMURES_BIT(RES4389_PWRSW_MAIN), RES_DEPEND_SET, 0x000000cd, NULL},
+ {PMURES_BIT(RES4389_RESERVED_15), RES_DEPEND_SET, 0x00000000, NULL},
+ {PMURES_BIT(RES4389_RESERVED_16), RES_DEPEND_SET, 0x00000000, NULL},
+ {PMURES_BIT(RES4389_CORE_RDY_DIG), RES_DEPEND_SET, 0x060001dd, NULL},
+ {PMURES_BIT(RES4389_CORE_RDY_AUX), RES_DEPEND_SET, 0x060209dd, NULL},
+ {PMURES_BIT(RES4389_ALP_AVAIL), RES_DEPEND_SET, 0x000000c5, NULL},
+ {PMURES_BIT(RES4389_RADIO_PU_AUX), RES_DEPEND_SET, 0x060609dd, NULL},
+ {PMURES_BIT(RES4389_RADIO_PU_SCAN), RES_DEPEND_SET, 0x060231dd, NULL},
+ {PMURES_BIT(RES4389_CORE_RDY_MAIN), RES_DEPEND_SET, 0x060241dd, NULL},
+ {PMURES_BIT(RES4389_RADIO_PU_MAIN), RES_DEPEND_SET, 0x064241dd, NULL},
+ {PMURES_BIT(RES4389_MACPHY_CLK_SCAN), RES_DEPEND_SET, 0x162a31fd, NULL},
+ {PMURES_BIT(RES4389_CORE_RDY_CB), RES_DEPEND_SET, 0x040000dd, NULL},
+ {PMURES_BIT(RES4389_PWRSW_CB), RES_DEPEND_SET, 0x000000dd, NULL},
+ {PMURES_BIT(RES4389_ARMCLKAVAIL), RES_DEPEND_SET, 0x000800cd, NULL},
+ {PMURES_BIT(RES4389_HT_AVAIL), RES_DEPEND_SET, 0x000800cd, NULL},
+ {PMURES_BIT(RES4389_MACPHY_CLK_AUX), RES_DEPEND_SET, 0x161e09fd, NULL},
+ {PMURES_BIT(RES4389_MACPHY_CLK_MAIN), RES_DEPEND_SET, 0x16ca41fd, NULL},
+};
+
+/** Enable or disable the AVB timer clock feature */
+void si_pmu_avbtimer_enable(si_t *sih, osl_t *osh, bool set_flag)
+{
+ uint32 min_mask = 0, max_mask = 0;
+ pmuregs_t *pmu;
+ uint origidx;
+
+ /* Remember original core before switch to chipc/pmu */
+ origidx = si_coreidx(sih);
+ if (AOB_ENAB(sih)) {
+ pmu = si_setcore(sih, PMU_CORE_ID, 0);
+ } else {
+ pmu = si_setcoreidx(sih, SI_CC_IDX);
+ }
+ ASSERT(pmu != NULL);
+
+ if ((CHIPID(sih->chip) == BCM4360_CHIP_ID || CHIPID(sih->chip) == BCM43460_CHIP_ID) &&
+ CHIPREV(sih->chiprev) >= 0x3) {
+ int cst_ht = CST4360_RSRC_INIT_MODE(sih->chipst) & 0x1;
+ if (cst_ht == 0) {
+ /* Enable the AVB timers for proxd feature */
+ min_mask = R_REG(osh, &pmu->min_res_mask);
+ max_mask = R_REG(osh, &pmu->max_res_mask);
+ if (set_flag) {
+ max_mask |= PMURES_BIT(RES4360_AVB_PLL_PWRSW_PU);
+ max_mask |= PMURES_BIT(RES4360_PCIE_TL_CLK_AVAIL);
+ min_mask |= PMURES_BIT(RES4360_AVB_PLL_PWRSW_PU);
+ min_mask |= PMURES_BIT(RES4360_PCIE_TL_CLK_AVAIL);
+ W_REG(osh, &pmu->min_res_mask, min_mask);
+ W_REG(osh, &pmu->max_res_mask, max_mask);
+ } else {
+ AND_REG(osh, &pmu->min_res_mask,
+ ~PMURES_BIT(RES4360_AVB_PLL_PWRSW_PU));
+ AND_REG(osh, &pmu->min_res_mask,
+ ~PMURES_BIT(RES4360_PCIE_TL_CLK_AVAIL));
+ AND_REG(osh, &pmu->max_res_mask,
+ ~PMURES_BIT(RES4360_AVB_PLL_PWRSW_PU));
+ AND_REG(osh, &pmu->max_res_mask,
+ ~PMURES_BIT(RES4360_PCIE_TL_CLK_AVAIL));
+ }
+ /* Wait for the resource uptime; note OSL_DELAY() is in microseconds */
+ OSL_DELAY(100);
+ }
+ }
+
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+}
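+/*
+ * Usage sketch (illustrative call sequence): a feature such as proxd would
+ * hold the AVB PLL and PCIE TL clock up for the duration of a measurement:
+ *
+ *   si_pmu_avbtimer_enable(sih, osh, TRUE);    request the resources
+ *   ... use the AVB timers ...
+ *   si_pmu_avbtimer_enable(sih, osh, FALSE);   drop the requests
+ */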
+
+/**
+ * Determines min/max rsrc masks. Normally hardware contains these masks, and software reads the
+ * masks from hardware. Note that masks are sometimes dependent on chip straps.
+ */
+static void
+si_pmu_res_masks(si_t *sih, uint32 *pmin, uint32 *pmax)
+{
+ uint32 min_mask = 0, max_mask = 0;
+
+ /* determine min/max rsrc masks */
+ switch (CHIPID(sih->chip)) {
+ case BCM4360_CHIP_ID:
+ case BCM4352_CHIP_ID:
+ if (CHIPREV(sih->chiprev) >= 0x4) {
+ min_mask = 0x103;
+ }
+ /* Continue - Don't break */
+ case BCM43460_CHIP_ID:
+ case BCM43526_CHIP_ID:
+ if (CHIPREV(sih->chiprev) >= 0x3) {
+ /* PR 110203 */
+ int cst_ht = CST4360_RSRC_INIT_MODE(sih->chipst) & 0x1;
+ if (cst_ht == 0)
+ max_mask = 0x1ff;
+ }
+ break;
+
+ CASE_BCM43602_CHIP:
+ /* as a bare minimum, have ALP clock running */
+ min_mask = PMURES_BIT(RES43602_LPLDO_PU) | PMURES_BIT(RES43602_REGULATOR) |
+ PMURES_BIT(RES43602_PMU_SLEEP) | PMURES_BIT(RES43602_XTALLDO_PU) |
+ PMURES_BIT(RES43602_SERDES_PU) | PMURES_BIT(RES43602_BBPLL_PWRSW_PU) |
+ PMURES_BIT(RES43602_SR_CLK_START) | PMURES_BIT(RES43602_SR_PHY_PWRSW) |
+ PMURES_BIT(RES43602_SR_SUBCORE_PWRSW) | PMURES_BIT(RES43602_XTAL_PU) |
+ PMURES_BIT(RES43602_PERST_OVR) | PMURES_BIT(RES43602_SR_CLK_STABLE) |
+ PMURES_BIT(RES43602_SR_SAVE_RESTORE) | PMURES_BIT(RES43602_SR_SLEEP) |
+ PMURES_BIT(RES43602_LQ_START) | PMURES_BIT(RES43602_LQ_AVAIL) |
+ PMURES_BIT(RES43602_WL_CORE_RDY) |
+ PMURES_BIT(RES43602_ALP_AVAIL);
+
+ if (sih->chippkg == BCM43602_12x12_PKG_ID) /* LPLDO WAR */
+ min_mask &= ~PMURES_BIT(RES43602_LPLDO_PU);
+
+ max_mask = (1<<3) | min_mask | PMURES_BIT(RES43602_RADIO_PU) |
+ PMURES_BIT(RES43602_RFLDO_PU) | PMURES_BIT(RES43602_HT_START) |
+ PMURES_BIT(RES43602_HT_AVAIL) | PMURES_BIT(RES43602_MACPHY_CLKAVAIL);
+
+#if defined(SAVERESTORE)
+ /* min_mask is updated after SR code is downloaded to txfifo */
+ if (SR_ENAB() && sr_isenab(sih)) {
+ ASSERT(sih->chippkg != BCM43602_12x12_PKG_ID);
+ min_mask = PMURES_BIT(RES43602_LPLDO_PU);
+ }
+#endif /* SAVERESTORE */
+ break;
+
+ case BCM43012_CHIP_ID:
+ case BCM43013_CHIP_ID:
+ case BCM43014_CHIP_ID:
+ /* Set the bits for all resources in the max mask except for the SR Engine */
+ max_mask = 0x7FFFFFFF;
+ break;
+ case BCM4369_CHIP_GRPID:
+ min_mask = 0x64fffff;
+#if defined(SAVERESTORE)
+ if (SR_ENAB() && sr_isenab(sih)) {
+ if (si_get_nvram_rfldo3p3_war(sih)) {
+ min_mask = 0x0000011;
+ } else {
+ min_mask = 0x0000001;
+ }
+ }
+#endif /* SAVERESTORE */
+ max_mask = 0x7FFFFFFF;
+ break;
+ case BCM4376_CHIP_GRPID:
+ case BCM4378_CHIP_GRPID:
+ min_mask = 0x064fffff;
+#if defined(SAVERESTORE)
+ if (SR_ENAB()) {
+ if (!sr_isenab(sih)) {
+ min_mask = 0x064fffff;
+ } else {
+ min_mask = PMURES_BIT(RES4378_DUMMY);
+ }
+ }
+#endif /* SAVERESTORE */
+ max_mask = 0x7FFFFFFF;
+ break;
+ case BCM4385_CHIP_GRPID:
+ case BCM4387_CHIP_GRPID:
+ min_mask = 0x64fffff;
+#if defined(SAVERESTORE)
+ if (SR_ENAB()) {
+ if (sr_isenab(sih)) {
+ min_mask = PMURES_BIT(RES4387_DUMMY);
+ } else {
+ min_mask = pmu_corereg(sih, SI_CC_IDX, min_res_mask, 0, 0);
+ if (PMU_FLL_PU_ENAB()) {
+ min_mask |= PMURES_BIT(RES4387_FAST_LPO_AVAIL) |
+ PMURES_BIT(RES4387_PMU_LP);
+ }
+ }
+ }
+#endif /* SAVERESTORE */
+
+ max_mask = 0x7FFFFFFF;
+ break;
+
+ case BCM4388_CHIP_GRPID:
+ min_mask = 0x64fffff;
+#if defined(SAVERESTORE)
+ if (SR_ENAB()) {
+ if (sr_isenab(sih)) {
+ min_mask = PMURES_BIT(RES4388_DUMMY);
+ } else {
+ min_mask = pmu_corereg(sih, SI_CC_IDX, min_res_mask, 0, 0);
+ if (PMU_FLL_PU_ENAB()) {
+ min_mask |= PMURES_BIT(RES4388_FAST_LPO_AVAIL) |
+ PMURES_BIT(RES4388_PMU_LP);
+ }
+ }
+ }
+#endif /* SAVERESTORE */
+ max_mask = 0x7FFFFFFF;
+ break;
+
+ case BCM4389_CHIP_GRPID:
+ /*
+ * TODO: check later whether this can be replaced with the chip default
+ * value read from the PMU min_res_mask register, and then remove the
+ * code in the SR_ENAB() portion below
+ */
+ min_mask = 0x64fffff;
+#if defined(SAVERESTORE)
+ if (SR_ENAB()) {
+ if (sr_isenab(sih)) {
+ min_mask = PMURES_BIT(RES4389_DUMMY);
+ } else {
+ min_mask = pmu_corereg(sih, SI_CC_IDX, min_res_mask, 0, 0);
+ if (PMU_FLL_PU_ENAB()) {
+ min_mask |= PMURES_BIT(RES4389_FAST_LPO_AVAIL) |
+ PMURES_BIT(RES4389_PMU_LP);
+ }
+ }
+ }
+#endif /* SAVERESTORE */
+ max_mask = 0x7FFFFFFF;
+ break;
+
+ case BCM4397_CHIP_GRPID:
+ min_mask = 0x64fffff;
+ max_mask = 0x7FFFFFFF;
+ break;
+
+ case BCM4362_CHIP_GRPID:
+ min_mask = 0x64fffff;
+#if defined(SAVERESTORE)
+ if (SR_ENAB() && sr_isenab(sih)) {
+ min_mask = (PMURES_BIT(RES4362_DUMMY));
+ }
+#endif /* SAVERESTORE */
+ max_mask = 0x7FFFFFFF;
+ break;
+
+ default:
+ PMU_ERROR(("MIN and MAX mask is not programmed\n"));
+ break;
+ }
+
+ if (!FWSIGN_ENAB()) {
+ /* nvram override */
+ si_nvram_res_masks(sih, &min_mask, &max_mask);
+ }
+
+ *pmin = min_mask;
+ *pmax = max_mask;
+} /* si_pmu_res_masks */
+
+/**
+ * resource dependencies can change because of the host interface
+ * selected, to work around an issue, or for more optimal power
+ * savings after tape out
+ */
+#ifdef DUAL_PMU_SEQUENCE
+static void
+si_pmu_resdeptbl_upd(si_t *sih, osl_t *osh, pmuregs_t *pmu,
+ const pmu_res_depend_t *restable, uint tablesz)
+#else
+static void
+BCMATTACHFN(si_pmu_resdeptbl_upd)(si_t *sih, osl_t *osh, pmuregs_t *pmu,
+ const pmu_res_depend_t *restable, uint tablesz)
+#endif /* DUAL_PMU_SEQUENCE */
+{
+ uint i, rsrcs;
+
+ if (tablesz == 0)
+ return;
+
+ ASSERT(restable != NULL);
+
+ rsrcs = (sih->pmucaps & PCAP_RC_MASK) >> PCAP_RC_SHIFT;
+ /* Program resource dependencies table */
+ while (tablesz--) {
+ if (restable[tablesz].filter != NULL &&
+ !(restable[tablesz].filter)(sih))
+ continue;
+ for (i = 0; i < rsrcs; i ++) {
+ if ((restable[tablesz].res_mask &
+ PMURES_BIT(i)) == 0)
+ continue;
+ W_REG(osh, &pmu->res_table_sel, i);
+ switch (restable[tablesz].action) {
+ case RES_DEPEND_SET:
+ PMU_MSG(("Changing rsrc %d res_dep_mask to 0x%x\n", i,
+ restable[tablesz].depend_mask));
+ W_REG(osh, &pmu->res_dep_mask,
+ restable[tablesz].depend_mask);
+ break;
+ case RES_DEPEND_ADD:
+ PMU_MSG(("Adding 0x%x to rsrc %d res_dep_mask\n",
+ restable[tablesz].depend_mask, i));
+ OR_REG(osh, &pmu->res_dep_mask,
+ restable[tablesz].depend_mask);
+ break;
+ case RES_DEPEND_REMOVE:
+ PMU_MSG(("Removing 0x%x from rsrc %d res_dep_mask\n",
+ restable[tablesz].depend_mask, i));
+ AND_REG(osh, &pmu->res_dep_mask,
+ ~restable[tablesz].depend_mask);
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+ }
+ }
+} /* si_pmu_resdeptbl_upd */
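+/*
+ * Usage sketch (hypothetical table, not a shipped WAR): a caller builds a
+ * small pmu_res_depend_t array and hands it to si_pmu_resdeptbl_upd(), e.g.
+ * to additionally gate HT_AVAIL on XTAL_PU on 4388:
+ *
+ *   static const pmu_res_depend_t bcm4388_res_war[] = {
+ *           {PMURES_BIT(RES4388_HT_AVAIL), RES_DEPEND_ADD,
+ *           PMURES_BIT(RES4388_XTAL_PU), NULL},
+ *   };
+ *   si_pmu_resdeptbl_upd(sih, osh, pmu, bcm4388_res_war,
+ *           ARRAYSIZE(bcm4388_res_war));
+ *
+ * A NULL filter applies a row unconditionally; a non-NULL filter lets the
+ * row opt out per chip at runtime.
+ */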
+
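+/**
+ * When the FLL is kept powered up (PMU_FLL_PU_ENAB()), patch the dependency
+ * table in place: FAST_LPO_AVAIL is made to depend only on DUMMY and PMU_LP,
+ * and every other non-reserved resource gains FAST_LPO_AVAIL as an extra
+ * dependency.
+ */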
+static void
+BCMATTACHFN(si_pmu_dep_table_fll_pu_fixup)(si_t *sih, osl_t *osh,
+ pmu_res_depend_t *pmu_res_depend_table, uint pmu_res_depend_table_sz)
+{
+ uint i;
+
+ if (!PMU_FLL_PU_ENAB()) {
+ return;
+ }
+
+ switch (CHIPID(sih->chip)) {
+ case BCM4387_CHIP_GRPID:
+ for (i = 0; i < pmu_res_depend_table_sz; i ++) {
+ if (pmu_res_depend_table[i].res_mask ==
+ PMURES_BIT(RES4387_FAST_LPO_AVAIL)) {
+ pmu_res_depend_table[i].depend_mask = PMURES_BIT(RES4387_DUMMY) |
+ PMURES_BIT(RES4387_PMU_LP);
+ } else if ((pmu_res_depend_table[i].res_mask !=
+ PMURES_BIT(RES4387_DUMMY)) &&
+ (pmu_res_depend_table[i].res_mask !=
+ PMURES_BIT(RES4387_PMU_LP)) &&
+ (pmu_res_depend_table[i].res_mask !=
+ PMURES_BIT(RES4387_RESERVED_16))) {
+ pmu_res_depend_table[i].depend_mask |=
+ PMURES_BIT(RES4387_FAST_LPO_AVAIL);
+ }
+ }
+ break;
+ case BCM4388_CHIP_GRPID:
+ for (i = 0; i < pmu_res_depend_table_sz; i ++) {
+ if (pmu_res_depend_table[i].res_mask ==
+ PMURES_BIT(RES4388_FAST_LPO_AVAIL)) {
+ pmu_res_depend_table[i].depend_mask = PMURES_BIT(RES4388_DUMMY) |
+ PMURES_BIT(RES4388_PMU_LP);
+ } else if ((pmu_res_depend_table[i].res_mask !=
+ PMURES_BIT(RES4388_DUMMY)) &&
+ (pmu_res_depend_table[i].res_mask !=
+ PMURES_BIT(RES4388_PMU_LP)) &&
+ (pmu_res_depend_table[i].res_mask !=
+ PMURES_BIT(RES4388_RESERVED_15)) &&
+ (pmu_res_depend_table[i].res_mask !=
+ PMURES_BIT(RES4388_RESERVED_16))) {
+ pmu_res_depend_table[i].depend_mask |=
+ PMURES_BIT(RES4388_FAST_LPO_AVAIL);
+ }
+ }
+ break;
+ case BCM4389_CHIP_GRPID:
+ for (i = 0; i < pmu_res_depend_table_sz; i ++) {
+ if (pmu_res_depend_table[i].res_mask ==
+ PMURES_BIT(RES4389_FAST_LPO_AVAIL)) {
+ pmu_res_depend_table[i].depend_mask = PMURES_BIT(RES4389_DUMMY) |
+ PMURES_BIT(RES4389_PMU_LP);
+ } else if ((pmu_res_depend_table[i].res_mask !=
+ PMURES_BIT(RES4389_DUMMY)) &&
+ (pmu_res_depend_table[i].res_mask !=
+ PMURES_BIT(RES4389_PMU_LP)) &&
+ (pmu_res_depend_table[i].res_mask !=
+ PMURES_BIT(RES4389_RESERVED_16))) {
+ pmu_res_depend_table[i].depend_mask |=
+ PMURES_BIT(RES4389_FAST_LPO_AVAIL);
+ }
+ }
+ break;
+ default:
+ PMU_MSG(("si_pmu_dep_table_fll_pu_fixup: unsupported chip!\n"));
+ ASSERT(0);
+ break;
+ }
+}
+
+/** Initialize PMU hardware resources. */
+void
+BCMATTACHFN(si_pmu_res_init)(si_t *sih, osl_t *osh)
+{
+ pmuregs_t *pmu;
+ uint origidx;
+ const pmu_res_updown_t *pmu_res_updown_table = NULL;
+ uint pmu_res_updown_table_sz = 0;
+ const pmu_res_subst_trans_tmr_t *pmu_res_subst_trans_tmr_table = NULL;
+ uint pmu_res_subst_trans_tmr_table_sz = 0;
+ pmu_res_depend_t *pmu_res_depend_table = NULL;
+ uint pmu_res_depend_table_sz = 0;
+#ifndef BCM_BOOTLOADER
+ const pmu_res_depend_t *pmu_res_depend_pciewar_table[2] = {NULL, NULL};
+ uint pmu_res_depend_pciewar_table_sz[2] = {0, 0};
+#endif /* BCM_BOOTLOADER */
+ uint32 min_mask = 0, max_mask = 0;
+ char name[8];
+ const char *val;
+ uint i, rsrcs;
+ uint8 fastlpo_dis = fastlpo_dis_get();
+ uint8 fastlpo_pcie_dis = fastlpo_pcie_dis_get();
+
+ ASSERT(sih->cccaps & CC_CAP_PMU);
+
+ /* Remember original core before switch to chipc/pmu */
+ origidx = si_coreidx(sih);
+ if (AOB_ENAB(sih)) {
+ pmu = si_setcore(sih, PMU_CORE_ID, 0);
+ } else {
+ pmu = si_setcoreidx(sih, SI_CC_IDX);
+ }
+ ASSERT(pmu != NULL);
+
+ /*
+ * Hardware contains the resource updown and dependency tables. Only if a chip has a
+ * hardware problem, software tables can be used to override hardware tables.
+ */
+ switch (CHIPID(sih->chip)) {
+ case BCM4360_CHIP_ID:
+ case BCM4352_CHIP_ID:
+ if (CHIPREV(sih->chiprev) < 4) {
+ pmu_res_updown_table = bcm4360_res_updown;
+ pmu_res_updown_table_sz = ARRAYSIZE(bcm4360_res_updown);
+ } else {
+ /* FOR 4360B1 */
+ pmu_res_updown_table = bcm4360B1_res_updown;
+ pmu_res_updown_table_sz = ARRAYSIZE(bcm4360B1_res_updown);
+ }
+ break;
+ CASE_BCM43602_CHIP:
+ pmu_res_updown_table = bcm43602_res_updown;
+ pmu_res_updown_table_sz = ARRAYSIZE(bcm43602_res_updown);
+ pmu_res_depend_table = bcm43602_res_depend;
+ pmu_res_depend_table_sz = ARRAYSIZE(bcm43602_res_depend);
+#ifndef BCM_BOOTLOADER
+ pmu_res_depend_pciewar_table[0] = bcm43602_res_pciewar;
+ pmu_res_depend_pciewar_table_sz[0] = ARRAYSIZE(bcm43602_res_pciewar);
+ if (sih->chippkg == BCM43602_12x12_PKG_ID) { /* LPLDO WAR */
+ pmu_res_depend_pciewar_table[1] = bcm43602_12x12_res_depend;
+ pmu_res_depend_pciewar_table_sz[1] = ARRAYSIZE(bcm43602_12x12_res_depend);
+ }
+#endif /* !BCM_BOOTLOADER */
+ break;
+ case BCM43012_CHIP_ID:
+ case BCM43013_CHIP_ID:
+ case BCM43014_CHIP_ID:
+ pmu_res_updown_table = bcm43012a0_res_updown_ds0;
+ pmu_res_updown_table_sz = ARRAYSIZE(bcm43012a0_res_updown_ds0);
+ pmu_res_depend_table = bcm43012a0_res_depend_ds0;
+ pmu_res_depend_table_sz = ARRAYSIZE(bcm43012a0_res_depend_ds0);
+ break;
+ case BCM4369_CHIP_GRPID:
+ /* fastlpo_dis is the override for the PMU 1MHz LPO; up/down times are
+ * updated accordingly. If PMU 1MHz is enabled, only the resource
+ * up/down times change. The up/down times also differ between A0 and B0.
+ */
+ if (fastlpo_dis) {
+ /* Only Resource Up/Down times are different b/w A0 and B0 */
+ if (CHIPREV(sih->chiprev) == 0) {
+ pmu_res_updown_table = bcm4369a0_res_updown;
+ pmu_res_updown_table_sz = ARRAYSIZE(bcm4369a0_res_updown);
+ } else {
+ pmu_res_updown_table = bcm4369b0_res_updown;
+ pmu_res_updown_table_sz = ARRAYSIZE(bcm4369b0_res_updown);
+ }
+ } else {
+ if (fastlpo_pcie_dis) {
+ PMU_ERROR(("INVALID: PCIE 1MHz disabled but PMU 1MHz enabled\n"));
+ ASSERT(0);
+ }
+ /* Only Resource Up/Down times are different b/w A0 and B0 */
+ if (CHIPREV(sih->chiprev) == 0) {
+ pmu_res_updown_table = bcm4369a0_res_updown_fastlpo_pmu;
+ pmu_res_updown_table_sz =
+ ARRAYSIZE(bcm4369a0_res_updown_fastlpo_pmu);
+ } else {
+ pmu_res_updown_table = bcm4369b0_res_updown_fastlpo_pmu;
+ pmu_res_updown_table_sz =
+ ARRAYSIZE(bcm4369b0_res_updown_fastlpo_pmu);
+ }
+ }
+
+ /* fastlpo_pcie_dis is the override for the PCIE 1MHz LPO; resource
+ * dependencies are updated when PCIE 1MHz is enabled. There is no
+ * resource dependency change between the A0 and B0 chip revs.
+ */
+ if (fastlpo_pcie_dis) {
+ pmu_res_depend_table = bcm4369a0_res_depend;
+ pmu_res_depend_table_sz = ARRAYSIZE(bcm4369a0_res_depend);
+ } else {
+ pmu_res_depend_table = bcm4369a0_res_depend_fastlpo_pcie;
+ pmu_res_depend_table_sz = ARRAYSIZE(bcm4369a0_res_depend_fastlpo_pcie);
+ }
+ break;
+
+ case BCM4362_CHIP_GRPID:
+ pmu_res_updown_table = bcm4362_res_updown;
+ pmu_res_updown_table_sz = ARRAYSIZE(bcm4362_res_updown);
+
+ GCI_REG_NEW(sih, bt_smem_control1, (0xFF<<16), 0);
+
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL14,
+ (PMU_CC14_MAIN_VDDB2VDDRET_UP_DLY_MASK |
+ PMU_CC14_MAIN_VDDB2VDD_UP_DLY_MASK |
+ PMU_CC14_AUX_VDDB2VDDRET_UP_DLY_MASK |
+ PMU_CC14_AUX_VDDB2VDD_UP_DLY_MASK |
+ PMU_CC14_PCIE_VDDB2VDDRET_UP_DLY_MASK |
+ PMU_CC14_PCIE_VDDB2VDD_UP_DLY_MASK), 0);
+
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL15,
+ (PMU_CC15_PCIE_VDDB_CURRENT_LIMIT_DELAY_MASK |
+ PMU_CC15_PCIE_VDDB_FORCE_RPS_PWROK_DELAY_MASK), 0);
+
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL10,
+ (PMU_CC10_PCIE_RESET0_CNT_SLOW_MASK |
+ PMU_CC10_PCIE_RESET1_CNT_SLOW_MASK), 0);
+
+ GCI_REG_NEW(sih, bt_smem_control0, (0xF<<16), 0);
+ GCI_REG_NEW(sih, bt_smem_control0, (0xF<<24), 0);
+
+ pmu_res_depend_table = bcm4362_res_depend;
+ pmu_res_depend_table_sz = ARRAYSIZE(bcm4362_res_depend);
+ break;
+
+ case BCM4376_CHIP_GRPID:
+ case BCM4378_CHIP_GRPID:
+ if (SR_ENAB()) {
+ pmu_res_updown_table = bcm4378b0_res_updown;
+ pmu_res_updown_table_sz = ARRAYSIZE(bcm4378b0_res_updown);
+ pmu_res_depend_table = bcm4378b0_res_depend;
+ pmu_res_depend_table_sz = ARRAYSIZE(bcm4378b0_res_depend);
+ }
+ break;
+
+ case BCM4385_CHIP_GRPID:
+ case BCM4387_CHIP_GRPID:
+ if (SR_ENAB()) {
+ if (ISSIM_ENAB(sih)) {
+ if (PMUREV(sih->pmurev) == 39) {
+ pmu_res_updown_table = bcm4387c0_res_updown;
+ pmu_res_updown_table_sz =
+ ARRAYSIZE(bcm4387c0_res_updown);
+
+ pmu_res_subst_trans_tmr_table =
+ bcm4387c0_res_subst_trans_tmr;
+ pmu_res_subst_trans_tmr_table_sz =
+ ARRAYSIZE(bcm4387c0_res_subst_trans_tmr);
+
+ pmu_res_depend_table = bcm4387c0_res_depend;
+ pmu_res_depend_table_sz = ARRAYSIZE(bcm4387c0_res_depend);
+ } else {
+ pmu_res_updown_table = bcm4387b0_res_updown_qt;
+ pmu_res_updown_table_sz =
+ ARRAYSIZE(bcm4387b0_res_updown_qt);
+
+ pmu_res_subst_trans_tmr_table =
+ bcm4387b0_res_subst_trans_tmr_qt;
+ pmu_res_subst_trans_tmr_table_sz =
+ ARRAYSIZE(bcm4387b0_res_subst_trans_tmr_qt);
+
+ pmu_res_depend_table = bcm4387b0_res_depend;
+ pmu_res_depend_table_sz = ARRAYSIZE(bcm4387b0_res_depend);
+ }
+ } else {
+ if (PMUREV(sih->pmurev) == 39) {
+ if (BCMSRTOPOFF_ENAB()) {
+ pmu_res_updown_table = bcm4387c0_res_updown_topoff;
+ pmu_res_updown_table_sz =
+ ARRAYSIZE(bcm4387c0_res_updown_topoff);
+ } else {
+ pmu_res_updown_table = bcm4387c0_res_updown;
+ pmu_res_updown_table_sz =
+ ARRAYSIZE(bcm4387c0_res_updown);
+ }
+
+ pmu_res_subst_trans_tmr_table =
+ bcm4387c0_res_subst_trans_tmr;
+ pmu_res_subst_trans_tmr_table_sz =
+ ARRAYSIZE(bcm4387c0_res_subst_trans_tmr);
+
+ pmu_res_depend_table = bcm4387c0_res_depend;
+ pmu_res_depend_table_sz = ARRAYSIZE(bcm4387c0_res_depend);
+
+ if (PMU_FLL_PU_ENAB()) {
+ si_pmu_dep_table_fll_pu_fixup(sih, osh,
+ pmu_res_depend_table,
+ pmu_res_depend_table_sz);
+ }
+ } else {
+ pmu_res_updown_table = bcm4387b0_res_updown;
+ pmu_res_updown_table_sz = ARRAYSIZE(bcm4387b0_res_updown);
+
+ pmu_res_subst_trans_tmr_table =
+ bcm4387b0_res_subst_trans_tmr;
+ pmu_res_subst_trans_tmr_table_sz =
+ ARRAYSIZE(bcm4387b0_res_subst_trans_tmr);
+
+ pmu_res_depend_table = bcm4387b0_res_depend;
+ pmu_res_depend_table_sz = ARRAYSIZE(bcm4387b0_res_depend);
+ }
+ }
+ }
+ break;
+
+ case BCM4388_CHIP_GRPID:
+ if (SR_ENAB()) {
+ if (ISSIM_ENAB(sih)) {
+ pmu_res_updown_table = bcm4388a0_res_updown_qt;
+ pmu_res_updown_table_sz = ARRAYSIZE(bcm4388a0_res_updown_qt);
+
+ pmu_res_subst_trans_tmr_table = bcm4388a0_res_subst_trans_tmr_qt;
+ pmu_res_subst_trans_tmr_table_sz =
+ ARRAYSIZE(bcm4388a0_res_subst_trans_tmr_qt);
+ } else {
+ pmu_res_updown_table = bcm4388a0_res_updown;
+ pmu_res_updown_table_sz = ARRAYSIZE(bcm4388a0_res_updown);
+
+ pmu_res_subst_trans_tmr_table = bcm4388a0_res_subst_trans_tmr;
+ pmu_res_subst_trans_tmr_table_sz =
+ ARRAYSIZE(bcm4388a0_res_subst_trans_tmr);
+ }
+
+ pmu_res_depend_table = bcm4388a0_res_depend;
+ pmu_res_depend_table_sz = ARRAYSIZE(bcm4388a0_res_depend);
+
+ if (PMU_FLL_PU_ENAB()) {
+ si_pmu_dep_table_fll_pu_fixup(sih, osh,
+ pmu_res_depend_table,
+ pmu_res_depend_table_sz);
+ }
+ }
+ break;
+
+ case BCM4389_CHIP_GRPID:
+ if (SR_ENAB()) {
+ if (ISSIM_ENAB(sih)) {
+ pmu_res_updown_table = bcm4389b0_res_updown_qt;
+ pmu_res_updown_table_sz = ARRAYSIZE(bcm4389b0_res_updown_qt);
+
+ pmu_res_subst_trans_tmr_table = bcm4389b0_res_subst_trans_tmr_qt;
+ pmu_res_subst_trans_tmr_table_sz =
+ ARRAYSIZE(bcm4389b0_res_subst_trans_tmr_qt);
+ } else {
+ pmu_res_updown_table = bcm4389b0_res_updown;
+ pmu_res_updown_table_sz = ARRAYSIZE(bcm4389b0_res_updown);
+
+ pmu_res_subst_trans_tmr_table = bcm4389b0_res_subst_trans_tmr;
+ pmu_res_subst_trans_tmr_table_sz =
+ ARRAYSIZE(bcm4389b0_res_subst_trans_tmr);
+ }
+
+ pmu_res_depend_table = bcm4389b0_res_depend;
+ pmu_res_depend_table_sz = ARRAYSIZE(bcm4389b0_res_depend);
+ if (PMU_FLL_PU_ENAB()) {
+ si_pmu_dep_table_fll_pu_fixup(sih, osh,
+ pmu_res_depend_table, pmu_res_depend_table_sz);
+ }
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ /* Program up/down timers */
+ while (pmu_res_updown_table_sz--) {
+ ASSERT(pmu_res_updown_table != NULL);
+ PMU_MSG(("Changing rsrc %d res_updn_timer to 0x%x\n",
+ pmu_res_updown_table[pmu_res_updown_table_sz].resnum,
+ pmu_res_updown_table[pmu_res_updown_table_sz].updown));
+ W_REG(osh, &pmu->res_table_sel,
+ pmu_res_updown_table[pmu_res_updown_table_sz].resnum);
+ W_REG(osh, &pmu->res_updn_timer,
+ pmu_res_updown_table[pmu_res_updown_table_sz].updown);
+ }
+
+ if (!FWSIGN_ENAB()) {
+ /* # resources */
+ rsrcs = (sih->pmucaps & PCAP_RC_MASK) >> PCAP_RC_SHIFT;
+
+ /* Apply nvram overrides to up/down timers */
+ for (i = 0; i < rsrcs; i ++) {
+ uint32 r_val;
+ snprintf(name, sizeof(name), rstr_rDt, i);
+ if ((val = getvar(NULL, name)) == NULL)
+ continue;
+ r_val = (uint32)bcm_strtoul(val, NULL, 0);
+ /* PMUrev = 13, pmu resource updown times are 12 bits(0:11 DT, 16:27 UT) */
+ /* OLD values are 8 bits for UT/DT, handle the old nvram format */
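+ /* Worked example of the conversion below: an old-format value of
+ * 0x1234 (UT=0x12, DT=0x34) becomes 0x00120034, i.e. the up time
+ * moves to bits 16:27 while the down time stays in bits 0:11.
+ */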
+ if (PMUREV(sih->pmurev) >= 13) {
+ if (r_val < (1 << 16)) {
+ uint16 up_time = (r_val >> 8) & 0xFF;
+ r_val &= 0xFF;
+ r_val |= (up_time << 16);
+ }
+ }
+ PMU_MSG(("Applying %s=%s to rsrc %d res_updn_timer\n", name, val, i));
+ W_REG(osh, &pmu->res_table_sel, (uint32)i);
+ W_REG(osh, &pmu->res_updn_timer, r_val);
+ }
+ }
+
+ /* Program Rsrc Substate Transition Timer */
+ while (pmu_res_subst_trans_tmr_table_sz --) {
+ ASSERT(pmu_res_subst_trans_tmr_table != NULL);
+ PMU_MSG(("Changing rsrc %d substate %d res_subst_trans_timer to 0x%x\n",
+ pmu_res_subst_trans_tmr_table[pmu_res_subst_trans_tmr_table_sz].resnum,
+ pmu_res_subst_trans_tmr_table[pmu_res_subst_trans_tmr_table_sz].substate,
+ pmu_res_subst_trans_tmr_table[pmu_res_subst_trans_tmr_table_sz].tmr));
+ W_REG(osh, &pmu->res_table_sel,
+ pmu_res_subst_trans_tmr_table[pmu_res_subst_trans_tmr_table_sz].resnum |
+ (pmu_res_subst_trans_tmr_table[pmu_res_subst_trans_tmr_table_sz].substate
+ << PMU_RES_SUBSTATE_SHIFT));
+ W_REG(osh, &pmu->rsrc_substate_trans_tmr,
+ pmu_res_subst_trans_tmr_table[pmu_res_subst_trans_tmr_table_sz].tmr);
+ }
+
+ /* Program resource dependencies table */
+ si_pmu_resdeptbl_upd(sih, osh, pmu, pmu_res_depend_table, pmu_res_depend_table_sz);
+
+ if (!FWSIGN_ENAB()) {
+ /* Apply nvram overrides to dependencies masks */
+ for (i = 0; i < rsrcs; i ++) {
+ snprintf(name, sizeof(name), rstr_rDd, i);
+ if ((val = getvar(NULL, name)) == NULL)
+ continue;
+ PMU_MSG(("Applying %s=%s to rsrc %d res_dep_mask\n", name, val, i));
+ W_REG(osh, &pmu->res_table_sel, (uint32)i);
+ W_REG(osh, &pmu->res_dep_mask, (uint32)bcm_strtoul(val, NULL, 0));
+ }
+ }
+
+#if !defined(BCM_BOOTLOADER)
+ /* Initialize any chip-interface-dependent PMU rsrc by looking at the
+ * chipstatus register to figure out the selected interface
+ */
+ /* This should be a general change covering all the chips. It should
+ * also validate builds where the dongle is built for SDIO but
+ * downloaded onto a PCIE dev
+ */
+ if (BUSTYPE(sih->bustype) == PCI_BUS || BUSTYPE(sih->bustype) == SI_BUS) {
+ bool is_pciedev = BCM43602_CHIP(sih->chip);
+
+ for (i = 0; i < ARRAYSIZE(pmu_res_depend_pciewar_table); i++) {
+ if (is_pciedev && pmu_res_depend_pciewar_table[i] &&
+ pmu_res_depend_pciewar_table_sz[i]) {
+ si_pmu_resdeptbl_upd(sih, osh, pmu,
+ pmu_res_depend_pciewar_table[i],
+ pmu_res_depend_pciewar_table_sz[i]);
+ }
+ }
+ }
+#endif /* !BCM_BOOTLOADER */
+ /* Determine min/max rsrc masks */
+ si_pmu_res_masks(sih, &min_mask, &max_mask);
+ /* Add min mask dependencies */
+ min_mask |= si_pmu_res_deps(sih, osh, pmu, min_mask, FALSE);
+
+#ifdef BCM_BOOTLOADER
+ /* Apply nvram override to max mask */
+ if ((val = getvar(NULL, "brmax")) != NULL) {
+ PMU_MSG(("Applying brmax=%s to max_res_mask\n", val));
+ max_mask = (uint32)bcm_strtoul(val, NULL, 0);
+ }
+
+ /* Apply nvram override to min mask */
+ if ((val = getvar(NULL, "brmin")) != NULL) {
+ PMU_MSG(("Applying brmin=%s to min_res_mask\n", val));
+ min_mask = (uint32)bcm_strtoul(val, NULL, 0);
+ }
+#endif /* BCM_BOOTLOADER */
+
+ /* apply the new PLL setting if strapped for ALP (open item: decide
+ * whether it should also be applied when strapped for HT)
+ */
+ if (((CHIPID(sih->chip) == BCM4360_CHIP_ID) || (CHIPID(sih->chip) == BCM4352_CHIP_ID)) &&
+ (CHIPREV(sih->chiprev) < 4) &&
+ ((CST4360_RSRC_INIT_MODE(sih->chipst) & 1) == 0)) {
+ /* BBPLL */
+ si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG6, ~0, 0x09048562);
+ /* AVB PLL */
+ si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG14, ~0, 0x09048562);
+ si_pmu_pllupd(sih);
+ } else if (((CHIPID(sih->chip) == BCM4360_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM4352_CHIP_ID)) &&
+ (CHIPREV(sih->chiprev) >= 4) &&
+ ((CST4360_RSRC_INIT_MODE(sih->chipst) & 1) == 0)) {
+ /* Changes for 4360B1 */
+
+ /* Enable REFCLK bit 11 */
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL1, 0x800, 0x800);
+
+ /* BBPLL */
+ si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG6, ~0, 0x080004e2);
+ si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG7, ~0, 0xE);
+ /* AVB PLL */
+ si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG14, ~0, 0x080004e2);
+ si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG15, ~0, 0xE);
+ si_pmu_pllupd(sih);
+ }
+ /* disable PLL open loop operation */
+ si_pll_closeloop(sih);
+
+ if (max_mask) {
+ /* Ensure there is no bit set in min_mask which is not set in max_mask */
+ max_mask |= min_mask;
+
+ /* First set the bits which change from 0 to 1 in max, then update the
+ * min_mask register and then reset the bits which change from 1 to 0
+ * in max. This is required as the bit in MAX should never go to 0 when
+ * the corresponding bit in min is still 1. Similarly the bit in min cannot
+ * be 1 when the corresponding bit in max is still 0.
+ */
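+ /* Concrete example: moving from (min=0x1, max=0x7) to (min=0x3,
+ * max=0x3): the OR below first leaves max at 0x7 (no bits drop yet),
+ * min is then written as 0x3, and the final max write of 0x3 clears
+ * the 1->0 bits only after min no longer needs them.
+ */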
+ OR_REG(osh, &pmu->max_res_mask, max_mask);
+ } else {
+ /* First set the bits which change from 0 to 1 in max, then update the
+ * min_mask register and then reset the bits which change from 1 to 0
+ * in max. This is required as the bit in MAX should never go to 0 when
+ * the corresponding bit in min is still 1. Similarly the bit in min cannot
+ * be 1 when the corresponding bit in max is still 0.
+ */
+ if (min_mask)
+ OR_REG(osh, &pmu->max_res_mask, min_mask);
+ }
+
+ /* Program min resource mask */
+ if (min_mask) {
+ PMU_MSG(("Changing min_res_mask to 0x%x\n", min_mask));
+ W_REG(osh, &pmu->min_res_mask, min_mask);
+ }
+
+ /* Program max resource mask */
+ if (max_mask) {
+ PMU_MSG(("Changing max_res_mask to 0x%x\n", max_mask));
+ W_REG(osh, &pmu->max_res_mask, max_mask);
+ }
+#if defined(SAVERESTORE) && defined(LDO3P3_MIN_RES_MASK)
+ if (SR_ENAB()) {
+ /* Set the default state for LDO3P3 protection */
+ if (getintvar(NULL, rstr_ldo_prot) == 1) {
+ si_pmu_min_res_ldo3p3_set(sih, osh, TRUE);
+ }
+ }
+#endif /* SAVERESTORE && LDO3P3_MIN_RES_MASK */
+
+ /* request htavail thru pcie core */
+ if (((CHIPID(sih->chip) == BCM4360_CHIP_ID) || (CHIPID(sih->chip) == BCM4352_CHIP_ID)) &&
+ (BUSTYPE(sih->bustype) == PCI_BUS) &&
+ (CHIPREV(sih->chiprev) < 4)) {
+ uint32 pcie_clk_ctl_st;
+
+ pcie_clk_ctl_st = si_corereg(sih, 3, 0x1e0, 0, 0);
+ si_corereg(sih, 3, 0x1e0, ~0, (pcie_clk_ctl_st | CCS_HTAREQ));
+ }
+
+ si_pmu_wait_for_steady_state(sih, osh, pmu);
+ /* Add some delay; allow resources to come up and settle. */
+ OSL_DELAY(2000);
+
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+} /* si_pmu_res_init */
+
+/* setup pll and query clock speed */
+typedef struct {
+ uint16 fref; /* x-tal frequency in [KHz] */
+ uint8 xf; /* x-tal index as contained in PMU control reg, see PMU programmers guide */
+ uint8 p1div;
+ uint8 p2div;
+ uint8 ndiv_int;
+ uint32 ndiv_frac;
+} pmu1_xtaltab0_t;
+
+/* 'xf' values corresponding to the 'xf' definition in the PMU control register */
+/* unclear why this enum contains '_640_' since the PMU prog guide says nothing about that */
+enum xtaltab0_640 {
+ XTALTAB0_640_12000K = 1,
+ XTALTAB0_640_13000K,
+ XTALTAB0_640_14400K,
+ XTALTAB0_640_15360K,
+ XTALTAB0_640_16200K,
+ XTALTAB0_640_16800K,
+ XTALTAB0_640_19200K,
+ XTALTAB0_640_19800K,
+ XTALTAB0_640_20000K,
+ XTALTAB0_640_24000K, /* warning: unknown in PMU programmers guide. seems incorrect. */
+ XTALTAB0_640_25000K,
+ XTALTAB0_640_26000K,
+ XTALTAB0_640_30000K,
+ XTALTAB0_640_33600K, /* warning: unknown in PMU programmers guide. seems incorrect. */
+ XTALTAB0_640_37400K,
+ XTALTAB0_640_38400K, /* warning: unknown in PMU programmers guide. seems incorrect. */
+ XTALTAB0_640_40000K,
+ XTALTAB0_640_48000K, /* warning: unknown in PMU programmers guide. seems incorrect. */
+ XTALTAB0_640_52000K
+};
+
+/* the following table is based on 880MHz fvco */
+static const pmu1_xtaltab0_t BCMINITDATA(pmu1_xtaltab0_880)[] = {
+ {12000, 1, 3, 22, 0x9, 0xFFFFEF},
+ {13000, 2, 1, 6, 0xb, 0x483483},
+ {14400, 3, 1, 10, 0xa, 0x1C71C7},
+ {15360, 4, 1, 5, 0xb, 0x755555},
+ {16200, 5, 1, 10, 0x5, 0x6E9E06},
+ {16800, 6, 1, 10, 0x5, 0x3Cf3Cf},
+ {19200, 7, 1, 4, 0xb, 0x755555},
+ {19800, 8, 1, 11, 0x4, 0xA57EB},
+ {20000, 9, 1, 11, 0x4, 0x0},
+ {24000, 10, 3, 11, 0xa, 0x0},
+ {25000, 11, 5, 16, 0xb, 0x0},
+ {26000, 12, 1, 2, 0x10, 0xEC4EC4},
+ {30000, 13, 3, 8, 0xb, 0x0},
+ {33600, 14, 1, 2, 0xd, 0x186186},
+ {38400, 15, 1, 2, 0xb, 0x755555},
+ {40000, 16, 1, 2, 0xb, 0},
+ {0, 0, 0, 0, 0, 0}
+};
+
+/* indices into pmu1_xtaltab0_880[] */
+#define PMU1_XTALTAB0_880_12000K 0
+#define PMU1_XTALTAB0_880_13000K 1
+#define PMU1_XTALTAB0_880_14400K 2
+#define PMU1_XTALTAB0_880_15360K 3
+#define PMU1_XTALTAB0_880_16200K 4
+#define PMU1_XTALTAB0_880_16800K 5
+#define PMU1_XTALTAB0_880_19200K 6
+#define PMU1_XTALTAB0_880_19800K 7
+#define PMU1_XTALTAB0_880_20000K 8
+#define PMU1_XTALTAB0_880_24000K 9
+#define PMU1_XTALTAB0_880_25000K 10
+#define PMU1_XTALTAB0_880_26000K 11
+#define PMU1_XTALTAB0_880_30000K 12
+#define PMU1_XTALTAB0_880_37400K 13 /* note: entry 13 in the table above is 33600KHz */
+#define PMU1_XTALTAB0_880_38400K 14
+#define PMU1_XTALTAB0_880_40000K 15
+
+/* the following table is based on 1760MHz fvco */
+static const pmu1_xtaltab0_t BCMINITDATA(pmu1_xtaltab0_1760)[] = {
+ {12000, 1, 3, 44, 0x9, 0xFFFFEF},
+ {13000, 2, 1, 12, 0xb, 0x483483},
+ {14400, 3, 1, 20, 0xa, 0x1C71C7},
+ {15360, 4, 1, 10, 0xb, 0x755555},
+ {16200, 5, 1, 20, 0x5, 0x6E9E06},
+ {16800, 6, 1, 20, 0x5, 0x3Cf3Cf},
+ {19200, 7, 1, 18, 0x5, 0x17B425},
+ {19800, 8, 1, 22, 0x4, 0xA57EB},
+ {20000, 9, 1, 22, 0x4, 0x0},
+ {24000, 10, 3, 22, 0xa, 0x0},
+ {25000, 11, 5, 32, 0xb, 0x0},
+ {26000, 12, 1, 4, 0x10, 0xEC4EC4},
+ {30000, 13, 3, 16, 0xb, 0x0},
+ {38400, 14, 1, 10, 0x4, 0x955555},
+ {40000, 15, 1, 4, 0xb, 0},
+ {0, 0, 0, 0, 0, 0}
+};
+
+#define XTAL_FREQ_24000MHZ 24000
+#define XTAL_FREQ_29985MHZ 29985
+#define XTAL_FREQ_30000MHZ 30000
+#define XTAL_FREQ_37400MHZ 37400
+#define XTAL_FREQ_48000MHZ 48000
+#define XTAL_FREQ_59970MHZ 59970
+
+/* 'xf' values corresponding to the 'xf' definition in the PMU control register */
+/* unclear why this enum contains '_960_' since the PMU prog guide says nothing about that */
+enum xtaltab0_960 {
+ XTALTAB0_960_12000K = 1,
+ XTALTAB0_960_13000K,
+ XTALTAB0_960_14400K,
+ XTALTAB0_960_15360K,
+ XTALTAB0_960_16200K,
+ XTALTAB0_960_16800K,
+ XTALTAB0_960_19200K,
+ XTALTAB0_960_19800K,
+ XTALTAB0_960_20000K,
+ XTALTAB0_960_24000K, /* warning: unknown in PMU programmers guide. seems incorrect. */
+ XTALTAB0_960_25000K,
+ XTALTAB0_960_26000K,
+ XTALTAB0_960_30000K,
+ XTALTAB0_960_33600K, /* warning: unknown in PMU programmers guide. seems incorrect. */
+ XTALTAB0_960_37400K,
+ XTALTAB0_960_38400K, /* warning: unknown in PMU programmers guide. seems incorrect. */
+ XTALTAB0_960_40000K,
+ XTALTAB0_960_48000K, /* warning: unknown in PMU programmers guide. seems incorrect. */
+ XTALTAB0_960_52000K,
+ XTALTAB0_960_59970K
+};
+
+/**
+ * given an x-tal frequency, this table specifies the PLL params to use to generate a 960MHz output
+ * clock. This output clock feeds the clock divider network. The defines of the form
+ * PMU1_XTALTAB0_960_* index into this array.
+ */
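+/*
+ * Numerically, the rows below satisfy (a sketch; p2div is 1 in every row
+ * except the 59970 placeholder, and ndiv_frac is a 24-bit fraction):
+ *
+ *   fvco [KHz] ~= (fref / p1div) * (ndiv_int + ndiv_frac / (1 << 24))
+ *
+ * e.g. the 37400 row: (37400 / 2) * (0x33 + 0x563EF9 / 2^24) ~= 960000.
+ */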
+static const pmu1_xtaltab0_t BCMINITDATA(pmu1_xtaltab0_960)[] = {
+/* fref xf p1div p2div ndiv_int ndiv_frac */
+ {12000, 1, 1, 1, 0x50, 0x0 }, /* array index 0 */
+ {13000, 2, 1, 1, 0x49, 0xD89D89},
+ {14400, 3, 1, 1, 0x42, 0xAAAAAA},
+ {15360, 4, 1, 1, 0x3E, 0x800000},
+ {16200, 5, 1, 1, 0x3B, 0x425ED0},
+ {16800, 6, 1, 1, 0x39, 0x249249},
+ {19200, 7, 1, 1, 0x32, 0x0 },
+ {19800, 8, 1, 1, 0x30, 0x7C1F07},
+ {20000, 9, 1, 1, 0x30, 0x0 },
+ {24000, 10, 1, 1, 0x28, 0x0 },
+ {25000, 11, 1, 1, 0x26, 0x666666}, /* array index 10 */
+ {26000, 12, 1, 1, 0x24, 0xEC4EC4},
+ {30000, 13, 1, 1, 0x20, 0x0 },
+ {33600, 14, 1, 1, 0x1C, 0x924924},
+ {37400, 15, 2, 1, 0x33, 0x563EF9},
+ {38400, 16, 2, 1, 0x32, 0x0 },
+ {40000, 17, 2, 1, 0x30, 0x0 },
+ {48000, 18, 2, 1, 0x28, 0x0 },
+ {52000, 19, 2, 1, 0x24, 0xEC4EC4}, /* array index 18 */
+ {59970, 20, 0, 0, 0, 0 },
+ /* TBD: will separate 59970 for 4387B0 for new pll scheme */
+ {0, 0, 0, 0, 0, 0 }
+};
+
+static const pmu1_xtaltab0_t BCMINITDATA(pmu1_xtaltab0_4369_963)[] = {
+/* fref xf p1div NA ndiv_int ndiv_frac */
+ {12000, 1, 1, 1, 0x50, 0x40000}, /* array index 0 */
+ {13000, 2, 1, 1, 0x4A, 0x13B14},
+ {14400, 3, 1, 1, 0x42, 0xE0000},
+ {15360, 4, 1, 1, 0x3E, 0xB2000},
+ {16200, 5, 1, 1, 0x3B, 0x71C72},
+ {16800, 6, 1, 1, 0x39, 0x52492},
+ {19200, 7, 1, 1, 0x32, 0x28000},
+ {19800, 8, 1, 1, 0x30, 0xA2E8C},
+ {20000, 9, 1, 1, 0x30, 0x26666},
+ {24000, 10, 1, 1, 0x28, 0x20000},
+ {25000, 11, 1, 1, 0x26, 0x851EC}, /* array index 10 */
+ {26000, 12, 1, 1, 0x25, 0x09D8A},
+ {30000, 13, 1, 1, 0x20, 0x1999A},
+ {33600, 14, 1, 1, 0x1C, 0xA9249},
+ {37400, 15, 1, 1, 0x19, 0xBFA86},
+ {38400, 16, 1, 1, 0x19, 0x14000},
+ {40000, 17, 1, 1, 0x18, 0x13333},
+ {48000, 18, 1, 1, 0x14, 0x10000},
+ {52000, 19, 1, 1, 0x12, 0x84EC5}, /* array index 18 */
+ {0, 0, 0, 0, 0, 0 }
+};
+
+static const pmu1_xtaltab0_t BCMINITDATA(pmu1_xtaltab0_4362_963)[] = {
+/* fref xf p1div NA ndiv_int ndiv_frac */
+ {12000, 1, 1, 1, 0x50, 0x40000}, /* array index 0 */
+ {13000, 2, 1, 1, 0x4A, 0x13B14},
+ {14400, 3, 1, 1, 0x42, 0xE0000},
+ {15360, 4, 1, 1, 0x3E, 0xB2000},
+ {16200, 5, 1, 1, 0x3B, 0x71C72},
+ {16800, 6, 1, 1, 0x39, 0x52492},
+ {19200, 7, 1, 1, 0x32, 0x28000},
+ {19800, 8, 1, 1, 0x30, 0xA2E8C},
+ {20000, 9, 1, 1, 0x30, 0x26666},
+ {24000, 10, 1, 1, 0x28, 0x20000},
+ {25000, 11, 1, 1, 0x26, 0x851EC}, /* array index 10 */
+ {26000, 12, 1, 1, 0x25, 0x09D8A},
+ {30000, 13, 1, 1, 0x20, 0x1999A},
+ {33600, 14, 1, 1, 0x1C, 0xA9249},
+ {37400, 15, 1, 1, 0x19, 0xBFA86},
+ {38400, 16, 1, 1, 0x19, 0x14000},
+ {40000, 17, 1, 1, 0x18, 0x13333},
+ {48000, 18, 1, 1, 0x14, 0x10000},
+ {52000, 19, 1, 1, 0x12, 0x84EC5}, /* array index 18 */
+ {0, 0, 0, 0, 0, 0 }
+};
+
+/* Indices into array pmu1_xtaltab0_960[]. Keep array and these defines synchronized. */
+#define PMU1_XTALTAB0_960_12000K 0
+#define PMU1_XTALTAB0_960_13000K 1
+#define PMU1_XTALTAB0_960_14400K 2
+#define PMU1_XTALTAB0_960_15360K 3
+#define PMU1_XTALTAB0_960_16200K 4
+#define PMU1_XTALTAB0_960_16800K 5
+#define PMU1_XTALTAB0_960_19200K 6
+#define PMU1_XTALTAB0_960_19800K 7
+#define PMU1_XTALTAB0_960_20000K 8
+#define PMU1_XTALTAB0_960_24000K 9
+#define PMU1_XTALTAB0_960_25000K 10
+#define PMU1_XTALTAB0_960_26000K 11
+#define PMU1_XTALTAB0_960_30000K 12
+#define PMU1_XTALTAB0_960_33600K 13
+#define PMU1_XTALTAB0_960_37400K 14
+#define PMU1_XTALTAB0_960_38400K 15
+#define PMU1_XTALTAB0_960_40000K 16
+#define PMU1_XTALTAB0_960_48000K 17
+#define PMU1_XTALTAB0_960_52000K 18
+#define PMU1_XTALTAB0_960_59970K 19
+
+#define PMU15_XTALTAB0_12000K 0
+#define PMU15_XTALTAB0_20000K 1
+#define PMU15_XTALTAB0_26000K 2
+#define PMU15_XTALTAB0_37400K 3
+#define PMU15_XTALTAB0_52000K 4
+#define PMU15_XTALTAB0_END 5
+
+/* Describes the pllcontrol data (info).
+ * The table holding the register values maps one-to-one onto this one.
+ */
+typedef struct {
+ uint16 clock; /**< x-tal frequency in [KHz] */
+ uint8 mode; /**< spur mode */
+ uint8 xf; /**< corresponds with xf bitfield in PMU control register */
+} pllctrl_data_t;
+
+/* ***************************** tables for 43012a0 *********************** */
+
+/**
+ * PLL control register table giving info about the xtal supported for 43012
+ * There should be a one to one mapping between pmu1_pllctrl_tab_43012_960mhz[] and this table.
+ */
+static const pllctrl_data_t(pmu1_xtaltab0_43012)[] = {
+/* clock mode xf */
+ {37400, 0, XTALTAB0_960_37400K},
+ {37400, 100, XTALTAB0_960_37400K},
+ {26000, 0, XTALTAB0_960_26000K},
+ {24000, 0, XTALTAB0_960_24000K}
+};
+
+/*
+* There should be a one to one mapping between pmu1_pllctrl_tab_43012_640mhz[]
+* and this table. The PLL control5 register is related to HSIC, which is not
+* supported in 43012. Use a safe DCO code=56 by default; across PVT, openloop
+* VCO Max=320MHz, Min=100MHz.
+*/
+#ifdef BCMQT
+static const uint32 (pmu1_pllctrl_tab_43012_1600mhz)[] = {
+/* Fvco is taken as 160.1 */
+/* PLL 0 PLL 1 PLL 2 PLL 3 PLL 4 PLL 5 */
+ 0x072fe811, 0x00800000, 0x00000000, 0x038051e8, 0x00000000, 0x00000000,
+ 0x0e5fd422, 0x00800000, 0x00000000, 0x000011e8, 0x00000000, 0x00000000
+};
+#else
+static const uint32 (pmu1_pllctrl_tab_43012_1600mhz)[] = {
+/* Fvco is taken as 160.1 */
+/* PLL 0 PLL 1 PLL 2 PLL 3 PLL 4 */
+ 0x07df2411, 0x00800000, 0x00000000, 0x038051e8, 0x00000000,
+ 0x0e5fd422, 0x00800000, 0x00000000, 0x000011e8, 0x00000000,
+ 0x1d89dc12, 0x00800000, 0x00000000, 0x06d04de8, 0x00000000,
+ 0x072fe828, 0x00800000, 0x00000000, 0x06d04de8, 0x00000000
+};
+#endif /* BCMQT */
+/* ************************ tables for 43012a0 END *********************** */
+
+/* ***************************** tables for 4369a0 *********************** */
+/* should get real value from hardware guys */
+/**
+ * PLL control register table giving info about the xtal supported for 4369
+ * There should be a one to one mapping between pmu1_pllctrl_tab_4369_960mhz[] and this table.
+ * Even though the macro name suggests 960MHz (XTALTAB0_960_37400K), the BBPLL VCO is set to 963MHz
+ */
+static const pllctrl_data_t BCMATTACHDATA(pmu1_xtaltab0_4369)[] = {
+/* clock mode xf */
+ {37400, 0, XTALTAB0_960_37400K}
+};
+
+/**
+ * PLL control register table giving info about the xtal supported for 4369.
+ * There should be a one to one mapping between pmu1_pllctrl_tab_4369_963mhz[] and this table.
+ */
+
+/* For 4369, 960.1MHz BBPLL freq is chosen to avoid the spurs
+* freq table : pll1 : fvco 960.1M, pll2 for arm : 400 MHz
+*/
+#define PMU_PLL3_4369B0_DEFAULT 0x006ABF86
+static const uint32 BCMATTACHDATA(pmu1_pllctrl_tab_4369_960p1mhz)[] = {
+/* Default values for unused registers 4-7 as sw loop execution will go for 8 times */
+/* Fvco is taken as 963M */
+/* PLL 0 PLL 1 PLL 2 PLL 3 PLL 4 PLL 5 PLL 6 PLL 7 PLL 8 PLL 9 PLL 10 */
+ 0x15000000, 0x06050603, 0x01910806, PMU_PLL3_4369B0_DEFAULT,
+ 0x00000000, 0x32800000, 0xC7AE00A9, 0x40800000,
+ 0x00000000, 0x00000000, 0x00000000
+};
+
+/* ************************ tables for 4369a0 END *********************** */
+
+/* ***************************** tables for 4362a0 *********************** */
+/* should get real value from hardware guys */
+/**
+ * PLL control register table giving info about the xtal supported for 4362
+ * There should be a one to one mapping between pmu1_pllctrl_tab_4362_960mhz[] and this table.
+ * Even though the macro name suggests 960MHz (XTALTAB0_960_37400K), the BBPLL VCO is set to 963MHz
+ */
+static const pllctrl_data_t BCMATTACHDATA(pmu1_xtaltab0_4362)[] = {
+/* clock mode xf */
+ {37400, 0, XTALTAB0_960_37400K}
+};
+
+/* For 4362, 960.1MHz BBPLL freq is chosen to avoid the spurs
+* freq table : pll1 : fvco 960.1M, pll2 for arm : 400 MHz
+*/
+/* This freq actually around 960.123 */
+#define PMU_PLL3_4362A0_DEFAULT 0x006ABF86
+
+static const uint32 BCMATTACHDATA(pmu1_pllctrl_tab_4362_960p1mhz)[] = {
+/* Default values for unused registers 4-7 as sw loop execution will go for 8 times */
+/* Fvco is taken as 963M */
+/* PLL 0 PLL 1 PLL 2 PLL 3 PLL 4 PLL 5 PLL 6 PLL 7 PLL 8 PLL 9 PLL 10 */
+ 0x15000000, 0x06050603, 0x01910806, PMU_PLL3_4362A0_DEFAULT,
+ 0x00000000, 0x32800000, 0xC7AE00A9, 0x40800000,
+ 0x00000000, 0x00000000, 0x00000000
+};
+
+/* ************************ tables for 4362a0 END *********************** */
+
+/* ***************************** tables for 4389 *********************** */
+static const pllctrl_data_t BCMATTACHDATA(pmu1_xtaltab0_4389)[] = {
+/* clock mode xf */
+ {XTAL_FREQ_59970MHZ, 0, XTALTAB0_960_59970K}
+};
+
+static const uint32 BCMATTACHDATA(pmu1_pllctrl_tab_4389_963mhz)[] = {
+/* Default values for all registers */
+/* Fvco (BBPLL) is taken as 963M */
+/* PLL 0 PLL 1 PLL 2 PLL 3 PLL 4 PLL 5 PLL 6 PLL 7 PLL 8 PLL 9 PLL 10 */
+ 0x29d00000, 0x30100c03, 0x00240c06, 0x597ff060,
+ 0x00000000, 0x00000800, 0x00321d3a, 0x000551ff,
+ 0x00000000, 0x10000000, 0x00000000
+};
+
+/* ************************ tables for 4389 END *********************** */
+
+/** returns a table that instructs how to program the BBPLL for a particular xtal frequency */
+static const pmu1_xtaltab0_t *
+BCMPOSTTRAPFN(si_pmu1_xtaltab0)(si_t *sih)
+{
+#ifdef BCMDBG_PMU
+ char chn[8];
+#endif
+ switch (CHIPID(sih->chip)) {
+ case BCM4360_CHIP_ID:
+ case BCM43460_CHIP_ID:
+ case BCM4352_CHIP_ID:
+ case BCM43526_CHIP_ID:
+ CASE_BCM43602_CHIP:
+ case BCM43012_CHIP_ID:
+ case BCM43013_CHIP_ID:
+ case BCM43014_CHIP_ID:
+ return pmu1_xtaltab0_960;
+ case BCM4369_CHIP_GRPID:
+ return pmu1_xtaltab0_4369_963;
+ case BCM4362_CHIP_GRPID:
+ return pmu1_xtaltab0_4362_963;
+ case BCM4376_CHIP_GRPID:
+ case BCM4378_CHIP_GRPID:
+ case BCM4385_CHIP_GRPID:
+ case BCM4387_CHIP_GRPID:
+ case BCM4388_CHIP_GRPID:
+ case BCM4389_CHIP_GRPID:
+ case BCM4397_CHIP_GRPID:
+ return pmu1_xtaltab0_960;
+ default:
+ PMU_MSG(("si_pmu1_xtaltab0: Unknown chipid %s\n", bcm_chipname(sih->chip, chn, 8)));
+ break;
+ }
+ ASSERT(0);
+ return NULL;
+} /* si_pmu1_xtaltab0 */
+
+/** returns chip specific PLL settings for default xtal frequency and VCO output frequency */
+static const pmu1_xtaltab0_t *
+BCMPOSTTRAPFN(si_pmu1_xtaldef0)(si_t *sih)
+{
+#ifdef BCMDBG_PMU
+ char chn[8];
+#endif
+
+ switch (CHIPID(sih->chip)) {
+ case BCM4360_CHIP_ID:
+ case BCM4352_CHIP_ID:
+ case BCM43460_CHIP_ID:
+ case BCM43526_CHIP_ID:
+ case BCM43012_CHIP_ID:
+ case BCM43013_CHIP_ID:
+ /* Default to 37400Khz */
+ return &pmu1_xtaltab0_960[PMU1_XTALTAB0_960_37400K];
+ case BCM43014_CHIP_ID:
+ /* Default to 24000Khz */
+ return &pmu1_xtaltab0_960[PMU1_XTALTAB0_960_24000K];
+ CASE_BCM43602_CHIP:
+ return &pmu1_xtaltab0_960[PMU1_XTALTAB0_960_40000K];
+
+ case BCM4376_CHIP_GRPID:
+ case BCM4378_CHIP_GRPID:
+ return &pmu1_xtaltab0_960[PMU1_XTALTAB0_960_37400K];
+ case BCM4385_CHIP_GRPID:
+ case BCM4387_CHIP_GRPID:
+ case BCM4388_CHIP_GRPID:
+ case BCM4389_CHIP_GRPID:
+ case BCM4397_CHIP_GRPID:
+ return &pmu1_xtaltab0_960[PMU1_XTALTAB0_960_59970K];
+ case BCM4369_CHIP_GRPID:
+ return &pmu1_xtaltab0_4369_963[PMU1_XTALTAB0_960_37400K];
+ case BCM4362_CHIP_GRPID:
+ return &pmu1_xtaltab0_4362_963[PMU1_XTALTAB0_960_37400K];
+ default:
+ PMU_MSG(("si_pmu1_xtaldef0: Unknown chipid %s\n", bcm_chipname(sih->chip, chn, 8)));
+ break;
+ }
+ ASSERT(0);
+ return NULL;
+} /* si_pmu1_xtaldef0 */
+
+static uint32 fvco_4360 = 0;
+
+/**
+ * store the val on init, then if func is called during normal operation
+ * don't touch core regs anymore
+ */
+static uint32
+BCMPOSTTRAPFN(si_pmu_pll1_fvco_4360)(si_t *sih, osl_t *osh)
+{
+ uint32 xf, ndiv_int, ndiv_frac, fvco, pll_reg, p1_div_scale;
+ uint32 r_high, r_low, int_part, frac_part, rounding_const;
+ uint8 p1_div;
+ uint origidx = 0;
+ bcm_int_bitmask_t intr_val;
+
+ if (fvco_4360) {
+ printf("si_pmu_pll1_fvco_4360:attempt to query fvco during normal operation\n");
+ /* this will insure that the func is called only once upon init */
+ return fvco_4360;
+ }
+
+ /* Remember original core before switch to chipc */
+ si_switch_core(sih, CC_CORE_ID, &origidx, &intr_val);
+
+ xf = si_pmu_alp_clock(sih, osh)/1000;
+
+ /* pll reg 10 , p1div, ndif_mode, ndiv_int */
+ pll_reg = si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG10, 0, 0);
+ p1_div = pll_reg & 0xf;
+ ndiv_int = (pll_reg >> 7) & 0x1f;
+
+ /* pllctrl11 */
+ pll_reg = si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG11, 0, 0);
+ ndiv_frac = pll_reg & 0xfffff;
+
+ int_part = xf * ndiv_int;
+
+ rounding_const = 1 << (BBPLL_NDIV_FRAC_BITS - 1);
+ math_uint64_multiple_add(&r_high, &r_low, ndiv_frac, xf, rounding_const);
+ math_uint64_right_shift(&frac_part, r_high, r_low, BBPLL_NDIV_FRAC_BITS);
+
+ if (p1_div == 0) {
+ PMU_ERROR(("p1_div calc returned 0! [%d]\n", __LINE__));
+ ROMMABLE_ASSERT(0);
+ p1_div_scale = 0;
+ } else {
+ p1_div_scale = (1 << P1_DIV_SCALE_BITS) / p1_div;
+ }
+
+ rounding_const = 1 << (P1_DIV_SCALE_BITS - 1);
+
+ math_uint64_multiple_add(&r_high, &r_low, (int_part + frac_part),
+ p1_div_scale, rounding_const);
+ math_uint64_right_shift(&fvco, r_high, r_low, P1_DIV_SCALE_BITS);
+
+ /* Return to original core */
+ si_restore_core(sih, origidx, &intr_val);
+
+ fvco_4360 = fvco;
+ return fvco;
+} /* si_pmu_pll1_fvco_4360 */
+
+/**
+ * Specific to 43012: calculates the FVCO frequency from the XTAL freq.
+ * Returns the FVCO frequency in [khz] units
+ */
+static uint32
+BCMPOSTTRAPFN(si_pmu_pll1_fvco_43012)(si_t *sih, osl_t *osh)
+{
+ uint32 xf, ndiv_int, ndiv_frac, fvco, pll_reg, p1_div_scale;
+ uint32 r_high, r_low, int_part, frac_part, rounding_const;
+ uint8 p_div;
+ chipcregs_t *cc;
+ uint origidx = 0;
+ bcm_int_bitmask_t intr_val;
+
+ /* Remember original core before switch to chipc */
+ cc = (chipcregs_t *)si_switch_core(sih, CC_CORE_ID, &origidx, &intr_val);
+ ASSERT(cc != NULL);
+ BCM_REFERENCE(cc);
+
+ xf = si_pmu_alp_clock(sih, osh)/1000;
+
+ pll_reg = si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG0, 0, 0);
+
+ ndiv_int = (pll_reg & PMU43012_PLL0_PC0_NDIV_INT_MASK) >>
+ PMU43012_PLL0_PC0_NDIV_INT_SHIFT;
+
+ ndiv_frac = (pll_reg & PMU43012_PLL0_PC0_NDIV_FRAC_MASK) >>
+ PMU43012_PLL0_PC0_NDIV_FRAC_SHIFT;
+
+ pll_reg = si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG3, 0, 0);
+
+ p_div = (pll_reg & PMU43012_PLL0_PC3_PDIV_MASK) >>
+ PMU43012_PLL0_PC3_PDIV_SHIFT;
+
+ /* If the p_div value read from PLL control register is zero,
+ * then return default FVCO value instead of computing the FVCO frequency
+ * using XTAL frequency
+ */
+ if (!p_div) {
+ PMU_ERROR(("pll control register read failed [%d]\n", __LINE__));
+ ROMMABLE_ASSERT(0);
+ fvco = 0;
+ goto done;
+ }
+ /* Actual expression is as below */
+ /* fvco1 = ((xf * (1/p1_div)) * (ndiv_int + (ndiv_frac /(1 << 20)))); */
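+ /* e.g. with xf = 24000 kHz, p_div = 1, ndiv_int = 40 (0x28) and
+ * ndiv_frac = 0 (illustrative values, cf. the 24000 row of
+ * pmu1_xtaltab0_960), the math below yields fvco = 24000 * 40 =
+ * 960000 kHz.
+ */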
+
+ int_part = xf * ndiv_int;
+ rounding_const = 1 << (PMU43012_PLL_NDIV_FRAC_BITS - 1);
+ math_uint64_multiple_add(&r_high, &r_low, ndiv_frac, xf, rounding_const);
+ math_uint64_right_shift(&frac_part, r_high, r_low, PMU43012_PLL_NDIV_FRAC_BITS);
+
+ p1_div_scale = (1 << PMU43012_PLL_P_DIV_SCALE_BITS) / p_div;
+ rounding_const = 1 << (PMU43012_PLL_P_DIV_SCALE_BITS - 1);
+
+ math_uint64_multiple_add(&r_high, &r_low, (int_part + frac_part),
+ p1_div_scale, rounding_const);
+ math_uint64_right_shift(&fvco, r_high, r_low, PMU43012_PLL_P_DIV_SCALE_BITS);
+
+done:
+ /* Return to original core */
+ si_restore_core(sih, origidx, &intr_val);
+ return fvco;
+} /* si_pmu_pll1_fvco_43012 */
+
+/** returns chip specific default BaseBand pll fvco frequency in [khz] units */
+static uint32
+BCMPOSTTRAPFN(si_pmu1_pllfvco0)(si_t *sih)
+{
+#ifdef BCMDBG_PMU
+ char chn[8];
+#endif
+
+ switch (CHIPID(sih->chip)) {
+ case BCM4352_CHIP_ID:
+ case BCM43526_CHIP_ID:
+ return FVCO_960;
+
+ CASE_BCM43602_CHIP:
+ return FVCO_960;
+ case BCM4360_CHIP_ID:
+ case BCM43460_CHIP_ID:
+ {
+ osl_t *osh;
+ osh = si_osh(sih);
+ return si_pmu_pll1_fvco_4360(sih, osh);
+ }
+ case BCM43012_CHIP_ID:
+ case BCM43013_CHIP_ID:
+ case BCM43014_CHIP_ID:
+ {
+ osl_t *osh;
+ osh = si_osh(sih);
+ return si_pmu_pll1_fvco_43012(sih, osh);
+ }
+ case BCM4369_CHIP_GRPID:
+ return FVCO_960p1;
+ case BCM4362_CHIP_GRPID:
+ return FVCO_960p1;
+ case BCM4376_CHIP_GRPID:
+ case BCM4378_CHIP_GRPID:
+ return FVCO_960p1;
+ case BCM4385_CHIP_GRPID:
+ case BCM4387_CHIP_GRPID:
+ case BCM4388_CHIP_GRPID:
+ case BCM4389_CHIP_GRPID:
+ case BCM4397_CHIP_GRPID:
+ return FVCO_963p01;
+ default:
+ PMU_MSG(("si_pmu1_pllfvco0: Unknown chipid %s\n", bcm_chipname(sih->chip, chn, 8)));
+ break;
+ }
+ ASSERT(0);
+ return 0;
+} /* si_pmu1_pllfvco0 */
+
+/**
+ * returns chip specific default pll fvco frequency in [khz] units
+ */
+static uint32
+BCMPOSTTRAPFN(si_pmu1_pllfvco0_pll2)(si_t *sih)
+{
+#ifdef BCMDBG_PMU
+ char chn[8];
+#endif
+
+ switch (CHIPID(sih->chip)) {
+ case BCM4369_CHIP_GRPID:
+ case BCM4362_CHIP_GRPID:
+ case BCM4388_CHIP_GRPID:
+ case BCM4397_CHIP_GRPID:
+ return si_get_armpllclkfreq(sih) * 1000;
+ case BCM4389_CHIP_GRPID:
+ return SI_INFO(sih)->armpllclkfreq ? si_get_armpllclkfreq(sih) * 1000 : FVCO_1002p8;
+ case BCM4376_CHIP_GRPID:
+ case BCM4378_CHIP_GRPID:
+ case BCM4385_CHIP_GRPID:
+ case BCM4387_CHIP_GRPID:
+ return FVCO_400;
+ default:
+ PMU_MSG(("si_pmu1_pllfvco0_pll2 : Unknown chipid %s\n",
+ bcm_chipname(sih->chip, chn, 8)));
+ ASSERT(0);
+ break;
+ }
+ return 0;
+} /* si_pmu1_pllfvco0_pll2 */
+
+/** query alp/xtal clock frequency */
+static uint32
+BCMPOSTTRAPFN(si_pmu1_alpclk0)(si_t *sih, osl_t *osh, pmuregs_t *pmu)
+{
+ const pmu1_xtaltab0_t *xt;
+ uint32 xf;
+ uint8 xtdiv = 1;
+
+ BCM_REFERENCE(sih);
+
+ /* Find the frequency in the table */
+ xf = (R_REG(osh, &pmu->pmucontrol) & PCTL_XTALFREQ_MASK) >>
+ PCTL_XTALFREQ_SHIFT;
+ for (xt = si_pmu1_xtaltab0(sih); xt != NULL && xt->fref != 0; xt ++)
+ if (xt->xf == xf)
+ break;
+ /* Could not find it so assign a default value */
+ if (xt == NULL || xt->fref == 0)
+ xt = si_pmu1_xtaldef0(sih);
+ ASSERT(xt != NULL && xt->fref != 0);
+
+ switch (CHIPID(sih->chip))
+ {
+ case BCM4385_CHIP_GRPID:
+ case BCM4387_CHIP_GRPID:
+ case BCM4388_CHIP_GRPID:
+ case BCM4389_CHIP_GRPID:
+ case BCM4397_CHIP_GRPID:
+ /* xtalfreq for these chips is 59.97 MHz,
+ * but the ALP clk is xtal / 2 (29.985 MHz) by default.
+ */
+ xtdiv = 2;
+ break;
+ default:
+ break;
+ }
+
+ return (xt->fref * 1000) / xtdiv;
+}
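+/*
+ * Example: on a 4387 with xf decoding to the 59970 entry, the divider above
+ * yields ALP = 59970 / 2 = 29985 KHz (cf. XTAL_FREQ_29985MHZ), matching the
+ * xtal / 2 default noted in the switch.
+ */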
+
+/**
+ * Before the PLL is switched off, the HT clocks need to be deactivated, and reactivated
+ * when the PLL is switched on again.
+ * This function returns the chip specific HT clock resources (HT and MACPHY clocks).
+ */
+static uint32
+si_pmu_htclk_mask(si_t *sih)
+{
+ /* chip specific bit position of various resources */
+ rsc_per_chip_t *rsc = si_pmu_get_rsc_positions(sih);
+
+ uint32 ht_req = (PMURES_BIT(rsc->ht_avail) | PMURES_BIT(rsc->macphy_clkavail));
+
+ switch (CHIPID(sih->chip))
+ {
+ CASE_BCM43602_CHIP:
+ case BCM43012_CHIP_ID:
+ case BCM43013_CHIP_ID:
+ case BCM43014_CHIP_ID:
+ case BCM4369_CHIP_GRPID:
+ case BCM4362_CHIP_GRPID:
+ case BCM4376_CHIP_GRPID:
+ case BCM4378_CHIP_GRPID:
+ case BCM4385_CHIP_GRPID:
+ case BCM4387_CHIP_GRPID:
+ case BCM4388_CHIP_GRPID:
+ case BCM4389_CHIP_GRPID:
+ case BCM4397_CHIP_GRPID:
+ ht_req |= PMURES_BIT(rsc->ht_start);
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+
+ return ht_req;
+} /* si_pmu_htclk_mask */
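+
+/* Illustration (hypothetical resource bit positions, not taken from any real
+ * chip): with ht_avail = 24, macphy_clkavail = 25 and ht_start = 23, the mask
+ * built above would be (1 << 24) | (1 << 25) | (1 << 23) = 0x03800000.
+ */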
+
+/** returns ALP frequency in [Hz] */
+static uint32
+BCMATTACHFN(si_pmu_def_alp_clock)(si_t *sih, osl_t *osh)
+{
+ uint32 clock = ALP_CLOCK;
+
+ BCM_REFERENCE(osh);
+
+ switch (CHIPID(sih->chip)) {
+ case BCM43012_CHIP_ID:
+ case BCM43013_CHIP_ID:
+ case BCM43014_CHIP_ID:
+ case BCM4369_CHIP_GRPID:
+ case BCM4362_CHIP_GRPID:
+ case BCM4376_CHIP_GRPID:
+ case BCM4378_CHIP_GRPID:
+ case BCM4385_CHIP_GRPID:
+ case BCM4387_CHIP_GRPID:
+ case BCM4388_CHIP_GRPID:
+ case BCM4389_CHIP_GRPID:
+ case BCM4397_CHIP_GRPID:
+
+#ifdef UNRELEASEDCHIP
+#endif
+
+ clock = 37400*1000;
+ break;
+ CASE_BCM43602_CHIP:
+ clock = 40000 * 1000;
+ break;
+ }
+
+ return clock;
+}
+
+/**
+ * The BBPLL register set needs to be reprogrammed because the x-tal frequency is not known at
+ * compile time, or a different spur mode is selected. This function writes appropriate values into
+ * the BBPLL registers. It returns the 'xf', corresponding to the 'xf' bitfield in the PMU control
+ * register.
+ * 'xtal' : xtal frequency in [KHz]
+ * 'pllctrlreg_update': contains info on what entries to use in 'pllctrlreg_val' for the given
+ * x-tal frequency and spur mode
+ * 'pllctrlreg_val' : contains a superset of the BBPLL values to write
+ *
+ * Note: if pmu is NULL, this function returns xf, without programming PLL registers.
+ * This function is only called for pmu1_ type chips, perhaps we should rename it.
+ */
+static uint8
+si_pmu_pllctrlreg_update(si_t *sih, osl_t *osh, pmuregs_t *pmu, uint32 xtal,
+ uint8 spur_mode, const pllctrl_data_t *pllctrlreg_update, uint32 array_size,
+ const uint32 *pllctrlreg_val)
+{
+ uint8 indx, reg_offset, xf = 0;
+ uint8 pll_ctrlcnt = 0;
+
+ ASSERT(pllctrlreg_update);
+
+ if (PMUREV(sih->pmurev) >= 5) {
+ pll_ctrlcnt = (sih->pmucaps & PCAP5_PC_MASK) >> PCAP5_PC_SHIFT;
+ } else {
+ pll_ctrlcnt = (sih->pmucaps & PCAP_PC_MASK) >> PCAP_PC_SHIFT;
+ }
+
+ /* Program the PLL control register if the xtal value matches with the table entry value */
+ for (indx = 0; indx < array_size; indx++) {
+ /* If the entry does not match the xtal and spur_mode just continue the loop */
+ if (!((pllctrlreg_update[indx].clock == (uint16)xtal) &&
+ (pllctrlreg_update[indx].mode == spur_mode)))
+ continue;
+		/*
+		 * Don't program the PLL registers if the register base is NULL.
+		 * If it is NULL, just return the xf.
+		 */
+ if (pmu) {
+ for (reg_offset = 0; reg_offset < pll_ctrlcnt; reg_offset++) {
+ si_pmu_pllcontrol(sih, reg_offset, ~0,
+ pllctrlreg_val[indx*pll_ctrlcnt + reg_offset]);
+ }
+
+			/* for 4369, the ARM clock frequency can be set from NVRAM - default is 400 MHz */
+ if ((BCM4369_CHIP(sih->chip) || BCM4362_CHIP(sih->chip)) &&
+ (pll_ctrlcnt > PMU1_PLL0_PLLCTL6)) {
+ si_pmu_pll6val_armclk_calc(osh, pmu,
+ si_get_armpllclkfreq(sih), xtal, TRUE);
+ }
+ }
+ xf = pllctrlreg_update[indx].xf;
+ break;
+ }
+ return xf;
+} /* si_pmu_pllctrlreg_update */
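+
+/* Usage sketch (hypothetical table entry, for illustration only): an entry
+ * { .clock = 37400, .mode = 0, .xf = 7 } would match a 37.4 MHz x-tal with
+ * spur_mode 0; with a non-NULL 'pmu', the pll_ctrlcnt PLL control registers
+ * are then written from pllctrlreg_val[indx * pll_ctrlcnt + reg_offset], and
+ * the entry's xf (7 here) is returned for the PMU control register.
+ */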
+
+/*
+ * Calculate p1div, ndiv_int and ndiv_frac for a given clock ratio.
+ * Input: fvco, xtal
+ * Output: ndiv_int, ndiv_frac
+ * Returns: p1div
+ */
+uint8
+si_pmu_pll28nm_calc_ndiv(uint32 fvco, uint32 xtal, uint32 *ndiv_int, uint32 *ndiv_frac)
+{
+ uint8 p1div;
+ uint32 temp_high, temp_low;
+ ASSERT(xtal <= 0xFFFFFFFF / 1000);
+ p1div = 1 + (uint8) ((xtal * 1000) / 54000000UL);
+ *ndiv_int = (fvco * p1div) / xtal;
+ /* nfrac = 20 */
+ /* ndiv_frac = (uint32) (((uint64) (fvco * p1div - xtal * ndiv_int) * (1 << 20)) / xtal) */
+ math_uint64_multiple_add(&temp_high, &temp_low, fvco * p1div - xtal * (*ndiv_int), 1 << 20,
+ 0);
+ math_uint64_divide(ndiv_frac, temp_high, temp_low, xtal);
+ return p1div;
+}
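+
+/* Worked example (illustrative only, assuming xtal = 37400 KHz and
+ * fvco = 960100 KHz):
+ *   p1div     = 1 + (37400 * 1000) / 54000000 = 1
+ *   ndiv_int  = (960100 * 1) / 37400 = 25
+ *   ndiv_frac = ((960100 - 37400 * 25) * 2^20) / 37400 = 703723
+ * i.e. the PLL multiplies the 37.4 MHz reference by roughly 25.67.
+ */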
+
+void
+si_pmu_armpll_freq_upd(si_t *sih, uint8 p1div, uint32 ndiv_int, uint32 ndiv_frac)
+{
+ switch (CHIPID(sih->chip)) {
+ case BCM4388_CHIP_GRPID:
+ si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG6, PMU4388_ARMPLL_I_NDIV_INT_MASK,
+ ndiv_int << PMU4388_ARMPLL_I_NDIV_INT_SHIFT);
+ si_pmu_pllupd(sih);
+ break;
+ case BCM4389_CHIP_GRPID:
+ si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG6, PMU4389_ARMPLL_I_NDIV_INT_MASK,
+ ndiv_int << PMU4389_ARMPLL_I_NDIV_INT_SHIFT);
+ si_pmu_pllupd(sih);
+ break;
+	case BCM4369_CHIP_GRPID:
+	case BCM4362_CHIP_GRPID:
+		/* the 4369 and 4362 PLL definitions are the same, so the
+		 * register writes below are shared
+		 */
+		si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG5, PMU4369_PLL1_PC5_P1DIV_MASK,
+			((p1div >> PMU4369_P1DIV_LO_SHIFT) << PMU4369_PLL1_PC5_P1DIV_SHIFT));
+		si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG6, PMU4369_PLL1_PC6_P1DIV_MASK,
+			(p1div >> PMU4369_P1DIV_HI_SHIFT));
+		si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG6, PMU4369_PLL1_PC6_NDIV_INT_MASK,
+			ndiv_int << PMU4369_PLL1_PC6_NDIV_INT_SHIFT);
+		si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG6, PMU4369_PLL1_PC6_NDIV_FRAC_MASK,
+			ndiv_frac << PMU4369_PLL1_PC6_NDIV_FRAC_SHIFT);
+		si_pmu_pllupd(sih);
+		break;
+ default:
+ ASSERT(0);
+ break;
+ }
+}
+
+void
+si_pmu_bbpll_freq_upd(si_t *sih, uint8 p1div, uint32 ndiv_int, uint32 ndiv_frac)
+{
+ switch (CHIPID(sih->chip)) {
+ case BCM4369_CHIP_GRPID:
+ case BCM4362_CHIP_GRPID:
+		/* the PLL control 2 register is the same for 4369 and 4362 */
+ si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG2, PMU4369_PLL0_PC2_PDIV_MASK, p1div);
+ si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG2, PMU4369_PLL0_PC2_NDIV_INT_MASK,
+ ndiv_int << PMU4369_PLL0_PC2_NDIV_INT_SHIFT);
+ si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG2, PMU4369_PLL0_PC3_NDIV_FRAC_MASK,
+ ndiv_frac << PMU4369_PLL0_PC3_NDIV_FRAC_SHIFT);
+ si_pmu_pllupd(sih);
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+}
+
+void
+si_pmu_armpll_chmdiv_upd(si_t *sih, uint32 ch0_mdiv, uint32 ch1_mdiv)
+{
+ switch (CHIPID(sih->chip)) {
+ default:
+ ASSERT(0);
+ break;
+ }
+}
+
+static bool
+si_pmu_armpll_write_required(si_t *sih, uint32 xtal)
+{
+ uint32 def_xtal = 0;
+ uint32 def_armclk_mhz = 0;
+ uint32 armclk_mhz = si_get_armpllclkfreq(sih);
+
+ switch (CHIPID(sih->chip)) {
+ case BCM4369_CHIP_GRPID:
+ def_xtal = XTAL_FREQ_37400MHZ;
+ def_armclk_mhz = ARMPLL_FREQ_400MHZ;
+ break;
+ case BCM4388_CHIP_GRPID:
+ def_xtal = XTAL_FREQ_59970MHZ;
+ def_armclk_mhz = ARMPLL_FREQ_1000MHZ;
+ break;
+ case BCM4389_CHIP_GRPID:
+ def_xtal = XTAL_FREQ_59970MHZ;
+ def_armclk_mhz = ARMPLL_FREQ_1000MHZ;
+ break;
+ default:
+ break;
+ }
+
+	/*
+	 * If the programmed xtalfreq is the same as the xtal, there is no need to
+	 * enable the PLL write. Check armclk and xtalfreq instead of comparing the
+	 * calculated value against the PLL register value.
+	 */
+ return (((armclk_mhz == def_armclk_mhz) && (xtal == def_xtal)) ? FALSE : TRUE);
+}
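+
+/* Example (using the defaults in the switch above): on a 4389 with
+ * xtal == XTAL_FREQ_59970MHZ and an NVRAM armpllclkfreq of
+ * ARMPLL_FREQ_1000MHZ, both defaults match and the function returns FALSE
+ * (no PLL write needed); any other combination returns TRUE.
+ */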
+
+/**
+ * Chip-specific overrides to PLLCONTROL registers during init. If certain conditions (dependent on
+ * x-tal frequency and current ALP frequency) are met, an update of the PLL is required.
+ *
+ * This takes lower precedence than OTP PLLCONTROL overrides.
+ * If update_required==FALSE, it returns TRUE if an update is about to occur,
+ * but no write happens.
+ *
+ * Return value: TRUE if the BBPLL registers 'update' field should be written by the caller.
+ *
+ * This function is only called for pmu1_ type chips, perhaps we should rename it.
+ */
+static bool
+BCMATTACHFN(si_pmu_update_pllcontrol)(si_t *sih, osl_t *osh, uint32 xtal, bool update_required)
+{
+ pmuregs_t *pmu;
+ uint origidx;
+ bool write_en = FALSE;
+ uint8 xf = 0;
+ const pmu1_xtaltab0_t *xt;
+ uint32 tmp;
+ const pllctrl_data_t *pllctrlreg_update = NULL;
+ uint32 array_size = 0;
+ /* points at a set of PLL register values to write for a given x-tal frequency: */
+ const uint32 *pllctrlreg_val = NULL;
+ uint8 ndiv_mode = PMU1_PLL0_PC2_NDIV_MODE_MASH;
+ uint32 xtalfreq = 0;
+ uint32 ndiv_int;
+ uint32 ndiv_frac;
+ uint8 pdiv;
+
+ BCM_REFERENCE(ndiv_int);
+ BCM_REFERENCE(ndiv_frac);
+ BCM_REFERENCE(pdiv);
+	/* If there is an OTP or NVRAM entry for xtalfreq, program the
+	 * PLL control register even if it is the default xtal.
+	 */
+ xtalfreq = getintvar(NULL, rstr_xtalfreq);
+ /* CASE1 */
+ if (xtalfreq) {
+ write_en = TRUE;
+ xtal = xtalfreq;
+ } else {
+ /* There is NO OTP value */
+ if (xtal) {
+			/* CASE2: If the xtal value was calculated, program the PLL control
+			 * registers only if it is not the default xtal value.
+			 */
+ if (xtal != (si_pmu_def_alp_clock(sih, osh)/1000))
+ write_en = TRUE;
+ } else {
+			/* CASE3: If the xtal obtained is "0", i.e., the clock was not
+			 * measured, then leave the PLL control register as it is but
+			 * program the xf in the pmucontrol register with the default
+			 * xtal value.
+			 */
+ xtal = si_pmu_def_alp_clock(sih, osh)/1000;
+ }
+ }
+
+ switch (CHIPID(sih->chip)) {
+ case BCM43012_CHIP_ID:
+ case BCM43013_CHIP_ID:
+ case BCM43014_CHIP_ID:
+ pllctrlreg_update = pmu1_xtaltab0_43012;
+ array_size = ARRAYSIZE(pmu1_xtaltab0_43012);
+ pllctrlreg_val = pmu1_pllctrl_tab_43012_1600mhz;
+ break;
+ case BCM4369_CHIP_GRPID:
+ pllctrlreg_update = pmu1_xtaltab0_4369;
+ array_size = ARRAYSIZE(pmu1_xtaltab0_4369);
+ pllctrlreg_val = pmu1_pllctrl_tab_4369_960p1mhz;
+		/* PLL programming defaults to enabled; it is disabled later if not needed */
+ write_en = TRUE;
+ break;
+ case BCM4362_CHIP_GRPID:
+ pllctrlreg_update = pmu1_xtaltab0_4362;
+ array_size = ARRAYSIZE(pmu1_xtaltab0_4362);
+ pllctrlreg_val = pmu1_pllctrl_tab_4362_960p1mhz;
+		/* PLL programming defaults to enabled; it is disabled later if not needed */
+ write_en = TRUE;
+ break;
+ case BCM4376_CHIP_GRPID:
+ case BCM4378_CHIP_GRPID:
+ case BCM4385_CHIP_GRPID:
+ case BCM4387_CHIP_GRPID:
+		/* TBD: bypass PLL programming, so chip default values are used */
+ pllctrlreg_update = NULL;
+ array_size = 0;
+ pllctrlreg_val = NULL;
+ write_en = FALSE;
+ break;
+ case BCM4388_CHIP_GRPID:
+		/* TBD: bypass PLL programming, so chip default values are used */
+ pllctrlreg_update = NULL;
+ array_size = 0;
+ pllctrlreg_val = NULL;
+ write_en = FALSE;
+ break;
+ case BCM4389_CHIP_GRPID:
+ pllctrlreg_update = pmu1_xtaltab0_4389;
+ array_size = ARRAYSIZE(pmu1_xtaltab0_4389);
+ pllctrlreg_val = pmu1_pllctrl_tab_4389_963mhz;
+ break;
+ CASE_BCM43602_CHIP:
+		/*
+		 * 43602 has only one x-tal value; a case could be inserted here if a BBPLL
+		 * frequency other than 960 MHz is ever required (e.g., for spur avoidance)
+		 */
+ /* fall through */
+ default:
+		/* write_en stays FALSE in this case, so the function returns without writing */
+ write_en = FALSE;
+ break;
+ }
+
+ /* Remember original core before switch to chipc/pmu */
+ origidx = si_coreidx(sih);
+ if (AOB_ENAB(sih)) {
+ pmu = si_setcore(sih, PMU_CORE_ID, 0);
+ } else {
+ pmu = si_setcoreidx(sih, SI_CC_IDX);
+ }
+ ASSERT(pmu != NULL);
+
+ /* Check if the table has PLL control register values for the requested xtal */
+ if (!update_required && pllctrlreg_update) {
+ /* Here the chipcommon register base is passed as NULL, so that we just get
+ * the xf for the xtal being programmed but don't program the registers now
+ * as the PLL is not yet turned OFF.
+ */
+ xf = si_pmu_pllctrlreg_update(sih, osh, NULL, xtal, 0, pllctrlreg_update,
+ array_size, pllctrlreg_val);
+
+ /* Program the PLL based on the xtal value. */
+ if (xf != 0) {
+ /* Write XtalFreq. Set the divisor also. */
+ tmp = R_REG(osh, &pmu->pmucontrol) &
+ ~(PCTL_ILP_DIV_MASK | PCTL_XTALFREQ_MASK);
+ tmp |= (((((xtal + 127) / 128) - 1) << PCTL_ILP_DIV_SHIFT) &
+ PCTL_ILP_DIV_MASK) |
+ ((xf << PCTL_XTALFREQ_SHIFT) & PCTL_XTALFREQ_MASK);
+ W_REG(osh, &pmu->pmucontrol, tmp);
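+			/* e.g. for xtal = 37400 [KHz] the ILP divider field above
+			 * becomes ((37400 + 127) / 128) - 1 = 292 (illustrative
+			 * arithmetic only).
+			 */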
+ } else {
+ write_en = FALSE;
+ if (!FWSIGN_ENAB()) {
+ printf(rstr_Invalid_Unsupported_xtal_value_D, xtal);
+ }
+ }
+
+ write_en = si_pmu_armpll_write_required(sih, xtal);
+ }
+
+	/* If it's a check sequence or there is nothing to write, return here */
+ if ((update_required == FALSE) || (write_en == FALSE)) {
+ goto exit;
+ }
+
+ /* Update the PLL control register based on the xtal used. */
+ if (pllctrlreg_val) {
+ si_pmu_pllctrlreg_update(sih, osh, pmu, xtal, 0, pllctrlreg_update, array_size,
+ pllctrlreg_val);
+ }
+
+ /* Chip specific changes to PLL Control registers is done here. */
+ switch (CHIPID(sih->chip)) {
+ case BCM4388_CHIP_ID: {
+ uint32 armclk_mhz = si_get_armpllclkfreq(sih);
+ uint32 vco_freq = (armclk_mhz * PMU4388_APLL_PDIV * 1000);
+
+ ASSERT(vco_freq <= FVCO_3200);
+
+		/*
+		 * ndiv_int = Fvco / Fref_eff
+		 * Fref_eff = Fref / pdiv
+		 * Fref = xtal / 2
+		 * pdiv = 3
+		 *
+		 * ndiv_int = (Fvco * pdiv * 1000000) / ((xtal * 1000) / 2)
+		 */
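+		/* Worked numbers (illustrative only, assuming armclk_mhz = 1000,
+		 * PMU4388_APLL_PDIV = 3 and xtal = 59970 KHz):
+		 * vco_freq = 3000000 KHz and ndiv_int = 3000000 / (59970 / 2) = 100.
+		 */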
+ ndiv_int = (vco_freq / (xtal / 2));
+ si_pmu_armpll_freq_upd(sih, 0, ndiv_int, 0);
+ break;
+ }
+
+ case BCM4389_CHIP_ID: {
+ uint32 armclk_mhz = si_get_armpllclkfreq(sih);
+ uint32 vco_freq = (armclk_mhz * PMU4389_APLL_PDIV * 1000);
+
+ ASSERT(vco_freq <= FVCO_3200);
+
+		/*
+		 * ndiv_int = Fvco / Fref_eff
+		 * Fref_eff = Fref / pdiv
+		 * Fref = xtal / 2
+		 * pdiv = 3
+		 *
+		 * ndiv_int = (Fvco * pdiv * 1000000) / ((xtal * 1000) / 2)
+		 */
+ ndiv_int = (vco_freq / (xtal / 2));
+ si_pmu_armpll_freq_upd(sih, 0, ndiv_int, 0);
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ /* Program the PLL based on the xtal value. */
+ if (xtal != 0) {
+ /* Find the frequency in the table */
+ for (xt = si_pmu1_xtaltab0(sih); xt != NULL && xt->fref != 0; xt ++)
+ if (xt->fref == xtal) {
+ break;
+ }
+
+		/* Check the current PLL state and bail out if the xtal is unknown,
+		 * i.e., we don't know how to program the PLL for it. There may still
+		 * be other programming to do (changing the ARM clock, etc.), so exit
+		 * via 'exit' rather than returning directly.
+		 */
+ if (xt == NULL || xt->fref == 0) {
+ goto exit;
+ }
+
+ /* If the PLL is already programmed exit from here. */
+ if (((R_REG(osh, &pmu->pmucontrol) &
+ PCTL_XTALFREQ_MASK) >> PCTL_XTALFREQ_SHIFT) == xt->xf) {
+ goto exit;
+ }
+
+ PMU_MSG(("XTAL %d.%d MHz (%d)\n", xtal / 1000, xtal % 1000, xt->xf));
+ PMU_MSG(("Programming PLL for %d.%d MHz\n", xt->fref / 1000, xt->fref % 1000));
+
+ if (BCM4389_CHIP(sih->chip)) {
+ /* Write ndiv_int to pllcontrol[6] */
+ tmp = ((xt->ndiv_int << PMU4389_ARMPLL_I_NDIV_INT_SHIFT)
+ & PMU4389_ARMPLL_I_NDIV_INT_MASK);
+ si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG6,
+ (PMU4389_ARMPLL_I_NDIV_INT_MASK), tmp);
+ } else if (BCM4388_CHIP(sih->chip)) {
+ /* Write ndiv_int to pllcontrol[6] */
+ tmp = ((xt->ndiv_int << PMU4388_ARMPLL_I_NDIV_INT_SHIFT)
+ & PMU4388_ARMPLL_I_NDIV_INT_MASK);
+ si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG6,
+ (PMU4388_ARMPLL_I_NDIV_INT_MASK), tmp);
+ } else if (BCM4369_CHIP(sih->chip) ||
+ BCM4362_CHIP(sih->chip) ||
+ FALSE) {
+ /* Write pdiv (Actually it is mapped to p1div in the struct)
+ to pllcontrol[2]
+ */
+ tmp = ((xt->p1div << PMU4369_PLL0_PC2_PDIV_SHIFT) &
+ PMU4369_PLL0_PC2_PDIV_MASK);
+ si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG2,
+ (PMU4369_PLL0_PC2_PDIV_MASK), tmp);
+
+ /* Write ndiv_int to pllcontrol[2] */
+ tmp = ((xt->ndiv_int << PMU4369_PLL0_PC2_NDIV_INT_SHIFT)
+ & PMU4369_PLL0_PC2_NDIV_INT_MASK);
+ si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG2,
+ (PMU4369_PLL0_PC2_NDIV_INT_MASK), tmp);
+
+ /* Write ndiv_frac to pllcontrol[3] */
+ tmp = ((xt->ndiv_frac << PMU4369_PLL0_PC3_NDIV_FRAC_SHIFT) &
+ PMU4369_PLL0_PC3_NDIV_FRAC_MASK);
+ si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG3,
+ PMU4369_PLL0_PC3_NDIV_FRAC_MASK, tmp);
+ } else {
+ /* Write p1div and p2div to pllcontrol[0] */
+ tmp = ((xt->p1div << PMU1_PLL0_PC0_P1DIV_SHIFT) &
+ PMU1_PLL0_PC0_P1DIV_MASK) |
+ ((xt->p2div << PMU1_PLL0_PC0_P2DIV_SHIFT) &
+ PMU1_PLL0_PC0_P2DIV_MASK);
+ si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG0,
+ (PMU1_PLL0_PC0_P1DIV_MASK | PMU1_PLL0_PC0_P2DIV_MASK), tmp);
+
+ /* Write ndiv_int and ndiv_mode to pllcontrol[2] */
+ tmp = ((xt->ndiv_int << PMU1_PLL0_PC2_NDIV_INT_SHIFT)
+ & PMU1_PLL0_PC2_NDIV_INT_MASK) |
+ ((ndiv_mode << PMU1_PLL0_PC2_NDIV_MODE_SHIFT)
+ & PMU1_PLL0_PC2_NDIV_MODE_MASK);
+ si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG2,
+ (PMU1_PLL0_PC2_NDIV_INT_MASK | PMU1_PLL0_PC2_NDIV_MODE_MASK), tmp);
+ /* Write ndiv_frac to pllcontrol[3] */
+ tmp = ((xt->ndiv_frac << PMU1_PLL0_PC3_NDIV_FRAC_SHIFT) &
+ PMU1_PLL0_PC3_NDIV_FRAC_MASK);
+ si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG3,
+ PMU1_PLL0_PC3_NDIV_FRAC_MASK, tmp);
+ }
+
+ /* Write XtalFreq. Set the divisor also. */
+ tmp = R_REG(osh, &pmu->pmucontrol) &
+ ~(PCTL_ILP_DIV_MASK | PCTL_XTALFREQ_MASK);
+ tmp |= (((((xt->fref + 127) / 128) - 1) << PCTL_ILP_DIV_SHIFT) &
+ PCTL_ILP_DIV_MASK) |
+ ((xt->xf << PCTL_XTALFREQ_SHIFT) & PCTL_XTALFREQ_MASK);
+ W_REG(osh, &pmu->pmucontrol, tmp);
+ }
+
+exit:
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+
+ return write_en;
+} /* si_pmu_update_pllcontrol */
+
+/* Returns the current value of PMUTimer,
+   also taking care of PR88659 by reading the register multiple times.
+*/
+uint32
+BCMPOSTTRAPFN(si_pmu_get_pmutimer)(si_t *sih)
+{
+ osl_t *osh = si_osh(sih);
+ pmuregs_t *pmu;
+ uint origidx;
+ uint32 start;
+ BCM_REFERENCE(sih);
+
+ origidx = si_coreidx(sih);
+ if (AOB_ENAB(sih)) {
+ pmu = si_setcore(sih, PMU_CORE_ID, 0);
+ } else {
+ pmu = si_setcoreidx(sih, SI_CC_IDX);
+ }
+ ASSERT(pmu != NULL);
+
+ start = R_REG(osh, &pmu->pmutimer);
+ if (start != R_REG(osh, &pmu->pmutimer))
+ start = R_REG(osh, &pmu->pmutimer);
+
+ si_setcoreidx(sih, origidx);
+
+ return (start);
+}
+
+/* Get current pmu time API */
+uint32
+si_cur_pmu_time(si_t *sih)
+{
+ uint origidx;
+ uint32 pmu_time;
+
+ /* Remember original core before switch to chipc/pmu */
+ origidx = si_coreidx(sih);
+
+ pmu_time = si_pmu_get_pmutimer(sih);
+
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+ return (pmu_time);
+}
+
+/**
+ * returns
+ * a) diff between a 'prev' value of pmu timer and current value
+ * b) the current pmutime value in 'prev'
+ * So, 'prev' is an IO parameter.
+ */
+uint32
+BCMPOSTTRAPFN(si_pmu_get_pmutime_diff)(si_t *sih, osl_t *osh, pmuregs_t *pmu, uint32 *prev)
+{
+ uint32 pmutime_diff = 0, pmutime_val = 0;
+ uint32 prev_val = *prev;
+ BCM_REFERENCE(osh);
+ BCM_REFERENCE(pmu);
+ /* read current value */
+ pmutime_val = si_pmu_get_pmutimer(sih);
+	/* diff between prev and current value, taking the wraparound case into account */
+ pmutime_diff = (pmutime_val >= prev_val) ?
+ (pmutime_val - prev_val) :
+ (~prev_val + pmutime_val + 1);
+ *prev = pmutime_val;
+ return pmutime_diff;
+}
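+
+/* Worked example of the wraparound case (illustrative only): with
+ * prev_val = 0xFFFFFFF0 and pmutime_val = 0x00000010, the diff is
+ * ~0xFFFFFFF0 + 0x10 + 1 = 0xF + 0x10 + 1 = 0x20, i.e. 32 ticks across
+ * the 32-bit wrap.
+ */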
+
+/**
+ * Wait up to 'usec' microseconds for the res_pending register to change.
+ * NOTE: usec SHOULD be > 32uS.
+ * If cond == TRUE, res_pending is read until it becomes == 0;
+ * if cond == FALSE, res_pending is read until it becomes != 0.
+ * Returns TRUE if it timed out.
+ * Returns the elapsed time of this loop in 'elapsed_time'.
+ */
+bool
+BCMPOSTTRAPFN(si_pmu_wait_for_res_pending)(si_t *sih, osl_t *osh, pmuregs_t *pmu, uint usec,
+ bool cond, uint32 *elapsed_time)
+{
+	/* countdown in [uS] units */
+ uint countdown = usec;
+ uint32 pmutime_prev = 0, pmutime_elapsed = 0, res_pend;
+ bool pending = FALSE;
+
+ /* store current time */
+ pmutime_prev = si_pmu_get_pmutimer(sih);
+ while (1) {
+ res_pend = R_REG(osh, &pmu->res_pending);
+
+ /* based on the condition, check */
+ if (cond == TRUE) {
+ if (res_pend == 0) break;
+ } else {
+ if (res_pend != 0) break;
+ }
+
+ /* if required time over */
+ if ((pmutime_elapsed * PMU_US_STEPS) >= countdown) {
+ /* timeout. so return as still pending */
+ pending = TRUE;
+ break;
+ }
+
+ /* get elapsed time after adding diff between prev and current
+ * pmutimer value
+ */
+ pmutime_elapsed += si_pmu_get_pmutime_diff(sih, osh, pmu, &pmutime_prev);
+ }
+
+ *elapsed_time = pmutime_elapsed * PMU_US_STEPS;
+ return pending;
+} /* si_pmu_wait_for_res_pending */
+
+/**
+ * The algorithm for the pending check is:
+ * step1: wait until (res_pending == 0) OR pmu_max_trans_timeout elapses.
+ *        On max_trans_timeout, flag an error and exit.
+ * step2: wait for 1 ILP clk [64uS] based on the pmu timer,
+ *        polling to see if res_pending goes high again.
+ *        If res_pending goes high again, go back to step1.
+ * Note: res_pending is checked repeatedly because, in between the
+ * switching of dependent resources, res_pending resets to 0 for a short
+ * duration before it becomes 1 again.
+ * Note: a return value of 0 is GOOD, 1 is BAD [mainly timeout].
+ */
+int
+BCMPOSTTRAPFN(si_pmu_wait_for_steady_state)(si_t *sih, osl_t *osh, pmuregs_t *pmu)
+{
+ si_info_t *sii = SI_INFO(sih);
+ int stat = 0;
+ bool timedout = FALSE;
+ uint32 elapsed = 0, pmutime_total_elapsed = 0;
+ uint32 pmutime_prev;
+
+ sii->res_pend_count = 0;
+
+ pmutime_prev = si_pmu_get_pmutimer(sih);
+
+ while (1) {
+ /* wait until all resources are settled down [till res_pending becomes 0] */
+ timedout = si_pmu_wait_for_res_pending(sih, osh, pmu,
+ PMU_MAX_TRANSITION_DLY, TRUE, &elapsed);
+
+ sii->res_state[sii->res_pend_count].low_time =
+ si_pmu_get_pmutime_diff(sih, osh, pmu, &pmutime_prev);
+ sii->res_state[sii->res_pend_count].low = R_REG(osh, &pmu->res_pending);
+
+ if (timedout) {
+ stat = 1;
+ break;
+ }
+
+ pmutime_total_elapsed += elapsed;
+		/* Wait to check whether res_pending goes non-zero again, indicating
+		 * that another resource is pending. res_pending goes to 0 for 1 ILP
+		 * clock before being set for the next resource in the sequence, so if
+		 * res_pending stays 0 for more than 1 ILP clk, no dependent resource
+		 * is pending.
+		 */
+ pmutime_prev = R_REG(osh, &pmu->pmutimer);
+ timedout = si_pmu_wait_for_res_pending(sih, osh, pmu,
+ 64, FALSE, &elapsed);
+
+ pmutime_total_elapsed += elapsed;
+
+ sii->res_state[sii->res_pend_count].high_time =
+ si_pmu_get_pmutime_diff(sih, osh, pmu, &pmutime_prev);
+ sii->res_state[sii->res_pend_count].high = R_REG(osh, &pmu->res_pending);
+
+ /* Here, we can also check timedout, but we make sure that,
+ * we read the res_pending again.
+ */
+
+ if (timedout) {
+ stat = 0;
+ break;
+ }
+
+ /* Total wait time for all the waits above added should be
+ * less than PMU_MAX_TRANSITION_DLY
+ */
+ if (pmutime_total_elapsed >= PMU_MAX_TRANSITION_DLY) {
+ /* timeout. so return as still pending */
+ stat = 1;
+ break;
+ }
+
+ sii->res_pend_count++;
+ sii->res_pend_count %= RES_PEND_STATS_COUNT;
+ pmutime_prev = R_REG(osh, &pmu->pmutimer);
+ }
+ return stat;
+} /* si_pmu_wait_for_steady_state */
+
+static uint32
+si_pmu_pll_delay_43012(si_t *sih, uint32 delay_us, uint32 poll)
+{
+ uint32 delay = 0;
+
+	/* For NIC builds we can use OSL_DELAY() for a 1 us delay, but for DONGLE
+	 * builds we can't rely on OSL_DELAY(), as it internally relies on the HT
+	 * clock and this function is called while only the ALP clock is present.
+	 */
+#if defined(DONGLEBUILD)
+ uint32 initial, current;
+
+ initial = get_arm_cyclecount();
+ while (delay < delay_us) {
+ if (poll == 1) {
+ if (si_gci_chipstatus(sih, GCI_CHIPSTATUS_07) &
+ GCI43012_CHIPSTATUS_07_BBPLL_LOCK_MASK) {
+ goto exit;
+ }
+ }
+ current = get_arm_cyclecount();
+ delay = ((current - initial) * 1000) / si_xtalfreq(sih);
+ }
+#else
+ for (delay = 0; delay < delay_us; delay++) {
+ if (poll == 1) {
+ if (si_gci_chipstatus(sih, GCI_CHIPSTATUS_07) &
+ GCI43012_CHIPSTATUS_07_BBPLL_LOCK_MASK) {
+ goto exit;
+ }
+ }
+ OSL_DELAY(1);
+ }
+#endif /* DONGLEBUILD */
+
+ if (poll == 1) {
+ PMU_ERROR(("si_pmu_pll_delay_43012: PLL not locked!"));
+ ASSERT(0);
+ }
+exit:
+ return delay;
+}
+
+static void
+si_pmu_pll_on_43012(si_t *sih, osl_t *osh, pmuregs_t *pmu, bool openloop_cal)
+{
+ uint32 rsrc_ht, total_time = 0;
+
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL4, PMUCCTL04_43012_FORCE_BBPLL_PWROFF, 0);
+ total_time += si_pmu_pll_delay_43012(sih, 2, 0);
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL4, PMUCCTL04_43012_FORCE_BBPLL_ISOONHIGH |
+ PMUCCTL04_43012_FORCE_BBPLL_PWRDN, 0);
+ total_time += si_pmu_pll_delay_43012(sih, 2, 0);
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL4, PMUCCTL04_43012_FORCE_BBPLL_ARESET, 0);
+
+ rsrc_ht = R_REG(osh, &pmu->res_state) &
+ ((1 << RES43012_HT_AVAIL) | (1 << RES43012_HT_START));
+
+	if (rsrc_ht) {
+		/* Wait for PLL to lock in close-loop */
+		total_time += si_pmu_pll_delay_43012(sih, 200, 1);
+	} else {
+ /* Wait for 1 us for the open-loop clock to start */
+ total_time += si_pmu_pll_delay_43012(sih, 1, 0);
+ }
+
+ if (!openloop_cal) {
+		/* Allow the clk to be used if this is not a calibration run */
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL4, PMUCCTL04_43012_FORCE_BBPLL_DRESET, 0);
+ total_time += si_pmu_pll_delay_43012(sih, 1, 0);
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL4, PMUCCTL04_43012_DISABLE_LQ_AVAIL, 0);
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL4, PMUCCTL04_43012_DISABLE_HT_AVAIL, 0);
+ }
+
+ PMU_MSG(("si_pmu_pll_on_43012: time taken: %d us\n", total_time));
+}
+
+static void
+si_pmu_pll_off_43012(si_t *sih, osl_t *osh, pmuregs_t *pmu)
+{
+ uint32 total_time = 0;
+ BCM_REFERENCE(osh);
+ BCM_REFERENCE(pmu);
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL4,
+ PMUCCTL04_43012_DISABLE_LQ_AVAIL | PMUCCTL04_43012_DISABLE_HT_AVAIL,
+ PMUCCTL04_43012_DISABLE_LQ_AVAIL | PMUCCTL04_43012_DISABLE_HT_AVAIL);
+ total_time += si_pmu_pll_delay_43012(sih, 1, 0);
+
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL4,
+ (PMUCCTL04_43012_FORCE_BBPLL_ARESET | PMUCCTL04_43012_FORCE_BBPLL_DRESET |
+		PMUCCTL04_43012_FORCE_BBPLL_PWRDN | PMUCCTL04_43012_FORCE_BBPLL_ISOONHIGH),
+ (PMUCCTL04_43012_FORCE_BBPLL_ARESET | PMUCCTL04_43012_FORCE_BBPLL_DRESET |
+		PMUCCTL04_43012_FORCE_BBPLL_PWRDN | PMUCCTL04_43012_FORCE_BBPLL_ISOONHIGH));
+ total_time += si_pmu_pll_delay_43012(sih, 1, 0);
+
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL4,
+ PMUCCTL04_43012_FORCE_BBPLL_PWROFF,
+ PMUCCTL04_43012_FORCE_BBPLL_PWROFF);
+
+ PMU_MSG(("si_pmu_pll_off_43012: time taken: %d us\n", total_time));
+}
+
+/** Turn Off the PLL - Required before setting the PLL registers */
+static void
+si_pmu_pll_off(si_t *sih, osl_t *osh, pmuregs_t *pmu, uint32 *min_mask,
+ uint32 *max_mask, uint32 *clk_ctl_st)
+{
+ uint32 ht_req;
+
+ /* Save the original register values */
+ *min_mask = R_REG(osh, &pmu->min_res_mask);
+ *max_mask = R_REG(osh, &pmu->max_res_mask);
+ *clk_ctl_st = si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, clk_ctl_st), 0, 0);
+
+ ht_req = si_pmu_htclk_mask(sih);
+ if (ht_req == 0)
+ return;
+
+ if ((CHIPID(sih->chip) == BCM43012_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM43013_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM43014_CHIP_ID) ||
+ (BCM4369_CHIP(sih->chip)) ||
+ (BCM4362_CHIP(sih->chip)) ||
+ (BCM4376_CHIP(sih->chip)) ||
+ (BCM4378_CHIP(sih->chip)) ||
+ (BCM4385_CHIP(sih->chip)) ||
+ (BCM4387_CHIP(sih->chip)) ||
+ (BCM4388_CHIP(sih->chip)) ||
+ (BCM4389_CHIP(sih->chip)) ||
+ BCM43602_CHIP(sih->chip) ||
+ 0) {
+ /*
+		 * If HT_AVAIL is not set, wait to see whether any resources are bringing up HT.
+ */
+ if (((si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, clk_ctl_st), 0, 0)
+ & CCS_HTAVAIL) != CCS_HTAVAIL))
+ si_pmu_wait_for_steady_state(sih, osh, pmu);
+ } else {
+ OR_REG(osh, &pmu->max_res_mask, ht_req);
+ /* wait for HT to be ready before taking the HT away...HT could be coming up... */
+ SPINWAIT(((si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, clk_ctl_st), 0, 0)
+ & CCS_HTAVAIL) != CCS_HTAVAIL), PMU_MAX_TRANSITION_DLY);
+ ASSERT((si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, clk_ctl_st), 0, 0)
+ & CCS_HTAVAIL));
+ }
+
+ if ((CHIPID(sih->chip) == BCM43012_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM43013_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM43014_CHIP_ID)) {
+ si_pmu_pll_off_43012(sih, osh, pmu);
+ } else {
+ AND_REG(osh, &pmu->min_res_mask, ~ht_req);
+ AND_REG(osh, &pmu->max_res_mask, ~ht_req);
+
+ SPINWAIT(((si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, clk_ctl_st), 0, 0)
+ & CCS_HTAVAIL) == CCS_HTAVAIL), PMU_MAX_TRANSITION_DLY);
+ ASSERT(!(si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, clk_ctl_st), 0, 0)
+ & CCS_HTAVAIL));
+ OSL_DELAY(100);
+ }
+} /* si_pmu_pll_off */
+
+/* The functions below are for BBPLL parallel operation */
+/** Turn Off the PLL - Required before setting the PLL registers */
+void
+si_pmu_pll_off_PARR(si_t *sih, osl_t *osh, uint32 *min_mask,
+uint32 *max_mask, uint32 *clk_ctl_st)
+{
+ pmuregs_t *pmu;
+ uint origidx;
+ bcm_int_bitmask_t intr_val;
+ uint32 ht_req;
+
+ /* Block ints and save current core */
+ si_introff(sih, &intr_val);
+ origidx = si_coreidx(sih);
+ if (AOB_ENAB(sih)) {
+ pmu = si_setcore(sih, PMU_CORE_ID, 0);
+ } else {
+ pmu = si_setcoreidx(sih, SI_CC_IDX);
+ }
+ ASSERT(pmu != NULL);
+
+ /* Save the original register values */
+ *min_mask = R_REG(osh, &pmu->min_res_mask);
+ *max_mask = R_REG(osh, &pmu->max_res_mask);
+ *clk_ctl_st = si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, clk_ctl_st), 0, 0);
+ ht_req = si_pmu_htclk_mask(sih);
+ if (ht_req == 0) {
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+ si_intrrestore(sih, &intr_val);
+ return;
+ }
+
+ if ((CHIPID(sih->chip) == BCM43012_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM43013_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM43014_CHIP_ID) ||
+ (BCM4369_CHIP(sih->chip)) ||
+ (BCM4362_CHIP(sih->chip)) ||
+ (BCM4376_CHIP(sih->chip)) ||
+ (BCM4378_CHIP(sih->chip)) ||
+ (BCM4385_CHIP(sih->chip)) ||
+ (BCM4387_CHIP(sih->chip)) ||
+ (BCM4388_CHIP(sih->chip)) ||
+ (BCM4389_CHIP(sih->chip)) ||
+ (BCM4397_CHIP(sih->chip)) ||
+ (BCM43602_CHIP(sih->chip)) ||
+ 0) {
+ /*
+		 * If HT_AVAIL is not set, wait to see whether any resources are bringing up HT.
+ */
+ if (((si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, clk_ctl_st), 0, 0)
+ & CCS_HTAVAIL)
+ != CCS_HTAVAIL))
+ si_pmu_wait_for_steady_state(sih, osh, pmu);
+ } else {
+ OR_REG(osh, &pmu->max_res_mask, ht_req);
+ /* wait for HT to be ready before taking the HT away...HT could be coming up... */
+ SPINWAIT(((si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, clk_ctl_st), 0, 0)
+ & CCS_HTAVAIL) != CCS_HTAVAIL), PMU_MAX_TRANSITION_DLY);
+ ASSERT((si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, clk_ctl_st), 0, 0)
+ & CCS_HTAVAIL));
+ }
+
+ AND_REG(osh, &pmu->min_res_mask, ~ht_req);
+ AND_REG(osh, &pmu->max_res_mask, ~ht_req);
+
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+ si_intrrestore(sih, &intr_val);
+} /* si_pmu_pll_off_PARR */
+
+/** Turn ON/restore the PLL based on the mask received */
+static void
+si_pmu_pll_on(si_t *sih, osl_t *osh, pmuregs_t *pmu, uint32 min_mask_mask,
+ uint32 max_mask_mask, uint32 clk_ctl_st_mask)
+{
+ uint32 ht_req;
+
+ ht_req = si_pmu_htclk_mask(sih);
+ if (ht_req == 0)
+ return;
+
+ max_mask_mask &= ht_req;
+ min_mask_mask &= ht_req;
+
+ if (max_mask_mask != 0)
+ OR_REG(osh, &pmu->max_res_mask, max_mask_mask);
+
+ if (min_mask_mask != 0)
+ OR_REG(osh, &pmu->min_res_mask, min_mask_mask);
+
+ if (clk_ctl_st_mask & CCS_HTAVAIL) {
+ /* Wait for HT_AVAIL to come back */
+ SPINWAIT(((si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, clk_ctl_st), 0, 0)
+ & CCS_HTAVAIL) != CCS_HTAVAIL), PMU_MAX_TRANSITION_DLY);
+ ASSERT((si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, clk_ctl_st), 0, 0)
+ & CCS_HTAVAIL));
+ }
+
+ if ((CHIPID(sih->chip) == BCM43012_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM43013_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM43014_CHIP_ID)) {
+ si_pmu_pll_on_43012(sih, osh, pmu, 0);
+ }
+}
+
+/**
+ * Set up PLL registers in the PMU as per the (optional) OTP values, or, if no OTP values are
+ * present, optionally update with POR override values contained in firmware. Enables the BBPLL
+ * when done.
+ */
+static void
+BCMATTACHFN(si_pmu1_pllinit1)(si_t *sih, osl_t *osh, pmuregs_t *pmu, uint32 xtal)
+{
+ char name[16];
+ const char *otp_val;
+ uint8 i, otp_entry_found = FALSE;
+ uint32 pll_ctrlcnt;
+ uint32 min_mask = 0, max_mask = 0, clk_ctl_st = 0;
+#if defined(BTOVERPCIE) && !defined(BTOVERPCIE_DISABLED)
+ uint32 otpval = 0, regval = 0;
+#endif /* defined(BTOVERPCIE) && !defined(BTOVERPCIE_DISABLED) */
+
+ if (!FWSIGN_ENAB()) {
+ if (PMUREV(sih->pmurev) >= 5) {
+ pll_ctrlcnt = (sih->pmucaps & PCAP5_PC_MASK) >> PCAP5_PC_SHIFT;
+ } else {
+ pll_ctrlcnt = (sih->pmucaps & PCAP_PC_MASK) >> PCAP_PC_SHIFT;
+ }
+
+		/* Check if there is any OTP entry for the PLL control registers */
+ for (i = 0; i < pll_ctrlcnt; i++) {
+ snprintf(name, sizeof(name), rstr_pllD, i);
+ if ((otp_val = getvar(NULL, name)) == NULL)
+ continue;
+
+			/* If an OTP entry is found for a PLL register, turn off the PLL
+			 * and set the status of the OTP entry accordingly.
+			 */
+ otp_entry_found = TRUE;
+ break;
+ }
+ }
+
+ /* If no OTP parameter is found and no chip-specific updates are needed, return. */
+ if ((otp_entry_found == FALSE) &&
+ (si_pmu_update_pllcontrol(sih, osh, xtal, FALSE) == FALSE)) {
+#if defined(BTOVERPCIE) && !defined(BTOVERPCIE_DISABLED)
+		/*
+		 * For 4369/4362, PLL3 could be programmed by BT; check that the value
+		 * is the default and has not been overridden by BT
+		 */
+ if ((BCM4369_CHIP(sih->chip) || BCM4362_CHIP(sih->chip)) &&
+ (regval = si_pmu_pllcontrol(sih, 3, 0, 0)) != PMU_PLL3_4369B0_DEFAULT) {
+ PMU_ERROR(("Default PLL3 value 0x%x is not same as programmed"
+ "value 0x%x\n", PMU_PLL3_4369B0_DEFAULT, regval));
+ hnd_gcisem_set_err(GCI_PLL_LOCK_SEM);
+ return;
+ }
+
+ /* Update SW_READY bit indicating WLAN is ready and verified PLL3 */
+ si_gci_output(sih, GCI_ECI_SW1(GCI_WLAN_IP_ID), GCI_SWREADY, GCI_SWREADY);
+#endif /* defined(BTOVERPCIE) && !defined(BTOVERPCIE_DISABLED) */
+ return;
+ }
+
+#if defined(BTOVERPCIE) && !defined(BTOVERPCIE_DISABLED)
+ if ((hnd_gcisem_acquire(GCI_PLL_LOCK_SEM, TRUE, GCI_PLL_LOCK_SEM_TIMEOUT) != BCME_OK)) {
+ PMU_ERROR(("Failed to get GCI PLL Lock semaphore...\n"));
+ hnd_gcisem_set_err(GCI_PLL_LOCK_SEM);
+ return;
+ }
+
+ /* Skip BB PLL programming if BT has already done it, which is indicated by SW_READY bit */
+ if (si_gci_input(sih, GCI_ECI_SW1(GCI_BT_IP_ID)) & GCI_SWREADY) {
+ PMU_MSG(("PLL is already programmed\n"));
+
+		/* Program the ARM PLL only if the programmed xtalfreq (pllctrl6) differs from the xtal */
+ if (si_pmu_update_pllcontrol(sih, osh, xtal, FALSE)) {
+ /* Make sure PLL is off */
+ si_pmu_pll_off(sih, osh, pmu, &min_mask, &max_mask, &clk_ctl_st);
+
+			/* for 4369, the ARM clock frequency can be set from NVRAM - default is 400 MHz */
+ if ((BCM4369_CHIP(sih->chip) || BCM4362_CHIP(sih->chip)) &&
+ (pll_ctrlcnt > PMU1_PLL0_PLLCTL6)) {
+ PMU_MSG(("Programming ARM CLK\n"));
+ si_pmu_pll6val_armclk_calc(osh, pmu,
+ si_get_armpllclkfreq(sih), xtal, TRUE);
+ }
+
+ /* Flush ('update') the deferred pll control registers writes */
+ if (PMUREV(sih->pmurev) >= 2)
+ OR_REG(osh, &pmu->pmucontrol, PCTL_PLL_PLLCTL_UPD);
+
+ /* Restore back the register values. This ensures PLL remains on if it
+ * was originally on and remains off if it was originally off.
+ */
+ si_pmu_pll_on(sih, osh, pmu, min_mask, max_mask, clk_ctl_st);
+ }
+
+ snprintf(name, sizeof(name), rstr_pllD, 3);
+ if ((otp_val = getvar(NULL, name)) != NULL) {
+ otpval = (uint32)bcm_strtoul(otp_val, NULL, 0);
+ if ((regval = si_pmu_pllcontrol(sih, 3, 0, 0)) != otpval) {
+ PMU_ERROR(("PLL3 programming value 0x%x is not same as programmed"
+ "value 0x%x\n", otpval, regval));
+ hnd_gcisem_set_err(GCI_PLL_LOCK_SEM);
+ }
+ }
+ goto done;
+ }
+#endif /* defined(BTOVERPCIE) && !defined(BTOVERPCIE_DISABLED) */
+
+ /* Make sure PLL is off */
+ si_pmu_pll_off(sih, osh, pmu, &min_mask, &max_mask, &clk_ctl_st);
+
+ /* Update any chip-specific PLL registers. Does not write PLL 'update' bit yet. */
+ si_pmu_update_pllcontrol(sih, osh, xtal, TRUE);
+
+ /* Update the PLL register if there is a OTP entry for PLL registers */
+ si_pmu_otp_pllcontrol(sih, osh);
+
+ /* Flush ('update') the deferred pll control registers writes */
+ if (PMUREV(sih->pmurev) >= 2)
+ OR_REG(osh, &pmu->pmucontrol, PCTL_PLL_PLLCTL_UPD);
+
+ /* Restore back the register values. This ensures PLL remains on if it
+ * was originally on and remains off if it was originally off.
+ */
+ si_pmu_pll_on(sih, osh, pmu, min_mask, max_mask, clk_ctl_st);
+
+ if ((CHIPID(sih->chip) == BCM43012_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM43013_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM43014_CHIP_ID)) {
+ uint32 origidx;
+ /* PMU clock stretch to be decreased to 8 for HT and ALP
+ * to reduce DS0 current during high traffic
+ */
+ W_REG(osh, &pmu->clkstretch, CSTRETCH_REDUCE_8);
+
+ /* SDIOD to request for ALP
+ * to reduce DS0 current during high traffic
+ */
+ origidx = si_coreidx(sih);
+ si_setcore(sih, SDIOD_CORE_ID, 0);
+ /* Clear the Bit 8 for ALP REQUEST change */
+ si_wrapperreg(sih, AI_OOBSELOUTB30, (AI_OOBSEL_MASK << AI_OOBSEL_1_SHIFT),
+ OOB_B_ALP_REQUEST << AI_OOBSEL_1_SHIFT);
+ si_setcoreidx(sih, origidx);
+ }
+
+#if defined(BTOVERPCIE) && !defined(BTOVERPCIE_DISABLED)
+done:
+ /* Update SW_READY bit indicating WLAN is done programming PLL registers */
+ si_gci_output(sih, GCI_ECI_SW1(GCI_WLAN_IP_ID), GCI_SWREADY, GCI_SWREADY);
+ if ((hnd_gcisem_release(GCI_PLL_LOCK_SEM) != BCME_OK)) {
+ PMU_ERROR(("Failed to release GCI PLL Lock semaphore...\n"));
+ hnd_gcisem_set_err(GCI_PLL_LOCK_SEM);
+ }
+#endif /* defined(BTOVERPCIE) && !defined(BTOVERPCIE_DISABLED) */
+} /* si_pmu1_pllinit1 */
+
+#if defined(EDV)
+/* returns backplane clk programmed in pll cntl 1 */
+/* WHY NOT JUST CALL si_pmu_si_clock()? */
+uint32 si_pmu_get_backplaneclkspeed(si_t *sih)
+{
+ uint32 FVCO;
+ uint32 tmp, mdiv = 1;
+
+ switch (CHIPID(sih->chip)) {
+ case BCM4385_CHIP_GRPID:
+ case BCM4387_CHIP_GRPID:
+ case BCM4388_CHIP_GRPID:
+ case BCM4389_CHIP_GRPID:
+ case BCM4397_CHIP_GRPID:
+ return si_pmu_bpclk_4387(sih);
+ default:
+ break;
+ }
+
+ FVCO = si_pmu1_pllfvco0(sih);
+
+ switch (CHIPID(sih->chip)) {
+ case BCM4369_CHIP_GRPID:
+ case BCM4376_CHIP_GRPID:
+ case BCM4378_CHIP_GRPID:
+ tmp = si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG1, 0, 0);
+ mdiv = (tmp & PMU1_PLL0_PC1_M4DIV_MASK) >> PMU1_PLL0_PC1_M4DIV_SHIFT;
+ break;
+ default:
+ ASSERT(FALSE);
+ break;
+ }
+ return FVCO / mdiv * 1000u;
+}
+
+/* Update backplane clock speed */
+void
+si_pmu_update_backplane_clock(si_t *sih, osl_t *osh, uint reg, uint32 mask, uint32 val)
+{
+
+ pmuregs_t *pmu;
+ uint origidx;
+ uint32 max_mask = 0, min_mask = 0, clk_ctl_st = 0;
+
+ /* Remember original core before switch to chipc/pmu */
+ origidx = si_coreidx(sih);
+
+ if (AOB_ENAB(sih)) {
+ pmu = si_setcore(sih, PMU_CORE_ID, 0);
+ } else {
+ pmu = si_setcoreidx(sih, SI_CC_IDX);
+ }
+
+ ASSERT(pmu != NULL);
+
+ /* Make sure PLL is off */
+ si_pmu_pll_off(sih, osh, pmu, &min_mask, &max_mask, &clk_ctl_st);
+
+ si_pmu_pllcontrol(sih, reg, mask, val);
+
+ /* Flush ('update') the deferred pll control registers writes */
+ if (PMUREV(sih->pmurev) >= 2)
+ OR_REG(osh, &pmu->pmucontrol, PCTL_PLL_PLLCTL_UPD);
+
+ /* Restore back the register values. This ensures PLL remains on if it
+ * was originally on and remains off if it was originally off.
+ */
+ si_pmu_pll_on(sih, osh, pmu, min_mask, max_mask, clk_ctl_st);
+ si_setcoreidx(sih, origidx);
+}
+#endif /* EDV */
+
+/**
+ * returns the backplane clock frequency.
+ * Does this by determining current Fvco and the setting of the
+ * clock divider that leads up to the backplane. Returns value in [Hz] units.
+ */
+static uint32
+BCMPOSTTRAPFN(si_pmu_bpclk_4387)(si_t *sih)
+{
+ uint32 tmp, mdiv;
+ uint32 FVCO; /* in [khz] units */
+
+ FVCO = si_pmu1_pllfvco0(sih);
+
+ tmp = si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG2, 0, 0);
+ mdiv = (tmp & PMU4387_PLL0_PC2_ICH3_MDIV_MASK);
+ ASSERT(mdiv != 0);
+
+ return FVCO / mdiv * 1000;
+}
+
+/**
+ * returns the CPU clock frequency. Does this by determining current Fvco and the setting of the
+ * clock divider that leads up to the ARM. Returns value in [Hz] units.
+ */
+static uint32
+BCMPOSTTRAPFN(si_pmu1_cpuclk0)(si_t *sih, osl_t *osh, pmuregs_t *pmu)
+{
+ uint32 tmp, mdiv = 1;
+#ifdef BCMDBG
+ uint32 ndiv_int, ndiv_frac, p2div, p1div, fvco;
+ uint32 fref;
+#endif
+#ifdef BCMDBG_PMU
+ char chn[8];
+#endif
+ uint32 FVCO; /* in [khz] units */
+
+ FVCO = si_pmu1_pllfvco0(sih);
+
+ if (BCM43602_CHIP(sih->chip) &&
+#ifdef DONGLEBUILD
+#ifdef __arm__
+ (si_arm_clockratio(sih, 0) == 1) &&
+#endif
+#endif /* DONGLEBUILD */
+ TRUE) {
+ /* CR4 running on backplane_clk */
+ return si_pmu_si_clock(sih, osh); /* in [hz] units */
+ }
+
+ switch (CHIPID(sih->chip)) {
+ case BCM4360_CHIP_ID:
+ case BCM43460_CHIP_ID:
+ case BCM43526_CHIP_ID:
+ case BCM4352_CHIP_ID:
+ /* Read m6div from pllcontrol[5] */
+ tmp = si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG5, 0, 0);
+ mdiv = (tmp & PMU1_PLL0_PC2_M6DIV_MASK) >> PMU1_PLL0_PC2_M6DIV_SHIFT;
+ break;
+#ifdef DONGLEBUILD
+ CASE_BCM43602_CHIP:
+#ifdef __arm__
+ ASSERT(si_arm_clockratio(sih, 0) == 2);
+#endif
+ /* CR4 running on armcr4_clk (Ch5). Read 'bbpll_i_m5div' from pllctl[5] */
+ tmp = si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG5, 0, 0);
+ mdiv = (tmp & PMU1_PLL0_PC2_M5DIV_MASK) >> PMU1_PLL0_PC2_M5DIV_SHIFT;
+ break;
+ case BCM43012_CHIP_ID:
+ case BCM43013_CHIP_ID:
+ case BCM43014_CHIP_ID:
+ /* mdiv is not supported for 43012 and FVCO frequency should be divided by 2 */
+ mdiv = 2;
+ break;
+#endif /* DONGLEBUILD */
+
+	case BCM4369_CHIP_GRPID:
+	case BCM4362_CHIP_GRPID:
+	case BCM4376_CHIP_GRPID:
+	case BCM4378_CHIP_GRPID:
+		tmp = si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG1, 0, 0);
+		mdiv = (tmp & PMU1_PLL0_PC1_M4DIV_MASK) >> PMU1_PLL0_PC1_M4DIV_SHIFT;
+		break;
+
+ default:
+ PMU_MSG(("si_pmu1_cpuclk0: Unknown chipid %s\n", bcm_chipname(sih->chip, chn, 8)));
+ ASSERT(0);
+ break;
+ }
+
+ ASSERT(mdiv != 0);
+
+#ifdef BCMDBG
+ /* Read p2div/p1div from pllcontrol[0] */
+ tmp = si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG0, 0, 0);
+ p2div = (tmp & PMU1_PLL0_PC0_P2DIV_MASK) >> PMU1_PLL0_PC0_P2DIV_SHIFT;
+ p1div = (tmp & PMU1_PLL0_PC0_P1DIV_MASK) >> PMU1_PLL0_PC0_P1DIV_SHIFT;
+
+ /* Calculate fvco based on xtal freq and ndiv and pdiv */
+	tmp = si_pmu_pllcontrol(sih, PMU1_PLL0_PLLCTL2, 0, 0);
+
+ if (BCM4362_CHIP(sih->chip) ||
+ BCM4369_CHIP(sih->chip)) {
+ p2div = 1;
+ p1div = (tmp & PMU4369_PLL0_PC2_PDIV_MASK) >> PMU4369_PLL0_PC2_PDIV_SHIFT;
+ ndiv_int = (tmp & PMU4369_PLL0_PC2_NDIV_INT_MASK) >>
+ PMU4369_PLL0_PC2_NDIV_INT_SHIFT;
+ } else if (BCM4378_CHIP(sih->chip) || BCM4376_CHIP(sih->chip)) {
+ p2div = 1;
+ p1div = (tmp & PMU4378_PLL0_PC2_P1DIV_MASK) >> PMU4378_PLL0_PC2_P1DIV_SHIFT;
+ ndiv_int = (tmp & PMU4378_PLL0_PC2_NDIV_INT_MASK) >>
+ PMU4378_PLL0_PC2_NDIV_INT_SHIFT;
+ } else {
+ ndiv_int = (tmp & PMU1_PLL0_PC2_NDIV_INT_MASK) >> PMU1_PLL0_PC2_NDIV_INT_SHIFT;
+ }
+
+ ASSERT(p1div != 0);
+
+	tmp = si_pmu_pllcontrol(sih, PMU1_PLL0_PLLCTL3, 0, 0);
+
+ if (BCM4369_CHIP(sih->chip) || BCM4362_CHIP(sih->chip) ||
+ BCM4376_CHIP(sih->chip) ||
+ BCM4378_CHIP(sih->chip) ||
+ FALSE) {
+ ndiv_frac =
+ (tmp & PMU4369_PLL0_PC3_NDIV_FRAC_MASK) >>
+ PMU4369_PLL0_PC3_NDIV_FRAC_SHIFT;
+ fref = si_pmu1_alpclk0(sih, osh, pmu) / 1000; /* [KHz] */
+
+ fvco = (fref * ndiv_int) << 8;
+ fvco += (fref * ((ndiv_frac & 0xfffff) >> 4)) >> 8;
+ fvco >>= 8;
+ fvco *= p1div;
+ fvco /= 1000;
+ fvco *= 1000;
+ } else {
+ ndiv_frac =
+ (tmp & PMU1_PLL0_PC3_NDIV_FRAC_MASK) >> PMU1_PLL0_PC3_NDIV_FRAC_SHIFT;
+
+ fref = si_pmu1_alpclk0(sih, osh, pmu) / 1000;
+
+ fvco = (fref * ndiv_int) << 8;
+ fvco += (fref * (ndiv_frac >> 12)) >> 4;
+ fvco += (fref * (ndiv_frac & 0xfff)) >> 12;
+ fvco >>= 8;
+ fvco *= p2div;
+ fvco /= p1div;
+ fvco /= 1000;
+ fvco *= 1000;
+ }
+
+ PMU_MSG(("si_pmu1_cpuclk0: ndiv_int %u ndiv_frac %u p2div %u p1div %u fvco %u\n",
+ ndiv_int, ndiv_frac, p2div, p1div, fvco));
+
+ FVCO = fvco;
+#endif /* BCMDBG */
+
+ return FVCO / mdiv * 1000; /* Return CPU clock in [Hz] */
+} /* si_pmu1_cpuclk0 */
+
+/**
+ * BCM4369/4378/4387 specific function returning the CPU clock frequency.
+ * Does this by determining current Fvco and the setting of the clock divider that leads up to
+ * the ARM.
+ * Returns value in [Hz] units.
+ */
+static uint32
+BCMPOSTTRAPFN(si_pmu1_cpuclk0_pll2)(si_t *sih)
+{
+ uint32 FVCO = si_pmu1_pllfvco0_pll2(sih); /* in [khz] units */
+
+ /* Return ARM/SB clock */
+ return FVCO * 1000;
+} /* si_pmu1_cpuclk0_pll2 */
+
+/**
+ * Returns the MAC clock frequency. Called when e.g. MAC clk frequency has to change because of
+ * interference mitigation.
+ */
+uint32
+si_mac_clk(si_t *sih, osl_t *osh)
+{
+ uint8 mdiv2 = 0;
+ uint32 mac_clk = 0;
+ chipcregs_t *cc;
+ uint origidx;
+ bcm_int_bitmask_t intr_val;
+#ifdef BCMDBG_PMU
+ char chn[8];
+#endif
+
+ uint32 FVCO = si_pmu1_pllfvco0(sih); /* in [khz] units */
+
+ BCM_REFERENCE(osh);
+
+ /* Remember original core before switch to chipc */
+ cc = (chipcregs_t *)si_switch_core(sih, CC_CORE_ID, &origidx, &intr_val);
+ ASSERT(cc != NULL);
+ BCM_REFERENCE(cc);
+
+ switch (CHIPID(sih->chip)) {
+ case BCM43012_CHIP_ID:
+ case BCM43013_CHIP_ID:
+ case BCM43014_CHIP_ID:
+ mdiv2 = 2;
+ mac_clk = FVCO / mdiv2;
+ break;
+ default:
+ PMU_MSG(("si_mac_clk: Unknown chipid %s\n",
+ bcm_chipname(CHIPID(sih->chip), chn, 8)));
+ ASSERT(0);
+ break;
+ }
+
+ /* Return to original core */
+ si_restore_core(sih, origidx, &intr_val);
+
+ return mac_clk;
+} /* si_mac_clk */
+
+/* 4387 pll MAC channel divisor - for ftm */
+static uint32
+si_pmu_macdiv_4387(si_t *sih)
+{
+ uint32 tmp, mdiv;
+
+	/* TODO: when needed, return a different MAC clock freq.
+	 * for different MAC/slice!
+	 */
+ tmp = si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG1, 0, 0);
+ mdiv = (tmp & PMU4387_PLL0_PC1_ICH2_MDIV_MASK) >> PMU4387_PLL0_PC1_ICH2_MDIV_SHIFT;
+ ASSERT(mdiv != 0);
+
+ return mdiv;
+}
+
+/** Get chip's FVCO and PLLCTRL1 register value */
+int
+si_pmu_fvco_macdiv(si_t *sih, uint32 *fvco, uint32 *div)
+{
+ chipcregs_t *cc;
+ uint origidx;
+ bcm_int_bitmask_t intr_val;
+ int err = BCME_OK;
+#ifdef BCMDBG_PMU
+ char chn[8];
+#endif
+
+ if (fvco)
+ *fvco = si_pmu1_pllfvco0(sih)/1000;
+
+ /* Remember original core before switch to chipc */
+ cc = (chipcregs_t *)si_switch_core(sih, CC_CORE_ID, &origidx, &intr_val);
+ ASSERT(cc != NULL);
+ BCM_REFERENCE(cc);
+
+ switch (CHIPID(sih->chip)) {
+ case BCM4360_CHIP_ID:
+ case BCM43460_CHIP_ID:
+ if (div)
+ *div = si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG12, 0, 0) &
+ PMU1_PLL0_PC1_M1DIV_MASK;
+ break;
+
+ case BCM43602_CHIP_ID:
+ if (div) {
+ *div = (si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG4, 0, 0) &
+ PMU1_PLL0_PC1_M3DIV_MASK) >> PMU1_PLL0_PC1_M3DIV_SHIFT;
+ }
+ break;
+ case BCM4369_CHIP_GRPID:
+ case BCM4362_CHIP_GRPID:
+ if (div) {
+ *div = (si_pmu_pllcontrol(sih, PMU1_PLL0_PLLCTL1, 0, 0)
+ & PMU1_PLL0_PC1_M4DIV_MASK) >> PMU1_PLL0_PC1_M4DIV_SHIFT;
+ }
+ break;
+ case BCM43012_CHIP_ID:
+ case BCM43013_CHIP_ID:
+ case BCM43014_CHIP_ID:
+ /* mDIV is not supported for 43012 & divisor value is always 2 */
+ if (div)
+ *div = 2;
+ break;
+ case BCM4376_CHIP_GRPID:
+ case BCM4378_CHIP_GRPID:
+ if (div) {
+ *div = (si_pmu_pllcontrol(sih, PMU1_PLL0_PLLCTL1, 0, 0)
+ & PMU1_PLL0_PC1_M4DIV_MASK) >> PMU1_PLL0_PC1_M4DIV_SHIFT;
+ }
+ break;
+ case BCM4385_CHIP_GRPID:
+ case BCM4387_CHIP_GRPID:
+ case BCM4389_CHIP_GRPID:
+ case BCM4388_CHIP_GRPID:
+ if (div) {
+ *div = si_pmu_macdiv_4387(sih);
+ }
+ break;
+ default:
+ PMU_MSG(("si_mac_clk: Unknown chipid %s\n", bcm_chipname(sih->chip, chn, 8)));
+ err = BCME_ERROR;
+ }
+
+ /* Return to original core */
+ si_restore_core(sih, origidx, &intr_val);
+
+ return err;
+}
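+
+/* Usage sketch (illustrative only; 'mac_clk_mhz' is a hypothetical local):
+ *
+ *	uint32 fvco, div, mac_clk_mhz;
+ *	if (si_pmu_fvco_macdiv(sih, &fvco, &div) == BCME_OK && div != 0)
+ *		mac_clk_mhz = fvco / div;	// fvco is returned in [MHz] units
+ */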
+
+/** Return TRUE if scan retention memory's sleep/pm signal was asserted */
+bool
+BCMPOSTTRAPFN(si_pmu_reset_ret_sleep_log)(si_t *sih, osl_t *osh)
+{
+ pmuregs_t *pmu;
+ uint origidx;
+ uint32 ret_ctl;
+ bool was_sleep = FALSE;
+
+ /* Remember original core before switch to chipc */
+ origidx = si_coreidx(sih);
+ if (AOB_ENAB(sih)) {
+ pmu = si_setcore(sih, PMU_CORE_ID, 0);
+ } else {
+ pmu = si_setcoreidx(sih, SI_CC_IDX);
+ }
+ ASSERT(pmu != NULL);
+
+ ret_ctl = R_REG(osh, &pmu->retention_ctl);
+ if (ret_ctl & RCTL_MEM_RET_SLEEP_LOG_MASK) {
+ W_REG(osh, &pmu->retention_ctl, ret_ctl);
+ was_sleep = TRUE;
+ }
+
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+
+ return was_sleep;
+}
+
+/** Return TRUE if pmu rsrc XTAL_PU was de-asserted */
+bool
+BCMPOSTTRAPFN(si_pmu_reset_chip_sleep_log)(si_t *sih, osl_t *osh)
+{
+ pmuregs_t *pmu;
+ uint origidx;
+ bool was_sleep = FALSE;
+
+ /* Remember original core before switch to chipc */
+ origidx = si_coreidx(sih);
+ if (AOB_ENAB(sih)) {
+ pmu = si_setcore(sih, PMU_CORE_ID, 0);
+ } else {
+ pmu = si_setcoreidx(sih, SI_CC_IDX);
+ }
+ ASSERT(pmu != NULL);
+
+ if (PMUREV(sih->pmurev) >= 36) {
+ uint32 pmu_int_sts = R_REG(osh, &pmu->pmuintstatus);
+ if (pmu_int_sts & PMU_INT_STAT_RSRC_EVENT_INT0_MASK) {
+ /* write 1 to clear the status */
+ W_REG(osh, &pmu->pmuintstatus, PMU_INT_STAT_RSRC_EVENT_INT0_MASK);
+ was_sleep = TRUE;
+ }
+ } else {
+ was_sleep = si_pmu_reset_ret_sleep_log(sih, osh);
+ }
+
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+
+ return was_sleep;
+}
+
+/* For 43602a0 MCH2/MCH5 boards: power up PA Reference LDO */
+void
+si_pmu_switch_on_PARLDO(si_t *sih, osl_t *osh)
+{
+ uint32 mask;
+ pmuregs_t *pmu;
+ uint origidx;
+
+ /* Remember original core before switch to chipc/pmu */
+ origidx = si_coreidx(sih);
+ if (AOB_ENAB(sih)) {
+ pmu = si_setcore(sih, PMU_CORE_ID, 0);
+ } else {
+ pmu = si_setcoreidx(sih, SI_CC_IDX);
+ }
+ ASSERT(pmu != NULL);
+
+ switch (CHIPID(sih->chip)) {
+ CASE_BCM43602_CHIP:
+ mask = R_REG(osh, &pmu->min_res_mask) | PMURES_BIT(RES43602_PARLDO_PU);
+ W_REG(osh, &pmu->min_res_mask, mask);
+ mask = R_REG(osh, &pmu->max_res_mask) | PMURES_BIT(RES43602_PARLDO_PU);
+ W_REG(osh, &pmu->max_res_mask, mask);
+ break;
+ default:
+ break;
+ }
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+}
+
+/* For 43602a0 MCH2/MCH5 boards: power off PA Reference LDO */
+void
+si_pmu_switch_off_PARLDO(si_t *sih, osl_t *osh)
+{
+ uint32 mask;
+ pmuregs_t *pmu;
+ uint origidx;
+
+ /* Remember original core before switch to chipc/pmu */
+ origidx = si_coreidx(sih);
+ if (AOB_ENAB(sih)) {
+ pmu = si_setcore(sih, PMU_CORE_ID, 0);
+ } else {
+ pmu = si_setcoreidx(sih, SI_CC_IDX);
+ }
+ ASSERT(pmu != NULL);
+
+ switch (CHIPID(sih->chip)) {
+ case BCM43602_CHIP_ID:
+ case BCM43462_CHIP_ID:
+ mask = R_REG(osh, &pmu->min_res_mask) & ~PMURES_BIT(RES43602_PARLDO_PU);
+ W_REG(osh, &pmu->min_res_mask, mask);
+ mask = R_REG(osh, &pmu->max_res_mask) & ~PMURES_BIT(RES43602_PARLDO_PU);
+ W_REG(osh, &pmu->max_res_mask, mask);
+ break;
+ default:
+ break;
+ }
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+}
+
+/**
+ * Change VCO frequency (slightly), e.g. to avoid PHY errors due to spurs.
+ */
+static void
+BCMATTACHFN(si_set_bb_vcofreq_frac)(si_t *sih, osl_t *osh, int vcofreq, int frac, int xtalfreq)
+{
+ uint32 vcofreq_withfrac, p1div, ndiv_int, fraca, ndiv_mode, reg;
+ /* shifts / masks for PMU PLL control register #2 : */
+ uint32 ndiv_int_shift, ndiv_mode_shift, p1div_shift, pllctrl2_mask;
+ /* shifts / masks for PMU PLL control register #3 : */
+ uint32 pllctrl3_mask;
+ BCM_REFERENCE(osh);
+
+ if ((CHIPID(sih->chip) == BCM4360_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM43460_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM43526_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM4352_CHIP_ID) ||
+ BCM43602_CHIP(sih->chip)) {
+ if (si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, clk_ctl_st), 0, 0)
+ & CCS_HTAVAIL) {
+ PMU_MSG(("HTAVAIL is set, so not updating BBPLL Frequency \n"));
+ return;
+ }
+
+ ndiv_int_shift = 7;
+ ndiv_mode_shift = 4;
+ p1div_shift = 0;
+ pllctrl2_mask = 0xffffffff;
+ pllctrl3_mask = 0xffffffff;
+ } else {
+ /* put more chips here */
+ PMU_ERROR(("si_set_bb_vcofreq_frac: only work on 4360, 4352\n"));
+ return;
+ }
+
+ vcofreq_withfrac = vcofreq * 10000 + frac;
+ p1div = 0x1;
+ ndiv_int = vcofreq / xtalfreq;
+ ndiv_mode = (vcofreq_withfrac % (xtalfreq * 10000)) ? 3 : 0;
+ PMU_ERROR(("ChangeVCO => vco:%d, xtalF:%d, frac: %d, ndivMode: %d, ndivint: %d\n",
+ vcofreq, xtalfreq, frac, ndiv_mode, ndiv_int));
+
+ reg = (ndiv_int << ndiv_int_shift) |
+ (ndiv_mode << ndiv_mode_shift) |
+ (p1div << p1div_shift);
+ PMU_ERROR(("Data written into the PLL_CNTRL_ADDR2: %08x\n", reg));
+ si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG2, pllctrl2_mask, reg);
+
+ if (ndiv_mode) {
+		/* frac = ((vcofreq_withfrac % (xtalfreq * 10000)) * 2^24) / (xtalfreq * 10000) */
+ uint32 r1, r0;
+ math_uint64_multiple_add(
+ &r1, &r0, vcofreq_withfrac % (xtalfreq * 10000), 1 << 24, 0);
+ math_uint64_divide(&fraca, r1, r0, xtalfreq * 10000);
+ PMU_ERROR(("Data written into the PLL_CNTRL_ADDR3 (Fractional): %08x\n", fraca));
+ si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG3, pllctrl3_mask, fraca);
+ }
+
+ si_pmu_pllupd(sih);
+} /* si_set_bb_vcofreq_frac */
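+
+/* Worked example (using the si_set_bb_vcofreq_frac(sih, osh, 960, 98, 40)
+ * call made from si_pmu_pll_init() below): vcofreq_withfrac = 9600098,
+ * ndiv_int = 960 / 40 = 24, ndiv_mode = 3 (since 9600098 % 400000 != 0),
+ * and fraca = (98 * 2^24) / 400000 = 4110 (0x100E), i.e. the BBPLL runs at
+ * 960.0098 MHz from a 40 MHz x-tal.
+ */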
+
+/**
+ * given x-tal frequency, returns BaseBand vcofreq with fraction in 100Hz
+ * @param xtalfreq In [Mhz] units.
+ * @return In [100Hz] units.
+ */
+uint32
+si_pmu_get_bb_vcofreq(si_t *sih, osl_t *osh, int xtalfreq)
+{
+ uint32 ndiv_int, /* 9 bits integer divider */
+ ndiv_mode,
+ frac = 0, /* 24 bits fractional divider */
+ p1div; /* predivider: divides x-tal freq */
+ uint32 xtal1, vcofrac = 0, vcofreq;
+ uint32 r1, r0, reg;
+
+ BCM_REFERENCE(osh);
+
+ if ((CHIPID(sih->chip) == BCM4360_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM43460_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM43526_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM4352_CHIP_ID) ||
+ BCM43602_CHIP(sih->chip)) {
+ reg = si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG2, 0, 0);
+ ndiv_int = reg >> 7;
+ ndiv_mode = (reg >> 4) & 7;
+ p1div = 1; /* do not divide x-tal frequency */
+
+ if (ndiv_mode)
+ frac = si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG3, 0, 0);
+ } else if ((BCM4369_CHIP(sih->chip) &&
+ CST4369_CHIPMODE_PCIE(sih->chipst)) ||
+ BCM4376_CHIP(sih->chip) ||
+ BCM4378_CHIP(sih->chip) ||
+ (BCM4362_CHIP(sih->chip) &&
+ CST4362_CHIPMODE_PCIE(sih->chipst))) {
+ reg = si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG2, 0, 0);
+ ndiv_int = reg >> 20;
+ p1div = (reg >> 16) & 0xf;
+ frac = si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG3, 0, 0) & 0x00fffff;
+ ndiv_mode = 1;
+ } else {
+ /* put more chips here */
+ PMU_ERROR(("si_pmu_get_bb_vcofreq: only work on 4360, 4352, 4369, 4378\n"));
+ ASSERT(FALSE);
+ return 0;
+ }
+
+ xtal1 = 10000 * xtalfreq / p1div; /* in [100Hz] units */
+
+ if (ndiv_mode) {
+ /* vcofreq fraction = (xtal1 * frac + (1 << 23)) / (1 << 24);
+ * handle overflow
+ */
+ math_uint64_multiple_add(&r1, &r0, xtal1, frac, 1 << 23);
+ vcofrac = (r1 << 8) | (r0 >> 24);
+ }
+
+ if (ndiv_int == 0) {
+ ASSERT(0);
+ return 0;
+ }
+
+ if ((int)xtal1 > (int)((0xffffffff - vcofrac) / ndiv_int)) {
+ PMU_ERROR(("si_pmu_get_bb_vcofreq: xtalfreq is too big, %d\n", xtalfreq));
+ return 0;
+ }
+
+ vcofreq = xtal1 * ndiv_int + vcofrac;
+ return vcofreq;
+} /* si_pmu_get_bb_vcofreq */
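+
+/* Worked example (inverse of the si_set_bb_vcofreq_frac() example above,
+ * assuming xtalfreq = 40 MHz, p1div = 1, ndiv_int = 24, frac = 0x100E):
+ * xtal1 = 10000 * 40 = 400000 [100Hz units],
+ * vcofrac = (400000 * 4110 + 2^23) / 2^24 = 98,
+ * vcofreq = 400000 * 24 + 98 = 9600098, i.e. 960.0098 MHz.
+ */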
+
+/** Enable PMU 1Mhz clock */
+static void
+si_pmu_enb_slow_clk(si_t *sih, osl_t *osh, uint32 xtalfreq)
+{
+ uint32 val;
+ pmuregs_t *pmu;
+ uint origidx;
+
+ if (PMUREV(sih->pmurev) < 24) {
+ PMU_ERROR(("si_pmu_enb_slow_clk: Not supported %d\n", PMUREV(sih->pmurev)));
+ return;
+ }
+
+ /* Remember original core before switch to chipc/pmu */
+ origidx = si_coreidx(sih);
+ if (AOB_ENAB(sih)) {
+ pmu = si_setcore(sih, PMU_CORE_ID, 0);
+ } else {
+ pmu = si_setcoreidx(sih, SI_CC_IDX);
+ }
+ ASSERT(pmu != NULL);
+
+ /* twiki PmuRev30, OneMhzToggleEn:31, AlpPeriod[23:0] */
+ if (PMUREV(sih->pmurev) >= 38) {
+		/* For PmuRev >= 38 chips use only the AlpPeriod[23:0] chip default value,
+		 * e.g. ROUND(POWER(2,26) / (59.970 / 2 MHz)) for 4387/4385, etc.
+		 */
+ val = R_REG(osh, &pmu->slowclkperiod) | PMU30_ALPCLK_ONEMHZ_ENAB;
+ } else {
+ if (PMUREV(sih->pmurev) >= 30) {
+ /* AlpPeriod = ROUND(POWER(2,26)/ALP_CLK_FREQ_IN_MHz,0) */
+			/* The calculation is accurate only for one decimal place of the
+			 * xtal frequency (like 37.4); it is not accurate for more than
+			 * one decimal place (like 37.43). Also, no rounding is done on
+			 * the final result.
+			 */
+ ROMMABLE_ASSERT((xtalfreq/100)*100 == xtalfreq);
+ val = (((1 << 26)*10)/(xtalfreq/100));
+			/* set bit 31 to enable OneMhzToggle; a 1-usec-wide
+			 * toggle signal will be generated
+			 */
+ val |= PMU30_ALPCLK_ONEMHZ_ENAB;
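+			/* Worked example (illustrative only): for xtalfreq = 37400,
+			 * val = ((1 << 26) * 10) / 374 = 1794354 before the enable
+			 * bit is OR'ed in (2^26 / 37.4 ~= 1794354.6, truncated).
+			 */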
+ } else { /* twiki PmuRev24, OneMhzToggleEn:16, AlpPeriod[15:0] */
+ if (xtalfreq == 37400) {
+ val = 0x101B6;
+ } else if (xtalfreq == 40000) {
+ val = 0x10199;
+ } else {
+ PMU_ERROR(("si_pmu_enb_slow_clk: xtalfreq is not supported, %d\n",
+ xtalfreq));
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+ return;
+ }
+ }
+ }
+
+ W_REG(osh, &pmu->slowclkperiod, val);
+
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+}
+
+/**
+ * Initializes PLL given an x-tal frequency.
+ * Calls si_pmuX_pllinitY() type of functions, where the reasoning behind 'X' and 'Y' is historical
+ * rather than logical.
+ *
+ * xtalfreq : x-tal frequency in [KHz]
+ */
+void
+BCMATTACHFN(si_pmu_pll_init)(si_t *sih, osl_t *osh, uint xtalfreq)
+{
+ pmuregs_t *pmu;
+ uint origidx;
+#ifdef BCMDBG_PMU
+ char chn[8];
+#endif
+ BCM_REFERENCE(pmu1_xtaltab0_880);
+ BCM_REFERENCE(pmu1_xtaltab0_1760);
+
+ ASSERT(sih->cccaps & CC_CAP_PMU);
+
+ /* Remember original core before switch to chipc/pmu */
+ origidx = si_coreidx(sih);
+ if (AOB_ENAB(sih)) {
+ pmu = si_setcore(sih, PMU_CORE_ID, 0);
+ } else {
+ pmu = si_setcoreidx(sih, SI_CC_IDX);
+ }
+ ASSERT(pmu != NULL);
+
+ switch (CHIPID(sih->chip)) {
+ case BCM4360_CHIP_ID:
+ case BCM43460_CHIP_ID:
+ case BCM4352_CHIP_ID: {
+ if (CHIPREV(sih->chiprev) > 2)
+ si_set_bb_vcofreq_frac(sih, osh, 960, 98, 40);
+ break;
+ }
+ CASE_BCM43602_CHIP:
+ si_set_bb_vcofreq_frac(sih, osh, 960, 98, 40);
+ break;
+ case BCM43012_CHIP_ID:
+ case BCM43013_CHIP_ID:
+ case BCM43014_CHIP_ID:
+ case BCM4369_CHIP_GRPID:
+ case BCM4362_CHIP_GRPID:
+ case BCM4376_CHIP_GRPID:
+ case BCM4378_CHIP_GRPID:
+ case BCM4385_CHIP_GRPID:
+ case BCM4387_CHIP_GRPID:
+ case BCM4388_CHIP_GRPID:
+ case BCM4389_CHIP_GRPID:
+ si_pmu1_pllinit1(sih, osh, pmu, xtalfreq); /* nvram PLL overrides + enables PLL */
+ break;
+ default:
+ PMU_MSG(("No PLL init done for chip %s rev %d pmurev %d\n",
+ bcm_chipname(
+ CHIPID(sih->chip), chn, 8), CHIPREV(sih->chiprev), PMUREV(sih->pmurev)));
+ break;
+ }
+
+#ifdef BCMDBG_FORCEHT
+	si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, clk_ctl_st), CCS_FORCEHT, CCS_FORCEHT);
+#endif
+
+ si_pmu_enb_slow_clk(sih, osh, xtalfreq);
+
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+} /* si_pmu_pll_init */
+
+/** get alp clock frequency in [Hz] units */
+uint32
+BCMPOSTTRAPFN(si_pmu_alp_clock)(si_t *sih, osl_t *osh)
+{
+ pmuregs_t *pmu;
+ uint origidx;
+ uint32 clock = ALP_CLOCK;
+#ifdef BCMDBG_PMU
+ char chn[8];
+#endif
+
+ ASSERT(sih->cccaps & CC_CAP_PMU);
+
+ /* Remember original core before switch to chipc/pmu */
+ origidx = si_coreidx(sih);
+ if (AOB_ENAB(sih)) {
+ pmu = si_setcore(sih, PMU_CORE_ID, 0);
+ } else {
+ pmu = si_setcoreidx(sih, SI_CC_IDX);
+ }
+ ASSERT(pmu != NULL);
+
+ switch (CHIPID(sih->chip)) {
+ case BCM4360_CHIP_ID:
+ case BCM43460_CHIP_ID:
+ case BCM4352_CHIP_ID:
+ case BCM43526_CHIP_ID:
+ if (sih->chipst & CST4360_XTAL_40MZ)
+ clock = 40000 * 1000;
+ else
+ clock = 20000 * 1000;
+ break;
+
+ CASE_BCM43602_CHIP:
+		/* always 40MHz */
+ clock = 40000 * 1000;
+ break;
+ case BCM43012_CHIP_ID:
+ case BCM43013_CHIP_ID:
+ case BCM43014_CHIP_ID:
+#ifndef BCMSDIOLITE
+ case BCM4369_CHIP_GRPID:
+#endif /* BCMSDIOLITE */
+ case BCM4362_CHIP_GRPID:
+ case BCM4376_CHIP_GRPID:
+ case BCM4378_CHIP_GRPID:
+ case BCM4385_CHIP_GRPID:
+ case BCM4387_CHIP_GRPID:
+ case BCM4388_CHIP_GRPID:
+ case BCM4389_CHIP_GRPID:
+ case BCM4397_CHIP_GRPID:
+ clock = si_pmu1_alpclk0(sih, osh, pmu);
+ break;
+#ifdef BCMSDIOLITE
+ case BCM4369_CHIP_ID:
+		/* always 25MHz */
+ clock = 25000 * 1000;
+ break;
+#endif /* BCMSDIOLITE */
+ default:
+ PMU_MSG(("No ALP clock specified "
+ "for chip %s rev %d pmurev %d, using default %d Hz\n",
+ bcm_chipname(
+ CHIPID(sih->chip), chn, 8), CHIPREV(sih->chiprev),
+ PMUREV(sih->pmurev), clock));
+ break;
+ }
+
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+
+ return clock; /* in [Hz] units */
+} /* si_pmu_alp_clock */
+
+/**
+ * Find the output of the "m" pll divider given pll controls that start with
+ * pllreg "pll0", i.e. 12 for main, 6 for phy, 0 for misc.
+ */
+static uint32
+BCMPOSTTRAPFN(si_pmu5_clock)(si_t *sih, osl_t *osh, pmuregs_t *pmu, uint pll0, uint m)
+{
+ uint32 tmp, div, ndiv, p1, p2, fc;
+
+ if ((pll0 & 3) || (pll0 > PMU4716_MAINPLL_PLL0)) {
+ PMU_ERROR(("si_pmu5_clock: Bad pll0: %d\n", pll0));
+ return 0;
+ }
+
+ /* Strictly there is an m5 divider, but I'm not sure we use it */
+ if ((m == 0) || (m > 4)) {
+ PMU_ERROR(("si_pmu5_clock: Bad m divider: %d\n", m));
+ return 0;
+ }
+
+ W_REG(osh, &pmu->pllcontrol_addr, pll0 + PMU5_PLL_P1P2_OFF);
+ (void)R_REG(osh, &pmu->pllcontrol_addr);
+ tmp = R_REG(osh, &pmu->pllcontrol_data);
+ p1 = (tmp & PMU5_PLL_P1_MASK) >> PMU5_PLL_P1_SHIFT;
+ p2 = (tmp & PMU5_PLL_P2_MASK) >> PMU5_PLL_P2_SHIFT;
+
+ W_REG(osh, &pmu->pllcontrol_addr, pll0 + PMU5_PLL_M14_OFF);
+ (void)R_REG(osh, &pmu->pllcontrol_addr);
+ tmp = R_REG(osh, &pmu->pllcontrol_data);
+ div = (tmp >> ((m - 1) * PMU5_PLL_MDIV_WIDTH)) & PMU5_PLL_MDIV_MASK;
+
+ W_REG(osh, &pmu->pllcontrol_addr, pll0 + PMU5_PLL_NM5_OFF);
+ (void)R_REG(osh, &pmu->pllcontrol_addr);
+ tmp = R_REG(osh, &pmu->pllcontrol_data);
+ ndiv = (tmp & PMU5_PLL_NDIV_MASK) >> PMU5_PLL_NDIV_SHIFT;
+
+	/* Do calculation in MHz */
+ fc = si_pmu_alp_clock(sih, osh) / 1000000;
+ fc = (p1 * ndiv * fc) / p2;
+
+ PMU_NONE(("si_pmu5_clock: p1=%d, p2=%d, ndiv=%d(0x%x), m%d=%d; fc=%d, clock=%d\n",
+ p1, p2, ndiv, ndiv, m, div, fc, fc / div));
+
+ /* Return clock in Hertz */
+ return ((fc / div) * 1000000);
+} /* si_pmu5_clock */
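+
+/*
+ * Illustrative sketch, not part of the driver: the divider math above in plain
+ * form. All inputs are hypothetical example values; the real code reads p1, p2,
+ * ndiv and the m-divider from the PLL control registers.
+ */
+static uint32
+pmu5_clock_sketch(uint32 alp_hz, uint32 p1, uint32 p2, uint32 ndiv, uint32 mdiv)
+{
+	uint32 fc = alp_hz / 1000000;	/* ALP (reference) clock in MHz */
+	fc = (p1 * ndiv * fc) / p2;	/* VCO frequency in MHz */
+	return (fc / mdiv) * 1000000;	/* selected m-divider output in Hz */
+}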
+
+/**
+ * Get backplane clock frequency, returns a value in [Hz] units.
+ * For designs that feed the same clock to both backplane and CPU just return the CPU clock speed.
+ */
+uint32
+BCMPOSTTRAPFN(si_pmu_si_clock)(si_t *sih, osl_t *osh)
+{
+ pmuregs_t *pmu;
+ uint origidx;
+	uint32 clock = HT_CLOCK;	/* in [Hz] units */
+#ifdef BCMDBG_PMU
+ char chn[8];
+#endif
+
+ ASSERT(sih->cccaps & CC_CAP_PMU);
+
+ /* Remember original core before switch to chipc/pmu */
+ origidx = si_coreidx(sih);
+ if (AOB_ENAB(sih)) {
+ pmu = si_setcore(sih, PMU_CORE_ID, 0);
+ } else {
+ pmu = si_setcoreidx(sih, SI_CC_IDX);
+ }
+ ASSERT(pmu != NULL);
+
+ switch (CHIPID(sih->chip)) {
+ case BCM4360_CHIP_ID:
+ case BCM43460_CHIP_ID:
+ case BCM43526_CHIP_ID:
+ case BCM4352_CHIP_ID:
+ case BCM43012_CHIP_ID:
+ case BCM43013_CHIP_ID:
+ case BCM43014_CHIP_ID:
+ clock = si_pmu1_cpuclk0(sih, osh, pmu);
+ break;
+
+ CASE_BCM43602_CHIP: {
+ uint32 mdiv;
+ /* Ch3 is connected to backplane_clk. Read 'bbpll_i_m3div' from pllctl[4] */
+ mdiv = si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG4, 0, 0);
+ mdiv = (mdiv & PMU1_PLL0_PC1_M3DIV_MASK) >> PMU1_PLL0_PC1_M3DIV_SHIFT;
+ ASSERT(mdiv != 0);
+ clock = si_pmu1_pllfvco0(sih) / mdiv * 1000;
+ break;
+ }
+
+ case BCM4369_CHIP_GRPID:
+ case BCM4362_CHIP_GRPID:
+ case BCM4376_CHIP_GRPID:
+ case BCM4378_CHIP_GRPID:
+ clock = si_pmu1_cpuclk0(sih, osh, pmu);
+ break;
+
+ case BCM4385_CHIP_GRPID:
+ case BCM4387_CHIP_GRPID:
+ case BCM4388_CHIP_GRPID:
+ case BCM4389_CHIP_GRPID:
+ case BCM4397_CHIP_GRPID:
+ clock = si_pmu_bpclk_4387(sih);
+ break;
+
+ default:
+ PMU_MSG(("No backplane clock specified "
+ "for chip %s rev %d pmurev %d, using default %d Hz\n",
+ bcm_chipname(
+ CHIPID(sih->chip), chn, 8), CHIPREV(sih->chiprev),
+ PMUREV(sih->pmurev), clock));
+ break;
+ }
+
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+
+ return clock;
+} /* si_pmu_si_clock */
+
+/** returns CPU clock frequency in [Hz] units */
+uint32
+BCMPOSTTRAPFN(si_pmu_cpu_clock)(si_t *sih, osl_t *osh)
+{
+ pmuregs_t *pmu;
+ uint origidx;
+	uint32 clock;	/* in [Hz] units */
+
+ uint32 tmp;
+ uint32 armclk_offcnt, armclk_oncnt;
+
+ ASSERT(sih->cccaps & CC_CAP_PMU);
+
+ /* Remember original core before switch to chipc/pmu */
+ origidx = si_coreidx(sih);
+ if (AOB_ENAB(sih)) {
+ pmu = si_setcore(sih, PMU_CORE_ID, 0);
+ } else {
+ pmu = si_setcoreidx(sih, SI_CC_IDX);
+ }
+ ASSERT(pmu != NULL);
+
+ if (BCM4369_CHIP(sih->chip) ||
+ BCM4376_CHIP(sih->chip) ||
+ BCM4378_CHIP(sih->chip) ||
+ BCM4385_CHIP(sih->chip) ||
+ BCM4387_CHIP(sih->chip) ||
+ BCM4388_CHIP(sih->chip) ||
+ BCM4389_CHIP(sih->chip) ||
+ BCM4397_CHIP(sih->chip) ||
+ BCM4362_CHIP(sih->chip)) {
+ clock = si_pmu1_cpuclk0_pll2(sih); /* for chips with separate CPU PLL */
+ } else if ((PMUREV(sih->pmurev) >= 5) &&
+ !((CHIPID(sih->chip) == BCM4360_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM4352_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM43526_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM43460_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM43012_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM43013_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM43014_CHIP_ID) ||
+ 0)) {
+ uint pll = PMU4716_MAINPLL_PLL0;
+
+ if (BCM43602_CHIP(sih->chip)) {
+ clock = si_pmu1_cpuclk0(sih, osh, pmu);
+ } else {
+ clock = si_pmu5_clock(sih, osh, pmu, pll, PMU5_MAINPLL_CPU);
+ }
+ } else {
+ clock = si_pmu_si_clock(sih, osh);
+ }
+
+ if ((CHIPID(sih->chip) == BCM43012_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM43013_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM43014_CHIP_ID)) {
+		/* ARM clock using the fast-divider calculation
+		 * (see the sketch after this function):
+		 * Fout = (on_count + 1) * Fin / (on_count + 1 + off_count),
+		 * where Fin = FVCO/2
+		 */
+ tmp = si_pmu_chipcontrol(sih, PMU1_PLL0_CHIPCTL1, 0, 0);
+ armclk_offcnt =
+ (tmp & CCTL_43012_ARM_OFFCOUNT_MASK) >> CCTL_43012_ARM_OFFCOUNT_SHIFT;
+ armclk_oncnt =
+ (tmp & CCTL_43012_ARM_ONCOUNT_MASK) >> CCTL_43012_ARM_ONCOUNT_SHIFT;
+ clock = (armclk_oncnt + 1) * clock/(armclk_oncnt + 1 + armclk_offcnt);
+ }
+
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+ return clock;
+} /* si_pmu_cpu_clock */
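+
+/*
+ * Illustrative sketch, not part of the driver: the 43012 ARM fast-divider
+ * formula used above, Fout = (on_count + 1) * Fin / (on_count + 1 + off_count),
+ * in isolation. Inputs are hypothetical; this mirrors the driver's 32-bit
+ * arithmetic, so (oncnt + 1) * fin_hz must not overflow.
+ */
+static uint32
+pmu_armclk_fastdiv_sketch(uint32 fin_hz, uint32 oncnt, uint32 offcnt)
+{
+	return (oncnt + 1) * fin_hz / (oncnt + 1 + offcnt);
+}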
+
+#ifdef __ARM_ARCH_7A__
+static uint32
+si_pmu_mem_ca7clock(si_t *sih, osl_t *osh)
+{
+ uint32 clock = 0;
+ int8 mdiv = 1;
+ uint idx = si_coreidx(sih);
+ bool fastclk;
+ ca7regs_t *regs = si_setcore(sih, ARMCA7_CORE_ID, 0);
+
+ if (regs == NULL) {
+ goto end;
+ }
+
+ fastclk = ((R_REG(osh, ARMREG(regs, clk_ctl_st)) & CCS_ARMFASTCLOCKREQ) != 0);
+
+ if (fastclk) {
+ uint32 fvco = si_pmu_pll28nm_fvco(sih);
+ if (si_corerev(sih) >= 7) {
+ mdiv = (R_REG(osh, ARMREG(regs, corecontrol)) & ACC_CLOCKRATIO_MASK) >>
+ ACC_CLOCKRATIO_SHIFT;
+ } else {
+ ASSERT(0);
+ }
+
+ if (mdiv == 0) {
+ ASSERT(0);
+ clock = 0;
+ } else {
+ clock = (fvco / mdiv);
+ }
+ } else {
+ clock = si_pmu_si_clock(sih, osh);
+ }
+
+end:
+ si_setcoreidx(sih, idx);
+ return clock;
+
+}
+#endif /* __ARM_ARCH_7A__ */
+
+/** get memory clock frequency, which is the same as the HT clock for newer chips. Returns [Hz]. */
+uint32
+BCMINITFN(si_pmu_mem_clock)(si_t *sih, osl_t *osh)
+{
+ pmuregs_t *pmu;
+ uint origidx;
+ uint32 clock;
+
+ ASSERT(sih->cccaps & CC_CAP_PMU);
+
+ /* Remember original core before switch to chipc/pmu */
+ origidx = si_coreidx(sih);
+ if (AOB_ENAB(sih)) {
+ pmu = si_setcore(sih, PMU_CORE_ID, 0);
+ } else {
+ pmu = si_setcoreidx(sih, SI_CC_IDX);
+ }
+ ASSERT(pmu != NULL);
+
+ if ((PMUREV(sih->pmurev) >= 5) &&
+ !((BCM4369_CHIP(sih->chip)) ||
+ (BCM4362_CHIP(sih->chip)) ||
+ BCM43602_CHIP(sih->chip) ||
+ (CHIPID(sih->chip) == BCM43012_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM43013_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM43014_CHIP_ID) ||
+ BCM4376_CHIP(sih->chip) ||
+ BCM4378_CHIP(sih->chip) ||
+ BCM4387_CHIP(sih->chip) ||
+ BCM4388_CHIP(sih->chip) ||
+ BCM4389_CHIP(sih->chip) ||
+ BCM4397_CHIP(sih->chip) ||
+ 0)) {
+ uint pll = PMU4716_MAINPLL_PLL0;
+
+ clock = si_pmu5_clock(sih, osh, pmu, pll, PMU5_MAINPLL_MEM);
+ } else {
+#ifdef __ARM_ARCH_7A__
+ clock = si_pmu_mem_ca7clock(sih, osh);
+#else /* !__ARM_ARCH_7A__ */
+ clock = si_pmu_si_clock(sih, osh); /* mem clk same as backplane clk */
+#endif /* __ARM_ARCH_7A__ */
+ }
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+ return clock;
+} /* si_pmu_mem_clock */
+
+/*
+ * ILP cycles per second are now calculated during CPU init in a new way
+ * for better accuracy. We set the value here for compatibility.
+ *
+ * On platforms that do not do this, we fall back to the old method.
+ */
+
+#define ILP_CALC_DUR 10 /* ms, make sure 1000 can be divided by it. */
+
+static uint32 ilpcycles_per_sec = 0;
+
+void
+BCMPOSTTRAPFN(si_pmu_ilp_clock_set)(uint32 cycles_per_sec)
+{
+ ilpcycles_per_sec = cycles_per_sec;
+}
+
+/**
+ * Measure ILP clock frequency. Returns a value in [Hz] units.
+ *
+ * The variable ilpcycles_per_sec is used to store the ILP clock speed. The value
+ * is calculated when the function is called the first time and then cached.
+ * The change in PMU timer count is measured across a delay of ILP_CALC_DUR msec.
+ * Before the first time the function is called, one must make sure the HT clock is
+ * turned on and used to feed the CPU and that OSL_DELAY() is calibrated.
+ */
+uint32
+BCMINITFN(si_pmu_ilp_clock)(si_t *sih, osl_t *osh)
+{
+ if (ISSIM_ENAB(sih))
+ return ILP_CLOCK;
+
+ if (ilpcycles_per_sec == 0) {
+ uint32 start, end, delta;
+ pmuregs_t *pmu;
+ uint origidx = si_coreidx(sih);
+
+ if (AOB_ENAB(sih)) {
+ pmu = si_setcore(sih, PMU_CORE_ID, 0);
+ } else {
+ pmu = si_setcoreidx(sih, SI_CC_IDX);
+ }
+ ASSERT(pmu != NULL);
+ start = R_REG(osh, &pmu->pmutimer);
+ /* PR88659: verify pmutimer reads */
+ if (start != R_REG(osh, &pmu->pmutimer))
+ start = R_REG(osh, &pmu->pmutimer);
+ OSL_DELAY(ILP_CALC_DUR * 1000);
+ end = R_REG(osh, &pmu->pmutimer);
+ if (end != R_REG(osh, &pmu->pmutimer))
+ end = R_REG(osh, &pmu->pmutimer);
+ delta = end - start;
+ ilpcycles_per_sec = delta * (1000 / ILP_CALC_DUR);
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+ }
+
+ ASSERT(ilpcycles_per_sec != 0);
+ return ilpcycles_per_sec;
+}
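+
+/*
+ * Illustrative sketch, not part of the driver: the measurement above counts PMU
+ * timer ticks across a fixed delay and scales to ticks per second. With
+ * ILP_CALC_DUR = 10 [ms], a delta of 327 ticks gives 32700 [Hz], a typical
+ * ~32.7KHz ILP clock. The helper name and values are hypothetical.
+ */
+static uint32
+pmu_ilp_hz_sketch(uint32 start_ticks, uint32 end_ticks)
+{
+	uint32 delta = end_ticks - start_ticks;	/* unsigned subtract is wrap-safe */
+	return delta * (1000 / ILP_CALC_DUR);	/* scale the 10 ms sample to 1 second */
+}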
+#endif /* !defined(BCMDONGLEHOST) */
+
+/**
+ * Reads/writes a chipcontrol reg. Performs core switching if required; at function exit the
+ * original core is restored. Depending on chip type, reads/writes chipcontrol regs in the CC core
+ * (older chips) or to chipcontrol regs in PMU core (later chips).
+ */
+uint32
+BCMPOSTTRAPFN(si_pmu_chipcontrol)(si_t *sih, uint reg, uint32 mask, uint32 val)
+{
+ pmu_corereg(sih, SI_CC_IDX, chipcontrol_addr, ~0, reg);
+ return pmu_corereg(sih, SI_CC_IDX, chipcontrol_data, mask, val);
+}
+
+/**
+ * Reads/writes a voltage regulator (vreg) register. Performs core switching if required; at
+ * function exit the original core is restored. Depending on chip type, writes to regulator regs
+ * in CC core (older chips) or to regulator regs in PMU core (later chips).
+ */
+uint32
+BCMPOSTTRAPFN(si_pmu_vreg_control)(si_t *sih, uint reg, uint32 mask, uint32 val)
+{
+ pmu_corereg(sih, SI_CC_IDX, regcontrol_addr, ~0, reg);
+ return pmu_corereg(sih, SI_CC_IDX, regcontrol_data, mask, val);
+}
+
+/**
+ * Reads/writes a PLL control register. Performs core switching if required; at function exit the
+ * original core is restored. Depending on chip type, writes to PLL control regs in CC core (older
+ * chips) or to PLL control regs in PMU core (later chips).
+ */
+uint32
+BCMPOSTTRAPFN(si_pmu_pllcontrol)(si_t *sih, uint reg, uint32 mask, uint32 val)
+{
+ pmu_corereg(sih, SI_CC_IDX, pllcontrol_addr, ~0, reg);
+ return pmu_corereg(sih, SI_CC_IDX, pllcontrol_data, mask, val);
+}
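+
+/*
+ * Illustrative sketch, not part of the driver: the three accessors above share
+ * one indirect-access pattern: write the register index to the *_addr port,
+ * then read-modify-write the *_data port. This assumes pmu_corereg() follows
+ * the usual (r & ~mask) | val convention, writing only when mask or val is
+ * nonzero; the helper below is hypothetical.
+ */
+static uint32
+pmu_indirect_rmw_sketch(volatile uint32 *addr_reg, volatile uint32 *data_reg,
+	uint32 reg, uint32 mask, uint32 val)
+{
+	*addr_reg = reg;			/* select the indirect register */
+	if (mask || val)			/* write path */
+		*data_reg = (*data_reg & ~mask) | val;
+	return *data_reg;			/* read back through the data port */
+}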
+
+/**
+ * Balance between stable SDIO operation and power consumption is achieved using this function.
+ * Note that each drive strength table is for a specific VDDIO of the SDIO pads, ideally this
+ * function should read the VDDIO itself to select the correct table. For now it has been solved
+ * with the 'BCM_SDIO_VDDIO' preprocessor constant.
+ *
+ * 'drivestrength': desired pad drive strength in mA. Drive strength of 0 requests tri-state (if
+ * hardware supports this), if no hw support drive strength is not programmed.
+ */
+void
+BCMINITFN(si_sdiod_drive_strength_init)(si_t *sih, osl_t *osh, uint32 drivestrength)
+{
+ /*
+ * Note:
+ * This function used to set the SDIO drive strength via PMU_CHIPCTL1 for the
+ * 43143, 4330, 4334, 4336, 43362 chips. These chips are now no longer supported, so
+ * the code has been deleted.
+ * Newer chips have the SDIO drive strength setting via a GCI Chip Control register,
+ * but the bit definitions are chip-specific. We are keeping this function available
+ * (accessed via DHD 'sdiod_drive' IOVar) in case these newer chips need to provide access.
+ */
+ UNUSED_PARAMETER(sih);
+ UNUSED_PARAMETER(osh);
+ UNUSED_PARAMETER(drivestrength);
+}
+
+#if !defined(BCMDONGLEHOST)
+/** initialize PMU */
+void
+BCMATTACHFN(si_pmu_init)(si_t *sih, osl_t *osh)
+{
+ pmuregs_t *pmu;
+ uint origidx;
+
+ ASSERT(sih->cccaps & CC_CAP_PMU);
+
+ /* Remember original core before switch to chipc/pmu */
+ origidx = si_coreidx(sih);
+ if (AOB_ENAB(sih)) {
+ pmu = si_setcore(sih, PMU_CORE_ID, 0);
+ } else {
+ pmu = si_setcoreidx(sih, SI_CC_IDX);
+ }
+ ASSERT(pmu != NULL);
+
+#if defined(BT_WLAN_REG_ON_WAR)
+ si_pmu_reg_on_war_ext_wake_perst_clear(sih);
+ si_pmu_reg_on_war_ext_wake_perst_set(sih);
+#endif /* BT_WLAN_REG_ON_WAR */
+
+ /* Feature is added in PMU rev. 1 but doesn't work until rev. 2 */
+ if (PMUREV(sih->pmurev) == 1)
+ AND_REG(osh, &pmu->pmucontrol, ~PCTL_NOILP_ON_WAIT);
+ else if (PMUREV(sih->pmurev) >= 2)
+ OR_REG(osh, &pmu->pmucontrol, PCTL_NOILP_ON_WAIT);
+
+ /* Changes from PMU revision 26 are not included in revision 27 */
+ if ((PMUREV(sih->pmurev) >= 26) && (PMUREV(sih->pmurev) != 27)) {
+ uint32 val = PMU_INTC_ALP_REQ | PMU_INTC_HT_REQ | PMU_INTC_HQ_REQ;
+ pmu_corereg(sih, SI_CC_IDX, pmuintctrl0, val, val);
+
+ val = RSRC_INTR_MASK_TIMER_INT_0;
+ pmu_corereg(sih, SI_CC_IDX, pmuintmask0, val, val);
+ (void)pmu_corereg(sih, SI_CC_IDX, pmuintmask0, 0, 0);
+ }
+
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+}
+
+uint32
+si_pmu_rsrc_macphy_clk_deps(si_t *sih, osl_t *osh, int macunit)
+{
+ uint32 deps = 0;
+ rsc_per_chip_t *rsc;
+ uint origidx;
+ pmuregs_t *pmu = NULL;
+ uint8 rsc_num;
+
+ /* Remember original core before switch to chipc/pmu */
+ origidx = si_coreidx(sih);
+ if (AOB_ENAB(sih)) {
+ pmu = si_setcore(sih, PMU_CORE_ID, 0);
+ } else {
+ pmu = si_setcoreidx(sih, SI_CC_IDX);
+ }
+
+ ASSERT(pmu != NULL);
+
+ rsc = si_pmu_get_rsc_positions(sih);
+ if (macunit == 0) {
+ rsc_num = rsc->macphy_clkavail;
+ } else if (macunit == 1) {
+ rsc_num = rsc->macphy_aux_clkavail;
+ } else if (macunit == 2) {
+ rsc_num = rsc->macphy_scan_clkavail;
+ } else {
+ PMU_ERROR(("si_pmu_rsrc_macphy_clk_deps: slice %d is not supported\n", macunit));
+ rsc_num = NO_SUCH_RESOURCE; /* to satisfy the compiler */
+ ASSERT(0);
+ }
+ deps = si_pmu_res_deps(sih, osh, pmu, PMURES_BIT(rsc_num), TRUE);
+ deps |= PMURES_BIT(rsc_num);
+
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+
+ return deps;
+}
+
+void
+si_pmu_set_mac_rsrc_req_sc(si_t *sih, osl_t *osh)
+{
+ uint32 deps = 0;
+ rsc_per_chip_t *rsc;
+ uint origidx;
+ pmuregs_t *pmu = NULL;
+ uint32 rsrc = 0;
+
+ /* Remember original core before switch to chipc/pmu */
+ origidx = si_coreidx(sih);
+ pmu = si_setcore(sih, PMU_CORE_ID, 0);
+ ASSERT(pmu != NULL);
+
+ rsc = si_pmu_get_rsc_positions(sih);
+
+ rsrc = (PMURES_BIT(rsc->macphy_scan_clkavail) |
+ PMURES_BIT(rsc->dig_ready));
+
+ deps = si_pmu_res_deps(sih, osh, pmu, rsrc, TRUE);
+ deps |= rsrc;
+
+ W_REG(osh, &pmu->mac_res_req_timer2, PMU32_MAC_SCAN_RSRC_REQ_TIMER);
+ W_REG(osh, &pmu->mac_res_req_mask2, deps);
+
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+}
+
+uint32
+BCMATTACHFN(si_pmu_rsrc_ht_avail_clk_deps)(si_t *sih, osl_t *osh)
+{
+ uint32 deps;
+ rsc_per_chip_t *rsc;
+ uint origidx;
+ pmuregs_t *pmu = NULL;
+
+ /* Remember original core before switch to chipc/pmu */
+ origidx = si_coreidx(sih);
+ if (AOB_ENAB(sih)) {
+ pmu = si_setcore(sih, PMU_CORE_ID, 0);
+ } else {
+ pmu = si_setcoreidx(sih, SI_CC_IDX);
+ }
+
+ ASSERT(pmu != NULL);
+
+ rsc = si_pmu_get_rsc_positions(sih);
+ deps = si_pmu_res_deps(sih, osh, pmu, PMURES_BIT(rsc->ht_avail), FALSE);
+ deps |= PMURES_BIT(rsc->ht_avail);
+
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+
+ return deps;
+}
+
+uint32
+BCMATTACHFN(si_pmu_rsrc_cb_ready_deps)(si_t *sih, osl_t *osh)
+{
+ uint32 deps;
+ rsc_per_chip_t *rsc;
+ uint origidx;
+ pmuregs_t *pmu = NULL;
+
+ /* Remember original core before switch to chipc/pmu */
+ origidx = si_coreidx(sih);
+ if (AOB_ENAB(sih)) {
+ pmu = si_setcore(sih, PMU_CORE_ID, 0);
+ } else {
+ pmu = si_setcoreidx(sih, SI_CC_IDX);
+ }
+
+ ASSERT(pmu != NULL);
+
+ rsc = si_pmu_get_rsc_positions(sih);
+ if (rsc->cb_ready == NO_SUCH_RESOURCE) {
+ deps = 0;
+ } else {
+ deps = si_pmu_res_deps(sih, osh, pmu, PMURES_BIT(rsc->cb_ready), FALSE);
+ deps |= PMURES_BIT(rsc->cb_ready);
+ }
+
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+
+ return deps;
+}
+
+void
+si_pmu_set_mac_rsrc_req(si_t *sih, int macunit)
+{
+ pmuregs_t *pmu;
+ uint origidx;
+ osl_t *osh = si_osh(sih);
+
+ /* Remember original core before switch to chipc/pmu */
+ origidx = si_coreidx(sih);
+ if (AOB_ENAB(sih)) {
+ pmu = si_setcore(sih, PMU_CORE_ID, 0);
+ } else {
+ pmu = si_setcoreidx(sih, SI_CC_IDX);
+ }
+ ASSERT(pmu != NULL);
+
+ if (macunit == 0) {
+ W_REG(osh, &pmu->mac_res_req_timer, PMU32_MAC_MAIN_RSRC_REQ_TIMER);
+ W_REG(osh, &pmu->mac_res_req_mask, si_pmu_rsrc_macphy_clk_deps(sih, osh, macunit));
+ } else if (macunit == 1) {
+ W_REG(osh, &pmu->mac_res_req_timer1, PMU32_MAC_AUX_RSRC_REQ_TIMER);
+ W_REG(osh, &pmu->mac_res_req_mask1, si_pmu_rsrc_macphy_clk_deps(sih, osh, macunit));
+ } else if (macunit == 2) {
+ W_REG(osh, &pmu->mac_res_req_timer2, PMU32_MAC_SCAN_RSRC_REQ_TIMER);
+ W_REG(osh, &pmu->mac_res_req_mask2, si_pmu_rsrc_macphy_clk_deps(sih, osh, macunit));
+ }
+
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+}
+
+/**
+ * Return worst-case up time in [ILP cycles] for the given resource.
+ *
+ * Example use case: the d11 core needs to be programmed with the max time it
+ * takes to make the HT clock available.
+ *
+ * Need to check circular dependencies and prevent dead recursion.
+ */
+static uint
+BCMINITFN(si_pmu_res_uptime)(si_t *sih, osl_t *osh,
+ pmuregs_t *pmu, uint8 rsrc, bool pmu_fast_trans_en)
+{
+ uint32 deps;
+ uint uptime, i, dup, dmax, uptrans, ret;
+ uint32 min_mask = 0;
+#ifndef SR_DEBUG
+ uint32 max_mask = 0;
+#endif /* SR_DEBUG */
+
+ /* uptime of resource 'rsrc' */
+ W_REG(osh, &pmu->res_table_sel, rsrc);
+ if (PMUREV(sih->pmurev) >= 30)
+ uptime = (R_REG(osh, &pmu->res_updn_timer) >> 16) & 0x7fff;
+ else if (PMUREV(sih->pmurev) >= 13)
+ uptime = (R_REG(osh, &pmu->res_updn_timer) >> 16) & 0x3ff;
+ else
+ uptime = (R_REG(osh, &pmu->res_updn_timer) >> 8) & 0xff;
+
+ /* direct dependencies of resource 'rsrc' */
+ deps = si_pmu_res_deps(sih, osh, pmu, PMURES_BIT(rsrc), FALSE);
+ for (i = 0; i <= PMURES_MAX_RESNUM; i ++) {
+ if (!(deps & PMURES_BIT(i)))
+ continue;
+ deps &= ~si_pmu_res_deps(sih, osh, pmu, PMURES_BIT(i), TRUE);
+ }
+#ifndef SR_DEBUG
+ si_pmu_res_masks(sih, &min_mask, &max_mask);
+#else
+ /* Recalculate fast pwr up delay if min res mask/max res mask has changed */
+ min_mask = R_REG(osh, &pmu->min_res_mask);
+#endif /* SR_DEBUG */
+ deps &= ~min_mask;
+
+ /* max uptime of direct dependencies */
+ dmax = 0;
+ for (i = 0; i <= PMURES_MAX_RESNUM; i ++) {
+ if (!(deps & PMURES_BIT(i)))
+ continue;
+ dup = si_pmu_res_uptime(sih, osh, pmu, (uint8)i, pmu_fast_trans_en);
+ if (dmax < dup)
+ dmax = dup;
+ }
+
+ PMU_MSG(("si_pmu_res_uptime: rsrc %u uptime %u(deps 0x%08x uptime %u)\n",
+ rsrc, uptime, deps, dmax));
+
+ uptrans = pmu_fast_trans_en ? 0 : PMURES_UP_TRANSITION;
+ ret = uptime + dmax + uptrans;
+ return ret;
+}
+
+/* Return dependencies (direct or all/indirect) for the given resources */
+/* Need to check circular dependencies and prevent dead recursion (see the sketch below) */
+static uint32
+si_pmu_res_deps(si_t *sih, osl_t *osh, pmuregs_t *pmu, uint32 rsrcs, bool all)
+{
+ uint32 deps = 0;
+ uint32 i;
+
+ for (i = 0; i <= PMURES_MAX_RESNUM; i ++) {
+ if (!(rsrcs & PMURES_BIT(i)))
+ continue;
+ W_REG(osh, &pmu->res_table_sel, i);
+ deps |= R_REG(osh, &pmu->res_dep_mask);
+ }
+
+ return !all ? deps : (deps ? (deps | si_pmu_res_deps(sih, osh, pmu, deps, TRUE)) : 0);
+}
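+
+/*
+ * Illustrative sketch, not part of the driver: the recursion above computes the
+ * transitive closure of the per-resource dependency masks. The same idea over a
+ * plain in-memory table (dep_tbl[i] = direct deps of resource i), written so
+ * that the growing resource set guarantees termination even with circular
+ * dependencies. Names are hypothetical.
+ */
+static uint32
+pmu_res_deps_sketch(const uint32 *dep_tbl, uint32 nres, uint32 rsrcs)
+{
+	uint32 i, deps = 0, next;
+
+	for (i = 0; i < nres; i++) {
+		if (rsrcs & (1u << i))
+			deps |= dep_tbl[i];	/* direct dependencies */
+	}
+	next = deps | rsrcs;
+	if (next == rsrcs)			/* no new resources found: done */
+		return deps;
+	return deps | pmu_res_deps_sketch(dep_tbl, nres, next);
+}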
+
+static bool
+si_pmu_otp_is_ready(si_t *sih)
+{
+ uint32 otps = 0u;
+
+ if (AOB_ENAB(sih)) {
+ otps = si_corereg(sih, si_findcoreidx(sih, GCI_CORE_ID, 0u),
+ OFFSETOF(gciregs_t, otpstatus), 0u, 0u);
+ } else {
+ otps = si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, otpstatus), 0u, 0u);
+ }
+ return !!(otps & OTPS_READY);
+}
+
+static bool
+si_pmu_otp_is_ready_and_wait(si_t *sih, bool on)
+{
+ SPINWAIT((si_pmu_otp_is_ready(sih) != on), 3000u);
+
+ if (si_pmu_otp_is_ready(sih) != on) {
+ PMU_ERROR(("OTP ready bit not %s after wait\n", (on ? "Set" : "Clear")));
+ OSL_SYS_HALT();
+ }
+
+ return si_pmu_otp_is_ready(sih) == on;
+}
+
+/**
+ * OTP is powered down/up as a means of resetting it, or for saving current when OTP is unused.
+ * OTP is powered up/down through PMU resources.
+ * OTP will turn OFF only if it's not in the dependency of any "higher" rsrc in min_res_mask
+ */
+void
+si_pmu_otp_power(si_t *sih, osl_t *osh, bool on, uint32* min_res_mask)
+{
+ pmuregs_t *pmu;
+ uint origidx;
+ uint32 rsrcs = 0; /* rsrcs to turn on/off OTP power */
+ rsc_per_chip_t *rsc; /* chip specific resource bit positions */
+
+ ASSERT(sih->cccaps & CC_CAP_PMU);
+
+ /* Don't do anything if OTP is disabled */
+ if (si_is_otp_disabled(sih)) {
+ PMU_MSG(("si_pmu_otp_power: OTP is disabled\n"));
+ return;
+ }
+
+ /* Remember original core before switch to chipc/pmu */
+ origidx = si_coreidx(sih);
+ if (AOB_ENAB(sih)) {
+ pmu = si_setcore(sih, PMU_CORE_ID, 0);
+ } else {
+ pmu = si_setcoreidx(sih, SI_CC_IDX);
+ }
+ ASSERT(pmu != NULL);
+
+ /*
+	 * OTP can't be power cycled by toggling OTP_PU on always-on OTP chips. For now
+	 * corerev 45 is the only one that has always-on OTP.
+ * Instead, the chipc register OTPCtrl1 (Offset 0xF4) bit 25 (forceOTPpwrDis) is used.
+ * Please refer to http://hwnbu-twiki.broadcom.com/bin/view/Mwgroup/ChipcommonRev45
+ */
+ if (CCREV(sih->ccrev) == 45) {
+ uint32 otpctrl1;
+ otpctrl1 = si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, otpcontrol1), 0, 0);
+ if (on)
+ otpctrl1 &= ~OTPC_FORCE_PWR_OFF;
+ else
+ otpctrl1 |= OTPC_FORCE_PWR_OFF;
+ si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, otpcontrol1), ~0, otpctrl1);
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+ return;
+ }
+
+ switch (CHIPID(sih->chip)) {
+ case BCM43012_CHIP_ID:
+ case BCM43013_CHIP_ID:
+ case BCM43014_CHIP_ID:
+ case BCM4360_CHIP_ID:
+ case BCM43460_CHIP_ID:
+ case BCM4352_CHIP_ID:
+ case BCM43526_CHIP_ID:
+ case BCM4376_CHIP_GRPID:
+ case BCM4385_CHIP_GRPID:
+
+#ifdef UNRELEASEDCHIP
+#endif
+
+ rsc = si_pmu_get_rsc_positions(sih);
+ rsrcs = PMURES_BIT(rsc->otp_pu);
+ break;
+ case BCM4378_CHIP_GRPID:
+ case BCM4387_CHIP_GRPID:
+ case BCM4388_CHIP_GRPID:
+ case BCM4389_CHIP_GRPID:
+ case BCM4397_CHIP_GRPID:
+ si_gci_direct(sih, GCI_OFFSETOF(sih, otpcontrol), OTPC_FORCE_OTP_PWR_DIS,
+ on ? 0u : OTPC_FORCE_OTP_PWR_DIS);
+ if (!si_pmu_otp_is_ready_and_wait(sih, on)) {
+ PMU_MSG(("OTP ready bit not %s after wait\n", (on ? "ON" : "OFF")));
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (rsrcs != 0) {
+ bool on_check = FALSE; /* Stores otp_ready state */
+ uint32 min_mask = 0;
+
+ /* Turn on/off the power */
+ if (on) {
+ min_mask = R_REG(osh, &pmu->min_res_mask);
+ *min_res_mask = min_mask;
+
+ min_mask |= rsrcs;
+ min_mask |= si_pmu_res_deps(sih, osh, pmu, min_mask, TRUE);
+ on_check = TRUE;
+ /* Assuming max rsc mask defines OTP_PU, so not programming max */
+ PMU_MSG(("Adding rsrc 0x%x to min_res_mask\n", min_mask));
+ W_REG(osh, &pmu->min_res_mask, min_mask);
+ si_pmu_wait_for_steady_state(sih, osh, pmu);
+ OSL_DELAY(1000);
+ SPINWAIT(!(R_REG(osh, &pmu->res_state) & rsrcs),
+ PMU_MAX_TRANSITION_DLY);
+ ASSERT(R_REG(osh, &pmu->res_state) & rsrcs);
+ } else {
+ /*
+ * Restore back the min_res_mask,
+ * but keep OTP powered off if allowed by dependencies
+ */
+ if (*min_res_mask)
+ min_mask = *min_res_mask;
+ else
+ min_mask = R_REG(osh, &pmu->min_res_mask);
+
+ min_mask &= ~rsrcs;
+ /*
+			 * OTP rsrc can be cleared only if it's not
+ * in the dependency of any "higher" rsrc in min_res_mask
+ */
+ min_mask |= si_pmu_res_deps(sih, osh, pmu, min_mask, TRUE);
+ on_check = ((min_mask & rsrcs) != 0);
+
+ PMU_MSG(("Removing rsrc 0x%x from min_res_mask\n", min_mask));
+ W_REG(osh, &pmu->min_res_mask, min_mask);
+ si_pmu_wait_for_steady_state(sih, osh, pmu);
+ }
+
+ if (!si_pmu_otp_is_ready_and_wait(sih, on_check)) {
+ PMU_MSG(("OTP ready bit not %s after wait\n", (on_check ? "ON" : "OFF")));
+ }
+#ifdef NOT_YET
+ /*
+ * FIXME: Temporarily disabling OTPS_READY ASSERT check. Right now ASSERT in
+ * ROM is enabled only for 4389B0/C0. Therefore this change anyway will not
+ * affect other chips. Once the correct spin-wait value is updated by the
+ * HW team, then this ASSERT will be enabled back.
+ */
+ ASSERT(si_pmu_otp_is_ready(sih) == on_check);
+#endif /* NOT_YET */
+ }
+
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+} /* si_pmu_otp_power */
+
+void
+si_pmu_spuravoid(si_t *sih, osl_t *osh, uint8 spuravoid)
+{
+ uint origidx;
+ bcm_int_bitmask_t intr_val;
+
+ /* Block ints and save current core */
+ si_introff(sih, &intr_val);
+ origidx = si_coreidx(sih);
+
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+ si_intrrestore(sih, &intr_val);
+} /* si_pmu_spuravoid */
+
+/* The definitions below are only for BBPLL parallel purposes, */
+/* i.e. for holding the pllcontrol data values for spuravoid */
+typedef struct {
+ uint8 spuravoid_mode;
+ uint8 pllctrl_reg;
+ uint32 pllctrl_regval;
+} pllctrl_spuravoid_t;
+
+uint32
+si_pmu_pll28nm_fvco(si_t *sih)
+{
+ uint32 r_high, r_low, r;
+ uint32 xf = si_alp_clock(sih);
+ /* PLL registers for 4368 */
+ uint32 pllreg5 = si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG5, 0, 0);
+ uint32 pllreg4 = si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG4, 0, 0);
+ /* p1div has lower 2 bits in pll4 and high 2 bits in pll5 */
+ uint8 p1div_lo = (pllreg4 & PMU4368_PLL1_PC4_P1DIV_MASK) >> PMU4368_PLL1_PC4_P1DIV_SHIFT;
+ uint8 p1div_hi = (pllreg5 & PMU4368_PLL1_PC5_P1DIV_MASK) >> PMU4368_PLL1_PC5_P1DIV_SHIFT;
+ uint8 p1div = (p1div_hi << PMU4368_P1DIV_HI_SHIFT) | (p1div_lo << PMU4368_P1DIV_LO_SHIFT);
+ uint32 ndiv_int = (pllreg5 & PMU4368_PLL1_PC5_NDIV_INT_MASK) >>
+ PMU4368_PLL1_PC5_NDIV_INT_SHIFT;
+ uint32 ndiv_frac = (pllreg5 & PMU4368_PLL1_PC5_NDIV_FRAC_MASK) >>
+ PMU4368_PLL1_PC5_NDIV_FRAC_SHIFT;
+
+ if (ISSIM_ENAB(sih)) {
+ /* PLL CTRL registers are meaningless under QT, return the pre-configured freq */
+ return (FVCO_720 * 1000);
+ } else if (p1div == 0) {
+ /* PLL register read fails, return 0 so caller can retry */
+ PMU_ERROR(("p1div is invalid\n"));
+ return 0;
+ }
+
+	/* Calculate xf * (ndiv_frac / (1 << 20) + ndiv_int) / p1div.
+	 * To reduce the inaccuracy of the division,
+	 * convert to (xf * ndiv_frac / (1 << 20) + xf * ndiv_int) / p1div
+	 * (see the sketch after this function)
+	 */
+ math_uint64_multiple_add(&r_high, &r_low, xf, ndiv_frac, 0);
+	/* Make sure the calculated 64-bit number is in the safe range (within 52 bits),
+	 * so we have a valid 32-bit result after dividing by 1 << 20
+	 */
+ ASSERT((r_high & 0xFFE00000) == 0);
+ math_uint64_right_shift(&r, r_high, r_low, 20);
+
+ return (r + ndiv_int * xf) / p1div;
+}
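+
+/*
+ * Illustrative sketch, not part of the driver: with native 64-bit arithmetic
+ * the fixed-point calculation above reduces to
+ * fvco = xf * (ndiv_int + ndiv_frac / 2^20) / p1div. The helper is hypothetical
+ * and assumes the product xf * ndiv_frac stays within 52 bits, as above.
+ */
+static uint32
+pmu_fvco_sketch(uint32 xf, uint32 ndiv_int, uint32 ndiv_frac, uint32 p1div)
+{
+	uint64 frac_hz = ((uint64)xf * ndiv_frac) >> 20;	/* fractional-N contribution */
+	return (uint32)(((uint64)ndiv_int * xf + frac_hz) / p1div);
+}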
+
+bool
+si_pmu_is_otp_powered(si_t *sih, osl_t *osh)
+{
+ uint idx;
+ pmuregs_t *pmu;
+ bool st;
+ rsc_per_chip_t *rsc; /* chip specific resource bit positions */
+
+ /* Remember original core before switch to chipc/pmu */
+ idx = si_coreidx(sih);
+ if (AOB_ENAB(sih)) {
+ pmu = si_setcore(sih, PMU_CORE_ID, 0);
+ } else {
+ pmu = si_setcoreidx(sih, SI_CC_IDX);
+ }
+ ASSERT(pmu != NULL);
+
+ si_pmu_wait_for_steady_state(sih, osh, pmu);
+
+ switch (CHIPID(sih->chip)) {
+ case BCM43012_CHIP_ID:
+ case BCM43013_CHIP_ID:
+ case BCM43014_CHIP_ID:
+ case BCM4360_CHIP_ID:
+ case BCM43460_CHIP_ID:
+ case BCM43526_CHIP_ID:
+ case BCM4352_CHIP_ID:
+ case BCM4369_CHIP_GRPID:
+ case BCM4362_CHIP_GRPID:
+ case BCM4376_CHIP_GRPID:
+ case BCM4385_CHIP_GRPID:
+ rsc = si_pmu_get_rsc_positions(sih);
+ st = (R_REG(osh, &pmu->res_state) & PMURES_BIT(rsc->otp_pu)) != 0;
+ break;
+ case BCM4378_CHIP_GRPID:
+ case BCM4387_CHIP_GRPID:
+ case BCM4388_CHIP_GRPID:
+ case BCM4389_CHIP_GRPID:
+ case BCM4397_CHIP_GRPID:
+ st = (!(si_gci_direct(sih, GCI_OFFSETOF(sih, otpcontrol), 0u, 0u) &
+ OTPC_FORCE_OTP_PWR_DIS)) && si_pmu_otp_is_ready_and_wait(sih, TRUE);
+ break;
+ default:
+ st = TRUE;
+ break;
+ }
+
+ /* Return to original core */
+ si_setcoreidx(sih, idx);
+ return st;
+} /* si_pmu_is_otp_powered */
+
+/**
+ * Some chips/boards can optionally be fitted with an external 32KHz clock source for increased power
+ * savings (due to more accurate sleep intervals).
+ */
+static void
+BCMATTACHFN(si_pmu_set_lpoclk)(si_t *sih, osl_t *osh)
+{
+ uint32 ext_lpo_sel, int_lpo_sel, timeout = 0,
+ ext_lpo_avail = 0, lpo_sel = 0;
+ uint32 ext_lpo_isclock; /* On e.g. 43602a0, either x-tal or clock can be on LPO pins */
+ pmuregs_t *pmu;
+ uint origidx;
+
+ if (!(getintvar(NULL, "boardflags3")))
+ return;
+
+ /* Remember original core before switch to chipc/pmu */
+ origidx = si_coreidx(sih);
+ if (AOB_ENAB(sih)) {
+ pmu = si_setcore(sih, PMU_CORE_ID, 0);
+ } else {
+ pmu = si_setcoreidx(sih, SI_CC_IDX);
+ }
+ ASSERT(pmu != NULL);
+
+ ext_lpo_sel = getintvar(NULL, "boardflags3") & BFL3_FORCE_EXT_LPO_SEL;
+ int_lpo_sel = getintvar(NULL, "boardflags3") & BFL3_FORCE_INT_LPO_SEL;
+ ext_lpo_isclock = getintvar(NULL, "boardflags3") & BFL3_EXT_LPO_ISCLOCK;
+
+ BCM_REFERENCE(ext_lpo_isclock);
+
+ if (ext_lpo_sel != 0) {
+ switch (CHIPID(sih->chip)) {
+ CASE_BCM43602_CHIP:
+ /* External LPO is POR default enabled */
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL2, PMU43602_CC2_XTAL32_SEL,
+ ext_lpo_isclock ? 0 : PMU43602_CC2_XTAL32_SEL);
+ break;
+ default:
+ /* Force External LPO Power Up */
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL0, CC_EXT_LPO_PU, CC_EXT_LPO_PU);
+ si_gci_chipcontrol(sih, CHIPCTRLREG6, GC_EXT_LPO_PU, GC_EXT_LPO_PU);
+ break;
+ }
+
+ ext_lpo_avail = R_REG(osh, &pmu->pmustatus) & EXT_LPO_AVAIL;
+ while (ext_lpo_avail == 0 && timeout < LPO_SEL_TIMEOUT) {
+ OSL_DELAY(1000);
+ ext_lpo_avail = R_REG(osh, &pmu->pmustatus) & EXT_LPO_AVAIL;
+ timeout++;
+ }
+
+ if (timeout >= LPO_SEL_TIMEOUT) {
+ PMU_ERROR(("External LPO is not available\n"));
+ } else {
+			/* External LPO is available, let's use (=select) it */
+ OSL_DELAY(1000);
+ timeout = 0;
+
+ switch (CHIPID(sih->chip)) {
+ CASE_BCM43602_CHIP:
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL2, PMU43602_CC2_FORCE_EXT_LPO,
+ PMU43602_CC2_FORCE_EXT_LPO); /* switches to external LPO */
+ break;
+ default:
+ /* Force External LPO Sel up */
+ si_gci_chipcontrol(sih, CHIPCTRLREG6, EXT_LPO_SEL, EXT_LPO_SEL);
+ /* Clear Force Internal LPO Sel */
+ si_gci_chipcontrol(sih, CHIPCTRLREG6, INT_LPO_SEL, 0x0);
+ OSL_DELAY(1000);
+
+ lpo_sel = R_REG(osh, &pmu->pmucontrol) & LPO_SEL;
+ while (lpo_sel != 0 && timeout < LPO_SEL_TIMEOUT) {
+ OSL_DELAY(1000);
+ lpo_sel = R_REG(osh, &pmu->pmucontrol) & LPO_SEL;
+ timeout++;
+ }
+ }
+
+ if (timeout >= LPO_SEL_TIMEOUT) {
+ PMU_ERROR(("External LPO is not set\n"));
+ /* Clear Force External LPO Sel */
+ switch (CHIPID(sih->chip)) {
+ CASE_BCM43602_CHIP:
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL2,
+ PMU43602_CC2_FORCE_EXT_LPO, 0);
+ break;
+ default:
+ si_gci_chipcontrol(sih, CHIPCTRLREG6, EXT_LPO_SEL, 0x0);
+ break;
+ }
+ } else {
+ /* Clear Force Internal LPO Power Up */
+ switch (CHIPID(sih->chip)) {
+ CASE_BCM43602_CHIP:
+ break;
+ default:
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL0, CC_INT_LPO_PU, 0x0);
+ si_gci_chipcontrol(sih, CHIPCTRLREG6, GC_INT_LPO_PU, 0x0);
+ break;
+ }
+ } /* if (timeout) */
+ } /* if (timeout) */
+ } else if (int_lpo_sel != 0) {
+ switch (CHIPID(sih->chip)) {
+ CASE_BCM43602_CHIP:
+ break; /* do nothing, internal LPO is POR default powered and selected */
+ default:
+ /* Force Internal LPO Power Up */
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL0, CC_INT_LPO_PU, CC_INT_LPO_PU);
+ si_gci_chipcontrol(sih, CHIPCTRLREG6, GC_INT_LPO_PU, GC_INT_LPO_PU);
+
+ OSL_DELAY(1000);
+
+ /* Force Internal LPO Sel up */
+ si_gci_chipcontrol(sih, CHIPCTRLREG6, INT_LPO_SEL, INT_LPO_SEL);
+ /* Clear Force External LPO Sel */
+ si_gci_chipcontrol(sih, CHIPCTRLREG6, EXT_LPO_SEL, 0x0);
+
+ OSL_DELAY(1000);
+
+ lpo_sel = R_REG(osh, &pmu->pmucontrol) & LPO_SEL;
+ timeout = 0;
+ while (lpo_sel == 0 && timeout < LPO_SEL_TIMEOUT) {
+ OSL_DELAY(1000);
+ lpo_sel = R_REG(osh, &pmu->pmucontrol) & LPO_SEL;
+ timeout++;
+ }
+ if (timeout >= LPO_SEL_TIMEOUT) {
+ PMU_ERROR(("Internal LPO is not set\n"));
+ /* Clear Force Internal LPO Sel */
+ si_gci_chipcontrol(sih, CHIPCTRLREG6, INT_LPO_SEL, 0x0);
+ } else {
+ /* Clear Force External LPO Power Up */
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL0, CC_EXT_LPO_PU, 0x0);
+ si_gci_chipcontrol(sih, CHIPCTRLREG6, GC_EXT_LPO_PU, 0x0);
+ }
+ break;
+ }
+ if ((PMUREV(sih->pmurev) >= 33)) {
+ /* Enabling FAST_SEQ */
+ PMU_REG(sih, pmucontrol_ext, PCTL_EXT_FASTSEQ_ENAB, PCTL_EXT_FASTSEQ_ENAB);
+ }
+ }
+
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+} /* si_pmu_set_lpoclk */
+
+static int
+si_pmu_fast_lpo_locked(si_t *sih, osl_t *osh)
+{
+ int lock = 0;
+ switch (CHIPID(sih->chip)) {
+ case BCM43012_CHIP_ID:
+ case BCM43013_CHIP_ID:
+ case BCM43014_CHIP_ID:
+ lock = CHIPC_REG(sih, chipstatus, 0, 0) & CST43012_FLL_LOCK;
+ break;
+ case BCM4369_CHIP_GRPID:
+ case BCM4376_CHIP_GRPID:
+ case BCM4378_CHIP_GRPID:
+ lock = si_gci_chipstatus(sih, GCI_CHIPSTATUS_13) & GCI_CS_4369_FLL1MHZ_LOCK_MASK;
+ break;
+ case BCM4385_CHIP_GRPID:
+ case BCM4387_CHIP_GRPID:
+ case BCM4388_CHIP_GRPID:
+ case BCM4389_CHIP_GRPID:
+ case BCM4397_CHIP_GRPID:
+ lock = si_gci_chipstatus(sih, GCI_CHIPSTATUS_15) & GCI_CS_4387_FLL1MHZ_LOCK_MASK;
+ break;
+ default:
+ PMU_MSG(("si_pmu_fast_lpo_locked: LPO enable: unsupported chip!\n"));
+ }
+ return lock ? 1 : 0;
+}
+
+/* Turn ON FAST LPO FLL (1MHz) */
+static void
+BCMATTACHFN(si_pmu_fast_lpo_enable)(si_t *sih, osl_t *osh)
+{
+ int i = 0, lock = 0;
+
+ BCM_REFERENCE(i);
+ BCM_REFERENCE(lock);
+
+ switch (CHIPID(sih->chip)) {
+ case BCM43012_CHIP_ID:
+ case BCM43013_CHIP_ID:
+ case BCM43014_CHIP_ID:
+ PMU_REG(sih, pmucontrol_ext, PCTL_EXT_FASTLPO_ENAB, PCTL_EXT_FASTLPO_ENAB);
+ lock = CHIPC_REG(sih, chipstatus, 0, 0) & CST43012_FLL_LOCK;
+
+ for (i = 0; ((i <= 30) && (!lock)); i++)
+ {
+ lock = CHIPC_REG(sih, chipstatus, 0, 0) & CST43012_FLL_LOCK;
+ OSL_DELAY(10);
+ }
+
+ PMU_MSG(("si_pmu_fast_lpo_enable: duration: %d\n", i*10));
+
+ if (!lock) {
+ PMU_MSG(("si_pmu_fast_lpo_enable: FLL lock not present!"));
+ ROMMABLE_ASSERT(0);
+ }
+
+ /* Now switch to using FAST LPO clk */
+ PMU_REG(sih, pmucontrol_ext, PCTL_EXT_FASTLPO_SWENAB, PCTL_EXT_FASTLPO_SWENAB);
+ break;
+ case BCM4369_CHIP_GRPID:
+ case BCM4376_CHIP_GRPID:
+ case BCM4378_CHIP_GRPID:
+ case BCM4385_CHIP_GRPID:
+ case BCM4387_CHIP_GRPID:
+ case BCM4388_CHIP_GRPID:
+ case BCM4389_CHIP_GRPID:
+ case BCM4397_CHIP_GRPID:
+ {
+ uint8 fastlpo_dis = fastlpo_dis_get();
+ uint8 fastlpo_pcie_dis = fastlpo_pcie_dis_get();
+
+ if (!fastlpo_dis || !fastlpo_pcie_dis) {
+ /* LHL rev 6 in 4387 requires this bit to be set first */
+ if ((LHLREV(sih->lhlrev) >= 6) && !PMU_FLL_PU_ENAB()) {
+ LHL_REG(sih, lhl_top_pwrseq_ctl_adr,
+ LHL_PWRSEQCTL_PMU_LPLDO_PD, LHL_PWRSEQCTL_WL_FLLPU_EN);
+ }
+
+ PMU_REG(sih, pmucontrol_ext, PCTL_EXT_FASTLPO_ENAB, PCTL_EXT_FASTLPO_ENAB);
+
+ lock = si_pmu_fast_lpo_locked(sih, osh);
+ for (i = 0; ((i < 300) && (!lock)); i++) {
+ lock = si_pmu_fast_lpo_locked(sih, osh);
+ OSL_DELAY(10);
+ }
+ ASSERT(lock);
+ }
+
+ if (!fastlpo_dis) {
+ /* Now switch to using FAST LPO clk */
+ PMU_REG(sih, pmucontrol_ext,
+ PCTL_EXT_FASTLPO_SWENAB, PCTL_EXT_FASTLPO_SWENAB);
+
+ OSL_DELAY(1000);
+ PMU_MSG(("pmu fast lpo enabled\n"));
+ }
+ break;
+ }
+ default:
+ PMU_MSG(("si_pmu_fast_lpo_enable: LPO enable: unsupported chip!\n"));
+ }
+}
+
+/* Turn ON FAST LPO FLL (1MHz) for PCIE */
+bool
+BCMATTACHFN(si_pmu_fast_lpo_enable_pcie)(si_t *sih)
+{
+ if (!FASTLPO_ENAB()) {
+ return FALSE;
+ }
+
+ switch (CHIPID(sih->chip)) {
+ case BCM4369_CHIP_GRPID:
+ case BCM4376_CHIP_GRPID:
+ case BCM4378_CHIP_GRPID:
+ case BCM4385_CHIP_GRPID:
+ case BCM4387_CHIP_GRPID:
+ case BCM4388_CHIP_GRPID:
+ case BCM4389_CHIP_GRPID:
+ case BCM4397_CHIP_GRPID:
+ {
+ uint8 fastlpo_pcie_dis = fastlpo_pcie_dis_get();
+
+ if (!fastlpo_pcie_dis) {
+ PMU_REG(sih, pmucontrol_ext,
+ PCTL_EXT_FASTLPO_PCIE_SWENAB, PCTL_EXT_FASTLPO_PCIE_SWENAB);
+ OSL_DELAY(1000);
+ PMU_MSG(("pcie fast lpo enabled\n"));
+ return TRUE;
+ }
+ break;
+ }
+ default:
+ PMU_MSG(("si_pmu_fast_lpo_enable_pcie: LPO enable: unsupported chip!\n"));
+ }
+
+ return FALSE;
+}
+
+/* Turn ON FAST LPO FLL (1MHz) for PMU */
+bool
+BCMATTACHFN(si_pmu_fast_lpo_enable_pmu)(si_t *sih)
+{
+ if (!FASTLPO_ENAB()) {
+ return FALSE;
+ }
+
+ switch (CHIPID(sih->chip)) {
+ case BCM4369_CHIP_GRPID:
+ case BCM4376_CHIP_GRPID:
+ case BCM4378_CHIP_GRPID:
+ case BCM4385_CHIP_GRPID:
+ case BCM4387_CHIP_GRPID:
+ case BCM4388_CHIP_GRPID:
+ case BCM4389_CHIP_GRPID:
+ case BCM4397_CHIP_GRPID:
+ {
+ uint8 fastlpo_dis = fastlpo_dis_get();
+
+ if (!fastlpo_dis) {
+ PMU_MSG(("pmu fast lpo enabled\n"));
+ return TRUE;
+ }
+ break;
+ }
+ default:
+ PMU_MSG(("si_pmu_fast_lpo_enable_pmu: LPO enable: unsupported chip!\n"));
+ }
+
+ return FALSE;
+}
+
+static uint8
+BCMATTACHFN(fastlpo_dis_get)(void)
+{
+ uint8 fastlpo_dis = 1;
+
+#if defined(BCM_FASTLPO_PMU) && !defined(BCM_FASTLPO_PMU_DISABLED)
+ if (FASTLPO_ENAB()) {
+ fastlpo_dis = 0;
+ if (getvar(NULL, rstr_fastlpo_dis) != NULL) {
+ fastlpo_dis = (uint8)getintvar(NULL, rstr_fastlpo_dis);
+ }
+ }
+#endif /* BCM_FASTLPO_PMU */
+ return fastlpo_dis;
+}
+
+static uint8
+BCMATTACHFN(fastlpo_pcie_dis_get)(void)
+{
+ uint8 fastlpo_pcie_dis = 1;
+
+ if (FASTLPO_ENAB()) {
+ fastlpo_pcie_dis = 0;
+ if (getvar(NULL, rstr_fastlpo_pcie_dis) != NULL) {
+ fastlpo_pcie_dis = (uint8)getintvar(NULL, rstr_fastlpo_pcie_dis);
+ }
+ }
+ return fastlpo_pcie_dis;
+}
+
+static void
+BCMATTACHFN(si_pmu_fll_preload_enable)(si_t *sih)
+{
+ if (!PMU_FLL_PU_ENAB()) {
+ return;
+ }
+
+ switch (CHIPID(sih->chip)) {
+ case BCM4387_CHIP_GRPID:
+ case BCM4388_CHIP_GRPID:
+ case BCM4389_CHIP_GRPID:
+ {
+ uint32 fll_dac_out;
+
+ fll_dac_out = (si_gci_chipstatus(sih, GCI_CHIPSTATUS_15) &
+ GCI_CS_4387_FLL1MHZ_DAC_OUT_MASK)
+ >> GCI_CS_4387_FLL1MHZ_DAC_OUT_SHIFT;
+
+ LHL_REG(sih, lhl_wl_hw_ctl_adr[1],
+ LHL_1MHZ_FLL_DAC_EXT_MASK,
+ (fll_dac_out) << LHL_1MHZ_FLL_DAC_EXT_SHIFT);
+ LHL_REG(sih, lhl_wl_hw_ctl_adr[1],
+ LHL_1MHZ_FLL_PRELOAD_MASK,
+ LHL_1MHZ_FLL_PRELOAD_MASK);
+ break;
+ }
+ default:
+ PMU_MSG(("si_pmu_fll_preload_enable: unsupported chip!\n"));
+ ASSERT(0);
+ break;
+ }
+}
+
+/* LV sleep mode summary:
+ * LV mode is where both ABUCK and CBUCK are programmed to low voltages during
+ * sleep, and VMUX selects ABUCK as VDDOUT_AON. LPLDO needs to power off.
+ * With ASR ON, LPLDO OFF
+ */
+#if defined(SAVERESTORE)
+static void
+BCMATTACHFN(si_set_lv_sleep_mode_pmu)(si_t *sih, osl_t *osh)
+{
+ /* jtag_udr_write USER_REG9W jtag_serdes_pic_enable 1 */
+ if (BCM4369_CHIP(sih->chip) && (CHIPREV(sih->chiprev) == 0)) {
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL4, PMU_CC4_4369_AUX_PD_MEMLPLDO2VDDB_ON, 0);
+
+		/* JTAG_SEL override: when this bit is set, jtag_sel is 0; required for JTAG writes */
+		/* Temporarily disabled as it is not required:
+ si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_06, 0x10, 0x10);
+ jtag_setbit_128(sih, 9, 103, 1);
+ si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_06, 0x10, 0x0);
+ */
+
+ }
+
+	/* Program PMU VREG register for resource-based ABUCK and CBUCK modes
+ * cbuck rsrc 0 - PWM and abuck rsrc 0 - Auto, rsrc 1 - PWM
+ */
+ si_pmu_vreg_control(sih, PMU_VREG_16, PMU_4369_VREG16_RSRC0_CBUCK_MODE_MASK,
+ 0x3u << PMU_4369_VREG16_RSRC0_CBUCK_MODE_SHIFT);
+ si_pmu_vreg_control(sih, PMU_VREG_16, PMU_4369_VREG16_RSRC0_ABUCK_MODE_MASK,
+ 0x3u << PMU_4369_VREG16_RSRC0_ABUCK_MODE_SHIFT);
+ si_pmu_vreg_control(sih, PMU_VREG_16, PMU_4369_VREG16_RSRC1_ABUCK_MODE_MASK,
+ 0x3u << PMU_4369_VREG16_RSRC1_ABUCK_MODE_SHIFT);
+ si_pmu_vreg_control(sih, PMU_VREG_16, PMU_4369_VREG16_RSRC2_ABUCK_MODE_MASK,
+ 0x3u << PMU_4369_VREG16_RSRC2_ABUCK_MODE_SHIFT);
+
+ /* asr voltage adjust PWM - 0.8V */
+ si_pmu_vreg_control(sih, PMU_VREG_8, PMU_4369_VREG8_ASR_OVADJ_LPPFM_MASK,
+ 0x10u << PMU_4369_VREG8_ASR_OVADJ_LPPFM_SHIFT);
+
+ /* Enable rsrc_en_asr_msk[0] and msk[1] */
+ si_pmu_vreg_control(sih, PMU_VREG_13, PMU_4369_VREG13_RSRC_EN0_ASR_MASK,
+ 0x1u << PMU_4369_VREG13_RSRC_EN0_ASR_SHIFT);
+ si_pmu_vreg_control(sih, PMU_VREG_13, PMU_4369_VREG13_RSRC_EN1_ASR_MASK,
+ 0x1u << PMU_4369_VREG13_RSRC_EN1_ASR_SHIFT);
+ si_pmu_vreg_control(sih, PMU_VREG_13, PMU_4369_VREG13_RSRC_EN2_ASR_MASK,
+ 0x1u << PMU_4369_VREG13_RSRC_EN2_ASR_SHIFT);
+
+ si_pmu_vreg_control(sih, PMU_VREG_14, PMU_4369_VREG14_RSRC_EN_CSR_MASK0_MASK,
+ 0x1u << PMU_4369_VREG14_RSRC_EN_CSR_MASK0_SHIFT);
+
+ /* disable force_hp_mode and enable wl_pmu_lv_mod */
+ si_pmu_vreg_control(sih, PMU_VREG_7,
+ (PMU_4369_VREG_7_WL_PMU_LV_MODE_MASK | PMU_4369_VREG_7_WL_PMU_LP_MODE_MASK |
+ PMU_4369_VREG_7_PMU_FORCE_HP_MODE_MASK), PMU_4369_VREG_7_WL_PMU_LV_MODE_MASK);
+
+ /* Enable MISCLDO only for A0, MEMLPLDO_adj -0.7V, Disable LPLDO power up */
+	/* For 4387, do not disable it: it is powered up when the analog PMU is out of sleep
+	 * and bypassed when in sleep mode
+ */
+ if (!(BCM4389_CHIP(sih->chip) || BCM4388_CHIP(sih->chip) || BCM4397_CHIP(sih->chip) ||
+ BCM4387_CHIP(sih->chip))) {
+ si_pmu_vreg_control(sih, PMU_VREG_5, PMU_4369_VREG_5_MISCLDO_POWER_UP_MASK,
+ ((CHIPREV(sih->chiprev) == 0) ? 1 : 0) <<
+ PMU_4369_VREG_5_MISCLDO_POWER_UP_SHIFT);
+ }
+ si_pmu_vreg_control(sih, PMU_VREG_5, PMU_4369_VREG_5_LPLDO_POWER_UP_MASK, 0x0u);
+ si_pmu_vreg_control(sih, PMU_VREG_5, PMU_4369_VREG_5_MEMLPLDO_OP_VLT_ADJ_CTRL_MASK,
+ 0xDu << PMU_4369_VREG_5_MEMLPLDO_OP_VLT_ADJ_CTRL_SHIFT);
+ si_pmu_vreg_control(sih, PMU_VREG_5, PMU_4369_VREG_5_LPLDO_OP_VLT_ADJ_CTRL_MASK,
+ 0xFu << PMU_4369_VREG_5_LPLDO_OP_VLT_ADJ_CTRL_SHIFT);
+
+	/* Enable MEMLPLDO (0x08); BTLDO is also enabled. During sleep RFLDO is disabled */
+ si_pmu_vreg_control(sih, PMU_VREG_6, PMU_4369_VREG_6_MEMLPLDO_POWER_UP_MASK,
+ 0x1u << PMU_4369_VREG_6_MEMLPLDO_POWER_UP_SHIFT);
+
+ /* Program PMU chip cntrl register to control
+ * cbuck2vddb_pwrsw_force_on =1 and memlpldo2vddb_pwrsw_force_off = 1
+ * cbuck2ret_pwrsw_force_on = 1 and memlpldo2vddb_pwrsw_force_off = 1
+ * set d11_2x2_bw80_cbuck2vddb_pwrsw_force_on and
+ * d11_2x2_bw20_cbuck2vddb_pwrsw_force_on cbuck2ret_pwrsw on 4 cores
+ */
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL4,
+ (PMU_CC4_4369_MAIN_PD_CBUCK2VDDB_ON | PMU_CC4_4369_MAIN_PD_CBUCK2VDDRET_ON |
+ PMU_CC4_4369_MAIN_PD_MEMLPLDO2VDDB_ON | PMU_CC4_4369_MAIN_PD_MEMLPDLO2VDDRET_ON),
+ (PMU_CC4_4369_MAIN_PD_CBUCK2VDDB_ON | PMU_CC4_4369_MAIN_PD_CBUCK2VDDRET_ON));
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL4,
+ (PMU_CC4_4369_AUX_PD_CBUCK2VDDB_ON | PMU_CC4_4369_AUX_PD_CBUCK2VDDRET_ON |
+ PMU_CC4_4369_AUX_PD_MEMLPLDO2VDDB_ON | PMU_CC4_4369_AUX_PD_MEMLPLDO2VDDRET_ON),
+ (PMU_CC4_4369_AUX_PD_CBUCK2VDDB_ON | PMU_CC4_4369_AUX_PD_CBUCK2VDDRET_ON));
+
+ /* set subcore_cbuck2vddb_pwrsw_force_on */
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL5,
+ (PMU_CC5_4369_SUBCORE_CBUCK2VDDB_ON | PMU_CC5_4369_SUBCORE_CBUCK2VDDRET_ON |
+ PMU_CC5_4369_SUBCORE_MEMLPLDO2VDDB_ON | PMU_CC5_4369_SUBCORE_MEMLPLDO2VDDRET_ON),
+ (PMU_CC5_4369_SUBCORE_CBUCK2VDDB_ON | PMU_CC5_4369_SUBCORE_CBUCK2VDDRET_ON));
+
+ /* Set subcore_memlpldo2vddb_pwrsw_force_off, d11_2x2_bw80_memlpldo2vddb_pwrsw_force_off
+ * and d11_2x2_bw20_memlpldo2vddb_pwrsw_force_off
+ * Set subcore_memlpldo2vddret_pwrsw_force_off,d11_2x2_bw80_memlpldo2vddret_pwrsw_force_off
+ * and d11_2x2_bw20_memlpldo2vddret_pwrsw_force_off
+ */
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL13,
+ (PMU_CC13_SUBCORE_CBUCK2VDDB_OFF | PMU_CC13_SUBCORE_CBUCK2VDDRET_OFF |
+ PMU_CC13_SUBCORE_MEMLPLDO2VDDB_OFF | PMU_CC13_SUBCORE_MEMLPLDO2VDDRET_OFF),
+ (PMU_CC13_SUBCORE_MEMLPLDO2VDDB_OFF | PMU_CC13_SUBCORE_MEMLPLDO2VDDRET_OFF));
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL13,
+ (PMU_CC13_MAIN_CBUCK2VDDB_OFF | PMU_CC13_MAIN_CBUCK2VDDRET_OFF |
+ PMU_CC13_MAIN_MEMLPLDO2VDDB_OFF | PMU_CC13_MAIN_MEMLPLDO2VDDRET_OFF),
+ (PMU_CC13_MAIN_MEMLPLDO2VDDB_OFF | PMU_CC13_MAIN_MEMLPLDO2VDDRET_OFF));
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL13,
+ (PMU_CC13_AUX_CBUCK2VDDB_OFF | PMU_CC13_AUX_CBUCK2VDDRET_OFF |
+ PMU_CC13_AUX_MEMLPLDO2VDDB_OFF | PMU_CC13_AUX_MEMLPLDO2VDDRET_OFF),
+ (PMU_CC13_AUX_MEMLPLDO2VDDB_OFF | PMU_CC13_AUX_MEMLPLDO2VDDRET_OFF));
+
+ /* PCIE retention mode enable */
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL6,
+ PMU_CC6_ENABLE_PCIE_RETENTION, PMU_CC6_ENABLE_PCIE_RETENTION);
+}
+
+static void
+BCMATTACHFN(si_set_lv_sleep_mode_4369)(si_t *sih, osl_t *osh)
+{
+ si_set_lv_sleep_mode_pmu(sih, osh);
+
+ si_set_lv_sleep_mode_lhl_config_4369(sih);
+
+ /* Enable PMU interrupts */
+ CHIPC_REG(sih, intmask, (1u << 4u), (1u << 4u));
+}
+
+void
+si_set_abuck_mode_4362(si_t *sih, uint8 mode)
+{
+ if (mode < 2 || mode > 4) {
+ ASSERT(0);
+ return;
+ }
+
+ si_pmu_vreg_control(sih, PMU_VREG_16, PMU_4362_VREG16_RSRC0_ABUCK_MODE_MASK,
+ mode << PMU_4362_VREG16_RSRC0_ABUCK_MODE_SHIFT);
+ si_pmu_vreg_control(sih, PMU_VREG_16, PMU_4362_VREG16_RSRC1_ABUCK_MODE_MASK,
+ mode << PMU_4362_VREG16_RSRC1_ABUCK_MODE_SHIFT);
+ si_pmu_vreg_control(sih, PMU_VREG_16, PMU_4362_VREG16_RSRC2_ABUCK_MODE_MASK,
+ mode << PMU_4362_VREG16_RSRC2_ABUCK_MODE_SHIFT);
+}
+
+static void
+BCMATTACHFN(si_set_lv_sleep_mode_4378)(si_t *sih, osl_t *osh)
+{
+ si_set_lv_sleep_mode_pmu(sih, osh);
+
+ si_set_lv_sleep_mode_lhl_config_4378(sih);
+}
+
+static void
+BCMATTACHFN(si_set_lv_sleep_mode_pmu_4387)(si_t *sih, osl_t *osh)
+{
+	/* Program PMU VREG register for resource-based ABUCK and CBUCK modes
+ * cbuck rsrc 0 - PWM and abuck rsrc 0 - Auto, rsrc 1 - PWM
+ */
+ si_pmu_vreg_control(sih, PMU_VREG_16, PMU_4369_VREG16_RSRC1_ABUCK_MODE_MASK,
+ 0x2u << PMU_4369_VREG16_RSRC1_ABUCK_MODE_SHIFT);
+
+ /* asr voltage adjust PWM - 0.8V */
+ si_pmu_vreg_control(sih, PMU_VREG_8, PMU_4369_VREG8_ASR_OVADJ_LPPFM_MASK,
+ 0x10u << PMU_4369_VREG8_ASR_OVADJ_LPPFM_SHIFT);
+
+ /* Enable rsrc_en_asr_msk[0] and msk[1] */
+ si_pmu_vreg_control(sih, PMU_VREG_13, PMU_4369_VREG13_RSRC_EN0_ASR_MASK,
+ 0x1u << PMU_4369_VREG13_RSRC_EN0_ASR_SHIFT);
+ si_pmu_vreg_control(sih, PMU_VREG_13, PMU_4369_VREG13_RSRC_EN1_ASR_MASK,
+ 0x1u << PMU_4369_VREG13_RSRC_EN1_ASR_SHIFT);
+ si_pmu_vreg_control(sih, PMU_VREG_13, PMU_4369_VREG13_RSRC_EN2_ASR_MASK,
+ 0x1u << PMU_4369_VREG13_RSRC_EN2_ASR_SHIFT);
+
+ si_pmu_vreg_control(sih, PMU_VREG_14, PMU_4369_VREG14_RSRC_EN_CSR_MASK0_MASK,
+ 0x1u << PMU_4369_VREG14_RSRC_EN_CSR_MASK0_SHIFT);
+
+ /* disable force_hp_mode and enable wl_pmu_lv_mod */
+ si_pmu_vreg_control(sih, PMU_VREG_7,
+ (PMU_4369_VREG_7_WL_PMU_LV_MODE_MASK | PMU_4369_VREG_7_WL_PMU_LP_MODE_MASK |
+ PMU_4369_VREG_7_PMU_FORCE_HP_MODE_MASK), PMU_4369_VREG_7_WL_PMU_LV_MODE_MASK);
+
+	/* Enable MEMLPLDO (0x08); BTLDO is also enabled. During sleep RFLDO is disabled */
+ si_pmu_vreg_control(sih, PMU_VREG_6, PMU_4369_VREG_6_MEMLPLDO_POWER_UP_MASK,
+ 0x1u << PMU_4369_VREG_6_MEMLPLDO_POWER_UP_SHIFT);
+
+	/* For 4387C0, we don't need memlpldo2vddret_on or cldo2vddb_on.
+ * We just need to clear the memlpldo2vddb_forceoff to turn on all the memlpldo2vddb pwrsw
+ */
+ if (PMUREV(sih->pmurev) < 39) {
+ /* Program PMU chip cntrl register to control
+ * cbuck2vddb_pwrsw_force_on =1 and memlpldo2vddb_pwrsw_force_off = 1
+ * cbuck2ret_pwrsw_force_on = 1 and memlpldo2vddb_pwrsw_force_off = 1
+ * set d11_2x2_bw80_cbuck2vddb_pwrsw_force_on and
+ * d11_2x2_bw20_cbuck2vddb_pwrsw_force_on cbuck2ret_pwrsw on 4 cores
+ */
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL4,
+ (PMU_CC4_4369_MAIN_PD_CBUCK2VDDB_ON | PMU_CC4_4369_MAIN_PD_CBUCK2VDDRET_ON |
+ PMU_CC4_4369_MAIN_PD_MEMLPLDO2VDDB_ON |
+ PMU_CC4_4369_MAIN_PD_MEMLPDLO2VDDRET_ON),
+ (PMU_CC4_4369_MAIN_PD_CBUCK2VDDB_ON |
+ PMU_CC4_4369_MAIN_PD_CBUCK2VDDRET_ON));
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL4,
+ (PMU_CC4_4369_AUX_PD_CBUCK2VDDB_ON | PMU_CC4_4369_AUX_PD_CBUCK2VDDRET_ON |
+ PMU_CC4_4369_AUX_PD_MEMLPLDO2VDDB_ON |
+ PMU_CC4_4369_AUX_PD_MEMLPLDO2VDDRET_ON),
+ (PMU_CC4_4369_AUX_PD_CBUCK2VDDB_ON | PMU_CC4_4369_AUX_PD_CBUCK2VDDRET_ON));
+
+ /* set subcore_cbuck2vddb_pwrsw_force_on */
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL5,
+ (PMU_CC5_4369_SUBCORE_CBUCK2VDDB_ON | PMU_CC5_4369_SUBCORE_CBUCK2VDDRET_ON |
+ PMU_CC5_4369_SUBCORE_MEMLPLDO2VDDB_ON |
+ PMU_CC5_4369_SUBCORE_MEMLPLDO2VDDRET_ON),
+ (PMU_CC5_4369_SUBCORE_CBUCK2VDDB_ON |
+ PMU_CC5_4369_SUBCORE_CBUCK2VDDRET_ON));
+
+ /* Set subcore_memlpldo2vddb_pwrsw_force_off,
+ * d11_2x2_bw80_memlpldo2vddb_pwrsw_force_off
+ * and d11_2x2_bw20_memlpldo2vddb_pwrsw_force_off
+ * Set subcore_memlpldo2vddret_pwrsw_force_off,
+ * d11_2x2_bw80_memlpldo2vddret_pwrsw_force_off
+ * and d11_2x2_bw20_memlpldo2vddret_pwrsw_force_off
+ */
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL13,
+ (PMU_CC13_SUBCORE_CBUCK2VDDB_OFF | PMU_CC13_SUBCORE_CBUCK2VDDRET_OFF |
+ PMU_CC13_SUBCORE_MEMLPLDO2VDDB_OFF | PMU_CC13_SUBCORE_MEMLPLDO2VDDRET_OFF),
+ (PMU_CC13_SUBCORE_MEMLPLDO2VDDB_OFF |
+ PMU_CC13_SUBCORE_MEMLPLDO2VDDRET_OFF));
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL13,
+ (PMU_CC13_MAIN_CBUCK2VDDB_OFF | PMU_CC13_MAIN_CBUCK2VDDRET_OFF |
+ PMU_CC13_MAIN_MEMLPLDO2VDDB_OFF | PMU_CC13_MAIN_MEMLPLDO2VDDRET_OFF),
+ (PMU_CC13_MAIN_MEMLPLDO2VDDB_OFF | PMU_CC13_MAIN_MEMLPLDO2VDDRET_OFF));
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL13,
+ (PMU_CC13_AUX_CBUCK2VDDB_OFF | PMU_CC13_AUX_CBUCK2VDDRET_OFF |
+ PMU_CC13_AUX_MEMLPLDO2VDDB_OFF | PMU_CC13_AUX_MEMLPLDO2VDDRET_OFF),
+ (PMU_CC13_AUX_MEMLPLDO2VDDB_OFF | PMU_CC13_AUX_MEMLPLDO2VDDRET_OFF));
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL4,
+ PMU_CC4_4369_AUX_PD_MEMLPLDO2VDDRET_ON,
+ PMU_CC4_4369_AUX_PD_MEMLPLDO2VDDRET_ON);
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL5,
+ PMU_CC5_4369_SUBCORE_MEMLPLDO2VDDRET_ON,
+			PMU_CC5_4369_SUBCORE_MEMLPLDO2VDDRET_ON);
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL13,
+ PMU_CC13_SUBCORE_MEMLPLDO2VDDB_OFF |
+ PMU_CC13_SUBCORE_MEMLPLDO2VDDRET_OFF |
+ PMU_CC13_MAIN_MEMLPLDO2VDDB_OFF |
+ PMU_CC13_MAIN_MEMLPLDO2VDDRET_OFF |
+ PMU_CC13_AUX_MEMLPLDO2VDDB_OFF |
+ PMU_CC13_AUX_MEMLPLDO2VDDRET_OFF |
+ PMU_CC13_CMN_MEMLPLDO2VDDRET_ON,
+ PMU_CC13_SUBCORE_MEMLPLDO2VDDB_OFF |
+ PMU_CC13_MAIN_MEMLPLDO2VDDB_OFF |
+ PMU_CC13_MAIN_MEMLPLDO2VDDRET_OFF |
+ PMU_CC13_AUX_MEMLPLDO2VDDB_OFF |
+ PMU_CC13_CMN_MEMLPLDO2VDDRET_ON);
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL17,
+ PMU_CC17_SCAN_MEMLPLDO2VDDRET_ON |
+ PMU_CC17_SCAN_CBUCK2VDDB_ON |
+ PMU_CC17_SCAN_MEMLPLDO2VDDRET_OFF,
+ PMU_CC17_SCAN_MEMLPLDO2VDDRET_ON |
+ PMU_CC17_SCAN_CBUCK2VDDB_ON);
+ } else {
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL13,
+ PMU_CC13_CMN_MEMLPLDO2VDDRET_ON |
+ PMU_CC13_AUX_MEMLPLDO2VDDB_OFF |
+ PMU_CC13_MAIN_MEMLPLDO2VDDB_OFF |
+ PMU_CC13_SUBCORE_MEMLPLDO2VDDB_OFF,
+ PMU_CC13_CMN_MEMLPLDO2VDDRET_ON);
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL17,
+ PMU_CC17_SCAN_MEMLPLDO2VDDB_OFF, 0);
+ }
+
+ /* PCIE retention mode enable */
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL6,
+ PMU_CC6_ENABLE_PCIE_RETENTION, PMU_CC6_ENABLE_PCIE_RETENTION);
+
+ /* H/W JIRA http://jira.broadcom.com/browse/HW4387-825
+ * B0 only, the h/w bug is fixed in C0
+ */
+ if (PMUREV(sih->pmurev) == 38) {
+ si_pmu_vreg_control(sih, PMU_VREG_14,
+ PMU_VREG14_RSRC_EN_ASR_PWM_PFM_MASK,
+ PMU_VREG14_RSRC_EN_ASR_PWM_PFM_MASK);
+ }
+
+ /* WAR for jira HW4387-922 */
+ si_pmu_vreg_control(sih, PMU_VREG_1,
+ PMU_4387_VREG1_CSR_OVERI_DIS_MASK,
+ PMU_4387_VREG1_CSR_OVERI_DIS_MASK);
+
+ /* Clear Misc_LDO override */
+ si_pmu_vreg_control(sih, PMU_VREG_5, VREG5_4387_MISCLDO_PU_MASK, 0);
+
+ si_pmu_vreg_control(sih, PMU_VREG_8,
+ PMU_4387_VREG8_ASR_OVERI_DIS_MASK,
+ PMU_4387_VREG8_ASR_OVERI_DIS_MASK);
+
+ if (BCMSRTOPOFF_ENAB()) {
+ si_pmu_vreg_control(sih, PMU_VREG_6,
+ PMU_4387_VREG6_WL_PMU_LV_MODE_MASK, 0);
+
+ /* Clear memldo_pu bit as 4387 doesn't plan to use MEMLDO */
+ si_pmu_vreg_control(sih, PMU_VREG_6,
+ PMU_4387_VREG6_MEMLDO_PU_MASK, 0);
+ } else {
+ si_pmu_vreg_control(sih, PMU_VREG_6,
+ PMU_4387_VREG6_WL_PMU_LV_MODE_MASK,
+ PMU_4387_VREG6_WL_PMU_LV_MODE_MASK);
+ }
+}
+
+static void
+BCMATTACHFN(si_set_lv_sleep_mode_4387)(si_t *sih, osl_t *osh)
+{
+ si_set_lv_sleep_mode_pmu_4387(sih, osh);
+ si_set_lv_sleep_mode_lhl_config_4387(sih);
+}
+
+static void
+BCMATTACHFN(si_set_lv_sleep_mode_4389)(si_t *sih, osl_t *osh)
+{
+ si_set_lv_sleep_mode_pmu(sih, osh);
+
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL4,
+ PMU_CC4_4387_MAIN_PD_CBUCK2VDDRET_ON |
+ PMU_CC4_4387_AUX_PD_CBUCK2VDDRET_ON,
+ 0);
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL5,
+ PMU_CC5_4387_SUBCORE_CBUCK2VDDRET_ON,
+ 0);
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL6,
+ PMU_CC6_RX4_CLK_SEQ_SELECT_MASK,
+ 0);
+ /* Disable lq_clk - HW4387-254 */
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL12,
+ PMU_CC12_DISABLE_LQ_CLK_ON,
+ PMU_CC12_DISABLE_LQ_CLK_ON);
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL13,
+ PMU_CC13_SUBCORE_MEMLPLDO2VDDRET_OFF |
+ PMU_CC13_MAIN_MEMLPLDO2VDDRET_OFF |
+ PMU_CC13_AUX_MEMLPLDO2VDDRET_OFF,
+ 0);
+
+#ifdef NOT_YET
+ /* FIXME: this setting is causing the load switch from CSR to ASR */
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL13,
+ PMU_CC13_SUBCORE_MEMLPLDO2VDDB_OFF |
+ PMU_CC13_SUBCORE_MEMLPLDO2VDDRET_OFF |
+ PMU_CC13_MAIN_MEMLPLDO2VDDB_OFF |
+ PMU_CC13_MAIN_MEMLPLDO2VDDRET_OFF |
+ PMU_CC13_AUX_MEMLPLDO2VDDB_OFF |
+ PMU_CC13_AUX_MEMLPLDO2VDDRET_OFF |
+ PMU_CC13_CMN_MEMLPLDO2VDDRET_ON, 0);
+#endif /* NOT_YET */
+
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL17,
+ PMU_CC17_SCAN_MEMLPLDO2VDDRET_ON |
+ PMU_CC17_SCAN_CBUCK2VDDB_ON |
+ PMU_CC17_SCAN_MEMLPLDO2VDDRET_OFF,
+ PMU_CC17_SCAN_MEMLPLDO2VDDRET_ON |
+ PMU_CC17_SCAN_CBUCK2VDDB_ON);
+
+ si_set_lv_sleep_mode_lhl_config_4389(sih);
+
+ si_pmu_vreg_control(sih, PMU_VREG_6,
+ (PMU_4389_VREG6_WL_PMU_LV_MODE_MASK | PMU_4389_VREG6_MEMLDO_PU_MASK),
+ PMU_4389_VREG6_WL_PMU_LV_MODE_MASK);
+
+ /* SW WAR for 4389B0(rev 01) issue - HW4387-922. 4389C0(rev 02) already has HW fix */
+ if (CHIPREV(sih->chiprev) == 1) {
+ si_pmu_vreg_control(sih, PMU_VREG_1,
+ PMU_4387_VREG1_CSR_OVERI_DIS_MASK,
+ PMU_4387_VREG1_CSR_OVERI_DIS_MASK);
+
+ si_pmu_vreg_control(sih, PMU_VREG_8,
+ PMU_4387_VREG8_ASR_OVERI_DIS_MASK,
+ PMU_4387_VREG8_ASR_OVERI_DIS_MASK);
+ }
+}
+
+static void
+BCMATTACHFN(si_set_lv_sleep_mode_4362)(si_t *sih, osl_t *osh)
+{
+	/* Program the PMU VREG register for resource-based ABUCK and CBUCK modes:
+	 * cbuck rsrc 0 - PWM; abuck rsrc 0 - Auto, rsrc 1 - PWM
+	 */
+ si_pmu_vreg_control(sih, PMU_VREG_16, PMU_4362_VREG16_RSRC0_CBUCK_MODE_MASK,
+ 0x3u << PMU_4362_VREG16_RSRC0_CBUCK_MODE_SHIFT);
+
+ si_set_abuck_mode_4362(sih, 0x3u);
+
+ /* asr voltage adjust PWM - 0.8V */
+ si_pmu_vreg_control(sih, PMU_VREG_8, PMU_4362_VREG8_ASR_OVADJ_LPPFM_MASK,
+ 0x10u << PMU_4362_VREG8_ASR_OVADJ_LPPFM_SHIFT);
+
+	/* Enable rsrc_en_asr_msk[0], [1] and [2] */
+ si_pmu_vreg_control(sih, PMU_VREG_13, PMU_4362_VREG13_RSRC_EN0_ASR_MASK,
+ 0x1u << PMU_4362_VREG13_RSRC_EN0_ASR_SHIFT);
+ si_pmu_vreg_control(sih, PMU_VREG_13, PMU_4362_VREG13_RSRC_EN1_ASR_MASK,
+ 0x1u << PMU_4362_VREG13_RSRC_EN1_ASR_SHIFT);
+ si_pmu_vreg_control(sih, PMU_VREG_13, PMU_4362_VREG13_RSRC_EN2_ASR_MASK,
+ 0x1u << PMU_4362_VREG13_RSRC_EN2_ASR_SHIFT);
+
+ si_pmu_vreg_control(sih, PMU_VREG_14, PMU_4362_VREG14_RSRC_EN_CSR_MASK0_MASK,
+ 0x1u << PMU_4362_VREG14_RSRC_EN_CSR_MASK0_SHIFT);
+
+ /* disable force_hp_mode and enable wl_pmu_lv_mod */
+ si_pmu_vreg_control(sih, PMU_VREG_7,
+ (PMU_4362_VREG_7_WL_PMU_LV_MODE_MASK | PMU_4362_VREG_7_WL_PMU_LP_MODE_MASK |
+ PMU_4362_VREG_7_PMU_FORCE_HP_MODE_MASK), PMU_4362_VREG_7_WL_PMU_LV_MODE_MASK);
+
+	/* Enable MISCLDO, set MEMLPLDO_adj (0.7V), and disable LPLDO power-up */
+ si_pmu_vreg_control(sih, PMU_VREG_5, PMU_4362_VREG_5_MISCLDO_POWER_UP_MASK,
+ 0x1u << PMU_4362_VREG_5_MISCLDO_POWER_UP_SHIFT);
+ si_pmu_vreg_control(sih, PMU_VREG_5, PMU_4362_VREG_5_LPLDO_POWER_UP_MASK, 0x0u);
+ si_pmu_vreg_control(sih, PMU_VREG_5, PMU_4362_VREG_5_MEMLPLDO_OP_VLT_ADJ_CTRL_MASK,
+ 0xBu << PMU_4362_VREG_5_MEMLPLDO_OP_VLT_ADJ_CTRL_SHIFT);
+
+	/* Enable MEMLPLDO (write 0x08 to enable); BTLDO is enabled. RFLDO is disabled during sleep */
+ si_pmu_vreg_control(sih, PMU_VREG_6, PMU_4362_VREG_6_MEMLPLDO_POWER_UP_MASK,
+ 0x1u << PMU_4362_VREG_6_MEMLPLDO_POWER_UP_SHIFT);
+
+	/* Program the PMU chip control register to set
+	 * cbuck2vddb_pwrsw_force_on = 1 and memlpldo2vddb_pwrsw_force_off = 1,
+	 * cbuck2ret_pwrsw_force_on = 1 and memlpldo2vddret_pwrsw_force_off = 1.
+	 * Set d11_2x2_bw80_cbuck2vddb_pwrsw_force_on and
+	 * d11_2x2_bw20_cbuck2vddb_pwrsw_force_on, cbuck2ret_pwrsw on 4 cores.
+	 */
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL4,
+ (PMU_CC4_4362_PD_CBUCK2VDDB_ON | PMU_CC4_4362_PD_CBUCK2VDDRET_ON |
+ PMU_CC4_4362_PD_MEMLPLDO2VDDB_ON | PMU_CC4_4362_PD_MEMLPDLO2VDDRET_ON),
+ (PMU_CC4_4362_PD_CBUCK2VDDB_ON | PMU_CC4_4362_PD_CBUCK2VDDRET_ON));
+
+ /* set subcore_cbuck2vddb_pwrsw_force_on */
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL5,
+ (PMU_CC5_4362_SUBCORE_CBUCK2VDDB_ON | PMU_CC5_4362_SUBCORE_CBUCK2VDDRET_ON |
+ PMU_CC5_4362_SUBCORE_MEMLPLDO2VDDB_ON | PMU_CC5_4362_SUBCORE_MEMLPLDO2VDDRET_ON),
+ (PMU_CC5_4362_SUBCORE_CBUCK2VDDB_ON | PMU_CC5_4362_SUBCORE_CBUCK2VDDRET_ON));
+
+ /* Set subcore_memlpldo2vddb_pwrsw_force_off, d11_2x2_bw80_memlpldo2vddb_pwrsw_force_off
+ * and d11_2x2_bw20_memlpldo2vddb_pwrsw_force_off
+ * Set subcore_memlpldo2vddret_pwrsw_force_off,d11_2x2_bw80_memlpldo2vddret_pwrsw_force_off
+ * and d11_2x2_bw20_memlpldo2vddret_pwrsw_force_off
+ */
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL13,
+ (PMU_CC13_SUBCORE_CBUCK2VDDB_OFF | PMU_CC13_SUBCORE_CBUCK2VDDRET_OFF |
+ PMU_CC13_SUBCORE_MEMLPLDO2VDDB_OFF | PMU_CC13_SUBCORE_MEMLPLDO2VDDRET_OFF),
+ (PMU_CC13_SUBCORE_MEMLPLDO2VDDB_OFF | PMU_CC13_SUBCORE_MEMLPLDO2VDDRET_OFF));
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL13,
+		(PMU_CC13_MAIN_CBUCK2VDDB_OFF |
+ PMU_CC13_MAIN_MEMLPLDO2VDDB_OFF | PMU_CC13_MAIN_MEMLPLDO2VDDRET_OFF),
+ (PMU_CC13_MAIN_MEMLPLDO2VDDB_OFF | PMU_CC13_MAIN_MEMLPLDO2VDDRET_OFF));
+
+ /* PCIE retention mode enable */
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL6,
+ PMU_CC6_ENABLE_PCIE_RETENTION, PMU_CC6_ENABLE_PCIE_RETENTION);
+
+ si_set_lv_sleep_mode_lhl_config_4362(sih);
+
+ /* Enable PMU interrupts */
+ CHIPC_REG(sih, intmask, (1u << 4u), (1u << 4u));
+}
+
+void
+BCMATTACHFN(si_pmu_fis_setup)(si_t *sih)
+{
+ uint origidx;
+ pmuregs_t *pmu;
+ int val;
+ osl_t *osh = si_osh(sih);
+
+ origidx = si_coreidx(sih);
+ if (AOB_ENAB(sih)) {
+ pmu = si_setcore(sih, PMU_CORE_ID, 0);
+ } else {
+ pmu = si_setcoreidx(sih, SI_CC_IDX);
+ }
+ ASSERT(pmu != NULL);
+
+ switch (CHIPID(sih->chip)) {
+ case BCM4376_CHIP_GRPID:
+ case BCM4378_CHIP_GRPID:
+ val = R_REG(osh, &pmu->max_res_mask);
+ W_REG(osh, &pmu->fis_start_min_res_mask, val);
+
+ val = R_REG(osh, &pmu->min_res_mask);
+ W_REG(osh, &pmu->fis_min_res_mask, val);
+
+ W_REG(osh, &pmu->fis_ctrl_status,
+ (PMU_FIS_DN_TIMER_VAL_4378 << PMU_FIS_DN_TIMER_VAL_SHIFT)
+ & PMU_FIS_DN_TIMER_VAL_MASK);
+ break;
+ case BCM4388_CHIP_GRPID:
+ val = R_REG(osh, &pmu->max_res_mask);
+ W_REG(osh, &pmu->fis_start_min_res_mask, val);
+
+ val = R_REG(osh, &pmu->min_res_mask);
+ W_REG(osh, &pmu->fis_min_res_mask, val);
+
+ W_REG(osh, &pmu->fis_ctrl_status,
+ ((PMU_FIS_DN_TIMER_VAL_4388 << PMU_FIS_DN_TIMER_VAL_SHIFT)
+ & PMU_FIS_DN_TIMER_VAL_MASK) | PMU_FIS_PCIE_SAVE_EN_VALUE);
+ break;
+ case BCM4389_CHIP_GRPID:
+ val = R_REG(osh, &pmu->max_res_mask);
+ W_REG(osh, &pmu->fis_start_min_res_mask, val);
+
+ val = R_REG(osh, &pmu->min_res_mask);
+ W_REG(osh, &pmu->fis_min_res_mask, val);
+
+ W_REG(osh, &pmu->fis_ctrl_status,
+ ((PMU_FIS_DN_TIMER_VAL_4389 << PMU_FIS_DN_TIMER_VAL_SHIFT)
+ & PMU_FIS_DN_TIMER_VAL_MASK) | PMU_FIS_PCIE_SAVE_EN_VALUE);
+ break;
+
+ default:
+ break;
+ }
+ si_setcoreidx(sih, origidx);
+}
+#endif /* defined(SAVERESTORE) */
+
+/*
+ * Enable dynamic clock switching and disable mirrored mode.
+ * Enabled via the NVRAM variable named by rstr_dyn_clksw_en.
+ */
+static void
+BCMATTACHFN(si_pmu_dynamic_clk_switch_enab)(si_t *sih)
+{
+ if (PMUREV(sih->pmurev) >= 36) {
+ if (getintvar(NULL, rstr_dyn_clksw_en)) {
+ PMU_REG(sih, pmucontrol_ext,
+ PCTL_EXT_REQ_MIRROR_ENAB, 0);
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL2,
+ CC2_4378_USE_WLAN_BP_CLK_ON_REQ_MASK |
+ CC2_4378_USE_CMN_BP_CLK_ON_REQ_MASK,
+ 0);
+ }
+ }
+}
+
+/* Use the PMU resource XTAL_PU to count chip deep-sleep events */
+static void
+BCMATTACHFN(si_pmu_enb_slp_cnt_on_rsrc)(si_t *sih, osl_t *osh)
+{
+ uint origidx;
+ pmuregs_t *pmu;
+ uint32 rsrc_slp = 0xffffffff;
+
+ origidx = si_coreidx(sih);
+ if (AOB_ENAB(sih)) {
+ pmu = si_setcore(sih, PMU_CORE_ID, 0);
+ } else {
+ pmu = si_setcoreidx(sih, SI_CC_IDX);
+ }
+ ASSERT(pmu != NULL);
+
+ switch (CHIPID(sih->chip)) {
+
+ case BCM4376_CHIP_GRPID:
+ case BCM4378_CHIP_GRPID:
+ rsrc_slp = RES4378_XTAL_PU;
+ break;
+
+ case BCM4385_CHIP_GRPID:
+ case BCM4387_CHIP_GRPID:
+ rsrc_slp = RES4387_XTAL_PU;
+ break;
+
+ case BCM4388_CHIP_GRPID:
+ case BCM4389_CHIP_GRPID:
+ case BCM4397_CHIP_GRPID:
+ rsrc_slp = RES4389_XTAL_PU;
+ break;
+
+ default:
+ break;
+ }
+
+ if (rsrc_slp != 0xffffffff) {
+ W_REG(osh, &pmu->rsrc_event0, PMURES_BIT(rsrc_slp));
+ }
+
+ si_setcoreidx(sih, origidx);
+}
+
+#define MISC_LDO_STEPPING_DELAY (150u) /* 150 us, includes 50us additional margin */
+
+/** initialize PMU chip controls and other chip level stuff */
+void
+BCMATTACHFN(si_pmu_chip_init)(si_t *sih, osl_t *osh)
+{
+ ASSERT(sih->cccaps & CC_CAP_PMU);
+ if (AOB_ENAB(sih)) {
+ if (hnd_pmur == NULL) {
+ uint coreidx = si_coreidx(sih);
+ hnd_pmur = si_setcore(sih, PMU_CORE_ID, 0);
+ ASSERT(hnd_pmur != NULL);
+ /* Restore to CC */
+ si_setcoreidx(sih, coreidx);
+ }
+ }
+
+ si_pmu_otp_chipcontrol(sih, osh);
+
+#ifdef CHIPC_UART_ALWAYS_ON
+ si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, clk_ctl_st), CCS_FORCEALP, CCS_FORCEALP);
+#endif /* CHIPC_UART_ALWAYS_ON */
+
+ si_pmu_enb_slp_cnt_on_rsrc(sih, osh);
+
+ /* Misc. chip control, has nothing to do with PMU */
+ switch (CHIPID(sih->chip)) {
+
+ case BCM43012_CHIP_ID:
+ case BCM43013_CHIP_ID:
+ case BCM43014_CHIP_ID:
+ {
+#ifdef USE_LHL_TIMER
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL2, PMUCCTL02_43012_LHL_TIMER_SELECT,
+ PMUCCTL02_43012_LHL_TIMER_SELECT);
+#else
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL2, PMUCCTL02_43012_LHL_TIMER_SELECT, 0);
+#endif /* USE_LHL_TIMER */
+
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL2, PMUCCTL02_43012_RFLDO3P3_PU_FORCE_ON, 0);
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL4, PMUCCTL14_43012_DISABLE_LQ_AVAIL, 0);
+
+ PMU_REG_NEW(sih, extwakemask0,
+ PMU_EXT_WAKE_MASK_0_SDIO, PMU_EXT_WAKE_MASK_0_SDIO);
+ PMU_REG_NEW(sih, extwakereqmask[0], ~0, si_pmu_rsrc_ht_avail_clk_deps(sih, osh));
+
+ if (sih->lpflags & LPFLAGS_SI_FORCE_PWM_WHEN_RADIO_ON) {
+ /* Force PWM when Radio ON */
+ /* 2G_Listen/2G_RX/2G_TX/5G_Listen/5G_RX/5G_TX = PWM */
+ si_pmu_vreg_control(sih, PMU_VREG_8,
+ PMU_43012_VREG8_DYNAMIC_CBUCK_MODE_MASK,
+ PMU_43012_VREG8_DYNAMIC_CBUCK_MODE0);
+ si_pmu_vreg_control(sih, PMU_VREG_9,
+ PMU_43012_VREG9_DYNAMIC_CBUCK_MODE_MASK,
+ PMU_43012_VREG9_DYNAMIC_CBUCK_MODE0);
+		} else {
+ /* LPPFM opt setting for ePA */
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL16, PMU_CC16_CLK4M_DIS, 1);
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL16, PMU_CC16_FF_ZERO_ADJ, 4);
+ /* 2G_Listen/2G_RX = LPPFM, 2G_TX/5G_Listen/5G_RX/5G_TX = PWM */
+ si_pmu_vreg_control(sih, PMU_VREG_8,
+ PMU_43012_VREG8_DYNAMIC_CBUCK_MODE_MASK,
+ PMU_43012_VREG8_DYNAMIC_CBUCK_MODE1);
+ si_pmu_vreg_control(sih, PMU_VREG_9,
+ PMU_43012_VREG9_DYNAMIC_CBUCK_MODE_MASK,
+ PMU_43012_VREG9_DYNAMIC_CBUCK_MODE1);
+ }
+ /* Set external LPO */
+ si_lhl_set_lpoclk(sih, osh, LHL_LPO_AUTO);
+
+ /* Enabling WL2CDIG sleep */
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL2, PMUCCTL02_43012_WL2CDIG_I_PMU_SLEEP_ENAB,
+ PMUCCTL02_43012_WL2CDIG_I_PMU_SLEEP_ENAB);
+
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL9,
+ PMUCCTL09_43012_XTAL_CORESIZE_BIAS_ADJ_STARTUP_MASK,
+ PMUCCTL09_43012_XTAL_CORESIZE_BIAS_ADJ_STARTUP_VAL <<
+ PMUCCTL09_43012_XTAL_CORESIZE_BIAS_ADJ_STARTUP_SHIFT);
+
+		/* Setting MemLPLDO voltage to 0.74V */
+ si_pmu_vreg_control(sih, PMU_VREG_6, VREG6_43012_MEMLPLDO_ADJ_MASK,
+ 0x8 << VREG6_43012_MEMLPLDO_ADJ_SHIFT);
+
+		/* Setting LPLDO voltage to 0.8V */
+ si_pmu_vreg_control(sih, PMU_VREG_6, VREG6_43012_LPLDO_ADJ_MASK,
+ 0xB << VREG6_43012_LPLDO_ADJ_SHIFT);
+
+ /* Turn off power switch 1P8 in sleep */
+ si_pmu_vreg_control(sih, PMU_VREG_7, VREG7_43012_PWRSW_1P8_PU_MASK, 0);
+
+ /* Enable PMU sleep mode0 (DS0-PS0) */
+ LHL_REG(sih, lhl_top_pwrseq_ctl_adr, ~0, PMU_SLEEP_MODE_0);
+
+ si_pmu_fast_lpo_enable(sih, osh);
+
+ /* Enable the 'power kill' (power off selected retention memories) */
+ GCI_REG_NEW(sih, bt_smem_control0, GCI_BT_SMEM_CTRL0_SUBCORE_ENABLE_PKILL,
+ GCI_BT_SMEM_CTRL0_SUBCORE_ENABLE_PKILL);
+
+ break;
+ }
+ case BCM4362_CHIP_GRPID:
+ {
+ pmuregs_t *pmu = si_setcore(sih, PMU_CORE_ID, 0);
+ uint32 lpo = LHL_LPO_AUTO;
+ uint32 lhl_tmr_sel = 0;
+
+ /* DMAHANG WAR:SWWLAN:171729
+ * Stretch the ALP and HT clocks after de-asserting
+ * the request. During the RX frame transfer from RXFIFO to
+ * DP FIFO, in certain cases the clock is getting de-asserted
+ * by ucode as it does not have visibility beyond BM
+ */
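+		/* The 0x0fff0fff value presumably packs one 16-bit stretch count
+		 * per clock (HT and ALP); the exact clkstretch field layout is an
+		 * assumption here.
+		 */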
+ W_REG(osh, &pmu->clkstretch, 0x0fff0fff);
+
+#ifdef USE_LHL_TIMER
+ lhl_tmr_sel = PMU_CC13_LHL_TIMER_SELECT;
+#endif /* USE_LHL_TIMER */
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL13, PMU_CC13_LHL_TIMER_SELECT, lhl_tmr_sel);
+
+ if (R_REG(osh, &pmu->pmustatus) & PST_EXTLPOAVAIL) {
+ lpo = LHL_EXT_LPO_ENAB;
+ }
+
+ if (!ISSIM_ENAB(sih)) {
+ si_lhl_set_lpoclk(sih, osh, lpo);
+ }
+
+ if (getintvar(NULL, rstr_btldo3p3pu)) {
+ si_pmu_regcontrol(sih, 4,
+ PMU_28NM_VREG4_WL_LDO_CNTL_EN,
+ PMU_28NM_VREG4_WL_LDO_CNTL_EN);
+ si_pmu_regcontrol(sih, 6,
+ PMU_28NM_VREG6_BTLDO3P3_PU,
+ PMU_28NM_VREG6_BTLDO3P3_PU);
+ }
+
+		/* write the preferred XTAL startup/normal settings for the A0/B0 revisions */
+ si_pmu_chipcontrol_xtal_settings_4362(sih);
+
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL6,
+ (PMU_CC6_ENABLE_CLKREQ_WAKEUP | PMU_CC6_ENABLE_PCIE_RETENTION),
+ (PMU_CC6_ENABLE_CLKREQ_WAKEUP | PMU_CC6_ENABLE_PCIE_RETENTION));
+
+ si_pmu_vreg_control(sih, PMU_VREG_8, PMU_4362_VREG8_ASR_OVADJ_LPPFM_MASK,
+ 0x02u << PMU_4362_VREG8_ASR_OVADJ_LPPFM_SHIFT);
+ si_pmu_vreg_control(sih, PMU_VREG_8, PMU_4362_VREG8_ASR_OVADJ_PFM_MASK,
+ 0x02u << PMU_4362_VREG8_ASR_OVADJ_PFM_SHIFT);
+ si_pmu_vreg_control(sih, PMU_VREG_8, PMU_4362_VREG8_ASR_OVADJ_PWM_MASK,
+ 0x02u << PMU_4362_VREG8_ASR_OVADJ_PWM_SHIFT);
+#if defined(SAVERESTORE)
+ if (SR_ENAB()) {
+ si_set_lv_sleep_mode_4362(sih, osh);
+ }
+#endif /* SAVERESTORE */
+
+ si_pmu_fast_lpo_enable(sih, osh);
+ if ((PMUREV(sih->pmurev) >= 33) && FASTLPO_ENAB()) {
+ /* Enabling FAST_SEQ */
+ uint8 fastlpo_dis = fastlpo_dis_get();
+ uint8 fastlpo_pcie_dis = fastlpo_pcie_dis_get();
+ if (!fastlpo_dis || !fastlpo_pcie_dis) {
+ PMU_REG(sih, pmucontrol_ext, PCTL_EXT_FASTSEQ_ENAB,
+ PCTL_EXT_FASTSEQ_ENAB);
+ }
+ }
+
+ break;
+ }
+
+ case BCM4369_CHIP_GRPID:
+ {
+ pmuregs_t *pmu = si_setcore(sih, PMU_CORE_ID, 0);
+ uint32 lpo = LHL_LPO_AUTO;
+ uint32 lhl_tmr_sel = 0;
+
+ /* DMAHANG WAR:SWWLAN:171729
+ * Stretch the ALP and HT clocks after de-asserting
+ * the request. During the RX frame transfer from RXFIFO to
+ * DP FIFO, in certain cases the clock is getting de-asserted
+ * by ucode as it does not have visibility beyond BM
+ */
+#ifndef ATE_BUILD
+ W_REG(osh, &pmu->clkstretch, 0x0fff0fff);
+#endif
+
+#ifdef USE_LHL_TIMER
+ lhl_tmr_sel = PMU_CC13_4369_LHL_TIMER_SELECT;
+#endif
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL13, PMU_CC13_4369_LHL_TIMER_SELECT, lhl_tmr_sel);
+
+ if (R_REG(osh, &pmu->pmustatus) & PST_EXTLPOAVAIL) {
+ lpo = LHL_EXT_LPO_ENAB;
+ }
+
+ if (!ISSIM_ENAB(sih)) {
+ si_lhl_set_lpoclk(sih, osh, lpo);
+ }
+
+ if (getintvar(NULL, rstr_btldo3p3pu)) {
+ si_pmu_regcontrol(sih, 4,
+ PMU_28NM_VREG4_WL_LDO_CNTL_EN,
+ PMU_28NM_VREG4_WL_LDO_CNTL_EN);
+ si_pmu_regcontrol(sih, 6,
+ PMU_28NM_VREG6_BTLDO3P3_PU,
+ PMU_28NM_VREG6_BTLDO3P3_PU);
+ }
+
+		/* write the preferred XTAL startup/normal settings for the A0/B0 revisions */
+ si_pmu_chipcontrol_xtal_settings_4369(sih);
+
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL6,
+ (PMU_CC6_ENABLE_CLKREQ_WAKEUP | PMU_CC6_ENABLE_PCIE_RETENTION),
+ (PMU_CC6_ENABLE_CLKREQ_WAKEUP | PMU_CC6_ENABLE_PCIE_RETENTION));
+
+ /* write the PWRSW CLK start/stop delay only for A0 revision */
+ if (CHIPREV(sih->chiprev) == 0) {
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL1, PMU_CC1_PWRSW_CLKSTRSTP_DELAY_MASK,
+ PMU_CC1_PWRSW_CLKSTRSTP_DELAY);
+ }
+
+#if defined(SAVERESTORE)
+ if (SR_ENAB()) {
+ si_set_lv_sleep_mode_4369(sih, osh);
+ }
+#endif /* SAVERESTORE */
+
+ si_pmu_fast_lpo_enable(sih, osh);
+#ifdef BCM_LDO3P3_SOFTSTART
+ if (CHIPID(sih->chip) != BCM4377_CHIP_ID) {
+ si_pmu_ldo3p3_soft_start_wl_set(sih, osh, 3);
+ }
+#endif
+ if ((PMUREV(sih->pmurev) >= 33) && FASTLPO_ENAB()) {
+ /* Enabling FAST_SEQ */
+ uint8 fastlpo_dis = fastlpo_dis_get();
+ uint8 fastlpo_pcie_dis = fastlpo_pcie_dis_get();
+ if (!fastlpo_dis || !fastlpo_pcie_dis) {
+ PMU_REG(sih, pmucontrol_ext, PCTL_EXT_FASTSEQ_ENAB,
+ PCTL_EXT_FASTSEQ_ENAB);
+ }
+ }
+
+ break;
+ }
+
+ CASE_BCM43602_CHIP: /* fall through */
+ /* Set internal/external LPO */
+ si_pmu_set_lpoclk(sih, osh);
+ break;
+
+ case BCM4376_CHIP_GRPID:
+ case BCM4378_CHIP_GRPID:
+ {
+ pmuregs_t *pmu = si_setcore(sih, PMU_CORE_ID, 0);
+ uint32 lpo = LHL_LPO_AUTO;
+ uint32 lhl_tmr_sel = 0;
+
+#ifdef USE_LHL_TIMER
+ lhl_tmr_sel = PMU_CC13_4378_LHL_TIMER_SELECT;
+#endif
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL13, PMU_CC13_4378_LHL_TIMER_SELECT, lhl_tmr_sel);
+
+ if (R_REG(osh, &pmu->pmustatus) & PST_EXTLPOAVAIL) {
+ lpo = LHL_EXT_LPO_ENAB;
+ }
+
+ if (!ISSIM_ENAB(sih)) {
+ si_lhl_set_lpoclk(sih, osh, lpo);
+ }
+
+		/* JIRA: SWWLAN-228979
+		 * The BT LDO is required only for Aux 2G Tx. Keep it powered down until Aux is up
+		 */
+ si_pmu_bt_ldo_pu(sih, FALSE);
+
+		/* Update the XTAL PMU registers to work around the slow power-up issue */
+ si_pmu_chipcontrol_xtal_settings_4378(sih);
+
+ if (LHL_IS_PSMODE_1(sih)) {
+ si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_07,
+ ((1 << GCI_CC7_AAON_BYPASS_PWRSW_SEL) |
+ (1 << GCI_CC7_AAON_BYPASS_PWRSW_SEQ_ON)),
+ 0);
+ }
+
+ si_lhl_setup(sih, osh);
+
+ /* Setting MemLPLDO voltage */
+ if (getvar(NULL, rstr_memlpldo_volt) != NULL) {
+ int memlpldo_volt = getintvar(NULL, rstr_memlpldo_volt);
+
+ if (memlpldo_volt >= PMU_VREG5_LPLDO_VOLT_0_90 &&
+ memlpldo_volt <= PMU_VREG5_LPLDO_VOLT_0_88) {
+ si_pmu_regcontrol(sih, PMU_VREG_5, VREG5_4378_MEMLPLDO_ADJ_MASK,
+ memlpldo_volt << VREG5_4378_MEMLPLDO_ADJ_SHIFT);
+ } else {
+ PMU_MSG(("Invalid memlpldo value: %d\n", memlpldo_volt));
+ }
+ }
+
+ /* Setting LPLDO voltage */
+ if (getvar(NULL, rstr_lpldo_volt) != NULL) {
+ int lpldo_volt = getintvar(NULL, rstr_lpldo_volt);
+
+ if (lpldo_volt >= PMU_VREG5_LPLDO_VOLT_0_90 &&
+ lpldo_volt <= PMU_VREG5_LPLDO_VOLT_0_88) {
+ si_pmu_regcontrol(sih, PMU_VREG_5, VREG5_4378_LPLDO_ADJ_MASK,
+ lpldo_volt << VREG5_4378_LPLDO_ADJ_SHIFT);
+ } else {
+ PMU_MSG(("Invalid lpldo value: %d\n", lpldo_volt));
+ }
+ }
+
+ /* Enable fast LPO */
+ si_pmu_fast_lpo_enable(sih, osh);
+
+#if defined(SAVERESTORE)
+ if (SR_ENAB()) {
+ si_set_lv_sleep_mode_4378(sih, osh);
+ }
+#endif /* SAVERESTORE */
+
+ si_pmu_dynamic_clk_switch_enab(sih);
+
+ if (CHIPID(sih->chip) == BCM4378_CHIP_GRPID) {
+ si_pmu_vreg_control(sih, PMU_VREG_0,
+ VREG0_4378_CSR_VOLT_ADJ_PWM_MASK |
+ VREG0_4378_CSR_VOLT_ADJ_PFM_MASK |
+ VREG0_4378_CSR_VOLT_ADJ_LP_PFM_MASK |
+ VREG0_4378_CSR_OUT_VOLT_TRIM_ADJ_MASK,
+ (CSR_VOLT_ADJ_PWM_4378 << VREG0_4378_CSR_VOLT_ADJ_PWM_SHIFT) |
+ (CSR_VOLT_ADJ_PFM_4378 << VREG0_4378_CSR_VOLT_ADJ_PFM_SHIFT) |
+ (CSR_VOLT_ADJ_LP_PFM_4378 << VREG0_4378_CSR_VOLT_ADJ_LP_PFM_SHIFT) |
+ (CSR_OUT_VOLT_TRIM_ADJ_4378 <<
+ VREG0_4378_CSR_OUT_VOLT_TRIM_ADJ_SHIFT));
+#ifdef BCM_LDO3P3_SOFTSTART
+ si_pmu_ldo3p3_soft_start_wl_set(sih, osh, 0x03u);
+ si_pmu_ldo3p3_soft_start_bt_set(sih, osh, 0x03u);
+#endif
+ } else {
+			/* 4376 */
+ int nvcsr;
+ if ((nvcsr = getintvar(NULL, rstr_csrtune))) {
+ si_pmu_vreg_control(sih, PMU_VREG_0,
+ VREG0_4378_CSR_VOLT_ADJ_PWM_MASK |
+ VREG0_4378_CSR_VOLT_ADJ_PFM_MASK |
+ VREG0_4378_CSR_VOLT_ADJ_LP_PFM_MASK,
+ (nvcsr << VREG0_4378_CSR_VOLT_ADJ_PWM_SHIFT) |
+ (nvcsr << VREG0_4378_CSR_VOLT_ADJ_PFM_SHIFT) |
+ (nvcsr << VREG0_4378_CSR_VOLT_ADJ_LP_PFM_SHIFT));
+ }
+ }
+ }
+ break;
+
+ case BCM4385_CHIP_GRPID:
+ case BCM4387_CHIP_GRPID:
+ {
+ pmuregs_t *pmu = si_setcore(sih, PMU_CORE_ID, 0);
+ uint32 lpo = LHL_LPO_AUTO;
+ uint32 lhl_tmr_sel = 0;
+ uint32 abuck_volt, cbuck_volt;
+ uint32 min_mask;
+ uint32 misc_ldo_volt, curr_misc_ldo_volt, i;
+
+#ifdef DONGLEBUILD
+ si_set_arm_clkfreq_high(sih);
+#endif
+
+ if (PMU_FLL_PU_ENAB()) {
+ min_mask = R_REG(osh, &pmu->min_res_mask) |
+ PMURES_BIT(RES4387_FAST_LPO_AVAIL) |
+ PMURES_BIT(RES4387_PMU_LP);
+ W_REG(osh, &pmu->min_res_mask, min_mask);
+ }
+
+#ifdef USE_LHL_TIMER
+ lhl_tmr_sel = PMU_CC13_4387_LHL_TIMER_SELECT;
+#endif
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL13, PMU_CC13_4387_LHL_TIMER_SELECT, lhl_tmr_sel);
+
+#if defined(SAVERESTORE)
+ if (SR_ENAB()) {
+ si_set_lv_sleep_mode_4387(sih, osh);
+ }
+#endif /* SAVERESTORE */
+
+ if (R_REG(osh, &pmu->pmustatus) & PST_EXTLPOAVAIL) {
+ lpo = LHL_EXT_LPO_ENAB;
+ }
+
+ if (!ISSIM_ENAB(sih)) {
+ si_lhl_set_lpoclk(sih, osh, lpo);
+ }
+
+ if (getintvar(NULL, rstr_btldo3p3pu)) {
+ si_pmu_regcontrol(sih, 4,
+ PMU_28NM_VREG4_WL_LDO_CNTL_EN,
+ PMU_28NM_VREG4_WL_LDO_CNTL_EN);
+ si_pmu_regcontrol(sih, 6,
+ PMU_4387_VREG6_BTLDO3P3_PU,
+ PMU_4387_VREG6_BTLDO3P3_PU);
+ }
+
+ if (LHL_IS_PSMODE_1(sih)) {
+ si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_07,
+ ((1 << GCI_CC7_AAON_BYPASS_PWRSW_SEL) |
+ (1 << GCI_CC7_AAON_BYPASS_PWRSW_SEQ_ON)),
+ 0);
+ }
+
+ si_lhl_setup(sih, osh);
+
+ /* Setting MemLPLDO voltage */
+ if (getvar(NULL, rstr_memlpldo_volt) != NULL) {
+ int memlpldo_volt = getintvar(NULL, rstr_memlpldo_volt);
+
+ if (memlpldo_volt >= PMU_VREG5_LPLDO_VOLT_0_90 &&
+ memlpldo_volt <= PMU_VREG5_LPLDO_VOLT_0_88) {
+ si_pmu_regcontrol(sih, PMU_VREG_5, VREG5_4387_MEMLPLDO_ADJ_MASK,
+ memlpldo_volt << VREG5_4387_MEMLPLDO_ADJ_SHIFT);
+ } else {
+ PMU_MSG(("Invalid memlpldo value: %d\n", memlpldo_volt));
+ }
+ }
+
+ /* Setting LPLDO voltage */
+ if (getvar(NULL, rstr_lpldo_volt) != NULL) {
+ int lpldo_volt = getintvar(NULL, rstr_lpldo_volt);
+
+ if (lpldo_volt >= PMU_VREG5_LPLDO_VOLT_0_90 &&
+ lpldo_volt <= PMU_VREG5_LPLDO_VOLT_0_88) {
+ si_pmu_regcontrol(sih, PMU_VREG_5, VREG5_4387_LPLDO_ADJ_MASK,
+ lpldo_volt << VREG5_4387_LPLDO_ADJ_SHIFT);
+ } else {
+ PMU_MSG(("Invalid lpldo value: %d\n", lpldo_volt));
+ }
+ }
+
+		/* Set the misc LDO voltage to 0.85625V; it must be stepped up gradually */
+ curr_misc_ldo_volt = (si_pmu_regcontrol(sih, PMU_VREG_5, 0, 0) &
+ VREG5_4387_MISC_LDO_ADJ_MASK) >> VREG5_4387_MISC_LDO_ADJ_SHIFT;
+
+		/* The chip default is 0.8V only immediately after POR */
+ if (curr_misc_ldo_volt == PMU_VREG5_MISC_LDO_VOLT_0p800) {
+ misc_ldo_volt = PMU_VREG5_MISC_LDO_VOLT_0p856; /* 0.85625V */
+
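+			/* Ramp one code at a time with a settling delay between
+			 * steps instead of jumping straight to the target,
+			 * presumably to limit supply transients (rationale assumed).
+			 */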
+			for (i = PMU_VREG5_MISC_LDO_VOLT_0p818; i <= misc_ldo_volt; i++) {
+ si_pmu_regcontrol(sih, PMU_VREG_5, VREG5_4387_MISC_LDO_ADJ_MASK,
+ i << VREG5_4387_MISC_LDO_ADJ_SHIFT);
+ OSL_DELAY(MISC_LDO_STEPPING_DELAY);
+ }
+ }
+
+ /* Enable fast LPO */
+ si_pmu_fast_lpo_enable(sih, osh);
+
+ if (PMU_FLL_PU_ENAB()) {
+ /* Wait until fast LPO is stable */
+ OSL_DELAY(500u);
+ si_pmu_fll_preload_enable(sih);
+ }
+
+ si_pmu_dynamic_clk_switch_enab(sih);
+
+ /* HQ settings */
+ si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_25,
+ 0xFFFFFFFF, XTAL_HQ_SETTING_4387);
+
+ /* LQ settings */
+ si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_26,
+ 0xFFFFFFFF, XTAL_LQ_SETTING_4387);
+
+ /* Enable Radiodig Clk Gating */
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL13, PMU_CC13_4387_ENAB_RADIO_REG_CLK, 0);
+
+		/* Set up HW-based switch-off of select BBPLL channels when in SCAN-only mode
+ *
+ * Assign bbpll_ch_control_grp_pd_trigger_mask = {gci_chip_cntrl[559:554],
+ * gci_chip_cntrl[543:522], 1'b0, gci_chip_cntrl[521], 1'b0};
+ */
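+		/* GRP_PD_TRIGGER_MASK_4387 is thus split across the GCI chip
+		 * controls: bit 1 -> CC16 trig_1, bits 24:3 -> CC16 trig_24_3,
+		 * bits 30:25 -> CC17 trig_30_25; bits 0 and 2 are tied to 1'b0.
+		 */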
+ si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_16,
+ CC_GCI_16_BBPLL_CH_CTRL_GRP_PD_TRIG_1_MASK |
+ CC_GCI_16_BBPLL_CH_CTRL_GRP_PD_TRIG_24_3_MASK,
+ (((GRP_PD_TRIGGER_MASK_4387 >> 1) & 0x1) << /* bit 1 */
+ CC_GCI_16_BBPLL_CH_CTRL_GRP_PD_TRIG_1_SHIFT) |
+ (((GRP_PD_TRIGGER_MASK_4387 >> 3) & 0x3FFFFF) << /* bit 24:3 */
+ CC_GCI_16_BBPLL_CH_CTRL_GRP_PD_TRIG_24_3_SHIFT));
+
+ si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_17,
+ CC_GCI_17_BBPLL_CH_CTRL_GRP_PD_TRIG_30_25_MASK |
+ CC_GCI_17_BBPLL_CH_CTRL_EN_MASK,
+ (((GRP_PD_TRIGGER_MASK_4387 >> 25) & 0x3F) << /* bits 30:25 */
+ CC_GCI_17_BBPLL_CH_CTRL_GRP_PD_TRIG_30_25_SHIFT) |
+ CC_GCI_17_BBPLL_CH_CTRL_EN_MASK);
+
+ si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_20,
+ CC_GCI_20_BBPLL_CH_CTRL_GRP_MASK,
+ (GRP_PD_MASK_4387 << CC_GCI_20_BBPLL_CH_CTRL_GRP_SHIFT));
+
+ if (getvar(NULL, rstr_abuck_volt) != NULL) {
+ abuck_volt = getintvar(NULL, rstr_abuck_volt);
+ } else {
+ abuck_volt = ABUCK_VOLT_SW_DEFAULT_4387;
+ }
+
+ if (CHIPID(sih->chip) == BCM4397_CHIP_GRPID) {
+ /* For Phy Reg Access configure IHRP access */
+ si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_28,
+ GCI_CC28_IHRP_SEL_MASK,
+ 0u << GCI_CC28_IHRP_SEL_SHIFT);
+ }
+
+ /* For 4397, PMU has only 11 Regulator Registers */
+ if (sih->chip != BCM4397_CHIP_ID) {
+ si_pmu_vreg_control(sih, PMU_VREG_13,
+ PMU_VREG13_ASR_OVADJ_PWM_MASK,
+ abuck_volt << PMU_VREG13_ASR_OVADJ_PWM_SHIFT);
+ }
+ if (BCM_PWR_OPT_ENAB()) {
+ if (getvar(NULL, rstr_cbuck_volt) != NULL) {
+ cbuck_volt = getintvar(NULL, rstr_cbuck_volt);
+ } else {
+ cbuck_volt = CBUCK_VOLT_SW_DEFAULT_4387;
+ }
+
+ si_pmu_vreg_control(sih, PMU_VREG_0,
+ VREG0_4378_CSR_VOLT_ADJ_PWM_MASK,
+ cbuck_volt << VREG0_4378_CSR_VOLT_ADJ_PWM_SHIFT);
+ }
+
+ PMU_REG(sih, pmucontrol_ext, PCTL_EXT_FAST_TRANS_ENAB, PCTL_EXT_FAST_TRANS_ENAB);
+
+ if (si_hib_ext_wakeup_isenab(sih)) {
+ /* pull up common BP */
+ int rsrc_num = RES4387_CORE_RDY_CB;
+ uint32 deps = PMURES_BIT(rsrc_num) |
+ si_pmu_res_deps(sih, osh, pmu, PMURES_BIT(rsrc_num), TRUE);
+ W_REG(osh, &pmu->extwakereqmask[0], deps);
+ }
+
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL17,
+ PMU_CC17_SCAN_DIG_SR_CLK_MASK,
+ SCAN_DIG_SR_CLK_40_MHZ << PMU_CC17_SCAN_DIG_SR_CLK_SHIFT);
+ }
+ break;
+
+ case BCM4388_CHIP_GRPID:
+ case BCM4389_CHIP_GRPID:
+ case BCM4397_CHIP_GRPID:
+ {
+ pmuregs_t *pmu = si_setcore(sih, PMU_CORE_ID, 0);
+ uint32 lpo = LHL_LPO_AUTO;
+ uint32 lhl_tmr_sel = 0;
+ uint32 abuck_volt, cbuck_volt;
+ uint32 min_mask;
+
+ if (PMU_FLL_PU_ENAB()) {
+ min_mask = R_REG(osh, &pmu->min_res_mask) |
+ PMURES_BIT(RES4389_FAST_LPO_AVAIL) |
+ PMURES_BIT(RES4389_PMU_LP);
+ W_REG(osh, &pmu->min_res_mask, min_mask);
+ }
+
+#ifdef USE_LHL_TIMER
+ lhl_tmr_sel = PMU_CC13_4387_LHL_TIMER_SELECT;
+#endif
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL13, PMU_CC13_4387_LHL_TIMER_SELECT, lhl_tmr_sel);
+
+#if defined(SAVERESTORE)
+ if (SR_ENAB()) {
+ si_set_lv_sleep_mode_4389(sih, osh);
+ }
+#endif /* SAVERESTORE */
+		/* Set the CB2WL interrupt power request irrespective of the default value */
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL2, PMU_CC2_CB2WL_INTR_PWRREQ_EN,
+ PMU_CC2_CB2WL_INTR_PWRREQ_EN);
+
+ if (R_REG(osh, &pmu->pmustatus) & PST_EXTLPOAVAIL) {
+ lpo = LHL_EXT_LPO_ENAB;
+ }
+
+ if (!ISSIM_ENAB(sih)) {
+ si_lhl_set_lpoclk(sih, osh, lpo);
+ }
+
+ if (getintvar(NULL, rstr_btldo3p3pu)) {
+ si_pmu_regcontrol(sih, 4,
+ PMU_28NM_VREG4_WL_LDO_CNTL_EN,
+ PMU_28NM_VREG4_WL_LDO_CNTL_EN);
+ si_pmu_regcontrol(sih, 6,
+ PMU_4387_VREG6_BTLDO3P3_PU,
+ PMU_4387_VREG6_BTLDO3P3_PU);
+ }
+
+ if (LHL_IS_PSMODE_1(sih)) {
+ si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_07,
+ ((1 << GCI_CC7_AAON_BYPASS_PWRSW_SEL) |
+ (1 << GCI_CC7_AAON_BYPASS_PWRSW_SEQ_ON)),
+ 0);
+ }
+
+ si_lhl_setup(sih, osh);
+
+ /* Setting MemLPLDO voltage */
+ if (getvar(NULL, rstr_memlpldo_volt) != NULL) {
+ int memlpldo_volt = getintvar(NULL, rstr_memlpldo_volt);
+
+ if (memlpldo_volt >= PMU_VREG5_LPLDO_VOLT_0_90 &&
+ memlpldo_volt <= PMU_VREG5_LPLDO_VOLT_0_88) {
+ si_pmu_regcontrol(sih, PMU_VREG_5, VREG5_4387_MEMLPLDO_ADJ_MASK,
+ memlpldo_volt << VREG5_4387_MEMLPLDO_ADJ_SHIFT);
+ } else {
+ PMU_MSG(("Invalid memlpldo value: %d\n", memlpldo_volt));
+ }
+ }
+
+ /* Setting LPLDO voltage */
+ if (getvar(NULL, rstr_lpldo_volt) != NULL) {
+ int lpldo_volt = getintvar(NULL, rstr_lpldo_volt);
+
+ if (lpldo_volt >= PMU_VREG5_LPLDO_VOLT_0_90 &&
+ lpldo_volt <= PMU_VREG5_LPLDO_VOLT_0_88) {
+ si_pmu_regcontrol(sih, PMU_VREG_5, VREG5_4387_LPLDO_ADJ_MASK,
+ lpldo_volt << VREG5_4387_LPLDO_ADJ_SHIFT);
+ } else {
+ PMU_MSG(("Invalid lpldo value: %d\n", lpldo_volt));
+ }
+ }
+
+ /* Enable fast LPO */
+ si_pmu_fast_lpo_enable(sih, osh);
+
+ if (PMU_FLL_PU_ENAB()) {
+ /* Wait until fast LPO is stable */
+ OSL_DELAY(500u);
+ si_pmu_fll_preload_enable(sih);
+ }
+
+ si_pmu_dynamic_clk_switch_enab(sih);
+
+ /* HQ settings */
+ si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_25,
+ 0xFFFFFFFF, XTAL_HQ_SETTING_4387);
+
+ /* LQ settings */
+ si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_26,
+ 0xFFFFFFFF, XTAL_LQ_SETTING_4387);
+
+ /* Enable Radiodig Clk Gating */
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL13, PMU_CC13_4387_ENAB_RADIO_REG_CLK, 0);
+
+		/* Set up HW-based switch-off of select BBPLL channels when in SCAN-only mode
+ *
+ * Assign bbpll_ch_control_grp_pd_trigger_mask = {gci_chip_cntrl[559:554],
+ * gci_chip_cntrl[543:522], 1'b0, gci_chip_cntrl[521], 1'b0};
+ */
+ si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_16,
+ CC_GCI_16_BBPLL_CH_CTRL_GRP_PD_TRIG_1_MASK |
+ CC_GCI_16_BBPLL_CH_CTRL_GRP_PD_TRIG_24_3_MASK,
+ (((GRP_PD_TRIGGER_MASK_4387 >> 1) & 0x1) << /* bit 1 */
+ CC_GCI_16_BBPLL_CH_CTRL_GRP_PD_TRIG_1_SHIFT) |
+ (((GRP_PD_TRIGGER_MASK_4387 >> 3) & 0x3FFFFF) << /* bit 24:3 */
+ CC_GCI_16_BBPLL_CH_CTRL_GRP_PD_TRIG_24_3_SHIFT));
+
+ si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_17,
+ CC_GCI_17_BBPLL_CH_CTRL_GRP_PD_TRIG_30_25_MASK |
+ CC_GCI_17_BBPLL_CH_CTRL_EN_MASK,
+ (((GRP_PD_TRIGGER_MASK_4387 >> 25) & 0x3F) << /* bits 30:25 */
+ CC_GCI_17_BBPLL_CH_CTRL_GRP_PD_TRIG_30_25_SHIFT) |
+ CC_GCI_17_BBPLL_CH_CTRL_EN_MASK);
+
+ si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_20,
+ CC_GCI_20_BBPLL_CH_CTRL_GRP_MASK,
+ (GRP_PD_MASK_4387 << CC_GCI_20_BBPLL_CH_CTRL_GRP_SHIFT));
+
+ if (getvar(NULL, rstr_abuck_volt) != NULL) {
+ abuck_volt = getintvar(NULL, rstr_abuck_volt);
+ } else {
+ abuck_volt = ABUCK_VOLT_SW_DEFAULT_4387;
+ }
+
+ if (CHIPID(sih->chip) == BCM4397_CHIP_GRPID) {
+ /* For Phy Reg Access configure IHRP access */
+ si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_28,
+ GCI_CC28_IHRP_SEL_MASK,
+ 0u << GCI_CC28_IHRP_SEL_SHIFT);
+ }
+
+ /* For 4397, PMU has only 11 Regulator Registers */
+ if (sih->chip != BCM4397_CHIP_ID) {
+ si_pmu_vreg_control(sih, PMU_VREG_13,
+ PMU_VREG13_ASR_OVADJ_PWM_MASK,
+ abuck_volt << PMU_VREG13_ASR_OVADJ_PWM_SHIFT);
+ }
+
+ if (BCM_PWR_OPT_ENAB()) {
+ if (getvar(NULL, rstr_cbuck_volt) != NULL) {
+ cbuck_volt = getintvar(NULL, rstr_cbuck_volt);
+ } else {
+ cbuck_volt = CBUCK_VOLT_SW_DEFAULT_4387;
+ }
+
+ si_pmu_vreg_control(sih, PMU_VREG_0,
+ VREG0_4378_CSR_VOLT_ADJ_PWM_MASK,
+ cbuck_volt << VREG0_4378_CSR_VOLT_ADJ_PWM_SHIFT);
+ }
+
+ PMU_REG(sih, pmucontrol_ext, PCTL_EXT_FAST_TRANS_ENAB, PCTL_EXT_FAST_TRANS_ENAB);
+
+ if (PMUREV(sih->pmurev) == 39) {
+ si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_04,
+ CC_GCI_04_4387C0_XTAL_PM_CLK,
+ CC_GCI_04_4387C0_XTAL_PM_CLK);
+ }
+ }
+ break;
+
+ default:
+ break;
+ }
+} /* si_pmu_chip_init */
+
+/** Reference: http://confluence.broadcom.com/display/WLAN/Open+loop+Calibration+Sequence */
+int
+si_pmu_openloop_cal(si_t *sih, uint16 currtemp)
+{
+ int err = BCME_OK;
+ switch (CHIPID(sih->chip)) {
+ case BCM43012_CHIP_ID:
+ case BCM43013_CHIP_ID:
+ case BCM43014_CHIP_ID:
+ err = si_pmu_openloop_cal_43012(sih, currtemp);
+ break;
+
+ default:
+ PMU_MSG(("si_pmu_openloop_cal: chip not supported!\n"));
+ break;
+ }
+ return err;
+}
+
+static int
+si_pmu_openloop_cal_43012(si_t *sih, uint16 currtemp)
+{
+ int32 a1 = -27, a2 = -15, b1 = 18704, b2 = 7531, a3, y1, y2, b3, y3;
+ int32 xtal, array_size = 0, dco_code = 0, origidx = 0, pll_reg = 0, err;
+ bcm_int_bitmask_t intr_val;
+ pmuregs_t *pmu = NULL;
+ const pllctrl_data_t *pllctrlreg_update;
+ const uint32 *pllctrlreg_val;
+ osl_t *osh = si_osh(sih);
+ uint32 final_dco_code = si_get_openloop_dco_code(sih);
+
+ xtal = si_xtalfreq(sih);
+ err = BCME_OK;
+
+ origidx = si_coreidx(sih);
+ pmu = si_setcore(sih, PMU_CORE_ID, 0);
+ if (!pmu) {
+ PMU_MSG(("si_pmu_openloop_cal_43012: NULL pmu pointer \n"));
+ err = BCME_ERROR;
+ goto done;
+ }
+
+ if (final_dco_code == 0) {
+		currtemp = (currtemp == 0) ? -1 : currtemp;
+
+ SPINWAIT(((si_corereg(sih, SI_CC_IDX,
+ OFFSETOF(chipcregs_t, clk_ctl_st), 0, 0)
+ & CCS_HTAVAIL) != CCS_HTAVAIL), PMU_MAX_TRANSITION_DLY);
+ ASSERT((si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, clk_ctl_st), 0, 0)
+ & CCS_HTAVAIL));
+
+		/* Stop using PLL clocks by programming the disable_ht_avail
+		 * and disable_lq_avail PMU chip control bits, then turn the PLL off
+		 */
+ si_pmu_pll_off_43012(sih, osh, pmu);
+
+ /* Program PLL for 320MHz VCO */
+ pllctrlreg_update = pmu1_xtaltab0_43012;
+ array_size = ARRAYSIZE(pmu1_xtaltab0_43012);
+ pllctrlreg_val = pmu1_pllctrl_tab_43012_1600mhz;
+ si_pmu_pllctrlreg_update(sih, osh, pmu, xtal, 100,
+ pllctrlreg_update, array_size, pllctrlreg_val);
+
+ /* Update PLL control register */
+ /* Set the Update bit (bit 10) in PMU for PLL registers */
+ OR_REG(osh, &pmu->pmucontrol, PCTL_PLL_PLLCTL_UPD);
+
+		/* Turn the PLL on, but ensure the force_bbpll_dreset bit is set
+		 * so that the PLL 320MHz clocks cannot be consumed yet
+		 */
+ si_pmu_pll_on_43012(sih, osh, pmu, 1);
+
+		/* Settings to expose dco_code on the PLL test outputs, then
+		 * read it back from GCI chip status
+		 */
+ pll_reg = si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG1, 0, 0);
+ pll_reg = (pll_reg & (~0x3C000)) | (0x4<<14);
+ si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG1, ~0, pll_reg);
+ OR_REG(osh, &pmu->pmucontrol, PCTL_PLL_PLLCTL_UPD);
+
+ pll_reg = pll_reg | (1<<17);
+ si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG1, ~0, pll_reg);
+ OR_REG(osh, &pmu->pmucontrol, PCTL_PLL_PLLCTL_UPD);
+
+		/* Get the DCO code from GCI chip status register 7, bits 27:16 */
+ dco_code = (si_gci_chipstatus(sih, GCI_CHIPSTATUS_07));
+ dco_code = ((dco_code & 0x0FFF0000) >> 16);
+ dco_code = (dco_code >> 4);
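+		/* dco_code now holds the upper 8 bits of the 12-bit field */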
+
+		/* The DCO code obtained above and the temperature sensed at this
+		 * time give us the DCO code that needs to be programmed to ensure
+		 * the VCO does not cross 160 MHz at 125C
+		 */
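+		/* Reference lines y = a*T + b, with everything scaled by 100:
+		 *   y1 = a1 * T + b1 and y2 = a2 * T + b2 at the current temperature T.
+		 * b3 linearly interpolates the measured code between the two lines,
+		 * and y3 projects that line out to 125C before scaling back down.
+		 */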
+ y1 = ((a1 * currtemp) + b1);
+ y2 = ((a2 * currtemp) + b2);
+ dco_code = (dco_code * 100);
+ b3 = b1 + (((b2-b1)/(y2 - y1)) * (dco_code - y1));
+ if (b3 > dco_code) {
+ a3 = (b3 - dco_code) / currtemp;
+ y3 = b3 - (a3 * 125);
+		} else {
+ a3 = (dco_code - b3) / currtemp;
+ y3 = b3 + (a3 * 125);
+ }
+ y3 = (y3/100);
+ PMU_MSG(("DCO_CODE = %d\n", y3));
+
+ /* Turning ON PLL at 160.1 MHz for Normal Operation */
+ si_pmu_pll_off_43012(sih, osh, pmu);
+ pllctrlreg_update = pmu1_xtaltab0_43012;
+ array_size = ARRAYSIZE(pmu1_xtaltab0_43012);
+ pllctrlreg_val = pmu1_pllctrl_tab_43012_1600mhz;
+ si_pmu_pllctrlreg_update(sih, osh, pmu, xtal, 0,
+ pllctrlreg_update, array_size, pllctrlreg_val);
+
+ /* Update PLL control register */
+ /* Set the Update bit (bit 10) in PMU for PLL registers */
+ OR_REG(osh, &pmu->pmucontrol, PCTL_PLL_PLLCTL_UPD);
+
+ si_pmu_pll_on_43012(sih, osh, pmu, 0);
+ y3 = (y3 << 4);
+ final_dco_code = y3;
+ PMU_MSG(("openloop_dco_code = %x\n", final_dco_code));
+ }
+
+ pll_reg = si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG3, 0, 0);
+ y3 = (pll_reg >> 16) & 0xFFF;
+
+ if (final_dco_code != (uint32)y3) {
+
+		/* Program the DCO code into bits 27:16 of
+		 * the PLL control 3 register
+		 */
+ si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG3,
+ 0x0FFF0000, (final_dco_code << 16));
+
+		/* Enable the extra post-division for open loop
+		 * by writing 1 to bit 14 of the same register
+		 */
+ si_pmu_pllcontrol(sih, PMU_PLL_CTRL_REG3, 0x00004000, (1<<14));
+
+ /* Update PLL control register */
+ /* Set the Update bit (bit 10) in PMU for PLL registers */
+ OR_REG(osh, &pmu->pmucontrol, PCTL_PLL_PLLCTL_UPD);
+		/* After cal, open-loop VCO Max=320MHz, Min=240MHz (with extra margin
+		 * 230-220MHz). Update SAVE_RESTORE up/down times accordingly
+		 */
+ W_REG(osh, &pmu->res_table_sel, RES43012_SR_SAVE_RESTORE);
+ W_REG(osh, &pmu->res_updn_timer, 0x01800180);
+ }
+
+ si_restore_core(sih, origidx, &intr_val);
+ si_set_openloop_dco_code(sih, final_dco_code);
+done:
+ return err;
+}
+
+void
+si_pmu_slow_clk_reinit(si_t *sih, osl_t *osh)
+{
+#if !defined(BCMDONGLEHOST)
+ chipcregs_t *cc;
+ uint origidx;
+ uint32 xtalfreq;
+
+ /* PMU specific initializations */
+ if (!PMUCTL_ENAB(sih))
+ return;
+ /* Remember original core before switch to chipc */
+ origidx = si_coreidx(sih);
+ cc = si_setcoreidx(sih, SI_CC_IDX);
+ ASSERT(cc != NULL);
+ if (cc == NULL)
+ return;
+
+ xtalfreq = getintvar(NULL, rstr_xtalfreq);
+	/*
+	 * Workaround for chips that don't support an external LPO, where the
+	 * ALP clock cannot be measured accurately:
+	 */
+ switch (CHIPID(sih->chip)) {
+ CASE_BCM43602_CHIP:
+ xtalfreq = XTAL_FREQ_54MHZ;
+ break;
+ default:
+ break;
+ }
+ /* If xtalfreq var not available, try to measure it */
+ if (xtalfreq == 0)
+ xtalfreq = si_pmu_measure_alpclk(sih, osh);
+ si_pmu_enb_slow_clk(sih, osh, xtalfreq);
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+#endif /* !BCMDONGLEHOST */
+}
+
+/** initialize PMU registers in case default values proved to be suboptimal */
+void
+BCMATTACHFN(si_pmu_swreg_init)(si_t *sih, osl_t *osh)
+{
+ ASSERT(sih->cccaps & CC_CAP_PMU);
+
+ switch (CHIPID(sih->chip)) {
+ CASE_BCM43602_CHIP:
+ /* adjust PA Vref to 2.80V */
+ si_pmu_set_ldo_voltage(sih, osh, SET_LDO_VOLTAGE_PAREF, 0x0c);
+ break;
+ case BCM4378_CHIP_GRPID:
+ si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_04, GPIO_DRIVE_4378_MASK,
+ GPIO_DRIVE_4378_VAL);
+ /* fall through */
+ case BCM4376_CHIP_GRPID:
+ case BCM4385_CHIP_GRPID:
+ case BCM4387_CHIP_GRPID:
+#ifdef BCM_AVS
+ if (BCM_AVS_ENAB()) {
+ si_pmu_set_avs(sih);
+ }
+#endif
+ break;
+ default:
+ break;
+ }
+ si_pmu_otp_vreg_control(sih, osh);
+} /* si_pmu_swreg_init */
+
+/** Wait for a particular clock level to be on the backplane */
+uint32
+si_pmu_waitforclk_on_backplane(si_t *sih, osl_t *osh, uint32 clk, uint32 delay_val)
+{
+ pmuregs_t *pmu;
+ uint origidx;
+ uint32 val;
+
+ ASSERT(sih->cccaps & CC_CAP_PMU);
+ /* Remember original core before switch to chipc/pmu */
+ origidx = si_coreidx(sih);
+ if (AOB_ENAB(sih)) {
+ pmu = si_setcore(sih, PMU_CORE_ID, 0);
+ } else {
+ pmu = si_setcoreidx(sih, SI_CC_IDX);
+ }
+ ASSERT(pmu != NULL);
+
+ if (delay_val)
+ SPINWAIT(((R_REG(osh, &pmu->pmustatus) & clk) != clk), delay_val);
+ val = R_REG(osh, &pmu->pmustatus) & clk;
+
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+ return (val);
+}
+
+#define EXT_ILP_HZ 32768
+
+/**
+ * Measures the ALP clock frequency in KHz. Returns 0 if not possible.
+ * Possible only if PMU rev >= 10 and there is an external LPO 32768Hz crystal.
+ */
+uint32
+BCMATTACHFN(si_pmu_measure_alpclk)(si_t *sih, osl_t *osh)
+{
+ uint32 alp_khz;
+ uint32 pmustat_lpo = 0;
+ pmuregs_t *pmu;
+ uint origidx;
+
+ if (PMUREV(sih->pmurev) < 10)
+ return 0;
+
+ ASSERT(sih->cccaps & CC_CAP_PMU);
+
+ /* Remember original core before switch to chipc/pmu */
+ origidx = si_coreidx(sih);
+ if (AOB_ENAB(sih)) {
+ pmu = si_setcore(sih, PMU_CORE_ID, 0);
+ } else {
+ pmu = si_setcoreidx(sih, SI_CC_IDX);
+ }
+ ASSERT(pmu != NULL);
+
+ if ((CHIPID(sih->chip) == BCM43012_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM43013_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM43014_CHIP_ID) ||
+ (PMUREV(sih->pmurev) >= 22))
+ pmustat_lpo = !(R_REG(osh, &pmu->pmucontrol) & PCTL_LPO_SEL);
+ else
+ pmustat_lpo = R_REG(osh, &pmu->pmustatus) & PST_EXTLPOAVAIL;
+
+ if (pmustat_lpo) {
+ uint32 ilp_ctr, alp_hz;
+
+ /* Enable the reg to measure the freq, in case disabled before */
+ W_REG(osh, &pmu->pmu_xtalfreq, 1U << PMU_XTALFREQ_REG_MEASURE_SHIFT);
+
+ /* Delay for well over 4 ILP clocks */
+ OSL_DELAY(1000);
+
+ /* Read the latched number of ALP ticks per 4 ILP ticks */
+ ilp_ctr = R_REG(osh, &pmu->pmu_xtalfreq) & PMU_XTALFREQ_REG_ILPCTR_MASK;
+
+ /* Turn off the PMU_XTALFREQ_REG_MEASURE_SHIFT bit to save power */
+ W_REG(osh, &pmu->pmu_xtalfreq, 0);
+
+ /* Calculate ALP frequency */
+ alp_hz = (ilp_ctr * EXT_ILP_HZ) / 4;
+
+ /* Round to nearest 100KHz, and at the same time convert to KHz */
+ alp_khz = (alp_hz + 50000) / 100000 * 100;
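+		/* Worked example: ilp_ctr = 4577 gives
+		 * alp_hz = 4577 * 32768 / 4 = 37494784 Hz, which rounds to
+		 * alp_khz = 37500.
+		 */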
+ } else
+ alp_khz = 0;
+
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+
+ return alp_khz;
+} /* si_pmu_measure_alpclk */
+
+/** Update min/max resources after SR-ASM download to d11 txfifo */
+void
+si_pmu_res_minmax_update(si_t *sih, osl_t *osh)
+{
+ uint32 min_mask = 0, max_mask = 0;
+ pmuregs_t *pmu;
+ uint origidx;
+ bcm_int_bitmask_t intr_val;
+
+ /* Block ints and save current core */
+ si_introff(sih, &intr_val);
+ /* Remember original core before switch to chipc/pmu */
+ origidx = si_coreidx(sih);
+ if (AOB_ENAB(sih)) {
+ pmu = si_setcore(sih, PMU_CORE_ID, 0);
+ } else {
+ pmu = si_setcoreidx(sih, SI_CC_IDX);
+ }
+ ASSERT(pmu != NULL);
+
+ switch (CHIPID(sih->chip)) {
+ CASE_BCM43602_CHIP:
+ max_mask = 0; /* Only care about min_mask for now */
+ break;
+ case BCM43012_CHIP_ID:
+ case BCM43013_CHIP_ID:
+ case BCM43014_CHIP_ID:
+ min_mask = RES43012_PMU_SLEEP;
+ break;
+ case BCM4369_CHIP_GRPID:
+ case BCM4362_CHIP_GRPID:
+ case BCM4376_CHIP_GRPID:
+ case BCM4378_CHIP_GRPID:
+ case BCM4385_CHIP_GRPID:
+ case BCM4387_CHIP_GRPID:
+ case BCM4388_CHIP_GRPID:
+ case BCM4389_CHIP_GRPID:
+ case BCM4397_CHIP_GRPID:
+ si_pmu_res_masks(sih, &min_mask, &max_mask);
+ max_mask = 0; /* Don't need to update max */
+ break;
+ default:
+ break;
+ }
+ if (min_mask) {
+ /* Add min mask dependencies */
+ min_mask |= si_pmu_res_deps(sih, osh, pmu, min_mask, FALSE);
+ W_REG(osh, &pmu->min_res_mask, min_mask);
+ }
+ if (max_mask) {
+ max_mask |= si_pmu_res_deps(sih, osh, pmu, max_mask, FALSE);
+ W_REG(osh, &pmu->max_res_mask, max_mask);
+ }
+
+ si_pmu_wait_for_steady_state(sih, osh, pmu);
+
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+ si_intrrestore(sih, &intr_val);
+} /* si_pmu_res_minmax_update */
+
+#ifdef DONGLEBUILD
+
+#define PMUCAP_DUMP_TAG_SIZE_BYTES 4
+
+/* Place the definitions below in the .ro_ontrap section so they are
+ * not reclaimed when the rodata section is reused after a trap.
+ */
+static const uint32 BCMPOST_TRAP_RODATA(chipc_regs_to_dump)[] = {
+ OFFSETOF(chipcregs_t, clk_ctl_st),
+ OFFSETOF(chipcregs_t, powerctl)
+};
+
+static const uint BCMPOST_TRAP_RODATA(pmuregsdump)[] = {
+ OFFSETOF(pmuregs_t, pmucontrol),
+ OFFSETOF(pmuregs_t, pmucapabilities),
+ OFFSETOF(pmuregs_t, pmustatus),
+ OFFSETOF(pmuregs_t, res_state),
+ OFFSETOF(pmuregs_t, res_pending),
+ OFFSETOF(pmuregs_t, pmutimer),
+ OFFSETOF(pmuregs_t, min_res_mask),
+ OFFSETOF(pmuregs_t, max_res_mask),
+ OFFSETOF(pmuregs_t, clkstretch),
+ OFFSETOF(pmuregs_t, res_req_timer),
+ OFFSETOF(pmuregs_t, res_req_mask),
+ OFFSETOF(pmuregs_t, mac_res_req_timer),
+ OFFSETOF(pmuregs_t, mac_res_req_mask),
+ OFFSETOF(pmuregs_t, pmuintmask0),
+ OFFSETOF(pmuregs_t, pmuintstatus),
+ OFFSETOF(pmuregs_t, pmuintctrl0),
+ OFFSETOF(pmuregs_t, extwakeupstatus),
+ OFFSETOF(pmuregs_t, extwakemask0)
+};
+
+static const uint BCMPOST_TRAP_RODATA(pmuregsdump_mac_res1)[] = {
+ OFFSETOF(pmuregs_t, mac_res_req_timer1),
+ OFFSETOF(pmuregs_t, mac_res_req_mask1)
+};
+
+static const uint BCMPOST_TRAP_RODATA(pmuregsdump_mac_res2)[] = {
+ OFFSETOF(pmuregs_t, mac_res_req_timer2),
+ OFFSETOF(pmuregs_t, mac_res_req_mask2)
+};
+
+static const uint BCMPOST_TRAP_RODATA(pmuregsdump_pmu_int1)[] = {
+ OFFSETOF(pmuregs_t, pmuintmask1),
+ OFFSETOF(pmuregs_t, pmuintctrl1)
+};
+
+/* Pointer to the location in rodata where the PMU registers are stored.
+ * Re-reading PMU registers is best avoided because: 1. register reads are
+ * slow, and 2. as part of a trap these registers are dumped to the rodata
+ * section anyway, so they can be read directly from rodata and sent to the
+ * host. The registers are dumped to rodata first, and
+ * hnd_minidump_pmuregs_dump() then picks them up; to do so it needs to
+ * know where they are stored.
+ */
+/* Length of the reg dump, which contains (address, value) pairs */
+#define SI_PMU_REG_DUMP_BASE_SIZE (ARRAYSIZE(pmuregsdump) * 2u * sizeof(uint32))
+#define SI_PMU_REG_DUMP_MACRSRC1_SIZE (ARRAYSIZE(pmuregsdump_mac_res1) * 2u * sizeof(uint32))
+#define SI_PMU_REG_DUMP_MACRSRC2_SIZE (ARRAYSIZE(pmuregsdump_mac_res2) * 2u * sizeof(uint32))
+#define SI_PMU_REG_DUMP_INTRCV1_SIZE (ARRAYSIZE(pmuregsdump_pmu_int1) * 2u * sizeof(uint32))
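+/* Each register in the lists above is dumped as an (address, value) pair,
+ * hence the 2u * sizeof(uint32) per array entry.
+ */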
+
+static uint32 *rodata_pmuregdump_ptr = NULL;
+
+/** Size of the buffer needed to store the PMU register dump, specifically the PMU indirect registers */
+uint32
+BCMATTACHFN(si_pmu_dump_buf_size_pmucap)(si_t *sih)
+{
+ uint32 buf_size = 0;
+ uint32 pmu_size = 0;
+ uint32 cnt;
+
+ if (PMUREV(sih->pmurev) < 5)
+ return 0;
+
+	/* PMU resources: per-resource dependency mask and up/down timer */
+ cnt = (sih->pmucaps & PCAP_RC_MASK) >> PCAP_RC_SHIFT;
+ if (cnt) {
+ buf_size += (cnt * 2 * sizeof(uint32)) + PMUCAP_DUMP_TAG_SIZE_BYTES;
+ }
+ /* pll controls */
+ cnt = (sih->pmucaps & PCAP5_PC_MASK) >> PCAP5_PC_SHIFT;
+ if (cnt) {
+ buf_size += (cnt * sizeof(uint32)) + PMUCAP_DUMP_TAG_SIZE_BYTES;
+ }
+
+ /* voltage controls */
+ cnt = (sih->pmucaps & PCAP5_VC_MASK) >> PCAP5_VC_SHIFT;
+ if (cnt) {
+ buf_size += (cnt * sizeof(uint32)) + PMUCAP_DUMP_TAG_SIZE_BYTES;
+ }
+
+ /* chip controls */
+ cnt = (sih->pmucaps & PCAP5_CC_MASK) >> PCAP5_CC_SHIFT;
+ if (cnt) {
+ buf_size += (cnt * sizeof(uint32)) + PMUCAP_DUMP_TAG_SIZE_BYTES;
+ }
+
+	/* include chip common registers from the list */
+	/* cnt indicates how many registers; tag_id 0 marks these as address/value pairs */
+ if (ARRAYSIZE(chipc_regs_to_dump)) {
+ buf_size += PMUCAP_DUMP_TAG_SIZE_BYTES;
+ /* address/value pairs */
+ buf_size += sizeof(chipc_regs_to_dump);
+ buf_size += sizeof(chipc_regs_to_dump);
+ }
+
+	/* include PMU registers from the lists 'pmuregsdumpXX' */
+ if ((PMUREV(sih->pmurev) > 27) && ARRAYSIZE(pmuregsdump) != 0) {
+ uint8 rsrc_cnt = si_pmu_get_mac_rsrc_req_tmr_cnt(sih);
+ buf_size += PMUCAP_DUMP_TAG_SIZE_BYTES;
+ pmu_size += sizeof(pmuregsdump);
+ if (ARRAYSIZE(pmuregsdump_mac_res1) != 0 && rsrc_cnt > 1) {
+ buf_size += PMUCAP_DUMP_TAG_SIZE_BYTES;
+ pmu_size += sizeof(pmuregsdump_mac_res1);
+ }
+ if (ARRAYSIZE(pmuregsdump_mac_res2) != 0 && rsrc_cnt > 2) {
+ buf_size += PMUCAP_DUMP_TAG_SIZE_BYTES;
+ pmu_size += sizeof(pmuregsdump_mac_res2);
+ }
+ if (ARRAYSIZE(pmuregsdump_pmu_int1) != 0 &&
+ si_pmu_get_pmu_interrupt_rcv_cnt(sih) > 1) {
+ buf_size += PMUCAP_DUMP_TAG_SIZE_BYTES;
+ pmu_size += sizeof(pmuregsdump_pmu_int1);
+ }
+ /* address/value pairs */
+ buf_size += (pmu_size << 1);
+ }
+
+ return buf_size;
+}
+
+/**
+ * Dump the registers into the user-specified buffer, needed to store the
+ * PMU register dump (specifically the PMU indirect registers).
+ * The format is sets of count, base register, register values.
+ */
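+/*
+ * Buffer layout, one group after another (all words 32-bit):
+ *   header: (count << 16) | tag, where tag is the indirect base address
+ *           (RSRCTABLEADDR, PMU_PLL_CONTROL_ADDR, PMU_REG_CONTROL_ADDR,
+ *           CC_CHIPCTL_ADDR) or 0/1 for directly addressed groups
+ *   body:   count values for the indirect groups (two words per entry for
+ *           the resource table), or count (address, value) pairs for the
+ *           directly addressed groups
+ */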
+uint32
+BCMPOSTTRAPFN(si_pmu_dump_pmucap_binary)(si_t *sih, uchar *p)
+{
+ uint32 cnt, i;
+ osl_t *osh;
+ pmuregs_t *pmu;
+ uint origidx;
+ uint mac_res_cnt;
+ uint pmu_int_rcv_cnt;
+ uint32 pmu_totalsize = 0;
+
+ uint32 *p32 = (uint32 *)p;
+
+ if (PMUREV(sih->pmurev) < 5)
+ return 0;
+
+ origidx = si_coreidx(sih);
+
+ if (AOB_ENAB(sih)) {
+ pmu = si_setcore(sih, PMU_CORE_ID, 0);
+	} else {
+ pmu = si_setcoreidx(sih, SI_CC_IDX);
+ }
+ ASSERT(pmu != NULL);
+
+ osh = si_osh(sih);
+
+ cnt = (sih->pmucaps & PCAP_RC_MASK) >> PCAP_RC_SHIFT;
+ if (cnt) {
+ *p32++ = (cnt << 16 | RSRCTABLEADDR);
+ for (i = 0; i < cnt; i++) {
+ W_REG(osh, &pmu->res_table_sel, i);
+ *p32++ = R_REG(osh, &pmu->res_dep_mask);
+ *p32++ = R_REG(osh, &pmu->res_updn_timer);
+ }
+ }
+
+ cnt = (sih->pmucaps & PCAP5_PC_MASK) >> PCAP5_PC_SHIFT;
+ if (cnt) {
+ *p32++ = (cnt << 16 | PMU_PLL_CONTROL_ADDR);
+ for (i = 0; i < cnt; i++) {
+ *p32++ = si_pmu_pllcontrol(sih, i, 0, 0);
+ }
+ }
+
+ cnt = (sih->pmucaps & PCAP5_VC_MASK) >> PCAP5_VC_SHIFT;
+ if (cnt) {
+ *p32++ = (cnt << 16 | PMU_REG_CONTROL_ADDR);
+ for (i = 0; i < cnt; i++) {
+ *p32++ = si_pmu_vreg_control(sih, i, 0, 0);
+ }
+ }
+ cnt = (sih->pmucaps & PCAP5_CC_MASK) >> PCAP5_CC_SHIFT;
+ if (cnt) {
+ *p32++ = (cnt << 16 | CC_CHIPCTL_ADDR);
+ for (i = 0; i < cnt; i++) {
+ *p32++ = si_pmu_chipcontrol(sih, i, 0, 0);
+ }
+ }
+ if (ARRAYSIZE(chipc_regs_to_dump)) {
+ uint32 *addr;
+ *p32++ = (ARRAYSIZE(chipc_regs_to_dump) << 16 | 0);
+ for (i = 0; i < ARRAYSIZE(chipc_regs_to_dump); i++) {
+ addr = (uint32 *)(SI_ENUM_BASE(sih) + chipc_regs_to_dump[i]);
+ *p32++ = (uint32)addr;
+ *p32++ = R_REG(osh, addr);
+ }
+ }
+
+ if ((PMUREV(sih->pmurev) > 27)) {
+ volatile uint32 *addr;
+ *p32++ = (ARRAYSIZE(pmuregsdump) << 16 | 1);
+ for (i = 0; i < ARRAYSIZE(pmuregsdump); i++) {
+ addr = (volatile uint32*)((volatile char*)pmu + pmuregsdump[i]);
+ *p32++ = (uint32)addr;
+ *p32++ = R_REG(osh, addr);
+ }
+ pmu_totalsize += (ARRAYSIZE(pmuregsdump));
+ mac_res_cnt = si_pmu_get_mac_rsrc_req_tmr_cnt(sih);
+ if (mac_res_cnt > 1) {
+ *p32++ = (ARRAYSIZE(pmuregsdump_mac_res1) << 16 | 1);
+ for (i = 0; i < ARRAYSIZE(pmuregsdump_mac_res1); i++) {
+ addr = (volatile uint32*)((volatile char*)pmu +
+ pmuregsdump_mac_res1[i]);
+ *p32++ = (uint32)addr;
+ *p32++ = R_REG(osh, addr);
+ }
+ pmu_totalsize += (ARRAYSIZE(pmuregsdump_mac_res1));
+ }
+ if (mac_res_cnt > 2) {
+ *p32++ = (ARRAYSIZE(pmuregsdump_mac_res2) << 16 | 1);
+ for (i = 0; i < ARRAYSIZE(pmuregsdump_mac_res2); i++) {
+ addr = (volatile uint32*)((volatile char*)pmu +
+ pmuregsdump_mac_res2[i]);
+ *p32++ = (uint32)addr;
+ *p32++ = R_REG(osh, addr);
+ }
+ pmu_totalsize += (ARRAYSIZE(pmuregsdump_mac_res2));
+ }
+ pmu_int_rcv_cnt = si_pmu_get_pmu_interrupt_rcv_cnt(sih);
+ if (pmu_int_rcv_cnt > 1) {
+ *p32++ = (ARRAYSIZE(pmuregsdump_pmu_int1) << 16 | 1);
+ for (i = 0; i < ARRAYSIZE(pmuregsdump_pmu_int1); i++) {
+ addr = (volatile uint32*)((volatile char*)pmu +
+ pmuregsdump_pmu_int1[i]);
+ *p32++ = (uint32)addr;
+ *p32++ = R_REG(osh, addr);
+ }
+ pmu_totalsize += (ARRAYSIZE(pmuregsdump_pmu_int1));
+ }
+ /* Mark the location where these registers are dumped to avoid a re-read in
+ * trap context.
+ */
+ rodata_pmuregdump_ptr = (p32 - (2 * pmu_totalsize));
+ }
+
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+ return 1;
+} /* si_pmu_dump_pmucap_binary */
+
+#endif /* DONGLEBUILD */
+/**
+ * Enable the resources specified in min_mask, plus their dependencies, in the
+ * minimum resources mask. With set == FALSE, clear those resources (except
+ * any that belong to the default mask), restoring the device default.
+ */
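+/*
+ * Example usage (this is what si_pmu_min_res_otp_pu_set() below does):
+ *   rsc_per_chip_t *rsc = si_pmu_get_rsc_positions(sih);
+ *   si_pmu_min_res_set(sih, osh, PMURES_BIT(rsc->otp_pu), TRUE);  (power up)
+ *   si_pmu_min_res_set(sih, osh, PMURES_BIT(rsc->otp_pu), FALSE); (restore default)
+ */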
+int
+si_pmu_min_res_set(si_t *sih, osl_t *osh, uint min_mask, bool set)
+{
+ uint32 min_res, max_res;
+ uint origidx;
+ bcm_int_bitmask_t intr_val;
+ pmuregs_t *pmu;
+
+ /* Block ints and save current core */
+ si_introff(sih, &intr_val);
+
+ /* Remember original core before switch to chipc */
+ origidx = si_coreidx(sih);
+ if (AOB_ENAB(sih)) {
+ pmu = si_setcore(sih, PMU_CORE_ID, 0);
+ } else {
+ pmu = si_setcoreidx(sih, SI_CC_IDX);
+ }
+ ASSERT(pmu != NULL);
+
+ si_pmu_res_masks(sih, &min_res, &max_res);
+ min_mask |= si_pmu_res_deps(sih, osh, pmu, min_mask, TRUE);
+
+ /*
+ * If set is enabled, the resources specified in the min_mask is brought up. If not set,
+ * go to the default min_resource of the device.
+ */
+ if (set) {
+ OR_REG(osh, &pmu->min_res_mask, min_mask);
+ } else {
+ min_mask &= ~min_res;
+ AND_REG(osh, &pmu->min_res_mask, ~min_mask);
+ }
+
+ si_pmu_wait_for_steady_state(sih, osh, pmu);
+
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+ si_intrrestore(sih, &intr_val);
+
+ return min_mask;
+}
+
+void
+si_pmu_bt_ldo_pu(si_t *sih, bool up)
+{
+ si_pmu_regcontrol(sih, PMU_VREG_6, PMU_28NM_VREG6_BTLDO3P3_PU,
+ (up == TRUE) ? PMU_28NM_VREG6_BTLDO3P3_PU : 0x00);
+}
+
+#ifdef BCM_LDO3P3_SOFTSTART
+int si_pmu_ldo3p3_soft_start_wl_get(si_t *sih, osl_t *osh, int *res)
+{
+ uint32 bt_or_wl = 0u;
+ return si_pmu_ldo3p3_soft_start_get(sih, osh, bt_or_wl, res);
+}
+
+int si_pmu_ldo3p3_soft_start_bt_get(si_t *sih, osl_t *osh, int *res)
+{
+ uint32 bt_or_wl = 1u;
+ return si_pmu_ldo3p3_soft_start_get(sih, osh, bt_or_wl, res);
+}
+
+static int
+si_pmu_soft_start_params(si_t *sih, uint32 bt_or_wl, uint *en_reg, uint32 *en_shift,
+ uint32 *en_mask, uint32 *en_val, uint *val_reg, uint32 *val_shift, uint32 *val_mask)
+{
+ switch (CHIPID(sih->chip)) {
+ case BCM4369_CHIP_GRPID:
+ *en_reg = SOFT_START_EN_REG_4369;
+ *en_shift = SOFT_START_EN_SHIFT_4369(bt_or_wl);
+ *en_mask = SOFT_START_EN_MASK_4369;
+ *en_val = SOFT_START_EN_VALUE_4369;
+ *val_reg = SLEW_RATE_VALUE_REG_4369;
+ *val_shift = SLEW_RATE_SHIFT_4369(bt_or_wl);
+ *val_mask = SLEW_RATE_MASK_4369;
+ break;
+ case BCM4376_CHIP_GRPID:
+ case BCM4378_CHIP_GRPID:
+ *en_reg = SOFT_START_EN_REG_4378;
+ *en_shift = SOFT_START_EN_SHIFT_4378(bt_or_wl);
+ *en_mask = SOFT_START_EN_MASK_4378;
+ *en_val = SOFT_START_EN_VALUE_4378;
+ *val_reg = SLEW_RATE_VALUE_REG_4378;
+ *val_shift = SLEW_RATE_SHIFT_4378(bt_or_wl);
+ *val_mask = SLEW_RATE_MASK_4378;
+ if (BCM4378_CHIP(sih->chip) && PMUREV(sih->pmurev) == 37) {
+ *en_val = SOFT_START_EN_VALUE_4378_REV37;
+ }
+ break;
+ case BCM4387_CHIP_GRPID:
+ if (bt_or_wl == 0) {
+ return BCME_UNSUPPORTED;
+ }
+ *en_reg = SOFT_START_EN_REG_4387;
+ *en_shift = SOFT_START_EN_SHIFT_4387(bt_or_wl);
+ *en_mask = SOFT_START_EN_MASK_4387;
+ *en_val = SOFT_START_EN_VALUE_4387;
+ *val_reg = SLEW_RATE_VALUE_REG_4387;
+ *val_shift = SLEW_RATE_SHIFT_4387(bt_or_wl);
+ *val_mask = SLEW_RATE_MASK_4387;
+ break;
+ default:
+ /* Add support */
+ ASSERT(0);
+ break;
+ }
+ return BCME_OK;
+}
+
+static int si_pmu_ldo3p3_soft_start_get(si_t *sih, osl_t *osh, uint32 bt_or_wl, int *res)
+{
+ uint en_reg = 0, val_reg = 0;
+ uint32 en_shift = 0, en_mask = 0, en_val = 0, val_shift = 0, val_mask = 0;
+ uint32 soft_start_en, slew_rate;
+ int ret = si_pmu_soft_start_params(sih, bt_or_wl, &en_reg, &en_shift, &en_mask, &en_val,
+ &val_reg, &val_shift, &val_mask);
+
+ if (BCME_OK != ret) {
+ return ret;
+ }
+ soft_start_en = (si_pmu_vreg_control(sih, en_reg, 0, 0) >> en_shift);
+ soft_start_en &= en_mask;
+ if (en_val == 0u) {
+ soft_start_en = !soft_start_en;
+ }
+ if (soft_start_en) {
+ slew_rate = (si_pmu_vreg_control(sih, val_reg, 0, 0) >> val_shift);
+ slew_rate &= val_mask;
+ *res = slew_rate;
+ } else {
+ *res = -1;
+ }
+ return BCME_OK;
+}
+
+int si_pmu_ldo3p3_soft_start_wl_set(si_t *sih, osl_t *osh, uint32 slew_rate)
+{
+ uint32 bt_or_wl = 0u;
+ return si_pmu_ldo3p3_soft_start_set(sih, osh, bt_or_wl, slew_rate);
+}
+
+int si_pmu_ldo3p3_soft_start_bt_set(si_t *sih, osl_t *osh, uint32 slew_rate)
+{
+ uint32 bt_or_wl = 1u;
+ return si_pmu_ldo3p3_soft_start_set(sih, osh, bt_or_wl, slew_rate);
+}
+
+static int si_pmu_ldo3p3_soft_start_set(si_t *sih, osl_t *osh, uint32 bt_or_wl, uint32 slew_rate)
+{
+ uint en_reg = 0, val_reg = 0;
+ uint32 en_shift = 0, en_mask = 0, en_val = 0, val_shift = 0, val_mask = 0;
+ int ret = si_pmu_soft_start_params(sih, bt_or_wl, &en_reg, &en_shift, &en_mask, &en_val,
+ &val_reg, &val_shift, &val_mask);
+ uint32 dis_val = en_val ? 0u : 1u;
+
+ if (BCME_OK != ret) {
+ return ret;
+ }
+
+ if (slew_rate != (uint32)(~0u)) {
+
+ /* Without disabling soft start bit
+ * programming a new slew rate value
+ * doesn't take effect
+ */
+
+ /* Disable soft start */
+ si_pmu_vreg_control(sih, en_reg, (en_mask << en_shift), (dis_val << en_shift));
+
+ /* Program Slew rate */
+ si_pmu_vreg_control(sih, val_reg, (val_mask << val_shift),
+ ((slew_rate & val_mask) << val_shift));
+
+ /* Enable Soft start */
+ si_pmu_vreg_control(sih, en_reg, (en_mask << en_shift), (en_val << en_shift));
+ } else {
+ /* Slew rate value of 0xFFFF is used as a special value
+ * to disable/reset soft start feature
+ */
+
+ /* Disable soft start */
+ si_pmu_vreg_control(sih, en_reg, (en_mask << en_shift), (dis_val << en_shift));
+
+ /* Set slew rate value to zero */
+ si_pmu_vreg_control(sih, val_reg, (val_mask << val_shift), 0u);
+ }
+ return BCME_OK;
+}
+#endif /* BCM_LDO3P3_SOFTSTART */
+
+#ifdef LDO3P3_MIN_RES_MASK
+static bool ldo3p3_min_res_enabled = FALSE;
+/** Set ldo 3.3V mask in the min resources mask register */
+int
+si_pmu_min_res_ldo3p3_set(si_t *sih, osl_t *osh, bool on)
+{
+ uint32 min_mask = 0;
+ uint coreidx = si_findcoreidx(sih, GCI_CORE_ID, 0);
+
+ switch (CHIPID(sih->chip)) {
+ case BCM4369_CHIP_GRPID:
+ case BCM4362_CHIP_GRPID:
+ min_mask = PMURES_BIT(RES4369_LDO3P3_PU);
+ if (on) {
+ si_corereg(sih, coreidx, LHL_REG_OFF(lhl_lp_main_ctl1_adr),
+ BCM_MASK32(23, 0), 0x9E9F9F);
+ } else {
+ si_corereg(sih, coreidx, LHL_REG_OFF(lhl_lp_main_ctl1_adr),
+ BCM_MASK32(23, 0), 0x9E9F97);
+ }
+ break;
+ case BCM4376_CHIP_GRPID:
+ case BCM4378_CHIP_GRPID:
+ min_mask = PMURES_BIT(RES4378_LDO3P3_PU);
+ break;
+ default:
+ return BCME_UNSUPPORTED;
+ }
+
+ si_pmu_min_res_set(sih, osh, min_mask, on);
+ ldo3p3_min_res_enabled = on;
+
+ return BCME_OK;
+}
+
+int
+si_pmu_min_res_ldo3p3_get(si_t *sih, osl_t *osh, int *res)
+{
+ *res = (int)ldo3p3_min_res_enabled;
+ return BCME_OK;
+}
+#endif /* LDO3P3_MIN_RES_MASK */
+int
+si_pmu_min_res_otp_pu_set(si_t *sih, osl_t *osh, bool on)
+{
+ uint32 min_mask = 0;
+ rsc_per_chip_t *rsc;
+
+ rsc = si_pmu_get_rsc_positions(sih);
+ if (rsc) {
+ min_mask = PMURES_BIT(rsc->otp_pu);
+ } else {
+ return BCME_UNSUPPORTED;
+ }
+ si_pmu_min_res_set(sih, osh, min_mask, on);
+ return BCME_OK;
+}
+#endif /* !defined(BCMDONGLEHOST) */
+
+uint32
+BCMPOSTTRAPFN(si_pmu_wake_bit_offset)(si_t *sih)
+{
+ uint32 wakebit;
+
+ switch (CHIPID(sih->chip)) {
+ case BCM4369_CHIP_GRPID:
+ wakebit = PMU_CC2_GCI2_WAKE;
+ break;
+ case BCM4376_CHIP_GRPID:
+ case BCM4378_CHIP_GRPID:
+ wakebit = CC2_4378_GCI2WAKE_MASK;
+ break;
+ case BCM4385_CHIP_GRPID:
+ case BCM4387_CHIP_GRPID:
+ wakebit = CC2_4387_GCI2WAKE_MASK;
+ break;
+ case BCM4388_CHIP_GRPID:
+ case BCM4389_CHIP_GRPID:
+ case BCM4397_CHIP_GRPID:
+ wakebit = CC2_4389_GCI2WAKE_MASK;
+ break;
+ default:
+ wakebit = 0;
+ ASSERT(0);
+ break;
+ }
+
+ return wakebit;
+}
+
+#ifdef ATE_BUILD
+void hnd_pmu_clr_int_sts_req_active(osl_t *hnd_osh, si_t *sih)
+{
+ uint32 res_req_timer;
+ pmuregs_t *pmu;
+ if (AOB_ENAB(sih)) {
+ pmu = si_setcore(sih, PMU_CORE_ID, 0);
+ } else {
+ pmu = si_setcoreidx(sih, SI_CC_IDX);
+ }
+ ASSERT(pmu != NULL);
+ W_REG(hnd_osh, &pmu->pmuintstatus,
+ RSRC_INTR_MASK_TIMER_INT_0);
+ (void)R_REG(hnd_osh, &pmu->pmuintstatus);
+ res_req_timer = R_REG(hnd_osh, &pmu->res_req_timer);
+ W_REG(hnd_osh, &pmu->res_req_timer,
+ res_req_timer & ~(PRRT_REQ_ACTIVE << flags_shift));
+ (void)R_REG(hnd_osh, &pmu->res_req_timer);
+}
+#endif /* ATE_BUILD */
+
+void si_pmu_set_min_res_mask(si_t *sih, osl_t *osh, uint min_res_mask)
+{
+ pmuregs_t *pmu;
+ uint origidx;
+
+ /* Remember original core before switch to chipc/pmu */
+ origidx = si_coreidx(sih);
+ if (AOB_ENAB(sih)) {
+ pmu = si_setcore(sih, PMU_CORE_ID, 0);
+ }
+ else {
+ pmu = si_setcoreidx(sih, SI_CC_IDX);
+ }
+ ASSERT(pmu != NULL);
+
+ W_REG(osh, &pmu->min_res_mask, min_res_mask);
+ OSL_DELAY(100);
+
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+}
+
+bool
+si_pmu_cap_fast_lpo(si_t *sih)
+{
+ return (PMU_REG(sih, core_cap_ext, 0, 0) & PCAP_EXT_USE_MUXED_ILP_CLK_MASK) ? TRUE : FALSE;
+}
+
+int
+si_pmu_fast_lpo_disable(si_t *sih)
+{
+ if (!si_pmu_cap_fast_lpo(sih)) {
+ PMU_ERROR(("si_pmu_fast_lpo_disable: No Fast LPO capability\n"));
+ return BCME_ERROR;
+ }
+
+ PMU_REG(sih, pmucontrol_ext,
+ PCTL_EXT_FASTLPO_ENAB |
+ PCTL_EXT_FASTLPO_SWENAB |
+ PCTL_EXT_FASTLPO_PCIE_SWENAB,
+ 0);
+ OSL_DELAY(1000);
+ return BCME_OK;
+}
+
+/*
+* 4389B0/C0 - WL and BT turn on WAR,
+* set below bits in PMU chip control 6
+* - global bit[195] / bit[3] - enable legacy pmu_wakeup to make
+* domain 1 (WL) power request
+* - global bit[206] / bit[14] - perst_wake_en
+*/
+void
+si_pmu_dmn1_perst_wakeup(si_t *sih, bool set)
+{
+ if (PMUREV(sih->pmurev) == 40) {
+ if (set) {
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL6,
+ (PMU_CC6_ENABLE_DMN1_WAKEUP |
+ PMU_CC6_ENABLE_PMU_WAKEUP_PERST),
+ (PMU_CC6_ENABLE_DMN1_WAKEUP |
+ PMU_CC6_ENABLE_PMU_WAKEUP_PERST));
+ } else {
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL6,
+ (PMU_CC6_ENABLE_DMN1_WAKEUP |
+ PMU_CC6_ENABLE_PMU_WAKEUP_PERST),
+ 0);
+ }
+ }
+}
+
+#if !defined(BCMDONGLEHOST)
+
+/* write :
+ * TRUE - programs PLLCTRL6 for the given xtal and returns the value written to the
+ * pllctrl6 register.
+ * FALSE - returns 0 if the computed xtal programming matches the current pllctrl6
+ * register value, else returns the computed pllctrl6 value. No register is programmed.
+ */
+static uint32
+si_pmu_pll6val_armclk_calc(osl_t *osh, pmuregs_t *pmu, uint32 armclk, uint32 xtal, bool write)
+{
+ uint32 q, r;
+ uint32 xtal_scale;
+ uint32 pll6val;
+ if (armclk == 0 || xtal == 0) {
+ PMU_ERROR((" si_pmu_pll6val_armclk_calc: invalid armclk = %d or xtal = %d\n",
+ armclk, xtal));
+ return 0;
+ }
+ q = (armclk * 1000 * PMU4369_PLL6VAL_P1DIV) / xtal;
+ xtal_scale = xtal / 100;
+ r = ((armclk * 10 * PMU4369_PLL6VAL_P1DIV * PMU4369_PLL6VAL_PRE_SCALE) / xtal_scale) -
+ (q * PMU4369_PLL6VAL_PRE_SCALE);
+ r *= PMU4369_PLL6VAL_POST_SCALE;
+
+ pll6val = (r << PMU4369_PLL1_PC6_NDIV_FRAC_SHIFT) |
+ (q << PMU4369_PLL1_PC6_NDIV_INT_SHIFT) | PMU4369_PLL6VAL_P1DIV_BIT3_2;
+
+ PMU_MSG(("si_pmu_pll6val_armclk_calc, armclk %d, xtal %d, q %d, r 0x%8x, pll6val 0x%8x\n",
+ armclk, xtal, q, r, pll6val));
+
+ if (write) {
+ W_REG(osh, &pmu->pllcontrol_addr, PMU1_PLL0_PLLCTL6);
+ W_REG(osh, &pmu->pllcontrol_data, pll6val);
+ } else {
+ W_REG(osh, &pmu->pllcontrol_addr, PMU1_PLL0_PLLCTL6);
+ if (pll6val == R_REG(osh, &pmu->pllcontrol_data))
+ return 0;
+ }
+
+ return pll6val;
+}
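+
+/* Illustrative sketch (not part of the driver) of the q/r arithmetic above: q is
+ * the integer part of the feedback divider ratio ndiv = (armclk * 1000 * p1div) /
+ * xtal (armclk apparently in MHz and xtal in kHz, given the factor of 1000), and
+ * r is the remainder scaled by pre_scale * post_scale to form the fractional
+ * part. xtal is pre-divided by 100 (with armclk scaled by 10 instead of 1000),
+ * apparently to keep the multiply within 32 bits. The helper name and generic
+ * parameters are hypothetical; the real code uses the PMU4369_PLL6VAL_* constants.
+ */
+static void
+pll6_ndiv_calc_demo(uint32 armclk, uint32 xtal, uint32 p1div,
+	uint32 pre_scale, uint32 post_scale, uint32 *ndiv_int, uint32 *ndiv_frac)
+{
+	uint32 q = (armclk * 1000 * p1div) / xtal;	/* integer part of ndiv */
+	uint32 xtal_scale = xtal / 100;
+	uint32 r = ((armclk * 10 * p1div * pre_scale) / xtal_scale) -
+		(q * pre_scale);			/* scaled remainder */
+
+	*ndiv_int = q;
+	*ndiv_frac = r * post_scale;
+}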
+
+static void
+BCMATTACHFN(si_pmu_chipcontrol_xtal_settings_4369)(si_t *sih)
+{
+
+/* 4369 XTAL Bias settings */
+/*
+ Reg name startup Normal
+ xtal_bias_adj 0xFF 0x1A
+ xtal_coresize_nmos 0x3f 0x3f
+ xtal_coresize_pmos 0x3f 0x3f
+ xtal_sel_bias_res 0x2 0x6
+ xt_res_bypass 0x0 0x1
+*/
+ uint32 u32Val;
+ uint32 u32Mask;
+ u32Val = (PMU_CC0_4369B0_XTALCORESIZE_BIAS_ADJ_NORMAL_VAL |
+ PMU_CC0_4369_XTAL_RES_BYPASS_NORMAL_VAL);
+
+ u32Mask = (PMU_CC0_4369_XTALCORESIZE_BIAS_ADJ_NORMAL_MASK |
+ PMU_CC0_4369_XTAL_RES_BYPASS_NORMAL_MASK);
+
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL0, u32Mask, u32Val);
+
+ u32Val = (PMU_CC2_4369_XTALCORESIZE_BIAS_ADJ_NORMAL_VAL);
+ u32Mask = (PMU_CC2_4369_XTALCORESIZE_BIAS_ADJ_NORMAL_MASK);
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL2, u32Mask, u32Val);
+
+ u32Val = (PMU_CC3_4369_XTALCORESIZE_PMOS_NORMAL_VAL |
+ PMU_CC3_4369_XTALCORESIZE_NMOS_NORMAL_VAL |
+ PMU_CC3_4369_XTALSEL_BIAS_RES_NORMAL_VAL);
+
+ u32Mask = (PMU_CC3_4369_XTALCORESIZE_PMOS_NORMAL_MASK |
+ PMU_CC3_4369_XTALCORESIZE_NMOS_NORMAL_MASK |
+ PMU_CC3_4369_XTALSEL_BIAS_RES_NORMAL_MASK);
+
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL3, u32Mask, u32Val);
+
+}
+
+static void
+BCMATTACHFN(si_pmu_chipcontrol_xtal_settings_4362)(si_t *sih)
+{
+	/* 4362 XTAL Bias settings, based on 4369 */
+ /*
+ Reg name startup Normal
+ xtal_bias_adj 0xFF 0x1A
+ xtal_coresize_nmos 0x3f 0x3f
+ xtal_coresize_pmos 0x3f 0x3f
+ xtal_sel_bias_res 0x2 0x6
+ xt_res_bypass 0x0 0x1
+ */
+ uint32 u32Val;
+ uint32 u32Mask;
+ u32Val = (PMU_CC0_4362_XTALCORESIZE_BIAS_ADJ_NORMAL_VAL |
+ PMU_CC0_4362_XTAL_RES_BYPASS_NORMAL_VAL);
+
+ u32Mask = (PMU_CC0_4362_XTALCORESIZE_BIAS_ADJ_NORMAL_MASK |
+ PMU_CC0_4362_XTAL_RES_BYPASS_NORMAL_MASK);
+
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL0, u32Mask, u32Val);
+
+ u32Val = (PMU_CC2_4362_XTALCORESIZE_BIAS_ADJ_NORMAL_VAL);
+ u32Mask = (PMU_CC2_4362_XTALCORESIZE_BIAS_ADJ_NORMAL_MASK);
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL2, u32Mask, u32Val);
+
+ u32Val = (PMU_CC3_4362_XTALCORESIZE_PMOS_NORMAL_VAL |
+ PMU_CC3_4362_XTALCORESIZE_NMOS_NORMAL_VAL |
+ PMU_CC3_4362_XTALSEL_BIAS_RES_NORMAL_VAL);
+
+ u32Mask = (PMU_CC3_4362_XTALCORESIZE_PMOS_NORMAL_MASK |
+ PMU_CC3_4362_XTALCORESIZE_NMOS_NORMAL_MASK |
+ PMU_CC3_4362_XTALSEL_BIAS_RES_NORMAL_MASK);
+
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL3, u32Mask, u32Val);
+
+}
+
+/* 4378 based on 4369 XTAL Bias settings
+ * Reg name startup Normal
+ * xtal_bias_adj 0xFF 0x1A
+ * xtal_coresize_nmos 0x3f 0x3f
+ * xtal_coresize_pmos 0x3f 0x3f
+ * xtal_sel_bias_res 0x2 0x2
+ * xt_res_bypass 0x0 0x2
+ */
+static void
+BCMATTACHFN(si_pmu_chipcontrol_xtal_settings_4378)(si_t *sih)
+{
+ uint32 u32Val;
+ uint32 u32Mask;
+ uint16 xtal_bias_adj;
+ uint8 xtal_bias_adj_otp = 0, xtal_bias_cal_otp_done = 0;
+
+#ifdef XTAL_BIAS_FROM_OTP
+	/* Read the xtal bias cal done bit and the xtal bias value from OTP */
+ si_pmu_chipcontrol_xtal_bias_from_otp(sih, &xtal_bias_cal_otp_done, &xtal_bias_adj_otp);
+#endif /* XTAL_BIAS_FROM_OTP */
+
+	/*
+	 * If the xtal_bias_cal_done flag reads as non-zero, write the xtal bias value
+	 * from OTP into the PMU control register; otherwise write the default value of 0x1a.
+	 */
+ xtal_bias_adj = (uint16)xtal_bias_adj_otp;
+ xtal_bias_adj = xtal_bias_cal_otp_done != 0 ? (xtal_bias_adj << 6) :
+ PMU_CC0_4378_XTALCORESIZE_BIAS_ADJ_NORMAL_VAL;
+
+ u32Val = (xtal_bias_adj | PMU_CC0_4378_XTAL_RES_BYPASS_NORMAL_VAL);
+
+ u32Mask = (PMU_CC0_4378_XTALCORESIZE_BIAS_ADJ_NORMAL_MASK |
+ PMU_CC0_4378_XTAL_RES_BYPASS_NORMAL_MASK);
+
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL0, u32Mask, u32Val);
+
+ u32Val = (PMU_CC2_4378_XTALCORESIZE_BIAS_ADJ_NORMAL_VAL);
+ u32Mask = (PMU_CC2_4378_XTALCORESIZE_BIAS_ADJ_NORMAL_MASK);
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL2, u32Mask, u32Val);
+
+ u32Val = (PMU_CC3_4378_XTALCORESIZE_PMOS_NORMAL_VAL |
+ PMU_CC3_4378_XTALCORESIZE_NMOS_NORMAL_VAL |
+ PMU_CC3_4378_XTALSEL_BIAS_RES_NORMAL_VAL);
+
+ u32Mask = (PMU_CC3_4378_XTALCORESIZE_PMOS_NORMAL_MASK |
+ PMU_CC3_4378_XTALCORESIZE_NMOS_NORMAL_MASK |
+ PMU_CC3_4378_XTALSEL_BIAS_RES_NORMAL_MASK);
+
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL3, u32Mask, u32Val);
+
+}
+
+#ifdef XTAL_BIAS_FROM_OTP
+static void
+BCMATTACHFN(si_pmu_chipcontrol_xtal_bias_from_otp)(si_t *sih, uint8* flag, uint8* val)
+{
+ uint8 xtal_bias_adj = 0, xtal_bias_cal_otp_done = 0;
+#ifndef BCM_OTP_API
+ uint16 datum, offset;
+ uint8 shift, mask;
+#endif /* !BCM_OTP_API */
+
+	/* Read the XTAL BIAS CAL value from OTP.
+	 * 1) Read the xtal cal done bit and the xtal bias value from OTP.
+	 * 2) OTP memory is zero by default, so chips that aren't OTP programmed will read a
+	 * '0' for xtal_bias_cal_otp_done.
+	 */
+#ifdef BCM_OTP_API
+ otp_read_8b_field(sih, BCM_OTP_FLD_XTAL_BIAS_FLAG, &xtal_bias_cal_otp_done);
+ if (xtal_bias_cal_otp_done) {
+ otp_read_8b_field(sih, BCM_OTP_FLD_XTAL_BIAS_ADJ, &xtal_bias_adj);
+ }
+#else
+ si_pmu_chipcontrol_xtal_bias_cal_done_offsets(sih, &offset, &shift, &mask);
+ if (!otp_read_word(sih, offset, &datum)) {
+ xtal_bias_cal_otp_done = ((datum >> shift) & mask);
+ }
+
+ si_pmu_chipcontrol_xtal_bias_val_offsets(sih, &offset, &shift, &mask);
+ if (xtal_bias_cal_otp_done && (!otp_read_word(sih, offset, &datum)))
+ {
+ xtal_bias_adj = ((datum >> shift) & mask);
+ }
+#endif /* BCM_OTP_API */
+ *flag = xtal_bias_cal_otp_done;
+ *val = xtal_bias_adj;
+}
+
+#ifndef BCM_OTP_API
+static void
+BCMATTACHFN(si_pmu_chipcontrol_xtal_bias_cal_done_offsets)(si_t *sih, uint16* wrd_offset,
+ uint8* wrd_shift, uint8* wrd_mask)
+{
+	/* Offset is a 16-bit-aligned address, shift is the starting bit position of the
+	 * value, and mask defines the bit width of the value. Each value in the array is
+	 * for one of the cores.
+	 */
+ /* XTAL BIAS CAL done 11896 */
+ switch (CHIPID(sih->chip)) {
+ case BCM4376_CHIP_GRPID:
+ case BCM4378_CHIP_GRPID:
+ *wrd_offset = OTP_XTAL_BIAS_CAL_DONE_4378_WRD_OFFSET;
+ *wrd_shift = OTP_XTAL_BIAS_CAL_DONE_4378_WRD_SHIFT;
+ *wrd_mask = OTP_XTAL_BIAS_CAL_DONE_4378_WRD_MASK;
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+}
+
+static void
+BCMATTACHFN(si_pmu_chipcontrol_xtal_bias_val_offsets)(si_t *sih, uint16* wrd_offset,
+ uint8* wrd_shift, uint8* wrd_mask)
+{
+	/* Offset is a 16-bit-aligned address, shift is the starting bit position of the
+	 * value, and mask defines the bit width of the value. Each value in the array is
+	 * for one of the cores.
+	 */
+ /* XTAL BIAS value 11888 */
+ switch (CHIPID(sih->chip)) {
+ case BCM4376_CHIP_GRPID:
+ case BCM4378_CHIP_GRPID:
+ *wrd_offset = OTP_XTAL_BIAS_VAL_4378_WRD_OFFSET;
+ *wrd_shift = OTP_XTAL_BIAS_VAL_4378_WRD_SHIFT;
+ *wrd_mask = OTP_XTAL_BIAS_VAL_4378_WRD_MASK;
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+}
+#endif /* !BCM_OTP_API */
+#endif /* XTAL_BIAS_FROM_OTP */
+
+#endif /* !BCMDONGLEHOST */
+
+#ifdef BCMPMU_STATS
+/*
+ * 8 pmu statistics timer default map
+ *
+ * to measure CORE_RDY_AUX, set timers 6 and 7 as below instead of CORE_RDY_MAIN.
+ * //core-n active duration : pmu_rsrc_state(CORE_RDY_AUX)
+ * { SRC_CORE_RDY_AUX, FALSE, TRUE, LEVEL_HIGH},
+ * //core-n active entry count : pmu_rsrc_state(CORE_RDY_AUX)
+ * { SRC_CORE_RDY_AUX, FALSE, TRUE, EDGE_RISE}
+ */
+static pmu_stats_timer_t pmustatstimer[] = {
+ { SRC_LINK_IN_L12, FALSE, TRUE, PMU_STATS_LEVEL_HIGH}, //link_in_l12
+ { SRC_LINK_IN_L23, FALSE, TRUE, PMU_STATS_LEVEL_HIGH}, //link_in_l23
+ { SRC_PM_ST_IN_D0, FALSE, TRUE, PMU_STATS_LEVEL_HIGH}, //pm_st_in_d0
+ { SRC_PM_ST_IN_D3, FALSE, TRUE, PMU_STATS_LEVEL_HIGH}, //pm_st_in_d3
+ //deep-sleep duration : pmu_rsrc_state(XTAL_PU)
+ { SRC_XTAL_PU, FALSE, TRUE, PMU_STATS_LEVEL_LOW},
+ //deep-sleep entry count : pmu_rsrc_state(XTAL_PU)
+ { SRC_XTAL_PU, FALSE, TRUE, PMU_STATS_EDGE_FALL},
+ //core-n active duration : pmu_rsrc_state(CORE_RDY_MAIN)
+ { SRC_CORE_RDY_MAIN, FALSE, TRUE, PMU_STATS_LEVEL_HIGH},
+	//core-n active entry count : pmu_rsrc_state(CORE_RDY_MAIN)
+ { SRC_CORE_RDY_MAIN, FALSE, TRUE, PMU_STATS_EDGE_RISE}
+};
+
+static void
+si_pmustatstimer_update(osl_t *osh, pmuregs_t *pmu, uint8 timerid)
+{
+ uint32 stats_timer_ctrl;
+
+ W_REG(osh, &pmu->pmu_statstimer_addr, timerid);
+ stats_timer_ctrl =
+ ((pmustatstimer[timerid].src_num << PMU_ST_SRC_SHIFT) &
+ PMU_ST_SRC_MASK) |
+ ((pmustatstimer[timerid].cnt_mode << PMU_ST_CNT_MODE_SHIFT) &
+ PMU_ST_CNT_MODE_MASK) |
+ ((pmustatstimer[timerid].enable << PMU_ST_EN_SHIFT) & PMU_ST_EN_MASK) |
+ ((pmustatstimer[timerid].int_enable << PMU_ST_INT_EN_SHIFT) & PMU_ST_INT_EN_MASK);
+ W_REG(osh, &pmu->pmu_statstimer_ctrl, stats_timer_ctrl);
+ W_REG(osh, &pmu->pmu_statstimer_N, 0);
+}
+
+void
+si_pmustatstimer_int_enable(si_t *sih)
+{
+ pmuregs_t *pmu;
+ uint origidx;
+ osl_t *osh = si_osh(sih);
+
+ /* Remember original core before switch to chipc/pmu */
+ origidx = si_coreidx(sih);
+ if (AOB_ENAB(sih)) {
+ pmu = si_setcore(sih, PMU_CORE_ID, 0);
+ } else {
+ pmu = si_setcoreidx(sih, SI_CC_IDX);
+ }
+ ASSERT(pmu != NULL);
+
+ OR_REG(osh, &pmu->pmuintmask0, PMU_INT_STAT_TIMER_INT_MASK);
+
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+}
+
+void
+si_pmustatstimer_int_disable(si_t *sih)
+{
+ pmuregs_t *pmu;
+ uint origidx;
+ osl_t *osh = si_osh(sih);
+
+ /* Remember original core before switch to chipc/pmu */
+ origidx = si_coreidx(sih);
+ if (AOB_ENAB(sih)) {
+ pmu = si_setcore(sih, PMU_CORE_ID, 0);
+ } else {
+ pmu = si_setcoreidx(sih, SI_CC_IDX);
+ }
+ ASSERT(pmu != NULL);
+
+ AND_REG(osh, &pmu->pmuintmask0, ~PMU_INT_STAT_TIMER_INT_MASK);
+
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+}
+
+void
+si_pmustatstimer_init(si_t *sih)
+{
+ pmuregs_t *pmu;
+ uint origidx;
+ osl_t *osh = si_osh(sih);
+ uint32 core_cap_ext;
+ uint8 max_stats_timer_num;
+ int8 i;
+
+ /* Remember original core before switch to chipc/pmu */
+ origidx = si_coreidx(sih);
+ if (AOB_ENAB(sih)) {
+ pmu = si_setcore(sih, PMU_CORE_ID, 0);
+ } else {
+ pmu = si_setcoreidx(sih, SI_CC_IDX);
+ }
+ ASSERT(pmu != NULL);
+
+ core_cap_ext = R_REG(osh, &pmu->core_cap_ext);
+
+ max_stats_timer_num = ((core_cap_ext & PCAP_EXT_ST_NUM_MASK) >> PCAP_EXT_ST_NUM_SHIFT) + 1;
+
+ for (i = 0; i < max_stats_timer_num; i++) {
+ si_pmustatstimer_update(osh, pmu, i);
+ }
+
+ OR_REG(osh, &pmu->pmuintmask0, PMU_INT_STAT_TIMER_INT_MASK);
+
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+}
+
+void
+si_pmustatstimer_dump(si_t *sih)
+{
+ pmuregs_t *pmu;
+ uint origidx;
+ osl_t *osh = si_osh(sih);
+ uint32 core_cap_ext, pmucapabilities, AlpPeriod, ILPPeriod, pmuintmask0, pmuintstatus;
+ uint8 max_stats_timer_num, max_stats_timer_src_num;
+ uint32 stat_timer_ctrl, stat_timer_N;
+ uint8 i;
+ uint32 current_time_ms = OSL_SYSUPTIME();
+
+ /* Remember original core before switch to chipc/pmu */
+ origidx = si_coreidx(sih);
+ if (AOB_ENAB(sih)) {
+ pmu = si_setcore(sih, PMU_CORE_ID, 0);
+ } else {
+ pmu = si_setcoreidx(sih, SI_CC_IDX);
+ }
+ ASSERT(pmu != NULL);
+
+ pmucapabilities = R_REG(osh, &pmu->pmucapabilities);
+ core_cap_ext = R_REG(osh, &pmu->core_cap_ext);
+ AlpPeriod = R_REG(osh, &pmu->slowclkperiod);
+ ILPPeriod = R_REG(osh, &pmu->ILPPeriod);
+
+ max_stats_timer_num = ((core_cap_ext & PCAP_EXT_ST_NUM_MASK) >>
+ PCAP_EXT_ST_NUM_SHIFT) + 1;
+ max_stats_timer_src_num = ((core_cap_ext & PCAP_EXT_ST_SRC_NUM_MASK) >>
+ PCAP_EXT_ST_SRC_NUM_SHIFT) + 1;
+
+ pmuintstatus = R_REG(osh, &pmu->pmuintstatus);
+ pmuintmask0 = R_REG(osh, &pmu->pmuintmask0);
+
+ PMU_ERROR(("si_pmustatstimer_dump : TIME %d\n", current_time_ms));
+
+ PMU_ERROR(("\tMAX Timer Num %d, MAX Source Num %d\n",
+ max_stats_timer_num, max_stats_timer_src_num));
+ PMU_ERROR(("\tpmucapabilities 0x%8x, core_cap_ext 0x%8x, AlpPeriod 0x%8x, ILPPeriod 0x%8x, "
+ "pmuintmask0 0x%8x, pmuintstatus 0x%8x, pmurev %d\n",
+ pmucapabilities, core_cap_ext, AlpPeriod, ILPPeriod,
+ pmuintmask0, pmuintstatus, PMUREV(sih->pmurev)));
+
+ for (i = 0; i < max_stats_timer_num; i++) {
+ W_REG(osh, &pmu->pmu_statstimer_addr, i);
+ stat_timer_ctrl = R_REG(osh, &pmu->pmu_statstimer_ctrl);
+ stat_timer_N = R_REG(osh, &pmu->pmu_statstimer_N);
+ PMU_ERROR(("\t Timer %d : control 0x%8x, %d\n",
+ i, stat_timer_ctrl, stat_timer_N));
+ }
+
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+}
+
+void
+si_pmustatstimer_start(si_t *sih, uint8 timerid)
+{
+ pmuregs_t *pmu;
+ uint origidx;
+ osl_t *osh = si_osh(sih);
+
+ /* Remember original core before switch to chipc/pmu */
+ origidx = si_coreidx(sih);
+ if (AOB_ENAB(sih)) {
+ pmu = si_setcore(sih, PMU_CORE_ID, 0);
+ } else {
+ pmu = si_setcoreidx(sih, SI_CC_IDX);
+ }
+ ASSERT(pmu != NULL);
+
+ pmustatstimer[timerid].enable = TRUE;
+
+ W_REG(osh, &pmu->pmu_statstimer_addr, timerid);
+ OR_REG(osh, &pmu->pmu_statstimer_ctrl, PMU_ST_ENAB << PMU_ST_EN_SHIFT);
+
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+}
+
+void
+si_pmustatstimer_stop(si_t *sih, uint8 timerid)
+{
+ pmuregs_t *pmu;
+ uint origidx;
+ osl_t *osh = si_osh(sih);
+
+ /* Remember original core before switch to chipc/pmu */
+ origidx = si_coreidx(sih);
+ if (AOB_ENAB(sih)) {
+ pmu = si_setcore(sih, PMU_CORE_ID, 0);
+ } else {
+ pmu = si_setcoreidx(sih, SI_CC_IDX);
+ }
+ ASSERT(pmu != NULL);
+
+ pmustatstimer[timerid].enable = FALSE;
+
+ W_REG(osh, &pmu->pmu_statstimer_addr, timerid);
+ AND_REG(osh, &pmu->pmu_statstimer_ctrl, ~(PMU_ST_ENAB << PMU_ST_EN_SHIFT));
+
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+}
+
+void
+si_pmustatstimer_clear(si_t *sih, uint8 timerid)
+{
+ pmuregs_t *pmu;
+ uint origidx;
+ osl_t *osh = si_osh(sih);
+
+ /* Remember original core before switch to chipc/pmu */
+ origidx = si_coreidx(sih);
+ if (AOB_ENAB(sih)) {
+ pmu = si_setcore(sih, PMU_CORE_ID, 0);
+ } else {
+ pmu = si_setcoreidx(sih, SI_CC_IDX);
+ }
+ ASSERT(pmu != NULL);
+
+ W_REG(osh, &pmu->pmu_statstimer_addr, timerid);
+ W_REG(osh, &pmu->pmu_statstimer_N, 0);
+
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+}
+
+void
+si_pmustatstimer_clear_overflow(si_t *sih)
+{
+ uint8 i;
+ uint32 core_cap_ext;
+ uint8 max_stats_timer_num;
+ uint32 timerN;
+ pmuregs_t *pmu;
+ uint origidx;
+ osl_t *osh = si_osh(sih);
+
+ /* Remember original core before switch to chipc/pmu */
+ origidx = si_coreidx(sih);
+ if (AOB_ENAB(sih)) {
+ pmu = si_setcore(sih, PMU_CORE_ID, 0);
+ } else {
+ pmu = si_setcoreidx(sih, SI_CC_IDX);
+ }
+ ASSERT(pmu != NULL);
+
+ core_cap_ext = R_REG(osh, &pmu->core_cap_ext);
+ max_stats_timer_num = ((core_cap_ext & PCAP_EXT_ST_NUM_MASK) >> PCAP_EXT_ST_NUM_SHIFT) + 1;
+
+ for (i = 0; i < max_stats_timer_num; i++) {
+ W_REG(osh, &pmu->pmu_statstimer_addr, i);
+ timerN = R_REG(osh, &pmu->pmu_statstimer_N);
+ if (timerN == 0xFFFFFFFF) {
+ PMU_ERROR(("pmustatstimer overflow clear - timerid : %d\n", i));
+ si_pmustatstimer_clear(sih, i);
+ }
+ }
+
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+}
+
+uint32
+si_pmustatstimer_read(si_t *sih, uint8 timerid)
+{
+ pmuregs_t *pmu;
+ uint origidx;
+ osl_t *osh = si_osh(sih);
+ uint32 stats_timer_N;
+
+ /* Remember original core before switch to chipc/pmu */
+ origidx = si_coreidx(sih);
+ if (AOB_ENAB(sih)) {
+ pmu = si_setcore(sih, PMU_CORE_ID, 0);
+ } else {
+ pmu = si_setcoreidx(sih, SI_CC_IDX);
+ }
+ ASSERT(pmu != NULL);
+
+ W_REG(osh, &pmu->pmu_statstimer_addr, timerid);
+ stats_timer_N = R_REG(osh, &pmu->pmu_statstimer_N);
+
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+
+ return stats_timer_N;
+}
+
+void
+si_pmustatstimer_cfg_src_num(si_t *sih, uint8 src_num, uint8 timerid)
+{
+ pmuregs_t *pmu;
+ uint origidx;
+ osl_t *osh = si_osh(sih);
+
+ /* Remember original core before switch to chipc/pmu */
+ origidx = si_coreidx(sih);
+ if (AOB_ENAB(sih)) {
+ pmu = si_setcore(sih, PMU_CORE_ID, 0);
+ } else {
+ pmu = si_setcoreidx(sih, SI_CC_IDX);
+ }
+ ASSERT(pmu != NULL);
+
+ pmustatstimer[timerid].src_num = src_num;
+ si_pmustatstimer_update(osh, pmu, timerid);
+
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+}
+
+void
+si_pmustatstimer_cfg_cnt_mode(si_t *sih, uint8 cnt_mode, uint8 timerid)
+{
+ pmuregs_t *pmu;
+ uint origidx;
+ osl_t *osh = si_osh(sih);
+
+ /* Remember original core before switch to chipc/pmu */
+ origidx = si_coreidx(sih);
+ if (AOB_ENAB(sih)) {
+ pmu = si_setcore(sih, PMU_CORE_ID, 0);
+ } else {
+ pmu = si_setcoreidx(sih, SI_CC_IDX);
+ }
+ ASSERT(pmu != NULL);
+
+ pmustatstimer[timerid].cnt_mode = cnt_mode;
+ si_pmustatstimer_update(osh, pmu, timerid);
+
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+}
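+
+/* Illustrative sketch (not part of the driver): remapping timers 6 and 7 to
+ * CORE_RDY_AUX, as described in the default-map comment above the
+ * pmustatstimer[] table, then reading timer 6 back. The function name
+ * pmustatstimer_aux_demo is hypothetical; everything it calls is defined above.
+ */
+static uint32
+pmustatstimer_aux_demo(si_t *sih)
+{
+	/* timer 6: active duration (level high); timer 7: entry count (rising edge) */
+	si_pmustatstimer_cfg_src_num(sih, SRC_CORE_RDY_AUX, 6);
+	si_pmustatstimer_cfg_cnt_mode(sih, PMU_STATS_LEVEL_HIGH, 6);
+	si_pmustatstimer_cfg_src_num(sih, SRC_CORE_RDY_AUX, 7);
+	si_pmustatstimer_cfg_cnt_mode(sih, PMU_STATS_EDGE_RISE, 7);
+
+	si_pmustatstimer_start(sih, 6);
+	si_pmustatstimer_start(sih, 7);
+
+	return si_pmustatstimer_read(sih, 6);	/* accumulated CORE_RDY_AUX duration */
+}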
+#endif /* BCMPMU_STATS */
+
+#ifdef DONGLEBUILD
+/* Note this could be called from trap context !!
+ * So observe caution. Do NOT ASSERT() in this function.
+ * The len parameter is dual purpose - on input it is the length of the
+ * buffer provided; on output it is the amount of data written, in bytes.
+ */
+/* The dump consists of address/data pairs.
+ * Note the presence of arg2: it could further define what subset of information
+ * needs to be dumped. Some external entities such as SMD could optionally pass
+ * arg2 to select the subset of information needed.
+ */
+int
+BCMPOSTTRAPFN(si_pmu_regs_in_rodata_dump)(void *sih, void *arg2,
+ uint32 *bufptr, uint16 *len)
+{
+ int rc = BCME_OK;
+ uint16 totalsize = SI_PMU_REG_DUMP_BASE_SIZE;
+
+ if ((bufptr == NULL) || (len == NULL)) {
+ rc = BCME_NOMEM;
+ goto fail;
+ }
+
+	/* Are PMU registers available in rodata? If not, bail out.
+	 * Avoid a re-read: if the data is not there, there could have been
+	 * an error in reading these regs.
+	 */
+ if (rodata_pmuregdump_ptr == NULL) {
+ rc = BCME_ERROR;
+ goto fail;
+ }
+
+ if (si_pmu_get_mac_rsrc_req_tmr_cnt(sih) > 1) {
+ totalsize += SI_PMU_REG_DUMP_MACRSRC1_SIZE;
+ }
+ if (si_pmu_get_mac_rsrc_req_tmr_cnt(sih) > 2) {
+ totalsize += SI_PMU_REG_DUMP_MACRSRC2_SIZE;
+ }
+ if (si_pmu_get_pmu_interrupt_rcv_cnt(sih) > 1) {
+ totalsize += SI_PMU_REG_DUMP_INTRCV1_SIZE;
+ }
+
+ /* Make sure there is enough space for address value pair */
+ if (len && *len < totalsize) {
+ rc = BCME_BUFTOOSHORT;
+ goto fail;
+ }
+
+ /* Write registers to supplied buffer */
+	/* Note that rodata_pmuregdump_size needs to be
+	 * a multiple of the word size
+ */
+ memcpy((uint8*)bufptr, rodata_pmuregdump_ptr, totalsize);
+
+ *len = totalsize;
+fail:
+ return rc;
+
+}
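+
+/* Illustrative call sketch (not part of the driver) for the dual-purpose len
+ * parameter above: len carries the buffer size on input and the number of bytes
+ * written on output. Passing NULL for arg2 assumes no subset filtering, per the
+ * comment above; the buffer size is illustrative.
+ */
+#if 0
+	uint32 dumpbuf[256];
+	uint16 dumplen = (uint16)sizeof(dumpbuf);	/* in: buffer size in bytes */
+
+	if (si_pmu_regs_in_rodata_dump(sih, NULL, dumpbuf, &dumplen) == BCME_OK) {
+		/* out: dumplen now holds the number of bytes copied */
+	}
+#endif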
+#endif /* DONGLEBUILD */
+
+/* query the # of mac resource request timers */
+uint
+BCMPOSTTRAPFN(si_pmu_get_mac_rsrc_req_tmr_cnt)(si_t *sih)
+{
+ if (PMUREV(sih->pmurev) >= 26) {
+ uint32 core_cap_ext = PMU_REG(sih, core_cap_ext, 0, 0);
+ uint mac_rsrc_cnt =
+ ((core_cap_ext & PCAP_EXT_MAC_RSRC_REQ_TMR_CNT_MASK) >>
+ PCAP_EXT_MAC_RSRC_REQ_TMR_CNT_SHIFT) + 1;
+ return mac_rsrc_cnt;
+ }
+
+ return si_numd11coreunits(sih);
+}
+
+/* query the # of PMU interrupt receivers */
+uint
+BCMPOSTTRAPFN(si_pmu_get_pmu_interrupt_rcv_cnt)(si_t *sih)
+{
+ if (PMUREV(sih->pmurev) >= 26) {
+ uint32 core_cap_ext = PMU_REG(sih, core_cap_ext, 0, 0);
+ uint pmu_intr_rcvr_cnt =
+ ((core_cap_ext & PCAP_EXT_PMU_INTR_RCVR_CNT_MASK) >>
+ PCAP_EXT_PMU_INTR_RCVR_CNT_SHIFT) + 1;
+ return pmu_intr_rcvr_cnt;
+ }
+
+ return si_numd11coreunits(sih);
+}
+
+#ifdef DONGLEBUILD
+int
+si_pmu_mem_pwr_off(si_t *sih, int core_idx)
+{
+ int ret = BCME_OK;
+
+ if (si_setcore(sih, D11_CORE_ID, core_idx) == NULL) {
+		/* core_idx doesn't exist */
+ return BCME_BADOPTION;
+ }
+
+ switch (CHIPID(sih->chip)) {
+ case BCM4385_CHIP_GRPID:
+ case BCM4387_CHIP_GRPID:
+ if (core_idx == 0) {
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL4,
+ (PMU_CC4_4387_MAIN_PD_CBUCK2VDDB_ON |
+ PMU_CC4_4387_MAIN_PD_CBUCK2VDDRET_ON |
+ PMU_CC4_4387_MAIN_PD_MEMLPLDO2VDDB_ON |
+ PMU_CC4_4387_MAIN_PD_MEMLPDLO2VDDRET_ON),
+ 0);
+
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL13,
+ (PMU_CC13_MAIN_CBUCK2VDDB_OFF |
+ PMU_CC13_MAIN_CBUCK2VDDRET_OFF |
+ PMU_CC13_MAIN_MEMLPLDO2VDDB_OFF |
+ PMU_CC13_MAIN_MEMLPLDO2VDDRET_OFF),
+ (PMU_CC13_MAIN_CBUCK2VDDB_OFF |
+ PMU_CC13_MAIN_CBUCK2VDDRET_OFF |
+ PMU_CC13_MAIN_MEMLPLDO2VDDB_OFF |
+ PMU_CC13_MAIN_MEMLPLDO2VDDRET_OFF));
+
+ /* LQ settings */
+ si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_25,
+ 0xFFFFFFFF, XTAL_LQ_SETTING_4387);
+ } else if (core_idx == 1) {
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL4,
+ (PMU_CC4_4387_AUX_PD_CBUCK2VDDB_ON |
+ PMU_CC4_4387_AUX_PD_CBUCK2VDDRET_ON |
+ PMU_CC4_4387_AUX_PD_MEMLPLDO2VDDB_ON |
+ PMU_CC4_4387_AUX_PD_MEMLPLDO2VDDRET_ON),
+ 0);
+
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL13,
+ (PMU_CC13_AUX_CBUCK2VDDB_OFF |
+ PMU_CC13_AUX_CBUCK2VDDRET_OFF |
+ PMU_CC13_AUX_MEMLPLDO2VDDB_OFF |
+ PMU_CC13_AUX_MEMLPLDO2VDDRET_OFF),
+ (PMU_CC13_AUX_CBUCK2VDDB_OFF |
+ PMU_CC13_AUX_CBUCK2VDDRET_OFF |
+ PMU_CC13_AUX_MEMLPLDO2VDDB_OFF |
+ PMU_CC13_AUX_MEMLPLDO2VDDRET_OFF));
+ } else if (core_idx == 2) {
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL17,
+ (PMU_CC17_SCAN_CBUCK2VDDB_ON |
+ PMU_CC17_SCAN_MEMLPLDO2VDDB_ON |
+ PMU_CC17_SCAN_MEMLPLDO2VDDRET_ON),
+ 0);
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL17,
+ (PMU_CC17_SCAN_CBUCK2VDDB_OFF |
+ PMU_CC17_SCAN_MEMLPLDO2VDDB_OFF |
+ PMU_CC17_SCAN_MEMLPLDO2VDDRET_OFF),
+ (PMU_CC17_SCAN_CBUCK2VDDB_OFF |
+ PMU_CC17_SCAN_MEMLPLDO2VDDB_OFF |
+ PMU_CC17_SCAN_MEMLPLDO2VDDRET_OFF));
+ }
+ break;
+
+ default:
+ ret = BCME_UNSUPPORTED;
+ break;
+ }
+
+ return ret;
+}
+
+int
+BCMPOSTTRAPFN(si_pmu_mem_pwr_on)(si_t *sih)
+{
+ int ret = BCME_OK;
+
+ switch (CHIPID(sih->chip)) {
+ case BCM4385_CHIP_GRPID:
+ case BCM4387_CHIP_GRPID:
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL13,
+ (PMU_CC13_MAIN_CBUCK2VDDB_OFF |
+ PMU_CC13_MAIN_CBUCK2VDDRET_OFF |
+ PMU_CC13_MAIN_MEMLPLDO2VDDB_OFF |
+ PMU_CC13_MAIN_MEMLPLDO2VDDRET_OFF),
+ PMU_CC13_MAIN_MEMLPLDO2VDDRET_OFF);
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL13,
+ (PMU_CC13_AUX_CBUCK2VDDB_OFF |
+ PMU_CC13_AUX_CBUCK2VDDRET_OFF |
+ PMU_CC13_AUX_MEMLPLDO2VDDB_OFF |
+ PMU_CC13_AUX_MEMLPLDO2VDDRET_OFF),
+ PMU_CC13_AUX_MEMLPLDO2VDDRET_OFF);
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL17,
+ (PMU_CC17_SCAN_CBUCK2VDDB_OFF |
+ PMU_CC17_SCAN_MEMLPLDO2VDDB_OFF |
+ PMU_CC17_SCAN_MEMLPLDO2VDDRET_OFF),
+ PMU_CC17_SCAN_MEMLPLDO2VDDRET_OFF);
+
+ /* HQ settings */
+ si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_25,
+ 0xFFFFFFFF, XTAL_HQ_SETTING_4387);
+ break;
+
+ default:
+ ret = BCME_UNSUPPORTED;
+ break;
+ }
+
+ return ret;
+}
+
+void
+BCMPOSTTRAPFN(si_pmu_disable_intr_pwrreq)(si_t *sih)
+{
+ if (MULTIBP_CAP(sih)) {
+ switch (CHIPID(sih->chip)) {
+ case BCM4376_CHIP_GRPID:
+ case BCM4378_CHIP_GRPID:
+ case BCM4385_CHIP_GRPID:
+ case BCM4387_CHIP_GRPID:
+ case BCM4388_CHIP_GRPID:
+ case BCM4389_CHIP_GRPID:
+ case BCM4397_CHIP_GRPID:
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL2, PMU_CC2_CB2WL_INTR_PWRREQ_EN, 0);
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL6, PMU_CC6_ENABLE_DMN1_WAKEUP, 0);
+ break;
+ default:
+ PMU_ERROR(("si_pmu_disable_intr_pwrreq: add support for this chip!\n"));
+ OSL_SYS_HALT();
+ break;
+ }
+ }
+}
+
+void
+BCMPOSTTRAPFN(si_pmu_clear_intmask)(si_t *sih)
+{
+ pmuregs_t *pmu;
+ uint origidx;
+ osl_t *osh = si_osh(sih);
+ uint pmu_intr_recvr_cnt;
+
+ /* Remember original core before switch to chipc/pmu */
+ origidx = si_coreidx(sih);
+ if (AOB_ENAB(sih)) {
+ pmu = si_setcore(sih, PMU_CORE_ID, 0);
+ } else {
+ pmu = si_setcoreidx(sih, SI_CC_IDX);
+ }
+
+ ASSERT(pmu != NULL);
+ W_REG(osh, &pmu->pmuintmask0, 0);
+
+ pmu_intr_recvr_cnt = ((R_REG(osh, &pmu->core_cap_ext) & PCAP_EXT_PMU_INTR_RCVR_CNT_MASK)
+ >> PCAP_EXT_PMU_INTR_RCVR_CNT_SHIFT) + 1;
+
+ if (pmu_intr_recvr_cnt > 1) {
+ W_REG(osh, &pmu->pmuintmask1, 0);
+ }
+
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+}
+#endif /* DONGLEBUILD */
+
+int
+si_pmu_res_state_pwrsw_main_wait(si_t *sih)
+{
+ int ret = BCME_OK;
+
+ switch (CHIPID(sih->chip)) {
+ case BCM4387_CHIP_GRPID:
+ if (PMU_REG(sih, res_state, 0, 0) & PMURES_BIT(RES4387_PWRSW_MAIN)) {
+ SPINWAIT((PMU_REG(sih, res_state, 0, 0) &
+ PMURES_BIT(RES4387_PWRSW_MAIN)), 10000);
+ OSL_DELAY(1000);
+ }
+ ret = (PMU_REG(sih, res_state, 0, 0) & PMURES_BIT(RES4387_PWRSW_MAIN)) ?
+ BCME_ERROR : BCME_OK;
+ break;
+ default:
+ PMU_ERROR(("si_pmu_res_state_pwrsw_main_wait: add support for this chip!\n"));
+ OSL_SYS_HALT();
+ break;
+ }
+
+ return ret;
+}
+
+int
+si_pmu_lvm_csr_update(si_t *sih, bool lvm)
+{
+
+#ifdef BCMDVFS
+ if (BCMDVFS_ENAB() && si_dvfs_enable_status(sih)) {
+ uint32 ndv_volt = lvm ? DVFS_VOLTAGE_NDV : DVFS_VOLTAGE_NDV_NON_LVM;
+ si_dvfs_set_ndv_voltage(sih, ndv_volt);
+ } else
+#endif /* BCMDVFS */
+ {
+ uint32 cbuck_volt = lvm ? CBUCK_VOLT_SW_DEFAULT_4387 : CBUCK_VOLT_NON_LVM;
+ si_pmu_vreg_control(sih, PMU_VREG_0,
+ VREG0_4378_CSR_VOLT_ADJ_PWM_MASK,
+ cbuck_volt << VREG0_4378_CSR_VOLT_ADJ_PWM_SHIFT);
+ }
+ return BCME_OK;
+}
+
+#if defined(BT_WLAN_REG_ON_WAR)
+void
+si_pmu_reg_on_war_ext_wake_perst_set(si_t *sih)
+{
+ uint origidx = si_coreidx(sih);
+ pmuregs_t *pmu = si_setcore(sih, PMU_CORE_ID, 0);
+ osl_t *osh = si_osh(sih);
+
+ if (PMUREV(sih->pmurev) == 40) {
+ /*
+ * set PCIEPerstReq (bit-5) as a wake-up source in
+ * ExtWakeMask0 (0x760) register
+ */
+ W_REG(osh, &pmu->extwakemask0, PMU_EXT_WAKE_MASK_0_PCIE_PERST);
+
+ /*
+ * configure the wakemask as "common backplane" resources to
+ * be up during wake-up in ExtWakeReqMask0 (0x770) register
+ */
+ W_REG(osh, &pmu->extwakereqmask[0], REG_ON_WAR_PMU_EXT_WAKE_REQ_MASK0_VAL);
+ }
+
+ si_setcoreidx(sih, origidx);
+}
+
+void
+si_pmu_reg_on_war_ext_wake_perst_clear(si_t *sih)
+{
+ uint32 val = 0;
+ uint origidx = si_coreidx(sih);
+ pmuregs_t *pmu = si_setcore(sih, PMU_CORE_ID, 0);
+ osl_t *osh = si_osh(sih);
+
+ if (PMUREV(sih->pmurev) == 40) {
+ /* clear all set bits in ExtWakeupStatus (0x744) register */
+ val = R_REG(osh, &pmu->extwakeupstatus);
+ W_REG(osh, &pmu->extwakeupstatus, val);
+ }
+
+ si_setcoreidx(sih, origidx);
+}
+#endif /* BT_WLAN_REG_ON_WAR */
+
+void
+si_pmu_res_state_wait(si_t *sih, uint rsrc)
+{
+ SPINWAIT(!(PMU_REG(sih, res_state, 0, 0) & PMURES_BIT(rsrc)), PMU_MAX_TRANSITION_DLY);
+ ASSERT(PMU_REG(sih, res_state, 0, 0) & PMURES_BIT(rsrc));
+}
diff --git a/bcmdhd.101.10.361.x/include/802.11.h b/bcmdhd.101.10.361.x/include/802.11.h
new file mode 100755
index 0000000..c1cc979
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/802.11.h
@@ -0,0 +1,5920 @@
+/*
+ * Fundamental types and constants relating to 802.11
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _802_11_H_
+#define _802_11_H_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+#ifndef _NET_ETHERNET_H_
+#include <ethernet.h>
+#endif
+
+/* Include WPA definitions here for compatibility */
+#include <wpa.h>
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+#define DOT11_TU_TO_US 1024 /* 802.11 Time Unit is 1024 microseconds */
+#define DOT11_SEC_TO_TU 977u /* 1000000 / DOT11_TU_TO_US = ~977 TU */
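+
+/* Illustrative sketch (not part of the header): using the two conversions above.
+ * A TU is exactly 1024 microseconds, so a typical 100 TU beacon interval is
+ * 102400 us, and one second is ~977 TU (1000000 / 1024 = 976.5625, rounded up).
+ * The helper names are hypothetical; INLINE comes from <typedefs.h>.
+ */
+static INLINE uint32
+dot11_tu_to_us_demo(uint32 tu)
+{
+	return tu * DOT11_TU_TO_US;	/* e.g. 100 TU -> 102400 us */
+}
+
+static INLINE uint32
+dot11_sec_to_tu_demo(uint32 sec)
+{
+	return sec * DOT11_SEC_TO_TU;	/* e.g. 1 s -> ~977 TU */
+}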
+
+/* Generic 802.11 frame constants */
+#define DOT11_A3_HDR_LEN 24 /* d11 header length with A3 */
+#define DOT11_A4_HDR_LEN 30 /* d11 header length with A4 */
+#define DOT11_MAC_HDR_LEN DOT11_A3_HDR_LEN /* MAC header length */
+#define DOT11_FCS_LEN 4u /* d11 FCS length */
+#define DOT11_ICV_LEN 4 /* d11 ICV length */
+#define DOT11_ICV_AES_LEN 8 /* d11 ICV/AES length */
+#define DOT11_MAX_ICV_AES_LEN 16 /* d11 MAX ICV/AES length */
+#define DOT11_QOS_LEN 2 /* d11 QoS length */
+#define DOT11_HTC_LEN 4 /* d11 HT Control field length */
+
+#define DOT11_KEY_INDEX_SHIFT 6 /* d11 key index shift */
+#define DOT11_IV_LEN 4 /* d11 IV length */
+#define DOT11_IV_TKIP_LEN 8 /* d11 IV TKIP length */
+#define DOT11_IV_AES_OCB_LEN 4 /* d11 IV/AES/OCB length */
+#define DOT11_IV_AES_CCM_LEN 8 /* d11 IV/AES/CCM length */
+#define DOT11_IV_WAPI_LEN 18 /* d11 IV WAPI length */
+/* TODO: DOT11_IV_MAX_LEN needs to change to 18, but it currently cannot, as old
+ * branches still reference this component.
+ */
+#define DOT11_IV_MAX_LEN 8 /* maximum iv len for any encryption */
+
+/* Includes MIC */
+#define DOT11_MAX_MPDU_BODY_LEN 2304 /* max MPDU body length */
+/* A4 header + QoS + CCMP + PDU + ICV + FCS = 2352 */
+#define DOT11_MAX_MPDU_LEN (DOT11_A4_HDR_LEN + \
+ DOT11_QOS_LEN + \
+ DOT11_IV_AES_CCM_LEN + \
+ DOT11_MAX_MPDU_BODY_LEN + \
+ DOT11_ICV_LEN + \
+ DOT11_FCS_LEN) /* d11 max MPDU length */
+
+#define DOT11_MAX_SSID_LEN 32 /* d11 max ssid length */
+
+/* dot11RTSThreshold */
+#define DOT11_DEFAULT_RTS_LEN 2347 /* d11 default RTS length */
+#define DOT11_MAX_RTS_LEN 2347 /* d11 max RTS length */
+
+/* dot11FragmentationThreshold */
+#define DOT11_MIN_FRAG_LEN 256 /* d11 min fragmentation length */
+#define DOT11_MAX_FRAG_LEN 2346 /* Max frag is also limited by aMPDUMaxLength
+ * of the attached PHY
+ */
+#define DOT11_DEFAULT_FRAG_LEN 2346 /* d11 default fragmentation length */
+
+/* dot11BeaconPeriod */
+#define DOT11_MIN_BEACON_PERIOD 1 /* d11 min beacon period */
+#define DOT11_MAX_BEACON_PERIOD 0xFFFF /* d11 max beacon period */
+
+/* dot11DTIMPeriod */
+#define DOT11_MIN_DTIM_PERIOD 1 /* d11 min DTIM period */
+#define DOT11_MAX_DTIM_PERIOD 0xFF /* d11 max DTIM period */
+
+/** 802.2 LLC/SNAP header used by 802.11 per 802.1H */
+#define DOT11_LLC_SNAP_HDR_LEN 8 /* d11 LLC/SNAP header length */
+/* minimum LLC header length; DSAP, SSAP, 8 bit Control (unnumbered) */
+#define DOT11_LLC_HDR_LEN_MIN 3
+#define DOT11_OUI_LEN 3 /* d11 OUI length */
+BWL_PRE_PACKED_STRUCT struct dot11_llc_snap_header {
+ uint8 dsap; /* always 0xAA */
+ uint8 ssap; /* always 0xAA */
+ uint8 ctl; /* always 0x03 */
+ uint8 oui[DOT11_OUI_LEN]; /* RFC1042: 0x00 0x00 0x00
+ * Bridge-Tunnel: 0x00 0x00 0xF8
+ */
+ uint16 type; /* ethertype */
+} BWL_POST_PACKED_STRUCT;
+
+/* RFC1042 header used by 802.11 per 802.1H */
+#define RFC1042_HDR_LEN	(ETHER_HDR_LEN + DOT11_LLC_SNAP_HDR_LEN)	/* RFC1042 header length */
+
+#define SFH_LLC_SNAP_SZ (RFC1042_HDR_LEN)
+
+#define COPY_SFH_LLCSNAP(dst, src) \
+ do { \
+ *((uint32 *)dst + 0) = *((uint32 *)src + 0); \
+ *((uint32 *)dst + 1) = *((uint32 *)src + 1); \
+ *((uint32 *)dst + 2) = *((uint32 *)src + 2); \
+ *((uint32 *)dst + 3) = *((uint32 *)src + 3); \
+ *((uint32 *)dst + 4) = *((uint32 *)src + 4); \
+ *(uint16 *)((uint32 *)dst + 5) = *(uint16 *)((uint32 *)src + 5); \
+ } while (0)
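+
+/* Illustrative sketch (not part of the header): building an RFC1042 LLC/SNAP
+ * header with the field values documented in the struct above. Note that
+ * COPY_SFH_LLCSNAP copies the 22-byte RFC1042_HDR_LEN as five 32-bit stores plus
+ * one 16-bit store, so both of its pointers are assumed to be 32-bit aligned.
+ * The helper name is hypothetical, and hton16() is assumed from <bcmendian.h>,
+ * which this header does not itself include.
+ */
+static void
+llc_snap_build_demo(struct dot11_llc_snap_header *h, uint16 ethertype)
+{
+	h->dsap = 0xAA;
+	h->ssap = 0xAA;
+	h->ctl = 0x03;
+	h->oui[0] = 0x00;		/* RFC1042 OUI 00:00:00 */
+	h->oui[1] = 0x00;
+	h->oui[2] = 0x00;
+	h->type = hton16(ethertype);	/* ethertype is big-endian on the wire */
+}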
+
+/* Generic 802.11 MAC header */
+/**
+ * N.B.: This struct reflects the full 4 address 802.11 MAC header.
+ * The fields are defined such that the shorter 1, 2, and 3
+ * address headers just use the first k fields.
+ */
+BWL_PRE_PACKED_STRUCT struct dot11_header {
+ uint16 fc; /* frame control */
+ uint16 durid; /* duration/ID */
+ struct ether_addr a1; /* address 1 */
+ struct ether_addr a2; /* address 2 */
+ struct ether_addr a3; /* address 3 */
+ uint16 seq; /* sequence control */
+ struct ether_addr a4; /* address 4 */
+} BWL_POST_PACKED_STRUCT;
+
+/* Control frames */
+
+BWL_PRE_PACKED_STRUCT struct dot11_rts_frame {
+ uint16 fc; /* frame control */
+ uint16 durid; /* duration/ID */
+ struct ether_addr ra; /* receiver address */
+ struct ether_addr ta; /* transmitter address */
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_RTS_LEN 16 /* d11 RTS frame length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_cts_frame {
+ uint16 fc; /* frame control */
+ uint16 durid; /* duration/ID */
+ struct ether_addr ra; /* receiver address */
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_CTS_LEN 10u /* d11 CTS frame length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_ack_frame {
+ uint16 fc; /* frame control */
+ uint16 durid; /* duration/ID */
+ struct ether_addr ra; /* receiver address */
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_ACK_LEN 10 /* d11 ACK frame length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_ps_poll_frame {
+ uint16 fc; /* frame control */
+ uint16 durid; /* AID */
+ struct ether_addr bssid; /* receiver address, STA in AP */
+ struct ether_addr ta; /* transmitter address */
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_PS_POLL_LEN 16 /* d11 PS poll frame length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_cf_end_frame {
+ uint16 fc; /* frame control */
+ uint16 durid; /* duration/ID */
+ struct ether_addr ra; /* receiver address */
+ struct ether_addr bssid; /* transmitter address, STA in AP */
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_CS_END_LEN 16 /* d11 CF-END frame length */
+
+/**
+ * RWL wifi protocol: the Vendor Specific Action frame is defined for vendor-specific
+ * signaling: category + OUI + vendor-specific content (the content can be variable length)
+ */
+BWL_PRE_PACKED_STRUCT struct dot11_action_wifi_vendor_specific {
+ uint8 category;
+ uint8 OUI[3];
+ uint8 type;
+ uint8 subtype;
+ uint8 data[1040];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_action_wifi_vendor_specific dot11_action_wifi_vendor_specific_t;
+
+/** generic vendor specific action frame with variable length */
+BWL_PRE_PACKED_STRUCT struct dot11_action_vs_frmhdr {
+ uint8 category;
+ uint8 OUI[3];
+ uint8 type;
+ uint8 subtype;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_action_vs_frmhdr dot11_action_vs_frmhdr_t;
+
+#define DOT11_ACTION_VS_HDR_LEN 6
+
+#define BCM_ACTION_OUI_BYTE0 0x00
+#define BCM_ACTION_OUI_BYTE1 0x90
+#define BCM_ACTION_OUI_BYTE2 0x4c
+
+/* BA/BAR Control parameters */
+#define DOT11_BA_CTL_POLICY_NORMAL 0x0000 /* normal ack */
+#define DOT11_BA_CTL_POLICY_NOACK 0x0001 /* no ack */
+#define DOT11_BA_CTL_POLICY_MASK 0x0001 /* ack policy mask */
+
+#define DOT11_BA_CTL_MTID 0x0002 /* multi tid BA */
+#define DOT11_BA_CTL_COMPRESSED 0x0004 /* compressed bitmap */
+
+#define DOT11_BA_CTL_NUMMSDU_MASK 0x0FC0 /* num msdu in bitmap mask */
+#define DOT11_BA_CTL_NUMMSDU_SHIFT 6 /* num msdu in bitmap shift */
+
+#define DOT11_BA_CTL_TID_MASK 0xF000 /* tid mask */
+#define DOT11_BA_CTL_TID_SHIFT 12 /* tid shift */
+
+/** control frame header (BA/BAR) */
+BWL_PRE_PACKED_STRUCT struct dot11_ctl_header {
+ uint16 fc; /* frame control */
+ uint16 durid; /* duration/ID */
+ struct ether_addr ra; /* receiver address */
+ struct ether_addr ta; /* transmitter address */
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_CTL_HDR_LEN 16 /* control frame hdr len */
+
+/** BAR frame payload */
+BWL_PRE_PACKED_STRUCT struct dot11_bar {
+ uint16 bar_control; /* BAR Control */
+ uint16 seqnum; /* Starting Sequence control */
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_BAR_LEN 4 /* BAR frame payload length */
+
+#define DOT11_BA_BITMAP_LEN 128 /* bitmap length */
+#define DOT11_BA_CMP_BITMAP_LEN 8 /* compressed bitmap length */
+/** BA frame payload */
+BWL_PRE_PACKED_STRUCT struct dot11_ba {
+ uint16 ba_control; /* BA Control */
+ uint16 seqnum; /* Starting Sequence control */
+ uint8 bitmap[DOT11_BA_BITMAP_LEN]; /* Block Ack Bitmap */
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_BA_LEN 4 /* BA frame payload len (wo bitmap) */
+
+/** Management frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_management_header {
+ uint16 fc; /* frame control */
+ uint16 durid; /* duration/ID */
+ struct ether_addr da; /* receiver address */
+ struct ether_addr sa; /* transmitter address */
+ struct ether_addr bssid; /* BSS ID */
+ uint16 seq; /* sequence control */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_management_header dot11_management_header_t;
+#define DOT11_MGMT_HDR_LEN 24u /* d11 management header length */
+
+/* Management frame payloads */
+
+BWL_PRE_PACKED_STRUCT struct dot11_bcn_prb {
+ uint32 timestamp[2];
+ uint16 beacon_interval;
+ uint16 capability;
+ uint8 ies[];
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_BCN_PRB_LEN 12 /* 802.11 beacon/probe frame fixed length */
+#define DOT11_BCN_PRB_FIXED_LEN 12u /* 802.11 beacon/probe frame fixed length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_auth {
+ uint16 alg; /* algorithm */
+ uint16 seq; /* sequence control */
+ uint16 status; /* status code */
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_AUTH_FIXED_LEN 6 /* length of auth frame without challenge IE */
+#define DOT11_AUTH_SEQ_STATUS_LEN 4 /* length of auth frame without challenge IE and
+ * without algorithm
+ */
+
+BWL_PRE_PACKED_STRUCT struct dot11_assoc_req {
+ uint16 capability; /* capability information */
+ uint16 listen; /* listen interval */
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_ASSOC_REQ_FIXED_LEN 4 /* length of assoc frame without info elts */
+
+BWL_PRE_PACKED_STRUCT struct dot11_reassoc_req {
+ uint16 capability; /* capability information */
+ uint16 listen; /* listen interval */
+ struct ether_addr ap; /* Current AP address */
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_REASSOC_REQ_FIXED_LEN	10	/* length of reassoc req frame without info elts */
+
+BWL_PRE_PACKED_STRUCT struct dot11_assoc_resp {
+ uint16 capability; /* capability information */
+ uint16 status; /* status code */
+ uint16 aid; /* association ID */
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_ASSOC_RESP_FIXED_LEN 6 /* length of assoc resp frame without info elts */
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_measure {
+ uint8 category;
+ uint8 action;
+ uint8 token;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_ACTION_MEASURE_LEN 3 /* d11 action measurement header length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_ht_ch_width {
+ uint8 category;
+ uint8 action;
+ uint8 ch_width;
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_ht_mimops {
+ uint8 category;
+ uint8 action;
+ uint8 control;
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_sa_query {
+ uint8 category;
+ uint8 action;
+ uint16 id;
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_vht_oper_mode {
+ uint8 category;
+ uint8 action;
+ uint8 mode;
+} BWL_POST_PACKED_STRUCT;
+
+/* These lengths assume 64 MU groups, as specified in 802.11ac-2013 */
+#define DOT11_ACTION_GID_MEMBERSHIP_LEN 8 /* bytes */
+#define DOT11_ACTION_GID_USER_POS_LEN 16 /* bytes */
+BWL_PRE_PACKED_STRUCT struct dot11_action_group_id {
+ uint8 category;
+ uint8 action;
+ uint8 membership_status[DOT11_ACTION_GID_MEMBERSHIP_LEN];
+ uint8 user_position[DOT11_ACTION_GID_USER_POS_LEN];
+} BWL_POST_PACKED_STRUCT;
+
+#define SM_PWRSAVE_ENABLE 1
+#define SM_PWRSAVE_MODE 2
+
+/* ************* 802.11h related definitions. ************* */
+BWL_PRE_PACKED_STRUCT struct dot11_power_cnst {
+ uint8 id;
+ uint8 len;
+ uint8 power;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_power_cnst dot11_power_cnst_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_power_cap {
+ int8 min;
+ int8 max;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_power_cap dot11_power_cap_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_tpc_rep {
+ uint8 id;
+ uint8 len;
+ uint8 tx_pwr;
+ uint8 margin;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tpc_rep dot11_tpc_rep_t;
+#define DOT11_MNG_IE_TPC_REPORT_SIZE (sizeof(dot11_tpc_rep_t))
+#define DOT11_MNG_IE_TPC_REPORT_LEN 2 /* length of IE data, not including 2 byte header */
+
+BWL_PRE_PACKED_STRUCT struct dot11_supp_channels {
+ uint8 id;
+ uint8 len;
+ uint8 first_channel;
+ uint8 num_channels;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_supp_channels dot11_supp_channels_t;
+
+/**
+ * Extension Channel Offset IE: the 802.11n-D1.0 spec added a sideband
+ * offset for 40MHz operation. The 3 possible values are:
+ * 1 = above control channel
+ * 3 = below control channel
+ * 0 = no extension channel
+ */
+BWL_PRE_PACKED_STRUCT struct dot11_extch {
+ uint8 id; /* IE ID, 62, DOT11_MNG_EXT_CHANNEL_OFFSET */
+ uint8 len; /* IE length */
+ uint8 extch;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_extch dot11_extch_ie_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_brcm_extch {
+ uint8 id; /* IE ID, 221, DOT11_MNG_PROPR_ID */
+ uint8 len; /* IE length */
+ uint8 oui[3]; /* Proprietary OUI, BRCM_PROP_OUI */
+ uint8 type; /* type indicates what follows */
+ uint8 extch;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_brcm_extch dot11_brcm_extch_ie_t;
+
+#define BRCM_EXTCH_IE_LEN 5
+#define BRCM_EXTCH_IE_TYPE 53 /* 802.11n ID not yet assigned */
+#define DOT11_EXTCH_IE_LEN 1
+#define DOT11_EXT_CH_MASK 0x03 /* extension channel mask */
+#define DOT11_EXT_CH_UPPER 0x01 /* ext. ch. on upper sb */
+#define DOT11_EXT_CH_LOWER 0x03 /* ext. ch. on lower sb */
+#define DOT11_EXT_CH_NONE 0x00 /* no extension ch. */
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_frmhdr {
+ uint8 category;
+ uint8 action;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_action_frmhdr dot11_action_frmhdr_t;
+
+/* Action Field length */
+#define DOT11_ACTION_CATEGORY_LEN 1u
+#define DOT11_ACTION_ACTION_LEN 1u
+#define DOT11_ACTION_DIALOG_TOKEN_LEN 1u
+#define DOT11_ACTION_CAPABILITY_LEN 2u
+#define DOT11_ACTION_STATUS_CODE_LEN 2u
+#define DOT11_ACTION_REASON_CODE_LEN 2u
+#define DOT11_ACTION_TARGET_CH_LEN 1u
+#define DOT11_ACTION_OPER_CLASS_LEN 1u
+
+#define DOT11_ACTION_FRMHDR_LEN 2
+
+/** CSA IE data structure */
+BWL_PRE_PACKED_STRUCT struct dot11_channel_switch {
+ uint8 id; /* id DOT11_MNG_CHANNEL_SWITCH_ID */
+ uint8 len; /* length of IE */
+ uint8 mode; /* mode 0 or 1 */
+ uint8 channel; /* channel switch to */
+ uint8 count; /* number of beacons before switching */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_channel_switch dot11_chan_switch_ie_t;
+
+#define DOT11_SWITCH_IE_LEN 3 /* length of IE data, not including 2 byte header */
+/* CSA mode - 802.11h-2003 $7.3.2.20 */
+#define DOT11_CSA_MODE_ADVISORY 0 /* no DOT11_CSA_MODE_NO_TX restriction imposed */
+#define DOT11_CSA_MODE_NO_TX 1 /* no transmission upon receiving CSA frame. */
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_switch_channel {
+ uint8 category;
+ uint8 action;
+ dot11_chan_switch_ie_t chan_switch_ie; /* for switch IE */
+ dot11_brcm_extch_ie_t extch_ie; /* extension channel offset */
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct dot11_csa_body {
+ uint8 mode; /* mode 0 or 1 */
+ uint8 reg; /* regulatory class */
+ uint8 channel; /* channel switch to */
+ uint8 count; /* number of beacons before switching */
+} BWL_POST_PACKED_STRUCT;
+
+/** 11n Extended Channel Switch IE data structure */
+BWL_PRE_PACKED_STRUCT struct dot11_ext_csa {
+ uint8 id; /* id DOT11_MNG_EXT_CSA_ID */
+ uint8 len; /* length of IE */
+ struct dot11_csa_body b; /* body of the ie */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ext_csa dot11_ext_csa_ie_t;
+#define DOT11_EXT_CSA_IE_LEN 4 /* length of extended channel switch IE body */
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_ext_csa {
+ uint8 category;
+ uint8 action;
+ dot11_ext_csa_ie_t chan_switch_ie; /* for switch IE */
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct dot11y_action_ext_csa {
+ uint8 category;
+ uint8 action;
+ struct dot11_csa_body b; /* body of the ie */
+} BWL_POST_PACKED_STRUCT;
+
+/** Wide Bandwidth Channel Switch IE data structure */
+BWL_PRE_PACKED_STRUCT struct dot11_wide_bw_channel_switch {
+ uint8 id; /* id DOT11_MNG_WIDE_BW_CHANNEL_SWITCH_ID */
+ uint8 len; /* length of IE */
+ uint8 channel_width; /* new channel width */
+ uint8 center_frequency_segment_0; /* center frequency segment 0 */
+ uint8 center_frequency_segment_1; /* center frequency segment 1 */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_wide_bw_channel_switch dot11_wide_bw_chan_switch_ie_t;
+
+#define DOT11_WIDE_BW_SWITCH_IE_LEN 3 /* length of IE data, not including 2 byte header */
+
+/** Channel Switch Wrapper IE data structure */
+BWL_PRE_PACKED_STRUCT struct dot11_channel_switch_wrapper {
+ uint8 id; /* id DOT11_MNG_WIDE_BW_CHANNEL_SWITCH_ID */
+ uint8 len; /* length of IE */
+ dot11_wide_bw_chan_switch_ie_t wb_chan_switch_ie;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_channel_switch_wrapper dot11_chan_switch_wrapper_ie_t;
+
+/* Proposed wide bandwidth channel IE */
+typedef enum wide_bw_chan_width {
+ WIDE_BW_CHAN_WIDTH_20 = 0,
+ WIDE_BW_CHAN_WIDTH_40 = 1,
+ WIDE_BW_CHAN_WIDTH_80 = 2,
+ WIDE_BW_CHAN_WIDTH_160 = 3,
+ WIDE_BW_CHAN_WIDTH_80_80 = 4
+} wide_bw_chan_width_t;
+
+/** Wide Bandwidth Channel IE data structure */
+BWL_PRE_PACKED_STRUCT struct dot11_wide_bw_channel {
+ uint8 id; /* id DOT11_MNG_WIDE_BW_CHANNEL_ID */
+ uint8 len; /* length of IE */
+ uint8 channel_width; /* channel width */
+ uint8 center_frequency_segment_0; /* center frequency segment 0 */
+ uint8 center_frequency_segment_1; /* center frequency segment 1 */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_wide_bw_channel dot11_wide_bw_chan_ie_t;
+
+#define DOT11_WIDE_BW_IE_LEN 3 /* length of IE data, not including 2 byte header */
+/** VHT Transmit Power Envelope IE data structure */
+BWL_PRE_PACKED_STRUCT struct dot11_vht_transmit_power_envelope {
+ uint8 id; /* id DOT11_MNG_WIDE_BW_CHANNEL_SWITCH_ID */
+ uint8 len; /* length of IE */
+ uint8 transmit_power_info;
+ uint8 local_max_transmit_power_20;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_vht_transmit_power_envelope dot11_vht_transmit_power_envelope_ie_t;
+
+/* vht transmit power envelope IE length depends on channel width */
+#define DOT11_VHT_TRANSMIT_PWR_ENVELOPE_IE_LEN_40MHZ 1
+#define DOT11_VHT_TRANSMIT_PWR_ENVELOPE_IE_LEN_80MHZ 2
+#define DOT11_VHT_TRANSMIT_PWR_ENVELOPE_IE_LEN_160MHZ 3
+
+/* TPE Transmit Power Information Field */
+#define DOT11_TPE_INFO_MAX_TX_PWR_CNT_MASK 0x07u
+#define DOT11_TPE_INFO_MAX_TX_PWR_INTRPN_MASK 0x38u
+#define DOT11_TPE_INFO_MAX_TX_PWR_INTRPN_SHIFT 3u
+#define DOT11_TPE_INFO_MAX_TX_PWR_CAT_MASK 0xC0u
+#define DOT11_TPE_INFO_MAX_TX_PWR_CAT_SHIFT 6u
+
+/* TPE Transmit Power Information Field Accessor */
+#define DOT11_TPE_INFO_MAX_TX_PWR_CNT(x) \
+ (x & DOT11_TPE_INFO_MAX_TX_PWR_CNT_MASK)
+#define DOT11_TPE_INFO_MAX_TX_PWR_INTRPN(x) \
+ (((x) & DOT11_TPE_INFO_MAX_TX_PWR_INTRPN_MASK) >> \
+ DOT11_TPE_INFO_MAX_TX_PWR_INTRPN_SHIFT)
+#define DOT11_TPE_INFO_MAX_TX_PWR_CAT(x) \
+ (((x) & DOT11_TPE_INFO_MAX_TX_PWR_CAT_MASK) >> \
+ DOT11_TPE_INFO_MAX_TX_PWR_CAT_SHIFT)
+
+/* Maximum Transmit Power Interpretation subfield */
+#define DOT11_TPE_MAX_TX_PWR_INTRPN_LOCAL_EIRP 0u
+#define DOT11_TPE_MAX_TX_PWR_INTRPN_LOCAL_EIRP_PSD 1u
+#define DOT11_TPE_MAX_TX_PWR_INTRPN_REG_CLIENT_EIRP 2u
+#define DOT11_TPE_MAX_TX_PWR_INTRPN_REG_CLIENT_EIRP_PSD 3u
+
+/* Maximum Transmit Power category subfield */
+#define DOT11_TPE_MAX_TX_PWR_CAT_DEFAULT 0u
+
+/* Maximum Transmit Power category subfield in US */
+#define DOT11_TPE_MAX_TX_PWR_CAT_US_DEFAULT 0u
+#define DOT11_TPE_MAX_TX_PWR_CAT_US_SUB_DEV 1u
+
+/* Maximum Transmit Power Count subfield values when
+ * Maximum Transmit Power Interpretation subfield is 0 or 2
+ */
+#define DOT11_TPE_INFO_MAX_TX_CNT_EIRP_20_MHZ 0u
+#define DOT11_TPE_INFO_MAX_TX_CNT_EIRP_20_40_MHZ 1u
+#define DOT11_TPE_INFO_MAX_TX_CNT_EIRP_20_40_80_MHZ 2u
+#define DOT11_TPE_INFO_MAX_TX_CNT_EIRP_20_40_80_160_MHZ 3u
+
+/* Maximum Transmit Power Count subfield values when
+ * Maximum Transmit Power Interpretation subfield is 1 or 3
+ */
+#define DOT11_TPE_INFO_MAX_TX_CNT_PSD_VAL_0 0u
+#define DOT11_TPE_INFO_MAX_TX_CNT_PSD_VAL_1 1u
+#define DOT11_TPE_INFO_MAX_TX_CNT_PSD_VAL_2 2u
+#define DOT11_TPE_INFO_MAX_TX_CNT_PSD_VAL_3 4u
+#define DOT11_TPE_INFO_MAX_TX_CNT_PSD_VAL_4 8u
+
+#define DOT11_TPE_MAX_TX_PWR_EIRP_MIN -128 /* 0.5 db step */
+#define DOT11_TPE_MAX_TX_PWR_EIRP_MAX 126 /* 0.5 db step */
+#define DOT11_TPE_MAX_TX_PWR_EIRP_NO_LIMIT 127 /* 0.5 db step */
+
+#define DOT11_TPE_MAX_TX_PWR_PSD_BLOCKED -128
+#define DOT11_TPE_MAX_TX_PWR_PSD_NO_LIMIT 127u
+/** Transmit Power Envelope IE data structure as per 11ax draft */
+BWL_PRE_PACKED_STRUCT struct dot11_transmit_power_envelope {
+ uint8 id; /* id DOT11_MNG_WIDE_BW_CHANNEL_SWITCH_ID */
+ uint8 len; /* length of IE */
+ uint8 transmit_power_info;
+ uint8 max_transmit_power[]; /* Variable length */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_transmit_power_envelope dot11_transmit_power_envelope_ie_t;
+/* id (1) + len (1) + transmit_power_info(1) + max_transmit_power(1) */
+#define DOT11_TPE_ELEM_MIN_LEN 4u
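+
+/* Illustrative sketch (not part of the header): pulling the three subfields out
+ * of transmit_power_info with the accessor macros above, after bounds-checking
+ * the element. The helper name is hypothetical, and the check assumes ie->len
+ * counts transmit_power_info plus the max_transmit_power array (i.e. excludes
+ * the 2-byte id/len header, as DOT11_TPE_ELEM_MIN_LEN suggests).
+ */
+static bool
+tpe_ie_parse_demo(const dot11_transmit_power_envelope_ie_t *ie,
+	uint8 *cnt, uint8 *intrpn, uint8 *cat)
+{
+	if (ie->len < (DOT11_TPE_ELEM_MIN_LEN - 2u)) {
+		return FALSE;	/* too short for the info byte + one power value */
+	}
+	*cnt = DOT11_TPE_INFO_MAX_TX_PWR_CNT(ie->transmit_power_info);
+	*intrpn = DOT11_TPE_INFO_MAX_TX_PWR_INTRPN(ie->transmit_power_info);
+	*cat = DOT11_TPE_INFO_MAX_TX_PWR_CAT(ie->transmit_power_info);
+	return TRUE;
+}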
+
+BWL_PRE_PACKED_STRUCT struct dot11_obss_coex {
+ uint8 id;
+ uint8 len;
+ uint8 info;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_obss_coex dot11_obss_coex_t;
+#define DOT11_OBSS_COEXINFO_LEN 1 /* length of OBSS Coexistence INFO IE */
+
+#define DOT11_OBSS_COEX_INFO_REQ 0x01
+#define DOT11_OBSS_COEX_40MHZ_INTOLERANT 0x02
+#define DOT11_OBSS_COEX_20MHZ_WIDTH_REQ 0x04
+
+BWL_PRE_PACKED_STRUCT struct dot11_obss_chanlist {
+ uint8 id;
+ uint8 len;
+ uint8 regclass;
+ uint8 chanlist[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_obss_chanlist dot11_obss_chanlist_t;
+#define DOT11_OBSS_CHANLIST_FIXED_LEN 1 /* fixed length of regclass */
+
+BWL_PRE_PACKED_STRUCT struct dot11_extcap_ie {
+ uint8 id;
+ uint8 len;
+ uint8 cap[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_extcap_ie dot11_extcap_ie_t;
+
+#define DOT11_EXTCAP_LEN_COEX 1
+#define DOT11_EXTCAP_LEN_BT 3
+#define DOT11_EXTCAP_LEN_IW 4
+#define DOT11_EXTCAP_LEN_SI 6
+
+#define DOT11_EXTCAP_LEN_TDLS 5
+#define DOT11_11AC_EXTCAP_LEN_TDLS 8
+
+#define DOT11_EXTCAP_LEN_FMS 2
+#define DOT11_EXTCAP_LEN_PROXY_ARP 2
+#define DOT11_EXTCAP_LEN_TFS 3
+#define DOT11_EXTCAP_LEN_WNM_SLEEP 3
+#define DOT11_EXTCAP_LEN_TIMBC 3
+#define DOT11_EXTCAP_LEN_BSSTRANS 3
+#define DOT11_EXTCAP_LEN_DMS 4
+#define DOT11_EXTCAP_LEN_WNM_NOTIFICATION 6
+#define DOT11_EXTCAP_LEN_TDLS_WBW 8
+#define DOT11_EXTCAP_LEN_OPMODE_NOTIFICATION 8
+#define DOT11_EXTCAP_LEN_TWT 10u
+#define DOT11_EXTCAP_LEN_BCN_PROT 11u
+
+/* TDLS Capabilities */
+#define DOT11_TDLS_CAP_TDLS 37 /* TDLS support */
+#define DOT11_TDLS_CAP_PU_BUFFER_STA 28 /* TDLS Peer U-APSD buffer STA support */
+#define DOT11_TDLS_CAP_PEER_PSM 20 /* TDLS Peer PSM support */
+#define DOT11_TDLS_CAP_CH_SW 30 /* TDLS Channel switch */
+#define DOT11_TDLS_CAP_PROH 38 /* TDLS prohibited */
+#define DOT11_TDLS_CAP_CH_SW_PROH 39 /* TDLS Channel switch prohibited */
+#define DOT11_TDLS_CAP_TDLS_WIDER_BW 61 /* TDLS Wider Band-Width */
+
+#define TDLS_CAP_MAX_BIT 39 /* TDLS max bit defined in ext cap */
+
+/* FIXME: remove redundant DOT11_CAP_SAE_HASH_TO_ELEMENT */
+#define DOT11_CAP_SAE_HASH_TO_ELEMENT 5u /* SAE Hash-to-element support */
+#define DOT11_EXT_RSN_CAP_SAE_H2E 5u /* SAE Hash-to-element support */
+/* FIXME: Use these temporary IDs until ANA assigns IDs */
+#define DOT11_EXT_RSN_CAP_SAE_PK 6u /* SAE-PK support */
+/* Last bit in extended rsn capabilities (RSNXE) */
+#define DOT11_EXT_RSN_CAP_MAX_BIT DOT11_EXT_RSN_CAP_SAE_PK
+
+BWL_PRE_PACKED_STRUCT struct dot11_rsnxe {
+ uint8 id; /* id DOT11_MNG_RSNXE_ID */
+ uint8 len;
+ uint8 cap[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rsnxe dot11_rsnxe_t;
+
+#define RSNXE_CAP_LENGTH_MASK (0x0f)
+#define RSNXE_CAP_LENGTH(cap) ((uint8)(cap) & RSNXE_CAP_LENGTH_MASK)
+#define RSNXE_SET_CAP_LENGTH(cap, len)\
+ (cap = (cap & ~RSNXE_CAP_LENGTH_MASK) | ((uint8)(len) & RSNXE_CAP_LENGTH_MASK))
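+
+/* Illustrative sketch (not part of the header): testing an extended RSN
+ * capability bit in an RSNXE, e.g. DOT11_EXT_RSN_CAP_SAE_H2E (bit 5, which
+ * lands in cap[0]). Bit n of the capability field is bit (n & 7) of octet
+ * (n >> 3). The helper name is hypothetical.
+ */
+static bool
+rsnxe_cap_isset_demo(const dot11_rsnxe_t *rsnxe, uint8 bit)
+{
+	if (rsnxe->len <= (bit >> 3)) {
+		return FALSE;	/* capability octet not present */
+	}
+	return (rsnxe->cap[bit >> 3] & (1u << (bit & 7u))) != 0;
+}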
+
+BWL_PRE_PACKED_STRUCT struct dot11_rejected_groups_ie {
+ uint8 id; /* DOT11_MNG_EXT_ID */
+ uint8 len;
+ uint8 id_ext; /* DOT11_MNG_REJECTED_GROUPS_ID */
+ uint16 groups[];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rejected_groups_ie dot11_rejected_groups_ie_t;
+
+/* 802.11h/802.11k Measurement Request/Report IEs */
+/* Measurement Type field */
+#define DOT11_MEASURE_TYPE_BASIC 0 /* d11 measurement basic type */
+#define DOT11_MEASURE_TYPE_CCA 1 /* d11 measurement CCA type */
+#define DOT11_MEASURE_TYPE_RPI 2 /* d11 measurement RPI type */
+#define DOT11_MEASURE_TYPE_CHLOAD 3 /* d11 measurement Channel Load type */
+#define DOT11_MEASURE_TYPE_NOISE 4 /* d11 measurement Noise Histogram type */
+#define DOT11_MEASURE_TYPE_BEACON 5 /* d11 measurement Beacon type */
+#define DOT11_MEASURE_TYPE_FRAME 6 /* d11 measurement Frame type */
+#define DOT11_MEASURE_TYPE_STAT 7 /* d11 measurement STA Statistics type */
+#define DOT11_MEASURE_TYPE_LCI 8 /* d11 measurement LCI type */
+#define DOT11_MEASURE_TYPE_TXSTREAM 9 /* d11 measurement TX Stream type */
+#define DOT11_MEASURE_TYPE_MCDIAGS 10 /* d11 measurement multicast diagnostics */
+#define DOT11_MEASURE_TYPE_CIVICLOC 11 /* d11 measurement location civic */
+#define DOT11_MEASURE_TYPE_LOC_ID 12 /* d11 measurement location identifier */
+#define DOT11_MEASURE_TYPE_DIRCHANQ 13 /* d11 measurement dir channel quality */
+#define DOT11_MEASURE_TYPE_DIRMEAS 14 /* d11 measurement directional */
+#define DOT11_MEASURE_TYPE_DIRSTATS 15 /* d11 measurement directional stats */
+#define DOT11_MEASURE_TYPE_FTMRANGE 16 /* d11 measurement Fine Timing */
+#define DOT11_MEASURE_TYPE_PAUSE 255 /* d11 measurement pause type */
+
+/* Measurement Request Modes */
+#define DOT11_MEASURE_MODE_PARALLEL (1<<0) /* d11 measurement parallel */
+#define DOT11_MEASURE_MODE_ENABLE (1<<1) /* d11 measurement enable */
+#define DOT11_MEASURE_MODE_REQUEST (1<<2) /* d11 measurement request */
+#define DOT11_MEASURE_MODE_REPORT (1<<3) /* d11 measurement report */
+#define DOT11_MEASURE_MODE_DUR (1<<4) /* d11 measurement dur mandatory */
+/* Measurement Report Modes */
+#define DOT11_MEASURE_MODE_LATE (1<<0) /* d11 measurement late */
+#define DOT11_MEASURE_MODE_INCAPABLE (1<<1) /* d11 measurement incapable */
+#define DOT11_MEASURE_MODE_REFUSED (1<<2) /* d11 measurement refuse */
+/* Basic Measurement Map bits */
+#define DOT11_MEASURE_BASIC_MAP_BSS ((uint8)(1<<0)) /* d11 measurement basic map BSS */
+#define DOT11_MEASURE_BASIC_MAP_OFDM ((uint8)(1<<1)) /* d11 measurement map OFDM */
+#define DOT11_MEASURE_BASIC_MAP_UKNOWN ((uint8)(1<<2)) /* d11 measurement map unknown */
+#define DOT11_MEASURE_BASIC_MAP_RADAR ((uint8)(1<<3)) /* d11 measurement map radar */
+#define DOT11_MEASURE_BASIC_MAP_UNMEAS	((uint8)(1<<4))	/* d11 measurement map unmeasured */
+
+BWL_PRE_PACKED_STRUCT struct dot11_meas_req {
+ uint8 id;
+ uint8 len;
+ uint8 token;
+ uint8 mode;
+ uint8 type;
+ uint8 channel;
+ uint8 start_time[8];
+ uint16 duration;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_meas_req dot11_meas_req_t;
+#define DOT11_MNG_IE_MREQ_LEN 14 /* d11 measurement request IE length */
+/* length of Measure Request IE data not including variable len */
+#define DOT11_MNG_IE_MREQ_FIXED_LEN 3 /* d11 measurement request IE fixed length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_meas_req_loc {
+ uint8 id;
+ uint8 len;
+ uint8 token;
+ uint8 mode;
+ uint8 type;
+ BWL_PRE_PACKED_STRUCT union
+ {
+ BWL_PRE_PACKED_STRUCT struct {
+ uint8 subject;
+ uint8 data[1];
+ } BWL_POST_PACKED_STRUCT lci;
+ BWL_PRE_PACKED_STRUCT struct {
+ uint8 subject;
+ uint8 type; /* type of civic location */
+ uint8 siu; /* service interval units */
+ uint16 si; /* service interval */
+ uint8 data[1];
+ } BWL_POST_PACKED_STRUCT civic;
+ BWL_PRE_PACKED_STRUCT struct {
+ uint8 subject;
+ uint8 siu; /* service interval units */
+ uint16 si; /* service interval */
+ uint8 data[1];
+ } BWL_POST_PACKED_STRUCT locid;
+ BWL_PRE_PACKED_STRUCT struct {
+ uint16 max_init_delay; /* maximum random initial delay */
+ uint8 min_ap_count;
+ uint8 data[1];
+ } BWL_POST_PACKED_STRUCT ftm_range;
+ } BWL_POST_PACKED_STRUCT req;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_meas_req_loc dot11_meas_req_loc_t;
+#define DOT11_MNG_IE_MREQ_MIN_LEN 4 /* d11 measurement request IE min length */
+#define DOT11_MNG_IE_MREQ_LCI_FIXED_LEN 4 /* d11 LCI measurement request IE fixed length */
+#define DOT11_MNG_IE_MREQ_CIVIC_FIXED_LEN 8 /* d11 civic measurement request IE fixed length */
+#define DOT11_MNG_IE_MREQ_FRNG_FIXED_LEN 6 /* d11 FTM range measurement request IE fixed length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_lci_subelement {
+ uint8 subelement;
+ uint8 length;
+ uint8 lci_data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_lci_subelement dot11_lci_subelement_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_colocated_bssid_list_se {
+ uint8 sub_id;
+ uint8 length;
+ uint8 max_bssid_ind; /* MaxBSSID Indicator */
+ struct ether_addr bssid[1]; /* variable */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_colocated_bssid_list_se dot11_colocated_bssid_list_se_t;
+#define DOT11_LCI_COLOCATED_BSSID_LIST_FIXED_LEN 3
+#define DOT11_LCI_COLOCATED_BSSID_SUBELEM_ID 7
+
+BWL_PRE_PACKED_STRUCT struct dot11_civic_subelement {
+ uint8 type; /* type of civic location */
+ uint8 subelement;
+ uint8 length;
+ uint8 civic_data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_civic_subelement dot11_civic_subelement_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_meas_rep {
+ uint8 id;
+ uint8 len;
+ uint8 token;
+ uint8 mode;
+ uint8 type;
+ BWL_PRE_PACKED_STRUCT union
+ {
+ BWL_PRE_PACKED_STRUCT struct {
+ uint8 channel;
+ uint8 start_time[8];
+ uint16 duration;
+ uint8 map;
+ } BWL_POST_PACKED_STRUCT basic;
+ BWL_PRE_PACKED_STRUCT struct {
+ uint8 subelement;
+ uint8 length;
+ uint8 data[1];
+ } BWL_POST_PACKED_STRUCT lci;
+ BWL_PRE_PACKED_STRUCT struct {
+ uint8 type; /* type of civic location */
+ uint8 subelement;
+ uint8 length;
+ uint8 data[1];
+ } BWL_POST_PACKED_STRUCT civic;
+ BWL_PRE_PACKED_STRUCT struct {
+ uint8 exp_tsf[8];
+ uint8 subelement;
+ uint8 length;
+ uint8 data[1];
+ } BWL_POST_PACKED_STRUCT locid;
+ BWL_PRE_PACKED_STRUCT struct {
+ uint8 entry_count;
+ uint8 data[1];
+ } BWL_POST_PACKED_STRUCT ftm_range;
+ uint8 data[1];
+ } BWL_POST_PACKED_STRUCT rep;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_meas_rep dot11_meas_rep_t;
+#define DOT11_MNG_IE_MREP_MIN_LEN 5 /* d11 measurement report IE min length */
+#define DOT11_MNG_IE_MREP_LCI_FIXED_LEN 5 /* d11 LCI measurement report IE fixed length */
+#define DOT11_MNG_IE_MREP_CIVIC_FIXED_LEN 6 /* d11 civic measurement report IE fixed length */
+#define DOT11_MNG_IE_MREP_LOCID_FIXED_LEN 13 /* d11 location id measurement report IE fixed length */
+#define DOT11_MNG_IE_MREP_BASIC_FIXED_LEN 15 /* d11 basic measurement report IE fixed length */
+#define DOT11_MNG_IE_MREP_FRNG_FIXED_LEN 4
+
+/* length of Measure Report IE data not including variable len */
+#define DOT11_MNG_IE_MREP_FIXED_LEN 3 /* d11 measurement report IE fixed length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_meas_rep_basic {
+ uint8 channel;
+ uint8 start_time[8];
+ uint16 duration;
+ uint8 map;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_meas_rep_basic dot11_meas_rep_basic_t;
+#define DOT11_MEASURE_BASIC_REP_LEN 12 /* d11 measurement basic report length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_quiet {
+ uint8 id;
+ uint8 len;
+ uint8 count; /* TBTTs until the beacon interval in which the quiet period starts */
+ uint8 period; /* beacon intervals between periodic quiet periods */
+ uint16 duration; /* length of the quiet period, in TUs */
+ uint16 offset; /* offset from the TBTT indicated by Count, in TUs */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_quiet dot11_quiet_t;
+
+BWL_PRE_PACKED_STRUCT struct chan_map_tuple {
+ uint8 channel;
+ uint8 map;
+} BWL_POST_PACKED_STRUCT;
+typedef struct chan_map_tuple chan_map_tuple_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_ibss_dfs {
+ uint8 id;
+ uint8 len;
+ uint8 eaddr[ETHER_ADDR_LEN];
+ uint8 interval;
+ chan_map_tuple_t map[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ibss_dfs dot11_ibss_dfs_t;
+
+/* WME Elements */
+#define WME_OUI "\x00\x50\xf2" /* WME OUI */
+#define WME_OUI_LEN 3
+#define WME_OUI_TYPE 2 /* WME type */
+#define WME_TYPE 2 /* WME type, deprecated */
+#define WME_SUBTYPE_IE 0 /* Information Element */
+#define WME_SUBTYPE_PARAM_IE 1 /* Parameter Element */
+#define WME_SUBTYPE_TSPEC 2 /* Traffic Specification */
+#define WME_VER 1 /* WME version */
+
+/* WME Access Category Indices (ACIs) */
+#define AC_BE 0 /* Best Effort */
+#define AC_BK 1 /* Background */
+#define AC_VI 2 /* Video */
+#define AC_VO 3 /* Voice */
+#define AC_COUNT 4 /* number of ACs */
+
+typedef uint8 ac_bitmap_t; /* AC bitmap of (1 << AC_xx) */
+
+#define AC_BITMAP_NONE 0x0 /* No ACs */
+#define AC_BITMAP_ALL 0xf /* All ACs */
+#define AC_BITMAP_TST(ab, ac) (((ab) & (1 << (ac))) != 0)
+#define AC_BITMAP_SET(ab, ac) (((ab) |= (1 << (ac))))
+#define AC_BITMAP_RESET(ab, ac) (((ab) &= ~(1 << (ac))))
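+
+/* Illustrative sketch (editor's addition): building an AC bitmap that admits
+ * only video and voice, then testing an AC against it; bool comes from
+ * typedefs.h, as elsewhere in this driver.
+ */
+static inline bool
+ac_is_vi_or_vo(uint8 ac)
+{
+	ac_bitmap_t ab = AC_BITMAP_NONE;
+
+	AC_BITMAP_SET(ab, AC_VI);
+	AC_BITMAP_SET(ab, AC_VO);
+	return AC_BITMAP_TST(ab, ac);
+}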
+
+/* Management PKT Lifetime indices */
+/* Flag checks for 'WLTEST' were removed
+ * while merging BIS120RC4 to DINGO2
+ */
+#define MGMT_ALL 0xffff
+#define MGMT_AUTH_LT FC_SUBTYPE_AUTH
+#define MGMT_ASSOC_LT FC_SUBTYPE_ASSOC_REQ
+
+/** WME Information Element (IE) */
+BWL_PRE_PACKED_STRUCT struct wme_ie {
+ uint8 oui[3];
+ uint8 type;
+ uint8 subtype;
+ uint8 version;
+ uint8 qosinfo;
+} BWL_POST_PACKED_STRUCT;
+typedef struct wme_ie wme_ie_t;
+#define WME_IE_LEN 7 /* WME IE length */
+
+BWL_PRE_PACKED_STRUCT struct edcf_acparam {
+ uint8 ACI;
+ uint8 ECW;
+ uint16 TXOP; /* stored in network order (ls octet first) */
+} BWL_POST_PACKED_STRUCT;
+typedef struct edcf_acparam edcf_acparam_t;
+
+/** WME Parameter Element (PE) */
+BWL_PRE_PACKED_STRUCT struct wme_param_ie {
+ uint8 oui[3];
+ uint8 type;
+ uint8 subtype;
+ uint8 version;
+ uint8 qosinfo;
+ uint8 rsvd;
+ edcf_acparam_t acparam[AC_COUNT];
+} BWL_POST_PACKED_STRUCT;
+typedef struct wme_param_ie wme_param_ie_t;
+#define WME_PARAM_IE_LEN 24 /* WME Parameter IE length */
+
+/* QoS Info field for IE as sent from AP */
+#define WME_QI_AP_APSD_MASK 0x80 /* U-APSD Supported mask */
+#define WME_QI_AP_APSD_SHIFT 7 /* U-APSD Supported shift */
+#define WME_QI_AP_COUNT_MASK 0x0f /* Parameter set count mask */
+#define WME_QI_AP_COUNT_SHIFT 0 /* Parameter set count shift */
+
+/* QoS Info field for IE as sent from STA */
+#define WME_QI_STA_MAXSPLEN_MASK 0x60 /* Max Service Period Length mask */
+#define WME_QI_STA_MAXSPLEN_SHIFT 5 /* Max Service Period Length shift */
+#define WME_QI_STA_APSD_ALL_MASK 0xf /* APSD all AC bits mask */
+#define WME_QI_STA_APSD_ALL_SHIFT 0 /* APSD all AC bits shift */
+#define WME_QI_STA_APSD_BE_MASK 0x8 /* APSD AC_BE mask */
+#define WME_QI_STA_APSD_BE_SHIFT 3 /* APSD AC_BE shift */
+#define WME_QI_STA_APSD_BK_MASK 0x4 /* APSD AC_BK mask */
+#define WME_QI_STA_APSD_BK_SHIFT 2 /* APSD AC_BK shift */
+#define WME_QI_STA_APSD_VI_MASK 0x2 /* APSD AC_VI mask */
+#define WME_QI_STA_APSD_VI_SHIFT 1 /* APSD AC_VI shift */
+#define WME_QI_STA_APSD_VO_MASK 0x1 /* APSD AC_VO mask */
+#define WME_QI_STA_APSD_VO_SHIFT 0 /* APSD AC_VO shift */
+
+/* ACI */
+#define EDCF_AIFSN_MIN 1 /* AIFSN minimum value */
+#define EDCF_AIFSN_MAX 15 /* AIFSN maximum value */
+#define EDCF_AIFSN_MASK 0x0f /* AIFSN mask */
+#define EDCF_ACM_MASK 0x10 /* ACM mask */
+#define EDCF_ACI_MASK 0x60 /* ACI mask */
+#define EDCF_ACI_SHIFT 5 /* ACI shift */
+#define EDCF_AIFSN_SHIFT 12 /* 4 MSB(0xFFF) in ifs_ctl for AC idx */
+
+/* ECW */
+#define EDCF_ECW_MIN 0 /* cwmin/cwmax exponent minimum value */
+#define EDCF_ECW_MAX 15 /* cwmin/cwmax exponent maximum value */
+#define EDCF_ECW2CW(exp) ((1 << (exp)) - 1)
+#define EDCF_ECWMIN_MASK 0x0f /* cwmin exponent form mask */
+#define EDCF_ECWMAX_MASK 0xf0 /* cwmax exponent form mask */
+#define EDCF_ECWMAX_SHIFT 4 /* cwmax exponent form shift */
+
+/* TXOP */
+#define EDCF_TXOP_MIN 0 /* TXOP minimum value */
+#define EDCF_TXOP_MAX 65535 /* TXOP maximum value */
+#define EDCF_TXOP2USEC(txop) ((txop) << 5)
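+
+/* Illustrative sketch (editor's addition): decoding an edcf_acparam_t with
+ * the ACI/ECW/TXOP masks above. CWmin/CWmax come from the ECW exponents and
+ * TXOP is converted from 32us units; TXOP is carried LS-octet first, so the
+ * direct read below assumes a little-endian host.
+ */
+static inline void
+edcf_acparam_decode(const edcf_acparam_t *ac, uint8 *aifsn,
+	uint16 *cwmin, uint16 *cwmax, uint32 *txop_usec)
+{
+	*aifsn = ac->ACI & EDCF_AIFSN_MASK;
+	*cwmin = EDCF_ECW2CW(ac->ECW & EDCF_ECWMIN_MASK);
+	*cwmax = EDCF_ECW2CW((ac->ECW & EDCF_ECWMAX_MASK) >> EDCF_ECWMAX_SHIFT);
+	*txop_usec = EDCF_TXOP2USEC(ac->TXOP);
+}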
+
+/* Default BE ACI value for non-WME connection STA */
+#define NON_EDCF_AC_BE_ACI_STA 0x02
+
+/* Default EDCF parameters that AP advertises for STA to use; WMM draft Table 12 */
+#define EDCF_AC_BE_ACI_STA 0x03 /* STA ACI value for best effort AC */
+#define EDCF_AC_BE_ECW_STA 0xA4 /* STA ECW value for best effort AC */
+#define EDCF_AC_BE_TXOP_STA 0x0000 /* STA TXOP value for best effort AC */
+#define EDCF_AC_BK_ACI_STA 0x27 /* STA ACI value for background AC */
+#define EDCF_AC_BK_ECW_STA 0xA4 /* STA ECW value for background AC */
+#define EDCF_AC_BK_TXOP_STA 0x0000 /* STA TXOP value for background AC */
+#define EDCF_AC_VI_ACI_STA 0x42 /* STA ACI value for video AC */
+#define EDCF_AC_VI_ECW_STA 0x43 /* STA ECW value for video AC */
+#define EDCF_AC_VI_TXOP_STA 0x005e /* STA TXOP value for video AC */
+#define EDCF_AC_VO_ACI_STA 0x62 /* STA ACI value for audio AC */
+#define EDCF_AC_VO_ECW_STA 0x32 /* STA ECW value for audio AC */
+#define EDCF_AC_VO_TXOP_STA 0x002f /* STA TXOP value for audio AC */
+
+/* Default EDCF parameters that AP uses; WMM draft Table 14 */
+#define EDCF_AC_BE_ACI_AP 0x03 /* AP ACI value for best effort AC */
+#define EDCF_AC_BE_ECW_AP 0x64 /* AP ECW value for best effort AC */
+#define EDCF_AC_BE_TXOP_AP 0x0000 /* AP TXOP value for best effort AC */
+#define EDCF_AC_BK_ACI_AP 0x27 /* AP ACI value for background AC */
+#define EDCF_AC_BK_ECW_AP 0xA4 /* AP ECW value for background AC */
+#define EDCF_AC_BK_TXOP_AP 0x0000 /* AP TXOP value for background AC */
+#define EDCF_AC_VI_ACI_AP 0x41 /* AP ACI value for video AC */
+#define EDCF_AC_VI_ECW_AP 0x43 /* AP ECW value for video AC */
+#define EDCF_AC_VI_TXOP_AP 0x005e /* AP TXOP value for video AC */
+#define EDCF_AC_VO_ACI_AP 0x61 /* AP ACI value for audio AC */
+#define EDCF_AC_VO_ECW_AP 0x32 /* AP ECW value for audio AC */
+#define EDCF_AC_VO_TXOP_AP 0x002f /* AP TXOP value for audio AC */
+
+/** EDCA Parameter IE */
+BWL_PRE_PACKED_STRUCT struct edca_param_ie {
+ uint8 qosinfo;
+ uint8 rsvd;
+ edcf_acparam_t acparam[AC_COUNT];
+} BWL_POST_PACKED_STRUCT;
+typedef struct edca_param_ie edca_param_ie_t;
+#define EDCA_PARAM_IE_LEN 18 /* EDCA Parameter IE length */
+
+/** QoS Capability IE */
+BWL_PRE_PACKED_STRUCT struct qos_cap_ie {
+ uint8 qosinfo;
+} BWL_POST_PACKED_STRUCT;
+typedef struct qos_cap_ie qos_cap_ie_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_qbss_load_ie {
+ uint8 id; /* 11, DOT11_MNG_QBSS_LOAD_ID */
+ uint8 length;
+ uint16 station_count; /* total number of STAs associated */
+ uint8 channel_utilization; /* percentage of time (normalized to 255) the QAP sensed the medium was busy */
+ uint16 aac; /* available admission capacity */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_qbss_load_ie dot11_qbss_load_ie_t;
+#define BSS_LOAD_IE_SIZE 7 /* BSS load IE size */
+
+#define WLC_QBSS_LOAD_CHAN_FREE_MAX 0xff /* max for channel free score */
+
+/* Estimated Service Parameters (ESP) IE - 802.11-2016 9.4.2.174 */
+typedef BWL_PRE_PACKED_STRUCT struct dot11_esp_ie {
+ uint8 id;
+ uint8 length;
+ uint8 id_ext;
+ /* variable len info */
+ uint8 esp_info_lists[];
+} BWL_POST_PACKED_STRUCT dot11_esp_ie_t;
+
+#define DOT11_ESP_IE_HDR_SIZE (OFFSETOF(dot11_esp_ie_t, esp_info_lists))
+
+/* ESP Information list - 802.11-2016 9.4.2.174 */
+typedef BWL_PRE_PACKED_STRUCT struct dot11_esp_ie_info_list {
+ /* access category, data format, BA window size */
+ uint8 ac_df_baws;
+ /* estimated air time fraction */
+ uint8 eat_frac;
+ /* data PPDU duration target (50us units) */
+ uint8 ppdu_dur;
+} BWL_POST_PACKED_STRUCT dot11_esp_ie_info_list_t;
+
+#define DOT11_ESP_IE_INFO_LIST_SIZE (sizeof(dot11_esp_ie_info_list_t))
+
+#define DOT11_ESP_NBR_INFO_LISTS 4u /* max nbr of esp information lists */
+#define DOT11_ESP_INFO_LIST_AC_BK 0u /* access category of esp information list AC_BK */
+#define DOT11_ESP_INFO_LIST_AC_BE 1u /* access category of esp information list AC_BE */
+#define DOT11_ESP_INFO_LIST_AC_VI 2u /* access category of esp information list AC_VI */
+#define DOT11_ESP_INFO_LIST_AC_VO 3u /* access category of esp information list AC_VO */
+
+#define DOT11_ESP_INFO_LIST_DF_MASK 0x18 /* Data Format Mask */
+#define DOT11_ESP_INFO_LIST_BAWS_MASK 0xE0 /* BA window size mask */
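+
+/* Illustrative sketch (editor's addition): splitting the ac_df_baws octet of
+ * an ESP info list. The DF/BAWS masks come from above; this header defines
+ * no access-category mask or shift values, so the 0x07 mask and the shifts
+ * below are assumptions derived from the mask layout.
+ */
+static inline void
+esp_info_list_decode(const dot11_esp_ie_info_list_t *il,
+	uint8 *ac, uint8 *df, uint8 *baws)
+{
+	*ac = il->ac_df_baws & 0x07;	/* assumed AC subfield, bits 0-2 */
+	*df = (il->ac_df_baws & DOT11_ESP_INFO_LIST_DF_MASK) >> 3;
+	*baws = (il->ac_df_baws & DOT11_ESP_INFO_LIST_BAWS_MASK) >> 5;
+}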
+
+/* nom_msdu_size */
+#define FIXED_MSDU_SIZE 0x8000 /* MSDU size is fixed */
+#define MSDU_SIZE_MASK 0x7fff /* (Nominal or fixed) MSDU size */
+
+/* surplus_bandwidth */
+/* Represented as 3 bits of integer, binary point, 13 bits fraction */
+#define INTEGER_SHIFT 13 /* integer shift */
+#define FRACTION_MASK 0x1FFF /* fraction mask */
+
+/** Management Notification Frame */
+BWL_PRE_PACKED_STRUCT struct dot11_management_notification {
+ uint8 category; /* DOT11_ACTION_NOTIFICATION */
+ uint8 action;
+ uint8 token;
+ uint8 status;
+ uint8 data[1]; /* Elements */
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_MGMT_NOTIFICATION_LEN 4 /* Fixed length */
+
+/** Timeout Interval IE */
+BWL_PRE_PACKED_STRUCT struct ti_ie {
+ uint8 ti_type;
+ uint32 ti_val;
+} BWL_POST_PACKED_STRUCT;
+typedef struct ti_ie ti_ie_t;
+#define TI_TYPE_REASSOC_DEADLINE 1
+#define TI_TYPE_KEY_LIFETIME 2
+
+#ifndef CISCO_AIRONET_OUI
+#define CISCO_AIRONET_OUI "\x00\x40\x96" /* Cisco AIRONET OUI */
+#endif
+/* QoS FastLane IE. */
+BWL_PRE_PACKED_STRUCT struct ccx_qfl_ie {
+ uint8 id; /* 221, DOT11_MNG_VS_ID */
+ uint8 length; /* 5 */
+ uint8 oui[3]; /* 00:40:96 */
+ uint8 type; /* 11 */
+ uint8 data;
+} BWL_POST_PACKED_STRUCT;
+typedef struct ccx_qfl_ie ccx_qfl_ie_t;
+#define CCX_QFL_IE_TYPE 11
+#define CCX_QFL_ENABLE_SHIFT 5
+#define CCX_QFL_ENALBE (1 << CCX_QFL_ENABLE_SHIFT)
+
+/* WME Action Codes */
+#define WME_ADDTS_REQUEST 0 /* WME ADDTS request */
+#define WME_ADDTS_RESPONSE 1 /* WME ADDTS response */
+#define WME_DELTS_REQUEST 2 /* WME DELTS request */
+
+/* WME Setup Response Status Codes */
+#define WME_ADMISSION_ACCEPTED 0 /* WME admission accepted */
+#define WME_INVALID_PARAMETERS 1 /* WME invalid parameters */
+#define WME_ADMISSION_REFUSED 3 /* WME admission refused */
+
+/* Macro to take a pointer to a beacon or probe response
+ * body and return the char* pointer to the SSID info element
+ */
+#define BCN_PRB_SSID(body) ((char*)(body) + DOT11_BCN_PRB_LEN)
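+
+/* Illustrative sketch (editor's addition): stepping past the fixed beacon /
+ * probe response fields to the first tagged element. Only meaningful when
+ * body_len exceeds DOT11_BCN_PRB_LEN; per-element bounds checks remain the
+ * caller's job.
+ */
+static inline char *
+bcn_prb_first_ie(void *body, uint body_len)
+{
+	return (body_len > DOT11_BCN_PRB_LEN) ? BCN_PRB_SSID(body) : NULL;
+}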
+
+/* Authentication frame payload constants */
+#define DOT11_OPEN_SYSTEM 0 /* d11 open authentication */
+#define DOT11_SHARED_KEY 1 /* d11 shared authentication */
+#define DOT11_FAST_BSS 2 /* d11 fast bss authentication */
+#define DOT11_SAE 3 /* d11 simultaneous authentication of equals */
+#define DOT11_FILS_SKEY 4 /* d11 fils shared key authentication w/o pfs */
+#define DOT11_FILS_SKEY_PFS 5 /* d11 fils shared key authentication w/ pfs */
+#define DOT11_FILS_PKEY 6 /* d11 fils public key authentication */
+#define DOT11_MAX_AUTH_ALG DOT11_FILS_PKEY /* maximum value of an auth alg */
+#define DOT11_CHALLENGE_LEN 128 /* d11 challenge text length */
+
+/* Frame control macros */
+#define FC_PVER_MASK 0x3 /* PVER mask */
+#define FC_PVER_SHIFT 0 /* PVER shift */
+#define FC_TYPE_MASK 0xC /* type mask */
+#define FC_TYPE_SHIFT 2 /* type shift */
+#define FC_SUBTYPE_MASK 0xF0 /* subtype mask */
+#define FC_SUBTYPE_SHIFT 4 /* subtype shift */
+#define FC_TODS 0x100 /* to DS */
+#define FC_TODS_SHIFT 8 /* to DS shift */
+#define FC_FROMDS 0x200 /* from DS */
+#define FC_FROMDS_SHIFT 9 /* from DS shift */
+#define FC_MOREFRAG 0x400 /* more frag. */
+#define FC_MOREFRAG_SHIFT 10 /* more frag. shift */
+#define FC_RETRY 0x800 /* retry */
+#define FC_RETRY_SHIFT 11 /* retry shift */
+#define FC_PM 0x1000 /* PM */
+#define FC_PM_SHIFT 12 /* PM shift */
+#define FC_MOREDATA 0x2000 /* more data */
+#define FC_MOREDATA_SHIFT 13 /* more data shift */
+#define FC_WEP 0x4000 /* WEP */
+#define FC_WEP_SHIFT 14 /* WEP shift */
+#define FC_ORDER 0x8000 /* order */
+#define FC_ORDER_SHIFT 15 /* order shift */
+
+/* sequence control macros */
+#define SEQNUM_SHIFT 4 /* seq. number shift */
+#define SEQNUM_MAX 0x1000 /* max seqnum + 1 */
+#define FRAGNUM_MASK 0xF /* frag. number mask */
+
+/* Frame Control type/subtype defs */
+
+/* FC Types */
+#define FC_TYPE_MNG 0 /* management type */
+#define FC_TYPE_CTL 1 /* control type */
+#define FC_TYPE_DATA 2 /* data type */
+
+/* Management Subtypes */
+#define FC_SUBTYPE_ASSOC_REQ 0 /* assoc. request */
+#define FC_SUBTYPE_ASSOC_RESP 1 /* assoc. response */
+#define FC_SUBTYPE_REASSOC_REQ 2 /* reassoc. request */
+#define FC_SUBTYPE_REASSOC_RESP 3 /* reassoc. response */
+#define FC_SUBTYPE_PROBE_REQ 4 /* probe request */
+#define FC_SUBTYPE_PROBE_RESP 5 /* probe response */
+#define FC_SUBTYPE_BEACON 8 /* beacon */
+#define FC_SUBTYPE_ATIM 9 /* ATIM */
+#define FC_SUBTYPE_DISASSOC 10 /* disassoc. */
+#define FC_SUBTYPE_AUTH 11 /* authentication */
+#define FC_SUBTYPE_DEAUTH 12 /* de-authentication */
+#define FC_SUBTYPE_ACTION 13 /* action */
+#define FC_SUBTYPE_ACTION_NOACK 14 /* action no-ack */
+
+/* Control Subtypes */
+#define FC_SUBTYPE_TRIGGER 2 /* Trigger frame */
+#define FC_SUBTYPE_NDPA 5 /* NDPA */
+#define FC_SUBTYPE_CTL_WRAPPER 7 /* Control Wrapper */
+#define FC_SUBTYPE_BLOCKACK_REQ 8 /* Block Ack Req */
+#define FC_SUBTYPE_BLOCKACK 9 /* Block Ack */
+#define FC_SUBTYPE_PS_POLL 10 /* PS poll */
+#define FC_SUBTYPE_RTS 11 /* RTS */
+#define FC_SUBTYPE_CTS 12 /* CTS */
+#define FC_SUBTYPE_ACK 13 /* ACK */
+#define FC_SUBTYPE_CF_END 14 /* CF-END */
+#define FC_SUBTYPE_CF_END_ACK 15 /* CF-END ACK */
+
+/* Data Subtypes */
+#define FC_SUBTYPE_DATA 0 /* Data */
+#define FC_SUBTYPE_DATA_CF_ACK 1 /* Data + CF-ACK */
+#define FC_SUBTYPE_DATA_CF_POLL 2 /* Data + CF-Poll */
+#define FC_SUBTYPE_DATA_CF_ACK_POLL 3 /* Data + CF-Ack + CF-Poll */
+#define FC_SUBTYPE_NULL 4 /* Null */
+#define FC_SUBTYPE_CF_ACK 5 /* CF-Ack */
+#define FC_SUBTYPE_CF_POLL 6 /* CF-Poll */
+#define FC_SUBTYPE_CF_ACK_POLL 7 /* CF-Ack + CF-Poll */
+#define FC_SUBTYPE_QOS_DATA 8 /* QoS Data */
+#define FC_SUBTYPE_QOS_DATA_CF_ACK 9 /* QoS Data + CF-Ack */
+#define FC_SUBTYPE_QOS_DATA_CF_POLL 10 /* QoS Data + CF-Poll */
+#define FC_SUBTYPE_QOS_DATA_CF_ACK_POLL 11 /* QoS Data + CF-Ack + CF-Poll */
+#define FC_SUBTYPE_QOS_NULL 12 /* QoS Null */
+#define FC_SUBTYPE_QOS_CF_POLL 14 /* QoS CF-Poll */
+#define FC_SUBTYPE_QOS_CF_ACK_POLL 15 /* QoS CF-Ack + CF-Poll */
+
+/* Data Subtype Groups */
+#define FC_SUBTYPE_ANY_QOS(s) (((s) & 8) != 0)
+#define FC_SUBTYPE_ANY_NULL(s) (((s) & 4) != 0)
+#define FC_SUBTYPE_ANY_CF_POLL(s) (((s) & 2) != 0)
+#define FC_SUBTYPE_ANY_CF_ACK(s) (((s) & 1) != 0)
+#define FC_SUBTYPE_ANY_PSPOLL(s) (((s) & 10) != 0)
+
+/* Type/Subtype Combos */
+#define FC_KIND_MASK (FC_TYPE_MASK | FC_SUBTYPE_MASK) /* FC kind mask */
+
+#define FC_KIND(t, s) (((t) << FC_TYPE_SHIFT) | ((s) << FC_SUBTYPE_SHIFT)) /* FC kind */
+
+#define FC_SUBTYPE(fc) (((fc) & FC_SUBTYPE_MASK) >> FC_SUBTYPE_SHIFT) /* Subtype from FC */
+#define FC_TYPE(fc) (((fc) & FC_TYPE_MASK) >> FC_TYPE_SHIFT) /* Type from FC */
+
+#define FC_ASSOC_REQ FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ASSOC_REQ) /* assoc. request */
+#define FC_ASSOC_RESP FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ASSOC_RESP) /* assoc. response */
+#define FC_REASSOC_REQ FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_REASSOC_REQ) /* reassoc. request */
+#define FC_REASSOC_RESP FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_REASSOC_RESP) /* reassoc. response */
+#define FC_PROBE_REQ FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_PROBE_REQ) /* probe request */
+#define FC_PROBE_RESP FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_PROBE_RESP) /* probe response */
+#define FC_BEACON FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_BEACON) /* beacon */
+#define FC_ATIM FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ATIM) /* ATIM */
+#define FC_DISASSOC FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_DISASSOC) /* disassoc */
+#define FC_AUTH FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_AUTH) /* authentication */
+#define FC_DEAUTH FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_DEAUTH) /* deauthentication */
+#define FC_ACTION FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ACTION) /* action */
+#define FC_ACTION_NOACK FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ACTION_NOACK) /* action no-ack */
+
+#define FC_CTL_TRIGGER FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_TRIGGER) /* Trigger frame */
+#define FC_CTL_NDPA FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_NDPA) /* NDPA frame */
+#define FC_CTL_WRAPPER FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CTL_WRAPPER) /* Control Wrapper */
+#define FC_BLOCKACK_REQ FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_BLOCKACK_REQ) /* Block Ack Req */
+#define FC_BLOCKACK FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_BLOCKACK) /* Block Ack */
+#define FC_PS_POLL FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_PS_POLL) /* PS poll */
+#define FC_RTS FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_RTS) /* RTS */
+#define FC_CTS FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CTS) /* CTS */
+#define FC_ACK FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_ACK) /* ACK */
+#define FC_CF_END FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CF_END) /* CF-END */
+#define FC_CF_END_ACK FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CF_END_ACK) /* CF-END ACK */
+
+#define FC_DATA FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_DATA) /* data */
+#define FC_NULL_DATA FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_NULL) /* null data */
+#define FC_DATA_CF_ACK FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_DATA_CF_ACK) /* data CF ACK */
+#define FC_QOS_DATA FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_QOS_DATA) /* QoS data */
+#define FC_QOS_NULL FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_QOS_NULL) /* QoS null */
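+
+/* Illustrative sketch (editor's addition): classifying a frame from a host
+ * byte order frame control value using the kind/type macros above.
+ */
+static inline bool
+fc_is_beacon(uint16 fc)
+{
+	return (fc & FC_KIND_MASK) == FC_BEACON;
+}
+
+static inline bool
+fc_is_qos_data(uint16 fc)
+{
+	return FC_TYPE(fc) == FC_TYPE_DATA && FC_SUBTYPE_ANY_QOS(FC_SUBTYPE(fc));
+}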
+
+/* QoS Control Field */
+
+/* 802.1D Priority */
+#define QOS_PRIO_SHIFT 0 /* QoS priority shift */
+#define QOS_PRIO_MASK 0x0007 /* QoS priority mask */
+#define QOS_PRIO(qos) (((qos) & QOS_PRIO_MASK) >> QOS_PRIO_SHIFT) /* QoS priority */
+
+/* Traffic Identifier */
+#define QOS_TID_SHIFT 0 /* QoS TID shift */
+#define QOS_TID_MASK 0x000f /* QoS TID mask */
+#define QOS_TID(qos) (((qos) & QOS_TID_MASK) >> QOS_TID_SHIFT) /* QoS TID */
+
+/* End of Service Period (U-APSD) */
+#define QOS_EOSP_SHIFT 4 /* QoS End of Service Period shift */
+#define QOS_EOSP_MASK 0x0010 /* QoS End of Service Period mask */
+#define QOS_EOSP(qos) (((qos) & QOS_EOSP_MASK) >> QOS_EOSP_SHIFT) /* Qos EOSP */
+
+/* Ack Policy */
+#define QOS_ACK_NORMAL_ACK 0 /* Normal Ack */
+#define QOS_ACK_NO_ACK 1 /* No Ack (eg mcast) */
+#define QOS_ACK_NO_EXP_ACK 2 /* No Explicit Ack */
+#define QOS_ACK_BLOCK_ACK 3 /* Block Ack */
+#define QOS_ACK_SHIFT 5 /* QoS ACK shift */
+#define QOS_ACK_MASK 0x0060 /* QoS ACK mask */
+#define QOS_ACK(qos) (((qos) & QOS_ACK_MASK) >> QOS_ACK_SHIFT) /* QoS ACK */
+
+/* A-MSDU flag */
+#define QOS_AMSDU_SHIFT 7 /* AMSDU shift */
+#define QOS_AMSDU_MASK 0x0080 /* AMSDU mask */
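+
+/* Illustrative sketch (editor's addition): extracting the commonly used
+ * subfields from a host byte order QoS Control value with the macros above.
+ */
+static inline void
+qos_ctl_decode(uint16 qos, uint8 *tid, bool *eosp, uint8 *ack_policy,
+	bool *amsdu)
+{
+	*tid = (uint8)QOS_TID(qos);
+	*eosp = QOS_EOSP(qos) != 0;
+	*ack_policy = (uint8)QOS_ACK(qos);
+	*amsdu = (qos & QOS_AMSDU_MASK) != 0;
+}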
+
+/* QOS Mesh Flags */
+#define QOS_MESH_CTL_FLAG 0x0100u /* Mesh Control Present */
+#define QOS_MESH_PSL_FLAG 0x0200u /* Mesh Power Save Level */
+#define QOS_MESH_RSPI_FLAG 0x0400u /* Mesh RSPI */
+
+/* QOS Mesh Accessor macros */
+#define QOS_MESH_CTL(qos) (((qos) & QOS_MESH_CTL_FLAG) != 0)
+#define QOS_MESH_PSL(qos) (((qos) & QOS_MESH_PSL_FLAG) != 0)
+#define QOS_MESH_RSPI(qos) (((qos) & QOS_MESH_RSPI_FLAG) != 0)
+
+/* Management Frames */
+
+/* Management Frame Constants */
+
+/* Fixed fields */
+#define DOT11_MNG_AUTH_ALGO_LEN 2 /* d11 management auth. algo. length */
+#define DOT11_MNG_AUTH_SEQ_LEN 2 /* d11 management auth. seq. length */
+#define DOT11_MNG_BEACON_INT_LEN 2 /* d11 management beacon interval length */
+#define DOT11_MNG_CAP_LEN 2 /* d11 management cap. length */
+#define DOT11_MNG_AP_ADDR_LEN 6 /* d11 management AP address length */
+#define DOT11_MNG_LISTEN_INT_LEN 2 /* d11 management listen interval length */
+#define DOT11_MNG_REASON_LEN 2 /* d11 management reason length */
+#define DOT11_MNG_AID_LEN 2 /* d11 management AID length */
+#define DOT11_MNG_STATUS_LEN 2 /* d11 management status length */
+#define DOT11_MNG_TIMESTAMP_LEN 8 /* d11 management timestamp length */
+
+/* DUR/ID field in assoc resp is 0xc000 | AID */
+#define DOT11_AID_MASK 0x3fff /* d11 AID mask */
+#define DOT11_AID_OCTET_VAL_SHIFT 3u /* AID octet value shift */
+#define DOT11_AID_BIT_POS_IN_OCTET 0x07 /* AID bit position in octet */
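+
+/* Illustrative sketch (editor's addition): locating a station's traffic
+ * indication bit from its AID using the shift/mask above. The index is into
+ * the full virtual bitmap; callers must account for the partial virtual
+ * bitmap's offset (see the TIM definitions later in this file).
+ */
+static inline bool
+tim_aid_bit_is_set(const uint8 *bitmap, uint16 aid)
+{
+	uint16 n = aid & DOT11_AID_MASK;
+
+	return (bitmap[n >> DOT11_AID_OCTET_VAL_SHIFT] &
+	        (1 << (n & DOT11_AID_BIT_POS_IN_OCTET))) != 0;
+}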
+
+/* Reason Codes */
+#define DOT11_RC_RESERVED 0 /* d11 RC reserved */
+#define DOT11_RC_UNSPECIFIED 1 /* Unspecified reason */
+#define DOT11_RC_AUTH_INVAL 2 /* Previous authentication no longer valid */
+#define DOT11_RC_DEAUTH_LEAVING 3 /* Deauthenticated because sending station
+ * is leaving (or has left) IBSS or ESS
+ */
+#define DOT11_RC_INACTIVITY 4 /* Disassociated due to inactivity */
+#define DOT11_RC_BUSY 5 /* Disassociated because AP is unable to handle
+ * all currently associated stations
+ */
+#define DOT11_RC_INVAL_CLASS_2 6 /* Class 2 frame received from
+ * nonauthenticated station
+ */
+#define DOT11_RC_INVAL_CLASS_3 7 /* Class 3 frame received from
+ * nonassociated station
+ */
+#define DOT11_RC_DISASSOC_LEAVING 8 /* Disassociated because sending station is
+ * leaving (or has left) BSS
+ */
+#define DOT11_RC_NOT_AUTH 9 /* Station requesting (re)association is not
+ * authenticated with responding station
+ */
+#define DOT11_RC_BAD_PC 10 /* Unacceptable power capability element */
+#define DOT11_RC_BAD_CHANNELS 11 /* Unacceptable supported channels element */
+
+/* 12 is unused by STA but could be used by AP/GO */
+#define DOT11_RC_DISASSOC_BTM 12 /* Disassociated due to BSS Transition Mgmt */
+
+/* 13-23 are WPA/802.11i reason codes defined in wpa.h */
+
+/* 32-39 are QSTA specific reasons added in 11e */
+#define DOT11_RC_UNSPECIFIED_QOS 32 /* unspecified QoS-related reason */
+#define DOT11_RC_INSUFFCIENT_BW 33 /* QAP lacks sufficient bandwidth */
+#define DOT11_RC_EXCESSIVE_FRAMES 34 /* excessive number of frames requiring ack */
+#define DOT11_RC_TX_OUTSIDE_TXOP 35 /* transmitting outside the limits of txop */
+#define DOT11_RC_LEAVING_QBSS 36 /* QSTA is leaving the QBSS (or resetting) */
+#define DOT11_RC_BAD_MECHANISM 37 /* does not want to use the mechanism */
+#define DOT11_RC_SETUP_NEEDED 38 /* mechanism needs a setup */
+#define DOT11_RC_TIMEOUT 39 /* timeout */
+
+#define DOT11_RC_MESH_PEERING_CANCELLED 52
+#define DOT11_RC_MESH_MAX_PEERS 53
+#define DOT11_RC_MESH_CONFIG_POLICY_VIOLN 54
+#define DOT11_RC_MESH_CLOSE_RECVD 55
+#define DOT11_RC_MESH_MAX_RETRIES 56
+#define DOT11_RC_MESH_CONFIRM_TIMEOUT 57
+#define DOT11_RC_MESH_INVALID_GTK 58
+#define DOT11_RC_MESH_INCONSISTENT_PARAMS 59
+
+#define DOT11_RC_MESH_INVALID_SEC_CAP 60
+#define DOT11_RC_MESH_PATHERR_NOPROXYINFO 61
+#define DOT11_RC_MESH_PATHERR_NOFWINFO 62
+#define DOT11_RC_MESH_PATHERR_DSTUNREACH 63
+#define DOT11_RC_MESH_MBSSMAC_EXISTS 64
+#define DOT11_RC_MESH_CHANSWITCH_REGREQ 65
+#define DOT11_RC_MESH_CHANSWITCH_UNSPEC 66
+
+#define DOT11_RC_POOR_RSSI_CONDITIONS 71 /* Poor RSSI */
+#define DOT11_RC_MAX 71 /* Reason codes > 71 are reserved */
+
+#define DOT11_RC_TDLS_PEER_UNREACH 25
+#define DOT11_RC_TDLS_DOWN_UNSPECIFIED 26
+
+/* Status Codes */
+#define DOT11_SC_SUCCESS 0 /* Successful */
+#define DOT11_SC_FAILURE 1 /* Unspecified failure */
+#define DOT11_SC_TDLS_WAKEUP_SCH_ALT 2 /* TDLS wakeup schedule rejected but alternative */
+ /* schedule provided */
+#define DOT11_SC_TDLS_WAKEUP_SCH_REJ 3 /* TDLS wakeup schedule rejected */
+#define DOT11_SC_TDLS_SEC_DISABLED 5 /* TDLS Security disabled */
+#define DOT11_SC_LIFETIME_REJ 6 /* Unacceptable lifetime */
+#define DOT11_SC_NOT_SAME_BSS 7 /* Not in same BSS */
+#define DOT11_SC_CAP_MISMATCH 10 /* Cannot support all requested
+ * capabilities in the Capability
+ * Information field
+ */
+#define DOT11_SC_REASSOC_FAIL 11 /* Reassociation denied due to inability
+ * to confirm that association exists
+ */
+#define DOT11_SC_ASSOC_FAIL 12 /* Association denied due to reason
+ * outside the scope of this standard
+ */
+#define DOT11_SC_AUTH_MISMATCH 13 /* Responding station does not support
+ * the specified authentication
+ * algorithm
+ */
+#define DOT11_SC_AUTH_SEQ 14 /* Received an Authentication frame
+ * with authentication transaction
+ * sequence number out of expected
+ * sequence
+ */
+#define DOT11_SC_AUTH_CHALLENGE_FAIL 15 /* Authentication rejected because of
+ * challenge failure
+ */
+#define DOT11_SC_AUTH_TIMEOUT 16 /* Authentication rejected due to timeout
+ * waiting for next frame in sequence
+ */
+#define DOT11_SC_ASSOC_BUSY_FAIL 17 /* Association denied because AP is
+ * unable to handle additional
+ * associated stations
+ */
+#define DOT11_SC_ASSOC_RATE_MISMATCH 18 /* Association denied due to requesting
+ * station not supporting all of the
+ * data rates in the BSSBasicRateSet
+ * parameter
+ */
+#define DOT11_SC_ASSOC_SHORT_REQUIRED 19 /* Association denied due to requesting
+ * station not supporting the Short
+ * Preamble option
+ */
+#define DOT11_SC_ASSOC_PBCC_REQUIRED 20 /* Association denied due to requesting
+ * station not supporting the PBCC
+ * Modulation option
+ */
+#define DOT11_SC_ASSOC_AGILITY_REQUIRED 21 /* Association denied due to requesting
+ * station not supporting the Channel
+ * Agility option
+ */
+#define DOT11_SC_ASSOC_SPECTRUM_REQUIRED 22 /* Association denied because Spectrum
+ * Management capability is required.
+ */
+#define DOT11_SC_ASSOC_BAD_POWER_CAP 23 /* Association denied because the info
+ * in the Power Cap element is
+ * unacceptable.
+ */
+#define DOT11_SC_ASSOC_BAD_SUP_CHANNELS 24 /* Association denied because the info
+ * in the Supported Channel element is
+ * unacceptable
+ */
+#define DOT11_SC_ASSOC_SHORTSLOT_REQUIRED 25 /* Association denied due to requesting
+ * station not supporting the Short Slot
+ * Time option
+ */
+#define DOT11_SC_ASSOC_DSSSOFDM_REQUIRED 26 /* Association denied because requesting station
+ * does not support the DSSS-OFDM option
+ */
+#define DOT11_SC_ASSOC_HT_REQUIRED 27 /* Association denied because the requesting
+ * station does not support HT features
+ */
+#define DOT11_SC_ASSOC_R0KH_UNREACHABLE 28 /* Association denied due to AP
+ * being unable to reach the R0 Key Holder
+ */
+#define DOT11_SC_ASSOC_TRY_LATER 30 /* Association denied temporarily, try again later
+ */
+#define DOT11_SC_ASSOC_MFP_VIOLATION 31 /* Association denied due to Robust Management
+ * frame policy violation
+ */
+
+#define DOT11_SC_POOR_RSSI_CONDN 34 /* Association denied due to poor RSSI */
+#define DOT11_SC_DECLINED 37 /* request declined */
+#define DOT11_SC_INVALID_PARAMS 38 /* One or more params have invalid values */
+#define DOT11_SC_INVALID_PAIRWISE_CIPHER 42 /* invalid pairwise cipher */
+#define DOT11_SC_INVALID_AKMP 43 /* Association denied due to invalid AKMP */
+#define DOT11_SC_INVALID_RSNIE_CAP 45 /* invalid RSN IE capabilities */
+#define DOT11_SC_DLS_NOT_ALLOWED 48 /* DLS is not allowed in the BSS by policy */
+#define DOT11_SC_INVALID_PMKID 53 /* Association denied due to invalid PMKID */
+#define DOT11_SC_INVALID_MDID 54 /* Association denied due to invalid MDID */
+#define DOT11_SC_INVALID_FTIE 55 /* Association denied due to invalid FTIE */
+
+#define DOT11_SC_ADV_PROTO_NOT_SUPPORTED 59 /* ad proto not supported */
+#define DOT11_SC_NO_OUTSTAND_REQ 60 /* no outstanding req */
+#define DOT11_SC_RSP_NOT_RX_FROM_SERVER 61 /* no response from server */
+#define DOT11_SC_TIMEOUT 62 /* timeout */
+#define DOT11_SC_QUERY_RSP_TOO_LARGE 63 /* query rsp too large */
+#define DOT11_SC_SERVER_UNREACHABLE 65 /* server unreachable */
+
+#define DOT11_SC_UNEXP_MSG 70 /* Unexpected message */
+#define DOT11_SC_INVALID_SNONCE 71 /* Invalid SNonce */
+#define DOT11_SC_INVALID_RSNIE 72 /* Invalid contents of RSNIE */
+
+#define DOT11_SC_ANTICLOG_TOCKEN_REQUIRED 76 /* Anti-clogging token required */
+#define DOT11_SC_INVALID_FINITE_CYCLIC_GRP 77 /* Invalid finite cyclic group */
+#define DOT11_SC_TRANSMIT_FAILURE 79 /* transmission failure */
+
+#define DOT11_SC_TCLAS_RESOURCES_EXHAUSTED 81u /* TCLAS resources exhausted */
+
+#define DOT11_SC_TCLAS_PROCESSING_TERMINATED 97 /* End traffic classification */
+
+#define DOT11_SC_ASSOC_VHT_REQUIRED 104 /* Association denied because the requesting
+ * station does not support VHT features.
+ */
+#define DOT11_SC_UNKNOWN_PASSWORD_IDENTIFIER 123u /* mismatch of password id */
+
+#define DOT11_SC_SAE_HASH_TO_ELEMENT 126u /* SAE Hash-to-element PWE required */
+#define DOT11_SC_SAE_PK 127u /* SAE PK required */
+
+/* Requested TCLAS processing has been terminated by the AP due to insufficient QoS capacity. */
+#define DOT11_SC_TCLAS_PROCESSING_TERMINATED_INSUFFICIENT_QOS 128u
+
+/* Requested TCLAS processing has been terminated by the AP due to conflict with
+ * higher layer QoS policies.
+ */
+#define DOT11_SC_TCLAS_PROCESSING_TERMINATED_POLICY_CONFLICT 129u
+
+/* Info Elts, length of INFORMATION portion of Info Elts */
+#define DOT11_MNG_DS_PARAM_LEN 1 /* d11 management DS parameter length */
+#define DOT11_MNG_IBSS_PARAM_LEN 2 /* d11 management IBSS parameter length */
+
+/* TIM Info element has 3 bytes fixed info in INFORMATION field,
+ * followed by 1 to 251 bytes of Partial Virtual Bitmap
+ */
+#define DOT11_MNG_TIM_FIXED_LEN 3 /* d11 management TIM fixed length */
+#define DOT11_MNG_TIM_DTIM_COUNT 0 /* d11 management DTIM count */
+#define DOT11_MNG_TIM_DTIM_PERIOD 1 /* d11 management DTIM period */
+#define DOT11_MNG_TIM_BITMAP_CTL 2 /* d11 management TIM BITMAP control */
+#define DOT11_MNG_TIM_PVB 3 /* d11 management TIM PVB */
+
+#define DOT11_MNG_TIM_BITMAP_CTL_BCMC_MASK 0x01 /* Mask for bcmc bit in tim bitmap ctrl */
+#define DOT11_MNG_TIM_BITMAP_CTL_PVBOFF_MASK 0xFE /* Mask for partial virtual bitmap */
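+
+/* Illustrative sketch (editor's addition): splitting the TIM Bitmap Control
+ * octet. Bits 1-7 hold N1/2, and N1 is even, so masking without shifting
+ * yields the bitmap offset N1 directly.
+ */
+static inline void
+tim_bitmap_ctl_decode(uint8 ctl, bool *bcmc, uint8 *n1)
+{
+	*bcmc = (ctl & DOT11_MNG_TIM_BITMAP_CTL_BCMC_MASK) != 0;
+	*n1 = ctl & DOT11_MNG_TIM_BITMAP_CTL_PVBOFF_MASK;
+}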
+
+/* TLV defines */
+#define TLV_TAG_OFF 0 /* tag offset */
+#define TLV_LEN_OFF 1 /* length offset */
+#define TLV_HDR_LEN 2 /* header length */
+#define TLV_BODY_OFF 2 /* body offset */
+#define TLV_BODY_LEN_MAX 255 /* max body length */
+#define TLV_EXT_HDR_LEN 3u /* extended IE header length */
+#define TLV_EXT_BODY_OFF 3u /* extended IE body offset */
+
+/* Management Frame Information Element IDs */
+enum dot11_tag_ids {
+ DOT11_MNG_SSID_ID = 0, /* d11 management SSID id */
+ DOT11_MNG_RATES_ID = 1, /* d11 management rates id */
+ DOT11_MNG_FH_PARMS_ID = 2, /* d11 management FH parameter id */
+ DOT11_MNG_DS_PARMS_ID = 3, /* d11 management DS parameter id */
+ DOT11_MNG_CF_PARMS_ID = 4, /* d11 management CF parameter id */
+ DOT11_MNG_TIM_ID = 5, /* d11 management TIM id */
+ DOT11_MNG_IBSS_PARMS_ID = 6, /* d11 management IBSS parameter id */
+ DOT11_MNG_COUNTRY_ID = 7, /* d11 management country id */
+ DOT11_MNG_HOPPING_PARMS_ID = 8, /* d11 management hopping parameter id */
+ DOT11_MNG_HOPPING_TABLE_ID = 9, /* d11 management hopping table id */
+ DOT11_MNG_FTM_SYNC_INFO_ID = 9, /* 11mc D4.3 */
+ DOT11_MNG_REQUEST_ID = 10, /* d11 management request id */
+ DOT11_MNG_QBSS_LOAD_ID = 11, /* d11 management QBSS Load id */
+ DOT11_MNG_EDCA_PARAM_ID = 12, /* 11E EDCA Parameter id */
+ DOT11_MNG_TSPEC_ID = 13, /* d11 management TSPEC id */
+ DOT11_MNG_TCLAS_ID = 14, /* d11 management TCLAS id */
+ DOT11_MNG_CHALLENGE_ID = 16, /* d11 management challenge id */
+ DOT11_MNG_PWR_CONSTRAINT_ID = 32, /* 11H PowerConstraint */
+ DOT11_MNG_PWR_CAP_ID = 33, /* 11H PowerCapability */
+ DOT11_MNG_TPC_REQUEST_ID = 34, /* 11H TPC Request */
+ DOT11_MNG_TPC_REPORT_ID = 35, /* 11H TPC Report */
+ DOT11_MNG_SUPP_CHANNELS_ID = 36, /* 11H Supported Channels */
+ DOT11_MNG_CHANNEL_SWITCH_ID = 37, /* 11H ChannelSwitch Announcement */
+ DOT11_MNG_MEASURE_REQUEST_ID = 38, /* 11H MeasurementRequest */
+ DOT11_MNG_MEASURE_REPORT_ID = 39, /* 11H MeasurementReport */
+ DOT11_MNG_QUIET_ID = 40, /* 11H Quiet */
+ DOT11_MNG_IBSS_DFS_ID = 41, /* 11H IBSS_DFS */
+ DOT11_MNG_ERP_ID = 42, /* d11 management ERP id */
+ DOT11_MNG_TS_DELAY_ID = 43, /* d11 management TS Delay id */
+ DOT11_MNG_TCLAS_PROC_ID = 44, /* d11 management TCLAS processing id */
+ DOT11_MNG_HT_CAP = 45, /* d11 mgmt HT cap id */
+ DOT11_MNG_QOS_CAP_ID = 46, /* 11E QoS Capability id */
+ DOT11_MNG_NONERP_ID = 47, /* d11 management NON-ERP id */
+ DOT11_MNG_RSN_ID = 48, /* d11 management RSN id */
+ DOT11_MNG_EXT_RATES_ID = 50, /* d11 management ext. rates id */
+ DOT11_MNG_AP_CHREP_ID = 51, /* 11k AP Channel report id */
+ DOT11_MNG_NEIGHBOR_REP_ID = 52, /* 11k & 11v Neighbor report id */
+ DOT11_MNG_RCPI_ID = 53, /* 11k RCPI */
+ DOT11_MNG_MDIE_ID = 54, /* 11r Mobility domain id */
+ DOT11_MNG_FTIE_ID = 55, /* 11r Fast Bss Transition id */
+ DOT11_MNG_FT_TI_ID = 56, /* 11r Timeout Interval id */
+ DOT11_MNG_RDE_ID = 57, /* 11r RIC Data Element id */
+ DOT11_MNG_REGCLASS_ID = 59, /* d11 management regulatory class id */
+ DOT11_MNG_EXT_CSA_ID = 60, /* d11 Extended CSA */
+ DOT11_MNG_HT_ADD = 61, /* d11 mgmt additional HT info */
+ DOT11_MNG_EXT_CHANNEL_OFFSET = 62, /* d11 mgmt ext channel offset */
+ DOT11_MNG_BSS_AVR_ACCESS_DELAY_ID = 63, /* 11k bss average access delay */
+ DOT11_MNG_ANTENNA_ID = 64, /* 11k antenna id */
+ DOT11_MNG_RSNI_ID = 65, /* 11k RSNI id */
+ DOT11_MNG_MEASUREMENT_PILOT_TX_ID = 66, /* 11k measurement pilot tx info id */
+ DOT11_MNG_BSS_AVAL_ADMISSION_CAP_ID = 67, /* 11k bss available admission capacity id */
+ DOT11_MNG_BSS_AC_ACCESS_DELAY_ID = 68, /* 11k bss AC access delay id */
+ DOT11_MNG_WAPI_ID = 68, /* d11 management WAPI id */
+ DOT11_MNG_TIME_ADVERTISE_ID = 69, /* 11p time advertisement */
+ DOT11_MNG_RRM_CAP_ID = 70, /* 11k radio measurement capability */
+ DOT11_MNG_MULTIPLE_BSSID_ID = 71, /* 11k multiple BSSID id */
+ DOT11_MNG_HT_BSS_COEXINFO_ID = 72, /* d11 mgmt OBSS Coexistence INFO */
+ DOT11_MNG_HT_BSS_CHANNEL_REPORT_ID = 73, /* d11 mgmt OBSS Intolerant Channel list */
+ DOT11_MNG_HT_OBSS_ID = 74, /* d11 mgmt OBSS HT info */
+ DOT11_MNG_MMIE_ID = 76, /* d11 mgmt MIC IE */
+ DOT11_MNG_NONTRANS_BSSID_CAP_ID = 83, /* 11k nontransmitted BSSID capability */
+ DOT11_MNG_MULTIPLE_BSSIDINDEX_ID = 85, /* 11k multiple BSSID index */
+ DOT11_MNG_FMS_DESCR_ID = 86, /* 11v FMS descriptor */
+ DOT11_MNG_FMS_REQ_ID = 87, /* 11v FMS request id */
+ DOT11_MNG_FMS_RESP_ID = 88, /* 11v FMS response id */
+ DOT11_MNG_BSS_MAX_IDLE_PERIOD_ID = 90, /* 11v bss max idle id */
+ DOT11_MNG_TFS_REQUEST_ID = 91, /* 11v tfs request id */
+ DOT11_MNG_TFS_RESPONSE_ID = 92, /* 11v tfs response id */
+ DOT11_MNG_WNM_SLEEP_MODE_ID = 93, /* 11v wnm-sleep mode id */
+ DOT11_MNG_TIMBC_REQ_ID = 94, /* 11v TIM broadcast request id */
+ DOT11_MNG_TIMBC_RESP_ID = 95, /* 11v TIM broadcast response id */
+ DOT11_MNG_CHANNEL_USAGE = 97, /* 11v channel usage */
+ DOT11_MNG_TIME_ZONE_ID = 98, /* 11v time zone */
+ DOT11_MNG_DMS_REQUEST_ID = 99, /* 11v dms request id */
+ DOT11_MNG_DMS_RESPONSE_ID = 100, /* 11v dms response id */
+ DOT11_MNG_LINK_IDENTIFIER_ID = 101, /* 11z TDLS Link Identifier IE */
+ DOT11_MNG_WAKEUP_SCHEDULE_ID = 102, /* 11z TDLS Wakeup Schedule IE */
+ DOT11_MNG_CHANNEL_SWITCH_TIMING_ID = 104, /* 11z TDLS Channel Switch Timing IE */
+ DOT11_MNG_PTI_CONTROL_ID = 105, /* 11z TDLS PTI Control IE */
+ DOT11_MNG_PU_BUFFER_STATUS_ID = 106, /* 11z TDLS PU Buffer Status IE */
+ DOT11_MNG_INTERWORKING_ID = 107, /* 11u interworking */
+ DOT11_MNG_ADVERTISEMENT_ID = 108, /* 11u advertisement protocol */
+ DOT11_MNG_EXP_BW_REQ_ID = 109, /* 11u expedited bandwidth request */
+ DOT11_MNG_QOS_MAP_ID = 110, /* 11u QoS map set */
+ DOT11_MNG_ROAM_CONSORT_ID = 111, /* 11u roaming consortium */
+ DOT11_MNG_EMERGCY_ALERT_ID = 112, /* 11u emergency alert identifier */
+ DOT11_MNG_MESH_CONFIG = 113, /* Mesh Configuration */
+ DOT11_MNG_MESH_ID = 114, /* Mesh ID */
+ DOT11_MNG_MESH_PEER_MGMT_ID = 117, /* Mesh PEER MGMT IE */
+ DOT11_MNG_EXT_CAP_ID = 127, /* d11 mgmt ext capability */
+ DOT11_MNG_EXT_PREQ_ID = 130, /* Mesh PREQ IE */
+ DOT11_MNG_EXT_PREP_ID = 131, /* Mesh PREP IE */
+ DOT11_MNG_EXT_PERR_ID = 132, /* Mesh PERR IE */
+ DOT11_MNG_VHT_CAP_ID = 191, /* d11 mgmt VHT cap id */
+ DOT11_MNG_VHT_OPERATION_ID = 192, /* d11 mgmt VHT op id */
+ DOT11_MNG_EXT_BSSLOAD_ID = 193, /* d11 mgmt VHT extended bss load id */
+ DOT11_MNG_WIDE_BW_CHANNEL_SWITCH_ID = 194, /* Wide BW Channel Switch IE */
+ DOT11_MNG_VHT_TRANSMIT_POWER_ENVELOPE_ID= 195, /* VHT transmit Power Envelope IE */
+ DOT11_MNG_CHANNEL_SWITCH_WRAPPER_ID = 196, /* Channel Switch Wrapper IE */
+ DOT11_MNG_AID_ID = 197, /* Association ID IE */
+ DOT11_MNG_OPER_MODE_NOTIF_ID = 199, /* d11 mgmt VHT oper mode notif */
+ DOT11_MNG_RNR_ID = 201,
+ /* FIXME: Use these temp. IDs until ANA assigns IDs */
+ DOT11_MNG_FTM_PARAMS_ID = 206, /* mcd3.2/2014 this is not final yet */
+ DOT11_MNG_TWT_ID = 216, /* 11ah D5.0 */
+ DOT11_MNG_WPA_ID = 221, /* d11 management WPA id */
+ DOT11_MNG_PROPR_ID = 221, /* d11 management proprietary id */
+ /* should start using this one instead of above two */
+ DOT11_MNG_VS_ID = 221, /* d11 management Vendor Specific IE */
+ DOT11_MNG_MESH_CSP_ID = 222, /* d11 Mesh Channel Switch Parameter */
+ DOT11_MNG_FILS_IND_ID = 240, /* 11ai FILS Indication element */
+ DOT11_MNG_FRAGMENT_ID = 242, /* IE's fragment ID */
+ DOT11_MNG_RSNXE_ID = 244, /* RSN Extension Element (RSNXE) ID */
+
+ /* The following ID extensions should be defined >= 255
+ * i.e. the values should include 255 (DOT11_MNG_ID_EXT_ID + ID Extension).
+ */
+ DOT11_MNG_ID_EXT_ID = 255 /* Element ID Extension 11mc D4.3 */
+};
+
+/* FILS and OCE ext ids */
+#define FILS_EXTID_MNG_REQ_PARAMS 2u /* FILS Request Parameters element */
+#define DOT11_MNG_FILS_REQ_PARAMS (DOT11_MNG_ID_EXT_ID + FILS_EXTID_MNG_REQ_PARAMS)
+#define FILS_EXTID_MNG_KEY_CONFIRMATION_ID 3u /* FILS Key Confirmation element */
+#define DOT11_MNG_FILS_KEY_CONFIRMATION (DOT11_MNG_ID_EXT_ID + \
+ FILS_EXTID_MNG_KEY_CONFIRMATION_ID)
+#define FILS_EXTID_MNG_SESSION_ID 4u /* FILS Session element */
+#define DOT11_MNG_FILS_SESSION (DOT11_MNG_ID_EXT_ID + FILS_EXTID_MNG_SESSION_ID)
+#define FILS_EXTID_MNG_HLP_CONTAINER_ID 5u /* FILS HLP Container element */
+#define DOT11_MNG_FILS_HLP_CONTAINER (DOT11_MNG_ID_EXT_ID + \
+ FILS_EXTID_MNG_HLP_CONTAINER_ID)
+#define FILS_EXTID_MNG_KEY_DELIVERY_ID 7u /* FILS Key Delivery element */
+#define DOT11_MNG_FILS_KEY_DELIVERY (DOT11_MNG_ID_EXT_ID + \
+ FILS_EXTID_MNG_KEY_DELIVERY_ID)
+#define FILS_EXTID_MNG_WRAPPED_DATA_ID 8u /* FILS Wrapped Data element */
+#define DOT11_MNG_FILS_WRAPPED_DATA (DOT11_MNG_ID_EXT_ID + \
+ FILS_EXTID_MNG_WRAPPED_DATA_ID)
+
+#define OCE_EXTID_MNG_ESP_ID 11u /* Estimated Service Parameters element */
+#define DOT11_MNG_ESP (DOT11_MNG_ID_EXT_ID + OCE_EXTID_MNG_ESP_ID)
+#define FILS_EXTID_MNG_PUBLIC_KEY_ID 12u /* FILS Public Key element */
+#define DOT11_MNG_FILS_PUBLIC_KEY (DOT11_MNG_ID_EXT_ID + FILS_EXTID_MNG_PUBLIC_KEY_ID)
+#define FILS_EXTID_MNG_NONCE_ID 13u /* FILS Nonce element */
+#define DOT11_MNG_FILS_NONCE (DOT11_MNG_ID_EXT_ID + FILS_EXTID_MNG_NONCE_ID)
+
+#define EXT_MNG_OWE_DH_PARAM_ID 32u /* OWE DH Param ID - RFC 8110 */
+#define DOT11_MNG_OWE_DH_PARAM_ID (DOT11_MNG_ID_EXT_ID + EXT_MNG_OWE_DH_PARAM_ID)
+#define EXT_MSG_PASSWORD_IDENTIFIER_ID 33u /* Password ID EID */
+#define DOT11_MSG_PASSWORD_IDENTIFIER_ID (DOT11_MNG_ID_EXT_ID + \
+ EXT_MSG_PASSWORD_IDENTIFIER_ID)
+#define EXT_MNG_HE_CAP_ID 35u /* HE Capabilities, 11ax */
+#define DOT11_MNG_HE_CAP_ID (DOT11_MNG_ID_EXT_ID + EXT_MNG_HE_CAP_ID)
+#define EXT_MNG_HE_OP_ID 36u /* HE Operation IE, 11ax */
+#define DOT11_MNG_HE_OP_ID (DOT11_MNG_ID_EXT_ID + EXT_MNG_HE_OP_ID)
+#define EXT_MNG_UORA_ID 37u /* UORA Parameter Set */
+#define DOT11_MNG_UORA_ID (DOT11_MNG_ID_EXT_ID + EXT_MNG_UORA_ID)
+#define EXT_MNG_MU_EDCA_ID 38u /* MU EDCA Parameter Set */
+#define DOT11_MNG_MU_EDCA_ID (DOT11_MNG_ID_EXT_ID + EXT_MNG_MU_EDCA_ID)
+#define EXT_MNG_SRPS_ID 39u /* Spatial Reuse Parameter Set */
+#define DOT11_MNG_SRPS_ID (DOT11_MNG_ID_EXT_ID + EXT_MNG_SRPS_ID)
+#define EXT_MNG_BSSCOLOR_CHANGE_ID 42u /* BSS Color Change Announcement */
+#define DOT11_MNG_BSSCOLOR_CHANGE_ID (DOT11_MNG_ID_EXT_ID + EXT_MNG_BSSCOLOR_CHANGE_ID)
+#define OCV_EXTID_MNG_OCI_ID 54u /* OCI element */
+#define DOT11_MNG_OCI_ID (DOT11_MNG_ID_EXT_ID + OCV_EXTID_MNG_OCI_ID)
+#define EXT_MNG_SHORT_SSID_ID 58u /* SHORT SSID ELEMENT */
+#define DOT11_MNG_SHORT_SSID_LIST_ID (DOT11_MNG_ID_EXT_ID + EXT_MNG_SHORT_SSID_ID)
+#define EXT_MNG_HE_6G_CAP_ID 59u /* HE Extended Capabilities, 11ax */
+#define DOT11_MNG_HE_6G_CAP_ID (DOT11_MNG_ID_EXT_ID + EXT_MNG_HE_6G_CAP_ID)
+
+#define MSCS_EXTID_MNG_DESCR_ID 88u /* Ext ID for the MSCS descriptor */
+#define DOT11_MNG_MSCS_DESCR_ID (DOT11_MNG_ID_EXT_ID + MSCS_EXTID_MNG_DESCR_ID)
+
+#define TCLAS_EXTID_MNG_MASK_ID 89u /* Ext ID for the TCLAS Mask element */
+#define DOT11_MNG_TCLASS_MASK_ID (DOT11_MNG_ID_EXT_ID + TCLAS_EXTID_MNG_MASK_ID)
+
+#define SAE_EXT_REJECTED_GROUPS_ID 92u /* SAE Rejected Groups element */
+#define DOT11_MNG_REJECTED_GROUPS_ID (DOT11_MNG_ID_EXT_ID + SAE_EXT_REJECTED_GROUPS_ID)
+#define SAE_EXT_ANTICLOG_TOKEN_CONTAINER_ID 93u /* SAE Anti-clogging token container */
+#define DOT11_MNG_ANTICLOG_TOKEN_CONTAINER_ID (DOT11_MNG_ID_EXT_ID + \
+ SAE_EXT_ANTICLOG_TOKEN_CONTAINER_ID)
+#define EXT_MNG_EHT_CAP_ID 100u /* EHT Capabilities IE FIXME */
+#define DOT11_MNG_EHT_CAP_ID (DOT11_MNG_ID_EXT_ID + EXT_MNG_EHT_CAP_ID)
+#define EXT_MNG_EHT_OP_ID 101u /* EHT Operation IE # FIXME */
+#define DOT11_MNG_EHT_OP_ID (DOT11_MNG_ID_EXT_ID + EXT_MNG_EHT_OP_ID)
+
+/* unassigned IDs for ranging parameter elements. To be updated after final
+ * assignment.
+ */
+#define DOT11_MNG_FTM_RANGING_EXT_ID 100u /* 11AZ sounding mode parameter element */
+#define DOT11_MNG_FTM_ISTA_AVAIL_EXT_ID 101u /* 11 AZ TN ISTA availability window */
+#define DOT11_MNG_FTM_RSTA_AVAIL_EXT_ID 102u /* 11 AZ TN RSTA availability window */
+#define DOT11_MNG_FTM_SECURE_LTF_EXT_ID 103u /* 11 AZ Secure LTF parameter element */
+
+#define DOT11_FTM_NTB_SUB_ELT_ID 0u /* non-TB ranging parameter sub-element ID */
+#define DOT11_FTM_TB_SUB_ELT_ID 1u /* TB ranging parameter sub-element ID */
+
+/* deprecated definitions, do not use, to be deleted later */
+#define FILS_HLP_CONTAINER_EXT_ID FILS_EXTID_MNG_HLP_CONTAINER_ID
+#define DOT11_ESP_EXT_ID OCE_EXTID_MNG_ESP_ID
+#define FILS_REQ_PARAMS_EXT_ID FILS_EXTID_MNG_REQ_PARAMS
+#define EXT_MNG_RAPS_ID 37u /* OFDMA Random Access Parameter Set */
+#define DOT11_MNG_RAPS_ID (DOT11_MNG_ID_EXT_ID + EXT_MNG_RAPS_ID)
+/* End of deprecated definitions */
+
+#define DOT11_MNG_IE_ID_EXT_MATCH(_ie, _id) (\
+ ((_ie)->id == DOT11_MNG_ID_EXT_ID) && \
+ ((_ie)->len > 0) && \
+ ((_id) == ((uint8 *)(_ie) + TLV_HDR_LEN)[0]))
+
+#define DOT11_MNG_IE_ID_EXT_INIT(_ie, _id, _len) do {\
+ (_ie)->id = DOT11_MNG_ID_EXT_ID; \
+ (_ie)->len = _len; \
+ (_ie)->id_ext = _id; \
+ } while (0)
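+
+/* Illustrative sketch (editor's addition): matching an extended element by
+ * its ID extension with the macro above. The two-octet header view below is
+ * hypothetical and stands in for the TLV type driver code would normally
+ * use; ie must point into a buffer holding the full element.
+ */
+struct dot11_ie_hdr_view { uint8 id; uint8 len; };	/* hypothetical */
+
+static inline bool
+ie_is_he_cap(struct dot11_ie_hdr_view *ie)
+{
+	return DOT11_MNG_IE_ID_EXT_MATCH(ie, EXT_MNG_HE_CAP_ID);
+}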
+
+/* Rate Defines */
+
+/* Valid rates for the Supported Rates and Extended Supported Rates IEs.
+ * Encoding is the rate in 500kbps units, rounding up for fractional values.
+ * 802.11-2012, section 6.5.5.2, DATA_RATE parameter enumerates all the values.
+ * The rate values cover DSSS, HR/DSSS, ERP, and OFDM phy rates.
+ * The defines below do not cover the rates specific to 10MHz, {3, 4.5, 27},
+ * and 5MHz, {1.5, 2.25, 3, 4.5, 13.5}, which are not supported by Broadcom devices.
+ */
+
+#define DOT11_RATE_1M 2 /* 1 Mbps in 500kbps units */
+#define DOT11_RATE_2M 4 /* 2 Mbps in 500kbps units */
+#define DOT11_RATE_5M5 11 /* 5.5 Mbps in 500kbps units */
+#define DOT11_RATE_11M 22 /* 11 Mbps in 500kbps units */
+#define DOT11_RATE_6M 12 /* 6 Mbps in 500kbps units */
+#define DOT11_RATE_9M 18 /* 9 Mbps in 500kbps units */
+#define DOT11_RATE_12M 24 /* 12 Mbps in 500kbps units */
+#define DOT11_RATE_18M 36 /* 18 Mbps in 500kbps units */
+#define DOT11_RATE_24M 48 /* 24 Mbps in 500kbps units */
+#define DOT11_RATE_36M 72 /* 36 Mbps in 500kbps units */
+#define DOT11_RATE_48M 96 /* 48 Mbps in 500kbps units */
+#define DOT11_RATE_54M 108 /* 54 Mbps in 500kbps units */
+#define DOT11_RATE_MAX 108 /* highest rate (54 Mbps) in 500kbps units */
+
+/* Supported Rates and Extended Supported Rates IEs
+ * The supported rates octets are defined with the MSB indicating a Basic Rate
+ * and bits 0-6 as the rate value
+ */
+#define DOT11_RATE_BASIC 0x80 /* flag for a Basic Rate */
+#define DOT11_RATE_MASK 0x7F /* mask for numeric part of rate */
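+
+/* Illustrative sketch (editor's addition): decoding one Supported Rates
+ * octet. Masked values above DOT11_RATE_MAX are BSS membership selectors
+ * (see below), not rates.
+ */
+static inline bool
+rate_octet_decode(uint8 r, bool *basic, uint8 *rate_500k)
+{
+	uint8 v = r & DOT11_RATE_MASK;
+
+	if (v > DOT11_RATE_MAX)
+		return FALSE;	/* membership selector, not a rate */
+	*basic = (r & DOT11_RATE_BASIC) != 0;
+	*rate_500k = v;
+	return TRUE;
+}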
+
+/* BSS Membership Selector parameters
+ * 802.11-2016 (and 802.11ax-D1.1), Sec 9.4.2.3
+ * These selector values are advertised in Supported Rates and Extended Supported Rates IEs
+ * in the supported rates list with the Basic rate bit set.
+ * Constants below include the basic bit.
+ */
+#define DOT11_BSS_MEMBERSHIP_HT 0xFF /* Basic 0x80 + 127, HT Required to join */
+#define DOT11_BSS_MEMBERSHIP_VHT 0xFE /* Basic 0x80 + 126, VHT Required to join */
+#define DOT11_BSS_MEMBERSHIP_HE 0xFD /* Basic 0x80 + 125, HE Required to join */
+#define DOT11_BSS_SAE_HASH_TO_ELEMENT 123u /* SAE Hash-to-element Required to join */
+
+/* ERP info element bit values */
+#define DOT11_MNG_ERP_LEN 1 /* ERP is currently 1 byte long */
+#define DOT11_MNG_NONERP_PRESENT 0x01 /* NonERP (802.11b) STAs are present
+ * in the BSS
+ */
+#define DOT11_MNG_USE_PROTECTION 0x02 /* Use protection mechanisms for
+ * ERP-OFDM frames
+ */
+#define DOT11_MNG_BARKER_PREAMBLE 0x04 /* Short Preambles: 0 == allowed,
+ * 1 == not allowed
+ */
+/* TS Delay element offset & size */
+#define DOT11_MGN_TS_DELAY_LEN 4 /* length of TS DELAY IE */
+#define TS_DELAY_FIELD_SIZE 4 /* TS DELAY field size */
+
+/* Capability Information Field */
+#define DOT11_CAP_ESS 0x0001 /* d11 cap. ESS */
+#define DOT11_CAP_IBSS 0x0002 /* d11 cap. IBSS */
+#define DOT11_CAP_POLLABLE 0x0004 /* d11 cap. pollable */
+#define DOT11_CAP_POLL_RQ 0x0008 /* d11 cap. poll request */
+#define DOT11_CAP_PRIVACY 0x0010 /* d11 cap. privacy */
+#define DOT11_CAP_SHORT 0x0020 /* d11 cap. short */
+#define DOT11_CAP_PBCC 0x0040 /* d11 cap. PBCC */
+#define DOT11_CAP_AGILITY 0x0080 /* d11 cap. agility */
+#define DOT11_CAP_SPECTRUM 0x0100 /* d11 cap. spectrum */
+#define DOT11_CAP_QOS 0x0200 /* d11 cap. qos */
+#define DOT11_CAP_SHORTSLOT 0x0400 /* d11 cap. shortslot */
+#define DOT11_CAP_APSD 0x0800 /* d11 cap. apsd */
+#define DOT11_CAP_RRM 0x1000 /* d11 cap. 11k radio measurement */
+#define DOT11_CAP_CCK_OFDM 0x2000 /* d11 cap. CCK/OFDM */
+#define DOT11_CAP_DELAY_BA 0x4000 /* d11 cap. delayed block ack */
+#define DOT11_CAP_IMMEDIATE_BA 0x8000 /* d11 cap. immediate block ack */
+
+/* Extended capabilities IE bitfields */
+/* 20/40 BSS Coexistence Management support bit position */
+#define DOT11_EXT_CAP_OBSS_COEX_MGMT 0u
+/* Extended Channel Switching support bit position */
+#define DOT11_EXT_CAP_EXT_CHAN_SWITCHING 2u
+/* scheduled PSMP support bit position */
+#define DOT11_EXT_CAP_SPSMP 6u
+/* Flexible Multicast Service */
+#define DOT11_EXT_CAP_FMS 11u
+/* proxy ARP service support bit position */
+#define DOT11_EXT_CAP_PROXY_ARP 12u
+/* Civic Location */
+#define DOT11_EXT_CAP_CIVIC_LOC 14u
+/* Geospatial Location */
+#define DOT11_EXT_CAP_LCI 15u
+/* Traffic Filter Service */
+#define DOT11_EXT_CAP_TFS 16u
+/* WNM-Sleep Mode */
+#define DOT11_EXT_CAP_WNM_SLEEP 17u
+/* TIM Broadcast service */
+#define DOT11_EXT_CAP_TIMBC 18u
+/* BSS Transition Management support bit position */
+#define DOT11_EXT_CAP_BSSTRANS_MGMT 19u
+/* Multiple BSSID support position */
+#define DOT11_EXT_CAP_MULTIBSSID 22u
+/* Direct Multicast Service */
+#define DOT11_EXT_CAP_DMS 26u
+/* Interworking support bit position */
+#define DOT11_EXT_CAP_IW 31u
+/* QoS map support bit position */
+#define DOT11_EXT_CAP_QOS_MAP 32u
+/* service Interval granularity bit position and mask */
+#define DOT11_EXT_CAP_SI 41u
+#define DOT11_EXT_CAP_SI_MASK 0x0E
+/* Location Identifier service */
+#define DOT11_EXT_CAP_IDENT_LOC 44u
+/* WNM notification */
+#define DOT11_EXT_CAP_WNM_NOTIF 46u
+/* Operating mode notification - VHT (11ac D3.0 - 8.4.2.29) */
+#define DOT11_EXT_CAP_OPER_MODE_NOTIF 62u
+/* Fine timing measurement - D3.0 */
+#define DOT11_EXT_CAP_FTM_RESPONDER 70u
+#define DOT11_EXT_CAP_FTM_INITIATOR 71u /* tentative 11mcd3.0 */
+#define DOT11_EXT_CAP_FILS 72u /* FILS Capability */
+/* TWT support */
+#define DOT11_EXT_CAP_TWT_REQUESTER 77u
+#define DOT11_EXT_CAP_TWT_RESPONDER 78u
+#define DOT11_EXT_CAP_OBSS_NB_RU_OFDMA 79u
+/* FIXME: Use these temp. IDs until ANA assigns IDs */
+#define DOT11_EXT_CAP_EMBSS_ADVERTISE 80u
+/* SAE password ID */
+#define DOT11_EXT_CAP_SAE_PWD_ID_INUSE 81u
+#define DOT11_EXT_CAP_SAE_PWD_ID_USED_EXCLUSIVE 82u
+/* Beacon Protection Enabled 802.11 D3.0 - 9.4.2.26
+ * This field is reserved for a STA.
+ */
+#define DOT11_EXT_CAP_BCN_PROT 84u
+
+/* Mirrored SCS (MSCS) support */
+#define DOT11_EXT_CAP_MSCS 85u
+
+/* TODO: Update DOT11_EXT_CAP_MAX_IDX to reflect the highest offset.
+ * Note: DOT11_EXT_CAP_MAX_IDX must only be used in attach path.
+ * It will cause ROM invalidation otherwise.
+ */
+#define DOT11_EXT_CAP_MAX_IDX 85u
+
+/* Remove this hack (DOT11_EXT_CAP_MAX_BIT_IDX) when no one
+ * references DOT11_EXTCAP_LEN_MAX
+ */
+#define DOT11_EXT_CAP_MAX_BIT_IDX 95u /* !!!update this please!!! */
+
+/* Remove DOT11_EXTCAP_LEN_MAX when no one references it */
+/* extended capability */
+#ifndef DOT11_EXTCAP_LEN_MAX
+#define DOT11_EXTCAP_LEN_MAX ((DOT11_EXT_CAP_MAX_BIT_IDX + 8) >> 3)
+#endif
+/* Remove dot11_extcap when no one references it */
+BWL_PRE_PACKED_STRUCT struct dot11_extcap {
+ uint8 extcap[DOT11_EXTCAP_LEN_MAX];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_extcap dot11_extcap_t;
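+
+/* Illustrative sketch (editor's addition): testing one extended capability
+ * bit position (e.g. DOT11_EXT_CAP_BSSTRANS_MGMT) against a raw Extended
+ * Capabilities element body; octets beyond the advertised length read as 0.
+ */
+static inline bool
+ext_cap_isset(const uint8 *extcap, uint ext_len, uint bit)
+{
+	if ((bit >> 3) >= ext_len)
+		return FALSE;
+	return (extcap[bit >> 3] & (1u << (bit & 7u))) != 0;
+}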
+
+/* VHT Operating mode bit fields - (11ac D8.0/802.11-2016 - 9.4.1.53) */
+#define DOT11_OPER_MODE_CHANNEL_WIDTH_SHIFT 0
+#define DOT11_OPER_MODE_CHANNEL_WIDTH_MASK 0x3
+#define DOT11_OPER_MODE_160_8080_BW_SHIFT 2
+#define DOT11_OPER_MODE_160_8080_BW_MASK 0x04
+#define DOT11_OPER_MODE_NOLDPC_SHIFT 3
+#define DOT11_OPER_MODE_NOLDPC_MASK 0x08
+#define DOT11_OPER_MODE_RXNSS_SHIFT 4
+#define DOT11_OPER_MODE_RXNSS_MASK 0x70
+#define DOT11_OPER_MODE_RXNSS_TYPE_SHIFT 7
+#define DOT11_OPER_MODE_RXNSS_TYPE_MASK 0x80
+
+#define DOT11_OPER_MODE_RESET_CHAN_WIDTH_160MHZ(oper_mode) \
+ (oper_mode & (~(DOT11_OPER_MODE_CHANNEL_WIDTH_MASK | \
+ DOT11_OPER_MODE_160_8080_BW_MASK)))
+#define DOT11_OPER_MODE_SET_CHAN_WIDTH_160MHZ(oper_mode) \
+ (oper_mode = (DOT11_OPER_MODE_RESET_CHAN_WIDTH_160MHZ(oper_mode) | \
+ (DOT11_OPER_MODE_80MHZ | DOT11_OPER_MODE_160_8080_BW_MASK)))
+
+#ifdef DOT11_OPER_MODE_LEFT_SHIFT_FIX
+
+#define DOT11_OPER_MODE(type, nss, chanw) (\
+ ((type) << DOT11_OPER_MODE_RXNSS_TYPE_SHIFT &\
+ DOT11_OPER_MODE_RXNSS_TYPE_MASK) |\
+ (((nss) - 1u) << DOT11_OPER_MODE_RXNSS_SHIFT & DOT11_OPER_MODE_RXNSS_MASK) |\
+ ((chanw) << DOT11_OPER_MODE_CHANNEL_WIDTH_SHIFT &\
+ DOT11_OPER_MODE_CHANNEL_WIDTH_MASK))
+
+#define DOT11_D8_OPER_MODE(type, nss, ldpc, bw160_8080, chanw) (\
+ ((type) << DOT11_OPER_MODE_RXNSS_TYPE_SHIFT &\
+ DOT11_OPER_MODE_RXNSS_TYPE_MASK) |\
+ (((nss) - 1u) << DOT11_OPER_MODE_RXNSS_SHIFT & DOT11_OPER_MODE_RXNSS_MASK) |\
+ ((ldpc) << DOT11_OPER_MODE_NOLDPC_SHIFT & DOT11_OPER_MODE_NOLDPC_MASK) |\
+ ((bw160_8080) << DOT11_OPER_MODE_160_8080_BW_SHIFT &\
+ DOT11_OPER_MODE_160_8080_BW_MASK) |\
+ ((chanw) << DOT11_OPER_MODE_CHANNEL_WIDTH_SHIFT &\
+ DOT11_OPER_MODE_CHANNEL_WIDTH_MASK))
+
+#else
+
+/* avoid invalidation from above fix on release branches, can be removed when older release
+ * branches no longer use component/proto from trunk
+ */
+
+#define DOT11_OPER_MODE(type, nss, chanw) (\
+ ((type) << DOT11_OPER_MODE_RXNSS_TYPE_SHIFT &\
+ DOT11_OPER_MODE_RXNSS_TYPE_MASK) |\
+ (((nss) - 1) << DOT11_OPER_MODE_RXNSS_SHIFT & DOT11_OPER_MODE_RXNSS_MASK) |\
+ ((chanw) << DOT11_OPER_MODE_CHANNEL_WIDTH_SHIFT &\
+ DOT11_OPER_MODE_CHANNEL_WIDTH_MASK))
+
+#define DOT11_D8_OPER_MODE(type, nss, ldpc, bw160_8080, chanw) (\
+ ((type) << DOT11_OPER_MODE_RXNSS_TYPE_SHIFT &\
+ DOT11_OPER_MODE_RXNSS_TYPE_MASK) |\
+ (((nss) - 1) << DOT11_OPER_MODE_RXNSS_SHIFT & DOT11_OPER_MODE_RXNSS_MASK) |\
+ ((ldpc) << DOT11_OPER_MODE_NOLDPC_SHIFT & DOT11_OPER_MODE_NOLDPC_MASK) |\
+ ((bw160_8080) << DOT11_OPER_MODE_160_8080_BW_SHIFT &\
+ DOT11_OPER_MODE_160_8080_BW_MASK) |\
+ ((chanw) << DOT11_OPER_MODE_CHANNEL_WIDTH_SHIFT &\
+ DOT11_OPER_MODE_CHANNEL_WIDTH_MASK))
+
+#endif /* DOT11_OPER_MODE_LEFT_SHIFT_FIX */
+
+#define DOT11_OPER_MODE_CHANNEL_WIDTH(mode) \
+ (((mode) & DOT11_OPER_MODE_CHANNEL_WIDTH_MASK)\
+ >> DOT11_OPER_MODE_CHANNEL_WIDTH_SHIFT)
+#define DOT11_OPER_MODE_160_8080(mode) \
+ (((mode) & DOT11_OPER_MODE_160_8080_BW_MASK)\
+ >> DOT11_OPER_MODE_160_8080_BW_SHIFT)
+#define DOT11_OPER_MODE_NOLDPC(mode) \
+ (((mode) & DOT11_OPER_MODE_NOLDPC_MASK)\
+ >> DOT11_OPER_MODE_NOLDPC_SHIFT)
+#define DOT11_OPER_MODE_RXNSS(mode) \
+ ((((mode) & DOT11_OPER_MODE_RXNSS_MASK) \
+ >> DOT11_OPER_MODE_RXNSS_SHIFT) + 1)
+#define DOT11_OPER_MODE_RXNSS_TYPE(mode) \
+ (((mode) & DOT11_OPER_MODE_RXNSS_TYPE_MASK)\
+ >> DOT11_OPER_MODE_RXNSS_TYPE_SHIFT)
+
+#define DOT11_OPER_MODE_20MHZ 0
+#define DOT11_OPER_MODE_40MHZ 1
+#define DOT11_OPER_MODE_80MHZ 2
+#define DOT11_OPER_MODE_160MHZ 3
+#define DOT11_OPER_MODE_8080MHZ 3
+#define DOT11_OPER_MODE_1608080MHZ 1
+
+#define DOT11_OPER_MODE_CHANNEL_WIDTH_20MHZ(mode) (\
+ ((mode) & DOT11_OPER_MODE_CHANNEL_WIDTH_MASK) == DOT11_OPER_MODE_20MHZ)
+#define DOT11_OPER_MODE_CHANNEL_WIDTH_40MHZ(mode) (\
+ ((mode) & DOT11_OPER_MODE_CHANNEL_WIDTH_MASK) == DOT11_OPER_MODE_40MHZ)
+#define DOT11_OPER_MODE_CHANNEL_WIDTH_80MHZ(mode) (\
+ ((mode) & DOT11_OPER_MODE_CHANNEL_WIDTH_MASK) == DOT11_OPER_MODE_80MHZ)
+#define DOT11_OPER_MODE_CHANNEL_WIDTH_160MHZ(mode) (\
+ ((mode) & DOT11_OPER_MODE_160_8080_BW_MASK))
+#define DOT11_OPER_MODE_CHANNEL_WIDTH_8080MHZ(mode) (\
+ ((mode) & DOT11_OPER_MODE_160_8080_BW_MASK))
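+
+/* Illustrative sketch (not part of the original header): compose an operating
+ * mode field for a hypothetical 2-stream, 80 MHz receiver and decode it with
+ * the accessors above; "mode" is a local example variable.
+ *
+ *   uint8 mode = DOT11_OPER_MODE(0, 2, DOT11_OPER_MODE_80MHZ);
+ *   // DOT11_OPER_MODE_RXNSS(mode)         == 2
+ *   // DOT11_OPER_MODE_CHANNEL_WIDTH(mode) == DOT11_OPER_MODE_80MHZ
+ *   DOT11_OPER_MODE_SET_CHAN_WIDTH_160MHZ(mode);
+ *   // DOT11_OPER_MODE_CHANNEL_WIDTH_160MHZ(mode) is now nonzero
+ */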
+
+/* Operating mode information element 802.11ac D3.0 - 8.4.2.168 */
+BWL_PRE_PACKED_STRUCT struct dot11_oper_mode_notif_ie {
+ uint8 mode;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_oper_mode_notif_ie dot11_oper_mode_notif_ie_t;
+
+#define DOT11_OPER_MODE_NOTIF_IE_LEN 1
+
+/* Extended Capability Information Field */
+#define DOT11_OBSS_COEX_MNG_SUPPORT 0x01 /* 20/40 BSS Coexistence Management support */
+
+/*
+ * Action Frame Constants
+ */
+#define DOT11_ACTION_HDR_LEN 2 /* action frame category + action field */
+#define DOT11_ACTION_CAT_OFF 0 /* category offset */
+#define DOT11_ACTION_ACT_OFF 1 /* action offset */
+
+/* Action Category field (sec 8.4.1.11) */
+#define DOT11_ACTION_CAT_ERR_MASK 0x80 /* category error mask */
+#define DOT11_ACTION_CAT_MASK 0x7F /* category mask */
+#define DOT11_ACTION_CAT_SPECT_MNG 0 /* category spectrum management */
+#define DOT11_ACTION_CAT_QOS 1 /* category QoS */
+#define DOT11_ACTION_CAT_DLS 2 /* category DLS */
+#define DOT11_ACTION_CAT_BLOCKACK 3 /* category block ack */
+#define DOT11_ACTION_CAT_PUBLIC 4 /* category public */
+#define DOT11_ACTION_CAT_RRM 5 /* category radio measurements */
+#define DOT11_ACTION_CAT_FBT 6 /* category fast bss transition */
+#define DOT11_ACTION_CAT_HT 7 /* category for HT */
+#define DOT11_ACTION_CAT_SA_QUERY 8 /* security association query */
+#define DOT11_ACTION_CAT_PDPA 9 /* protected dual of public action */
+#define DOT11_ACTION_CAT_WNM 10 /* category for WNM */
+#define DOT11_ACTION_CAT_UWNM 11 /* category for Unprotected WNM */
+#define DOT11_ACTION_CAT_MESH 13 /* category for Mesh */
+#define DOT11_ACTION_CAT_SELFPROT 15 /* category for Mesh, self protected */
+#define DOT11_ACTION_NOTIFICATION 17
+
+#define DOT11_ACTION_RAV_STREAMING 19 /* category for Robust AV streaming:
+ * SCS, MSCS, etc.
+ */
+
+#define DOT11_ACTION_CAT_VHT 21 /* VHT action */
+#define DOT11_ACTION_CAT_S1G 22 /* S1G action */
+#define DOT11_ACTION_CAT_FILS 26 /* FILS action frame */
+/* FIXME: Use temp. ID until ANA assigns one */
+#define DOT11_ACTION_CAT_HE 27 /* HE action frame */
+#define DOT11_ACTION_CAT_VSP 126 /* protected vendor specific */
+#define DOT11_ACTION_CAT_VS 127 /* category Vendor Specific */
+
+/* Spectrum Management Action IDs (sec 7.4.1) */
+#define DOT11_SM_ACTION_M_REQ 0 /* d11 action measurement request */
+#define DOT11_SM_ACTION_M_REP 1 /* d11 action measurement response */
+#define DOT11_SM_ACTION_TPC_REQ 2 /* d11 action TPC request */
+#define DOT11_SM_ACTION_TPC_REP 3 /* d11 action TPC response */
+#define DOT11_SM_ACTION_CHANNEL_SWITCH 4 /* d11 action channel switch */
+#define DOT11_SM_ACTION_EXT_CSA 5 /* d11 extended CSA for 11n */
+
+/* QoS action ids */
+#define DOT11_QOS_ACTION_ADDTS_REQ 0 /* d11 action ADDTS request */
+#define DOT11_QOS_ACTION_ADDTS_RESP 1 /* d11 action ADDTS response */
+#define DOT11_QOS_ACTION_DELTS 2 /* d11 action DELTS */
+#define DOT11_QOS_ACTION_SCHEDULE 3 /* d11 action schedule */
+#define DOT11_QOS_ACTION_QOS_MAP 4 /* d11 action QOS map */
+
+/* HT action ids */
+#define DOT11_ACTION_ID_HT_CH_WIDTH 0 /* notify channel width action id */
+#define DOT11_ACTION_ID_HT_MIMO_PS 1 /* mimo ps action id */
+
+/* Public action ids */
+#define DOT11_PUB_ACTION_BSS_COEX_MNG 0 /* 20/40 Coexistence Management action id */
+#define DOT11_PUB_ACTION_CHANNEL_SWITCH 4 /* d11 action channel switch */
+#define DOT11_PUB_ACTION_VENDOR_SPEC 9 /* Vendor specific */
+#define DOT11_PUB_ACTION_GAS_CB_REQ 12 /* GAS Comeback Request */
+#define DOT11_PUB_ACTION_FTM_REQ 32 /* FTM request */
+#define DOT11_PUB_ACTION_FTM 33 /* FTM measurement */
+/* Unassigned value; will change after final assignment.
+ * For now, use 34 (same as FILS Discovery) due to QT/TB/chipsim support from uCode.
+ */
+#define DOT11_PUB_ACTION_FTM_LMR 34 /* FTM 11AZ Location Management Report */
+
+#define DOT11_PUB_ACTION_FTM_REQ_TRIGGER_START 1u /* FTM request start trigger */
+#define DOT11_PUB_ACTION_FTM_REQ_TRIGGER_STOP 0u /* FTM request stop trigger */
+
+/* Block Ack action types */
+#define DOT11_BA_ACTION_ADDBA_REQ 0 /* ADDBA Req action frame type */
+#define DOT11_BA_ACTION_ADDBA_RESP 1 /* ADDBA Resp action frame type */
+#define DOT11_BA_ACTION_DELBA 2 /* DELBA action frame type */
+
+/* ADDBA action parameters */
+#define DOT11_ADDBA_PARAM_AMSDU_SUP 0x0001 /* AMSDU supported under BA */
+#define DOT11_ADDBA_PARAM_POLICY_MASK 0x0002 /* policy mask(ack vs delayed) */
+#define DOT11_ADDBA_PARAM_POLICY_SHIFT 1 /* policy shift */
+#define DOT11_ADDBA_PARAM_TID_MASK 0x003c /* tid mask */
+#define DOT11_ADDBA_PARAM_TID_SHIFT 2 /* tid shift */
+#define DOT11_ADDBA_PARAM_BSIZE_MASK 0xffc0 /* buffer size mask */
+#define DOT11_ADDBA_PARAM_BSIZE_SHIFT 6 /* buffer size shift */
+
+#define DOT11_ADDBA_POLICY_DELAYED 0 /* delayed BA policy */
+#define DOT11_ADDBA_POLICY_IMMEDIATE 1 /* immediate BA policy */
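+
+/* Illustrative sketch (not part of the original header): pack an ADDBA
+ * parameter set for TID 5, a 64-MPDU reorder buffer, immediate BA policy and
+ * A-MSDU support; byte-order conversion for the on-air field is omitted.
+ *
+ *   uint16 param_set = DOT11_ADDBA_PARAM_AMSDU_SUP |
+ *       (DOT11_ADDBA_POLICY_IMMEDIATE << DOT11_ADDBA_PARAM_POLICY_SHIFT) |
+ *       (5u << DOT11_ADDBA_PARAM_TID_SHIFT) |
+ *       (64u << DOT11_ADDBA_PARAM_BSIZE_SHIFT);
+ */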
+
+/* Fast Transition action types */
+#define DOT11_FT_ACTION_FT_RESERVED 0
+#define DOT11_FT_ACTION_FT_REQ 1 /* FBT request - for over-the-DS FBT */
+#define DOT11_FT_ACTION_FT_RES 2 /* FBT response - for over-the-DS FBT */
+#define DOT11_FT_ACTION_FT_CON 3 /* FBT confirm - for OTDS with RRP */
+#define DOT11_FT_ACTION_FT_ACK 4 /* FBT ack */
+
+/* DLS action types */
+#define DOT11_DLS_ACTION_REQ 0 /* DLS Request */
+#define DOT11_DLS_ACTION_RESP 1 /* DLS Response */
+#define DOT11_DLS_ACTION_TD 2 /* DLS Teardown */
+
+/* Robust Audio Video streaming action types */
+#define DOT11_RAV_SCS_REQ 0 /* SCS Request */
+#define DOT11_RAV_SCS_RES 1 /* SCS Response */
+#define DOT11_RAV_GM_REQ 2 /* Group Membership Request */
+#define DOT11_RAV_GM_RES 3 /* Group Membership Response */
+#define DOT11_RAV_MSCS_REQ 4 /* MSCS Request */
+#define DOT11_RAV_MSCS_RES 5 /* MSCS Response */
+
+/* Wireless Network Management (WNM) action types */
+#define DOT11_WNM_ACTION_EVENT_REQ 0
+#define DOT11_WNM_ACTION_EVENT_REP 1
+#define DOT11_WNM_ACTION_DIAG_REQ 2
+#define DOT11_WNM_ACTION_DIAG_REP 3
+#define DOT11_WNM_ACTION_LOC_CFG_REQ 4
+#define DOT11_WNM_ACTION_LOC_RFG_RESP 5
+#define DOT11_WNM_ACTION_BSSTRANS_QUERY 6
+#define DOT11_WNM_ACTION_BSSTRANS_REQ 7
+#define DOT11_WNM_ACTION_BSSTRANS_RESP 8
+#define DOT11_WNM_ACTION_FMS_REQ 9
+#define DOT11_WNM_ACTION_FMS_RESP 10
+#define DOT11_WNM_ACTION_COL_INTRFRNCE_REQ 11
+#define DOT11_WNM_ACTION_COL_INTRFRNCE_REP 12
+#define DOT11_WNM_ACTION_TFS_REQ 13
+#define DOT11_WNM_ACTION_TFS_RESP 14
+#define DOT11_WNM_ACTION_TFS_NOTIFY_REQ 15
+#define DOT11_WNM_ACTION_WNM_SLEEP_REQ 16
+#define DOT11_WNM_ACTION_WNM_SLEEP_RESP 17
+#define DOT11_WNM_ACTION_TIMBC_REQ 18
+#define DOT11_WNM_ACTION_TIMBC_RESP 19
+#define DOT11_WNM_ACTION_QOS_TRFC_CAP_UPD 20
+#define DOT11_WNM_ACTION_CHAN_USAGE_REQ 21
+#define DOT11_WNM_ACTION_CHAN_USAGE_RESP 22
+#define DOT11_WNM_ACTION_DMS_REQ 23
+#define DOT11_WNM_ACTION_DMS_RESP 24
+#define DOT11_WNM_ACTION_TMNG_MEASUR_REQ 25
+#define DOT11_WNM_ACTION_NOTFCTN_REQ 26
+#define DOT11_WNM_ACTION_NOTFCTN_RESP 27
+#define DOT11_WNM_ACTION_TFS_NOTIFY_RESP 28
+
+/* Unprotected Wireless Network Management (WNM) action types */
+#define DOT11_UWNM_ACTION_TIM 0
+#define DOT11_UWNM_ACTION_TIMING_MEASUREMENT 1
+
+#define DOT11_MNG_COUNTRY_ID_LEN 3
+
+/* VHT category action types - 802.11ac D3.0 - 8.5.23.1 */
+#define DOT11_VHT_ACTION_CBF 0 /* Compressed Beamforming */
+#define DOT11_VHT_ACTION_GID_MGMT 1 /* Group ID Management */
+#define DOT11_VHT_ACTION_OPER_MODE_NOTIF 2 /* Operating mode notif'n */
+
+/* FILS category action types - 802.11ai D11.0 - 9.6.8.1 */
+#define DOT11_FILS_ACTION_DISCOVERY 34 /* FILS Discovery */
+
+/** DLS Request frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_dls_req {
+ uint8 category; /* category of action frame (2) */
+ uint8 action; /* DLS action: req (0) */
+ struct ether_addr da; /* destination address */
+ struct ether_addr sa; /* source address */
+ uint16 cap; /* capability */
+ uint16 timeout; /* timeout value */
+ uint8 data[1]; /* IEs: supported rates, extended supported rates, HT cap */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_dls_req dot11_dls_req_t;
+#define DOT11_DLS_REQ_LEN 18 /* Fixed length */
+
+/** DLS response frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_dls_resp {
+ uint8 category; /* category of action frame (2) */
+ uint8 action; /* DLS action: resp (1) */
+ uint16 status; /* status code field */
+ struct ether_addr da; /* destination address */
+ struct ether_addr sa; /* source address */
+ uint8 data[1]; /* optional: capability, rate ... */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_dls_resp dot11_dls_resp_t;
+#define DOT11_DLS_RESP_LEN 16 /* Fixed length */
+
+/* ************* 802.11v related definitions. ************* */
+
+/** BSS Management Transition Query frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_bsstrans_query {
+ uint8 category; /* category of action frame (10) */
+ uint8 action; /* WNM action: trans_query (6) */
+ uint8 token; /* dialog token */
+ uint8 reason; /* transition query reason */
+ uint8 data[1]; /* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_bsstrans_query dot11_bsstrans_query_t;
+#define DOT11_BSSTRANS_QUERY_LEN 4 /* Fixed length */
+
+/* BTM transition reason */
+#define DOT11_BSSTRANS_REASON_UNSPECIFIED 0
+#define DOT11_BSSTRANS_REASON_EXC_FRAME_LOSS 1
+#define DOT11_BSSTRANS_REASON_EXC_TRAFFIC_DELAY 2
+#define DOT11_BSSTRANS_REASON_INSUFF_QOS_CAPACITY 3
+#define DOT11_BSSTRANS_REASON_FIRST_ASSOC 4
+#define DOT11_BSSTRANS_REASON_LOAD_BALANCING 5
+#define DOT11_BSSTRANS_REASON_BETTER_AP_FOUND 6
+#define DOT11_BSSTRANS_REASON_DEAUTH_RX 7
+#define DOT11_BSSTRANS_REASON_8021X_EAP_AUTH_FAIL 8
+#define DOT11_BSSTRANS_REASON_4WAY_HANDSHK_FAIL 9
+#define DOT11_BSSTRANS_REASON_MANY_REPLAYCNT_FAIL 10
+#define DOT11_BSSTRANS_REASON_MANY_DATAMIC_FAIL 11
+#define DOT11_BSSTRANS_REASON_EXCEED_MAX_RETRANS 12
+#define DOT11_BSSTRANS_REASON_MANY_BCAST_DISASSOC_RX 13
+#define DOT11_BSSTRANS_REASON_MANY_BCAST_DEAUTH_RX 14
+#define DOT11_BSSTRANS_REASON_PREV_TRANSITION_FAIL 15
+#define DOT11_BSSTRANS_REASON_LOW_RSSI 16
+#define DOT11_BSSTRANS_REASON_ROAM_FROM_NON_80211 17
+#define DOT11_BSSTRANS_REASON_RX_BTM_REQ 18
+#define DOT11_BSSTRANS_REASON_PREF_LIST_INCLUDED 19
+#define DOT11_BSSTRANS_REASON_LEAVING_ESS 20
+
+/** BSS Management Transition Request frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_bsstrans_req {
+ uint8 category; /* category of action frame (10) */
+ uint8 action; /* WNM action: trans_req (7) */
+ uint8 token; /* dialog token */
+ uint8 reqmode; /* transition request mode */
+ uint16 disassoc_tmr; /* disassociation timer */
+ uint8 validity_intrvl; /* validity interval */
+ uint8 data[1]; /* optional: BSS term duration, ... */
+ /* ...session info URL, candidate list */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_bsstrans_req dot11_bsstrans_req_t;
+#define DOT11_BSSTRANS_REQ_LEN 7 /* Fixed length */
+#define DOT11_BSSTRANS_REQ_FIXED_LEN 7u /* Fixed length */
+
+/* BSS Mgmt Transition Request Mode Field - 802.11v */
+#define DOT11_BSSTRANS_REQMODE_PREF_LIST_INCL 0x01
+#define DOT11_BSSTRANS_REQMODE_ABRIDGED 0x02
+#define DOT11_BSSTRANS_REQMODE_DISASSOC_IMMINENT 0x04
+#define DOT11_BSSTRANS_REQMODE_BSS_TERM_INCL 0x08
+#define DOT11_BSSTRANS_REQMODE_ESS_DISASSOC_IMNT 0x10
+
+/** BSS Management transition response frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_bsstrans_resp {
+ uint8 category; /* category of action frame (10) */
+ uint8 action; /* WNM action: trans_resp (8) */
+ uint8 token; /* dialog token */
+ uint8 status; /* transition status */
+ uint8 term_delay; /* BSS termination delay */
+ uint8 data[1]; /* optional: BSSID target, candidate list */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_bsstrans_resp dot11_bsstrans_resp_t;
+#define DOT11_BSSTRANS_RESP_LEN 5 /* Fixed length */
+
+/* BSS Mgmt Transition Response Status Field */
+#define DOT11_BSSTRANS_RESP_STATUS_ACCEPT 0
+#define DOT11_BSSTRANS_RESP_STATUS_REJECT 1
+#define DOT11_BSSTRANS_RESP_STATUS_REJ_INSUFF_BCN 2
+#define DOT11_BSSTRANS_RESP_STATUS_REJ_INSUFF_CAP 3
+#define DOT11_BSSTRANS_RESP_STATUS_REJ_TERM_UNDESIRED 4
+#define DOT11_BSSTRANS_RESP_STATUS_REJ_TERM_DELAY_REQ 5
+#define DOT11_BSSTRANS_RESP_STATUS_REJ_BSS_LIST_PROVIDED 6
+#define DOT11_BSSTRANS_RESP_STATUS_REJ_NO_SUITABLE_BSS 7
+#define DOT11_BSSTRANS_RESP_STATUS_REJ_LEAVING_ESS 8
+
+/** BSS Max Idle Period element */
+BWL_PRE_PACKED_STRUCT struct dot11_bss_max_idle_period_ie {
+ uint8 id; /* 90, DOT11_MNG_BSS_MAX_IDLE_PERIOD_ID */
+ uint8 len;
+ uint16 max_idle_period; /* in unit of 1000 TUs */
+ uint8 idle_opt;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_bss_max_idle_period_ie dot11_bss_max_idle_period_ie_t;
+#define DOT11_BSS_MAX_IDLE_PERIOD_IE_LEN 3 /* bss max idle period IE size */
+#define DOT11_BSS_MAX_IDLE_PERIOD_OPT_PROTECTED 1 /* BSS max idle option */
+
+/** TIM Broadcast request element */
+BWL_PRE_PACKED_STRUCT struct dot11_timbc_req_ie {
+ uint8 id; /* 94, DOT11_MNG_TIMBC_REQ_ID */
+ uint8 len;
+ uint8 interval; /* in unit of beacon interval */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_timbc_req_ie dot11_timbc_req_ie_t;
+#define DOT11_TIMBC_REQ_IE_LEN 1 /* Fixed length */
+
+/** TIM Broadcast request frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_timbc_req {
+ uint8 category; /* category of action frame (10) */
+ uint8 action; /* WNM action: DOT11_WNM_ACTION_TIMBC_REQ(18) */
+ uint8 token; /* dialog token */
+ uint8 data[1]; /* TIM broadcast request element */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_timbc_req dot11_timbc_req_t;
+#define DOT11_TIMBC_REQ_LEN 3 /* Fixed length */
+
+/** TIM Broadcast response element */
+BWL_PRE_PACKED_STRUCT struct dot11_timbc_resp_ie {
+ uint8 id; /* 95, DOT11_MNG_TIM_BROADCAST_RESP_ID */
+ uint8 len;
+ uint8 status; /* status of add request */
+ uint8 interval; /* in unit of beacon interval */
+ int32 offset; /* in unit of ms */
+ uint16 high_rate; /* in unit of 0.5 Mb/s */
+ uint16 low_rate; /* in unit of 0.5 Mb/s */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_timbc_resp_ie dot11_timbc_resp_ie_t;
+#define DOT11_TIMBC_DENY_RESP_IE_LEN 1 /* Deny. Fixed length */
+#define DOT11_TIMBC_ACCEPT_RESP_IE_LEN 10 /* Accept. Fixed length */
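+
+/* Note (illustrative): high_rate and low_rate are in 0.5 Mb/s units, so a
+ * stored value of 12 denotes 6 Mb/s; e.g. rate_kbps = ie->high_rate * 500.
+ */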
+
+#define DOT11_TIMBC_STATUS_ACCEPT 0
+#define DOT11_TIMBC_STATUS_ACCEPT_TSTAMP 1
+#define DOT11_TIMBC_STATUS_DENY 2
+#define DOT11_TIMBC_STATUS_OVERRIDDEN 3
+#define DOT11_TIMBC_STATUS_RESERVED 4
+
+/** TIM Broadcast response frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_timbc_resp {
+ uint8 category; /* category of action frame (10) */
+ uint8 action; /* action: DOT11_WNM_ACTION_TIMBC_RESP(19) */
+ uint8 token; /* dialog token */
+ uint8 data[1]; /* TIM broadcast response element */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_timbc_resp dot11_timbc_resp_t;
+#define DOT11_TIMBC_RESP_LEN 3 /* Fixed length */
+
+/** TIM element */
+BWL_PRE_PACKED_STRUCT struct dot11_tim_ie {
+ uint8 id; /* 5, DOT11_MNG_TIM_ID */
+ uint8 len; /* 4 - 255 */
+ uint8 dtim_count; /* DTIM decrementing counter */
+ uint8 dtim_period; /* DTIM period */
+ uint8 bitmap_control; /* AID 0 + bitmap offset */
+ uint8 pvb[1]; /* Partial Virtual Bitmap, variable length */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tim_ie dot11_tim_ie_t;
+#define DOT11_TIM_IE_FIXED_LEN 3 /* Fixed length, without id and len */
+#define DOT11_TIM_IE_FIXED_TOTAL_LEN 5 /* Fixed length, with id and len */
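+
+/* Illustrative sketch (not part of the original header): the len field of a
+ * TIM element covers the three fixed octets plus the partial virtual bitmap,
+ * so for a hypothetical pvb_len-octet bitmap:
+ *
+ *   tim->len = DOT11_TIM_IE_FIXED_LEN + pvb_len;        // 4..255
+ *   total    = DOT11_TIM_IE_FIXED_TOTAL_LEN + pvb_len;  // on-air octets
+ */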
+
+/** TIM Broadcast frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_timbc {
+ uint8 category; /* category of action frame (11) */
+ uint8 action; /* action: TIM (0) */
+ uint8 check_beacon; /* need to check beacon */
+ uint8 tsf[8]; /* Time Synchronization Function */
+ dot11_tim_ie_t tim_ie; /* TIM element */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_timbc dot11_timbc_t;
+#define DOT11_TIMBC_HDR_LEN (sizeof(dot11_timbc_t) - sizeof(dot11_tim_ie_t))
+#define DOT11_TIMBC_FIXED_LEN (sizeof(dot11_timbc_t) - 1) /* Fixed length */
+#define DOT11_TIMBC_LEN 11 /* Fixed length */
+
+/** TCLAS frame classifier type */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_hdr {
+ uint8 type;
+ uint8 mask;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_fc_hdr dot11_tclas_fc_hdr_t;
+#define DOT11_TCLAS_FC_HDR_LEN 2 /* Fixed length */
+
+#define DOT11_TCLAS_MASK_0 0x1
+#define DOT11_TCLAS_MASK_1 0x2
+#define DOT11_TCLAS_MASK_2 0x4
+#define DOT11_TCLAS_MASK_3 0x8
+#define DOT11_TCLAS_MASK_4 0x10
+#define DOT11_TCLAS_MASK_5 0x20
+#define DOT11_TCLAS_MASK_6 0x40
+#define DOT11_TCLAS_MASK_7 0x80
+
+#define DOT11_TCLAS_FC_0_ETH 0
+#define DOT11_TCLAS_FC_1_IP 1
+#define DOT11_TCLAS_FC_2_8021Q 2
+#define DOT11_TCLAS_FC_3_OFFSET 3
+#define DOT11_TCLAS_FC_4_IP_HIGHER 4
+#define DOT11_TCLAS_FC_5_8021D 5
+
+/** TCLAS frame classifier type 0 parameters for Ethernet */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_0_eth {
+ uint8 type;
+ uint8 mask;
+ uint8 sa[ETHER_ADDR_LEN];
+ uint8 da[ETHER_ADDR_LEN];
+ uint16 eth_type;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_fc_0_eth dot11_tclas_fc_0_eth_t;
+#define DOT11_TCLAS_FC_0_ETH_LEN 16
+
+/** TCLAS frame classifier type 1 parameters for IPV4 */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_1_ipv4 {
+ uint8 type;
+ uint8 mask;
+ uint8 version;
+ uint32 src_ip;
+ uint32 dst_ip;
+ uint16 src_port;
+ uint16 dst_port;
+ uint8 dscp;
+ uint8 protocol;
+ uint8 reserved;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_fc_1_ipv4 dot11_tclas_fc_1_ipv4_t;
+#define DOT11_TCLAS_FC_1_IPV4_LEN 18
+
+/** TCLAS frame classifier type 2 parameters for 802.1Q */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_2_8021q {
+ uint8 type;
+ uint8 mask;
+ uint16 tci;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_fc_2_8021q dot11_tclas_fc_2_8021q_t;
+#define DOT11_TCLAS_FC_2_8021Q_LEN 4
+
+/** TCLAS frame classifier type 3 parameters for filter offset */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_3_filter {
+ uint8 type;
+ uint8 mask;
+ uint16 offset;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_fc_3_filter dot11_tclas_fc_3_filter_t;
+#define DOT11_TCLAS_FC_3_FILTER_LEN 4
+
+/** TCLAS frame classifier type 4 parameters for IPV4 are the same as TCLAS type 1 */
+typedef struct dot11_tclas_fc_1_ipv4 dot11_tclas_fc_4_ipv4_t;
+#define DOT11_TCLAS_FC_4_IPV4_LEN DOT11_TCLAS_FC_1_IPV4_LEN
+
+/** TCLAS frame classifier type 4 parameters for IPV6 */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_4_ipv6 {
+ uint8 type;
+ uint8 mask;
+ uint8 version;
+ uint8 saddr[16];
+ uint8 daddr[16];
+ uint16 src_port;
+ uint16 dst_port;
+ uint8 dscp;
+ uint8 nexthdr;
+ uint8 flow_lbl[3];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_fc_4_ipv6 dot11_tclas_fc_4_ipv6_t;
+#define DOT11_TCLAS_FC_4_IPV6_LEN 44
+
+/** TCLAS frame classifier type 5 parameters for 802.1D */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_5_8021d {
+ uint8 type;
+ uint8 mask;
+ uint8 pcp;
+ uint8 cfi;
+ uint16 vid;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_fc_5_8021d dot11_tclas_fc_5_8021d_t;
+#define DOT11_TCLAS_FC_5_8021D_LEN 6
+
+/** TCLAS frame classifier type parameters */
+BWL_PRE_PACKED_STRUCT union dot11_tclas_fc {
+ uint8 data[1];
+ dot11_tclas_fc_hdr_t hdr;
+ dot11_tclas_fc_0_eth_t t0_eth;
+ dot11_tclas_fc_1_ipv4_t t1_ipv4;
+ dot11_tclas_fc_2_8021q_t t2_8021q;
+ dot11_tclas_fc_3_filter_t t3_filter;
+ dot11_tclas_fc_4_ipv4_t t4_ipv4;
+ dot11_tclas_fc_4_ipv6_t t4_ipv6;
+ dot11_tclas_fc_5_8021d_t t5_8021d;
+} BWL_POST_PACKED_STRUCT;
+typedef union dot11_tclas_fc dot11_tclas_fc_t;
+
+#define DOT11_TCLAS_FC_MIN_LEN 4 /* Classifier Type 2 has the min size */
+#define DOT11_TCLAS_FC_MAX_LEN 254
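+
+/* Illustrative sketch (not part of the original header): dispatch on the
+ * classifier type octet; "fc" is a hypothetical pointer into a received
+ * TCLAS element.
+ *
+ *   const dot11_tclas_fc_t *fc = ...;
+ *   switch (fc->hdr.type) {
+ *   case DOT11_TCLAS_FC_1_IP:
+ *       // fc->t1_ipv4.src_port etc. (fields carry on-air byte order)
+ *       break;
+ *   case DOT11_TCLAS_FC_2_8021Q:
+ *       // fc->t2_8021q.tci
+ *       break;
+ *   default:
+ *       break;
+ *   }
+ */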
+
+/** TCLAS element */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_ie {
+ uint8 id; /* 14, DOT11_MNG_TCLAS_ID */
+ uint8 len;
+ uint8 user_priority;
+ dot11_tclas_fc_t fc;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_ie dot11_tclas_ie_t;
+#define DOT11_TCLAS_IE_LEN 3u /* Fixed length, includes id and len */
+
+/** TCLAS processing element */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_proc_ie {
+ uint8 id; /* 44, DOT11_MNG_TCLAS_PROC_ID */
+ uint8 len;
+ uint8 process;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_proc_ie dot11_tclas_proc_ie_t;
+#define DOT11_TCLAS_PROC_IE_LEN 3 /* Fixed length, includes id and len */
+
+#define DOT11_TCLAS_PROC_LEN 1u /* Proc ie length is always 1 byte */
+
+#define DOT11_TCLAS_PROC_MATCHALL 0 /* all higher-layer elements must match */
+#define DOT11_TCLAS_PROC_MATCHONE 1 /* at least one higher-layer element must match */
+#define DOT11_TCLAS_PROC_NONMATCH 2 /* no higher-layer element may match */
+
+/* TSPEC element defined in 802.11 std section 8.4.2.32 - Not supported */
+#define DOT11_TSPEC_IE_LEN 57 /* Fixed length */
+
+/** TCLAS Mask element */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_mask_ie {
+ uint8 id; /* DOT11_MNG_ID_EXT_ID (255) */
+ uint8 len;
+ uint8 id_ext; /* TCLAS_EXTID_MNG_MASK_ID (89) */
+ dot11_tclas_fc_t fc; /* Variable length frame classifier (fc) */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_mask_ie dot11_tclas_mask_ie_t;
+#define DOT11_TCLAS_MASK_IE_LEN 1u /* Fixed length, excludes id and len */
+#define DOT11_TCLAS_MASK_IE_HDR_LEN 3u /* Fixed length */
+
+/* Bitmap definitions for the User Priority Bitmap
+ * Each bit in the bitmap corresponds to a user priority.
+ */
+#define DOT11_UP_CTRL_UP_0 0u
+#define DOT11_UP_CTRL_UP_1 1u
+#define DOT11_UP_CTRL_UP_2 2u
+#define DOT11_UP_CTRL_UP_3 3u
+#define DOT11_UP_CTRL_UP_4 4u
+#define DOT11_UP_CTRL_UP_5 5u
+#define DOT11_UP_CTRL_UP_6 6u
+#define DOT11_UP_CTRL_UP_7 7u
+
+/* User priority control (up_ctl) macros */
+#define DOT11_UPC_UP_BITMAP_MASK 0xFFu /* UP bitmap mask */
+#define DOT11_UPC_UP_BITMAP_SHIFT 0u /* UP bitmap shift */
+#define DOT11_UPC_UP_LIMIT_MASK 0x700u /* UP limit mask */
+#define DOT11_UPC_UP_LIMIT_SHIFT 8u /* UP limit shift */
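+
+/* Illustrative sketch (not part of the original header): split a host-order
+ * up_ctl field into its UP bitmap and UP limit; "up_ctl" is hypothetical.
+ *
+ *   uint8 up_bitmap = (uint8)((up_ctl & DOT11_UPC_UP_BITMAP_MASK) >>
+ *                             DOT11_UPC_UP_BITMAP_SHIFT);
+ *   uint8 up_limit  = (uint8)((up_ctl & DOT11_UPC_UP_LIMIT_MASK) >>
+ *                             DOT11_UPC_UP_LIMIT_SHIFT);
+ */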
+
+/* MSCS Request Types */
+#define DOT11_MSCS_REQ_TYPE_ADD 0u
+#define DOT11_MSCS_REQ_TYPE_REMOVE 1u
+#define DOT11_MSCS_REQ_TYPE_CHANGE 2u
+
+/** MSCS Descriptor element */
+BWL_PRE_PACKED_STRUCT struct dot11_mscs_descr_ie {
+ uint8 id; /* DOT11_MNG_ID_EXT_ID (255) */
+ uint8 len;
+ uint8 id_ext; /* MSCS_EXTID_MNG_DESCR_ID (88) */
+ uint8 req_type; /* MSCS request type */
+ uint16 up_ctl; /* User priority control:
+ * Bits 0..7, up_bitmap(8 bits);
+ * Bits 8..10, up_limit (3 bits)
+ * Bits 11..15 reserved (5 bits)
+ */
+ uint32 stream_timeout;
+ uint8 data[];
+ /* optional TCLAS mask elements (dot11_tclas_mask_ie_t) */
+ /* optional sub-elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_mscs_descr_ie dot11_mscs_descr_ie_t;
+#define DOT11_MSCS_DESCR_IE_LEN 8u /* Fixed length, excludes id and len */
+#define DOT11_MSCS_DESCR_IE_HDR_LEN 10u /* Entire descriptor header length */
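+
+/* Illustrative sketch (not part of the original header): a minimal
+ * fixed-length MSCS "add" descriptor with no optional sub-elements; the len
+ * value (8) covers id_ext..stream_timeout. Field settings beyond the macros
+ * are hypothetical, and byte-order conversion is omitted.
+ *
+ *   dot11_mscs_descr_ie_t d;
+ *   d.id       = DOT11_MNG_ID_EXT_ID;
+ *   d.len      = DOT11_MSCS_DESCR_IE_LEN;
+ *   d.id_ext   = MSCS_EXTID_MNG_DESCR_ID;
+ *   d.req_type = DOT11_MSCS_REQ_TYPE_ADD;
+ */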
+
+/** MSCS Request frame, refer to section 9.4.18.6 in the spec P802.11REVmd_D3.1 */
+BWL_PRE_PACKED_STRUCT struct dot11_mscs_req {
+ uint8 category; /* ACTION_RAV_STREAMING (19) */
+ uint8 robust_action; /* action: MSCS Req (4), MSCS Res (5), etc. */
+ uint8 dialog_token; /* To identify the MSCS request and response */
+ dot11_mscs_descr_ie_t mscs_descr; /* MSCS descriptor */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_mscs_req dot11_mscs_req_t;
+#define DOT11_MSCS_REQ_HDR_LEN 3u /* Fixed length */
+
+/** MSCS Response frame, refer to section 9.4.18.7 in the spec P802.11REVmd_D3.1 */
+BWL_PRE_PACKED_STRUCT struct dot11_mscs_res {
+ uint8 category; /* ACTION_RAV_STREAMING (19) */
+ uint8 robust_action; /* action: MSCS Req (4), MSCS Res (5), etc. */
+ uint8 dialog_token; /* To identify the MSCS request and response */
+ uint16 status; /* status code */
+ uint8 data[]; /* optional MSCS descriptor */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_mscs_res dot11_mscs_res_t;
+#define DOT11_MSCS_RES_HDR_LEN 5u /* Fixed length */
+
+/* MSCS subelement */
+#define DOT11_MSCS_SUBELEM_ID_STATUS 1u /* MSCS subelement ID for the status */
+
+BWL_PRE_PACKED_STRUCT struct dot11_mscs_subelement {
+ uint8 id; /* MSCS specific subelement ID */
+ uint8 len; /* Length in bytes */
+ uint8 data[]; /* Subelement specific data */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_mscs_subelement dot11_mscs_subelement_t;
+#define DOT11_MSCS_DESCR_SUBELEM_IE_STATUS_LEN 2u /* Subelement ID status length */
+
+/** TFS request element */
+BWL_PRE_PACKED_STRUCT struct dot11_tfs_req_ie {
+ uint8 id; /* 91, DOT11_MNG_TFS_REQUEST_ID */
+ uint8 len;
+ uint8 tfs_id;
+ uint8 actcode;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tfs_req_ie dot11_tfs_req_ie_t;
+#define DOT11_TFS_REQ_IE_LEN 2 /* Fixed length, without id and len */
+
+/** TFS request action codes (bitfield) */
+#define DOT11_TFS_ACTCODE_DELETE 1
+#define DOT11_TFS_ACTCODE_NOTIFY 2
+
+/** TFS request subelement IDs */
+#define DOT11_TFS_REQ_TFS_SE_ID 1
+#define DOT11_TFS_REQ_VENDOR_SE_ID 221
+
+/** TFS subelement */
+BWL_PRE_PACKED_STRUCT struct dot11_tfs_se {
+ uint8 sub_id;
+ uint8 len;
+ uint8 data[1]; /* TCLAS element(s) + optional TCLAS proc */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tfs_se dot11_tfs_se_t;
+
+/** TFS response element */
+BWL_PRE_PACKED_STRUCT struct dot11_tfs_resp_ie {
+ uint8 id; /* 92, DOT11_MNG_TFS_RESPONSE_ID */
+ uint8 len;
+ uint8 tfs_id;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tfs_resp_ie dot11_tfs_resp_ie_t;
+#define DOT11_TFS_RESP_IE_LEN 1u /* Fixed length, without id and len */
+
+/** TFS response subelement IDs (same subelements as in the TFS request, but with different IDs) */
+#define DOT11_TFS_RESP_TFS_STATUS_SE_ID 1
+#define DOT11_TFS_RESP_TFS_SE_ID 2
+#define DOT11_TFS_RESP_VENDOR_SE_ID 221
+
+/** TFS status subelement */
+BWL_PRE_PACKED_STRUCT struct dot11_tfs_status_se {
+ uint8 sub_id; /* 1, DOT11_TFS_RESP_TFS_STATUS_SE_ID */
+ uint8 len;
+ uint8 resp_st;
+ uint8 data[1]; /* Potential dot11_tfs_se_t included */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tfs_status_se dot11_tfs_status_se_t;
+#define DOT11_TFS_STATUS_SE_LEN 1 /* Fixed length, without id and len */
+
+/* The following definitions should be merged into the FMS_TFS macros below */
+/* TFS Response status code. Identical to FMS Element status, without N/A */
+#define DOT11_TFS_STATUS_ACCEPT 0
+#define DOT11_TFS_STATUS_DENY_FORMAT 1
+#define DOT11_TFS_STATUS_DENY_RESOURCE 2
+#define DOT11_TFS_STATUS_DENY_POLICY 4
+#define DOT11_TFS_STATUS_DENY_UNSPECIFIED 5
+#define DOT11_TFS_STATUS_ALTPREF_POLICY 7
+#define DOT11_TFS_STATUS_ALTPREF_TCLAS_UNSUPP 14
+
+/* FMS Element Status and TFS Response Status Definition */
+#define DOT11_FMS_TFS_STATUS_ACCEPT 0
+#define DOT11_FMS_TFS_STATUS_DENY_FORMAT 1
+#define DOT11_FMS_TFS_STATUS_DENY_RESOURCE 2
+#define DOT11_FMS_TFS_STATUS_DENY_MULTIPLE_DI 3
+#define DOT11_FMS_TFS_STATUS_DENY_POLICY 4
+#define DOT11_FMS_TFS_STATUS_DENY_UNSPECIFIED 5
+#define DOT11_FMS_TFS_STATUS_ALT_DIFF_DI 6
+#define DOT11_FMS_TFS_STATUS_ALT_POLICY 7
+#define DOT11_FMS_TFS_STATUS_ALT_CHANGE_DI 8
+#define DOT11_FMS_TFS_STATUS_ALT_MCRATE 9
+#define DOT11_FMS_TFS_STATUS_TERM_POLICY 10
+#define DOT11_FMS_TFS_STATUS_TERM_RESOURCE 11
+#define DOT11_FMS_TFS_STATUS_TERM_HIGHER_PRIO 12
+#define DOT11_FMS_TFS_STATUS_ALT_CHANGE_MDI 13
+#define DOT11_FMS_TFS_STATUS_ALT_TCLAS_UNSUPP 14
+
+/** TFS Management Request frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_tfs_req {
+ uint8 category; /* category of action frame (10) */
+ uint8 action; /* WNM action: TFS request (13) */
+ uint8 token; /* dialog token */
+ uint8 data[1]; /* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tfs_req dot11_tfs_req_t;
+#define DOT11_TFS_REQ_LEN 3 /* Fixed length */
+
+/** TFS Management Response frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_tfs_resp {
+ uint8 category; /* category of action frame (10) */
+ uint8 action; /* WNM action: TFS response (14) */
+ uint8 token; /* dialog token */
+ uint8 data[1]; /* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tfs_resp dot11_tfs_resp_t;
+#define DOT11_TFS_RESP_LEN 3 /* Fixed length */
+
+/** TFS Management Notify frame request header */
+BWL_PRE_PACKED_STRUCT struct dot11_tfs_notify_req {
+ uint8 category; /* category of action frame (10) */
+ uint8 action; /* WNM action: TFS notify request (15) */
+ uint8 tfs_id_cnt; /* TFS IDs count */
+ uint8 tfs_id[1]; /* Array of TFS IDs */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tfs_notify_req dot11_tfs_notify_req_t;
+#define DOT11_TFS_NOTIFY_REQ_LEN 3 /* Fixed length */
+
+/** TFS Management Notify frame response header */
+BWL_PRE_PACKED_STRUCT struct dot11_tfs_notify_resp {
+ uint8 category; /* category of action frame (10) */
+ uint8 action; /* WNM action: TFS notify response (28) */
+ uint8 tfs_id_cnt; /* TFS IDs count */
+ uint8 tfs_id[1]; /* Array of TFS IDs */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tfs_notify_resp dot11_tfs_notify_resp_t;
+#define DOT11_TFS_NOTIFY_RESP_LEN 3 /* Fixed length */
+
+/** WNM-Sleep Management Request frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_wnm_sleep_req {
+ uint8 category; /* category of action frame (10) */
+ uint8 action; /* WNM action: wnm-sleep request (16) */
+ uint8 token; /* dialog token */
+ uint8 data[1]; /* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_wnm_sleep_req dot11_wnm_sleep_req_t;
+#define DOT11_WNM_SLEEP_REQ_LEN 3 /* Fixed length */
+
+/** WNM-Sleep Management Response frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_wnm_sleep_resp {
+ uint8 category; /* category of action frame (10) */
+ uint8 action; /* WNM action: wnm-sleep response (17) */
+ uint8 token; /* dialog token */
+ uint16 key_len; /* key data length */
+ uint8 data[1]; /* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_wnm_sleep_resp dot11_wnm_sleep_resp_t;
+#define DOT11_WNM_SLEEP_RESP_LEN 5 /* Fixed length */
+
+#define DOT11_WNM_SLEEP_SUBELEM_ID_GTK 0
+#define DOT11_WNM_SLEEP_SUBELEM_ID_IGTK 1
+
+BWL_PRE_PACKED_STRUCT struct dot11_wnm_sleep_subelem_gtk {
+ uint8 sub_id;
+ uint8 len;
+ uint16 key_info;
+ uint8 key_length;
+ uint8 rsc[8];
+ uint8 key[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_wnm_sleep_subelem_gtk dot11_wnm_sleep_subelem_gtk_t;
+#define DOT11_WNM_SLEEP_SUBELEM_GTK_FIXED_LEN 11 /* without sub_id, len, and key */
+#define DOT11_WNM_SLEEP_SUBELEM_GTK_MAX_LEN 43 /* without sub_id and len */
+
+BWL_PRE_PACKED_STRUCT struct dot11_wnm_sleep_subelem_igtk {
+ uint8 sub_id;
+ uint8 len;
+ uint16 key_id;
+ uint8 pn[6];
+ uint8 key[16];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_wnm_sleep_subelem_igtk dot11_wnm_sleep_subelem_igtk_t;
+#define DOT11_WNM_SLEEP_SUBELEM_IGTK_LEN 24 /* Fixed length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_wnm_sleep_ie {
+ uint8 id; /* 93, DOT11_MNG_WNM_SLEEP_MODE_ID */
+ uint8 len;
+ uint8 act_type;
+ uint8 resp_status;
+ uint16 interval;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_wnm_sleep_ie dot11_wnm_sleep_ie_t;
+#define DOT11_WNM_SLEEP_IE_LEN 4 /* Fixed length */
+
+#define DOT11_WNM_SLEEP_ACT_TYPE_ENTER 0
+#define DOT11_WNM_SLEEP_ACT_TYPE_EXIT 1
+
+#define DOT11_WNM_SLEEP_RESP_ACCEPT 0
+#define DOT11_WNM_SLEEP_RESP_UPDATE 1
+#define DOT11_WNM_SLEEP_RESP_DENY 2
+#define DOT11_WNM_SLEEP_RESP_DENY_TEMP 3
+#define DOT11_WNM_SLEEP_RESP_DENY_KEY 4
+#define DOT11_WNM_SLEEP_RESP_DENY_INUSE 5
+#define DOT11_WNM_SLEEP_RESP_LAST 6
+
+/** DMS Management Request frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_dms_req {
+ uint8 category; /* category of action frame (10) */
+ uint8 action; /* WNM action: dms request (23) */
+ uint8 token; /* dialog token */
+ uint8 data[1]; /* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_dms_req dot11_dms_req_t;
+#define DOT11_DMS_REQ_LEN 3 /* Fixed length */
+
+/** DMS Management Response frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_dms_resp {
+ uint8 category; /* category of action frame (10) */
+ uint8 action; /* WNM action: dms response (24) */
+ uint8 token; /* dialog token */
+ uint8 data[1]; /* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_dms_resp dot11_dms_resp_t;
+#define DOT11_DMS_RESP_LEN 3 /* Fixed length */
+
+/** DMS request element */
+BWL_PRE_PACKED_STRUCT struct dot11_dms_req_ie {
+ uint8 id; /* 99, DOT11_MNG_DMS_REQUEST_ID */
+ uint8 len;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_dms_req_ie dot11_dms_req_ie_t;
+#define DOT11_DMS_REQ_IE_LEN 2 /* Fixed length */
+
+/** DMS response element */
+BWL_PRE_PACKED_STRUCT struct dot11_dms_resp_ie {
+ uint8 id; /* 100, DOT11_MNG_DMS_RESPONSE_ID */
+ uint8 len;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_dms_resp_ie dot11_dms_resp_ie_t;
+#define DOT11_DMS_RESP_IE_LEN 2 /* Fixed length */
+
+/** DMS request descriptor */
+BWL_PRE_PACKED_STRUCT struct dot11_dms_req_desc {
+ uint8 dms_id;
+ uint8 len;
+ uint8 type;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_dms_req_desc dot11_dms_req_desc_t;
+#define DOT11_DMS_REQ_DESC_LEN 3 /* Fixed length */
+
+#define DOT11_DMS_REQ_TYPE_ADD 0
+#define DOT11_DMS_REQ_TYPE_REMOVE 1
+#define DOT11_DMS_REQ_TYPE_CHANGE 2
+
+/** DMS response status */
+BWL_PRE_PACKED_STRUCT struct dot11_dms_resp_st {
+ uint8 dms_id;
+ uint8 len;
+ uint8 type;
+ uint16 lsc;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_dms_resp_st dot11_dms_resp_st_t;
+#define DOT11_DMS_RESP_STATUS_LEN 5 /* Fixed length */
+
+#define DOT11_DMS_RESP_TYPE_ACCEPT 0
+#define DOT11_DMS_RESP_TYPE_DENY 1
+#define DOT11_DMS_RESP_TYPE_TERM 2
+
+#define DOT11_DMS_RESP_LSC_UNSUPPORTED 0xFFFF
+
+/** WNM-Notification Request frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_wnm_notif_req {
+ uint8 category; /* category of action frame (10) */
+ uint8 action; /* WNM action: Notification request (26) */
+ uint8 token; /* dialog token */
+ uint8 type; /* type */
+ uint8 data[1]; /* Sub-elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_wnm_notif_req dot11_wnm_notif_req_t;
+#define DOT11_WNM_NOTIF_REQ_LEN 4 /* Fixed length */
+
+/** FMS Management Request frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_fms_req {
+ uint8 category; /* category of action frame (10) */
+ uint8 action; /* WNM action: fms request (9) */
+ uint8 token; /* dialog token */
+ uint8 data[1]; /* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_fms_req dot11_fms_req_t;
+#define DOT11_FMS_REQ_LEN 3 /* Fixed length */
+
+/** FMS Management Response frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_fms_resp {
+ uint8 category; /* category of action frame (10) */
+ uint8 action; /* WNM action: fms response (10) */
+ uint8 token; /* dialog token */
+ uint8 data[1]; /* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_fms_resp dot11_fms_resp_t;
+#define DOT11_FMS_RESP_LEN 3 /* Fixed length */
+
+/** FMS Descriptor element */
+BWL_PRE_PACKED_STRUCT struct dot11_fms_desc {
+ uint8 id;
+ uint8 len;
+ uint8 num_fms_cnt;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_fms_desc dot11_fms_desc_t;
+#define DOT11_FMS_DESC_LEN 1 /* Fixed length */
+
+#define DOT11_FMS_CNTR_MAX 0x8
+#define DOT11_FMS_CNTR_ID_MASK 0x7
+#define DOT11_FMS_CNTR_ID_SHIFT 0x0
+#define DOT11_FMS_CNTR_COUNT_MASK 0xf8
+#define DOT11_FMS_CNTR_SHIFT 0x3
+
+/** FMS request element */
+BWL_PRE_PACKED_STRUCT struct dot11_fms_req_ie {
+ uint8 id;
+ uint8 len;
+ uint8 fms_token; /* token used to identify fms stream set */
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_fms_req_ie dot11_fms_req_ie_t;
+#define DOT11_FMS_REQ_IE_FIX_LEN 1 /* Fixed length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_rate_id_field {
+ uint8 mask;
+ uint8 mcs_idx;
+ uint16 rate;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rate_id_field dot11_rate_id_field_t;
+#define DOT11_RATE_ID_FIELD_MCS_SEL_MASK 0x7
+#define DOT11_RATE_ID_FIELD_MCS_SEL_OFFSET 0
+#define DOT11_RATE_ID_FIELD_RATETYPE_MASK 0x18
+#define DOT11_RATE_ID_FIELD_RATETYPE_OFFSET 3
+#define DOT11_RATE_ID_FIELD_LEN sizeof(dot11_rate_id_field_t)
+
+/** FMS request subelements */
+BWL_PRE_PACKED_STRUCT struct dot11_fms_se {
+ uint8 sub_id;
+ uint8 len;
+ uint8 interval;
+ uint8 max_interval;
+ dot11_rate_id_field_t rate;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_fms_se dot11_fms_se_t;
+#define DOT11_FMS_REQ_SE_LEN 6 /* Fixed length */
+
+#define DOT11_FMS_REQ_SE_ID_FMS 1 /* FMS subelement */
+#define DOT11_FMS_REQ_SE_ID_VS 221 /* Vendor Specific subelement */
+
+/** FMS response element */
+BWL_PRE_PACKED_STRUCT struct dot11_fms_resp_ie {
+ uint8 id;
+ uint8 len;
+ uint8 fms_token;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_fms_resp_ie dot11_fms_resp_ie_t;
+#define DOT11_FMS_RESP_IE_FIX_LEN 1 /* Fixed length */
+
+/* FMS status subelements */
+#define DOT11_FMS_STATUS_SE_ID_FMS 1 /* FMS Status */
+#define DOT11_FMS_STATUS_SE_ID_TCLAS 2 /* TCLAS Status */
+#define DOT11_FMS_STATUS_SE_ID_VS 221 /* Vendor Specific subelement */
+
+/** FMS status subelement */
+BWL_PRE_PACKED_STRUCT struct dot11_fms_status_se {
+ uint8 sub_id;
+ uint8 len;
+ uint8 status;
+ uint8 interval;
+ uint8 max_interval;
+ uint8 fmsid;
+ uint8 counter;
+ dot11_rate_id_field_t rate;
+ uint8 mcast_addr[ETHER_ADDR_LEN];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_fms_status_se dot11_fms_status_se_t;
+#define DOT11_FMS_STATUS_SE_LEN 15 /* Fixed length */
+
+/** TCLAS status subelement */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_status_se {
+ uint8 sub_id;
+ uint8 len;
+ uint8 fmsid;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_status_se dot11_tclas_status_se_t;
+#define DOT11_TCLAS_STATUS_SE_LEN 1 /* Fixed length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_addba_req {
+ uint8 category; /* category of action frame (3) */
+ uint8 action; /* action: addba req */
+ uint8 token; /* identifier */
+ uint16 addba_param_set; /* parameter set */
+ uint16 timeout; /* timeout in seconds */
+ uint16 start_seqnum; /* starting sequence number */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_addba_req dot11_addba_req_t;
+#define DOT11_ADDBA_REQ_LEN 9 /* length of addba req frame */
+
+BWL_PRE_PACKED_STRUCT struct dot11_addba_resp {
+ uint8 category; /* category of action frame (3) */
+ uint8 action; /* action: addba resp */
+ uint8 token; /* identifier */
+ uint16 status; /* status of add request */
+ uint16 addba_param_set; /* negotiated parameter set */
+ uint16 timeout; /* negotiated timeout in seconds */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_addba_resp dot11_addba_resp_t;
+#define DOT11_ADDBA_RESP_LEN 9 /* length of addba resp frame */
+
+/* DELBA action parameters */
+#define DOT11_DELBA_PARAM_INIT_MASK 0x0800 /* initiator mask */
+#define DOT11_DELBA_PARAM_INIT_SHIFT 11 /* initiator shift */
+#define DOT11_DELBA_PARAM_TID_MASK 0xf000 /* tid mask */
+#define DOT11_DELBA_PARAM_TID_SHIFT 12 /* tid shift */
+
+BWL_PRE_PACKED_STRUCT struct dot11_delba {
+ uint8 category; /* category of action frame (3) */
+ uint8 action; /* action: delba */
+ uint16 delba_param_set; /* parameter set */
+ uint16 reason; /* reason for delba */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_delba dot11_delba_t;
+#define DOT11_DELBA_LEN 6 /* length of delba frame */
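+
+/* Illustrative sketch (not part of the original header): pack a DELBA
+ * parameter set as the originator of the TID-3 agreement; byte-order
+ * conversion for the on-air field is omitted.
+ *
+ *   uint16 param_set = (1u << DOT11_DELBA_PARAM_INIT_SHIFT) |
+ *       (3u << DOT11_DELBA_PARAM_TID_SHIFT);
+ */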
+
+/* SA Query action field value */
+#define SA_QUERY_REQUEST 0
+#define SA_QUERY_RESPONSE 1
+
+/* ************* 802.11r related definitions. ************* */
+
+/** Over-the-DS Fast Transition Request frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_ft_req {
+ uint8 category; /* category of action frame (6) */
+ uint8 action; /* action: ft req */
+ uint8 sta_addr[ETHER_ADDR_LEN];
+ uint8 tgt_ap_addr[ETHER_ADDR_LEN];
+ uint8 data[1]; /* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ft_req dot11_ft_req_t;
+#define DOT11_FT_REQ_FIXED_LEN 14
+
+/** Over-the-DS Fast Transition Response frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_ft_res {
+ uint8 category; /* category of action frame (6) */
+ uint8 action; /* action: ft resp */
+ uint8 sta_addr[ETHER_ADDR_LEN];
+ uint8 tgt_ap_addr[ETHER_ADDR_LEN];
+ uint16 status; /* status code */
+ uint8 data[1]; /* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ft_res dot11_ft_res_t;
+#define DOT11_FT_RES_FIXED_LEN 16
+
+/** RDE RIC Data Element. */
+BWL_PRE_PACKED_STRUCT struct dot11_rde_ie {
+ uint8 id; /* 11r, DOT11_MNG_RDE_ID */
+ uint8 length;
+ uint8 rde_id; /* RDE identifier. */
+ uint8 rd_count; /* Resource Descriptor Count. */
+ uint16 status; /* Status Code. */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rde_ie dot11_rde_ie_t;
+
+/* 11r - Size of the RDE (RIC Data Element) IE, including TLV header. */
+#define DOT11_MNG_RDE_IE_LEN sizeof(dot11_rde_ie_t)
+
+/* ************* 802.11k related definitions. ************* */
+
+/* Radio measurements enabled capability ie */
+#define DOT11_RRM_CAP_LEN 5 /* length of rrm cap bitmap */
+#define RCPI_IE_LEN 1
+#define RSNI_IE_LEN 1
+BWL_PRE_PACKED_STRUCT struct dot11_rrm_cap_ie {
+ uint8 cap[DOT11_RRM_CAP_LEN];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rrm_cap_ie dot11_rrm_cap_ie_t;
+
+/* Bitmap definitions for cap ie */
+#define DOT11_RRM_CAP_LINK 0
+#define DOT11_RRM_CAP_NEIGHBOR_REPORT 1
+#define DOT11_RRM_CAP_PARALLEL 2
+#define DOT11_RRM_CAP_REPEATED 3
+#define DOT11_RRM_CAP_BCN_PASSIVE 4
+#define DOT11_RRM_CAP_BCN_ACTIVE 5
+#define DOT11_RRM_CAP_BCN_TABLE 6
+#define DOT11_RRM_CAP_BCN_REP_COND 7
+#define DOT11_RRM_CAP_FM 8
+#define DOT11_RRM_CAP_CLM 9
+#define DOT11_RRM_CAP_NHM 10
+#define DOT11_RRM_CAP_SM 11
+#define DOT11_RRM_CAP_LCIM 12
+#define DOT11_RRM_CAP_LCIA 13
+#define DOT11_RRM_CAP_TSCM 14
+#define DOT11_RRM_CAP_TTSCM 15
+#define DOT11_RRM_CAP_AP_CHANREP 16
+#define DOT11_RRM_CAP_RMMIB 17
+/* bits 18-23: not used by the RRM IOVAR */
+#define DOT11_RRM_CAP_MPC0 24
+#define DOT11_RRM_CAP_MPC1 25
+#define DOT11_RRM_CAP_MPC2 26
+#define DOT11_RRM_CAP_MPTI 27
+#define DOT11_RRM_CAP_NBRTSFO 28
+#define DOT11_RRM_CAP_RCPI 29
+#define DOT11_RRM_CAP_RSNI 30
+#define DOT11_RRM_CAP_BSSAAD 31
+#define DOT11_RRM_CAP_BSSAAC 32
+#define DOT11_RRM_CAP_AI 33
+#define DOT11_RRM_CAP_FTM_RANGE 34
+#define DOT11_RRM_CAP_CIVIC_LOC 35
+#define DOT11_RRM_CAP_IDENT_LOC 36
+#define DOT11_RRM_CAP_LAST 36
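+
+/* Illustrative sketch (not part of the original header): capability bit n of
+ * the indices above lives in octet n/8 of the 5-octet bitmap, e.g. to
+ * advertise neighbor-report support in a hypothetical "cap_ie":
+ *
+ *   cap_ie.cap[DOT11_RRM_CAP_NEIGHBOR_REPORT / 8] |=
+ *       (uint8)(1u << (DOT11_RRM_CAP_NEIGHBOR_REPORT % 8));
+ */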
+
+#ifdef WL11K_ALL_MEAS
+#define DOT11_RRM_CAP_LINK_ENAB (1 << DOT11_RRM_CAP_LINK)
+#define DOT11_RRM_CAP_FM_ENAB (1 << (DOT11_RRM_CAP_FM - 8))
+#define DOT11_RRM_CAP_CLM_ENAB (1 << (DOT11_RRM_CAP_CLM - 8))
+#define DOT11_RRM_CAP_NHM_ENAB (1 << (DOT11_RRM_CAP_NHM - 8))
+#define DOT11_RRM_CAP_SM_ENAB (1 << (DOT11_RRM_CAP_SM - 8))
+#define DOT11_RRM_CAP_LCIM_ENAB (1 << (DOT11_RRM_CAP_LCIM - 8))
+#define DOT11_RRM_CAP_TSCM_ENAB (1 << (DOT11_RRM_CAP_TSCM - 8))
+#ifdef WL11K_AP
+#define DOT11_RRM_CAP_MPC0_ENAB (1 << (DOT11_RRM_CAP_MPC0 - 24))
+#define DOT11_RRM_CAP_MPC1_ENAB (1 << (DOT11_RRM_CAP_MPC1 - 24))
+#define DOT11_RRM_CAP_MPC2_ENAB (1 << (DOT11_RRM_CAP_MPC2 - 24))
+#define DOT11_RRM_CAP_MPTI_ENAB (1 << (DOT11_RRM_CAP_MPTI - 24))
+#else
+#define DOT11_RRM_CAP_MPC0_ENAB 0
+#define DOT11_RRM_CAP_MPC1_ENAB 0
+#define DOT11_RRM_CAP_MPC2_ENAB 0
+#define DOT11_RRM_CAP_MPTI_ENAB 0
+#endif /* WL11K_AP */
+#define DOT11_RRM_CAP_CIVIC_LOC_ENAB (1 << (DOT11_RRM_CAP_CIVIC_LOC - 32))
+#define DOT11_RRM_CAP_IDENT_LOC_ENAB (1 << (DOT11_RRM_CAP_IDENT_LOC - 32))
+#else
+#define DOT11_RRM_CAP_LINK_ENAB 0
+#define DOT11_RRM_CAP_FM_ENAB 0
+#define DOT11_RRM_CAP_CLM_ENAB 0
+#define DOT11_RRM_CAP_NHM_ENAB 0
+#define DOT11_RRM_CAP_SM_ENAB 0
+#define DOT11_RRM_CAP_LCIM_ENAB 0
+#define DOT11_RRM_CAP_TSCM_ENAB 0
+#define DOT11_RRM_CAP_MPC0_ENAB 0
+#define DOT11_RRM_CAP_MPC1_ENAB 0
+#define DOT11_RRM_CAP_MPC2_ENAB 0
+#define DOT11_RRM_CAP_MPTI_ENAB 0
+#define DOT11_RRM_CAP_CIVIC_LOC_ENAB 0
+#define DOT11_RRM_CAP_IDENT_LOC_ENAB 0
+#endif /* WL11K_ALL_MEAS */
+#ifdef WL11K_NBR_MEAS
+#define DOT11_RRM_CAP_NEIGHBOR_REPORT_ENAB (1 << DOT11_RRM_CAP_NEIGHBOR_REPORT)
+#else
+#define DOT11_RRM_CAP_NEIGHBOR_REPORT_ENAB 0
+#endif /* WL11K_NBR_MEAS */
+#ifdef WL11K_BCN_MEAS
+#define DOT11_RRM_CAP_BCN_PASSIVE_ENAB (1 << DOT11_RRM_CAP_BCN_PASSIVE)
+#define DOT11_RRM_CAP_BCN_ACTIVE_ENAB (1 << DOT11_RRM_CAP_BCN_ACTIVE)
+#else
+#define DOT11_RRM_CAP_BCN_PASSIVE_ENAB 0
+#define DOT11_RRM_CAP_BCN_ACTIVE_ENAB 0
+#endif /* WL11K_BCN_MEAS */
+#define DOT11_RRM_CAP_MPA_MASK 0x7
+
+/* Operating Class (formerly "Regulatory Class") definitions */
+#define DOT11_OP_CLASS_NONE 255
+
+BWL_PRE_PACKED_STRUCT struct do11_ap_chrep {
+ uint8 id;
+ uint8 len;
+ uint8 reg;
+ uint8 chanlist[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct do11_ap_chrep dot11_ap_chrep_t;
+
+/* Radio Measurements action ids */
+#define DOT11_RM_ACTION_RM_REQ 0 /* Radio measurement request */
+#define DOT11_RM_ACTION_RM_REP 1 /* Radio measurement report */
+#define DOT11_RM_ACTION_LM_REQ 2 /* Link measurement request */
+#define DOT11_RM_ACTION_LM_REP 3 /* Link measurement report */
+#define DOT11_RM_ACTION_NR_REQ 4 /* Neighbor report request */
+#define DOT11_RM_ACTION_NR_REP 5 /* Neighbor report response */
+#define DOT11_PUB_ACTION_MP 7 /* Measurement Pilot public action id */
+
+/** Generic radio measurement action frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_rm_action {
+ uint8 category; /* category of action frame (5) */
+ uint8 action; /* radio measurement action */
+ uint8 token; /* dialog token */
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rm_action dot11_rm_action_t;
+#define DOT11_RM_ACTION_LEN 3
+
+BWL_PRE_PACKED_STRUCT struct dot11_rmreq {
+ uint8 category; /* category of action frame (5) */
+ uint8 action; /* radio measurement action */
+ uint8 token; /* dialog token */
+ uint16 reps; /* no. of repetitions */
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmreq dot11_rmreq_t;
+#define DOT11_RMREQ_LEN 5
+
+BWL_PRE_PACKED_STRUCT struct dot11_rm_ie {
+ uint8 id;
+ uint8 len;
+ uint8 token;
+ uint8 mode;
+ uint8 type;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rm_ie dot11_rm_ie_t;
+#define DOT11_RM_IE_LEN 5
+
+/* Definitions for "mode" bits in rm req */
+#define DOT11_RMREQ_MODE_PARALLEL 1
+#define DOT11_RMREQ_MODE_ENABLE 2
+#define DOT11_RMREQ_MODE_REQUEST 4
+#define DOT11_RMREQ_MODE_REPORT 8
+#define DOT11_RMREQ_MODE_DURMAND 0x10 /* Duration Mandatory */
+
+/* Definitions for "mode" bits in rm rep */
+#define DOT11_RMREP_MODE_LATE 1
+#define DOT11_RMREP_MODE_INCAPABLE 2
+#define DOT11_RMREP_MODE_REFUSED 4
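+
+/* Illustrative sketch (not part of the original header): a measurement report
+ * with any of the LATE/INCAPABLE/REFUSED bits set carries no measurement
+ * data; "ie" is a hypothetical dot11_rm_ie_t pointer.
+ *
+ *   if (ie->mode & (DOT11_RMREP_MODE_LATE | DOT11_RMREP_MODE_INCAPABLE |
+ *                   DOT11_RMREP_MODE_REFUSED)) {
+ *       // no measurement report field follows
+ *   }
+ */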
+
+BWL_PRE_PACKED_STRUCT struct dot11_rmreq_bcn {
+ uint8 id; /* use dot11_rm_ie_t ? */
+ uint8 len;
+ uint8 token;
+ uint8 mode;
+ uint8 type;
+ uint8 reg;
+ uint8 channel;
+ uint16 interval;
+ uint16 duration;
+ uint8 bcn_mode;
+ struct ether_addr bssid;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmreq_bcn dot11_rmreq_bcn_t;
+#define DOT11_RMREQ_BCN_LEN 18u
+
+BWL_PRE_PACKED_STRUCT struct dot11_rmrep_bcn {
+ uint8 reg;
+ uint8 channel;
+ uint32 starttime[2];
+ uint16 duration;
+ uint8 frame_info;
+ uint8 rcpi;
+ uint8 rsni;
+ struct ether_addr bssid;
+ uint8 antenna_id;
+ uint32 parent_tsf;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmrep_bcn dot11_rmrep_bcn_t;
+#define DOT11_RMREP_BCN_LEN 26
+
+/* Beacon request measurement mode */
+#define DOT11_RMREQ_BCN_PASSIVE 0
+#define DOT11_RMREQ_BCN_ACTIVE 1
+#define DOT11_RMREQ_BCN_TABLE 2
+
+/* Sub-element IDs for Beacon Request */
+#define DOT11_RMREQ_BCN_SSID_ID 0
+#define DOT11_RMREQ_BCN_REPINFO_ID 1
+#define DOT11_RMREQ_BCN_REPDET_ID 2
+#define DOT11_RMREQ_BCN_REQUEST_ID 10
+#define DOT11_RMREQ_BCN_APCHREP_ID DOT11_MNG_AP_CHREP_ID
+#define DOT11_RMREQ_BCN_LAST_RPT_IND_REQ_ID 164
+
+/* Reporting Detail element definition */
+#define DOT11_RMREQ_BCN_REPDET_FIXED 0 /* Fixed length fields only */
+#define DOT11_RMREQ_BCN_REPDET_REQUEST 1 /* + requested information elems */
+#define DOT11_RMREQ_BCN_REPDET_ALL 2 /* All fields */
+
+/* Reporting Information (reporting condition) element definition */
+#define DOT11_RMREQ_BCN_REPINFO_LEN 2 /* Beacon Reporting Information length */
+#define DOT11_RMREQ_BCN_REPCOND_DEFAULT 0 /* Report to be issued after each measurement */
+
+/* Last Beacon Report Indication Request definition */
+#define DOT11_RMREQ_BCN_LAST_RPT_IND_REQ_ENAB 1
+
+BWL_PRE_PACKED_STRUCT struct dot11_rmrep_last_bcn_rpt_ind_req {
+ uint8 id; /* DOT11_RMREQ_BCN_LAST_RPT_IND_REQ_ID */
+ uint8 len; /* length of remaining fields */
+ uint8 data; /* data = 1 means last bcn rpt ind requested */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmrep_last_bcn_rpt_ind_req dot11_rmrep_last_bcn_rpt_ind_req_t;
+
+/* Sub-element IDs for Beacon Report */
+#define DOT11_RMREP_BCN_FRM_BODY 1
+#define DOT11_RMREP_BCN_FRM_BODY_FRAG_ID 2
+#define DOT11_RMREP_BCN_LAST_RPT_IND 164
+#define DOT11_RMREP_BCN_FRM_BODY_LEN_MAX 224 /* 802.11k-2008 7.3.2.22.6 */
+
+/* Refer to IEEE P802.11-REVmd/D1.0 9.4.2.21.7 Beacon report */
+BWL_PRE_PACKED_STRUCT struct dot11_rmrep_bcn_frm_body_fragmt_id {
+ uint8 id; /* DOT11_RMREP_BCN_FRM_BODY_FRAG_ID */
+ uint8 len; /* length of remaining fields */
+ /* More fragments(B15), fragment Id(B8-B14), Bcn rpt instance ID (B0 - B7) */
+ uint16 frag_info_rpt_id;
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct dot11_rmrep_bcn_frm_body_fragmt_id dot11_rmrep_bcn_frm_body_fragmt_id_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_rmrep_bcn_frm_body_frag_id {
+ uint8 id; /* DOT11_RMREP_BCN_FRM_BODY_FRAG_ID */
+ uint8 len; /* length of remaining fields */
+ uint8 bcn_rpt_id; /* Bcn rpt instance ID */
+ uint8 frag_info; /* fragment Id(7 bits) | More fragments(1 bit) */
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct dot11_rmrep_bcn_frm_body_frag_id dot11_rmrep_bcn_frm_body_frag_id_t;
+#define DOT11_RMREP_BCNRPT_FRAG_ID_DATA_LEN 2u
+#define DOT11_RMREP_BCNRPT_FRAG_ID_SE_LEN sizeof(dot11_rmrep_bcn_frm_body_frag_id_t)
+#define DOT11_RMREP_BCNRPT_FRAG_ID_NUM_SHIFT 1u
+#define DOT11_RMREP_BCNRPT_FRAGMT_ID_SE_LEN sizeof(dot11_rmrep_bcn_frm_body_fragmt_id_t)
+#define DOT11_RMREP_BCNRPT_BCN_RPT_ID_MASK 0x00FFu
+#define DOT11_RMREP_BCNRPT_FRAGMT_ID_NUM_SHIFT 8u
+#define DOT11_RMREP_BCNRPT_FRAGMT_ID_NUM_MASK 0x7F00u
+#define DOT11_RMREP_BCNRPT_MORE_FRAG_SHIFT 15u
+#define DOT11_RMREP_BCNRPT_MORE_FRAG_MASK 0x8000u
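+
+/* Illustrative sketch (not part of the original header): unpack the 16-bit
+ * frag_info_rpt_id field, assuming "v" holds its host-order value.
+ *
+ *   uint8 rpt_id   = (uint8)(v & DOT11_RMREP_BCNRPT_BCN_RPT_ID_MASK);
+ *   uint8 frag_num = (uint8)((v & DOT11_RMREP_BCNRPT_FRAGMT_ID_NUM_MASK) >>
+ *                            DOT11_RMREP_BCNRPT_FRAGMT_ID_NUM_SHIFT);
+ *   bool  more     = (v & DOT11_RMREP_BCNRPT_MORE_FRAG_MASK) != 0;
+ */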
+
+BWL_PRE_PACKED_STRUCT struct dot11_rmrep_last_bcn_rpt_ind {
+ uint8 id; /* DOT11_RMREP_BCN_LAST_RPT_IND */
+ uint8 len; /* length of remaining fields */
+ uint8 data; /* data = 1 is last bcn rpt */
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct dot11_rmrep_last_bcn_rpt_ind dot11_rmrep_last_bcn_rpt_ind_t;
+#define DOT11_RMREP_LAST_BCN_RPT_IND_DATA_LEN 1
+#define DOT11_RMREP_LAST_BCN_RPT_IND_SE_LEN sizeof(dot11_rmrep_last_bcn_rpt_ind_t)
+
+/* Sub-element IDs for Frame Report */
+#define DOT11_RMREP_FRAME_COUNT_REPORT 1
+
+/* Channel load request */
+BWL_PRE_PACKED_STRUCT struct dot11_rmreq_chanload {
+ uint8 id; /* use dot11_rm_ie_t ? */
+ uint8 len;
+ uint8 token;
+ uint8 mode;
+ uint8 type;
+ uint8 reg;
+ uint8 channel;
+ uint16 interval;
+ uint16 duration;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmreq_chanload dot11_rmreq_chanload_t;
+#define DOT11_RMREQ_CHANLOAD_LEN 11
+
+/** Channel load report */
+BWL_PRE_PACKED_STRUCT struct dot11_rmrep_chanload {
+ uint8 reg;
+ uint8 channel;
+ uint32 starttime[2];
+ uint16 duration;
+ uint8 channel_load;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmrep_chanload dot11_rmrep_chanload_t;
+#define DOT11_RMREP_CHANLOAD_LEN 13
+
+/** Noise histogram request */
+BWL_PRE_PACKED_STRUCT struct dot11_rmreq_noise {
+ uint8 id; /* use dot11_rm_ie_t ? */
+ uint8 len;
+ uint8 token;
+ uint8 mode;
+ uint8 type;
+ uint8 reg;
+ uint8 channel;
+ uint16 interval;
+ uint16 duration;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmreq_noise dot11_rmreq_noise_t;
+#define DOT11_RMREQ_NOISE_LEN 11
+
+/** Noise histogram report */
+BWL_PRE_PACKED_STRUCT struct dot11_rmrep_noise {
+ uint8 reg;
+ uint8 channel;
+ uint32 starttime[2];
+ uint16 duration;
+ uint8 antid;
+ uint8 anpi;
+ uint8 ipi0_dens;
+ uint8 ipi1_dens;
+ uint8 ipi2_dens;
+ uint8 ipi3_dens;
+ uint8 ipi4_dens;
+ uint8 ipi5_dens;
+ uint8 ipi6_dens;
+ uint8 ipi7_dens;
+ uint8 ipi8_dens;
+ uint8 ipi9_dens;
+ uint8 ipi10_dens;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmrep_noise dot11_rmrep_noise_t;
+#define DOT11_RMREP_NOISE_LEN 25
+
+/** Frame request */
+BWL_PRE_PACKED_STRUCT struct dot11_rmreq_frame {
+ uint8 id; /* use dot11_rm_ie_t ? */
+ uint8 len;
+ uint8 token;
+ uint8 mode;
+ uint8 type;
+ uint8 reg;
+ uint8 channel;
+ uint16 interval;
+ uint16 duration;
+ uint8 req_type;
+ struct ether_addr ta;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmreq_frame dot11_rmreq_frame_t;
+#define DOT11_RMREQ_FRAME_LEN 18
+
+/** Frame report */
+BWL_PRE_PACKED_STRUCT struct dot11_rmrep_frame {
+ uint8 reg;
+ uint8 channel;
+ uint32 starttime[2];
+ uint16 duration;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmrep_frame dot11_rmrep_frame_t;
+#define DOT11_RMREP_FRAME_LEN 12
+
+/** Frame report entry */
+BWL_PRE_PACKED_STRUCT struct dot11_rmrep_frmentry {
+ struct ether_addr ta;
+ struct ether_addr bssid;
+ uint8 phy_type;
+ uint8 avg_rcpi;
+ uint8 last_rsni;
+ uint8 last_rcpi;
+ uint8 ant_id;
+ uint16 frame_cnt;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmrep_frmentry dot11_rmrep_frmentry_t;
+#define DOT11_RMREP_FRMENTRY_LEN 19
+
+/** STA statistics request */
+BWL_PRE_PACKED_STRUCT struct dot11_rmreq_stat {
+ uint8 id; /* use dot11_rm_ie_t ? */
+ uint8 len;
+ uint8 token;
+ uint8 mode;
+ uint8 type;
+ struct ether_addr peer;
+ uint16 interval;
+ uint16 duration;
+ uint8 group_id;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmreq_stat dot11_rmreq_stat_t;
+#define DOT11_RMREQ_STAT_LEN 16
+
+/** STA statistics report */
+BWL_PRE_PACKED_STRUCT struct dot11_rmrep_stat {
+ uint16 duration;
+ uint8 group_id;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmrep_stat dot11_rmrep_stat_t;
+
+/* Statistics Group Report: Group IDs */
+enum {
+ DOT11_RRM_STATS_GRP_ID_0 = 0,
+ DOT11_RRM_STATS_GRP_ID_1,
+ DOT11_RRM_STATS_GRP_ID_2,
+ DOT11_RRM_STATS_GRP_ID_3,
+ DOT11_RRM_STATS_GRP_ID_4,
+ DOT11_RRM_STATS_GRP_ID_5,
+ DOT11_RRM_STATS_GRP_ID_6,
+ DOT11_RRM_STATS_GRP_ID_7,
+ DOT11_RRM_STATS_GRP_ID_8,
+ DOT11_RRM_STATS_GRP_ID_9,
+ DOT11_RRM_STATS_GRP_ID_10,
+ DOT11_RRM_STATS_GRP_ID_11,
+ DOT11_RRM_STATS_GRP_ID_12,
+ DOT11_RRM_STATS_GRP_ID_13,
+ DOT11_RRM_STATS_GRP_ID_14,
+ DOT11_RRM_STATS_GRP_ID_15,
+ DOT11_RRM_STATS_GRP_ID_16
+};
+
+/* Statistics Group Report: Group Data length */
+#define DOT11_RRM_STATS_RPT_LEN_GRP_ID_0 28
+typedef struct rrm_stat_group_0 {
+ uint32 txfrag;
+ uint32 txmulti;
+ uint32 txfail;
+ uint32 rxframe;
+ uint32 rxmulti;
+ uint32 rxbadfcs;
+ uint32 txframe;
+} rrm_stat_group_0_t;
+
+#define DOT11_RRM_STATS_RPT_LEN_GRP_ID_1 24
+typedef struct rrm_stat_group_1 {
+ uint32 txretry;
+ uint32 txretries;
+ uint32 rxdup;
+ uint32 txrts;
+ uint32 rtsfail;
+ uint32 ackfail;
+} rrm_stat_group_1_t;
+
+/* groups 2-9 use the same QoS data structure (TID 0-7), 52 bytes total */
+#define DOT11_RRM_STATS_RPT_LEN_GRP_ID_2_9 52
+typedef struct rrm_stat_group_qos {
+ uint32 txfrag;
+ uint32 txfail;
+ uint32 txretry;
+ uint32 txretries;
+ uint32 rxdup;
+ uint32 txrts;
+ uint32 rtsfail;
+ uint32 ackfail;
+ uint32 rxfrag;
+ uint32 txframe;
+ uint32 txdrop;
+ uint32 rxmpdu;
+ uint32 rxretries;
+} rrm_stat_group_qos_t;
+
+/* dot11BSSAverageAccessDelay Group (only available at an AP): 8 bytes */
+#define DOT11_RRM_STATS_RPT_LEN_GRP_ID_10 8
+typedef BWL_PRE_PACKED_STRUCT struct rrm_stat_group_10 {
+ uint8 apavgdelay;
+ uint8 avgdelaybe;
+ uint8 avgdelaybg;
+ uint8 avgdelayvi;
+ uint8 avgdelayvo;
+ uint16 stacount;
+ uint8 chanutil;
+} BWL_POST_PACKED_STRUCT rrm_stat_group_10_t;
+
+/* AMSDU, 40 bytes */
+#define DOT11_RRM_STATS_RPT_LEN_GRP_ID_11 40
+typedef struct rrm_stat_group_11 {
+ uint32 txamsdu;
+ uint32 amsdufail;
+ uint32 amsduretry;
+ uint32 amsduretries;
+ uint32 txamsdubyte_h;
+ uint32 txamsdubyte_l;
+ uint32 amsduackfail;
+ uint32 rxamsdu;
+ uint32 rxamsdubyte_h;
+ uint32 rxamsdubyte_l;
+} rrm_stat_group_11_t;
+
+/* AMPDU, 36 bytes */
+#define DOT11_RRM_STATS_RPT_LEN_GRP_ID_12 36
+typedef struct rrm_stat_group_12 {
+ uint32 txampdu;
+ uint32 txmpdu;
+ uint32 txampdubyte_h;
+ uint32 txampdubyte_l;
+ uint32 rxampdu;
+ uint32 rxmpdu;
+ uint32 rxampdubyte_h;
+ uint32 rxampdubyte_l;
+ uint32 ampducrcfail;
+} rrm_stat_group_12_t;
+
+/* BACK etc, 36 bytes */
+#define DOT11_RRM_STATS_RPT_LEN_GRP_ID_13 36
+typedef struct rrm_stat_group_13 {
+ uint32 rximpbarfail;
+ uint32 rxexpbarfail;
+ uint32 chanwidthsw;
+ uint32 txframe20mhz;
+ uint32 txframe40mhz;
+ uint32 rxframe20mhz;
+ uint32 rxframe40mhz;
+ uint32 psmpgrantdur;
+ uint32 psmpuseddur;
+} rrm_stat_group_13_t;
+
+/* RD Dual CTS etc, 36 bytes */
+#define DOT11_RRM_STATS_RPT_LEN_GRP_ID_14 36
+typedef struct rrm_stat_group_14 {
+ uint32 grantrdgused;
+ uint32 grantrdgunused;
+ uint32 txframeingrantrdg;
+ uint32 txbyteingrantrdg_h;
+ uint32 txbyteingrantrdg_l;
+ uint32 dualcts;
+ uint32 dualctsfail;
+ uint32 rtslsi;
+ uint32 rtslsifail;
+} rrm_stat_group_14_t;
+
+/* bf and STBC etc, 20 bytes */
+#define DOT11_RRM_STATS_RPT_LEN_GRP_ID_15 20
+typedef struct rrm_stat_group_15 {
+ uint32 bfframe;
+ uint32 stbccts;
+ uint32 stbcctsfail;
+ uint32 nonstbccts;
+ uint32 nonstbcctsfail;
+} rrm_stat_group_15_t;
+
+/* RSNA, 28 bytes */
+#define DOT11_RRM_STATS_RPT_LEN_GRP_ID_16 28
+typedef struct rrm_stat_group_16 {
+ uint32 rsnacmacicverr;
+ uint32 rsnacmacreplay;
+ uint32 rsnarobustmgmtccmpreplay;
+ uint32 rsnatkipicverr;
+ uint32 rsnatkipicvreplay;
+ uint32 rsnaccmpdecrypterr;
+ uint32 rsnaccmpreplay;
+} rrm_stat_group_16_t;
+
+/* Transmit stream/category measurement request */
+BWL_PRE_PACKED_STRUCT struct dot11_rmreq_tx_stream {
+ uint8 id; /* use dot11_rm_ie_t ? */
+ uint8 len;
+ uint8 token;
+ uint8 mode;
+ uint8 type;
+ uint16 interval;
+ uint16 duration;
+ struct ether_addr peer;
+ uint8 traffic_id;
+ uint8 bin0_range;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmreq_tx_stream dot11_rmreq_tx_stream_t;
+#define DOT11_RMREQ_TXSTREAM_LEN 17
+
+/** Transmit stream/category measurement report */
+BWL_PRE_PACKED_STRUCT struct dot11_rmrep_tx_stream {
+ uint32 starttime[2];
+ uint16 duration;
+ struct ether_addr peer;
+ uint8 traffic_id;
+ uint8 reason;
+ uint32 txmsdu_cnt;
+ uint32 msdu_discarded_cnt;
+ uint32 msdufailed_cnt;
+ uint32 msduretry_cnt;
+ uint32 cfpolls_lost_cnt;
+ uint32 avrqueue_delay;
+ uint32 avrtx_delay;
+ uint8 bin0_range;
+ uint32 bin0;
+ uint32 bin1;
+ uint32 bin2;
+ uint32 bin3;
+ uint32 bin4;
+ uint32 bin5;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmrep_tx_stream dot11_rmrep_tx_stream_t;
+#define DOT11_RMREP_TXSTREAM_LEN 71
+
+typedef struct rrm_tscm {
+ uint32 msdu_tx;
+ uint32 msdu_exp;
+ uint32 msdu_fail;
+ uint32 msdu_retries;
+ uint32 cfpolls_lost;
+ uint32 queue_delay;
+ uint32 tx_delay_sum;
+ uint32 tx_delay_cnt;
+ uint32 bin0_range_us;
+ uint32 bin0;
+ uint32 bin1;
+ uint32 bin2;
+ uint32 bin3;
+ uint32 bin4;
+ uint32 bin5;
+} rrm_tscm_t;
+enum {
+ DOT11_FTM_LOCATION_SUBJ_LOCAL = 0, /* Where am I? */
+ DOT11_FTM_LOCATION_SUBJ_REMOTE = 1, /* Where are you? */
+ DOT11_FTM_LOCATION_SUBJ_THIRDPARTY = 2 /* Where is he/she? */
+};
+
+BWL_PRE_PACKED_STRUCT struct dot11_rmreq_ftm_lci {
+ uint8 id;
+ uint8 len;
+ uint8 token;
+ uint8 mode;
+ uint8 type;
+ uint8 subj;
+
+ /* Following 3 fields are unused. Keep for ROM compatibility. */
+ uint8 lat_res;
+ uint8 lon_res;
+ uint8 alt_res;
+
+ /* optional sub-elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmreq_ftm_lci dot11_rmreq_ftm_lci_t;
+#define DOT11_RMREQ_LCI_LEN 9
+
+BWL_PRE_PACKED_STRUCT struct dot11_rmrep_ftm_lci {
+ uint8 id;
+ uint8 len;
+ uint8 token;
+ uint8 mode;
+ uint8 type;
+ uint8 lci_sub_id;
+ uint8 lci_sub_len;
+ /* optional LCI field */
+ /* optional sub-elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmrep_ftm_lci dot11_rmrep_ftm_lci_t;
+
+#define DOT11_FTM_LCI_SUBELEM_ID 0
+#define DOT11_FTM_LCI_SUBELEM_LEN 2
+#define DOT11_FTM_LCI_FIELD_LEN 16
+#define DOT11_FTM_LCI_UNKNOWN_LEN 2
+
+BWL_PRE_PACKED_STRUCT struct dot11_rmreq_ftm_civic {
+ uint8 id;
+ uint8 len;
+ uint8 token;
+ uint8 mode;
+ uint8 type;
+ uint8 subj;
+ uint8 civloc_type;
+ uint8 siu; /* service interval units */
+ uint16 si; /* service interval */
+ /* optional sub-elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmreq_ftm_civic dot11_rmreq_ftm_civic_t;
+#define DOT11_RMREQ_CIVIC_LEN 10
+
+BWL_PRE_PACKED_STRUCT struct dot11_rmrep_ftm_civic {
+ uint8 id;
+ uint8 len;
+ uint8 token;
+ uint8 mode;
+ uint8 type;
+ uint8 civloc_type;
+ uint8 civloc_sub_id;
+ uint8 civloc_sub_len;
+ /* optional location civic field */
+ /* optional sub-elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmrep_ftm_civic dot11_rmrep_ftm_civic_t;
+
+#define DOT11_FTM_CIVIC_LOC_TYPE_RFC4776 0
+#define DOT11_FTM_CIVIC_SUBELEM_ID 0
+#define DOT11_FTM_CIVIC_SUBELEM_LEN 2
+#define DOT11_FTM_CIVIC_LOC_SI_NONE 0
+#define DOT11_FTM_CIVIC_TYPE_LEN 1
+#define DOT11_FTM_CIVIC_UNKNOWN_LEN 3
+
+/* Location Identifier measurement request */
+BWL_PRE_PACKED_STRUCT struct dot11_rmreq_locid {
+ uint8 id;
+ uint8 len;
+ uint8 token;
+ uint8 mode;
+ uint8 type;
+ uint8 subj;
+ uint8 siu;
+ uint16 si;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmreq_locid dot11_rmreq_locid_t;
+#define DOT11_RMREQ_LOCID_LEN 9
+
+/* Location Identifier measurement report */
+BWL_PRE_PACKED_STRUCT struct dot11_rmrep_locid {
+ uint8 id;
+ uint8 len;
+ uint8 token;
+ uint8 mode;
+ uint8 type;
+ uint8 exp_tsf[8];
+ uint8 locid_sub_id;
+ uint8 locid_sub_len;
+ /* optional location identifier field */
+ /* optional sub-elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmrep_locid dot11_rmrep_locid_t;
+#define DOT11_LOCID_UNKNOWN_LEN 10
+#define DOT11_LOCID_SUBELEM_ID 0
+
+BWL_PRE_PACKED_STRUCT struct dot11_ftm_range_subel {
+ uint8 id;
+ uint8 len;
+ uint16 max_age;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ftm_range_subel dot11_ftm_range_subel_t;
+#define DOT11_FTM_RANGE_SUBELEM_ID 4
+#define DOT11_FTM_RANGE_SUBELEM_LEN 2
+
+BWL_PRE_PACKED_STRUCT struct dot11_rmreq_ftm_range {
+ uint8 id;
+ uint8 len;
+ uint8 token;
+ uint8 mode;
+ uint8 type;
+ uint16 max_init_delay; /* maximum random initial delay */
+ uint8 min_ap_count;
+ uint8 data[1];
+ /* neighbor report sub-elements */
+ /* optional sub-elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmreq_ftm_range dot11_rmreq_ftm_range_t;
+#define DOT11_RMREQ_FTM_RANGE_LEN 8
+
+#define DOT11_FTM_RANGE_LEN 3
+BWL_PRE_PACKED_STRUCT struct dot11_ftm_range_entry {
+ uint32 start_tsf; /* 4 lsb of tsf */
+ struct ether_addr bssid;
+ uint8 range[DOT11_FTM_RANGE_LEN];
+ uint8 max_err[DOT11_FTM_RANGE_LEN];
+ uint8 rsvd;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ftm_range_entry dot11_ftm_range_entry_t;
+#define DOT11_FTM_RANGE_ENTRY_MAX_COUNT 15
+
+enum {
+ DOT11_FTM_RANGE_ERROR_AP_INCAPABLE = 2,
+ DOT11_FTM_RANGE_ERROR_AP_FAILED = 3,
+ DOT11_FTM_RANGE_ERROR_TX_FAILED = 8,
+ DOT11_FTM_RANGE_ERROR_MAX
+};
+
+BWL_PRE_PACKED_STRUCT struct dot11_ftm_range_error_entry {
+ uint32 start_tsf; /* 4 lsb of tsf */
+ struct ether_addr bssid;
+ uint8 code;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ftm_range_error_entry dot11_ftm_range_error_entry_t;
+#define DOT11_FTM_RANGE_ERROR_ENTRY_MAX_COUNT 11
+
+BWL_PRE_PACKED_STRUCT struct dot11_rmrep_ftm_range {
+ uint8 id;
+ uint8 len;
+ uint8 token;
+ uint8 mode;
+ uint8 type;
+ uint8 entry_count;
+ uint8 data[2]; /* includes pad */
+ /*
+ dot11_ftm_range_entry_t entries[entry_count];
+ uint8 error_count;
+ dot11_ftm_error_entry_t errors[error_count];
+ */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmrep_ftm_range dot11_rmrep_ftm_range_t;
+
+#define DOT11_FTM_RANGE_REP_MIN_LEN 6 /* No extra byte for error_count */
+#define DOT11_FTM_RANGE_ENTRY_CNT_MAX 15
+#define DOT11_FTM_RANGE_ERROR_CNT_MAX 11
+#define DOT11_FTM_RANGE_REP_FIXED_LEN 1 /* No extra byte for error_count */
+/** Measurement pause request */
+BWL_PRE_PACKED_STRUCT struct dot11_rmreq_pause_time {
+ uint8 id; /* use dot11_rm_ie_t ? */
+ uint8 len;
+ uint8 token;
+ uint8 mode;
+ uint8 type;
+ uint16 pause_time;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmreq_pause_time dot11_rmreq_pause_time_t;
+#define DOT11_RMREQ_PAUSE_LEN 7
+
+/* Neighbor Report subelements ID (11k & 11v) */
+#define DOT11_NGBR_TSF_INFO_SE_ID 1
+#define DOT11_NGBR_CCS_SE_ID 2
+#define DOT11_NGBR_BSSTRANS_PREF_SE_ID 3
+#define DOT11_NGBR_BSS_TERM_DUR_SE_ID 4
+#define DOT11_NGBR_BEARING_SE_ID 5
+#define DOT11_NGBR_WIDE_BW_CHAN_SE_ID 6 /* proposed */
+
+/** Neighbor Report, BSS Transition Candidate Preference subelement */
+BWL_PRE_PACKED_STRUCT struct dot11_ngbr_bsstrans_pref_se {
+ uint8 sub_id;
+ uint8 len;
+ uint8 preference;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ngbr_bsstrans_pref_se dot11_ngbr_bsstrans_pref_se_t;
+#define DOT11_NGBR_BSSTRANS_PREF_SE_LEN 1
+#define DOT11_NGBR_BSSTRANS_PREF_SE_IE_LEN 3
+#define DOT11_NGBR_BSSTRANS_PREF_SE_HIGHEST 0xff
+
+/** Neighbor Report, BSS Termination Duration subelement */
+BWL_PRE_PACKED_STRUCT struct dot11_ngbr_bss_term_dur_se {
+ uint8 sub_id;
+ uint8 len;
+ uint8 tsf[8];
+ uint16 duration;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ngbr_bss_term_dur_se dot11_ngbr_bss_term_dur_se_t;
+#define DOT11_NGBR_BSS_TERM_DUR_SE_LEN 10
+
+/* Neighbor Report BSSID Information Field */
+#define DOT11_NGBR_BI_REACHABILTY_UNKN 0x0002
+#define DOT11_NGBR_BI_REACHABILTY 0x0003
+#define DOT11_NGBR_BI_SEC 0x0004
+#define DOT11_NGBR_BI_KEY_SCOPE 0x0008
+#define DOT11_NGBR_BI_CAP 0x03f0
+#define DOT11_NGBR_BI_CAP_SPEC_MGMT 0x0010
+#define DOT11_NGBR_BI_CAP_QOS 0x0020
+#define DOT11_NGBR_BI_CAP_APSD 0x0040
+#define DOT11_NGBR_BI_CAP_RDIO_MSMT 0x0080
+#define DOT11_NGBR_BI_CAP_DEL_BA 0x0100
+#define DOT11_NGBR_BI_CAP_IMM_BA 0x0200
+#define DOT11_NGBR_BI_MOBILITY 0x0400
+#define DOT11_NGBR_BI_HT 0x0800
+#define DOT11_NGBR_BI_VHT 0x1000
+#define DOT11_NGBR_BI_FTM 0x2000
+
+/** Neighbor Report element (11k & 11v) */
+BWL_PRE_PACKED_STRUCT struct dot11_neighbor_rep_ie {
+ uint8 id;
+ uint8 len;
+ struct ether_addr bssid;
+ uint32 bssid_info;
+ uint8 reg; /* Operating class */
+ uint8 channel;
+ uint8 phytype;
+ uint8 data[1]; /* Variable size subelements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_neighbor_rep_ie dot11_neighbor_rep_ie_t;
+#define DOT11_NEIGHBOR_REP_IE_FIXED_LEN 13u
+
+/* MLME Enumerations */
+#define DOT11_BSSTYPE_INFRASTRUCTURE 0 /* d11 infrastructure */
+#define DOT11_BSSTYPE_INDEPENDENT 1 /* d11 independent */
+#define DOT11_BSSTYPE_ANY 2 /* d11 any BSS type */
+#define DOT11_BSSTYPE_MESH 3 /* d11 Mesh */
+#define DOT11_SCANTYPE_ACTIVE 0 /* d11 scan active */
+#define DOT11_SCANTYPE_PASSIVE 1 /* d11 scan passive */
+
+/** Link Measurement */
+BWL_PRE_PACKED_STRUCT struct dot11_lmreq {
+ uint8 category; /* category of action frame (5) */
+ uint8 action; /* radio measurement action */
+ uint8 token; /* dialog token */
+ uint8 txpwr; /* Transmit Power Used */
+ uint8 maxtxpwr; /* Max Transmit Power */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_lmreq dot11_lmreq_t;
+#define DOT11_LMREQ_LEN 5
+
+BWL_PRE_PACKED_STRUCT struct dot11_lmrep {
+ uint8 category; /* category of action frame (5) */
+ uint8 action; /* radio measurement action */
+ uint8 token; /* dialog token */
+ dot11_tpc_rep_t tpc; /* TPC element */
+ uint8 rxant; /* Receive Antenna ID */
+ uint8 txant; /* Transmit Antenna ID */
+ uint8 rcpi; /* RCPI */
+ uint8 rsni; /* RSNI */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_lmrep dot11_lmrep_t;
+#define DOT11_LMREP_LEN 11
+
+#define DOT11_MP_CAP_SPECTRUM 0x01 /* d11 cap. spectrum */
+#define DOT11_MP_CAP_SHORTSLOT 0x02 /* d11 cap. shortslot */
+/* Measurement Pilot */
+BWL_PRE_PACKED_STRUCT struct dot11_mprep {
+ uint8 cap_info; /* Condensed capability Info. */
+ uint8 country[2]; /* Condensed country string */
+ uint8 opclass; /* Op. Class */
+ uint8 channel; /* Channel */
+ uint8 mp_interval; /* Measurement Pilot Interval */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_mprep dot11_mprep_t;
+#define DOT11_MPREP_LEN 6
+
+/* 802.11 BRCM "Compromise" Pre N constants */
+#define PREN_PREAMBLE 24 /* greenfield preamble time */
+#define PREN_MM_EXT 12 /* extra mixed mode preamble time */
+#define PREN_PREAMBLE_EXT 4 /* extra preamble (multiply by unique_streams-1) */
+
+/* 802.11N PHY constants */
+#define RIFS_11N_TIME 2 /* NPHY RIFS time */
+
+/* 802.11 HT PLCP format 802.11n-2009, sec 20.3.9.4.3
+ * HT-SIG is composed of two 24 bit parts, HT-SIG1 and HT-SIG2
+ */
+/* HT-SIG1 */
+#define HT_SIG1_MCS_MASK 0x00007F
+#define HT_SIG1_CBW 0x000080
+#define HT_SIG1_HT_LENGTH 0xFFFF00
+
+/* HT-SIG2 */
+#define HT_SIG2_SMOOTHING 0x000001
+#define HT_SIG2_NOT_SOUNDING 0x000002
+#define HT_SIG2_RESERVED 0x000004
+#define HT_SIG2_AGGREGATION 0x000008
+#define HT_SIG2_STBC_MASK 0x000030
+#define HT_SIG2_STBC_SHIFT 4
+#define HT_SIG2_FEC_CODING 0x000040
+#define HT_SIG2_SHORT_GI 0x000080
+#define HT_SIG2_ESS_MASK 0x000300
+#define HT_SIG2_ESS_SHIFT 8
+#define HT_SIG2_CRC 0x03FC00
+#define HT_SIG2_TAIL 0x1C0000
+
+/* HT Timing-related parameters (802.11-2012, sec 20.3.6) */
+#define HT_T_LEG_PREAMBLE 16
+#define HT_T_L_SIG 4
+#define HT_T_SIG 8
+#define HT_T_LTF1 4
+#define HT_T_GF_LTF1 8
+#define HT_T_LTFs 4
+#define HT_T_STF 4
+#define HT_T_GF_STF 8
+#define HT_T_SYML 4
+
+#define HT_N_SERVICE 16 /* bits in SERVICE field */
+#define HT_N_TAIL 6 /* tail bits per BCC encoder */
+
+/* 802.11 A PHY constants */
+#define APHY_SLOT_TIME 9 /* APHY slot time */
+#define APHY_SIFS_TIME 16 /* APHY SIFS time */
+#define APHY_DIFS_TIME (APHY_SIFS_TIME + (2 * APHY_SLOT_TIME)) /* APHY DIFS time */
+#define APHY_PREAMBLE_TIME 16 /* APHY preamble time */
+#define APHY_SIGNAL_TIME 4 /* APHY signal time */
+#define APHY_SYMBOL_TIME 4 /* APHY symbol time */
+#define APHY_SERVICE_NBITS 16 /* APHY service nbits */
+#define APHY_TAIL_NBITS 6 /* APHY tail nbits */
+#define APHY_CWMIN 15 /* APHY cwmin */
+#define APHY_PHYHDR_DUR 20 /* APHY PHY Header Duration */
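+
+/*
+ * Illustrative sketch (not part of the driver): the APHY constants above
+ * combine into the standard 802.11a PPDU duration formula. "ndbps" (data
+ * bits per OFDM symbol, e.g. 216 at 54 Mbps) is an assumed caller-supplied
+ * value.
+ *
+ * static uint32 aphy_ppdu_dur_us(uint32 body_len, uint32 ndbps)
+ * {
+ *     uint32 nbits = APHY_SERVICE_NBITS + (8u * body_len) + APHY_TAIL_NBITS;
+ *     uint32 nsyms = (nbits + ndbps - 1u) / ndbps;    // ceiling division
+ *     return APHY_PREAMBLE_TIME + APHY_SIGNAL_TIME + (nsyms * APHY_SYMBOL_TIME);
+ * }
+ */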
+
+/* 802.11 B PHY constants */
+#define BPHY_SLOT_TIME 20 /* BPHY slot time */
+#define BPHY_SIFS_TIME 10 /* BPHY SIFS time */
+#define BPHY_DIFS_TIME 50 /* BPHY DIFS time */
+#define BPHY_PLCP_TIME 192 /* BPHY PLCP time */
+#define BPHY_PLCP_SHORT_TIME 96 /* BPHY PLCP short time */
+#define BPHY_CWMIN 31 /* BPHY cwmin */
+#define BPHY_SHORT_PHYHDR_DUR 96 /* BPHY Short PHY Header Duration */
+#define BPHY_LONG_PHYHDR_DUR 192 /* BPHY Long PHY Header Duration */
+
+/* 802.11 G constants */
+#define DOT11_OFDM_SIGNAL_EXTENSION 6 /* d11 OFDM signal extension */
+
+#define PHY_CWMAX 1023 /* PHY cwmax */
+
+#define DOT11_MAXNUMFRAGS 16 /* max # fragments per MSDU */
+
+/* 802.11 VHT constants */
+
+typedef int vht_group_id_t;
+
+/* for VHT-A1 */
+/* SIG-A1 reserved bits */
+#define VHT_SIGA1_CONST_MASK 0x800004
+
+#define VHT_SIGA1_BW_MASK 0x000003
+#define VHT_SIGA1_20MHZ_VAL 0x000000
+#define VHT_SIGA1_40MHZ_VAL 0x000001
+#define VHT_SIGA1_80MHZ_VAL 0x000002
+#define VHT_SIGA1_160MHZ_VAL 0x000003
+
+#define VHT_SIGA1_STBC 0x000008
+
+#define VHT_SIGA1_GID_MASK 0x0003f0
+#define VHT_SIGA1_GID_SHIFT 4
+#define VHT_SIGA1_GID_TO_AP 0x00
+#define VHT_SIGA1_GID_NOT_TO_AP 0x3f
+#define VHT_SIGA1_GID_MAX_GID 0x3f
+
+#define VHT_SIGA1_NSTS_SHIFT_MASK_USER0 0x001C00
+#define VHT_SIGA1_NSTS_SHIFT 10
+#define VHT_SIGA1_MAX_USERPOS 3
+
+#define VHT_SIGA1_PARTIAL_AID_MASK 0x3fe000
+#define VHT_SIGA1_PARTIAL_AID_SHIFT 13
+
+#define VHT_SIGA1_TXOP_PS_NOT_ALLOWED 0x400000
+
+/* for VHT-A2 */
+#define VHT_SIGA2_GI_NONE 0x000000
+#define VHT_SIGA2_GI_SHORT 0x000001
+#define VHT_SIGA2_GI_W_MOD10 0x000002
+#define VHT_SIGA2_CODING_LDPC 0x000004
+#define VHT_SIGA2_LDPC_EXTRA_OFDM_SYM 0x000008
+#define VHT_SIGA2_BEAMFORM_ENABLE 0x000100
+#define VHT_SIGA2_MCS_SHIFT 4
+
+#define VHT_SIGA2_B9_RESERVED 0x000200
+#define VHT_SIGA2_TAIL_MASK 0xfc0000
+#define VHT_SIGA2_TAIL_VALUE 0x000000
+
+/* VHT Timing-related parameters (802.11ac D4.0, sec 22.3.6) */
+#define VHT_T_LEG_PREAMBLE 16
+#define VHT_T_L_SIG 4
+#define VHT_T_SIG_A 8
+#define VHT_T_LTF 4
+#define VHT_T_STF 4
+#define VHT_T_SIG_B 4
+#define VHT_T_SYML 4
+
+#define VHT_N_SERVICE 16 /* bits in SERVICE field */
+#define VHT_N_TAIL 6 /* tail bits per BCC encoder */
+
+/** dot11Counters Table - 802.11 spec., Annex D */
+typedef struct d11cnt {
+ uint32 txfrag; /* dot11TransmittedFragmentCount */
+ uint32 txmulti; /* dot11MulticastTransmittedFrameCount */
+ uint32 txfail; /* dot11FailedCount */
+ uint32 txretry; /* dot11RetryCount */
+ uint32 txretrie; /* dot11MultipleRetryCount */
+ uint32 rxdup; /* dot11FrameduplicateCount */
+ uint32 txrts; /* dot11RTSSuccessCount */
+ uint32 txnocts; /* dot11RTSFailureCount */
+ uint32 txnoack; /* dot11ACKFailureCount */
+ uint32 rxfrag; /* dot11ReceivedFragmentCount */
+ uint32 rxmulti; /* dot11MulticastReceivedFrameCount */
+ uint32 rxcrc; /* dot11FCSErrorCount */
+ uint32 txfrmsnt; /* dot11TransmittedFrameCount */
+ uint32 rxundec; /* dot11WEPUndecryptableCount */
+} d11cnt_t;
+
+/* OUI for BRCM proprietary IE */
+#define BRCM_PROP_OUI "\x00\x90\x4C" /* Broadcom proprietary OUI */
+
+/* Broadcom Proprietary OUI type list. Please update the page below when adding a new type.
+ * Twiki http://hwnbu-twiki.sj.broadcom.com/bin/view/Mwgroup/WlBrcmPropIE
+ */
+/* The following BRCM_PROP_OUI types are currently in use (defined in
+ * relevant subsections). Each of them will be in a separate proprietary(221) IE
+ * #define RWL_WIFI_DEFAULT 0
+ * #define SES_VNDR_IE_TYPE 1 (defined in src/ses/shared/ses.h)
+ * #define VHT_FEATURES_IE_TYPE 4
+ * #define RWL_WIFI_FIND_MY_PEER 9
+ * #define RWL_WIFI_FOUND_PEER 10
+ * #define PROXD_IE_TYPE 11
+ */
+
+#define BRCM_FTM_IE_TYPE 14
+
+/* #define HT_CAP_IE_TYPE 51
+ * #define HT_ADD_IE_TYPE 52
+ * #define BRCM_EXTCH_IE_TYPE 53
+ * #define MEMBER_OF_BRCM_PROP_IE_TYPE 54
+ * #define BRCM_RELMACST_IE_TYPE 55
+ * #define BRCM_EVT_WL_BSS_INFO 64
+ * #define RWL_ACTION_WIFI_FRAG_TYPE 85
+ * #define BTC_INFO_BRCM_PROP_IE_TYPE 90
+ * #define ULB_BRCM_PROP_IE_TYPE 91
+ * #define SDB_BRCM_PROP_IE_TYPE 92
+ */
+
+/* Action frame type for RWL */
+#define RWL_WIFI_DEFAULT 0
+#define RWL_WIFI_FIND_MY_PEER 9 /* Used while finding server */
+#define RWL_WIFI_FOUND_PEER 10 /* Server response to the client */
+#define RWL_ACTION_WIFI_FRAG_TYPE 85 /* Fragment indicator for receiver */
+
+#define PROXD_AF_TYPE 11 /* Wifi proximity action frame type */
+#define BRCM_RELMACST_AF_TYPE 12 /* RMC action frame type */
+
+/* Action frame type for FTM Initiator Report */
+#define BRCM_FTM_VS_AF_TYPE 14
+enum {
+ BRCM_FTM_VS_INITIATOR_RPT_SUBTYPE = 1, /* FTM Initiator Report */
+ BRCM_FTM_VS_COLLECT_SUBTYPE = 2, /* FTM Collect debug protocol */
+};
+
+/* Action frame type for vendor specific action frames */
+#define VS_AF_TYPE 221
+
+#ifdef WL_VS_AFTX
+/* Vendor specific action frame subtype for transmit using SU EDCA */
+#define VS_AF_SUBTYPE_SUEDCA 1
+
+#define VENDOR_PROP_OUI "\x00\x17\xF2"
+#endif /* WL_VS_AFTX */
+
+/*
+ * This BRCM_PROP_OUI type is intended for use in events to embed additional
+ * data, and would not be expected to appear on the air -- but having an IE
+ * format lets events carry extra data alongside IE frame data, which allows
+ * for more flexible parsing.
+ */
+#define BRCM_EVT_WL_BSS_INFO 64
+
+/**
+ * Following is the generic structure for brcm_prop_ie (uses BRCM_PROP_OUI).
+ * DPT uses this format with type set to DPT_IE_TYPE
+ */
+BWL_PRE_PACKED_STRUCT struct brcm_prop_ie_s {
+ uint8 id; /* IE ID, 221, DOT11_MNG_PROPR_ID */
+ uint8 len; /* IE length */
+ uint8 oui[3]; /* Proprietary OUI, BRCM_PROP_OUI */
+ uint8 type; /* type of this IE */
+ uint16 cap; /* DPT capabilities */
+} BWL_POST_PACKED_STRUCT;
+typedef struct brcm_prop_ie_s brcm_prop_ie_t;
+
+#define BRCM_PROP_IE_LEN 6 /* len of fixed part of brcm_prop ie */
+
+#define DPT_IE_TYPE 2
+
+#define BRCM_SYSCAP_IE_TYPE 3
+#define WET_TUNNEL_IE_TYPE 3
+
+/* brcm syscap_ie cap */
+#define BRCM_SYSCAP_WET_TUNNEL 0x0100 /* Device with WET_TUNNEL support */
+
+/* BRCM OUI: Used in the proprietary(221) IE in all broadcom devices */
+#define BRCM_OUI "\x00\x10\x18" /* Broadcom OUI */
+
+/** BRCM info element */
+BWL_PRE_PACKED_STRUCT struct brcm_ie {
+ uint8 id; /* IE ID, 221, DOT11_MNG_PROPR_ID */
+ uint8 len; /* IE length */
+ uint8 oui[3]; /* Proprietary OUI, BRCM_OUI */
+ uint8 ver; /* type/ver of this IE */
+ uint8 assoc; /* # of assoc STAs */
+ uint8 flags; /* misc flags */
+ uint8 flags1; /* misc flags */
+ uint16 amsdu_mtu_pref; /* preferred A-MSDU MTU */
+ uint8 flags2; /* Bit 0: DTPC TX cap, Bit 1: DTPC Recv Cap */
+} BWL_POST_PACKED_STRUCT;
+typedef struct brcm_ie brcm_ie_t;
+#define BRCM_IE_LEN 12u /* BRCM IE length */
+#define BRCM_IE_VER 2u /* BRCM IE version */
+#define BRCM_IE_LEGACY_AES_VER 1u /* BRCM IE legacy AES version */
+
+/* brcm_ie flags */
+#define BRF_ABCAP 0x1 /* afterburner is obsolete, defined for backward compat */
+#define BRF_ABRQRD 0x2 /* afterburner is obsolete, defined for backward compat */
+#define BRF_LZWDS 0x4 /* lazy wds enabled */
+#define BRF_BLOCKACK 0x8 /* BlockACK capable */
+#define BRF_ABCOUNTER_MASK 0xf0 /* afterburner is obsolete, defined for backward compat */
+#define BRF_PROP_11N_MCS 0x10 /* re-use afterburner bit */
+#define BRF_MEDIA_CLIENT 0x20 /* re-use afterburner bit to indicate media client device */
+
+/**
+ * Support for Broadcom proprietary HT MCS rates. Re-uses afterburner bits since
+ * afterburner is not used anymore. Checks for BRF_ABCAP to stay compliant with 'old'
+ * images in the field.
+ */
+#define GET_BRF_PROP_11N_MCS(brcm_ie) \
+ (!((brcm_ie)->flags & BRF_ABCAP) && ((brcm_ie)->flags & BRF_PROP_11N_MCS))
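+
+/*
+ * Usage sketch (illustrative): "ie" is assumed to point at an already
+ * validated brcm_ie_t, e.g. located by a vendor-IE scan of a beacon.
+ *
+ * if (GET_BRF_PROP_11N_MCS(ie)) {
+ *     // peer advertises Broadcom proprietary HT MCS rates
+ * }
+ */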
+
+/* brcm_ie flags1 */
+#define BRF1_AMSDU 0x1 /* A-MSDU capable */
+#define BRF1_WNM 0x2 /* WNM capable */
+#define BRF1_WMEPS 0x4 /* AP is capable of handling WME + PS w/o APSD */
+#define BRF1_PSOFIX 0x8 /* AP has fixed PS mode out-of-order packets */
+#define BRF1_RX_LARGE_AGG 0x10 /* device can rx large aggregates */
+#define BRF1_RFAWARE_DCS 0x20 /* RFAWARE dynamic channel selection (DCS) */
+#define BRF1_SOFTAP 0x40 /* Configure as Broadcom SOFTAP */
+#define BRF1_DWDS 0x80 /* DWDS capable */
+
+/* brcm_ie flags2 */
+#define BRF2_DTPC_TX 0x1u /* DTPC: DTPC TX Cap */
+#define BRF2_DTPC_RX 0x2u /* DTPC: DTPC RX Cap */
+#define BRF2_DTPC_TX_RX 0x3u /* DTPC: Enable Both DTPC TX and RX Cap */
+
+/** Vendor IE structure */
+BWL_PRE_PACKED_STRUCT struct vndr_ie {
+ uchar id;
+ uchar len;
+ uchar oui [3];
+ uchar data [1]; /* Variable size data */
+} BWL_POST_PACKED_STRUCT;
+typedef struct vndr_ie vndr_ie_t;
+
+#define VNDR_IE_HDR_LEN 2u /* id + len field */
+#define VNDR_IE_MIN_LEN 3u /* size of the oui field */
+#define VNDR_IE_FIXED_LEN (VNDR_IE_HDR_LEN + VNDR_IE_MIN_LEN)
+
+#define VNDR_IE_MAX_LEN 255u /* vendor IE max length, without ID and len */
+
+/** BRCM PROP DEVICE PRIMARY MAC ADDRESS IE */
+BWL_PRE_PACKED_STRUCT struct member_of_brcm_prop_ie {
+ uchar id;
+ uchar len;
+ uchar oui[3];
+ uint8 type; /* type indicates what follows */
+ struct ether_addr ea; /* Device Primary MAC Address */
+} BWL_POST_PACKED_STRUCT;
+typedef struct member_of_brcm_prop_ie member_of_brcm_prop_ie_t;
+
+#define MEMBER_OF_BRCM_PROP_IE_LEN 10 /* fixed IE data length: OUI + type + MAC address */
+#define MEMBER_OF_BRCM_PROP_IE_HDRLEN (sizeof(member_of_brcm_prop_ie_t))
+#define MEMBER_OF_BRCM_PROP_IE_TYPE 54 /* used in prop IE 221 only */
+
+/** BRCM Reliable Multicast IE */
+BWL_PRE_PACKED_STRUCT struct relmcast_brcm_prop_ie {
+ uint8 id;
+ uint8 len;
+ uint8 oui[3];
+ uint8 type; /* type indicates what follows */
+ struct ether_addr ea; /* The ack sender's MAC Address */
+ struct ether_addr mcast_ea; /* The multicast MAC address */
+ uint8 updtmo; /* time interval (seconds) for the client to send a null packet reporting its RSSI */
+} BWL_POST_PACKED_STRUCT;
+typedef struct relmcast_brcm_prop_ie relmcast_brcm_prop_ie_t;
+
+/* IE length: sizeof(relmcast_brcm_prop_ie_t) minus the id and len fields */
+#define RELMCAST_BRCM_PROP_IE_LEN (sizeof(relmcast_brcm_prop_ie_t) - (2 * sizeof(uint8)))
+
+#define RELMCAST_BRCM_PROP_IE_TYPE 55 /* used in prop IE 221 only */
+
+/* BRCM BTC IE */
+BWL_PRE_PACKED_STRUCT struct btc_brcm_prop_ie {
+ uint8 id;
+ uint8 len;
+ uint8 oui[3];
+ uint8 type; /* type indicates what follows */
+ uint32 info;
+} BWL_POST_PACKED_STRUCT;
+typedef struct btc_brcm_prop_ie btc_brcm_prop_ie_t;
+
+#define BTC_INFO_BRCM_PROP_IE_TYPE 90
+#define BRCM_BTC_INFO_TYPE_LEN (sizeof(btc_brcm_prop_ie_t) - (2 * sizeof(uint8)))
+
+/* ************* HT definitions. ************* */
+#define MCSSET_LEN 16 /* 16 8-bit sets give a 128-bit bitmap of MCS indexes */
+#define MAX_MCS_NUM (128) /* max mcs number = 128 */
+#define BASIC_HT_MCS 0xFFu /* HT MCS supported rates */
+
+BWL_PRE_PACKED_STRUCT struct ht_cap_ie {
+ uint16 cap;
+ uint8 params;
+ uint8 supp_mcs[MCSSET_LEN];
+ uint16 ext_htcap;
+ uint32 txbf_cap;
+ uint8 as_cap;
+} BWL_POST_PACKED_STRUCT;
+typedef struct ht_cap_ie ht_cap_ie_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_ht_cap_ie {
+ uint8 id;
+ uint8 len;
+ ht_cap_ie_t ht_cap;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ht_cap_ie dot11_ht_cap_ie_t;
+
+/* CAP IE: HT 1.0 spec. simply stole an 802.11 IE, we use our prop. IE until this is resolved */
+/* the capability IE is primarily used to convey this node's abilities */
+BWL_PRE_PACKED_STRUCT struct ht_prop_cap_ie {
+ uint8 id; /* IE ID, 221, DOT11_MNG_PROPR_ID */
+ uint8 len; /* IE length */
+ uint8 oui[3]; /* Proprietary OUI, BRCM_PROP_OUI */
+ uint8 type; /* type indicates what follows */
+ ht_cap_ie_t cap_ie;
+} BWL_POST_PACKED_STRUCT;
+typedef struct ht_prop_cap_ie ht_prop_cap_ie_t;
+
+#define HT_PROP_IE_OVERHEAD 4 /* overhead bytes for prop oui ie */
+#define HT_CAP_IE_LEN 26 /* HT capability len (based on .11n d2.0) */
+#define HT_CAP_IE_TYPE 51 /* used in prop IE 221 only */
+
+#define HT_CAP_LDPC_CODING 0x0001 /* Support for rx of LDPC coded pkts */
+#define HT_CAP_40MHZ 0x0002 /* FALSE:20Mhz, TRUE:20/40MHZ supported */
+#define HT_CAP_MIMO_PS_MASK 0x000C /* Mimo PS mask */
+#define HT_CAP_MIMO_PS_SHIFT 0x0002 /* Mimo PS shift */
+#define HT_CAP_MIMO_PS_OFF 0x0003 /* Mimo PS, no restriction */
+#define HT_CAP_MIMO_PS_RTS 0x0001 /* Mimo PS, send RTS/CTS around MIMO frames */
+#define HT_CAP_MIMO_PS_ON 0x0000 /* Mimo PS, MIMO disallowed */
+#define HT_CAP_GF 0x0010 /* Greenfield preamble support */
+#define HT_CAP_SHORT_GI_20 0x0020 /* 20MHZ short guard interval support */
+#define HT_CAP_SHORT_GI_40 0x0040 /* 40Mhz short guard interval support */
+#define HT_CAP_TX_STBC 0x0080 /* Tx STBC support */
+#define HT_CAP_RX_STBC_MASK 0x0300 /* Rx STBC mask */
+#define HT_CAP_RX_STBC_SHIFT 8 /* Rx STBC shift */
+#define HT_CAP_DELAYED_BA 0x0400 /* delayed BA support */
+#define HT_CAP_MAX_AMSDU 0x0800 /* Max AMSDU size in bytes , 0=3839, 1=7935 */
+
+#define HT_CAP_DSSS_CCK 0x1000 /* DSSS/CCK supported by the BSS */
+#define HT_CAP_PSMP 0x2000 /* Power Save Multi Poll support */
+#define HT_CAP_40MHZ_INTOLERANT 0x4000 /* 40MHz Intolerant */
+#define HT_CAP_LSIG_TXOP 0x8000 /* L-SIG TXOP protection support */
+
+#define HT_CAP_RX_STBC_NO 0x0 /* no rx STBC support */
+#define HT_CAP_RX_STBC_ONE_STREAM 0x1 /* rx STBC support of 1 spatial stream */
+#define HT_CAP_RX_STBC_TWO_STREAM 0x2 /* rx STBC support of 1-2 spatial streams */
+#define HT_CAP_RX_STBC_THREE_STREAM 0x3 /* rx STBC support of 1-3 spatial streams */
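+
+/*
+ * Illustrative sketch: decoding the rx STBC stream support from a
+ * host-endian HT capability word "cap" (not a driver API).
+ *
+ * uint16 rx_stbc = (cap & HT_CAP_RX_STBC_MASK) >> HT_CAP_RX_STBC_SHIFT;
+ * // yields HT_CAP_RX_STBC_NO .. HT_CAP_RX_STBC_THREE_STREAM
+ */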
+
+#define HT_CAP_TXBF_CAP_IMPLICIT_TXBF_RX 0x1
+#define HT_CAP_TXBF_CAP_NDP_RX 0x8
+#define HT_CAP_TXBF_CAP_NDP_TX 0x10
+#define HT_CAP_TXBF_CAP_EXPLICIT_CSI 0x100
+#define HT_CAP_TXBF_CAP_EXPLICIT_NC_STEERING 0x200
+#define HT_CAP_TXBF_CAP_EXPLICIT_C_STEERING 0x400
+#define HT_CAP_TXBF_CAP_EXPLICIT_CSI_FB_MASK 0x1800
+#define HT_CAP_TXBF_CAP_EXPLICIT_CSI_FB_SHIFT 11
+#define HT_CAP_TXBF_CAP_EXPLICIT_NC_FB_MASK 0x6000
+#define HT_CAP_TXBF_CAP_EXPLICIT_NC_FB_SHIFT 13
+#define HT_CAP_TXBF_CAP_EXPLICIT_C_FB_MASK 0x18000
+#define HT_CAP_TXBF_CAP_EXPLICIT_C_FB_SHIFT 15
+#define HT_CAP_TXBF_CAP_CSI_BFR_ANT_SHIFT 19
+#define HT_CAP_TXBF_CAP_NC_BFR_ANT_SHIFT 21
+#define HT_CAP_TXBF_CAP_C_BFR_ANT_SHIFT 23
+#define HT_CAP_TXBF_CAP_C_BFR_ANT_MASK 0x1800000
+
+#define HT_CAP_TXBF_CAP_CHAN_ESTIM_SHIFT 27
+#define HT_CAP_TXBF_CAP_CHAN_ESTIM_MASK 0x18000000
+
+#define HT_CAP_TXBF_FB_TYPE_NONE 0
+#define HT_CAP_TXBF_FB_TYPE_DELAYED 1
+#define HT_CAP_TXBF_FB_TYPE_IMMEDIATE 2
+#define HT_CAP_TXBF_FB_TYPE_BOTH 3
+
+#define HT_CAP_TX_BF_CAP_EXPLICIT_CSI_FB_MASK 0x400
+#define HT_CAP_TX_BF_CAP_EXPLICIT_CSI_FB_SHIFT 10
+#define HT_CAP_TX_BF_CAP_EXPLICIT_COMPRESSED_FB_MASK 0x18000
+#define HT_CAP_TX_BF_CAP_EXPLICIT_COMPRESSED_FB_SHIFT 15
+
+#define HT_CAP_MCS_FLAGS_SUPP_BYTE 12 /* byte offset in HT Cap Supported MCS for various flags */
+#define HT_CAP_MCS_RX_8TO15_BYTE_OFFSET 1
+#define HT_CAP_MCS_FLAGS_TX_RX_UNEQUAL 0x02
+#define HT_CAP_MCS_FLAGS_MAX_SPATIAL_STREAM_MASK 0x0C
+
+#define VHT_MAX_MPDU 11454 /* max mpdu size for now (bytes) */
+#define VHT_MPDU_MSDU_DELTA 56 /* spec difference between VHT max MPDU and max A-MSDU length */
+/* Max AMSDU len - per spec */
+#define VHT_MAX_AMSDU (VHT_MAX_MPDU - VHT_MPDU_MSDU_DELTA)
+
+#define HT_MAX_AMSDU 7935 /* max amsdu size (bytes) per the HT spec */
+#define HT_MIN_AMSDU 3835 /* min amsdu size (bytes) per the HT spec */
+
+#define HT_PARAMS_RX_FACTOR_MASK 0x03 /* ampdu rcv factor mask */
+#define HT_PARAMS_DENSITY_MASK 0x1C /* ampdu density mask */
+#define HT_PARAMS_DENSITY_SHIFT 2 /* ampdu density shift */
+
+/* HT/AMPDU specific define */
+#define AMPDU_MAX_MPDU_DENSITY 7 /* max mpdu density; in 1/4 usec units */
+#define AMPDU_DENSITY_NONE 0 /* No density requirement */
+#define AMPDU_DENSITY_1over4_US 1 /* 1/4 us density */
+#define AMPDU_DENSITY_1over2_US 2 /* 1/2 us density */
+#define AMPDU_DENSITY_1_US 3 /* 1 us density */
+#define AMPDU_DENSITY_2_US 4 /* 2 us density */
+#define AMPDU_DENSITY_4_US 5 /* 4 us density */
+#define AMPDU_DENSITY_8_US 6 /* 8 us density */
+#define AMPDU_DENSITY_16_US 7 /* 16 us density */
+#define AMPDU_RX_FACTOR_8K 0 /* max rcv ampdu len (8kb) */
+#define AMPDU_RX_FACTOR_16K 1 /* max rcv ampdu len (16kb) */
+#define AMPDU_RX_FACTOR_32K 2 /* max rcv ampdu len (32kb) */
+#define AMPDU_RX_FACTOR_64K 3 /* max rcv ampdu len (64kb) */
+
+/* AMPDU RX factors for VHT rates */
+#define AMPDU_RX_FACTOR_128K 4 /* max rcv ampdu len (128kb) */
+#define AMPDU_RX_FACTOR_256K 5 /* max rcv ampdu len (256kb) */
+#define AMPDU_RX_FACTOR_512K 6 /* max rcv ampdu len (512kb) */
+#define AMPDU_RX_FACTOR_1024K 7 /* max rcv ampdu len (1024kb) */
+
+#define AMPDU_RX_FACTOR_BASE (8 * 1024) /* ampdu factor base for rx len */
+#define AMPDU_RX_FACTOR_BASE_PWR 13 /* ampdu factor base for rx len in power of 2 */
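+
+/*
+ * Illustrative sketch: a peer's maximum rx A-MPDU length follows from the
+ * advertised factor as (2^(13 + factor) - 1) bytes, i.e. with the defines
+ * above:
+ *
+ * uint32 max_rx_ampdu = (1u << (AMPDU_RX_FACTOR_BASE_PWR + factor)) - 1u;
+ */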
+
+#define AMPDU_DELIMITER_LEN 4u /* length of ampdu delimiter */
+#define AMPDU_DELIMITER_LEN_MAX 63 /* max length of ampdu delimiter(enforced in HW) */
+
+#define HT_CAP_EXT_PCO 0x0001
+#define HT_CAP_EXT_PCO_TTIME_MASK 0x0006
+#define HT_CAP_EXT_PCO_TTIME_SHIFT 1
+#define HT_CAP_EXT_MCS_FEEDBACK_MASK 0x0300
+#define HT_CAP_EXT_MCS_FEEDBACK_SHIFT 8
+#define HT_CAP_EXT_HTC 0x0400
+#define HT_CAP_EXT_RD_RESP 0x0800
+
+/** 'ht_add' is called 'HT Operation' information element in the 802.11 standard */
+BWL_PRE_PACKED_STRUCT struct ht_add_ie {
+ uint8 ctl_ch; /* control channel number */
+ uint8 byte1; /* ext ch,rec. ch. width, RIFS support */
+ uint16 opmode; /* operation mode */
+ uint16 misc_bits; /* misc bits */
+ uint8 basic_mcs[MCSSET_LEN]; /* required MCS set */
+} BWL_POST_PACKED_STRUCT;
+typedef struct ht_add_ie ht_add_ie_t;
+
+/* ADD IE: HT 1.0 spec. simply stole an 802.11 IE, we use our prop. IE until this is resolved */
+/* the additional IE is primarily used to convey the current BSS configuration */
+BWL_PRE_PACKED_STRUCT struct ht_prop_add_ie {
+ uint8 id; /* IE ID, 221, DOT11_MNG_PROPR_ID */
+ uint8 len; /* IE length */
+ uint8 oui[3]; /* Proprietary OUI, BRCM_PROP_OUI */
+ uint8 type; /* indicates what follows */
+ ht_add_ie_t add_ie;
+} BWL_POST_PACKED_STRUCT;
+typedef struct ht_prop_add_ie ht_prop_add_ie_t;
+
+#define HT_ADD_IE_LEN 22 /* HT capability len (based on .11n d1.0) */
+#define HT_ADD_IE_TYPE 52 /* faked out as current spec is illegal */
+
+/* byte1 defn's */
+#define HT_BW_ANY 0x04 /* set, STA can use 20 or 40MHz */
+#define HT_RIFS_PERMITTED 0x08 /* RIFS allowed */
+
+/* opmode defn's */
+#define HT_OPMODE_MASK 0x0003 /* protection mode mask */
+#define HT_OPMODE_SHIFT 0 /* protection mode shift */
+#define HT_OPMODE_PURE 0x0000 /* protection mode PURE */
+#define HT_OPMODE_OPTIONAL 0x0001 /* protection mode optional */
+#define HT_OPMODE_HT20IN40 0x0002 /* protection mode 20MHz HT in 40MHz BSS */
+#define HT_OPMODE_MIXED 0x0003 /* protection mode Mixed Mode */
+#define HT_OPMODE_NONGF 0x0004 /* protection mode non-GF */
+#define DOT11N_TXBURST 0x0008 /* Tx burst limit */
+#define DOT11N_OBSS_NONHT 0x0010 /* OBSS Non-HT STA present */
+#define HT_OPMODE_CCFS2_MASK 0x1fe0 /* Channel Center Frequency Segment 2 mask */
+#define HT_OPMODE_CCFS2_SHIFT 5 /* Channel Center Frequency Segment 2 shift */
+
+/* misc_bits defn's */
+#define HT_BASIC_STBC_MCS 0x007f /* basic STBC MCS */
+#define HT_DUAL_STBC_PROT 0x0080 /* Dual STBC Protection */
+#define HT_SECOND_BCN 0x0100 /* Secondary beacon support */
+#define HT_LSIG_TXOP 0x0200 /* L-SIG TXOP Protection full support */
+#define HT_PCO_ACTIVE 0x0400 /* PCO active */
+#define HT_PCO_PHASE 0x0800 /* PCO phase */
+#define HT_DUALCTS_PROTECTION 0x0080 /* DUAL CTS protection needed */
+
+/* Tx Burst Limits */
+#define DOT11N_2G_TXBURST_LIMIT 6160 /* 2G band Tx burst limit per 802.11n Draft 1.10 (usec) */
+#define DOT11N_5G_TXBURST_LIMIT 3080 /* 5G band Tx burst limit per 802.11n Draft 1.10 (usec) */
+
+/* Macros for opmode */
+#define GET_HT_OPMODE(add_ie) ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \
+ >> HT_OPMODE_SHIFT)
+#define HT_MIXEDMODE_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \
+ == HT_OPMODE_MIXED) /* mixed mode present */
+#define HT_HT20_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \
+ == HT_OPMODE_HT20IN40) /* 20MHz HT present */
+#define HT_OPTIONAL_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \
+ == HT_OPMODE_OPTIONAL) /* Optional protection present */
+#define HT_USE_PROTECTION(add_ie) (HT_HT20_PRESENT((add_ie)) || \
+ HT_MIXEDMODE_PRESENT((add_ie))) /* use protection */
+#define HT_NONGF_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_NONGF) \
+ == HT_OPMODE_NONGF) /* non-GF present */
+#define DOT11N_TXBURST_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & DOT11N_TXBURST) \
+ == DOT11N_TXBURST) /* Tx Burst present */
+#define DOT11N_OBSS_NONHT_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & DOT11N_OBSS_NONHT) \
+ == DOT11N_OBSS_NONHT) /* OBSS Non-HT present */
+#define HT_OPMODE_CCFS2_GET(add_ie) ((ltoh16_ua(&(add_ie)->opmode) & HT_OPMODE_CCFS2_MASK) \
+ >> HT_OPMODE_CCFS2_SHIFT) /* get CCFS2 */
+#define HT_OPMODE_CCFS2_SET(add_ie, ccfs2) do { /* set CCFS2 */ \
+ (add_ie)->opmode &= htol16(~HT_OPMODE_CCFS2_MASK); \
+ (add_ie)->opmode |= htol16(((ccfs2) << HT_OPMODE_CCFS2_SHIFT) & HT_OPMODE_CCFS2_MASK); \
+} while (0)
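+
+/*
+ * Usage sketch (illustrative): "add_ie" is assumed to point at a received
+ * ht_add_ie_t; the accessors above take care of the little-endian opmode
+ * field.
+ *
+ * if (HT_USE_PROTECTION(add_ie)) {
+ *     // mixed-mode or 20MHz-HT-in-40 BSS: enable RTS/CTS protection
+ * }
+ * uint16 ccfs2 = HT_OPMODE_CCFS2_GET(add_ie);
+ */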
+
+/* Macros for HT MCS field access */
+#define HT_CAP_MCS_BITMASK(supp_mcs) \
+ ((supp_mcs)[HT_CAP_MCS_RX_8TO15_BYTE_OFFSET])
+#define HT_CAP_MCS_TX_RX_UNEQUAL(supp_mcs) \
+ ((supp_mcs)[HT_CAP_MCS_FLAGS_SUPP_BYTE] & HT_CAP_MCS_FLAGS_TX_RX_UNEQUAL)
+#define HT_CAP_MCS_TX_STREAM_SUPPORT(supp_mcs) \
+ ((supp_mcs)[HT_CAP_MCS_FLAGS_SUPP_BYTE] & HT_CAP_MCS_FLAGS_MAX_SPATIAL_STREAM_MASK)
+
+BWL_PRE_PACKED_STRUCT struct obss_params {
+ uint16 passive_dwell;
+ uint16 active_dwell;
+ uint16 bss_widthscan_interval;
+ uint16 passive_total;
+ uint16 active_total;
+ uint16 chanwidth_transition_dly;
+ uint16 activity_threshold;
+} BWL_POST_PACKED_STRUCT;
+typedef struct obss_params obss_params_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_obss_ie {
+ uint8 id;
+ uint8 len;
+ obss_params_t obss_params;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_obss_ie dot11_obss_ie_t;
+#define DOT11_OBSS_SCAN_IE_LEN sizeof(obss_params_t) /* HT OBSS len (based on 802.11n d3.0) */
+
+/* HT control field */
+#define HT_CTRL_LA_TRQ 0x00000002 /* sounding request */
+#define HT_CTRL_LA_MAI 0x0000003C /* MCS request or antenna selection indication */
+#define HT_CTRL_LA_MAI_SHIFT 2
+#define HT_CTRL_LA_MAI_MRQ 0x00000004 /* MCS request */
+#define HT_CTRL_LA_MAI_MSI 0x00000038 /* MCS request sequence identifier */
+#define HT_CTRL_LA_MFSI 0x000001C0 /* MFB sequence identifier */
+#define HT_CTRL_LA_MFSI_SHIFT 6
+#define HT_CTRL_LA_MFB_ASELC 0x0000FE00 /* MCS feedback, antenna selection command/data */
+#define HT_CTRL_LA_MFB_ASELC_SH 9
+#define HT_CTRL_LA_ASELC_CMD 0x00000C00 /* ASEL command */
+#define HT_CTRL_LA_ASELC_DATA 0x0000F000 /* ASEL data */
+#define HT_CTRL_CAL_POS 0x00030000 /* Calibration position */
+#define HT_CTRL_CAL_SEQ 0x000C0000 /* Calibration sequence */
+#define HT_CTRL_CSI_STEERING 0x00C00000 /* CSI/Steering */
+#define HT_CTRL_CSI_STEER_SHIFT 22
+#define HT_CTRL_CSI_STEER_NFB 0 /* no feedback required */
+#define HT_CTRL_CSI_STEER_CSI 1 /* CSI, H matrix */
+#define HT_CTRL_CSI_STEER_NCOM 2 /* non-compressed beamforming */
+#define HT_CTRL_CSI_STEER_COM 3 /* compressed beamforming */
+#define HT_CTRL_NDP_ANNOUNCE 0x01000000 /* NDP announcement */
+#define HT_CTRL_AC_CONSTRAINT 0x40000000 /* AC Constraint */
+#define HT_CTRL_RDG_MOREPPDU 0x80000000 /* RDG/More PPDU */
+
+/* ************* VHT definitions. ************* */
+
+/**
+ * VHT Capabilities IE (sec 8.4.2.160)
+ */
+
+BWL_PRE_PACKED_STRUCT struct vht_cap_ie {
+ uint32 vht_cap_info;
+ /* supported MCS set - 64 bit field */
+ uint16 rx_mcs_map;
+ uint16 rx_max_rate;
+ uint16 tx_mcs_map;
+ uint16 tx_max_rate;
+} BWL_POST_PACKED_STRUCT;
+typedef struct vht_cap_ie vht_cap_ie_t;
+
+/* 4B cap_info + 8B supp_mcs */
+#define VHT_CAP_IE_LEN 12
+
+/* VHT Capabilities Info field - 32bit - in VHT Cap IE */
+#define VHT_CAP_INFO_MAX_MPDU_LEN_MASK 0x00000003
+#define VHT_CAP_INFO_SUPP_CHAN_WIDTH_MASK 0x0000000c
+#define VHT_CAP_INFO_LDPC 0x00000010
+#define VHT_CAP_INFO_SGI_80MHZ 0x00000020
+#define VHT_CAP_INFO_SGI_160MHZ 0x00000040
+#define VHT_CAP_INFO_TX_STBC 0x00000080
+#define VHT_CAP_INFO_RX_STBC_MASK 0x00000700
+#define VHT_CAP_INFO_RX_STBC_SHIFT 8u
+#define VHT_CAP_INFO_SU_BEAMFMR 0x00000800
+#define VHT_CAP_INFO_SU_BEAMFMEE 0x00001000
+#define VHT_CAP_INFO_NUM_BMFMR_ANT_MASK 0x0000e000
+#define VHT_CAP_INFO_NUM_BMFMR_ANT_SHIFT 13u
+#define VHT_CAP_INFO_NUM_SOUNDING_DIM_MASK 0x00070000
+#define VHT_CAP_INFO_NUM_SOUNDING_DIM_SHIFT 16u
+#define VHT_CAP_INFO_MU_BEAMFMR 0x00080000
+#define VHT_CAP_INFO_MU_BEAMFMEE 0x00100000
+#define VHT_CAP_INFO_TXOPPS 0x00200000
+#define VHT_CAP_INFO_HTCVHT 0x00400000
+#define VHT_CAP_INFO_AMPDU_MAXLEN_EXP_MASK 0x03800000
+#define VHT_CAP_INFO_AMPDU_MAXLEN_EXP_SHIFT 23u
+#define VHT_CAP_INFO_LINK_ADAPT_CAP_MASK 0x0c000000
+#define VHT_CAP_INFO_LINK_ADAPT_CAP_SHIFT 26u
+#define VHT_CAP_INFO_EXT_NSS_BW_SUP_MASK 0xc0000000
+#define VHT_CAP_INFO_EXT_NSS_BW_SUP_SHIFT 30u
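+
+/*
+ * Illustrative sketch: extracting bit-fields from a host-endian
+ * vht_cap_info word with the masks/shifts above.
+ *
+ * uint32 ampdu_exp = (vht_cap_info & VHT_CAP_INFO_AMPDU_MAXLEN_EXP_MASK) >>
+ *         VHT_CAP_INFO_AMPDU_MAXLEN_EXP_SHIFT;
+ * bool mu_bfr = (vht_cap_info & VHT_CAP_INFO_MU_BEAMFMR) != 0;
+ */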
+
+/* get Extended NSS BW Support passing vht cap info */
+#define VHT_CAP_EXT_NSS_BW_SUP(cap_info) \
+ (((cap_info) & VHT_CAP_INFO_EXT_NSS_BW_SUP_MASK) >> VHT_CAP_INFO_EXT_NSS_BW_SUP_SHIFT)
+
+/* VHT CAP INFO extended NSS BW support - refer to IEEE 802.11 REVmc D8.0 Figure 9-559 */
+#define VHT_CAP_INFO_EXT_NSS_BW_HALF_160 1 /* 160MHz at half NSS CAP */
+#define VHT_CAP_INFO_EXT_NSS_BW_HALF_160_80P80 2 /* 160 & 80p80 MHz at half NSS CAP */
+
+/* VHT Supported MCS Set - 64-bit - in VHT Cap IE */
+#define VHT_CAP_SUPP_MCS_RX_HIGHEST_RATE_MASK 0x1fff
+#define VHT_CAP_SUPP_MCS_RX_HIGHEST_RATE_SHIFT 0
+
+#define VHT_CAP_SUPP_MCS_TX_HIGHEST_RATE_MASK 0x1fff
+#define VHT_CAP_SUPP_MCS_TX_HIGHEST_RATE_SHIFT 0
+
+/* defines for field(s) in vht_cap_ie->rx_max_rate */
+#define VHT_CAP_MAX_NSTS_MASK 0xe000
+#define VHT_CAP_MAX_NSTS_SHIFT 13
+
+/* defines for field(s) in vht_cap_ie->tx_max_rate */
+#define VHT_CAP_EXT_NSS_BW_CAP 0x2000
+
+#define VHT_CAP_MCS_MAP_0_7 0
+#define VHT_CAP_MCS_MAP_0_8 1
+#define VHT_CAP_MCS_MAP_0_9 2
+#define VHT_CAP_MCS_MAP_NONE 3
+#define VHT_CAP_MCS_MAP_S 2 /* num bits for 1-stream */
+#define VHT_CAP_MCS_MAP_M 0x3 /* mask for 1-stream */
+/* assumes VHT_CAP_MCS_MAP_NONE is 3 and 2 bits are used for encoding */
+#define VHT_CAP_MCS_MAP_NONE_ALL 0xffff
+
+/* VHT rates bitmap */
+#define VHT_CAP_MCS_0_7_RATEMAP 0x00ff
+#define VHT_CAP_MCS_0_8_RATEMAP 0x01ff
+#define VHT_CAP_MCS_0_9_RATEMAP 0x03ff
+#define VHT_CAP_MCS_FULL_RATEMAP VHT_CAP_MCS_0_9_RATEMAP
+
+#define VHT_PROP_MCS_MAP_10_11 0
+#define VHT_PROP_MCS_MAP_UNUSED1 1
+#define VHT_PROP_MCS_MAP_UNUSED2 2
+#define VHT_PROP_MCS_MAP_NONE 3
+#define VHT_PROP_MCS_MAP_NONE_ALL 0xffff
+
+/* VHT prop rates bitmap */
+#define VHT_PROP_MCS_10_11_RATEMAP 0x0c00
+#define VHT_PROP_MCS_FULL_RATEMAP VHT_PROP_MCS_10_11_RATEMAP
+
+#if !defined(VHT_CAP_MCS_MAP_0_9_NSS3)
+/* remove after moving define to wlc_rate.h */
+/* mcsmap with MCS0-9 for Nss = 3 */
+#define VHT_CAP_MCS_MAP_0_9_NSS3 \
+ ((VHT_CAP_MCS_MAP_0_9 << VHT_MCS_MAP_GET_SS_IDX(1)) | \
+ (VHT_CAP_MCS_MAP_0_9 << VHT_MCS_MAP_GET_SS_IDX(2)) | \
+ (VHT_CAP_MCS_MAP_0_9 << VHT_MCS_MAP_GET_SS_IDX(3)))
+#endif /* !VHT_CAP_MCS_MAP_0_9_NSS3 */
+
+#define VHT_CAP_MCS_MAP_NSS_MAX 8
+
+/* get mcsmap with given mcs for given nss streams */
+#define VHT_CAP_MCS_MAP_CREATE(mcsmap, nss, mcs) \
+ do { \
+ int i; \
+ for (i = 1; i <= nss; i++) { \
+ VHT_MCS_MAP_SET_MCS_PER_SS(i, mcs, mcsmap); \
+ } \
+ } while (0)
+
+/* Map the mcs code to mcs bit map */
+#define VHT_MCS_CODE_TO_MCS_MAP(mcs_code) \
+ ((mcs_code == VHT_CAP_MCS_MAP_0_7) ? VHT_CAP_MCS_0_7_RATEMAP : \
+ (mcs_code == VHT_CAP_MCS_MAP_0_8) ? VHT_CAP_MCS_0_8_RATEMAP : \
+ (mcs_code == VHT_CAP_MCS_MAP_0_9) ? VHT_CAP_MCS_0_9_RATEMAP : 0)
+
+/* Map the proprietary mcs code to proprietary mcs bitmap */
+#define VHT_PROP_MCS_CODE_TO_PROP_MCS_MAP(mcs_code) \
+ ((mcs_code == VHT_PROP_MCS_MAP_10_11) ? VHT_PROP_MCS_10_11_RATEMAP : 0)
+
+/* Map the mcs bit map to mcs code */
+#define VHT_MCS_MAP_TO_MCS_CODE(mcs_map) \
+ ((mcs_map == VHT_CAP_MCS_0_7_RATEMAP) ? VHT_CAP_MCS_MAP_0_7 : \
+ (mcs_map == VHT_CAP_MCS_0_8_RATEMAP) ? VHT_CAP_MCS_MAP_0_8 : \
+ (mcs_map == VHT_CAP_MCS_0_9_RATEMAP) ? VHT_CAP_MCS_MAP_0_9 : VHT_CAP_MCS_MAP_NONE)
+
+/* Map the proprietary mcs map to proprietary mcs code */
+#define VHT_PROP_MCS_MAP_TO_PROP_MCS_CODE(mcs_map) \
+ (((mcs_map & 0xc00) == 0xc00) ? VHT_PROP_MCS_MAP_10_11 : VHT_PROP_MCS_MAP_NONE)
+
+/** VHT Capabilities Supported Channel Width */
+typedef enum vht_cap_chan_width {
+ VHT_CAP_CHAN_WIDTH_SUPPORT_MANDATORY = 0x00,
+ VHT_CAP_CHAN_WIDTH_SUPPORT_160 = 0x04,
+ VHT_CAP_CHAN_WIDTH_SUPPORT_160_8080 = 0x08
+} vht_cap_chan_width_t;
+
+/** VHT Capabilities Supported max MPDU LEN (sec 8.4.2.160.2) */
+typedef enum vht_cap_max_mpdu_len {
+ VHT_CAP_MPDU_MAX_4K = 0x00,
+ VHT_CAP_MPDU_MAX_8K = 0x01,
+ VHT_CAP_MPDU_MAX_11K = 0x02
+} vht_cap_max_mpdu_len_t;
+
+/* Maximum MPDU Length byte counts for the VHT Capabilities advertised limits */
+#define VHT_MPDU_LIMIT_4K 3895
+#define VHT_MPDU_LIMIT_8K 7991
+#define VHT_MPDU_LIMIT_11K 11454
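+
+/*
+ * Illustrative sketch: mapping the advertised 2-bit max-MPDU code to its
+ * byte limit (the same association as the enum and defines above).
+ *
+ * static uint16 vht_max_mpdu_bytes(vht_cap_max_mpdu_len_t code)
+ * {
+ *     switch (code) {
+ *     case VHT_CAP_MPDU_MAX_11K: return VHT_MPDU_LIMIT_11K;
+ *     case VHT_CAP_MPDU_MAX_8K:  return VHT_MPDU_LIMIT_8K;
+ *     default:                   return VHT_MPDU_LIMIT_4K;
+ *     }
+ * }
+ */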
+
+/**
+ * VHT Operation IE (sec 8.4.2.161)
+ */
+
+BWL_PRE_PACKED_STRUCT struct vht_op_ie {
+ uint8 chan_width;
+ uint8 chan1;
+ uint8 chan2;
+ uint16 supp_mcs; /* same def as above in vht cap */
+} BWL_POST_PACKED_STRUCT;
+typedef struct vht_op_ie vht_op_ie_t;
+
+/* 3B VHT Op info + 2B Basic MCS */
+#define VHT_OP_IE_LEN 5
+
+typedef enum vht_op_chan_width {
+ VHT_OP_CHAN_WIDTH_20_40 = 0,
+ VHT_OP_CHAN_WIDTH_80 = 1,
+ VHT_OP_CHAN_WIDTH_160 = 2, /* deprecated - IEEE 802.11 REVmc D8.0 Table 11-25 */
+ VHT_OP_CHAN_WIDTH_80_80 = 3 /* deprecated - IEEE 802.11 REVmc D8.0 Table 11-25 */
+} vht_op_chan_width_t;
+
+#define VHT_OP_INFO_LEN 3
+
+/* AID length */
+#define AID_IE_LEN 2
+/**
+ * BRCM VHT features IE header
+ * The header is the fixed part of the IE.
+ * On the 5GHz band this is the entire IE;
+ * on 2.4GHz it is followed by the VHT IEs as defined
+ * in the 802.11ac specification.
+ *
+ * VHT features rates bitmap:
+ * Bit0: 5G MCS 0-9 BW 160MHz
+ * Bit1: 5G MCS 0-9 support BW 80MHz
+ * Bit2: 5G MCS 0-9 support BW 20MHz
+ * Bit3: 2.4G MCS 0-9 support BW 20MHz
+ * Bits 4-7: reserved for future use
+ */
+#define VHT_FEATURES_IE_TYPE 0x4
+BWL_PRE_PACKED_STRUCT struct vht_features_ie_hdr {
+ uint8 oui[3]; /* Proprietary OUI, BRCM_PROP_OUI */
+ uint8 type; /* type of this IE = 4 */
+ uint8 rate_mask; /* VHT rate mask */
+} BWL_POST_PACKED_STRUCT;
+typedef struct vht_features_ie_hdr vht_features_ie_hdr_t;
+
+/* Defs for rx & tx basic mcs maps - each spatial-stream number carries 2 bits of info */
+#define VHT_MCS_MAP_GET_SS_IDX(nss) (((nss)-1) * VHT_CAP_MCS_MAP_S)
+#define VHT_MCS_MAP_GET_MCS_PER_SS(nss, mcsMap) \
+ (((mcsMap) >> VHT_MCS_MAP_GET_SS_IDX(nss)) & VHT_CAP_MCS_MAP_M)
+#define VHT_MCS_MAP_SET_MCS_PER_SS(nss, numMcs, mcsMap) \
+ do { \
+ (mcsMap) &= (~(VHT_CAP_MCS_MAP_M << VHT_MCS_MAP_GET_SS_IDX(nss))); \
+ (mcsMap) |= (((numMcs) & VHT_CAP_MCS_MAP_M) << VHT_MCS_MAP_GET_SS_IDX(nss)); \
+ } while (0)
+#define VHT_MCS_SS_SUPPORTED(nss, mcsMap) \
+ (VHT_MCS_MAP_GET_MCS_PER_SS((nss), (mcsMap)) != VHT_CAP_MCS_MAP_NONE)
+
+/* Get the max ss supported from the mcs map */
+#define VHT_MAX_SS_SUPPORTED(mcsMap) \
+ (VHT_MCS_SS_SUPPORTED(8, mcsMap) ? 8 : \
+ VHT_MCS_SS_SUPPORTED(7, mcsMap) ? 7 : \
+ VHT_MCS_SS_SUPPORTED(6, mcsMap) ? 6 : \
+ VHT_MCS_SS_SUPPORTED(5, mcsMap) ? 5 : \
+ VHT_MCS_SS_SUPPORTED(4, mcsMap) ? 4 : \
+ VHT_MCS_SS_SUPPORTED(3, mcsMap) ? 3 : \
+ VHT_MCS_SS_SUPPORTED(2, mcsMap) ? 2 : \
+ VHT_MCS_SS_SUPPORTED(1, mcsMap) ? 1 : 0)
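+
+/*
+ * Usage sketch (illustrative): building an MCS map for a 2-stream MCS0-9
+ * device and querying it back with the accessors above.
+ *
+ * uint16 mcsmap = VHT_CAP_MCS_MAP_NONE_ALL;
+ * VHT_CAP_MCS_MAP_CREATE(mcsmap, 2, VHT_CAP_MCS_MAP_0_9);
+ * // VHT_MAX_SS_SUPPORTED(mcsmap) == 2
+ * // VHT_MCS_MAP_GET_MCS_PER_SS(1, mcsmap) == VHT_CAP_MCS_MAP_0_9
+ */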
+
+#ifdef IBSS_RMC
+/* customer's OUI */
+#define RMC_PROP_OUI "\x00\x16\x32"
+#endif
+
+/* ************* WPA definitions. ************* */
+#define WPA_OUI "\x00\x50\xF2" /* WPA OUI */
+#define WPA_OUI_LEN 3 /* WPA OUI length */
+#define WPA_OUI_TYPE 1
+#define WPA_VERSION 1 /* WPA version */
+#define WPA_VERSION_LEN 2 /* WPA version length */
+
+/* ************* WPA2 definitions. ************* */
+#define WPA2_OUI "\x00\x0F\xAC" /* WPA2 OUI */
+#define WPA2_OUI_LEN 3 /* WPA2 OUI length */
+#define WPA2_VERSION 1 /* WPA2 version */
+#define WPA2_VERSION_LEN 2 /* WPA2 version length */
+#define MAX_RSNE_SUPPORTED_VERSION WPA2_VERSION /* Max supported version */
+
+/* ************* WPS definitions. ************* */
+#define WPS_OUI "\x00\x50\xF2" /* WPS OUI */
+#define WPS_OUI_LEN 3 /* WPS OUI length */
+#define WPS_OUI_TYPE 4
+
+/* ************* TPC definitions. ************* */
+#define TPC_OUI "\x00\x50\xF2" /* TPC OUI */
+#define TPC_OUI_LEN 3 /* TPC OUI length */
+#define TPC_OUI_TYPE 8
+#define WFA_OUI_TYPE_TPC 8 /* deprecated */
+
+/* ************* WFA definitions. ************* */
+#define WFA_OUI "\x50\x6F\x9A" /* WFA OUI */
+#define WFA_OUI_LEN 3 /* WFA OUI length */
+#define WFA_OUI_TYPE_P2P 9
+
+/* WFA definitions for LEGACY P2P */
+#ifdef WL_LEGACY_P2P
+#define APPLE_OUI "\x00\x17\xF2" /* MACOSX OUI */
+#define APPLE_OUI_LEN 3
+#define APPLE_OUI_TYPE_P2P 5
+#endif /* WL_LEGACY_P2P */
+
+#ifndef WL_LEGACY_P2P
+#define P2P_OUI WFA_OUI
+#define P2P_OUI_LEN WFA_OUI_LEN
+#define P2P_OUI_TYPE WFA_OUI_TYPE_P2P
+#else
+#define P2P_OUI APPLE_OUI
+#define P2P_OUI_LEN APPLE_OUI_LEN
+#define P2P_OUI_TYPE APPLE_OUI_TYPE_P2P
+#endif /* !WL_LEGACY_P2P */
+
+#ifdef WLTDLS
+#define WFA_OUI_TYPE_TPQ 4 /* WFD Tunneled Probe ReQuest */
+#define WFA_OUI_TYPE_TPS 5 /* WFD Tunneled Probe ReSponse */
+#define WFA_OUI_TYPE_WFD 10
+#endif /* WLTDLS */
+#define WFA_OUI_TYPE_HS20 0x10
+#define WFA_OUI_TYPE_OSEN 0x12
+#define WFA_OUI_TYPE_NAN 0x13
+#define WFA_OUI_TYPE_MBO 0x16
+#define WFA_OUI_TYPE_MBO_OCE 0x16
+#define WFA_OUI_TYPE_OWE 0x1C
+#define WFA_OUI_TYPE_SAE_PK 0x1F
+#define WFA_OUI_TYPE_TD_INDICATION 0x20
+
+#define SAE_PK_MOD_LEN 32u
+BWL_PRE_PACKED_STRUCT struct dot11_sae_pk_element {
+ uint8 id; /* IE ID, 221, DOT11_MNG_PROPR_ID */
+ uint8 len; /* IE length */
+ uint8 oui[WFA_OUI_LEN]; /* WFA_OUI */
+ uint8 type; /* SAE-PK */
+ uint8 data[SAE_PK_MOD_LEN]; /* Modifier. 32Byte fixed */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_sae_pk_element dot11_sae_pk_element_t;
+
+/* RSN authenticated key management suite */
+#define RSN_AKM_NONE 0 /* None (IBSS) */
+#define RSN_AKM_UNSPECIFIED 1 /* Over 802.1x */
+#define RSN_AKM_PSK 2 /* Pre-shared Key */
+#define RSN_AKM_FBT_1X 3 /* Fast Bss transition using 802.1X */
+#define RSN_AKM_FBT_PSK 4 /* Fast Bss transition using Pre-shared Key */
+/* RSN_AKM_MFP_1X and RSN_AKM_MFP_PSK are not used any more
+ * Just kept here to avoid build issue in BISON/CARIBOU branch
+ */
+#define RSN_AKM_MFP_1X 5 /* SHA256 key derivation, using 802.1X */
+#define RSN_AKM_MFP_PSK 6 /* SHA256 key derivation, using Pre-shared Key */
+#define RSN_AKM_SHA256_1X 5 /* SHA256 key derivation, using 802.1X */
+#define RSN_AKM_SHA256_PSK 6 /* SHA256 key derivation, using Pre-shared Key */
+#define RSN_AKM_TPK 7 /* TPK(TDLS Peer Key) handshake */
+#define RSN_AKM_SAE_PSK 8 /* AKM for SAE with 4-way handshake */
+#define RSN_AKM_SAE_FBT 9 /* AKM for SAE with FBT */
+#define RSN_AKM_SUITEB_SHA256_1X 11 /* Suite B SHA256 */
+#define RSN_AKM_SUITEB_SHA384_1X 12 /* Suite B-192 SHA384 */
+#define RSN_AKM_FBT_SHA384_1X 13 /* FBT SHA384 */
+#define RSN_AKM_FILS_SHA256 14 /* SHA256 key derivation, using FILS */
+#define RSN_AKM_FILS_SHA384 15 /* SHA384 key derivation, using FILS */
+#define RSN_AKM_FBT_SHA256_FILS 16
+#define RSN_AKM_FBT_SHA384_FILS 17
+#define RSN_AKM_OWE 18 /* RFC 8110 OWE */
+#define RSN_AKM_FBT_SHA384_PSK 19
+#define RSN_AKM_PSK_SHA384 20
+/* OSEN authenticated key management suite */
+#define OSEN_AKM_UNSPECIFIED RSN_AKM_UNSPECIFIED /* Over 802.1x */
+/* WFA DPP RSN authenticated key management */
+#define RSN_AKM_DPP 02u /* DPP RSN */
+
+/* Key related defines */
+#define DOT11_MAX_DEFAULT_KEYS 4 /* number of default keys */
+#define DOT11_MAX_IGTK_KEYS 2
+#define DOT11_MAX_BIGTK_KEYS 2
+#define DOT11_MAX_KEY_SIZE 32 /* max size of any key */
+#define DOT11_MAX_IV_SIZE 16 /* max size of any IV */
+#define DOT11_EXT_IV_FLAG (1<<5) /* flag to indicate IV is > 4 bytes */
+#define DOT11_WPA_KEY_RSC_LEN 8 /* WPA RSC key len */
+
+#define WEP1_KEY_SIZE 5 /* max size of any WEP key */
+#define WEP1_KEY_HEX_SIZE 10 /* size of WEP key in hex. */
+#define WEP128_KEY_SIZE 13 /* max size of any WEP key */
+#define WEP128_KEY_HEX_SIZE 26 /* size of WEP key in hex. */
+#define TKIP_MIC_SIZE 8 /* size of TKIP MIC */
+#define TKIP_EOM_SIZE 7 /* max size of TKIP EOM */
+#define TKIP_EOM_FLAG 0x5a /* TKIP EOM flag byte */
+#define TKIP_KEY_SIZE 32 /* size of any TKIP key, includes MIC keys */
+#define TKIP_TK_SIZE 16
+#define TKIP_MIC_KEY_SIZE 8
+#define TKIP_MIC_AUTH_TX 16 /* offset to Authenticator MIC TX key */
+#define TKIP_MIC_AUTH_RX 24 /* offset to Authenticator MIC RX key */
+#define TKIP_MIC_SUP_RX TKIP_MIC_AUTH_TX /* offset to Supplicant MIC RX key */
+#define TKIP_MIC_SUP_TX TKIP_MIC_AUTH_RX /* offset to Supplicant MIC TX key */
+#define AES_KEY_SIZE 16 /* size of AES key */
+#define AES_MIC_SIZE 8 /* size of AES MIC */
+#define BIP_KEY_SIZE 16 /* size of BIP key */
+#define BIP_MIC_SIZE 8 /* size of BIP MIC */
+
+#define AES_GCM_MIC_SIZE 16 /* size of MIC for 128-bit GCM - .11adD9 */
+
+#define AES256_KEY_SIZE 32 /* size of AES 256 key - .11acD5 */
+#define AES256_MIC_SIZE 16 /* size of MIC for 256 bit keys, incl BIP */
+
+/* WCN */
+#define WCN_OUI "\x00\x50\xf2" /* WCN OUI */
+#define WCN_TYPE 4 /* WCN type */
+
+#ifdef BCMWAPI_WPI
+#define SMS4_KEY_LEN 16
+#define SMS4_WPI_CBC_MAC_LEN 16
+#endif
+
+/* 802.11r protocol definitions */
+
+/** Mobility Domain IE */
+BWL_PRE_PACKED_STRUCT struct dot11_mdid_ie {
+ uint8 id;
+ uint8 len; /* DOT11_MDID_IE_DATA_LEN (3) */
+ uint16 mdid; /* Mobility Domain Id */
+ uint8 cap;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_mdid_ie dot11_mdid_ie_t;
+
+/* length of data portion of Mobility Domain IE */
+#define DOT11_MDID_IE_DATA_LEN 3
+#define DOT11_MDID_LEN 2
+#define FBT_MDID_CAP_OVERDS 0x01 /* Fast Bss transition over the DS support */
+#define FBT_MDID_CAP_RRP 0x02 /* Resource request protocol support */
+
+/* BITs in FTIE mic control field */
+#define DOT11_FTIE_RSNXE_USED 0x1u
+
+/* Fast Bss Transition IE */
+#ifdef FT_IE_VER_V2
+typedef BWL_PRE_PACKED_STRUCT struct dot11_ft_ie_v2 {
+ uint8 id;
+ uint8 len;
+ uint16 mic_control;
+ /* dynamic offset to following mic[], anonce[], snonce[] */
+} BWL_POST_PACKED_STRUCT dot11_ft_ie_v2;
+typedef struct dot11_ft_ie_v2 dot11_ft_ie_t;
+#else
+BWL_PRE_PACKED_STRUCT struct dot11_ft_ie {
+ uint8 id;
+ uint8 len; /* At least equal to DOT11_FT_IE_FIXED_LEN (82) */
+ uint16 mic_control; /* Mic Control */
+ uint8 mic[16];
+ uint8 anonce[32];
+ uint8 snonce[32];
+ /* Optional sub-elements follow */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ft_ie dot11_ft_ie_t;
+
+/* Fixed length of data portion of Fast BSS Transition IE. There could be
+ * optional parameters, which if present, could raise the FT IE length to 255.
+ */
+#define DOT11_FT_IE_FIXED_LEN 82
+#endif /* FT_IE_VER_V2 */
+
+#ifdef FT_IE_VER_V2
+#define DOT11_FT_IE_LEN(mic_len) (sizeof(dot11_ft_ie_v2) + mic_len + EAPOL_WPA_KEY_NONCE_LEN *2)
+#define FT_IE_MIC(pos) ((uint8 *)pos + sizeof(dot11_ft_ie_v2))
+#define FT_IE_ANONCE(pos, mic_len) ((uint8 *)pos + sizeof(dot11_ft_ie_v2) + mic_len)
+#define FT_IE_SNONCE(pos, mic_len) ((uint8 *)pos + sizeof(dot11_ft_ie_v2) + mic_len + \
+ EAPOL_WPA_KEY_NONCE_LEN)
+#else
+#define DOT11_FT_IE_LEN(mic_len) sizeof(dot11_ft_ie_t)
+#define FT_IE_MIC(pos) ((uint8 *)&pos->mic)
+#define FT_IE_ANONCE(pos, mic_len) ((uint8 *)&pos->anonce)
+#define FT_IE_SNONCE(pos, mic_len) ((uint8 *)&pos->snonce)
+#endif /* FT_IE_VER_V2 */
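+
+/*
+ * Layout sketch (illustrative): under FT_IE_VER_V2 the mic/anonce/snonce
+ * offsets depend on the negotiated MIC length, hence the accessors above;
+ * "ftie" and "mic_len" are assumed inputs.
+ *
+ * uint8 *mic    = FT_IE_MIC(ftie);
+ * uint8 *anonce = FT_IE_ANONCE(ftie, mic_len);
+ * uint8 *snonce = FT_IE_SNONCE(ftie, mic_len);
+ */
+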
+#define TIE_TYPE_RESERVED 0
+#define TIE_TYPE_REASSOC_DEADLINE 1
+#define TIE_TYPE_KEY_LIEFTIME 2
+#define TIE_TYPE_ASSOC_COMEBACK 3
+BWL_PRE_PACKED_STRUCT struct dot11_timeout_ie {
+ uint8 id;
+ uint8 len;
+ uint8 type; /* timeout interval type */
+ uint32 value; /* timeout interval value */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_timeout_ie dot11_timeout_ie_t;
+
+/** GTK ie */
+BWL_PRE_PACKED_STRUCT struct dot11_gtk_ie {
+ uint8 id;
+ uint8 len;
+ uint16 key_info;
+ uint8 key_len;
+ uint8 rsc[8];
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_gtk_ie dot11_gtk_ie_t;
+
+/** Management MIC ie */
+BWL_PRE_PACKED_STRUCT struct mmic_ie {
+ uint8 id; /* IE ID: DOT11_MNG_MMIE_ID */
+ uint8 len; /* IE length */
+ uint16 key_id; /* key id */
+ uint8 ipn[6]; /* ipn */
+ uint8 mic[16]; /* mic */
+} BWL_POST_PACKED_STRUCT;
+typedef struct mmic_ie mmic_ie_t;
+
+#define DOT11_MMIC_IE_HDR_SIZE (OFFSETOF(mmic_ie_t, mic))
+
+/* 802.11r-2008, 11A.10.3 - RRB frame format */
+BWL_PRE_PACKED_STRUCT struct dot11_ft_rrb_frame {
+ uint8 frame_type; /* 1 for RRB */
+ uint8 packet_type; /* 0 for Request 1 for Response */
+ uint16 len;
+ uint8 cur_ap_addr[ETHER_ADDR_LEN];
+ uint8 data[1]; /* IEs Received/Sent in FT Action Req/Resp Frame */
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct dot11_ft_rrb_frame dot11_ft_rrb_frame_t;
+
+#define DOT11_FT_RRB_FIXED_LEN 10
+#define DOT11_FT_REMOTE_FRAME_TYPE 1
+#define DOT11_FT_PACKET_REQ 0
+#define DOT11_FT_PACKET_RESP 1
+
+#define BSSID_INVALID "\x00\x00\x00\x00\x00\x00"
+#define BSSID_BROADCAST "\xFF\xFF\xFF\xFF\xFF\xFF"
+
+#ifdef BCMWAPI_WAI
+#define WAPI_IE_MIN_LEN 20 /* WAPI IE min length */
+#define WAPI_VERSION 1 /* WAPI version */
+#define WAPI_VERSION_LEN 2 /* WAPI version length */
+#define WAPI_OUI "\x00\x14\x72" /* WAPI OUI */
+#define WAPI_OUI_LEN DOT11_OUI_LEN /* WAPI OUI length */
+#endif /* BCMWAPI_WAI */
+
+/* ************* WMM Parameter definitions. ************* */
+#define WMM_OUI "\x00\x50\xF2" /* WMM OUI */
+#define WMM_OUI_LEN 3 /* WMM OUI length */
+#define WMM_OUI_TYPE 2 /* WMM OUI type */
+#define WMM_VERSION 1
+#define WMM_VERSION_LEN 1
+
+/* WMM OUI subtype */
+#define WMM_OUI_SUBTYPE_PARAMETER 1
+#define WMM_PARAMETER_IE_LEN 24
+
+/** Link Identifier Element */
+BWL_PRE_PACKED_STRUCT struct link_id_ie {
+ uint8 id;
+ uint8 len;
+ struct ether_addr bssid;
+ struct ether_addr tdls_init_mac;
+ struct ether_addr tdls_resp_mac;
+} BWL_POST_PACKED_STRUCT;
+typedef struct link_id_ie link_id_ie_t;
+#define TDLS_LINK_ID_IE_LEN 18u
+
+/** Link Wakeup Schedule Element */
+BWL_PRE_PACKED_STRUCT struct wakeup_sch_ie {
+ uint8 id;
+ uint8 len;
+ uint32 offset; /* in ms between TSF0 and start of 1st Awake Window */
+ uint32 interval; /* in ms between the start of 2 Awake Windows */
+ uint32 awake_win_slots; /* in backoff slots, duration of Awake Window */
+ uint32 max_wake_win; /* in ms, max duration of Awake Window */
+ uint16 idle_cnt; /* number of consecutive Awake Windows */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wakeup_sch_ie wakeup_sch_ie_t;
+#define TDLS_WAKEUP_SCH_IE_LEN 18
+
+/** Channel Switch Timing Element */
+BWL_PRE_PACKED_STRUCT struct channel_switch_timing_ie {
+ uint8 id;
+ uint8 len;
+ uint16 switch_time; /* in ms, time to switch channels */
+ uint16 switch_timeout; /* in ms */
+} BWL_POST_PACKED_STRUCT;
+typedef struct channel_switch_timing_ie channel_switch_timing_ie_t;
+#define TDLS_CHANNEL_SWITCH_TIMING_IE_LEN 4
+
+/** PTI Control Element */
+BWL_PRE_PACKED_STRUCT struct pti_control_ie {
+ uint8 id;
+ uint8 len;
+ uint8 tid;
+ uint16 seq_control;
+} BWL_POST_PACKED_STRUCT;
+typedef struct pti_control_ie pti_control_ie_t;
+#define TDLS_PTI_CONTROL_IE_LEN 3
+
+/** PU Buffer Status Element */
+BWL_PRE_PACKED_STRUCT struct pu_buffer_status_ie {
+ uint8 id;
+ uint8 len;
+ uint8 status;
+} BWL_POST_PACKED_STRUCT;
+typedef struct pu_buffer_status_ie pu_buffer_status_ie_t;
+#define TDLS_PU_BUFFER_STATUS_IE_LEN 1
+#define TDLS_PU_BUFFER_STATUS_AC_BK 1
+#define TDLS_PU_BUFFER_STATUS_AC_BE 2
+#define TDLS_PU_BUFFER_STATUS_AC_VI 4
+#define TDLS_PU_BUFFER_STATUS_AC_VO 8
+
+/* TDLS Action Field Values */
+#define TDLS_SETUP_REQ 0
+#define TDLS_SETUP_RESP 1
+#define TDLS_SETUP_CONFIRM 2
+#define TDLS_TEARDOWN 3
+#define TDLS_PEER_TRAFFIC_IND 4
+#define TDLS_CHANNEL_SWITCH_REQ 5
+#define TDLS_CHANNEL_SWITCH_RESP 6
+#define TDLS_PEER_PSM_REQ 7
+#define TDLS_PEER_PSM_RESP 8
+#define TDLS_PEER_TRAFFIC_RESP 9
+#define TDLS_DISCOVERY_REQ 10
+
+/* 802.11z TDLS Public Action Frame action field */
+#define TDLS_DISCOVERY_RESP 14
+
+/* 802.11u GAS action frames */
+#define GAS_REQUEST_ACTION_FRAME 10
+#define GAS_RESPONSE_ACTION_FRAME 11
+#define GAS_COMEBACK_REQUEST_ACTION_FRAME 12
+#define GAS_COMEBACK_RESPONSE_ACTION_FRAME 13
+
+/* FTM - fine timing measurement public action frames */
+BWL_PRE_PACKED_STRUCT struct dot11_ftm_req {
+ uint8 category; /* category of action frame (4) */
+ uint8 action; /* public action (32) */
+ uint8 trigger; /* trigger/continue? */
+ /* optional lci, civic loc, ftm params */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ftm_req dot11_ftm_req_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_ftm {
+ uint8 category; /* category of action frame (4) */
+ uint8 action; /* public action (33) */
+ uint8 dialog; /* dialog token */
+ uint8 follow_up; /* follow up dialog token */
+ uint8 tod[6]; /* t1 - last depart timestamp */
+ uint8 toa[6]; /* t4 - last ack arrival timestamp */
+ uint8 tod_err[2]; /* t1 error */
+ uint8 toa_err[2]; /* t4 error */
+ /* optional lci report, civic loc report, ftm params */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ftm dot11_ftm_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_ftm_lmr {
+ uint8 category; /* category of action frame (4) */
+ uint8 action; /* public action (33) */
+ uint8 dialog; /* dialog token */
+ uint8 tod[6]; /* RSTA t3 or ISTA t1:
+ * last departure of NDP
+ */
+ uint8 toa[6]; /* RSTA t2 or ISTA t4:
+ * last arrival of NDP
+ */
+ uint8 tod_err[2]; /* t3 or t1 error */
+ uint8 toa_err[2]; /* t2 or t4 error */
+ uint16 cfo; /* I2R LMR: clock difference between ISTA and RSTA. */
+ uint8 sec_ltf_params[]; /* Optional Secure LTF parameters */
+ /* no AOA feedback */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ftm_lmr dot11_ftm_lmr_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_ftm_ranging_ndpa {
+ uint16 fc; /* frame control */
+ uint16 durid; /* duration/ID */
+ struct ether_addr ra; /* receiver address */
+ struct ether_addr ta; /* transmitter address */
+ uint8 dialog_token; /* sounding dialog token */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ftm_ranging_ndpa dot11_ftm_ranging_ndpa_t;
+
+/* NDPA types = dialog token byte lower 2 bits */
+#define DOT11_NDPA_TYPE_MASK 0x03
+#define DOT11_NDPA_TYPE_VHT 0x00
+#define DOT11_NDPA_TYPE_RANGING 0x01
+#define DOT11_NDPA_TYPE_HE 0x02
+
+#define DOT11_FTM_ERR_NOT_CONT_OFFSET 1
+#define DOT11_FTM_ERR_NOT_CONT_MASK 0x80
+#define DOT11_FTM_ERR_NOT_CONT_SHIFT 7
+#define DOT11_FTM_ERR_NOT_CONT(_err) (((_err)[DOT11_FTM_ERR_NOT_CONT_OFFSET] & \
+ DOT11_FTM_ERR_NOT_CONT_MASK) >> DOT11_FTM_ERR_NOT_CONT_SHIFT)
+#define DOT11_FTM_ERR_SET_NOT_CONT(_err, _val) do {\
+ uint8 _err2 = (_err)[DOT11_FTM_ERR_NOT_CONT_OFFSET]; \
+ _err2 &= ~DOT11_FTM_ERR_NOT_CONT_MASK; \
+ _err2 |= ((_val) << DOT11_FTM_ERR_NOT_CONT_SHIFT) & DOT11_FTM_ERR_NOT_CONT_MASK; \
+ (_err)[DOT11_FTM_ERR_NOT_CONT_OFFSET] = _err2; \
+} while (0)
+
+#define DOT11_FTM_ERR_MAX_ERR_OFFSET 0
+#define DOT11_FTM_ERR_MAX_ERR_MASK 0x7fff
+#define DOT11_FTM_ERR_MAX_ERR_SHIFT 0
+#define DOT11_FTM_ERR_MAX_ERR(_err) (((((_err)[1] & 0x7f) << 8) | (_err)[0]))
+#define DOT11_FTM_ERR_SET_MAX_ERR(_err, _val) do {\
+ uint16 _val2; \
+ uint16 _not_cont; \
+ _val2 = (((_val) & DOT11_FTM_ERR_MAX_ERR_MASK) << DOT11_FTM_ERR_MAX_ERR_SHIFT); \
+ _val2 = (_val2 > 0x3fff) ? 0 : _val2; /* not expecting > 16ns error */ \
+ _not_cont = DOT11_FTM_ERR_NOT_CONT(_err); \
+ (_err)[0] = _val2 & 0xff; \
+ (_err)[1] = (_val2 >> 8) & 0xff; \
+ DOT11_FTM_ERR_SET_NOT_CONT(_err, _not_cont); \
+} while (0)
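+
+/* Illustrative sketch (not part of the original header): round-tripping the
+ * two-octet error field through the accessors above. Kept under #if 0 so it
+ * is never compiled.
+ */
+#if 0
+static void
+example_ftm_err_field(void)
+{
+ uint8 err[2] = {0, 0};
+ DOT11_FTM_ERR_SET_MAX_ERR(err, 1000); /* err[0] = 0xe8, err[1] = 0x03 */
+ DOT11_FTM_ERR_SET_NOT_CONT(err, 1); /* sets bit 7 of err[1] */
+ /* DOT11_FTM_ERR_MAX_ERR(err) == 1000, DOT11_FTM_ERR_NOT_CONT(err) == 1 */
+}
+#endif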
+
+#if defined(DOT11_FTM_ERR_ROM_COMPAT)
+/* incorrect defs - here for ROM compatibility */
+#undef DOT11_FTM_ERR_NOT_CONT_OFFSET
+#undef DOT11_FTM_ERR_NOT_CONT_MASK
+#undef DOT11_FTM_ERR_NOT_CONT_SHIFT
+#undef DOT11_FTM_ERR_NOT_CONT
+#undef DOT11_FTM_ERR_SET_NOT_CONT
+
+#define DOT11_FTM_ERR_NOT_CONT_OFFSET 0
+#define DOT11_FTM_ERR_NOT_CONT_MASK 0x0001
+#define DOT11_FTM_ERR_NOT_CONT_SHIFT 0
+#define DOT11_FTM_ERR_NOT_CONT(_err) (((_err)[DOT11_FTM_ERR_NOT_CONT_OFFSET] & \
+ DOT11_FTM_ERR_NOT_CONT_MASK) >> DOT11_FTM_ERR_NOT_CONT_SHIFT)
+#define DOT11_FTM_ERR_SET_NOT_CONT(_err, _val) do {\
+ uint8 _err2 = (_err)[DOT11_FTM_ERR_NOT_CONT_OFFSET]; \
+ _err2 &= ~DOT11_FTM_ERR_NOT_CONT_MASK; \
+ _err2 |= ((_val) << DOT11_FTM_ERR_NOT_CONT_SHIFT) & DOT11_FTM_ERR_NOT_CONT_MASK; \
+ (_err)[DOT11_FTM_ERR_NOT_CONT_OFFSET] = _err2; \
+} while (0)
+
+#undef DOT11_FTM_ERR_MAX_ERR_OFFSET
+#undef DOT11_FTM_ERR_MAX_ERR_MASK
+#undef DOT11_FTM_ERR_MAX_ERR_SHIFT
+#undef DOT11_FTM_ERR_MAX_ERR
+#undef DOT11_FTM_ERR_SET_MAX_ERR
+
+#define DOT11_FTM_ERR_MAX_ERR_OFFSET 0
+#define DOT11_FTM_ERR_MAX_ERR_MASK 0xfff7
+#define DOT11_FTM_ERR_MAX_ERR_SHIFT 1
+#define DOT11_FTM_ERR_MAX_ERR(_err) ((((_err)[1] << 7) | (_err)[0]) >> 1)
+#define DOT11_FTM_ERR_SET_MAX_ERR(_err, _val) do {\
+ uint16 _val2; \
+ _val2 = (((_val) << DOT11_FTM_ERR_MAX_ERR_SHIFT) |\
+ ((_err)[DOT11_FTM_ERR_NOT_CONT_OFFSET] & DOT11_FTM_ERR_NOT_CONT_MASK)); \
+ (_err)[0] = _val2 & 0xff; \
+ (_err)[1] = _val2 >> 8 & 0xff; \
+} while (0)
+#endif /* DOT11_FTM_ERR_ROM_COMPAT */
+
+BWL_PRE_PACKED_STRUCT struct dot11_ftm_params {
+ uint8 id; /* DOT11_MNG_FTM_PARAM_ID 8.4.2.166 11mcd2.6/2014 - revisit */
+ uint8 len;
+ uint8 info[9];
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct dot11_ftm_params dot11_ftm_params_t;
+#define DOT11_FTM_PARAMS_IE_LEN (sizeof(dot11_ftm_params_t) - 2)
+
+/* common part for both TB and NTB */
+BWL_PRE_PACKED_STRUCT struct dot11_ftm_ranging_params {
+ uint8 id; /* 255 */
+ uint8 len;
+ uint8 ext_id; /* DOT11_MNG_FTM_RANGING_EXT_ID */
+ uint8 info[6];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ftm_ranging_params dot11_ftm_ranging_params_t;
+#define DOT11_FTM_CMN_RANGING_PARAMS_IE_LEN (sizeof(dot11_ftm_ranging_params_t) - TLV_EXT_HDR_LEN)
+
+/* FTM NTB specific */
+BWL_PRE_PACKED_STRUCT struct dot11_ftm_ntb_params {
+ uint8 id; /* DOT11_FTM_NTB_SUB_ELT_ID */
+ uint8 len;
+ uint8 info[6];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ftm_ntb_params dot11_ftm_ntb_params_t;
+
+#define DOT11_FTM_NTB_PARAMS_SUB_IE_LEN (sizeof(dot11_ftm_ntb_params_t))
+#define DOT11_FTM_NTB_PARAMS_IE_LEN (DOT11_FTM_CMN_RANGING_PARAMS_IE_LEN + \
+ DOT11_FTM_NTB_PARAMS_SUB_IE_LEN)
+
+/* FTM TB specific */
+BWL_PRE_PACKED_STRUCT struct dot11_ftm_tb_params {
+ uint8 id; /* DOT11_FTM_TB_SUB_ELT_ID */
+ uint8 len;
+ uint8 info[1]; /* variable length, minimum 1 */
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct dot11_ftm_tb_params dot11_ftm_tb_params_t;
+#define DOT11_FTM_TB_PARAMS_IE_LEN sizeof(dot11_ftm_tb_params_t)
+
+BWL_PRE_PACKED_STRUCT struct dot11_ftm_sec_ltf_params {
+ uint8 id; /* 255 */
+ uint8 len;
+ uint8 ext_id; /* DOT11_MNG_FTM_SECURE_LTF_EXT_ID */
+ uint8 info[11];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ftm_sec_ltf_params dot11_ftm_sec_ltf_params_t;
+#define DOT11_FTM_SEC_LTF_PARAMS_IE_LEN (sizeof(dot11_ftm_sec_ltf_params_t) - 3)
+
+#define FTM_PARAMS_FIELD(_p, _off, _mask, _shift) (((_p)->info[(_off)] & (_mask)) >> (_shift))
+#define FTM_PARAMS_SET_FIELD(_p, _off, _mask, _shift, _val) do {\
+ uint8 _ptmp = (_p)->info[_off] & ~(_mask); \
+ (_p)->info[(_off)] = _ptmp | (((_val) << (_shift)) & (_mask)); \
+} while (0)
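+
+/* Illustrative sketch (not part of the original header): every FTM_PARAMS_*
+ * accessor below is built on the two generic macros above - a byte offset
+ * into info[], a mask and a shift. Kept under #if 0 so it is never compiled.
+ */
+#if 0
+static void
+example_ftm_params_field(dot11_ftm_params_t *p)
+{
+ /* write the 5-bit field at bits 2..6 of info[0], then read it back */
+ FTM_PARAMS_SET_FIELD(p, 0, 0x7c, 2, 11);
+ /* FTM_PARAMS_FIELD(p, 0, 0x7c, 2) now evaluates to 11 */
+}
+#endif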
+
+#define FTM_PARAMS_STATUS_OFFSET 0
+#define FTM_PARAMS_STATUS_MASK 0x03
+#define FTM_PARAMS_STATUS_SHIFT 0
+#define FTM_PARAMS_STATUS(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_STATUS_OFFSET, \
+ FTM_PARAMS_STATUS_MASK, FTM_PARAMS_STATUS_SHIFT)
+#define FTM_PARAMS_SET_STATUS(_p, _status) FTM_PARAMS_SET_FIELD(_p, \
+ FTM_PARAMS_STATUS_OFFSET, FTM_PARAMS_STATUS_MASK, FTM_PARAMS_STATUS_SHIFT, _status)
+
+#define FTM_PARAMS_VALUE_OFFSET 0
+#define FTM_PARAMS_VALUE_MASK 0x7c
+#define FTM_PARAMS_VALUE_SHIFT 2
+#define FTM_PARAMS_VALUE(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_VALUE_OFFSET, \
+ FTM_PARAMS_VALUE_MASK, FTM_PARAMS_VALUE_SHIFT)
+#define FTM_PARAMS_SET_VALUE(_p, _value) FTM_PARAMS_SET_FIELD(_p, \
+ FTM_PARAMS_VALUE_OFFSET, FTM_PARAMS_VALUE_MASK, FTM_PARAMS_VALUE_SHIFT, _value)
+#define FTM_PARAMS_MAX_VALUE 32
+
+#define FTM_PARAMS_NBURSTEXP_OFFSET 1
+#define FTM_PARAMS_NBURSTEXP_MASK 0x0f
+#define FTM_PARAMS_NBURSTEXP_SHIFT 0
+#define FTM_PARAMS_NBURSTEXP(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_NBURSTEXP_OFFSET, \
+ FTM_PARAMS_NBURSTEXP_MASK, FTM_PARAMS_NBURSTEXP_SHIFT)
+#define FTM_PARAMS_SET_NBURSTEXP(_p, _bexp) FTM_PARAMS_SET_FIELD(_p, \
+ FTM_PARAMS_NBURSTEXP_OFFSET, FTM_PARAMS_NBURSTEXP_MASK, FTM_PARAMS_NBURSTEXP_SHIFT, \
+ _bexp)
+
+#define FTM_PARAMS_NBURST(_p) (1 << FTM_PARAMS_NBURSTEXP(_p))
+
+enum {
+ FTM_PARAMS_NBURSTEXP_NOPREF = 15
+};
+
+enum {
+ FTM_PARAMS_BURSTTMO_NOPREF = 15
+};
+
+#define FTM_PARAMS_BURSTTMO_OFFSET 1
+#define FTM_PARAMS_BURSTTMO_MASK 0xf0
+#define FTM_PARAMS_BURSTTMO_SHIFT 4
+#define FTM_PARAMS_BURSTTMO(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_BURSTTMO_OFFSET, \
+ FTM_PARAMS_BURSTTMO_MASK, FTM_PARAMS_BURSTTMO_SHIFT)
+/* set timeout in params using _tmo where timeout = 2^(_tmo) * 250us */
+#define FTM_PARAMS_SET_BURSTTMO(_p, _tmo) FTM_PARAMS_SET_FIELD(_p, \
+ FTM_PARAMS_BURSTTMO_OFFSET, FTM_PARAMS_BURSTTMO_MASK, FTM_PARAMS_BURSTTMO_SHIFT, (_tmo)+2)
+
+#define FTM_PARAMS_BURSTTMO_USEC(_val) ((1 << ((_val)-2)) * 250)
+#define FTM_PARAMS_BURSTTMO_VALID(_val) ((((_val) < 12 && (_val) > 1)) || \
+ (_val) == FTM_PARAMS_BURSTTMO_NOPREF)
+#define FTM_PARAMS_BURSTTMO_MAX_MSEC 128 /* 2^9 * 250us */
+#define FTM_PARAMS_BURSTTMO_MAX_USEC 128000 /* 2^9 * 250us */
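+
+/* Worked example (editorial note, not from the source): a caller invoking
+ * FTM_PARAMS_SET_BURSTTMO(p, 7) stores field value 9, and
+ * FTM_PARAMS_BURSTTMO_USEC(9) recovers (1 << (9 - 2)) * 250 = 32000 us,
+ * i.e. 2^7 * 250 us as described above the setter. The largest valid field
+ * value, 11, yields 2^9 * 250 us = 128 ms, matching the
+ * FTM_PARAMS_BURSTTMO_MAX_* definitions.
+ */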
+
+#define FTM_PARAMS_MINDELTA_OFFSET 2
+#define FTM_PARAMS_MINDELTA_USEC(_p) ((_p)->info[FTM_PARAMS_MINDELTA_OFFSET] * 100)
+#define FTM_PARAMS_SET_MINDELTA_USEC(_p, _delta) do { \
+ (_p)->info[FTM_PARAMS_MINDELTA_OFFSET] = (_delta) / 100; \
+} while (0)
+
+enum {
+ FTM_PARAMS_MINDELTA_NOPREF = 0
+};
+
+#define FTM_PARAMS_PARTIAL_TSF(_p) ((_p)->info[4] << 8 | (_p)->info[3])
+#define FTM_PARAMS_SET_PARTIAL_TSF(_p, _partial_tsf) do { \
+ (_p)->info[3] = (_partial_tsf) & 0xff; \
+ (_p)->info[4] = ((_partial_tsf) >> 8) & 0xff; \
+} while (0)
+
+#define FTM_PARAMS_PARTIAL_TSF_MASK 0x0000000003fffc00ULL
+#define FTM_PARAMS_PARTIAL_TSF_SHIFT 10
+#define FTM_PARAMS_PARTIAL_TSF_BIT_LEN 16
+#define FTM_PARAMS_PARTIAL_TSF_MAX 0xffff
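+
+/* Illustrative sketch (not part of the original header): the partial TSF in
+ * info[3..4] is bits 10..25 of the full 64-bit TSF, per the mask and shift
+ * above. Kept under #if 0 so it is never compiled.
+ */
+#if 0
+static void
+example_set_partial_tsf(dot11_ftm_params_t *p, uint64 tsf)
+{
+ uint16 partial = (uint16)((tsf & FTM_PARAMS_PARTIAL_TSF_MASK) >>
+ FTM_PARAMS_PARTIAL_TSF_SHIFT);
+ FTM_PARAMS_SET_PARTIAL_TSF(p, partial);
+}
+#endif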
+
+/* FTM can indicate up to 62k TUs forward and 1k TUs backward */
+#define FTM_PARAMS_TSF_FW_HI (63487 << 10) /* in micro sec */
+#define FTM_PARAMS_TSF_BW_LOW (64512 << 10) /* in micro sec */
+#define FTM_PARAMS_TSF_BW_HI (65535 << 10) /* in micro sec */
+#define FTM_PARAMS_TSF_FW_MAX FTM_PARAMS_TSF_FW_HI
+#define FTM_PARAMS_TSF_BW_MAX (FTM_PARAMS_TSF_BW_HI - FTM_PARAMS_TSF_BW_LOW)
+
+#define FTM_PARAMS_PTSFNOPREF_OFFSET 5
+#define FTM_PARAMS_PTSFNOPREF_MASK 0x1
+#define FTM_PARAMS_PTSFNOPREF_SHIFT 0
+#define FTM_PARAMS_PTSFNOPREF(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_PTSFNOPREF_OFFSET, \
+ FTM_PARAMS_PTSFNOPREF_MASK, FTM_PARAMS_PTSFNOPREF_SHIFT)
+#define FTM_PARAMS_SET_PTSFNOPREF(_p, _nopref) FTM_PARAMS_SET_FIELD(_p, \
+ FTM_PARAMS_PTSFNOPREF_OFFSET, FTM_PARAMS_PTSFNOPREF_MASK, \
+ FTM_PARAMS_PTSFNOPREF_SHIFT, _nopref)
+
+#define FTM_PARAMS_ASAP_OFFSET 5
+#define FTM_PARAMS_ASAP_MASK 0x4
+#define FTM_PARAMS_ASAP_SHIFT 2
+#define FTM_PARAMS_ASAP(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_ASAP_OFFSET, \
+ FTM_PARAMS_ASAP_MASK, FTM_PARAMS_ASAP_SHIFT)
+#define FTM_PARAMS_SET_ASAP(_p, _asap) FTM_PARAMS_SET_FIELD(_p, \
+ FTM_PARAMS_ASAP_OFFSET, FTM_PARAMS_ASAP_MASK, FTM_PARAMS_ASAP_SHIFT, _asap)
+
+/* FTM1 - AKA ASAP Capable */
+#define FTM_PARAMS_FTM1_OFFSET 5
+#define FTM_PARAMS_FTM1_MASK 0x02
+#define FTM_PARAMS_FTM1_SHIFT 1
+#define FTM_PARAMS_FTM1(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_FTM1_OFFSET, \
+ FTM_PARAMS_FTM1_MASK, FTM_PARAMS_FTM1_SHIFT)
+#define FTM_PARAMS_SET_FTM1(_p, _ftm1) FTM_PARAMS_SET_FIELD(_p, \
+ FTM_PARAMS_FTM1_OFFSET, FTM_PARAMS_FTM1_MASK, FTM_PARAMS_FTM1_SHIFT, _ftm1)
+
+#define FTM_PARAMS_FTMS_PER_BURST_OFFSET 5
+#define FTM_PARAMS_FTMS_PER_BURST_MASK 0xf8
+#define FTM_PARAMS_FTMS_PER_BURST_SHIFT 3
+#define FTM_PARAMS_FTMS_PER_BURST(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_FTMS_PER_BURST_OFFSET, \
+ FTM_PARAMS_FTMS_PER_BURST_MASK, FTM_PARAMS_FTMS_PER_BURST_SHIFT)
+#define FTM_PARAMS_SET_FTMS_PER_BURST(_p, _nftms) FTM_PARAMS_SET_FIELD(_p, \
+ FTM_PARAMS_FTMS_PER_BURST_OFFSET, FTM_PARAMS_FTMS_PER_BURST_MASK, \
+ FTM_PARAMS_FTMS_PER_BURST_SHIFT, _nftms)
+
+enum {
+ FTM_PARAMS_FTMS_PER_BURST_NOPREF = 0
+};
+
+#define FTM_PARAMS_CHAN_INFO_OFFSET 6
+#define FTM_PARAMS_CHAN_INFO_MASK 0xfc
+#define FTM_PARAMS_CHAN_INFO_SHIFT 2
+#define FTM_PARAMS_CHAN_INFO(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_CHAN_INFO_OFFSET, \
+ FTM_PARAMS_CHAN_INFO_MASK, FTM_PARAMS_CHAN_INFO_SHIFT)
+#define FTM_PARAMS_SET_CHAN_INFO(_p, _ci) FTM_PARAMS_SET_FIELD(_p, \
+ FTM_PARAMS_CHAN_INFO_OFFSET, FTM_PARAMS_CHAN_INFO_MASK, FTM_PARAMS_CHAN_INFO_SHIFT, _ci)
+
+/* burst period - units of 100ms */
+#define FTM_PARAMS_BURST_PERIOD(_p) (((_p)->info[8] << 8) | (_p)->info[7])
+#define FTM_PARAMS_SET_BURST_PERIOD(_p, _bp) do {\
+ (_p)->info[7] = (_bp) & 0xff; \
+ (_p)->info[8] = ((_bp) >> 8) & 0xff; \
+} while (0)
+
+#define FTM_PARAMS_BURST_PERIOD_MS(_p) (FTM_PARAMS_BURST_PERIOD(_p) * 100)
+
+enum {
+ FTM_PARAMS_BURST_PERIOD_NOPREF = 0
+};
+
+/* FTM status values - last updated from 11mcD4.0 */
+enum {
+ FTM_PARAMS_STATUS_RESERVED = 0,
+ FTM_PARAMS_STATUS_SUCCESSFUL = 1,
+ FTM_PARAMS_STATUS_INCAPABLE = 2,
+ FTM_PARAMS_STATUS_FAILED = 3,
+ /* Below are obsolete */
+ FTM_PARAMS_STATUS_OVERRIDDEN = 4,
+ FTM_PARAMS_STATUS_ASAP_INCAPABLE = 5,
+ FTM_PARAMS_STATUS_ASAP_FAILED = 6,
+ /* rest are reserved */
+};
+
+enum {
+ FTM_PARAMS_CHAN_INFO_NO_PREF = 0,
+ FTM_PARAMS_CHAN_INFO_RESERVE1 = 1,
+ FTM_PARAMS_CHAN_INFO_RESERVE2 = 2,
+ FTM_PARAMS_CHAN_INFO_RESERVE3 = 3,
+ FTM_PARAMS_CHAN_INFO_NON_HT_5 = 4,
+ FTM_PARAMS_CHAN_INFO_RESERVE5 = 5,
+ FTM_PARAMS_CHAN_INFO_NON_HT_10 = 6,
+ FTM_PARAMS_CHAN_INFO_RESERVE7 = 7,
+ FTM_PARAMS_CHAN_INFO_NON_HT_20 = 8, /* excludes 2.4G, and High rate DSSS */
+ FTM_PARAMS_CHAN_INFO_HT_MF_20 = 9,
+ FTM_PARAMS_CHAN_INFO_VHT_20 = 10,
+ FTM_PARAMS_CHAN_INFO_HT_MF_40 = 11,
+ FTM_PARAMS_CHAN_INFO_VHT_40 = 12,
+ FTM_PARAMS_CHAN_INFO_VHT_80 = 13,
+ FTM_PARAMS_CHAN_INFO_VHT_80_80 = 14,
+ FTM_PARAMS_CHAN_INFO_VHT_160_2_RFLOS = 15,
+ FTM_PARAMS_CHAN_INFO_VHT_160 = 16,
+ /* Reserved from 17 - 30 */
+ FTM_PARAMS_CHAN_INFO_DMG_2160 = 31,
+ /* Reserved from 32 - 63 */
+ FTM_PARAMS_CHAN_INFO_MAX = 63
+};
+
+/* tag_ID/length/value_buffer tuple */
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint8 id;
+ uint8 len;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT ftm_vs_tlv_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_ftm_vs_ie {
+ uint8 id; /* DOT11_MNG_VS_ID */
+ uint8 len; /* length following */
+ uint8 oui[3]; /* BRCM_PROP_OUI (or Customer) */
+ uint8 sub_type; /* BRCM_FTM_IE_TYPE (or Customer) */
+ uint8 version;
+ ftm_vs_tlv_t tlvs[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ftm_vs_ie dot11_ftm_vs_ie_t;
+
+/* same as the payload of dot11_ftm_vs_ie.
+* This definition allows struct-based access to the payload
+* while building the FTM VS IE from other modules (e.g. NAN)
+*/
+BWL_PRE_PACKED_STRUCT struct dot11_ftm_vs_ie_pyld {
+ uint8 sub_type; /* BRCM_FTM_IE_TYPE (or Customer) */
+ uint8 version;
+ ftm_vs_tlv_t tlvs[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ftm_vs_ie_pyld dot11_ftm_vs_ie_pyld_t;
+
+/* ftm vs api version */
+#define BCM_FTM_VS_PARAMS_VERSION 0x01
+
+/* ftm vendor specific information tlv types */
+enum {
+ FTM_VS_TLV_NONE = 0,
+ FTM_VS_TLV_REQ_PARAMS = 1, /* additional request params (in FTM_REQ) */
+ FTM_VS_TLV_MEAS_INFO = 2, /* measurement information (in FTM_MEAS) */
+ FTM_VS_TLV_SEC_PARAMS = 3, /* security parameters (in either) */
+ FTM_VS_TLV_SEQ_PARAMS = 4, /* toast parameters (FTM_REQ, BRCM proprietary) */
+ FTM_VS_TLV_MF_BUF = 5, /* multi frame buffer - may span ftm vs ie's */
+ FTM_VS_TLV_TIMING_PARAMS = 6, /* timing adjustments */
+ FTM_VS_TLV_MF_STATS_BUF = 7 /* multi frame statistics buffer */
+ /* add additional types above */
+};
+
+/* the following definitions are *DEPRECATED* and moved to implementation files. They
+ * are retained here because some older (May 2016) branches still use them
+ */
+#define FTM_TPK_LEN 16u
+#define FTM_RI_RR_BUF_LEN 32u
+#define FTM_TPK_RI_RR_LEN 13
+#define FTM_TPK_RI_RR_LEN_SECURE_2_0 28
+#define FTM_TPK_RI_PHY_LEN 7u
+#define FTM_TPK_RR_PHY_LEN 7u
+#define FTM_TPK_DATA_BUFFER_LEN 88u
+#define FTM_TPK_LEN_SECURE_2_0 64u
+#define FTM_TPK_RI_PHY_LEN_SECURE_2_0 14u
+#define FTM_TPK_RR_PHY_LEN_SECURE_2_0 14u
+
+#define FTM_RI_RR_BUF_LEN_20MHZ 32u
+#define FTM_RI_RR_BUF_LEN_80MHZ 64u
+
+#define FTM_RI_RR_BUF_LEN_FROM_CHANSPEC(chanspec) \
+ (CHSPEC_IS20((chanspec)) ? \
+ FTM_RI_RR_BUF_LEN_20MHZ : FTM_RI_RR_BUF_LEN_80MHZ)
+
+#define FTM_TPK_RI_RR_LEN_SECURE_2_0_20MHZ 28u
+#define FTM_TPK_RI_RR_LEN_SECURE_2_0_80MHZ 62u
+#define FTM_TPK_RI_RR_LEN_SECURE_2_0_2G FTM_TPK_RI_RR_LEN_SECURE_2_0
+#define FTM_TPK_RI_RR_LEN_SECURE_2_0_5G FTM_TPK_RI_RR_LEN_SECURE_2_0_80MHZ
+
+#define FTM_TPK_RI_RR_LEN_FROM_CHANSPEC(chanspec) \
+ (CHSPEC_IS20((chanspec)) ? FTM_TPK_RI_RR_LEN_SECURE_2_0_20MHZ : \
+ FTM_TPK_RI_RR_LEN_SECURE_2_0_80MHZ)
+
+#define FTM_TPK_RI_PHY_LEN_SECURE_2_0_20MHZ 14u
+#define FTM_TPK_RI_PHY_LEN_SECURE_2_0_80MHZ 31u
+#define FTM_TPK_RR_PHY_LEN_SECURE_2_0_80MHZ 31u
+
+#define FTM_TPK_RI_PHY_LEN_FROM_CHANSPEC(chanspec) \
+ (CHSPEC_IS20((chanspec)) ? FTM_TPK_RI_PHY_LEN_SECURE_2_0_20MHZ : \
+ FTM_TPK_RI_PHY_LEN_SECURE_2_0_80MHZ)
+
+#define FTM_TPK_RR_PHY_LEN_SECURE_2_0_20MHZ 14u
+
+#define FTM_TPK_RR_PHY_LEN_FROM_CHANSPEC(chanspec) \
+ (CHSPEC_IS20((chanspec)) ? FTM_TPK_RR_PHY_LEN_SECURE_2_0_20MHZ : \
+ FTM_TPK_RR_PHY_LEN_SECURE_2_0_80MHZ)
+
+BWL_PRE_PACKED_STRUCT struct dot11_ftm_vs_params {
+ uint8 id; /* DOT11_MNG_VS_ID */
+ uint8 len;
+ uint8 oui[3]; /* Proprietary OUI, BRCM_PROP_OUI */
+ uint8 bcm_vs_id;
+ ftm_vs_tlv_t ftm_tpk_ri_rr[1]; /* ftm_TPK_ri_rr placeholder */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ftm_vs_params dot11_ftm_vs_tpk_ri_rr_params_t;
+#define DOT11_FTM_VS_LEN (sizeof(dot11_ftm_vs_tpk_ri_rr_params_t) - TLV_HDR_LEN)
+/* end *DEPRECATED* ftm definitions */
+
+BWL_PRE_PACKED_STRUCT struct dot11_ftm_sync_info {
+ uint8 id; /* Extended - 255 11mc D4.3 */
+ uint8 len;
+ uint8 id_ext;
+ uint8 tsf_sync_info[4];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ftm_sync_info dot11_ftm_sync_info_t;
+
+/* ftm tsf sync info ie len - includes id ext */
+#define DOT11_FTM_SYNC_INFO_IE_LEN (sizeof(dot11_ftm_sync_info_t) - TLV_HDR_LEN)
+
+#define DOT11_FTM_IS_SYNC_INFO_IE(_ie) (\
+ DOT11_MNG_IE_ID_EXT_MATCH(_ie, DOT11_MNG_FTM_SYNC_INFO) && \
+ (_ie)->len == DOT11_FTM_SYNC_INFO_IE_LEN)
+
+BWL_PRE_PACKED_STRUCT struct dot11_dh_param_ie {
+ uint8 id; /* OWE */
+ uint8 len;
+ uint8 ext_id; /* EXT_MNG_OWE_DH_PARAM_ID */
+ uint16 group;
+ uint8 pub_key[0];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_dh_param_ie dot11_dh_param_ie_t;
+
+#define DOT11_DH_EXTID_OFFSET (OFFSETOF(dot11_dh_param_ie_t, ext_id))
+
+#define DOT11_OWE_DH_PARAM_IE(_ie) (\
+ DOT11_MNG_IE_ID_EXT_MATCH(_ie, EXT_MNG_OWE_DH_PARAM_ID))
+
+#define DOT11_MNG_OWE_IE_ID_EXT_INIT(_ie, _id, _len) do {\
+ (_ie)->id = DOT11_MNG_ID_EXT_ID; \
+ (_ie)->len = _len; \
+ (_ie)->ext_id = _id; \
+} while (0)
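+
+/* Illustrative sketch (not part of the original header): initializing an OWE
+ * DH parameter element with the macro above. Group 19 (NIST P-256) is an
+ * example value only; byte ordering of 'group' is left to the caller. Kept
+ * under #if 0 so it is never compiled.
+ */
+#if 0
+static void
+example_init_owe_dh_ie(dot11_dh_param_ie_t *ie, const uint8 *pub, uint8 pub_len)
+{
+ /* len covers ext_id (1) + group (2) + public key */
+ DOT11_MNG_OWE_IE_ID_EXT_INIT(ie, EXT_MNG_OWE_DH_PARAM_ID, 3 + pub_len);
+ ie->group = 19;
+ memcpy(ie->pub_key, pub, pub_len);
+}
+#endif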
+
+/* 802.11u interworking access network options */
+#define IW_ANT_MASK 0x0f
+#define IW_INTERNET_MASK 0x10
+#define IW_ASRA_MASK 0x20
+#define IW_ESR_MASK 0x40
+#define IW_UESA_MASK 0x80
+
+/* 802.11u interworking access network type */
+#define IW_ANT_PRIVATE_NETWORK 0
+#define IW_ANT_PRIVATE_NETWORK_WITH_GUEST 1
+#define IW_ANT_CHARGEABLE_PUBLIC_NETWORK 2
+#define IW_ANT_FREE_PUBLIC_NETWORK 3
+#define IW_ANT_PERSONAL_DEVICE_NETWORK 4
+#define IW_ANT_EMERGENCY_SERVICES_NETWORK 5
+#define IW_ANT_TEST_NETWORK 14
+#define IW_ANT_WILDCARD_NETWORK 15
+
+#define IW_ANT_LEN 1
+#define IW_VENUE_LEN 2
+#define IW_HESSID_LEN 6
+#define IW_HESSID_OFF (IW_ANT_LEN + IW_VENUE_LEN)
+#define IW_MAX_LEN (IW_ANT_LEN + IW_VENUE_LEN + IW_HESSID_LEN)
+
+/* 802.11u advertisement protocol */
+#define ADVP_ANQP_PROTOCOL_ID 0
+#define ADVP_MIH_PROTOCOL_ID 1
+
+/* 802.11u advertisement protocol masks */
+#define ADVP_QRL_MASK 0x7f
+#define ADVP_PAME_BI_MASK 0x80
+
+/* 802.11u advertisement protocol values */
+#define ADVP_QRL_REQUEST 0x00
+#define ADVP_QRL_RESPONSE 0x7f
+#define ADVP_PAME_BI_DEPENDENT 0x00
+#define ADVP_PAME_BI_INDEPENDENT ADVP_PAME_BI_MASK
+
+/* 802.11u ANQP information ID */
+#define ANQP_ID_QUERY_LIST 256
+#define ANQP_ID_CAPABILITY_LIST 257
+#define ANQP_ID_VENUE_NAME_INFO 258
+#define ANQP_ID_EMERGENCY_CALL_NUMBER_INFO 259
+#define ANQP_ID_NETWORK_AUTHENTICATION_TYPE_INFO 260
+#define ANQP_ID_ROAMING_CONSORTIUM_LIST 261
+#define ANQP_ID_IP_ADDRESS_TYPE_AVAILABILITY_INFO 262
+#define ANQP_ID_NAI_REALM_LIST 263
+#define ANQP_ID_G3PP_CELLULAR_NETWORK_INFO 264
+#define ANQP_ID_AP_GEOSPATIAL_LOCATION 265
+#define ANQP_ID_AP_CIVIC_LOCATION 266
+#define ANQP_ID_AP_LOCATION_PUBLIC_ID_URI 267
+#define ANQP_ID_DOMAIN_NAME_LIST 268
+#define ANQP_ID_EMERGENCY_ALERT_ID_URI 269
+#define ANQP_ID_EMERGENCY_NAI 271
+#define ANQP_ID_NEIGHBOR_REPORT 272
+#define ANQP_ID_VENDOR_SPECIFIC_LIST 56797
+
+/* 802.11u ANQP ID len */
+#define ANQP_INFORMATION_ID_LEN 2
+
+/* 802.11u ANQP OUI */
+#define ANQP_OUI_SUBTYPE 9
+
+/* 802.11u venue name */
+#define VENUE_LANGUAGE_CODE_SIZE 3
+#define VENUE_NAME_SIZE 255
+
+/* 802.11u venue groups */
+#define VENUE_UNSPECIFIED 0
+#define VENUE_ASSEMBLY 1
+#define VENUE_BUSINESS 2
+#define VENUE_EDUCATIONAL 3
+#define VENUE_FACTORY 4
+#define VENUE_INSTITUTIONAL 5
+#define VENUE_MERCANTILE 6
+#define VENUE_RESIDENTIAL 7
+#define VENUE_STORAGE 8
+#define VENUE_UTILITY 9
+#define VENUE_VEHICULAR 10
+#define VENUE_OUTDOOR 11
+
+/* 802.11u network authentication type indicator */
+#define NATI_UNSPECIFIED -1
+#define NATI_ACCEPTANCE_OF_TERMS_CONDITIONS 0
+#define NATI_ONLINE_ENROLLMENT_SUPPORTED 1
+#define NATI_HTTP_HTTPS_REDIRECTION 2
+#define NATI_DNS_REDIRECTION 3
+
+/* 802.11u IP address type availability - IPv6 */
+#define IPA_IPV6_SHIFT 0
+#define IPA_IPV6_MASK (0x03 << IPA_IPV6_SHIFT)
+#define IPA_IPV6_NOT_AVAILABLE 0x00
+#define IPA_IPV6_AVAILABLE 0x01
+#define IPA_IPV6_UNKNOWN_AVAILABILITY 0x02
+
+/* 802.11u IP address type availability - IPv4 */
+#define IPA_IPV4_SHIFT 2
+#define IPA_IPV4_MASK (0x3f << IPA_IPV4_SHIFT)
+#define IPA_IPV4_NOT_AVAILABLE 0x00
+#define IPA_IPV4_PUBLIC 0x01
+#define IPA_IPV4_PORT_RESTRICT 0x02
+#define IPA_IPV4_SINGLE_NAT 0x03
+#define IPA_IPV4_DOUBLE_NAT 0x04
+#define IPA_IPV4_PORT_RESTRICT_SINGLE_NAT 0x05
+#define IPA_IPV4_PORT_RESTRICT_DOUBLE_NAT 0x06
+#define IPA_IPV4_UNKNOWN_AVAILABILITY 0x07
+
+/* 802.11u NAI realm encoding */
+#define REALM_ENCODING_RFC4282 0
+#define REALM_ENCODING_UTF8 1
+
+/* 802.11u IANA EAP method type numbers */
+#define REALM_EAP_TLS 13
+#define REALM_EAP_LEAP 17
+#define REALM_EAP_SIM 18
+#define REALM_EAP_TTLS 21
+#define REALM_EAP_AKA 23
+#define REALM_EAP_PEAP 25
+#define REALM_EAP_FAST 43
+#define REALM_EAP_PSK 47
+#define REALM_EAP_AKAP 50
+#define REALM_EAP_EXPANDED 254
+
+/* 802.11u authentication ID */
+#define REALM_EXPANDED_EAP 1
+#define REALM_NON_EAP_INNER_AUTHENTICATION 2
+#define REALM_INNER_AUTHENTICATION_EAP 3
+#define REALM_EXPANDED_INNER_EAP 4
+#define REALM_CREDENTIAL 5
+#define REALM_TUNNELED_EAP_CREDENTIAL 6
+#define REALM_VENDOR_SPECIFIC_EAP 221
+
+/* 802.11u non-EAP inner authentication type */
+#define REALM_RESERVED_AUTH 0
+#define REALM_PAP 1
+#define REALM_CHAP 2
+#define REALM_MSCHAP 3
+#define REALM_MSCHAPV2 4
+
+/* 802.11u credential type */
+#define REALM_SIM 1
+#define REALM_USIM 2
+#define REALM_NFC 3
+#define REALM_HARDWARE_TOKEN 4
+#define REALM_SOFTOKEN 5
+#define REALM_CERTIFICATE 6
+#define REALM_USERNAME_PASSWORD 7
+#define REALM_SERVER_SIDE 8
+#define REALM_RESERVED_CRED 9
+#define REALM_VENDOR_SPECIFIC_CRED 10
+
+/* 802.11u 3GPP PLMN */
+#define G3PP_GUD_VERSION 0
+#define G3PP_PLMN_LIST_IE 0
+
+/* AP Location Public ID Info encoding */
+#define PUBLIC_ID_URI_FQDN_SE_ID 0
+/* URI/FQDN Descriptor field values */
+#define LOCATION_ENCODING_HELD 1
+#define LOCATION_ENCODING_SUPL 2
+#define URI_FQDN_SIZE 255
+
+/** hotspot2.0 indication element (vendor specific) */
+BWL_PRE_PACKED_STRUCT struct hs20_ie {
+ uint8 oui[3];
+ uint8 type;
+ uint8 config;
+} BWL_POST_PACKED_STRUCT;
+typedef struct hs20_ie hs20_ie_t;
+#define HS20_IE_LEN 5 /* HS20 IE length */
+
+/* Short SSID list Extended Capabilities element */
+BWL_PRE_PACKED_STRUCT struct short_ssid_list_ie {
+ uint8 id;
+ uint8 len;
+ uint8 id_ext;
+ uint8 data[1]; /* Short SSIDs */
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct short_ssid_list_ie short_ssid_list_ie_t;
+#define SHORT_SSID_LIST_IE_FIXED_LEN 3 /* SHORT SSID LIST IE LENGTH */
+
+/** IEEE 802.11 Annex E */
+typedef enum {
+ DOT11_2GHZ_20MHZ_CLASS_12 = 81, /* Ch 1-11 */
+ DOT11_5GHZ_20MHZ_CLASS_1 = 115, /* Ch 36-48 */
+ DOT11_5GHZ_20MHZ_CLASS_2_DFS = 118, /* Ch 52-64 */
+ DOT11_5GHZ_20MHZ_CLASS_3 = 124, /* Ch 149-161 */
+ DOT11_5GHZ_20MHZ_CLASS_4_DFS = 121, /* Ch 100-140 */
+ DOT11_5GHZ_20MHZ_CLASS_5 = 125, /* Ch 149-165 */
+ DOT11_5GHZ_40MHZ_CLASS_22 = 116, /* Ch 36-44, lower */
+ DOT11_5GHZ_40MHZ_CLASS_23_DFS = 119, /* Ch 52-60, lower */
+ DOT11_5GHZ_40MHZ_CLASS_24_DFS = 122, /* Ch 100-132, lower */
+ DOT11_5GHZ_40MHZ_CLASS_25 = 126, /* Ch 149-157, lower */
+ DOT11_5GHZ_40MHZ_CLASS_27 = 117, /* Ch 40-48, upper */
+ DOT11_5GHZ_40MHZ_CLASS_28_DFS = 120, /* Ch 56-64, upper */
+ DOT11_5GHZ_40MHZ_CLASS_29_DFS = 123, /* Ch 104-136, upper */
+ DOT11_5GHZ_40MHZ_CLASS_30 = 127, /* Ch 153-161, upper */
+ DOT11_2GHZ_40MHZ_CLASS_32 = 83, /* Ch 1-7, lower */
+ DOT11_2GHZ_40MHZ_CLASS_33 = 84, /* Ch 5-11, upper */
+} dot11_op_class_t;
+
+/* QoS map */
+#define QOS_MAP_FIXED_LENGTH (8 * 2) /* DSCP ranges fixed with 8 entries */
+
+/* BCM proprietary IE type for AIBSS */
+#define BCM_AIBSS_IE_TYPE 56
+
+/* BCM proprietary flag type for WL_DISCO_VSIE */
+#define SSE_OUI "\x00\x00\xF0"
+#define VENDOR_ENTERPRISE_STA_OUI_TYPE 0x22
+#define MAX_VSIE_DISASSOC (1)
+#define DISCO_VSIE_LEN 0x09u
+
+/* Single PMK IE */
+#define CCX_SPMK_TYPE 3 /* CCX Extended Cap IE type for SPMK */
+/* CCX Extended Capability IE */
+BWL_PRE_PACKED_STRUCT struct ccx_spmk_cap_ie {
+ uint8 id; /* 221, DOT11_MNG_PROPR_ID */
+ uint8 len;
+ uint8 oui[DOT11_OUI_LEN]; /* 00:40:96, CISCO_AIRONET_OUI */
+ uint8 type; /* 11 */
+ uint8 cap;
+} BWL_POST_PACKED_STRUCT;
+typedef struct ccx_spmk_cap_ie ccx_spmk_cap_ie_t;
+
+/* OWE definitions */
+/* ID + len + OUI + OUI type + BSSID + SSID_len */
+#define OWE_TRANS_MODE_IE_FIXED_LEN 13u
+
+/* Supported Operating Classes element */
+BWL_PRE_PACKED_STRUCT struct supp_op_classes_ie {
+ uint8 id;
+ uint8 len;
+ uint8 cur_op_class;
+ uint8 op_classes[]; /* Supported Operating Classes */
+} BWL_POST_PACKED_STRUCT;
+typedef struct supp_op_classes_ie supp_op_classes_ie_t;
+
+/* Transition mode (bit number) */
+#define TRANSISION_MODE_WPA3_PSK 0u
+#define TRANSITION_MODE_SAE_PK 1u
+#define TRANSITION_MODE_WPA3_ENTERPRISE 2u
+#define TRANSITION_MODE_ENHANCED_OPEN 3u
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _802_11_H_ */
diff --git a/bcmdhd.101.10.361.x/include/802.11ah.h b/bcmdhd.101.10.361.x/include/802.11ah.h
new file mode 100755
index 0000000..637284b
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/802.11ah.h
@@ -0,0 +1,281 @@
+/*
+ * Basic types and constants relating to 802.11ah standard.
+ * This is a portion of 802.11ah definition. The rest are in 802.11.h.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ */
+
+#ifndef _802_11ah_h_
+#define _802_11ah_h_
+
+#include <typedefs.h>
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+/**
+ * TWT IE (sec 9.4.2.200)
+ */
+
+/* TWT element - top (Figure 9-589av) */
+BWL_PRE_PACKED_STRUCT struct twt_ie_top {
+ uint8 id;
+ uint8 len;
+ uint8 ctrl; /* Control */
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct twt_ie_top twt_ie_top_t;
+
+/* S1G Action IDs */
+#define S1G_ACTION_TWT_SETUP 6u
+#define S1G_ACTION_TWT_TEARDOWN 7u
+#define S1G_ACTION_TWT_INFO 11u
+
+/* S1G Action frame offsets */
+#define S1G_AF_CAT_OFF 0u
+#define S1G_AF_ACT_OFF 1u
+
+/* TWT Setup */
+#define S1G_AF_TWT_SETUP_TOKEN_OFF 2u
+#define S1G_AF_TWT_SETUP_TWT_IE_OFF 3u
+
+/* TWT Teardown */
+#define S1G_AF_TWT_TEARDOWN_FLOW_OFF 2u
+
+/* TWT Information */
+#define S1G_AF_TWT_INFO_OFF 2u
+
+#define TWT_BCAST_WAKE_TIME_OFFSET 10u
+#define TWT_BCAST_WAKE_TIME_SHIFT 10u
+#define TWT_BCAST_WAKE_TIME_MASK 0x03FFFC00u
+#define TWT_BCAST_WAKE_TIME_ZERO_BIT_SZ 10u
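+
+/* Illustrative sketch (not part of the original header): the broadcast
+ * target wake time is bits 10..25 of the TSF per the mask/shift above; the
+ * ten low-order bits are implicitly zero. Kept under #if 0 so it is never
+ * compiled.
+ */
+#if 0
+static uint16
+example_bcast_wake_time(uint64 tsf)
+{
+ return (uint16)((tsf & TWT_BCAST_WAKE_TIME_MASK) >>
+ TWT_BCAST_WAKE_TIME_SHIFT);
+}
+#endif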
+
+/* Control field (Figure 9-589aw) */
+#define TWT_CTRL_NDP_PAGING_IND 0x01u /* NDP Paging Indication */
+#define TWT_CTRL_RESP_PM_MODE 0x02u /* Responder PM Mode */
+#define TWT_CTRL_NEGO_TYPE_IDX 2u
+#define TWT_CTRL_NEGO_TYPE_MASK 0x0Cu /* TWT Negotiation Type */
+#define TWT_CTRL_NEGO_TYPE_SHIFT 2u
+#define TWT_CTRL_INFO_FRM_DISABLED 0x10u /* TWT info frame disabled */
+#define TWT_CTRL_WAKEDUR_UNIT 0x20u /* Wake duration unit */
+
+/* TWT Negotiation Type (Table 9-262j1) */
+typedef enum twt_ctrl_nego_type {
+ TWT_CTRL_NEGO_TYPE_0 = 0, /* Individual TWT Setup */
+ TWT_CTRL_NEGO_TYPE_1 = 1, /* Wake TBTT Negotiation */
+ TWT_CTRL_NEGO_TYPE_2 = 2, /* Broadcast TWT IE in Beacon */
+ TWT_CTRL_NEGO_TYPE_3 = 3, /* Broadcast TWT memberships */
+} twt_ctrl_nego_type_t;
+
+/* Request Type field (Figure 9-589ay) */
+#define TWT_REQ_TYPE_REQUEST 0x0001u /* Request */
+#define TWT_REQ_TYPE_SETUP_CMD_MASK 0x000eu /* Setup Command */
+#define TWT_REQ_TYPE_SETUP_CMD_SHIFT 1u
+#define TWT_REQ_TYPE_TRIGGER 0x0010u /* Trigger */
+#define TWT_REQ_TYPE_IMPLICIT 0x0020u /* Implicit */
+#define TWT_REQ_TYPE_LAST_BCAST_PARAM 0x0020u /* Last Broadcast Parameter Set */
+#define TWT_REQ_TYPE_FLOW_TYPE 0x0040u /* Flow Type */
+#define TWT_REQ_TYPE_FLOW_ID_MASK 0x0380u /* Flow Identifier */
+#define TWT_REQ_TYPE_FLOW_ID_SHIFT 7u
+#define TWT_REQ_TYPE_BTWT_RECOMM_MASK 0x0380u /* Broadcast TWT Recommendation */
+#define TWT_REQ_TYPE_BTWT_RECOMM_SHIFT 7u
+#define TWT_REQ_TYPE_WAKE_EXP_MASK 0x7c00u /* Wake Interval Exponent */
+#define TWT_REQ_TYPE_WAKE_EXP_SHIFT 10u
+#define TWT_REQ_TYPE_PROTECTION 0x8000u /* Protection */
+
+/* Setup Command field (Table 9-262k) */
+#define TWT_SETUP_CMD_REQUEST_TWT 0u /* Request TWT */
+#define TWT_SETUP_CMD_SUGGEST_TWT 1u /* Suggest TWT */
+#define TWT_SETUP_CMD_DEMAND_TWT 2u /* Demand TWT */
+#define TWT_SETUP_CMD_GROUPING_TWT 3u /* Grouping TWT */
+#define TWT_SETUP_CMD_ACCEPT_TWT 4u /* Accept TWT */
+#define TWT_SETUP_CMD_ALTERNATE_TWT 5u /* Alternate TWT */
+#define TWT_SETUP_CMD_DICTATE_TWT 6u /* Dictate TWT */
+#define TWT_SETUP_CMD_REJECT_TWT 7u /* Reject TWT */
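+
+/* Illustrative sketch (not part of the original header): composing a Request
+ * Type field for an individual TWT request from the masks above; the chosen
+ * values are examples only. Kept under #if 0 so it is never compiled.
+ */
+#if 0
+static uint16
+example_twt_request_type(void)
+{
+ uint16 rt = 0;
+ rt |= TWT_REQ_TYPE_REQUEST; /* TWT requesting STA */
+ rt |= (TWT_SETUP_CMD_SUGGEST_TWT << TWT_REQ_TYPE_SETUP_CMD_SHIFT) &
+ TWT_REQ_TYPE_SETUP_CMD_MASK;
+ rt |= TWT_REQ_TYPE_IMPLICIT; /* implicit TWT */
+ rt |= (10u << TWT_REQ_TYPE_WAKE_EXP_SHIFT) & TWT_REQ_TYPE_WAKE_EXP_MASK;
+ return rt;
+}
+#endif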
+
+/* Broadcast TWT Recommendation field (Table 9-262k1) */
+#define TWT_BCAST_FRAME_RECOMM_0 0u /* No constraints on frames in Broadcast TWT SP */
+#define TWT_BCAST_FRAME_RECOMM_1 1u /* Do not contain RUs for random access */
+#define TWT_BCAST_FRAME_RECOMM_2 2u /* Can contain RUs for random access */
+#define TWT_BCAST_FRAME_RECOMM_3 3u
+
+/* Request Type subfield - 2 octets */
+typedef uint16 twt_request_type_t; /* 16 bit request type */
+
+/* Target Wake Time - 8 octets or 0 octet */
+typedef uint64 twt_target_wake_time_t; /* 64 bit TSF time of TWT Responding STA */
+typedef uint16 twt_bcast_wake_time_t; /* 16 bit Wake Time of Bcast scheduling STA */
+typedef uint16 twt_bcast_twt_info_t; /* 16 bit Broadcast TWT Info subfield */
+
+/* TWT Group Assignment Info - 9 octets (long format) or 3 octets (short format) or 0 octet */
+/* Group Assignment Info field - short format - Zero Offset Present field is 0 */
+BWL_PRE_PACKED_STRUCT struct twt_grp_short {
+ uint8 grpid_n_0off; /* Group ID and Zero Offset Present */
+ uint16 unit_n_off; /* TWT Unit and TWT Offset */
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct twt_grp_short twt_grp_short_t;
+
+/* Group Assignment Info field - long format - Zero Offset Present field is 1 */
+#define TWT_ZERO_OFF_GRP_LEN 6u
+BWL_PRE_PACKED_STRUCT struct twt_grp_long {
+ uint8 grpid_n_0off; /* Group ID and Zero Offset Present */
+ uint8 grp_0off[TWT_ZERO_OFF_GRP_LEN]; /* Zero Offset of Group */
+ uint16 unit_n_off; /* Unit and Offset */
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct twt_grp_long twt_grp_long_t;
+
+/* TWT Unit and TWT Offset field */
+#define TWT_UNIT_MASK 0x000fu /* TWT Unit */
+#define TWT_OFFSET_MASK 0xfff0u /* TWT Offset */
+#define TWT_OFFSET_SHIFT 4u
+
+/* TWT Unit field (table 8-248m) */
+#define TWT_UNIT_32us 0u
+#define TWT_UNIT_256us 1u
+#define TWT_UNIT_1024us 2u
+#define TWT_UNIT_8ms192us 3u
+#define TWT_UNIT_32ms768us 4u
+#define TWT_UNIT_262ms144us 5u
+#define TWT_UNIT_1s048576us 6u
+#define TWT_UNIT_8s388608us 7u
+#define TWT_UNIT_33s554432us 8u
+#define TWT_UNIT_268s435456us 9u
+#define TWT_UNIT_1073s741824us 10u
+#define TWT_UNIT_8589s934592us 11u
+
+/* TWT element - bottom */
+BWL_PRE_PACKED_STRUCT struct twt_ie_itwt_bottom {
+ uint8 nom_wake_dur; /* Nominal Minimum Wake Duration */
+ uint16 wake_int_mant; /* TWT Wake Interval Mantissa */
+ uint8 channel; /* TWT Channel */
+ /* NDP Paging field */
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct twt_ie_itwt_bottom twt_ie_itwt_bottom_t;
+
+/* TWT element - bottom */
+BWL_PRE_PACKED_STRUCT struct twt_ie_btwt_bottom {
+ uint8 nom_wake_dur; /* Nominal Minimum Wake Duration */
+ uint16 wake_int_mant; /* TWT Wake Interval Mantissa */
+ twt_bcast_twt_info_t btwt_info; /* Broadcast TWT Info */
+ /* NDP Paging field */
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct twt_ie_btwt_bottom twt_ie_btwt_bottom_t;
+
+/* TWT IE structure for broadcast TWT */
+typedef struct twt_last_bcast_ie {
+ twt_ie_top_t top; /* Element id, len, control fields */
+ twt_request_type_t req_type; /* request type field */
+ twt_bcast_wake_time_t twt; /* twt field */
+ twt_ie_btwt_bottom_t btwt_bottom; /* wake dur, int, BID Info */
+} twt_last_bcast_ie_t;
+
+/* Nominal Minimum Wake Duration */
+#define TWT_WAKE_DUR_UNIT_256us 256u /* Nom. Min. Wake Duration is in 256us units */
+#define TWT_WAKE_DUR_UNIT_1ms 1024u /* Nom. Min. Wake Duration is in 1ms units */
+
+/* to be deprecated */
+#define TWT_NOM_WAKE_DUR_UNIT 256u /* Nominal Minimum Wake Duration is in 256us units */
+
+/* TWT IE field lengths */
+#define TWT_IE_NOM_MIN_TWT_WK_DUR_SZ 1u /* 1 byte */
+#define TWT_IE_TWT_WAKE_INT_MANT_SZ 2u /* 2 bytes */
+#define TWT_IE_BCAST_TWT_INFO_SZ 2u /* 2 bytes */
+#define TWT_IE_TWT_CHANNEL_SZ 1u /* 1 byte */
+
+/* Broadcast TWT info subfield format (figure 9-589ay1) */
+#define TWT_BTWT_PERSIST_EXPO_MASK 0x0007u /* Broadcast TWT Persistence Exponent */
+#define TWT_BCAST_TWT_ID_MASK 0x00F8u /* Broadcast TWT ID */
+#define TWT_BCAST_TWT_ID_SHIFT 3u
+#define TWT_BTWT_PERSIST_MANT_MASK 0xFF00u /* Broadcast TWT Persistence Mantissa */
+#define TWT_BTWT_PERSIST_MANT_SHIFT 8u
+
+#define TWT_BTWT_PERSIST_INDEFINITE 0xFFu
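+
+/* Illustrative sketch (not part of the original header): unpacking the
+ * Broadcast TWT Info subfield with the masks above; output parameter names
+ * are hypothetical. Kept under #if 0 so it is never compiled.
+ */
+#if 0
+static void
+example_btwt_info(uint16 info, uint8 *bid, uint8 *pers_expo, uint8 *pers_mant)
+{
+ *bid = (uint8)((info & TWT_BCAST_TWT_ID_MASK) >> TWT_BCAST_TWT_ID_SHIFT);
+ *pers_expo = (uint8)(info & TWT_BTWT_PERSIST_EXPO_MASK);
+ *pers_mant = (uint8)((info & TWT_BTWT_PERSIST_MANT_MASK) >>
+ TWT_BTWT_PERSIST_MANT_SHIFT);
+}
+#endif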
+
+/* NDP Paging field - 4 octets or 0 octet */
+typedef uint32 twt_ndp_paging_t;
+
+#define TWT_NDP_PAGING_PID 0x000001ffu /* P-ID */
+#define TWT_NDP_PAGING_MAX_PERIOD 0x0001fe00u /* Max NDP Paging Period */
+#define TWT_NDP_PAGING_PART_TSF_OFF 0x001e0000u /* Partial TSF Offset */
+#define TWT_NDP_PAGING_ACTION 0x00e00000u /* Action */
+#define TWT_NDP_PAGING_MIN_SLEEP 0x3f000000u /* Min Sleep Duration */
+
+/* Action field (table 8-248n) */
+#define TWT_ACTION_SEND_PSP_TRIG 0u /* Send a PS-Poll or uplink trigger frame */
+#define TWT_ACTION_WAKE_MIN_SLEEP 1u /* Wake up at the time indicated by
+ * Min Sleep Duration
+ */
+#define TWT_ACTION_WAKE_RCV_BCN 2u /* Wake up to receive the Beacon */
+#define TWT_ACTION_WAKE_RCV_DTIM 3u /* Wake up to receive the DTIM Beacon */
+#define TWT_ACTION_WAKE_IND_TIME 4u /* Wake up at the time indicated by the sum of
+ * the Min Sleep Duration field and the ASD subfield
+ * in the APDI field of the NDP Paging frame
+ */
+
+/* TWT Teardown for Negotiation type 0 or 1 */
+#define TWT_TEARDOWN_FLOW_ID_MASK 0x07u
+/* TWT Teardown for Negotiation type 3 */
+#define TWT_TEARDOWN_BTWT_ID_MASK 0x1Fu
+
+#define TWT_TEARDOWN_NEGO_TYPE_MASK 0x60u
+#define TWT_TEARDOWN_NEGO_TYPE_SHIFT 5u
+/* Teardown All TWT indication */
+#define TWT_TEARDOWN_ALL_TWT 0x80u
+
+/* TWT Information field byte 0 */
+#define TWT_INFO_FLOW_ID_MASK 0x07u
+#define TWT_INFO_RESP_REQ 0x08u
+#define TWT_INFO_NEXT_TWT_REQ 0x10u
+#define TWT_INFO_NEXT_TWT_SIZE_MASK 0x60u
+#define TWT_INFO_NEXT_TWT_SIZE_SHIFT 0x5u
+#define TWT_INFO_ALL_TWT 0x80u
+
+/* Next TWT Subfield Size field encoding */
+#define TWT_INFO_NEXT_TWT_SIZE_0_IDX 0u /* 0 byte */
+#define TWT_INFO_NEXT_TWT_SIZE_32_IDX 1u /* 4 bytes */
+#define TWT_INFO_NEXT_TWT_SIZE_48_IDX 2u /* 6 bytes */
+#define TWT_INFO_NEXT_TWT_SIZE_64_IDX 3u /* 8 bytes */
+
+/* Next TWT Subfield Size field */
+#define TWT_INFO_NEXT_TWT_SIZE_0 0u /* 0 byte */
+#define TWT_INFO_NEXT_TWT_SIZE_32 4u /* 4 bytes */
+#define TWT_INFO_NEXT_TWT_SIZE_48 6u /* 6 bytes */
+#define TWT_INFO_NEXT_TWT_SIZE_64 8u /* 8 bytes */
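+
+/* Illustrative sketch (not part of the original header): mapping the 2-bit
+ * Next TWT Subfield Size encoding to the byte count of the Next TWT subfield
+ * using the two tables above. Kept under #if 0 so it is never compiled.
+ */
+#if 0
+static uint8
+example_next_twt_size_bytes(uint8 size_idx)
+{
+ switch (size_idx) {
+ case TWT_INFO_NEXT_TWT_SIZE_32_IDX: return TWT_INFO_NEXT_TWT_SIZE_32;
+ case TWT_INFO_NEXT_TWT_SIZE_48_IDX: return TWT_INFO_NEXT_TWT_SIZE_48;
+ case TWT_INFO_NEXT_TWT_SIZE_64_IDX: return TWT_INFO_NEXT_TWT_SIZE_64;
+ default: return TWT_INFO_NEXT_TWT_SIZE_0;
+ }
+}
+#endif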
+
+/* Old macro definitions - To be removed - Start here */
+#define TWT_BCAST_MAX_VALID_FLOW_ID 3u
+#define TWT_CTRL_BCAST 0x04u /* Broadcast */
+#define TWT_CTRL_WAKE_TBTT_NEGO 0x08u /* Wake TBTT Negotiation */
+#define TWT_SETUP_CMD_GRPING_TWT 3u /* Grouping TWT */
+#define TWT_SETUP_CMD_ALTER_TWT 5u /* Alternate TWT */
+#define TWT_IE_BCAST_TWT_ID_SZ 1u /* 1 byte */
+#define TWT_INFO_BROADCAST_RESCHED 0x80u
+
+typedef struct twt_ie_itwt_bottom twt_ie_bottom_t;
+/* Old macro definitions - To be removed - End here */
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _802_11ah_h_ */
diff --git a/bcmdhd.101.10.361.x/include/802.11ax.h b/bcmdhd.101.10.361.x/include/802.11ax.h
new file mode 100755
index 0000000..49c5e48
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/802.11ax.h
@@ -0,0 +1,1180 @@
+/*
+ * Basic types and constants relating to 802.11ax/HE STA
+ * This is a portion of 802.11ax definition. The rest are in 802.11.h.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _802_11ax_h_
+#define _802_11ax_h_
+
+#include <typedefs.h>
+#include <802.11.h>
+#include <bcmtlv.h>
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+/* HT Control Field: (Table 9-9a) */
+#define HTC_HE_VARIANT 0x03u
+#define HTC_HEVAR_SHIFT 0 /* HE VARIANT shift */
+#define HTC_HEVAR(htc) (((htc) & HTC_HE_VARIANT) >> HTC_HEVAR_SHIFT)
+
+/* HT Control IDs: (Table 9-18a & Table 9-9a) */
+#define HTC_HE_CTLID_SHIFT 0x02u /* HTC HE CTLID shift */
+#define HTC_HE_CTLID_MASK 0x0Fu /* HTC HE CTLID mask */
+#define HTC_HE_CTLID(htc) (((htc) >> HTC_HE_CTLID_SHIFT) & HTC_HE_CTLID_MASK)
+
+#define HTC_HE_CTLID_TRS 0x0u /* Triggered response scheduling */
+#define HTC_HE_CTLID_OMI 0x1u /* Operating mode */
+#define HTC_HE_CTLID_HLA 0x2u /* HE link adaptation */
+#define HTC_HE_CTLID_BSR 0x3u /* Buffer status report */
+#define HTC_HE_CTLID_UPH 0x4u /* UL power headroom */
+#define HTC_HE_CTLID_BQR 0x5u /* Bandwidth query report */
+#define HTC_HE_CTLID_CAS 0x6u /* Command and status */
+
+/* HTC-Control field definitions: (Table 9.9a HTC Control field) */
+#define HTC_HE_CTL_SIZE 30u /* HTC Control field size */
+#define HTC_HE_CTL_DEFAULT 0xFFFFFFFC
+
+/* A-Control offset definitions: (Figure 9.18a Control ID subfield values) */
+#define HE_ACTRL_TRS_FSZ 26u
+#define HE_ACTRL_OMI_FSZ 12u
+#define HE_ACTRL_HLA_FSZ 26u
+#define HE_ACTRL_BSR_FSZ 26u
+#define HE_ACTRL_UPH_FSZ 8u
+#define HE_ACTRL_BQR_FSZ 10u
+#define HE_ACTRL_CAS_FSZ 8u
+
+/* OM-Control Field definitions: (Figure 9.15d Control Information subfield for OM Control) */
+#define HE_OMI_RXNSS_FSZ 3
+#define HE_OMI_RXNSS_IDX 0
+#define HE_OMI_RXNSS_MASK 0x07u
+#define HE_OMI_CHW_FSZ 2
+#define HE_OMI_CHW_IDX 3
+#define HE_OMI_CHW_MASK 0x18u
+#define HE_OMI_ULMU_DIS_FSZ 1
+#define HE_OMI_ULMU_DIS_IDX 5
+#define HE_OMI_ULMU_DIS_MASK 0x20u
+#define HE_OMI_TXNSTS_FSZ 3
+#define HE_OMI_TXNSTS_IDX 6
+#define HE_OMI_TXNSTS_MASK 0x1c0u
+#define HE_OMI_ERSU_DIS_FSZ 1
+#define HE_OMI_ERSU_DIS_IDX 9
+#define HE_OMI_ERSU_DIS_MASK 0x200u
+#define HE_OMI_DLMU_RSD_RCM_FSZ 1
+#define HE_OMI_DLMU_RSD_RCM_IDX 10
+#define HE_OMI_DLMU_RSD_RCM_MASK 0x400u
+#define HE_OMI_ULMU_DATA_DIS_FSZ 1
+#define HE_OMI_ULMU_DATA_DIS_IDX 11
+#define HE_OMI_ULMU_DATA_DIS_MASK 0x800u
+
+/* OM-Control Channel Width Subfield definition, as per 9.2.4.6a.2 OM Control */
+#define OMI_CHW_20MHZ 0
+#define OMI_CHW_40MHZ 1
+#define OMI_CHW_80MHZ 2
+#define OMI_CHW_160MHZ_80P80MHZ 3
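+
+/* Illustrative sketch (not part of the original header): packing the 12-bit
+ * OM Control information subfield from the index/mask pairs above. A 2x2 STA
+ * capping itself at 80 MHz is the example; the NSS/NSTS subfields encode the
+ * value minus one. Kept under #if 0 so it is never compiled.
+ */
+#if 0
+static uint16
+example_om_control(void)
+{
+ uint16 omi = 0;
+ omi |= ((2 - 1) << HE_OMI_RXNSS_IDX) & HE_OMI_RXNSS_MASK; /* Rx NSS 2 */
+ omi |= (OMI_CHW_80MHZ << HE_OMI_CHW_IDX) & HE_OMI_CHW_MASK; /* 80 MHz */
+ omi |= ((2 - 1) << HE_OMI_TXNSTS_IDX) & HE_OMI_TXNSTS_MASK; /* Tx NSTS 2 */
+ return omi;
+}
+#endif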
+
+/* Table 9-18d ACI Bitmap subfield encoding */
+#define HE_BSR_ACI_MAP_BE 0u
+#define HE_BSR_ACI_MAP_BK 1u
+#define HE_BSR_ACI_MAP_VI 2u
+#define HE_BSR_ACI_MAP_VO 3u
+
+/* GI And LTF Type subfield encoding (Table 9-31d) */
+#define HE_LTF_1_GI_1_6us (0u)
+#define HE_LTF_2_GI_1_6us (1u)
+#define HE_LTF_4_GI_3_2us (2u)
+
+/* special STA-IDs (Section 27.11.1) */
+#define HE_STAID_BSS_BCAST 0
+#define HE_STAID_UNASSOCIATED_STA 2045u
+#define HE_STAID_NO_USER 2046u
+#define HE_STAID_MBSS_BCAST 2047u
+#define HE_STAID_MASK 0x07FFu
+#define HE_AID12_MASK 0x0FFFu
+
+/**
+ * HE Capabilites element (sec 9.4.2.218)
+ */
+
+/* HE MAC Capabilities Information field (figure 9-589ck) */
+#define HE_MAC_CAP_INFO_SIZE 6u
+typedef uint8 he_mac_cap_t[HE_MAC_CAP_INFO_SIZE];
+
+/* bit position and field width */
+#define HE_MAC_HTC_HE_SUPPORT_IDX 0 /* HTC HE Support */
+#define HE_MAC_HTC_HE_SUPPORT_FSZ 1
+#define HE_MAC_TWT_REQ_SUPPORT_IDX 1 /* TWT Requestor Support */
+#define HE_MAC_TWT_REQ_SUPPORT_FSZ 1
+#define HE_MAC_TWT_RESP_SUPPORT_IDX 2 /* TWT Responder Support */
+#define HE_MAC_TWT_RESP_SUPPORT_FSZ 1
+#define HE_MAC_FRAG_SUPPORT_IDX 3 /* Fragmentation Support */
+#define HE_MAC_FRAG_SUPPORT_FSZ 2
+#define HE_MAC_MAX_MSDU_FRAGS_IDX 5 /* Max. Fragmented MSDUs */
+#define HE_MAC_MAX_MSDU_FRAGS_FSZ 3
+#define HE_MAC_MIN_FRAG_SIZE_IDX 8 /* Min. Fragment Size */
+#define HE_MAC_MIN_FRAG_SIZE_FSZ 2
+#define HE_MAC_TRIG_MAC_PAD_DUR_IDX 10 /* Trigger Frame MAC Pad Dur */
+#define HE_MAC_TRIG_MAC_PAD_DUR_FSZ 2
+#define HE_MAC_MULTI_TID_AGG_RX_IDX 12 /* Multi TID Agg. Rx support */
+#define HE_MAC_MULTI_TID_AGG_RX_FSZ 3
+#define HE_MAC_LINK_ADAPT_IDX 15 /* HE Link Adaptation Support */
+#define HE_MAC_LINK_ADAPT_FSZ 2
+#define HE_MAC_ALL_ACK_SUPPORT_IDX 17 /* All Ack Support */
+#define HE_MAC_ALL_ACK_SUPPORT_FSZ 1
+#define HE_MAC_TRS_SUPPORT_IDX 18 /* TRS Support */
+#define HE_MAC_TRS_SUPPORT_FSZ 1
+#define HE_MAC_BSR_SUPPORT_IDX 19 /* BSR Support */
+#define HE_MAC_BSR_SUPPORT_FSZ 1
+#define HE_MAC_BCAST_TWT_SUPPORT_IDX 20 /* Broadcast TWT Support */
+#define HE_MAC_BCAST_TWT_SUPPORT_FSZ 1
+#define HE_MAC_32BA_BITMAP_SUPPORT_IDX 21 /* 32-bit BA Bitmap Support */
+#define HE_MAC_32BA_BITMAP_SUPPORT_FSZ 1
+#define HE_MAC_MU_CASCADE_SUPPORT_IDX 22 /* MU Cascade Support */
+#define HE_MAC_MU_CASCADE_SUPPORT_FSZ 1
+#define HE_MAC_ACK_ENAB_AGG_SUPPORT_IDX 23 /* Ack Enabled Agg. Support */
+#define HE_MAC_ACK_ENAB_AGG_SUPPORT_FSZ 1
+/* bit 24 - Reserved */
+#define HE_MAC_OMI_CONTROL_SUPPORT_IDX 25 /* OMI Control Support */
+#define HE_MAC_OMI_CONTROL_SUPPORT_FSZ 1
+#define HE_MAC_OFDMA_RA_SUPPORT_IDX 26 /* OFDMA RA Support */
+#define HE_MAC_OFDMA_RA_SUPPORT_FSZ 1
+#define HE_MAC_MAX_AMPDU_LEN_EXP_IDX 27 /* Max AMPDU Length Exponent */
+#define HE_MAC_MAX_AMPDU_LEN_EXP_FSZ 2
+#define HE_MAC_AMSDU_FRAG_SUPPORT_IDX 29 /* AMSDU Fragmentation Support */
+#define HE_MAC_AMSDU_FRAG_SUPPORT_FSZ 1
+#define HE_MAC_FLEX_TWT_SCHEDULE_IDX 30 /* Flexible TWT Schedule Support */
+#define HE_MAC_FLEX_TWT_SCHEDULE_FSZ 1
+#define HE_MAC_RX_MBSS_CTL_FRAME_IDX 31 /* Rx control frames to Multi BSS */
+#define HE_MAC_RX_MBSS_CTL_FRAME_FSZ 1
+#define HE_MAC_RX_AGG_BSRP_BQRP_IDX 32 /* Aggregated BSRP BQRP Rx */
+#define HE_MAC_RX_AGG_BSRP_BQRP_FSZ 1
+#define HE_MAC_QTP_SUPPORT_IDX 33 /* Support Quiet time period */
+#define HE_MAC_QTP_SUPPORT_FSZ 1
+#define HE_MAC_BQR_SUPPORT_IDX 34 /* Support BQR */
+#define HE_MAC_BQR_SUPPORT_FSZ 1
+#define HE_MAC_SRP_RESPONDER_IDX 35 /* SRP responder Support */
+#define HE_MAC_SRP_RESPONDER_FSZ 1
+#define HE_MAC_NDP_FDBK_SUPPORT_IDX 36 /* NDP feedback report Support */
+#define HE_MAC_NDP_FDBK_SUPPORT_FSZ 1
+#define HE_MAC_OPS_SUPPORT_IDX 37 /* OPS support */
+#define HE_MAC_OPS_SUPPORT_FSZ 1
+#define HE_MAC_AMSDU_IN_AMPDU_IDX 38 /* AMSDU in AMPDU support */
+#define HE_MAC_AMSDU_IN_AMPDU_FSZ 1
+#define HE_MAC_MULTI_TID_AGG_TX_IDX 39 /* Multi TID Agg. Tx support */
+#define HE_MAC_MULTI_TID_AGG_TX_FSZ 3
+#define HE_MAC_SST_SUPPORT_IDX 42 /* Sub-channel Selective Transmission */
+#define HE_MAC_SST_SUPPORT_FSZ 1
+#define HE_MAC_UL_2X_996_TONE_RU_SUPP_IDX 43 /* UL 2X 996 tone RU Support */
+#define HE_MAC_UL_2X_996_TONE_RU_SUPP_FSZ 1
+#define HE_MAC_UL_MU_DATA_DISABLE_RX_IDX 44 /* OM - UL MU Data Disable RX */
+#define HE_MAC_UL_MU_DATA_DISABLE_RX_FSZ 1
+#define HE_MAC_DYNAMIC_SM_PWR_SAVE_IDX 45 /* HE Dynamic SM Power Save */
+#define HE_MAC_DYNAMIC_SM_PWR_SAVE_FSZ 1
+#define HE_MAC_PUNCT_SOUNDING_SUPP_IDX 46 /* Punctured Sounding Support */
+#define HE_MAC_PUNCT_SOUNDING_SUPP_FSZ 1
+#define HE_MAC_HT_VHT_TRIG_FRAME_RX_IDX 47 /* HT And VHT Trigger Frame RX Support */
+#define HE_MAC_HT_VHT_TRIG_FRAME_RX_FSZ 1
+
+/* HE PHY Capabilities Information field (figure 9-589cl) */
+#define HE_PHY_CAP_INFO_SIZE 11u
+typedef uint8 he_phy_cap_t[HE_PHY_CAP_INFO_SIZE];
+
+/* bit position and field width */
+/* bit 0 - Reserved */
+#define HE_PHY_CH_WIDTH_SET_IDX 1 /* Channel Width Set */
+#define HE_PHY_CH_WIDTH_SET_FSZ 7
+#define HE_PHY_PUNCT_PREAMBLE_RX_IDX 8 /* Punctured Preamble Rx */
+#define HE_PHY_PUNCT_PREAMBLE_RX_FSZ 4
+#define HE_PHY_DEVICE_CLASS_IDX 12 /* Device Class */
+#define HE_PHY_DEVICE_CLASS_FSZ 1
+#define HE_PHY_LDPC_PYLD_IDX 13 /* LDPC Coding In Payload */
+#define HE_PHY_LDPC_PYLD_FSZ 1
+#define HE_PHY_SU_PPDU_1x_LTF_0_8_GI_IDX 14 /* SU PPDU 1x LTF GI 0.8 us */
+#define HE_PHY_SU_PPDU_1x_LTF_0_8_GI_FSZ 1
+#define HE_PHY_MIDAMBLE_MAX_NSTS_IDX 15 /* Midamble Tx/Rx Max NSTS */
+#define HE_PHY_MIDAMBLE_MAX_NSTS_FSZ 2
+#define HE_PHY_NDP_4x_LTF_3_2_GI_IDX 17 /* NDP with 4xLTF 3.2us GI */
+#define HE_PHY_NDP_4x_LTF_3_2_GI_FSZ 1
+#define HE_PHY_STBC_TX_IDX 18 /* STBC Tx for <= 80 MHz */
+#define HE_PHY_STBC_TX_FSZ 1
+#define HE_PHY_STBC_RX_IDX 19 /* STBC Rx for <= 80 MHz */
+#define HE_PHY_STBC_RX_FSZ 1
+#define HE_PHY_DOPPLER_TX_IDX 20 /* Doppler Tx */
+#define HE_PHY_DOPPLER_TX_FSZ 1
+#define HE_PHY_DOPPLER_RX_IDX 21 /* Doppler Rx */
+#define HE_PHY_DOPPLER_RX_FSZ 1
+#define HE_PHY_FULL_BW_UL_MU_MIMO_IDX 22 /* Full bandwidth UL MU MIMO */
+#define HE_PHY_FULL_BW_UL_MU_MIMO_FSZ 1
+#define HE_PHY_PART_BW_UL_MU_MIMO_IDX 23 /* Partial bandwidth UL MU MIMO */
+#define HE_PHY_PART_BW_UL_MU_MIMO_FSZ 1
+#define HE_PHY_DCM_MAX_CONST_TX_IDX 24 /* DCM Max Constellation Tx */
+#define HE_PHY_DCM_MAX_CONST_TX_FSZ 2
+#define HE_PHY_DCM_MAX_NSS_TX_IDX 26 /* DCM Max NSS Tx */
+#define HE_PHY_DCM_MAX_NSS_TX_FSZ 1
+#define HE_PHY_DCM_MAX_CONST_RX_IDX 27 /* DCM Max Constellation Rx */
+#define HE_PHY_DCM_MAX_CONST_RX_FSZ 2
+#define HE_PHY_DCM_MAX_NSS_RX_IDX 29 /* DCM Max NSS Rx */
+#define HE_PHY_DCM_MAX_NSS_RX_FSZ 1
+#define HE_PHY_RX_MU_PPDU_IDX 30 /* Rx HE MU PPDU From nonAP STA */
+#define HE_PHY_RX_MU_PPDU_FSZ 1
+#define HE_PHY_SU_BEAMFORMER_IDX 31 /* SU Beamformer */
+#define HE_PHY_SU_BEAMFORMER_FSZ 1
+#define HE_PHY_SU_BEAMFORMEE_IDX 32 /* SU Beamformee */
+#define HE_PHY_SU_BEAMFORMEE_FSZ 1
+#define HE_PHY_MU_BEAMFORMER_IDX 33 /* MU Beamformer */
+#define HE_PHY_MU_BEAMFORMER_FSZ 1
+#define HE_PHY_BEAMFORMEE_STS_BELOW80MHZ_IDX 34 /* Beamformee STS For <= 80MHz */
+#define HE_PHY_BEAMFORMEE_STS_BELOW80MHZ_FSZ 3
+#define HE_PHY_BEAMFORMEE_STS_ABOVE80MHZ_IDX 37 /* Beamformee STS For >80 MHz */
+#define HE_PHY_BEAMFORMEE_STS_ABOVE80MHZ_FSZ 3
+#define HE_PHY_SOUND_DIM_BELOW80MHZ_IDX 40 /* Num. Sounding Dim.<= 80 MHz */
+#define HE_PHY_SOUND_DIM_BELOW80MHZ_FSZ 3
+#define HE_PHY_SOUND_DIM_ABOVE80MHZ_IDX 43 /* Num. Sounding Dim.> 80 MHz */
+#define HE_PHY_SOUND_DIM_ABOVE80MHZ_FSZ 3
+#define HE_PHY_SU_FEEDBACK_NG16_SUPPORT_IDX 46 /* Ng=16 For SU Feedback */
+#define HE_PHY_SU_FEEDBACK_NG16_SUPPORT_FSZ 1
+#define HE_PHY_MU_FEEDBACK_NG16_SUPPORT_IDX 47 /* Ng=16 For MU Feedback */
+#define HE_PHY_MU_FEEDBACK_NG16_SUPPORT_FSZ 1
+#define HE_PHY_SU_CODEBOOK_SUPPORT_IDX 48 /* Codebook Sz {4, 2} For SU */
+#define HE_PHY_SU_CODEBOOK_SUPPORT_FSZ 1
+#define HE_PHY_MU_CODEBOOK_SUPPORT_IDX 49 /* Codebook Size {7, 5} For MU */
+#define HE_PHY_MU_CODEBOOK_SUPPORT_FSZ 1
+#define HE_PHY_TRG_SU_BFM_FEEDBACK_IDX 50 /* Triggered SU TXBF Feedback */
+#define HE_PHY_TRG_SU_BFM_FEEDBACK_FSZ 1
+#define HE_PHY_TRG_MU_BFM_FEEDBACK_IDX 51 /* Triggered MU TXBF partial BW Feedback */
+#define HE_PHY_TRG_MU_BFM_FEEDBACK_FSZ 1
+#define HE_PHY_TRG_CQI_FEEDBACK_IDX 52 /* Triggered CQI Feedback */
+#define HE_PHY_TRG_CQI_FEEDBACK_FSZ 1
+#define HE_PHY_PART_BW_EXT_RANGE_IDX 53 /* Partial BW Extended Range */
+#define HE_PHY_PART_BW_EXT_RANGE_FSZ 1
+#define HE_PHY_DL_MU_MIMO_PART_BW_IDX 54 /* Partial Bandwidth DL MU MIMO */
+#define HE_PHY_DL_MU_MIMO_PART_BW_FSZ 1
+#define HE_PHY_PPE_THRESH_PRESENT_IDX 55 /* PPE Threshold Present */
+#define HE_PHY_PPE_THRESH_PRESENT_FSZ 1
+#define HE_PHY_SRP_SR_SUPPORT_IDX 56 /* SRP based SR Support */
+#define HE_PHY_SRP_SR_SUPPORT_FSZ 1
+#define HE_PHY_POWER_BOOST_FACTOR_IDX 57 /* Power Boost Factor Support */
+#define HE_PHY_POWER_BOOST_FACTOR_FSZ 1
+#define HE_PHY_4X_LTF_0_8_GI_SUPPORT_IDX 58 /* HE SU PPDU And HE MU PPDU with
+ * 4x HE-LTF And 0.8 us GI
+ */
+#define HE_PHY_4X_LTF_0_8_GI_SUPPORT_FSZ 1
+#define HE_PHY_MAX_NC_IDX 59 /* Maximum NC */
+#define HE_PHY_MAX_NC_FSZ 3
+#define HE_PHY_STBC_TX_ABOVE_80_IDX 62 /* STBC Tx above 80 MHz */
+#define HE_PHY_STBC_TX_ABOVE_80_FSZ 1
+#define HE_PHY_STBC_RX_ABOVE_80_IDX 63 /* STBC Rx above 80 MHz */
+#define HE_PHY_STBC_RX_ABOVE_80_FSZ 1
+#define HE_PHY_ER_SU_4X_LTF_0_8_GI_IDX 64 /* ER SU PPDU 4x HE-LTF 0.8 GI */
+#define HE_PHY_ER_SU_4X_LTF_0_8_GI_FSZ 1
+#define HE_PHY_20_IN_40_2G_IDX 65 /* 20 in 40 MHz HE PPDU in 2G */
+#define HE_PHY_20_IN_40_2G_FSZ 1
+#define HE_PHY_20_IN_160_80P80_IDX 66 /* 20 in 160/80+80 MHz HE PPDU */
+#define HE_PHY_20_IN_160_80P80_FSZ 1
+#define HE_PHY_80_IN_160_80P80_IDX 67 /* 80 in 160/80+80 MHz HE PPDU */
+#define HE_PHY_80_IN_160_80P80_FSZ 1
+#define HE_PHY_ER_SU_1X_LTF_0_8_GI_IDX 68 /* HE ER SU 1x HE-LTF 0.8 GI */
+#define HE_PHY_ER_SU_1X_LTF_0_8_GI_FSZ 1
+#define HE_PHY_MIDAMBLE_2X_1X_LTF_IDX 69 /* Midamble TX/RX 2x & 1x HE LTF */
+#define HE_PHY_MIDAMBLE_2X_1X_LTF_FSZ 1
+#define HE_PHY_DCM_MAX_BW_IDX 70 /* DCM Max BW */
+#define HE_PHY_DCM_MAX_BW_FSZ 2
+#define HE_PHY_ABOVE16_OFDM_SYM_IDX 72 /* Longer than 16 HE-SIGB OFDM
+ * Symbol support
+ */
+#define HE_PHY_ABOVE16_OFDM_SYM_FSZ 1
+#define HE_PHY_NON_TRIG_CQI_FDBK_IDX 73 /* Non-triggered CQI feedback Support */
+#define HE_PHY_NON_TRIG_CQI_FDBK_FSZ 1
+#define HE_PHY_1024_QAM_TX_BELOW_242_RU_IDX 74 /* Tx 1024 QAM in < 242 RU Tone Support */
+#define HE_PHY_1024_QAM_TX_BELOW_242_RU_FSZ 1
+#define HE_PHY_1024_QAM_RX_BELOW_242_RU_IDX 75 /* Rx 1024 QAM in < 242 RU Tone Support */
+#define HE_PHY_1024_QAM_RX_BELOW_242_RU_FSZ 1
+#define HE_PHY_RX_FULL_BW_MU_COMP_SIGB_IDX 76 /* Rx Full BW MU PPDU with Comp. SIGB */
+#define HE_PHY_RX_FULL_BW_MU_COMP_SIGB_FSZ 1
+#define HE_PHY_RX_FULL_BW_MU_NON_COMP_SIGB_IDX 77 /* Rx Full BW MU PPDU Non-Comp SIGB */
+#define HE_PHY_RX_FULL_BW_MU_NON_COMP_SIGB_FSZ 1
+
+/* HE Mac Capabilities values */
+/* b3-b4: Fragmentation Support field (table 9-262z) */
+#define HE_MAC_FRAG_NOSUPPORT 0 /* dynamic fragmentation not supported */
+#define HE_MAC_FRAG_PER_MPDU 1 /* dynamic fragmentation of MPDU/SMPDU */
+#define HE_MAC_FRAG_ONE_PER_AMPDU 2 /* up to 1 fragment per AMPDU/MMPDU */
+#define HE_MAC_FRAG_MULTI_PER_AMPDU 3 /* multiple fragment per AMPDU */
+
+/* b5-b7 : Maximum Number Of Fragmented MSDUs/AMSDUs Exponent */
+#define HE_MAC_MAXFRAG_NUM_NO_RESTRICT 7
+
+/* b8-b9: Minimum payload size of first fragment */
+#define HE_MAC_MINFRAG_NO_RESTRICT 0 /* no restriction on min. payload size */
+#define HE_MAC_MINFRAG_SIZE_128 1 /* minimum payload size of 128 Bytes */
+#define HE_MAC_MINFRAG_SIZE_256 2 /* minimum payload size of 256 Bytes */
+#define HE_MAC_MINFRAG_SIZE_512 3 /* minimum payload size of 512 Bytes */
+
+/* b10-b11: Trigger Frame MAC Padding Duration */
+#define HE_MAC_TRIG_MAC_PAD_0 0
+#define HE_MAC_TRIG_MAC_PAD_8us 1
+#define HE_MAC_TRIG_MAC_PAD_16us 2
+
+/* b15-b16: HE Link Adaptation */
+#define HE_MAC_SEND_NO_MFB 0 /* if STA does not provide HE MFB */
+#define HE_MAC_SEND_UNSOLICATED_MFB 2 /* if STA provides unsolicited HE MFB */
+#define HE_MAC_SEND_MFB_IN_RESPONSE 3 /* if STA can provide HE MFB in response to
+ * HE MRQ and if the STA provides unsolicited HE MFB.
+ */
+
+/* b27-b28: Max. AMPDU Length HE Exponent */
+/* Use Max AMPDU length exponent from VHT or HT */
+#define HE_MAC_MAX_AMPDU_EXP_ADOPT_VHT (0)
+/* Max. AMPDU length =
+ * 2^(20 + MAX_AMPDU_LEN_HE_EXPO_1) -1 (if this value in VHT CAP is 7) or
+ * 2^(16 + MAX_AMPDU_LEN_HE_EXPO_1) -1 (if this value in HT CAP is 3).
+ */
+#define HE_MAC_MAX_AMPDU_EXP_HE_1 (1)
+/* Max. AMPDU length =
+ * 2^(20 + MAX_AMPDU_LEN_HE_EXPO_2) -1 (if this value in VHT CAP is 7) or
+ * 2^(16 + MAX_AMPDU_LEN_HE_EXPO_2) -1 (if this value in HT CAP is 3).
+ */
+#define HE_MAC_MAX_AMPDU_EXP_HE_2 (2)
+/* Max. AMPDU length =
+ * 2^(20 + MAX_AMPDU_LEN_HE_EXPO_3) -1 (if this value in VHT CAP is 7) or
+ * 2^(16 + MAX_AMPDU_LEN_HE_EXPO_3) -1 (if this value in HT CAP is 3).
+ */
+#define HE_MAC_MAX_AMPDU_EXP_HE_3 (3)
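+
+/* Worked example (editorial note, not from the source): a STA whose VHT CAP
+ * already advertises exponent 7 (2^20 - 1 octets) and that sets
+ * HE_MAC_MAX_AMPDU_EXP_HE_2 here raises its A-MPDU limit to
+ * 2^(20 + 2) - 1 = 4194303 octets, per the comments above.
+ */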
+
+/* HE PHY Capabilities values */
+/* b1-b7: Channel Width Support field */
+#define HE_PHY_CH_WIDTH_2G_40 0x01
+#define HE_PHY_CH_WIDTH_5G_80 0x02
+#define HE_PHY_CH_WIDTH_5G_160 0x04
+#define HE_PHY_CH_WIDTH_5G_80P80 0x08
+#define HE_PHY_CH_WIDTH_2G_242RU 0x10
+#define HE_PHY_CH_WIDTH_5G_242RU 0x20
+
+/* b8-b11: Preamble puncturing Rx */
+/* Rx of 80 MHz preamble where secondary 20 MHz subchannel is punctured */
+#define HE_PHY_PREAMBLE_PUNC_RX_0 0x1
+/* Rx of 80 MHz preamble where one of two 20 MHz subchannels in secondary 40 MHz is punctured */
+#define HE_PHY_PREAMBLE_PUNC_RX_1 0x2
+/* Rx of 160 MHz or 80+80 MHz preamble where, in the primary 80 MHz of
+ * the preamble, only the secondary 20 MHz is punctured
+ */
+#define HE_PHY_PREAMBLE_PUNC_RX_2 0x4
+/* Rx of 160 MHz or 80+80 MHz preamble where, in the primary 80 MHz of
+ * the preamble, the primary 40 MHz is present
+ */
+#define HE_PHY_PREAMBLE_PUNC_RX_3 0x8
+
+/* b24-b26: DCM Encoding Tx */
+#define HE_PHY_TX_DCM_ENC_NOSUPPORT 0x00
+#define HE_PHY_TX_DCM_ENC_BPSK 0x01
+#define HE_PHY_TX_DCM_ENC_QPSK 0x02
+#define HE_PHY_TX_DCM_ENC_QAM 0x03
+
+#define HE_PHY_TX_DCM_1_SS 0x00
+#define HE_PHY_TX_DCM_2_SS 0x01
+
+/* b27-b29: DCM Encoding Rx */
+#define HE_PHY_RX_DCM_ENC_NOSUPPORT 0x00
+#define HE_PHY_RX_DCM_ENC_BPSK 0x01
+#define HE_PHY_RX_DCM_ENC_QPSK 0x02
+#define HE_PHY_RX_DCM_ENC_QAM 0x03
+
+#define HE_PHY_RX_DCM_1_SS 0x00
+#define HE_PHY_RX_DCM_2_SS 0x01
+
+/* b70-b71: DCM Max BW */
+#define HE_PHY_DCM_MAX_BW_20 0
+#define HE_PHY_DCM_MAX_BW_40 1
+#define HE_PHY_DCM_MAX_BW_80 2
+#define HE_PHY_DCM_MAX_BW_160 3
+
+/* HE Duration based RTS Threshold Figure 9-589cr */
+#define HE_RTS_THRES_DISABLED 1023
+#define HE_RTS_THRES_ALL_FRAMES 0
+#define HE_RTS_THRES_MASK 0x03ff
+
+/* Tx Rx HE MCS Support field format : Table 9-589cm */
+#define HE_TX_RX_MCS_NSS_SUP_FIELD_MIN_SIZE 4u
+
+/**
+ * Bandwidth configuration indices used in the HE TX-RX MCS support field
+ * Section 9.4.2.218.4
+ */
+#define HE_BW20_CFG_IDX 0
+#define HE_BW40_CFG_IDX 1
+#define HE_BW80_CFG_IDX 2
+#define HE_BW80P80_CFG_IDX 3
+#define HE_BW160_CFG_IDX 4
+#define HE_MAX_BW_CFG 5
+
+#define HE_MCS_CODE_0_7 0u
+#define HE_MCS_CODE_0_9 1u
+#define HE_MCS_CODE_0_11 2u
+#define HE_MCS_CODE_NONE 3u
+#define HE_MCS_CODE_SIZE 2u /* num bits */
+#define HE_MCS_CODE_MASK 0x3u /* mask for 1-stream */
+
+/* Defines for The Max HE MCS For n SS subfield (where n = 1, ..., 8) */
+#define HE_MCS_MAP_NSS_MAX 8u /* Max number of streams possible */
+#define HE_MCS_NSS_SET_MASK 0xffffu /* Field is to be 16 bits long */
+#define HE_MCS_NSS_GET_SS_IDX(nss) (((nss)-1u) * HE_MCS_CODE_SIZE)
+#define HE_MCS_NSS_GET_MCS(nss, mcs_nss_map) \
+ (((mcs_nss_map) >> HE_MCS_NSS_GET_SS_IDX(nss)) & HE_MCS_CODE_MASK)
+#define HE_MCS_NSS_SET_MCS(nss, mcs_code, mcs_nss_map) \
+ do { \
+ (mcs_nss_map) &= (~(HE_MCS_CODE_MASK << HE_MCS_NSS_GET_SS_IDX(nss))); \
+ (mcs_nss_map) |= (((mcs_code) & HE_MCS_CODE_MASK) << HE_MCS_NSS_GET_SS_IDX(nss)); \
+ (mcs_nss_map) &= (HE_MCS_NSS_SET_MASK); \
+ } while (0)
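+
+/* Usage sketch (illustrative only): composing and querying a 16-bit HE
+ * MCS-NSS map with the macros above; the function and variable names are
+ * hypothetical.
+ */
+#if 0	/* example only, not compiled */
+static void
+he_mcs_nss_map_example(void)
+{
+	uint16 mcs_nss_map = HE_MCS_NSS_SET_MASK;	/* HE_MCS_CODE_NONE for all 8 SS */
+
+	/* advertise MCS 0-11 for 1 spatial stream and MCS 0-9 for 2 streams */
+	HE_MCS_NSS_SET_MCS(1, HE_MCS_CODE_0_11, mcs_nss_map);
+	HE_MCS_NSS_SET_MCS(2, HE_MCS_CODE_0_9, mcs_nss_map);
+
+	/* HE_MCS_NSS_GET_MCS(1, mcs_nss_map) now yields HE_MCS_CODE_0_11 */
+}
+#endif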
+
+#define HE_BW80_ORDR_IDX 0u
+#define HE_BW160_ORDR_IDX 1u
+#define HE_BW80P80_ORDR_IDX 2u
+
+#define HE_MCS_NSS_SUP_FLD_UNIT_MAP_LEN 2u /* 2 bytes */
+#define HE_MCS_NSS_SUP_FLD_UNIT_MAP_SZ (HE_MCS_NSS_SUP_FLD_UNIT_MAP_LEN * 8u) /* 16 bits */
+
+/* Two unit-maps (TX+RX) */
+#define HE_MCS_NSS_SUP_FLD_TXRX_MAP_LEN (HE_MCS_NSS_SUP_FLD_UNIT_MAP_LEN * 2u)
+#define HE_MCS_NSS_SUP_FLD_TXRX_MAP_SZ (HE_MCS_NSS_SUP_FLD_TXRX_MAP_LEN * 8u) /* 32 bits */
+
+/* One TX-RX unit-map (80 MHz) */
+#define HE_MCS_NSS_SUP_FLD_MIN_LEN (HE_MCS_NSS_SUP_FLD_TXRX_MAP_LEN)
+/* Three TX-RX unit-maps (80 MHz, 160MHz, 80+80MHz) */
+#define HE_MCS_NSS_SUP_FLD_MAX_LEN (HE_MCS_NSS_SUP_FLD_TXRX_MAP_LEN * 3u)
+
+/* HE Capabilities element */
+BWL_PRE_PACKED_STRUCT struct he_cap_ie {
+ uint8 id;
+ uint8 len;
+ uint8 id_ext;
+ he_mac_cap_t mac_cap; /* MAC Capabilities Information */
+ he_phy_cap_t phy_cap; /* PHY Capabilities Information */
+	/* he_tx_rx_mcs_nss_sup_t tx_rx_mcs_nss_sup; */ /* Tx Rx HE MCS NSS Support (variable) */
+ /* he_ppe_ths_t ppe_ths; */ /* PPE Thresholds (optional) */
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct he_cap_ie he_cap_ie_t;
+
+/* Multiple BSSID element */
+BWL_PRE_PACKED_STRUCT struct nontrans_BSSID_cap {
+ uint8 id; /* 83 */
+ uint8 len;
+ uint16 capability;
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct nontrans_BSSID_cap nontrans_BSSID_cap_t;
+
+BWL_PRE_PACKED_STRUCT struct multi_BSSID_index {
+ uint8 id; /* 85 */
+ uint8 len; /* 3 in beacon, 1 in probe response */
+ uint8 bssid_index; /* between 1 and 2^n - 1 */
+ uint8 dtim_period; /* only valid in beacon */
+ uint8 dtim_count; /* only valid in beacon */
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct multi_BSSID_index multi_BSSID_index_t;
+
+BWL_PRE_PACKED_STRUCT struct fms_descriptor {
+ uint8 id; /* 86 */
+ uint8 len;
+ uint8 num_FMS_counters;
+ uint8 *FMS_counters;
+ uint8 *FMSID;
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct fms_descriptor fms_descriptor_t;
+
+BWL_PRE_PACKED_STRUCT struct nontrans_BSSID_profile_subie {
+ uint8 subie_id; /* 0 */
+ uint8 subie_len;
+ uint8 moreie[1];
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct nontrans_BSSID_profile_subie nontrans_BSSID_profile_subie_t;
+
+BWL_PRE_PACKED_STRUCT struct multi_BSSID_ie {
+ uint8 id;
+ uint8 len;
+ uint8 maxBSSID_indicator;
+ nontrans_BSSID_profile_subie_t profile[1];
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct multi_BSSID_ie multi_BSSID_ie_t;
+#define DOT11_MULTIPLE_BSSID_PROFILE_SUBID 0
+
+/* Table 9-262ab, Highest MCS Supported subfield encoding */
+#define HE_CAP_MCS_CODE_0_7 0
+#define HE_CAP_MCS_CODE_0_8 1
+#define HE_CAP_MCS_CODE_0_9 2
+#define HE_CAP_MCS_CODE_0_10 3
+#define HE_CAP_MCS_CODE_0_11 4
+#define HE_CAP_MCS_CODE_SIZE 3 /* num bits for 1-stream */
+#define HE_CAP_MCS_CODE_MASK 0x7 /* mask for 1-stream */
+
+#define HE_CAP_MCS_MAP_NSS_MAX 8u /* Max number of streams possible */
+
+#define HE_MAX_RU_COUNT 4u /* Max number of RU allocation possible */
+
+#define HE_NSSM1_IDX 0 /* Offset of NSSM1 field */
+#define HE_NSSM1_LEN 3 /* length of NSSM1 field in bits */
+
+#define HE_RU_INDEX_MASK_IDX 3 /* Offset of RU index mask field */
+#define HE_RU_INDEX_MASK_LEN 4u /* length of RU Index mask field in bits */
+
+/* PPE Threshold field (figure 9-589co) */
+#define HE_PPE_THRESH_NSS_RU_FSZ 3u
+
+/* PPE Threshold Info field (figure 9-589cp) */
+/* ruc: RU Count; NSSnM1: NSSn - 1; RUmM1: RUm - 1 */
+/* bit offset in PPE Threshold field */
+#define HE_PPET16_BIT_OFFSET(ruc, NSSnM1, RUmM1) \
+ (HE_NSSM1_LEN + HE_RU_INDEX_MASK_LEN + ((NSSnM1) * (ruc) + (RUmM1)) * 6)
+
+#define HE_PPET8_BIT_OFFSET(ruc, NSSnM1, RUmM1) \
+ (HE_NSSM1_LEN + HE_RU_INDEX_MASK_LEN + ((NSSnM1) * (ruc) + (RUmM1)) * 6 + 3)
+
+/* Total PPE Threshold field byte length (Figure 9-589cq) */
+#define HE_PPE_THRESH_LEN(nss, ruc) \
+ (CEIL((HE_NSSM1_LEN + HE_RU_INDEX_MASK_LEN + ((nss) * (ruc) * 6)), 8))
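+
+/* Worked example: for nss = 8 and ruc = 4 the field spans
+ * HE_NSSM1_LEN + HE_RU_INDEX_MASK_LEN + 8 * 4 * 6 = 3 + 4 + 192 = 199 bits,
+ * so HE_PPE_THRESH_LEN(8, 4) = CEIL(199, 8) = 25 bytes.
+ */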
+
+/* RU Allocation Index encoding (table 9-262ae) */
+#define HE_RU_ALLOC_IDX_242 0 /* RU alloc: 242 tones */
+#define HE_RU_ALLOC_IDX_484 1 /* RU alloc: 484 tones - 40 MHz */
+#define HE_RU_ALLOC_IDX_996 2 /* RU alloc: 996 tones - 80 MHz */
+#define HE_RU_ALLOC_IDX_2x996 3 /* RU alloc: 2x996 tones - 80p80/160 MHz */
+
+/* Constellation Index encoding (table 9-262ac) */
+#define HE_CONST_IDX_BPSK 0
+#define HE_CONST_IDX_QPSK 1
+#define HE_CONST_IDX_16QAM 2
+#define HE_CONST_IDX_64QAM 3
+#define HE_CONST_IDX_256QAM 4
+#define HE_CONST_IDX_1024QAM 5
+#define HE_CONST_IDX_RSVD 6
+#define HE_CONST_IDX_NONE 7
+
+/* Min HE cap ie length when only 80 MHz is supported */
+#define HE_CAP_IE_MIN_LEN (sizeof(he_cap_ie_t) - TLV_HDR_LEN + HE_MCS_NSS_SUP_FLD_MIN_LEN)
+
+/* Max HE cap ie length considering MAX NSS and RU */
+#define HE_CAP_IE_MAX_LEN (sizeof(he_cap_ie_t) - TLV_HDR_LEN + HE_MCS_NSS_SUP_FLD_MAX_LEN + \
+ HE_PPE_THRESH_LEN(HE_CAP_MCS_MAP_NSS_MAX, HE_MAX_RU_COUNT))
+/**
+ * HE Operation IE (Section 9.4.2.238)
+ */
+/* HE Operation Parameters field (figure 9-589cr) */
+#define HE_OP_PARAMS_SIZE 3u
+typedef uint8 he_op_parms_t[HE_OP_PARAMS_SIZE];
+
+/* bit position and field width */
+#define HE_OP_DEF_PE_DUR_IDX 0u /* Default PE Duration */
+#define HE_OP_DEF_PE_DUR_FSZ 3u
+#define HE_OP_TWT_REQD_IDX 3u /* TWT Required */
+#define HE_OP_TWT_REQD_FSZ 1u
+#define HE_OP_TXOP_DUR_RTS_THRESH_IDX 4u /* TXOP Duration Based RTS Threshold */
+#define HE_OP_TXOP_DUR_RTS_THRESH_FSZ 10u
+#define HE_OP_VHT_OP_PRESENT_IDX 14u /* VHT Oper Info Present */
+#define HE_OP_VHT_OP_PRESENT_FSZ 1u
+#define HE_OP_COL_LOC_BSS_IDX 15u
+#define HE_OP_COL_LOC_BSS_FSZ 1u
+#define HE_OP_ER_SU_DISABLE_IDX 16u
+#define HE_OP_ER_SU_DISABLE_FSZ 1u
+#define HE_OP_6G_OP_INFO_PRESENT_IDX 17u
+#define HE_OP_6G_OP_INFO_PRESENT_FSZ 1u
+
+/* BSS Color Information field (figure 9-589cs) */
+#define HE_OP_BSS_COLOR_IDX 0 /* BSS Color */
+#define HE_OP_BSS_COLOR_FSZ 6
+#define HE_OP_PART_BSS_COLOR_IDX 6 /* Partial BSS Color */
+#define HE_OP_PART_BSS_COLOR_FSZ 1
+#define HE_OP_DISABLE_BSSCOLOR_IDX 7 /* BSS Color Disable */
+#define HE_OP_DISABLE_BSSCOLOR_FSZ 1
+
+/* b4-b13: TXOP Duration RTS threshold */
+#define HE_OP_TXOP_RTS_THRESH_DISABLED 1023u
+
+#define HE_BASIC_MCS_NSS_SIZE 2u
+typedef uint8 he_basic_mcs_nss_set_t[HE_BASIC_MCS_NSS_SIZE];
+
+#define HE_OP_MAX_BSSID_IND_LEN 1u
+#define HE_OP_6G_OPER_INFO_LEN 5u
+/* HE Operation element */
+BWL_PRE_PACKED_STRUCT struct he_op_ie {
+ uint8 id;
+ uint8 len;
+ uint8 id_ext;
+ he_op_parms_t parms;
+ uint8 bsscolor_info;
+ he_basic_mcs_nss_set_t mcs_nss_op; /* Basic HE MCS & NSS Set */
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct he_op_ie he_op_ie_t;
+
+#define HE_OP_IE_MIN_LEN (sizeof(he_op_ie_t) - TLV_HDR_LEN)
+#define HE_OP_IE_MAX_LEN (sizeof(he_op_ie_t) - TLV_HDR_LEN + VHT_OP_INFO_LEN +\
+ HE_OP_MAX_BSSID_IND_LEN + HE_OP_6G_OPER_INFO_LEN)
+
+#define HE_6G_OP_BW_20 0u
+#define HE_6G_OP_BW_40 1u
+#define HE_6G_OP_BW_80 2u
+#define HE_6G_OP_BW_160_80P80 3u
+
+/* Regulatory Info subfield in the United States */
+#define HE_6G_OP_REG_INFO_INDOOR_AP_US 0u
+#define HE_6G_OP_REG_INFO_SP_AP_US 1u
+
+/* Figure 9-788l Control field format in Draft P802.11ax_D6.0 */
+#define HE_6G_CTL_CHBW_MASK 0x03u
+#define HE_6G_OP_CTL_CHBW(ctl) (ctl & HE_6G_CTL_CHBW_MASK)
+#define HE_6G_CTL_DUP_BCN_MASK 0x04u
+#define HE_6G_CTL_REG_INFO_MASK 0x38u
+#define HE_6G_CTL_REG_INFO_SHIFT 3u
+#define HE_6G_OP_CTL_REG_INFO(ctl) \
+ ((ctl & HE_6G_CTL_REG_INFO_MASK) >> HE_6G_CTL_REG_INFO_SHIFT)
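+
+/* Usage sketch (illustrative only): decoding a 6 GHz Operation Information
+ * control octet with the accessors above; `ctl` and the function name are
+ * hypothetical.
+ */
+#if 0	/* example only, not compiled */
+static void
+he_6g_op_ctl_example(void)
+{
+	uint8 ctl = 0x0au;
+	uint8 bw = HE_6G_OP_CTL_CHBW(ctl);		/* 2 == HE_6G_OP_BW_80 */
+	uint8 reg_info = HE_6G_OP_CTL_REG_INFO(ctl);	/* 1 == HE_6G_OP_REG_INFO_SP_AP_US */
+}
+#endif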
+
+/* HE 6G Operation info */
+BWL_PRE_PACKED_STRUCT struct he_6g_op_info {
+ uint8 pri_chan;
+ uint8 control;
+ uint8 seg0;
+ uint8 seg1;
+ uint8 min_rate;
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct he_6g_op_info he_6g_op_info_t;
+
+/* HE Extended Capabilities element */
+BWL_PRE_PACKED_STRUCT struct he_6g_cap_ie {
+ uint8 id;
+ uint8 len;
+ uint8 id_ext;
+ uint16 cap_info; /* Capabilities Information */
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct he_6g_cap_ie he_6g_cap_ie_t;
+#define HE_6G_CAP_IE_LEN sizeof(he_6g_cap_ie_t)
+
+/* HE Capabilities Information bit position and fieldwidth.
+ * Figure 9-787ai Capabilities Information field format in
+ * Draft P802.11ax_D5.0.
+ */
+#define HE_6G_CAP_MIN_MPDU_START_MASK 0x0007u
+#define HE_6G_CAP_MAX_AMPDU_LEN_EXP_MASK 0x0038u
+#define HE_6G_CAP_MAX_AMPDU_LEN_EXP_SHIFT 3u
+#define HE_6G_CAP_MAX_MPDU_LEN_MASK 0x00C0u
+#define HE_6G_CAP_MAX_MPDU_LEN_SHIFT 6u
+#define HE_6G_CAP_SM_PW_SAVE_MASK 0x0600u
+#define HE_6G_CAP_SM_PW_SAVE_SHIFT 9u
+#define HE_6G_CAP_RD_RESPONDER_MASK 0x0800u
+#define HE_6G_CAP_RD_RESPONDER_SHIFT 11u
+#define HE_6G_CAP_RX_ANT_PATN_CONST_MASK 0x1000u
+#define HE_6G_CAP_RX_ANT_PATN_CONST_SHIFT 12u
+#define HE_6G_CAP_TX_ANT_PATN_CONST_MASK 0x2000u
+#define HE_6G_CAP_TX_ANT_PATN_CONST_SHIFT 13u
+
+#define HE_6G_CAP_MIN_MPDU_START(cap) ((cap) & HE_6G_CAP_MIN_MPDU_START_MASK)
+#define HE_6G_CAP_MAX_AMPDU_LEN_EXP(cap) (((cap) & HE_6G_CAP_MAX_AMPDU_LEN_EXP_MASK) >> \
+ HE_6G_CAP_MAX_AMPDU_LEN_EXP_SHIFT)
+#define HE_6G_CAP_MAX_MPDU_LEN(cap) (((cap) & HE_6G_CAP_MAX_MPDU_LEN_MASK) >> \
+ HE_6G_CAP_MAX_MPDU_LEN_SHIFT)
+#define HE_6G_CAP_SM_PW_SAVE(cap) (((cap) & HE_6G_CAP_SM_PW_SAVE_MASK) >> \
+ HE_6G_CAP_SM_PW_SAVE_SHIFT)
+#define HE_6G_CAP_RD_RESPONDER(cap) (((cap) & HE_6G_CAP_RD_RESPONDER_MASK) != 0)
+#define HE_6G_CAP_RX_ANT_PATN_CONST(cap) (((cap) & HE_6G_CAP_RX_ANT_PATN_CONST_MASK) != 0)
+#define HE_6G_CAP_TX_ANT_PATN_CONST(cap) (((cap) & HE_6G_CAP_TX_ANT_PATN_CONST_MASK) != 0)
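+
+/* Worked example (illustrative): for cap_info == 0x00c5,
+ * HE_6G_CAP_MIN_MPDU_START(cap) == 5, HE_6G_CAP_MAX_MPDU_LEN(cap) == 3 and
+ * HE_6G_CAP_RD_RESPONDER(cap) == 0.
+ */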
+
+/**
+ * UORA parameter set element (sec 9.4.2.244)
+ */
+BWL_PRE_PACKED_STRUCT struct he_uora_ie {
+ uint8 id;
+ uint8 len;
+ uint8 id_ext;
+ uint8 ocw_range;
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct he_uora_ie he_uora_ie_t;
+
+/* Bit field Masks */
+#define HE_UORA_EOCW_MIN_IDX 0u
+#define HE_UORA_EOCW_MIN_FSZ 3u
+#define HE_UORA_EOCW_MAX_IDX 3u
+#define HE_UORA_EOCW_MAX_FSZ 3u
+/* Reserved -bit6 -7 */
+
+/**
+ * MU EDCA parameter set element (sec 9.4.2.245)
+ */
+BWL_PRE_PACKED_STRUCT struct he_mu_ac_param_record {
+ uint8 aci_aifsn;
+ uint8 ecw_min_max;
+ uint8 muedca_timer;
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct he_mu_ac_param_record he_mu_ac_param_record_t;
+
+BWL_PRE_PACKED_STRUCT struct he_muedca_ie {
+ uint8 id;
+ uint8 len;
+ uint8 id_ext;
+ uint8 mu_qos_info;
+ he_mu_ac_param_record_t param_ac[AC_COUNT];
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct he_muedca_ie he_muedca_ie_t;
+
+#define HE_MU_EDCA_PARAM_UPD_CNT_IDX 0u /* EDCA Parameter Set Update Count */
+#define HE_MU_EDCA_PARAM_UPD_CNT_LEN 4u
+
+#define HE_MU_SIGA_SIGB_MCS_DPCU 0
+#define HE_MU_SIGA_SIGB_SYMS_DPCU 3u
+#define HE_MU_SIGA_GI_LTF_DPCU 3u
+
+/**
+ * Spatial Reuse Parameter Set element (sec 9.4.2.241)
+ */
+/* bit position and field width */
+#define HE_SRP_CTRL_SRP_DISALLOW_IDX 0 /* SRP Disallowed */
+#define HE_SRP_CTRL_SRP_DISALLOW_FSZ 1
+#define HE_SRP_CTRL_NON_SRG_OBSS_PD_SR_DISALLOW_IDX 1 /* NonSRG OBSS PD SR Disallowed */
+#define HE_SRP_CTRL_NON_SRG_OBSS_PD_SR_DISALLOW_FSZ 1
+#define HE_SRP_CTRL_NON_SRG_OFFSET_PRESENT_IDX 2 /* NonSRG Offset Present */
+#define HE_SRP_CTRL_NON_SRG_OFFSET_PRESENT_FSZ 1
+#define HE_SRP_CTRL_SRG_INFO_PRESENT_IDX 3 /* SRG Information Present */
+#define HE_SRP_CTRL_SRG_INFO_PRESENT_FSZ 1
+#define HE_SRP_CTRL_HESIGA_SR_VALUE15_ALLOWED_IDX 4 /* HESIGA_SRP_value15_allowed */
+#define HE_SRP_CTRL_HESIGA_SR_VALUE15_ALLOWED_FSZ 1
+/* Reserved b5-b7 */
+
+/* Spatial Reuse Parameter Set element */
+BWL_PRE_PACKED_STRUCT struct he_srp_ie {
+ uint8 id;
+ uint8 len;
+ uint8 id_ext;
+ uint8 sr_control;
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct he_srp_ie he_srp_ie_t;
+
+#define HE_SRP_NON_SRG_OBSS_PD_MAX_OFFSET_LEN 1u
+#define HE_SRP_SRG_OBSS_PD_MIN_OFFSET_LEN 1u
+#define HE_SRP_SRG_OBSS_PD_MAX_OFFSET_LEN 1u
+#define HE_SRP_SRG_BSSCOLOR_BITMAP_LEN 8u
+#define HE_SRP_SRG_PARTIAL_BSSID_BITMAP_LEN 8u
+
+#define HE_SRP_IE_MIN_LEN (sizeof(he_srp_ie_t) - TLV_HDR_LEN)
+#define HE_SRP_IE_MAX_LEN (sizeof(he_srp_ie_t) - TLV_HDR_LEN +\
+	HE_SRP_NON_SRG_OBSS_PD_MAX_OFFSET_LEN + HE_SRP_SRG_OBSS_PD_MIN_OFFSET_LEN +\
+	HE_SRP_SRG_OBSS_PD_MAX_OFFSET_LEN + HE_SRP_SRG_BSSCOLOR_BITMAP_LEN +\
+	HE_SRP_SRG_PARTIAL_BSSID_BITMAP_LEN)
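+
+/* Worked example (assuming TLV_HDR_LEN is 2): sizeof(he_srp_ie_t) is 4, so
+ * HE_SRP_IE_MIN_LEN evaluates to 2 and HE_SRP_IE_MAX_LEN to
+ * 2 + 1 + 1 + 1 + 8 + 8 = 21 bytes.
+ */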
+
+/* Bit field Masks */
+#define HE_SRP_CTRL_SRP_DISALLOW (1 << HE_SRP_CTRL_SRP_DISALLOW_IDX)
+#define HE_SRP_CTRL_NON_SRG_OBSS_PD_SR_DISALLOW (1 << HE_SRP_CTRL_NON_SRG_OBSS_PD_SR_DISALLOW_IDX)
+#define HE_SRP_CTRL_NON_SRG_OFFSET_PRESENT (1 << HE_SRP_CTRL_NON_SRG_OFFSET_PRESENT_IDX)
+#define HE_SRP_CTRL_SRG_INFO_PRESENT (1 << HE_SRP_CTRL_SRG_INFO_PRESENT_IDX)
+#define HE_SRP_CTRL_HESIGA_SR_VALUE15_ALLOWED (1 << HE_SRP_CTRL_HESIGA_SR_VALUE15_ALLOWED_IDX)
+
+/**
+ * ref: (Table 28-21 Page 473 D3.0)
+ *
+ * -Spatial Reuse field encoding for an HE SU PPDU, HE ER SU PPDU, and HE MU PPDU
+ */
+#define HE_SRP_DISALLOW 0u /* SRP_DISALLOW */
+/* Values 1 to 12 are reserved */
+#define HE_SR_RESTRICTED 13u /* SR Restricted */
+#define HE_SR_DELAY 14u /* SR Delay */
+#define HE_SRP_AND_NON_SRG_OBSS_PD_PROHIBITED 15u /* SRP_AND_NON_SRG_OBSS_PD_PROHIBITED */
+#define HE_SRP_MASK 0x0Fu
+
+/**
+ * BSS Color Change Announcement element (sec 9.4.2.243)
+ */
+/* bit position and field width */
+#define HE_BSSCOLOR_CHANGE_NEWCOLOR_IDX 0 /* New BSSColor info */
+#define HE_BSSCOLOR_CHANGE_NEWCOLOR_FSZ 6u
+
+/* HE Bsscolor change element */
+BWL_PRE_PACKED_STRUCT struct he_bsscolor_change_ie {
+ uint8 id;
+ uint8 len;
+ uint8 id_ext;
+ uint8 color_switch_cntdwn;
+ uint8 new_bsscolor_info;
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct he_bsscolor_change_ie he_bsscolor_change_ie_t;
+
+/* HE SU bit position and field width */
+#define HE_SU_PPDU_FORMAT_IDX 0u
+#define HE_SU_PPDU_FORMAT_FSZ 1u
+#define HE_SU_PPDU_BEAM_CHANGE_IDX 1u
+#define HE_SU_PPDU_BEAM_CHANGE_FSZ 1u
+#define HE_SU_PPDU_DL_UL_IDX 2u
+#define HE_SU_PPDU_DL_UL_FSZ 1u
+#define HE_SU_PPDU_MCS_IDX 3u
+#define HE_SU_PPDU_MCS_FSZ 4u
+#define HE_SU_PPDU_DCM_IDX 7u
+#define HE_SU_PPDU_DCM_FSZ 1u
+#define HE_SU_PPDU_BSS_COLOR_IDX 8u
+#define HE_SU_PPDU_BSS_COLOR_FSZ 6u
+#define HE_SU_PPDU_SR_IDX 15
+#define HE_SU_PPDU_SR_FSZ 4u
+#define HE_SU_PPDU_BW_IDX 19u
+#define HE_SU_PPDU_BW_FSZ 2u
+#define HE_SU_PPDU_GI_IDX 21u
+#define HE_SU_PPDU_GI_FSZ 2u
+#define HE_SU_PPDU_LTF_SIZE_IDX 21u
+#define HE_SU_PPDU_LTF_SIZE_FSZ 2u
+#define HE_SU_PPDU_NUM_LTF_IDX 21u
+#define HE_SU_PPDU_NUM_LTF_FSZ 2u
+#define HE_SU_PPDU_NSTS_IDX 23u
+#define HE_SU_PPDU_NSTS_FSZ 3u
+#define HE_SU_PPDU_DOPPLER_NOTSET_NSTS_IDX 23u
+#define HE_SU_PPDU_DOPPLER_NOTSET_NSTS_FSZ 3u
+#define HE_SU_PPDU_DOPPLER_SET_NSTS_IDX 23u
+#define HE_SU_PPDU_DOPPLER_SET_NSTS_FSZ 2u
+#define HE_SU_PPDU_MIDAMBLE_IDX 25u
+#define HE_SU_PPDU_MIDAMBLE_FSZ 1u
+#define HE_SU_PPDU_TXOP_IDX 26u
+#define HE_SU_PPDU_TXOP_FSZ 7u
+#define HE_SU_PPDU_CODING_IDX 33u
+#define HE_SU_PPDU_CODING_FSZ 1u
+#define HE_SU_PPDU_LDPC_IDX 34u
+#define HE_SU_PPDU_LDPC_FSZ 1u
+#define HE_SU_PPDU_STBC_IDX 35u
+#define HE_SU_PPDU_STBC_FSZ 1u
+#define HE_SU_PPDU_TXBF_IDX 36u
+#define HE_SU_PPDU_TXBF_FSZ 1u
+#define HE_SU_PPDU_PADDING_IDX 37u
+#define HE_SU_PPDU_PADDING_FSZ 2u
+#define HE_SU_PPDU_PE_IDX 39u
+#define HE_SU_PPDU_PE_FSZ 1u
+#define HE_SU_PPDU_DOPPLER_IDX 41u
+#define HE_SU_PPDU_DOPPLER_FSZ 1u
+
+/* For HE SU/RE SIG A : PLCP0 bit fields [32bit] */
+#define HE_SU_RE_SIGA_FORMAT_MASK 0x00000001u
+#define HE_SU_RE_SIGA_RE_VAL 0x00000000u
+#define HE_SU_RE_SIGA_SU_VAL 0x00000001u
+#define HE_SU_RE_SIGA_FORMAT_SHIFT 0u
+#define HE_SU_RE_SIGA_BEAM_CHANGE_SHIFT 1u
+#define HE_SU_RE_SIGA_UL_DL_SHIFT 2u
+#define HE_SU_RE_SIGA_MCS_MASK 0x00000078u
+#define HE_SU_RE_SIGA_MCS_SHIFT 3u
+#define HE_SU_RE_SIGA_DCM_MASK 0x00000080u
+#define HE_SU_RE_SIGA_DCM_SHIFT 7u
+#define HE_SU_RE_SIGA_BSS_COLOR_SHIFT 8u /* Bits 13:8 */
+#define HE_SU_RE_SIGA_BSS_COLOR_MASK 0x00003F00u
+#define HE_SU_RE_SIGA_RSVD_PLCP0_VAL 0x00004000u
+#define HE_SU_RE_SIGA_SRP_VAL_SHIFT 15u /* Bits 18:15 */
+#define HE_SU_RE_SIGA_SRP_VAL_MASK 0x00078000u
+#define HE_SU_SIGA_BW_MASK 0x00180000u
+#define HE_SU_SIGA_BW_SHIFT 19u
+#define HE_RE_SIGA_TONE_MASK 0x00180000u
+#define HE_RE_SIGA_TONE_SHIFT 19u
+#define HE_SU_RE_SIGA_20MHZ_VAL 0x00000000u
+#define HE_SU_RE_SIGA_40MHZ_VAL 0x00080000u
+#define HE_SU_RE_SIGA_80MHZ_VAL 0x00100000u
+#define HE_SU_RE_SIGA_160MHZ_VAL 0x00180000u
+#define HE_SU_RE_SIGA_GI_LTF_MASK 0x00600000u
+#define HE_SU_RE_SIGA_1xLTF_GI8us_VAL 0x00000000u
+#define HE_SU_RE_SIGA_2xLTF_GI8us_VAL 0x00200000u
+#define HE_SU_RE_SIGA_2xLTF_GI16us_VAL 0x00400000u
+#define HE_SU_RE_SIGA_4xLTF_GI32us_VAL 0x00600000u
+#define HE_SU_RE_SIGA_GI_LTF_SHIFT 21u
+#define HE_SU_RE_SIGA_NSTS_MASK 0x03800000u
+#define HE_SU_RE_SIGA_NSTS_SHIFT 23u
+#define HE_SU_RE_SIGA_TXOP_PLCP0_MASK 0xFC000000u
+#define HE_SU_RE_SIGA_TXOP_PLCP0_SHIFT 26u
+
+/* For HE SU SIG EXT : PLCP0 bit fields [32bit] */
+#define HE_SU_SIG_EXT_GI_LTF_MASK 0x00000003u
+#define HE_SU_SIG_EXT_1xLTF_GI8us_VAL 0x00000000u
+#define HE_SU_SIG_EXT_2xLTF_GI8us_VAL 0x00000001u
+#define HE_SU_SIG_EXT_2xLTF_GI16us_VAL 0x00000002u
+#define HE_SU_SIG_EXT_4xLTF_GI32us_VAL 0x00000003u
+#define HE_SU_SIG_EXT_STBC_MASK 0x00000040u
+#define HE_SU_SIG_EXT_STBC_SHIFT 6u
+#define HE_SU_SIG_EXT_LDPC_MASK 0x00000080u
+#define HE_SU_SIG_EXT_LDPC_SHIFT 7u
+#define HE_SU_SIG_EXT_MCS_MASK 0x0000f000u
+#define HE_SU_SIG_EXT_MCS_SHIFT 12u
+#define HE_SU_SIG_EXT_DCM_MASK 0x00010000u
+#define HE_SU_SIG_EXT_DCM_SHIFT 16u
+#define HE_SU_SIG_EXT_NSTS_MASK 0x000e0000u
+#define HE_SU_SIG_EXT_NSTS_SHIFT 17u
+#define HE_SU_SIG_EXT_CODING_MASK 0x00800000u
+#define HE_SU_SIG_EXT_CODING_SHIFT 23u
+
+/* HE MU PPDU - bit position and field width */
+#define HE_MU_PPDU_DL_UL_IDX 0u
+#define HE_MU_PPDU_DL_UL_FSZ 1u
+#define HE_MU_PPDU_SIGB_MCS_IDX 1u
+#define HE_MU_PPDU_SIGB_MCS_FSZ 3u
+#define HE_MU_PPDU_SIGB_DCM_IDX 4u
+#define HE_MU_PPDU_SIGB_DCM_FSZ 1u
+#define HE_MU_PPDU_BSS_COLOR_IDX 5u
+#define HE_MU_PPDU_BSS_COLOR_FSZ 6u
+#define HE_MU_PPDU_SR_IDX 11u
+#define HE_MU_PPDU_SR_FSZ 4u
+
+#define HE_MU_PPDU_SIGB_SYM_MU_MIMO_USER_IDX 18u
+#define HE_MU_PPDU_SIGB_SYM_MU_MIMO_USER_FSZ 3u
+
+#define HE_MU_PPDU_PRE_PUNCR_SIGA_IDX 15u
+#define HE_MU_PPDU_PRE_PUNCR_SIGA_FSZ 2u
+
+#define HE_MU_PPDU_BW_SIGA_IDX 15u
+#define HE_MU_PPDU_BW_SIGA_FSZ 2u
+#define HE_MU_PPDU_BW_SIGA_KNOWN_IDX 17u
+#define HE_MU_PPDU_BW_SIGA_KNOWN_FSZ 1u
+
+#define HE_MU_PPDU_SIGB_SYMB_IDX 18u
+#define HE_MU_PPDU_SIGB_SYMB_FSZ 4u
+
+#define HE_MU_PPDU_SIGB_COMP_IDX 22u
+#define HE_MU_PPDU_SIGB_COMP_FSZ 1u
+#define HE_MU_PPDU_GI_IDX 23u
+#define HE_MU_PPDU_GI_FSZ 2u
+#define HE_MU_PPDU_LTF_SIZE_IDX 23u
+#define HE_MU_PPDU_LTF_SIZE_FSZ 2u
+#define HE_MU_PPDU_NUM_LTF_IDX 23u
+#define HE_MU_PPDU_NUM_LTF_FSZ 2u
+#define HE_MU_PPDU_DOPPLER_IDX 25u
+#define HE_MU_PPDU_DOPPLER_FSZ 1u
+#define HE_MU_PPDU_TXOP_IDX 26u
+#define HE_MU_PPDU_TXOP_FSZ 7u
+#define HE_MU_PPDU_MIDAMBLE_IDX 34u
+#define HE_MU_PPDU_MIDAMBLE_FSZ 3u
+#define HE_MU_PPDU_LDPC_IDX 37u
+#define HE_MU_PPDU_LDPC_FSZ 1u
+#define HE_MU_PPDU_STBC_IDX 38u
+#define HE_MU_PPDU_STBC_FSZ 1u
+#define HE_MU_PPDU_PADDING_IDX 39u
+#define HE_MU_PPDU_PADDING_FSZ 2u
+#define HE_MU_PPDU_PE_IDX 41u
+#define HE_MU_PPDU_PE_FSZ 1u
+
+/* HE trigger PPDU - bit position and field width */
+#define HE_TRIG_PPDU_BSS_COLOR_IDX 1u
+#define HE_TRIG_PPDU_BSS_COLOR_FSZ 6u
+
+/* full spatial reuse field */
+#define HE_TRIG_PPDU_SR_IDX 7u
+#define HE_TRIG_PPDU_SR_FSZ 16u
+
+#define HE_TRIG_PPDU_SR1_IDX 7u
+#define HE_TRIG_PPDU_SR1_FSZ 4u
+#define HE_TRIG_PPDU_SR2_IDX 11u
+#define HE_TRIG_PPDU_SR2_FSZ 4u
+#define HE_TRIG_PPDU_SR3_IDX 15u
+#define HE_TRIG_PPDU_SR3_FSZ 4u
+#define HE_TRIG_PPDU_SR4_IDX 19u
+#define HE_TRIG_PPDU_SR4_FSZ 4u
+#define HE_TRIG_PPDU_TXOP_IDX 26u
+#define HE_TRIG_PPDU_TXOP_FSZ 7u
+
+/* For HE MU SIG A : PLCP0 bit fields [32bit] */
+#define HE_MU_SIGA_UL_DL_SHIFT 0
+#define HE_MU_SIGA_UL_TB_PPDU 0
+#define HE_MU_SIGA_SIGB_MCS_MASK 0x0000000E
+#define HE_MU_SIGA_SIGB_MCS_SHIFT 1
+#define HE_MU_SIGA_SIGB_DCM_SHIFT 4
+#define HE_MU_SIGA_SIGB_DCM_DISABLED 0
+#define HE_MU_SIGA_BW_SHIFT 15
+#define HE_MU_SIGA_BW_80_UNPUNCTURED 2
+#define HE_MU_SIGA_BW_SEC_20_PUNCTURED 4
+#define HE_MU_SIGA_BW_SEC_40_PUNCTURED 5
+#define HE_MU_SIGA_SIGB_SYMS_SHIFT 18
+#define HE_MU_SIGA_GI_LTF_MASK 0x01800000
+#define HE_MU_SIGA_GI_LTF_SHIFT 23
+
+/* For HE MU SIG A : PLCP1 bit fields [32bit] */
+#define HE_MU_SIGA_STBC_MASK 0x00000040
+#define HE_MU_SIGA_STBC_SHIFT 6
+
+/* For HE SU/RE SIG A : PLCP1 bit fields [16bit] */
+#define HE_SU_RE_SIGA_TXOP_PLCP1_MASK 0x0001
+#define HE_SU_RE_SIGA_TXOP_PLCP1_SHIFT 0
+#define HE_SU_RE_SIGA_CODING_MASK 0x0002
+#define HE_SU_RE_SIGA_CODING_SHIFT 1
+#define HE_SU_RE_SIGA_LDPC_EXTRA_MASK 0x0004
+#define HE_SU_RE_SIGA_LDPC_EXTRA_SHIFT 2
+#define HE_SU_RE_SIGA_STBC_MASK 0x0008
+#define HE_SU_RE_SIGA_STBC_SHIFT 3
+#define HE_SU_RE_SIGA_BEAMFORM_MASK 0x0010
+#define HE_SU_RE_SIGA_BEAMFORM_SHIFT 4
+#define HE_SU_RE_SIGA_RSVD_PLCP1_VAL 0x0100
+
+/* For HE MU SIG A : PLCP1 bit fields [16bit] */
+#define HE_MU_SIGA_RSVD_SHIFT 1
+#define HE_MU_SIGA_LTF_SYMS_SHIFT 2
+
+/* For HE SU SIG A : RX PLCP4 bit fields [8bit] */
+#define HE_SU_SIGA2_STBC_RX_MASK 0x08u
+
+/* For HE ER SIG A : RX PLCP4 bit fields [8bit] */
+#define HE_ER_SIGA2_STBC_RX_MASK 0x08u
+
+/* For HE MU SIG A : RX PLCP4 bit fields [8bit] */
+#define HE_MU_SIGA2_STBC_RX_MASK 0x40u
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+/* HE Action Frame */
+/* FIXME: use temporary Offsets until the spec assigns them */
+#define HE_AF_CAT_OFF 0
+#define HE_AF_ACT_OFF 1
+
+/* TWT Setup */
+#define HE_AF_TWT_SETUP_TOKEN_OFF 2
+#define HE_AF_TWT_SETUP_TWT_IE_OFF 3
+
+/* TWT Teardown */
+#define HE_AF_TWT_TEARDOWN_FLOW_OFF 2
+
+/* TWT Information */
+#define HE_AF_TWT_INFO_OFF 2
+
+/* HE Action ID */
+/* FIXME: use temporary IDs until ANA assigns them */
+#define HE_ACTION_TWT_SETUP 1
+#define HE_ACTION_TWT_TEARDOWN 2
+#define HE_ACTION_TWT_INFO 3
+
+/* HE Basic trigger frame common info fields */
+#define HE_TRIG_CMNINFO_SZ 8
+typedef uint8 he_trig_cmninfo_set_t[HE_TRIG_CMNINFO_SZ];
+
+/* bit position and field width */
+#define HE_TRIG_CMNINFO_FRMTYPE_INDX 0 /* Trigger frame type */
+#define HE_TRIG_CMNINFO_FRMTYPE_FSZ 4
+#define HE_TRIG_CMNINFO_LSIGLEN_INDX 4 /* L-sig length */
+#define HE_TRIG_CMNINFO_LSIGLEN_FSZ 12
+#define HE_TRIG_CMNINFO_CASCADEIND_INDX 16 /* Cascade indication */
+#define HE_TRIG_CMNINFO_CASCADEIND_FSZ 1
+#define HE_TRIG_CMNINFO_CSREQ_INDX 17 /* Carrier sense required */
+#define HE_TRIG_CMNINFO_CSREQ_FSZ 1
+#define HE_TRIG_CMNINFO_BWINFO_INDX 18 /* Bw info */
+#define HE_TRIG_CMNINFO_BWINFO_FSZ 2
+#define HE_TRIG_CMNINFO_GI_LTF_INDX 20 /* Cp-LTF size */
+#define HE_TRIG_CMNINFO_GI_LTF_FSZ 2
+#define HE_TRIG_CMNINFO_MUMIMO_LTF_INDX 22 /* HE-LTF mask enable */
+#define HE_TRIG_CMNINFO_MUMIMO_LTF_FSZ 1
+#define HE_TRIG_CMNINFO_HELTF_SYM_INDX 23 /* HE-LTF symbols */
+#define HE_TRIG_CMNINFO_HELTF_SYM_FSZ 3
+#define HE_TRIG_CMNINFO_STBC_INDX 26 /* STBC support */
+#define HE_TRIG_CMNINFO_STBC_FSZ 1
+#define HE_TRIG_CMNINFO_LDPC_EXTSYM_INDX 27 /* LDPC extra symbol */
+#define HE_TRIG_CMNINFO_LDPC_EXTSYM_FSZ 1
+#define HE_TRIG_CMNINFO_AP_TXPWR_INDX 28 /* AP TX power */
+#define HE_TRIG_CMNINFO_AP_TXPWR_FSZ 6
+#define HE_TRIG_CMNINFO_AFACT_INDX 34 /* a-factor */
+#define HE_TRIG_CMNINFO_AFACT_FSZ 2
+#define HE_TRIG_CMNINFO_PEDISAMBIG_INDX 36 /* PE disambiguity */
+#define HE_TRIG_CMNINFO_PEDISAMBIG_FSZ 1
+#define HE_TRIG_CMNINFO_SPTIAL_REUSE_INDX 37 /* spatial re-use */
+#define HE_TRIG_CMNINFO_SPTIAL_REUSE_FSZ 16
+#define HE_TRIG_CMNINFO_DOPPLER_INDX 53 /* doppler support */
+#define HE_TRIG_CMNINFO_DOPPLER_FSZ 1
+#define HE_TRIG_CMNINFO_HESIGA_RSVD_INDX 54 /* rsvd bits from HE-SIGA */
+#define HE_TRIG_CMNINFO_HESIGA_RSVD_FSZ 9
+#define HE_TRIG_CMNINFO_RSVD_INDX 63 /* reserved bit from HE-SIGA */
+#define HE_TRIG_CMNINFO_RSVD_FSZ 1
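+
+/* Usage sketch (illustrative only): one way to read a field out of the packed
+ * common info octets by its (INDX, FSZ) pair above, assuming LSB-first bit
+ * numbering within the little-endian octet string; the helper name is
+ * hypothetical.
+ */
+#if 0	/* example only, not compiled */
+static uint32
+he_trig_cmninfo_get_field(const uint8 *ci, uint32 indx, uint32 fsz)
+{
+	uint32 val = 0, i;
+
+	for (i = 0; i < fsz; i++) {
+		uint32 bit = indx + i;
+		val |= (uint32)((ci[bit >> 3] >> (bit & 7u)) & 1u) << i;
+	}
+	return val;
+}
+/* e.g. he_trig_cmninfo_get_field(ci, HE_TRIG_CMNINFO_LSIGLEN_INDX,
+ * HE_TRIG_CMNINFO_LSIGLEN_FSZ) returns the 12-bit L-SIG Length
+ */
+#endif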
+
+/* HE Basic trigger frame user info fields */
+#define HE_TRIG_USRINFO_SZ 5
+typedef uint8 he_trig_usrinfo_set_t[HE_TRIG_USRINFO_SZ];
+
+/* bit position and field width */
+#define HE_TRIG_USRINFO_AID_INDX 0 /* AID */
+#define HE_TRIG_USRINFO_AID_FSZ 12
+#define HE_TRIG_USRINFO_RU_ALLOC_INDX 12 /* RU allocation index */
+#define HE_TRIG_USRINFO_RU_ALLOC_FSZ 8
+#define HE_TRIG_USRINFO_CODING_INDX 20 /* coding type (BCC/LDPC) */
+#define HE_TRIG_USRINFO_CODING_FSZ 1
+#define HE_TRIG_USRINFO_MCS_INDX 21 /* MCS index value */
+#define HE_TRIG_USRINFO_MCS_FSZ 4
+#define HE_TRIG_USRINFO_DCM_INDX 25 /* Dual carrier modulation */
+#define HE_TRIG_USRINFO_DCM_FSZ 1
+#define HE_TRIG_USRINFO_SSALLOC_STRMOFFSET_INDX 26 /* stream offset */
+#define HE_TRIG_USRINFO_SSALLOC_STRMOFFSET_FSZ 3
+#define HE_TRIG_USRINFO_SSALLOC_NSS_INDX 29 /* number of spatial streams */
+#define HE_TRIG_USRINFO_SSALLOC_NSS_FSZ 3
+#define HE_TRIG_USRINFO_TARGET_RSSI_INDX 32 /* Target RSSI */
+#define HE_TRIG_USRINFO_TARGET_RSSI_FSZ 7
+#define HE_TRIG_USRINFO_RSVD_INDX 39 /* Reserved bit */
+#define HE_TRIG_USRINFO_RSVD_FSZ 1
+
+/* Different types of trigger frame */
+#define HE_TRIG_TYPE_BASIC_FRM 0 /* basic trigger frame */
+#define HE_TRIG_TYPE_BEAM_RPT_POLL_FRM 1 /* beamforming report poll frame */
+#define HE_TRIG_TYPE_MU_BAR_FRM 2 /* MU-BAR frame */
+#define HE_TRIG_TYPE_MU_RTS__FRM 3 /* MU-RTS frame */
+#define HE_TRIG_TYPE_BSR_FRM 4 /* Buffer status report poll */
+
+/* HE Timing related parameters (Table 28-9) */
+#define HE_T_LEG_STF 8
+#define HE_T_LEG_LTF 8
+#define HE_T_LEG_LSIG 4
+#define HE_T_RL_SIG 4
+#define HE_T_SIGA 8
+#define HE_T_STF 4 /* STF for SU / MU HE PPDUs */
+#define HE_T_TB_PPDU_STF 8 /* STF for HE trigger based PPDUs */
+#define HE_T_LEG_PREAMBLE (HE_T_LEG_STF + HE_T_LEG_LTF + HE_T_LEG_LSIG)
+#define HE_T_LEG_SYMB 4
+#define HE_RU_26_TONE 26
+#define HE_RU_52_TONE 52
+#define HE_RU_106_TONE 106
+#define HE_RU_242_TONE 242
+#define HE_RU_484_TONE 484
+#define HE_RU_996_TONE 996
+#define HE_RU_2x996_TONE 1992
+#define HE_MAX_26_TONE_RU_INDX 36
+#define HE_MAX_52_TONE_RU_INDX 52
+#define HE_MAX_106_TONE_RU_INDX 60
+#define HE_MAX_242_TONE_RU_INDX 64
+#define HE_MAX_484_TONE_RU_INDX 66
+#define HE_MAX_996_TONE_RU_INDX 67
+#define HE_MAX_2x996_TONE_RU_INDX 68
+
+/**
+ * ref: (Table 28-9 Page 285)
+ *
+ * - for calculation purpose - in multiples of 10 (*10)
+ */
+#define HE_T_LTF_1X 32
+#define HE_T_LTF_2X 64
+#define HE_T_LTF_4X 128
+#define HE_T_SYM1 136 /* OFDM symbol duration with base GI */
+#define HE_T_SYM2 144 /* OFDM symbol duration with double GI */
+#define HE_T_SYM4 160 /* OFDM symbol duration with quad GI */
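+
+/* Example (illustrative): with the *10 convention above, an OFDM symbol with
+ * double GI lasts HE_T_SYM2 / 10 = 14.4 us and a 2x HE-LTF lasts
+ * HE_T_LTF_2X / 10 = 6.4 us.
+ */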
+
+#define HE_N_LEG_SYM 3 /* bytes per legacy symbol */
+#define HE_N_TAIL 6 /* tail field bits for BCC */
+#define HE_N_SERVICE 16 /* bits in service field */
+#define HE_T_MAX_PE 16 /* max Packet extension duration */
+
+#endif /* _802_11ax_h_ */
diff --git a/bcmdhd.101.10.361.x/include/802.11e.h b/bcmdhd.101.10.361.x/include/802.11e.h
new file mode 100755
index 0000000..0fbf58c
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/802.11e.h
@@ -0,0 +1,133 @@
+/*
+ * 802.11e protocol header file
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _802_11e_H_
+#define _802_11e_H_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+/* WME Traffic Specification (TSPEC) element */
+#define WME_TSPEC_HDR_LEN 2 /* WME TSPEC header length */
+#define WME_TSPEC_BODY_OFF 2 /* WME TSPEC body offset */
+
+#define WME_CATEGORY_CODE_OFFSET 0 /* WME Category code offset */
+#define WME_ACTION_CODE_OFFSET 1 /* WME Action code offset */
+#define WME_TOKEN_CODE_OFFSET 2 /* WME Token code offset */
+#define WME_STATUS_CODE_OFFSET 3 /* WME Status code offset */
+
+BWL_PRE_PACKED_STRUCT struct tsinfo {
+ uint8 octets[3];
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct tsinfo tsinfo_t;
+
+/* 802.11e TSPEC IE */
+typedef BWL_PRE_PACKED_STRUCT struct tspec {
+ uint8 oui[DOT11_OUI_LEN]; /* WME_OUI */
+ uint8 type; /* WME_TYPE */
+ uint8 subtype; /* WME_SUBTYPE_TSPEC */
+ uint8 version; /* WME_VERSION */
+ tsinfo_t tsinfo; /* TS Info bit field */
+ uint16 nom_msdu_size; /* (Nominal or fixed) MSDU Size (bytes) */
+ uint16 max_msdu_size; /* Maximum MSDU Size (bytes) */
+ uint32 min_srv_interval; /* Minimum Service Interval (us) */
+ uint32 max_srv_interval; /* Maximum Service Interval (us) */
+ uint32 inactivity_interval; /* Inactivity Interval (us) */
+ uint32 suspension_interval; /* Suspension Interval (us) */
+ uint32 srv_start_time; /* Service Start Time (us) */
+ uint32 min_data_rate; /* Minimum Data Rate (bps) */
+ uint32 mean_data_rate; /* Mean Data Rate (bps) */
+ uint32 peak_data_rate; /* Peak Data Rate (bps) */
+ uint32 max_burst_size; /* Maximum Burst Size (bytes) */
+ uint32 delay_bound; /* Delay Bound (us) */
+ uint32 min_phy_rate; /* Minimum PHY Rate (bps) */
+ uint16 surplus_bw; /* Surplus Bandwidth Allowance (range 1.0-8.0) */
+ uint16 medium_time; /* Medium Time (32 us/s periods) */
+} BWL_POST_PACKED_STRUCT tspec_t;
+
+#define WME_TSPEC_LEN (sizeof(tspec_t)) /* not including 2 bytes of header */
+
+/* ts_info */
+/* 802.1D priority is duplicated - bits 13-11 AND bits 3-1 */
+#define TS_INFO_TID_SHIFT 1 /* TS info. TID shift */
+#define TS_INFO_TID_MASK (0xf << TS_INFO_TID_SHIFT) /* TS info. TID mask */
+#define TS_INFO_CONTENTION_SHIFT 7 /* TS info. contention shift */
+#define TS_INFO_CONTENTION_MASK (0x1 << TS_INFO_CONTENTION_SHIFT) /* TS info. contention mask */
+#define TS_INFO_DIRECTION_SHIFT 5 /* TS info. direction shift */
+#define TS_INFO_DIRECTION_MASK (0x3 << TS_INFO_DIRECTION_SHIFT) /* TS info. direction mask */
+#define TS_INFO_PSB_SHIFT 2 /* TS info. PSB bit Shift */
+#define TS_INFO_PSB_MASK (1 << TS_INFO_PSB_SHIFT) /* TS info. PSB mask */
+#define TS_INFO_UPLINK (0 << TS_INFO_DIRECTION_SHIFT) /* TS info. uplink */
+#define TS_INFO_DOWNLINK (1 << TS_INFO_DIRECTION_SHIFT) /* TS info. downlink */
+#define TS_INFO_BIDIRECTIONAL (3 << TS_INFO_DIRECTION_SHIFT) /* TS info. bidirectional */
+#define TS_INFO_USER_PRIO_SHIFT 3 /* TS info. user priority shift */
+/* TS info. user priority mask */
+#define TS_INFO_USER_PRIO_MASK (0x7 << TS_INFO_USER_PRIO_SHIFT)
+
+/* Macro to get/set bit(s) field in TSINFO */
+#define WLC_CAC_GET_TID(pt) ((((pt).octets[0]) & TS_INFO_TID_MASK) >> TS_INFO_TID_SHIFT)
+#define WLC_CAC_GET_DIR(pt) ((((pt).octets[0]) & \
+ TS_INFO_DIRECTION_MASK) >> TS_INFO_DIRECTION_SHIFT)
+#define WLC_CAC_GET_PSB(pt) ((((pt).octets[1]) & TS_INFO_PSB_MASK) >> TS_INFO_PSB_SHIFT)
+#define WLC_CAC_GET_USER_PRIO(pt) ((((pt).octets[1]) & \
+ TS_INFO_USER_PRIO_MASK) >> TS_INFO_USER_PRIO_SHIFT)
+
+#define WLC_CAC_SET_TID(pt, id) ((((pt).octets[0]) & (~TS_INFO_TID_MASK)) | \
+ ((id) << TS_INFO_TID_SHIFT))
+#define WLC_CAC_SET_USER_PRIO(pt, prio) ((((pt).octets[1]) & (~TS_INFO_USER_PRIO_MASK)) | \
+	((prio) << TS_INFO_USER_PRIO_SHIFT))
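+
+/* Usage sketch (illustrative only): the SET macros above evaluate to the
+ * updated octet rather than writing to `pt`, so the result must be stored
+ * back; the names below are hypothetical.
+ */
+#if 0	/* example only, not compiled */
+static void
+tsinfo_example(void)
+{
+	tsinfo_t ti = { {0, 0, 0} };
+
+	ti.octets[0] = (uint8)WLC_CAC_SET_TID(ti, 5);
+	ti.octets[1] = (uint8)WLC_CAC_SET_USER_PRIO(ti, 6);
+
+	/* WLC_CAC_GET_TID(ti) == 5, WLC_CAC_GET_USER_PRIO(ti) == 6 */
+}
+#endif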
+
+/* 802.11e QBSS Load IE */
+#define QBSS_LOAD_IE_LEN 5 /* QBSS Load IE length */
+#define QBSS_LOAD_AAC_OFF 3 /* AAC offset in IE */
+
+#define CAC_ADDTS_RESP_TIMEOUT 1000 /* default ADDTS response timeout in ms */
+ /* DEFVAL dot11ADDTSResponseTimeout = 1s */
+
+/* 802.11e ADDTS status code */
+#define DOT11E_STATUS_ADMISSION_ACCEPTED 0 /* TSPEC Admission accepted status */
+#define DOT11E_STATUS_ADDTS_INVALID_PARAM 1 /* TSPEC invalid parameter status */
+#define DOT11E_STATUS_ADDTS_REFUSED_NSBW 3 /* ADDTS refused (non-sufficient BW) */
+#define DOT11E_STATUS_ADDTS_REFUSED_AWHILE 47 /* ADDTS refused but could retry later */
+#ifdef BCMCCX
+#define CCX_STATUS_ASSOC_DENIED_UNKNOWN 0xc8 /* unspecified QoS related failure */
+#define CCX_STATUS_ASSOC_DENIED_AP_POLICY 0xc9 /* TSPEC refused due to AP policy */
+#define CCX_STATUS_ASSOC_DENIED_NO_BW 0xca /* Assoc denied due to AP insufficient BW */
+#define CCX_STATUS_ASSOC_DENIED_BAD_PARAM 0xcb /* one or more TSPEC with invalid parameter */
+#endif /* BCMCCX */
+
+/* 802.11e DELTS status code */
+#define DOT11E_STATUS_QSTA_LEAVE_QBSS 36 /* STA leave QBSS */
+#define DOT11E_STATUS_END_TS 37 /* END TS */
+#define DOT11E_STATUS_UNKNOWN_TS 38 /* UNKNOWN TS */
+#define DOT11E_STATUS_QSTA_REQ_TIMEOUT 39 /* STA ADDTS request timeout */
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _802_11e_H_ */
diff --git a/bcmdhd.101.10.361.x/include/802.11r.h b/bcmdhd.101.10.361.x/include/802.11r.h
new file mode 100755
index 0000000..7bb8728
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/802.11r.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ *
+ * Fundamental constants relating to 802.11r
+ */
+
+#ifndef _802_11r_H_
+#define _802_11r_H_
+
+#define FBT_R0KH_ID_LEN 49 /* includes null termination */
+#define FBT_REASSOC_TIME_DEF 1000
+
+#define DOT11_FBT_SUBELEM_ID_R1KH_ID 1
+#define DOT11_FBT_SUBELEM_ID_GTK 2
+#define DOT11_FBT_SUBELEM_ID_R0KH_ID 3
+#define DOT11_FBT_SUBELEM_ID_IGTK 4
+#define DOT11_FBT_SUBELEM_ID_OCI 5u
+
+/*
+ * FBT Subelement ID lengths
+ */
+
+#define DOT11_FBT_SUBELEM_R1KH_LEN 6
+/* GTK_FIXED_LEN = Key_Info (2Bytes) + Key_Length (1Byte) + RSC (8Bytes) */
+#define DOT11_FBT_SUBELEM_GTK_FIXED_LEN 11
+/* GTK_MIN_LEN = GTK_FIXED_LEN + key (min 16 Bytes) + key_wrap (8Bytes) */
+#define DOT11_FBT_SUBELEM_GTK_MIN_LEN (DOT11_FBT_SUBELEM_GTK_FIXED_LEN + 24)
+/* GTK_MAX_LEN = GTK_FIXED_LEN + key (max 32 Bytes) + key_wrap (8Bytes) */
+#define DOT11_FBT_SUBELEM_GTK_MAX_LEN (DOT11_FBT_SUBELEM_GTK_FIXED_LEN + 40)
+#define DOT11_FBT_SUBELEM_R0KH_MIN_LEN 1
+#define DOT11_FBT_SUBELEM_R0KH_MAX_LEN 48
+/* IGTK_LEN = KeyID (2 Bytes) + IPN (6 Bytes) + Key_Length (1 Byte) +
+ * Wrapped_Key (key (16 Bytes) + key_wrap (8 Bytes))
+ */
+#define DOT11_FBT_SUBELEM_IGTK_LEN 33
+#define DOT11_FBT_SUBELEM_OCI_LEN 3u
+
+#endif /* #ifndef _802_11r_H_ */
diff --git a/bcmdhd.101.10.361.x/include/802.11s.h b/bcmdhd.101.10.361.x/include/802.11s.h
new file mode 100755
index 0000000..7c0869f
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/802.11s.h
@@ -0,0 +1,337 @@
+/*
+ * Fundamental types and constants relating to 802.11s Mesh
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _802_11s_h_
+#define _802_11s_h_
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+#define DOT11_MESH_FLAGS_AE_MASK 0x3u
+#define DOT11_MESH_FLAGS_AE_SHIFT 0u
+
+/* Mesh Control Flags: Address Extension mode values */
+#define DOT11_MESH_AE_NONE 0u
+#define DOT11_MESH_AE_A4 1u
+#define DOT11_MESH_AE_A5_A6 2u
+
+#define DOT11_MESH_CONNECTED_AS_SET 7
+#define DOT11_MESH_NUMBER_PEERING_SET 1
+#define DOT11_MESH_MESH_GWSET 0
+
+#define DOT11_MESH_ACTION_LINK_MET_REP 0
+#define DOT11_MESH_ACTION_PATH_SEL 1
+#define DOT11_MESH_ACTION_GATE_ANN 2
+#define DOT11_MESH_ACTION_CONG_CONT_NOTIF 3
+#define DOT11_MESH_ACTION_MCCA_SETUP_REQ 4
+#define DOT11_MESH_ACTION_MCCA_SETUP_REP 5
+#define DOT11_MESH_ACTION_MCCA_ADVT_REQ 6
+#define DOT11_MESH_ACTION_MCCA_ADVT 7
+#define DOT11_MESH_ACTION_MCCA_TEARDOWN 8
+#define DOT11_MESH_ACTION_TBTT_ADJ_REQ 9
+#define DOT11_MESH_ACTION_TBTT_ADJ_RESP 10
+
+/* self-protected action field values: 7-57v24 */
+#define DOT11_SELFPROT_ACTION_MESH_PEER_OPEN 1
+#define DOT11_SELFPROT_ACTION_MESH_PEER_CONFM 2
+#define DOT11_SELFPROT_ACTION_MESH_PEER_CLOSE 3
+#define DOT11_SELFPROT_ACTION_MESH_PEER_GK_INF 4
+#define DOT11_SELFPROT_ACTION_MESH_PEER_GK_ACK 5
+
+#define DOT11_MESH_AUTH_PROTO_NONE 0
+#define DOT11_MESH_AUTH_PROTO_SAE 1
+#define DOT11_MESH_AUTH_PROTO_8021X 2
+#define DOT11_MESH_AUTH_PROTO_VS 255
+
+#define DOT11_MESH_PATHSEL_LEN 2
+#define DOT11_MESH_PERR_LEN1 2 /* Minimum fixed PERR length */
+#define DOT11_MESH_PERR_LEN2 13 /* Minimum variable PERR length */
+#define DOT11_MESH_PREP_LEN 31 /* Minimum PREP length */
+#define DOT11_MESH_PREQ_LEN 37 /* Minimum PREQ length */
+
+#define DOT11_MESH_PATHSEL_PROTID_HWMP 1
+#define DOT11_MESH_PATHSEL_METRICID_ALM 1 /* Air link metric */
+#define DOT11_MESH_CONGESTCTRL_NONE 0
+#define DOT11_MESH_CONGESTCTRL_SP 1
+#define DOT11_MESH_SYNCMETHOD_NOFFSET 1
+
+BWL_PRE_PACKED_STRUCT struct dot11_meshctrl_hdr {
+ uint8 flags; /* flag bits such as ae etc */
+ uint8 ttl; /* time to live */
+ uint32 seq; /* sequence control */
+ struct ether_addr a5; /* optional address 5 */
+ struct ether_addr a6; /* optional address 6 */
+} BWL_POST_PACKED_STRUCT;
+
+#define DOT11_MESH_CONTROL_MIN_LEN 6u
+#define DOT11_MESH_CONTROL_A4_LEN 12u
+#define DOT11_MESH_CONTROL_A5A6_LEN 18u
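+
+/* Usage sketch (illustrative only): deriving the Mesh Control length from the
+ * AE subfield of the flags octet; the helper name is hypothetical.
+ */
+#if 0	/* example only, not compiled */
+static uint
+dot11_mesh_control_len(uint8 flags)
+{
+	switch ((flags & DOT11_MESH_FLAGS_AE_MASK) >> DOT11_MESH_FLAGS_AE_SHIFT) {
+	case DOT11_MESH_AE_A4:		return DOT11_MESH_CONTROL_A4_LEN;	/* 12 */
+	case DOT11_MESH_AE_A5_A6:	return DOT11_MESH_CONTROL_A5A6_LEN;	/* 18 */
+	default:			return DOT11_MESH_CONTROL_MIN_LEN;	/* 6 */
+	}
+}
+#endif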
+
+/* Mesh Path Selection Action Frame */
+BWL_PRE_PACKED_STRUCT struct dot11_mesh_pathsel {
+ uint8 category;
+ uint8 meshaction;
+ uint8 data[];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_mesh_pathsel dot11_mesh_pathsel_t;
+
+/* Mesh PREQ IE */
+BWL_PRE_PACKED_STRUCT struct mesh_preq_ie {
+ uint8 id;
+ uint8 len;
+ uint8 flags;
+ uint8 hop_count;
+ uint8 ttl;
+ uint32 pathdis_id;
+ struct ether_addr originator_addr;
+ uint32 originator_seq;
+ union {
+ BWL_PRE_PACKED_STRUCT struct {
+ struct ether_addr target_ext_add;
+ uint32 lifetime;
+ uint32 metric;
+ uint8 target_count;
+ uint8 data[];
+ } BWL_POST_PACKED_STRUCT oea;
+
+ BWL_PRE_PACKED_STRUCT struct {
+ uint32 lifetime;
+ uint32 metric;
+ uint8 target_count;
+ uint8 data[];
+ } BWL_POST_PACKED_STRUCT noea;
+ } u;
+} BWL_POST_PACKED_STRUCT;
+typedef struct mesh_preq_ie mesh_preq_ie_t;
+
+/* Target info (part of Mesh PREQ IE) */
+BWL_PRE_PACKED_STRUCT struct mesh_targetinfo {
+ uint8 target_flag;
+ struct ether_addr target_addr;
+ uint32 target_seq;
+} BWL_POST_PACKED_STRUCT;
+typedef struct mesh_targetinfo mesh_targetinfo_t;
+
+/* Mesh PREP IE */
+BWL_PRE_PACKED_STRUCT struct mesh_prep_ie {
+ uint8 id;
+ uint8 len;
+ uint8 flags;
+ uint8 hop_count;
+ uint8 ttl;
+ struct ether_addr target_addr;
+ uint32 target_seq;
+ union {
+ BWL_PRE_PACKED_STRUCT struct {
+ struct ether_addr target_ext_add;
+ uint32 lifetime;
+ uint32 metric;
+ uint8 target_count;
+ struct ether_addr originator_addr;
+ uint32 originator_seq;
+ } BWL_POST_PACKED_STRUCT oea;
+
+ BWL_PRE_PACKED_STRUCT struct {
+ uint32 lifetime;
+ uint32 metric;
+ uint8 target_count;
+ struct ether_addr originator_addr;
+ uint32 originator_seq;
+ } BWL_POST_PACKED_STRUCT noea;
+ } u;
+} BWL_POST_PACKED_STRUCT;
+typedef struct mesh_prep_ie mesh_prep_ie_t;
+
+/* Mesh PERR IE */
+struct mesh_perr_ie {
+ uint8 id;
+ uint8 len;
+ uint8 ttl;
+ uint8 num_dest;
+ uint8 data[];
+};
+typedef struct mesh_perr_ie mesh_perr_ie_t;
+
+/* Destination info is part of PERR IE */
+BWL_PRE_PACKED_STRUCT struct mesh_perr_destinfo {
+ uint8 flags;
+ struct ether_addr destination_addr;
+ uint32 dest_seq;
+ union {
+ BWL_PRE_PACKED_STRUCT struct {
+ struct ether_addr dest_ext_addr;
+ } BWL_POST_PACKED_STRUCT dea;
+
+ BWL_PRE_PACKED_STRUCT struct {
+			/* 2-byte reason code to be populated manually in software */
+ uint16 reason_code;
+ } BWL_POST_PACKED_STRUCT nodea;
+ } u;
+} BWL_POST_PACKED_STRUCT;
+typedef struct mesh_perr_destinfo mesh_perr_destinfo_t;
+
+/* Mesh peering action frame hdr */
+BWL_PRE_PACKED_STRUCT struct mesh_peering_frmhdr {
+ uint8 category;
+ uint8 action;
+ union {
+ struct {
+ uint16 capability;
+ } open;
+ struct {
+ uint16 capability;
+ uint16 AID;
+ } confirm;
+ uint8 data[1];
+ } u;
+} BWL_POST_PACKED_STRUCT;
+typedef struct mesh_peering_frmhdr mesh_peering_frmhdr_t;
+
+/* Mesh peering mgmt IE */
+BWL_PRE_PACKED_STRUCT struct mesh_peer_mgmt_ie_common {
+ uint16 mesh_peer_prot_id;
+ uint16 local_link_id;
+} BWL_POST_PACKED_STRUCT;
+typedef struct mesh_peer_mgmt_ie_common mesh_peer_mgmt_ie_common_t;
+#define MESH_PEER_MGMT_IE_OPEN_LEN (4)
+
+BWL_PRE_PACKED_STRUCT struct mesh_peer_mgmt_ie_cfm {
+ mesh_peer_mgmt_ie_common_t common;
+ uint16 peer_link_id;
+} BWL_POST_PACKED_STRUCT;
+typedef struct mesh_peer_mgmt_ie_cfm mesh_peer_mgmt_ie_cfm_t;
+#define MESH_PEER_MGMT_IE_CONF_LEN (6)
+
+BWL_PRE_PACKED_STRUCT struct mesh_peer_mgmt_ie_close {
+ mesh_peer_mgmt_ie_common_t common;
+	/* uint16 peer_link_id;
+	 * omitted for simplicity: not supported, TODO for future
+	 */
+ uint16 reason_code;
+} BWL_POST_PACKED_STRUCT;
+typedef struct mesh_peer_mgmt_ie_close mesh_peer_mgmt_ie_close_t;
+#define MESH_PEER_MGMT_IE_CLOSE_LEN (6)
+
+struct mesh_config_ie {
+ uint8 activ_path_sel_prot_id;
+ uint8 activ_path_sel_metric_id;
+ uint8 cong_ctl_mode_id;
+ uint8 sync_method_id;
+ uint8 auth_prot_id;
+ uint8 mesh_formation_info;
+ uint8 mesh_cap;
+};
+typedef struct mesh_config_ie mesh_config_ie_t;
+#define MESH_CONFIG_IE_LEN (7)
+
+/* Mesh peering states */
+#define MESH_PEERING_IDLE 0
+#define MESH_PEERING_OPEN_SNT 1
+#define MESH_PEERING_CNF_RCVD 2
+#define MESH_PEERING_OPEN_RCVD 3
+#define MESH_PEERING_ESTAB 4
+#define MESH_PEERING_HOLDING 5
+#define MESH_PEERING_LAST_STATE 6
+/* for debugging: mapping strings */
+#define MESH_PEERING_STATE_STRINGS \
+ {"IDLE ", "OPNSNT", "CNFRCV", "OPNRCV", "ESTAB ", "HOLDNG"}
+
+#ifdef WLMESH
+typedef BWL_PRE_PACKED_STRUCT struct mesh_peer_info {
+ /* mesh_peer_instance as given in the spec. Note that, peer address
+ * is stored in scb
+ */
+ uint16 mesh_peer_prot_id;
+ uint16 local_link_id;
+ uint16 peer_link_id;
+ /* AID generated by *peer* to self & received in peer_confirm */
+ uint16 peer_aid;
+
+ /* TODO: no mention in spec? possibly used in PS case. Note that aid generated
+ * from self to peer is stored in scb.
+ */
+ uint8 state;
+ /* TODO: struct mesh_peer_info *next; this field is required
+ * if multiple peerings per same src is allowed, which is
+ * true as per spec.
+ */
+} BWL_POST_PACKED_STRUCT mesh_peer_info_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct mesh_peer_info_ext {
+ mesh_peer_info_t peer_info;
+ uint16 local_aid; /* AID generated by *local* to peer */
+ struct ether_addr ea; /* peer ea */
+ uint32 entry_state; /* see MESH_PEER_ENTRY_STATE_ACTIVE etc; valid
+ * ONLY for internal peering requests
+ */
+ int rssi;
+} BWL_POST_PACKED_STRUCT mesh_peer_info_ext_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct mesh_peer_info_dump {
+ uint32 buflen;
+ uint32 version;
+ uint32 count; /* number of results */
+ mesh_peer_info_ext_t mpi_ext[1];
+} BWL_POST_PACKED_STRUCT mesh_peer_info_dump_t;
+#define WL_MESH_PEER_RES_FIXED_SIZE (sizeof(mesh_peer_info_dump_t) - sizeof(mesh_peer_info_ext_t))
+
+#endif /* WLMESH */
+
+/* Once an entry is added into mesh_peer_list and peering is subsequently
+ * lost, peering is retried up to MAX_MESH_SELF_PEER_ENTRY_RETRIES times.
+ * After that it is no longer retried and is moved to the
+ * MESH_SELF_PEER_ENTRY_STATE_TIMEDOUT state until the user adds it again
+ * explicitly, at which point its entry_state is changed to
+ * MESH_SELF_PEER_ENTRY_STATE_ACTIVE and peering is tried again.
+ */
+#define MAX_MESH_SELF_PEER_ENTRY_RETRIES 3
+#define MESH_SELF_PEER_ENTRY_STATE_ACTIVE 1
+#define MESH_SELF_PEER_ENTRY_STATE_TIMEDOUT 2
+
+/** Mesh Channel Switch Parameter IE data structure */
+BWL_PRE_PACKED_STRUCT struct dot11_mcsp_body {
+ uint8 ttl; /* remaining number of hops allowed for this element. */
+ uint8 flags; /* attributes of this channel switch attempt */
+ uint8 reason; /* reason for the mesh channel switch */
+ uint16 precedence; /* random value in the range 0 to 65535 */
+} BWL_POST_PACKED_STRUCT;
+
+#define DOT11_MCSP_TTL_DEFAULT 1
+#define DOT11_MCSP_FLAG_TRANS_RESTRICT 0x1 /* no transmit except frames with mcsp */
+#define DOT11_MCSP_FLAG_INIT 0x2 /* initiates the channel switch attempt */
+#define DOT11_MCSP_FLAG_REASON 0x4 /* validity of reason code field */
+#define DOT11_MCSP_REASON_REGULATORY 0 /* meet regulatory requirements */
+#define DOT11_MCSP_REASON_UNSPECIFIED 1 /* unspecified reason */
+
+BWL_PRE_PACKED_STRUCT struct dot11_mesh_csp {
+ uint8 id; /* id DOT11_MNG_MESH_CSP_ID */
+ uint8 len; /* length of IE */
+ struct dot11_mcsp_body body; /* body of the ie */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_mesh_csp dot11_mesh_csp_ie_t;
+#define DOT11_MESH_CSP_IE_LEN 5 /* length of mesh channel switch parameter IE body */
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* #ifndef _802_11s_h_ */
diff --git a/bcmdhd.101.10.361.x/include/802.1d.h b/bcmdhd.101.10.361.x/include/802.1d.h
new file mode 100755
index 0000000..a05bb28
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/802.1d.h
@@ -0,0 +1,47 @@
+/*
+ * Fundamental types and constants relating to 802.1D
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _802_1_D_
+#define _802_1_D_
+
+/* 802.1D priority defines */
+#define PRIO_8021D_NONE 2 /* None = - */
+#define PRIO_8021D_BK 1 /* BK - Background */
+#define PRIO_8021D_BE 0 /* BE - Best-effort */
+#define PRIO_8021D_EE 3 /* EE - Excellent-effort */
+#define PRIO_8021D_CL 4 /* CL - Controlled Load */
+#define PRIO_8021D_VI 5 /* Vi - Video */
+#define PRIO_8021D_VO 6 /* Vo - Voice */
+#define PRIO_8021D_NC 7 /* NC - Network Control */
+#define MAXPRIO 7 /* 0-7 */
+#define NUMPRIO (MAXPRIO + 1)
+
+#define ALLPRIO -1 /* All priorities */
+
+/* Converts prio to precedence since the numerical values of
+ * PRIO_8021D_BE and PRIO_8021D_NONE are swapped.
+ */
+#define PRIO2PREC(prio) \
+	(((prio) == PRIO_8021D_NONE || (prio) == PRIO_8021D_BE) ? ((prio) ^ 2) : (prio))
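+
+/* Example (illustrative): PRIO2PREC(PRIO_8021D_BE) == 2 and
+ * PRIO2PREC(PRIO_8021D_NONE) == 0; every other priority maps to itself,
+ * e.g. PRIO2PREC(PRIO_8021D_VO) == 6.
+ */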
+
+#endif /* _802_1_D_ */
diff --git a/bcmdhd.101.10.361.x/include/802.3.h b/bcmdhd.101.10.361.x/include/802.3.h
new file mode 100755
index 0000000..af9de38
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/802.3.h
@@ -0,0 +1,49 @@
+/*
+ * Fundamental constants relating to 802.3
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _802_3_h_
+#define _802_3_h_
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+#define SNAP_HDR_LEN 6 /* 802.3 SNAP header length */
+#define DOT3_OUI_LEN 3 /* 802.3 oui length */
+
+BWL_PRE_PACKED_STRUCT struct dot3_mac_llc_snap_header {
+ uint8 ether_dhost[ETHER_ADDR_LEN]; /* dest mac */
+ uint8 ether_shost[ETHER_ADDR_LEN]; /* src mac */
+ uint16 length; /* frame length incl header */
+ uint8 dsap; /* always 0xAA */
+ uint8 ssap; /* always 0xAA */
+ uint8 ctl; /* always 0x03 */
+ uint8 oui[DOT3_OUI_LEN]; /* RFC1042: 0x00 0x00 0x00
+ * Bridge-Tunnel: 0x00 0x00 0xF8
+ */
+ uint16 type; /* ethertype */
+} BWL_POST_PACKED_STRUCT;
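+
+/* Usage sketch (illustrative only): classifying a SNAP frame as RFC1042 by
+ * its OUI; `ntoh16` is assumed to be available (e.g. from bcmendian.h) and
+ * the helper name is hypothetical.
+ */
+#if 0	/* example only, not compiled */
+static int
+is_rfc1042_snap(const struct dot3_mac_llc_snap_header *h)
+{
+	/* the 802.3 length field must be a length (<= 1500), not an ethertype */
+	return (ntoh16(h->length) <= 1500) &&
+		h->dsap == 0xAA && h->ssap == 0xAA && h->ctl == 0x03 &&
+		h->oui[0] == 0x00 && h->oui[1] == 0x00 && h->oui[2] == 0x00;
+}
+#endif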
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* #ifndef _802_3_h_ */
diff --git a/bcmdhd.101.10.361.x/include/aidmp.h b/bcmdhd.101.10.361.x/include/aidmp.h
new file mode 100755
index 0000000..57c60ae
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/aidmp.h
@@ -0,0 +1,438 @@
+/*
+ * Broadcom AMBA Interconnect definitions.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _AIDMP_H
+#define _AIDMP_H
+
+/* Manufacturer Ids */
+#define MFGID_ARM 0x43b
+#define MFGID_BRCM 0x4bf
+#define MFGID_MIPS 0x4a7
+
+/* Component Classes */
+#define CC_SIM 0
+#define CC_EROM 1
+#define CC_CORESIGHT 9
+#define CC_VERIF 0xb
+#define CC_OPTIMO 0xd
+#define CC_GEN 0xe
+#define CC_PRIMECELL 0xf
+
+/* Enumeration ROM registers */
+#define ER_EROMENTRY 0x000
+#define ER_REMAPCONTROL 0xe00
+#define ER_REMAPSELECT 0xe04
+#define ER_MASTERSELECT 0xe10
+#define ER_ITCR 0xf00
+#define ER_ITIP 0xf04
+
+/* Erom entries */
+#define ER_TAG 0xe
+#define ER_TAG1 0x6
+#define ER_VALID 1
+#define ER_CI 0
+#define ER_MP 2
+#define ER_ADD 4
+#define ER_END 0xe
+#define ER_BAD 0xffffffff
+#define ER_SZ_MAX 4096 /* 4KB */
+
+/* EROM CompIdentA */
+#define CIA_MFG_MASK 0xfff00000u
+#define CIA_MFG_SHIFT 20u
+#define CIA_CID_MASK 0x000fff00u
+#define CIA_CID_SHIFT 8u
+#define CIA_CCL_MASK 0x000000f0u
+#define CIA_CCL_SHIFT 4u
+
+/* EROM CompIdentB */
+#define CIB_REV_MASK 0xff000000u
+#define CIB_REV_SHIFT 24u
+#define CIB_NSW_MASK 0x00f80000u
+#define CIB_NSW_SHIFT 19u
+#define CIB_NMW_MASK 0x0007c000u
+#define CIB_NMW_SHIFT 14u
+#define CIB_NSP_MASK 0x00003e00u
+#define CIB_NSP_SHIFT 9u
+#define CIB_NMP_MASK 0x000001f0u
+#define CIB_NMP_SHIFT 4u
+
+/* EROM MasterPortDesc */
+#define MPD_MUI_MASK 0x0000ff00u
+#define MPD_MUI_SHIFT 8u
+#define MPD_MP_MASK 0x000000f0u
+#define MPD_MP_SHIFT 4u
+
+/* EROM AddrDesc */
+#define AD_ADDR_MASK 0xfffff000u
+#define AD_SP_MASK 0x00000f00u
+#define AD_SP_SHIFT 8u
+#define AD_ST_MASK 0x000000c0u
+#define AD_ST_SHIFT 6u
+#define AD_ST_SLAVE 0x00000000u
+#define AD_ST_BRIDGE 0x00000040u
+#define AD_ST_SWRAP 0x00000080u
+#define AD_ST_MWRAP 0x000000c0u
+#define AD_SZ_MASK 0x00000030u
+#define AD_SZ_SHIFT 4u
+#define AD_SZ_4K 0x00000000u
+#define AD_SZ_8K 0x00000010u
+#define AD_SZ_16K 0x00000020u
+#define AD_SZ_SZD 0x00000030u
+#define AD_AG32 0x00000008u
+#define AD_ADDR_ALIGN 0x00000fffu
+#define AD_SZ_BASE 0x00001000u /* 4KB */
+
+/* EROM SizeDesc */
+#define SD_SZ_MASK 0xfffff000u
+#define SD_SG32 0x00000008u
+#define SD_SZ_ALIGN 0x00000fffu
+
+#define WRAPPER_TIMEOUT_CONFIG 0x4u
+
+#if !defined(_LANGUAGE_ASSEMBLY) && !defined(__ASSEMBLY__)
+
+typedef volatile struct _aidmp {
+ uint32 oobselina30; /* 0x000 */
+ uint32 oobselina74; /* 0x004 */
+ uint32 PAD[6];
+ uint32 oobselinb30; /* 0x020 */
+ uint32 oobselinb74; /* 0x024 */
+ uint32 PAD[6];
+ uint32 oobselinc30; /* 0x040 */
+ uint32 oobselinc74; /* 0x044 */
+ uint32 PAD[6];
+ uint32 oobselind30; /* 0x060 */
+ uint32 oobselind74; /* 0x064 */
+ uint32 PAD[38];
+ uint32 oobselouta30; /* 0x100 */
+ uint32 oobselouta74; /* 0x104 */
+ uint32 PAD[6];
+ uint32 oobseloutb30; /* 0x120 */
+ uint32 oobseloutb74; /* 0x124 */
+ uint32 PAD[6];
+ uint32 oobseloutc30; /* 0x140 */
+ uint32 oobseloutc74; /* 0x144 */
+ uint32 PAD[6];
+ uint32 oobseloutd30; /* 0x160 */
+ uint32 oobseloutd74; /* 0x164 */
+ uint32 PAD[38];
+ uint32 oobsynca; /* 0x200 */
+ uint32 oobseloutaen; /* 0x204 */
+ uint32 PAD[6];
+ uint32 oobsyncb; /* 0x220 */
+ uint32 oobseloutben; /* 0x224 */
+ uint32 PAD[6];
+ uint32 oobsyncc; /* 0x240 */
+ uint32 oobseloutcen; /* 0x244 */
+ uint32 PAD[6];
+ uint32 oobsyncd; /* 0x260 */
+ uint32 oobseloutden; /* 0x264 */
+ uint32 PAD[38];
+ uint32 oobaextwidth; /* 0x300 */
+ uint32 oobainwidth; /* 0x304 */
+ uint32 oobaoutwidth; /* 0x308 */
+ uint32 PAD[5];
+ uint32 oobbextwidth; /* 0x320 */
+ uint32 oobbinwidth; /* 0x324 */
+ uint32 oobboutwidth; /* 0x328 */
+ uint32 PAD[5];
+ uint32 oobcextwidth; /* 0x340 */
+ uint32 oobcinwidth; /* 0x344 */
+ uint32 oobcoutwidth; /* 0x348 */
+ uint32 PAD[5];
+ uint32 oobdextwidth; /* 0x360 */
+ uint32 oobdinwidth; /* 0x364 */
+ uint32 oobdoutwidth; /* 0x368 */
+ uint32 PAD[37];
+ uint32 ioctrlset; /* 0x400 */
+ uint32 ioctrlclear; /* 0x404 */
+ uint32 ioctrl; /* 0x408 */
+ uint32 PAD[61];
+ uint32 iostatus; /* 0x500 */
+ uint32 PAD[127];
+ uint32 ioctrlwidth; /* 0x700 */
+ uint32 iostatuswidth; /* 0x704 */
+ uint32 PAD[62];
+ uint32 resetctrl; /* 0x800 */
+ uint32 resetstatus; /* 0x804 */
+ uint32 resetreadid; /* 0x808 */
+ uint32 resetwriteid; /* 0x80c */
+ uint32 PAD[60];
+ uint32 errlogctrl; /* 0x900 */
+ uint32 errlogdone; /* 0x904 */
+ uint32 errlogstatus; /* 0x908 */
+ uint32 errlogaddrlo; /* 0x90c */
+ uint32 errlogaddrhi; /* 0x910 */
+ uint32 errlogid; /* 0x914 */
+ uint32 errloguser; /* 0x918 */
+ uint32 errlogflags; /* 0x91c */
+ uint32 PAD[56];
+ uint32 intstatus; /* 0xa00 */
+ uint32 PAD[255];
+ uint32 config; /* 0xe00 */
+ uint32 PAD[63];
+ uint32 itcr; /* 0xf00 */
+ uint32 PAD[3];
+ uint32 itipooba; /* 0xf10 */
+ uint32 itipoobb; /* 0xf14 */
+ uint32 itipoobc; /* 0xf18 */
+ uint32 itipoobd; /* 0xf1c */
+ uint32 PAD[4];
+ uint32 itipoobaout; /* 0xf30 */
+ uint32 itipoobbout; /* 0xf34 */
+ uint32 itipoobcout; /* 0xf38 */
+ uint32 itipoobdout; /* 0xf3c */
+ uint32 PAD[4];
+ uint32 itopooba; /* 0xf50 */
+ uint32 itopoobb; /* 0xf54 */
+ uint32 itopoobc; /* 0xf58 */
+ uint32 itopoobd; /* 0xf5c */
+ uint32 PAD[4];
+ uint32 itopoobain; /* 0xf70 */
+ uint32 itopoobbin; /* 0xf74 */
+ uint32 itopoobcin; /* 0xf78 */
+ uint32 itopoobdin; /* 0xf7c */
+ uint32 PAD[4];
+ uint32 itopreset; /* 0xf90 */
+ uint32 PAD[15];
+ uint32 peripherialid4; /* 0xfd0 */
+ uint32 peripherialid5; /* 0xfd4 */
+ uint32 peripherialid6; /* 0xfd8 */
+ uint32 peripherialid7; /* 0xfdc */
+ uint32 peripherialid0; /* 0xfe0 */
+ uint32 peripherialid1; /* 0xfe4 */
+ uint32 peripherialid2; /* 0xfe8 */
+ uint32 peripherialid3; /* 0xfec */
+ uint32 componentid0; /* 0xff0 */
+ uint32 componentid1; /* 0xff4 */
+ uint32 componentid2; /* 0xff8 */
+ uint32 componentid3; /* 0xffc */
+} aidmp_t;
+
+#endif /* !_LANGUAGE_ASSEMBLY && !__ASSEMBLY__ */
+
+/* Out-of-band Router registers */
+#define OOB_BUSCONFIG 0x020
+#define OOB_STATUSA 0x100
+#define OOB_STATUSB 0x104
+#define OOB_STATUSC 0x108
+#define OOB_STATUSD 0x10c
+#define OOB_ENABLEA0 0x200
+#define OOB_ENABLEA1 0x204
+#define OOB_ENABLEA2 0x208
+#define OOB_ENABLEA3 0x20c
+#define OOB_ENABLEB0 0x280
+#define OOB_ENABLEB1 0x284
+#define OOB_ENABLEB2 0x288
+#define OOB_ENABLEB3 0x28c
+#define OOB_ENABLEC0 0x300
+#define OOB_ENABLEC1 0x304
+#define OOB_ENABLEC2 0x308
+#define OOB_ENABLEC3 0x30c
+#define OOB_ENABLED0 0x380
+#define OOB_ENABLED1 0x384
+#define OOB_ENABLED2 0x388
+#define OOB_ENABLED3 0x38c
+#define OOB_ITCR 0xf00
+#define OOB_ITIPOOBA 0xf10
+#define OOB_ITIPOOBB 0xf14
+#define OOB_ITIPOOBC 0xf18
+#define OOB_ITIPOOBD 0xf1c
+#define OOB_ITOPOOBA 0xf30
+#define OOB_ITOPOOBB 0xf34
+#define OOB_ITOPOOBC 0xf38
+#define OOB_ITOPOOBD 0xf3c
+
+/* DMP wrapper registers */
+#define AI_OOBSELINA30 0x000
+#define AI_OOBSELINA74 0x004
+#define AI_OOBSELINB30 0x020
+#define AI_OOBSELINB74 0x024
+#define AI_OOBSELINC30 0x040
+#define AI_OOBSELINC74 0x044
+#define AI_OOBSELIND30 0x060
+#define AI_OOBSELIND74 0x064
+#define AI_OOBSELOUTA30 0x100
+#define AI_OOBSELOUTA74 0x104
+#define AI_OOBSELOUTB30 0x120
+#define AI_OOBSELOUTB74 0x124
+#define AI_OOBSELOUTC30 0x140
+#define AI_OOBSELOUTC74 0x144
+#define AI_OOBSELOUTD30 0x160
+#define AI_OOBSELOUTD74 0x164
+#define AI_OOBSYNCA 0x200
+#define AI_OOBSELOUTAEN 0x204
+#define AI_OOBSYNCB 0x220
+#define AI_OOBSELOUTBEN 0x224
+#define AI_OOBSYNCC 0x240
+#define AI_OOBSELOUTCEN 0x244
+#define AI_OOBSYNCD 0x260
+#define AI_OOBSELOUTDEN 0x264
+#define AI_OOBAEXTWIDTH 0x300
+#define AI_OOBAINWIDTH 0x304
+#define AI_OOBAOUTWIDTH 0x308
+#define AI_OOBBEXTWIDTH 0x320
+#define AI_OOBBINWIDTH 0x324
+#define AI_OOBBOUTWIDTH 0x328
+#define AI_OOBCEXTWIDTH 0x340
+#define AI_OOBCINWIDTH 0x344
+#define AI_OOBCOUTWIDTH 0x348
+#define AI_OOBDEXTWIDTH 0x360
+#define AI_OOBDINWIDTH 0x364
+#define AI_OOBDOUTWIDTH 0x368
+
+#if !defined(IL_BIGENDIAN)
+#define AI_IOCTRLSET 0x400
+#define AI_IOCTRLCLEAR 0x404
+#define AI_IOCTRL 0x408
+#define AI_IOCTRL_BOOKER 0x248 /* Starting from OOBR base - 0x18006000 */
+#define AI_IOSTATUS 0x500
+#define AI_RESETCTRL 0x800
+#define AI_RESETSTATUS 0x804
+#endif /* !IL_BIGENDIAN */
+
+#define AI_IOCTRLWIDTH 0x700
+#define AI_IOSTATUSWIDTH 0x704
+
+#define AI_RESETREADID 0x808
+#define AI_RESETWRITEID 0x80c
+#define AI_ERRLOGCTRL 0x900
+#define AI_ERRLOGDONE 0x904
+#define AI_ERRLOGSTATUS 0x908
+#define AI_ERRLOGADDRLO 0x90c
+#define AI_ERRLOGADDRHI 0x910
+#define AI_ERRLOGID 0x914
+#define AI_ERRLOGUSER 0x918
+#define AI_ERRLOGFLAGS 0x91c
+#define AI_INTSTATUS 0xa00
+#define AI_CONFIG 0xe00
+#define AI_ITCR 0xf00
+#define AI_ITIPOOBA 0xf10
+#define AI_ITIPOOBB 0xf14
+#define AI_ITIPOOBC 0xf18
+#define AI_ITIPOOBD 0xf1c
+#define AI_ITIPOOBAOUT 0xf30
+#define AI_ITIPOOBBOUT 0xf34
+#define AI_ITIPOOBCOUT 0xf38
+#define AI_ITIPOOBDOUT 0xf3c
+#define AI_ITOPOOBA 0xf50
+#define AI_ITOPOOBB 0xf54
+#define AI_ITOPOOBC 0xf58
+#define AI_ITOPOOBD 0xf5c
+#define AI_ITOPOOBAIN 0xf70
+#define AI_ITOPOOBBIN 0xf74
+#define AI_ITOPOOBCIN 0xf78
+#define AI_ITOPOOBDIN 0xf7c
+#define AI_ITOPRESET 0xf90
+#define AI_PERIPHERIALID4 0xfd0
+#define AI_PERIPHERIALID5 0xfd4
+#define AI_PERIPHERIALID6 0xfd8
+#define AI_PERIPHERIALID7 0xfdc
+#define AI_PERIPHERIALID0 0xfe0
+#define AI_PERIPHERIALID1 0xfe4
+#define AI_PERIPHERIALID2 0xfe8
+#define AI_PERIPHERIALID3 0xfec
+#define AI_COMPONENTID0 0xff0
+#define AI_COMPONENTID1 0xff4
+#define AI_COMPONENTID2 0xff8
+#define AI_COMPONENTID3 0xffc
+
+/* resetctrl */
+#define AIRC_RESET 1
+
+/* errlogctrl */
+#define AIELC_TO_EXP_MASK 0x000001f0 /* backplane timeout exponent */
+#define AIELC_TO_EXP_SHIFT 4
+#define AIELC_TO_ENAB_SHIFT 9 /* backplane timeout enable */
+
+/* errlogdone */
+#define AIELD_ERRDONE_MASK 0x3
+
+/* errlogstatus */
+#define AIELS_SLAVE_ERR 0x1
+#define AIELS_TIMEOUT 0x2
+#define AIELS_DECODE 0x3
+#define AIELS_ERROR_MASK 0x3
+#define AIELS_MULTIPLE_ERRORS 0x4
+#define ERRLOGID_AXIID_MASK 0xF
+
+/* errorlog status bit map, for SW use */
+#define AXI_WRAP_STS_NONE (0)
+#define AXI_WRAP_STS_TIMEOUT (1<<0)
+#define AXI_WRAP_STS_SLAVE_ERR (1<<1)
+#define AXI_WRAP_STS_DECODE_ERR (1<<2)
+#define AXI_WRAP_STS_PCI_RD_ERR (1<<3)
+#define AXI_WRAP_STS_WRAP_RD_ERR (1<<4)
+#define AXI_WRAP_STS_SET_CORE_FAIL (1<<5)
+#define AXI_WRAP_STS_MULTIPLE_ERRORS (1<<6)
+
+/* errlogflags */
+#define AXI_ERRLOG_FLAGS_WRITE_REQ (1<<24)
+
+/* config */
+#define AICFG_OOB 0x00000020
+#define AICFG_IOS 0x00000010
+#define AICFG_IOC 0x00000008
+#define AICFG_TO 0x00000004
+#define AICFG_ERRL 0x00000002
+#define AICFG_RST 0x00000001
+
+/* bit defines for AI_OOBSELOUTB74 reg */
+#define OOB_SEL_OUTEN_B_5 15
+#define OOB_SEL_OUTEN_B_6 23
+
+/* AI_OOBSEL for A/B/C/D, 0-7 */
+#define AI_OOBSEL_MASK 0x1F
+#define AI_OOBSEL_0_SHIFT 0
+#define AI_OOBSEL_1_SHIFT 8
+#define AI_OOBSEL_2_SHIFT 16
+#define AI_OOBSEL_3_SHIFT 24
+#define AI_OOBSEL_4_SHIFT 0
+#define AI_OOBSEL_5_SHIFT 8
+#define AI_OOBSEL_6_SHIFT 16
+#define AI_OOBSEL_7_SHIFT 24
+#define AI_IOCTRL_ENABLE_D11_PME (1 << 14)
+
+/* bit Specific for AI_OOBSELOUTB30 */
+#define OOB_B_ALP_REQUEST 0
+#define OOB_B_HT_REQUEST 1
+#define OOB_B_ILP_REQUEST 2
+#define OOB_B_ALP_AVAIL_REQUEST 3
+#define OOB_B_HT_AVAIL_REQUEST 4
+
+/* mask for interrupts from each core to wrapper */
+#define AI_OOBSELINA74_CORE_MASK 0x80808080
+#define AI_OOBSELINA30_CORE_MASK 0x80808080
+
+#define AI_OOBSEL_30_0_INTR_MASK 0x00000080
+#define AI_OOBSEL_30_3_INTR_MASK 0x80000000
+
+#define AI_OOBSEL_74_4_INTR_MASK 0x00000080
+#define AI_OOBSEL_74_7_INTR_MASK 0x80000000
+
+/* axi id mask in the error log id */
+#define AI_ERRLOGID_AXI_ID_MASK 0x07
+#define AI_ERRLOGID_AXI_ID_MASK_EXTD 0x1F
+
+#endif /* _AIDMP_H */
diff --git a/bcmdhd.101.10.361.x/include/bcm_fwtrace.h b/bcmdhd.101.10.361.x/include/bcm_fwtrace.h
new file mode 100755
index 0000000..ae0836a
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/bcm_fwtrace.h
@@ -0,0 +1,111 @@
+
+/*
+ * Firmware trace implementation: common header file shared between DHD and the firmware.
+ *
+ *
+ * Broadcom Proprietary and Confidential. Copyright (C) 2020,
+ * All Rights Reserved.
+ *
+ * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom;
+ * the contents of this file may not be disclosed to third parties,
+ * copied or duplicated in any form, in whole or in part, without
+ * the prior written permission of Broadcom.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Secret:>>
+ */
+
+#ifndef _bcm_fwtrace_h
+#define _bcm_fwtrace_h
+
+#include <bcmpcie.h>
+#include <bcmmsgbuf.h>
+
+#define FWTRACE_VERSION 1u
+
+/*
+ * Number of trace entries per trace buffer.
+ * Both DHD and FW must use same number.
+ */
+#define FWTRACE_NUM_ENTRIES (2u * 1024u) /* 2K entries; must be a power of 2 */
+/*
+ * Number of buffers provided by the host.
+ * DHD may allocate a smaller number of trace buffers based on contiguous memory availability.
+ */
+#define FWTRACE_NUM_HOST_BUFFERS 32u
+
+/* Magic value to differentiate regular trace data from other blobs */
+#define FWTRACE_BLOB_MAGIC (0xFFu)
+#define FWTRACE_BLOB_MGIC_SHIFT (24u)
+
+/* The lower 24 bits of fwtrace_entry->func_ptr are used to push different types of
+ * information to the host, such as the ACK bitmap or the interrupts the DPC is
+ * going to process.
+ */
+#define FWTRACE_BLOB_TYPE_MASK (0xFFFFFFu)
+#define FWTRACE_BLOB_TYPE_SHIFT (0)
+
+#define FWTRACE_BLOB_TYPE_NUM_PKTS (0x1u)
+#define FWTRACE_BLOB_TYPE_ACK_BMAP1 (0x2u) /* Ack bits (0-31) */
+#define FWTRACE_BLOB_TYPE_ACK_BMAP2 (0x4u) /* Ack bits (32-63) */
+#define FWTRACE_BLOB_TYPE_ACK_BMAP3 (0x8u) /* Ack bits (64-95) */
+#define FWTRACE_BLOB_TYPE_ACK_BMAP4 (0x10u) /* Ack bits (96-127) */
+#define FWTRACE_BLOB_TYPE_INTR1 (0x20u) /* interrupts the DPC is going to process */
+#define FWTRACE_BLOB_TYPE_INTR2 (0x40u) /* interrupts the DPC is going to process */
+/* The blob data for LFRAGS_INFO will contain
+ * Bit31-16: Available buffer/lfrags info
+ * Bit15-0 : # of lfrags requested by FW in the fetch request
+ */
+#define FWTRACE_BLOB_TYPE_LFRAGS_INFO (0x80u) /* Available and fetch requested lfrags */
+
+#define FWTRACE_BLOB_DATA_MASK (0xFFFFFu)
+
+#define FWTRACE_BLOB_ADD_CUR (0) /* updates within the existing trace entry */
+#define FWTRACE_BLOB_ADD_NEW (1u) /* creates a new trace entry */
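+
+/* Illustrative sketch (assumed usage, not from the original sources): a blob
+ * entry overloads the func_ptr word by combining the magic with a type:
+ *
+ *	uint32 blob_word = (FWTRACE_BLOB_MAGIC << FWTRACE_BLOB_MGIC_SHIFT) |
+ *	        ((FWTRACE_BLOB_TYPE_NUM_PKTS << FWTRACE_BLOB_TYPE_SHIFT) &
+ *	         FWTRACE_BLOB_TYPE_MASK);
+ */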
+
+/*
+ * Host sends host memory location to FW via iovar.
+ * FW will push trace information here.
+ */
+typedef struct fwtrace_hostaddr_info {
+ bcm_addr64_t haddr; /* host address for the firmware to DMA trace data */
+ uint32 buf_len;
+ uint32 num_bufs; /* Number of trace buffers */
+} fwtrace_hostaddr_info_t;
+
+/*
+ * Each trace info buffer pushed to the host will have this header.
+ */
+typedef struct fwtrace_dma_header_info {
+ uint16 length; /* length in bytes */
+ uint16 seq_num; /* sequence number */
+ uint32 version;
+ uint32 hostmem_addr;
+} fwtrace_dma_header_info_t;
+
+/*
+ * Content of each trace entry
+ */
+typedef struct fwtrace_entry {
+ uint32 func_ptr;
+ /* How pkts_cycles is used:
+ * Bit31-23: (if present) the number of packets processed by the
+ * current function
+ * Bit22-1 : CPU cycles, in units of 2 cycles; multiply by 2 to get
+ * the actual cycle count
+ * Bit0 : indicates whether this entry is valid
+ */
+ uint32 pkts_cycles;
+} fwtrace_entry_t;
+
+#define FWTRACE_CYCLES_VALID (1u << 0u)
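+
+/* Illustrative sketch (assumed decode, derived from the bit layout above):
+ * unpacking a trace entry 'e' on the host side:
+ *
+ *	if (e->pkts_cycles & FWTRACE_CYCLES_VALID) {
+ *		uint32 cycles = ((e->pkts_cycles >> 1) & 0x3FFFFFu) * 2u;
+ *		uint32 npkts = e->pkts_cycles >> 23;
+ *	}
+ */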
+
+/*
+ * Format of firmware trace buffer pushed to host memory
+ */
+typedef struct fwtrace_buf {
+ fwtrace_dma_header_info_t info; /* includes the sequence number and the length */
+ fwtrace_entry_t entry[FWTRACE_NUM_ENTRIES];
+} fwtrace_buf_t;
+
+void fwtracing_add_blob(uint32 update_type, uint32 trace_type, uint32 blob);
+#endif /* _bcm_fwtrace_h */
diff --git a/bcmdhd.101.10.361.x/include/bcm_l2_filter.h b/bcmdhd.101.10.361.x/include/bcm_l2_filter.h
new file mode 100755
index 0000000..d594285
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/bcm_l2_filter.h
@@ -0,0 +1,99 @@
+/*
+ * L2 Filter handling functions
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ *
+ */
+#ifndef _l2_filter_h_
+#define _l2_filter_h_
+
+/* Proxy ARP processing return values */
+#define PARP_DROP 0
+#define PARP_NOP 1
+#define PARP_TAKEN 2
+/* Adjust for ETHER_HDR_LEN pull in linux
+ * which makes pkt nonaligned
+ */
+#define ALIGN_ADJ_BUFLEN 2
+
+#define BCM_PARP_TABLE_SIZE 32 /* proxyarp hash table bucket size */
+#define BCM_PARP_TABLE_MASK 0x1f /* proxyarp hash table index mask */
+#define BCM_PARP_TABLE_INDEX(val) (val & BCM_PARP_TABLE_MASK)
+#define BCM_PARP_TIMEOUT 600 /* proxyarp cache entry timeout duration (10 min) */
+
+#define BCM_PARP_IS_TIMEOUT(pub_tick, entry) \
+ (pub_tick - entry->used > BCM_PARP_TIMEOUT)
+
+#define BCM_PARP_ANNOUNCE_WAIT 2 /* proxyarp announce wait duration (2 sec) */
+
+#define BCM_PARP_ANNOUNCE_WAIT_REACH(pub_tick, entry) \
+ (pub_tick - entry->used > BCM_PARP_ANNOUNCE_WAIT)
+
+#define BCM_ARP_TABLE_UPDATE_TIMEOUT 100
+
+/* Taken from wlc_tdls.h for block_tdls iovar */
+#define TDLS_PAYLOAD_TYPE 2
+#define TDLS_PAYLOAD_TYPE_LEN 1
+
+/* TDLS Action Category code */
+#define TDLS_ACTION_CATEGORY_CODE 12
+
+typedef struct parp_entry {
+ struct parp_entry *next;
+ uint32 used; /* time stamp */
+ struct ether_addr ea;
+ bcm_tlv_t ip;
+} parp_entry_t;
+
+typedef struct arp_table arp_table_t;
+
+extern int bcm_l2_filter_gratuitous_arp(osl_t *osh, void *pktbuf);
+extern int bcm_l2_filter_block_ping(osl_t *osh, void *pktbuf);
+extern int bcm_l2_filter_get_mac_addr_dhcp_pkt(osl_t *osh, void *pktbuf,
+ int ifidx, uint8** addr);
+
+arp_table_t* init_l2_filter_arp_table(osl_t* osh);
+void deinit_l2_filter_arp_table(osl_t* osh, arp_table_t* ptable);
+int get_pkt_ether_type(osl_t *osh, void *skb, uint8 **data_ptr,
+ int *len_ptr, uint16 *et_ptr, bool *snap_ptr);
+int get_pkt_ip_type(osl_t *osh, void *pktbuf,
+ uint8 **data_ptr, int *len_ptr, uint8 *prot_ptr);
+int bcm_l2_filter_parp_addentry(osl_t *osh, arp_table_t* arp_tbl, struct ether_addr *ea,
+ uint8 *ip, uint8 ip_ver, bool cached, unsigned int entry_tickcnt);
+int bcm_l2_filter_parp_delentry(osl_t *osh, arp_table_t* arp_tbl, struct ether_addr *ea,
+ uint8 *ip, uint8 ip_ver, bool cached);
+parp_entry_t *bcm_l2_filter_parp_findentry(arp_table_t* arp_tbl, uint8 *ip,
+ uint8 ip_ver, bool cached, unsigned int entry_tickcnt);
+
+int bcm_l2_filter_parp_modifyentry(arp_table_t* arp_tbl, struct ether_addr *ea,
+ uint8 *ip, uint8 ip_ver, bool cached, unsigned int entry_tickcnt);
+extern void bcm_l2_filter_arp_table_update(osl_t *osh, arp_table_t* arp_tbl, bool all,
+ uint8 *del_ea, bool periodic, unsigned int tickcnt);
+
+void *bcm_l2_filter_proxyarp_alloc_reply(osl_t* osh, uint16 pktlen, struct ether_addr *src_ea,
+ struct ether_addr *dst_ea, uint16 ea_type, bool snap, void **p);
+void bcm_l2_filter_parp_get_smac(arp_table_t* ptable, void* smac);
+void bcm_l2_filter_parp_get_cmac(arp_table_t* ptable, void* cmac);
+void bcm_l2_filter_parp_set_smac(arp_table_t* ptable, void* smac);
+void bcm_l2_filter_parp_set_cmac(arp_table_t* ptable, void* cmac);
+bcm_tlv_t* parse_nd_options(void *buf, int buflen, uint key);
+uint16 calc_checksum(uint8 *src_ipa, uint8 *dst_ipa, uint32 ul_len, uint8 prot, uint8 *ul_data);
+extern int bcm_l2_filter_block_tdls(osl_t *osh, void *pktbuf);
+#endif /* _l2_filter_h_ */
diff --git a/bcmdhd.101.10.361.x/include/bcm_mpool_pub.h b/bcmdhd.101.10.361.x/include/bcm_mpool_pub.h
new file mode 100755
index 0000000..76c4ce8
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/bcm_mpool_pub.h
@@ -0,0 +1,344 @@
+/*
+ * Memory pools library, Public interface
+ *
+ * API Overview
+ *
+ * This package provides a memory allocation subsystem based on pools of
+ * homogeneous objects.
+ *
+ * Instrumentation is available for reporting memory utilization both
+ * on a per-data-structure basis and system wide.
+ *
+ * There are two main types defined in this API.
+ *
+ * pool manager: A singleton object that acts as a factory for
+ * pool allocators. It also is used for global
+ * instrumentation, such as reporting all blocks
+ * in use across all data structures. The pool manager
+ * creates and provides individual memory pools
+ * upon request to application code.
+ *
+ * memory pool: An object for allocating homogeneous memory blocks.
+ *
+ * Global identifiers in this module use the following prefixes:
+ * bcm_mpm_* Memory pool manager
+ * bcm_mp_* Memory pool
+ *
+ * There are two main types of memory pools:
+ *
+ * prealloc: The contiguous memory block of objects can either be supplied
+ * by the client or malloc'ed by the memory manager. The objects are
+ * allocated out of a block of memory and freed back to the block.
+ *
+ * heap: The memory pool allocator uses the heap (malloc/free) for memory.
+ * In this case, the pool allocator is just providing statistics
+ * and instrumentation on top of the heap, without modifying the heap
+ * allocation implementation.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _BCM_MPOOL_PUB_H
+#define _BCM_MPOOL_PUB_H 1
+
+#include <typedefs.h> /* needed for uint16 */
+
+/*
+**************************************************************************
+*
+* Type definitions, handles
+*
+**************************************************************************
+*/
+
+/* Forward declaration of OSL handle. */
+struct osl_info;
+
+/* Forward declaration of string buffer. */
+struct bcmstrbuf;
+
+/*
+ * Opaque type definition for the pool manager handle. This object is used for global
+ * memory pool operations such as obtaining a new pool, deleting a pool, iterating and
+ * instrumentation/debugging.
+ */
+struct bcm_mpm_mgr;
+typedef struct bcm_mpm_mgr *bcm_mpm_mgr_h;
+
+/*
+ * Opaque type definition for an instance of a pool. This handle is used for allocating
+ * and freeing memory through the pool, as well as management/instrumentation on this
+ * specific pool.
+ */
+struct bcm_mp_pool;
+typedef struct bcm_mp_pool *bcm_mp_pool_h;
+
+/*
+ * To make instrumentation more readable, every memory
+ * pool must have a readable name. Pool names are up to
+ * 8 bytes including '\0' termination. (7 printable characters.)
+ */
+#define BCM_MP_NAMELEN 8
+
+/*
+ * Type definition for pool statistics.
+ */
+typedef struct bcm_mp_stats {
+ char name[BCM_MP_NAMELEN]; /* Name of this pool. */
+ unsigned int objsz; /* Object size allocated in this pool */
+ uint16 nobj; /* Total number of objects in this pool */
+ uint16 num_alloc; /* Number of objects currently allocated */
+ uint16 high_water; /* Max number of allocated objects. */
+ uint16 failed_alloc; /* Failed allocations. */
+} bcm_mp_stats_t;
+
+/*
+**************************************************************************
+*
+* API Routines on the pool manager.
+*
+**************************************************************************
+*/
+
+/*
+ * bcm_mpm_init() - initialize the whole memory pool system.
+ *
+ * Parameters:
+ * osh: INPUT Operating system handle. Needed for heap memory allocation.
+ * max_pools: INPUT Maximum number of mempools supported.
+ * mgrp: OUTPUT The handle is written with the new pool manager object/handle.
+ *
+ * Returns:
+ * BCME_OK Object initialized successfully. May be used.
+ * BCME_NOMEM Initialization failed due to no memory. Object must not be used.
+ */
+int bcm_mpm_init(struct osl_info *osh, int max_pools, bcm_mpm_mgr_h *mgrp);
+
+/*
+ * bcm_mpm_deinit() - de-initialize the whole memory pool system.
+ *
+ * Parameters:
+ * mgrp: INPUT Pointer to the pool manager handle.
+ *
+ * Returns:
+ * BCME_OK Memory pool manager successfully de-initialized.
+ * other Indicated error occurred during de-initialization.
+ */
+int bcm_mpm_deinit(bcm_mpm_mgr_h *mgrp);
+
+/*
+ * bcm_mpm_create_prealloc_pool() - Create a new pool for fixed size objects. The
+ * pool uses a contiguous block of pre-alloced
+ * memory. The memory block may either be provided
+ * by the client or dynamically allocated by the
+ * pool manager.
+ *
+ * Parameters:
+ * mgr: INPUT The handle to the pool manager
+ * obj_sz: INPUT Size of objects that will be allocated by the new pool
+ * Must be >= sizeof(void *).
+ * nobj: INPUT Maximum number of concurrently existing objects to support
+ * memstart INPUT Pointer to the memory to use, or NULL to malloc()
+ * memsize INPUT Number of bytes referenced from memstart (for error checking).
+ * Must be 0 if 'memstart' is NULL.
+ * poolname INPUT For instrumentation, the name of the pool
+ * newp: OUTPUT The handle for the new pool, if creation is successful
+ *
+ * Returns:
+ * BCME_OK Pool created ok.
+ * other Pool not created due to indicated error. newp is set to NULL.
+ *
+ *
+ */
+int bcm_mpm_create_prealloc_pool(bcm_mpm_mgr_h mgr,
+ unsigned int obj_sz,
+ int nobj,
+ void *memstart,
+ unsigned int memsize,
+ const char poolname[BCM_MP_NAMELEN],
+ bcm_mp_pool_h *newp);
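+
+/* Illustrative sketch (assumed usage): creating a prealloc pool over a
+ * client-supplied slab sized via bcm_mpm_get_obj_size() (declared below);
+ * 'my_obj_t', 'NOBJ', 'mgr' and 'osh' are assumed:
+ *
+ *	unsigned int padded;
+ *	bcm_mp_pool_h pool;
+ *	bcm_mpm_get_obj_size(mgr, sizeof(my_obj_t), &padded);
+ *	void *slab = MALLOC(osh, padded * NOBJ);
+ *	if (bcm_mpm_create_prealloc_pool(mgr, sizeof(my_obj_t), NOBJ, slab,
+ *	        padded * NOBJ, "myobj", &pool) == BCME_OK) {
+ *		my_obj_t *obj = (my_obj_t *)bcm_mp_alloc(pool);
+ *		bcm_mp_free(pool, obj);
+ *	}
+ */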
+
+/*
+ * bcm_mpm_delete_prealloc_pool() - Delete a memory pool. This should only be called after
+ * all memory objects have been freed back to the pool.
+ *
+ * Parameters:
+ * mgr: INPUT The handle to the pools manager
+ * pool: INPUT The handle of the pool to delete
+ *
+ * Returns:
+ * BCME_OK Pool deleted ok.
+ * other Pool not deleted due to indicated error.
+ *
+ */
+int bcm_mpm_delete_prealloc_pool(bcm_mpm_mgr_h mgr, bcm_mp_pool_h *poolp);
+
+/*
+ * bcm_mpm_create_heap_pool() - Create a new pool for fixed size objects. The memory
+ * pool allocator uses the heap (malloc/free) for memory.
+ * In this case, the pool allocator is just providing
+ * statistics and instrumentation on top of the heap,
+ * without modifying the heap allocation implementation.
+ *
+ * Parameters:
+ * mgr: INPUT The handle to the pool manager
+ * obj_sz: INPUT Size of objects that will be allocated by the new pool
+ * poolname INPUT For instrumentation, the name of the pool
+ * newp: OUTPUT The handle for the new pool, if creation is successful
+ *
+ * Returns:
+ * BCME_OK Pool created ok.
+ * other Pool not created due to indicated error. newp is set to NULL.
+ *
+ *
+ */
+int bcm_mpm_create_heap_pool(bcm_mpm_mgr_h mgr, unsigned int obj_sz,
+ const char poolname[BCM_MP_NAMELEN],
+ bcm_mp_pool_h *newp);
+
+/*
+ * bcm_mpm_delete_heap_pool() - Delete a memory pool. This should only be called after
+ * all memory objects have been freed back to the pool.
+ *
+ * Parameters:
+ * mgr: INPUT The handle to the pools manager
+ * pool: INPUT The handle of the pool to delete
+ *
+ * Returns:
+ * BCME_OK Pool deleted ok.
+ * other Pool not deleted due to indicated error.
+ *
+ */
+int bcm_mpm_delete_heap_pool(bcm_mpm_mgr_h mgr, bcm_mp_pool_h *poolp);
+
+/*
+ * bcm_mpm_stats() - Return stats for all pools
+ *
+ * Parameters:
+ * mgr: INPUT The handle to the pools manager
+ * stats: OUTPUT Array of pool statistics.
+ * nentries: MOD Max elements in 'stats' array on INPUT. Actual number
+ * of array elements copied to 'stats' on OUTPUT.
+ *
+ * Returns:
+ * BCME_OK Ok
+ * other Error getting stats.
+ *
+ */
+int bcm_mpm_stats(bcm_mpm_mgr_h mgr, bcm_mp_stats_t *stats, int *nentries);
+
+/*
+ * bcm_mpm_dump() - Display statistics on all pools
+ *
+ * Parameters:
+ * mgr: INPUT The handle to the pools manager
+ * b: OUTPUT Output buffer.
+ *
+ * Returns:
+ * BCME_OK Ok
+ * other Error during dump.
+ *
+ */
+int bcm_mpm_dump(bcm_mpm_mgr_h mgr, struct bcmstrbuf *b);
+
+/*
+ * bcm_mpm_get_obj_size() - The size of memory objects may need to be padded to
+ * compensate for alignment requirements of the objects.
+ * This function provides the padded object size. If clients
+ * pre-allocate a memory slab for a memory pool, the
+ * padded object size should be used by the client to allocate
+ * the memory slab (in order to provide sufficient space for
+ * the maximum number of objects).
+ *
+ * Parameters:
+ * mgr: INPUT The handle to the pools manager.
+ * obj_sz: INPUT Input object size.
+ * padded_obj_sz: OUTPUT Padded object size.
+ *
+ * Returns:
+ * BCME_OK Ok
+ * BCME_BADARG Bad arguments.
+ *
+ */
+int bcm_mpm_get_obj_size(bcm_mpm_mgr_h mgr, unsigned int obj_sz, unsigned int *padded_obj_sz);
+
+/*
+***************************************************************************
+*
+* API Routines on a specific pool.
+*
+***************************************************************************
+*/
+
+/*
+ * bcm_mp_alloc() - Allocate a memory pool object.
+ *
+ * Parameters:
+ * pool: INPUT The handle to the pool.
+ *
+ * Returns:
+ * A pointer to the new object. NULL on error.
+ *
+ */
+void* bcm_mp_alloc(bcm_mp_pool_h pool);
+
+/*
+ * bcm_mp_free() - Free a memory pool object.
+ *
+ * Parameters:
+ * pool: INPUT The handle to the pool.
+ * objp: INPUT A pointer to the object to free.
+ *
+ * Returns:
+ * BCME_OK Ok
+ * other Error during free.
+ *
+ */
+int bcm_mp_free(bcm_mp_pool_h pool, void *objp);
+
+/*
+ * bcm_mp_stats() - Return stats for this pool
+ *
+ * Parameters:
+ * pool: INPUT The handle to the pool
+ * stats: OUTPUT Pool statistics
+ *
+ * Returns:
+ * BCME_OK Ok
+ * other Error getting statistics.
+ *
+ */
+void bcm_mp_stats(bcm_mp_pool_h pool, bcm_mp_stats_t *stats);
+
+/*
+ * bcm_mp_dump() - Dump a pool
+ *
+ * Parameters:
+ * pool: INPUT The handle to the pool
+ * b OUTPUT Output buffer
+ *
+ * Returns:
+ * BCME_OK Ok
+ * other Error during dump.
+ *
+ */
+int bcm_mp_dump(bcm_mp_pool_h pool, struct bcmstrbuf *b);
+
+#endif /* _BCM_MPOOL_PUB_H */
diff --git a/bcmdhd.101.10.361.x/include/bcm_ring.h b/bcmdhd.101.10.361.x/include/bcm_ring.h
new file mode 100755
index 0000000..0a45432
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/bcm_ring.h
@@ -0,0 +1,585 @@
+/*
+ * bcm_ring.h : Ring context abstraction
+ * The ring context tracks the WRITE and READ indices where elements may be
+ * produced and consumed respectively. All elements in the ring need to be
+ * fixed size.
+ *
+ * NOTE: A ring of size N, may only hold N-1 elements.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+#ifndef __bcm_ring_included__
+#define __bcm_ring_included__
+/*
+ * API Notes:
+ *
+ * Ring manipulation API allows for:
+ * Pending operations: Often, before some work can be completed, several
+ * resources must be available, e.g. space for production in a ring.
+ * Approaches such as #1) reserving resources one by one and returning them
+ * if another required resource is not available, or #2) employing a two-pass
+ * algorithm that first tests whether all resources are available, have an
+ * impact on performance-critical code. The approach taken here is more akin
+ * to approach #2, where a test for resource availability essentially also
+ * provides the index for production in an uncommitted state.
+ * The same approach is taken on the consumer side.
+ *
+ * - Pending production: Fetch the next index where a ring element may be
+ * produced. The caller may not commit the WRITE of the element.
+ * - Pending consumption: Fetch the next index where a ring element may be
+ * consumed. The caller may not commit the READ of the element.
+ *
+ * Producer side API:
+ * - bcm_ring_is_full : Test whether ring is full
+ * - bcm_ring_prod : Fetch index where an element may be produced (commit)
+ * - bcm_ring_prod_pend: Fetch index where an element may be produced (pending)
+ * - bcm_ring_prod_done: Commit a previous pending produce fetch
+ * - bcm_ring_prod_avail: Fetch total number free slots eligible for production
+ *
+ * Consumer side API:
+ * - bcm_ring_is_empty : Test whether ring is empty
+ * - bcm_ring_cons : Fetch index where an element may be consumed (commit)
+ * - bcm_ring_cons_pend: Fetch index where an element may be consumed (pending)
+ * - bcm_ring_cons_done: Commit a previous pending consume fetch
+ * - bcm_ring_cons_avail: Fetch total number elements eligible for consumption
+ *
+ * - bcm_ring_sync_read: Sync read offset in peer ring, from local ring
+ * - bcm_ring_sync_write: Sync write offset in peer ring, from local ring
+ *
+ * +----------------------------------------------------------------------------
+ *
+ * Design Notes:
+ * Following items are not tracked in a ring context (design decision)
+ * - width of a ring element.
+ * - depth of the ring.
+ * - base of the buffer, where the elements are stored.
+ * - count of number of free slots in the ring
+ *
+ * Implementation Notes:
+ * - When BCM_RING_DEBUG is enabled, an explicit bcm_ring_init() is required.
+ * - BCM_RING_EMPTY and BCM_RING_FULL are (-1)
+ *
+ * +----------------------------------------------------------------------------
+ *
+ * Usage Notes:
+ * An application may incarnate a ring of some fixed sized elements, by defining
+ * - a ring data buffer to store the ring elements.
+ * - depth of the ring (max number of elements managed by ring context).
+ * Preferably, the depth is represented as a constant.
+ * - width of a ring element: to be used in pointer arithmetic with the ring's
+ * data buffer base and an index to fetch the ring element.
+ *
+ * Use bcm_workq_t to instantiate a pair of workq constructs, one for the
+ * producer and the other for the consumer, both pointing to the same circular
+ * buffer. The producer may operate on its own local workq and flush the write
+ * index to the consumer. Likewise, the consumer may use its local workq and
+ * flush the read index to the producer. This way we do not repeatedly access
+ * the peer's context. The two peers may reside on different CPU cores with a
+ * private L1 data cache.
+ * +----------------------------------------------------------------------------
+ *
+ * -*- Mode: C; tab-width: 4; indent-tabs-mode: t; c-basic-offset: 4 -*-
+ * vim: set ts=4 noet sw=4 tw=80:
+ *
+ * +----------------------------------------------------------------------------
+ */
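+
+/* Illustrative sketch (assumed usage): one produce operation in "pend" mode;
+ * 'ring', 'buf', 'DEPTH', 'elem_t' and 'new_elem' are assumed:
+ *
+ *	int pend, idx = bcm_ring_prod_pend(&ring, &pend, DEPTH);
+ *	if (idx != BCM_RING_FULL) {
+ *		((elem_t *)buf)[idx] = new_elem;	(WRITE the element)
+ *		bcm_ring_prod_done(&ring, pend);	(commit the production)
+ *	}
+ */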
+
+#ifdef ____cacheline_aligned
+#define __ring_aligned ____cacheline_aligned
+#else
+#define __ring_aligned
+#endif
+
+/* Conditional compile for debug */
+/* #define BCM_RING_DEBUG */
+
+#define BCM_RING_EMPTY (-1)
+#define BCM_RING_FULL (-1)
+#define BCM_RING_NULL ((bcm_ring_t *)NULL)
+
+#if defined(BCM_RING_DEBUG)
+#define RING_ASSERT(exp) ASSERT(exp)
+#define BCM_RING_IS_VALID(ring) (((ring) != BCM_RING_NULL) && \
+ ((ring)->self == (ring)))
+#else /* ! BCM_RING_DEBUG */
+#define RING_ASSERT(exp) do {} while (0)
+#define BCM_RING_IS_VALID(ring) ((ring) != BCM_RING_NULL)
+#endif /* ! BCM_RING_DEBUG */
+
+#define BCM_RING_SIZE_IS_VALID(ring_size) ((ring_size) > 0)
+
+/*
+ * +----------------------------------------------------------------------------
+ * Ring Context
+ * +----------------------------------------------------------------------------
+ */
+typedef struct bcm_ring { /* Ring context */
+#if defined(BCM_RING_DEBUG)
+ struct bcm_ring *self; /* ptr to self for IS VALID test */
+#endif /* BCM_RING_DEBUG */
+ int write __ring_aligned; /* WRITE index in a circular ring */
+ int read __ring_aligned; /* READ index in a circular ring */
+} bcm_ring_t;
+
+static INLINE void bcm_ring_init(bcm_ring_t *ring);
+static INLINE void bcm_ring_copy(bcm_ring_t *to, bcm_ring_t *from);
+static INLINE bool bcm_ring_is_empty(const bcm_ring_t *ring);
+
+static INLINE int __bcm_ring_next_write(const bcm_ring_t *ring, const int ring_size);
+
+static INLINE bool __bcm_ring_full(const bcm_ring_t *ring, int next_write);
+static INLINE bool bcm_ring_is_full(bcm_ring_t *ring, const int ring_size);
+
+static INLINE void bcm_ring_prod_done(bcm_ring_t *ring, int write);
+static INLINE int bcm_ring_prod_pend(const bcm_ring_t *ring, int *pend_write,
+ const int ring_size);
+static INLINE int bcm_ring_prod(bcm_ring_t *ring, const int ring_size);
+
+static INLINE void bcm_ring_cons_done(bcm_ring_t *ring, int read);
+static INLINE int bcm_ring_cons_pend(const bcm_ring_t *ring, int *pend_read,
+ const int ring_size);
+static INLINE int bcm_ring_cons(bcm_ring_t *ring, const int ring_size);
+
+static INLINE void bcm_ring_sync_read(bcm_ring_t *peer, const bcm_ring_t *self);
+static INLINE void bcm_ring_sync_write(bcm_ring_t *peer, const bcm_ring_t *self);
+
+static INLINE int bcm_ring_prod_avail(const bcm_ring_t *ring,
+ const int ring_size);
+static INLINE int bcm_ring_cons_avail(const bcm_ring_t *ring,
+ const int ring_size);
+static INLINE void bcm_ring_cons_all(bcm_ring_t *ring);
+
+/**
+ * bcm_ring_init - initialize a ring context.
+ * @ring: pointer to a ring context
+ */
+static INLINE void
+bcm_ring_init(bcm_ring_t *ring)
+{
+ ASSERT(ring != (bcm_ring_t *)NULL);
+#if defined(BCM_RING_DEBUG)
+ ring->self = ring;
+#endif /* BCM_RING_DEBUG */
+ ring->write = 0;
+ ring->read = 0;
+}
+
+/**
+ * bcm_ring_copy - copy construct a ring
+ * @to: pointer to the new ring context
+ * @from: pointer to orig ring context
+ */
+static INLINE void
+bcm_ring_copy(bcm_ring_t *to, bcm_ring_t *from)
+{
+ bcm_ring_init(to);
+
+ to->write = from->write;
+ to->read = from->read;
+}
+
+/**
+ * bcm_ring_is_empty - "Boolean" test whether ring is empty.
+ * @ring: pointer to a ring context
+ *
+ * PS. does not return BCM_RING_EMPTY value.
+ */
+static INLINE bool
+bcm_ring_is_empty(const bcm_ring_t *ring)
+{
+ RING_ASSERT(BCM_RING_IS_VALID(ring));
+ return (ring->read == ring->write);
+}
+
+/**
+ * __bcm_ring_next_write - determine the index where the next write may occur
+ * (with wrap-around).
+ * @ring: pointer to a ring context
+ * @ring_size: size of the ring
+ *
+ * PRIVATE INTERNAL USE ONLY.
+ */
+static INLINE int
+__bcm_ring_next_write(const bcm_ring_t *ring, const int ring_size)
+{
+ RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size));
+ return ((ring->write + 1) % ring_size);
+}
+
+/**
+ * __bcm_ring_full - support function for ring full test.
+ * @ring: pointer to a ring context
+ * @next_write: next location in ring where an element is to be produced
+ *
+ * PRIVATE INTERNAL USE ONLY.
+ */
+static INLINE bool
+__bcm_ring_full(const bcm_ring_t *ring, int next_write)
+{
+ return (next_write == ring->read);
+}
+
+/**
+ * bcm_ring_is_full - "Boolean" test whether a ring is full.
+ * @ring: pointer to a ring context
+ * @ring_size: size of the ring
+ *
+ * PS. does not return BCM_RING_FULL value.
+ */
+static INLINE bool
+bcm_ring_is_full(bcm_ring_t *ring, const int ring_size)
+{
+ int next_write;
+ RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size));
+ next_write = __bcm_ring_next_write(ring, ring_size);
+ return __bcm_ring_full(ring, next_write);
+}
+
+/**
+ * bcm_ring_prod_done - commit a previously pending index where production
+ * was requested.
+ * @ring: pointer to a ring context
+ * @write: index into ring up to where production was done.
+ * +----------------------------------------------------------------------------
+ */
+static INLINE void
+bcm_ring_prod_done(bcm_ring_t *ring, int write)
+{
+ RING_ASSERT(BCM_RING_IS_VALID(ring));
+ ring->write = write;
+}
+
+/**
+ * bcm_ring_prod_pend - Fetch in "pend" mode, the index where an element may be
+ * produced.
+ * @ring: pointer to a ring context
+ * @pend_write: next index, after the returned index
+ * @ring_size: size of the ring
+ */
+static INLINE int
+bcm_ring_prod_pend(const bcm_ring_t *ring, int *pend_write, const int ring_size)
+{
+ int rtn;
+ RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size));
+ *pend_write = __bcm_ring_next_write(ring, ring_size);
+ if (__bcm_ring_full(ring, *pend_write)) {
+ *pend_write = BCM_RING_FULL;
+ rtn = BCM_RING_FULL;
+ } else {
+ /* production is not committed, caller needs to explicitly commit */
+ rtn = ring->write;
+ }
+ return rtn;
+}
+
+/**
+ * bcm_ring_prod - Fetch and "commit" the next index where a ring element may
+ * be produced.
+ * @ring: pointer to a ring context
+ * @ring_size: size of the ring
+ */
+static INLINE int
+bcm_ring_prod(bcm_ring_t *ring, const int ring_size)
+{
+ int next_write, prod_write;
+ RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size));
+
+ next_write = __bcm_ring_next_write(ring, ring_size);
+ if (__bcm_ring_full(ring, next_write)) {
+ prod_write = BCM_RING_FULL;
+ } else {
+ prod_write = ring->write;
+ bcm_ring_prod_done(ring, next_write); /* "commit" production */
+ }
+ return prod_write;
+}
+
+/**
+ * bcm_ring_cons_done - commit a previously pending read
+ * @ring: pointer to a ring context
+ * @read: index up to which elements have been consumed.
+ */
+static INLINE void
+bcm_ring_cons_done(bcm_ring_t *ring, int read)
+{
+ RING_ASSERT(BCM_RING_IS_VALID(ring));
+ ring->read = read;
+}
+
+/**
+ * bcm_ring_cons_pend - fetch in "pend" mode, the next index where a ring
+ * element may be consumed.
+ * @ring: pointer to a ring context
+ * @pend_read: index into ring up to which elements may be consumed.
+ * @ring_size: size of the ring
+ */
+static INLINE int
+bcm_ring_cons_pend(const bcm_ring_t *ring, int *pend_read, const int ring_size)
+{
+ int rtn;
+ RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size));
+ if (bcm_ring_is_empty(ring)) {
+ *pend_read = BCM_RING_EMPTY;
+ rtn = BCM_RING_EMPTY;
+ } else {
+ *pend_read = (ring->read + 1) % ring_size;
+ /* consumption is not committed, caller needs to explicitly commit */
+ rtn = ring->read;
+ }
+ return rtn;
+}
+
+/**
+ * bcm_ring_cons - fetch and "commit" the next index where a ring element may
+ * be consumed.
+ * @ring: pointer to a ring context
+ * @ring_size: size of the ring
+ */
+static INLINE int
+bcm_ring_cons(bcm_ring_t *ring, const int ring_size)
+{
+ int cons_read;
+ RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size));
+ if (bcm_ring_is_empty(ring)) {
+ cons_read = BCM_RING_EMPTY;
+ } else {
+ cons_read = ring->read;
+ ring->read = (ring->read + 1) % ring_size; /* read is committed */
+ }
+ return cons_read;
+}
+
+/**
+ * bcm_ring_sync_read - on consumption, update peer's read index.
+ * @peer: pointer to peer's producer ring context
+ * @self: pointer to consumer's ring context
+ */
+static INLINE void
+bcm_ring_sync_read(bcm_ring_t *peer, const bcm_ring_t *self)
+{
+ RING_ASSERT(BCM_RING_IS_VALID(peer));
+ RING_ASSERT(BCM_RING_IS_VALID(self));
+ peer->read = self->read; /* flush read update to peer producer */
+}
+
+/**
+ * bcm_ring_sync_write - on consumption, update peer's write index.
+ * @peer: pointer to peer's consumer ring context
+ * @self: pointer to producer's ring context
+ */
+static INLINE void
+bcm_ring_sync_write(bcm_ring_t *peer, const bcm_ring_t *self)
+{
+ RING_ASSERT(BCM_RING_IS_VALID(peer));
+ RING_ASSERT(BCM_RING_IS_VALID(self));
+ peer->write = self->write; /* flush write update to peer consumer */
+}
+
+/**
+ * bcm_ring_prod_avail - fetch total number of available empty slots in the
+ * ring for production.
+ * @ring: pointer to a ring context
+ * @ring_size: size of the ring
+ */
+static INLINE int
+bcm_ring_prod_avail(const bcm_ring_t *ring, const int ring_size)
+{
+ int prod_avail;
+ RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size));
+ if (ring->write >= ring->read) {
+ prod_avail = (ring_size - (ring->write - ring->read) - 1);
+ } else {
+ prod_avail = (ring->read - (ring->write + 1));
+ }
+ ASSERT(prod_avail < ring_size);
+ return prod_avail;
+}
+
+/**
+ * bcm_ring_cons_avail - fetch total number of available elements for consumption.
+ * @ring: pointer to a ring context
+ * @ring_size: size of the ring
+ */
+static INLINE int
+bcm_ring_cons_avail(const bcm_ring_t *ring, const int ring_size)
+{
+ int cons_avail;
+ RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size));
+ if (ring->read == ring->write) {
+ cons_avail = 0;
+ } else if (ring->read > ring->write) {
+ cons_avail = ((ring_size - ring->read) + ring->write);
+ } else {
+ cons_avail = ring->write - ring->read;
+ }
+ ASSERT(cons_avail < ring_size);
+ return cons_avail;
+}
+
+/**
+ * bcm_ring_cons_all - set ring in state where all elements are consumed.
+ * @ring: pointer to a ring context
+ */
+static INLINE void
+bcm_ring_cons_all(bcm_ring_t *ring)
+{
+ ring->read = ring->write;
+}
+
+/**
+ * Work Queue
+ * A work queue is composed of a ring of work items, of a specified depth.
+ * It HAS-A bcm_ring object, comprising a RD and a WR offset, to implement a
+ * producer/consumer circular ring.
+ */
+
+struct bcm_workq {
+ bcm_ring_t ring; /* Ring context abstraction */
+ struct bcm_workq *peer; /* Peer workq context */
+ void *buffer; /* Buffer storage for work items in workQ */
+ int ring_size; /* Depth of workQ */
+} __ring_aligned;
+
+typedef struct bcm_workq bcm_workq_t;
+
+/* #define BCM_WORKQ_DEBUG */
+#if defined(BCM_WORKQ_DEBUG)
+#define WORKQ_ASSERT(exp) ASSERT(exp)
+#else /* ! BCM_WORKQ_DEBUG */
+#define WORKQ_ASSERT(exp) do {} while (0)
+#endif /* ! BCM_WORKQ_DEBUG */
+
+#define WORKQ_AUDIT(workq) \
+ WORKQ_ASSERT((workq) != BCM_WORKQ_NULL); \
+ WORKQ_ASSERT(WORKQ_PEER(workq) != BCM_WORKQ_NULL); \
+ WORKQ_ASSERT((workq)->buffer == WORKQ_PEER(workq)->buffer); \
+ WORKQ_ASSERT((workq)->ring_size == WORKQ_PEER(workq)->ring_size);
+
+#define BCM_WORKQ_NULL ((bcm_workq_t *)NULL)
+
+#define WORKQ_PEER(workq) ((workq)->peer)
+#define WORKQ_RING(workq) (&((workq)->ring))
+#define WORKQ_PEER_RING(workq) (&((workq)->peer->ring))
+
+#define WORKQ_ELEMENT(__elem_type, __workq, __index) ({ \
+ WORKQ_ASSERT((__workq) != BCM_WORKQ_NULL); \
+ WORKQ_ASSERT((__index) < ((__workq)->ring_size)); \
+ ((__elem_type *)((__workq)->buffer)) + (__index); \
+})
+
+static INLINE void bcm_workq_init(bcm_workq_t *workq, bcm_workq_t *workq_peer,
+ void *buffer, int ring_size);
+
+static INLINE bool bcm_workq_is_empty(const bcm_workq_t *workq_prod);
+
+static INLINE void bcm_workq_prod_sync(bcm_workq_t *workq_prod);
+static INLINE void bcm_workq_cons_sync(bcm_workq_t *workq_cons);
+
+static INLINE void bcm_workq_prod_refresh(bcm_workq_t *workq_prod);
+static INLINE void bcm_workq_cons_refresh(bcm_workq_t *workq_cons);
+
+/**
+ * bcm_workq_init - initialize a workq pair
+ * @workq: pointer to a workq context
+ * @workq_peer: pointer to the peer workq context
+ * @buffer: pointer to a pre-allocated circular buffer to serve as a ring
+ * @ring_size: size of the ring in terms of max number of elements.
+ */
+static INLINE void
+bcm_workq_init(bcm_workq_t *workq, bcm_workq_t *workq_peer,
+ void *buffer, int ring_size)
+{
+ ASSERT(workq != BCM_WORKQ_NULL);
+ ASSERT(workq_peer != BCM_WORKQ_NULL);
+ ASSERT(buffer != NULL);
+ ASSERT(ring_size > 0);
+
+ WORKQ_PEER(workq) = workq_peer;
+ WORKQ_PEER(workq_peer) = workq;
+
+ bcm_ring_init(WORKQ_RING(workq));
+ bcm_ring_init(WORKQ_RING(workq_peer));
+
+ workq->buffer = workq_peer->buffer = buffer;
+ workq->ring_size = workq_peer->ring_size = ring_size;
+}
+
+/**
+ * bcm_workq_is_empty - test whether there is work
+ * @workq_prod: producer's workq
+ */
+static INLINE bool
+bcm_workq_is_empty(const bcm_workq_t *workq_prod)
+{
+ return bcm_ring_is_empty(WORKQ_RING(workq_prod));
+}
+
+/**
+ * bcm_workq_prod_sync - Commit the producer write index to peer workq's ring
+ * @workq_prod: producer's workq whose write index must be synced to peer
+ */
+static INLINE void
+bcm_workq_prod_sync(bcm_workq_t *workq_prod)
+{
+ WORKQ_AUDIT(workq_prod);
+
+ /* cons::write <--- prod::write */
+ bcm_ring_sync_write(WORKQ_PEER_RING(workq_prod), WORKQ_RING(workq_prod));
+}
+
+/**
+ * bcm_workq_cons_sync - Commit the consumer read index to the peer workq's ring
+ * @workq_cons: consumer's workq whose read index must be synced to peer
+ */
+static INLINE void
+bcm_workq_cons_sync(bcm_workq_t *workq_cons)
+{
+ WORKQ_AUDIT(workq_cons);
+
+ /* prod::read <--- cons::read */
+ bcm_ring_sync_read(WORKQ_PEER_RING(workq_cons), WORKQ_RING(workq_cons));
+}
+
+/**
+ * bcm_workq_prod_refresh - Fetch the updated consumer's read index
+ * @workq_prod: producer's workq whose read index must be refreshed from peer
+ */
+static INLINE void
+bcm_workq_prod_refresh(bcm_workq_t *workq_prod)
+{
+ WORKQ_AUDIT(workq_prod);
+
+ /* prod::read <--- cons::read */
+ bcm_ring_sync_read(WORKQ_RING(workq_prod), WORKQ_PEER_RING(workq_prod));
+}
+
+/**
+ * bcm_workq_cons_refresh - Fetch the updated producer's write index
+ * @workq_cons: consumer's workq whose write index must be refreshed from peer
+ */
+static INLINE void
+bcm_workq_cons_refresh(bcm_workq_t *workq_cons)
+{
+ WORKQ_AUDIT(workq_cons);
+
+ /* cons::write <--- prod::write */
+ bcm_ring_sync_write(WORKQ_RING(workq_cons), WORKQ_PEER_RING(workq_cons));
+}
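+
+/* Illustrative sketch (assumed usage): a consumer draining its workq;
+ * 'workq_cons' and 'elem_t' are assumed:
+ *
+ *	int idx;
+ *	bcm_workq_cons_refresh(workq_cons);	(fetch producer's write index)
+ *	while ((idx = bcm_ring_cons(WORKQ_RING(workq_cons),
+ *	        workq_cons->ring_size)) != BCM_RING_EMPTY) {
+ *		elem_t *e = WORKQ_ELEMENT(elem_t, workq_cons, idx);
+ *		(process e)
+ *	}
+ *	bcm_workq_cons_sync(workq_cons);	(publish read index to producer)
+ */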
+
+#endif /* __bcm_ring_included__ */
diff --git a/bcmdhd.101.10.361.x/include/bcmarp.h b/bcmdhd.101.10.361.x/include/bcmarp.h
new file mode 100755
index 0000000..2e6d92d
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/bcmarp.h
@@ -0,0 +1,84 @@
+/*
+ * Fundamental constants relating to ARP Protocol
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _bcmarp_h_
+#define _bcmarp_h_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+#include <bcmip.h>
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+#define ARP_OPC_OFFSET 6 /* option code offset */
+#define ARP_SRC_ETH_OFFSET 8 /* src h/w address offset */
+#define ARP_SRC_IP_OFFSET 14 /* src IP address offset */
+#define ARP_TGT_ETH_OFFSET 18 /* target h/w address offset */
+#define ARP_TGT_IP_OFFSET 24 /* target IP address offset */
+
+#define ARP_OPC_REQUEST 1 /* ARP request */
+#define ARP_OPC_REPLY 2 /* ARP reply */
+
+#define ARP_DATA_LEN 28 /* ARP data length */
+
+#define HTYPE_ETHERNET 1 /* htype for ethernet */
+BWL_PRE_PACKED_STRUCT struct bcmarp {
+ uint16 htype; /* Header type (1 = ethernet) */
+ uint16 ptype; /* Protocol type (0x800 = IP) */
+ uint8 hlen; /* Hardware address length (Eth = 6) */
+ uint8 plen; /* Protocol address length (IP = 4) */
+ uint16 oper; /* ARP_OPC_... */
+ uint8 src_eth[ETHER_ADDR_LEN]; /* Source hardware address */
+ uint8 src_ip[IPV4_ADDR_LEN]; /* Source protocol address (not aligned) */
+ uint8 dst_eth[ETHER_ADDR_LEN]; /* Destination hardware address */
+ uint8 dst_ip[IPV4_ADDR_LEN]; /* Destination protocol address */
+} BWL_POST_PACKED_STRUCT;
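+
+/* Illustrative sketch (assumed usage): filling the fixed fields of a bcmarp
+ * body for an IPv4 ARP request; 'arp' is an assumed pointer into a packet:
+ *
+ *	arp->htype = hton16(HTYPE_ETHERNET);
+ *	arp->ptype = hton16(ETHER_TYPE_IP);
+ *	arp->hlen = ETHER_ADDR_LEN;
+ *	arp->plen = IPV4_ADDR_LEN;
+ *	arp->oper = hton16(ARP_OPC_REQUEST);
+ */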
+
+/* Ethernet header + Arp message */
+BWL_PRE_PACKED_STRUCT struct bcmetharp {
+ struct ether_header eh;
+ struct bcmarp arp;
+} BWL_POST_PACKED_STRUCT;
+
+/* IPv6 Neighbor Advertisement */
+#define NEIGHBOR_ADVERTISE_SRC_IPV6_OFFSET 8 /* src IPv6 address offset */
+#define NEIGHBOR_ADVERTISE_TYPE_OFFSET 40 /* type offset */
+#define NEIGHBOR_ADVERTISE_CHECKSUM_OFFSET 42 /* checksum offset */
+#define NEIGHBOR_ADVERTISE_FLAGS_OFFSET 44 /* R,S and O flags offset */
+#define NEIGHBOR_ADVERTISE_TGT_IPV6_OFFSET 48 /* target IPv6 address offset */
+#define NEIGHBOR_ADVERTISE_OPTION_OFFSET 64 /* options offset */
+#define NEIGHBOR_ADVERTISE_TYPE 136
+#define NEIGHBOR_SOLICITATION_TYPE 135
+
+#define OPT_TYPE_SRC_LINK_ADDR 1
+#define OPT_TYPE_TGT_LINK_ADDR 2
+
+#define NEIGHBOR_ADVERTISE_DATA_LEN 72 /* neighbor advertisement data length */
+#define NEIGHBOR_ADVERTISE_FLAGS_VALUE 0x60 /* R=0, S=1 and O=1 */
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* !defined(_bcmarp_h_) */
diff --git a/bcmdhd.101.10.361.x/include/bcmbloom.h b/bcmdhd.101.10.361.x/include/bcmbloom.h
new file mode 100755
index 0000000..dabfb26
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/bcmbloom.h
@@ -0,0 +1,73 @@
+/*
+ * Bloom filter support
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _bcmbloom_h_
+#define _bcmbloom_h_
+
+#include <typedefs.h>
+#ifdef BCMDRIVER
+#include <osl.h>
+#else
+#include <stddef.h> /* For size_t */
+#endif
+
+struct bcm_bloom_filter;
+typedef struct bcm_bloom_filter bcm_bloom_filter_t;
+
+typedef void* (*bcm_bloom_alloc_t)(void *ctx, uint size);
+typedef void (*bcm_bloom_free_t)(void *ctx, void *buf, uint size);
+typedef uint (*bcm_bloom_hash_t)(void* ctx, uint idx, const uint8 *tag, uint len);
+
+/* create/allocate a bloom filter. filter size can be 0 for validate only filters */
+int bcm_bloom_create(bcm_bloom_alloc_t alloc_cb,
+ bcm_bloom_free_t free_cb, void *callback_ctx, uint max_hash,
+ uint filter_size /* bytes */, bcm_bloom_filter_t **bloom);
+
+/* destroy bloom filter */
+int bcm_bloom_destroy(bcm_bloom_filter_t **bloom, bcm_bloom_free_t free_cb);
+
+/* add a hash function to filter, return an index */
+int bcm_bloom_add_hash(bcm_bloom_filter_t *filter, bcm_bloom_hash_t hash, uint *idx);
+
+/* remove the hash function at index from filter */
+int bcm_bloom_remove_hash(bcm_bloom_filter_t *filter, uint idx);
+
+/* check if given tag is member of the filter. If buf is NULL and/or buf_len is 0
+ * then use the internal state. BCME_OK if member, BCME_NOTFOUND if not,
+ * or other error (e.g. BADARG)
+ */
+bool bcm_bloom_is_member(bcm_bloom_filter_t *filter,
+ const uint8 *tag, uint tag_len, const uint8 *buf, uint buf_len);
+
+/* add a member to the filter. invalid for validate_only filters */
+int bcm_bloom_add_member(bcm_bloom_filter_t *filter, const uint8 *tag, uint tag_len);
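+
+/* Illustrative sketch (assumed usage): building a filter and testing
+ * membership; 'my_alloc', 'my_free', 'my_hash', 'ctx', 'MAX_HASH' and
+ * 'FILTER_BYTES' are assumed caller-supplied:
+ *
+ *	bcm_bloom_filter_t *bloom;
+ *	uint idx;
+ *	bcm_bloom_create(my_alloc, my_free, ctx, MAX_HASH, FILTER_BYTES, &bloom);
+ *	bcm_bloom_add_hash(bloom, my_hash, &idx);
+ *	bcm_bloom_add_member(bloom, tag, tag_len);
+ *	bool member = bcm_bloom_is_member(bloom, tag, tag_len, NULL, 0);
+ */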
+
+/* no support for remove member */
+
+/* get the filter data from state. BCME_BUFTOOSHORT w/ required length in buf_len
+ * if supplied size is insufficient
+ */
+int bcm_bloom_get_filter_data(bcm_bloom_filter_t *filter,
+ uint buf_size, uint8 *buf, uint *buf_len);
+
+#endif /* _bcmbloom_h_ */
diff --git a/bcmdhd.101.10.361.x/include/bcmcdc.h b/bcmdhd.101.10.361.x/include/bcmcdc.h
new file mode 100755
index 0000000..cc03c7a
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/bcmcdc.h
@@ -0,0 +1,115 @@
+/*
+ * CDC network driver ioctl/indication encoding
+ * Broadcom 802.11abg Networking Device Driver
+ *
+ * Definitions subject to change without notice.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+#ifndef _bcmcdc_h_
+#define _bcmcdc_h_
+#include <ethernet.h>
+
+typedef struct cdc_ioctl {
+ uint32 cmd; /* ioctl command value */
+ uint32 len; /* lower 16: output buflen; upper 16: input buflen (excludes header) */
+ uint32 flags; /* flag defns given below */
+ uint32 status; /* status code returned from the device */
+} cdc_ioctl_t;
+
+/* Max valid buffer size that can be sent to the dongle */
+#define CDC_MAX_MSG_SIZE ETHER_MAX_LEN
+
+/* len field is divided into input and output buffer lengths */
+#define CDCL_IOC_OUTLEN_MASK 0x0000FFFF /* maximum or expected response length, */
+ /* excluding IOCTL header */
+#define CDCL_IOC_OUTLEN_SHIFT 0
+#define CDCL_IOC_INLEN_MASK 0xFFFF0000 /* input buffer length, excluding IOCTL header */
+#define CDCL_IOC_INLEN_SHIFT 16
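+
+/* Illustrative sketch (assumed usage): packing the input/output buffer
+ * lengths into the len field of a cdc_ioctl_t 'msg'; names are assumed:
+ *
+ *	msg.len = ((inlen << CDCL_IOC_INLEN_SHIFT) & CDCL_IOC_INLEN_MASK) |
+ *	        ((outlen << CDCL_IOC_OUTLEN_SHIFT) & CDCL_IOC_OUTLEN_MASK);
+ */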
+
+/* CDC flag definitions */
+#define CDCF_IOC_ERROR 0x01 /* 0=success, 1=ioctl cmd failed */
+#define CDCF_IOC_SET 0x02 /* 0=get, 1=set cmd */
+#define CDCF_IOC_OVL_IDX_MASK 0x3c /* overlay region index mask */
+#define CDCF_IOC_OVL_RSV 0x40 /* 1=reserve this overlay region */
+#define CDCF_IOC_OVL 0x80 /* 1=this ioctl corresponds to an overlay */
+#define CDCF_IOC_ACTION_MASK 0xfe /* SET/GET, OVL_IDX, OVL_RSV, OVL mask */
+#define CDCF_IOC_ACTION_SHIFT 1 /* SET/GET, OVL_IDX, OVL_RSV, OVL shift */
+#define CDCF_IOC_IF_MASK 0xF000 /* I/F index */
+#define CDCF_IOC_IF_SHIFT 12
+#define CDCF_IOC_ID_MASK 0xFFFF0000 /* used to uniquely id an ioctl req/resp pairing */
+#define CDCF_IOC_ID_SHIFT 16 /* # of bits of shift for ID Mask */
+
+#define CDC_IOC_IF_IDX(flags) (((flags) & CDCF_IOC_IF_MASK) >> CDCF_IOC_IF_SHIFT)
+#define CDC_IOC_ID(flags) (((flags) & CDCF_IOC_ID_MASK) >> CDCF_IOC_ID_SHIFT)
+
+#define CDC_GET_IF_IDX(hdr) \
+ ((int)((((hdr)->flags) & CDCF_IOC_IF_MASK) >> CDCF_IOC_IF_SHIFT))
+#define CDC_SET_IF_IDX(hdr, idx) \
+ ((hdr)->flags = (((hdr)->flags & ~CDCF_IOC_IF_MASK) | ((idx) << CDCF_IOC_IF_SHIFT)))
+
+/*
+ * BDC header
+ *
+ * The BDC header is used on data packets to convey priority across USB.
+ */
+
+struct bdc_header {
+ uint8 flags; /* Flags */
+ uint8 priority; /* 802.1d Priority 0:2 bits, 4:7 USB flow control info */
+ uint8 flags2;
+ uint8 dataOffset; /* Offset from end of BDC header to packet data, in
+ * 4-byte words. Leaves room for optional headers.
+ */
+};
+
+#define BDC_HEADER_LEN 4
+
+/* flags field bitmap */
+#define BDC_FLAG_EXEMPT 0x03 /* EXT_STA: encryption exemption (host -> dongle?) */
+#define BDC_FLAG_80211_PKT 0x01 /* Packet is in 802.11 format (dongle -> host) */
+#define BDC_FLAG_SUM_GOOD 0x04 /* Dongle has verified good RX checksums */
+#define BDC_FLAG_SUM_NEEDED 0x08 /* Dongle needs to do TX checksums: host->device */
+#define BDC_FLAG_EVENT_MSG 0x08 /* Payload contains an event msg: device->host */
+#define BDC_FLAG_VER_MASK 0xf0 /* Protocol version mask */
+#define BDC_FLAG_VER_SHIFT 4 /* Protocol version shift */
+
+/* priority field bitmap */
+#define BDC_PRIORITY_MASK 0x07
+#define BDC_PRIORITY_FC_MASK 0xf0 /* flow control info mask */
+#define BDC_PRIORITY_FC_SHIFT 4 /* flow control info shift */
+
+/* flags2 field bitmap */
+#define BDC_FLAG2_IF_MASK 0x0f /* interface index (host <-> dongle) */
+#define BDC_FLAG2_IF_SHIFT 0
+#define BDC_FLAG2_FC_FLAG 0x10 /* flag to indicate if pkt contains */
+ /* FLOW CONTROL info only */
+
+/* version numbers */
+#define BDC_PROTO_VER_1 1 /* Old Protocol version */
+#define BDC_PROTO_VER 2 /* Protocol version */
+
+/* flags2.if field access macros */
+#define BDC_GET_IF_IDX(hdr) \
+ ((int)((((hdr)->flags2) & BDC_FLAG2_IF_MASK) >> BDC_FLAG2_IF_SHIFT))
+#define BDC_SET_IF_IDX(hdr, idx) \
+ ((hdr)->flags2 = (((hdr)->flags2 & ~BDC_FLAG2_IF_MASK) | ((idx) << BDC_FLAG2_IF_SHIFT)))
+
+#endif /* _bcmcdc_h_ */
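To make the encodings concrete, a sketch of filling a cdc_ioctl_t for a SET request and of decoding a received bdc_header; the function names, the rolling request id, and the length arguments are invented for illustration:

/* Encode a SET ioctl for interface 'ifidx'; the lower 16 bits of len carry
 * the expected output length, the upper 16 the input length (header excluded).
 */
static void
cdc_encode_example(cdc_ioctl_t *msg, uint32 cmd, uint ifidx,
	uint16 inlen, uint16 outlen, uint16 reqid)
{
	msg->cmd = cmd;
	msg->len = ((uint32)inlen << CDCL_IOC_INLEN_SHIFT) |
	           ((uint32)outlen << CDCL_IOC_OUTLEN_SHIFT);
	msg->flags = CDCF_IOC_SET |
	             (((uint32)reqid << CDCF_IOC_ID_SHIFT) & CDCF_IOC_ID_MASK);
	CDC_SET_IF_IDX(msg, ifidx);
	msg->status = 0;
}

/* Recover version, priority, interface index and payload offset
 * from a received BDC header.
 */
static void
bdc_decode_example(const struct bdc_header *h,
	int *ver, int *prio, int *ifidx, uint *data_off_bytes)
{
	*ver = (h->flags & BDC_FLAG_VER_MASK) >> BDC_FLAG_VER_SHIFT;
	*prio = h->priority & BDC_PRIORITY_MASK;
	*ifidx = BDC_GET_IF_IDX(h);
	/* dataOffset counts 4-byte words past the fixed 4-byte header */
	*data_off_bytes = BDC_HEADER_LEN + ((uint)h->dataOffset << 2);
}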
diff --git a/bcmdhd.101.10.361.x/include/bcmdefs.h b/bcmdhd.101.10.361.x/include/bcmdefs.h
new file mode 100755
index 0000000..58e1ca3
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/bcmdefs.h
@@ -0,0 +1,907 @@
+/*
+ * Misc system wide definitions
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _bcmdefs_h_
+#define _bcmdefs_h_
+
+/*
+ * This file need not be included explicitly; it is included automatically when
+ * typedefs.h is included.
+ */
+
+/* Use BCM_REFERENCE to suppress warnings about intentionally-unused function
+ * arguments or local variables.
+ */
+#define BCM_REFERENCE(data) ((void)(data))
+
+/* Allow for suppressing unused variable warnings. */
+#ifdef __GNUC__
+#define UNUSED_VAR __attribute__ ((unused))
+#else
+#define UNUSED_VAR
+#endif
+
+/* GNU GCC 4.6+ supports selectively turning off a warning.
+ * Define these diagnostic macros to help suppress cast-qual warning
+ * until all the work can be done to fix the casting issues.
+ */
+#if (defined(__GNUC__) && defined(STRICT_GCC_WARNINGS) && \
+ (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) || \
+ defined(__clang__))
+#define GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST() \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
+#define GCC_DIAGNOSTIC_PUSH_SUPPRESS_NULL_DEREF() \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wnull-dereference\"")
+#define GCC_DIAGNOSTIC_POP() \
+ _Pragma("GCC diagnostic pop")
+#elif defined(_MSC_VER)
+#define GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST() \
+ __pragma(warning(push)) \
+ __pragma(warning(disable:4090))
+#define GCC_DIAGNOSTIC_PUSH_SUPPRESS_NULL_DEREF() \
+ __pragma(warning(push))
+#define GCC_DIAGNOSTIC_POP() \
+ __pragma(warning(pop))
+#else
+#define GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST()
+#define GCC_DIAGNOSTIC_PUSH_SUPPRESS_NULL_DEREF()
+#define GCC_DIAGNOSTIC_POP()
+#endif /* Diagnostic macros not defined */
+
+/* Macros to allow Coverity modeling constructs in source code */
+#if defined(__COVERITY__)
+
+/* Coverity Doc:
+ * Indicates to the TAINTED_SCALAR checker and the INTEGER_OVERFLOW checker
+ * that a function taints its argument
+ */
+#define COV_TAINTED_DATA_ARG(arg) __coverity_tainted_data_argument__(arg)
+
+/* Coverity Doc:
+ * Indicates to the TAINTED_SCALAR checker and the INTEGER_OVERFLOW checker
+ * that a function is a tainted data sink for an argument.
+ */
+#define COV_TAINTED_DATA_SINK(arg) __coverity_tainted_data_sink__(arg)
+
+/* Coverity Doc:
+ * Models a function that cannot take a negative number as an argument. Used in
+ * conjunction with other models to indicate that negative arguments are invalid.
+ */
+#define COV_NEG_SINK(arg) __coverity_negative_sink__(arg)
+
+#else
+
+#define COV_TAINTED_DATA_ARG(arg) do { } while (0)
+#define COV_TAINTED_DATA_SINK(arg) do { } while (0)
+#define COV_NEG_SINK(arg) do { } while (0)
+
+#endif /* __COVERITY__ */
+
+/* Compile-time assert can be used in place of ASSERT if the expression evaluates
+ * to a constant at compile time.
+ */
+#define STATIC_ASSERT(expr) { \
+ /* Make sure the expression is constant. */ \
+ typedef enum { _STATIC_ASSERT_NOT_CONSTANT = (expr) } _static_assert_e UNUSED_VAR; \
+ /* Make sure the expression is true. */ \
+ typedef char STATIC_ASSERT_FAIL[(expr) ? 1 : -1] UNUSED_VAR; \
+}
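A brief usage sketch, with an invented struct and sizes:

static void
static_assert_example(void)
{
	typedef struct { uint16 type; uint16 len; } example_hdr_t;

	STATIC_ASSERT(sizeof(example_hdr_t) == 4);	/* compiles */
	/* STATIC_ASSERT(sizeof(example_hdr_t) == 8); would fail to compile:
	 * the char array would get a negative size.
	 */
}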
+
+/* Reclaiming text and data :
+ * The following macros specify special linker sections that can be reclaimed
+ * after a system is considered 'up'.
+ * BCMATTACHFN is also used for detach functions (it's not worth having a BCMDETACHFN,
+ * as in most cases, the attach function calls the detach function to clean up on error).
+ */
+#if defined(BCM_RECLAIM)
+
+extern bool bcm_reclaimed;
+extern bool bcm_attach_part_reclaimed;
+extern bool bcm_preattach_part_reclaimed;
+extern bool bcm_postattach_part_reclaimed;
+
+#define RECLAIMED() (bcm_reclaimed)
+#define ATTACH_PART_RECLAIMED() (bcm_attach_part_reclaimed)
+#define PREATTACH_PART_RECLAIMED() (bcm_preattach_part_reclaimed)
+#define POSTATTACH_PART_RECLAIMED() (bcm_postattach_part_reclaimed)
+
+/* Place _fn/_data symbols in various reclaimed output sections */
+#define BCMATTACHDATA(_data) __attribute__ ((__section__ (".dataini2." #_data))) _data
+#define BCMATTACHFN(_fn) __attribute__ ((__section__ (".textini2." #_fn), noinline)) _fn
+#define BCMPREATTACHDATA(_data) __attribute__ ((__section__ (".dataini3." #_data))) _data
+#define BCMPREATTACHFN(_fn) __attribute__ ((__section__ (".textini3." #_fn), noinline)) _fn
+#define BCMPOSTATTACHDATA(_data) __attribute__ ((__section__ (".dataini5." #_data))) _data
+#define BCMPOSTATTACHFN(_fn) __attribute__ ((__section__ (".textini5." #_fn), noinline)) _fn
+
+/* Relocate attach symbols to save-restore region to increase pre-reclaim heap size. */
+#define BCM_SRM_ATTACH_DATA(_data) __attribute__ ((__section__ (".datasrm." #_data))) _data
+#define BCM_SRM_ATTACH_FN(_fn) __attribute__ ((__section__ (".textsrm." #_fn), noinline)) _fn
+
+/* Explicitly place data in .rodata section so it can be write-protected after attach */
+#define BCMRODATA(_data) __attribute__ ((__section__ (".shrodata." #_data))) _data
+
+#ifdef BCMDBG_SR
+/*
+ * Don't reclaim so we can compare SR ASM
+ */
+#define BCMPREATTACHDATASR(_data) _data
+#define BCMPREATTACHFNSR(_fn) _fn
+#define BCMATTACHDATASR(_data) _data
+#define BCMATTACHFNSR(_fn) _fn
+#else
+#define BCMPREATTACHDATASR(_data) BCMPREATTACHDATA(_data)
+#define BCMPREATTACHFNSR(_fn) BCMPREATTACHFN(_fn)
+#define BCMATTACHDATASR(_data) BCMATTACHDATA(_data)
+#define BCMATTACHFNSR(_fn) BCMATTACHFN(_fn)
+#endif
+
+#define BCMINITDATA(_data) _data
+#define BCMINITFN(_fn) _fn
+#ifndef CONST
+#define CONST const
+#endif
+
+/* Non-manufacture or internal attach function/data */
+#if !(defined(WLTEST) || defined(ATE_BUILD))
+#define BCMNMIATTACHFN(_fn) BCMATTACHFN(_fn)
+#define BCMNMIATTACHDATA(_data) BCMATTACHDATA(_data)
+#else
+#define BCMNMIATTACHFN(_fn) _fn
+#define BCMNMIATTACHDATA(_data) _data
+#endif /* WLTEST || ATE_BUILD */
+
+#if !defined(ATE_BUILD) && defined(BCM_CISDUMP_NO_RECLAIM)
+#define BCMCISDUMPATTACHFN(_fn) _fn
+#define BCMCISDUMPATTACHDATA(_data) _data
+#else
+#define BCMCISDUMPATTACHFN(_fn) BCMNMIATTACHFN(_fn)
+#define BCMCISDUMPATTACHDATA(_data) BCMNMIATTACHDATA(_data)
+#endif /* !ATE_BUILD && BCM_CISDUMP_NO_RECLAIM */
+
+/* SROM with OTP support */
+#if defined(BCMOTPSROM)
+#define BCMSROMATTACHFN(_fn) _fn
+#define BCMSROMATTACHDATA(_data) _data
+#else
+#define BCMSROMATTACHFN(_fn) BCMNMIATTACHFN(_fn)
+#define BCMSROMATTACHDATA(_data) BCMNMIATTACHDATA(_data)
+#endif /* BCMOTPSROM */
+
+#if defined(BCM_CISDUMP_NO_RECLAIM)
+#define BCMSROMCISDUMPATTACHFN(_fn) _fn
+#define BCMSROMCISDUMPATTACHDATA(_data) _data
+#else
+#define BCMSROMCISDUMPATTACHFN(_fn) BCMSROMATTACHFN(_fn)
+#define BCMSROMCISDUMPATTACHDATA(_data) BCMSROMATTACHDATA(_data)
+#endif /* BCM_CISDUMP_NO_RECLAIM */
+
+#define BCMUNINITFN(_fn) _fn
+
+#else /* BCM_RECLAIM */
+
+#define bcm_reclaimed (1)
+#define bcm_attach_part_reclaimed (1)
+#define bcm_preattach_part_reclaimed (1)
+#define bcm_postattach_part_reclaimed (1)
+#define BCMATTACHDATA(_data) _data
+#define BCMATTACHFN(_fn) _fn
+#define BCM_SRM_ATTACH_DATA(_data) _data
+#define BCM_SRM_ATTACH_FN(_fn) _fn
+/* BCMRODATA data is written into at attach time so it cannot be in .rodata */
+#define BCMRODATA(_data) __attribute__ ((__section__ (".data." #_data))) _data
+#define BCMPREATTACHDATA(_data) _data
+#define BCMPREATTACHFN(_fn) _fn
+#define BCMPOSTATTACHDATA(_data) _data
+#define BCMPOSTATTACHFN(_fn) _fn
+#define BCMINITDATA(_data) _data
+#define BCMINITFN(_fn) _fn
+#define BCMUNINITFN(_fn) _fn
+#define BCMNMIATTACHFN(_fn) _fn
+#define BCMNMIATTACHDATA(_data) _data
+#define BCMSROMATTACHFN(_fn) _fn
+#define BCMSROMATTACHDATA(_data) _data
+#define BCMPREATTACHFNSR(_fn) _fn
+#define BCMPREATTACHDATASR(_data) _data
+#define BCMATTACHFNSR(_fn) _fn
+#define BCMATTACHDATASR(_data) _data
+#define BCMCISDUMPATTACHFN(_fn) _fn
+#define BCMCISDUMPATTACHDATA(_data) _data
+#define BCMSROMCISDUMPATTACHFN(_fn) _fn
+#define BCMSROMCISDUMPATTACHDATA(_data) _data
+#define CONST const
+
+#define RECLAIMED() (bcm_reclaimed)
+#define ATTACH_PART_RECLAIMED() (bcm_attach_part_reclaimed)
+#define PREATTACH_PART_RECLAIMED() (bcm_preattach_part_reclaimed)
+#define POSTATTACH_PART_RECLAIMED() (bcm_postattach_part_reclaimed)
+
+#endif /* BCM_RECLAIM */
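A sketch of applying these tags at definition sites (names invented); note the macros wrap the declarator name, not the whole declaration, so the section attributes land on the right symbol:

/* init-only data and an attach-time function, reclaimable when
 * BCM_RECLAIM is defined (the tags are no-ops otherwise)
 */
static const char BCMATTACHDATA(example_banner)[] = "example";

static int
BCMATTACHFN(example_attach)(void *ctx)
{
	BCM_REFERENCE(ctx);
	return 0;
}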
+
+#define BCMUCODEDATA(_data) BCMINITDATA(_data)
+
+#if defined(BCM_AQM_DMA_DESC) && !defined(BCM_AQM_DMA_DESC_DISABLED) && !defined(DONGLEBUILD)
+#define BCMUCODEFN(_fn) BCMINITFN(_fn)
+#else
+#define BCMUCODEFN(_fn) BCMATTACHFN(_fn)
+#endif /* BCM_AQM_DMA_DESC */
+
+/* This feature is for dongle builds only.
+ * In ROM builds, use BCMFASTPATH() to mark functions that will be excluded from ROM bits if
+ * the BCMFASTPATH_EXCLUDE_FROM_ROM flag is defined (defined by default).
+ * In romoffload or RAM builds, all functions marked by BCMFASTPATH() will be placed
+ * in the "text_fastpath" section and used by the trap handler.
+ */
+#ifndef BCMFASTPATH
+#if defined(DONGLEBUILD)
+#if defined(BCMROMBUILD)
+#if defined(BCMFASTPATH_EXCLUDE_FROM_ROM)
+ #define BCMFASTPATH(_fn) __attribute__ ((__section__ (".text_ram." #_fn))) _fn
+#else /* BCMFASTPATH_EXCLUDE_FROM_ROM */
+ #define BCMFASTPATH(_fn) _fn
+#endif /* BCMFASTPATH_EXCLUDE_FROM_ROM */
+#else /* BCMROMBUILD */
+#ifdef BCMFASTPATH_O3OPT
+#ifdef ROM_ENAB_RUNTIME_CHECK
+ #define BCMFASTPATH(_fn) __attribute__ ((__section__ (".text_fastpath." #_fn))) _fn
+#else
+ #define BCMFASTPATH(_fn) __attribute__ ((__section__ (".text_fastpath." #_fn))) \
+ __attribute__ ((optimize(3))) _fn
+#endif /* ROM_ENAB_RUNTIME_CHECK */
+#else
+ #define BCMFASTPATH(_fn) __attribute__ ((__section__ (".text_fastpath." #_fn))) _fn
+#endif /* BCMFASTPATH_O3OPT */
+#endif /* BCMROMBUILD */
+#else /* DONGLEBUILD */
+ #define BCMFASTPATH(_fn) _fn
+#endif /* DONGLEBUILD */
+#endif /* BCMFASTPATH */
+
+/* Use the BCMRAMFN/BCMRAMDATA() macros to tag functions/data in source that must be included in RAM
+ * (excluded from ROM). This should eliminate the need to manually specify these functions/data in
+ * the ROM config file. It should only be used in special cases where the function must be in RAM
+ * for *all* ROM-based chips.
+ */
+#if defined(BCMROMBUILD)
+ #define BCMRAMFN(_fn) __attribute__ ((__section__ (".text_ram." #_fn), noinline)) _fn
+ #define BCMRAMDATA(_data) __attribute__ ((__section__ (".rodata_ram." #_data))) _data
+#else
+ #define BCMRAMFN(_fn) _fn
+ #define BCMRAMDATA(_data) _data
+#endif /* ROMBUILD */
+
+/* Use BCMSPECSYM() macro to tag symbols going to a special output section in the binary. */
+#define BCMSPECSYM(_sym) __attribute__ ((__section__ (".special." #_sym))) _sym
+
+#define STATIC static
+
+/* Functions that do not examine any values except their arguments, and have no effects except
+ * the return value, should use this keyword. Note that a function that has pointer arguments
+ * and examines the data pointed to must not be declared as BCMCONSTFN.
+ */
+#ifdef __GNUC__
+#define BCMCONSTFN __attribute__ ((const))
+#else
+#define BCMCONSTFN
+#endif /* __GNUC__ */
+
+/* Bus types */
+#define SI_BUS 0 /* SOC Interconnect */
+#define PCI_BUS 1 /* PCI target */
+#define PCMCIA_BUS 2 /* PCMCIA target */
+#define SDIO_BUS 3 /* SDIO target */
+#define JTAG_BUS 4 /* JTAG */
+#define USB_BUS 5 /* USB (does not support R/W REG) */
+#define SPI_BUS 6 /* gSPI target */
+#define RPC_BUS 7 /* RPC target */
+
+/* Allows size optimization for single-bus image */
+#ifdef BCMBUSTYPE
+#define BUSTYPE(bus) (BCMBUSTYPE)
+#else
+#define BUSTYPE(bus) (bus)
+#endif
+
+#ifdef BCMBUSCORETYPE
+#define BUSCORETYPE(ct) (BCMBUSCORETYPE)
+#else
+#define BUSCORETYPE(ct) (ct)
+#endif
+
+/* Allows size optimization for single-backplane image */
+#ifdef BCMCHIPTYPE
+#define CHIPTYPE(bus) (BCMCHIPTYPE)
+#else
+#define CHIPTYPE(bus) (bus)
+#endif
+
+/* Allows size optimization for SPROM support */
+#if defined(BCMSPROMBUS)
+#define SPROMBUS (BCMSPROMBUS)
+#else
+#define SPROMBUS (PCI_BUS)
+#endif
+
+/* Allows size optimization for single-chip image */
+/* These macros are NOT meant to encourage writing chip-specific code.
+ * Use them only when it is appropriate for example in PMU PLL/CHIP/SWREG
+ * controls and in chip-specific workarounds.
+ */
+#ifdef BCMCHIPID
+#define CHIPID(chip) (BCMCHIPID)
+#else
+#define CHIPID(chip) (chip)
+#endif
+
+#ifdef BCMCHIPREV
+#define CHIPREV(rev) (BCMCHIPREV)
+#else
+#define CHIPREV(rev) (rev)
+#endif
+
+#ifdef BCMPCIEREV
+#define PCIECOREREV(rev) (BCMPCIEREV)
+#else
+#define PCIECOREREV(rev) (rev)
+#endif
+
+#ifdef BCMPMUREV
+#define PMUREV(rev) (BCMPMUREV)
+#else
+#define PMUREV(rev) (rev)
+#endif
+
+#ifdef BCMCCREV
+#define CCREV(rev) (BCMCCREV)
+#else
+#define CCREV(rev) (rev)
+#endif
+
+#ifdef BCMGCIREV
+#define GCIREV(rev) (BCMGCIREV)
+#else
+#define GCIREV(rev) (rev)
+#endif
+
+#ifdef BCMCR4REV
+#define CR4REV(rev) (BCMCR4REV)
+#define CR4REV_GE(rev, val) ((BCMCR4REV) >= (val))
+#else
+#define CR4REV(rev) (rev)
+#define CR4REV_GE(rev, val) ((rev) >= (val))
+#endif
+
+#ifdef BCMLHLREV
+#define LHLREV(rev) (BCMLHLREV)
+#else
+#define LHLREV(rev) (rev)
+#endif
+
+#ifdef BCMSPMISREV
+#define SPMISREV(rev) (BCMSPMISREV)
+#else
+#define SPMISREV(rev) (rev)
+#endif
+
+/* Defines for DMA Address Width - Shared between OSL and HNDDMA */
+#define DMADDR_MASK_32 0x0 /* Address mask for 32-bits */
+#define DMADDR_MASK_30 0xc0000000 /* Address mask for 30-bits */
+#define DMADDR_MASK_26 0xFC000000 /* Address mask for 26-bits */
+#define DMADDR_MASK_0 0xffffffff /* Address mask for 0-bits (hi-part) */
+
+#define DMADDRWIDTH_26 26 /* 26-bit addressing capability */
+#define DMADDRWIDTH_30 30 /* 30-bit addressing capability */
+#define DMADDRWIDTH_32 32 /* 32-bit addressing capability */
+#define DMADDRWIDTH_63 63 /* 64-bit addressing capability */
+#define DMADDRWIDTH_64 64 /* 64-bit addressing capability */
+
+typedef struct {
+ uint32 loaddr;
+ uint32 hiaddr;
+} dma64addr_t;
+
+#define PHYSADDR64HI(_pa) ((_pa).hiaddr)
+#define PHYSADDR64HISET(_pa, _val) \
+ do { \
+ (_pa).hiaddr = (_val); \
+ } while (0)
+#define PHYSADDR64LO(_pa) ((_pa).loaddr)
+#define PHYSADDR64LOSET(_pa, _val) \
+ do { \
+ (_pa).loaddr = (_val); \
+ } while (0)
+
+#ifdef BCMDMA64OSL
+typedef dma64addr_t dmaaddr_t;
+#define PHYSADDRHI(_pa) PHYSADDR64HI(_pa)
+#define PHYSADDRHISET(_pa, _val) PHYSADDR64HISET(_pa, _val)
+#define PHYSADDRLO(_pa) PHYSADDR64LO(_pa)
+#define PHYSADDRLOSET(_pa, _val) PHYSADDR64LOSET(_pa, _val)
+#define PHYSADDRTOULONG(_pa, _ulong) \
+ do { \
+ _ulong = ((unsigned long long)(_pa).hiaddr << 32) | ((_pa).loaddr); \
+ } while (0)
+
+#else
+typedef uint32 dmaaddr_t;
+#define PHYSADDRHI(_pa) (0u)
+#define PHYSADDRHISET(_pa, _val)
+#define PHYSADDRLO(_pa) ((_pa))
+#define PHYSADDRLOSET(_pa, _val) \
+ do { \
+ (_pa) = (_val); \
+ } while (0)
+#endif /* BCMDMA64OSL */
+
+#define PHYSADDRISZERO(_pa) (PHYSADDRLO(_pa) == 0 && PHYSADDRHI(_pa) == 0)
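A sketch of filling a dmaaddr_t so the same code builds with and without BCMDMA64OSL; where the raw 64-bit bus address comes from is outside this header:

static void
dma_addr_example(dmaaddr_t *pa, unsigned long long busaddr)
{
	/* the high word is silently discarded in non-BCMDMA64OSL builds */
	PHYSADDRLOSET(*pa, (uint32)(busaddr & 0xffffffffu));
	PHYSADDRHISET(*pa, (uint32)(busaddr >> 32));
}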
+
+/* One physical DMA segment */
+typedef struct {
+ dmaaddr_t addr;
+ uint32 length;
+} hnddma_seg_t;
+
+#if defined(__linux__)
+#define MAX_DMA_SEGS 8
+#else
+#define MAX_DMA_SEGS 4
+#endif
+
+typedef struct {
+ void *oshdmah; /* Opaque handle for OSL to store its information */
+ uint origsize; /* Size of the virtual packet */
+ uint nsegs;
+ hnddma_seg_t segs[MAX_DMA_SEGS];
+} hnddma_seg_map_t;
+
+/* Packet headroom necessary to accommodate the largest header in the system (i.e. TXOFF).
+ * By doing so, we avoid the need to allocate an extra buffer for the header when bridging to WL.
+ * There is a compile time check in wlc.c which ensures that this value is at least as big
+ * as TXOFF. This value is used in dma_rxfill (hnddma.c).
+ */
+
+#ifndef BCMEXTRAHDROOM
+#define BCMEXTRAHDROOM 204
+#endif
+
+/* Packet alignment for most efficient SDIO (can change based on platform) */
+#ifndef SDALIGN
+#define SDALIGN 32
+#endif
+
+/* Headroom required for dongle-to-host communication. Packets allocated
+ * locally in the dongle (e.g. for CDC ioctls or RNDIS messages) should
+ * leave this much room in front for low-level message headers which may
+ * be needed to get across the dongle bus to the host. (These messages
+ * don't go over the network, so room for the full WL header above would
+ * be a waste.)
+ */
+/*
+ * Set these to the MAX across all devices, to avoid problems with ROM builds:
+ * USB uses BCMDONGLEHDRSZ and BCMDONGLEPADSZ of 0;
+ * SDIO uses BCMDONGLEHDRSZ 12 and BCMDONGLEPADSZ 16.
+ */
+#define BCMDONGLEHDRSZ 12
+#define BCMDONGLEPADSZ 16
+
+#define BCMDONGLEOVERHEAD (BCMDONGLEHDRSZ + BCMDONGLEPADSZ)
+
+#ifdef BCMDBG
+
+#ifndef BCMDBG_ERR
+#define BCMDBG_ERR
+#endif /* BCMDBG_ERR */
+
+#ifndef BCMDBG_ASSERT
+#define BCMDBG_ASSERT
+#endif /* BCMDBG_ASSERT */
+
+#endif /* BCMDBG */
+
+#if defined(NO_BCMDBG_ASSERT)
+ #undef BCMDBG_ASSERT
+ #undef BCMASSERT_LOG
+#endif
+
+#if defined(BCMDBG_ASSERT) || defined(BCMASSERT_LOG)
+#define BCMASSERT_SUPPORT
+#endif /* BCMDBG_ASSERT || BCMASSERT_LOG */
+
+/* Macros for doing definition and get/set of bitfields
+ * Usage example, e.g. a three-bit field (bits 4-6):
+ * #define <NAME>_M BITFIELD_MASK(3)
+ * #define <NAME>_S 4
+ * ...
+ * regval = R_REG(osh, &regs->regfoo);
+ * field = GFIELD(regval, <NAME>);
+ * regval = SFIELD(regval, <NAME>, 1);
+ * W_REG(osh, &regs->regfoo, regval);
+ */
+#define BITFIELD_MASK(width) \
+ (((unsigned)1 << (width)) - 1)
+#define GFIELD(val, field) \
+ (((val) >> field ## _S) & field ## _M)
+#define SFIELD(val, field, bits) \
+ (((val) & (~(field ## _M << field ## _S))) | \
+ ((unsigned)(bits) << field ## _S))
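Continuing the example in the comment, a concrete three-bit field at bit position 4, with invented names:

#define EXAMPLE_MODE_M	BITFIELD_MASK(3)	/* 0x7 */
#define EXAMPLE_MODE_S	4

/* With regval == 0x35 (bits 6:4 == 011b):
 *   GFIELD(0x35, EXAMPLE_MODE) == 0x3
 *   SFIELD(0x35, EXAMPLE_MODE, 1) == 0x15
 */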
+
+/* define BCMSMALL to remove misc features for memory-constrained environments */
+#ifdef BCMSMALL
+#undef BCMSPACE
+#define bcmspace FALSE /* if (bcmspace) code is discarded */
+#else
+#define BCMSPACE
+#define bcmspace TRUE /* if (bcmspace) code is retained */
+#endif
+
+/* ROM_ENAB_RUNTIME_CHECK may be set based upon the #define below (for ROM builds). It may also
+ * be defined via makefiles (e.g. ROM auto abandon unoptimized compiles).
+ */
+#if defined(BCMROMBUILD)
+#ifndef ROM_ENAB_RUNTIME_CHECK
+ #define ROM_ENAB_RUNTIME_CHECK
+#endif
+#endif /* BCMROMBUILD */
+
+#ifdef BCM_SH_SFLASH
+ extern bool _bcm_sh_sflash;
+#if defined(ROM_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD)
+ #define BCM_SH_SFLASH_ENAB() (_bcm_sh_sflash)
+#elif defined(BCM_SH_SFLASH_DISABLED)
+ #define BCM_SH_SFLASH_ENAB() (0)
+#else
+ #define BCM_SH_SFLASH_ENAB() (1)
+#endif
+#else
+ #define BCM_SH_SFLASH_ENAB() (0)
+#endif /* BCM_SH_SFLASH */
+
+#ifdef BCM_SFLASH
+ extern bool _bcm_sflash;
+#if defined(ROM_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD)
+ #define BCM_SFLASH_ENAB() (_bcm_sflash)
+#elif defined(BCM_SFLASH_DISABLED)
+ #define BCM_SFLASH_ENAB() (0)
+#else
+ #define BCM_SFLASH_ENAB() (1)
+#endif
+#else
+ #define BCM_SFLASH_ENAB() (0)
+#endif /* BCM_SFLASH */
+
+#ifdef BCM_DELAY_ON_LTR
+ extern bool _bcm_delay_on_ltr;
+#if defined(ROM_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD)
+ #define BCM_DELAY_ON_LTR_ENAB() (_bcm_delay_on_ltr)
+#elif defined(BCM_DELAY_ON_LTR_DISABLED)
+ #define BCM_DELAY_ON_LTR_ENAB() (0)
+#else
+ #define BCM_DELAY_ON_LTR_ENAB() (1)
+#endif
+#else
+ #define BCM_DELAY_ON_LTR_ENAB() (0)
+#endif /* BCM_DELAY_ON_LTR */
+
+/* Max. nvram variable table size */
+#ifndef MAXSZ_NVRAM_VARS
+#ifdef LARGE_NVRAM_MAXSZ
+#define MAXSZ_NVRAM_VARS (LARGE_NVRAM_MAXSZ * 2)
+#else
+#if defined(BCMROMBUILD) || defined(DONGLEBUILD)
+/* SROM12 changes */
+#define MAXSZ_NVRAM_VARS 6144 /* should be reduced */
+#else
+#define LARGE_NVRAM_MAXSZ 8192
+#define MAXSZ_NVRAM_VARS (LARGE_NVRAM_MAXSZ * 2)
+#endif /* BCMROMBUILD || DONGLEBUILD */
+#endif /* LARGE_NVRAM_MAXSZ */
+#endif /* !MAXSZ_NVRAM_VARS */
+
+#ifdef ATE_BUILD
+#ifndef ATE_NVRAM_MAXSIZE
+#define ATE_NVRAM_MAXSIZE 32000
+#endif /* ATE_NVRAM_MAXSIZE */
+#endif /* ATE_BUILD */
+
+#ifdef BCMLFRAG /* BCMLFRAG support enab macros */
+ extern bool _bcmlfrag;
+#if defined(ROM_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD)
+ #define BCMLFRAG_ENAB() (_bcmlfrag)
+#elif defined(BCMLFRAG_DISABLED)
+ #define BCMLFRAG_ENAB() (0)
+#else
+ #define BCMLFRAG_ENAB() (1)
+#endif
+#else
+ #define BCMLFRAG_ENAB() (0)
+#endif /* BCMLFRAG */
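A sketch of how these feature macros are consumed (function invented); the one callsite compiles to a runtime test in ROM-check builds, a constant in fixed builds, or dead code the compiler discards when the feature is off:

static int
lfrag_rx_example(void *pkt)
{
	if (BCMLFRAG_ENAB()) {
		/* fragment-aware handling would go here */
	}
	BCM_REFERENCE(pkt);
	return 0;
}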
+
+#ifdef BCMPCIEDEV /* BCMPCIEDEV support enab macros */
+extern bool _pciedevenab;
+#if defined(ROM_ENAB_RUNTIME_CHECK)
+ #define BCMPCIEDEV_ENAB() (_pciedevenab)
+#elif defined(BCMPCIEDEV_ENABLED)
+ #define BCMPCIEDEV_ENAB() 1
+#else
+ #define BCMPCIEDEV_ENAB() 0
+#endif
+#else
+ #define BCMPCIEDEV_ENAB() 0
+#endif /* BCMPCIEDEV */
+
+#ifdef BCMRESVFRAGPOOL /* BCMRESVFRAGPOOL support enab macros */
+extern bool _resvfragpool_enab;
+#if defined(ROM_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD)
+ #define BCMRESVFRAGPOOL_ENAB() (_resvfragpool_enab)
+#elif defined(BCMRESVFRAGPOOL_DISABLED)
+ #define BCMRESVFRAGPOOL_ENAB() (0)
+#else
+ #define BCMRESVFRAGPOOL_ENAB() (1)
+#endif
+#else
+ #define BCMRESVFRAGPOOL_ENAB() 0
+#endif /* BCMRESVFRAGPOOL */
+
+#ifdef BCMSDIODEV /* BCMSDIODEV support enab macros */
+extern bool _sdiodevenab;
+#if defined(ROM_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD)
+ #define BCMSDIODEV_ENAB() (_sdiodevenab)
+#elif defined(BCMSDIODEV_ENABLED)
+ #define BCMSDIODEV_ENAB() 1
+#else
+ #define BCMSDIODEV_ENAB() 0
+#endif
+#else
+ #define BCMSDIODEV_ENAB() 0
+#endif /* BCMSDIODEV */
+
+#ifdef BCMSPMIS
+extern bool _bcmspmi_enab;
+#if defined(ROM_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD)
+ #define BCMSPMIS_ENAB() (_bcmspmi_enab)
+#elif defined(BCMSPMIS_DISABLED)
+ #define BCMSPMIS_ENAB() 0
+#else
+ #define BCMSPMIS_ENAB() 1
+#endif
+#else
+ #define BCMSPMIS_ENAB() 0
+#endif /* BCMSPMIS */
+
+#ifdef BCMDVFS /* BCMDVFS support enab macros */
+extern bool _dvfsenab;
+#if defined(ROM_ENAB_RUNTIME_CHECK)
+ #define BCMDVFS_ENAB() (_dvfsenab)
+#elif !defined(BCMDVFS_DISABLED)
+ #define BCMDVFS_ENAB() (1)
+#else
+ #define BCMDVFS_ENAB() (0)
+#endif
+#else
+ #define BCMDVFS_ENAB() (0)
+#endif /* BCMDVFS */
+
+/* Max size for reclaimable NVRAM array */
+#ifndef ATE_BUILD
+#ifdef DL_NVRAM
+#define NVRAM_ARRAY_MAXSIZE DL_NVRAM
+#else
+#define NVRAM_ARRAY_MAXSIZE MAXSZ_NVRAM_VARS
+#endif /* DL_NVRAM */
+#else
+#define NVRAM_ARRAY_MAXSIZE ATE_NVRAM_MAXSIZE
+#endif /* ATE_BUILD */
+
+extern uint32 gFWID;
+
+#ifdef BCMFRWDPKT /* BCMFRWDPKT support enab macros */
+ extern bool _bcmfrwdpkt;
+#if defined(ROM_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD)
+ #define BCMFRWDPKT_ENAB() (_bcmfrwdpkt)
+#elif defined(BCMFRWDPKT_DISABLED)
+ #define BCMFRWDPKT_ENAB() (0)
+#else
+ #define BCMFRWDPKT_ENAB() (1)
+#endif
+#else
+ #define BCMFRWDPKT_ENAB() (0)
+#endif /* BCMFRWDPKT */
+
+#ifdef BCMFRWDPOOLREORG /* BCMFRWDPOOLREORG support enab macros */
+ extern bool _bcmfrwdpoolreorg;
+#if defined(ROM_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD)
+ #define BCMFRWDPOOLREORG_ENAB() (_bcmfrwdpoolreorg)
+#elif defined(BCMFRWDPOOLREORG_DISABLED)
+ #define BCMFRWDPOOLREORG_ENAB() (0)
+#else
+ #define BCMFRWDPOOLREORG_ENAB() (1)
+#endif
+#else
+ #define BCMFRWDPOOLREORG_ENAB() (0)
+#endif /* BCMFRWDPOOLREORG */
+
+#ifdef BCMPOOLRECLAIM /* BCMPOOLRECLAIM support enab macros */
+ extern bool _bcmpoolreclaim;
+#if defined(ROM_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD)
+ #define BCMPOOLRECLAIM_ENAB() (_bcmpoolreclaim)
+#elif defined(BCMPOOLRECLAIM_DISABLED)
+ #define BCMPOOLRECLAIM_ENAB() (0)
+#else
+ #define BCMPOOLRECLAIM_ENAB() (1)
+#endif
+#else
+ #define BCMPOOLRECLAIM_ENAB() (0)
+#endif /* BCMPOOLRECLAIM */
+
+/* Chip related low power flags (lpflags) */
+
+#ifndef PAD
+#define _PADLINE(line) pad ## line
+#define _XSTR(line) _PADLINE(line)
+#define PAD _XSTR(__LINE__)
+#endif
+
+#if defined(DONGLEBUILD) && ! defined(__COVERITY__)
+#define MODULE_DETACH(var, detach_func)\
+ do { \
+ BCM_REFERENCE(detach_func); \
+ OSL_SYS_HALT(); \
+ } while (0);
+#define MODULE_DETACH_2(var1, var2, detach_func) MODULE_DETACH(var1, detach_func)
+#define MODULE_DETACH_TYPECASTED(var, detach_func)
+#else
+#define MODULE_DETACH(var, detach_func)\
+ if (var) { \
+ detach_func(var); \
+ (var) = NULL; \
+ }
+#define MODULE_DETACH_2(var1, var2, detach_func) detach_func(var1, var2)
+#define MODULE_DETACH_TYPECASTED(var, detach_func) detach_func(var)
+#endif /* DONGLEBUILD */
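A usage sketch with an invented module type; on non-dongle builds the macro expands to a NULL check, the detach call, and a pointer reset:

struct example_mod;
extern void example_mod_detach(struct example_mod *m);

static void
example_cleanup(struct example_mod **modp)
{
	MODULE_DETACH(*modp, example_mod_detach);
}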
+
+/* When building a ROML image, use a runtime conditional so the compiler
+ * still compiles everything but does not complain "defined but not used",
+ * as #ifdef would at the callsites.
+ * In the end, functions called only under if (0) {} will not be linked
+ * into the final binary unless they're called from other places as well.
+ */
+#if !defined(BCMROMBUILD) || defined(BCMROMSYMGEN_BUILD)
+#define BCM_ATTACH_REF_DECL()
+#define BCM_ATTACH_REF() (1)
+#else
+#define BCM_ATTACH_REF_DECL() static bool bcm_non_roml_build = 0;
+#define BCM_ATTACH_REF() (bcm_non_roml_build)
+#endif
+
+/* For ROM builds, keep it in const section so that it gets ROMmed. If abandoned, move it to
+ * RO section but before ro region start so that FATAL log buf doesn't use this.
+ */
+// Temporary - leave old definition in place until all references are removed elsewhere
+#if defined(ROM_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD)
+#define BCMRODATA_ONTRAP(_data) _data
+#else
+#define BCMRODATA_ONTRAP(_data) __attribute__ ((__section__ (".ro_ontrap." #_data))) _data
+#endif
+// Renamed for consistency with post trap function definition
+#if defined(ROM_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD)
+#define BCMPOST_TRAP_RODATA(_data) _data
+#else
+#define BCMPOST_TRAP_RODATA(_data) __attribute__ ((__section__ (".ro_ontrap." #_data))) _data
+#endif
+
+/* Similar to RO data on trap, we want code that's used after a trap to be placed in a special area
+ * as this means we can use all of the rest of the .text for post trap dumps. Functions with
+ * the BCMPOSTTRAPFN macro applied will either be in ROM or this protected area.
+ * For RAMFNs, the ROM build only needs to know that they won't be in ROM, but the -roml
+ * builds need to know to protect them.
+ */
+#if defined(BCMROMBUILD)
+#define BCMPOSTTRAPFN(_fn) _fn
+#define BCMPOSTTRAPRAMFN(_fn) __attribute__ ((__section__ (".text_ram." #_fn))) _fn
+#if defined(BCMFASTPATH_EXCLUDE_FROM_ROM)
+#define BCMPOSTTRAPFASTPATH(_fn) __attribute__ ((__section__ (".text_ram." #_fn))) _fn
+#else /* BCMFASTPATH_EXCLUDE_FROM_ROM */
+#define BCMPOSTTRAPFASTPATH(fn) BCMPOSTTRAPFN(fn)
+#endif /* BCMFASTPATH_EXCLUDE_FROM_ROM */
+#else
+#if defined(DONGLEBUILD)
+#define BCMPOSTTRAPFN(_fn) __attribute__ ((__section__ (".text_posttrap." #_fn))) _fn
+#else
+#define BCMPOSTTRAPFN(_fn) _fn
+#endif /* DONGLEBUILD */
+#define BCMPOSTTRAPRAMFN(fn) BCMPOSTTRAPFN(fn)
+#define BCMPOSTTRAPFASTPATH(fn) BCMPOSTTRAPFN(fn)
+#endif /* ROMBUILD */
+
+typedef struct bcm_rng * bcm_rng_handle_t;
+
+/* Use BCM_FUNC_PTR() to tag function pointers for ASLR code implementation. It will perform
+ * run-time relocation of a function pointer by translating it from a physical to virtual address.
+ *
+ * BCM_FUNC_PTR() should only be used where the function name is referenced (corresponding to the
+ * relocation entry for that symbol). It should not be used when the function pointer is invoked.
+ */
+void* BCM_ASLR_CODE_FNPTR_RELOCATOR(void *func_ptr);
+#if defined(BCM_ASLR_CODE_FNPTR_RELOC)
+ /* 'func_ptr_err_chk' performs a compile time error check to ensure that only a constant
+ * function name is passed as an argument to BCM_FUNC_PTR(). This ensures that the macro is
+ * only used for function pointer references, and not for function pointer invocations.
+ */
+ #define BCM_FUNC_PTR(func) \
+ ({ static void *func_ptr_err_chk __attribute__ ((unused)) = (func); \
+ BCM_ASLR_CODE_FNPTR_RELOCATOR(func); })
+#else
+ #define BCM_FUNC_PTR(func) (func)
+#endif /* BCM_ASLR_CODE_FNPTR_RELOC */
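A sketch distinguishing the tagged reference (where the address is taken) from the untagged invocation; the callback type and handler are invented:

typedef void (*example_cb_t)(int ev);

static void example_handler(int ev) { BCM_REFERENCE(ev); }
static example_cb_t example_cb;

static void
example_register(void)
{
	/* reference: tagged, so ASLR builds can relocate the address */
	example_cb = (example_cb_t)BCM_FUNC_PTR(example_handler);
}

static void
example_dispatch(int ev)
{
	if (example_cb)
		example_cb(ev);	/* invocation: not tagged */
}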
+
+/*
+ * Timestamps have this tag appended following a null byte which
+ * helps comparison/hashing scripts find and ignore them.
+ */
+#define TIMESTAMP_SUFFIX "<TIMESTAMP>"
+
+#ifdef ASLR_STACK
+/* MMU main thread stack data */
+#define BCM_MMU_MTH_STK_DATA(_data) __attribute__ ((__section__ (".mmu_mth_stack." #_data))) _data
+#endif /* ASLR_STACK */
+
+/* Special section for MMU page-tables. */
+#define BCM_MMU_PAGE_TABLE_DATA(_data) \
+ __attribute__ ((__section__ (".mmu_pagetable." #_data))) _data
+
+/* Some phy initialization code/data can't be reclaimed in dualband mode */
+#if defined(DBAND)
+#define WLBANDINITDATA(_data) _data
+#define WLBANDINITFN(_fn) _fn
+#else
+#define WLBANDINITDATA(_data) BCMINITDATA(_data)
+#define WLBANDINITFN(_fn) BCMINITFN(_fn)
+#endif
+
+/* Tag struct members to make it explicitly clear that they are physical addresses. These are
+ * typically used in data structs shared by the firmware and host code (or off-line utilities). The
+ * use of the macro avoids customer visible API/name changes.
+ */
+#if defined(BCM_PHYS_ADDR_NAME_CONVERSION)
+ #define PHYS_ADDR_N(name) name ## _phys
+#else
+ #define PHYS_ADDR_N(name) name
+#endif
+
+/*
+ * A compact form for a list of valid register address offsets.
+ * Used for when dumping the contents of the register set for the user.
+ *
+ * bmp_cnt holds either a bitmap or a count. If the MSB (bit 31) is set, then
+ * bmp_cnt[30:0] holds a count, i.e., the number of valid registers whose values are
+ * contiguous from the start address. If the MSB is zero, then the value
+ * should be considered a bitmap of 31 discrete addresses from the base addr.
+ * Note: the data type for bmp_cnt is chosen as an array of uint8 to avoid padding.
+ */
+typedef struct _regs_bmp_list {
+ uint16 addr; /* start address offset */
+ uint8 bmp_cnt[4]; /* bit[31]=1, bit[30:0] is count else it is a bitmap */
+} regs_list_t;
+
+#endif /* _bcmdefs_h_ */
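A sketch of expanding one regs_list_t entry per the encoding above; the little-endian assembly of bmp_cnt and the 4-byte register stride are assumptions for illustration:

static void
regs_list_expand_example(const regs_list_t *e, void (*emit)(uint16 addr))
{
	uint32 bmp_cnt = (uint32)e->bmp_cnt[0] |
	                 ((uint32)e->bmp_cnt[1] << 8) |
	                 ((uint32)e->bmp_cnt[2] << 16) |
	                 ((uint32)e->bmp_cnt[3] << 24);
	uint32 i;

	if (bmp_cnt & 0x80000000u) {
		/* bit 31 set: bits 30:0 count registers contiguous from addr */
		for (i = 0; i < (bmp_cnt & 0x7fffffffu); i++)
			emit(e->addr + (uint16)(i * 4u));
	} else {
		/* bit 31 clear: bitmap of 31 discrete offsets from addr */
		for (i = 0; i < 31u; i++) {
			if (bmp_cnt & (1u << i))
				emit(e->addr + (uint16)(i * 4u));
		}
	}
}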
diff --git a/bcmdhd.101.10.361.x/include/bcmdevs.h b/bcmdhd.101.10.361.x/include/bcmdevs.h
new file mode 100755
index 0000000..87a884c
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/bcmdevs.h
@@ -0,0 +1,625 @@
+/*
+ * Broadcom device-specific manifest constants.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _BCMDEVS_H
+#define _BCMDEVS_H
+
+/* PCI vendor IDs */
+#define VENDOR_EPIGRAM 0xfeda
+#define VENDOR_BROADCOM 0x14e4
+#define VENDOR_3COM 0x10b7
+#define VENDOR_NETGEAR 0x1385
+#define VENDOR_DIAMOND 0x1092
+#define VENDOR_INTEL 0x8086
+#define VENDOR_DELL 0x1028
+#define VENDOR_HP 0x103c
+#define VENDOR_HP_COMPAQ 0x0e11
+#define VENDOR_APPLE 0x106b
+#define VENDOR_SI_IMAGE 0x1095 /* Silicon Image, used by Arasan SDIO Host */
+#define VENDOR_BUFFALO 0x1154 /* Buffalo vendor id */
+#define VENDOR_TI 0x104c /* Texas Instruments */
+#define VENDOR_RICOH 0x1180 /* Ricoh */
+#define VENDOR_JMICRON 0x197b
+
+/* precommit failed when this is removed */
+/* BLAZAR_BRANCH_101_10_DHD_001/build/dhd/linux-fc19/brix-brcm */
+/* TBD: Revisit later */
+#ifdef BCMINTERNAL
+#define VENDOR_JINVANI 0x1947 /* Jinvani Systech, Inc. */
+#endif
+
+/* PCMCIA vendor IDs */
+#define VENDOR_BROADCOM_PCMCIA 0x02d0
+
+/* SDIO vendor IDs */
+#define VENDOR_BROADCOM_SDIO 0x00BF
+
+/* DONGLE VID/PIDs */
+#define BCM_DNGL_VID 0x0a5c
+#define BCM_DNGL_BL_PID_4328 0xbd12
+#define BCM_DNGL_BL_PID_4332 0xbd18
+#define BCM_DNGL_BL_PID_4360 0xbd1d
+
+#define BCM_DNGL_BDC_PID 0x0bdc
+#define BCM_DNGL_JTAG_PID 0x4a44
+
+/* Pseudo IDs */
+#define FPGA_JTAGM_ID 0x43f0 /* FPGA jtagm device id */
+#define BCM_JTAGM_ID 0x43f1 /* BCM jtagm device id */
+#define SDIOH_FPGA_ID 0x43f2 /* sdio host fpga */
+#define BCM_SDIOH_ID 0x43f3 /* BCM sdio host id */
+#define SDIOD_FPGA_ID 0x43f4 /* sdio device fpga */
+#define SPIH_FPGA_ID 0x43f5 /* PCI SPI Host Controller FPGA */
+#define BCM_SPIH_ID 0x43f6 /* Synopsis SPI Host Controller */
+#define MIMO_FPGA_ID 0x43f8 /* FPGA mimo minimacphy device id */
+#define BCM_JTAGM2_ID 0x43f9 /* BCM alternate jtagm device id */
+#define SDHCI_FPGA_ID 0x43fa /* Standard SDIO Host Controller FPGA */
+#define BCM4710_DEVICE_ID 0x4710 /* 4710 primary function 0 */
+#define BCM47XX_AUDIO_ID 0x4711 /* 47xx audio codec */
+#define BCM47XX_V90_ID 0x4712 /* 47xx v90 codec */
+#define BCM47XX_ENET_ID 0x4713 /* 47xx enet */
+#define BCM47XX_EXT_ID 0x4714 /* 47xx external i/f */
+#define BCM47XX_GMAC_ID 0x4715 /* 47xx Unimac based GbE */
+#define BCM47XX_USBH_ID 0x4716 /* 47xx usb host */
+#define BCM47XX_USBD_ID 0x4717 /* 47xx usb device */
+#define BCM47XX_IPSEC_ID 0x4718 /* 47xx ipsec */
+#define BCM47XX_ROBO_ID 0x4719 /* 47xx/53xx roboswitch core */
+#define BCM47XX_USB20H_ID 0x471a /* 47xx usb 2.0 host */
+#define BCM47XX_USB20D_ID 0x471b /* 47xx usb 2.0 device */
+#define BCM47XX_ATA100_ID 0x471d /* 47xx parallel ATA */
+#define BCM47XX_SATAXOR_ID 0x471e /* 47xx serial ATA & XOR DMA */
+#define BCM47XX_GIGETH_ID 0x471f /* 47xx GbE (5700) */
+#define BCM47XX_USB30H_ID 0x472a /* 47xx usb 3.0 host */
+#define BCM47XX_USB30D_ID 0x472b /* 47xx usb 3.0 device */
+#define BCM47XX_USBHUB_ID 0x472c /* 47xx usb hub */
+#define BCM47XX_SMBUS_EMU_ID 0x47fe /* 47xx emulated SMBus device */
+#define BCM47XX_XOR_EMU_ID 0x47ff /* 47xx emulated XOR engine */
+#define JINVANI_SDIOH_ID 0x4743 /* Jinvani SDIO Gold Host */
+#define BCM27XX_SDIOH_ID 0x2702 /* BCM27xx Standard SDIO Host */
+#define PCIXX21_FLASHMEDIA_ID 0x803b /* TI PCI xx21 Standard Host Controller */
+#define PCIXX21_SDIOH_ID 0x803c /* TI PCI xx21 Standard Host Controller */
+#define R5C822_SDIOH_ID 0x0822 /* Ricoh Co Ltd R5C822 SD/SDIO/MMC/MS/MSPro Host */
+#define JMICRON_SDIOH_ID 0x2381 /* JMicron Standard SDIO Host Controller */
+
+/* PCI Device IDs */
+/* DEPRECATED but used */
+#define BCM4318_D11G_ID 0x4318 /* 4318 802.11b/g id */
+/* DEPRECATED */
+
+#define BCM4360_D11AC_ID 0x43a0
+#define BCM4360_D11AC2G_ID 0x43a1
+#define BCM4360_D11AC5G_ID 0x43a2
+#define BCM4352_D11AC_ID 0x43b1 /* 4352 802.11ac dualband device */
+#define BCM4352_D11AC2G_ID 0x43b2 /* 4352 802.11ac 2.4G device */
+#define BCM4352_D11AC5G_ID 0x43b3 /* 4352 802.11ac 5G device */
+#define BCM43602_D11AC_ID 0x43ba /* ac dualband PCI devid SPROM programmed */
+#define BCM43602_D11AC2G_ID 0x43bb /* 43602 802.11ac 2.4G device */
+#define BCM43602_D11AC5G_ID 0x43bc /* 43602 802.11ac 5G device */
+
+#define BCM43012_D11N_ID 0xA804 /* 43012 802.11n dualband device */
+#define BCM43012_D11N2G_ID 0xA805 /* 43012 802.11n 2.4G device */
+#define BCM43012_D11N5G_ID 0xA806 /* 43012 802.11n 5G device */
+#define BCM43014_D11N_ID 0x4495 /* 43014 802.11n dualband device */
+#define BCM43014_D11N2G_ID 0x4496 /* 43014 802.11n 2.4G device */
+#define BCM43014_D11N5G_ID 0x4497 /* 43014 802.11n 5G device */
+#define BCM43013_D11N_ID 0x4498 /* 43013 802.11n dualband device */
+#define BCM43013_D11N2G_ID 0x4499 /* 43013 802.11n 2.4G device */
+#define BCM43013_D11N5G_ID 0x449a /* 43013 802.11n 5G device */
+
+/* PCI Subsystem ID */
+#define BCM4376_D11AX_ID 0x4445 /* 4376 802.11ax dualband device */
+#define BCM4376_D11AX2G_ID 0x4436 /* 4376 802.11ax 2.4G device */
+#define BCM4376_D11AX5G_ID 0x4437 /* 4376 802.11ax 5G device */
+
+#define BCM4378_D11AX_ID 0x4425 /* 4378 802.11ax dualband device */
+#define BCM4378_D11AX2G_ID 0x4426 /* 4378 802.11ax 2.4G device */
+#define BCM4378_D11AX5G_ID 0x4427 /* 4378 802.11ax 5G device */
+
+#define BCM4387_D11AX_ID 0x4433 /* 4387 802.11ax dualband device */
+#define BCM4388_D11AX_ID 0x4434 /* 4388 802.11ax dualband device */
+#define BCM4385_D11AX_ID 0x4442 /* 4385 802.11ax dualband device */
+#define BCM4389_D11AX_ID 0x4441 /* 4389 802.11ax dualband device */
+#define BCM4397_D11AX_ID 0x4443 /* 4397 802.11ax dualband device */
+
+#define BCM4362_D11AX_ID 0x4490 /* 4362 802.11ax dualband device */
+#define BCM4362_D11AX2G_ID 0x4491 /* 4362 802.11ax 2.4G device */
+#define BCM4362_D11AX5G_ID 0x4492 /* 4362 802.11ax 5G device */
+#define BCM43751_D11AX_ID 0x449a /* 43751 802.11ax dualband device */
+#define BCM43751_D11AX2G_ID 0x449b /* 43751 802.11ax 2.4G device */
+#define BCM43751_D11AX5G_ID 0x449c /* 43751 802.11ax 5G device */
+#define BCM43752_D11AX_ID 0x449d /* 43752 802.11ax dualband device */
+#define BCM43752_D11AX2G_ID 0x449e /* 43752 802.11ax 2.4G device */
+#define BCM43752_D11AX5G_ID 0x449f /* 43752 802.11ax 5G device */
+
+/* TBD change below values */
+#define BCM4369_D11AX_ID 0x4470 /* 4369 802.11ax dualband device */
+#define BCM4369_D11AX2G_ID 0x4471 /* 4369 802.11ax 2.4G device */
+#define BCM4369_D11AX5G_ID 0x4472 /* 4369 802.11ax 5G device */
+
+#define BCM4375_D11AX_ID 0x4475 /* 4375 802.11ax dualband device */
+#define BCM4375_D11AX2G_ID 0x4476 /* 4375 802.11ax 2.4G device */
+#define BCM4375_D11AX5G_ID 0x4477 /* 4375 802.11ax 5G device */
+
+#define BCM4377_D11AX_ID 0x4480 /* 4377 802.11ax dualband device */
+#define BCM4377_D11AX2G_ID 0x4481 /* 4377 802.11ax 2.4G device */
+#define BCM4377_D11AX5G_ID 0x4482 /* 4377 802.11ax 5G device */
+
+/* 4377 802.11ax dualband device with multifunction */
+#define BCM4377_M_D11AX_ID 0x4488
+
+/* Chip IDs */
+
+#define BCM43143_CHIP_ID 43143 /* 43143 chipcommon chipid */
+#define BCM43242_CHIP_ID 43242 /* 43242 chipcommon chipid */
+#define BCM43460_CHIP_ID 43460 /* 4360 chipcommon chipid (OTP, RBBU) */
+#define BCM4360_CHIP_ID 0x4360 /* 4360 chipcommon chipid */
+#define BCM43362_CHIP_ID 43362 /* 43362 chipcommon chipid */
+#define BCM4330_CHIP_ID 0x4330 /* 4330 chipcommon chipid */
+#define BCM4324_CHIP_ID 0x4324 /* 4324 chipcommon chipid */
+#define BCM4334_CHIP_ID 0x4334 /* 4334 chipcommon chipid */
+#define BCM4335_CHIP_ID 0x4335 /* 4335 chipcommon chipid */
+#define BCM4339_CHIP_ID 0x4339 /* 4339 chipcommon chipid */
+#define BCM4352_CHIP_ID 0x4352 /* 4352 chipcommon chipid */
+#define BCM43526_CHIP_ID 0xAA06
+#define BCM43340_CHIP_ID 43340 /* 43340 chipcommon chipid */
+#define BCM43341_CHIP_ID 43341 /* 43341 chipcommon chipid */
+#define BCM43562_CHIP_ID 0xAA2A /* 43562 chipcommon chipid */
+#define BCM43012_CHIP_ID 0xA804 /* 43012 chipcommon chipid */
+#define BCM43013_CHIP_ID 0xA805 /* 43013 chipcommon chipid */
+#define BCM43014_CHIP_ID 0xA806 /* 43014 chipcommon chipid */
+#define BCM4369_CHIP_ID 0x4369 /* 4369 chipcommon chipid */
+#define BCM4375_CHIP_ID 0x4375 /* 4375 chipcommon chipid */
+#define BCM4376_CHIP_ID 0x4376 /* 4376 chipcommon chipid */
+#define BCM4354_CHIP_ID 0x4354 /* 4354 chipcommon chipid */
+#define BCM4356_CHIP_ID 0x4356 /* 4356 chipcommon chipid */
+#define BCM4371_CHIP_ID 0x4371 /* 4371 chipcommon chipid */
+#define BCM43569_CHIP_ID 0xAA31 /* 43569 chipcommon chipid */
+
+#define BCM4345_CHIP_ID 0x4345 /* 4345 chipcommon chipid */
+#define BCM43454_CHIP_ID 43454 /* 43454 chipcommon chipid */
+#define BCM43430_CHIP_ID 43430 /* 43430 chipcommon chipid */
+#define BCM4359_CHIP_ID 0x4359 /* 4359 chipcommon chipid */
+#define BCM4362_CHIP_ID 0x4362 /* 4362 chipcommon chipid */
+#define BCM43751_CHIP_ID 0xAAE7 /* 43751 chipcommon chipid */
+#define BCM43752_CHIP_ID 0xAAE8 /* 43752 chipcommon chipid */
+#define BCM4377_CHIP_ID 0x4377 /* 4377 chipcommon chipid */
+#define BCM4378_CHIP_ID 0x4378 /* 4378 chipcommon chipid */
+#define BCM4385_CHIP_ID 0x4385 /* 4385 chipcommon chipid */
+#define BCM4387_CHIP_ID 0x4387 /* 4387 chipcommon chipid */
+#define BCM4388_CHIP_ID 0x4388 /* 4388 chipcommon chipid */
+#define BCM4389_CHIP_ID 0x4389 /* 4389 chipcommon chipid */
+#define BCM4397_CHIP_ID 0x4397 /* 4397 chipcommon chipid */
+
+#define BCM4362_CHIP(chipid) (CHIPID(chipid) == BCM4362_CHIP_ID)
+#define BCM4362_CHIP_GRPID BCM4362_CHIP_ID
+
+#define BCM4369_CHIP(chipid) ((CHIPID(chipid) == BCM4369_CHIP_ID) || \
+ (CHIPID(chipid) == BCM4377_CHIP_ID) || \
+ (CHIPID(chipid) == BCM4375_CHIP_ID))
+#define BCM4369_CHIP_GRPID BCM4369_CHIP_ID: \
+ case BCM4377_CHIP_ID: \
+ case BCM4375_CHIP_ID
+
+#define BCM4385_CHIP(chipid) (CHIPID(chipid) == BCM4385_CHIP_ID)
+#define BCM4385_CHIP_GRPID BCM4385_CHIP_ID
+
+#define BCM4378_CHIP(chipid) (CHIPID(chipid) == BCM4378_CHIP_ID)
+#define BCM4378_CHIP_GRPID BCM4378_CHIP_ID
+
+#define BCM4376_CHIP_GRPID BCM4376_CHIP_ID
+#define BCM4376_CHIP(chipid) (CHIPID(chipid) == BCM4376_CHIP_ID)
+
+#define BCM4387_CHIP(chipid) (CHIPID(chipid) == BCM4387_CHIP_ID)
+#define BCM4387_CHIP_GRPID BCM4387_CHIP_ID
+
+#define BCM4388_CHIP(chipid) (CHIPID(chipid) == BCM4388_CHIP_ID)
+#define BCM4388_CHIP_GRPID BCM4388_CHIP_ID
+
+#define BCM4389_CHIP(chipid) (CHIPID(chipid) == BCM4389_CHIP_ID)
+#define BCM4389_CHIP_GRPID BCM4389_CHIP_ID
+
+#define BCM4397_CHIP(chipid) (CHIPID(chipid) == BCM4397_CHIP_ID)
+#define BCM4397_CHIP_GRPID BCM4397_CHIP_ID
+
+#define BCM43602_CHIP_ID 0xaa52 /* 43602 chipcommon chipid */
+#define BCM43462_CHIP_ID 0xa9c6 /* 43462 chipcommon chipid */
+#define BCM43522_CHIP_ID 0xaa02 /* 43522 chipcommon chipid */
+#define BCM43602_CHIP(chipid) ((CHIPID(chipid) == BCM43602_CHIP_ID) || \
+ (CHIPID(chipid) == BCM43462_CHIP_ID) || \
+ (CHIPID(chipid) == BCM43522_CHIP_ID)) /* 43602 variations */
+#define BCM43012_CHIP(chipid) ((CHIPID(chipid) == BCM43012_CHIP_ID) || \
+ (CHIPID(chipid) == BCM43013_CHIP_ID) || \
+ (CHIPID(chipid) == BCM43014_CHIP_ID))
+#define CASE_BCM43602_CHIP case BCM43602_CHIP_ID: /* fallthrough */ \
+ case BCM43462_CHIP_ID: /* fallthrough */ \
+ case BCM43522_CHIP_ID
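A sketch showing the two usage contexts: the _CHIP() predicates in expressions, and the _GRPID / CASE_ macros expanding to extra case labels inside a switch (function invented):

static int
chip_supported_example(uint chipid)
{
	switch (CHIPID(chipid)) {
	case BCM4369_CHIP_GRPID:	/* 4369, 4377, 4375 */
	CASE_BCM43602_CHIP:		/* 43602, 43462, 43522 */
		return 1;
	default:
		return BCM4378_CHIP(chipid);	/* predicate form */
	}
}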
+
+/* Package IDs */
+
+#define HDLSIM_PKG_ID 14 /* HDL simulator package id */
+#define HWSIM_PKG_ID 15 /* Hardware simulator package id */
+
+#define PCIXX21_FLASHMEDIA0_ID 0x8033 /* TI PCI xx21 Standard Host Controller */
+#define PCIXX21_SDIOH0_ID 0x8034 /* TI PCI xx21 Standard Host Controller */
+
+#define BCM43602_12x12_PKG_ID (0x1) /* 12x12 pins package, used for e.g. router designs */
+
+/* 43012 package ID's
+ http://confluence.broadcom.com/display/WLAN/BCM43012+Variants%2Cpackage%2Cballmap%2Cfloorplan#
+ BCM43012Variants,package,ballmap,floorplan-PackageOptions
+*/
+#define BCM943012_WLCSPOLY_PKG_ID 0x0 /* WLCSP Oly package */
+#define BCM943012_FCBGA_PKG_ID 0x3 /* FCBGA debug package */
+#define BCM943012_WLCSPWE_PKG_ID 0x1 /* WLCSP WE package */
+#define BCM943012_FCBGAWE_PKG_ID 0x5 /* FCBGA WE package */
+#define BCM943012_WLBGA_PKG_ID 0x2 /* WLBGA package */
+
+/* boardflags */
+#define BFL_BTC2WIRE 0x00000001 /* old 2wire Bluetooth coexistence, OBSOLETE */
+#define BFL_BTCOEX 0x00000001 /* Board supports BTCOEX */
+#define BFL_PACTRL 0x00000002 /* Board has gpio 9 controlling the PA */
+#define BFL_AIRLINEMODE 0x00000004 /* Board implements gpio radio disable indication */
+#define BFL_ADCDIV 0x00000008 /* Board has the rssi ADC divider */
+#define BFL_DIS_256QAM 0x00000008
+ /* for 4360, this bit is to disable 256QAM support */
+#define BFL_ENETROBO 0x00000010 /* Board has robo switch or core */
+#define BFL_TSSIAVG 0x00000010 /* TSSI averaging for ACPHY chips */
+#define BFL_NOPLLDOWN 0x00000020 /* Not ok to power down the chip pll and oscillator */
+#define BFL_CCKHIPWR 0x00000040 /* Can do high-power CCK transmission */
+#define BFL_ENETADM 0x00000080 /* Board has ADMtek switch */
+#define BFL_ENETVLAN 0x00000100 /* Board has VLAN capability */
+#define BFL_LTECOEX 0x00000200 /* LTE Coex enabled */
+#define BFL_NOPCI 0x00000400 /* Board leaves PCI floating */
+#define BFL_FEM 0x00000800 /* Board supports the Front End Module */
+#define BFL_EXTLNA 0x00001000 /* Board has an external LNA in 2.4GHz band */
+#define BFL_HGPA 0x00002000 /* Board has a high gain PA */
+#define BFL_BTC2WIRE_ALTGPIO 0x00004000 /* Board's BTC 2wire is in the alternate gpios */
+#define BFL_ALTIQ 0x00008000 /* Alternate I/Q settings */
+#define BFL_NOPA 0x00010000 /* Board has no PA */
+#define BFL_RSSIINV 0x00020000 /* Board's RSSI uses positive slope(not TSSI) */
+#define BFL_PAREF 0x00040000 /* Board uses the PARef LDO */
+#define BFL_3TSWITCH 0x00080000 /* Board uses a triple throw switch shared with BT */
+#define BFL_PHASESHIFT 0x00100000 /* Board can support phase shifter */
+#define BFL_BUCKBOOST 0x00200000 /* Power topology uses BUCKBOOST */
+#define BFL_FEM_BT 0x00400000 /* Board has FEM and switch to share antenna w/ BT */
+#define BFL_NOCBUCK 0x00800000 /* Power topology doesn't use CBUCK */
+#define BFL_CCKFAVOREVM 0x01000000 /* Favor CCK EVM over spectral mask */
+#define BFL_PALDO 0x02000000 /* Power topology uses PALDO */
+#define BFL_LNLDO2_2P5 0x04000000 /* Select 2.5V as LNLDO2 output voltage */
+/* BFL_FASTPWR and BFL_UCPWRCTL_MININDX are non-overlapping features and use the same bit */
+#define BFL_FASTPWR 0x08000000 /* Fast switch/antenna powerup (no POR WAR) */
+#define BFL_UCPWRCTL_MININDX 0x08000000 /* Enforce min power index to avoid FEM damage */
+#define BFL_EXTLNA_5GHz 0x10000000 /* Board has an external LNA in 5GHz band */
+#define BFL_TRSW_1by2 0x20000000 /* Board has 2 TRSW's in 1by2 designs */
+#define BFL_GAINBOOSTA01 0x20000000 /* 5g Gainboost for core0 and core1 */
+#define BFL_LO_TRSW_R_5GHz 0x40000000 /* In 5G do not throw TRSW to T for clipLO gain */
+#define BFL_ELNA_GAINDEF 0x80000000 /* Backoff InitGain based on elna_2g/5g field
+ * when this flag is set
+ */
+#define BFL_EXTLNA_TX 0x20000000 /* Temp boardflag to indicate to */
+
+/* boardflags2 */
+#define BFL2_RXBB_INT_REG_DIS 0x00000001 /* Board has an external rxbb regulator */
+#define BFL2_APLL_WAR 0x00000002 /* Flag to implement alternative A-band PLL settings */
+#define BFL2_TXPWRCTRL_EN 0x00000004 /* Board permits enabling TX Power Control */
+#define BFL2_2X4_DIV 0x00000008 /* Board supports the 2X4 diversity switch */
+#define BFL2_5G_PWRGAIN 0x00000010 /* Board supports 5G band power gain */
+#define BFL2_PCIEWAR_OVR 0x00000020 /* Board overrides ASPM and Clkreq settings */
+#define BFL2_CAESERS_BRD 0x00000040 /* Board is Caesers brd (unused by sw) */
+#define BFL2_WLCX_ATLAS 0x00000040 /* Board flag to initialize ECI for WLCX on FL-ATLAS */
+#define BFL2_BTC3WIRE 0x00000080 /* Board support legacy 3 wire or 4 wire */
+#define BFL2_BTCLEGACY 0x00000080 /* Board support legacy 3/4 wire, to replace
+ * BFL2_BTC3WIRE
+ */
+#define BFL2_SKWRKFEM_BRD 0x00000100 /* 4321mcm93 board uses Skyworks FEM */
+#define BFL2_SPUR_WAR 0x00000200 /* Board has a WAR for clock-harmonic spurs */
+#define BFL2_GPLL_WAR 0x00000400 /* Flag to narrow G-band PLL loop b/w */
+#define BFL2_TRISTATE_LED 0x00000800 /* Tri-state the LED */
+#define BFL2_SINGLEANT_CCK 0x00001000 /* Tx CCK pkts on Ant 0 only */
+#define BFL2_2G_SPUR_WAR 0x00002000 /* WAR to reduce and avoid clock-harmonic spurs in 2G */
+#define BFL2_BPHY_ALL_TXCORES 0x00004000 /* Transmit bphy frames using all tx cores */
+#define BFL2_FCC_BANDEDGE_WAR 0x00008000 /* Activates WAR to improve FCC bandedge performance */
+#define BFL2_DAC_SPUR_IMPROVEMENT 0x00008000 /* Reducing DAC Spurs */
+#define BFL2_GPLL_WAR2 0x00010000 /* Flag to widen G-band PLL loop b/w */
+#define BFL2_REDUCED_PA_TURNONTIME 0x00010000 /* Flag to reduce PA turn on Time */
+#define BFL2_IPALVLSHIFT_3P3 0x00020000 /* Flag to Activate the PR 74115 PA Level Shift
+ * Workaround where the gpaio pin is connected to 3.3V
+ */
+#define BFL2_INTERNDET_TXIQCAL 0x00040000 /* Use internal envelope detector for TX IQCAL */
+#define BFL2_XTALBUFOUTEN 0x00080000 /* Keep the buffered Xtal output from radio on */
+ /* Most drivers will turn it off without this flag */
+ /* to save power. */
+
+#define BFL2_ANAPACTRL_2G 0x00100000 /* 2G ext PAs are controlled by analog PA ctrl lines */
+#define BFL2_ANAPACTRL_5G 0x00200000 /* 5G ext PAs are controlled by analog PA ctrl lines */
+#define BFL2_ELNACTRL_TRSW_2G 0x00400000 /* AZW4329: 2G gmode_elna_gain controls TR Switch */
+#define BFL2_BT_SHARE_ANT0 0x00800000 /* share core0 antenna with BT */
+#define BFL2_TEMPSENSE_HIGHER 0x01000000 /* The tempsense threshold can sustain higher value
+ * than programmed. The exact delta is decided by
+ * driver per chip/boardtype. This can be used
+ * when tempsense qualification happens after shipment
+ */
+#define BFL2_BTC3WIREONLY 0x02000000 /* standard 3 wire btc only. 4 wire not supported */
+#define BFL2_PWR_NOMINAL 0x04000000 /* 0: power reduction on, 1: no power reduction */
+#define BFL2_EXTLNA_PWRSAVE 0x08000000 /* boardflag to enable ucode to apply power save */
+ /* ucode control of eLNA during Tx */
+#define BFL2_SDR_EN 0x20000000 /* SDR enabled or disabled */
+#define BFL2_DYNAMIC_VMID 0x10000000 /* boardflag to enable dynamic Vmid idle TSSI CAL */
+#define BFL2_LNA1BYPFORTR2G 0x40000000 /* acphy, enable lna1 bypass for clip gain, 2g */
+#define BFL2_LNA1BYPFORTR5G 0x80000000 /* acphy, enable lna1 bypass for clip gain, 5g */
+
+/* SROM 11 - 11ac boardflag definitions */
+#define BFL_SROM11_BTCOEX 0x00000001 /* Board supports BTCOEX */
+#define BFL_SROM11_WLAN_BT_SH_XTL 0x00000002 /* bluetooth and wlan share same crystal */
+#define BFL_SROM11_EXTLNA 0x00001000 /* Board has an external LNA in 2.4GHz band */
+#define BFL_SROM11_EPA_TURNON_TIME 0x00018000 /* 2 bits for different PA turn on times */
+#define BFL_SROM11_EPA_TURNON_TIME_SHIFT 15
+#define BFL_SROM11_PRECAL_TX_IDX 0x00040000 /* Dedicated TX IQLOCAL IDX values */
+ /* per subband, as derived from 43602A1 MCH5 */
+#define BFL_SROM11_EXTLNA_5GHz 0x10000000 /* Board has an external LNA in 5GHz band */
+#define BFL_SROM11_GAINBOOSTA01 0x20000000 /* 5g Gainboost for core0 and core1 */
+#define BFL2_SROM11_APLL_WAR 0x00000002 /* Flag to implement alternative A-band PLL settings */
+#define BFL2_SROM11_ANAPACTRL_2G 0x00100000 /* 2G ext PAs are ctrl-ed by analog PA ctrl lines */
+#define BFL2_SROM11_ANAPACTRL_5G 0x00200000 /* 5G ext PAs are ctrl-ed by analog PA ctrl lines */
+#define BFL2_SROM11_SINGLEANT_CCK 0x00001000 /* Tx CCK pkts on Ant 0 only */
+#define BFL2_SROM11_EPA_ON_DURING_TXIQLOCAL 0x00020000 /* Keep ext. PA's on in TX IQLO CAL */
+
+/* boardflags3 */
+#define BFL3_FEMCTRL_SUB 0x00000007 /* acphy, subrevs of femctrl on top of srom_femctrl */
+#define BFL3_RCAL_WAR 0x00000008 /* acphy, rcal war active on this board (4335a0) */
+#define BFL3_TXGAINTBLID 0x00000070 /* acphy, txgain table id */
+#define BFL3_TXGAINTBLID_SHIFT 0x4 /* acphy, txgain table id shift bit */
+#define BFL3_TSSI_DIV_WAR 0x00000080 /* acphy, Separate paparam for 20/40/80 */
+#define BFL3_TSSI_DIV_WAR_SHIFT 0x7 /* acphy, Separate paparam for 20/40/80 shift bit */
+#define BFL3_FEMTBL_FROM_NVRAM 0x00000100 /* acphy, femctrl table is read from nvram */
+#define BFL3_FEMTBL_FROM_NVRAM_SHIFT 0x8 /* acphy, femctrl table is read from nvram */
+#define BFL3_AGC_CFG_2G 0x00000200 /* acphy, gain control configuration for 2G */
+#define BFL3_AGC_CFG_5G 0x00000400 /* acphy, gain control configuration for 5G */
+#define BFL3_PPR_BIT_EXT 0x00000800 /* acphy, bit position for 1bit extension for ppr */
+#define BFL3_PPR_BIT_EXT_SHIFT 11 /* acphy, bit shift for 1bit extension for ppr */
+#define BFL3_BBPLL_SPR_MODE_DIS 0x00001000 /* acphy, disables bbpll spur modes */
+#define BFL3_RCAL_OTP_VAL_EN 0x00002000 /* acphy, to read rcal_trim value from otp */
+#define BFL3_2GTXGAINTBL_BLANK 0x00004000 /* acphy, blank the first X ticks of 2g gaintbl */
+#define BFL3_2GTXGAINTBL_BLANK_SHIFT 14 /* acphy, blank the first X ticks of 2g gaintbl */
+#define BFL3_5GTXGAINTBL_BLANK 0x00008000 /* acphy, blank the first X ticks of 5g gaintbl */
+#define BFL3_5GTXGAINTBL_BLANK_SHIFT 15 /* acphy, blank the first X ticks of 5g gaintbl */
+#define BFL3_PHASETRACK_MAX_ALPHABETA 0x00010000 /* acphy, to max out alpha,beta to 511 */
+#define BFL3_PHASETRACK_MAX_ALPHABETA_SHIFT 16 /* acphy, to max out alpha,beta to 511 */
+/* acphy, to use backed off gaintbl for lte-coex */
+#define BFL3_LTECOEX_GAINTBL_EN 0x00060000
+/* acphy, to use backed off gaintbl for lte-coex */
+#define BFL3_LTECOEX_GAINTBL_EN_SHIFT 17
+#define BFL3_5G_SPUR_WAR 0x00080000 /* acphy, enable spur WAR in 5G band */
+
+/* acphy: lpmode2g and lpmode_5g related boardflags */
+#define BFL3_ACPHY_LPMODE_2G 0x00300000 /* bits 20:21 for lpmode_2g choice */
+#define BFL3_ACPHY_LPMODE_2G_SHIFT 20
+
+#define BFL3_ACPHY_LPMODE_5G 0x00C00000 /* bits 22:23 for lpmode_5g choice */
+#define BFL3_ACPHY_LPMODE_5G_SHIFT 22
+
+#define BFL3_1X1_RSDB_ANT 0x01000000 /* to find if 2-ant RSDB board or 1-ant RSDB board */
+#define BFL3_1X1_RSDB_ANT_SHIFT 24
+
+#define BFL3_EXT_LPO_ISCLOCK 0x02000000 /* External LPO is clock, not x-tal */
+#define BFL3_FORCE_INT_LPO_SEL 0x04000000 /* Force internal lpo */
+#define BFL3_FORCE_EXT_LPO_SEL 0x08000000 /* Force external lpo */
+
+#define BFL3_EN_BRCM_IMPBF 0x10000000 /* acphy, Allow BRCM Implicit TxBF */
+
+#define BFL3_PADCAL_OTP_VAL_EN 0x20000000 /* acphy, to read pad cal values from otp */
+
+#define BFL3_AVVMID_FROM_NVRAM 0x40000000 /* Read Av Vmid from NVRAM */
+#define BFL3_VLIN_EN_FROM_NVRAM 0x80000000 /* Read Vlin En from NVRAM */
+
+#define BFL3_AVVMID_FROM_NVRAM_SHIFT 30 /* Read Av Vmid from NVRAM */
+#define BFL3_VLIN_EN_FROM_NVRAM_SHIFT 31 /* Enable Vlin from NVRAM */
+
+/* boardflags4 for SROM12/SROM13 */
+
+/* To distinguish between normal and 4dB pad boards */
+#define BFL4_SROM12_4dBPAD (1u << 0)
+
+/* Determine power detector type for 2G */
+#define BFL4_SROM12_2G_DETTYPE (1u << 1u)
+
+/* Determine power detector type for 5G */
+#define BFL4_SROM12_5G_DETTYPE (1u << 2u)
+
+/* using pa_dettype from SROM13 flags */
+#define BFL4_SROM13_DETTYPE_EN (1u << 3u)
+
+/* using cck spur reduction setting */
+#define BFL4_SROM13_CCK_SPUR_EN (1u << 4u)
+
+/* using 1.5V cbuck board */
+#define BFL4_SROM13_1P5V_CBUCK (1u << 7u)
+
+/* Enable/disable bit for sw chain mask */
+#define BFL4_SROM13_EN_SW_TXRXCHAIN_MASK (1u << 8u)
+
+#define BFL4_BTCOEX_OVER_SECI 0x00000400u /* Enable btcoex over gci seci */
+
+/* RFFE rFEM 5G and 2G present bit */
+#define BFL4_FEM_RFFE (1u << 21u)
+
+/* papd params */
+#define PAPD_TX_ATTN_2G 0xFF
+#define PAPD_TX_ATTN_5G 0xFF00
+#define PAPD_TX_ATTN_5G_SHIFT 8
+#define PAPD_RX_ATTN_2G 0xFF
+#define PAPD_RX_ATTN_5G 0xFF00
+#define PAPD_RX_ATTN_5G_SHIFT 8
+#define PAPD_CAL_IDX_2G 0xFF
+#define PAPD_CAL_IDX_5G 0xFF00
+#define PAPD_CAL_IDX_5G_SHIFT 8
+#define PAPD_BBMULT_2G 0xFF
+#define PAPD_BBMULT_5G 0xFF00
+#define PAPD_BBMULT_5G_SHIFT 8
+#define TIA_GAIN_MODE_2G 0xFF
+#define TIA_GAIN_MODE_5G 0xFF00
+#define TIA_GAIN_MODE_5G_SHIFT 8
+#define PAPD_EPS_OFFSET_2G 0xFFFF
+#define PAPD_EPS_OFFSET_5G 0xFFFF0000
+#define PAPD_EPS_OFFSET_5G_SHIFT 16
+#define PAPD_CALREF_DB_2G 0xFF
+#define PAPD_CALREF_DB_5G 0xFF00
+#define PAPD_CALREF_DB_5G_SHIFT 8
+
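[Editor's note] The PAPD and TIA parameter words above pack the 2G value in the low field and the 5G value in the field above it, so decoding is a mask-and-shift. A minimal sketch in C using the masks just defined (the packed word's value is hypothetical):

    /* Editor's example, not part of the patch: decode a packed PAPD word. */
    uint32 papd_word = 0x1a0c;                        /* hypothetical NVRAM value */
    uint8 attn_2g = (uint8)(papd_word & PAPD_TX_ATTN_2G);                            /* 0x0c */
    uint8 attn_5g = (uint8)((papd_word & PAPD_TX_ATTN_5G) >> PAPD_TX_ATTN_5G_SHIFT); /* 0x1a */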
+/* board specific GPIO assignment, gpio 0-3 are also customer-configurable led */
+#define BOARD_GPIO_BTC3W_IN 0x850 /* bit 4 is RF_ACTIVE, bit 6 is STATUS, bit 11 is PRI */
+#define BOARD_GPIO_BTC3W_OUT 0x020 /* bit 5 is TX_CONF */
+#define BOARD_GPIO_BTCMOD_IN 0x010 /* bit 4 is the alternate BT Coexistence Input */
+#define BOARD_GPIO_BTCMOD_OUT 0x020 /* bit 5 is the alternate BT Coexistence Out */
+#define BOARD_GPIO_BTC_IN 0x080 /* bit 7 is BT Coexistence Input */
+#define BOARD_GPIO_BTC_OUT 0x100 /* bit 8 is BT Coexistence Out */
+#define BOARD_GPIO_PACTRL 0x200 /* bit 9 controls the PA on new 4306 boards */
+#define BOARD_GPIO_12 0x1000 /* gpio 12 */
+#define BOARD_GPIO_13 0x2000 /* gpio 13 */
+#define BOARD_GPIO_BTC4_IN 0x0800 /* gpio 11, coex4, in */
+#define BOARD_GPIO_BTC4_BT 0x2000 /* gpio 12, coex4, bt active */
+#define BOARD_GPIO_BTC4_STAT 0x4000 /* gpio 14, coex4, status */
+#define BOARD_GPIO_BTC4_WLAN 0x8000 /* gpio 15, coex4, wlan active */
+#define BOARD_GPIO_1_WLAN_PWR 0x02 /* throttle WLAN power on X21 board */
+#define BOARD_GPIO_2_WLAN_PWR 0x04 /* throttle WLAN power on X29C board */
+#define BOARD_GPIO_3_WLAN_PWR 0x08 /* throttle WLAN power on X28 board */
+#define BOARD_GPIO_4_WLAN_PWR 0x10 /* throttle WLAN power on X19 board */
+#define BOARD_GPIO_13_WLAN_PWR 0x2000 /* throttle WLAN power on X14 board */
+
+#define GPIO_BTC4W_OUT_4312 0x010 /* bit 4 is BT_IODISABLE */
+
+#define PCI_CFG_GPIO_SCS 0x10 /* PCI config space bit 4 for 4306c0 slow clock source */
+#define PCI_CFG_GPIO_HWRAD 0x20 /* PCI config space GPIO 13 for hw radio disable */
+#define PCI_CFG_GPIO_XTAL 0x40 /* PCI config space GPIO 14 for Xtal power-up */
+#define PCI_CFG_GPIO_PLL 0x80 /* PCI config space GPIO 15 for PLL power-down */
+
+/* need to be moved to a chip specific header file */
+/* power control defines */
+#define PLL_DELAY 150 /* us pll on delay */
+#define FREF_DELAY 200 /* us fref change delay */
+#define MIN_SLOW_CLK 32 /* us Slow clock period */
+#define XTAL_ON_DELAY 1000 /* us crystal power-on delay */
+
+/* Board IDs */
+
+/* Reference Board Types */
+#define BU4710_BOARD 0x0400
+#define VSIM4710_BOARD 0x0401
+#define QT4710_BOARD 0x0402
+
+#define BCM94710D_BOARD 0x041a
+#define BCM94710R1_BOARD 0x041b
+#define BCM94710R4_BOARD 0x041c
+#define BCM94710AP_BOARD 0x041d
+
+#define BU2050_BOARD 0x041f
+
+/* BCM4318 boards */
+#define BU4318_BOARD 0x0447
+#define CB4318_BOARD 0x0448
+#define MPG4318_BOARD 0x0449
+#define MP4318_BOARD 0x044a
+#define SD4318_BOARD 0x044b
+#define BCM94318MPGH_BOARD 0x0463
+
+/* 4321 boards */
+#define BU4321_BOARD 0x046b
+#define BU4321E_BOARD 0x047c
+#define MP4321_BOARD 0x046c
+#define CB2_4321_BOARD 0x046d
+#define CB2_4321_AG_BOARD 0x0066
+#define MC4321_BOARD 0x046e
+
+/* 4360 Boards */
+#define BCM94360X52C 0X0117
+#define BCM94360X52D 0X0137
+#define BCM94360X29C 0X0112
+#define BCM94360X29CP2 0X0134
+#define BCM94360X29CP3 0X013B
+#define BCM94360X51 0x0111
+#define BCM94360X51P2 0x0129
+#define BCM94360X51P3 0x0142
+#define BCM94360X51A 0x0135
+#define BCM94360X51B 0x0136
+#define BCM94360CS 0x061B
+#define BCM94360J28_D11AC2G 0x0c00
+#define BCM94360J28_D11AC5G 0x0c01
+#define BCM94360USBH5_D11AC5G 0x06aa
+#define BCM94360MCM5 0x06d8
+
+/* need to update si_fixup_vid_overrides() for additional platforms */
+
+/* 43012 wlbga Board */
+#define BCM943012WLREF_SSID 0x07d7
+
+/* 43012 fcbga Board */
+#define BCM943012FCREF_SSID 0x07d4
+
+/* 43602 boards; it is not yet clear which boards will be created. */
+#define BCM943602RSVD1_SSID 0x06a5
+#define BCM943602RSVD2_SSID 0x06a6
+#define BCM943602X87 0X0133
+#define BCM943602X87P2 0X0152
+#define BCM943602X87P3 0X0153 /* need to update si_fixup_vid_overrides() */
+#define BCM943602X238 0X0132
+#define BCM943602X238D 0X014A
+#define BCM943602X238DP2 0X0155 /* J117 */
+#define BCM943602X238DP3 0X0156 /* J94 */
+#define BCM943602X100 0x0761 /* Dev only */
+#define BCM943602X100GS 0x0157 /* Woody */
+#define BCM943602X100P2 0x015A /* Buzz, Zurg */
+
+/* 4375B0 WLCSP SEMCO Board */
+#define BCM94375B0_WLCSP_SSID 0x086b
+
+/* # of GPIO pins */
+#define GPIO_NUMPINS 32
+
+/* chip RAM specifications */
+#define RDL_RAM_SIZE_4360 0xA0000
+#define RDL_RAM_BASE_4360 0x60000000
+
+/* Generic defs for nvram "muxenab" bits.
+ * Note: these differ for 4335a0; refer to bcmchipc.h for specific mux options.
+ */
+#define MUXENAB_UART 0x00000001
+#define MUXENAB_GPIO 0x00000002
+#define MUXENAB_ERCX 0x00000004 /* External Radio BT coex */
+#define MUXENAB_JTAG 0x00000008
+#define MUXENAB_HOST_WAKE 0x00000010 /* configure GPIO for SDIO host_wake */
+#define MUXENAB_I2S_EN 0x00000020
+#define MUXENAB_I2S_MASTER 0x00000040
+#define MUXENAB_I2S_FULL 0x00000080
+#define MUXENAB_SFLASH 0x00000100
+#define MUXENAB_RFSWCTRL0 0x00000200
+#define MUXENAB_RFSWCTRL1 0x00000400
+#define MUXENAB_RFSWCTRL2 0x00000800
+#define MUXENAB_SECI 0x00001000
+#define MUXENAB_BT_LEGACY 0x00002000
+#define MUXENAB_HOST_WAKE1 0x00004000 /* configure alternative GPIO for SDIO host_wake */
+
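[Editor's note] The "muxenab" word is read from NVRAM and tested bit-by-bit against the masks above to decide which pin functions to enable. A minimal sketch (the word's value is hypothetical):

    /* Editor's example, not part of the patch: testing muxenab bits. */
    uint32 muxenab = 0x00001011;                          /* hypothetical NVRAM value */
    bool uart      = (muxenab & MUXENAB_UART) != 0;
    bool host_wake = (muxenab & MUXENAB_HOST_WAKE) != 0;  /* GPIO for SDIO host_wake */
    bool seci      = (muxenab & MUXENAB_SECI) != 0;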
+/* Boot flags */
+#define FLASH_KERNEL_NFLASH 0x00000001
+#define FLASH_BOOT_NFLASH 0x00000002
+
+#endif /* _BCMDEVS_H */
diff --git a/bcmdhd.101.10.361.x/include/bcmdevs_legacy.h b/bcmdhd.101.10.361.x/include/bcmdevs_legacy.h
new file mode 100755
index 0000000..6e57c42
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/bcmdevs_legacy.h
@@ -0,0 +1,188 @@
+/*
+ * Broadcom device-specific manifest constants used by DHD, but deprecated in firmware.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _bcmdevs_legacy_h_
+#define _bcmdevs_legacy_h_
+
+/* DONGLE VID/PIDs */
+#define BCM_DNGL_BL_PID_4322 0xbd13
+#define BCM_DNGL_BL_PID_4319 0xbd16
+#define BCM_DNGL_BL_PID_43236 0xbd17
+#define BCM_DNGL_BL_PID_43143 0xbd1e
+#define BCM_DNGL_BL_PID_43242 0xbd1f
+#define BCM_DNGL_BL_PID_4350 0xbd23
+#define BCM_DNGL_BL_PID_43569 0xbd27
+
+/* PCI Device IDs */
+#define BCM4335_D11AC_ID 0x43ae
+#define BCM4335_D11AC2G_ID 0x43af
+#define BCM4335_D11AC5G_ID 0x43b0
+#define BCM4345_D11AC_ID 0x43ab /* 4345 802.11ac dualband device */
+#define BCM4345_D11AC2G_ID 0x43ac /* 4345 802.11ac 2.4G device */
+#define BCM4345_D11AC5G_ID 0x43ad /* 4345 802.11ac 5G device */
+#define BCM43452_D11AC_ID 0x47ab /* 43452 802.11ac dualband device */
+#define BCM43452_D11AC2G_ID 0x47ac /* 43452 802.11ac 2.4G device */
+#define BCM43452_D11AC5G_ID 0x47ad /* 43452 802.11ac 5G device */
+#define BCM4347_D11AC_ID 0x440a /* 4347 802.11ac dualband device */
+#define BCM4347_D11AC2G_ID 0x440b /* 4347 802.11ac 2.4G device */
+#define BCM4347_D11AC5G_ID 0x440c /* 4347 802.11ac 5G device */
+#define BCM4349_D11AC_ID 0x4349 /* 4349 802.11ac dualband device */
+#define BCM4349_D11AC2G_ID 0x43dd /* 4349 802.11ac 2.4G device */
+#define BCM4349_D11AC5G_ID 0x43de /* 4349 802.11ac 5G device */
+
+#define BCM4350_D11AC_ID 0x43a3
+#define BCM4350_D11AC2G_ID 0x43a4
+#define BCM4350_D11AC5G_ID 0x43a5
+#define BCM4354_D11AC_ID 0x43df /* 4354 802.11ac dualband device */
+#define BCM4354_D11AC2G_ID 0x43e0 /* 4354 802.11ac 2.4G device */
+#define BCM4354_D11AC5G_ID 0x43e1 /* 4354 802.11ac 5G device */
+#define BCM4355_D11AC_ID 0x43dc /* 4355 802.11ac dualband device */
+#define BCM4355_D11AC2G_ID 0x43fc /* 4355 802.11ac 2.4G device */
+#define BCM4355_D11AC5G_ID 0x43fd /* 4355 802.11ac 5G device */
+#define BCM4356_D11AC_ID 0x43ec /* 4356 802.11ac dualband device */
+#define BCM4356_D11AC2G_ID 0x43ed /* 4356 802.11ac 2.4G device */
+#define BCM4356_D11AC5G_ID 0x43ee /* 4356 802.11ac 5G device */
+#define BCM43569_D11AC_ID 0x43d9
+#define BCM43569_D11AC2G_ID 0x43da
+#define BCM43569_D11AC5G_ID 0x43db
+#define BCM4358_D11AC_ID 0x43e9 /* 4358 802.11ac dualband device */
+#define BCM4358_D11AC2G_ID 0x43ea /* 4358 802.11ac 2.4G device */
+#define BCM4358_D11AC5G_ID 0x43eb /* 4358 802.11ac 5G device */
+
+#define BCM4359_D11AC_ID 0x43ef /* 4359 802.11ac dualband device */
+#define BCM4359_D11AC2G_ID 0x43fe /* 4359 802.11ac 2.4G device */
+#define BCM4359_D11AC5G_ID 0x43ff /* 4359 802.11ac 5G device */
+#define BCM43596_D11AC_ID 0x4415 /* 43596 802.11ac dualband device */
+#define BCM43596_D11AC2G_ID 0x4416 /* 43596 802.11ac 2.4G device */
+#define BCM43596_D11AC5G_ID 0x4417 /* 43596 802.11ac 5G device */
+#define BCM43597_D11AC_ID 0x441c /* 43597 802.11ac dualband device */
+#define BCM43597_D11AC2G_ID 0x441d /* 43597 802.11ac 2.4G device */
+#define BCM43597_D11AC5G_ID 0x441e /* 43597 802.11ac 5G device */
+#define BCM4361_D11AC_ID 0x441f /* 4361 802.11ac dualband device */
+#define BCM4361_D11AC2G_ID 0x4420 /* 4361 802.11ac 2.4G device */
+#define BCM4361_D11AC5G_ID 0x4421 /* 4361 802.11ac 5G device */
+#define BCM4364_D11AC_ID 0x4464 /* 4364 802.11ac dualband device */
+#define BCM4364_D11AC2G_ID 0x446a /* 4364 802.11ac 2.4G device */
+#define BCM4364_D11AC5G_ID 0x446b /* 4364 802.11ac 5G device */
+#define BCM4371_D11AC_ID 0x440d /* 4371 802.11ac dualband device */
+#define BCM4371_D11AC2G_ID 0x440e /* 4371 802.11ac 2.4G device */
+#define BCM4371_D11AC5G_ID 0x440f /* 4371 802.11ac 5G device */
+
+/* Chip IDs */
+#define BCM43018_CHIP_ID 43018 /* 43018 chipcommon chipid */
+#define BCM4335_CHIP_ID 0x4335 /* 4335 chipcommon chipid */
+#define BCM4339_CHIP_ID 0x4339 /* 4339 chipcommon chipid */
+#define BCM43430_CHIP_ID 43430 /* 43430 chipcommon chipid */
+#define BCM4345_CHIP_ID 0x4345 /* 4345 chipcommon chipid */
+#define BCM43452_CHIP_ID 43452 /* 43452 chipcommon chipid */
+#define BCM43454_CHIP_ID 43454 /* 43454 chipcommon chipid */
+#define BCM43455_CHIP_ID 43455 /* 43455 chipcommon chipid */
+#define BCM43457_CHIP_ID 43457 /* 43457 chipcommon chipid */
+#define BCM43458_CHIP_ID 43458 /* 43458 chipcommon chipid */
+
+#define BCM4345_CHIP(chipid) (CHIPID(chipid) == BCM4345_CHIP_ID || \
+ CHIPID(chipid) == BCM43452_CHIP_ID || \
+ CHIPID(chipid) == BCM43454_CHIP_ID || \
+ CHIPID(chipid) == BCM43455_CHIP_ID || \
+ CHIPID(chipid) == BCM43457_CHIP_ID || \
+ CHIPID(chipid) == BCM43458_CHIP_ID)
+
+#define CASE_BCM4345_CHIP case BCM4345_CHIP_ID: /* fallthrough */ \
+ case BCM43454_CHIP_ID: /* fallthrough */ \
+ case BCM43455_CHIP_ID: /* fallthrough */ \
+ case BCM43457_CHIP_ID: /* fallthrough */ \
+ case BCM43458_CHIP_ID
+
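[Editor's note] The BCM4345_CHIP() predicate suits expressions, while CASE_BCM4345_CHIP expands to five case labels so the same family check can sit directly in a switch (note the predicate additionally covers 43452). A minimal sketch of the switch form (the function is hypothetical; CHIPID() comes from the main bcmdevs.h):

    /* Editor's example, not part of the patch. */
    static bool chip_is_4345_family(uint chipid)
    {
            switch (CHIPID(chipid)) {
            CASE_BCM4345_CHIP:              /* 4345/43454/43455/43457/43458 */
                    return TRUE;
            default:
                    return FALSE;
            }
    }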
+#define BCM4347_CHIP_ID 0x4347 /* 4347 chipcommon chipid */
+#define BCM4347_CHIP(chipid) ((CHIPID(chipid) == BCM4347_CHIP_ID) || \
+ (CHIPID(chipid) == BCM4357_CHIP_ID) || \
+ (CHIPID(chipid) == BCM4361_CHIP_ID))
+#define BCM4347_CHIP_GRPID BCM4347_CHIP_ID: \
+ case BCM4357_CHIP_ID: \
+ case BCM4361_CHIP_ID
+
+#define BCM4350_CHIP_ID 0x4350 /* 4350 chipcommon chipid */
+#define BCM4354_CHIP_ID 0x4354 /* 4354 chipcommon chipid */
+#define BCM4356_CHIP_ID 0x4356 /* 4356 chipcommon chipid */
+#define BCM43567_CHIP_ID 0xAA2F /* 43567 chipcommon chipid */
+#define BCM43569_CHIP_ID 0xAA31 /* 43569 chipcommon chipid */
+#define BCM4357_CHIP_ID 0x4357 /* 4357 chipcommon chipid */
+#define BCM43570_CHIP_ID 0xAA32 /* 43570 chipcommon chipid */
+#define BCM4358_CHIP_ID 0x4358 /* 4358 chipcommon chipid */
+#define BCM43596_CHIP_ID 43596 /* 43596 chipcommon chipid */
+#define BCM4361_CHIP_ID 0x4361 /* 4361 chipcommon chipid */
+#define BCM4364_CHIP_ID 0x4364 /* 4364 chipcommon chipid */
+#define BCM4371_CHIP_ID 0x4371 /* 4371 chipcommon chipid */
+
+#define BCM4349_CHIP_ID 0x4349 /* 4349 chipcommon chipid */
+#define BCM4355_CHIP_ID 0x4355 /* 4355 chipcommon chipid */
+#define BCM4359_CHIP_ID 0x4359 /* 4359 chipcommon chipid */
+#define BCM4355_CHIP(chipid) (CHIPID(chipid) == BCM4355_CHIP_ID)
+#define BCM4349_CHIP(chipid) ((CHIPID(chipid) == BCM4349_CHIP_ID) || \
+ (CHIPID(chipid) == BCM4355_CHIP_ID) || \
+ (CHIPID(chipid) == BCM4359_CHIP_ID))
+#define BCM4349_CHIP_GRPID BCM4349_CHIP_ID: \
+ case BCM4355_CHIP_ID: \
+ case BCM4359_CHIP_ID
+#define BCM4350_CHIP(chipid) ((CHIPID(chipid) == BCM4350_CHIP_ID) || \
+ (CHIPID(chipid) == BCM4354_CHIP_ID) || \
+ (CHIPID(chipid) == BCM43567_CHIP_ID) || \
+ (CHIPID(chipid) == BCM43569_CHIP_ID) || \
+ (CHIPID(chipid) == BCM43570_CHIP_ID) || \
+ (CHIPID(chipid) == BCM4358_CHIP_ID)) /* 4350 variations */
+
+#define BCM43598_CHIP_ID 0xaa4c /* 43598 chipcommon chipid */
+#define BCM43234_CHIP_ID 43234 /* 43234 chipcommon chipid */
+#define BCM43235_CHIP_ID 43235 /* 43235 chipcommon chipid */
+#define BCM43236_CHIP_ID 43236 /* 43236 chipcommon chipid */
+#define BCM43238_CHIP_ID 43238 /* 43238 chipcommon chipid */
+#define BCM43556_CHIP_ID 0xAA24 /* 43556 chipcommon chipid */
+#define BCM43558_CHIP_ID 0xAA26 /* 43558 chipcommon chipid */
+#define BCM43566_CHIP_ID 0xAA2E /* 43566 chipcommon chipid */
+#define BCM43568_CHIP_ID 0xAA30 /* 43568 chipcommon chipid */
+
+/* Board Flags */
+
+/* Package IDs */
+
+/* Board IDs */
+
+/* chip RAM specifications */
+#define RDL_RAM_BASE_4319 0x60000000
+#define RDL_RAM_BASE_4329 0x60000000
+#define RDL_RAM_SIZE_4319 0x48000
+#define RDL_RAM_SIZE_4329 0x48000
+#define RDL_RAM_SIZE_43236 0x70000
+#define RDL_RAM_BASE_43236 0x60000000
+#define RDL_RAM_SIZE_4328 0x60000
+#define RDL_RAM_BASE_4328 0x80000000
+#define RDL_RAM_SIZE_4322 0x60000
+#define RDL_RAM_BASE_4322 0x60000000
+#define RDL_RAM_SIZE_43242 0x90000
+#define RDL_RAM_BASE_43242 0x60000000
+#define RDL_RAM_SIZE_43143 0x70000
+#define RDL_RAM_BASE_43143 0x60000000
+#define RDL_RAM_SIZE_4350 0xC0000
+#define RDL_RAM_BASE_4350 0x180800
+
+#endif /* _bcmdevs_legacy_h_ */
diff --git a/bcmdhd.101.10.361.x/include/bcmdhcp.h b/bcmdhd.101.10.361.x/include/bcmdhcp.h
new file mode 100755
index 0000000..0051ebf
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/bcmdhcp.h
@@ -0,0 +1,86 @@
+/*
+ * Fundamental constants relating to DHCP Protocol
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _bcmdhcp_h_
+#define _bcmdhcp_h_
+
+/* DHCP params */
+#define DHCP_TYPE_OFFSET 0 /* DHCP type (request|reply) offset */
+#define DHCP_TID_OFFSET 4 /* DHCP transaction id offset */
+#define DHCP_FLAGS_OFFSET 10 /* DHCP flags offset */
+#define DHCP_CIADDR_OFFSET 12 /* DHCP client IP address offset */
+#define DHCP_YIADDR_OFFSET 16 /* DHCP your IP address offset */
+#define DHCP_GIADDR_OFFSET 24 /* DHCP relay agent IP address offset */
+#define DHCP_CHADDR_OFFSET 28 /* DHCP client h/w address offset */
+#define DHCP_OPT_OFFSET 236 /* DHCP options offset */
+
+#define DHCP_OPT_MSGTYPE 53 /* DHCP message type */
+#define DHCP_OPT_MSGTYPE_REQ 3
+#define DHCP_OPT_MSGTYPE_ACK 5 /* DHCP message type - ACK */
+
+#define DHCP_OPT_CODE_OFFSET 0 /* Option identifier */
+#define DHCP_OPT_LEN_OFFSET 1 /* Option data length */
+#define DHCP_OPT_DATA_OFFSET 2 /* Option data */
+
+#define DHCP_OPT_CODE_CLIENTID 61 /* Option identifier */
+
+#define DHCP_TYPE_REQUEST 1 /* DHCP request (discover|request) */
+#define DHCP_TYPE_REPLY 2 /* DHCP reply (offer|ack) */
+
+#define DHCP_PORT_SERVER 67 /* DHCP server UDP port */
+#define DHCP_PORT_CLIENT 68 /* DHCP client UDP port */
+
+#define DHCP_FLAG_BCAST 0x8000 /* DHCP broadcast flag */
+
+#define DHCP_FLAGS_LEN 2 /* DHCP flags field length */
+
+#define DHCP6_TYPE_SOLICIT 1 /* DHCP6 solicit */
+#define DHCP6_TYPE_ADVERTISE 2 /* DHCP6 advertise */
+#define DHCP6_TYPE_REQUEST 3 /* DHCP6 request */
+#define DHCP6_TYPE_CONFIRM 4 /* DHCP6 confirm */
+#define DHCP6_TYPE_RENEW 5 /* DHCP6 renew */
+#define DHCP6_TYPE_REBIND 6 /* DHCP6 rebind */
+#define DHCP6_TYPE_REPLY 7 /* DHCP6 reply */
+#define DHCP6_TYPE_RELEASE 8 /* DHCP6 release */
+#define DHCP6_TYPE_DECLINE 9 /* DHCP6 decline */
+#define DHCP6_TYPE_RECONFIGURE 10 /* DHCP6 reconfigure */
+#define DHCP6_TYPE_INFOREQ 11 /* DHCP6 information request */
+#define DHCP6_TYPE_RELAYFWD 12 /* DHCP6 relay forward */
+#define DHCP6_TYPE_RELAYREPLY 13 /* DHCP6 relay reply */
+
+#define DHCP6_TYPE_OFFSET 0 /* DHCP6 type offset */
+
+#define DHCP6_MSG_OPT_OFFSET 4 /* Offset of options in client server messages */
+#define DHCP6_RELAY_OPT_OFFSET 34 /* Offset of options in relay messages */
+
+#define DHCP6_OPT_CODE_OFFSET 0 /* Option identifier */
+#define DHCP6_OPT_LEN_OFFSET 2 /* Option data length */
+#define DHCP6_OPT_DATA_OFFSET 4 /* Option data */
+
+#define DHCP6_OPT_CODE_CLIENTID 1 /* DHCP6 CLIENTID option */
+#define DHCP6_OPT_CODE_SERVERID 2 /* DHCP6 SERVERID option */
+
+#define DHCP6_PORT_SERVER 547 /* DHCP6 server UDP port */
+#define DHCP6_PORT_CLIENT 546 /* DHCP6 client UDP port */
+
+#endif /* #ifndef _bcmdhcp_h_ */
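[Editor's note] With the offsets above, extracting the DHCP message type is a linear scan of the type/length/value option list that starts at DHCP_OPT_OFFSET. A minimal sketch, assuming pkt points at the BOOTP/DHCP payload and len bounds it (function and parameter names are hypothetical; the 4-byte magic cookie that precedes the option list is skipped up front):

    /* Editor's example, not part of the patch: locate DHCP option 53. */
    static int dhcp_msgtype(const uint8 *pkt, uint len)
    {
            uint off = DHCP_OPT_OFFSET + 4;         /* fixed header + magic cookie */

            while (off + DHCP_OPT_DATA_OFFSET < len) {
                    uint8 code = pkt[off + DHCP_OPT_CODE_OFFSET];
                    uint8 olen;

                    if (code == 255)                /* end option */
                            break;
                    if (code == 0) {                /* pad option is a single byte */
                            off++;
                            continue;
                    }
                    olen = pkt[off + DHCP_OPT_LEN_OFFSET];
                    if (code == DHCP_OPT_MSGTYPE && olen >= 1)
                            return pkt[off + DHCP_OPT_DATA_OFFSET]; /* e.g. DHCP_OPT_MSGTYPE_ACK */
                    off += DHCP_OPT_DATA_OFFSET + olen;
            }
            return -1;                              /* not found */
    }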
diff --git a/bcmdhd.101.10.361.x/include/bcmendian.h b/bcmdhd.101.10.361.x/include/bcmendian.h
new file mode 100755
index 0000000..097e5ea
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/bcmendian.h
@@ -0,0 +1,451 @@
+/*
+ * Byte order utilities
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ *
+ * This file by default provides proper behavior on little-endian architectures.
+ * On big-endian architectures, IL_BIGENDIAN should be defined.
+ */
+
+#ifndef _BCMENDIAN_H_
+#define _BCMENDIAN_H_
+
+#include <typedefs.h>
+
+/* Reverse the bytes in a 16-bit value */
+#define BCMSWAP16(val) \
+ ((uint16)((((uint16)(val) & (uint16)0x00ffU) << 8) | \
+ (((uint16)(val) & (uint16)0xff00U) >> 8)))
+
+/* Reverse the bytes in a 32-bit value */
+#define BCMSWAP32(val) \
+ ((uint32)((((uint32)(val) & (uint32)0x000000ffU) << 24) | \
+ (((uint32)(val) & (uint32)0x0000ff00U) << 8) | \
+ (((uint32)(val) & (uint32)0x00ff0000U) >> 8) | \
+ (((uint32)(val) & (uint32)0xff000000U) >> 24)))
+
+/* Reverse the two 16-bit halves of a 32-bit value */
+#define BCMSWAP32BY16(val) \
+ ((uint32)((((uint32)(val) & (uint32)0x0000ffffU) << 16) | \
+ (((uint32)(val) & (uint32)0xffff0000U) >> 16)))
+
+/* Reverse the bytes in a 64-bit value */
+#define BCMSWAP64(val) \
+ ((uint64)((((uint64)(val) & 0x00000000000000ffULL) << 56) | \
+ (((uint64)(val) & 0x000000000000ff00ULL) << 40) | \
+ (((uint64)(val) & 0x0000000000ff0000ULL) << 24) | \
+ (((uint64)(val) & 0x00000000ff000000ULL) << 8) | \
+ (((uint64)(val) & 0x000000ff00000000ULL) >> 8) | \
+ (((uint64)(val) & 0x0000ff0000000000ULL) >> 24) | \
+ (((uint64)(val) & 0x00ff000000000000ULL) >> 40) | \
+ (((uint64)(val) & 0xff00000000000000ULL) >> 56)))
+
+/* Reverse the two 32-bit halves of a 64-bit value */
+#define BCMSWAP64BY32(val) \
+ ((uint64)((((uint64)(val) & 0x00000000ffffffffULL) << 32) | \
+ (((uint64)(val) & 0xffffffff00000000ULL) >> 32)))
+
+/* Byte swapping macros
+ * Host <=> Network (Big Endian) for 16- and 32-bit values
+ * Host <=> Little-Endian for 16- and 32-bit values
+ */
+#ifndef hton16
+#ifndef IL_BIGENDIAN
+#define HTON16(i) BCMSWAP16(i)
+#define hton16(i) bcmswap16(i)
+#define HTON32(i) BCMSWAP32(i)
+#define hton32(i) bcmswap32(i)
+#define NTOH16(i) BCMSWAP16(i)
+#define ntoh16(i) bcmswap16(i)
+#define NTOH32(i) BCMSWAP32(i)
+#define ntoh32(i) bcmswap32(i)
+#define LTOH16(i) (i)
+#define ltoh16(i) (i)
+#define LTOH32(i) (i)
+#define ltoh32(i) (i)
+#define HTOL16(i) (i)
+#define htol16(i) (i)
+#define HTOL32(i) (i)
+#define htol32(i) (i)
+#define HTOL64(i) (i)
+#define htol64(i) (i)
+#else /* IL_BIGENDIAN */
+#define HTON16(i) (i)
+#define hton16(i) (i)
+#define HTON32(i) (i)
+#define hton32(i) (i)
+#define NTOH16(i) (i)
+#define ntoh16(i) (i)
+#define NTOH32(i) (i)
+#define ntoh32(i) (i)
+#define LTOH16(i) BCMSWAP16(i)
+#define ltoh16(i) bcmswap16(i)
+#define LTOH32(i) BCMSWAP32(i)
+#define ltoh32(i) bcmswap32(i)
+#define HTOL16(i) BCMSWAP16(i)
+#define htol16(i) bcmswap16(i)
+#define HTOL32(i) BCMSWAP32(i)
+#define htol32(i) bcmswap32(i)
+#define HTOL64(i) BCMSWAP64(i)
+#define htol64(i) bcmswap64(i)
+#endif /* IL_BIGENDIAN */
+#endif /* hton16 */
+
+#ifndef IL_BIGENDIAN
+#define ltoh16_buf(buf, i)
+#define htol16_buf(buf, i)
+#define ltoh32_buf(buf, i)
+#define htol32_buf(buf, i)
+#define ltoh64_buf(buf, i)
+#define htol64_buf(buf, i)
+#else
+#define ltoh16_buf(buf, i) bcmswap16_buf((uint16 *)(buf), (i))
+#define htol16_buf(buf, i) bcmswap16_buf((uint16 *)(buf), (i))
+#define ltoh32_buf(buf, i) bcmswap32_buf((uint32 *)(buf), (i))
+#define htol32_buf(buf, i) bcmswap32_buf((uint32 *)(buf), (i))
+#define ltoh64_buf(buf, i) bcmswap64_buf((uint64 *)(buf), (i))
+#define htol64_buf(buf, i) bcmswap64_buf((uint64 *)(buf), (i))
+#endif /* IL_BIGENDIAN */
+
+/* Unaligned loads and stores in host byte order */
+#ifndef IL_BIGENDIAN
+#define load32_ua(a) ltoh32_ua(a)
+#define store32_ua(a, v) htol32_ua_store(v, a)
+#define load16_ua(a) ltoh16_ua(a)
+#define store16_ua(a, v) htol16_ua_store(v, a)
+#define load64_ua(a) ltoh64_ua(a)
+#define store64_ua(a, v) htol64_ua_store(v, a)
+#else
+#define load32_ua(a) ntoh32_ua(a)
+#define store32_ua(a, v) hton32_ua_store(v, a)
+#define load16_ua(a) ntoh16_ua(a)
+#define store16_ua(a, v) hton16_ua_store(v, a)
+#define load64_ua(a) ntoh64_ua(a)
+#define store64_ua(a, v) hton64_ua_store(v, a)
+#endif /* IL_BIGENDIAN */
+
+#define _LTOH16_UA(cp) ((uint16)(cp)[0] | ((uint16)(cp)[1] << 8))
+#define _LTOH32_UA(cp) ((uint32)(cp)[0] | ((uint32)(cp)[1] << 8) | \
+ ((uint32)(cp)[2] << 16) | ((uint32)(cp)[3] << 24))
+#define _NTOH16_UA(cp) (((uint16)(cp)[0] << 8) | (uint16)(cp)[1])
+#define _NTOH32_UA(cp) (((uint32)(cp)[0] << 24) | ((uint32)(cp)[1] << 16) | \
+ ((uint32)(cp)[2] << 8) | (uint32)(cp)[3])
+
+#define _LTOH64_UA(cp) ((uint64)(cp)[0] | ((uint64)(cp)[1] << 8) | \
+ ((uint64)(cp)[2] << 16) | ((uint64)(cp)[3] << 24) | \
+ ((uint64)(cp)[4] << 32) | ((uint64)(cp)[5] << 40) | \
+ ((uint64)(cp)[6] << 48) | ((uint64)(cp)[7] << 56))
+
+#define _NTOH64_UA(cp) ((uint64)(cp)[7] | ((uint64)(cp)[6] << 8) | \
+ ((uint64)(cp)[5] << 16) | ((uint64)(cp)[4] << 24) | \
+ ((uint64)(cp)[3] << 32) | ((uint64)(cp)[2] << 40) | \
+ ((uint64)(cp)[1] << 48) | ((uint64)(cp)[0] << 56))
+
+#define ltoh_ua(ptr) \
+ (sizeof(*(ptr)) == sizeof(uint8) ? *(const uint8 *)(ptr) : \
+ sizeof(*(ptr)) == sizeof(uint16) ? (uint16)_LTOH16_UA((const uint8 *)(ptr)) : \
+ sizeof(*(ptr)) == sizeof(uint32) ? (uint32)_LTOH32_UA((const uint8 *)(ptr)) : \
+ *(uint8 *)0)
+
+#define ntoh_ua(ptr) \
+ (sizeof(*(ptr)) == sizeof(uint8) ? *(const uint8 *)(ptr) : \
+ sizeof(*(ptr)) == sizeof(uint16) ? (uint16)_NTOH16_UA((const uint8 *)(ptr)) : \
+ sizeof(*(ptr)) == sizeof(uint32) ? (uint32)_NTOH32_UA((const uint8 *)(ptr)) : \
+ *(uint8 *)0)
+
+#ifdef __GNUC__
+
+/* GNU macro versions avoid referencing the argument multiple times, while also
+ * avoiding the -fno-inline used in ROM builds.
+ */
+
+#define bcmswap16(val) ({ \
+ uint16 _val = (val); \
+ BCMSWAP16(_val); \
+})
+
+#define bcmswap32(val) ({ \
+ uint32 _val = (val); \
+ BCMSWAP32(_val); \
+})
+
+#define bcmswap64(val) ({ \
+ uint64 _val = (val); \
+ BCMSWAP64(_val); \
+})
+
+#define bcmswap32by16(val) ({ \
+ uint32 _val = (val); \
+ BCMSWAP32BY16(_val); \
+})
+
+#define bcmswap16_buf(buf, len) ({ \
+ uint16 *_buf = (uint16 *)(buf); \
+ uint _wds = (len) / 2; \
+ while (_wds--) { \
+ *_buf = bcmswap16(*_buf); \
+ _buf++; \
+ } \
+})
+
+#define bcmswap32_buf(buf, len) ({ \
+ uint32 *_buf = (uint32 *)(buf); \
+ uint _wds = (len) / 4; \
+ while (_wds--) { \
+ *_buf = bcmswap32(*_buf); \
+ _buf++; \
+ } \
+})
+
+#define bcmswap64_buf(buf, len) ({ \
+ uint64 *_buf = (uint64 *)(buf); \
+ uint _wds = (len) / 8; \
+ while (_wds--) { \
+ *_buf = bcmswap64(*_buf); \
+ _buf++; \
+ } \
+})
+
+#define htol16_ua_store(val, bytes) ({ \
+ uint16 _val = (val); \
+ uint8 *_bytes = (uint8 *)(bytes); \
+ _bytes[0] = _val & 0xff; \
+ _bytes[1] = _val >> 8; \
+})
+
+#define htol32_ua_store(val, bytes) ({ \
+ uint32 _val = (val); \
+ uint8 *_bytes = (uint8 *)(bytes); \
+ _bytes[0] = _val & 0xff; \
+ _bytes[1] = (_val >> 8) & 0xff; \
+ _bytes[2] = (_val >> 16) & 0xff; \
+ _bytes[3] = _val >> 24; \
+})
+
+#define htol64_ua_store(val, bytes) ({ \
+ uint64 _val = (val); \
+ uint8 *_bytes = (uint8 *)(bytes); \
+ int _ii; \
+ for (_ii = 0; _ii < (int)sizeof(_val); ++_ii) { \
+ *_bytes++ = _val & 0xff; \
+ _val >>= 8; \
+ } \
+})
+
+#define hton16_ua_store(val, bytes) ({ \
+ uint16 _val = (val); \
+ uint8 *_bytes = (uint8 *)(bytes); \
+ _bytes[0] = _val >> 8; \
+ _bytes[1] = _val & 0xff; \
+})
+
+#define hton32_ua_store(val, bytes) ({ \
+ uint32 _val = (val); \
+ uint8 *_bytes = (uint8 *)(bytes); \
+ _bytes[0] = _val >> 24; \
+ _bytes[1] = (_val >> 16) & 0xff; \
+ _bytes[2] = (_val >> 8) & 0xff; \
+ _bytes[3] = _val & 0xff; \
+})
+
+#define ltoh16_ua(bytes) ({ \
+ const uint8 *_bytes = (const uint8 *)(bytes); \
+ _LTOH16_UA(_bytes); \
+})
+
+#define ltoh32_ua(bytes) ({ \
+ const uint8 *_bytes = (const uint8 *)(bytes); \
+ _LTOH32_UA(_bytes); \
+})
+
+#define ltoh64_ua(bytes) ({ \
+ const uint8 *_bytes = (const uint8 *)(bytes); \
+ _LTOH64_UA(_bytes); \
+})
+
+#define ntoh16_ua(bytes) ({ \
+ const uint8 *_bytes = (const uint8 *)(bytes); \
+ _NTOH16_UA(_bytes); \
+})
+
+#define ntoh32_ua(bytes) ({ \
+ const uint8 *_bytes = (const uint8 *)(bytes); \
+ _NTOH32_UA(_bytes); \
+})
+
+#define ntoh64_ua(bytes) ({ \
+ const uint8 *_bytes = (const uint8 *)(bytes); \
+ _NTOH64_UA(_bytes); \
+})
+
+#else /* !__GNUC__ */
+
+/* Inline versions avoid referencing the argument multiple times */
+static INLINE uint16
+bcmswap16(uint16 val)
+{
+ return BCMSWAP16(val);
+}
+
+static INLINE uint32
+bcmswap32(uint32 val)
+{
+ return BCMSWAP32(val);
+}
+
+static INLINE uint64
+bcmswap64(uint64 val)
+{
+ return BCMSWAP64(val);
+}
+
+static INLINE uint32
+bcmswap32by16(uint32 val)
+{
+ return BCMSWAP32BY16(val);
+}
+
+/* Reverse pairs of bytes in a buffer (not for high-performance use) */
+/* buf - start of buffer of shorts to swap */
+/* len - byte length of buffer */
+static INLINE void
+bcmswap16_buf(uint16 *buf, uint len)
+{
+ len = len / 2;
+
+ while (len--) {
+ *buf = bcmswap16(*buf);
+ buf++;
+ }
+}
+
+/*
+ * Store 16-bit value to unaligned little-endian byte array.
+ */
+static INLINE void
+htol16_ua_store(uint16 val, uint8 *bytes)
+{
+ bytes[0] = val & 0xff;
+ bytes[1] = val >> 8;
+}
+
+/*
+ * Store 32-bit value to unaligned little-endian byte array.
+ */
+static INLINE void
+htol32_ua_store(uint32 val, uint8 *bytes)
+{
+ bytes[0] = val & 0xff;
+ bytes[1] = (val >> 8) & 0xff;
+ bytes[2] = (val >> 16) & 0xff;
+ bytes[3] = val >> 24;
+}
+
+/*
+ * Store 64-bit value to unaligned little-endian byte array.
+ */
+static INLINE void
+htol64_ua_store(uint64 val, uint8 *bytes)
+{
+ int i;
+	for (i = 0; i < (int)sizeof(val); ++i) {
+ *bytes++ = (uint8)(val & 0xff);
+ val >>= 8;
+ }
+}
+
+/*
+ * Store 16-bit value to unaligned network-(big-)endian byte array.
+ */
+static INLINE void
+hton16_ua_store(uint16 val, uint8 *bytes)
+{
+ bytes[0] = val >> 8;
+ bytes[1] = val & 0xff;
+}
+
+/*
+ * Store 32-bit value to unaligned network-(big-)endian byte array.
+ */
+static INLINE void
+hton32_ua_store(uint32 val, uint8 *bytes)
+{
+ bytes[0] = val >> 24;
+ bytes[1] = (val >> 16) & 0xff;
+ bytes[2] = (val >> 8) & 0xff;
+ bytes[3] = val & 0xff;
+}
+
+/*
+ * Load 16-bit value from unaligned little-endian byte array.
+ */
+static INLINE uint16
+ltoh16_ua(const void *bytes)
+{
+ return _LTOH16_UA((const uint8 *)bytes);
+}
+
+/*
+ * Load 32-bit value from unaligned little-endian byte array.
+ */
+static INLINE uint32
+ltoh32_ua(const void *bytes)
+{
+ return _LTOH32_UA((const uint8 *)bytes);
+}
+
+/*
+ * Load 64-bit value from unaligned little-endian byte array.
+ */
+static INLINE uint64
+ltoh64_ua(const void *bytes)
+{
+ return _LTOH64_UA((const uint8 *)bytes);
+}
+
+/*
+ * Load 16-bit value from unaligned big-(network-)endian byte array.
+ */
+static INLINE uint16
+ntoh16_ua(const void *bytes)
+{
+ return _NTOH16_UA((const uint8 *)bytes);
+}
+
+/*
+ * Load 32-bit value from unaligned big-(network-)endian byte array.
+ */
+static INLINE uint32
+ntoh32_ua(const void *bytes)
+{
+ return _NTOH32_UA((const uint8 *)bytes);
+}
+
+/*
+ * Load 64-bit value from unaligned big-(network-)endian byte array.
+ */
+static INLINE uint64
+ntoh64_ua(const void *bytes)
+{
+ return _NTOH64_UA((const uint8 *)bytes);
+}
+
+#endif /* !__GNUC__ */
+#endif /* !_BCMENDIAN_H_ */
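[Editor's note] Because the *_ua helpers assemble values byte-by-byte, the same source is correct on either endianness and at any alignment, which is exactly what packet parsing needs. A minimal round-trip sketch (buffer contents hypothetical):

    /* Editor's example, not part of the patch: unaligned 16-bit round trip. */
    uint8 buf[4];
    uint16 v;

    htol16_ua_store(0x1234, &buf[1]);       /* buf[1] = 0x34, buf[2] = 0x12 */
    v = ltoh16_ua(&buf[1]);                 /* v == 0x1234 on any host */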
diff --git a/bcmdhd.101.10.361.x/include/bcmerror.h b/bcmdhd.101.10.361.x/include/bcmerror.h
new file mode 100755
index 0000000..ef2a440
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/bcmerror.h
@@ -0,0 +1,573 @@
+/*
+ * Common header file for all error codes.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ *
+ *
+ *
+ */
+
+#ifndef _bcmerror_h_
+#define _bcmerror_h_
+
+#include <typedefs.h>
+
+/* Use error codes from this file only if BCMUTILS_ERR_CODES is defined. */
+#ifdef BCMUTILS_ERR_CODES
+
+/* NOTE re: Module specific error codes.
+ *
+ * BCME_.. error codes are extended by various features - e.g. FTM, NAN, SAE etc.
+ * The current process is to allocate a range of 1024 negative 32 bit integers to
+ * each module that extends the error codes to indicate a module specific status.
+ *
+ * The next range to use is below. If that range is used for a new feature, please
+ * update the range to be used by the next feature.
+ *
+ * Next available (inclusive) range: [-8*1024 + 1, -7*1024]
+ *
+ * Common error codes use BCME_ prefix. Firmware (wl) components should use the
+ * convention to prefix the error code name with WL_<Component>_E_ (e.g. WL_NAN_E_?).
+ * Non-wl components, other than common error codes, use the BCM_<Component>_E_
+ * prefix (e.g. BCM_FWSIGN_E_).
+ *
+ * End Note
+ */
+
+typedef int bcmerror_t;
+
+/*
+ * Error codes may be added, but the defined ones must not be changed or deleted,
+ * since these error codes are exposed to user code. Whenever a new error code is
+ * added to this list, please update the error string table with the related error
+ * string and update the OSL files with the OS-specific error code map.
+ */
+
+#define BCME_OK 0 /* Success */
+#define BCME_ERROR -1 /* Error generic */
+#define BCME_BADARG -2 /* Bad Argument */
+#define BCME_BADOPTION -3 /* Bad option */
+#define BCME_NOTUP -4 /* Not up */
+#define BCME_NOTDOWN -5 /* Not down */
+#define BCME_NOTAP -6 /* Not AP */
+#define BCME_NOTSTA -7 /* Not STA */
+#define BCME_BADKEYIDX -8 /* BAD Key Index */
+#define BCME_RADIOOFF -9 /* Radio Off */
+#define BCME_NOTBANDLOCKED -10 /* Not band locked */
+#define BCME_NOCLK -11 /* No Clock */
+#define BCME_BADRATESET -12 /* BAD Rate valueset */
+#define BCME_BADBAND -13 /* BAD Band */
+#define BCME_BUFTOOSHORT -14 /* Buffer too short */
+#define BCME_BUFTOOLONG -15 /* Buffer too long */
+#define BCME_BUSY -16 /* Busy */
+#define BCME_NOTASSOCIATED -17 /* Not Associated */
+#define BCME_BADSSIDLEN -18 /* Bad SSID len */
+#define BCME_OUTOFRANGECHAN -19 /* Out of Range Channel */
+#define BCME_BADCHAN -20 /* Bad Channel */
+#define BCME_BADADDR -21 /* Bad Address */
+#define BCME_NORESOURCE -22 /* Not Enough Resources */
+#define BCME_UNSUPPORTED -23 /* Unsupported */
+#define BCME_BADLEN -24 /* Bad length */
+#define BCME_NOTREADY -25 /* Not Ready */
+#define BCME_EPERM -26 /* Not Permitted */
+#define BCME_NOMEM -27 /* No Memory */
+#define BCME_ASSOCIATED -28 /* Associated */
+#define BCME_RANGE -29 /* Not In Range */
+#define BCME_NOTFOUND -30 /* Not Found */
+#define BCME_WME_NOT_ENABLED -31 /* WME Not Enabled */
+#define BCME_TSPEC_NOTFOUND -32 /* TSPEC Not Found */
+#define BCME_ACM_NOTSUPPORTED -33 /* ACM Not Supported */
+#define BCME_NOT_WME_ASSOCIATION -34 /* Not WME Association */
+#define BCME_SDIO_ERROR -35 /* SDIO Bus Error */
+#define BCME_DONGLE_DOWN -36 /* Dongle Not Accessible */
+#define BCME_VERSION -37 /* Incorrect version */
+#define BCME_TXFAIL -38 /* TX failure */
+#define BCME_RXFAIL -39 /* RX failure */
+#define BCME_NODEVICE -40 /* Device not present */
+#define BCME_NMODE_DISABLED -41 /* NMODE disabled */
+#define BCME_MSCH_DUP_REG -42 /* Duplicate slot registration */
+#define BCME_SCANREJECT -43 /* reject scan request */
+#define BCME_USAGE_ERROR -44 /* WLCMD usage error */
+#define BCME_IOCTL_ERROR -45 /* WLCMD ioctl error */
+#define BCME_SERIAL_PORT_ERR -46 /* RWL serial port error */
+#define BCME_DISABLED -47 /* Disabled in this build */
+#define BCME_DECERR -48 /* Decrypt error */
+#define BCME_ENCERR -49 /* Encrypt error */
+#define BCME_MICERR -50 /* Integrity/MIC error */
+#define BCME_REPLAY -51 /* Replay */
+#define BCME_IE_NOTFOUND -52 /* IE not found */
+#define BCME_DATA_NOTFOUND -53 /* Complete data not found in buffer */
+#define BCME_NOT_GC -54 /* expecting a group client */
+#define BCME_PRS_REQ_FAILED -55 /* GC presence req failed to send */
+#define BCME_NO_P2P_SE -56 /* Could not find P2P-Subelement */
+#define BCME_NOA_PND -57 /* NoA pending, CB should be NULL */
+#define BCME_FRAG_Q_FAILED -58 /* queueing 802.11 frag failed */
+#define BCME_GET_AF_FAILED -59 /* Get p2p AF pkt failed */
+#define BCME_MSCH_NOTREADY -60 /* scheduler not ready */
+#define BCME_IOV_LAST_CMD -61 /* last batched iov sub-command */
+#define BCME_MINIPMU_CAL_FAIL -62 /* MiniPMU cal failed */
+#define BCME_RCAL_FAIL -63 /* Rcal failed */
+#define BCME_LPF_RCCAL_FAIL -64 /* RCCAL failed */
+#define BCME_DACBUF_RCCAL_FAIL -65 /* RCCAL failed */
+#define BCME_VCOCAL_FAIL -66 /* VCOCAL failed */
+#define BCME_BANDLOCKED -67 /* interface is restricted to a band */
+#define BCME_BAD_IE_DATA -68 /* Received IE with invalid/bad data */
+#define BCME_REG_FAILED -69 /* Generic registration failed */
+#define BCME_NOCHAN -70 /* Registration with 0 chans in list */
+#define BCME_PKTTOSS -71 /* Pkt tossed */
+#define BCME_DNGL_DEVRESET -72 /* dongle re-attach during DEVRESET */
+#define BCME_ROAM -73 /* Roam related failures */
+#define BCME_NO_SIG_FILE -74 /* Signature file is missing */
+
+#define BCME_LAST BCME_NO_SIG_FILE
+
+#define BCME_NOTENABLED BCME_DISABLED
+
+/* This error code is *internal* to the driver, and is not propagated to users. It should
+ * only be used by IOCTL patch handlers as an indication that it did not handle the IOCTL.
+ * (Since the error code is internal, an entry in 'BCMERRSTRINGTABLE' is not required,
+ * nor does it need to be part of any OSL driver-to-OS error code mapping).
+ */
+#define BCME_IOCTL_PATCH_UNSUPPORTED -9999
+#if (BCME_LAST <= BCME_IOCTL_PATCH_UNSUPPORTED)
+ #error "BCME_LAST <= BCME_IOCTL_PATCH_UNSUPPORTED"
+#endif
+
+/* These are collection of BCME Error strings */
+#define BCMERRSTRINGTABLE { \
+ "OK", \
+ "Undefined error", \
+ "Bad Argument", \
+ "Bad Option", \
+ "Not up", \
+ "Not down", \
+ "Not AP", \
+ "Not STA", \
+ "Bad Key Index", \
+ "Radio Off", \
+ "Not band locked", \
+ "No clock", \
+ "Bad Rate valueset", \
+ "Bad Band", \
+ "Buffer too short", \
+ "Buffer too long", \
+ "Busy", \
+ "Not Associated", \
+ "Bad SSID len", \
+ "Out of Range Channel", \
+ "Bad Channel", \
+ "Bad Address", \
+ "Not Enough Resources", \
+ "Unsupported", \
+ "Bad length", \
+ "Not Ready", \
+ "Not Permitted", \
+ "No Memory", \
+ "Associated", \
+ "Not In Range", \
+ "Not Found", \
+ "WME Not Enabled", \
+ "TSPEC Not Found", \
+ "ACM Not Supported", \
+ "Not WME Association", \
+ "SDIO Bus Error", \
+ "Dongle Not Accessible", \
+ "Incorrect version", \
+ "TX Failure", \
+ "RX Failure", \
+ "Device Not Present", \
+ "NMODE Disabled", \
+ "Host Offload in device", \
+ "Scan Rejected", \
+ "WLCMD usage error", \
+ "WLCMD ioctl error", \
+ "RWL serial port error", \
+ "Disabled", \
+ "Decrypt error", \
+ "Encrypt error", \
+ "MIC error", \
+ "Replay", \
+ "IE not found", \
+ "Data not found", \
+ "NOT GC", \
+ "PRS REQ FAILED", \
+ "NO P2P SubElement", \
+ "NOA Pending", \
+ "FRAG Q FAILED", \
+ "GET ActionFrame failed", \
+ "scheduler not ready", \
+ "Last IOV batched sub-cmd", \
+ "Mini PMU Cal failed", \
+ "R-cal failed", \
+ "LPF RC Cal failed", \
+ "DAC buf RC Cal failed", \
+ "VCO Cal failed", \
+ "band locked", \
+	"Received ie with invalid data", \
+ "registration failed", \
+ "Registration with zero channels", \
+ "pkt toss", \
+ "Dongle Devreset", \
+ "Critical roam in progress", \
+ "Signature file is missing", \
+}
+
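[Editor's note] The table is ordered so that the string for a BCME_ code sits at index -code, which is why existing codes must never be renumbered. A minimal lookup sketch (the function name is hypothetical; the driver's own equivalent is bcmerrorstr() in bcmutils.c):

    /* Editor's example, not part of the patch: map a BCME_ code to a string. */
    static const char *bcm_errstr(int err)
    {
            static const char *errstrs[] = BCMERRSTRINGTABLE;

            if (err > 0 || err < BCME_LAST)
                    return "Unknown error";
            return errstrs[-err];           /* index 0 is "OK", 1 is "Undefined error", ... */
    }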
+/** status - TBD BCME_ vs proxd status - range reserved for BCME_ */
+enum {
+ WL_PROXD_E_LAST = -1057,
+ WL_PROXD_E_ASSOC_INPROG = -1057,
+ WL_PROXD_E_NOAVAIL = -1056,
+ WL_PROXD_E_EXT_SCHED = -1055,
+ WL_PROXD_E_NOT_BCM = -1054,
+ WL_PROXD_E_FRAME_TYPE = -1053,
+ WL_PROXD_E_VERNOSUPPORT = -1052,
+ WL_PROXD_E_SEC_NOKEY = -1051,
+ WL_PROXD_E_SEC_POLICY = -1050,
+ WL_PROXD_E_SCAN_INPROCESS = -1049,
+ WL_PROXD_E_BAD_PARTIAL_TSF = -1048,
+ WL_PROXD_E_SCANFAIL = -1047,
+ WL_PROXD_E_NOTSF = -1046,
+ WL_PROXD_E_POLICY = -1045,
+ WL_PROXD_E_INCOMPLETE = -1044,
+ WL_PROXD_E_OVERRIDDEN = -1043,
+ WL_PROXD_E_ASAP_FAILED = -1042,
+ WL_PROXD_E_NOTSTARTED = -1041,
+ WL_PROXD_E_INVALIDMEAS = -1040,
+ WL_PROXD_E_INCAPABLE = -1039,
+ WL_PROXD_E_MISMATCH = -1038,
+ WL_PROXD_E_DUP_SESSION = -1037,
+ WL_PROXD_E_REMOTE_FAIL = -1036,
+ WL_PROXD_E_REMOTE_INCAPABLE = -1035,
+ WL_PROXD_E_SCHED_FAIL = -1034,
+ WL_PROXD_E_PROTO = -1033,
+ WL_PROXD_E_EXPIRED = -1032,
+ WL_PROXD_E_TIMEOUT = -1031,
+ WL_PROXD_E_NOACK = -1030,
+ WL_PROXD_E_DEFERRED = -1029,
+ WL_PROXD_E_INVALID_SID = -1028,
+ WL_PROXD_E_REMOTE_CANCEL = -1027,
+ WL_PROXD_E_CANCELED = -1026, /**< local */
+ WL_PROXD_E_INVALID_SESSION = -1025,
+ WL_PROXD_E_BAD_STATE = -1024,
+ WL_PROXD_E_START = -1024,
+ WL_PROXD_E_ERROR = -1,
+ WL_PROXD_E_OK = 0
+};
+typedef int32 wl_proxd_status_t;
+
+/** status - TBD BCME_ vs NAN status - range reserved for BCME_ */
+enum {
+ /* add new status here... */
+ WL_NAN_E_NO_ACTION = -2136, /* status for no action */
+ WL_NAN_E_INVALID_TOKEN = -2135, /* invalid token or mismatch */
+ WL_NAN_E_INVALID_ATTR = -2134, /* generic invalid attr error */
+ WL_NAN_E_INVALID_NDL_ATTR = -2133, /* invalid NDL attribute */
+ WL_NAN_E_SCB_NORESOURCE = -2132, /* no more peer scb available */
+ WL_NAN_E_PEER_NOTAVAIL = -2131,
+ WL_NAN_E_SCB_EXISTS = -2130,
+ WL_NAN_E_INVALID_PEER_NDI = -2129,
+ WL_NAN_E_INVALID_LOCAL_NDI = -2128,
+ WL_NAN_E_ALREADY_EXISTS = -2127, /* generic NAN error for duplication */
+ WL_NAN_E_EXCEED_MAX_NUM_MAPS = -2126,
+ WL_NAN_E_INVALID_DEV_CHAN_SCHED = -2125,
+ WL_NAN_E_INVALID_PEER_BLOB_TYPE = -2124,
+ WL_NAN_E_INVALID_LCL_BLOB_TYPE = -2123,
+ WL_NAN_E_BCMC_PDPA = -2122, /* BCMC NAF PDPA */
+ WL_NAN_E_TIMEOUT = -2121,
+ WL_NAN_E_HOST_CFG = -2120,
+ WL_NAN_E_NO_ACK = -2119,
+ WL_NAN_E_SECINST_FAIL = -2118,
+ WL_NAN_E_REJECT_NDL = -2117, /* generic NDL rejection error */
+ WL_NAN_E_INVALID_NDP_ATTR = -2116,
+ WL_NAN_E_HOST_REJECTED = -2115,
+ WL_NAN_E_PCB_NORESOURCE = -2114,
+ WL_NAN_E_NDC_EXISTS = -2113,
+ WL_NAN_E_NO_NDC_ENTRY_AVAIL = -2112,
+ WL_NAN_E_INVALID_NDC_ENTRY = -2111,
+ WL_NAN_E_SD_TX_LIST_FULL = -2110,
+ WL_NAN_E_SVC_SUB_LIST_FULL = -2109,
+ WL_NAN_E_SVC_PUB_LIST_FULL = -2108,
+ WL_NAN_E_SDF_MAX_LEN_EXCEEDED = -2107,
+ WL_NAN_E_ZERO_CRB = -2106, /* no CRB between local and peer */
+ WL_NAN_E_PEER_NDC_NOT_SELECTED = -2105, /* peer ndc not selected */
+ WL_NAN_E_DAM_CHAN_CONFLICT = -2104, /* dam schedule channel conflict */
+ WL_NAN_E_DAM_SCHED_PERIOD = -2103, /* dam schedule period mismatch */
+ WL_NAN_E_LCL_NDC_NOT_SELECTED = -2102, /* local selected ndc not configured */
+ WL_NAN_E_NDL_QOS_INVALID_NA = -2101, /* na doesn't comply with ndl qos */
+ WL_NAN_E_CLEAR_NAF_WITH_SA_AS_RNDI = -2100, /* rx clear naf with peer rndi */
+ WL_NAN_E_SEC_CLEAR_PKT = -2099, /* rx clear pkt from a peer with sec_sa */
+ WL_NAN_E_PROT_NON_PDPA_NAF = -2098, /* rx protected non PDPA frame */
+ WL_NAN_E_DAM_DOUBLE_REMOVE = -2097, /* remove peer schedule already removed */
+ WL_NAN_E_DAM_DOUBLE_MERGE = -2096, /* merge peer schedule already merged */
+ WL_NAN_E_DAM_REJECT_INVALID = -2095, /* reject for invalid schedule */
+ WL_NAN_E_DAM_REJECT_RANGE = -2094,
+ WL_NAN_E_DAM_REJECT_QOS = -2093,
+ WL_NAN_E_DAM_REJECT_NDC = -2092,
+ WL_NAN_E_DAM_REJECT_PEER_IMMUT = -2091,
+ WL_NAN_E_DAM_REJECT_LCL_IMMUT = -2090,
+ WL_NAN_E_DAM_EXCEED_NUM_SCHED = -2089,
+ WL_NAN_E_DAM_INVALID_SCHED_MAP = -2088, /* invalid schedule map list */
+ WL_NAN_E_DAM_INVALID_LCL_SCHED = -2087,
+ WL_NAN_E_INVALID_MAP_ID = -2086,
+ WL_NAN_E_CHAN_OVERLAP_ACROSS_MAP = -2085,
+ WL_NAN_E_INVALID_CHAN_LIST = -2084,
+ WL_NAN_E_INVALID_RANGE_TBMP = -2083,
+ WL_NAN_E_INVALID_IMMUT_SCHED = -2082,
+ WL_NAN_E_INVALID_NDC_ATTR = -2081,
+ WL_NAN_E_INVALID_TIME_BITMAP = -2080,
+ WL_NAN_E_INVALID_NA_ATTR = -2079,
+ WL_NAN_E_NO_NA_ATTR_IN_AVAIL_MAP = -2078, /* no na attr saved in avail map */
+ WL_NAN_E_INVALID_MAP_IDX = -2077,
+ WL_NAN_E_SEC_SA_NOTFOUND = -2076,
+ WL_NAN_E_BSSCFG_NOTFOUND = -2075,
+ WL_NAN_E_SCB_NOTFOUND = -2074,
+ WL_NAN_E_NCS_SK_KDESC_TYPE = -2073,
+ WL_NAN_E_NCS_SK_KEY_DESC_VER = -2072, /* key descr ver */
+ WL_NAN_E_NCS_SK_KEY_TYPE = -2071, /* key descr type */
+ WL_NAN_E_NCS_SK_KEYINFO_FAIL = -2070, /* key info (generic) */
+ WL_NAN_E_NCS_SK_KEY_LEN = -2069, /* key len */
+ WL_NAN_E_NCS_SK_KDESC_NOT_FOUND = -2068, /* key desc not found */
+ WL_NAN_E_NCS_SK_INVALID_PARAMS = -2067, /* invalid args */
+ WL_NAN_E_NCS_SK_KDESC_INVALID = -2066, /* key descr is not valid */
+ WL_NAN_E_NCS_SK_NONCE_MISMATCH = -2065,
+ WL_NAN_E_NCS_SK_KDATA_SAVE_FAIL = -2064, /* not able to save key data */
+ WL_NAN_E_NCS_SK_AUTH_TOKEN_CALC_FAIL = -2063,
+ WL_NAN_E_NCS_SK_PTK_CALC_FAIL = -2062,
+ WL_NAN_E_INVALID_STARTOFFSET = -2061,
+ WL_NAN_E_BAD_NA_ENTRY_TYPE = -2060,
+ WL_NAN_E_INVALID_CHANBMP = -2059,
+ WL_NAN_E_INVALID_OP_CLASS = -2058,
+ WL_NAN_E_NO_IES = -2057,
+ WL_NAN_E_NO_PEER_ENTRY_AVAIL = -2056,
+ WL_NAN_E_INVALID_PEER = -2055,
+ WL_NAN_E_PEER_EXISTS = -2054,
+ WL_NAN_E_PEER_NOTFOUND = -2053,
+ WL_NAN_E_NO_MEM = -2052,
+ WL_NAN_E_INVALID_OPTION = -2051,
+ WL_NAN_E_INVALID_BAND = -2050,
+ WL_NAN_E_INVALID_MAC = -2049,
+ WL_NAN_E_BAD_INSTANCE = -2048,
+ /* NAN status code reserved from -2048 to -3071 */
+ /* Do NOT add new status below -2048 */
+ WL_NAN_E_ERROR = -1,
+ WL_NAN_E_OK = 0
+};
+
+/* SAE (Simultaneous Authentication of Equals) status codes.
+ * SAE status codes are reserved from -3072 to -4095 (1K)
+ */
+enum {
+ WL_SAE_E_AUTH_FAILURE = -3072,
+ /* Discard silently */
+ WL_SAE_E_AUTH_DISCARD = -3073,
+ /* Authentication in progress */
+ WL_SAE_E_AUTH_CONTINUE = -3074,
+ /* Invalid scalar/elt */
+ WL_SAE_E_AUTH_COMMIT_INVALID = -3075,
+ /* Invalid confirm token */
+ WL_SAE_E_AUTH_CONFIRM_INVALID = -3076,
+ /* Peer scalar validation failure */
+ WL_SAE_E_CRYPTO_SCALAR_VALIDATION = -3077,
+ /* Peer element prime validation failure */
+ WL_SAE_E_CRYPTO_ELE_PRIME_VALIDATION = -3078,
+ /* Peer element is not on the curve */
+ WL_SAE_E_CRYPTO_ELE_NOT_ON_CURVE = -3079,
+	/* Generic EC error (elliptic curve related) */
+ WL_SAE_E_CRYPTO_EC_ERROR = -3080,
+ /* Both local and peer mac addrs are same */
+ WL_SAE_E_CRYPTO_EQUAL_MACADDRS = -3081,
+ /* Loop exceeded in deriving the scalar */
+ WL_SAE_E_CRYPTO_SCALAR_ITER_EXCEEDED = -3082,
+ /* ECC group is unsupported */
+ WL_SAE_E_CRYPTO_UNSUPPORTED_GROUP = -3083,
+ /* Exceeded the hunting-and-pecking counter */
+ WL_SAE_E_CRYPTO_PWE_COUNTER_EXCEEDED = -3084,
+ /* SAE crypto component is not initialized */
+ WL_SAE_E_CRYPTO_NOT_INITED = -3085,
+ /* bn_get has failed */
+ WL_SAE_E_CRYPTO_BN_GET_ERROR = -3086,
+ /* bn_set has failed */
+ WL_SAE_E_CRYPTO_BN_SET_ERROR = -3087,
+ /* PMK is not computed yet */
+ WL_SAE_E_CRYPTO_PMK_UNAVAILABLE = -3088,
+ /* Peer confirm did not match */
+ WL_SAE_E_CRYPTO_CONFIRM_MISMATCH = -3089,
+	/* Element K is at infinity on the curve */
+ WL_SAE_E_CRYPTO_KEY_AT_INFINITY = -3090,
+ /* SAE Crypto private data magic number mismatch */
+ WL_SAE_E_CRYPTO_PRIV_MAGIC_MISMATCH = -3091,
+ /* Max retry exhausted */
+ WL_SAE_E_MAX_RETRY_LIMIT_REACHED = -3092,
+	/* peer-sent password ID does not match the local one */
+ WL_SAE_E_AUTH_PEER_PWDID_MISMATCH = -3093,
+ /* user not configured password */
+ WL_SAE_E_AUTH_PASSWORD_NOT_CONFIGURED = -3094,
+ /* user not configured password ID */
+ WL_SAE_E_AUTH_PWDID_NOT_CONFIGURED = -3095,
+ /* Anti-clogging token mismatch */
+ WL_SAE_E_AUTH_ANTI_CLOG_MISMATCH = -3096,
+ /* SAE PWE method mismatch */
+ WL_SAE_E_AUTH_PWE_MISMATCH = -3097
+};
+
+/*
+ * Firmware signing error code range: -4096...-5119
+ */
+enum {
+ /* okay */
+ BCM_FWSIGN_E_OK = 0,
+
+ /* Operation is in progress */
+ BCM_FWSIGN_E_INPROGRESS = -4096,
+
+ /* version mismatch */
+ BCM_FWSIGN_E_VERSION = -4097,
+
+ /* key not found */
+ BCM_FWSIGN_E_KEY_NOT_FOUND = -4098,
+
+ /* key found, but is not valid (revoked) */
+ BCM_FWSIGN_E_KEY_NOT_VALID = -4099,
+
+ /* Cipher suite id mismatch for the key */
+ BCM_FWSIGN_E_CS_ID_MISMATCH = -4100,
+
+ /* Signature does not match */
+ BCM_FWSIGN_E_SIGNATURE = -4101,
+
+ /* Continue */
+ BCM_FWSIGN_E_CONTINUE = -4102,
+
+ /* Heap is too small */
+ BCM_FWSIGN_E_HEAP_TOO_SMALL = -4103,
+
+ /* Allocation of bn ctx failed */
+ BCM_FWSIGN_E_BN_CTX_ALLOC_FAILED = -4104,
+
+ /* possible bug */
+ BCM_FWSIGN_E_BUGCHECK = -4105,
+
+ /* chosen key is invalid */
+ BCM_FWSIGN_E_INVALID_KEY = -4106,
+
+ /* signature is invalid */
+ BCM_FWSIGN_E_INVALID_SIGNATURE = -4107,
+
+ /* signature tlv missing */
+ BCM_FWSIGN_E_NO_CSID_SIG = -4108,
+
+	/* chosen key has been revoked */
+ BCM_FWSIGN_E_REVOKED_KEY = -4109,
+
+ /* signature has no matching valid key in ROM */
+ BCM_FWSIGN_E_NO_OTP_FOR_ROM_KEY = -4110,
+
+ /* Compression not supported */
+ BCM_FWSIGN_E_COMPNOTSUP = -4111,
+
+ /* OTP read error */
+ BCM_FWSIGN_E_OTP_READ = -4112,
+
+ /* heap address overlaps with FW address space */
+ BCM_FWSIGN_E_HEAP_OVR_FW = -4113,
+
+ /* heap address overlaps with bootloader data/bss region */
+ BCM_FWSIGN_E_HEAP_OVR_BSS = -4114,
+
+ /* heap address overlaps with bootloader stack region */
+ BCM_FWSIGN_E_HEAP_OVR_STACK = -4115,
+
+ /* firmware encryption header tlv is missing */
+ BCM_FWSIGN_E_NO_FWENC_HDR = -4116,
+
+ /* firmware encryption algo not supported */
+ BCM_FWSIGN_E_FWENC_ALGO_NOTSUP = -4117,
+
+ /* firmware encryption tag tlv is missing */
+ BCM_FWSIGN_E_NO_FW_TAG = -4118,
+
+ /* firmware encryption tag tlv is not valid */
+ BCM_FWSIGN_E_FW_TAG_INVALID_TLV = -4119,
+
+ /* firmware encryption tag verification fail */
+ BCM_FWSIGN_E_FW_TAG_MISMATCH = -4120,
+
+ /* signature package is invalid */
+ BCM_FWSIGN_E_PACKAGE_INVALID = -4121,
+
+ /* last error */
+ BCM_FWSIGN_E_LAST = -5119
+};
+typedef int32 bcm_fwsign_status_t;
+
+/* PMK manager block. Event codes from -5120 to -6143 */
+/* PSK hashing event codes */
+enum {
+ WL_PMK_E_PSK_HASH_FAILED = -5120,
+ WL_PMK_E_PSK_HASH_DONE = -5121,
+ WL_PMK_E_PSK_HASH_RUNNING = -5122,
+ WL_PMK_E_PSK_INVALID = -5123,
+ WL_PMK_E_PSK_NOMEM = -5124
+};
+
+/*
+ * SOE (Security Offload Engine) status codes.
+ * SOE status codes are reserved from -6144 to -7167 (1K)
+ */
+enum {
+ /* Invalid operational context */
+ WL_SOE_E_BAD_OP_CONTEXT = -6144,
+
+ /* Invalid operational type */
+ WL_SOE_E_BAD_OP_TYPE = -6145,
+
+ /* Failure to get NAF3 encoded scalar */
+ WL_SOE_E_BN_NAF3_GET_ERROR = -6146,
+
+ /* Failure to get NAF3 params */
+ WL_SOE_E_NAF3_PARAMS_GET_ERROR = -6147
+};
+
+/* BCM crypto ASN.1 status codes. */
+/* Reserved range is from -7168 to -8191 */
+enum {
+ /* tag mismatch */
+ BCM_CRYPTO_E_ASN1_TAG_MISMATCH = -7168,
+
+ /* OID mismatch */
+ BCM_CRYPTO_E_ASN1_OID_MISMATCH = -7169,
+
+ /* Bad key type */
+ BCM_CRYPTO_E_ASN1_BAD_KEY_TYPE = -7170,
+
+ /* value length is invalid */
+ BCM_CRYPTO_E_ASN1_INVALID_LENGTH = -7171,
+
+ /* Invalid public key length */
+ BCM_CRYPTO_E_ASN1_INVALID_PKLEN = -7172,
+
+ /* Unsupported elliptic curve group */
+ BCM_CRYPTO_E_ASN1_UNSUPPORTED_ECG = -7173
+};
+
+#endif /* BCMUTILS_ERR_CODES */
+
+#endif /* _bcmerror_h_ */
diff --git a/bcmdhd.101.10.361.x/include/bcmeth.h b/bcmdhd.101.10.361.x/include/bcmeth.h
new file mode 100755
index 0000000..f433437
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/bcmeth.h
@@ -0,0 +1,109 @@
+/*
+ * Broadcom Ethernettype protocol definitions
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+/*
+ * Broadcom Ethernet protocol defines
+ */
+
+#ifndef _BCMETH_H_
+#define _BCMETH_H_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+/* ETHER_TYPE_BRCM is defined in ethernet.h */
+
+/*
+ * Following the 2-byte BRCM ether_type is a 16-bit BRCM subtype field
+ * in one of two formats: (only subtypes 32768-65535 are in use now)
+ *
+ * subtypes 0-32767:
+ * 8 bit subtype (0-127)
+ * 8 bit length in bytes (0-255)
+ *
+ * subtypes 32768-65535:
+ * 16 bit big-endian subtype
+ * 16 bit big-endian length in bytes (0-65535)
+ *
+ * length is the number of additional bytes beyond the 4 or 6 byte header
+ *
+ * Reserved values:
+ * 0 reserved
+ * 5-15 reserved for iLine protocol assignments
+ * 17-126 reserved, assignable
+ * 127 reserved
+ * 32768 reserved
+ * 32769-65534 reserved, assignable
+ * 65535 reserved
+ */
+
+/*
+ * When adding subtypes and their specific processing code, make sure
+ * bcmeth_hdr_t is the first data structure in the user-specific data structure definition.
+ */
+
+#define BCMILCP_SUBTYPE_RATE 1
+#define BCMILCP_SUBTYPE_LINK 2
+#define BCMILCP_SUBTYPE_CSA 3
+#define BCMILCP_SUBTYPE_LARQ 4
+#define BCMILCP_SUBTYPE_VENDOR 5
+#define BCMILCP_SUBTYPE_FLH 17
+
+#define BCMILCP_SUBTYPE_VENDOR_LONG 32769
+#define BCMILCP_SUBTYPE_CERT 32770
+#define BCMILCP_SUBTYPE_SES 32771
+
+#define BCMILCP_BCM_SUBTYPE_RESERVED 0
+#define BCMILCP_BCM_SUBTYPE_EVENT 1
+#define BCMILCP_BCM_SUBTYPE_SES 2
+/*
+ * The EAPOL type is not used anymore. Instead EAPOL messages are now embedded
+ * within BCMILCP_BCM_SUBTYPE_EVENT type messages
+ */
+/* #define BCMILCP_BCM_SUBTYPE_EAPOL 3 */
+#define BCMILCP_BCM_SUBTYPE_DPT 4
+#define BCMILCP_BCM_SUBTYPE_DNGLEVENT 5
+
+#define BCMILCP_BCM_SUBTYPEHDR_MINLENGTH 8
+#define BCMILCP_BCM_SUBTYPEHDR_VERSION 0
+#define BCMILCP_BCM_SUBTYPE_EVENT_DATA_PAD 2
+
+/* These fields are stored in network order */
+typedef BWL_PRE_PACKED_STRUCT struct bcmeth_hdr
+{
+ uint16 subtype; /* Vendor specific..32769 */
+ uint16 length;
+ uint8 version; /* Version is 0 */
+ uint8 oui[3]; /* Broadcom OUI */
+ /* user specific Data */
+ uint16 usr_subtype;
+} BWL_POST_PACKED_STRUCT bcmeth_hdr_t;
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _BCMETH_H_ */
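[Editor's note] Since every Broadcom event frame carries this header right after the ETHER_TYPE_BRCM ethertype, a receiver can validate it before touching the payload. A minimal sketch (function name hypothetical; the OUI comparison is omitted for brevity, and the fields are big-endian, hence the ntoh16_ua() loads from bcmendian.h):

    /* Editor's example, not part of the patch: sanity-check a bcmeth_hdr_t. */
    static bool bcmeth_hdr_ok(const uint8 *p)
    {
            const bcmeth_hdr_t *h = (const bcmeth_hdr_t *)p;

            return ntoh16_ua(&h->subtype) == BCMILCP_SUBTYPE_VENDOR_LONG &&
                   h->version == BCMILCP_BCM_SUBTYPEHDR_VERSION &&
                   ntoh16_ua(&h->usr_subtype) == BCMILCP_BCM_SUBTYPE_EVENT;
    }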
diff --git a/bcmdhd.101.10.361.x/include/bcmevent.h b/bcmdhd.101.10.361.x/include/bcmevent.h
new file mode 100755
index 0000000..e7876c7
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/bcmevent.h
@@ -0,0 +1,1617 @@
+/*
+ * Broadcom Event protocol definitions
+ *
+ * Dependencies: bcmeth.h
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ *
+ */
+
+/*
+ * Broadcom Ethernet Events protocol defines
+ *
+ */
+
+#ifndef _BCMEVENT_H_
+#define _BCMEVENT_H_
+
+#include <typedefs.h>
+/* #include <ethernet.h> -- TODO: required, but excluded due to overwhelming coupling (break up ethernet.h) */
+#include <bcmeth.h>
+#if defined(HEALTH_CHECK) || defined(DNGL_EVENT_SUPPORT)
+#include <dnglevent.h>
+#endif /* HEALTH_CHECK || DNGL_EVENT_SUPPORT */
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+#define BCM_EVENT_MSG_VERSION 2 /* wl_event_msg_t struct version */
+#define BCM_MSG_IFNAME_MAX 16 /* max length of interface name */
+
+/* flags */
+#define WLC_EVENT_MSG_LINK 0x01 /* link is up */
+#define WLC_EVENT_MSG_FLUSHTXQ 0x02 /* flush tx queue on MIC error */
+#define WLC_EVENT_MSG_GROUP 0x04 /* group MIC error */
+#define WLC_EVENT_MSG_UNKBSS 0x08 /* unknown source bsscfg */
+#define WLC_EVENT_MSG_UNKIF 0x10 /* unknown source OS i/f */
+
+/* these fields are stored in network order */
+
+/* version 1 */
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+ uint16 version;
+ uint16 flags; /* see flags below */
+ uint32 event_type; /* Message (see below) */
+ uint32 status; /* Status code (see below) */
+ uint32 reason; /* Reason code (if applicable) */
+ uint32 auth_type; /* WLC_E_AUTH */
+ uint32 datalen; /* data buf */
+ struct ether_addr addr; /* Station address (if applicable) */
+ char ifname[BCM_MSG_IFNAME_MAX]; /* name of the packet incoming interface */
+} BWL_POST_PACKED_STRUCT wl_event_msg_v1_t;
+
+/* the current version */
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+ uint16 version;
+ uint16 flags; /* see flags below */
+ uint32 event_type; /* Message (see below) */
+ uint32 status; /* Status code (see below) */
+ uint32 reason; /* Reason code (if applicable) */
+ uint32 auth_type; /* WLC_E_AUTH */
+ uint32 datalen; /* data buf */
+ struct ether_addr addr; /* Station address (if applicable) */
+ char ifname[BCM_MSG_IFNAME_MAX]; /* name of the packet incoming interface */
+ uint8 ifidx; /* destination OS i/f index */
+ uint8 bsscfgidx; /* source bsscfg index */
+} BWL_POST_PACKED_STRUCT wl_event_msg_t;
+
+/* used by driver msgs */
+typedef BWL_PRE_PACKED_STRUCT struct bcm_event {
+ struct ether_header eth;
+ bcmeth_hdr_t bcm_hdr;
+ wl_event_msg_t event;
+ /* data portion follows */
+} BWL_POST_PACKED_STRUCT bcm_event_t;
+
+/*
+ * used by host event
+ * note: if additional event types are added, they must be handled in is_wlc_event_frame() as well.
+ */
+typedef union bcm_event_msg_u {
+ wl_event_msg_t event;
+#if defined(HEALTH_CHECK) || defined(DNGL_EVENT_SUPPORT)
+ bcm_dngl_event_msg_t dngl_event;
+#endif /* HEALTH_CHECK || DNGL_EVENT_SUPPORT */
+
+ /* add new event here */
+} bcm_event_msg_u_t;
+
+#define BCM_MSG_LEN (sizeof(bcm_event_t) - sizeof(bcmeth_hdr_t) - sizeof(struct ether_header))
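+
+/*
+ * Minimal sketch (not part of the original header) of locating the event
+ * message in a raw bcm_event_t frame, illustrating the layout BCM_MSG_LEN
+ * is derived from. Real code should prefer is_wlc_event_frame() declared
+ * below; the helper name here is hypothetical, and a complete check would
+ * also match eth.ether_type (ETHER_TYPE_BRCM) and bcm_hdr.oui.
+ */
+static INLINE const wl_event_msg_t *
+bcm_event_get_msg(const void *pktdata, uint pktlen)
+{
+	const bcm_event_t *be = (const bcm_event_t *)pktdata;
+	const uint8 *us;
+
+	if (pktlen < sizeof(bcm_event_t))
+		return NULL;
+	/* usr_subtype is stored in network (big-endian) order */
+	us = (const uint8 *)&be->bcm_hdr.usr_subtype;
+	if ((uint16)((us[0] << 8) | us[1]) != BCMILCP_BCM_SUBTYPE_EVENT)
+		return NULL;
+	return &be->event;
+}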
+
+/* Event messages */
+#define WLC_E_SET_SSID 0 /* indicates status of set SSID */
+#define WLC_E_JOIN 1 /* differentiates join IBSS from found (WLC_E_START) IBSS */
+#define WLC_E_START 2 /* STA started an IBSS or AP started a BSS */
+#define WLC_E_AUTH 3 /* 802.11 AUTH request */
+#define WLC_E_AUTH_IND 4 /* 802.11 AUTH indication */
+#define WLC_E_DEAUTH 5 /* 802.11 DEAUTH request */
+#define WLC_E_DEAUTH_IND 6 /* 802.11 DEAUTH indication */
+#define WLC_E_ASSOC 7 /* 802.11 ASSOC request */
+#define WLC_E_ASSOC_IND 8 /* 802.11 ASSOC indication */
+#define WLC_E_REASSOC 9 /* 802.11 REASSOC request */
+#define WLC_E_REASSOC_IND 10 /* 802.11 REASSOC indication */
+#define WLC_E_DISASSOC 11 /* 802.11 DISASSOC request */
+#define WLC_E_DISASSOC_IND 12 /* 802.11 DISASSOC indication */
+#define WLC_E_QUIET_START 13 /* 802.11h Quiet period started */
+#define WLC_E_QUIET_END 14 /* 802.11h Quiet period ended */
+#define WLC_E_BEACON_RX 15 /* BEACONS received/lost indication */
+#define WLC_E_LINK 16 /* generic link indication */
+#define WLC_E_MIC_ERROR 17 /* TKIP MIC error occurred */
+#define WLC_E_NDIS_LINK 18 /* NDIS style link indication */
+#define WLC_E_ROAM 19 /* roam complete: indicate status & reason */
+#define WLC_E_TXFAIL 20 /* change in dot11FailedCount (txfail) */
+#define WLC_E_PMKID_CACHE 21 /* WPA2 pmkid cache indication */
+#define WLC_E_RETROGRADE_TSF 22 /* current AP's TSF value went backward */
+#define WLC_E_PRUNE 23 /* AP was pruned from join list for reason */
+#define WLC_E_AUTOAUTH 24 /* report AutoAuth table entry match for join attempt */
+#define WLC_E_EAPOL_MSG 25 /* Event encapsulating an EAPOL message */
+#define WLC_E_SCAN_COMPLETE 26 /* Scan results are ready or scan was aborted */
+#define WLC_E_ADDTS_IND 27 /* indicate to host addts fail/success */
+#define WLC_E_DELTS_IND 28 /* indicate to host delts fail/success */
+#define WLC_E_BCNSENT_IND 29 /* indicate to host of beacon transmit */
+#define WLC_E_BCNRX_MSG 30 /* Send the received beacon up to the host */
+#define WLC_E_BCNLOST_MSG 31 /* indicate to host loss of beacon */
+#define WLC_E_ROAM_PREP 32 /* before attempting to roam association */
+#define WLC_E_PFN_NET_FOUND 33 /* PFN network found event */
+#define WLC_E_PFN_NET_LOST 34 /* PFN network lost event */
+#define WLC_E_RESET_COMPLETE 35
+#define WLC_E_JOIN_START 36
+#define WLC_E_ROAM_START 37 /* roam attempt started: indicate reason */
+#define WLC_E_ASSOC_START 38
+#define WLC_E_IBSS_ASSOC 39
+#define WLC_E_RADIO 40
+#define WLC_E_PSM_WATCHDOG 41 /* PSM microcode watchdog fired */
+
+#define WLC_E_PROBREQ_MSG 44 /* probe request received */
+#define WLC_E_SCAN_CONFIRM_IND 45
+#define WLC_E_PSK_SUP 46 /* WPA Handshake fail */
+#define WLC_E_COUNTRY_CODE_CHANGED 47
+#define WLC_E_EXCEEDED_MEDIUM_TIME 48 /* WMMAC exceeded medium time */
+#define WLC_E_ICV_ERROR 49 /* WEP ICV error occurred */
+#define WLC_E_UNICAST_DECODE_ERROR 50 /* Unsupported unicast encrypted frame */
+#define WLC_E_MULTICAST_DECODE_ERROR 51 /* Unsupported multicast encrypted frame */
+#define WLC_E_TRACE 52
+#define WLC_E_IF 54 /* I/F change (for dongle host notification) */
+#define WLC_E_P2P_DISC_LISTEN_COMPLETE 55 /* listen state expires */
+#define WLC_E_RSSI 56 /* indicate RSSI change based on configured levels */
+#define WLC_E_PFN_BEST_BATCHING 57 /* PFN best network batching event */
+#define WLC_E_EXTLOG_MSG 58
+#define WLC_E_ACTION_FRAME 59 /* Action frame Rx */
+#define WLC_E_ACTION_FRAME_COMPLETE 60 /* Action frame Tx complete */
+#define WLC_E_PRE_ASSOC_IND 61 /* assoc request received */
+#define WLC_E_PRE_REASSOC_IND 62 /* re-assoc request received */
+#ifdef CSI_SUPPORT
+#define WLC_E_CSI 63
+#else
+#define WLC_E_CHANNEL_ADOPTED 63 /* channel adopted (obsoleted) */
+#endif /* CSI_SUPPORT */
+#define WLC_E_AP_STARTED 64 /* AP started */
+#define WLC_E_DFS_AP_STOP 65 /* AP stopped due to DFS */
+#define WLC_E_DFS_AP_RESUME 66 /* AP resumed due to DFS */
+#define WLC_E_WAI_STA_EVENT 67 /* WAI stations event */
+#define WLC_E_WAI_MSG 68 /* event encapsulating a WAI message */
+#define WLC_E_ESCAN_RESULT 69 /* escan result event */
+#define WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE 70 /* action frame off channel complete */
+#define WLC_E_PROBRESP_MSG 71 /* probe response received */
+#define WLC_E_P2P_PROBREQ_MSG 72 /* P2P Probe request received */
+#define WLC_E_DCS_REQUEST 73
+/* will enable this after proptxstatus code is merged back to ToT */
+#define WLC_E_FIFO_CREDIT_MAP 74 /* credits for D11 FIFOs. [AC0,AC1,AC2,AC3,BC_MC,ATIM] */
+#define WLC_E_ACTION_FRAME_RX 75 /* Received action frame event WITH
+ * wl_event_rx_frame_data_t header
+ */
+#define WLC_E_WAKE_EVENT 76 /* Wake Event timer fired, used for wake WLAN test mode */
+#define WLC_E_RM_COMPLETE 77 /* Radio measurement complete */
+#define WLC_E_HTSFSYNC 78 /* Synchronize TSF with the host */
+#define WLC_E_OVERLAY_REQ 79 /* request an overlay IOCTL/iovar from the host */
+#define WLC_E_CSA_COMPLETE_IND 80 /* 802.11 CHANNEL SWITCH ACTION completed */
+#define WLC_E_EXCESS_PM_WAKE_EVENT 81 /* excess PM Wake Event to inform host */
+#define WLC_E_PFN_SCAN_NONE 82 /* no PFN networks around */
+/* PFN BSSID network found event, conflict/share with WLC_E_PFN_SCAN_NONE */
+#define WLC_E_PFN_BSSID_NET_FOUND 82
+#define WLC_E_PFN_SCAN_ALLGONE 83 /* last found PFN network gets lost */
+/* PFN BSSID network lost event, conflict/share with WLC_E_PFN_SCAN_ALLGONE */
+#define WLC_E_PFN_BSSID_NET_LOST 83
+#define WLC_E_GTK_PLUMBED 84
+#define WLC_E_ASSOC_IND_NDIS 85 /* 802.11 ASSOC indication for NDIS only */
+#define WLC_E_REASSOC_IND_NDIS 86 /* 802.11 REASSOC indication for NDIS only */
+#define WLC_E_ASSOC_REQ_IE 87
+#define WLC_E_ASSOC_RESP_IE 88
+#define WLC_E_ASSOC_RECREATED 89 /* association recreated on resume */
+#define WLC_E_ACTION_FRAME_RX_NDIS 90 /* rx action frame event for NDIS only */
+#define WLC_E_AUTH_REQ 91 /* authentication request received */
+#define WLC_E_TDLS_PEER_EVENT 92 /* discovered peer, connected/disconnected peer */
+#define WLC_E_SPEEDY_RECREATE_FAIL 93 /* fast assoc recreation failed */
+#define WLC_E_NATIVE 94 /* port-specific event and payload (e.g. NDIS) */
+#define WLC_E_PKTDELAY_IND 95 /* event for sudden jump in tx pkt delay */
+
+#ifdef WLAWDL
+#define WLC_E_AWDL_AW 96 /* AWDL AW period starts */
+#define WLC_E_AWDL_ROLE 97 /* AWDL Master/Slave/NE master role event */
+#define WLC_E_AWDL_EVENT 98 /* Generic AWDL event */
+#endif /* WLAWDL */
+
+#define WLC_E_PSTA_PRIMARY_INTF_IND 99 /* psta primary interface indication */
+#define WLC_E_NAN 100 /* NAN event - Reserved for future */
+#define WLC_E_BEACON_FRAME_RX 101
+#define WLC_E_SERVICE_FOUND 102 /* desired service found */
+#define WLC_E_GAS_FRAGMENT_RX 103 /* GAS fragment received */
+#define WLC_E_GAS_COMPLETE 104 /* GAS sessions all complete */
+#define WLC_E_P2PO_ADD_DEVICE 105 /* New device found by p2p offload */
+#define WLC_E_P2PO_DEL_DEVICE 106 /* device has been removed by p2p offload */
+#define WLC_E_WNM_STA_SLEEP 107 /* WNM event to notify STA enter sleep mode */
+#define WLC_E_TXFAIL_THRESH 108 /* Indication of MAC tx failures (exhaustion of
+ * 802.11 retries) exceeding threshold(s)
+ */
+#define WLC_E_PROXD 109 /* Proximity Detection event */
+#define WLC_E_IBSS_COALESCE 110 /* IBSS Coalescing */
+#define WLC_E_AIBSS_TXFAIL 110 /* TXFAIL event for AIBSS, reusing event 110 */
+#define WLC_E_BSS_LOAD 114 /* Inform host of beacon bss load */
+#define WLC_E_MIMO_PWR_SAVE 115 /* Inform host MIMO PWR SAVE learning events */
+#define WLC_E_LEAKY_AP_STATS 116 /* Inform host leaky Ap stats events */
+#define WLC_E_ALLOW_CREDIT_BORROW 117 /* Allow or disallow wlfc credit borrowing in DHD */
+#define WLC_E_MSCH 120 /* Multiple channel scheduler event */
+#define WLC_E_CSA_START_IND 121
+#define WLC_E_CSA_DONE_IND 122
+#define WLC_E_CSA_FAILURE_IND 123
+#define WLC_E_CCA_CHAN_QUAL 124 /* CCA based channel quality report */
+#define WLC_E_BSSID 125 /* to report change in BSSID while roaming */
+#define WLC_E_TX_STAT_ERROR 126 /* tx error indication */
+#define WLC_E_BCMC_CREDIT_SUPPORT 127 /* credit check for BCMC supported */
+#define WLC_E_PEER_TIMEOUT 128 /* silently drop a STA because of inactivity */
+#define WLC_E_BT_WIFI_HANDOVER_REQ 130 /* Handover Request Initiated */
+#define WLC_E_SPW_TXINHIBIT 131 /* Southpaw TxInhibit notification */
+#define WLC_E_FBT_AUTH_REQ_IND 132 /* FBT Authentication Request Indication */
+#define WLC_E_RSSI_LQM 133 /* Enhancement addition for WLC_E_RSSI */
+#define WLC_E_PFN_GSCAN_FULL_RESULT 134 /* Full probe/beacon (IEs etc) results */
+#define WLC_E_PFN_SWC 135 /* Significant change in rssi of bssids being tracked */
+#define WLC_E_AUTHORIZED 136 /* a STA has been authorized for traffic */
+#define WLC_E_PROBREQ_MSG_RX 137 /* probe req with wl_event_rx_frame_data_t header */
+#define WLC_E_PFN_SCAN_COMPLETE 138 /* PFN completed scan of network list */
+#define WLC_E_RMC_EVENT 139 /* RMC Event */
+#define WLC_E_DPSTA_INTF_IND 140 /* DPSTA interface indication */
+#define WLC_E_RRM 141 /* RRM Event */
+#define WLC_E_PFN_SSID_EXT 142 /* SSID EXT event */
+#define WLC_E_ROAM_EXP_EVENT 143 /* Expanded roam event */
+#define WLC_E_ULP 146 /* ULP entered indication */
+#define WLC_E_MACDBG 147 /* Ucode debugging event */
+#define WLC_E_RESERVED 148 /* reserved */
+#define WLC_E_PRE_ASSOC_RSEP_IND 149 /* assoc resp received */
+#define WLC_E_PSK_AUTH 150 /* PSK AUTH WPA2-PSK 4 WAY Handshake failure */
+#define WLC_E_TKO 151 /* TCP keepalive offload */
+#define WLC_E_SDB_TRANSITION 152 /* SDB mode-switch event */
+#define WLC_E_NATOE_NFCT 153 /* natoe event */
+#define WLC_E_TEMP_THROTTLE 154 /* Temperature throttling control event */
+#define WLC_E_LINK_QUALITY 155 /* Link quality measurement complete */
+#define WLC_E_BSSTRANS_RESP 156 /* BSS Transition Response received */
+#define WLC_E_TWT_SETUP 157 /* TWT Setup Complete event */
+#define WLC_E_HE_TWT_SETUP 157 /* TODO:Remove after merging TWT changes to trunk */
+#define WLC_E_NAN_CRITICAL 158 /* NAN Critical Event */
+#define WLC_E_NAN_NON_CRITICAL 159 /* NAN Non-Critical Event */
+#define WLC_E_RADAR_DETECTED 160 /* Radar Detected event */
+#define WLC_E_RANGING_EVENT 161 /* Ranging event */
+#define WLC_E_INVALID_IE 162 /* Received invalid IE */
+#define WLC_E_MODE_SWITCH 163 /* Mode switch event */
+#define WLC_E_PKT_FILTER 164 /* Packet filter event */
+#define WLC_E_DMA_TXFLUSH_COMPLETE 165 /* TxFlush done before changing tx/rxchain */
+#define WLC_E_FBT 166 /* FBT event */
+#define WLC_E_PFN_SCAN_BACKOFF 167 /* PFN SCAN Backoff event */
+#define WLC_E_PFN_BSSID_SCAN_BACKOFF 168 /* PFN BSSID SCAN Backoff event */
+#define WLC_E_AGGR_EVENT 169 /* Aggregated event */
+#define WLC_E_TVPM_MITIGATION 171 /* Change in mitigation applied by TVPM */
+#define WLC_E_SCAN_START 172 /* Deprecated */
+#define WLC_E_SCAN 172 /* Scan event */
+#define WLC_E_MBO 173 /* MBO event */
+#define WLC_E_PHY_CAL 174 /* Phy calibration start indication to host */
+#define WLC_E_RPSNOA 175 /* Radio power save start/end indication to host */
+#define WLC_E_ADPS 176 /* ADPS event */
+#define WLC_E_SLOTTED_BSS_PEER_OP 177 /* Per peer SCB delete */
+#define WLC_E_GTK_KEYROT_NO_CHANSW 179 /* Avoid Chanswitch while GTK key rotation */
+#define WLC_E_ONBODY_STATUS_CHANGE 180 /* Indication of onbody status change */
+#define WLC_E_BCNRECV_ABORTED 181 /* Fake AP bcnrecv aborted roam event */
+#define WLC_E_PMK_INFO 182 /* PMK,PMKID information event */
+#define WLC_E_BSSTRANS 183 /* BSS Transition request / Response */
+#define WLC_E_WA_LQM 184 /* link quality monitoring */
+#define WLC_E_ACTION_FRAME_OFF_CHAN_DWELL_COMPLETE 185 /* action frame off channel
+ * dwell time complete
+ */
+#define WLC_E_WSEC 186 /* wsec keymgmt event */
+#define WLC_E_OBSS_DETECTION 187 /* OBSS HW event */
+#define WLC_E_AP_BCN_MUTE 188 /* Beacon mute mitigation event */
+#define WLC_E_SC_CHAN_QUAL 189 /* Event to indicate the SC channel quality */
+#define WLC_E_DYNSAR 190 /* Dynamic SAR indicate optimize on/off */
+#define WLC_E_ROAM_CACHE_UPDATE 191 /* Roam cache update indication */
+#define WLC_E_AP_BCN_DRIFT 192 /* Beacon Drift event */
+#define WLC_E_PFN_SCAN_ALLGONE_EXT 193 /* last found PFN network gets lost. */
+#define WLC_E_AUTH_START 194 /* notify upper layer to start auth */
+#define WLC_E_TWT_TEARDOWN 195 /* TWT Teardown Complete Event */
+#define WLC_E_TWT_INFO_FRM 196 /* TWT Info Event Notification */
+#define WLC_E_LAST 197 /* highest val + 1 for range checking */
+#if (WLC_E_LAST > 197)
+#error "WLC_E_LAST: Invalid value for last event; must be <= 197."
+#endif /* WLC_E_LAST */
+
+/* define an API for getting the string name of an event */
+extern const char *bcmevent_get_name(uint event_type);
+
+/* conversion between host and network order for events */
+extern void wl_event_to_host_order(wl_event_msg_t * evt);
+extern void wl_event_to_network_order(wl_event_msg_t * evt);
+
+/* validate the event frame and, if valid, copy the event header to out_event */
+extern int is_wlc_event_frame(void *pktdata, uint pktlen, uint16 exp_usr_subtype,
+ bcm_event_msg_u_t *out_event);
+
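+/*
+ * Usage sketch (hypothetical caller, not part of the original header):
+ *
+ *	bcm_event_msg_u_t evu;
+ *	if (is_wlc_event_frame(pktdata, pktlen, BCMILCP_BCM_SUBTYPE_EVENT,
+ *	        &evu) == BCME_OK) {
+ *		wl_event_to_host_order(&evu.event);
+ *		... dispatch on evu.event.event_type ...
+ *	}
+ */
+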
+#define WLC_ROAM_EVENT_V1 0x1u
+
+/* tlv ids for roam event */
+#define WLC_ROAM_NO_NETWORKS_TLV_ID 1
+
+/* No Networks reasons */
+#define WLC_E_REASON_NO_NETWORKS 0x0u /* value 0 means no networks found */
+#define WLC_E_REASON_NO_NETWORKS_BY_SCORE 0x01u /* bit 1 indicates filtered by score */
+
+/* bit mask field indicating fail reason */
+typedef uint32 wlc_roam_fail_reason_t;
+
+typedef struct wlc_roam_event_header {
+ uint16 version; /* version */
+ uint16 length; /* total length */
+} wlc_roam_event_header_t;
+
+typedef struct wlc_roam_event {
+ wlc_roam_event_header_t header;
+ uint8 xtlvs[]; /* data */
+} wl_roam_event_t;
+
+#define WLC_ROAM_PREP_EVENT_V1 0x1u
+#define WLC_ROAM_START_EVENT_V1 0x1u
+
+typedef struct wlc_roam_start_event {
+ uint16 version; /* version */
+ uint16 length; /* total length */
+ int16 rssi; /* current bss rssi */
+ int8 pad[2]; /* padding */
+ uint8 xtlvs[]; /* optional xtlvs */
+} wlc_roam_start_event_t;
+
+typedef struct wlc_roam_prep_event {
+ uint16 version; /* version */
+ uint16 length; /* total length */
+ int16 rssi; /* target bss rssi */
+ int8 pad[2]; /* padding */
+ uint8 xtlvs[]; /* optional xtlvs */
+} wlc_roam_prep_event_t;
+
+#define WLC_ROAM_CACHE_UPDATE_EVENT_V1 0x1u
+
+/* WLC_E_ROAM_CACHE_UPDATE event data prototype */
+typedef struct wlc_roam_cache_update_event {
+ uint16 version; /* version */
+ uint16 length; /* total length */
+ uint8 xtlvs[]; /* optional xtlvs */
+} wlc_roam_cache_update_event_t;
+
+typedef enum wlc_roam_cache_update_reason {
+ WLC_ROAM_CACHE_UPDATE_NEW_ROAM_CACHE = 1, /* new roam cache */
+ WLC_ROAM_CACHE_UPDATE_JOIN = 2, /* join bss */
+ WLC_ROAM_CACHE_UPDATE_RSSI_DELTA = 3, /* rssi delta */
+ WLC_ROAM_CACHE_UPDATE_MOTION_RSSI_DELTA = 4, /* motion rssi delta */
+ WLC_ROAM_CACHE_UPDATE_CHANNEL_MISS = 5, /* channel missed */
+ WLC_ROAM_CACHE_UPDATE_START_SPLIT_SCAN = 6, /* start split scan */
+ WLC_ROAM_CACHE_UPDATE_START_FULL_SCAN = 7, /* start full scan */
+ WLC_ROAM_CACHE_UPDATE_INIT_ASSOC = 8, /* init before assoc */
+ WLC_ROAM_CACHE_UPDATE_FULL_SCAN_FAILED = 9, /* full scan failed */
+ WLC_ROAM_CACHE_UPDATE_NO_AP_FOUND = 10, /* no ap found */
+ WLC_ROAM_CACHE_UPDATE_MISSING_AP = 11, /* cached ap not found */
+ WLC_ROAM_CACHE_UPDATE_START_PART_SCAN = 12, /* RCC */
+ WLC_ROAM_CACHE_UPDATE_RCC_MODE = 13, /* RCC */
+ WLC_ROAM_CACHE_UPDATE_RCC_CHANNELS = 14, /* RCC */
+ WLC_ROAM_CACHE_UPDATE_START_LP_FULL_SCAN = 15 /* start low power full scan */
+} wlc_roam_cache_update_reason_t;
+
+/*
+ * Please do not insert/delete events in the middle causing renumbering.
+ * It is a problem for host-device compatibility, especially with ROMmed chips.
+ */
+
+/* Translate between internal and exported status codes */
+/* Event status codes */
+#define WLC_E_STATUS_SUCCESS 0 /* operation was successful */
+#define WLC_E_STATUS_FAIL 1 /* operation failed */
+#define WLC_E_STATUS_TIMEOUT 2 /* operation timed out */
+#define WLC_E_STATUS_NO_NETWORKS 3 /* failed due to no matching network found */
+#define WLC_E_STATUS_ABORT 4 /* operation was aborted */
+#define WLC_E_STATUS_NO_ACK 5 /* protocol failure: packet not ack'd */
+#define WLC_E_STATUS_UNSOLICITED 6 /* AUTH or ASSOC packet was unsolicited */
+#define WLC_E_STATUS_ATTEMPT 7 /* attempt to assoc to an auto auth configuration */
+#define WLC_E_STATUS_PARTIAL 8 /* scan results are incomplete */
+#define WLC_E_STATUS_NEWSCAN 9 /* scan aborted by another scan */
+#define WLC_E_STATUS_NEWASSOC 10 /* scan aborted due to assoc in progress */
+#define WLC_E_STATUS_11HQUIET 11 /* 802.11h quiet period started */
+#define WLC_E_STATUS_SUPPRESS 12 /* user disabled scanning (WLC_SET_SCANSUPPRESS) */
+#define WLC_E_STATUS_NOCHANS 13 /* no allowable channels to scan */
+#ifdef BCMCCX
+#define WLC_E_STATUS_CCXFASTRM 14 /* scan aborted due to CCX fast roam */
+#endif /* BCMCCX */
+#define WLC_E_STATUS_CS_ABORT 15 /* abort channel select */
+#define WLC_E_STATUS_ERROR 16 /* request failed due to error */
+#define WLC_E_STATUS_SLOTTED_PEER_ADD 17 /* Slotted scb for peer addition status */
+#define WLC_E_STATUS_SLOTTED_PEER_DEL 18 /* Slotted scb for peer deletion status */
+#define WLC_E_STATUS_RXBCN 19 /* Rx Beacon event for FAKEAP feature */
+#define WLC_E_STATUS_RXBCN_ABORT 20 /* Rx Beacon abort event for FAKEAP feature */
+#define WLC_E_STATUS_LOWPOWER_ON_LOWSPAN 21 /* LOWPOWER scan request during LOWSPAN */
+#define WLC_E_STATUS_INVALID 0xff /* Invalid status code to init variables. */
+
+/* 4-way handshake event type */
+#define WLC_E_PSK_AUTH_SUB_EAPOL_START 1 /* EAPOL start */
+#define WLC_E_PSK_AUTH_SUB_EAPOL_DONE 2 /* EAPOL end */
+/* GTK event type */
+#define WLC_E_PSK_AUTH_SUB_GTK_DONE 3 /* GTK end */
+
+/* 4-way handshake event status code */
+#define WLC_E_STATUS_PSK_AUTH_WPA_TIMOUT 1 /* operation timed out */
+#define WLC_E_STATUS_PSK_AUTH_MIC_WPA_ERR 2 /* MIC error */
+#define WLC_E_STATUS_PSK_AUTH_IE_MISMATCH_ERR 3 /* IE Mismatch error */
+#define WLC_E_STATUS_PSK_AUTH_REPLAY_COUNT_ERR 4
+#define WLC_E_STATUS_PSK_AUTH_PEER_BLACKISTED 5 /* Blacklisted peer */
+#define WLC_E_STATUS_PSK_AUTH_GTK_REKEY_FAIL 6 /* GTK event status code */
+
+/* SDB transition status code */
+#define WLC_E_STATUS_SDB_START 1
+#define WLC_E_STATUS_SDB_COMPLETE 2
+/* Slice-swap status code */
+#define WLC_E_STATUS_SLICE_SWAP_START 3
+#define WLC_E_STATUS_SLICE_SWAP_COMPLETE 4
+
+/* SDB transition reason code */
+#define WLC_E_REASON_HOST_DIRECT 0
+#define WLC_E_REASON_INFRA_ASSOC 1
+#define WLC_E_REASON_INFRA_ROAM 2
+#define WLC_E_REASON_INFRA_DISASSOC 3
+#define WLC_E_REASON_NO_MODE_CHANGE_NEEDED 4
+
+#ifdef WLAWDL
+#define WLC_E_REASON_AWDL_ENABLE 5
+#define WLC_E_REASON_AWDL_DISABLE 6
+#endif /* WLAWDL */
+
+/* TX STAT ERROR REASON CODE */
+#define WLC_E_REASON_TXBACKOFF_NOT_DECREMENTED 0x1
+
+/* WLC_E_SDB_TRANSITION event data */
+#define WL_MAX_BSSCFG 4
+#define WL_EVENT_SDB_TRANSITION_VER 1
+typedef struct wl_event_sdb_data {
+ uint8 wlunit; /* Core index */
+ uint8 is_iftype; /* Interface type (Station, SoftAP, P2P_GO, P2P_GC) */
+ uint16 chanspec; /* Interface Channel/Chanspec */
+ char ssidbuf[(4 * 32) + 1]; /* SSID_FMT_BUF_LEN: ((4 * DOT11_MAX_SSID_LEN) + 1) */
+} wl_event_sdb_data_t;
+
+typedef struct wl_event_sdb_trans {
+ uint8 version; /* Event Data Version */
+ uint8 rsdb_mode;
+ uint8 enable_bsscfg;
+ uint8 reserved;
+ struct wl_event_sdb_data values[WL_MAX_BSSCFG];
+} wl_event_sdb_trans_t;
+
+/* reason codes for WLC_E_GTK_KEYROT_NO_CHANSW event */
+#define WLC_E_GTKKEYROT_SCANDELAY 0 /* Delay scan while gtk in progress */
+
+#ifdef WLAWDL
+#define WLC_E_GTKKEYROT_SKIPCHANSW_AWDL 1 /* Avoid chansw by awdl while gtk in progress */
+#endif /* WLAWDL */
+
+#define WLC_E_GTKKEYROT_SKIPCHANSW_P2P 2 /* Avoid chansw by p2p while gtk in progress */
+
+/* roam reason codes */
+#define WLC_E_REASON_INITIAL_ASSOC 0 /* initial assoc */
+#define WLC_E_REASON_LOW_RSSI 1 /* roamed due to low RSSI */
+#define WLC_E_REASON_DEAUTH 2 /* roamed due to DEAUTH indication */
+#define WLC_E_REASON_DISASSOC 3 /* roamed due to DISASSOC indication */
+#define WLC_E_REASON_BCNS_LOST 4 /* roamed due to lost beacons */
+
+/* Roam codes (5-7) used primarily by CCX */
+#define WLC_E_REASON_FAST_ROAM_FAILED 5 /* roamed due to fast roam failure */
+#define WLC_E_REASON_DIRECTED_ROAM 6 /* roamed due to request by AP */
+#define WLC_E_REASON_TSPEC_REJECTED 7 /* roamed due to TSPEC rejection */
+#define WLC_E_REASON_BETTER_AP 8 /* roamed due to finding better AP */
+#define WLC_E_REASON_MINTXRATE 9 /* roamed because at mintxrate for too long */
+#define WLC_E_REASON_TXFAIL 10 /* We can hear AP, but AP can't hear us */
+#define WLC_E_REASON_BSSTRANS_REQ 11 /* roamed due to BSS Transition request by AP */
+#define WLC_E_REASON_LOW_RSSI_CU 12 /* roamed due to low RSSI and Channel Usage */
+#define WLC_E_REASON_RADAR_DETECTED 13 /* roamed due to radar detection by STA */
+#define WLC_E_REASON_CSA 14 /* roamed due to CSA from AP */
+#define WLC_E_REASON_ESTM_LOW 15 /* roamed due to ESTM low tput */
+#define WLC_E_REASON_SILENT_ROAM 16 /* roamed due to Silent roam */
+#define WLC_E_REASON_INACTIVITY 17 /* full roam scan due to inactivity */
+#define WLC_E_REASON_ROAM_SCAN_TIMEOUT 18 /* roam scan timer timeout */
+#define WLC_E_REASON_REASSOC 19 /* roamed due to reassoc iovar */
+#define WLC_E_REASON_LAST 20 /* NOTE: increment this as you add reasons above */
+
+/* prune reason codes */
+#define WLC_E_PRUNE_ENCR_MISMATCH 1 /* encryption mismatch */
+#define WLC_E_PRUNE_BCAST_BSSID 2 /* AP uses a broadcast BSSID */
+#define WLC_E_PRUNE_MAC_DENY 3 /* STA's MAC addr is in AP's MAC deny list */
+#define WLC_E_PRUNE_MAC_NA 4 /* STA's MAC addr is not in AP's MAC allow list */
+#define WLC_E_PRUNE_REG_PASSV 5 /* AP not allowed due to regulatory restriction */
+#define WLC_E_PRUNE_SPCT_MGMT 6 /* AP does not support STA locale spectrum mgmt */
+#define WLC_E_PRUNE_RADAR 7 /* AP is on a radar channel of STA locale */
+#define WLC_E_RSN_MISMATCH 8 /* STA does not support AP's RSN */
+#define WLC_E_PRUNE_NO_COMMON_RATES 9 /* No rates in common with AP */
+#define WLC_E_PRUNE_BASIC_RATES 10 /* STA does not support all basic rates of BSS */
+#ifdef BCMCCX
+#define WLC_E_PRUNE_CCXFAST_PREVAP 11 /* CCX FAST ROAM: prune previous AP */
+#endif /* def BCMCCX */
+#define WLC_E_PRUNE_CIPHER_NA 12 /* BSS's cipher not supported */
+#define WLC_E_PRUNE_KNOWN_STA 13 /* AP is already known to us as a STA */
+#ifdef BCMCCX
+#define WLC_E_PRUNE_CCXFAST_DROAM 14 /* CCX FAST ROAM: prune unqualified AP */
+#endif /* def BCMCCX */
+#define WLC_E_PRUNE_WDS_PEER 15 /* AP is already known to us as a WDS peer */
+#define WLC_E_PRUNE_QBSS_LOAD 16 /* QBSS LOAD - AAC is too low */
+#define WLC_E_PRUNE_HOME_AP 17 /* prune home AP */
+#ifdef BCMCCX
+#define WLC_E_PRUNE_AP_BLOCKED 18 /* prune blocked AP */
+#define WLC_E_PRUNE_NO_DIAG_SUPPORT 19 /* prune due to diagnostic mode not supported */
+#endif /* BCMCCX */
+#define WLC_E_PRUNE_AUTH_RESP_MAC 20 /* suppress auth resp by MAC filter */
+#define WLC_E_PRUNE_ASSOC_RETRY_DELAY 21 /* MBO assoc retry delay */
+#define WLC_E_PRUNE_RSSI_ASSOC_REJ 22 /* OCE RSSI-based assoc rejection */
+#define WLC_E_PRUNE_MAC_AVOID 23 /* AP's MAC addr is in STA's MAC avoid list */
+#define WLC_E_PRUNE_TRANSITION_DISABLE 24 /* AP's Transition Disable Policy */
+
+/* WPA failure reason codes carried in the WLC_E_PSK_SUP event */
+#define WLC_E_SUP_OTHER 0 /* Other reason */
+#define WLC_E_SUP_DECRYPT_KEY_DATA 1 /* Decryption of key data failed */
+#define WLC_E_SUP_BAD_UCAST_WEP128 2 /* Illegal use of ucast WEP128 */
+#define WLC_E_SUP_BAD_UCAST_WEP40 3 /* Illegal use of ucast WEP40 */
+#define WLC_E_SUP_UNSUP_KEY_LEN 4 /* Unsupported key length */
+#define WLC_E_SUP_PW_KEY_CIPHER 5 /* Unicast cipher mismatch in pairwise key */
+#define WLC_E_SUP_MSG3_TOO_MANY_IE 6 /* WPA IE contains > 1 RSN IE in key msg 3 */
+#define WLC_E_SUP_MSG3_IE_MISMATCH 7 /* WPA IE mismatch in key message 3 */
+#define WLC_E_SUP_NO_INSTALL_FLAG 8 /* INSTALL flag unset in 4-way msg */
+#define WLC_E_SUP_MSG3_NO_GTK 9 /* encapsulated GTK missing from msg 3 */
+#define WLC_E_SUP_GRP_KEY_CIPHER 10 /* Multicast cipher mismatch in group key */
+#define WLC_E_SUP_GRP_MSG1_NO_GTK 11 /* encapsulated GTK missing from group msg 1 */
+#define WLC_E_SUP_GTK_DECRYPT_FAIL 12 /* GTK decrypt failure */
+#define WLC_E_SUP_SEND_FAIL 13 /* message send failure */
+#define WLC_E_SUP_DEAUTH 14 /* received FC_DEAUTH */
+#define WLC_E_SUP_WPA_PSK_TMO 15 /* WPA PSK 4-way handshake timeout */
+#define WLC_E_SUP_WPA_PSK_M1_TMO 16 /* WPA PSK 4-way handshake M1 timeout */
+#define WLC_E_SUP_WPA_PSK_M3_TMO 17 /* WPA PSK 4-way handshake M3 timeout */
+#define WLC_E_SUP_GTK_UPDATE_FAIL 18 /* GTK update failure */
+#define WLC_E_SUP_TK_UPDATE_FAIL 19 /* TK update failure */
+#define WLC_E_SUP_KEY_INSTALL_FAIL 20 /* Buffered key install failure */
+#define WLC_E_SUP_PTK_UPDATE 21 /* PTK update */
+#define WLC_E_SUP_MSG1_PMKID_MISMATCH 22 /* MSG1 PMKID not matched to PMKSA cache list */
+
+/* event msg for WLC_E_SUP_PTK_UPDATE */
+typedef struct wlc_sup_ptk_update {
+ uint16 version; /* 0x0001 */
+ uint16 length; /* length of data that follows */
+ uint32 tsf_low; /* tsf at which ptk updated by internal supplicant */
+ uint32 tsf_high;
+ uint8 key_id; /* always 0 for PTK update */
+ uint8 tid; /* tid for the PN below - PTK refresh is per key */
+ uint16 pn_low;
+ uint32 pn_high; /* local highest PN of any tid of the key when M4 was sent */
+} wlc_sup_ptk_update_t;
+
+/* sub event of WLC_E_WSEC */
+typedef enum {
+ WLC_WSEC_EVENT_PTK_PN_SYNC_ERROR = 0x01
+} wl_wsec_event_type_t;
+
+/* sub event msg - WLC_WSEC_EVENT_PTK_PN_SYNC_ERROR */
+struct wlc_wsec_ptk_pn_sync_error_v1 {
+ uint32 tsf_low; /* tsf at which PN sync error happened */
+ uint32 tsf_high;
+ uint8 key_id; /* always 0 for PTK update */
+ uint8 tid; /* tid for the PN below - PTK refresh is per key */
+ uint16 PAD1;
+ uint16 rx_seqn; /* d11 seq number */
+ uint16 pn_low;
+ uint32 pn_high; /* local PN window start for the tid */
+ uint16 key_idx; /* key idx in the keymgmt */
+ uint16 rx_pn_low;
+ uint32 rx_pn_high; /* Rx PN window start for the tid */
+ uint32 span_time; /* time elapsed since replay */
+ uint32 span_pkts; /* pkt count since replay */
+};
+
+typedef struct wlc_wsec_ptk_pn_sync_error_v1 wlc_wsec_ptk_pn_sync_error_t;
+
+/* WLC_E_WSEC event msg */
+typedef struct wlc_wsec_event {
+ uint16 version; /* 0x0001 */
+ uint16 length; /* length of data that follows */
+ uint16 type; /* wsec_event_type_t */
+ uint16 PAD1;
+ union {
+ wlc_wsec_ptk_pn_sync_error_t pn_sync_err;
+ } data;
+} wlc_wsec_event_t;
+
+/* Ucode reason codes carried in the WLC_E_MACDBG event */
+#define WLC_E_MACDBG_LIST_PSM 0 /* Dump list update for PSM registers */
+#define WLC_E_MACDBG_LIST_PSMX 1 /* Dump list update for PSMx registers */
+#define WLC_E_MACDBG_REGALL 2 /* Dump all registers */
+
+/* Event data for events that include frames received over the air */
+/* WLC_E_PROBRESP_MSG
+ * WLC_E_P2P_PROBREQ_MSG
+ * WLC_E_ACTION_FRAME_RX
+ */
+
+#ifdef WLAWDL
+#define WLC_E_AWDL_SCAN_START 1 /* Scan start indication to host */
+#define WLC_E_AWDL_SCAN_DONE 0 /* Scan Done indication to host */
+#endif /* WLAWDL */
+
+#define MAX_PHY_CORE_NUM 4u
+
+#define BCM_RX_FRAME_DATA_VERSION_2 2u
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_event_rx_frame_data_v2 {
+ uint16 version;
+ uint16 len;
+ uint16 channel; /* Matches chanspec_t format from bcmwifi_channels.h */
+ uint16 pad;
+ int32 rssi;
+ uint32 mactime;
+ uint32 rate;
+ int8 per_core_rssi[MAX_PHY_CORE_NUM];
+} BWL_POST_PACKED_STRUCT wl_event_rx_frame_data_v2_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_event_rx_frame_data_v1 {
+ uint16 version;
+ uint16 channel; /* Matches chanspec_t format from bcmwifi_channels.h */
+ int32 rssi;
+ uint32 mactime;
+ uint32 rate;
+} BWL_POST_PACKED_STRUCT wl_event_rx_frame_data_v1_t;
+
+#define BCM_RX_FRAME_DATA_VERSION_1 1u
+
+#ifndef WL_EVENT_RX_FRAME_DATA_ALIAS
+#define BCM_RX_FRAME_DATA_VERSION BCM_RX_FRAME_DATA_VERSION_1
+typedef wl_event_rx_frame_data_v1_t wl_event_rx_frame_data_t;
+#endif
+
+/* WLC_E_IF event data */
+typedef struct wl_event_data_if {
+ uint8 ifidx; /* RTE virtual device index (for dongle) */
+ uint8 opcode; /* see I/F opcode */
+ uint8 reserved; /* bit mask (WLC_E_IF_FLAGS_XXX ) */
+ uint8 bssidx; /* bsscfg index */
+ uint8 role; /* see I/F role */
+} wl_event_data_if_t;
+
+/* WLC_E_NATOE event data */
+typedef struct wl_event_data_natoe {
+ uint32 natoe_active;
+ uint32 sta_ip;
+ uint16 start_port;
+ uint16 end_port;
+} wl_event_data_natoe_t;
+
+/* opcode in WLC_E_IF event */
+#define WLC_E_IF_ADD 1 /* bsscfg add */
+#define WLC_E_IF_DEL 2 /* bsscfg delete */
+#define WLC_E_IF_CHANGE 3 /* bsscfg role change */
+
+/* I/F role code in WLC_E_IF event */
+#define WLC_E_IF_ROLE_STA 0 /* Infra STA */
+#define WLC_E_IF_ROLE_AP 1 /* Access Point */
+#define WLC_E_IF_ROLE_WDS 2 /* WDS link */
+#define WLC_E_IF_ROLE_P2P_GO 3 /* P2P Group Owner */
+#define WLC_E_IF_ROLE_P2P_CLIENT 4 /* P2P Client */
+
+#ifdef WLAWDL
+#define WLC_E_IF_ROLE_AWDL 7 /* AWDL */
+#endif /* WLAWDL */
+
+#define WLC_E_IF_ROLE_IBSS 8 /* IBSS */
+#define WLC_E_IF_ROLE_NAN 9 /* NAN */
+
+#define WLC_E_IF_ROLE_MESH 10u /* identifies the role as MESH */
+
+/* WLC_E_RSSI event data */
+typedef struct wl_event_data_rssi {
+ int32 rssi;
+ int32 snr;
+ int32 noise;
+} wl_event_data_rssi_t;
+
+#define WL_EVENT_WA_LQM_VER 0 /* initial version */
+
+#define WL_EVENT_WA_LQM_BASIC 0 /* event sub-types */
+typedef struct { /* payload of subevent in xtlv */
+ int32 rssi;
+ int32 snr;
+ uint32 tx_rate;
+ uint32 rx_rate;
+} wl_event_wa_lqm_basic_t;
+
+typedef struct wl_event_wa_lqm {
+ uint16 ver; /* version */
+ uint16 len; /* total length of the structure */
+ uint8 subevent[]; /* sub-event data in bcm_xtlv_t format */
+} wl_event_wa_lqm_t;
+
+/* WLC_E_IF flag */
+#define WLC_E_IF_FLAGS_BSSCFG_NOIF 0x1 /* no host I/F creation needed */
+
+/* Reason codes for LINK */
+#define WLC_E_LINK_BCN_LOSS 1 /* Link down because of beacon loss */
+#define WLC_E_LINK_DISASSOC 2 /* Link down because of disassoc */
+#define WLC_E_LINK_ASSOC_REC 3 /* Link down because assoc recreate failed */
+#define WLC_E_LINK_BSSCFG_DIS 4 /* Link down due to bsscfg down */
+#define WLC_E_LINK_ASSOC_FAIL 5 /* Link down due to assoc to new AP during roam */
+#define WLC_E_LINK_REASSOC_ROAM_FAIL 6 /* Link down due to reassoc roaming failed */
+#define WLC_E_LINK_LOWRSSI_ROAM_FAIL 7 /* Link down due to Low rssi roaming failed */
+#define WLC_E_LINK_NO_FIRST_BCN_RX 8 /* Link down due to 1st beacon rx failure */
+
+/* WLC_E_NDIS_LINK event data */
+typedef BWL_PRE_PACKED_STRUCT struct ndis_link_parms {
+ struct ether_addr peer_mac; /* 6 bytes */
+ uint16 chanspec; /* 2 bytes */
+ uint32 link_speed; /* current datarate in units of 500 Kbit/s */
+ uint32 max_link_speed; /* max possible datarate for link in units of 500 Kbit/s */
+ int32 rssi; /* average rssi */
+} BWL_POST_PACKED_STRUCT ndis_link_parms_t;
+
+/* reason codes for WLC_E_OVERLAY_REQ event */
+#define WLC_E_OVL_DOWNLOAD 0 /* overlay download request */
+#define WLC_E_OVL_UPDATE_IND 1 /* device indication of host overlay update */
+
+/* reason codes for WLC_E_TDLS_PEER_EVENT event */
+#define WLC_E_TDLS_PEER_DISCOVERED 0 /* peer is ready to establish TDLS */
+#define WLC_E_TDLS_PEER_CONNECTED 1
+#define WLC_E_TDLS_PEER_DISCONNECTED 2
+
+/* reason codes for WLC_E_RMC_EVENT event */
+#define WLC_E_REASON_RMC_NONE 0
+#define WLC_E_REASON_RMC_AR_LOST 1
+#define WLC_E_REASON_RMC_AR_NO_ACK 2
+
+#ifdef WLTDLS
+/* TDLS Action Category code */
+#define TDLS_AF_CATEGORY 12
+/* Wi-Fi Display (WFD) Vendor Specific Category */
+/* used for WFD Tunneled Probe Request and Response */
+#define TDLS_VENDOR_SPECIFIC 127
+/* TDLS Action Field Values */
+#define TDLS_ACTION_SETUP_REQ 0
+#define TDLS_ACTION_SETUP_RESP 1
+#define TDLS_ACTION_SETUP_CONFIRM 2
+#define TDLS_ACTION_TEARDOWN 3
+#define WLAN_TDLS_SET_PROBE_WFD_IE 11
+#define WLAN_TDLS_SET_SETUP_WFD_IE 12
+#define WLAN_TDLS_SET_WFD_ENABLED 13
+#define WLAN_TDLS_SET_WFD_DISABLED 14
+#endif
+
+/* WLC_E_RANGING_EVENT subtypes */
+#define WLC_E_RANGING_RESULTS 0
+
+#define PHY_CAL_EVT_VERSION 1
+typedef struct wlc_phy_cal_info {
+ uint16 version; /* structure version */
+ uint16 length; /* length of the rest of the structure */
+ uint16 chanspec;
+ uint8 start;
+ uint8 phase;
+ int16 temp;
+ uint8 reason;
+ uint8 slice;
+} wlc_phy_cal_info_t;
+
+#ifdef WLAWDL
+/* WLC_E_AWDL_EVENT subtypes */
+#define WLC_E_AWDL_SCAN_STATUS 0
+#define WLC_E_AWDL_RX_ACT_FRAME 1
+#define WLC_E_AWDL_RX_PRB_RESP 2
+#define WLC_E_AWDL_PHYCAL_STATUS 3
+#define WLC_E_AWDL_WOWL_NULLPKT 4
+#define WLC_E_AWDL_OOB_AF_STATUS 5
+/* WLC_E_AWDL_RANGING_RESULTS will be removed and only WLC_E_AWDL_UNUSED will be here
+ * Keeping both of them to avoid compilation error on trunk
+ * It will be removed after wlc_ranging merge from IGUANA
+ */
+#define WLC_E_AWDL_RANGING_RESULTS 6
+#define WLC_E_AWDL_UNUSED 6
+#define WLC_E_AWDL_SUB_PEER_STATE 7
+#define WLC_E_AWDL_SUB_INTERFACE_STATE 8
+#define WLC_E_AWDL_UCAST_AF_TXSTATUS 9
+#define WLC_E_AWDL_NAN_CLUSTER_MERGE 10
+#define WLC_E_AWDL_NAN_RX_BEACON 11
+#define WLC_E_AWDL_SD_DISCOVERY_RESULT 12
+#define WLC_E_AWDL_SD_REPLIED 13
+#define WLC_E_AWDL_SD_TERMINATED 14
+#define WLC_E_AWDL_SD_RECEIVE 15
+#define WLC_E_AWDL_SD_VNDR_IE 16
+#define WLC_E_AWDL_SD_DEVICE_STATE_IE 17
+#define WLC_E_AWDL_DFSP_NOTIF 18
+#define WLC_E_AWDL_DFSP_SUSPECT 19
+#define WLC_E_AWDL_DFSP_RESUME 20
+
+/* WLC_E_AWDL_SCAN_STATUS status values */
+#define WLC_E_AWDL_SCAN_START 1 /* Scan start indication to host */
+#define WLC_E_AWDL_SCAN_DONE 0 /* Scan Done indication to host */
+#define WLC_E_AWDL_PHYCAL_START 1 /* Phy calibration start indication to host */
+#define WLC_E_AWDL_PHYCAL_DONE 0 /* Phy calibration done indication to host */
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint8 subscribe_id; /* local subscribe instance id */
+ uint8 publish_id; /* publisher's instance id */
+ struct ether_addr addr; /* publisher's address */
+ uint8 service_info_len; /* length of the service specific information in data[] */
+ uint8 data[1]; /* service specific info */
+} BWL_POST_PACKED_STRUCT awdl_sd_discovery_result_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint8 instance_id;
+ struct ether_addr addr; /* publisher's address */
+} BWL_POST_PACKED_STRUCT awdl_sd_replied_event_t;
+
+#define AWDL_SD_TERM_REASON_TIMEOUT 1
+#define AWDL_SD_TERM_REASON_USERREQ 2
+#define AWDL_SD_TERM_REASON_FAIL 3
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint8 instance_id; /* publish instance id */
+ uint8 reason; /* 1=timeout, 2=user request, 3=failure */
+} BWL_POST_PACKED_STRUCT awdl_sd_term_event_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint8 instance_id; /* local publish/subscribe instance id */
+ uint8 sender_instance_id;
+ struct ether_addr addr; /* sender's address */
+ uint8 service_info_len; /* length of the service specific information in data[] */
+ uint8 data[1]; /* service specific info */
+} BWL_POST_PACKED_STRUCT awdl_sd_receive_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+ struct ether_addr addr; /* sender's address */
+ uint16 len; /* length of data[] */
+ uint8 data[1]; /* vndr specific info */
+} BWL_POST_PACKED_STRUCT awdl_sd_vndr_ie_event_t;
+
+#endif /* WLAWDL */
+
+/* GAS event data */
+typedef BWL_PRE_PACKED_STRUCT struct wl_event_gas {
+ uint16 channel; /* channel of GAS protocol */
+ uint8 dialog_token; /* GAS dialog token */
+ uint8 fragment_id; /* fragment id */
+ uint16 status_code; /* status code on GAS completion */
+ uint16 data_len; /* length of data to follow */
+ uint8 data[1]; /* variable length specified by data_len */
+} BWL_POST_PACKED_STRUCT wl_event_gas_t;
+
+/* service discovery TLV */
+typedef BWL_PRE_PACKED_STRUCT struct wl_sd_tlv {
+ uint16 length; /* length of response_data */
+ uint8 protocol; /* service protocol type */
+ uint8 transaction_id; /* service transaction id */
+ uint8 status_code; /* status code */
+ uint8 data[1]; /* response data */
+} BWL_POST_PACKED_STRUCT wl_sd_tlv_t;
+
+/* service discovery event data */
+typedef BWL_PRE_PACKED_STRUCT struct wl_event_sd {
+ uint16 channel; /* channel */
+ uint8 count; /* number of tlvs */
+ wl_sd_tlv_t tlv[1]; /* service discovery TLV */
+} BWL_POST_PACKED_STRUCT wl_event_sd_t;
+
+/* WLC_E_PKT_FILTER event sub-classification codes */
+#define WLC_E_PKT_FILTER_TIMEOUT 1 /* Matching packet not received in last timeout seconds */
+
+/* Note: proxd has a new API (ver 3.0) that deprecates the following */
+
+/* Reason codes for WLC_E_PROXD */
+#define WLC_E_PROXD_FOUND 1 /* Found a proximity device */
+#define WLC_E_PROXD_GONE 2 /* Lost a proximity device */
+#define WLC_E_PROXD_START 3 /* used by: target */
+#define WLC_E_PROXD_STOP 4 /* used by: target */
+#define WLC_E_PROXD_COMPLETED 5 /* used by: initiator completed */
+#define WLC_E_PROXD_ERROR 6 /* used by both initiator and target */
+#define WLC_E_PROXD_COLLECT_START 7 /* used by: target & initiator */
+#define WLC_E_PROXD_COLLECT_STOP 8 /* used by: target */
+#define WLC_E_PROXD_COLLECT_COMPLETED 9 /* used by: initiator completed */
+#define WLC_E_PROXD_COLLECT_ERROR 10 /* used by both initiator and target */
+#define WLC_E_PROXD_NAN_EVENT 11 /* used by both initiator and target */
+#define WLC_E_PROXD_TS_RESULTS 12 /* used by: initiator completed */
+
+/* proxd_event data */
+typedef struct ftm_sample {
+ uint32 value; /* RTT in ns */
+ int8 rssi; /* RSSI */
+} ftm_sample_t;
+
+typedef struct ts_sample {
+ uint32 t1;
+ uint32 t2;
+ uint32 t3;
+ uint32 t4;
+} ts_sample_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct proxd_event_data {
+ uint16 ver; /* version */
+ uint16 mode; /* mode: target/initiator */
+ uint16 method; /* method: rssi/TOF/AOA */
+ uint8 err_code; /* error classification */
+ uint8 TOF_type; /* one way or two way TOF */
+ uint8 OFDM_frame_type; /* legacy or VHT */
+ uint8 bandwidth; /* bandwidth: 20, 40, or 80 MHz */
+ struct ether_addr peer_mac; /* peer mac addr (e.g. for tgt: the initiator's) */
+ uint32 distance; /* distance to tgt, in meters */
+ uint32 meanrtt; /* mean delta */
+ uint32 modertt; /* Mode delta */
+ uint32 medianrtt; /* median RTT */
+ uint32 sdrtt; /* Standard deviation of RTT */
+ int32 gdcalcresult; /* Software or hardware; somewhat redundant, but if */
+ /* frame type is VHT, we should do it in hardware */
+ int16 avg_rssi; /* avg rssi across the ftm frames */
+ int16 validfrmcnt; /* Firmware's valid frame counts */
+ int32 peer_router_info; /* Peer router information if available in TLV, */
+ /* We will add this field later */
+ int32 var1; /* average of group delay */
+ int32 var2; /* average of threshold crossing */
+ int32 var3; /* difference between group delay and threshold crossing */
+ /* raw Fine Time Measurements (ftm) data */
+ uint16 ftm_unit; /* ftm count resolution in picoseconds, 6250 ps by default */
+ uint16 ftm_cnt; /* number of RTD measurements/length of the ftm buffer */
+ ftm_sample_t ftm_buff[1]; /* 1 ... ftm_cnt */
+} BWL_POST_PACKED_STRUCT wl_proxd_event_data_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct proxd_event_ts_results {
+ uint16 ver; /* version */
+ uint16 mode; /* mode: target/initiator */
+ uint16 method; /* method: rssi/TOF/AOA */
+ uint8 err_code; /* error classification */
+ uint8 TOF_type; /* one way or two way TOF */
+ uint16 ts_cnt; /* number of timestamp measurements */
+ ts_sample_t ts_buff[1]; /* Timestamps */
+} BWL_POST_PACKED_STRUCT wl_proxd_event_ts_results_t;
+
+#ifdef WLAWDL
+/* WLC_E_AWDL_AW event data */
+typedef BWL_PRE_PACKED_STRUCT struct awdl_aws_event_data {
+ uint32 fw_time; /* firmware PMU time */
+ struct ether_addr current_master; /* Current master Mac addr */
+ uint16 aw_counter; /* AW seq# */
+ uint8 aw_ext_count; /* AW extension count */
+ uint8 aw_role; /* AW role */
+ uint8 flags; /* AW event flag */
+ uint16 aw_chan;
+ uint8 infra_rssi; /* rssi on the infra channel */
+ uint32 infra_rxbcn_count; /* number of beacons received */
+ struct ether_addr top_master; /* Top master */
+} BWL_POST_PACKED_STRUCT awdl_aws_event_data_t;
+
+/* For awdl_aws_event_data_t.flags */
+#define AWDL_AW_LAST_EXT 0x01
+
+/* WLC_E_AWDL_OOB_AF_STATUS event data */
+typedef BWL_PRE_PACKED_STRUCT struct awdl_oob_af_status_data {
+ uint32 tx_time_diff;
+ uint16 pkt_tag;
+ uint8 tx_chan;
+} BWL_POST_PACKED_STRUCT awdl_oob_af_status_data_t;
+#endif /* WLAWDL */
+
+/* Video Traffic Interference Monitor Event */
+#define INTFER_EVENT_VERSION 1
+#define INTFER_STREAM_TYPE_NONTCP 1
+#define INTFER_STREAM_TYPE_TCP 2
+#define WLINTFER_STATS_NSMPLS 4
+typedef struct wl_intfer_event {
+ uint16 version; /* version */
+ uint16 status; /* status */
+ uint8 txfail_histo[WLINTFER_STATS_NSMPLS]; /* txfail histo */
+} wl_intfer_event_t;
+
+#define RRM_EVENT_VERSION 0
+typedef struct wl_rrm_event {
+ int16 version;
+ int16 len;
+ int16 cat; /* Category */
+ int16 subevent;
+ char payload[1]; /* Measurement payload */
+} wl_rrm_event_t;
+
+/* WLC_E_PSTA_PRIMARY_INTF_IND event data */
+typedef struct wl_psta_primary_intf_event {
+ struct ether_addr prim_ea; /* primary intf ether addr */
+} wl_psta_primary_intf_event_t;
+
+/* WLC_E_DPSTA_INTF_IND event data */
+typedef enum {
+ WL_INTF_PSTA = 1,
+ WL_INTF_DWDS = 2
+} wl_dpsta_intf_type;
+
+typedef struct wl_dpsta_intf_event {
+ wl_dpsta_intf_type intf_type; /* dwds/psta intf register */
+} wl_dpsta_intf_event_t;
+
+/* ********** NAN protocol events/subevents ********** */
+#ifndef NAN_EVENT_BUFFER_SIZE
+#define NAN_EVENT_BUFFER_SIZE 512 /* max size */
+#endif /* NAN_EVENT_BUFFER_SIZE */
+/* NAN Events sent by firmware */
+
+/*
+ * If you make changes to this enum, don't forget to update the mask (if need be).
+ */
+typedef enum wl_nan_events {
+ WL_NAN_EVENT_START = 1, /* NAN cluster started */
+ WL_NAN_EVENT_JOIN = 2, /* To be deprecated */
+ WL_NAN_EVENT_ROLE = 3, /* Role changed */
+ WL_NAN_EVENT_SCAN_COMPLETE = 4, /* To be deprecated */
+ WL_NAN_EVENT_DISCOVERY_RESULT = 5, /* Subscribe Received */
+ WL_NAN_EVENT_REPLIED = 6, /* Publish Sent */
+ WL_NAN_EVENT_TERMINATED = 7, /* sub / pub is terminated */
+ WL_NAN_EVENT_RECEIVE = 8, /* Follow up Received */
+ WL_NAN_EVENT_STATUS_CHG = 9, /* change in nan_mac status */
+ WL_NAN_EVENT_MERGE = 10, /* Merged to a NAN cluster */
+ WL_NAN_EVENT_STOP = 11, /* To be deprecated */
+ WL_NAN_EVENT_P2P = 12, /* Unused */
+ WL_NAN_EVENT_WINDOW_BEGIN_P2P = 13, /* Unused */
+ WL_NAN_EVENT_WINDOW_BEGIN_MESH = 14, /* Unused */
+ WL_NAN_EVENT_WINDOW_BEGIN_IBSS = 15, /* Unused */
+ WL_NAN_EVENT_WINDOW_BEGIN_RANGING = 16, /* Unused */
+ WL_NAN_EVENT_POST_DISC = 17, /* Event for post discovery data */
+ WL_NAN_EVENT_DATA_IF_ADD = 18, /* Unused */
+ WL_NAN_EVENT_DATA_PEER_ADD = 19, /* Event for peer add */
+ /* nan 2.0 */
+ WL_NAN_EVENT_PEER_DATAPATH_IND = 20, /* Incoming DP req */
+ WL_NAN_EVENT_DATAPATH_ESTB = 21, /* DP Established */
+ WL_NAN_EVENT_SDF_RX = 22, /* SDF payload */
+ WL_NAN_EVENT_DATAPATH_END = 23, /* DP Terminate recvd */
+ WL_NAN_EVENT_BCN_RX = 24, /* received beacon payload */
+ WL_NAN_EVENT_PEER_DATAPATH_RESP = 25, /* Peer's DP response */
+ WL_NAN_EVENT_PEER_DATAPATH_CONF = 26, /* Peer's DP confirm */
+ WL_NAN_EVENT_RNG_REQ_IND = 27, /* Range Request */
+ WL_NAN_EVENT_RNG_RPT_IND = 28, /* Range Report */
+ WL_NAN_EVENT_RNG_TERM_IND = 29, /* Range Termination */
+ WL_NAN_EVENT_PEER_DATAPATH_SEC_INST = 30, /* Peer's DP sec install */
+ WL_NAN_EVENT_TXS = 31, /* for tx status of follow-up and SDFs */
+ WL_NAN_EVENT_DW_START = 32, /* dw start */
+ WL_NAN_EVENT_DW_END = 33, /* dw end */
+ WL_NAN_EVENT_CHAN_BOUNDARY = 34, /* channel switch event */
+ WL_NAN_EVENT_MR_CHANGED = 35, /* AMR or IMR changed event during DW */
+ WL_NAN_EVENT_RNG_RESP_IND = 36, /* Range Response Rx */
+ WL_NAN_EVENT_PEER_SCHED_UPD_NOTIF = 37, /* Peer's schedule update notification */
+ WL_NAN_EVENT_PEER_SCHED_REQ = 38, /* Peer's schedule request */
+ WL_NAN_EVENT_PEER_SCHED_RESP = 39, /* Peer's schedule response */
+ WL_NAN_EVENT_PEER_SCHED_CONF = 40, /* Peer's schedule confirm */
+ WL_NAN_EVENT_SENT_DATAPATH_END = 41, /* Sent DP terminate frame */
+ WL_NAN_EVENT_SLOT_START = 42, /* SLOT_START event */
+ WL_NAN_EVENT_SLOT_END = 43, /* SLOT_END event */
+ WL_NAN_EVENT_HOST_ASSIST_REQ = 44, /* Requesting host assist */
+ WL_NAN_EVENT_RX_MGMT_FRM = 45, /* NAN management frame received */
+ WL_NAN_EVENT_DISC_CACHE_TIMEOUT = 46, /* Disc cache timeout */
+ WL_NAN_EVENT_OOB_AF_TXS = 47, /* OOB AF transmit status */
+ WL_NAN_EVENT_OOB_AF_RX = 48, /* OOB AF receive event */
+
+ /* keep WL_NAN_EVENT_INVALID as the last element */
+ WL_NAN_EVENT_INVALID /* delimiter for max value */
+} nan_app_events_e;
+
+/* remove after precommit */
+#define NAN_EV_MASK(ev) (1 << (ev - 1))
+#define IS_NAN_EVT_ON(var, evt) ((var & (1 << (evt-1))) != 0)
+
+#define NAN_EV_MASK_SET(var, evt) \
+ (((uint32)evt < WL_NAN_EVMASK_EXTN_LEN * 8) ? \
+ ((*((uint8 *)var + ((evt - 1)/8))) |= (1 << ((evt - 1) %8))) : 0)
+#define IS_NAN_EVENT_ON(var, evt) \
+ (((uint32)evt < WL_NAN_EVMASK_EXTN_LEN * 8) && \
+ (((*((uint8 *)var + ((evt - 1)/8))) & (1 << ((evt - 1) %8))) != 0))
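+
+/*
+ * Usage sketch (hypothetical, not part of the original header), assuming
+ * an event-mask byte array of WL_NAN_EVMASK_EXTN_LEN bytes (defined in
+ * wlioctl.h):
+ *
+ *	uint8 evmask[WL_NAN_EVMASK_EXTN_LEN];
+ *	bzero(evmask, sizeof(evmask));
+ *	NAN_EV_MASK_SET(evmask, WL_NAN_EVENT_DISCOVERY_RESULT);
+ *	if (IS_NAN_EVENT_ON(evmask, WL_NAN_EVENT_DISCOVERY_RESULT)) {
+ *		... deliver WL_NAN_EVENT_DISCOVERY_RESULT to the host ...
+ *	}
+ */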
+
+/* ******************* end of NAN section *************** */
+
+typedef enum wl_scan_events {
+ WL_SCAN_START = 1,
+ WL_SCAN_END = 2
+} wl_scan_events;
+
+/* WLC_E_ULP event data */
+#define WL_ULP_EVENT_VERSION 1
+#define WL_ULP_DISABLE_CONSOLE 1 /* Disable console message on ULP entry */
+#define WL_ULP_UCODE_DOWNLOAD 2 /* Download ULP ucode file */
+
+typedef struct wl_ulp_event {
+ uint16 version;
+ uint16 ulp_dongle_action;
+} wl_ulp_event_t;
+
+/* TCP keepalive event data */
+typedef BWL_PRE_PACKED_STRUCT struct wl_event_tko {
+ uint8 index; /* TCP connection index, 0 to max-1 */
+ uint8 pad[3]; /* 4-byte struct alignment */
+} BWL_POST_PACKED_STRUCT wl_event_tko_t;
+
+typedef struct {
+ uint8 radar_type; /* one of RADAR_TYPE_XXX */
+ uint16 min_pw; /* minimum pulse-width (usec * 20) */
+ uint16 max_pw; /* maximum pulse-width (usec * 20) */
+ uint16 min_pri; /* minimum pulse repetition interval (usec) */
+ uint16 max_pri; /* maximum pulse repetition interval (usec) */
+ uint16 subband; /* subband/frequency */
+} radar_detected_event_info_t;
+typedef struct wl_event_radar_detect_data {
+	uint32 version;
+	uint16 current_chanspec; /* chanspec on which the radar is received */
+ uint16 target_chanspec; /* Target chanspec after detection of radar on current_chanspec */
+ radar_detected_event_info_t radar_info[2];
+} wl_event_radar_detect_data_t;
+
+#define WL_EVENT_MODESW_VER_1 1
+#define WL_EVENT_MODESW_VER_CURRENT WL_EVENT_MODESW_VER_1
+
+#define WL_E_MODESW_FLAG_MASK_DEVICE 0x01u /* mask of device: belongs to local or peer */
+#define WL_E_MODESW_FLAG_MASK_FROM 0x02u /* mask of origin: firmware or user */
+#define WL_E_MODESW_FLAG_MASK_STATE 0x0Cu /* mask of state: modesw progress state */
+
+#define WL_E_MODESW_FLAG_DEVICE_LOCAL 0x00u /* flag - device: info is about self/local */
+#define WL_E_MODESW_FLAG_DEVICE_PEER 0x01u /* flag - device: info is about peer */
+
+#define WL_E_MODESW_FLAG_FROM_FIRMWARE 0x00u /* flag - from: request is from firmware */
+#define WL_E_MODESW_FLAG_FROM_USER 0x02u /* flag - from: request is from user/iov */
+
+#define WL_E_MODESW_FLAG_STATE_REQUESTED 0x00u /* flag - state: mode switch request */
+#define WL_E_MODESW_FLAG_STATE_INITIATED 0x04u /* flag - state: switch initiated */
+#define WL_E_MODESW_FLAG_STATE_COMPLETE 0x08u /* flag - state: switch completed/success */
+#define WL_E_MODESW_FLAG_STATE_FAILURE 0x0Cu /* flag - state: failed to switch */
+
+/* Get sizeof *X including variable data's length where X is pointer to wl_event_mode_switch_t */
+#define WL_E_MODESW_SIZE(X) (sizeof(*(X)) + (X)->length)
+
+/* Get variable data's length where X is pointer to wl_event_mode_switch_t */
+#define WL_E_MODESW_DATA_SIZE(X) (((X)->length > sizeof(*(X))) ? ((X)->length - sizeof(*(X))) : 0)
+
+#define WL_E_MODESW_REASON_UNKNOWN 0u /* reason: UNKNOWN */
+#define WL_E_MODESW_REASON_ACSD 1u /* reason: ACSD (based on events from FW) */
+#define WL_E_MODESW_REASON_OBSS_DBS 2u /* reason: OBSS DBS (eg. on interference) */
+#define WL_E_MODESW_REASON_DFS 3u /* reason: DFS (eg. on subband radar) */
+#define WL_E_MODESW_REASON_DYN160 4u /* reason: DYN160 (160/2x2 - 80/4x4) */
+
+/* event structure for WLC_E_MODE_SWITCH */
+typedef struct {
+ uint16 version;
+ uint16 length; /* size including 'data' field */
+ uint16 opmode_from;
+ uint16 opmode_to;
+ uint32 flags; /* bit 0: peer(/local==0);
+ * bit 1: user(/firmware==0);
+ * bits 3,2: 00==requested, 01==initiated,
+ * 10==complete, 11==failure;
+ * rest: reserved
+ */
+ uint16 reason; /* value 0: unknown, 1: ACSD, 2: OBSS_DBS,
+ * 3: DFS, 4: DYN160, rest: reserved
+ */
+ uint16 data_offset; /* offset to 'data' from beginning of this struct.
+ * fields may be added between data_offset and data
+ */
+ /* ADD NEW FIELDS HERE */
+ uint8 data[]; /* reason specific data; could be empty */
+} wl_event_mode_switch_t;
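+
+/*
+ * Decoding sketch (hypothetical, not part of the original header) for the
+ * 'flags' and 'data_offset' fields above:
+ *
+ *	const wl_event_mode_switch_t *ms = ...;
+ *	bool peer = (ms->flags & WL_E_MODESW_FLAG_MASK_DEVICE) ==
+ *	        WL_E_MODESW_FLAG_DEVICE_PEER;
+ *	bool from_user = (ms->flags & WL_E_MODESW_FLAG_MASK_FROM) ==
+ *	        WL_E_MODESW_FLAG_FROM_USER;
+ *	uint32 state = ms->flags & WL_E_MODESW_FLAG_MASK_STATE;
+ *	const uint8 *data = (const uint8 *)ms + ms->data_offset;
+ *	uint32 data_len = WL_E_MODESW_DATA_SIZE(ms);
+ */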
+
+/* when reason in WLC_E_MODE_SWITCH is DYN160, data will carry the following structure */
+typedef struct {
+ uint16 trigger; /* value 0: MU to SU, 1: SU to MU, 2: metric_dyn160, 3:re-/assoc,
+ * 4: disassoc, 5: rssi, 6: traffic, 7: interference,
+ * 8: chanim_stats
+ */
+ struct ether_addr sta_addr; /* causal STA's MAC address when known */
+ uint16 metric_160_80; /* latest dyn160 metric */
+ uint8 nss; /* NSS of the STA */
+ uint8 bw; /* BW of the STA */
+ int8 rssi; /* RSSI of the STA */
+ uint8 traffic; /* internal metric of traffic */
+} wl_event_mode_switch_dyn160;
+
+#define WL_EVENT_FBT_VER_1 1
+
+#define WL_E_FBT_TYPE_FBT_OTD_AUTH 1
+#define WL_E_FBT_TYPE_FBT_OTA_AUTH 2
+
+/* event structure for WLC_E_FBT */
+typedef struct {
+ uint16 version;
+ uint16 length; /* size including 'data' field */
+ uint16 type; /* value 0: unknown, 1: FBT OTD Auth Req, 2: FBT OTA Auth Req */
+ uint16 data_offset; /* offset to 'data' from beginning of this struct.
+ * fields may be added between data_offset and data
+ */
+ /* ADD NEW FIELDS HERE */
+ uint8 data[]; /* type specific data; could be empty */
+} wl_event_fbt_t;
+
+/* TWT Setup Completion notifies the user of the status of the TWT Setup
+ * process. When the 'status' field is BCME_OK, the user must check the
+ * 'setup_cmd' field of the 'wl_twt_sdesc_t' structure at the end of the
+ * event data to see the response from the TWT Responding STA; when the
+ * 'status' field is BCME_ERROR or any other non-BCME_OK value, the user
+ * must not use anything from the 'wl_twt_sdesc_t' structure, as it holds
+ * the TWT Requesting STA's own TWT parameters.
+ */
+
+#define WL_TWT_SETUP_CPLT_VER 0u
+
+/* TWT Setup Reason code */
+typedef enum wl_twt_setup_rc {
+ WL_TWT_SETUP_RC_ACCEPT = 0, /* TWT Setup Accepted */
+ WL_TWT_SETUP_RC_REJECT = 1, /* TWT Setup Rejected */
+ WL_TWT_SETUP_RC_TIMEOUT = 2, /* TWT Setup Time-out */
+ WL_TWT_SETUP_RC_IE = 3, /* TWT Setup IE Validation failed */
+ WL_TWT_SETUP_RC_PARAMS = 4, /* TWT Setup IE Params invalid */
+ WL_TWT_SETUP_RC_ERROR = 5, /* Generic Error cases */
+} wl_twt_setup_rc_t;
+
+/* TWT Setup Completion event data */
+typedef struct wl_twt_setup_cplt {
+ uint16 version;
+ uint16 length; /* the byte count of fields from 'dialog' onwards */
+ uint8 dialog; /* Setup frame dialog token */
+ uint8 reason_code; /* see WL_TWT_SETUP_RC_XXXX */
+ uint8 pad[2];
+ int32 status;
+ /* wl_twt_sdesc_t desc; - defined in wlioctl.h */
+} wl_twt_setup_cplt_t;
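+
+/*
+ * Usage sketch (hypothetical, per the note above); wl_twt_sdesc_t is
+ * defined in wlioctl.h and follows the fixed fields only when 'status'
+ * is BCME_OK:
+ *
+ *	const wl_twt_setup_cplt_t *ev = ...;
+ *	if (ev->status == BCME_OK) {
+ *		const wl_twt_sdesc_t *desc =
+ *		        (const wl_twt_sdesc_t *)((const uint8 *)ev + sizeof(*ev));
+ *		... inspect desc->setup_cmd for the responder's reply ...
+ *	}
+ */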
+
+#define WL_TWT_TEARDOWN_CPLT_VER 0u
+
+/* TWT teardown Reason code */
+typedef enum wl_twt_td_rc {
+ WL_TWT_TD_RC_SUCCESS = 0, /* Teardown completed successfully */
+ WL_TWT_TD_RC_HOST = 1, /* Teardown triggered by Host */
+ WL_TWT_TD_RC_PEER = 2, /* Peer initiated teardown */
+ WL_TWT_TD_RC_MCHAN = 3, /* Teardown due to MCHAN Active */
+ WL_TWT_TD_RC_MCNX = 4, /* Teardown due to MultiConnection */
+ WL_TWT_TD_RC_SETUP_FAIL = 5, /* Setup fail midway. Teardown all connections */
+ WL_TWT_TD_RC_SCHED = 6, /* Teardown by TWT Scheduler */
+ WL_TWT_TD_RC_CSA = 7, /* Teardown due to CSA */
+ WL_TWT_TD_RC_BTCX = 8, /* Teardown due to BTCX */
+ WL_TWT_TD_RC_ERROR = 9, /* Generic Error cases */
+} wl_twt_td_rc_t;
+
+/* TWT Teardown complete event data */
+typedef struct wl_twt_teardown_cplt {
+ uint16 version;
+ uint16 length; /* the byte count of fields from 'reason_code' onwards */
+ uint8 reason_code; /* WL_TWT_TD_RC_XXXX */
+ uint8 pad[3];
+ int32 status;
+ /* wl_twt_teardesc_t; - defined in wlioctl.h */
+} wl_twt_teardown_cplt_t;
+
+#define WL_TWT_INFO_CPLT_VER 0u
+
+/* TWT Info Reason code */
+typedef enum wl_twt_info_rc {
+ WL_TWT_INFO_RC_HOST = 0, /* Host initiated Info complete */
+ WL_TWT_INFO_RC_PEER = 1, /* Peer initiated TWT Info */
+ WL_TWT_INFO_RC_ERROR = 2, /* generic error conditions */
+} wl_twt_info_rc_t;
+
+/* TWT Info complete event data */
+typedef struct wl_twt_info_cplt {
+ uint16 version;
+ uint16 length; /* the byte count of fields from 'reason_code' onwards */
+ uint8 reason_code; /* WL_TWT_INFO_RC_XXXX */
+ uint8 pad[3];
+ int32 status;
+ /* wl_twt_infodesc_t; - defined in wlioctl.h */
+} wl_twt_info_cplt_t;
+
+#define WL_INVALID_IE_EVENT_VERSION 0
+
+/* Invalid IE Event data */
+typedef struct wl_invalid_ie_event {
+ uint16 version;
+ uint16 len; /* Length of the invalid IE copy */
+ uint16 type; /* Type/subtype of the frame which contains the invalid IE */
+ uint16 error; /* error code of the wrong IE, defined in ie_error_code_t */
+ uint8 ie[]; /* Variable length buffer for the invalid IE copy */
+} wl_invalid_ie_event_t;
+
+/* Fixed header portion of Invalid IE Event */
+typedef struct wl_invalid_ie_event_hdr {
+ uint16 version;
+ uint16 len; /* Length of the invalid IE copy */
+ uint16 type; /* Type/subtype of the frame which contains the invalid IE */
+ uint16 error; /* error code of the wrong IE, defined in ie_error_code_t */
+ /* var length IE data follows */
+} wl_invalid_ie_event_hdr_t;
+
+typedef enum ie_error_code {
+ IE_ERROR_OUT_OF_RANGE = 0x01
+} ie_error_code_t;
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+/* reason of channel switch */
+typedef enum {
+ CHANSW_DFS = 10, /* channel switch due to DFS module */
+ CHANSW_HOMECH_REQ = 14, /* channel switch due to HOME Channel Request */
+ CHANSW_STA = 15, /* channel switch due to STA */
+ CHANSW_SOFTAP = 16, /* channel switch due to SoftAP */
+ CHANSW_AIBSS = 17, /* channel switch due to AIBSS */
+ CHANSW_NAN = 18, /* channel switch due to NAN */
+ CHANSW_NAN_DISC = 19, /* channel switch due to NAN Disc */
+ CHANSW_NAN_SCHED = 20, /* channel switch due to NAN Sched */
+
+#ifdef WLAWDL
+ CHANSW_AWDL_AW = 21, /* channel switch due to AWDL aw */
+ CHANSW_AWDL_SYNC = 22, /* channel switch due to AWDL sync */
+ CHANSW_AWDL_CAL = 23, /* channel switch due to AWDL Cal */
+ CHANSW_AWDL_PSF = 24, /* channel switch due to AWDL PSF */
+ CHANSW_AWDL_OOB_AF = 25, /* channel switch due to AWDL OOB action frame */
+#endif /* WLAWDL */
+
+ CHANSW_TDLS = 26, /* channel switch due to TDLS */
+ CHANSW_PROXD = 27, /* channel switch due to PROXD */
+ CHANSW_SLOTTED_BSS = 28, /* channel switch due to slotted bss */
+ CHANSW_SLOTTED_CMN_SYNC = 29, /* channel switch due to Common Sync Layer */
+ CHANSW_SLOTTED_BSS_CAL = 30, /* channel switch due to Cal request from slotted bss */
+ CHANSW_MAX_NUMBER = 31 /* max channel switch reason */
+} wl_chansw_reason_t;
+
+#define CHANSW_REASON(reason) (1 << (reason))
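+
+/* Editor's illustrative sketch (not part of the original header; compiled
+ * out via #if 0). CHANSW_REASON() turns a wl_chansw_reason_t into a single
+ * bit so that a set of reasons can be carried in one 32-bit mask, e.g. for
+ * filtering channel switch events.
+ */
+#if 0
+static uint32
+chansw_dfs_or_sta_mask(void)
+{
+ return CHANSW_REASON(CHANSW_DFS) | CHANSW_REASON(CHANSW_STA);
+}
+#endif /* illustrative sketch */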
+
+#define EVENT_AGGR_DATA_HDR_LEN 8
+
+typedef struct event_aggr_data {
+ uint16 num_events; /* No of events aggregated */
+ uint16 len; /* length of the aggregated events, excludes padding */
+ uint8 pad[4]; /* Padding to make aggr event packet header aligned
+ * on 64-bit boundary, for a 64-bit host system.
+ */
+ uint8 data[]; /* Aggregate buffer containing Events */
+} event_aggr_data_t;
+
+/* WLC_E_TVPM_MITIGATION event structure version */
+#define WL_TVPM_MITIGATION_VERSION 1
+
+/* TVPM mitigation on/off status bits */
+#define WL_TVPM_MITIGATION_TXDC 0x1
+#define WL_TVPM_MITIGATION_TXPOWER 0x2
+#define WL_TVPM_MITIGATION_TXCHAINS 0x4
+
+/* Event structure for WLC_E_TVPM_MITIGATION */
+typedef struct wl_event_tvpm_mitigation {
+ uint16 version; /* structure version */
+ uint16 length; /* length of this structure */
+ uint32 timestamp_ms; /* millisecond timestamp */
+ uint8 slice; /* slice number */
+ uint8 pad;
+ uint16 on_off; /* mitigation status bits */
+} wl_event_tvpm_mitigation_t;
+
+/* Event structures for sub health checks of PHY */
+
+#define WL_PHY_HC_DESENSE_STATS_VER (1)
+typedef struct wl_hc_desense_stats {
+ uint16 version;
+ uint16 chanspec;
+ int8 allowed_weakest_rssi; /* based on weakest link RSSI */
+ uint8 ofdm_desense; /* Desense requested for OFDM */
+ uint8 bphy_desense; /* Desense requested for bphy */
+ int8 glitch_upd_wait; /* wait post ACI mitigation */
+} wl_hc_desense_stats_v1_t;
+
+#define WL_PHY_HC_TEMP_STATS_VER (1)
+typedef struct wl_hc_temp_stats {
+ uint16 version;
+ uint16 chanspec;
+ int16 curtemp; /* Temperature */
+ uint8 temp_disthresh; /* Threshold to reduce tx chain */
+ uint8 temp_enthresh; /* Threshold to increase tx chains */
+ uint tempsense_period; /* Temperature check period */
+ bool heatedup; /* 1: temp throttling on */
+ uint8 bitmap; /* Indicating rx and tx chains */
+ uint8 pad[2];
+} wl_hc_temp_stats_v1_t;
+
+#define WL_PHY_HC_TEMP_STATS_VER_2 (2)
+typedef struct {
+ uint16 version;
+ uint16 chanspec;
+ int16 curtemp; /* Temperature */
+ uint8 pad[2];
+} wl_hc_temp_stats_v2_t;
+
+#define WL_PHY_HC_VCOCAL_STATS_VER (1)
+typedef struct wl_hc_vcocal_stats {
+ uint16 version;
+ uint16 chanspec;
+ int16 curtemp; /* Temperature */
+ /* Ring buffer - Maintains history of previous 16 wake/sleep cycles */
+ uint16 vcocal_status_wake;
+ uint16 vcocal_status_sleep;
+ uint16 plllock_status_wake;
+ uint16 plllock_status_sleep;
+ /* Cal Codes */
+ uint16 cc_maincap;
+ uint16 cc_secondcap;
+ uint16 cc_auxcap;
+} wl_hc_vcocal_stats_v1_t;
+
+#define WL_PHY_HC_TXPWR_STATS_VER (1)
+typedef struct wl_hc_tx_stats {
+ uint16 version;
+ uint16 chanspec;
+ int8 tgt_pwr[MAX_PHY_CORE_NUM]; /* Target pwr (qdBm) */
+ int8 estPwr[MAX_PHY_CORE_NUM]; /* Rate corrected (qdBm) */
+ int8 estPwr_adj[MAX_PHY_CORE_NUM]; /* Max power (qdBm) */
+ uint8 baseindex[MAX_PHY_CORE_NUM]; /* Tx base index */
+ int16 temp; /* Temperature */
+ uint16 TxCtrlWrd[3]; /* 6 PHY ctrl bytes */
+ int8 min_txpower; /* min tx power per ant */
+ uint8 pad[3];
+} wl_hc_txpwr_stats_v1_t;
+
+#define WL_PHY_HC_TXPWR_STATS_VER_2 (2)
+typedef struct {
+ uint16 version;
+ uint16 chanspec;
+ int8 tgt_pwr[MAX_PHY_CORE_NUM]; /* Target pwr (qdBm) */
+ uint8 estPwr[MAX_PHY_CORE_NUM]; /* Rate corrected (qdBm) */
+ uint8 estPwr_adj[MAX_PHY_CORE_NUM]; /* Max power (qdBm) */
+ uint8 baseindex[MAX_PHY_CORE_NUM]; /* Tx base index */
+ int16 temp; /* Temperature */
+ uint16 TxCtrlWrd[3]; /* 6 PHY ctrl bytes */
+ int8 min_txpower; /* min tx power per ant */
+ uint8 pad[3];
+} wl_hc_txpwr_stats_v2_t;
+
+typedef enum wl_mbo_event_type {
+ WL_MBO_E_CELLULAR_NW_SWITCH = 1,
+ WL_MBO_E_BTM_RCVD = 2,
+ /* ADD before this */
+ WL_MBO_E_LAST = 3 /* highest val + 1 for range checking */
+} wl_mbo_event_type_t;
+
+/* WLC_E_MBO event structure version */
+#define WL_MBO_EVT_VER 1
+
+struct wl_event_mbo {
+ uint16 version; /* structure version */
+ uint16 length; /* length of the rest of the structure from type */
+ wl_mbo_event_type_t type; /* Event type */
+ uint8 data[]; /* Variable length data */
+};
+
+/* WLC_E_MBO_CELLULAR_NW_SWITCH event structure version */
+#define WL_MBO_CELLULAR_NW_SWITCH_VER 1
+
+/* WLC_E_MBO_CELLULAR_NW_SWITCH event data */
+struct wl_event_mbo_cell_nw_switch {
+ uint16 version; /* structure version */
+ uint16 length; /* length of the rest of the structure from reason */
+ /* Reason of switch as per MBO Tech spec */
+ uint8 reason;
+ /* pad */
+ uint8 pad;
+ /* delay after which re-association to the current BSS may be attempted (seconds) */
+ uint16 reassoc_delay;
+ /* How long the current association will remain (milliseconds).
+ * This is zero if not known or if the value would overflow.
+ */
+ uint32 assoc_time_remain;
+};
+
+/* WLC_E_MBO_BTM_RCVD event structure version */
+#define WL_BTM_EVENT_DATA_VER_1 1
+/* Specific btm event type data */
+struct wl_btm_event_type_data {
+ uint16 version;
+ uint16 len;
+ uint8 transition_reason; /* transition reason code */
+ uint8 pad[3]; /* pad */
+};
+
+/* WLC_E_PRUNE event structure version */
+#define WL_BSSID_PRUNE_EVT_VER_1 1
+/* MBO-OCE params */
+struct wl_bssid_prune_evt_info {
+ uint16 version;
+ uint16 len;
+ uint8 SSID[32];
+ uint32 time_remaining; /* Time remaining */
+ struct ether_addr BSSID;
+ uint8 SSID_len;
+ uint8 reason; /* Reason code */
+ int8 rssi_threshold; /* RSSI threshold */
+ uint8 pad[3]; /* pad */
+};
+
+/* WLC_E_ADPS status */
+enum {
+ WL_E_STATUS_ADPS_DEAUTH = 0,
+ WL_E_STATUS_ADPS_MAX
+};
+
+/* WLC_E_ADPS event data */
+#define WL_EVENT_ADPS_VER_1 1
+
+/* WLC_E_ADPS event type */
+#define WL_E_TYPE_ADPS_BAD_AP 1
+
+typedef struct wl_event_adps_bad_ap {
+ uint32 status;
+ uint32 reason;
+ struct ether_addr ea; /* bssid */
+} wl_event_adps_bad_ap_t;
+
+typedef struct wl_event_adps {
+ uint16 version; /* structure version */
+ uint16 length; /* length of structure */
+ uint32 type; /* event type */
+ uint8 data[]; /* variable length data */
+} wl_event_adps_v1_t;
+
+typedef wl_event_adps_v1_t wl_event_adps_t;
+
+#define WLC_USER_E_KEY_UPDATE 1 /* Key add/remove */
+#define WLC_USER_E_FORCE_FLUSH 2 /* SDC force flush */
+
+/* OBSS HW event data */
+typedef struct wlc_obss_hw_event_data {
+ uint16 available_chanspec; /* Contains band, channel and BW info */
+} wlc_obss_hw_event_data_t;
+
+/* status when WLC_E_OBSS_DETECTION */
+#define WLC_OBSS_BW_UPDATED 1 /* Sent when BW is updated in SW */
+#define WLC_OBSS_BW_AVAILABLE 2 /* Sent when a change in BW is detected */
+
+/* WLC_E_DYNSAR event structure version */
+#define WL_DYNSAR_VERSION 1
+
+/* bits used in status field */
+#define WL_STATUS_DYNSAR_PWR_OPT (1 << 0) /* power optimized */
+#define WL_STATUS_DYNSAR_FAILSAFE (1 << 1) /* radio is using failsafe cap values */
+#define WL_STATUS_DYNSAR_NOMUTE_OPT (1 << 2) /* ack mute */
+
+/* Event structure for WLC_E_DYNSAR */
+typedef struct wl_event_dynsar {
+ uint16 version; /* structure version */
+ uint16 length; /* length of this structure */
+ uint32 timestamp_ms; /* millisecond timestamp */
+ uint8 opt; /* optimization power offset */
+ uint8 slice; /* slice number */
+ uint8 status; /* WL_STATUS_DYNSAR_XXX, to indicate which optimization
+ * is being applied
+ */
+ uint8 pad;
+} wl_event_dynsar_t;
+
+/* status when WLC_E_AP_BCN_MUTE event is sent */
+#define BCN_MUTE_MITI_ACTIVE 1u /* Mitigation is activated when a probe response is
+ * received but no beacon is received
+ */
+#define BCN_MUTE_MITI_END 2u /* Sent when beacon is received */
+#define BCN_MUTE_MITI_TIMEOUT 3u /* Mitigation period is reached */
+
+/* bcn_mute_miti event data */
+#define WLC_BCN_MUTE_MITI_EVENT_DATA_VER_1 1u
+typedef struct wlc_bcn_mute_miti_event_data_v1 {
+ uint16 version; /* Structure version number */
+ uint16 length; /* Length of the whole struct */
+ uint16 uatbtt_count; /* Number of UATBTT during mitigation */
+ uint8 PAD[2]; /* Pad to fit to 32 bit alignment */
+} wlc_bcn_mute_miti_event_data_v1_t;
+
+/* bcn_drift event data */
+#define WLC_BCN_DRIFT_EVENT_DATA_VER_1 (1u)
+typedef struct wlc_bcn_drift_event_data_v1 {
+ uint16 version; /* Structure version number */
+ uint16 length; /* Length of the whole struct */
+ int16 drift; /* in ms */
+ int16 jitter; /* in ms */
+} wlc_bcn_drift_event_data_v1_t;
+
+#endif /* _BCMEVENT_H_ */
diff --git a/bcmdhd.101.10.361.x/include/bcmicmp.h b/bcmdhd.101.10.361.x/include/bcmicmp.h
new file mode 100755
index 0000000..31e809a
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/bcmicmp.h
@@ -0,0 +1,83 @@
+/*
+ * Fundamental constants relating to ICMP Protocol
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _bcmicmp_h_
+#define _bcmicmp_h_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+#define ICMP_TYPE_ECHO_REQUEST 8 /* ICMP type echo request */
+#define ICMP_TYPE_ECHO_REPLY 0 /* ICMP type echo reply */
+
+#define ICMP_CHKSUM_OFFSET 2 /* ICMP body checksum offset */
+
+/* ICMP6 error and control message types */
+#define ICMP6_DEST_UNREACHABLE 1
+#define ICMP6_PKT_TOO_BIG 2
+#define ICMP6_TIME_EXCEEDED 3
+#define ICMP6_PARAM_PROBLEM 4
+#define ICMP6_ECHO_REQUEST 128
+#define ICMP6_ECHO_REPLY 129
+#define ICMP_MCAST_LISTENER_QUERY 130
+#define ICMP_MCAST_LISTENER_REPORT 131
+#define ICMP_MCAST_LISTENER_DONE 132
+#define ICMP6_RTR_SOLICITATION 133
+#define ICMP6_RTR_ADVERTISEMENT 134
+#define ICMP6_NEIGH_SOLICITATION 135
+#define ICMP6_NEIGH_ADVERTISEMENT 136
+#define ICMP6_REDIRECT 137
+
+#define ICMP6_RTRSOL_OPT_OFFSET 8
+#define ICMP6_RTRADV_OPT_OFFSET 16
+#define ICMP6_NEIGHSOL_OPT_OFFSET 24
+#define ICMP6_NEIGHADV_OPT_OFFSET 24
+#define ICMP6_REDIRECT_OPT_OFFSET 40
+
+BWL_PRE_PACKED_STRUCT struct icmp6_opt {
+ uint8 type; /* Option identifier */
+ uint8 length; /* Length including type and length */
+ uint8 data[0]; /* Variable length data */
+} BWL_POST_PACKED_STRUCT;
+
+#define ICMP6_OPT_TYPE_SRC_LINK_LAYER 1
+#define ICMP6_OPT_TYPE_TGT_LINK_LAYER 2
+#define ICMP6_OPT_TYPE_PREFIX_INFO 3
+#define ICMP6_OPT_TYPE_REDIR_HDR 4
+#define ICMP6_OPT_TYPE_MTU 5
+
+/* These fields are stored in network order */
+BWL_PRE_PACKED_STRUCT struct bcmicmp_hdr {
+ uint8 type; /* Echo or Echo-reply */
+ uint8 code; /* Always 0 */
+ uint16 chksum; /* Icmp packet checksum */
+} BWL_POST_PACKED_STRUCT;
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* #ifndef _bcmicmp_h_ */
diff --git a/bcmdhd.101.10.361.x/include/bcmiov.h b/bcmdhd.101.10.361.x/include/bcmiov.h
new file mode 100755
index 0000000..05a9f58
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/bcmiov.h
@@ -0,0 +1,353 @@
+/*
+ * bcmiov.h
+ * Common iovar handling/parsing support - batching, parsing, sub-cmd dispatch etc.
+ * To be used in firmware and host apps or dhd - reducing code size,
+ * duplication, and maintenance overhead.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _bcmiov_h_
+#define _bcmiov_h_
+
+#include <typedefs.h>
+#include <bcmutils.h>
+#include <wlioctl.h>
+#ifdef BCMDRIVER
+#include <osl.h>
+#else
+#include <stddef.h> /* For size_t */
+#endif /* BCMDRIVER */
+
+/* Forward declarations */
+typedef uint16 bcm_iov_cmd_id_t;
+typedef uint16 bcm_iov_cmd_flags_t;
+typedef uint16 bcm_iov_cmd_mflags_t;
+typedef struct bcm_iov_cmd_info bcm_iov_cmd_info_t;
+typedef struct bcm_iov_cmd_digest bcm_iov_cmd_digest_t;
+typedef struct bcm_iov_cmd_tlv_info bcm_iov_cmd_tlv_info_t;
+typedef struct bcm_iov_buf bcm_iov_buf_t;
+typedef struct bcm_iov_batch_buf bcm_iov_batch_buf_t;
+typedef struct bcm_iov_parse_context bcm_iov_parse_context_t;
+typedef struct bcm_iov_sub_cmd_context bcm_iov_sub_cmd_context_t;
+
+typedef void* (*bcm_iov_malloc_t)(void* alloc_ctx, size_t len);
+typedef void (*bcm_iov_free_t)(void* alloc_ctx, void *buf, size_t len);
+
+typedef uint8 bcm_iov_tlp_data_type_t;
+typedef struct bcm_iov_tlp bcm_iov_tlp_t;
+typedef struct bcm_iov_tlp_node bcm_iov_tlp_node_t;
+typedef struct bcm_iov_batch_subcmd bcm_iov_batch_subcmd_t;
+
+/*
+ * iov validation handler - All the common checks that are required
+ * for processing of iovars for any given command.
+ */
+typedef int (*bcm_iov_cmd_validate_t)(const bcm_iov_cmd_digest_t *dig,
+ uint32 actionid, const uint8* ibuf, size_t ilen, uint8 *obuf, size_t *olen);
+
+/* iov get handler - process subcommand specific input and return output.
+ * Input and output may overlap, so the callee needs to check whether that
+ * is supported. For xtlv data a tlv digest is provided to make parsing
+ * simpler. Output tlvs may be packed into the output buffer using bcm xtlv
+ * support. olen is an input/output parameter: on input it holds the max
+ * available obuf length, and the callee must set it to the actual length
+ * of the output returned.
+ */
+typedef int (*bcm_iov_cmd_get_t)(const bcm_iov_cmd_digest_t *dig,
+ const uint8* ibuf, size_t ilen, uint8 *obuf, size_t *olen);
+
+/* iov set handler - process subcommand specific input and return output.
+ * Input and output may overlap, so the callee needs to check whether that
+ * is supported. olen is an input/output parameter: on input it holds the
+ * max available obuf length, and the callee must set it to the actual
+ * length of the output returned.
+ */
+typedef int (*bcm_iov_cmd_set_t)(const bcm_iov_cmd_digest_t *dig,
+ const uint8* ibuf, size_t ilen, uint8 *obuf, size_t *olen);
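+
+/* Editor's illustrative sketch (not part of the original header; compiled
+ * out via #if 0). A minimal bcm_iov_cmd_get_t implementation following the
+ * olen contract described above; 'my_state_blob' is a hypothetical payload.
+ */
+#if 0
+static int
+my_cmd_get(const bcm_iov_cmd_digest_t *dig, const uint8 *ibuf, size_t ilen,
+ uint8 *obuf, size_t *olen)
+{
+ static const uint8 my_state_blob[4] = { 0x01, 0x02, 0x03, 0x04 };
+
+ BCM_REFERENCE(dig);
+ BCM_REFERENCE(ibuf);
+ BCM_REFERENCE(ilen);
+ if (*olen < sizeof(my_state_blob))
+ return BCME_BUFTOOSHORT; /* *olen held the available obuf size */
+ memcpy(obuf, my_state_blob, sizeof(my_state_blob));
+ *olen = sizeof(my_state_blob); /* report the bytes actually written */
+ return BCME_OK;
+}
+#endif /* illustrative sketch */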
+
+/* iov (sub-cmd) batch - a vector of commands. count can be zero
+ * to support a version query. Each command is a tlv - whose data
+ * portion may have an optional return status, followed by a fixed
+ * length data header, optionally followed by tlvs.
+ * cmd = type|length|<status|options>[header][tlvs]
+ */
+
+/*
+ * Batch sub-commands have status length included in the
+ * response length packed in TLV.
+ */
+#define BCM_IOV_STATUS_LEN sizeof(uint32)
+
+/* batch version is indicated by setting high bit. */
+#define BCM_IOV_BATCH_MASK 0x8000
+
+/*
+ * Batched commands will have the following memory layout
+ * +--------+---------+--------+-------+
+ * |version |count | is_set |sub-cmd|
+ * +--------+---------+--------+-------+
+ * version >= 0x8000
+ * count = number of sub-commands encoded in the iov buf
+ * sub-cmd = one or more sub-commands for processing
+ * Where sub-cmd is a padded byte buffer with memory layout as follows
+ * +--------+---------+-----------------------+-------------+------
+ * |cmd-id |length |IN(options) OUT(status)|command data |......
+ * +--------+---------+-----------------------+-------------+------
+ * cmd-id = sub-command ID
+ * length = length of this sub-command
+ * IN(options) = on input, processing options/flags for this command
+ * OUT(status) = on output, processing status for this command
+ * command data = encapsulated IOVAR data as a single structure or packed TLVs for each
+ * individual sub-command.
+ */
+struct bcm_iov_batch_subcmd {
+ uint16 id;
+ uint16 len;
+ union {
+ uint32 options;
+ uint32 status;
+ } u;
+ uint8 data[1];
+};
+
+struct bcm_iov_batch_buf {
+ uint16 version;
+ uint8 count;
+ uint8 is_set; /* to differentiate set or get */
+ struct bcm_iov_batch_subcmd cmds[0];
+};
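+
+/* Editor's illustrative sketch (not part of the original header; compiled
+ * out via #if 0). Builds a one-subcommand batch "get" following the layout
+ * drawn above: the high bit of 'version' marks a batch, and each subcommand
+ * carries its own id/len/options. MY_SUBCMD_ID and the 1-byte payload are
+ * hypothetical; OFFSETOF/htol16 are the usual Broadcom utility macros.
+ */
+#if 0
+#define MY_SUBCMD_ID 1u
+static int
+build_batch_get(uint8 *buf, size_t buflen)
+{
+ bcm_iov_batch_buf_t *b = (bcm_iov_batch_buf_t *)buf;
+ bcm_iov_batch_subcmd_t *sub;
+
+ if (buflen < OFFSETOF(bcm_iov_batch_buf_t, cmds) +
+ OFFSETOF(bcm_iov_batch_subcmd_t, data) + 1u)
+ return BCME_BUFTOOSHORT;
+ b->version = htol16(BCM_IOV_BATCH_MASK); /* batch indication */
+ b->count = 1;
+ b->is_set = 0; /* get */
+ sub = &b->cmds[0];
+ sub->id = htol16(MY_SUBCMD_ID);
+ sub->len = htol16((uint16)(BCM_IOV_STATUS_LEN + 1u)); /* status + 1 payload byte */
+ sub->u.options = 0;
+ sub->data[0] = 0;
+ return BCME_OK;
+}
+#endif /* illustrative sketch */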
+
+/* non-batched command version = major|minor w/ major <= 127 */
+struct bcm_iov_buf {
+ uint16 version;
+ uint16 len;
+ bcm_iov_cmd_id_t id;
+ uint16 data[1]; /* 32 bit alignment may be repurposed by the command */
+ /* command specific data follows */
+};
+
+/* iov options flags */
+enum {
+ BCM_IOV_CMD_OPT_ALIGN_NONE = 0x0000,
+ BCM_IOV_CMD_OPT_ALIGN32 = 0x0001,
+ BCM_IOV_CMD_OPT_TERMINATE_SUB_CMDS = 0x0002
+};
+
+/* iov command flags */
+enum {
+ BCM_IOV_CMD_FLAG_NONE = 0,
+ BCM_IOV_CMD_FLAG_STATUS_PRESENT = (1 << 0), /* status present at data start - output only */
+ BCM_IOV_CMD_FLAG_XTLV_DATA = (1 << 1), /* data is a set of xtlvs */
+ BCM_IOV_CMD_FLAG_HDR_IN_LEN = (1 << 2), /* length starts at version - non-batched only */
+ BCM_IOV_CMD_FLAG_NOPAD = (1 << 3) /* No padding needed after iov_buf */
+};
+
+/* information about the command, xtlv options and xtlvs_off are meaningful
+ * only if XTLV_DATA cmd flag is selected
+ */
+struct bcm_iov_cmd_info {
+ bcm_iov_cmd_id_t cmd; /* the (sub)command - module specific */
+ bcm_iov_cmd_flags_t flags; /* checked by bcmiov but set by module */
+ bcm_iov_cmd_mflags_t mflags; /* owned and checked by module */
+ bcm_xtlv_opts_t xtlv_opts;
+ bcm_iov_cmd_validate_t validate_h; /* command validation handler */
+ bcm_iov_cmd_get_t get_h;
+ bcm_iov_cmd_set_t set_h;
+ uint16 xtlvs_off; /* offset to beginning of xtlvs in cmd data */
+ uint16 min_len_set;
+ uint16 max_len_set;
+ uint16 min_len_get;
+ uint16 max_len_get;
+};
+
+/* tlv digest to support parsing of xtlvs for commands w/ tlv data; the tlv
+ * digest is available in the handler for the command. The count and order in
+ * which tlvs appear in the digest are exactly the same as the order of tlvs
+ * passed in the registration for the command. Unknown tlvs are ignored.
+ * If registered tlvs are missing, datap will be NULL. Common iov processing
+ * acquires an input digest to process the input buffer. The handler is
+ * responsible for constructing an output digest and using packing functions
+ * to generate the output buffer. The handler may use the input digest as the
+ * output digest once the tlv data is extracted and used. Multiple tlv support
+ * involves allocation of tlp nodes (all except the first) as required.
+ */
+
+/* tlp data type indicates if the data is not used/invalid, input or output */
+enum {
+ BCM_IOV_TLP_NODE_INVALID = 0,
+ BCM_IOV_TLP_NODE_IN = 1,
+ BCM_IOV_TLP_NODE_OUT = 2
+};
+
+struct bcm_iov_tlp {
+ uint16 type;
+ uint16 len;
+ uint16 nodeix; /* node index */
+};
+
+/* tlp data for a given tlv - multiple tlvs of same type chained */
+struct bcm_iov_tlp_node {
+ uint8 *next; /* multiple tlv support */
+ bcm_iov_tlp_data_type_t type;
+ uint8 *data; /* pointer to data in buffer or state */
+};
+
+struct bcm_iov_cmd_digest {
+ uint32 version; /* Version */
+ void *cmd_ctx;
+ struct wlc_bsscfg *bsscfg;
+ const bcm_iov_cmd_info_t *cmd_info;
+ uint16 max_tlps; /* number of tlps allocated */
+ uint16 max_nodes; /* number of nodes allocated */
+ uint16 num_tlps; /* number of tlps valid */
+ uint16 num_nodes; /* number of nodes valid */
+ uint16 tlps_off; /* offset to tlps */
+ uint16 nodes_off; /* offset to nodes */
+ /*
+ * bcm_iov_tlp_t tlps[max_tlps];
+ * bcm_iov_tlp_node_t nodes[max_nodes]
+ */
+};
+
+/* get length callback - default length is min_len taken from digest */
+typedef size_t (*bcm_iov_xtlv_get_len_t)(const bcm_iov_cmd_digest_t *dig,
+ const bcm_iov_cmd_tlv_info_t *tlv_info);
+
+/* pack to buffer data callback. under some conditions it might
+ * not be a straight copy and can refer to context(ual) information and
+ * endian conversions...
+ */
+typedef void (*bcm_iov_xtlv_pack_t)(const bcm_iov_cmd_digest_t *dig,
+ const bcm_iov_cmd_tlv_info_t *tlv_info,
+ uint8 *out_buf, const uint8 *in_data, size_t len);
+
+struct bcm_iov_cmd_tlv_info {
+ uint16 id;
+ uint16 min_len; /* inclusive */
+ uint16 max_len; /* inclusive */
+ bcm_iov_xtlv_get_len_t get_len;
+ bcm_iov_xtlv_pack_t pack;
+};
+
+/*
+ * module private parse context. Default version type len is uint16
+ */
+
+/* Command parsing options with respect to validation */
+/* Possible values for parse context options */
+/* Bit 0 - Validate only */
+#define BCM_IOV_PARSE_OPT_BATCH_VALIDATE 0x00000001
+
+typedef uint32 bcm_iov_parse_opts_t;
+
+/* get digest callback */
+typedef int (*bcm_iov_get_digest_t)(void *cmd_ctx, bcm_iov_cmd_digest_t **dig);
+
+typedef struct bcm_iov_parse_config {
+ bcm_iov_parse_opts_t options; /* to handle different ver lengths */
+ bcm_iov_malloc_t alloc_fn;
+ bcm_iov_free_t free_fn;
+ bcm_iov_get_digest_t dig_fn;
+ int max_regs;
+ void *alloc_ctx;
+} bcm_iov_parse_config_t;
+
+/* API */
+
+/* All calls return an integer status code BCME_* unless otherwise indicated */
+
+/* return length of allocation for 'num_cmds' commands. data_len
+ * includes length of data for all the commands excluding the headers
+ */
+size_t bcm_iov_get_alloc_len(int num_cmds, size_t data_len);
+
+/* create parsing context using the allocator provided; max_regs provides
+ * the number of allowed registrations for commands using the context.
+ * Sub-components of a module may register their own commands independently
+ * using the parsing context. If the digest callback is NULL or returns NULL,
+ * the (input) digest is allocated using the provided allocators and released
+ * on completion of processing.
+ */
+int bcm_iov_create_parse_context(const bcm_iov_parse_config_t *parse_cfg,
+ bcm_iov_parse_context_t **parse_ctx);
+
+/* free the parsing context; ctx is set to NULL on exit */
+int bcm_iov_free_parse_context(bcm_iov_parse_context_t **ctx, bcm_iov_free_t free_fn);
+
+/* Return the command context for the module */
+void *bcm_iov_get_cmd_ctx_info(bcm_iov_parse_context_t *parse_ctx);
+
+/* register a command info vector along with supported tlvs. Each command
+ * may support a subset of tlvs
+ */
+int bcm_iov_register_commands(bcm_iov_parse_context_t *parse_ctx, void *cmd_ctx,
+ const bcm_iov_cmd_info_t *info, size_t num_cmds,
+ const bcm_iov_cmd_tlv_info_t *tlv_info, size_t num_tlvs);
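+
+/* Editor's illustrative sketch (not part of the original header; compiled
+ * out via #if 0). A typical attach-time flow: fill bcm_iov_parse_config_t,
+ * create the parse context, then register a command vector. The allocator
+ * callbacks and the 'my_cmds'/'my_tlvs' tables are hypothetical module
+ * definitions.
+ */
+#if 0
+static int
+my_module_iov_attach(void *alloc_ctx, void *cmd_ctx,
+ bcm_iov_malloc_t my_alloc, bcm_iov_free_t my_free,
+ const bcm_iov_cmd_info_t *my_cmds, size_t num_cmds,
+ const bcm_iov_cmd_tlv_info_t *my_tlvs, size_t num_tlvs,
+ bcm_iov_parse_context_t **ctx)
+{
+ bcm_iov_parse_config_t cfg;
+ int err;
+
+ memset(&cfg, 0, sizeof(cfg));
+ cfg.options = 0;
+ cfg.alloc_fn = my_alloc;
+ cfg.free_fn = my_free;
+ cfg.dig_fn = NULL; /* let bcmiov allocate the input digest */
+ cfg.max_regs = 1;
+ cfg.alloc_ctx = alloc_ctx;
+ err = bcm_iov_create_parse_context(&cfg, ctx);
+ if (err != BCME_OK)
+ return err;
+ return bcm_iov_register_commands(*ctx, cmd_ctx, my_cmds, num_cmds,
+ my_tlvs, num_tlvs);
+}
+#endif /* illustrative sketch */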
+
+/* pack the xtlvs provided in the digest. May return BCME_BUFTOOSHORT; in
+ * that case out_len is set to the required length.
+ */
+int bcm_iov_pack_xtlvs(const bcm_iov_cmd_digest_t *dig, bcm_xtlv_opts_t xtlv_opts,
+ uint8 *out_buf, size_t out_size, size_t *out_len);
+
+#ifdef BCMDRIVER
+/* wlc modules register their iovar(s) using the parsing context w/ wlc layer
+ * during attach.
+ */
+struct wlc_if;
+struct wlc_info;
+extern struct wlc_bsscfg *bcm_iov_bsscfg_find_from_wlcif(struct wlc_info *wlc,
+ struct wlc_if *wlcif);
+int bcm_iov_doiovar(void *parse_ctx, uint32 id, void *params, uint params_len,
+ void *arg, uint arg_len, uint vsize, struct wlc_if *intf);
+#endif /* BCMDRIVER */
+
+/* parsing context helpers */
+
+/* get the maximum number of tlvs - can be used to allocate a digest for all
+ * commands; the digest can be shared. A negative return value is a BCME_*
+ * error; a value >= 0 is the number of tlvs.
+ */
+int bcm_iov_parse_get_max_tlvs(const bcm_iov_parse_context_t *ctx);
+
+/* common packing support */
+
+/* pack a buffer of uint8s - memcpy wrapper */
+int bcm_iov_pack_buf(const bcm_iov_cmd_digest_t *dig, uint8 *buf,
+ const uint8 *data, size_t len);
+
+#define bcm_iov_packv_u8 bcm_iov_pack_buf
+
+/*
+ * pack a buffer with uint16s - serialized in LE order, data points to uint16
+ * length is not checked.
+ */
+int bcm_iov_packv_u16(const bcm_iov_cmd_digest_t *dig, uint8 *buf,
+ const uint16 *data, int n);
+
+/*
+ * pack a buffer with uint32s - serialized in LE order - data points to uint32
+ * length is not checked.
+ */
+int bcm_iov_packv_u32(const bcm_iov_cmd_digest_t *dig, uint8 *buf,
+ const uint32 *data, int n);
+
+#endif /* _bcmiov_h_ */
diff --git a/bcmdhd.101.10.361.x/include/bcmip.h b/bcmdhd.101.10.361.x/include/bcmip.h
new file mode 100755
index 0000000..898a231
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/bcmip.h
@@ -0,0 +1,286 @@
+/*
+ * Fundamental constants relating to IP Protocol
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _bcmip_h_
+#define _bcmip_h_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+/* IPV4 and IPV6 common */
+#define IP_VER_OFFSET 0x0 /* offset to version field */
+#define IP_VER_MASK 0xf0 /* version mask */
+#define IP_VER_SHIFT 4 /* version shift */
+#define IP_VER_4 4 /* version number for IPV4 */
+#define IP_VER_6 6 /* version number for IPV6 */
+
+#define IP_VER(ip_body) \
+ ((((uint8 *)(ip_body))[IP_VER_OFFSET] & IP_VER_MASK) >> IP_VER_SHIFT)
+
+#define IP_PROT_ICMP 0x1 /* ICMP protocol */
+#define IP_PROT_IGMP 0x2 /* IGMP protocol */
+#define IP_PROT_TCP 0x6 /* TCP protocol */
+#define IP_PROT_UDP 0x11 /* UDP protocol type */
+#define IP_PROT_GRE 0x2f /* GRE protocol type */
+#define IP_PROT_ICMP6 0x3a /* ICMPv6 protocol type */
+
+/* IPV4 field offsets */
+#define IPV4_VER_HL_OFFSET 0 /* version and ihl byte offset */
+#define IPV4_TOS_OFFSET 1 /* type of service offset */
+#define IPV4_PKTLEN_OFFSET 2 /* packet length offset */
+#define IPV4_PKTFLAG_OFFSET 6 /* more-frag,dont-frag flag offset */
+#define IPV4_PROT_OFFSET 9 /* protocol type offset */
+#define IPV4_CHKSUM_OFFSET 10 /* IP header checksum offset */
+#define IPV4_SRC_IP_OFFSET 12 /* src IP addr offset */
+#define IPV4_DEST_IP_OFFSET 16 /* dest IP addr offset */
+#define IPV4_OPTIONS_OFFSET 20 /* IP options offset */
+#define IPV4_MIN_HEADER_LEN 20 /* Minimum size for an IP header (no options) */
+
+/* IPV4 field decodes */
+#define IPV4_VER_MASK 0xf0 /* IPV4 version mask */
+#define IPV4_VER_SHIFT 4 /* IPV4 version shift */
+
+#define IPV4_HLEN_MASK 0x0f /* IPV4 header length mask */
+#define IPV4_HLEN(ipv4_body) (4 * (((uint8 *)(ipv4_body))[IPV4_VER_HL_OFFSET] & IPV4_HLEN_MASK))
+
+#define IPV4_HLEN_MIN (4 * 5) /* IPV4 header minimum length */
+
+#define IPV4_ADDR_LEN 4 /* IPV4 address length */
+
+#define IPV4_ADDR_NULL(a) ((((uint8 *)(a))[0] | ((uint8 *)(a))[1] | \
+ ((uint8 *)(a))[2] | ((uint8 *)(a))[3]) == 0)
+
+#define IPV4_ADDR_BCAST(a) ((((uint8 *)(a))[0] & ((uint8 *)(a))[1] & \
+ ((uint8 *)(a))[2] & ((uint8 *)(a))[3]) == 0xff)
+
+#define IPV4_TOS_DSCP_MASK 0xfc /* DiffServ codepoint mask */
+#define IPV4_TOS_DSCP_SHIFT 2 /* DiffServ codepoint shift */
+
+#define IPV4_TOS(ipv4_body) (((uint8 *)(ipv4_body))[IPV4_TOS_OFFSET])
+
+#define IPV4_TOS_PREC_MASK 0xe0 /* Historical precedence mask */
+#define IPV4_TOS_PREC_SHIFT 5 /* Historical precedence shift */
+
+#define IPV4_TOS_LOWDELAY 0x10 /* Lowest delay requested */
+#define IPV4_TOS_THROUGHPUT 0x8 /* Best throughput requested */
+#define IPV4_TOS_RELIABILITY 0x4 /* Most reliable delivery requested */
+
+#define IPV4_TOS_ROUTINE 0
+#define IPV4_TOS_PRIORITY 1
+#define IPV4_TOS_IMMEDIATE 2
+#define IPV4_TOS_FLASH 3
+#define IPV4_TOS_FLASHOVERRIDE 4
+#define IPV4_TOS_CRITICAL 5
+#define IPV4_TOS_INETWORK_CTRL 6
+#define IPV4_TOS_NETWORK_CTRL 7
+
+#define IPV4_PROT(ipv4_body) (((uint8 *)(ipv4_body))[IPV4_PROT_OFFSET])
+
+#define IPV4_FRAG_RESV 0x8000 /* Reserved */
+#define IPV4_FRAG_DONT 0x4000 /* Don't fragment */
+#define IPV4_FRAG_MORE 0x2000 /* More fragments */
+#define IPV4_FRAG_OFFSET_MASK 0x1fff /* Fragment offset */
+
+#define IPV4_ADDR_STR_LEN 16 /* Max IP address length in string format */
+
+/* IPV4 packet formats */
+BWL_PRE_PACKED_STRUCT struct ipv4_addr {
+ uint8 addr[IPV4_ADDR_LEN];
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct ipv4_hdr {
+ uint8 version_ihl; /* Version and Internet Header Length */
+ uint8 tos; /* Type Of Service */
+ uint16 tot_len; /* Number of bytes in packet (max 65535) */
+ uint16 id;
+ uint16 frag; /* 3 flag bits and fragment offset */
+ uint8 ttl; /* Time To Live */
+ uint8 prot; /* Protocol */
+ uint16 hdr_chksum; /* IP header checksum */
+ uint8 src_ip[IPV4_ADDR_LEN]; /* Source IP Address */
+ uint8 dst_ip[IPV4_ADDR_LEN]; /* Destination IP Address */
+} BWL_POST_PACKED_STRUCT;
+
+/* IPV6 field offsets */
+#define IPV6_PAYLOAD_LEN_OFFSET 4 /* payload length offset */
+#define IPV6_NEXT_HDR_OFFSET 6 /* next header/protocol offset */
+#define IPV6_HOP_LIMIT_OFFSET 7 /* hop limit offset */
+#define IPV6_SRC_IP_OFFSET 8 /* src IP addr offset */
+#define IPV6_DEST_IP_OFFSET 24 /* dst IP addr offset */
+
+/* IPV6 field decodes */
+#define IPV6_TRAFFIC_CLASS(ipv6_body) \
+ (((((uint8 *)(ipv6_body))[0] & 0x0f) << 4) | \
+ ((((uint8 *)(ipv6_body))[1] & 0xf0) >> 4))
+
+#define IPV6_FLOW_LABEL(ipv6_body) \
+ (((((uint8 *)(ipv6_body))[1] & 0x0f) << 16) | \
+ (((uint8 *)(ipv6_body))[2] << 8) | \
+ (((uint8 *)(ipv6_body))[3]))
+
+#define IPV6_PAYLOAD_LEN(ipv6_body) \
+ ((((uint8 *)(ipv6_body))[IPV6_PAYLOAD_LEN_OFFSET + 0] << 8) | \
+ ((uint8 *)(ipv6_body))[IPV6_PAYLOAD_LEN_OFFSET + 1])
+
+#define IPV6_NEXT_HDR(ipv6_body) \
+ (((uint8 *)(ipv6_body))[IPV6_NEXT_HDR_OFFSET])
+
+#define IPV6_PROT(ipv6_body) IPV6_NEXT_HDR(ipv6_body)
+
+#define IPV6_ADDR_LEN 16 /* IPV6 address length */
+
+/* IPV4 TOS or IPV6 Traffic Classifier or 0 */
+#define IP_TOS46(ip_body) \
+ (IP_VER(ip_body) == IP_VER_4 ? IPV4_TOS(ip_body) : \
+ IP_VER(ip_body) == IP_VER_6 ? IPV6_TRAFFIC_CLASS(ip_body) : 0)
+
+#define IP_DSCP46(ip_body) (IP_TOS46(ip_body) >> IPV4_TOS_DSCP_SHIFT)
+
+/* IPV4 or IPV6 Protocol Classifier or 0 */
+#define IP_PROT46(ip_body) \
+ (IP_VER(ip_body) == IP_VER_4 ? IPV4_PROT(ip_body) : \
+ IP_VER(ip_body) == IP_VER_6 ? IPV6_PROT(ip_body) : 0)
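+
+/* Editor's illustrative sketch (not part of the original header; compiled
+ * out via #if 0). Classifies an IP packet body with the accessors above:
+ * version via IP_VER(), DSCP via IP_DSCP46(), L4 protocol via IP_PROT46(),
+ * and, for IPv4, the L4 header location via IPV4_HLEN().
+ */
+#if 0
+static void
+classify_ip(uint8 *ip_body, uint8 *dscp, uint8 *prot, uint8 **l4)
+{
+ *dscp = (uint8)IP_DSCP46(ip_body);
+ *prot = (uint8)IP_PROT46(ip_body);
+ if (IP_VER(ip_body) == IP_VER_4)
+ *l4 = ip_body + IPV4_HLEN(ip_body);
+ else
+ *l4 = NULL; /* IPv6: walk the extension headers first */
+}
+#endif /* illustrative sketch */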
+
+/* IPV6 extension headers (options) */
+#define IPV6_EXTHDR_HOP 0
+#define IPV6_EXTHDR_ROUTING 43
+#define IPV6_EXTHDR_FRAGMENT 44
+#define IPV6_EXTHDR_AUTH 51
+#define IPV6_EXTHDR_NONE 59
+#define IPV6_EXTHDR_DEST 60
+
+#define IPV6_EXTHDR(prot) (((prot) == IPV6_EXTHDR_HOP) || \
+ ((prot) == IPV6_EXTHDR_ROUTING) || \
+ ((prot) == IPV6_EXTHDR_FRAGMENT) || \
+ ((prot) == IPV6_EXTHDR_AUTH) || \
+ ((prot) == IPV6_EXTHDR_NONE) || \
+ ((prot) == IPV6_EXTHDR_DEST))
+
+#define IPV6_MIN_HLEN 40
+
+#define IPV6_EXTHDR_LEN(eh) ((((struct ipv6_exthdr *)(eh))->hdrlen + 1) << 3)
+
+BWL_PRE_PACKED_STRUCT struct ipv6_exthdr {
+ uint8 nexthdr;
+ uint8 hdrlen;
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct ipv6_exthdr_frag {
+ uint8 nexthdr;
+ uint8 rsvd;
+ uint16 frag_off;
+ uint32 ident;
+} BWL_POST_PACKED_STRUCT;
+
+/* deprecated and replaced by ipv6_exthdr_len_check */
+static INLINE int32
+ipv6_exthdr_len(uint8 *h, uint8 *proto)
+{
+ uint16 len = 0, hlen;
+ struct ipv6_exthdr *eh = (struct ipv6_exthdr *)h;
+
+ while (IPV6_EXTHDR(eh->nexthdr)) {
+ if (eh->nexthdr == IPV6_EXTHDR_NONE)
+ return -1;
+ else if (eh->nexthdr == IPV6_EXTHDR_FRAGMENT)
+ hlen = 8U;
+ else if (eh->nexthdr == IPV6_EXTHDR_AUTH)
+ hlen = (uint16)((eh->hdrlen + 2U) << 2U);
+ else
+ hlen = (uint16)IPV6_EXTHDR_LEN(eh);
+
+ len += hlen;
+ eh = (struct ipv6_exthdr *)(h + len);
+ }
+
+ *proto = eh->nexthdr;
+ return len;
+}
+
+/* determine length of exthdr with length checking */
+static INLINE int32
+ipv6_exthdr_len_check(uint8 *h, uint16 plen, uint8 *proto)
+{
+ uint16 len = 0, hlen;
+ struct ipv6_exthdr *eh = (struct ipv6_exthdr *)h;
+
+ /* must have at least one exthdr */
+ if (plen < sizeof(struct ipv6_exthdr)) {
+ return -1;
+ }
+
+ /* length check before accessing next exthdr */
+ while ((plen >= len + sizeof(struct ipv6_exthdr)) && IPV6_EXTHDR(eh->nexthdr)) {
+ if (eh->nexthdr == IPV6_EXTHDR_NONE) {
+ return -1;
+ } else if (eh->nexthdr == IPV6_EXTHDR_FRAGMENT) {
+ hlen = 8U;
+ } else if (eh->nexthdr == IPV6_EXTHDR_AUTH) {
+ hlen = (uint16)((eh->hdrlen + 2U) << 2U);
+ } else {
+ hlen = (uint16)IPV6_EXTHDR_LEN(eh);
+ }
+
+ /* check exthdr length */
+ if (plen < len + hlen) {
+ /* invalid exthdr */
+ return -1;
+ }
+ len += hlen;
+ eh = (struct ipv6_exthdr *)(h + len);
+ }
+
+ /* length check before accessing next exthdr */
+ if (plen >= len + sizeof(struct ipv6_exthdr)) {
+ *proto = eh->nexthdr;
+ } else {
+ *proto = 0;
+ }
+ return len;
+}
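+
+/* Editor's illustrative sketch (not part of the original header; compiled
+ * out via #if 0). Finds the upper-layer protocol of an IPv6 packet using
+ * the length-checked walker: skip the 40-byte fixed header, then let
+ * ipv6_exthdr_len_check() consume any extension headers that fit in the
+ * remaining bytes. Returns the offset of the upper-layer header, or -1.
+ */
+#if 0
+static int32
+ipv6_upper_proto(uint8 *pkt, uint16 pktlen, uint8 *proto)
+{
+ int32 ext_len;
+
+ if (pktlen < IPV6_MIN_HLEN)
+ return -1;
+ *proto = IPV6_PROT(pkt);
+ if (!IPV6_EXTHDR(*proto))
+ return IPV6_MIN_HLEN; /* no extension headers */
+ ext_len = ipv6_exthdr_len_check(pkt + IPV6_MIN_HLEN,
+ (uint16)(pktlen - IPV6_MIN_HLEN), proto);
+ if (ext_len < 0)
+ return -1;
+ return IPV6_MIN_HLEN + ext_len;
+}
+#endif /* illustrative sketch */
+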
+#define IPV4_ISMULTI(a) (((a) & 0xf0000000) == 0xe0000000)
+
+#define IPV4_MCAST_TO_ETHER_MCAST(ipv4, ether) \
+{ \
+ ether[0] = 0x01; \
+ ether[1] = 0x00; \
+ ether[2] = 0x5E; \
+ ether[3] = (ipv4 & 0x7f0000) >> 16; \
+ ether[4] = (ipv4 & 0xff00) >> 8; \
+ ether[5] = (ipv4 & 0xff); \
+}
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#define IPV4_ADDR_STR "%d.%d.%d.%d"
+#define IPV4_ADDR_TO_STR(addr) ((uint32)(addr) & 0xff000000) >> 24, \
+ ((uint32)(addr) & 0x00ff0000) >> 16, \
+ ((uint32)(addr) & 0x0000ff00) >> 8, \
+ ((uint32)(addr) & 0x000000ff)
+
+#endif /* _bcmip_h_ */
diff --git a/bcmdhd.101.10.361.x/include/bcmipv6.h b/bcmdhd.101.10.361.x/include/bcmipv6.h
new file mode 100755
index 0000000..89a1515
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/bcmipv6.h
@@ -0,0 +1,160 @@
+/*
+ * Fundamental constants relating to Neighbor Discovery Protocol
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _bcmipv6_h_
+#define _bcmipv6_h_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+/* Extension headers */
+#define IPV6_EXT_HOP 0
+#define IPV6_EXT_ROUTE 43
+#define IPV6_EXT_FRAG 44
+#define IPV6_EXT_DEST 60
+#define IPV6_EXT_ESEC 50
+#define IPV6_EXT_AUTH 51
+
+/* Minimum size (extension header "word" length) */
+#define IPV6_EXT_WORD 8
+
+/* Offsets for most extension headers */
+#define IPV6_EXT_NEXTHDR 0
+#define IPV6_EXT_HDRLEN 1
+
+/* Constants specific to fragmentation header */
+#define IPV6_FRAG_MORE_MASK 0x0001
+#define IPV6_FRAG_MORE_SHIFT 0
+#define IPV6_FRAG_OFFS_MASK 0xfff8
+#define IPV6_FRAG_OFFS_SHIFT 3
+
+/* For icmpv6 */
+#define ICMPV6_HEADER_TYPE 0x3A
+#define ICMPV6_PKT_TYPE_RA 134
+#define ICMPV6_PKT_TYPE_NS 135
+#define ICMPV6_PKT_TYPE_NA 136
+
+#define ICMPV6_ND_OPT_TYPE_TARGET_MAC 2
+#define ICMPV6_ND_OPT_TYPE_SRC_MAC 1
+
+#define ICMPV6_ND_OPT_LEN_LINKADDR 1
+
+#define IPV6_VERSION 6
+#define IPV6_HOP_LIMIT 255
+
+#define IPV6_ADDR_NULL(a) ((a[0] | a[1] | a[2] | a[3] | a[4] | \
+ a[5] | a[6] | a[7] | a[8] | a[9] | \
+ a[10] | a[11] | a[12] | a[13] | \
+ a[14] | a[15]) == 0)
+
+#define IPV6_ADDR_LOCAL(a) (((a[0] == 0xfe) && (a[1] & 0x80))? TRUE: FALSE)
+
+/* IPV6 address */
+BWL_PRE_PACKED_STRUCT struct ipv6_addr {
+ uint8 addr[16];
+} BWL_POST_PACKED_STRUCT;
+
+/* use masks and htonl instead of bit fields */
+#ifndef IL_BIGENDIAN
+
+/* ICMPV6 Header */
+BWL_PRE_PACKED_STRUCT struct icmp6_hdr {
+ uint8 icmp6_type;
+ uint8 icmp6_code;
+ uint16 icmp6_cksum;
+ BWL_PRE_PACKED_STRUCT union {
+ uint32 reserved;
+ BWL_PRE_PACKED_STRUCT struct nd_advt {
+ uint32 reserved1:5,
+ override:1,
+ solicited:1,
+ router:1,
+ reserved2:24;
+ } BWL_POST_PACKED_STRUCT nd_advt;
+ } BWL_POST_PACKED_STRUCT opt;
+} BWL_POST_PACKED_STRUCT;
+
+/* Ipv6 Header Format */
+BWL_PRE_PACKED_STRUCT struct ipv6_hdr {
+ uint8 priority:4,
+ version:4;
+ uint8 flow_lbl[3];
+ uint16 payload_len;
+ uint8 nexthdr;
+ uint8 hop_limit;
+ struct ipv6_addr saddr;
+ struct ipv6_addr daddr;
+} BWL_POST_PACKED_STRUCT;
+
+/* Neighbor Advertisement/Solicitation Packet Structure */
+BWL_PRE_PACKED_STRUCT struct bcm_nd_msg {
+ struct icmp6_hdr icmph;
+ struct ipv6_addr target;
+} BWL_POST_PACKED_STRUCT;
+
+/* Neighbor Solicitation/Advertisement Optional Structure */
+BWL_PRE_PACKED_STRUCT struct nd_msg_opt {
+ uint8 type;
+ uint8 len;
+ uint8 mac_addr[ETHER_ADDR_LEN];
+} BWL_POST_PACKED_STRUCT;
+
+/* Ipv6 Fragmentation Header */
+BWL_PRE_PACKED_STRUCT struct ipv6_frag {
+ uint8 nexthdr;
+ uint8 reserved;
+ uint16 frag_offset;
+ uint32 ident;
+} BWL_POST_PACKED_STRUCT;
+
+#endif /* IL_BIGENDIAN */
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+static const struct ipv6_addr all_node_ipv6_maddr = {
+ { 0xff, 0x2, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 1
+ }};
+
+#define IPV6_ISMULTI(a) (a[0] == 0xff)
+
+#define IPV6_MCAST_TO_ETHER_MCAST(ipv6, ether) \
+{ \
+ ether[0] = 0x33; \
+ ether[1] = 0x33; \
+ ether[2] = ipv6[12]; \
+ ether[3] = ipv6[13]; \
+ ether[4] = ipv6[14]; \
+ ether[5] = ipv6[15]; \
+}
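+
+/* Editor's illustrative sketch (not part of the original header; compiled
+ * out via #if 0). The all-nodes address ff02::1 defined above maps to the
+ * Ethernet multicast 33:33:00:00:00:01 - the fixed 33:33 prefix followed by
+ * the last four IPv6 address bytes.
+ */
+#if 0
+static void
+all_nodes_ether(uint8 ether[6])
+{
+ IPV6_MCAST_TO_ETHER_MCAST(all_node_ipv6_maddr.addr, ether);
+}
+#endif /* illustrative sketch */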
+
+#endif /* !defined(_bcmipv6_h_) */
diff --git a/bcmdhd.101.10.361.x/include/bcmmsgbuf.h b/bcmdhd.101.10.361.x/include/bcmmsgbuf.h
new file mode 100755
index 0000000..d9177d7
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/bcmmsgbuf.h
@@ -0,0 +1,1706 @@
+/*
+ * MSGBUF network driver ioctl/indication encoding
+ * Broadcom 802.11abg Networking Device Driver
+ *
+ * Definitions subject to change without notice.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+#ifndef _bcmmsgbuf_h_
+#define _bcmmsgbuf_h_
+
+#include <ethernet.h>
+#include <wlioctl.h>
+#include <bcmpcie.h>
+
+#define MSGBUF_MAX_MSG_SIZE ETHER_MAX_LEN
+
+#define D2H_EPOCH_MODULO 253 /* sequence number wrap */
+#define D2H_EPOCH_INIT_VAL (D2H_EPOCH_MODULO + 1)
+
+#define H2D_EPOCH_MODULO 253 /* sequence number wrap */
+#define H2D_EPOCH_INIT_VAL (H2D_EPOCH_MODULO + 1)
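+
+/* Editor's illustrative sketch (not part of the original header; compiled
+ * out via #if 0). One plausible reading of the epoch scheme: the producer
+ * advances its sequence number modulo 253 per work item, while the
+ * out-of-range INIT_VAL lets a consumer recognize a freshly initialized
+ * (not yet wrapped) sequence.
+ */
+#if 0
+static uint8
+d2h_epoch_next(uint8 epoch)
+{
+ return (uint8)((epoch + 1) % D2H_EPOCH_MODULO);
+}
+#endif /* illustrative sketch */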
+
+/* Txpost base workitem size w/o any extended tags */
+#define H2DRING_TXPOST_BASE_ITEMSIZE 48u
+
+/*
+ * The workitem size - H2DRING_TXPOST_ITEMSIZE - is fixed at compile time
+ * only for FW, depending on the BCMPCIE_EXT_TXPOST_SUPPORT flag.
+ * For DHD the work item size is decided dynamically, based on
+ * the dongle capability announced in the PCIE_SHARED2 flags, which
+ * is read by DHD during dhdpcie_readshared(). Because this
+ * happens before DHD allocates memory for the flowrings, the workitem
+ * size can be dynamic for DHD.
+ */
+#define H2DRING_TXPOST_EXT_ITEMSIZE 56
+#if defined(BCMPCIE_EXT_TXPOST_SUPPORT)
+#define H2DRING_TXPOST_ITEMSIZE H2DRING_TXPOST_EXT_ITEMSIZE
+#else
+#define H2DRING_TXPOST_ITEMSIZE H2DRING_TXPOST_BASE_ITEMSIZE
+#endif
+#define H2DRING_RXPOST_ITEMSIZE 32
+#define H2DRING_CTRL_SUB_ITEMSIZE 40
+
+#define D2HRING_TXCMPLT_ITEMSIZE 24
+#define D2HRING_RXCMPLT_ITEMSIZE 40
+
+#define D2HRING_TXCMPLT_ITEMSIZE_PREREV7 16
+#define D2HRING_RXCMPLT_ITEMSIZE_PREREV7 32
+
+#define D2HRING_CTRL_CMPLT_ITEMSIZE 24
+#define H2DRING_INFO_BUFPOST_ITEMSIZE H2DRING_CTRL_SUB_ITEMSIZE
+#define D2HRING_INFO_BUFCMPLT_ITEMSIZE D2HRING_CTRL_CMPLT_ITEMSIZE
+
+#define D2HRING_SNAPSHOT_CMPLT_ITEMSIZE 20
+
+#define H2DRING_DYNAMIC_INFO_MAX_ITEM 32
+#define D2HRING_DYNAMIC_INFO_MAX_ITEM 32
+
+#define H2DRING_TXPOST_MAX_ITEM 512
+
+#if defined(DHD_HTPUT_TUNABLES)
+#define H2DRING_RXPOST_MAX_ITEM 2048
+#define D2HRING_RXCMPLT_MAX_ITEM 1024
+#define D2HRING_TXCMPLT_MAX_ITEM 2048
+/* Only a few htput flowrings use the htput max items; others use the normal max items */
+#define H2DRING_HTPUT_TXPOST_MAX_ITEM 2048
+#define H2DRING_CTRL_SUB_MAX_ITEM 128
+#else
+#define H2DRING_RXPOST_MAX_ITEM 512
+#define D2HRING_TXCMPLT_MAX_ITEM 1024
+#define D2HRING_RXCMPLT_MAX_ITEM 512
+#define H2DRING_CTRL_SUB_MAX_ITEM 64
+#endif /* DHD_HTPUT_TUNABLES */
+
+#define D2HRING_EDL_HDR_SIZE 48u
+#define D2HRING_EDL_ITEMSIZE 2048u
+#define D2HRING_EDL_MAX_ITEM 256u
+#define D2HRING_EDL_WATERMARK (D2HRING_EDL_MAX_ITEM >> 5u)
+
+#ifdef BCM_ROUTER_DHD
+#define D2HRING_CTRL_CMPLT_MAX_ITEM 256
+#else
+#define D2HRING_CTRL_CMPLT_MAX_ITEM 64
+#endif
+
+/* Max pktids for each type of pkt, shared between host and dongle */
+#define MAX_PKTID_CTRL (1024)
+#define MAX_PKTID_RX (4 * 1024)
+#define MAX_PKTID_TX (36 * 1024)
+
+enum {
+ DNGL_TO_HOST_MSGBUF,
+ HOST_TO_DNGL_MSGBUF
+};
+
+enum {
+ HOST_TO_DNGL_TXP_DATA,
+ HOST_TO_DNGL_RXP_DATA,
+ HOST_TO_DNGL_CTRL,
+ DNGL_TO_HOST_DATA,
+ DNGL_TO_HOST_CTRL
+};
+
+#define MESSAGE_PAYLOAD(a) (((a) & MSG_TYPE_INTERNAL_USE_START) ? TRUE : FALSE)
+#define PCIEDEV_FIRMWARE_TSINFO 0x1
+#define PCIEDEV_FIRMWARE_TSINFO_FIRST 0x1
+#define PCIEDEV_FIRMWARE_TSINFO_MIDDLE 0x2
+#define PCIEDEV_BTLOG_POST 0x3
+#define PCIEDEV_BT_SNAPSHOT_POST 0x4
+
+#ifdef PCIE_API_REV1
+
+#define BCMMSGBUF_DUMMY_REF(a, b) do {BCM_REFERENCE((a));BCM_REFERENCE((b));} while (0)
+
+#define BCMMSGBUF_API_IFIDX(a) 0
+#define BCMMSGBUF_API_SEQNUM(a) 0
+#define BCMMSGBUF_IOCTL_XTID(a) 0
+#define BCMMSGBUF_IOCTL_PKTID(a) ((a)->cmd_id)
+
+#define BCMMSGBUF_SET_API_IFIDX(a, b) BCMMSGBUF_DUMMY_REF(a, b)
+#define BCMMSGBUF_SET_API_SEQNUM(a, b) BCMMSGBUF_DUMMY_REF(a, b)
+#define BCMMSGBUF_IOCTL_SET_PKTID(a, b) (BCMMSGBUF_IOCTL_PKTID(a) = (b))
+#define BCMMSGBUF_IOCTL_SET_XTID(a, b) BCMMSGBUF_DUMMY_REF(a, b)
+
+#else /* PCIE_API_REV1 */
+
+#define BCMMSGBUF_API_IFIDX(a) ((a)->if_id)
+#define BCMMSGBUF_IOCTL_PKTID(a) ((a)->pkt_id)
+#define BCMMSGBUF_API_SEQNUM(a) ((a)->u.seq.seq_no)
+#define BCMMSGBUF_IOCTL_XTID(a) ((a)->xt_id)
+
+#define BCMMSGBUF_SET_API_IFIDX(a, b) (BCMMSGBUF_API_IFIDX((a)) = (b))
+#define BCMMSGBUF_SET_API_SEQNUM(a, b) (BCMMSGBUF_API_SEQNUM((a)) = (b))
+#define BCMMSGBUF_IOCTL_SET_PKTID(a, b) (BCMMSGBUF_IOCTL_PKTID((a)) = (b))
+#define BCMMSGBUF_IOCTL_SET_XTID(a, b) (BCMMSGBUF_IOCTL_XTID((a)) = (b))
+
+#endif /* PCIE_API_REV1 */
+
+/* utility data structures */
+
+union addr64 {
+ struct {
+ uint32 low;
+ uint32 high;
+ };
+ struct {
+ uint32 low_addr;
+ uint32 high_addr;
+ };
+ uint64 u64;
+} DECLSPEC_ALIGN(8);
+
+typedef union addr64 bcm_addr64_t;
+
+/* IOCTL req Hdr */
+/* cmn Msg Hdr */
+typedef struct cmn_msg_hdr {
+ /** message type */
+ uint8 msg_type;
+ /** interface index this is valid for */
+ uint8 if_id;
+ /* flags */
+ uint8 flags;
+ /** sequence number */
+ uint8 epoch;
+ /** packet Identifier for the associated host buffer */
+ uint32 request_id;
+} cmn_msg_hdr_t;
+
+/* cmn aggregated work item msg hdr */
+typedef struct cmn_aggr_msg_hdr {
+ /** aggregate message type */
+ uint8 msg_type;
+ /** aggregation count */
+ uint8 aggr_cnt;
+ /* current phase */
+ uint8 phase;
+ /* flags or sequence number */
+ union {
+ uint8 flags; /* H2D direction */
+ uint8 epoch; /* D2H direction */
+ };
+} cmn_aggr_msg_hdr_t;
+
+/** cmn aggregated completion work item msg hdr */
+typedef struct compl_aggr_msg_hdr {
+ /** interface index this is valid for */
+ uint8 if_id;
+ /** status for the completion */
+ int8 status;
+ /** submission flow ring id which generated this status */
+ uint16 ring_id;
+} compl_aggr_msg_hdr_t;
+
+/** message type */
+typedef enum bcmpcie_msgtype {
+ MSG_TYPE_GEN_STATUS = 0x1,
+ MSG_TYPE_RING_STATUS = 0x2,
+ MSG_TYPE_FLOW_RING_CREATE = 0x3,
+ MSG_TYPE_FLOW_RING_CREATE_CMPLT = 0x4,
+ /* Enum value as copied from BISON 7.15: new generic message */
+ MSG_TYPE_RING_CREATE_CMPLT = 0x4,
+ MSG_TYPE_FLOW_RING_DELETE = 0x5,
+ MSG_TYPE_FLOW_RING_DELETE_CMPLT = 0x6,
+ /* Enum value as copied from BISON 7.15: new generic message */
+ MSG_TYPE_RING_DELETE_CMPLT = 0x6,
+ MSG_TYPE_FLOW_RING_FLUSH = 0x7,
+ MSG_TYPE_FLOW_RING_FLUSH_CMPLT = 0x8,
+ MSG_TYPE_IOCTLPTR_REQ = 0x9,
+ MSG_TYPE_IOCTLPTR_REQ_ACK = 0xA,
+ MSG_TYPE_IOCTLRESP_BUF_POST = 0xB,
+ MSG_TYPE_IOCTL_CMPLT = 0xC,
+ MSG_TYPE_EVENT_BUF_POST = 0xD,
+ MSG_TYPE_WL_EVENT = 0xE,
+ MSG_TYPE_TX_POST = 0xF,
+ MSG_TYPE_TX_STATUS = 0x10,
+ MSG_TYPE_RXBUF_POST = 0x11,
+ MSG_TYPE_RX_CMPLT = 0x12,
+ MSG_TYPE_LPBK_DMAXFER = 0x13,
+ MSG_TYPE_LPBK_DMAXFER_CMPLT = 0x14,
+ MSG_TYPE_FLOW_RING_RESUME = 0x15,
+ MSG_TYPE_FLOW_RING_RESUME_CMPLT = 0x16,
+ MSG_TYPE_FLOW_RING_SUSPEND = 0x17,
+ MSG_TYPE_FLOW_RING_SUSPEND_CMPLT = 0x18,
+ MSG_TYPE_INFO_BUF_POST = 0x19,
+ MSG_TYPE_INFO_BUF_CMPLT = 0x1A,
+ MSG_TYPE_H2D_RING_CREATE = 0x1B,
+ MSG_TYPE_D2H_RING_CREATE = 0x1C,
+ MSG_TYPE_H2D_RING_CREATE_CMPLT = 0x1D,
+ MSG_TYPE_D2H_RING_CREATE_CMPLT = 0x1E,
+ MSG_TYPE_H2D_RING_CONFIG = 0x1F,
+ MSG_TYPE_D2H_RING_CONFIG = 0x20,
+ MSG_TYPE_H2D_RING_CONFIG_CMPLT = 0x21,
+ MSG_TYPE_D2H_RING_CONFIG_CMPLT = 0x22,
+ MSG_TYPE_H2D_MAILBOX_DATA = 0x23,
+ MSG_TYPE_D2H_MAILBOX_DATA = 0x24,
+ MSG_TYPE_TIMSTAMP_BUFPOST = 0x25,
+ MSG_TYPE_HOSTTIMSTAMP = 0x26,
+ MSG_TYPE_HOSTTIMSTAMP_CMPLT = 0x27,
+ MSG_TYPE_FIRMWARE_TIMESTAMP = 0x28,
+ MSG_TYPE_SNAPSHOT_UPLOAD = 0x29,
+ MSG_TYPE_SNAPSHOT_CMPLT = 0x2A,
+ MSG_TYPE_H2D_RING_DELETE = 0x2B,
+ MSG_TYPE_D2H_RING_DELETE = 0x2C,
+ MSG_TYPE_H2D_RING_DELETE_CMPLT = 0x2D,
+ MSG_TYPE_D2H_RING_DELETE_CMPLT = 0x2E,
+ MSG_TYPE_TX_POST_AGGR = 0x2F,
+ MSG_TYPE_TX_STATUS_AGGR = 0x30,
+ MSG_TYPE_RXBUF_POST_AGGR = 0x31,
+ MSG_TYPE_RX_CMPLT_AGGR = 0x32,
+ MSG_TYPE_API_MAX_RSVD = 0x3F
+} bcmpcie_msg_type_t;
+
+/* message type used in internal queue */
+typedef enum bcmpcie_msgtype_int {
+ MSG_TYPE_INTERNAL_USE_START = 0x40, /* internal pkt */
+ MSG_TYPE_EVENT_PYLD = 0x41, /* wl event pkt */
+ MSG_TYPE_IOCT_PYLD = 0x42, /* ioctl compl pkt */
+ MSG_TYPE_RX_PYLD = 0x43,
+ MSG_TYPE_HOST_FETCH = 0x44,
+ MSG_TYPE_LPBK_DMAXFER_PYLD = 0x45, /* loopback pkt */
+ MSG_TYPE_TXMETADATA_PYLD = 0x46, /* transmit status pkt */
+ MSG_TYPE_INDX_UPDATE = 0x47, /* write indx updated */
+ MSG_TYPE_INFO_PYLD = 0x48,
+ MSG_TYPE_TS_EVENT_PYLD = 0x49,
+ MSG_TYPE_PVT_BTLOG_CMPLT = 0x4A,
+ MSG_TYPE_BTLOG_PYLD = 0x4B,
+ MSG_TYPE_HMAPTEST_PYLD = 0x4C,
+ MSG_TYPE_PVT_BT_SNAPSHOT_CMPLT = 0x4D,
+ MSG_TYPE_BT_SNAPSHOT_PYLD = 0x4E,
+ MSG_TYPE_LPBK_DMAXFER_PYLD_ADDR = 0x4F /* loopback from addr pkt */
+} bcmpcie_msgtype_int_t;
+
+typedef enum bcmpcie_msgtype_u {
+ MSG_TYPE_TX_BATCH_POST = 0x80,
+ MSG_TYPE_IOCTL_REQ = 0x81,
+ MSG_TYPE_HOST_EVNT = 0x82, /* console related */
+ MSG_TYPE_LOOPBACK = 0x83
+} bcmpcie_msgtype_u_t;
+
+/**
+ * D2H ring host wakeup soft doorbell, override the PCIE doorbell.
+ * Host configures an <32bit address,value> tuple, and dongle uses SBTOPCIE
+ * Transl0 to write specified value to host address.
+ *
+ * Use case: 32bit Address mapped to HW Accelerator Core/Thread Wakeup Register
+ * and value is Core/Thread context. Host will ensure routing the 32bit address
+ * offered to PCIE to the mapped register.
+ *
+ * D2H_RING_CONFIG_SUBTYPE_SOFT_DOORBELL
+ */
+typedef struct bcmpcie_soft_doorbell {
+ uint32 value; /* host defined value to be written, eg HW threadid */
+ bcm_addr64_t haddr; /* host address, eg thread wakeup register address */
+ uint16 items; /* interrupt coalescing: item count before wakeup */
+ uint16 msecs; /* interrupt coalescing: timeout in millisecs */
+} bcmpcie_soft_doorbell_t;
+
+/**
+ * D2H interrupt using MSI instead of INTX
+ * Host configures MSI vector offset for each D2H interrupt
+ *
+ * D2H_RING_CONFIG_SUBTYPE_MSI_DOORBELL
+ */
+typedef enum bcmpcie_msi_intr_idx {
+ MSI_INTR_IDX_CTRL_CMPL_RING = 0,
+ MSI_INTR_IDX_TXP_CMPL_RING = 1,
+ MSI_INTR_IDX_RXP_CMPL_RING = 2,
+ MSI_INTR_IDX_INFO_CMPL_RING = 3,
+ MSI_INTR_IDX_MAILBOX = 4,
+ MSI_INTR_IDX_MAX = 5
+} bcmpcie_msi_intr_idx_t;
+
+#define BCMPCIE_D2H_MSI_OFFSET_SINGLE 0
+typedef enum bcmpcie_msi_offset_type {
+ BCMPCIE_D2H_MSI_OFFSET_MB0 = 2,
+ BCMPCIE_D2H_MSI_OFFSET_MB1 = 3,
+ BCMPCIE_D2H_MSI_OFFSET_DB0 = 4,
+ BCMPCIE_D2H_MSI_OFFSET_DB1 = 5,
+ BCMPCIE_D2H_MSI_OFFSET_H1_DB0 = 6,
+ BCMPCIE_D2H_MSI_OFFSET_MAX = 7
+} bcmpcie_msi_offset_type_t;
+
+typedef struct bcmpcie_msi_offset {
+ uint16 intr_idx; /* interrupt index */
+ uint16 msi_offset; /* msi vector offset */
+} bcmpcie_msi_offset_t;
+
+typedef struct bcmpcie_msi_offset_config {
+ uint32 len;
+ bcmpcie_msi_offset_t bcmpcie_msi_offset[MSI_INTR_IDX_MAX];
+} bcmpcie_msi_offset_config_t;
+
+#define BCMPCIE_D2H_MSI_OFFSET_DEFAULT BCMPCIE_D2H_MSI_OFFSET_DB1
+
+#define BCMPCIE_D2H_MSI_SINGLE 0xFFFE
+
+/* if_id */
+#define BCMPCIE_CMNHDR_IFIDX_PHYINTF_SHFT 5
+#define BCMPCIE_CMNHDR_IFIDX_PHYINTF_MAX 0x7
+#define BCMPCIE_CMNHDR_IFIDX_PHYINTF_MASK \
+ (BCMPCIE_CMNHDR_IFIDX_PHYINTF_MAX << BCMPCIE_CMNHDR_IFIDX_PHYINTF_SHFT)
+#define BCMPCIE_CMNHDR_IFIDX_VIRTINTF_SHFT 0
+#define BCMPCIE_CMNHDR_IFIDX_VIRTINTF_MAX 0x1F
+#define BCMPCIE_CMNHDR_IFIDX_VIRTINTF_MASK \
+ (BCMPCIE_CMNHDR_IFIDX_VIRTINTF_MAX << BCMPCIE_CMNHDR_IFIDX_VIRTINTF_SHFT)
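+
+/* Editor's illustrative sketch (not part of the original header; compiled
+ * out via #if 0). Packs a physical/virtual interface pair into the
+ * cmn_msg_hdr_t if_id field: 3 bits of physical interface in [7:5] and
+ * 5 bits of virtual interface in [4:0].
+ */
+#if 0
+static uint8
+bcmpcie_pack_ifidx(uint8 phy, uint8 virt)
+{
+ return (uint8)(((phy & BCMPCIE_CMNHDR_IFIDX_PHYINTF_MAX) <<
+ BCMPCIE_CMNHDR_IFIDX_PHYINTF_SHFT) |
+ (virt & BCMPCIE_CMNHDR_IFIDX_VIRTINTF_MAX));
+}
+#endif /* illustrative sketch */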
+
+/* flags */
+#define BCMPCIE_CMNHDR_FLAGS_DMA_R_IDX 0x1
+#define BCMPCIE_CMNHDR_FLAGS_DMA_R_IDX_INTR 0x2
+#define BCMPCIE_CMNHDR_FLAGS_TS_SEQNUM_INIT 0x4
+#define BCMPCIE_CMNHDR_FLAGS_PHASE_BIT 0x80
+#define BCMPCIE_CMNHDR_PHASE_BIT_INIT 0x80
+
+/* IOCTL request message */
+typedef struct ioctl_req_msg {
+ /** common message header */
+ cmn_msg_hdr_t cmn_hdr;
+ /** ioctl command type */
+ uint32 cmd;
+ /** ioctl transaction ID, to pair with an ioctl response */
+ uint16 trans_id;
+ /** input arguments buffer len */
+ uint16 input_buf_len;
+ /** expected output len */
+ uint16 output_buf_len;
+ /** to align the host address on 8 byte boundary */
+ uint16 rsvd[3];
+ /** always align on 8 byte boundary */
+ bcm_addr64_t host_input_buf_addr;
+ /* rsvd */
+ uint32 rsvd1[2];
+} ioctl_req_msg_t;
+
+/** buffer post messages for the device to use to return IOCTL responses and events */
+typedef struct ioctl_resp_evt_buf_post_msg {
+ /** common message header */
+ cmn_msg_hdr_t cmn_hdr;
+ /** length of the host buffer supplied */
+ uint16 host_buf_len;
+ /** to align the host address on 8 byte boundary */
+ uint16 reserved[3];
+ /** always align on 8 byte boundary */
+ bcm_addr64_t host_buf_addr;
+ uint32 rsvd[4];
+} ioctl_resp_evt_buf_post_msg_t;
+
+/* buffer post messages for device to use to return dbg buffers */
+typedef ioctl_resp_evt_buf_post_msg_t info_buf_post_msg_t;
+
+#ifdef DHD_EFI
+#define DHD_INFOBUF_RX_BUFPOST_PKTSZ 1800
+#else
+#define DHD_INFOBUF_RX_BUFPOST_PKTSZ (2 * 1024)
+#endif
+
+#define DHD_BTLOG_RX_BUFPOST_PKTSZ (2 * 1024)
+
+/* An infobuf host buffer starts with a 32 bit (LE) version. */
+#define PCIE_INFOBUF_V1 1
+/* Infobuf v1 type MSGTRACE's data is exactly the same as the MSGTRACE data that
+ * is wrapped previously/also in a WLC_E_TRACE event. See structure
+ * msgtrace_hdr_t in msgtrace.h.
+*/
+#define PCIE_INFOBUF_V1_TYPE_MSGTRACE 1
+
+/* Infobuf v1 type LOGTRACE data is exactly the same as the LOGTRACE data that
+ * is wrapped previously/also in a WLC_E_TRACE event. See structure
+ * msgtrace_hdr_t in msgtrace.h. (The only difference between a MSGTRACE
+ * and a LOGTRACE is the "trace type" field.)
+*/
+#define PCIE_INFOBUF_V1_TYPE_LOGTRACE 2
+
+/* An infobuf version 1 host buffer has a single TLV. The information on the
+ * version 1 types follows this structure definition. (ints are LE)
+*/
+typedef struct info_buf_payload_hdr_s {
+ uint16 type;
+ uint16 length;
+} info_buf_payload_hdr_t;
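+
+/* Editor's illustrative sketch (not part of the original header; compiled
+ * out via #if 0). Reads an infobuf host buffer as described above: a 32-bit
+ * LE version word, then a single type/length header followed by 'length'
+ * payload bytes. ltoh16/ltoh32 are the usual Broadcom endian macros.
+ */
+#if 0
+static int
+infobuf_get_payload(const uint8 *buf, uint32 buflen,
+ const uint8 **payload, uint16 *type, uint16 *length)
+{
+ uint32 ver;
+ info_buf_payload_hdr_t hdr;
+
+ if (buflen < sizeof(ver) + sizeof(hdr))
+ return BCME_BUFTOOSHORT;
+ memcpy(&ver, buf, sizeof(ver));
+ if (ltoh32(ver) != PCIE_INFOBUF_V1)
+ return BCME_VERSION;
+ memcpy(&hdr, buf + sizeof(ver), sizeof(hdr));
+ *type = ltoh16(hdr.type);
+ *length = ltoh16(hdr.length);
+ if (buflen < sizeof(ver) + sizeof(hdr) + *length)
+ return BCME_BUFTOOSHORT;
+ *payload = buf + sizeof(ver) + sizeof(hdr);
+ return BCME_OK;
+}
+#endif /* illustrative sketch */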
+
+/* BT logs/memory to DMA directly from BT memory to host */
+typedef struct info_buf_btlog_s {
+ void (*status_cb)(void *ctx, void *p, int error); /* obsolete - to be removed */
+ void *ctx;
+ dma64addr_t src_addr;
+ uint32 length;
+ bool (*pcie_status_cb)(osl_t *osh, void *p, int error);
+ uint32 bt_intstatus;
+ int error;
+} info_buf_btlog_t;
+
+/** snapshot upload request message */
+typedef struct snapshot_upload_request_msg {
+ /** common message header */
+ cmn_msg_hdr_t cmn_hdr;
+ /** length of the snapshot buffer supplied */
+ uint32 snapshot_buf_len;
+ /** type of snapshot */
+ uint8 snapshot_type;
+ /** snapshot param */
+ uint8 snapshot_param;
+ /** to align the host address on 8 byte boundary */
+ uint8 reserved[2];
+ /** always align on 8 byte boundary */
+ bcm_addr64_t host_buf_addr;
+ uint32 rsvd[4];
+} snapshot_upload_request_msg_t;
+
+/** snapshot types */
+typedef enum bcmpcie_snapshot_type {
+ SNAPSHOT_TYPE_BT = 0, /* Bluetooth SRAM and patch RAM */
+ SNAPSHOT_TYPE_WLAN_SOCRAM = 1, /* WLAN SOCRAM */
+ SNAPSHOT_TYPE_WLAN_HEAP = 2, /* WLAN HEAP */
+ SNAPSHOT_TYPE_WLAN_REGISTER = 3 /* WLAN registers */
+} bcmpcie_snapshot_type_t;
+
+#define PCIE_DMA_XFER_FLG_D11_LPBK_MASK 0xF
+#define PCIE_DMA_XFER_FLG_D11_LPBK_SHIFT 2
+#define PCIE_DMA_XFER_FLG_CORE_NUMBER_MASK 3
+#define PCIE_DMA_XFER_FLG_CORE_NUMBER_SHIFT 0
+
+typedef struct pcie_dma_xfer_params {
+ /** common message header */
+ cmn_msg_hdr_t cmn_hdr;
+
+ /** always align on 8 byte boundary */
+ bcm_addr64_t host_input_buf_addr;
+
+ /** always align on 8 byte boundary */
+ bcm_addr64_t host_ouput_buf_addr;
+
+ /** length of transfer */
+ uint32 xfer_len;
+ /** delay before doing the src txfer */
+ uint32 srcdelay;
+ /** delay before doing the dest txfer */
+ uint32 destdelay;
+ uint8 rsvd[3];
+ /* bit0: D11 DMA loopback flag */
+ uint8 flags;
+} pcie_dma_xfer_params_t;
+
+#define BCMPCIE_FLOW_RING_INTF_HP2P 0x01u /* bit0 */
+#define BCMPCIE_FLOW_RING_OPT_EXT_TXSTATUS 0x02u /* bit1 */
+#define BCMPCIE_FLOW_RING_INTF_MESH 0x04u /* bit2, identifies the mesh flow ring */
+
+/** Complete msgbuf hdr for flow ring update from host to dongle */
+typedef struct tx_flowring_create_request {
+ cmn_msg_hdr_t msg;
+ uint8 da[ETHER_ADDR_LEN];
+ uint8 sa[ETHER_ADDR_LEN];
+ uint8 tid;
+ uint8 if_flags;
+ uint16 flow_ring_id;
+ uint8 tc;
+ /* priority_ifrmmask defines the core mask in IFRM mode.
+ * It is currently not used for priority, so it serves solely as the IFRM mask.
+ */
+ uint8 priority_ifrmmask;
+ uint16 int_vector;
+ uint16 max_items;
+ uint16 len_item;
+ bcm_addr64_t flow_ring_ptr;
+} tx_flowring_create_request_t;
+
+typedef struct tx_flowring_delete_request {
+ cmn_msg_hdr_t msg;
+ uint16 flow_ring_id;
+ uint16 reason;
+ uint32 rsvd[7];
+} tx_flowring_delete_request_t;
+
+typedef tx_flowring_delete_request_t d2h_ring_delete_req_t;
+typedef tx_flowring_delete_request_t h2d_ring_delete_req_t;
+
+typedef struct tx_flowring_flush_request {
+ cmn_msg_hdr_t msg;
+ uint16 flow_ring_id;
+ uint16 reason;
+ uint32 rsvd[7];
+} tx_flowring_flush_request_t;
+
+/** Subtypes for ring_config_req control message */
+typedef enum ring_config_subtype {
+ /** Default D2H PCIE doorbell override using ring_config_req msg */
+ D2H_RING_CONFIG_SUBTYPE_SOFT_DOORBELL = 1, /* Software doorbell */
+ D2H_RING_CONFIG_SUBTYPE_MSI_DOORBELL = 2 /* MSI configuration */
+} ring_config_subtype_t;
+
+typedef struct ring_config_req { /* pulled from upcoming rev6 ... */
+ cmn_msg_hdr_t msg;
+ uint16 subtype;
+ uint16 ring_id;
+ uint32 rsvd;
+ union {
+ uint32 data[6];
+ /** D2H_RING_CONFIG_SUBTYPE_SOFT_DOORBELL */
+ bcmpcie_soft_doorbell_t soft_doorbell;
+ /** D2H_RING_CONFIG_SUBTYPE_MSI_DOORBELL */
+ bcmpcie_msi_offset_config_t msi_offset;
+ };
+} ring_config_req_t;
+
+/* data structure to use to create on the fly d2h rings */
+typedef struct d2h_ring_create_req {
+ cmn_msg_hdr_t msg;
+ uint16 ring_id;
+ uint16 ring_type;
+ uint32 flags;
+ bcm_addr64_t ring_ptr;
+ uint16 max_items;
+ uint16 len_item;
+ uint32 rsvd[3];
+} d2h_ring_create_req_t;
+
+/* data structure to use to create on the fly h2d rings */
+#define MAX_COMPLETION_RING_IDS_ASSOCIATED 4
+typedef struct h2d_ring_create_req {
+ cmn_msg_hdr_t msg;
+ uint16 ring_id;
+ uint8 ring_type;
+ uint8 n_completion_ids;
+ uint32 flags;
+ bcm_addr64_t ring_ptr;
+ uint16 max_items;
+ uint16 len_item;
+ uint16 completion_ring_ids[MAX_COMPLETION_RING_IDS_ASSOCIATED];
+ uint32 rsvd;
+} h2d_ring_create_req_t;
+
+typedef struct d2h_ring_config_req {
+ cmn_msg_hdr_t msg;
+ uint16 d2h_ring_config_subtype;
+ uint16 d2h_ring_id;
+ uint32 d2h_ring_config_data[4];
+ uint32 rsvd[3];
+} d2h_ring_config_req_t;
+
+typedef struct h2d_ring_config_req {
+ cmn_msg_hdr_t msg;
+ uint16 h2d_ring_config_subtype;
+ uint16 h2d_ring_id;
+ uint32 h2d_ring_config_data;
+ uint32 rsvd[6];
+} h2d_ring_config_req_t;
+
+typedef struct h2d_mailbox_data {
+ cmn_msg_hdr_t msg;
+ uint32 mail_box_data;
+ uint32 rsvd[7];
+} h2d_mailbox_data_t;
+typedef struct host_timestamp_msg {
+ cmn_msg_hdr_t msg;
+ uint16 xt_id; /* transaction ID */
+ uint16 input_data_len; /* data len at the host_buf_addr, data in TLVs */
+ uint16 seqnum; /* number of times host captured the timestamp */
+ uint16 rsvd;
+ /* always align on 8 byte boundary */
+ bcm_addr64_t host_buf_addr;
+ /* rsvd */
+ uint32 rsvd1[4];
+} host_timestamp_msg_t;
+
+/* buffer post message for timestamp events MSG_TYPE_TIMSTAMP_BUFPOST */
+typedef ioctl_resp_evt_buf_post_msg_t ts_buf_post_msg_t;
+
+typedef union ctrl_submit_item {
+ ioctl_req_msg_t ioctl_req;
+ ioctl_resp_evt_buf_post_msg_t resp_buf_post;
+ pcie_dma_xfer_params_t dma_xfer;
+ tx_flowring_create_request_t flow_create;
+ tx_flowring_delete_request_t flow_delete;
+ tx_flowring_flush_request_t flow_flush;
+ ring_config_req_t ring_config_req;
+ d2h_ring_create_req_t d2h_create;
+ h2d_ring_create_req_t h2d_create;
+ d2h_ring_config_req_t d2h_config;
+ h2d_ring_config_req_t h2d_config;
+ h2d_mailbox_data_t h2d_mailbox_data;
+ host_timestamp_msg_t host_ts;
+ ts_buf_post_msg_t ts_buf_post;
+ d2h_ring_delete_req_t d2h_delete;
+ h2d_ring_delete_req_t h2d_delete;
+ unsigned char check[H2DRING_CTRL_SUB_ITEMSIZE];
+} ctrl_submit_item_t;
+
+typedef struct info_ring_submit_item {
+ info_buf_post_msg_t info_buf_post;
+ unsigned char check[H2DRING_INFO_BUFPOST_ITEMSIZE];
+} info_sumbit_item_t;
+
+/** Control Completion messages (20 bytes) */
+typedef struct compl_msg_hdr {
+ union {
+ /** status for the completion */
+ int16 status;
+
+ /* mutually exclusive with pkt fate debug feature */
+ struct pktts_compl_hdr {
+ uint16 d_t4; /* Delta TimeStamp 3: T4-tref */
+ } tx_pktts;
+ };
+ /** submission flow ring id which generated this status */
+ union {
+ uint16 ring_id;
+ uint16 flow_ring_id;
+ };
+} compl_msg_hdr_t;
+
+/** XOR checksum or a magic number to audit DMA done */
+typedef uint32 dma_done_t;
+
+#define MAX_CLKSRC_ID 0xF
+#define TX_PKT_RETRY_CNT_0_MASK 0x000000FF
+#define TX_PKT_RETRY_CNT_0_SHIFT 0
+#define TX_PKT_RETRY_CNT_1_MASK 0x0000FF00
+#define TX_PKT_RETRY_CNT_1_SHIFT 8
+#define TX_PKT_RETRY_CNT_2_MASK 0x00FF0000
+#define TX_PKT_RETRY_CNT_2_SHIFT 16
+#define TX_PKT_BAND_INFO 0x0F000000
+#define TX_PKT_BAND_INFO_SHIFT 24
+#define TX_PKT_VALID_INFO 0xF0000000
+#define TX_PKT_VALID_INFO_SHIFT 28
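+
+/* Illustrative sketch (not part of the driver): unpacking the per-packet
+ * retry counters and band info carried in tx_pkt_band_retry_info using the
+ * mask/shift pairs above. The guard macro is hypothetical, examples only.
+ */
+#if defined(BCMMSGBUF_EXAMPLES)
+static void
+tx_pkt_retry_info_example(uint32 info)
+{
+	uint32 retry0 = (info & TX_PKT_RETRY_CNT_0_MASK) >> TX_PKT_RETRY_CNT_0_SHIFT;
+	uint32 retry1 = (info & TX_PKT_RETRY_CNT_1_MASK) >> TX_PKT_RETRY_CNT_1_SHIFT;
+	uint32 retry2 = (info & TX_PKT_RETRY_CNT_2_MASK) >> TX_PKT_RETRY_CNT_2_SHIFT;
+	uint32 band   = (info & TX_PKT_BAND_INFO) >> TX_PKT_BAND_INFO_SHIFT;
+	uint32 valid  = (info & TX_PKT_VALID_INFO) >> TX_PKT_VALID_INFO_SHIFT;
+	(void)retry0; (void)retry1; (void)retry2; (void)band; (void)valid;
+}
+#endif /* BCMMSGBUF_EXAMPLES */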
+
+typedef struct ts_timestamp_srcid {
+ union {
+ uint32 ts_low; /* time stamp low 32 bits */
+ uint32 rate_spec; /* use ratespec */
+ };
+ union {
+ uint32 ts_high; /* time stamp high 28 bits */
+ union {
+ uint32 ts_high_ext :28; /* time stamp high 28 bits */
+ uint32 clk_id_ext :3; /* clock ID source */
+ uint32 phase :1; /* Phase bit */
+ dma_done_t marker_ext;
+ };
+ uint32 tx_pkt_band_retry_info;
+ };
+} ts_timestamp_srcid_t;
+
+typedef ts_timestamp_srcid_t ipc_timestamp_t;
+
+typedef struct ts_timestamp {
+ uint32 low;
+ uint32 high;
+} ts_timestamp_t;
+
+typedef ts_timestamp_t tick_count_64_t;
+typedef ts_timestamp_t ts_timestamp_ns_64_t;
+typedef ts_timestamp_t ts_correction_m_t;
+typedef ts_timestamp_t ts_correction_b_t;
+
+typedef struct _pktts {
+ uint32 tref; /* Ref Clk in uSec (currently, tsf) */
+ uint16 d_t2; /* Delta TimeStamp 1: T2-tref */
+ uint16 d_t3; /* Delta TimeStamp 2: T3-tref */
+} pktts_t;
+
+/* completion header status codes */
+#define BCMPCIE_SUCCESS 0
+#define BCMPCIE_NOTFOUND 1
+#define BCMPCIE_NOMEM 2
+#define BCMPCIE_BADOPTION 3
+#define BCMPCIE_RING_IN_USE 4
+#define BCMPCIE_RING_ID_INVALID 5
+#define BCMPCIE_PKT_FLUSH 6
+#define BCMPCIE_NO_EVENT_BUF 7
+#define BCMPCIE_NO_RX_BUF 8
+#define BCMPCIE_NO_IOCTLRESP_BUF 9
+#define BCMPCIE_MAX_IOCTLRESP_BUF 10
+#define BCMPCIE_MAX_EVENT_BUF 11
+#define BCMPCIE_BAD_PHASE 12
+#define BCMPCIE_INVALID_CPL_RINGID 13
+#define BCMPCIE_RING_TYPE_INVALID 14
+#define BCMPCIE_NO_TS_EVENT_BUF 15
+#define BCMPCIE_MAX_TS_EVENT_BUF 16
+#define BCMPCIE_PCIE_NO_BTLOG_BUF 17
+#define BCMPCIE_BT_DMA_ERR 18
+#define BCMPCIE_BT_DMA_DESCR_FETCH_ERR 19
+#define BCMPCIE_SNAPSHOT_ERR 20
+#define BCMPCIE_NOT_READY 21
+#define BCMPCIE_INVALID_DATA 22
+#define BCMPCIE_NO_RESPONSE 23
+#define BCMPCIE_NO_CLOCK 24
+
+/** IOCTL completion response */
+typedef struct ioctl_compl_resp_msg {
+ /** common message header */
+ cmn_msg_hdr_t cmn_hdr;
+ /** completion message header */
+ compl_msg_hdr_t compl_hdr;
+ /** response buffer len where a host buffer is involved */
+ uint16 resp_len;
+ /** transaction id to pair with a request */
+ uint16 trans_id;
+ /** cmd id */
+ uint32 cmd;
+ /** XOR checksum or a magic number to audit DMA done */
+ dma_done_t marker;
+} ioctl_comp_resp_msg_t;
+
+/** IOCTL request acknowledgement */
+typedef struct ioctl_req_ack_msg {
+ /** common message header */
+ cmn_msg_hdr_t cmn_hdr;
+ /** completion message header */
+ compl_msg_hdr_t compl_hdr;
+ /** cmd id */
+ uint32 cmd;
+ uint32 rsvd;
+ /** XOR checksum or a magic number to audit DMA done */
+ dma_done_t marker;
+} ioctl_req_ack_msg_t;
+
+/** WL event message: send from device to host */
+typedef struct wlevent_req_msg {
+ /** common message header */
+ cmn_msg_hdr_t cmn_hdr;
+ /** completion message header */
+ compl_msg_hdr_t compl_hdr;
+ /** event data len valid with the event buffer */
+ uint16 event_data_len;
+ /** sequence number */
+ uint16 seqnum;
+ /** rsvd */
+ uint32 rsvd;
+ /** XOR checksum or a magic number to audit DMA done */
+ dma_done_t marker;
+} wlevent_req_msg_t;
+
+/** dma xfer complete message */
+typedef struct pcie_dmaxfer_cmplt {
+ /** common message header */
+ cmn_msg_hdr_t cmn_hdr;
+ /** completion message header */
+ compl_msg_hdr_t compl_hdr;
+ uint32 rsvd[2];
+ /** XOR checksum or a magic number to audit DMA done */
+ dma_done_t marker;
+} pcie_dmaxfer_cmplt_t;
+
+/** general status message */
+typedef struct pcie_gen_status {
+ /** common message header */
+ cmn_msg_hdr_t cmn_hdr;
+ /** completion message header */
+ compl_msg_hdr_t compl_hdr;
+ uint32 rsvd[2];
+ /** XOR checksum or a magic number to audit DMA done */
+ dma_done_t marker;
+} pcie_gen_status_t;
+
+/** ring status message */
+typedef struct pcie_ring_status {
+ /** common message header */
+ cmn_msg_hdr_t cmn_hdr;
+ /** completion message header */
+ compl_msg_hdr_t compl_hdr;
+ /** write index of the ring holding the message that firmware couldn't decode */
+ uint16 write_idx;
+ uint16 rsvd[3];
+ /** XOR checksum or a magic number to audit DMA done */
+ dma_done_t marker;
+} pcie_ring_status_t;
+
+typedef struct ring_create_response {
+ cmn_msg_hdr_t cmn_hdr;
+ compl_msg_hdr_t cmplt;
+ uint32 rsvd[2];
+ /** XOR checksum or a magic number to audit DMA done */
+ dma_done_t marker;
+} ring_create_response_t;
+
+typedef ring_create_response_t tx_flowring_create_response_t;
+typedef ring_create_response_t h2d_ring_create_response_t;
+typedef ring_create_response_t d2h_ring_create_response_t;
+
+typedef struct tx_flowring_delete_response {
+ cmn_msg_hdr_t msg;
+ compl_msg_hdr_t cmplt;
+ uint16 read_idx;
+ uint16 rsvd[3];
+ /** XOR checksum or a magic number to audit DMA done */
+ dma_done_t marker;
+} tx_flowring_delete_response_t;
+
+typedef tx_flowring_delete_response_t h2d_ring_delete_response_t;
+typedef tx_flowring_delete_response_t d2h_ring_delete_response_t;
+
+typedef struct tx_flowring_flush_response {
+ cmn_msg_hdr_t msg;
+ compl_msg_hdr_t cmplt;
+ uint32 rsvd[2];
+ /** XOR checksum or a magic number to audit DMA done */
+ dma_done_t marker;
+} tx_flowring_flush_response_t;
+
+/** Common layout of all d2h control messages */
+typedef struct ctrl_compl_msg {
+ /** common message header */
+ cmn_msg_hdr_t cmn_hdr;
+ /** completion message header */
+ compl_msg_hdr_t compl_hdr;
+ uint32 rsvd[2];
+ /** XOR checksum or a magic number to audit DMA done */
+ dma_done_t marker;
+} ctrl_compl_msg_t;
+
+typedef struct ring_config_resp {
+ /** common message header */
+ cmn_msg_hdr_t cmn_hdr;
+ /** completion message header */
+ compl_msg_hdr_t compl_hdr;
+ uint16 subtype;
+ uint16 rsvd[3];
+ /** XOR checksum or a magic number to audit DMA done */
+ dma_done_t marker;
+} ring_config_resp_t;
+
+typedef struct d2h_mailbox_data {
+ cmn_msg_hdr_t msg;
+ compl_msg_hdr_t cmplt;
+ uint32 d2h_mailbox_data;
+ uint32 rsvd[1];
+ /* XOR checksum or a magic number to audit DMA done */
+ dma_done_t marker;
+} d2h_mailbox_data_t;
+
+/* dbg buf completion msg: send from device to host */
+typedef struct info_buf_resp {
+ /* common message header */
+ cmn_msg_hdr_t cmn_hdr;
+ /* completion message header */
+ compl_msg_hdr_t compl_hdr;
+ /* event data len valid with the event buffer */
+ uint16 info_data_len;
+ /* sequence number */
+ uint16 seqnum;
+ /* destination */
+ uint8 dest;
+ /* rsvd */
+ uint8 rsvd[3];
+ /* XOR checksum or a magic number to audit DMA done */
+ dma_done_t marker;
+} info_buf_resp_t;
+
+/* snapshot completion msg: send from device to host */
+typedef struct snapshot_resp {
+ /* common message header */
+ cmn_msg_hdr_t cmn_hdr;
+ /* completion message header */
+ compl_msg_hdr_t compl_hdr;
+ /* snapshot length uploaded */
+ uint32 resp_len;
+ /* snapshot type */
+ uint8 type;
+ /* rsvd */
+ uint8 rsvd[3];
+ /* XOR checksum or a magic number to audit DMA done */
+ dma_done_t marker;
+} snapshot_resp_t;
+
+typedef struct info_ring_cpl_item {
+ info_buf_resp_t info_buf_post;
+ unsigned char check[D2HRING_INFO_BUFCMPLT_ITEMSIZE];
+} info_cpl_item_t;
+
+typedef struct host_timestamp_msg_cpl {
+ cmn_msg_hdr_t msg;
+ compl_msg_hdr_t cmplt;
+ uint16 xt_id; /* transaction ID */
+ uint16 rsvd;
+ uint32 rsvd1;
+ /* XOR checksum or a magic number to audit DMA done */
+ dma_done_t marker;
+} host_timestamp_msg_cpl_t;
+
+typedef struct fw_timestamp_event_msg {
+ cmn_msg_hdr_t msg;
+ compl_msg_hdr_t cmplt;
+ /* fw captures time stamp info and passes it to the host in TLVs */
+ uint16 buf_len; /* length of the time stamp data copied in host buf */
+ uint16 seqnum; /* number of times fw captured time stamp */
+ uint32 rsvd;
+ /* XOR checksum or a magic number to audit DMA done */
+ dma_done_t marker;
+} fw_timestamp_event_msg_t;
+
+typedef union ctrl_completion_item {
+ ioctl_comp_resp_msg_t ioctl_resp;
+ wlevent_req_msg_t event;
+ ioctl_req_ack_msg_t ioct_ack;
+ pcie_dmaxfer_cmplt_t pcie_xfer_cmplt;
+ pcie_gen_status_t pcie_gen_status;
+ pcie_ring_status_t pcie_ring_status;
+ tx_flowring_create_response_t txfl_create_resp;
+ tx_flowring_delete_response_t txfl_delete_resp;
+ tx_flowring_flush_response_t txfl_flush_resp;
+ ctrl_compl_msg_t ctrl_compl;
+ ring_config_resp_t ring_config_resp;
+ d2h_mailbox_data_t d2h_mailbox_data;
+ info_buf_resp_t dbg_resp;
+ h2d_ring_create_response_t h2d_ring_create_resp;
+ d2h_ring_create_response_t d2h_ring_create_resp;
+ host_timestamp_msg_cpl_t host_ts_cpl;
+ fw_timestamp_event_msg_t fw_ts_event;
+ h2d_ring_delete_response_t h2d_ring_delete_resp;
+ d2h_ring_delete_response_t d2h_ring_delete_resp;
+ unsigned char ctrl_response[D2HRING_CTRL_CMPLT_ITEMSIZE];
+} ctrl_completion_item_t;
+
+/** H2D Rxpost ring work items */
+typedef struct host_rxbuf_post {
+ /** common message header */
+ cmn_msg_hdr_t cmn_hdr;
+ /** provided meta data buffer len */
+ uint16 metadata_buf_len;
+ /** provided data buffer len to receive data */
+ uint16 data_buf_len;
+ /** alignment to make the host buffers start on 8 byte boundary */
+ uint32 rsvd;
+ /** provided meta data buffer */
+ bcm_addr64_t metadata_buf_addr;
+ /** provided data buffer to receive data */
+ bcm_addr64_t data_buf_addr;
+} host_rxbuf_post_t;
+
+typedef union rxbuf_submit_item {
+ host_rxbuf_post_t rxpost;
+ unsigned char check[H2DRING_RXPOST_ITEMSIZE];
+} rxbuf_submit_item_t;
+
+/* D2H Rxcompletion ring work items for IPC rev7 */
+typedef struct host_rxbuf_cmpl {
+ /** common message header */
+ cmn_msg_hdr_t cmn_hdr;
+ /** completion message header */
+ compl_msg_hdr_t compl_hdr;
+ /** filled up meta data len */
+ uint16 metadata_len;
+ /** filled up buffer len to receive data */
+ uint16 data_len;
+ /** offset in the host rx buffer where the data starts */
+ uint16 data_offset;
+ /** rx flags */
+ uint16 flags;
+ /** rx status */
+ uint32 rx_status_0;
+ uint32 rx_status_1;
+
+ union { /* size per IPC = (3 x uint32) bytes */
+ struct {
+ /* used by Monitor mode */
+ uint32 marker;
+ /* timestamp */
+ ipc_timestamp_t ts;
+ };
+
+ /* LatTS_With_XORCSUM */
+ struct {
+ /* latency timestamp */
+ pktts_t rx_pktts;
+ /* XOR checksum or a magic number to audit DMA done */
+ dma_done_t marker_ext;
+ };
+ };
+} host_rxbuf_cmpl_t;
+
+typedef union rxbuf_complete_item {
+ host_rxbuf_cmpl_t rxcmpl;
+ unsigned char check[D2HRING_RXCMPLT_ITEMSIZE];
+} rxbuf_complete_item_t;
+
+typedef struct host_txbuf_post_v1 {
+ /** common message header */
+ cmn_msg_hdr_t cmn_hdr;
+ /** eth header */
+ uint8 txhdr[ETHER_HDR_LEN];
+ /** flags */
+ uint8 flags;
+ /** number of segments */
+ uint8 seg_cnt;
+
+ /** provided meta data buffer for txstatus */
+ bcm_addr64_t metadata_buf_addr;
+ /** provided data buffer containing Tx payload */
+ bcm_addr64_t data_buf_addr;
+ /** provided meta data buffer len */
+ uint16 metadata_buf_len;
+ /** provided data buffer len */
+ uint16 data_len;
+ union {
+ struct {
+ /** extended transmit flags */
+ uint8 ext_flags;
+ uint8 scale_factor;
+
+ /** user defined rate */
+ uint8 rate;
+ uint8 exp_time;
+ };
+ /** XOR checksum or a magic number to audit DMA done */
+ dma_done_t marker;
+ };
+} host_txbuf_post_v1_t;
+
+typedef enum pkt_csum_type_shift {
+ PKT_CSUM_TYPE_IPV4_SHIFT = 0, /* pkt has IPv4 hdr */
+ PKT_CSUM_TYPE_IPV6_SHIFT = 1, /* pkt has IPv6 hdr */
+ PKT_CSUM_TYPE_TCP_SHIFT = 2, /* pkt has TCP hdr */
+ PKT_CSUM_TYPE_UDP_SHIFT = 3, /* pkt has UDP hdr */
+ PKT_CSUM_TYPE_NWK_CSUM_SHIFT = 4, /* pkt requires IP csum offload */
+ PKT_CSUM_TYPE_TRANS_CSUM_SHIFT = 5, /* pkt requires TCP/UDP csum offload */
+ PKT_CSUM_TYPE_PSEUDOHDR_CSUM_SHIFT = 6, /* pkt requires pseudo header csum offload */
+} pkt_type_shift_t;
+
+typedef struct pkt_info_cso {
+ /* packet csum type = ipv4/v6|udp|tcp|nwk_csum|trans_csum|ph_csum */
+ uint8 ver;
+ uint8 pkt_csum_type;
+ uint8 nwk_hdr_len; /* IP header length */
+ uint8 trans_hdr_len; /* TCP header length */
+} pkt_info_cso_t;
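+
+/* Illustrative sketch (not part of the driver): filling pkt_info_cso_t for an
+ * IPv4/TCP packet that wants both network and transport checksum offload.
+ * The version value of 1 and the guard macro are assumptions, examples only.
+ */
+#if defined(BCMMSGBUF_EXAMPLES)
+static void
+pkt_info_cso_example(pkt_info_cso_t *cso, uint8 ip_hdr_len, uint8 tcp_hdr_len)
+{
+	cso->ver = 1; /* assumed version for this sketch */
+	cso->pkt_csum_type = (1u << PKT_CSUM_TYPE_IPV4_SHIFT) |
+	        (1u << PKT_CSUM_TYPE_TCP_SHIFT) |
+	        (1u << PKT_CSUM_TYPE_NWK_CSUM_SHIFT) |
+	        (1u << PKT_CSUM_TYPE_TRANS_CSUM_SHIFT);
+	cso->nwk_hdr_len = ip_hdr_len;
+	cso->trans_hdr_len = tcp_hdr_len;
+}
+#endif /* BCMMSGBUF_EXAMPLES */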
+
+typedef struct host_txbuf_post_v2 {
+ /** common message header */
+ cmn_msg_hdr_t cmn_hdr;
+ /** eth header */
+ uint8 txhdr[ETHER_HDR_LEN];
+ /** flags */
+ uint8 flags;
+ /** number of segments */
+ uint8 seg_cnt;
+
+ /** provided meta data buffer for txstatus */
+ bcm_addr64_t metadata_buf_addr;
+ /** provided data buffer containing Tx payload */
+ bcm_addr64_t data_buf_addr;
+ /** provided meta data buffer len */
+ uint16 metadata_buf_len;
+ /** provided data buffer len */
+ uint16 data_len;
+ struct {
+ /** extended transmit flags */
+ uint8 ext_flags;
+ uint8 scale_factor;
+
+ /** user defined rate */
+ uint8 rate;
+ uint8 exp_time;
+ };
+ /** additional information on the packet required for CSO */
+ pkt_info_cso_t pktinfo;
+ uint32 PAD;
+} host_txbuf_post_v2_t;
+
+#if defined(BCMPCIE_EXT_TXPOST_SUPPORT) || defined(TX_CSO)
+typedef host_txbuf_post_v2_t host_txbuf_post_t;
+#else
+typedef host_txbuf_post_v1_t host_txbuf_post_t;
+#endif
+
+#define BCMPCIE_PKT_FLAGS_FRAME_802_3 0x01
+#define BCMPCIE_PKT_FLAGS_FRAME_802_11 0x02
+
+#define BCMPCIE_PKT_FLAGS_FRAME_NORETRY 0x01 /* Disable retry on this frame */
+#define BCMPCIE_PKT_FLAGS_FRAME_NOAGGR 0x02 /* Disable aggregation for this frame */
+#define BCMPCIE_PKT_FLAGS_FRAME_UDR 0x04 /* User defined rate for this frame */
+#define BCMPCIE_PKT_FLAGS_FRAME_ATTR_MASK 0x07 /* Attribute mask */
+
+#define BCMPCIE_PKT_FLAGS_FRAME_EXEMPT_MASK 0x03 /* Exempt uses 2 bits */
+#define BCMPCIE_PKT_FLAGS_FRAME_EXEMPT_SHIFT 0x02 /* needs to be shifted past other bits */
+
+#define BCMPCIE_PKT_FLAGS_EPOCH_SHIFT 3u
+#define BCMPCIE_PKT_FLAGS_EPOCH_MASK (1u << BCMPCIE_PKT_FLAGS_EPOCH_SHIFT)
+
+#define BCMPCIE_PKT_FLAGS_PRIO_SHIFT 5
+#define BCMPCIE_PKT_FLAGS_PRIO_MASK (7 << BCMPCIE_PKT_FLAGS_PRIO_SHIFT)
+#define BCMPCIE_PKT_FLAGS_MONITOR_NO_AMSDU 0x00
+#define BCMPCIE_PKT_FLAGS_MONITOR_FIRST_PKT 0x01
+#define BCMPCIE_PKT_FLAGS_MONITOR_INTER_PKT 0x02
+#define BCMPCIE_PKT_FLAGS_MONITOR_LAST_PKT 0x03
+#define BCMPCIE_PKT_FLAGS_MONITOR_SHIFT 8
+#define BCMPCIE_PKT_FLAGS_MONITOR_MASK (3 << BCMPCIE_PKT_FLAGS_MONITOR_SHIFT)
+
+#define BCMPCIE_PKT_FLAGS_FRAME_MESH 0x400u
+/* Indicate RX checksum verified and passed */
+#define BCMPCIE_PKT_FLAGS_RCSUM_VALID 0x800u
+
+/* These are added to fix up compile issues */
+#define BCMPCIE_TXPOST_FLAGS_FRAME_802_3 BCMPCIE_PKT_FLAGS_FRAME_802_3
+#define BCMPCIE_TXPOST_FLAGS_FRAME_802_11 BCMPCIE_PKT_FLAGS_FRAME_802_11
+#define BCMPCIE_TXPOST_FLAGS_PRIO_SHIFT BCMPCIE_PKT_FLAGS_PRIO_SHIFT
+#define BCMPCIE_TXPOST_FLAGS_PRIO_MASK BCMPCIE_PKT_FLAGS_PRIO_MASK
+
+#define BCMPCIE_TXPOST_FLAGS_HOST_SFH_LLC 0x10u
+#define BCMPCIE_TXPOST_RATE_EXT_USAGE 0x80 /* The rate field has extended usage */
+#define BCMPCIE_TXPOST_RATE_PROFILE_IDX_MASK 0x07 /* The Tx profile index in the rate field */
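+
+/* Illustrative sketch (not part of the driver): composing a txpost flags byte
+ * from the frame type, 802.1D priority and epoch bit with the masks/shifts
+ * above. The guard macro is hypothetical, examples only.
+ */
+#if defined(BCMMSGBUF_EXAMPLES)
+static uint8
+txpost_flags_example(uint8 prio, uint8 epoch)
+{
+	uint8 flags = BCMPCIE_PKT_FLAGS_FRAME_802_3;
+	flags |= (prio << BCMPCIE_PKT_FLAGS_PRIO_SHIFT) & BCMPCIE_PKT_FLAGS_PRIO_MASK;
+	flags |= (epoch << BCMPCIE_PKT_FLAGS_EPOCH_SHIFT) & BCMPCIE_PKT_FLAGS_EPOCH_MASK;
+	return flags;
+}
+#endif /* BCMMSGBUF_EXAMPLES */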
+
+/* H2D Txpost ring work items */
+typedef union txbuf_submit_item {
+ host_txbuf_post_t txpost;
+ unsigned char check[H2DRING_TXPOST_ITEMSIZE];
+} txbuf_submit_item_t;
+
+/* D2H Txcompletion ring work items - extended for IPC rev7 */
+typedef struct host_txbuf_cmpl {
+ /** common message header */
+ cmn_msg_hdr_t cmn_hdr;
+ /** completion message header */
+ compl_msg_hdr_t compl_hdr;
+
+ union { /* size per IPC = (3 x uint32) bytes */
+ /* Usage 1: TxS_With_TimeSync */
+ struct {
+ struct {
+ union {
+ /** provided meta data len */
+ uint16 metadata_len;
+ /** provided extended TX status */
+ uint16 tx_status_ext;
+ }; /* Ext_TxStatus */
+
+ /** WLAN side txstatus */
+ uint16 tx_status;
+ }; /* TxS */
+ /* timestamp */
+ ipc_timestamp_t ts;
+ }; /* TxS_with_TS */
+
+ /* Usage 2: LatTS_With_XORCSUM */
+ struct {
+ /* latency timestamp */
+ pktts_t tx_pktts;
+ /* XOR checksum or a magic number to audit DMA done */
+ dma_done_t marker_ext;
+ };
+ };
+
+} host_txbuf_cmpl_t;
+
+typedef union txbuf_complete_item {
+ host_txbuf_cmpl_t txcmpl;
+ unsigned char check[D2HRING_TXCMPLT_ITEMSIZE];
+} txbuf_complete_item_t;
+
+#define METADATA_VER_1 1u
+#define METADATA_VER_2 2u
+#define PCIE_METADATA_VER METADATA_VER_2
+
+/* version and length are not part of this structure.
+ * dhd queries version and length through bus iovar "bus:metadata_info".
+ */
+struct metadata_txcmpl_v1 {
+ uint32 tref; /* TSF or Ref Clock in uSecs */
+ uint16 d_t2; /* T2-fwt1 delta */
+ uint16 d_t3; /* T3-fwt1 delta */
+ uint16 d_t4; /* T4-fwt1 delta */
+ uint16 rsvd; /* reserved */
+};
+
+struct metadata_txcmpl_v2 {
+ uint32 tref; /* TSF or Ref Clock in uSecs */
+ uint16 d_t2; /* T2-fwt1 delta */
+ uint16 d_t3; /* T3-fwt1 delta */
+ uint16 d_t4; /* T4-fwt1 delta */
+
+ uint16 u_t1; /* PSM Packet Fetch Time in 32us */
+ uint16 u_t2; /* Medium Access Delay delta */
+ uint16 u_t3; /* Rx duration delta */
+ uint16 u_t4; /* Mac Suspend Duration delta */
+ uint16 u_t5; /* TxStatus Time in 32us */
+
+ uint16 u_c1; /* Number of times Tx was enabled */
+ uint16 u_c2; /* Other AC TxStatus count */
+ uint16 u_c3; /* DataRetry count */
+ uint16 u_c4; /* RTS */
+ uint16 u_c5; /* CTS */
+ uint16 u_c6; /* debug 1 */
+ uint16 u_c7; /* debug 2 */
+ uint16 u_c8; /* debug 3 */
+};
+typedef struct metadata_txcmpl_v2 metadata_txcmpl_t;
+
+#define BCMPCIE_D2H_METADATA_HDRLEN 4
+#define BCMPCIE_D2H_METADATA_MINLEN (BCMPCIE_D2H_METADATA_HDRLEN + 4)
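+
+/* Illustrative sketch (not part of the driver): minimal length check on a
+ * d2h metadata blob. Recall that dhd learns the actual version/length layout
+ * via the "bus:metadata_info" bus iovar noted above; the guard macro is
+ * hypothetical, examples only.
+ */
+#if defined(BCMMSGBUF_EXAMPLES)
+static bool
+d2h_metadata_len_ok_example(uint32 metadata_len)
+{
+	/* must carry at least the 4-byte header plus one payload word */
+	return (metadata_len >= BCMPCIE_D2H_METADATA_MINLEN);
+}
+#endif /* BCMMSGBUF_EXAMPLES */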
+
+/** ret buf struct */
+typedef struct ret_buf_ptr {
+ uint32 low_addr;
+ uint32 high_addr;
+} ret_buf_t;
+
+#ifdef PCIE_API_REV1
+
+/* ioctl specific hdr */
+typedef struct ioctl_hdr {
+ uint16 cmd;
+ uint16 retbuf_len;
+ uint32 cmd_id;
+} ioctl_hdr_t;
+
+typedef struct ioctlptr_hdr {
+ uint16 cmd;
+ uint16 retbuf_len;
+ uint16 buflen;
+ uint16 rsvd;
+ uint32 cmd_id;
+} ioctlptr_hdr_t;
+
+#else /* PCIE_API_REV1 */
+
+typedef struct ioctl_req_hdr {
+ uint32 pkt_id; /**< Packet ID */
+ uint32 cmd; /**< IOCTL ID */
+ uint16 retbuf_len;
+ uint16 buflen;
+ uint16 xt_id; /**< transaction ID */
+ uint16 rsvd[1];
+} ioctl_req_hdr_t;
+
+#endif /* PCIE_API_REV1 */
+
+/** Complete msgbuf hdr for ioctl from host to dongle */
+typedef struct ioct_reqst_hdr {
+ cmn_msg_hdr_t msg;
+#ifdef PCIE_API_REV1
+ ioctl_hdr_t ioct_hdr;
+#else
+ ioctl_req_hdr_t ioct_hdr;
+#endif
+ ret_buf_t ret_buf;
+} ioct_reqst_hdr_t;
+
+typedef struct ioctptr_reqst_hdr {
+ cmn_msg_hdr_t msg;
+#ifdef PCIE_API_REV1
+ ioctlptr_hdr_t ioct_hdr;
+#else
+ ioctl_req_hdr_t ioct_hdr;
+#endif
+ ret_buf_t ret_buf;
+ ret_buf_t ioct_buf;
+} ioctptr_reqst_hdr_t;
+
+/** ioctl response header */
+typedef struct ioct_resp_hdr {
+ cmn_msg_hdr_t msg;
+#ifdef PCIE_API_REV1
+ uint32 cmd_id;
+#else
+ uint32 pkt_id;
+#endif
+ uint32 status;
+ uint32 ret_len;
+ uint32 inline_data;
+#ifdef PCIE_API_REV1
+#else
+ uint16 xt_id; /**< transaction ID */
+ uint16 rsvd[1];
+#endif
+} ioct_resp_hdr_t;
+
+/* ioct resp header used in dongle */
+/* ret buf hdr will be stripped off inside dongle itself */
+typedef struct msgbuf_ioctl_resp {
+ ioct_resp_hdr_t ioct_hdr;
+ ret_buf_t ret_buf; /**< ret buf pointers */
+} msgbuf_ioct_resp_t;
+
+/** WL event hdr info */
+typedef struct wl_event_hdr {
+ cmn_msg_hdr_t msg;
+ uint16 event;
+ uint8 flags;
+ uint8 rsvd;
+ uint16 retbuf_len;
+ uint16 rsvd1;
+ uint32 rxbufid;
+} wl_event_hdr_t;
+
+#define TXDESCR_FLOWID_PCIELPBK_1 0xFF
+#define TXDESCR_FLOWID_PCIELPBK_2 0xFE
+
+typedef struct txbatch_lenptr_tup {
+ uint32 pktid;
+ uint16 pktlen;
+ uint16 rsvd;
+ ret_buf_t ret_buf; /**< ret buf pointers */
+} txbatch_lenptr_tup_t;
+
+typedef struct txbatch_cmn_msghdr {
+ cmn_msg_hdr_t msg;
+ uint8 priority;
+ uint8 hdrlen;
+ uint8 pktcnt;
+ uint8 flowid;
+ uint8 txhdr[ETHER_HDR_LEN];
+ uint16 rsvd;
+} txbatch_cmn_msghdr_t;
+
+typedef struct txbatch_msghdr {
+ txbatch_cmn_msghdr_t txcmn;
+ txbatch_lenptr_tup_t tx_tup[0]; /**< Based on packet count */
+} txbatch_msghdr_t;
+
+/* TX desc posting header */
+typedef struct tx_lenptr_tup {
+ uint16 pktlen;
+ uint16 rsvd;
+ ret_buf_t ret_buf; /**< ret buf pointers */
+} tx_lenptr_tup_t;
+
+typedef struct txdescr_cmn_msghdr {
+ cmn_msg_hdr_t msg;
+ uint8 priority;
+ uint8 hdrlen;
+ uint8 descrcnt;
+ uint8 flowid;
+ uint32 pktid;
+} txdescr_cmn_msghdr_t;
+
+typedef struct txdescr_msghdr {
+ txdescr_cmn_msghdr_t txcmn;
+ uint8 txhdr[ETHER_HDR_LEN];
+ uint16 rsvd;
+ tx_lenptr_tup_t tx_tup[0]; /**< Based on descriptor count */
+} txdescr_msghdr_t;
+
+/** Tx status header info */
+typedef struct txstatus_hdr {
+ cmn_msg_hdr_t msg;
+ uint32 pktid;
+} txstatus_hdr_t;
+
+/** RX bufid-len-ptr tuple */
+typedef struct rx_lenptr_tup {
+ uint32 rxbufid;
+ uint16 len;
+ uint16 rsvd2;
+ ret_buf_t ret_buf; /**< ret buf pointers */
+} rx_lenptr_tup_t;
+
+/** Rx descr Post hdr info */
+typedef struct rxdesc_msghdr {
+ cmn_msg_hdr_t msg;
+ uint16 rsvd0;
+ uint8 rsvd1;
+ uint8 descnt;
+ rx_lenptr_tup_t rx_tup[0];
+} rxdesc_msghdr_t;
+
+/** RX complete tuples */
+typedef struct rxcmplt_tup {
+ uint16 retbuf_len;
+ uint16 data_offset;
+ uint32 rxstatus0;
+ uint32 rxstatus1;
+ uint32 rxbufid;
+} rxcmplt_tup_t;
+
+/** RX complete message hdr */
+typedef struct rxcmplt_hdr {
+ cmn_msg_hdr_t msg;
+ uint16 rsvd0;
+ uint16 rxcmpltcnt;
+ rxcmplt_tup_t rx_tup[0];
+} rxcmplt_hdr_t;
+
+typedef struct hostevent_hdr {
+ cmn_msg_hdr_t msg;
+ uint32 evnt_pyld;
+} hostevent_hdr_t;
+
+typedef struct dma_xfer_params {
+ uint32 src_physaddr_hi;
+ uint32 src_physaddr_lo;
+ uint32 dest_physaddr_hi;
+ uint32 dest_physaddr_lo;
+ uint32 len;
+ uint32 srcdelay;
+ uint32 destdelay;
+} dma_xfer_params_t;
+
+enum {
+ HOST_EVENT_CONS_CMD = 1
+};
+
+/* defines for flags */
+#define MSGBUF_IOC_ACTION_MASK 0x1
+
+#define MAX_SUSPEND_REQ 15
+
+typedef struct tx_idle_flowring_suspend_request {
+ cmn_msg_hdr_t msg;
+ uint16 ring_id[MAX_SUSPEND_REQ]; /* ring Id's */
+ uint16 num; /* number of flowid's to suspend */
+} tx_idle_flowring_suspend_request_t;
+
+typedef struct tx_idle_flowring_suspend_response {
+ cmn_msg_hdr_t msg;
+ compl_msg_hdr_t cmplt;
+ uint32 rsvd[2];
+ dma_done_t marker;
+} tx_idle_flowring_suspend_response_t;
+
+typedef struct tx_idle_flowring_resume_request {
+ cmn_msg_hdr_t msg;
+ uint16 flow_ring_id;
+ uint16 reason;
+ uint32 rsvd[7];
+} tx_idle_flowring_resume_request_t;
+
+typedef struct tx_idle_flowring_resume_response {
+ cmn_msg_hdr_t msg;
+ compl_msg_hdr_t cmplt;
+ uint32 rsvd[2];
+ dma_done_t marker;
+} tx_idle_flowring_resume_response_t;
+
+/* timesync related additions */
+
+/* defined similar to bcm_xtlv_t */
+typedef struct _bcm_xtlv {
+ uint16 id; /* TLV identifier */
+ uint16 len; /* TLV length in bytes */
+} _bcm_xtlv_t;
+
+#define BCMMSGBUF_FW_CLOCK_INFO_TAG 0
+#define BCMMSGBUF_HOST_CLOCK_INFO_TAG 1
+#define BCMMSGBUF_HOST_CLOCK_SELECT_TAG 2
+#define BCMMSGBUF_D2H_CLOCK_CORRECTION_TAG 3
+#define BCMMSGBUF_HOST_TIMESTAMPING_CONFIG_TAG 4
+#define BCMMSGBUF_MAX_TSYNC_TAG 5
+
+/* Flags in fw clock info TLV */
+#define CAP_DEVICE_TS (1 << 0)
+#define CAP_CORRECTED_TS (1 << 1)
+#define TS_CLK_ACTIVE (1 << 2)
+
+typedef struct ts_fw_clock_info {
+ _bcm_xtlv_t xtlv; /* BCMMSGBUF_FW_CLOCK_INFO_TAG */
+ ts_timestamp_srcid_t ts; /* tick count */
+ uchar clk_src[4]; /* clock source acronym ILP/AVB/TSF */
+ uint32 nominal_clock_freq;
+ uint32 reset_cnt;
+ uint8 flags;
+ uint8 rsvd[3];
+} ts_fw_clock_info_t;
+
+typedef struct ts_host_clock_info {
+ _bcm_xtlv_t xtlv; /* BCMMSGBUF_HOST_CLOCK_INFO_TAG */
+ tick_count_64_t ticks; /* 64 bit host tick counter */
+ ts_timestamp_ns_64_t ns; /* 64 bit host time in nano seconds */
+} ts_host_clock_info_t;
+
+typedef struct ts_host_clock_sel {
+ _bcm_xtlv_t xtlv; /* BCMMSGBUF_HOST_CLOCK_SELECT_TAG */
+ uint32 seqnum; /* number of times GPIO time sync toggled */
+ uint8 min_clk_idx; /* clock identifier configured for packet time stamping */
+ uint8 max_clk_idx; /* clock identifier configured for packet time stamping */
+ uint16 rsvd[1];
+} ts_host_clock_sel_t;
+
+typedef struct ts_d2h_clock_correction {
+ _bcm_xtlv_t xtlv; /* BCMMSGBUF_HOST_CLOCK_INFO_TAG */
+ uint8 clk_id; /* clock source in the device */
+ uint8 rsvd[3];
+ ts_correction_m_t m; /* y = 'm' x + b */
+ ts_correction_b_t b; /* y = 'm' x + 'b' */
+} ts_d2h_clock_correction_t;
+
+typedef struct ts_host_timestamping_config {
+ _bcm_xtlv_t xtlv; /* BCMMSGBUF_HOST_TIMESTAMPING_CONFIG_TAG */
+ /* time period to capture the device time stamp and toggle WLAN_TIME_SYNC_GPIO */
+ uint16 period_ms;
+ uint8 flags;
+ uint8 post_delay;
+ uint32 reset_cnt;
+} ts_host_timestamping_config_t;
+
+/* Flags in host timestamping config TLV */
+#define FLAG_HOST_RESET (1 << 0)
+#define IS_HOST_RESET(x) ((x) & FLAG_HOST_RESET)
+#define CLEAR_HOST_RESET(x) ((x) & ~FLAG_HOST_RESET)
+
+#define FLAG_CONFIG_NODROP (1 << 1)
+#define IS_CONFIG_NODROP(x) ((x) & FLAG_CONFIG_NODROP)
+#define CLEAR_CONFIG_NODROP(x) ((x) & ~FLAG_CONFIG_NODROP)
+
+/* HP2P RLLW Extended TxStatus info when host enables the same */
+#define D2H_TXSTATUS_EXT_PKT_WITH_OVRRD 0x8000 /**< set when pkt had override bit on */
+#define D2H_TXSTATUS_EXT_PKT_XMIT_ON5G 0x4000 /**< set when pkt xmitted on 5G */
+#define D2H_TXSTATUS_EXT_PKT_BT_DENY 0x2000 /**< set when WLAN is given prio over BT */
+#define D2H_TXSTATUS_EXT_PKT_NAV_SWITCH 0x1000 /**< set when band switched due to NAV intr */
+#define D2H_TXSTATUS_EXT_PKT_HOF_SWITCH 0x0800 /**< set when band switched due to HOF intr */
+
+/* H2D Txpost aggregated work item */
+#define TXBUF_AGGR_CNT (2u)
+
+/* aggregated work item of txpost v2 */
+typedef struct host_txbuf_post_aggr_v2 {
+ /** common aggregated message header */
+ cmn_aggr_msg_hdr_t cmn_aggr_hdr;
+
+ /** data buffer len to transmit */
+ uint16 data_buf_len[TXBUF_AGGR_CNT];
+
+ /** address of data buffer to transmit */
+ bcm_addr64_t data_buf_addr[TXBUF_AGGR_CNT];
+
+ /** packet Identifier for the associated host buffer */
+ uint32 request_id[TXBUF_AGGR_CNT];
+
+ /** eth header */
+ uint8 txhdr[ETHER_HDR_LEN];
+
+ /* reserved bytes */
+ uint16 reserved;
+
+ /** additional information on the packet required for CSO */
+ pkt_info_cso_t pktinfo[TXBUF_AGGR_CNT];
+} host_txbuf_post_aggr_v2_t;
+
+/* aggregated work item of txpost v1 */
+typedef struct host_txbuf_post_aggr_v1 {
+ /** common aggregated message header */
+ cmn_aggr_msg_hdr_t cmn_aggr_hdr;
+
+ /** data buffer len to transmit */
+ uint16 data_buf_len[TXBUF_AGGR_CNT];
+
+ /** address of data buffer to transmit */
+ bcm_addr64_t data_buf_addr[TXBUF_AGGR_CNT];
+
+ /** packet Identifier for the associated host buffer */
+ uint32 request_id[TXBUF_AGGR_CNT];
+
+ /** eth header */
+ uint8 txhdr[ETHER_HDR_LEN];
+
+ /* pad bytes */
+ uint16 PAD;
+} host_txbuf_post_aggr_v1_t;
+
+#if defined(BCMPCIE_EXT_TXPOST_SUPPORT) || defined(TX_CSO)
+typedef host_txbuf_post_aggr_v2_t host_txbuf_post_aggr_t;
+#else
+typedef host_txbuf_post_aggr_v1_t host_txbuf_post_aggr_t;
+#endif
+
+/* D2H Txcompletion ring aggregated work item */
+#define TXCPL_AGGR_CNT (4u)
+
+/* head aggregated work item of txcpl */
+typedef struct host_txbuf_cmpl_aggr {
+ /** common aggregated message header */
+ cmn_aggr_msg_hdr_t cmn_aggr_hdr;
+
+ /** completion aggregated message header */
+ compl_aggr_msg_hdr_t compl_aggr_hdr;
+
+ /** packet Identifier for the associated host buffer */
+ uint32 request_id[TXCPL_AGGR_CNT];
+} host_txbuf_cmpl_aggr_t;
+
+#define TXCPL_AGGR_CNT_EXT (6u)
+/* non-head aggregated work item of txcpl */
+typedef struct host_txbuf_cmpl_aggr_ext {
+ /** packet Identifier for the associated host buffer */
+ uint32 request_id[TXCPL_AGGR_CNT_EXT];
+} host_txbuf_cmpl_aggr_ext_t;
+
+/* H2D Rxpost ring aggregated work items */
+#define RXBUF_AGGR_CNT (2u)
+
+/* aggregated work item of rxpost */
+typedef struct host_rxbuf_post_aggr {
+ /** common aggregated message header */
+ cmn_aggr_msg_hdr_t cmn_aggr_hdr;
+
+ /** data buffer len to transmit */
+ uint16 data_buf_len[RXBUF_AGGR_CNT];
+
+ /** packet Identifier for the associated host buffer */
+ uint32 request_id[RXBUF_AGGR_CNT];
+
+ /** address of data buffer to transmit */
+ bcm_addr64_t data_buf_addr[RXBUF_AGGR_CNT];
+} host_rxbuf_post_aggr_t;
+
+/* D2H Rxcompletion ring for aggregated work items */
+#define RXCPL_AGGR_CNT (2u)
+
+/* each rx buffer work item */
+typedef struct host_rxbuf_cmpl_pkt {
+ /** offset in the host rx buffer where the data starts */
+ uint16 data_offset;
+ /** filled up buffer len to receive data */
+ uint16 data_len;
+ /** packet Identifier for the associated host buffer */
+ uint32 request_id;
+} host_rxbuf_cmpl_item_t;
+
+/* head aggregated work item of rxcpl */
+typedef struct host_rxbuf_cmpl_aggr {
+ /** common aggregated message header */
+ cmn_aggr_msg_hdr_t cmn_aggr_hdr;
+
+ /** completion aggregated message header */
+ compl_aggr_msg_hdr_t compl_aggr_hdr;
+
+ /** rxbuffer work item */
+ host_rxbuf_cmpl_item_t item[RXCPL_AGGR_CNT];
+} host_rxbuf_cmpl_aggr_t;
+
+#define RXCPL_AGGR_CNT_EXT (5u)
+/* non-head aggregated work item of rxcpl */
+typedef struct host_rxbuf_cmpl_aggr_ext {
+ /** rxbuffer work item */
+ host_rxbuf_cmpl_item_t item[RXCPL_AGGR_CNT_EXT];
+} host_rxbuf_cmpl_aggr_ext_t;
+
+/* txpost extended tag types */
+typedef uint8 txpost_ext_tag_type_t;
+enum {
+ TXPOST_EXT_TAG_TYPE_RSVD = 0u, /* Reserved */
+ TXPOST_EXT_TAG_TYPE_CSO = 1u,
+ TXPOST_EXT_TAG_TYPE_MESH = 2u,
+ TXPOST_EXT_TAG_TYPE_MAX = 3u /* NOTE: increment this as you add reasons above */
+};
+
+/* Fixed lengths for each extended tag */
+typedef uint8 txpost_ext_tag_len_t;
+enum {
+ TXPOST_EXT_TAG_LEN_RSVD = 0u, /* Reserved */
+ TXPOST_EXT_TAG_LEN_CSO = 4u,
+ TXPOST_EXT_TAG_LEN_MESH = 20u
+};
+
+/*
+ * Note: The only requirement is that the overall size of the work item be a multiple of 8.
+ * Each individual ext tag, however, need not be a multiple of 8 bytes.
+ */
+
+#endif /* _bcmmsgbuf_h_ */
diff --git a/bcmdhd.101.10.361.x/include/bcmnvram.h b/bcmdhd.101.10.361.x/include/bcmnvram.h
new file mode 100755
index 0000000..498fb99
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/bcmnvram.h
@@ -0,0 +1,162 @@
+/*
+ * NVRAM variable manipulation
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _bcmnvram_h_
+#define _bcmnvram_h_
+
+#ifndef _LANGUAGE_ASSEMBLY
+
+#include <typedefs.h>
+
+struct nvram_header {
+ uint32 magic;
+ uint32 len;
+ uint32 crc_ver_init; /* 0:7 crc, 8:15 ver, 16:31 sdram_init */
+ uint32 config_refresh; /* 0:15 sdram_config, 16:31 sdram_refresh */
+ uint32 config_ncdl; /* ncdl values for memc */
+};
+
+struct nvram_tuple {
+ char *name;
+ char *value;
+ struct nvram_tuple *next;
+};
+
+#ifdef BCMDRIVER
+#include <siutils.h>
+
+/*
+ * Initialize NVRAM access. May be unnecessary or undefined on certain
+ * platforms.
+ */
+extern int nvram_init(si_t *sih);
+
+extern int nvram_file_read(char **nvramp, int *nvraml);
+
+/*
+ * Append a chunk of nvram variables to the global list
+ */
+extern int nvram_append(void *si, char *vars, uint varsz, uint16 prio);
+
+/*
+ * Check for reset button press for restoring factory defaults.
+ */
+extern int nvram_reset(si_t *sih);
+
+/*
+ * Disable NVRAM access. May be unnecessary or undefined on certain
+ * platforms.
+ */
+extern void nvram_exit(si_t *sih);
+
+/*
+ * Get the value of an NVRAM variable. The pointer returned may be
+ * invalid after a set.
+ * @param name name of variable to get
+ * @return value of variable or NULL if undefined
+ */
+extern char * nvram_get(const char *name);
+
+/*
+ * Get the value of an NVRAM variable.
+ * @param name name of variable to get
+ * @return value of variable or the empty string ("") if undefined
+ */
+static INLINE char *
+nvram_safe_get(const char *name)
+{
+ char *p = nvram_get(name);
+ return p ? p : "";
+}
+
+/*
+ * Set the value of an NVRAM variable. The name and value strings are
+ * copied into private storage. Pointers to previously set values
+ * may become invalid. The new value may be immediately
+ * retrieved but will not be permanently stored until a commit.
+ * @param name name of variable to set
+ * @param value value of variable
+ * @return 0 on success and errno on failure
+ */
+extern int nvram_set(const char *name, const char *value);
+
+/*
+ * Unset an NVRAM variable. Pointers to previously set values
+ * remain valid until a set.
+ * @param name name of variable to unset
+ * @return 0 on success and errno on failure
+ * NOTE: use nvram_commit to commit this change to flash.
+ */
+extern int nvram_unset(const char *name);
+
+/*
+ * Commit NVRAM variables to permanent storage. All pointers to values
+ * may be invalid after a commit.
+ * NVRAM values are undefined after a commit.
+ * @return 0 on success and errno on failure
+ */
+extern int nvram_commit(void);
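+
+/*
+ * Illustrative sketch (not part of the driver): a typical set/commit sequence
+ * with the accessors declared above. The variable name and the guard macro
+ * are hypothetical, examples only.
+ */
+#if defined(BCMNVRAM_EXAMPLES)
+static int
+nvram_update_example(void)
+{
+	int err;
+	char *cur = nvram_safe_get("wl0_ssid"); /* "" if unset, never NULL */
+	(void)cur;
+	err = nvram_set("wl0_ssid", "example-ssid"); /* staged, not yet persistent */
+	if (err == 0)
+		err = nvram_commit(); /* persist; earlier value pointers become invalid */
+	return err;
+}
+#endif /* BCMNVRAM_EXAMPLES */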
+
+/*
+ * Get all NVRAM variables (format name=value\0 ... \0\0).
+ * @param buf buffer to store variables
+ * @param count size of buffer in bytes
+ * @return 0 on success and errno on failure
+ */
+extern int nvram_getall(char *nvram_buf, int count);
+
+/*
+ * returns the crc value of the nvram
+ * @param nvh nvram header pointer
+ */
+uint8 nvram_calc_crc(struct nvram_header * nvh);
+
+extern void nvram_printall(void);
+
+#endif /* BCMDRIVER */
+#endif /* _LANGUAGE_ASSEMBLY */
+
+#define NVRAM_MAGIC 0x48534C46 /* 'FLSH' */
+#define NVRAM_VERSION 1
+#define NVRAM_HEADER_SIZE 20
+/* This definition is for precommit staging, and will be removed */
+#define NVRAM_SPACE 0x8000
+#define MAX_NVRAM_SPACE 0x10000
+#define DEF_NVRAM_SPACE 0x8000
+#define NVRAM_LZMA_MAGIC 0x4c5a4d41 /* 'LZMA' */
+
+#define NVRAM_MAX_VALUE_LEN 255
+#define NVRAM_MAX_PARAM_LEN 64
+
+#define NVRAM_CRC_START_POSITION 9 /* magic, len, crc8 to be skipped */
+#define NVRAM_CRC_VER_MASK 0xffffff00 /* for crc_ver_init */
+
+#define BCM_JUMBO_NVRAM_DELIMIT '\n'
+#define BCM_JUMBO_START "Broadcom Jumbo Nvram file"
+
+#if defined(BCMSDIODEV) || defined(BCMHOSTVARS)
+extern char *_vars;
+extern uint _varsz;
+#endif
+
+#endif /* _bcmnvram_h_ */
diff --git a/bcmdhd.101.10.361.x/include/bcmpcie.h b/bcmdhd.101.10.361.x/include/bcmpcie.h
new file mode 100755
index 0000000..f464e20
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/bcmpcie.h
@@ -0,0 +1,559 @@
+/*
+ * Broadcom PCIE
+ * Software-specific definitions shared between device and host side
+ * Explains the shared area between host and dongle
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _bcmpcie_h_
+#define _bcmpcie_h_
+
+#include <typedefs.h>
+
+#define ADDR_64(x) (x.addr)
+#define HIGH_ADDR_32(x) ((uint32) (((sh_addr_t) x).high_addr))
+#define LOW_ADDR_32(x) ((uint32) (((sh_addr_t) x).low_addr))
+
+typedef struct {
+ uint32 low_addr;
+ uint32 high_addr;
+} sh_addr_t;
+
+/* May be overridden by 43xxxxx-roml.mk */
+#if !defined(BCMPCIE_MAX_TX_FLOWS)
+#define BCMPCIE_MAX_TX_FLOWS 40
+#endif /* ! BCMPCIE_MAX_TX_FLOWS */
+
+#define PCIE_SHARED_VERSION_9 0x00009
+#define PCIE_SHARED_VERSION_8 0x00008
+#define PCIE_SHARED_VERSION_7 0x00007
+#define PCIE_SHARED_VERSION_6 0x00006 /* rev6 is compatible with rev 5 */
+#define PCIE_SHARED_VERSION_5 0x00005 /* rev6 is compatible with rev 5 */
+/**
+ * Feature flags enabled in dongle. Advertised by dongle to DHD via the PCIe Shared structure that
+ * is located in device memory.
+ */
+#define PCIE_SHARED_VERSION_MASK 0x000FF
+#define PCIE_SHARED_ASSERT_BUILT 0x00100
+#define PCIE_SHARED_ASSERT 0x00200
+#define PCIE_SHARED_TRAP 0x00400
+#define PCIE_SHARED_IN_BRPT 0x00800
+#define PCIE_SHARED_SET_BRPT 0x01000
+#define PCIE_SHARED_PENDING_BRPT 0x02000
+/* BCMPCIE_SUPPORT_TX_PUSH_RING 0x04000 obsolete */
+#define PCIE_SHARED_EVT_SEQNUM 0x08000
+#define PCIE_SHARED_DMA_INDEX 0x10000
+
+/**
+ * There are host types where a device interrupt can 'race ahead' of data written by the device into
+ * host memory. The dongle can avoid this condition using a variety of techniques (read barrier,
+ * using PCIe Message Signalled Interrupts, or by using the PCIE_DMA_INDEX feature). Unfortunately
+ * these techniques have drawbacks on router platforms. For these platforms, it was decided to not
+ * avoid the condition, but to detect the condition instead and act on it.
+ * D2H M2M DMA Complete Sync mechanism: Modulo-253-SeqNum or XORCSUM
+ */
+#define PCIE_SHARED_D2H_SYNC_SEQNUM 0x20000
+#define PCIE_SHARED_D2H_SYNC_XORCSUM 0x40000
+#define PCIE_SHARED_D2H_SYNC_MODE_MASK \
+ (PCIE_SHARED_D2H_SYNC_SEQNUM | PCIE_SHARED_D2H_SYNC_XORCSUM)
+#define PCIE_SHARED_IDLE_FLOW_RING 0x80000
+#define PCIE_SHARED_2BYTE_INDICES 0x100000
+
+#define PCIE_SHARED_FAST_DELETE_RING 0x00000020 /* Fast Delete Ring */
+#define PCIE_SHARED_EVENT_BUF_POOL_MAX 0x000000c0 /* event buffer pool max bits */
+#define PCIE_SHARED_EVENT_BUF_POOL_MAX_POS 6 /* event buffer pool max bit position */
+
+/* dongle supports fatal buf log collection */
+#define PCIE_SHARED_FATAL_LOGBUG_VALID 0x200000
+
+/* Implicit DMA with corerev 19 and after */
+#define PCIE_SHARED_IDMA 0x400000
+
+/* MSI support */
+#define PCIE_SHARED_D2H_MSI_MULTI_MSG 0x800000
+
+/* IFRM with corerev 19 and after */
+#define PCIE_SHARED_IFRM 0x1000000
+
+/**
+ * From Rev6 and above, suspend/resume can be done using two handshake methods.
+ * 1. Using ctrl post/ctrl cmpl messages (Default rev6)
+ * 2. Using Mailbox data (old method as used in rev5)
+ * This shared flag indicates whether to override the rev6 default method and use mailbox for
+ * suspend/resume.
+ */
+#define PCIE_SHARED_USE_MAILBOX 0x2000000
+
+/* Firmware compiled for mfgbuild purposes */
+#define PCIE_SHARED_MFGBUILD_FW 0x4000000
+
+/* Firmware could use DB0 value as host timestamp */
+#define PCIE_SHARED_TIMESTAMP_DB0 0x8000000
+/* Firmware could use Hostready (IPC rev7) */
+#define PCIE_SHARED_HOSTRDY_SUPPORT 0x10000000
+
+/* When set, Firmware does not support OOB Device Wake based DS protocol */
+#define PCIE_SHARED_NO_OOB_DW 0x20000000
+
+/* When set, Firmware supports Inband DS protocol */
+#define PCIE_SHARED_INBAND_DS 0x40000000
+
+/* use DAR registers */
+#define PCIE_SHARED_DAR 0x80000000
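+
+/* Illustrative sketch (not part of the driver): decoding the shared-area
+ * 'flags' word advertised by the dongle: the IPC revision sits in the low
+ * byte and the D2H completion sync mode in its own bits. The guard macro is
+ * hypothetical, examples only.
+ */
+#if defined(BCMPCIE_EXAMPLES)
+static void
+pcie_shared_flags_example(uint32 flags)
+{
+	uint32 ipc_rev = flags & PCIE_SHARED_VERSION_MASK;
+	bool use_seqnum  = (flags & PCIE_SHARED_D2H_SYNC_SEQNUM) != 0;
+	bool use_xorcsum = (flags & PCIE_SHARED_D2H_SYNC_XORCSUM) != 0;
+	(void)ipc_rev; (void)use_seqnum; (void)use_xorcsum;
+}
+#endif /* BCMPCIE_EXAMPLES */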
+
+/**
+ * Following are the shared2 flags. All bits in flags have been used. A flags2
+ * field got added and the definition for these flags come here:
+ */
+/* WAR: D11 txstatus through unused status field of PCIe completion header */
+#define PCIE_SHARED2_EXTENDED_TRAP_DATA 0x00000001 /* using flags2 in shared area */
+#define PCIE_SHARED2_TXSTATUS_METADATA 0x00000002
+#define PCIE_SHARED2_BT_LOGGING 0x00000004 /* BT logging support */
+#define PCIE_SHARED2_SNAPSHOT_UPLOAD 0x00000008 /* BT/WLAN snapshot upload support */
+#define PCIE_SHARED2_SUBMIT_COUNT_WAR 0x00000010 /* submission count WAR */
+#define PCIE_SHARED2_FAST_DELETE_RING 0x00000020 /* Fast Delete ring support */
+#define PCIE_SHARED2_EVTBUF_MAX_MASK 0x000000C0 /* 0:32, 1:64, 2:128, 3: 256 */
+
+/* using flags2 to indicate firmware support added to reuse timesync to update PKT txstatus */
+#define PCIE_SHARED2_PKT_TX_STATUS 0x00000100
+#define PCIE_SHARED2_FW_SMALL_MEMDUMP 0x00000200 /* FW small memdump */
+#define PCIE_SHARED2_FW_HC_ON_TRAP 0x00000400
+#define PCIE_SHARED2_HSCB 0x00000800 /* Host SCB support */
+
+#define PCIE_SHARED2_EDL_RING 0x00001000 /* Support Enhanced Debug Lane */
+#define PCIE_SHARED2_DEBUG_BUF_DEST 0x00002000 /* debug buf dest support */
+#define PCIE_SHARED2_PCIE_ENUM_RESET_FLR 0x00004000 /* BT producer index reset WAR */
+#define PCIE_SHARED2_PKT_TIMESTAMP 0x00008000 /* Timestamp in packet */
+
+#define PCIE_SHARED2_HP2P 0x00010000u /* HP2P feature */
+#define PCIE_SHARED2_HWA 0x00020000u /* HWA feature */
+#define PCIE_SHARED2_TRAP_ON_HOST_DB7 0x00040000u /* can take a trap on DB7 from host */
+
+#define PCIE_SHARED2_DURATION_SCALE 0x00100000u
+#define PCIE_SHARED2_ETD_ADDR_SUPPORT 0x00800000u
+
+#define PCIE_SHARED2_TXCSO 0x00200000u /* Tx Checksum offload support */
+#define PCIE_SHARED2_TXPOST_EXT 0x00400000u /* extended txpost work item support */
+
+#define PCIE_SHARED2_D2H_D11_TX_STATUS 0x40000000
+#define PCIE_SHARED2_H2D_D11_TX_STATUS 0x80000000
+
+#define PCIE_SHARED_D2H_MAGIC 0xFEDCBA09
+#define PCIE_SHARED_H2D_MAGIC 0x12345678
+
+typedef uint16 pcie_hwa_db_index_t; /* 16 bit HWA index (IPC Rev 7) */
+#define PCIE_HWA_DB_INDEX_SZ (2u) /* 2 bytes sizeof(pcie_hwa_db_index_t) */
+
+/**
+ * Message rings convey messages between host and device. They are unidirectional, and are located
+ * in host memory.
+ *
+ * This is the minimal set of message rings, known as 'common message rings':
+ */
+#define BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT 0
+#define BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT 1
+#define BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE 2
+#define BCMPCIE_D2H_MSGRING_TX_COMPLETE 3
+#define BCMPCIE_D2H_MSGRING_RX_COMPLETE 4
+#define BCMPCIE_COMMON_MSGRING_MAX_ID 4
+
+#define BCMPCIE_H2D_COMMON_MSGRINGS 2
+#define BCMPCIE_D2H_COMMON_MSGRINGS 3
+#define BCMPCIE_COMMON_MSGRINGS 5
+
+#define BCMPCIE_H2D_MSGRINGS(max_tx_flows) \
+ (BCMPCIE_H2D_COMMON_MSGRINGS + (max_tx_flows))
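+
+/* Illustrative sketch (not part of the driver): with the default 40 tx flows,
+ * the host sees 2 common H2D rings plus 40 flow rings. The compile-time
+ * check and its guard macro are hypothetical, examples only.
+ */
+#if defined(BCMPCIE_EXAMPLES)
+typedef char h2d_msgring_count_check[(BCMPCIE_H2D_MSGRINGS(40) == 42) ? 1 : -1];
+#endif /* BCMPCIE_EXAMPLES */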
+
+/* different ring types */
+#define BCMPCIE_H2D_RING_TYPE_CTRL_SUBMIT 0x1
+#define BCMPCIE_H2D_RING_TYPE_TXFLOW_RING 0x2
+#define BCMPCIE_H2D_RING_TYPE_RXBUFPOST 0x3
+#define BCMPCIE_H2D_RING_TYPE_TXSUBMIT 0x4
+#define BCMPCIE_H2D_RING_TYPE_DBGBUF_SUBMIT 0x5
+#define BCMPCIE_H2D_RING_TYPE_BTLOG_SUBMIT 0x6
+
+#define BCMPCIE_D2H_RING_TYPE_CTRL_CPL 0x1
+#define BCMPCIE_D2H_RING_TYPE_TX_CPL 0x2
+#define BCMPCIE_D2H_RING_TYPE_RX_CPL 0x3
+#define BCMPCIE_D2H_RING_TYPE_DBGBUF_CPL 0x4
+#define BCMPCIE_D2H_RING_TYPE_AC_RX_COMPLETE 0x5
+#define BCMPCIE_D2H_RING_TYPE_BTLOG_CPL 0x6
+#define BCMPCIE_D2H_RING_TYPE_EDL 0x7
+#define BCMPCIE_D2H_RING_TYPE_HPP_TX_CPL 0x8
+#define BCMPCIE_D2H_RING_TYPE_HPP_RX_CPL 0x9
+
+/**
+ * H2D and D2H, WR and RD index, are maintained in the following arrays:
+ * - Array of all H2D WR Indices
+ * - Array of all H2D RD Indices
+ * - Array of all D2H WR Indices
+ * - Array of all D2H RD Indices
+ *
+ * The offset of the WR or RD indexes (for common rings) in these arrays are
+ * listed below. Arrays ARE NOT indexed by a ring's id.
+ *
+ * D2H common rings WR and RD index start from 0, even though their ringids
+ * start from BCMPCIE_H2D_COMMON_MSGRINGS
+ */
+
+#define BCMPCIE_H2D_RING_IDX(h2d_ring_id) (h2d_ring_id)
+
+enum h2dring_idx {
+ /* H2D common rings */
+ BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT_IDX =
+ BCMPCIE_H2D_RING_IDX(BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT),
+ BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT_IDX =
+ BCMPCIE_H2D_RING_IDX(BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT),
+
+ /* First TxPost's WR or RD index starts after all H2D common rings */
+ BCMPCIE_H2D_MSGRING_TXFLOW_IDX_START =
+ BCMPCIE_H2D_RING_IDX(BCMPCIE_H2D_COMMON_MSGRINGS)
+};
+
+#define BCMPCIE_D2H_RING_IDX(d2h_ring_id) \
+ ((d2h_ring_id) - BCMPCIE_H2D_COMMON_MSGRINGS)
+
+enum d2hring_idx {
+ /* D2H Common Rings */
+ BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE_IDX =
+ BCMPCIE_D2H_RING_IDX(BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE),
+ BCMPCIE_D2H_MSGRING_TX_COMPLETE_IDX =
+ BCMPCIE_D2H_RING_IDX(BCMPCIE_D2H_MSGRING_TX_COMPLETE),
+ BCMPCIE_D2H_MSGRING_RX_COMPLETE_IDX =
+ BCMPCIE_D2H_RING_IDX(BCMPCIE_D2H_MSGRING_RX_COMPLETE)
+};
+
+/**
+ * Macros for managing arrays of RD WR indices:
+ * rw_index_sz:
+ * - in dongle, rw_index_sz is known at compile time
+ * - in host/DHD, rw_index_sz is derived from advertized pci_shared flags
+ *
+ * ring_idx: See h2dring_idx and d2hring_idx
+ */
+
+/** Offset of a RD or WR index in H2D or D2H indices array */
+#define BCMPCIE_RW_INDEX_OFFSET(rw_index_sz, ring_idx) \
+ ((rw_index_sz) * (ring_idx))
+
+/** Fetch the address of RD or WR index in H2D or D2H indices array */
+#define BCMPCIE_RW_INDEX_ADDR(indices_array_base, rw_index_sz, ring_idx) \
+ (void *)((uint32)(indices_array_base) + \
+ BCMPCIE_RW_INDEX_OFFSET((rw_index_sz), (ring_idx)))
+
+/** H2D DMA Indices array size: given max flow rings */
+#define BCMPCIE_H2D_RW_INDEX_ARRAY_SZ(rw_index_sz, max_tx_flows) \
+ ((rw_index_sz) * BCMPCIE_H2D_MSGRINGS(max_tx_flows))
+
+/** D2H DMA Indices array size */
+#define BCMPCIE_D2H_RW_INDEX_ARRAY_SZ(rw_index_sz) \
+ ((rw_index_sz) * BCMPCIE_D2H_COMMON_MSGRINGS)
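+
+/* Illustrative sketch (not part of the driver): locating the RD index of the
+ * D2H rx-complete ring with the macros above, assuming 2-byte DMA indices
+ * (PCIE_SHARED_2BYTE_INDICES). The guard macro is hypothetical, examples only.
+ */
+#if defined(BCMPCIE_EXAMPLES)
+static void *
+d2h_rxcpl_rd_addr_example(uint32 d2h_r_idx_array_base)
+{
+	uint32 rw_index_sz = 2; /* host derives this from the advertised flags */
+	return BCMPCIE_RW_INDEX_ADDR(d2h_r_idx_array_base, rw_index_sz,
+	        BCMPCIE_D2H_MSGRING_RX_COMPLETE_IDX);
+}
+#endif /* BCMPCIE_EXAMPLES */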
+
+/* Backwards compatibility for legacy branches. */
+#if !defined(PHYS_ADDR_N)
+ #define PHYS_ADDR_N(name) name
+#endif
+
+/**
+ * This type is used by a 'message buffer' (which is a FIFO for messages). Message buffers are used
+ * for host<->device communication and are instantiated on both sides. ring_mem_t is instantiated
+ * both in host as well as device memory.
+ */
+typedef struct ring_mem {
+ uint16 idx; /* ring id */
+ uint8 type;
+ uint8 rsvd;
+ uint16 max_item; /* Max number of items in flow ring */
+ uint16 len_items; /* Items are fixed size. Length in bytes of one item */
+ sh_addr_t base_addr; /* 64 bits address, either in host or device memory */
+} ring_mem_t;
+
+/**
+ * Per flow ring, information is maintained in device memory, eg at what address the ringmem and
+ * ringstate are located. The flow ring itself can be instantiated in either host or device memory.
+ *
+ * Perhaps this type should be renamed to make clear that it resides in device memory only.
+ */
+typedef struct ring_info {
+ uint32 PHYS_ADDR_N(ringmem_ptr); /* ring mem location in dongle memory */
+
+ /* Following arrays are indexed using h2dring_idx and d2hring_idx, and not
+ * by a ringid.
+ */
+
+ /* 32bit ptr to arrays of WR or RD indices for all rings in dongle memory */
+ uint32 PHYS_ADDR_N(h2d_w_idx_ptr); /* Array of all H2D ring's WR indices */
+ uint32 PHYS_ADDR_N(h2d_r_idx_ptr); /* Array of all H2D ring's RD indices */
+ uint32 PHYS_ADDR_N(d2h_w_idx_ptr); /* Array of all D2H ring's WR indices */
+ uint32 PHYS_ADDR_N(d2h_r_idx_ptr); /* Array of all D2H ring's RD indices */
+
+ /* PCIE_DMA_INDEX feature: Dongle uses mem2mem DMA to sync arrays in host.
+ * Host may directly fetch WR and RD indices from these host-side arrays.
+ *
+ * 64bit ptr to arrays of WR or RD indices for all rings in host memory.
+ */
+ sh_addr_t h2d_w_idx_hostaddr; /* Array of all H2D ring's WR indices */
+ sh_addr_t h2d_r_idx_hostaddr; /* Array of all H2D ring's RD indices */
+ sh_addr_t d2h_w_idx_hostaddr; /* Array of all D2H ring's WR indices */
+ sh_addr_t d2h_r_idx_hostaddr; /* Array of all D2H ring's RD indices */
+
+ uint16 max_tx_flowrings; /* maximum number of H2D rings: common + flow */
+ uint16 max_submission_queues; /* maximum number of H2D rings: common + flow */
+ uint16 max_completion_rings; /* maximum number of D2H completion rings */
+ uint16 max_vdevs; /* max number of virtual interfaces supported */
+
+ sh_addr_t ifrm_w_idx_hostaddr; /* Array of all H2D ring's WR indices for IFRM */
+
+ /* 32bit ptr to arrays of HWA DB indices for all rings in dongle memory */
+ uint32 PHYS_ADDR_N(h2d_hwa_db_idx_ptr); /* Array of all H2D rings HWA DB indices */
+ uint32 PHYS_ADDR_N(d2h_hwa_db_idx_ptr); /* Array of all D2H rings HWA DB indices */
+
+} ring_info_t;
+
+/**
+ * A structure located in TCM that is shared between host and device, primarily used during
+ * initialization.
+ */
+typedef struct {
+ /** shared area version captured at flags 7:0 */
+ uint32 flags;
+
+ uint32 PHYS_ADDR_N(trap_addr);
+ uint32 PHYS_ADDR_N(assert_exp_addr);
+ uint32 PHYS_ADDR_N(assert_file_addr);
+ uint32 assert_line;
+ uint32 PHYS_ADDR_N(console_addr); /**< Address of hnd_cons_t */
+
+ uint32 PHYS_ADDR_N(msgtrace_addr);
+
+ uint32 fwid;
+
+ /* Used for debug/flow control */
+ uint16 total_lfrag_pkt_cnt;
+ uint16 max_host_rxbufs; /* rsvd in spec */
+
+ uint32 dma_rxoffset; /* rsvd in spec */
+
+ /** these will be used for sleep request/ack, d3 req/ack */
+ uint32 PHYS_ADDR_N(h2d_mb_data_ptr);
+ uint32 PHYS_ADDR_N(d2h_mb_data_ptr);
+
+ /* information pertinent to host IPC/msgbuf channels */
+ /** location in the TCM memory which has the ring_info */
+ uint32 PHYS_ADDR_N(rings_info_ptr);
+
+ /** block of host memory for the scratch buffer */
+ uint32 host_dma_scratch_buffer_len;
+ sh_addr_t host_dma_scratch_buffer;
+
+ /* location in host memory for scb host offload structures */
+ sh_addr_t host_scb_addr;
+ uint32 host_scb_size;
+
+ /* anonymous union for overloading fields in structure */
+ union {
+ uint32 buzz_dbg_ptr; /* BUZZZ state format strings and trace buffer */
+ struct {
+ /* Host provided trap buffer length in words */
+ uint16 device_trap_debug_buffer_len;
+ uint16 rsvd2;
+ };
+ };
+
+ /* rev6 compatible changes */
+ uint32 flags2;
+ uint32 host_cap;
+
+ /* location in the host address space to write trap indication.
+ * At this point for the current rev of the spec, firmware will
+ * support only indications to 32 bit host addresses.
+ * This essentially is device_trap_debug_buffer_addr
+ */
+ sh_addr_t host_trap_addr;
+
+ /* location for host fatal error log buffer start address */
+ uint32 PHYS_ADDR_N(device_fatal_logbuf_start);
+
+ /* location in host memory for offloaded modules */
+ sh_addr_t hoffload_addr;
+ uint32 flags3;
+ uint32 host_cap2;
+ uint32 host_cap3; /* host indicates its txpost ext tag capabilities */
+ uint32 PHYS_ADDR_N(etd_addr);
+
+ /* Device advertises the txpost extended tag capabilities */
+ uint32 device_txpost_ext_tags_bitmask;
+
+} pciedev_shared_t;
+
+/* Device F/W provides the following access function:
+ * pciedev_shared_t *hnd_get_pciedev_shared(void);
+ */
+
+/* host capabilities */
+#define HOSTCAP_PCIEAPI_VERSION_MASK 0x000000FF
+#define HOSTCAP_H2D_VALID_PHASE 0x00000100
+#define HOSTCAP_H2D_ENABLE_TRAP_ON_BADPHASE 0x00000200
+#define HOSTCAP_H2D_ENABLE_HOSTRDY 0x00000400
+#define HOSTCAP_DB0_TIMESTAMP 0x00000800
+#define HOSTCAP_DS_NO_OOB_DW 0x00001000
+#define HOSTCAP_DS_INBAND_DW 0x00002000
+#define HOSTCAP_H2D_IDMA 0x00004000
+#define HOSTCAP_H2D_IFRM 0x00008000
+#define HOSTCAP_H2D_DAR 0x00010000
+#define HOSTCAP_EXTENDED_TRAP_DATA 0x00020000
+#define HOSTCAP_TXSTATUS_METADATA 0x00040000
+#define HOSTCAP_BT_LOGGING 0x00080000
+#define HOSTCAP_SNAPSHOT_UPLOAD 0x00100000
+#define HOSTCAP_FAST_DELETE_RING 0x00200000
+#define HOSTCAP_PKT_TXSTATUS 0x00400000
+#define HOSTCAP_UR_FW_NO_TRAP 0x00800000 /* Don't trap on UR */
+#define HOSTCAP_TX_CSO 0x01000000
+#define HOSTCAP_HSCB 0x02000000
+/* Host support for extended device trap debug buffer */
+#define HOSTCAP_EXT_TRAP_DBGBUF 0x04000000
+#define HOSTCAP_TXPOST_EXT 0x08000000
+/* Host support for enhanced debug lane */
+#define HOSTCAP_EDL_RING 0x10000000
+#define HOSTCAP_PKT_TIMESTAMP 0x20000000
+#define HOSTCAP_PKT_HP2P 0x40000000
+#define HOSTCAP_HWA 0x80000000
+
+#define HOSTCAP2_DURATION_SCALE_MASK 0x0000003Fu
+
+/* Extended trap debug buffer allocation sizes. Note that this buffer can also
+ * be used for other trap-related purposes.
+ */
+#define BCMPCIE_HOST_EXT_TRAP_DBGBUF_LEN_MIN (64u * 1024u)
+#define BCMPCIE_HOST_EXT_TRAP_DBGBUF_LEN (96u * 1024u)
+#define BCMPCIE_HOST_EXT_TRAP_DBGBUF_LEN_MAX (256u * 1024u)
+
+/**
+ * Mailboxes notify the remote party that an event took place. They are
+ * interrupt driven and rely on hardware support.
+ */
+
+/* H2D mail box Data */
+#define H2D_HOST_D3_INFORM 0x00000001
+#define H2D_HOST_DS_ACK 0x00000002
+#define H2D_HOST_DS_NAK 0x00000004
+#define H2D_HOST_D0_INFORM_IN_USE 0x00000008
+#define H2D_HOST_D0_INFORM 0x00000010
+#define H2DMB_DS_ACTIVE 0x00000020
+#define H2DMB_DS_DEVICE_WAKE 0x00000040
+#define H2D_HOST_IDMA_INITED 0x00000080
+#define H2D_HOST_ACK_NOINT 0x00010000 /* suppress the d2h_ack interrupt */
+#define H2D_HOST_CONS_INT 0x80000000 /**< h2d int for console cmds */
+#define H2D_FW_TRAP 0x20000000 /**< h2d force TRAP */
+#define H2DMB_DS_HOST_SLEEP_INFORM H2D_HOST_D3_INFORM
+#define H2DMB_DS_DEVICE_SLEEP_ACK H2D_HOST_DS_ACK
+#define H2DMB_DS_DEVICE_SLEEP_NAK H2D_HOST_DS_NAK
+#define H2DMB_D0_INFORM_IN_USE H2D_HOST_D0_INFORM_IN_USE
+#define H2DMB_D0_INFORM H2D_HOST_D0_INFORM
+#define H2DMB_FW_TRAP H2D_FW_TRAP
+#define H2DMB_HOST_CONS_INT H2D_HOST_CONS_INT
+#define H2DMB_DS_DEVICE_WAKE_ASSERT H2DMB_DS_DEVICE_WAKE
+#define H2DMB_DS_DEVICE_WAKE_DEASSERT H2DMB_DS_ACTIVE
+
+/* D2H mail box Data */
+#define D2H_DEV_D3_ACK 0x00000001
+#define D2H_DEV_DS_ENTER_REQ 0x00000002
+#define D2H_DEV_DS_EXIT_NOTE 0x00000004
+#define D2HMB_DS_HOST_SLEEP_EXIT_ACK 0x00000008
+#define D2H_DEV_IDMA_INITED 0x00000010
+#define D2HMB_DS_HOST_SLEEP_ACK D2H_DEV_D3_ACK
+#define D2HMB_DS_DEVICE_SLEEP_ENTER_REQ D2H_DEV_DS_ENTER_REQ
+#define D2HMB_DS_DEVICE_SLEEP_EXIT D2H_DEV_DS_EXIT_NOTE
+
+#define D2H_DEV_MB_MASK (D2H_DEV_D3_ACK | D2H_DEV_DS_ENTER_REQ | \
+ D2H_DEV_DS_EXIT_NOTE | D2H_DEV_IDMA_INITED)
+#define D2H_DEV_MB_INVALIDATED(x) ((!(x)) || ((x) & ~D2H_DEV_MB_MASK))
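+
+/* Illustrative sketch: a host-side handler would normally validate the d2h
+ * mailbox word before acting on it, e.g.
+ *
+ *   if (D2H_DEV_MB_INVALIDATED(mb_data))
+ *       return;                          (zero or out-of-mask bits: ignore)
+ *   if (mb_data & D2H_DEV_D3_ACK)
+ *       ...complete the D3 handshake...
+ *
+ * mb_data is assumed to have been read via d2h_mb_data_ptr in the shared
+ * structure above.
+ */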
+
+/* trap data codes */
+#define D2H_DEV_FWHALT 0x10000000
+#define D2H_DEV_EXT_TRAP_DATA 0x20000000
+#define D2H_DEV_TRAP_IN_TRAP 0x40000000
+#define D2H_DEV_TRAP_HOSTDB 0x80000000 /* trap as set by host DB */
+#define D2H_DEV_TRAP_DUE_TO_BT 0x01000000
+/* Indicates trap due to HMAP violation */
+#define D2H_DEV_TRAP_DUE_TO_HMAP 0x02000000
+/* Indicates whether HMAP violation was Write */
+#define D2H_DEV_TRAP_HMAP_WRITE 0x04000000
+#define D2H_DEV_TRAP_PING_HOST_FAILURE 0x08000000
+#define D2H_FWTRAP_MASK 0x0000001F /* mask bits for TRAP information */
+
+#define D2HMB_FWHALT D2H_DEV_FWHALT
+#define D2HMB_TRAP_IN_TRAP D2H_DEV_TRAP_IN_TRAP
+#define D2HMB_EXT_TRAP_DATA D2H_DEV_EXT_TRAP_DATA
+#define D2H_FWTRAP_MAC_SSSR_RDY 0x00010000u /* MAC SSSR prepped */
+
+/* Size of Extended Trap data Buffer */
+#define BCMPCIE_EXT_TRAP_DATA_MAXLEN 4096
+
+/** These macros operate on type 'inuse_lclbuf_pool_t' and are used by firmware only */
+#define PREVTXP(i, d) (((i) == 0) ? ((d) - 1) : ((i) - 1))
+#define NEXTTXP(i, d) ((((i)+1) >= (d)) ? 0 : ((i)+1))
+#define NEXTNTXP(i, n, d) ((((i)+(n)) >= (d)) ? 0 : ((i)+(n)))
+#define NTXPACTIVE(r, w, d) (((r) <= (w)) ? ((w)-(r)) : ((d)-(r)+(w)))
+#define NTXPAVAIL(r, w, d) (((d) - NTXPACTIVE((r), (w), (d))) > 1)
+
+/* Ring space accounting: occupied/free slots in a circular ring of depth 'd'
+ * with read index 'r' and write index 'w'.
+ */
+#define READ_AVAIL_SPACE(w, r, d) (((w) >= (r)) ? (uint32)((w) - (r)) : (uint32)((d) - (r)))
+#define WRITE_SPACE_AVAIL_CONTINUOUS(r, w, d) (((w) >= (r)) ? ((d) - (w)) : ((r) - (w)))
+#define WRITE_SPACE_AVAIL(r, w, d) ((d) - NTXPACTIVE((r), (w), (d)) - 1)
+#define CHECK_WRITE_SPACE(r, w, d) (((r) > (w)) ? \
+ (uint32)((r) - (w) - 1) : ((r) == 0 || (w) == 0) ? \
+ (uint32)((d) - (w) - 1) : (uint32)((d) - (w)))
+
+#define CHECK_NOWRITE_SPACE(r, w, d) \
+ (((uint32)(r) == (uint32)((w) + 1)) || (((r) == 0) && ((w) == ((d) - 1))))
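+
+/* Worked example of the ring arithmetic above: with depth d = 8, read index
+ * r = 6 and write index w = 2, NTXPACTIVE(6, 2, 8) = (8 - 6 + 2) = 4 occupied
+ * slots, so WRITE_SPACE_AVAIL(6, 2, 8) = 8 - 4 - 1 = 3; one slot is kept
+ * empty to distinguish a full ring from an empty one.
+ */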
+
+/* These should be moved into pciedev.h --- */
+#define WRT_PEND(x) ((x)->wr_pending)
+#define DNGL_RING_WPTR(msgbuf) (*((msgbuf)->tcm_rs_w_ptr)) /**< advanced by producer */
+#define BCMMSGBUF_RING_SET_W_PTR(msgbuf, a) (DNGL_RING_WPTR(msgbuf) = (a))
+
+#define DNGL_RING_RPTR(msgbuf) (*((msgbuf)->tcm_rs_r_ptr)) /**< advanced by consumer */
+#define BCMMSGBUF_RING_SET_R_PTR(msgbuf, a) (DNGL_RING_RPTR(msgbuf) = (a))
+
+#define MODULO_RING_IDX(x, y) ((x) % (y)->bitmap_size)
+
+#define RING_READ_PTR(x) ((x)->ringstate->r_offset)
+#define RING_WRITE_PTR(x) ((x)->ringstate->w_offset)
+#define RING_START_PTR(x) ((x)->ringmem->base_addr.low_addr)
+#define RING_MAX_ITEM(x) ((x)->ringmem->max_item)
+#define RING_LEN_ITEMS(x) ((x)->ringmem->len_items)
+#define HOST_RING_BASE(x) ((x)->dma_buf.va)
+#define HOST_RING_END(x) ((uint8 *)HOST_RING_BASE((x)) + \
+ ((RING_MAX_ITEM((x))-1)*RING_LEN_ITEMS((x))))
+
+/* Trap types copied in the pciedev_shared.trap_addr */
+#define FW_INITIATED_TRAP_TYPE (0x1 << 7)
+#define HEALTHCHECK_NODS_TRAP_TYPE (0x1 << 6)
+
+/* Device supported txpost extended tag capabilities */
+#define PCIE_SHARED2_DEV_TXPOST_EXT_TAG_CAP_RSVD (1u << 0u) /* Reserved */
+#define PCIE_SHARED2_DEV_TXPOST_EXT_TAG_CAP_CSO (1u << 1u) /* CSO */
+#define PCIE_SHARED2_DEV_TXPOST_EXT_TAG_CAP_MESH (1u << 2u) /* MESH */
+
+#define RING_MESH(x) (((x)->txpost_ext_cap_flags) & PCIE_SHARED2_DEV_TXPOST_EXT_TAG_CAP_MESH)
+
+#endif /* _bcmpcie_h_ */
diff --git a/bcmdhd.101.10.361.x/include/bcmpcispi.h b/bcmdhd.101.10.361.x/include/bcmpcispi.h
new file mode 100755
index 0000000..bd04557
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/bcmpcispi.h
@@ -0,0 +1,204 @@
+/*
+ * Broadcom PCI-SPI Host Controller Register Definitions
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+#ifndef _BCM_PCI_SPI_H
+#define _BCM_PCI_SPI_H
+
+/* cpp contortions to concatenate w/arg prescan */
+#ifndef PAD
+#define _PADLINE(line) pad ## line
+#define _XSTR(line) _PADLINE(line)
+#define PAD _XSTR(__LINE__)
+#endif /* PAD */
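+
+/* Each use of PAD expands to a field name unique to its source line: on line
+ * 123, "uint32 PAD[4];" becomes "uint32 pad123[4];", which is what lets the
+ * register structs below declare several padding fields without name
+ * collisions.
+ */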
+
+/*
++---------------------------------------------------------------------------+
+| |
+| 7 6 5 4 3 2 1 0 |
+| 0x0000 SPI_CTRL SPIE SPE 0 MSTR CPOL CPHA SPR1 SPR0 |
+| 0x0004 SPI_STAT SPIF WCOL ST1 ST0 WFFUL WFEMP RFFUL RFEMP |
+| 0x0008 SPI_DATA Bits 31:0, data to send out on MOSI |
+| 0x000C SPI_EXT ICNT1 ICNT0 BSWAP *HSMODE ESPR1 ESPR0 |
+| 0x0020 GPIO_OE 0=input, 1=output PWR_OE CS_OE |
+| 0x0024 GPIO_DATA CARD:1=missing, 0=present CARD PWR_DAT CS_DAT |
+| 0x0040 INT_EDGE 0=level, 1=edge DEV_E SPI_E |
+| 0x0044 INT_POL 1=active high, 0=active low DEV_P SPI_P |
+| 0x0048 INTMASK DEV SPI |
+| 0x004C INTSTATUS DEV SPI |
+| 0x0060 HEXDISP Reset value: 0x14e443f5. In hexdisp mode, value |
+| shows on the Raggedstone1 4-digit 7-segment display. |
+| 0x0064 CURRENT_MA Low 16 bits indicate card current consumption in mA |
+| 0x006C DISP_SEL Display mode (0=hexdisp, 1=current) DSP |
+| 0x00C0 PLL_CTL bit31=ext_clk, remainder unused. |
+| 0x00C4 PLL_STAT LOCK |
+| 0x00C8 CLK_FREQ |
+| 0x00CC CLK_CNT |
+| |
+| *Notes: HSMODE is not implemented, never set this bit! |
+| BSWAP is available in rev >= 8 |
+| |
++---------------------------------------------------------------------------+
+*/
+
+typedef volatile struct {
+ uint32 spih_ctrl; /* 0x00 SPI Control Register */
+ uint32 spih_stat; /* 0x04 SPI Status Register */
+ uint32 spih_data; /* 0x08 SPI Data Register, 32-bits wide */
+ uint32 spih_ext; /* 0x0C SPI Extension Register */
+ uint32 PAD[4]; /* 0x10-0x1F PADDING */
+
+ uint32 spih_gpio_ctrl; /* 0x20 SPI GPIO Control Register */
+ uint32 spih_gpio_data; /* 0x24 SPI GPIO Data Register */
+ uint32 PAD[6]; /* 0x28-0x3F PADDING */
+
+ uint32 spih_int_edge; /* 0x40 SPI Interrupt Edge Register (0=Level, 1=Edge) */
+ uint32 spih_int_pol; /* 0x44 SPI Interrupt Polarity Register (0=Active Low, */
+ /* 1=Active High) */
+ uint32 spih_int_mask; /* 0x48 SPI Interrupt Mask */
+ uint32 spih_int_status; /* 0x4C SPI Interrupt Status */
+ uint32 PAD[4]; /* 0x50-0x5F PADDING */
+
+ uint32 spih_hex_disp; /* 0x60 SPI 4-digit hex display value */
+ uint32 spih_current_ma; /* 0x64 SPI SD card current consumption in mA */
+ uint32 PAD[1]; /* 0x68 PADDING */
+ uint32 spih_disp_sel; /* 0x6c SPI 4-digit hex display mode select (1=current) */
+ uint32 PAD[4]; /* 0x70-0x7F PADDING */
+ uint32 PAD[8]; /* 0x80-0x9F PADDING */
+ uint32 PAD[8]; /* 0xA0-0xBF PADDING */
+ uint32 spih_pll_ctrl; /* 0xC0 PLL Control Register */
+ uint32 spih_pll_status; /* 0xC4 PLL Status Register */
+ uint32 spih_xtal_freq; /* 0xC8 External Clock Frequency in units of 10000Hz */
+ uint32 spih_clk_count; /* 0xCC External Clock Count Register */
+
+} spih_regs_t;
+
+typedef volatile struct {
+ uint32 cfg_space[0x40]; /* 0x000-0x0FF PCI Configuration Space (Read Only) */
+ uint32 P_IMG_CTRL0; /* 0x100 PCI Image0 Control Register */
+
+ uint32 P_BA0; /* 0x104 32 R/W PCI Image0 Base Address register */
+ uint32 P_AM0; /* 0x108 32 R/W PCI Image0 Address Mask register */
+ uint32 P_TA0; /* 0x10C 32 R/W PCI Image0 Translation Address register */
+ uint32 P_IMG_CTRL1; /* 0x110 32 R/W PCI Image1 Control register */
+ uint32 P_BA1; /* 0x114 32 R/W PCI Image1 Base Address register */
+ uint32 P_AM1; /* 0x118 32 R/W PCI Image1 Address Mask register */
+ uint32 P_TA1; /* 0x11C 32 R/W PCI Image1 Translation Address register */
+ uint32 P_IMG_CTRL2; /* 0x120 32 R/W PCI Image2 Control register */
+ uint32 P_BA2; /* 0x124 32 R/W PCI Image2 Base Address register */
+ uint32 P_AM2; /* 0x128 32 R/W PCI Image2 Address Mask register */
+ uint32 P_TA2; /* 0x12C 32 R/W PCI Image2 Translation Address register */
+ uint32 P_IMG_CTRL3; /* 0x130 32 R/W PCI Image3 Control register */
+ uint32 P_BA3; /* 0x134 32 R/W PCI Image3 Base Address register */
+ uint32 P_AM3; /* 0x138 32 R/W PCI Image3 Address Mask register */
+ uint32 P_TA3; /* 0x13C 32 R/W PCI Image3 Translation Address register */
+ uint32 P_IMG_CTRL4; /* 0x140 32 R/W PCI Image4 Control register */
+ uint32 P_BA4; /* 0x144 32 R/W PCI Image4 Base Address register */
+ uint32 P_AM4; /* 0x148 32 R/W PCI Image4 Address Mask register */
+ uint32 P_TA4; /* 0x14C 32 R/W PCI Image4 Translation Address register */
+ uint32 P_IMG_CTRL5; /* 0x150 32 R/W PCI Image5 Control register */
+ uint32 P_BA5; /* 0x154 32 R/W PCI Image5 Base Address register */
+ uint32 P_AM5; /* 0x158 32 R/W PCI Image5 Address Mask register */
+ uint32 P_TA5; /* 0x15C 32 R/W PCI Image5 Translation Address register */
+ uint32 P_ERR_CS; /* 0x160 32 R/W PCI Error Control and Status register */
+ uint32 P_ERR_ADDR; /* 0x164 32 R PCI Erroneous Address register */
+ uint32 P_ERR_DATA; /* 0x168 32 R PCI Erroneous Data register */
+
+ uint32 PAD[5]; /* 0x16C-0x17F PADDING */
+
+ uint32 WB_CONF_SPC_BAR; /* 0x180 32 R WISHBONE Configuration Space Base Address */
+ uint32 W_IMG_CTRL1; /* 0x184 32 R/W WISHBONE Image1 Control register */
+ uint32 W_BA1; /* 0x188 32 R/W WISHBONE Image1 Base Address register */
+ uint32 W_AM1; /* 0x18C 32 R/W WISHBONE Image1 Address Mask register */
+ uint32 W_TA1; /* 0x190 32 R/W WISHBONE Image1 Translation Address reg */
+ uint32 W_IMG_CTRL2; /* 0x194 32 R/W WISHBONE Image2 Control register */
+ uint32 W_BA2; /* 0x198 32 R/W WISHBONE Image2 Base Address register */
+ uint32 W_AM2; /* 0x19C 32 R/W WISHBONE Image2 Address Mask register */
+ uint32 W_TA2; /* 0x1A0 32 R/W WISHBONE Image2 Translation Address reg */
+ uint32 W_IMG_CTRL3; /* 0x1A4 32 R/W WISHBONE Image3 Control register */
+ uint32 W_BA3; /* 0x1A8 32 R/W WISHBONE Image3 Base Address register */
+ uint32 W_AM3; /* 0x1AC 32 R/W WISHBONE Image3 Address Mask register */
+ uint32 W_TA3; /* 0x1B0 32 R/W WISHBONE Image3 Translation Address reg */
+ uint32 W_IMG_CTRL4; /* 0x1B4 32 R/W WISHBONE Image4 Control register */
+ uint32 W_BA4; /* 0x1B8 32 R/W WISHBONE Image4 Base Address register */
+ uint32 W_AM4; /* 0x1BC 32 R/W WISHBONE Image4 Address Mask register */
+ uint32 W_TA4; /* 0x1C0 32 R/W WISHBONE Image4 Translation Address reg */
+ uint32 W_IMG_CTRL5; /* 0x1C4 32 R/W WISHBONE Image5 Control register */
+ uint32 W_BA5; /* 0x1C8 32 R/W WISHBONE Image5 Base Address register */
+ uint32 W_AM5; /* 0x1CC 32 R/W WISHBONE Image5 Address Mask register */
+ uint32 W_TA5; /* 0x1D0 32 R/W WISHBONE Image5 Translation Address reg */
+ uint32 W_ERR_CS; /* 0x1D4 32 R/W WISHBONE Error Control and Status reg */
+ uint32 W_ERR_ADDR; /* 0x1D8 32 R WISHBONE Erroneous Address register */
+ uint32 W_ERR_DATA; /* 0x1DC 32 R WISHBONE Erroneous Data register */
+ uint32 CNF_ADDR; /* 0x1E0 32 R/W Configuration Cycle register */
+ uint32 CNF_DATA; /* 0x1E4 32 R/W Configuration Cycle Generation Data reg */
+
+ uint32 INT_ACK; /* 0x1E8 32 R Interrupt Acknowledge register */
+ uint32 ICR; /* 0x1EC 32 R/W Interrupt Control register */
+ uint32 ISR; /* 0x1F0 32 R/W Interrupt Status register */
+} spih_pciregs_t;
+
+/*
+ * PCI Core interrupt enable and status bit definitions.
+ */
+
+/* PCI Core ICR Register bit definitions */
+#define PCI_INT_PROP_EN (1 << 0) /* Interrupt Propagation Enable */
+#define PCI_WB_ERR_INT_EN (1 << 1) /* Wishbone Error Interrupt Enable */
+#define PCI_PCI_ERR_INT_EN (1 << 2) /* PCI Error Interrupt Enable */
+#define PCI_PAR_ERR_INT_EN (1 << 3) /* Parity Error Interrupt Enable */
+#define PCI_SYS_ERR_INT_EN (1 << 4) /* System Error Interrupt Enable */
+#define PCI_SOFTWARE_RESET (1U << 31) /* Software reset of the PCI Core. */
+
+/* PCI Core ISR Register bit definitions */
+#define PCI_INT_PROP_ST (1 << 0) /* Interrupt Propagation Status */
+#define PCI_WB_ERR_INT_ST (1 << 1) /* Wishbone Error Interrupt Status */
+#define PCI_PCI_ERR_INT_ST (1 << 2) /* PCI Error Interrupt Status */
+#define PCI_PAR_ERR_INT_ST (1 << 3) /* Parity Error Interrupt Status */
+#define PCI_SYS_ERR_INT_ST (1 << 4) /* System Error Interrupt Status */
+
+/* Registers on the Wishbone bus */
+#define SPIH_CTLR_INTR (1 << 0) /* SPI Host Controller Core Interrupt */
+#define SPIH_DEV_INTR (1 << 1) /* SPI Device Interrupt */
+#define SPIH_WFIFO_INTR (1 << 2) /* SPI Tx FIFO Empty Intr (FPGA Rev >= 8) */
+
+/* GPIO Bit definitions */
+#define SPIH_CS (1 << 0) /* SPI Chip Select (active low) */
+#define SPIH_SLOT_POWER (1 << 1) /* SD Card Slot Power Enable */
+#define SPIH_CARD_DETECT (1 << 2) /* SD Card Detect */
+
+/* SPI Status Register Bit definitions */
+#define SPIH_STATE_MASK 0x30 /* SPI Transfer State Machine state mask */
+#define SPIH_STATE_SHIFT 4 /* SPI Transfer State Machine state shift */
+#define SPIH_WFFULL (1 << 3) /* SPI Write FIFO Full */
+#define SPIH_WFEMPTY (1 << 2) /* SPI Write FIFO Empty */
+#define SPIH_RFFULL (1 << 1) /* SPI Read FIFO Full */
+#define SPIH_RFEMPTY (1 << 0) /* SPI Read FIFO Empty */
+
+#define SPIH_EXT_CLK (1U << 31) /* Use External Clock as PLL Clock source. */
+
+#define SPIH_PLL_NO_CLK (1 << 1) /* Set to 1 if the PLL's input clock is lost. */
+#define SPIH_PLL_LOCKED (1 << 3) /* Set to 1 when the PLL is locked. */
+
+/* Spin bit loop bound check */
+#define SPI_SPIN_BOUND 0xf4240 /* 1 million */
+
+#endif /* _BCM_PCI_SPI_H */
diff --git a/bcmdhd.101.10.361.x/include/bcmperf.h b/bcmdhd.101.10.361.x/include/bcmperf.h
new file mode 100755
index 0000000..fd37d27
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/bcmperf.h
@@ -0,0 +1,33 @@
+/*
+ * Performance counters software interface.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+/* test */
+#ifndef _BCMPERF_H_
+#define _BCMPERF_H_
+/* get cache hits and misses */
+#define BCMPERF_ENABLE_INSTRCOUNT()
+#define BCMPERF_ENABLE_ICACHE_MISS()
+#define BCMPERF_ENABLE_ICACHE_HIT()
+#define BCMPERF_GETICACHE_MISS(x) ((x) = 0)
+#define BCMPERF_GETICACHE_HIT(x) ((x) = 0)
+#define BCMPERF_GETINSTRCOUNT(x) ((x) = 0)
+#endif /* _BCMPERF_H_ */
diff --git a/bcmdhd.101.10.361.x/include/bcmproto.h b/bcmdhd.101.10.361.x/include/bcmproto.h
new file mode 100755
index 0000000..2770caf
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/bcmproto.h
@@ -0,0 +1,275 @@
+/*
+ * Fundamental constants relating to IP Protocol
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _bcmproto_h_
+#define _bcmproto_h_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+#include "eapol.h"
+#include "802.3.h"
+#include "vlan.h"
+#include "bcmtcp.h"
+/* copy from igsc.h */
+#define IGMP_HLEN 8
+
+enum frame_l2_hdr {
+FRAME_L2_SNAP_H = 1,
+FRAME_L2_SNAPVLAN_H,
+FRAME_L2_ETH_H,
+FRAME_L2_ETHVLAN_H,
+FRAME_L2_ERROR,
+};
+
+enum frame_l3_hdr {
+FRAME_L3_IP_H = 4,
+FRAME_L3_IP6_H = 6,
+FRAME_L3_ARP_H,
+FRAME_L3_8021X_EAPOLKEY_H,
+FRAME_L3_ERROR,
+};
+
+enum frame_l4_hdr {
+FRAME_L4_ICMP_H = 1,
+FRAME_L4_IGMP_H = 2,
+FRAME_L4_TCP_H = 6,
+FRAME_L4_UDP_H = 17,
+FRAME_L4_ICMP6_H = 58,
+FRAME_L4_ERROR,
+};
+
+typedef struct {
+ uint8 *l2;
+ uint8 l2_t;
+ uint16 l2_len;
+ uint8 *l3;
+ uint8 l3_t;
+ uint16 l3_len;
+ uint8 *l4;
+ uint8 l4_t;
+ uint16 l4_len;
+} frame_proto_t;
+
+static const uint8 llc_snap_hdr[SNAP_HDR_LEN] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
+
+/* Generic header parser function */
+static INLINE int
+hnd_frame_proto(uint8 *p, int plen, frame_proto_t *fp)
+{
+ struct dot3_mac_llc_snap_header *sh = (struct dot3_mac_llc_snap_header *)p;
+ struct dot3_mac_llc_snapvlan_header *svh = (struct dot3_mac_llc_snapvlan_header *)p;
+ struct ether_header *eh = (struct ether_header *)p;
+ struct ethervlan_header *evh = (struct ethervlan_header *)p;
+ uint16 type;
+ uint16 len;
+
+ if (p == NULL || plen <= 0) {
+ return BCME_ERROR;
+ }
+
+ if (plen < (int)sizeof(*eh)) {
+ return BCME_BUFTOOSHORT;
+ }
+ type = ntoh16(eh->ether_type);
+
+ bzero(fp, sizeof(frame_proto_t));
+
+ /* L2 header/pointer check */
+ fp->l2 = p;
+ fp->l2_len = (uint16)plen;
+ if (type < ETHER_TYPE_MIN) {
+ if (plen < (int)sizeof(*sh)) {
+ return BCME_BUFTOOSHORT;
+ }
+ if (bcmp(&sh->dsap, llc_snap_hdr, SNAP_HDR_LEN) == 0) {
+ type = ntoh16(sh->type);
+ if (type == ETHER_TYPE_8021Q) {
+ fp->l2_t = FRAME_L2_SNAPVLAN_H;
+ type = ntoh16(svh->ether_type);
+ p += sizeof(struct dot3_mac_llc_snapvlan_header);
+ if ((plen -= sizeof(struct dot3_mac_llc_snapvlan_header)) <= 0) {
+ return BCME_ERROR;
+ }
+ }
+ else {
+ fp->l2_t = FRAME_L2_SNAP_H;
+ p += sizeof(struct dot3_mac_llc_snap_header);
+ if ((plen -= sizeof(struct dot3_mac_llc_snap_header)) <= 0) {
+ return BCME_ERROR;
+ }
+ }
+ }
+ else {
+ return BCME_ERROR;
+ }
+ }
+ else {
+ if (type == ETHER_TYPE_8021Q) {
+ fp->l2_t = FRAME_L2_ETHVLAN_H;
+ type = ntoh16(evh->ether_type);
+ p += ETHERVLAN_HDR_LEN;
+ if ((plen -= ETHERVLAN_HDR_LEN) <= 0) {
+ return BCME_ERROR;
+ }
+ }
+ else {
+ fp->l2_t = FRAME_L2_ETH_H;
+ p += ETHER_HDR_LEN;
+ if ((plen -= ETHER_HDR_LEN) <= 0) {
+ return BCME_ERROR;
+ }
+ }
+ }
+ /* L3 header/pointer check */
+ fp->l3 = p;
+ fp->l3_len = (uint16)plen;
+ switch (type) {
+ case ETHER_TYPE_ARP: {
+ if ((plen -= ARP_DATA_LEN) < 0) {
+ return BCME_ERROR;
+ }
+
+ fp->l3_t = FRAME_L3_ARP_H;
+ /* no layer 4 protocol, return */
+ return BCME_OK;
+ break;
+ }
+ case ETHER_TYPE_IP: {
+ struct ipv4_hdr *iph = (struct ipv4_hdr *)p;
+ len = IPV4_HLEN(iph);
+
+ if ((plen -= len) <= 0) {
+ return BCME_ERROR;
+ }
+
+ if (IP_VER(iph) == IP_VER_4 && len >= IPV4_MIN_HEADER_LEN) {
+ fp->l3_t = FRAME_L3_IP_H;
+ type = IPV4_PROT(iph);
+ p += len;
+ }
+ else {
+ /* not a valid ipv4 packet */
+ return BCME_ERROR;
+ }
+ break;
+ }
+ case ETHER_TYPE_IPV6: {
+ struct ipv6_hdr *ip6h = (struct ipv6_hdr *)p;
+
+ if ((plen -= IPV6_MIN_HLEN) <= 0) {
+ return BCME_ERROR;
+ }
+
+ if (IP_VER(ip6h) == IP_VER_6) {
+ fp->l3_t = FRAME_L3_IP6_H;
+ type = IPV6_PROT(ip6h);
+ p += IPV6_MIN_HLEN;
+ if (IPV6_EXTHDR(type)) {
+ uint8 proto = 0;
+ int32 exth_len = ipv6_exthdr_len_check(p, plen, &proto);
+ if (exth_len < 0 || ((plen -= exth_len) <= 0))
+ return BCME_ERROR;
+ type = proto;
+ p += exth_len;
+ }
+ }
+ else {
+ /* not a valid ipv6 packet */
+ return BCME_ERROR;
+ }
+ break;
+ }
+ case ETHER_TYPE_802_1X: {
+ eapol_hdr_t *eapolh = (eapol_hdr_t *)p;
+
+ if ((plen -= EAPOL_HDR_LEN) <= 0) {
+ return BCME_ERROR;
+ }
+
+ if (eapolh->type == EAPOL_KEY) {
+ fp->l3_t = FRAME_L3_8021X_EAPOLKEY_H;
+ return BCME_OK;
+ }
+ else {
+ /* not an EAPOL-Key frame */
+ return BCME_ERROR;
+ }
+
+ break;
+ }
+ default:
+ /* not interesting case */
+ return BCME_ERROR;
+ break;
+ }
+
+ /* L4 header/pointer check */
+ fp->l4 = p;
+ fp->l4_len = (uint16)plen;
+ switch (type) {
+ case IP_PROT_ICMP:
+ fp->l4_t = FRAME_L4_ICMP_H;
+ if ((plen -= sizeof(struct bcmicmp_hdr)) < 0) {
+ return BCME_ERROR;
+ }
+ break;
+ case IP_PROT_IGMP:
+ fp->l4_t = FRAME_L4_IGMP_H;
+ if ((plen -= IGMP_HLEN) < 0) {
+ return BCME_ERROR;
+ }
+ break;
+ case IP_PROT_TCP:
+ fp->l4_t = FRAME_L4_TCP_H;
+ if ((plen -= sizeof(struct bcmtcp_hdr)) < 0) {
+ return BCME_ERROR;
+ }
+ break;
+ case IP_PROT_UDP:
+ fp->l4_t = FRAME_L4_UDP_H;
+ if ((plen -= sizeof(struct bcmudp_hdr)) < 0) {
+ return BCME_ERROR;
+ }
+ break;
+ case IP_PROT_ICMP6:
+ fp->l4_t = FRAME_L4_ICMP6_H;
+ if ((plen -= sizeof(struct icmp6_hdr)) < 0) {
+ return BCME_ERROR;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return BCME_OK;
+}
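+
+/* Illustrative caller-side sketch (assuming the usual osl packet accessors
+ * PKTDATA/PKTLEN): classify a frame and dispatch on the result, e.g.
+ *
+ *   frame_proto_t fp;
+ *   if (hnd_frame_proto(PKTDATA(osh, pkt), PKTLEN(osh, pkt), &fp) == BCME_OK &&
+ *       fp.l4_t == FRAME_L4_TCP_H) {
+ *       struct bcmtcp_hdr *tcp = (struct bcmtcp_hdr *)fp.l4;
+ *       ...inspect TCP ports...
+ *   }
+ */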
+
+#define SNAP_HDR_LEN 6 /* 802.3 LLC/SNAP header length */
+
+#define FRAME_DROP 0
+#define FRAME_NOP 1
+#define FRAME_TAKEN 2
+
+#endif /* _bcmproto_h_ */
diff --git a/bcmdhd.101.10.361.x/include/bcmrand.h b/bcmdhd.101.10.361.x/include/bcmrand.h
new file mode 100755
index 0000000..3bebca6
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/bcmrand.h
@@ -0,0 +1,65 @@
+/*
+ * bcmrand.h.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _bcmrand_h_
+#define _bcmrand_h_
+
+/* When the host driver is built for a PCIe dongle image, we assume the host
+ * must provide the entropy input unless the macro BCM_RNG_NO_HOST_ENTROPY is
+ * defined.
+ */
+#if defined(BCMPCIEDEV) && !defined(BCMFUZZ)
+#if !defined(BCM_RNG_HOST_ENTROPY) && !defined(BCM_RNG_NO_HOST_ENTROPY)
+#define BCM_RNG_HOST_ENTROPY
+#define BCM_RNG_PCIEDEV_DEFAULT
+#endif /* !BCM_RNG_HOST_ENTROPY && !BCM_RNG_NO_HOST_ENTROPY */
+#endif /* BCMPCIEDEV */
+
+/* the format of current TCM layout during boot
+ *
+ * Code Unused memory Random numbers Random number Magic number NVRAM NVRAM
+ * byte Count 0xFEEDC0DE Size
+ * |<-----Variable---->|<---Variable--->|<-----4 bytes-->|<---4 bytes---->|<---V--->|<--4B--->|
+ * |<------------- BCM_ENTROPY_HOST_MAXSIZE --------->|
+ */
+
+/* The host needs to provide 64 bytes (512 bits) of entropy for the bcm SW RNG */
+#define BCM_ENTROPY_MAGIC_SIZE 4u
+#define BCM_ENTROPY_COUNT_SIZE 4u
+#define BCM_ENTROPY_SEED_NBYTES 64u
+#define BCM_ENTROPY_NONCE_NBYTES 16u
+#define BCM_ENTROPY_HOST_NBYTES 128u
+
+#ifdef DBG_RNG_SEC_TEST
+#define BCM_ENTROPY_MAX_NBYTES 128u
+#else
+#define BCM_ENTROPY_MAX_NBYTES 512u
+#endif /* DBG_RNG_SEC_TEST */
+#define BCM_ENTROPY_HOST_MAXSIZE \
+ (BCM_ENTROPY_MAGIC_SIZE + BCM_ENTROPY_COUNT_SIZE + BCM_ENTROPY_MAX_NBYTES)
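+
+/* With the default BCM_ENTROPY_MAX_NBYTES of 512u this reserves
+ * 4 + 4 + 512 = 520 bytes ahead of NVRAM (4 + 4 + 128 = 136 bytes when
+ * DBG_RNG_SEC_TEST trims the pool).
+ */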
+
+/* Constants for calculating the location of the host entropy input */
+#define BCM_NVRAM_OFFSET_TCM 4u
+#define BCM_NVRAM_IMG_COMPRS_FACTOR 4u
+#define BCM_NVRAM_RNG_SIGNATURE 0xFEEDC0DEu
+
+#endif /* _bcmrand_h_ */
diff --git a/bcmdhd.101.10.361.x/include/bcmsdbus.h b/bcmdhd.101.10.361.x/include/bcmsdbus.h
new file mode 100755
index 0000000..d2d89e8
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/bcmsdbus.h
@@ -0,0 +1,187 @@
+/*
+ * Definitions for API from sdio common code (bcmsdh) to individual
+ * host controller drivers.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _sdio_api_h_
+#define _sdio_api_h_
+
+#if defined (BT_OVER_SDIO)
+#include <linux/mmc/sdio_func.h>
+#endif /* defined (BT_OVER_SDIO) */
+
+/*
+ * The following definitions were previously (and incorrectly) in bcmsdio.h,
+ * and were incorrectly named with the SDIOH prefix, which denotes the BRCM
+ * SDIO FPGA host controller.
+ */
+
+#define SDIOH_API_RC_SUCCESS (0x00)
+#define SDIOH_API_RC_FAIL (0x01)
+#define SDIOH_API_SUCCESS(status) ((status) == 0)
+
+#define SDIOH_READ 0 /* Read request */
+#define SDIOH_WRITE 1 /* Write request */
+
+#define SDIOH_DATA_FIX 0 /* Fixed addressing */
+#define SDIOH_DATA_INC 1 /* Incremental addressing */
+
+#define SDIOH_CMD_TYPE_NORMAL 0 /* Normal command */
+#define SDIOH_CMD_TYPE_APPEND 1 /* Append command */
+#define SDIOH_CMD_TYPE_CUTTHRU 2 /* Cut-through command */
+
+#define SDIOH_DATA_PIO 0 /* PIO mode */
+#define SDIOH_DATA_DMA 1 /* DMA mode */
+
+/* Max number of glommed pkts */
+#ifdef CUSTOM_MAX_TXGLOM_SIZE
+#define SDPCM_MAXGLOM_SIZE CUSTOM_MAX_TXGLOM_SIZE
+#else
+#define SDPCM_MAXGLOM_SIZE 36
+#endif /* CUSTOM_MAX_TXGLOM_SIZE */
+
+#define SDPCM_TXGLOM_CPY 0 /* SDIO 2.0 should use copy mode */
+#define SDPCM_TXGLOM_MDESC 1 /* SDIO 3.0 should use multi-desc mode */
+
+#ifdef CUSTOM_DEF_TXGLOM_SIZE
+#define SDPCM_DEFGLOM_SIZE CUSTOM_DEF_TXGLOM_SIZE
+#else
+#define SDPCM_DEFGLOM_SIZE SDPCM_MAXGLOM_SIZE
+#endif /* CUSTOM_DEF_TXGLOM_SIZE */
+
+#if SDPCM_DEFGLOM_SIZE > SDPCM_MAXGLOM_SIZE
+#warning "SDPCM_DEFGLOM_SIZE cannot be higher than SDPCM_MAXGLOM_SIZE!!"
+#undef SDPCM_DEFGLOM_SIZE
+#define SDPCM_DEFGLOM_SIZE SDPCM_MAXGLOM_SIZE
+#endif
+
+#ifdef PKT_STATICS
+typedef struct pkt_statics {
+ uint16 event_count;
+ uint32 event_size;
+ uint16 ctrl_count;
+ uint32 ctrl_size;
+ uint32 data_count;
+ uint32 data_size;
+ uint32 glom_cnt[SDPCM_MAXGLOM_SIZE];
+ uint16 glom_max;
+ uint16 glom_count;
+ uint32 glom_size;
+ uint16 test_count;
+ uint32 test_size;
+ uint32 glom_cnt_us[SDPCM_MAXGLOM_SIZE];
+} pkt_statics_t;
+#endif
+
+typedef int SDIOH_API_RC;
+
+/* SDio Host structure */
+typedef struct sdioh_info sdioh_info_t;
+
+/* callback function, taking one arg */
+typedef void (*sdioh_cb_fn_t)(void *);
+#if defined (BT_OVER_SDIO)
+extern
+void sdioh_sdmmc_card_enable_func_f3(sdioh_info_t *sd, struct sdio_func *func);
+#endif /* defined (BT_OVER_SDIO) */
+
+extern SDIOH_API_RC sdioh_interrupt_register(sdioh_info_t *si, sdioh_cb_fn_t fn, void *argh);
+extern SDIOH_API_RC sdioh_interrupt_deregister(sdioh_info_t *si);
+
+/* query whether SD interrupt is enabled or not */
+extern SDIOH_API_RC sdioh_interrupt_query(sdioh_info_t *si, bool *onoff);
+
+/* enable or disable SD interrupt */
+extern SDIOH_API_RC sdioh_interrupt_set(sdioh_info_t *si, bool enable_disable);
+
+#if defined(DHD_DEBUG) || defined(BCMDBG)
+extern bool sdioh_interrupt_pending(sdioh_info_t *si);
+#endif
+
+/* read or write one byte using cmd52 */
+extern SDIOH_API_RC sdioh_request_byte(sdioh_info_t *si, uint rw, uint fnc, uint addr, uint8 *byte);
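+
+/* Illustrative sketch: reading one CCCR byte via CMD52 might look like
+ *
+ *   uint8 val;
+ *   if (SDIOH_API_SUCCESS(sdioh_request_byte(si, SDIOH_READ, 0, addr, &val)))
+ *       ...use val...
+ *
+ * with function 0 addressing the common register space; 'addr' stands in for
+ * a real CCCR register offset.
+ */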
+
+/* read or write 2/4 bytes using cmd53 */
+extern SDIOH_API_RC sdioh_request_word(sdioh_info_t *si, uint cmd_type, uint rw, uint fnc,
+ uint addr, uint32 *word, uint nbyte);
+
+/* read or write any buffer using cmd53 */
+extern SDIOH_API_RC sdioh_request_buffer(sdioh_info_t *si, uint pio_dma, uint fix_inc,
+ uint rw, uint fnc_num, uint32 addr, uint regwidth, uint32 buflen, uint8 *buffer,
+ void *pkt);
+
+/* get cis data */
+extern SDIOH_API_RC sdioh_cis_read(sdioh_info_t *si, uint fuc, uint8 *cis, uint32 length);
+extern SDIOH_API_RC sdioh_cisaddr_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 offset);
+
+extern SDIOH_API_RC sdioh_cfg_read(sdioh_info_t *si, uint fuc, uint32 addr, uint8 *data);
+extern SDIOH_API_RC sdioh_cfg_write(sdioh_info_t *si, uint fuc, uint32 addr, uint8 *data);
+
+/* query number of io functions */
+extern uint sdioh_query_iofnum(sdioh_info_t *si);
+
+/* handle iovars */
+extern int sdioh_iovar_op(sdioh_info_t *si, const char *name,
+ void *params, int plen, void *arg, uint len, bool set);
+
+/* Issue abort to the specified function and clear controller as needed */
+extern int sdioh_abort(sdioh_info_t *si, uint fnc);
+
+/* Start and Stop SDIO without re-enumerating the SD card. */
+extern int sdioh_start(sdioh_info_t *si, int stage);
+extern int sdioh_stop(sdioh_info_t *si);
+
+/* Wait system lock free */
+extern int sdioh_waitlockfree(sdioh_info_t *si);
+
+/* Reset and re-initialize the device */
+extern int sdioh_sdio_reset(sdioh_info_t *si);
+
+#ifdef BCMSPI
+/* Function to pass gSPI specific device-status bits to dhd. */
+extern uint32 sdioh_get_dstatus(sdioh_info_t *si);
+
+/* chipid and chiprev info for lower layers to control sw WAR's for hw bugs. */
+extern void sdioh_chipinfo(sdioh_info_t *si, uint32 chip, uint32 chiprev);
+extern void sdioh_dwordmode(sdioh_info_t *si, bool set);
+#endif /* BCMSPI */
+
+#if defined(BCMSDIOH_STD)
+ /*
+ * Only STD host supports cmd14 sleep.
+ * Using define instead of empty stubs for other hosts for now.
+ */
+ #define SDIOH_SLEEP_ENABLED
+#endif
+extern SDIOH_API_RC sdioh_sleep(sdioh_info_t *si, bool enab);
+
+/* GPIO support */
+extern SDIOH_API_RC sdioh_gpio_init(sdioh_info_t *sd);
+extern bool sdioh_gpioin(sdioh_info_t *sd, uint32 gpio);
+extern SDIOH_API_RC sdioh_gpioouten(sdioh_info_t *sd, uint32 gpio);
+extern SDIOH_API_RC sdioh_gpioout(sdioh_info_t *sd, uint32 gpio, bool enab);
+extern uint sdioh_set_mode(sdioh_info_t *sd, uint mode);
+#ifdef PKT_STATICS
+extern uint32 sdioh_get_spend_time(sdioh_info_t *sd);
+#endif
+
+#endif /* _sdio_api_h_ */
diff --git a/bcmdhd.101.10.361.x/include/bcmsdh.h b/bcmdhd.101.10.361.x/include/bcmsdh.h
new file mode 100755
index 0000000..81c3438
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/bcmsdh.h
@@ -0,0 +1,290 @@
+/*
+ * SDIO host client driver interface of Broadcom HNBU
+ * export functions to client drivers
+ * abstract OS and BUS specific details of SDIO
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+/**
+ * @file bcmsdh.h
+ */
+
+#ifndef _bcmsdh_h_
+#define _bcmsdh_h_
+
+#define BCMSDH_ERROR_VAL 0x0001 /* Error */
+#define BCMSDH_INFO_VAL 0x0002 /* Info */
+extern const uint bcmsdh_msglevel;
+
+#ifdef BCMDBG
+#define BCMSDH_ERROR(x) do { if (bcmsdh_msglevel & BCMSDH_ERROR_VAL) printf x; } while (0)
+#define BCMSDH_INFO(x) do { if (bcmsdh_msglevel & BCMSDH_INFO_VAL) printf x; } while (0)
+#else /* BCMDBG */
+#define BCMSDH_ERROR(x)
+#define BCMSDH_INFO(x)
+#endif /* BCMDBG */
+
+#if defined(BCMSDIO) && (defined(BCMSDIOH_STD) || defined(BCMSDIOH_BCM) || defined(BCMSDIOH_SPI))
+#define BCMSDH_ADAPTER
+#endif /* BCMSDIO && (BCMSDIOH_STD || BCMSDIOH_BCM || BCMSDIOH_SPI) */
+
+/* forward declarations */
+typedef struct bcmsdh_info bcmsdh_info_t;
+typedef void (*bcmsdh_cb_fn_t)(void *);
+
+#if defined(NDIS) && (NDISVER >= 0x0630) && defined(BCMDONGLEHOST)
+extern bcmsdh_info_t *bcmsdh_attach(osl_t *osh, void *cfghdl,
+ void **regsva, uint irq, shared_info_t *sh);
+#else
+
+#if defined(BT_OVER_SDIO)
+typedef enum {
+ NO_HANG_STATE = 0,
+ HANG_START_STATE = 1,
+ HANG_RECOVERY_STATE = 2
+} dhd_hang_state_t;
+#endif
+
+extern bcmsdh_info_t *bcmsdh_attach(osl_t *osh, void *sdioh, ulong *regsva);
+/**
+ * BCMSDH API context
+ */
+struct bcmsdh_info
+{
+ bool init_success; /* underlying driver successfully attached */
+ void *sdioh; /* handler for sdioh */
+ uint32 vendevid; /* Target Vendor and Device ID on SD bus */
+ osl_t *osh;
+ bool regfail; /* Save status of last reg_read/reg_write call */
+ uint32 sbwad; /* Save backplane window address */
+ void *os_cxt; /* Pointer to per-OS private data */
+ bool force_sbwad_calc; /* forces calculation of sbwad instead of using cached value */
+#ifdef DHD_WAKE_STATUS
+ unsigned int total_wake_count;
+ int pkt_wake;
+#endif /* DHD_WAKE_STATUS */
+};
+#endif /* defined(NDIS) && (NDISVER >= 0x0630) && defined(BCMDONGLEHOST) */
+
+/* Detach - freeup resources allocated in attach */
+extern int bcmsdh_detach(osl_t *osh, void *sdh);
+
+/* Query if SD device interrupts are enabled */
+extern bool bcmsdh_intr_query(void *sdh);
+
+/* Enable/disable SD interrupt */
+extern int bcmsdh_intr_enable(void *sdh);
+extern int bcmsdh_intr_disable(void *sdh);
+
+/* Register/deregister device interrupt handler. */
+extern int bcmsdh_intr_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh);
+extern int bcmsdh_intr_dereg(void *sdh);
+/* Enable/disable SD card interrupt forward */
+extern void bcmsdh_intr_forward(void *sdh, bool pass);
+
+#if defined(DHD_DEBUG) || defined(BCMDBG)
+/* Query pending interrupt status from the host controller */
+extern bool bcmsdh_intr_pending(void *sdh);
+#endif
+
+/* Register a callback to be called if and when bcmsdh detects
+ * device removal. No-op in the case of non-removable/hardwired devices.
+ */
+extern int bcmsdh_devremove_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh);
+
+/* Access SDIO address space (e.g. CCCR) using CMD52 (single-byte interface).
+ * fn: function number
+ * addr: unmodified SDIO-space address
+ * data: data byte to write
+ * err: pointer to error code (or NULL)
+ */
+extern uint8 bcmsdh_cfg_read(void *sdh, uint func, uint32 addr, int *err);
+extern void bcmsdh_cfg_write(void *sdh, uint func, uint32 addr, uint8 data, int *err);
+
+/* Read/Write 4bytes from/to cfg space */
+extern uint32 bcmsdh_cfg_read_word(void *sdh, uint fnc_num, uint32 addr, int *err);
+extern void bcmsdh_cfg_write_word(void *sdh, uint fnc_num, uint32 addr, uint32 data, int *err);
+
+/* Read CIS content for specified function.
+ * fn: function whose CIS is being requested (0 is common CIS)
+ * cis: pointer to memory location to place results
+ * length: number of bytes to read
+ * Internally, this routine uses the values from the cis base regs (0x9-0xB)
+ * to form an SDIO-space address to read the data from.
+ */
+extern int bcmsdh_cis_read(void *sdh, uint func, uint8 *cis, uint length);
+extern int bcmsdh_cisaddr_read(void *sdh, uint func, uint8 *cisd, uint offset);
+
+/* Synchronous access to device (client) core registers via CMD53 to F1.
+ * addr: backplane address (i.e. >= regsva from attach)
+ * size: register width in bytes (2 or 4)
+ * data: data for register write
+ */
+extern uint32 bcmsdh_reg_read(void *sdh, uintptr addr, uint size);
+extern uint32 bcmsdh_reg_write(void *sdh, uintptr addr, uint size, uint32 data);
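+
+/* Illustrative sketch: a 4-byte backplane read-modify-write through these
+ * helpers, checking for a failed access in between:
+ *
+ *   uint32 v = bcmsdh_reg_read(sdh, addr, 4);
+ *   if (!bcmsdh_regfail(sdh))
+ *       bcmsdh_reg_write(sdh, addr, 4, v | some_bit);
+ *
+ * addr and some_bit are placeholders for a real core register and bit mask.
+ */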
+
+/* set sb address window */
+extern int bcmsdhsdio_set_sbaddr_window(void *sdh, uint32 address, bool force_set);
+
+/* Indicate if last reg read/write failed */
+/* Replace this with status pointers in reg_read/write */
+extern bool bcmsdh_regfail(void *sdh);
+
+/* Buffer transfer to/from device (client) core via cmd53.
+ * fn: function number
+ * addr: backplane address (i.e. >= regsva from attach)
+ * flags: backplane width, address increment, sync/async
+ * buf: pointer to memory data buffer
+ * nbytes: number of bytes to transfer to/from buf
+ * pkt: pointer to packet associated with buf (if any)
+ * complete: callback function for command completion (async only)
+ * handle: handle for completion callback (first arg in callback)
+ * Returns 0 or error code.
+ * NOTE: Async operation is not currently supported.
+ */
+
+typedef void (*bcmsdh_cmplt_fn_t)(void *handle, int status, bool sync_waiting);
+extern int bcmsdh_send_buf(void *sdh, uint32 addr, uint fn, uint flags,
+ uint8 *buf, uint nbytes, void *pkt,
+ bcmsdh_cmplt_fn_t complete_fn, void *handle);
+extern int bcmsdh_recv_buf(void *sdh, uint32 addr, uint fn, uint flags,
+ uint8 *buf, uint nbytes, void *pkt,
+ bcmsdh_cmplt_fn_t complete_fn, void *handle);
+
+extern void bcmsdh_glom_post(void *sdh, uint8 *frame, void *pkt, uint len);
+extern void bcmsdh_glom_clear(void *sdh);
+extern uint bcmsdh_set_mode(void *sdh, uint mode);
+extern bool bcmsdh_glom_enabled(void);
+#ifdef PKT_STATICS
+extern uint32 bcmsdh_get_spend_time(void *sdh);
+#endif
+/* Flags bits */
+#define SDIO_REQ_4BYTE 0x1 /* Four-byte target (backplane) width (vs. two-byte) */
+#define SDIO_REQ_FIXED 0x2 /* Fixed address (FIFO) (vs. incrementing address) */
+#define SDIO_REQ_ASYNC 0x4 /* Async request (vs. sync request) */
+#define SDIO_BYTE_MODE 0x8 /* Byte mode request(non-block mode) */
+
+/* Pending (non-error) return code */
+#define BCME_PENDING 1
+
+/* Read/write to memory block (F1, no FIFO) via CMD53 (sync only).
+ * rw: read or write (0/1)
+ * addr: direct SDIO address
+ * buf: pointer to memory data buffer
+ * nbytes: number of bytes to transfer to/from buf
+ * Returns 0 or error code.
+ */
+extern int bcmsdh_rwdata(void *sdh, uint rw, uint32 addr, uint8 *buf, uint nbytes);
+
+/* Issue an abort to the specified function */
+extern int bcmsdh_abort(void *sdh, uint fn);
+
+/* Start SDIO Host Controller communication */
+extern int bcmsdh_start(void *sdh, int stage);
+
+/* Stop SDIO Host Controller communication */
+extern int bcmsdh_stop(void *sdh);
+
+/* Wait system lock free */
+extern int bcmsdh_waitlockfree(void *sdh);
+
+/* Bogosity alert. This should only know about devids gleaned through
+ * the standard CIS (versus some client dependent method), and we already
+ * have an interface for the CIS.
+ * Remove me.
+ */
+/* Returns the "Device ID" of target device on the SDIO bus. */
+extern int bcmsdh_query_device(void *sdh);
+
+/* Returns the number of IO functions reported by the device */
+extern uint bcmsdh_query_iofnum(void *sdh);
+
+/* Miscellaneous knob tweaker. */
+extern int bcmsdh_iovar_op(void *sdh, const char *name,
+ void *params, uint plen, void *arg, uint len, bool set);
+
+/* Reset and reinitialize the device */
+extern int bcmsdh_reset(bcmsdh_info_t *sdh);
+
+/* helper functions */
+
+/* callback functions */
+typedef struct {
+ /* probe the device */
+ void *(*probe)(uint16 vend_id, uint16 dev_id, uint16 bus, uint16 slot,
+ uint16 func, uint bustype, void * regsva, osl_t * osh,
+ void * param);
+ /* remove the device */
+ void (*remove)(void *context);
+ /* can we suspend now */
+ int (*suspend)(void *context);
+ /* resume from suspend */
+ int (*resume)(void *context);
+} bcmsdh_driver_t;
+
+/* platform specific/high level functions */
+extern int bcmsdh_register(bcmsdh_driver_t *driver);
+extern void bcmsdh_unregister(void);
+extern bool bcmsdh_chipmatch(uint16 vendor, uint16 device);
+extern void bcmsdh_device_remove(void * sdh);
+
+extern int bcmsdh_reg_sdio_notify(void* semaphore);
+extern void bcmsdh_unreg_sdio_notify(void);
+
+#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID)
+extern int bcmsdh_oob_intr_register(bcmsdh_info_t *bcmsdh, bcmsdh_cb_fn_t oob_irq_handler,
+ void* oob_irq_handler_context);
+extern void bcmsdh_oob_intr_unregister(bcmsdh_info_t *sdh);
+extern void bcmsdh_oob_intr_set(bcmsdh_info_t *sdh, bool enable);
+extern int bcmsdh_get_oob_intr_num(bcmsdh_info_t *bcmsdh);
+#endif /* defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) */
+extern void bcmsdh_dev_pm_stay_awake(bcmsdh_info_t *sdh);
+extern void bcmsdh_dev_relax(bcmsdh_info_t *sdh);
+extern bool bcmsdh_dev_pm_enabled(bcmsdh_info_t *sdh);
+
+int bcmsdh_suspend(bcmsdh_info_t *bcmsdh);
+int bcmsdh_resume(bcmsdh_info_t *bcmsdh);
+
+/* Function to pass device-status bits to DHD. */
+extern uint32 bcmsdh_get_dstatus(void *sdh);
+
+/* Function to return current window addr */
+extern uint32 bcmsdh_cur_sbwad(void *sdh);
+
+/* function to force sbwad calculation instead of using cached value */
+extern void bcmsdh_force_sbwad_calc(void *sdh, bool force);
+
+/* Function to pass chipid and rev to lower layers for controlling pr's */
+extern void bcmsdh_chipinfo(void *sdh, uint32 chip, uint32 chiprev);
+
+#ifdef BCMSPI
+extern void bcmsdh_dwordmode(void *sdh, bool set);
+#endif /* BCMSPI */
+
+extern int bcmsdh_sleep(void *sdh, bool enab);
+
+/* GPIO support */
+extern int bcmsdh_gpio_init(void *sd);
+extern bool bcmsdh_gpioin(void *sd, uint32 gpio);
+extern int bcmsdh_gpioouten(void *sd, uint32 gpio);
+extern int bcmsdh_gpioout(void *sd, uint32 gpio, bool enab);
+
+#endif /* _bcmsdh_h_ */
diff --git a/bcmdhd.101.10.361.x/include/bcmsdh_sdmmc.h b/bcmdhd.101.10.361.x/include/bcmsdh_sdmmc.h
new file mode 100755
index 0000000..d9a67e0
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/bcmsdh_sdmmc.h
@@ -0,0 +1,142 @@
+/*
+ * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef __BCMSDH_SDMMC_H__
+#define __BCMSDH_SDMMC_H__
+
+#ifdef BCMDBG
+#define sd_err(x) do { if (sd_msglevel & SDH_ERROR_VAL) printf x; } while (0)
+#define sd_trace(x) do { if (sd_msglevel & SDH_TRACE_VAL) printf x; } while (0)
+#define sd_info(x) do { if (sd_msglevel & SDH_INFO_VAL) printf x; } while (0)
+#define sd_debug(x) do { if (sd_msglevel & SDH_DEBUG_VAL) printf x; } while (0)
+#define sd_data(x) do { if (sd_msglevel & SDH_DATA_VAL) printf x; } while (0)
+#define sd_ctrl(x) do { if (sd_msglevel & SDH_CTRL_VAL) printf x; } while (0)
+#define sd_cost(x) do { if (sd_msglevel & SDH_COST_VAL) printf x; } while (0)
+#else
+#define sd_err(x) do { if (sd_msglevel & SDH_ERROR_VAL) printf x; } while (0)
+#define sd_trace(x)
+#define sd_info(x)
+#define sd_debug(x)
+#define sd_data(x)
+#define sd_ctrl(x)
+#define sd_cost(x) do { if (sd_msglevel & SDH_COST_VAL) printf x; } while (0)
+#endif
+
+#define sd_sync_dma(sd, read, nbytes)
+#define sd_init_dma(sd)
+#define sd_ack_intr(sd)
+#define sd_wakeup(sd)
+
+#ifdef BCMPERFSTATS
+#define sd_log(x) do { if (sd_msglevel & SDH_LOG_VAL) bcmlog x; } while (0)
+#else
+#define sd_log(x)
+#endif
+
+#define SDIOH_ASSERT(exp) \
+ do { if (!(exp)) \
+ printf("!!!ASSERT fail: file %s lines %d", __FILE__, __LINE__); \
+ } while (0)
+
+#define BLOCK_SIZE_4318 64
+#define BLOCK_SIZE_4328 512
+
+/* internal return code */
+#define SUCCESS 0
+#define ERROR 1
+
+/* private bus modes */
+#define SDIOH_MODE_SD4 2
+#define CLIENT_INTR 0x100 /* Get rid of this! */
+#define SDIOH_SDMMC_MAX_SG_ENTRIES 64
+
+struct sdioh_info {
+ osl_t *osh; /* osh handler */
+ void *bcmsdh; /* upper layer handle */
+ bool client_intr_enabled; /* interrupt connected flag */
+ bool intr_handler_valid; /* client driver interrupt handler valid */
+ sdioh_cb_fn_t intr_handler; /* registered interrupt handler */
+ void *intr_handler_arg; /* argument to call interrupt handler */
+ uint16 intmask; /* Current active interrupts */
+
+ int intrcount; /* Client interrupts */
+ bool sd_use_dma; /* DMA on CMD53 */
+ bool sd_blockmode; /* sd_blockmode == FALSE => 64 Byte Cmd 53s. */
+ /* Must be on for sd_multiblock to be effective */
+ bool use_client_ints; /* If this is false, make sure to restore */
+ int sd_mode; /* SD1/SD4/SPI */
+ int client_block_size[SDIOD_MAX_IOFUNCS]; /* Blocksize */
+ uint8 num_funcs; /* Supported funcs on client */
+ uint32 com_cis_ptr;
+ uint32 func_cis_ptr[SDIOD_MAX_IOFUNCS];
+ bool use_rxchain;
+ struct scatterlist sg_list[SDIOH_SDMMC_MAX_SG_ENTRIES];
+ struct sdio_func fake_func0;
+ struct sdio_func *func[SDIOD_MAX_IOFUNCS];
+ uint sd_clk_rate;
+ uint txglom_mode; /* Txglom mode: 0 - copy, 1 - multi-descriptor */
+#ifdef PKT_STATICS
+ uint32 sdio_spent_time_us;
+#endif
+#if !defined(OOB_INTR_ONLY)
+ struct mutex claim_host_mutex; // terence 20140926: fix for claim host issue
+#endif
+};
+
+/************************************************************
+ * Internal interfaces: per-port references into bcmsdh_sdmmc.c
+ */
+
+/* Global message bits */
+extern uint sd_msglevel;
+
+/* OS-independent interrupt handler */
+extern bool check_client_intr(sdioh_info_t *sd);
+
+/* Core interrupt enable/disable of device interrupts */
+extern void sdioh_sdmmc_devintr_on(sdioh_info_t *sd);
+extern void sdioh_sdmmc_devintr_off(sdioh_info_t *sd);
+
+/**************************************************************
+ * Internal interfaces: bcmsdh_sdmmc.c references to per-port code
+ */
+
+/* Register mapping routines */
+extern uint32 *sdioh_sdmmc_reg_map(osl_t *osh, int32 addr, int size);
+extern void sdioh_sdmmc_reg_unmap(osl_t *osh, int32 addr, int size);
+
+/* Interrupt (de)registration routines */
+extern int sdioh_sdmmc_register_irq(sdioh_info_t *sd, uint irq);
+extern void sdioh_sdmmc_free_irq(uint irq, sdioh_info_t *sd);
+
+extern sdioh_info_t *sdioh_attach(osl_t *osh, struct sdio_func *func);
+extern SDIOH_API_RC sdioh_detach(osl_t *osh, sdioh_info_t *sd);
+
+#ifdef GLOBAL_SDMMC_INSTANCE
+typedef struct _BCMSDH_SDMMC_INSTANCE {
+ sdioh_info_t *sd;
+ struct sdio_func *func[SDIOD_MAX_IOFUNCS];
+} BCMSDH_SDMMC_INSTANCE, *PBCMSDH_SDMMC_INSTANCE;
+#endif
+
+#endif /* __BCMSDH_SDMMC_H__ */
diff --git a/bcmdhd.101.10.361.x/include/bcmsdpcm.h b/bcmdhd.101.10.361.x/include/bcmsdpcm.h
new file mode 100755
index 0000000..3e01299
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/bcmsdpcm.h
@@ -0,0 +1,304 @@
+/*
+ * Broadcom SDIO/PCMCIA
+ * Software-specific definitions shared between device and host side
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _bcmsdpcm_h_
+#define _bcmsdpcm_h_
+
+/*
+ * Software allocation of To SB Mailbox resources
+ */
+
+/* intstatus bits */
+#define I_SMB_NAK I_SMB_SW0 /* To SB Mailbox Frame NAK */
+#define I_SMB_INT_ACK I_SMB_SW1 /* To SB Mailbox Host Interrupt ACK */
+#define I_SMB_USE_OOB I_SMB_SW2 /* To SB Mailbox Use OOB Wakeup */
+#define I_SMB_DEV_INT I_SMB_SW3 /* To SB Mailbox Miscellaneous Interrupt */
+
+#define I_TOSBMAIL (I_SMB_NAK | I_SMB_INT_ACK | I_SMB_USE_OOB | I_SMB_DEV_INT)
+
+/* tosbmailbox bits corresponding to intstatus bits */
+#define SMB_NAK (1 << 0) /* To SB Mailbox Frame NAK */
+#define SMB_INT_ACK (1 << 1) /* To SB Mailbox Host Interrupt ACK */
+#define SMB_USE_OOB (1 << 2) /* To SB Mailbox Use OOB Wakeup */
+#define SMB_DEV_INT (1 << 3) /* To SB Mailbox Miscellaneous Interrupt */
+#define SMB_MASK 0x0000000f /* To SB Mailbox Mask */
+
+/* tosbmailboxdata */
+
+#ifdef DS_PROT
+/* Bit msgs for custom deep sleep protocol */
+#define SMB_DATA_D3INFORM 0x100 /* host announcing D3 entry */
+#define SMB_DATA_DSACK 0x200 /* host acking a deepsleep request */
+#define SMB_DATA_DSNACK 0x400 /* host nacking a deepsleep request */
+#endif /* DS_PROT */
+/* force a trap */
+#define SMB_DATA_TRAP 0x800 /* host forcing trap */
+
+#define SMB_DATA_VERSION_MASK 0x00ff0000 /* host protocol version (sent with F2 enable) */
+#define SMB_DATA_VERSION_SHIFT 16 /* host protocol version (sent with F2 enable) */
+
+/*
+ * Software allocation of To Host Mailbox resources
+ */
+
+/* intstatus bits */
+#define I_HMB_INT_ACK I_HMB_SW0 /* To Host Mailbox Dev Interrupt ACK */
+#define I_HMB_FC_STATE I_HMB_SW0 /* To Host Mailbox Flow Control State */
+#define I_HMB_FC_CHANGE I_HMB_SW1 /* To Host Mailbox Flow Control State Changed */
+#define I_HMB_FRAME_IND I_HMB_SW2 /* To Host Mailbox Frame Indication */
+#define I_HMB_HOST_INT I_HMB_SW3 /* To Host Mailbox Miscellaneous Interrupt */
+
+#define I_TOHOSTMAIL (I_HMB_INT_ACK | I_HMB_FRAME_IND | I_HMB_HOST_INT)
+
+/* tohostmailbox bits corresponding to intstatus bits */
+#define HMB_INT_ACK (1 << 0) /* To Host Mailbox Dev Interrupt ACK */
+#define HMB_FRAME_IND (1 << 2) /* To Host Mailbox Frame Indication */
+#define HMB_HOST_INT (1 << 3) /* To Host Mailbox Miscellaneous Interrupt */
+#define HMB_MASK 0x0000000f /* To Host Mailbox Mask */
+
+/* tohostmailboxdata */
+#define HMB_DATA_NAKHANDLED 0x01 /* we're ready to retransmit NAK'd frame to host */
+#define HMB_DATA_DEVREADY 0x02 /* we're ready to talk to the host after enable */
+#define HMB_DATA_FC 0x04 /* per prio flowcontrol update flag to host */
+#define HMB_DATA_FWREADY 0x08 /* firmware is ready for protocol activity */
+#define HMB_DATA_FWHALT 0x10 /* firmware has halted operation */
+
+#ifdef DS_PROT
+/* Bit msgs for custom deep sleep protocol */
+#define HMB_DATA_DSREQ 0x100 /* firmware requesting deepsleep entry */
+#define HMB_DATA_DSEXIT 0x200 /* firmware announcing deepsleep exit */
+#define HMB_DATA_D3ACK 0x400 /* firmware acking a D3 notice from host */
+#define HMB_DATA_D3EXIT 0x800 /* firmware announcing D3 exit */
+#define HMB_DATA_DSPROT_MASK 0xf00
+#endif /* DS_PROT */
+
+#define HMB_DATA_FCDATA_MASK 0xff000000 /* per prio flowcontrol data */
+#define HMB_DATA_FCDATA_SHIFT 24 /* per prio flowcontrol data */
+
+#define HMB_DATA_VERSION_MASK 0x00ff0000 /* device protocol version (with devready) */
+#define HMB_DATA_VERSION_SHIFT 16 /* device protocol version (with devready) */
+
+/*
+ * Software-defined protocol header
+ */
+/* Replace all this with packed struct */
+
+/* Current protocol version */
+#define SDPCM_PROT_VERSION 4
+
+/* SW frame header */
+#define SDPCM_SEQUENCE_MASK 0x000000ff /* Sequence Number Mask */
+#define SDPCM_PACKET_SEQUENCE(p) (((uint8 *)p)[0] & 0xff) /* p starts w/SW Header */
+
+#define SDPCM_CHANNEL_MASK 0x00000f00 /* Channel Number Mask */
+#define SDPCM_CHANNEL_SHIFT 8 /* Channel Number Shift */
+#define SDPCM_PACKET_CHANNEL(p) (((uint8 *)p)[1] & 0x0f) /* p starts w/SW Header */
+
+#define SDPCM_FLAGS_MASK 0x0000f000 /* Mask of flag bits */
+#define SDPCM_FLAGS_SHIFT 12 /* Flag bits shift */
+#define SDPCM_PACKET_FLAGS(p) ((((uint8 *)p)[1] & 0xf0) >> 4) /* p starts w/SW Header */
+
+/* Next Read Len: lookahead length of next frame, in 16-byte units (rounded up) */
+#define SDPCM_NEXTLEN_MASK 0x00ff0000 /* Next Read Len Mask */
+#define SDPCM_NEXTLEN_SHIFT 16 /* Next Read Len Shift */
+#define SDPCM_NEXTLEN_VALUE(p) ((((uint8 *)p)[2] & 0xff) << 4) /* p starts w/SW Header */
+#define SDPCM_NEXTLEN_OFFSET 2
+
+/* Data Offset from SOF (HW Tag, SW Tag, Pad) */
+#define SDPCM_DOFFSET_OFFSET 3 /* Data Offset */
+#define SDPCM_DOFFSET_VALUE(p) (((uint8 *)p)[SDPCM_DOFFSET_OFFSET] & 0xff)
+#define SDPCM_DOFFSET_MASK 0xff000000
+#define SDPCM_DOFFSET_SHIFT 24
+
+#define SDPCM_FCMASK_OFFSET 4 /* Flow control */
+#define SDPCM_FCMASK_VALUE(p) (((uint8 *)p)[SDPCM_FCMASK_OFFSET ] & 0xff)
+#define SDPCM_WINDOW_OFFSET 5 /* Credit based fc */
+#define SDPCM_WINDOW_VALUE(p) (((uint8 *)p)[SDPCM_WINDOW_OFFSET] & 0xff)
+#define SDPCM_VERSION_OFFSET 6 /* Version # */
+#define SDPCM_VERSION_VALUE(p) (((uint8 *)p)[SDPCM_VERSION_OFFSET] & 0xff)
+#define SDPCM_UNUSED_OFFSET 7 /* Spare */
+#define SDPCM_UNUSED_VALUE(p) (((uint8 *)p)[SDPCM_UNUSED_OFFSET] & 0xff)
+
+#define SDPCM_SWHEADER_LEN 8 /* SW header is 64 bits */
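+
+/* Illustrative sketch: given p pointing at the SW header, the accessors above
+ * decode it field by field, e.g.
+ *
+ *   uint8 seq = SDPCM_PACKET_SEQUENCE(p);
+ *   uint8 chan = SDPCM_PACKET_CHANNEL(p);
+ *   uint16 nextlen = SDPCM_NEXTLEN_VALUE(p);   (already scaled to bytes)
+ *   uint8 doff = SDPCM_DOFFSET_VALUE(p);
+ */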
+
+/* logical channel numbers */
+#define SDPCM_CONTROL_CHANNEL 0 /* Control Request/Response Channel Id */
+#define SDPCM_EVENT_CHANNEL 1 /* Async Event Indication Channel Id */
+#define SDPCM_DATA_CHANNEL 2 /* Data Xmit/Recv Channel Id */
+#define SDPCM_GLOM_CHANNEL 3 /* For coalesced packets (superframes) */
+#define SDPCM_TEST_CHANNEL 15 /* Reserved for test/debug packets */
+#define SDPCM_MAX_CHANNEL 15
+
+#define SDPCM_SEQUENCE_WRAP 256 /* wrap-around val for eight-bit frame seq number */
+
+#define SDPCM_FLAG_RESVD0 0x01
+#define SDPCM_FLAG_RESVD1 0x02
+#define SDPCM_FLAG_GSPI_TXENAB 0x04 /* GSPI Tx enable (PR55150 only) */
+#define SDPCM_FLAG_GLOMDESC 0x08 /* Superframe descriptor mask */
+
+/* For GLOM_CHANNEL frames, use a flag to indicate descriptor frame */
+#define SDPCM_GLOMDESC_FLAG (SDPCM_FLAG_GLOMDESC << SDPCM_FLAGS_SHIFT)
+
+#define SDPCM_GLOMDESC(p) (((uint8 *)p)[1] & 0x80)
+
+/* For TEST_CHANNEL packets, define another 4-byte header */
+#define SDPCM_TEST_HDRLEN 4 /* Generally: Cmd(1), Ext(1), Len(2);
+ * Semantics of Ext byte depend on command.
+ * Len is current or requested frame length, not
+ * including test header; sent little-endian.
+ */
+#define SDPCM_TEST_PKT_CNT_FLD_LEN 4 /* Packet count field length */
+#define SDPCM_TEST_DISCARD 0x01 /* Receiver discards. Ext is a pattern id. */
+#define SDPCM_TEST_ECHOREQ 0x02 /* Echo request. Ext is a pattern id. */
+#define SDPCM_TEST_ECHORSP 0x03 /* Echo response. Ext is a pattern id. */
+#define SDPCM_TEST_BURST 0x04 /* Receiver to send a burst. Ext is a frame count
+ * (backward compatibility). Set frame count in a
+ * 4-byte field adjacent to the HDR
+ */
+#define SDPCM_TEST_SEND 0x05 /* Receiver sets send mode. Ext is boolean on/off.
+ * Set frame count in a 4-byte field adjacent to
+ * the HDR
+ */
+
+/* Handy macro for filling in datagen packets with a pattern */
+#define SDPCM_TEST_FILL(byteno, id) ((uint8)(id + byteno))
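+
+/* Illustrative sketch (not part of the driver): filling a datagen payload
+ * with the test-channel pattern; "buf", "len" and "id" are hypothetical.
+ */
+#if 0
+static void
+sdpcm_test_fill_example(uint8 *buf, uint len, uint8 id)
+{
+	uint i;
+	for (i = 0; i < len; i++)
+		buf[i] = SDPCM_TEST_FILL(i, id);	/* pattern byte = id + offset */
+}
+#endif /* example */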
+
+/*
+ * Software counters (first part matches hardware counters)
+ */
+
+typedef volatile struct {
+ uint32 cmd52rd; /* Cmd52RdCount, SDIO: cmd52 reads */
+ uint32 cmd52wr; /* Cmd52WrCount, SDIO: cmd52 writes */
+ uint32 cmd53rd; /* Cmd53RdCount, SDIO: cmd53 reads */
+ uint32 cmd53wr; /* Cmd53WrCount, SDIO: cmd53 writes */
+ uint32 abort; /* AbortCount, SDIO: aborts */
+ uint32 datacrcerror; /* DataCrcErrorCount, SDIO: frames w/CRC error */
+ uint32 rdoutofsync; /* RdOutOfSyncCount, SDIO/PCMCIA: Rd Frm out of sync */
+	uint32 wroutofsync;	/* WrOutOfSyncCount, SDIO/PCMCIA: Wr Frm out of sync */
+ uint32 writebusy; /* WriteBusyCount, SDIO: device asserted "busy" */
+ uint32 readwait; /* ReadWaitCount, SDIO: no data ready for a read cmd */
+ uint32 readterm; /* ReadTermCount, SDIO: read frame termination cmds */
+ uint32 writeterm; /* WriteTermCount, SDIO: write frames termination cmds */
+ uint32 rxdescuflo; /* receive descriptor underflows */
+ uint32 rxfifooflo; /* receive fifo overflows */
+ uint32 txfifouflo; /* transmit fifo underflows */
+ uint32 runt; /* runt (too short) frames recv'd from bus */
+ uint32 badlen; /* frame's rxh len does not match its hw tag len */
+ uint32 badcksum; /* frame's hw tag chksum doesn't agree with len value */
+ uint32 seqbreak; /* break in sequence # space from one rx frame to the next */
+ uint32 rxfcrc; /* frame rx header indicates crc error */
+ uint32 rxfwoos; /* frame rx header indicates write out of sync */
+ uint32 rxfwft; /* frame rx header indicates write frame termination */
+ uint32 rxfabort; /* frame rx header indicates frame aborted */
+ uint32 woosint; /* write out of sync interrupt */
+ uint32 roosint; /* read out of sync interrupt */
+ uint32 rftermint; /* read frame terminate interrupt */
+ uint32 wftermint; /* write frame terminate interrupt */
+} sdpcmd_cnt_t;
+
+/*
+ * Register Access Macros
+ */
+
+#define SDIODREV_IS(var, val) ((var) == (val))
+#define SDIODREV_GE(var, val) ((var) >= (val))
+#define SDIODREV_GT(var, val) ((var) > (val))
+#define SDIODREV_LT(var, val) ((var) < (val))
+#define SDIODREV_LE(var, val) ((var) <= (val))
+
+#define SDIODDMAREG32(h, dir, chnl) \
+ ((dir) == DMA_TX ? \
+ (void *)(uintptr)&((h)->regs->dma.sdiod32.dma32regs[chnl].xmt) : \
+ (void *)(uintptr)&((h)->regs->dma.sdiod32.dma32regs[chnl].rcv))
+
+#define SDIODDMAREG64(h, dir, chnl) \
+ ((dir) == DMA_TX ? \
+ (void *)(uintptr)&((h)->regs->dma.sdiod64.dma64regs[chnl].xmt) : \
+ (void *)(uintptr)&((h)->regs->dma.sdiod64.dma64regs[chnl].rcv))
+
+#define SDIODDMAREG(h, dir, chnl) \
+ (SDIODREV_LT((h)->corerev, 1) ? \
+ SDIODDMAREG32((h), (dir), (chnl)) : \
+ SDIODDMAREG64((h), (dir), (chnl)))
+
+#define PCMDDMAREG(h, dir, chnl) \
+ ((dir) == DMA_TX ? \
+ (void *)(uintptr)&((h)->regs->dma.pcm32.dmaregs.xmt) : \
+ (void *)(uintptr)&((h)->regs->dma.pcm32.dmaregs.rcv))
+
+#define SDPCMDMAREG(h, dir, chnl, coreid) \
+ ((coreid) == SDIOD_CORE_ID ? \
+ SDIODDMAREG(h, dir, chnl) : \
+ PCMDDMAREG(h, dir, chnl))
+
+#define SDIODFIFOREG(h, corerev) \
+ (SDIODREV_LT((corerev), 1) ? \
+ ((dma32diag_t *)(uintptr)&((h)->regs->dma.sdiod32.dmafifo)) : \
+ ((dma32diag_t *)(uintptr)&((h)->regs->dma.sdiod64.dmafifo)))
+
+#define PCMDFIFOREG(h) \
+ ((dma32diag_t *)(uintptr)&((h)->regs->dma.pcm32.dmafifo))
+
+#define SDPCMFIFOREG(h, coreid, corerev) \
+ ((coreid) == SDIOD_CORE_ID ? \
+ SDIODFIFOREG(h, corerev) : \
+ PCMDFIFOREG(h))
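+
+/* Illustrative sketch (not part of the driver): the macros above hide the
+ * core-id/core-rev differences, so fetching a channel's DMA registers is a
+ * single expression; "h" is a hypothetical handle with the regs/corerev
+ * layout the macros assume, and DMA_TX/DMA_RX come from the DMA headers.
+ */
+#if 0
+	void *txregs = SDPCMDMAREG(h, DMA_TX, 0, SDIOD_CORE_ID);	/* chnl 0 xmt */
+	void *rxregs = SDPCMDMAREG(h, DMA_RX, 0, SDIOD_CORE_ID);	/* chnl 0 rcv */
+#endif /* example */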
+
+/*
+ * Shared structure between dongle and the host.
+ * The structure contains pointers to trap or assert information.
+ */
+#define SDPCM_SHARED_VERSION 0x0001
+#define SDPCM_SHARED_VERSION_MASK 0x00FF
+#define SDPCM_SHARED_ASSERT_BUILT 0x0100
+#define SDPCM_SHARED_ASSERT 0x0200
+#define SDPCM_SHARED_TRAP 0x0400
+#define SDPCM_SHARED_IN_BRPT 0x0800
+#define SDPCM_SHARED_SET_BRPT 0x1000
+#define SDPCM_SHARED_PENDING_BRPT 0x2000
+#define SDPCM_SHARED_FATAL_LOGBUF_VALID 0x100000
+#define SDPCM_SHARED_RXLIM_POST 0x4000
+#define SDPCM_SHARED_TXSEQ_SYNC 0x4000
+
+typedef struct {
+ uint32 flags;
+ uint32 trap_addr;
+ uint32 assert_exp_addr;
+ uint32 assert_file_addr;
+ uint32 assert_line;
+ uint32 console_addr; /* Address of hnd_cons_t */
+ uint32 msgtrace_addr;
+ uint32 fwid;
+ uint32 device_fatal_logbuf_start;
+#ifdef BCMSDIO_TXSEQ_SYNC
+ uint32 txseq_sync_addr;
+#endif /* BCMSDIO_TXSEQ_SYNC */
+} sdpcm_shared_t;
+
+/* Device F/W provides the following access function:
+ * sdpcm_shared_t *hnd_get_sdpcm_shared(void);
+ */
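+
+/* Illustrative sketch (not part of the driver): after copying sdpcm_shared_t
+ * out of the dongle, a host-side check of the version field and the
+ * trap/assert flag bits might look as follows; "sh" is hypothetical.
+ */
+#if 0
+static bool
+sdpcm_shared_fault_example(const sdpcm_shared_t *sh)
+{
+	if ((sh->flags & SDPCM_SHARED_VERSION_MASK) != SDPCM_SHARED_VERSION)
+		return TRUE;	/* host/dongle protocol version mismatch */
+	/* if TRAP or ASSERT is set, trap_addr/assert_* point at the details */
+	return (sh->flags & (SDPCM_SHARED_TRAP | SDPCM_SHARED_ASSERT)) != 0;
+}
+#endif /* example */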
+
+#endif /* _bcmsdpcm_h_ */
diff --git a/bcmdhd.101.10.361.x/include/bcmspi.h b/bcmdhd.101.10.361.x/include/bcmspi.h
new file mode 100755
index 0000000..bfdbab2
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/bcmspi.h
@@ -0,0 +1,37 @@
+/*
+ * Broadcom SPI Low-Level Hardware Driver API
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+#ifndef _BCM_SPI_H
+#define _BCM_SPI_H
+
+extern void spi_devintr_off(sdioh_info_t *sd);
+extern void spi_devintr_on(sdioh_info_t *sd);
+extern bool spi_start_clock(sdioh_info_t *sd, uint16 new_sd_divisor);
+extern bool spi_controller_highspeed_mode(sdioh_info_t *sd, bool hsmode);
+extern bool spi_check_client_intr(sdioh_info_t *sd, int *is_dev_intr);
+extern bool spi_hw_attach(sdioh_info_t *sd);
+extern bool spi_hw_detach(sdioh_info_t *sd);
+extern void spi_sendrecv(sdioh_info_t *sd, uint8 *msg_out, uint8 *msg_in, int msglen);
+extern void spi_spinbits(sdioh_info_t *sd);
+extern void spi_waitbits(sdioh_info_t *sd, bool yield);
+
+#endif /* _BCM_SPI_H */
diff --git a/bcmdhd.101.10.361.x/include/bcmspibrcm.h b/bcmdhd.101.10.361.x/include/bcmspibrcm.h
new file mode 100755
index 0000000..298edda
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/bcmspibrcm.h
@@ -0,0 +1,165 @@
+/*
+ * SD-SPI Protocol Conversion - BCMSDH->gSPI Translation Layer
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+#ifndef _BCM_SPI_BRCM_H
+#define _BCM_SPI_BRCM_H
+
+#ifndef SPI_MAX_IOFUNCS
+/* Maximum number of I/O funcs */
+#define SPI_MAX_IOFUNCS 4
+#endif
+/* global msglevel for debug messages - bitvals come from sdiovar.h */
+
+#if defined(BCMDBG) || defined(DHD_DEBUG)
+#define sd_err(x) do { if (sd_msglevel & SDH_ERROR_VAL) printf x; } while (0)
+#define sd_trace(x) do { if (sd_msglevel & SDH_TRACE_VAL) printf x; } while (0)
+#define sd_info(x) do { if (sd_msglevel & SDH_INFO_VAL) printf x; } while (0)
+#define sd_debug(x) do { if (sd_msglevel & SDH_DEBUG_VAL) printf x; } while (0)
+#define sd_data(x) do { if (sd_msglevel & SDH_DATA_VAL) printf x; } while (0)
+#define sd_ctrl(x) do { if (sd_msglevel & SDH_CTRL_VAL) printf x; } while (0)
+#else
+#define sd_err(x)
+#define sd_trace(x)
+#define sd_info(x)
+#define sd_debug(x)
+#define sd_data(x)
+#define sd_ctrl(x)
+#endif
+
+#ifdef BCMPERFSTATS
+#define sd_log(x) do { if (sd_msglevel & SDH_LOG_VAL) bcmlog x; } while (0)
+#else
+#define sd_log(x)
+#endif
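+
+/* Illustrative usage note (not part of the driver): these macros take the
+ * whole printf argument list as a single parenthesized token, hence the
+ * double parentheses at call sites; "rsp" is hypothetical.
+ */
+#if 0
+	sd_trace(("%s: enter, irq %d\n", __FUNCTION__, sd->irq));
+	sd_err(("%s: bad response 0x%x\n", __FUNCTION__, rsp));
+#endif /* example */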
+
+#define SDIOH_ASSERT(exp) \
+ do { if (!(exp)) \
+		printf("!!!ASSERT fail: file %s line %d", __FILE__, __LINE__); \
+ } while (0)
+
+#define BLOCK_SIZE_F1 64
+#define BLOCK_SIZE_F2 2048
+#define BLOCK_SIZE_F3 2048
+
+/* internal return code */
+#define SUCCESS 0
+#undef ERROR
+#define ERROR 1
+#define ERROR_UF 2
+#define ERROR_OF 3
+
+/* private bus modes */
+#define SDIOH_MODE_SPI 0
+
+#define USE_BLOCKMODE 0x2 /* Block mode can be single block or multi */
+#define USE_MULTIBLOCK 0x4
+
+struct sdioh_info {
+ uint cfg_bar; /* pci cfg address for bar */
+ uint32 caps; /* cached value of capabilities reg */
+#ifndef BCMSPI_ANDROID
+ void *bar0; /* BAR0 for PCI Device */
+#endif /* !BCMSPI_ANDROID */
+ osl_t *osh; /* osh handler */
+ void *controller; /* Pointer to SPI Controller's private data struct */
+ uint lockcount; /* nest count of spi_lock() calls */
+	bool client_intr_enabled;	/* interrupt connected flag */
+ bool intr_handler_valid; /* client driver interrupt handler valid */
+ sdioh_cb_fn_t intr_handler; /* registered interrupt handler */
+ void *intr_handler_arg; /* argument to call interrupt handler */
+ bool initialized; /* card initialized */
+ uint32 target_dev; /* Target device ID */
+ uint32 intmask; /* Current active interrupts */
+ void *sdos_info; /* Pointer to per-OS private data */
+ uint32 controller_type; /* Host controller type */
+ uint8 version; /* Host Controller Spec Compliance Version */
+ uint irq; /* Client irq */
+ uint32 intrcount; /* Client interrupts */
+ uint32 local_intrcount; /* Controller interrupts */
+ bool host_init_done; /* Controller initted */
+ bool card_init_done; /* Client SDIO interface initted */
+ bool polled_mode; /* polling for command completion */
+
+ bool sd_use_dma; /* DMA on CMD53 */
+ bool sd_blockmode; /* sd_blockmode == FALSE => 64 Byte Cmd 53s. */
+ /* Must be on for sd_multiblock to be effective */
+ bool use_client_ints; /* If this is false, make sure to restore */
+ /* polling hack in wl_linux.c:wl_timer() */
+ int adapter_slot; /* Maybe dealing with multiple slots/controllers */
+ int sd_mode; /* SD1/SD4/SPI */
+ int client_block_size[SPI_MAX_IOFUNCS]; /* Blocksize */
+ uint32 data_xfer_count; /* Current transfer */
+ uint16 card_rca; /* Current Address */
+ uint8 num_funcs; /* Supported funcs on client */
+ uint32 card_dstatus; /* 32bit device status */
+ uint32 com_cis_ptr;
+ uint32 func_cis_ptr[SPI_MAX_IOFUNCS];
+ void *dma_buf;
+ ulong dma_phys;
+ int r_cnt; /* rx count */
+ int t_cnt; /* tx_count */
+ uint32 wordlen; /* host processor 16/32bits */
+ uint32 prev_fun;
+ uint32 chip;
+ uint32 chiprev;
+ bool resp_delay_all;
+ bool dwordmode;
+ bool resp_delay_new;
+
+ struct spierrstats_t spierrstats;
+};
+
+/************************************************************
+ * Internal interfaces: per-port references into bcmspibrcm.c
+ */
+
+/* Global message bits */
+extern uint sd_msglevel;
+
+/**************************************************************
+ * Internal interfaces: bcmspibrcm.c references to per-port code
+ */
+
+/* Interrupt (de)registration routines */
+extern int spi_register_irq(sdioh_info_t *sd, uint irq);
+extern void spi_free_irq(uint irq, sdioh_info_t *sd);
+
+/* OS-specific interrupt wrappers (atomic interrupt enable/disable) */
+extern void spi_lock(sdioh_info_t *sd);
+extern void spi_unlock(sdioh_info_t *sd);
+
+/* Allocate/init/free per-OS private data */
+extern int spi_osinit(sdioh_info_t *sd);
+extern void spi_osfree(sdioh_info_t *sd);
+
+#define SPI_RW_FLAG_M BITFIELD_MASK(1) /* Bit [31] - R/W Command Bit */
+#define SPI_RW_FLAG_S 31
+#define SPI_ACCESS_M BITFIELD_MASK(1) /* Bit [30] - Fixed/Incr Access */
+#define SPI_ACCESS_S 30
+#define SPI_FUNCTION_M BITFIELD_MASK(2) /* Bit [29:28] - Function Number */
+#define SPI_FUNCTION_S 28
+#define SPI_REG_ADDR_M BITFIELD_MASK(17) /* Bit [27:11] - Address */
+#define SPI_REG_ADDR_S 11
+#define SPI_LEN_M BITFIELD_MASK(11) /* Bit [10:0] - Packet length */
+#define SPI_LEN_S 0
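+
+/* Illustrative sketch (not part of the driver): packing a 32-bit gSPI
+ * command word from the fields above; BITFIELD_MASK() is assumed to come
+ * from the common bcm headers, and "write", "fn", "addr", "len" are
+ * hypothetical inputs.
+ */
+#if 0
+static uint32
+gspi_cmd_word_example(bool write, uint32 fn, uint32 addr, uint32 len)
+{
+	return (((write ? 1u : 0u) & SPI_RW_FLAG_M) << SPI_RW_FLAG_S) |
+	       ((1u & SPI_ACCESS_M) << SPI_ACCESS_S) |	/* incremental access */
+	       ((fn & SPI_FUNCTION_M) << SPI_FUNCTION_S) |
+	       ((addr & SPI_REG_ADDR_M) << SPI_REG_ADDR_S) |
+	       ((len & SPI_LEN_M) << SPI_LEN_S);
+}
+#endif /* example */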
+
+#endif /* _BCM_SPI_BRCM_H */
diff --git a/bcmdhd.101.10.361.x/include/bcmsrom.h b/bcmdhd.101.10.361.x/include/bcmsrom.h
new file mode 100755
index 0000000..f3008b2
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/bcmsrom.h
@@ -0,0 +1,72 @@
+/*
+ * Misc useful routines to access NIC local SROM/OTP.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _bcmsrom_h_
+#define _bcmsrom_h_
+
+#include <typedefs.h>
+#include <osl_decl.h>
+#include <siutils.h>
+
+#include <bcmsrom_fmt.h>
+
+typedef struct srom_info {
+ char *_srom_vars;
+ bool is_caldata_prsnt;
+} srom_info_t;
+
+/* Prototypes */
+extern int srom_var_init(si_t *sih, uint bus, volatile void *curmap, osl_t *osh,
+ char **vars, uint *count);
+extern void srom_var_deinit(si_t *sih);
+
+extern int srom_read(si_t *sih, uint bus, volatile void *curmap, osl_t *osh,
+ uint byteoff, uint nbytes, uint16 *buf,
+ bool check_crc);
+
+extern int srom_write(si_t *sih, uint bus, volatile void *curmap, osl_t *osh,
+ uint byteoff, uint nbytes, uint16 *buf);
+
+extern int srom_write_short(si_t *sih, uint bustype, volatile void *curmap, osl_t *osh,
+ uint byteoff, uint16 value);
+extern int srom_otp_cisrwvar(si_t *sih, osl_t *osh, char *vars, int *count);
+extern int srom_otp_write_region_crc(si_t *sih, uint nbytes, uint16* buf16, bool write);
+
+/* Parse standard PCMCIA CIS, normally used by SB/PCMCIA/SDIO/SPI/OTP,
+ * and extract it into name=value pairs
+ */
+extern int srom_parsecis(si_t *sih, osl_t *osh, uint8 **pcis, uint ciscnt,
+ char **vars, uint *count);
+extern int _initvars_srom_pci_caldata(si_t *sih, uint16 *srom, uint32 sromrev);
+extern void srom_set_sromvars(char *vars);
+extern char * srom_get_sromvars(void);
+extern srom_info_t * srom_info_init(osl_t *osh);
+extern int get_srom_pci_caldata_size(uint32 sromrev);
+extern uint32 get_srom_size(uint32 sromrev);
+
+/* Return sprom size in 16-bit words */
+extern uint srom_size(si_t *sih, osl_t *osh);
+
+extern bool srom_caldata_prsnt(si_t *sih);
+extern int srom_get_caldata(si_t *sih, uint16 *srom);
+#endif /* _bcmsrom_h_ */
diff --git a/bcmdhd.101.10.361.x/include/bcmsrom_fmt.h b/bcmdhd.101.10.361.x/include/bcmsrom_fmt.h
new file mode 100755
index 0000000..97e3e4d
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/bcmsrom_fmt.h
@@ -0,0 +1,1028 @@
+/*
+ * SROM format definition.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _bcmsrom_fmt_h_
+#define _bcmsrom_fmt_h_
+
+#define SROM_MAXREV 18 /* max revision supported by driver */
+
+/* Maximum srom: 16 Kilobits == 2048 bytes */
+
+#define SROM_MAX 2048
+#define SROM_MAXW 1024
+
+#ifdef LARGE_NVRAM_MAXSZ
+#define VARS_MAX LARGE_NVRAM_MAXSZ
+#else
+#if defined(BCMROMBUILD) || defined(DONGLEBUILD)
+#define VARS_MAX 4096
+#else
+#define LARGE_NVRAM_MAXSZ 8192
+#define VARS_MAX LARGE_NVRAM_MAXSZ
+#endif /* BCMROMBUILD || DONGLEBUILD */
+#endif /* LARGE_NVRAM_MAXSZ */
+
+/* PCI fields */
+#define PCI_F0DEVID 48
+
+/* SROM Rev 2: 1 Kilobit map for 11a/b/g devices.
+ * SROM Rev 3: Upward compatible modification for lpphy and PCIe
+ * hardware workaround.
+ */
+
+#define SROM_WORDS 64
+#define SROM_SIGN_MINWORDS 128
+#define SROM3_SWRGN_OFF 28 /* s/w region offset in words */
+
+#define SROM_SSID 2
+#define SROM_SVID 3
+
+#define SROM_WL1LHMAXP 29
+
+#define SROM_WL1LPAB0 30
+#define SROM_WL1LPAB1 31
+#define SROM_WL1LPAB2 32
+
+#define SROM_WL1HPAB0 33
+#define SROM_WL1HPAB1 34
+#define SROM_WL1HPAB2 35
+
+#define SROM_MACHI_IL0 36
+#define SROM_MACMID_IL0 37
+#define SROM_MACLO_IL0 38
+#define SROM_MACHI_ET0 39
+#define SROM_MACMID_ET0 40
+#define SROM_MACLO_ET0 41
+#define SROM_MACHI_ET1 42
+#define SROM_MACMID_ET1 43
+#define SROM_MACLO_ET1 44
+#define SROM3_MACHI 37
+#define SROM3_MACMID 38
+#define SROM3_MACLO 39
+
+#define SROM_BXARSSI2G 40
+#define SROM_BXARSSI5G 41
+
+#define SROM_TRI52G 42
+#define SROM_TRI5GHL 43
+
+#define SROM_RXPO52G 45
+
+#define SROM2_ENETPHY 45
+
+#define SROM_AABREV 46
+/* Fields in AABREV */
+#define SROM_BR_MASK 0x00ff
+#define SROM_CC_MASK 0x0f00
+#define SROM_CC_SHIFT 8
+#define SROM_AA0_MASK 0x3000
+#define SROM_AA0_SHIFT 12
+#define SROM_AA1_MASK 0xc000
+#define SROM_AA1_SHIFT 14
+
+#define SROM_WL0PAB0 47
+#define SROM_WL0PAB1 48
+#define SROM_WL0PAB2 49
+
+#define SROM_LEDBH10 50
+#define SROM_LEDBH32 51
+
+#define SROM_WL10MAXP 52
+
+#define SROM_WL1PAB0 53
+#define SROM_WL1PAB1 54
+#define SROM_WL1PAB2 55
+
+#define SROM_ITT 56
+
+#define SROM_BFL 57
+#define SROM_BFL2 28
+#define SROM3_BFL2 61
+
+#define SROM_AG10 58
+
+#define SROM_CCODE 59
+
+#define SROM_OPO 60
+
+#define SROM3_LEDDC 62
+
+#define SROM_CRCREV 63
+
+/* SROM Rev 4: Reallocate the software part of the srom to accommodate
+ * MIMO features. It assumes up to two PCIe functions and 440 bytes
+ * of usable srom, i.e. the usable storage in chips with OTP that
+ * implements hardware redundancy.
+ */
+
+#define SROM4_WORDS 220
+
+#define SROM4_SIGN 32
+#define SROM4_SIGNATURE 0x5372
+
+#define SROM4_BREV 33
+
+#define SROM4_BFL0 34
+#define SROM4_BFL1 35
+#define SROM4_BFL2 36
+#define SROM4_BFL3 37
+#define SROM5_BFL0 37
+#define SROM5_BFL1 38
+#define SROM5_BFL2 39
+#define SROM5_BFL3 40
+
+#define SROM4_MACHI 38
+#define SROM4_MACMID 39
+#define SROM4_MACLO 40
+#define SROM5_MACHI 41
+#define SROM5_MACMID 42
+#define SROM5_MACLO 43
+
+#define SROM4_CCODE 41
+#define SROM4_REGREV 42
+#define SROM5_CCODE 34
+#define SROM5_REGREV 35
+
+#define SROM4_LEDBH10 43
+#define SROM4_LEDBH32 44
+#define SROM5_LEDBH10 59
+#define SROM5_LEDBH32 60
+
+#define SROM4_LEDDC 45
+#define SROM5_LEDDC 45
+
+#define SROM4_AA 46
+#define SROM4_AA2G_MASK 0x00ff
+#define SROM4_AA2G_SHIFT 0
+#define SROM4_AA5G_MASK 0xff00
+#define SROM4_AA5G_SHIFT 8
+
+#define SROM4_AG10 47
+#define SROM4_AG32 48
+
+#define SROM4_TXPID2G 49
+#define SROM4_TXPID5G 51
+#define SROM4_TXPID5GL 53
+#define SROM4_TXPID5GH 55
+
+#define SROM4_TXRXC 61
+#define SROM4_TXCHAIN_MASK 0x000f
+#define SROM4_TXCHAIN_SHIFT 0
+#define SROM4_RXCHAIN_MASK 0x00f0
+#define SROM4_RXCHAIN_SHIFT 4
+#define SROM4_SWITCH_MASK 0xff00
+#define SROM4_SWITCH_SHIFT 8
+
+/* Per-path fields */
+#define MAX_PATH_SROM 4
+#define SROM4_PATH0 64
+#define SROM4_PATH1 87
+#define SROM4_PATH2 110
+#define SROM4_PATH3 133
+
+#define SROM4_2G_ITT_MAXP 0
+#define SROM4_2G_PA 1
+#define SROM4_5G_ITT_MAXP 5
+#define SROM4_5GLH_MAXP 6
+#define SROM4_5G_PA 7
+#define SROM4_5GL_PA 11
+#define SROM4_5GH_PA 15
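+
+/* Illustrative sketch (not part of the driver): per-path values live at
+ * (path base + field offset), e.g. the first 2G PA word of path 1 is
+ * srom[SROM4_PATH1 + SROM4_2G_PA]; "srom" is a hypothetical word array
+ * holding the SROM image.
+ */
+#if 0
+static uint16
+srom4_path_word_example(const uint16 *srom)
+{
+	return srom[SROM4_PATH1 + SROM4_2G_PA];	/* path 1, 2G PA, word 0 */
+}
+#endif /* example */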
+
+/* Fields in the ITT_MAXP and 5GLH_MAXP words */
+#define B2G_MAXP_MASK 0xff
+#define B2G_ITT_SHIFT 8
+#define B5G_MAXP_MASK 0xff
+#define B5G_ITT_SHIFT 8
+#define B5GH_MAXP_MASK 0xff
+#define B5GL_MAXP_SHIFT 8
+
+/* All the myriad power offsets */
+#define SROM4_2G_CCKPO 156
+#define SROM4_2G_OFDMPO 157
+#define SROM4_5G_OFDMPO 159
+#define SROM4_5GL_OFDMPO 161
+#define SROM4_5GH_OFDMPO 163
+#define SROM4_2G_MCSPO 165
+#define SROM4_5G_MCSPO 173
+#define SROM4_5GL_MCSPO 181
+#define SROM4_5GH_MCSPO 189
+#define SROM4_CDDPO 197
+#define SROM4_STBCPO 198
+#define SROM4_BW40PO 199
+#define SROM4_BWDUPPO 200
+
+#define SROM4_CRCREV 219
+
+/* SROM Rev 8: Make space for a 48-word hardware header for PCIe rev >= 6.
+ * This is a combined srom for both MIMO and SISO boards, usable in
+ * the .130 4-Kilobit OTP with hardware redundancy.
+ */
+
+#define SROM8_SIGN 64
+
+#define SROM8_BREV 65
+
+#define SROM8_BFL0 66
+#define SROM8_BFL1 67
+#define SROM8_BFL2 68
+#define SROM8_BFL3 69
+
+#define SROM8_MACHI 70
+#define SROM8_MACMID 71
+#define SROM8_MACLO 72
+
+#define SROM8_CCODE 73
+#define SROM8_REGREV 74
+
+#define SROM8_LEDBH10 75
+#define SROM8_LEDBH32 76
+
+#define SROM8_LEDDC 77
+
+#define SROM8_AA 78
+
+#define SROM8_AG10 79
+#define SROM8_AG32 80
+
+#define SROM8_TXRXC 81
+
+#define SROM8_BXARSSI2G 82
+#define SROM8_BXARSSI5G 83
+#define SROM8_TRI52G 84
+#define SROM8_TRI5GHL 85
+#define SROM8_RXPO52G 86
+
+#define SROM8_FEM2G 87
+#define SROM8_FEM5G 88
+#define SROM8_FEM_ANTSWLUT_MASK 0xf800
+#define SROM8_FEM_ANTSWLUT_SHIFT 11
+#define SROM8_FEM_TR_ISO_MASK 0x0700
+#define SROM8_FEM_TR_ISO_SHIFT 8
+#define SROM8_FEM_PDET_RANGE_MASK 0x00f8
+#define SROM8_FEM_PDET_RANGE_SHIFT 3
+#define SROM8_FEM_EXTPA_GAIN_MASK 0x0006
+#define SROM8_FEM_EXTPA_GAIN_SHIFT 1
+#define SROM8_FEM_TSSIPOS_MASK 0x0001
+#define SROM8_FEM_TSSIPOS_SHIFT 0
+
+#define SROM8_THERMAL 89
+
+/* Temp sense related entries */
+#define SROM8_MPWR_RAWTS 90
+#define SROM8_TS_SLP_OPT_CORRX 91
+/* FOC: frequency offset correction, HWIQ: H/W IQ cal enable, IQSWP: IQ cal swap disable */
+#define SROM8_FOC_HWIQ_IQSWP 92
+
+#define SROM8_EXTLNAGAIN 93
+
+/* Temperature delta for PHY calibration */
+#define SROM8_PHYCAL_TEMPDELTA 94
+
+/* Measured power 1 & 2: bits 0-13 at offset 95; the two MSBs are unused for now. */
+#define SROM8_MPWR_1_AND_2 95
+
+/* Per-path offsets & fields */
+#define SROM8_PATH0 96
+#define SROM8_PATH1 112
+#define SROM8_PATH2 128
+#define SROM8_PATH3 144
+
+#define SROM8_2G_ITT_MAXP 0
+#define SROM8_2G_PA 1
+#define SROM8_5G_ITT_MAXP 4
+#define SROM8_5GLH_MAXP 5
+#define SROM8_5G_PA 6
+#define SROM8_5GL_PA 9
+#define SROM8_5GH_PA 12
+
+/* All the myriad power offsets */
+#define SROM8_2G_CCKPO 160
+
+#define SROM8_2G_OFDMPO 161
+#define SROM8_5G_OFDMPO 163
+#define SROM8_5GL_OFDMPO 165
+#define SROM8_5GH_OFDMPO 167
+
+#define SROM8_2G_MCSPO 169
+#define SROM8_5G_MCSPO 177
+#define SROM8_5GL_MCSPO 185
+#define SROM8_5GH_MCSPO 193
+
+#define SROM8_CDDPO 201
+#define SROM8_STBCPO 202
+#define SROM8_BW40PO 203
+#define SROM8_BWDUPPO 204
+
+/* SISO PA parameters are in the path0 space */
+#define SROM8_SISO 96
+
+/* Legacy names for SISO PA parameters */
+#define SROM8_W0_ITTMAXP (SROM8_SISO + SROM8_2G_ITT_MAXP)
+#define SROM8_W0_PAB0 (SROM8_SISO + SROM8_2G_PA)
+#define SROM8_W0_PAB1 (SROM8_SISO + SROM8_2G_PA + 1)
+#define SROM8_W0_PAB2 (SROM8_SISO + SROM8_2G_PA + 2)
+#define SROM8_W1_ITTMAXP (SROM8_SISO + SROM8_5G_ITT_MAXP)
+#define SROM8_W1_MAXP_LCHC (SROM8_SISO + SROM8_5GLH_MAXP)
+#define SROM8_W1_PAB0 (SROM8_SISO + SROM8_5G_PA)
+#define SROM8_W1_PAB1 (SROM8_SISO + SROM8_5G_PA + 1)
+#define SROM8_W1_PAB2 (SROM8_SISO + SROM8_5G_PA + 2)
+#define SROM8_W1_PAB0_LC (SROM8_SISO + SROM8_5GL_PA)
+#define SROM8_W1_PAB1_LC (SROM8_SISO + SROM8_5GL_PA + 1)
+#define SROM8_W1_PAB2_LC (SROM8_SISO + SROM8_5GL_PA + 2)
+#define SROM8_W1_PAB0_HC (SROM8_SISO + SROM8_5GH_PA)
+#define SROM8_W1_PAB1_HC (SROM8_SISO + SROM8_5GH_PA + 1)
+#define SROM8_W1_PAB2_HC (SROM8_SISO + SROM8_5GH_PA + 2)
+
+#define SROM8_CRCREV 219
+
+/* SROM REV 9 */
+#define SROM9_2GPO_CCKBW20 160
+#define SROM9_2GPO_CCKBW20UL 161
+#define SROM9_2GPO_LOFDMBW20 162
+#define SROM9_2GPO_LOFDMBW20UL 164
+
+#define SROM9_5GLPO_LOFDMBW20 166
+#define SROM9_5GLPO_LOFDMBW20UL 168
+#define SROM9_5GMPO_LOFDMBW20 170
+#define SROM9_5GMPO_LOFDMBW20UL 172
+#define SROM9_5GHPO_LOFDMBW20 174
+#define SROM9_5GHPO_LOFDMBW20UL 176
+
+#define SROM9_2GPO_MCSBW20 178
+#define SROM9_2GPO_MCSBW20UL 180
+#define SROM9_2GPO_MCSBW40 182
+
+#define SROM9_5GLPO_MCSBW20 184
+#define SROM9_5GLPO_MCSBW20UL 186
+#define SROM9_5GLPO_MCSBW40 188
+#define SROM9_5GMPO_MCSBW20 190
+#define SROM9_5GMPO_MCSBW20UL 192
+#define SROM9_5GMPO_MCSBW40 194
+#define SROM9_5GHPO_MCSBW20 196
+#define SROM9_5GHPO_MCSBW20UL 198
+#define SROM9_5GHPO_MCSBW40 200
+
+#define SROM9_PO_MCS32 202
+#define SROM9_PO_LOFDM40DUP 203
+#define SROM9_EU_EDCRSTH 204
+#define SROM10_EU_EDCRSTH 204
+#define SROM8_RXGAINERR_2G 205
+#define SROM8_RXGAINERR_5GL 206
+#define SROM8_RXGAINERR_5GM 207
+#define SROM8_RXGAINERR_5GH 208
+#define SROM8_RXGAINERR_5GU 209
+#define SROM8_SUBBAND_PPR 210
+#define SROM8_PCIEINGRESS_WAR 211
+#define SROM8_EU_EDCRSTH 212
+#define SROM9_SAR 212
+
+#define SROM8_NOISELVL_2G 213
+#define SROM8_NOISELVL_5GL 214
+#define SROM8_NOISELVL_5GM 215
+#define SROM8_NOISELVL_5GH 216
+#define SROM8_NOISELVL_5GU 217
+#define SROM8_NOISECALOFFSET 218
+
+#define SROM9_REV_CRC 219
+
+#define SROM10_CCKPWROFFSET 218
+#define SROM10_SIGN 219
+#define SROM10_SWCTRLMAP_2G 220
+#define SROM10_CRCREV 229
+
+#define SROM10_WORDS 230
+#define SROM10_SIGNATURE SROM4_SIGNATURE
+
+/* SROM REV 11 */
+#define SROM11_BREV 65
+
+#define SROM11_BFL0 66
+#define SROM11_BFL1 67
+#define SROM11_BFL2 68
+#define SROM11_BFL3 69
+#define SROM11_BFL4 70
+#define SROM11_BFL5 71
+
+#define SROM11_MACHI 72
+#define SROM11_MACMID 73
+#define SROM11_MACLO 74
+
+#define SROM11_CCODE 75
+#define SROM11_REGREV 76
+
+#define SROM11_LEDBH10 77
+#define SROM11_LEDBH32 78
+
+#define SROM11_LEDDC 79
+
+#define SROM11_AA 80
+
+#define SROM11_AGBG10 81
+#define SROM11_AGBG2A0 82
+#define SROM11_AGA21 83
+
+#define SROM11_TXRXC 84
+
+#define SROM11_FEM_CFG1 85
+#define SROM11_FEM_CFG2 86
+
+/* Masks and offsets for FEM_CFG */
+#define SROM11_FEMCTRL_MASK 0xf800
+#define SROM11_FEMCTRL_SHIFT 11
+#define SROM11_PAPDCAP_MASK 0x0400
+#define SROM11_PAPDCAP_SHIFT 10
+#define SROM11_TWORANGETSSI_MASK 0x0200
+#define SROM11_TWORANGETSSI_SHIFT 9
+#define SROM11_PDGAIN_MASK 0x01f0
+#define SROM11_PDGAIN_SHIFT 4
+#define SROM11_EPAGAIN_MASK 0x000e
+#define SROM11_EPAGAIN_SHIFT 1
+#define SROM11_TSSIPOSSLOPE_MASK 0x0001
+#define SROM11_TSSIPOSSLOPE_SHIFT 0
+#define SROM11_GAINCTRLSPH_MASK 0xf800
+#define SROM11_GAINCTRLSPH_SHIFT 11
+
+#define SROM11_THERMAL 87
+#define SROM11_MPWR_RAWTS 88
+#define SROM11_TS_SLP_OPT_CORRX 89
+#define SROM11_XTAL_FREQ 90
+#define SROM11_5GB0_4080_W0_A1 91
+#define SROM11_PHYCAL_TEMPDELTA 92
+#define SROM11_MPWR_1_AND_2 93
+#define SROM11_5GB0_4080_W1_A1 94
+#define SROM11_TSSIFLOOR_2G 95
+#define SROM11_TSSIFLOOR_5GL 96
+#define SROM11_TSSIFLOOR_5GM 97
+#define SROM11_TSSIFLOOR_5GH 98
+#define SROM11_TSSIFLOOR_5GU 99
+
+/* Masks and offsets for Thermal parameters */
+#define SROM11_TEMPS_PERIOD_MASK 0xf0
+#define SROM11_TEMPS_PERIOD_SHIFT 4
+#define SROM11_TEMPS_HYSTERESIS_MASK 0x0f
+#define SROM11_TEMPS_HYSTERESIS_SHIFT 0
+#define SROM11_TEMPCORRX_MASK 0xfc
+#define SROM11_TEMPCORRX_SHIFT 2
+#define SROM11_TEMPSENSE_OPTION_MASK 0x3
+#define SROM11_TEMPSENSE_OPTION_SHIFT 0
+
+#define SROM11_PDOFF_2G_40M_A0_MASK 0x000f
+#define SROM11_PDOFF_2G_40M_A0_SHIFT 0
+#define SROM11_PDOFF_2G_40M_A1_MASK 0x00f0
+#define SROM11_PDOFF_2G_40M_A1_SHIFT 4
+#define SROM11_PDOFF_2G_40M_A2_MASK 0x0f00
+#define SROM11_PDOFF_2G_40M_A2_SHIFT 8
+#define SROM11_PDOFF_2G_40M_VALID_MASK 0x8000
+#define SROM11_PDOFF_2G_40M_VALID_SHIFT 15
+
+#define SROM11_PDOFF_2G_40M 100
+#define SROM11_PDOFF_40M_A0 101
+#define SROM11_PDOFF_40M_A1 102
+#define SROM11_PDOFF_40M_A2 103
+#define SROM11_5GB0_4080_W2_A1 103
+#define SROM11_PDOFF_80M_A0 104
+#define SROM11_PDOFF_80M_A1 105
+#define SROM11_PDOFF_80M_A2 106
+#define SROM11_5GB1_4080_W0_A1 106
+
+#define SROM11_SUBBAND5GVER 107
+
+/* Per-path fields and offset */
+#define MAX_PATH_SROM_11 3
+#define SROM11_PATH0 108
+#define SROM11_PATH1 128
+#define SROM11_PATH2 148
+
+#define SROM11_2G_MAXP 0
+#define SROM11_5GB1_4080_PA 0
+#define SROM11_2G_PA 1
+#define SROM11_5GB2_4080_PA 2
+#define SROM11_RXGAINS1 4
+#define SROM11_RXGAINS 5
+#define SROM11_5GB3_4080_PA 5
+#define SROM11_5GB1B0_MAXP 6
+#define SROM11_5GB3B2_MAXP 7
+#define SROM11_5GB0_PA 8
+#define SROM11_5GB1_PA 11
+#define SROM11_5GB2_PA 14
+#define SROM11_5GB3_PA 17
+
+/* Masks and offsets for rxgains */
+#define SROM11_RXGAINS5GTRELNABYPA_MASK 0x8000
+#define SROM11_RXGAINS5GTRELNABYPA_SHIFT 15
+#define SROM11_RXGAINS5GTRISOA_MASK 0x7800
+#define SROM11_RXGAINS5GTRISOA_SHIFT 11
+#define SROM11_RXGAINS5GELNAGAINA_MASK 0x0700
+#define SROM11_RXGAINS5GELNAGAINA_SHIFT 8
+#define SROM11_RXGAINS2GTRELNABYPA_MASK 0x0080
+#define SROM11_RXGAINS2GTRELNABYPA_SHIFT 7
+#define SROM11_RXGAINS2GTRISOA_MASK 0x0078
+#define SROM11_RXGAINS2GTRISOA_SHIFT 3
+#define SROM11_RXGAINS2GELNAGAINA_MASK 0x0007
+#define SROM11_RXGAINS2GELNAGAINA_SHIFT 0
+#define SROM11_RXGAINS5GHTRELNABYPA_MASK 0x8000
+#define SROM11_RXGAINS5GHTRELNABYPA_SHIFT 15
+#define SROM11_RXGAINS5GHTRISOA_MASK 0x7800
+#define SROM11_RXGAINS5GHTRISOA_SHIFT 11
+#define SROM11_RXGAINS5GHELNAGAINA_MASK 0x0700
+#define SROM11_RXGAINS5GHELNAGAINA_SHIFT 8
+#define SROM11_RXGAINS5GMTRELNABYPA_MASK 0x0080
+#define SROM11_RXGAINS5GMTRELNABYPA_SHIFT 7
+#define SROM11_RXGAINS5GMTRISOA_MASK 0x0078
+#define SROM11_RXGAINS5GMTRISOA_SHIFT 3
+#define SROM11_RXGAINS5GMELNAGAINA_MASK 0x0007
+#define SROM11_RXGAINS5GMELNAGAINA_SHIFT 0
+
+/* Power per rate */
+#define SROM11_CCKBW202GPO 168
+#define SROM11_CCKBW20UL2GPO 169
+#define SROM11_MCSBW202GPO 170
+#define SROM11_MCSBW202GPO_1 171
+#define SROM11_MCSBW402GPO 172
+#define SROM11_MCSBW402GPO_1 173
+#define SROM11_DOT11AGOFDMHRBW202GPO 174
+#define SROM11_OFDMLRBW202GPO 175
+
+#define SROM11_MCSBW205GLPO 176
+#define SROM11_MCSBW205GLPO_1 177
+#define SROM11_MCSBW405GLPO 178
+#define SROM11_MCSBW405GLPO_1 179
+#define SROM11_MCSBW805GLPO 180
+#define SROM11_MCSBW805GLPO_1 181
+#define SROM11_RPCAL_2G 182
+#define SROM11_RPCAL_5GL 183
+#define SROM11_MCSBW205GMPO 184
+#define SROM11_MCSBW205GMPO_1 185
+#define SROM11_MCSBW405GMPO 186
+#define SROM11_MCSBW405GMPO_1 187
+#define SROM11_MCSBW805GMPO 188
+#define SROM11_MCSBW805GMPO_1 189
+#define SROM11_RPCAL_5GM 190
+#define SROM11_RPCAL_5GH 191
+#define SROM11_MCSBW205GHPO 192
+#define SROM11_MCSBW205GHPO_1 193
+#define SROM11_MCSBW405GHPO 194
+#define SROM11_MCSBW405GHPO_1 195
+#define SROM11_MCSBW805GHPO 196
+#define SROM11_MCSBW805GHPO_1 197
+#define SROM11_RPCAL_5GU 198
+#define SROM11_PDOFF_2G_CCK 199
+#define SROM11_MCSLR5GLPO 200
+#define SROM11_MCSLR5GMPO 201
+#define SROM11_MCSLR5GHPO 202
+
+#define SROM11_SB20IN40HRPO 203
+#define SROM11_SB20IN80AND160HR5GLPO 204
+#define SROM11_SB40AND80HR5GLPO 205
+#define SROM11_SB20IN80AND160HR5GMPO 206
+#define SROM11_SB40AND80HR5GMPO 207
+#define SROM11_SB20IN80AND160HR5GHPO 208
+#define SROM11_SB40AND80HR5GHPO 209
+#define SROM11_SB20IN40LRPO 210
+#define SROM11_SB20IN80AND160LR5GLPO 211
+#define SROM11_SB40AND80LR5GLPO 212
+#define SROM11_TXIDXCAP2G 212
+#define SROM11_SB20IN80AND160LR5GMPO 213
+#define SROM11_SB40AND80LR5GMPO 214
+#define SROM11_TXIDXCAP5G 214
+#define SROM11_SB20IN80AND160LR5GHPO 215
+#define SROM11_SB40AND80LR5GHPO 216
+
+#define SROM11_DOT11AGDUPHRPO 217
+#define SROM11_DOT11AGDUPLRPO 218
+
+/* MISC */
+#define SROM11_PCIEINGRESS_WAR 220
+#define SROM11_SAR 221
+
+#define SROM11_NOISELVL_2G 222
+#define SROM11_NOISELVL_5GL 223
+#define SROM11_NOISELVL_5GM 224
+#define SROM11_NOISELVL_5GH 225
+#define SROM11_NOISELVL_5GU 226
+
+#define SROM11_RXGAINERR_2G 227
+#define SROM11_RXGAINERR_5GL 228
+#define SROM11_RXGAINERR_5GM 229
+#define SROM11_RXGAINERR_5GH 230
+#define SROM11_RXGAINERR_5GU 231
+
+#define SROM11_EU_EDCRSTH 232
+#define SROM12_EU_EDCRSTH 232
+
+#define SROM11_SIGN 64
+#define SROM11_CRCREV 233
+
+#define SROM11_WORDS 234
+#define SROM11_SIGNATURE 0x0634
+
+/* SROM REV 12 */
+#define SROM12_SIGN 64
+#define SROM12_WORDS 512
+#define SROM12_SIGNATURE 0x8888
+#define SROM12_CRCREV 511
+
+#define SROM12_BFL6 486
+#define SROM12_BFL7 487
+
+#define SROM12_MCSBW205GX1PO 234
+#define SROM12_MCSBW205GX1PO_1 235
+#define SROM12_MCSBW405GX1PO 236
+#define SROM12_MCSBW405GX1PO_1 237
+#define SROM12_MCSBW805GX1PO 238
+#define SROM12_MCSBW805GX1PO_1 239
+#define SROM12_MCSLR5GX1PO 240
+#define SROM12_SB40AND80LR5GX1PO 241
+#define SROM12_SB20IN80AND160LR5GX1PO 242
+#define SROM12_SB20IN80AND160HR5GX1PO 243
+#define SROM12_SB40AND80HR5GX1PO 244
+
+#define SROM12_MCSBW205GX2PO 245
+#define SROM12_MCSBW205GX2PO_1 246
+#define SROM12_MCSBW405GX2PO 247
+#define SROM12_MCSBW405GX2PO_1 248
+#define SROM12_MCSBW805GX2PO 249
+#define SROM12_MCSBW805GX2PO_1 250
+#define SROM12_MCSLR5GX2PO 251
+#define SROM12_SB40AND80LR5GX2PO 252
+#define SROM12_SB20IN80AND160LR5GX2PO 253
+#define SROM12_SB20IN80AND160HR5GX2PO 254
+#define SROM12_SB40AND80HR5GX2PO 255
+
+/* MISC */
+#define SROM12_RXGAINS10 483
+#define SROM12_RXGAINS11 484
+#define SROM12_RXGAINS12 485
+
+/* Per-path fields and offset */
+#define MAX_PATH_SROM_12 3
+#define SROM12_PATH0 256
+#define SROM12_PATH1 328
+#define SROM12_PATH2 400
+
+#define SROM12_5GB42G_MAXP 0
+#define SROM12_2GB0_PA 1
+#define SROM12_2GB0_PA_W0 1
+#define SROM12_2GB0_PA_W1 2
+#define SROM12_2GB0_PA_W2 3
+#define SROM12_2GB0_PA_W3 4
+
+#define SROM12_RXGAINS 5
+#define SROM12_5GB1B0_MAXP 6
+#define SROM12_5GB3B2_MAXP 7
+
+#define SROM12_5GB0_PA 8
+#define SROM12_5GB0_PA_W0 8
+#define SROM12_5GB0_PA_W1 9
+#define SROM12_5GB0_PA_W2 10
+#define SROM12_5GB0_PA_W3 11
+
+#define SROM12_5GB1_PA 12
+#define SROM12_5GB1_PA_W0 12
+#define SROM12_5GB1_PA_W1 13
+#define SROM12_5GB1_PA_W2 14
+#define SROM12_5GB1_PA_W3 15
+
+#define SROM12_5GB2_PA 16
+#define SROM12_5GB2_PA_W0 16
+#define SROM12_5GB2_PA_W1 17
+#define SROM12_5GB2_PA_W2 18
+#define SROM12_5GB2_PA_W3 19
+
+#define SROM12_5GB3_PA 20
+#define SROM12_5GB3_PA_W0 20
+#define SROM12_5GB3_PA_W1 21
+#define SROM12_5GB3_PA_W2 22
+#define SROM12_5GB3_PA_W3 23
+
+#define SROM12_5GB4_PA 24
+#define SROM12_5GB4_PA_W0 24
+#define SROM12_5GB4_PA_W1 25
+#define SROM12_5GB4_PA_W2 26
+#define SROM12_5GB4_PA_W3 27
+
+#define SROM12_2G40B0_PA 28
+#define SROM12_2G40B0_PA_W0 28
+#define SROM12_2G40B0_PA_W1 29
+#define SROM12_2G40B0_PA_W2 30
+#define SROM12_2G40B0_PA_W3 31
+
+#define SROM12_5G40B0_PA 32
+#define SROM12_5G40B0_PA_W0 32
+#define SROM12_5G40B0_PA_W1 33
+#define SROM12_5G40B0_PA_W2 34
+#define SROM12_5G40B0_PA_W3 35
+
+#define SROM12_5G40B1_PA 36
+#define SROM12_5G40B1_PA_W0 36
+#define SROM12_5G40B1_PA_W1 37
+#define SROM12_5G40B1_PA_W2 38
+#define SROM12_5G40B1_PA_W3 39
+
+#define SROM12_5G40B2_PA 40
+#define SROM12_5G40B2_PA_W0 40
+#define SROM12_5G40B2_PA_W1 41
+#define SROM12_5G40B2_PA_W2 42
+#define SROM12_5G40B2_PA_W3 43
+
+#define SROM12_5G40B3_PA 44
+#define SROM12_5G40B3_PA_W0 44
+#define SROM12_5G40B3_PA_W1 45
+#define SROM12_5G40B3_PA_W2 46
+#define SROM12_5G40B3_PA_W3 47
+
+#define SROM12_5G40B4_PA 48
+#define SROM12_5G40B4_PA_W0 48
+#define SROM12_5G40B4_PA_W1 49
+#define SROM12_5G40B4_PA_W2 50
+#define SROM12_5G40B4_PA_W3 51
+
+#define SROM12_5G80B0_PA 52
+#define SROM12_5G80B0_PA_W0 52
+#define SROM12_5G80B0_PA_W1 53
+#define SROM12_5G80B0_PA_W2 54
+#define SROM12_5G80B0_PA_W3 55
+
+#define SROM12_5G80B1_PA 56
+#define SROM12_5G80B1_PA_W0 56
+#define SROM12_5G80B1_PA_W1 57
+#define SROM12_5G80B1_PA_W2 58
+#define SROM12_5G80B1_PA_W3 59
+
+#define SROM12_5G80B2_PA 60
+#define SROM12_5G80B2_PA_W0 60
+#define SROM12_5G80B2_PA_W1 61
+#define SROM12_5G80B2_PA_W2 62
+#define SROM12_5G80B2_PA_W3 63
+
+#define SROM12_5G80B3_PA 64
+#define SROM12_5G80B3_PA_W0 64
+#define SROM12_5G80B3_PA_W1 65
+#define SROM12_5G80B3_PA_W2 66
+#define SROM12_5G80B3_PA_W3 67
+
+#define SROM12_5G80B4_PA 68
+#define SROM12_5G80B4_PA_W0 68
+#define SROM12_5G80B4_PA_W1 69
+#define SROM12_5G80B4_PA_W2 70
+#define SROM12_5G80B4_PA_W3 71
+
+/* PD offset */
+#define SROM12_PDOFF_2G_CCK 472
+
+#define SROM12_PDOFF_20in40M_5G_B0 473
+#define SROM12_PDOFF_20in40M_5G_B1 474
+#define SROM12_PDOFF_20in40M_5G_B2 475
+#define SROM12_PDOFF_20in40M_5G_B3 476
+#define SROM12_PDOFF_20in40M_5G_B4 477
+
+#define SROM12_PDOFF_40in80M_5G_B0 478
+#define SROM12_PDOFF_40in80M_5G_B1 479
+#define SROM12_PDOFF_40in80M_5G_B2 480
+#define SROM12_PDOFF_40in80M_5G_B3 481
+#define SROM12_PDOFF_40in80M_5G_B4 482
+
+#define SROM12_PDOFF_20in80M_5G_B0 488
+#define SROM12_PDOFF_20in80M_5G_B1 489
+#define SROM12_PDOFF_20in80M_5G_B2 490
+#define SROM12_PDOFF_20in80M_5G_B3 491
+#define SROM12_PDOFF_20in80M_5G_B4 492
+
+#define SROM12_GPDN_L 91 /* GPIO pull down bits [15:0] */
+#define SROM12_GPDN_H 233 /* GPIO pull down bits [31:16] */
+
+#define SROM13_SIGN 64
+#define SROM13_WORDS 590
+#define SROM13_SIGNATURE 0x4d55
+#define SROM13_CRCREV 589
+
+/* Per-path fields and offset */
+#define MAX_PATH_SROM_13 4
+#define SROM13_PATH0 256
+#define SROM13_PATH1 328
+#define SROM13_PATH2 400
+#define SROM13_PATH3 512
+#define SROM13_RXGAINS 5
+
+#define SROM13_XTALFREQ 90
+
+#define SROM13_PDOFFSET20IN40M2G 94
+#define SROM13_PDOFFSET20IN40M2GCORE3 95
+#define SROM13_SB20IN40HRLRPOX 96
+
+#define SROM13_RXGAINS1CORE3 97
+
+#define SROM13_PDOFFSET20IN40M5GCORE3 98
+#define SROM13_PDOFFSET20IN40M5GCORE3_1 99
+
+#define SROM13_ANTGAIN_BANDBGA 100
+
+#define SROM13_PDOFFSET40IN80M5GCORE3 105
+#define SROM13_PDOFFSET40IN80M5GCORE3_1 106
+
+/* power per rate */
+#define SROM13_MCS1024QAM2GPO 108
+#define SROM13_MCS1024QAM5GLPO 109
+#define SROM13_MCS1024QAM5GLPO_1 110
+#define SROM13_MCS1024QAM5GMPO 111
+#define SROM13_MCS1024QAM5GMPO_1 112
+#define SROM13_MCS1024QAM5GHPO 113
+#define SROM13_MCS1024QAM5GHPO_1 114
+#define SROM13_MCS1024QAM5GX1PO 115
+#define SROM13_MCS1024QAM5GX1PO_1 116
+#define SROM13_MCS1024QAM5GX2PO 117
+#define SROM13_MCS1024QAM5GX2PO_1 118
+
+#define SROM13_MCSBW1605GLPO 119
+#define SROM13_MCSBW1605GLPO_1 120
+#define SROM13_MCSBW1605GMPO 121
+#define SROM13_MCSBW1605GMPO_1 122
+#define SROM13_MCSBW1605GHPO 123
+#define SROM13_MCSBW1605GHPO_1 124
+
+#define SROM13_MCSBW1605GX1PO 125
+#define SROM13_MCSBW1605GX1PO_1 126
+#define SROM13_MCSBW1605GX2PO 127
+#define SROM13_MCSBW1605GX2PO_1 128
+
+#define SROM13_ULBPPROFFS5GB0 129
+#define SROM13_ULBPPROFFS5GB1 130
+#define SROM13_ULBPPROFFS5GB2 131
+#define SROM13_ULBPPROFFS5GB3 132
+#define SROM13_ULBPPROFFS5GB4 133
+#define SROM13_ULBPPROFFS2G 134
+
+#define SROM13_MCS8POEXP 135
+#define SROM13_MCS8POEXP_1 136
+#define SROM13_MCS9POEXP 137
+#define SROM13_MCS9POEXP_1 138
+#define SROM13_MCS10POEXP 139
+#define SROM13_MCS10POEXP_1 140
+#define SROM13_MCS11POEXP 141
+#define SROM13_MCS11POEXP_1 142
+#define SROM13_ULBPDOFFS5GB0A0 143
+#define SROM13_ULBPDOFFS5GB0A1 144
+#define SROM13_ULBPDOFFS5GB0A2 145
+#define SROM13_ULBPDOFFS5GB0A3 146
+#define SROM13_ULBPDOFFS5GB1A0 147
+#define SROM13_ULBPDOFFS5GB1A1 148
+#define SROM13_ULBPDOFFS5GB1A2 149
+#define SROM13_ULBPDOFFS5GB1A3 150
+#define SROM13_ULBPDOFFS5GB2A0 151
+#define SROM13_ULBPDOFFS5GB2A1 152
+#define SROM13_ULBPDOFFS5GB2A2 153
+#define SROM13_ULBPDOFFS5GB2A3 154
+#define SROM13_ULBPDOFFS5GB3A0 155
+#define SROM13_ULBPDOFFS5GB3A1 156
+#define SROM13_ULBPDOFFS5GB3A2 157
+#define SROM13_ULBPDOFFS5GB3A3 158
+#define SROM13_ULBPDOFFS5GB4A0 159
+#define SROM13_ULBPDOFFS5GB4A1 160
+#define SROM13_ULBPDOFFS5GB4A2 161
+#define SROM13_ULBPDOFFS5GB4A3 162
+#define SROM13_ULBPDOFFS2GA0 163
+#define SROM13_ULBPDOFFS2GA1 164
+#define SROM13_ULBPDOFFS2GA2 165
+#define SROM13_ULBPDOFFS2GA3 166
+
+#define SROM13_RPCAL5GB4 199
+#define SROM13_RPCAL2GCORE3 101
+#define SROM13_RPCAL5GB01CORE3 102
+#define SROM13_RPCAL5GB23CORE3 103
+
+#define SROM13_SW_TXRX_MASK 104
+
+#define SROM13_EU_EDCRSTH 232
+
+#define SROM13_SWCTRLMAP4_CFG 493
+#define SROM13_SWCTRLMAP4_TX2G_FEM3TO0 494
+#define SROM13_SWCTRLMAP4_RX2G_FEM3TO0 495
+#define SROM13_SWCTRLMAP4_RXBYP2G_FEM3TO0 496
+#define SROM13_SWCTRLMAP4_MISC2G_FEM3TO0 497
+#define SROM13_SWCTRLMAP4_TX5G_FEM3TO0 498
+#define SROM13_SWCTRLMAP4_RX5G_FEM3TO0 499
+#define SROM13_SWCTRLMAP4_RXBYP5G_FEM3TO0 500
+#define SROM13_SWCTRLMAP4_MISC5G_FEM3TO0 501
+#define SROM13_SWCTRLMAP4_TX2G_FEM7TO4 502
+#define SROM13_SWCTRLMAP4_RX2G_FEM7TO4 503
+#define SROM13_SWCTRLMAP4_RXBYP2G_FEM7TO4 504
+#define SROM13_SWCTRLMAP4_MISC2G_FEM7TO4 505
+#define SROM13_SWCTRLMAP4_TX5G_FEM7TO4 506
+#define SROM13_SWCTRLMAP4_RX5G_FEM7TO4 507
+#define SROM13_SWCTRLMAP4_RXBYP5G_FEM7TO4 508
+#define SROM13_SWCTRLMAP4_MISC5G_FEM7TO4 509
+
+#define SROM13_PDOFFSET20IN80M5GCORE3 510
+#define SROM13_PDOFFSET20IN80M5GCORE3_1 511
+
+#define SROM13_NOISELVLCORE3 584
+#define SROM13_NOISELVLCORE3_1 585
+#define SROM13_RXGAINERRCORE3 586
+#define SROM13_RXGAINERRCORE3_1 587
+
+#define SROM13_PDOFF_2G_CCK_20M 167
+
+#define SROM15_CALDATA_WORDS 943
+#define SROM15_CAL_OFFSET_LOC 68
+#define MAX_IOCTL_TXCHUNK_SIZE 1500
+#define SROM15_MAX_CAL_SIZE 1886
+#define SROM15_SIGNATURE 0x110c
+#define SROM15_WORDS 1024
+#define SROM15_MACHI 65
+#define SROM15_CRCREV 1023
+#define SROM15_BRDREV 69
+#define SROM15_CCODE 70
+#define SROM15_REGREV 71
+#define SROM15_SIGN 64
+
+#define SROM16_SIGN 128
+#define SROM16_WORDS 1024
+#define SROM16_SFLASH_WORDS 2048U
+#define SROM16_SIGNATURE 0x4357
+#define SROM16_CRCREV 1023
+#define SROM16_MACHI (SROM16_SIGN + 1)
+#define SROM16_CALDATA_OFFSET_LOC (SROM16_SIGN + 4)
+#define SROM16_BOARDREV (SROM16_SIGN + 5)
+#define SROM16_CCODE (SROM16_SIGN + 6)
+#define SROM16_REGREV (SROM16_SIGN + 7)
+
+#define SROM_CALDATA_WORDS 832
+
+#define SROM17_SIGN 64
+#define SROM17_BRDREV 65
+#define SROM17_MACADDR 66
+#define SROM17_CCODE 69
+#define SROM17_CALDATA 70
+#define SROM17_GCALTMP 71
+
+#define SROM17_C0SRD202G 72
+#define SROM17_C0SRD202G_1 73
+#define SROM17_C0SRD205GL 74
+#define SROM17_C0SRD205GL_1 75
+#define SROM17_C0SRD205GML 76
+#define SROM17_C0SRD205GML_1 77
+#define SROM17_C0SRD205GMU 78
+#define SROM17_C0SRD205GMU_1 79
+#define SROM17_C0SRD205GH 80
+#define SROM17_C0SRD205GH_1 81
+
+#define SROM17_C1SRD202G 82
+#define SROM17_C1SRD202G_1 83
+#define SROM17_C1SRD205GL 84
+#define SROM17_C1SRD205GL_1 85
+#define SROM17_C1SRD205GML 86
+#define SROM17_C1SRD205GML_1 87
+#define SROM17_C1SRD205GMU 88
+#define SROM17_C1SRD205GMU_1 89
+#define SROM17_C1SRD205GH 90
+#define SROM17_C1SRD205GH_1 91
+
+#define SROM17_TRAMMAGIC 92
+#define SROM17_TRAMMAGIC_1 93
+#define SROM17_TRAMDATA 94
+
+#define SROM17_WORDS 256
+#define SROM17_CRCREV 255
+#define SROM17_CALDATA_WORDS 161
+#define SROM17_SIGNATURE 0x1103 /* 4355 in hex format */
+
+#define SROM18_SIGN 112
+#define SROM18_WORDS 1024
+#define SROM18_SIGNATURE 0x4377
+#define SROM18_CRCREV 1023
+#define SROM18_MACHI (SROM18_SIGN + 1)
+#define SROM18_CALDATA_OFFSET_LOC (SROM18_SIGN + 4)
+#define SROM18_BOARDREV (SROM18_SIGN + 5)
+#define SROM18_CCODE (SROM18_SIGN + 6)
+#define SROM18_REGREV (SROM18_SIGN + 7)
+#define SROM18_CALDATA_WORDS (SROM18_WORDS - SROM18_CALDATA_OFFSET_LOC)
+
+typedef struct {
+ uint8 tssipos; /* TSSI positive slope, 1: positive, 0: negative */
+ uint8 extpagain; /* Ext PA gain-type: full-gain: 0, pa-lite: 1, no_pa: 2 */
+ uint8 pdetrange; /* support 32 combinations of different Pdet dynamic ranges */
+ uint8 triso; /* TR switch isolation */
+ uint8 antswctrllut; /* antswctrl lookup table configuration: 32 possible choices */
+} srom_fem_t;
+
+#endif /* _bcmsrom_fmt_h_ */
diff --git a/bcmdhd.101.10.361.x/include/bcmsrom_tbl.h b/bcmdhd.101.10.361.x/include/bcmsrom_tbl.h
new file mode 100755
index 0000000..2485603
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/bcmsrom_tbl.h
@@ -0,0 +1,1303 @@
+/*
+ * Table that encodes the srom formats for PCI/PCIe NICs.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _bcmsrom_tbl_h_
+#define _bcmsrom_tbl_h_
+
+#include <sbpcmcia.h>
+#include <bcmsrom_fmt.h>
+
+typedef struct {
+ const char *name;
+ uint32 revmask;
+ uint32 flags;
+ uint16 off;
+ uint16 mask;
+} sromvar_t;
+
+#define SRFL_MORE 1 /* value continues as described by the next entry */
+#define SRFL_NOFFS 2 /* value bits can't be all one's */
+#define SRFL_PRHEX 4 /* value is in hexadecimal format */
+#define SRFL_PRSIGN 8 /* value is in signed decimal format */
+#define SRFL_CCODE 0x10 /* value is in country code format */
+#define SRFL_ETHADDR 0x20 /* value is an Ethernet address */
+#define SRFL_UNUSED 0x40 /* unused, was SRFL_LEDDC */
+#define SRFL_NOVAR 0x80 /* do not generate a nvram param, entry is for mfgc */
+#define SRFL_ARRAY 0x100 /* value is in an array. All elements EXCEPT FOR THE LAST
+ * ONE in the array should have this flag set.
+ */
+#define PRHEX_N_MORE (SRFL_PRHEX | SRFL_MORE)
+
+#define SROM_DEVID_PCIE 48
+
+/**
+ * Assumptions:
+ * - Ethernet address spans across 3 consecutive words
+ *
+ * Table rules:
+ * - Add multiple entries next to each other if a value spans across multiple words
+ * (even multiple fields in the same word) with each entry except the last having
+ * it's SRFL_MORE bit set.
+ * - Ethernet address entry does not follow above rule and must not have SRFL_MORE
+ * bit set. Its SRFL_ETHADDR bit implies it takes multiple words.
+ * - The last entry's name field must be NULL to indicate the end of the table. Other
+ * entries must have non-NULL name.
+ */
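+
+/* Illustrative sketch (not part of the driver): how a parser honours the
+ * table rules above; rows flagged SRFL_MORE continue into the following
+ * row(s), and a NULL name terminates the walk. "tbl" and "srrev_bit" are
+ * hypothetical.
+ */
+#if 0
+static void
+sromvar_walk_example(const sromvar_t *tbl, uint32 srrev_bit)
+{
+	const sromvar_t *v = tbl;
+	while (v->name != NULL) {	/* NULL name ends the table */
+		bool valid = (v->revmask & srrev_bit) != 0;
+		while (v->flags & SRFL_MORE)
+			v++;	/* skip to the last row of this variable */
+		if (valid) {
+			/* each row's off/mask locate the value word(s) */
+		}
+		v++;
+	}
+}
+#endif /* example */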
+#if !defined(SROM15_MEMOPT)
+static const sromvar_t BCMATTACHDATA(pci_sromvars)[] = {
+/* name revmask flags off mask */
+#if defined(BCMPCIEDEV) && defined(BCMPCIEDEV_ENABLED)
+ {"devid", 0xffffff00, SRFL_PRHEX, SROM_DEVID_PCIE, 0xffff},
+#else
+ {"devid", 0xffffff00, SRFL_PRHEX|SRFL_NOVAR, PCI_F0DEVID, 0xffff},
+#endif /* BCMPCIEDEV && BCMPCIEDEV_ENABLED */
+ {"boardrev", 0x0000000e, SRFL_PRHEX, SROM_AABREV, SROM_BR_MASK},
+ {"boardrev", 0x000000f0, SRFL_PRHEX, SROM4_BREV, 0xffff},
+ {"boardrev", 0xffffff00, SRFL_PRHEX, SROM8_BREV, 0xffff},
+ {"boardflags", 0x00000002, SRFL_PRHEX, SROM_BFL, 0xffff},
+ {"boardflags", 0x00000004, SRFL_PRHEX|SRFL_MORE, SROM_BFL, 0xffff},
+ {"", 0, 0, SROM_BFL2, 0xffff},
+ {"boardflags", 0x00000008, SRFL_PRHEX|SRFL_MORE, SROM_BFL, 0xffff},
+ {"", 0, 0, SROM3_BFL2, 0xffff},
+ {"boardflags", 0x00000010, SRFL_PRHEX|SRFL_MORE, SROM4_BFL0, 0xffff},
+ {"", 0, 0, SROM4_BFL1, 0xffff},
+ {"boardflags", 0x000000e0, SRFL_PRHEX|SRFL_MORE, SROM5_BFL0, 0xffff},
+ {"", 0, 0, SROM5_BFL1, 0xffff},
+ {"boardflags", 0xffffff00, SRFL_PRHEX|SRFL_MORE, SROM8_BFL0, 0xffff},
+ {"", 0, 0, SROM8_BFL1, 0xffff},
+ {"boardflags2", 0x00000010, SRFL_PRHEX|SRFL_MORE, SROM4_BFL2, 0xffff},
+ {"", 0, 0, SROM4_BFL3, 0xffff},
+ {"boardflags2", 0x000000e0, SRFL_PRHEX|SRFL_MORE, SROM5_BFL2, 0xffff},
+ {"", 0, 0, SROM5_BFL3, 0xffff},
+ {"boardflags2", 0xffffff00, SRFL_PRHEX|SRFL_MORE, SROM8_BFL2, 0xffff},
+ {"", 0, 0, SROM8_BFL3, 0xffff},
+ {"boardtype", 0xfffffffc, SRFL_PRHEX, SROM_SSID, 0xffff},
+ {"subvid", 0xfffffffc, SRFL_PRHEX, SROM_SVID, 0xffff},
+ {"boardnum", 0x00000006, 0, SROM_MACLO_IL0, 0xffff},
+ {"boardnum", 0x00000008, 0, SROM3_MACLO, 0xffff},
+ {"boardnum", 0x00000010, 0, SROM4_MACLO, 0xffff},
+ {"boardnum", 0x000000e0, 0, SROM5_MACLO, 0xffff},
+ {"boardnum", 0x00000700, 0, SROM8_MACLO, 0xffff},
+ {"cc", 0x00000002, 0, SROM_AABREV, SROM_CC_MASK},
+ {"regrev", 0x00000008, 0, SROM_OPO, 0xff00},
+ {"regrev", 0x00000010, 0, SROM4_REGREV, 0xffff},
+ {"regrev", 0x000000e0, 0, SROM5_REGREV, 0xffff},
+ {"regrev", 0x00000700, 0, SROM8_REGREV, 0xffff},
+ {"pa0b0", 0x0000000e, SRFL_PRHEX, SROM_WL0PAB0, 0xffff},
+ {"pa0b1", 0x0000000e, SRFL_PRHEX, SROM_WL0PAB1, 0xffff},
+ {"pa0b2", 0x0000000e, SRFL_PRHEX, SROM_WL0PAB2, 0xffff},
+ {"pa0itssit", 0x0000000e, 0, SROM_ITT, 0x00ff},
+ {"pa0maxpwr", 0x0000000e, 0, SROM_WL10MAXP, 0x00ff},
+ {"pa0b0", 0x00000700, SRFL_PRHEX, SROM8_W0_PAB0, 0xffff},
+ {"pa0b1", 0x00000700, SRFL_PRHEX, SROM8_W0_PAB1, 0xffff},
+ {"pa0b2", 0x00000700, SRFL_PRHEX, SROM8_W0_PAB2, 0xffff},
+ {"pa0itssit", 0x00000700, 0, SROM8_W0_ITTMAXP, 0xff00},
+ {"pa0maxpwr", 0x00000700, 0, SROM8_W0_ITTMAXP, 0x00ff},
+ {"opo", 0x0000000c, 0, SROM_OPO, 0x00ff},
+ {"opo", 0x00000700, 0, SROM8_2G_OFDMPO, 0x00ff},
+ {"aa2g", 0x0000000e, 0, SROM_AABREV, SROM_AA0_MASK},
+ {"aa2g", 0x000000f0, 0, SROM4_AA, 0x00ff},
+ {"aa2g", 0x00000700, 0, SROM8_AA, 0x00ff},
+ {"aa5g", 0x0000000e, 0, SROM_AABREV, SROM_AA1_MASK},
+ {"aa5g", 0x000000f0, 0, SROM4_AA, 0xff00},
+ {"aa5g", 0x00000700, 0, SROM8_AA, 0xff00},
+ {"ag0", 0x0000000e, 0, SROM_AG10, 0x00ff},
+ {"ag1", 0x0000000e, 0, SROM_AG10, 0xff00},
+ {"ag0", 0x000000f0, 0, SROM4_AG10, 0x00ff},
+ {"ag1", 0x000000f0, 0, SROM4_AG10, 0xff00},
+ {"ag2", 0x000000f0, 0, SROM4_AG32, 0x00ff},
+ {"ag3", 0x000000f0, 0, SROM4_AG32, 0xff00},
+ {"ag0", 0x00000700, 0, SROM8_AG10, 0x00ff},
+ {"ag1", 0x00000700, 0, SROM8_AG10, 0xff00},
+ {"ag2", 0x00000700, 0, SROM8_AG32, 0x00ff},
+ {"ag3", 0x00000700, 0, SROM8_AG32, 0xff00},
+ {"pa1b0", 0x0000000e, SRFL_PRHEX, SROM_WL1PAB0, 0xffff},
+ {"pa1b1", 0x0000000e, SRFL_PRHEX, SROM_WL1PAB1, 0xffff},
+ {"pa1b2", 0x0000000e, SRFL_PRHEX, SROM_WL1PAB2, 0xffff},
+ {"pa1lob0", 0x0000000c, SRFL_PRHEX, SROM_WL1LPAB0, 0xffff},
+ {"pa1lob1", 0x0000000c, SRFL_PRHEX, SROM_WL1LPAB1, 0xffff},
+ {"pa1lob2", 0x0000000c, SRFL_PRHEX, SROM_WL1LPAB2, 0xffff},
+ {"pa1hib0", 0x0000000c, SRFL_PRHEX, SROM_WL1HPAB0, 0xffff},
+ {"pa1hib1", 0x0000000c, SRFL_PRHEX, SROM_WL1HPAB1, 0xffff},
+ {"pa1hib2", 0x0000000c, SRFL_PRHEX, SROM_WL1HPAB2, 0xffff},
+ {"pa1itssit", 0x0000000e, 0, SROM_ITT, 0xff00},
+ {"pa1maxpwr", 0x0000000e, 0, SROM_WL10MAXP, 0xff00},
+ {"pa1lomaxpwr", 0x0000000c, 0, SROM_WL1LHMAXP, 0xff00},
+ {"pa1himaxpwr", 0x0000000c, 0, SROM_WL1LHMAXP, 0x00ff},
+ {"pa1b0", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB0, 0xffff},
+ {"pa1b1", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB1, 0xffff},
+ {"pa1b2", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB2, 0xffff},
+ {"pa1lob0", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB0_LC, 0xffff},
+ {"pa1lob1", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB1_LC, 0xffff},
+ {"pa1lob2", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB2_LC, 0xffff},
+ {"pa1hib0", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB0_HC, 0xffff},
+ {"pa1hib1", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB1_HC, 0xffff},
+ {"pa1hib2", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB2_HC, 0xffff},
+ {"pa1itssit", 0x00000700, 0, SROM8_W1_ITTMAXP, 0xff00},
+ {"pa1maxpwr", 0x00000700, 0, SROM8_W1_ITTMAXP, 0x00ff},
+ {"pa1lomaxpwr", 0x00000700, 0, SROM8_W1_MAXP_LCHC, 0xff00},
+ {"pa1himaxpwr", 0x00000700, 0, SROM8_W1_MAXP_LCHC, 0x00ff},
+ {"bxa2g", 0x00000008, 0, SROM_BXARSSI2G, 0x1800},
+ {"rssisav2g", 0x00000008, 0, SROM_BXARSSI2G, 0x0700},
+ {"rssismc2g", 0x00000008, 0, SROM_BXARSSI2G, 0x00f0},
+ {"rssismf2g", 0x00000008, 0, SROM_BXARSSI2G, 0x000f},
+ {"bxa2g", 0x00000700, 0, SROM8_BXARSSI2G, 0x1800},
+ {"rssisav2g", 0x00000700, 0, SROM8_BXARSSI2G, 0x0700},
+ {"rssismc2g", 0x00000700, 0, SROM8_BXARSSI2G, 0x00f0},
+ {"rssismf2g", 0x00000700, 0, SROM8_BXARSSI2G, 0x000f},
+ {"bxa5g", 0x00000008, 0, SROM_BXARSSI5G, 0x1800},
+ {"rssisav5g", 0x00000008, 0, SROM_BXARSSI5G, 0x0700},
+ {"rssismc5g", 0x00000008, 0, SROM_BXARSSI5G, 0x00f0},
+ {"rssismf5g", 0x00000008, 0, SROM_BXARSSI5G, 0x000f},
+ {"bxa5g", 0x00000700, 0, SROM8_BXARSSI5G, 0x1800},
+ {"rssisav5g", 0x00000700, 0, SROM8_BXARSSI5G, 0x0700},
+ {"rssismc5g", 0x00000700, 0, SROM8_BXARSSI5G, 0x00f0},
+ {"rssismf5g", 0x00000700, 0, SROM8_BXARSSI5G, 0x000f},
+ {"tri2g", 0x00000008, 0, SROM_TRI52G, 0x00ff},
+ {"tri5g", 0x00000008, 0, SROM_TRI52G, 0xff00},
+ {"tri5gl", 0x00000008, 0, SROM_TRI5GHL, 0x00ff},
+ {"tri5gh", 0x00000008, 0, SROM_TRI5GHL, 0xff00},
+ {"tri2g", 0x00000700, 0, SROM8_TRI52G, 0x00ff},
+ {"tri5g", 0x00000700, 0, SROM8_TRI52G, 0xff00},
+ {"tri5gl", 0x00000700, 0, SROM8_TRI5GHL, 0x00ff},
+ {"tri5gh", 0x00000700, 0, SROM8_TRI5GHL, 0xff00},
+ {"rxpo2g", 0x00000008, SRFL_PRSIGN, SROM_RXPO52G, 0x00ff},
+ {"rxpo5g", 0x00000008, SRFL_PRSIGN, SROM_RXPO52G, 0xff00},
+ {"rxpo2g", 0x00000700, SRFL_PRSIGN, SROM8_RXPO52G, 0x00ff},
+ {"rxpo5g", 0x00000700, SRFL_PRSIGN, SROM8_RXPO52G, 0xff00},
+ {"txchain", 0x000000f0, SRFL_NOFFS, SROM4_TXRXC, SROM4_TXCHAIN_MASK},
+ {"rxchain", 0x000000f0, SRFL_NOFFS, SROM4_TXRXC, SROM4_RXCHAIN_MASK},
+ {"antswitch", 0x000000f0, SRFL_NOFFS, SROM4_TXRXC, SROM4_SWITCH_MASK},
+ {"txchain", 0x00000700, SRFL_NOFFS, SROM8_TXRXC, SROM4_TXCHAIN_MASK},
+ {"rxchain", 0x00000700, SRFL_NOFFS, SROM8_TXRXC, SROM4_RXCHAIN_MASK},
+ {"antswitch", 0x00000700, SRFL_NOFFS, SROM8_TXRXC, SROM4_SWITCH_MASK},
+ {"tssipos2g", 0x00000700, 0, SROM8_FEM2G, SROM8_FEM_TSSIPOS_MASK},
+ {"extpagain2g", 0x00000700, 0, SROM8_FEM2G, SROM8_FEM_EXTPA_GAIN_MASK},
+ {"pdetrange2g", 0x00000700, 0, SROM8_FEM2G, SROM8_FEM_PDET_RANGE_MASK},
+ {"triso2g", 0x00000700, 0, SROM8_FEM2G, SROM8_FEM_TR_ISO_MASK},
+ {"antswctl2g", 0x00000700, 0, SROM8_FEM2G, SROM8_FEM_ANTSWLUT_MASK},
+ {"tssipos5g", 0x00000700, 0, SROM8_FEM5G, SROM8_FEM_TSSIPOS_MASK},
+ {"extpagain5g", 0x00000700, 0, SROM8_FEM5G, SROM8_FEM_EXTPA_GAIN_MASK},
+ {"pdetrange5g", 0x00000700, 0, SROM8_FEM5G, SROM8_FEM_PDET_RANGE_MASK},
+ {"triso5g", 0x00000700, 0, SROM8_FEM5G, SROM8_FEM_TR_ISO_MASK},
+ {"antswctl5g", 0x00000700, 0, SROM8_FEM5G, SROM8_FEM_ANTSWLUT_MASK},
+ {"txpid2ga0", 0x000000f0, 0, SROM4_TXPID2G, 0x00ff},
+ {"txpid2ga1", 0x000000f0, 0, SROM4_TXPID2G, 0xff00},
+ {"txpid2ga2", 0x000000f0, 0, SROM4_TXPID2G + 1, 0x00ff},
+ {"txpid2ga3", 0x000000f0, 0, SROM4_TXPID2G + 1, 0xff00},
+ {"txpid5ga0", 0x000000f0, 0, SROM4_TXPID5G, 0x00ff},
+ {"txpid5ga1", 0x000000f0, 0, SROM4_TXPID5G, 0xff00},
+ {"txpid5ga2", 0x000000f0, 0, SROM4_TXPID5G + 1, 0x00ff},
+ {"txpid5ga3", 0x000000f0, 0, SROM4_TXPID5G + 1, 0xff00},
+ {"txpid5gla0", 0x000000f0, 0, SROM4_TXPID5GL, 0x00ff},
+ {"txpid5gla1", 0x000000f0, 0, SROM4_TXPID5GL, 0xff00},
+ {"txpid5gla2", 0x000000f0, 0, SROM4_TXPID5GL + 1, 0x00ff},
+ {"txpid5gla3", 0x000000f0, 0, SROM4_TXPID5GL + 1, 0xff00},
+ {"txpid5gha0", 0x000000f0, 0, SROM4_TXPID5GH, 0x00ff},
+ {"txpid5gha1", 0x000000f0, 0, SROM4_TXPID5GH, 0xff00},
+ {"txpid5gha2", 0x000000f0, 0, SROM4_TXPID5GH + 1, 0x00ff},
+ {"txpid5gha3", 0x000000f0, 0, SROM4_TXPID5GH + 1, 0xff00},
+
+ {"ccode", 0x0000000f, SRFL_CCODE, SROM_CCODE, 0xffff},
+ {"ccode", 0x00000010, SRFL_CCODE, SROM4_CCODE, 0xffff},
+ {"ccode", 0x000000e0, SRFL_CCODE, SROM5_CCODE, 0xffff},
+ {"ccode", 0x00000700, SRFL_CCODE, SROM8_CCODE, 0xffff},
+ {"macaddr", 0x00000700, SRFL_ETHADDR, SROM8_MACHI, 0xffff},
+ {"macaddr", 0x000000e0, SRFL_ETHADDR, SROM5_MACHI, 0xffff},
+ {"macaddr", 0x00000010, SRFL_ETHADDR, SROM4_MACHI, 0xffff},
+ {"macaddr", 0x00000008, SRFL_ETHADDR, SROM3_MACHI, 0xffff},
+ {"il0macaddr", 0x00000007, SRFL_ETHADDR, SROM_MACHI_IL0, 0xffff},
+ {"et1macaddr", 0x00000007, SRFL_ETHADDR, SROM_MACHI_ET1, 0xffff},
+
+ {"tempthresh", 0x00000700, 0, SROM8_THERMAL, 0xff00},
+ {"tempoffset", 0x00000700, 0, SROM8_THERMAL, 0x00ff},
+ {"rawtempsense", 0x00000700, SRFL_PRHEX, SROM8_MPWR_RAWTS, 0x01ff},
+ {"measpower", 0x00000700, SRFL_PRHEX, SROM8_MPWR_RAWTS, 0xfe00},
+ {"tempsense_slope", 0x00000700, SRFL_PRHEX, SROM8_TS_SLP_OPT_CORRX, 0x00ff},
+ {"tempcorrx", 0x00000700, SRFL_PRHEX, SROM8_TS_SLP_OPT_CORRX, 0xfc00},
+ {"tempsense_option", 0x00000700, SRFL_PRHEX, SROM8_TS_SLP_OPT_CORRX, 0x0300},
+ {"freqoffset_corr", 0x00000700, SRFL_PRHEX, SROM8_FOC_HWIQ_IQSWP, 0x000f},
+ {"iqcal_swp_dis", 0x00000700, SRFL_PRHEX, SROM8_FOC_HWIQ_IQSWP, 0x0010},
+ {"hw_iqcal_en", 0x00000700, SRFL_PRHEX, SROM8_FOC_HWIQ_IQSWP, 0x0020},
+ {"elna2g", 0x00000700, 0, SROM8_EXTLNAGAIN, 0x00ff},
+ {"elna5g", 0x00000700, 0, SROM8_EXTLNAGAIN, 0xff00},
+ {"phycal_tempdelta", 0x00000700, 0, SROM8_PHYCAL_TEMPDELTA, 0x00ff},
+ {"temps_period", 0x00000700, 0, SROM8_PHYCAL_TEMPDELTA, 0x0f00},
+ {"temps_hysteresis", 0x00000700, 0, SROM8_PHYCAL_TEMPDELTA, 0xf000},
+ {"measpower1", 0x00000700, SRFL_PRHEX, SROM8_MPWR_1_AND_2, 0x007f},
+ {"measpower2", 0x00000700, SRFL_PRHEX, SROM8_MPWR_1_AND_2, 0x3f80},
+
+ {"cck2gpo", 0x000000f0, 0, SROM4_2G_CCKPO, 0xffff},
+ {"cck2gpo", 0x00000100, 0, SROM8_2G_CCKPO, 0xffff},
+ {"ofdm2gpo", 0x000000f0, SRFL_MORE, SROM4_2G_OFDMPO, 0xffff},
+ {"", 0, 0, SROM4_2G_OFDMPO + 1, 0xffff},
+ {"ofdm5gpo", 0x000000f0, SRFL_MORE, SROM4_5G_OFDMPO, 0xffff},
+ {"", 0, 0, SROM4_5G_OFDMPO + 1, 0xffff},
+ {"ofdm5glpo", 0x000000f0, SRFL_MORE, SROM4_5GL_OFDMPO, 0xffff},
+ {"", 0, 0, SROM4_5GL_OFDMPO + 1, 0xffff},
+ {"ofdm5ghpo", 0x000000f0, SRFL_MORE, SROM4_5GH_OFDMPO, 0xffff},
+ {"", 0, 0, SROM4_5GH_OFDMPO + 1, 0xffff},
+ {"ofdm2gpo", 0x00000100, SRFL_MORE, SROM8_2G_OFDMPO, 0xffff},
+ {"", 0, 0, SROM8_2G_OFDMPO + 1, 0xffff},
+ {"ofdm5gpo", 0x00000100, SRFL_MORE, SROM8_5G_OFDMPO, 0xffff},
+ {"", 0, 0, SROM8_5G_OFDMPO + 1, 0xffff},
+ {"ofdm5glpo", 0x00000100, SRFL_MORE, SROM8_5GL_OFDMPO, 0xffff},
+ {"", 0, 0, SROM8_5GL_OFDMPO + 1, 0xffff},
+ {"ofdm5ghpo", 0x00000100, SRFL_MORE, SROM8_5GH_OFDMPO, 0xffff},
+ {"", 0, 0, SROM8_5GH_OFDMPO + 1, 0xffff},
+ {"mcs2gpo0", 0x000000f0, 0, SROM4_2G_MCSPO, 0xffff},
+ {"mcs2gpo1", 0x000000f0, 0, SROM4_2G_MCSPO + 1, 0xffff},
+ {"mcs2gpo2", 0x000000f0, 0, SROM4_2G_MCSPO + 2, 0xffff},
+ {"mcs2gpo3", 0x000000f0, 0, SROM4_2G_MCSPO + 3, 0xffff},
+ {"mcs2gpo4", 0x000000f0, 0, SROM4_2G_MCSPO + 4, 0xffff},
+ {"mcs2gpo5", 0x000000f0, 0, SROM4_2G_MCSPO + 5, 0xffff},
+ {"mcs2gpo6", 0x000000f0, 0, SROM4_2G_MCSPO + 6, 0xffff},
+ {"mcs2gpo7", 0x000000f0, 0, SROM4_2G_MCSPO + 7, 0xffff},
+ {"mcs5gpo0", 0x000000f0, 0, SROM4_5G_MCSPO, 0xffff},
+ {"mcs5gpo1", 0x000000f0, 0, SROM4_5G_MCSPO + 1, 0xffff},
+ {"mcs5gpo2", 0x000000f0, 0, SROM4_5G_MCSPO + 2, 0xffff},
+ {"mcs5gpo3", 0x000000f0, 0, SROM4_5G_MCSPO + 3, 0xffff},
+ {"mcs5gpo4", 0x000000f0, 0, SROM4_5G_MCSPO + 4, 0xffff},
+ {"mcs5gpo5", 0x000000f0, 0, SROM4_5G_MCSPO + 5, 0xffff},
+ {"mcs5gpo6", 0x000000f0, 0, SROM4_5G_MCSPO + 6, 0xffff},
+ {"mcs5gpo7", 0x000000f0, 0, SROM4_5G_MCSPO + 7, 0xffff},
+ {"mcs5glpo0", 0x000000f0, 0, SROM4_5GL_MCSPO, 0xffff},
+ {"mcs5glpo1", 0x000000f0, 0, SROM4_5GL_MCSPO + 1, 0xffff},
+ {"mcs5glpo2", 0x000000f0, 0, SROM4_5GL_MCSPO + 2, 0xffff},
+ {"mcs5glpo3", 0x000000f0, 0, SROM4_5GL_MCSPO + 3, 0xffff},
+ {"mcs5glpo4", 0x000000f0, 0, SROM4_5GL_MCSPO + 4, 0xffff},
+ {"mcs5glpo5", 0x000000f0, 0, SROM4_5GL_MCSPO + 5, 0xffff},
+ {"mcs5glpo6", 0x000000f0, 0, SROM4_5GL_MCSPO + 6, 0xffff},
+ {"mcs5glpo7", 0x000000f0, 0, SROM4_5GL_MCSPO + 7, 0xffff},
+ {"mcs5ghpo0", 0x000000f0, 0, SROM4_5GH_MCSPO, 0xffff},
+ {"mcs5ghpo1", 0x000000f0, 0, SROM4_5GH_MCSPO + 1, 0xffff},
+ {"mcs5ghpo2", 0x000000f0, 0, SROM4_5GH_MCSPO + 2, 0xffff},
+ {"mcs5ghpo3", 0x000000f0, 0, SROM4_5GH_MCSPO + 3, 0xffff},
+ {"mcs5ghpo4", 0x000000f0, 0, SROM4_5GH_MCSPO + 4, 0xffff},
+ {"mcs5ghpo5", 0x000000f0, 0, SROM4_5GH_MCSPO + 5, 0xffff},
+ {"mcs5ghpo6", 0x000000f0, 0, SROM4_5GH_MCSPO + 6, 0xffff},
+ {"mcs5ghpo7", 0x000000f0, 0, SROM4_5GH_MCSPO + 7, 0xffff},
+ {"mcs2gpo0", 0x00000100, 0, SROM8_2G_MCSPO, 0xffff},
+ {"mcs2gpo1", 0x00000100, 0, SROM8_2G_MCSPO + 1, 0xffff},
+ {"mcs2gpo2", 0x00000100, 0, SROM8_2G_MCSPO + 2, 0xffff},
+ {"mcs2gpo3", 0x00000100, 0, SROM8_2G_MCSPO + 3, 0xffff},
+ {"mcs2gpo4", 0x00000100, 0, SROM8_2G_MCSPO + 4, 0xffff},
+ {"mcs2gpo5", 0x00000100, 0, SROM8_2G_MCSPO + 5, 0xffff},
+ {"mcs2gpo6", 0x00000100, 0, SROM8_2G_MCSPO + 6, 0xffff},
+ {"mcs2gpo7", 0x00000100, 0, SROM8_2G_MCSPO + 7, 0xffff},
+ {"mcs5gpo0", 0x00000100, 0, SROM8_5G_MCSPO, 0xffff},
+ {"mcs5gpo1", 0x00000100, 0, SROM8_5G_MCSPO + 1, 0xffff},
+ {"mcs5gpo2", 0x00000100, 0, SROM8_5G_MCSPO + 2, 0xffff},
+ {"mcs5gpo3", 0x00000100, 0, SROM8_5G_MCSPO + 3, 0xffff},
+ {"mcs5gpo4", 0x00000100, 0, SROM8_5G_MCSPO + 4, 0xffff},
+ {"mcs5gpo5", 0x00000100, 0, SROM8_5G_MCSPO + 5, 0xffff},
+ {"mcs5gpo6", 0x00000100, 0, SROM8_5G_MCSPO + 6, 0xffff},
+ {"mcs5gpo7", 0x00000100, 0, SROM8_5G_MCSPO + 7, 0xffff},
+ {"mcs5glpo0", 0x00000100, 0, SROM8_5GL_MCSPO, 0xffff},
+ {"mcs5glpo1", 0x00000100, 0, SROM8_5GL_MCSPO + 1, 0xffff},
+ {"mcs5glpo2", 0x00000100, 0, SROM8_5GL_MCSPO + 2, 0xffff},
+ {"mcs5glpo3", 0x00000100, 0, SROM8_5GL_MCSPO + 3, 0xffff},
+ {"mcs5glpo4", 0x00000100, 0, SROM8_5GL_MCSPO + 4, 0xffff},
+ {"mcs5glpo5", 0x00000100, 0, SROM8_5GL_MCSPO + 5, 0xffff},
+ {"mcs5glpo6", 0x00000100, 0, SROM8_5GL_MCSPO + 6, 0xffff},
+ {"mcs5glpo7", 0x00000100, 0, SROM8_5GL_MCSPO + 7, 0xffff},
+ {"mcs5ghpo0", 0x00000100, 0, SROM8_5GH_MCSPO, 0xffff},
+ {"mcs5ghpo1", 0x00000100, 0, SROM8_5GH_MCSPO + 1, 0xffff},
+ {"mcs5ghpo2", 0x00000100, 0, SROM8_5GH_MCSPO + 2, 0xffff},
+ {"mcs5ghpo3", 0x00000100, 0, SROM8_5GH_MCSPO + 3, 0xffff},
+ {"mcs5ghpo4", 0x00000100, 0, SROM8_5GH_MCSPO + 4, 0xffff},
+ {"mcs5ghpo5", 0x00000100, 0, SROM8_5GH_MCSPO + 5, 0xffff},
+ {"mcs5ghpo6", 0x00000100, 0, SROM8_5GH_MCSPO + 6, 0xffff},
+ {"mcs5ghpo7", 0x00000100, 0, SROM8_5GH_MCSPO + 7, 0xffff},
+ {"cddpo", 0x000000f0, 0, SROM4_CDDPO, 0xffff},
+ {"stbcpo", 0x000000f0, 0, SROM4_STBCPO, 0xffff},
+ {"bw40po", 0x000000f0, 0, SROM4_BW40PO, 0xffff},
+ {"bwduppo", 0x000000f0, 0, SROM4_BWDUPPO, 0xffff},
+ {"cddpo", 0x00000100, 0, SROM8_CDDPO, 0xffff},
+ {"stbcpo", 0x00000100, 0, SROM8_STBCPO, 0xffff},
+ {"bw40po", 0x00000100, 0, SROM8_BW40PO, 0xffff},
+ {"bwduppo", 0x00000100, 0, SROM8_BWDUPPO, 0xffff},
+
+ /* power per rate from sromrev 9 */
+ {"cckbw202gpo", 0x00000600, 0, SROM9_2GPO_CCKBW20, 0xffff},
+ {"cckbw20ul2gpo", 0x00000600, 0, SROM9_2GPO_CCKBW20UL, 0xffff},
+ {"legofdmbw202gpo", 0x00000600, SRFL_MORE, SROM9_2GPO_LOFDMBW20, 0xffff},
+ {"", 0, 0, SROM9_2GPO_LOFDMBW20 + 1, 0xffff},
+ {"legofdmbw20ul2gpo", 0x00000600, SRFL_MORE, SROM9_2GPO_LOFDMBW20UL, 0xffff},
+ {"", 0, 0, SROM9_2GPO_LOFDMBW20UL + 1, 0xffff},
+ {"legofdmbw205glpo", 0x00000600, SRFL_MORE, SROM9_5GLPO_LOFDMBW20, 0xffff},
+ {"", 0, 0, SROM9_5GLPO_LOFDMBW20 + 1, 0xffff},
+ {"legofdmbw20ul5glpo", 0x00000600, SRFL_MORE, SROM9_5GLPO_LOFDMBW20UL, 0xffff},
+ {"", 0, 0, SROM9_5GLPO_LOFDMBW20UL + 1, 0xffff},
+ {"legofdmbw205gmpo", 0x00000600, SRFL_MORE, SROM9_5GMPO_LOFDMBW20, 0xffff},
+ {"", 0, 0, SROM9_5GMPO_LOFDMBW20 + 1, 0xffff},
+ {"legofdmbw20ul5gmpo", 0x00000600, SRFL_MORE, SROM9_5GMPO_LOFDMBW20UL, 0xffff},
+ {"", 0, 0, SROM9_5GMPO_LOFDMBW20UL + 1, 0xffff},
+ {"legofdmbw205ghpo", 0x00000600, SRFL_MORE, SROM9_5GHPO_LOFDMBW20, 0xffff},
+ {"", 0, 0, SROM9_5GHPO_LOFDMBW20 + 1, 0xffff},
+ {"legofdmbw20ul5ghpo", 0x00000600, SRFL_MORE, SROM9_5GHPO_LOFDMBW20UL, 0xffff},
+ {"", 0, 0, SROM9_5GHPO_LOFDMBW20UL + 1, 0xffff},
+ {"mcsbw202gpo", 0x00000600, SRFL_MORE, SROM9_2GPO_MCSBW20, 0xffff},
+ {"", 0, 0, SROM9_2GPO_MCSBW20 + 1, 0xffff},
+ {"mcsbw20ul2gpo", 0x00000600, SRFL_MORE, SROM9_2GPO_MCSBW20UL, 0xffff},
+ {"", 0, 0, SROM9_2GPO_MCSBW20UL + 1, 0xffff},
+ {"mcsbw402gpo", 0x00000600, SRFL_MORE, SROM9_2GPO_MCSBW40, 0xffff},
+ {"", 0, 0, SROM9_2GPO_MCSBW40 + 1, 0xffff},
+ {"mcsbw205glpo", 0x00000600, SRFL_MORE, SROM9_5GLPO_MCSBW20, 0xffff},
+ {"", 0, 0, SROM9_5GLPO_MCSBW20 + 1, 0xffff},
+ {"mcsbw20ul5glpo", 0x00000600, SRFL_MORE, SROM9_5GLPO_MCSBW20UL, 0xffff},
+ {"", 0, 0, SROM9_5GLPO_MCSBW20UL + 1, 0xffff},
+ {"mcsbw405glpo", 0x00000600, SRFL_MORE, SROM9_5GLPO_MCSBW40, 0xffff},
+ {"", 0, 0, SROM9_5GLPO_MCSBW40 + 1, 0xffff},
+ {"mcsbw205gmpo", 0x00000600, SRFL_MORE, SROM9_5GMPO_MCSBW20, 0xffff},
+ {"", 0, 0, SROM9_5GMPO_MCSBW20 + 1, 0xffff},
+ {"mcsbw20ul5gmpo", 0x00000600, SRFL_MORE, SROM9_5GMPO_MCSBW20UL, 0xffff},
+ {"", 0, 0, SROM9_5GMPO_MCSBW20UL + 1, 0xffff},
+ {"mcsbw405gmpo", 0x00000600, SRFL_MORE, SROM9_5GMPO_MCSBW40, 0xffff},
+ {"", 0, 0, SROM9_5GMPO_MCSBW40 + 1, 0xffff},
+ {"mcsbw205ghpo", 0x00000600, SRFL_MORE, SROM9_5GHPO_MCSBW20, 0xffff},
+ {"", 0, 0, SROM9_5GHPO_MCSBW20 + 1, 0xffff},
+ {"mcsbw20ul5ghpo", 0x00000600, SRFL_MORE, SROM9_5GHPO_MCSBW20UL, 0xffff},
+ {"", 0, 0, SROM9_5GHPO_MCSBW20UL + 1, 0xffff},
+ {"mcsbw405ghpo", 0x00000600, SRFL_MORE, SROM9_5GHPO_MCSBW40, 0xffff},
+ {"", 0, 0, SROM9_5GHPO_MCSBW40 + 1, 0xffff},
+ {"mcs32po", 0x00000600, 0, SROM9_PO_MCS32, 0xffff},
+ {"legofdm40duppo", 0x00000600, 0, SROM9_PO_LOFDM40DUP, 0xffff},
+ {"pcieingress_war", 0x00000700, 0, SROM8_PCIEINGRESS_WAR, 0xf},
+ {"eu_edthresh2g", 0x00000100, 0, SROM8_EU_EDCRSTH, 0x00ff},
+ {"eu_edthresh5g", 0x00000100, 0, SROM8_EU_EDCRSTH, 0xff00},
+ {"eu_edthresh2g", 0x00000200, 0, SROM9_EU_EDCRSTH, 0x00ff},
+ {"eu_edthresh5g", 0x00000200, 0, SROM9_EU_EDCRSTH, 0xff00},
+ {"rxgainerr2ga0", 0x00000700, 0, SROM8_RXGAINERR_2G, 0x003f},
+ {"rxgainerr2ga0", 0x00000700, 0, SROM8_RXGAINERR_2G, 0x003f},
+ {"rxgainerr2ga1", 0x00000700, 0, SROM8_RXGAINERR_2G, 0x07c0},
+ {"rxgainerr2ga2", 0x00000700, 0, SROM8_RXGAINERR_2G, 0xf800},
+ {"rxgainerr5gla0", 0x00000700, 0, SROM8_RXGAINERR_5GL, 0x003f},
+ {"rxgainerr5gla1", 0x00000700, 0, SROM8_RXGAINERR_5GL, 0x07c0},
+ {"rxgainerr5gla2", 0x00000700, 0, SROM8_RXGAINERR_5GL, 0xf800},
+ {"rxgainerr5gma0", 0x00000700, 0, SROM8_RXGAINERR_5GM, 0x003f},
+ {"rxgainerr5gma1", 0x00000700, 0, SROM8_RXGAINERR_5GM, 0x07c0},
+ {"rxgainerr5gma2", 0x00000700, 0, SROM8_RXGAINERR_5GM, 0xf800},
+ {"rxgainerr5gha0", 0x00000700, 0, SROM8_RXGAINERR_5GH, 0x003f},
+ {"rxgainerr5gha1", 0x00000700, 0, SROM8_RXGAINERR_5GH, 0x07c0},
+ {"rxgainerr5gha2", 0x00000700, 0, SROM8_RXGAINERR_5GH, 0xf800},
+ {"rxgainerr5gua0", 0x00000700, 0, SROM8_RXGAINERR_5GU, 0x003f},
+ {"rxgainerr5gua1", 0x00000700, 0, SROM8_RXGAINERR_5GU, 0x07c0},
+ {"rxgainerr5gua2", 0x00000700, 0, SROM8_RXGAINERR_5GU, 0xf800},
+ {"sar2g", 0x00000600, 0, SROM9_SAR, 0x00ff},
+ {"sar5g", 0x00000600, 0, SROM9_SAR, 0xff00},
+ {"noiselvl2ga0", 0x00000700, 0, SROM8_NOISELVL_2G, 0x001f},
+ {"noiselvl2ga1", 0x00000700, 0, SROM8_NOISELVL_2G, 0x03e0},
+ {"noiselvl2ga2", 0x00000700, 0, SROM8_NOISELVL_2G, 0x7c00},
+ {"noiselvl5gla0", 0x00000700, 0, SROM8_NOISELVL_5GL, 0x001f},
+ {"noiselvl5gla1", 0x00000700, 0, SROM8_NOISELVL_5GL, 0x03e0},
+ {"noiselvl5gla2", 0x00000700, 0, SROM8_NOISELVL_5GL, 0x7c00},
+ {"noiselvl5gma0", 0x00000700, 0, SROM8_NOISELVL_5GM, 0x001f},
+ {"noiselvl5gma1", 0x00000700, 0, SROM8_NOISELVL_5GM, 0x03e0},
+ {"noiselvl5gma2", 0x00000700, 0, SROM8_NOISELVL_5GM, 0x7c00},
+ {"noiselvl5gha0", 0x00000700, 0, SROM8_NOISELVL_5GH, 0x001f},
+ {"noiselvl5gha1", 0x00000700, 0, SROM8_NOISELVL_5GH, 0x03e0},
+ {"noiselvl5gha2", 0x00000700, 0, SROM8_NOISELVL_5GH, 0x7c00},
+ {"noiselvl5gua0", 0x00000700, 0, SROM8_NOISELVL_5GU, 0x001f},
+ {"noiselvl5gua1", 0x00000700, 0, SROM8_NOISELVL_5GU, 0x03e0},
+ {"noiselvl5gua2", 0x00000700, 0, SROM8_NOISELVL_5GU, 0x7c00},
+ {"noisecaloffset", 0x00000300, 0, SROM8_NOISECALOFFSET, 0x00ff},
+ {"noisecaloffset5g", 0x00000300, 0, SROM8_NOISECALOFFSET, 0xff00},
+ {"subband5gver", 0x00000700, 0, SROM8_SUBBAND_PPR, 0x7},
+
+ {"cckPwrOffset", 0x00000400, 0, SROM10_CCKPWROFFSET, 0xffff},
+ {"eu_edthresh2g", 0x00000400, 0, SROM10_EU_EDCRSTH, 0x00ff},
+ {"eu_edthresh5g", 0x00000400, 0, SROM10_EU_EDCRSTH, 0xff00},
+ /* swctrlmap_2g array; note that the last element doesn't have the
+ * SRFL_ARRAY flag set (see the decode note after this table).
+ */
+ {"swctrlmap_2g", 0x00000400, SRFL_MORE|SRFL_PRHEX|SRFL_ARRAY, SROM10_SWCTRLMAP_2G, 0xffff},
+ {"", 0x00000400, SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 1, 0xffff},
+ {"", 0x00000400, SRFL_MORE|SRFL_PRHEX|SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 2, 0xffff},
+ {"", 0x00000400, SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 3, 0xffff},
+ {"", 0x00000400, SRFL_MORE|SRFL_PRHEX|SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 4, 0xffff},
+ {"", 0x00000400, SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 5, 0xffff},
+ {"", 0x00000400, SRFL_MORE|SRFL_PRHEX|SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 6, 0xffff},
+ {"", 0x00000400, SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 7, 0xffff},
+ {"", 0x00000400, SRFL_PRHEX, SROM10_SWCTRLMAP_2G + 8, 0xffff},
+
+ /* sromrev 11 */
+ {"boardflags3", 0xfffff800, SRFL_PRHEX|SRFL_MORE, SROM11_BFL4, 0xffff},
+ {"", 0, 0, SROM11_BFL5, 0xffff},
+ {"boardnum", 0xfffff800, 0, SROM11_MACLO, 0xffff},
+ {"macaddr", 0xfffff800, SRFL_ETHADDR, SROM11_MACHI, 0xffff},
+ {"ccode", 0xfffff800, SRFL_CCODE, SROM11_CCODE, 0xffff},
+ {"regrev", 0xfffff800, 0, SROM11_REGREV, 0xffff},
+ {"aa2g", 0xfffff800, 0, SROM11_AA, 0x00ff},
+ {"aa5g", 0xfffff800, 0, SROM11_AA, 0xff00},
+ {"agbg0", 0xfffff800, 0, SROM11_AGBG10, 0xff00},
+ {"agbg1", 0xfffff800, 0, SROM11_AGBG10, 0x00ff},
+ {"agbg2", 0xfffff800, 0, SROM11_AGBG2A0, 0xff00},
+ {"aga0", 0xfffff800, 0, SROM11_AGBG2A0, 0x00ff},
+ {"aga1", 0xfffff800, 0, SROM11_AGA21, 0xff00},
+ {"aga2", 0xfffff800, 0, SROM11_AGA21, 0x00ff},
+ {"txchain", 0xfffff800, SRFL_NOFFS, SROM11_TXRXC, SROM4_TXCHAIN_MASK},
+ {"rxchain", 0xfffff800, SRFL_NOFFS, SROM11_TXRXC, SROM4_RXCHAIN_MASK},
+ {"antswitch", 0xfffff800, SRFL_NOFFS, SROM11_TXRXC, SROM4_SWITCH_MASK},
+
+ {"tssiposslope2g", 0xfffff800, 0, SROM11_FEM_CFG1, 0x0001},
+ {"epagain2g", 0xfffff800, 0, SROM11_FEM_CFG1, 0x000e},
+ {"pdgain2g", 0xfffff800, 0, SROM11_FEM_CFG1, 0x01f0},
+ {"tworangetssi2g", 0xfffff800, 0, SROM11_FEM_CFG1, 0x0200},
+ {"papdcap2g", 0xfffff800, 0, SROM11_FEM_CFG1, 0x0400},
+ {"femctrl", 0xfffff800, 0, SROM11_FEM_CFG1, 0xf800},
+
+ {"tssiposslope5g", 0xfffff800, 0, SROM11_FEM_CFG2, 0x0001},
+ {"epagain5g", 0xfffff800, 0, SROM11_FEM_CFG2, 0x000e},
+ {"pdgain5g", 0xfffff800, 0, SROM11_FEM_CFG2, 0x01f0},
+ {"tworangetssi5g", 0xfffff800, 0, SROM11_FEM_CFG2, 0x0200},
+ {"papdcap5g", 0xfffff800, 0, SROM11_FEM_CFG2, 0x0400},
+ {"gainctrlsph", 0xfffff800, 0, SROM11_FEM_CFG2, 0xf800},
+
+ {"tempthresh", 0xfffff800, 0, SROM11_THERMAL, 0xff00},
+ {"tempoffset", 0xfffff800, 0, SROM11_THERMAL, 0x00ff},
+ {"rawtempsense", 0xfffff800, SRFL_PRHEX, SROM11_MPWR_RAWTS, 0x01ff},
+ {"measpower", 0xfffff800, SRFL_PRHEX, SROM11_MPWR_RAWTS, 0xfe00},
+ {"tempsense_slope", 0xfffff800, SRFL_PRHEX, SROM11_TS_SLP_OPT_CORRX, 0x00ff},
+ {"tempcorrx", 0xfffff800, SRFL_PRHEX, SROM11_TS_SLP_OPT_CORRX, 0xfc00},
+ {"tempsense_option", 0xfffff800, SRFL_PRHEX, SROM11_TS_SLP_OPT_CORRX, 0x0300},
+ {"xtalfreq", 0xfffff800, 0, SROM11_XTAL_FREQ, 0xffff},
+ {"txpwrbckof", 0x00000800, SRFL_PRHEX, SROM11_PATH0 + SROM11_2G_MAXP, 0xff00},
+ /* Special PA Params for 4350 5G Band, 40/80 MHz BW Ant #1 */
+ {"pa5gbw4080a1", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB0_4080_W0_A1, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB0_4080_W1_A1, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB0_4080_W2_A1, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB1_4080_W0_A1, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_4080_PA, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_4080_PA + 1, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_4080_PA, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_4080_PA + 1, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_4080_PA + 2, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB3_4080_PA, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB3_4080_PA + 1, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX, SROM11_PATH2 + SROM11_5GB3_4080_PA + 2, 0xffff},
+ {"phycal_tempdelta", 0xfffff800, 0, SROM11_PHYCAL_TEMPDELTA, 0x00ff},
+ {"temps_period", 0xfffff800, 0, SROM11_PHYCAL_TEMPDELTA, 0x0f00},
+ {"temps_hysteresis", 0xfffff800, 0, SROM11_PHYCAL_TEMPDELTA, 0xf000},
+ {"measpower1", 0xfffff800, SRFL_PRHEX, SROM11_MPWR_1_AND_2, 0x007f},
+ {"measpower2", 0xfffff800, SRFL_PRHEX, SROM11_MPWR_1_AND_2, 0x3f80},
+ {"tssifloor2g", 0xfffff800, SRFL_PRHEX, SROM11_TSSIFLOOR_2G, 0x03ff},
+ {"tssifloor5g", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_TSSIFLOOR_5GL, 0x03ff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_TSSIFLOOR_5GM, 0x03ff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_TSSIFLOOR_5GH, 0x03ff},
+ {"", 0xfffff800, SRFL_PRHEX, SROM11_TSSIFLOOR_5GU, 0x03ff},
+ {"pdoffset2g40ma0", 0xfffff800, 0, SROM11_PDOFF_2G_40M, 0x000f},
+ {"pdoffset2g40ma1", 0xfffff800, 0, SROM11_PDOFF_2G_40M, 0x00f0},
+ {"pdoffset2g40ma2", 0xfffff800, 0, SROM11_PDOFF_2G_40M, 0x0f00},
+ {"pdoffset2g40mvalid", 0xfffff800, 0, SROM11_PDOFF_2G_40M, 0x8000},
+ {"pdoffset40ma0", 0xfffff800, 0, SROM11_PDOFF_40M_A0, 0xffff},
+ {"pdoffset40ma1", 0xfffff800, 0, SROM11_PDOFF_40M_A1, 0xffff},
+ {"pdoffset40ma2", 0xfffff800, 0, SROM11_PDOFF_40M_A2, 0xffff},
+ {"pdoffset80ma0", 0xfffff800, 0, SROM11_PDOFF_80M_A0, 0xffff},
+ {"pdoffset80ma1", 0xfffff800, 0, SROM11_PDOFF_80M_A1, 0xffff},
+ {"pdoffset80ma2", 0xfffff800, 0, SROM11_PDOFF_80M_A2, 0xffff},
+
+ {"subband5gver", 0xfffff800, SRFL_PRHEX, SROM11_SUBBAND5GVER, 0xffff},
+ {"paparambwver", 0xfffff800, 0, SROM11_MCSLR5GLPO, 0xf000},
+ {"rx5ggainwar", 0xfffff800, 0, SROM11_MCSLR5GMPO, 0x2000},
+ /* Special PA Params for 4350 5G Band, 40/80 MHz BW Ant #0 */
+ {"pa5gbw4080a0", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 +SROM11_5GB0_PA, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB0_PA + 1, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB0_PA + 2, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_PA, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_PA + 1, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_PA + 2, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_PA, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_PA + 1, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_PA + 2, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB3_PA, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB3_PA + 1, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX, SROM11_PATH2 + SROM11_5GB3_PA + 2, 0xffff},
+ /* Special PA Params for 4335 5G Band, 40 MHz BW */
+ {"pa5gbw40a0", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB0_PA, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB0_PA + 1, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB0_PA + 2, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB1_PA, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB1_PA + 1, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB1_PA + 2, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB2_PA, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB2_PA + 1, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB2_PA + 2, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB3_PA, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB3_PA + 1, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX, SROM11_PATH1 + SROM11_5GB3_PA + 2, 0xffff},
+ /* Special PA Params for 4335 5G Band, 80 MHz BW */
+ {"pa5gbw80a0", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB0_PA, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB0_PA + 1, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB0_PA + 2, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_PA, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_PA + 1, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_PA + 2, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_PA, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_PA + 1, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_PA + 2, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB3_PA, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB3_PA + 1, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX, SROM11_PATH2 + SROM11_5GB3_PA + 2, 0xffff},
+ /* Special PA Params for 4335 2G Band, CCK */
+ {"pa2gccka0", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_2G_PA, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_2G_PA + 1, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX, SROM11_PATH1 + SROM11_2G_PA + 2, 0xffff},
+
+ /* power per rate */
+ {"cckbw202gpo", 0xfffff800, 0, SROM11_CCKBW202GPO, 0xffff},
+ {"cckbw20ul2gpo", 0xfffff800, 0, SROM11_CCKBW20UL2GPO, 0xffff},
+ {"mcsbw202gpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW202GPO, 0xffff},
+ {"", 0xfffff800, 0, SROM11_MCSBW202GPO_1, 0xffff},
+ {"mcsbw402gpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW402GPO, 0xffff},
+ {"", 0xfffff800, 0, SROM11_MCSBW402GPO_1, 0xffff},
+ {"dot11agofdmhrbw202gpo", 0xfffff800, 0, SROM11_DOT11AGOFDMHRBW202GPO, 0xffff},
+ {"ofdmlrbw202gpo", 0xfffff800, 0, SROM11_OFDMLRBW202GPO, 0xffff},
+ {"mcsbw205glpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW205GLPO, 0xffff},
+ {"", 0xfffff800, 0, SROM11_MCSBW205GLPO_1, 0xffff},
+ {"mcsbw405glpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW405GLPO, 0xffff},
+ {"", 0xfffff800, 0, SROM11_MCSBW405GLPO_1, 0xffff},
+ {"mcsbw805glpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW805GLPO, 0xffff},
+ {"", 0xfffff800, 0, SROM11_MCSBW805GLPO_1, 0xffff},
+ {"mcsbw205gmpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW205GMPO, 0xffff},
+ {"", 0xfffff800, 0, SROM11_MCSBW205GMPO_1, 0xffff},
+ {"mcsbw405gmpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW405GMPO, 0xffff},
+ {"", 0xfffff800, 0, SROM11_MCSBW405GMPO_1, 0xffff},
+ {"mcsbw805gmpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW805GMPO, 0xffff},
+ {"", 0xfffff800, 0, SROM11_MCSBW805GMPO_1, 0xffff},
+ {"mcsbw205ghpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW205GHPO, 0xffff},
+ {"", 0xfffff800, 0, SROM11_MCSBW205GHPO_1, 0xffff},
+ {"mcsbw405ghpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW405GHPO, 0xffff},
+ {"", 0xfffff800, 0, SROM11_MCSBW405GHPO_1, 0xffff},
+ {"mcsbw805ghpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW805GHPO, 0xffff},
+ {"", 0xfffff800, 0, SROM11_MCSBW805GHPO_1, 0xffff},
+ {"mcslr5glpo", 0xfffff800, 0, SROM11_MCSLR5GLPO, 0x0fff},
+ {"mcslr5gmpo", 0xfffff800, 0, SROM11_MCSLR5GMPO, 0xffff},
+ {"mcslr5ghpo", 0xfffff800, 0, SROM11_MCSLR5GHPO, 0xffff},
+ {"sb20in40hrpo", 0xfffff800, 0, SROM11_SB20IN40HRPO, 0xffff},
+ {"sb20in80and160hr5glpo", 0xfffff800, 0, SROM11_SB20IN80AND160HR5GLPO, 0xffff},
+ {"sb40and80hr5glpo", 0xfffff800, 0, SROM11_SB40AND80HR5GLPO, 0xffff},
+ {"sb20in80and160hr5gmpo", 0xfffff800, 0, SROM11_SB20IN80AND160HR5GMPO, 0xffff},
+ {"sb40and80hr5gmpo", 0xfffff800, 0, SROM11_SB40AND80HR5GMPO, 0xffff},
+ {"sb20in80and160hr5ghpo", 0xfffff800, 0, SROM11_SB20IN80AND160HR5GHPO, 0xffff},
+ {"sb40and80hr5ghpo", 0xfffff800, 0, SROM11_SB40AND80HR5GHPO, 0xffff},
+ {"sb20in40lrpo", 0xfffff800, 0, SROM11_SB20IN40LRPO, 0xffff},
+ {"sb20in80and160lr5glpo", 0xfffff800, 0, SROM11_SB20IN80AND160LR5GLPO, 0xffff},
+ {"sb40and80lr5glpo", 0xfffff800, 0, SROM11_SB40AND80LR5GLPO, 0xffff},
+ {"sb20in80and160lr5gmpo", 0xfffff800, 0, SROM11_SB20IN80AND160LR5GMPO, 0xffff},
+ {"sb40and80lr5gmpo", 0xfffff800, 0, SROM11_SB40AND80LR5GMPO, 0xffff},
+ {"sb20in80and160lr5ghpo", 0xfffff800, 0, SROM11_SB20IN80AND160LR5GHPO, 0xffff},
+ {"sb40and80lr5ghpo", 0xfffff800, 0, SROM11_SB40AND80LR5GHPO, 0xffff},
+ {"dot11agduphrpo", 0xfffff800, 0, SROM11_DOT11AGDUPHRPO, 0xffff},
+ {"dot11agduplrpo", 0xfffff800, 0, SROM11_DOT11AGDUPLRPO, 0xffff},
+
+ /* Misc */
+ {"sar2g", 0xfffff800, 0, SROM11_SAR, 0x00ff},
+ {"sar5g", 0xfffff800, 0, SROM11_SAR, 0xff00},
+
+ {"noiselvl2ga0", 0xfffff800, 0, SROM11_NOISELVL_2G, 0x001f},
+ {"noiselvl2ga1", 0xfffff800, 0, SROM11_NOISELVL_2G, 0x03e0},
+ {"noiselvl2ga2", 0xfffff800, 0, SROM11_NOISELVL_2G, 0x7c00},
+ {"noiselvl5ga0", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GL, 0x001f},
+ {"", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GM, 0x001f},
+ {"", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GH, 0x001f},
+ {"", 0xfffff800, 0, SROM11_NOISELVL_5GU, 0x001f},
+ {"noiselvl5ga1", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GL, 0x03e0},
+ {"", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GM, 0x03e0},
+ {"", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GH, 0x03e0},
+ {"", 0xfffff800, 0, SROM11_NOISELVL_5GU, 0x03e0},
+ {"noiselvl5ga2", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GL, 0x7c00},
+ {"", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GM, 0x7c00},
+ {"", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GH, 0x7c00},
+ {"", 0xfffff800, 0, SROM11_NOISELVL_5GU, 0x7c00},
+ {"eu_edthresh2g", 0x00000800, 0, SROM11_EU_EDCRSTH, 0x00ff},
+ {"eu_edthresh5g", 0x00000800, 0, SROM11_EU_EDCRSTH, 0xff00},
+
+ {"rxgainerr2ga0", 0xfffff800, 0, SROM11_RXGAINERR_2G, 0x003f},
+ {"rxgainerr2ga1", 0xfffff800, 0, SROM11_RXGAINERR_2G, 0x07c0},
+ {"rxgainerr2ga2", 0xfffff800, 0, SROM11_RXGAINERR_2G, 0xf800},
+ {"rxgainerr5ga0", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GL, 0x003f},
+ {"", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GM, 0x003f},
+ {"", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GH, 0x003f},
+ {"", 0xfffff800, 0, SROM11_RXGAINERR_5GU, 0x003f},
+ {"rxgainerr5ga1", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GL, 0x07c0},
+ {"", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GM, 0x07c0},
+ {"", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GH, 0x07c0},
+ {"", 0xfffff800, 0, SROM11_RXGAINERR_5GU, 0x07c0},
+ {"rxgainerr5ga2", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GL, 0xf800},
+ {"", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GM, 0xf800},
+ {"", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GH, 0xf800},
+ {"", 0xfffff800, 0, SROM11_RXGAINERR_5GU, 0xf800},
+ {"rpcal2g", 0xfffff800, 0, SROM11_RPCAL_2G, 0xffff},
+ {"rpcal5gb0", 0xfffff800, 0, SROM11_RPCAL_5GL, 0xffff},
+ {"rpcal5gb1", 0xfffff800, 0, SROM11_RPCAL_5GM, 0xffff},
+ {"rpcal5gb2", 0xfffff800, 0, SROM11_RPCAL_5GH, 0xffff},
+ {"rpcal5gb3", 0xfffff800, 0, SROM11_RPCAL_5GU, 0xffff},
+ {"txidxcap2g", 0xfffff800, 0, SROM11_TXIDXCAP2G, 0x0ff0},
+ {"txidxcap5g", 0xfffff800, 0, SROM11_TXIDXCAP5G, 0x0ff0},
+ {"pdoffsetcckma0", 0xfffff800, 0, SROM11_PDOFF_2G_CCK, 0x000f},
+ {"pdoffsetcckma1", 0xfffff800, 0, SROM11_PDOFF_2G_CCK, 0x00f0},
+ {"pdoffsetcckma2", 0xfffff800, 0, SROM11_PDOFF_2G_CCK, 0x0f00},
+
+ /* sromrev 12 */
+ {"boardflags4", 0xfffff000, SRFL_PRHEX|SRFL_MORE, SROM12_BFL6, 0xffff},
+ {"", 0, 0, SROM12_BFL7, 0xffff},
+ {"pdoffsetcck", 0xfffff000, 0, SROM12_PDOFF_2G_CCK, 0xffff},
+ {"pdoffset20in40m5gb0", 0xfffff000, 0, SROM12_PDOFF_20in40M_5G_B0, 0xffff},
+ {"pdoffset20in40m5gb1", 0xfffff000, 0, SROM12_PDOFF_20in40M_5G_B1, 0xffff},
+ {"pdoffset20in40m5gb2", 0xfffff000, 0, SROM12_PDOFF_20in40M_5G_B2, 0xffff},
+ {"pdoffset20in40m5gb3", 0xfffff000, 0, SROM12_PDOFF_20in40M_5G_B3, 0xffff},
+ {"pdoffset20in40m5gb4", 0xfffff000, 0, SROM12_PDOFF_20in40M_5G_B4, 0xffff},
+ {"pdoffset40in80m5gb0", 0xfffff000, 0, SROM12_PDOFF_40in80M_5G_B0, 0xffff},
+ {"pdoffset40in80m5gb1", 0xfffff000, 0, SROM12_PDOFF_40in80M_5G_B1, 0xffff},
+ {"pdoffset40in80m5gb2", 0xfffff000, 0, SROM12_PDOFF_40in80M_5G_B2, 0xffff},
+ {"pdoffset40in80m5gb3", 0xfffff000, 0, SROM12_PDOFF_40in80M_5G_B3, 0xffff},
+ {"pdoffset40in80m5gb4", 0xfffff000, 0, SROM12_PDOFF_40in80M_5G_B4, 0xffff},
+ {"pdoffset20in80m5gb0", 0xfffff000, 0, SROM12_PDOFF_20in80M_5G_B0, 0xffff},
+ {"pdoffset20in80m5gb1", 0xfffff000, 0, SROM12_PDOFF_20in80M_5G_B1, 0xffff},
+ {"pdoffset20in80m5gb2", 0xfffff000, 0, SROM12_PDOFF_20in80M_5G_B2, 0xffff},
+ {"pdoffset20in80m5gb3", 0xfffff000, 0, SROM12_PDOFF_20in80M_5G_B3, 0xffff},
+ {"pdoffset20in80m5gb4", 0xfffff000, 0, SROM12_PDOFF_20in80M_5G_B4, 0xffff},
+
+ /* power per rate */
+ {"mcsbw205gx1po", 0xfffff000, SRFL_MORE, SROM12_MCSBW205GX1PO, 0xffff},
+ {"", 0xfffff000, 0, SROM12_MCSBW205GX1PO_1, 0xffff},
+ {"mcsbw405gx1po", 0xfffff000, SRFL_MORE, SROM12_MCSBW405GX1PO, 0xffff},
+ {"", 0xfffff000, 0, SROM12_MCSBW405GX1PO_1, 0xffff},
+ {"mcsbw805gx1po", 0xfffff000, SRFL_MORE, SROM12_MCSBW805GX1PO, 0xffff},
+ {"", 0xfffff000, 0, SROM12_MCSBW805GX1PO_1, 0xffff},
+ {"mcsbw205gx2po", 0xfffff000, SRFL_MORE, SROM12_MCSBW205GX2PO, 0xffff},
+ {"", 0xfffff000, 0, SROM12_MCSBW205GX2PO_1, 0xffff},
+ {"mcsbw405gx2po", 0xfffff000, SRFL_MORE, SROM12_MCSBW405GX2PO, 0xffff},
+ {"", 0xfffff000, 0, SROM12_MCSBW405GX2PO_1, 0xffff},
+ {"mcsbw805gx2po", 0xfffff000, SRFL_MORE, SROM12_MCSBW805GX2PO, 0xffff},
+ {"", 0xfffff000, 0, SROM12_MCSBW805GX2PO_1, 0xffff},
+
+ {"sb20in80and160hr5gx1po", 0xfffff000, 0, SROM12_SB20IN80AND160HR5GX1PO, 0xffff},
+ {"sb40and80hr5gx1po", 0xfffff000, 0, SROM12_SB40AND80HR5GX1PO, 0xffff},
+ {"sb20in80and160lr5gx1po", 0xfffff000, 0, SROM12_SB20IN80AND160LR5GX1PO, 0xffff},
+ {"sb40and80hr5gx1po", 0xfffff000, 0, SROM12_SB40AND80HR5GX1PO, 0xffff},
+ {"sb20in80and160hr5gx2po", 0xfffff000, 0, SROM12_SB20IN80AND160HR5GX2PO, 0xffff},
+ {"sb40and80hr5gx2po", 0xfffff000, 0, SROM12_SB40AND80HR5GX2PO, 0xffff},
+ {"sb20in80and160lr5gx2po", 0xfffff000, 0, SROM12_SB20IN80AND160LR5GX2PO, 0xffff},
+ {"sb40and80hr5gx2po", 0xfffff000, 0, SROM12_SB40AND80HR5GX2PO, 0xffff},
+
+ {"rxgains5gmelnagaina0", 0xfffff000, 0, SROM12_RXGAINS10, 0x0007},
+ {"rxgains5gmelnagaina1", 0xfffff000, 0, SROM12_RXGAINS11, 0x0007},
+ {"rxgains5gmelnagaina2", 0xfffff000, 0, SROM12_RXGAINS12, 0x0007},
+ {"rxgains5gmtrisoa0", 0xfffff000, 0, SROM12_RXGAINS10, 0x0078},
+ {"rxgains5gmtrisoa1", 0xfffff000, 0, SROM12_RXGAINS11, 0x0078},
+ {"rxgains5gmtrisoa2", 0xfffff000, 0, SROM12_RXGAINS12, 0x0078},
+ {"rxgains5gmtrelnabypa0", 0xfffff000, 0, SROM12_RXGAINS10, 0x0080},
+ {"rxgains5gmtrelnabypa1", 0xfffff000, 0, SROM12_RXGAINS11, 0x0080},
+ {"rxgains5gmtrelnabypa2", 0xfffff000, 0, SROM12_RXGAINS12, 0x0080},
+ {"rxgains5ghelnagaina0", 0xfffff000, 0, SROM12_RXGAINS10, 0x0700},
+ {"rxgains5ghelnagaina1", 0xfffff000, 0, SROM12_RXGAINS11, 0x0700},
+ {"rxgains5ghelnagaina2", 0xfffff000, 0, SROM12_RXGAINS12, 0x0700},
+ {"rxgains5ghtrisoa0", 0xfffff000, 0, SROM12_RXGAINS10, 0x7800},
+ {"rxgains5ghtrisoa1", 0xfffff000, 0, SROM12_RXGAINS11, 0x7800},
+ {"rxgains5ghtrisoa2", 0xfffff000, 0, SROM12_RXGAINS12, 0x7800},
+ {"rxgains5ghtrelnabypa0", 0xfffff000, 0, SROM12_RXGAINS10, 0x8000},
+ {"rxgains5ghtrelnabypa1", 0xfffff000, 0, SROM12_RXGAINS11, 0x8000},
+ {"rxgains5ghtrelnabypa2", 0xfffff000, 0, SROM12_RXGAINS12, 0x8000},
+ {"eu_edthresh2g", 0x00001000, 0, SROM12_EU_EDCRSTH, 0x00ff},
+ {"eu_edthresh5g", 0x00001000, 0, SROM12_EU_EDCRSTH, 0xff00},
+
+ {"gpdn", 0xfffff000, SRFL_PRHEX|SRFL_MORE, SROM12_GPDN_L, 0xffff},
+ {"", 0, 0, SROM12_GPDN_H, 0xffff},
+
+ {"rpcal2gcore3", 0xffffe000, 0, SROM13_RPCAL2GCORE3, 0x00ff},
+ {"rpcal5gb0core3", 0xffffe000, 0, SROM13_RPCAL5GB01CORE3, 0x00ff},
+ {"rpcal5gb1core3", 0xffffe000, 0, SROM13_RPCAL5GB01CORE3, 0xff00},
+ {"rpcal5gb2core3", 0xffffe000, 0, SROM13_RPCAL5GB23CORE3, 0x00ff},
+ {"rpcal5gb3core3", 0xffffe000, 0, SROM13_RPCAL5GB23CORE3, 0xff00},
+
+ {"sw_txchain_mask", 0xffffe000, 0, SROM13_SW_TXRX_MASK, 0x000f},
+ {"sw_rxchain_mask", 0xffffe000, 0, SROM13_SW_TXRX_MASK, 0x00f0},
+
+ {"eu_edthresh2g", 0x00002000, 0, SROM13_EU_EDCRSTH, 0x00ff},
+ {"eu_edthresh5g", 0x00002000, 0, SROM13_EU_EDCRSTH, 0xff00},
+
+ {"agbg3", 0xffffe000, 0, SROM13_ANTGAIN_BANDBGA, 0xff00},
+ {"aga3", 0xffffe000, 0, SROM13_ANTGAIN_BANDBGA, 0x00ff},
+ {"noiselvl2ga3", 0xffffe000, 0, SROM13_NOISELVLCORE3, 0x001f},
+ {"noiselvl5ga3", 0xffffe000, SRFL_ARRAY, SROM13_NOISELVLCORE3, 0x03e0},
+ {"", 0xffffe000, SRFL_ARRAY, SROM13_NOISELVLCORE3, 0x7c00},
+ {"", 0xffffe000, SRFL_ARRAY, SROM13_NOISELVLCORE3_1, 0x001f},
+ {"", 0xffffe000, 0, SROM13_NOISELVLCORE3_1, 0x03e0},
+ {"rxgainerr2ga3", 0xffffe000, 0, SROM13_RXGAINERRCORE3, 0x001f},
+ {"rxgainerr5ga3", 0xffffe000, SRFL_ARRAY, SROM13_RXGAINERRCORE3, 0x03e0},
+ {"", 0xffffe000, SRFL_ARRAY, SROM13_RXGAINERRCORE3, 0x7c00},
+ {"", 0xffffe000, SRFL_ARRAY, SROM13_RXGAINERRCORE3_1, 0x001f},
+ {"", 0xffffe000, 0, SROM13_RXGAINERRCORE3_1, 0x03e0},
+ {"rxgains5gmelnagaina3", 0xffffe000, 0, SROM13_RXGAINS1CORE3, 0x0007},
+ {"rxgains5gmtrisoa3", 0xffffe000, 0, SROM13_RXGAINS1CORE3, 0x0078},
+ {"rxgains5gmtrelnabypa3", 0xffffe000, 0, SROM13_RXGAINS1CORE3, 0x0080},
+ {"rxgains5ghelnagaina3", 0xffffe000, 0, SROM13_RXGAINS1CORE3, 0x0700},
+ {"rxgains5ghtrisoa3", 0xffffe000, 0, SROM13_RXGAINS1CORE3, 0x7800},
+ {"rxgains5ghtrelnabypa3", 0xffffe000, 0, SROM13_RXGAINS1CORE3, 0x8000},
+
+ /* pdoffset */
+ {"pdoffset20in40m5gcore3", 0xffffe000, 0, SROM13_PDOFFSET20IN40M5GCORE3, 0xffff},
+ {"pdoffset20in40m5gcore3_1", 0xffffe000, 0, SROM13_PDOFFSET20IN40M5GCORE3_1, 0xffff},
+ {"pdoffset20in80m5gcore3", 0xffffe000, 0, SROM13_PDOFFSET20IN80M5GCORE3, 0xffff},
+ {"pdoffset20in80m5gcore3_1", 0xffffe000, 0, SROM13_PDOFFSET20IN80M5GCORE3_1, 0xffff},
+ {"pdoffset40in80m5gcore3", 0xffffe000, 0, SROM13_PDOFFSET40IN80M5GCORE3, 0xffff},
+ {"pdoffset40in80m5gcore3_1", 0xffffe000, 0, SROM13_PDOFFSET40IN80M5GCORE3_1, 0xffff},
+
+ {"pdoffset20in40m2g", 0xffffe000, 0, SROM13_PDOFFSET20IN40M2G, 0xffff},
+ {"pdoffset20in40m2gcore3", 0xffffe000, 0, SROM13_PDOFFSET20IN40M2GCORE3, 0xffff},
+ {"pdoffsetcck20m", 0xffffe000, 0, SROM13_PDOFF_2G_CCK_20M, 0xffff},
+
+ /* power per rate */
+ {"mcs1024qam2gpo", 0xffffe000, 0, SROM13_MCS1024QAM2GPO, 0xffff},
+ {"mcs1024qam5glpo", 0xffffe000, SRFL_MORE, SROM13_MCS1024QAM5GLPO, 0xffff},
+ {"", 0xffffe000, 0, SROM13_MCS1024QAM5GLPO_1, 0xffff},
+ {"mcs1024qam5gmpo", 0xffffe000, SRFL_MORE, SROM13_MCS1024QAM5GMPO, 0xffff},
+ {"", 0xffffe000, 0, SROM13_MCS1024QAM5GMPO_1, 0xffff},
+ {"mcs1024qam5ghpo", 0xffffe000, SRFL_MORE, SROM13_MCS1024QAM5GHPO, 0xffff},
+ {"", 0xffffe000, 0, SROM13_MCS1024QAM5GHPO_1, 0xffff},
+ {"mcs1024qam5gx1po", 0xffffe000, SRFL_MORE, SROM13_MCS1024QAM5GX1PO, 0xffff},
+ {"", 0xffffe000, 0, SROM13_MCS1024QAM5GX1PO_1, 0xffff},
+ {"mcs1024qam5gx2po", 0xffffe000, SRFL_MORE, SROM13_MCS1024QAM5GX2PO, 0xffff},
+ {"", 0xffffe000, 0, SROM13_MCS1024QAM5GX2PO_1, 0xffff},
+
+ {"mcsbw1605glpo", 0xffffe000, SRFL_MORE, SROM13_MCSBW1605GLPO, 0xffff},
+ {"", 0xffffe000, 0, SROM13_MCSBW1605GLPO_1, 0xffff},
+ {"mcsbw1605gmpo", 0xffffe000, SRFL_MORE, SROM13_MCSBW1605GMPO, 0xffff},
+ {"", 0xffffe000, 0, SROM13_MCSBW1605GMPO_1, 0xffff},
+ {"mcsbw1605ghpo", 0xffffe000, SRFL_MORE, SROM13_MCSBW1605GHPO, 0xffff},
+ {"", 0xffffe000, 0, SROM13_MCSBW1605GHPO_1, 0xffff},
+ {"mcsbw1605gx1po", 0xffffe000, SRFL_MORE, SROM13_MCSBW1605GX1PO, 0xffff},
+ {"", 0xffffe000, 0, SROM13_MCSBW1605GX1PO_1, 0xffff},
+ {"mcsbw1605gx2po", 0xffffe000, SRFL_MORE, SROM13_MCSBW1605GX2PO, 0xffff},
+ {"", 0xffffe000, 0, SROM13_MCSBW1605GX2PO_1, 0xffff},
+
+ {"ulbpproffs2g", 0xffffe000, 0, SROM13_ULBPPROFFS2G, 0xffff},
+
+ {"mcs8poexp", 0xffffe000, SRFL_MORE, SROM13_MCS8POEXP, 0xffff},
+ {"", 0xffffe000, 0, SROM13_MCS8POEXP_1, 0xffff},
+ {"mcs9poexp", 0xffffe000, SRFL_MORE, SROM13_MCS9POEXP, 0xffff},
+ {"", 0xffffe000, 0, SROM13_MCS9POEXP_1, 0xffff},
+ {"mcs10poexp", 0xffffe000, SRFL_MORE, SROM13_MCS10POEXP, 0xffff},
+ {"", 0xffffe000, 0, SROM13_MCS10POEXP_1, 0xffff},
+ {"mcs11poexp", 0xffffe000, SRFL_MORE, SROM13_MCS11POEXP, 0xffff},
+ {"", 0xffffe000, 0, SROM13_MCS11POEXP_1, 0xffff},
+
+ {"ulbpdoffs5gb0a0", 0xffffe000, 0, SROM13_ULBPDOFFS5GB0A0, 0xffff},
+ {"ulbpdoffs5gb0a1", 0xffffe000, 0, SROM13_ULBPDOFFS5GB0A1, 0xffff},
+ {"ulbpdoffs5gb0a2", 0xffffe000, 0, SROM13_ULBPDOFFS5GB0A2, 0xffff},
+ {"ulbpdoffs5gb0a3", 0xffffe000, 0, SROM13_ULBPDOFFS5GB0A3, 0xffff},
+ {"ulbpdoffs5gb1a0", 0xffffe000, 0, SROM13_ULBPDOFFS5GB1A0, 0xffff},
+ {"ulbpdoffs5gb1a1", 0xffffe000, 0, SROM13_ULBPDOFFS5GB1A1, 0xffff},
+ {"ulbpdoffs5gb1a2", 0xffffe000, 0, SROM13_ULBPDOFFS5GB1A2, 0xffff},
+ {"ulbpdoffs5gb1a3", 0xffffe000, 0, SROM13_ULBPDOFFS5GB1A3, 0xffff},
+ {"ulbpdoffs5gb2a0", 0xffffe000, 0, SROM13_ULBPDOFFS5GB2A0, 0xffff},
+ {"ulbpdoffs5gb2a1", 0xffffe000, 0, SROM13_ULBPDOFFS5GB2A1, 0xffff},
+ {"ulbpdoffs5gb2a2", 0xffffe000, 0, SROM13_ULBPDOFFS5GB2A2, 0xffff},
+ {"ulbpdoffs5gb2a3", 0xffffe000, 0, SROM13_ULBPDOFFS5GB2A3, 0xffff},
+ {"ulbpdoffs5gb3a0", 0xffffe000, 0, SROM13_ULBPDOFFS5GB3A0, 0xffff},
+ {"ulbpdoffs5gb3a1", 0xffffe000, 0, SROM13_ULBPDOFFS5GB3A1, 0xffff},
+ {"ulbpdoffs5gb3a2", 0xffffe000, 0, SROM13_ULBPDOFFS5GB3A2, 0xffff},
+ {"ulbpdoffs5gb3a3", 0xffffe000, 0, SROM13_ULBPDOFFS5GB3A3, 0xffff},
+ {"ulbpdoffs5gb4a0", 0xffffe000, 0, SROM13_ULBPDOFFS5GB4A0, 0xffff},
+ {"ulbpdoffs5gb4a1", 0xffffe000, 0, SROM13_ULBPDOFFS5GB4A1, 0xffff},
+ {"ulbpdoffs5gb4a2", 0xffffe000, 0, SROM13_ULBPDOFFS5GB4A2, 0xffff},
+ {"ulbpdoffs5gb4a3", 0xffffe000, 0, SROM13_ULBPDOFFS5GB4A3, 0xffff},
+ {"ulbpdoffs2ga0", 0xffffe000, 0, SROM13_ULBPDOFFS2GA0, 0xffff},
+ {"ulbpdoffs2ga1", 0xffffe000, 0, SROM13_ULBPDOFFS2GA1, 0xffff},
+ {"ulbpdoffs2ga2", 0xffffe000, 0, SROM13_ULBPDOFFS2GA2, 0xffff},
+ {"ulbpdoffs2ga3", 0xffffe000, 0, SROM13_ULBPDOFFS2GA3, 0xffff},
+
+ {"rpcal5gb4", 0xffffe000, 0, SROM13_RPCAL5GB4, 0xffff},
+
+ {"sb20in40hrlrpox", 0xffffe000, 0, SROM13_SB20IN40HRLRPOX, 0xffff},
+
+ {"swctrlmap4_cfg", 0xffffe000, 0, SROM13_SWCTRLMAP4_CFG, 0xffff},
+ {"swctrlmap4_TX2g_fem3to0", 0xffffe000, 0, SROM13_SWCTRLMAP4_TX2G_FEM3TO0, 0xffff},
+ {"swctrlmap4_RX2g_fem3to0", 0xffffe000, 0, SROM13_SWCTRLMAP4_RX2G_FEM3TO0, 0xffff},
+ {"swctrlmap4_RXByp2g_fem3to0", 0xffffe000, 0, SROM13_SWCTRLMAP4_RXBYP2G_FEM3TO0, 0xffff},
+ {"swctrlmap4_misc2g_fem3to0", 0xffffe000, 0, SROM13_SWCTRLMAP4_MISC2G_FEM3TO0, 0xffff},
+ {"swctrlmap4_TX5g_fem3to0", 0xffffe000, 0, SROM13_SWCTRLMAP4_TX5G_FEM3TO0, 0xffff},
+ {"swctrlmap4_RX5g_fem3to0", 0xffffe000, 0, SROM13_SWCTRLMAP4_RX5G_FEM3TO0, 0xffff},
+ {"swctrlmap4_RXByp5g_fem3to0", 0xffffe000, 0, SROM13_SWCTRLMAP4_RXBYP5G_FEM3TO0, 0xffff},
+ {"swctrlmap4_misc5g_fem3to0", 0xffffe000, 0, SROM13_SWCTRLMAP4_MISC5G_FEM3TO0, 0xffff},
+ {"swctrlmap4_TX2g_fem7to4", 0xffffe000, 0, SROM13_SWCTRLMAP4_TX2G_FEM7TO4, 0xffff},
+ {"swctrlmap4_RX2g_fem7to4", 0xffffe000, 0, SROM13_SWCTRLMAP4_RX2G_FEM7TO4, 0xffff},
+ {"swctrlmap4_RXByp2g_fem7to4", 0xffffe000, 0, SROM13_SWCTRLMAP4_RXBYP2G_FEM7TO4, 0xffff},
+ {"swctrlmap4_misc2g_fem7to4", 0xffffe000, 0, SROM13_SWCTRLMAP4_MISC2G_FEM7TO4, 0xffff},
+ {"swctrlmap4_TX5g_fem7to4", 0xffffe000, 0, SROM13_SWCTRLMAP4_TX5G_FEM7TO4, 0xffff},
+ {"swctrlmap4_RX5g_fem7to4", 0xffffe000, 0, SROM13_SWCTRLMAP4_RX5G_FEM7TO4, 0xffff},
+ {"swctrlmap4_RXByp5g_fem7to4", 0xffffe000, 0, SROM13_SWCTRLMAP4_RXBYP5G_FEM7TO4, 0xffff},
+ {"swctrlmap4_misc5g_fem7to4", 0xffffe000, 0, SROM13_SWCTRLMAP4_MISC5G_FEM7TO4, 0xffff},
+ {NULL, 0, 0, 0, 0}
+};
+#endif /* !defined(SROM15_MEMOPT) */
+
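+/*
+ * Editor's note -- an illustrative sketch, not part of this change.  Each
+ * sromvar_t row above is <name, revmask, flags, off, mask>: a row applies
+ * when bit (1 << sromrev) is set in revmask (e.g. 0x00000700 covers sromrevs
+ * 8-10, 0xfffff800 covers sromrev 11 and up); the 16-bit SROM word at 'off'
+ * is AND-ed with 'mask' and, unless SRFL_NOFFS is set, shifted down to the
+ * mask's lowest set bit.  SRFL_MORE chains the following ""-named row(s) into
+ * one wider value, and SRFL_ARRAY chains them into one array-valued variable.
+ * Assuming that layout, the per-row decode is roughly:
+ *
+ *	uint16 w = srom[v->off];
+ *	uint32 val = w & v->mask;
+ *	if (!(v->flags & SRFL_NOFFS)) {
+ *		uint16 m = v->mask;
+ *		while (m && !(m & 1)) {	  -- shift value down to bit 0
+ *			m >>= 1;
+ *			val >>= 1;
+ *		}
+ *	}
+ */
+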
+static const sromvar_t BCMATTACHDATA(pci_srom15vars)[] = {
+ {"macaddr", 0x00008000, SRFL_ETHADDR, SROM15_MACHI, 0xffff},
+ {"caldata_offset", 0x00008000, 0, SROM15_CAL_OFFSET_LOC, 0xffff},
+ {"boardrev", 0x00008000, SRFL_PRHEX, SROM15_BRDREV, 0xffff},
+ {"ccode", 0x00008000, SRFL_CCODE, SROM15_CCODE, 0xffff},
+ {"regrev", 0x00008000, 0, SROM15_REGREV, 0xffff},
+ {NULL, 0, 0, 0, 0}
+};
+
+static const sromvar_t BCMATTACHDATA(pci_srom16vars)[] = {
+ {"macaddr", 0x00010000, SRFL_ETHADDR, SROM16_MACHI, 0xffff},
+ {"caldata_offset", 0x00010000, 0, SROM16_CALDATA_OFFSET_LOC, 0xffff},
+ {"boardrev", 0x00010000, SRFL_PRHEX, SROM16_BOARDREV, 0xffff},
+ {"ccode", 0x00010000, SRFL_CCODE, SROM16_CCODE, 0xffff},
+ {"regrev", 0x00010000, 0, SROM16_REGREV, 0xffff},
+ {NULL, 0, 0, 0, 0}
+};
+
+static const sromvar_t BCMATTACHDATA(pci_srom17vars)[] = {
+ {"boardrev", 0x00020000, SRFL_PRHEX, SROM17_BRDREV, 0xffff},
+ {"macaddr", 0x00020000, SRFL_ETHADDR, SROM17_MACADDR, 0xffff},
+ {"ccode", 0x00020000, SRFL_CCODE, SROM17_CCODE, 0xffff},
+ {"caldata_offset", 0x00020000, 0, SROM17_CALDATA, 0xffff},
+ {"gain_cal_temp", 0x00020000, SRFL_PRHEX, SROM17_GCALTMP, 0xffff},
+ {"rssi_delta_2gb0_c0", 0x00020000, PRHEX_N_MORE, SROM17_C0SRD202G, 0xffff},
+ {"", 0x00020000, 0, SROM17_C0SRD202G_1, 0xffff},
+ {"rssi_delta_5gl_c0", 0x00020000, PRHEX_N_MORE, SROM17_C0SRD205GL, 0xffff},
+ {"", 0x00020000, 0, SROM17_C0SRD205GL_1, 0xffff},
+ {"rssi_delta_5gml_c0", 0x00020000, PRHEX_N_MORE, SROM17_C0SRD205GML, 0xffff},
+ {"", 0x00020000, 0, SROM17_C0SRD205GML_1, 0xffff},
+ {"rssi_delta_5gmu_c0", 0x00020000, PRHEX_N_MORE, SROM17_C0SRD205GMU, 0xffff},
+ {"", 0x00020000, 0, SROM17_C0SRD205GMU_1, 0xffff},
+ {"rssi_delta_5gh_c0", 0x00020000, PRHEX_N_MORE, SROM17_C0SRD205GH, 0xffff},
+ {"", 0x00020000, 0, SROM17_C0SRD205GH_1, 0xffff},
+ {"rssi_delta_2gb0_c1", 0x00020000, PRHEX_N_MORE, SROM17_C1SRD202G, 0xffff},
+ {"", 0x00020000, 0, SROM17_C1SRD202G_1, 0xffff},
+ {"rssi_delta_5gl_c1", 0x00020000, PRHEX_N_MORE, SROM17_C1SRD205GL, 0xffff},
+ {"", 0x00020000, 0, SROM17_C1SRD205GL_1, 0xffff},
+ {"rssi_delta_5gml_c1", 0x00020000, PRHEX_N_MORE, SROM17_C1SRD205GML, 0xffff},
+ {"", 0x00020000, 0, SROM17_C1SRD205GML_1, 0xffff},
+ {"rssi_delta_5gmu_c1", 0x00020000, PRHEX_N_MORE, SROM17_C1SRD205GMU, 0xffff},
+ {"", 0x00020000, 0, SROM17_C1SRD205GMU_1, 0xffff},
+ {"rssi_delta_5gh_c1", 0x00020000, PRHEX_N_MORE, SROM17_C1SRD205GH, 0xffff},
+ {"", 0x00020000, 0, SROM17_C1SRD205GH_1, 0xffff},
+ {"txpa_trim_magic", 0x00020000, PRHEX_N_MORE, SROM17_TRAMMAGIC, 0xffff},
+ {"", 0x00020000, 0, SROM17_TRAMMAGIC_1, 0xffff},
+ {"txpa_trim_data", 0x00020000, SRFL_PRHEX, SROM17_TRAMDATA, 0xffff},
+ {NULL, 0, 0, 0, 0x00}
+};
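+
+/*
+ * Editor's note (assumption): PRHEX_N_MORE in the srom17 table above is read
+ * as shorthand for (SRFL_PRHEX | SRFL_MORE) -- print as hex and continue into
+ * the paired ""-named continuation row that follows each rssi_delta_* entry.
+ */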
+
+static const sromvar_t BCMATTACHDATA(pci_srom18vars)[] = {
+ {"macaddr", 0x00040000, SRFL_ETHADDR, SROM18_MACHI, 0xffff},
+ {"caldata_offset", 0x00040000, 0, SROM18_CALDATA_OFFSET_LOC, 0xffff},
+ {"boardrev", 0x00040000, SRFL_PRHEX, SROM18_BOARDREV, 0xffff},
+ {"ccode", 0x00040000, SRFL_CCODE, SROM18_CCODE, 0xffff},
+ {"regrev", 0x00040000, 0, SROM18_REGREV, 0xffff},
+ {NULL, 0, 0, 0, 0}
+};
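+
+/*
+ * Editor's note: the revmasks in the srom15/16/17/18 tables above are the
+ * single bits 0x00008000, 0x00010000, 0x00020000 and 0x00040000, i.e.
+ * (1 << 15/16/17/18) -- consistent with revmask being indexed by sromrev, so
+ * each table serves exactly one of srom revisions 15 through 18.
+ */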
+
+static const sromvar_t BCMATTACHDATA(perpath_pci_sromvars)[] = {
+ {"maxp2ga", 0x000000f0, 0, SROM4_2G_ITT_MAXP, 0x00ff},
+ {"itt2ga", 0x000000f0, 0, SROM4_2G_ITT_MAXP, 0xff00},
+ {"itt5ga", 0x000000f0, 0, SROM4_5G_ITT_MAXP, 0xff00},
+ {"pa2gw0a", 0x000000f0, SRFL_PRHEX, SROM4_2G_PA, 0xffff},
+ {"pa2gw1a", 0x000000f0, SRFL_PRHEX, SROM4_2G_PA + 1, 0xffff},
+ {"pa2gw2a", 0x000000f0, SRFL_PRHEX, SROM4_2G_PA + 2, 0xffff},
+ {"pa2gw3a", 0x000000f0, SRFL_PRHEX, SROM4_2G_PA + 3, 0xffff},
+ {"maxp5ga", 0x000000f0, 0, SROM4_5G_ITT_MAXP, 0x00ff},
+ {"maxp5gha", 0x000000f0, 0, SROM4_5GLH_MAXP, 0x00ff},
+ {"maxp5gla", 0x000000f0, 0, SROM4_5GLH_MAXP, 0xff00},
+ {"pa5gw0a", 0x000000f0, SRFL_PRHEX, SROM4_5G_PA, 0xffff},
+ {"pa5gw1a", 0x000000f0, SRFL_PRHEX, SROM4_5G_PA + 1, 0xffff},
+ {"pa5gw2a", 0x000000f0, SRFL_PRHEX, SROM4_5G_PA + 2, 0xffff},
+ {"pa5gw3a", 0x000000f0, SRFL_PRHEX, SROM4_5G_PA + 3, 0xffff},
+ {"pa5glw0a", 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA, 0xffff},
+ {"pa5glw1a", 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA + 1, 0xffff},
+ {"pa5glw2a", 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA + 2, 0xffff},
+ {"pa5glw3a", 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA + 3, 0xffff},
+ {"pa5ghw0a", 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA, 0xffff},
+ {"pa5ghw1a", 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA + 1, 0xffff},
+ {"pa5ghw2a", 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA + 2, 0xffff},
+ {"pa5ghw3a", 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA + 3, 0xffff},
+ {"maxp2ga", 0x00000700, 0, SROM8_2G_ITT_MAXP, 0x00ff},
+ {"itt2ga", 0x00000700, 0, SROM8_2G_ITT_MAXP, 0xff00},
+ {"itt5ga", 0x00000700, 0, SROM8_5G_ITT_MAXP, 0xff00},
+ {"pa2gw0a", 0x00000700, SRFL_PRHEX, SROM8_2G_PA, 0xffff},
+ {"pa2gw1a", 0x00000700, SRFL_PRHEX, SROM8_2G_PA + 1, 0xffff},
+ {"pa2gw2a", 0x00000700, SRFL_PRHEX, SROM8_2G_PA + 2, 0xffff},
+ {"maxp5ga", 0x00000700, 0, SROM8_5G_ITT_MAXP, 0x00ff},
+ {"maxp5gha", 0x00000700, 0, SROM8_5GLH_MAXP, 0x00ff},
+ {"maxp5gla", 0x00000700, 0, SROM8_5GLH_MAXP, 0xff00},
+ {"pa5gw0a", 0x00000700, SRFL_PRHEX, SROM8_5G_PA, 0xffff},
+ {"pa5gw1a", 0x00000700, SRFL_PRHEX, SROM8_5G_PA + 1, 0xffff},
+ {"pa5gw2a", 0x00000700, SRFL_PRHEX, SROM8_5G_PA + 2, 0xffff},
+ {"pa5glw0a", 0x00000700, SRFL_PRHEX, SROM8_5GL_PA, 0xffff},
+ {"pa5glw1a", 0x00000700, SRFL_PRHEX, SROM8_5GL_PA + 1, 0xffff},
+ {"pa5glw2a", 0x00000700, SRFL_PRHEX, SROM8_5GL_PA + 2, 0xffff},
+ {"pa5ghw0a", 0x00000700, SRFL_PRHEX, SROM8_5GH_PA, 0xffff},
+ {"pa5ghw1a", 0x00000700, SRFL_PRHEX, SROM8_5GH_PA + 1, 0xffff},
+ {"pa5ghw2a", 0x00000700, SRFL_PRHEX, SROM8_5GH_PA + 2, 0xffff},
+
+ /* sromrev 11 */
+ {"maxp2ga", 0xfffff800, 0, SROM11_2G_MAXP, 0x00ff},
+ {"pa2ga", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_2G_PA, 0xffff},
+ {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_2G_PA + 1, 0xffff},
+ {"", 0x00000800, SRFL_PRHEX, SROM11_2G_PA + 2, 0xffff},
+ {"rxgains5gmelnagaina", 0x00000800, 0, SROM11_RXGAINS1, 0x0007},
+ {"rxgains5gmtrisoa", 0x00000800, 0, SROM11_RXGAINS1, 0x0078},
+ {"rxgains5gmtrelnabypa", 0x00000800, 0, SROM11_RXGAINS1, 0x0080},
+ {"rxgains5ghelnagaina", 0x00000800, 0, SROM11_RXGAINS1, 0x0700},
+ {"rxgains5ghtrisoa", 0x00000800, 0, SROM11_RXGAINS1, 0x7800},
+ {"rxgains5ghtrelnabypa", 0x00000800, 0, SROM11_RXGAINS1, 0x8000},
+ {"rxgains2gelnagaina", 0x00000800, 0, SROM11_RXGAINS, 0x0007},
+ {"rxgains2gtrisoa", 0x00000800, 0, SROM11_RXGAINS, 0x0078},
+ {"rxgains2gtrelnabypa", 0x00000800, 0, SROM11_RXGAINS, 0x0080},
+ {"rxgains5gelnagaina", 0x00000800, 0, SROM11_RXGAINS, 0x0700},
+ {"rxgains5gtrisoa", 0x00000800, 0, SROM11_RXGAINS, 0x7800},
+ {"rxgains5gtrelnabypa", 0x00000800, 0, SROM11_RXGAINS, 0x8000},
+ {"maxp5ga", 0x00000800, SRFL_ARRAY, SROM11_5GB1B0_MAXP, 0x00ff},
+ {"", 0x00000800, SRFL_ARRAY, SROM11_5GB1B0_MAXP, 0xff00},
+ {"", 0x00000800, SRFL_ARRAY, SROM11_5GB3B2_MAXP, 0x00ff},
+ {"", 0x00000800, 0, SROM11_5GB3B2_MAXP, 0xff00},
+ {"pa5ga", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB0_PA, 0xffff},
+ {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB0_PA + 1, 0xffff},
+ {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB0_PA + 2, 0xffff},
+ {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB1_PA, 0xffff},
+ {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB1_PA + 1, 0xffff},
+ {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB1_PA + 2, 0xffff},
+ {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB2_PA, 0xffff},
+ {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB2_PA + 1, 0xffff},
+ {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB2_PA + 2, 0xffff},
+ {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB3_PA, 0xffff},
+ {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB3_PA + 1, 0xffff},
+ {"", 0x00000800, SRFL_PRHEX, SROM11_5GB3_PA + 2, 0xffff},
+
+ /* sromrev 12 */
+ {"maxp5gb4a", 0xfffff000, 0, SROM12_5GB42G_MAXP, 0x00ff00},
+ {"pa2ga", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_2GB0_PA_W0, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_2GB0_PA_W1, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_2GB0_PA_W2, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX, SROM12_2GB0_PA_W3, 0x00ffff},
+
+ {"pa2g40a", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_2G40B0_PA_W0, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_2G40B0_PA_W1, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_2G40B0_PA_W2, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX, SROM12_2G40B0_PA_W3, 0x00ffff},
+ {"maxp5gb0a", 0xfffff000, 0, SROM12_5GB1B0_MAXP, 0x00ff},
+ {"maxp5gb1a", 0xfffff000, 0, SROM12_5GB1B0_MAXP, 0x00ff00},
+ {"maxp5gb2a", 0xfffff000, 0, SROM12_5GB3B2_MAXP, 0x00ff},
+ {"maxp5gb3a", 0xfffff000, 0, SROM12_5GB3B2_MAXP, 0x00ff00},
+
+ {"pa5ga", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB0_PA_W0, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB0_PA_W1, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB0_PA_W2, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB0_PA_W3, 0x00ffff},
+
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB1_PA_W0, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB1_PA_W1, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB1_PA_W2, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB1_PA_W3, 0x00ffff},
+
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB2_PA_W0, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB2_PA_W1, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB2_PA_W2, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB2_PA_W3, 0x00ffff},
+
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB3_PA_W0, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB3_PA_W1, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB3_PA_W2, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB3_PA_W3, 0x00ffff},
+
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB4_PA_W0, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB4_PA_W1, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB4_PA_W2, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX, SROM12_5GB4_PA_W3, 0x00ffff},
+
+ {"pa5g40a", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B0_PA_W0, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B0_PA_W1, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B0_PA_W2, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B0_PA_W3, 0x00ffff},
+
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B1_PA_W0, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B1_PA_W1, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B1_PA_W2, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B1_PA_W3, 0x00ffff},
+
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B2_PA_W0, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B2_PA_W1, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B2_PA_W2, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B2_PA_W3, 0x00ffff},
+
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B3_PA_W0, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B3_PA_W1, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B3_PA_W2, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B3_PA_W3, 0x00ffff},
+
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B4_PA_W0, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B4_PA_W1, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B4_PA_W2, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX, SROM12_5G40B4_PA_W3, 0x00ffff},
+
+ {"pa5g80a", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B0_PA_W0, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B0_PA_W1, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B0_PA_W2, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B0_PA_W3, 0x00ffff},
+
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B1_PA_W0, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B1_PA_W1, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B1_PA_W2, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B1_PA_W3, 0x00ffff},
+
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B2_PA_W0, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B2_PA_W1, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B2_PA_W2, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B2_PA_W3, 0x00ffff},
+
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B3_PA_W0, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B3_PA_W1, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B3_PA_W2, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B3_PA_W3, 0x00ffff},
+
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B4_PA_W0, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B4_PA_W1, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B4_PA_W2, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX, SROM12_5G80B4_PA_W3, 0x00ffff},
+ /* sromrev 13 */
+ {"rxgains2gelnagaina", 0xffffe000, 0, SROM13_RXGAINS, 0x0007},
+ {"rxgains2gtrisoa", 0xffffe000, 0, SROM13_RXGAINS, 0x0078},
+ {"rxgains2gtrelnabypa", 0xffffe000, 0, SROM13_RXGAINS, 0x0080},
+ {"rxgains5gelnagaina", 0xffffe000, 0, SROM13_RXGAINS, 0x0700},
+ {"rxgains5gtrisoa", 0xffffe000, 0, SROM13_RXGAINS, 0x7800},
+ {"rxgains5gtrelnabypa", 0xffffe000, 0, SROM13_RXGAINS, 0x8000},
+ {NULL, 0, 0, 0, 0}
+};
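+
+/*
+ * Editor's note -- an illustrative sketch with hypothetical names, not part
+ * of this change.  The per-path table above holds one template row per
+ * variable; a consumer is expected to instantiate each name once per PHY
+ * core/path, appending the core index, so "maxp2ga" yields maxp2ga0,
+ * maxp2ga1, ... with the row's offset rebased per path.  Roughly:
+ *
+ *	for (p = 0; p < MAX_PATH_NUM; p++)
+ *		snprintf(name, sizeof(name), "%s%d", v->name, p);
+ */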
+
+typedef struct {
+ uint8 tag; /* Broadcom subtag name */
+ uint32 revmask; /* Bitmask of supported cis_sromrevs. Some parameters in
+ * different tuples share the same name, so the MFGc tool
+ * needs to know which tuple to generate for such a
+ * parameter (the sromrev is known from user input, e.g.
+ * the nvram file).
+ */
+ uint8 len; /* Length field of the tuple, note that it includes the
+ * subtag name (1 byte): 1 + tuple content length
+ */
+ const char *params; /* Each param is in this form: length(1 byte ascii) + var name
+ * Note that the order here has to match the parsing
+ * order in parsecis() in src/shared/bcmsrom.c
+ */
+} cis_tuple_t;
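+
+/*
+ * Editor's note (illustrative): each token in 'params' above encodes one
+ * field as <byte length><name>, and <n>*<m><name> encodes m consecutive
+ * values of n bytes each -- so "2vendid" is a single 2-byte vendid field,
+ * "1*4maxp5ga0" is four 1-byte values and "2*12pa5ga0" is twelve 2-byte
+ * words forming the pa5ga0 array.  A length of 0 appears to mark sub-byte
+ * fields that parsecis() handles as special cases (see the entries flagged
+ * "special case" below).
+ */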
+
+#define OTP_RAW (0xff - 1) /* Reserved tuple number for wrvar Raw input */
+/* quick hacks for supporting standard CIS tuples. */
+#define OTP_VERS_1 (0xff - 2) /* CISTPL_VERS_1 */
+#define OTP_MANFID (0xff - 3) /* CISTPL_MANFID */
+#define OTP_RAW1 (0xff - 4) /* Like RAW, but comes first */
+
+/** this array is used by CIS creating/writing applications */
+static const cis_tuple_t cis_hnbuvars[] = {
+/* tag revmask len params */
+ {OTP_RAW1, 0xffffffff, 0, ""}, /* special case */
+ {OTP_VERS_1, 0xffffffff, 0, "smanf sproductname"}, /* special case (non BRCM tuple) */
+ {OTP_MANFID, 0xffffffff, 4, "2manfid 2prodid"}, /* special case (non BRCM tuple) */
+ /* Unified OTP: tuple to embed USB manfid inside SDIO CIS */
+ {HNBU_UMANFID, 0xffffffff, 8, "8usbmanfid"},
+ {HNBU_SROMREV, 0xffffffff, 2, "1sromrev"},
+ /* NOTE: subdevid is also written to boardtype.
+ * Need to write HNBU_BOARDTYPE to change it if it is different.
+ */
+ {HNBU_CHIPID, 0xffffffff, 11, "2vendid 2devid 2chiprev 2subvendid 2subdevid"},
+ {HNBU_BOARDREV, 0xffffffff, 3, "2boardrev"},
+ {HNBU_PAPARMS, 0xffffffff, 10, "2pa0b0 2pa0b1 2pa0b2 1pa0itssit 1pa0maxpwr 1opo"},
+ {HNBU_AA, 0xffffffff, 3, "1aa2g 1aa5g"},
+ {HNBU_AA, 0xffffffff, 3, "1aa0 1aa1"}, /* backward compatibility */
+ {HNBU_AG, 0xffffffff, 5, "1ag0 1ag1 1ag2 1ag3"},
+ {HNBU_BOARDFLAGS, 0xffffffff, 21, "4boardflags 4boardflags2 4boardflags3 "
+ "4boardflags4 4boardflags5 "},
+ {HNBU_CCODE, 0xffffffff, 4, "2ccode 1cctl"},
+ {HNBU_CCKPO, 0xffffffff, 3, "2cckpo"},
+ {HNBU_OFDMPO, 0xffffffff, 5, "4ofdmpo"},
+ {HNBU_PAPARMS5G, 0xffffffff, 23, "2pa1b0 2pa1b1 2pa1b2 2pa1lob0 2pa1lob1 2pa1lob2 "
+ "2pa1hib0 2pa1hib1 2pa1hib2 1pa1itssit "
+ "1pa1maxpwr 1pa1lomaxpwr 1pa1himaxpwr"},
+ {HNBU_RDLID, 0xffffffff, 3, "2rdlid"},
+ {HNBU_RSSISMBXA2G, 0xffffffff, 3, "0rssismf2g 0rssismc2g "
+ "0rssisav2g 0bxa2g"}, /* special case */
+ {HNBU_RSSISMBXA5G, 0xffffffff, 3, "0rssismf5g 0rssismc5g "
+ "0rssisav5g 0bxa5g"}, /* special case */
+ {HNBU_XTALFREQ, 0xffffffff, 5, "4xtalfreq"},
+ {HNBU_TRI2G, 0xffffffff, 2, "1tri2g"},
+ {HNBU_TRI5G, 0xffffffff, 4, "1tri5gl 1tri5g 1tri5gh"},
+ {HNBU_RXPO2G, 0xffffffff, 2, "1rxpo2g"},
+ {HNBU_RXPO5G, 0xffffffff, 2, "1rxpo5g"},
+ {HNBU_BOARDNUM, 0xffffffff, 3, "2boardnum"},
+ {HNBU_MACADDR, 0xffffffff, 7, "6macaddr"}, /* special case */
+ {HNBU_RDLSN, 0xffffffff, 3, "2rdlsn"},
+ {HNBU_BOARDTYPE, 0xffffffff, 3, "2boardtype"},
+ {HNBU_RDLRNDIS, 0xffffffff, 2, "1rdlndis"},
+ {HNBU_CHAINSWITCH, 0xffffffff, 5, "1txchain 1rxchain 2antswitch"},
+ {HNBU_REGREV, 0xffffffff, 3, "2regrev"},
+ {HNBU_FEM, 0x000007fe, 5, "0antswctl2g 0triso2g 0pdetrange2g 0extpagain2g "
+ "0tssipos2g 0antswctl5g 0triso5g 0pdetrange5g 0extpagain5g 0tssipos5g"}, /* special case */
+ {HNBU_PAPARMS_C0, 0x000007fe, 31, "1maxp2ga0 1itt2ga0 2pa2gw0a0 2pa2gw1a0 "
+ "2pa2gw2a0 1maxp5ga0 1itt5ga0 1maxp5gha0 1maxp5gla0 2pa5gw0a0 2pa5gw1a0 2pa5gw2a0 "
+ "2pa5glw0a0 2pa5glw1a0 2pa5glw2a0 2pa5ghw0a0 2pa5ghw1a0 2pa5ghw2a0"},
+ {HNBU_PAPARMS_C1, 0x000007fe, 31, "1maxp2ga1 1itt2ga1 2pa2gw0a1 2pa2gw1a1 "
+ "2pa2gw2a1 1maxp5ga1 1itt5ga1 1maxp5gha1 1maxp5gla1 2pa5gw0a1 2pa5gw1a1 2pa5gw2a1 "
+ "2pa5glw0a1 2pa5glw1a1 2pa5glw2a1 2pa5ghw0a1 2pa5ghw1a1 2pa5ghw2a1"},
+ {HNBU_PO_CCKOFDM, 0xffffffff, 19, "2cck2gpo 4ofdm2gpo 4ofdm5gpo 4ofdm5glpo "
+ "4ofdm5ghpo"},
+ {HNBU_PO_MCS2G, 0xffffffff, 17, "2mcs2gpo0 2mcs2gpo1 2mcs2gpo2 2mcs2gpo3 "
+ "2mcs2gpo4 2mcs2gpo5 2mcs2gpo6 2mcs2gpo7"},
+ {HNBU_PO_MCS5GM, 0xffffffff, 17, "2mcs5gpo0 2mcs5gpo1 2mcs5gpo2 2mcs5gpo3 "
+ "2mcs5gpo4 2mcs5gpo5 2mcs5gpo6 2mcs5gpo7"},
+ {HNBU_PO_MCS5GLH, 0xffffffff, 33, "2mcs5glpo0 2mcs5glpo1 2mcs5glpo2 2mcs5glpo3 "
+ "2mcs5glpo4 2mcs5glpo5 2mcs5glpo6 2mcs5glpo7 "
+ "2mcs5ghpo0 2mcs5ghpo1 2mcs5ghpo2 2mcs5ghpo3 "
+ "2mcs5ghpo4 2mcs5ghpo5 2mcs5ghpo6 2mcs5ghpo7"},
+ {HNBU_CCKFILTTYPE, 0xffffffff, 2, "1cckdigfilttype"},
+ {HNBU_PO_CDD, 0xffffffff, 3, "2cddpo"},
+ {HNBU_PO_STBC, 0xffffffff, 3, "2stbcpo"},
+ {HNBU_PO_40M, 0xffffffff, 3, "2bw40po"},
+ {HNBU_PO_40MDUP, 0xffffffff, 3, "2bwduppo"},
+ {HNBU_RDLRWU, 0xffffffff, 2, "1rdlrwu"},
+ {HNBU_WPS, 0xffffffff, 3, "1wpsgpio 1wpsled"},
+ {HNBU_USBFS, 0xffffffff, 2, "1usbfs"},
+ {HNBU_ELNA2G, 0xffffffff, 2, "1elna2g"},
+ {HNBU_ELNA5G, 0xffffffff, 2, "1elna5g"},
+ {HNBU_CUSTOM1, 0xffffffff, 5, "4customvar1"},
+ {OTP_RAW, 0xffffffff, 0, ""}, /* special case */
+ {HNBU_OFDMPO5G, 0xffffffff, 13, "4ofdm5gpo 4ofdm5glpo 4ofdm5ghpo"},
+ {HNBU_USBEPNUM, 0xffffffff, 3, "2usbepnum"},
+ {HNBU_CCKBW202GPO, 0xffffffff, 7, "2cckbw202gpo 2cckbw20ul2gpo 2cckbw20in802gpo"},
+ {HNBU_LEGOFDMBW202GPO, 0xffffffff, 9, "4legofdmbw202gpo 4legofdmbw20ul2gpo"},
+ {HNBU_LEGOFDMBW205GPO, 0xffffffff, 25, "4legofdmbw205glpo 4legofdmbw20ul5glpo "
+ "4legofdmbw205gmpo 4legofdmbw20ul5gmpo 4legofdmbw205ghpo 4legofdmbw20ul5ghpo"},
+ {HNBU_MCS2GPO, 0xffffffff, 17, "4mcsbw202gpo 4mcsbw20ul2gpo 4mcsbw402gpo 4mcsbw802gpo"},
+ {HNBU_MCS5GLPO, 0xffffffff, 13, "4mcsbw205glpo 4mcsbw20ul5glpo 4mcsbw405glpo"},
+ {HNBU_MCS5GMPO, 0xffffffff, 13, "4mcsbw205gmpo 4mcsbw20ul5gmpo 4mcsbw405gmpo"},
+ {HNBU_MCS5GHPO, 0xffffffff, 13, "4mcsbw205ghpo 4mcsbw20ul5ghpo 4mcsbw405ghpo"},
+ {HNBU_MCS32PO, 0xffffffff, 3, "2mcs32po"},
+ {HNBU_LEG40DUPPO, 0xffffffff, 3, "2legofdm40duppo"},
+ {HNBU_TEMPTHRESH, 0xffffffff, 7, "1tempthresh 0temps_period 0temps_hysteresis "
+ "1tempoffset 1tempsense_slope 0tempcorrx 0tempsense_option "
+ "1phycal_tempdelta"}, /* special case */
+ {HNBU_MUXENAB, 0xffffffff, 2, "1muxenab"},
+ {HNBU_FEM_CFG, 0xfffff800, 5, "0femctrl 0papdcap2g 0tworangetssi2g 0pdgain2g "
+ "0epagain2g 0tssiposslope2g 0gainctrlsph 0papdcap5g 0tworangetssi5g 0pdgain5g 0epagain5g "
+ "0tssiposslope5g"}, /* special case */
+ {HNBU_ACPA_C0, 0x00001800, 39, "2subband5gver 2maxp2ga0 2*3pa2ga0 "
+ "1*4maxp5ga0 2*12pa5ga0"},
+ {HNBU_ACPA_C1, 0x00001800, 37, "2maxp2ga1 2*3pa2ga1 1*4maxp5ga1 2*12pa5ga1"},
+ {HNBU_ACPA_C2, 0x00001800, 37, "2maxp2ga2 2*3pa2ga2 1*4maxp5ga2 2*12pa5ga2"},
+ {HNBU_MEAS_PWR, 0xfffff800, 5, "1measpower 1measpower1 1measpower2 2rawtempsense"},
+ {HNBU_PDOFF, 0xfffff800, 13, "2pdoffset40ma0 2pdoffset40ma1 2pdoffset40ma2 "
+ "2pdoffset80ma0 2pdoffset80ma1 2pdoffset80ma2"},
+ {HNBU_ACPPR_2GPO, 0xfffff800, 13, "2dot11agofdmhrbw202gpo 2ofdmlrbw202gpo "
+ "2sb20in40dot11agofdm2gpo 2sb20in80dot11agofdm2gpo 2sb20in40ofdmlrbw202gpo "
+ "2sb20in80ofdmlrbw202gpo"},
+ {HNBU_ACPPR_5GPO, 0xfffff800, 59, "4mcsbw805glpo 4mcsbw1605glpo 4mcsbw805gmpo "
+ "4mcsbw1605gmpo 4mcsbw805ghpo 4mcsbw1605ghpo 2mcslr5glpo 2mcslr5gmpo 2mcslr5ghpo "
+ "4mcsbw80p805glpo 4mcsbw80p805gmpo 4mcsbw80p805ghpo 4mcsbw80p805gx1po 2mcslr5gx1po "
+ "2mcslr5g80p80po 4mcsbw805gx1po 4mcsbw1605gx1po"},
+ {HNBU_MCS5Gx1PO, 0xfffff800, 9, "4mcsbw205gx1po 4mcsbw405gx1po"},
+ {HNBU_ACPPR_SBPO, 0xfffff800, 49, "2sb20in40hrpo 2sb20in80and160hr5glpo "
+ "2sb40and80hr5glpo 2sb20in80and160hr5gmpo 2sb40and80hr5gmpo 2sb20in80and160hr5ghpo "
+ "2sb40and80hr5ghpo 2sb20in40lrpo 2sb20in80and160lr5glpo 2sb40and80lr5glpo "
+ "2sb20in80and160lr5gmpo 2sb40and80lr5gmpo 2sb20in80and160lr5ghpo 2sb40and80lr5ghpo "
+ "4dot11agduphrpo 4dot11agduplrpo 2sb20in40and80hrpo 2sb20in40and80lrpo "
+ "2sb20in80and160hr5gx1po 2sb20in80and160lr5gx1po 2sb40and80hr5gx1po 2sb40and80lr5gx1po "
+ },
+ {HNBU_ACPPR_SB8080_PO, 0xfffff800, 23, "2sb2040and80in80p80hr5glpo "
+ "2sb2040and80in80p80lr5glpo 2sb2040and80in80p80hr5gmpo "
+ "2sb2040and80in80p80lr5gmpo 2sb2040and80in80p80hr5ghpo 2sb2040and80in80p80lr5ghpo "
+ "2sb2040and80in80p80hr5gx1po 2sb2040and80in80p80lr5gx1po 2sb20in80p80hr5gpo "
+ "2sb20in80p80lr5gpo 2dot11agduppo"},
+ {HNBU_NOISELVL, 0xfffff800, 16, "1noiselvl2ga0 1noiselvl2ga1 1noiselvl2ga2 "
+ "1*4noiselvl5ga0 1*4noiselvl5ga1 1*4noiselvl5ga2"},
+ {HNBU_RXGAIN_ERR, 0xfffff800, 16, "1rxgainerr2ga0 1rxgainerr2ga1 1rxgainerr2ga2 "
+ "1*4rxgainerr5ga0 1*4rxgainerr5ga1 1*4rxgainerr5ga2"},
+ {HNBU_AGBGA, 0xfffff800, 7, "1agbg0 1agbg1 1agbg2 1aga0 1aga1 1aga2"},
+ {HNBU_USBDESC_COMPOSITE, 0xffffffff, 3, "2usbdesc_composite"},
+ {HNBU_UUID, 0xffffffff, 17, "16uuid"},
+ {HNBU_WOWLGPIO, 0xffffffff, 2, "1wowl_gpio"},
+ {HNBU_ACRXGAINS_C0, 0xfffff800, 5, "0rxgains5gtrelnabypa0 0rxgains5gtrisoa0 "
+ "0rxgains5gelnagaina0 0rxgains2gtrelnabypa0 0rxgains2gtrisoa0 0rxgains2gelnagaina0 "
+ "0rxgains5ghtrelnabypa0 0rxgains5ghtrisoa0 0rxgains5ghelnagaina0 0rxgains5gmtrelnabypa0 "
+ "0rxgains5gmtrisoa0 0rxgains5gmelnagaina0"}, /* special case */
+ {HNBU_ACRXGAINS_C1, 0xfffff800, 5, "0rxgains5gtrelnabypa1 0rxgains5gtrisoa1 "
+ "0rxgains5gelnagaina1 0rxgains2gtrelnabypa1 0rxgains2gtrisoa1 0rxgains2gelnagaina1 "
+ "0rxgains5ghtrelnabypa1 0rxgains5ghtrisoa1 0rxgains5ghelnagaina1 0rxgains5gmtrelnabypa1 "
+ "0rxgains5gmtrisoa1 0rxgains5gmelnagaina1"}, /* special case */
+ {HNBU_ACRXGAINS_C2, 0xfffff800, 5, "0rxgains5gtrelnabypa2 0rxgains5gtrisoa2 "
+ "0rxgains5gelnagaina2 0rxgains2gtrelnabypa2 0rxgains2gtrisoa2 0rxgains2gelnagaina2 "
+ "0rxgains5ghtrelnabypa2 0rxgains5ghtrisoa2 0rxgains5ghelnagaina2 0rxgains5gmtrelnabypa2 "
+ "0rxgains5gmtrisoa2 0rxgains5gmelnagaina2"}, /* special case */
+ {HNBU_TXDUTY, 0xfffff800, 9, "2tx_duty_cycle_ofdm_40_5g "
+ "2tx_duty_cycle_thresh_40_5g 2tx_duty_cycle_ofdm_80_5g 2tx_duty_cycle_thresh_80_5g"},
+ {HNBU_PDOFF_2G, 0xfffff800, 3, "0pdoffset2g40ma0 0pdoffset2g40ma1 "
+ "0pdoffset2g40ma2 0pdoffset2g40mvalid"},
+ {HNBU_ACPA_CCK_C0, 0xfffff800, 7, "2*3pa2gccka0"},
+ {HNBU_ACPA_CCK_C1, 0xfffff800, 7, "2*3pa2gccka1"},
+ {HNBU_ACPA_40, 0xfffff800, 25, "2*12pa5gbw40a0"},
+ {HNBU_ACPA_80, 0xfffff800, 25, "2*12pa5gbw80a0"},
+ {HNBU_ACPA_4080, 0xfffff800, 49, "2*12pa5gbw4080a0 2*12pa5gbw4080a1"},
+ {HNBU_ACPA_4X4C0, 0xffffe000, 23, "1maxp2ga0 2*4pa2ga0 2*4pa2g40a0 "
+ "1maxp5gb0a0 1maxp5gb1a0 1maxp5gb2a0 1maxp5gb3a0 1maxp5gb4a0"},
+ {HNBU_ACPA_4X4C1, 0xffffe000, 23, "1maxp2ga1 2*4pa2ga1 2*4pa2g40a1 "
+ "1maxp5gb0a1 1maxp5gb1a1 1maxp5gb2a1 1maxp5gb3a1 1maxp5gb4a1"},
+ {HNBU_ACPA_4X4C2, 0xffffe000, 23, "1maxp2ga2 2*4pa2ga2 2*4pa2g40a2 "
+ "1maxp5gb0a2 1maxp5gb1a2 1maxp5gb2a2 1maxp5gb3a2 1maxp5gb4a2"},
+ {HNBU_ACPA_4X4C3, 0xffffe000, 23, "1maxp2ga3 2*4pa2ga3 2*4pa2g40a3 "
+ "1maxp5gb0a3 1maxp5gb1a3 1maxp5gb2a3 1maxp5gb3a3 1maxp5gb4a3"},
+ {HNBU_ACPA_BW20_4X4C0, 0xffffe000, 41, "2*20pa5ga0"},
+ {HNBU_ACPA_BW40_4X4C0, 0xffffe000, 41, "2*20pa5g40a0"},
+ {HNBU_ACPA_BW80_4X4C0, 0xffffe000, 41, "2*20pa5g80a0"},
+ {HNBU_ACPA_BW20_4X4C1, 0xffffe000, 41, "2*20pa5ga1"},
+ {HNBU_ACPA_BW40_4X4C1, 0xffffe000, 41, "2*20pa5g40a1"},
+ {HNBU_ACPA_BW80_4X4C1, 0xffffe000, 41, "2*20pa5g80a1"},
+ {HNBU_ACPA_BW20_4X4C2, 0xffffe000, 41, "2*20pa5ga2"},
+ {HNBU_ACPA_BW40_4X4C2, 0xffffe000, 41, "2*20pa5g40a2"},
+ {HNBU_ACPA_BW80_4X4C2, 0xffffe000, 41, "2*20pa5g80a2"},
+ {HNBU_ACPA_BW20_4X4C3, 0xffffe000, 41, "2*20pa5ga3"},
+ {HNBU_ACPA_BW40_4X4C3, 0xffffe000, 41, "2*20pa5g40a3"},
+ {HNBU_ACPA_BW80_4X4C3, 0xffffe000, 41, "2*20pa5g80a3"},
+ {HNBU_SUBBAND5GVER, 0xfffff800, 3, "2subband5gver"},
+ {HNBU_PAPARAMBWVER, 0xfffff800, 2, "1paparambwver"},
+ {HNBU_TXBFRPCALS, 0xfffff800, 11,
+ "2rpcal2g 2rpcal5gb0 2rpcal5gb1 2rpcal5gb2 2rpcal5gb3"}, /* txbf rpcalvars */
+ {HNBU_GPIO_PULL_DOWN, 0xffffffff, 5, "4gpdn"},
+ {HNBU_MACADDR2, 0xffffffff, 7, "6macaddr2"}, /* special case */
+ {HNBU_RSSI_DELTA_2G_B0, 0xffffffff, 17, "1*16rssi_delta_2gb0"},
+ {HNBU_RSSI_DELTA_2G_B1, 0xffffffff, 17, "1*16rssi_delta_2gb1"},
+ {HNBU_RSSI_DELTA_2G_B2, 0xffffffff, 17, "1*16rssi_delta_2gb2"},
+ {HNBU_RSSI_DELTA_2G_B3, 0xffffffff, 17, "1*16rssi_delta_2gb3"},
+ {HNBU_RSSI_DELTA_2G_B4, 0xffffffff, 17, "1*16rssi_delta_2gb4"},
+ {HNBU_RSSI_CAL_FREQ_GRP_2G, 0xffffffff, 8, "1*7rssi_cal_freq_grp"},
+ {HNBU_RSSI_DELTA_5GL, 0xffffffff, 25, "1*24rssi_delta_5gl"},
+ {HNBU_RSSI_DELTA_5GML, 0xffffffff, 25, "1*24rssi_delta_5gml"},
+ {HNBU_RSSI_DELTA_5GMU, 0xffffffff, 25, "1*24rssi_delta_5gmu"},
+ {HNBU_RSSI_DELTA_5GH, 0xffffffff, 25, "1*24rssi_delta_5gh"},
+ {HNBU_ACPA_6G_C0, 0x00000800, 45, "2subband6gver 1*6maxp6ga0 2*18pa6ga0 "},
+ {HNBU_ACPA_6G_C1, 0x00000800, 43, "1*6maxp6ga1 2*18pa6ga1 "},
+ {HNBU_ACPA_6G_C2, 0x00000800, 43, "1*6maxp6ga2 2*18pa6ga2 "},
+ {0xFF, 0xffffffff, 0, ""}
+};
+
+#endif /* _bcmsrom_tbl_h_ */
diff --git a/bcmdhd.101.10.361.x/include/bcmstdlib_s.h b/bcmdhd.101.10.361.x/include/bcmstdlib_s.h
new file mode 100755
index 0000000..dad66b8
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/bcmstdlib_s.h
@@ -0,0 +1,54 @@
+/*
+ * Broadcom Secure Standard Library.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _bcmstdlib_s_h_
+#define _bcmstdlib_s_h_
+
+#ifndef BWL_NO_INTERNAL_STDLIB_S_SUPPORT
+#if !defined(__STDC_WANT_SECURE_LIB__) && \
+ !(defined(__STDC_LIB_EXT1__) && defined(__STDC_WANT_LIB_EXT1__))
+extern int memmove_s(void *dest, size_t destsz, const void *src, size_t n);
+extern int memcpy_s(void *dest, size_t destsz, const void *src, size_t n);
+extern int memset_s(void *dest, size_t destsz, int c, size_t n);
+#endif /* !__STDC_WANT_SECURE_LIB__ && !(__STDC_LIB_EXT1__ && __STDC_WANT_LIB_EXT1__) */
+#if !defined(FREEBSD) && !defined(MACOSX) && !defined(BCM_USE_PLATFORM_STRLCPY)
+extern size_t strlcpy(char *dest, const char *src, size_t size);
+#endif /* !defined(FREEBSD) && !defined(MACOSX) && !defined(BCM_USE_PLATFORM_STRLCPY) */
+extern size_t strlcat_s(char *dest, const char *src, size_t size);
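+
+/* Usage sketch (illustrative; assumes the secure variants return a nonzero
+ * code and reject the copy when n exceeds destsz):
+ *
+ *   uint8 dst[16];
+ *   if (memcpy_s(dst, sizeof(dst), src, src_len) != 0) {
+ *       // copy was rejected; dst was not overflowed
+ *   }
+ */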
+
+/* Remap xxx_s() APIs to use compiler builtin functions for C standard library functions.
+ * The intent is to identify buffer overflow at compile-time for the safe stdlib APIs when
+ * the user-specified destination buffer-size is incorrect.
+ *
+ * This is only intended as a compile-time test, and should be used by compile-only targets.
+ */
+#if defined(BCM_STDLIB_S_BUILTINS_TEST)
+#define memmove_s(dest, destsz, src, n) ((void)(destsz), (int)__builtin_memmove((dest), (src), (n)))
+#define memcpy_s(dest, destsz, src, n) ((void)(destsz), (int)__builtin_memcpy((dest), (src), (n)))
+#define memset_s(dest, destsz, c, n) ((void)(destsz), (int)__builtin_memset((dest), (c), (n)))
+#define strlcpy(dest, src, size) ((void)(size), (size_t)__builtin_strcpy((dest), (src)))
+#define strlcat_s(dest, src, size) ((void)(size), (size_t)__builtin_strcat((dest), (src)))
+#endif /* BCM_STDLIB_S_BUILTINS_TEST */
+
+#endif /* !BWL_NO_INTERNAL_STDLIB_S_SUPPORT */
+#endif /* _bcmstdlib_s_h_ */
diff --git a/bcmdhd.101.10.361.x/include/bcmtcp.h b/bcmdhd.101.10.361.x/include/bcmtcp.h
new file mode 100755
index 0000000..3e580a2
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/bcmtcp.h
@@ -0,0 +1,86 @@
+/*
+ * Fundamental constants relating to TCP Protocol
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _bcmtcp_h_
+#define _bcmtcp_h_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+#define TCP_SRC_PORT_OFFSET 0 /* TCP source port offset */
+#define TCP_DEST_PORT_OFFSET 2 /* TCP dest port offset */
+#define TCP_SEQ_NUM_OFFSET 4 /* TCP sequence number offset */
+#define TCP_ACK_NUM_OFFSET 8 /* TCP acknowledgement number offset */
+#define TCP_HLEN_OFFSET 12 /* HLEN and reserved bits offset */
+#define TCP_FLAGS_OFFSET 13 /* FLAGS and reserved bits offset */
+#define TCP_CHKSUM_OFFSET 16 /* TCP body checksum offset */
+
+#define TCP_PORT_LEN 2 /* TCP port field length */
+
+/* 8bit TCP flag field */
+#define TCP_FLAG_URG 0x20
+#define TCP_FLAG_ACK 0x10
+#define TCP_FLAG_PSH 0x08
+#define TCP_FLAG_RST 0x04
+#define TCP_FLAG_SYN 0x02
+#define TCP_FLAG_FIN 0x01
+
+#define TCP_HLEN_MASK 0xf000
+#define TCP_HLEN_SHIFT 12
+
+/* These fields are stored in network order */
+BWL_PRE_PACKED_STRUCT struct bcmtcp_hdr
+{
+ uint16 src_port; /* Source Port Address */
+ uint16 dst_port; /* Destination Port Address */
+ uint32 seq_num; /* TCP Sequence Number */
+ uint32 ack_num; /* TCP Acknowledgement Number */
+ uint16 hdrlen_rsvd_flags; /* Header length, reserved bits and flags */
+ uint16 tcpwin; /* TCP window */
+ uint16 chksum; /* Segment checksum with pseudoheader */
+ uint16 urg_ptr; /* Points to seq-num of byte following urg data */
+} BWL_POST_PACKED_STRUCT;
+
+#define TCP_MIN_HEADER_LEN 20
+
+#define TCP_HDRLEN_MASK 0xf0
+#define TCP_HDRLEN_SHIFT 4
+#define TCP_HDRLEN(hdrlen) (((hdrlen) & TCP_HDRLEN_MASK) >> TCP_HDRLEN_SHIFT)
+
+#define TCP_FLAGS_MASK 0x1f
+#define TCP_FLAGS(hdrlen) ((hdrlen) & TCP_FLAGS_MASK)
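+
+/* Decoding sketch (illustrative; 'tcp' is an assumed struct bcmtcp_hdr *):
+ * recover the header length in bytes from the data-offset byte.
+ *
+ *   uint8 hl = ((const uint8 *)&tcp->hdrlen_rsvd_flags)[0];
+ *   uint hdr_bytes = TCP_HDRLEN(hl) * 4;   // data offset is in 32-bit words
+ */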
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+/* To handle 32-bit sequence-number wraparound. */
+#define IS_TCPSEQ_GE(a, b) ((a - b) < NBITVAL(31)) /* a >= b */
+#define IS_TCPSEQ_LE(a, b) ((b - a) < NBITVAL(31)) /* a <= b */
+#define IS_TCPSEQ_GT(a, b) !IS_TCPSEQ_LE(a, b) /* a > b */
+#define IS_TCPSEQ_LT(a, b) !IS_TCPSEQ_GE(a, b) /* a < b */
+
+#endif /* #ifndef _bcmtcp_h_ */
diff --git a/bcmdhd.101.10.361.x/include/bcmtlv.h b/bcmdhd.101.10.361.x/include/bcmtlv.h
new file mode 100755
index 0000000..78710b6
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/bcmtlv.h
@@ -0,0 +1,375 @@
+/*
+ * TLV and XTLV support
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _bcmtlv_h_
+#define _bcmtlv_h_
+
+#include <typedefs.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/* begin tlvs - used in 802.11 IEs etc. */
+
+/* type(aka id)/length/value buffer triple */
+typedef struct bcm_tlv {
+ uint8 id;
+ uint8 len;
+ uint8 data[1];
+} bcm_tlv_t;
+
+/* size of tlv including data */
+#define BCM_TLV_SIZE(_tlv) ((_tlv) ? (OFFSETOF(bcm_tlv_t, data) + (_tlv)->len) : 0u)
+
+/* get next tlv - no length checks */
+#define BCM_TLV_NEXT(_tlv) (bcm_tlv_t *)((uint8 *)(_tlv)+ BCM_TLV_SIZE(_tlv))
+
+/* tlv length is restricted to 1 byte */
+#define BCM_TLV_MAX_DATA_SIZE (255)
+
+/* tlv header - two bytes */
+#define BCM_TLV_HDR_SIZE (OFFSETOF(bcm_tlv_t, data))
+
+/* Check that bcm_tlv_t fits into the given buffer len */
+#define bcm_valid_tlv(elt, buflen) \
+ ((elt != NULL) && \
+ ((buflen) >= (uint)BCM_TLV_HDR_SIZE) && \
+ ((buflen) >= (uint)(BCM_TLV_HDR_SIZE + (elt)->len)))
+
+/* type(aka id)/length/ext/value buffer */
+typedef struct bcm_tlv_ext {
+ uint8 id;
+ uint8 len;
+ uint8 ext;
+ uint8 data[1];
+} bcm_tlv_ext_t;
+
+/* get next tlv_ext - no length checks */
+#define BCM_TLV_EXT_NEXT(_tlv_ext) \
+ (bcm_tlv_ext_t *)((uint8 *)(_tlv_ext)+ BCM_TLV_EXT_SIZE(_tlv_ext))
+
+/* tlv_ext length is restricted to 1 byte */
+#define BCM_TLV_EXT_MAX_DATA_SIZE (254u)
+
+/* tlv_ext header - three bytes */
+#define BCM_TLV_EXT_HDR_SIZE (OFFSETOF(bcm_tlv_ext_t, data))
+
+/* size of tlv_ext including data */
+#define BCM_TLV_EXT_SIZE(_tlv_ext) (BCM_TLV_EXT_HDR_SIZE + (_tlv_ext)->len)
+
+/* find the next tlv */
+bcm_tlv_t *bcm_next_tlv(const bcm_tlv_t *elt, uint *buflen);
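+
+/* Iteration sketch (illustrative; 'buf'/'buflen' are assumed inputs): walk a
+ * TLV buffer with a length check at every step.
+ *
+ *   const bcm_tlv_t *elt = (const bcm_tlv_t *)buf;
+ *   uint rem = buflen;
+ *   while (bcm_valid_tlv(elt, rem)) {
+ *       // consume elt->id / elt->len / elt->data
+ *       elt = bcm_next_tlv(elt, &rem);
+ *       if (elt == NULL)
+ *           break;
+ *   }
+ */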
+
+/* move buffer/buflen up to the given tlv, or set to NULL/0 on error */
+void bcm_tlv_buffer_advance_to(const bcm_tlv_t *elt, const uint8 **buffer, uint *buflen);
+
+/* move buffer/buflen past the given tlv, or set to NULL/0 on error */
+void bcm_tlv_buffer_advance_past(const bcm_tlv_t *elt, const uint8 **buffer, uint *buflen);
+
+/* find the tlv for a given id */
+bcm_tlv_t *bcm_parse_tlvs(const void *buf, uint buflen, uint key);
+
+/* advancement modes for bcm_parse_tlvs_advance() */
+typedef enum {
+ BCM_TLV_ADVANCE_NONE = 0, // do not adjust the buffer/buflen
+ BCM_TLV_ADVANCE_TO = 1, // advance to the found tlv
+ BCM_TLV_ADVANCE_PAST = 2 // advance past the found tlv
+} bcm_tlv_advance_mode_t;
+
+/* Find an IE of a specific type from a buffer.
+ * tlvs: buffer to search for IE
+ * tlvs_len: buffer length
+ * tag: IE tag
+ * oui_len: length of the OUI
+ * oui: Specific OUI to match
+ * type: OUI type
+ * Return the matched IE, else return null.
+*/
+extern bcm_tlv_t *bcm_find_ie(const uint8 *tlvs, uint tlvs_len, uint8 tag,
+ uint8 oui_len, const char *oui, uint8 type);
+
+/* search for a matching tlv id, and adjust the parse buffer pointer/length */
+const bcm_tlv_t *bcm_parse_tlvs_advance(const uint8 **buf, uint *buflen, uint key,
+ bcm_tlv_advance_mode_t advance);
+
+/*
+ * Traverse tlvs and return pointer to the first tlv that
+ * matches the key. Return NULL if not found or tlv len < min_bodylen
+ */
+bcm_tlv_t *bcm_parse_tlvs_min_bodylen(const void *buf, uint buflen, uint key, uint min_bodylen);
+
+/*
+ * Traverse tlvs and return pointer to the first tlv that
+ * matches the key. Return NULL if not found or tlv size > max_len or < min_len
+ */
+bcm_tlv_t *bcm_parse_tlvs_minmax_len(const void *buf, uint buflen, uint key,
+ uint min_len, uint max_len);
+
+/* parse tlvs for dot11 - same as parse_tlvs but supports 802.11 id extension */
+bcm_tlv_t *bcm_parse_tlvs_dot11(const void *buf, uint buflen, uint key, bool id_ext);
+
+/* same as parse_tlvs, but stops when found id > key */
+const bcm_tlv_t *bcm_parse_ordered_tlvs(const void *buf, uint buflen, uint key);
+
+/* find a tlv with DOT11_MNG_PROPR_ID as id, and the given oui and type */
+bcm_tlv_t *bcm_find_vendor_ie(const void *tlvs, uint tlvs_len, const char *voui,
+ uint8 *type, uint type_len);
+
+/* write tlv at dst and return next tlv ptr */
+uint8 *bcm_write_tlv(int type, const void *data, uint datalen, uint8 *dst);
+
+/* write tlv_ext at dst and return next tlv ptr */
+uint8 *bcm_write_tlv_ext(uint8 type, uint8 ext, const void *data, uint8 datalen, uint8 *dst);
+
+/* write tlv at dst if space permits and return next tlv ptr */
+uint8 *bcm_write_tlv_safe(int type, const void *data, uint datalen, uint8 *dst,
+ uint dst_maxlen);
+
+/* copy a tlv and return next tlv ptr */
+uint8 *bcm_copy_tlv(const void *src, uint8 *dst);
+
+/* copy a tlv if space permits and return next tlv ptr */
+uint8 *bcm_copy_tlv_safe(const void *src, uint8 *dst, uint dst_maxlen);
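+
+/* Packing sketch (illustrative; 'out'/'out_len', the ids and bodies are
+ * assumed): write TLVs back to back; each call returns the position for the
+ * next one.
+ *
+ *   uint8 *p = out;
+ *   p = bcm_write_tlv(1, body1, body1_len, p);
+ *   p = bcm_write_tlv_safe(2, body2, body2_len, p,
+ *           (uint)(out_len - (uint)(p - out)));   // bounds-checked variant
+ */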
+
+/* end tlvs */
+
+/* begin xtlv - used for iovars, nan attributes etc. */
+
+/* bcm type (id), length, value with 16-bit id/len. The structure below
+ * is nominal, and is used to support variable length id and type. See
+ * xtlv options below.
+ */
+typedef struct bcm_xtlv {
+ uint16 id;
+ uint16 len;
+ uint8 data[1];
+} bcm_xtlv_t;
+
+/* xtlv options */
+#define BCM_XTLV_OPTION_NONE 0x0000u
+#define BCM_XTLV_OPTION_ALIGN32 0x0001u /* 32bit alignment of type.len.data */
+#define BCM_XTLV_OPTION_IDU8 0x0002u /* shorter id */
+#define BCM_XTLV_OPTION_LENU8 0x0004u /* shorter length */
+#define BCM_XTLV_OPTION_IDBE 0x0008u /* big endian format id */
+#define BCM_XTLV_OPTION_LENBE 0x0010u /* big endian format length */
+typedef uint16 bcm_xtlv_opts_t;
+
+/* header size. depends on options. Macro names ending w/ _EX are where
+ * less common options are explicitly specified. The ones
+ * without use default values that correspond to ...OPTION_NONE
+ */
+
+/* xtlv header size depends on options */
+#define BCM_XTLV_HDR_SIZE 4u
+#define BCM_XTLV_HDR_SIZE_EX(_opts) bcm_xtlv_hdr_size(_opts)
+
+/* note: xtlv len only stores the value's length without padding */
+#define BCM_XTLV_LEN(_elt) ltoh16_ua(&(_elt)->len)
+#define BCM_XTLV_LEN_EX(_elt, _opts) bcm_xtlv_len(_elt, _opts)
+
+#define BCM_XTLV_ID(_elt) ltoh16_ua(&(_elt)->id)
+#define BCM_XTLV_ID_EX(_elt, _opts) bcm_xtlv_id(_elt, _opts)
+
+/* entire size of the XTLV including header, data, and optional padding */
+#define BCM_XTLV_SIZE(elt, opts) bcm_xtlv_size(elt, opts)
+#define BCM_XTLV_SIZE_EX(_elt, _opts) bcm_xtlv_size(_elt, _opts)
+
+/* max xtlv data size */
+#define BCM_XTLV_MAX_DATA_SIZE 65535u
+#define BCM_XTLV_MAX_DATA_SIZE_EX(_opts) ((_opts & BCM_XTLV_OPTION_LENU8) ? \
+ 255u : 65535u)
+
+/* descriptor of xtlv data, packing(src) and unpacking(dst) support */
+typedef struct {
+ uint16 type;
+ uint16 len;
+ void *ptr; /* ptr to memory location */
+} xtlv_desc_t;
+
+/* xtlv buffer - packing/unpacking support */
+struct bcm_xtlvbuf {
+ bcm_xtlv_opts_t opts;
+ uint16 size;
+ uint8 *head; /* point to head of buffer */
+ uint8 *buf; /* current position of buffer */
+ /* allocated buffer may follow, but not necessarily */
+};
+typedef struct bcm_xtlvbuf bcm_xtlvbuf_t;
+
+/* valid xtlv ? */
+bool bcm_valid_xtlv(const bcm_xtlv_t *elt, int buf_len, bcm_xtlv_opts_t opts);
+
+/* return the next xtlv element, and update buffer len (remaining). Buffer length
+ * updated includes padding as specified by options
+ */
+bcm_xtlv_t *bcm_next_xtlv(const bcm_xtlv_t *elt, int *buf_len, bcm_xtlv_opts_t opts);
+
+/* initialize an xtlv buffer. Use options specified for packing/unpacking using
+ * the buffer. Caller is responsible for allocating both buffers.
+ */
+int bcm_xtlv_buf_init(bcm_xtlvbuf_t *tlv_buf, uint8 *buf, uint16 len,
+ bcm_xtlv_opts_t opts);
+
+/* length of data in the xtlv buffer */
+uint16 bcm_xtlv_buf_len(struct bcm_xtlvbuf *tbuf);
+
+/* remaining space in the xtlv buffer */
+uint16 bcm_xtlv_buf_rlen(struct bcm_xtlvbuf *tbuf);
+
+/* write ptr */
+uint8 *bcm_xtlv_buf(struct bcm_xtlvbuf *tbuf);
+
+/* head */
+uint8 *bcm_xtlv_head(struct bcm_xtlvbuf *tbuf);
+
+/* put a data buffer into xtlv */
+int bcm_xtlv_put_data(bcm_xtlvbuf_t *tbuf, uint16 type, const uint8 *data, int n);
+
+/* put one or more u16 elts into xtlv */
+int bcm_xtlv_put16(bcm_xtlvbuf_t *tbuf, uint16 type, const uint16 *data, int n);
+
+/* put one or more u32 elts into xtlv */
+int bcm_xtlv_put32(bcm_xtlvbuf_t *tbuf, uint16 type, const uint32 *data, int n);
+
+/* put one or more u64 elts into xtlv */
+int bcm_xtlv_put64(bcm_xtlvbuf_t *tbuf, uint16 type, const uint64 *data, int n);
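+
+/* Packing sketch (illustrative; the type id 0x10 and 'payload' are assumed):
+ * initialize an xtlv buffer over caller storage and append one element.
+ *
+ *   uint8 storage[64];
+ *   bcm_xtlvbuf_t tbuf;
+ *   if (bcm_xtlv_buf_init(&tbuf, storage, (uint16)sizeof(storage),
+ *           BCM_XTLV_OPTION_ALIGN32) == BCME_OK) {
+ *       (void)bcm_xtlv_put_data(&tbuf, 0x10, payload, payload_len);
+ *       uint16 packed = bcm_xtlv_buf_len(&tbuf);   // bytes written so far
+ *   }
+ */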
+
+/* note: there is no get equivalent for integer unpacking, because bcmendian.h
+ * can be used directly on the pointers returned in the buffer being processed.
+ */
+
+/* unpack a single xtlv entry, advances buffer and copies data to dst_data on match
+ * type and length match must be exact
+ */
+int bcm_unpack_xtlv_entry(const uint8 **buf, uint16 expected_type, uint16 expected_len,
+ uint8 *dst_data, bcm_xtlv_opts_t opts);
+
+/* packs an xtlv into the buffer, advances the buffer, and decrements the buffer length.
+ * buffer length is checked and must be >= size of xtlv - otherwise BCME_BADLEN
+ */
+int bcm_pack_xtlv_entry(uint8 **buf, uint16 *buflen, uint16 type, uint16 len,
+ const uint8 *src_data, bcm_xtlv_opts_t opts);
+
+/* accessors and lengths for element given options */
+int bcm_xtlv_size(const bcm_xtlv_t *elt, bcm_xtlv_opts_t opts);
+int bcm_xtlv_hdr_size(bcm_xtlv_opts_t opts);
+int bcm_xtlv_len(const bcm_xtlv_t *elt, bcm_xtlv_opts_t opts);
+int bcm_xtlv_id(const bcm_xtlv_t *elt, bcm_xtlv_opts_t opts);
+int bcm_xtlv_size_for_data(int dlen, bcm_xtlv_opts_t opts);
+
+/* compute size needed for number of tlvs whose total data len is given */
+#define BCM_XTLV_SIZE_FOR_TLVS(_data_len, _num_tlvs, _opts) (\
+ bcm_xtlv_size_for_data(_data_len, _opts) + (\
+ (_num_tlvs) * BCM_XTLV_HDR_SIZE_EX(_opts)))
+
+/* unsafe copy xtlv */
+#define BCM_XTLV_BCOPY(_src, _dst, _opts) \
+ bcm_xtlv_bcopy(_src, _dst, BCM_XTLV_MAX_DATA_SIZE_EX(_opts), \
+ BCM_XTLV_MAX_DATA_SIZE_EX(_opts), _opts)
+
+/* copy xtlv - note: src->dst bcopy order - to be compatible w/ tlv version */
+bcm_xtlv_t* bcm_xtlv_bcopy(const bcm_xtlv_t *src, bcm_xtlv_t *dst,
+ int src_buf_len, int dst_buf_len, bcm_xtlv_opts_t opts);
+
+/* callback for unpacking xtlv from a buffer into context. */
+typedef int (bcm_xtlv_unpack_cbfn_t)(void *ctx, const uint8 *buf,
+ uint16 type, uint16 len);
+
+/* unpack a tlv buffer using buffer, options, and callback */
+int bcm_unpack_xtlv_buf(void *ctx, const uint8 *buf, uint16 buflen,
+ bcm_xtlv_opts_t opts, bcm_xtlv_unpack_cbfn_t *cbfn);
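+
+/* Unpacking sketch (illustrative; 'my_cb'/'my_ctx' are hypothetical): the
+ * callback is invoked once per xtlv found in the buffer; a non-BCME_OK
+ * return is assumed to stop the walk.
+ *
+ *   static int my_cb(void *ctx, const uint8 *data, uint16 type, uint16 len)
+ *   {
+ *       // consume one element
+ *       return BCME_OK;
+ *   }
+ *   ...
+ *   (void)bcm_unpack_xtlv_buf(my_ctx, buf, buflen,
+ *           BCM_XTLV_OPTION_ALIGN32, my_cb);
+ */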
+
+/* unpack a set of tlvs from the buffer using provided xtlv descriptors */
+int bcm_unpack_xtlv_buf_to_mem(const uint8 *buf, int *buflen, xtlv_desc_t *items,
+ bcm_xtlv_opts_t opts);
+
+/* pack a set of tlvs into buffer using provided xtlv descriptors */
+int bcm_pack_xtlv_buf_from_mem(uint8 **buf, uint16 *buflen,
+ const xtlv_desc_t *items, bcm_xtlv_opts_t opts);
+
+/* return data pointer and data length of a given id from xtlv buffer
+ * datalen may be NULL
+ */
+const uint8* bcm_get_data_from_xtlv_buf(const uint8 *tlv_buf, uint16 buflen,
+ uint16 id, uint16 *datalen, bcm_xtlv_opts_t opts);
+
+/* callback to return the next tlv id and len to pack, whether more tlvs are to come,
+ * and options, e.g. alignment
+ */
+typedef bool (*bcm_pack_xtlv_next_info_cbfn_t)(void *ctx, uint16 *tlv_id, uint16 *tlv_len);
+
+/* callback to pack the tlv into length validated buffer */
+typedef void (*bcm_pack_xtlv_pack_next_cbfn_t)(void *ctx,
+ uint16 tlv_id, uint16 tlv_len, uint8* buf);
+
+/* pack a set of tlvs into buffer using get_next to iterate */
+int bcm_pack_xtlv_buf(void *ctx, uint8 *tlv_buf, uint16 buflen,
+ bcm_xtlv_opts_t opts, bcm_pack_xtlv_next_info_cbfn_t get_next,
+ bcm_pack_xtlv_pack_next_cbfn_t pack_next, int *outlen);
+
+/* pack an xtlv. does not do any error checking. if data is not NULL
+ * data of given length is copied to buffer (xtlv)
+ */
+void bcm_xtlv_pack_xtlv(bcm_xtlv_t *xtlv, uint16 type, uint16 len,
+ const uint8 *data, bcm_xtlv_opts_t opts);
+
+/* unpack an xtlv and return ptr to data, and data length */
+void bcm_xtlv_unpack_xtlv(const bcm_xtlv_t *xtlv, uint16 *type, uint16 *len,
+ const uint8 **data, bcm_xtlv_opts_t opts);
+
+/* end xtlvs */
+
+/* length value pairs */
+struct bcm_xlv {
+ uint16 len;
+ uint8 data[1];
+};
+typedef struct bcm_xlv bcm_xlv_t;
+
+struct bcm_xlvp {
+ uint16 len;
+ uint8 *data;
+};
+typedef struct bcm_xlvp bcm_xlvp_t;
+
+struct bcm_const_xlvp {
+ uint16 len;
+ const uint8 *data;
+};
+
+typedef struct bcm_const_xlvp bcm_const_xlvp_t;
+
+struct bcm_const_ulvp {
+ uint32 len;
+ const uint8 *data;
+};
+
+typedef struct bcm_const_ulvp bcm_const_ulvp_t;
+
+/* end length value pairs */
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _bcmtlv_h_ */
diff --git a/bcmdhd.101.10.361.x/include/bcmudp.h b/bcmdhd.101.10.361.x/include/bcmudp.h
new file mode 100755
index 0000000..5a5113d
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/bcmudp.h
@@ -0,0 +1,54 @@
+/*
+ * Fundamental constants relating to UDP Protocol
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _bcmudp_h_
+#define _bcmudp_h_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+/* UDP header */
+#define UDP_DEST_PORT_OFFSET 2 /* UDP dest port offset */
+#define UDP_LEN_OFFSET 4 /* UDP length offset */
+#define UDP_CHKSUM_OFFSET 6 /* UDP body checksum offset */
+
+#define UDP_HDR_LEN 8 /* UDP header length */
+#define UDP_PORT_LEN 2 /* UDP port length */
+
+/* These fields are stored in network order */
+BWL_PRE_PACKED_STRUCT struct bcmudp_hdr
+{
+ uint16 src_port; /* Source Port Address */
+ uint16 dst_port; /* Destination Port Address */
+ uint16 len; /* Number of bytes in datagram including header */
+ uint16 chksum; /* entire datagram checksum with pseudoheader */
+} BWL_POST_PACKED_STRUCT;
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* #ifndef _bcmudp_h_ */
diff --git a/bcmdhd.101.10.361.x/include/bcmutils.h b/bcmdhd.101.10.361.x/include/bcmutils.h
new file mode 100755
index 0000000..2971d8b
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/bcmutils.h
@@ -0,0 +1,1639 @@
+/*
+ * Misc useful os-independent macros and functions.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _bcmutils_h_
+#define _bcmutils_h_
+
+#include <bcmtlv.h>
+
+/* For now, protect the bcmerror.h */
+#ifdef BCMUTILS_ERR_CODES
+#include <bcmerror.h>
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define bcm_strncpy_s(dst, noOfElements, src, count) strncpy((dst), (src), (count))
+#ifdef FREEBSD
+#define bcm_strncat_s(dst, noOfElements, src, count) strcat((dst), (src))
+#else
+#define bcm_strncat_s(dst, noOfElements, src, count) strncat((dst), (src), (count))
+#endif /* FREEBSD */
+#define bcm_snprintf_s snprintf
+#define bcm_sprintf_s snprintf
+
+/*
+ * #define bcm_strcpy_s(dst, count, src) strncpy((dst), (src), (count))
+ * bcm_strcpy_s: use bcm_strncpy_s instead as it is the safer option
+ * bcm_strcat_s: use bcm_strncat_s instead as it is the safer option
+ *
+ */
+
+#define BCM_BIT(x) (1u << (x))
+/* useful to count number of set bit in x */
+#define BCM_CLR_FISRT_BIT(x) ((x - 1) & x)
+/* first bit set in x. Useful to iterate through a mask */
+#define BCM_FIRST_BIT(x) (BCM_CLR_FISRT_BIT(x)^(x))
+
+/* Macro to iterate through the set bits in mask.
+ * NOTE: the argument "mask" will be cleared after
+ * the iteration.
+ */
+
+#define FOREACH_BIT(c, mask)\
+ for (c = BCM_FIRST_BIT(mask); mask != 0; \
+ mask = BCM_CLR_FISRT_BIT(mask), c = BCM_FIRST_BIT(mask))
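+
+/* Example (illustrative; 'chains' is an assumed mask): iterate over a copy,
+ * since FOREACH_BIT consumes its 'mask' argument.
+ *
+ *   uint32 m = chains;   // work on a copy; the macro clears it
+ *   uint32 bit;
+ *   FOREACH_BIT(bit, m) {
+ *       // 'bit' is one set bit of the original mask: 0x1, then 0x4, ...
+ *   }
+ */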
+
+/* ctype replacement */
+#define _BCM_U 0x01 /* upper */
+#define _BCM_L 0x02 /* lower */
+#define _BCM_D 0x04 /* digit */
+#define _BCM_C 0x08 /* cntrl */
+#define _BCM_P 0x10 /* punct */
+#define _BCM_S 0x20 /* white space (space/lf/tab) */
+#define _BCM_X 0x40 /* hex digit */
+#define _BCM_SP 0x80 /* hard space (0x20) */
+
+extern const unsigned char bcm_ctype[256];
+#define bcm_ismask(x) (bcm_ctype[(unsigned char)(x)])
+
+#define bcm_isalnum(c) ((bcm_ismask(c)&(_BCM_U|_BCM_L|_BCM_D)) != 0)
+#define bcm_isalpha(c) ((bcm_ismask(c)&(_BCM_U|_BCM_L)) != 0)
+#define bcm_iscntrl(c) ((bcm_ismask(c)&(_BCM_C)) != 0)
+#define bcm_isdigit(c) ((bcm_ismask(c)&(_BCM_D)) != 0)
+#define bcm_isgraph(c) ((bcm_ismask(c)&(_BCM_P|_BCM_U|_BCM_L|_BCM_D)) != 0)
+#define bcm_islower(c) ((bcm_ismask(c)&(_BCM_L)) != 0)
+#define bcm_isprint(c) ((bcm_ismask(c)&(_BCM_P|_BCM_U|_BCM_L|_BCM_D|_BCM_SP)) != 0)
+#define bcm_ispunct(c) ((bcm_ismask(c)&(_BCM_P)) != 0)
+#define bcm_isspace(c) ((bcm_ismask(c)&(_BCM_S)) != 0)
+#define bcm_isupper(c) ((bcm_ismask(c)&(_BCM_U)) != 0)
+#define bcm_isxdigit(c) ((bcm_ismask(c)&(_BCM_D|_BCM_X)) != 0)
+#define bcm_tolower(c) (bcm_isupper((c)) ? ((c) + 'a' - 'A') : (c))
+#define bcm_toupper(c) (bcm_islower((c)) ? ((c) + 'A' - 'a') : (c))
+
+#define CIRCULAR_ARRAY_FULL(rd_idx, wr_idx, max) ((wr_idx + 1)%max == rd_idx)
+
+#define KB(bytes) (((bytes) + 1023) / 1024)
+
+/* Buffer structure for collecting string-formatted data
+* using bcm_bprintf() API.
+* Use bcm_binit() to initialize before use
+*/
+
+struct bcmstrbuf {
+ char *buf; /* pointer to current position in origbuf */
+ unsigned int size; /* current (residual) size in bytes */
+ char *origbuf; /* unmodified pointer to original buffer */
+ unsigned int origsize; /* unmodified original buffer size in bytes */
+};
+
+#define BCMSTRBUF_LEN(b) (b->size)
+#define BCMSTRBUF_BUF(b) (b->buf)
+
+struct ether_addr;
+extern char *bcm_ether_ntoa(const struct ether_addr *ea, char *buf);
+extern int bcm_ether_atoe(const char *p, struct ether_addr *ea);
+
+/* ** driver-only section ** */
+#ifdef BCMDRIVER
+
+#include <osl.h>
+#include <hnd_pktq.h>
+#include <hnd_pktpool.h>
+
+#define GPIO_PIN_NOTDEFINED 0x20 /* Pin not defined */
+
+/*
+ * Spin at most 'us' microseconds while 'exp' is true.
+ * Caller should explicitly test 'exp' when this completes
+ * and take appropriate error action if 'exp' is still true.
+ */
+#ifndef SPINWAIT_POLL_PERIOD
+#define SPINWAIT_POLL_PERIOD 10U
+#endif
+
+#ifdef BCMFUZZ
+/* fake spinwait for fuzzing */
+#define SPINWAIT(exp, us) { \
+ uint countdown = (exp) != 0 ? 1 : 0; \
+ while (countdown > 0) { \
+ countdown--; \
+ } \
+}
+
+#elif defined(PHY_REG_TRACE_FRAMEWORK)
+#include <phy_utils_log_api.h>
+#define SPINWAIT(exp, us) { \
+ uint countdown = (us) + (SPINWAIT_POLL_PERIOD - 1U); \
+ phy_utils_log_spinwait_start(); \
+ while (((exp) != 0) && (uint)(countdown >= SPINWAIT_POLL_PERIOD)) { \
+ OSL_DELAY(SPINWAIT_POLL_PERIOD); \
+ countdown -= SPINWAIT_POLL_PERIOD; \
+ } \
+ phy_utils_log_spinwait_end(us, countdown); \
+}
+
+#else
+#define SPINWAIT(exp, us) { \
+ uint countdown = (us) + (SPINWAIT_POLL_PERIOD - 1U); \
+ while (((exp) != 0) && (uint)(countdown >= SPINWAIT_POLL_PERIOD)) { \
+ OSL_DELAY(SPINWAIT_POLL_PERIOD); \
+ countdown -= SPINWAIT_POLL_PERIOD; \
+ } \
+}
+#endif /* BCMFUZZ */
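+
+/* Usage sketch (illustrative; 'reg_busy()' is a hypothetical predicate):
+ *
+ *   SPINWAIT(reg_busy(regs), 1000);   // poll for up to ~1000 us
+ *   if (reg_busy(regs)) {
+ *       // timed out; take the error action noted above
+ *   }
+ */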
+
+/* forward definition of ether_addr structure used by some function prototypes */
+
+extern int ether_isbcast(const void *ea);
+extern int ether_isnulladdr(const void *ea);
+
+#define UP_TABLE_MAX ((IPV4_TOS_DSCP_MASK >> IPV4_TOS_DSCP_SHIFT) + 1) /* 64 max */
+#define CORE_SLAVE_PORT_0 0
+#define CORE_SLAVE_PORT_1 1
+#define CORE_BASE_ADDR_0 0
+#define CORE_BASE_ADDR_1 1
+
+#ifdef DONGLEBUILD
+/* TRIM Tail bytes from lfrag */
+extern void pktfrag_trim_tailbytes(osl_t * osh, void* p, uint16 len, uint8 type);
+#define PKTFRAG_TRIM_TAILBYTES(osh, p, len, type) pktfrag_trim_tailbytes(osh, p, len, type)
+#else
+#define PKTFRAG_TRIM_TAILBYTES(osh, p, len, type) PKTSETLEN(osh, p, PKTLEN(osh, p) - len)
+#endif /* DONGLEBUILD */
+
+/* externs */
+/* packet */
+extern uint pktcopy(osl_t *osh, void *p, uint offset, uint len, uchar *buf);
+extern uint pktfrombuf(osl_t *osh, void *p, uint offset, uint len, uchar *buf);
+extern uint pkttotlen(osl_t *osh, void *p);
+extern uint pkttotcnt(osl_t *osh, void *p);
+extern void *pktlast(osl_t *osh, void *p);
+extern uint pktsegcnt(osl_t *osh, void *p);
+extern uint8 *pktdataoffset(osl_t *osh, void *p, uint offset);
+extern void *pktoffset(osl_t *osh, void *p, uint offset);
+
+#ifdef WLCSO
+extern uint pkttotlen_no_sfhtoe_hdr(osl_t *osh, void *p, uint toe_hdr_len);
+#else
+#define pkttotlen_no_sfhtoe_hdr(osh, p, hdrlen) pkttotlen(osh, p)
+#endif /* WLCSO */
+
+/* Get priority from a packet and pass it back in scb (or equiv) */
+#define PKTPRIO_VDSCP 0x100u /* DSCP prio found after VLAN tag */
+#define PKTPRIO_VLAN 0x200u /* VLAN prio found */
+#define PKTPRIO_UPD 0x400u /* DSCP used to update VLAN prio */
+#define PKTPRIO_DSCP 0x800u /* DSCP prio found */
+
+/* DSCP type definitions (RFC4594) */
+/* AF1x: High-Throughput Data (RFC2597) */
+#define DSCP_AF11 0x0Au
+#define DSCP_AF12 0x0Cu
+#define DSCP_AF13 0x0Eu
+/* AF2x: Low-Latency Data (RFC2597) */
+#define DSCP_AF21 0x12u
+#define DSCP_AF22 0x14u
+#define DSCP_AF23 0x16u
+/* CS2: OAM (RFC2474) */
+#define DSCP_CS2 0x10u
+/* AF3x: Multimedia Streaming (RFC2597) */
+#define DSCP_AF31 0x1Au
+#define DSCP_AF32 0x1Cu
+#define DSCP_AF33 0x1Eu
+/* CS3: Broadcast Video (RFC2474) */
+#define DSCP_CS3 0x18u
+/* VA: VOICE-ADMIT (RFC5865) */
+#define DSCP_VA 0x2Cu
+/* EF: Telephony (RFC3246) */
+#define DSCP_EF 0x2Eu
+/* CS6: Network Control (RFC2474) */
+#define DSCP_CS6 0x30u
+/* CS7: Network Control (RFC2474) */
+#define DSCP_CS7 0x38u
+
+extern uint pktsetprio(void *pkt, bool update_vtag);
+extern uint pktsetprio_qms(void *pkt, uint8* up_table, bool update_vtag);
+extern bool pktgetdscp(uint8 *pktdata, uint pktlen, uint8 *dscp);
+
+/* ethernet address */
+extern uint64 bcm_ether_ntou64(const struct ether_addr *ea) BCMCONSTFN;
+extern int bcm_addrmask_set(int enable);
+extern int bcm_addrmask_get(int *val);
+
+/* ip address */
+struct ipv4_addr;
+extern char *bcm_ip_ntoa(struct ipv4_addr *ia, char *buf);
+extern char *bcm_ipv6_ntoa(void *ipv6, char *buf);
+extern int bcm_atoipv4(const char *p, struct ipv4_addr *ip);
+
+/* delay */
+extern void bcm_mdelay(uint ms);
+/* variable access */
+#if defined(BCM_RECLAIM)
+extern bool _nvram_reclaim_enb;
+#define NVRAM_RECLAIM_ENAB() (_nvram_reclaim_enb)
+#ifdef BCMDBG
+#define NVRAM_RECLAIM_CHECK(name) \
+ if (NVRAM_RECLAIM_ENAB() && (bcm_attach_part_reclaimed == TRUE)) { \
+ printf("NVRAM already reclaimed, %s\n", (name)); \
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_NULL_DEREF(); \
+ *(char*) 0 = 0; /* TRAP */ \
+ GCC_DIAGNOSTIC_POP(); \
+ return NULL; \
+ }
+#else /* BCMDBG */
+#define NVRAM_RECLAIM_CHECK(name) \
+ if (NVRAM_RECLAIM_ENAB() && (bcm_attach_part_reclaimed == TRUE)) { \
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_NULL_DEREF(); \
+ *(char*) 0 = 0; /* TRAP */ \
+ GCC_DIAGNOSTIC_POP(); \
+ return NULL; \
+ }
+#endif /* BCMDBG */
+#else /* BCM_RECLAIM */
+#define NVRAM_RECLAIM_CHECK(name)
+#endif /* BCM_RECLAIM */
+
+#ifdef WL_FWSIGN
+#define getvar(vars, name) (NULL)
+#define getintvar(vars, name) (0)
+#define getintvararray(vars, name, index) (0)
+#define getintvararraysize(vars, name) (0)
+#else /* WL_FWSIGN */
+extern char *getvar(char *vars, const char *name);
+extern int getintvar(char *vars, const char *name);
+extern int getintvararray(char *vars, const char *name, int index);
+extern int getintvararraysize(char *vars, const char *name);
+#endif /* WL_FWSIGN */
+
+/* Read an array of values from a possibly slice-specific nvram string */
+extern int get_uint8_vararray_slicespecific(osl_t *osh, char *vars, char *vars_table_accessor,
+ const char* name, uint8* dest_array, uint dest_size);
+extern int get_int16_vararray_slicespecific(osl_t *osh, char *vars, char *vars_table_accessor,
+ const char* name, int16* dest_array, uint dest_size);
+/* Prepend a slice-specific accessor to an nvram string name */
+extern uint get_slicespecific_var_name(osl_t *osh, char *vars_table_accessor,
+ const char *name, char **name_out);
+
+extern uint getgpiopin(char *vars, char *pin_name, uint def_pin);
+#ifdef BCMDBG
+extern void prpkt(const char *msg, osl_t *osh, void *p0);
+#endif /* BCMDBG */
+#ifdef BCMPERFSTATS
+extern void bcm_perf_enable(void);
+extern void bcmstats(char *fmt);
+extern void bcmlog(char *fmt, uint a1, uint a2);
+extern void bcmdumplog(char *buf, int size);
+extern int bcmdumplogent(char *buf, uint idx);
+#else
+#define bcm_perf_enable()
+#define bcmstats(fmt)
+#define bcmlog(fmt, a1, a2)
+#define bcmdumplog(buf, size) *buf = '\0'
+#define bcmdumplogent(buf, idx) -1
+#endif /* BCMPERFSTATS */
+
+#define TSF_TICKS_PER_MS 1000
+#define TS_ENTER 0xdeadbeef /* Timestamp profiling enter */
+#define TS_EXIT 0xbeefcafe /* Timestamp profiling exit */
+
+#if defined(BCMTSTAMPEDLOGS)
+/* Store a TSF timestamp and a log line in the log buffer */
+extern void bcmtslog(uint32 tstamp, const char *fmt, uint a1, uint a2);
+/* Print out the log buffer with timestamps */
+extern void bcmprinttslogs(void);
+/* Print out a microsecond timestamp as "sec.ms.us " */
+extern void bcmprinttstamp(uint32 us);
+/* Dump to buffer a microsecond timestamp as "sec.ms.us " */
+extern void bcmdumptslog(struct bcmstrbuf *b);
+#else
+#define bcmtslog(tstamp, fmt, a1, a2)
+#define bcmprinttslogs()
+#define bcmprinttstamp(us)
+#define bcmdumptslog(b)
+#endif /* BCMTSTAMPEDLOGS */
+
+bool bcm_match_buffers(const uint8 *b1, uint b1_len, const uint8 *b2, uint b2_len);
+
+/* Support for sharing code across in-driver iovar implementations.
+ * The intent is that a driver use this structure to map iovar names
+ * to its (private) iovar identifiers, and the lookup function to
+ * find the entry. Macros are provided to map ids and get/set actions
+ * into a single number space for a switch statement.
+ */
+
+/* iovar structure */
+typedef struct bcm_iovar {
+ const char *name; /* name for lookup and display */
+ uint16 varid; /* id for switch */
+ uint16 flags; /* driver-specific flag bits */
+ uint8 flags2; /* driver-specific flag bits */
+ uint8 type; /* base type of argument */
+ uint16 minlen; /* min length for buffer vars */
+} bcm_iovar_t;
+
+/* varid definitions are per-driver, may use these get/set bits */
+
+/* IOVar action bits for id mapping */
+#define IOV_GET 0 /* Get an iovar */
+#define IOV_SET 1 /* Set an iovar */
+
+/* Varid to actionid mapping */
+#define IOV_GVAL(id) ((id) * 2)
+#define IOV_SVAL(id) ((id) * 2 + IOV_SET)
+#define IOV_ISSET(actionid) ((actionid & IOV_SET) == IOV_SET)
+#define IOV_ID(actionid) (actionid >> 1)
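+
+/* Dispatch sketch (illustrative; IOV_FOO is a hypothetical driver id): fold
+ * the iovar id and get/set action into one switch value.
+ *
+ *   switch (actionid) {
+ *   case IOV_GVAL(IOV_FOO):
+ *       // format the current value into the result buffer
+ *       break;
+ *   case IOV_SVAL(IOV_FOO):
+ *       // validate and apply the new value
+ *       break;
+ *   }
+ */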
+
+/* flags are per-driver based on driver attributes */
+
+extern const bcm_iovar_t *bcm_iovar_lookup(const bcm_iovar_t *table, const char *name);
+extern int bcm_iovar_lencheck(const bcm_iovar_t *table, void *arg, uint len, bool set);
+
+/* ioctl structure */
+typedef struct wlc_ioctl_cmd {
+ uint16 cmd; /**< IOCTL command */
+ uint16 flags; /**< IOCTL command flags */
+ uint16 min_len; /**< IOCTL command minimum argument len (in bytes) */
+} wlc_ioctl_cmd_t;
+
+#if defined(WLTINYDUMP) || defined(BCMDBG) || defined(WLMSG_INFORM) || \
+ defined(WLMSG_ASSOC) || defined(WLMSG_PRPKT) || defined(WLMSG_WSEC)
+extern int bcm_format_ssid(char* buf, const uchar ssid[], uint ssid_len);
+#endif /* WLTINYDUMP || BCMDBG || WLMSG_INFORM || WLMSG_ASSOC || WLMSG_PRPKT || WLMSG_WSEC */
+#endif /* BCMDRIVER */
+
+/* string */
+extern int bcm_atoi(const char *s);
+extern ulong bcm_strtoul(const char *cp, char **endp, uint base);
+extern uint64 bcm_strtoull(const char *cp, char **endp, uint base);
+extern char *bcmstrstr(const char *haystack, const char *needle);
+extern char *bcmstrnstr(const char *s, uint s_len, const char *substr, uint substr_len);
+extern char *bcmstrcat(char *dest, const char *src);
+extern char *bcmstrncat(char *dest, const char *src, uint size);
+extern ulong wchar2ascii(char *abuf, ushort *wbuf, ushort wbuflen, ulong abuflen);
+char* bcmstrtok(char **string, const char *delimiters, char *tokdelim);
+int bcmstricmp(const char *s1, const char *s2);
+int bcmstrnicmp(const char* s1, const char* s2, int cnt);
+uint16 bcmhex2bin(const uint8* hex, uint hex_len, uint8 *buf, uint buf_len);
+
+/* Base type definitions */
+#define IOVT_VOID 0 /* no value (implicitly set only) */
+#define IOVT_BOOL 1 /* any value ok (zero/nonzero) */
+#define IOVT_INT8 2 /* integer values are range-checked */
+#define IOVT_UINT8 3 /* unsigned int 8 bits */
+#define IOVT_INT16 4 /* int 16 bits */
+#define IOVT_UINT16 5 /* unsigned int 16 bits */
+#define IOVT_INT32 6 /* int 32 bits */
+#define IOVT_UINT32 7 /* unsigned int 32 bits */
+#define IOVT_BUFFER 8 /* buffer is size-checked as per minlen */
+#define BCM_IOVT_VALID(type) (((unsigned int)(type)) <= IOVT_BUFFER)
+
+/* Initializer for IOV type strings */
+#define BCM_IOV_TYPE_INIT { \
+ "void", \
+ "bool", \
+ "int8", \
+ "uint8", \
+ "int16", \
+ "uint16", \
+ "int32", \
+ "uint32", \
+ "buffer", \
+ "" }
+
+#define BCM_IOVT_IS_INT(type) (\
+ (type == IOVT_BOOL) || \
+ (type == IOVT_INT8) || \
+ (type == IOVT_UINT8) || \
+ (type == IOVT_INT16) || \
+ (type == IOVT_UINT16) || \
+ (type == IOVT_INT32) || \
+ (type == IOVT_UINT32))
+
+/* ** driver/apps-shared section ** */
+
+#define BCME_STRLEN 64 /* Max string length for BCM errors */
+#define VALID_BCMERROR(e) valid_bcmerror(e)
+
+#ifdef DBG_BUS
+/** tracks non typical execution paths, use gdb with arm sim + firmware dump to read counters */
+#define DBG_BUS_INC(s, cnt) ((s)->dbg_bus->cnt++)
+#else
+#define DBG_BUS_INC(s, cnt)
+#endif /* DBG_BUS */
+
+/* BCMUTILS_ERR_CODES is defined to use the error codes from bcmerror.h
+ * otherwise use from this file.
+ */
+#ifndef BCMUTILS_ERR_CODES
+
+/*
+ * error codes may be added, but the defined ones shouldn't be changed or deleted;
+ * these error codes are exposed to the user code.
+ * whenever a new error code is added to this list,
+ * please update the errorstring table with the related error string and
+ * update the osl files with the os-specific errorcode map
+*/
+
+#define BCME_OK 0 /* Success */
+#define BCME_ERROR -1 /* Error generic */
+#define BCME_BADARG -2 /* Bad Argument */
+#define BCME_BADOPTION -3 /* Bad option */
+#define BCME_NOTUP -4 /* Not up */
+#define BCME_NOTDOWN -5 /* Not down */
+#define BCME_NOTAP -6 /* Not AP */
+#define BCME_NOTSTA -7 /* Not STA */
+#define BCME_BADKEYIDX -8 /* BAD Key Index */
+#define BCME_RADIOOFF -9 /* Radio Off */
+#define BCME_NOTBANDLOCKED -10 /* Not band locked */
+#define BCME_NOCLK -11 /* No Clock */
+#define BCME_BADRATESET -12 /* BAD Rate valueset */
+#define BCME_BADBAND -13 /* BAD Band */
+#define BCME_BUFTOOSHORT -14 /* Buffer too short */
+#define BCME_BUFTOOLONG -15 /* Buffer too long */
+#define BCME_BUSY -16 /* Busy */
+#define BCME_NOTASSOCIATED -17 /* Not Associated */
+#define BCME_BADSSIDLEN -18 /* Bad SSID len */
+#define BCME_OUTOFRANGECHAN -19 /* Out of Range Channel */
+#define BCME_BADCHAN -20 /* Bad Channel */
+#define BCME_BADADDR -21 /* Bad Address */
+#define BCME_NORESOURCE -22 /* Not Enough Resources */
+#define BCME_UNSUPPORTED -23 /* Unsupported */
+#define BCME_BADLEN -24 /* Bad length */
+#define BCME_NOTREADY -25 /* Not Ready */
+#define BCME_EPERM -26 /* Not Permitted */
+#define BCME_NOMEM -27 /* No Memory */
+#define BCME_ASSOCIATED -28 /* Associated */
+#define BCME_RANGE -29 /* Not In Range */
+#define BCME_NOTFOUND -30 /* Not Found */
+#define BCME_WME_NOT_ENABLED -31 /* WME Not Enabled */
+#define BCME_TSPEC_NOTFOUND -32 /* TSPEC Not Found */
+#define BCME_ACM_NOTSUPPORTED -33 /* ACM Not Supported */
+#define BCME_NOT_WME_ASSOCIATION -34 /* Not WME Association */
+#define BCME_SDIO_ERROR -35 /* SDIO Bus Error */
+#define BCME_DONGLE_DOWN -36 /* Dongle Not Accessible */
+#define BCME_VERSION -37 /* Incorrect version */
+#define BCME_TXFAIL -38 /* TX failure */
+#define BCME_RXFAIL -39 /* RX failure */
+#define BCME_NODEVICE -40 /* Device not present */
+#define BCME_NMODE_DISABLED -41 /* NMODE disabled */
+#define BCME_MSCH_DUP_REG -42 /* Duplicate slot registration */
+#define BCME_SCANREJECT -43 /* reject scan request */
+#define BCME_USAGE_ERROR -44 /* WLCMD usage error */
+#define BCME_IOCTL_ERROR -45 /* WLCMD ioctl error */
+#define BCME_SERIAL_PORT_ERR -46 /* RWL serial port error */
+#define BCME_DISABLED -47 /* Disabled in this build */
+#define BCME_DECERR -48 /* Decrypt error */
+#define BCME_ENCERR -49 /* Encrypt error */
+#define BCME_MICERR -50 /* Integrity/MIC error */
+#define BCME_REPLAY -51 /* Replay */
+#define BCME_IE_NOTFOUND -52 /* IE not found */
+#define BCME_DATA_NOTFOUND -53 /* Complete data not found in buffer */
+#define BCME_NOT_GC -54 /* expecting a group client */
+#define BCME_PRS_REQ_FAILED -55 /* GC presence req failed to send */
+#define BCME_NO_P2P_SE -56 /* Could not find P2P-Subelement */
+#define BCME_NOA_PND -57 /* NoA pending, CB should be NULL */
+#define BCME_FRAG_Q_FAILED -58 /* queueing 802.11 frag failed */
+#define BCME_GET_AF_FAILED -59 /* Get p2p AF pkt failed */
+#define BCME_MSCH_NOTREADY -60 /* scheduler not ready */
+#define BCME_IOV_LAST_CMD -61 /* last batched iov sub-command */
+#define BCME_MINIPMU_CAL_FAIL -62 /* MiniPMU cal failed */
+#define BCME_RCAL_FAIL -63 /* Rcal failed */
+#define BCME_LPF_RCCAL_FAIL -64 /* RCCAL failed */
+#define BCME_DACBUF_RCCAL_FAIL -65 /* RCCAL failed */
+#define BCME_VCOCAL_FAIL -66 /* VCOCAL failed */
+#define BCME_BANDLOCKED -67 /* interface is restricted to a band */
+#define BCME_BAD_IE_DATA -68 /* Received ie with invalid/bad data */
+#define BCME_REG_FAILED -69 /* Generic registration failed */
+#define BCME_NOCHAN -70 /* Registration with 0 chans in list */
+#define BCME_PKTTOSS -71 /* Pkt tossed */
+#define BCME_DNGL_DEVRESET -72 /* dongle re-attach during DEVRESET */
+#define BCME_ROAM -73 /* Roam related failures */
+#define BCME_NO_SIG_FILE -74 /* Signature file is missing */
+
+#define BCME_LAST BCME_NO_SIG_FILE
+
+#define BCME_NOTENABLED BCME_DISABLED
+
+/* This error code is *internal* to the driver, and is not propagated to users. It should
+ * only be used by IOCTL patch handlers as an indication that it did not handle the IOCTL.
+ * (Since the error code is internal, an entry in 'BCMERRSTRINGTABLE' is not required,
+ * nor does it need to be part of any OSL driver-to-OS error code mapping).
+ */
+#define BCME_IOCTL_PATCH_UNSUPPORTED -9999
+#if (BCME_LAST <= BCME_IOCTL_PATCH_UNSUPPORTED)
+ #error "BCME_LAST <= BCME_IOCTL_PATCH_UNSUPPORTED"
+#endif
+
+/* This is a collection of BCME error strings */
+#define BCMERRSTRINGTABLE { \
+ "OK", \
+ "Undefined error", \
+ "Bad Argument", \
+ "Bad Option", \
+ "Not up", \
+ "Not down", \
+ "Not AP", \
+ "Not STA", \
+ "Bad Key Index", \
+ "Radio Off", \
+ "Not band locked", \
+ "No clock", \
+ "Bad Rate valueset", \
+ "Bad Band", \
+ "Buffer too short", \
+ "Buffer too long", \
+ "Busy", \
+ "Not Associated", \
+ "Bad SSID len", \
+ "Out of Range Channel", \
+ "Bad Channel", \
+ "Bad Address", \
+ "Not Enough Resources", \
+ "Unsupported", \
+ "Bad length", \
+ "Not Ready", \
+ "Not Permitted", \
+ "No Memory", \
+ "Associated", \
+ "Not In Range", \
+ "Not Found", \
+ "WME Not Enabled", \
+ "TSPEC Not Found", \
+ "ACM Not Supported", \
+ "Not WME Association", \
+ "SDIO Bus Error", \
+ "Dongle Not Accessible", \
+ "Incorrect version", \
+ "TX Failure", \
+ "RX Failure", \
+ "Device Not Present", \
+ "NMODE Disabled", \
+ "Host Offload in device", \
+ "Scan Rejected", \
+ "WLCMD usage error", \
+ "WLCMD ioctl error", \
+ "RWL serial port error", \
+ "Disabled", \
+ "Decrypt error", \
+ "Encrypt error", \
+ "MIC error", \
+ "Replay", \
+ "IE not found", \
+ "Data not found", \
+ "NOT GC", \
+ "PRS REQ FAILED", \
+ "NO P2P SubElement", \
+ "NOA Pending", \
+ "FRAG Q FAILED", \
+ "GET ActionFrame failed", \
+ "scheduler not ready", \
+ "Last IOV batched sub-cmd", \
+ "Mini PMU Cal failed", \
+ "R-cal failed", \
+ "LPF RC Cal failed", \
+ "DAC buf RC Cal failed", \
+ "VCO Cal failed", \
+ "band locked", \
+ "Recieved ie with invalid data", \
+ "registration failed", \
+ "Registration with zero channels", \
+ "pkt toss", \
+ "Dongle Devreset", \
+ "Critical roam in progress", \
+ "Signature file is missing", \
+}
+#endif /* BCMUTILS_ERR_CODES */
+
+#ifndef ABS
+#define ABS(a) (((a) < 0) ? -(a) : (a))
+#endif /* ABS */
+
+#ifndef MIN
+#define MIN(a, b) (((a) < (b)) ? (a) : (b))
+#endif /* MIN */
+
+#ifndef MAX
+#define MAX(a, b) (((a) > (b)) ? (a) : (b))
+#endif /* MAX */
+
+/* limit to [min, max] */
+#ifndef LIMIT_TO_RANGE
+#define LIMIT_TO_RANGE(x, min, max) \
+ ((x) < (min) ? (min) : ((x) > (max) ? (max) : (x)))
+#endif /* LIMIT_TO_RANGE */
+
+/* limit to max */
+#ifndef LIMIT_TO_MAX
+#define LIMIT_TO_MAX(x, max) \
+ (((x) > (max) ? (max) : (x)))
+#endif /* LIMIT_TO_MAX */
+
+/* limit to min */
+#ifndef LIMIT_TO_MIN
+#define LIMIT_TO_MIN(x, min) \
+ (((x) < (min) ? (min) : (x)))
+#endif /* LIMIT_TO_MIN */
+
+#define SIZE_BITS(x) (sizeof(x) * NBBY)
+#define SIZE_BITS32(x) ((uint)sizeof(x) * NBBY)
+
+#define DELTA(curr, prev) ((curr) > (prev) ? ((curr) - (prev)) : \
+ (0xffffffff - (prev) + (curr) + 1))
+#define CEIL(x, y) (((x) + ((y) - 1)) / (y))
+#define ROUNDUP(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
+#define ROUNDDN(p, align) ((p) & ~((align) - 1))
+#define ISALIGNED(a, x) (((uintptr)(a) & ((x) - 1)) == 0)
+#define ALIGN_ADDR(addr, boundary) (void *)(((uintptr)(addr) + (boundary) - 1) \
+ & ~((uintptr)(boundary) - 1))
+#define ALIGN_SIZE(size, boundary) (((size) + (boundary) - 1) \
+ & ~((boundary) - 1))
+#define ISPOWEROF2(x) ((((x) - 1) & (x)) == 0)
+#define VALID_MASK(mask) !((mask) & ((mask) + 1))
+
+#ifndef OFFSETOF
+#if ((__GNUC__ >= 4) && (__GNUC_MINOR__ >= 8))
+ /* GCC 4.8+ complains when using our OFFSETOF macro in array length declarations. */
+ #define OFFSETOF(type, member) __builtin_offsetof(type, member)
+#else
+#ifdef BCMFUZZ
+ /* use 0x10 offset to avoid undefined behavior error due to NULL access */
+ #define OFFSETOF(type, member) (((uint)(uintptr)&((type *)0x10)->member) - 0x10)
+#else
+ #define OFFSETOF(type, member) ((uint)(uintptr)&((type *)0)->member)
+#endif /* BCMFUZZ */
+#endif /* GCC 4.8 or newer */
+#endif /* OFFSETOF */
+
+#ifndef CONTAINEROF
+#define CONTAINEROF(ptr, type, member) ((type *)((char *)(ptr) - OFFSETOF(type, member)))
+#endif /* CONTAINEROF */
+
+/* substruct size up to and including a member of the struct */
+#ifndef STRUCT_SIZE_THROUGH
+#define STRUCT_SIZE_THROUGH(sptr, fname) \
+ (((uint8*)&((sptr)->fname) - (uint8*)(sptr)) + sizeof((sptr)->fname))
+#endif
+
+/* Extracting the size of element in a structure */
+#define SIZE_OF(type, field) sizeof(((type *)0)->field)
+
+/* Extracting the size of pointer element in a structure */
+#define SIZE_OF_PV(type, pfield) sizeof(*((type *)0)->pfield)
+
+#ifndef ARRAYSIZE
+#define ARRAYSIZE(a) (uint32)(sizeof(a) / sizeof(a[0]))
+#endif
+
+#ifndef ARRAYLAST /* returns pointer to last array element */
+#define ARRAYLAST(a) (&a[ARRAYSIZE(a)-1])
+#endif
+
+/* Calculates the required pad size. This is mainly used in register structures */
+#define PADSZ(start, end) ((((end) - (start)) / 4) + 1)
+
+/* Reference a function; used to prevent a static function from being optimized out */
+extern void *_bcmutils_dummy_fn;
+#define REFERENCE_FUNCTION(f) (_bcmutils_dummy_fn = (void *)(f))
+
+/* bit map related macros */
+#ifndef setbit
+#ifndef NBBY /* the BSD family defines NBBY */
+#define NBBY 8 /* 8 bits per byte */
+#endif /* #ifndef NBBY */
+#ifdef BCMUTILS_BIT_MACROS_USE_FUNCS
+extern void setbit(void *array, uint bit);
+extern void clrbit(void *array, uint bit);
+extern bool isset(const void *array, uint bit);
+extern bool isclr(const void *array, uint bit);
+#else
+#define setbit(a, i) (((uint8 *)a)[(i) / NBBY] |= 1 << ((i) % NBBY))
+#define clrbit(a, i) (((uint8 *)a)[(i) / NBBY] &= ~(1 << ((i) % NBBY)))
+#define isset(a, i) (((const uint8 *)a)[(i) / NBBY] & (1 << ((i) % NBBY)))
+#define isclr(a, i) ((((const uint8 *)a)[(i) / NBBY] & (1 << ((i) % NBBY))) == 0)
+#endif
+#endif /* setbit */
+
+/* Read/write/clear a field of consecutive bits in an octet array.
+ * 'addr' is the octet array's start byte address
+ * 'size' is the octet array's byte size
+ * 'stbit' is the value's start bit offset
+ * 'nbits' is the value's bit size
+ * This set of utilities is provided for convenience. Don't use it
+ * in time-critical/data-path code as it carries significant overhead.
+ */
+void setbits(uint8 *addr, uint size, uint stbit, uint nbits, uint32 val);
+uint32 getbits(const uint8 *addr, uint size, uint stbit, uint nbits);
+#define clrbits(addr, size, stbit, nbits) setbits(addr, size, stbit, nbits, 0)
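+
+/* Usage sketch (illustrative; the bit ordering follows the setbit()/isset()
+ * convention above, LSB-first within each byte):
+ *
+ *     uint8 arr[4] = {0};
+ *     setbits(arr, sizeof(arr), 5, 3, 0x5);    write a 3-bit field at bit offset 5
+ *     ... getbits(arr, sizeof(arr), 5, 3) now reads back 0x5 ...
+ *     clrbits(arr, sizeof(arr), 5, 3);         zero the same field
+ */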
+
+extern void set_bitrange(void *array, uint start, uint end, uint maxbit);
+extern void clr_bitrange(void *array, uint start, uint end, uint maxbit);
+extern void set_bitrange_u32(void *array, uint start, uint end, uint maxbit);
+extern void clr_bitrange_u32(void *array, uint start, uint end, uint maxbit);
+
+extern int bcm_find_fsb(uint32 num);
+
+#define isbitset(a, i) (((a) & (1 << (i))) != 0)
+
+#if defined DONGLEBUILD
+#define NBITS(type) (sizeof(type) * 8)
+#else
+#define NBITS(type) ((uint32)(sizeof(type) * 8))
+#endif /* DONGLEBUILD */
+#define NBITVAL(nbits) (1 << (nbits))
+#define MAXBITVAL(nbits) ((1 << (nbits)) - 1)
+#define NBITMASK(nbits) MAXBITVAL(nbits)
+#define MAXNBVAL(nbyte) MAXBITVAL((nbyte) * 8)
+
+enum {
+ BCM_FMT_BASE32
+};
+typedef int bcm_format_t;
+
+/* Encodes using the specified format and returns the length of the output
+ * written on success, or a BCME_XX status code on failure. Input and output
+ * buffers may overlap. 'in' is advanced to the position where the function
+ * stopped. On output, 'in_len' holds the number of input bytes processed.
+ * On input, 'pad_off' is the number of bits (MSBs of the first output byte)
+ * to preserve; on output it is the number of pad bits (LSBs) set to 0 in the output.
+ */
+int bcm_encode(uint8 **in, uint *in_len, bcm_format_t fmt,
+ uint *pad_off, uint8 *out, uint out_size);
+
+/* Decodes input in the specified format and returns the length of the output
+ * written on success, or a BCME_XX status code on failure. Input and output
+ * buffers may overlap. 'in' is advanced to the position where the function
+ * stopped. On output, 'in_len' holds the number of input bytes processed.
+ * On input, 'pad_off' is the number of bits (MSBs of the first output byte)
+ * to preserve; on output it is the number of pad bits (LSBs) set to 0 in the output.
+ */
+int bcm_decode(const uint8 **in, uint *in_len, bcm_format_t fmt,
+ uint *pad_off, uint8 *out, uint out_size);
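+
+/* Calling-pattern sketch for bcm_decode() (illustrative only; 'encoded_buf',
+ * 'encoded_len', 'out' and 'out_size' are hypothetical, and pad_off behaves
+ * as described above):
+ *
+ *     const uint8 *src = encoded_buf;
+ *     uint src_len = encoded_len;
+ *     uint pad = 0;
+ *     int written = bcm_decode(&src, &src_len, BCM_FMT_BASE32, &pad, out, out_size);
+ *     if (written < 0)
+ *         ... BCME_XX failure; otherwise 'src' has advanced and
+ *             'src_len' holds the number of input bytes processed ...
+ */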
+
+extern void bcm_bitprint32(const uint32 u32);
+
+/*
+ * ----------------------------------------------------------------------------
+ * Multiword map of 2bits, nibbles
+ * setbit2 setbit4 (void *ptr, uint32 ix, uint32 val)
+ * getbit2 getbit4 (void *ptr, uint32 ix)
+ * ----------------------------------------------------------------------------
+ */
+
+#define DECLARE_MAP_API(NB, RSH, LSH, OFF, MSK) \
+static INLINE void setbit##NB(void *ptr, uint32 ix, uint32 val) \
+{ \
+ uint32 *addr = (uint32 *)ptr; \
+ uint32 *a = addr + (ix >> RSH); /* (ix / 2^RSH) */ \
+ uint32 pos = (ix & OFF) << LSH; /* (ix % 2^RSH) * 2^LSH */ \
+ uint32 mask = (MSK << pos); \
+ uint32 tmp = *a & ~mask; \
+ *a = tmp | (val << pos); \
+} \
+static INLINE uint32 getbit##NB(void *ptr, uint32 ix) \
+{ \
+ uint32 *addr = (uint32 *)ptr; \
+ uint32 *a = addr + (ix >> RSH); \
+ uint32 pos = (ix & OFF) << LSH; \
+ return ((*a >> pos) & MSK); \
+}
+
+DECLARE_MAP_API(2, 4, 1, 15u, 0x0003u) /* setbit2() and getbit2() */
+DECLARE_MAP_API(4, 3, 2, 7u, 0x000Fu) /* setbit4() and getbit4() */
+DECLARE_MAP_API(8, 2, 3, 3u, 0x00FFu) /* setbit8() and getbit8() */
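+
+/* Usage sketch (illustrative): setbit2()/getbit2() pack sixteen 2-bit values
+ * per uint32 word; with RSH=4, entry 'ix' lives in word ix/16 at bit position
+ * (ix%16)*2:
+ *
+ *     uint32 map[2] = {0, 0};    32 two-bit entries
+ *     setbit2(map, 5, 3u);       store value 3 at index 5
+ *     ... getbit2(map, 5) returns 3 ...
+ *
+ * Note: 'val' is not masked; the caller must pass a value that fits the field.
+ */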
+
+/* basic mux operation - can be optimized on several architectures */
+#define MUX(pred, true, false) ((pred) ? (true) : (false))
+
+/* modulo inc/dec - assumes x E [0, bound - 1] */
+#define MODDEC(x, bound) MUX((x) == 0, (bound) - 1, (x) - 1)
+#define MODINC(x, bound) MUX((x) == (bound) - 1, 0, (x) + 1)
+
+/* modulo inc/dec, bound = 2^k */
+#define MODDEC_POW2(x, bound) (((x) - 1) & ((bound) - 1))
+#define MODINC_POW2(x, bound) (((x) + 1) & ((bound) - 1))
+
+/* modulo add/sub - assumes x, y E [0, bound - 1] */
+#define MODADD(x, y, bound) \
+ MUX((x) + (y) >= (bound), (x) + (y) - (bound), (x) + (y))
+#define MODSUB(x, y, bound) \
+ MUX(((int)(x)) - ((int)(y)) < 0, (x) - (y) + (bound), (x) - (y))
+
+/* modulo add/sub, bound = 2^k */
+#define MODADD_POW2(x, y, bound) (((x) + (y)) & ((bound) - 1))
+#define MODSUB_POW2(x, y, bound) (((x) - (y)) & ((bound) - 1))
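+
+/* Worked examples (illustrative) for a ring of bound 8:
+ *     MODINC(7, 8)      -> 0    MODDEC(0, 8)    -> 7
+ *     MODINC_POW2(7, 8) -> 0    (POW2 forms require bound to be a power of 2)
+ *     MODADD(5, 6, 8)   -> 3    MODSUB(2, 5, 8) -> 5
+ */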
+
+/* crc defines */
+#define CRC8_INIT_VALUE 0xffu /* Initial CRC8 checksum value */
+#define CRC8_GOOD_VALUE 0x9fu /* Good final CRC8 checksum value */
+#define CRC16_INIT_VALUE 0xffffu /* Initial CRC16 checksum value */
+#define CRC16_GOOD_VALUE 0xf0b8u /* Good final CRC16 checksum value */
+#define CRC32_INIT_VALUE 0xffffffffu /* Initial CRC32 checksum value */
+#define CRC32_GOOD_VALUE 0xdebb20e3u /* Good final CRC32 checksum value */
+
+#ifdef DONGLEBUILD
+#define MACF "MACADDR:%08x%04x"
+#define ETHERP_TO_MACF(ea) (uint32)bcm_ether_ntou64(ea), \
+ (uint32)(bcm_ether_ntou64(ea) >> 32)
+
+#define CONST_ETHERP_TO_MACF(ea) ETHERP_TO_MACF(ea)
+
+#define ETHER_TO_MACF(ea) ETHERP_TO_MACF(&ea)
+
+#else
+/* use for direct output of MAC address in printf etc */
+#define MACF "%02x:%02x:%02x:%02x:%02x:%02x"
+#define ETHERP_TO_MACF(ea) ((const struct ether_addr *) (ea))->octet[0], \
+ ((const struct ether_addr *) (ea))->octet[1], \
+ ((const struct ether_addr *) (ea))->octet[2], \
+ ((const struct ether_addr *) (ea))->octet[3], \
+ ((const struct ether_addr *) (ea))->octet[4], \
+ ((const struct ether_addr *) (ea))->octet[5]
+
+#define CONST_ETHERP_TO_MACF(ea) ETHERP_TO_MACF(ea)
+
+#define ETHER_TO_MACF(ea) (ea).octet[0], \
+ (ea).octet[1], \
+ (ea).octet[2], \
+ (ea).octet[3], \
+ (ea).octet[4], \
+ (ea).octet[5]
+#endif /* DONGLEBUILD */
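+/* Usage sketch (illustrative): both build variants keep MACF and its argument
+ * macros in sync, so callers print an address the same way in either case:
+ *
+ *     struct ether_addr ea;
+ *     printf("peer " MACF "\n", ETHER_TO_MACF(ea));
+ *     printf("peer " MACF "\n", ETHERP_TO_MACF(&ea));
+ */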
+/* Use only for debugging; the string format may change.
+ * If you need MAC formatting in program logic, use MACF instead.
+ */
+#if !defined(SIMPLE_MAC_PRINT)
+#define MACDBG "%02x:%02x:%02x:%02x:%02x:%02x"
+#define MAC2STRDBG(ea) ((const uint8*)(ea))[0], \
+ ((const uint8*)(ea))[1], \
+ ((const uint8*)(ea))[2], \
+ ((const uint8*)(ea))[3], \
+ ((const uint8*)(ea))[4], \
+ ((const uint8*)(ea))[5]
+#else
+#define MACDBG "%02x:xx:xx:xx:x%x:%02x"
+#define MAC2STRDBG(ea) ((const uint8*)(ea))[0], \
+ (((const uint8*)(ea))[4] & 0xf), \
+ ((const uint8*)(ea))[5]
+#endif /* SIMPLE_MAC_PRINT */
+
+#define MACOUIDBG "%02x:%x:%02x"
+#define MACOUI2STRDBG(ea) ((const uint8*)(ea))[0], \
+ ((const uint8*)(ea))[1] & 0xf, \
+ ((const uint8*)(ea))[2]
+
+#define MACOUI "%02x:%02x:%02x"
+#define MACOUI2STR(ea) (ea)[0], (ea)[1], (ea)[2]
+
+/* bcm_format_flags() bit description structure */
+typedef struct bcm_bit_desc {
+ uint32 bit;
+ const char* name;
+} bcm_bit_desc_t;
+
+/* bcm_format_field */
+typedef struct bcm_bit_desc_ex {
+ uint32 mask;
+ const bcm_bit_desc_t *bitfield;
+} bcm_bit_desc_ex_t;
+
+/* buffer length for an Ethernet address string from bcm_ether_ntoa() */
+#define ETHER_ADDR_STR_LEN 18u /* "xx:xx:xx:xx:xx:xx" plus terminating NUL */
+
+static INLINE uint32 /* 32bit word aligned xor-32 */
+bcm_compute_xor32(volatile uint32 *u32_val, int num_u32)
+{
+ int idx;
+ uint32 xor32 = 0;
+ for (idx = 0; idx < num_u32; idx++)
+ xor32 ^= *(u32_val + idx);
+ return xor32;
+}
+
+/* crypto utility function */
+/* 128-bit xor: *dst = *src1 xor *src2. dst, src1 and src2 may have any alignment */
+static INLINE void
+xor_128bit_block(const uint8 *src1, const uint8 *src2, uint8 *dst)
+{
+ if (
+#ifdef __i386__
+ 1 ||
+#endif
+ (((uintptr)src1 | (uintptr)src2 | (uintptr)dst) & 3) == 0) {
+ /* ARM CM3 rel time: 1229 (727 if alignment check could be omitted) */
+ /* x86 supports unaligned. This version runs 6x-9x faster on x86. */
+ ((uint32 *)dst)[0] = ((const uint32 *)src1)[0] ^ ((const uint32 *)src2)[0];
+ ((uint32 *)dst)[1] = ((const uint32 *)src1)[1] ^ ((const uint32 *)src2)[1];
+ ((uint32 *)dst)[2] = ((const uint32 *)src1)[2] ^ ((const uint32 *)src2)[2];
+ ((uint32 *)dst)[3] = ((const uint32 *)src1)[3] ^ ((const uint32 *)src2)[3];
+ } else {
+ /* ARM CM3 rel time: 4668 (4191 if alignment check could be omitted) */
+ int k;
+ for (k = 0; k < 16; k++)
+ dst[k] = src1[k] ^ src2[k];
+ }
+}
+
+/* externs */
+/* crc */
+uint8 hndcrc8(const uint8 *p, uint nbytes, uint8 crc);
+uint16 hndcrc16(const uint8 *p, uint nbytes, uint16 crc);
+uint32 hndcrc32(const uint8 *p, uint nbytes, uint32 crc);
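+
+/* Usage sketch (illustrative): the CRCs are computed incrementally from the
+ * INIT value; conventionally, running the CRC over a payload plus its appended
+ * CRC yields the corresponding GOOD value:
+ *
+ *     uint8 crc = hndcrc8(buf, nbytes, CRC8_INIT_VALUE);
+ *     ... on verify, hndcrc8() over payload + trailing CRC == CRC8_GOOD_VALUE ...
+ */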
+
+/* format/print */
+/* print out the value a field has: fields may have 1-32 bits and may hold any value */
+extern uint bcm_format_field(const bcm_bit_desc_ex_t *bd, uint32 field, char* buf, uint len);
+/* print out which bits in flags are set */
+extern int bcm_format_flags(const bcm_bit_desc_t *bd, uint32 flags, char* buf, uint len);
+/* print out which bits in octet array 'addr' are set. bcm_bit_desc_t:bit is a bit offset. */
+int bcm_format_octets(const bcm_bit_desc_t *bd, uint bdsz,
+ const uint8 *addr, uint size, char *buf, uint len);
+
+extern int bcm_format_hex(char *str, const void *bytes, uint len);
+
+#ifdef BCMDBG
+extern void deadbeef(void *p, uint len);
+#endif
+extern const char *bcm_crypto_algo_name(uint algo);
+extern char *bcm_chipname(uint chipid, char *buf, uint len);
+extern char *bcm_brev_str(uint32 brev, char *buf);
+extern void printbig(char *buf);
+extern void prhex(const char *msg, const uchar *buf, uint len);
+extern void prhexstr(const char *prefix, const uint8 *buf, uint len, bool newline);
+
+/* bcmerror */
+extern const char *bcmerrorstr(int bcmerror);
+
+#if defined(BCMDBG) || defined(WLMSG_ASSOC)
+/* get 802.11 frame name based on frame kind - see frame types FC_.. in 802.11.h */
+const char *bcm_80211_fk_name(uint fk);
+#else
+#define bcm_80211_fk_name(_x) ""
+#endif
+
+extern int wl_set_up_table(uint8 *up_table, bcm_tlv_t *qos_map_ie);
+
+/* multi-bool data type: set of bools, mbool is true if any is set */
+typedef uint32 mbool;
+#define mboolset(mb, bit) ((mb) |= (bit)) /* set one bool */
+#define mboolclr(mb, bit) ((mb) &= ~(bit)) /* clear one bool */
+#define mboolisset(mb, bit) (((mb) & (bit)) != 0) /* TRUE if one bool is set */
+#define mboolmaskset(mb, mask, val) ((mb) = (((mb) & ~(mask)) | (val)))
+
+/* generic datastruct to help dump routines */
+struct fielddesc {
+ const char *nameandfmt;
+ uint32 offset;
+ uint32 len;
+};
+
+extern void bcm_binit(struct bcmstrbuf *b, char *buf, uint size);
+#define bcm_bsize(b) ((b)->size)
+#define bcm_breset(b) do {bcm_binit(b, (b)->origbuf, (b)->origsize);} while (0)
+extern void bcm_bprhex(struct bcmstrbuf *b, const char *msg, bool newline,
+ const uint8 *buf, uint len);
+
+extern void bcm_inc_bytes(uchar *num, int num_bytes, uint8 amount);
+extern int bcm_cmp_bytes(const uchar *arg1, const uchar *arg2, uint8 nbytes);
+extern void bcm_print_bytes(const char *name, const uchar *cdata, uint len);
+
+typedef uint32 (*bcmutl_rdreg_rtn)(void *arg0, uint arg1, uint32 offset);
+extern uint bcmdumpfields(bcmutl_rdreg_rtn func_ptr, void *arg0, uint arg1, struct fielddesc *str,
+ char *buf, uint32 bufsize);
+extern uint bcm_bitcount(const uint8 *bitmap, uint bytelength);
+
+extern int bcm_bprintf(struct bcmstrbuf *b, const char *fmt, ...);
+
+/* power conversion */
+extern uint16 bcm_qdbm_to_mw(uint8 qdbm);
+extern uint8 bcm_mw_to_qdbm(uint16 mw);
+extern uint bcm_mkiovar(const char *name, const char *data, uint datalen, char *buf, uint len);
+
+#ifdef BCMDBG_PKT /* pkt logging for debugging */
+#define PKTLIST_SIZE 3000
+
+#ifdef BCMDBG_PTRACE
+#define PKTTRACE_MAX_BYTES 12
+#define PKTTRACE_MAX_BITS (PKTTRACE_MAX_BYTES * NBBY)
+
+enum pkttrace_info {
+ PKTLIST_PRECQ, /* Pkt in Prec Q */
+ PKTLIST_FAIL_PRECQ, /* Pkt failed to Q in PRECQ */
+ PKTLIST_DMAQ, /* Pkt in DMA Q */
+ PKTLIST_MI_TFS_RCVD, /* Received TX status */
+ PKTLIST_TXDONE, /* Pkt TX done */
+ PKTLIST_TXFAIL, /* Pkt TX failed */
+ PKTLIST_PKTFREE, /* pkt is freed */
+ PKTLIST_PRECREQ, /* Pkt requeued in precq */
+ PKTLIST_TXFIFO /* To trace in wlc_fifo */
+};
+#endif /* BCMDBG_PTRACE */
+
+typedef struct pkt_dbginfo {
+ int line;
+ char *file;
+ void *pkt;
+#ifdef BCMDBG_PTRACE
+ char pkt_trace[PKTTRACE_MAX_BYTES];
+#endif /* BCMDBG_PTRACE */
+} pkt_dbginfo_t;
+
+typedef struct {
+ pkt_dbginfo_t list[PKTLIST_SIZE]; /* List of pointers to packets */
+ uint16 count; /* Total count of the packets */
+} pktlist_info_t;
+
+extern void pktlist_add(pktlist_info_t *pktlist, void *p, int len, char *file);
+extern void pktlist_remove(pktlist_info_t *pktlist, void *p);
+extern char* pktlist_dump(pktlist_info_t *pktlist, char *buf);
+#ifdef BCMDBG_PTRACE
+extern void pktlist_trace(pktlist_info_t *pktlist, void *pkt, uint16 bit);
+#endif /* BCMDBG_PTRACE */
+#endif /* BCMDBG_PKT */
+unsigned int process_nvram_vars(char *varbuf, unsigned int len);
+bool replace_nvram_variable(char *varbuf, unsigned int buflen, const char *variable,
+ unsigned int *datalen);
+
+/* trace any object allocation / free, with / without features (flags) set to the object */
+#if (defined(DONGLEBUILD) && defined(BCMDBG_MEM) && (!defined(BCM_OBJECT_TRACE)))
+#define BCM_OBJECT_TRACE
+#endif /* (defined(DONGLEBUILD) && defined(BCMDBG_MEM) && (!defined(BCM_OBJECT_TRACE))) */
+
+#define BCM_OBJDBG_ADD 1
+#define BCM_OBJDBG_REMOVE 2
+#define BCM_OBJDBG_ADD_PKT 3
+
+/* object feature: set or clear flags */
+#define BCM_OBJECT_FEATURE_FLAG 1
+#define BCM_OBJECT_FEATURE_PKT_STATE 2
+/* object feature: flag bits */
+#define BCM_OBJECT_FEATURE_0 (1 << 0)
+#define BCM_OBJECT_FEATURE_1 (1 << 1)
+#define BCM_OBJECT_FEATURE_2 (1 << 2)
+/* object feature: clear flag bits field set with this flag */
+#define BCM_OBJECT_FEATURE_CLEAR (1 << 31)
+#if defined(BCM_OBJECT_TRACE) && !defined(BINCMP)
+#define bcm_pkt_validate_chk(obj, func) do { \
+ void * pkttag; \
+ bcm_object_trace_chk(obj, 0, 0, \
+ func, __LINE__); \
+ if ((pkttag = PKTTAG(obj))) { \
+ bcm_object_trace_chk(obj, 1, DHD_PKTTAG_SN(pkttag), \
+ func, __LINE__); \
+ } \
+} while (0)
+extern void bcm_object_trace_opr(void *obj, uint32 opt, const char *caller, int line);
+extern void bcm_object_trace_upd(void *obj, void *obj_new);
+extern void bcm_object_trace_chk(void *obj, uint32 chksn, uint32 sn,
+ const char *caller, int line);
+extern void bcm_object_feature_set(void *obj, uint32 type, uint32 value);
+extern int bcm_object_feature_get(void *obj, uint32 type, uint32 value);
+extern void bcm_object_trace_init(void);
+extern void bcm_object_trace_deinit(void);
+#else
+#define bcm_pkt_validate_chk(obj, func)
+#define bcm_object_trace_opr(a, b, c, d)
+#define bcm_object_trace_upd(a, b)
+#define bcm_object_trace_chk(a, b, c, d, e)
+#define bcm_object_feature_set(a, b, c)
+#define bcm_object_feature_get(a, b, c)
+#define bcm_object_trace_init()
+#define bcm_object_trace_deinit()
+#endif /* BCM_OBJECT_TRACE && !BINCMP */
+
+/* Public domain bit twiddling hacks/utilities: Sean Eron Anderson */
+
+/* Table driven count set bits. */
+static const uint8 /* Table only for use by bcm_cntsetbits */
+_CSBTBL[256] =
+{
+ #define B2(n) n, n + 1, n + 1, n + 2
+ #define B4(n) B2(n), B2(n + 1), B2(n + 1), B2(n + 2)
+ #define B6(n) B4(n), B4(n + 1), B4(n + 1), B4(n + 2)
+ B6(0), B6(0 + 1), B6(0 + 1), B6(0 + 2)
+};
+
+static INLINE uint32 /* Uses table _CSBTBL for fast counting of 1's in a u32 */
+bcm_cntsetbits(const uint32 u32arg)
+{
+ /* byte-wise lookups into the file-scope _CSBTBL[] above */
+ const uint8 * p = (const uint8 *)&u32arg;
+ /* uint32 cast to avoid uint8 being promoted to int for arithmetic operation */
+ return ((uint32)_CSBTBL[p[0]] + _CSBTBL[p[1]] + _CSBTBL[p[2]] + _CSBTBL[p[3]]);
+}
+
+static INLINE int /* C equivalent count of leading 0's in a u32 */
+C_bcm_count_leading_zeros(uint32 u32arg)
+{
+ int shifts = 0;
+ while (u32arg) {
+ shifts++; u32arg >>= 1;
+ }
+ return (32 - shifts);
+}
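+
+/* Worked examples (illustrative, from the definitions above):
+ *     bcm_cntsetbits(0xF0F0F0F0u)            -> 16
+ *     C_bcm_count_leading_zeros(1u)          -> 31
+ *     C_bcm_count_leading_zeros(0x80000000u) -> 0
+ *     C_bcm_count_leading_zeros(0u)          -> 32
+ */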
+
+typedef struct bcm_rand_metadata {
+ uint32 count; /* number of random numbers in bytes */
+ uint32 signature; /* host fills it in, FW verifies before reading rand */
+} bcm_rand_metadata_t;
+
+#ifdef BCMDRIVER
+/*
+ * Assembly instructions: Count Leading Zeros
+ * "clz" : MIPS, ARM
+ * "cntlzw" : PowerPC
+ * "BSF" : x86
+ * "lzcnt" : AMD, SPARC
+ */
+
+#if defined(__arm__)
+#if defined(__ARM_ARCH_7M__) /* Cortex M3 */
+#define __USE_ASM_CLZ__
+#endif /* __ARM_ARCH_7M__ */
+#if defined(__ARM_ARCH_7R__) /* Cortex R4 */
+#define __USE_ASM_CLZ__
+#endif /* __ARM_ARCH_7R__ */
+#endif /* __arm__ */
+
+static INLINE int
+bcm_count_leading_zeros(uint32 u32arg)
+{
+#if defined(__USE_ASM_CLZ__)
+ int zeros;
+ __asm__ volatile("clz %0, %1 \n" : "=r" (zeros) : "r" (u32arg));
+ return zeros;
+#else /* C equivalent */
+ return C_bcm_count_leading_zeros(u32arg);
+#endif /* C equivalent */
+}
+
+/*
+ * Macro to count leading zeroes
+ *
+ */
+#if defined(__GNUC__)
+#define CLZ(x) __builtin_clzl(x)
+#elif defined(__arm__)
+#define CLZ(x) __clz(x)
+#else
+#define CLZ(x) bcm_count_leading_zeros(x)
+#endif /* __GNUC__ */
+
+/* INTERFACE: Multiword bitmap based small id allocator. */
+struct bcm_mwbmap; /* forward declaration for use as an opaque mwbmap handle */
+
+#define BCM_MWBMAP_INVALID_HDL ((struct bcm_mwbmap *)NULL)
+#define BCM_MWBMAP_INVALID_IDX ((uint32)(~0U))
+
+/* Incarnate a multiword bitmap based small index allocator */
+extern struct bcm_mwbmap * bcm_mwbmap_init(osl_t * osh, uint32 items_max);
+
+/* Free up the multiword bitmap index allocator */
+extern void bcm_mwbmap_fini(osl_t * osh, struct bcm_mwbmap * mwbmap_hdl);
+
+/* Allocate a unique small index using a multiword bitmap index allocator */
+extern uint32 bcm_mwbmap_alloc(struct bcm_mwbmap * mwbmap_hdl);
+
+/* Force an index at a specified position to be in use */
+extern void bcm_mwbmap_force(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix);
+
+/* Free a previously allocated index back into the multiword bitmap allocator */
+extern void bcm_mwbmap_free(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix);
+
+/* Fetch the total number of free indices in the multiword bitmap allocator */
+extern uint32 bcm_mwbmap_free_cnt(struct bcm_mwbmap * mwbmap_hdl);
+
+/* Determine whether an index is inuse or free */
+extern bool bcm_mwbmap_isfree(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix);
+
+/* Debug dump a multiword bitmap allocator */
+extern void bcm_mwbmap_show(struct bcm_mwbmap * mwbmap_hdl);
+
+extern void bcm_mwbmap_audit(struct bcm_mwbmap * mwbmap_hdl);
+/* End - Multiword bitmap based small Id allocator. */
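+
+/* Usage sketch (illustrative; 'osh' is the caller's OSL handle):
+ *
+ *     struct bcm_mwbmap *map = bcm_mwbmap_init(osh, 64u);
+ *     uint32 id = bcm_mwbmap_alloc(map);
+ *     if (id != BCM_MWBMAP_INVALID_IDX) {
+ *         ... use id ...
+ *         bcm_mwbmap_free(map, id);
+ *     }
+ *     bcm_mwbmap_fini(osh, map);
+ */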
+
+/* INTERFACE: Simple unique 16bit Id Allocator using a stack implementation. */
+
+#define ID8_INVALID 0xFFu
+#define ID16_INVALID 0xFFFFu
+#define ID32_INVALID 0xFFFFFFFFu
+#define ID16_UNDEFINED ID16_INVALID
+
+/*
+ * Construct a 16bit id allocator, managing 16bit ids in the range:
+ * [start_val16 .. start_val16+total_ids)
+ * Note: start_val16 is inclusive.
+ * Returns an opaque handle to the 16bit id allocator.
+ */
+extern void * id16_map_init(osl_t *osh, uint16 total_ids, uint16 start_val16);
+extern void * id16_map_fini(osl_t *osh, void * id16_map_hndl);
+extern void id16_map_clear(void * id16_map_hndl, uint16 total_ids, uint16 start_val16);
+
+/* Allocate a unique 16bit id */
+extern uint16 id16_map_alloc(void * id16_map_hndl);
+
+/* Free a 16bit id value into the id16 allocator */
+extern void id16_map_free(void * id16_map_hndl, uint16 val16);
+
+/* Get the number of failures encountered during id allocation. */
+extern uint32 id16_map_failures(void * id16_map_hndl);
+
+/* Audit the 16bit id allocator state. */
+extern bool id16_map_audit(void * id16_map_hndl);
+/* End - Simple 16bit Id Allocator. */
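+
+/* Usage sketch (illustrative; 'osh' is the caller's OSL handle). Manages ids
+ * in [0x100 .. 0x100 + 32):
+ *
+ *     void *hdl = id16_map_init(osh, 32u, 0x100u);
+ *     uint16 id = id16_map_alloc(hdl);
+ *     if (id != ID16_INVALID) {
+ *         ... use id ...
+ *         id16_map_free(hdl, id);
+ *     }
+ *     hdl = id16_map_fini(osh, hdl);
+ */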
+#endif /* BCMDRIVER */
+
+void bcm_add_64(uint32* r_hi, uint32* r_lo, uint32 offset);
+void bcm_sub_64(uint32* r_hi, uint32* r_lo, uint32 offset);
+
+#define MASK_32_BITS (~0)
+#define MASK_8_BITS ((1 << 8) - 1)
+
+#define EXTRACT_LOW32(num) (uint32)(num & MASK_32_BITS)
+#define EXTRACT_HIGH32(num) (uint32)(((uint64)num >> 32) & MASK_32_BITS)
+
+#define MAXIMUM(a, b) (((a) > (b)) ? (a) : (b))
+#define MINIMUM(a, b) (((a) < (b)) ? (a) : (b))
+#define LIMIT(x, min, max) ((x) < (min) ? (min) : ((x) > (max) ? (max) : (x)))
+
+/* calculate checksum for ip header, tcp / udp header / data */
+uint16 bcm_ip_cksum(uint8 *buf, uint32 len, uint32 sum);
+
+#ifndef _dll_t_
+#define _dll_t_
+/*
+ * -----------------------------------------------------------------------------
+ * Double Linked List Macros
+ * -----------------------------------------------------------------------------
+ *
+ * All dll operations must be performed on a pre-initialized node.
+ * Inserting an uninitialized node into a list effectively initializes it.
+ *
+ * When a node is deleted from a list, you may initialize it to avoid corruption
+ * incurred by double deletion. You may skip initialization if the node is
+ * immediately inserted into another list.
+ *
+ * By placing a dll_t element at the start of a struct, you may cast a dll_t *
+ * to the struct or vice versa.
+ *
+ * Example of declaring and initializing someList and inserting nodeA, nodeB, nodeC
+ *
+ * typedef struct item {
+ * dll_t node;
+ * int someData;
+ * } Item_t;
+ * Item_t nodeA, nodeB, nodeC;
+ * nodeA.someData = 11111, nodeB.someData = 22222, nodeC.someData = 33333;
+ *
+ * dll_t someList;
+ * dll_init(&someList);
+ *
+ * dll_append(&someList, (dll_t *) &nodeA);
+ * dll_prepend(&someList, &nodeB.node);
+ * dll_insert((dll_t *)&nodeC, &nodeA.node);
+ *
+ * dll_delete((dll_t *) &nodeB);
+ *
+ * Example of a for loop to walk someList of node_p
+ *
+ * extern void mydisplay(Item_t * item_p);
+ *
+ * dll_t * item_p, * next_p;
+ * for (item_p = dll_head_p(&someList); ! dll_end(&someList, item_p);
+ * item_p = next_p)
+ * {
+ * next_p = dll_next_p(item_p);
+ * ... use item_p at will, including removing it from list ...
+ * mydisplay((Item_t *)item_p);
+ * }
+ *
+ * -----------------------------------------------------------------------------
+ */
+typedef struct dll {
+ struct dll * next_p;
+ struct dll * prev_p;
+} dll_t;
+
+static INLINE void
+dll_init(dll_t *node_p)
+{
+ node_p->next_p = node_p;
+ node_p->prev_p = node_p;
+}
+/* dll accessors returning a pointer to dll_t */
+
+static INLINE dll_t *
+BCMPOSTTRAPFN(dll_head_p)(dll_t *list_p)
+{
+ return list_p->next_p;
+}
+
+static INLINE dll_t *
+BCMPOSTTRAPFN(dll_tail_p)(dll_t *list_p)
+{
+ return (list_p)->prev_p;
+}
+
+static INLINE dll_t *
+BCMPOSTTRAPFN(dll_next_p)(dll_t *node_p)
+{
+ return (node_p)->next_p;
+}
+
+static INLINE dll_t *
+BCMPOSTTRAPFN(dll_prev_p)(dll_t *node_p)
+{
+ return (node_p)->prev_p;
+}
+
+static INLINE bool
+BCMPOSTTRAPFN(dll_empty)(dll_t *list_p)
+{
+ return ((list_p)->next_p == (list_p));
+}
+
+static INLINE bool
+BCMPOSTTRAPFN(dll_end)(dll_t *list_p, dll_t * node_p)
+{
+ return (list_p == node_p);
+}
+
+/* inserts the node new_p "after" the node at_p */
+static INLINE void
+BCMPOSTTRAPFN(dll_insert)(dll_t *new_p, dll_t * at_p)
+{
+ new_p->next_p = at_p->next_p;
+ new_p->prev_p = at_p;
+ at_p->next_p = new_p;
+ (new_p->next_p)->prev_p = new_p;
+}
+
+static INLINE void
+BCMPOSTTRAPFN(dll_append)(dll_t *list_p, dll_t *node_p)
+{
+ dll_insert(node_p, dll_tail_p(list_p));
+}
+
+static INLINE void
+BCMPOSTTRAPFN(dll_prepend)(dll_t *list_p, dll_t *node_p)
+{
+ dll_insert(node_p, list_p);
+}
+
+/* deletes a node from any list it may be in, if any. */
+static INLINE void
+BCMPOSTTRAPFN(dll_delete)(dll_t *node_p)
+{
+ node_p->prev_p->next_p = node_p->next_p;
+ node_p->next_p->prev_p = node_p->prev_p;
+}
+#endif /* ! defined(_dll_t_) */
+
+/* Elements managed in a double linked list */
+
+typedef struct dll_pool {
+ dll_t free_list;
+ uint16 free_count;
+ uint16 elems_max;
+ uint16 elem_size;
+ dll_t elements[1];
+} dll_pool_t;
+
+dll_pool_t * dll_pool_init(void * osh, uint16 elems_max, uint16 elem_size);
+void * dll_pool_alloc(dll_pool_t * dll_pool_p);
+void dll_pool_free(dll_pool_t * dll_pool_p, void * elem_p);
+void dll_pool_free_tail(dll_pool_t * dll_pool_p, void * elem_p);
+typedef void (* dll_elem_dump)(void * elem_p);
+#ifdef BCMDBG
+void dll_pool_dump(dll_pool_t * dll_pool_p, dll_elem_dump dump);
+#endif
+void dll_pool_detach(void * osh, dll_pool_t * pool, uint16 elems_max, uint16 elem_size);
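+
+/* Usage sketch (illustrative; 'my_elem_t' is hypothetical and, matching the
+ * pool's free-list layout above, begins with a dll_t):
+ *
+ *     dll_pool_t *pool = dll_pool_init(osh, 16u, sizeof(my_elem_t));
+ *     my_elem_t *e = (my_elem_t *)dll_pool_alloc(pool);
+ *     if (e != NULL) {
+ *         ... use e ...
+ *         dll_pool_free(pool, e);
+ *     }
+ *     dll_pool_detach(osh, pool, 16u, sizeof(my_elem_t));
+ */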
+
+int valid_bcmerror(int e);
+/* Stringify macro definition */
+#define BCM_STRINGIFY(s) #s
+/* Used to pass in a macro variable that gets expanded and then stringified */
+#define BCM_EXTENDED_STRINGIFY(s) BCM_STRINGIFY(s)
+
+/* calculate IPv4 header checksum
+ * - input ip points to IP header in network order
+ * - output cksum is in network order
+ */
+uint16 ipv4_hdr_cksum(uint8 *ip, uint ip_len);
+
+/* calculate IPv4 TCP header checksum
+ * - input ip and tcp points to IP and TCP header in network order
+ * - output cksum is in network order
+ */
+uint16 ipv4_tcp_hdr_cksum(uint8 *ip, uint8 *tcp, uint16 tcp_len);
+
+/* calculate IPv6 TCP header checksum
+ * - input ipv6 and tcp points to IPv6 and TCP header in network order
+ * - output cksum is in network order
+ */
+uint16 ipv6_tcp_hdr_cksum(uint8 *ipv6, uint8 *tcp, uint16 tcp_len);
+
+#ifdef __cplusplus
+ }
+#endif
+
+/* #define DEBUG_COUNTER */
+#ifdef DEBUG_COUNTER
+#define CNTR_TBL_MAX 10
+typedef struct _counter_tbl_t {
+ char name[16]; /* name of this counter table */
+ uint32 prev_log_print; /* Internal use. Timestamp of the previous log print */
+ uint log_print_interval; /* Desired interval to print logs in ms */
+ uint needed_cnt; /* How many counters need to be used */
+ uint32 cnt[CNTR_TBL_MAX]; /* Counting entries to increase at desired places */
+ bool enabled; /* Whether to enable printing log */
+} counter_tbl_t;
+
+/* How to use
+ Eg.: In dhd_linux.c
+ cnt[0]: How many times dhd_start_xmit() was called in each 1-second interval.
+ cnt[1]: How many bytes were requested to be sent in each 1-second interval.
+
+++ static counter_tbl_t xmit_tbl = {"xmit", 0, 1000, 2, {0,}, 1};
+
+ int
+ dhd_start_xmit(struct sk_buff *skb, struct net_device *net)
+ {
+ ..........
+++ counter_printlog(&xmit_tbl);
+++ xmit_tbl.cnt[0]++;
+
+ ifp = dhd->iflist[ifidx];
+ datalen = PKTLEN(dhdp->osh, skb);
+
+++ xmit_tbl.cnt[1] += datalen;
+ ............
+
+ ret = dhd_sendpkt(&dhd->pub, ifidx, pktbuf);
+ ...........
+ }
+*/
+
+void counter_printlog(counter_tbl_t *ctr_tbl);
+#endif /* DEBUG_COUNTER */
+
+#if defined(__GNUC__)
+#define CALL_SITE __builtin_return_address(0)
+#elif defined(_WIN32)
+#define CALL_SITE _ReturnAddress()
+#else
+#define CALL_SITE ((void*) 0)
+#endif
+#ifdef SHOW_LOGTRACE
+#define TRACE_LOG_BUF_MAX_SIZE 1700
+#define RTT_LOG_BUF_MAX_SIZE 1700
+#define BUF_NOT_AVAILABLE 0
+#define NEXT_BUF_NOT_AVAIL 1
+#define NEXT_BUF_AVAIL 2
+
+typedef struct trace_buf_info {
+ int availability;
+ int size;
+ char buf[TRACE_LOG_BUF_MAX_SIZE];
+} trace_buf_info_t;
+#endif /* SHOW_LOGTRACE */
+
+enum dump_dongle_e {
+ DUMP_DONGLE_COREREG = 0,
+ DUMP_DONGLE_D11MEM
+};
+
+typedef struct {
+ uint32 type; /**< specifies e.g dump of d11 memory, use enum dump_dongle_e */
+ uint32 index; /**< iterator1, specifies core index or d11 memory index */
+ uint32 offset; /**< iterator2, byte offset within register set or memory */
+} dump_dongle_in_t;
+
+typedef struct {
+ uint32 address; /**< e.g. backplane address of register */
+ uint32 id; /**< id, e.g. core id */
+ uint32 rev; /**< rev, e.g. core rev */
+ uint32 n_bytes; /**< nbytes in array val[] */
+ uint32 val[1]; /**< out: values that were read out of registers or memory */
+} dump_dongle_out_t;
+
+extern uint32 sqrt_int(uint32 value);
+
+extern uint8 bcm_get_ceil_pow_2(uint val);
+
+#ifdef BCMDRIVER
+/* structures and routines to process variable sized data */
+typedef struct var_len_data {
+ uint32 vlen;
+ uint8 *vdata;
+} var_len_data_t;
+
+int bcm_vdata_alloc(osl_t *osh, var_len_data_t *vld, uint32 size);
+int bcm_vdata_free(osl_t *osh, var_len_data_t *vld);
+#if defined(PRIVACY_MASK)
+void bcm_ether_privacy_mask(struct ether_addr *addr);
+#else
+#define bcm_ether_privacy_mask(addr)
+#endif /* PRIVACY_MASK */
+#endif /* BCMDRIVER */
+
+/* Count the number of elements in an array that do not match the given value */
+extern int array_value_mismatch_count(uint8 value, uint8 *array, int array_size);
+/* Count the number of non-zero elements in an uint8 array */
+extern int array_nonzero_count(uint8 *array, int array_size);
+/* Count the number of non-zero elements in an int16 array */
+extern int array_nonzero_count_int16(int16 *array, int array_size);
+/* Count the number of zero elements in an uint8 array */
+extern int array_zero_count(uint8 *array, int array_size);
+/* Validate a uint8 ordered array. Assert if invalid. */
+extern int verify_ordered_array_uint8(uint8 *array, int array_size, uint8 range_lo, uint8 range_hi);
+/* Validate a int16 configuration array that need not be zero-terminated. Assert if invalid. */
+extern int verify_ordered_array_int16(int16 *array, int array_size, int16 range_lo, int16 range_hi);
+/* Validate all values in an array are in range */
+extern int verify_array_values(uint8 *array, int array_size,
+ int range_lo, int range_hi, bool zero_terminated);
+
+/* To unwind from the trap_handler. */
+extern void (*const print_btrace_int_fn)(int depth, uint32 pc, uint32 lr, uint32 sp);
+extern void (*const print_btrace_fn)(int depth);
+#define PRINT_BACKTRACE(depth) if (print_btrace_fn) print_btrace_fn(depth)
+#define PRINT_BACKTRACE_INT(depth, pc, lr, sp) \
+ if (print_btrace_int_fn) print_btrace_int_fn(depth, pc, lr, sp)
+
+/* FW Signing - only in bootloader builds, never in dongle FW builds */
+#ifdef WL_FWSIGN
+ #define FWSIGN_ENAB() (1)
+#else
+ #define FWSIGN_ENAB() (0)
+#endif /* WL_FWSIGN */
+
+/* Utilities for reading SROM/SFlash vars */
+
+typedef struct varbuf {
+ char *base; /* pointer to buffer base */
+ char *buf; /* pointer to current position */
+ unsigned int size; /* current (residual) size in bytes */
+} varbuf_t;
+
+/** Initialization of varbuf structure */
+void varbuf_init(varbuf_t *b, char *buf, uint size);
+/** append a null terminated var=value string */
+int varbuf_append(varbuf_t *b, const char *fmt, ...);
+#if defined(BCMDRIVER)
+int initvars_table(osl_t *osh, char *start, char *end, char **vars, uint *count);
+#endif
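+
+/* Usage sketch (illustrative; 'boardtype' and 'macstr' are hypothetical):
+ * varbuf_append() adds one NUL-terminated "name=value" string per call and
+ * updates the residual size:
+ *
+ *     char storage[256];
+ *     varbuf_t vb;
+ *     varbuf_init(&vb, storage, sizeof(storage));
+ *     varbuf_append(&vb, "boardtype=0x%x", boardtype);
+ *     varbuf_append(&vb, "macaddr=%s", macstr);
+ */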
+
+/* Count the number of trailing zeros in uint32 val
+ * Applying unary minus to unsigned value is intentional,
+ * and doesn't influence counting of trailing zeros
+ */
+static INLINE uint32
+count_trailing_zeros(uint32 val)
+{
+#ifdef BCMDRIVER
+ uint32 c = (uint32)CLZ(val & ((uint32)(-(int)val)));
+#else
+ uint32 c = (uint32)C_bcm_count_leading_zeros(val & ((uint32)(-(int)val)));
+#endif /* BCMDRIVER */
+ return val ? 31u - c : c;
+}
+
+/** Size in bytes of data block, defined by struct with last field, declared as
+ * one/zero element vector - such as wl_uint32_list_t or bcm_xtlv_cbuf_s.
+ * Arguments:
+ * list - address of data block (value is ignored, only type is important)
+ * last_var_len_field - name of last field (usually declared as ...[] or ...[1])
+ * num_elems - number of elements in data block
+ * Example:
+ * wl_uint32_list_t *list;
+ * WL_VAR_LEN_STRUCT_SIZE(list, element, 10); // Size in bytes of 10-element list
+ */
+#define WL_VAR_LEN_STRUCT_SIZE(list, last_var_len_field, num_elems) \
+ ((size_t)((const char *)&((list)->last_var_len_field) - (const char *)(list)) + \
+ (sizeof((list)->last_var_len_field[0]) * (size_t)(num_elems)))
+
+int buf_shift_right(uint8 *buf, uint16 len, uint8 bits);
+#endif /* _bcmutils_h_ */
diff --git a/bcmdhd.101.10.361.x/include/bcmwifi_channels.h b/bcmdhd.101.10.361.x/include/bcmwifi_channels.h
new file mode 100755
index 0000000..d3744de
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/bcmwifi_channels.h
@@ -0,0 +1,888 @@
+/*
+ * Misc utility routines for WL and Apps
+ * This header file houses the defines and function prototypes used by
+ * the wl driver, tools and apps.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _bcmwifi_channels_h_
+#define _bcmwifi_channels_h_
+
+/* A chanspec holds the channel number, band, bandwidth and primary 20MHz sub-band */
+typedef uint16 chanspec_t;
+typedef uint16 chanspec_band_t;
+typedef uint16 chanspec_bw_t;
+typedef uint16 chanspec_subband_t;
+
+/* channel defines */
+#define CH_80MHZ_APART 16u
+#define CH_40MHZ_APART 8u
+#define CH_20MHZ_APART 4u
+#define CH_10MHZ_APART 2u
+#define CH_5MHZ_APART 1u /* 2G band channels are 5 MHz apart */
+#define CH_160MHZ_APART (32u * CH_5MHZ_APART) /* 32 5-MHz spaces */
+
+#define CH_MIN_2G_CHANNEL 1u /* Min channel in 2G band */
+#define CH_MAX_2G_CHANNEL 14u /* Max channel in 2G band */
+#define CH_MIN_2G_40M_CHANNEL 3u /* Min 40MHz center channel in 2G band */
+#define CH_MAX_2G_40M_CHANNEL 11u /* Max 40MHz center channel in 2G band */
+
+#define CH_MIN_6G_CHANNEL 1u /* Min 20MHz channel in 6G band */
+#define CH_MAX_6G_CHANNEL 253u /* Max 20MHz channel in 6G band */
+#define CH_MIN_6G_40M_CHANNEL 3u /* Min 40MHz center channel in 6G band */
+#define CH_MAX_6G_40M_CHANNEL 227u /* Max 40MHz center channel in 6G band */
+#define CH_MIN_6G_80M_CHANNEL 7u /* Min 80MHz center channel in 6G band */
+#define CH_MAX_6G_80M_CHANNEL 215u /* Max 80MHz center channel in 6G band */
+#define CH_MIN_6G_160M_CHANNEL 15u /* Min 160MHz center channel in 6G band */
+#define CH_MAX_6G_160M_CHANNEL 207u /* Max 160MHz center channel in 6G band */
+#define CH_MIN_6G_240M_CHANNEL 23u /* Min 240MHz center channel in 6G band */
+#define CH_MAX_6G_240M_CHANNEL 167u /* Max 240MHz center channel in 6G band */
+#define CH_MIN_6G_320M_CHANNEL 31u /* Min 320MHz center channel in 6G band */
+#define CH_MAX_6G_320M_CHANNEL 199u /* Max 320MHz center channel in 6G band */
+
+/* maximum # channels the s/w supports */
+#define MAXCHANNEL 254u /* max # supported channels.
+ * DO NOT MAKE > 255: channels are uint8's all over
+ */
+#define MAXCHANNEL_NUM (MAXCHANNEL - 1) /* max channel number */
+
+#define INVCHANNEL 255u /* error value for a bad channel */
+
+/* length of channel vector bitmap is the MAXCHANNEL we want to handle rounded up to a byte */
+/* The corrected CHANVEC_LEN definition increases static memory use in
+* all projects where the previous CHANVEC_LEN definition was relied upon.
+*
+* The previous definition is retained under the MAXCHNL_ROM_COMPAT flag.
+* Chip programs where the memory impact is observed need to define it.
+*/
+#ifdef MAXCHNL_ROM_COMPAT
+#define CHANVEC_LEN (MAXCHANNEL + (8 - 1) / 8)
+#else
+#define CHANVEC_LEN ((MAXCHANNEL + (8 - 1)) / 8)
+#endif
+
+/* channel bitvec */
+typedef struct {
+ uint8 vec[CHANVEC_LEN]; /* bitvec of channels */
+} chanvec_t;
+
+/* make sure channel num is within valid range */
+#define CH_NUM_VALID_RANGE(ch_num) ((ch_num) > 0 && (ch_num) <= MAXCHANNEL_NUM)
+
+#define CHSPEC_CTLOVLP(sp1, sp2, sep) \
+ ((uint)ABS(wf_chspec_ctlchan(sp1) - wf_chspec_ctlchan(sp2)) < (uint)(sep))
+
+/* All builds use the new 11ac ratespec/chanspec */
+#undef D11AC_IOTYPES
+#define D11AC_IOTYPES
+
+/* For contiguous channel bandwidth other than 240MHz/320MHz */
+#define WL_CHANSPEC_CHAN_MASK 0x00ffu
+#define WL_CHANSPEC_CHAN_SHIFT 0u
+
+/* For contiguous channel bandwidth >= 240MHz */
+#define WL_CHANSPEC_GE240_CHAN_MASK 0x0003u
+#define WL_CHANSPEC_GE240_CHAN_SHIFT 0u
+
+/* For discontiguous channel bandwidth */
+#define WL_CHANSPEC_CHAN0_MASK 0x000fu
+#define WL_CHANSPEC_CHAN0_SHIFT 0u
+#define WL_CHANSPEC_CHAN1_MASK 0x00f0u
+#define WL_CHANSPEC_CHAN1_SHIFT 4u
+
+/* Non-320/Non-240 Mhz channel sideband indication */
+#define WL_CHANSPEC_CTL_SB_MASK 0x0700u
+#define WL_CHANSPEC_CTL_SB_SHIFT 8u
+#define WL_CHANSPEC_CTL_SB_LLL 0x0000u
+#define WL_CHANSPEC_CTL_SB_LLU 0x0100u
+#define WL_CHANSPEC_CTL_SB_LUL 0x0200u
+#define WL_CHANSPEC_CTL_SB_LUU 0x0300u
+#define WL_CHANSPEC_CTL_SB_ULL 0x0400u
+#define WL_CHANSPEC_CTL_SB_ULU 0x0500u
+#define WL_CHANSPEC_CTL_SB_UUL 0x0600u
+#define WL_CHANSPEC_CTL_SB_UUU 0x0700u
+#define WL_CHANSPEC_CTL_SB_LL WL_CHANSPEC_CTL_SB_LLL
+#define WL_CHANSPEC_CTL_SB_LU WL_CHANSPEC_CTL_SB_LLU
+#define WL_CHANSPEC_CTL_SB_UL WL_CHANSPEC_CTL_SB_LUL
+#define WL_CHANSPEC_CTL_SB_UU WL_CHANSPEC_CTL_SB_LUU
+#define WL_CHANSPEC_CTL_SB_L WL_CHANSPEC_CTL_SB_LLL
+#define WL_CHANSPEC_CTL_SB_U WL_CHANSPEC_CTL_SB_LLU
+#define WL_CHANSPEC_CTL_SB_LOWER WL_CHANSPEC_CTL_SB_LLL
+#define WL_CHANSPEC_CTL_SB_UPPER WL_CHANSPEC_CTL_SB_LLU
+#define WL_CHANSPEC_CTL_SB_NONE WL_CHANSPEC_CTL_SB_LLL
+
+/* channel sideband indication for bandwidth >= 240MHz */
+#define WL_CHANSPEC_GE240_SB_MASK 0x0780u
+#define WL_CHANSPEC_GE240_SB_SHIFT 7u
+
+/* Bandwidth field */
+#define WL_CHANSPEC_BW_MASK 0x3800u
+#define WL_CHANSPEC_BW_SHIFT 11u
+#define WL_CHANSPEC_BW_320 0x0000u
+#define WL_CHANSPEC_BW_160160 0x0800u
+#define WL_CHANSPEC_BW_20 0x1000u
+#define WL_CHANSPEC_BW_40 0x1800u
+#define WL_CHANSPEC_BW_80 0x2000u
+#define WL_CHANSPEC_BW_160 0x2800u
+#define WL_CHANSPEC_BW_8080 0x3000u
+#define WL_CHANSPEC_BW_240 0x3800u
+
+/* Band field */
+#define WL_CHANSPEC_BAND_MASK 0xc000u
+#define WL_CHANSPEC_BAND_SHIFT 14u
+#define WL_CHANSPEC_BAND_2G 0x0000u
+#define WL_CHANSPEC_BAND_6G 0x4000u
+#define WL_CHANSPEC_BAND_4G 0x8000u
+#define WL_CHANSPEC_BAND_5G 0xc000u
+
+#define INVCHANSPEC 255u
+#define MAX_CHANSPEC 0xFFFFu
+
+#define WL_CHSPEC_BW(chspec) ((chspec & WL_CHANSPEC_BW_MASK) >> WL_CHANSPEC_BW_SHIFT)
+#define MAX_BW_NUM (uint8)(((WL_CHANSPEC_BW_MASK) >> WL_CHANSPEC_BW_SHIFT))
+
+#define WL_CHANNEL_BAND(ch) (((uint)(ch) <= CH_MAX_2G_CHANNEL) ? \
+ WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G)
+
+/* channel defines */
+#define LOWER_20_SB(channel) (((channel) > CH_10MHZ_APART) ? \
+ ((channel) - CH_10MHZ_APART) : 0)
+#define UPPER_20_SB(channel) (((channel) < (MAXCHANNEL - CH_10MHZ_APART)) ? \
+ ((channel) + CH_10MHZ_APART) : 0)
+
+/* pass an 80MHz channel number (uint8) to get respective LL, UU, LU, UL */
+#define LL_20_SB(channel) (((channel) > 3 * CH_10MHZ_APART) ? ((channel) - 3 * CH_10MHZ_APART) : 0)
+#define UU_20_SB(channel) (((channel) < (MAXCHANNEL - 3 * CH_10MHZ_APART)) ? \
+ ((channel) + 3 * CH_10MHZ_APART) : 0)
+#define LU_20_SB(channel) LOWER_20_SB(channel)
+#define UL_20_SB(channel) UPPER_20_SB(channel)
+
+#define LOWER_40_SB(channel) ((channel) - CH_20MHZ_APART)
+#define UPPER_40_SB(channel) ((channel) + CH_20MHZ_APART)
+
+#ifndef CHSPEC_WLCBANDUNIT
+#define CHSPEC_WLCBANDUNIT(chspec) \
+ ((CHSPEC_IS5G(chspec) || CHSPEC_IS6G(chspec)) ? BAND_5G_INDEX : BAND_2G_INDEX)
+#endif
+#define CH20MHZ_CHSPEC(channel) (chanspec_t)((chanspec_t)(channel) | WL_CHANSPEC_BW_20 | \
+ WL_CHANNEL_BAND(channel))
+#define NEXT_20MHZ_CHAN(channel) (((channel) < (MAXCHANNEL - CH_20MHZ_APART)) ? \
+ ((channel) + CH_20MHZ_APART) : 0)
+#define CH40MHZ_CHSPEC(channel, ctlsb) (chanspec_t) \
+ ((channel) | (ctlsb) | WL_CHANSPEC_BW_40 | \
+ WL_CHANNEL_BAND(channel))
+#define CH80MHZ_CHSPEC(channel, ctlsb) (chanspec_t) \
+ ((channel) | (ctlsb) | \
+ WL_CHANSPEC_BW_80 | WL_CHANSPEC_BAND_5G)
+#define CH160MHZ_CHSPEC(channel, ctlsb) (chanspec_t) \
+ ((channel) | (ctlsb) | \
+ WL_CHANSPEC_BW_160 | WL_CHANSPEC_BAND_5G)
+
+/* simple MACROs to get different fields of chanspec */
+#define CHSPEC_CHANNEL(chspec) ((uint8)((chspec) & WL_CHANSPEC_CHAN_MASK))
+#define CHSPEC_CHAN0(chspec) (((chspec) & WL_CHANSPEC_CHAN0_MASK) >> WL_CHANSPEC_CHAN0_SHIFT)
+#define CHSPEC_CHAN1(chspec) (((chspec) & WL_CHANSPEC_CHAN1_MASK) >> WL_CHANSPEC_CHAN1_SHIFT)
+#define CHSPEC_BAND(chspec) ((chspec) & WL_CHANSPEC_BAND_MASK)
+#define CHSPEC_CTL_SB(chspec) ((chspec) & WL_CHANSPEC_CTL_SB_MASK)
+#define CHSPEC_BW(chspec) ((chspec) & WL_CHANSPEC_BW_MASK)
+#define CHSPEC_GE240_CHAN(chspec) (((chspec) & WL_CHANSPEC_GE240_CHAN_MASK) >> \
+ WL_CHANSPEC_GE240_CHAN_SHIFT)
+#define CHSPEC_GE240_SB(chspec) ((chspec) & WL_CHANSPEC_GE240_SB_MASK)
+
+#define CHSPEC_IS20(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_20)
+#define CHSPEC_IS20_5G(chspec) ((((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_20) && \
+ CHSPEC_IS5G(chspec))
+#ifndef CHSPEC_IS40
+#define CHSPEC_IS40(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_40)
+#endif
+#ifndef CHSPEC_IS80
+#define CHSPEC_IS80(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_80)
+#endif
+#ifndef CHSPEC_IS160
+#define CHSPEC_IS160(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_160)
+#endif
+#define CHSPEC_IS8080(chspec) (FALSE)
+#ifndef CHSPEC_IS320
+#ifdef WL11BE
+#define CHSPEC_IS320(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_320)
+#else
+#define CHSPEC_IS320(chspec) (FALSE)
+#endif
+#endif /* CHSPEC_IS320 */
+#ifndef CHSPEC_IS240
+#ifdef WL11BE
+#define CHSPEC_IS240(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_240)
+#else
+#define CHSPEC_IS240(chspec) (FALSE)
+#endif
+#endif /* CHSPEC_IS240 */
+
+/* pass a center channel and get channel offset from it by 10MHz */
+#define CH_OFF_10MHZ_MULTIPLES(channel, offset) \
+((uint8) (((offset) < 0) ? \
+ (((channel) > (WL_CHANSPEC_CHAN_MASK & ((uint16)((-(offset)) * CH_10MHZ_APART)))) ? \
+ ((channel) + (offset) * CH_10MHZ_APART) : 0) : \
+ ((((uint16)(channel) + (uint16)(offset) * CH_10MHZ_APART) < (uint16)MAXCHANNEL) ? \
+ ((channel) + (offset) * CH_10MHZ_APART) : 0)))
+
+uint wf_chspec_first_20_sb(chanspec_t chspec);
+
+#if defined(WL_BW160MHZ)
+/* pass a 160MHz center channel to get 20MHz subband channel numbers */
+#define LLL_20_SB_160(channel) CH_OFF_10MHZ_MULTIPLES(channel, -7)
+#define LLU_20_SB_160(channel) CH_OFF_10MHZ_MULTIPLES(channel, -5)
+#define LUL_20_SB_160(channel) CH_OFF_10MHZ_MULTIPLES(channel, -3)
+#define LUU_20_SB_160(channel) CH_OFF_10MHZ_MULTIPLES(channel, -1)
+#define ULL_20_SB_160(channel) CH_OFF_10MHZ_MULTIPLES(channel, 1)
+#define ULU_20_SB_160(channel) CH_OFF_10MHZ_MULTIPLES(channel, 3)
+#define UUL_20_SB_160(channel) CH_OFF_10MHZ_MULTIPLES(channel, 5)
+#define UUU_20_SB_160(channel) CH_OFF_10MHZ_MULTIPLES(channel, 7)
+
+/* get lowest 20MHz sideband of a given chspec
+ * (works with 20, 40, 80, 160)
+ */
+#define CH_FIRST_20_SB(chspec) ((uint8) (\
+ CHSPEC_IS160(chspec) ? LLL_20_SB_160(CHSPEC_CHANNEL(chspec)) : (\
+ CHSPEC_IS80(chspec) ? LL_20_SB(CHSPEC_CHANNEL(chspec)) : (\
+ CHSPEC_IS40(chspec) ? LOWER_20_SB(CHSPEC_CHANNEL(chspec)) : \
+ CHSPEC_CHANNEL(chspec)))))
+
+/* get uppermost 20MHz sideband of a given chspec
+ * (works with 20, 40, 80, 160)
+ */
+#define CH_LAST_20_SB(chspec) ((uint8) (\
+ CHSPEC_IS160(chspec) ? UUU_20_SB_160(CHSPEC_CHANNEL(chspec)) : (\
+ CHSPEC_IS80(chspec) ? UU_20_SB(CHSPEC_CHANNEL(chspec)) : (\
+ CHSPEC_IS40(chspec) ? UPPER_20_SB(CHSPEC_CHANNEL(chspec)) : \
+ CHSPEC_CHANNEL(chspec)))))
+
+/* call this with chspec and a valid 20MHz sideband of this channel to get the next 20MHz sideband
+ * (works with 20, 40, 80, 160)
+ * resolves to 0 if called with the uppermost channel
+ */
+#define CH_NEXT_20_SB(chspec, channel) ((uint8) (\
+ ((uint8) ((channel) + CH_20MHZ_APART) > CH_LAST_20_SB(chspec) ? 0 : \
+ ((channel) + CH_20MHZ_APART))))
+
+#else /* WL_BW160MHZ */
+
+#define LLL_20_SB_160(channel) 0
+#define LLU_20_SB_160(channel) 0
+#define LUL_20_SB_160(channel) 0
+#define LUU_20_SB_160(channel) 0
+#define ULL_20_SB_160(channel) 0
+#define ULU_20_SB_160(channel) 0
+#define UUL_20_SB_160(channel) 0
+#define UUU_20_SB_160(channel) 0
+
+/* get lowest 20MHz sideband of a given chspec
+ * (works with 20, 40, 80)
+ */
+#define CH_FIRST_20_SB(chspec) ((uint8) (\
+ CHSPEC_IS80(chspec) ? LL_20_SB(CHSPEC_CHANNEL(chspec)) : (\
+ CHSPEC_IS40(chspec) ? LOWER_20_SB(CHSPEC_CHANNEL(chspec)) : \
+ CHSPEC_CHANNEL(chspec))))
+/* get uppermost 20MHz sideband of a given chspec
+ * (works with 20, 40, 80)
+ */
+#define CH_LAST_20_SB(chspec) ((uint8) (\
+ CHSPEC_IS80(chspec) ? UU_20_SB(CHSPEC_CHANNEL(chspec)) : (\
+ CHSPEC_IS40(chspec) ? UPPER_20_SB(CHSPEC_CHANNEL(chspec)) : \
+ CHSPEC_CHANNEL(chspec))))
+
+/* call this with chspec and a valid 20MHz sideband of this channel to get the next 20MHz sideband
+ * (works with 20, 40, 80)
+ * resolves to 0 if called with the uppermost channel
+ */
+#define CH_NEXT_20_SB(chspec, channel) ((uint8) (\
+ ((uint8) ((channel) + CH_20MHZ_APART) > CH_LAST_20_SB(chspec) ? 0 : \
+ ((channel) + CH_20MHZ_APART))))
+
+#endif /* WL_BW160MHZ */
+
+/* Iterator for 20MHz side bands of a chanspec: (chanspec_t chspec, uint8 channel)
+ * 'chspec' chanspec_t of interest (used in loop, better to pass a resolved value than a macro)
+ * 'channel' must be a variable (not an expression).
+ */
+#define FOREACH_20_SB(chspec, channel) \
+ for (channel = (uint8)wf_chspec_first_20_sb(chspec); channel; \
+ channel = CH_NEXT_20_SB((chspec), channel))
+
+/* Uses iterator to populate array with all side bands involved (sorted lower to upper).
+ * 'chspec' chanspec_t of interest
+ * 'psb' pointer to uint8 array of enough size to hold all side bands for the given chspec
+ */
+#define GET_ALL_SB(chspec, psb) do { \
+ uint8 channel, idx = 0; \
+ chanspec_t chspec_local = chspec; \
+ FOREACH_20_SB(chspec_local, channel) \
+ (psb)[idx++] = channel; \
+} while (0)
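+
+/* Usage sketch (illustrative; 'chspec' is the caller's chanspec_t): visit each
+ * 20MHz sub-band channel of a chanspec, lower to upper ('channel' must be a
+ * plain uint8 variable, per the iterator's contract above):
+ *
+ *     uint8 channel;
+ *     FOREACH_20_SB(chspec, channel) {
+ *         ... channel is one 20MHz sub-band channel number ...
+ *     }
+ */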
+
+/* given a chanspec of any bw, tests if primary20 SB is in lower 20, 40, 80 respectively */
+#define IS_CTL_IN_L20(chspec) !((chspec) & WL_CHANSPEC_CTL_SB_U) /* CTL SB is in low 20 of any 40 */
+#define IS_CTL_IN_L40(chspec) !((chspec) & WL_CHANSPEC_CTL_SB_UL) /* in low 40 of any 80 */
+#define IS_CTL_IN_L80(chspec) !((chspec) & WL_CHANSPEC_CTL_SB_ULL) /* in low 80 of 160 */
+
+#define BW_LE40(bw) ((bw) == WL_CHANSPEC_BW_20 || ((bw) == WL_CHANSPEC_BW_40))
+#define BW_LE80(bw) (BW_LE40(bw) || ((bw) == WL_CHANSPEC_BW_80))
+#define BW_LE160(bw) (BW_LE80(bw) || ((bw) == WL_CHANSPEC_BW_160))
+
+#define CHSPEC_IS6G(chspec) (((chspec) & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_6G)
+#define CHSPEC_IS5G(chspec) (((chspec) & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_5G)
+#define CHSPEC_IS2G(chspec) (((chspec) & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_2G)
+#define CHSPEC_SB_UPPER(chspec) \
+ ((((chspec) & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_UPPER) && \
+ (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_40))
+#define CHSPEC_SB_LOWER(chspec) \
+ ((((chspec) & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_LOWER) && \
+ (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_40))
+
+#ifdef WL_BAND6G
+#define CHSPEC2WLC_BAND(chspec) (CHSPEC_IS2G(chspec) ? WLC_BAND_2G : CHSPEC_IS5G(chspec) ? \
+ WLC_BAND_5G : WLC_BAND_6G)
+#else
+#define CHSPEC2WLC_BAND(chspec) (CHSPEC_IS2G(chspec) ? WLC_BAND_2G : WLC_BAND_5G)
+#endif
+
+#define CHSPEC_BW_CHANGED(prev_chspec, curr_chspec) \
+ (((prev_chspec) & WL_CHANSPEC_BW_MASK) != ((curr_chspec) & WL_CHANSPEC_BW_MASK))
+
+#if (defined(WL_BAND6G) && !defined(WL_BAND6G_DISABLED))
+#define CHSPEC_IS_5G_6G(chspec) (CHSPEC_IS5G(chspec) || CHSPEC_IS6G(chspec))
+#define CHSPEC_IS20_5G_6G(chspec) ((((chspec) & \
+ WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_20) && \
+ (CHSPEC_IS5G(chspec) || CHSPEC_IS6G(chspec)))
+#else
+#define CHSPEC_IS_5G_6G(chspec) (CHSPEC_IS5G(chspec))
+#define CHSPEC_IS20_5G_6G(chspec) (CHSPEC_IS20_5G(chspec))
+#endif
+
+/**
+ * Number of chars needed for wf_chspec_ntoa() destination character buffer.
+ */
+#ifdef WL11BE
+#define CHANSPEC_STR_LEN 22
+#else
+#define CHANSPEC_STR_LEN 20
+#endif
+
+/*
+ * This function returns TRUE if the two chanspecs can co-exist in the PHY.
+ * In addition to the primary20 channel, it checks the sideband for 2G 40MHz channels.
+ */
+extern bool wf_chspec_coexist(chanspec_t chspec1, chanspec_t chspec2);
+
+#define CHSPEC_IS_BW_160_WIDE(chspec) (CHSPEC_BW(chspec) == WL_CHANSPEC_BW_160 ||\
+ CHSPEC_BW(chspec) == WL_CHANSPEC_BW_8080)
+
+/* BW inequality comparisons, GE (>=), GT (>) */
+
+#define CHSPEC_BW_GE(chspec, bw) (CHSPEC_BW(chspec) >= (bw))
+
+#define CHSPEC_BW_GT(chspec, bw) (CHSPEC_BW(chspec) > (bw))
+
+/* Legacy Chanspec defines
+ * These are the defines for the previous format of the chanspec_t
+ */
+#define WL_LCHANSPEC_CHAN_MASK 0x00ff
+#define WL_LCHANSPEC_CHAN_SHIFT 0
+
+#define WL_LCHANSPEC_CTL_SB_MASK 0x0300
+#define WL_LCHANSPEC_CTL_SB_SHIFT 8
+#define WL_LCHANSPEC_CTL_SB_LOWER 0x0100
+#define WL_LCHANSPEC_CTL_SB_UPPER 0x0200
+#define WL_LCHANSPEC_CTL_SB_NONE 0x0300
+
+#define WL_LCHANSPEC_BW_MASK 0x0C00
+#define WL_LCHANSPEC_BW_SHIFT 10
+#define WL_LCHANSPEC_BW_10 0x0400
+#define WL_LCHANSPEC_BW_20 0x0800
+#define WL_LCHANSPEC_BW_40 0x0C00
+
+#define WL_LCHANSPEC_BAND_MASK 0xf000
+#define WL_LCHANSPEC_BAND_SHIFT 12
+#define WL_LCHANSPEC_BAND_5G 0x1000
+#define WL_LCHANSPEC_BAND_2G 0x2000
+
+#define LCHSPEC_CHANNEL(chspec) ((uint8)((chspec) & WL_LCHANSPEC_CHAN_MASK))
+#define LCHSPEC_BAND(chspec) ((chspec) & WL_LCHANSPEC_BAND_MASK)
+#define LCHSPEC_CTL_SB(chspec) ((chspec) & WL_LCHANSPEC_CTL_SB_MASK)
+#define LCHSPEC_BW(chspec) ((chspec) & WL_LCHANSPEC_BW_MASK)
+#define LCHSPEC_IS20(chspec) (((chspec) & WL_LCHANSPEC_BW_MASK) == WL_LCHANSPEC_BW_20)
+#define LCHSPEC_IS40(chspec) (((chspec) & WL_LCHANSPEC_BW_MASK) == WL_LCHANSPEC_BW_40)
+#define LCHSPEC_IS5G(chspec) (((chspec) & WL_LCHANSPEC_BAND_MASK) == WL_LCHANSPEC_BAND_5G)
+#define LCHSPEC_IS2G(chspec) (((chspec) & WL_LCHANSPEC_BAND_MASK) == WL_LCHANSPEC_BAND_2G)
+
+#define LCHSPEC_SB_UPPER(chspec) \
+ ((((chspec) & WL_LCHANSPEC_CTL_SB_MASK) == WL_LCHANSPEC_CTL_SB_UPPER) && \
+ (((chspec) & WL_LCHANSPEC_BW_MASK) == WL_LCHANSPEC_BW_40))
+#define LCHSPEC_SB_LOWER(chspec) \
+ ((((chspec) & WL_LCHANSPEC_CTL_SB_MASK) == WL_LCHANSPEC_CTL_SB_LOWER) && \
+ (((chspec) & WL_LCHANSPEC_BW_MASK) == WL_LCHANSPEC_BW_40))
+
+#define LCHSPEC_CREATE(chan, band, bw, sb) ((uint16)((chan) | (sb) | (bw) | (band)))
+
+#define CH20MHZ_LCHSPEC(channel) \
+ (chanspec_t)((chanspec_t)(channel) | WL_LCHANSPEC_BW_20 | \
+ WL_LCHANSPEC_CTL_SB_NONE | (((channel) <= CH_MAX_2G_CHANNEL) ? \
+ WL_LCHANSPEC_BAND_2G : WL_LCHANSPEC_BAND_5G))
+
+#define GET_ALL_EXT wf_get_all_ext
+
+/*
+ * WF_CHAN_FACTOR_* constants are used to calculate channel frequency
+ * given a channel number.
+ * chan_freq (MHz) = chan_factor * 0.5 + chan_number * 5
+ */
+
+/**
+ * Channel Factor for the starting frequency of 2.4 GHz channels.
+ * The value corresponds to 2407 MHz.
+ */
+#define WF_CHAN_FACTOR_2_4_G 4814u /* 2.4 GHz band, 2407 MHz */
+
+/**
+ * Channel Factor for the starting frequency of 4.9 GHz channels.
+ * The value corresponds to 4000 MHz.
+ */
+#define WF_CHAN_FACTOR_4_G 8000u /* 4.9 GHz band for Japan */
+
+/**
+ * Channel Factor for the starting frequency of 5 GHz channels.
+ * The value corresponds to 5000 MHz.
+ */
+#define WF_CHAN_FACTOR_5_G 10000u /* 5 GHz band, 5000 MHz */
+
+/**
+ * Channel Factor for the starting frequency of 6 GHz channels.
+ * The value corresponds to 5950 MHz.
+ */
+#define WF_CHAN_FACTOR_6_G 11900u /* 6 GHz band, 5950 MHz */
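+
+/* Worked examples (illustrative), using chan_freq = chan_factor * 0.5 MHz +
+ * chan_number * 5 MHz:
+ *     2.4 GHz channel 1:  4814 / 2 + 1 * 5   = 2412 MHz
+ *     5 GHz channel 36:   10000 / 2 + 36 * 5 = 5180 MHz
+ *     6 GHz channel 1:    11900 / 2 + 1 * 5  = 5955 MHz
+ */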
+
+#define WLC_2G_25MHZ_OFFSET 5 /* 2.4GHz band channel offset */
+
+/**
+ * Number of 20MHz sub-bands for the specified chanspec bandwidth
+ */
+#define WF_NUM_SIDEBANDS_40MHZ 2u
+#define WF_NUM_SIDEBANDS_80MHZ 4u
+#define WF_NUM_SIDEBANDS_160MHZ 8u
+
+/**
+ * Return the chanspec bandwidth in MHz
+ */
+uint wf_bw_chspec_to_mhz(chanspec_t chspec);
+
+/**
+ * Return the bandwidth string for a given chanspec
+ */
+const char *wf_chspec_to_bw_str(chanspec_t chspec);
+
+/**
+ * Convert chanspec to an ASCII string; formats the hex value of an invalid chanspec.
+ */
+char * wf_chspec_ntoa_ex(chanspec_t chspec, char *buf);
+
+/**
+ * Convert chanspec to an ASCII string; returns NULL on error.
+ */
+char * wf_chspec_ntoa(chanspec_t chspec, char *buf);
+
+/**
+ * Convert an ASCII string to a chanspec
+ */
+chanspec_t wf_chspec_aton(const char *a);
+
+/**
+ * Verify the chanspec fields are valid for a chanspec_t
+ */
+bool wf_chspec_malformed(chanspec_t chanspec);
+
+/**
+ * Verify the chanspec specifies a valid channel according to 802.11.
+ */
+bool wf_chspec_valid(chanspec_t chanspec);
+
+/**
+ * Verify that the channel is a valid 20MHz channel according to 802.11.
+ */
+bool wf_valid_20MHz_chan(uint channel, chanspec_band_t band);
+
+/**
+ * Verify that the center channel is a valid 40MHz center channel according to 802.11.
+ */
+bool wf_valid_40MHz_center_chan(uint center_channel, chanspec_band_t band);
+
+/**
+ * Verify that the center channel is a valid 80MHz center channel according to 802.11.
+ */
+bool wf_valid_80MHz_center_chan(uint center_channel, chanspec_band_t band);
+
+/**
+ * Verify that the center channel is a valid 160MHz center channel according to 802.11.
+ */
+bool wf_valid_160MHz_center_chan(uint center_channel, chanspec_band_t band);
+
+/**
+ * Verify that the center channel is a valid 240MHz center channel according to 802.11.
+ */
+bool wf_valid_240MHz_center_chan(uint center_channel, chanspec_band_t band);
+
+/**
+ * Verify that the center channel is a valid 320MHz center channel according to 802.11.
+ */
+bool wf_valid_320MHz_center_chan(uint center_channel, chanspec_band_t band);
+
+/**
+ * Create a 20MHz chanspec for the given band.
+ */
+chanspec_t wf_create_20MHz_chspec(uint channel, chanspec_band_t band);
+
+/**
+ * Returns the chanspec for a 40MHz channel given the primary 20MHz channel number,
+ * the center channel number, and the band.
+ */
+chanspec_t wf_create_40MHz_chspec(uint primary_channel, uint center_channel,
+ chanspec_band_t band);
+
+/**
+ * Returns the chanspec for a 40MHz channel given the primary 20MHz channel number,
+ * the sub-band for the primary 20MHz channel, and the band.
+ */
+chanspec_t wf_create_40MHz_chspec_primary_sb(uint primary_channel,
+ chanspec_subband_t primary_subband,
+ chanspec_band_t band);
+/**
+ * Returns the chanspec for an 80MHz channel given the primary 20MHz channel number,
+ * the center channel number, and the band.
+ */
+chanspec_t wf_create_80MHz_chspec(uint primary_channel, uint center_channel,
+ chanspec_band_t band);
+
+/**
+ * Returns the chanspec for a 160MHz channel given the primary 20MHz channel number,
+ * the center channel number, and the band.
+ */
+chanspec_t wf_create_160MHz_chspec(uint primary_channel, uint center_channel,
+ chanspec_band_t band);
+
+/**
+ * Returns the chanspec for a 240MHz channel given the primary 20MHz channel number,
+ * the center channel number, and the band.
+ */
+chanspec_t wf_create_240MHz_chspec(uint primary_channel, uint center_channel,
+ chanspec_band_t band);
+
+/**
+ * Returns the chanspec for a 320MHz channel given the primary 20MHz channel number,
+ * the center channel number, and the band.
+ */
+chanspec_t wf_create_320MHz_chspec(uint primary_channel, uint center_channel,
+ chanspec_band_t band);
+
+/**
+ * Returns the chanspec for an 80+80MHz channel given the primary 20MHz channel number,
+ * the center channel numbers for each frequency segment, and the band.
+ */
+chanspec_t wf_create_8080MHz_chspec(uint primary_channel, uint chan0, uint chan1,
+ chanspec_band_t band);
+
+/**
+ * Returns the chanspec for a 160+160MHz channel given the primary 20MHz channel number,
+ * the center channel numbers for each frequency segment, and the band.
+ */
+chanspec_t wf_create_160160MHz_chspec(uint primary_channel, uint chan0, uint chan1,
+ chanspec_band_t band);
+/**
+ * Returns the chanspec given the primary 20MHz channel number,
+ * the center channel number, channel width, and the band.
+ *
+ * The channel width must be 20, 40, 80, or 160 MHz.
+ */
+chanspec_t wf_create_chspec(uint primary_channel, uint center_channel,
+ chanspec_bw_t bw, chanspec_band_t band);
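+
+/*
+ * Illustrative use (a sketch; WL_CHANSPEC_BW_80 and WL_CHANSPEC_BAND_5G are
+ * assumed to be the bandwidth/band encodings defined earlier in this header,
+ * and an error return of INVCHANSPEC is assumed, matching wf_chspec_80()):
+ *
+ *	// 80MHz channel with center channel 42 and primary 20MHz channel 36
+ *	chanspec_t cs = wf_create_chspec(36, 42,
+ *		WL_CHANSPEC_BW_80, WL_CHANSPEC_BAND_5G);
+ *
+ *	if (cs != INVCHANSPEC && wf_chspec_valid(cs)) {
+ *		// cs is safe to hand to the rest of the chanspec API
+ *	}
+ */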
+
+/**
+ * Returns the chanspec given the primary 20MHz channel number,
+ * channel width, and the band.
+ */
+chanspec_t wf_create_chspec_from_primary(uint primary_channel, chanspec_bw_t bw,
+ chanspec_band_t band);
+
+/**
+ * Returns the chanspec given the index of the primary 20MHz channel within the whole
+ * channel, the center channel number, the channel width, and the band.
+ *
+ * The channel width must be 20, 40, 80, or 160 MHz.
+ */
+chanspec_t wf_create_chspec_sb(uint sb, uint center_channel, chanspec_bw_t bw,
+ chanspec_band_t band);
+
+/**
+ * Returns the chanspec for a 160+160MHz channel given the index of the primary 20MHz
+ * channel within the whole channel pair (0-3 if within chan0, 4-7 if within chan1),
+ * the center channel numbers for each frequency segment, and the band.
+ */
+chanspec_t wf_create_160160MHz_chspec_sb(uint sb, uint chan0, uint chan1,
+ chanspec_band_t band);
+
+/**
+ * Return the primary 20MHz channel.
+ */
+uint8 wf_chspec_primary20_chan(chanspec_t chspec);
+
+/* alias for old function name */
+#define wf_chspec_ctlchan(c) wf_chspec_primary20_chan(c)
+
+/**
+ * Return the primary 20MHz chanspec of a given chanspec
+ */
+chanspec_t wf_chspec_primary20_chspec(chanspec_t chspec);
+
+/* alias for old function name */
+#define wf_chspec_ctlchspec(c) wf_chspec_primary20_chspec(c)
+
+/**
+ * Return the primary 40MHz chanspec for a 40MHz or wider channel
+ */
+chanspec_t wf_chspec_primary40_chspec(chanspec_t chspec);
+
+/**
+ * Return the channel number for a given frequency and base frequency
+ */
+int wf_mhz2channel(uint freq, uint start_factor);
+
+/**
+ * Return the center frequency in MHz of the given channel and base frequency.
+ */
+int wf_channel2mhz(uint channel, uint start_factor);
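+
+/*
+ * Worked example (a sketch; WF_CHAN_FACTOR_5_G is assumed to be the 5GHz
+ * base-frequency factor defined earlier in this header, i.e. twice the
+ * 5000MHz base, so start_factor / 2 is the base frequency in MHz):
+ *
+ *	int freq = wf_channel2mhz(36, WF_CHAN_FACTOR_5_G);	// 5180
+ *	int ch = wf_mhz2channel(5180, WF_CHAN_FACTOR_5_G);	// 36
+ */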
+
+/**
+ * Returns the 80MHz chanspec corresponding to the following input
+ * parameters
+ *
+ *	primary_channel - primary 20MHz channel
+ *	center_channel  - center frequency of the 80MHz channel
+ *
+ * The center_channel can be one of {42, 58, 106, 122, 138, 155}
+ *
+ * Returns INVCHANSPEC in case of error
+ */
+extern chanspec_t wf_chspec_80(uint8 center_channel, uint8 primary_channel);
+
+/**
+ * Convert a control (ctl) channel and bandwidth to a chanspec
+ *
+ * @param ctl_ch channel
+ * @param bw bandwidth
+ *
+ * @return > 0 if successful or 0 otherwise
+ *
+ */
+extern uint16 wf_channel2chspec(uint ctl_ch, uint bw);
+
+/*
+ * Returns the 80+80 MHz chanspec corresponding to the following input parameters
+ *
+ * primary_20mhz - Primary 20 MHz channel
+ * chan0_80MHz - center channel number of one frequency segment
+ * chan1_80MHz - center channel number of the other frequency segment
+ *
+ * Parameters chan0_80MHz and chan1_80MHz are channel numbers in {42, 58, 106, 122, 138, 155}.
+ * The primary channel must be contained in one of the 80MHz channels. This routine
+ * will determine which frequency segment is the primary 80 MHz segment.
+ *
+ * Returns INVCHANSPEC in case of error.
+ *
+ * Refer to 802.11-2016 section 22.3.14 "Channelization".
+ */
+extern chanspec_t wf_chspec_get8080_chspec(uint8 primary_20mhz,
+ uint8 chan0_80Mhz, uint8 chan1_80Mhz);
+
+/**
+ * Returns the center channel of the primary 80 MHz sub-band of the provided chanspec
+ *
+ * @param chspec input chanspec
+ *
+ * @return center channel number of the primary 80MHz sub-band of the input.
+ * Will return the center channel of an input 80MHz chspec.
+ * Will return INVCHANNEL if the chspec is malformed or less than 80MHz bw.
+ */
+extern uint8 wf_chspec_primary80_channel(chanspec_t chanspec);
+
+/**
+ * Returns the center channel of the secondary 80 MHz sub-band of the provided chanspec
+ *
+ * @param chspec input chanspec
+ *
+ * @return center channel number of the secondary 80MHz sub-band of the input.
+ * Will return INVCHANNEL if the chspec is malformed or bw is not greater than 80MHz.
+ */
+extern uint8 wf_chspec_secondary80_channel(chanspec_t chanspec);
+
+/**
+ * Returns the chanspec for the primary 80MHz sub-band of a 160MHz or 80+80 channel
+ *
+ * @param chspec input chanspec
+ *
+ * @return An 80MHz chanspec describing the primary 80MHz sub-band of the input.
+ * Will return an input 80MHz chspec as is.
+ * Will return INVCHANSPEC if the chspec is malformed or less than 80MHz bw.
+ */
+extern chanspec_t wf_chspec_primary80_chspec(chanspec_t chspec);
+
+/**
+ * Returns the chanspec for the secondary 80MHz sub-band of a 160MHz or 80+80 channel
+ * The sideband in the chanspec is always set to WL_CHANSPEC_CTL_SB_LL since this sub-band
+ * does not contain the primary 20MHz channel.
+ *
+ * @param chspec input chanspec
+ *
+ * @return An 80MHz chanspec describing the secondary 80MHz sub-band of the input.
+ * Will return INVCHANSPEC if the chspec is malformed or bw is not greater than 80MHz.
+ */
+extern chanspec_t wf_chspec_secondary80_chspec(chanspec_t chspec);
+
+/**
+ * Returns the center channel of the primary 160MHz sub-band of the provided chanspec
+ *
+ * @param chspec input chanspec
+ *
+ * @return center channel number of the primary 160MHz sub-band of the input.
+ * Will return the center channel of an input 160MHz chspec.
+ * Will return INVCHANNEL if the chspec is malformed or less than 160MHz bw.
+ */
+extern uint8 wf_chspec_primary160_channel(chanspec_t chanspec);
+
+/**
+ * Returns the chanspec for the primary 160MHz sub-band of a 320MHz channel
+ *
+ * @param chspec input chanspec
+ *
+ * @return A 160MHz chanspec describing the primary 160MHz sub-band of the input.
+ * Will return an input 160MHz chspec as is.
+ * Will return INVCHANSPEC if the chspec is malformed or less than 160MHz bw.
+ */
+extern chanspec_t wf_chspec_primary160_chspec(chanspec_t chspec);
+
+/*
+ * For a 160MHz or 80P80 chanspec, set ch[0]/ch[1] to be the low/high 80MHz channels
+ *
+ * For a 20/40/80MHz chanspec, set ch[0] to be the center freq, and ch[1] = -1
+ */
+extern void wf_chspec_get_80p80_channels(chanspec_t chspec, uint8 *ch);
+
+/* wf_chanspec_iter_... iterator API is deprecated. Use wlc_clm_chanspec_iter_... API instead */
+
+struct wf_iter_range {
+ uint8 start;
+ uint8 end;
+};
+
+/* Internal structure for wf_chanspec_iter_* functions.
+ * Do not directly access the members. Only use the related
+ * functions to query and manipulate the structure.
+ */
+typedef struct chanspec_iter {
+ uint8 state;
+ chanspec_t chanspec;
+ chanspec_band_t band;
+ chanspec_bw_t bw;
+ struct wf_iter_range range;
+ union {
+ uint8 range_id;
+ struct {
+ uint8 ch0;
+ uint8 ch1;
+ };
+ };
+} wf_chanspec_iter_t;
+
+/**
+ * Initialize a chanspec iteration structure.
+ * The parameters define the set of chanspecs to generate in the iteration.
+ * After initialization wf_chanspec_iter_current() will return the first chanspec
+ * in the set. A call to wf_chanspec_iter_next() will advance the iteration
+ * to the next chanspec in the set.
+ *
+ * Example use:
+ * wf_chanspec_iter_t iter;
+ * chanspec_t chanspec;
+ *
+ * wf_chanspec_iter_init(&iter, band, bw);
+ *
+ * while (wf_chanspec_iter_next(&iter, &chanspec)) {
+ * ... do some work ...
+ * }
+ *
+ * @param iter pointer to a wf_chanspec_iter_t structure to initialize
+ * @param band chanspec_band_t value specifying the band of interest
+ * @param bw chanspec_bw_t value specifying the bandwidth of interest,
+ * or INVCHANSPEC to specify all bandwidths
+ *
+ * @return TRUE on success, FALSE on error
+ */
+bool wf_chanspec_iter_init(wf_chanspec_iter_t *iter, chanspec_band_t band, chanspec_bw_t bw);
+
+/**
+ * Advance the iteration to the next chanspec in the set.
+ *
+ * @param iter pointer to a wf_chanspec_iter_t structure
+ * @param chspec pointer to storage for the next chanspec. Return value will be INVCHANSPEC
+ * if the iteration ended. Pass in NULL if return value is not desired.
+ *
+ * @return TRUE if there was another chanspec in the iteration, FALSE if not
+ */
+bool wf_chanspec_iter_next(wf_chanspec_iter_t *iter, chanspec_t *chspec);
+
+/**
+ * Return the current chanspec of the iteration.
+ *
+ * @param iter pointer to a wf_chanspec_iter_t structure
+ *
+ * @return the current chanspec_t
+ */
+chanspec_t wf_chanspec_iter_current(wf_chanspec_iter_t *iter);
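+
+/*
+ * Fuller iteration sketch (WL_CHANSPEC_BAND_5G and WL_CHANSPEC_BW_80 are
+ * assumed to be the encodings defined earlier in this header):
+ *
+ *	wf_chanspec_iter_t iter;
+ *	chanspec_t cs;
+ *
+ *	if (wf_chanspec_iter_init(&iter, WL_CHANSPEC_BAND_5G, WL_CHANSPEC_BW_80)) {
+ *		while (wf_chanspec_iter_next(&iter, &cs)) {
+ *			// cs visits each 80MHz 5GHz chanspec in turn;
+ *			// wf_chanspec_iter_current(&iter) returns the same value
+ *		}
+ *	}
+ */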
+
+/* Populates an array with all 20MHz side bands of a given chanspec_t in the following order:
+ * primary20, ext20, two ext40s, four ext80s.
+ * 'chspec' is the chanspec of interest
+ * 'chan_ptr' must point to a uint8 array long enough to hold all side bands of the given chspec
+ *
+ * Works with 20, 40, 80 and 160MHz chspecs
+ */
+
+extern void wf_get_all_ext(chanspec_t chspec, uint8 *chan_ptr);
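+
+/*
+ * Sizing sketch: a 160MHz chspec has 8 20MHz side bands
+ * (primary20 + ext20 + two ext40s + four ext80s), so an 8-entry array
+ * covers the widest supported input:
+ *
+ *	uint8 sb[8];
+ *
+ *	wf_get_all_ext(chspec, sb);	// sb[0] is the primary20 channel
+ */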
+
+/*
+ * Given two chanspecs, returns true if they overlap.
+ * (Overlap: At least one 20MHz subband is common between the two chanspecs provided)
+ */
+extern bool wf_chspec_overlap(chanspec_t chspec0, chanspec_t chspec1);
+
+extern uint8 channel_bw_to_width(chanspec_t chspec);
+
+uint8 wf_chspec_320_id2cch(chanspec_t chanspec);
+
+uint8 wf_chspec_240_id2cch(chanspec_t chanspec);
+
+#endif /* _bcmwifi_channels_h_ */
diff --git a/bcmdhd.101.10.361.x/include/bcmwifi_monitor.h b/bcmdhd.101.10.361.x/include/bcmwifi_monitor.h
new file mode 100755
index 0000000..41f1f94
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/bcmwifi_monitor.h
@@ -0,0 +1,98 @@
+/*
+ * Monitor Mode routines.
+ * This header file houses the defines and functions used by DHD
+ *
+ * Broadcom Proprietary and Confidential. Copyright (C) 2020,
+ * All Rights Reserved.
+ *
+ * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom;
+ * the contents of this file may not be disclosed to third parties,
+ * copied or duplicated in any form, in whole or in part, without
+ * the prior written permission of Broadcom.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Proprietary:>>
+ */
+#ifndef _BCMWIFI_MONITOR_H_
+#define _BCMWIFI_MONITOR_H_
+
+#include <monitor.h>
+
+typedef struct monitor_info monitor_info_t;
+
+typedef struct monitor_pkt_ts {
+ union {
+ uint32 ts_low; /* time stamp low 32 bits */
+ uint32 reserved; /* If timestamp not used */
+ };
+ union {
+ uint32 ts_high; /* time stamp high 28 bits */
+ union {
+ uint32 ts_high_ext :28; /* time stamp high 28 bits */
+ uint32 clk_id_ext :3; /* clock ID source */
+ uint32 phase :1; /* Phase bit */
+ uint32 marker_ext;
+ };
+ };
+} monitor_pkt_ts_t;
+
+typedef struct monitor_pkt_info {
+ uint32 marker;
+ /* timestamp */
+ monitor_pkt_ts_t ts;
+} monitor_pkt_info_t;
+
+typedef struct monitor_pkt_rssi {
+ int8 dBm; /* number of full dBms */
+ /* sub-dbm resolution */
+ int8 decidBm; /* sub dBms : value after the decimal point */
+} monitor_pkt_rssi_t;
+
+/* Structure to add specific information to the rxsts structure
+ * otherwise not available to all modules, such as per-core RSSI and qdBm resolution
+ */
+
+typedef struct monitor_pkt_rxsts {
+ wl_rxsts_t *rxsts;
+ uint8 corenum; /* number of cores/antennas */
+ monitor_pkt_rssi_t rxpwr[4];
+} monitor_pkt_rxsts_t;
+
+#define HE_EXTRACT_FROM_PLCP(plcp, ppdu_type, field) \
+ (getbits(plcp, D11_PHY_HDR_LEN, \
+ HE_ ## ppdu_type ## _PPDU_ ## field ## _IDX, \
+ HE_ ## ppdu_type ## _PPDU_ ## field ## _FSZ))
+
+#define HE_PACK_RTAP_FROM_PLCP(plcp, ppdu_type, field) \
+ (HE_EXTRACT_FROM_PLCP(plcp, ppdu_type, field) << \
+ HE_RADIOTAP_ ## field ## _SHIFT)
+
+#define HE_PACK_RTAP_GI_LTF_FROM_PLCP(plcp, ppdu_type, field, member) \
+ ((he_plcp2ltf_gi[HE_EXTRACT_FROM_PLCP(plcp, ppdu_type, field)].member) << \
+ HE_RADIOTAP_ ## field ## _SHIFT)
+
+#define HE_PACK_RTAP_FROM_VAL(val, field) \
+ ((val) << HE_RADIOTAP_ ## field ## _SHIFT)
+
+#define HE_PACK_RTAP_FROM_PRXS(rxh, corerev, corerev_minor, field) \
+ (HE_PACK_RTAP_FROM_VAL(D11PPDU_ ## field(rxh, corerev, corerev_minor), field))
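+
+/* Example expansion (illustrative): HE_EXTRACT_FROM_PLCP(plcp, SU, MCS)
+ * token-pastes to
+ *	getbits(plcp, D11_PHY_HDR_LEN, HE_SU_PPDU_MCS_IDX, HE_SU_PPDU_MCS_FSZ)
+ * i.e. it pulls the MCS bit-field out of an HE SU PLCP. The
+ * HE_*_PPDU_*_IDX/_FSZ pairs are assumed to come from the 802.11ax headers
+ * included by users of these macros.
+ */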
+
+/* channel bandwidth */
+#define WLC_20_MHZ 20	/**< 20MHz channel bandwidth */
+#define WLC_40_MHZ 40	/**< 40MHz channel bandwidth */
+#define WLC_80_MHZ 80	/**< 80MHz channel bandwidth */
+#define WLC_160_MHZ 160	/**< 160MHz channel bandwidth */
+#define WLC_240_MHZ 240	/**< 240MHz channel bandwidth */
+#define WLC_320_MHZ 320	/**< 320MHz channel bandwidth */
+
+extern uint16 bcmwifi_monitor_create(monitor_info_t**);
+extern void bcmwifi_set_corerev_major(monitor_info_t* info, int8 corerev);
+extern void bcmwifi_set_corerev_minor(monitor_info_t* info, int8 corerev);
+extern void bcmwifi_monitor_delete(monitor_info_t* info);
+extern uint16 bcmwifi_monitor(monitor_info_t* info,
+ monitor_pkt_info_t* pkt_info, void *pdata, uint16 len, void* pout,
+ uint16* offset, uint16 pad_req, void *wrxh_in, void *wrxh_last);
+extern uint16 wl_rxsts_to_rtap(monitor_pkt_rxsts_t* pkt_rxsts, void *pdata,
+ uint16 len, void* pout, uint16 pad_req);
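+
+/*
+ * Illustrative lifecycle (a sketch; treating a non-zero return from
+ * bcmwifi_monitor_create() as success is an assumption not spelled out by
+ * this header):
+ *
+ *	monitor_info_t *mon = NULL;
+ *
+ *	if (bcmwifi_monitor_create(&mon)) {
+ *		bcmwifi_set_corerev_major(mon, corerev);	// corerev from the dongle
+ *		// ... feed received frames through bcmwifi_monitor() ...
+ *		bcmwifi_monitor_delete(mon);
+ *	}
+ */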
+
+#endif /* _BCMWIFI_MONITOR_H_ */
diff --git a/bcmdhd.101.10.361.x/include/bcmwifi_radiotap.h b/bcmdhd.101.10.361.x/include/bcmwifi_radiotap.h
new file mode 100755
index 0000000..77fc0e7
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/bcmwifi_radiotap.h
@@ -0,0 +1,382 @@
+/*
+ * RadioTap utility routines for WL and Apps
+ * This header file houses the defines and function prototypes used by
+ * the wl driver, tools and apps.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _BCMWIFI_RADIOTAP_H_
+#define _BCMWIFI_RADIOTAP_H_
+
+#include <ieee80211_radiotap.h>
+#include <siutils.h>
+#include <monitor.h>
+#include <802.11.h>
+#include <802.11ax.h>
+#include "bcmwifi_monitor.h"
+#include <bcmwifi_rspec.h>
+#include <bcmwifi_rates.h>
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+/*
+ * RadioTap header specific implementation. Used by the MacOS implementation only.
+ */
+BWL_PRE_PACKED_STRUCT struct wl_radiotap_hdr {
+ struct ieee80211_radiotap_header ieee_radiotap;
+ uint64 tsft;
+ uint8 flags;
+ union {
+ uint8 rate;
+ uint8 pad;
+ } u;
+ uint16 channel_freq;
+ uint16 channel_flags;
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct wl_radiotap_sna {
+ uint8 signal;
+ uint8 noise;
+ uint8 antenna;
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct wl_radiotap_xchan {
+ uint32 xchannel_flags;
+ uint16 xchannel_freq;
+ uint8 xchannel_channel;
+ uint8 xchannel_maxpower;
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct wl_radiotap_ampdu {
+ uint32 ref_num;
+ uint16 flags;
+ uint8 delimiter_crc;
+ uint8 reserved;
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct wl_htmcs {
+ uint8 mcs_known;
+ uint8 mcs_flags;
+ uint8 mcs_index;
+ uint8 pad; /* pad to 32 bit aligned */
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct wl_vhtmcs {
+ uint16 vht_known; /* IEEE80211_RADIOTAP_VHT */
+ uint8 vht_flags;
+ uint8 vht_bw;
+ uint8 vht_mcs_nss[4];
+ uint8 vht_coding;
+ uint8 vht_group_id;
+ uint16 vht_partial_aid;
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct wl_radiotap_ht_tail {
+ struct wl_radiotap_xchan xc;
+ struct wl_radiotap_ampdu ampdu;
+ union {
+ struct wl_htmcs ht;
+ struct wl_vhtmcs vht;
+ } u;
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct bsd_header_rx {
+ struct wl_radiotap_hdr hdr;
+ /*
+	 * include extra space beyond the wl_radiotap_ht size
+	 * (the larger of the two structs in the union):
+	 * signal/noise/ant plus a max of 3 pad bytes for the xchannel
+	 * tail struct (xchannel and MCS info)
+ */
+ uint8 pad[3];
+ uint8 ht[sizeof(struct wl_radiotap_ht_tail)];
+} bsd_header_rx_t;
+
+typedef struct radiotap_parse {
+ struct ieee80211_radiotap_header *hdr;
+ void *fields;
+ uint fields_len;
+ uint idx;
+ uint offset;
+} radiotap_parse_t;
+
+struct rtap_field {
+ uint len;
+ uint align;
+};
+
+/* he radiotap - https://www.radiotap.org/fields/HE.html */
+#define HE_RADIOTAP_BSS_COLOR_SHIFT 0u
+#define HE_RADIOTAP_BEAM_CHANGE_SHIFT 6u
+#define HE_RADIOTAP_DL_UL_SHIFT 7u
+#define HE_RADIOTAP_MCS_SHIFT 8u
+#define HE_RADIOTAP_DCM_SHIFT 12u
+#define HE_RADIOTAP_CODING_SHIFT 13u
+#define HE_RADIOTAP_LDPC_SHIFT 14u
+#define HE_RADIOTAP_STBC_SHIFT 15u
+#define HE_RADIOTAP_SR_SHIFT 0u
+#define HE_RADIOTAP_STAID_SHIFT 4u
+#define HE_RADIOTAP_SR1_SHIFT 0u
+#define HE_RADIOTAP_SR2_SHIFT 4u
+#define HE_RADIOTAP_SR3_SHIFT 8u
+#define HE_RADIOTAP_SR4_SHIFT 12u
+#define HE_RADIOTAP_BW_SHIFT 0u
+#define HE_RADIOTAP_RU_ALLOC_SHIFT 0u
+#define HE_RADIOTAP_GI_SHIFT 4u
+#define HE_RADIOTAP_LTF_SIZE_SHIFT 6u
+#define HE_RADIOTAP_NUM_LTF_SHIFT 8u
+#define HE_RADIOTAP_PADDING_SHIFT 12u
+#define HE_RADIOTAP_TXBF_SHIFT 14u
+#define HE_RADIOTAP_PE_SHIFT 15u
+#define HE_RADIOTAP_NSTS_SHIFT 0u
+#define HE_RADIOTAP_DOPPLER_SHIFT 4u
+#define HE_RADIOTAP_TXOP_SHIFT 8u
+#define HE_RADIOTAP_MIDAMBLE_SHIFT 15u
+#define HE_RADIOTAP_DOPPLER_SET_NSTS_SHIFT 0u
+#define HE_RADIOTAP_DOPPLER_NOTSET_NSTS_SHIFT 0u
+
+/* he mu radiotap - https://www.radiotap.org/fields/HE-MU.html */
+#define HE_RADIOTAP_SIGB_MCS_SHIFT 0u
+#define HE_RADIOTAP_SIGB_MCS_KNOWN_SHIFT 4u
+#define HE_RADIOTAP_SIGB_DCM_SHIFT 5u
+#define HE_RADIOTAP_SIGB_DCM_KNOWN_SHIFT 6u
+#define HE_RADIOTAP_SIGB_COMP_KNOWN_SHIFT 14u
+#define HE_RADIOTAP_SIGB_COMP_SHIFT 3u
+#define HE_RADIOTAP_SIGB_SYMB_SHIFT 18u
+#define HE_RADIOTAP_BW_SIGA_SHIFT 0u
+#define HE_RADIOTAP_BW_SIGA_KNOWN_SHIFT 2u
+#define HE_RADIOTAP_SIGB_SYM_MU_MIMO_USER_SHIFT 4u
+#define HE_RADIOTAP_PRE_PUNCR_SIGA_SHIFT 8u
+#define HE_RADIOTAP_PRE_PUNCR_SIGA_KNOWN_SHIFT 10u
+
+#define WL_RADIOTAP_BRCM_SNS 0x01
+#define WL_RADIOTAP_BRCM_MCS 0x00000001
+#define WL_RADIOTAP_LEGACY_SNS 0x02
+#define WL_RADIOTAP_LEGACY_VHT 0x00000001
+#define WL_RADIOTAP_BRCM_PAD_SNS 0x3
+
+#define IEEE80211_RADIOTAP_HTMOD_40 0x01
+#define IEEE80211_RADIOTAP_HTMOD_SGI 0x02
+#define IEEE80211_RADIOTAP_HTMOD_GF 0x04
+#define IEEE80211_RADIOTAP_HTMOD_LDPC 0x08
+#define IEEE80211_RADIOTAP_HTMOD_STBC_MASK 0x30
+#define IEEE80211_RADIOTAP_HTMOD_STBC_SHIFT 4
+
+/* Dynamic bandwidth for VHT signaled in NONHT */
+#define WL_RADIOTAP_F_NONHT_VHT_DYN_BW 0x01
+/* VHT BW is valid in NONHT */
+#define WL_RADIOTAP_F_NONHT_VHT_BW 0x02
+
+typedef struct ieee80211_radiotap_header ieee80211_radiotap_header_t;
+
+/* VHT information in non-HT frames; primarily VHT b/w signaling
+ * in frames received at legacy rates.
+ */
+BWL_PRE_PACKED_STRUCT struct wl_radiotap_nonht_vht {
+ uint8 len; /* length of the field excluding 'len' field */
+ uint8 flags;
+ uint8 bw;
+ uint8 PAD; /* Add a pad so the next vendor entry, if any, will be 16 bit aligned */
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct wl_radiotap_nonht_vht wl_radiotap_nonht_vht_t;
+
+BWL_PRE_PACKED_STRUCT struct wl_radiotap_basic {
+ uint32 tsft_l;
+ uint32 tsft_h;
+ uint8 flags;
+	uint8 rate;		/* this field acts as a pad for non-legacy packets */
+ uint16 channel_freq;
+ uint16 channel_flags;
+ uint8 signal;
+ uint8 noise;
+ int8 antenna;
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct wl_radiotap_basic wl_radiotap_basic_t;
+
+/* radiotap standard - non-HT, non-VHT information with Broadcom vendor namespace extension
+ * that includes VHT information.
+ * Used with monitor type 3 when received by HT/Legacy PHY and received rate is legacy.
+ * Struct ieee80211_radiotap_header is of variable length due to possible
+ * extra it_present bitmap fields.
+ * It should not be included as a static length field here
+ */
+BWL_PRE_PACKED_STRUCT struct wl_radiotap_legacy {
+ wl_radiotap_basic_t basic;
+ uint8 PAD;
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct wl_radiotap_legacy wl_radiotap_legacy_t;
+
+#define WL_RADIOTAP_LEGACY_SKIP_LEN htol16(sizeof(struct wl_radiotap_legacy) - \
+ OFFSETOF(struct wl_radiotap_legacy, nonht_vht))
+
+#define WL_RADIOTAP_NONHT_VHT_LEN (sizeof(wl_radiotap_nonht_vht_t) - 1)
+
+/* Radiotap standard that includes HT information. This is for use with monitor type 3
+ * whenever frame is received by HT-PHY, and received rate is non-VHT.
+ * Struct ieee80211_radiotap_header is of variable length due to possible
+ * extra it_present bitmap fields.
+ * It should not be included as a static length field here
+ */
+BWL_PRE_PACKED_STRUCT struct wl_radiotap_ht {
+ wl_radiotap_basic_t basic;
+ uint8 PAD[3];
+ uint32 xchannel_flags;
+ uint16 xchannel_freq;
+ uint8 xchannel_channel;
+ uint8 xchannel_maxpower;
+ uint8 mcs_known;
+ uint8 mcs_flags;
+ uint8 mcs_index;
+ uint8 PAD;
+ uint32 ampdu_ref_num; /* A-MPDU ID */
+ uint16 ampdu_flags; /* A-MPDU flags */
+ uint8 ampdu_delim_crc; /* Delimiter CRC if present in flags */
+ uint8 ampdu_reserved;
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct wl_radiotap_ht wl_radiotap_ht_t;
+
+/* Radiotap standard that includes VHT information.
+ * This is for use with monitor type 3 whenever frame is
+ * received by HT-PHY (VHT-PHY), and received rate is VHT.
+ * Struct ieee80211_radiotap_header is of variable length due to possible
+ * extra it_present bitmap fields.
+ * It should not be included as a static length field here
+ */
+BWL_PRE_PACKED_STRUCT struct wl_radiotap_vht {
+ wl_radiotap_basic_t basic;
+ uint8 PAD[3];
+ uint32 ampdu_ref_num; /* A-MPDU ID */
+ uint16 ampdu_flags; /* A-MPDU flags */
+ uint8 ampdu_delim_crc; /* Delimiter CRC if present in flags */
+ uint8 ampdu_reserved;
+ uint16 vht_known; /* IEEE80211_RADIOTAP_VHT */
+ uint8 vht_flags; /* IEEE80211_RADIOTAP_VHT */
+ uint8 vht_bw; /* IEEE80211_RADIOTAP_VHT */
+ uint8 vht_mcs_nss[4]; /* IEEE80211_RADIOTAP_VHT */
+ uint8 vht_coding; /* IEEE80211_RADIOTAP_VHT */
+ uint8 vht_group_id; /* IEEE80211_RADIOTAP_VHT */
+ uint16 vht_partial_aid; /* IEEE80211_RADIOTAP_VHT */
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct wl_radiotap_vht wl_radiotap_vht_t;
+
+/* Radiotap standard that includes HE information. */
+BWL_PRE_PACKED_STRUCT struct wl_radiotap_he {
+ wl_radiotap_basic_t basic;
+ uint8 PAD[3];
+ uint32 ampdu_ref_num; /* A-MPDU ID */
+ uint16 ampdu_flags; /* A-MPDU flags */
+ uint8 ampdu_delim_crc; /* Delimiter CRC if present in flags */
+ uint8 ampdu_reserved;
+ uint16 data1;
+ uint16 data2;
+ uint16 data3;
+ uint16 data4;
+ uint16 data5;
+ uint16 data6;
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct wl_radiotap_he wl_radiotap_he_t;
+
+BWL_PRE_PACKED_STRUCT struct radiotap_vendor_ns {
+ uint8 vend_oui[3];
+ uint8 sns;
+ uint16 skip_len;
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct radiotap_vendor_ns radiotap_vendor_ns_t;
+
+#define WL_RADIOTAP_PRESENT_BASIC \
+ ((1 << IEEE80211_RADIOTAP_TSFT) | \
+ (1 << IEEE80211_RADIOTAP_FLAGS) | \
+ (1 << IEEE80211_RADIOTAP_CHANNEL) | \
+ (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) | \
+ (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) | \
+ (1 << IEEE80211_RADIOTAP_ANTENNA))
+
+#define WL_RADIOTAP_PRESENT_LEGACY \
+ WL_RADIOTAP_PRESENT_BASIC | \
+ (1 << IEEE80211_RADIOTAP_RATE)
+
+#define WL_RADIOTAP_PRESENT_HT \
+ WL_RADIOTAP_PRESENT_BASIC | \
+ ((1 << IEEE80211_RADIOTAP_XCHANNEL) | \
+ (1 << IEEE80211_RADIOTAP_MCS) | \
+ (1 << IEEE80211_RADIOTAP_AMPDU))
+
+#define WL_RADIOTAP_PRESENT_VHT \
+ WL_RADIOTAP_PRESENT_BASIC | \
+ ((1 << IEEE80211_RADIOTAP_AMPDU) | \
+ (1 << IEEE80211_RADIOTAP_VHT))
+
+#define WL_RADIOTAP_PRESENT_HE \
+ WL_RADIOTAP_PRESENT_BASIC | \
+ ((1 << IEEE80211_RADIOTAP_AMPDU) | \
+ (1 << IEEE80211_RADIOTAP_HE))
+
+/* include/linux/if_arp.h
+ * #define ARPHRD_IEEE80211_PRISM 802 IEEE 802.11 + Prism2 header
+ * #define ARPHRD_IEEE80211_RADIOTAP 803 IEEE 802.11 + radiotap header
+ * include/net/ieee80211_radiotap.h
+ * radiotap structure
+ */
+
+#ifndef ARPHRD_IEEE80211_RADIOTAP
+#define ARPHRD_IEEE80211_RADIOTAP 803
+#endif
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+extern void wl_rtapParseInit(radiotap_parse_t *rtap, uint8 *rtap_header);
+extern ratespec_t wl_calcRspecFromRTap(uint8 *rtap_header);
+extern bool wl_rtapFlags(uint8 *rtap_header, uint8* flags);
+extern uint wl_radiotap_rx(struct dot11_header *mac_header, wl_rxsts_t *rxsts,
+ bsd_header_rx_t *bsd_header);
+extern uint wl_radiotap_rx_legacy(struct dot11_header *mac_header, wl_rxsts_t *rxsts,
+ ieee80211_radiotap_header_t* rtap_hdr);
+extern uint wl_radiotap_rx_ht(struct dot11_header *mac_header, wl_rxsts_t *rxsts,
+ ieee80211_radiotap_header_t* rtap_hdr);
+extern uint wl_radiotap_rx_vht(struct dot11_header *mac_header, wl_rxsts_t *rxsts,
+ ieee80211_radiotap_header_t* rtap_hdr);
+extern uint wl_radiotap_rx_he(struct dot11_header *mac_header, wl_rxsts_t *rxsts,
+ ieee80211_radiotap_header_t* rtap_hdr);
+extern uint wl_radiotap_rx_eht(struct dot11_header *mac_header, wl_rxsts_t *rxsts,
+ ieee80211_radiotap_header_t *rtap_hdr);
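+
+/*
+ * Illustrative parse setup (a sketch; rtap_header is assumed to point at a
+ * complete radiotap header, and a TRUE return from wl_rtapFlags() is assumed
+ * to mean the flags field was present):
+ *
+ *	radiotap_parse_t rtap;
+ *	ratespec_t rspec;
+ *	uint8 flags;
+ *
+ *	wl_rtapParseInit(&rtap, rtap_header);
+ *	rspec = wl_calcRspecFromRTap(rtap_header);
+ *	if (wl_rtapFlags(rtap_header, &flags)) {
+ *		// flags holds the IEEE80211_RADIOTAP_FLAGS octet
+ *	}
+ */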
+
+/* Legacy PHY radiotap header may include a VHT bw signaling VS element */
+#define MAX_RADIOTAP_LEGACY_SIZE (sizeof(wl_radiotap_legacy_t) + \
+ sizeof(radiotap_vendor_ns_t) + sizeof(wl_radiotap_nonht_vht_t))
+
+/* RadioTap header starts with a fixed struct ieee80211_radiotap_header,
+ * followed by variable fields for the 4 supported encodings: HE, VHT, HT, and Legacy
+ */
+#define MAX_RADIOTAP_SIZE (sizeof(struct ieee80211_radiotap_header) + \
+ MAX(sizeof(wl_radiotap_he_t), \
+ MAX(sizeof(wl_radiotap_vht_t), \
+ MAX(sizeof(wl_radiotap_ht_t), MAX_RADIOTAP_LEGACY_SIZE))))
+#define MAX_MON_PKT_SIZE (4096 + MAX_RADIOTAP_SIZE)
+
+#endif /* _BCMWIFI_RADIOTAP_H_ */
diff --git a/bcmdhd.101.10.361.x/include/bcmwifi_rates.h b/bcmdhd.101.10.361.x/include/bcmwifi_rates.h
new file mode 100755
index 0000000..9be50eb
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/bcmwifi_rates.h
@@ -0,0 +1,1262 @@
+/*
+ * Indices for 802.11 a/b/g/n/ac/ax 1-4 chain symmetric transmit rates
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _bcmwifi_rates_h_
+#define _bcmwifi_rates_h_
+
+#include <typedefs.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#define WL_RATESET_SZ_DSSS 4
+#define WL_RATESET_SZ_OFDM 8
+#if defined(WLPROPRIETARY_11N_RATES)
+#define WL_RATESET_SZ_HT_MCS 10
+#else
+#define WL_RATESET_SZ_HT_MCS 8
+#endif
+#define WL_RATESET_SZ_VHT_MCS 10
+#define WL_RATESET_SZ_VHT_MCS_P 12 /* 10 VHT rates + 2 proprietary rates */
+#define WL_RATESET_SZ_HE_MCS 12 /* 12 HE rates (mcs 0-11) */
+#define WL_RATESET_SZ_EHT_MCS 14u /* 14 EHT rates (mcs 0-13) */
+
+#define WL_RATESET_SZ_HT_IOCTL 8 /* MAC histogram, compatibility with wl utility */
+
+#define WL_TX_CHAINS_MAX 4
+
+#define WL_RATE_DISABLED (-128) /* Power value corresponding to unsupported rate */
+
+/* Transmit channel bandwidths */
+typedef enum wl_tx_bw {
+ WL_TX_BW_20,
+ WL_TX_BW_40,
+ WL_TX_BW_80,
+ WL_TX_BW_20IN40,
+ WL_TX_BW_20IN80,
+ WL_TX_BW_40IN80,
+ WL_TX_BW_160,
+ WL_TX_BW_20IN160,
+ WL_TX_BW_40IN160,
+ WL_TX_BW_80IN160,
+ WL_TX_BW_240,
+ WL_TX_BW_20IN240,
+ WL_TX_BW_40IN240,
+ WL_TX_BW_80IN240,
+ WL_TX_BW_160IN240,
+ WL_TX_BW_320,
+ WL_TX_BW_20IN320,
+ WL_TX_BW_40IN320,
+ WL_TX_BW_80IN320,
+ WL_TX_BW_160IN320,
+ WL_TX_BW_ALL
+} wl_tx_bw_t;
+
+/*
+ * Transmit modes.
+ * Not all modes are listed here, only those required for disambiguation; e.g. SPEXP is not listed.
+ */
+typedef enum wl_tx_mode {
+ WL_TX_MODE_NONE,
+ WL_TX_MODE_STBC,
+ WL_TX_MODE_CDD,
+ WL_TX_MODE_TXBF,
+ WL_NUM_TX_MODES
+} wl_tx_mode_t;
+
+/* Number of transmit chains */
+typedef enum wl_tx_chains {
+ WL_TX_CHAINS_1 = 1,
+ WL_TX_CHAINS_2,
+ WL_TX_CHAINS_3,
+ WL_TX_CHAINS_4
+} wl_tx_chains_t;
+
+/* Number of transmit streams */
+typedef enum wl_tx_nss {
+ WL_TX_NSS_1 = 1,
+ WL_TX_NSS_2,
+ WL_TX_NSS_3,
+ WL_TX_NSS_4
+} wl_tx_nss_t;
+
+/* 802.11ax rate types */
+typedef enum wl_he_rate_type {
+ WL_HE_RT_SU = 0,
+ WL_HE_RT_RU26 = 1,
+ WL_HE_RT_RU52 = 2,
+ WL_HE_RT_RU106 = 3,
+ WL_HE_RT_UB = 4,
+ WL_HE_RT_LUB = 5,
+ WL_HE_RT_RU242 = 6,
+ WL_HE_RT_RU484 = 7,
+ WL_HE_RT_RU996 = 8
+} wl_he_rate_type_t;
+
+#define WL_NUM_HE_RT 9u
+
+/* This enum maps each rate to a CLM index
+ * 802.11ax OFDMA (RU) rates are in a separate enum
+ */
+
+typedef enum clm_rates {
+ /************
+ * 1 chain *
+ ************
+ */
+
+ /* 1 Stream */
+ WL_RATE_1X1_DSSS_1 = 0,
+ WL_RATE_1X1_DSSS_2 = 1,
+ WL_RATE_1X1_DSSS_5_5 = 2,
+ WL_RATE_1X1_DSSS_11 = 3,
+
+ WL_RATE_1X1_OFDM_6 = 4,
+ WL_RATE_1X1_OFDM_9 = 5,
+ WL_RATE_1X1_OFDM_12 = 6,
+ WL_RATE_1X1_OFDM_18 = 7,
+ WL_RATE_1X1_OFDM_24 = 8,
+ WL_RATE_1X1_OFDM_36 = 9,
+ WL_RATE_1X1_OFDM_48 = 10,
+ WL_RATE_1X1_OFDM_54 = 11,
+
+ WL_RATE_1X1_MCS0 = 12,
+ WL_RATE_1X1_MCS1 = 13,
+ WL_RATE_1X1_MCS2 = 14,
+ WL_RATE_1X1_MCS3 = 15,
+ WL_RATE_1X1_MCS4 = 16,
+ WL_RATE_1X1_MCS5 = 17,
+ WL_RATE_1X1_MCS6 = 18,
+ WL_RATE_1X1_MCS7 = 19,
+ WL_RATE_P_1X1_MCS87 = 20,
+ WL_RATE_P_1X1_MCS88 = 21,
+
+ WL_RATE_1X1_VHT0SS1 = 12,
+ WL_RATE_1X1_VHT1SS1 = 13,
+ WL_RATE_1X1_VHT2SS1 = 14,
+ WL_RATE_1X1_VHT3SS1 = 15,
+ WL_RATE_1X1_VHT4SS1 = 16,
+ WL_RATE_1X1_VHT5SS1 = 17,
+ WL_RATE_1X1_VHT6SS1 = 18,
+ WL_RATE_1X1_VHT7SS1 = 19,
+ WL_RATE_1X1_VHT8SS1 = 20,
+ WL_RATE_1X1_VHT9SS1 = 21,
+ WL_RATE_P_1X1_VHT10SS1 = 22,
+ WL_RATE_P_1X1_VHT11SS1 = 23,
+
+ WL_RATE_1X1_HE0SS1 = 24,
+ WL_RATE_1X1_HE1SS1 = 25,
+ WL_RATE_1X1_HE2SS1 = 26,
+ WL_RATE_1X1_HE3SS1 = 27,
+ WL_RATE_1X1_HE4SS1 = 28,
+ WL_RATE_1X1_HE5SS1 = 29,
+ WL_RATE_1X1_HE6SS1 = 30,
+ WL_RATE_1X1_HE7SS1 = 31,
+ WL_RATE_1X1_HE8SS1 = 32,
+ WL_RATE_1X1_HE9SS1 = 33,
+ WL_RATE_1X1_HE10SS1 = 34,
+ WL_RATE_1X1_HE11SS1 = 35,
+
+ /************
+ * 2 chains *
+ ************
+ */
+
+ /* 1 Stream expanded + 1 */
+ WL_RATE_1X2_DSSS_1 = 36,
+ WL_RATE_1X2_DSSS_2 = 37,
+ WL_RATE_1X2_DSSS_5_5 = 38,
+ WL_RATE_1X2_DSSS_11 = 39,
+
+ WL_RATE_1X2_CDD_OFDM_6 = 40,
+ WL_RATE_1X2_CDD_OFDM_9 = 41,
+ WL_RATE_1X2_CDD_OFDM_12 = 42,
+ WL_RATE_1X2_CDD_OFDM_18 = 43,
+ WL_RATE_1X2_CDD_OFDM_24 = 44,
+ WL_RATE_1X2_CDD_OFDM_36 = 45,
+ WL_RATE_1X2_CDD_OFDM_48 = 46,
+ WL_RATE_1X2_CDD_OFDM_54 = 47,
+
+ WL_RATE_1X2_CDD_MCS0 = 48,
+ WL_RATE_1X2_CDD_MCS1 = 49,
+ WL_RATE_1X2_CDD_MCS2 = 50,
+ WL_RATE_1X2_CDD_MCS3 = 51,
+ WL_RATE_1X2_CDD_MCS4 = 52,
+ WL_RATE_1X2_CDD_MCS5 = 53,
+ WL_RATE_1X2_CDD_MCS6 = 54,
+ WL_RATE_1X2_CDD_MCS7 = 55,
+ WL_RATE_P_1X2_CDD_MCS87 = 56,
+ WL_RATE_P_1X2_CDD_MCS88 = 57,
+
+ WL_RATE_1X2_VHT0SS1 = 48,
+ WL_RATE_1X2_VHT1SS1 = 49,
+ WL_RATE_1X2_VHT2SS1 = 50,
+ WL_RATE_1X2_VHT3SS1 = 51,
+ WL_RATE_1X2_VHT4SS1 = 52,
+ WL_RATE_1X2_VHT5SS1 = 53,
+ WL_RATE_1X2_VHT6SS1 = 54,
+ WL_RATE_1X2_VHT7SS1 = 55,
+ WL_RATE_1X2_VHT8SS1 = 56,
+ WL_RATE_1X2_VHT9SS1 = 57,
+ WL_RATE_P_1X2_VHT10SS1 = 58,
+ WL_RATE_P_1X2_VHT11SS1 = 59,
+
+ WL_RATE_1X2_HE0SS1 = 60,
+ WL_RATE_1X2_HE1SS1 = 61,
+ WL_RATE_1X2_HE2SS1 = 62,
+ WL_RATE_1X2_HE3SS1 = 63,
+ WL_RATE_1X2_HE4SS1 = 64,
+ WL_RATE_1X2_HE5SS1 = 65,
+ WL_RATE_1X2_HE6SS1 = 66,
+ WL_RATE_1X2_HE7SS1 = 67,
+ WL_RATE_1X2_HE8SS1 = 68,
+ WL_RATE_1X2_HE9SS1 = 69,
+ WL_RATE_1X2_HE10SS1 = 70,
+ WL_RATE_1X2_HE11SS1 = 71,
+
+ /* 2 Streams */
+ WL_RATE_2X2_STBC_MCS0 = 72,
+ WL_RATE_2X2_STBC_MCS1 = 73,
+ WL_RATE_2X2_STBC_MCS2 = 74,
+ WL_RATE_2X2_STBC_MCS3 = 75,
+ WL_RATE_2X2_STBC_MCS4 = 76,
+ WL_RATE_2X2_STBC_MCS5 = 77,
+ WL_RATE_2X2_STBC_MCS6 = 78,
+ WL_RATE_2X2_STBC_MCS7 = 79,
+ WL_RATE_P_2X2_STBC_MCS87 = 80,
+ WL_RATE_P_2X2_STBC_MCS88 = 81,
+
+ WL_RATE_2X2_STBC_VHT0SS1 = 72,
+ WL_RATE_2X2_STBC_VHT1SS1 = 73,
+ WL_RATE_2X2_STBC_VHT2SS1 = 74,
+ WL_RATE_2X2_STBC_VHT3SS1 = 75,
+ WL_RATE_2X2_STBC_VHT4SS1 = 76,
+ WL_RATE_2X2_STBC_VHT5SS1 = 77,
+ WL_RATE_2X2_STBC_VHT6SS1 = 78,
+ WL_RATE_2X2_STBC_VHT7SS1 = 79,
+ WL_RATE_2X2_STBC_VHT8SS1 = 80,
+ WL_RATE_2X2_STBC_VHT9SS1 = 81,
+ WL_RATE_P_2X2_STBC_VHT10SS1 = 82,
+ WL_RATE_P_2X2_STBC_VHT11SS1 = 83,
+
+ WL_RATE_2X2_SDM_MCS8 = 84,
+ WL_RATE_2X2_SDM_MCS9 = 85,
+ WL_RATE_2X2_SDM_MCS10 = 86,
+ WL_RATE_2X2_SDM_MCS11 = 87,
+ WL_RATE_2X2_SDM_MCS12 = 88,
+ WL_RATE_2X2_SDM_MCS13 = 89,
+ WL_RATE_2X2_SDM_MCS14 = 90,
+ WL_RATE_2X2_SDM_MCS15 = 91,
+ WL_RATE_P_2X2_SDM_MCS99 = 92,
+ WL_RATE_P_2X2_SDM_MCS100 = 93,
+
+ WL_RATE_2X2_VHT0SS2 = 84,
+ WL_RATE_2X2_VHT1SS2 = 85,
+ WL_RATE_2X2_VHT2SS2 = 86,
+ WL_RATE_2X2_VHT3SS2 = 87,
+ WL_RATE_2X2_VHT4SS2 = 88,
+ WL_RATE_2X2_VHT5SS2 = 89,
+ WL_RATE_2X2_VHT6SS2 = 90,
+ WL_RATE_2X2_VHT7SS2 = 91,
+ WL_RATE_2X2_VHT8SS2 = 92,
+ WL_RATE_2X2_VHT9SS2 = 93,
+ WL_RATE_P_2X2_VHT10SS2 = 94,
+ WL_RATE_P_2X2_VHT11SS2 = 95,
+
+ WL_RATE_2X2_HE0SS2 = 96,
+ WL_RATE_2X2_HE1SS2 = 97,
+ WL_RATE_2X2_HE2SS2 = 98,
+ WL_RATE_2X2_HE3SS2 = 99,
+ WL_RATE_2X2_HE4SS2 = 100,
+ WL_RATE_2X2_HE5SS2 = 101,
+ WL_RATE_2X2_HE6SS2 = 102,
+ WL_RATE_2X2_HE7SS2 = 103,
+ WL_RATE_2X2_HE8SS2 = 104,
+ WL_RATE_2X2_HE9SS2 = 105,
+ WL_RATE_2X2_HE10SS2 = 106,
+ WL_RATE_2X2_HE11SS2 = 107,
+
+ /****************************
+ * TX Beamforming, 2 chains *
+ ****************************
+ */
+
+ /* 1 Stream expanded + 1 */
+ WL_RATE_1X2_TXBF_OFDM_6 = 108,
+ WL_RATE_1X2_TXBF_OFDM_9 = 109,
+ WL_RATE_1X2_TXBF_OFDM_12 = 110,
+ WL_RATE_1X2_TXBF_OFDM_18 = 111,
+ WL_RATE_1X2_TXBF_OFDM_24 = 112,
+ WL_RATE_1X2_TXBF_OFDM_36 = 113,
+ WL_RATE_1X2_TXBF_OFDM_48 = 114,
+ WL_RATE_1X2_TXBF_OFDM_54 = 115,
+
+ WL_RATE_1X2_TXBF_MCS0 = 116,
+ WL_RATE_1X2_TXBF_MCS1 = 117,
+ WL_RATE_1X2_TXBF_MCS2 = 118,
+ WL_RATE_1X2_TXBF_MCS3 = 119,
+ WL_RATE_1X2_TXBF_MCS4 = 120,
+ WL_RATE_1X2_TXBF_MCS5 = 121,
+ WL_RATE_1X2_TXBF_MCS6 = 122,
+ WL_RATE_1X2_TXBF_MCS7 = 123,
+ WL_RATE_P_1X2_TXBF_MCS87 = 124,
+ WL_RATE_P_1X2_TXBF_MCS88 = 125,
+
+ WL_RATE_1X2_TXBF_VHT0SS1 = 116,
+ WL_RATE_1X2_TXBF_VHT1SS1 = 117,
+ WL_RATE_1X2_TXBF_VHT2SS1 = 118,
+ WL_RATE_1X2_TXBF_VHT3SS1 = 119,
+ WL_RATE_1X2_TXBF_VHT4SS1 = 120,
+ WL_RATE_1X2_TXBF_VHT5SS1 = 121,
+ WL_RATE_1X2_TXBF_VHT6SS1 = 122,
+ WL_RATE_1X2_TXBF_VHT7SS1 = 123,
+ WL_RATE_1X2_TXBF_VHT8SS1 = 124,
+ WL_RATE_1X2_TXBF_VHT9SS1 = 125,
+ WL_RATE_P_1X2_TXBF_VHT10SS1 = 126,
+ WL_RATE_P_1X2_TXBF_VHT11SS1 = 127,
+
+ WL_RATE_1X2_TXBF_HE0SS1 = 128,
+ WL_RATE_1X2_TXBF_HE1SS1 = 129,
+ WL_RATE_1X2_TXBF_HE2SS1 = 130,
+ WL_RATE_1X2_TXBF_HE3SS1 = 131,
+ WL_RATE_1X2_TXBF_HE4SS1 = 132,
+ WL_RATE_1X2_TXBF_HE5SS1 = 133,
+ WL_RATE_1X2_TXBF_HE6SS1 = 134,
+ WL_RATE_1X2_TXBF_HE7SS1 = 135,
+ WL_RATE_1X2_TXBF_HE8SS1 = 136,
+ WL_RATE_1X2_TXBF_HE9SS1 = 137,
+ WL_RATE_1X2_TXBF_HE10SS1 = 138,
+ WL_RATE_1X2_TXBF_HE11SS1 = 139,
+
+ /* 2 Streams */
+ WL_RATE_2X2_TXBF_SDM_MCS8 = 140,
+ WL_RATE_2X2_TXBF_SDM_MCS9 = 141,
+ WL_RATE_2X2_TXBF_SDM_MCS10 = 142,
+ WL_RATE_2X2_TXBF_SDM_MCS11 = 143,
+ WL_RATE_2X2_TXBF_SDM_MCS12 = 144,
+ WL_RATE_2X2_TXBF_SDM_MCS13 = 145,
+ WL_RATE_2X2_TXBF_SDM_MCS14 = 146,
+ WL_RATE_2X2_TXBF_SDM_MCS15 = 147,
+ WL_RATE_P_2X2_TXBF_SDM_MCS99 = 148,
+ WL_RATE_P_2X2_TXBF_SDM_MCS100 = 149,
+
+ WL_RATE_2X2_TXBF_VHT0SS2 = 140,
+ WL_RATE_2X2_TXBF_VHT1SS2 = 141,
+ WL_RATE_2X2_TXBF_VHT2SS2 = 142,
+ WL_RATE_2X2_TXBF_VHT3SS2 = 143,
+ WL_RATE_2X2_TXBF_VHT4SS2 = 144,
+ WL_RATE_2X2_TXBF_VHT5SS2 = 145,
+ WL_RATE_2X2_TXBF_VHT6SS2 = 146,
+ WL_RATE_2X2_TXBF_VHT7SS2 = 147,
+ WL_RATE_2X2_TXBF_VHT8SS2 = 148,
+ WL_RATE_2X2_TXBF_VHT9SS2 = 149,
+ WL_RATE_P_2X2_TXBF_VHT10SS2 = 150,
+ WL_RATE_P_2X2_TXBF_VHT11SS2 = 151,
+
+ WL_RATE_2X2_TXBF_HE0SS2 = 152,
+ WL_RATE_2X2_TXBF_HE1SS2 = 153,
+ WL_RATE_2X2_TXBF_HE2SS2 = 154,
+ WL_RATE_2X2_TXBF_HE3SS2 = 155,
+ WL_RATE_2X2_TXBF_HE4SS2 = 156,
+ WL_RATE_2X2_TXBF_HE5SS2 = 157,
+ WL_RATE_2X2_TXBF_HE6SS2 = 158,
+ WL_RATE_2X2_TXBF_HE7SS2 = 159,
+ WL_RATE_2X2_TXBF_HE8SS2 = 160,
+ WL_RATE_2X2_TXBF_HE9SS2 = 161,
+ WL_RATE_2X2_TXBF_HE10SS2 = 162,
+ WL_RATE_2X2_TXBF_HE11SS2 = 163,
+
+ /************
+ * 3 chains *
+ ************
+ */
+
+ /* 1 Stream expanded + 2 */
+ WL_RATE_1X3_DSSS_1 = 164,
+ WL_RATE_1X3_DSSS_2 = 165,
+ WL_RATE_1X3_DSSS_5_5 = 166,
+ WL_RATE_1X3_DSSS_11 = 167,
+
+ WL_RATE_1X3_CDD_OFDM_6 = 168,
+ WL_RATE_1X3_CDD_OFDM_9 = 169,
+ WL_RATE_1X3_CDD_OFDM_12 = 170,
+ WL_RATE_1X3_CDD_OFDM_18 = 171,
+ WL_RATE_1X3_CDD_OFDM_24 = 172,
+ WL_RATE_1X3_CDD_OFDM_36 = 173,
+ WL_RATE_1X3_CDD_OFDM_48 = 174,
+ WL_RATE_1X3_CDD_OFDM_54 = 175,
+
+ WL_RATE_1X3_CDD_MCS0 = 176,
+ WL_RATE_1X3_CDD_MCS1 = 177,
+ WL_RATE_1X3_CDD_MCS2 = 178,
+ WL_RATE_1X3_CDD_MCS3 = 179,
+ WL_RATE_1X3_CDD_MCS4 = 180,
+ WL_RATE_1X3_CDD_MCS5 = 181,
+ WL_RATE_1X3_CDD_MCS6 = 182,
+ WL_RATE_1X3_CDD_MCS7 = 183,
+ WL_RATE_P_1X3_CDD_MCS87 = 184,
+ WL_RATE_P_1X3_CDD_MCS88 = 185,
+
+ WL_RATE_1X3_VHT0SS1 = 176,
+ WL_RATE_1X3_VHT1SS1 = 177,
+ WL_RATE_1X3_VHT2SS1 = 178,
+ WL_RATE_1X3_VHT3SS1 = 179,
+ WL_RATE_1X3_VHT4SS1 = 180,
+ WL_RATE_1X3_VHT5SS1 = 181,
+ WL_RATE_1X3_VHT6SS1 = 182,
+ WL_RATE_1X3_VHT7SS1 = 183,
+ WL_RATE_1X3_VHT8SS1 = 184,
+ WL_RATE_1X3_VHT9SS1 = 185,
+ WL_RATE_P_1X3_VHT10SS1 = 186,
+ WL_RATE_P_1X3_VHT11SS1 = 187,
+
+ WL_RATE_1X3_HE0SS1 = 188,
+ WL_RATE_1X3_HE1SS1 = 189,
+ WL_RATE_1X3_HE2SS1 = 190,
+ WL_RATE_1X3_HE3SS1 = 191,
+ WL_RATE_1X3_HE4SS1 = 192,
+ WL_RATE_1X3_HE5SS1 = 193,
+ WL_RATE_1X3_HE6SS1 = 194,
+ WL_RATE_1X3_HE7SS1 = 195,
+ WL_RATE_1X3_HE8SS1 = 196,
+ WL_RATE_1X3_HE9SS1 = 197,
+ WL_RATE_1X3_HE10SS1 = 198,
+ WL_RATE_1X3_HE11SS1 = 199,
+
+ /* 2 Streams expanded + 1 */
+ WL_RATE_2X3_STBC_MCS0 = 200,
+ WL_RATE_2X3_STBC_MCS1 = 201,
+ WL_RATE_2X3_STBC_MCS2 = 202,
+ WL_RATE_2X3_STBC_MCS3 = 203,
+ WL_RATE_2X3_STBC_MCS4 = 204,
+ WL_RATE_2X3_STBC_MCS5 = 205,
+ WL_RATE_2X3_STBC_MCS6 = 206,
+ WL_RATE_2X3_STBC_MCS7 = 207,
+ WL_RATE_P_2X3_STBC_MCS87 = 208,
+ WL_RATE_P_2X3_STBC_MCS88 = 209,
+
+ WL_RATE_2X3_STBC_VHT0SS1 = 200,
+ WL_RATE_2X3_STBC_VHT1SS1 = 201,
+ WL_RATE_2X3_STBC_VHT2SS1 = 202,
+ WL_RATE_2X3_STBC_VHT3SS1 = 203,
+ WL_RATE_2X3_STBC_VHT4SS1 = 204,
+ WL_RATE_2X3_STBC_VHT5SS1 = 205,
+ WL_RATE_2X3_STBC_VHT6SS1 = 206,
+ WL_RATE_2X3_STBC_VHT7SS1 = 207,
+ WL_RATE_2X3_STBC_VHT8SS1 = 208,
+ WL_RATE_2X3_STBC_VHT9SS1 = 209,
+ WL_RATE_P_2X3_STBC_VHT10SS1 = 210,
+ WL_RATE_P_2X3_STBC_VHT11SS1 = 211,
+
+ WL_RATE_2X3_SDM_MCS8 = 212,
+ WL_RATE_2X3_SDM_MCS9 = 213,
+ WL_RATE_2X3_SDM_MCS10 = 214,
+ WL_RATE_2X3_SDM_MCS11 = 215,
+ WL_RATE_2X3_SDM_MCS12 = 216,
+ WL_RATE_2X3_SDM_MCS13 = 217,
+ WL_RATE_2X3_SDM_MCS14 = 218,
+ WL_RATE_2X3_SDM_MCS15 = 219,
+ WL_RATE_P_2X3_SDM_MCS99 = 220,
+ WL_RATE_P_2X3_SDM_MCS100 = 221,
+
+ WL_RATE_2X3_VHT0SS2 = 212,
+ WL_RATE_2X3_VHT1SS2 = 213,
+ WL_RATE_2X3_VHT2SS2 = 214,
+ WL_RATE_2X3_VHT3SS2 = 215,
+ WL_RATE_2X3_VHT4SS2 = 216,
+ WL_RATE_2X3_VHT5SS2 = 217,
+ WL_RATE_2X3_VHT6SS2 = 218,
+ WL_RATE_2X3_VHT7SS2 = 219,
+ WL_RATE_2X3_VHT8SS2 = 220,
+ WL_RATE_2X3_VHT9SS2 = 221,
+ WL_RATE_P_2X3_VHT10SS2 = 222,
+ WL_RATE_P_2X3_VHT11SS2 = 223,
+
+ WL_RATE_2X3_HE0SS2 = 224,
+ WL_RATE_2X3_HE1SS2 = 225,
+ WL_RATE_2X3_HE2SS2 = 226,
+ WL_RATE_2X3_HE3SS2 = 227,
+ WL_RATE_2X3_HE4SS2 = 228,
+ WL_RATE_2X3_HE5SS2 = 229,
+ WL_RATE_2X3_HE6SS2 = 230,
+ WL_RATE_2X3_HE7SS2 = 231,
+ WL_RATE_2X3_HE8SS2 = 232,
+ WL_RATE_2X3_HE9SS2 = 233,
+ WL_RATE_2X3_HE10SS2 = 234,
+ WL_RATE_2X3_HE11SS2 = 235,
+
+ /* 3 Streams */
+ WL_RATE_3X3_SDM_MCS16 = 236,
+ WL_RATE_3X3_SDM_MCS17 = 237,
+ WL_RATE_3X3_SDM_MCS18 = 238,
+ WL_RATE_3X3_SDM_MCS19 = 239,
+ WL_RATE_3X3_SDM_MCS20 = 240,
+ WL_RATE_3X3_SDM_MCS21 = 241,
+ WL_RATE_3X3_SDM_MCS22 = 242,
+ WL_RATE_3X3_SDM_MCS23 = 243,
+ WL_RATE_P_3X3_SDM_MCS101 = 244,
+ WL_RATE_P_3X3_SDM_MCS102 = 245,
+
+ WL_RATE_3X3_VHT0SS3 = 236,
+ WL_RATE_3X3_VHT1SS3 = 237,
+ WL_RATE_3X3_VHT2SS3 = 238,
+ WL_RATE_3X3_VHT3SS3 = 239,
+ WL_RATE_3X3_VHT4SS3 = 240,
+ WL_RATE_3X3_VHT5SS3 = 241,
+ WL_RATE_3X3_VHT6SS3 = 242,
+ WL_RATE_3X3_VHT7SS3 = 243,
+ WL_RATE_3X3_VHT8SS3 = 244,
+ WL_RATE_3X3_VHT9SS3 = 245,
+ WL_RATE_P_3X3_VHT10SS3 = 246,
+ WL_RATE_P_3X3_VHT11SS3 = 247,
+
+ WL_RATE_3X3_HE0SS3 = 248,
+ WL_RATE_3X3_HE1SS3 = 249,
+ WL_RATE_3X3_HE2SS3 = 250,
+ WL_RATE_3X3_HE3SS3 = 251,
+ WL_RATE_3X3_HE4SS3 = 252,
+ WL_RATE_3X3_HE5SS3 = 253,
+ WL_RATE_3X3_HE6SS3 = 254,
+ WL_RATE_3X3_HE7SS3 = 255,
+ WL_RATE_3X3_HE8SS3 = 256,
+ WL_RATE_3X3_HE9SS3 = 257,
+ WL_RATE_3X3_HE10SS3 = 258,
+ WL_RATE_3X3_HE11SS3 = 259,
+
+ /****************************
+ * TX Beamforming, 3 chains *
+ ****************************
+ */
+
+ /* 1 Stream expanded + 2 */
+ WL_RATE_1X3_TXBF_OFDM_6 = 260,
+ WL_RATE_1X3_TXBF_OFDM_9 = 261,
+ WL_RATE_1X3_TXBF_OFDM_12 = 262,
+ WL_RATE_1X3_TXBF_OFDM_18 = 263,
+ WL_RATE_1X3_TXBF_OFDM_24 = 264,
+ WL_RATE_1X3_TXBF_OFDM_36 = 265,
+ WL_RATE_1X3_TXBF_OFDM_48 = 266,
+ WL_RATE_1X3_TXBF_OFDM_54 = 267,
+
+ WL_RATE_1X3_TXBF_MCS0 = 268,
+ WL_RATE_1X3_TXBF_MCS1 = 269,
+ WL_RATE_1X3_TXBF_MCS2 = 270,
+ WL_RATE_1X3_TXBF_MCS3 = 271,
+ WL_RATE_1X3_TXBF_MCS4 = 272,
+ WL_RATE_1X3_TXBF_MCS5 = 273,
+ WL_RATE_1X3_TXBF_MCS6 = 274,
+ WL_RATE_1X3_TXBF_MCS7 = 275,
+ WL_RATE_P_1X3_TXBF_MCS87 = 276,
+ WL_RATE_P_1X3_TXBF_MCS88 = 277,
+
+ WL_RATE_1X3_TXBF_VHT0SS1 = 268,
+ WL_RATE_1X3_TXBF_VHT1SS1 = 269,
+ WL_RATE_1X3_TXBF_VHT2SS1 = 270,
+ WL_RATE_1X3_TXBF_VHT3SS1 = 271,
+ WL_RATE_1X3_TXBF_VHT4SS1 = 272,
+ WL_RATE_1X3_TXBF_VHT5SS1 = 273,
+ WL_RATE_1X3_TXBF_VHT6SS1 = 274,
+ WL_RATE_1X3_TXBF_VHT7SS1 = 275,
+ WL_RATE_1X3_TXBF_VHT8SS1 = 276,
+ WL_RATE_1X3_TXBF_VHT9SS1 = 277,
+ WL_RATE_P_1X3_TXBF_VHT10SS1 = 278,
+ WL_RATE_P_1X3_TXBF_VHT11SS1 = 279,
+
+ WL_RATE_1X3_TXBF_HE0SS1 = 280,
+ WL_RATE_1X3_TXBF_HE1SS1 = 281,
+ WL_RATE_1X3_TXBF_HE2SS1 = 282,
+ WL_RATE_1X3_TXBF_HE3SS1 = 283,
+ WL_RATE_1X3_TXBF_HE4SS1 = 284,
+ WL_RATE_1X3_TXBF_HE5SS1 = 285,
+ WL_RATE_1X3_TXBF_HE6SS1 = 286,
+ WL_RATE_1X3_TXBF_HE7SS1 = 287,
+ WL_RATE_1X3_TXBF_HE8SS1 = 288,
+ WL_RATE_1X3_TXBF_HE9SS1 = 289,
+ WL_RATE_1X3_TXBF_HE10SS1 = 290,
+ WL_RATE_1X3_TXBF_HE11SS1 = 291,
+
+ /* 2 Streams expanded + 1 */
+ WL_RATE_2X3_TXBF_SDM_MCS8 = 292,
+ WL_RATE_2X3_TXBF_SDM_MCS9 = 293,
+ WL_RATE_2X3_TXBF_SDM_MCS10 = 294,
+ WL_RATE_2X3_TXBF_SDM_MCS11 = 295,
+ WL_RATE_2X3_TXBF_SDM_MCS12 = 296,
+ WL_RATE_2X3_TXBF_SDM_MCS13 = 297,
+ WL_RATE_2X3_TXBF_SDM_MCS14 = 298,
+ WL_RATE_2X3_TXBF_SDM_MCS15 = 299,
+ WL_RATE_P_2X3_TXBF_SDM_MCS99 = 300,
+ WL_RATE_P_2X3_TXBF_SDM_MCS100 = 301,
+
+ WL_RATE_2X3_TXBF_VHT0SS2 = 292,
+ WL_RATE_2X3_TXBF_VHT1SS2 = 293,
+ WL_RATE_2X3_TXBF_VHT2SS2 = 294,
+ WL_RATE_2X3_TXBF_VHT3SS2 = 295,
+ WL_RATE_2X3_TXBF_VHT4SS2 = 296,
+ WL_RATE_2X3_TXBF_VHT5SS2 = 297,
+ WL_RATE_2X3_TXBF_VHT6SS2 = 298,
+ WL_RATE_2X3_TXBF_VHT7SS2 = 299,
+ WL_RATE_2X3_TXBF_VHT8SS2 = 300,
+ WL_RATE_2X3_TXBF_VHT9SS2 = 301,
+ WL_RATE_P_2X3_TXBF_VHT10SS2 = 302,
+ WL_RATE_P_2X3_TXBF_VHT11SS2 = 303,
+
+ WL_RATE_2X3_TXBF_HE0SS2 = 304,
+ WL_RATE_2X3_TXBF_HE1SS2 = 305,
+ WL_RATE_2X3_TXBF_HE2SS2 = 306,
+ WL_RATE_2X3_TXBF_HE3SS2 = 307,
+ WL_RATE_2X3_TXBF_HE4SS2 = 308,
+ WL_RATE_2X3_TXBF_HE5SS2 = 309,
+ WL_RATE_2X3_TXBF_HE6SS2 = 310,
+ WL_RATE_2X3_TXBF_HE7SS2 = 311,
+ WL_RATE_2X3_TXBF_HE8SS2 = 312,
+ WL_RATE_2X3_TXBF_HE9SS2 = 313,
+ WL_RATE_2X3_TXBF_HE10SS2 = 314,
+ WL_RATE_2X3_TXBF_HE11SS2 = 315,
+
+ /* 3 Streams */
+ WL_RATE_3X3_TXBF_SDM_MCS16 = 316,
+ WL_RATE_3X3_TXBF_SDM_MCS17 = 317,
+ WL_RATE_3X3_TXBF_SDM_MCS18 = 318,
+ WL_RATE_3X3_TXBF_SDM_MCS19 = 319,
+ WL_RATE_3X3_TXBF_SDM_MCS20 = 320,
+ WL_RATE_3X3_TXBF_SDM_MCS21 = 321,
+ WL_RATE_3X3_TXBF_SDM_MCS22 = 322,
+ WL_RATE_3X3_TXBF_SDM_MCS23 = 323,
+ WL_RATE_P_3X3_TXBF_SDM_MCS101 = 324,
+ WL_RATE_P_3X3_TXBF_SDM_MCS102 = 325,
+
+ WL_RATE_3X3_TXBF_VHT0SS3 = 316,
+ WL_RATE_3X3_TXBF_VHT1SS3 = 317,
+ WL_RATE_3X3_TXBF_VHT2SS3 = 318,
+ WL_RATE_3X3_TXBF_VHT3SS3 = 319,
+ WL_RATE_3X3_TXBF_VHT4SS3 = 320,
+ WL_RATE_3X3_TXBF_VHT5SS3 = 321,
+ WL_RATE_3X3_TXBF_VHT6SS3 = 322,
+ WL_RATE_3X3_TXBF_VHT7SS3 = 323,
+ WL_RATE_3X3_TXBF_VHT8SS3 = 324,
+ WL_RATE_3X3_TXBF_VHT9SS3 = 325,
+ WL_RATE_P_3X3_TXBF_VHT10SS3 = 326,
+ WL_RATE_P_3X3_TXBF_VHT11SS3 = 327,
+
+ WL_RATE_3X3_TXBF_HE0SS3 = 328,
+ WL_RATE_3X3_TXBF_HE1SS3 = 329,
+ WL_RATE_3X3_TXBF_HE2SS3 = 330,
+ WL_RATE_3X3_TXBF_HE3SS3 = 331,
+ WL_RATE_3X3_TXBF_HE4SS3 = 332,
+ WL_RATE_3X3_TXBF_HE5SS3 = 333,
+ WL_RATE_3X3_TXBF_HE6SS3 = 334,
+ WL_RATE_3X3_TXBF_HE7SS3 = 335,
+ WL_RATE_3X3_TXBF_HE8SS3 = 336,
+ WL_RATE_3X3_TXBF_HE9SS3 = 337,
+ WL_RATE_3X3_TXBF_HE10SS3 = 338,
+ WL_RATE_3X3_TXBF_HE11SS3 = 339,
+
+ /************
+ * 4 chains *
+ ************
+ */
+
+ /* 1 Stream expanded + 3 */
+ WL_RATE_1X4_DSSS_1 = 340,
+ WL_RATE_1X4_DSSS_2 = 341,
+ WL_RATE_1X4_DSSS_5_5 = 342,
+ WL_RATE_1X4_DSSS_11 = 343,
+
+ WL_RATE_1X4_CDD_OFDM_6 = 344,
+ WL_RATE_1X4_CDD_OFDM_9 = 345,
+ WL_RATE_1X4_CDD_OFDM_12 = 346,
+ WL_RATE_1X4_CDD_OFDM_18 = 347,
+ WL_RATE_1X4_CDD_OFDM_24 = 348,
+ WL_RATE_1X4_CDD_OFDM_36 = 349,
+ WL_RATE_1X4_CDD_OFDM_48 = 350,
+ WL_RATE_1X4_CDD_OFDM_54 = 351,
+
+ WL_RATE_1X4_CDD_MCS0 = 352,
+ WL_RATE_1X4_CDD_MCS1 = 353,
+ WL_RATE_1X4_CDD_MCS2 = 354,
+ WL_RATE_1X4_CDD_MCS3 = 355,
+ WL_RATE_1X4_CDD_MCS4 = 356,
+ WL_RATE_1X4_CDD_MCS5 = 357,
+ WL_RATE_1X4_CDD_MCS6 = 358,
+ WL_RATE_1X4_CDD_MCS7 = 359,
+ WL_RATE_P_1X4_CDD_MCS87 = 360,
+ WL_RATE_P_1X4_CDD_MCS88 = 361,
+
+ WL_RATE_1X4_VHT0SS1 = 352,
+ WL_RATE_1X4_VHT1SS1 = 353,
+ WL_RATE_1X4_VHT2SS1 = 354,
+ WL_RATE_1X4_VHT3SS1 = 355,
+ WL_RATE_1X4_VHT4SS1 = 356,
+ WL_RATE_1X4_VHT5SS1 = 357,
+ WL_RATE_1X4_VHT6SS1 = 358,
+ WL_RATE_1X4_VHT7SS1 = 359,
+ WL_RATE_1X4_VHT8SS1 = 360,
+ WL_RATE_1X4_VHT9SS1 = 361,
+ WL_RATE_P_1X4_VHT10SS1 = 362,
+ WL_RATE_P_1X4_VHT11SS1 = 363,
+
+ WL_RATE_1X4_HE0SS1 = 364,
+ WL_RATE_1X4_HE1SS1 = 365,
+ WL_RATE_1X4_HE2SS1 = 366,
+ WL_RATE_1X4_HE3SS1 = 367,
+ WL_RATE_1X4_HE4SS1 = 368,
+ WL_RATE_1X4_HE5SS1 = 369,
+ WL_RATE_1X4_HE6SS1 = 370,
+ WL_RATE_1X4_HE7SS1 = 371,
+ WL_RATE_1X4_HE8SS1 = 372,
+ WL_RATE_1X4_HE9SS1 = 373,
+ WL_RATE_1X4_HE10SS1 = 374,
+ WL_RATE_1X4_HE11SS1 = 375,
+
+ /* 2 Streams expanded + 2 */
+ WL_RATE_2X4_STBC_MCS0 = 376,
+ WL_RATE_2X4_STBC_MCS1 = 377,
+ WL_RATE_2X4_STBC_MCS2 = 378,
+ WL_RATE_2X4_STBC_MCS3 = 379,
+ WL_RATE_2X4_STBC_MCS4 = 380,
+ WL_RATE_2X4_STBC_MCS5 = 381,
+ WL_RATE_2X4_STBC_MCS6 = 382,
+ WL_RATE_2X4_STBC_MCS7 = 383,
+ WL_RATE_P_2X4_STBC_MCS87 = 384,
+ WL_RATE_P_2X4_STBC_MCS88 = 385,
+
+ WL_RATE_2X4_STBC_VHT0SS1 = 376,
+ WL_RATE_2X4_STBC_VHT1SS1 = 377,
+ WL_RATE_2X4_STBC_VHT2SS1 = 378,
+ WL_RATE_2X4_STBC_VHT3SS1 = 379,
+ WL_RATE_2X4_STBC_VHT4SS1 = 380,
+ WL_RATE_2X4_STBC_VHT5SS1 = 381,
+ WL_RATE_2X4_STBC_VHT6SS1 = 382,
+ WL_RATE_2X4_STBC_VHT7SS1 = 383,
+ WL_RATE_2X4_STBC_VHT8SS1 = 384,
+ WL_RATE_2X4_STBC_VHT9SS1 = 385,
+ WL_RATE_P_2X4_STBC_VHT10SS1 = 386,
+ WL_RATE_P_2X4_STBC_VHT11SS1 = 387,
+
+ WL_RATE_2X4_SDM_MCS8 = 388,
+ WL_RATE_2X4_SDM_MCS9 = 389,
+ WL_RATE_2X4_SDM_MCS10 = 390,
+ WL_RATE_2X4_SDM_MCS11 = 391,
+ WL_RATE_2X4_SDM_MCS12 = 392,
+ WL_RATE_2X4_SDM_MCS13 = 393,
+ WL_RATE_2X4_SDM_MCS14 = 394,
+ WL_RATE_2X4_SDM_MCS15 = 395,
+ WL_RATE_P_2X4_SDM_MCS99 = 396,
+ WL_RATE_P_2X4_SDM_MCS100 = 397,
+
+ WL_RATE_2X4_VHT0SS2 = 388,
+ WL_RATE_2X4_VHT1SS2 = 389,
+ WL_RATE_2X4_VHT2SS2 = 390,
+ WL_RATE_2X4_VHT3SS2 = 391,
+ WL_RATE_2X4_VHT4SS2 = 392,
+ WL_RATE_2X4_VHT5SS2 = 393,
+ WL_RATE_2X4_VHT6SS2 = 394,
+ WL_RATE_2X4_VHT7SS2 = 395,
+ WL_RATE_2X4_VHT8SS2 = 396,
+ WL_RATE_2X4_VHT9SS2 = 397,
+ WL_RATE_P_2X4_VHT10SS2 = 398,
+ WL_RATE_P_2X4_VHT11SS2 = 399,
+
+ WL_RATE_2X4_HE0SS2 = 400,
+ WL_RATE_2X4_HE1SS2 = 401,
+ WL_RATE_2X4_HE2SS2 = 402,
+ WL_RATE_2X4_HE3SS2 = 403,
+ WL_RATE_2X4_HE4SS2 = 404,
+ WL_RATE_2X4_HE5SS2 = 405,
+ WL_RATE_2X4_HE6SS2 = 406,
+ WL_RATE_2X4_HE7SS2 = 407,
+ WL_RATE_2X4_HE8SS2 = 408,
+ WL_RATE_2X4_HE9SS2 = 409,
+ WL_RATE_2X4_HE10SS2 = 410,
+ WL_RATE_2X4_HE11SS2 = 411,
+
+ /* 3 Streams expanded + 1 */
+ WL_RATE_3X4_SDM_MCS16 = 412,
+ WL_RATE_3X4_SDM_MCS17 = 413,
+ WL_RATE_3X4_SDM_MCS18 = 414,
+ WL_RATE_3X4_SDM_MCS19 = 415,
+ WL_RATE_3X4_SDM_MCS20 = 416,
+ WL_RATE_3X4_SDM_MCS21 = 417,
+ WL_RATE_3X4_SDM_MCS22 = 418,
+ WL_RATE_3X4_SDM_MCS23 = 419,
+ WL_RATE_P_3X4_SDM_MCS101 = 420,
+ WL_RATE_P_3X4_SDM_MCS102 = 421,
+
+ WL_RATE_3X4_VHT0SS3 = 412,
+ WL_RATE_3X4_VHT1SS3 = 413,
+ WL_RATE_3X4_VHT2SS3 = 414,
+ WL_RATE_3X4_VHT3SS3 = 415,
+ WL_RATE_3X4_VHT4SS3 = 416,
+ WL_RATE_3X4_VHT5SS3 = 417,
+ WL_RATE_3X4_VHT6SS3 = 418,
+ WL_RATE_3X4_VHT7SS3 = 419,
+ WL_RATE_3X4_VHT8SS3 = 420,
+ WL_RATE_3X4_VHT9SS3 = 421,
+ WL_RATE_P_3X4_VHT10SS3 = 422,
+ WL_RATE_P_3X4_VHT11SS3 = 423,
+
+ WL_RATE_3X4_HE0SS3 = 424,
+ WL_RATE_3X4_HE1SS3 = 425,
+ WL_RATE_3X4_HE2SS3 = 426,
+ WL_RATE_3X4_HE3SS3 = 427,
+ WL_RATE_3X4_HE4SS3 = 428,
+ WL_RATE_3X4_HE5SS3 = 429,
+ WL_RATE_3X4_HE6SS3 = 430,
+ WL_RATE_3X4_HE7SS3 = 431,
+ WL_RATE_3X4_HE8SS3 = 432,
+ WL_RATE_3X4_HE9SS3 = 433,
+ WL_RATE_3X4_HE10SS3 = 434,
+ WL_RATE_3X4_HE11SS3 = 435,
+
+ /* 4 Streams */
+ WL_RATE_4X4_SDM_MCS24 = 436,
+ WL_RATE_4X4_SDM_MCS25 = 437,
+ WL_RATE_4X4_SDM_MCS26 = 438,
+ WL_RATE_4X4_SDM_MCS27 = 439,
+ WL_RATE_4X4_SDM_MCS28 = 440,
+ WL_RATE_4X4_SDM_MCS29 = 441,
+ WL_RATE_4X4_SDM_MCS30 = 442,
+ WL_RATE_4X4_SDM_MCS31 = 443,
+ WL_RATE_P_4X4_SDM_MCS103 = 444,
+ WL_RATE_P_4X4_SDM_MCS104 = 445,
+
+ WL_RATE_4X4_VHT0SS4 = 436,
+ WL_RATE_4X4_VHT1SS4 = 437,
+ WL_RATE_4X4_VHT2SS4 = 438,
+ WL_RATE_4X4_VHT3SS4 = 439,
+ WL_RATE_4X4_VHT4SS4 = 440,
+ WL_RATE_4X4_VHT5SS4 = 441,
+ WL_RATE_4X4_VHT6SS4 = 442,
+ WL_RATE_4X4_VHT7SS4 = 443,
+ WL_RATE_4X4_VHT8SS4 = 444,
+ WL_RATE_4X4_VHT9SS4 = 445,
+ WL_RATE_P_4X4_VHT10SS4 = 446,
+ WL_RATE_P_4X4_VHT11SS4 = 447,
+
+ WL_RATE_4X4_HE0SS4 = 448,
+ WL_RATE_4X4_HE1SS4 = 449,
+ WL_RATE_4X4_HE2SS4 = 450,
+ WL_RATE_4X4_HE3SS4 = 451,
+ WL_RATE_4X4_HE4SS4 = 452,
+ WL_RATE_4X4_HE5SS4 = 453,
+ WL_RATE_4X4_HE6SS4 = 454,
+ WL_RATE_4X4_HE7SS4 = 455,
+ WL_RATE_4X4_HE8SS4 = 456,
+ WL_RATE_4X4_HE9SS4 = 457,
+ WL_RATE_4X4_HE10SS4 = 458,
+ WL_RATE_4X4_HE11SS4 = 459,
+
+ /****************************
+ * TX Beamforming, 4 chains *
+ ****************************
+ */
+
+ /* 1 Stream expanded + 3 */
+ WL_RATE_1X4_TXBF_OFDM_6 = 460,
+ WL_RATE_1X4_TXBF_OFDM_9 = 461,
+ WL_RATE_1X4_TXBF_OFDM_12 = 462,
+ WL_RATE_1X4_TXBF_OFDM_18 = 463,
+ WL_RATE_1X4_TXBF_OFDM_24 = 464,
+ WL_RATE_1X4_TXBF_OFDM_36 = 465,
+ WL_RATE_1X4_TXBF_OFDM_48 = 466,
+ WL_RATE_1X4_TXBF_OFDM_54 = 467,
+
+ WL_RATE_1X4_TXBF_MCS0 = 468,
+ WL_RATE_1X4_TXBF_MCS1 = 469,
+ WL_RATE_1X4_TXBF_MCS2 = 470,
+ WL_RATE_1X4_TXBF_MCS3 = 471,
+ WL_RATE_1X4_TXBF_MCS4 = 472,
+ WL_RATE_1X4_TXBF_MCS5 = 473,
+ WL_RATE_1X4_TXBF_MCS6 = 474,
+ WL_RATE_1X4_TXBF_MCS7 = 475,
+ WL_RATE_P_1X4_TXBF_MCS87 = 476,
+ WL_RATE_P_1X4_TXBF_MCS88 = 477,
+
+ WL_RATE_1X4_TXBF_VHT0SS1 = 468,
+ WL_RATE_1X4_TXBF_VHT1SS1 = 469,
+ WL_RATE_1X4_TXBF_VHT2SS1 = 470,
+ WL_RATE_1X4_TXBF_VHT3SS1 = 471,
+ WL_RATE_1X4_TXBF_VHT4SS1 = 472,
+ WL_RATE_1X4_TXBF_VHT5SS1 = 473,
+ WL_RATE_1X4_TXBF_VHT6SS1 = 474,
+ WL_RATE_1X4_TXBF_VHT7SS1 = 475,
+ WL_RATE_1X4_TXBF_VHT8SS1 = 476,
+ WL_RATE_1X4_TXBF_VHT9SS1 = 477,
+ WL_RATE_P_1X4_TXBF_VHT10SS1 = 478,
+ WL_RATE_P_1X4_TXBF_VHT11SS1 = 479,
+
+ WL_RATE_1X4_TXBF_HE0SS1 = 480,
+ WL_RATE_1X4_TXBF_HE1SS1 = 481,
+ WL_RATE_1X4_TXBF_HE2SS1 = 482,
+ WL_RATE_1X4_TXBF_HE3SS1 = 483,
+ WL_RATE_1X4_TXBF_HE4SS1 = 484,
+ WL_RATE_1X4_TXBF_HE5SS1 = 485,
+ WL_RATE_1X4_TXBF_HE6SS1 = 486,
+ WL_RATE_1X4_TXBF_HE7SS1 = 487,
+ WL_RATE_1X4_TXBF_HE8SS1 = 488,
+ WL_RATE_1X4_TXBF_HE9SS1 = 489,
+ WL_RATE_1X4_TXBF_HE10SS1 = 490,
+ WL_RATE_1X4_TXBF_HE11SS1 = 491,
+
+ /* 2 Streams expanded + 2 */
+ WL_RATE_2X4_TXBF_SDM_MCS8 = 492,
+ WL_RATE_2X4_TXBF_SDM_MCS9 = 493,
+ WL_RATE_2X4_TXBF_SDM_MCS10 = 494,
+ WL_RATE_2X4_TXBF_SDM_MCS11 = 495,
+ WL_RATE_2X4_TXBF_SDM_MCS12 = 496,
+ WL_RATE_2X4_TXBF_SDM_MCS13 = 497,
+ WL_RATE_2X4_TXBF_SDM_MCS14 = 498,
+ WL_RATE_2X4_TXBF_SDM_MCS15 = 499,
+ WL_RATE_P_2X4_TXBF_SDM_MCS99 = 500,
+ WL_RATE_P_2X4_TXBF_SDM_MCS100 = 501,
+
+ WL_RATE_2X4_TXBF_VHT0SS2 = 492,
+ WL_RATE_2X4_TXBF_VHT1SS2 = 493,
+ WL_RATE_2X4_TXBF_VHT2SS2 = 494,
+ WL_RATE_2X4_TXBF_VHT3SS2 = 495,
+ WL_RATE_2X4_TXBF_VHT4SS2 = 496,
+ WL_RATE_2X4_TXBF_VHT5SS2 = 497,
+ WL_RATE_2X4_TXBF_VHT6SS2 = 498,
+ WL_RATE_2X4_TXBF_VHT7SS2 = 499,
+ WL_RATE_2X4_TXBF_VHT8SS2 = 500,
+ WL_RATE_2X4_TXBF_VHT9SS2 = 501,
+ WL_RATE_P_2X4_TXBF_VHT10SS2 = 502,
+ WL_RATE_P_2X4_TXBF_VHT11SS2 = 503,
+
+ WL_RATE_2X4_TXBF_HE0SS2 = 504,
+ WL_RATE_2X4_TXBF_HE1SS2 = 505,
+ WL_RATE_2X4_TXBF_HE2SS2 = 506,
+ WL_RATE_2X4_TXBF_HE3SS2 = 507,
+ WL_RATE_2X4_TXBF_HE4SS2 = 508,
+ WL_RATE_2X4_TXBF_HE5SS2 = 509,
+ WL_RATE_2X4_TXBF_HE6SS2 = 510,
+ WL_RATE_2X4_TXBF_HE7SS2 = 511,
+ WL_RATE_2X4_TXBF_HE8SS2 = 512,
+ WL_RATE_2X4_TXBF_HE9SS2 = 513,
+ WL_RATE_2X4_TXBF_HE10SS2 = 514,
+ WL_RATE_2X4_TXBF_HE11SS2 = 515,
+
+ /* 3 Streams expanded + 1 */
+ WL_RATE_3X4_TXBF_SDM_MCS16 = 516,
+ WL_RATE_3X4_TXBF_SDM_MCS17 = 517,
+ WL_RATE_3X4_TXBF_SDM_MCS18 = 518,
+ WL_RATE_3X4_TXBF_SDM_MCS19 = 519,
+ WL_RATE_3X4_TXBF_SDM_MCS20 = 520,
+ WL_RATE_3X4_TXBF_SDM_MCS21 = 521,
+ WL_RATE_3X4_TXBF_SDM_MCS22 = 522,
+ WL_RATE_3X4_TXBF_SDM_MCS23 = 523,
+ WL_RATE_P_3X4_TXBF_SDM_MCS101 = 524,
+ WL_RATE_P_3X4_TXBF_SDM_MCS102 = 525,
+
+ WL_RATE_3X4_TXBF_VHT0SS3 = 516,
+ WL_RATE_3X4_TXBF_VHT1SS3 = 517,
+ WL_RATE_3X4_TXBF_VHT2SS3 = 518,
+ WL_RATE_3X4_TXBF_VHT3SS3 = 519,
+ WL_RATE_3X4_TXBF_VHT4SS3 = 520,
+ WL_RATE_3X4_TXBF_VHT5SS3 = 521,
+ WL_RATE_3X4_TXBF_VHT6SS3 = 522,
+ WL_RATE_3X4_TXBF_VHT7SS3 = 523,
+ WL_RATE_P_3X4_TXBF_VHT8SS3 = 524,
+ WL_RATE_P_3X4_TXBF_VHT9SS3 = 525,
+ WL_RATE_P_3X4_TXBF_VHT10SS3 = 526,
+ WL_RATE_P_3X4_TXBF_VHT11SS3 = 527,
+
+ WL_RATE_3X4_TXBF_HE0SS3 = 528,
+ WL_RATE_3X4_TXBF_HE1SS3 = 529,
+ WL_RATE_3X4_TXBF_HE2SS3 = 530,
+ WL_RATE_3X4_TXBF_HE3SS3 = 531,
+ WL_RATE_3X4_TXBF_HE4SS3 = 532,
+ WL_RATE_3X4_TXBF_HE5SS3 = 533,
+ WL_RATE_3X4_TXBF_HE6SS3 = 534,
+ WL_RATE_3X4_TXBF_HE7SS3 = 535,
+ WL_RATE_3X4_TXBF_HE8SS3 = 536,
+ WL_RATE_3X4_TXBF_HE9SS3 = 537,
+ WL_RATE_3X4_TXBF_HE10SS3 = 538,
+ WL_RATE_3X4_TXBF_HE11SS3 = 539,
+
+ /* 4 Streams */
+ WL_RATE_4X4_TXBF_SDM_MCS24 = 540,
+ WL_RATE_4X4_TXBF_SDM_MCS25 = 541,
+ WL_RATE_4X4_TXBF_SDM_MCS26 = 542,
+ WL_RATE_4X4_TXBF_SDM_MCS27 = 543,
+ WL_RATE_4X4_TXBF_SDM_MCS28 = 544,
+ WL_RATE_4X4_TXBF_SDM_MCS29 = 545,
+ WL_RATE_4X4_TXBF_SDM_MCS30 = 546,
+ WL_RATE_4X4_TXBF_SDM_MCS31 = 547,
+ WL_RATE_P_4X4_TXBF_SDM_MCS103 = 548,
+ WL_RATE_P_4X4_TXBF_SDM_MCS104 = 549,
+
+ WL_RATE_4X4_TXBF_VHT0SS4 = 540,
+ WL_RATE_4X4_TXBF_VHT1SS4 = 541,
+ WL_RATE_4X4_TXBF_VHT2SS4 = 542,
+ WL_RATE_4X4_TXBF_VHT3SS4 = 543,
+ WL_RATE_4X4_TXBF_VHT4SS4 = 544,
+ WL_RATE_4X4_TXBF_VHT5SS4 = 545,
+ WL_RATE_4X4_TXBF_VHT6SS4 = 546,
+ WL_RATE_4X4_TXBF_VHT7SS4 = 547,
+ WL_RATE_P_4X4_TXBF_VHT8SS4 = 548,
+ WL_RATE_P_4X4_TXBF_VHT9SS4 = 549,
+ WL_RATE_P_4X4_TXBF_VHT10SS4 = 550,
+ WL_RATE_P_4X4_TXBF_VHT11SS4 = 551,
+
+ WL_RATE_4X4_TXBF_HE0SS4 = 552,
+ WL_RATE_4X4_TXBF_HE1SS4 = 553,
+ WL_RATE_4X4_TXBF_HE2SS4 = 554,
+ WL_RATE_4X4_TXBF_HE3SS4 = 555,
+ WL_RATE_4X4_TXBF_HE4SS4 = 556,
+ WL_RATE_4X4_TXBF_HE5SS4 = 557,
+ WL_RATE_4X4_TXBF_HE6SS4 = 558,
+ WL_RATE_4X4_TXBF_HE7SS4 = 559,
+ WL_RATE_4X4_TXBF_HE8SS4 = 560,
+ WL_RATE_4X4_TXBF_HE9SS4 = 561,
+ WL_RATE_4X4_TXBF_HE10SS4 = 562,
+ WL_RATE_4X4_TXBF_HE11SS4 = 563
+} clm_rates_t;
+
+/* Number of rate codes */
+#define WL_NUMRATES 564
+
+/* This enum maps 802.11ax OFDMA (RU) 'rates' to a CLM index */
+
+typedef enum clm_ru_rates {
+ /* RU26 OFDMA UL rates */
+ WL_RU_RATE_1X1_26SS1 = 0,
+ WL_RU_RATE_1X2_26SS1 = 1,
+ WL_RU_RATE_2X2_26SS2 = 2,
+ WL_RU_RATE_1X2_TXBF_26SS1 = 3,
+ WL_RU_RATE_2X2_TXBF_26SS2 = 4,
+ WL_RU_RATE_1X3_26SS1 = 5,
+ WL_RU_RATE_2X3_26SS2 = 6,
+ WL_RU_RATE_3X3_26SS3 = 7,
+ WL_RU_RATE_1X3_TXBF_26SS1 = 8,
+ WL_RU_RATE_2X3_TXBF_26SS2 = 9,
+ WL_RU_RATE_3X3_TXBF_26SS3 = 10,
+ WL_RU_RATE_1X4_26SS1 = 11,
+ WL_RU_RATE_2X4_26SS2 = 12,
+ WL_RU_RATE_3X4_26SS3 = 13,
+ WL_RU_RATE_4X4_26SS4 = 14,
+ WL_RU_RATE_1X4_TXBF_26SS1 = 15,
+ WL_RU_RATE_2X4_TXBF_26SS2 = 16,
+ WL_RU_RATE_3X4_TXBF_26SS3 = 17,
+ WL_RU_RATE_4X4_TXBF_26SS4 = 18,
+
+ /* RU52 OFDMA UL rates */
+ WL_RU_RATE_1X1_52SS1 = 19,
+ WL_RU_RATE_1X2_52SS1 = 20,
+ WL_RU_RATE_2X2_52SS2 = 21,
+ WL_RU_RATE_1X2_TXBF_52SS1 = 22,
+ WL_RU_RATE_2X2_TXBF_52SS2 = 23,
+ WL_RU_RATE_1X3_52SS1 = 24,
+ WL_RU_RATE_2X3_52SS2 = 25,
+ WL_RU_RATE_3X3_52SS3 = 26,
+ WL_RU_RATE_1X3_TXBF_52SS1 = 27,
+ WL_RU_RATE_2X3_TXBF_52SS2 = 28,
+ WL_RU_RATE_3X3_TXBF_52SS3 = 29,
+ WL_RU_RATE_1X4_52SS1 = 30,
+ WL_RU_RATE_2X4_52SS2 = 31,
+ WL_RU_RATE_3X4_52SS3 = 32,
+ WL_RU_RATE_4X4_52SS4 = 33,
+ WL_RU_RATE_1X4_TXBF_52SS1 = 34,
+ WL_RU_RATE_2X4_TXBF_52SS2 = 35,
+ WL_RU_RATE_3X4_TXBF_52SS3 = 36,
+ WL_RU_RATE_4X4_TXBF_52SS4 = 37,
+
+ /* RU106 OFDMA UL rates */
+ WL_RU_RATE_1X1_106SS1 = 38,
+ WL_RU_RATE_1X2_106SS1 = 39,
+ WL_RU_RATE_2X2_106SS2 = 40,
+ WL_RU_RATE_1X2_TXBF_106SS1 = 41,
+ WL_RU_RATE_2X2_TXBF_106SS2 = 42,
+ WL_RU_RATE_1X3_106SS1 = 43,
+ WL_RU_RATE_2X3_106SS2 = 44,
+ WL_RU_RATE_3X3_106SS3 = 45,
+ WL_RU_RATE_1X3_TXBF_106SS1 = 46,
+ WL_RU_RATE_2X3_TXBF_106SS2 = 47,
+ WL_RU_RATE_3X3_TXBF_106SS3 = 48,
+ WL_RU_RATE_1X4_106SS1 = 49,
+ WL_RU_RATE_2X4_106SS2 = 50,
+ WL_RU_RATE_3X4_106SS3 = 51,
+ WL_RU_RATE_4X4_106SS4 = 52,
+ WL_RU_RATE_1X4_TXBF_106SS1 = 53,
+ WL_RU_RATE_2X4_TXBF_106SS2 = 54,
+ WL_RU_RATE_3X4_TXBF_106SS3 = 55,
+ WL_RU_RATE_4X4_TXBF_106SS4 = 56,
+
+ /* Upper Bound OFDMA DL 'rates' */
+ WL_RU_RATE_1X1_UBSS1 = 57,
+ WL_RU_RATE_1X2_UBSS1 = 58,
+ WL_RU_RATE_2X2_UBSS2 = 59,
+ WL_RU_RATE_1X2_TXBF_UBSS1 = 60,
+ WL_RU_RATE_2X2_TXBF_UBSS2 = 61,
+ WL_RU_RATE_1X3_UBSS1 = 62,
+ WL_RU_RATE_2X3_UBSS2 = 63,
+ WL_RU_RATE_3X3_UBSS3 = 64,
+ WL_RU_RATE_1X3_TXBF_UBSS1 = 65,
+ WL_RU_RATE_2X3_TXBF_UBSS2 = 66,
+ WL_RU_RATE_3X3_TXBF_UBSS3 = 67,
+ WL_RU_RATE_1X4_UBSS1 = 68,
+ WL_RU_RATE_2X4_UBSS2 = 69,
+ WL_RU_RATE_3X4_UBSS3 = 70,
+ WL_RU_RATE_4X4_UBSS4 = 71,
+ WL_RU_RATE_1X4_TXBF_UBSS1 = 72,
+ WL_RU_RATE_2X4_TXBF_UBSS2 = 73,
+ WL_RU_RATE_3X4_TXBF_UBSS3 = 74,
+ WL_RU_RATE_4X4_TXBF_UBSS4 = 75,
+
+ /* Less Upper Bound OFDMA DL 'rates' */
+ WL_RU_RATE_1X1_LUBSS1 = 76,
+ WL_RU_RATE_1X2_LUBSS1 = 77,
+ WL_RU_RATE_2X2_LUBSS2 = 78,
+ WL_RU_RATE_1X2_TXBF_LUBSS1 = 79,
+ WL_RU_RATE_2X2_TXBF_LUBSS2 = 80,
+ WL_RU_RATE_1X3_LUBSS1 = 81,
+ WL_RU_RATE_2X3_LUBSS2 = 82,
+ WL_RU_RATE_3X3_LUBSS3 = 83,
+ WL_RU_RATE_1X3_TXBF_LUBSS1 = 84,
+ WL_RU_RATE_2X3_TXBF_LUBSS2 = 85,
+ WL_RU_RATE_3X3_TXBF_LUBSS3 = 86,
+ WL_RU_RATE_1X4_LUBSS1 = 87,
+ WL_RU_RATE_2X4_LUBSS2 = 88,
+ WL_RU_RATE_3X4_LUBSS3 = 89,
+ WL_RU_RATE_4X4_LUBSS4 = 90,
+ WL_RU_RATE_1X4_TXBF_LUBSS1 = 91,
+ WL_RU_RATE_2X4_TXBF_LUBSS2 = 92,
+ WL_RU_RATE_3X4_TXBF_LUBSS3 = 93,
+ WL_RU_RATE_4X4_TXBF_LUBSS4 = 94,
+
+ /* RU242 OFDMA UL rates */
+ WL_RU_RATE_1X1_242SS1 = 95,
+ WL_RU_RATE_1X2_242SS1 = 96,
+ WL_RU_RATE_2X2_242SS2 = 97,
+ WL_RU_RATE_1X2_TXBF_242SS1 = 98,
+ WL_RU_RATE_2X2_TXBF_242SS2 = 99,
+ WL_RU_RATE_1X3_242SS1 = 100,
+ WL_RU_RATE_2X3_242SS2 = 101,
+ WL_RU_RATE_3X3_242SS3 = 102,
+ WL_RU_RATE_1X3_TXBF_242SS1 = 103,
+ WL_RU_RATE_2X3_TXBF_242SS2 = 104,
+ WL_RU_RATE_3X3_TXBF_242SS3 = 105,
+ WL_RU_RATE_1X4_242SS1 = 106,
+ WL_RU_RATE_2X4_242SS2 = 107,
+ WL_RU_RATE_3X4_242SS3 = 108,
+ WL_RU_RATE_4X4_242SS4 = 109,
+ WL_RU_RATE_1X4_TXBF_242SS1 = 110,
+ WL_RU_RATE_2X4_TXBF_242SS2 = 111,
+ WL_RU_RATE_3X4_TXBF_242SS3 = 112,
+ WL_RU_RATE_4X4_TXBF_242SS4 = 113,
+
+ /* RU484 OFDMA UL rates */
+ WL_RU_RATE_1X1_484SS1 = 114,
+ WL_RU_RATE_1X2_484SS1 = 115,
+ WL_RU_RATE_2X2_484SS2 = 116,
+ WL_RU_RATE_1X2_TXBF_484SS1 = 117,
+ WL_RU_RATE_2X2_TXBF_484SS2 = 118,
+ WL_RU_RATE_1X3_484SS1 = 119,
+ WL_RU_RATE_2X3_484SS2 = 120,
+ WL_RU_RATE_3X3_484SS3 = 121,
+ WL_RU_RATE_1X3_TXBF_484SS1 = 122,
+ WL_RU_RATE_2X3_TXBF_484SS2 = 123,
+ WL_RU_RATE_3X3_TXBF_484SS3 = 124,
+ WL_RU_RATE_1X4_484SS1 = 125,
+ WL_RU_RATE_2X4_484SS2 = 126,
+ WL_RU_RATE_3X4_484SS3 = 127,
+ WL_RU_RATE_4X4_484SS4 = 128,
+ WL_RU_RATE_1X4_TXBF_484SS1 = 129,
+ WL_RU_RATE_2X4_TXBF_484SS2 = 130,
+ WL_RU_RATE_3X4_TXBF_484SS3 = 131,
+ WL_RU_RATE_4X4_TXBF_484SS4 = 132,
+
+ /* RU996 OFDMA UL rates */
+ WL_RU_RATE_1X1_996SS1 = 133,
+ WL_RU_RATE_1X2_996SS1 = 134,
+ WL_RU_RATE_2X2_996SS2 = 135,
+ WL_RU_RATE_1X2_TXBF_996SS1 = 136,
+ WL_RU_RATE_2X2_TXBF_996SS2 = 137,
+ WL_RU_RATE_1X3_996SS1 = 138,
+ WL_RU_RATE_2X3_996SS2 = 139,
+ WL_RU_RATE_3X3_996SS3 = 140,
+ WL_RU_RATE_1X3_TXBF_996SS1 = 141,
+ WL_RU_RATE_2X3_TXBF_996SS2 = 142,
+ WL_RU_RATE_3X3_TXBF_996SS3 = 143,
+ WL_RU_RATE_1X4_996SS1 = 144,
+ WL_RU_RATE_2X4_996SS2 = 145,
+ WL_RU_RATE_3X4_996SS3 = 146,
+ WL_RU_RATE_4X4_996SS4 = 147,
+ WL_RU_RATE_1X4_TXBF_996SS1 = 148,
+ WL_RU_RATE_2X4_TXBF_996SS2 = 149,
+ WL_RU_RATE_3X4_TXBF_996SS3 = 150,
+ WL_RU_RATE_4X4_TXBF_996SS4 = 151
+} clm_ru_rates_t;
+
+/* Number of OFDMA rate codes */
+#define WL_RU_NUMRATES 152
+
+/* MCS rates */
+#define WLC_MAX_VHT_MCS 11 /**< Std VHT MCS 0-9 plus prop VHT MCS 10-11 */
+#define WLC_MAX_HE_MCS 11 /**< Std HE MCS 0-11 */
+#define WLC_MAX_EHT_MCS 13 /**< Std EHT MCS 0-13 */
+
+/* Convert encoded rate value in plcp header to numerical rates in 500 kbps increments */
+#define OFDM_PHY2MAC_RATE(rlpt) plcp_ofdm_rate_tbl[(rlpt) & 0x7]
+#define CCK_PHY2MAC_RATE(signal) ((signal)/5)
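+/*
+ * Editor's worked example (not part of the original header): the CCK SIGNAL
+ * field carries the rate in 100 kbps units, so CCK_PHY2MAC_RATE(110) == 22,
+ * i.e. 11 Mbps expressed in 500 kbps units.
+ */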
+
+/* 'proprietary' string should not exist in open source (OEM_ANDROID) */
+/* given a proprietary MCS, get number of spatial streams */
+#define GET_PROPRIETARY_11N_MCS_NSS(mcs) (1 + ((mcs) - 85) / 8)
+
+#define GET_11N_MCS_NSS(mcs) ((mcs) < 32 ? (1 + ((mcs) / 8)) : \
+ ((mcs) == 32 ? 1 : GET_PROPRIETARY_11N_MCS_NSS(mcs)))
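+/*
+ * Editor's worked example (not part of the original header): standard HT
+ * MCS 0-31 encode NSS as (mcs / 8) + 1, so GET_11N_MCS_NSS(7) == 1 and
+ * GET_11N_MCS_NSS(15) == 2; MCS 32 is the single-stream duplicate rate;
+ * proprietary MCS 87 yields GET_PROPRIETARY_11N_MCS_NSS(87) == 1.
+ */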
+
+#if defined(WLPROPRIETARY_11N_RATES) /* Broadcom proprietary rate support for 11n */
+#define IS_PROPRIETARY_11N_MCS(mcs) \
+ ((mcs) == 87 || (mcs) == 88 || (mcs) == 99 || (mcs) == 100 || (mcs) == 101 || (mcs) == 102)
+#define IS_PROPRIETARY_11N_SS_MCS(mcs) \
+ ((mcs) == 87 || (mcs) == 88)
+#else
+#define IS_PROPRIETARY_11N_MCS(mcs) FALSE
+#define IS_PROPRIETARY_11N_SS_MCS(mcs) FALSE /**< is proprietary HT single stream MCS */
+#endif /* WLPROPRIETARY_11N_RATES */
+
+extern const uint8 plcp_ofdm_rate_tbl[];
+
+uint8 wf_get_single_stream_mcs(uint mcs);
+
+/* extract NSS:MCS portions of the rspec */
+#define WF_NON_HT_MCS 0x80
+uint8 wf_vht_plcp_to_rate(uint8 *plcp);
+uint8 wf_he_plcp_to_rate(uint8 *plcp, bool is_mu);
+uint8 wf_eht_plcp_to_rate(uint8 *plcp, bool is_mu);
+
+/* convert rate from mcs to Kbps */
+uint wf_mcs_to_rate(uint mcs, uint nss, uint bw, int sgi);
+uint wf_he_mcs_to_rate(uint mcs, uint nss, uint bw, uint gi, bool dcm);
+
+uint wf_mcs_to_Ndbps(uint mcs, uint nss, uint bw);
+uint wf_he_mcs_to_Ndbps(uint mcs, uint nss, uint bw, bool dcm);
+uint32 wf_he_mcs_ru_to_ndbps(uint8 mcs, uint8 nss, bool dcm, uint8 ru_index);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+#endif /* _bcmwifi_rates_h_ */
diff --git a/bcmdhd.101.10.361.x/include/bcmwifi_rspec.h b/bcmdhd.101.10.361.x/include/bcmwifi_rspec.h
new file mode 100755
index 0000000..a90f3a3
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/bcmwifi_rspec.h
@@ -0,0 +1,286 @@
+/*
+ * Common OS-independent driver header for rate management.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _bcmwifi_rspec_h_
+#define _bcmwifi_rspec_h_
+
+#include <typedefs.h>
+
+/**
+ * ===================================================================================
+ * rate spec: holds the rate and mode-specific information required to generate a tx frame.
+ * Legacy CCK and OFDM information is held in the lower byte, in the same manner as in the
+ * past; the upper 3 bytes primarily hold MIMO-specific information.
+ * ===================================================================================
+ */
+typedef uint32 ratespec_t;
+
+/* Rate spec. definitions */
+/* for WL_RSPEC_ENCODING field >= WL_RSPEC_ENCODING_HE, backward compatible */
+#define WL_RSPEC_RATE_MASK 0x000000FFu /**< Legacy rate or MCS or MCS + NSS */
+#define WL_RSPEC_TXEXP_MASK 0x00000300u /**< Tx chain expansion beyond Nsts */
+#define WL_RSPEC_TXEXP_SHIFT 8u
+#define WL_RSPEC_HE_GI_MASK 0x00000C00u /* HE GI indices */
+#define WL_RSPEC_HE_GI_SHIFT 10u
+#define WL_RSPEC_ER_MASK 0x0000C000u /**< Range extension mask */
+#define WL_RSPEC_ER_SHIFT 14u
+#define WL_RSPEC_ER_TONE_MASK 0x00004000u /**< Range extension tone config */
+#define WL_RSPEC_ER_TONE_SHIFT 14u
+#define WL_RSPEC_ER_ENAB_MASK 0x00008000u /**< Range extension enable */
+#define WL_RSPEC_ER_ENAB_SHIFT 15u
+#define WL_RSPEC_BW_MASK 0x00070000u /**< Bandwidth */
+#define WL_RSPEC_BW_SHIFT 16u
+#define WL_RSPEC_DCM 0x00080000u /**< Dual Carrier Modulation */
+#define WL_RSPEC_DCM_SHIFT 19u
+#define WL_RSPEC_STBC 0x00100000u /**< STBC expansion, Nsts = 2 * Nss */
+#define WL_RSPEC_TXBF 0x00200000u
+#define WL_RSPEC_LDPC 0x00400000u
+#define WL_RSPEC_SGI 0x00800000u
+#define WL_RSPEC_SHORT_PREAMBLE 0x00800000u /**< DSSS short preamble - Encoding 0 */
+#ifdef WL11BE
+#define WL_RSPEC_ENCODING_MASK 0x07000000u /**< Encoding of RSPEC_RATE field */
+#else
+#define WL_RSPEC_ENCODING_MASK 0x03000000u /**< Encoding of RSPEC_RATE field */
+#endif
+#define WL_RSPEC_ENCODING_SHIFT 24u
+#define WL_RSPEC_OVERRIDE_RATE 0x40000000u /**< override rate only */
+#define WL_RSPEC_OVERRIDE_MODE 0x80000000u /**< override both rate & mode */
+
+/* ======== RSPEC_HE_GI|RSPEC_SGI fields for HE ======== */
+
+/* GI for HE */
+#define RSPEC_HE_LTF_GI(rspec) (((rspec) & WL_RSPEC_HE_GI_MASK) >> WL_RSPEC_HE_GI_SHIFT)
+#define WL_RSPEC_HE_1x_LTF_GI_0_8us (0x0u)
+#define WL_RSPEC_HE_2x_LTF_GI_0_8us (0x1u)
+#define WL_RSPEC_HE_2x_LTF_GI_1_6us (0x2u)
+#define WL_RSPEC_HE_4x_LTF_GI_3_2us (0x3u)
+#define RSPEC_ISHEGI(rspec) (RSPEC_HE_LTF_GI(rspec) > WL_RSPEC_HE_1x_LTF_GI_0_8us)
+#define HE_GI_TO_RSPEC(gi) (((ratespec_t)(gi) << WL_RSPEC_HE_GI_SHIFT) & WL_RSPEC_HE_GI_MASK)
+#define HE_GI_TO_RSPEC_SET(rspec, gi) (((rspec) & (~WL_RSPEC_HE_GI_MASK)) | \
+ HE_GI_TO_RSPEC(gi))
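+/*
+ * Illustrative sketch (editor's note, assuming the HE_RSPEC() constructor
+ * defined later in this header): selecting 2x LTF with 1.6 us GI on an
+ * existing ratespec:
+ *
+ *   ratespec_t rspec = HE_RSPEC(5, 2);
+ *   rspec = HE_GI_TO_RSPEC_SET(rspec, WL_RSPEC_HE_2x_LTF_GI_1_6us);
+ *   // RSPEC_HE_LTF_GI(rspec) == WL_RSPEC_HE_2x_LTF_GI_1_6us,
+ *   // and RSPEC_ISHEGI(rspec) is now true
+ */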
+
+/* Macros for HE LTF and GI */
+#define HE_IS_1X_LTF(gi) ((gi) == WL_RSPEC_HE_1x_LTF_GI_0_8us)
+#define HE_IS_2X_LTF(gi) (((gi) == WL_RSPEC_HE_2x_LTF_GI_0_8us) || \
+ ((gi) == WL_RSPEC_HE_2x_LTF_GI_1_6us))
+#define HE_IS_4X_LTF(gi) ((gi) == WL_RSPEC_HE_4x_LTF_GI_3_2us)
+
+#define HE_IS_GI_0_8us(gi) (((gi) == WL_RSPEC_HE_1x_LTF_GI_0_8us) || \
+ ((gi) == WL_RSPEC_HE_2x_LTF_GI_0_8us))
+#define HE_IS_GI_1_6us(gi) ((gi) == WL_RSPEC_HE_2x_LTF_GI_1_6us)
+#define HE_IS_GI_3_2us(gi) ((gi) == WL_RSPEC_HE_4x_LTF_GI_3_2us)
+
+/* RSPEC Macros for extracting and using HE-ER and DCM */
+#define RSPEC_HE_DCM(rspec) (((rspec) & WL_RSPEC_DCM) >> WL_RSPEC_DCM_SHIFT)
+#define RSPEC_HE_ER(rspec) (((rspec) & WL_RSPEC_ER_MASK) >> WL_RSPEC_ER_SHIFT)
+#ifdef WL11AX
+#define RSPEC_HE_ER_ENAB(rspec) (((rspec) & WL_RSPEC_ER_ENAB_MASK) >> \
+ WL_RSPEC_ER_ENAB_SHIFT)
+#else
+#define RSPEC_HE_ER_ENAB(rspec) FALSE
+#endif
+#define RSPEC_HE_ER_TONE(rspec) (((rspec) & WL_RSPEC_ER_TONE_MASK) >> \
+ WL_RSPEC_ER_TONE_SHIFT)
+/* ======== RSPEC_RATE field ======== */
+
+/* Encoding 0 - legacy rate */
+/* DSSS, CCK, and OFDM rates in [500kbps] units */
+#define WL_RSPEC_LEGACY_RATE_MASK 0x0000007F
+#define WLC_RATE_1M 2
+#define WLC_RATE_2M 4
+#define WLC_RATE_5M5 11
+#define WLC_RATE_11M 22
+#define WLC_RATE_6M 12
+#define WLC_RATE_9M 18
+#define WLC_RATE_12M 24
+#define WLC_RATE_18M 36
+#define WLC_RATE_24M 48
+#define WLC_RATE_36M 72
+#define WLC_RATE_48M 96
+#define WLC_RATE_54M 108
+
+/* Encoding 1 - HT MCS */
+#define WL_RSPEC_HT_MCS_MASK 0x0000007F /**< HT MCS value mask in rspec */
+
+/* Encoding >= 2 */
+#define WL_RSPEC_NSS_MCS_MASK 0x000000FF /* NSS & MCS values mask in rspec */
+#define WL_RSPEC_MCS_MASK 0x0000000F /* mimo MCS value mask in rspec */
+#define WL_RSPEC_NSS_MASK 0x000000F0 /* mimo NSS value mask in rspec */
+#define WL_RSPEC_NSS_SHIFT 4 /* mimo NSS value shift in rspec */
+
+/* Encoding 2 - VHT MCS + NSS */
+#define WL_RSPEC_VHT_MCS_MASK WL_RSPEC_MCS_MASK /**< VHT MCS value mask in rspec */
+#define WL_RSPEC_VHT_NSS_MASK WL_RSPEC_NSS_MASK /**< VHT Nss value mask in rspec */
+#define WL_RSPEC_VHT_NSS_SHIFT WL_RSPEC_NSS_SHIFT /**< VHT Nss value shift in rspec */
+
+/* Encoding 3 - HE MCS + NSS */
+#define WL_RSPEC_HE_MCS_MASK WL_RSPEC_MCS_MASK /**< HE MCS value mask in rspec */
+#define WL_RSPEC_HE_NSS_MASK WL_RSPEC_NSS_MASK /**< HE Nss value mask in rspec */
+#define WL_RSPEC_HE_NSS_SHIFT WL_RSPEC_NSS_SHIFT /**< HE Nss value shift in rspec */
+
+/* Encoding 4 - EHT MCS + NSS */
+#define WL_RSPEC_EHT_MCS_MASK WL_RSPEC_MCS_MASK /**< EHT MCS value mask in rspec */
+#define WL_RSPEC_EHT_NSS_MASK WL_RSPEC_NSS_MASK /**< EHT Nss value mask in rspec */
+#define WL_RSPEC_EHT_NSS_SHIFT WL_RSPEC_NSS_SHIFT /**< EHT Nss value shift in rspec */
+
+/* ======== RSPEC_BW field ======== */
+
+#define WL_RSPEC_BW_UNSPECIFIED 0u
+#define WL_RSPEC_BW_20MHZ 0x00010000u
+#define WL_RSPEC_BW_40MHZ 0x00020000u
+#define WL_RSPEC_BW_80MHZ 0x00030000u
+#define WL_RSPEC_BW_160MHZ 0x00040000u
+#define WL_RSPEC_BW_240MHZ 0x00050000u
+#define WL_RSPEC_BW_320MHZ 0x00060000u
+
+/* ======== RSPEC_ENCODING field ======== */
+
+/* NOTE: Assuming the rate field is always NSS+MCS starting from VHT encoding!
+ * Modify/fix the RSPEC_ISNSSMCS() macro if the above condition ever changes.
+ */
+#define WL_RSPEC_ENCODE_RATE 0x00000000u /**< Legacy rate is stored in RSPEC_RATE */
+#define WL_RSPEC_ENCODE_HT 0x01000000u /**< HT MCS is stored in RSPEC_RATE */
+#define WL_RSPEC_ENCODE_VHT 0x02000000u /**< VHT MCS and NSS are stored in RSPEC_RATE */
+#define WL_RSPEC_ENCODE_HE 0x03000000u /**< HE MCS and NSS are stored in RSPEC_RATE */
+#define WL_RSPEC_ENCODE_EHT 0x04000000u /**< EHT MCS and NSS are stored in RSPEC_RATE */
+
+/**
+ * ===============================
+ * Handy macros to parse rate spec
+ * ===============================
+ */
+#define RSPEC_BW(rspec) ((rspec) & WL_RSPEC_BW_MASK)
+#define RSPEC_IS20MHZ(rspec) (RSPEC_BW(rspec) == WL_RSPEC_BW_20MHZ)
+#define RSPEC_IS40MHZ(rspec) (RSPEC_BW(rspec) == WL_RSPEC_BW_40MHZ)
+#define RSPEC_IS80MHZ(rspec) (RSPEC_BW(rspec) == WL_RSPEC_BW_80MHZ)
+#define RSPEC_IS160MHZ(rspec) (RSPEC_BW(rspec) == WL_RSPEC_BW_160MHZ)
+#ifdef WL11BE
+#define RSPEC_IS240MHZ(rspec) (RSPEC_BW(rspec) == WL_RSPEC_BW_240MHZ)
+#define RSPEC_IS320MHZ(rspec) (RSPEC_BW(rspec) == WL_RSPEC_BW_320MHZ)
+#else
+#define RSPEC_IS320MHZ(rspec) (FALSE)
+#define RSPEC_IS240MHZ(rspec) (FALSE)
+#endif /* WL11BE */
+
+#define RSPEC_ISSGI(rspec) (((rspec) & WL_RSPEC_SGI) != 0)
+#define RSPEC_ISLDPC(rspec) (((rspec) & WL_RSPEC_LDPC) != 0)
+#define RSPEC_ISSTBC(rspec) (((rspec) & WL_RSPEC_STBC) != 0)
+#define RSPEC_ISTXBF(rspec) (((rspec) & WL_RSPEC_TXBF) != 0)
+
+#define RSPEC_TXEXP(rspec) (((rspec) & WL_RSPEC_TXEXP_MASK) >> WL_RSPEC_TXEXP_SHIFT)
+
+#define RSPEC_ENCODE(rspec) (((rspec) & WL_RSPEC_ENCODING_MASK) >> WL_RSPEC_ENCODING_SHIFT)
+#define RSPEC_ISLEGACY(rspec) (((rspec) & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_RATE)
+
+#define RSPEC_ISCCK(rspec) (RSPEC_ISLEGACY(rspec) && \
+ (int8)rate_info[(rspec) & WL_RSPEC_LEGACY_RATE_MASK] > 0)
+#define RSPEC_ISOFDM(rspec) (RSPEC_ISLEGACY(rspec) && \
+ (int8)rate_info[(rspec) & WL_RSPEC_LEGACY_RATE_MASK] < 0)
+
+#define RSPEC_ISHT(rspec) (((rspec) & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_HT)
+#define RSPEC_ISVHT(rspec) (((rspec) & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_VHT)
+#ifdef WL11AX
+#define RSPEC_ISHE(rspec) (((rspec) & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_HE)
+#else /* WL11AX */
+#define RSPEC_ISHE(rspec) 0
+#endif /* WL11AX */
+#ifdef WL11BE
+#define RSPEC_ISEHT(rspec) (((rspec) & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_EHT)
+#else /* WL11BE */
+#define RSPEC_ISEHT(rspec) 0
+#endif /* WL11BE */
+
+/* fast check if rate field is NSS+MCS format (starting from VHT ratespec) */
+#define RSPEC_ISVHTEXT(rspec) (((rspec) & WL_RSPEC_ENCODING_MASK) >= WL_RSPEC_ENCODE_VHT)
+/* fast check if rate field is NSS+MCS format (starting from HE ratespec) */
+#define RSPEC_ISHEEXT(rspec) (((rspec) & WL_RSPEC_ENCODING_MASK) >= WL_RSPEC_ENCODE_HE)
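+/*
+ * Illustrative sketch (editor's note): typical dispatch when decoding a
+ * ratespec into NSS+MCS vs. a legacy rate:
+ *
+ *   if (RSPEC_ISVHTEXT(rspec)) {            // VHT/HE/EHT: NSS+MCS format
+ *       uint mcs = (rspec) & WL_RSPEC_MCS_MASK;
+ *       uint nss = ((rspec) & WL_RSPEC_NSS_MASK) >> WL_RSPEC_NSS_SHIFT;
+ *   } else if (RSPEC_ISHT(rspec)) {
+ *       uint mcs = (rspec) & WL_RSPEC_HT_MCS_MASK;   // NSS implicit in MCS
+ *   } else {
+ *       uint rate = (rspec) & WL_RSPEC_LEGACY_RATE_MASK; // 500 kbps units
+ *   }
+ */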
+
+/**
+ * ================================
+ * Handy macros to create rate spec
+ * ================================
+ */
+/* create ratespecs */
+#define LEGACY_RSPEC(rate) (WL_RSPEC_ENCODE_RATE | WL_RSPEC_BW_20MHZ | \
+ ((rate) & WL_RSPEC_LEGACY_RATE_MASK))
+#define CCK_RSPEC(cck) LEGACY_RSPEC(cck)
+#define OFDM_RSPEC(ofdm) LEGACY_RSPEC(ofdm)
+#define HT_RSPEC(mcs) (WL_RSPEC_ENCODE_HT | ((mcs) & WL_RSPEC_HT_MCS_MASK))
+#define VHT_RSPEC(mcs, nss) (WL_RSPEC_ENCODE_VHT | \
+ (((nss) << WL_RSPEC_VHT_NSS_SHIFT) & WL_RSPEC_VHT_NSS_MASK) | \
+ ((mcs) & WL_RSPEC_VHT_MCS_MASK))
+#define HE_RSPEC(mcs, nss) (WL_RSPEC_ENCODE_HE | \
+ (((nss) << WL_RSPEC_HE_NSS_SHIFT) & WL_RSPEC_HE_NSS_MASK) | \
+ ((mcs) & WL_RSPEC_HE_MCS_MASK))
+#define EHT_RSPEC(mcs, nss) (WL_RSPEC_ENCODE_EHT | \
+ (((nss) << WL_RSPEC_EHT_NSS_SHIFT) & WL_RSPEC_EHT_NSS_MASK) | \
+ ((mcs) & WL_RSPEC_EHT_MCS_MASK))
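+/*
+ * Illustrative sketch (editor's note): composing a complete override
+ * ratespec for VHT MCS 7 with 2 spatial streams at 80 MHz with SGI:
+ *
+ *   ratespec_t rspec = VHT_RSPEC(7, 2) | WL_RSPEC_BW_80MHZ |
+ *       WL_RSPEC_SGI | WL_RSPEC_OVERRIDE_MODE;
+ */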
+
+/**
+ * ==================
+ * Other handy macros
+ * ==================
+ */
+/* return rate in unit of Kbps */
+#define RSPEC2KBPS(rspec) wf_rspec_to_rate(rspec)
+
+/* return rate in unit of 500Kbps */
+/* works only for legacy rate */
+#ifdef BCMDBG
+#define RSPEC2RATE(rspec) wf_rspec_to_rate_legacy(rspec)
+#else
+#define RSPEC2RATE(rspec) ((rspec) & WL_RSPEC_LEGACY_RATE_MASK)
+#endif
+
+/**
+ * =================================
+ * Macros to use the rate_info table
+ * =================================
+ */
+/* phy_rate table index is in [500kbps] units */
+#define WLC_MAXRATE 108 /**< in 500kbps units */
+extern const uint8 rate_info[];
+/* phy_rate table value is encoded */
+#define RATE_INFO_OFDM_MASK 0x80 /* ofdm mask */
+#define RATE_INFO_RATE_MASK 0x7f /* rate signal index mask */
+#define RATE_INFO_M_RATE_MASK 0x0f /* M_RATE_TABLE index mask */
+#define RATE_INFO_RATE_ISCCK(r) ((r) <= WLC_MAXRATE && (int8)rate_info[r] > 0)
+#define RATE_INFO_RATE_ISOFDM(r) ((r) <= WLC_MAXRATE && (int8)rate_info[r] < 0)
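+/*
+ * Editor's worked example (not part of the original header): rate_info[]
+ * is indexed by a legacy rate in 500 kbps units; the sign of the stored
+ * value distinguishes the modulation, e.g.:
+ *
+ *   RATE_INFO_RATE_ISCCK(WLC_RATE_11M)    // true: CCK entry is positive
+ *   RATE_INFO_RATE_ISOFDM(WLC_RATE_54M)   // true: OFDM entry is negative
+ */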
+
+/**
+ * ===================
+ * function prototypes
+ * ===================
+ */
+ratespec_t wf_vht_plcp_to_rspec(uint8 *plcp);
+ratespec_t wf_he_plcp_to_rspec(uint8 *plcp);
+ratespec_t wf_eht_plcp_to_rspec(uint8 *plcp);
+ratespec_t wf_ht_plcp_to_rspec(uint8 *plcp);
+
+#ifdef BCMDBG
+uint wf_rspec_to_rate_legacy(ratespec_t rspec);
+#endif
+uint wf_rspec_to_rate(ratespec_t rspec);
+uint wf_rspec_to_rate_rsel(ratespec_t rspec);
+
+#endif /* _bcmwifi_rspec_h_ */
diff --git a/bcmdhd.101.10.361.x/include/bcmwpa.h b/bcmdhd.101.10.361.x/include/bcmwpa.h
new file mode 100755
index 0000000..45d4a9f
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/bcmwpa.h
@@ -0,0 +1,634 @@
+/*
+ * bcmwpa.h - interface definitions of shared WPA-related functions
+ *
+ * Broadcom Proprietary and Confidential. Copyright (C) 2020,
+ * All Rights Reserved.
+ *
+ * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom;
+ * the contents of this file may not be disclosed to third parties,
+ * copied or duplicated in any form, in whole or in part, without
+ * the prior written permission of Broadcom.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Proprietary:>>
+ */
+
+#ifndef _BCMWPA_H_
+#define _BCMWPA_H_
+#ifdef BCM_EXTERNAL_APP
+typedef int osl_t;
+#endif
+#include <wpa.h>
+#if defined(BCMSUP_PSK) || defined(BCMSUPPL) || \
+ defined(MFP) || defined(BCMAUTH_PSK) || defined(WLFBT) || \
+ defined(WL_OKC) || defined(GTKOE) || defined(WL_FILS)
+#include <eapol.h>
+#endif
+#include <802.11.h>
+#ifdef WLP2P
+#include <p2p.h>
+#endif
+#include <rc4.h>
+#include <bcmutils.h>
+#include <wlioctl.h>
+#include <sha2.h>
+#ifdef WL_OCV
+#include <bcm_ocv.h>
+#endif /* WL_OCV */
+
+/* Field sizes for WPA key hierarchy */
+#define WPA_TEMP_TX_KEY_LEN 8u
+#define WPA_TEMP_RX_KEY_LEN 8u
+
+#define PMK_LEN 32u
+#define TKIP_PTK_LEN 64u
+#define TKIP_TK_LEN 32u
+#define AES_PTK_LEN 48u
+#define AES_TK_LEN 16u
+#define AES_GCM_PTK_LEN 48u
+#define AES_GCM_TK_LEN 16u
+#define AES_GCM256_PTK_LEN 64u
+#define AES_GCM256_TK_LEN 32u
+
+/* limits for pre-shared key lengths */
+#define WPA_MIN_PSK_LEN 8u
+#define WPA_MAX_PSK_LEN 64u
+
+#define WPA_KEY_DATA_LEN_256 256u /* allocation size of 256 for temp data pointer. */
+#define WPA_KEY_DATA_LEN_128 128u /* allocation size of 128 for temp data pointer. */
+
+/* Minimum length of WPA2 GTK encapsulation in EAPOL */
+#define EAPOL_WPA2_GTK_ENCAP_MIN_LEN (EAPOL_WPA2_ENCAP_DATA_HDR_LEN - \
+ TLV_HDR_LEN + EAPOL_WPA2_KEY_GTK_ENCAP_HDR_LEN)
+
+/* Minimum length of WPA2 IGTK encapsulation in EAPOL */
+#define EAPOL_WPA2_IGTK_ENCAP_MIN_LEN (EAPOL_WPA2_ENCAP_DATA_HDR_LEN - \
+ TLV_HDR_LEN + EAPOL_WPA2_KEY_IGTK_ENCAP_HDR_LEN)
+
+/* Minimum length of BIGTK encapsulation in EAPOL */
+#define EAPOL_WPA2_BIGTK_ENCAP_MIN_LEN (EAPOL_WPA2_ENCAP_DATA_HDR_LEN - \
+ TLV_HDR_LEN + EAPOL_WPA2_KEY_BIGTK_ENCAP_HDR_LEN)
+
+#ifdef WL_OCV
+/* Size of the OCI element */
+#define WPA_OCV_OCI_IE_SIZE \
+ (bcm_ocv_get_oci_len() + BCM_TLV_EXT_HDR_SIZE)
+
+/* Size of the OCI KDE */
+#define WPA_OCV_OCI_KDE_SIZE \
+ (bcm_ocv_get_oci_len() + EAPOL_WPA2_ENCAP_DATA_HDR_LEN)
+
+/* Size of the OCI subelement */
+#define WPA_OCV_OCI_SUBELEM_SIZE \
+ (bcm_ocv_get_oci_len() + TLV_HDR_LEN)
+
+/* Minimum length of WPA2 OCI encapsulation in EAPOL */
+#define EAPOL_WPA2_OCI_ENCAP_MIN_LEN \
+ (WPA_OCV_OCI_KDE_SIZE - TLV_HDR_LEN)
+#endif /* WL_OCV */
+
+#ifdef WLFIPS
+#define WLC_SW_KEYS(wlc, bsscfg) ((((wlc)->wsec_swkeys) || \
+ ((bsscfg)->wsec & (WSEC_SWFLAG | FIPS_ENABLED))))
+#else
+#define WLC_SW_KEYS(wlc, bsscfg) ((((wlc)->wsec_swkeys) || \
+ ((bsscfg)->wsec & WSEC_SWFLAG)))
+#endif /* WLFIPS */
+
+/* This doesn't really belong here, but neither does WSEC_CKIP* */
+/* per-packet encryption exemption policy */
+/* no exemption...follow whatever standard rules apply */
+#define WSEC_EXEMPT_NO 0
+/* send unencrypted */
+#define WSEC_EXEMPT_ALWAYS 1
+/* send unencrypted if no pairwise key */
+#define WSEC_EXEMPT_NO_PAIRWISE 2
+
+#define WPA_CIPHER_UNSPECIFIED 0xff
+#define WPA_P_CIPHERS_UNSPECIFIED 0x80000000
+
+#ifdef RSN_IE_INFO_STRUCT_RELOCATED
+#define WPA_AKMS_UNSPECIFIED 0x80000000
+#else
+#define WPA_AKMS_UNSPECIFIED 0
+#endif
+
+#ifdef BCMWAPI_WAI
+#define IS_WAPI_AUTH(auth) ((auth) == WAPI_AUTH_UNSPECIFIED || \
+ (auth) == WAPI_AUTH_PSK)
+#define INCLUDES_WAPI_AUTH(auth) \
+ ((auth) & (WAPI_AUTH_UNSPECIFIED | \
+ WAPI_AUTH_PSK))
+#endif /* BCMWAPI_WAI */
+
+#define IS_WPA_AKM(akm) ((akm) == RSN_AKM_NONE || \
+ (akm) == RSN_AKM_UNSPECIFIED || \
+ (akm) == RSN_AKM_PSK)
+
+#define IS_WPA2_AKM(akm) ((akm) == RSN_AKM_UNSPECIFIED || \
+ (akm) == RSN_AKM_PSK || \
+ (akm) == RSN_AKM_FILS_SHA256 || \
+ (akm) == RSN_AKM_FILS_SHA384)
+
+/* this doesn't mean much. A WPA (not RSN) akm type would match this */
+#define RSN_AKM_MASK (\
+ BCM_BIT(RSN_AKM_UNSPECIFIED) | \
+ BCM_BIT(RSN_AKM_PSK) | \
+ BCM_BIT(RSN_AKM_SAE_PSK) | \
+ BCM_BIT(RSN_AKM_FILS_SHA256) | \
+ BCM_BIT(RSN_AKM_FILS_SHA384) | \
+ BCM_BIT(RSN_AKM_OWE) | \
+ BCM_BIT(RSN_AKM_SUITEB_SHA256_1X) | \
+ BCM_BIT(RSN_AKM_SUITEB_SHA384_1X))
+
+/* verify less than 32 before shifting bits */
+#define VALID_AKM_BIT(akm) ((akm) < 32u ? BCM_BIT((akm)) : 0u)
+
+#define IS_RSN_AKM(akm) (VALID_AKM_BIT((akm)) & RSN_AKM_MASK)
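+/*
+ * Editor's worked example (not part of the original header): AKM checks
+ * are bounded bit tests, so out-of-range values never shift past 31 bits:
+ *
+ *   IS_RSN_AKM(RSN_AKM_SAE_PSK)  // nonzero: BCM_BIT(akm) is in the mask
+ *   VALID_AKM_BIT(40)            // 0u: guarded against an undefined shift
+ */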
+
+#define FBT_AKM_MASK (BCM_BIT(RSN_AKM_FBT_1X) | \
+ BCM_BIT(RSN_AKM_FBT_PSK) | \
+ BCM_BIT(RSN_AKM_SAE_FBT) | \
+ BCM_BIT(RSN_AKM_FBT_SHA256_FILS) | \
+ BCM_BIT(RSN_AKM_FBT_SHA384_FILS) | \
+ BCM_BIT(RSN_AKM_FBT_SHA384_1X) | \
+ BCM_BIT(RSN_AKM_FBT_SHA384_PSK))
+
+#define IS_FBT_AKM(akm) (VALID_AKM_BIT((akm)) & FBT_AKM_MASK)
+
+#define FILS_AKM_MASK (\
+ BCM_BIT(RSN_AKM_FILS_SHA256) | \
+ BCM_BIT(RSN_AKM_FILS_SHA384))
+
+#define IS_FILS_AKM(akm) (VALID_AKM_BIT((akm)) & FILS_AKM_MASK)
+
+#define MFP_AKM_MASK (\
+ BCM_BIT(RSN_AKM_SHA256_1X) | \
+ BCM_BIT(RSN_AKM_SHA256_PSK))
+
+#define IS_MFP_AKM(akm) (MFP_AKM_MASK & VALID_AKM_BIT((akm)))
+
+#ifdef BCMWAPI_WAI
+#define IS_WAPI_AKM(akm) ((akm) == RSN_AKM_NONE || \
+ (akm) == RSN_AKM_UNSPECIFIED || \
+ (akm) == RSN_AKM_PSK)
+#endif /* BCMWAPI_WAI */
+
+#define IS_TDLS_AKM(akm) ((akm) == RSN_AKM_TPK)
+
+/* Broadcom (OUI) authenticated key management suite */
+#define BRCM_AKM_NONE 0
+#define BRCM_AKM_PSK 1u /* Proprietary PSK AKM */
+
+#define IS_BRCM_AKM(akm) ((akm) == BRCM_AKM_PSK)
+
+#define ONE_X_AKM_MASK (BCM_BIT(RSN_AKM_FBT_1X) | \
+ BCM_BIT(RSN_AKM_MFP_1X) | \
+ BCM_BIT(RSN_AKM_SHA256_1X) | \
+ BCM_BIT(RSN_AKM_SUITEB_SHA256_1X) | \
+ BCM_BIT(RSN_AKM_SUITEB_SHA384_1X) | \
+ BCM_BIT(RSN_AKM_FBT_SHA384_1X) | \
+ BCM_BIT(RSN_AKM_UNSPECIFIED))
+
+#define IS_1X_AKM(akm) (VALID_AKM_BIT((akm)) & ONE_X_AKM_MASK)
+
+#define SUITEB_AKM_MASK (BCM_BIT(RSN_AKM_SUITEB_SHA256_1X) | \
+ BCM_BIT(RSN_AKM_SUITEB_SHA384_1X))
+#define IS_1X_SUITEB_AKM(akm) (VALID_AKM_BIT((akm)) & SUITEB_AKM_MASK)
+
+#define SAE_AKM_MASK (BCM_BIT(RSN_AKM_SAE_PSK) | BCM_BIT(RSN_AKM_SAE_FBT))
+#define IS_SAE_AKM(akm) (VALID_AKM_BIT((akm)) & SAE_AKM_MASK)
+
+#define SHA256_AKM_MASK (BCM_BIT(RSN_AKM_SHA256_1X) | \
+ BCM_BIT(RSN_AKM_SHA256_PSK) | \
+ BCM_BIT(RSN_AKM_SAE_PSK) | \
+ BCM_BIT(RSN_AKM_SAE_FBT) | \
+ BCM_BIT(RSN_AKM_SUITEB_SHA256_1X) | \
+ BCM_BIT(RSN_AKM_FILS_SHA256) | \
+ BCM_BIT(RSN_AKM_FBT_SHA256_FILS) | \
+ BCM_BIT(RSN_AKM_OWE))
+#define IS_SHA256_AKM(akm) (VALID_AKM_BIT((akm)) & SHA256_AKM_MASK)
+
+#define SHA384_AKM_MASK (BCM_BIT(RSN_AKM_SUITEB_SHA384_1X) | \
+ BCM_BIT(RSN_AKM_FBT_SHA384_1X) | \
+ BCM_BIT(RSN_AKM_FILS_SHA384) | \
+ BCM_BIT(RSN_AKM_FBT_SHA384_FILS) | \
+ BCM_BIT(RSN_AKM_PSK_SHA384))
+#define IS_SHA384_AKM(akm) (VALID_AKM_BIT((akm)) & SHA384_AKM_MASK)
+
+#define OPEN_AUTH_AKM_MASK (\
+ BCM_BIT(RSN_AKM_UNSPECIFIED) | \
+ BCM_BIT(RSN_AKM_PSK) | \
+ BCM_BIT(RSN_AKM_SHA256_1X) | \
+ BCM_BIT(RSN_AKM_SHA256_PSK) | \
+ BCM_BIT(RSN_AKM_SUITEB_SHA256_1X) | \
+ BCM_BIT(RSN_AKM_SUITEB_SHA384_1X) | \
+ BCM_BIT(RSN_AKM_PSK_SHA384))
+#define IS_OPEN_AUTH_AKM(akm) (VALID_AKM_BIT((akm)) & OPEN_AUTH_AKM_MASK)
+
+typedef enum akm_type {
+ WPA_AUTH_IE = 0x01,
+ RSN_AUTH_IE = 0x02,
+ OSEN_AUTH_IE = 0x04
+} akm_type_t;
+
+#define MAX_ARRAY 1
+#define MIN_ARRAY 0
+
+#define WPS_ATID_SEL_REGISTRAR 0x1041
+
+/* move these to appropriate file(s) */
+#define WPS_IE_FIXED_LEN 6
+
+/* GTK indices we use - 0-3 are valid per IEEE 802.11 2012 */
+#define GTK_INDEX_1 1
+#define GTK_INDEX_2 2
+
+/* IGTK indices we use - 4-5 are valid per IEEE 802.11 2012 */
+#define IGTK_INDEX_1 4
+#define IGTK_INDEX_2 5
+
+/* The following is needed for compatibility with router code because it automerges */
+#define IGTK_ID_TO_WSEC_INDEX(_id) (_id)
+#define WPA_AES_CMAC_CALC aes_cmac_calc
+
+#define IS_IGTK_INDEX(x) ((x) == IGTK_INDEX_1 || (x) == IGTK_INDEX_2)
+
+#ifdef RSN_IE_INFO_STRUCT_RELOCATED
+typedef struct rsn_ie_info {
+ uint8 version;
+ int parse_status;
+ device_type_t dev_type; /* AP or STA */
+ auth_ie_type_mask_t auth_ie_type; /* bit field of WPA, WPA2 and (not yet) WAPI */
+ rsn_cipher_t g_cipher;
+ rsn_akm_t sta_akm; /* single STA akm */
+ uint16 caps;
+ rsn_ciphers_t rsn_p_ciphers;
+ rsn_ciphers_t wpa_p_ciphers;
+ rsn_akm_mask_t rsn_akms;
+ rsn_akm_mask_t wpa_akms;
+ uint8 pmkid_count;
+ uint8 pmkids_offset; /* offset into the IE */
+ rsn_cipher_t g_mgmt_cipher;
+ rsn_cipher_t sta_cipher; /* single STA cipher */
+ uint16 key_desc; /* key descriptor version as STA */
+ uint16 mic_len; /* unused. keep for ROM compatibility. */
+ uint8 pmk_len; /* EAPOL PMK */
+ uint8 kck_mic_len; /* EAPOL MIC (by KCK) */
+ uint8 kck_len; /* EAPOL KCK */
+ uint8 kek_len; /* EAPOL KEK */
+ uint8 tk_len; /* EAPOL TK */
+ uint8 ptk_len; /* EAPOL PTK */
+ uint8 kck2_len; /* EAPOL KCK2 */
+ uint8 kek2_len; /* EAPOL KEK2 */
+ uint8* rsn_ie; /* RSN IE from beacon or assoc request */
+ uint16 rsn_ie_len; /* RSN IE length */
+ uint8* wpa_ie; /* WPA IE */
+ uint16 wpa_ie_len; /* WPA IE length (is it fixed?) */
+ /* The following are helpers in the AP rsn info, to be filled in by the STA
+ * in wsec_filter after determining which IE is being used.
+ */
+ uint32 p_ciphers; /* current ciphers for the chosen auth IE */
+ uint32 akms; /* current AKMs for the chosen auth IE */
+ uint8 *auth_ie; /* pointer to current chosen auth IE */
+ uint16 auth_ie_len;
+ uint8 ref_count; /* external reference count to decide if structure must be freed */
+ uint8 rsnxe_len; /* RSNXE IE length */
+ uint8 PAD[3];
+ uint8* rsnxe; /* RSNXE IE TLV buffer */
+ uint32 rsnxe_cap; /* RSNXE IE cap flag, refer to 802.11.h */
+} rsn_ie_info_t;
+#endif /* RSN_IE_INFO_STRUCT_RELOCATED */
+
+/* WiFi WPS Attribute fixed portion */
+typedef struct wps_at_fixed {
+ uint8 at[2];
+ uint8 len[2];
+ uint8 data[1];
+} wps_at_fixed_t;
+
+typedef const struct oui_akm_wpa_tbl {
+ const char *oui; /* WPA auth category */
+ uint16 rsn_akm;
+ uint32 wpa_auth;
+} oui_akm_wpa_tbl_t;
+
+#define WPS_AT_FIXED_LEN 4
+
+#define wps_ie_fixed_t wpa_ie_fixed_t
+
+/* What should be the multicast mask for AES? */
+#define WPA_UNICAST_AES_MASK (\
+ BCM_BIT(WPA_CIPHER_AES_CCM) | \
+ BCM_BIT(WPA_CIPHER_AES_GCM) | \
+ BCM_BIT(WPA_CIPHER_AES_GCM256))
+
+#define WPA_CIPHER_WEP_MASK (\
+ BCM_BIT(WPA_CIPHER_WEP_104) | \
+ BCM_BIT(WPA_CIPHER_WEP_40))
+
+/* temporary to pass pre-commit */
+#ifdef TMP_USE_RSN_INFO
+/* wsec macros */
+#ifdef EXT_STA
+#define UCAST_NONE(rsn_info) (((rsn_info)->p_ciphers == (1 << WPA_CIPHER_NONE)) && \
+ (!WLEXTSTA_ENAB(wlc->pub) || wlc->use_group_enabled))
+#else
+#define UCAST_NONE(rsn_info) (rsn_info->p_ciphers == (1 << WPA_CIPHER_NONE))
+#endif /* EXT_STA */
+
+#define UCAST_AES(rsn_info) (rsn_info->p_ciphers & WPA_UNICAST_AES_MASK)
+#define UCAST_TKIP(rsn_info) (rsn_info->p_ciphers & (1 << WPA_CIPHER_TKIP))
+#define UCAST_WEP(rsn_info) (rsn_info->p_ciphers & WPA_CIPHER_WEP_MASK)
+
+#define MCAST_NONE(rsn_info) ((rsn_info)->g_cipher == WPA_CIPHER_NONE)
+#define MCAST_AES(rsn_info) ((1 << rsn_info->g_cipher) & WPA_UNICAST_AES_MASK)
+#define MCAST_TKIP(rsn_info) (rsn_info->g_cipher == WPA_CIPHER_TKIP)
+#define MCAST_WEP(rsn_info) ((1 << rsn_info->g_cipher) & WPA_CIPHER_WEP_MASK)
+
+#endif /* TMP_USE_RSN_INFO */
+
+#define AKM_SHA256_MASK (\
+ BCM_BIT(RSN_AKM_SHA256_1X) | \
+ BCM_BIT(RSN_AKM_SHA256_PSK) | \
+ BCM_BIT(RSN_AKM_SAE_PSK) | \
+ BCM_BIT(RSN_AKM_OWE) | \
+ BCM_BIT(RSN_AKM_SUITEB_SHA256_1X) | \
+ BCM_BIT(RSN_AKM_FILS_SHA256) | \
+ BCM_BIT(RSN_AKM_FBT_SHA256_FILS) | \
+ BCM_BIT(RSN_AKM_SAE_FBT))
+
+#define AKM_SHA384_MASK (\
+ BCM_BIT(RSN_AKM_SUITEB_SHA384_1X) | \
+ BCM_BIT(RSN_AKM_FBT_SHA384_1X) | \
+ BCM_BIT(RSN_AKM_FILS_SHA384) | \
+ BCM_BIT(RSN_AKM_FBT_SHA384_FILS) | \
+ BCM_BIT(RSN_AKM_FBT_SHA384_PSK) | \
+ BCM_BIT(RSN_AKM_PSK_SHA384))
+
+/* these AKMs require MFP capable set in their IE */
+#define RSN_MFPC_AKM_MASK (\
+ BCM_BIT(RSN_AKM_SAE_PSK) | \
+ BCM_BIT(RSN_AKM_OWE) | \
+ BCM_BIT(RSN_AKM_SAE_FBT))
+
+/* AKMs supported by the in-driver supplicant.
+ * TODO: have to redesign this to include 1x and other PSK AKMs.
+ */
+#define IS_BCMSUP_AKM(akm) \
+ ((akm == RSN_AKM_PSK) | \
+ (akm == RSN_AKM_SAE_PSK) | \
+ (akm == RSN_AKM_OWE) | \
+ (akm == RSN_AKM_FBT_PSK) | \
+ (akm == RSN_AKM_SAE_FBT) | \
+ (akm == RSN_AKM_FBT_SHA384_1X) | \
+ (akm == RSN_AKM_FBT_SHA384_PSK))
+
+/* AKMs that use a common PSK, identified by the broadcast address */
+#define IS_SHARED_PMK_AKM(akm) \
+ ((akm == RSN_AKM_PSK) | \
+ (akm == RSN_AKM_FBT_PSK) | \
+ (akm == RSN_AKM_SHA256_PSK) | \
+ (akm == RSN_AKM_FBT_SHA384_PSK) | \
+ (akm == RSN_AKM_PSK_SHA384))
+
+#define RSN_AKM_USE_KDF(akm) (akm >= RSN_AKM_FBT_1X ? 1u : 0)
+
+/* Macro to abstract access to the rsn_ie_info structure in case
+ * we want to move it to a cubby or something else.
+ * Gives the rsn_info pointer.
+ */
+
+#define RSN_INFO_GET(s) (s->rsn_info)
+/* where the rsn_info resides */
+#define RSN_INFO_GET_PTR(s) (&s->rsn_info)
+
+#define AUTH_AKM_INCLUDED(s) (s->rsn_info != NULL && s->rsn_info->parse_status == BCME_OK && \
+ s->rsn_info->akms != WPA_AKMS_UNSPECIFIED)
+
+#define AKM_IS_MEMBER(akm, mask) ((mask) & VALID_AKM_BIT((akm)) || ((akm) == 0 && (mask) == 0))
+
+typedef enum eapol_key_type {
+ EAPOL_KEY_NONE = 0,
+ EAPOL_KEY_PMK = 1,
+ EAPOL_KEY_KCK_MIC = 2,
+ EAPOL_KEY_KEK = 3,
+ EAPOL_KEY_TK = 4,
+ EAPOL_KEY_PTK = 5,
+ EAPOL_KEY_KCK = 6,
+ EAPOL_KEY_KCK2 = 7,
+ EAPOL_KEY_KEK2 = 8
+} eapol_key_type_t;
+
+/* Return the address of the max or min array depending on the first argument.
+ * Return NULL in case of a tie.
+ */
+extern const uint8 *wpa_array_cmp(int max_array, const uint8 *x, const uint8 *y, uint len);
+
+/* Increment the array argument */
+extern void wpa_incr_array(uint8 *array, uint len);
+
+/* Convert WPA IE cipher suite to locally used value */
+extern bool wpa_cipher(wpa_suite_t *suite, ushort *cipher, bool wep_ok);
+
+/* Look for a WPA IE; return its address if found, NULL otherwise */
+extern wpa_ie_fixed_t *bcm_find_wpaie(uint8 *parse, uint len);
+extern bcm_tlv_t *bcm_find_wmeie(uint8 *parse, uint len, uint8 subtype, uint8 subtype_len);
+/* Look for a WPS IE; return its address if found, NULL otherwise */
+extern wps_ie_fixed_t *bcm_find_wpsie(const uint8 *parse, uint len);
+extern wps_at_fixed_t *bcm_wps_find_at(wps_at_fixed_t *at, uint len, uint16 id);
+int bcm_find_security_ies(uint8 *buf, uint buflen, void **wpa_ie,
+ void **rsn_ie);
+
+#ifdef WLP2P
+/* Look for a WiFi P2P IE; return its address if found, NULL otherwise */
+extern wifi_p2p_ie_t *bcm_find_p2pie(const uint8 *parse, uint len);
+#endif
+/* Look for a hotspot2.0 IE; return its address if found, NULL otherwise */
+bcm_tlv_t *bcm_find_hs20ie(uint8 *parse, uint len);
+/* Look for an OSEN IE; return its address if found, NULL otherwise */
+bcm_tlv_t *bcm_find_osenie(uint8 *parse, uint len);
+
+/* Check whether the given IE has the specific OUI and the specific type. */
+extern bool bcm_has_ie(uint8 *ie, uint8 **tlvs, uint *tlvs_len,
+ const uint8 *oui, uint oui_len, uint8 type);
+
+/* Check whether pointed-to IE looks like WPA. */
+#define bcm_is_wpa_ie(ie, tlvs, len) bcm_has_ie(ie, tlvs, len, \
+ (const uint8 *)WPA_OUI, WPA_OUI_LEN, WPA_OUI_TYPE)
+/* Check whether pointed-to IE looks like WPS. */
+#define bcm_is_wps_ie(ie, tlvs, len) bcm_has_ie(ie, tlvs, len, \
+ (const uint8 *)WPS_OUI, WPS_OUI_LEN, WPS_OUI_TYPE)
+#ifdef WLP2P
+/* Check whether the given IE looks like WFA P2P IE. */
+#define bcm_is_p2p_ie(ie, tlvs, len) bcm_has_ie(ie, tlvs, len, \
+ (const uint8 *)P2P_OUI, P2P_OUI_LEN, P2P_OUI_TYPE)
+#endif
+
+/* Convert WPA2 IE cipher suite to locally used value */
+extern bool wpa2_cipher(wpa_suite_t *suite, ushort *cipher, bool wep_ok);
+
+#if defined(BCMSUP_PSK) || defined(BCMSUPPL) || defined(GTKOE) || defined(WL_FILS)
+/* Look for an encapsulated GTK; return its address if found, NULL otherwise */
+extern eapol_wpa2_encap_data_t *wpa_find_gtk_encap(uint8 *parse, uint len);
+
+/* Check whether pointed-to IE looks like an encapsulated GTK. */
+extern bool wpa_is_gtk_encap(uint8 *ie, uint8 **tlvs, uint *tlvs_len);
+
+/* Look for encapsulated key data; return its address if found, NULL otherwise */
+extern eapol_wpa2_encap_data_t *wpa_find_kde(const uint8 *parse, uint len, uint8 type);
+
+/* Find kde data given eapol header. */
+extern int wpa_find_eapol_kde_data(eapol_header_t *eapol, uint8 eapol_mic_len,
+ uint8 subtype, eapol_wpa2_encap_data_t **out_data);
+
+/* Look for kde data in key data. */
+extern int wpa_find_kde_data(const uint8 *kde_buf, uint16 buf_len,
+ uint8 subtype, eapol_wpa2_encap_data_t **out_data);
+
+#ifdef WL_OCV
+/* Check if both local and remote are OCV capable */
+extern bool wpa_check_ocv_caps(uint16 local_caps, uint16 peer_caps);
+
+/* Write OCI KDE into the buffer */
+extern int wpa_add_oci_encap(chanspec_t chspec, uint8* buf, uint buf_len);
+
+/* Validate OCI KDE */
+extern int wpa_validate_oci_encap(chanspec_t chspec, const uint8* buf, uint buf_len);
+
+/* Write OCI IE into the buffer */
+extern int wpa_add_oci_ie(chanspec_t chspec, uint8* buf, uint buf_len);
+
+/* Validate OCI IE */
+extern int wpa_validate_oci_ie(chanspec_t chspec, const uint8* buf, uint buf_len);
+
+/* Write OCI subelement into the FTE buffer */
+extern int wpa_add_oci_ft_subelem(chanspec_t chspec, uint8* buf, uint buf_len);
+
+/* Validate OCI FTE subelement */
+extern int wpa_validate_oci_ft_subelem(chanspec_t chspec,
+ const uint8* buf, uint buf_len);
+#endif /* WL_OCV */
+#endif /* defined(BCMSUP_PSK) || defined(BCMSUPPL) || defined(GTKOE) || defined(WL_FILS) */
+
+#if defined(BCMSUP_PSK) || defined(WLFBT) || defined(BCMAUTH_PSK) || \
+ defined(WL_OKC) || defined(GTKOE)
+/* Calculate a pair-wise transient key */
+extern int wpa_calc_ptk(rsn_akm_t akm, const struct ether_addr *auth_ea,
+ const struct ether_addr *sta_ea, const uint8 *anonce, uint8 anonce_len,
+ const uint8* snonce, uint8 snonce_len, const uint8 *pmk,
+ uint pmk_len, uint8 *ptk, uint ptk_len);
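+/*
+ * Illustrative sketch (editor's note; 32-byte handshake nonces assumed, as
+ * used by the EAPOL 4-way handshake):
+ *
+ *   uint8 ptk[TKIP_PTK_LEN];
+ *   int err = wpa_calc_ptk(RSN_AKM_PSK, &auth_ea, &sta_ea,
+ *       anonce, 32u, snonce, 32u, pmk, PMK_LEN, ptk, sizeof(ptk));
+ */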
+
+/* Compute Message Integrity Code (MIC) over EAPOL message */
+extern int wpa_make_mic(eapol_header_t *eapol, uint key_desc, uint8 *mic_key,
+ rsn_ie_info_t *rsn_info, uchar *mic, uint mic_len);
+
+/* Check MIC of EAPOL message */
+extern bool wpa_check_mic(eapol_header_t *eapol,
+ uint key_desc, uint8 *mic_key, rsn_ie_info_t *rsn_info);
+
+/* Calculate PMKID */
+extern void wpa_calc_pmkid(const struct ether_addr *auth_ea,
+ const struct ether_addr *sta_ea, const uint8 *pmk, uint pmk_len, uint8 *pmkid);
+
+/* Encrypt key data for a WPA key message */
+extern bool wpa_encr_key_data(eapol_wpa_key_header_t *body, uint16 key_info,
+ uint8 *ekey, uint8 *gtk, uint8 *data, uint8 *encrkey, rc4_ks_t *rc4key,
+ const rsn_ie_info_t *rsn_info);
+
+typedef uint8 wpa_rc4_ivkbuf_t[EAPOL_WPA_KEY_IV_LEN + EAPOL_WPA_ENCR_KEY_MAX_LEN];
+/* Decrypt key data from a WPA key message */
+extern int wpa_decr_key_data(eapol_wpa_key_header_t *body, uint16 key_info,
+ uint8 *ekey, wpa_rc4_ivkbuf_t ivk, rc4_ks_t *rc4key, const rsn_ie_info_t *rsn_info,
+ uint16 *dec_len);
+#endif /* BCMSUP_PSK || WLFBT || BCMAUTH_PSK || WL_OKC || GTKOE */
+
+#if defined(BCMSUP_PSK) || defined(WLFBT) || defined(BCMAUTH_PSK) || \
+ defined(WL_OKC) || defined(GTKOE) || defined(WLHOSTFBT)
+
+/* Calculate PMKR0 for FT association */
+extern void wpa_calc_pmkR0(sha2_hash_type_t hash_type, const uint8 *ssid, uint ssid_len,
+ uint16 mdid, const uint8 *r0kh, uint r0kh_len, const struct ether_addr *sta_ea,
+ const uint8 *pmk, uint pmk_len, uint8 *pmkr0, uint8 *pmkr0name);
+
+/* Calculate PMKR1 for FT association */
+extern void wpa_calc_pmkR1(sha2_hash_type_t hash_type, const struct ether_addr *r1kh,
+ const struct ether_addr *sta_ea, const uint8 *pmk, uint pmk_len,
+ const uint8 *pmkr0name, uint8 *pmkr1, uint8 *pmkr1name);
+
+/* Calculate PTK for FT association */
+extern void wpa_calc_ft_ptk(sha2_hash_type_t hash_type, const struct ether_addr *bssid,
+ const struct ether_addr *sta_ea, const uint8 *anonce, const uint8* snonce,
+ const uint8 *pmk, uint pmk_len, uint8 *ptk, uint ptk_len);
+
+extern void wpa_derive_pmkR1_name(sha2_hash_type_t hash_type, struct ether_addr *r1kh,
+ struct ether_addr *sta_ea, uint8 *pmkr0name, uint8 *pmkr1name);
+
+#endif /* defined(BCMSUP_PSK) || defined(WLFBT) || defined(BCMAUTH_PSK) ||
+ * defined(WL_OKC) || defined(GTKOE) || defined(WLHOSTFBT)
+ */
+
+#if defined(BCMSUP_PSK) || defined(BCMSUPPL)
+
+/* Translate RSNE group mgmt cipher to CRYPTO_ALGO_XXX */
+extern uint8 bcmwpa_find_group_mgmt_algo(rsn_cipher_t g_mgmt_cipher);
+
+#endif /* BCMSUP_PSK || BCMSUPPL */
+
+extern bool bcmwpa_akm2WPAauth(uint8 *akm, uint32 *auth, bool sta_iswpa);
+
+extern bool bcmwpa_cipher2wsec(uint8 *cipher, uint32 *wsec);
+
+#ifdef RSN_IE_INFO_STRUCT_RELOCATED
+extern uint32 bcmwpa_wpaciphers2wsec(uint32 unicast);
+extern int bcmwpa_decode_ie_type(const bcm_tlv_t *ie, rsn_ie_info_t *info,
+ uint32 *remaining, uint8 *type);
+
+/* to be removed after merge to NEWT (changed into bcmwpa_rsn_ie_info_reset) */
+void rsn_ie_info_reset(rsn_ie_info_t *rsn_info, osl_t *osh);
+uint32 wlc_convert_rsn_to_wsec_bitmap(uint32 ap_cipher_mask);
+#else
+uint32 bcmwpa_wpaciphers2wsec(uint8 wpacipher);
+int bcmwpa_decode_ie_type(const bcm_tlv_t *ie, rsn_ie_info_t *info, uint32 *remaining);
+#endif /* RSN_IE_INFO_STRUCT_RELOCATED */
+
+extern int bcmwpa_parse_rsnie(const bcm_tlv_t *ie, rsn_ie_info_t *info, device_type_t dev_type);
+
+/* Calculate PMKID */
+extern void kdf_calc_pmkid(const struct ether_addr *auth_ea,
+ const struct ether_addr *sta_ea, const uint8 *key, uint key_len, uint8 *pmkid,
+ rsn_ie_info_t *rsn_info);
+
+extern void kdf_calc_ptk(const struct ether_addr *auth_ea, const struct ether_addr *sta_ea,
+ const uint8 *anonce, const uint8 *snonce, const uint8 *pmk, uint pmk_len,
+ uint8 *ptk, uint ptk_len);
+
+#ifdef WLTDLS
+/* Calculate TPK for TDLS association */
+extern void wpa_calc_tpk(const struct ether_addr *init_ea,
+ const struct ether_addr *resp_ea, const struct ether_addr *bssid,
+ const uint8 *anonce, const uint8* snonce, uint8 *tpk, uint tpk_len);
+#endif
+extern bool bcmwpa_is_wpa_auth(uint32 wpa_auth);
+extern bool bcmwpa_includes_wpa_auth(uint32 wpa_auth);
+extern bool bcmwpa_is_rsn_auth(uint32 wpa_auth);
+extern bool bcmwpa_includes_rsn_auth(uint32 wpa_auth);
+extern int bcmwpa_get_algo_key_len(uint8 algo, uint16 *key_len);
+
+/* macro to pass precommit on ndis builds */
+#define bcmwpa_is_wpa2_auth(wpa_auth) bcmwpa_is_rsn_auth(wpa_auth)
+extern uint8 bcmwpa_eapol_key_length(eapol_key_type_t key, rsn_akm_t akm, rsn_cipher_t cipher);
+
+/* rsn info allocation utilities. */
+void bcmwpa_rsn_ie_info_reset(rsn_ie_info_t *rsn_info, osl_t *osh);
+void bcmwpa_rsn_ie_info_rel_ref(rsn_ie_info_t **rsn_info, osl_t *osh);
+int bcmwpa_rsn_ie_info_add_ref(rsn_ie_info_t *rsn_info);
+int bcmwpa_rsn_akm_cipher_match(rsn_ie_info_t *rsn_info);
+int bcmwpa_rsnie_eapol_key_len(rsn_ie_info_t *info);
+#if defined(WL_BAND6G)
+/* Return TRUE if any AKM in akms_bmp is invalid in 6GHz */
+bool bcmwpa_is_invalid_6g_akm(const rsn_akm_mask_t akms_bmp);
+/* Return TRUE if any cipher in ciphers_bmp is invalid in 6GHz */
+bool bcmwpa_is_invalid_6g_cipher(const rsn_ciphers_t ciphers_bmp);
+#endif /* WL_BAND6G */
+#endif /* _BCMWPA_H_ */
diff --git a/bcmdhd.101.10.361.x/include/brcm_nl80211.h b/bcmdhd.101.10.361.x/include/brcm_nl80211.h
new file mode 100755
index 0000000..29a4281
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/brcm_nl80211.h
@@ -0,0 +1,77 @@
+/*
+ * Definitions for nl80211 vendor command/event access to host driver
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ *
+ */
+
+#ifndef _brcm_nl80211_h_
+#define _brcm_nl80211_h_
+
+//#ifdef OEM_ANDROID Need proper #ifdef in the referencing code as well
+#define OUI_BRCM 0x001018
+#define OUI_GOOGLE 0x001A11
+
+enum wl_vendor_subcmd {
+ BRCM_VENDOR_SCMD_UNSPEC = 0,
+ BRCM_VENDOR_SCMD_PRIV_STR = 1,
+ BRCM_VENDOR_SCMD_BCM_STR = 2,
+ BRCM_VENDOR_SCMD_BCM_PSK = 3,
+ BRCM_VENDOR_SCMD_SET_PMK = 4,
+ BRCM_VENDOR_SCMD_GET_FEATURES = 5,
+ BRCM_VENDOR_SCMD_SET_MAC = 6,
+ BRCM_VENDOR_SCMD_SET_CONNECT_PARAMS = 7,
+ BRCM_VENDOR_SCMD_SET_START_AP_PARAMS = 8,
+ BRCM_VENDOR_SCMD_MAX = 9
+};
+
+struct bcm_nlmsg_hdr {
+ uint cmd; /* common ioctl definition */
+ int len; /* expected return buffer length */
+ uint offset; /* user buffer offset */
+ uint set; /* get or set request (optional) */
+ uint magic; /* magic number for verification */
+};
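+/*
+ * Illustrative sketch (editor's note; field semantics taken from the
+ * comments above, the magic value is hypothetical): a userspace caller
+ * would prepend this header to the ioctl payload it tunnels over nl80211:
+ *
+ *   struct bcm_nlmsg_hdr hdr;
+ *   hdr.cmd = ioctl_cmd;       // common ioctl definition
+ *   hdr.len = ret_buf_len;     // expected return buffer length
+ *   hdr.offset = sizeof(hdr);  // payload follows the header
+ *   hdr.set = 0;               // 0 = get, nonzero = set
+ *   hdr.magic = NLIOC_MAGIC;   // hypothetical verification constant
+ */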
+
+enum bcmnl_attrs {
+ BCM_NLATTR_UNSPEC,
+
+ BCM_NLATTR_LEN,
+ BCM_NLATTR_DATA,
+
+ __BCM_NLATTR_AFTER_LAST,
+ BCM_NLATTR_MAX = __BCM_NLATTR_AFTER_LAST - 1
+};
+
+struct nl_prv_data {
+ int err; /* return result */
+ void *data; /* ioctl return buffer pointer */
+ uint len; /* ioctl return buffer length */
+ struct bcm_nlmsg_hdr *nlioc; /* bcm_nlmsg_hdr header pointer */
+};
+//#endif /* OEM_ANDROID */
+
+/* Keep common BCM netlink macros here */
+#define BCM_NL_USER 31
+#define BCM_NL_OXYGEN 30
+#define BCM_NL_TS 29
+/* ====== !! ADD NEW NL socket related defines here !! ====== */
+
+#endif /* _brcm_nl80211_h_ */
diff --git a/bcmdhd.101.10.361.x/include/d11.h b/bcmdhd.101.10.361.x/include/d11.h
new file mode 100755
index 0000000..5e8e7b2
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/d11.h
@@ -0,0 +1,6055 @@
+/*
+ * Chip-specific hardware definitions for
+ * Broadcom 802.11abg Networking Device Driver
+ *
+ * Broadcom Proprietary and Confidential. Copyright (C) 2020,
+ * All Rights Reserved.
+ *
+ * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom;
+ * the contents of this file may not be disclosed to third parties,
+ * copied or duplicated in any form, in whole or in part, without
+ * the prior written permission of Broadcom.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Proprietary:>>
+ */
+
+#ifndef _D11_H
+#define _D11_H
+
+/*
+ * Notes:
+ * 1. pre40/pre rev40: corerev < 40
+ * 2. pre80/pre rev80: 40 <= corerev < 80
+ * 3. rev40/D11AC: 80 > corerev >= 40
+ * 4. rev80: corerev >= 80
+ */
+
+#include <typedefs.h>
+#include <hndsoc.h>
+#include <sbhnddma.h>
+#include <802.11.h>
+
+#if defined(BCMDONGLEHOST) || defined(WL_UNITTEST)
+typedef struct {
+ uint32 pad;
+} shmdefs_t;
+#else /* defined(BCMDONGLEHOST) || defined(WL_UNITTEST) */
+#include <d11shm.h>
+#ifdef USE_BCMCONF_H
+#include <bcmconf.h>
+#else
+#include <wlc_cfg.h>
+#endif
+#endif /* defined(BCMDONGLEHOST) || defined(WL_UNITTEST) */
+
+#include <d11regs.h>
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+/* cpp contortions to concatenate w/arg prescan */
+#ifndef PAD
+#define _PADLINE(line) pad ## line
+#define _XSTR(line) _PADLINE(line)
+#define PAD _XSTR(__LINE__)
+#endif
+
+#define D11AC_BCN_TMPL_LEN 640 /**< length of the BCN template area for 11AC */
+
+#define LPRS_TMPL_LEN 512 /**< length of the legacy PRS template area */
+
+/* RX FIFO numbers */
+#define RX_FIFO 0 /**< data and ctl frames */
+#define RX_FIFO1 1 /**< ctl frames */
+#define RX_FIFO2 2 /**< ctl frames */
+#define RX_FIFO_NUMBER 3
+
+/* TX FIFO numbers using WME Access Classes */
+#define TX_AC_BK_FIFO 0 /**< Access Category Background TX FIFO */
+#define TX_AC_BE_FIFO 1 /**< Access Category Best-Effort TX FIFO */
+#define TX_AC_VI_FIFO 2 /**< Access Class Video TX FIFO */
+#define TX_AC_VO_FIFO 3 /**< Access Class Voice TX FIFO */
+#define TX_BCMC_FIFO 4 /**< Broadcast/Multicast TX FIFO */
+#define TX_ATIM_FIFO 5 /**< TX fifo for ATIM window info */
+#define TX_AC_N_DATA_FIFO 4 /**< Number of legacy Data Fifos (BK, BE, VI, VO) */
+
+/* TX FIFO numbers for trigger queues on HE-STA-only chips, i.e. valid
+ * only for 4369 or similar STA chips that support
+ * a single HE STA connection.
+ */
+#define TX_TRIG_BK_FIFO 6 /**< Access Category Background TX FIFO */
+#define TX_TRIG_BE_FIFO 7 /**< Access Category Best-Effort TX FIFO */
+#define TX_TRIG_VI_FIFO 8 /**< Access Class Video TX FIFO */
+#define TX_TRIG_VO_FIFO 9 /**< Access Class Voice TX FIFO */
+#define TX_TRIG_HP_FIFO 10 /**< Access High Priority TX FIFO */
+#define TX_TRIG_N_DATA_FIFO 4 /**< Number of Trigger Data Fifos (BK, BE, VI, VO) */
+
+#if defined(WL11AX_TRIGGERQ) && !defined(WL11AX_TRIGGERQ_DISABLED)
+#define IS_TRIG_FIFO(fifo) \
+ (((fifo) >= TX_TRIG_BK_FIFO) && ((fifo) < (TX_TRIG_BK_FIFO + TX_TRIG_N_DATA_FIFO)))
+#else
+#define IS_TRIG_FIFO(fifo) FALSE
+#endif /* defined(WL11AX_TRIGGERQ) && !defined(WL11AX_TRIGGERQ_DISABLED) */
+
+#define IS_AC_FIFO(fifo) \
+ ((fifo) < (TX_AC_BK_FIFO + TX_AC_N_DATA_FIFO))
+
+/** Legacy TX FIFO numbers */
+#define TX_DATA_FIFO TX_AC_BE_FIFO
+#define TX_CTL_FIFO TX_AC_VO_FIFO
+
+/** Trig TX FIFO numbers */
+#define TX_TRIG_DATA_FIFO TX_TRIG_BE_FIFO
+#define TX_TRIG_CTL_FIFO TX_TRIG_VO_FIFO
+
+/* Extended FIFOs for corerev >= 64 */
+#define TX_FIFO_6 6
+#define TX_FIFO_7 7
+#define TX_FIFO_16 16
+#define TX_FIFO_23 23
+#define TX_FIFO_25 25
+
+#define TX_FIFO_EXT_START TX_FIFO_6 /* Starting index of extended HW TX FIFOs */
+#define TX_FIFO_MU_START 8 /* index at which MU TX FIFOs start */
+
+#define D11REG_IHR_WBASE 0x200
+#define D11REG_IHR_BASE (D11REG_IHR_WBASE << 1)
+
+#define PIHR_BASE 0x0400 /**< byte address of packed IHR region */
+
+/* biststatus */
+#define BT_DONE (1U << 31) /**< bist done */
+#define BT_B2S (1 << 30) /**< bist2 ram summary bit */
+
+/* DMA intstatus and intmask */
+#define I_PC (1 << 10) /**< pci descriptor error */
+#define I_PD (1 << 11) /**< pci data error */
+#define I_DE (1 << 12) /**< descriptor protocol error */
+#define I_RU (1 << 13) /**< receive descriptor underflow */
+#define I_RO (1 << 14) /**< receive fifo overflow */
+#define I_XU (1 << 15) /**< transmit fifo underflow */
+#define I_RI (1 << 16) /**< receive interrupt */
+#define I_XI (1 << 24) /**< transmit interrupt */
+
+/* interrupt receive lazy */
+#define IRL_TO_MASK 0x00ffffff /**< timeout */
+#define IRL_FC_MASK 0xff000000 /**< frame count */
+#define IRL_FC_SHIFT 24 /**< frame count */
+#define IRL_DISABLE 0x01000000 /**< Disabled value: int on 1 frame, zero time */
+
+/** for corerev >= 80; earlier revs use bit 20 */
+#define MCTL_BCNS_PROMISC_SHIFT 21
+/** for corerev < 80 */
+#define MCTL_BCNS_PROMISC_SHIFT_LT80 20
+
+/* maccontrol register */
+#define MCTL_GMODE (1U << 31)
+#define MCTL_DISCARD_PMQ (1 << 30)
+#define MCTL_DISCARD_TXSTATUS (1 << 29)
+#define MCTL_TBTT_HOLD (1 << 28)
+#define MCTL_CLOSED_NETWORK (1 << 27)
+#define MCTL_WAKE (1 << 26)
+#define MCTL_HPS (1 << 25)
+#define MCTL_PROMISC (1 << 24)
+#define MCTL_KEEPBADFCS (1 << 23)
+#define MCTL_KEEPCONTROL (1 << 22)
+#define MCTL_BCNS_PROMISC (1 << MCTL_BCNS_PROMISC_SHIFT)
+#define MCTL_BCNS_PROMISC_LT80 (1 << MCTL_BCNS_PROMISC_SHIFT_LT80)
+#define MCTL_NO_TXDMA_LAST_PTR (1 << 20) /** for corerev >= 85 */
+#define MCTL_LOCK_RADIO (1 << 19)
+#define MCTL_AP (1 << 18)
+#define MCTL_INFRA (1 << 17)
+#define MCTL_BIGEND (1 << 16)
+#define MCTL_DISABLE_CT (1 << 14) /** for corerev >= 83.1 */
+#define MCTL_GPOUT_SEL_MASK (3 << 14)
+#define MCTL_GPOUT_SEL_SHIFT 14
+#define MCTL_EN_PSMDBG (1 << 13)
+#define MCTL_IHR_EN (1 << 10)
+#define MCTL_SHM_UPPER (1 << 9)
+#define MCTL_SHM_EN (1 << 8)
+#define MCTL_PSM_JMP_0 (1 << 2)
+#define MCTL_PSM_RUN (1 << 1)
+#define MCTL_EN_MAC (1 << 0)
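+/*
+ * Editor's illustrative sketch (not a documented init sequence): a minimal
+ * "MAC running" control word ORs the core enables together, e.g.:
+ *
+ *   uint32 mc = MCTL_EN_MAC | MCTL_PSM_RUN | MCTL_SHM_EN | MCTL_IHR_EN;
+ */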
+
+/* maccontrol1 register */
+#define MCTL1_GCPS (1u << 0u)
+#define MCTL1_EGS_MASK 0x0000c000
+#define MCTL1_EGS_SHIFT 14u
+#define MCTL1_AVB_ENABLE (1u << 1u)
+#define MCTL1_GPIOSEL_SHIFT 8u
+#define MCTL1_GPIOSEL (0x3F)
+#define MCTL1_GPIOSEL_MASK (MCTL1_GPIOSEL << MCTL1_GPIOSEL_SHIFT)
+/* Select MAC_SMPL_CPTR debug data that is placed in pc<7:1> & ifs_gpio_out<8:0> GPIOs */
+#define MCTL1_GPIOSEL_TSF_PC_IFS(_corerev) (D11REV_GE(_corerev, 85) ? 0x3b : 0x36)
+#define MCTL1_AVB_TRIGGER (1u << 2u)
+#define MCTL1_THIRD_AXI1_FOR_PSM (1u << 3u)
+#define MCTL1_AXI1_FOR_RX (1u << 4u)
+#define MCTL1_TXDMA_ENABLE_PASS (1u << 5u)
+/* SampleCollectPlayCtrl */
+#define SC_PLAYCTRL_MASK_ENABLE (1u << 8u)
+#define SC_PLAYCTRL_TRANS_MODE (1u << 6u)
+#define SC_PLAYCTRL_SRC_SHIFT 3u
+#define SC_PLAYCTRL_SRC_MASK (3u << SC_PLAYCTRL_SRC_SHIFT)
+#define SC_PLAYCTRL_SRC_PHY_DBG (3u << SC_PLAYCTRL_SRC_SHIFT)
+#define SC_PLAYCTRL_SRC_GPIO_OUT (2u << SC_PLAYCTRL_SRC_SHIFT)
+#define SC_PLAYCTRL_SRC_GPIO_IN (1u << SC_PLAYCTRL_SRC_SHIFT)
+#define SC_PLAYCTRL_SRC_PHY_SMPL (0u << SC_PLAYCTRL_SRC_SHIFT)
+#define SC_PLAYCTRL_STOP (1u << 2u)
+#define SC_PLAYCTRL_PAUSE (1u << 1u)
+#define SC_PLAYCTRL_START (1u << 0u)
+/* SCPortalSel fields */
+#define SC_PORTAL_SEL_AUTO_INCR (1u << 15u) /* Autoincr */
+#define SC_PORTAL_SEL_STORE_MASK (0u << 5u) /* Bits 14:5 SCStoreMask15to0 */
+#define SC_PORTAL_SEL_MATCH_MASK (4u << 5u) /* Bits 14:5 SCMatchMask15to0 */
+#define SC_PORTAL_SEL_MATCH_VALUE (8u << 5u) /* Bits 14:5 SCMatchValue15to0 */
+#define SC_PORTAL_SEL_TRIGGER_MASK (12u << 0u) /* Bits 4:0 SCTriggerMask15to0 */
+#define SC_PORTAL_SEL_TRIGGER_VALUE (16u << 0u) /* Bits 4:0 SCTriggerValue15to0 */
+#define SC_PORTAL_SEL_TRANS_MASK (20u << 0u) /* Bits 4:0 SCTransMask15to0 */
+
+/* GpioOut register */
+#define MGPIO_OUT_RXQ1_IFIFO_CNT_MASK 0x1fc0u
+#define MGPIO_OUT_RXQ1_IFIFO_CNT_SHIFT 6u
+
+#define MAC_RXQ1_IFIFO_CNT_ADDR 0x26u
+#define MAC_RXQ1_IFIFO_MAXLEN 3u
+
+/* maccommand register */
+#define MCMD_BCN0VLD (1 << 0)
+#define MCMD_BCN1VLD (1 << 1)
+#define MCMD_DIRFRMQVAL (1 << 2)
+#define MCMD_CCA (1 << 3)
+#define MCMD_BG_NOISE (1 << 4)
+#define MCMD_SKIP_SHMINIT (1 << 5) /**< only used for simulation */
+#define MCMD_SLOWCAL (1 << 6)
+#define MCMD_SAMPLECOLL MCMD_SKIP_SHMINIT /**< reuse for sample collect */
+#define MCMD_IF_DOWN (1 << 8) /**< indicate interface is going down */
+#define MCMD_TOF (1 << 9) /**< wifi ranging processing in ucode for rxd frames */
+#define MCMD_TSYNC (1 << 10) /**< start timestamp sync process in ucode */
+#define MCMD_RADIO_DOWN (1 << 11) /**< radio down by ucode */
+#define MCMD_RADIO_UP (1 << 12) /**< radio up by ucode */
+#define MCMD_TXPU (1 << 13) /**< txpu control by ucode */
+
+/* macintstatus/macintmask */
+#define MI_MACSSPNDD (1 << 0) /**< MAC has gracefully suspended */
+#define MI_BCNTPL (1 << 1) /**< beacon template available */
+#define MI_TBTT (1 << 2) /**< TBTT indication */
+#define MI_BCNSUCCESS (1 << 3) /**< beacon successfully tx'd */
+#define MI_BCNCANCLD (1 << 4) /**< beacon canceled (IBSS) */
+#define MI_ATIMWINEND (1 << 5) /**< end of ATIM-window (IBSS) */
+#define MI_PMQ (1 << 6) /**< PMQ entries available */
+#define MI_ALTTFS (1 << 7) /**< TX status interrupt for ARM offloads */
+#define MI_NSPECGEN_1 (1 << 8) /**< non-specific gen-stat bits that are set by PSM */
+#define MI_MACTXERR (1 << 9) /**< MAC level Tx error */
+#define MI_PMQERR (1 << 10)
+#define MI_PHYTXERR (1 << 11) /**< PHY Tx error */
+#define MI_PME (1 << 12) /**< Power Management Event */
+#define MI_GP0 (1 << 13) /**< General-purpose timer0 */
+#define MI_GP1 (1 << 14) /**< General-purpose timer1 */
+#define MI_DMAINT (1 << 15) /**< (ORed) DMA-interrupts */
+#define MI_TXSTOP (1 << 16) /**< MAC has completed a TX FIFO Suspend/Flush */
+#define MI_CCA (1 << 17) /**< MAC has completed a CCA measurement */
+#define MI_BG_NOISE (1 << 18) /**< MAC has collected background noise samples */
+#define MI_DTIM_TBTT (1 << 19) /**< MBSS DTIM TBTT indication */
+#define MI_PRQ (1 << 20) /**< Probe response queue needs attention */
+#define MI_HEB (1 << 21) /**< HEB (Hardware Event Block) interrupt - 11ax cores */
+#define MI_BT_RFACT_STUCK (1 << 22) /**< MAC has detected invalid BT_RFACT pin,
+ * valid when rev < 15
+ */
+#define MI_TTTT (1 << 22) /**< Target TIM Transmission Time,
+ * valid in rev = 26/29, or rev >= 42
+ */
+#define MI_BT_PRED_REQ (1 << 23) /**< MAC requested driver BTCX predictor calc */
+#define MI_BCNTRIM_RX (1 << 24) /**< PSM received a partial beacon */
+#define MI_P2P (1 << 25) /**< WiFi P2P interrupt */
+#define MI_DMATX (1 << 26) /**< MAC new frame ready */
+#define MI_TSSI_LIMIT (1 << 27) /**< Tssi Limit Reach, TxIdx=0/127 Interrupt */
+#define MI_HWACI_NOTIFY (1 << 27) /**< HWACI detects ACI, Apply Mitigation settings */
+#define MI_RFDISABLE (1 << 28) /**< MAC detected a change on RF Disable input
+ * (corerev >= 10)
+ */
+#define MI_TFS (1 << 29) /**< MAC has completed a TX (corerev >= 5) */
+#define MI_LEGACY_BUS_ERROR (1 << 30) /**< uCode indicated bus error */
+#define MI_TO (1U << 31) /**< general purpose timeout (corerev >= 3) */
+
+#define MI_RXOV MI_NSPECGEN_1 /**< rxfifo overflow interrupt */
+
+/* macintstatus_ext/macintmask_ext */
+#define MI_BUS_ERROR (1U << 0u) /**< uCode indicated bus error */
+#define MI_VCOPLL (1U << 1u) /**< uCode indicated PLL lock issue */
+#define MI_EXT_PS_CHG (1U << 2u) /**< Power state is changing (PS 0 <-> 1) */
+#define MI_DIS_ULOFDMA (1U << 3u) /**< ucode indicated disabling ULOFDMA request */
+#define MI_EXT_PM_OFFLOAD (1U << 4u) /**< PM offload */
+#define MI_OBSS_INTR (1U << 5u) /**< OBSS detection interrupt */
+#define MI_SENSORC_CX_REQ (1U << 6u) /**< SensorC Mitigation Request interrupt */
+#define MI_RLL_NAV_HOF (1U << 7u) /**< RLLW Switch */
+
+#define MI_EXT_TXE_SHARED_ERR (1U << 28u) /* Error event in blocks inside TXE shared
+ * (BMC/AQM/AQM-DMA/MIF)
+ */
+
+/* Mac capabilities registers */
+#define MCAP_TKIPMIC 0x80000000 /**< TKIP MIC hardware present */
+#define MCAP_TKIPPH2KEY 0x40000000 /**< TKIP phase 2 key hardware present */
+#define MCAP_BTCX 0x20000000 /**< BT coexistence hardware and pins present */
+#define MCAP_MBSS 0x10000000 /**< Multi-BSS hardware present */
+#define MCAP_RXFSZ_MASK 0x0ff80000 /**< Rx fifo size in blocks (revid >= 16) */
+#define MCAP_RXFSZ_SHIFT 19
+#define MCAP_NRXQ_MASK 0x00070000 /**< Max Rx queues supported - 1 */
+#define MCAP_NRXQ_SHIFT 16
+#define MCAP_UCMSZ_MASK 0x0000e000 /**< Ucode memory size */
+#define MCAP_UCMSZ_3K3 0 /**< 3328 Words Ucode memory, in unit of 50-bit */
+#define MCAP_UCMSZ_4K 1 /**< 4096 Words Ucode memory */
+#define MCAP_UCMSZ_5K 2 /**< 5120 Words Ucode memory */
+#define MCAP_UCMSZ_6K 3 /**< 6144 Words Ucode memory */
+#define MCAP_UCMSZ_8K 4 /**< 8192 Words Ucode memory */
+#define MCAP_UCMSZ_SHIFT 13
+#define MCAP_TXFSZ_MASK 0x00000ff8 /**< Tx fifo size (* 512 bytes) */
+#define MCAP_TXFSZ_SHIFT 3
+#define MCAP_NTXQ_MASK 0x00000007 /**< Max Tx queues supported - 1 */
+#define MCAP_NTXQ_SHIFT 0
+
+#define MCAP_BTCX_SUP(corerev) (MCAP_BTCX)
+
+#define MCAP_UCMSZ_TYPES 8 /**< different Ucode memory size types */
+
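+/*
+ * Illustrative sketch (editorial): extracting fifo geometry from a machwcap
+ * value with the masks above. The machwcap argument is a hypothetical register
+ * read; guarded by the hypothetical D11_EXAMPLE_SNIPPETS flag.
+ */
+#ifdef D11_EXAMPLE_SNIPPETS
+static uint32 example_rxfifo_blocks(uint32 machwcap)
+{
+	/* Rx fifo size in blocks (revid >= 16) */
+	return (machwcap & MCAP_RXFSZ_MASK) >> MCAP_RXFSZ_SHIFT;
+}
+
+static uint32 example_txfifo_bytes(uint32 machwcap)
+{
+	/* the Tx fifo size field counts 512-byte units */
+	return ((machwcap & MCAP_TXFSZ_MASK) >> MCAP_TXFSZ_SHIFT) * 512u;
+}
+#endif /* D11_EXAMPLE_SNIPPETS */
+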
+/* machwcap1 */
+#define MCAP1_ERC_MASK 0x00000001 /**< external radio coexistence */
+#define MCAP1_ERC_SHIFT 0
+#define MCAP1_SHMSZ_MASK 0x0000000e /**< shm size (corerev >= 16) */
+#define MCAP1_SHMSZ_SHIFT 1
+#define MCAP1_SHMSZ_1K 0 /**< 1024 words in unit of 32-bit */
+#define MCAP1_SHMSZ_2K 1 /**< 1536 words in unit of 32-bit */
+#define MCAP1_NUMMACCHAINS 0x00003000 /**< Indicates one less than the
+ number of MAC Chains in the MAC.
+ */
+#define MCAP1_NUMMACCHAINS_SHIFT 12
+#define MCAP1_RXBLMAX_MASK 0x1800000u
+#define MCAP1_RXBLMAX_SHIFT 23u
+#define MCAP1_NUM_HEB_MASK 0xE0000000u
+#define MCAP1_NUM_HEB_SHIFT 29u
+#define MCAP1_NUM_HEB_FACTOR 3u
+#define MCAP1_CT_CAPABLE_SHIFT 17
+
+/* BTCX control */
+#define BTCX_CTRL_EN 0x0001 /**< Enable BTCX module */
+#define BTCX_CTRL_SW 0x0002 /**< Enable software override */
+#define BTCX_CTRL_DSBLBTCXOUT 0x8000 /* Disable txconf/prisel signal output from btcx module */
+
+#define BTCX_CTRL_PRI_POL 0x0080 /* Invert prisel polarity */
+#define BTCX_CTRL_TXC_POL 0x0020 /* Invert txconf polarity */
+
+#define SW_PRI_ON 1 /* switch prisel polarity */
+#define SW_TXC_ON 2 /* switch txconf polarity */
+
+/* BTCX status */
+#define BTCX_STAT_RA 0x0001 /**< RF_ACTIVE state */
+
+/* BTCX transaction control */
+#define BTCX_TRANS_ANTSEL 0x0040 /**< ANTSEL output */
+#define BTCX_TRANS_TXCONF 0x0080 /**< TX_CONF output */
+
+/* pmqhost data */
+#define PMQH_DATA_MASK 0xffff0000 /**< data entry of head pmq entry */
+#define PMQH_BSSCFG 0x00100000 /**< PM entry for BSS config */
+#define PMQH_PMOFF 0x00010000 /**< PM Mode OFF: power save off */
+#define PMQH_PMON 0x00020000 /**< PM Mode ON: power save on */
+#define PMQH_PMPS 0x00200000 /**< PM Mode PRETEND */
+#define PMQH_DASAT 0x00040000 /**< Dis-associated or De-authenticated */
+#define PMQH_ATIMFAIL 0x00080000 /**< ATIM not acknowledged */
+#define PMQH_DEL_ENTRY 0x00000001 /**< delete head entry */
+#define PMQH_DEL_MULT 0x00000002 /**< delete head entry to cur read pointer -1 */
+#define PMQH_OFLO 0x00000004 /**< pmq overflow indication */
+#define PMQH_NOT_EMPTY 0x00000008 /**< entries are present in pmq */
+
+/* phydebug (corerev >= 3) */
+#define PDBG_CRS (1 << 0) /**< phy is asserting carrier sense */
+#define PDBG_TXA (1 << 1) /**< phy is taking xmit byte from mac this cycle */
+#define PDBG_TXF (1 << 2) /**< mac is instructing the phy to transmit a frame */
+#define PDBG_TXE (1 << 3) /**< phy is signaling a transmit Error to the mac */
+#define PDBG_RXF (1 << 4) /**< phy detected the end of a valid frame preamble */
+#define PDBG_RXS (1 << 5) /**< phy detected the end of a valid PLCP header */
+#define PDBG_RXFRG (1 << 6) /**< rx start not asserted */
+#define PDBG_RXV (1 << 7) /**< mac is taking receive byte from phy this cycle */
+#define PDBG_RFD (1 << 16) /**< RF portion of the radio is disabled */
+
+/* objaddr register */
+#define OBJADDR_UCM_SEL 0x00000000
+#define OBJADDR_SHM_SEL 0x00010000
+#define OBJADDR_SCR_SEL 0x00020000
+#define OBJADDR_IHR_SEL 0x00030000
+#define OBJADDR_RCMTA_SEL 0x00040000
+#define OBJADDR_AMT_SEL 0x00040000
+#define OBJADDR_SRCHM_SEL 0x00060000
+#define OBJADDR_KEYTBL_SEL 0x000c0000
+#define OBJADDR_HEB_SEL 0x00120000
+#define OBJADDR_TXDC_TBL_SEL 0x00140000
+#define OBJADDR_TXDC_RIB_SEL 0x00150000
+#define OBJADDR_FCBS_SEL 0x00160000
+#define OBJADDR_LIT_SEL 0x00170000
+#define OBJADDR_LIB_SEL 0x00180000
+#define OBJADDR_WINC 0x01000000
+#define OBJADDR_RINC 0x02000000
+#define OBJADDR_AUTO_INC 0x03000000
+/* SHM/SCR/IHR/SHMX/SCRX/IHRX allow 2-byte reads/writes; all other regions allow only 4-byte accesses */
+#define OBJADDR_2BYTES_ACCESS(sel) \
+ (((sel & 0x70000) == OBJADDR_SHM_SEL) || \
+ ((sel & 0x70000) == OBJADDR_SCR_SEL) || \
+ ((sel & 0x70000) == OBJADDR_IHR_SEL))
+
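+/*
+ * Illustrative sketch (editorial): composing an objaddr value that selects SHM
+ * with auto-increment, so consecutive objdata accesses walk the memory. Only
+ * the OBJADDR_* encoding comes from this file; the guard flag is hypothetical.
+ */
+#ifdef D11_EXAMPLE_SNIPPETS
+static uint32 example_objaddr_shm_autoinc(uint32 word_offset)
+{
+	return OBJADDR_SHM_SEL | OBJADDR_AUTO_INC | word_offset;
+}
+#endif /* D11_EXAMPLE_SNIPPETS */
+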
+/* objdata register */
+#define OBJDATA_WR_COMPLT 0x00000001
+
+/* frmtxstatus */
+#define TXS_V (1 << 0) /**< valid bit */
+
+#define TXS_STATUS_MASK 0xffff
+/* sw mask to map txstatus for corerevs <= 4 to be the same as for corerev > 4 */
+#define TXS_COMPAT_MASK 0x3
+#define TXS_COMPAT_SHIFT 1
+#define TXS_FID_MASK 0xffff0000
+#define TXS_FID_SHIFT 16
+
+/* frmtxstatus2 */
+#define TXS_SEQ_MASK 0xffff
+#define TXS_PTX_MASK 0xff0000
+#define TXS_PTX_SHIFT 16
+#define TXS_MU_MASK 0x01000000
+#define TXS_MU_SHIFT 24
+
+/* clk_ctl_st, corerev >= 17 */
+#define CCS_ERSRC_REQ_D11PLL 0x00000100 /**< d11 core pll request */
+#define CCS_ERSRC_REQ_PHYPLL 0x00000200 /**< PHY pll request */
+#define CCS_ERSRC_REQ_PTMPLL 0x00001000 /* PTM clock request */
+#define CCS_ERSRC_AVAIL_D11PLL 0x01000000 /**< d11 core pll available */
+#define CCS_ERSRC_AVAIL_PHYPLL 0x02000000 /**< PHY pll available */
+#define CCS_ERSRC_AVAIL_PTMPLL 0x10000000 /**< PTM pll available */
+
+/* tsf_cfprep register */
+#define CFPREP_CBI_MASK 0xffffffc0
+#define CFPREP_CBI_SHIFT 6
+#define CFPREP_CFPP 0x00000001
+
+/* receive fifo control */
+#define RFC_FR (1 << 0) /**< frame ready */
+#define RFC_DR (1 << 1) /**< data ready */
+
+/* tx fifo sizes for corerev >= 9 */
+/* tx fifo size values are in terms of 256-byte blocks */
+#define TXFIFOCMD_RESET_MASK (1 << 15) /**< reset */
+#define TXFIFOCMD_FIFOSEL_SHIFT 8 /**< fifo */
+#define TXFIFOCMD_FIFOSEL_SET(val) ((val & 0x7) << TXFIFOCMD_FIFOSEL_SHIFT) /* fifo */
+#define TXFIFOCMD_FIFOSEL_GET(val) ((val >> TXFIFOCMD_FIFOSEL_SHIFT) & 0x7) /* fifo */
+#define TXFIFO_FIFOTOP_SHIFT 8 /**< fifo start */
+
+#define TXFIFO_FIFO_START(def, def1) ((def & 0xFF) | ((def1 & 0xFF) << 8))
+#define TXFIFO_FIFO_END(def, def1) (((def & 0xFF00) >> 8) | (def1 & 0xFF00))
+
+/* Must redefine to 65 for 16 MBSS */
+#ifdef WLLPRS
+#define TXFIFO_START_BLK16 (65+16) /**< Base address + 32 * 512 B/P + 8 * 512 11g P */
+#else /* WLLPRS */
+#define TXFIFO_START_BLK16 65 /**< Base address + 32 * 512 B/P */
+#endif /* WLLPRS */
+#define TXFIFO_START_BLK 6 /**< Base address + 6 * 256 B */
+#define TXFIFO_START_BLK_NIN 7 /**< Base address + 7 * 256 B */
+
+#define TXFIFO_AC_SIZE_PER_UNIT 512 /**< one unit corresponds to 512 bytes */
+
+#define MBSS16_TEMPLMEM_MINBLKS 65 /**< one unit corresponds to 256 bytes */
+
+/* phy versions, PhyVersion:Revision field */
+#define PV_AV_MASK 0xf000 /**< analog block version */
+#define PV_AV_SHIFT 12 /**< analog block version bitfield offset */
+#define PV_PT_MASK 0x0f00 /**< phy type */
+#define PV_PT_SHIFT 8 /**< phy type bitfield offset */
+#define PV_PV_MASK 0x00ff /**< phy version */
+#define PHY_TYPE(v) ((v & PV_PT_MASK) >> PV_PT_SHIFT)
+
+/* phy types, PhyVersion:PhyType field */
+#ifndef USE_BCMCONF_H
+#define PHY_TYPE_A 0 /**< A-Phy value */
+#define PHY_TYPE_B 1 /**< B-Phy value */
+#define PHY_TYPE_G 2 /**< G-Phy value */
+#define PHY_TYPE_N 4 /**< N-Phy value */
+/* #define PHY_TYPE_LP 5 */ /**< LP-Phy value */
+/* #define PHY_TYPE_SSN 6 */ /**< SSLPN-Phy value */
+#define PHY_TYPE_HT 7 /**< 3x3 HTPhy value */
+#define PHY_TYPE_LCN 8 /**< LCN-Phy value */
+#define PHY_TYPE_LCNXN 9 /**< LCNXN-Phy value */
+#define PHY_TYPE_LCN40 10 /**< LCN40-Phy value */
+#define PHY_TYPE_AC 11 /**< AC-Phy value */
+#define PHY_TYPE_LCN20 12 /**< LCN20-Phy value */
+#define PHY_TYPE_HE 13 /**< HE-Phy value */
+#define PHY_TYPE_NULL 0xf /**< Invalid Phy value */
+#endif /* USE_BCMCONF_H */
+
+/* analog types, PhyVersion:AnalogType field */
+#define ANA_11G_018 1
+#define ANA_11G_018_ALL 2
+#define ANA_11G_018_ALLI 3
+#define ANA_11G_013 4
+#define ANA_11N_013 5
+#define ANA_11LP_013 6
+
+/** 802.11a PLCP header def */
+typedef struct ofdm_phy_hdr ofdm_phy_hdr_t;
+BWL_PRE_PACKED_STRUCT struct ofdm_phy_hdr {
+ uint8 rlpt[3]; /**< rate, length, parity, tail */
+ uint16 service;
+ uint8 pad;
+} BWL_POST_PACKED_STRUCT;
+
+#define D11A_PHY_HDR_GRATE(phdr) ((phdr)->rlpt[0] & 0x0f)
+#define D11A_PHY_HDR_GRES(phdr) (((phdr)->rlpt[0] >> 4) & 0x01)
+#define D11A_PHY_HDR_GLENGTH(phdr) (((*((uint32 *)((phdr)->rlpt))) >> 5) & 0x0fff)
+#define D11A_PHY_HDR_GPARITY(phdr) (((phdr)->rlpt[3] >> 1) & 0x01)
+#define D11A_PHY_HDR_GTAIL(phdr) (((phdr)->rlpt[3] >> 2) & 0x3f)
+
+/** rate encoded per 802.11a-1999 sec 17.3.4.1 */
+#define D11A_PHY_HDR_SRATE(phdr, rate) \
+ ((phdr)->rlpt[0] = ((phdr)->rlpt[0] & 0xf0) | ((rate) & 0xf))
+/** set reserved field to zero */
+#define D11A_PHY_HDR_SRES(phdr) ((phdr)->rlpt[0] &= 0xef)
+/** length is number of octets in PSDU */
+#define D11A_PHY_HDR_SLENGTH(phdr, length) \
+ (*(uint32 *)((phdr)->rlpt) = *(uint32 *)((phdr)->rlpt) | \
+ (((length) & 0x0fff) << 5))
+/** set the tail to all zeros */
+#define D11A_PHY_HDR_STAIL(phdr) ((phdr)->rlpt[3] &= 0x03)
+
+#define D11A_PHY_HDR_LEN_L 3 /**< low-rate part of PLCP header */
+#define D11A_PHY_HDR_LEN_R 2 /**< high-rate part of PLCP header */
+
+#define D11A_PHY_TX_DELAY (2) /**< 2.1 usec */
+
+#define D11A_PHY_HDR_TIME (4) /**< low-rate part of PLCP header */
+#define D11A_PHY_PRE_TIME (16)
+#define D11A_PHY_PREHDR_TIME (D11A_PHY_PRE_TIME + D11A_PHY_HDR_TIME)
+
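+/*
+ * Illustrative sketch (editorial): filling an ofdm_phy_hdr_t with the setter
+ * macros above. Rate code and PSDU length are caller-supplied; the guard flag
+ * is hypothetical and uint8/uint16 are the bcm typedefs this file relies on.
+ */
+#ifdef D11_EXAMPLE_SNIPPETS
+static void example_fill_ofdm_plcp(ofdm_phy_hdr_t *phdr, uint8 rate_code, uint16 psdu_len)
+{
+	phdr->rlpt[0] = phdr->rlpt[1] = phdr->rlpt[2] = 0;
+	phdr->service = 0;
+	phdr->pad = 0;
+	D11A_PHY_HDR_SRATE(phdr, rate_code);	/* rate per 802.11a-1999 17.3.4.1 */
+	D11A_PHY_HDR_SRES(phdr);		/* reserved bit -> 0 */
+	D11A_PHY_HDR_SLENGTH(phdr, psdu_len);	/* PSDU length in octets */
+}
+#endif /* D11_EXAMPLE_SNIPPETS */
+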
+/** 802.11b PLCP header def */
+typedef struct cck_phy_hdr cck_phy_hdr_t;
+BWL_PRE_PACKED_STRUCT struct cck_phy_hdr {
+ uint8 signal;
+ uint8 service;
+ uint16 length;
+ uint16 crc;
+} BWL_POST_PACKED_STRUCT;
+
+#define D11B_PHY_HDR_LEN 6
+
+#define D11B_PHY_TX_DELAY (3) /**< 3.4 usec */
+
+#define D11B_PHY_LHDR_TIME (D11B_PHY_HDR_LEN << 3)
+#define D11B_PHY_LPRE_TIME (144)
+#define D11B_PHY_LPREHDR_TIME (D11B_PHY_LPRE_TIME + D11B_PHY_LHDR_TIME)
+
+#define D11B_PHY_SHDR_TIME (D11B_PHY_LHDR_TIME >> 1)
+#define D11B_PHY_SPRE_TIME (D11B_PHY_LPRE_TIME >> 1)
+#define D11B_PHY_SPREHDR_TIME (D11B_PHY_SPRE_TIME + D11B_PHY_SHDR_TIME)
+
+#define D11B_PLCP_SIGNAL_LOCKED (1 << 2)
+#define D11B_PLCP_SIGNAL_LE (1 << 7)
+
+/* AMPDUXXX: move to ht header file once it is ready: Mimo PLCP */
+#define MIMO_PLCP_MCS_MASK 0x7f /**< mcs index */
+#define MIMO_PLCP_40MHZ 0x80 /**< 40 MHz frame */
+#define MIMO_PLCP_AMPDU 0x08 /**< ampdu */
+
+#define WLC_GET_CCK_PLCP_LEN(plcp) (plcp[4] + (plcp[5] << 8))
+#define WLC_GET_MIMO_PLCP_LEN(plcp) (plcp[1] + (plcp[2] << 8))
+#define WLC_SET_MIMO_PLCP_LEN(plcp, len) \
+	do { (plcp)[1] = (len) & 0xff; (plcp)[2] = (((len) >> 8) & 0xff); } while (0)
+
+#define WLC_SET_MIMO_PLCP_AMPDU(plcp) (plcp[3] |= MIMO_PLCP_AMPDU)
+#define WLC_CLR_MIMO_PLCP_AMPDU(plcp) (plcp[3] &= ~MIMO_PLCP_AMPDU)
+#define WLC_IS_MIMO_PLCP_AMPDU(plcp) (plcp[3] & MIMO_PLCP_AMPDU)
+
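+/*
+ * Illustrative sketch (editorial): stamping length and the A-MPDU flag into a
+ * mimo PLCP with the accessors above; plcp points at the 6-byte PLCP. Guarded
+ * by the hypothetical D11_EXAMPLE_SNIPPETS flag.
+ */
+#ifdef D11_EXAMPLE_SNIPPETS
+static void example_stamp_mimo_plcp(uint8 *plcp, uint16 len, bool is_ampdu)
+{
+	WLC_SET_MIMO_PLCP_LEN(plcp, len);
+	if (is_ampdu)
+		WLC_SET_MIMO_PLCP_AMPDU(plcp);
+	else
+		WLC_CLR_MIMO_PLCP_AMPDU(plcp);
+}
+#endif /* D11_EXAMPLE_SNIPPETS */
+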
+/**
+ * The dot11a PLCP header is 5 bytes. To simplify the software (so that we don't need e.g. different
+ * tx DMA headers for 11a and 11b), the PLCP header has padding added in the ucode.
+ */
+#define D11_PHY_HDR_LEN 6u
+
+/** For the AC phy PLCP is 12 bytes and not all bytes are used for all the modulations */
+#define D11AC_PHY_HDR_LEN 12
+#define D11AC_PHY_VHT_PLCP_OFFSET 0
+#define D11AC_PHY_HTMM_PLCP_OFFSET 0
+#define D11AC_PHY_HTGF_PLCP_OFFSET 3
+#define D11AC_PHY_OFDM_PLCP_OFFSET 3
+#define D11AC_PHY_CCK_PLCP_OFFSET 6
+#define D11AC_PHY_BEACON_PLCP_OFFSET 0
+
+#define D11_PHY_RXPLCP_LEN(rev) (D11_PHY_HDR_LEN)
+#define D11_PHY_RXPLCP_OFF(rev) (0)
+
+/** TX descriptor - pre40 */
+typedef struct d11txh_pre40 d11txh_pre40_t;
+BWL_PRE_PACKED_STRUCT struct d11txh_pre40 {
+ uint16 MacTxControlLow; /* 0x0 */
+ uint16 MacTxControlHigh; /* 0x1 */
+ uint16 MacFrameControl; /* 0x2 */
+ uint16 TxFesTimeNormal; /* 0x3 */
+ uint16 PhyTxControlWord; /* 0x4 */
+ uint16 PhyTxControlWord_1; /* 0x5 */
+ uint16 PhyTxControlWord_1_Fbr; /* 0x6 */
+ uint16 PhyTxControlWord_1_Rts; /* 0x7 */
+ uint16 PhyTxControlWord_1_FbrRts; /* 0x8 */
+ uint16 MainRates; /* 0x9 */
+ uint16 XtraFrameTypes; /* 0xa */
+ uint8 IV[16]; /* 0x0b - 0x12 */
+ uint8 TxFrameRA[6]; /* 0x13 - 0x15 */
+ uint16 TxFesTimeFallback; /* 0x16 */
+ uint8 RTSPLCPFallback[6]; /* 0x17 - 0x19 */
+ uint16 RTSDurFallback; /* 0x1a */
+ uint8 FragPLCPFallback[6]; /* 0x1b - 1d */
+ uint16 FragDurFallback; /* 0x1e */
+ uint16 MModeLen; /* 0x1f */
+ uint16 MModeFbrLen; /* 0x20 */
+ uint16 TstampLow; /* 0x21 */
+ uint16 TstampHigh; /* 0x22 */
+ uint16 ABI_MimoAntSel; /* 0x23 */
+ uint16 PreloadSize; /* 0x24 */
+ uint16 AmpduSeqCtl; /* 0x25 */
+ uint16 TxFrameID; /* 0x26 */
+ uint16 TxStatus; /* 0x27 */
+ uint16 MaxNMpdus; /* 0x28 corerev >=16 */
+ BWL_PRE_PACKED_STRUCT union {
+ uint16 MaxAggDur; /* 0x29 corerev >=16 */
+ uint16 MaxAggLen;
+ } BWL_POST_PACKED_STRUCT u1;
+ BWL_PRE_PACKED_STRUCT union {
+ BWL_PRE_PACKED_STRUCT struct { /* 0x29 corerev >=16 */
+ uint8 MaxRNum;
+ uint8 MaxAggBytes; /* Max Agg Bytes in power of 2 */
+ } BWL_POST_PACKED_STRUCT s1;
+ uint16 MaxAggLen_FBR;
+ } BWL_POST_PACKED_STRUCT u2;
+ uint16 MinMBytes; /* 0x2b corerev >=16 */
+ uint8 RTSPhyHeader[D11_PHY_HDR_LEN]; /* 0x2c - 0x2e */
+ struct dot11_rts_frame rts_frame; /* 0x2f - 0x36 */
+ uint16 pad; /* 0x37 */
+} BWL_POST_PACKED_STRUCT;
+
+#define D11_TXH_LEN 112 /**< bytes */
+
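+/*
+ * Illustrative sketch (editorial): a compile-time guard that the packed layout
+ * above really is D11_TXH_LEN bytes, assuming the BWL packing wrappers are in
+ * effect. The negative-array-size trick avoids depending on any assert macro;
+ * the guard flag is hypothetical.
+ */
+#ifdef D11_EXAMPLE_SNIPPETS
+typedef char d11txh_pre40_size_check[(sizeof(d11txh_pre40_t) == D11_TXH_LEN) ? 1 : -1];
+#endif /* D11_EXAMPLE_SNIPPETS */
+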
+/* Frame Types */
+#define FT_LEGACY (-1)
+#define FT_CCK 0
+#define FT_OFDM 1
+#define FT_HT 2
+#define FT_VHT 3
+#define FT_HE 4
+#define FT_EHT 6
+
+/* HE PPDU type */
+#define HE_SU_PPDU 0
+#define HE_SU_RE_PPDU 1
+#define HE_MU_PPDU 2
+#define HE_TRIG_PPDU 3
+
+/* Position of MPDU inside A-MPDU; indicated with bits 10:9 of MacTxControlLow */
+#define TXC_AMPDU_SHIFT 9 /**< shift for ampdu settings */
+#define TXC_AMPDU_NONE 0 /**< Regular MPDU, not an A-MPDU */
+#define TXC_AMPDU_FIRST 1 /**< first MPDU of an A-MPDU */
+#define TXC_AMPDU_MIDDLE 2 /**< intermediate MPDU of an A-MPDU */
+#define TXC_AMPDU_LAST 3 /**< last (or single) MPDU of an A-MPDU */
+
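+/*
+ * Illustrative sketch (editorial): placing the A-MPDU position into bits 10:9
+ * of MacTxControlLow (the matching mask, TXC_AMPDU_MASK, is defined just
+ * below). Guarded by the hypothetical D11_EXAMPLE_SNIPPETS flag.
+ */
+#ifdef D11_EXAMPLE_SNIPPETS
+static uint16 example_set_ampdu_pos(uint16 mcl, uint16 pos)
+{
+	mcl &= (uint16)~(3 << TXC_AMPDU_SHIFT);		/* clear bits 10:9 */
+	mcl |= (uint16)(pos << TXC_AMPDU_SHIFT);	/* e.g. TXC_AMPDU_FIRST */
+	return mcl;
+}
+#endif /* D11_EXAMPLE_SNIPPETS */
+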
+/* MacTxControlLow */
+#define TXC_AMIC 0x8000
+#define TXC_USERIFS 0x4000
+#define TXC_LIFETIME 0x2000
+#define TXC_FRAMEBURST 0x1000
+#define TXC_SENDCTS 0x0800
+#define TXC_AMPDU_MASK 0x0600
+#define TXC_BW_40 0x0100
+#define TXC_FREQBAND_5G 0x0080
+#define TXC_DFCS 0x0040
+#define TXC_IGNOREPMQ 0x0020
+#define TXC_HWSEQ 0x0010
+#define TXC_STARTMSDU 0x0008
+#define TXC_SENDRTS 0x0004
+#define TXC_LONGFRAME 0x0002
+#define TXC_IMMEDACK 0x0001
+
+/* MacTxControlHigh */
+#define TXC_PREAMBLE_RTS_FB_SHORT 0x8000 /* RTS fallback preamble type 1 = SHORT 0 = LONG */
+#define TXC_PREAMBLE_RTS_MAIN_SHORT 0x4000 /* RTS main rate preamble type 1 = SHORT 0 = LONG */
+#define TXC_PREAMBLE_DATA_FB_SHORT 0x2000 /**< Main fallback rate preamble type
+ * 1 = SHORT for OFDM/GF for MIMO
+ * 0 = LONG for CCK/MM for MIMO
+ */
+/* TXC_PREAMBLE_DATA_MAIN is in PhyTxControl bit 5 */
+#define TXC_AMPDU_FBR 0x1000 /**< use fallback rate for this AMPDU */
+#define TXC_SECKEY_MASK 0x0FF0
+#define TXC_SECKEY_SHIFT 4
+#define TXC_ALT_TXPWR 0x0008 /**< Use alternate txpwr defined at loc. M_ALT_TXPWR_IDX */
+#define TXC_SECTYPE_MASK 0x0007
+#define TXC_SECTYPE_SHIFT 0
+
+/* Null delimiter for Fallback rate */
+#define AMPDU_FBR_NULL_DELIM 5 /**< Location of Null delimiter count for AMPDU */
+
+/* PhyTxControl for Mimophy */
+#define PHY_TXC_PWR_MASK 0xFC00
+#define PHY_TXC_PWR_SHIFT 10
+#define PHY_TXC_ANT_MASK 0x03C0 /**< bit 6, 7, 8, 9 */
+#define PHY_TXC_ANT_SHIFT 6
+#define PHY_TXC_ANT_0_1 0x00C0 /**< auto, last rx */
+#define PHY_TXC_LPPHY_ANT_LAST 0x0000
+#define PHY_TXC_ANT_3 0x0200 /**< virtual antenna 3 */
+#define PHY_TXC_ANT_2 0x0100 /**< virtual antenna 2 */
+#define PHY_TXC_ANT_1 0x0080 /**< virtual antenna 1 */
+#define PHY_TXC_ANT_0 0x0040 /**< virtual antenna 0 */
+
+#define PHY_TXC_SHORT_HDR 0x0010
+#define PHY_TXC_FT_MASK 0x0003
+
+#define PHY_TXC_FT_CCK 0x0000
+#define PHY_TXC_FT_OFDM 0x0001
+#define PHY_TXC_FT_HT 0x0002
+#define PHY_TXC_FT_VHT 0x0003
+#define PHY_TXC_FT_HE 0x0004
+#define PHY_TXC_FT_EHT 0x0006
+
+#define PHY_TXC_OLD_ANT_0 0x0000
+#define PHY_TXC_OLD_ANT_1 0x0100
+#define PHY_TXC_OLD_ANT_LAST 0x0300
+
+/** PhyTxControl_1 for Mimophy */
+#define PHY_TXC1_BW_MASK 0x0007
+#define PHY_TXC1_BW_10MHZ 0
+#define PHY_TXC1_BW_10MHZ_UP 1
+#define PHY_TXC1_BW_20MHZ 2
+#define PHY_TXC1_BW_20MHZ_UP 3
+#define PHY_TXC1_BW_40MHZ 4
+#define PHY_TXC1_BW_40MHZ_DUP 5
+#define PHY_TXC1_MODE_SHIFT 3
+#define PHY_TXC1_MODE_MASK 0x0038
+#define PHY_TXC1_MODE_SISO 0
+#define PHY_TXC1_MODE_CDD 1
+#define PHY_TXC1_MODE_STBC 2
+#define PHY_TXC1_MODE_SDM 3
+#define PHY_TXC1_CODE_RATE_SHIFT 8
+#define PHY_TXC1_CODE_RATE_MASK 0x0700
+#define PHY_TXC1_CODE_RATE_1_2 0
+#define PHY_TXC1_CODE_RATE_2_3 1
+#define PHY_TXC1_CODE_RATE_3_4 2
+#define PHY_TXC1_CODE_RATE_4_5 3
+#define PHY_TXC1_CODE_RATE_5_6 4
+#define PHY_TXC1_CODE_RATE_7_8 6
+#define PHY_TXC1_MOD_SCHEME_SHIFT 11
+#define PHY_TXC1_MOD_SCHEME_MASK 0x3800
+#define PHY_TXC1_MOD_SCHEME_BPSK 0
+#define PHY_TXC1_MOD_SCHEME_QPSK 1
+#define PHY_TXC1_MOD_SCHEME_QAM16 2
+#define PHY_TXC1_MOD_SCHEME_QAM64 3
+#define PHY_TXC1_MOD_SCHEME_QAM256 4
+
+/* PhyTxControl for HTphy that are different from Mimophy */
+#define PHY_TXC_HTANT_MASK 0x3fC0 /**< bit 6, 7, 8, 9, 10, 11, 12, 13 */
+#define PHY_TXC_HTCORE_MASK 0x03C0 /**< core enable core3:core0, 1=enable, 0=disable */
+#define PHY_TXC_HTCORE_SHIFT 6 /**< bit 6, 7, 8, 9 */
+#define PHY_TXC_HTANT_IDX_MASK 0x3C00 /**< 4-bit, 16 possible antenna configuration */
+#define PHY_TXC_HTANT_IDX_SHIFT 10
+#define PHY_TXC_HTANT_IDX0 0
+#define PHY_TXC_HTANT_IDX1 1
+#define PHY_TXC_HTANT_IDX2 2
+#define PHY_TXC_HTANT_IDX3 3
+
+/* PhyTxControl_1 for HTphy that are different from Mimophy */
+#define PHY_TXC1_HTSPARTIAL_MAP_MASK 0x7C00 /**< bit 14:10 */
+#define PHY_TXC1_HTSPARTIAL_MAP_SHIFT 10
+#define PHY_TXC1_HTTXPWR_OFFSET_MASK 0x01f8 /**< bit 8:3 */
+#define PHY_TXC1_HTTXPWR_OFFSET_SHIFT 3
+
+/* TxControl word follows new interface for AX */
+/* PhyTxControl_6 for AXphy */
+#define PHY_TXC5_AXTXPWR_OFFSET_C0_MASK 0xff00 /**< bit 15:8 */
+#define PHY_TXC5_AXTXPWR_OFFSET_C0_SHIFT 8
+#define PHY_TXC6_AXTXPWR_OFFSET_C1_MASK 0x00ff /**< bit 7:0 */
+#define PHY_TXC6_AXTXPWR_OFFSET_C1_SHIFT 0
+#define PHY_TXC5_AXTXPWR_OFFSET_C2_MASK 0x00ff /**< bit 7:0 */
+#define PHY_TXC5_AXTXPWR_OFFSET_C2_SHIFT 0
+
+/* XtraFrameTypes */
+#define XFTS_RTS_FT_SHIFT 2
+#define XFTS_FBRRTS_FT_SHIFT 4
+#define XFTS_CHANNEL_SHIFT 8
+
+/** Antenna diversity bit in ant_wr_settle */
+#define PHY_AWS_ANTDIV 0x2000
+
+/* IFS ctl */
+#define IFS_USEEDCF (1 << 2)
+
+/* IFS ctl1 */
+#define IFS_CTL1_EDCRS (1 << 3)
+#define IFS_CTL1_EDCRS_20L (1 << 4)
+#define IFS_CTL1_EDCRS_40 (1 << 5)
+#define IFS_EDCRS_MASK (IFS_CTL1_EDCRS | IFS_CTL1_EDCRS_20L | IFS_CTL1_EDCRS_40)
+#define IFS_EDCRS_SHIFT 3
+
+/* IFS ctl sel pricrs */
+#define IFS_CTL_CRS_SEL_20LL 1
+#define IFS_CTL_CRS_SEL_20LU 2
+#define IFS_CTL_CRS_SEL_20UL 4
+#define IFS_CTL_CRS_SEL_20UU 8
+#define IFS_CTL_CRS_SEL_MASK (IFS_CTL_CRS_SEL_20LL | IFS_CTL_CRS_SEL_20LU | \
+ IFS_CTL_CRS_SEL_20UL | IFS_CTL_CRS_SEL_20UU)
+#define IFS_CTL_ED_SEL_20LL (1 << 8)
+#define IFS_CTL_ED_SEL_20LU (1 << 9)
+#define IFS_CTL_ED_SEL_20UL (1 << 10)
+#define IFS_CTL_ED_SEL_20UU (1 << 11)
+#define IFS_CTL_ED_SEL_MASK (IFS_CTL_ED_SEL_20LL | IFS_CTL_ED_SEL_20LU | \
+ IFS_CTL_ED_SEL_20UL | IFS_CTL_ED_SEL_20UU)
+
+/* ABI_MimoAntSel */
+#define ABI_MAS_ADDR_BMP_IDX_MASK 0x0f00
+#define ABI_MAS_ADDR_BMP_IDX_SHIFT 8
+#define ABI_MAS_FBR_ANT_PTN_MASK 0x00f0
+#define ABI_MAS_FBR_ANT_PTN_SHIFT 4
+#define ABI_MAS_MRT_ANT_PTN_MASK 0x000f
+
+#ifdef WLAWDL
+#define ABI_MAS_AWDL_TS_INSERT 0x1000 /**< bit 12 */
+#endif
+
+#define ABI_MAS_TIMBC_TSF 0x2000 /**< Enable TIMBC tsf field present */
+
+/* MinMBytes */
+#define MINMBYTES_PKT_LEN_MASK 0x0300
+#define MINMBYTES_FBRATE_PWROFFSET_MASK 0xFC00
+#define MINMBYTES_FBRATE_PWROFFSET_SHIFT 10
+
+/* Rev40 template constants */
+
+/** templates include a longer PLCP header that matches the MAC / PHY interface */
+#define D11_VHT_PLCP_LEN 12
+
+/* 11AC TX DMA buffer header */
+
+#define D11AC_TXH_NUM_RATES 4
+
+/** per rate info - rev40 */
+typedef struct d11actxh_rate d11actxh_rate_t;
+BWL_PRE_PACKED_STRUCT struct d11actxh_rate {
+ uint16 PhyTxControlWord_0; /* 0 - 1 */
+ uint16 PhyTxControlWord_1; /* 2 - 3 */
+ uint16 PhyTxControlWord_2; /* 4 - 5 */
+ uint8 plcp[D11_PHY_HDR_LEN]; /* 6 - 11 */
+ uint16 FbwInfo; /* 12 -13, fall back bandwidth info */
+ uint16 TxRate; /* 14 */
+ uint16 RtsCtsControl; /* 16 */
+ uint16 Bfm0; /* 18 */
+} BWL_POST_PACKED_STRUCT;
+
+/* Bit definition for FbwInfo field */
+#define FBW_BW_MASK 3
+#define FBW_BW_SHIFT 0
+#define FBW_TXBF 4
+#define FBW_TXBF_SHIFT 2
+/* this needs to be re-visited if we want to use this feature */
+#define FBW_BFM0_TXPWR_MASK 0x1F8
+#define FBW_BFM0_TXPWR_SHIFT 3
+#define FBW_BFM_TXPWR_MASK 0x7E00
+#define FBW_BFM_TXPWR_SHIFT 9
+
+/* Bit definition for Bfm0 field */
+#define BFM0_TXPWR_MASK 0x3f
+#define BFM0_STBC_SHIFT 6
+#define BFM0_STBC (1 << BFM0_STBC_SHIFT)
+/* should find a chance to converge the two */
+#define D11AC2_BFM0_TXPWR_MASK 0x7f
+#define D11AC2_BFM0_STBC_SHIFT 7
+#define D11AC2_BFM0_STBC (1 << D11AC2_BFM0_STBC_SHIFT)
+
+/* per packet info */
+typedef struct d11pktinfo_common d11pktinfo_common_t;
+typedef struct d11pktinfo_common d11actxh_pkt_t;
+BWL_PRE_PACKED_STRUCT struct d11pktinfo_common {
+ /* Per pkt info */
+ uint16 TSOInfo; /* 0 */
+ uint16 MacTxControlLow; /* 2 */
+ uint16 MacTxControlHigh; /* 4 */
+ uint16 Chanspec; /* 6 */
+ uint8 IVOffset; /* 8 */
+ uint8 PktCacheLen; /* 9 */
+ uint16 FrameLen; /* 10. In [bytes] units. */
+ uint16 TxFrameID; /* 12 */
+ uint16 Seq; /* 14 */
+ uint16 Tstamp; /* 16 */
+ uint16 TxStatus; /* 18 */
+} BWL_POST_PACKED_STRUCT;
+
+/* common cache info between rev40 and rev80 formats */
+typedef struct d11txh_cache_common d11txh_cache_common_t;
+BWL_PRE_PACKED_STRUCT struct d11txh_cache_common {
+ uint8 BssIdEncAlg; /* 0 */
+ uint8 KeyIdx; /* 1 */
+ uint8 PrimeMpduMax; /* 2 */
+ uint8 FallbackMpduMax; /* 3 */
+ uint16 AmpduDur; /* 4 - 5 */
+ uint8 BAWin; /* 6 */
+ uint8 MaxAggLen; /* 7 */
+} BWL_POST_PACKED_STRUCT;
+
+/** Per cache info - rev40 */
+typedef struct d11actxh_cache d11actxh_cache_t;
+BWL_PRE_PACKED_STRUCT struct d11actxh_cache {
+ d11txh_cache_common_t common; /* 0 - 7 */
+ uint8 TkipPH1Key[10]; /* 8 - 17 */
+ uint8 TSCPN[6]; /* 18 - 23 */
+} BWL_POST_PACKED_STRUCT;
+
+/** Long format tx descriptor - rev40 */
+typedef struct d11actxh d11actxh_t;
+BWL_PRE_PACKED_STRUCT struct d11actxh {
+ /* Per pkt info */
+ d11actxh_pkt_t PktInfo; /* 0 - 19 */
+
+ union {
+
+ /** Rev 40 to rev 63 layout */
+ struct {
+ /** Per rate info */
+ d11actxh_rate_t RateInfo[D11AC_TXH_NUM_RATES]; /* 20 - 99 */
+
+ /** Per cache info */
+ d11actxh_cache_t CacheInfo; /* 100 - 123 */
+ } rev40;
+
+ /** Rev >= 64 layout */
+ struct {
+ /** Per cache info */
+ d11actxh_cache_t CacheInfo; /* 20 - 43 */
+
+ /** Per rate info */
+ d11actxh_rate_t RateInfo[D11AC_TXH_NUM_RATES]; /* 44 - 123 */
+ } rev64;
+
+ };
+} BWL_POST_PACKED_STRUCT;
+
+#define D11AC_TXH_LEN sizeof(d11actxh_t) /* 124 bytes */
+
+/* Short format tx descriptor only has per packet info */
+#define D11AC_TXH_SHORT_LEN sizeof(d11actxh_pkt_t) /* 20 bytes */
+
+/* -TXDC- TxH excluding Rate Info: 41 bytes (note: 1 byte of RATEINFO is removed) */
+#define D11AC_TXH_SHORT_EXT_LEN (sizeof(d11txh_rev80_t) - 1)
+
+/* Retry limit regs */
+/* Current retries for the fallback rates are hardcoded */
+#define D11AC_TXDC_SRL_FB (3u) /* Short Retry Limit - Fallback */
+#define D11AC_TXDC_LRL_FB (2u) /* Long Retry Limit - Fallback */
+
+#define D11AC_TXDC_RET_LIM_MASK (0x000Fu)
+#define D11AC_TXDC_SRL_SHIFT (0u) /* Short Retry Limit */
+#define D11AC_TXDC_SRL_FB_SHIFT (4u) /* Short Retry Limit - Fallback */
+#define D11AC_TXDC_LRL_SHIFT (8u) /* Long Retry Limit */
+#define D11AC_TXDC_LRL_FB_SHIFT (12u) /* Long Retry Limit - Fallback */
+
+/* MacTxControlLow */
+#define D11AC_TXC_HDR_FMT_SHORT 0x0001 /**< 0: long format, 1: short format */
+#define D11AC_TXC_UPD_CACHE 0x0002
+#define D11AC_TXC_CACHE_IDX_MASK 0x003C /**< Cache index 0 .. 15 */
+#define D11AC_TXC_CACHE_IDX_SHIFT 2
+
+#define D11AC_TXDC_IDX_SHIFT 1
+#define D11AC_TXDC_CPG_SHIFT 5
+#define D11REV80_TXDC_RIB_CPG 0x0020 /**< Cache Index CPG (Bit 5) -TXDC- */
+#define D11REV80_TXDC_RIB_DEL_MASK 0x001E /**< Cache index CIPX 0 .. 15 (Bit 1-4 -TXDC- */
+#define D11REV80_TXDC_RIB_IMM_MASK 0x003E /**< Cache index CIPX 0 .. 31 (Bit 1-5) -TXDC- */
+#define D11AC_TXC_AMPDU 0x0040 /**< Is aggregate-able */
+#define D11AC_TXC_IACK 0x0080 /**< Expect immediate ACK */
+#define D11AC_TXC_LFRM 0x0100 /**< Use long/short retry frame count/limit */
+#define D11AC_TXC_IPMQ 0x0200 /**< Ignore PMQ */
+#define D11AC_TXC_MBURST 0x0400 /**< Burst mode */
+#define D11AC_TXC_ASEQ 0x0800 /**< Add ucode generated seq num */
+#define D11AC_TXC_AGING 0x1000 /**< Use lifetime */
+#define D11AC_TXC_AMIC 0x2000 /**< Compute and add TKIP MIC */
+#define D11AC_TXC_STMSDU 0x4000 /**< First MSDU */
+#define D11AC_TXC_URIFS 0x8000 /**< Use RIFS */
+
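+/*
+ * Illustrative sketch (editorial): a short-format MacTxControlLow that points
+ * at a previously programmed cache entry, built from the bits above. Guarded
+ * by the hypothetical D11_EXAMPLE_SNIPPETS flag.
+ */
+#ifdef D11_EXAMPLE_SNIPPETS
+static uint16 example_mcl_short_cached(uint16 cache_idx)
+{
+	return (uint16)(D11AC_TXC_HDR_FMT_SHORT |
+		((cache_idx << D11AC_TXC_CACHE_IDX_SHIFT) & D11AC_TXC_CACHE_IDX_MASK));
+}
+#endif /* D11_EXAMPLE_SNIPPETS */
+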
+/* MacTxControlHigh */
+#define D11AC_TXC_DISFCS 0x0001 /**< Discard FCS */
+#define D11AC_TXC_FIX_RATE 0x0002 /**< Use primary rate only */
+#define D11AC_TXC_SVHT 0x0004 /**< Single VHT mpdu ampdu */
+#define D11AC_TXC_PPS 0x0008 /**< Enable PS Pretend feature */
+#define D11AC_TXC_UCODE_SEQ 0x0010 /* Sequence counter for BK traffic, for offloads */
+#define D11AC_TXC_TIMBC_TSF 0x0020 /**< Enable TIMBC tsf field present */
+#define D11AC_TXC_TCPACK 0x0040
+#define D11AC_TXC_AWDL_PHYTT 0x0080 /**< Fill in PHY Transmission Time for AWDL action frames */
+#define D11AC_TXC_TOF 0x0100 /**< Enable wifi ranging processing for rxd frames */
+#define D11AC_TXC_MU 0x0200 /**< MU Tx data */
+#define D11AC_TXC_BFIX 0x0800 /**< BFI from SHMx */
+#define D11AC_TXC_NORETRY 0x0800 /**< Disable retry for tsync frames */
+#define D11AC_TXC_UFP 0x1000 /**< UFP */
+#define D11AC_TXC_OVERRIDE_NAV 0x1000 /**< if set, ucode will tx without honoring NAV */
+#define D11AC_TXC_DYNBW 0x2000 /**< Dynamic BW */
+#define D11AC_TXC_TXPROF_EN 0x8000 /**< TxProfile Enable TODO: support multiple idx */
+#define D11AC_TXC_SLTF 0x8000 /**< 11az Secure Ranging frame */
+
+#define D11AC_TSTAMP_SHIFT 8 /**< Tstamp in 256us units */
+
+/* PhyTxControlWord_0 */
+#define D11AC_PHY_TXC_FT_MASK 0x0003
+
+/* vht txctl0 */
+#define D11AC_PHY_TXC_NON_SOUNDING 0x0004
+#define D11AC_PHY_TXC_BFM 0x0008
+#define D11AC_PHY_TXC_SHORT_PREAMBLE 0x0010
+#define D11AC2_PHY_TXC_STBC 0x0020
+#define D11AC_PHY_TXC_ANT_MASK 0x3FC0
+#define D11AC_PHY_TXC_CORE_MASK 0x03C0
+#define D11AC_PHY_TXC_CORE_SHIFT 6
+#define D11AC_PHY_TXC_ANT_IDX_MASK 0x3C00
+#define D11AC_PHY_TXC_ANT_IDX_SHIFT 10
+#define D11AC_PHY_TXC_BW_MASK 0xC000
+#define D11AC_PHY_TXC_BW_SHIFT 14
+#define D11AC_PHY_TXC_BW_20MHZ 0x0000
+#define D11AC_PHY_TXC_BW_40MHZ 0x4000
+#define D11AC_PHY_TXC_BW_80MHZ 0x8000
+#define D11AC_PHY_TXC_BW_160MHZ 0xC000
+
+/* PhyTxControlWord_1 */
+#define D11AC_PHY_TXC_PRIM_SUBBAND_MASK 0x0007
+#define D11AC_PHY_TXC_PRIM_SUBBAND_LLL 0x0000
+#define D11AC_PHY_TXC_PRIM_SUBBAND_LLU 0x0001
+#define D11AC_PHY_TXC_PRIM_SUBBAND_LUL 0x0002
+#define D11AC_PHY_TXC_PRIM_SUBBAND_LUU 0x0003
+#define D11AC_PHY_TXC_PRIM_SUBBAND_ULL 0x0004
+#define D11AC_PHY_TXC_PRIM_SUBBAND_ULU 0x0005
+#define D11AC_PHY_TXC_PRIM_SUBBAND_UUL 0x0006
+#define D11AC_PHY_TXC_PRIM_SUBBAND_UUU 0x0007
+#define D11AC_PHY_TXC_TXPWR_OFFSET_MASK 0x01F8
+#define D11AC_PHY_TXC_TXPWR_OFFSET_SHIFT 3
+#define D11AC2_PHY_TXC_TXPWR_OFFSET_MASK 0x03F8
+#define D11AC2_PHY_TXC_TXPWR_OFFSET_SHIFT 3
+#define D11AC_PHY_TXC_TXBF_USER_IDX_MASK 0x7C00
+#define D11AC_PHY_TXC_TXBF_USER_IDX_SHIFT 10
+#define D11AC2_PHY_TXC_DELTA_TXPWR_OFFSET_MASK 0x7C00
+#define D11AC2_PHY_TXC_DELTA_TXPWR_OFFSET_SHIFT 10
+/* Rather awkward bit mapping to keep pctl1 word same as legacy, for proprietary 11n rate support */
+#define D11AC_PHY_TXC_11N_PROP_MCS 0x8000 /* this represents bit mcs[6] */
+#define D11AC2_PHY_TXC_MU 0x8000
+
+/* PhyTxControlWord_2 phy rate */
+#define D11AC_PHY_TXC_PHY_RATE_MASK 0x003F
+#define D11AC2_PHY_TXC_PHY_RATE_MASK 0x007F
+
+/* 11b phy rate */
+#define D11AC_PHY_TXC_11B_PHY_RATE_MASK 0x0003
+#define D11AC_PHY_TXC_11B_PHY_RATE_1 0x0000
+#define D11AC_PHY_TXC_11B_PHY_RATE_2 0x0001
+#define D11AC_PHY_TXC_11B_PHY_RATE_5_5 0x0002
+#define D11AC_PHY_TXC_11B_PHY_RATE_11 0x0003
+
+/* 11a/g phy rate */
+#define D11AC_PHY_TXC_11AG_PHY_RATE_MASK 0x0007
+#define D11AC_PHY_TXC_11AG_PHY_RATE_6 0x0000
+#define D11AC_PHY_TXC_11AG_PHY_RATE_9 0x0001
+#define D11AC_PHY_TXC_11AG_PHY_RATE_12 0x0002
+#define D11AC_PHY_TXC_11AG_PHY_RATE_18 0x0003
+#define D11AC_PHY_TXC_11AG_PHY_RATE_24 0x0004
+#define D11AC_PHY_TXC_11AG_PHY_RATE_36 0x0005
+#define D11AC_PHY_TXC_11AG_PHY_RATE_48 0x0006
+#define D11AC_PHY_TXC_11AG_PHY_RATE_54 0x0007
+
+/* 11ac phy rate */
+#define D11AC_PHY_TXC_11AC_MCS_MASK 0x000F
+#define D11AC_PHY_TXC_11AC_NSS_MASK 0x0030
+#define D11AC_PHY_TXC_11AC_NSS_SHIFT 4
+
+/* 11n phy rate */
+#define D11AC_PHY_TXC_11N_MCS_MASK 0x003F
+#define D11AC2_PHY_TXC_11N_MCS_MASK 0x007F
+#define D11AC2_PHY_TXC_11N_PROP_MCS 0x0040 /* this represents bit mcs[6] */
+
+/* PhyTxControlWord_2 rest */
+#define D11AC_PHY_TXC_STBC 0x0040
+#define D11AC_PHY_TXC_DYN_BW_IN_NON_HT_PRESENT 0x0080
+#define D11AC_PHY_TXC_DYN_BW_IN_NON_HT_DYNAMIC 0x0100
+#define D11AC2_PHY_TXC_TXBF_USER_IDX_MASK 0xFE00
+#define D11AC2_PHY_TXC_TXBF_USER_IDX_SHIFT 9
+
+/* RtsCtsControl */
+#define D11AC_RTSCTS_FRM_TYPE_MASK 0x0001 /**< frame type */
+#define D11AC_RTSCTS_FRM_TYPE_11B 0x0000 /**< 11b */
+#define D11AC_RTSCTS_FRM_TYPE_11AG 0x0001 /**< 11a/g */
+#define D11AC_RTSCTS_USE_RTS 0x0004 /**< Use RTS */
+#define D11AC_RTSCTS_USE_CTS 0x0008 /**< Use CTS */
+#define D11AC_RTSCTS_SHORT_PREAMBLE 0x0010 /**< Long/short preamble: 0 - long, 1 - short? */
+#define D11AC_RTSCTS_LAST_RATE 0x0020 /**< this is last rate */
+#define D11AC_RTSCTS_IMBF 0x0040 /**< Implicit TxBF */
+#define D11AC_RTSCTS_MIMOPS_RTS 0x8000 /**< Use RTS for mimops */
+#define D11AC_RTSCTS_DPCU_VALID 0x0080 /**< DPCU Valid : Same bitfield as above */
+#define D11AC_RTSCTS_BF_IDX_MASK 0xF000 /**< 4-bit index to the beamforming block */
+#define D11AC_RTSCTS_BF_IDX_SHIFT 12
+#define D11AC_RTSCTS_RATE_MASK 0x0F00 /**< Rate table offset: bit 3-0 of PLCP byte 0 */
+#define D11AC_RTSCTS_USE_RATE_SHIFT 8
+
+/* BssIdEncAlg */
+#define D11AC_BSSID_MASK 0x000F /**< BSS index */
+#define D11AC_BSSID_SHIFT 0
+#define D11AC_ENCRYPT_ALG_MASK 0x00F0 /**< Encryption algorithm */
+#define D11AC_ENCRYPT_ALG_SHIFT 4
+#define D11AC_ENCRYPT_ALG_NOSEC 0x0000 /**< No security */
+#define D11AC_ENCRYPT_ALG_WEP 0x0010 /**< WEP */
+#define D11AC_ENCRYPT_ALG_TKIP 0x0020 /**< TKIP */
+#define D11AC_ENCRYPT_ALG_AES 0x0030 /**< AES */
+#define D11AC_ENCRYPT_ALG_WEP128 0x0040 /**< WEP128 */
+#define D11AC_ENCRYPT_ALG_NA 0x0050 /**< N/A */
+#define D11AC_ENCRYPT_ALG_WAPI 0x0060 /**< WAPI */
+
+/* AmpduDur */
+#define D11AC_AMPDU_MIN_DUR_IDX_MASK 0x000F /**< AMPDU minimum duration index */
+#define D11AC_AMPDU_MIN_DUR_IDX_SHIFT 0
+#define D11AC_AMPDU_MAX_DUR_MASK 0xFFF0 /**< AMPDU maximum duration in unit 16 usec */
+#define D11AC_AMPDU_MAX_DUR_SHIFT 4
+
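+/*
+ * Illustrative sketch (editorial): decoding the packed AmpduDur cache field;
+ * per the mask comments above, max duration is stored in 16 usec units.
+ * Guarded by the hypothetical D11_EXAMPLE_SNIPPETS flag.
+ */
+#ifdef D11_EXAMPLE_SNIPPETS
+static uint32 example_ampdu_max_dur_usec(uint16 ampdu_dur)
+{
+	return ((uint32)(ampdu_dur & D11AC_AMPDU_MAX_DUR_MASK) >> D11AC_AMPDU_MAX_DUR_SHIFT) * 16u;
+}
+#endif /* D11_EXAMPLE_SNIPPETS */
+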
+/**
+ * TX Descriptor definitions for supporting rev80 (HE)
+ */
+/* Maximum number of TX fallback rates per packet */
+#define D11_REV80_TXH_NUM_RATES 4
+#define D11_REV80_TXH_PHYTXCTL_MIN_LENGTH 1
+
+/** per rate info - fixed portion - rev80 */
+typedef struct d11txh_rev80_rate_fixed d11txh_rev80_rate_fixed_t;
+BWL_PRE_PACKED_STRUCT struct d11txh_rev80_rate_fixed {
+ uint16 TxRate; /* rate in 500Kbps */
+ uint16 RtsCtsControl; /* RTS - CTS control */
+ uint8 plcp[D11_PHY_HDR_LEN]; /* 6 bytes */
+} BWL_POST_PACKED_STRUCT;
+
+/* rev80 specific per packet info fields */
+typedef struct d11pktinfo_rev80 d11pktinfo_rev80_t;
+BWL_PRE_PACKED_STRUCT struct d11pktinfo_rev80 {
+ uint16 HEModeControl; /* 20 */
+ uint16 length; /* 22 - length of txd in bytes */
+} BWL_POST_PACKED_STRUCT;
+
+#define D11_REV80_TXH_TX_MODE_SHIFT 0 /* Bits 2:0 of HeModeControl */
+#define D11_REV80_TXH_TX_MODE_MASK 0x3
+#define D11_REV80_TXH_HTC_OFFSET_SHIFT 4 /* Bits 8:4 of HeModeControl */
+#define D11_REV80_TXH_HTC_OFFSET_MASK 0x01F0u
+#define D11_REV80_TXH_TWT_EOSP 0x0200u /* bit 9 indicates TWT EOSP */
+#define D11_REV80_TXH_QSZ_QOS_CTL_IND_SHIFT 10 /* Bit 10 of HeModeControl */
+#define D11_REV80_TXH_QSZ_QOS_CTL_IND_MASK (1 << D11_REV80_TXH_QSZ_QOS_CTL_IND_SHIFT)
+#define D11_REV80_TXH_USE_BSSCOLOR_SHM_SHIFT 15 /* Bit 15 of HEModeControl */
+#define D11_REV80_TXH_USE_BSSCOLOR_SHM_MASK (1 << D11_REV80_TXH_USE_BSSCOLOR_SHM_SHIFT)
+
+/* Calculate Length for short format TXD */
+#define D11_TXH_SHORT_LEN(__corerev__) (D11REV_GE(__corerev__, 80) ? \
+ D11_REV80_TXH_SHORT_LEN : \
+ D11AC_TXH_SHORT_LEN)
+
+/* Calculate Length for short format TXD (TXDC and/or FMF) */
+#define D11_TXH_SHORT_EX_LEN(__corerev__) (D11REV_GE(__corerev__, 80) ? \
+ D11_REV80_TXH_SHORT_EX_LEN : \
+ D11AC_TXH_SHORT_LEN)
+
+#define D11_REV80_TXH_IS_HE_AMPDU_SHIFT 11 /* Bit 11 of HeModeControl */
+#define D11_REV80_TXH_IS_HE_AMPDU_MASK (1 << D11_REV80_TXH_IS_HE_AMPDU_SHIFT)
+
+#define D11_REV80_PHY_TXC_EDCA 0x00
+#define D11_REV80_PHY_TXC_OFDMA_RA 0x01 /* Use Random Access Trigger for Tx */
+#define D11_REV80_PHY_TXC_OFDMA_DT 0x02 /* Use Directed Trigger for Tx */
+#define D11_REV80_PHY_TXC_OFDMA_ET 0x03 /* Use earliest Trigger Opportunity */
+
+/** Per cache info - rev80 */
+typedef struct d11txh_rev80_cache d11txh_rev80_cache_t;
+BWL_PRE_PACKED_STRUCT struct d11txh_rev80_cache {
+ d11txh_cache_common_t common; /* 0 - 7 */
+ uint16 ampdu_mpdu_all; /* 8 - 9 */
+ uint16 aggid; /* 10 - 11 */
+ uint8 tkipph1_index; /* 12 */
+ uint8 pktext; /* 13 */
+ uint16 hebid_map; /* 14 -15: HEB ID bitmap */
+} BWL_POST_PACKED_STRUCT;
+
+/** Fixed size portion of TX descriptor - rev80 */
+typedef struct d11txh_rev80 d11txh_rev80_t;
+BWL_PRE_PACKED_STRUCT struct d11txh_rev80 {
+ /**
+ * Per pkt info fields (common + rev80 specific)
+ *
+ * Note : Ensure that PktInfo field is always the first member
+	 * of the d11txh_rev80 struct (that is, at offset 0)
+ */
+ d11pktinfo_common_t PktInfo; /* 0 - 19 */
+ d11pktinfo_rev80_t PktInfoExt; /* 20 - 23 */
+
+ /** Per cache info */
+ d11txh_rev80_cache_t CacheInfo; /* 24 - 39 */
+
+ /**
+ * D11_REV80_TXH_NUM_RATES number of Rate Info blocks
+ * contribute to the variable size portion of the TXD.
+	 * Each Rate Info element (block) is a function of
+ * (N_PwrOffset, N_RU, N_User).
+ */
+ uint8 RateInfoBlock[1];
+} BWL_POST_PACKED_STRUCT;
+
+/* Size of fixed portion in TX descriptor (without CacheInfo(Link info) and RateInfoBlock)
+ * this portion never changes regardless of TXDC/FMF support.
+ */
+/* OFFSETOF() is available in bcmutils.h but including it will cause
+ * recursive inclusion of d11.h specifically on NDIS platforms.
+ */
+#ifdef BCMFUZZ
+ /* use 0x10 offset to avoid undefined behavior error due to NULL access */
+#define D11_REV80_TXH_FIXED_LEN (((uint)(uintptr)&((d11txh_rev80_t *)0x10)->CacheInfo) - 0x10)
+#else
+#define D11_REV80_TXH_FIXED_LEN ((uint)(uintptr)&((d11txh_rev80_t *)0)->CacheInfo)
+#endif /* BCMFUZZ */
+
+/* Short format tx descriptor only has per packet info (24 bytes) */
+#define D11_REV80_TXH_SHORT_LEN (sizeof(d11pktinfo_common_t) + sizeof(d11pktinfo_rev80_t))
+
+/* Size of CacheInfo(Link info) in TX descriptor */
+#define D11_REV80_TXH_LINK_INFO_LEN (sizeof(d11txh_rev80_cache_t))
+
+/* Size of the short format TX descriptor:
+ * with TXDC - the short TXD (40 bytes) includes PktInfo and Cache info, without Rate info
+ * with TXDC+FMF - the short TXD (24 bytes) includes PktInfo only, without Link info and Rate info
+ * do NOT use D11_REV80_TXH_SHORT_EX_LEN to calculate the long TXD length; the value depends on
+ * the FMF feature
+ */
+#if defined(FMF_LIT) && !defined(FMF_LIT_DISABLED)
+#define D11_REV80_TXH_SHORT_EX_LEN D11_REV80_TXH_FIXED_LEN
+#else
+#define D11_REV80_TXH_SHORT_EX_LEN (D11_REV80_TXH_FIXED_LEN + D11_REV80_TXH_LINK_INFO_LEN)
+#endif /* FMF_LIT && !FMF_LIT_DISABLED */
+
+/* Length of BFM0 field in RateInfo Blk */
+#define D11_REV80_TXH_BFM0_FIXED_LEN(pwr_offs) 2u
+
+/**
+ * Length of FBWInfo field in RateInfo Blk
+ *
+ * Note: for now this returns a fixed length of 1 word (2 bytes)
+ */
+#define D11_REV80_TXH_FBWINFO_FIXED_LEN(pwr_offs) 2
+
+#define D11_REV80_TXH_FIXED_RATEINFO_LEN sizeof(d11txh_rev80_rate_fixed_t)
+
+/**
+ * Macros to find size of N-RUs field in the PhyTxCtlWord.
+ */
+#define D11_REV80_TXH_TXC_N_RUs_FIELD_SIZE 1
+#define D11_REV80_TXH_TXC_PER_RU_INFO_SIZE 4
+#define D11_REV80_TXH_TXC_PER_RU_MIN_SIZE 2
+
+#define D11_REV80_TXH_TXC_RU_FIELD_SIZE(n_rus) ((n_rus == 1) ? \
+ (D11_REV80_TXH_TXC_PER_RU_MIN_SIZE) : \
+ ((D11_REV80_TXH_TXC_N_RUs_FIELD_SIZE) + \
+ ((n_rus) * D11_REV80_TXH_TXC_PER_RU_INFO_SIZE)))
+
+/**
+ * Macros to find size of N-Users field in the TXCTL_EXT
+ */
+#define D11_REV80_TXH_TXC_EXT_N_USERs_FIELD_SIZE 1
+#define D11_REV80_TXH_TXC_EXT_PER_USER_INFO_SIZE 4
+
+#define D11_REV80_TXH_TXC_N_USERs_FIELD_SIZE(n_users) \
+ ((n_users) ? \
+ (((n_users) * \
+ (D11_REV80_TXH_TXC_EXT_PER_USER_INFO_SIZE)) + \
+ (D11_REV80_TXH_TXC_EXT_N_USERs_FIELD_SIZE)) : \
+ (n_users))
+
+/**
+ * Size of each Tx Power Offset field in PhyTxCtlWord.
+ */
+#define D11_REV80_TXH_TXC_PWR_OFFSET_SIZE 1u
+
+/**
+ * Size of fixed / static fields in PhyTxCtlWord (all fields except N-RUs, N-Users and Pwr offsets)
+ */
+#define D11_REV80_TXH_TXC_CONST_FIELDS_SIZE 6u
+
+/**
+ * Macros used for filling PhyTxCtlWord
+ */
+
+/* PhyTxCtl Byte 0 */
+#define D11_REV80_PHY_TXC_FT_MASK 0x0007u
+#define D11_REV80_PHY_TXC_HE_FMT_MASK 0x0018u
+#define D11_REV80_PHY_TXC_SOFT_AP_MODE 0x0020u
+#define D11_REV80_PHY_TXC_NON_SOUNDING 0x0040u
+#define D11_REV80_PHY_TXC_SHORT_PREAMBLE 0x0080u
+#define D11_REV80_PHY_TXC_FRAME_TYPE_VHT 0x0003u
+#define D11_REV80_PHY_TXC_FRAME_TYPE_HT 0x0002u
+#define D11_REV80_PHY_TXC_FRAME_TYPE_LEG 0x0001u
+
+#define D11_REV80_PHY_TXC_HE_FMT_SHIFT 3u
+
+/* PhyTxCtl Byte 1 */
+#define D11_REV80_PHY_TXC_STBC 0x0080u
+
+/* PhyTxCtl Word 1 (Bytes 2 - 3) */
+#define D11_REV80_PHY_TXC_DPCU_SUBBAND_SHIFT 5u
+#define D11_REV80_PHY_TXC_DYNBW_PRESENT 0x2000u
+#define D11_REV80_PHY_TXC_DYNBW_MODE 0x4000u
+#define D11_REV80_PHY_TXC_MU 0x8000u
+#define D11_REV80_PHY_TXC_BW_MASK 0x0003u
+#define D11_REV80_PHY_TXC_BW_20MHZ 0x0000u
+#define D11_REV80_PHY_TXC_BW_40MHZ 0x0001u
+#define D11_REV80_PHY_TXC_BW_80MHZ 0x0002u
+#define D11_REV80_PHY_TXC_BW_160MHZ 0x0003u
+/* PhyTxCtl Word 2 (Bytes 4 - 5) */
+/* Though the antennacfg and coremask fields are 8 bits wide,
+ * only 4 bits are valid for 4369a0, hence only 4 bits are masked
+ */
+#define D11_REV80_PHY_TXC_ANT_CONFIG_MASK 0x00F0u
+#define D11_REV80_PHY_TXC_CORE_MASK 0x000Fu
+#define D11_REV80_PHY_TXC_ANT_CONFIG_SHIFT 4u
+/* upper byte- Ant. cfg, lower byte - Core */
+#define D11_REV80_PHY_TXC_ANT_CORE_MASK 0x0F0Fu
+
+/* PhyTxCtl BFM field */
+#define D11_REV80_PHY_TXC_BFM 0x80u
+
+/* PhyTxCtl power offsets */
+#define D11_REV80_PHY_TXC_PWROFS0_BYTE_POS 6u
+
+/* Phytx Ctl Sub band location */
+#define D11_REV80_PHY_TXC_SB_SHIFT 2u
+#define D11_REV80_PHY_TXC_SB_MASK 0x001Cu
+
+/* 11n phy rate */
+#define D11_REV80_PHY_TXC_11N_MCS_MASK 0x003Fu
+#define D11_REV80_PHY_TXC_11N_PROP_MCS 0x0040u /* this represents bit mcs[6] */
+
+/* 11ac phy rate */
+#define D11_REV80_PHY_TXC_11AC_NSS_SHIFT 4u
+
+/* PhyTxCtl Word0 */
+#define D11_REV80_PHY_TXC_MCS_NSS_MASK 0x7F00u
+#define D11_REV80_PHY_TXC_MCS_MASK 0xF00u
+#define D11_REV80_PHY_TXC_MCS_NSS_SHIFT 8u
+
+/* 11ax phy rate */
+#define D11_REV80_PHY_TXC_11AX_NSS_SHIFT 4u
+
+#define D11_PHY_TXC_FT_MASK(corerev) ((D11REV_GE(corerev, 80)) ? D11_REV80_PHY_TXC_FT_MASK : \
+ D11AC_PHY_TXC_FT_MASK)
+
+/* PhyTxCtl Word 4 */
+#define D11_REV80_PHY_TXC_HEHL_ENABLE 0x2000u
+
+/* PhyTxCtl Word 5 */
+#define D11_REV80_PHY_TXC_CORE0_PWR_OFFSET_SHIFT 8u
+#define D11_REV80_PHY_TXC_CORE0_PWR_OFFSET_MASK 0xFF00u
+/* PhyTxCtl Word 6 */
+#define D11_REV80_PHY_TXC_CORE1_PWR_OFFSET_MASK 0x00FFu
+/* Number of RU assigned */
+#define D11_REV80_PHY_TXC_NRU 0x0100u
+
+/* A wrapper structure for all versions of TxD/d11txh structures */
+typedef union d11txhdr {
+ d11txh_pre40_t pre40;
+ d11actxh_t rev40;
+ d11txh_rev80_t rev80;
+} d11txhdr_t;
+
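+/*
+ * Illustrative sketch (editorial): selecting the right union member by core
+ * revision; D11REV_GE() is the corerev comparison macro this file already
+ * uses. Guarded by the hypothetical D11_EXAMPLE_SNIPPETS flag.
+ */
+#ifdef D11_EXAMPLE_SNIPPETS
+static uint16 *example_mactxctl_low(d11txhdr_t *txh, uint corerev)
+{
+	if (D11REV_GE(corerev, 80))
+		return &txh->rev80.PktInfo.MacTxControlLow;
+	else if (D11REV_GE(corerev, 40))
+		return &txh->rev40.PktInfo.MacTxControlLow;
+	else
+		return &txh->pre40.MacTxControlLow;
+}
+#endif /* D11_EXAMPLE_SNIPPETS */
+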
+/**
+ * Generic tx status packet for software use. It is independent of the hardware
+ * structure of any particular core. The hardware structure should be read and
+ * converted to this structure before being handed over for software consumption.
+ */
+typedef struct tx_status tx_status_t;
+typedef struct tx_status_macinfo tx_status_macinfo_t;
+
+BWL_PRE_PACKED_STRUCT struct tx_status_macinfo {
+ int8 pad0;
+ int8 is_intermediate;
+ int8 pm_indicated;
+ int8 pad1;
+ uint8 suppr_ind;
+ int8 was_acked;
+ uint16 rts_tx_cnt;
+ uint16 frag_tx_cnt;
+ uint16 cts_rx_cnt;
+ uint16 raw_bits;
+ uint32 s3;
+ uint32 s4;
+ uint32 s5;
+ uint32 s8;
+ uint32 s9;
+ uint32 s10;
+ uint32 s11;
+ uint32 s12;
+ uint32 s13;
+ uint32 s14;
+ /* 128BA support */
+ uint16 ncons_ext;
+ uint16 s15;
+ uint32 ack_map[8];
+ /* pktlat */
+ uint16 pkt_fetch_ts; /* PSM Packet Fetch Time */
+ uint16 med_acc_dly; /* Medium Access Delay */
+ uint16 rx_dur; /* Rx duration */
+ uint16 mac_susp_dur; /* Mac Suspend Duration */
+ uint16 txstatus_ts; /* TxStatus Time */
+ uint16 tx_en_cnt; /* Number of times Tx was enabled */
+ uint16 oac_txs_cnt; /* Other AC TxStatus count */
+ uint16 data_retx_cnt; /* DataRetry count */
+ uint16 pktlat_rsvd; /* reserved */
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct tx_status {
+ uint16 framelen;
+ uint16 frameid;
+ uint16 sequence;
+ uint16 phyerr;
+ uint32 lasttxtime;
+ uint16 ackphyrxsh;
+ uint16 procflags; /* tx status processing flags */
+ uint32 dequeuetime;
+ tx_status_macinfo_t status;
+} BWL_POST_PACKED_STRUCT;
+
+/* Bits in struct tx_status procflags */
+#define TXS_PROCFLAG_AMPDU_BA_PKG2_READ_REQD 0x1 /* AMPDU BA txs pkg2 read required */
+
+/* status field bit definitions */
+#define TX_STATUS_FRM_RTX_MASK 0xF000
+#define TX_STATUS_FRM_RTX_SHIFT 12
+#define TX_STATUS_RTS_RTX_MASK 0x0F00
+#define TX_STATUS_RTS_RTX_SHIFT 8
+#define TX_STATUS_MASK 0x00FE
+#define TX_STATUS_PMINDCTD (1 << 7) /**< PM mode indicated to AP */
+#define TX_STATUS_INTERMEDIATE (1 << 6) /**< intermediate or 1st ampdu pkg */
+#define TX_STATUS_AMPDU (1 << 5) /**< AMPDU status */
+#define TX_STATUS_SUPR_MASK 0x1C /**< suppress status bits (4:2) */
+#define TX_STATUS_SUPR_SHIFT 2
+#define TX_STATUS_ACK_RCV (1 << 1) /**< ACK received */
+#define TX_STATUS_VALID (1 << 0) /**< Tx status valid (corerev >= 5) */
+#define TX_STATUS_NO_ACK 0
+#define TX_STATUS_BE (TX_STATUS_ACK_RCV | TX_STATUS_PMINDCTD)
+
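+/*
+ * Illustrative sketch (editorial): classifying a pre-rev40 status word with
+ * the bits above. Guarded by the hypothetical D11_EXAMPLE_SNIPPETS flag.
+ */
+#ifdef D11_EXAMPLE_SNIPPETS
+static bool example_txs_acked(uint16 status)
+{
+	return (status & TX_STATUS_VALID) && (status & TX_STATUS_ACK_RCV);
+}
+
+static uint16 example_txs_suppr_code(uint16 status)
+{
+	return (status & TX_STATUS_SUPR_MASK) >> TX_STATUS_SUPR_SHIFT;
+}
+#endif /* D11_EXAMPLE_SNIPPETS */
+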
+/* TX_STATUS for fw initiated pktfree event */
+#define TX_STATUS_SW_Q_FLUSH 0x10000
+
+/* status field bit definitions phy rev > 40 */
+#define TX_STATUS40_FIRST 0x0002
+#define TX_STATUS40_INTERMEDIATE 0x0004
+#define TX_STATUS40_PMINDCTD 0x0008
+
+#define TX_STATUS40_SUPR 0x00f0
+#define TX_STATUS40_SUPR_SHIFT 4
+
+#define TX_STATUS40_NCONS 0x7f00
+
+#define TX_STATUS40_NCONS_SHIFT 8
+
+#define TX_STATUS40_ACK_RCV 0x8000
+
+/* tx status bytes 8-16 */
+#define TX_STATUS40_TXCNT_RATE0_MASK 0x000000ff
+#define TX_STATUS40_TXCNT_RATE0_SHIFT 0
+
+#define TX_STATUS40_TXCNT_RATE1_MASK 0x00ff0000
+#define TX_STATUS40_TXCNT_RATE1_SHIFT 16
+
+#define TX_STATUS40_MEDIUM_DELAY_MASK 0xFFFF
+
+#define TX_STATUS40_TXCNT(s3, s4) \
+ (((s3 & TX_STATUS40_TXCNT_RATE0_MASK) >> TX_STATUS40_TXCNT_RATE0_SHIFT) + \
+ ((s3 & TX_STATUS40_TXCNT_RATE1_MASK) >> TX_STATUS40_TXCNT_RATE1_SHIFT) + \
+ ((s4 & TX_STATUS40_TXCNT_RATE0_MASK) >> TX_STATUS40_TXCNT_RATE0_SHIFT) + \
+ ((s4 & TX_STATUS40_TXCNT_RATE1_MASK) >> TX_STATUS40_TXCNT_RATE1_SHIFT))
+
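+/*
+ * Illustrative sketch (editorial): total attempts across both rate pairs of a
+ * rev40 txstatus come straight from status words s3/s4 via the macro above.
+ * Guarded by the hypothetical D11_EXAMPLE_SNIPPETS flag.
+ */
+#ifdef D11_EXAMPLE_SNIPPETS
+static uint32 example_total_txcnt(const tx_status_t *txs)
+{
+	return TX_STATUS40_TXCNT(txs->status.s3, txs->status.s4);
+}
+#endif /* D11_EXAMPLE_SNIPPETS */
+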
+#define TX_STATUS40_TXCNT_RT0(s3) \
+ ((s3 & TX_STATUS40_TXCNT_RATE0_MASK) >> TX_STATUS40_TXCNT_RATE0_SHIFT)
+
+#define TX_STATUS_EXTBA_TXCNT_BITS 0x3u
+#define TX_STATUS_EXTBA_TXSUCCNT_BITS 0x1u
+#define TX_STATUS_EXTBA_TXSIZE_RT 0x4u
+
+#define TX_STATUS_EXTBA_TXCNT_RATE_MASK 0x7u
+#define TX_STATUS_EXTBA_TXSUCCNT_RATE_MASK 0x8u
+
+#define TX_STATUS_EXTBA_TXCNT_RATE_SHIFT 0x8u
+#define TX_STATUS_EXTBA_TXSUCCNT_RATE_SHIFT 0x8u
+
+#define TX_STATUS_EXTBA_TXCNT_RT(s15, rt) \
+ ((((s15) & (TX_STATUS_EXTBA_TXCNT_RATE_MASK << ((rt) * TX_STATUS_EXTBA_TXSIZE_RT))) >> \
+ ((rt) * TX_STATUS_EXTBA_TXSIZE_RT)) << TX_STATUS_EXTBA_TXCNT_RATE_SHIFT)
+
+#define TX_STATUS_EXTBA_TXSUCCNT_RT(s15, rt) \
+ ((((s15) & (TX_STATUS_EXTBA_TXSUCCNT_RATE_MASK << ((rt) * TX_STATUS_EXTBA_TXSIZE_RT))) >> \
+ (((rt) * TX_STATUS_EXTBA_TXSIZE_RT))) << TX_STATUS_EXTBA_TXSUCCNT_RATE_SHIFT)
+
+#define TX_STATUS40_TX_MEDIUM_DELAY(txs) ((txs)->status.s8 & TX_STATUS40_MEDIUM_DELAY_MASK)
+
+/* chip rev 40 pkg 2 fields */
+#define TX_STATUS40_IMPBF_MASK 0x0000000Cu /* implicit bf applied */
+#define TX_STATUS40_IMPBF_BAD_MASK 0x00000010u /* impl bf applied but ack frm has no bfm */
+#define TX_STATUS40_IMPBF_LOW_MASK 0x00000020u /* ack received with low rssi */
+#define TX_STATUS40_BFTX 0x00000040u /* Beamformed pkt TXed */
+/* pkt two status field bit definitions mac rev > 64 */
+#define TX_STATUS64_MUTX 0x00000080u /* Not used in STA-dongle chips */
+
+/* pkt two status field bit definitions mac rev > 80 */
+
+/* TXS rate cookie contains
+ * mac rev 81/82 : RIT idx in bit[4:0] of RIB CtrlStat[0]
+ * mac rev >= 83 : RIB version in bit[4:0] of RIB CtrlStat[1]
+ */
+#define TX_STATUS80_RATE_COOKIE_MASK 0x00003E00u
+#define TX_STATUS80_RATE_COOKIE_SHIFT 9u
+#define TX_STATUS80_NAV_HDR 0x00004000u /* NAV Overridden */
+
+#define TX_STATUS80_TBPPDU_MASK 0x00000040u /* Indicates TBPPDU TX */
+#define TX_STATUS80_TBPPDU_SHIFT 6u
+#define TX_STATUS40_RTS_RTX_MASK 0x00ff0000u
+#define TX_STATUS40_RTS_RTX_SHIFT 16u
+#define TX_STATUS40_CTS_RRX_MASK 0xff000000u
+#define TX_STATUS40_CTS_RRX_SHIFT 24u
+
+/*
+ * Intermediate status for TBPPDU (for stats purposes)
+ * First uint16 word (word0 - status): VALID, !FIRST, INTERMEDIATE
+ * Remaining word0 bits (3 - 15) are unassigned
+ */
+#define TX_ITBSTATUS(status) \
+ (((status) & (TX_STATUS40_FIRST | TX_STATUS40_INTERMEDIATE)) == TX_STATUS40_INTERMEDIATE)
+/* Remainder of first uint32 (words 0 and 1) */
+#define TX_ITBSTATUS_LSIG_MASK 0x0000fff0u
+#define TX_ITBSTATUS_LSIG_SHIFT 4u
+#define TX_ITBSTATUS_TXPOWER_MASK 0xffff0000u
+#define TX_ITBSTATUS_TXPOWER_SHIFT 16u
+/* Second uint32 (words 2 and 3) */
+#define TX_ITBSTATUS_NULL_DELIMS_MASK 0x0007ffffu /* 19 bits * 4B => ~2M bytes */
+#define TX_ITBSTATUS_NULL_DELIMS_SHIFT 0u
+#define TX_ITBSTATUS_ACKED_MPDUS_MASK 0x3ff80000u /* 11 bits: 0-2047 */
+#define TX_ITBSTATUS_ACKED_MPDUS_SHIFT 19u
+/* Third uint32 (words 4 and 5) */
+#define TX_ITBSTATUS_SENT_MPDUS_MASK 0x0000ffe0u /* 11 bits: 0-2047 */
+#define TX_ITBSTATUS_SENT_MPDUS_SHIFT 5u
+#define TX_ITBSTATUS_APTXPWR_MASK 0x003f0000u /* 0-60 => -20 - 40 */
+#define TX_ITBSTATUS_APTXPWR_SHIFT 16u
+#define TX_ITBSTATUS_ULPKTEXT_MASK 0x01c00000u
+#define TX_ITBSTATUS_ULPKTEXT_SHIFT 22u
+#define TX_ITBSTATUS_MORETF_MASK 0x02000000u
+#define TX_ITBSTATUS_MORETF_SHIFT 25u
+#define TX_ITBSTATUS_CSREQ_MASK 0x04000000u
+#define TX_ITBSTATUS_CSREQ_SHIFT 26u
+#define TX_ITBSTATUS_ULBW_MASK 0x18000000u
+#define TX_ITBSTATUS_ULBW_SHIFT 27u
+#define TX_ITBSTATUS_GI_LTF_MASK 0x60000000u
+#define TX_ITBSTATUS_GI_LTF_SHIFT 29u
+#define TX_ITBSTATUS_MUMIMO_LTF_MASK 0x80000000u
+#define TX_ITBSTATUS_MUMIMO_LTF_SHIFT 30u
+/* Fourth uint32 (words 6 and 7) */
+#define TX_ITBSTATUS_CODING_TYPE_MASK 0x00000001u
+#define TX_ITBSTATUS_CODING_TYPE_SHIFT 0u
+#define TX_ITBSTATUS_MCS_MASK 0x0000001eu
+#define TX_ITBSTATUS_MCS_SHIFT 1u
+#define TX_ITBSTATUS_DCM_MASK 0x00000020u
+#define TX_ITBSTATUS_DCM_SHIFT 5u
+#define TX_ITBSTATUS_RU_ALLOC_MASK 0x00003fc0u
+#define TX_ITBSTATUS_RU_ALLOC_SHIFT 6u
+/* Bits 14 and 15 unassigned */
+#define TX_ITBSTATUS_NSS_MASK 0x00030000u
+#define TX_ITBSTATUS_NSS_SHIFT 16u
+#define TX_ITBSTATUS_TARGET_RSSI_MASK 0x03fc0000u
+#define TX_ITBSTATUS_TARGET_RSSI_SHIFT 18u
+#define TX_ITBSTATUS_RA_RU_MASK 0x04000000u
+#define TX_ITBSTATUS_RA_RU_SHIFT 26u
+/* Bits 27 through 31 unassigned */
+/* End of intermediate TBPPDU txstatus definitions */
+
+/* MU group info txstatus field (s3 b[31:16]) */
+#define TX_STATUS64_MU_GID_MASK 0x003f0000u
+#define TX_STATUS64_MU_GID_SHIFT 16u
+#define TX_STATUS64_MU_BW_MASK 0x00c00000u
+#define TX_STATUS64_MU_BW_SHIFT 22u
+#define TX_STATUS64_MU_TXPWR_MASK 0x7f000000u
+#define TX_STATUS64_MU_TXPWR_SHIFT 24u
+#define TX_STATUS64_MU_SGI_MASK 0x80000080u
+#define TX_STATUS64_MU_SGI_SHIFT 31u
+#define TX_STATUS64_INTERM_MUTXCNT(s3) \
+ ((s3 & TX_STATUS40_TXCNT_RATE0_MASK) >> TX_STATUS40_TXCNT_RATE0_SHIFT)
+
+#define TX_STATUS64_MU_GID(s3) ((s3 & TX_STATUS64_MU_GID_MASK) >> TX_STATUS64_MU_GID_SHIFT)
+#define TX_STATUS64_MU_BW(s3) ((s3 & TX_STATUS64_MU_BW_MASK) >> TX_STATUS64_MU_BW_SHIFT)
+#define TX_STATUS64_MU_TXPWR(s3) ((s3 & TX_STATUS64_MU_TXPWR_MASK) >> TX_STATUS64_MU_TXPWR_SHIFT)
+#define TX_STATUS64_MU_SGI(s3) ((s3 & TX_STATUS64_MU_SGI_MASK) >> TX_STATUS64_MU_SGI_SHIFT)
+
+/* MU user info0 txstatus field (s4 b[15:0]) */
+#define TX_STATUS64_MU_MCS_MASK 0x0000000f
+#define TX_STATUS64_MU_MCS_SHIFT 0
+#define TX_STATUS64_MU_NSS_MASK 0x00000070
+#define TX_STATUS64_MU_NSS_SHIFT 4
+#define TX_STATUS64_MU_SNR_MASK 0x0000ff00
+#define TX_STATUS64_MU_SNR_SHIFT 8
+
+#define TX_STATUS64_MU_MCS(s4) ((s4 & TX_STATUS64_MU_MCS_MASK) >> TX_STATUS64_MU_MCS_SHIFT)
+#define TX_STATUS64_MU_NSS(s4) ((s4 & TX_STATUS64_MU_NSS_MASK) >> TX_STATUS64_MU_NSS_SHIFT)
+#define TX_STATUS64_MU_SNR(s4) ((s4 & TX_STATUS64_MU_SNR_MASK) >> TX_STATUS64_MU_SNR_SHIFT)
+
+/* MU txstatus rspec field (NSS | MCS) */
+#define TX_STATUS64_MU_RSPEC_MASK (TX_STATUS64_MU_NSS_MASK | TX_STATUS64_MU_MCS_MASK)
+#define TX_STATUS64_MU_RSPEC_SHIFT 0
+
+#define TX_STATUS64_MU_RSPEC(s4) ((s4 & TX_STATUS64_MU_RSPEC_MASK) >> TX_STATUS64_MU_RSPEC_SHIFT)
+
+/* MU user info0 txstatus field (s4 b[31:16]) */
+#define TX_STATUS64_MU_GBMP_MASK 0x000f0000
+#define TX_STATUS64_MU_GBMP_SHIFT 16
+#define TX_STATUS64_MU_GPOS_MASK 0x00300000
+#define TX_STATUS64_MU_GPOS_SHIFT 20
+#define TX_STATUS64_MU_TXCNT_MASK 0x0fc00000
+#define TX_STATUS64_MU_TXCNT_SHIFT 22
+
+#define TX_STATUS64_MU_GBMP(s4) ((s4 & TX_STATUS64_MU_GBMP_MASK) >> TX_STATUS64_MU_GBMP_SHIFT)
+#define TX_STATUS64_MU_GPOS(s4) ((s4 & TX_STATUS64_MU_GPOS_MASK) >> TX_STATUS64_MU_GPOS_SHIFT)
+#define TX_STATUS64_MU_TXCNT(s4) ((s4 & TX_STATUS64_MU_TXCNT_MASK) >> TX_STATUS64_MU_TXCNT_SHIFT)
+
+#define HE_MU_APTX_PWR_MAX 60u
+#define HE_TXS_MU_APTX_PWR_DBM(aptx_pwr) ((aptx_pwr) - 20u)
+
+#define HE_TXS_MU_TARGET_RSSI_RANG 90
+#define HE_TXS_MU_TARGET_RSSI_MAX_PWR 127
+#define HE_TXS_MU_TARGET_RSSI_DBM(rssi) ((rssi) - 110)
+
+#define HE_TXS_W4_MU_GET_RU_INDEX(index) (((index) <= HE_MAX_26_TONE_RU_INDX) ? 0u : \
+ ((index) <= HE_MAX_52_TONE_RU_INDX) ? 1u : \
+ ((index) <= HE_MAX_106_TONE_RU_INDX) ? 2u : \
+ ((index) <= HE_MAX_242_TONE_RU_INDX) ? 3u : \
+ ((index) <= HE_MAX_484_TONE_RU_INDX) ? 4u :\
+ ((index) <= HE_MAX_996_TONE_RU_INDX) ? 5u : 6u)
+
+/* Bit 8 indicates upper 80 MHz */
+#define HE_TXS_W4_MU_RU_INDEX_RU_INDEX_MASK 0x7Fu
+#define HE_TXS_W4_MU_RU_INDEX_TONE(index) HE_TXS_W4_MU_GET_RU_INDEX(((index) & \
+ HE_TXS_W4_MU_RU_INDEX_RU_INDEX_MASK))
+
+#define HE_TXS_W3_MU_APTX_PWR_MASK 0x003F0000u
+#define HE_TXS_W3_MU_APTX_PWR_SHIFT 16u
+#define HE_TXS_W3_MU_PKT_EXT_MASK 0x01C00000u
+#define HE_TXS_W3_MU_PKT_EXT_SHIFT 22u
+#define HE_TXS_W3_MU_MORE_TF_MASK 0x02000000u
+#define HE_TXS_W3_MU_MORE_TF_SHIFT 25u
+#define HE_TXS_W3_MU_CS_REQ_MASK 0x04000000u
+#define HE_TXS_W3_MU_CS_REQ_SHIFT 26u
+#define HE_TXS_W3_MU_UL_BW_MASK 0x18000000u
+#define HE_TXS_W3_MU_UL_BW_SHIFT 27u
+#define HE_TXS_W3_MU_GI_LTF_MASK 0x60000000u
+#define HE_TXS_W3_MU_GI_LTF_SHIFT 29u
+#define HE_TXS_W3_MU_MIMO_LTF_MASK 0x80000000u
+#define HE_TXS_W3_MU_MIMO_LTF_SHIFT 31u
+
+#define HE_TXS_W3_MU_APTX_PWR(s3) (((s3) & HE_TXS_W3_MU_APTX_PWR_MASK) >> \
+ HE_TXS_W3_MU_APTX_PWR_SHIFT)
+#define HE_TXS_W3_MU_PKT_EXT(s3) (((s3) & HE_TXS_W3_MU_PKT_EXT_MASK) >> \
+ HE_TXS_W3_MU_PKT_EXT_SHIFT)
+#define HE_TXS_W3_MU_MORE_TF(s3) (((s3) & HE_TXS_W3_MU_MORE_TF_MASK) >> \
+ HE_TXS_W3_MU_MORE_TF_SHIFT)
+#define HE_TXS_W3_MU_CS_REQ(s3) (((s3) & HE_TXS_W3_MU_CS_REQ_MASK) >> \
+ HE_TXS_W3_MU_CS_REQ_SHIFT)
+#define HE_TXS_W3_MU_UL_BW(s3) (((s3) & HE_TXS_W3_MU_UL_BW_MASK) >> \
+ HE_TXS_W3_MU_UL_BW_SHIFT)
+#define HE_TXS_W3_MU_GI_LTF(s3) (((s3) & HE_TXS_W3_MU_GI_LTF_MASK) >> \
+ HE_TXS_W3_MU_GI_LTF_SHIFT)
+#define HE_TXS_W3_MU_MIMO_LT(s3) (((s3) & HE_TXS_W3_MU_MIMO_LTF_MASK) >> \
+ HE_TXS_W3_MU_MIMO_LTF_SHIFT)
+
+#define HE_TXS_W4_MU_CODINF_TYPE_MASK 0x00000001u
+#define HE_TXS_W4_MU_CODINF_TYPE_SHIFT 0u
+#define HE_TXS_W4_MU_MCS_MASK 0x0000001Eu
+#define HE_TXS_W4_MU_MCS_SHIFT 1u
+#define HE_TXS_W4_MU_DCM_MASK 0x00000020u
+#define HE_TXS_W4_MU_DCM_SHIFT 5u
+#define HE_TXS_W4_RU_ALLOCATION_MASK 0x00003FC0u
+#define HE_TXS_W4_RU_ALLOCATION_SHIFT 6u
+
+#define HE_TXS_W4_MU_CODINF_TYPE(s4) (((s4) & HE_TXS_W4_MU_CODINF_TYPE_MASK) >> \
+ HE_TXS_W4_MU_CODINF_TYPE_SHIFT)
+#define HE_TXS_W4_MU_MCS(s4) (((s4) & HE_TXS_W4_MU_MCS_MASK) >> \
+ HE_TXS_W4_MU_MCS_SHIFT)
+#define HE_TXS_W4_MU_DCM(s4) (((s4) & HE_TXS_W4_MU_DCM_MASK) >> \
+ HE_TXS_W4_MU_DCM_SHIFT)
+#define HE_TXS_W4_RU_ALLOCATION(s4) (((s4) & HE_TXS_W4_RU_ALLOCATION_MASK) >> \
+ HE_TXS_W4_RU_ALLOCATION_SHIFT)
+
+#define HE_TXS_W4_MU_NSS_MASK 0x00030000u
+#define HE_TXS_W4_MU_NSS_SHIFT 16u
+#define HE_TXS_W4_MU_TARGET_RSSI_MASK 0x03FC0000u
+#define HE_TXS_W4_MU_TARGET_RSSI_SHIFT 18u
+
+#define HE_TXS_W4_MU_NSS(s4) (((s4) & HE_TXS_W4_MU_NSS_MASK) >> \
+ HE_TXS_W4_MU_NSS_SHIFT)
+#define HE_TXS_W4_MU_TARGET_RSSI(s4) (((s4) & HE_TXS_W4_MU_TARGET_RSSI_MASK) >> \
+ HE_TXS_W4_MU_TARGET_RSSI_SHIFT)
+
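+/*
+ * Illustrative sketch (editorial): bucketing the RU allocation reported in
+ * txstatus word 4 into the tone-size classes returned by
+ * HE_TXS_W4_MU_GET_RU_INDEX (0 = 26-tone up to 6 = largest). The
+ * HE_MAX_*_TONE_RU_INDX limits come from the 11ax definitions this file
+ * depends on; the guard flag is hypothetical.
+ */
+#ifdef D11_EXAMPLE_SNIPPETS
+static uint32 example_ru_tone_class(uint32 s4)
+{
+	return HE_TXS_W4_MU_RU_INDEX_TONE(HE_TXS_W4_RU_ALLOCATION(s4));
+}
+#endif /* D11_EXAMPLE_SNIPPETS */
+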
+/* WARNING: when modifying the suppress reason codes below, also update
+ * wlc_tx_status_t, TX_STS_REASON_STRINGS and
+ * wlc_tx_status_map_hw_to_sw_supr_code()
+ */
+/* status field bit definitions */
+/** suppress status reason codes */
+enum {
+ TX_STATUS_SUPR_NONE = 0,
+ TX_STATUS_SUPR_PMQ = 1, /**< PMQ entry */
+ TX_STATUS_SUPR_FLUSH = 2, /**< flush request */
+ TX_STATUS_SUPR_FRAG = 3, /**< previous frag failure */
+ TX_STATUS_SUPR_TBTT = 3, /**< SHARED: Probe response supr for TBTT */
+ TX_STATUS_SUPR_BADCH = 4, /**< channel mismatch */
+ TX_STATUS_SUPR_EXPTIME = 5, /**< lifetime expiry */
+ TX_STATUS_SUPR_UF = 6, /**< underflow */
+#ifdef WLP2P_UCODE
+ TX_STATUS_SUPR_NACK_ABS = 7, /**< BSS entered ABSENCE period */
+#endif
+ TX_STATUS_SUPR_PPS = 8, /**< Pretend PS */
+ TX_STATUS_SUPR_PHASE1_KEY = 9, /**< Request new TKIP phase-1 key */
+ TX_STATUS_UNUSED = 10, /**< Unused in trunk */
+ TX_STATUS_INT_XFER_ERR = 11, /**< Internal DMA xfer error */
+ TX_STATUS_SUPR_TWT_SP_OUT = 12, /**< Suppress Tx outside TWTSP */
+ NUM_TX_STATUS_SUPR
+};
+
+/** Unexpected tx status for rate update */
+#define TX_STATUS_UNEXP(status) \
+ ((((status.is_intermediate))) && \
+ TX_STATUS_UNEXP_AMPDU(status))
+
+/** Unexpected tx status for A-MPDU rate update */
+#ifdef WLP2P_UCODE
+#define TX_STATUS_UNEXP_AMPDU(status) \
+ ((((status.suppr_ind)) != TX_STATUS_SUPR_NONE) && \
+ (((status.suppr_ind)) != TX_STATUS_SUPR_EXPTIME) && \
+ (((status.suppr_ind)) != TX_STATUS_SUPR_NACK_ABS))
+#else
+#define TX_STATUS_UNEXP_AMPDU(status) \
+ ((((status.suppr_ind)) != TX_STATUS_SUPR_NONE) && \
+ (((status.suppr_ind)) != TX_STATUS_SUPR_EXPTIME))
+#endif
+
+/**
+ * This defines the collection of supp reasons (including none)
+ * for which mac has done its (re-)transmission in any of ucode retx schemes
+ * which include ucode/hw/aqm agg
+ */
+#define TXS_SUPR_MAGG_DONE_MASK ((1 << TX_STATUS_SUPR_NONE) | \
+ (1 << TX_STATUS_SUPR_UF) | \
+ (1 << TX_STATUS_SUPR_FRAG) | \
+ (1 << TX_STATUS_SUPR_EXPTIME))
+#define TXS_SUPR_MAGG_DONE(suppr_ind) \
+ ((1 << (suppr_ind)) & TXS_SUPR_MAGG_DONE_MASK)
+
+#define TX_STATUS_BA_BMAP03_MASK 0xF000 /**< ba bitmap 0:3 in 1st pkg */
+#define TX_STATUS_BA_BMAP03_SHIFT 12 /**< ba bitmap 0:3 in 1st pkg */
+#define TX_STATUS_BA_BMAP47_MASK 0x001E /**< ba bitmap 4:7 in 2nd pkg */
+#define TX_STATUS_BA_BMAP47_SHIFT 3 /**< ba bitmap 4:7 in 2nd pkg */
+
+/* RXE (Receive Engine) */
+
+/* RCM_CTL */
+#define RCM_INC_MASK_H 0x0080
+#define RCM_INC_MASK_L 0x0040
+#define RCM_INC_DATA 0x0020
+#define RCM_INDEX_MASK 0x001F
+#define RCM_SIZE 15
+
+#define RCM_MAC_OFFSET 0 /**< current MAC address */
+#define RCM_BSSID_OFFSET 3 /**< current BSSID address */
+#define RCM_F_BSSID_0_OFFSET 6 /**< foreign BSS CFP tracking */
+#define RCM_F_BSSID_1_OFFSET 9 /**< foreign BSS CFP tracking */
+#define RCM_F_BSSID_2_OFFSET 12 /**< foreign BSS CFP tracking */
+
+#define RCM_WEP_TA0_OFFSET 16
+#define RCM_WEP_TA1_OFFSET 19
+#define RCM_WEP_TA2_OFFSET 22
+#define RCM_WEP_TA3_OFFSET 25
+
+/* AMT - Address Match Table */
+
+/* AMT Attribute bits */
+#define AMT_ATTR_VALID 0x8000 /**< Mark the table entry valid */
+#define AMT_ATTR_A1 0x0008 /**< Match for A1 */
+#define AMT_ATTR_A2 0x0004 /**< Match for A2 */
+#define AMT_ATTR_A3 0x0002 /**< Match for A3 */
+
+/* AMT Index defines */
+#define AMT_SIZE_64 64 /* number of AMT entries */
+#define AMT_SIZE_128 128 /* number of AMT entries for corerev >= 64 */
+#define AMT_IDX_MAC 63 /**< device MAC */
+#define AMT_IDX_BSSID 62 /**< BSSID match */
+#define AMT_IDX_TRANSMITTED_BSSID 60 /**< transmitted BSSID in multiple BSSID set */
+#define AMT_WORD_CNT 2 /* Number of words per AMT entry */
+
+#define AMT_SIZE(_corerev) (D11REV_GE(_corerev, 64) ? \
+ (D11REV_GE(_corerev, 80) ? AMT_SIZE_64 : AMT_SIZE_128) : \
+ AMT_SIZE_64)
+
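+/* Illustrative sketch: AMT_SIZE() folds the corerev checks above into one
+ * expression, e.g. as the bound when walking the table, and the attribute
+ * bits compose a valid A1/A2-matching entry. The per-entry access is
+ * elided; the function name is hypothetical.
+ */
+#if defined(D11_AMT_EXAMPLE) /* example only, never built */
+static uint16
+amt_walk_example(uint32 corerev)
+{
+	uint16 attr = AMT_ATTR_VALID | AMT_ATTR_A1 | AMT_ATTR_A2;
+	uint16 idx;
+	for (idx = 0; idx < AMT_SIZE(corerev); idx++) {
+		/* per-entry access would go here */
+	}
+	return attr;
+}
+#endif /* D11_AMT_EXAMPLE */
+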
+/* RMC entries */
+#define AMT_IDX_MCAST_ADDR 61 /**< MCAST address for Reliable Mcast feature */
+#define AMT_IDX_MCAST_ADDR1 59 /**< MCAST address for Reliable Mcast feature */
+#define AMT_IDX_MCAST_ADDR2 58 /**< MCAST address for Reliable Mcast feature */
+#define AMT_IDX_MCAST_ADDR3 57 /**< MCAST address for Reliable Mcast feature */
+
+#ifdef WLMESH
+/* note: this is the max supported by ucode. But the ARM driver can
+ * only support mesh_info->mesh_max_peers, which should be <= this value.
+ */
+
+#define AMT_MAX_MESH_PEER 10
+#define AMT_MAXIDX_MESH_PEER 60
+#define AMT_MAXIDX_P2P_USE \
+ (AMT_MAXIDX_MESH_PEER - AMT_MAX_MESH_PEER)
+#else
+#define AMT_MAXIDX_P2P_USE 60 /**< Max P2P entry to use */
+#endif /* WLMESH */
+#define AMT_MAX_TXBF_ENTRIES 7 /**< Max tx beamforming entry */
+/* PSTA AWARE AP: Max PSTA Tx beamforming entry */
+#define AMT_MAX_TXBF_PSTA_ENTRIES 20
+
+/* M_AMT_INFO SHM bit field definition */
+#define AMTINFO_BMP_IBSS (1u << 0u) /* IBSS Station */
+#define AMTINFO_BMP_MESH (1u << 1u) /* MESH Station */
+#define AMTINFO_BMP_BSSID (1u << 2u) /* BSSID-only */
+#define AMTINFO_BMP_IS_WAPI (1u << 3u) /* For WAPI keyid extraction */
+#define AMTINFO_BMP_IS_HE (1u << 13u) /* For HE peer indication */
+
+#define AUXPMQ_ENTRIES 64 /* number of AUX PMQ entries */
+#define AUXPMQ_ENTRY_SIZE 8
+
+/* PSM Block */
+
+/* psm_phy_hdr_param bits */
+#define MAC_PHY_RESET 1
+#define MAC_PHY_CLOCK_EN 2
+#define MAC_PHY_FORCE_CLK 4
+#define MAC_IHRP_CLOCK_EN 15
+
+/* PSMCoreControlStatus (IHR Address 0x078) bit definitions */
+#define PSM_CORE_CTL_AR (1 << 0)
+#define PSM_CORE_CTL_HR (1 << 1)
+#define PSM_CORE_CTL_IR (1 << 2)
+#define PSM_CORE_CTL_AAR (1 << 3)
+#define PSM_CORE_CTL_HAR (1 << 4)
+#define PSM_CORE_CTL_PPAR (1 << 5)
+#define PSM_CORE_CTL_SS (1 << 6)
+#define PSM_CORE_CTL_REHE (1 << 7)
+#define PSM_CORE_CTL_PPAS (1 << 13)
+#define PSM_CORE_CTL_AAS (1 << 14)
+#define PSM_CORE_CTL_HAS (1 << 15)
+
+#define PSM_CORE_CTL_LTR_BIT 9
+#define PSM_CORE_CTL_LTR_MASK 0x3
+
+#define PSM_SBACCESS_FIFO_MODE (1 << 1)
+#define PSM_SBACCESS_EXT_ERR (1 << 11)
+
+/* WEP Block */
+
+/* WEP_WKEY */
+#define WKEY_START (1 << 8)
+#define WKEY_SEL_MASK 0x1F
+
+/* WEP data formats */
+
+/* the number of RCMTA entries */
+#define RCMTA_SIZE 50
+
+/* max keys in M_TKMICKEYS_BLK - 96 * sizeof(uint16) */
+#define WSEC_MAX_TKMIC_ENGINE_KEYS(_corerev) ((D11REV_GE(_corerev, 64)) ? \
+ AMT_SIZE(_corerev) : 12) /* 8 + 4 default - 2 mic keys 8 bytes each */
+
+/* max keys in M_WAPIMICKEYS_BLK - 64 * sizeof(uint16) */
+#define WSEC_MAX_SMS4MIC_ENGINE_KEYS(_corerev) ((D11REV_GE(_corerev, 64)) ? \
+ AMT_SIZE(_corerev) : 8) /* 4 + 4 default - 16 bytes each */
+
+/* max RXE match registers */
+#define WSEC_MAX_RXE_KEYS 4
+
+/* SECKINDXALGO (Security Key Index & Algorithm Block) word format */
+/* SKL (Security Key Lookup) */
+#define SKL_POST80_ALGO_MASK 0x000F
+#define SKL_PRE80_ALGO_MASK 0x0007
+#define SKL_ALGO_SHIFT 0
+
+#define SKL_ALGO_MASK(_corerev) (D11REV_GE(_corerev, 80) ? SKL_POST80_ALGO_MASK : \
+ SKL_PRE80_ALGO_MASK)
+
+#define SKL_WAPI_KEYID_MASK 0x8000
+#define SKL_WAPI_KEYID_SHIFT 15
+#define SKL_INDEX_SHIFT 4
+
+#define SKL_PRE80_WAPI_KEYID_MASK 0x0008
+#define SKL_PRE80_WAPI_KEYID_SHIFT 3
+
+#define SKL_INDEX_MASK(_corerev) ((D11REV_GE(_corerev, 64)) ? \
+ (0x0FF0) : (0x03F0))
+#define SKL_GRP_ALGO_MASK(_corerev) ((D11REV_GE(_corerev, 64)) ? \
+ ((D11REV_GE(_corerev, 80)) ? (0xE000) : (0x7000)) : (0x1c00))
+#define SKL_GRP_ALGO_SHIFT(_corerev) ((D11REV_GE(_corerev, 64)) ? \
+ ((D11REV_GE(_corerev, 80)) ? (13) : (12)) : (10))
+
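+/* Illustrative sketch (an assumption about how the fields combine):
+ * composing a SECKINDXALGO lookup word from an algorithm code and key
+ * index using the rev-dependent masks above. 'skl_word_example' is
+ * hypothetical.
+ */
+#if defined(D11_SKL_EXAMPLE) /* example only, never built */
+static uint16
+skl_word_example(uint32 corerev, uint16 algo, uint16 key_idx)
+{
+	uint16 w = 0;
+	w |= (algo << SKL_ALGO_SHIFT) & SKL_ALGO_MASK(corerev);
+	w |= (key_idx << SKL_INDEX_SHIFT) & SKL_INDEX_MASK(corerev);
+	return w;
+}
+#endif /* D11_SKL_EXAMPLE */
+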
+#define SKL_STAMON_NBIT 0x8000 /* STA monitor bit */
+
+/* additional bits defined for IBSS group key support */
+#define SKL_IBSS_INDEX_MASK 0x01F0
+#define SKL_IBSS_INDEX_SHIFT 4
+#define SKL_IBSS_KEYID1_MASK 0x0600
+#define SKL_IBSS_KEYID1_SHIFT 9
+#define SKL_IBSS_KEYID2_MASK 0x1800
+#define SKL_IBSS_KEYID2_SHIFT 11
+#define SKL_IBSS_KEYALGO_MASK 0xE000
+#define SKL_IBSS_KEYALGO_SHIFT 13
+
+#define WSEC_MODE_OFF 0
+#define WSEC_MODE_HW 1
+#define WSEC_MODE_SW 2
+
+/* Mapped as per HW_ALGO */
+#define WSEC_ALGO_OFF 0
+#define WSEC_ALGO_WEP1 1
+#define WSEC_ALGO_TKIP 2
+#define WSEC_ALGO_WEP128 3
+#define WSEC_ALGO_AES_LEGACY 4
+#define WSEC_ALGO_AES 5
+#define WSEC_ALGO_SMS4 6
+#define WSEC_ALGO_SMS4_DFT_2005_09_07 7 /**< Not used right now */
+#define WSEC_ALGO_NALG 8
+
+/* For CORE_REV >= 80 */
+#define WSEC_ALGO_AES_GCM 8
+#define WSEC_ALGO_AES_GCM256 9
+
+/* For CORE_REV less than 80 */
+#define WSEC_ALGO_AES_PRE80_GCM 6
+#define WSEC_ALGO_AES_PRE80_GCM256 8
+
+/* D11 MAX TTAK INDEX */
+#define TSC_TTAK_PRE80_MAX_INDEX 50
+#define TSC_TTAK_MAX_INDEX 8
+/* D11 COREREV 80 TTAK KEY INDEX SHIFT */
+#define SKL_TTAK_INDEX_SHIFT 13
+#define SKL_TTAK_INDEX_MASK 0xE000
+
+/* D11 pre-corerev-40 HW algos; changed from corerev 40 onwards */
+#define D11_PRE40_WSEC_ALGO_AES 3
+#define D11_PRE40_WSEC_ALGO_WEP128 4
+#define D11_PRE40_WSEC_ALGO_AES_LEGACY 5
+#define D11_PRE40_WSEC_ALGO_SMS4 6
+#define D11_PRE40_WSEC_ALGO_NALG 7
+
+#define D11_WSEC_ALGO_AES(_corerev) WSEC_ALGO_AES
+
+#define AES_MODE_NONE 0
+#define AES_MODE_CCM 1
+#define AES_MODE_OCB_MSDU 2
+#define AES_MODE_OCB_MPDU 3
+#define AES_MODE_CMAC 4
+#define AES_MODE_GCM 5
+#define AES_MODE_GMAC 6
+
+/* WEP_CTL (Rev 0) */
+#define WECR0_KEYREG_SHIFT 0
+#define WECR0_KEYREG_MASK 0x7
+#define WECR0_DECRYPT (1 << 3)
+#define WECR0_IVINLINE (1 << 4)
+#define WECR0_WEPALG_SHIFT 5
+#define WECR0_WEPALG_MASK (0x7 << 5)
+#define WECR0_WKEYSEL_SHIFT 8
+#define WECR0_WKEYSEL_MASK (0x7 << 8)
+#define WECR0_WKEYSTART (1 << 11)
+#define WECR0_WEPINIT (1 << 14)
+#define WECR0_ICVERR (1 << 15)
+
+/* Frame template map byte offsets */
+#define T_ACTS_TPL_BASE (0)
+#define T_NULL_TPL_BASE (0xc * 2)
+#define T_QNULL_TPL_BASE (0x1c * 2)
+#define T_RR_TPL_BASE (0x2c * 2)
+#define T_BCN0_TPL_BASE (0x34 * 2)
+#define T_PRS_TPL_BASE (0x134 * 2)
+#define T_BCN1_TPL_BASE (0x234 * 2)
+#define T_P2P_NULL_TPL_BASE (0x340 * 2)
+#define T_P2P_NULL_TPL_SIZE (32)
+#define T_TRIG_TPL_BASE (0x90 * 2)
+
+/* FCBS base addresses and sizes in BM */
+
+#define FCBS_DS0_BM_CMD_SZ_CORE0 0x0200 /* 512 bytes */
+#define FCBS_DS0_BM_DAT_SZ_CORE0 0x0200 /* 512 bytes */
+
+#ifndef FCBS_DS0_BM_CMDPTR_BASE_CORE0
+#define FCBS_DS0_BM_CMDPTR_BASE_CORE0 0x3000
+#endif
+#define FCBS_DS0_BM_DATPTR_BASE_CORE0 (FCBS_DS0_BM_CMDPTR_BASE_CORE0 + FCBS_DS0_BM_CMD_SZ_CORE0)
+
+#define FCBS_DS0_BM_CMD_SZ_CORE1 0x0200 /* 512 bytes */
+#define FCBS_DS0_BM_DAT_SZ_CORE1 0x0200 /* 512 bytes */
+
+#ifndef FCBS_DS0_BM_CMDPTR_BASE_CORE1
+#define FCBS_DS0_BM_CMDPTR_BASE_CORE1 0x2400
+#endif
+#define FCBS_DS0_BM_DATPTR_BASE_CORE1 (FCBS_DS0_BM_CMDPTR_BASE_CORE1 + FCBS_DS0_BM_CMD_SZ_CORE1)
+
+#define FCBS_DS0_BM_CMD_SZ_CORE2 0x0200 /* 512 bytes */
+#define FCBS_DS0_BM_DAT_SZ_CORE2 0x0200 /* 512 bytes */
+
+#define FCBS_DS1_BM_CMD_SZ_CORE0 0x2000 /* Not used */
+#define FCBS_DS1_BM_DAT_SZ_CORE0 0x2000 /* Not used */
+
+#define FCBS_DS1_BM_CMDPTR_BASE_CORE0 0x17B4
+#define FCBS_DS1_BM_DATPTR_BASE_CORE0 (FCBS_DS1_BM_CMDPTR_BASE_CORE0 + FCBS_DS1_BM_CMD_SZ_CORE0)
+
+#define FCBS_DS1_BM_CMD_SZ_CORE1 0x2000 /* Not used */
+#define FCBS_DS1_BM_DAT_SZ_CORE1 0x2000 /* Not used */
+
+#define FCBS_DS1_BM_CMDPTR_BASE_CORE1 0x17B4
+#define FCBS_DS1_BM_DATPTR_BASE_CORE1 (FCBS_DS1_BM_CMDPTR_BASE_CORE1 + FCBS_DS1_BM_CMD_SZ_CORE1)
+
+#define T_BA_TPL_BASE T_QNULL_TPL_BASE /**< template area for BA */
+
+#define T_RAM_ACCESS_SZ 4 /**< template ram is 4 byte access only */
+
+#define TPLBLKS_PER_BCN_NUM 2
+#define TPLBLKS_AC_PER_BCN_NUM 1
+
+#if defined(WLLPRS) && defined(MBSS)
+#define TPLBLKS_PER_PRS_NUM 4
+#define TPLBLKS_AC_PER_PRS_NUM 2
+#else
+#define TPLBLKS_PER_PRS_NUM 2
+#define TPLBLKS_AC_PER_PRS_NUM 1
+#endif /* WLLPRS && MBSS */
+
+/* MAC Sample Collect Params */
+
+/* SampleCapture set-up options in
+ * different registers based on CoreRev
+ */
+/* CoreRev >= 50, use SMP_CTRL in TXE_IHR */
+#define SC_SRC_MAC 2 /* MAC as Sample Collect Src */
+#define SC_SRC_SHIFT 3 /* SC_SRC bits [3:4] */
+#define SC_TRIG_SHIFT 5
+#define SC_TRANS_SHIFT 6
+#define SC_MATCH_SHIFT 7
+#define SC_STORE_SHIFT 8
+
+#define SC_STRT 1
+#define SC_TRIG_EN (1 << SC_TRIG_SHIFT)
+#define SC_TRANS_EN (1 << SC_TRANS_SHIFT)
+#define SC_MATCH_EN (1 << SC_MATCH_SHIFT)
+#define SC_STORE_EN (1 << SC_STORE_SHIFT)
+
+/* CoreRev < 50, use PHY_CTL in PSM_IHR */
+#define PHYCTL_PHYCLKEN (1 << 1)
+#define PHYCTL_FORCE_GATED_CLK_ON (1 << 2)
+#define PHYCTL_SC_STRT (1 << 4)
+#define PHYCTL_SC_SRC_LB (1 << 7)
+#define PHYCTL_SC_TRIG_EN (1 << 8)
+#define PHYCTL_SC_TRANS_EN (1 << 9)
+#define PHYCTL_SC_STR_EN (1 << 10)
+#define PHYCTL_IHRP_CLK_EN (1 << 15)
+/* End MAC Sample Collect Params */
+
+#define ANTSEL_CLKDIV_4MHZ 6
+#define MIMO_ANTSEL_BUSY 0x4000 /**< bit 14 (busy) */
+#define MIMO_ANTSEL_SEL 0x8000 /**< bit 15 write the value */
+#define MIMO_ANTSEL_WAIT 50 /**< 50us wait */
+#define MIMO_ANTSEL_OVERRIDE 0x8000 /**< flag */
+
+typedef struct shm_acparams shm_acparams_t;
+BWL_PRE_PACKED_STRUCT struct shm_acparams {
+ uint16 txop;
+ uint16 cwmin;
+ uint16 cwmax;
+ uint16 cwcur;
+ uint16 aifs;
+ uint16 bslots;
+ uint16 reggap;
+ uint16 status;
+ uint16 txcnt;
+ uint16 rsvd[7];
+} BWL_POST_PACKED_STRUCT;
+
+#define WME_STATUS_NEWAC (1 << 8)
+
+/* M_HOST_FLAGS */
+#define MHFMAX 5 /* Number of valid hostflag half-word (uint16) */
+#define MHF1 0 /* Hostflag 1 index */
+#define MHF2 1 /* Hostflag 2 index */
+#define MHF3 2 /* Hostflag 3 index */
+#define MHF4 3 /* Hostflag 4 index */
+#define MHF5 4 /* Hostflag 5 index */
+
+#define MXHFMAX 1 /* Number of valid PSMx hostflag half-word (uint16) */
+#define MXHF0 64 /* PSMx Hostflag 0 index */
+
+/* Flags in M_HOST_FLAGS */
+#define MHF1_D11AC_DYNBW 0x0001 /**< dynamic bw */
+#define MHF1_WLAN_CRITICAL 0x0002 /**< WLAN is in critical state */
+#define MHF1_MBSS_EN 0x0004 /**< Enable MBSS: RXPUWAR deprecated for rev >= 9 */
+#define MHF1_BTCOEXIST 0x0010 /**< Enable Bluetooth / WLAN coexistence */
+#define MHF1_P2P_SKIP_TIME_UPD 0x0020 /**< Skip P2P SHM updates and P2P event generations */
+#define MHF1_TXMUTE_WAR 0x0040 /**< ucode based Tx mute */
+#define MHF1_RXFIFO1 0x0080 /**< Switch data reception from RX fifo 0 to fifo 1 */
+#define MHF1_EDCF 0x0100 /**< Enable EDCF access control */
+#define MHF1_ULP 0x0200 /**< Force Ucode to put chip in low power state */
+#define MHF1_FORCE_SEND_BCN 0x0800 /**< Force send bcn, even if rcvd from peer STA (IBSS) */
+#define MHF1_TIMBC_EN 0x1000 /**< Enable Target TIM Transmission Time function */
+#define MHF1_RADARWAR 0x2000 /**< Enable Radar Detect WAR PR 16559 */
+#define MHF1_DEFKEYVALID 0x4000 /**< Enable use of the default keys */
+#define MHF1_CTS2SELF 0x8000 /**< Enable CTS to self full phy bw protection */
+
+/* Flags in M_HOST_FLAGS2 */
+#define MHF2_DISABLE_PRB_RESP 0x0001 /**< disable Probe Response in ucode */
+#define MHF2_HIB_FEATURE_ENABLE 0x0008 /* Enable HIB feature in ucode (60<=rev<80) */
+#define MHF2_SKIP_ADJTSF 0x0010 /**< skip TSF update when receiving bcn/probeRsp */
+#define MHF2_RSPBW20 0x0020 /**< Uses bw20 for response frames ack/ba/cts */
+#define MHF2_TXBCMC_NOW 0x0040 /**< Flush BCMC FIFO immediately */
+#define MHF2_PPR_HWPWRCTL 0x0080 /**< TSSI_DIV WAR (rev<80) */
+#define MHF2_BTC2WIRE_ALTGPIO 0x0100 /**< BTC 2wire in alternate pins */
+#define MHF2_BTCPREMPT 0x0200 /**< BTC enable bluetooth check during tx */
+#define MHF2_SKIP_CFP_UPDATE 0x0400 /**< Skip CFP update ; for d11 rev <= 80 */
+#define MHF2_TX_TMSTMP 0x0800 /**< Enable passing tx-timestamps in tx-status */
+#define MHF2_UFC_GE84 0x2000 /**< Enable UFC in CT mode */
+#define MHF2_NAV_NORST_WAR 0x4000 /**< WAR to use rogue NAV duration */
+#define MHF2_BTCANTMODE 0x4000 // OBSOLETE (TO BE REMOVED)
+
+/* Flags in M_HOST_FLAGS3 */
+#define MHF3_ANTSEL_EN 0x0001 /**< enabled mimo antenna selection (REV<80) */
+#define MHF3_TKIP_FRAG_WAR 0x0001 /**< TKIP fragment corrupt WAR (REV>=80) */
+#define MHF3_TXSHAPER_EN 0x0002 /**< enable tx shaper for non-OFDM-A frames */
+#define MHF3_ANTSEL_MODE 0x0002 /**< antenna selection mode: 0: 2x3, 1: 2x4 (REV<80) */
+#define MHF3_BTCX_DEF_BT 0x0004 /**< corerev >= 13 BT Coex. */
+#define MHF3_BTCX_ACTIVE_PROT 0x0008 /**< corerev >= 13 BT Coex. */
+#define MHF3_PKTENG_PROMISC 0x0010 /**< pass frames to driver in packet engine Rx mode */
+#define MHF3_SCANCORE_PM_EN 0x0040 /**< enable ScanCore PM from ucode */
+#define MHF3_PM_BCNRX 0x0080 /**< PM single core beacon RX for power reduction */
+#define MHF3_BTCX_SIM_RSP 0x0100 /**< allow limited low power tx when BT is active */
+#define MHF3_BTCX_PS_PROTECT 0x0200 /**< use PS mode to protect BT activity */
+#define MHF3_BTCX_SIM_TX_LP 0x0400 /**< use low power for simultaneous tx responses */
+#define MHF3_SELECT_RXF1 0x0800 /**< enable frame classification in pcie FD */
+#define MHF3_BTCX_ECI 0x1000 /**< Enable BTCX ECI interface */
+#define MHF3_NOISECAL_ENHANCE 0x2000
+
+/* Flags in M_HOST_FLAGS4 */
+#define MHF4_RCMTA_BSSID_EN 0x0002 /**< BTAMP: multiSta BSSIDs matching in RCMTA area */
+#define MHF4_SC_MIX_EN 0x0002 /**< set to enable 4389a0 specific changes */
+#define MHF4_BCN_ROT_RR 0x0004 /**< MBSSID: beacon rotate in round-robin fashion */
+#define MHF4_OPT_SLEEP 0x0008 /**< enable opportunistic sleep (REV<80) */
+#define MHF4_PM_OFFLOAD 0x0008 /**< enable PM offload */
+#define MHF4_PROXY_STA 0x0010 /**< enable proxy-STA feature */
+#define MHF4_AGING 0x0020 /**< Enable aging threshold for RF awareness */
+#define MHF4_STOP_BA_ON_NDP 0x0080 /**< Stop BlockAck to AP to get chance to send NULL data */
+#define MHF4_NOPHYHANGWAR 0x0100 /**< disable ucode WAR for idletssi cal (rev=61) */
+#define MHF4_WMAC_ACKTMOUT 0x0200 /**< reserved for WMAC testing */
+#define MHF4_NAPPING_ENABLE 0x0400 /**< Napping enable (REV<80) */
+#define MHF4_IBSS_SEC 0x0800 /**< IBSS WPA2-PSK operating mode */
+#define MHF4_SISO_BCMC_RX 0x1000 /* Disable switch to MIMO on recving multicast TIM */
+#define MHF4_RSDB_CR1_MINIPMU_CAL_EN 0x8000 /* for 4349B0. JIRA:SW4349-1469 */
+
+/* Flags in M_HOST_FLAGS5 */
+#define MHF5_BTCX_LIGHT 0x0002 /**< light coex mode, off txpu only for critical BT */
+#define MHF5_BTCX_PARALLEL 0x0004 /**< BT and WLAN run in parallel. */
+#define MHF5_BTCX_DEFANT 0x0008 /**< default position for shared antenna */
+#define MHF5_P2P_MODE 0x0010 /**< Enable P2P mode */
+#define MHF5_LEGACY_PRS 0x0020 /**< Enable legacy probe resp support */
+#define MHF5_HWRSSI_EN 0x0800 /**< Enable HW RSSI (ac) */
+#define MHF5_HIBERNATE 0x1000 /**< Force ucode to power save until wake-bit */
+#define MHF5_BTCX_GPIO_DEBUG 0x4000 /**< Enable gpio pins for btcoex ECI signals */
+#define MHF5_SUPPRESS_PRB_REQ 0x8000 /**< Suppress probe requests at ucode level */
+
+/* Flags in M_HOST_FLAGS6 */
+#define MHF6_TXPWRCAP_RST_EN 0x0001 /**< Ucode clear phyreg after each tx */
+#define MHF6_TXPWRCAP_EN 0x0002 /**< Enable TX power capping in ucode */
+#define MHF6_TSYNC_AVB 0x0004 /**< Enable AVB for timestamping */
+#define MHF6_TSYNC_3PKG 0x0020 /**< Enable 3rd txstatus package */
+#define MHF6_TDMTX 0x0040 /**< Enable SDB TDM in ucode */
+#define MHF6_TSYNC_NODEEPSLP 0x0080 /**< Disable deep sleep to keep AVB clock */
+#define MHF6_TSYNC_CAL 0x0100 /**< Enable Tsync cal in ucode */
+#define MHF6_TXPWRCAP_IOS_NBIT 0x0200 /**< Enable IOS mode of operation for Txpwrcap (REV>=80) */
+#define MHF6_MULBSSID_NBIT 0x0400 /**< associated to AP belonging to a multiple BSSID set */
+#define MHF6_HEBCN_TX_NBIT 0x0800 /**< HE BCN-TX */
+#define MHF6_LATENCY_EN 0x2000 /**< Enable Latency instrumentation in ucode */
+#define MHF6_PTMSTS_EN 0x4000 /**< Enable PTM Status */
+
+/* MX_HOST_FLAGS */
+/* Flags for MX_HOST_FLAGS0 */
+#define MXHF0_RSV0 0x0001 /* ucode internal, not exposed yet */
+#define MXHF0_TXDRATE 0x0002 /* mu txrate to use rate from txd */
+#define MXHF0_CHKFID 0x0004 /* check if frameid->fifo matches hw txfifo idx */
+#define MXHF0_DISWAR 0x0008 /* disable some WAR. */
+
+/* M_AXC_HOST_FLAGS0 */
+#define MAXCHF0_WAIT_TRIG 0x0001 /* Hold frames till trigger frame is rxed */
+#define MAXCHF0_HTC_SUPPORT 0x0002 /* 11AX HTC field support */
+#define MAXCHF0_AX_ASSOC_SHIFT 0x0003 /* 11AX association indicator */
+#define MAXCHF0_HEB_CONFIG 0x0004 /* HEB configuration */
+#define MAXCHF0_ACI_DET 0x0008 /* ACI detect soft enable */
+#define MAXCHF0_TRIGRES_LP 0x0010 /* Lite-Point testing */
+#define MAXCHF0_HDRCONV_SHIFT 5u /* Enable header conversion */
+#define MAXCHF0_HDRCONV (1 << MAXCHF0_HDRCONV_SHIFT)
+#define MAXCHF0_FORCE_ZERO_PPR_SHIFT 6u /* Force PPR value to 0 for ULTPC */
+#define MAXCHF0_FORCE_ZERO_PPR (1 << MAXCHF0_FORCE_ZERO_PPR_SHIFT)
+#define MAXCHF0_DISABLE_PYLDECWAR_SHIFT 7u /* Disable WAR for Paydecode issue */
+#define MAXCHF0_DISABLE_PYLDECWAR (1 << MAXCHF0_DISABLE_PYLDECWAR_SHIFT)
+#define MAXCHF0_BSR_SUPPORT_SHIFT 8u /* BSR is supported */
+#define MAXCHF0_BSR_SUPPORT (1 << MAXCHF0_BSR_SUPPORT_SHIFT)
+#define MAXCHF0_MUEDCA_VALID_SHIFT 9u /* MUEDCA information is valid */
+#define MAXCHF0_MUEDCA_VALID (1 << MAXCHF0_MUEDCA_VALID_SHIFT)
+/* Bit 10 definition missing? */
+#define MAXCHF0_TWT_PKTSUPP_SHIFT 11u /* Enable pkt suppress outside TWT SP */
+#define MAXCHF0_TWT_PKTSUPP_EN (1 << MAXCHF0_TWT_PKTSUPP_SHIFT)
+#define MAXCHF0_TBPPDU_STATUS_SHIFT 12u
+#define MAXCHF0_TBPPDU_STATUS_EN (1 << MAXCHF0_TBPPDU_STATUS_SHIFT)
+#define MAXCHF0_11AX_TXSTATUS_EXT_SHIFT 13u /* Enable 128 BA pkg in TX status */
+#define MAXCHF0_11AX_TXSTATUS_EXT_EN (1u << MAXCHF0_11AX_TXSTATUS_EXT_SHIFT)
+#define MAXCHF1_11AX_TXSTATUS_EXT_SHIFT 0u /* Enable 256 BA pkg in TX status */
+#define MAXCHF1_11AX_TXSTATUS_EXT_EN (1u << MAXCHF1_11AX_TXSTATUS_EXT_SHIFT)
+/* Bit 14 for UORA_EN */
+#define MAXCHF0_11AX_UORA_SHIFT 14u /* Enable UORA support */
+#define MAXCHF0_11AX_UORA_EN (1u << MAXCHF0_11AX_UORA_SHIFT)
+
+/* M_AXC_HOST_FLAGS1 */
+#define MAXCHF1_ITXSTATUS_EN 0x0004u /* Enable intermediate txs for TB PPDU */
+#define MAXCHF1_OBSSHWSTATS_EN 0x0008u /* Enable ucode OBSS stats monitoring */
+
+/* M_SC_HOST_FLAGS */
+#define C_SCCX_STATS_EN 0x0001u /* Enable SC stats */
+#define C_SC_BTMC_COEX_EN 0x0002u /* Enable WLSC-BTMC coex */
+
+/** Short version of receive frame status. Only used for non-last MSDU of AMSDU - rev61.1 */
+typedef struct d11rxhdrshort_rev61_1 d11rxhdrshort_rev61_1_t;
+BWL_PRE_PACKED_STRUCT struct d11rxhdrshort_rev61_1 {
+ uint16 RxFrameSize; /**< Actual byte length of the frame data received */
+
+ /* These two 8-bit fields remain in the same order regardless of
+ * processor byte order.
+ */
+ uint8 dma_flags; /**< bit 0 indicates short or long rx status. 1 == short. */
+ uint8 fifo; /**< rx fifo number */
+ uint16 mrxs; /**< MAC Rx Status */
+ uint16 RxFrameSize0; /**< rxframesize for fifo-0 (in bytes). */
+ uint16 HdrConvSt; /**< hdr conversion status. Copy of ihr(RCV_HDR_CTLSTS). */
+ uint16 RxTSFTimeL; /**< RxTSFTime time of first MAC symbol + M_PHY_PLCPRX_DLY */
+ uint16 RxTSFTimeH; /**< RxTSFTime time of first MAC symbol + M_PHY_PLCPRX_DLY */
+ uint16 aux_status; /**< DMA writes into this field. ucode treats as reserved. */
+} BWL_POST_PACKED_STRUCT;
+
+/** Short version of receive frame status. Only used for non-last MSDU of AMSDU - pre80 */
+typedef struct d11rxhdrshort_lt80 d11rxhdrshort_lt80_t;
+BWL_PRE_PACKED_STRUCT struct d11rxhdrshort_lt80 {
+ uint16 RxFrameSize; /**< Actual byte length of the frame data received */
+
+ /* These two 8-bit fields remain in the same order regardless of
+ * processor byte order.
+ */
+ uint8 dma_flags; /**< bit 0 indicates short or long rx status. 1 == short. */
+ uint8 fifo; /**< rx fifo number */
+ uint16 mrxs; /**< MAC Rx Status */
+ uint16 RxTSFTime; /**< RxTSFTime time of first MAC symbol + M_PHY_PLCPRX_DLY */
+ uint16 HdrConvSt; /**< hdr conversion status. Copy of ihr(RCV_HDR_CTLSTS). */
+ uint16 aux_status; /**< DMA writes into this field. ucode treats as reserved. */
+} BWL_POST_PACKED_STRUCT;
+
+/* Errflag bits for ge80 */
+#define ERRFLAGS_ERR_STATE 0x0003u
+#define ERRFLAGS_GREATER_MSDU_LEN 0x0001u
+#define ERRFLAGS_AMSDU_TRUNCATED 0x0002u
+#define ERRFLAGS_HDRCONV_MASK 0x00F0u
+#define ERRFLAGS_HDRCONV_SHIFT 4u
+#define ERRFLAGS_CSI_LEN_64K 0x0100u
+#define ERRFLAGS_MESH_FMT_ERR 0x0200u
+
+/* Register 'D11_RXE_ERRVAL' bits for ge80 */
+#define RXEERR_GREATER_MSDU_LEN (1u << 6)
+
+/* 128 BA configuration */
+/* Register D11_TXBA_DataSel bits for ge80 */
+#define TXBA_DATASEL_WSIZE_BITMAP_LEN_ENC_SEL (1u << 0u)
+
+/* Register D11_TXBA_Data bits (ge80) */
+#define TXBA_DATA_WSIZE_256 (0x100u)
+#define TXBA_DATA_WSIZE_128 (0x80u)
+#define TXBA_DATA_WSIZE_64 (0x40u)
+
+/* HW optimisation to generate bitmap based on start SSN & max SSN */
+#define TXBA_DATA_HW_CONST (0xfu << 12)
+
+/* Register D11_RXE_BA_LEN bits (ge80) */
+#define RXE_BA_LEN_RXBA_64 (0x0u)
+#define RXE_BA_LEN_RXBA_128 (0x1u)
+#define RXE_BA_LEN_RXBA_256 (0x2u)
+#define RXE_BA_LEN_TID0_SHIFT (0u)
+#define RXE_BA_LEN_TID1_SHIFT (2u)
+#define RXE_BA_LEN_TID2_SHIFT (4u)
+#define RXE_BA_LEN_TID3_SHIFT (6u)
+#define RXE_BA_LEN_TID4_SHIFT (8u)
+#define RXE_BA_LEN_TID5_SHIFT (10u)
+#define RXE_BA_LEN_TID6_SHIFT (12u)
+#define RXE_BA_LEN_TID7_SHIFT (14u)
+
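+/* Illustrative sketch: D11_RXE_BA_LEN packs one 2-bit window-length code
+ * per TID (8 TIDs x 2 bits). Below, TID 0 gets a 256-entry BA window and
+ * TID 1 a 64-entry one (code 0, shown explicitly for clarity); the
+ * register write itself is out of scope here.
+ */
+#if defined(D11_RXE_BA_LEN_EXAMPLE) /* example only, never built */
+static uint16
+rxe_ba_len_example(void)
+{
+	uint16 v = (uint16)(RXE_BA_LEN_RXBA_256 << RXE_BA_LEN_TID0_SHIFT);
+	v |= (uint16)(RXE_BA_LEN_RXBA_64 << RXE_BA_LEN_TID1_SHIFT);
+	return v;
+}
+#endif /* D11_RXE_BA_LEN_EXAMPLE */
+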
+/* Register D11_RXE_BA_LEN_ENC bits (ge80) */
+#define RXE_BA_LEN_ENC_BA32_VAL (0x3u << 0u)
+#define RXE_BA_LEN_ENC_BA64_VAL (0x0u << 2u)
+#define RXE_BA_LEN_ENC_BA128_VAL (0x1u << 4u)
+#define RXE_BA_LEN_ENC_BA256_VAL (0x2u << 6u)
+
+/* Register D11_RXE_TXBA_CTL2 (ge80) */
+#define RXE_TXBA_CTL2_CONIG_SINGLE_TID (0x0u << 0u)
+#define RXE_TXBA_CTL2_CONIG_ALL_TID (0x1u << 0u)
+#define RXE_TXBA_CTL2_SEL_TID0 (0x0u << 12u)
+#define RXE_TXBA_CTL2_SEL_TID1 (0x1u << 12u)
+#define RXE_TXBA_CTL2_SEL_TID2 (0x2u << 12u)
+#define RXE_TXBA_CTL2_SEL_TID3 (0x3u << 12u)
+#define RXE_TXBA_CTL2_SEL_TID4 (0x4u << 12u)
+#define RXE_TXBA_CTL2_SEL_TID5 (0x5u << 12u)
+#define RXE_TXBA_CTL2_SEL_TID6 (0x6u << 12u)
+#define RXE_TXBA_CTL2_SEL_TID7 (0x7u << 12u)
+
+/**
+ * Special Notes
+ * #1: dma_flags, fifo
+ * These two 8-bit fields remain in the same order regardless of
+ * processor byte order.
+ * #2: pktclass
+ * The 16-bit bitmap is the result of Packet (or Flow) Classification.
+ *
+ * 0 : Flow ID different
+ * 1,2,3 : A1, A2, A3 different
+ * 4 : TID different
+ * 5, 6 : DA, SA from AMSDU subframe different
+ * 7 : FC different
+ * 8 : AMPDU boundary
+ * 9 - 15 : Reserved
+ * #3: errflags
+ * These bits indicate specific errors detected by the HW on the Rx path;
+ * they are relevant for the last-MSDU status only.
+ *
+ * Whenever there is an error at any MSDU, HW treats it as the last
+ * MSDU and sends out the last-MSDU status.
+ */
+
+#define D11RXHDR_HW_STATUS_GE80 \
+ uint16 RxFrameSize; /**< Actual byte length of the frame data received */ \
+ /* For comments see special note #1 above */\
+ uint8 dma_flags; /**< bit 0 indicates short or long rx status. 1 == short. */ \
+ uint8 fifo; /**< rx fifo number */ \
+ \
+ uint16 mrxs; /**< MAC Rx Status */ \
+ uint16 RxFrameSize0; /**< rxframesize for fifo-0 (in bytes). */ \
+ uint16 HdrConvSt; /**< hdr conversion status. Copy of ihr(RCV_HDR_CTLSTS). */ \
+ uint16 pktclass; \
+ uint32 filtermap; /**< 32 bit bitmap indicates which "Filters" have matched. */ \
+ /* For comments see special note #2 above */ \
+ uint16 flowid; /**< result of Flow ID Look Up performed by the HW. */ \
+ /* For comments see special note #3 above */\
+ uint16 errflags;
+
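+/* Illustrative sketch: decoding the pktclass bitmap per special note #2
+ * above. The EX_PKTCLASS_* names are hypothetical; only the bit positions
+ * come from the note.
+ */
+#if defined(D11_PKTCLASS_EXAMPLE) /* example only, never built */
+#define EX_PKTCLASS_FLOWID_DIFF	(1u << 0)	/* Flow ID different */
+#define EX_PKTCLASS_TID_DIFF	(1u << 4)	/* TID different */
+#define EX_PKTCLASS_AMPDU_BOUND	(1u << 8)	/* AMPDU boundary */
+static int
+pktclass_same_flow_example(uint16 pktclass)
+{
+	/* same flow: flow id and TID unchanged, no AMPDU boundary crossed */
+	return (pktclass & (EX_PKTCLASS_FLOWID_DIFF | EX_PKTCLASS_TID_DIFF |
+		EX_PKTCLASS_AMPDU_BOUND)) == 0;
+}
+#endif /* D11_PKTCLASS_EXAMPLE */
+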
+#define D11RXHDR_UCODE_STATUS_GE80 \
+ /**< Ucode Generated Status (16 Bytes) */ \
+ uint16 RxStatus1; /**< MAC Rx Status */ \
+ uint16 RxStatus2; /**< extended MAC Rx status */ \
+ uint16 RxChan; /**< Rx channel info or chanspec */ \
+ uint16 AvbRxTimeL; /**< AVB RX timestamp low16 */ \
+ uint16 AvbRxTimeH; /**< AVB RX timestamp high16 */ \
+ uint16 RxTSFTime; /**< Lower 16 bits of Rx timestamp */ \
+ uint16 RxTsfTimeH; /**< Higher 16 bits of Rx timestamp */ \
+ uint16 MuRate; /**< MU rate info (bit3:0 MCS, bit6:4 NSTS) */
+
+#define D11RXHDR_HW_STATUS_GE87_1 /**< HW Generated 24 bytes RX Status */ \
+ D11RXHDR_HW_STATUS_GE80 /**< First 20 bytes are same as mac rev >= 80 */ \
+ uint16 roe_hw_sts; /**< ROE HW status */ \
+ uint16 roe_err_flags; /**< ROE error flags */
+
+#define D11RXHDR_UCODE_STATUS_GE87_1 /**< Ucode Generated Status (22 Bytes) */ \
+ uint16 RxStatus1; /**< MAC Rx Status */ \
+ uint16 RxStatus2; /**< extended MAC Rx status */ \
+ uint16 RxChan; /**< Rx channel info or chanspec */ \
+ uint16 MuRate; /**< MU rate info (bit3:0 MCS, bit6:4 NSTS) */ \
+ uint32 AVBRxTime; /**< 32 bit AVB timestamp */ \
+ uint32 TSFRxTime; /**< 32 bit TSF timestamp */ \
+ uint64 PTMRxTime; /**< 64 bit PTM timestamp */
+
+/** Short version of receive frame status. Only used for non-last MSDU of AMSDU - rev87.1 */
+typedef struct d11rxhdrshort_ge87_1 d11rxhdrshort_ge87_1_t;
+BWL_PRE_PACKED_STRUCT struct d11rxhdrshort_ge87_1 {
+
+ D11RXHDR_HW_STATUS_GE87_1
+
+} BWL_POST_PACKED_STRUCT;
+
+/** Mid version of receive frame status. Only used for MPDU of AMPDU - rev87.1 */
+typedef struct d11rxhdrmid_ge87_1 d11rxhdrmid_ge87_1_t;
+BWL_PRE_PACKED_STRUCT struct d11rxhdrmid_ge87_1 {
+
+ D11RXHDR_HW_STATUS_GE87_1
+ D11RXHDR_UCODE_STATUS_GE87_1
+} BWL_POST_PACKED_STRUCT;
+
+/** Short version of receive frame status. Only used for non-last MSDU of AMSDU - rev80 */
+typedef struct d11rxhdrshort_ge80 d11rxhdrshort_ge80_t;
+BWL_PRE_PACKED_STRUCT struct d11rxhdrshort_ge80 {
+
+ D11RXHDR_HW_STATUS_GE80
+
+} BWL_POST_PACKED_STRUCT;
+
+/** Mid version of receive frame status. Only used for MPDU of AMPDU - rev80 */
+typedef struct d11rxhdrmid_ge80 d11rxhdrmid_ge80_t;
+BWL_PRE_PACKED_STRUCT struct d11rxhdrmid_ge80 {
+
+ D11RXHDR_HW_STATUS_GE80
+ D11RXHDR_UCODE_STATUS_GE80
+
+} BWL_POST_PACKED_STRUCT;
+
+/** Receive Frame Data Header - pre80 */
+typedef struct d11rxhdr_lt80 d11rxhdr_lt80_t;
+BWL_PRE_PACKED_STRUCT struct d11rxhdr_lt80 {
+ uint16 RxFrameSize; /**< Actual byte length of the frame data received */
+
+ /**
+ * These two 8-bit fields remain in the same order regardless of
+ * processor byte order.
+ */
+ uint8 dma_flags; /* bit 0 indicates short or long rx status. 1 == short. */
+ uint8 fifo; /* rx fifo number */
+
+ uint16 PhyRxStatus_0; /**< PhyRxStatus 15:0 */
+ uint16 PhyRxStatus_1; /**< PhyRxStatus 31:16 */
+ uint16 PhyRxStatus_2; /**< PhyRxStatus 47:32 */
+ uint16 PhyRxStatus_3; /**< PhyRxStatus 63:48 */
+ uint16 PhyRxStatus_4; /**< PhyRxStatus 79:64 */
+ uint16 PhyRxStatus_5; /**< PhyRxStatus 95:80 */
+ uint16 RxStatus1; /**< MAC Rx Status */
+ uint16 RxStatus2; /**< extended MAC Rx status */
+
+ /**
+ * - RxTSFTime time of first MAC symbol + M_PHY_PLCPRX_DLY
+ */
+ uint16 RxTSFTime;
+
+ uint16 RxChan; /**< Rx channel info or chanspec */
+ uint16 RxFrameSize0; /**< size of rx-frame in fifo-0 in case frame is copied to fifo-1 */
+ uint16 HdrConvSt; /**< hdr conversion status. Copy of ihr(RCV_HDR_CTLSTS). */
+ uint16 AvbRxTimeL; /**< AVB RX timestamp low16 */
+ uint16 AvbRxTimeH; /**< AVB RX timestamp high16 */
+ uint16 MuRate; /**< MU rate info (bit3:0 MCS, bit6:4 NSTS) */
+ /**
+	 * These bits indicate specific errors detected by the HW on the Rx path;
+	 * they are relevant for the last-MSDU status only.
+	 *
+	 * Whenever there is an error at any MSDU, HW treats it as the last
+	 * MSDU and sends out the last-MSDU status.
+ */
+ uint16 errflags;
+} BWL_POST_PACKED_STRUCT;
+
+#define N_PRXS_GE80 16 /* Total number of PhyRx status words for corerev >= 80 */
+#define N_PRXS_LT80 6 /* Total number of PhyRx status words for corerev < 80 */
+
+/* number of PhyRx status words newly added for (corerev >= 80) */
+#define N_PRXS_REM_GE80 (N_PRXS_GE80 - N_PRXS_LT80)
+
+/** RX Hdr definition - rev80 */
+typedef struct d11rxhdr_ge80 d11rxhdr_ge80_t;
+BWL_PRE_PACKED_STRUCT struct d11rxhdr_ge80 {
+ /**
+ * Even though rxhdr can be in short or long format, always declare it here
+ * to be in long format. So the offsets for the other fields are always the same.
+ */
+
+ /**< HW Generated Status (20 Bytes) */
+ D11RXHDR_HW_STATUS_GE80
+ D11RXHDR_UCODE_STATUS_GE80
+
+ /**< PHY Generated Status (32 Bytes) */
+ uint16 PhyRxStatus_0; /**< PhyRxStatus 15:0 */
+ uint16 PhyRxStatus_1; /**< PhyRxStatus 31:16 */
+ uint16 PhyRxStatus_2; /**< PhyRxStatus 47:32 */
+ uint16 PhyRxStatus_3; /**< PhyRxStatus 63:48 */
+ uint16 PhyRxStatus_4; /**< PhyRxStatus 79:64 */
+ uint16 PhyRxStatus_5; /**< PhyRxStatus 95:80 */
+ uint16 phyrxs_rem[N_PRXS_REM_GE80]; /**< 20 bytes of remaining prxs (corerev >= 80) */
+ /* Currently only 6 words are being pushed out of uCode: 6, 9, 16, 17, 21, 23 */
+} BWL_POST_PACKED_STRUCT;
+
+#define N_PRXS_GE85 32u // total number of PhyRxStatus BYTEs for rev >= 85
+
+typedef struct d11rxhdr_ge87_1 d11rxhdr_ge87_1_t;
+BWL_PRE_PACKED_STRUCT struct d11rxhdr_ge87_1 {
+ /**
+ * Even though rxhdr can be in short or long format, always declare it here
+ * to be in long format. So the offsets for the other fields are always the same.
+ */
+
+ D11RXHDR_HW_STATUS_GE87_1 /**< HW Generated Status (24 Bytes) */
+ D11RXHDR_UCODE_STATUS_GE87_1 /**< uCode Generated Status (24 Bytes) */
+ uint8 PHYRXSTATUS[N_PRXS_GE85]; /**< PHY Generated Status (32 Bytes) */
+} BWL_POST_PACKED_STRUCT;
+
+/* A wrapper structure for all versions of d11rxh short structures */
+typedef struct d11rxhdr_ge85 d11rxhdr_ge85_t;
+BWL_PRE_PACKED_STRUCT struct d11rxhdr_ge85 {
+ /**
+ * Even though rxhdr can be in short or long format, always declare it here
+ * to be in long format. So the offsets for the other fields are always the same.
+ */
+
+ /**< HW Generated Status (20 Bytes) */
+ D11RXHDR_HW_STATUS_GE80
+ D11RXHDR_UCODE_STATUS_GE80
+
+ /**< PHY Generated Status (32 Bytes) */
+ uint8 PHYRXSTATUS[N_PRXS_GE85];
+} BWL_POST_PACKED_STRUCT;
+
+/* A wrapper structure for all versions of d11rxh short structures */
+typedef union d11rxhdrshort {
+ d11rxhdrshort_rev61_1_t rev61_1;
+ d11rxhdrshort_lt80_t lt80;
+ d11rxhdrshort_ge80_t ge80;
+ d11rxhdrshort_ge87_1_t ge87_1;
+} d11rxhdrshort_t;
+
+/* A wrapper structure for all versions of d11rxh mid structures */
+typedef union d11rxhdrmid {
+ d11rxhdrmid_ge80_t ge80;
+ d11rxhdrmid_ge87_1_t ge87_1;
+} d11rxhdrmid_t;
+
+/* A wrapper structure for all versions of d11rxh structures */
+typedef union d11rxhdr {
+ d11rxhdr_lt80_t lt80;
+ d11rxhdr_ge80_t ge80;
+ d11rxhdr_ge85_t ge85;
+ d11rxhdr_ge87_1_t ge87_1;
+} d11rxhdr_t;
+
+#define D11RXHDRSHORT_GE87_1_ACCESS_REF(srxh, member) \
+ (&((((d11rxhdrshort_t *)(srxh))->ge87_1).member))
+
+#define D11RXHDRMID_GE87_1_ACCESS_REF(mrxh, member) \
+ (&((((d11rxhdrmid_t *)(mrxh))->ge87_1).member))
+
+#define D11RXHDRSHORT_GE87_1_ACCESS_VAL(srxh, member) \
+ ((((d11rxhdrshort_t *)(srxh))->ge87_1).member)
+
+#define D11RXHDRMID_GE87_1_ACCESS_VAL(mrxh, member) \
+ ((((d11rxhdrmid_t *)(mrxh))->ge87_1).member)
+
+#define D11RXHDR_GE87_1_ACCESS_REF(rxh, member) \
+ (&((rxh)->ge87_1).member)
+
+#define D11RXHDR_GE87_1_ACCESS_VAL(rxh, member) \
+ (((rxh)->ge87_1).member)
+
+#define D11RXHDR_GE87_1_SET_VAL(rxh, member, value) \
+ (((rxh)->ge87_1).member = value)
+
+#define D11RXHDRSHORT_GE80_ACCESS_REF(srxh, member) \
+ (&((((d11rxhdrshort_t *)(srxh))->ge80).member))
+
+#define D11RXHDRMID_GE80_ACCESS_REF(mrxh, member) \
+ (&((((d11rxhdrmid_t *)(mrxh))->ge80).member))
+
+#define D11RXHDRSHORT_LT80_ACCESS_REF(srxh, member) \
+ (&((((d11rxhdrshort_t *)(srxh))->lt80).member))
+
+#define D11RXHDRSHORT_GE80_ACCESS_VAL(srxh, member) \
+ ((((d11rxhdrshort_t *)(srxh))->ge80).member)
+
+#define D11RXHDRMID_GE80_ACCESS_VAL(mrxh, member) \
+ ((((d11rxhdrmid_t *)(mrxh))->ge80).member)
+
+#define D11RXHDRSHORT_LT80_ACCESS_VAL(srxh, member) \
+ ((((d11rxhdrshort_t *)(srxh))->lt80).member)
+
+#define D11RXHDR_GE80_ACCESS_REF(rxh, member) \
+ (&((rxh)->ge80).member)
+
+#define D11RXHDR_LT80_ACCESS_REF(rxh, member) \
+ (&((rxh)->lt80).member)
+
+#define D11RXHDR_GE80_ACCESS_VAL(rxh, member) \
+ (((rxh)->ge80).member)
+
+#define D11RXHDR_GE80_SET_VAL(rxh, member, value) \
+ (((rxh)->ge80).member = value)
+
+#define D11RXHDR_LT80_ACCESS_VAL(rxh, member) \
+ (((rxh)->lt80).member)
+
+#define D11RXHDR_LT80_SET_VAL(rxh, member, value) \
+ (((rxh)->lt80).member = value)
+
+/** For accessing members of d11rxhdrshort_t by reference (address of members) */
+#define D11RXHDRSHORT_ACCESS_REF(srxh, corerev, corerev_minor, member) \
+ (D11REV_MAJ_MIN_GE(corerev, corerev_minor, 87, 1) ? \
+ D11RXHDRSHORT_GE87_1_ACCESS_REF(srxh, member) : \
+ D11REV_GE(corerev, 80) ? D11RXHDRSHORT_GE80_ACCESS_REF(srxh, member) : \
+ D11RXHDRSHORT_LT80_ACCESS_REF(srxh, member))
+
+/** For accessing members of d11rxhdrshort_t by value (only value stored inside members accessed) */
+#define D11RXHDRSHORT_ACCESS_VAL(srxh, corerev, corerev_minor, member) \
+ (D11REV_MAJ_MIN_GE(corerev, corerev_minor, 87, 1) ? \
+ D11RXHDRSHORT_GE87_1_ACCESS_VAL(srxh, member) : \
+ D11REV_GE(corerev, 80) ? D11RXHDRSHORT_GE80_ACCESS_VAL(srxh, member) : \
+ D11RXHDRSHORT_LT80_ACCESS_VAL(srxh, member))
+
+/** For accessing members of d11rxhdrmid_t by reference (address of members) */
+#define D11RXHDRMID_ACCESS_REF(mrxh, corerev, corerev_minor, member) \
+ (D11REV_MAJ_MIN_GE(corerev, corerev_minor, 87, 1) ? \
+ D11RXHDRMID_GE87_1_ACCESS_REF(mrxh, member) : \
+ D11REV_GE(corerev, 80) ? D11RXHDRMID_GE80_ACCESS_REF(mrxh, member) : NULL)
+
+/** For accessing members of d11rxhdrmid_t by value (only value stored inside members accessed) */
+#define D11RXHDRMID_ACCESS_VAL(mrxh, corerev, corerev_minor, member) \
+ (D11REV_MAJ_MIN_GE(corerev, corerev_minor, 87, 1) ? \
+ D11RXHDRMID_GE87_1_ACCESS_VAL(mrxh, member) : \
+	D11REV_GE(corerev, 80) ? D11RXHDRMID_GE80_ACCESS_VAL(mrxh, member) : 0)
+
+/** For accessing members of d11rxhdr_t by reference (address of members) */
+#define D11RXHDR_ACCESS_REF(rxh, corerev, corerev_minor, member) \
+ (D11REV_MAJ_MIN_GE(corerev, corerev_minor, 87, 1) ? \
+ D11RXHDR_GE87_1_ACCESS_REF(rxh, member) : \
+ D11REV_GE(corerev, 80) ? D11RXHDR_GE80_ACCESS_REF(rxh, member) : \
+ D11RXHDR_LT80_ACCESS_REF(rxh, member))
+
+/** For accessing members of d11rxhdr_t by value (only value stored inside members accessed) */
+#define D11RXHDR_ACCESS_VAL(rxh, corerev, corerev_minor, member) \
+ (D11REV_MAJ_MIN_GE(corerev, corerev_minor, 87, 1) ? \
+ D11RXHDR_GE87_1_ACCESS_VAL(rxh, member) : \
+ D11REV_GE(corerev, 80) ? D11RXHDR_GE80_ACCESS_VAL(rxh, member) : \
+ D11RXHDR_LT80_ACCESS_VAL(rxh, member))
+
+/** For accessing members of d11rxhdr_t by value (only value stored inside members accessed) */
+#define D11RXHDR_SET_VAL(rxh, corerev, corerev_minor, member, value) \
+ (D11REV_MAJ_MIN_GE(corerev, corerev_minor, 87, 1) ? \
+ D11RXHDR_GE87_1_SET_VAL(rxh, member, value) : \
+ D11REV_GE(corerev, 80) ? D11RXHDR_GE80_SET_VAL(rxh, member, value) : \
+ D11RXHDR_LT80_SET_VAL(rxh, member, value))
+
+#define D11RXHDR_PTM(rxh, corerev, corerev_minor) \
+ (D11REV_MAJ_MIN_GE(corerev, corerev_minor, 87, 1) ? \
+ D11RXHDR_GE87_1_ACCESS_VAL(rxh, PTMRxTime) : 0)
+
+#define D11RXHDR_AVB(rxh, corerev, corerev_minor) \
+ (D11REV_MAJ_MIN_GE(corerev, corerev_minor, 87, 1) ? \
+ (uint32)D11RXHDR_GE87_1_ACCESS_VAL(rxh, AVBRxTime) : \
+ D11REV_GE(corerev, 80) ? ((uint32)D11RXHDR_GE80_ACCESS_VAL(rxh, AvbRxTimeL) | \
+ ((uint32)D11RXHDR_GE80_ACCESS_VAL(rxh, AvbRxTimeH) << 16u)) : \
+ ((uint32)D11RXHDR_LT80_ACCESS_VAL(rxh, AvbRxTimeL) | \
+ ((uint32)D11RXHDR_LT80_ACCESS_VAL(rxh, AvbRxTimeH) << 16u)))
+
+#define D11RXHDR_TSF_REF(rxh, corerev, corerev_minor) \
+ (D11REV_MAJ_MIN_GE(corerev, corerev_minor, 87, 1) ? \
+ D11RXHDR_GE87_1_ACCESS_REF(rxh, TSFRxTime) : \
+ D11REV_GE(corerev, 80) ? (uint32*)D11RXHDR_GE80_ACCESS_REF(rxh, RxTSFTime) : \
+ (uint32*)D11RXHDR_LT80_ACCESS_REF(rxh, RxTSFTime))
+
+#define D11RXHDR_TSF(rxh, corerev, corerev_minor) \
+ (D11REV_MAJ_MIN_GE(corerev, corerev_minor, 87, 1) ? \
+ D11RXHDR_GE87_1_ACCESS_VAL(rxh, TSFRxTime) : \
+ D11REV_GE(corerev, 80) ? D11RXHDR_GE80_ACCESS_VAL(rxh, RxTSFTime) : \
+ D11RXHDR_LT80_ACCESS_VAL(rxh, RxTSFTime))
+
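+/* Illustrative sketch: the accessor macros hide the per-rev layout, so a
+ * caller can read rx status fields without picking a union member itself.
+ * 'rxhdr_times_example' is hypothetical; 'rxh' is assumed to point at a
+ * fully received d11rxhdr_t.
+ */
+#if defined(D11_RXHDR_ACCESS_EXAMPLE) /* example only, never built */
+static uint32
+rxhdr_times_example(d11rxhdr_t *rxh, uint32 corerev, uint32 corerev_minor)
+{
+	uint16 fsize = D11RXHDR_ACCESS_VAL(rxh, corerev, corerev_minor, RxFrameSize);
+	uint32 avb = D11RXHDR_AVB(rxh, corerev, corerev_minor);
+	(void)fsize;
+	return avb;	/* 32-bit AVB timestamp, joined from L/H halves pre-87.1 */
+}
+#endif /* D11_RXHDR_ACCESS_EXAMPLE */
+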
+#define RXS_SHORT_ENAB(rev) (D11REV_GE(rev, 64) || \
+ D11REV_IS(rev, 60) || \
+ D11REV_IS(rev, 62))
+
+#define RXS_MID_ENAB(rev) (D11REV_GE(rev, 80))
+#define RXS_LONG_ENAB(rev) (D11REV_GE(rev, 80))
+
+#define IS_D11RXHDRSHORT(rxh, rev, rev_min) ((RXS_SHORT_ENAB(rev) && \
+ ((D11RXHDR_ACCESS_VAL((rxh), (rev), (rev_min), dma_flags)) & RXS_SHORT_MASK)) != 0)
+
+#define IS_D11RXHDRMID(rxh, rev, rev_min) ((RXS_MID_ENAB(rev) && \
+ ((D11RXHDR_ACCESS_VAL((rxh), (rev), (rev_min), dma_flags)) == 0)))
+
+#define IS_D11RXHDRLONG(rxh, rev, rev_min) \
+ ((!(IS_D11RXHDRSHORT((rxh), (rev), (rev_min)))) && \
+ (!(IS_D11RXHDRMID((rxh), (rev), (rev_min)))))
+
+#define D11RXHDR_HAS_UCODE_STATUS(rxhdr, corerev, corerev_minor) \
+ ((!IS_D11RXHDRSHORT((rxhdr), (corerev), (corerev_minor))) || \
+ (IS_D11RXHDRMID((rxhdr), (corerev), (corerev_minor))))
+
+#define IS_PHYRXHDR_VALID(rxh, corerev, corerev_minor) \
+ (D11REV_MAJ_MIN_GE(corerev, corerev_minor, 87, 1) ? \
+ (D11RXHDR_GE87_1_ACCESS_VAL(rxh, dma_flags) == RXS_PHYRXST_VALID_REV_GE80) : \
+ D11REV_GE(corerev, 80) ? \
+ (D11RXHDR_GE80_ACCESS_VAL(rxh, dma_flags) == RXS_PHYRXST_VALID_REV_GE80) : \
+ (D11RXHDR_LT80_ACCESS_VAL(rxh, RxStatus2) & RXS_PHYRXST_VALID))
+
+#define RXHDR_GET_PAD_LEN(rxh, corerev, corerev_minor) (D11REV_GE(corerev, 80) ? \
+ ((((D11REV_MAJ_MIN_GE(corerev, corerev_minor, 87, 1) ? \
+ D11RXHDR_GE87_1_ACCESS_VAL(rxh, mrxs) : \
+ D11RXHDR_GE80_ACCESS_VAL(rxh, mrxs)) & RXSS_PBPRES) != 0) ? HDRCONV_PAD : 0) : \
+ (IS_D11RXHDRSHORT(rxh, corerev, corerev_minor) ? \
+ (((D11RXHDRSHORT_ACCESS_VAL(rxh, corerev, corerev_minor, mrxs) & \
+ RXSS_PBPRES) != 0) ? HDRCONV_PAD : 0) : \
+ (((D11RXHDR_LT80_ACCESS_VAL(rxh, RxStatus1) & RXS_PBPRES) != 0) ? HDRCONV_PAD : 0)))
+
+#define RXHDR_GET_PAD_PRES(rxh, corerev, corerev_minor) (D11REV_GE(corerev, 80) ? \
+ (((D11REV_MAJ_MIN_GE(corerev, corerev_minor, 87, 1) ? \
+ D11RXHDR_GE87_1_ACCESS_VAL(rxh, mrxs) : \
+ D11RXHDR_GE80_ACCESS_VAL(rxh, mrxs)) & RXSS_PBPRES) != 0) : \
+ (IS_D11RXHDRSHORT(rxh, corerev, corerev_minor) ? \
+ ((D11RXHDRSHORT_ACCESS_VAL(rxh, corerev, corerev_minor, mrxs) & \
+ RXSS_PBPRES) != 0) : \
+ (((D11RXHDR_LT80_ACCESS_VAL(rxh, RxStatus1) & RXS_PBPRES) != 0))))
+
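+/* Illustrative sketch: consuming the pad helpers when locating the start
+ * of the received frame behind the rx status. HDRCONV_PAD is assumed to
+ * be the 2-byte pad constant defined elsewhere in this header; the
+ * function name is hypothetical.
+ */
+#if defined(D11_RXHDR_PAD_EXAMPLE) /* example only, never built */
+static uint8 *
+rx_frame_start_example(d11rxhdr_t *rxh, uint8 *body, uint32 corerev,
+	uint32 corerev_minor)
+{
+	/* RXHDR_GET_PAD_LEN() is 0 or HDRCONV_PAD depending on RXSS_PBPRES */
+	return body + RXHDR_GET_PAD_LEN(rxh, corerev, corerev_minor);
+}
+#endif /* D11_RXHDR_PAD_EXAMPLE */
+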
+#define RXHDR_GET_CONV_TYPE(rxh, corerev, corerev_minor) \
+ (IS_D11RXHDRSHORT(rxh, corerev, corerev_minor) ? \
+ ((D11RXHDRSHORT_ACCESS_VAL(rxh, corerev, corerev_minor, \
+ HdrConvSt) & HDRCONV_ETH_FRAME) != 0) : ((D11RXHDR_ACCESS_VAL(rxh, \
+ corerev, corerev_minor, HdrConvSt) & HDRCONV_ETH_FRAME) != 0))
+
+#define RXHDR_GET_ROE_ERR_STS(rxh, corerev, corerev_minor) \
+ (D11REV_MAJ_MIN_GE(corerev, corerev_minor, 87, 1) ? \
+ ((D11RXHDR_GE87_1_ACCESS_VAL(rxh, roe_err_flags))) : 0)
+
+#define RXHDR_GET_ROE_L3_TYPE(rxh, corerev, corerev_minor) \
+ (D11REV_MAJ_MIN_GE(corerev, corerev_minor, 87, 1) ? \
+ ((D11RXHDR_GE87_1_ACCESS_VAL(rxh, roe_hw_sts)) & ROE_L3_PROT_TYPE_MASK) : 0)
+
+#define RXHDR_GET_ROE_L4_TYPE(rxh, corerev, corerev_minor) \
+ (D11REV_MAJ_MIN_GE(corerev, corerev_minor, 87, 1) ? \
+ ((D11RXHDR_GE87_1_ACCESS_VAL(rxh, roe_hw_sts)) & ROE_L4_PROT_TYPE_MASK) : 0)
+
+#define RXHDR_GET_ROE_L3_STATUS(rxh, corerev, corerev_minor) \
+ (D11REV_MAJ_MIN_GE(corerev, corerev_minor, 87, 1) ? \
+ ((D11RXHDR_GE87_1_ACCESS_VAL(rxh, roe_hw_sts)) & ROE_L3_CHKSUM_STATUS_MASK) : 0)
+
+#define RXHDR_GET_ROE_L4_STATUS(rxh, corerev, corerev_minor) \
+ (D11REV_MAJ_MIN_GE(corerev, corerev_minor, 87, 1) ? \
+ ((D11RXHDR_GE87_1_ACCESS_VAL(rxh, roe_hw_sts)) & ROE_L4_CHKSUM_STATUS_MASK) : 0)
+
+#define RXHDR_GET_AGG_TYPE(rxh, corerev, corerev_minor) \
+ (D11REV_GE(corerev, 80) ? \
+ (((D11REV_MAJ_MIN_GE(corerev, corerev_minor, 87, 1) ? \
+ D11RXHDR_GE87_1_ACCESS_VAL(rxh, mrxs) : \
+ D11RXHDR_GE80_ACCESS_VAL(rxh, mrxs)) & RXSS_AGGTYPE_MASK) >> RXSS_AGGTYPE_SHIFT) : \
+ (IS_D11RXHDRSHORT(rxh, corerev, corerev_minor) ? \
+ ((D11RXHDRSHORT_ACCESS_VAL(rxh, corerev, corerev_minor, mrxs) \
+ & RXSS_AGGTYPE_MASK) >> RXSS_AGGTYPE_SHIFT) : \
+ ((D11RXHDR_LT80_ACCESS_VAL(rxh, RxStatus2) & RXS_AGGTYPE_MASK) >> RXS_AGGTYPE_SHIFT)))
+
+#define RXHDR_GET_PBPRS_REF(rxh, corerev, corerev_minor) (D11REV_GE(corerev, 80) ? \
+ (D11REV_MAJ_MIN_GE(corerev, corerev_minor, 87, 1) ? \
+ D11RXHDR_GE87_1_ACCESS_REF(rxh, mrxs) : \
+ D11RXHDR_GE80_ACCESS_REF(rxh, mrxs)) : \
+ (IS_D11RXHDRSHORT(rxh, corerev, corerev_minor) ? \
+ ((D11RXHDRSHORT_ACCESS_REF(rxh, corerev, corerev_minor, mrxs))) : \
+ (D11RXHDR_LT80_ACCESS_REF(rxh, RxStatus1))))
+
+#define RXHDR_GET_IS_DEFRAG(rxh, corerev, corerev_minor) (D11REV_GE(corerev, 80) ? \
+ (D11RXHDR_ACCESS_VAL(rxh, corerev, corerev_minor, RxStatus1) & RXS_IS_DEFRAG) : 0)
+
+#define SET_RXHDR_PBPRS_REF_VAL(rxh, corerev, corerev_minor, val) \
+ (D11REV_GE(corerev, 80) ? \
+ (*val |= RXSS_PBPRES) : \
+ (IS_D11RXHDRSHORT(rxh, corerev, corerev_minor) ? (*val |= RXSS_PBPRES) : \
+ (*val |= RXS_PBPRES)))
+
+#define CLEAR_RXHDR_PBPRS_REF_VAL(rxh, corerev, corerev_minor, val) \
+ (D11REV_GE(corerev, 80) ? \
+ (*val &= ~RXSS_PBPRES) : \
+ (IS_D11RXHDRSHORT(rxh, corerev, corerev_minor) ? (*val &= ~RXSS_PBPRES) : \
+ (*val &= ~RXS_PBPRES)))
+
+#define RXHDR_GET_AMSDU(rxh, corerev, corerev_minor) (D11REV_GE(corerev, 80) ? \
+ (((D11REV_MAJ_MIN_GE(corerev, corerev_minor, 87, 1) ? \
+ D11RXHDR_GE87_1_ACCESS_VAL(rxh, mrxs) : \
+ D11RXHDR_GE80_ACCESS_VAL(rxh, mrxs)) & RXSS_AMSDU_MASK) != 0) : \
+ (IS_D11RXHDRSHORT(rxh, corerev, corerev_minor) ? \
+ ((D11RXHDRSHORT_ACCESS_VAL(rxh, corerev, corerev_minor, \
+ mrxs) & RXSS_AMSDU_MASK) != 0) : \
+ ((D11RXHDR_LT80_ACCESS_VAL(rxh, RxStatus2) & RXS_AMSDU_MASK) != 0)))
+
+#ifdef BCMDBG
+#define RXHDR_GET_MSDU_COUNT(rxh, corerev, corerev_minor) (D11REV_GE(corerev, 80) ? \
+ (((D11REV_MAJ_MIN_GE(corerev, corerev_minor, 87, 1) ? \
+ D11RXHDR_GE87_1_ACCESS_VAL(rxh, mrxs) : \
+ D11RXHDR_GE80_ACCESS_VAL(rxh, mrxs)) & RXSS_MSDU_CNT_MASK) >> RXSS_MSDU_CNT_SHIFT) : \
+ IS_D11RXHDRSHORT(rxh, corerev, corerev_minor) ? \
+ (((D11RXHDRSHORT_ACCESS_VAL(rxh, corerev, corerev_minor, mrxs)) & \
+ RXSS_MSDU_CNT_MASK) >> RXSS_MSDU_CNT_SHIFT) : 0)
+
+#endif /* BCMDBG */
+
+/** Length of HW RX status in RxStatus */
+#define HW_RXHDR_LEN_REV_GE87_1 (sizeof(d11rxhdrshort_ge87_1_t)) /* 24 bytes */
+#define HW_RXHDR_LEN_REV_GE80 (sizeof(d11rxhdrshort_ge80_t)) /* 20 bytes */
+#define HW_RXHDR_LEN_REV_LT80 (sizeof(d11rxhdrshort_lt80_t)) /* 12 bytes */
+#define HW_RXHDR_LEN_REV_61_1 (sizeof(d11rxhdrshort_rev61_1_t)) /* 16 bytes */
+
+/** Length of HW RX status + ucode Rx status in RxStatus */
+#define MID_RXHDR_LEN_REV_GE87_1 (sizeof(d11rxhdrmid_ge87_1_t)) /* 48 bytes */
+#define MID_RXHDR_LEN_REV_GE80 (sizeof(d11rxhdrmid_ge80_t)) /* 36 bytes */
+
+/** Length of HW RX status + ucode RX status + PHY RX status + padding(if need align) */
+#define D11_RXHDR_LEN_REV_GE87_1 (sizeof(d11rxhdr_ge87_1_t)) /* 80 bytes */
+#define D11_RXHDR_LEN_REV_GE80 (sizeof(d11rxhdr_ge80_t)) /* 68 bytes */
+#define D11_RXHDR_LEN_REV_LT80 (sizeof(d11rxhdr_lt80_t)) /* 36 bytes */
+
+#define HW_RXHDR_LEN(corerev, corerev_minor) \
+ (D11REV_MAJ_MIN_GE(corerev, corerev_minor, 87, 1) ? HW_RXHDR_LEN_REV_GE87_1 : \
+ D11REV_GE(corerev, 80) ? HW_RXHDR_LEN_REV_GE80 : HW_RXHDR_LEN_REV_LT80)
+
+#define MID_RXHDR_LEN(corerev, corerev_minor) \
+ (D11REV_MAJ_MIN_GE(corerev, corerev_minor, 87, 1) ? MID_RXHDR_LEN_REV_GE87_1 : \
+ D11REV_GE(corerev, 80) ? \
+	MID_RXHDR_LEN_REV_GE80 : 0)
+
+#define D11_RXHDR_LEN(corerev, corerev_minor) \
+ (D11REV_MAJ_MIN_GE(corerev, corerev_minor, 87, 1) ? D11_RXHDR_LEN_REV_GE87_1 : \
+ D11REV_GE(corerev, 80) ? D11_RXHDR_LEN_REV_GE80 : \
+ D11_RXHDR_LEN_REV_LT80)
+
+#define FRAMELEN(corerev, corerev_minor, rxh) \
+ D11RXHDR_ACCESS_VAL(rxh, corerev, corerev_minor, RxFrameSize)
+
+#define RXS_SHORT_MASK 0x01 /**< Short vs full rx status in dma_flags field of d11rxhdr */
+
+/** Validate chip-specific phychain info for MCSSQ SNR.
+ * Should stay in sync with uCode reporting.
+ * Please add new conditions in descending order to avoid a wrong skip.
+ * Note: this macro can be removed once NEWT no longer needs 4368a0.
+ */
+#define IS_MCSSQ_ANT3_VALID_GE80(corerev, corerev_minor) \
+ (D11REV_IS(corerev, 83) && (D11MINORREV_IS(corerev_minor, 1)))
+
+/* Header conversion status register bit fields */
+#define HDRCONV_USR_ENAB 0x0001
+#define HDRCONV_ENAB 0x0100
+#define HDRCONV_ETH_FRAME 0x0200
+#define HDRCONV_STATUS_VALID 0x8000
+
+#define ROE_L3_PROT_TYPE_IPV4 (0x10u)
+#define ROE_L3_PROT_TYPE_IPV6 (0x20u)
+#define ROE_L3_PROT_TYPE_MASK (0x30u)
+#define ROE_L3_PROT_TYPE_SHIFT (4u)
+
+#define ROE_L4_PROT_TYPE_TCP (0x40u)
+#define ROE_L4_PROT_TYPE_UDP (0x80u)
+#define ROE_L4_PROT_TYPE_MASK (0xC0u)
+#define ROE_L4_PROT_TYPE_SHIFT (6u)
+
+#define ROE_L3_CHKSUM_STATUS_FAIL (0x100u)
+#define ROE_L3_CHKSUM_STATUS_SUCCESS (0x200u)
+#define ROE_L3_CHKSUM_STATUS_MASK (0x300u)
+#define ROE_L3_CHKSUM_STATUS_SHIFT (8u)
+
+#define ROE_L4_CHKSUM_STATUS_FAIL (0x400u)
+#define ROE_L4_CHKSUM_STATUS_SUCCESS (0x800u)
+#define ROE_L4_CHKSUM_STATUS_MASK (0xC00u)
+#define ROE_L4_CHKSUM_STATUS_SHIFT (10u)
+
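+/* Illustrative sketch: interpreting roe_hw_sts with the masks below. The
+ * check answers "is this a TCP/IPv4 frame whose L3 and L4 checksums both
+ * passed the ROE offload?"; it is only meaningful where roe_hw_sts exists
+ * (rev >= 87.1). The function name is hypothetical.
+ */
+#if defined(D11_ROE_EXAMPLE) /* example only, never built */
+static int
+roe_tcp4_csum_ok_example(uint16 roe_hw_sts)
+{
+	return ((roe_hw_sts & ROE_L3_PROT_TYPE_MASK) == ROE_L3_PROT_TYPE_IPV4) &&
+		((roe_hw_sts & ROE_L4_PROT_TYPE_MASK) == ROE_L4_PROT_TYPE_TCP) &&
+		((roe_hw_sts & ROE_L3_CHKSUM_STATUS_MASK) ==
+			ROE_L3_CHKSUM_STATUS_SUCCESS) &&
+		((roe_hw_sts & ROE_L4_CHKSUM_STATUS_MASK) ==
+			ROE_L4_CHKSUM_STATUS_SUCCESS);
+}
+#endif /* D11_ROE_EXAMPLE */
+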
+/** NOTE: Due to a precommit issue, _d11_autophyrxsts_ will be moved
+ * to a separate file once the 4387 trunk build is stable.
+ */
+#ifndef _d11_autophyrxsts_
+#define _d11_autophyrxsts_
+
+#define APRXS_WD0_L_EN_GE85 1u
+#define APRXS_WD0_H_EN_GE85 1u
+#define APRXS_WD1_L_EN_GE85 1u
+#define APRXS_WD1_H_EN_GE85 1u
+#define APRXS_WD2_L_EN_GE85 1u
+#define APRXS_WD2_H_EN_GE85 1u
+#define APRXS_WD3_L_EN_GE85 1u
+#define APRXS_WD3_H_EN_GE85 0u // DO NOT ENABLE WD3_H
+#define APRXS_WD4_L_EN_GE85 1u
+#define APRXS_WD4_H_EN_GE85 1u
+#define APRXS_WD5_L_EN_GE85 1u
+#define APRXS_WD5_H_EN_GE85 1u
+#define APRXS_WD6_L_EN_GE85 0u
+#define APRXS_WD6_H_EN_GE85 0u
+#define APRXS_WD7_L_EN_GE85 0u
+#define APRXS_WD7_H_EN_GE85 0u
+#define APRXS_WD8_L_EN_GE85 0u
+#define APRXS_WD8_H_EN_GE85 1u
+#define APRXS_WD9_L_EN_GE85 0u
+#define APRXS_WD9_H_EN_GE85 0u
+#define APRXS_WD10_L_EN_GE85 0u
+#define APRXS_WD10_H_EN_GE85 0u
+#define APRXS_WD11_L_EN_GE85 0u
+#define APRXS_WD11_H_EN_GE85 0u
+#define APRXS_WD12_L_EN_GE85 0u
+#define APRXS_WD12_H_EN_GE85 0u
+#define APRXS_WD13_L_EN_GE85 0u
+#define APRXS_WD13_H_EN_GE85 0u
+#define APRXS_WD14_L_EN_GE85 0u
+#define APRXS_WD14_H_EN_GE85 0u
+#define APRXS_WD15_L_EN_GE85 0u
+#define APRXS_WD15_H_EN_GE85 0u
+#define APRXS_WD16_L_EN_GE85 1u
+#define APRXS_WD16_H_EN_GE85 0u
+#define APRXS_WD17_L_EN_GE85 0u
+#define APRXS_WD17_H_EN_GE85 0u
+#define APRXS_WD18_L_EN_GE85 1u
+#define APRXS_WD18_H_EN_GE85 0u
+#define APRXS_WD19_L_EN_GE85 0u
+#define APRXS_WD19_H_EN_GE85 0u
+#define APRXS_WD20_L_EN_GE85 1u
+#define APRXS_WD20_H_EN_GE85 1u
+#define APRXS_WD21_L_EN_GE85 0u
+#define APRXS_WD21_H_EN_GE85 1u
+#define APRXS_WD22_L_EN_GE85 1u
+#define APRXS_WD22_H_EN_GE85 1u
+#define APRXS_WD23_L_EN_GE85 1u
+#define APRXS_WD23_H_EN_GE85 1u
+#define APRXS_WD24_L_EN_GE85 0u
+#define APRXS_WD24_H_EN_GE85 0u
+#define APRXS_WD25_L_EN_GE85 0u
+#define APRXS_WD25_H_EN_GE85 0u
+
+enum {
+ APRXS_WD0_L_SHIFT = 0, // frameType, unsupportedRate, band, lostCRS, shortPreamble
+ APRXS_WD0_H_SHIFT, // PLCPViolation, MFCRSFired, ACCRSFired, MUPPDU, OBSSStat
+ APRXS_WD1_L_SHIFT, // coremask, antcfg,
+ APRXS_WD1_H_SHIFT, // BWclassification
+ APRXS_WD2_L_SHIFT, // RxPwrAnt0
+ APRXS_WD2_H_SHIFT, // RxPwrAnt1
+ APRXS_WD3_L_SHIFT, // RxPwrAnt2
+ APRXS_WD3_H_SHIFT, // RxPwrAnt3, OCL
+	APRXS_WD4_L_SHIFT, // RSSI fractional bit
+ APRXS_WD4_H_SHIFT, // AGC type, ACI mitigation state, ClipCount, DynBWInNonHT
+ APRXS_WD5_L_SHIFT, // MCSSQSNRCore0
+ APRXS_WD5_H_SHIFT, // MCSSQSNRCore1
+ APRXS_WD6_L_SHIFT, // MCSSQSNRCore2
+ APRXS_WD6_H_SHIFT, // MCSSQSNRCore3, OCL 1
+ APRXS_WD7_L_SHIFT, // MUIntProcessType,
+ APRXS_WD7_H_SHIFT, // coarse freq_offset, packet abort
+ APRXS_WD8_L_SHIFT = 0, // fine freq offset
+ APRXS_WD8_H_SHIFT, // ChBWInNonHT, MLUsed, SINRBasedACIDet
+ APRXS_WD9_L_SHIFT, // SpatialSQCnt
+ APRXS_WD9_H_SHIFT, // packet gain
+ APRXS_WD10_L_SHIFT, // RxPwrAntExt
+ APRXS_WD10_H_SHIFT, // coarse freq_offset of 2nd 80mhz
+ APRXS_WD11_L_SHIFT, // fine freq_offset of 2nd 80mhz
+ APRXS_WD11_H_SHIFT,
+ APRXS_WD12_L_SHIFT,
+ APRXS_WD12_H_SHIFT,
+ APRXS_WD13_L_SHIFT,
+ APRXS_WD13_H_SHIFT,
+ APRXS_WD14_L_SHIFT,
+ APRXS_WD14_H_SHIFT,
+ APRXS_WD15_L_SHIFT,
+ APRXS_WD15_H_SHIFT,
+ APRXS_WD16_L_SHIFT = 0,
+ APRXS_WD16_H_SHIFT,
+ APRXS_WD17_L_SHIFT,
+ APRXS_WD17_H_SHIFT,
+ APRXS_WD18_L_SHIFT,
+ APRXS_WD18_H_SHIFT,
+ APRXS_WD19_L_SHIFT,
+ APRXS_WD19_H_SHIFT,
+ APRXS_WD20_L_SHIFT,
+ APRXS_WD20_H_SHIFT,
+ APRXS_WD21_L_SHIFT,
+ APRXS_WD21_H_SHIFT,
+ APRXS_WD22_L_SHIFT, // STA ID
+ APRXS_WD22_H_SHIFT, // STA ID, NSTS, TXBF, DCM
+ APRXS_WD23_L_SHIFT,
+ APRXS_WD23_H_SHIFT,
+ APRXS_WD24_L_SHIFT = 0,
+ APRXS_WD24_H_SHIFT,
+ APRXS_WD25_L_SHIFT,
+ APRXS_WD25_H_SHIFT
+};
+
+#define APRXS_WD0_L_EN(rev) ((D11REV_GE(rev, 85)) ? \
+ APRXS_WD0_L_EN_GE85 : 0)
+#define APRXS_WD0_H_EN(rev) ((D11REV_GE(rev, 85)) ? \
+ APRXS_WD0_H_EN_GE85 : 0)
+#define APRXS_WD1_L_EN(rev) ((D11REV_GE(rev, 85)) ? \
+ APRXS_WD1_L_EN_GE85 : 0)
+#define APRXS_WD1_H_EN(rev) ((D11REV_GE(rev, 85)) ? \
+ APRXS_WD1_H_EN_GE85 : 0)
+#define APRXS_WD2_L_EN(rev) ((D11REV_GE(rev, 85)) ? \
+ APRXS_WD2_L_EN_GE85 : 0)
+#define APRXS_WD2_H_EN(rev) ((D11REV_GE(rev, 85)) ? \
+ APRXS_WD2_H_EN_GE85 : 0)
+#define APRXS_WD3_L_EN(rev) ((D11REV_GE(rev, 85)) ? \
+ APRXS_WD3_L_EN_GE85 : 0)
+#define APRXS_WD3_H_EN(rev) ((D11REV_GE(rev, 85)) ? \
+ APRXS_WD3_H_EN_GE85 : 0)
+#define APRXS_WD4_L_EN(rev) ((D11REV_GE(rev, 85)) ? \
+ APRXS_WD4_L_EN_GE85 : 0)
+#define APRXS_WD4_H_EN(rev) ((D11REV_GE(rev, 85)) ? \
+ APRXS_WD4_H_EN_GE85 : 0)
+#define APRXS_WD5_L_EN(rev) ((D11REV_GE(rev, 85)) ? \
+ APRXS_WD5_L_EN_GE85 : 0)
+#define APRXS_WD5_H_EN(rev) ((D11REV_GE(rev, 85)) ? \
+ APRXS_WD5_H_EN_GE85 : 0)
+#define APRXS_WD6_L_EN(rev) ((D11REV_GE(rev, 85)) ? \
+ APRXS_WD6_L_EN_GE85 : 0)
+#define APRXS_WD6_H_EN(rev) ((D11REV_GE(rev, 85)) ? \
+ APRXS_WD6_H_EN_GE85 : 0)
+#define APRXS_WD7_L_EN(rev) ((D11REV_GE(rev, 85)) ? \
+ APRXS_WD7_L_EN_GE85 : 0)
+#define APRXS_WD7_H_EN(rev) ((D11REV_GE(rev, 85)) ? \
+ APRXS_WD7_H_EN_GE85 : 0)
+#define APRXS_WD8_L_EN(rev) ((D11REV_GE(rev, 85)) ? \
+ APRXS_WD8_L_EN_GE85 : 0)
+#define APRXS_WD8_H_EN(rev) ((D11REV_GE(rev, 85)) ? \
+ APRXS_WD8_H_EN_GE85 : 0)
+#define APRXS_WD9_L_EN(rev) ((D11REV_GE(rev, 85)) ? \
+ APRXS_WD9_L_EN_GE85 : 0)
+#define APRXS_WD9_H_EN(rev) ((D11REV_GE(rev, 85)) ? \
+ APRXS_WD9_H_EN_GE85 : 0)
+#define APRXS_WD10_L_EN(rev) ((D11REV_GE(rev, 85)) ? \
+ APRXS_WD10_L_EN_GE85 : 0)
+#define APRXS_WD10_H_EN(rev) ((D11REV_GE(rev, 85)) ? \
+ APRXS_WD10_H_EN_GE85 : 0)
+#define APRXS_WD11_L_EN(rev) ((D11REV_GE(rev, 85)) ? \
+ APRXS_WD11_L_EN_GE85 : 0)
+#define APRXS_WD11_H_EN(rev) ((D11REV_GE(rev, 85)) ? \
+ APRXS_WD11_H_EN_GE85 : 0)
+#define APRXS_WD12_L_EN(rev) ((D11REV_GE(rev, 85)) ? \
+ APRXS_WD12_L_EN_GE85 : 0)
+#define APRXS_WD12_H_EN(rev) ((D11REV_GE(rev, 85)) ? \
+ APRXS_WD12_H_EN_GE85 : 0)
+#define APRXS_WD13_L_EN(rev) ((D11REV_GE(rev, 85)) ? \
+ APRXS_WD13_L_EN_GE85 : 0)
+#define APRXS_WD13_H_EN(rev) ((D11REV_GE(rev, 85)) ? \
+ APRXS_WD13_H_EN_GE85 : 0)
+#define APRXS_WD14_L_EN(rev) ((D11REV_GE(rev, 85)) ? \
+ APRXS_WD14_L_EN_GE85 : 0)
+#define APRXS_WD14_H_EN(rev) ((D11REV_GE(rev, 85)) ? \
+ APRXS_WD14_H_EN_GE85 : 0)
+#define APRXS_WD15_L_EN(rev) ((D11REV_GE(rev, 85)) ? \
+ APRXS_WD15_L_EN_GE85 : 0)
+#define APRXS_WD15_H_EN(rev) ((D11REV_GE(rev, 85)) ? \
+ APRXS_WD15_H_EN_GE85 : 0)
+#define APRXS_WD16_L_EN(rev) ((D11REV_GE(rev, 85)) ? \
+ APRXS_WD16_L_EN_GE85 : 0)
+#define APRXS_WD16_H_EN(rev) ((D11REV_GE(rev, 85)) ? \
+ APRXS_WD16_H_EN_GE85 : 0)
+#define APRXS_WD17_L_EN(rev) ((D11REV_GE(rev, 85)) ? \
+ APRXS_WD17_L_EN_GE85 : 0)
+#define APRXS_WD17_H_EN(rev) ((D11REV_GE(rev, 85)) ? \
+ APRXS_WD17_H_EN_GE85 : 0)
+#define APRXS_WD18_L_EN(rev) ((D11REV_GE(rev, 85)) ? \
+ APRXS_WD18_L_EN_GE85 : 0)
+#define APRXS_WD18_H_EN(rev) ((D11REV_GE(rev, 85)) ? \
+ APRXS_WD18_H_EN_GE85 : 0)
+#define APRXS_WD19_L_EN(rev) ((D11REV_GE(rev, 85)) ? \
+ APRXS_WD19_L_EN_GE85 : 0)
+#define APRXS_WD19_H_EN(rev) ((D11REV_GE(rev, 85)) ? \
+ APRXS_WD19_H_EN_GE85 : 0)
+#define APRXS_WD20_L_EN(rev) ((D11REV_GE(rev, 85)) ? \
+ APRXS_WD20_L_EN_GE85 : 0)
+#define APRXS_WD20_H_EN(rev) ((D11REV_GE(rev, 85)) ? \
+ APRXS_WD20_H_EN_GE85 : 0)
+#define APRXS_WD21_L_EN(rev) ((D11REV_GE(rev, 85)) ? \
+ APRXS_WD21_L_EN_GE85 : 0)
+#define APRXS_WD21_H_EN(rev) ((D11REV_GE(rev, 85)) ? \
+ APRXS_WD21_H_EN_GE85 : 0)
+#define APRXS_WD22_L_EN(rev) ((D11REV_GE(rev, 85)) ? \
+ APRXS_WD22_L_EN_GE85 : 0)
+#define APRXS_WD22_H_EN(rev) ((D11REV_GE(rev, 85)) ? \
+ APRXS_WD22_H_EN_GE85 : 0)
+#define APRXS_WD23_L_EN(rev) ((D11REV_GE(rev, 85)) ? \
+ APRXS_WD23_L_EN_GE85 : 0)
+#define APRXS_WD23_H_EN(rev) ((D11REV_GE(rev, 85)) ? \
+ APRXS_WD23_H_EN_GE85 : 0)
+#define APRXS_WD24_L_EN(rev) ((D11REV_GE(rev, 85)) ? \
+ APRXS_WD24_L_EN_GE85 : 0)
+#define APRXS_WD24_H_EN(rev) ((D11REV_GE(rev, 85)) ? \
+ APRXS_WD24_H_EN_GE85 : 0)
+#define APRXS_WD25_L_EN(rev) ((D11REV_GE(rev, 85)) ? \
+ APRXS_WD25_L_EN_GE85 : 0)
+#define APRXS_WD25_H_EN(rev) ((D11REV_GE(rev, 85)) ? \
+ APRXS_WD25_H_EN_GE85 : 0)
+
+#define APRXS_BMAP0(rev) ((APRXS_WD0_L_EN(rev) << APRXS_WD0_L_SHIFT) | \
+ (APRXS_WD0_H_EN(rev) << APRXS_WD0_H_SHIFT) |\
+ (APRXS_WD1_L_EN(rev) << APRXS_WD1_L_SHIFT) |\
+ (APRXS_WD1_H_EN(rev) << APRXS_WD1_H_SHIFT) |\
+ (APRXS_WD2_L_EN(rev) << APRXS_WD2_L_SHIFT) |\
+ (APRXS_WD2_H_EN(rev) << APRXS_WD2_H_SHIFT) |\
+ (APRXS_WD3_L_EN(rev) << APRXS_WD3_L_SHIFT) |\
+ (APRXS_WD3_H_EN(rev) << APRXS_WD3_H_SHIFT) |\
+ (APRXS_WD4_L_EN(rev) << APRXS_WD4_L_SHIFT) |\
+ (APRXS_WD4_H_EN(rev) << APRXS_WD4_H_SHIFT) |\
+ (APRXS_WD5_L_EN(rev) << APRXS_WD5_L_SHIFT) |\
+ (APRXS_WD5_H_EN(rev) << APRXS_WD5_H_SHIFT) |\
+ (APRXS_WD6_L_EN(rev) << APRXS_WD6_L_SHIFT) |\
+ (APRXS_WD6_H_EN(rev) << APRXS_WD6_H_SHIFT) |\
+ (APRXS_WD7_L_EN(rev) << APRXS_WD7_L_SHIFT) |\
+ (APRXS_WD7_H_EN(rev) << APRXS_WD7_H_SHIFT))
+
+#define APRXS_BMAP1(rev) ((APRXS_WD8_L_EN(rev) << APRXS_WD8_L_SHIFT) | \
+ (APRXS_WD8_H_EN(rev) << APRXS_WD8_H_SHIFT) |\
+ (APRXS_WD9_L_EN(rev) << APRXS_WD9_L_SHIFT) |\
+ (APRXS_WD9_H_EN(rev) << APRXS_WD9_H_SHIFT) |\
+ (APRXS_WD10_L_EN(rev) << APRXS_WD10_L_SHIFT) |\
+ (APRXS_WD10_H_EN(rev) << APRXS_WD10_H_SHIFT) |\
+ (APRXS_WD11_L_EN(rev) << APRXS_WD11_L_SHIFT) |\
+ (APRXS_WD11_H_EN(rev) << APRXS_WD11_H_SHIFT) |\
+ (APRXS_WD12_L_EN(rev) << APRXS_WD12_L_SHIFT) |\
+ (APRXS_WD12_H_EN(rev) << APRXS_WD12_H_SHIFT) |\
+ (APRXS_WD13_L_EN(rev) << APRXS_WD13_L_SHIFT) |\
+ (APRXS_WD13_H_EN(rev) << APRXS_WD13_H_SHIFT) |\
+ (APRXS_WD14_L_EN(rev) << APRXS_WD14_L_SHIFT) |\
+ (APRXS_WD14_H_EN(rev) << APRXS_WD14_H_SHIFT) |\
+ (APRXS_WD15_L_EN(rev) << APRXS_WD15_L_SHIFT) |\
+ (APRXS_WD15_H_EN(rev) << APRXS_WD15_H_SHIFT))
+
+#define APRXS_BMAP2(rev) ((APRXS_WD16_L_EN(rev) << APRXS_WD16_L_SHIFT) | \
+ (APRXS_WD16_H_EN(rev) << APRXS_WD16_H_SHIFT) |\
+ (APRXS_WD17_L_EN(rev) << APRXS_WD17_L_SHIFT) |\
+ (APRXS_WD17_H_EN(rev) << APRXS_WD17_H_SHIFT) |\
+ (APRXS_WD18_L_EN(rev) << APRXS_WD18_L_SHIFT) |\
+ (APRXS_WD18_H_EN(rev) << APRXS_WD18_H_SHIFT) |\
+ (APRXS_WD19_L_EN(rev) << APRXS_WD19_L_SHIFT) |\
+ (APRXS_WD19_H_EN(rev) << APRXS_WD19_H_SHIFT) |\
+ (APRXS_WD20_L_EN(rev) << APRXS_WD20_L_SHIFT) |\
+ (APRXS_WD20_H_EN(rev) << APRXS_WD20_H_SHIFT) |\
+ (APRXS_WD21_L_EN(rev) << APRXS_WD21_L_SHIFT) |\
+ (APRXS_WD21_H_EN(rev) << APRXS_WD21_H_SHIFT) |\
+ (APRXS_WD22_L_EN(rev) << APRXS_WD22_L_SHIFT) |\
+ (APRXS_WD22_H_EN(rev) << APRXS_WD22_H_SHIFT) |\
+ (APRXS_WD23_L_EN(rev) << APRXS_WD23_L_SHIFT) |\
+ (APRXS_WD23_H_EN(rev) << APRXS_WD23_H_SHIFT))
+
+#define APRXS_BMAP3(rev) ((APRXS_WD24_L_EN(rev) << APRXS_WD24_L_SHIFT) | \
+ (APRXS_WD24_H_EN(rev) << APRXS_WD24_H_SHIFT) |\
+ (APRXS_WD25_L_EN(rev) << APRXS_WD25_L_SHIFT) |\
+ (APRXS_WD25_H_EN(rev) << APRXS_WD25_H_SHIFT))
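+/* Editor's sketch (not from the original sources): each APRXS_BMAPn(rev)
+ * packs sixteen per-half-word enable bits into one 16-bit bitmap. Every
+ * APRXS_WDx_y_EN(rev) above evaluates to its _GE85 value for rev >= 85 and
+ * to 0 otherwise, so for rev < 85 all four bitmaps are empty:
+ *
+ *   uint16 bmap = (uint16)APRXS_BMAP0(rev);  // 0 for rev < 85
+ */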
+/* byte position */
+#define APRXS_WD0_L_POS(rev) 0u
+#define APRXS_WD0_H_POS(rev) (APRXS_WD0_L_POS(rev) + APRXS_WD0_L_EN(rev)) /* 1 */
+#define APRXS_WD1_L_POS(rev) (APRXS_WD0_H_POS(rev) + APRXS_WD0_H_EN(rev)) /* 2 */
+#define APRXS_WD1_H_POS(rev) (APRXS_WD1_L_POS(rev) + APRXS_WD1_L_EN(rev)) /* 3 */
+#define APRXS_WD2_L_POS(rev) (APRXS_WD1_H_POS(rev) + APRXS_WD1_H_EN(rev)) /* 4 */
+#define APRXS_WD2_H_POS(rev) (APRXS_WD2_L_POS(rev) + APRXS_WD2_L_EN(rev)) /* 5 */
+#define APRXS_WD3_L_POS(rev) (APRXS_WD2_H_POS(rev) + APRXS_WD2_H_EN(rev)) /* 6 */
+#define APRXS_WD3_H_POS(rev) (APRXS_WD3_L_POS(rev) + APRXS_WD3_L_EN(rev)) /* 7 */
+#define APRXS_WD4_L_POS(rev) (APRXS_WD3_H_POS(rev) + APRXS_WD3_H_EN(rev)) /* 7 */
+#define APRXS_WD4_H_POS(rev) (APRXS_WD4_L_POS(rev) + APRXS_WD4_L_EN(rev)) /* 8 */
+#define APRXS_WD5_L_POS(rev) (APRXS_WD4_H_POS(rev) + APRXS_WD4_H_EN(rev)) /* 9 */
+#define APRXS_WD5_H_POS(rev) (APRXS_WD5_L_POS(rev) + APRXS_WD5_L_EN(rev)) /* 10 */
+#define APRXS_WD6_L_POS(rev) (APRXS_WD5_H_POS(rev) + APRXS_WD5_H_EN(rev)) /* 11 */
+#define APRXS_WD6_H_POS(rev) (APRXS_WD6_L_POS(rev) + APRXS_WD6_L_EN(rev)) /* 11 */
+#define APRXS_WD7_L_POS(rev) (APRXS_WD6_H_POS(rev) + APRXS_WD6_H_EN(rev)) /* 11 */
+#define APRXS_WD7_H_POS(rev) (APRXS_WD7_L_POS(rev) + APRXS_WD7_L_EN(rev)) /* 11 */
+#define APRXS_WD8_L_POS(rev) (APRXS_WD7_H_POS(rev) + APRXS_WD7_H_EN(rev)) /* 11 */
+#define APRXS_WD8_H_POS(rev) (APRXS_WD8_L_POS(rev) + APRXS_WD8_L_EN(rev)) /* 11 */
+#define APRXS_WD9_L_POS(rev) (APRXS_WD8_H_POS(rev) + APRXS_WD8_H_EN(rev)) /* 12 */
+#define APRXS_WD9_H_POS(rev) (APRXS_WD9_L_POS(rev) + APRXS_WD9_L_EN(rev)) /* 12 */
+#define APRXS_WD10_L_POS(rev) (APRXS_WD9_H_POS(rev) + APRXS_WD9_H_EN(rev)) /* 12 */
+#define APRXS_WD10_H_POS(rev) (APRXS_WD10_L_POS(rev) + APRXS_WD10_L_EN(rev)) /* 12 */
+#define APRXS_WD11_L_POS(rev) (APRXS_WD10_H_POS(rev) + APRXS_WD10_H_EN(rev)) /* 12 */
+#define APRXS_WD11_H_POS(rev) (APRXS_WD11_L_POS(rev) + APRXS_WD11_L_EN(rev)) /* 12 */
+#define APRXS_WD12_L_POS(rev) (APRXS_WD11_H_POS(rev) + APRXS_WD11_H_EN(rev)) /* 12 */
+#define APRXS_WD12_H_POS(rev) (APRXS_WD12_L_POS(rev) + APRXS_WD12_L_EN(rev)) /* 12 */
+#define APRXS_WD13_L_POS(rev) (APRXS_WD12_H_POS(rev) + APRXS_WD12_H_EN(rev)) /* 12 */
+#define APRXS_WD13_H_POS(rev) (APRXS_WD13_L_POS(rev) + APRXS_WD13_L_EN(rev)) /* 12 */
+#define APRXS_WD14_L_POS(rev) (APRXS_WD13_H_POS(rev) + APRXS_WD13_H_EN(rev)) /* 12 */
+#define APRXS_WD14_H_POS(rev) (APRXS_WD14_L_POS(rev) + APRXS_WD14_L_EN(rev)) /* 12 */
+#define APRXS_WD15_L_POS(rev) (APRXS_WD14_H_POS(rev) + APRXS_WD14_H_EN(rev)) /* 12 */
+#define APRXS_WD15_H_POS(rev) (APRXS_WD15_L_POS(rev) + APRXS_WD15_L_EN(rev)) /* 12 */
+#define APRXS_WD16_L_POS(rev) (APRXS_WD15_H_POS(rev) + APRXS_WD15_H_EN(rev)) /* 12 */
+#define APRXS_WD16_H_POS(rev) (APRXS_WD16_L_POS(rev) + APRXS_WD16_L_EN(rev)) /* 13 */
+#define APRXS_WD17_L_POS(rev) (APRXS_WD16_H_POS(rev) + APRXS_WD16_H_EN(rev)) /* 13 */
+#define APRXS_WD17_H_POS(rev) (APRXS_WD17_L_POS(rev) + APRXS_WD17_L_EN(rev)) /* 13 */
+#define APRXS_WD18_L_POS(rev) (APRXS_WD17_H_POS(rev) + APRXS_WD17_H_EN(rev)) /* 13 */
+#define APRXS_WD18_H_POS(rev) (APRXS_WD18_L_POS(rev) + APRXS_WD18_L_EN(rev)) /* 14 */
+#define APRXS_WD19_L_POS(rev) (APRXS_WD18_H_POS(rev) + APRXS_WD18_H_EN(rev)) /* 14 */
+#define APRXS_WD19_H_POS(rev) (APRXS_WD19_L_POS(rev) + APRXS_WD19_L_EN(rev)) /* 14 */
+#define APRXS_WD20_L_POS(rev) (APRXS_WD19_H_POS(rev) + APRXS_WD19_H_EN(rev)) /* 14 */
+#define APRXS_WD20_H_POS(rev) (APRXS_WD20_L_POS(rev) + APRXS_WD20_L_EN(rev)) /* 15 */
+#define APRXS_WD21_L_POS(rev) (APRXS_WD20_H_POS(rev) + APRXS_WD20_H_EN(rev)) /* 16 */
+#define APRXS_WD21_H_POS(rev) (APRXS_WD21_L_POS(rev) + APRXS_WD21_L_EN(rev)) /* 16 */
+#define APRXS_WD22_L_POS(rev) (APRXS_WD21_H_POS(rev) + APRXS_WD21_H_EN(rev)) /* 17 */
+#define APRXS_WD22_H_POS(rev) (APRXS_WD22_L_POS(rev) + APRXS_WD22_L_EN(rev)) /* 18 */
+#define APRXS_WD23_L_POS(rev) (APRXS_WD22_H_POS(rev) + APRXS_WD22_H_EN(rev)) /* 19 */
+#define APRXS_WD23_H_POS(rev) (APRXS_WD23_L_POS(rev) + APRXS_WD23_L_EN(rev)) /* 20 */
+#define APRXS_WD24_L_POS(rev) (APRXS_WD23_H_POS(rev) + APRXS_WD23_H_EN(rev)) /* 21 */
+#define APRXS_WD24_H_POS(rev) (APRXS_WD24_L_POS(rev) + APRXS_WD24_L_EN(rev)) /* 21 */
+#define APRXS_WD25_L_POS(rev) (APRXS_WD24_H_POS(rev) + APRXS_WD24_H_EN(rev)) /* 22 */
+#define APRXS_WD25_H_POS(rev) (APRXS_WD25_L_POS(rev) + APRXS_WD25_L_EN(rev)) /* 23 */
+
+#define APRXS_NBYTES(rev) (APRXS_WD25_H_POS(rev)) // total number of bytes enabled
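+/* Editor's sketch (not from the original sources): the _POS macros form a
+ * running sum of the enable bits, so each enabled status half-word lands at
+ * the next free byte of the PHYRXSTATUS stream. Assuming a hypothetical rev
+ * where only WD0_L and WD0_H are enabled:
+ *
+ *   APRXS_WD0_L_POS(rev) == 0, APRXS_WD0_H_POS(rev) == 1,
+ *   every later _POS stays at 2, and APRXS_NBYTES(rev) == 2.
+ */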
+
+// frame type
+#define APRXS_FT_POS(rev) APRXS_WD0_L_POS(rev)
+#define APRXS_FT_MASK 0xFu
+#define APRXS_FT(rxh, rev, min_rev) \
+ ((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \
+ (rxh)->ge87_1.PHYRXSTATUS[APRXS_FT_POS(rev)] : \
+ (rxh)->ge85.PHYRXSTATUS[APRXS_FT_POS(rev)]) & \
+ APRXS_FT_MASK)
+
+// unsupported rate
+#define APRXS_UNSRATE_POS(rev) APRXS_WD0_L_POS(rev)
+#define APRXS_UNSRATE_MASK 0x10u
+#define APRXS_UNSRATE_SHIFT 4u
+#define APRXS_UNSRATE(rxh, rev, min_rev) \
+ (((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \
+ (rxh)->ge87_1.PHYRXSTATUS[APRXS_UNSRATE_POS(rev)] : \
+ (rxh)->ge85.PHYRXSTATUS[APRXS_UNSRATE_POS(rev)]) & \
+ APRXS_UNSRATE_MASK) >> APRXS_UNSRATE_SHIFT)
+
+// band
+#define APRXS_BAND_POS(rev) APRXS_WD0_L_POS(rev)
+#define APRXS_BAND_MASK 0x20u
+#define APRXS_BAND_SHIFT 5u
+#define APRXS_BAND(rxh, rev, min_rev) \
+ (((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \
+ (rxh)->ge87_1.PHYRXSTATUS[APRXS_BAND_POS(rev)] : \
+ (rxh)->ge85.PHYRXSTATUS[APRXS_BAND_POS(rev)]) & \
+ APRXS_BAND_MASK) >> APRXS_BAND_SHIFT)
+
+// lost CRS
+#define APRXS_LOSTCRS_POS(rev) APRXS_WD0_L_POS(rev)
+#define APRXS_LOSTCRS_MASK 0x40u
+#define APRXS_LOSTCRS_SHIFT 6u
+#define APRXS_LOSTCRS(rxh, rev, min_rev) \
+ (((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \
+ (rxh)->ge87_1.PHYRXSTATUS[APRXS_LOSTCRS_POS(rev)] : \
+ (rxh)->ge85.PHYRXSTATUS[APRXS_LOSTCRS_POS(rev)]) & \
+ APRXS_LOSTCRS_MASK) >> APRXS_LOSTCRS_SHIFT)
+
+// short preamble
+#define APRXS_SHORTH_POS(rev) APRXS_WD0_L_POS(rev)
+#define APRXS_SHORTH_MASK 0x80u
+#define APRXS_SHORTH_SHIFT 7u
+#define APRXS_SHORTH(rxh, rev, min_rev) \
+ (((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \
+ (rxh)->ge87_1.PHYRXSTATUS[APRXS_SHORTH_POS(rev)] : \
+ (rxh)->ge85.PHYRXSTATUS[APRXS_SHORTH_POS(rev)]) & \
+ APRXS_SHORTH_MASK) >> APRXS_SHORTH_SHIFT)
+
+// plcp format violation
+#define APRXS_PLCPFV_POS(rev) APRXS_WD0_H_POS(rev)
+#define APRXS_PLCPFV_MASK 0x1u
+#define APRXS_PLCPFV(rxh, rev, min_rev) \
+ ((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \
+ (rxh)->ge87_1.PHYRXSTATUS[APRXS_PLCPFV_POS(rev)] : \
+ (rxh)->ge85.PHYRXSTATUS[APRXS_PLCPFV_POS(rev)]) & \
+ APRXS_PLCPFV_MASK)
+
+// plcp header CRC failed
+#define APRXS_PLCPHCF_POS(rev) APRXS_WD0_H_POS(rev)
+#define APRXS_PLCPHCF_MASK 0x2u
+#define APRXS_PLCPHCF_SHIFT 1u
+#define APRXS_PLCPHCF(rxh, rev, min_rev) \
+ (((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \
+ (rxh)->ge87_1.PHYRXSTATUS[APRXS_PLCPHCF_POS(rev)] : \
+ (rxh)->ge85.PHYRXSTATUS[APRXS_PLCPHCF_POS(rev)]) & \
+ APRXS_PLCPHCF_MASK) >> APRXS_PLCPHCF_SHIFT)
+
+// MFCRS fired
+#define APRXS_MFCRS_FIRED_POS(rev) APRXS_WD0_H_POS(rev)
+#define APRXS_MFCRS_FIRED_MASK 0x4u
+#define APRXS_MFCRS_FIRED_SHIFT 2u
+#define APRXS_MFCRS_FIRED(rxh, rev, min_rev) \
+ (((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \
+ (rxh)->ge87_1.PHYRXSTATUS[APRXS_MFCRS_FIRED_POS(rev)] : \
+ (rxh)->ge85.PHYRXSTATUS[APRXS_MFCRS_FIRED_POS(rev)]) & \
+ APRXS_MFCRS_FIRED_MASK) >> APRXS_MFCRS_FIRED_SHIFT)
+
+// ACCRS fired
+#define APRXS_ACCRS_FIRED_POS(rev) APRXS_WD0_H_POS(rev)
+#define APRXS_ACCRS_FIRED_MASK 0x8u
+#define APRXS_ACCRS_FIRED_SHIFT 3u
+#define APRXS_ACCRS_FIRED(rxh, rev, min_rev) \
+ (((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \
+ (rxh)->ge87_1.PHYRXSTATUS[APRXS_ACCRS_FIRED_POS(rev)] : \
+ (rxh)->ge85.PHYRXSTATUS[APRXS_ACCRS_FIRED_POS(rev)]) & \
+ APRXS_ACCRS_FIRED_MASK) >> APRXS_ACCRS_FIRED_SHIFT)
+
+// MU PPDU
+#define APRXS_MUPPDU_POS(rev) APRXS_WD0_H_POS(rev)
+#define APRXS_MUPPDU_MASK 0x10u
+#define APRXS_MUPPDU_SHIFT 4u
+#define APRXS_MUPPDU(rxh, rev, min_rev) \
+ (((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \
+ (rxh)->ge87_1.PHYRXSTATUS[APRXS_MUPPDU_POS(rev)] : \
+ (rxh)->ge85.PHYRXSTATUS[APRXS_MUPPDU_POS(rev)]) & \
+ APRXS_MUPPDU_MASK) >> APRXS_MUPPDU_SHIFT)
+
+// OBSS status
+#define APRXS_OBSS_STS_POS(rev) APRXS_WD0_H_POS(rev)
+#define APRXS_OBSS_STS_MASK 0xE0u
+#define APRXS_OBSS_STS_SHIFT 5u
+#define APRXS_OBSS_STS(rxh, rev, min_rev) \
+ (((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \
+ (rxh)->ge87_1.PHYRXSTATUS[APRXS_OBSS_STS_POS(rev)] : \
+ (rxh)->ge85.PHYRXSTATUS[APRXS_OBSS_STS_POS(rev)]) & \
+ APRXS_OBSS_STS_MASK) >> APRXS_OBSS_STS_SHIFT)
+
+// coremask
+#define APRXS_COREMASK_POS(rev) APRXS_WD1_L_POS(rev)
+#define APRXS_COREMASK_MASK 0xFu
+#define APRXS_COREMASK(rxh, rev, min_rev) \
+ ((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \
+ (rxh)->ge87_1.PHYRXSTATUS[APRXS_COREMASK_POS(rev)] : \
+ (rxh)->ge85.PHYRXSTATUS[APRXS_COREMASK_POS(rev)]) & \
+ APRXS_COREMASK_MASK)
+
+// antcfg
+#define APRXS_ANTCFG_POS(rev) APRXS_WD1_L_POS(rev)
+#define APRXS_ANTCFG_MASK 0xF0u
+#define APRXS_ANTCFG_SHIFT 4u
+#define APRXS_ANTCFG(rxh, rev, min_rev) \
+ (((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \
+ (rxh)->ge87_1.PHYRXSTATUS[APRXS_ANTCFG_POS(rev)] : \
+ (rxh)->ge85.PHYRXSTATUS[APRXS_ANTCFG_POS(rev)]) & \
+ APRXS_ANTCFG_MASK) >> APRXS_ANTCFG_SHIFT)
+
+// final BW classification
+#define APRXS_SUBBAND_POS(rev) APRXS_WD1_H_POS(rev)
+#define APRXS_SUBBAND_MASK 0xFFu
+#define APRXS_SUBBAND(rxh, rev, min_rev) \
+ ((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \
+ (rxh)->ge87_1.PHYRXSTATUS[APRXS_SUBBAND_POS(rev)] : \
+ (rxh)->ge85.PHYRXSTATUS[APRXS_SUBBAND_POS(rev)]) & \
+ APRXS_SUBBAND_MASK)
+
+// Rx power Antenna0
+#define APRXS_RXPWR_ANT0_POS(rev) APRXS_WD2_L_POS(rev)
+#define APRXS_RXPWR_ANT0_MASK 0xFFu
+#define APRXS_RXPWR_ANT0(rxh, rev, min_rev) \
+ ((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \
+ (rxh)->ge87_1.PHYRXSTATUS[APRXS_RXPWR_ANT0_POS(rev)] : \
+ (rxh)->ge85.PHYRXSTATUS[APRXS_RXPWR_ANT0_POS(rev)]) & \
+ APRXS_RXPWR_ANT0_MASK)
+
+// Rx power Antenna1
+#define APRXS_RXPWR_ANT1_POS(rev) APRXS_WD2_H_POS(rev)
+#define APRXS_RXPWR_ANT1_MASK 0xFFu
+#define APRXS_RXPWR_ANT1(rxh, rev, min_rev) \
+ ((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \
+ (rxh)->ge87_1.PHYRXSTATUS[APRXS_RXPWR_ANT1_POS(rev)] : \
+ (rxh)->ge85.PHYRXSTATUS[APRXS_RXPWR_ANT1_POS(rev)]) & \
+ APRXS_RXPWR_ANT1_MASK)
+
+// Rx power Antenna2
+#define APRXS_RXPWR_ANT2_POS(rev) APRXS_WD3_L_POS(rev)
+#define APRXS_RXPWR_ANT2_MASK 0xFFu
+#define APRXS_RXPWR_ANT2(rxh, rev, min_rev) \
+ ((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \
+ (rxh)->ge87_1.PHYRXSTATUS[APRXS_RXPWR_ANT2_POS(rev)] : \
+ (rxh)->ge85.PHYRXSTATUS[APRXS_RXPWR_ANT2_POS(rev)]) & \
+ APRXS_RXPWR_ANT2_MASK)
+
+// Rx power Antenna3
+#define APRXS_RXPWR_ANT3_POS(rev) APRXS_WD3_H_POS(rev)
+#define APRXS_RXPWR_ANT3_MASK 0xFFu
+#define APRXS_RXPWR_ANT3(rxh, rev, min_rev) \
+ ((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \
+ (rxh)->ge87_1.PHYRXSTATUS[APRXS_RXPWR_ANT3_POS(rev)] : \
+ (rxh)->ge85.PHYRXSTATUS[APRXS_RXPWR_ANT3_POS(rev)]) & \
+ APRXS_RXPWR_ANT3_MASK)
+
+// RX ELNA INDEX ANT0
+#define APRXS_ELNA_IDX_ANT0_POS(rev) APRXS_WD20_L_POS(rev)
+#define APRXS_ELNA_IDX_ANT0_MASK 0x2u
+#define APRXS_ELNA_IDX_ANT0_SHIFT 1u
+#define APRXS_ELNA_IDX_ANT0(rxh, rev, min_rev) \
+ (((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \
+ (rxh)->ge87_1.PHYRXSTATUS[APRXS_ELNA_IDX_ANT0_POS(rev)] : \
+ (rxh)->ge85.PHYRXSTATUS[APRXS_ELNA_IDX_ANT0_POS(rev)]) & \
+ APRXS_ELNA_IDX_ANT0_MASK) >> APRXS_ELNA_IDX_ANT0_SHIFT)
+
+// RX ELNA INDEX ANT1
+#define APRXS_ELNA_IDX_ANT1_POS(rev) APRXS_WD20_L_POS(rev)
+#define APRXS_ELNA_IDX_ANT1_MASK 0x20u
+#define APRXS_ELNA_IDX_ANT1_SHIFT 5u
+#define APRXS_ELNA_IDX_ANT1(rxh, rev, min_rev) \
+ (((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \
+ (rxh)->ge87_1.PHYRXSTATUS[APRXS_ELNA_IDX_ANT1_POS(rev)] : \
+ (rxh)->ge85.PHYRXSTATUS[APRXS_ELNA_IDX_ANT1_POS(rev)]) & \
+ APRXS_ELNA_IDX_ANT1_MASK) >> APRXS_ELNA_IDX_ANT1_SHIFT)
+
+// RX TIA INDEX ANT0 LO
+#define APRXS_TIA_IDX_ANT0_POS(rev) APRXS_WD16_L_POS(rev)
+#define APRXS_TIA_IDX_ANT0_MASK 0x1Cu
+#define APRXS_TIA_IDX_ANT0_SHIFT 2u
+#define APRXS_TIA_IDX_ANT0(rxh, rev, min_rev) \
+ (((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \
+ (rxh)->ge87_1.PHYRXSTATUS[APRXS_TIA_IDX_ANT0_POS(rev)] : \
+ (rxh)->ge85.PHYRXSTATUS[APRXS_TIA_IDX_ANT0_POS(rev)]) & \
+ APRXS_TIA_IDX_ANT0_MASK) >> APRXS_TIA_IDX_ANT0_SHIFT)
+
+// RX TIA INDEX ANT1 LO
+#define APRXS_TIA_IDX_ANT1_POS(rev) APRXS_WD18_L_POS(rev)
+#define APRXS_TIA_IDX_ANT1_MASK 0x1Cu
+#define APRXS_TIA_IDX_ANT1_SHIFT 2u
+#define APRXS_TIA_IDX_ANT1(rxh, rev, min_rev) \
+ (((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \
+ (rxh)->ge87_1.PHYRXSTATUS[APRXS_TIA_IDX_ANT1_POS(rev)] : \
+ (rxh)->ge85.PHYRXSTATUS[APRXS_TIA_IDX_ANT1_POS(rev)]) & \
+ APRXS_TIA_IDX_ANT1_MASK) >> APRXS_TIA_IDX_ANT1_SHIFT)
+
+// RX VSW INDEX ANT0
+#define APRXS_VSW_IDX_ANT0_POS(rev) APRXS_WD20_L_POS(rev)
+#define APRXS_VSW_IDX_ANT0_MASK 0x8u
+#define APRXS_VSW_IDX_ANT0_SHIFT 3u
+#define APRXS_VSW_IDX_ANT0(rxh, rev, min_rev) \
+ (((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \
+ (rxh)->ge87_1.PHYRXSTATUS[APRXS_VSW_IDX_ANT0_POS(rev)] : \
+ (rxh)->ge85.PHYRXSTATUS[APRXS_VSW_IDX_ANT0_POS(rev)]) & \
+ APRXS_VSW_IDX_ANT0_MASK) >> APRXS_VSW_IDX_ANT0_SHIFT)
+
+// RX VSW INDEX ANT1
+#define APRXS_VSW_IDX_ANT1_POS(rev) APRXS_WD20_L_POS(rev)
+#define APRXS_VSW_IDX_ANT1_MASK 0x80u
+#define APRXS_VSW_IDX_ANT1_SHIFT 7u
+#define APRXS_VSW_IDX_ANT1(rxh, rev, min_rev) \
+ (((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \
+ (rxh)->ge87_1.PHYRXSTATUS[APRXS_VSW_IDX_ANT1_POS(rev)] : \
+ (rxh)->ge85.PHYRXSTATUS[APRXS_VSW_IDX_ANT1_POS(rev)]) & \
+ APRXS_VSW_IDX_ANT1_MASK) >> APRXS_VSW_IDX_ANT1_SHIFT)
+
+// RSSI fractional bits
+#define APRXS_RXPWR_FRAC_POS(rev) APRXS_WD4_L_POS(rev)
+#define APRXS_RXPWR_FRAC_MASK 0xFFu
+#define APRXS_RXPWR_FRAC(rxh, rev, min_rev) \
+ ((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \
+ (rxh)->ge87_1.PHYRXSTATUS[APRXS_RXPWR_FRAC_POS(rev)] : \
+ (rxh)->ge85.PHYRXSTATUS[APRXS_RXPWR_FRAC_POS(rev)]) & \
+ APRXS_RXPWR_FRAC_MASK)
+
+// Ucode overwrites ClipCount with GILTF
+#define APRXS_GILTF_POS(rev) APRXS_WD4_H_POS(rev)
+#define APRXS_GILTF_MASK 0x18u
+#define APRXS_GILTF_SHIFT 3u
+#define APRXS_GILTF(rxh, rev, min_rev) \
+ (((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \
+ (rxh)->ge87_1.PHYRXSTATUS[APRXS_GILTF_POS(rev)] : \
+ (rxh)->ge85.PHYRXSTATUS[APRXS_GILTF_POS(rev)]) & \
+ APRXS_GILTF_MASK) >> APRXS_GILTF_SHIFT)
+
+#define APRXS_DYNBWINNONHT_POS(rev) APRXS_WD4_H_POS(rev)
+#define APRXS_DYNBWINNONHT_MASK 0x20u
+#define APRXS_DYNBWINNONHT_SHIFT 5u
+#define APRXS_DYNBWINNONHT(rxh, rev, min_rev) \
+ (((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \
+ (rxh)->ge87_1.PHYRXSTATUS[APRXS_DYNBWINNONHT_POS(rev)] : \
+ (rxh)->ge85.PHYRXSTATUS[APRXS_DYNBWINNONHT_POS(rev)]) & \
+ APRXS_DYNBWINNONHT_MASK) >> APRXS_DYNBWINNONHT_SHIFT)
+
+#define APRXS_MCSSQSNR0_POS(rev) APRXS_WD5_L_POS(rev)
+#define APRXS_MCSSQSNR0_MASK 0xFFu
+#define APRXS_MCSSQSNR0(rxh, rev, min_rev) \
+ ((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \
+ (rxh)->ge87_1.PHYRXSTATUS[APRXS_MCSSQSNR0_POS(rev)] : \
+ (rxh)->ge85.PHYRXSTATUS[APRXS_MCSSQSNR0_POS(rev)]) & \
+ APRXS_MCSSQSNR0_MASK)
+
+#define APRXS_MCSSQSNR1_POS(rev) APRXS_WD5_H_POS(rev)
+#define APRXS_MCSSQSNR1_MASK 0xFFu
+#define APRXS_MCSSQSNR1(rxh, rev, min_rev) \
+ ((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \
+ (rxh)->ge87_1.PHYRXSTATUS[APRXS_MCSSQSNR1_POS(rev)] : \
+ (rxh)->ge85.PHYRXSTATUS[APRXS_MCSSQSNR1_POS(rev)]) & \
+ APRXS_MCSSQSNR1_MASK)
+
+#define APRXS_MCSSQSNR2_POS(rev) APRXS_WD6_L_POS(rev)
+#define APRXS_MCSSQSNR2_MASK 0xFFu
+#define APRXS_MCSSQSNR2(rxh, rev, min_rev) \
+ ((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \
+ (rxh)->ge87_1.PHYRXSTATUS[APRXS_MCSSQSNR2_POS(rev)] : \
+ (rxh)->ge85.PHYRXSTATUS[APRXS_MCSSQSNR2_POS(rev)]) & \
+ APRXS_MCSSQSNR2_MASK)
+
+#define APRXS_CHBWINNONHT_POS(rev) APRXS_WD8_H_POS(rev)
+#define APRXS_CHBWINNONHT_MASK 0x3u
+#define APRXS_CHBWINNONHT(rxh, rev, min_rev) \
+ ((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \
+ (rxh)->ge87_1.PHYRXSTATUS[APRXS_CHBWINNONHT_POS(rev)] : \
+ (rxh)->ge85.PHYRXSTATUS[APRXS_CHBWINNONHT_POS(rev)]) & \
+ APRXS_CHBWINNONHT_MASK)
+
+// User type
+#define APRXS_USTY_POS(rev) APRXS_WD23_H_POS(rev)
+#define APRXS_USTY_MASK 0xE0u
+#define APRXS_USTY_SHIFT 5u
+#define APRXS_USTY(rxh, rev, min_rev) \
+ (((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \
+ (rxh)->ge87_1.PHYRXSTATUS[APRXS_USTY_POS(rev)] : \
+ (rxh)->ge85.PHYRXSTATUS[APRXS_USTY_POS(rev)]) & \
+ APRXS_USTY_MASK) >> APRXS_USTY_SHIFT)
+
+// 11ax frame format
+#define APRXS_AXFF_POS(rev) APRXS_WD20_H_POS(rev)
+#define APRXS_AXFF_MASK 0x7u
+#define APRXS_AXFF(rxh, rev, min_rev) \
+ ((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \
+ (rxh)->ge87_1.PHYRXSTATUS[APRXS_AXFF_POS(rev)] : \
+ (rxh)->ge85.PHYRXSTATUS[APRXS_AXFF_POS(rev)]) & \
+ APRXS_AXFF_MASK)
+
+// MCS
+#define APRXS_AXMCS_POS(rev) APRXS_WD21_H_POS(rev)
+#define APRXS_AXMCS_MASK 0xFu
+#define APRXS_AXMCS(rxh, rev, min_rev) \
+ ((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \
+ (rxh)->ge87_1.PHYRXSTATUS[APRXS_AXMCS_POS(rev)] : \
+ (rxh)->ge85.PHYRXSTATUS[APRXS_AXMCS_POS(rev)]) & \
+ APRXS_AXMCS_MASK)
+
+// Coding
+#define APRXS_CODING_POS(rev) APRXS_WD21_H_POS(rev)
+#define APRXS_CODING_MASK 0x10u
+#define APRXS_CODING_SHIFT 4u
+#define APRXS_CODING(rxh, rev, min_rev) \
+ (((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \
+ (rxh)->ge87_1.PHYRXSTATUS[APRXS_CODING_POS(rev)] : \
+ (rxh)->ge85.PHYRXSTATUS[APRXS_CODING_POS(rev)]) & \
+ APRXS_CODING_MASK) >> APRXS_CODING_SHIFT)
+
+// STAID
+#define APRXS_AX_STAID_L_POS(rev) APRXS_WD22_L_POS(rev)
+#define APRXS_AX_STAID_L_MASK 0xFFu
+#define APRXS_AX_STAID_L(rxh, rev, min_rev) \
+ ((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \
+ (rxh)->ge87_1.PHYRXSTATUS[APRXS_AX_STAID_L_POS(rev)] : \
+ (rxh)->ge85.PHYRXSTATUS[APRXS_AX_STAID_L_POS(rev)]) & \
+ APRXS_AX_STAID_L_MASK)
+
+#define APRXS_AX_STAID_H_POS(rev) APRXS_WD22_H_POS(rev)
+#define APRXS_AX_STAID_H_MASK 0x03u
+#define APRXS_AX_STAID_H(rxh, rev, min_rev) \
+ ((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \
+ (rxh)->ge87_1.PHYRXSTATUS[APRXS_AX_STAID_H_POS(rev)] : \
+ (rxh)->ge85.PHYRXSTATUS[APRXS_AX_STAID_H_POS(rev)]) & \
+ APRXS_AX_STAID_H_MASK)
+
+#define APRXS_AX_STAID(rxh, rev, min_rev) ((APRXS_AX_STAID_H(rxh, rev, min_rev) << 8) |\
+ APRXS_AX_STAID_L(rxh, rev, min_rev))
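+/* Editor's sketch (not from the original sources): WD22_L carries the low
+ * byte and WD22_H bits 0..1 the high bits, so the assembled STA-ID is a
+ * 10-bit value:
+ *
+ *   uint16 staid = APRXS_AX_STAID(rxh, rev, min_rev);  // 0..0x3FF
+ */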
+
+// NSTS
+#define APRXS_NSTS_POS(rev) APRXS_WD22_H_POS(rev)
+#define APRXS_NSTS_MASK 0x38u
+#define APRXS_NSTS_SHIFT 3u
+#define APRXS_NSTS(rxh, rev, min_rev) \
+ (((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \
+ (rxh)->ge87_1.PHYRXSTATUS[APRXS_NSTS_POS(rev)] : \
+ (rxh)->ge85.PHYRXSTATUS[APRXS_NSTS_POS(rev)]) & \
+ APRXS_NSTS_MASK) >> APRXS_NSTS_SHIFT)
+
+// TXBF
+#define APRXS_TXBF_POS(rev) APRXS_WD22_H_POS(rev)
+#define APRXS_TXBF_MASK 0x40u
+#define APRXS_TXBF_SHIFT 6u
+#define APRXS_TXBF(rxh, rev, min_rev) \
+ (((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \
+ (rxh)->ge87_1.PHYRXSTATUS[APRXS_TXBF_POS(rev)] : \
+ (rxh)->ge85.PHYRXSTATUS[APRXS_TXBF_POS(rev)]) & \
+ APRXS_TXBF_MASK) >> APRXS_TXBF_SHIFT)
+
+//DCM
+#define APRXS_DCM_POS(rev) APRXS_WD22_H_POS(rev)
+#define APRXS_DCM_MASK 0x80u
+#define APRXS_DCM_SHIFT 7u
+#define APRXS_DCM(rxh, rev, min_rev) \
+ (((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \
+ (rxh)->ge87_1.PHYRXSTATUS[APRXS_DCM_POS(rev)] : \
+ (rxh)->ge85.PHYRXSTATUS[APRXS_DCM_POS(rev)]) & \
+ APRXS_DCM_MASK) >> APRXS_DCM_SHIFT)
+
+// RU Offset
+#define APRXS_AX_RUALLOC_POS(rev) APRXS_WD23_L_POS(rev)
+#define APRXS_AX_RUALLOC_MASK 0x7Fu
+#define APRXS_AX_RUALLOC_SHIFT 0u
+#define APRXS_AX_RUALLOC(rxh, rev, min_rev) \
+ (((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \
+ (rxh)->ge87_1.PHYRXSTATUS[APRXS_AX_RUALLOC_POS(rev)] : \
+ (rxh)->ge85.PHYRXSTATUS[APRXS_AX_RUALLOC_POS(rev)]) & \
+ APRXS_AX_RUALLOC_MASK) >> APRXS_AX_RUALLOC_SHIFT)
+
+#define APRXS_PE_L_POS(rev) APRXS_WD23_L_POS(rev)
+#define APRXS_PE_L_MASK 0x80u
+#define APRXS_PE_L_SHIFT 7u
+#define APRXS_PE_L(rxh, rev, min_rev) \
+ (((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \
+ (rxh)->ge87_1.PHYRXSTATUS[APRXS_PE_L_POS(rev)] : \
+ (rxh)->ge85.PHYRXSTATUS[APRXS_PE_L_POS(rev)]) & \
+ APRXS_PE_L_MASK) >> APRXS_PE_L_SHIFT)
+
+#define APRXS_PE_H_POS(rev) APRXS_WD23_H_POS(rev)
+#define APRXS_PE_H_MASK 0x3u
+#define APRXS_PE_H(rxh, rev, min_rev) \
+ ((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \
+ (rxh)->ge87_1.PHYRXSTATUS[APRXS_PE_H_POS(rev)] : \
+ (rxh)->ge85.PHYRXSTATUS[APRXS_PE_H_POS(rev)]) & \
+ APRXS_PE_H_MASK)
+
+#define APRXS_PE(rxh, rev, rev_min) \
+ ((APRXS_PE_H(rxh, rev, rev_min) << 1) | APRXS_PE_L(rxh, rev, rev_min))
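+/* Editor's sketch (not from the original sources): PE_L is bit 7 of WD23_L
+ * and PE_H is bits 0..1 of WD23_H, so the combined packet extension field
+ * is 3 bits wide:
+ *
+ *   uint8 pe = APRXS_PE(rxh, rev, rev_min);  // (PE_H << 1) | PE_L, 0..7
+ */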
+
+#define APRXS_RU_POS(rev) APRXS_WD23_H_POS(rev)
+#define APRXS_RU_MASK 0x1Cu
+#define APRXS_RU_SHIFT 2u
+#define APRXS_RU(rxh, rev, min_rev) \
+ (((D11REV_MAJ_MIN_GE(rev, min_rev, 87, 1) ? \
+ (rxh)->ge87_1.PHYRXSTATUS[APRXS_RU_POS(rev)] : \
+ (rxh)->ge85.PHYRXSTATUS[APRXS_RU_POS(rev)]) & \
+ APRXS_RU_MASK) >> APRXS_RU_SHIFT)
+
+#endif /* _d11_autophyrxsts_ */
+
+#if defined(AUTO_PHYRXSTS)
+#define AUTO_PHYRXSTS_ENAB() 1u
+#else
+#define AUTO_PHYRXSTS_ENAB() 0u
+#endif /* AUTO_PHYRXSTS */
+
+/* PhyRxStatus_0: */
+#define PRXS0_FT_MASK 0x0003u /**< [PRE-HE] NPHY only: CCK, OFDM, HT, VHT */
+#define PRXS0_CLIP_MASK 0x000Cu /**< NPHY only: clip count adjustment steps by AGC */
+#define PRXS0_CLIP_SHIFT 2u /**< SHIFT bits for clip count adjustment */
+#define PRXS0_UNSRATE 0x0010u /**< PHY received a frame with unsupported rate */
+#define PRXS0_UNSRATE_SHIFT 4u
+#define PRXS0_RXANT_UPSUBBAND 0x0020u /**< GPHY: rx ant, NPHY: upper sideband */
+#define PRXS0_LCRS 0x0040u /**< CCK frame only: lost crs during cck frame reception */
+#define PRXS0_SHORTH 0x0080u /**< Short Preamble */
+#define PRXS0_SHORTH_SHIFT 7u
+#define PRXS0_PLCPFV 0x0100u /**< PLCP violation */
+#define PRXS0_PLCPFV_SHIFT 8u
+#define PRXS0_PLCPHCF 0x0200u /**< PLCP header integrity check failed */
+#define PRXS0_PLCPHCF_SHIFT 9u
+#define PRXS0_GAIN_CTL 0x4000u /**< legacy PHY gain control */
+#define PRXS0_ANTSEL_MASK 0xF000u /**< NPHY: Antennas used for received frame, bitmask */
+#define PRXS0_ANTSEL_SHIFT 12u /**< SHIFT bits for Antennas used for received frame */
+#define PRXS0_PPDU_MASK 0x1000u /**< PPDU type SU/MU */
+
+/* subfield PRXS0_FT_MASK [PRXS0_PRE_HE_FT_MASK] */
+#define PRXS0_CCK 0x0000u
+#define PRXS0_OFDM 0x0001u /**< valid only for G phy, use rxh->RxChan for A phy */
+#define PRXS0_PREN 0x0002u
+#define PRXS0_STDN 0x0003u
+
+/* subfield PRXS0_ANTSEL_MASK */
+#define PRXS0_ANTSEL_0 0x0u /**< antenna 0 is used */
+#define PRXS0_ANTSEL_1 0x2u /**< antenna 1 is used */
+#define PRXS0_ANTSEL_2 0x4u /**< antenna 2 is used */
+#define PRXS0_ANTSEL_3 0x8u /**< antenna 3 is used */
+
+/* PhyRxStatus_1: */
+#define PRXS1_JSSI_MASK 0x00FFu
+#define PRXS1_JSSI_SHIFT 0u
+#define PRXS1_SQ_MASK 0xFF00u
+#define PRXS1_SQ_SHIFT 8u
+#define PRXS1_COREMAP 0x000Fu /**< core enable bits for core 0/1/2/3 */
+#define PRXS1_ANTCFG 0x00F0u /**< antenna configuration bits */
+
+#define PHY_COREMAP_LT85(rxh, rev) \
+ ((D11REV_GE(rev, 80) ? D11RXHDR_GE80_ACCESS_VAL(rxh, PhyRxStatus_1) : \
+ D11RXHDR_LT80_ACCESS_VAL(rxh, PhyRxStatus_1)) & \
+ PRXS1_COREMAP)
+#define PHY_COREMAP(rev, rev_min, rxh) (AUTO_PHYRXSTS_ENAB() ? \
+ APRXS_COREMASK(rxh, rev, rev_min) : PHY_COREMAP_LT85(rxh, rev))
+
+#define PHY_ANTMAP_LT85(rxh, corerev) \
+ (((D11REV_GE(corerev, 80) ? D11RXHDR_GE80_ACCESS_VAL(rxh, PhyRxStatus_1) : \
+ D11RXHDR_LT80_ACCESS_VAL(rxh, PhyRxStatus_1)) & \
+ PRXS1_ANTCFG) >> 4)
+#define PHY_ANTMAP(rev, rev_min, rxh) (AUTO_PHYRXSTS_ENAB() ? \
+ APRXS_ANTCFG(rxh, rev, rev_min) : PHY_ANTMAP_LT85(rxh, rev))
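+/* Editor's sketch (not from the original sources): the PHY_* accessors above
+ * share one dispatch pattern -- with AUTO_PHYRXSTS compiled in, the
+ * byte-stream APRXS_* accessors are used, otherwise the fixed word layout:
+ *
+ *   uint8 coremap = PHY_COREMAP(rev, rev_min, rxh);
+ *   uint8 antmap  = PHY_ANTMAP(rev, rev_min, rxh);
+ */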
+
+/* nphy PhyRxStatus_1: */
+#define PRXS1_nphy_PWR0_MASK 0x00FF
+#define PRXS1_nphy_PWR1_MASK 0xFF00
+
+/* PhyRxStatus_2: */
+#define PRXS2_LNAGN_MASK 0xC000
+#define PRXS2_LNAGN_SHIFT 14
+#define PRXS2_PGAGN_MASK 0x3C00
+#define PRXS2_PGAGN_SHIFT 10
+#define PRXS2_FOFF_MASK 0x03FF
+
+/* nphy PhyRxStatus_2: */
+#define PRXS2_nphy_SQ_ANT0 0x000F /**< nphy overall signal quality for antenna 0 */
+#define PRXS2_nphy_SQ_ANT1 0x00F0 /**< nphy overall signal quality for antenna 1 */
+#define PRXS2_nphy_cck_SQ 0x00FF /**< bphy signal quality(when FT field is 0) */
+#define PRXS3_nphy_SSQ_MASK 0xFF00 /**< spatial conditioning of the two receive channels */
+#define PRXS3_nphy_SSQ_SHIFT 8
+
+/* PhyRxStatus_3: */
+#define PRXS3_DIGGN_MASK 0x1800
+#define PRXS3_DIGGN_SHIFT 11
+#define PRXS3_TRSTATE 0x0400
+
+/* nphy PhyRxStatus_3: */
+#define PRXS3_nphy_MMPLCPLen_MASK 0x0FFF /**< Mixed-mode preamble PLCP length */
+#define PRXS3_nphy_MMPLCP_RATE_MASK 0xF000 /**< Mixed-mode preamble rate field */
+#define PRXS3_nphy_MMPLCP_RATE_SHIFT 12
+
+/* HTPHY Rx Status defines */
+/* htphy PhyRxStatus_0: those bit are overlapped with PhyRxStatus_0 */
+#define PRXS0_BAND 0x0400 /**< 0 = 2.4G, 1 = 5G */
+#define PRXS0_RSVD 0x0800 /**< reserved; set to 0 */
+#define PRXS0_UNUSED 0xF000 /**< unused and not defined; set to 0 */
+
+/* htphy PhyRxStatus_1: */
+#define PRXS1_HTPHY_MMPLCPLenL_MASK 0xFF00 /**< Mixmode PLCP Length low byte mask */
+
+/* htphy PhyRxStatus_2: */
+#define PRXS2_HTPHY_MMPLCPLenH_MASK 0x000F /**< Mixmode PLCP Length high byte mask */
+#define PRXS2_HTPHY_MMPLCH_RATE_MASK 0x00F0 /**< Mixmode PLCP rate mask */
+#define PRXS2_HTPHY_RXPWR_ANT0 0xFF00 /**< Rx power on core 0 */
+
+/* htphy PhyRxStatus_3: */
+#define PRXS3_HTPHY_RXPWR_ANT1 0x00FF /**< Rx power on core 1 */
+#define PRXS3_HTPHY_RXPWR_ANT2 0xFF00 /**< Rx power on core 2 */
+
+/* htphy PhyRxStatus_4: */
+#define PRXS4_HTPHY_RXPWR_ANT3 0x00FF /**< Rx power on core 3 */
+#define PRXS4_HTPHY_CFO 0xFF00 /**< Coarse frequency offset */
+
+/* htphy PhyRxStatus_5: */
+#define PRXS5_HTPHY_FFO 0x00FF /**< Fine frequency offset */
+#define PRXS5_HTPHY_AR 0xFF00 /**< Advance Retard */
+
+/* ACPHY RxStatus defs */
+
+/* ACPHY PhyRxStatus_0: */
+#define PRXS0_ACPHY_FT_MASK 0x0003 /**< CCK, OFDM, HT, VHT */
+#define PRXS0_ACPHY_CLIP_MASK 0x000C /**< clip count adjustment steps by AGC */
+#define PRXS0_ACPHY_CLIP_SHIFT 2
+#define PRXS0_ACPHY_UNSRATE 0x0010 /**< PHY received a frame with unsupported rate */
+#define PRXS0_ACPHY_BAND5G 0x0020 /**< Rx Band indication: 0 -> 2G, 1 -> 5G */
+#define PRXS0_ACPHY_LCRS 0x0040 /**< CCK frame only: lost crs during cck frame reception */
+#define PRXS0_ACPHY_SHORTH 0x0080 /**< Short Preamble (CCK), GF preamble (HT) */
+#define PRXS0_ACPHY_PLCPFV 0x0100 /**< PLCP violation */
+#define PRXS0_ACPHY_PLCPHCF 0x0200 /**< PLCP header integrity check failed */
+#define PRXS0_ACPHY_MFCRS 0x0400 /**< Matched Filter CRS fired */
+#define PRXS0_ACPHY_ACCRS 0x0800 /**< Autocorrelation CRS fired */
+#define PRXS0_ACPHY_SUBBAND_MASK 0xF000 /**< FinalBWClassification:
+ * lower nibble Bitfield of sub-bands occupied by Rx frame
+ */
+/* ACPHY PhyRxStatus_1: */
+#define PRXS1_ACPHY_ANT_CORE0 0x0001 /* Antenna Config for core 0 */
+#define PRXS1_ACPHY_SUBBAND_MASK_GEN2 0xFF00 /**< FinalBWClassification:
+ * lower byte Bitfield of sub-bands occupied by Rx frame
+ */
+#define PRXS0_ACPHY_SUBBAND_SHIFT 12
+#define PRXS1_ACPHY_SUBBAND_SHIFT_GEN2 8
+
+/* acphy PhyRxStatus_2 & PhyRxStatus_3: */
+#define PRXS2_ACPHY_RXPWR_ANT0 0xFF00 /**< Rx power on core 0 */
+#define PRXS3_ACPHY_RXPWR_ANT1 0x00FF /**< Rx power on core 1 */
+#define PRXS3_ACPHY_RXPWR_ANT2 0xFF00 /**< Rx power on core 2 */
+#define PRXS3_ACPHY_SNR_ANT0 0xFF00 /* SNR on core 0 */
+
+/* acphy PhyRxStatus_4: */
+/** FinalBWClassification:upper nibble of sub-bands occupied by Rx frame */
+#define PRXS4_ACPHY_SUBBAND_MASK 0x000F
+#define PRXS4_ACPHY_RXPWR_ANT3 0x00FF /**< Rx power on core 3 */
+#define PRXS4_ACPHY_SNR_ANT1 0xFF00 /* SNR on core 1 */
+
+#define PRXS5_ACPHY_CHBWINNONHT_MASK 0x0003
+#define PRXS5_ACPHY_CHBWINNONHT_20MHZ 0
+#define PRXS5_ACPHY_CHBWINNONHT_40MHZ 1
+#define PRXS5_ACPHY_CHBWINNONHT_80MHZ 2
+#define PRXS5_ACPHY_CHBWINNONHT_160MHZ 3 /* includes 80+80 */
+#define PRXS5_ACPHY_DYNBWINNONHT_MASK 0x0004
+
+/** Get Rx power on core 0 */
+#define ACPHY_RXPWR_ANT0(rxs) (((rxs)->lt80.PhyRxStatus_2 & PRXS2_ACPHY_RXPWR_ANT0) >> 8)
+/** Get Rx power on core 1 */
+#define ACPHY_RXPWR_ANT1(rxs) ((rxs)->lt80.PhyRxStatus_3 & PRXS3_ACPHY_RXPWR_ANT1)
+/** Get Rx power on core 2 */
+#define ACPHY_RXPWR_ANT2(rxs) (((rxs)->lt80.PhyRxStatus_3 & PRXS3_ACPHY_RXPWR_ANT2) >> 8)
+/** Get Rx power on core 3 */
+#define ACPHY_RXPWR_ANT3(rxs) ((rxs)->lt80.PhyRxStatus_4 & PRXS4_ACPHY_RXPWR_ANT3)
+
+/** MCSSQSNR location access. MCSSQ usage is limited by chip-specific
+ * implementations, and there is no way to commonize these status locations yet.
+ * TODO: revisit these definitions once the storage locations are settled.
+ */
+
+/* exception handling */
+#ifdef PHY_CORE_MAX
+#if PHY_CORE_MAX > 4
+#error "PHY_CORE_MAX is exceeded more than MCSSQSNR defs (4)"
+#endif
+#endif /* PHY_CORE_MAX */
+
+/* revs 48/55/59 are obsolete for SNR in trunk */
+#define D11_PRXS_MCSSQ_SNR_SUPPORT(corerev) (D11REV_GE((corerev), 80))
+
+#define ACPHY_SNR_MASK (0xFF)
+#define ACPHY_SNR_SHIFT (8)
+
+#define PRXS5_ACPHY_DYNBWINNONHT(rxs) ((rxs)->lt80.PhyRxStatus_5 & PRXS5_ACPHY_DYNBWINNONHT_MASK)
+#define PRXS5_ACPHY_CHBWINNONHT(rxs) ((rxs)->lt80.PhyRxStatus_5 & PRXS5_ACPHY_CHBWINNONHT_MASK)
+
+#define D11N_MMPLCPLen(rxs) ((rxs)->lt80.PhyRxStatus_3 & PRXS3_nphy_MMPLCPLen_MASK)
+#define D11HT_MMPLCPLen(rxs) ((((rxs)->lt80.PhyRxStatus_1 & PRXS1_HTPHY_MMPLCPLenL_MASK) >> 8) | \
+ (((rxs)->lt80.PhyRxStatus_2 & PRXS2_HTPHY_MMPLCPLenH_MASK) << 8))
+
+/* REV80 Definitions (corerev >= 80) */
+
+/** Dma_flags Masks */
+#define RXS_PHYRXST_VALID_REV_GE80 0x02
+
+/** Get RxStatus1 */
+#define RXSTATUS1_REV_GE87_1(rxs) ((rxs)->ge87_1.RxStatus1)
+#define RXSTATUS1_REV_GE80(rxs) ((rxs)->ge80.RxStatus1)
+#define RXSTATUS1_REV_LT80(rxs) ((rxs)->lt80.RxStatus1)
+
+#define PHY_RXSTATUS1(corerev, corerev_minor, rxs) \
+ (D11REV_MAJ_MIN_GE(corerev, corerev_minor, 87, 1) ? RXSTATUS1_REV_GE87_1(rxs) : \
+ D11REV_GE(corerev, 80) ? RXSTATUS1_REV_GE80(rxs) : \
+ RXSTATUS1_REV_LT80(rxs))
+
+/* (FT Mask) PhyRxStatus_0: */
+#define PRXS0_FT_MASK_REV_LT80 PRXS0_FT_MASK /**< (corerev < 80) frame type field mask */
+
+#define PRXS0_FT_SHIFT_REV_GE80 8
+#define PRXS0_FT_MASK_REV_GE80 0x0700 /**
+ * (corerev >= 80) frame type field mask.
+ *
+ * 0 = CCK, 1 = 11a/g legacy OFDM,
+ * 2 = HT, 3 = VHT, 4 = 11ah, 5 = HE,
+ * 6-15 Rsvd.
+ */
+
+/**
+ * Macro to find the frame type from the RX header based on corerev.
+ *
+ * Note: From rev80 onwards the frame type is indicated only
+ * in the phyrxstatus, which is valid only for the last
+ * MPDU of an AMPDU. Since FT is required for every MPDU,
+ * the frame type for core-revs >= 80 is provided in
+ * bits (8:10) of the MuRate field in the RXH.
+ */
+#define D11PPDU_FT(rxh, rev) (\
+ (D11REV_GE(rev, 80) ? \
+ ((D11RXHDR_ACCESS_VAL(rxh, rev, 0, MuRate) & PRXS_FT_MASK(rev)) >> \
+ (PRXS0_FT_SHIFT_REV_GE80)) : \
+ (D11RXHDR_LT80_ACCESS_VAL(rxh, PhyRxStatus_0) & PRXS_FT_MASK(rev))))
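+/* Editor's sketch (not from the original sources): a hypothetical per-MPDU
+ * frame type check; the PRXS0_* values above name the rev < 80 encodings and
+ * PRXS0_HE (below) is among the rev >= 80 ones:
+ *
+ *   if (D11PPDU_FT(rxh, corerev) == PRXS0_OFDM) {
+ *       // legacy 11a/g OFDM MPDU
+ *   }
+ */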
+
+#define PRXS_UNSRATE_LT85(rxh, rev) \
+ (((D11REV_GE(rev, 80) ? D11RXHDR_GE80_ACCESS_VAL(rxh, PhyRxStatus_0) : \
+ D11RXHDR_LT80_ACCESS_VAL(rxh, PhyRxStatus_0)) & \
+ PRXS0_UNSRATE) >> PRXS0_UNSRATE_SHIFT)
+
+#define PRXS_UNSRATE(rxh, rev, min_rev) (AUTO_PHYRXSTS_ENAB() ? \
+ APRXS_UNSRATE(rxh, rev, min_rev) : PRXS_UNSRATE_LT85(rxh, rev))
+
+// 1: short (or GF) preamble, 0: long (or MM) preamble
+#define PRXS_SHORTH_LT85(rxh, rev) \
+ (((D11REV_GE(rev, 80) ? D11RXHDR_GE80_ACCESS_VAL(rxh, PhyRxStatus_0) : \
+ D11RXHDR_LT80_ACCESS_VAL(rxh, PhyRxStatus_0)) & \
+ PRXS0_SHORTH) >> PRXS0_SHORTH_SHIFT)
+#define PRXS_SHORTH(rxh, rev, min_rev) \
+ (AUTO_PHYRXSTS_ENAB() ? APRXS_SHORTH(rxh, rev, min_rev) : \
+ PRXS_SHORTH_LT85(rxh, rev))
+
+#define PRXS_PLCPFV_LT85(rxh, rev) \
+ (((D11REV_GE(rev, 80) ? D11RXHDR_GE80_ACCESS_VAL(rxh, PhyRxStatus_0) : \
+ D11RXHDR_LT80_ACCESS_VAL(rxh, PhyRxStatus_0)) & \
+ PRXS0_PLCPFV) >> PRXS0_PLCPFV_SHIFT)
+#define PRXS_PLCPFV(rxh, rev, rev_min) (AUTO_PHYRXSTS_ENAB() ? \
+ APRXS_PLCPFV(rxh, rev, rev_min) : PRXS_PLCPFV_LT85(rxh, rev))
+
+#define PRXS_PLCPHCF_LT85(rxh, rev) \
+ (((D11REV_GE(rev, 80) ? D11RXHDR_GE80_ACCESS_VAL(rxh, PhyRxStatus_0) : \
+ D11RXHDR_LT80_ACCESS_VAL(rxh, PhyRxStatus_0)) & \
+ PRXS0_PLCPHCF) >> PRXS0_PLCPHCF_SHIFT)
+#define PRXS_PLCPHCF(rxh, rev, rev_min) (AUTO_PHYRXSTS_ENAB() ? \
+ APRXS_PLCPHCF(rxh, rev, rev_min) : PRXS_PLCPHCF_LT85(rxh, rev))
+
+// final BW classification
+#define PRXS_SUBBAND_ACPHY(rxh, rev, rev_min) \
+ (((D11RXHDR_LT80_ACCESS_VAL(rxh, PhyRxStatus_0) & \
+ PRXS0_ACPHY_SUBBAND_MASK) >> PRXS0_ACPHY_SUBBAND_SHIFT) | \
+ ((D11RXHDR_LT80_ACCESS_VAL(rxh, PhyRxStatus_4) & \
+ PRXS4_ACPHY_SUBBAND_MASK) << 4))
+#define PRXS_SUBBAND_ACPHY2(rxh, rev, rev_min) \
+ (((D11REV_GE(rev, 80) ? D11RXHDR_GE80_ACCESS_VAL(rxh, PhyRxStatus_1) : \
+ D11RXHDR_LT80_ACCESS_VAL(rxh, PhyRxStatus_1)) & PRXS1_ACPHY2_SUBBAND_MASK) >> \
+ PRXS1_ACPHY2_SUBBAND_SHIFT)
+
+#define PRXS_SUBBAND(rxh, rev, rev_min, phyrev) (AUTO_PHYRXSTS_ENAB() ? \
+ APRXS_SUBBAND(rxh, rev, rev_min) : (ACREV_GE(phyrev, 32) ? \
+ PRXS_SUBBAND_ACPHY2(rxh, rev, rev_min) : \
+ PRXS_SUBBAND_ACPHY(rxh, rev, rev_min)))
+
+/* Macros to access MCS, NSTS and MU validity from the MuRate field in corerev >= 80 RXH */
+#define RXS_MU_VALID_MASK_REV80 0x0080
+#define RXS_MU_VALID_SHIFT_REV80 7
+#define RXS_MCS_MASK_REV80 0x000F
+#define RXS_MCS_SHIFT_REV80 0
+#define RXS_NSTS_MASK_REV80 0x0070
+#define RXS_NSTS_SHIFT_REV80 4
+
+#define D11PPDU_ISMU_REV80(rxh, corerev, corerev_minor) \
+ ((D11RXHDR_ACCESS_VAL(rxh, corerev, corerev_minor, MuRate) & \
+ (RXS_MU_VALID_MASK_REV80)) >> RXS_MU_VALID_SHIFT_REV80)
+#define D11RXHDR_GE80_GET_MCS(rxh, corerev, corerev_minor) \
+ ((D11RXHDR_ACCESS_VAL(rxh, corerev, corerev_minor, MuRate) & \
+ (RXS_MCS_MASK_REV80)) >> RXS_MCS_SHIFT_REV80)
+#define D11RXHDR_GE80_GET_NSTS(rxh, corerev, corerev_minor) \
+ ((D11RXHDR_ACCESS_VAL(rxh, corerev, corerev_minor, MuRate) & \
+ (RXS_NSTS_MASK_REV80)) >> RXS_NSTS_SHIFT_REV80)
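+/* Editor's sketch (not from the original sources): since MuRate is carried
+ * in every RXH, rate info can be read per MPDU:
+ *
+ *   if (D11PPDU_ISMU_REV80(rxh, corerev, corerev_minor)) {
+ *       uint8 mcs  = D11RXHDR_GE80_GET_MCS(rxh, corerev, corerev_minor);
+ *       uint8 nsts = D11RXHDR_GE80_GET_NSTS(rxh, corerev, corerev_minor);
+ *   }
+ */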
+
+/* subfield PRXS0_FT_MASK_REV_GE80 */
+#define PRXS0_HE 0x0004 /**< HE frame type */
+
+/* (Corerev >= 80) PhyRxStatus_2: */
+#define PRXS2_RXPWR_ANT0_REV_GE80 0x00FF /**< (corerev >= 80) Rx power on first antenna */
+#define PRXS2_RXPWR_ANT1_REV_GE80 0xFF00 /**< (corerev >= 80) Rx power on second antenna */
+
+/* (Corerev >= 80) PhyRxStatus_3: */
+#define PRXS3_RXPWR_ANT2_REV_GE80 0x00FF /**< (corerev >= 80) Rx power on third antenna */
+#define PRXS3_RXPWR_ANT3_REV_GE80 0xFF00 /**
+ * (corerev >= 80) Rx power on fourth antenna.
+ *
+ * Note: For PHY revs 3 and > 4, OCL Status
+ * byte 0 will be reported if PHY register
+ * OCL_RxStatus_Ctrl is set to 0x2 or 0x6.
+ */
+#define PRXS3_RXPWR_FRAC_REV_GE80 0xFFu
+
+/** Get Rx power on ANT 0 */
+#define RXPWR_ANT0_REV_GE80(rxs) ((rxs)->ge80.PhyRxStatus_2 & \
+ (PRXS2_RXPWR_ANT0_REV_GE80))
+
+#define PHY_RXPWR_ANT0(corerev, corerev_minor, rxs) (AUTO_PHYRXSTS_ENAB() ? \
+ APRXS_RXPWR_ANT0(rxs, corerev, corerev_minor) : (D11REV_GE(corerev, 80) ? \
+ RXPWR_ANT0_REV_GE80(rxs) : ACPHY_RXPWR_ANT0(rxs)))
+
+/** Get Rx power on ANT 1 */
+#define RXPWR_ANT1_REV_GE80(rxs) (((rxs)->ge80.PhyRxStatus_2 & \
+ (PRXS2_RXPWR_ANT1_REV_GE80)) >> 8)
+
+#define PHY_RXPWR_ANT1(corerev, corerev_minor, rxs) (AUTO_PHYRXSTS_ENAB() ? \
+ APRXS_RXPWR_ANT1(rxs, corerev, corerev_minor) : (D11REV_GE(corerev, 80) ? \
+ RXPWR_ANT1_REV_GE80(rxs) : ACPHY_RXPWR_ANT1(rxs)))
+
+/** Get Rx power on ANT 2 */
+#define RXPWR_ANT2_REV_GE80(rxs) ((rxs)->ge80.PhyRxStatus_3 & \
+ (PRXS3_RXPWR_ANT2_REV_GE80))
+
+#define PHY_RXPWR_ANT2(corerev, corerev_minor, rxs) (AUTO_PHYRXSTS_ENAB() ? \
+ APRXS_RXPWR_ANT2(rxs, corerev, corerev_minor) : (D11REV_GE(corerev, 80) ? \
+ RXPWR_ANT2_REV_GE80(rxs) : ACPHY_RXPWR_ANT2(rxs)))
+
+/** Get Rx power on ANT 3 */
+#define RXPWR_ANT3_REV_GE80(rxs) (((rxs)->ge80.PhyRxStatus_3 & \
+ (PRXS3_RXPWR_ANT3_REV_GE80)) >> 8)
+
+#define PHY_RXPWR_ANT3(corerev, corerev_minor, rxs) (AUTO_PHYRXSTS_ENAB() ? \
+ APRXS_RXPWR_ANT3(rxs, corerev, corerev_minor) : (D11REV_GE(corerev, 80) ? \
+ RXPWR_ANT3_REV_GE80(rxs) : ACPHY_RXPWR_ANT3(rxs)))
+
+/* Get the following entries from the RXStatus bytes for RSSI compensation
+ * based on factory calibration:
+ *   TIA index
+ *   eLNA index
+ *   V_path switch
+ */
+#define PHY_ELNA_IDX_ANT0_REV_GE85(corerev, corerev_min, rxs) \
+ APRXS_ELNA_IDX_ANT0(rxs, corerev, corerev_min)
+#define PHY_ELNA_IDX_ANT1_REV_GE85(corerev, corerev_min, rxs) \
+ APRXS_ELNA_IDX_ANT1(rxs, corerev, corerev_min)
+#define PHY_TIA_IDX_ANT0_REV_GE85(corerev, corerev_min, rxs) \
+ APRXS_TIA_IDX_ANT0(rxs, corerev, corerev_min)
+#define PHY_TIA_IDX_ANT1_REV_GE85(corerev, corerev_min, rxs) \
+ APRXS_TIA_IDX_ANT1(rxs, corerev, corerev_min)
+#define PHY_VSW_IDX_ANT0_REV_GE85(corerev, corerev_min, rxs) \
+ APRXS_VSW_IDX_ANT0(rxs, corerev, corerev_min)
+#define PHY_VSW_IDX_ANT1_REV_GE85(corerev, corerev_min, rxs) \
+ APRXS_VSW_IDX_ANT1(rxs, corerev, corerev_min)
+
+/** Get RSSI fractional bits */
+#define RXPWR_FRAC_REV_GE80(rxs) ((rxs)->ge80.PhyRxStatus_4 & \
+ (PRXS3_RXPWR_FRAC_REV_GE80))
+
+#define RXPWR_FRAC(corerev, corerev_minor, rxs) (AUTO_PHYRXSTS_ENAB() ? \
+ APRXS_RXPWR_FRAC(rxs, corerev, corerev_minor) : (D11REV_GE(corerev, 80) ? \
+ RXPWR_FRAC_REV_GE80(rxs) : 0))
+
+/* HECAPPHY PhyRxStatus_4: */
+#define PRXS4_DYNBWINNONHT_MASK_REV_GE80 0x1000
+#define PRXS4_DYNBWINNONHT_REV_GE80(rxs) ((rxs)->ge80.PhyRxStatus_4 & \
+ PRXS4_DYNBWINNONHT_MASK_REV_GE80)
+
+#define PRXS_PHY_DYNBWINNONHT(corerev, corerev_minor, rxs) (AUTO_PHYRXSTS_ENAB() ? \
+ APRXS_DYNBWINNONHT(rxs, corerev, corerev_minor) : (D11REV_GE(corerev, 80) ? \
+ PRXS4_DYNBWINNONHT_REV_GE80(rxs) : PRXS5_ACPHY_DYNBWINNONHT(rxs)))
+
+/** (corerev >= 80) PhyRxStatus_5: MCSSQ SNR for core 0 and 1 */
+#define PRXS5_MCSSQ_SHIFT (8u)
+#define PRXS5_MCSSQ_CORE0_REV_GE80 (0x00FF)
+#define PRXS5_MCSSQ_CORE1_REV_GE80 (0xFF00)
+
+#define MCSSQ_SNR_ANT0_GE80(rxs) ((rxs)->ge80.PhyRxStatus_5 & PRXS5_MCSSQ_CORE0_REV_GE80)
+#define MCSSQ_SNR_ANT0(rxs, rev, rev_min) (AUTO_PHYRXSTS_ENAB() ? \
+ APRXS_MCSSQSNR0(rxs, rev, rev_min) : \
+ ((rxs)->ge80.PhyRxStatus_5 & PRXS5_MCSSQ_CORE0_REV_GE80))
+
+#define MCSSQ_SNR_ANT1_GE80(rxs) (((rxs)->ge80.PhyRxStatus_5 & PRXS5_MCSSQ_CORE1_REV_GE80) \
+ >> PRXS5_MCSSQ_SHIFT)
+#define MCSSQ_SNR_ANT1(rxs, rev, rev_min) (AUTO_PHYRXSTS_ENAB() ? \
+ APRXS_MCSSQSNR1(rxs, rev, rev_min) : \
+ (((rxs)->ge80.PhyRxStatus_5 & PRXS5_MCSSQ_CORE1_REV_GE80) \
+ >> PRXS5_MCSSQ_SHIFT))
+
+/** (corerev >= 80) PhyRxStatus_6: MCSSQ SNR for core 2 and 3 */
+#define PRXS6_MCSSQ_SHIFT (8u)
+#define PRXS6_MCSSQ_CORE2_REV_GE80 (0x00FF)
+#define PRXS6_MCSSQ_CORE3_REV_GE80 (0xFF00)
+
+#define MCSSQ_SNR_ANT2_GE80(rxs) (((rxs)->ge80.phyrxs_rem[0] & \
+ PRXS6_MCSSQ_CORE2_REV_GE80))
+#define MCSSQ_SNR_ANT2(rxs, rev, rev_min) (AUTO_PHYRXSTS_ENAB() ? \
+ APRXS_MCSSQSNR2(rxs, rev, rev_min) : \
+ (((rxs)->ge80.phyrxs_rem[0] & PRXS6_MCSSQ_CORE2_REV_GE80)))
+
+/* HECAPPHY PhyRxStatus_8 (part of phyrxs_rem[2]) : */
+#define PRXS8_CHBWINNONHT_MASK_REV_GE80 0x0100
+#define PRXS8_CHBWINNONHT_REV_GE80(rxs) ((rxs)->ge80.phyrxs_rem[2] & \
+ PRXS8_CHBWINNONHT_MASK_REV_GE80)
+
+#define PRXS_PHY_CHBWINNONHT(corerev, corerev_minor, rxs) (AUTO_PHYRXSTS_ENAB() ? \
+ APRXS_CHBWINNONHT(rxs, corerev, corerev_minor) : (D11REV_GE(corerev, 80) ? \
+ PRXS8_CHBWINNONHT_REV_GE80(rxs) : PRXS5_ACPHY_CHBWINNONHT(rxs)))
+
+/* HE phyrxs_rem[4] */
+#define PRXS_REM4_PE_MASK_REV80 0x0380
+#define PRXS_REM4_PE_SHIFT_REV80 7u
+#define PRXS_REM4_RU_TYPE_MASK_REV80 0x1c00
+#define PRXS_REM4_RU_TYPE_SHIFT_REV80 10u
+#define PRXS_REM4_NUM_USER_SHIFT_REV80 13u
+#define PRXS_REM4_NUM_USER_BIT_MASK_REV80 0xe000
+
+/* HE phyrxs_rem[5] */
+#define PRXS_REM5_GI_LTF_MASK_REV80 0x0003
+#define PRXS_REM5_GI_LTF_SHIFT_REV80 0u
+#define PRXS_REM5_11AX_FF_MASK_REV80 0x0700
+#define PRXS_REM5_11AX_FF_SHIFT_REV80 8u
+
+/* HE phyrxs_rem[6] */
+#define PRXS_REM6_MCS_MASK_REV80 0x0f00
+#define PRXS_REM6_MCS_SHIFT_REV80 8u
+#define PRXS_REM6_CODING_MASK_REV80 0x1000
+#define PRXS_REM6_CODING_SHIFT_REV80 12u
+
+/* HE phyrxs_rem[7] */
+#define PRXS_REM7_DCM_MASK_REV80 0x8000
+#define PRXS_REM7_DCM_SHIFT_REV80 15u
+#define PRXS_REM7_TXBF_MASK_REV80 0x4000
+#define PRXS_REM7_TXBF_SHIFT_REV80 14u
+#define PRXS_REM7_NSTS_MASK_REV80 0x3800
+#define PRXS_REM7_NSTS_SHIFT_REV80 11u
+#define PRXS_REM7_RU_ALLOC_MASK_REV80 0x007f
+#define PRXS_REM7_RU_ALLOC_SHIFT_REV80 0u
+
+#define PRXS_STAID_MASK 0x07ff
+#define PRXS_STAID_SHIFT 0u
+
+enum {
+ HE_RU_TYPE_26T = 0, /* 26 tone RU, 0 - 36 */
+ HE_RU_TYPE_52T = 1, /* 52 tone RU, 37 - 52 */
+ HE_RU_TYPE_106T = 2, /* 106 tone RU, 53 - 60 */
+ HE_RU_TYPE_242T = 3, /* 242 tone RU, 61 - 64 */
+ HE_RU_TYPE_484T = 4, /* 484 tone RU, 65 - 66 */
+ HE_RU_TYPE_996T = 5, /* 996 tone RU, 67 - 68 */
+ HE_RU_TYPE_2x996T = 6, /* 2x996 tone RU, 69 */
+ HE_RU_TYPE_LAST = 7 /* Reserved, Invalid */
+};
+
+#define HE_RU_TYPE_MAX 6
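+/* Editor's sketch (not from the original sources): a hypothetical tone-count
+ * lookup indexed by the HE_RU_TYPE_* values above (HE_RU_TYPE_LAST excluded):
+ *
+ *   static const uint16 he_ru_tones[HE_RU_TYPE_MAX + 1] =
+ *       { 26, 52, 106, 242, 484, 996, 1992 };
+ */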
+
+/* received PE duration is present in phyrxs_rem[4] bit position [7-9] */
+#define D11PPDU_PE_GE80(rxh, corerev) ((D11RXHDR_GE80_ACCESS_VAL(rxh, phyrxs_rem[4]) & \
+ (PRXS_REM4_PE_MASK_REV80)) >> PRXS_REM4_PE_SHIFT_REV80)
+
+#define D11PPDU_PE(rxh, corerev, corerev_minor) (AUTO_PHYRXSTS_ENAB() ? \
+ APRXS_PE(rxh, corerev, corerev_minor) : D11PPDU_PE_GE80(rxh, corerev))
+
+/* received RU type is present in phyrxs_rem[4] bit position [10-11] */
+#define D11PPDU_RU_TYPE(rxh, corerev, corerev_minor) \
+ (AUTO_PHYRXSTS_ENAB() ? APRXS_RU(rxh, corerev, corerev_minor) : \
+ (D11REV_GE(corerev, 80) ? ((D11RXHDR_GE80_ACCESS_VAL(rxh, phyrxs_rem[4]) & \
+ (PRXS_REM4_RU_TYPE_MASK_REV80)) >> PRXS_REM4_RU_TYPE_SHIFT_REV80) : 0))
+
+/* received he num of user type is present in phyrxs_rem[4] bit position [13-15] */
+#define D11PPDU_HE_NUM_USER_TYPE(rxh, corerev, corerev_min) \
+ (AUTO_PHYRXSTS_ENAB() ? APRXS_USTY(rxh, corerev, corerev_min) : \
+ (D11REV_GE(corerev, 80) ? ((D11RXHDR_GE80_ACCESS_VAL(rxh, phyrxs_rem[4]) & \
+ (PRXS_REM4_NUM_USER_BIT_MASK_REV80)) >> PRXS_REM4_NUM_USER_SHIFT_REV80) : 0))
+
+#define D11PPDU_FF_TYPE(rxh, corerev, corerev_minor) \
+ (AUTO_PHYRXSTS_ENAB() ? APRXS_AXFF(rxh, corerev, corerev_minor) : \
+ (D11REV_GE(corerev, 80) ? ((D11RXHDR_GE80_ACCESS_VAL(rxh, phyrxs_rem[5]) & \
+ (PRXS_REM5_11AX_FF_MASK_REV80)) >> PRXS_REM5_11AX_FF_SHIFT_REV80) : 0))
+
+/* DCM is present in phyrxs_rem[7] byte 27, bit position [7] */
+#define D11PPDU_DCM(rxh, corerev, corerev_minor) \
+ (AUTO_PHYRXSTS_ENAB() ? APRXS_DCM(rxh, corerev, corerev_minor) : \
+ (D11REV_GE(corerev, 80) ? ((D11RXHDR_GE80_ACCESS_VAL(rxh, phyrxs_rem[7]) & \
+ (PRXS_REM7_DCM_MASK_REV80)) >> PRXS_REM7_DCM_SHIFT_REV80) : 0))
+
+/* coding used is present in phyrxs_rem[6] byte:25, bit position [12] */
+#define D11PPDU_CODING(rxh, corerev, corerev_minor) \
+ (AUTO_PHYRXSTS_ENAB() ? APRXS_CODING(rxh, corerev, corerev_minor) : \
+ (D11REV_GE(corerev, 80) ? ((D11RXHDR_GE80_ACCESS_VAL(rxh, phyrxs_rem[6]) & \
+ (PRXS_REM6_CODING_MASK_REV80)) >> PRXS_REM6_CODING_SHIFT_REV80) : 0))
+
+/* spatial reuse 2 / STA-ID */
+#define D11PPDU_STAID(rxh, corerev, corerev_minor) \
+ (AUTO_PHYRXSTS_ENAB() ? APRXS_AX_STAID(rxh, corerev, corerev_minor) : \
+ (D11REV_GE(corerev, 80) ? ((D11RXHDR_GE80_ACCESS_VAL(rxh, phyrxs_rem[7]) & \
+ (PRXS_STAID_MASK)) >> PRXS_STAID_SHIFT) : 0))
+
+#define D11PPDU_TXBF(rxh, corerev, corerev_minor) \
+ (AUTO_PHYRXSTS_ENAB() ? APRXS_TXBF(rxh, corerev, corerev_minor) : \
+ (D11REV_GE(corerev, 80) ? ((D11RXHDR_GE80_ACCESS_VAL(rxh, phyrxs_rem[7]) & \
+ (PRXS_REM7_TXBF_MASK_REV80)) >> PRXS_REM7_TXBF_SHIFT_REV80) : 0))
+
+/* GI_LTF is present in phyrxs_rem[5] bit position [0-1] */
+#define D11PPDU_GI_LTF(rxh, corerev, corerev_minor) \
+ (AUTO_PHYRXSTS_ENAB() ? APRXS_GILTF(rxh, corerev, corerev_minor) : \
+ (D11REV_GE(corerev, 80) ? ((D11RXHDR_GE80_ACCESS_VAL(rxh, phyrxs_rem[5]) & \
+ (PRXS_REM5_GI_LTF_MASK_REV80)) >> PRXS_REM5_GI_LTF_SHIFT_REV80) : 0))
+
+/* MCS is present in phyrxs_rem[6] - byte 25, bit position [8-11] */
+#define D11PPDU_MCS(rxh, corerev, corerev_minor) \
+ (AUTO_PHYRXSTS_ENAB() ? APRXS_AXMCS(rxh, corerev, corerev_minor) : \
+ (D11REV_GE(corerev, 80) ? ((D11RXHDR_GE80_ACCESS_VAL(rxh, phyrxs_rem[6]) & \
+ (PRXS_REM6_MCS_MASK_REV80)) >> PRXS_REM6_MCS_SHIFT_REV80) : 0))
+
+/* NSTS present in phyrxs_rem[7] bit position [11-13] */
+#define D11PPDU_NSTS(rxh, corerev, corerev_minor) \
+ (AUTO_PHYRXSTS_ENAB() ? APRXS_NSTS(rxh, corerev, corerev_minor) : \
+ (D11REV_GE(corerev, 80) ? ((D11RXHDR_GE80_ACCESS_VAL(rxh, phyrxs_rem[7]) & \
+ (PRXS_REM7_NSTS_MASK_REV80)) >> PRXS_REM7_NSTS_SHIFT_REV80) : 0))
+
+/* RU ALLOC present in phyrxs_rem[7]- byte 26; bit position [6:0] */
+#define D11PPDU_RU_ALLOC(rxh, corerev, corerev_minor) \
+ (AUTO_PHYRXSTS_ENAB() ? APRXS_AX_RUALLOC(rxh, corerev, corerev_minor) : \
+ (D11REV_GE(corerev, 80) ? ((D11RXHDR_GE80_ACCESS_VAL(rxh, phyrxs_rem[7]) & \
+ (PRXS_REM7_RU_ALLOC_MASK_REV80)) >> PRXS_REM7_RU_ALLOC_SHIFT_REV80) : 0))
+
+/* PHY RX status "Frame Type" field mask. */
+#define PRXS_FT_MASK(corerev) \
+ (D11REV_GE(corerev, 80) ? (PRXS0_FT_MASK_REV_GE80) : \
+ (PRXS0_FT_MASK_REV_LT80))
+
+/**
+ * ACPHY PhyRxStatus0 SubBand (FinalBWClassification) bit defs
+ * FinalBWClassification is a 4 bit field, each bit representing one 20MHz sub-band
+ * of a channel.
+ */
+enum prxs_subband {
+ PRXS_SUBBAND_20LL = 0x0001,
+ PRXS_SUBBAND_20LU = 0x0002,
+ PRXS_SUBBAND_20UL = 0x0004,
+ PRXS_SUBBAND_20UU = 0x0008,
+ PRXS_SUBBAND_40L = 0x0003,
+ PRXS_SUBBAND_40U = 0x000C,
+ PRXS_SUBBAND_80 = 0x000F,
+ PRXS_SUBBAND_20LLL = 0x0001,
+ PRXS_SUBBAND_20LLU = 0x0002,
+ PRXS_SUBBAND_20LUL = 0x0004,
+ PRXS_SUBBAND_20LUU = 0x0008,
+ PRXS_SUBBAND_20ULL = 0x0010,
+ PRXS_SUBBAND_20ULU = 0x0020,
+ PRXS_SUBBAND_20UUL = 0x0040,
+ PRXS_SUBBAND_20UUU = 0x0080,
+ PRXS_SUBBAND_40LL = 0x0003,
+ PRXS_SUBBAND_40LU = 0x000c,
+ PRXS_SUBBAND_40UL = 0x0030,
+ PRXS_SUBBAND_40UU = 0x00c0,
+ PRXS_SUBBAND_80L = 0x000f,
+ PRXS_SUBBAND_80U = 0x00f0,
+ PRXS_SUBBAND_160 = 0x00ff
+};
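+/* Editor's note: the values are one bit per occupied 20 MHz sub-band, so the
+ * wider entries are unions of the narrower ones, e.g.
+ * PRXS_SUBBAND_40L == (PRXS_SUBBAND_20LL | PRXS_SUBBAND_20LU), and
+ * PRXS_SUBBAND_160 covers all eight 20 MHz sub-bands.
+ */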
+
+enum prxs_subband_bphy {
+ PRXS_SUBBAND_BPHY_20L = 0x0000,
+ PRXS_SUBBAND_BPHY_20U = 0x0001
+};
+
+/* ACPHY Gen2 RxStatus defs */
+
+/* ACPHY Gen2 PhyRxStatus_0: */
+#define PRXS0_ACPHY2_MUPPDU 0x1000 /**< 0: SU PPDU; 1: MU PPDU */
+#define PRXS0_ACPHY2_OBSS 0xE000 /**< OBSS mitigation state */
+
+/* ACPHY Gen2 PhyRxStatus_1: */
+#define PRXS1_ACPHY2_SUBBAND_MASK 0xFF00 /**< FinalBWClassification:
+ * 8-bit bitfield of sub-bands occupied by Rx frame
+ */
+#define PRXS1_ACPHY2_SUBBAND_SHIFT 8
+
+/* ACPHY Gen2 PhyRxStatus_2: */
+#define PRXS2_ACPHY2_MU_INT 0x003F /**< MU interference processing type */
+
+/* ACPHY Gen2 PhyRxStatus_5: */
+#define PRXS5_ACPHY2_RSSI_FRAC 0xFF00 /**< RSSI fractional bits */
+
+/* ucode RxStatus1: */
+#define RXS_BCNSENT 0x8000
+#define RXS_TOFINFO 0x4000 /**< Rxed measurement frame processed by ucode */
+#define RXS_GRANTBT 0x2000 /* Indicate medium given to BT */
+#define RXS_SECKINDX_MASK_GE64 0x1fe0
+#define RXS_SECKINDX_MASK 0x07e0
+#define RXS_IS_DEFRAG 0x4
+#define RXS_DEFRAG_SHIFT 2
+#define RXS_SECKINDX_SHIFT 5
+#define RXS_DECERR (1 << 4)
+#define RXS_DECATMPT (1 << 3)
+#define RXS_PBPRES (1 << 2) /**< PAD bytes to make IP data 4 bytes aligned */
+#define RXS_RESPFRAMETX (1 << 1)
+#define RXS_FCSERR (1 << 0)
+
+/* ucode RxStatus2: */
+#define RXS_AMSDU_MASK 1
+#define RXS_AGGTYPE_MASK 0x6
+#define RXS_AGGTYPE_SHIFT 1
+#define RXS_AMSDU_FIRST 1
+#define RXS_AMSDU_INTERMEDIATE 0
+#define RXS_AMSDU_LAST 2
+#define RXS_AMSDU_N_ONE 3
+#define RXS_TKMICATMPT (1 << 3)
+#define RXS_TKMICERR (1 << 4)
+#define RXS_PHYRXST_PRISEL_CLR (1 << 5) /**< PR113291: When '1', Indicates that the Rx */
+ /* packet was received while the antenna */
+ /* (prisel) had been granted to BT. */
+#define RXS_PHYRXST_VALID (1 << 8)
+#define RXS_BCNCLSG (1 << 9) /**< Coalesced beacon packet */
+#define RXS_RXANT_MASK 0x3
+#define RXS_RXANT_SHIFT_LT80 12
+#define RXS_RXANT_SHIFT_GE80 5
+#define RXS_LOOPBACK_MODE 4
+
+/* Bit definitions for MRXS word for short rx status. */
+/* RXSS = RX Status Short */
+#define RXSS_AMSDU_MASK 1 /**< 1: AMSDU */
+#define RXSS_AGGTYPE_MASK 0x6 /**< 0 intermed, 1 first, 2 last, 3 single/non-AMSDU */
+#define RXSS_AGGTYPE_SHIFT 1
+#define RXSS_PBPRES (1 << 3) /**< two-byte PAD prior to plcp */
+#define RXSS_HDRSTS (1 << 4) /**< header conversion status. 1 enabled, 0 disabled */
+#define RXSS_RES_MASK 0xE0 /**< reserved */
+#define RXSS_MSDU_CNT_MASK 0xFF00 /**< index of this AMSDU sub-frame in the AMSDU */
+#define RXSS_MSDU_CNT_SHIFT 8
+
+/* RX signal control definitions */
+/** PHYRXSTATUS validity checker; in between MPDUs of an AMPDU the rx status isn't valid */
+#define PRXS_IS_VALID(rxh, rev, rev_min) \
+ ((D11REV_GE(rev, 80) && \
+ (D11RXHDR_ACCESS_VAL(rxh, rev, rev_min, dma_flags) & \
+ RXS_PHYRXST_VALID_REV_GE80)) || \
+ (D11REV_GE(rev, 64) && !(D11RXHDR_ACCESS_VAL(rxh, \
+ rev, rev_min, dma_flags) & RXS_SHORT_MASK)) || \
+ (D11RXHDR_ACCESS_VAL(rxh, rev, rev_min, RxStatus2) & RXS_PHYRXST_VALID))
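+/* Editor's sketch (not from the original sources): PHY RX status fields
+ * should only be read behind this guard, since intermediate MPDUs of an
+ * AMPDU carry no valid PHY status:
+ *
+ *   if (PRXS_IS_VALID(rxh, rev, rev_min)) {
+ *       uint8 rxpwr = PHY_RXPWR_ANT0(rev, rev_min, rxh);
+ *   }
+ */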
+
+/* RxChan */
+#define RXS_CHAN_40 0x1000
+#define RXS_CHAN_5G 0x0800
+#define RXS_CHAN_ID_MASK 0x07f8
+#define RXS_CHAN_ID_SHIFT 3
+
+#define C_BTCX_AGGOFF_BLE (1 << 0)
+#define C_BTCX_AGGOFF_A2DP (1 << 1)
+#define C_BTCX_AGGOFF_PER (1 << 2)
+#define C_BTCX_AGGOFF_MULTIHID (1 << 3)
+#define C_BTCX_AGG_LMT_SET_HIGH (1 << 4)
+#define C_BTCX_AGGOFF_ESCO_SLAVE (1 << 5)
+
+#define BTCX_HFLG_NO_A2DP_BFR (1 << 0) /**< no check a2dp buffer */
+#define BTCX_HFLG_NO_CCK (1 << 1) /**< no cck rate for null or cts2self */
+#define BTCX_HFLG_NO_OFDM_FBR (1 << 2) /**< no ofdm fbr for null or cts2self */
+#define BTCX_HFLG_NO_INQ_DEF (1 << 3) /**< no defer inquiry */
+#define BTCX_HFLG_GRANT_BT (1 << 4) /**< always grant bt */
+#define BTCX_HFLG_ANT2WL (1 << 5) /**< force prisel to wl */
+#define BTCX_HFLG_PS4ACL (1 << 7) /**< use ps null for unsniff acl */
+#define BTCX_HFLG_DYAGG (1 << 8) /**< dynamic tx aggregation */
+#define BTCX_HFLG_SKIPLMP (1 << 10) /**< no LMP check for 4331 (w 20702 A1/A3) */
+#define BTCX_HFLG_ACL_BSD_BLE_SCAN_GRNT (1 << 14) /**< ACL based grant for BLE scan */
+ /* indication to ucode */
+#define BTCX_HFLG2_TRAP_RFACTIVE (1 << 0) /* trap when RfActive too long */
+#define BTCX_HFLG2_TRAP_TXCONF (1 << 1) /* trap when coex grants txconf late */
+#define BTCX_HFLG2_TRAP_ANTDLY (1 << 2) /* trap when coex grants antdly late */
+#define BTCX_HFLG2_TRAP_BTTYPE (1 << 3) /* trap when illegal BT tasktype receive */
+/* Bit definitions for M_BTCX_CONFIG */
+#define BTCX_CONFIG_FORCE_TRAP (1 << 13) /* Force a specific BTCoex TRAP when set */
+
+/* BTCX_CONFIG bits */
+#define C_BTCX_CONFIG_SLOTTED_STATE_1 (1 << 3)
+#define C_BTCX_CONFIG_SLOTTED_STATE_2 (1 << 4)
+#define C_BTCX_CONFIG_SLOTTED_STATE_3 (1 << 5)
+#define C_BTCX_CONFIG_LOW_RSSI (1 << 7)
+#define C_BTCX_CONFIG_BT_STROBE (1 << 9)
+#define C_BTCX_CONFIG_SCO_PROT (1 << 10)
+#define C_BTCX_CFG_CMN_CTS2SELF (1 << 11)
+#define C_BTCX_CONFIG_HPP_STATE (1 << 15)
+
+#define BTC_PARAMS_FW_START_IDX 1000 /**< starting index of FW only btc params */
+/** BTC_PARAMS_FW definitions */
+typedef enum
+{
+ // allow rx-agg to be re-enabled after SCO session completes
+ BTC_FW_RX_REAGG_AFTER_SCO = BTC_PARAMS_FW_START_IDX,
+ // RSSI threshold at which SCO grant/deny limits are changed dynamically
+ BTC_FW_RSSI_THRESH_SCO = BTC_PARAMS_FW_START_IDX + 1,
+ // Enable the dynamic LE scan priority
+ BTC_FW_ENABLE_DYN_LESCAN_PRI = BTC_PARAMS_FW_START_IDX + 2,
+ // If Tput(mbps) is above this, then share antenna with BT's LE_SCAN packet type.
+ BTC_FW_LESCAN_LO_TPUT_THRESH = BTC_PARAMS_FW_START_IDX + 3,
+ // If Tput(mbps) is below this, then share antenna with BT's LE_SCAN packet type.
+ // sampled once a second.
+ BTC_FW_LESCAN_HI_TPUT_THRESH = BTC_PARAMS_FW_START_IDX + 4,
+ // Numbers of denials before granting LS scans
+ BTC_FW_LESCAN_GRANT_INT = BTC_PARAMS_FW_START_IDX + 5,
+ // number of times the algorithm changes lescan priority
+ BTC_FW_LESCAN_ALG_CNT = BTC_PARAMS_FW_START_IDX + 6,
+ // RSSI threshold at which aggregation will be disabled during frequent BLE activity
+ BTC_FW_RSSI_THRESH_BLE = BTC_PARAMS_FW_START_IDX + 7,
+ // AMPDU Aggregation state requested by BTC
+ BTC_FW_AGG_STATE_REQ = BTC_PARAMS_FW_START_IDX + 8,
+ // Reserving space for parameters used in other projects
+ BTC_FW_RSVD_1 = BTC_PARAMS_FW_START_IDX + 9,
+ BTC_FW_HOLDSCO_LIMIT = BTC_PARAMS_FW_START_IDX + 10, // Lower Limit
+ BTC_FW_HOLDSCO_LIMIT_HI = BTC_PARAMS_FW_START_IDX + 11, // Higher Limit
+ BTC_FW_SCO_GRANT_HOLD_RATIO = BTC_PARAMS_FW_START_IDX + 12, // Low Ratio
+ BTC_FW_SCO_GRANT_HOLD_RATIO_HI = BTC_PARAMS_FW_START_IDX + 13, // High Ratio
+ BTC_FW_HOLDSCO_HI_THRESH = BTC_PARAMS_FW_START_IDX + 14, // BT Period Threshold
+ BTC_FW_MOD_RXAGG_PKT_SZ_FOR_SCO = BTC_PARAMS_FW_START_IDX + 15,
+ /* Modify Rx Aggregation size when SCO/eSCO detected */
+ BTC_FW_AGG_SIZE_LOW = BTC_PARAMS_FW_START_IDX + 16,
+ /* Agg size when BT period < 7500 ms */
+ BTC_FW_AGG_SIZE_HIGH = BTC_PARAMS_FW_START_IDX + 17,
+ /* Agg size when BT period >= 7500 ms */
+ BTC_FW_MOD_RXAGG_PKT_SZ_FOR_A2DP = BTC_PARAMS_FW_START_IDX + 18,
+ /* Enable COEX constraints for TWT scheduling */
+ BTC_FW_TWT_COEX_CONSTRAINTS_EN = BTC_PARAMS_FW_START_IDX + 19,
+ /* Enable Rx Aggregation for P2P_GO and SOFTAP when ACL/A2DP detected */
+ BTC_FW_MOD_RXAGG_PKT_SZ_FOR_APMODE_ACL_A2DP = BTC_PARAMS_FW_START_IDX + 20,
+ /* Disable amsdu dynamically during Rx limited aggregation */
+ BTC_FW_DISABLE_AMSDU_DURING_LIM_AGG = BTC_PARAMS_FW_START_IDX + 21,
+ /* Enable acl based grant for ble scan based on number of 2G slots */
+ BTC_FW_ENABLE_ACL_GRNT_FOR_BLE_SCAN = BTC_PARAMS_FW_START_IDX + 22,
+ /* Threshold slot count for 2g band to Enable acl based grant for ble scan during NAN */
+ BTC_FW_NAN_THRESHOLD_SLOTS_FOR_2G = BTC_PARAMS_FW_START_IDX + 23,
+ /* BT task bm override for critical chansw slots */
+ BTC_FW_CHANSW_CRT_OVR_BTTASK_BM_L = BTC_PARAMS_FW_START_IDX + 24,
+ BTC_FW_CHANSW_CRT_OVR_BTTASK_BM_H = BTC_PARAMS_FW_START_IDX + 25,
+ /* Limited Aggr AP check grace period, # of BTC watchdog timeout */
+ BTC_FW_AGG_AP_GRACE_PERIOD = BTC_PARAMS_FW_START_IDX + 26,
+ /* Limited Aggr AP check buffer limit, sample interval, # of BTC watchdog timeout */
+ BTC_FW_AGG_AP_BUFLIM_SMPLINTV = BTC_PARAMS_FW_START_IDX + 27,
+ /* Limited Aggr AP check excessive DELBA, sample interval, # of BTC watchdog timeout */
+ BTC_FW_AGG_AP_DELBA_SMPLINTV = BTC_PARAMS_FW_START_IDX + 28,
+ /* Limited Aggr AP check excessive DELBA, threshold, # of DELBA */
+ BTC_FW_AGG_AP_DELBA_THRESHOLD = BTC_PARAMS_FW_START_IDX + 29,
+ BTC_FW_MAX_INDICES // Maximum number of btc_fw sw registers
+} btcParamsFirmwareDefinitions;
+
+#define BTC_FW_NUM_INDICES (BTC_FW_MAX_INDICES - BTC_PARAMS_FW_START_IDX)
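+/* Editor's sketch (not from the original sources): the enum values are
+ * absolute "wl btc_params" indices, so a hypothetical shadow array is sized
+ * with BTC_FW_NUM_INDICES and indexed relative to the start index:
+ *
+ *   uint16 btc_fw_params[BTC_FW_NUM_INDICES];
+ *   btc_fw_params[BTC_FW_RSSI_THRESH_SCO - BTC_PARAMS_FW_START_IDX] =
+ *       BTC_FW_RSSI_THRESH_SCO_INIT_VAL;
+ */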
+
+// 1: Re-enable aggregation after SCO
+#define BTC_FW_RX_REAGG_AFTER_SCO_INIT_VAL 1
+
+// 1: Enable limited aggregation for SCO
+#define BTC_FW_MOD_RXAGG_PKT_SZ_FOR_SCO_INIT_VAL 0
+
+/* Enable Limited aggregation for HI interval BT periodic task only (>=7.5ms) */
+#ifdef WL_BTC_LIMAGG_HI_INT
+/* RX aggregation packet size when SCO */
+#define BTC_FW_AGG_SIZE_LOW_INIT_VAL 0
+#else
+/* RX aggregation packet size when SCO */
+#define BTC_FW_AGG_SIZE_LOW_INIT_VAL 1
+#endif
+
+/* aggregation size when BT period >= BT_AMPDU_RESIZE_THRESH */
+#define BTC_FW_AGG_SIZE_HIGH_INIT_VAL 2
+
+// 0: disable weak-rssi SCO coex feature. If > 0, adjust SCO COEX algorithm for weak RSSI scenario.
+#define BTC_FW_RSSI_THRESH_SCO_INIT_VAL 0
+
+// 1: Enable limited aggregation for A2DP
+#define BTC_FW_MOD_RXAGG_PKT_SZ_FOR_A2DP_INIT_VAL 0
+
+// Enable LE Scan Priority Algorithm 0: Disable, 1: Enable
+#define BTC_FW_ENABLE_DYN_LESCAN_PRI_INIT_VAL 0
+// If WL Tput below 7 mbps, don't grant background LE Scans
+#define BTC_FW_LESCAN_LO_TPUT_THRESH_INIT_VAL 7
+// If WL Tput above 30 mbps, don't grant background LE Scans
+#define BTC_FW_LESCAN_HI_TPUT_THRESH_INIT_VAL 30
+// If LE Priority algorithm is triggered, grant one out of 2 LE_SCAN requests
+#define BTC_FW_LESCAN_GRANT_INT_INIT_VAL 2
+// If RSSI is weaker than -70 dBm and BLE activity is frequent, then disable
+// RX aggregation, and clamp TX aggregation.
+#ifdef WL_BTCX_UDM
+#define BTC_FW_RSSI_THRESH_BLE_INIT_VAL 100
+#else
+#define BTC_FW_RSSI_THRESH_BLE_INIT_VAL 70
+#endif
+#define BTC_FW_HOLDSCO_LIMIT_INIT_VAL 100
+#define BTC_FW_HOLDSCO_LIMIT_HI_INIT_VAL 10
+#define BTC_FW_SCO_GRANT_HOLD_RATIO_INIT_VAL 1500
+#define BTC_FW_SCO_GRANT_HOLD_RATIO_HI_INIT_VAL 1000
+#define BTC_FW_HOLDSCO_HI_THRESH_INIT_VAL 7400
+#define BTC_FW_TWT_COEX_CONSTRAINTS_EN_INIT_VAL 1
+/* Aggregation in AP mode (P2P_GO and SOFTAP) when ACL and A2DP */
+#define BTC_FW_MOD_RXAGG_PKT_SZ_FOR_APMODE_ACL_A2DP_INIT_VAL 16
+/* Disable amsdu dynamically during Rx limited aggregation */
+#define BTC_FW_DISABLE_AMSDU_DURING_LIM_AGG_INIT_VAL 1
+/* Enable ACL-based grant for BLE scan based on the number of 2G slots during NAN */
+#define BTC_FW_ENABLE_ACL_GRNT_FOR_BLE_SCAN_INIT_VAL 0
+/* Threshold slot count for the 2G band to enable ACL-based grant for BLE
+ * scan during NAN. The current value of 8 assumes a 512 ms timeline;
+ * the threshold changes dynamically with the timeline.
+ */
+#define BTC_FW_NAN_THRESHOLD_SLOTS_FOR_2G_INIT_VAL 8
+/* BT task bm override for critical chansw slots - init values */
+#define BTC_FW_CHANSW_CRT_OVR_BTTASK_BM_L_INIT_VAL 0x0000
+#define BTC_FW_CHANSW_CRT_OVR_BTTASK_BM_H_INIT_VAL 0x0020
+#define BTC_FW_AGG_AP_GRACE_PERIOD_VAL 1
+#define BTC_FW_AGG_AP_BUFLIM_SMPLINTV_VAL 1
+#define BTC_FW_AGG_AP_DELBA_SMPLINTV_VAL 5
+#define BTC_FW_AGG_AP_DELBA_THRESHOLD_VAL 3
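+
+/* Illustrative sketch (not part of this interface): the *_INIT_VAL/_VAL
+ * defaults above pair naturally with their btcParamsFirmwareDefinitions
+ * indices, e.g. in a host-side table used to seed the firmware block.
+ * The table and the wl_set_btc_param() helper are hypothetical:
+ *
+ *	static const struct { uint16 idx; uint16 val; } btc_fw_defaults[] = {
+ *		{ BTC_FW_DISABLE_AMSDU_DURING_LIM_AGG,
+ *		  BTC_FW_DISABLE_AMSDU_DURING_LIM_AGG_INIT_VAL },
+ *		{ BTC_FW_NAN_THRESHOLD_SLOTS_FOR_2G,
+ *		  BTC_FW_NAN_THRESHOLD_SLOTS_FOR_2G_INIT_VAL },
+ *		{ BTC_FW_AGG_AP_GRACE_PERIOD, BTC_FW_AGG_AP_GRACE_PERIOD_VAL },
+ *	};
+ *	for (i = 0; i < ARRAYSIZE(btc_fw_defaults); i++)
+ *		wl_set_btc_param(wl, btc_fw_defaults[i].idx,
+ *		                 btc_fw_defaults[i].val);
+ */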
+
+/* NR Coex Params Set/Get via wl btc_params, starting index */
+#define NR5GCX_PARAMS_FW_START_IDX 1200
+
+typedef enum NR5GCX_Params {
+ // Min # of PPDU to be tracked for hysteresis
+ NR5GCX_FW_MIN_NUM_PPDU = NR5GCX_PARAMS_FW_START_IDX,
+ // Threshold for data stall detection, percentage
+ NR5GCX_FW_DATA_STALL_TH = NR5GCX_PARAMS_FW_START_IDX + 1,
+ // max number of rate recovery attempts
+ NR5GCX_FW_MAX_NUM_ATTEMPTS = NR5GCX_PARAMS_FW_START_IDX + 2,
+ // Rate recovery rate check duration
+ NR5GCX_FW_RR_RATE_CHK_DUR = NR5GCX_PARAMS_FW_START_IDX + 3,
+ // Rate recovery attempt duration
+ NR5GCX_FW_RR_ATTEMPT_DUR = NR5GCX_PARAMS_FW_START_IDX + 4,
+ // NR grant duration after an unsuccessful rate recovery
+ NR5GCX_FW_RR_UNSC_DUR = NR5GCX_PARAMS_FW_START_IDX + 5,
+ // Threshold for rate recovery, percentage
+ NR5GCX_FW_RECOVERY_TH = NR5GCX_PARAMS_FW_START_IDX + 6,
+ // Threshold for low RSSI
+ NR5GCX_FW_LOWRSSI_TH = NR5GCX_PARAMS_FW_START_IDX + 7,
+ // Maximum number of nr5gcx fw params
+ NR5GCX_FW_MAX_INDICES
+} NR5GCXParamsFirmwareDefinitions;
+
+#define NR5GCX_FW_NUM_INDICES (NR5GCX_FW_MAX_INDICES - NR5GCX_PARAMS_FW_START_IDX)
+
+#define NR5GCX_FW_MIN_NUM_PPDU_INIT 10u
+#define NR5GCX_FW_DATA_STALL_TH_INIT 75u
+#define NR5GCX_FW_MAX_NUM_ATTEMPTS_INIT 5u
+#define NR5GCX_FW_RR_RATE_CHK_DUR_INIT_MS 60u /* ms */
+#define NR5GCX_FW_RR_ATTEMPT_DUR_INIT_MS 60u /* ms */
+#define NR5GCX_FW_RR_UNSC_DUR_INIT_MS 10000u /* ms */
+#define NR5GCX_FW_RECOVERY_TH_INIT 50u
+#define NR5GCX_FW_LOWRSSI_TH_INIT 85u /* dBm */
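+
+/* Usage sketch: per the comment above, these parameters are set/get through
+ * "wl btc_params <index> <val>". NR5GCX_FW_DATA_STALL_TH sits at index 1201
+ * (NR5GCX_PARAMS_FW_START_IDX + 1), so e.g. "wl btc_params 1201 75" would
+ * program its default of NR5GCX_FW_DATA_STALL_TH_INIT percent.
+ */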
+
+/* RC1 Coex Params Set/Get via wl btc_params, starting index */
+#define RC1CX_PARAMS_FW_START_IDX 1200
+
+typedef enum RC1CX_Params {
+ // Min # of PPDU to be tracked for hysteresis
+ RC1CX_FW_MIN_NUM_PPDU = RC1CX_PARAMS_FW_START_IDX,
+ // Threshold for data stall detection, percentage
+ RC1CX_FW_DATA_STALL_TH = RC1CX_PARAMS_FW_START_IDX + 1,
+ // max number of rate recovery attempts
+ RC1CX_FW_MAX_NUM_ATTEMPTS = RC1CX_PARAMS_FW_START_IDX + 2,
+ // Rate recovery rate check duration
+ RC1CX_FW_RR_RATE_CHK_DUR = RC1CX_PARAMS_FW_START_IDX + 3,
+ // Rate recovery attempt duration
+ RC1CX_FW_RR_ATTEMPT_DUR = RC1CX_PARAMS_FW_START_IDX + 4,
+ // NR grant duration after an unsuccessful rate recovery
+ RC1CX_FW_RR_UNSC_DUR = RC1CX_PARAMS_FW_START_IDX + 5,
+ // Threshold for rate recovery, percentage
+ RC1CX_FW_RECOVERY_TH = RC1CX_PARAMS_FW_START_IDX + 6,
+ // Threshold for low RSSI
+ RC1CX_FW_LOWRSSI_TH = RC1CX_PARAMS_FW_START_IDX + 7,
+ // Maximum number of rc1cx fw params
+ RC1CX_FW_MAX_INDICES
+} RC1CXParamsFirmwareDefinitions;
+
+#define RC1CX_FW_NUM_INDICES (RC1CX_FW_MAX_INDICES - RC1CX_PARAMS_FW_START_IDX)
+
+#define RC1CX_FW_MIN_NUM_PPDU_INIT 10u
+#define RC1CX_FW_DATA_STALL_TH_INIT 75u
+#define RC1CX_FW_MAX_NUM_ATTEMPTS_INIT 5u
+#define RC1CX_FW_RR_RATE_CHK_DUR_INIT_MS 60u /* ms */
+#define RC1CX_FW_RR_ATTEMPT_DUR_INIT_MS 60u /* ms */
+#define RC1CX_FW_RR_UNSC_DUR_INIT_MS 10000u /* ms */
+#define RC1CX_FW_RECOVERY_TH_INIT 50u
+#define RC1CX_FW_LOWRSSI_TH_INIT 85u /* dBm */
+
+#ifdef GPIO_TXINHIBIT
+/* GPIO-based TX_INHIBIT: SWWLAN-109270 */
+typedef enum shm_macintstatus_ext_e {
+ C_MISE_GPIO_TXINHIBIT_VAL_NBIT = 0,
+ C_MISE_GPIO_TXINHIBIT_INT_NBIT = 1
+} shm_macintstatus_ext_t;
+#define C_MISE_GPIO_TXINHIBIT_VAL_MASK (1 << C_MISE_GPIO_TXINHIBIT_VAL_NBIT)
+#define C_MISE_GPIO_TXINHIBIT_INT_MASK (1 << C_MISE_GPIO_TXINHIBIT_INT_NBIT)
+#endif
+#define M_PSM_SOFT_REGS 0x0
+
+/** Scratch Reg defs */
+typedef enum
+{
+ S_RSV0 = 0,
+ S_RSV1,
+ S_RSV2,
+
+ /* scratch registers for Dot11-constants */
+ S_DOT11_CWMIN, /**< CW-minimum 0x03 */
+ S_DOT11_CWMAX, /**< CW-maximum 0x04 */
+ S_DOT11_CWCUR, /**< CW-current 0x05 */
+ S_DOT11_SRC_LMT, /**< short retry count limit 0x06 */
+ S_DOT11_LRC_LMT, /**< long retry count limit 0x07 */
+ S_DOT11_DTIMCOUNT, /**< DTIM-count 0x08 */
+
+ /* Tx-side scratch registers */
+ S_SEQ_NUM, /**< hardware sequence number reg 0x09 */
+ S_SEQ_NUM_FRAG, /**< seq-num for frags (set at the start of MSDU) 0x0A */
+ S_FRMRETX_CNT, /**< frame retx count 0x0B */
+ S_SSRC, /**< Station short retry count 0x0C */
+ S_SLRC, /**< Station long retry count 0x0D */
+ S_EXP_RSP, /**< Expected response frame 0x0E */
+ S_OLD_BREM, /**< Remaining backoff ctr 0x0F */
+ S_OLD_CWWIN, /**< saved-off CW-cur 0x10 */
+ S_TXECTL, /**< TXE-Ctl word constructed in scr-pad 0x11 */
+ S_CTXTST, /**< frm type-subtype as read from Tx-descr 0x12 */
+
+ /* Rx-side scratch registers */
+ S_RXTST, /**< Type and subtype in Rxframe 0x13 */
+
+ /* Global state register */
+ S_STREG, /**< state storage actual bit maps below 0x14 */
+
+ S_TXPWR_SUM, /**< Tx power control: accumulator 0x15 */
+ S_TXPWR_ITER, /**< Tx power control: iteration 0x16 */
+ S_RX_FRMTYPE, /**< Rate and PHY type for frames 0x17 */
+ S_THIS_AGG, /**< Size of this AGG (A-MSDU) 0x18 */
+
+ S_KEYINDX, /* 0x19 */
+ S_RXFRMLEN, /**< Receive MPDU length in bytes 0x1A */
+
+ /* Receive TSF time stored in SCR */
+ S_RXTSFTMRVAL_WD3, /**< TSF value at the start of rx 0x1B */
+ S_RXTSFTMRVAL_WD2, /**< TSF value at the start of rx 0x1C */
+ S_RXTSFTMRVAL_WD1, /**< TSF value at the start of rx 0x1D */
+ S_RXTSFTMRVAL_WD0, /**< TSF value at the start of rx 0x1E */
+ S_RXSSN, /**< Received start seq number for A-MPDU BA 0x1F */
+ S_RXQOSFLD, /**< Rx-QoS field (if present) 0x20 */
+
+ /* Scratch pad regs used in microcode as temp storage */
+ S_TMP0, /**< stmp0 0x21 */
+ S_TMP1, /**< stmp1 0x22 */
+ S_TMP2, /**< stmp2 0x23 */
+ S_TMP3, /**< stmp3 0x24 */
+ S_TMP4, /**< stmp4 0x25 */
+ S_TMP5, /**< stmp5 0x26 */
+ S_PRQPENALTY_CTR, /**< Probe response queue penalty counter 0x27 */
+ S_ANTCNT, /**< unsuccessful attempts on current ant. 0x28 */
+ S_SYMBOL, /**< flag for possible symbol ctl frames 0x29 */
+ S_RXTP, /**< rx frame type 0x2A */
+ S_STREG2, /**< extra state storage 0x2B */
+ S_STREG3, /**< even more extra state storage 0x2C */
+ S_STREG4, /**< ... 0x2D */
+ S_STREG5, /**< remember to initialize it to zero 0x2E */
+
+ S_UNUSED_0X2F, /**< No longer used 0x2F */
+ S_UPTR, /* Use this to initialize utrace 0x30 */
+ S_ADJPWR_IDX, /**< PR 37101 WAR, adj_pwr_idx 0x31 */
+ S_CUR_PTR, /**< Temp pointer for A-MPDU re-Tx SHM table 0x32 */
+ S_REVID4, /**< 0x33 */
+ S_INDX, /**< 0x34 */
+ S_ADDR0, /**< 0x35 */
+ S_ADDR1, /**< 0x36 */
+ S_ADDR2, /**< 0x37 */
+ S_ADDR3, /**< 0x38 */
+ S_ADDR4, /**< 0x39 */
+ S_ADDR5, /**< 0x3A */
+ S_TMP6, /**< 0x3B */
+ S_KEYINDX_BU, /**< Backup for Key index 0x3C */
+ S_MFGTEST_TMP0, /**< Temp register used for RX test calculations 0x3D */
+ S_RXESN, /**< Received end sequence number for A-MPDU BA 0x3E */
+ S_STREG6, /**< 0x3F */
+} ePsmScratchPadRegDefinitions;
+
+#define C_STREG_SLOWCAL_PD_NBIT 0x00000004 /* BIT 2 slow clock cal is pending */
+#define C_STREG_SLOWCAL_DN_NBIT 0x00000008 /* BIT 3 slow clock cal is done */
+
+#define S_BEACON_INDX S_OLD_BREM
+#define S_PRS_INDX S_OLD_CWWIN
+#define S_BTCX_BT_DUR S_REVID4
+#define S_PHYTYPE S_SSRC
+#define S_PHYVER S_SLRC
+
+/* IHR GPT_2 registers, corerev >= 3 */
+#define TSF_GPT_2_STAT 0x133
+#define TSF_GPT_2_CTR_L 0x134
+#define TSF_GPT_2_CTR_H 0x135
+#define TSF_GPT_2_VAL_L 0x136
+#define TSF_GPT_2_VAL_H 0x137
+
+/* IHR TSF_GPT STAT values */
+#define TSF_GPT_PERIODIC (1 << 12)
+#define TSF_GPT_ADJTSF (1 << 13)
+#define TSF_GPT_USETSF (1 << 14)
+#define TSF_GPT_ENABLE (1 << 15)
+
+/** ucode mac statistic counters in shared memory */
+#define MACSTAT_OFFSET_SZ 64
+#define MACSTAT_REV80_OFFSET_SZ 118
+
+/* ucode macstat txfunflw offset */
+#define UCODEMSTAT_TXFUNFL_BLK ((0x70 * 2) + (0x76 * 2))
+
+/* MACSTAT offset to SHM address */
+#define MACSTAT_ADDR(x, offset) (M_PSM2HOST_STATS(x) + (offset))
+
+/** ucode mac statistic counters in shared memory, base addr defined in M_UCODE_MACSTAT1 */
+typedef struct macstat1 {
+ uint16 txndpa; /* + 0 (0x0) */
+ uint16 txndp; /* + 1*2 (0x2) */
+ uint16 txsf; /* + 2*2 (0x4) */
+ uint16 txcwrts; /* + 3*2 (0x6) */
+ uint16 txcwcts; /* + 4*2 (0x8) */
+ uint16 txbfm; /* + 5*2 (0xa) */
+ uint16 rxndpaucast; /* + 6*2 (0xc) */
+ uint16 bferptrdy; /* + 7*2 (0xe) */
+ uint16 rxsfucast; /* + 8*2 (0x10) */
+ uint16 rxcwrtsucast; /* + 9*2 (0x12) */
+ uint16 rxcwctsucast; /* +10*2 (0x14) */
+ uint16 rx20s; /* +11*2 (0x16) */
+ uint16 bcntrim; /* +12*2 (0x18) */
+ uint16 btc_rfact_l; /* +13*2 (0x1a) */
+ uint16 btc_rfact_h; /* +14*2 (0x1c) */
+ uint16 btc_txconf_l; /* +15*2 (0x1e) : cnt */
+ uint16 btc_txconf_h; /* +16*2 (0x20) : cnt */
+ uint16 btc_txconf_durl; /* +17*2 (0x22) : dur */
+ uint16 btc_txconf_durh; /* +18*2 (0x24) : dur */
+ uint16 rxsecrssi0; /* +19*2 (0x26) : high bin */
+ uint16 rxsecrssi1; /* +20*2 (0x28) : med bin */
+ uint16 rxsecrssi2; /* +21*2 (0x2a) : low bin */
+ uint16 rxpri_durl; /* +22*2 (0x2c) : dur */
+ uint16 rxpri_durh; /* +23*2 (0x2e) : dur */
+ uint16 rxsec20_durl; /* +24*2 (0x30) : dur */
+ uint16 rxsec20_durh; /* +25*2 (0x32) : dur */
+ uint16 rxsec40_durl; /* +26*2 (0x34) : dur */
+ uint16 rxsec40_durh; /* +27*2 (0x36) : dur */
+} macstat1_t;
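+
+/* Reader sketch: several macstat1 counters are exported as low/high uint16
+ * pairs (e.g. btc_rfact_l/_h). Ucode can advance the pair between two host
+ * reads, so re-sample the high word to guard against rollover; 'ms' is
+ * assumed to be a volatile view of the SHM block:
+ *
+ *	uint16 hi, lo;
+ *	do {
+ *		hi = ms->btc_rfact_h;
+ *		lo = ms->btc_rfact_l;
+ *	} while (hi != ms->btc_rfact_h);
+ *	rfact = ((uint32)hi << 16) | lo;
+ */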
+
+#define MX_UCODEX_MACSTAT (0x40 * 2)
+/* ucodex mac statistic counters in shared memory */
+#define MACXSTAT_OFFSET_SZ 6
+
+/* psm2 statistic counters in shared memory, base addr defined in MX_PSM2HOST_STATS */
+typedef enum {
+ MCXSTOFF_MACXSUSP = 0,
+ MCXSTOFF_M2VMSG = 1,
+ MCXSTOFF_V2MMSG = 2,
+ MCXSTOFF_MBOXOUT = 3,
+ MCXSTOFF_MUSND = 4,
+ MCXSTOFF_SFB2V = 5
+} macxstat_offset_t;
+
+/* dot11 core-specific control flags */
+#define SICF_MCLKE 0x0001 /* Mac core clock Enable */
+#define SICF_FCLKON 0x0002 /* Force clocks On */
+#define SICF_PCLKE 0x0004 /**< PHY clock enable */
+#define SICF_PRST 0x0008 /**< PHY reset */
+#define SICF_MPCLKE 0x0010 /**< MAC PHY clockcontrol enable */
+#define SICF_FREF 0x0020 /**< PLL FreqRefSelect (corerev >= 5) */
+/* NOTE: the following bw bits only apply when the core is attached
+ * to an NPHY (and corerev >= 11, which it always will be for NPHYs).
+ */
+#ifdef SICF_160M_BWMASK_DEF
+#define SICF_BWMASK(macrev) (D11REV_GE(macrev, 86) ? 0x00e0 : 0x00c0) /**< phy clkmsk */
+#define SICF_BW160(macrev) (D11REV_GE(macrev, 86) ? 0x0080 : 0x00c0) /**< 160MHz BW */
+#define SICF_BW80(macrev) (D11REV_GE(macrev, 86) ? 0x0060 : 0x00c0) /**< 80MHz BW */
+#define SICF_BW40(macrev) (D11REV_GE(macrev, 86) ? 0x0040 : 0x0080) /**< 40MHz BW */
+#define SICF_BW20(macrev) (D11REV_GE(macrev, 86) ? 0x0020 : 0x0040) /**< 20MHz BW */
+#define SICF_BW10(macrev) (D11REV_GE(macrev, 86) ? 0x0000 : 0x0000) /**< 10MHz BW */
+#else
+#define SICF_BWMASK 0x00c0 /**< phy clock mask (b6 & b7) */
+#define SICF_BW160 0x00c0 /**< 160MHz BW */
+#define SICF_BW80 0x00c0 /**< 80MHz BW */
+#define SICF_BW40 0x0080 /**< 40MHz BW (160MHz phyclk) */
+#define SICF_BW20 0x0040 /**< 20MHz BW (80MHz phyclk) */
+#define SICF_BW10 0x0000 /**< 10MHz BW (40MHz phyclk) */
+#endif
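+
+/* Usage sketch: with SICF_160M_BWMASK_DEF the BW field position depends on
+ * macrev, so callers clear the whole mask before setting a width;
+ * si_core_cflags() is the assumed core-flags accessor:
+ *
+ *	si_core_cflags(sih, SICF_BWMASK(macrev), SICF_BW80(macrev));
+ */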
+#define SICF_DAC 0x0300 /**< Highspeed DAC mode control field */
+#define SICF_GMODE 0x2000 /**< gmode enable */
+
+/* Macmode / Phymode / Opmode are sometimes used interchangeably even
+ * though they all mean the same thing. Going ahead with the HW
+ * signal name - using phymode from here on (even though we know it's
+ * a misnomer). Applicable to d11 corerev >= 50 ---- ACPHY only
+ */
+#define SICF_PHYMODE_SHIFT 16
+#define SICF_PHYMODE 0xf0000 /**< mask */
+
+#define SICF_160CLKSEL 0x100000u /* main phy clock speed selection */
+
+/* dot11 core-specific status flags */
+#define SISF_2G_PHY 0x0001 /**< 2.4G capable phy (corerev >= 5) */
+#define SISF_5G_PHY 0x0002 /**< 5G capable phy (corerev >= 5) */
+#define SISF_FCLKA 0x0004 /**< FastClkAvailable (corerev >= 5) */
+#define SISF_DB_PHY 0x0008 /**< Dualband phy (corerev >= 11) */
+
+/* === End of MAC reg, Beginning of PHY(b/a/g/n) reg, radio and LPPHY regs are separated === */
+
+/* Bits in phytest(0x0a): */
+#define TST_DDFS 0x2000
+#define TST_TXFILT1 0x0800
+#define TST_UNSCRAM 0x0400
+#define TST_CARR_SUPP 0x0200
+#define TST_DC_COMP_LOOP 0x0100
+#define TST_LOOPBACK 0x0080
+#define TST_TXFILT0 0x0040
+#define TST_TXTEST_ENABLE 0x0020
+#define TST_TXTEST_RATE 0x0018
+#define TST_TXTEST_PHASE 0x0007
+
+/* phytest txTestRate values */
+#define TST_TXTEST_RATE_1MBPS 0
+#define TST_TXTEST_RATE_2MBPS 1
+#define TST_TXTEST_RATE_5_5MBPS 2
+#define TST_TXTEST_RATE_11MBPS 3
+#define TST_TXTEST_RATE_SHIFT 3
+
+typedef struct shm_mbss_prq_entry_s shm_mbss_prq_entry_t;
+BWL_PRE_PACKED_STRUCT struct shm_mbss_prq_entry_s {
+ struct ether_addr ta;
+ uint8 prq_info[2];
+ uint8 time_stamp;
+ uint8 flags; /**< bit 0 HT STA Indication, bit 7:1 Reserved */
+} BWL_POST_PACKED_STRUCT;
+
+typedef enum shm_mbss_prq_ft_e {
+ SHM_MBSS_PRQ_FT_CCK,
+ SHM_MBSS_PRQ_FT_OFDM,
+ SHM_MBSS_PRQ_FT_MIMO,
+ SHM_MBSS_PRQ_FT_RESERVED
+} shm_mbss_prq_ft_t;
+
+#define SHM_MBSS_PRQ_FT_COUNT SHM_MBSS_PRQ_FT_RESERVED
+
+#define SHM_MBSS_PRQ_ENT_FRAMETYPE(entry) ((entry)->prq_info[0] & 0x3)
+#define SHM_MBSS_PRQ_ENT_UPBAND(entry) ((((entry)->prq_info[0] >> 2) & 0x1) != 0)
+
+/** What was the index matched? */
+#define SHM_MBSS_PRQ_ENT_UC_BSS_IDX(entry) (((entry)->prq_info[0] >> 2) & 0x3)
+#define SHM_MBSS_PRQ_ENT_PLCP0(entry) ((entry)->prq_info[1])
+
+/** Was this directed to a specific SSID or BSSID? If bit clear, quantity known */
+#define SHM_MBSS_PRQ_ENT_DIR_SSID(entry) \
+ ((((entry)->prq_info[0] >> 6) == 0) || ((entry)->prq_info[0] >> 6) == 1)
+#define SHM_MBSS_PRQ_ENT_DIR_BSSID(entry) \
+ ((((entry)->prq_info[0] >> 6) == 0) || ((entry)->prq_info[0] >> 6) == 2)
+
+#define SHM_MBSS_PRQ_ENT_TIMESTAMP(entry) ((entry)->time_stamp)
+/** Was the probe request from a ht STA or a legacy STA */
+#define SHM_MBSS_PRQ_ENT_HTSTA(entry) ((entry)->flags & 0x1)
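+
+/* Decode sketch for one PRQ fifo entry read out of shared memory; the
+ * accessor macros above do the bit extraction (how 'e' gets filled is
+ * elided):
+ *
+ *	shm_mbss_prq_entry_t e;
+ *	if (SHM_MBSS_PRQ_ENT_FRAMETYPE(&e) == SHM_MBSS_PRQ_FT_OFDM &&
+ *	    SHM_MBSS_PRQ_ENT_DIR_BSSID(&e) && !SHM_MBSS_PRQ_ENT_HTSTA(&e)) {
+ *		(a legacy STA probed our BSSID with an OFDM frame)
+ *	}
+ */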
+
+typedef struct d11ac_tso_s d11ac_tso_t;
+
+BWL_PRE_PACKED_STRUCT struct d11ac_tso_s {
+ uint8 flag[3];
+ uint8 sfh_hdr_offset;
+ uint16 tso_mss; /**< tso segment size */
+ uint16 msdu_siz; /**< msdu size */
+ uint32 tso_payload_siz; /**< total byte cnt in tcp payload */
+ uint16 ip_hdr_offset; /**< relative to the start of txd header */
+ uint16 tcp_hdr_offset; /**< relative to start of txd header */
+} BWL_POST_PACKED_STRUCT;
+
+/* toe_ctl TCP offload engine register definitions */
+#define TOE_CTL_DISAB (1u << 0)
+#define TOE_CTL_MASK (1u << 0)
+#define TOE_CTL_ENAB (0xFFFEu)
+#define TOE_CLK_GATING_DISAB (1u << 1)
+
+#define TSO_HDR_TOE_FLAG_OFFSET (0u)
+
+#define TOE_F0_HDRSIZ_NORMAL (1u << 0)
+#define TOE_F0_PASSTHROUGH (1u << 1)
+#define TOE_F0_TCPSEG_EN (1u << 3)
+#define TOE_F0_IPV4 (1u << 4)
+#define TOE_F0_IPV6 (1u << 5)
+#define TOE_F0_TCP (1u << 6)
+#define TOE_F0_UDP (1u << 7)
+
+#define TOE_F1_IPV4_CSUM_EN (1u << 0)
+#define TOE_F1_TCPUDP_CSUM_EN (1u << 1)
+#define TOE_F1_PSEUDO_CSUM_EN (1u << 2)
+#define TOE_F1_FRAG_ALLOW (1u << 5)
+#define TOE_F1_FRAMETYPE_1 (1u << 6)
+#define TOE_F1_FRAMETYPE_2 (1u << 7)
+#define TOE_F1_FT_MASK (TOE_F1_FRAMETYPE_1 | TOE_F1_FRAMETYPE_2)
+#define TOE_F1_FT_SHIFT (6u)
+
+#define TOE_F2_TXD_HEAD_SHORT (1u << 0)
+#define TOE_F2_EPOCH_SHIFT (1u)
+#define TOE_F2_EPOCH (1u << TOE_F2_EPOCH_SHIFT)
+#define TOE_F2_EPOCH_EXT (1u << 2)
+#define TOE_F2_EPOCH_EXT_MASK (TOE_F2_EPOCH | TOE_F2_EPOCH_EXT)
+#define TOE_F2_AMSDU_AGGR_EN (1u << 4)
+#define TOE_F2_AMSDU_CSUM_EN (1u << 5)
+#define TOE_F2_AMSDU_FS_MID (1u << 6)
+#define TOE_F2_AMSDU_FS_LAST (1u << 7)
+
+#define TOE_TXDMA_FLAGS_AMSDU_FIRST (0x14u)
+#define TOE_TXDMA_FLAGS_AMSDU_MID (0x24u)
+#define TOE_TXDMA_FLAGS_AMSDU_LAST (0x34u)
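+
+/* Population sketch: the flag[3] bytes of d11ac_tso_t carry the TOE_F0/F1/F2
+ * bits defined above; e.g. an IPv4 TCP frame requesting both checksum
+ * offloads ('tso' zero-initialized, header offsets elided):
+ *
+ *	tso.flag[0] = TOE_F0_HDRSIZ_NORMAL | TOE_F0_IPV4 | TOE_F0_TCP;
+ *	tso.flag[1] = TOE_F1_IPV4_CSUM_EN | TOE_F1_TCPUDP_CSUM_EN;
+ *	tso.flag[2] = 0;
+ */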
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#define SHM_BYT_CNT 0x2 /**< IHR location */
+#define MAX_BYT_CNT 0x600 /**< Maximum frame len */
+
+/* WOWL Template Regions */
+#define WOWL_NS_CHKSUM (0x57 * 2)
+#define WOWL_PSP_TPL_BASE (0x334 * 2)
+#define WOWL_GTK_MSG2 (0x434 * 2)
+#define WOWL_NS_OFFLOAD (0x634 * 2)
+#define T_KEEPALIVE_0 (0x6b4 * 2)
+#define T_KEEPALIVE_1 ((0x6b4 + 0x40) * 2)
+#define WOWL_ARP_OFFLOAD (0x734 * 2)
+#define WOWL_TX_FIFO_TXRAM_BASE (0x774 * 2) /**< conservative, leave 1KB for GTKM2 */
+
+/* template regions for 11ac */
+#define D11AC_WOWL_PSP_TPL_BASE (0x4c0 * 2)
+#define D11AC_WOWL_GTK_MSG2 (0x5c0 * 2) /**< for core rev >= 42 */
+#define WOWL_NS_OFFLOAD_GE42 (0x7c0 * 2)
+#define T_KEEPALIVE_0_GE42 (0x840 * 2)
+#define T_KEEPALIVE_1_GE42 ((0x840 + 0x40) * 2)
+#define WOWL_ARP_OFFLOAD_GE42 (0x8c0 * 2)
+#define D11AC_WOWL_TX_FIFO_TXRAM_BASE (0x900 * 2) /**< GTKM2 for core rev >= 42 */
+
+/* Event definitions */
+#define WOWL_MAGIC (1 << 0) /**< Wakeup on Magic packet */
+#define WOWL_NET (1 << 1) /**< Wakeup on Netpattern */
+#define WOWL_DIS (1 << 2) /**< Wakeup on loss-of-link due to Disassoc/Deauth */
+#define WOWL_RETR (1 << 3) /**< Wakeup on retrograde TSF */
+#define WOWL_BCN (1 << 4) /**< Wakeup on loss of beacon */
+#define WOWL_TST (1 << 5) /**< Wakeup after test */
+#define WOWL_M1 (1 << 6) /**< Wakeup after PTK refresh */
+#define WOWL_EAPID (1 << 7) /**< Wakeup after receipt of EAP-Identity Req */
+#define WOWL_PME_GPIO (1 << 8) /**< Wakeind via PME(0) or GPIO(1) */
+#define WOWL_NEEDTKIP1 (1 << 9) /**< need tkip phase 1 key to be updated by the driver */
+#define WOWL_GTK_FAILURE (1 << 10) /**< enable wakeup if GTK fails */
+#define WOWL_EXTMAGPAT (1 << 11) /**< support extended magic packets */
+#define WOWL_ARPOFFLOAD (1 << 12) /**< support ARP/NS offloading */
+#define WOWL_WPA2 (1 << 13) /**< read protocol version for EAPOL frames */
+#define WOWL_KEYROT (1 << 14) /**< If the bit is set, use key rotation */
+#define WOWL_BCAST (1 << 15) /**< If the bit is set, frm received was bcast frame */
+
+#define MAXBCNLOSS ((1 << 13) - 1) /**< max bcn loss value (13-bit field) */
+
+/* UCODE shm view:
+ * typedef struct {
+ * uint16 offset; // byte offset
+ * uint16 patternsize; // the length of value[.] in bytes
+ * uchar bitmask[MAXPATTERNSIZE/8]; // 16 bytes, the effect length is (patternsize+7)/8
+ * uchar value[MAXPATTERNSIZE]; // 128 bytes, the effect length is patternsize.
+ * } netpattern_t;
+ */
+#define NETPATTERNSIZE (148) /* 128 value + 16 mask + 2 offset + 2 patternsize */
+#define MAXPATTERNSIZE 128
+#define MAXMASKSIZE (MAXPATTERNSIZE / 8)
+
+/** Security Algorithm defines */
+#define WOWL_TSCPN_SIZE 6
+#define WOWL_TSCPN_COUNT 4 /**< 4 ACs */
+#define WOWL_TSCPN_BLK_SIZE (WOWL_TSCPN_SIZE * WOWL_TSCPN_COUNT)
+
+#define WOWL_SECSUITE_GRP_ALGO_MASK 0x0007
+#define WOWL_SECSUITE_GRP_ALGO_SHIFT 0
+#define WOWL_SECSUITE_ALGO_MASK 0x0700
+#define WOWL_SECSUITE_ALGO_SHIFT 8
+
+#define EXPANDED_KEY_RNDS 10
+#define EXPANDED_KEY_LEN 176 /* the expanded key from KEK (4*11*4, 16-byte state, 11 rounds) */
+
+/* Organization of Template RAM is as follows
+ * typedef struct {
+ * uint8 AES_XTIME9DBE[1024];
+ * uint8 AES_INVSBOX[256];
+ * uint8 AES_KEYW[176];
+ * } AES_TABLES_t;
+ */
+/* See dot11_firmware/diag/wmac_tcl/wmac_762_wowl_gtk_aes: proc write_aes_tables,
+ * for an example of writing those tables into the tx fifo buffer.
+ */
+
+typedef struct {
+ uint16 MacTxControlLow; /**< mac-tx-ctl-low word */
+ uint16 MacTxControlHigh; /**< mac-tx-ctl-high word */
+ uint16 PhyTxControlWord; /**< phy control word */
+ uint16 PhyTxControlWord_1; /**< extra phy control word for mimophy */
+ union {
+ uint16 XtraFrameTypes; /**< frame type for RTS/FRAG fallback (used only for AES) */
+ uint16 bssenc_pos; /**< BssEnc includes key ID , for corerev >= 42 */
+ } u1;
+ uint8 plcp[6]; /**< plcp of template */
+
+ uint16 mac_frmtype; /**< MAC frame type for GTK MSG2, can be
+ * dot11_data frame (0x20) or dot11_QoS_Data frame (0x22).
+ */
+ uint16 frm_bytesize; /**< number of bytes in the template, it includes:
+ * PLCP, MAC header, IV/EIV, the data payload
+ * (eth-hdr and EAPOL-Key), TKIP MIC
+ */
+ uint16 payload_wordoffset; /**< the word offset of the data payload */
+
+ /* ALIGN */
+ uint16 seqnum; /**< Sequence number for this frame */
+ uint8 seciv[18]; /**< 10-byte TTAK used for TKIP, 8-byte IV/EIV.
+ * See <SecurityInitVector> in the general tx descriptor.
+ */
+} wowl_templ_ctxt_t;
+
+#define WOWL_TEMPL_CTXT_LEN 42 /**< sizeof(wowl_templ_ctxt_t); ensures no PADs are needed */
+#define WOWL_TEMPL_CTXT_FRMTYPE_DATA 0x2
+#define WOWL_TEMPL_CTXT_FRMTYPE_QOS 0x22
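+
+/* Consistency sketch: a compile-time check that the context struct matches
+ * the ucode's 42-byte layout (STATIC_ASSERT is the assumed helper):
+ *
+ *	STATIC_ASSERT(sizeof(wowl_templ_ctxt_t) == WOWL_TEMPL_CTXT_LEN);
+ */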
+
+/** constant tables required for AES key unwrapping for key rotation */
+extern uint16 aes_invsbox[128];
+extern uint16 aes_xtime9dbe[512];
+
+#define MAX_MPDU_SPACE (D11_TXH_LEN + 1538)
+
+/* Bits in TXE_BMCCTL */
+#define BMCCTL_INITREQ_SHIFT 0
+#define BMC_CTL_DONE (1 << BMCCTL_INITREQ_SHIFT)
+#define BMCCTL_RESETSTATS_SHIFT 1
+#define BMCCTL_TXBUFSIZE_SHIFT 2
+#define BMCCTL_LOOPBACK_SHIFT 5
+#define BMCCTL_TXBUFSZ_MASK ((1 << BMCCTL_LOOPBACK_SHIFT) - (1 << BMCCTL_TXBUFSIZE_SHIFT))
+#define BMCCTL_CLKGATEEN_SHIFT 8
+
+/* Bits in TXE_BMCConfig */
+#define BMCCONFIG_BUFCNT_SHIFT 0
+#define BMCCONFIG_DISCLKGATE_SHIFT 13
+#define BMCCONFIG_BUFCNT_MASK ((1 << BMCCONFIG_DISCLKGATE_SHIFT) - (1 << BMCCONFIG_BUFCNT_SHIFT))
+
+/* Bits in TXE_BMCStartAddr */
+#define BMCSTARTADDR_STRTADDR_MASK 0x3ff
+
+/* Bits in TXE_BMCDescrLen */
+#define BMCDescrLen_ShortLen_SHIFT 0
+#define BMCDescrLen_LongLen_SHIFT 8
+
+/* Bits in TXE_BMCAllocCtl */
+#define BMCAllocCtl_AllocCount_SHIFT 0
+/* Rev==50 || Rev>52
+ * BMCAllocCtl.AllocCount [0:10]
+ * BMCAllocCtl.AllocThreshold [11:14]
+ * !Rev50
+ * BMCAllocCtl.AllocCount [0:7]
+ * BMCAllocCtl.AllocThreshold [8:15]
+ */
+#define BMCAllocCtl_AllocThreshold_SHIFT_Rev50 11
+#define BMCAllocCtl_AllocThreshold_SHIFT 8
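+
+/* Packing sketch for TXE_BMCAllocCtl per the layout comment above; 'rev',
+ * 'count' and 'thresh' are hypothetical locals:
+ *
+ *	shift = (rev == 50 || rev > 52) ? BMCAllocCtl_AllocThreshold_SHIFT_Rev50
+ *	                                : BMCAllocCtl_AllocThreshold_SHIFT;
+ *	alloc_ctl = (count << BMCAllocCtl_AllocCount_SHIFT) | (thresh << shift);
+ */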
+
+/* Bits in TXE_BMCCmd1 */
+#define BMCCMD1_TIDSEL_SHIFT 1
+#define BMCCMD1_RDSRC_SHIFT 6
+#define BMCCmd1_RXMapPassThru_SHIFT 12
+#define BMCCMD1_BQSelNum_SHIFT 1u
+#define BMCCMD1_BQSelType_SHIFT 7u
+#define BMCCMD1_RDSRC_Group0 0u /* register itself */
+#define BMCCMD1_RDSRC_Group1 1u /* staged max/min */
+#define BMCCMD1_RDSRC_Group2 2u /* staged max/previous min */
+#define BMCCMD1_RDSRC_Group3 3u /* active max/min */
+#define BMCCMD1_RDSRC_SHIFT_rev80 10u
+#define BMCCMD1_CoreSel_SHIFT 13u
+#define BMCCMD1_CoreSel_SHIFT_rev80 15u
+
+/* Bits in TXE_BMCCmd */
+#define BMCCmd_TIDSel_SHIFT 0
+#define BMCCmd_Enable_SHIFT 4
+#define BMCCmd_ReleasePreAlloc_SHIFT 5
+#define BMCCmd_ReleasePreAllocAll_SHIFT 6
+#define BMCCmd_UpdateBA_SHIFT 7
+#define BMCCmd_Consume_SHIFT 8
+#define BMCCmd_Aggregate_SHIFT 9
+#define BMCCmd_UpdateRetryCount_SHIFT 10
+#define BMCCmd_DisableTID_SHIFT 11
+
+#define BMCCmd_BQSelType_TX 0
+#define BMCCmd_BQSelType_RX 1
+#define BMCCmd_BQSelType_Templ 2
+
+/* Bits in TXE_BMCCMD for rev >= 80 */
+#define BMCCmd_BQSelType_MASK_Rev80 0x00c0
+#define BMCCmd_BQSelType_SHIFT_Rev80 6
+#define BMCCmd_Enable_SHIFT_rev80 8
+#define BMCCmd_ReleasePreAllocAll_SHIFT_rev80 10
+
+/* Bits in TXE_BMCCmd1 */
+#define BMCCmd1_Minmaxappall_SHIFT 0
+#define BMCCmd1_Minmaxlden_SHIFT 5
+#define BMCCmd1_Minmaxffszlden_SHIFT 8
+#define BMCCmd_Core1_Sel_MASK 0x2000
+
+/* Bits in TXE_BMCStatCtl */
+#define BMCStatCtl_TIDSel_SHIFT 0u
+#define BMCStatCtl_STATSel_SHIFT 4u
+#define BMCStatCtl_BQSelNum_SHIFT 0u
+#define BMCStatCtl_BQSelType_SHIFT 6u
+#define BMCStatCtl_STATSel_SHIFT_rev80 8u
+
+/* Bits in BMVpConfig */
+#define BMCVPConfig_SingleVpModePortA_SHIFT 4
+
+/* Bits in TXE_PsmMSDUAccess */
+#define PsmMSDUAccess_TIDSel_SHIFT 0
+#define PsmMSDUAccess_MSDUIdx_SHIFT 4
+#define PsmMSDUAccess_ReadBusy_SHIFT 14
+#define PsmMSDUAccess_WriteBusy_SHIFT 15
+
+/* Bits in TXE_PsmMSDUAccess for rev >= 80 */
+#define PsmMSDUAccess_BQSelType_SHIFT 5
+#define PsmMSDUAccess_MSDUIdx_SHIFT_rev80 7
+#define PsmMSDUAccess_BQSelType_Templ 2
+#define PsmMSDUAccess_BQSelType_TX 0
+
+#ifdef WLRSDB
+#define MAX_RSDB_MAC_NUM 2
+#else
+#define MAX_RSDB_MAC_NUM 1
+#endif
+#define MAX_MIMO_MAC_NUM 1
+
+#ifdef WL_SCAN_CORE
+#define MAX_MAC_CORE_NUM (MAX_RSDB_MAC_NUM + 1)
+#else
+#define MAX_MAC_CORE_NUM (MAX_RSDB_MAC_NUM)
+#endif /* WL_SCAN_CORE */
+
+#define MAC_CORE_UNIT_0 0x0u /**< First mac core unit */
+#define MAC_CORE_UNIT_1 0x1u /**< Second mac core unit */
+
+/* HW unit of scan core.
+ * This is used to overwrite the tunables specific to scan core
+ */
+#define SCAN_CORE_UNIT 0x2u
+
+/* Supported phymodes / macmodes / opmodes */
+#define SINGLE_MAC_MODE 0x0 /**< only single mac is enabled */
+#define DUAL_MAC_MODE 0x1 /**< enables dual mac */
+/* (JIRA: CRDOT11ACPHY-652) Following two #defines support
+ * exclusive reg access to core 0/1 in MIMO mode
+ */
+#define SUPPORT_EXCLUSIVE_REG_ACCESS_CORE0 0x2
+#define SUPPORT_EXCLUSIVE_REG_ACCESS_CORE1 0x4 /**< not functional in 4349A0 */
+#define SUPPORT_CHANNEL_BONDING 0x8 /**< enables channel bonding,
+ * supported in single mac mode only
+ */
+#define SCAN_CORE_ACTIVE 0x10 /* scan core enabled for background DFS */
+
+#define PHYMODE_MIMO (SINGLE_MAC_MODE)
+#define PHYMODE_80P80 (SINGLE_MAC_MODE | SUPPORT_CHANNEL_BONDING)
+#define PHYMODE_RSDB_SISO_0 (DUAL_MAC_MODE | SUPPORT_EXCLUSIVE_REG_ACCESS_CORE0)
+#define PHYMODE_RSDB_SISO_1 (DUAL_MAC_MODE | SUPPORT_EXCLUSIVE_REG_ACCESS_CORE1)
+#define PHYMODE_RSDB (PHYMODE_RSDB_SISO_0 | PHYMODE_RSDB_SISO_1)
+#define PHYMODE_BGDFS 31
+#define PHYMODE_3x3_1x1 31
+
+#define RX_INTR_FIFO_0 0x1 /**< FIFO-0 interrupt */
+#define RX_INTR_FIFO_1 0x2 /**< FIFO-1 interrupt */
+#define RX_INTR_FIFO_2 0x4 /**< FIFO-2 interrupt */
+
+#define MAX_RX_FIFO 3
+
+#define RX_CTL_FIFOSEL_SHIFT 8
+#define RX_CTL_FIFOSEL_MASK (0x3 << RX_CTL_FIFOSEL_SHIFT)
+
+#define RCO_EN (0x1u) /**< Receive checksum offload */
+
+/* MAC_PTM_CTRL1 bit definitions */
+#define PTM_RX_TMSTMP_CAPTURE_EN 0x0001u
+#define PTM_TX_TMSTMP_CAPTURE_EN 0x0001u
+#define PTM_TMSTMP_OVERRIDE_EN 0x1000u
+
+/* For corerev >= 64
+ * Additional DMA descriptor flags for AQM Descriptor. These are used in
+ * conjunction with the descriptor control flags defined in sbhnddma.h
+ */
+/* AQM DMA Descriptor control flags 1 */
+#define D64_AQM_CTRL1_SOFPTR 0x0000FFFF /* index of the descriptor which
+ * is the SOF descriptor in the DMA table
+ */
+#define D64_AQM_CTRL1_EPOCH 0x00010000 /* Epoch bit for the frame */
+#define D64_AQM_CTRL1_NUMD_MASK 0x00F00000 /* NumberofDescriptors(NUMD) */
+#define D64_AQM_CTRL1_NUMD_SHIFT 20
+#define D64_AQM_CTRL1_AC_MASK 0x0F000000 /* AC of the current frame */
+#define D64_AQM_CTRL1_AC_SHIFT 24
+
+/* AQM DMA Descriptor control flags 2 */
+#define D64_AQM_CTRL2_MPDULEN_MASK 0x00003FFF /* Length of the entire MPDU */
+#define D64_AQM_CTRL2_TXDTYPE 0x00080000 /* When set to 1 the long form of the
+ * TXD is used for the frame.
+ */
+/* For corerev >= 83
+ * DMA descriptor flags for AQM Descriptor. These are used in
+ * conjunction with the descriptor control flags defined in sbhnddma.h
+ */
+/* AQM DMA Descriptor control flags 1 */
+#define D11_REV83_AQM_DESC_CTRL1_SOFPTR 0x0000FFFFu /* index of the descriptor which
+ * is the SOF descriptor in the DMA table
+ */
+#define D11_REV83_AQM_DESC_CTRL1_EPOCH_SHIFT 16u
+#define D11_REV83_AQM_DESC_CTRL1_EPOCH (1u << D11_REV83_AQM_DESC_CTRL1_EPOCH_SHIFT)
+#define D11_REV83_AQM_DESC_CTRL1_EPOCH_EXT_SHIFT 17u
+#define D11_REV83_AQM_DESC_CTRL1_EPOCH_EXT (1u << \
+ D11_REV83_AQM_DESC_CTRL1_EPOCH_EXT_SHIFT)
+#define D11_REV83_AQM_DESC_CTRL1_EPOCH_MASK (D11_REV83_AQM_DESC_CTRL1_EPOCH | \
+ D11_REV83_AQM_DESC_CTRL1_EPOCH_EXT)
+#define D11_REV83_AQM_DESC_CTRL1_RESV1 0x00040000u /* RESERVED */
+#define D11_REV83_AQM_DESC_CTRL1_FRAGALLOW_SHIFT 19u /* Fragmentation allowance flag
+ * shift.
+ */
+#define D11_REV83_AQM_DESC_CTRL1_FRAGALLOW (1u << D11_REV83_AQM_DESC_CTRL1_FRAGALLOW_SHIFT)
+ /* Fragmentation allowance flag
+ * of the frame
+ */
+#define D11_REV83_AQM_DESC_CTRL1_NUMD_SHIFT 20u /* NumberofDescriptors(NUMD) */
+#define D11_REV83_AQM_DESC_CTRL1_NUMD_MASK (0xFu << D11_REV83_AQM_DESC_CTRL1_NUMD_SHIFT)
+#define D11_REV83_AQM_DESC_CTRL1_AC_SHIFT 24u /* AC of the current frame */
+#define D11_REV83_AQM_DESC_CTRL1_AC_MASK (0xFu << D11_REV83_AQM_DESC_CTRL1_AC_SHIFT)
+#define D11_REV83_AQM_DESC_CTRL1_ET 0x10000000u /* End of table */
+#define D11_REV83_AQM_DESC_CTRL1_IC 0x20000000u /* Interrupt on Completion */
+#define D11_REV83_AQM_DESC_CTRL1_RESV2 0x40000000u /* Used to be EF: End of frame,
+ * and would have been set to 1.
+ */
+#define D11_REV83_AQM_DESC_CTRL1_RESV3 0x80000000u /* Used to be SF: Start of Frame,
+ * and would have been set to 1
+ */
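+
+/* Composition sketch for AQM descriptor ctrl1 on corerev >= 83: a
+ * three-descriptor AC_BE MPDU with interrupt-on-completion ('sofptr' and
+ * 'ac' are hypothetical locals):
+ *
+ *	ctrl1 = (sofptr & D11_REV83_AQM_DESC_CTRL1_SOFPTR) |
+ *	        (3u << D11_REV83_AQM_DESC_CTRL1_NUMD_SHIFT) |
+ *	        ((uint32)ac << D11_REV83_AQM_DESC_CTRL1_AC_SHIFT) |
+ *	        D11_REV83_AQM_DESC_CTRL1_IC;
+ */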
+
+/* AQM DMA Descriptor control flags 2 */
+#define D11_REV83_AQM_DESC_CTRL2_MPDULEN_MASK 0x00003FFFu /* Length of the entire MPDU */
+#define D11_REV83_AQM_DESC_CTRL2_FTYPE_SHIFT 14u /* Frame Type, Indicate whether
+ * frame is Data, Management or
+ * Control Frame. 2 bits:
+ * 2'b00=Data, 2'b01=Management,
+ * 2'b10=Control, 2'b11=Invalid
+ * value
+ */
+#define D11_REV83_AQM_DESC_CTRL2_FTYPE_MASK (0x3u << D11_REV83_AQM_DESC_CTRL2_FTYPE_SHIFT)
+#define D11_REV83_AQM_DESC_CTRL2_PTXDLENIDX_SHIFT 16u /* pTxD length index in 4-deep table */
+#define D11_REV83_AQM_DESC_CTRL2_PTXDLENIDX_MASK (0x3u << \
+ D11_REV83_AQM_DESC_CTRL2_PTXDLENIDX_SHIFT)
+#define D11_REV83_AQM_DESC_CTRL2_PT 0x00040000u /* Parity bit. Choose a
+ * value such that the entire
+ * descriptor has even parity
+ */
+#define D11_REV83_AQM_DESC_CTRL2_USERIT 0x00080000u /* If set, the Rate Table Index and
+ * RIT entry are fetched into SHM by
+ * hardware. Otherwise, software
+ * uses pTxD to convey this
+ * information to ucode
+ */
+#define D11_REV83_AQM_DESC_CTRL2_USELIT 0x00100000u /* If set, the Link Info Table Index
+ * and LIT entry are fetched into
+ * SHM by hardware. Otherwise,
+ * software uses pTxD to convey this
+ * information to ucode
+ */
+#define D11_REV83_AQM_DESC_CTRL2_LIT_SHIFT 21u /* LTI(Link info Table Index) */
+#define D11_REV83_AQM_DESC_CTRL2_LIT_MASK (0x3Fu << D11_REV83_AQM_DESC_CTRL2_LIT_SHIFT)
+#define D11_REV83_AQM_DESC_CTRL2_RIT_SHIFT 27u /* bit[4:0] of RTI(Rate info Table Index) */
+#define D11_REV83_AQM_DESC_CTRL2_RIT_MASK (0x1Fu << D11_REV83_AQM_DESC_CTRL2_RIT_SHIFT)
+
+/* AQM DMA Descriptor control flags 3 */
+#define D11_REV86_AQM_DESC_CTRL3_RTI_BIT5 0x00000001u /* bit[5] of RTI (cont'd from ctrl2) */
+#define D11_REV86_AQM_DESC_CTRL3_RTI_BIT5_MASK 1u /* bit[5] of RTI (cont'd from ctrl2) */
+#define D11_REV86_AQM_DESC_CTRL3_RTI_BIT5_SHIFT 0u
+#define D11_REV83_AQM_DESC_CTRL3_AGGR_ID 0x0000000Eu /* Aggregation ID */
+#define D11_REV83_AQM_DESC_CTRL3_CO 0x00000010u /* Coherency */
+#define D11_REV84_AQM_DESC_CTRL3_TXDPTR_SHIFT 5u /* TxD ptr */
+#define D11_REV84_AQM_DESC_CTRL3_TXDPTR_MASK 0xFFFFFFu /* bit[23:0] of TxD addr */
+#define D11_REV86_AQM_DESC_CTRL3_TID_SHIFT 29u /* TID for BSR */
+#define D11_REV86_AQM_DESC_CTRL3_TID_MASK (0x7u << D11_REV86_AQM_DESC_CTRL3_TID_SHIFT)
+
+/* values for psm_patchcopy_ctrl (0x1AC) post corerev 60 */
+#define PSM_PATCHCC_PMODE_MASK (0x3)
+#define PSM_PATCHCC_PMODE_RAM (0) /* default */
+#define PSM_PATCHCC_PMODE_ROM_RO (1)
+#define PSM_PATCHCC_PMODE_ROM_PATCH (2)
+
+#define PSM_PATCHCC_PENG_TRIGGER_SHIFT (2)
+#define PSM_PATCHCC_PENG_TRIGGER_MASK (1 << PSM_PATCHCC_PENG_TRIGGER_SHIFT)
+#define PSM_PATCHCC_PENG_TRIGGER (1 << PSM_PATCHCC_PENG_TRIGGER_SHIFT)
+
+#define PSM_PATCHCC_PCTRL_RST_SHIFT (3)
+#define PSM_PATCHCC_PCTRL_RST_MASK (0x3 << PSM_PATCHCC_PCTRL_RST_SHIFT)
+#define PSM_PATCHCC_PCTRL_RST_RESET (0x0 << PSM_PATCHCC_PCTRL_RST_SHIFT)
+#define PSM_PATCHCC_PCTRL_RST_HW (0x1 << PSM_PATCHCC_PCTRL_RST_SHIFT)
+
+#define PSM_PATCHCC_COPYEN_SHIFT (5)
+#define PSM_PATCHCC_COPYEN_MASK (1 << PSM_PATCHCC_COPYEN_SHIFT)
+#define PSM_PATCHCC_COPYEN (1 << PSM_PATCHCC_COPYEN_SHIFT)
+
+#define PSM_PATCHCC_UCIMGSEL_SHIFT (16)
+#define PSM_PATCHCC_UCIMGSEL_MASK (0x30000)
+#define PSM_PATCHCC_UCIMGSEL_DS0 (0x00000) /* default image */
+#define PSM_PATCHCC_UCIMGSEL_DS1 (0x10000) /* image 1 */
+
+/* patch copy delay for psm: 2millisec */
+#define PSM_PATCHCOPY_DELAY (2000)
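+
+/* Kick-sequence sketch implied by the fields above (W_REG/OSL_DELAY are the
+ * assumed OSL helpers and the register field name follows the 0x1AC comment;
+ * the exact ordering is ucode-load specific):
+ *
+ *	W_REG(osh, &regs->psm_patchcopy_ctrl, PSM_PATCHCC_PMODE_ROM_PATCH |
+ *	      PSM_PATCHCC_COPYEN | PSM_PATCHCC_PENG_TRIGGER);
+ *	OSL_DELAY(PSM_PATCHCOPY_DELAY);
+ */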
+
+/* START - the definitions below were in d11_if_shm.h and could move to the
+ * auto shm. Some of them are offsets, but some are not provided by ucode
+ * [possibly legacy], so they are not covered by autoshm.
+ */
+
+/* Addr is byte address used by SW; offset is word offset used by uCode */
+
+/** Per AC TX limit settings */
+#define M_AC_TXLMT_ADDR(x, _ac) (M_AC_TXLMT_BLK(x) + (2 * (_ac)))
+
+/** delay from end of PLCP reception to RxTSFTime */
+#define M_APHY_PLCPRX_DLY 3
+#define M_BPHY_PLCPRX_DLY 4
+
+/* btcx debug shmem size */
+#define C_BTCX_DBGBLK_SZ 6 /**< Number of 16bit words */
+#define C_BTCX_DBGBLK2_SZ 11 /* size of statistics at 2nd SHM segment */
+
+#define C_BTCX_STATS_DBGBLK_SZ 18 /* total size of statistics at A2DP stats */
+#define C_BTCX_A2DP_PRI_SZ 6 /* size of a2dp priority counters stats */
+#define C_BTCX_A2DP_BUFCNT_SZ 8 /* size of a2dp buffer counters stats */
+#define C_BTCX_ANT_GRANT_SZ 4 /* size of ant granted duration to BT */
+#define C_BTCX_STATS_ECNTR_BLK_SZ C_BTCX_STATS_DBGBLK_SZ /* blk size for btcx ecounters */
+
+#define D11_DMA_CHANNELS 6
+
+/* WME shared memory */
+#define M_EDCF_STATUS_OFF(x) (0x007 * 2)
+
+/* Beacon-related parameters */
+#define M_BCN_LI(x) M_PS_MORE_DTIM_TBTT(x) /**< beacon listen interval */
+
+/* prerev 40 defines */
+#define D11_PRE40_M_SECKINDXALGO_BLK(x) (0x2ea * 2)
+
+/* corerev 40 defines */
+/* BLK SIZE needs to change for GE64 */
+#define D11_POST80_MAX_KEY_SIZE 32
+#define D11_PRE80_MAX_KEY_SIZE 16
+
+#define D11_MAX_KEY_SIZE(_corerev) ((D11REV_GE(_corerev, 80)) ? \
+ D11_POST80_MAX_KEY_SIZE : D11_PRE80_MAX_KEY_SIZE)
+
+#define M_SECKINDXALGO_BLK_SZ(_corerev) (AMT_SIZE(_corerev) + 4 /* default keys */)
+
+#define C_CTX_PCTLWD_POS (0x4 * 2)
+
+#define D11_MAX_TX_FRMS 32 /**< max frames allowed in tx fifo */
+
+/* Current channel number plus upper bits */
+#define D11_CURCHANNEL_5G 0x0100
+#define D11_CURCHANNEL_40 0x0200
+#define D11_CURCHANNEL_MAX 0x00FF
+
+#define INVALIDFID 0xffff
+
+#define D11_RT_DIRMAP_SIZE 16
+
+/** Rate table entry offsets */
+#define M_RT_PRS_PLCP_POS(x) 10
+#define M_RT_PRS_DUR_POS(x) 16
+#define M_RT_OFDM_PCTL1_POS(x) 18
+#define M_RT_TXPWROFF_POS(x) 20
+#define M_REV40_RT_TXPWROFF_POS(x) 14
+
+#define MIMO_MAXSYM_DEF 0x8000 /* 32k */
+#define MIMO_MAXSYM_MAX 0xffff /* 64k */
+
+#define WATCHDOG_8TU_DEF_LT42 5
+#define WATCHDOG_8TU_MAX_LT42 10
+#define WATCHDOG_8TU_DEF 3
+#define WATCHDOG_8TU_MAX 4
+
+#define M_PKTENG_RXAVGPWR_ANT(x, w) (M_MFGTEST_RXAVGPWR_ANT0(x) + (w) * 2)
+
+/* M_MFGTEST_NUM (pkt eng) bit definitions */
+#define MFGTEST_TXMODE 0x0001 /* TX frames indefinitely */
+#define MFGTEST_RXMODE 0x0002 /* RX frames */
+#define MFGTEST_RXMODE_ACK 0x0402 /* RX frames with sending ACKs back */
+#define MFGTEST_RXMODE_FWD2FW 0x8000 /* RX frames - forward packet to the fw */
+#define MFGTEST_TXMODE_FRMCNT 0x0101 /* TX frames by frmcnt */
+#define MFGTEST_RU_TXMODE 0x0011 /* RU frames TX indefinitely */
+#define MFGTEST_RU_TXMODE_FRMCNT 0x0111 /* RU TX frames by frmcnt */
+
+/* UOTA interface bit definitions */
+enum {
+ C_UOTA_CNTSRT_NBIT = 0, /* 0 OTA rx frame count start bit (14 LSB's) */
+ C_UOTA_RXFST_NBIT = 14, /* 14 indicating first frame */
+ C_UOTA_RSSION_NBIT = 15, /* 15 OTA rx ON bit position */
+};
+
+#define M_EDCF_QLEN(x) (M_EDCF_QINFO1_OFFSET(x))
+#define M_PWRIND_MAP(x, core) (M_PWRIND_BLKS(x) + ((core)<<1))
+
+#define M_BTCX_MAX_INDEX 320u
+#define M_BTCX_BACKUP_SIZE 130
+#define BTCX_AMPDU_MAX_DUR 2500
+
+#define ADDR_STAMON_NBIT (1 << 10) /* STA monitor bit in AMT_INFO_BLK entity */
+
+#ifdef WLP2P_UCODE
+
+/** The number of scheduling blocks */
+#ifdef BCMFUZZ /* need more for fuzzing */
+#define M_P2P_BSS_MAX 8
+#else
+#define M_P2P_BSS_MAX 4
+#endif /* BCMFUZZ */
+
+/** WiFi P2P interrupt block positions */
+#define M_P2P_I_BLK_SZ 4
+#define M_P2P_I_BLK_OFFSET(x) (M_P2P_INTR_BLK(x) - M_P2P_INTF_BLK(x))
+#define M_P2P_I_BLK(x, b) (M_P2P_I_BLK_OFFSET(x) + (M_P2P_I_BLK_SZ * (b) * 2))
+#define M_P2P_I(x, b, i) (M_P2P_I_BLK(x, b) + ((i) * 2))
+
+#define M_P2P_I_PRE_TBTT 0 /**< pretbtt, wake up just before beacon reception */
+#define M_P2P_I_CTW_END 1 /**< CTWindow ends */
+#define M_P2P_I_ABS 2 /**< absence period start, trigger for switching channels */
+#define M_P2P_I_PRS 3 /**< presence period starts */
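+
+/* Read sketch: the macros above yield byte offsets relative to the P2P
+ * interface block (word positions scaled by 2). Fetching the absence-start
+ * interrupt word for BSS 1, with wlc_read_shm() as the assumed accessor:
+ *
+ *	v = wlc_read_shm(wlc, M_P2P_INTF_BLK(wlc) + M_P2P_I(wlc, 1, M_P2P_I_ABS));
+ */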
+
+/** P2P hps flags */
+#define M_P2P_HPS_CTW(b) (1 << (b))
+#define M_P2P_HPS_NOA(b) (1 << ((b) + M_P2P_BSS_MAX))
+
+/** WiFi P2P address attribute block */
+#define M_ADDR_BMP_BLK_SZ 12
+#define M_ADDR_RANDMAC_BMP_BLK_SZ 40u
+
+#define M_ADDR_BMP_BLK(x, b) (M_ADDR_BMP_BLK_OFFSET(x) + ((b) * 2))
+
+#define ADDR_BMP_RA (1 << 0) /**< Receiver Address (RA) */
+#define ADDR_BMP_TA (1 << 1) /**< Transmitter Address (TA) */
+#define ADDR_BMP_BSSID (1 << 2) /**< BSSID */
+#define ADDR_BMP_AP (1 << 3) /**< Infra-BSS Access Point (AP) */
+#define ADDR_BMP_STA (1 << 4) /**< Infra-BSS Station (STA) */
+#define ADDR_BMP_P2P_DISC (1 << 5) /**< P2P Device */
+#define ADDR_BMP_P2P_GO (1 << 6) /**< P2P Group Owner */
+#define ADDR_BMP_P2P_GC (1 << 7) /**< P2P Client */
+#define ADDR_BMP_BSS_IDX_MASK (3 << 8) /**< BSS control block index */
+#define ADDR_BMP_BSS_IDX_SHIFT 8
+
+/** WiFi P2P address starts from this entry in RCMTA */
+#define P2P_ADDR_STRT_INDX (RCMTA_SIZE - M_ADDR_BMP_BLK_SZ)
+
+/* WiFi P2P per BSS control block positions.
+ * all time related fields are in units of (1<<P2P_UCODE_TIME_SHIFT)us unless noted otherwise.
+ */
+
+#define P2P_UCODE_TIME_SHIFT 7
+#define M_P2P_BSS_BLK_SZ 12
+#define M_P2P_BSS_BLK_OFFSET(x) (M_P2P_PERBSS_BLK(x) - M_P2P_INTF_BLK(x))
+#define M_P2P_BSS_BLK(x, b) (M_P2P_BSS_BLK_OFFSET(x) + (M_P2P_BSS_BLK_SZ * (b) * 2))
+#define M_P2P_BSS(x, b, p) (M_P2P_BSS_BLK(x, b) + (p) * 2)
+#define M_P2P_BSS_BCN_INT(x, b) (M_P2P_BSS_BLK(x, b) + (0 * 2)) /**< beacon interval */
+#define M_P2P_BSS_DTIM_PRD(x, b) (M_P2P_BSS_BLK(x, b) + (1 * 2)) /**< DTIM period */
+#define M_P2P_BSS_ST(x, b) (M_P2P_BSS_BLK(x, b) + (2 * 2)) /**< current state */
+#define M_P2P_BSS_N_PRE_TBTT(x, b) (M_P2P_BSS_BLK(x, b) + (3 * 2)) /**< next pretbtt time */
+#define M_P2P_BSS_CTW(x, b) (M_P2P_BSS_BLK(x, b) + (4 * 2)) /**< CTWindow duration */
+#define M_P2P_BSS_N_CTW_END(x, b) (M_P2P_BSS_BLK(x, b) + (5 * 2)) /**< next CTWindow end */
+#define M_P2P_BSS_NOA_CNT(x, b) (M_P2P_BSS_BLK(x, b) + (6 * 2)) /**< NoA count */
+#define M_P2P_BSS_N_NOA(x, b) (M_P2P_BSS_BLK(x, b) + (7 * 2)) /**< next absence time */
+#define M_P2P_BSS_NOA_DUR(x, b) (M_P2P_BSS_BLK(x, b) + (8 * 2)) /**< absence period */
+#define M_P2P_BSS_NOA_TD(x, b) (M_P2P_BSS_BLK(x, b) + (9 * 2))
+ /**< presence period (int - dur) */
+#define M_P2P_BSS_NOA_OFS(x, b) (M_P2P_BSS_BLK(x, b) + (10 * 2))
+ /* last 7 bits of interval in us */
+#define M_P2P_BSS_DTIM_CNT(x, b) (M_P2P_BSS_BLK(x, b) + (11 * 2))
+ /**< DTIM count */
+
+/* M_P2P_BSS_ST word positions. */
+#define M_P2P_BSS_ST_CTW (1 << 0) /**< BSS is in CTWindow */
+#define M_P2P_BSS_ST_SUPR (1 << 1) /**< BSS is suppressing frames */
+#define M_P2P_BSS_ST_ABS (1 << 2) /**< BSS is in absence period */
+#define M_P2P_BSS_ST_WAKE (1 << 3)
+#define M_P2P_BSS_ST_AP (1 << 4) /**< BSS is Infra-BSS AP */
+#define M_P2P_BSS_ST_STA (1 << 5) /**< BSS is Infra-BSS STA */
+#define M_P2P_BSS_ST_GO (1 << 6) /**< BSS is P2P Group Owner */
+#define M_P2P_BSS_ST_GC (1 << 7) /**< BSS is P2P Client */
+#define M_P2P_BSS_ST_IBSS (1 << 8) /**< BSS is an IBSS */
+#define M_P2P_BSS_ST_AWDL (1 << 9) /* BSS is AWDL */
+#define M_P2P_BSS_ST_NAN (1 << 10) /**< BSS is NAN */
+#define M_P2P_BSS_ST_MULTIDTIM (1 << 11) /* BSS is Multi-DTIM enabled */
+
+/** WiFi P2P TSF block positions */
+#define M_P2P_TSF_BLK_SZ 4
+#define M_P2P_TSF_BLK_OFFSET(x) (M_P2P_TSF_OFFSET_BLK(x) - M_P2P_INTF_BLK(x))
+#define M_P2P_TSF_BLK(x, b) (M_P2P_TSF_BLK_OFFSET(x) + (M_P2P_TSF_BLK_SZ * (b) * 2))
+#define M_P2P_TSF(x, b, w) (M_P2P_TSF_BLK(x, b) + (w) * 2)
+
+#define M_P2P_TSF_DRIFT_OFFSET(x) (M_P2P_TSF_DRIFT_WD0(x) - M_P2P_INTF_BLK(x))
+#define M_P2P_TSF_DRIFT(x, w) (M_P2P_TSF_DRIFT_OFFSET(x) + (w) * 2)
+
+#define M_P2P_GO_CHANNEL_OFFSET(x) (M_P2P_GO_CHANNEL(x) - M_P2P_INTF_BLK(x))
+#define M_P2P_GO_IND_BMP_OFFSET(x) (M_P2P_GO_IND_BMP(x) - M_P2P_INTF_BLK(x))
+
+/**
+ * M_P2P_GO_IND_BMP now has multiple fields:
+ * 7:0 - GO_IND_BMP
+ * 10:8 - BSS Index
+ * 15:11 - Reserved
+*/
+#define M_P2P_GO_IND_BMP_MASK (0xFF)
+#define M_P2P_BSS_INDEX_MASK (0x700)
+#define M_P2P_BSS_INDEX_SHIFT_BITS (8)
+
+/* per BSS PreTBTT */
+/* BOM 768.0 and above */
+#define M_P2P_PRE_TBTT_OFFSET(x) (M_P2P_PRETBTT_BLK(x) - M_P2P_INTF_BLK(x))
+#define M_P2P_PRE_TBTT(x, b) (M_P2P_PRE_TBTT_OFFSET(x) + ((b) * 2)) /**< in us */
+
+/* Reserve bottom of RCMTA for P2P Addresses */
+#define WSEC_MAX_RCMTA_KEYS (54 - M_ADDR_BMP_BLK_SZ)
+#else
+#define WSEC_MAX_RCMTA_KEYS 54
+#endif /* WLP2P_UCODE */
+
+#define TXCOREMASK 0x0F
+#define SPATIAL_SHIFT 8
+#define MAX_COREMASK_BLK 5
+#define COREMASK_BLK_TRIG_FRAMES (MAX_COREMASK_BLK + 1)
+
+#define BPHY_ONE_CORE_TX (1 << 15) /**< enable TX ant diversity for 11b frames */
+
+#define M_WLCX_CONFIG_EN(x) 0x1 /**< 1: enable wifi coex */
+#define M_WLCX_CONFIG_MASTER(x) 0x2 /**< 1: Coex Master(5357) */
+
+/* ucode debug status codes */
+#define DBGST_INACTIVE 0 /**< not valid really */
+#define DBGST_INIT 1 /**< after zeroing SHM, before suspending at init */
+#define DBGST_ACTIVE 2 /**< "normal" state */
+#define DBGST_SUSPENDED 3 /**< suspended */
+#define DBGST_ASLEEP 4 /**< asleep (PS mode) */
+#define DBGST_SLP2WAKE 7 /* On wake up path. */
+
+/**
+ * Defines for Self Mac address (used currently for CTS2SELF frames
+ * generated by BTCX ucode for protection purposes) in SHM. GE40 only.
+ */
+#define M_MYMAC_ADDR_L(x) (M_MYMAC_ADDR(x))
+#define M_MYMAC_ADDR_M(x) (M_MYMAC_ADDR(x) + (1*2))
+#define M_MYMAC_ADDR_H(x) (M_MYMAC_ADDR(x) + (2*2))
+
+/* Re-uses M_SSID */
+#define SHM_MBSS_BCNLEN0(x) M_SSID(x)
+
+#define SHM_MBSS_CLOSED_NET(x) (0x80) /**< indicates closed network */
+
+/** SSID Search Engine entries */
+#define SHM_MBSS_SSIDSE_BASE_ADDR(x) (0)
+#define SHM_MBSS_SSIDSE_BLKSZ(x) (36)
+#define SHM_MBSS_SSIDLEN_BLKSZ (4)
+#define SHM_MBSS_SSID_BLKSZ (32)
+
+/* END New for ucode template based mbss */
+
+/** Definitions for PRQ fifo data */
+
+#define SHM_MBSS_PRQ_ENTRY_BYTES 10 /**< Size of each PRQ entry */
+#define SHM_MBSS_PRQ_ENTRY_COUNT 12 /**< Number of PRQ entries */
+#define SHM_MBSS_PRQ_TOT_BYTES (SHM_MBSS_PRQ_ENTRY_BYTES * SHM_MBSS_PRQ_ENTRY_COUNT)
+
+#define M_WOWL_NOBCN (0x06c * 2) /**< loss of bcn value */
+
+#define M_KEK(x) (M_EAPOLMICKEY_BLK(x) + (0x10 * 2)) /**< KEK for WEP/TKIP */
+
+#define M_ARPRESP_BYTESZ_OFFSET 0 /**< 2 bytes; ARP resp pkt size */
+#define M_NA_BYTESZ_0_OFFSET 2 /**< 2 bytes ; NA pkt size */
+#define M_NA_BYTESZ_1_OFFSET 4 /**< 2 bytes ; NA pkt size */
+#define M_KEEPALIVE_BYTESZ_0_OFFSET 6 /**< 2 bytes; size of first keepalive */
+#define M_KEEPALIVE_BYTESZ_1_OFFSET 8 /**< 2 bytes; size of second keepalive */
+#define M_NPAT_ARPIDX_OFFSET 10 /**< 2 bytes; net pattern index of ARP */
+#define M_NPAT_NS0IDX_OFFSET 12 /**< 2 bytes; net pattern index of NS 0 */
+#define M_NPAT_NS1IDX_OFFSET 14 /**< 2 bytes; net pattern index of NS 1 */
+#define M_EXTWAKEPATTERN_0_OFFSET 16 /**< 6 bytes; ext magic pattern */
+#define M_EXTWAKEPATTERN_U0_OFFSET 22 /**< 8 bytes; unaligned ext magic pattern */
+#define M_KEEPALIVE_INTVL_0_OFFSET 30 /**< 2 bytes; in no of beacon intervals */
+#define M_KEEPALIVE_INTVL_1_OFFSET 32 /**< 2 bytes; in no of beacon intervals */
+
+#define M_COREMASK_BLK_WOWL_L30 (0x298 * 2)
+
+/* corerev > 29 && corerev < 40 */
+#define M_COREMASK_BLK_WOWL (0x7e8 *2)
+
+/* corerev >= 42 */
+#define D11AC_M_COREMASK_BLK_WOWL (0x1b0*2)
+
+#define M_EXTLNA_PWRSAVE(x) M_RADIO_PWR(x) /**< External LNA power control support */
+
+/* D11AC shm location changes */
+#define D11AC_T_NULL_TPL_BASE (0x16 * 2)
+#define D11AC_T_NULL_TPL_SIZE_BYTES (24)
+#define D11_T_BCN0_TPL_BASE T_BCN0_TPL_BASE
+#define D11AC_T_BCN0_TPL_BASE (0x100 * 2)
+#define D11_T_BCN1_TPL_BASE T_BCN1_TPL_BASE
+#define D11AC_T_BCN1_TPL_BASE (0x240 * 2)
+#define D11AC_T_GACT_TWT_INFO_TPL_BASE (0xB0 * 2)
+#define D11AC_T_GACT_TWT_INFO_TPL_SIZE_BYTES (36)
+
+/* The response (ACK/BA) phyctrl words */
+#define D11AC_RSP_TXPCTL0 (0x4c * 2)
+#define D11AC_RSP_TXPCTL1 (0x4d * 2)
+
+#define D11AC_T_PRS_TPL_BASE (0x380 * 2)
+
+#define D11_M_RT_PRS_PLCP_POS(x) M_RT_PRS_PLCP_POS(x)
+#define D11_M_RT_PRS_DUR_POS(x) M_RT_PRS_DUR_POS(x)
+#define D11AC_M_RT_PRS_PLCP_POS 8
+#define D11AC_M_RT_PRS_DUR_POS 12
+
+/* Field definitions for M_REV40_RT_TXPWROFF_POS */
+#define M_REV40_RT_HTTXPWR_OFFSET_MASK 0x01f8 /**< bit 8:3 */
+#define M_REV40_RT_HTTXPWR_OFFSET_SHIFT 3
+
+/* for axphy */
+#define M_REV80_RT_TXPWR_OFFSET_MASK 0xff00 /* bit 15:8 */
+#define M_REV80_RT_TXPWR_OFFSET_SHIFT 9 /* 8 (byte align) + 1 (convert from S5.1 to S5.2) */
+
+/* shmem locations for Beamforming */
+/* shmem defined with prefix M_ are in shmem */
+#define shm_addr(base, offset) (((base)+(offset))*2)
+
+#define C_BFI_REFRESH_THR_OFFSET (1u)
+#define C_BFI_NDPA_TXLMT_OFFSET (2u)
+#define C_BFI_NRXC_OFFSET (3u)
+#define C_BFI_MLBF_LUT_OFFSET (4u) // for corerev < 64 only
+
+#define C_BFI_BLK_SIZE(corerev) ((D11REV_GE(corerev, 86) ? 18u: 16u))
+
+/* BFI block definitions (Beamforming) */
+#define C_BFI_BFRIDX_POS (0)
+#define C_BFI_NDPA_TST_POS (1)
+#define C_BFI_NDPA_TXCNT_POS (2)
+#define C_BFI_NDPA_SEQ_POS (3)
+#define C_BFI_NDPA_FCTST_POS (4)
+#define C_BFI_BFRCTL_POS (5)
+#define C_BFI_BFR_CONFIG0_POS (6)
+#define C_BFI_BFE_CONFIG0_POS (7)
+#define C_BFI_BFE_MIMOCTL_POS (8)
+#define C_BFI_BSSID0_POS (9)
+#define C_BFI_BSSID1_POS (10)
+#define C_BFI_BSSID2_POS (11)
+#define C_BFI_STAINFO_POS (12)
+#define C_BFI_STAINFO1_POS (13)
+#define C_BFI_BFE_MYAID_POS (13) /* stainfo1 is mutually exclusive */
+#define C_BFI_BFMSTAT_POS (14)
+#define C_BFI_BFE_MIMOCTL_EXT_POS (15)
+/* below SHMs for rev >= 86 */
+#define C_BFI_BFE_11AXMIMOCTL_POS (16) /* phyreg bfeMimoCtlReg for 11AX */
+#define C_BFI_BFE_NDPNR_POS (17)
+/* used by BFR */
+#define C_BFI_STA_ADDR_POS C_BFI_BSSID0_POS
+
+/* to be removed -start */
+#define M_BFI_BLK_SIZE (16u)
+#define BFI_BLK_SIZE 18
+/* to be removed -end */
+
+/* Phy cache index Bit<8> indicates the validity. Cleared during TxBf link Init
+ * to trigger a new sounding sequence.
+ */
+#define C_BFRIDX_VLD_NBIT 8 /* valid */
+#define C_BFRIDX_EN_NBIT 7 /* BFI block is enabled (has valid info),
+ * applicable only for MU BFI block in shmemx
+ */
+#define C_BFRIDX_BW_NBIT 12
+
+#define C_STAINFO_FBT_NBIT 12 /* 0: SU; 1: MU */
+#define C_STAINFO_NCIDX_NBIT 13 /* Bits13-15: NC IDX; Reserved if Feedback Type is SU */
+
+/* NDP control blk */
+#define C_BFI_BFRCTL_POS_NDP_TYPE_SHIFT (0) /* 0: HT NDP; 1: VHT NDP; HE no need */
+#define C_BFI_BFRCTL_POS_NSTS_SHIFT (1) /* 0: 2ss; 1: 3ss; 2: 4ss */
+#define C_BFI_BFRCTL_POS_MLBF_SHIFT (4) /* 1 enable MLBF(used for corerev < 64) */
+#define C_BFI_BFRCTL_POS_BFM_SHIFT (8) /* Bits15-8: BFM mask for BFM frame tx */
+
+/** dynamic rflo ucode WAR defines */
+#define UCODE_WAR_EN 1
+#define UCODE_WAR_DIS 0
+
+/** LTE coex definitions */
+#define LTECX_FLAGS_LPBK_OFF 0
+
+/** LTECX shares BTCX shmem block */
+#define M_LTECX_BLK_PTR(x) M_BTCX_BLK_PTR(x)
+
+/** NR5GCX shares BTCX shmem block */
+#define M_NR5GCX_BLK_PTR(x) M_BTCX_BLK_PTR(x)
+
+/** RC1CX shares BTCX shmem block */
+#define M_RC1CX_BLK_PTR(x) M_BTCX_BLK_PTR(x)
+
+/** RC2CX shares BTCX shmem block */
+#define M_RC2CX_BLK_PTR(x) M_BTCX_BLK_PTR(x)
+
+/* CORE0 MODE */
+#define CORE0_MODE_RSDB 0x0
+#define CORE0_MODE_MIMO 0x1
+#define CORE0_MODE_80P80 0x2
+
+#define CORE1_MODE_RSDB 0x100
+
+#define HWACI_HOST_FLAG_ADDR (0x186)
+#define HWACI_SET_SW_MITIGATION_MODE (0x0008)
+
+/* split RX war shm locations */
+#define RXFIFO_0_OFFSET 0x1A0
+#define RXFIFO_1_OFFSET 0x19E
+#define HDRCONV_FIFO0_STSLEN 0x4 /* status length in header conversion mode */
+
+/* GE80:
+ * [15:8]: Phy status length
+ * [7:0]: Ucode status length
+ */
+#define DEFAULT_FIFO0_STSLEN(corerev, corerev_minor) \
+ (D11REV_MAJ_MIN_GE(corerev, corerev_minor, 87, 1) ? 0x2018 : \
+ D11REV_GE(corerev, 80) ? 0x2010: 0x24)
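+
+/* Decode sketch for the GE80 packed value per the field comment above:
+ *
+ *	uint16 stslen = DEFAULT_FIFO0_STSLEN(corerev, corerev_minor);
+ *	uint8 phystatus_len = (stslen >> 8) & 0xFF;	bits [15:8]
+ *	uint8 ucstatus_len = stslen & 0xFF;		bits [7:0]
+ */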
+
+/* M_ULP_WAKEIND bits */
+#define C_WATCHDOG_EXPIRY (1 << 0)
+#define C_FCBS_ERROR (1 << 1)
+#define C_RETX_FAILURE (1 << 2)
+#define C_HOST_WAKEUP (1 << 3)
+#define C_INVALID_FCBS_BLOCK (1 << 4)
+#define C_HUDI_DS1_EXIT (1 << 5)
+#define C_LOB_SLEEP (1 << 6)
+
+/* values for M_ULP_FEATURES */
+#define C_P2P_NOA (0x0001)
+#define C_INFINITE_NOA (0x0002)
+#define C_P2P_CTWIN (0x0004)
+#define C_P2P_GC (0x0008)
+#define C_BCN_TRIM (0x0010)
+#define C_BT_COEX (0x0020)
+#define C_LTE_COEX (0x0040)
+#define C_ADS1 (0x0080)
+#define C_LTECX_PSPOLL_PRIO_EN (0x0100)
+#define C_ULP_SLOWCAL_SKIP (0x0200)
+#define C_HUDI_ENABLE (0x0400)
+
+#define M_WOWL_ULP_SW_DAT_BLK (0xBFF * 2) /* ((0xFFF - 0x400) * 2) */
+#define M_WOWL_ULP_SW_DAT_BLK_MAX_SZ (0x400) /* 1024 bytes */
+
+/* M_TOF_FLAG bits */
+typedef enum {
+ TOF_RX_FTM_NBIT = 0,
+ TOF_SEQ_DISRXENTX_RFCTL = 1,
+ TOF_IS_TARGET = 2,
+ TOF_TPC_FREEZE = 3
+} eTOFFlags;
+
+/* TOF feature flags */
+#define M_UCODE_F2_TOF_BIT 7 /* part of features_2 shm */
+#define M_UCODE_F3_AVB_BIT 2 /* part of features_3 shm */
+#define M_UCODE_F3_SEQ_BIT 3 /* part of features_3 shm */
+
+/* New SHM definitions required for tsync-based time stamping of FTM frames.
+ * More details on the Confluence page below:
+ * http://confluence.broadcom.com/display/WLAN/NewUcodeInterfaceForProxdFeature
+ */
+#define FTM_TIMESTAMP_SHIFT 16
+#define TXS_ACK_INDEX_SHIFT 3
+#define FTM_ACK_TS_BLOCK_SIZE 3
+#define RXH_ACK_SHIFT(corerev) (D11REV_GE((corerev), 80) ? 12u:8u)
+#define FTM_INVALID_SHM_INDEX(corerev) (D11REV_GE((corerev), 80) ? 0x04u:0x0Fu)
+#define FTM_ACK_INDEX_MASK 0x0F
+#define NUM_UCODE_ACK_TS_BLKS 4
+
+#define FTM_TXSTATUS_ACK_RSPEC_BLOCK_MASK 0xFF
+#define FTM_TXSTATUS_ACK_RSPEC_BW_MASK 0x3
+#define FTM_TXSTATUS_ACK_RSPEC_BW_SHIFT 2
+#define FTM_TXSTATUS_ACK_RSPEC_BW_20 0
+#define FTM_TXSTATUS_ACK_RSPEC_BW_40 1
+#define FTM_TXSTATUS_ACK_RSPEC_BW_80 2
+#define FTM_TXSTATUS_ACK_RSPEC_BW_160 3
+#define FTM_TXSTATUS_ACK_RSPEC_TYPE_SHIFT 4
+#define FTM_TXSTATUS_ACK_RSPEC_TYPE_MASK 0x7
+#define FTM_TXSTATUS_ACK_RSPEC_TYPE_CCK 0
+#define FTM_TXSTATUS_ACK_RSPEC_TYPE_LEG 1 /* Legacy */
+#define FTM_TXSTATUS_ACK_RSPEC_TYPE_HT 2
+#define FTM_TXSTATUS_ACK_RSPEC_TYPE_VHT 3
+#define FTM_TXSTATUS_ACK_RSPEC_TYPE_HE 4
+#define FTM_TXSTATUS_ACK_RSPEC_RATE_6M(ackword) (ackword >> 7)
+/* The following are offsets into the M_DRVR_UCODE_IF_PTR block; the start
+ * address of the block itself is held in M_DRVR_UCODE_IF_PTR.
+ */
+#define M_ULP_FEATURES (0x0 * 2)
+
+/* M_HOST_FLAGS5 offset changed in ULP ucode */
+#define M_ULP_HOST_FLAGS5 (0x3d * 2)
+
+#define M_RADAR_REG_TMP (0x033 * 2)
+
+/* Bit masks for ClkGateUcodeReq2: Ucode MAC Clock Request2 (IHR Address 0x375) register */
+#define D11_FUNC16_MAC_CLOCKREQ_MASK (0x3)
+
+/*
+ * Clock gating registers
+ */
+#define CLKREQ_BLOCK 0
+#define CLKREQ_MAC_ILP 1
+#define CLKREQ_MAC_ALP 2
+#define CLKREQ_MAC_HT 3
+
+/* ClkGateSts */
+#define CLKGTE_FORCE_MAC_CLK_REQ_SHIFT 0
+#define CLKGTE_MAC_PHY_CLK_REQ_SHIFT 4
+
+/* ClkGateReqCtrl0 */
+#define CLKGTE_PSM_PATCHCOPY_CLK_REQ_SHIFT 0
+#define CLKGTE_RXKEEP_OCP_CLK_REQ_SHIFT 2
+#define CLKGTE_PSM_MAC_CLK_REQ_SHIFT 4
+#define CLKGTE_TSF_CLK_REQ_SHIFT 6
+#define CLKGTE_AQM_CLK_REQ_SHIFT 8
+#define CLKGTE_SERIAL_CLK_REQ_SHIFT 10
+#define CLKGTE_TX_CLK_REQ_SHIFT 12
+#define CLKGTE_POSTTX_CLK_REQ_SHIFT 14
+
+/* ClkGateReqCtrl1 */
+#define CLKGTE_RX_CLK_REQ_SHIFT 0
+#define CLKGTE_TXKEEP_OCP_CLK_REQ_SHIFT 2
+#define CLKGTE_HOST_RW_CLK_REQ_SHIFT 4
+#define CLKGTE_IHR_WR_CLK_REQ_SHIFT 6
+#define CLKGTE_TKIP_KEY_CLK_REQ_SHIFT 8
+#define CLKGTE_TKIP_MISC_CLK_REQ_SHIFT 10
+#define CLKGTE_AES_CLK_REQ_SHIFT 12
+#define CLKGTE_WAPI_CLK_REQ_SHIFT 14
+
+/* ClkGateReqCtrl2 */
+#define CLKGTE_WEP_CLK_REQ_SHIFT 0
+#define CLKGTE_PSM_CLK_REQ_SHIFT 2
+#define CLKGTE_MACPHY_CLK_REQ_BY_PHY_SHIFT 4
+#define CLKGTE_FCBS_CLK_REQ_SHIFT 6
+#define CLKGTE_HIN_AXI_MAC_CLK_REQ_SHIFT 8
+
+/* ClkGateStretch0 */
+#define CLKGTE_MAC_HT_CLOCK_STRETCH_SHIFT 0
+#define CLKGTE_MAC_ALP_CLOCK_STRETCH_SHIFT 8
+#define CLKGTE_MAC_HT_CLOCK_STRETCH_VAL 0x4
+
+/* ClkGateStretch1 */
+#define CLKGTE_MAC_PHY_CLOCK_STRETCH_SHIFT 13
+
+/* ClkGateMisc */
+#define CLKGTE_TPF_CLK_REQTHRESH 0xF
+#define CLKGTE_AQM_CLK_REQEXT 0x70
+
+/* ClkGateDivCtrl */
+#define CLKGTE_MAC_ILP_OFF_COUNT_MASK 0x0007
+#define CLKGTE_MAC_ILP_OFF_COUNT_SHIFT 0
+#define CLKGTE_MAC_ILP_ON_COUNT_MASK 0x0020
+#define CLKGTE_MAC_ILP_ON_COUNT_MASK_GE_REV80 0x0030
+#define CLKGTE_MAC_ALP_OFF_COUNT_MASK 0x03C0
+#define CLKGTE_MAC_ALP_OFF_COUNT_SHIFT 6
+
+/* ClkGatePhyClkCtrl */
+#define CLKGTE_PHY_MAC_PHY_CLK_REQ_EN_SHIFT 0
+#define CLKGTE_O2C_HIN_PHY_CLK_EN_SHIFT 1
+#define CLKGTE_HIN_PHY_CLK_EN_SHIFT 2
+#define CLKGTE_IHRP_PHY_CLK_EN_SHIFT 3
+#define CLKGTE_CCA_MAC_PHY_CLK_REQ_EN_SHIFT 4
+#define CLKGTE_TX_MAC_PHY_CLK_REQ_EN_SHIFT 5
+#define CLKGTE_HRP_MAC_PHY_CLK_REQ_EN_SHIFT 6
+#define CLKGTE_SYNC_MAC_PHY_CLK_REQ_EN_SHIFT 7
+#define CLKGTE_RX_FRAME_MAC_PHY_CLK_REQ_EN_SHIFT 8
+#define CLKGTE_RX_START_MAC_PHY_CLK_REQ_EN_SHIFT 9
+#define CLKGTE_FCBS_MAC_PHY_CLK_REQ_SHIFT 10
+#define CLKGTE_POSTRX_MAC_PHY_CLK_REQ_EN_SHIFT 11
+#define CLKGTE_DOT11_MAC_PHY_RXVALID_SHIFT 12
+#define CLKGTE_NOT_PHY_FIFO_EMPTY_SHIFT 13
+#define CLKGTE_DOT11_MAC_PHY_BFE_REPORT_DATA_READY 14
+#define CLKGTE_DOT11_MAC_PHY_CLK_BIT15 15
+
+/* ClkGateExtReq0 */
+#define CLKGTE_TOE_SYNC_MAC_CLK_REQ_SHIFT 0
+#define CLKGTE_TXBF_SYNC_MAC_CLK_REQ_SHIFT 2
+#define CLKGTE_HIN_SYNC_MAC_CLK_REQ_SHIFT 4
+#define CLKGTE_SLOW_SYNC_CLK_REQ_SHIFT 6
+#define CLKGTE_ERCX_SYNC_CLK_REQ_SHIFT 8
+#define CLKGTE_BTCX_SYNC_CLK_REQ_SHIFT 10
+#define CLKGTE_IFS_CRS_SYNC_CLK_REQ_SHIFT 12
+#define CLKGTE_IFS_GCI_SYNC_CLK_REQ_SHIFT 14
+
+#define CLKGTE_TOE_SYNC_MAC_CLK_REQ_80_SHIFT 2
+#define CLKGTE_TXBF_SYNC_MAC_CLK_REQ_80_SHIFT 4
+#define CLKGTE_HIN_SYNC_MAC_CLK_REQ_80_SHIFT 6
+#define CLKGTE_SLOW_SYNC_CLK_REQ_80_SHIFT 8
+#define CLKGTE_ERCX_SYNC_CLK_REQ_80_SHIFT 10
+#define CLKGTE_BTCX_SYNC_CLK_REQ_80_SHIFT 12
+#define CLKGTE_IFS_CRS_SYNC_CLK_REQ_80_SHIFT 14
+
+#define CLKGTE_TOE_SYNC_MAC_CLK_REQ_83_SHIFT 2
+#define CLKGTE_TXBF_SYNC_MAC_CLK_REQ_83_SHIFT 4
+#define CLKGTE_HIN_SYNC_MAC_CLK_REQ_83_SHIFT 6
+#define CLKGTE_SLOW_SYNC_CLK_REQ_83_SHIFT 8
+#define CLKGTE_ERCX_SYNC_CLK_REQ_83_SHIFT 10
+#define CLKGTE_BTCX2_SYNC_CLK_REQ_83_SHIFT 12
+#define CLKGTE_BTCX_SYNC_CLK_REQ_83_SHIFT 14
+
+/* ClkGateExtReq1 */
+#define CLKGTE_PHY_FIFO_SYNC_CLK_REQ_SHIFT 0
+#define CLKGTE_RXE_CHAN_SYNC_CLK_REQ_SHIFT 2
+#define CLKGTE_PMU_MDIS_SYNC_MAC_CLK_REQ_SHIFT 4
+#define CLKGTE_PSM_IPC_SYNC_CLK_REQ_SHIFT 6
+
+#define CLKGTE_IFS_GCI_SYNC_CLK_REQ_80_SHIFT 0
+#define CLKGTE_PHY_FIFO_SYNC_CLK_REQ_80_SHIFT 2
+#define CLKGTE_RXE_CHAN_SYNC_CLK_REQ_80_SHIFT 4
+#define CLKGTE_PMU_MDIS_SYNC_MAC_CLK_REQ_80_SHIFT 6
+#define CLKGTE_PSM_IPC_SYNC_CLK_REQ_80_SHIFT 8
+
+#define CLKGTE_IFS_CRS_SYNC_CLK_REQ_83_SHIFT 0
+#define CLKGTE_IFS_GCI_SYNC_CLK_REQ_83_SHIFT 2
+#define CLKGTE_PHY_FIFO_SYNC_CLK_REQ_83_SHIFT 4
+#define CLKGTE_RXE_CHAN_SYNC_CLK_REQ_83_SHIFT 6
+#define CLKGTE_PMU_MDIS_SYNC_MAC_CLK_REQ_83_SHIFT 8
+#define CLKGTE_PSM_IPC_SYNC_CLK_REQ_83_SHIFT 10
+
+/* PFE CtlStat1 register */
+#define PFE_CTLSTAT1_ROUTE_PFE_TO_BMSTAT (1u << 15u)
+#define PFE_CTLSTAT1_PFE_ENABLE (1u << 0u)
+
+/* PPR Ctrl1 register */
+#define PPR_CTMODE_SHIFT 8u
+#define PPR_CTMODE_MASK (3u << PPR_CTMODE_SHIFT)
+
+#define PPR_CTMODE_A (0u << PPR_CTMODE_SHIFT)
+#define PPR_CTMODE_B (1u << PPR_CTMODE_SHIFT)
+#define PPR_CTMODE_C (2u << PPR_CTMODE_SHIFT)
+
+/* Ptxd Len */
+#define PTXD_LEN0_SHIFT (0u)
+#define PTXD_LEN1_SHIFT (8u)
+#define PTXD_LEN2_SHIFT (0u)
+#define PTXD_LEN3_SHIFT (8u)
+/* =========== LHL regs =========== */
+/* WL ARM Timer0 Interrupt Status (lhl_wl_armtim0_st_adr) */
+#define LHL_WL_ARMTIM0_ST_WL_ARMTIM_INT_ST 0x00000001
+
+#define D11_AUTO_MEM_STBY_RET_SHIFT (4u)
+#define D11_AUTO_MEM_STBY_RET_83_SHIFT (5u)
+#define D11_AUTO_MEM_STBY_NON_RET_SHIFT (6u)
+#define D11_AUTO_MEM_STBY_BM_SHIFT (9u)
+
+#define D11_AUTO_MEM_STBY_RET_SHIFT_REV(d11rev) \
+ (((d11rev) >= 83) ? D11_AUTO_MEM_STBY_RET_83_SHIFT : D11_AUTO_MEM_STBY_RET_SHIFT)
+
+/* WiFi P2P TX stop timestamp block (only applicable with AC ucode) */
+#define P2P_TXSTOP_SHMPERBSS 2u /* 2 shmems per BSS */
+#define M_P2P_TXSTOP_TS(x, b, w) (M_P2P_TXSTOP_T_BLK(x) +\
+ (P2P_TXSTOP_SHMPERBSS * (b) + (w)) * 2)
+
+#define D11TXHDR_RATEINFO_ACCESS_VAL(txh, corerev, member) \
+ ((((txh)->corerev).RateInfo[3]).member)
+
+/* QoS + BSR information */
+#define D11_QOS_BSR_TIDQS_SHIFT 0u
+#define D11_QOS_BSR_TIDQS_SZ 8u
+#define D11_QOS_BSR_TIDQS_MASK (((1 << D11_QOS_BSR_TIDQS_SZ) - 1) << D11_QOS_BSR_TIDQS_SHIFT)
+
+#define D11_QOS_BSR_UV_SHIFT 8u
+#define D11_QOS_BSR_UV_SZ 6u
+#define D11_QOS_BSR_UV_MASK (((1 << D11_QOS_BSR_UV_SZ) - 1) << D11_QOS_BSR_UV_SHIFT)
+
+#define D11_QOS_BSR_SF_SHIFT 14u
+#define D11_QOS_BSR_SF_SZ 2u
+#define D11_QOS_BSR_SF_MASK (((1 << D11_QOS_BSR_SF_SZ) - 1) << D11_QOS_BSR_SF_SHIFT)
+
+/* Queue size in QoS control */
+#define D11_QOS_BSR_SF_0 0u
+#define D11_QOS_BSR_SF_1 1u
+#define D11_QOS_BSR_SF_2 2u
+#define D11_QOS_BSR_SF_3 3u
+
+#define D11_QS_OFFSET_SF_0 0u
+#define D11_QS_OFFSET_SF_1 1024u
+#define D11_QS_OFFSET_SF_2 17408u
+#define D11_QS_OFFSET_SF_3 148480u
+
+#define D11_QOS_BSR_SF_0_SHIFT 4u /* Scale: 16 bytes */
+#define D11_QOS_BSR_SF_1_SHIFT 8u /* Scale: 256 bytes */
+#define D11_QOS_BSR_SF_2_SHIFT 11u /* Scale: 2048 bytes */
+#define D11_QOS_BSR_SF_3_SHIFT 15u /* Scale: 32768 bytes */
+
+#define D11_MIN_QS_UV 0u
+#define D11_MAX_QS_UV 63u
+#define D11_MAX_QS_UV_SF3 ((D11_MAX_QS_UV) - 1)
+
+/* 1008: 16 * UV when the Scaling Factor subfield is 0 */
+#define D11_MAX_QS_SF_0 (D11_QS_OFFSET_SF_0 + (D11_MAX_QS_UV << D11_QOS_BSR_SF_0_SHIFT))
+/* 17152: 1024 + 256 * UV when the Scaling Factor subfield is 1 */
+#define D11_MAX_QS_SF_1 (D11_QS_OFFSET_SF_1 + (D11_MAX_QS_UV << D11_QOS_BSR_SF_1_SHIFT))
+/* 146432: 17408 + 2048 * UV when the Scaling Factor subfield is 2 */
+#define D11_MAX_QS_SF_2 (D11_QS_OFFSET_SF_2 + (D11_MAX_QS_UV << D11_QOS_BSR_SF_2_SHIFT))
+/* 2147328: 148480 + 32768 * UV when the Scaling Factor subfield is 3 */
+#define D11_MAX_QS_SF_3 (D11_QS_OFFSET_SF_3 + ((D11_MAX_QS_UV_SF3-1) << D11_QOS_BSR_SF_3_SHIFT))
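+
+/*
+ * Illustrative sketch (not part of the original header): decoding the queue
+ * size in bytes from a QoS control word using the mask/shift/offset macros
+ * above. d11_bsr_qs_bytes() is a hypothetical helper added for illustration.
+ */
+static uint32
+d11_bsr_qs_bytes(uint16 qos)
+{
+	uint32 sf = (qos & D11_QOS_BSR_SF_MASK) >> D11_QOS_BSR_SF_SHIFT;
+	uint32 uv = (qos & D11_QOS_BSR_UV_MASK) >> D11_QOS_BSR_UV_SHIFT;
+	const uint32 offset[] = {
+		D11_QS_OFFSET_SF_0, D11_QS_OFFSET_SF_1,
+		D11_QS_OFFSET_SF_2, D11_QS_OFFSET_SF_3
+	};
+	const uint32 shift[] = {
+		D11_QOS_BSR_SF_0_SHIFT, D11_QOS_BSR_SF_1_SHIFT,
+		D11_QOS_BSR_SF_2_SHIFT, D11_QOS_BSR_SF_3_SHIFT
+	};
+
+	/* e.g. sf = 1, uv = 63 -> 1024 + (63 << 8) = 17152 == D11_MAX_QS_SF_1 */
+	return offset[sf] + (uv << shift[sf]);
+}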
+
+/* 2 bits for HE signature and 4 bits for control ID */
+#define D11_BSR_HE_SIG_SHIFT 6u
+/* HE Variant with BSR control ID */
+#define D11_BSR_HE_SIG (0xf)
+#define D11_BSR_ACI_BMAP_SHIFT (0 + D11_BSR_HE_SIG_SHIFT)
+#define D11_BSR_DELTA_TID_SHIFT (4 + D11_BSR_HE_SIG_SHIFT)
+#define D11_BSR_SF_SHIFT (8 + D11_BSR_HE_SIG_SHIFT)
+#define D11_BSR_QUEUE_SIZE_HIGH_SHIFT (10 + D11_BSR_HE_SIG_SHIFT)
+#define D11_BSR_QUEUE_SIZE_ALL_SHIFT (18 + D11_BSR_HE_SIG_SHIFT)
+
+#define D11_BSR_DELTA_TID_ALLTID_SIGNATURE 3u
+
+#define D11_BSR_QUEUE_SIZE_WIDTH 8u
+#define D11_BSR_QUEUE_SIZE_WIDTH_VAL ((1 << D11_BSR_QUEUE_SIZE_WIDTH) - 1)
+#define D11_BSR_QUEUE_SIZE_UNKNOWN (255u)
+#define D11_BSR_QUEUE_SIZE_MAX (254u)
+#define D11_BSR_QUEUE_SIZE_HIGH_MASK (D11_BSR_QUEUE_SIZE_WIDTH_VAL <<\
+ D11_BSR_QUEUE_SIZE_HIGH_SHIFT)
+#define D11_BSR_QUEUE_SIZE_ALL_MASK (D11_BSR_QUEUE_SIZE_WIDTH_VAL <<\
+ D11_BSR_QUEUE_SIZE_ALL_SHIFT)
+
+#define D11_BSR_WD1_SHIFT 16u
+
+enum {
+ D11_BSR_SF_ID_16 = 0, /* 0 */
+ D11_BSR_SF_ID_256 = 1, /* 1 */
+ D11_BSR_SF_ID_2048 = 2, /* 2 */
+ D11_BSR_SF_ID_32768 = 3 /* 3 */
+};
+
+enum {
+ D11_PING_BLOCK_VALID = 0, /* 0 */
+ D11_PONG_BLOCK_VALID = 1, /* 1 */
+ D11_UC_READING_PING_BLOCK = 2, /* 2 */
+ D11_UC_READING_PONG_BLOCK = 3 /* 3 */
+};
+
+enum {
+ D11_BSR_TID0_POS = 0, /* 0 */
+ D11_BSR_TID1_POS = 1, /* 1 */
+ D11_BSR_TID2_POS = 2, /* 2 */
+ D11_BSR_TID3_POS = 3, /* 3 */
+ D11_BSR_TID4_POS = 4, /* 4 */
+ D11_BSR_TID5_POS = 5, /* 5 */
+ D11_BSR_TID6_POS = 6, /* 6 */
+ D11_BSR_TID7_POS = 7, /* 7 */
+ D11_BSR_WD0_POS = 8, /* 8 */
+ D11_BSR_WD1_POS = 9, /* 9 */
+};
+
+#define D11_IS_PING_PONG_IN_RESET(i) (((i) & ((1 << D11_PING_BLOCK_VALID) |\
+ (1 << D11_UC_READING_PING_BLOCK) | (1 << D11_PONG_BLOCK_VALID) |\
+ (1 << D11_UC_READING_PONG_BLOCK))) == 0)
+#define D11_PING_BLOCK_VALID_MASK ((1 << D11_PONG_BLOCK_VALID) |\
+ (1 << D11_UC_READING_PING_BLOCK))
+#define D11_PONG_BLOCK_VALID_MASK ((1 << D11_PING_BLOCK_VALID) |\
+ (1 << D11_UC_READING_PONG_BLOCK))
+#define D11_PING_PONG_UPDATE_MASK ((1 << D11_PING_BLOCK_VALID) |\
+ (1 << D11_PONG_BLOCK_VALID))
+#define D11_IS_PING_BLOCK_WRITABLE(i) (((i) & D11_PING_BLOCK_VALID_MASK) == \
+ (1 << D11_PONG_BLOCK_VALID))
+#define D11_IS_PONG_BLOCK_WRITABLE(i) (((i) & D11_PONG_BLOCK_VALID_MASK) == \
+ (1 << D11_PING_BLOCK_VALID))
+#define D11_SET_PING_BLOCK_VALID(i) (((i) & ~(1 << D11_PONG_BLOCK_VALID)) |\
+ (1 << D11_PING_BLOCK_VALID))
+#define D11_SET_PONG_BLOCK_VALID(i) (((i) & ~(1 << D11_PING_BLOCK_VALID)) |\
+ (1 << D11_PONG_BLOCK_VALID))
+#define D11_SET_PING_PONG_INVALID(i)	(((i) & ~(1 << D11_PING_BLOCK_VALID)) &\
+	~(1 << D11_PONG_BLOCK_VALID))
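+
+/*
+ * Illustrative sketch (not part of the original header): how a host writer
+ * might use the ping/pong handshake macros above before updating a shared
+ * block. 'flags' stands for a hypothetical shmem word read from the ucode.
+ */
+static uint16
+d11_pingpong_pick_block(uint16 flags, bool *use_ping)
+{
+	if (D11_IS_PING_PONG_IN_RESET(flags) || D11_IS_PING_BLOCK_WRITABLE(flags)) {
+		/* write the ping block, then mark it valid (pong becomes stale) */
+		*use_ping = TRUE;
+		return (uint16)D11_SET_PING_BLOCK_VALID(flags);
+	}
+	if (D11_IS_PONG_BLOCK_WRITABLE(flags)) {
+		*use_ping = FALSE;
+		return (uint16)D11_SET_PONG_BLOCK_VALID(flags);
+	}
+	return flags; /* neither block is writable right now; retry later */
+}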
+
+/* valid rx plcp check */
+#define PLCP_VALID(plcp) (((plcp)[0] | (plcp)[1] | (plcp)[2]) != 0)
+enum {
+ D11_TXTRIG_EN = 0, /* 0 */
+ D11_TXTRIG_PROG = 1, /* 1 */
+ D11_TXTRIG_DONE = 2, /* 2 */
+ D11_TXTRIG_TYPE = 4, /* 4 */
+};
+
+#define D11_SET_TXTRIG_EN (1 << D11_TXTRIG_EN)
+#define D11_TXTRIG_TYPE_MASK ((1 << D11_TXTRIG_TYPE) | (1 << (D11_TXTRIG_TYPE+1)))
+#define D11_SET_TXTRIG_TYPE(i) (((i) << D11_TXTRIG_TYPE) & D11_TXTRIG_TYPE_MASK)
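+
+/*
+ * Illustrative example (not part of the original header): composing a tx
+ * trigger word from the positions above, e.g. enabled with type 2:
+ *
+ *	uint16 trig = D11_SET_TXTRIG_EN | D11_SET_TXTRIG_TYPE(2);
+ *
+ * the type value occupies the two bits at positions D11_TXTRIG_TYPE and
+ * D11_TXTRIG_TYPE + 1.
+ */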
+
+enum {
+ D11_MUEDCA_AIFSN = 0, /* 0 */
+ D11_MUEDCA_CWMIN = 1, /* 1 */
+ D11_MUEDCA_CWMAX = 2, /* 2 */
+ D11_MUEDCA_TIMER = 3, /* 3 */
+ D11_MUEDCA_SU_AIFSN = 4, /* 4 */
+ D11_MUEDCA_SU_CWMIN = 5, /* 5 */
+ D11_MUEDCA_SU_CWMAX = 6, /* 6 */
+ D11_MUEDCA_EXPIRY_TSF = 7, /* 7 */
+ D11_MUEDCA_QINFO = 8, /* 8 */
+ D11_MUEDCA_STAT = 9, /* 9 */
+ D11_MUEDCA_BLK_SIZE = 10 /* 10 */
+};
+#define D11_MUEDCA_BLK(x, idx, offset) (M_MUEDCA_BLK((x)) +\
+ (idx * (D11_MUEDCA_BLK_SIZE << 1)) + (offset << 1))
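+
+/*
+ * Illustrative example (not part of the original header): the block macro
+ * above resolves one MU-EDCA parameter in shmem, e.g. the CWMIN word of
+ * access category 'ac' ('x' being the usual per-device context expected by
+ * M_MUEDCA_BLK()):
+ *
+ *	addr = D11_MUEDCA_BLK(x, ac, D11_MUEDCA_CWMIN);
+ *
+ * i.e. block base + ac * 10 words + 1 word, in byte offsets (each shmem
+ * word is 2 bytes, hence the '<< 1' scaling).
+ */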
+
+#define D11_BSSCOLOR_VALID_SHIFT 15u
+#define D11_BSSCOLOR_VALID_MASK (1 << D11_BSSCOLOR_VALID_SHIFT)
+
+#ifdef BCMPCIE_HP2P
+/* HP2P (High Priority P2P) shared memory EDCA parameters */
+typedef struct shm_hp2p_edca_params {
+ uint16 txop;
+ uint16 cwmin;
+ uint16 cwmax;
+ uint16 cwcur;
+ uint16 aifs;
+ uint16 bslots;
+ uint16 reggap;
+ uint16 status;
+} shm_hp2p_edca_params_t;
+
+#define HP2P_STATUS_NEWPARAMS (1u << 8u)
+#endif /* BCMPCIE_HP2P */
+
+#define MAX_D11_GPIOS 16
+
+/* Workaround register */
+#define WAR_TXDMA_NONMODIFIABLE_EN 0x00000010 /* For TxDMA initiated AXI reads */
+#define WAR_AQMDMA_NONMODIFIABLE_EN 0x00000020 /* For AQMDMA initiated AXI reads */
+
+/* Noise cal timeout when NAN is enabled.
+ * 54 * 256 = ~14ms.
+ * The smallest possible NAN CRB is 16ms; choose 14ms as the
+ * timeout to ensure noise cal happens within that 16ms window.
+ */
+#define M_NOISE_CALTIMEOUT_FOR_NAN 54u
+
+#define TXPU_CMD_SET 1u /**< txpu set command */
+
+#endif /* _D11_H */
diff --git a/bcmdhd.101.10.361.x/include/d11_cfg.h b/bcmdhd.101.10.361.x/include/d11_cfg.h
new file mode 100755
index 0000000..b45c951
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/d11_cfg.h
@@ -0,0 +1,115 @@
+/*
+ * Header file for splitrx mode definitions.
+ * Describes the different splitrx modes and the macros for classification
+ * and header conversion.
+ *
+ * Broadcom Proprietary and Confidential. Copyright (C) 2020,
+ * All Rights Reserved.
+ *
+ * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom;
+ * the contents of this file may not be disclosed to third parties,
+ * copied or duplicated in any form, in whole or in part, without
+ * the prior written permission of Broadcom.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Proprietary:>>
+ */
+
+#ifndef _d11_cfg_h_
+#define _d11_cfg_h_
+
+#ifdef USE_BCMCONF_H
+#include <bcmconf.h>
+#else
+#if defined(BCMDONGLEHOST) && !defined(WINNT)
+#define D11REV_IS(var, val) ((var) == (val))
+#define D11REV_GE(var, val) ((var) >= (val))
+#define D11REV_GT(var, val) ((var) > (val))
+#define D11REV_LT(var, val) ((var) < (val))
+#define D11REV_LE(var, val) ((var) <= (val))
+
+#define D11MINORREV_IS(var, val) ((var) == (val))
+#define D11MINORREV_GE(var, val) ((var) >= (val))
+#define D11MINORREV_GT(var, val) ((var) > (val))
+#define D11MINORREV_LT(var, val) ((var) < (val))
+#define D11MINORREV_LE(var, val) ((var) <= (val))
+
+#define D11REV_MAJ_MIN_GE(corerev, corerev_minor, maj, min) \
+ ((D11REV_IS((corerev), (maj)) && D11MINORREV_GE((corerev_minor), (min))) || \
+ D11REV_GT(corerev, (maj)))
+
+#endif /* BCMDONGLEHOST && !WINNT */
+#endif /* USE_BCMCONF_H */
+
+#define RXMODE0 0 /* no split */
+#define RXMODE1 1 /* descriptor split */
+#define RXMODE2 2 /* descriptor split + classification */
+#define RXMODE3 3 /* fifo split + classification */
+#define RXMODE4 4 /* fifo split + classification + hdr conversion */
+
+#ifdef BCMSPLITRX
+ extern bool _bcmsplitrx;
+#if defined(ROM_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD)
+ #define BCMSPLITRX_ENAB() (_bcmsplitrx)
+#elif defined(BCMSPLITRX_DISABLED)
+ #define BCMSPLITRX_ENAB() (0)
+#else
+ #define BCMSPLITRX_ENAB() (1)
+#endif
+
+ extern uint8 _bcmsplitrx_mode;
+#if defined(ROM_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD)
+ #define BCMSPLITRX_MODE() (_bcmsplitrx_mode)
+#elif defined(BCMSPLITRX_DISABLED)
+ #define BCMSPLITRX_MODE() (0)
+#else
+ #define BCMSPLITRX_MODE() (_bcmsplitrx_mode)
+#endif
+#else
+ #define BCMSPLITRX_ENAB() (0)
+ #define BCMSPLITRX_MODE() (0)
+#endif /* BCMSPLITRX */
+
+#define SPLIT_RXMODE1() ((BCMSPLITRX_MODE() == RXMODE1))
+#define SPLIT_RXMODE2() ((BCMSPLITRX_MODE() == RXMODE2))
+#define SPLIT_RXMODE3() ((BCMSPLITRX_MODE() == RXMODE3))
+#define SPLIT_RXMODE4() ((BCMSPLITRX_MODE() == RXMODE4))
+
+#define PKT_CLASSIFY() (SPLIT_RXMODE2() || SPLIT_RXMODE3() || SPLIT_RXMODE4())
+#define RXFIFO_SPLIT() (SPLIT_RXMODE3() || SPLIT_RXMODE4())
+#define HDR_CONV() (SPLIT_RXMODE4())
+#define HDRCONV_PAD 2
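+
+/*
+ * Illustrative sketch (not part of the original header): how an rx path
+ * might branch on the splitrx classification macros above. The mode numbers
+ * in the comments follow the RXMODE* definitions; the function name is
+ * hypothetical.
+ */
+static int
+splitrx_pad_example(void)
+{
+	int pad = 0;
+
+	if (RXFIFO_SPLIT()) {
+		/* modes 3/4: payload arrives on a split fifo */
+	}
+	if (PKT_CLASSIFY()) {
+		/* modes 2/3/4: hardware classification applies */
+	}
+	if (HDR_CONV()) {
+		/* mode 4 only: 802.11 -> 802.3 header conversion in hw;
+		 * account for the conversion padding
+		 */
+		pad = HDRCONV_PAD;
+	}
+	return pad;
+}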
+
+#define FRAG_CMN_MSG_HDROOM (16u) /* Common msg headroom required by PCIe to push txstatus */
+
+#if defined(FMF_LIT) && !defined(FMF_LIT_DISABLED)
+/* (188-4*24-16) required HEADROOM - 4 Rate info Block - CacheInfo */
+#define FRAG_HEADROOM_D11REV_GE83 76u
+#else
+#if (defined(WLC_TXDC) && !defined(WLC_TXDC_DISABLED)) || \
+ (defined(FMF_RIT) && !defined(FMF_RIT_DISABLED))
+#define FRAG_HEADROOM_D11REV_GE83 92u /* (188-4*24) required HEADROOM - 4 Rate info Block */
+#else
+/* required HEADROOM = PTXD (24) + LIT (16) + RIT (96)
+ + max dot11hdr (44)::
+ "FC+DUR+SEQ+A1+A2+A3"(24) + QOS(2) + max("HTC(4) + AES IV(8)", WAPI IV(18))
+ + MSDU data size (22):: SFH (14) + LLC (8)
+ - ETHER_HDR_LEN
+ */
+#define FRAG_HEADROOM_D11REV_GE83 188u
+#endif /* (WLC_TXDC && !WLC_TXDC_DISABLED) || (FMF_RIT && !FMF_RIT_DISABLED) */
+#endif /* defined(FMF_LIT) && !defined(FMF_LIT_DISABLED) */
+#define FRAG_HEADROOM_D11REV_LT80 226u /* TXOFF + amsdu header */
+#define FRAG_HEADROOM_D11REV_GE80 \
+ (FRAG_HEADROOM_D11REV_GE83 + 4u) /* + TSO_HEADER_PASSTHROUGH_LENGTH(4) */
+
+#ifdef USE_NEW_COREREV_API
+#define FRAG_HEAD_ROOM(corerev) (D11REV_GE(corerev, 83) ? \
+ FRAG_HEADROOM_D11REV_GE83 : D11REV_GE(corerev, 80) ? \
+ FRAG_HEADROOM_D11REV_GE80 : FRAG_HEADROOM_D11REV_LT80)
+#else
+#define FRAG_HEAD_ROOM(sih, coreid) ((si_get_corerev(sih, coreid) >= 83) ? \
+ FRAG_HEADROOM_D11REV_GE83 : ((si_get_corerev(sih, coreid) >= 80) ? \
+ FRAG_HEADROOM_D11REV_GE80 : FRAG_HEADROOM_D11REV_LT80))
+#endif
+
+#endif /* _d11_cfg_h_ */
diff --git a/bcmdhd.101.10.361.x/include/d11reglist_proto.h b/bcmdhd.101.10.361.x/include/d11reglist_proto.h
new file mode 100755
index 0000000..a7a0004
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/d11reglist_proto.h
@@ -0,0 +1,66 @@
+/* D11reglist prototype for Broadcom 802.11abgn
+ * Networking Adapter Device Drivers.
+ *
+ * Broadcom Proprietary and Confidential. Copyright (C) 2020,
+ * All Rights Reserved.
+ *
+ * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom;
+ * the contents of this file may not be disclosed to third parties,
+ * copied or duplicated in any form, in whole or in part, without
+ * the prior written permission of Broadcom.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Proprietary:>>
+ */
+#ifndef _d11reglist_proto_h_
+#define _d11reglist_proto_h_
+
+/* this is for dump_mac */
+enum {
+ D11REG_TYPE_IHR16 = 0,
+ D11REG_TYPE_IHR32 = 1,
+ D11REG_TYPE_SCR = 2,
+ D11REG_TYPE_SHM = 3,
+ D11REG_TYPE_TPL = 4,
+ D11REG_TYPE_GE64 = 5,
+ D11REG_TYPE_KEYTB = D11REG_TYPE_GE64,
+ D11REG_TYPE_IHRX16 = 6,
+ D11REG_TYPE_SCRX = 7,
+ D11REG_TYPE_SHMX = 8,
+ D11REG_TYPE_MAX = 9
+};
+
+#define D11REGTYPENAME { \
+ "ihr", "ihr", "scr", "shm", \
+ "tpl", "keytb", "ihrx", "scrx", \
+ "shmx" \
+}
+
+typedef struct _d11regs_bmp_list {
+ uint8 type;
+ uint16 addr;
+ uint32 bitmap;
+ uint8 step;
+ uint16 cnt; /* can be used together with bitmap or by itself */
+} d11regs_list_t;
+
+#define D11REG_BLK_SIZE 32
+typedef struct _d11regs_addr_list {
+ uint8 type;
+ uint16 cnt;
+ uint16 addr[D11REG_BLK_SIZE]; /* allow up to 32 per list */
+} d11regs_addr_t;
+
+typedef struct _d11obj_cache_t {
+ uint32 sel;
+ uint32 val;
+ uint16 addr32;
+ bool cache_valid;
+} d11obj_cache_t;
+
+typedef struct _svmp_list {
+ uint32 addr;
+ uint16 cnt;
+} svmp_list_t;
+
+#endif /* _d11reglist_proto_h_ */
diff --git a/bcmdhd.101.10.361.x/include/d11regs.h b/bcmdhd.101.10.361.x/include/d11regs.h
new file mode 100755
index 0000000..95b726f
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/d11regs.h
@@ -0,0 +1,180 @@
+/*
+ * Chip-specific hardware definitions for
+ * Broadcom 802.11abg Networking Device Driver
+ *
+ * Broadcom Proprietary and Confidential. Copyright (C) 2020,
+ * All Rights Reserved.
+ *
+ * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom;
+ * the contents of this file may not be disclosed to third parties,
+ * copied or duplicated in any form, in whole or in part, without
+ * the prior written permission of Broadcom.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Proprietary:>>
+ */
+
+#ifndef _D11REGS_H
+#define _D11REGS_H
+
+#include <typedefs.h>
+#include <sbhndpio.h>
+#include <sbhnddma.h>
+#include <sbconfig.h>
+
+#if !defined(BCMDONGLEHOST)
+#include <dot11mac_all_regs.h>
+#include <d11regs_comp.h>
+#endif
+
+#if defined(BCMDONGLEHOST) || defined(WL_UNITTEST)
+typedef struct {
+ uint32 pad;
+} d11regdefs_t;
+
+typedef volatile uint8 d11regs_t;
+typedef struct _d11regs_info {
+ uint32 pad;
+} d11regs_info_t;
+
+#else /* defined(BCMDONGLEHOST) || defined(WL_UNITTEST) */
+
+typedef volatile struct d11regs d11regs_t;
+
+typedef struct _d11regs_info {
+ d11regs_t *regs;
+} d11regs_info_t;
+
+#endif /* defined(BCMDONGLEHOST) || defined(WL_UNITTEST) */
+
+typedef volatile struct {
+ uint32 intstatus;
+ uint32 intmask;
+} intctrlregs_t;
+
+/**
+ * read: 32-bit register that can be read as one 32-bit or as two 16-bit accesses
+ * write: only the low 16-bit half can be written
+ */
+typedef volatile union {
+ uint32 pmqhostdata; /**< read only! */
+ struct {
+ uint16 pmqctrlstatus; /**< read/write */
+ uint16 PAD;
+ } w;
+} pmqreg_t;
+
+/** dma corerev >= 11 */
+typedef volatile struct {
+ dma64regs_t dmaxmt; /* dma tx */
+ pio4regs_t piotx; /* pio tx */
+ dma64regs_t dmarcv; /* dma rx */
+ pio4regs_t piorx; /* pio rx */
+} fifo64_t;
+
+/** indirect dma corerev >= 64 */
+typedef volatile struct {
+ dma64regs_t dma; /**< dma tx */
+ uint32 indintstatus;
+ uint32 indintmask;
+} ind_dma_t;
+
+/** indirect dma corerev 80, 81, 82 */
+typedef volatile struct {
+ uint32 indintstatus;
+ uint32 indintmask;
+ dma64regs_t dma; /**< dma tx, */
+} ind_dma_axc_t;
+
+/* access to register offsets and fields defined in dot11mac_all_regs.h */
+
+#define D11_REG_OFF(regname) \
+ dot11mac_##regname##_ADDR
+#define D11_REG_FIELD_MASK(regname, regfield) \
+ dot11mac_##regname##__##regfield##_MASK
+#define D11_REG_FIELD_SHIFT(regname, regfield) \
+ dot11mac_##regname##__##regfield##_SHIFT
+
+/* convert register offset to backplane address */
+
+#ifndef D11_REG_ADDR_CHK
+// #define D11_REG_ADDR_CHK
+#endif
+
+#ifdef D11_REG_ADDR_CHK
+#define D11_REG_ADDR_EXEMPT(regname) \
+ (D11_REG_OFF(regname) == D11_REG_OFF(PHY_REG_ADDR) || \
+ D11_REG_OFF(regname) == D11_REG_OFF(radioregaddr) || \
+ D11_REG_OFF(regname) == D11_REG_OFF(radioregdata) || \
+ D11_REG_OFF(regname) == D11_REG_OFF(OBJ_DATA) || \
+ 0)
+#define D11_REG32_ADDR(regbase, regname) \
+ ({ \
+ STATIC_ASSERT(D11_REG_ADDR_EXEMPT(regname) || D11_REG_OFF(regname) < 0x3e0); \
+ (volatile uint32 *)((uintptr)(regbase) + D11_REG_OFF(regname)); \
+ })
+#define D11_REG16_ADDR(regbase, regname) \
+ ({ \
+ STATIC_ASSERT(D11_REG_ADDR_EXEMPT(regname) || D11_REG_OFF(regname) >= 0x3e0); \
+ (volatile uint16 *)((uintptr)(regbase) + D11_REG_OFF(regname)); \
+ })
+#else /* !D11_REG_ADDR_CHK */
+#define D11_REG32_ADDR(regbase, regname) \
+ (volatile uint32 *)((uintptr)(regbase) + D11_REG_OFF(regname))
+#define D11_REG16_ADDR(regbase, regname) \
+ (volatile uint16 *)((uintptr)(regbase) + D11_REG_OFF(regname))
+#endif /* !D11_REG_ADDR_CHK */
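+
+/*
+ * Illustrative example (not part of the original header), using hypothetical
+ * register/field names: reading field FOO of 16-bit register BarStatus with
+ * the macros above would look like
+ *
+ *	uint16 v = R_REG(osh, D11_REG16_ADDR(regbase, BarStatus));
+ *	uint16 foo = (v & D11_REG_FIELD_MASK(BarStatus, FOO)) >>
+ *		D11_REG_FIELD_SHIFT(BarStatus, FOO);
+ *
+ * With D11_REG_ADDR_CHK defined, D11_REG16_ADDR() also statically asserts
+ * that the offset lies in the 16-bit register range (>= 0x3e0).
+ */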
+
+/* used in table */
+#define D11_REG32_ADDR_ENTRY(regbase, regname) \
+ (volatile uint32 *)((uintptr)(regbase) + D11_REG_OFF(regname))
+#define D11_REG16_ADDR_ENTRY(regbase, regname) \
+ (volatile uint16 *)((uintptr)(regbase) + D11_REG_OFF(regname))
+
+#ifndef D11_NEW_ACCESS_MACROS
+/* MOVED TO src/wl/sys/wlc_hw_priv.h */
+#define GET_MACINTSTATUS(osh, hw) R_REG((osh), D11_MACINTSTATUS(hw))
+#define SET_MACINTSTATUS(osh, hw, val) W_REG((osh), D11_MACINTSTATUS(hw), (val))
+#define GET_MACINTMASK(osh, hw) R_REG((osh), D11_MACINTMASK(hw))
+#define SET_MACINTMASK(osh, hw, val) W_REG((osh), D11_MACINTMASK(hw), (val))
+
+#define GET_MACINTSTATUS_X(osh, hw) R_REG((osh), D11_MACINTSTATUS_psmx(hw))
+#define SET_MACINTSTATUS_X(osh, hw, val) W_REG((osh), D11_MACINTSTATUS_psmx(hw), (val))
+#define GET_MACINTMASK_X(osh, hw) R_REG((osh), D11_MACINTMASK_psmx(hw))
+#define SET_MACINTMASK_X(osh, hw, val) W_REG((osh), D11_MACINTMASK_psmx(hw), (val))
+
+#define GET_MACINTSTATUS_EXT(osh, hw) R_REG((osh), D11_MACINTSTATUS_EXT(hw))
+#define SET_MACINTSTATUS_EXT(osh, hw, val) W_REG((osh), D11_MACINTSTATUS_EXT(hw), (val))
+#define GET_MACINTMASK_EXT(osh, hw) R_REG((osh), D11_MACINTMASK_EXT(hw))
+#define SET_MACINTMASK_EXT(osh, hw, val) W_REG((osh), D11_MACINTMASK_EXT(hw), (val))
+
+#define GET_MACINTSTATUS_EXT_X(osh, hw) R_REG((osh), D11_MACINTSTATUS_EXT_psmx(hw))
+#define SET_MACINTSTATUS_EXT_X(osh, hw, val) W_REG((osh), D11_MACINTSTATUS_EXT_psmx(hw), (val))
+#define GET_MACINTMASK_EXT_X(osh, hw) R_REG((osh), D11_MACINTMASK_EXT_psmx(hw))
+#define SET_MACINTMASK_EXT_X(osh, hw, val) W_REG((osh), D11_MACINTMASK_EXT_psmx(hw), (val))
+
+#define D11Reggrp_intctrlregs(hw, ix) ((intctrlregs_t*)(((volatile uint8*)D11_intstat0(hw)) + \
+ (sizeof(intctrlregs_t)*ix)))
+#define D11Reggrp_inddma(hw, ix) (D11REV_GE(hw->corerev, 86) ? \
+ ((ind_dma_t*)(((volatile uint8*)D11_ind_xmt_control(hw)) + (sizeof(ind_dma_t)*ix))) : \
+ ((ind_dma_t*)(((volatile uint8*)D11_inddma(hw)) + (sizeof(ind_dma_t)*ix))))
+#define D11Reggrp_inddma_axc(hw, ix) ((ind_dma_axc_t*)(((volatile uint8*)D11_inddma(hw)) + \
+ (sizeof(ind_dma_axc_t)*ix)))
+#define D11Reggrp_indaqm(hw, ix) (D11REV_GE(hw->corerev, 86) ? \
+ ((ind_dma_t*)(((volatile uint8*)D11_IndAQMctl(hw)) + (sizeof(ind_dma_t)*ix))) : \
+ ((ind_dma_t*)(((volatile uint8*)D11_indaqm(hw)) + (sizeof(ind_dma_t)*ix))))
+#define D11Reggrp_pmqreg(hw, ix) ((pmqreg_t*)(((volatile uint8*)D11_PMQHOSTDATA(hw)) + \
+ (sizeof(pmqreg_t)*ix)))
+#define D11Reggrp_f64regs(hw, ix) ((fifo64_t*)(((volatile uint8*)D11_xmt0ctl(hw)) + \
+ (sizeof(fifo64_t)*ix)))
+#define D11Reggrp_dmafifo(hw, ix) ((dma32diag_t*)(((volatile uint8*)D11_fifobase(hw)) + \
+ (sizeof(dma32diag_t)*ix)))
+#define D11Reggrp_intrcvlazy(hw, ix) ((volatile uint32*)(((volatile uint8*)D11_intrcvlzy0(hw)) + \
+ (sizeof(uint32)*ix)))
+#define D11Reggrp_altintmask(hw, ix) ((volatile uint32*)(((volatile uint8*)D11_alt_intmask0(hw)) + \
+ (sizeof(uint32)*ix)))
+#define D11REG_ISVALID(ptr, addr) ((volatile uint16 *)(addr) != \
+ ((volatile uint16 *) &((ptr)->regs->INVALID_ID)))
+#endif /* D11_NEW_ACCESS_MACROS */
+
+#endif /* _D11REGS_H */
diff --git a/bcmdhd.101.10.361.x/include/dbus.h b/bcmdhd.101.10.361.x/include/dbus.h
new file mode 100755
index 0000000..7761dff
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/dbus.h
@@ -0,0 +1,627 @@
+/*
+ * Dongle BUS interface Abstraction layer
+ * target serial buses like USB, SDIO, SPI, etc.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ */
+
+#ifndef __DBUS_H__
+#define __DBUS_H__
+
+#include "typedefs.h"
+#include <dhd_linux.h>
+
+extern uint dbus_msglevel;
+#define DBUS_ERROR_VAL 0x0001
+#define DBUS_TRACE_VAL 0x0002
+#define DBUS_INFO_VAL 0x0004
+
+#if defined(DHD_DEBUG)
+#define DBUSERR(args) do {if (dbus_msglevel & DBUS_ERROR_VAL) printf args;} while (0)
+#define DBUSTRACE(args) do {if (dbus_msglevel & DBUS_TRACE_VAL) printf args;} while (0)
+#define DBUSINFO(args) do {if (dbus_msglevel & DBUS_INFO_VAL) printf args;} while (0)
+#else /* defined(DHD_DEBUG) */
+#define DBUSERR(args)
+#define DBUSTRACE(args)
+#define DBUSINFO(args)
+#endif
+
+enum {
+ DBUS_OK = 0,
+ DBUS_ERR = -200,
+ DBUS_ERR_TIMEOUT,
+ DBUS_ERR_DISCONNECT,
+ DBUS_ERR_NODEVICE,
+ DBUS_ERR_UNSUPPORTED,
+ DBUS_ERR_PENDING,
+ DBUS_ERR_NOMEM,
+ DBUS_ERR_TXFAIL,
+ DBUS_ERR_TXTIMEOUT,
+ DBUS_ERR_TXDROP,
+ DBUS_ERR_RXFAIL,
+ DBUS_ERR_RXDROP,
+ DBUS_ERR_TXCTLFAIL,
+ DBUS_ERR_RXCTLFAIL,
+ DBUS_ERR_REG_PARAM,
+ DBUS_STATUS_CANCELLED,
+ DBUS_ERR_NVRAM,
+ DBUS_JUMBO_NOMATCH,
+ DBUS_JUMBO_BAD_FORMAT,
+ DBUS_NVRAM_NONTXT,
+ DBUS_ERR_RXZLP
+};
+
+#define ERR_CBMASK_TXFAIL 0x00000001
+#define ERR_CBMASK_RXFAIL 0x00000002
+#define ERR_CBMASK_ALL 0xFFFFFFFF
+
+#define DBUS_CBCTL_WRITE 0
+#define DBUS_CBCTL_READ 1
+#if defined(INTR_EP_ENABLE)
+#define DBUS_CBINTR_POLL 2
+#endif /* defined(INTR_EP_ENABLE) */
+
+#define DBUS_TX_RETRY_LIMIT 3 /* retries for failed txirb */
+#define DBUS_TX_TIMEOUT_INTERVAL 250 /* timeout for txirb complete, in ms */
+
+/*
+ * The max TCB/RCB data buffer size
+ * With USB RPC aggregation on,
+ * the rx buffer has to be a single big chunk of memory due to dongle->host aggregation.
+ * The upper layer has to do a byte copy to deaggregate the buffer to satisfy the
+ * WL driver's one-buffer-per-pkt requirement.
+ * Windows Vista may be able to use MDL to work around this requirement.
+ * The tx buffer has to be copied over from the RPC buffer since they are managed in
+ * different domains. Without the copy, DBUS and RPC would have to break the
+ * encapsulation, which is not implemented.
+ * An RPC aggregated buffer arrives as chained buffers; the byte copy needs to traverse
+ * the chain to form one continuous USB irb.
+ * These buffer sizes must accommodate the MAX rpc agg size in both directions:
+ * #define BCM_RPC_TP_DNGL_AGG_MAX_BYTE
+ * #define BCM_RPC_TP_HOST_AGG_MAX_BYTE
+ * Without USB RPC aggregation, these buffer sizes can be smaller, like the normal 2K,
+ * to fit a max tcp pkt (ETH_MAX_DATA_SIZE) + d11/phy/rpc overhead.
+ *
+ * The number of buffers needed is upper-layer dependent, e.g. rpc defines BCM_RPC_TP_DBUS_NTXQ
+ */
+#define DBUS_BUFFER_SIZE_TX 32000
+#define DBUS_BUFFER_SIZE_RX 24000
+
+#define DBUS_BUFFER_SIZE_TX_NOAGG 2048
+#define DBUS_BUFFER_SIZE_RX_NOAGG 2048
+
+/** DBUS types */
+enum {
+ DBUS_USB,
+ DBUS_SDIO,
+ DBUS_SPI,
+ DBUS_UNKNOWN
+};
+
+enum dbus_state {
+ DBUS_STATE_DL_PENDING,
+ DBUS_STATE_DL_DONE,
+ DBUS_STATE_UP,
+ DBUS_STATE_DOWN,
+ DBUS_STATE_PNP_FWDL,
+ DBUS_STATE_DISCONNECT,
+ DBUS_STATE_SLEEP,
+ DBUS_STATE_DL_NEEDED
+};
+
+enum dbus_pnp_state {
+ DBUS_PNP_DISCONNECT,
+ DBUS_PNP_SLEEP,
+ DBUS_PNP_RESUME
+};
+
+enum dbus_file {
+ DBUS_FIRMWARE,
+ DBUS_NVFILE
+};
+
+typedef enum _DEVICE_SPEED {
+ INVALID_SPEED = -1,
+ LOW_SPEED = 1, /**< USB 1.1: 1.5 Mbps */
+ FULL_SPEED, /**< USB 1.1: 12 Mbps */
+ HIGH_SPEED, /**< USB 2.0: 480 Mbps */
+ SUPER_SPEED, /**< USB 3.0: 4.8 Gbps */
+} DEVICE_SPEED;
+
+typedef struct {
+ int bustype;
+ int vid;
+ int pid;
+ int devid;
+	int chiprev;	/**< chip revision number */
+ int mtu;
+ int nchan; /**< Data Channels */
+ int has_2nd_bulk_in_ep;
+} dbus_attrib_t;
+
+/* FIX: Account for errors related to DBUS;
+ * Let upper layer account for packets/bytes
+ */
+typedef struct {
+ uint32 rx_errors;
+ uint32 tx_errors;
+ uint32 rx_dropped;
+ uint32 tx_dropped;
+} dbus_stats_t;
+
+/**
+ * Configurable BUS parameters
+ */
+enum {
+ DBUS_CONFIG_ID_RXCTL_DEFERRES = 1,
+ DBUS_CONFIG_ID_AGGR_LIMIT,
+ DBUS_CONFIG_ID_KEEPIF_ON_DEVRESET
+};
+
+typedef struct {
+ uint32 config_id;
+ union {
+ uint32 general_param;
+ bool rxctl_deferrespok;
+ struct {
+ int maxrxsf;
+ int maxrxsize;
+ int maxtxsf;
+ int maxtxsize;
+ } aggr_param;
+ };
+} dbus_config_t;
+
+/**
+ * External Download Info
+ */
+typedef struct dbus_extdl {
+ uint8 *fw;
+ int fwlen;
+ uint8 *vars;
+ int varslen;
+} dbus_extdl_t;
+
+struct dbus_callbacks;
+struct exec_parms;
+
+typedef void *(*probe_cb_t)(void *arg, const char *desc, uint32 bustype,
+ uint16 bus_no, uint16 slot, uint32 hdrlen);
+typedef void (*disconnect_cb_t)(void *arg);
+typedef void *(*exec_cb_t)(struct exec_parms *args);
+
+/** Client callbacks registered during dbus_attach() */
+typedef struct dbus_callbacks {
+ void (*send_complete)(void *cbarg, void *info, int status);
+ void (*recv_buf)(void *cbarg, uint8 *buf, int len);
+ void (*recv_pkt)(void *cbarg, void *pkt);
+ void (*txflowcontrol)(void *cbarg, bool onoff);
+ void (*errhandler)(void *cbarg, int err);
+ void (*ctl_complete)(void *cbarg, int type, int status);
+ void (*state_change)(void *cbarg, int state);
+ void *(*pktget)(void *cbarg, uint len, bool send);
+ void (*pktfree)(void *cbarg, void *p, bool send);
+} dbus_callbacks_t;
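+
+/*
+ * Illustrative sketch (not part of the original header): a minimal client
+ * callback table of the shape registered during dbus_attach(). Handler
+ * names are hypothetical; leaving unused hooks NULL is an assumption.
+ */
+static void example_send_complete(void *cbarg, void *info, int status) {}
+static void example_state_change(void *cbarg, int state) {}
+
+static dbus_callbacks_t example_dbus_cbs = {
+	.send_complete = example_send_complete,
+	.state_change = example_state_change,
+	/* remaining members left NULL */
+};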
+
+struct dbus_pub;
+struct bcmstrbuf;
+struct dbus_irb;
+struct dbus_irb_rx;
+struct dbus_irb_tx;
+struct dbus_intf_callbacks;
+
+typedef struct {
+ void* (*attach)(struct dbus_pub *pub, void *cbarg, struct dbus_intf_callbacks *cbs);
+ void (*detach)(struct dbus_pub *pub, void *bus);
+
+ int (*up)(void *bus);
+ int (*down)(void *bus);
+ int (*send_irb)(void *bus, struct dbus_irb_tx *txirb);
+ int (*recv_irb)(void *bus, struct dbus_irb_rx *rxirb);
+ int (*cancel_irb)(void *bus, struct dbus_irb_tx *txirb);
+ int (*send_ctl)(void *bus, uint8 *buf, int len);
+ int (*recv_ctl)(void *bus, uint8 *buf, int len);
+ int (*get_stats)(void *bus, dbus_stats_t *stats);
+ int (*get_attrib)(void *bus, dbus_attrib_t *attrib);
+
+ int (*pnp)(void *bus, int evnt);
+ int (*remove)(void *bus);
+ int (*resume)(void *bus);
+ int (*suspend)(void *bus);
+ int (*stop)(void *bus);
+ int (*reset)(void *bus);
+
+ /* Access to bus buffers directly */
+ void *(*pktget)(void *bus, int len);
+ void (*pktfree)(void *bus, void *pkt);
+
+ int (*iovar_op)(void *bus, const char *name, void *params, int plen, void *arg, int len,
+ bool set);
+ void (*dump)(void *bus, struct bcmstrbuf *strbuf);
+ int (*set_config)(void *bus, dbus_config_t *config);
+ int (*get_config)(void *bus, dbus_config_t *config);
+
+ bool (*device_exists)(void *bus);
+ int (*dlneeded)(void *bus);
+ int (*dlstart)(void *bus, uint8 *fw, int len);
+ int (*dlrun)(void *bus);
+ bool (*recv_needed)(void *bus);
+
+ void *(*exec_rxlock)(void *bus, exec_cb_t func, struct exec_parms *args);
+ void *(*exec_txlock)(void *bus, exec_cb_t func, struct exec_parms *args);
+
+ int (*tx_timer_init)(void *bus);
+ int (*tx_timer_start)(void *bus, uint timeout);
+ int (*tx_timer_stop)(void *bus);
+
+ int (*sched_dpc)(void *bus);
+ int (*lock)(void *bus);
+ int (*unlock)(void *bus);
+ int (*sched_probe_cb)(void *bus);
+
+ int (*shutdown)(void *bus);
+
+ int (*recv_stop)(void *bus);
+ int (*recv_resume)(void *bus);
+
+ int (*recv_irb_from_ep)(void *bus, struct dbus_irb_rx *rxirb, uint ep_idx);
+
+ int (*readreg)(void *bus, uint32 regaddr, int datalen, uint32 *value);
+
+ /* Add from the bottom */
+} dbus_intf_t;
+
+typedef struct dbus_pub {
+ struct osl_info *osh;
+ dbus_stats_t stats;
+ dbus_attrib_t attrib;
+ enum dbus_state busstate;
+ DEVICE_SPEED device_speed;
+ int ntxq, nrxq, rxsize;
+ void *bus;
+ struct shared_info *sh;
+ void *dev_info;
+} dbus_pub_t;
+
+#define BUS_INFO(bus, type) (((type *) bus)->pub->bus)
+
+#define ALIGNED_LOCAL_VARIABLE(var, align) \
+ uint8 buffer[SDALIGN+64]; \
+ uint8 *var = (uint8 *)(((uintptr)&buffer[0]) & ~(align-1)) + align;
+
+/*
+ * Public Bus Function Interface
+ */
+
+/*
+ * FIX: Is there a better way to pass OS/Host handles to DBUS while still
+ * maintaining a common interface for all OSes?
+ * Under NDIS, param1 needs to be MiniportHandle
+ * For NDIS60, param2 is WdfDevice
+ * Under Linux, param1 and param2 are NULL;
+ */
+extern int dbus_register(int vid, int pid, probe_cb_t prcb, disconnect_cb_t discb, void *prarg,
+ void *param1, void *param2);
+extern int dbus_deregister(void);
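+
+/*
+ * Illustrative sketch (not part of the original header): registering with
+ * DBUS. The callback bodies and the 0/0 vid/pid values are placeholders;
+ * per the note above, Linux passes NULL for param1 and param2.
+ */
+static void *example_probe(void *arg, const char *desc, uint32 bustype,
+	uint16 bus_no, uint16 slot, uint32 hdrlen)
+{
+	return arg; /* return a per-device context */
+}
+
+static void example_disconnect(void *arg) {}
+
+static int example_dbus_init(void *drv_ctx)
+{
+	return dbus_register(0, 0, example_probe, example_disconnect,
+		drv_ctx, NULL, NULL);
+}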
+
+//extern int dbus_download_firmware(dbus_pub_t *pub);
+//extern int dbus_up(struct dhd_bus *pub);
+extern int dbus_down(dbus_pub_t *pub);
+//extern int dbus_stop(struct dhd_bus *pub);
+extern int dbus_shutdown(dbus_pub_t *pub);
+extern void dbus_flowctrl_rx(dbus_pub_t *pub, bool on);
+
+extern int dbus_send_txdata(dbus_pub_t *dbus, void *pktbuf);
+extern int dbus_send_buf(dbus_pub_t *pub, uint8 *buf, int len, void *info);
+extern int dbus_send_pkt(dbus_pub_t *pub, void *pkt, void *info);
+//extern int dbus_send_ctl(struct dhd_bus *pub, uint8 *buf, int len);
+//extern int dbus_recv_ctl(struct dhd_bus *pub, uint8 *buf, int len);
+extern int dbus_recv_bulk(dbus_pub_t *pub, uint32 ep_idx);
+extern int dbus_poll_intr(dbus_pub_t *pub);
+extern int dbus_get_stats(dbus_pub_t *pub, dbus_stats_t *stats);
+extern int dbus_get_device_speed(dbus_pub_t *pub);
+extern int dbus_set_config(dbus_pub_t *pub, dbus_config_t *config);
+extern int dbus_get_config(dbus_pub_t *pub, dbus_config_t *config);
+extern void * dbus_get_devinfo(dbus_pub_t *pub);
+
+extern void *dbus_pktget(dbus_pub_t *pub, int len);
+extern void dbus_pktfree(dbus_pub_t *pub, void* pkt);
+
+extern int dbus_set_errmask(dbus_pub_t *pub, uint32 mask);
+extern int dbus_pnp_sleep(dbus_pub_t *pub);
+extern int dbus_pnp_resume(dbus_pub_t *pub, int *fw_reload);
+extern int dbus_pnp_disconnect(dbus_pub_t *pub);
+
+//extern int dbus_iovar_op(dbus_pub_t *pub, const char *name,
+// void *params, int plen, void *arg, int len, bool set);
+#ifdef BCMDBG
+extern void dbus_hist_dump(dbus_pub_t *pub, struct bcmstrbuf *b);
+#endif /* BCMDBG */
+
+extern void *dhd_dbus_txq(const dbus_pub_t *pub);
+extern uint dhd_dbus_hdrlen(const dbus_pub_t *pub);
+
+/*
+ * Private Common Bus Interface
+ */
+
+/** IO Request Block (IRB) */
+typedef struct dbus_irb {
+	struct dbus_irb *next;	/**< it is cast from a dbus_irb_tx or dbus_irb_rx struct */
+} dbus_irb_t;
+
+typedef struct dbus_irb_rx {
+ struct dbus_irb irb; /* Must be first */
+ uint8 *buf;
+ int buf_len;
+ int actual_len;
+ void *pkt;
+ void *info;
+ void *arg;
+} dbus_irb_rx_t;
+
+typedef struct dbus_irb_tx {
+ struct dbus_irb irb; /** Must be first */
+ uint8 *buf; /** mutually exclusive with struct member 'pkt' */
+ int len; /** length of field 'buf' */
+ void *pkt; /** mutually exclusive with struct member 'buf' */
+ int retry_count;
+ void *info;
+ void *arg;
+	void *send_buf;		/**< linear buffer for Linux when aggregation is enabled */
+} dbus_irb_tx_t;
+
+/**
+ * DBUS interface callbacks are different from user callbacks,
+ * so, internally, different info can be passed to the upper layer.
+ */
+typedef struct dbus_intf_callbacks {
+ void (*send_irb_timeout)(void *cbarg, dbus_irb_tx_t *txirb);
+ void (*send_irb_complete)(void *cbarg, dbus_irb_tx_t *txirb, int status);
+ void (*recv_irb_complete)(void *cbarg, dbus_irb_rx_t *rxirb, int status);
+ void (*errhandler)(void *cbarg, int err);
+ void (*ctl_complete)(void *cbarg, int type, int status);
+ void (*state_change)(void *cbarg, int state);
+ bool (*isr)(void *cbarg, bool *wantdpc);
+ bool (*dpc)(void *cbarg, bool bounded);
+ void (*watchdog)(void *cbarg);
+ void *(*pktget)(void *cbarg, uint len, bool send);
+ void (*pktfree)(void *cbarg, void *p, bool send);
+ struct dbus_irb* (*getirb)(void *cbarg, bool send);
+ void (*rxerr_indicate)(void *cbarg, bool on);
+} dbus_intf_callbacks_t;
+
+/*
+ * Porting: To support new bus, port these functions below
+ */
+
+/*
+ * Bus specific Interface
+ * Implemented by dbus_usb.c/dbus_sdio.c
+ */
+extern int dbus_bus_register(int vid, int pid, probe_cb_t prcb, disconnect_cb_t discb, void *prarg,
+ dbus_intf_t **intf, void *param1, void *param2);
+extern int dbus_bus_deregister(void);
+extern void dbus_bus_fw_get(void *bus, uint8 **fw, int *fwlen, int *decomp);
+
+/*
+ * Bus-specific and OS-specific Interface
+ * Implemented by dbus_usb_[linux/ndis].c/dbus_sdio_[linux/ndis].c
+ */
+extern int dbus_bus_osl_register(int vid, int pid, probe_cb_t prcb, disconnect_cb_t discb,
+ void *prarg, dbus_intf_t **intf, void *param1, void *param2);
+extern int dbus_bus_osl_deregister(void);
+
+/*
+ * Bus-specific, OS-specific, HW-specific Interface
+ * Mainly for SDIO Host HW controller
+ */
+extern int dbus_bus_osl_hw_register(int vid, int pid, probe_cb_t prcb, disconnect_cb_t discb,
+ void *prarg, dbus_intf_t **intf);
+extern int dbus_bus_osl_hw_deregister(void);
+
+extern uint usbdev_bulkin_eps(void);
+#if defined(BCM_REQUEST_FW)
+extern void *dbus_get_fw_nvfile(int devid, int chiprev, uint8 **fw, int *fwlen, int type,
+ uint16 boardtype, uint16 boardrev);
+extern void dbus_release_fw_nvfile(void *firmware);
+#endif /* #if defined(BCM_REQUEST_FW) */
+
+#if defined(EHCI_FASTPATH_TX) || defined(EHCI_FASTPATH_RX)
+/*
+ * Include file for the EHCI fastpath optimized USB.
+ * Practically all the lines below have equivalents in structures in other include (or even
+ * source) files. This violates all kinds of structure and layering, but cutting through layers is
+ * what the optimization is about. The definitions are NOT literally borrowed from any GPLd code;
+ * the file is intended to be GPL-clean
+ *
+ * Note that while some resemblance between this code and GPLd code in Linux might exist, it is
+ * due to the common sibling. See FreeBSD: head/sys/dev/usb/controller/ehci.h for the source of
+ * inspiration :-)
+ *
+ * The code assumes little endian throughout
+ */
+
+#if !defined(__linux__)
+#error "EHCI fastpath is for Linux only."
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+ /* Backward compatibility */
+ typedef unsigned int gfp_t;
+
+ #define dma_pool pci_pool
+ #define dma_pool_create(name, dev, size, align, alloc) \
+ pci_pool_create(name, dev, size, align, alloc, GFP_DMA | GFP_ATOMIC)
+ #define dma_pool_destroy(pool) pci_pool_destroy(pool)
+ #define dma_pool_alloc(pool, flags, handle) pci_pool_alloc(pool, flags, handle)
+ #define dma_pool_free(pool, vaddr, addr) pci_pool_free(pool, vaddr, addr)
+
+ #define dma_map_single(dev, addr, size, dir) pci_map_single(dev, addr, size, dir)
+ #define dma_unmap_single(dev, hnd, size, dir) pci_unmap_single(dev, hnd, size, dir)
+ #define DMA_FROM_DEVICE PCI_DMA_FROMDEVICE
+ #define DMA_TO_DEVICE PCI_DMA_TODEVICE
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) */
+
+/* Availability of these functions varies (when present, they have two arguments) */
+#ifndef hc32_to_cpu
+ #define hc32_to_cpu(x) le32_to_cpu(x)
+ #define cpu_to_hc32(x) cpu_to_le32(x)
+ typedef unsigned int __hc32;
+#else
+ #error Two-argument functions needed
+#endif
+
+/* Private USB opcode base */
+#define EHCI_FASTPATH 0x31
+#define EHCI_SET_EP_BYPASS EHCI_FASTPATH
+#define EHCI_SET_BYPASS_CB (EHCI_FASTPATH + 1)
+#define EHCI_SET_BYPASS_DEV (EHCI_FASTPATH + 2)
+#define EHCI_DUMP_STATE (EHCI_FASTPATH + 3)
+#define EHCI_SET_BYPASS_POOL (EHCI_FASTPATH + 4)
+#define EHCI_CLR_EP_BYPASS (EHCI_FASTPATH + 5)
+
+/*
+ * EHCI QTD structure (hardware and extension)
+ * NOTE that it does not need to (and does not) match its kernel counterpart
+ */
+#define EHCI_QTD_NBUFFERS 5
+#define EHCI_QTD_ALIGN 32
+#define EHCI_BULK_PACKET_SIZE 512
+#define EHCI_QTD_XACTERR_MAX 32
+
+struct ehci_qtd {
+ /* Hardware map */
+ volatile uint32_t qtd_next;
+ volatile uint32_t qtd_altnext;
+ volatile uint32_t qtd_status;
+#define EHCI_QTD_GET_BYTES(x) (((x)>>16) & 0x7fff)
+#define EHCI_QTD_IOC 0x00008000
+#define EHCI_QTD_GET_CERR(x) (((x)>>10) & 0x3)
+#define EHCI_QTD_SET_CERR(x) ((x) << 10)
+#define EHCI_QTD_GET_PID(x) (((x)>>8) & 0x3)
+#define EHCI_QTD_SET_PID(x) ((x) << 8)
+#define EHCI_QTD_ACTIVE 0x80
+#define EHCI_QTD_HALTED 0x40
+#define EHCI_QTD_BUFERR 0x20
+#define EHCI_QTD_BABBLE 0x10
+#define EHCI_QTD_XACTERR 0x08
+#define EHCI_QTD_MISSEDMICRO 0x04
+ volatile uint32_t qtd_buffer[EHCI_QTD_NBUFFERS];
+ volatile uint32_t qtd_buffer_hi[EHCI_QTD_NBUFFERS];
+
+ /* Implementation extension */
+ dma_addr_t qtd_self; /**< own hardware address */
+ struct ehci_qtd *obj_next; /**< software link to the next QTD */
+ void *rpc; /**< pointer to the rpc buffer */
+ size_t length; /**< length of the data in the buffer */
+ void *buff; /**< pointer to the reassembly buffer */
+ int xacterrs; /**< retry counter for qtd xact error */
+} __attribute__ ((aligned(EHCI_QTD_ALIGN)));
+
+#define EHCI_NULL __constant_cpu_to_le32(1) /* HW null pointer shall be odd */
+
+#define SHORT_READ_Q(token) (EHCI_QTD_GET_BYTES(token) != 0 && EHCI_QTD_GET_PID(token) == 1)
+
+/**
+ * Queue Head
+ * NOTE: This structure is slightly different from the one in the kernel, but needs to stay
+ * compatible.
+ */
+struct ehci_qh {
+ /* Hardware map */
+ volatile uint32_t qh_link;
+ volatile uint32_t qh_endp;
+ volatile uint32_t qh_endphub;
+ volatile uint32_t qh_curqtd;
+
+ /* QTD overlay */
+ volatile uint32_t ow_next;
+ volatile uint32_t ow_altnext;
+ volatile uint32_t ow_status;
+ volatile uint32_t ow_buffer [EHCI_QTD_NBUFFERS];
+ volatile uint32_t ow_buffer_hi [EHCI_QTD_NBUFFERS];
+
+ /* Extension (should match the kernel layout) */
+ dma_addr_t unused0;
+ void *unused1;
+ struct list_head unused2;
+ struct ehci_qtd *dummy;
+ struct ehci_qh *unused3;
+
+ struct ehci_hcd *unused4;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+ struct kref unused5;
+ unsigned unused6;
+
+ uint8_t unused7;
+
+ /* periodic schedule info */
+ uint8_t unused8;
+ uint8_t unused9;
+ uint8_t unused10;
+ uint16_t unused11;
+ uint16_t unused12;
+ uint16_t unused13;
+ struct usb_device *unused14;
+#else
+ unsigned unused5;
+
+ u8 unused6;
+
+ /* periodic schedule info */
+ u8 unused7;
+ u8 unused8;
+ u8 unused9;
+ unsigned short unused10;
+ unsigned short unused11;
+#define NO_FRAME ((unsigned short)~0)
+#ifdef EHCI_QUIRK_FIX
+ struct usb_device *unused12;
+#endif /* EHCI_QUIRK_FIX */
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */
+ struct ehci_qtd *first_qtd;
+ /* Link to the first QTD; this is an optimized equivalent of the qtd_list field */
+ /* NOTE that ehci_qh in ehci.h shall reserve this word */
+} __attribute__ ((aligned(EHCI_QTD_ALIGN)));
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+/** The corresponding structure in the kernel is used to get the QH */
+struct hcd_dev { /* usb_device.hcpriv points to this */
+ struct list_head unused0;
+ struct list_head unused1;
+
+ /* array of QH pointers */
+ void *ep[32];
+};
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) */
+
+int optimize_qtd_fill_with_rpc(const dbus_pub_t *pub, int epn, struct ehci_qtd *qtd, void *rpc,
+ int token, int len);
+int optimize_qtd_fill_with_data(const dbus_pub_t *pub, int epn, struct ehci_qtd *qtd, void *data,
+ int token, int len);
+int optimize_submit_async(struct ehci_qtd *qtd, int epn);
+void inline optimize_ehci_qtd_init(struct ehci_qtd *qtd, dma_addr_t dma);
+struct ehci_qtd *optimize_ehci_qtd_alloc(gfp_t flags);
+void optimize_ehci_qtd_free(struct ehci_qtd *qtd);
+void optimize_submit_rx_request(const dbus_pub_t *pub, int epn, struct ehci_qtd *qtd_in, void *buf);
+#endif /* EHCI_FASTPATH_TX || EHCI_FASTPATH_RX */
+
+void dbus_flowctrl_tx(void *dbi, bool on);
+#endif /* __DBUS_H__ */
diff --git a/bcmdhd.101.10.361.x/include/dhd_daemon.h b/bcmdhd.101.10.361.x/include/dhd_daemon.h
new file mode 100755
index 0000000..d0cb12d
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/dhd_daemon.h
@@ -0,0 +1,55 @@
+/*
+ * Header file for DHD daemon to handle timeouts
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef __BCM_DHDD_H__
+#define __BCM_DHDD_H__
+
+#include <brcm_nl80211.h>
+/**
+ * To maintain compatibility when the dhd driver and dhd daemon are taken from different
+ * branches, make sure to keep this file the same across the dhd driver and dhd apps branches.
+ * TODO: Make this file shared between apps and dhd.ko
+ */
+
+#define BCM_TO_MAGIC 0x600DB055
+#define NO_TRAP 0
+#define DO_TRAP 1
+
+typedef enum notify_dhd_daemon_reason {
+ REASON_COMMAND_TO,
+ REASON_OQS_TO,
+ REASON_SCAN_TO,
+ REASON_JOIN_TO,
+ REASON_DAEMON_STARTED,
+ REASON_DEVICE_TX_STUCK_WARNING,
+ REASON_DEVICE_TX_STUCK,
+ REASON_UNKOWN
+} notify_dhd_daemon_reason_t;
+
+typedef struct bcm_to_info {
+ int magic;
+ int reason;
+ int trap;
+} bcm_to_info_t;
+
+#endif /* __BCM_DHDD_H__ */
diff --git a/bcmdhd.101.10.361.x/include/dhdioctl.h b/bcmdhd.101.10.361.x/include/dhdioctl.h
new file mode 100755
index 0000000..6404b2c
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/dhdioctl.h
@@ -0,0 +1,478 @@
+/*
+ * Definitions for ioctls to access DHD iovars.
+ * Based on wlioctl.h (for Broadcom 802.11abg driver).
+ * (Moves towards generic ioctls for BCM drivers/iovars.)
+ *
+ * Definitions subject to change without notice.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _dhdioctl_h_
+#define _dhdioctl_h_
+
+#include <typedefs.h>
+
+/* Linux network driver ioctl encoding */
+typedef struct dhd_ioctl {
+ uint32 cmd; /* common ioctl definition */
+ void *buf; /* pointer to user buffer */
+ uint32 len; /* length of user buffer */
+ uint32 set; /* get or set request boolean (optional) */
+ uint32 used; /* bytes read or written (optional) */
+ uint32 needed; /* bytes needed (optional) */
+ uint32 driver; /* to identify target driver */
+} dhd_ioctl_t;
+
+/* Underlying BUS definition */
+enum {
+ BUS_TYPE_USB = 0, /* for USB dongles */
+ BUS_TYPE_SDIO, /* for SDIO dongles */
+ BUS_TYPE_PCIE /* for PCIE dongles */
+};
+
+typedef enum {
+ DMA_XFER_SUCCESS = 0,
+ DMA_XFER_IN_PROGRESS,
+ DMA_XFER_FAILED
+} dma_xfer_status_t;
+
+typedef enum d11_lpbk_type {
+ M2M_DMA_LPBK = 0,
+ D11_LPBK = 1,
+ BMC_LPBK = 2,
+ M2M_NON_DMA_LPBK = 3,
+ D11_HOST_MEM_LPBK = 4,
+ BMC_HOST_MEM_LPBK = 5,
+ M2M_WRITE_TO_RAM = 6,
+ M2M_READ_FROM_RAM = 7,
+ D11_WRITE_TO_RAM = 8,
+ D11_READ_FROM_RAM = 9,
+ MAX_LPBK = 10
+} dma_xfer_type_t;
+
+typedef struct dmaxfer_info {
+ uint16 version;
+ uint16 length;
+ dma_xfer_status_t status;
+ dma_xfer_type_t type;
+ uint src_delay;
+ uint dest_delay;
+ uint should_wait;
+ uint core_num;
+ int error_code;
+ uint32 num_bytes;
+ uint64 time_taken;
+ uint64 tput;
+} dma_xfer_info_t;
+
+#define DHD_DMAXFER_VERSION 0x1
+
+#define DHD_FILENAME_MAX 64
+#define DHD_PATHNAME_MAX 128
+
+#ifdef EFI
+struct control_signal_ops {
+ uint32 signal;
+ uint32 val;
+};
+enum {
+ WL_REG_ON = 0,
+ DEVICE_WAKE = 1,
+ TIME_SYNC = 2
+};
+
+typedef struct wifi_properties {
+ uint8 version;
+ uint32 vendor;
+ uint32 model;
+ uint8 mac_addr[6];
+ uint32 chip_revision;
+ uint8 silicon_revision;
+ uint8 is_powered;
+ uint8 is_sleeping;
+ char module_revision[16]; /* null terminated string */
+ uint8 is_fw_loaded;
+ char fw_filename[DHD_FILENAME_MAX]; /* null terminated string */
+ char nvram_filename[DHD_FILENAME_MAX]; /* null terminated string */
+ uint8 channel;
+ uint8 module_sn[6];
+} wifi_properties_t;
+
+#define DHD_WIFI_PROPERTIES_VERSION 0x1
+
+#define DHD_OTP_SIZE_WORDS 912
+
+typedef struct intr_poll_data {
+ uint16 version;
+ uint16 length;
+ uint32 type;
+ uint32 value;
+} intr_poll_t;
+
+typedef enum intr_poll_data_type {
+ INTR_POLL_DATA_PERIOD = 0,
+ INTR_POLL_DATA_NUM_PKTS_THRESH,
+ INTR_POLL_DATA_PKT_INTVL_THRESH
+} intr_poll_type_t;
+
+#define DHD_INTR_POLL_VERSION 0x1u
+#endif /* EFI */
+
+typedef struct tput_test {
+ uint16 version;
+ uint16 length;
+ uint8 direction;
+ uint8 tput_test_running;
+ uint8 mac_sta[6];
+ uint8 mac_ap[6];
+ uint8 PAD[2];
+ uint32 payload_size;
+ uint32 num_pkts;
+ uint32 timeout_ms;
+ uint32 flags;
+
+ uint32 pkts_good;
+ uint32 pkts_bad;
+ uint32 pkts_cmpl;
+ uint64 time_ms;
+ uint64 tput_bps;
+} tput_test_t;
+
+typedef enum {
+ TPUT_DIR_TX = 0,
+ TPUT_DIR_RX
+} tput_dir_t;
+
+/*
+ * Current supported roles considered for policy management are AP, P2P and NAN.
+ * Hence max value is limited to 3.
+ */
+#define DHD_MAX_IFACE_PRIORITY 3u
+typedef enum dhd_iftype {
+ DHD_IF_TYPE_STA = 0,
+ DHD_IF_TYPE_AP = 1,
+
+#ifdef DHD_AWDL
+ DHD_IF_TYPE_AWDL = 2,
+#endif /* DHD_AWDL */
+
+ DHD_IF_TYPE_NAN_NMI = 3,
+ DHD_IF_TYPE_NAN = 4,
+ DHD_IF_TYPE_P2P_GO = 5,
+ DHD_IF_TYPE_P2P_GC = 6,
+ DHD_IF_TYPE_P2P_DISC = 7,
+ DHD_IF_TYPE_IBSS = 8,
+ DHD_IF_TYPE_MONITOR = 9,
+ DHD_IF_TYPE_AIBSS = 10,
+ DHD_IF_TYPE_MAX
+} dhd_iftype_t;
+
+typedef struct dhd_iface_mgmt_data {
+ uint8 policy;
+ uint8 priority[DHD_IF_TYPE_MAX];
+} dhd_iface_mgmt_data_t;
+
+typedef enum dhd_iface_mgmt_policy {
+ DHD_IF_POLICY_DEFAULT = 0,
+ DHD_IF_POLICY_FCFS = 1,
+ DHD_IF_POLICY_LP = 2,
+ DHD_IF_POLICY_ROLE_PRIORITY = 3,
+ DHD_IF_POLICY_CUSTOM = 4,
+ DHD_IF_POLICY_INVALID = 5
+} dhd_iface_mgmt_policy_t;
+
+#define TPUT_TEST_T_VER 1
+#define TPUT_TEST_T_LEN 68
+#define TPUT_TEST_MIN_PAYLOAD_SIZE 16
+#define TPUT_TEST_USE_ETHERNET_HDR 0x1
+#define TPUT_TEST_USE_802_11_HDR 0x2
+
+/* per-driver magic numbers */
+#define DHD_IOCTL_MAGIC 0x00444944
+
+/* bump this number if you change the ioctl interface */
+#define DHD_IOCTL_VERSION 1
+
+/*
+ * Increase the DHD_IOCTL_MAXLEN to 16K for supporting download of NVRAM files of size
+ * > 8K. In the existing implementation when NVRAM is to be downloaded via the "vars"
+ * DHD IOVAR, the NVRAM is copied to the DHD Driver memory. Later on when "dwnldstate" is
+ * invoked with FALSE option, the NVRAM gets copied from the DHD driver to the Dongle
+ * memory. The simple way to support this feature without modifying the DHD application or
+ * driver logic is to increase the DHD_IOCTL_MAXLEN size. This macro defines the size
+ * of the buffer in which data is exchanged between the DHD App and DHD driver.
+ */
+#define DHD_IOCTL_MAXLEN (16384) /* max length ioctl buffer required */
+#define DHD_IOCTL_SMLEN 256 /* "small" length ioctl buffer required */
+
+/*
+ * For cases where a 16K buf is not sufficient,
+ * e.g. the DHD dump output buffer is more than 16K.
+ */
+#define DHD_IOCTL_MAXLEN_32K (32768u)
+
+/* common ioctl definitions */
+#define DHD_GET_MAGIC 0
+#define DHD_GET_VERSION 1
+#define DHD_GET_VAR 2
+#define DHD_SET_VAR 3
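+
+/*
+ * Illustrative sketch (not part of the original header): filling the ioctl
+ * encoding above for a "get var" request. The convention of placing the
+ * NUL-terminated iovar name at the start of the buffer, with the result
+ * written back in place, is an assumption here, not something this header
+ * specifies.
+ */
+static void
+example_fill_getvar(dhd_ioctl_t *ioc, void *buf, uint32 buflen)
+{
+	ioc->cmd = DHD_GET_VAR;
+	ioc->buf = buf;    /* iovar name in, response out (assumed) */
+	ioc->len = buflen; /* at most DHD_IOCTL_MAXLEN */
+	ioc->set = FALSE;
+	ioc->driver = DHD_IOCTL_MAGIC;
+}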
+
+/* message levels */
+#define DHD_ERROR_VAL 0x0001
+#define DHD_TRACE_VAL 0x0002
+#define DHD_INFO_VAL 0x0004
+#define DHD_DATA_VAL 0x0008
+#define DHD_CTL_VAL 0x0010
+#define DHD_TIMER_VAL 0x0020
+#define DHD_HDRS_VAL 0x0040
+#define DHD_BYTES_VAL 0x0080
+#define DHD_INTR_VAL 0x0100
+#define DHD_LOG_VAL 0x0200
+#define DHD_GLOM_VAL 0x0400
+#define DHD_EVENT_VAL 0x0800
+#define DHD_BTA_VAL 0x1000
+#if defined(NDIS) && (NDISVER >= 0x0630) && defined(BCMDONGLEHOST)
+#define DHD_SCAN_VAL 0x2000
+#else
+#define DHD_ISCAN_VAL 0x2000
+#endif
+#define DHD_ARPOE_VAL 0x4000
+#define DHD_REORDER_VAL 0x8000
+#define DHD_NOCHECKDIED_VAL 0x20000 /* UTF WAR */
+#define DHD_PNO_VAL 0x80000
+#define DHD_RTT_VAL 0x100000
+#define DHD_MSGTRACE_VAL 0x200000
+#define DHD_FWLOG_VAL 0x400000
+#define DHD_DBGIF_VAL 0x800000
+#ifdef DHD_PCIE_RUNTIMEPM
+#define DHD_RPM_VAL 0x1000000
+#else
+#define DHD_RPM_VAL DHD_ERROR_VAL
+#endif /* DHD_PCIE_RUNTIMEPM */
+#define DHD_PKT_MON_VAL 0x2000000
+#define DHD_PKT_MON_DUMP_VAL 0x4000000
+#define DHD_ERROR_MEM_VAL 0x8000000
+#define DHD_DNGL_IOVAR_SET_VAL 0x10000000 /**< logs the setting of dongle iovars */
+#define DHD_LPBKDTDUMP_VAL 0x20000000
+#define DHD_PRSRV_MEM_VAL 0x40000000
+#define DHD_IOVAR_MEM_VAL 0x80000000
+#define DHD_ANDROID_VAL 0x10000
+#define DHD_IW_VAL 0x20000
+#define DHD_CFG_VAL 0x40000
+#define DHD_CONFIG_VAL 0x80000
+#define DHD_DUMP_VAL 0x100000
+#define DUMP_EAPOL_VAL 0x0001
+#define DUMP_ARP_VAL 0x0002
+#define DUMP_DHCP_VAL 0x0004
+#define DUMP_ICMP_VAL 0x0008
+#define DUMP_DNS_VAL 0x0010
+#define DUMP_TRX_VAL 0x0080
+
+#ifdef SDTEST
+/* For pktgen iovar */
+typedef struct dhd_pktgen {
+ uint32 version; /* To allow structure change tracking */
+ uint32 freq; /* Max ticks between tx/rx attempts */
+ uint32 count; /* Test packets to send/rcv each attempt */
+ uint32 print; /* Print counts every <print> attempts */
+ uint32 total; /* Total packets (or bursts) */
+ uint32 minlen; /* Minimum length of packets to send */
+ uint32 maxlen; /* Maximum length of packets to send */
+ uint32 numsent; /* Count of test packets sent */
+ uint32 numrcvd; /* Count of test packets received */
+ uint32 numfail; /* Count of test send failures */
+ uint32 mode; /* Test mode (type of test packets) */
+ uint32 stop; /* Stop after this many tx failures */
+} dhd_pktgen_t;
+
+/* Version in case structure changes */
+#define DHD_PKTGEN_VERSION 2
+
+/* Type of test packets to use */
+#define DHD_PKTGEN_ECHO 1 /* Send echo requests */
+#define DHD_PKTGEN_SEND 2 /* Send discard packets */
+#define DHD_PKTGEN_RXBURST 3 /* Request dongle send N packets */
+#define DHD_PKTGEN_RECV 4 /* Continuous rx from continuous tx dongle */
+#endif /* SDTEST */
+
+/* Enter idle immediately (no timeout) */
+#define DHD_IDLE_IMMEDIATE (-1)
+
+/* Values for idleclock iovar: other values are the sd_divisor to use when idle */
+#define DHD_IDLE_ACTIVE 0 /* Do not request any SD clock change when idle */
+#define DHD_IDLE_STOP (-1) /* Request SD clock be stopped (and use SD1 mode) */
+
+enum dhd_maclist_xtlv_type {
+ DHD_MACLIST_XTLV_R = 0x1,
+ DHD_MACLIST_XTLV_X = 0x2,
+ DHD_SVMPLIST_XTLV = 0x3
+};
+
+typedef struct _dhd_maclist_t {
+ uint16 version; /* Version */
+ uint16 bytes_len; /* Total bytes length of lists, XTLV headers and paddings */
+ uint8 plist[1]; /* Pointer to the first list */
+} dhd_maclist_t;
+
+typedef struct _dhd_pd11regs_param {
+ uint16 start_idx;
+ uint8 verbose;
+ uint8 pad;
+ uint8 plist[1];
+} dhd_pd11regs_param;
+
+typedef struct _dhd_pd11regs_buf {
+ uint16 idx;
+ uint8 pad[2];
+ uint8 pbuf[1];
+} dhd_pd11regs_buf;
+
+/* BT logging and memory dump */
+
+#define BT_LOG_BUF_MAX_SIZE (DHD_IOCTL_MAXLEN - (2 * sizeof(int)))
+#define BT_LOG_BUF_NOT_AVAILABLE 0
+#define BT_LOG_NEXT_BUF_NOT_AVAIL 1
+#define BT_LOG_NEXT_BUF_AVAIL 2
+#define BT_LOG_NOT_READY 3
+
+typedef struct bt_log_buf_info {
+ int availability;
+ int size;
+ char buf[BT_LOG_BUF_MAX_SIZE];
+} bt_log_buf_info_t;
+
+/* request BT memory in chunks */
+typedef struct bt_mem_req {
+ int offset; /* offset from BT memory start */
+ int buf_size; /* buffer size per chunk */
+} bt_mem_req_t;
+
+typedef struct fw_download_info {
+ uint32 fw_start_addr;
+ uint32 fw_size;
+ uint32 fw_entry_pt;
+ char fw_signature_fname[DHD_FILENAME_MAX];
+ char bootloader_fname[DHD_FILENAME_MAX];
+ uint32 bootloader_start_addr;
+ char fw_path[DHD_PATHNAME_MAX];
+} fw_download_info_t;
+
+/* max dest supported */
+#define DEBUG_BUF_DEST_MAX 4
+
+/* debug buf dest stat */
+typedef struct debug_buf_dest_stat {
+ uint32 stat[DEBUG_BUF_DEST_MAX];
+} debug_buf_dest_stat_t;
+
+#ifdef DHD_PKTTS
+/* max pktts flow config supported */
+#define PKTTS_CONFIG_MAX 8
+
+#define PKTTS_OFFSET_INVALID ((uint32)(~0))
+
+/* pktts flow configuration */
+typedef struct pktts_flow {
+ uint16 ver; /**< version of this struct */
+ uint16 len; /**< length in bytes of this structure */
+ uint32 src_ip; /**< source ip address */
+ uint32 dst_ip; /**< destination ip address */
+ uint32 src_port; /**< source port */
+ uint32 dst_port; /**< destination port */
+ uint32 proto; /**< protocol */
+ uint32 ip_prec; /**< ip precedence */
+ uint32 pkt_offset; /**< offset from data[0] (TCP/UDP payload) */
+ uint32 chksum; /**< 5 tuple checksum */
+} pktts_flow_t;
+
+#define BCM_TS_MAGIC 0xB055B055
+#define BCM_TS_MAGIC_V2 0xB055B056
+#define BCM_TS_TX 1u
+#define BCM_TS_RX 2u
+#define BCM_TS_UTX 3u /* ucode tx timestamps */
+
+#define PKTTS_MAX_FWTX 4u
+#define PKTTS_MAX_UCTX 5u
+#define PKTTS_MAX_UCCNT 8u
+#define PKTTS_MAX_FWRX 2u
+
+/* Firmware timestamp header */
+typedef struct bcm_to_info_hdr {
+ uint magic; /**< magic word */
+ uint type; /**< tx/rx type */
+ uint flowid; /**< 5 tuple checksum */
+ uint prec; /**< ip precedence (IP_PREC) */
+ uint8 xbytes[16]; /**< 16bytes info from pkt offset */
+} bcm_to_info_hdr_t;
+
+/* Firmware tx timestamp payload structure */
+typedef struct bcm_to_info_tx_ts {
+ bcm_to_info_hdr_t hdr;
+ uint64 dhdt0; /**< system time - DHDT0 */
+ uint64 dhdt5; /**< system time - DHDT5 */
+ uint fwts[PKTTS_MAX_FWTX]; /**< fw timestamp - FWT0..FWT4 */
+ uint ucts[PKTTS_MAX_UCTX]; /**< uc timestamp - UCT0..UCT4 */
+ uint uccnt[PKTTS_MAX_UCCNT]; /**< uc counters */
+} bcm_to_info_tx_ts_t;
+
+/* Firmware rx timestamp payload structure */
+typedef struct bcm_to_info_rx_ts {
+ bcm_to_info_hdr_t hdr;
+ uint64 dhdr3; /**< system time - DHDR3 */
+ uint fwts[PKTTS_MAX_FWRX]; /**< fw timestamp - FWT0, FWT1 */
+} bcm_to_info_rx_ts_t;
+#endif /* DHD_PKTTS */
+
+/* devreset */
+#define DHD_DEVRESET_VERSION 1
+
+typedef struct devreset_info {
+ uint16 version;
+ uint16 length;
+ uint16 mode;
+ int16 status;
+} devreset_info_t;
+
+#ifdef DHD_TX_PROFILE
+
+#define DHD_TX_PROFILE_VERSION 1
+
+/* tx_profile structure for tagging */
+typedef struct dhd_tx_profile_protocol {
+ uint16 version;
+ uint8 profile_index;
+ uint8 layer;
+ uint32 protocol_number;
+ uint16 src_port;
+ uint16 dest_port;
+} dhd_tx_profile_protocol_t;
+
+#define DHD_TX_PROFILE_DATA_LINK_LAYER (2u) /* data link layer protocols */
+#define DHD_TX_PROFILE_NETWORK_LAYER (3u) /* network layer protocols */
+
+#define DHD_MAX_PROFILE_INDEX (7u) /* three bits are available to encode
+ the tx profile index in the rate
+ field in host_txbuf_post_t
+ */
+#define DHD_MAX_PROFILES (1u) /* ucode only supports 1 profile atm */
+
+#endif /* defined(DHD_TX_PROFILE) */
+#endif /* _dhdioctl_h_ */
diff --git a/bcmdhd.101.10.361.x/include/dngl_rtlv.h b/bcmdhd.101.10.361.x/include/dngl_rtlv.h
new file mode 100755
index 0000000..450caa2
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/dngl_rtlv.h
@@ -0,0 +1,66 @@
+/*
+ * Interface definitions for reversed TLVs
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _dngl_rtlv_h_
+#define _dngl_rtlv_h_
+
+#include <typedefs.h>
+
+/* Types of reverse TLVs downloaded to the top of dongle RAM.
+ * A reverse TLV consists of:
+ * data <variable length>
+ * len <4 bytes>
+ * type <4 bytes>
+ */
+enum {
+ DNGL_RTLV_TYPE_NONE = 0,
+ /* replaces bcmrand.h BCM_NVRAM_RNG_SIGNATURE */
+ DNGL_RTLV_TYPE_RNG_SIGNATURE = 0xFEEDC0DEu, /* RNG random data */
+ DNGL_RTLV_TYPE_FW_SIGNATURE = 0xFEEDFE51, /* FW signature */
+ DNGL_RTLV_TYPE_NVRAM_SIGNATURE = 0xFEEDFE52, /* NVRAM signature */
+ DNGL_RTLV_TYPE_FWSIGN_MEM_MAP = 0xFEEDFE53, /* FW signing memory map */
+ DNGL_RTLV_TYPE_FWSIGN_STATUS = 0xFEEDFE54, /* signature verification status */
+ DNGL_RTLV_TYPE_END_MARKER = 0xFEED0E2D, /* end of rTLVs marker */
+};
+typedef uint32 dngl_rtlv_type_t;
+typedef uint32 dngl_rtlv_len_t;
+
+/* Search for a reversed TLV with the given type, starting at the given address */
+int dngl_rtlv_find(const uint8 *rtlv_ptr, const uint8 *addr_limit, dngl_rtlv_type_t type,
+ dngl_rtlv_len_t *out_len, const uint8 **out_data);
+
+/* Search for a reversed TLV with the given type, starting at the top of RAM */
+int dngl_rtlv_find_from_ramtop(dngl_rtlv_type_t type, dngl_rtlv_len_t *out_len,
+ const uint8 **out_data);
+
+/* Search for the end of the reversed TLVs at the top of RAM to return the next RAM address */
+const uint8* dngl_rtlv_skipall(void);
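+
+/* Layout sketch (illustrative, derived only from the layout comment above;
+ * assumes 4-byte alignment of the trailing words): given a pointer just past
+ * one rTLV's trailing type word, recover its type, length, and the start of
+ * its data, which sits below the length word in memory.
+ */
+static inline const uint8 *
+dngl_rtlv_peek(const uint8 *rtlv_end, dngl_rtlv_type_t *out_type, dngl_rtlv_len_t *out_len)
+{
+	const uint8 *p = rtlv_end;
+	p -= sizeof(dngl_rtlv_type_t); /* trailing 4-byte type word */
+	*out_type = *(const dngl_rtlv_type_t *)p;
+	p -= sizeof(dngl_rtlv_len_t); /* 4-byte length word precedes the type */
+	*out_len = *(const dngl_rtlv_len_t *)p;
+	return p - *out_len; /* variable-length data lies below the length */
+}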
+
+#ifdef RTLV_DEBUG
+void dbg_log_rtlv(const char* str, const void* p1, const void* p2, const void *p3,
+ const void* p4, const void *p5);
+#else /* RTLV_DEBUG */
+#define dbg_log_rtlv(str, p1, p2, p3, p4, p5)
+#endif /* RTLV_DEBUG */
+
+#endif /* _dngl_rtlv_h_ */
diff --git a/bcmdhd.101.10.361.x/include/dngl_stats.h b/bcmdhd.101.10.361.x/include/dngl_stats.h
new file mode 100755
index 0000000..12195f2
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/dngl_stats.h
@@ -0,0 +1,388 @@
+/*
+ * Common stats definitions for clients of dongle
+ * ports
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+#ifndef _dngl_stats_h_
+#define _dngl_stats_h_
+
+#include <ethernet.h>
+#include <802.11.h>
+#include <linux/compat.h>
+
+/* XXX happens to mirror a section of linux's net_device_stats struct */
+typedef struct {
+ unsigned long rx_packets; /* total packets received */
+ unsigned long tx_packets; /* total packets transmitted */
+ unsigned long rx_bytes; /* total bytes received */
+ unsigned long tx_bytes; /* total bytes transmitted */
+ unsigned long rx_errors; /* bad packets received */
+ unsigned long tx_errors; /* packet transmit problems */
+ unsigned long rx_dropped; /* packets dropped by dongle */
+ unsigned long tx_dropped; /* packets dropped by dongle */
+ unsigned long multicast; /* multicast packets received */
+} dngl_stats_t;
+
+typedef int32 wifi_radio;
+typedef int32 wifi_channel;
+typedef int32 wifi_rssi;
+typedef struct { uint16 version; uint16 length; } ver_len;
+
+typedef enum wifi_channel_width {
+ WIFI_CHAN_WIDTH_20 = 0,
+ WIFI_CHAN_WIDTH_40 = 1,
+ WIFI_CHAN_WIDTH_80 = 2,
+ WIFI_CHAN_WIDTH_160 = 3,
+ WIFI_CHAN_WIDTH_80P80 = 4,
+ WIFI_CHAN_WIDTH_5 = 5,
+ WIFI_CHAN_WIDTH_10 = 6,
+ WIFI_CHAN_WIDTH_INVALID = -1
+} wifi_channel_width_t;
+
+typedef enum {
+ WIFI_DISCONNECTED = 0,
+ WIFI_AUTHENTICATING = 1,
+ WIFI_ASSOCIATING = 2,
+ WIFI_ASSOCIATED = 3,
+ WIFI_EAPOL_STARTED = 4, /* if done by firmware/driver */
+ WIFI_EAPOL_COMPLETED = 5, /* if done by firmware/driver */
+} wifi_connection_state;
+
+typedef enum {
+ WIFI_ROAMING_IDLE = 0,
+ WIFI_ROAMING_ACTIVE = 1
+} wifi_roam_state;
+
+typedef enum {
+ WIFI_INTERFACE_STA = 0,
+ WIFI_INTERFACE_SOFTAP = 1,
+ WIFI_INTERFACE_IBSS = 2,
+ WIFI_INTERFACE_P2P_CLIENT = 3,
+ WIFI_INTERFACE_P2P_GO = 4,
+ WIFI_INTERFACE_NAN = 5,
+ WIFI_INTERFACE_MESH = 6
+} wifi_interface_mode;
+
+#define WIFI_CAPABILITY_QOS 0x00000001 /* set for QOS association */
+#define WIFI_CAPABILITY_PROTECTED 0x00000002 /* set for protected association (802.11
+ * beacon frame control protected bit set)
+ */
+#define WIFI_CAPABILITY_INTERWORKING 0x00000004 /* set if 802.11 Extended Capabilities
+ * element interworking bit is set
+ */
+#define WIFI_CAPABILITY_HS20 0x00000008 /* set for HS20 association */
+#define WIFI_CAPABILITY_SSID_UTF8 0x00000010 /* set if 802.11 Extended Capabilities
+ * element UTF-8 SSID bit is set
+ */
+#define WIFI_CAPABILITY_COUNTRY 0x00000020 /* set if 802.11 Country Element is present */
+#ifdef LINUX
+#define PACK_ATTRIBUTE __attribute__ ((packed))
+#else
+#define PACK_ATTRIBUTE
+#endif
+typedef struct {
+ wifi_interface_mode mode; /* interface mode */
+ uint8 mac_addr[6]; /* interface mac address (self) */
+ uint8 PAD[2];
+ wifi_connection_state state; /* connection state (valid for STA, CLI only) */
+ wifi_roam_state roaming; /* roaming state */
+ uint32 capabilities; /* WIFI_CAPABILITY_XXX (self) */
+ uint8 ssid[DOT11_MAX_SSID_LEN+1]; /* null terminated SSID */
+ uint8 bssid[ETHER_ADDR_LEN]; /* bssid */
+ uint8 PAD[1];
+ uint8 ap_country_str[3]; /* country string advertised by AP */
+ uint8 country_str[3]; /* country string for this association */
+ uint8 PAD[2];
+} wifi_interface_info;
+
+typedef wifi_interface_info *wifi_interface_handle;
+
+/* channel information */
+typedef struct {
+ wifi_channel_width_t width; /* channel width (20, 40, 80, 80+80, 160) */
+ wifi_channel center_freq; /* primary 20 MHz channel */
+ wifi_channel center_freq0; /* center frequency (MHz) first segment */
+ wifi_channel center_freq1; /* center frequency (MHz) second segment */
+} wifi_channel_info;
+
+/* wifi rate */
+typedef struct {
+ uint32 preamble; /* 0: OFDM, 1:CCK, 2:HT 3:VHT 4..7 reserved */
+ uint32 nss; /* 0:1x1, 1:2x2, 2:3x3, 3:4x4 */
+ uint32 bw; /* 0:20MHz, 1:40Mhz, 2:80Mhz, 3:160Mhz */
+ uint32 rateMcsIdx; /* OFDM/CCK: rate code as per the IEEE std,
+ * in units of 0.5 Mbps;
+ * HT/VHT: MCS index
+ */
+ uint32 reserved; /* reserved */
+ uint32 bitrate; /* units of 100 Kbps */
+} wifi_rate;
+
+typedef struct {
+ uint32 preamble :3; /* 0: OFDM, 1:CCK, 2:HT 3:VHT 4..7 reserved */
+ uint32 nss :2; /* 0:1x1, 1:2x2, 2:3x3, 3:4x4 */
+ uint32 bw :3; /* 0:20MHz, 1:40Mhz, 2:80Mhz, 3:160Mhz */
+ uint32 rateMcsIdx :8; /* OFDM/CCK: rate code as per the IEEE std,
+ * in units of 0.5 Mbps; HT/VHT: MCS
+ * index
+ */
+ uint32 reserved :16; /* reserved */
+ uint32 bitrate; /* units of 100 Kbps */
+} wifi_rate_v1;
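+
+/* Packing sketch (illustrative; example values, not firmware data): filling
+ * the v1 bitfield rate descriptor for a 1x1 HT rate, following the field
+ * comments above. The helper name is hypothetical.
+ */
+static inline wifi_rate_v1
+wifi_rate_v1_make_ht(uint32 mcs, uint32 bw)
+{
+	wifi_rate_v1 r = {0};
+	r.preamble = 2; /* HT */
+	r.nss = 0; /* 1x1 */
+	r.bw = bw; /* 0:20MHz, 1:40MHz, 2:80MHz, 3:160MHz */
+	r.rateMcsIdx = mcs & 0xffu; /* MCS index for HT */
+	return r;
+}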
+
+/* channel statistics */
+typedef struct {
+ wifi_channel_info channel; /* channel */
+ uint32 on_time; /* msecs the radio is awake (32 bits number
+ * accruing over time)
+ */
+ uint32 cca_busy_time; /* msecs the CCA register is busy (32 bits number
+ * accruing over time)
+ */
+} wifi_channel_stat;
+
+/* radio statistics */
+typedef struct {
+ struct {
+ uint16 version;
+ uint16 length;
+ };
+ wifi_radio radio; /* wifi radio (if multiple radio supported) */
+ uint32 on_time; /* msecs the radio is awake (32 bits number
+ * accruing over time)
+ */
+ uint32 tx_time; /* msecs the radio is transmitting (32 bits
+ * number accruing over time)
+ */
+ uint32 rx_time; /* msecs the radio is in active receive (32 bits
+ * number accruing over time)
+ */
+ uint32 on_time_scan; /* msecs the radio is awake due to all scan (32 bits
+ * number accruing over time)
+ */
+ uint32 on_time_nbd; /* msecs the radio is awake due to NAN (32 bits
+ * number accruing over time)
+ */
+ uint32 on_time_gscan; /* msecs the radio is awake due to G-scan (32 bits
+ * number accruing over time)
+ */
+ uint32 on_time_roam_scan; /* msecs the radio is awake due to roam scan (32 bits
+ * number accruing over time)
+ */
+ uint32 on_time_pno_scan; /* msecs the radio is awake due to PNO scan (32 bits
+ * number accruing over time)
+ */
+ uint32 on_time_hs20; /* msecs the radio is awake due to HS2.0 scans and
+ * GAS exchange (32 bits number accruing over time)
+ */
+ uint32 num_channels; /* number of channels */
+ wifi_channel_stat channels[1]; /* channel statistics */
+} wifi_radio_stat;
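+
+/* Sizing sketch (assumption: channels[] is used as a variable-length tail,
+ * as num_channels implies; one entry is already part of the declaration,
+ * hence the (n - 1) term). The macro name is hypothetical.
+ */
+#define WIFI_RADIO_STAT_SIZE(n) \
+	(sizeof(wifi_radio_stat) + (((n) > 0 ? (n) - 1 : 0) * sizeof(wifi_channel_stat)))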
+
+typedef struct {
+ wifi_radio radio;
+ uint32 on_time;
+ uint32 tx_time;
+ uint32 rx_time;
+ uint32 on_time_scan;
+ uint32 on_time_nbd;
+ uint32 on_time_gscan;
+ uint32 on_time_roam_scan;
+ uint32 on_time_pno_scan;
+ uint32 on_time_hs20;
+ uint32 num_channels;
+} wifi_radio_stat_h;
+
+/* per rate statistics */
+typedef struct {
+ wifi_rate_v1 rate; /* rate information */
+ uint32 tx_mpdu; /* number of successfully transmitted data pkts (ACK rcvd) */
+ uint32 rx_mpdu; /* number of received data pkts */
+ uint32 mpdu_lost; /* number of data packet losses (no ACK) */
+ uint32 retries; /* total number of data pkt retries */
+ uint32 retries_short; /* number of short data pkt retries */
+ uint32 retries_long; /* number of long data pkt retries */
+} wifi_rate_stat_v1;
+
+typedef struct {
+ uint16 version;
+ uint16 length;
+ uint32 tx_mpdu; /* number of successfully transmitted data pkts (ACK rcvd) */
+ uint32 rx_mpdu; /* number of received data pkts */
+ uint32 mpdu_lost; /* number of data packet losses (no ACK) */
+ uint32 retries; /* total number of data pkt retries */
+ uint32 retries_short; /* number of short data pkt retries */
+ uint32 retries_long; /* number of long data pkt retries */
+ wifi_rate rate;
+} wifi_rate_stat;
+
+/* access categories */
+typedef enum {
+ WIFI_AC_VO = 0,
+ WIFI_AC_VI = 1,
+ WIFI_AC_BE = 2,
+ WIFI_AC_BK = 3,
+ WIFI_AC_MAX = 4
+} wifi_traffic_ac;
+
+/* wifi peer type */
+typedef enum
+{
+ WIFI_PEER_STA,
+ WIFI_PEER_AP,
+ WIFI_PEER_P2P_GO,
+ WIFI_PEER_P2P_CLIENT,
+ WIFI_PEER_NAN,
+ WIFI_PEER_TDLS,
+ WIFI_PEER_INVALID
+} wifi_peer_type;
+
+/* per peer statistics */
+typedef struct {
+ wifi_peer_type type; /* peer type (AP, TDLS, GO etc.) */
+ uint8 peer_mac_address[6]; /* mac address */
+ uint32 capabilities; /* peer WIFI_CAPABILITY_XXX */
+ uint32 num_rate; /* number of rates */
+ wifi_rate_stat rate_stats[1]; /* per rate statistics, number of entries = num_rate */
+} wifi_peer_info;
+
+/* per access category statistics */
+typedef struct {
+ wifi_traffic_ac ac; /* access category (VI, VO, BE, BK) */
+ uint32 tx_mpdu; /* number of successfully transmitted unicast data pkts
+ * (ACK rcvd)
+ */
+ uint32 rx_mpdu; /* number of received unicast mpdus */
+ uint32 tx_mcast; /* number of successfully transmitted multicast
+ * data packets
+ */
+ /* STA case: implies ACK received from AP for the
+ * unicast packet in which mcast pkt was sent
+ */
+ uint32 rx_mcast; /* number of received multicast data packets */
+ uint32 rx_ampdu; /* number of received unicast a-mpdus */
+ uint32 tx_ampdu; /* number of transmitted unicast a-mpdus */
+ uint32 mpdu_lost; /* number of data pkt losses (no ACK) */
+ uint32 retries; /* total number of data pkt retries */
+ uint32 retries_short; /* number of short data pkt retries */
+ uint32 retries_long; /* number of long data pkt retries */
+ uint32 contention_time_min; /* data pkt min contention time (usecs) */
+ uint32 contention_time_max; /* data pkt max contention time (usecs) */
+ uint32 contention_time_avg; /* data pkt avg contention time (usecs) */
+ uint32 contention_num_samples; /* num of data pkts used for contention statistics */
+} wifi_wmm_ac_stat;
+
+/* interface statistics */
+typedef struct {
+ wifi_interface_handle iface; /* wifi interface */
+ wifi_interface_info info; /* current state of the interface */
+ uint32 beacon_rx; /* access point beacon received count from
+ * connected AP
+ */
+ uint64 average_tsf_offset; /* average beacon offset encountered (beacon_TSF - TBTT).
+ * Used to calculate the typical beacon contention time
+ * on the channel; may also be used to debug beacon
+ * synchronization and related power consumption issues
+ */
+ uint32 leaky_ap_detected; /* indicate that this AP
+ * typically leaks packets beyond
+ * the driver guard time.
+ */
+ uint32 leaky_ap_avg_num_frames_leaked; /* average number of frames leaked by the AP after
+ * a frame with the PM bit set was ACK'ed by the AP
+ */
+ uint32 leaky_ap_guard_time; /* guard time currently in force
+ * (when implementing IEEE power management based on
+ * the frame control PM bit): how long the driver
+ * waits, after receiving an ACK for a data frame
+ * with the PM bit set, before shutting down the radio
+ */
+ uint32 mgmt_rx; /* access point mgmt frames received count from
+ * connected AP (including Beacon)
+ */
+ uint32 mgmt_action_rx; /* action frames received count */
+ uint32 mgmt_action_tx; /* action frames transmit count */
+ wifi_rssi rssi_mgmt; /* access Point Beacon and Management frames RSSI
+ * (averaged)
+ */
+ wifi_rssi rssi_data; /* access Point Data Frames RSSI (averaged) from
+ * connected AP
+ */
+ wifi_rssi rssi_ack; /* access Point ACK RSSI (averaged) from
+ * connected AP
+ */
+ wifi_wmm_ac_stat ac[WIFI_AC_MAX]; /* per ac data packet statistics */
+ uint32 num_peers; /* number of peers */
+ wifi_peer_info peer_info[1]; /* per peer statistics */
+} wifi_iface_stat;
+
+#ifdef CONFIG_COMPAT
+/* interface statistics */
+typedef struct {
+ compat_uptr_t iface; /* wifi interface */
+ wifi_interface_info info; /* current state of the interface */
+ uint32 beacon_rx; /* access point beacon received count from
+ * connected AP
+ */
+ uint64 average_tsf_offset; /* average beacon offset encountered (beacon_TSF - TBTT).
+ * Used to calculate the typical beacon contention time
+ * on the channel; may also be used to debug beacon
+ * synchronization and related power consumption issues
+ */
+ uint32 leaky_ap_detected; /* indicate that this AP
+ * typically leaks packets beyond
+ * the driver guard time.
+ */
+ uint32 leaky_ap_avg_num_frames_leaked; /* average number of frames leaked by the AP after
+ * a frame with the PM bit set was ACK'ed by the AP
+ */
+ uint32 leaky_ap_guard_time; /* guard time currently in force
+ * (when implementing IEEE power management based on
+ * the frame control PM bit): how long the driver
+ * waits, after receiving an ACK for a data frame
+ * with the PM bit set, before shutting down the radio
+ */
+ uint32 mgmt_rx; /* access point mgmt frames received count from
+ * connected AP (including Beacon)
+ */
+ uint32 mgmt_action_rx; /* action frames received count */
+ uint32 mgmt_action_tx; /* action frames transmit count */
+ wifi_rssi rssi_mgmt; /* access Point Beacon and Management frames RSSI
+ * (averaged)
+ */
+ wifi_rssi rssi_data; /* access Point Data Frames RSSI (averaged) from
+ * connected AP
+ */
+ wifi_rssi rssi_ack; /* access Point ACK RSSI (averaged) from
+ * connected AP
+ */
+ wifi_wmm_ac_stat ac[WIFI_AC_MAX]; /* per ac data packet statistics */
+ uint32 num_peers; /* number of peers */
+ wifi_peer_info peer_info[1]; /* per peer statistics */
+} compat_wifi_iface_stat;
+#endif /* CONFIG_COMPAT */
+
+#endif /* _dngl_stats_h_ */
diff --git a/bcmdhd.101.10.361.x/include/dngl_wlhdr.h b/bcmdhd.101.10.361.x/include/dngl_wlhdr.h
new file mode 100755
index 0000000..6e1f74e
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/dngl_wlhdr.h
@@ -0,0 +1,39 @@
+/*
+ * Dongle WL Header definitions
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+#ifndef _dngl_wlhdr_h_
+#define _dngl_wlhdr_h_
+
+typedef struct wl_header {
+ uint8 type; /* Header type */
+ uint8 version; /* Header version */
+ int8 rssi; /* RSSI */
+ uint8 pad; /* Unused */
+} wl_header_t;
+
+#define WL_HEADER_LEN sizeof(wl_header_t)
+#define WL_HEADER_TYPE 0
+#define WL_HEADER_VER 1
+#endif /* _dngl_wlhdr_h_ */
diff --git a/bcmdhd.101.10.361.x/include/dnglevent.h b/bcmdhd.101.10.361.x/include/dnglevent.h
new file mode 100755
index 0000000..40a1676
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/dnglevent.h
@@ -0,0 +1,174 @@
+/*
+ * Broadcom Event protocol definitions
+ *
+ * Dependencies: bcmeth.h
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ *
+ * -----------------------------------------------------------------------------
+ *
+ */
+
+/*
+ * Broadcom dngl Ethernet Events protocol defines
+ *
+ */
+
+#ifndef _DNGLEVENT_H_
+#define _DNGLEVENT_H_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+#include <bcmeth.h>
+#include <ethernet.h>
+#ifdef HEALTH_CHECK
+#include <dngl_defs.h>
+#endif /* HEALTH_CHECK */
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+#define BCM_DNGL_EVENT_MSG_VERSION 1
+#define DNGL_E_RSRVD_1 0x0
+#define DNGL_E_RSRVD_2 0x1
+#define DNGL_E_SOCRAM_IND 0x2
+#define DNGL_E_PROFILE_DATA_IND 0x3
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+ uint16 version; /* Current version is 1 */
+ uint16 reserved; /* reserved for any future extension */
+ uint16 event_type; /* DNGL_E_SOCRAM_IND */
+ uint16 datalen; /* Length of the event payload */
+} BWL_POST_PACKED_STRUCT bcm_dngl_event_msg_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct bcm_dngl_event {
+ struct ether_header eth;
+ bcmeth_hdr_t bcm_hdr;
+ bcm_dngl_event_msg_t dngl_event;
+ /* data portion follows */
+} BWL_POST_PACKED_STRUCT bcm_dngl_event_t;
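+
+/* Parsing sketch (illustrative): sanity-checking a dongle event message
+ * before reading its payload. `avail` is the caller-measured number of
+ * bytes following the dngl_event header; byte-order conversion, if needed,
+ * is omitted. The helper name is hypothetical.
+ */
+static inline int
+bcm_dngl_event_msg_ok(const bcm_dngl_event_msg_t *msg, uint16 avail)
+{
+	return (msg->version == BCM_DNGL_EVENT_MSG_VERSION) &&
+		(msg->datalen <= avail);
+}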
+
+typedef BWL_PRE_PACKED_STRUCT struct bcm_dngl_socramind {
+ uint16 tag; /* data tag */
+ uint16 length; /* data length */
+ uint8 value[1]; /* data value with variable length specified by length */
+} BWL_POST_PACKED_STRUCT bcm_dngl_socramind_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct bcm_dngl_profile_data_ind_t {
+ uint16 tag;
+ uint16 length;
+ uint8 value[];
+} BWL_POST_PACKED_STRUCT bcm_dngl_profile_data_ind_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct bcm_dngl_arm_event {
+ uint32 type;
+ uint32 value;
+} BWL_POST_PACKED_STRUCT bcm_dngl_arm_event_t;
+
+#define PROFILE_DATA_IND_INFO 0x1
+
+#define PROFILE_SUB_TYPE_ARM_STATS_INFO 0x1
+
+typedef BWL_PRE_PACKED_STRUCT struct bcm_dngl_arm_stats_ind {
+ uint16 tag;
+ uint16 length;
+ uint8 value[];
+} BWL_POST_PACKED_STRUCT bcm_dngl_arm_stats_ind_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct bcm_dngl_arm_stats {
+ uint32 cycles;
+ uint32 timestamp;
+ uint16 freq;
+ uint16 roh;
+ uint16 num_events;
+ uint16 seq_no;
+ uint8 value[];
+} BWL_POST_PACKED_STRUCT bcm_dngl_arm_stats_t;
+
+/* SOCRAM_IND type tags */
+typedef enum socram_ind_tag {
+ SOCRAM_IND_ASSERT_TAG = 1,
+ SOCRAM_IND_TAG_HEALTH_CHECK = 2
+} socram_ind_tag_t;
+
+/* Health check top level module tags */
+typedef BWL_PRE_PACKED_STRUCT struct bcm_dngl_healthcheck {
+ uint16 top_module_tag; /* top level module tag */
+ uint16 top_module_len; /* length of the top level module data */
+ uint8 value[1]; /* data value with variable length specified by length */
+} BWL_POST_PACKED_STRUCT bcm_dngl_healthcheck_t;
+
+/* Health check top level module tags */
+#define HEALTH_CHECK_TOP_LEVEL_MODULE_PCIEDEV_RTE 1
+#define HEALTH_CHECK_PCIEDEV_VERSION_1 1
+#define HEALTH_CHECK_PCIEDEV_FLAG_IN_D3_SHIFT 0
+#define HEALTH_CHECK_PCIEDEV_FLAG_AER_SHIFT 1
+#define HEALTH_CHECK_PCIEDEV_FLAG_LINKDOWN_SHIFT 2
+#define HEALTH_CHECK_PCIEDEV_FLAG_MSI_INT_SHIFT 3
+#define HEALTH_CHECK_PCIEDEV_FLAG_NODS_SHIFT 4
+#define HEALTH_CHECK_PCIEDEV_FLAG_NO_HOST_WAKE_SHIFT 5
+#define HEALTH_CHECK_PCIEDEV_FLAG_IN_D3 (1 << HEALTH_CHECK_PCIEDEV_FLAG_IN_D3_SHIFT)
+#define HEALTH_CHECK_PCIEDEV_FLAG_AER (1 << HEALTH_CHECK_PCIEDEV_FLAG_AER_SHIFT)
+#define HEALTH_CHECK_PCIEDEV_FLAG_LINKDOWN (1 << HEALTH_CHECK_PCIEDEV_FLAG_LINKDOWN_SHIFT)
+#define HEALTH_CHECK_PCIEDEV_FLAG_MSI_INT (1 << HEALTH_CHECK_PCIEDEV_FLAG_MSI_INT_SHIFT)
+#define HEALTH_CHECK_PCIEDEV_FLAG_NODS (1 << HEALTH_CHECK_PCIEDEV_FLAG_NODS_SHIFT)
+#define HEALTH_CHECK_PCIEDEV_FLAG_NO_HOST_WAKE (1 << HEALTH_CHECK_PCIEDEV_FLAG_NO_HOST_WAKE_SHIFT)
+/* PCIE Module TAGs */
+#define HEALTH_CHECK_PCIEDEV_INDUCED_IND 0x1
+#define HEALTH_CHECK_PCIEDEV_H2D_DMA_IND 0x2
+#define HEALTH_CHECK_PCIEDEV_D2H_DMA_IND 0x3
+#define HEALTH_CHECK_PCIEDEV_IOCTL_STALL_IND 0x4
+#define HEALTH_CHECK_PCIEDEV_D3ACK_STALL_IND 0x5
+#define HEALTH_CHECK_PCIEDEV_NODS_IND 0x6
+#define HEALTH_CHECK_PCIEDEV_LINKSPEED_FALLBACK_IND 0x7
+#define HEALTH_CHECK_PCIEDEV_DSACK_STALL_IND 0x8
+#define HEALTH_CHECK_PCIEDEV_FLOWRING_IND 0x9
+#define HEALTH_CHECK_PCIEDEV_HW_ASSERT_LONG_IND 0xA
+#define HEALTH_CHECK_PCIEDEV_RXPOST_LONG_IND 0xB
+
+#define HC_PCIEDEV_CONFIG_REGLIST_MAX 25
+typedef BWL_PRE_PACKED_STRUCT struct bcm_dngl_pcie_hc {
+ uint16 version; /* HEALTH_CHECK_PCIEDEV_VERSION_1 */
+ uint16 reserved;
+ uint16 pcie_err_ind_type; /* PCIE Module TAGs */
+ uint16 pcie_flag;
+ uint32 pcie_control_reg;
+ uint32 pcie_config_regs[HC_PCIEDEV_CONFIG_REGLIST_MAX];
+} BWL_POST_PACKED_STRUCT bcm_dngl_pcie_hc_t;
+
+/* define to avoid compile issues in older branches which define hchk_sw_entity_t */
+#ifdef HCHK_COMMON_SW_EVENT
+/* Enumerating top level SW entities for use by health check */
+typedef enum {
+ HCHK_SW_ENTITY_UNDEFINED = 0,
+ HCHK_SW_ENTITY_PCIE = 1,
+ HCHK_SW_ENTITY_SDIO = 2,
+ HCHK_SW_ENTITY_USB = 3,
+ HCHK_SW_ENTITY_RTE = 4,
+ HCHK_SW_ENTITY_WL_PRIMARY = 5, /* WL instance 0 */
+ HCHK_SW_ENTITY_WL_SECONDARY = 6, /* WL instance 1 */
+ HCHK_SW_ENTITY_MAX
+} hchk_sw_entity_t;
+#endif /* HCHK_COMMON_SW_EVENT */
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _DNGLEVENT_H_ */
diff --git a/bcmdhd.101.10.361.x/include/dnglioctl.h b/bcmdhd.101.10.361.x/include/dnglioctl.h
new file mode 100755
index 0000000..a18c716
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/dnglioctl.h
@@ -0,0 +1,177 @@
+/*
+ * HND Run Time Environment ioctl.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _dngl_ioctl_h_
+#define _dngl_ioctl_h_
+
+/* ==== Dongle IOCTLs i.e. non-d11 IOCTLs ==== */
+
+#ifndef _rte_ioctl_h_
+/* ================================================================ */
+/* These are the existing ioctls moved from src/include/rte_ioctl.h */
+/* ================================================================ */
+
+/* RTE IOCTL definitions for generic ether devices */
+#define RTEIOCTLSTART 0x8901
+#define RTEGHWADDR 0x8901
+#define RTESHWADDR 0x8902
+#define RTEGMTU 0x8903
+#define RTEGSTATS 0x8904
+#define RTEGALLMULTI 0x8905
+#define RTESALLMULTI 0x8906
+#define RTEGPROMISC 0x8907
+#define RTESPROMISC 0x8908
+#define RTESMULTILIST 0x8909
+#define RTEGUP 0x890A
+#define RTEGPERMADDR 0x890B
+#define RTEDEVPWRSTCHG 0x890C /* Device pwr state change for PCIedev */
+#define RTEDEVPMETOGGLE 0x890D /* Toggle PME# to wake up the host */
+#define RTEDEVTIMESYNC 0x890E /* Device TimeSync */
+#define RTEDEVDSNOTIFY 0x890F /* Bus DS state notification */
+#define RTED11DMALPBK_INIT 0x8910 /* D11 DMA loopback init */
+#define RTED11DMALPBK_UNINIT 0x8911 /* D11 DMA loopback uninit */
+#define RTED11DMALPBK_RUN 0x8912 /* D11 DMA loopback run */
+#define RTEDEVTSBUFPOST 0x8913 /* Async interface for tsync buffer post */
+#define RTED11DMAHOSTLPBK_RUN 0x8914 /* D11 DMA host memory loopback run */
+#define RTEDEVGETTSF 0x8915 /* Get device TSF */
+#define RTEDURATIONUNIT 0x8916 /* Duration unit */
+#define RTEWRITE_WAR_REGS 0x8917 /* write workaround regs */
+#define RTEDEVRMPMK 0x8918 /* Remove PMK */
+#define RTEDEVDBGVAL 0x8919 /* Set debug val */
+/* Ensure last RTE IOCTL define val is assigned to RTEIOCTLEND */
+#define RTEIOCTLEND 0x8919 /* LAST RTE IOCTL value */
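+
+/* Compile-time guard sketch (illustrative): a portable way to keep
+ * RTEIOCTLEND in sync with the last RTE ioctl, per the comment above.
+ * The typedef name is hypothetical; the array size goes negative, and the
+ * build fails, if the two values ever diverge.
+ */
+typedef char rte_ioctl_end_is_last[(RTEIOCTLEND == RTEDEVDBGVAL) ? 1 : -1];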
+
+#define RTE_IOCTL_QUERY 0x00
+#define RTE_IOCTL_SET 0x01
+#define RTE_IOCTL_OVL_IDX_MASK 0x1e
+#define RTE_IOCTL_OVL_RSV 0x20
+#define RTE_IOCTL_OVL 0x40
+#define RTE_IOCTL_OVL_IDX_SHIFT 1
+
+enum hnd_ioctl_cmd {
+ HND_RTE_DNGL_IS_SS = 1, /* true if device connected at super speed */
+
+ /* PCIEDEV specific wl <--> bus ioctls */
+ BUS_GET_VAR = 2,
+ BUS_SET_VAR = 3,
+ BUS_FLUSH_RXREORDER_Q = 4,
+ BUS_SET_LTR_STATE = 5,
+ BUS_FLUSH_CHAINED_PKTS = 6,
+ BUS_SET_COPY_COUNT = 7,
+ BUS_UPDATE_FLOW_PKTS_MAX = 8,
+ BUS_UPDATE_EXTRA_TXLFRAGS = 9,
+ BUS_UPDATE_FRWD_RESRV_BUFCNT = 10,
+ BUS_PCIE_CONFIG_ACCESS = 11,
+ BUS_HC_EVENT_MASK_UPDATE = 12,
+ BUS_SET_MAC_WAKE_STATE = 13,
+ BUS_FRWD_PKT_RXCMPLT = 14,
+ BUS_PCIE_LATENCY_ENAB = 15, /* to enable latency feature in pcie */
+ BUS_GET_MAXITEMS = 16,
+ BUS_SET_BUS_CSO_CAP = 17, /* Update the CSO cap from wl layer to bus layer */
+ BUS_DUMP_RX_DMA_STALL_RELATED_INFO = 18,
+ BUS_UPDATE_RESVPOOL_STATE = 19 /* Update resvpool state */
+};
+
+#define SDPCMDEV_SET_MAXTXPKTGLOM 1
+#define RTE_MEMUSEINFO_VER 0x00
+
+typedef struct memuse_info {
+ uint16 ver; /* version of this struct */
+ uint16 len; /* length in bytes of this structure */
+ uint32 tot; /* Total memory */
+ uint32 text_len; /* Size of Text segment memory */
+ uint32 data_len; /* Size of Data segment memory */
+ uint32 bss_len; /* Size of BSS segment memory */
+
+ uint32 arena_size; /* Total Heap size */
+ uint32 arena_free; /* Heap memory available or free */
+ uint32 inuse_size; /* Heap memory currently in use */
+ uint32 inuse_hwm; /* High watermark of memory - reclaimed memory */
+ uint32 inuse_overhead; /* tally of allocated mem_t blocks */
+ uint32 inuse_total; /* Heap in-use + Heap overhead memory */
+ uint32 free_lwm; /* Least free size since reclaim */
+ uint32 mf_count; /* Malloc failure count */
+} memuse_info_t;
+
+/* Different DMA loopback modes */
+#define M2M_DMA_LOOPBACK 0 /* PCIE M2M mode */
+#define D11_DMA_LOOPBACK 1 /* PCIE M2M and D11 mode without ucode */
+#define BMC_DMA_LOOPBACK 2 /* PCIE M2M and D11 mode with ucode */
+#define M2M_NON_DMA_LOOPBACK 3 /* Non DMA(indirect) mode */
+#define D11_DMA_HOST_MEM_LPBK 4 /* D11 mode */
+#define M2M_DMA_WRITE_TO_RAM 6 /* PCIE M2M write to specific memory mode */
+#define M2M_DMA_READ_FROM_RAM 7 /* PCIE M2M read from specific memory mode */
+#define D11_DMA_WRITE_TO_RAM 8 /* D11 write to specific memory mode */
+#define D11_DMA_READ_FROM_RAM 9 /* D11 read from specific memory mode */
+
+/* For D11 DMA loopback test */
+typedef struct d11_dmalpbk_init_args {
+ uint8 core_num;
+ uint8 lpbk_mode;
+} d11_dmalpbk_init_args_t;
+
+typedef struct d11_dmalpbk_args {
+ uint8 *buf;
+ int32 len;
+ void *p;
+ uint8 core_num;
+ uint8 pad[3];
+} d11_dmalpbk_args_t;
+
+typedef enum wl_config_var {
+ WL_VAR_TX_PKTFETCH_INDUCE = 1,
+ WL_VAR_LAST
+} wl_config_var_t;
+
+typedef struct wl_config_buf {
+ wl_config_var_t var;
+ uint32 val;
+} wl_config_buf_t;
+
+/* ================================================================ */
+/* These are the existing ioctls moved from src/include/rte_ioctl.h */
+/* ================================================================ */
+#endif /* _rte_ioctl_h_ */
+
+/* MPU test iovar version */
+#define MPU_TEST_STRUCT_VER 0
+
+/* MPU test OP */
+#define MPU_TEST_OP_READ 0
+#define MPU_TEST_OP_WRITE 1
+#define MPU_TEST_OP_EXECUTE 2
+
+/* Debug iovar for MPU testing */
+typedef struct mpu_test_args {
+ /* version control */
+ uint16 ver;
+ uint16 len; /* the length of this structure */
+ /* data */
+ uint32 addr;
+ uint8 op; /* see MPU_TEST_OP_XXXX */
+ uint8 rsvd;
+ uint16 size; /* valid for read/write */
+ uint8 val[];
+} mpu_test_args_t;
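+
+/* Request-building sketch (illustrative): preparing an MPU read request.
+ * Treating `len` as the fixed header plus `size` payload bytes is an
+ * assumption based on the field comments above; the helper is hypothetical.
+ */
+static inline void
+mpu_test_args_init_read(mpu_test_args_t *args, uint32 addr, uint16 size)
+{
+	args->ver = MPU_TEST_STRUCT_VER;
+	args->len = (uint16)(sizeof(*args) + size);
+	args->addr = addr;
+	args->op = MPU_TEST_OP_READ;
+	args->rsvd = 0;
+	args->size = size;
+}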
+
+#endif /* _dngl_ioctl_h_ */
diff --git a/bcmdhd.101.10.361.x/include/eap.h b/bcmdhd.101.10.361.x/include/eap.h
new file mode 100755
index 0000000..cfd9e58
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/eap.h
@@ -0,0 +1,121 @@
+/*
+ * Extensible Authentication Protocol (EAP) definitions
+ *
+ * See
+ * RFC 2284: PPP Extensible Authentication Protocol (EAP)
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _eap_h_
+#define _eap_h_
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+/* EAP packet format */
+typedef BWL_PRE_PACKED_STRUCT struct {
+ unsigned char code; /* EAP code */
+ unsigned char id; /* Current request ID */
+ unsigned short length; /* Length including header */
+ unsigned char type; /* EAP type (optional) */
+ unsigned char data[1]; /* Type data (optional) */
+} BWL_POST_PACKED_STRUCT eap_header_t;
+
+#define EAP_HEADER_LEN 4u
+#define EAP_HEADER_LEN_WITH_TYPE 5u
+#define ERP_FLAGS_LEN 1u
+#define ERP_SEQ_LEN 2u
+#define ERP_KEYNAMENAI_HEADER_LEN 2u
+#define ERP_CRYPTOSUITE_LEN 1u
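+
+/* Validation sketch (illustrative): the on-wire EAP length field is
+ * big-endian and covers the entire packet including the 4-byte header, so
+ * it is assembled byte-wise here and bounded by the received length. The
+ * helper name is hypothetical.
+ */
+static inline int
+eap_header_len_ok(const eap_header_t *eap, unsigned int avail)
+{
+	const unsigned char *lp = (const unsigned char *)&eap->length;
+	unsigned int len = ((unsigned int)lp[0] << 8) | lp[1];
+	return (len >= EAP_HEADER_LEN) && (len <= avail);
+}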
+
+/* EAP codes */
+#define EAP_REQUEST 1u
+#define EAP_RESPONSE 2u
+#define EAP_SUCCESS 3u
+#define EAP_FAILURE 4u
+#define EAP_INITIATE 5u
+#define EAP_FINISH 6u
+
+/* EAP types */
+#define EAP_IDENTITY 1
+#define EAP_NOTIFICATION 2
+#define EAP_NAK 3
+#define EAP_MD5 4
+#define EAP_OTP 5
+#define EAP_GTC 6
+#define EAP_TLS 13
+#define EAP_EXPANDED 254
+#define BCM_EAP_SES 10
+#define BCM_EAP_EXP_LEN 12 /* EAP_LEN 5 + 3 bytes for SMI ID + 4 bytes for ven type */
+#define BCM_SMI_ID 0x113d
+#define WFA_VENDOR_SMI 0x009F68
+
+/* ERP types */
+#define EAP_ERP_TYPE_REAUTH_START 1u
+#define EAP_ERP_TYPE_REAUTH 2u
+
+/* EAP FLAGS */
+#define ERP_R_FLAG 0x80 /* result flag, set = failure */
+#define ERP_B_FLAG 0x40 /* bootstrap flag, set = bootstrap */
+#define ERP_L_FLAG 0x20 /* rrk lifetime tlv is present */
+
+/* ERP TV/TLV types */
+#define EAP_ERP_TLV_KEYNAME_NAI 1u
+
+/* ERP Cryptosuite */
+#define EAP_ERP_CS_HMAC_SHA256_128 2u
+
+#ifdef BCMCCX
+#define EAP_LEAP 17
+
+#define LEAP_VERSION 1
+#define LEAP_CHALLENGE_LEN 8
+#define LEAP_RESPONSE_LEN 24
+
+/* LEAP challenge */
+typedef struct {
+ unsigned char version; /* should be value of LEAP_VERSION */
+ unsigned char reserved; /* not used */
+ unsigned char chall_len; /* always value of LEAP_CHALLENGE_LEN */
+ unsigned char challenge[LEAP_CHALLENGE_LEN]; /* random */
+ unsigned char username[1];
+} leap_challenge_t;
+
+#define LEAP_CHALLENGE_HDR_LEN 12
+
+/* LEAP challenge response */
+typedef struct {
+ unsigned char version; /* should be value of LEAP_VERSION */
+ unsigned char reserved; /* not used */
+ unsigned char resp_len; /* always value of LEAP_RESPONSE_LEN */
+ /* MS-CHAP hash of challenge and user's password */
+ unsigned char response[LEAP_RESPONSE_LEN];
+ unsigned char username[1];
+} leap_response_t;
+
+#define LEAP_RESPONSE_HDR_LEN 28
+
+#endif /* BCMCCX */
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _eap_h_ */
diff --git a/bcmdhd.101.10.361.x/include/eapol.h b/bcmdhd.101.10.361.x/include/eapol.h
new file mode 100755
index 0000000..84b8b26
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/eapol.h
@@ -0,0 +1,292 @@
+/*
+ * 802.1x EAPOL definitions
+ *
+ * See
+ * IEEE Std 802.1X-2001
+ * IEEE 802.1X RADIUS Usage Guidelines
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _eapol_h_
+#define _eapol_h_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+#if !defined(BCMCRYPTO_COMPONENT)
+#include <bcmcrypto/aeskeywrap.h>
+#endif /* !BCMCRYPTO_COMPONENT */
+
+/* EAPOL for 802.3/Ethernet */
+typedef BWL_PRE_PACKED_STRUCT struct {
+ struct ether_header eth; /* 802.3/Ethernet header */
+ unsigned char version; /* EAPOL protocol version */
+ unsigned char type; /* EAPOL type */
+ unsigned short length; /* Length of body */
+ unsigned char body[1]; /* Body (optional) */
+} BWL_POST_PACKED_STRUCT eapol_header_t;
+
+#define EAPOL_HEADER_LEN 18
+
+typedef struct {
+ unsigned char version; /* EAPOL protocol version */
+ unsigned char type; /* EAPOL type */
+ unsigned short length; /* Length of body */
+} eapol_hdr_t;
+
+#define EAPOL_HDR_LEN 4u
+
+/* EAPOL version */
+#define WPA2_EAPOL_VERSION 2u
+#define WPA_EAPOL_VERSION 1u
+#define LEAP_EAPOL_VERSION 1u
+#define SES_EAPOL_VERSION 1u
+
+/* EAPOL types */
+#define EAP_PACKET 0
+#define EAPOL_START 1u
+#define EAPOL_LOGOFF 2u
+#define EAPOL_KEY 3u
+#define EAPOL_ASF 4u
+
+/* EAPOL-Key types */
+#define EAPOL_RC4_KEY 1u
+#define EAPOL_WPA2_KEY 2u /* 802.11i/WPA2 */
+#define EAPOL_WPA_KEY 254u /* WPA */
+
+/* RC4 EAPOL-Key header field sizes */
+#define EAPOL_KEY_REPLAY_LEN 8u
+#define EAPOL_KEY_IV_LEN 16u
+#define EAPOL_KEY_SIG_LEN 16u
+
+/* RC4 EAPOL-Key */
+typedef BWL_PRE_PACKED_STRUCT struct {
+ unsigned char type; /* Key Descriptor Type */
+ unsigned short length; /* Key Length (unaligned) */
+ unsigned char replay[EAPOL_KEY_REPLAY_LEN]; /* Replay Counter */
+ unsigned char iv[EAPOL_KEY_IV_LEN]; /* Key IV */
+ unsigned char index; /* Key Flags & Index */
+ unsigned char signature[EAPOL_KEY_SIG_LEN]; /* Key Signature */
+ unsigned char key[1]; /* Key (optional) */
+} BWL_POST_PACKED_STRUCT eapol_key_header_t;
+
+#define EAPOL_KEY_HEADER_LEN 44u
+
+/* RC4 EAPOL-Key flags */
+#define EAPOL_KEY_FLAGS_MASK 0x80u
+#define EAPOL_KEY_BROADCAST 0u
+#define EAPOL_KEY_UNICAST 0x80u
+
+/* RC4 EAPOL-Key index */
+#define EAPOL_KEY_INDEX_MASK 0x7fu
+
+/* WPA/802.11i/WPA2 EAPOL-Key header field sizes */
+#define EAPOL_AKW_BLOCK_LEN 8
+#define EAPOL_WPA_KEY_REPLAY_LEN 8u
+#define EAPOL_WPA_KEY_NONCE_LEN 32u
+#define EAPOL_WPA_KEY_IV_LEN 16u
+#define EAPOL_WPA_KEY_RSC_LEN 8u
+#define EAPOL_WPA_KEY_ID_LEN 8u
+#define EAPOL_WPA_KEY_DATA_LEN (EAPOL_WPA_MAX_KEY_SIZE + EAPOL_AKW_BLOCK_LEN)
+#define EAPOL_WPA_MAX_KEY_SIZE 32u
+#define EAPOL_WPA_KEY_MAX_MIC_LEN 32u
+#define EAPOL_WPA_ENCR_KEY_MAX_LEN 64u
+#define EAPOL_WPA_TEMP_ENCR_KEY_MAX_LEN 32u
+
+#define EAPOL_WPA_PMK_MAX_LEN 64u
+#define EAPOL_WPA_PMK_SHA384_LEN 48u
+#define EAPOL_WPA_PMK_DEFAULT_LEN 32u
+#define EAPOL_WPA_KCK_DEFAULT_LEN 16u
+#define EAPOL_WPA_KCK_SHA384_LEN 24u
+#define EAPOL_WPA_KCK_MIC_DEFAULT_LEN 16u
+#define EAPOL_WPA_KCK_MIC_SHA384_LEN 24u
+#define EAPOL_WPA_ENCR_KEY_DEFAULT_LEN 16u
+
+#define EAPOL_WPA_KEK2_SHA256_LEN 16u
+#define EAPOL_WPA_KEK2_SHA384_LEN 32u
+#define EAPOL_WPA_KCK2_SHA256_LEN 16u
+#define EAPOL_WPA_KCK2_SHA384_LEN 24u
+
+#ifndef EAPOL_KEY_HDR_VER_V2
+#define EAPOL_WPA_KEY_MIC_LEN 16u /* deprecated */
+#define EAPOL_WPA_KEY_LEN 95u /* deprecated */
+#endif
+
+#define EAPOL_PTK_KEY_MAX_LEN (EAPOL_WPA_KEY_MAX_MIC_LEN +\
+ EAPOL_WPA_ENCR_KEY_MAX_LEN +\
+ EAPOL_WPA_TEMP_ENCR_KEY_MAX_LEN +\
+ EAPOL_WPA_KCK2_SHA384_LEN +\
+ EAPOL_WPA_KEK2_SHA384_LEN)
+
+#ifndef EAPOL_KEY_HDR_VER_V2
+
+/* WPA EAPOL-Key : deprecated */
+typedef BWL_PRE_PACKED_STRUCT struct {
+ unsigned char type; /* Key Descriptor Type */
+ unsigned short key_info; /* Key Information (unaligned) */
+ unsigned short key_len; /* Key Length (unaligned) */
+ unsigned char replay[EAPOL_WPA_KEY_REPLAY_LEN]; /* Replay Counter */
+ unsigned char nonce[EAPOL_WPA_KEY_NONCE_LEN]; /* Nonce */
+ unsigned char iv[EAPOL_WPA_KEY_IV_LEN]; /* Key IV */
+ unsigned char rsc[EAPOL_WPA_KEY_RSC_LEN]; /* Key RSC */
+ unsigned char id[EAPOL_WPA_KEY_ID_LEN]; /* WPA:Key ID, 802.11i/WPA2: Reserved */
+ unsigned char mic[EAPOL_WPA_KEY_MIC_LEN]; /* Key MIC */
+ unsigned short data_len; /* Key Data Length */
+ unsigned char data[EAPOL_WPA_KEY_DATA_LEN]; /* Key data */
+} BWL_POST_PACKED_STRUCT eapol_wpa_key_header_t;
+#else
+/* WPA EAPOL-Key : new structure to consider dynamic MIC length */
+typedef BWL_PRE_PACKED_STRUCT struct {
+ unsigned char type; /* Key Descriptor Type */
+ unsigned short key_info; /* Key Information (unaligned) */
+ unsigned short key_len; /* Key Length (unaligned) */
+ unsigned char replay[EAPOL_WPA_KEY_REPLAY_LEN]; /* Replay Counter */
+ unsigned char nonce[EAPOL_WPA_KEY_NONCE_LEN]; /* Nonce */
+ unsigned char iv[EAPOL_WPA_KEY_IV_LEN]; /* Key IV */
+ unsigned char rsc[EAPOL_WPA_KEY_RSC_LEN]; /* Key RSC */
+ unsigned char id[EAPOL_WPA_KEY_ID_LEN]; /* WPA:Key ID, 802.11i/WPA2: Reserved */
+} BWL_POST_PACKED_STRUCT eapol_wpa_key_header_v2_t;
+
+typedef eapol_wpa_key_header_v2_t eapol_wpa_key_header_t;
+#endif /* EAPOL_KEY_HDR_VER_V2 */
+
+#define EAPOL_WPA_KEY_DATA_LEN_SIZE 2u
+
+#ifdef EAPOL_KEY_HDR_VER_V2
+#define EAPOL_WPA_KEY_HDR_SIZE(mic_len) (sizeof(eapol_wpa_key_header_v2_t) \
+ + mic_len + EAPOL_WPA_KEY_DATA_LEN_SIZE)
+
+/* WPA EAPOL-Key header macros to reach out mic/data_len/data field */
+#define EAPOL_WPA_KEY_HDR_MIC_PTR(pos) ((uint8 *)pos + sizeof(eapol_wpa_key_header_v2_t))
+#define EAPOL_WPA_KEY_HDR_DATA_LEN_PTR(pos, mic_len) \
+ ((uint8 *)pos + sizeof(eapol_wpa_key_header_v2_t) + mic_len)
+#define EAPOL_WPA_KEY_HDR_DATA_PTR(pos, mic_len) \
+ ((uint8 *)pos + EAPOL_WPA_KEY_HDR_SIZE(mic_len))
+#else
+#define EAPOL_WPA_KEY_HDR_SIZE(mic_len) EAPOL_WPA_KEY_LEN
+#define EAPOL_WPA_KEY_HDR_MIC_PTR(pos) ((uint8 *)&pos->mic)
+#define EAPOL_WPA_KEY_HDR_DATA_LEN_PTR(pos, mic_len) ((uint8 *)&pos->data_len)
+#define EAPOL_WPA_KEY_HDR_DATA_PTR(pos, mic_len) ((uint8 *)&pos->data)
+#endif /* EAPOL_KEY_HDR_VER_V2 */
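+
+/* Usage sketch for the accessor macros above (illustrative): reading the
+ * Key Data Length from a received EAPOL-Key descriptor when the MIC length
+ * is negotiated at run time. The field is unaligned and big-endian on the
+ * wire, so it is assembled byte-wise; the helper name is hypothetical.
+ */
+static inline unsigned short
+eapol_wpa_key_data_len(eapol_wpa_key_header_t *body, unsigned int mic_len)
+{
+	const uint8 *p = EAPOL_WPA_KEY_HDR_DATA_LEN_PTR(body, mic_len);
+	return (unsigned short)(((unsigned short)p[0] << 8) | p[1]);
+}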
+
+/* WPA/802.11i/WPA2 KEY KEY_INFO bits */
+#define WPA_KEY_DESC_OSEN 0x0
+#define WPA_KEY_DESC_V0 0x0
+#define WPA_KEY_DESC_V1 0x01
+#define WPA_KEY_DESC_V2 0x02
+#define WPA_KEY_DESC_V3 0x03
+#define WPA_KEY_PAIRWISE 0x08
+#define WPA_KEY_INSTALL 0x40
+#define WPA_KEY_ACK 0x80
+#define WPA_KEY_MIC 0x100
+#define WPA_KEY_SECURE 0x200
+#define WPA_KEY_ERROR 0x400
+#define WPA_KEY_REQ 0x800
+#define WPA_KEY_ENC_KEY_DATA 0x01000 /* Encrypted Key Data */
+#define WPA_KEY_SMK_MESSAGE 0x02000 /* SMK Message */
+#define WPA_KEY_DESC_VER(_ki) ((_ki) & 0x03u)
+
+#define WPA_KEY_DESC_V2_OR_V3 WPA_KEY_DESC_V2
+
+/* WPA-only KEY KEY_INFO bits */
+#define WPA_KEY_INDEX_0 0x00
+#define WPA_KEY_INDEX_1 0x10
+#define WPA_KEY_INDEX_2 0x20
+#define WPA_KEY_INDEX_3 0x30
+#define WPA_KEY_INDEX_MASK 0x30
+#define WPA_KEY_INDEX_SHIFT 0x04
+
+/* 802.11i/WPA2-only KEY KEY_INFO bits */
+#define WPA_KEY_ENCRYPTED_DATA 0x1000
+
+/* Key Data encapsulation */
+/* this is really just a vendor-specific info element. should define
+ * this in 802.11.h
+ */
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint8 type;
+ uint8 length;
+ uint8 oui[3];
+ uint8 subtype;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT eapol_wpa2_encap_data_t;
+
+#define EAPOL_WPA2_ENCAP_DATA_HDR_LEN 6
+
+#define WPA2_KEY_DATA_SUBTYPE_GTK 1
+#define WPA2_KEY_DATA_SUBTYPE_STAKEY 2
+#define WPA2_KEY_DATA_SUBTYPE_MAC 3
+#define WPA2_KEY_DATA_SUBTYPE_PMKID 4
+#define WPA2_KEY_DATA_SUBTYPE_IGTK 9
+#define WPA2_KEY_DATA_SUBTYPE_OCI 13
+#define WPA2_KEY_DATA_SUBTYPE_BIGTK 14
+
+/* GTK encapsulation */
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint8 flags;
+ uint8 reserved;
+ uint8 gtk[EAPOL_WPA_MAX_KEY_SIZE];
+} BWL_POST_PACKED_STRUCT eapol_wpa2_key_gtk_encap_t;
+
+#define EAPOL_WPA2_KEY_GTK_ENCAP_HDR_LEN 2
+
+#define WPA2_GTK_INDEX_MASK 0x03
+#define WPA2_GTK_INDEX_SHIFT 0x00
+
+#define WPA2_GTK_TRANSMIT 0x04
+
+/* IGTK encapsulation */
+#define EAPOL_RSN_IPN_SIZE 6u
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint16 key_id;
+ uint8 ipn[EAPOL_RSN_IPN_SIZE];
+ uint8 key[EAPOL_WPA_MAX_KEY_SIZE];
+} BWL_POST_PACKED_STRUCT eapol_wpa2_key_igtk_encap_t;
+
+#define EAPOL_WPA2_KEY_IGTK_ENCAP_HDR_LEN 8u
+
+/* BIGTK encapsulation */
+#define EAPOL_RSN_BIPN_SIZE 6u
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint16 key_id;
+ uint8 bipn[EAPOL_RSN_BIPN_SIZE];
+ uint8 key[EAPOL_WPA_MAX_KEY_SIZE];
+} BWL_POST_PACKED_STRUCT eapol_wpa2_key_bigtk_encap_t;
+
+#define EAPOL_WPA2_KEY_BIGTK_ENCAP_HDR_LEN 8u
+
+/* STAKey encapsulation */
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint8 reserved[2];
+ uint8 mac[ETHER_ADDR_LEN];
+ uint8 stakey[EAPOL_WPA_MAX_KEY_SIZE];
+} BWL_POST_PACKED_STRUCT eapol_wpa2_key_stakey_encap_t;
+
+#define WPA2_KEY_DATA_PAD 0xdd
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _eapol_h_ */
diff --git a/bcmdhd.101.10.361.x/include/epivers.h b/bcmdhd.101.10.361.x/include/epivers.h
new file mode 100755
index 0000000..231cfeb
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/epivers.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+*/
+
+#ifndef _epivers_h_
+#define _epivers_h_
+
+#define EPI_MAJOR_VERSION 101
+
+#define EPI_MINOR_VERSION 10
+
+#define EPI_RC_NUMBER 361
+
+#define EPI_INCREMENTAL_NUMBER 0
+
+#define EPI_BUILD_NUMBER 0
+
+#define EPI_VERSION 101, 10, 361, 0
+
+#define EPI_VERSION_NUM 0x650a1690
+
+#define EPI_VERSION_DEV 101.10.361
+
+/* Driver Version String, ASCII, 32 chars max */
+#if defined (WLTEST)
+#define EPI_VERSION_STR "101.10.361 (wlan=r892223 WLTEST)"
+#elif (defined (BCMDBG_ASSERT) && !defined (BCMDBG_ASSERT_DISABLED))
+#define EPI_VERSION_STR "101.10.361 (wlan=r892223 ASSRT)"
+#else
+#define EPI_VERSION_STR "101.10.361.17 (wlan=r892223-20220415-1)(20220426-1)"
+#endif /* WLTEST */
+
+#endif /* _epivers_h_ */
diff --git a/bcmdhd.101.10.361.x/include/etd.h b/bcmdhd.101.10.361.x/include/etd.h
new file mode 100755
index 0000000..013037b
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/etd.h
@@ -0,0 +1,636 @@
+/*
+ * Extended Trap data component interface file.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _ETD_H_
+#define _ETD_H_
+
+#if defined(ETD) && !defined(WLETD)
+#include <hnd_trap.h>
+#endif
+#include <bcmutils.h>
+/* Tags for structures being used by etd info iovar.
+ * Related structures are defined in wlioctl.h.
+ */
+#define ETD_TAG_JOIN_CLASSIFICATION_INFO 10 /* general information about join request */
+#define ETD_TAG_JOIN_TARGET_CLASSIFICATION_INFO 11 /* per target (AP) join information */
+#define ETD_TAG_ASSOC_STATE 12 /* current state of the Device association state machine */
+#define ETD_TAG_CHANNEL 13 /* current channel on which the association was performed */
+#define ETD_TAG_TOTAL_NUM_OF_JOIN_ATTEMPTS 14 /* number of join attempts (bss_retries) */
+
+#define PSMDBG_REG_READ_CNT_FOR_PSMWDTRAP_V1 3
+#define PSMDBG_REG_READ_CNT_FOR_PSMWDTRAP_V2 6
+
+#ifndef _LANGUAGE_ASSEMBLY
+
+#define HND_EXTENDED_TRAP_VERSION 1
+#define HND_EXTENDED_TRAP_BUFLEN 512
+
+typedef struct hnd_ext_trap_hdr {
+ uint8 version; /* Extended trap version info */
+ uint8 reserved; /* currently unused */
+ uint16 len; /* Length of data excluding this header */
+ uint8 data[]; /* TLV data */
+} hnd_ext_trap_hdr_t;
+
+typedef enum {
+ TAG_TRAP_NONE = 0u, /* None trap type */
+ TAG_TRAP_SIGNATURE = 1u, /* Processor register dumps */
+ TAG_TRAP_STACK = 2u, /* Processor stack dump (possible code locations) */
+ TAG_TRAP_MEMORY = 3u, /* Memory subsystem dump */
+ TAG_TRAP_DEEPSLEEP = 4u, /* Deep sleep health check failures */
+ TAG_TRAP_PSM_WD = 5u, /* PSM watchdog information */
+ TAG_TRAP_PHY = 6u, /* Phy related issues */
+ TAG_TRAP_BUS = 7u, /* Bus level issues */
+ TAG_TRAP_MAC_SUSP = 8u, /* Mac level suspend issues */
+ TAG_TRAP_BACKPLANE = 9u, /* Backplane related errors */
+ /* Values 10 through 14 are in use by etd_data info iovar */
+ TAG_TRAP_PCIE_Q = 15u, /* PCIE Queue state during memory trap */
+ TAG_TRAP_WLC_STATE = 16u, /* WLAN state during memory trap */
+ TAG_TRAP_MAC_WAKE = 17u, /* Mac level wake issues */
+ TAG_TRAP_PHYTXERR_THRESH = 18u, /* Phy Tx Err */
+ TAG_TRAP_HC_DATA = 19u, /* Data collected by HC module */
+ TAG_TRAP_LOG_DATA = 20u,
+ TAG_TRAP_CODE = 21u, /* The trap type */
+ TAG_TRAP_HMAP = 22u, /* HMAP violation Address and Info */
+ TAG_TRAP_PCIE_ERR_ATTN = 23u, /* PCIE error attn log */
+ TAG_TRAP_AXI_ERROR = 24u, /* AXI Error */
+ TAG_TRAP_AXI_HOST_INFO = 25u, /* AXI Host log */
+ TAG_TRAP_AXI_SR_ERROR = 26u, /* AXI SR error log */
+ TAG_TRAP_MEM_BIT_FLIP = 27u, /* Memory 1-Bit Flip error */
+ TAG_TRAP_LAST /* This must be the last entry */
+} hnd_ext_tag_trap_t;
+
+typedef struct hnd_ext_trap_bp_err
+{
+ uint32 error;
+ uint32 coreid;
+ uint32 baseaddr;
+ uint32 ioctrl;
+ uint32 iostatus;
+ uint32 resetctrl;
+ uint32 resetstatus;
+ uint32 resetreadid;
+ uint32 resetwriteid;
+ uint32 errlogctrl;
+ uint32 errlogdone;
+ uint32 errlogstatus;
+ uint32 errlogaddrlo;
+ uint32 errlogaddrhi;
+ uint32 errlogid;
+ uint32 errloguser;
+ uint32 errlogflags;
+ uint32 itipoobaout;
+ uint32 itipoobbout;
+ uint32 itipoobcout;
+ uint32 itipoobdout;
+} hnd_ext_trap_bp_err_t;
+
+#define HND_EXT_TRAP_AXISR_INFO_VER_1 1
+typedef struct hnd_ext_trap_axi_sr_err_v1
+{
+ uint8 version;
+ uint8 pad[3];
+ uint32 error;
+ uint32 coreid;
+ uint32 baseaddr;
+ uint32 ioctrl;
+ uint32 iostatus;
+ uint32 resetctrl;
+ uint32 resetstatus;
+ uint32 resetreadid;
+ uint32 resetwriteid;
+ uint32 errlogctrl;
+ uint32 errlogdone;
+ uint32 errlogstatus;
+ uint32 errlogaddrlo;
+ uint32 errlogaddrhi;
+ uint32 errlogid;
+ uint32 errloguser;
+ uint32 errlogflags;
+ uint32 itipoobaout;
+ uint32 itipoobbout;
+ uint32 itipoobcout;
+ uint32 itipoobdout;
+
+ /* axi_sr_issue_debug */
+ uint32 sr_pwr_control;
+ uint32 sr_corereset_wrapper_main;
+ uint32 sr_corereset_wrapper_aux;
+ uint32 sr_main_gci_status_0;
+ uint32 sr_aux_gci_status_0;
+ uint32 sr_dig_gci_status_0;
+} hnd_ext_trap_axi_sr_err_v1_t;
+
+#define HND_EXT_TRAP_PSMWD_INFO_VER 1
+typedef struct hnd_ext_trap_psmwd_v1 {
+ uint16 xtag;
+ uint16 version; /* version of the information following this */
+ uint32 i32_maccontrol;
+ uint32 i32_maccommand;
+ uint32 i32_macintstatus;
+ uint32 i32_phydebug;
+ uint32 i32_clk_ctl_st;
+ uint32 i32_psmdebug[PSMDBG_REG_READ_CNT_FOR_PSMWDTRAP_V1];
+ uint16 i16_0x1a8; /* gated clock en */
+ uint16 i16_0x406; /* Rcv Fifo Ctrl */
+ uint16 i16_0x408; /* Rx ctrl 1 */
+ uint16 i16_0x41a; /* Rxe Status 1 */
+ uint16 i16_0x41c; /* Rxe Status 2 */
+ uint16 i16_0x424; /* rcv wrd count 0 */
+ uint16 i16_0x426; /* rcv wrd count 1 */
+ uint16 i16_0x456; /* RCV_LFIFO_STS */
+ uint16 i16_0x480; /* PSM_SLP_TMR */
+ uint16 i16_0x490; /* PSM BRC */
+ uint16 i16_0x500; /* TXE CTRL */
+ uint16 i16_0x50e; /* TXE Status */
+ uint16 i16_0x55e; /* TXE_xmtdmabusy */
+ uint16 i16_0x566; /* TXE_XMTfifosuspflush */
+ uint16 i16_0x690; /* IFS Stat */
+ uint16 i16_0x692; /* IFS_MEDBUSY_CTR */
+ uint16 i16_0x694; /* IFS_TX_DUR */
+ uint16 i16_0x6a0; /* SLow_CTL */
+ uint16 i16_0x838; /* TXE_AQM fifo Ready */
+ uint16 i16_0x8c0; /* Dagg ctrl */
+ uint16 shm_prewds_cnt;
+ uint16 shm_txtplufl_cnt;
+ uint16 shm_txphyerr_cnt;
+ uint16 pad;
+} hnd_ext_trap_psmwd_v1_t;
+
+typedef struct hnd_ext_trap_psmwd {
+ uint16 xtag;
+ uint16 version; /* version of the information following this */
+ uint32 i32_maccontrol;
+ uint32 i32_maccommand;
+ uint32 i32_macintstatus;
+ uint32 i32_phydebug;
+ uint32 i32_clk_ctl_st;
+ uint32 i32_psmdebug[PSMDBG_REG_READ_CNT_FOR_PSMWDTRAP_V2];
+ uint16 i16_0x4b8; /* psm_brwk_0 */
+ uint16 i16_0x4ba; /* psm_brwk_1 */
+ uint16 i16_0x4bc; /* psm_brwk_2 */
+ uint16 i16_0x4be; /* psm_brwk_3 */
+ uint16 i16_0x1a8; /* gated clock en */
+ uint16 i16_0x406; /* Rcv Fifo Ctrl */
+ uint16 i16_0x408; /* Rx ctrl 1 */
+ uint16 i16_0x41a; /* Rxe Status 1 */
+ uint16 i16_0x41c; /* Rxe Status 2 */
+ uint16 i16_0x424; /* rcv wrd count 0 */
+ uint16 i16_0x426; /* rcv wrd count 1 */
+ uint16 i16_0x456; /* RCV_LFIFO_STS */
+ uint16 i16_0x480; /* PSM_SLP_TMR */
+ uint16 i16_0x500; /* TXE CTRL */
+ uint16 i16_0x50e; /* TXE Status */
+ uint16 i16_0x55e; /* TXE_xmtdmabusy */
+ uint16 i16_0x566; /* TXE_XMTfifosuspflush */
+ uint16 i16_0x690; /* IFS Stat */
+ uint16 i16_0x692; /* IFS_MEDBUSY_CTR */
+ uint16 i16_0x694; /* IFS_TX_DUR */
+ uint16 i16_0x6a0; /* SLow_CTL */
+ uint16 i16_0x490; /* psm_brc */
+ uint16 i16_0x4da; /* psm_brc_1 */
+ uint16 i16_0x838; /* TXE_AQM fifo Ready */
+ uint16 i16_0x8c0; /* Dagg ctrl */
+ uint16 shm_prewds_cnt;
+ uint16 shm_txtplufl_cnt;
+ uint16 shm_txphyerr_cnt;
+} hnd_ext_trap_psmwd_t;
+
+#define HEAP_HISTOGRAM_DUMP_LEN 6
+#define HEAP_MAX_SZ_BLKS_LEN 2
+
+/* Ignore chunks for which there are fewer than this many instances, irrespective of size */
+#define HEAP_HISTOGRAM_INSTANCE_MIN 4
+
+/*
+ * Use the last two length values for chunks larger than this, or, when we run out of
+ * histogram entries (because there are too many different-sized chunks), to store "other"
+ */
+#define HEAP_HISTOGRAM_SPECIAL 0xfffeu
+
+#define HEAP_HISTOGRAM_GRTR256K 0xffffu
+
+typedef struct hnd_ext_trap_heap_err {
+ uint32 arena_total;
+ uint32 heap_free;
+ uint32 heap_inuse;
+ uint32 mf_count;
+ uint32 stack_lwm;
+ uint16 heap_histogm[HEAP_HISTOGRAM_DUMP_LEN * 2]; /* size/number */
+ uint16 max_sz_free_blk[HEAP_MAX_SZ_BLKS_LEN];
+} hnd_ext_trap_heap_err_t;
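+
+/* Decoding sketch (illustrative): walking the size/number pairs in
+ * heap_histogm[] (even index = chunk size, odd index = instance count, as
+ * the "size/number" comment implies), summing bytes in the ordinary
+ * buckets and skipping the aggregated special values. Hypothetical helper.
+ */
+static inline uint32
+heap_histogram_tracked_bytes(const hnd_ext_trap_heap_err_t *he)
+{
+	uint32 i, total = 0;
+	for (i = 0; i < HEAP_HISTOGRAM_DUMP_LEN; i++) {
+		uint16 sz = he->heap_histogm[2 * i];
+		uint16 cnt = he->heap_histogm[2 * i + 1];
+		if (sz == HEAP_HISTOGRAM_SPECIAL || sz == HEAP_HISTOGRAM_GRTR256K)
+			continue; /* "other"/oversize buckets: size unknown */
+		total += (uint32)sz * cnt;
+	}
+	return total;
+}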
+
+#define MEM_TRAP_NUM_WLC_TX_QUEUES 6
+#define HND_EXT_TRAP_WLC_MEM_ERR_VER_V2 2
+
+/* Quite a few chips have already been ROM'ed with this structure, so a
+ * version field will not be added. This remains the V1 structure.
+ */
+typedef struct hnd_ext_trap_wlc_mem_err {
+ uint8 instance;
+ uint8 associated;
+ uint8 soft_ap_client_cnt;
+ uint8 peer_cnt;
+ uint16 txqueue_len[MEM_TRAP_NUM_WLC_TX_QUEUES];
+} hnd_ext_trap_wlc_mem_err_t;
+
+typedef struct hnd_ext_trap_wlc_mem_err_v2 {
+ uint16 version;
+ uint16 pad;
+ uint8 instance;
+ uint8 stas_associated;
+ uint8 aps_associated;
+ uint8 soft_ap_client_cnt;
+ uint16 txqueue_len[MEM_TRAP_NUM_WLC_TX_QUEUES];
+} hnd_ext_trap_wlc_mem_err_v2_t;
+
+#define HND_EXT_TRAP_WLC_MEM_ERR_VER_V3 3
+
+typedef struct hnd_ext_trap_wlc_mem_err_v3 {
+ uint8 version;
+ uint8 instance;
+ uint8 stas_associated;
+ uint8 aps_associated;
+ uint8 soft_ap_client_cnt;
+ uint8 peer_cnt;
+ uint16 txqueue_len[MEM_TRAP_NUM_WLC_TX_QUEUES];
+} hnd_ext_trap_wlc_mem_err_v3_t;
+
+typedef struct hnd_ext_trap_pcie_mem_err {
+ uint16 d2h_queue_len;
+ uint16 d2h_req_queue_len;
+} hnd_ext_trap_pcie_mem_err_t;
+
+#define MAX_DMAFIFO_ENTRIES_V1 1
+#define MAX_DMAFIFO_DESC_ENTRIES_V1 2
+#define HND_EXT_TRAP_AXIERROR_SIGNATURE 0xbabebabe
+#define HND_EXT_TRAP_AXIERROR_VERSION_1 1
+
+/* Structure to collect debug info for a descriptor entry of a DMA channel on encountering an AXI error */
+/* The three structures below are interdependent; any change bumps the version of all three */
+
+typedef struct hnd_ext_trap_desc_entry_v1 {
+ uint32 ctrl1; /* descriptor entry at din < misc control bits > */
+ uint32 ctrl2; /* descriptor entry at din <buffer count and address extension> */
+ uint32 addrlo; /* descriptor entry at din <address of data buffer, bits 31:0> */
+ uint32 addrhi; /* descriptor entry at din <address of data buffer, bits 63:32> */
+} dma_dentry_v1_t;
+
+/* Structure to collect debug info about a dma channel on encountering AXI Error */
+typedef struct hnd_ext_trap_dma_fifo_v1 {
+ uint8 valid; /* no of valid desc entries filled, non zero = fifo entry valid */
+ uint8 direction; /* TX=1, RX=2, currently only using TX */
+ uint16 index; /* Index of the DMA channel in system */
+ uint32 dpa; /* Expected Address of Descriptor table from software state */
+ uint32 desc_lo; /* Low Address of Descriptor table programmed in DMA register */
+ uint32 desc_hi; /* High Address of Descriptor table programmed in DMA register */
+ uint16 din; /* rxin / txin */
+ uint16 dout; /* rxout / txout */
+ dma_dentry_v1_t dentry[MAX_DMAFIFO_DESC_ENTRIES_V1]; /* Descriptor entries */
+} dma_fifo_v1_t;
+
+typedef struct hnd_ext_trap_axi_error_v1 {
+ uint8 version; /* version = 1 */
+ uint8 dma_fifo_valid_count; /* Number of valid dma_fifo entries */
+ uint16 length; /* length of whole structure */
+ uint32 signature; /* indicate that its filled with AXI Error data */
+ uint32 axi_errorlog_status; /* errlog_status from slave wrapper */
+ uint32 axi_errorlog_core; /* errlog_core from slave wrapper */
+ uint32 axi_errorlog_lo; /* errlog_lo from slave wrapper */
+ uint32 axi_errorlog_hi; /* errlog_hi from slave wrapper */
+ uint32 axi_errorlog_id; /* errlog_id from slave wrapper */
+ dma_fifo_v1_t dma_fifo[MAX_DMAFIFO_ENTRIES_V1];
+} hnd_ext_trap_axi_error_v1_t;
+
+#define HND_EXT_TRAP_MACSUSP_INFO_VER 1
+typedef struct hnd_ext_trap_macsusp {
+ uint16 xtag;
+ uint8 version; /* version of the information following this */
+ uint8 trap_reason;
+ uint32 i32_maccontrol;
+ uint32 i32_maccommand;
+ uint32 i32_macintstatus;
+ uint32 i32_phydebug[4];
+ uint32 i32_psmdebug[8];
+ uint16 i16_0x41a; /* Rxe Status 1 */
+ uint16 i16_0x41c; /* Rxe Status 2 */
+ uint16 i16_0x490; /* PSM BRC */
+ uint16 i16_0x50e; /* TXE Status */
+ uint16 i16_0x55e; /* TXE_xmtdmabusy */
+ uint16 i16_0x566; /* TXE_XMTfifosuspflush */
+ uint16 i16_0x690; /* IFS Stat */
+ uint16 i16_0x692; /* IFS_MEDBUSY_CTR */
+ uint16 i16_0x694; /* IFS_TX_DUR */
+ uint16 i16_0x7c0; /* WEP CTL */
+ uint16 i16_0x838; /* TXE_AQM fifo Ready */
+ uint16 i16_0x880; /* MHP_status */
+ uint16 shm_prewds_cnt;
+ uint16 shm_ucode_dbgst;
+} hnd_ext_trap_macsusp_t;
+
+#define HND_EXT_TRAP_MACENAB_INFO_VER 1
+typedef struct hnd_ext_trap_macenab {
+ uint16 xtag;
+ uint8 version; /* version of the information following this */
+ uint8 trap_reason;
+ uint32 i32_maccontrol;
+ uint32 i32_maccommand;
+ uint32 i32_macintstatus;
+ uint32 i32_psmdebug[8];
+ uint32 i32_clk_ctl_st;
+ uint32 i32_powerctl;
+ uint16 i16_0x1a8; /* gated clock en */
+ uint16 i16_0x480; /* PSM_SLP_TMR */
+ uint16 i16_0x490; /* PSM BRC */
+ uint16 i16_0x600; /* TSF CTL */
+ uint16 i16_0x690; /* IFS Stat */
+ uint16 i16_0x692; /* IFS_MEDBUSY_CTR */
+ uint16 i16_0x6a0; /* SLow_CTL */
+ uint16 i16_0x6a6; /* SLow_FRAC */
+ uint16 i16_0x6a8; /* fast power up delay */
+ uint16 i16_0x6aa; /* SLow_PER */
+ uint16 shm_ucode_dbgst;
+ uint16 PAD;
+} hnd_ext_trap_macenab_t;
+
+#define HND_EXT_TRAP_PHY_INFO_VER_1 (1)
+typedef struct hnd_ext_trap_phydbg {
+ uint16 err;
+ uint16 RxFeStatus;
+ uint16 TxFIFOStatus0;
+ uint16 TxFIFOStatus1;
+ uint16 RfseqMode;
+ uint16 RfseqStatus0;
+ uint16 RfseqStatus1;
+ uint16 RfseqStatus_Ocl;
+ uint16 RfseqStatus_Ocl1;
+ uint16 OCLControl1;
+ uint16 TxError;
+ uint16 bphyTxError;
+ uint16 TxCCKError;
+ uint16 TxCtrlWrd0;
+ uint16 TxCtrlWrd1;
+ uint16 TxCtrlWrd2;
+ uint16 TxLsig0;
+ uint16 TxLsig1;
+ uint16 TxVhtSigA10;
+ uint16 TxVhtSigA11;
+ uint16 TxVhtSigA20;
+ uint16 TxVhtSigA21;
+ uint16 txPktLength;
+ uint16 txPsdulengthCtr;
+ uint16 gpioClkControl;
+ uint16 gpioSel;
+ uint16 pktprocdebug;
+ uint16 PAD;
+ uint32 gpioOut[3];
+} hnd_ext_trap_phydbg_t;
+
+/* unique IDs for separate cores in SI */
+#define REGDUMP_MASK_MAC0 BCM_BIT(1)
+#define REGDUMP_MASK_ARM BCM_BIT(2)
+#define REGDUMP_MASK_PCIE BCM_BIT(3)
+#define REGDUMP_MASK_MAC1 BCM_BIT(4)
+#define REGDUMP_MASK_PMU BCM_BIT(5)
+
+typedef struct {
+ uint16 reg_offset;
+ uint16 core_mask;
+} reg_dump_config_t;
+
+#define HND_EXT_TRAP_PHY_INFO_VER 2
+typedef struct hnd_ext_trap_phydbg_v2 {
+ uint8 version;
+ uint8 len;
+ uint16 err;
+ uint16 RxFeStatus;
+ uint16 TxFIFOStatus0;
+ uint16 TxFIFOStatus1;
+ uint16 RfseqMode;
+ uint16 RfseqStatus0;
+ uint16 RfseqStatus1;
+ uint16 RfseqStatus_Ocl;
+ uint16 RfseqStatus_Ocl1;
+ uint16 OCLControl1;
+ uint16 TxError;
+ uint16 bphyTxError;
+ uint16 TxCCKError;
+ uint16 TxCtrlWrd0;
+ uint16 TxCtrlWrd1;
+ uint16 TxCtrlWrd2;
+ uint16 TxLsig0;
+ uint16 TxLsig1;
+ uint16 TxVhtSigA10;
+ uint16 TxVhtSigA11;
+ uint16 TxVhtSigA20;
+ uint16 TxVhtSigA21;
+ uint16 txPktLength;
+ uint16 txPsdulengthCtr;
+ uint16 gpioClkControl;
+ uint16 gpioSel;
+ uint16 pktprocdebug;
+ uint32 gpioOut[3];
+ uint32 additional_regs[1];
+} hnd_ext_trap_phydbg_v2_t;
+
+#define HND_EXT_TRAP_PHY_INFO_VER_3 (3)
+typedef struct hnd_ext_trap_phydbg_v3 {
+ uint8 version;
+ uint8 len;
+ uint16 err;
+ uint16 RxFeStatus;
+ uint16 TxFIFOStatus0;
+ uint16 TxFIFOStatus1;
+ uint16 RfseqMode;
+ uint16 RfseqStatus0;
+ uint16 RfseqStatus1;
+ uint16 RfseqStatus_Ocl;
+ uint16 RfseqStatus_Ocl1;
+ uint16 OCLControl1;
+ uint16 TxError;
+ uint16 bphyTxError;
+ uint16 TxCCKError;
+ uint16 TxCtrlWrd0;
+ uint16 TxCtrlWrd1;
+ uint16 TxCtrlWrd2;
+ uint16 TxLsig0;
+ uint16 TxLsig1;
+ uint16 TxVhtSigA10;
+ uint16 TxVhtSigA11;
+ uint16 TxVhtSigA20;
+ uint16 TxVhtSigA21;
+ uint16 txPktLength;
+ uint16 txPsdulengthCtr;
+ uint16 gpioClkControl;
+ uint16 gpioSel;
+ uint16 pktprocdebug;
+ uint32 gpioOut[3];
+ uint16 HESigURateFlagStatus;
+ uint16 HESigUsRateFlagStatus;
+ uint32 additional_regs[1];
+} hnd_ext_trap_phydbg_v3_t;
+
+/* Phy TxErr Dump Structure */
+#define HND_EXT_TRAP_PHYTXERR_INFO_VER 1
+#define HND_EXT_TRAP_PHYTXERR_INFO_VER_V2 2
+typedef struct hnd_ext_trap_macphytxerr {
+ uint8 version; /* version of the information following this */
+ uint8 trap_reason;
+ uint16 i16_0x63E; /* tsf_tmr_rx_ts */
+ uint16 i16_0x640; /* tsf_tmr_tx_ts */
+ uint16 i16_0x642; /* tsf_tmr_rx_end_ts */
+ uint16 i16_0x846; /* TDC_FrmLen0 */
+ uint16 i16_0x848; /* TDC_FrmLen1 */
+ uint16 i16_0x84a; /* TDC_Txtime */
+ uint16 i16_0xa5a; /* TXE_BytCntInTxFrmLo */
+ uint16 i16_0xa5c; /* TXE_BytCntInTxFrmHi */
+ uint16 i16_0x856; /* TDC_VhtPsduLen0 */
+ uint16 i16_0x858; /* TDC_VhtPsduLen1 */
+ uint16 i16_0x490; /* psm_brc */
+ uint16 i16_0x4d8; /* psm_brc_1 */
+ uint16 shm_txerr_reason;
+ uint16 shm_pctl0;
+ uint16 shm_pctl1;
+ uint16 shm_pctl2;
+ uint16 shm_lsig0;
+ uint16 shm_lsig1;
+ uint16 shm_plcp0;
+ uint16 shm_plcp1;
+ uint16 shm_plcp2;
+ uint16 shm_vht_sigb0;
+ uint16 shm_vht_sigb1;
+ uint16 shm_tx_tst;
+ uint16 shm_txerr_tm;
+ uint16 shm_curchannel;
+ uint16 shm_crx_rxtsf_pos;
+ uint16 shm_lasttx_tsf;
+ uint16 shm_s_rxtsftmrval;
+ uint16 i16_0x29; /* Phy indirect address */
+ uint16 i16_0x2a; /* Phy indirect address */
+} hnd_ext_trap_macphytxerr_t;
+
+typedef struct hnd_ext_trap_macphytxerr_v2 {
+ uint8 version; /* version of the information following this */
+ uint8 trap_reason;
+ uint16 i16_0x63E; /* tsf_tmr_rx_ts */
+ uint16 i16_0x640; /* tsf_tmr_tx_ts */
+ uint16 i16_0x642; /* tsf_tmr_rx_end_ts */
+ uint16 i16_0x846; /* TDC_FrmLen0 */
+ uint16 i16_0x848; /* TDC_FrmLen1 */
+ uint16 i16_0x84a; /* TDC_Txtime */
+ uint16 i16_0xa5a; /* TXE_BytCntInTxFrmLo */
+ uint16 i16_0xa5c; /* TXE_BytCntInTxFrmHi */
+ uint16 i16_0x856; /* TDC_VhtPsduLen0 */
+ uint16 i16_0x858; /* TDC_VhtPsduLen1 */
+ uint16 i16_0x490; /* psm_brc */
+ uint16 i16_0x4d8; /* psm_brc_1 */
+ uint16 shm_txerr_reason;
+ uint16 shm_pctl0;
+ uint16 shm_pctl1;
+ uint16 shm_pctl2;
+ uint16 shm_lsig0;
+ uint16 shm_lsig1;
+ uint16 shm_plcp0;
+ uint16 shm_plcp1;
+ uint16 shm_plcp2;
+ uint16 shm_vht_sigb0;
+ uint16 shm_vht_sigb1;
+ uint16 shm_tx_tst;
+ uint16 shm_txerr_tm;
+ uint16 shm_curchannel;
+ uint16 shm_crx_rxtsf_pos;
+ uint16 shm_lasttx_tsf;
+ uint16 shm_s_rxtsftmrval;
+ uint16 i16_0x29; /* Phy indirect address */
+ uint16 i16_0x2a; /* Phy indirect address */
+ uint8 phyerr_bmac_cnt; /* number of times bmac raised phy tx err */
+ uint8 phyerr_bmac_rsn; /* bmac reason for phy tx error */
+ uint16 pad;
+ uint32 recv_fifo_status[3][2]; /* Rcv Status0 & Rcv Status1 for 3 Rx fifos */
+} hnd_ext_trap_macphytxerr_v2_t;
+
+#define HND_EXT_TRAP_PCIE_ERR_ATTN_VER_1 (1u)
+#define MAX_AER_HDR_LOG_REGS (4u)
+typedef struct hnd_ext_trap_pcie_err_attn_v1 {
+ uint8 version;
+ uint8 pad[3];
+ uint32 err_hdr_logreg1;
+ uint32 err_hdr_logreg2;
+ uint32 err_hdr_logreg3;
+ uint32 err_hdr_logreg4;
+ uint32 err_code_logreg;
+ uint32 err_type;
+ uint32 err_code_state;
+ uint32 last_err_attn_ts;
+ uint32 cfg_tlp_hdr[MAX_AER_HDR_LOG_REGS];
+} hnd_ext_trap_pcie_err_attn_v1_t;
+
+#define MAX_EVENTLOG_BUFFERS 48
+typedef struct eventlog_trapdata_info {
+ uint32 num_elements;
+ uint32 seq_num;
+ uint32 log_arr_addr;
+} eventlog_trapdata_info_t;
+
+typedef struct eventlog_trap_buf_info {
+ uint32 len;
+ uint32 buf_addr;
+} eventlog_trap_buf_info_t;
+
+#define HND_MEM_HC_FB_MEM_VER_1 (1u)
+typedef struct hnd_ext_trap_fb_mem_err {
+ uint16 version;
+ uint16 reserved;
+ uint32 flip_bit_err_time;
+} hnd_ext_trap_fb_mem_err_t;
+
+#if defined(ETD) && !defined(WLETD)
+#define ETD_SW_FLAG_MEM 0x00000001
+
+int etd_init(osl_t *osh);
+int etd_register_trap_ext_callback(void *cb, void *arg);
+int (etd_register_trap_ext_callback_late)(void *cb, void *arg);
+uint32 *etd_get_trap_ext_data(void);
+uint32 etd_get_trap_ext_swflags(void);
+void etd_set_trap_ext_swflag(uint32 flag);
+void etd_notify_trap_ext_callback(trap_t *tr);
+reg_dump_config_t *etd_get_reg_dump_config_tbl(void);
+uint etd_get_reg_dump_config_len(void);
+
+extern bool _etd_enab;
+
+#if defined(ROM_ENAB_RUNTIME_CHECK)
+ #define ETD_ENAB(pub) (_etd_enab)
+#elif defined(ETD_DISABLED)
+ #define ETD_ENAB(pub) (0)
+#else
+ #define ETD_ENAB(pub) (1)
+#endif
+
+#else
+#define ETD_ENAB(pub) (0)
+#endif /* WLETD */
+
+#endif /* !LANGUAGE_ASSEMBLY */
+
+#endif /* _ETD_H_ */
diff --git a/bcmdhd.101.10.361.x/include/ethernet.h b/bcmdhd.101.10.361.x/include/ethernet.h
new file mode 100755
index 0000000..f378dd2
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/ethernet.h
@@ -0,0 +1,252 @@
+/*
+ * From FreeBSD 2.2.7: Fundamental constants relating to ethernet.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _NET_ETHERNET_H_ /* use native BSD ethernet.h when available */
+#define _NET_ETHERNET_H_
+
+#ifndef _TYPEDEFS_H_
+#include "typedefs.h"
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+/*
+ * The number of bytes in an ethernet (MAC) address.
+ */
+#define ETHER_ADDR_LEN 6
+
+/*
+ * The number of bytes in the type field.
+ */
+#define ETHER_TYPE_LEN 2
+
+/*
+ * The number of bytes in the trailing CRC field.
+ */
+#define ETHER_CRC_LEN 4
+
+/*
+ * The length of the combined header.
+ */
+#define ETHER_HDR_LEN (ETHER_ADDR_LEN * 2 + ETHER_TYPE_LEN)
+
+/*
+ * The minimum packet length.
+ */
+#define ETHER_MIN_LEN 64
+
+/*
+ * The minimum packet user data length.
+ */
+#define ETHER_MIN_DATA 46
+
+/*
+ * The maximum packet length.
+ */
+#define ETHER_MAX_LEN 1518
+
+/*
+ * The maximum packet user data length.
+ */
+#define ETHER_MAX_DATA 1500
+
+/* ether types */
+#define ETHER_TYPE_MIN 0x0600 /* Anything less than MIN is a length */
+#define ETHER_TYPE_IP 0x0800 /* IP */
+#define ETHER_TYPE_ARP 0x0806 /* ARP */
+#define ETHER_TYPE_8021Q 0x8100 /* 802.1Q */
+#define ETHER_TYPE_IPV6 0x86dd /* IPv6 */
+#define ETHER_TYPE_BRCM 0x886c /* Broadcom Corp. */
+#define ETHER_TYPE_802_1X 0x888e /* 802.1x */
+#define ETHER_TYPE_802_1X_PREAUTH 0x88c7 /* 802.1x preauthentication */
+#define ETHER_TYPE_WAI 0x88b4 /* WAI */
+#define ETHER_TYPE_89_0D 0x890d /* 89-0d frame for TDLS */
+#define ETHER_TYPE_RRB ETHER_TYPE_89_0D /* RRB 802.11r 2008 */
+#define ETHER_TYPE_1905_1 0x893a /* IEEE 1905.1 MCDU */
+
+#define ETHER_TYPE_PPP_SES 0x8864 /* PPPoE Session */
+
+#define ETHER_TYPE_IAPP_L2_UPDATE 0x6 /* IAPP L2 update frame */
+
+/* Broadcom subtype follows ethertype; First 2 bytes are reserved; Next 2 are subtype; */
+#define ETHER_BRCM_SUBTYPE_LEN 4 /* Broadcom 4 byte subtype */
+
+/* ether header */
+#define ETHER_DEST_OFFSET (0 * ETHER_ADDR_LEN) /* dest address offset */
+#define ETHER_SRC_OFFSET (1 * ETHER_ADDR_LEN) /* src address offset */
+#define ETHER_TYPE_OFFSET (2 * ETHER_ADDR_LEN) /* ether type offset */
+
+/*
+ * A macro to validate an Ethernet frame length.
+ */
+#define ETHER_IS_VALID_LEN(foo) \
+ ((foo) >= ETHER_MIN_LEN && (foo) <= ETHER_MAX_LEN)
+
+#define ETHER_FILL_MCAST_ADDR_FROM_IP(ea, mgrp_ip) { \
+ ((uint8 *)ea)[0] = 0x01; \
+ ((uint8 *)ea)[1] = 0x00; \
+ ((uint8 *)ea)[2] = 0x5e; \
+ ((uint8 *)ea)[3] = ((mgrp_ip) >> 16) & 0x7f; \
+ ((uint8 *)ea)[4] = ((mgrp_ip) >> 8) & 0xff; \
+ ((uint8 *)ea)[5] = ((mgrp_ip) >> 0) & 0xff; \
+}
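+
+/* Usage sketch (illustrative): mapping the IPv4 multicast group 224.1.2.3
+ * (0xe0010203) with the macro above yields 01:00:5e:01:02:03 -- the low
+ * 23 bits of the group address are copied behind the fixed 01:00:5e
+ * multicast OUI prefix.
+ *
+ *	struct ether_addr mcast;
+ *	ETHER_FILL_MCAST_ADDR_FROM_IP(&mcast, 0xe0010203u);
+ *	// mcast.octet is now { 0x01, 0x00, 0x5e, 0x01, 0x02, 0x03 }
+ */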
+
+#ifndef __INCif_etherh /* Quick and ugly hack for VxWorks */
+/*
+ * Structure of a 10Mb/s Ethernet header.
+ */
+BWL_PRE_PACKED_STRUCT struct ether_header {
+ uint8 ether_dhost[ETHER_ADDR_LEN];
+ uint8 ether_shost[ETHER_ADDR_LEN];
+ uint16 ether_type;
+} BWL_POST_PACKED_STRUCT;
+
+/*
+ * Structure of a 48-bit Ethernet address.
+ */
+BWL_PRE_PACKED_STRUCT struct ether_addr {
+ uint8 octet[ETHER_ADDR_LEN];
+} BWL_POST_PACKED_STRUCT;
+#endif /* __INCif_etherh */
+#ifdef __INCif_etherh
+#endif /* !__INCif_etherh Quick and ugly hack for VxWorks */
+
+/*
+ * Take a pointer; set, test, clear, or toggle the locally administered
+ * address bit in the 48-bit Ethernet address.
+ */
+#define ETHER_SET_LOCALADDR(ea) (((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] | 2))
+#define ETHER_IS_LOCALADDR(ea) (((uint8 *)(ea))[0] & 2)
+#define ETHER_CLR_LOCALADDR(ea) (((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] & 0xfd))
+#define ETHER_TOGGLE_LOCALADDR(ea) (((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] ^ 2))
+
+/* Takes a pointer, marks unicast address bit in the MAC address */
+#define ETHER_SET_UNICAST(ea) (((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] & ~1))
+
+/*
+ * Takes a pointer, returns true if a 48-bit multicast address
+ * (including broadcast, since it is all ones)
+ */
+#define ETHER_ISMULTI(ea) (((const uint8 *)(ea))[0] & 1)
+
+/* compare two ethernet addresses - assumes the pointers can be referenced as shorts */
+#if defined(DONGLEBUILD) && defined(__ARM_ARCH_7A__) && !defined(BCMFUZZ)
+#define eacmp(a, b) (((*(const uint32 *)(a)) ^ (*(const uint32 *)(b))) || \
+ ((*(const uint16 *)(((const uint8 *)(a)) + 4)) ^ \
+ (*(const uint16 *)(((const uint8 *)(b)) + 4))))
+
+#define ehcmp(a, b) ((((const uint32 *)(a))[0] ^ ((const uint32 *)(b))[0]) || \
+ (((const uint32 *)(a))[1] ^ ((const uint32 *)(b))[1]) || \
+ (((const uint32 *)(a))[2] ^ ((const uint32 *)(b))[2]) || \
+ ((*(const uint16 *)(((const uint32 *)(a)) + 3)) ^ \
+ (*(const uint16 *)(((const uint32 *)(b)) + 3))))
+#else
+#define eacmp(a, b) ((((const uint16 *)(a))[0] ^ ((const uint16 *)(b))[0]) | \
+ (((const uint16 *)(a))[1] ^ ((const uint16 *)(b))[1]) | \
+ (((const uint16 *)(a))[2] ^ ((const uint16 *)(b))[2]))
+
+#define ehcmp(a, b) ((((const uint16 *)(a))[0] ^ ((const uint16 *)(b))[0]) | \
+ (((const uint16 *)(a))[1] ^ ((const uint16 *)(b))[1]) | \
+ (((const uint16 *)(a))[2] ^ ((const uint16 *)(b))[2]) | \
+ (((const uint16 *)(a))[3] ^ ((const uint16 *)(b))[3]) | \
+ (((const uint16 *)(a))[4] ^ ((const uint16 *)(b))[4]) | \
+ (((const uint16 *)(a))[5] ^ ((const uint16 *)(b))[5]) | \
+ (((const uint16 *)(a))[6] ^ ((const uint16 *)(b))[6]))
+#endif /* DONGLEBUILD && __ARM_ARCH_7A__ */
+
+#define ether_cmp(a, b) eacmp(a, b)
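+
+/* Usage note (illustrative): eacmp()/ether_cmp() XOR the address halfwords,
+ * so they return zero on match and non-zero on mismatch, like memcmp().
+ * Both pointers must be at least 16-bit aligned.
+ *
+ *	if (ether_cmp(eh->ether_dhost, &cur_etheraddr) == 0) {
+ *		// frame is addressed to us (eh/cur_etheraddr are placeholders)
+ *	}
+ */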
+
+/* copy an ethernet address - assumes the pointers can be referenced as shorts */
+#if defined(DONGLEBUILD) && defined(__ARM_ARCH_7A__) && !defined(BCMFUZZ)
+#define eacopy(s, d) \
+do { \
+ (*(uint32 *)(d)) = (*(const uint32 *)(s)); \
+ (*(uint16 *)(((uint8 *)(d)) + 4)) = (*(const uint16 *)(((const uint8 *)(s)) + 4)); \
+} while (0)
+#else
+#define eacopy(s, d) \
+do { \
+ ((uint16 *)(d))[0] = ((const uint16 *)(s))[0]; \
+ ((uint16 *)(d))[1] = ((const uint16 *)(s))[1]; \
+ ((uint16 *)(d))[2] = ((const uint16 *)(s))[2]; \
+} while (0)
+#endif /* DONGLEBUILD && __ARM_ARCH_7A__ */
+
+#define ether_copy(s, d) eacopy(s, d)
+
+/* Copy an ethernet address in reverse order */
+#define ether_rcopy(s, d) \
+do { \
+ ((uint16 *)(d))[2] = ((uint16 *)(s))[2]; \
+ ((uint16 *)(d))[1] = ((uint16 *)(s))[1]; \
+ ((uint16 *)(d))[0] = ((uint16 *)(s))[0]; \
+} while (0)
+
+/* Copy 14B ethernet header: 32bit aligned source and destination. */
+#define ehcopy32(s, d) \
+do { \
+ ((uint32 *)(d))[0] = ((const uint32 *)(s))[0]; \
+ ((uint32 *)(d))[1] = ((const uint32 *)(s))[1]; \
+ ((uint32 *)(d))[2] = ((const uint32 *)(s))[2]; \
+ ((uint16 *)(d))[6] = ((const uint16 *)(s))[6]; \
+} while (0)
+
+/* Dongles use bcmutils functions instead of macros.
+ * Possibly slower but saves over 800 bytes off THUMB dongle image.
+ */
+
+extern const struct ether_addr ether_bcast;
+extern const struct ether_addr ether_null;
+extern const struct ether_addr ether_ipv6_mcast;
+
+extern int ether_isbcast(const void *ea);
+extern int ether_isnulladdr(const void *ea);
+
+#define ETHER_ISBCAST(ea) ether_isbcast(ea)
+
+#if defined(__ARM_ARCH_7A__) && !defined(BCMFUZZ)
+#define ETHER_ISNULLADDR(ea) (((*(const uint32 *)(ea)) | \
+ (*(const uint16 *)(((const uint8 *)(ea)) + 4))) == 0)
+#else
+#define ETHER_ISNULLADDR(ea) ether_isnulladdr(ea)
+#endif /* __ARM_ARCH_7A__ */
+
+#define ETHER_ISNULLDEST(da) ((((const uint16 *)(da))[0] | \
+ ((const uint16 *)(da))[1] | \
+ ((const uint16 *)(da))[2]) == 0)
+#define ETHER_ISNULLSRC(sa) ETHER_ISNULLDEST(sa)
+
+#define ETHER_MOVE_HDR(d, s) \
+do { \
+ struct ether_header t; \
+ t = *(struct ether_header *)(s); \
+ *(struct ether_header *)(d) = t; \
+} while (0)
+
+#define ETHER_ISUCAST(ea) ((((uint8 *)(ea))[0] & 0x01) == 0)
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _NET_ETHERNET_H_ */
diff --git a/bcmdhd.101.10.361.x/include/event_log.h b/bcmdhd.101.10.361.x/include/event_log.h
new file mode 100755
index 0000000..8ede661
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/event_log.h
@@ -0,0 +1,666 @@
+/*
+ * EVENT_LOG system definitions
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _EVENT_LOG_H_
+#define _EVENT_LOG_H_
+
+#include <typedefs.h>
+#include <event_log_set.h>
+#include <event_log_tag.h>
+#include <event_log_payload.h>
+
+/* logstrs header */
+#define LOGSTRS_MAGIC 0x4C4F4753
+#define LOGSTRS_VERSION 0x1
+
+/* max log size */
+#define EVENT_LOG_MAX_SIZE (64u * 1024u)
+
+/* We make sure that the block size will fit in a single packet
+ * (allowing for a bit of overhead on each packet).
+ */
+#if defined(BCMPCIEDEV)
+#define EVENT_LOG_MAX_BLOCK_SIZE 1648
+#else
+#define EVENT_LOG_MAX_BLOCK_SIZE 1400
+#endif
+
+#define EVENT_LOG_BLOCK_SIZE_1K 0x400u
+#define EVENT_LOG_WL_BLOCK_SIZE 0x200
+#define EVENT_LOG_PSM_BLOCK_SIZE 0x200
+#define EVENT_LOG_MEM_API_BLOCK_SIZE 0x200
+#define EVENT_LOG_BUS_BLOCK_SIZE 0x200
+#define EVENT_LOG_ERROR_BLOCK_SIZE 0x400
+#define EVENT_LOG_MSCH_BLOCK_SIZE 0x400
+#define EVENT_LOG_WBUS_BLOCK_SIZE 0x100
+#define EVENT_LOG_PRSV_PERIODIC_BLOCK_SIZE (0x200u)
+
+#define EVENT_LOG_WL_BUF_SIZE (EVENT_LOG_WL_BLOCK_SIZE * 3u)
+
+#define EVENT_LOG_TOF_INLINE_BLOCK_SIZE 1300u
+#define EVENT_LOG_TOF_INLINE_BUF_SIZE (EVENT_LOG_TOF_INLINE_BLOCK_SIZE * 3u)
+
+#define EVENT_LOG_PRSRV_BUF_SIZE (EVENT_LOG_MAX_BLOCK_SIZE * 2)
+#define EVENT_LOG_BUS_PRSRV_BUF_SIZE (EVENT_LOG_BUS_BLOCK_SIZE * 2)
+#define EVENT_LOG_WBUS_PRSRV_BUF_SIZE (EVENT_LOG_WBUS_BLOCK_SIZE * 2)
+
+#define EVENT_LOG_BLOCK_SIZE_PRSRV_CHATTY (EVENT_LOG_MAX_BLOCK_SIZE * 1)
+#define EVENT_LOG_BLOCK_SIZE_BUS_PRSRV_CHATTY (EVENT_LOG_MAX_BLOCK_SIZE * 1)
+
+/* Maximum event log record payload size = 1016 bytes or 254 words. */
+#define EVENT_LOG_MAX_RECORD_PAYLOAD_SIZE 254
+
+#define EVENT_LOG_EXT_HDR_IND (0x01)
+#define EVENT_LOG_EXT_HDR_BIN_DATA_IND (0x01 << 1)
+/* Format number to send binary data with extended event log header */
+#define EVENT_LOG_EXT_HDR_BIN_FMT_NUM (0x3FFE << 2)
+
+#define EVENT_LOGSET_ID_MASK 0x3F
+/* For event_log_get iovar, set values from 240 to 255 mean special commands for a group of sets */
+#define EVENT_LOG_GET_IOV_CMD_MASK (0xF0u)
+#define EVENT_LOG_GET_IOV_CMD_ID_MASK (0xFu)
+#define EVENT_LOG_GET_IOV_CMD_ID_FORCE_FLUSH_PRSRV (0xEu) /* 240 + 14 = 254 */
+#define EVENT_LOG_GET_IOV_CMD_ID_FORCE_FLUSH_ALL (0xFu) /* 240 + 15 = 255 */
+
+/*
+ * There are multiple levels of objects defined here:
+ * event_log_set - a set of buffers
+ * event log groups - every event log call is part of just one. All
+ * event log calls in a group are handled the
+ * same way. Each event log group is associated
+ * with an event log set or is off.
+ */
+
+#ifndef __ASSEMBLER__
+
+/* On the external system where the dumper runs, we need to make sure
+ * that these types are the same size as they are on the ARM that
+ * produced them.
+ */
+#ifdef EVENT_LOG_DUMPER
+#define _EL_BLOCK_PTR uint32
+#define _EL_TYPE_PTR uint32
+#define _EL_SET_PTR uint32
+#define _EL_TOP_PTR uint32
+#else
+#define _EL_BLOCK_PTR struct event_log_block *
+#define _EL_TYPE_PTR uint32 *
+#define _EL_SET_PTR struct event_log_set **
+#define _EL_TOP_PTR struct event_log_top *
+#endif /* EVENT_LOG_DUMPER */
+
+/* Event log sets (a logical circular buffer) consist of one or more
+ * event_log_blocks. The blocks themselves form a logical circular
+ * list. The log entries are placed in each event_log_block until it
+ * is full. Logging continues with the next event_log_block in the
+ * event_set until the last event_log_block is reached and then
+ * logging starts over with the first event_log_block in the
+ * event_set.
+ */
+typedef struct event_log_block {
+ _EL_BLOCK_PTR next_block;
+ _EL_BLOCK_PTR prev_block;
+ _EL_TYPE_PTR end_ptr;
+
+ /* Start of packet sent for log tracing */
+ uint16 pktlen; /* Size of rest of block */
+ uint16 count; /* Logtrace counter */
+ uint32 extra_hdr_info; /* LSB: 6 bits set id. MSB 24 bits reserved */
+ uint32 event_logs; /* Pointer to BEGINNING of event logs */
+ /* Event logs go here. Do not put extra fields below. */
+} event_log_block_t;
+
+/* Relative offset of extra_hdr_info field from pktlen field in log block */
+#define EVENT_LOG_BUF_EXTRA_HDR_INFO_REL_PKTLEN_OFFSET \
+ (OFFSETOF(event_log_block_t, extra_hdr_info) - OFFSETOF(event_log_block_t, pktlen))
+
+#define EVENT_LOG_SETID_MASK (0x3Fu)
+
+#define EVENT_LOG_BLOCK_HDRLEN (sizeof(((event_log_block_t *) 0)->pktlen) \
+ + sizeof(((event_log_block_t *) 0)->count) \
+ + sizeof(((event_log_block_t *) 0)->extra_hdr_info))
+#define EVENT_LOG_BLOCK_LEN (EVENT_LOG_BLOCK_HDRLEN + sizeof(event_log_hdr_t))
+
+#define EVENT_LOG_PRESERVE_BLOCK (1 << 0)
+#define EVENT_LOG_BLOCK_FLAG_MASK 0xff000000u
+#define EVENT_LOG_BLOCK_FLAG_SHIFT 24u
+
+#define EVENT_LOG_BLOCK_GET_PREV_BLOCK(block) ((_EL_BLOCK_PTR)(((uint32)((block)->prev_block)) & \
+ ~EVENT_LOG_BLOCK_FLAG_MASK))
+#define EVENT_LOG_BLOCK_SET_PREV_BLOCK(block, prev) ((block)->prev_block = \
+ ((_EL_BLOCK_PTR)((((uint32)(block)->prev_block) & EVENT_LOG_BLOCK_FLAG_MASK) | \
+ (((uint32)(prev)) & ~EVENT_LOG_BLOCK_FLAG_MASK))))
+#define EVENT_LOG_BLOCK_GET_FLAG(block) ((((uint32)(block)->prev_block) & \
+ EVENT_LOG_BLOCK_FLAG_MASK) >> EVENT_LOG_BLOCK_FLAG_SHIFT)
+#define EVENT_LOG_BLOCK_SET_FLAG(block, flag) ((block)->prev_block = \
+ (_EL_BLOCK_PTR)(((uint32)EVENT_LOG_BLOCK_GET_PREV_BLOCK(block)) | flag))
+#define EVENT_LOG_BLOCK_OR_FLAG(block, flag) EVENT_LOG_BLOCK_SET_FLAG(block, \
+ (EVENT_LOG_BLOCK_GET_FLAG(block) | flag) << EVENT_LOG_BLOCK_FLAG_SHIFT)
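+
+/* Illustrative note (not part of the API): the macros above keep per-block
+ * flags in the top byte of the prev_block pointer (EVENT_LOG_BLOCK_FLAG_MASK),
+ * which relies on block addresses never occupying those bits. A sketch of
+ * marking and testing a preserve block:
+ *
+ *	EVENT_LOG_BLOCK_OR_FLAG(blk, EVENT_LOG_PRESERVE_BLOCK);
+ *	if (EVENT_LOG_BLOCK_GET_FLAG(blk) & EVENT_LOG_PRESERVE_BLOCK) {
+ *		// treat blk as a preserve block
+ *	}
+ */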
+
+typedef enum {
+ SET_DESTINATION_INVALID = -1,
+ SET_DESTINATION_HOST = 0, /* Eventlog buffer is sent out to host once filled. */
+ SET_DESTINATION_NONE = 1, /* The buffer is not sent out, and it will be overwritten
+ * with new messages.
+ */
+ SET_DESTINATION_FORCE_FLUSH_TO_HOST = 2, /* Buffers are sent to host once and then the
+ * value is reset back to SET_DESTINATION_NONE.
+ */
+ SET_DESTINATION_FLUSH_ON_WATERMARK = 3, /* Buffers are sent to host when the watermark is
+ * reached, as defined by the feature/chip.
+ */
+ SET_DESTINATION_MAX
+} event_log_set_destination_t;
+
+/* sub destination for routing at the host */
+typedef enum {
+ SET_SUB_DESTINATION_0 = 0,
+ SET_SUB_DESTINATION_1 = 1,
+ SET_SUB_DESTINATION_2 = 2,
+ SET_SUB_DESTINATION_3 = 3,
+ SET_SUB_DESTINATION_DEFAULT = SET_SUB_DESTINATION_0
+} event_log_set_sub_destination_t;
+
+/* There can be multiple event_sets with each logging a set of
+ * associated events (i.e, "fast" and "slow" events).
+ */
+typedef struct event_log_set {
+ _EL_BLOCK_PTR first_block; /* Pointer to first event_log block */
+ _EL_BLOCK_PTR last_block; /* Pointer to last event_log block */
+ _EL_BLOCK_PTR logtrace_block; /* next block traced */
+ _EL_BLOCK_PTR cur_block; /* Pointer to current event_log block */
+ _EL_TYPE_PTR cur_ptr; /* Current event_log pointer */
+ uint32 blockcount; /* Number of blocks */
+ uint16 logtrace_count; /* Last count for logtrace */
+ uint16 blockfill_count; /* Fill count for logtrace */
+ uint32 timestamp; /* Last timestamp event */
+ uint32 cyclecount; /* Cycles at last timestamp event */
+ event_log_set_destination_t destination;
+ uint16 size; /* same size for all buffers in one set */
+ uint16 flags;
+ uint16 num_preserve_blocks;
+ event_log_set_sub_destination_t sub_destination;
+ uint16 water_mark; /* not used yet: threshold to flush host in percent */
+ uint32 period; /* period to flush host in ms */
+ uint32 last_rpt_ts; /* last time to flush in ms */
+} event_log_set_t;
+
+/* Definition of flags in set */
+#define EVENT_LOG_SET_SHRINK_ACTIVE (1 << 0)
+#define EVENT_LOG_SET_CONFIG_PARTIAL_BLK_SEND (0x1 << 1)
+#define EVENT_LOG_SET_CHECK_LOG_RATE (1 << 2)
+#define EVENT_LOG_SET_PERIODIC (1 << 3)
+#define EVENT_LOG_SET_D3PRSV (1 << 4)
+
+/* Top data structure for access to everything else */
+typedef struct event_log_top {
+ uint32 magic;
+#define EVENT_LOG_TOP_MAGIC 0x474C8669 /* 'EVLG' */
+ uint32 version;
+#define EVENT_LOG_VERSION 1
+ uint32 num_sets;
+ uint32 logstrs_size; /* Size of lognums + logstrs area */
+ uint32 timestamp; /* Last timestamp event */
+ uint32 cyclecount; /* Cycles at last timestamp event */
+ _EL_SET_PTR sets; /* Ptr to array of <num_sets> set ptrs */
+ uint16 log_count; /* Number of event logs from last flush */
+ uint16 rate_hc; /* Max number of prints per second */
+ uint32 hc_timestamp; /* Timestamp of last hc window starting */
+ bool cpu_freq_changed; /* Set to TRUE when CPU freq changed */
+ bool hostmem_access_enabled; /* Is host memory access enabled for log delivery */
+ bool event_trace_enabled; /* WLC_E_TRACE enabled/disabled */
+} event_log_top_t;
+
+/* structure of the trailer words at the end of logstrs.bin */
+typedef struct {
+ uint32 fw_id; /* FWID will be written by tool later */
+ uint32 flags; /* 0th bit indicates whether encrypted or not */
+ /* Keep version and magic last since "header" is appended to the end of logstrs file. */
+ uint32 version; /* Header version */
+ uint32 log_magic; /* MAGIC number for verification 'LOGS' */
+} logstr_trailer_t;
+
+/* Data structure keeping the header from logstrs.bin */
+typedef struct {
+ uint32 logstrs_size; /* Size of the file */
+ uint32 rom_lognums_offset; /* Offset to the ROM lognum */
+ uint32 ram_lognums_offset; /* Offset to the RAM lognum */
+ uint32 rom_logstrs_offset; /* Offset to the ROM logstr */
+ uint32 ram_logstrs_offset; /* Offset to the RAM logstr */
+ uint32 fw_id; /* FWID will be written by tool later */
+ uint32 flags; /* 0th bit indicates whether encrypted or not */
+ /* Keep version and magic last since "header" is appended to the end of logstrs file. */
+ uint32 version; /* Header version */
+ uint32 log_magic; /* MAGIC number for verification 'LOGS' */
+} logstr_header_t;
+
+/* Data structure keeping the header from logstrs.bin (v1, without fw_id/flags) */
+typedef struct {
+ uint32 logstrs_size; /* Size of the file */
+ uint32 rom_lognums_offset; /* Offset to the ROM lognum */
+ uint32 ram_lognums_offset; /* Offset to the RAM lognum */
+ uint32 rom_logstrs_offset; /* Offset to the ROM logstr */
+ uint32 ram_logstrs_offset; /* Offset to the RAM logstr */
+ /* Keep version and magic last since "header" is appended to the end of logstrs file. */
+ uint32 version; /* Header version */
+ uint32 log_magic; /* MAGIC number for verification 'LOGS' */
+} logstr_header_v1_t;
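+
+/* Reader sketch (assumed consumer logic, not defined in this header): since
+ * version and log_magic sit at the very end of logstrs.bin, a consumer can
+ * locate the header by reading the tail of the file and checking the magic
+ * before trusting the offsets.
+ *
+ *	const logstr_header_t *hdr = (const logstr_header_t *)
+ *	        (buf + file_size - sizeof(logstr_header_t));
+ *	if (hdr->log_magic == LOGSTRS_MAGIC) {
+ *		// hdr->version distinguishes logstr_header_t from logstr_header_v1_t
+ *	}
+ */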
+
+/* Event log configuration table */
+typedef struct evt_log_tag_entry {
+ uint16 tag; /* Tag value. */
+ uint8 set; /* Set number. */
+ uint8 refcnt; /* Ref_count if sdc is used */
+} evt_log_tag_entry_t;
+
+#ifdef BCMDRIVER
+/* !!! The following section is for kernel mode code only !!! */
+#include <osl_decl.h>
+
+extern bool d3_preserve_enab;
+#if defined(ROM_ENAB_RUNTIME_CHECK)
+ #define D3_PRESERVE_ENAB() (d3_preserve_enab)
+#elif defined(EVENTLOG_D3_PRESERVE_DISABLED)
+ #define D3_PRESERVE_ENAB() (0)
+#else
+ #define D3_PRESERVE_ENAB() (1)
+#endif
+
+#if defined(EVENTLOG_PRSV_PERIODIC)
+extern bool prsv_periodic_enab;
+#if defined(ROM_ENAB_RUNTIME_CHECK)
+ #define PRSV_PRD_ENAB() (prsv_periodic_enab)
+#elif defined(EVENTLOG_PRSV_PERIODIC_DISABLED)
+ #define PRSV_PRD_ENAB() (0)
+#else
+ #define PRSV_PRD_ENAB() (1)
+#endif
+#endif /* EVENTLOG_PRSV_PERIODIC */
+
+/*
+ * Use the following macros for generating log events.
+ *
+ * The FAST versions check the enable of the tag before evaluating the arguments and calling the
+ * event_log function. This adds 5 instructions. The COMPACT versions evaluate the arguments
+ * and call the event_log function unconditionally. The event_log function will then skip logging
+ * if this tag is disabled.
+ *
+ * To support easy usage of existing debugging (e.g. msglevel) via macro re-definition there are
+ * two variants of these macros to help.
+ *
+ * First there are the CAST versions. The event_log function normally logs uint32 values or else
+ * they have to be cast to uint32. The CAST versions blindly cast for you so you don't have to edit
+ * any existing code.
+ *
+ * Second there are the PAREN_ARGS versions. These expect the logging format string and arguments
+ * to be enclosed in parentheses. This allows us to make the following mapping of an existing
+ * msglevel macro:
+ * #define WL_ERROR(args) EVENT_LOG_CAST_PAREN_ARGS(EVENT_LOG_TAG_WL_ERROR, args)
+ *
+ * The versions of the macros without FAST or COMPACT in their name are just synonyms for the
+ * COMPACT versions.
+ *
+ * You should use the COMPACT macro (or its synonym) in cases where there is some preceding logic
+ * that prevents the execution of the macro, e.g. WL_ERROR by definition rarely gets executed.
+ * Use the FAST macro in performance sensitive paths. The key concept here is that you should be
+ * assuming that your macro usage is compiled into ROM and can't be changed ... so choose wisely.
+ *
+ */
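+
+/* Usage sketch (illustrative; the WL_ERROR wrapper shown is the mapping
+ * example from above, and EVENT_LOG_TAG_WL_ERROR comes from event_log_tag.h):
+ *
+ *	#define WL_ERROR(args) EVENT_LOG_CAST_PAREN_ARGS(EVENT_LOG_TAG_WL_ERROR, args)
+ *	WL_ERROR(("assoc failed, status %d\n", status));
+ *
+ *	// performance-sensitive path: FAST checks the tag before evaluating args
+ *	EVENT_LOG_FAST(EVENT_LOG_TAG_WL_ERROR, "txstatus 0x%x", txs);
+ */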
+
+#if !defined(EVENT_LOG_DUMPER) && !defined(DHD_EFI)
+
+#ifndef EVENT_LOG_COMPILE
+
+/* Null define if no tracing */
+#define EVENT_LOG(tag, fmt, ...)
+#define EVENT_LOG_FAST(tag, fmt, ...)
+#define EVENT_LOG_COMPACT(tag, fmt, ...)
+
+#define EVENT_LOG_CAST(tag, fmt, ...)
+#define EVENT_LOG_FAST_CAST(tag, fmt, ...)
+#define EVENT_LOG_COMPACT_CAST(tag, fmt, ...)
+
+#define EVENT_LOG_CAST_PAREN_ARGS(tag, pargs)
+#define EVENT_LOG_FAST_CAST_PAREN_ARGS(tag, pargs)
+#define EVENT_LOG_COMPACT_CAST_PAREN_ARGS(tag, pargs)
+
+#define EVENT_LOG_IF_READY(tag, fmt, ...)
+#define EVENT_LOG_IS_ON(tag) 0
+#define EVENT_LOG_IS_LOG_ON(tag) 0
+
+#define EVENT_LOG_BUFFER(tag, buf, size)
+#define EVENT_LOG_PRSRV_FLUSH()
+#define EVENT_LOG_FORCE_FLUSH_ALL()
+#define EVENT_LOG_FORCE_FLUSH_PRSRV_LOG_ALL()
+
+#else /* EVENT_LOG_COMPILE */
+
+/* The first few _EVENT_LOGX() macros are special because they can be done more
+ * efficiently this way and they are the common case. Once there are too many
+ * parameters, the code size starts to be an issue and a loop is better.
+ * The trailing arguments to the _EVENT_LOGX() macros are the format string, 'fmt',
+ * followed by the variable parameters for the format. The format string is not
+ * needed in the event_logX() replacement text, so fmt is dropped in all cases.
+ */
+#define _EVENT_LOG0(tag, fmt_num, fmt) \
+ event_log0(tag, fmt_num)
+#define _EVENT_LOG1(tag, fmt_num, fmt, t1) \
+ event_log1(tag, fmt_num, t1)
+#define _EVENT_LOG2(tag, fmt_num, fmt, t1, t2) \
+ event_log2(tag, fmt_num, t1, t2)
+#define _EVENT_LOG3(tag, fmt_num, fmt, t1, t2, t3) \
+ event_log3(tag, fmt_num, t1, t2, t3)
+#define _EVENT_LOG4(tag, fmt_num, fmt, t1, t2, t3, t4) \
+ event_log4(tag, fmt_num, t1, t2, t3, t4)
+
+/* The rest call the generic routine that takes a count */
+#define _EVENT_LOG5(tag, fmt_num, fmt, ...) event_logn(5, tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOG6(tag, fmt_num, fmt, ...) event_logn(6, tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOG7(tag, fmt_num, fmt, ...) event_logn(7, tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOG8(tag, fmt_num, fmt, ...) event_logn(8, tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOG9(tag, fmt_num, fmt, ...) event_logn(9, tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOGA(tag, fmt_num, fmt, ...) event_logn(10, tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOGB(tag, fmt_num, fmt, ...) event_logn(11, tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOGC(tag, fmt_num, fmt, ...) event_logn(12, tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOGD(tag, fmt_num, fmt, ...) event_logn(13, tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOGE(tag, fmt_num, fmt, ...) event_logn(14, tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOGF(tag, fmt_num, fmt, ...) event_logn(15, tag, fmt_num, __VA_ARGS__)
+
+/* Casting low level macros */
+#define _EVENT_LOG_CAST0(tag, fmt_num, fmt) \
+ event_log0(tag, fmt_num)
+#define _EVENT_LOG_CAST1(tag, fmt_num, fmt, t1) \
+ event_log1(tag, fmt_num, (uint32)(t1))
+#define _EVENT_LOG_CAST2(tag, fmt_num, fmt, t1, t2) \
+ event_log2(tag, fmt_num, (uint32)(t1), (uint32)(t2))
+#define _EVENT_LOG_CAST3(tag, fmt_num, fmt, t1, t2, t3) \
+ event_log3(tag, fmt_num, (uint32)(t1), (uint32)(t2), (uint32)(t3))
+#define _EVENT_LOG_CAST4(tag, fmt_num, fmt, t1, t2, t3, t4) \
+ event_log4(tag, fmt_num, (uint32)(t1), (uint32)(t2), (uint32)(t3), (uint32)(t4))
+
+/* The rest call the generic routine that takes a count */
+#define _EVENT_LOG_CAST5(tag, fmt_num, ...) _EVENT_LOG5(tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOG_CAST6(tag, fmt_num, ...) _EVENT_LOG6(tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOG_CAST7(tag, fmt_num, ...) _EVENT_LOG7(tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOG_CAST8(tag, fmt_num, ...) _EVENT_LOG8(tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOG_CAST9(tag, fmt_num, ...) _EVENT_LOG9(tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOG_CASTA(tag, fmt_num, ...) _EVENT_LOGA(tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOG_CASTB(tag, fmt_num, ...) _EVENT_LOGB(tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOG_CASTC(tag, fmt_num, ...) _EVENT_LOGC(tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOG_CASTD(tag, fmt_num, ...) _EVENT_LOGD(tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOG_CASTE(tag, fmt_num, ...) _EVENT_LOGE(tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOG_CASTF(tag, fmt_num, ...) _EVENT_LOGF(tag, fmt_num, __VA_ARGS__)
+
+/* Hack to make the proper routine call when variadic macros get
+ * passed. Note the max of 15 arguments. More than that can't be
+ * handled by the event_log entries anyway, so it is best to catch it at compile
+ * time
+ *
+ * Here is what happens with this macro: when _EVENT_LOG expands this macro,
+ * its __VA_ARGS__ argument is expanded. If __VA_ARGS__ contains only ONE
+ * argument, for example, then F maps to _1, E maps to _2, and so on, so that
+ * N maps to 0, and the macro expands to BASE ## N or BASE ## 0 which is
+ * EVENT_LOG0. If __VA_ARGS__ contains two arguments, then everything is
+ * shifted down by one, because the second argument in __VA_ARGS__ now maps
+ * to _1, so F maps to _2, E maps to _3, and so on, and 1 (instead of 0) maps
+ * to N, and this macro expands to become _EVENT_LOG1. This continues all
+ * the way up until __VA_ARGS__ has 15 arguments, in which case, stuff in
+ * __VA_ARGS__ maps to all of the values _1 through _F, which makes F (in
+ * the _EVENT_LOG macro) map to N, and this macro then expands to EVENT_LOGF.
+ */
+
+#define _EVENT_LOG_VA_NUM_ARGS(BASE, _FMT, _1, _2, _3, _4, _5, _6, _7, _8, _9, \
+ _A, _B, _C, _D, _E, _F, N, ...) BASE ## N
+
+/* Take a variable number of args and replace with only the first */
+#define FIRST_ARG(a1, ...) a1
+
+/* base = _EVENT_LOG for no casting
+ * base = _EVENT_LOG_CAST for casting of fmt arguments to uint32.
+ * Only first 4 arguments are cast to uint32. event_logn() is called
+ * if more than 4 arguments are present. This function internally assumes
+ * all arguments are uint32
+ *
+ * The variable args in this call are the format string followed by the variable
+ * parameters for the format. E.g.
+ *
+ * __VA_ARGS__ = "answer: %d", 42
+ *
+ * This means __VA_ARGS__ always has one or more arguments. Guaranteeing a non-empty
+ * __VA_ARGS__ means the special use of " , ## __VA_ARGS__" is not required to deal
+ * with a dangling comma --- the comma will always be followed by at least the format
+ * string. The use of ## caused issues when the format args contained a function like
+ * macro that expanded to more than one arg. The ## prevented macro expansion, so the
+ * _EVENT_LOG_VA_NUM_ARGS() calculation of the number of args was incorrect.
+ * Without the ##, the __VA_ARGS__ are macro replaced, and the num args calculation is
+ * accurate.
+ *
+ * This macro is setup so that if __VA_ARGS__ is as short as possible, then the "0" will
+ * map to "N" in the _EVENT_LOG_VA_NUM_ARGS macro, and that macro then expands to become
+ * _EVENT_LOG0. As __VA_ARGS__ gets longer, then the item that gets mapped to "N" gets
+ * pushed further and further up, so that by the time __VA_ARGS__ has 15 additional
+ * arguments, then "F" maps to "N" in the _EVENT_LOG_VA_NUM_ARGS macro.
+ */
+#define _EVENT_LOG(base, tag, ...) \
+ static char logstr[] __attribute__ ((section(".logstrs"))) = FIRST_ARG(__VA_ARGS__); \
+ static uint32 fmtnum __attribute__ ((section(".lognums"))) = (uint32) &logstr; \
+ _EVENT_LOG_VA_NUM_ARGS(base, __VA_ARGS__, \
+ F, E, D, C, B, A, 9, 8, \
+ 7, 6, 5, 4, 3, 2, 1, 0) \
+ (tag, (int) &fmtnum, __VA_ARGS__)
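+
+/* Worked expansion (illustrative): EVENT_LOG(tag, "answer: %d", 42) hands
+ * __VA_ARGS__ = "answer: %d", 42 to _EVENT_LOG_VA_NUM_ARGS, so "1" lands on
+ * N and the call becomes
+ *
+ *	_EVENT_LOG1(tag, (int)&fmtnum, "answer: %d", 42)
+ *		--> event_log1(tag, (int)&fmtnum, 42)
+ *
+ * with the format string stored in the .logstrs section instead of being
+ * passed at run time.
+ */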
+
+#define EVENT_LOG_FAST(tag, ...) \
+ do { \
+ if (event_log_tag_sets != NULL) { \
+ uint8 tag_flag = *(event_log_tag_sets + tag); \
+ if ((tag_flag & ~EVENT_LOG_TAG_FLAG_SET_MASK) != 0) { \
+ _EVENT_LOG(_EVENT_LOG, tag, __VA_ARGS__); \
+ } \
+ } \
+ } while (0)
+
+#define EVENT_LOG_COMPACT(tag, ...) \
+ do { \
+ _EVENT_LOG(_EVENT_LOG, tag, __VA_ARGS__); \
+ } while (0)
+
+/* Event log macro with casting to uint32 of arguments */
+#define EVENT_LOG_FAST_CAST(tag, ...) \
+ do { \
+ if (event_log_tag_sets != NULL) { \
+ uint8 tag_flag = *(event_log_tag_sets + tag); \
+ if ((tag_flag & ~EVENT_LOG_TAG_FLAG_SET_MASK) != 0) { \
+ _EVENT_LOG(_EVENT_LOG_CAST, tag, __VA_ARGS__); \
+ } \
+ } \
+ } while (0)
+
+#define EVENT_LOG_COMPACT_CAST(tag, ...) \
+ do { \
+ _EVENT_LOG(_EVENT_LOG_CAST, tag, __VA_ARGS__); \
+ } while (0)
+
+#define EVENT_LOG(tag, ...) EVENT_LOG_COMPACT(tag, __VA_ARGS__)
+
+#define EVENT_LOG_CAST(tag, ...) EVENT_LOG_COMPACT_CAST(tag, __VA_ARGS__)
+
+#define _EVENT_LOG_REMOVE_PAREN(...) __VA_ARGS__
+#define EVENT_LOG_REMOVE_PAREN(args) _EVENT_LOG_REMOVE_PAREN args
+
+#define EVENT_LOG_CAST_PAREN_ARGS(tag, pargs) \
+ EVENT_LOG_CAST(tag, EVENT_LOG_REMOVE_PAREN(pargs))
+
+#define EVENT_LOG_FAST_CAST_PAREN_ARGS(tag, pargs) \
+ EVENT_LOG_FAST_CAST(tag, EVENT_LOG_REMOVE_PAREN(pargs))
+
+#define EVENT_LOG_COMPACT_CAST_PAREN_ARGS(tag, pargs) \
+ EVENT_LOG_COMPACT_CAST(tag, EVENT_LOG_REMOVE_PAREN(pargs))
+
+/* Minimal event logging: internally calls event_log_caller_return_address()
+ * to log the return address of the caller.
+ * Note that the if(0){..} below is to avoid compiler warnings
+ * due to unused variables caused by this macro
+ */
+#define EVENT_LOG_RA(tag, args) \
+ do { \
+ if (0) { \
+ EVENT_LOG_COMPACT_CAST_PAREN_ARGS(tag, args); \
+ } \
+ event_log_caller_return_address(tag); \
+ } while (0)
+
+#define EVENT_LOG_IF_READY(_tag, ...) \
+ do { \
+ if (event_log_is_ready()) { \
+ EVENT_LOG(_tag, __VA_ARGS__); \
+ } \
+ } \
+ while (0)
+
+#define EVENT_LOG_IS_ON(tag) (*(event_log_tag_sets + (tag)) & ~EVENT_LOG_TAG_FLAG_SET_MASK)
+#define EVENT_LOG_IS_LOG_ON(tag) (*(event_log_tag_sets + (tag)) & EVENT_LOG_TAG_FLAG_LOG)
+
+#define EVENT_LOG_BUFFER(tag, buf, size) event_log_buffer(tag, buf, size)
+#define EVENT_DUMP event_log_buffer
+
+/* EVENT_LOG_PRSRV_FLUSH() will be deprecated. Use EVENT_LOG_FORCE_FLUSH_ALL instead */
+#define EVENT_LOG_PRSRV_FLUSH() event_log_force_flush_all()
+#define EVENT_LOG_FORCE_FLUSH_ALL() event_log_force_flush_all()
+
+#ifdef PRESERVE_LOG
+#define EVENT_LOG_FORCE_FLUSH_PRSRV_LOG_ALL() event_log_force_flush_preserve_all()
+#else
+#define EVENT_LOG_FORCE_FLUSH_PRSRV_LOG_ALL()
+#endif /* PRESERVE_LOG */
+
+extern uint8 *event_log_tag_sets;
+
+extern int event_log_init(osl_t *osh);
+extern int event_log_set_init(osl_t *osh, int set_num, int size);
+extern int event_log_set_expand(osl_t *osh, int set_num, int size);
+extern int event_log_set_shrink(osl_t *osh, int set_num, int size);
+
+extern int event_log_tag_start(int tag, int set_num, int flags);
+extern int event_log_tag_set_retrieve(int tag);
+extern int event_log_tag_flags_retrieve(int tag);
+extern int event_log_tag_stop(int tag);
+
+typedef void (*event_log_logtrace_trigger_fn_t)(void *ctx);
+void event_log_set_logtrace_trigger_fn(event_log_logtrace_trigger_fn_t fn, void *ctx);
+
+event_log_top_t *event_log_get_top(void);
+
+extern int event_log_get(int set_num, int buflen, void *buf);
+
+extern uint8 *event_log_next_logtrace(int set_num);
+extern uint32 event_log_logtrace_max_buf_count(int set_num);
+extern int event_log_set_type(int set_num, uint8 *type, int is_get);
+extern int event_log_flush_set(wl_el_set_flush_prsrv_t *flush, int is_set);
+
+extern void event_log0(int tag, int fmtNum);
+extern void event_log1(int tag, int fmtNum, uint32 t1);
+extern void event_log2(int tag, int fmtNum, uint32 t1, uint32 t2);
+extern void event_log3(int tag, int fmtNum, uint32 t1, uint32 t2, uint32 t3);
+extern void event_log4(int tag, int fmtNum, uint32 t1, uint32 t2, uint32 t3, uint32 t4);
+extern void event_logn(int num_args, int tag, int fmtNum, ...);
+#ifdef ROM_COMPAT_MSCH_PROFILER
+/* For compatibility with ROM: the old msch event log function passes parameters on the stack */
+extern void event_logv(int num_args, int tag, int fmtNum, va_list ap);
+#endif /* ROM_COMPAT_MSCH_PROFILER */
+
+extern void event_log_time_sync(uint32 ms);
+extern bool event_log_time_sync_required(void);
+extern void event_log_cpu_freq_changed(void);
+extern void event_log_buffer(int tag, const uint8 *buf, int size);
+extern void event_log_caller_return_address(int tag);
+extern int event_log_set_destination_set(int set, event_log_set_destination_t dest);
+extern event_log_set_destination_t event_log_set_destination_get(int set);
+extern int event_log_set_sub_destination_set(uint set, event_log_set_sub_destination_t dest);
+extern event_log_set_sub_destination_t event_log_set_sub_destination_get(uint set);
+extern int event_log_flush_log_buffer(int set);
+extern int event_log_force_flush_all(void);
+extern int event_log_force_flush(int set);
+
+extern uint16 event_log_get_available_space(int set);
+extern bool event_log_is_tag_valid(int tag);
+/* returns number of blocks available for writing */
+extern int event_log_free_blocks_get(int set);
+extern bool event_log_is_ready(void);
+extern bool event_log_is_preserve_active(uint set);
+extern uint event_log_get_percentage_available_space(uint set);
+extern bool event_log_set_watermark_reached(int set_num);
+
+extern void event_log_set_config(int set, uint32 period, uint16 watermark, uint32 config_flags);
+#ifdef EVENTLOG_D3_PRESERVE
+#define EVENT_LOG_PRESERVE_EXPAND_SIZE 5u
+extern int event_log_preserve_set_shrink(osl_t *osh, int set_num);
+extern void event_log_d3_preserve_active_set(osl_t* osh, int set, bool active);
+extern void event_log_d3_prsv_set_all(osl_t *osh, bool active);
+#endif /* EVENTLOG_D3_PRESERVE */
+
+#ifdef EVENTLOG_PRSV_PERIODIC
+#define EVENT_LOG_SET_SIZE_INVALID 0xFFFFFFFFu
+#define EVENT_LOG_DEFAULT_PERIOD 3000u
+extern void event_log_prsv_periodic_wd_trigger(osl_t *osh);
+#endif /* EVENTLOG_PRSV_PERIODIC */
+
+/* Enable/disable rate health check for a set */
+#ifdef EVENT_LOG_RATE_HC
+extern int event_log_enable_hc_for_set(int set_num, bool enable);
+extern void event_log_set_hc_rate(uint16 num_prints);
+extern uint16 event_log_get_hc_rate(void);
+#endif /* EVENT_LOG_RATE_HC */
+
+/* Configure a set with ability to send partial log blocks */
+extern int event_log_send_partial_block_set(int set_num);
+
+/* Get number of log blocks associated to a log set */
+extern int event_log_num_blocks_get(int set, uint32 *num_blocks);
+
+/* Get a log buffer of a desired set */
+extern int event_log_block_get(int set, uint32 **buf, uint16 *len);
+extern uint32 event_log_get_maxsets(void);
+
+/* For all other non-logtrace consumers */
+extern int event_log_set_is_valid(int set);
+
+/* To be used by logtrace only */
+extern int event_log_get_num_sets(void);
+
+/* Given a buffer, return to which set it belongs to */
+extern int event_log_get_set_for_buffer(const void *buf);
+
+extern int event_log_flush_multiple_sets(const int *sets, uint16 num_sets);
+extern int event_log_force_flush_preserve_all(void);
+extern int event_log_get_iovar_handler(int set);
+extern int event_log_enable_hostmem_access(bool hostmem_access_enabled);
+extern int event_log_enable_event_trace(bool event_trace_enabled);
+#endif /* EVENT_LOG_COMPILE */
+
+#endif /* !EVENT_LOG_DUMPER && !DHD_EFI */
+
+#endif /* BCMDRIVER */
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* _EVENT_LOG_H_ */
diff --git a/bcmdhd.101.10.361.x/include/event_log_payload.h b/bcmdhd.101.10.361.x/include/event_log_payload.h
new file mode 100755
index 0000000..4485fcc
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/event_log_payload.h
@@ -0,0 +1,1775 @@
+/*
+ * EVENT_LOG System Definitions
+ *
+ * This file describes the payloads of event log entries that are data buffers
+ * rather than formatted string entries. The contents are generally XTLVs.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _EVENT_LOG_PAYLOAD_H_
+#define _EVENT_LOG_PAYLOAD_H_
+
+#include <typedefs.h>
+#include <bcmutils.h>
+#include <ethernet.h>
+#include <event_log_tag.h>
+
+/**
+ * A (legacy) timestamp message
+ */
+typedef struct ts_message {
+ uint32 timestamp;
+ uint32 cyclecount;
+} ts_msg_t;
+
+/**
+ * Enhanced timestamp message
+ */
+typedef struct enhanced_ts_message {
+ uint32 version;
+ /* More data, depending on version */
+ uint8 data[];
+} ets_msg_t;
+
+#define ENHANCED_TS_MSG_VERSION_1 (1u)
+
+/**
+ * Enhanced timestamp message, version 1
+ */
+typedef struct enhanced_ts_message_v1 {
+ uint32 version;
+ uint32 timestamp; /* PMU time, in milliseconds */
+ uint32 cyclecount;
+ uint32 cpu_freq;
+} ets_msg_v1_t;
+
+#define EVENT_LOG_XTLV_ID_STR 0 /**< XTLV ID for a string */
+#define EVENT_LOG_XTLV_ID_TXQ_SUM 1 /**< XTLV ID for txq_summary_t */
+#define EVENT_LOG_XTLV_ID_SCBDATA_SUM 2 /**< XTLV ID for scb_subq_summary_t */
+#define EVENT_LOG_XTLV_ID_SCBDATA_AMPDU_TX_SUM 3 /**< XTLV ID for scb_ampdu_tx_summary_t */
+#define EVENT_LOG_XTLV_ID_BSSCFGDATA_SUM 4 /**< XTLV ID for bsscfg_q_summary_t */
+#define EVENT_LOG_XTLV_ID_UCTXSTATUS 5 /**< XTLV ID for ucode TxStatus array */
+#define EVENT_LOG_XTLV_ID_TXQ_SUM_V2 6 /**< XTLV ID for txq_summary_v2_t */
+#define EVENT_LOG_XTLV_ID_BUF 7 /**< XTLV ID for event_log_buffer_t */
+
+/**
+ * An XTLV holding a string
+ * String is not null terminated, length is the XTLV len.
+ */
+typedef struct xtlv_string {
+ uint16 id; /* XTLV ID: EVENT_LOG_XTLV_ID_STR */
+ uint16 len; /* XTLV Len (String length) */
+ char str[1]; /* var len array characters */
+} xtlv_string_t;
+
+#define XTLV_STRING_FULL_LEN(str_len) (BCM_XTLV_HDR_SIZE + (str_len) * sizeof(char))
+
+/**
+ * Summary for a single TxQ context
+ * Two of these will be used per TxQ context---one for the high TxQ, and one for
+ * the low txq that contains DMA prepared pkts. The high TxQ is a full multi-precidence
+ * queue and also has a BSSCFG map to identify the BSSCFGS associated with the queue context.
+ * The low txq counterpart does not populate the BSSCFG map.
+ * The excursion queue will have no bsscfgs associated and is the first queue dumped.
+ */
+typedef struct txq_summary {
+ uint16 id; /* XTLV ID: EVENT_LOG_XTLV_ID_TXQ_SUM */
+ uint16 len; /* XTLV Len */
+ uint32 bsscfg_map; /* bitmap of bsscfg indexes associated with this queue */
+ uint32 stopped; /* flow control bitmap */
+ uint8 prec_count; /* count of precedences/fifos and len of following array */
+ uint8 pad;
+ uint16 plen[1]; /* var len array of lengths of each prec/fifo in the queue */
+} txq_summary_t;
+
+#define TXQ_SUMMARY_LEN (OFFSETOF(txq_summary_t, plen))
+#define TXQ_SUMMARY_FULL_LEN(num_q) (TXQ_SUMMARY_LEN + (num_q) * sizeof(uint16))
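+
+/* Illustrative example: a queue with 4 precedences is reported in an XTLV of
+ * TXQ_SUMMARY_FULL_LEN(4) = OFFSETOF(txq_summary_t, plen) + 4 * sizeof(uint16)
+ * bytes, i.e. the fixed fields plus one uint16 per prec/fifo. The other
+ * var-len summaries below follow the same OFFSETOF-plus-array pattern.
+ */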
+
+typedef struct txq_summary_v2 {
+ uint16 id; /* XTLV ID: EVENT_LOG_XTLV_ID_TXQ_SUM_V2 */
+ uint16 len; /* XTLV Len */
+ uint32 bsscfg_map; /* bitmap of bsscfg indexes associated with this queue */
+ uint32 stopped; /* flow control bitmap */
+ uint32 hw_stopped; /* flow control bitmap */
+ uint8 prec_count; /* count of precedences/fifos and len of following array */
+ uint8 pad;
+ uint16 plen[1]; /* var len array of lengths of each prec/fifo in the queue */
+} txq_summary_v2_t;
+
+#define TXQ_SUMMARY_V2_LEN (OFFSETOF(txq_summary_v2_t, plen))
+#define TXQ_SUMMARY_V2_FULL_LEN(num_q) (TXQ_SUMMARY_V2_LEN + (num_q) * sizeof(uint16))
+
+/**
+ * Summary for tx datapath of an SCB cubby
+ * This is a generic summary structure (one size fits all) with
+ * a cubby ID and sub-ID to differentiate SCB cubby types and possible sub-queues.
+ */
+typedef struct scb_subq_summary {
+ uint16 id; /* XTLV ID: EVENT_LOG_XTLV_ID_SCBDATA_SUM */
+ uint16 len; /* XTLV Len */
+ uint32 flags; /* cubby-specific flags */
+ uint8 cubby_id; /* ID registered for cubby */
+ uint8 sub_id; /* sub ID if a cubby has more than one queue */
+ uint8 prec_count; /* count of precedences/fifos and len of following array */
+ uint8 pad;
+ uint16 plen[1]; /* var len array of lengths of each prec/fifo in the queue */
+} scb_subq_summary_t;
+
+#define SCB_SUBQ_SUMMARY_LEN (OFFSETOF(scb_subq_summary_t, plen))
+#define SCB_SUBQ_SUMMARY_FULL_LEN(num_q) (SCB_SUBQ_SUMMARY_LEN + (num_q) * sizeof(uint16))
+
+/* scb_subq_summary_t.flags for APPS */
+#define SCBDATA_APPS_F_PS 0x00000001
+#define SCBDATA_APPS_F_PSPEND 0x00000002
+#define SCBDATA_APPS_F_INPVB 0x00000004
+#define SCBDATA_APPS_F_APSD_USP 0x00000008
+#define SCBDATA_APPS_F_TXBLOCK 0x00000010
+#define SCBDATA_APPS_F_APSD_HPKT_TMR 0x00000020
+#define SCBDATA_APPS_F_APSD_TX_PEND 0x00000040
+#define SCBDATA_APPS_F_INTRANS 0x00000080
+#define SCBDATA_APPS_F_OFF_PEND 0x00000100
+#define SCBDATA_APPS_F_OFF_BLOCKED 0x00000200
+#define SCBDATA_APPS_F_OFF_IN_PROG 0x00000400
+
+/**
+ * Summary for tx datapath AMPDU SCB cubby
+ * This is a specific data structure to describe the AMPDU datapath state for an SCB
+ * used instead of scb_subq_summary_t.
+ * Info is for one TID, so one will be dumped per BA TID active for an SCB.
+ */
+typedef struct scb_ampdu_tx_summary {
+ uint16 id; /* XTLV ID: EVENT_LOG_XTLV_ID_SCBDATA_AMPDU_TX_SUM */
+ uint16 len; /* XTLV Len */
+ uint32 flags; /* misc flags */
+ uint8 tid; /* initiator TID (priority) */
+ uint8 ba_state; /* internal BA state */
+ uint8 bar_cnt; /* number of bars sent with no progress */
+ uint8 retry_bar; /* reason code if bar to be retried at watchdog */
+ uint16 barpending_seq; /* seqnum for bar */
+ uint16 bar_ackpending_seq; /* seqnum of bar for which ack is pending */
+ uint16 start_seq; /* seqnum of the first unacknowledged packet */
+ uint16 max_seq; /* max unacknowledged seqnum sent */
+ uint32 released_bytes_inflight; /* Number of released bytes still in flight */
+ uint32 released_bytes_target;
+} scb_ampdu_tx_summary_t;
+
+/* scb_ampdu_tx_summary.flags defs */
+#define SCBDATA_AMPDU_TX_F_BAR_ACKPEND 0x00000001 /* bar_ackpending */
+
+/** XTLV struct to summarize a BSSCFG's packet queue */
+typedef struct bsscfg_q_summary {
+ uint16 id; /* XTLV ID: EVENT_LOG_XTLV_ID_BSSCFGDATA_SUM */
+ uint16 len; /* XTLV Len */
+ struct ether_addr BSSID; /* BSSID */
+ uint8 bsscfg_idx; /* bsscfg index */
+ uint8 type; /* bsscfg type enumeration: BSSCFG_TYPE_XXX */
+ uint8 subtype; /* bsscfg subtype enumeration: BSSCFG_SUBTYPE_XXX */
+ uint8 prec_count; /* count of precedences/fifos and len of following array */
+ uint16 plen[1]; /* var len array of lengths of each prec/fifo in the queue */
+} bsscfg_q_summary_t;
+
+#define BSSCFG_Q_SUMMARY_LEN (OFFSETOF(bsscfg_q_summary_t, plen))
+#define BSSCFG_Q_SUMMARY_FULL_LEN(num_q) (BSSCFG_Q_SUMMARY_LEN + (num_q) * sizeof(uint16))
+
+/**
+ * An XTLV holding a TxStatus array.
+ * TxStatus entries are 8 or 16 bytes; their size in words (2 or 4) is given in
+ * the entry_size field.
+ * The array itself is uint32 words.
+ */
+typedef struct xtlv_uc_txs {
+ uint16 id; /* XTLV ID: EVENT_LOG_XTLV_ID_UCTXSTATUS */
+ uint16 len; /* XTLV Len */
+ uint8 entry_size; /* num uint32 words per entry */
+ uint8 pad[3]; /* reserved, zero */
+ uint32 w[1]; /* var len array of words */
+} xtlv_uc_txs_t;
+
+#define XTLV_UCTXSTATUS_LEN (OFFSETOF(xtlv_uc_txs_t, w))
+#define XTLV_UCTXSTATUS_FULL_LEN(words) (XTLV_UCTXSTATUS_LEN + (words) * sizeof(uint32))
+
+#define SCAN_SUMMARY_VERSION_1 1u
+#ifndef WLSCAN_SUMMARY_VERSION_ALIAS
+#define SCAN_SUMMARY_VERSION SCAN_SUMMARY_VERSION_1
+#endif
+/* Scan flags */
+#define SCAN_SUM_CHAN_INFO 0x1
+/* Scan_sum flags */
+#define BAND5G_SIB_ENAB 0x2
+#define BAND2G_SIB_ENAB 0x4
+#define PARALLEL_SCAN 0x8
+#define SCAN_ABORT 0x10
+/* Note: definitions reused in chan_info (as SCAN_SUM_SCAN_CORE) need cleanup */
+#define SC_LOWSPAN_SCAN 0x20
+/* Note: definitions reused in scan summary info (as WL_SSUM_CLIENT_MASK) need cleanup */
+#define SC_SCAN 0x40
+
+#define WL_SSUM_CLIENT_MASK 0x1C0u /* bit 8 - 6 */
+#define WL_SSUM_CLIENT_SHIFT 6u /* shift for client scan operation */
+
+#define WL_SSUM_MODE_MASK 0xE00u /* bit 11 - 9 */
+#define WL_SSUM_MODE_SHIFT 9u /* shift mode scan operation */
+
+/* Common bits for channel and scan summary info */
+#define SCAN_SUM_CHAN_RESHED 0x1000 /* Bit 12 as resched scan for chaninfo and scan summary */
+
+#define WL_SSUM_CLIENT_ASSOCSCAN 0x0u /* logged when the requesting client is assoc scan */
+#define WL_SSUM_CLIENT_ROAMSCAN 0x1u /* logged when the requesting client is roam scan */
+#define WL_SSUM_CLIENT_FWSCAN 0x2u /* logged when the requesting client is another fw scan */
+#define WL_SSUM_CLIENT_HOSTSCAN 0x3u /* logged when the requesting client is host scan */
+
+#define WL_SSUM_SCANFLAG_INVALID 0x7u /* logged for an invalid scan client or mode */
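+
+/* Illustrative sketch: decoding the scan client and mode fields packed
+ * into a 16-bit scan_flags word, using only the masks and shifts above;
+ * the helper names are hypothetical.
+ */
+static uint16
+wl_ssum_client_example(uint16 scan_flags)
+{
+ /* bits 8..6 identify the requesting client (assoc/roam/fw/host) */
+ return (uint16)((scan_flags & WL_SSUM_CLIENT_MASK) >> WL_SSUM_CLIENT_SHIFT);
+}
+
+static uint16
+wl_ssum_mode_example(uint16 scan_flags)
+{
+ /* bits 11..9 identify the scan mode (high-acc/low-span/low-power) */
+ return (uint16)((scan_flags & WL_SSUM_MODE_MASK) >> WL_SSUM_MODE_SHIFT);
+}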
+
+/* scan_channel_info flags */
+#define ACTIVE_SCAN_SCN_SUM 0x2
+#define SCAN_SUM_WLC_CORE0 0x4
+#define SCAN_SUM_WLC_CORE1 0x8
+#define HOME_CHAN 0x10
+#define SCAN_SUM_SCAN_CORE 0x20
+
+typedef struct wl_scan_ssid_info
+{
+ uint8 ssid_len; /* the length of SSID */
+ uint8 ssid[32]; /* SSID string */
+} wl_scan_ssid_info_t;
+
+typedef struct wl_scan_channel_info {
+ uint16 chanspec; /* chanspec scanned */
+ uint16 reserv;
+ uint32 start_time; /* Scan start time in
+ * milliseconds for the chanspec
+ * or home_dwell time start
+ */
+ uint32 end_time; /* Scan end time in
+ * milliseconds for the chanspec
+ * or home_dwell time end
+ */
+ uint16 probe_count; /* Number of probes sent out. For future use
+ */
+ uint16 scn_res_count; /* Count of scan_results found per
+ * channel. For future use
+ */
+} wl_scan_channel_info_t;
+
+typedef struct wl_scan_summary_info {
+ uint32 total_chan_num; /* Total number of channels scanned */
+ uint32 scan_start_time; /* Scan start time in milliseconds */
+ uint32 scan_end_time; /* Scan end time in milliseconds */
+ wl_scan_ssid_info_t ssid[1]; /* SSID being scanned in current
+ * channel. For future use
+ */
+} wl_scan_summary_info_t;
+
+struct wl_scan_summary {
+ uint8 version; /* Version */
+ uint8 reserved;
+ uint16 len; /* Length of the data buffer including SSID
+ * list.
+ */
+ uint16 sync_id; /* Scan Sync ID */
+ uint16 scan_flags; /* flags [0] or SCAN_SUM_CHAN_INFO = */
+ /* channel_info, if not set */
+ /* it is scan_summary_info */
+ /* when channel_info is used, */
+ /* the following flag bits are overridden: */
+ /* flags[1] or ACTIVE_SCAN_SCN_SUM = active channel if set */
+ /* passive if not set */
+ /* flags[2] or WLC_CORE0 = if set, represents wlc_core0 */
+ /* flags[3] or WLC_CORE1 = if set, represents wlc_core1 */
+ /* flags[4] or HOME_CHAN = if set, represents home-channel */
+ /* flags[5] or SCAN_SUM_SCAN_CORE = if set,
+ * represents chan_info from scan core.
+ */
+ /* flags[12] SCAN_SUM_CHAN_RESHED indicates scan rescheduled */
+ /* flags[6:11, 13:15] = reserved */
+ /* when scan_summary_info is used, */
+ /* the following flag bits are used: */
+ /* flags[1] or BAND5G_SIB_ENAB = */
+ /* allowSIBParallelPassiveScan on 5G band */
+ /* flags[2] or BAND2G_SIB_ENAB = */
+ /* allowSIBParallelPassiveScan on 2G band */
+ /* flags[3] or PARALLEL_SCAN = Parallel scan enabled or not */
+ /* flags[4] or SCAN_ABORT = SCAN_ABORTED scenario */
+ /* flags[5] = reserved */
+ /* flags[6:8] is used as count value to identify SCAN CLIENT
+ * WL_SSUM_CLIENT_ASSOCSCAN 0x0u, WL_SSUM_CLIENT_ROAMSCAN 0x1u,
+ * WL_SSUM_CLIENT_FWSCAN 0x2u, WL_SSUM_CLIENT_HOSTSCAN 0x3u
+ */
+ /* flags[9:11] is used as count value to identify SCAN MODE
+ * WL_SCAN_MODE_HIGH_ACC 0u, WL_SCAN_MODE_LOW_SPAN 1u,
+ * WL_SCAN_MODE_LOW_POWER 2u
+ */
+ /* flags[12] SCAN_SUM_CHAN_RESHED indicates scan rescheduled */
+ /* flags[13:15] = reserved */
+ union {
+ wl_scan_channel_info_t scan_chan_info; /* scan related information
+ * for each channel scanned
+ */
+ wl_scan_summary_info_t scan_sum_info; /* Cumulative scan related
+ * information.
+ */
+ } u;
+};
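+
+/* Illustrative sketch: a host parser choosing the union member of
+ * struct wl_scan_summary from bit 0 of scan_flags, as described in the
+ * flag comments above; the function name is hypothetical.
+ */
+static void
+wl_scan_summary_dispatch_example(const struct wl_scan_summary *ss)
+{
+ if (ss->scan_flags & SCAN_SUM_CHAN_INFO) {
+  /* per-channel record: u.scan_chan_info is the valid member */
+  uint32 dwell = ss->u.scan_chan_info.end_time -
+   ss->u.scan_chan_info.start_time;
+  (void)dwell; /* e.g. per-chanspec dwell time in ms */
+ } else {
+  /* cumulative record: u.scan_sum_info is the valid member */
+  uint32 dur = ss->u.scan_sum_info.scan_end_time -
+   ss->u.scan_sum_info.scan_start_time;
+  (void)dur; /* total scan duration in ms */
+ }
+}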
+
+#define SCAN_SUMMARY_VERSION_2 2u
+struct wl_scan_summary_v2 {
+ uint8 version; /* Version */
+ uint8 reserved;
+ uint16 len; /* Length of the data buffer including SSID
+ * list.
+ */
+ uint16 sync_id; /* Scan Sync ID */
+ uint16 scan_flags; /* flags [0] or SCAN_SUM_CHAN_INFO = */
+ /* channel_info, if not set */
+ /* it is scan_summary_info */
+ /* when channel_info is used, */
+ /* the following flag bits are overridden: */
+ /* flags[1] or ACTIVE_SCAN_SCN_SUM = active channel if set */
+ /* passive if not set */
+ /* flags[2] or WLC_CORE0 = if set, represents wlc_core0 */
+ /* flags[3] or WLC_CORE1 = if set, represents wlc_core1 */
+ /* flags[4] or HOME_CHAN = if set, represents home-channel */
+ /* flags[5] or SCAN_SUM_SCAN_CORE = if set,
+ * represents chan_info from scan core.
+ */
+ /* flags[12] SCAN_SUM_CHAN_RESHED indicates scan rescheduled */
+ /* flags[6:11, 13:15] = reserved */
+ /* when scan_summary_info is used, */
+ /* the following flag bits are used: */
+ /* flags[1] or BAND5G_SIB_ENAB = */
+ /* allowSIBParallelPassiveScan on 5G band */
+ /* flags[2] or BAND2G_SIB_ENAB = */
+ /* allowSIBParallelPassiveScan on 2G band */
+ /* flags[3] or PARALLEL_SCAN = Parallel scan enabled or not */
+ /* flags[4] or SCAN_ABORT = SCAN_ABORTED scenario */
+ /* flags[5] = reserved */
+ /* flags[6:8] is used as count value to identify SCAN CLIENT
+ * WL_SSUM_CLIENT_ASSOCSCAN 0x0u, WL_SSUM_CLIENT_ROAMSCAN 0x1u,
+ * WL_SSUM_CLIENT_FWSCAN 0x2u, WL_SSUM_CLIENT_HOSTSCAN 0x3u
+ */
+ /* flags[9:11] is used as count value to identify SCAN MODE
+ * WL_SCAN_MODE_HIGH_ACC 0u, WL_SCAN_MODE_LOW_SPAN 1u,
+ * WL_SCAN_MODE_LOW_POWER 2u
+ */
+ /* flags[12] SCAN_SUM_CHAN_RESHED indicates scan rescheduled */
+ /* flags[13:15] = reserved */
+ /* scan_channel_ctx_t chan_cnt; */
+ uint8 channel_cnt_aux; /* Number of channels to be scanned on Aux core */
+ uint8 channel_cnt_main; /* Number of channels to be scanned on Main core */
+ uint8 channel_cnt_sc; /* Number of channels to be scanned on Scan core */
+ uint8 active_channel_cnt;
+ uint8 passive_channel_cnt;
+ char pad[3]; /* Pad to keep it 32 bit aligned */
+ union {
+ wl_scan_channel_info_t scan_chan_info; /* scan related information
+ * for each channel scanned
+ */
+ wl_scan_summary_info_t scan_sum_info; /* Cumulative scan related
+ * information.
+ */
+ } u;
+};
+/* Channel switch log record structure
+ * Host may map the following structure on channel switch event log record
+ * received from dongle. Note that all payload entries in event log record are
+ * uint32/int32.
+ */
+typedef struct wl_chansw_event_log_record {
+ uint32 time; /* Time in us */
+ uint32 old_chanspec; /* Old channel spec */
+ uint32 new_chanspec; /* New channel spec */
+ uint32 chansw_reason; /* Reason for channel change */
+ int32 dwell_time;
+} wl_chansw_event_log_record_t;
+
+typedef struct wl_chansw_event_log_record_v2 {
+ uint32 time; /* Time in us */
+ uint32 old_chanspec; /* Old channel spec */
+ uint32 new_chanspec; /* New channel spec */
+ uint32 chansw_reason; /* Reason for channel change */
+ int32 dwell_time;
+ uint32 core;
+ int32 phychanswtime; /* channel switch time */
+} wl_chansw_event_log_record_v2_t;
+
+/* Sub-block type for EVENT_LOG_TAG_AMPDU_DUMP */
+typedef enum {
+ WL_AMPDU_STATS_TYPE_RXMCSx1 = 0, /* RX MCS rate (Nss = 1) */
+ WL_AMPDU_STATS_TYPE_RXMCSx2 = 1,
+ WL_AMPDU_STATS_TYPE_RXMCSx3 = 2,
+ WL_AMPDU_STATS_TYPE_RXMCSx4 = 3,
+ WL_AMPDU_STATS_TYPE_RXVHTx1 = 4, /* RX VHT rate (Nss = 1) */
+ WL_AMPDU_STATS_TYPE_RXVHTx2 = 5,
+ WL_AMPDU_STATS_TYPE_RXVHTx3 = 6,
+ WL_AMPDU_STATS_TYPE_RXVHTx4 = 7,
+ WL_AMPDU_STATS_TYPE_TXMCSx1 = 8, /* TX MCS rate (Nss = 1) */
+ WL_AMPDU_STATS_TYPE_TXMCSx2 = 9,
+ WL_AMPDU_STATS_TYPE_TXMCSx3 = 10,
+ WL_AMPDU_STATS_TYPE_TXMCSx4 = 11,
+ WL_AMPDU_STATS_TYPE_TXVHTx1 = 12, /* TX VHT rate (Nss = 1) */
+ WL_AMPDU_STATS_TYPE_TXVHTx2 = 13,
+ WL_AMPDU_STATS_TYPE_TXVHTx3 = 14,
+ WL_AMPDU_STATS_TYPE_TXVHTx4 = 15,
+ WL_AMPDU_STATS_TYPE_RXMCSSGI = 16, /* RX SGI usage (for all MCS rates) */
+ WL_AMPDU_STATS_TYPE_TXMCSSGI = 17, /* TX SGI usage (for all MCS rates) */
+ WL_AMPDU_STATS_TYPE_RXVHTSGI = 18, /* RX SGI usage (for all VHT rates) */
+ WL_AMPDU_STATS_TYPE_TXVHTSGI = 19, /* TX SGI usage (for all VHT rates) */
+ WL_AMPDU_STATS_TYPE_RXMCSPER = 20, /* RX PER (for all MCS rates) */
+ WL_AMPDU_STATS_TYPE_TXMCSPER = 21, /* TX PER (for all MCS rates) */
+ WL_AMPDU_STATS_TYPE_RXVHTPER = 22, /* RX PER (for all VHT rates) */
+ WL_AMPDU_STATS_TYPE_TXVHTPER = 23, /* TX PER (for all VHT rates) */
+ WL_AMPDU_STATS_TYPE_RXDENS = 24, /* RX AMPDU density */
+ WL_AMPDU_STATS_TYPE_TXDENS = 25, /* TX AMPDU density */
+ WL_AMPDU_STATS_TYPE_RXMCSOK = 26, /* RX all MCS rates */
+ WL_AMPDU_STATS_TYPE_RXVHTOK = 27, /* RX all VHT rates */
+ WL_AMPDU_STATS_TYPE_TXMCSALL = 28, /* TX all MCS rates */
+ WL_AMPDU_STATS_TYPE_TXVHTALL = 29, /* TX all VHT rates */
+ WL_AMPDU_STATS_TYPE_TXMCSOK = 30, /* TX all MCS rates */
+ WL_AMPDU_STATS_TYPE_TXVHTOK = 31, /* TX all VHT rates */
+ WL_AMPDU_STATS_TYPE_RX_HE_SUOK = 32, /* DL SU MPDU frame per MCS */
+ WL_AMPDU_STATS_TYPE_RX_HE_SU_DENS = 33, /* DL SU AMPDU DENSITY */
+ WL_AMPDU_STATS_TYPE_RX_HE_MUMIMOOK = 34, /* DL MUMIMO Frame per MCS */
+ WL_AMPDU_STATS_TYPE_RX_HE_MUMIMO_DENS = 35, /* DL MUMIMO AMPDU Density */
+ WL_AMPDU_STATS_TYPE_RX_HE_DLOFDMAOK = 36, /* DL OFDMA Frame per MCS */
+ WL_AMPDU_STATS_TYPE_RX_HE_DLOFDMA_DENS = 37, /* DL OFDMA AMPDU Density */
+ WL_AMPDU_STATS_TYPE_RX_HE_DLOFDMA_HIST = 38, /* DL OFDMA frame RU histogram */
+ WL_AMPDU_STATS_TYPE_TX_HE_MCSALL = 39, /* TX HE (SU+MU) frames, all rates */
+ WL_AMPDU_STATS_TYPE_TX_HE_MCSOK = 40, /* TX HE (SU+MU) frames succeeded */
+ WL_AMPDU_STATS_TYPE_TX_HE_MUALL = 41, /* TX MU (UL OFDMA) frames all rates */
+ WL_AMPDU_STATS_TYPE_TX_HE_MUOK = 42, /* TX MU (UL OFDMA) frames succeeded */
+ WL_AMPDU_STATS_TYPE_TX_HE_RUBW = 43, /* TX UL RU by BW histogram */
+ WL_AMPDU_STATS_TYPE_TX_HE_PADDING = 44, /* TX padding total (single value) */
+ WL_AMPDU_STATS_TYPE_RX_COUNTERS = 45, /* Additional AMPDU_RX module counters
+ * per-slice
+ */
+ WL_AMPDU_STATS_MAX_CNTS = 64
+} wl_ampdu_stat_enum_t;
+typedef struct {
+ uint16 type; /* AMPDU statistics sub-type */
+ uint16 len; /* Number of 32-bit counters */
+ uint32 counters[WL_AMPDU_STATS_MAX_CNTS];
+} wl_ampdu_stats_generic_t;
+
+typedef wl_ampdu_stats_generic_t wl_ampdu_stats_rx_t;
+typedef wl_ampdu_stats_generic_t wl_ampdu_stats_tx_t;
+
+typedef struct {
+ uint16 type; /* AMPDU statistics sub-type */
+ uint16 len; /* Number of 32-bit counters + 2 */
+ uint32 total_ampdu;
+ uint32 total_mpdu;
+ uint32 aggr_dist[WL_AMPDU_STATS_MAX_CNTS + 1];
+} wl_ampdu_stats_aggrsz_t;
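+
+/* Illustrative sketch: reading one counter out of an
+ * EVENT_LOG_TAG_AMPDU_DUMP sub-block. 'len' is the number of valid
+ * 32-bit counters, so a parser should clamp before indexing; the
+ * helper name is hypothetical.
+ */
+static uint32
+wl_ampdu_stats_counter_example(const wl_ampdu_stats_generic_t *st, uint16 idx)
+{
+ if (idx >= st->len || idx >= WL_AMPDU_STATS_MAX_CNTS) {
+  return 0; /* index not populated in this sub-block */
+ }
+ return st->counters[idx];
+}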
+
+/* AMPDU_RX module's per-slice counters. Sent by ecounters as subtype of
+ * WL_IFSTATS_XTLV_RX_AMPDU_STATS ecounters type
+ */
+#define WLC_AMPDU_RX_STATS_V1 (1u)
+typedef struct wlc_ampdu_rx_stats {
+ uint16 version;
+ uint16 len;
+ /* responder side counters */
+ uint32 rxampdu; /**< ampdus recd */
+ uint32 rxmpdu; /**< mpdus recd in a ampdu */
+ uint32 rxht; /**< mpdus recd at ht rate and not in a ampdu */
+ uint32 rxlegacy; /**< mpdus recd at legacy rate */
+ uint32 rxampdu_sgi; /**< ampdus recd with sgi */
+ uint32 rxampdu_stbc; /**< ampdus recd with stbc */
+ uint32 rxnobapol; /**< mpdus recd without a ba policy */
+ uint32 rxholes; /**< missed seq numbers on rx side */
+ uint32 rxqed; /**< pdus buffered before sending up */
+ uint32 rxdup; /**< duplicate pdus */
+ uint32 rxstuck; /**< watchdog bailout for stuck state */
+ uint32 rxoow; /**< out of window pdus */
+ uint32 rxoos; /**< out of seq pdus */
+ uint32 rxaddbareq; /**< addba req recd */
+ uint32 txaddbaresp; /**< addba resp sent */
+ uint32 rxbar; /**< bar recd */
+ uint32 txba; /**< ba sent */
+
+ /* general: both initiator and responder */
+ uint32 rxunexp; /**< unexpected packets */
+ uint32 txdelba; /**< delba sent */
+ uint32 rxdelba; /**< delba recd */
+} wlc_ampdu_rx_stats_t;
+
+/* Sub-block type for WL_IFSTATS_XTLV_HE_TXMU_STATS */
+typedef enum {
+ /* 0 is intentionally reserved and not used as a stats type */
+ WL_HE_TXMU_STATS_TYPE_TIME = 1, /* per-dBm, total usecs transmitted */
+ WL_HE_TXMU_STATS_TYPE_PAD_TIME = 2, /* per-dBm, padding usecs transmitted */
+} wl_he_txmu_stat_enum_t;
+#define WL_IFSTATS_HE_TXMU_MAX 32u
+
+/* Sub-block type for EVENT_LOG_TAG_MSCHPROFILE */
+#define WL_MSCH_PROFILER_START 0 /* start event check */
+#define WL_MSCH_PROFILER_EXIT 1 /* exit event check */
+#define WL_MSCH_PROFILER_REQ 2 /* request event */
+#define WL_MSCH_PROFILER_CALLBACK 3 /* call back event */
+#define WL_MSCH_PROFILER_MESSAGE 4 /* message event */
+#define WL_MSCH_PROFILER_PROFILE_START 5
+#define WL_MSCH_PROFILER_PROFILE_END 6
+#define WL_MSCH_PROFILER_REQ_HANDLE 7
+#define WL_MSCH_PROFILER_REQ_ENTITY 8
+#define WL_MSCH_PROFILER_CHAN_CTXT 9
+#define WL_MSCH_PROFILER_EVENT_LOG 10
+#define WL_MSCH_PROFILER_REQ_TIMING 11
+#define WL_MSCH_PROFILER_TYPE_MASK 0x00ff
+#define WL_MSCH_PROFILER_WLINDEX_SHIFT 8
+#define WL_MSCH_PROFILER_WLINDEX_MASK 0x0f00
+#define WL_MSCH_PROFILER_VER_SHIFT 12
+#define WL_MSCH_PROFILER_VER_MASK 0xf000
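+
+/* Illustrative sketch: unpacking the 16-bit MSCH profiler header word
+ * into its type, wlc index, and version fields with the masks above;
+ * the helper name is hypothetical.
+ */
+static void
+wl_msch_profiler_hdr_decode_example(uint16 hdr, uint16 *type,
+ uint16 *wlindex, uint16 *ver)
+{
+ *type = (uint16)(hdr & WL_MSCH_PROFILER_TYPE_MASK);
+ *wlindex = (uint16)((hdr & WL_MSCH_PROFILER_WLINDEX_MASK) >>
+  WL_MSCH_PROFILER_WLINDEX_SHIFT);
+ *ver = (uint16)((hdr & WL_MSCH_PROFILER_VER_MASK) >>
+  WL_MSCH_PROFILER_VER_SHIFT);
+}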
+
+/* MSCH Event data current version */
+#define WL_MSCH_PROFILER_VER 2
+
+/* msch version history */
+#define WL_MSCH_PROFILER_RSDB_VER 1
+#define WL_MSCH_PROFILER_REPORT_VER 2
+
+/* msch collect header size */
+#define WL_MSCH_PROFILE_HEAD_SIZE OFFSETOF(msch_collect_tlv_t, value)
+
+/* msch event log header size */
+#define WL_MSCH_EVENT_LOG_HEAD_SIZE OFFSETOF(msch_event_log_profiler_event_data_t, data)
+
+/* MSCH data buffer size */
+#define WL_MSCH_PROFILER_BUFFER_SIZE 512
+
+/* request type used in wlc_msch_req_param_t struct */
+#define WL_MSCH_RT_BOTH_FIXED 0 /* both start and end times are fixed */
+#define WL_MSCH_RT_START_FLEX 1 /* start time is flexible and duration is fixed */
+#define WL_MSCH_RT_DUR_FLEX 2 /* start time is fixed and end time is flexible */
+#define WL_MSCH_RT_BOTH_FLEX 3 /* both start and duration are flexible */
+
+/* Flags used in wlc_msch_req_param_t struct */
+#define WL_MSCH_REQ_FLAGS_CHAN_CONTIGUOUS (1 << 0) /* Don't break up channels in chanspec_list */
+#define WL_MSCH_REQ_FLAGS_MERGE_CONT_SLOTS (1 << 1) /* No slot end if slots are continuous */
+#define WL_MSCH_REQ_FLAGS_PREMTABLE (1 << 2) /* Req can be pre-empted by PREMT_CURTS req */
+#define WL_MSCH_REQ_FLAGS_PREMT_CURTS (1 << 3) /* Pre-empt request at the end of curts */
+#define WL_MSCH_REQ_FLAGS_PREMT_IMMEDIATE (1 << 4) /* Pre-empt cur_ts immediately */
+
+/* Requested slot Callback states
+ * req->pend_slot/cur_slot->flags
+ */
+#define WL_MSCH_RC_FLAGS_ONCHAN_FIRE (1 << 0)
+#define WL_MSCH_RC_FLAGS_START_FIRE_DONE (1 << 1)
+#define WL_MSCH_RC_FLAGS_END_FIRE_DONE (1 << 2)
+#define WL_MSCH_RC_FLAGS_ONFIRE_DONE (1 << 3)
+#define WL_MSCH_RC_FLAGS_SPLIT_SLOT_START (1 << 4)
+#define WL_MSCH_RC_FLAGS_SPLIT_SLOT_END (1 << 5)
+#define WL_MSCH_RC_FLAGS_PRE_ONFIRE_DONE (1 << 6)
+
+/* Request entity flags */
+#define WL_MSCH_ENTITY_FLAG_MULTI_INSTANCE (1 << 0)
+
+/* Request Handle flags */
+#define WL_MSCH_REQ_HDL_FLAGS_NEW_REQ (1 << 0) /* req_start callback */
+
+/* MSCH state flags (msch_info->flags) */
+#define WL_MSCH_STATE_IN_TIEMR_CTXT 0x1
+#define WL_MSCH_STATE_SCHD_PENDING 0x2
+
+/* MSCH callback type */
+#define WL_MSCH_CT_REQ_START 0x1
+#define WL_MSCH_CT_ON_CHAN 0x2
+#define WL_MSCH_CT_SLOT_START 0x4
+#define WL_MSCH_CT_SLOT_END 0x8
+#define WL_MSCH_CT_SLOT_SKIP 0x10
+#define WL_MSCH_CT_OFF_CHAN 0x20
+#define WL_MSCH_CT_OFF_CHAN_DONE 0x40
+#define WL_MSCH_CT_REQ_END 0x80
+#define WL_MSCH_CT_PARTIAL 0x100
+#define WL_MSCH_CT_PRE_ONCHAN 0x200
+#define WL_MSCH_CT_PRE_REQ_START 0x400
+
+/* MSCH command bits */
+#define WL_MSCH_CMD_ENABLE_BIT 0x01
+#define WL_MSCH_CMD_PROFILE_BIT 0x02
+#define WL_MSCH_CMD_CALLBACK_BIT 0x04
+#define WL_MSCH_CMD_REGISTER_BIT 0x08
+#define WL_MSCH_CMD_ERROR_BIT 0x10
+#define WL_MSCH_CMD_DEBUG_BIT 0x20
+#define WL_MSCH_CMD_INFOM_BIT 0x40
+#define WL_MSCH_CMD_TRACE_BIT 0x80
+#define WL_MSCH_CMD_ALL_BITS 0xfe
+#define WL_MSCH_CMD_SIZE_MASK 0x00ff0000
+#define WL_MSCH_CMD_SIZE_SHIFT 16
+#define WL_MSCH_CMD_VER_MASK 0xff000000
+#define WL_MSCH_CMD_VER_SHIFT 24
+
+/* maximum channels returned by the get valid channels iovar */
+#define WL_MSCH_NUMCHANNELS 64
+
+typedef struct msch_collect_tlv {
+ uint16 type;
+ uint16 size;
+ char value[1];
+} msch_collect_tlv_t;
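+
+/* Illustrative sketch: advancing to the next record in a buffer of
+ * msch_collect_tlv_t entries. Each record is WL_MSCH_PROFILE_HEAD_SIZE
+ * header bytes followed by 'size' value bytes; bounds checking against
+ * the enclosing buffer is omitted here. The helper name is hypothetical.
+ */
+static const msch_collect_tlv_t *
+msch_collect_tlv_next_example(const msch_collect_tlv_t *tlv)
+{
+ const char *p = (const char *)tlv;
+
+ return (const msch_collect_tlv_t *)(p + WL_MSCH_PROFILE_HEAD_SIZE +
+  tlv->size);
+}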
+
+typedef struct msch_profiler_event_data {
+ uint32 time_lo; /* Request time */
+ uint32 time_hi;
+} msch_profiler_event_data_t;
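+
+/* Illustrative sketch: profiler timestamps are split into 32-bit
+ * halves; a host tool would typically recombine them like this. The
+ * helper name is hypothetical.
+ */
+static uint64
+msch_profiler_time_example(const msch_profiler_event_data_t *ev)
+{
+ return (((uint64)ev->time_hi) << 32) | ev->time_lo;
+}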
+
+typedef struct msch_start_profiler_event_data {
+ uint32 time_lo; /* Request time */
+ uint32 time_hi;
+ uint32 status;
+} msch_start_profiler_event_data_t;
+
+typedef struct msch_message_profiler_event_data {
+ uint32 time_lo; /* Request time */
+ uint32 time_hi;
+ char message[1]; /* message */
+} msch_message_profiler_event_data_t;
+
+typedef struct msch_event_log_profiler_event_data {
+ uint32 time_lo; /* Request time */
+ uint32 time_hi;
+ event_log_hdr_t hdr; /* event log header */
+ uint32 data[9]; /* event data */
+} msch_event_log_profiler_event_data_t;
+
+typedef struct msch_req_param_profiler_event_data {
+ uint16 flags; /* Describes various request properties */
+ uint8 req_type; /* Describes start and end time flexibility */
+ uint8 priority; /* Defines the request priority */
+ uint32 start_time_l; /* Requested start time offset in us unit */
+ uint32 start_time_h;
+ uint32 duration; /* Requested duration in us unit */
+ uint32 interval; /* Requested periodic interval in us unit,
+ * 0 means non-periodic
+ */
+ union {
+ uint32 dur_flex; /* MSCH_REG_DUR_FLEX, min_dur = duration - dur_flex */
+ struct {
+ uint32 min_dur; /* min duration for traffic, maps to home_time */
+ uint32 max_away_dur; /* max acceptable away dur, maps to home_away_time */
+ uint32 hi_prio_time_l;
+ uint32 hi_prio_time_h;
+ uint32 hi_prio_interval; /* repeated high priority interval */
+ } bf;
+ } flex;
+} msch_req_param_profiler_event_data_t;
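+
+/* Illustrative sketch: deriving the minimum acceptable duration of a
+ * request, per the dur_flex comment above (min_dur = duration -
+ * dur_flex for duration-flexible requests); the helper name is
+ * hypothetical.
+ */
+static uint32
+msch_req_min_dur_example(const msch_req_param_profiler_event_data_t *req)
+{
+ if (req->req_type == WL_MSCH_RT_DUR_FLEX) {
+  return req->duration - req->flex.dur_flex;
+ }
+ return req->duration; /* otherwise report the requested duration as-is */
+}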
+
+typedef struct msch_req_timing_profiler_event_data {
+ uint32 p_req_timing;
+ uint32 p_prev;
+ uint32 p_next;
+ uint16 flags;
+ uint16 timeslot_ptr;
+ uint32 fire_time_l;
+ uint32 fire_time_h;
+ uint32 pre_start_time_l;
+ uint32 pre_start_time_h;
+ uint32 start_time_l;
+ uint32 start_time_h;
+ uint32 end_time_l;
+ uint32 end_time_h;
+ uint32 p_timeslot;
+} msch_req_timing_profiler_event_data_t;
+
+typedef struct msch_chan_ctxt_profiler_event_data {
+ uint32 p_chan_ctxt;
+ uint32 p_prev;
+ uint32 p_next;
+ uint16 chanspec;
+ uint16 bf_sch_pending;
+ uint32 bf_link_prev;
+ uint32 bf_link_next;
+ uint32 onchan_time_l;
+ uint32 onchan_time_h;
+ uint32 actual_onchan_dur_l;
+ uint32 actual_onchan_dur_h;
+ uint32 pend_onchan_dur_l;
+ uint32 pend_onchan_dur_h;
+ uint16 req_entity_list_cnt;
+ uint16 req_entity_list_ptr;
+ uint16 bf_entity_list_cnt;
+ uint16 bf_entity_list_ptr;
+ uint32 bf_skipped_count;
+} msch_chan_ctxt_profiler_event_data_t;
+
+typedef struct msch_req_entity_profiler_event_data {
+ uint32 p_req_entity;
+ uint32 req_hdl_link_prev;
+ uint32 req_hdl_link_next;
+ uint32 chan_ctxt_link_prev;
+ uint32 chan_ctxt_link_next;
+ uint32 rt_specific_link_prev;
+ uint32 rt_specific_link_next;
+ uint32 start_fixed_link_prev;
+ uint32 start_fixed_link_next;
+ uint32 both_flex_list_prev;
+ uint32 both_flex_list_next;
+ uint16 chanspec;
+ uint16 priority;
+ uint16 cur_slot_ptr;
+ uint16 pend_slot_ptr;
+ uint16 pad;
+ uint16 chan_ctxt_ptr;
+ uint32 p_chan_ctxt;
+ uint32 p_req_hdl;
+ uint32 bf_last_serv_time_l;
+ uint32 bf_last_serv_time_h;
+ uint16 onchan_chn_idx;
+ uint16 cur_chn_idx;
+ uint32 flags;
+ uint32 actual_start_time_l;
+ uint32 actual_start_time_h;
+ uint32 curts_fire_time_l;
+ uint32 curts_fire_time_h;
+} msch_req_entity_profiler_event_data_t;
+
+typedef struct msch_req_handle_profiler_event_data {
+ uint32 p_req_handle;
+ uint32 p_prev;
+ uint32 p_next;
+ uint32 cb_func;
+ uint32 cb_ctxt;
+ uint16 req_param_ptr;
+ uint16 req_entity_list_cnt;
+ uint16 req_entity_list_ptr;
+ uint16 chan_cnt;
+ uint32 flags;
+ uint16 chanspec_list;
+ uint16 chanspec_cnt;
+ uint16 chan_idx;
+ uint16 last_chan_idx;
+ uint32 req_time_l;
+ uint32 req_time_h;
+} msch_req_handle_profiler_event_data_t;
+
+typedef struct msch_profiler_profiler_event_data {
+ uint32 time_lo; /* Request time */
+ uint32 time_hi;
+ uint32 free_req_hdl_list;
+ uint32 free_req_entity_list;
+ uint32 free_chan_ctxt_list;
+ uint32 free_chanspec_list;
+ uint16 cur_msch_timeslot_ptr;
+ uint16 next_timeslot_ptr;
+ uint32 p_cur_msch_timeslot;
+ uint32 p_next_timeslot;
+ uint32 cur_armed_timeslot;
+ uint32 flags;
+ uint32 ts_id;
+ uint32 service_interval;
+ uint32 max_lo_prio_interval;
+ uint16 flex_list_cnt;
+ uint16 msch_chanspec_alloc_cnt;
+ uint16 msch_req_entity_alloc_cnt;
+ uint16 msch_req_hdl_alloc_cnt;
+ uint16 msch_chan_ctxt_alloc_cnt;
+ uint16 msch_timeslot_alloc_cnt;
+ uint16 msch_req_hdl_list_cnt;
+ uint16 msch_req_hdl_list_ptr;
+ uint16 msch_chan_ctxt_list_cnt;
+ uint16 msch_chan_ctxt_list_ptr;
+ uint16 msch_req_timing_list_cnt;
+ uint16 msch_req_timing_list_ptr;
+ uint16 msch_start_fixed_list_cnt;
+ uint16 msch_start_fixed_list_ptr;
+ uint16 msch_both_flex_req_entity_list_cnt;
+ uint16 msch_both_flex_req_entity_list_ptr;
+ uint16 msch_start_flex_list_cnt;
+ uint16 msch_start_flex_list_ptr;
+ uint16 msch_both_flex_list_cnt;
+ uint16 msch_both_flex_list_ptr;
+ uint32 slotskip_flag;
+} msch_profiler_profiler_event_data_t;
+
+typedef struct msch_req_profiler_event_data {
+ uint32 time_lo; /* Request time */
+ uint32 time_hi;
+ uint16 chanspec_cnt;
+ uint16 chanspec_ptr;
+ uint16 req_param_ptr;
+ uint16 pad;
+} msch_req_profiler_event_data_t;
+
+typedef struct msch_callback_profiler_event_data {
+ uint32 time_lo; /* Request time */
+ uint32 time_hi;
+ uint16 type; /* callback type */
+ uint16 chanspec; /* actual chanspec; may differ from the requested one */
+ uint32 start_time_l; /* time slot start time low 32bit */
+ uint32 start_time_h; /* time slot start time high 32bit */
+ uint32 end_time_l; /* time slot end time low 32 bit */
+ uint32 end_time_h; /* time slot end time high 32 bit */
+ uint32 timeslot_id; /* unique time slot id */
+ uint32 p_req_hdl;
+ uint32 onchan_idx; /* Current channel index */
+ uint32 cur_chan_seq_start_time_l; /* start time of current sequence */
+ uint32 cur_chan_seq_start_time_h;
+} msch_callback_profiler_event_data_t;
+
+typedef struct msch_timeslot_profiler_event_data {
+ uint32 p_timeslot;
+ uint32 timeslot_id;
+ uint32 pre_start_time_l;
+ uint32 pre_start_time_h;
+ uint32 end_time_l;
+ uint32 end_time_h;
+ uint32 sch_dur_l;
+ uint32 sch_dur_h;
+ uint32 p_chan_ctxt;
+ uint32 fire_time_l;
+ uint32 fire_time_h;
+ uint32 state;
+} msch_timeslot_profiler_event_data_t;
+
+typedef struct msch_register_params {
+ uint16 wlc_index; /* Optional wlc index */
+ uint16 flags; /* Describes various request properties */
+ uint32 req_type; /* Describes start and end time flexibility */
+ uint16 id; /* register id */
+ uint16 priority; /* Defines the request priority */
+ uint32 start_time; /* Requested start time offset in ms unit */
+ uint32 duration; /* Requested duration in ms unit */
+ uint32 interval; /* Requested periodic interval in ms unit,
+ * 0 means non-periodic
+ */
+ uint32 dur_flex; /* MSCH_REG_DUR_FLEX, min_dur = duration - dur_flex */
+ uint32 min_dur; /* min duration for traffic, maps to home_time */
+ uint32 max_away_dur; /* max acceptable away dur, maps to home_away_time */
+ uint32 hi_prio_time;
+ uint32 hi_prio_interval; /* repeated high priority interval */
+ uint32 chanspec_cnt;
+ uint16 chanspec_list[WL_MSCH_NUMCHANNELS];
+} msch_register_params_t;
+
+typedef struct {
+ uint32 txallfrm; /**< total number of frames sent, incl. Data, ACK, RTS, CTS,
+ * Control Management (includes retransmissions)
+ */
+ uint32 rxrsptmout; /**< number of response timeouts for transmitted frames
+ * expecting a response
+ */
+ uint32 rxstrt; /**< number of received frames with a good PLCP */
+ uint32 rxbadplcp; /**< number of PLCP header parity check failures */
+ uint32 rxcrsglitch; /**< PHY was able to correlate the preamble but not the header */
+ uint32 rxnodelim; /**< number of frames with no valid delimiter detected by ampdu parser */
+ uint32 bphy_badplcp; /**< number of bad PLCP reception on BPHY rate */
+ uint32 bphy_rxcrsglitch; /**< PHY count of bphy glitches */
+ uint32 rxbadfcs; /**< number of frames for which the CRC check failed in the MAC */
+ uint32 rxanyerr; /**< Any RX error that is not counted by other counters. */
+ uint32 rxbeaconmbss; /**< beacons received from member of BSS */
+ uint32 rxdtucastmbss; /**< number of received DATA frames with good FCS and matching RA */
+ uint32 rxdtocast; /**< number of received DATA frames (good FCS and no matching RA) */
+ uint32 rxtoolate; /**< receive too late */
+ uint32 goodfcs; /**< Good FCS counter */
+ uint32 rxf0ovfl; /**< Rx FIFO0 overflow counters information */
+ uint32 rxf1ovfl; /**< Rx FIFO1 overflow counters information */
+} phy_periodic_counters_v1_t;
+
+typedef struct {
+
+ /* RX error related */
+ uint32 rxrsptmout; /* number of response timeouts for transmitted frames
+ * expecting a response
+ */
+ uint32 rxbadplcp; /* number of PLCP header parity check failures */
+ uint32 rxcrsglitch; /* PHY was able to correlate the preamble but not the header */
+ uint32 rxnodelim; /* number of frames with no valid delimiter detected by ampdu parser */
+ uint32 bphy_badplcp; /* number of bad PLCP reception on BPHY rate */
+ uint32 bphy_rxcrsglitch; /* PHY count of bphy glitches */
+ uint32 rxbadfcs; /* number of frames for which the CRC check failed in the MAC */
+ uint32 rxtoolate; /* receive too late */
+ uint32 rxf0ovfl; /* Rx FIFO0 overflow counters information */
+ uint32 rxf1ovfl; /* Rx FIFO1 overflow counters information */
+ uint32 rxanyerr; /* Any RX error that is not counted by other counters. */
+ uint32 rxdropped; /* Frame dropped */
+ uint32 rxnobuf; /* Rx error due to no buffer */
+ uint32 rxrunt; /* Runt frame counter */
+ uint32 rxfrmtoolong; /* Number of received frame that are too long */
+ uint32 rxdrop20s;
+
+ /* RX related */
+ uint32 rxstrt; /* number of received frames with a good PLCP */
+ uint32 rxbeaconmbss; /* beacons received from member of BSS */
+ uint32 rxdtucastmbss; /* number of received DATA frames with good FCS and matching RA */
+ uint32 rxdtocast; /* number of received DATA frames (good FCS and no matching RA) */
+ uint32 goodfcs; /* Good FCS counter */
+ uint32 rxctl; /* Number of control frames */
+ uint32 rxaction; /* Number of action frames */
+ uint32 rxback; /* Number of block ack frames rcvd */
+ uint32 rxctlucast; /* Number of received unicast ctl frames */
+ uint32 rxframe; /* Number of received frames */
+
+ /* TX related */
+ uint32 txallfrm; /* total number of frames sent, incl. Data, ACK, RTS, CTS,
+ * Control Management (includes retransmissions)
+ */
+ uint32 txmpdu; /* Number of transmitted mpdus */
+ uint32 txackbackctsfrm; /* Number of ACK + BACK + CTS */
+
+ /* TX error related */
+ uint32 txrtsfail; /* RTS TX failure count */
+ uint32 txphyerr; /* PHY TX error count */
+
+ uint16 nav_cntr_l; /* The state of the NAV */
+ uint16 nav_cntr_h;
+} phy_periodic_counters_v3_t;
+
+typedef struct phy_periodic_counters_v4 {
+ uint32 txallfrm; /**< total number of frames sent, incl. Data, ACK, RTS, CTS,
+ * Control Management (includes retransmissions)
+ */
+ uint32 rxrsptmout; /**< number of response timeouts for transmitted frames
+ * expecting a response
+ */
+ uint32 rxstrt; /**< number of received frames with a good PLCP */
+ uint32 rxbadplcp; /**< number of PLCP header parity check failures */
+ uint32 rxcrsglitch; /**< PHY was able to correlate the preamble but not the header */
+ uint32 bphy_badplcp; /**< number of bad PLCP reception on BPHY rate */
+ uint32 bphy_rxcrsglitch; /**< PHY count of bphy glitches */
+ uint32 rxbeaconmbss; /**< beacons received from member of BSS */
+ uint32 rxdtucastmbss; /**< number of received DATA frames with good FCS and matching RA */
+ uint32 rxf0ovfl; /**< Rx FIFO0 overflow counters information */
+ uint32 rxf1ovfl; /**< Rx FIFO1 overflow counters information */
+ uint32 rxdtocast; /**< number of received DATA frames (good FCS and no matching RA) */
+ uint32 rxtoolate; /**< receive too late */
+ uint32 rxbadfcs; /**< number of frames for which the CRC check failed in the MAC */
+ uint32 rxdropped;
+ uint32 rxcrc;
+ uint32 rxnobuf;
+ uint32 rxrunt;
+ uint32 rxgiant;
+ uint32 rxctl;
+ uint32 rxaction;
+ uint32 rxdrop20s;
+ uint32 rxctsucast;
+ uint32 rxrtsucast;
+ uint32 txctsfrm;
+ uint32 rxackucast;
+ uint32 rxback;
+ uint32 txphyerr;
+ uint32 txrtsfrm;
+ uint32 txackfrm;
+ uint32 txback;
+ uint32 rxnodelim;
+ uint32 rxfrmtoolong;
+ uint32 rxctlucast;
+ uint32 txbcnfrm;
+ uint32 txdnlfrm;
+ uint32 txampdu;
+ uint32 txmpdu;
+ uint32 txinrtstxop;
+ uint32 prs_timeout;
+} phy_periodic_counters_v4_t;
+
+typedef struct phycal_log_cmn {
+ uint16 chanspec; /* Current phy chanspec */
+ uint8 last_cal_reason; /* Last Cal Reason */
+ uint8 pad1; /* Padding byte to align with word */
+ uint32 last_cal_time; /* Last cal time in sec */
+} phycal_log_cmn_t;
+
+typedef struct phycal_log_cmn_v2 {
+ uint16 chanspec; /* current phy chanspec */
+ uint8 reason; /* cal reason */
+ uint8 phase; /* cal phase */
+ uint32 time; /* time at which cal happened in sec */
+ uint16 temp; /* temperature at the time of cal */
+ uint16 dur; /* duration of cal in usec */
+
+ /* Misc general purpose debug counters (will be used for future debugging) */
+ uint16 debug_01;
+ uint16 debug_02;
+ uint16 debug_03;
+ uint16 debug_04;
+} phycal_log_cmn_v2_t;
+
+typedef struct phycal_log_core {
+ uint16 ofdm_txa; /* OFDM Tx IQ Cal a coeff */
+ uint16 ofdm_txb; /* OFDM Tx IQ Cal b coeff */
+ uint16 ofdm_txd; /* contain di & dq */
+ uint16 bphy_txa; /* BPHY Tx IQ Cal a coeff */
+ uint16 bphy_txb; /* BPHY Tx IQ Cal b coeff */
+ uint16 bphy_txd; /* contain di & dq */
+
+ uint16 rxa; /* Rx IQ Cal A coefficient */
+ uint16 rxb; /* Rx IQ Cal B coefficient */
+ int32 rxs; /* FDIQ Slope coefficient */
+
+ uint8 baseidx; /* TPC Base index */
+ uint8 adc_coeff_cap0_adcI; /* ADC CAP Cal Cap0 I */
+ uint8 adc_coeff_cap1_adcI; /* ADC CAP Cal Cap1 I */
+ uint8 adc_coeff_cap2_adcI; /* ADC CAP Cal Cap2 I */
+ uint8 adc_coeff_cap0_adcQ; /* ADC CAP Cal Cap0 Q */
+ uint8 adc_coeff_cap1_adcQ; /* ADC CAP Cal Cap1 Q */
+ uint8 adc_coeff_cap2_adcQ; /* ADC CAP Cal Cap2 Q */
+ uint8 pad; /* Padding byte to align with word */
+} phycal_log_core_t;
+
+typedef struct phycal_log_core_v3 {
+ uint16 ofdm_txa; /* OFDM Tx IQ Cal a coeff */
+ uint16 ofdm_txb; /* OFDM Tx IQ Cal b coeff */
+ uint16 ofdm_txd; /* contain di & dq */
+ uint16 bphy_txa; /* BPHY Tx IQ Cal a coeff */
+ uint16 bphy_txb; /* BPHY Tx IQ Cal b coeff */
+ uint16 bphy_txd; /* contain di & dq */
+
+ uint16 rxa; /* Rx IQ Cal A coefficient */
+ uint16 rxb; /* Rx IQ Cal B coefficient */
+ int32 rxs; /* FDIQ Slope coefficient */
+
+ uint8 baseidx; /* TPC Base index */
+ uint8 adc_coeff_cap0_adcI; /* ADC CAP Cal Cap0 I */
+ uint8 adc_coeff_cap1_adcI; /* ADC CAP Cal Cap1 I */
+ uint8 adc_coeff_cap2_adcI; /* ADC CAP Cal Cap2 I */
+ uint8 adc_coeff_cap0_adcQ; /* ADC CAP Cal Cap0 Q */
+ uint8 adc_coeff_cap1_adcQ; /* ADC CAP Cal Cap1 Q */
+ uint8 adc_coeff_cap2_adcQ; /* ADC CAP Cal Cap2 Q */
+ uint8 pad; /* Padding byte to align with word */
+
+ /* Gain-index-based TXIQ coefficients for 2G (3 gain indices) */
+ uint16 txiqlo_2g_a0; /* 2G TXIQ Cal a coeff for high TX gain */
+ uint16 txiqlo_2g_b0; /* 2G TXIQ Cal b coeff for high TX gain */
+ uint16 txiqlo_2g_a1; /* 2G TXIQ Cal a coeff for mid TX gain */
+ uint16 txiqlo_2g_b1; /* 2G TXIQ Cal b coeff for mid TX gain */
+ uint16 txiqlo_2g_a2; /* 2G TXIQ Cal a coeff for low TX gain */
+ uint16 txiqlo_2g_b2; /* 2G TXIQ Cal b coeff for low TX gain */
+
+ uint16 rxa_vpoff; /* Rx IQ Cal A coeff Vp off */
+ uint16 rxb_vpoff; /* Rx IQ Cal B coeff Vp off */
+ uint16 rxa_ipoff; /* Rx IQ Cal A coeff Ip off */
+ uint16 rxb_ipoff; /* Rx IQ Cal B coeff Ip off */
+ int32 rxs_vpoff; /* FDIQ Slope coeff Vp off */
+ int32 rxs_ipoff; /* FDIQ Slope coeff Ip off */
+} phycal_log_core_v3_t;
+
+#define PHYCAL_LOG_VER1 (1u)
+
+typedef struct phycal_log_v1 {
+ uint8 version; /* Logging structure version */
+ uint8 numcores; /* Number of cores for which core-specific data is present */
+ uint16 length; /* Length of the entire structure */
+ phycal_log_cmn_t phycal_log_cmn; /* Logging common structure */
+ /* This will be a variable length based on the numcores field defined above */
+ phycal_log_core_t phycal_log_core[1];
+} phycal_log_v1_t;
+
+typedef struct phy_periodic_log_cmn {
+ uint16 chanspec; /* Current phy chanspec */
+ uint16 vbatmeas; /* Measured VBAT sense value */
+ uint16 featureflag; /* Currently active feature flags */
+ int8 chiptemp; /* Chip temperature */
+ int8 femtemp; /* Fem temperature */
+
+ uint32 nrate; /* Current Tx nrate */
+
+ uint8 cal_phase_id; /* Current Multi phase cal ID */
+ uint8 rxchain; /* Rx Chain */
+ uint8 txchain; /* Tx Chain */
+ uint8 ofdm_desense; /* OFDM desense */
+
+ uint8 bphy_desense; /* BPHY desense */
+ uint8 pll_lockstatus; /* PLL Lock status */
+ uint8 pad1; /* Padding byte to align with word */
+ uint8 pad2; /* Padding byte to align with word */
+
+ uint32 duration; /**< millisecs spent sampling this channel */
+ uint32 congest_ibss; /**< millisecs in our bss (presumably this traffic will */
+ /**< move if cur bss moves channels) */
+ uint32 congest_obss; /**< traffic not in our bss */
+ uint32 interference; /**< millisecs detecting a non 802.11 interferer. */
+
+} phy_periodic_log_cmn_t;
+
+typedef struct phy_periodic_log_cmn_v2 {
+ uint16 chanspec; /* Current phy chanspec */
+ uint16 vbatmeas; /* Measured VBAT sense value */
+ uint16 featureflag; /* Currently active feature flags */
+ int8 chiptemp; /* Chip temperature */
+ int8 femtemp; /* Fem temperature */
+
+ uint32 nrate; /* Current Tx nrate */
+
+ uint8 cal_phase_id; /* Current Multi phase cal ID */
+ uint8 rxchain; /* Rx Chain */
+ uint8 txchain; /* Tx Chain */
+ uint8 ofdm_desense; /* OFDM desense */
+
+ uint8 bphy_desense; /* BPHY desense */
+ uint8 pll_lockstatus; /* PLL Lock status */
+
+ uint32 duration; /* millisecs spent sampling this channel */
+ uint32 congest_ibss; /* millisecs in our bss (presumably this traffic will */
+ /* move if cur bss moves channels) */
+ uint32 congest_obss; /* traffic not in our bss */
+ uint32 interference; /* millisecs detecting a non 802.11 interferer. */
+
+ uint8 slice;
+ uint8 version; /* version of fw/ucode for debug purposes */
+ bool phycal_disable; /* Set if calibration is disabled */
+ uint8 pad;
+ uint16 phy_log_counter;
+ uint16 noise_mmt_overdue; /* Incremented when ucode noise mmt is overdue for 5 sec */
+ uint16 chan_switch_tm; /* Channel switch time */
+
+ /* HP2P related params */
+ uint16 shm_mpif_cnt_val;
+ uint16 shm_thld_cnt_val;
+ uint16 shm_nav_cnt_val;
+ uint16 shm_cts_cnt_val;
+
+ uint16 shm_m_prewds_cnt; /* Count of pre-wds fired in the ucode */
+ uint32 last_cal_time; /* Last cal execution time */
+ uint16 deaf_count; /* Depth of stay_in_carrier_search function */
+ uint32 ed20_crs0; /* ED-CRS status on core 0 */
+ uint32 ed20_crs1; /* ED-CRS status on core 1 */
+ uint32 noise_cal_req_ts; /* Time-stamp when noise cal was requested */
+ uint32 noise_cal_intr_ts; /* Time-stamp when noise cal was completed */
+ uint32 phywdg_ts; /* Time-stamp when wd was fired */
+ uint32 phywd_dur; /* Duration of the watchdog */
+ uint32 noise_mmt_abort_crs; /* Count of CRS during noise mmt */
+ uint32 chanspec_set_ts; /* Time-stamp when chanspec was set */
+ uint32 vcopll_failure_cnt; /* Number of VCO cal failures
+ * (including failures detected in ucode).
+ */
+ uint32 dcc_fail_counter; /* Number of DC cal failures */
+ uint32 log_ts; /* Time-stamp when this log was collected */
+
+ uint16 btcxovrd_dur; /* Cumulative btcx override between WDGs */
+ uint16 btcxovrd_err_cnt; /* BTCX override flagged errors */
+
+ uint16 femtemp_read_fail_counter; /* Fem temperature read fail counter */
+ /* Misc general purpose debug counters (will be used for future debugging) */
+ uint16 debug_01;
+ uint16 debug_02;
+} phy_periodic_log_cmn_v2_t;
+
+typedef struct phy_periodic_log_cmn_v3 {
+ uint32 nrate; /* Current Tx nrate */
+ uint32 duration; /**< millisecs spent sampling this channel */
+ uint32 congest_ibss; /**< millisecs in our bss (presumably this traffic will */
+ /**< move if cur bss moves channels) */
+ uint32 congest_obss; /**< traffic not in our bss */
+ uint32 interference; /**< millisecs detecting a non 802.11 interferer. */
+ uint32 noise_cfg_exit1;
+ uint32 noise_cfg_exit2;
+ uint32 noise_cfg_exit3;
+ uint32 noise_cfg_exit4;
+ uint32 ed20_crs0;
+ uint32 ed20_crs1;
+ uint32 noise_cal_req_ts;
+ uint32 noise_cal_crs_ts;
+ uint32 log_ts;
+ uint32 last_cal_time;
+ uint32 phywdg_ts;
+ uint32 chanspec_set_ts;
+ uint32 noise_zero_inucode;
+ uint32 phy_crs_during_noisemmt;
+ uint32 wd_dur;
+
+ int32 deaf_count;
+
+ uint16 chanspec; /* Current phy chanspec */
+ uint16 vbatmeas; /* Measured VBAT sense value */
+ uint16 featureflag; /* Currently active feature flags */
+ uint16 nav_cntr_l;
+ uint16 nav_cntr_h;
+ uint16 chanspec_set_last;
+ uint16 ucode_noise_fb_overdue;
+ uint16 phy_log_counter;
+ uint16 shm_mpif_cnt_val;
+ uint16 shm_thld_cnt_val;
+ uint16 shm_nav_cnt_val;
+ uint16 shm_dc_cnt_val;
+ uint16 shm_txff_cnt_val;
+ uint16 shm_cts_cnt_val;
+ uint16 shm_m_prewds_cnt;
+
+ uint8 cal_phase_id; /* Current Multi phase cal ID */
+ uint8 rxchain; /* Rx Chain */
+ uint8 txchain; /* Tx Chain */
+ uint8 ofdm_desense; /* OFDM desense */
+ uint8 bphy_desense; /* BPHY desense */
+ uint8 pll_lockstatus; /* PLL Lock status */
+ int8 chiptemp; /* Chip temperature */
+ int8 femtemp; /* Fem temperature */
+
+ bool phycal_disable;
+ uint8 pad; /* Padding byte to align with word */
+} phy_periodic_log_cmn_v3_t;
+
+typedef struct phy_periodic_log_cmn_v4 {
+ uint16 chanspec; /* Current phy chanspec */
+ uint16 vbatmeas; /* Measured VBAT sense value */
+
+ uint16 featureflag; /* Currently active feature flags */
+ int8 chiptemp; /* Chip temperature */
+ int8 femtemp; /* Fem temperature */
+
+ uint32 nrate; /* Current Tx nrate */
+
+ uint8 cal_phase_id; /* Current Multi phase cal ID */
+ uint8 rxchain; /* Rx Chain */
+ uint8 txchain; /* Tx Chain */
+ uint8 ofdm_desense; /* OFDM desense */
+
+ uint8 slice;
+ uint8 dbgfw_ver; /* version of fw/ucode for debug purposes */
+ uint8 bphy_desense; /* BPHY desense */
+ uint8 pll_lockstatus; /* PLL Lock status */
+
+ uint32 duration; /* millisecs spent sampling this channel */
+ uint32 congest_ibss; /* millisecs in our bss (presumably this traffic will */
+ /* move if cur bss moves channels) */
+ uint32 congest_obss; /* traffic not in our bss */
+ uint32 interference; /* millisecs detecting a non 802.11 interferer. */
+
+ /* HP2P related params */
+ uint16 shm_mpif_cnt_val;
+ uint16 shm_thld_cnt_val;
+ uint16 shm_nav_cnt_val;
+ uint16 shm_cts_cnt_val;
+
+ uint16 shm_m_prewds_cnt; /* Count of pre-wds fired in the ucode */
+ uint16 deaf_count; /* Depth of stay_in_carrier_search function */
+ uint32 last_cal_time; /* Last cal execution time */
+ uint32 ed20_crs0; /* ED-CRS status on core 0 */
+ uint32 ed20_crs1; /* ED-CRS status on core 1 */
+ uint32 noise_cal_req_ts; /* Time-stamp when noise cal was requested */
+ uint32 noise_cal_intr_ts; /* Time-stamp when noise cal was completed */
+ uint32 phywdg_ts; /* Time-stamp when wd was fired */
+ uint32 phywd_dur; /* Duration of the watchdog */
+ uint32 noise_mmt_abort_crs; /* Count of CRS during noise mmt */
+ uint32 chanspec_set_ts; /* Time-stamp when chanspec was set */
+ uint32 vcopll_failure_cnt; /* Number of VCO cal failures
+ * (including failures detected in ucode).
+ */
+ uint16 dcc_attempt_counter; /* Number of DC cal attempts */
+ uint16 dcc_fail_counter; /* Number of DC cal failures */
+ uint32 log_ts; /* Time-stamp when this log was collected */
+
+ uint16 btcxovrd_dur; /* Cumulative btcx override between WDGs */
+ uint16 btcxovrd_err_cnt; /* BTCX override flagged errors */
+
+ uint16 femtemp_read_fail_counter; /* Fem temperature read fail counter */
+ uint16 phy_log_counter;
+ uint16 noise_mmt_overdue; /* Incremented when ucode noise mmt is overdue for 5 sec */
+ uint16 chan_switch_tm; /* Channel switch time */
+
+ bool phycal_disable; /* Set if calibration is disabled */
+
+ /* dccal dcoe & idacc */
+ uint8 dcc_err; /* dccal health check error status */
+ uint8 dcoe_num_tries; /* number of retries on dcoe cal */
+ uint8 idacc_num_tries; /* number of retries on idac cal */
+
+ uint8 dccal_phyrxchain; /* phy rxchain during dc calibration */
+ uint8 dccal_type; /* DC cal type: single/multi phase, chan change, etc. */
+ uint16 dcc_hcfail; /* dcc health check failure count */
+ uint16 dcc_calfail; /* dcc failure count */
+
+ /* Misc general purpose debug counters (will be used for future debugging) */
+ uint16 debug_01;
+ uint16 debug_02;
+ uint16 debug_03;
+ uint16 debug_04;
+ uint16 debug_05;
+} phy_periodic_log_cmn_v4_t;
+
+typedef struct phy_periodic_log_core {
+ uint8 baseindxval; /* TPC Base index */
+ int8 tgt_pwr; /* Programmed Target power */
+ int8 estpwradj; /* Current Est Power Adjust value */
+ int8 crsmin_pwr; /* CRS Min/Noise power */
+ int8 rssi_per_ant; /* RSSI Per antenna */
+ int8 snr_per_ant; /* SNR Per antenna */
+ int8 pad1; /* Padding byte to align with word */
+ int8 pad2; /* Padding byte to align with word */
+} phy_periodic_log_core_t;
+
+typedef struct phy_periodic_log_core_v3 {
+ uint8 baseindxval; /* TPC Base index */
+ int8 tgt_pwr; /* Programmed Target power */
+ int8 estpwradj; /* Current Est Power Adjust value */
+ int8 crsmin_pwr; /* CRS Min/Noise power */
+ int8 rssi_per_ant; /* RSSI Per antenna */
+ int8 snr_per_ant; /* SNR Per antenna */
+
+ /* dccal dcoe & idacc */
+ uint16 dcoe_done_0; /* dccal control register 44 */
+ uint16 dcoe_done_1; /* dccal control register 45 */
+ uint16 dcoe_done_2; /* dccal control register 46 */
+ uint16 idacc_done_0; /* dccal control register 21 */
+ uint16 idacc_done_1; /* dccal control register 60 */
+ uint16 idacc_done_2; /* dccal control register 61 */
+ int16 psb; /* psb read during dccal health check */
+ uint8 pktproc; /* pktproc read during dccal health check */
+
+ int8 pad1; /* Padding byte to align with word */
+ int8 pad2; /* Padding byte to align with word */
+ int8 pad3; /* Padding byte to align with word */
+} phy_periodic_log_core_v3_t;
+
+typedef struct phy_periodic_log_core_v2 {
+ int32 rxs; /* FDIQ Slope coefficient */
+
+ uint16 ofdm_txa; /* OFDM Tx IQ Cal a coeff */
+ uint16 ofdm_txb; /* OFDM Tx IQ Cal b coeff */
+ uint16 ofdm_txd; /* contain di & dq */
+ uint16 rxa; /* Rx IQ Cal A coefficient */
+ uint16 rxb; /* Rx IQ Cal B coefficient */
+ uint16 baseidx; /* TPC Base index */
+
+ uint8 baseindxval; /* TPC Base index */
+
+ int8 tgt_pwr; /* Programmed Target power */
+ int8 estpwradj; /* Current Est Power Adjust value */
+ int8 crsmin_pwr; /* CRS Min/Noise power */
+ int8 rssi_per_ant; /* RSSI Per antenna */
+ int8 snr_per_ant; /* SNR Per antenna */
+ int8 pad1; /* Padding byte to align with word */
+ int8 pad2; /* Padding byte to align with word */
+} phy_periodic_log_core_v2_t;
+
+#define PHY_PERIODIC_LOG_VER1 (1u)
+
+typedef struct phy_periodic_log_v1 {
+ uint8 version; /* Logging structure version */
+ uint8 numcores; /* Number of cores for which core-specific data is present */
+ uint16 length; /* Length of the entire structure */
+ phy_periodic_log_cmn_t phy_perilog_cmn;
+ phy_periodic_counters_v1_t counters_peri_log;
+ /* This will be a variable length based on the numcores field defined above */
+ phy_periodic_log_core_t phy_perilog_core[1];
+} phy_periodic_log_v1_t;
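+
+/* Illustrative sketch: phy_periodic_log_v1_t ends with 'numcores'
+ * phy_periodic_log_core_t entries; a parser walks the trailing array
+ * through the [1]-sized tail member. The helper name is hypothetical.
+ */
+static int8
+phy_perilog_min_rssi_example(const phy_periodic_log_v1_t *log)
+{
+ int8 min_rssi = 127;
+ uint8 i;
+
+ for (i = 0; i < log->numcores; i++) {
+  if (log->phy_perilog_core[i].rssi_per_ant < min_rssi) {
+   min_rssi = log->phy_perilog_core[i].rssi_per_ant;
+  }
+ }
+ return min_rssi;
+}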
+
+#define PHYCAL_LOG_VER3 (3u)
+#define PHY_PERIODIC_LOG_VER3 (3u)
+
+/* 4387 onwards */
+typedef struct phy_periodic_log_v3 {
+ uint8 version; /* Logging structure version */
+ uint8 numcores; /* Number of cores for which core-specific data is present */
+ uint16 length; /* Length of the structure */
+
+ /* Logs general PHY parameters */
+ phy_periodic_log_cmn_v2_t phy_perilog_cmn;
+
+ /* Logs ucode counters and NAVs */
+ phy_periodic_counters_v3_t counters_peri_log;
+
+ /* Logs data pertaining to each core */
+ phy_periodic_log_core_t phy_perilog_core[1];
+} phy_periodic_log_v3_t;
+
+#define PHY_PERIODIC_LOG_VER5 (5u)
+
+typedef struct phy_periodic_log_v5 {
+ uint8 version; /* Logging structure version */
+ uint8 numcores; /* Number of cores for which core-specific data is present */
+ uint16 length; /* Length of the structure */
+
+ /* Logs general PHY parameters */
+ phy_periodic_log_cmn_v4_t phy_perilog_cmn;
+
+ /* Logs ucode counters and NAVs */
+ phy_periodic_counters_v3_t counters_peri_log;
+
+ /* Logs data pertaining to each core */
+ phy_periodic_log_core_v3_t phy_perilog_core[1];
+} phy_periodic_log_v5_t;
+
+typedef struct phycal_log_v3 {
+ uint8 version; /* Logging structure version */
+ uint8 numcores; /* Number of cores for which core-specific data is present */
+ uint16 length; /* Length of the entire structure */
+ phycal_log_cmn_v2_t phycal_log_cmn; /* Logging common structure */
+ /* This will be a variable length based on the numcores field defined above */
+ phycal_log_core_v3_t phycal_log_core[1];
+} phycal_log_v3_t;
+
+/* Note: The version 2 is reserved for 4357 only. Future chips must not use this version. */
+
+#define MAX_CORE_4357 (2u)
+#define PHYCAL_LOG_VER2 (2u)
+#define PHY_PERIODIC_LOG_VER2 (2u)
+
+typedef struct {
+ uint32 txallfrm; /**< total number of frames sent, incl. Data, ACK, RTS, CTS,
+ * Control Management (includes retransmissions)
+ */
+ uint32 rxrsptmout; /**< number of response timeouts for transmitted frames
+ * expecting a response
+ */
+ uint32 rxstrt; /**< number of received frames with a good PLCP */
+ uint32 rxbadplcp; /**< number of PLCP header parity check failures */
+ uint32 rxcrsglitch; /**< PHY was able to correlate the preamble but not the header */
+ uint32 bphy_badplcp; /**< number of bad PLCP reception on BPHY rate */
+ uint32 bphy_rxcrsglitch; /**< PHY count of bphy glitches */
+ uint32 rxbeaconmbss; /**< beacons received from member of BSS */
+ uint32 rxdtucastmbss; /**< number of received DATA frames with good FCS and matching RA */
+ uint32 rxf0ovfl; /**< Rx FIFO0 overflow counters information */
+ uint32 rxf1ovfl; /**< Rx FIFO1 overflow counters information */
+ uint32 rxdtocast; /**< number of received DATA frames (good FCS and no matching RA) */
+ uint32 rxtoolate; /**< receive too late */
+ uint32 rxbadfcs; /**< number of frames for which the CRC check failed in the MAC */
+} phy_periodic_counters_v2_t;
+
+/* Note: The version 2 is reserved for 4357 only. All future chips must not use this version. */
+
+typedef struct phycal_log_core_v2 {
+ uint16 ofdm_txa; /* OFDM Tx IQ Cal a coeff */
+ uint16 ofdm_txb; /* OFDM Tx IQ Cal b coeff */
+ uint16 ofdm_txd; /* contain di & dq */
+ uint16 rxa; /* Rx IQ Cal A coefficient */
+ uint16 rxb; /* Rx IQ Cal B coefficient */
+ uint8 baseidx; /* TPC Base index */
+ uint8 pad;
+ int32 rxs; /* FDIQ Slope coefficient */
+} phycal_log_core_v2_t;
+
+/* Note: The version 2 is reserved for 4357 only. All future chips must not use this version. */
+
+typedef struct phycal_log_v2 {
+ uint8 version; /* Logging structure version */
+ uint16 length; /* Length of the entire structure */
+ uint8 pad;
+ phycal_log_cmn_t phycal_log_cmn; /* Logging common structure */
+ phycal_log_core_v2_t phycal_log_core[MAX_CORE_4357];
+} phycal_log_v2_t;
+
+/* Note: The version 2 is reserved for 4357 only. All future chips must not use this version. */
+
+typedef struct phy_periodic_log_v2 {
+ uint8 version; /* Logging structure version */
+ uint16 length; /* Length of the entire structure */
+ uint8 pad;
+ phy_periodic_log_cmn_t phy_perilog_cmn;
+ phy_periodic_counters_v2_t counters_peri_log;
+ phy_periodic_log_core_t phy_perilog_core[MAX_CORE_4357];
+} phy_periodic_log_v2_t;
+
+#define PHY_PERIODIC_LOG_VER4 (4u)
+
+/*
+ * Note: The version 4 is reserved for 4357 Deafness Debug only.
+ * All future chips must not use this version.
+ */
+typedef struct phy_periodic_log_v4 {
+ uint8 version; /* Logging structure version */
+ uint8 pad;
+ uint16 length; /* Length of the entire structure */
+ phy_periodic_log_cmn_v3_t phy_perilog_cmn;
+ phy_periodic_counters_v4_t counters_peri_log;
+ phy_periodic_log_core_v2_t phy_perilog_core[MAX_CORE_4357];
+} phy_periodic_log_v4_t;
+
+/* Event log payload for enhanced roam log */
+typedef enum {
+ ROAM_LOG_SCANSTART = 1, /* EVT log for roam scan start */
+ ROAM_LOG_SCAN_CMPLT = 2, /* EVT log for roam scan completed */
+ ROAM_LOG_ROAM_CMPLT = 3, /* EVT log for roam done */
+ ROAM_LOG_NBR_REQ = 4, /* EVT log for Neighbor REQ */
+ ROAM_LOG_NBR_REP = 5, /* EVT log for Neighbor REP */
+ ROAM_LOG_BCN_REQ = 6, /* EVT log for BCNRPT REQ */
+ ROAM_LOG_BCN_REP = 7, /* EVT log for BCNRPT REP */
+ ROAM_LOG_BTM_REP = 8, /* EVT log for BTM REP */
+ ROAM_LOG_WIPS_EVENT = 9, /* EVT log for WIPS Event */
+ PRSV_PERIODIC_ID_MAX
+} prsv_periodic_id_enum_t;
+
+typedef struct prsv_periodic_log_hdr {
+ uint8 version;
+ uint8 id;
+ uint16 length;
+} prsv_periodic_log_hdr_t;
+
+#define ROAM_LOG_VER_1 (1u)
+#define ROAM_LOG_VER_2 (2u)
+#define ROAM_LOG_VER_3 (3u)
+#define ROAM_SSID_LEN (32u)
+typedef struct roam_log_trig_v1 {
+ prsv_periodic_log_hdr_t hdr;
+ int8 rssi;
+ uint8 current_cu;
+ uint8 pad[2];
+ uint reason;
+ int result;
+ union {
+ struct {
+ uint rcvd_reason;
+ } prt_roam;
+ struct {
+ uint8 req_mode;
+ uint8 token;
+ uint16 nbrlist_size;
+ uint32 disassoc_dur;
+ uint32 validity_dur;
+ uint32 bss_term_dur;
+ } bss_trans;
+ };
+} roam_log_trig_v1_t;
+
+typedef struct roam_log_trig_v2 {
+ prsv_periodic_log_hdr_t hdr;
+ int8 rssi;
+ uint8 current_cu;
+ uint8 full_scan;
+ uint8 pad;
+ uint reason;
+ int result;
+ union {
+ struct {
+ uint rcvd_reason;
+ } prt_roam;
+ struct {
+ uint8 req_mode;
+ uint8 token;
+ uint16 nbrlist_size;
+ uint32 disassoc_dur;
+ uint32 validity_dur;
+ uint32 bss_term_dur;
+ } bss_trans;
+ struct {
+ int rssi_threshold;
+ } low_rssi;
+ };
+} roam_log_trig_v2_t;
+
+#define ROAM_LOG_RPT_SCAN_LIST_SIZE 3
+#define ROAM_LOG_INVALID_TPUT 0xFFFFFFFFu
+typedef struct roam_scan_ap_info {
+ int8 rssi;
+ uint8 cu;
+ uint8 pad[2];
+ uint32 score;
+ uint16 chanspec;
+ struct ether_addr addr;
+ uint32 estm_tput;
+} roam_scan_ap_info_t;
+
+typedef struct roam_log_scan_cmplt_v1 {
+ prsv_periodic_log_hdr_t hdr;
+ uint8 full_scan;
+ uint8 scan_count;
+ uint8 scan_list_size;
+ uint8 pad;
+ int32 score_delta;
+ roam_scan_ap_info_t cur_info;
+ roam_scan_ap_info_t scan_list[ROAM_LOG_RPT_SCAN_LIST_SIZE];
+} roam_log_scan_cmplt_v1_t;
+
+#define ROAM_CHN_UNI_2A 36u
+#define ROAM_CHN_UNI_2A_MAX 64u
+#define ROAM_CHN_UNI_2C 100u
+#define ROAM_CHN_UNI_2C_MAX 144u
+#define ROAM_CHN_UNI_3 149u
+#define ROAM_CHN_UNI_3_MAX 165u
+#define ROAM_CHN_SPACE 2u /* channel index space for 5G */
+
+typedef struct roam_log_scan_cmplt_v2 {
+ prsv_periodic_log_hdr_t hdr;
+ uint8 scan_count;
+ uint8 scan_list_size;
+ uint8 chan_num;
+ uint8 pad;
+ uint16 band2g_chan_list;
+ uint16 uni2a_chan_list;
+ uint8 uni2c_chan_list[3];
+ uint8 uni3_chan_list;
+ int32 score_delta;
+ roam_scan_ap_info_t cur_info;
+ roam_scan_ap_info_t scan_list[ROAM_LOG_RPT_SCAN_LIST_SIZE];
+} roam_log_scan_cmplt_v2_t;
+
+typedef struct roam_log_cmplt_v1 {
+ prsv_periodic_log_hdr_t hdr;
+ uint status; /* WLC_E status code */
+ uint reason; /* roam trigger reason */
+ uint16 chanspec; /* new BSSID chanspec */
+ struct ether_addr addr; /* ether addr */
+ uint8 pad[3];
+ uint8 retry;
+} roam_log_cmplt_v1_t;
+
+typedef roam_log_cmplt_v1_t roam_log_cmplt_v2_t;
+
+typedef struct roam_log_nbrrep {
+ prsv_periodic_log_hdr_t hdr;
+ uint channel_num;
+} roam_log_nbrrep_v1_t;
+
+typedef struct roam_log_nbrrep_v2 {
+ prsv_periodic_log_hdr_t hdr;
+ uint channel_num;
+ uint16 band2g_chan_list; /* channel bit map */
+ uint16 uni2a_chan_list;
+ uint8 uni2c_chan_list[3];
+ uint8 uni3_chan_list;
+} roam_log_nbrrep_v2_t;
+
+typedef struct roam_log_nbrreq {
+ prsv_periodic_log_hdr_t hdr;
+ uint token;
+} roam_log_nbrreq_v1_t;
+
+typedef roam_log_nbrreq_v1_t roam_log_nbrreq_v2_t;
+
+typedef struct roam_log_bcnrptreq {
+ prsv_periodic_log_hdr_t hdr;
+ int32 result;
+ uint8 reg; /* operating class */
+ uint8 channel; /* requested channel number */
+ uint8 mode; /* request mode d11 rmreq bcn */
+ uint8 bssid_wild; /* is wild bssid */
+ uint8 ssid_len; /* length of SSID */
+ uint8 pad;
+ uint16 duration; /* duration */
+ uint8 ssid[ROAM_SSID_LEN];
+} roam_log_bcnrpt_req_v1_t;
+
+typedef roam_log_bcnrpt_req_v1_t roam_log_bcnrpt_req_v2_t;
+
+typedef struct roam_log_bcnrptrep {
+ prsv_periodic_log_hdr_t hdr;
+ uint32 count;
+} roam_log_bcnrpt_rep_v1_t;
+
+typedef struct roam_log_bcnrptrep_v2 {
+ prsv_periodic_log_hdr_t hdr;
+ uint8 scan_inprogress; /* TRUE if a scan is in progress */
+ uint8 reason; /* report mode d11 RMREP mode */
+ uint32 count;
+} roam_log_bcnrpt_rep_v2_t;
+
+typedef struct roam_log_btmrep_v2 {
+ prsv_periodic_log_hdr_t hdr;
+ uint8 req_mode; /* d11 BSSTRANS req mode */
+ uint8 status; /* d11 BSSTRANS response status code */
+ uint16 pad[2];
+ int result;
+} roam_log_btm_rep_v2_t;
+
+/* ROAM_LOG_VER_3 specific structures */
+typedef struct roam_log_btmrep_v3 {
+ prsv_periodic_log_hdr_t hdr;
+ uint8 req_mode; /* d11 BSSTRANS req mode */
+ uint8 status; /* d11 BSSTRANS response status code */
+ uint16 pad[2];
+ struct ether_addr target_addr; /* target BSSID to move to */
+ int result;
+} roam_log_btm_rep_v3_t;
+
+typedef struct roam_log_bcnrptreq_v3 {
+ prsv_periodic_log_hdr_t hdr;
+ int32 result;
+ uint8 reg; /* operating class */
+ uint8 channel; /* requested channel number */
+ uint8 mode; /* request mode d11 rmreq bcn */
+ uint8 bssid_wild; /* is wild bssid */
+ uint8 ssid_len; /* length of SSID */
+ uint8 pad;
+ uint16 duration; /* duration */
+ uint8 ssid[ROAM_SSID_LEN];
+ uint channel_num; /* number of scan channels */
+ uint16 band2g_chan_list; /* channel bit map */
+ uint16 uni2a_chan_list;
+ uint8 uni2c_chan_list[3];
+ uint8 uni3_chan_list;
+} roam_log_bcnrpt_req_v3_t;
+
+#define BCNRPT_RSN_SUCCESS 0
+#define BCNRPT_RSN_BADARG 1
+#define BCNRPT_RSN_SCAN_ING 2
+#define BCNRPT_RSN_SCAN_FAIL 3
+
+typedef struct roam_log_bcnrptrep_v3 {
+ prsv_periodic_log_hdr_t hdr;
+ uint8 scan_status; /* scan status */
+ uint8 reason; /* report mode d11 RMREP mode */
+ uint16 reason_detail;
+ uint32 count;
+ uint16 duration; /* duration */
+ uint16 pad;
+} roam_log_bcnrpt_rep_v3_t;
+
+typedef struct roam_log_wips_evt_v3 {
+ prsv_periodic_log_hdr_t hdr;
+ uint32 timestamp;
+ struct ether_addr bssid; /* ether addr */
+ uint16 misdeauth;
+ int16 current_rssi;
+ int16 deauth_rssi;
+} roam_log_wips_evt_v3_t;
+
+#define EVENT_LOG_BUFFER_ID_PMK 0
+#define EVENT_LOG_BUFFER_ID_ANONCE 1
+#define EVENT_LOG_BUFFER_ID_SNONCE 2
+#define EVENT_LOG_BUFFER_ID_WPA_M3_KEYDATA 3
+#define EVENT_LOG_BUFFER_ID_WPA_CACHED_KEYDATA 4
+
+typedef struct event_log_buffer {
+ uint16 id; /* XTLV ID: EVENT_LOG_XTLV_ID_BUF */
+ uint16 len; /* XTLV Len */
+ uint16 buf_id; /* One of the above EVENT_LOG_BUFFER_ID_XXXs */
+ uint16 pad; /* for 4-byte start alignment of data */
+ uint8 data[]; /* the payload of interest */
+} event_log_buffer_t;
+
+#define XTLV_EVENT_LOG_BUFFER_LEN (OFFSETOF(event_log_buffer_t, data))
+#define XTLV_EVENT_LOG_BUFFER_FULL_LEN(buf_len) ALIGN_SIZE((XTLV_EVENT_LOG_BUFFER_LEN + \
+ (buf_len) * sizeof(uint8)), sizeof(uint32))
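+
+/* Illustrative sketch of the macro above: for a 5-byte payload the
+ * full XTLV length is the 8-byte header plus 5 bytes of data, i.e. 13,
+ * aligned up to 16 by ALIGN_SIZE; the helper name is hypothetical.
+ */
+static uint32
+xtlv_event_log_buffer_size_example(uint16 buf_len)
+{
+ return (uint32)XTLV_EVENT_LOG_BUFFER_FULL_LEN(buf_len);
+}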
+
+/* Structures for parsing FSM log data
+ * Only used by host to parse data coming in FSM log set
+ * Following log tags use this structured data:
+ * EVENT_LOG_TAG_ASSOC_SM
+ * EVENT_LOG_TAG_SUP_SM
+ * EVENT_LOG_TAG_AUTH_SM
+ * EVENT_LOG_TAG_SAE_SM
+ * EVENT_LOG_TAG_FTM_SM
+ * EVENT_LOG_TAG_NAN_SM
+ * More state machine log tags may also use this format
+ */
+
+/* Generic FSM structure for logging. Must be wrapped into a proper structure. The wrapper
+ * structure can add more information but this needs to be one of the members of the wrapper
+ * structure.
+ */
+typedef struct event_log_generic_fsm_struct {
+ uint32 old_state;
+ uint32 new_state;
+ uint32 reason;
+ uint32 caller;
+} event_log_generic_fsm_struct_t;
+
+typedef struct event_log_wl_fsm_struct {
+ uint32 unit;
+ uint32 bsscfg_idx;
+ event_log_generic_fsm_struct_t generic_fsm;
+ uint32 data[]; /* Any other information relevant to this state transition */
+} event_log_wl_fsm_struct_t;
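+
+/* Illustrative sketch: populating the WL FSM log payload for a state
+ * transition. The surrounding EVENT_LOG() plumbing and tags are elided;
+ * only the payload layout defined above is exercised. The helper name
+ * is hypothetical.
+ */
+static void
+event_log_wl_fsm_fill_example(event_log_wl_fsm_struct_t *rec, uint32 unit,
+ uint32 bsscfg_idx, uint32 old_state, uint32 new_state,
+ uint32 reason, uint32 caller)
+{
+ rec->unit = unit;
+ rec->bsscfg_idx = bsscfg_idx;
+ rec->generic_fsm.old_state = old_state;
+ rec->generic_fsm.new_state = new_state;
+ rec->generic_fsm.reason = reason;
+ rec->generic_fsm.caller = caller;
+}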
+
+/* To be used by DVFS event log FSM logging */
+typedef struct event_log_rte_dvfs_fsm_struct {
+ event_log_generic_fsm_struct_t generic_fsm;
+ uint32 data[]; /* Any other information relevant to this state transition */
+} event_log_rte_dvfs_fsm_struct_t;
+
+#endif /* _EVENT_LOG_PAYLOAD_H_ */
diff --git a/bcmdhd.101.10.361.x/include/event_log_set.h b/bcmdhd.101.10.361.x/include/event_log_set.h
new file mode 100755
index 0000000..5e098d8
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/event_log_set.h
@@ -0,0 +1,142 @@
+/*
+ * EVENT_LOG system definitions
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _EVENT_LOG_SET_H_
+#define _EVENT_LOG_SET_H_
+
+/* Set assignments */
+#define EVENT_LOG_SET_BUS (0u)
+#define EVENT_LOG_SET_WL (1u)
+#define EVENT_LOG_SET_PSM (2u)
+#define EVENT_LOG_SET_ERROR (3u)
+
+/* MSCH logging */
+#define EVENT_LOG_SET_MSCH_PROFILER (4u)
+
+/* A particular customer uses sets 5, 6, and 7. There is a request
+ * not to name these log sets, as naming could limit the customer's
+ * ability to use different log sets in the future.
+ * Sets 5, 6, and 7 are instantiated by the host.
+ * In that case, ecounters could be mapped to any set that the host
+ * configures. They may or may not use set 5.
+ */
+#define EVENT_LOG_SET_5 (5u)
+#define EVENT_LOG_SET_ECOUNTERS (EVENT_LOG_SET_5)
+#define EVENT_LOG_SET_6 (6u)
+#define EVENT_LOG_SET_7 (7u)
+
+/* Temporary change to satisfy compilation across branches.
+ * Will be removed after check-in.
+ */
+#define EVENT_LOG_SET_8 (8u)
+#define EVENT_LOG_SET_PRSRV (EVENT_LOG_SET_8)
+
+#define EVENT_LOG_SET_9 (9u)
+/* General purpose preserve chatty.
+ * The EVENT_LOG_SET_PRSRV_CHATTY log set should not be used by FW, as it
+ * is used by the customer host. FW should use EVENT_LOG_SET_GP_PRSRV_CHATTY
+ * for general purpose preserve chatty logs.
+ */
+#define EVENT_LOG_SET_GP_PRSRV_CHATTY (EVENT_LOG_SET_9)
+#define EVENT_LOG_SET_PRSRV_CHATTY (EVENT_LOG_SET_6)
+
+/* BUS preserve */
+#define EVENT_LOG_SET_PRSRV_BUS (10u)
+
+/* WL preserve */
+#define EVENT_LOG_SET_PRSRV_WL (11u)
+
+/* Slotted BSS set */
+#define EVENT_LOG_SET_WL_SLOTTED_BSS (12u)
+
+/* PHY entity logging */
+#define EVENT_LOG_SET_PHY (13u)
+
+/* PHY preserve */
+#define EVENT_LOG_SET_PRSRV_PHY (14u)
+
+/* RTE entity */
+#define EVENT_LOG_SET_RTE (15u)
+
+/* Malloc and free logging */
+#define EVENT_LOG_SET_MEM_API (16u)
+
+/* Console buffer */
+#define EVENT_LOG_SET_RTE_CONS_BUF (17u)
+
+/* three log sets for general debug purposes */
+#define EVENT_LOG_SET_GENERAL_DBG_1 (18u)
+#define EVENT_LOG_SET_GENERAL_DBG_2 (19u)
+#define EVENT_LOG_SET_GENERAL_DBG_3 (20u)
+
+/* Log sets for capturing power related logs. Note that these sets
+ * are to be used across the entire system, not just WL.
+ */
+#define EVENT_LOG_SET_POWER_1 (21u)
+#define EVENT_LOG_SET_POWER_2 (22u)
+
+/* Used for timestamp plotting, TS_LOG() */
+#define EVENT_LOG_SET_TS_LOG (23u)
+
+/* BUS preserve chatty */
+#define EVENT_LOG_SET_PRSRV_BUS_CHATTY (24u)
+
+/* PRESERVE_PERIODIC_LOG_SET */
+/* flushed at every period if the host is in D0 */
+#define EVENT_LOG_SET_PRSV_PERIODIC (25u)
+
+/* AMT logging and other related information */
+#define EVENT_LOG_SET_AMT (26u)
+
+/* State machine logging. Part of preserve logs */
+#define EVENT_LOG_SET_FSM (27u)
+
+/* wbus related logging */
+#define EVENT_LOG_SET_WBUS (28u)
+
+/* bcm trace logging */
+#define EVENT_LOG_SET_BCM_TRACE (29u)
+
+/* For PM alert related logging */
+#define EVENT_LOG_SET_WL_PS_LOG (30u)
+
+#ifndef NUM_EVENT_LOG_SETS
+/* Set a maximum number of sets here. It is not dynamic, for
+ * efficiency of the EVENT_LOG calls. Old branches could define
+ * this to an appropriate number in their makefiles to reduce
+ * ROM invalidation.
+ */
+#ifdef NUM_EVENT_LOG_SETS_V2
+/* for v2, everything has become unsigned */
+#define NUM_EVENT_LOG_SETS (31u)
+#else /* NUM_EVENT_LOG_SETS_V2 */
+#define NUM_EVENT_LOG_SETS (31)
+#endif /* NUM_EVENT_LOG_SETS_V2 */
+#endif /* NUM_EVENT_LOG_SETS */
+
+/* send delayed logs when >= 50% of buffer is full */
+#ifndef ECOUNTERS_DELAYED_FLUSH_PERCENTAGE
+#define ECOUNTERS_DELAYED_FLUSH_PERCENTAGE (50)
+#endif
+
+#endif /* _EVENT_LOG_SET_H_ */
diff --git a/bcmdhd.101.10.361.x/include/event_log_tag.h b/bcmdhd.101.10.361.x/include/event_log_tag.h
new file mode 100755
index 0000000..54d93c6
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/event_log_tag.h
@@ -0,0 +1,617 @@
+/*
+ * EVENT_LOG system definitions
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _EVENT_LOG_TAG_H_
+#define _EVENT_LOG_TAG_H_
+
+#include <typedefs.h>
+
+/* Define new event log tags here */
+#define EVENT_LOG_TAG_NULL 0 /* Special null tag */
+#define EVENT_LOG_TAG_TS 1 /* Special timestamp tag */
+
+/* HSIC Legacy support */
+/* Possible candidates for reuse */
+#define EVENT_LOG_TAG_BUS_OOB 2
+#define EVENT_LOG_TAG_BUS_STATE 3
+#define EVENT_LOG_TAG_BUS_PROTO 4
+#define EVENT_LOG_TAG_BUS_CTL 5
+#define EVENT_LOG_TAG_BUS_EVENT 6
+#define EVENT_LOG_TAG_BUS_PKT 7
+#define EVENT_LOG_TAG_BUS_FRAME 8
+#define EVENT_LOG_TAG_BUS_DESC 9
+#define EVENT_LOG_TAG_BUS_SETUP 10
+#define EVENT_LOG_TAG_BUS_MISC 11
+
+#ifdef WLAWDL
+#define EVENT_LOG_TAG_AWDL_ERR 12
+#define EVENT_LOG_TAG_AWDL_WARN 13
+#define EVENT_LOG_TAG_AWDL_INFO 14
+#define EVENT_LOG_TAG_AWDL_DEBUG 15
+#define EVENT_LOG_TAG_AWDL_TRACE_TIMER 16
+#define EVENT_LOG_TAG_AWDL_TRACE_SYNC 17
+#define EVENT_LOG_TAG_AWDL_TRACE_CHAN 18
+#define EVENT_LOG_TAG_AWDL_TRACE_DP 19
+#define EVENT_LOG_TAG_AWDL_TRACE_MISC 20
+#define EVENT_LOG_TAG_AWDL_TEST 21
+#endif /* WLAWDL */
+
+#define EVENT_LOG_TAG_SRSCAN 22
+#define EVENT_LOG_TAG_PWRSTATS_INFO 23
+
+#ifdef WLAWDL
+#define EVENT_LOG_TAG_AWDL_TRACE_CHANSW 24
+#define EVENT_LOG_TAG_AWDL_TRACE_PEER_OPENCLOSE 25
+#endif /* WLAWDL */
+
+/* Timestamp logging for plotting. */
+#define EVENT_LOG_TAG_TSLOG 26
+
+/* Possible candidates for reuse */
+#define EVENT_LOG_TAG_UCODE_FIFO 27
+
+#define EVENT_LOG_TAG_SCAN_TRACE_LOW 28
+#define EVENT_LOG_TAG_SCAN_TRACE_HIGH 29
+#define EVENT_LOG_TAG_SCAN_ERROR 30
+#define EVENT_LOG_TAG_SCAN_WARN 31
+#define EVENT_LOG_TAG_MPF_ERR 32
+#define EVENT_LOG_TAG_MPF_WARN 33
+#define EVENT_LOG_TAG_MPF_INFO 34
+#define EVENT_LOG_TAG_MPF_DEBUG 35
+#define EVENT_LOG_TAG_EVENT_INFO 36
+#define EVENT_LOG_TAG_EVENT_ERR 37
+#define EVENT_LOG_TAG_PWRSTATS_ERROR 38
+#define EVENT_LOG_TAG_EXCESS_PM_ERROR 39
+#define EVENT_LOG_TAG_IOCTL_LOG 40
+#define EVENT_LOG_TAG_PFN_ERR 41
+#define EVENT_LOG_TAG_PFN_WARN 42
+#define EVENT_LOG_TAG_PFN_INFO 43
+#define EVENT_LOG_TAG_PFN_DEBUG 44
+#define EVENT_LOG_TAG_BEACON_LOG 45
+#define EVENT_LOG_TAG_WNM_BSSTRANS_INFO 46
+#define EVENT_LOG_TAG_TRACE_CHANSW 47
+#define EVENT_LOG_TAG_PCI_ERROR 48
+#define EVENT_LOG_TAG_PCI_TRACE 49
+#define EVENT_LOG_TAG_PCI_WARN 50
+#define EVENT_LOG_TAG_PCI_INFO 51
+#define EVENT_LOG_TAG_PCI_DBG 52
+#define EVENT_LOG_TAG_PCI_DATA 53
+#define EVENT_LOG_TAG_PCI_RING 54
+
+#ifdef WLAWDL
+/* EVENT_LOG_TAG_AWDL_TRACE_RANGING will be removed after wlc_ranging merge from IGUANA
+ * keeping it here to avoid compilation error on trunk
+ */
+#define EVENT_LOG_TAG_AWDL_TRACE_RANGING 55
+#endif /* WLAWDL */
+
+#define EVENT_LOG_TAG_RANGING_TRACE 55
+#define EVENT_LOG_TAG_WL_ERROR 56
+#define EVENT_LOG_TAG_PHY_ERROR 57
+#define EVENT_LOG_TAG_OTP_ERROR 58
+#define EVENT_LOG_TAG_NOTIF_ERROR 59
+#define EVENT_LOG_TAG_MPOOL_ERROR 60
+#define EVENT_LOG_TAG_OBJR_ERROR 61
+#define EVENT_LOG_TAG_DMA_ERROR 62
+#define EVENT_LOG_TAG_PMU_ERROR 63
+#define EVENT_LOG_TAG_BSROM_ERROR 64
+#define EVENT_LOG_TAG_SI_ERROR 65
+#define EVENT_LOG_TAG_ROM_PRINTF 66
+#define EVENT_LOG_TAG_RATE_CNT 67
+#define EVENT_LOG_TAG_CTL_MGT_CNT 68
+#define EVENT_LOG_TAG_AMPDU_DUMP 69
+#define EVENT_LOG_TAG_MEM_ALLOC_SUCC 70
+#define EVENT_LOG_TAG_MEM_ALLOC_FAIL 71
+#define EVENT_LOG_TAG_MEM_FREE 72
+#define EVENT_LOG_TAG_WL_ASSOC_LOG 73
+#define EVENT_LOG_TAG_WL_PS_LOG 74
+#define EVENT_LOG_TAG_WL_ROAM_LOG 75
+#define EVENT_LOG_TAG_WL_MPC_LOG 76
+#define EVENT_LOG_TAG_WL_WSEC_LOG 77
+#define EVENT_LOG_TAG_WL_WSEC_DUMP 78
+#define EVENT_LOG_TAG_WL_MCNX_LOG 79
+#define EVENT_LOG_TAG_HEALTH_CHECK_ERROR 80
+#define EVENT_LOG_TAG_HNDRTE_EVENT_ERROR 81
+#define EVENT_LOG_TAG_ECOUNTERS_ERROR 82
+#define EVENT_LOG_TAG_WL_COUNTERS 83
+#define EVENT_LOG_TAG_ECOUNTERS_IPCSTATS 84
+#define EVENT_LOG_TAG_WL_P2P_LOG 85
+#define EVENT_LOG_TAG_SDIO_ERROR 86
+#define EVENT_LOG_TAG_SDIO_TRACE 87
+#define EVENT_LOG_TAG_SDIO_DBG 88
+#define EVENT_LOG_TAG_SDIO_PRHDRS 89
+#define EVENT_LOG_TAG_SDIO_PRPKT 90
+#define EVENT_LOG_TAG_SDIO_INFORM 91
+#define EVENT_LOG_TAG_MIMO_PS_ERROR 92
+#define EVENT_LOG_TAG_MIMO_PS_TRACE 93
+#define EVENT_LOG_TAG_MIMO_PS_INFO 94
+#define EVENT_LOG_TAG_BTCX_STATS 95
+#define EVENT_LOG_TAG_LEAKY_AP_STATS 96
+
+#ifdef WLAWDL
+#define EVENT_LOG_TAG_AWDL_TRACE_ELECTION 97
+#endif /* WLAWDL */
+
+#define EVENT_LOG_TAG_MIMO_PS_STATS 98
+#define EVENT_LOG_TAG_PWRSTATS_PHY 99
+#define EVENT_LOG_TAG_PWRSTATS_SCAN 100
+
+#ifdef WLAWDL
+#define EVENT_LOG_TAG_PWRSTATS_AWDL 101
+#endif /* WLAWDL */
+
+#define EVENT_LOG_TAG_PWRSTATS_WAKE_V2 102
+#define EVENT_LOG_TAG_LQM 103
+#define EVENT_LOG_TAG_TRACE_WL_INFO 104
+#define EVENT_LOG_TAG_TRACE_BTCOEX_INFO 105
+#define EVENT_LOG_TAG_ECOUNTERS_TIME_DATA 106
+#define EVENT_LOG_TAG_NAN_ERROR 107
+#define EVENT_LOG_TAG_NAN_INFO 108
+#define EVENT_LOG_TAG_NAN_DBG 109
+#define EVENT_LOG_TAG_STF_ARBITRATOR_ERROR 110
+#define EVENT_LOG_TAG_STF_ARBITRATOR_TRACE 111
+#define EVENT_LOG_TAG_STF_ARBITRATOR_WARN 112
+#define EVENT_LOG_TAG_SCAN_SUMMARY 113
+#define EVENT_LOG_TAG_PROXD_SAMPLE_COLLECT 114
+#define EVENT_LOG_TAG_OCL_INFO 115
+#define EVENT_LOG_TAG_RSDB_PMGR_DEBUG 116
+#define EVENT_LOG_TAG_RSDB_PMGR_ERR 117
+#define EVENT_LOG_TAG_NAT_ERR 118
+#define EVENT_LOG_TAG_NAT_WARN 119
+#define EVENT_LOG_TAG_NAT_INFO 120
+#define EVENT_LOG_TAG_NAT_DEBUG 121
+#define EVENT_LOG_TAG_STA_INFO 122
+#define EVENT_LOG_TAG_PROXD_ERROR 123
+#define EVENT_LOG_TAG_PROXD_TRACE 124
+#define EVENT_LOG_TAG_PROXD_INFO 125
+#define EVENT_LOG_TAG_IE_ERROR 126
+#define EVENT_LOG_TAG_ASSOC_ERROR 127
+#define EVENT_LOG_TAG_SCAN_ERR 128
+#define EVENT_LOG_TAG_AMSDU_ERROR 129
+#define EVENT_LOG_TAG_AMPDU_ERROR 130
+#define EVENT_LOG_TAG_KM_ERROR 131
+#define EVENT_LOG_TAG_DFS 132
+#define EVENT_LOG_TAG_REGULATORY 133
+#define EVENT_LOG_TAG_CSA 134
+#define EVENT_LOG_TAG_WNM_BSSTRANS_ERR 135
+#define EVENT_LOG_TAG_SUP_INFO 136
+#define EVENT_LOG_TAG_SUP_ERROR 137
+#define EVENT_LOG_TAG_CHANCTXT_TRACE 138
+#define EVENT_LOG_TAG_CHANCTXT_INFO 139
+#define EVENT_LOG_TAG_CHANCTXT_ERROR 140
+#define EVENT_LOG_TAG_CHANCTXT_WARN 141
+#define EVENT_LOG_TAG_MSCHPROFILE 142
+#define EVENT_LOG_TAG_4WAYHANDSHAKE 143
+#define EVENT_LOG_TAG_MSCHPROFILE_TLV 144
+#define EVENT_LOG_TAG_ADPS 145
+#define EVENT_LOG_TAG_MBO_DBG 146
+#define EVENT_LOG_TAG_MBO_INFO 147
+#define EVENT_LOG_TAG_MBO_ERR 148
+#define EVENT_LOG_TAG_TXDELAY 149
+#define EVENT_LOG_TAG_BCNTRIM_INFO 150
+#define EVENT_LOG_TAG_BCNTRIM_TRACE 151
+#define EVENT_LOG_TAG_OPS_INFO 152
+#define EVENT_LOG_TAG_STATS 153
+#define EVENT_LOG_TAG_BAM 154
+#define EVENT_LOG_TAG_TXFAIL 155
+
+#ifdef WLAWDL
+#define EVENT_LOG_TAG_AWDL_CONFIG_DBG 156
+#define EVENT_LOG_TAG_AWDL_SYNC_DBG 157
+#define EVENT_LOG_TAG_AWDL_PEER_DBG 158
+#endif /* WLAWDL */
+
+#define EVENT_LOG_TAG_RANDMAC_INFO 159
+#define EVENT_LOG_TAG_RANDMAC_DBG 160
+#define EVENT_LOG_TAG_RANDMAC_ERR 161
+
+#ifdef WLAWDL
+#define EVENT_LOG_TAG_AWDL_DFSP_DBG 162
+#endif /* WLAWDL */
+
+#define EVENT_LOG_TAG_MSCH_CAL 163
+#define EVENT_LOG_TAG_MSCH_OPP_CAL 164
+#define EVENT_LOG_TAG_MSCH 165
+#define EVENT_LOG_TAG_NAN_SYNC 166
+#define EVENT_LOG_TAG_NAN_DPE 167
+#define EVENT_LOG_TAG_NAN_SCHED 168
+#define EVENT_LOG_TAG_NAN_RNG 169
+#define EVENT_LOG_TAG_NAN_DAM 170
+#define EVENT_LOG_TAG_NAN_NA 171
+#define EVENT_LOG_TAG_NAN_NDL 172
+#define EVENT_LOG_TAG_NAN_NDP 173
+#define EVENT_LOG_TAG_NAN_SEC 174
+#define EVENT_LOG_TAG_NAN_MAC 175
+#define EVENT_LOG_TAG_NAN_FSM 176
+
+#define EVENT_LOG_TAG_TPA_ERR 192
+#define EVENT_LOG_TAG_TPA_INFO 193
+#define EVENT_LOG_TAG_OCE_DBG 194
+#define EVENT_LOG_TAG_OCE_INFO 195
+#define EVENT_LOG_TAG_OCE_ERR 196
+#define EVENT_LOG_TAG_WL_WARN 197
+#define EVENT_LOG_TAG_SB_ERR 198
+#define EVENT_LOG_TAG_SB_INFO 199
+#define EVENT_LOG_TAG_SB_SCHED 200
+#define EVENT_LOG_TAG_ADPS_INFO 201
+#define EVENT_LOG_TAG_SB_CMN_SYNC_INFO 202
+#define EVENT_LOG_TAG_PHY_CAL_INFO 203 /* PHY CALs scheduler info */
+#define EVENT_LOG_TAG_EVT_NOTIF_INFO 204
+#define EVENT_LOG_TAG_PHY_HC_ERROR 205
+#define EVENT_LOG_TAG_PHY_TXPWR_WARN 206
+#define EVENT_LOG_TAG_PHY_TXPWR_INFO 207
+#define EVENT_LOG_TAG_PHY_ACI_INFO 208
+#define EVENT_LOG_TAG_WL_COUNTERS_AUX 209
+#define EVENT_LOG_TAG_AMPDU_DUMP_AUX 210
+
+#ifdef WLAWDL
+#define EVENT_LOG_TAG_PWRSTATS_AWDL_AUX 211
+#endif /* WLAWDL */
+
+#define EVENT_LOG_TAG_PWRSTATS_PHY_AUX 212
+#define EVENT_LOG_TAG_PWRSTATS_SCAN_AUX 213
+#define EVENT_LOG_TAG_PWRSTATS_WAKE_V2_AUX 214
+#define EVENT_LOG_TAG_SVT_TESTING 215 /* SVT testing/verification */
+#define EVENT_LOG_TAG_HND_SMD_ERROR 216
+#define EVENT_LOG_TAG_PSBW_INFO 217
+#define EVENT_LOG_TAG_PHY_CAL_DBG 218
+#define EVENT_LOG_TAG_FILS_DBG 219
+#define EVENT_LOG_TAG_FILS_INFO 220
+#define EVENT_LOG_TAG_FILS_ERROR 221
+#define EVENT_LOG_TAG_UNUSED1 222
+#define EVENT_LOG_TAG_UNUSED2 223
+#define EVENT_LOG_TAG_PPR_ERROR 224
+
+/* Arbitrator callback log tags */
+#define EVENT_LOG_TAG_STF_ARB_CB_TRACE 224
+#define EVENT_LOG_TAG_STF_ARB_CB_ERROR 225
+#define EVENT_LOG_TAG_PHY_PERIODIC_SEC 226
+#define EVENT_LOG_TAG_RTE_ERROR 227
+#define EVENT_LOG_TAG_CPLT_ERROR 228
+#define EVENT_LOG_TAG_DNGL_ERROR 229
+#define EVENT_LOG_TAG_NVRAM_ERROR 230
+#define EVENT_LOG_TAG_NAC 231
+#define EVENT_LOG_TAG_HP2P_ERR 232
+#define EVENT_LOG_TAG_SB_SCHED_DBG_SYNC 233
+#define EVENT_LOG_TAG_ENHANCED_TS 234
+
+/* Available space for new tags for Dingo, Iguana and branches
+ * prior to Koala only. From Koala onwards, new tags must be greater
+ * than 255. If a tag is required for Koala and legacy productization branches,
+ * add that tag here. Tags > 255 generate an extended header, which legacy
+ * code does not understand.
+ */
+
+/* Debug tags for making debug builds */
+#define EVENT_LOG_TAG_DBG1 251
+#define EVENT_LOG_TAG_DBG2 252
+#define EVENT_LOG_TAG_DBG3 253
+#define EVENT_LOG_TAG_DBG4 254
+#define EVENT_LOG_TAG_DBG5 255
+
+/* Insert new tags here for Koala onwards */
+
+/* NAN INFO/ERR event tags */
+#define EVENT_LOG_TAG_NAN_SYNC_INFO 256
+#define EVENT_LOG_TAG_NAN_DPE_INFO 257
+#define EVENT_LOG_TAG_NAN_SCHED_INFO 258
+#define EVENT_LOG_TAG_NAN_RNG_INFO 259
+#define EVENT_LOG_TAG_NAN_DAM_INFO 260
+#define EVENT_LOG_TAG_NAN_NA_INFO 261
+#define EVENT_LOG_TAG_NAN_NDL_INFO 262
+#define EVENT_LOG_TAG_NAN_NDP_INFO 263
+#define EVENT_LOG_TAG_NAN_SEC_INFO 264
+#define EVENT_LOG_TAG_NAN_MAC_INFO 265
+#define EVENT_LOG_TAG_NAN_FSM_INFO 266
+#define EVENT_LOG_TAG_NAN_PEER_INFO 267
+#define EVENT_LOG_TAG_NAN_AVAIL_INFO 268
+#define EVENT_LOG_TAG_NAN_CMN_INFO 269
+#define EVENT_LOG_TAG_NAN_SYNC_ERR 270
+#define EVENT_LOG_TAG_NAN_DPE_ERR 271
+#define EVENT_LOG_TAG_NAN_SCHED_ERR 272
+#define EVENT_LOG_TAG_NAN_RNG_ERR 273
+#define EVENT_LOG_TAG_NAN_DAM_ERR 274
+#define EVENT_LOG_TAG_NAN_NA_ERR 275
+#define EVENT_LOG_TAG_NAN_NDL_ERR 276
+#define EVENT_LOG_TAG_NAN_NDP_ERR 277
+#define EVENT_LOG_TAG_NAN_SEC_ERR 278
+#define EVENT_LOG_TAG_NAN_MAC_ERR 279
+#define EVENT_LOG_TAG_NAN_FSM_ERR 280
+#define EVENT_LOG_TAG_NAN_PEER_ERR 281
+#define EVENT_LOG_TAG_NAN_AVAIL_ERR 282
+#define EVENT_LOG_TAG_NAN_CMN_ERR 283
+
+/* More NAN DBG evt Tags */
+#define EVENT_LOG_TAG_NAN_PEER 284
+#define EVENT_LOG_TAG_NAN_AVAIL 285
+#define EVENT_LOG_TAG_NAN_CMN 286
+
+#define EVENT_LOG_TAG_SAE_ERROR 287
+#define EVENT_LOG_TAG_SAE_INFO 288
+
+/* rxsig module logging */
+#define EVENT_LOG_TAG_RXSIG_ERROR 289
+#define EVENT_LOG_TAG_RXSIG_DEBUG 290
+#define EVENT_LOG_TAG_RXSIG_INFO 291
+
+/* HE TWT HEB EVENT_LOG_TAG */
+#define EVENT_LOG_TAG_WL_HE_INFO 292
+#define EVENT_LOG_TAG_WL_HE_TRACE 293
+#define EVENT_LOG_TAG_WL_HE_WARN 294
+#define EVENT_LOG_TAG_WL_HE_ERROR 295
+#define EVENT_LOG_TAG_WL_TWT_INFO 296
+#define EVENT_LOG_TAG_WL_TWT_TRACE 297
+#define EVENT_LOG_TAG_WL_TWT_WARN 298
+#define EVENT_LOG_TAG_WL_TWT_ERROR 299
+#define EVENT_LOG_TAG_WL_HEB_ERROR 300
+#define EVENT_LOG_TAG_WL_HEB_TRACE 301
+
+/* RRM EVENT_LOG_TAG */
+#define EVENT_LOG_TAG_RRM_DBG 302
+#define EVENT_LOG_TAG_RRM_INFO 303
+#define EVENT_LOG_TAG_RRM_ERR 304
+
+/* scan core */
+#define EVENT_LOG_TAG_SC 305
+
+#define EVENT_LOG_TAG_ESP_DBG 306
+#define EVENT_LOG_TAG_ESP_INFO 307
+#define EVENT_LOG_TAG_ESP_ERR 308
+
+/* SDC */
+#define EVENT_LOG_TAG_SDC_DBG 309
+#define EVENT_LOG_TAG_SDC_INFO 310
+#define EVENT_LOG_TAG_SDC_ERR 311
+
+/* RTE */
+#define EVENT_LOG_TAG_RTE_ERR 312
+
+/* TX FIFO */
+#define EVENT_LOG_TAG_FIFO_INFO 313
+
+/* PKTTS */
+#define EVENT_LOG_TAG_LATENCY_INFO 314
+
+/* TDLS */
+#define EVENT_LOG_TAG_WL_TDLS_INFO 315
+#define EVENT_LOG_TAG_WL_TDLS_DBG 316
+#define EVENT_LOG_TAG_WL_TDLS_ERR 317
+
+/* MSCH messages */
+#define EVENT_LOG_TAG_MSCH_DATASTRUCT 319 /* don't use, kept for backward compatibility */
+#define EVENT_LOG_TAG_MSCH_PROFILE 319
+#define EVENT_LOG_TAG_MSCH_REGISTER 320
+#define EVENT_LOG_TAG_MSCH_CALLBACK 321
+#define EVENT_LOG_TAG_MSCH_ERROR 322
+#define EVENT_LOG_TAG_MSCH_DEBUG 323
+#define EVENT_LOG_TAG_MSCH_INFORM 324
+#define EVENT_LOG_TAG_MSCH_TRACE 325
+
+/* bus low power related info messages */
+#define EVENT_LOG_TAG_WL_BUS_LP_INFO 326
+#define EVENT_LOG_TAG_PCI_LP_INFO 327
+
+/* SBSS BT-Coex */
+#define EVENT_LOG_TAG_SB_BTCX_INFO 328
+
+/* wbus */
+#define EVENT_LOG_TAG_WBUS_ERR 329
+#define EVENT_LOG_TAG_WBUS_INFO 330
+#define EVENT_LOG_TAG_WBUS_SCHED 331
+
+/* MODESW */
+#define EVENT_LOG_TAG_MODESW_ERR 332
+
+/* LPHS */
+#define EVENT_LOG_TAG_LPHS_ERR 333
+
+/* CPU statistics */
+#define EVENT_LOG_TAG_ARM_STAT 334
+
+/* Event log tags for SOE */
+#define EVENT_LOG_TAG_SOE_ERROR 335
+#define EVENT_LOG_TAG_SOE_INFO 336
+
+/* Event log tags for GCI Shared Memory */
+#define EVENT_LOG_TAG_GCISHM_ERR 337
+#define EVENT_LOG_TAG_GCISHM_INFO 338
+
+/* Event log tags for Enhanced Roam Log */
+#define EVENT_LOG_TAG_ROAM_ENHANCED_LOG 339
+
+/* WL BTCEC */
+#define EVENT_LOG_TAG_BTCEC_ERR 340
+#define EVENT_LOG_TAG_BTCEC_INFO 341
+#define EVENT_LOG_TAG_BTCEC_SCHED 342
+
+#ifdef WLAWDL
+#define EVENT_LOG_TAG_AWDL_HC 343
+#endif /* WLAWDL */
+
+#ifdef SLOT_SCHED
+#define EVENT_LOG_TAG_SBSS_HC 344
+#endif /* SLOT_SCHED */
+
+/* wlc_chan_cal */
+#define EVENT_LOG_TAG_WCC_ERR 345
+#define EVENT_LOG_TAG_WCC_INFO 346
+#define EVENT_LOG_TAG_WCC_TRACE 347
+
+/* AMT logging */
+#define EVENT_LOG_TAG_AMT_ERR 348
+#define EVENT_LOG_TAG_AMT_INFO 349
+#define EVENT_LOG_TAG_AMT_TRACE 350
+
+/* OBSS hw logging */
+#define EVENT_LOG_TAG_WLC_OBSS_ERR 351
+#define EVENT_LOG_TAG_WLC_OBSS_TRACE 352
+#define EVENT_LOG_TAG_WLC_OBSS_INFO 353
+
+#define EVENT_LOG_TAG_ALLOC_TRACE 354
+
+/* ASSOC and SUP state machine log tags */
+#define EVENT_LOG_TAG_ASSOC_SM 355
+#define EVENT_LOG_TAG_SUP_SM 356
+/* Place holders for additional state machine logging */
+#define EVENT_LOG_TAG_AUTH_SM 357
+#define EVENT_LOG_TAG_SAE_SM 358
+#define EVENT_LOG_TAG_FTM_SM 359
+#define EVENT_LOG_TAG_NAN_SM 360
+
+/* HP2P - RLLW logging */
+#define EVENT_LOG_TAG_RLLW_TRACE 361
+
+#define EVENT_LOG_TAG_SDTC_INFO 362
+#define EVENT_LOG_TAG_SDTC_ERR 363
+
+/* KEEPALIVE logging */
+#define EVENT_LOG_TAG_KEEPALIVE 364
+#define EVENT_LOG_TAG_DTIM_SCHED_LOG 365
+
+/* For printing PHY init time in the event logs for both slices. */
+#define EVENT_LOG_TAG_PHY_INIT_TM 366
+
+/* SensorC Coex logging */
+#define EVENT_LOG_TAG_SSCCX_ERR 367
+#define EVENT_LOG_TAG_SSCCX_INFO 368
+#define EVENT_LOG_TAG_SSCCX_TRACE 369
+/* TAG for channel info */
+#define EVENT_LOG_TAG_SCAN_CHANNEL_INFO 370
+/* Robust Audio Video (RAV) - Mirrored Stream Classification Service (MSCS) */
+#define EVENT_LOG_TAG_RAV_MSCS_ERROR 371
+#define EVENT_LOG_TAG_RAV_MSCS_INFO 372
+
+/* DVFS state machine related tag */
+#define EVENT_LOG_TAG_DVFS_SM 373
+
+/* IPL info */
+#define EVENT_LOG_TAG_IPL_INFO 374
+
+/* bcmtrace */
+#define EVENT_LOG_TAG_BCM_TRACE 375
+
+/* noise cal */
+#define EVENT_LOG_TAG_NOISE_CAL 376
+
+/* FTM hw */
+#define EVENT_LOG_TAG_FTM_HW_ERR 377
+#define EVENT_LOG_TAG_FTM_HW_INFO 378
+#define EVENT_LOG_TAG_FTM_HW_TRACE 379
+
+#define EVENT_LOG_TAG_NOISE_CAL_DBG 380
+
+/* EHT EVENT_LOG_TAG */
+#define EVENT_LOG_TAG_WL_EHT_INFO 381
+#define EVENT_LOG_TAG_WL_EHT_TRACE 382
+#define EVENT_LOG_TAG_WL_EHT_WARN 383
+#define EVENT_LOG_TAG_WL_EHT_ERROR 384
+
+#define EVENT_LOG_TAG_CHNCTX_INFO 385
+#define EVENT_LOG_TAG_CHNCTX_ERROR 386
+#define EVENT_LOG_TAG_ECOUNTERS_INFORM 387
+#define EVENT_LOG_TAG_STA_SC_OFLD_ERR 388
+
+#define EVENT_LOG_TAG_PKTFLTR_INFO 389
+#define EVENT_LOG_TAG_PKTFLTR_TRACE 390
+#define EVENT_LOG_TAG_PKTFLTR_WARN 391
+#define EVENT_LOG_TAG_PKTFLTR_ERROR 392
+/* EVENT_LOG_TAG_MAX = set to the same value as the last tag, not last tag + 1 */
+#define EVENT_LOG_TAG_MAX 392
+
+typedef enum wl_el_set_type_def {
+ EVENT_LOG_SET_TYPE_DEFAULT = 0, /* flush the log buffer when it is full - Default option */
+ EVENT_LOG_SET_TYPE_PRSRV = 1, /* flush the log buffer based on fw or host trigger */
+ EVENT_LOG_SET_TYPE_DFLUSH = 2 /* flush the log buffer once the watermark is reached */
+} wl_el_set_type_def_t;
+
+#define EVENT_LOG_TAG_FLUSH_NONE 0x00 /* No flush */
+#define EVENT_LOG_TAG_FLUSH_ALL 0x40 /* Flush all preserved sets */
+#define EVENT_LOG_TAG_FLUSH_SETNUM 0x80 /* Flush preserved set */
+#define EVENT_LOG_TAG_FLUSH_MASK 0x3f /* SetNum Mask */
+
+typedef enum wl_el_flush_type {
+	EL_TAG_PRSRV_FLUSH_NONE = 0, /* No flush of preserve buf on this tag */
+	EL_TAG_PRSRV_FLUSH_SETNUM, /* Flush the buffer set specified on this tag */
+	EL_TAG_PRSRV_FLUSH_ALL /* Flush all preserved buffer sets on this tag */
+} wl_el_flush_type_t;
+
+#define EVENT_LOG_FLUSH_CURRENT_VERSION 0
+typedef struct wl_el_set_flush_prsrv_s {
+ uint16 version;
+ uint16 len;
+ uint16 tag; /* Tag for which preserve flush should be done */
+ uint8 flush_type; /* Check wl_el_flush_type_t */
+ uint8 set_num; /* Log set num to flush. Max is NUM_EVENT_LOG_SETS. Valid only when
+ * action is EVENT_LOG_TAG_FLUSH_SETNUM
+ */
+} wl_el_set_flush_prsrv_t;
+
+#define SD_PRHDRS(i, s, h, p, n, l)
+#define SD_PRPKT(m, b, n)
+#define SD_INFORM(args)
+
+/* Flags for tag control */
+#define EVENT_LOG_TAG_FLAG_NONE 0
+#define EVENT_LOG_TAG_FLAG_LOG 0x80
+#define EVENT_LOG_TAG_FLAG_PRINT 0x40
+#define EVENT_LOG_TAG_FLAG_SET_MASK 0x3f
+
+/* Each event log entry has a type. The type is the LAST word of the
+ * entry. The printing code walks the event entries in reverse
+ * order to find the first entry.
+ */
+typedef union event_log_hdr {
+ struct {
+ uint8 tag; /* Event_log entry tag */
+ uint8 count; /* Count of 4-byte entries */
+ uint16 fmt_num; /* Format number */
+ };
+ uint32 t; /* Type cheat */
+} event_log_hdr_t;
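A minimal host-side sketch of the reverse walk described above, assuming a little-endian 32-bit host and C11 anonymous structs, and ignoring the timestamp words and extended headers described further below; demo_log_hdr_t and demo_walk_back are illustrative names, not part of this header:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef union {
	struct {
		uint8_t tag;		/* entry tag */
		uint8_t count;		/* count of 4-byte payload words */
		uint16_t fmt_num;	/* format number */
	};
	uint32_t t;
} demo_log_hdr_t;			/* mirrors event_log_hdr_t above */

static void demo_walk_back(const uint32_t *buf, size_t nwords)
{
	size_t i = nwords;

	while (i > 0) {
		demo_log_hdr_t h;

		h.t = buf[--i];		/* the LAST word of an entry is its header */
		if (h.count > i)
			break;		/* malformed or partial entry: stop */
		printf("tag %u fmt %u count %u\n",
		       (unsigned)h.tag, (unsigned)h.fmt_num, (unsigned)h.count);
		i -= h.count;		/* step back over this entry's payload */
	}
}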
+
+/* for internal use - legacy max. tag */
+#define EVENT_LOG_TAG_MAX_LEGACY_FORMAT 255
+
+/*
+ * The position of the extended header in the event log stream will be as follows:
+ * <event log payload><ARM cycle count timestamp><extended header><regular header>
+ * Extended header could be due to count > 255 or tag > 255.
+ *
+ * Extended count: 6 bits long. 8 bits (existing) + 6 bits =>
+ * 2^14 words = 65536 bytes payload max
+ * Extended count field is currently reserved
+ * Extended tag: 8 (existing) + 4 bits = 12 bits =>2^12 = 4096 tags
+ * bits[7..4] of extended tags are reserved.
+ * MSB 16 bits of the extended header are reserved for future use.
+ */
+
+typedef union event_log_extended_hdr {
+ struct {
+ uint8 extended_tag; /* Extended tag, bits[7..4] are reserved */
+ uint8 extended_count; /* Extended count. Reserved for now. */
+ uint16 rsvd; /* Reserved */
+ };
+
+ uint32 t; /* Type cheat */
+} event_log_extended_hdr_t;
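Under the bit layout sketched in the comment above (low 8 bits from the regular header's tag, bits [3:0] of extended_tag on top), one plausible host-side reassembly of the 12-bit tag; this helper is illustrative and not defined by the header:

#define DEMO_EXT_TAG_NIBBLE_MASK	0x0Fu

static uint16 demo_full_tag(uint8 regular_tag, uint8 extended_tag)
{
	/* bits [7:4] of extended_tag are reserved, so mask them off */
	return (uint16)(((extended_tag & DEMO_EXT_TAG_NIBBLE_MASK) << 8) | regular_tag);
}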
+#endif /* _EVENT_LOG_TAG_H_ */
diff --git a/bcmdhd.101.10.361.x/include/event_trace.h b/bcmdhd.101.10.361.x/include/event_trace.h
new file mode 100755
index 0000000..3be93c6
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/event_trace.h
@@ -0,0 +1,187 @@
+/*
+ * Trace log blocks sent over HBUS
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+/**
+ * @file
+ * @brief
+ * Define the trace event ID and tag ID
+ */
+
+#ifndef _WL_DIAG_H
+#define _WL_DIAG_H
+
+#include <event_log.h>
+
+#define DIAG_MAJOR_VERSION 1 /* 4 bits */
+#define DIAG_MINOR_VERSION 0 /* 4 bits */
+#define DIAG_MICRO_VERSION 0 /* 4 bits */
+
+#define DIAG_VERSION \
+ ((DIAG_MICRO_VERSION&0xF) | (DIAG_MINOR_VERSION&0xF)<<4 | \
+ (DIAG_MAJOR_VERSION&0xF)<<8)
+ /* bit[11:8] major ver */
+ /* bit[7:4] minor ver */
+ /* bit[3:0] micro ver */
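Illustrative unpack helpers (not defined by this header) that mirror the packing above; with the 1.0.0 values here, DIAG_VERSION evaluates to 0x100:

#define DEMO_DIAG_VER_MAJOR(v)	(((v) >> 8) & 0xF)	/* bits [11:8] */
#define DEMO_DIAG_VER_MINOR(v)	(((v) >> 4) & 0xF)	/* bits [7:4]  */
#define DEMO_DIAG_VER_MICRO(v)	((v) & 0xF)		/* bits [3:0]  */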
+
+/* Event IDs for trace purposes only; they start from 0x8000 to avoid
+ * conflicts with future new WLC_E_ values
+ */
+#define TRACE_FW_AUTH_STARTED 0x8000
+#define TRACE_FW_ASSOC_STARTED 0x8001
+#define TRACE_FW_RE_ASSOC_STARTED 0x8002
+#define TRACE_G_SCAN_STARTED 0x8003
+#define TRACE_ROAM_SCAN_STARTED 0x8004
+#define TRACE_ROAM_SCAN_COMPLETE 0x8005
+#define TRACE_FW_EAPOL_FRAME_TRANSMIT_START 0x8006
+#define TRACE_FW_EAPOL_FRAME_TRANSMIT_STOP 0x8007
+#define TRACE_BLOCK_ACK_NEGOTIATION_COMPLETE 0x8008 /* protocol status */
+#define TRACE_BT_COEX_BT_SCO_START 0x8009
+#define TRACE_BT_COEX_BT_SCO_STOP 0x800a
+#define TRACE_BT_COEX_BT_SCAN_START 0x800b
+#define TRACE_BT_COEX_BT_SCAN_STOP 0x800c
+#define TRACE_BT_COEX_BT_HID_START 0x800d
+#define TRACE_BT_COEX_BT_HID_STOP 0x800e
+#define TRACE_ROAM_AUTH_STARTED 0x800f
+/* Event ID for NAN, start from 0x9000 */
+#define TRACE_NAN_CLUSTER_STARTED 0x9000
+#define TRACE_NAN_CLUSTER_JOINED 0x9001
+#define TRACE_NAN_CLUSTER_MERGED 0x9002
+#define TRACE_NAN_ROLE_CHANGED 0x9003
+#define TRACE_NAN_SCAN_COMPLETE 0x9004
+#define TRACE_NAN_STATUS_CHNG 0x9005
+
+/* Parameters of wifi logger events are TLVs */
+/* Event parameters tags are defined as: */
+#define TRACE_TAG_VENDOR_SPECIFIC 0 /* take a byte stream as parameter */
+#define TRACE_TAG_BSSID 1 /* takes a 6 bytes MAC address as parameter */
+#define TRACE_TAG_ADDR 2 /* takes a 6 bytes MAC address as parameter */
+#define TRACE_TAG_SSID 3 /* takes a 32 bytes SSID address as parameter */
+#define TRACE_TAG_STATUS 4 /* takes an integer as parameter */
+#define TRACE_TAG_CHANNEL_SPEC 5 /* takes one or more wifi_channel_spec as */
+ /* parameter */
+#define TRACE_TAG_WAKE_LOCK_EVENT 6 /* takes a wake_lock_event struct as parameter */
+#define TRACE_TAG_ADDR1 7 /* takes a 6 bytes MAC address as parameter */
+#define TRACE_TAG_ADDR2 8 /* takes a 6 bytes MAC address as parameter */
+#define TRACE_TAG_ADDR3 9 /* takes a 6 bytes MAC address as parameter */
+#define TRACE_TAG_ADDR4 10 /* takes a 6 bytes MAC address as parameter */
+#define TRACE_TAG_TSF 11 /* take a 64 bits TSF value as parameter */
+#define TRACE_TAG_IE 12 /* take one or more specific 802.11 IEs */
+ /* parameter, IEs are in turn indicated in */
+ /* TLV format as per 802.11 spec */
+#define TRACE_TAG_INTERFACE 13 /* take interface name as parameter */
+#define TRACE_TAG_REASON_CODE 14 /* take a reason code as per 802.11 */
+ /* as parameter */
+#define TRACE_TAG_RATE_MBPS 15 /* take a wifi rate in 0.5 mbps */
+#define TRACE_TAG_REQUEST_ID 16 /* take an integer as parameter */
+#define TRACE_TAG_BUCKET_ID 17 /* take an integer as parameter */
+#define TRACE_TAG_GSCAN_PARAMS 18 /* takes a wifi_scan_cmd_params struct as parameter */
+#define TRACE_TAG_GSCAN_CAPABILITIES 19 /* takes a wifi_gscan_capabilities struct as parameter */
+#define TRACE_TAG_SCAN_ID 20 /* take an integer as parameter */
+#define TRACE_TAG_RSSI 21 /* take an integer as parameter */
+#define TRACE_TAG_CHANNEL 22 /* take an integer as parameter */
+#define TRACE_TAG_LINK_ID 23 /* take an integer as parameter */
+#define TRACE_TAG_LINK_ROLE 24 /* take an integer as parameter */
+#define TRACE_TAG_LINK_STATE 25 /* take an integer as parameter */
+#define TRACE_TAG_LINK_TYPE 26 /* take an integer as parameter */
+#define TRACE_TAG_TSCO 27 /* take an integer as parameter */
+#define TRACE_TAG_RSCO 28 /* take an integer as parameter */
+#define TRACE_TAG_EAPOL_MESSAGE_TYPE 29 /* take an integer as parameter */
+ /* M1-1, M2-2, M3-3, M4-4 */
+
+typedef union {
+ struct {
+ uint16 event: 16;
+ uint16 version: 16;
+ };
+ uint32 t;
+} wl_event_log_id_ver_t;
+
+#define ETHER_ADDR_PACK_LOW(addr) (((addr)->octet[3])<<24 | ((addr)->octet[2])<<16 | \
+ ((addr)->octet[1])<<8 | ((addr)->octet[0]))
+#define ETHER_ADDR_PACK_HI(addr) (((addr)->octet[5])<<8 | ((addr)->octet[4]))
+#define SSID_PACK(addr) (((uint8)(addr)[0])<<24 | ((uint8)(addr)[1])<<16 | \
+ ((uint8)(addr)[2])<<8 | ((uint8)(addr)[3]))
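A standalone sketch of the same packing (the demo_* names and the stand-in MAC struct are illustrative; the driver's struct ether_addr comes from its own headers). Octets 0..3 land in the low word with octet[0] in the least-significant byte, and octets 4..5 in the low half of the second word; SSID_PACK packs the first four SSID bytes in the opposite byte order:

#include <stdint.h>

struct demo_ether_addr {
	uint8_t octet[6];	/* stand-in for struct ether_addr */
};

static uint32_t demo_pack_low(const struct demo_ether_addr *a)
{
	/* same layout as ETHER_ADDR_PACK_LOW above */
	return ((uint32_t)a->octet[3] << 24) | ((uint32_t)a->octet[2] << 16) |
	       ((uint32_t)a->octet[1] << 8) | (uint32_t)a->octet[0];
}

static uint32_t demo_pack_hi(const struct demo_ether_addr *a)
{
	/* same layout as ETHER_ADDR_PACK_HI above */
	return ((uint32_t)a->octet[5] << 8) | (uint32_t)a->octet[4];
}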
+
+/* for each event id with logging data, define its logging data structure */
+
+typedef union {
+ struct {
+ uint16 status: 16;
+ uint16 paraset: 16;
+ };
+ uint32 t;
+} wl_event_log_blk_ack_t;
+
+typedef union {
+ struct {
+ uint8 mode: 8;
+ uint8 count: 8;
+ uint16 ch: 16;
+ };
+ uint32 t;
+} wl_event_log_csa_t;
+
+typedef union {
+ struct {
+ uint8 status: 1;
+ uint16 notused: 15;
+ uint16 frag_tx_cnt: 16;
+ };
+ uint32 t;
+} wl_event_log_eapol_tx_t;
+
+typedef union {
+ struct {
+ uint16 tag;
+ uint16 length; /* length of value in bytes */
+ };
+ uint32 t;
+} wl_event_log_tlv_hdr_t;
+
+#ifdef WL_EVENT_LOG_COMPILE
+#define _WL_EVENT_LOG(tag, event, ...) \
+ do { \
+ event_log_top_t * event_log_top = event_log_get_top(); \
+ wl_event_log_id_ver_t entry = {{event, DIAG_VERSION}}; \
+ event_log_top->timestamp = OSL_SYSUPTIME(); \
+ EVENT_LOG(tag, "WL event", entry.t , ## __VA_ARGS__); \
+ } while (0)
+#define WL_EVENT_LOG(args) _WL_EVENT_LOG args
+#else
+#define WL_EVENT_LOG(args)
+#endif /* WL_EVENT_LOG_COMPILE */
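A hypothetical call site, using two values defined earlier in this patch. The double parentheses matter: WL_EVENT_LOG(args) expands to _WL_EVENT_LOG args, so the inner parenthesized list becomes the inner macro's argument list, and the whole statement compiles away when WL_EVENT_LOG_COMPILE is undefined (NAN_EVENT_LOG below follows the same convention):

	WL_EVENT_LOG((EVENT_LOG_TAG_TRACE_WL_INFO, TRACE_ROAM_SCAN_STARTED));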
+
+#ifdef NAN_EVENT_LOG_COMPILE
+#define _NAN_EVENT_LOG(tag, event, ...) \
+ do { \
+ event_log_top_t * event_log_top = event_log_get_top(); \
+ wl_event_log_id_ver_t hdr = {{event, DIAG_VERSION}}; \
+ event_log_top->timestamp = OSL_SYSUPTIME(); \
+ EVENT_LOG(tag, "NAN event", hdr.t , ## __VA_ARGS__); \
+ } while (0)
+#define NAN_EVENT_LOG(args) _NAN_EVENT_LOG args
+#else
+#define NAN_EVENT_LOG(args)
+#endif /* NAN_EVENT_LOG_COMPILE */
+
+#endif /* _WL_DIAG_H */
diff --git a/bcmdhd.101.10.361.x/include/fils.h b/bcmdhd.101.10.361.x/include/fils.h
new file mode 100755
index 0000000..1797abf
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/fils.h
@@ -0,0 +1,424 @@
+/*
+ * Fundamental types and constants relating to FILS AUTHENTICATION
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _FILSAUTH_H_
+#define _FILSAUTH_H_
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+/* 11ai D6.0 8.6.8.36 FILS Discovery frame format
+ category
+ action
+ fils_discovery_info_field_t
+ fils_rnr_element_t
+ fils_indication_element_t
+ fils_vendor_specific_element_t
+*/
+
+/* 11revmc D4.0 8.4.2.25 Vendor Specific element */
+typedef BWL_PRE_PACKED_STRUCT struct fils_vendor_specific_element {
+ uint8 elementid;
+ uint8 length;
+ /* variable len info */
+ uint8 orgid_vendorspecific_content[];
+} BWL_POST_PACKED_STRUCT fils_vendor_specific_element_t;
+
+#define FILS_VS_ELEM_HDR_LEN (sizeof(fils_vendor_specific_element_t))
+
+/* 11ai D6.0 8.4.2.178 FILS Indication element */
+typedef BWL_PRE_PACKED_STRUCT struct fils_indication_element {
+ uint8 elementid;
+ uint8 length;
+ uint16 fils_info;
+ /* variable len info */
+ uint8 cache_domain_publickey_id[];
+} BWL_POST_PACKED_STRUCT fils_indication_element_t;
+
+#define FILS_INDICATION_ELEM_HDR_LEN (sizeof(fils_indication_element_t))
+
+#define FILS_INDICATION_IE_TAG_FIXED_LEN 2
+
+#define FI_INFO_CACHE_IND_SUBFIELD_SIZE 2
+
+/* FILS Indication Information field */
+#define FI_INFO_PUB_KEY_IDENTS_MASK (0x0007)
+#define FI_INFO_REALM_IDENTS_MASK (0x0038)
+#define FI_INFO_IP_ADDR_CFG_MASK (0x0040)
+#define FI_INFO_CACHE_IDENT_MASK (0x0080)
+#define FI_INFO_HESSID_MASK (0x0100)
+#define FI_INFO_SHRKEY_AUTH_WOPFS_MASK (0x0200)
+#define FI_INFO_SHRKEY_AUTH_WPFS_MASK (0x0400)
+#define FI_INFO_PUBKEY_AUTH_MASK (0x0800)
+
+#define FI_INFO_CACHE_IDENT(fc) (((fc) & FI_INFO_CACHE_IDENT_MASK) != 0)
+#define FI_INFO_HESSID(fc) (((fc) & FI_INFO_HESSID_MASK) != 0)
+#define FI_INFO_SHRKEY_AUTH_WOPFS(fc) (((fc) & FI_INFO_SHRKEY_AUTH_WOPFS_MASK) != 0)
+#define FI_INFO_SHRKEY_AUTH_WPFS(fc) (((fc) & FI_INFO_SHRKEY_AUTH_WPFS_MASK) != 0)
+
+typedef struct ether_addr tbtt_bssid_t;
+
+/* As per 802.11ax D5.0, Table 9-281, TBTT Information field contents. */
+
+typedef BWL_PRE_PACKED_STRUCT union rnr_tbtt_info_field {
+ BWL_PRE_PACKED_STRUCT struct len2 {
+ uint8 tbtt_offset;
+ uint8 bss_params;
+ } BWL_POST_PACKED_STRUCT len2_t;
+
+ BWL_PRE_PACKED_STRUCT struct len5 {
+ uint8 tbtt_offset;
+ uint32 short_ssid;
+ } BWL_POST_PACKED_STRUCT len5_t;
+
+ BWL_PRE_PACKED_STRUCT struct len6 {
+ uint8 tbtt_offset;
+ uint32 short_ssid;
+ uint8 bss_params;
+ } BWL_POST_PACKED_STRUCT len6_t;
+
+ BWL_PRE_PACKED_STRUCT struct len7 {
+ uint8 tbtt_offset;
+ tbtt_bssid_t bssid;
+ } BWL_POST_PACKED_STRUCT len7_t;
+
+ BWL_PRE_PACKED_STRUCT struct len8 {
+ uint8 tbtt_offset;
+ tbtt_bssid_t bssid;
+ uint8 bss_params;
+ } BWL_POST_PACKED_STRUCT len8_t;
+
+ BWL_PRE_PACKED_STRUCT struct len9 {
+ uint8 tbtt_offset;
+ tbtt_bssid_t bssid;
+ uint8 bss_params;
+ uint8 psd_20mhz;
+ } BWL_POST_PACKED_STRUCT len9_t;
+
+ BWL_PRE_PACKED_STRUCT struct len11 {
+ uint8 tbtt_offset;
+ tbtt_bssid_t bssid;
+ uint32 short_ssid;
+ } BWL_POST_PACKED_STRUCT len11_t;
+
+ BWL_PRE_PACKED_STRUCT struct len12 {
+ uint8 tbtt_offset;
+ tbtt_bssid_t bssid;
+ uint32 short_ssid;
+ uint8 bss_params;
+ } BWL_POST_PACKED_STRUCT len12_t;
+
+ BWL_PRE_PACKED_STRUCT struct len13 {
+ uint8 tbtt_offset;
+ tbtt_bssid_t bssid;
+ uint32 short_ssid;
+ uint8 bss_params;
+ uint8 psd_20mhz;
+ } BWL_POST_PACKED_STRUCT len13_t;
+} BWL_POST_PACKED_STRUCT rnr_tbtt_info_field_t;
+
+/* 11ai D11.0 9.4.2.171.1 TBTT Information field */
+typedef BWL_PRE_PACKED_STRUCT struct tbtt_info_field {
+ uint8 tbtt_offset;
+ struct ether_addr bssid;
+ uint32 short_ssid;
+ uint8 bss_params;
+} BWL_POST_PACKED_STRUCT tbtt_info_field_t;
+#define TBTT_INFO_FIELD_HDR_LEN (sizeof(tbtt_info_field_t))
+
+/* 11ai D11.0 9.4.2.171.1 Neighbor AP Information field */
+typedef BWL_PRE_PACKED_STRUCT struct neighbor_ap_info_field {
+ uint16 tbtt_info_header;
+ uint8 op_class;
+ uint8 channel;
+ /* variable len info */
+ uint8 tbtt_info_field[];
+} BWL_POST_PACKED_STRUCT neighbor_ap_info_field_t;
+
+#define NEIGHBOR_AP_INFO_FIELD_HDR_LEN (sizeof(neighbor_ap_info_field_t))
+
+/* 11ai D11.0 9.4.2.171 Reduced Neighbor Report element */
+typedef BWL_PRE_PACKED_STRUCT struct fils_rnr_element {
+ uint8 elementid;
+ uint8 length;
+ /* variable len info */
+ uint8 neighbor_ap_info[];
+} BWL_POST_PACKED_STRUCT fils_rnr_element_t;
+
+#define FILS_RNR_ELEM_HDR_LEN (sizeof(fils_rnr_element_t))
+
+/* TBTT Info Header macros */
+#define TBTT_INFO_HDR_FIELD_TYPE_MASK (0x0003u)
+#define TBTT_INFO_HDR_FN_AP_MASK (0x0004u)
+#define TBTT_INFO_HDR_COUNT_MASK (0x00f0u)
+#define TBTT_INFO_HDR_LENGTH_MASK (0xff00u)
+
+#define TBTT_INFO_HDR_FIELD_TYPE(hdr)\
+ ((hdr) & TBTT_INFO_HDR_FIELD_TYPE_MASK)
+#define TBTT_INFO_HDR_FN_AP(hdr)\
+ (((hdr) & TBTT_INFO_HDR_FN_AP_MASK) != 0)
+#define TBTT_INFO_HDR_COUNT(hdr)\
+ (((hdr) & TBTT_INFO_HDR_COUNT_MASK) >> 4u)
+#define TBTT_INFO_HDR_LENGTH(hdr)\
+ (((hdr) & TBTT_INFO_HDR_LENGTH_MASK) >> 8u)
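A quick worked decode using a hypothetical header word: 0x0d20 encodes length 13 (the len13_t variant above), count 2, and field type 0:

#include <assert.h>

static void demo_tbtt_hdr(void)
{
	uint16 hdr = 0x0d20u;	/* hypothetical TBTT Information header */

	assert(TBTT_INFO_HDR_FIELD_TYPE(hdr) == 0u);
	assert(!TBTT_INFO_HDR_FN_AP(hdr));
	assert(TBTT_INFO_HDR_COUNT(hdr) == 2u);
	assert(TBTT_INFO_HDR_LENGTH(hdr) == 13u);
}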
+
+/* BSS Params Macros */
+#define RNR_BSS_PARAMS_OCT_REC_MASK (0x01u)
+#define RNR_BSS_PARAMS_SAME_SSID_MASK (0x02u)
+#define RNR_BSS_PARAMS_MUTIPLE_BSSID_MASK (0x04u)
+#define RNR_BSS_PARAMS_TRANSMITTED_BSSID_MASK (0x08u)
+#define RNR_BSS_MEMBER_OF_ESS_MASK (0x10u)
+#define RNR_BSS_20_TU_PRB_RSP_ACTIVE_MASK (0x20u)
+#define RNR_BSS_COLOCATED_AP_MASK (0x40u)
+
+#define RNR_BSS_PARAMS_OCT_REC(bss)\
+ (((bss) & RNR_BSS_PARAMS_OCT_REC_MASK) != 0)
+#define RNR_BSS_PARAMS_SAME_SSID(bss)\
+ (((bss) & RNR_BSS_PARAMS_SAME_SSID_MASK) != 0)
+#define RNR_BSS_PARAMS_MUTIPLE_BSSID(bss)\
+ (((bss) & RNR_BSS_PARAMS_MUTIPLE_BSSID_MASK) != 0)
+#define RNR_BSS_PARAMS_TRANSMITTED_BSSID(bss)\
+ (((bss) & RNR_BSS_PARAMS_TRANSMITTED_BSSID_MASK) != 0)
+#define RNR_BSS_MEMBER_OF_ESS(bss)\
+ (((bss) & RNR_BSS_MEMBER_OF_ESS_MASK) != 0)
+#define RNR_BSS_20_TU_PRB_RSP_ACTIVE(bss)\
+ (((bss) & RNR_BSS_20_TU_PRB_RSP_ACTIVE_MASK) != 0)
+#define RNR_BSS_COLOCATED_AP(bss)\
+ (((bss) & RNR_BSS_COLOCATED_AP_MASK) != 0)
+
+/* TBTT Information field Contents */
+/* NBR_AP TBTT Offset field (1 byte) */
+#define NBR_AP_TBTT_LEN					1U
+
+/* NBR_AP TBTT Offset field (1) + BSS Params (1) = 2 bytes */
+#define NBR_AP_TBTT_BSS_LEN				2U
+
+/* NBR_AP TBTT Offset field (1) + Short SSID (4) = 5 bytes */
+#define NBR_AP_TBTT_SHORT_SSID_LEN			5U
+
+/* NBR_AP TBTT Offset field (1) + Short SSID (4) + BSS (1) = 6 bytes */
+#define NBR_AP_TBTT_BSS_SHORT_SSID_LEN			6U
+
+/* NBR_AP TBTT Offset field (1) + BSSID (6) = 7 bytes */
+#define NBR_AP_TBTT_BSSID_LEN				7U
+
+/* NBR_AP TBTT Offset field (1) + BSSID (6) + BSS (1) = 8 bytes */
+#define NBR_AP_TBTT_BSSID_BSS_LEN			8U
+
+/* NBR_AP TBTT Offset field (1) + BSSID (6) + BSS (1) + 20 MHz PSD (1) = 9 bytes */
+#define NBR_AP_TBTT_BSSID_BSS_PSD_LEN			9U
+
+/* NBR_AP TBTT Offset field (1) + BSSID (6) + Short SSID (4) = 11 bytes */
+#define NBR_AP_TBTT_BSSID_SHORT_SSID_LEN		11U
+
+/* NBR_AP TBTT Offset field (1) + BSSID (6) + Short SSID (4) + BSS (1) = 12 bytes */
+#define NBR_AP_TBTT_BSSID_SHORT_SSID_BSS_LEN		12U
+
+/* NBR_AP TBTT Offset field (1) + BSSID (6) +
+ * Short SSID (4) + BSS (1) + 20 MHz PSD (1) = 13 bytes
+ */
+#define NBR_AP_TBTT_BSSID_SHORT_SSID_BSS_PSD_LEN	13U
+
+/* FILS Nonce element */
+#define FILS_NONCE_LENGTH 16u
+
+typedef BWL_PRE_PACKED_STRUCT struct fils_nonce_element {
+ uint8 elementid;
+ uint8 length;
+ uint8 element_id_ext;
+ uint8 fils_nonce[FILS_NONCE_LENGTH];
+} BWL_POST_PACKED_STRUCT fils_nonce_element_t;
+
+/* 11ai 9.4.2.186 FILS Key Delivery element */
+#define FILS_KEY_RSC_LENGTH 8u
+
+typedef BWL_PRE_PACKED_STRUCT struct fils_key_delivery_element {
+ uint8 elementid;
+ uint8 length;
+ uint8 element_id_ext;
+ uint8 key_rsc[FILS_KEY_RSC_LENGTH];
+ uint8 kde_list[]; /* Key Data Elements */
+} BWL_POST_PACKED_STRUCT fils_key_delivery_element_t;
+
+/* 8.4.2.175 FILS Session element */
+#define FILS_SESSION_LENGTH 8u
+
+typedef BWL_PRE_PACKED_STRUCT struct fils_session_element {
+ uint8 elementid;
+ uint8 length;
+ uint8 element_id_ext;
+ uint8 fils_session[FILS_SESSION_LENGTH];
+} BWL_POST_PACKED_STRUCT fils_session_element_t;
+
+#define FILS_SESSION_ELEM_LEN (sizeof(fils_session_element_t))
+
+/* 9.4.2.179 FILS key confirmation element */
+#define FILS_KEY_CONFIRMATION_HEADER_LEN 3u
+
+typedef BWL_PRE_PACKED_STRUCT struct fils_key_conf_element {
+ uint8 elementid;
+ uint8 length;
+ uint8 element_id_ext;
+ /* variable len info */
+ uint8 key_auth[];
+} BWL_POST_PACKED_STRUCT fils_key_conf_element_t;
+
+/* 8.4.2.174 FILS Key Confirmation element */
+typedef BWL_PRE_PACKED_STRUCT struct fils_key_confirm_element {
+ uint8 elementid;
+ uint8 length;
+ uint8 element_id_ext;
+ /* variable len info */
+ uint8 keyauth[];
+} BWL_POST_PACKED_STRUCT fils_key_confirm_element_t;
+
+#define FILS_CONFIRM_ELEM_HDR_LEN (sizeof(fils_key_confirm_element_t))
+
+/* 9.4.2.180 FILS Public Key element */
+typedef BWL_PRE_PACKED_STRUCT struct fils_public_key_element {
+ uint8 elementid;
+ uint8 length;
+ uint8 element_id_ext;
+ uint8 key_type;
+ /* variable len info */
+ uint8 pub_key[];
+} BWL_POST_PACKED_STRUCT fils_public_key_element_t;
+
+/* 11ai D6.0 8.6.8.36 FILS Discovery frame format */
+typedef BWL_PRE_PACKED_STRUCT struct fils_discovery_info_field {
+ uint16 framecontrol;
+ uint32 timestamp[2];
+ uint16 bcninterval;
+ /* variable len info */
+ uint8 disc_info[];
+} BWL_POST_PACKED_STRUCT fils_discovery_info_field_t;
+
+#define FD_INFO_FIELD_HDR_LEN (sizeof(fils_discovery_info_field_t))
+
+#define FD_INFO_LENGTH_FIELD_SIZE 1u
+#define FD_INFO_CAP_SUBFIELD_SIZE 2u
+#define FD_INFO_OPCLASS_SUBFIED_SIZE 1u
+#define FD_INFO_PRIM_CHAN_SUBFIELD_SIZE 1u
+#define FD_INFO_APCSN_SUBFIELD_SIZE 1u
+#define FD_INFO_ANO_SUBFIELD_SIZE 1u
+#define FD_INFO_RSN_INFO_SUBFIELD_SIZE 5u
+#define FD_INFO_CH_CENTER_FR_SUBFIELD_SIZE 1u
+#define FD_INFO_MD_SUBFIELD_SIZE 3u
+
+/* FILS Discovery Information field */
+#define FD_INFO_SSID_LENGTH_MASK (0x001f)
+#define FD_INFO_CAP_IND_MASK (0x0020)
+#define FD_INFO_SHORT_SSID_IND_MASK (0x0040)
+#define FD_INFO_APCSN_IND_MASK (0x0080)
+#define FD_INFO_ANO_IND_MASK (0x0100)
+#define FD_INFO_CH_CENTER_FR_IND_MASK (0x0200)
+#define FD_INFO_PRIMARY_CH_IND_MASK (0x0400)
+#define FD_INFO_RSN_IND_MASK (0x0800)
+#define FD_INFO_LENGTH_IND_MASK (0x1000)
+#define FD_INFO_MD_IND_MASK (0x2000)
+
+#define FD_INFO_SET_SSID_LENGTH(fc, len) ((fc) |= ((uint16)(len) & FD_INFO_SSID_LENGTH_MASK))
+#define FD_INFO_SET_CAP_PRESENT(fc) ((fc) |= FD_INFO_CAP_IND_MASK)
+#define FD_INFO_SET_SHORT_SSID_PRESENT(fc) ((fc) |= FD_INFO_SHORT_SSID_IND_MASK)
+#define FD_INFO_SET_APCSN_PRESENT(fc) ((fc) |= FD_INFO_APCSN_IND_MASK)
+#define FD_INFO_SET_ANO_PRESENT(fc) ((fc) |= FD_INFO_ANO_IND_MASK)
+#define FD_INFO_SET_CH_CENTER_FR_PRESENT(fc) ((fc) |= FD_INFO_CH_CENTER_FR_IND_MASK)
+#define FD_INFO_SET_PRIMARY_CH_PRESENT(fc) ((fc) |= FD_INFO_PRIMARY_CH_IND_MASK)
+#define FD_INFO_SET_RSN_PRESENT(fc) ((fc) |= FD_INFO_RSN_IND_MASK)
+#define FD_INFO_SET_LENGTH_PRESENT(fc) ((fc) |= FD_INFO_LENGTH_IND_MASK)
+#define FD_INFO_SET_MD_PRESENT(fc) ((fc) |= FD_INFO_MD_IND_MASK)
+
+#define FD_INFO_SSID_LENGTH(fc) ((fc) & FD_INFO_SSID_LENGTH_MASK)
+#define FD_INFO_IS_CAP_PRESENT(fc) (((fc) & FD_INFO_CAP_IND_MASK) != 0)
+#define FD_INFO_IS_SHORT_SSID_PRESENT(fc) (((fc) & FD_INFO_SHORT_SSID_IND_MASK) != 0)
+#define FD_INFO_IS_APCSN_PRESENT(fc) (((fc) & FD_INFO_APCSN_IND_MASK) != 0)
+#define FD_INFO_IS_ANO_PRESENT(fc) (((fc) & FD_INFO_ANO_IND_MASK) != 0)
+#define FD_INFO_IS_CH_CENTER_FR_PRESENT(fc) (((fc) & FD_INFO_CH_CENTER_FR_IND_MASK) != 0)
+#define FD_INFO_IS_PRIMARY_CH_PRESENT(fc) (((fc) & FD_INFO_PRIMARY_CH_IND_MASK) != 0)
+#define FD_INFO_IS_RSN_PRESENT(fc) (((fc) & FD_INFO_RSN_IND_MASK) != 0)
+#define FD_INFO_IS_LENGTH_PRESENT(fc) (((fc) & FD_INFO_LENGTH_IND_MASK) != 0)
+#define FD_INFO_IS_MD_PRESENT(fc) (((fc) & FD_INFO_MD_IND_MASK) != 0)
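A small illustrative build-and-readback of the frame-control word using the setters and getters above (the values are arbitrary):

static void demo_fd_info(void)
{
	uint16 fc = 0;

	FD_INFO_SET_SSID_LENGTH(fc, 7u);	/* SSID length in bits [4:0] */
	FD_INFO_SET_CAP_PRESENT(fc);		/* capability subfield present */
	FD_INFO_SET_PRIMARY_CH_PRESENT(fc);
	/* fc is now 0x0427: FD_INFO_SSID_LENGTH(fc) == 7 and the two
	 * corresponding FD_INFO_IS_*_PRESENT(fc) checks are nonzero.
	 */
}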
+
+/* FILS Discovery Capability subfield */
+#define FD_CAP_ESS_MASK (0x0001)
+#define FD_CAP_PRIVACY_MASK (0x0002)
+#define FD_CAP_BSS_CH_WIDTH_MASK (0x001c)
+#define FD_CAP_MAX_NSS_MASK (0x00e0)
+#define FD_CAP_MULTI_BSS_MASK (0x0200)
+#define FD_CAP_PHY_INDEX_MASK (0x1c00)
+#define FD_CAP_FILS_MIN_RATE_MASK (0xe000)
+
+#define FD_CAP_ESS(cap) (((cap) & FD_CAP_ESS_MASK) != 0)
+#define FD_CAP_PRIVACY(cap) (((cap) & FD_CAP_PRIVACY_MASK) != 0)
+#define FD_CAP_BSS_CH_WIDTH(cap) (((cap) & FD_CAP_BSS_CH_WIDTH_MASK) >> 2)
+#define FD_CAP_MAX_NSS(cap) (((cap) & FD_CAP_MAX_NSS_MASK) >> 5)
+#define FD_CAP_MULTI_BSS(cap) (((cap) & FD_CAP_MULTI_BSS_MASK) != 0)
+#define FD_CAP_PHY_INDEX(cap) (((cap) & FD_CAP_PHY_INDEX_MASK) >> 10)
+#define FD_CAP_FILS_MIN_RATE(cap) (((cap) & FD_CAP_FILS_MIN_RATE_MASK) >> 13)
+
+#define FD_CAP_SET_ESS(cap)			((cap) |= FD_CAP_ESS_MASK)
+#define FD_CAP_SET_PRIVACY(cap)			((cap) |= FD_CAP_PRIVACY_MASK)
+#define FD_CAP_SET_BSS_CH_WIDTH(cap, w)		((cap) |= (((w) << 2) & FD_CAP_BSS_CH_WIDTH_MASK))
+#define FD_CAP_SET_MAX_NSS(cap, nss)		((cap) |= (((nss) << 5) & FD_CAP_MAX_NSS_MASK))
+#define FD_CAP_SET_MULTI_BSS(cap)		((cap) |= FD_CAP_MULTI_BSS_MASK)
+#define FD_CAP_SET_PHY_INDEX(cap, idx)		((cap) |= (((idx) << 10) & FD_CAP_PHY_INDEX_MASK))
+#define FD_CAP_SET_FILS_MIN_RATE(cap, r)	((cap) |= (((r) << 13) & FD_CAP_FILS_MIN_RATE_MASK))
+
+/* 11ai D6.0 8.4.2.173 FILS Request Parameters element */
+typedef BWL_PRE_PACKED_STRUCT struct fils_request_parameters_element {
+ uint8 elementid;
+ uint8 length;
+ uint8 element_id_ext;
+ uint8 params_bitmap;
+ /* variable len info */
+ uint8 params_fields[];
+} BWL_POST_PACKED_STRUCT fils_request_parameters_element_t;
+
+#define FILS_PARAM_MAX_CHANNEL_TIME (1 << 2)
+
+/* 11ai 9.4.2.184 FILS HLP Container element */
+typedef BWL_PRE_PACKED_STRUCT struct fils_hlp_container_element {
+ uint8 elementid;
+ uint8 length;
+ uint8 element_id_ext;
+ uint8 dest_addr[ETHER_ADDR_LEN];
+ uint8 src_addr[ETHER_ADDR_LEN];
+ /* variable len hlp packet */
+ uint8 hlp[];
+} BWL_POST_PACKED_STRUCT fils_hlp_container_element_t;
+
+/* 11ai 9.4.2.184 FILS Wrapped Data element */
+typedef BWL_PRE_PACKED_STRUCT struct fils_wrapped_data_element {
+ uint8 elementid;
+ uint8 length;
+ uint8 element_id_ext;
+ /* variable len wrapped data packet */
+ uint8 wrapped_data[];
+} BWL_POST_PACKED_STRUCT fils_wrapped_data_element_t;
+
+#define FILS_HLP_CONTAINER_ELEM_LEN (sizeof(fils_hlp_container_element_t))
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _FILSAUTH_H_ */
diff --git a/bcmdhd.101.10.361.x/include/hnd_armtrap.h b/bcmdhd.101.10.361.x/include/hnd_armtrap.h
new file mode 100755
index 0000000..ca41851
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/hnd_armtrap.h
@@ -0,0 +1,86 @@
+/*
+ * HND arm trap handling.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _hnd_armtrap_h_
+#define _hnd_armtrap_h_
+
+/* ARM trap handling */
+
+/* Trap types defined by ARM (see arminc.h) */
+
+/* Trap locations in lo memory */
+#define TRAP_STRIDE 4
+#define FIRST_TRAP TR_RST
+#define LAST_TRAP (TR_FIQ * TRAP_STRIDE)
+
+#if defined(__ARM_ARCH_7M__)
+#define MAX_TRAP_TYPE (TR_ISR + ARMCM3_NUMINTS)
+#endif /* __ARM_ARCH_7M__ */
+
+/* The trap structure is defined here as offsets for assembly */
+#define TR_TYPE 0x00
+#define TR_EPC 0x04
+#define TR_CPSR 0x08
+#define TR_SPSR 0x0c
+#define TR_REGS 0x10
+#define TR_REG(n) (TR_REGS + (n) * 4)
+#define TR_SP TR_REG(13)
+#define TR_LR TR_REG(14)
+#define TR_PC TR_REG(15)
+
+/* Number of core ARM registers. */
+#define TR_REGS_NUM 16u
+
+#define TRAP_T_SIZE 80
+#define ASSERT_TRAP_SVC_NUMBER 255
+
+#ifndef _LANGUAGE_ASSEMBLY
+
+#include <typedefs.h>
+
+typedef struct _trap_struct {
+ uint32 type;
+ uint32 epc;
+ uint32 cpsr;
+ uint32 spsr;
+ uint32 r0; /* a1 */
+ uint32 r1; /* a2 */
+ uint32 r2; /* a3 */
+ uint32 r3; /* a4 */
+ uint32 r4; /* v1 */
+ uint32 r5; /* v2 */
+ uint32 r6; /* v3 */
+ uint32 r7; /* v4 */
+ uint32 r8; /* v5 */
+ uint32 r9; /* sb/v6 */
+ uint32 r10; /* sl/v7 */
+ uint32 r11; /* fp/v8 */
+ uint32 r12; /* ip */
+ uint32 r13; /* sp */
+ uint32 r14; /* lr */
+ uint32 pc; /* r15 */
+} trap_t;
+
+#endif /* !_LANGUAGE_ASSEMBLY */
+
+#endif /* _hnd_armtrap_h_ */
diff --git a/bcmdhd.101.10.361.x/include/hnd_cons.h b/bcmdhd.101.10.361.x/include/hnd_cons.h
new file mode 100755
index 0000000..d8ade3d
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/hnd_cons.h
@@ -0,0 +1,98 @@
+/*
+ * Console support for RTE - for host use only.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+#ifndef _hnd_cons_h_
+#define _hnd_cons_h_
+
+#include <typedefs.h>
+
+#if defined(RWL_DONGLE) || defined(UART_REFLECTOR)
+/* For the dongle UART transport the max cmd len is 256 bytes + header length (16 bytes).
+ * In the case of ASD commands we are not sure how large the command is, so
+ * to be on the safe side the input buf len CBUF_LEN is increased to the max (512) bytes.
+ */
+#define RWL_MAX_DATA_LEN	(512 + 8)	/* allow some extra bytes for '\n' termination */
+#define CBUF_LEN (RWL_MAX_DATA_LEN + 64) /* allow 64 bytes for header ("rwl...") */
+#else
+#define CBUF_LEN (128)
+#endif /* RWL_DONGLE || UART_REFLECTOR */
+
+#ifndef LOG_BUF_LEN
+#if defined(BCMDBG) || defined (BCM_BIG_LOG)
+#define LOG_BUF_LEN (16 * 1024)
+#elif defined(ATE_BUILD)
+#define LOG_BUF_LEN (2 * 1024)
+#elif defined(BCMQT)
+#define LOG_BUF_LEN (16 * 1024)
+#else
+#define LOG_BUF_LEN 1024
+#endif
+#endif /* LOG_BUF_LEN */
+
+#ifdef BOOTLOADER_CONSOLE_OUTPUT
+#undef RWL_MAX_DATA_LEN
+#undef CBUF_LEN
+#undef LOG_BUF_LEN
+#define RWL_MAX_DATA_LEN (4 * 1024 + 8)
+#define CBUF_LEN (RWL_MAX_DATA_LEN + 64)
+#define LOG_BUF_LEN (16 * 1024)
+#endif
+
+typedef struct {
+#ifdef BCMDONGLEHOST
+ uint32 buf; /* Can't be pointer on (64-bit) hosts */
+#else
+ /* Physical buffer address, read by host code to dump console. */
+ char* PHYS_ADDR_N(buf);
+#endif
+ uint buf_size;
+ uint idx;
+ uint out_idx; /* output index */
+ uint dump_idx; /* read idx for wl dump */
+} hnd_log_t;
+
+typedef struct {
+ /* Virtual UART
+ * When there is no UART (e.g. Quickturn), the host should write a complete
+ * input line directly into cbuf and then write the length into vcons_in.
+ * This may also be used when there is a real UART (at risk of conflicting with
+ * the real UART). vcons_out is currently unused.
+ */
+ volatile uint vcons_in;
+ volatile uint vcons_out;
+
+ /* Output (logging) buffer
+ * Console output is written to a ring buffer log_buf at index log_idx.
+ * The host may read the output when it sees log_idx advance.
+ * Output will be lost if the output wraps around faster than the host polls.
+ */
+ hnd_log_t log;
+
+ /* Console input line buffer
+ * Characters are read one at a time into cbuf until <CR> is received, then
+ * the buffer is processed as a command line. Also used for virtual UART.
+ */
+ uint cbuf_idx;
+ char cbuf[CBUF_LEN];
+} hnd_cons_t;
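A minimal host-side polling sketch of the ring-buffer protocol the comments above describe, assuming the host has already mapped hnd_cons_t and its log buffer into its own address space (a real host reads them through SDIO/PCIe accessors and must translate the physical buf address); the function and parameter names are illustrative:

#include <stdio.h>

static void demo_poll_console(const hnd_cons_t *cons, const char *log_buf,
	uint *rd_idx)
{
	uint wr_idx = cons->log.idx;	/* write index advances as the dongle logs */

	while (*rd_idx != wr_idx) {
		putchar(log_buf[*rd_idx]);	/* emit one byte from the ring */
		*rd_idx = (*rd_idx + 1) % cons->log.buf_size;
	}
}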
+
+#endif /* _hnd_cons_h_ */
diff --git a/bcmdhd.101.10.361.x/include/hnd_debug.h b/bcmdhd.101.10.361.x/include/hnd_debug.h
new file mode 100755
index 0000000..c7ffe2a
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/hnd_debug.h
@@ -0,0 +1,250 @@
+/*
+ * HND Run Time Environment debug info area
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _HND_DEBUG_H
+#define _HND_DEBUG_H
+
+/* Magic number at a magic location to find HND_DEBUG pointers */
+#define HND_DEBUG_PTR_PTR_MAGIC 0x50504244u /* DBPP */
+
+#ifndef _LANGUAGE_ASSEMBLY
+
+#include <typedefs.h>
+
+/* Includes only when building dongle code */
+#ifdef _RTE_
+#include <event_log.h>
+#include <hnd_trap.h>
+#include <hnd_cons.h>
+#endif
+
+/* We use explicit sizes here since this gets included from different
+ * systems. The sizes must match those of the creating system
+ * (currently 32-bit ARM) since this information is gleaned from a dump.
+ */
+
+#ifdef FWID
+extern uint32 gFWID;
+#endif
+
+enum hnd_debug_reloc_entry_type {
+ HND_DEBUG_RELOC_ENTRY_TYPE_ROM = 0u,
+ HND_DEBUG_RELOC_ENTRY_TYPE_RAM = 1u,
+ HND_DEBUG_RELOC_ENTRY_TYPE_MTH_STACK = 2u, /* main thread stack */
+};
+typedef uint32 hnd_debug_reloc_entry_type_t;
+
+typedef struct hnd_debug_reloc_entry {
+ /* Identifies the type(hnd_debug_reloc_entry_type) of the data */
+ hnd_debug_reloc_entry_type_t type;
+ uint32 phys_addr; /* Physical address */
+ uint32 virt_addr; /* Virtual address */
+ uint32 size; /* Specifies the size of the segment */
+} hnd_debug_reloc_entry_t;
+
+#ifdef _RTE_
+/* Define pointers for normal ARM use */
+#define _HD_EVLOG_P event_log_top_t *
+#define _HD_CONS_P hnd_cons_t *
+#define _HD_TRAP_P trap_t *
+#define _HD_DEBUG_RELOC_ENTRY_P hnd_debug_reloc_entry_t *
+#define _HD_DEBUG_RELOC_P hnd_debug_reloc_t *
+
+#else
+/* Define pointers for use on other systems */
+#define _HD_EVLOG_P uint32
+#define _HD_CONS_P uint32
+#define _HD_TRAP_P uint32
+#define _HD_DEBUG_RELOC_ENTRY_P uint32
+#define _HD_DEBUG_RELOC_P uint32
+
+#endif /* _RTE_ */
+
+/* MMU relocation info in the debug area */
+typedef struct hnd_debug_reloc {
+ _HD_DEBUG_RELOC_ENTRY_P hnd_reloc_ptr; /* contains the pointer to the MMU reloc table */
+ uint32 hnd_reloc_ptr_size; /* Specifies the size of the MMU reloc table */
+} hnd_debug_reloc_t;
+
+/* Number of MMU relocation entries supported in v2 */
+#define RELOC_NUM_ENTRIES 4u
+
+/* Total MMU relocation table size for v2 */
+#define HND_DEBUG_RELOC_PTR_SIZE (RELOC_NUM_ENTRIES * sizeof(hnd_debug_reloc_entry_t))
+
+#define HND_DEBUG_VERSION_1 1u /* Legacy, version 1 */
+#define HND_DEBUG_VERSION_2 2u /* Version 2 contains the MMU information
+ * used for stack virtualization, etc.
+ */
+
+/* Legacy debug version for older branches. */
+#define HND_DEBUG_VERSION HND_DEBUG_VERSION_1
+
+/* This struct is placed at a well-defined location, and contains a pointer to hnd_debug. */
+typedef struct hnd_debug_ptr {
+ uint32 magic;
+
+ /* RAM address of 'hnd_debug'. For legacy versions of this struct, it is a 0-indexed
+ * offset instead.
+ */
+ uint32 hnd_debug_addr;
+
+ /* Base address of RAM. This field does not exist for legacy versions of this struct. */
+ uint32 ram_base_addr;
+
+} hnd_debug_ptr_t;
+extern hnd_debug_ptr_t debug_info_ptr;
+
+#define HND_DEBUG_EPIVERS_MAX_STR_LEN 32u
+
+/* chip id string itself is 8 bytes with null terminator (e.g. 43452a3);
+ * the extra bytes allow for the "chip=" prefix used in the signature.
+ */
+#define HND_DEBUG_BUILD_SIGNATURE_CHIPID_LEN 13u
+
+#define HND_DEBUG_BUILD_SIGNATURE_FWID_LEN 17u
+
+/* ver=abc.abc.abc.abcdefgh size = 24 bytes; 6 bytes extra for expansion */
+#define HND_DEBUG_BUILD_SIGNATURE_VER_LEN 30u
+
+typedef struct hnd_debug {
+ uint32 magic;
+#define HND_DEBUG_MAGIC 0x47424544u /* 'DEBG' */
+
+#ifndef HND_DEBUG_USE_V2
+ uint32 version; /* Legacy, debug struct version */
+#else
+ /* Note: The original uint32 version is split into two fields:
+	 * uint16 version and uint16 length to accommodate future expansion
+	 * of the structure.
+ *
+ * The length field is not populated for the version 1 of the structure.
+ */
+ uint16 version; /* Debug struct version */
+ uint16 length; /* Size of the whole structure in bytes */
+#endif /* HND_DEBUG_USE_V2 */
+
+ uint32 fwid; /* 4 bytes of fw info */
+ char epivers[HND_DEBUG_EPIVERS_MAX_STR_LEN];
+
+ _HD_TRAP_P PHYS_ADDR_N(trap_ptr); /* trap_t data struct physical address. */
+ _HD_CONS_P PHYS_ADDR_N(console); /* Console physical address. */
+
+ uint32 ram_base;
+ uint32 ram_size;
+
+ uint32 rom_base;
+ uint32 rom_size;
+
+ _HD_EVLOG_P event_log_top; /* EVENT_LOG address. */
+
+	/* To populate the fields below,
+ * INCLUDE_BUILD_SIGNATURE_IN_SOCRAM needs to be enabled
+ */
+ char fwid_signature[HND_DEBUG_BUILD_SIGNATURE_FWID_LEN]; /* fwid=<FWID> */
+	/* ver=abc.abc.abc.abcdefgh size = 24 bytes; 6 bytes extra for expansion */
+ char ver_signature[HND_DEBUG_BUILD_SIGNATURE_VER_LEN];
+ char chipid_signature[HND_DEBUG_BUILD_SIGNATURE_CHIPID_LEN]; /* chip=12345a3 */
+
+#ifdef HND_DEBUG_USE_V2
+ /* Version 2 fields */
+ /* Specifies the hnd debug MMU info */
+ _HD_DEBUG_RELOC_P hnd_debug_reloc_ptr;
+#endif /* HND_DEBUG_USE_V2 */
+} hnd_debug_t;
+
+#ifdef HND_DEBUG_USE_V2
+#define HND_DEBUG_V1_SIZE (OFFSETOF(hnd_debug_t, chipid_signature) + \
+ sizeof(((hnd_debug_t *)0)->chipid_signature))
+
+#define HND_DEBUG_V2_BASE_SIZE (OFFSETOF(hnd_debug_t, hnd_debug_reloc_ptr) + \
+ sizeof(((hnd_debug_t *)0)->hnd_debug_reloc_ptr))
+#endif /* HND_DEBUG_USE_V2 */
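+
+/*
+ * Parsing sketch (illustrative, assumes a reader built with HND_DEBUG_USE_V2):
+ * the magic and version fields are common to both layouts, so a dump reader
+ * can size the structure from them.
+ *
+ *	if (dbg->magic == HND_DEBUG_MAGIC) {
+ *		if (dbg->version == HND_DEBUG_VERSION_1)
+ *			len = HND_DEBUG_V1_SIZE;	// length field not populated in v1
+ *		else
+ *			len = dbg->length ? dbg->length : HND_DEBUG_V2_BASE_SIZE;
+ *	}
+ */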
+
+/* The following structure is used in populating build information */
+typedef struct hnd_build_info {
+ uint8 version; /* Same as HND_DEBUG_VERSION */
+ uint8 rsvd[3]; /* Reserved fields for padding purposes */
+	/* To populate the fields below,
+ * INCLUDE_BUILD_SIGNATURE_IN_SOCRAM needs to be enabled
+ */
+ uint32 fwid;
+ uint32 ver[4];
+ char chipid_signature[HND_DEBUG_BUILD_SIGNATURE_CHIPID_LEN]; /* chip=12345a3 */
+} hnd_build_info_t;
+
+/*
+ * timeval_t and prstatus_t are copies of the Linux structures.
+ * Included here because we need the definitions for the target processor
+ * (32 bits) and not the definition on the host this is running on
+ * (which could be 64 bits).
+ */
+
+typedef struct { /* Time value with microsecond resolution */
+ uint32 tv_sec; /* Seconds */
+ uint32 tv_usec; /* Microseconds */
+} timeval_t;
+
+/* Linux/ARM 32 prstatus for notes section */
+typedef struct prstatus {
+ int32 si_signo; /* Signal number */
+ int32 si_code; /* Extra code */
+ int32 si_errno; /* Errno */
+ uint16 pr_cursig; /* Current signal. */
+ uint16 unused;
+ uint32 pr_sigpend; /* Set of pending signals. */
+ uint32 pr_sighold; /* Set of held signals. */
+ uint32 pr_pid;
+ uint32 pr_ppid;
+ uint32 pr_pgrp;
+ uint32 pr_sid;
+ timeval_t pr_utime; /* User time. */
+ timeval_t pr_stime; /* System time. */
+ timeval_t pr_cutime; /* Cumulative user time. */
+ timeval_t pr_cstime; /* Cumulative system time. */
+ uint32 uregs[18];
+ int32 pr_fpvalid; /* True if math copro being used. */
+} prstatus_t;
+
+/* for mkcore and other utilities use */
+#define DUMP_INFO_PTR_PTR_0 0x74
+#define DUMP_INFO_PTR_PTR_1 0x78
+#define DUMP_INFO_PTR_PTR_2 0xf0
+#define DUMP_INFO_PTR_PTR_3 0xf8
+#define DUMP_INFO_PTR_PTR_4 0x874
+#define DUMP_INFO_PTR_PTR_5 0x878
+#define DUMP_INFO_PTR_PTR_END 0xffffffff
+#define DUMP_INFO_PTR_PTR_LIST DUMP_INFO_PTR_PTR_0, \
+ DUMP_INFO_PTR_PTR_1, \
+ DUMP_INFO_PTR_PTR_2, \
+ DUMP_INFO_PTR_PTR_3, \
+ DUMP_INFO_PTR_PTR_4, \
+ DUMP_INFO_PTR_PTR_5, \
+ DUMP_INFO_PTR_PTR_END
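+
+/*
+ * Host-side discovery sketch (illustrative): how a dump utility might locate
+ * hnd_debug_ptr_t using the offset list above. read32()/read_block() are
+ * hypothetical accessors standing in for however the utility reads device RAM.
+ *
+ *	static const uint32 offs[] = { DUMP_INFO_PTR_PTR_LIST };
+ *	uint32 i;
+ *	for (i = 0; offs[i] != DUMP_INFO_PTR_PTR_END; i++) {
+ *		if (read32(ram_base + offs[i]) == HND_DEBUG_PTR_PTR_MAGIC) {
+ *			hnd_debug_ptr_t dip;
+ *			read_block(ram_base + offs[i], &dip, sizeof(dip));
+ *			// dip.hnd_debug_addr now points at hnd_debug_t
+ *			break;
+ *		}
+ *	}
+ */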
+
+extern bool hnd_debug_info_in_trap_context(void);
+
+/* Get build information. */
+extern int hnd_build_info_get(void *ctx, void *arg2, uint32 *buf, uint16 *len);
+
+#endif /* !_LANGUAGE_ASSEMBLY */
+
+#endif /* _HND_DEBUG_H */
diff --git a/bcmdhd.101.10.361.x/include/hnd_pktpool.h b/bcmdhd.101.10.361.x/include/hnd_pktpool.h
new file mode 100755
index 0000000..ce241e8
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/hnd_pktpool.h
@@ -0,0 +1,288 @@
+/*
+ * HND generic packet pool operation primitives
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _hnd_pktpool_h_
+#define _hnd_pktpool_h_
+
+#include <typedefs.h>
+#include <osl.h>
+#include <osl_ext.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* mutex macros for thread safety */
+#ifdef HND_PKTPOOL_THREAD_SAFE
+#define HND_PKTPOOL_MUTEX_DECL(mutex) OSL_EXT_MUTEX_DECL(mutex)
+#else
+#define HND_PKTPOOL_MUTEX_DECL(mutex)
+#endif
+
+#ifdef BCMPKTPOOL
+#define POOL_ENAB(pool) ((pool) && (pool)->inited)
+#else /* BCMPKTPOOL */
+#define POOL_ENAB(pool)		0
+#endif /* BCMPKTPOOL */
+
+#ifndef PKTPOOL_LEN_MAX
+#define PKTPOOL_LEN_MAX 40
+#endif /* PKTPOOL_LEN_MAX */
+#define PKTPOOL_CB_MAX 3
+#define PKTPOOL_CB_MAX_AVL 4
+
+/* REMOVE_RXCPLID is an arg for pktpool callback function for removing rxcplID
+ * and host addr associated with the rxfrag or shared pool buffer during pktpool_reclaim().
+ */
+#define REMOVE_RXCPLID 2
+
+#define FREE_ALL_PKTS 0
+#define FREE_ALL_FRAG_PKTS 1
+
+/* forward declaration */
+struct pktpool;
+
+typedef void (*pktpool_cb_t)(struct pktpool *pool, void *arg);
+typedef struct {
+ pktpool_cb_t cb;
+ void *arg;
+ uint8 refcnt;
+} pktpool_cbinfo_t;
+
+/** PCIe SPLITRX related: call back fn extension to populate host address in pool pkt */
+typedef int (*pktpool_cb_extn_t)(struct pktpool *pool, void *arg1, void* pkt, int arg2,
+ uint *pktcnt);
+typedef struct {
+ pktpool_cb_extn_t cb;
+ void *arg;
+} pktpool_cbextn_info_t;
+
+#ifdef BCMDBG_POOL
+/* pkt pool debug states */
+#define POOL_IDLE 0
+#define POOL_RXFILL 1
+#define POOL_RXDH 2
+#define POOL_RXD11 3
+#define POOL_TXDH 4
+#define POOL_TXD11 5
+#define POOL_AMPDU 6
+#define POOL_TXENQ 7
+
+typedef struct {
+ void *p;
+ uint32 cycles;
+ uint32 dur;
+} pktpool_dbg_t;
+
+typedef struct {
+ uint8 txdh; /* tx to host */
+ uint8 txd11; /* tx to d11 */
+ uint8 enq; /* waiting in q */
+ uint8 rxdh; /* rx from host */
+ uint8 rxd11; /* rx from d11 */
+ uint8 rxfill; /* dma_rxfill */
+ uint8 idle; /* avail in pool */
+} pktpool_stats_t;
+#endif /* BCMDBG_POOL */
+
+typedef struct pktpool {
+ bool inited; /**< pktpool_init was successful */
+ uint8 type; /**< type of lbuf: basic, frag, etc */
+ uint8 id; /**< pktpool ID: index in registry */
+ bool istx; /**< direction: transmit or receive data path */
+ HND_PKTPOOL_MUTEX_DECL(mutex) /**< thread-safe mutex */
+
+ void * freelist; /**< free list: see PKTNEXTFREE(), PKTSETNEXTFREE() */
+ uint16 avail; /**< number of packets in pool's free list */
+ uint16 n_pkts; /**< number of packets managed by pool */
+ uint16 maxlen; /**< maximum size of pool <= PKTPOOL_LEN_MAX */
+ uint16 max_pkt_bytes; /**< size of pkt buffer in [bytes], excluding lbuf|lbuf_frag */
+
+ bool empty;
+ uint8 cbtoggle;
+ uint8 cbcnt;
+ uint8 ecbcnt;
+ uint8 emptycb_disable; /**< Value of type enum pktpool_empty_cb_state */
+ pktpool_cbinfo_t *availcb_excl;
+ pktpool_cbinfo_t cbs[PKTPOOL_CB_MAX_AVL];
+ pktpool_cbinfo_t ecbs[PKTPOOL_CB_MAX];
+ pktpool_cbextn_info_t cbext; /**< PCIe SPLITRX related */
+ pktpool_cbextn_info_t rxcplidfn;
+ pktpool_cbinfo_t dmarxfill;
+ /* variables for pool_heap management */
+ uint32 poolheap_flag;
+	uint16 poolheap_count;		/* Number of allocations done from this pool */
+	uint16 min_backup_buf;		/* Minimum number of buffers that should be kept in pool */
+ bool is_heap_pool; /* Whether this pool can be used as heap */
+ bool release_active;
+ uint8 mem_handle;
+#ifdef BCMDBG_POOL
+ uint8 dbg_cbcnt;
+ pktpool_cbinfo_t dbg_cbs[PKTPOOL_CB_MAX];
+ uint16 dbg_qlen;
+ pktpool_dbg_t dbg_q[PKTPOOL_LEN_MAX + 1];
+#endif
+} pktpool_t;
+
+pktpool_t *get_pktpools_registry(int id);
+#define pktpool_get(pktp) (pktpool_get_ext((pktp), (pktp)->type, NULL))
+
+/* Incarnate a pktpool registry. On success returns total_pools. */
+extern int pktpool_attach(osl_t *osh, uint32 total_pools);
+extern int pktpool_dettach(osl_t *osh); /* Relinquish registry */
+
+extern int pktpool_init(osl_t *osh, pktpool_t *pktp, int *pktplen, int plen, bool istx, uint8 type,
+ bool is_heap_pool, uint32 heap_pool_flag, uint16 min_backup_buf);
+extern int pktpool_deinit(osl_t *osh, pktpool_t *pktp);
+extern int pktpool_fill(osl_t *osh, pktpool_t *pktp, bool minimal);
+extern int pktpool_empty(osl_t *osh, pktpool_t *pktp);
+extern uint16 pktpool_reclaim(osl_t *osh, pktpool_t *pktp, uint16 free_cnt, uint8 action);
+void pktpool_update_freelist(pktpool_t *pktp, void *p, uint pkts_consumed);
+extern void* pktpool_get_ext(pktpool_t *pktp, uint8 type, uint *pktcnt);
+extern void pktpool_free(pktpool_t *pktp, void *p);
+void pktpool_nfree(pktpool_t *pktp, void *head, void *tail, uint count);
+extern int pktpool_add(pktpool_t *pktp, void *p);
+extern int pktpool_avail_notify_normal(osl_t *osh, pktpool_t *pktp);
+extern int pktpool_avail_notify_exclusive(osl_t *osh, pktpool_t *pktp, pktpool_cb_t cb);
+extern int pktpool_avail_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg);
+extern int pktpool_avail_deregister(pktpool_t *pktp, pktpool_cb_t cb, void *arg);
+extern int pktpool_empty_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg);
+extern int pktpool_setmaxlen(pktpool_t *pktp, uint16 max_pkts);
+extern int pktpool_setmaxlen_strict(osl_t *osh, pktpool_t *pktp, uint16 max_pkts);
+extern void pktpool_emptycb_disable(pktpool_t *pktp, bool disable);
+extern bool pktpool_emptycb_disabled(pktpool_t *pktp);
+extern int pktpool_hostaddr_fill_register(pktpool_t *pktp, pktpool_cb_extn_t cb, void *arg1);
+extern int pktpool_rxcplid_fill_register(pktpool_t *pktp, pktpool_cb_extn_t cb, void *arg);
+extern void pktpool_invoke_dmarxfill(pktpool_t *pktp);
+extern int pkpool_haddr_avail_register_cb(pktpool_t *pktp, pktpool_cb_t cb, void *arg);
+extern int pktpool_avail(pktpool_t *pktpool);
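+
+/*
+ * Usage sketch (illustrative only): typical attach/init/get/free lifecycle.
+ * The registry size, packet count, buffer size, type value and the BCME_OK
+ * success check below are editorial assumptions, not values mandated by
+ * this header.
+ *
+ *	pktpool_t pool;
+ *	int n_pkts = 32;	// requested pool size (in/out parameter)
+ *	if (pktpool_attach(osh, 1) > 0 &&
+ *	    pktpool_init(osh, &pool, &n_pkts, 2048, FALSE, 0,
+ *	                 FALSE, 0, 0) == BCME_OK) {
+ *		void *p = pktpool_get(&pool);		// alloc from the pool
+ *		if (p != NULL)
+ *			pktpool_free(&pool, p);		// return it to the free list
+ *	}
+ */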
+
+#define POOLPTR(pp) ((pktpool_t *)(pp))
+#define POOLID(pp) (POOLPTR(pp)->id)
+
+#define POOLSETID(pp, ppid) (POOLPTR(pp)->id = (ppid))
+
+#define pktpool_tot_pkts(pp) (POOLPTR(pp)->n_pkts) /**< n_pkts = avail + in_use <= max_pkts */
+#define pktpool_max_pkt_bytes(pp) (POOLPTR(pp)->max_pkt_bytes)
+#define pktpool_max_pkts(pp) (POOLPTR(pp)->maxlen)
+
+/*
+ * ----------------------------------------------------------------------------
+ * A pool ID is assigned to a pkt pool during pool initialization. This is
+ * done by maintaining a registry of all initialized pools, and the registry
+ * index at which the pool is registered is used as the pool's unique ID.
+ * ID 0 is reserved and is used to signify an invalid pool ID.
+ * All packets henceforth allocated from a pool will be tagged with the pool's
+ * unique ID. Packets allocated from the heap will use the reserved ID = 0.
+ * Packets with a non-zero pool ID signify that they were allocated from a pool.
+ * A maximum of 15 pools is supported, allowing a 4-bit pool ID to be used
+ * in place of a 32-bit pool pointer in each packet (see the sketch below).
+ * ----------------------------------------------------------------------------
+ */
+#define PKTPOOL_INVALID_ID (0)
+#define PKTPOOL_MAXIMUM_ID (15)
+
+/* Registry of pktpool(s) */
+/* Pool ID to/from Pool Pointer converters */
+#define PKTPOOL_ID2PTR(id) (get_pktpools_registry(id))
+#define PKTPOOL_PTR2ID(pp) (POOLID(pp))
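+
+/*
+ * Example (illustrative): mapping between the 4-bit pool ID and the pool
+ * pointer, as described above.
+ *
+ *	uint8 id = PKTPOOL_PTR2ID(pktp);	// tag stored per packet
+ *	pktpool_t *pp = PKTPOOL_ID2PTR(id);	// recover the pool later
+ *	ASSERT(id != PKTPOOL_INVALID_ID && pp == pktp);
+ */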
+
+#ifndef PKTID_POOL
+/* max pktids reserved for pktpool; the value is set appropriately in Makeconf */
+#define PKTID_POOL (PKT_MAXIMUM_ID - 32u)
+#endif /* PKTID_POOL */
+extern uint32 total_pool_pktid_count;
+
+#ifdef BCMDBG_POOL
+extern int pktpool_dbg_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg);
+extern int pktpool_start_trigger(pktpool_t *pktp, void *p);
+extern int pktpool_dbg_dump(pktpool_t *pktp);
+extern int pktpool_dbg_notify(pktpool_t *pktp);
+extern int pktpool_stats_dump(pktpool_t *pktp, pktpool_stats_t *stats);
+#endif /* BCMDBG_POOL */
+
+#ifdef BCMPKTPOOL
+#define SHARED_POOL (pktpool_shared)
+extern pktpool_t *pktpool_shared;
+#ifdef BCMFRAGPOOL
+#define SHARED_FRAG_POOL (pktpool_shared_lfrag)
+extern pktpool_t *pktpool_shared_lfrag;
+#endif
+
+#ifdef BCMALFRAGPOOL
+#define SHARED_ALFRAG_POOL (pktpool_shared_alfrag)
+extern pktpool_t *pktpool_shared_alfrag;
+
+#define SHARED_ALFRAG_DATA_POOL (pktpool_shared_alfrag_data)
+extern pktpool_t *pktpool_shared_alfrag_data;
+#endif
+
+#ifdef BCMRESVFRAGPOOL
+#define RESV_FRAG_POOL (pktpool_resv_lfrag)
+#define RESV_POOL_INFO (resv_pool_info)
+#else
+#define RESV_FRAG_POOL ((struct pktpool *)NULL)
+#define RESV_POOL_INFO (NULL)
+#endif /* BCMRESVFRAGPOOL */
+
+/** PCIe SPLITRX related */
+#define SHARED_RXFRAG_POOL (pktpool_shared_rxlfrag)
+extern pktpool_t *pktpool_shared_rxlfrag;
+
+#define SHARED_RXDATA_POOL (pktpool_shared_rxdata)
+extern pktpool_t *pktpool_shared_rxdata;
+
+int hnd_pktpool_init(osl_t *osh);
+void hnd_pktpool_deinit(osl_t *osh);
+int hnd_pktpool_fill(pktpool_t *pktpool, bool minimal);
+void hnd_pktpool_refill(bool minimal);
+
+#ifdef BCMRESVFRAGPOOL
+extern pktpool_t *pktpool_resv_lfrag;
+extern struct resv_info *resv_pool_info;
+#endif /* BCMRESVFRAGPOOL */
+
+/* Current identified use case flags for pool heap manager */
+#define POOL_HEAP_FLAG_D3 (1 << 0)
+#define POOL_HEAP_FLAG_RSRVPOOL (1 << 1)
+
+#ifdef POOL_HEAP_RECONFIG
+typedef void (*pktpool_heap_cb_t)(void *arg, bool entry);
+
+extern void hnd_pktpool_heap_handle(osl_t *osh, uint32 flag, bool enable);
+extern int hnd_pktpool_heap_register_cb(pktpool_heap_cb_t fn, void *ctxt, uint32 flag);
+extern int hnd_pktpool_heap_deregister_cb(pktpool_heap_cb_t fn);
+extern void *hnd_pktpool_freelist_alloc(uint size, uint alignbits, uint32 flag);
+extern uint16 hnd_pktpool_get_min_bkup_buf(pktpool_t *pktp);
+#endif /* POOL_HEAP_RECONFIG */
+extern uint32 hnd_pktpool_get_total_poolheap_count(void);
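+
+/*
+ * Heap-reconfig sketch (illustrative, POOL_HEAP_RECONFIG builds only; the
+ * association of POOL_HEAP_FLAG_D3 with D3 entry/exit is inferred from the
+ * flag name above, not stated by this header):
+ *
+ *	static void my_heap_cb(void *arg, bool entry) { ... }
+ *
+ *	hnd_pktpool_heap_register_cb(my_heap_cb, ctx, POOL_HEAP_FLAG_D3);
+ *	hnd_pktpool_heap_handle(osh, POOL_HEAP_FLAG_D3, TRUE);		// on entry
+ *	hnd_pktpool_heap_handle(osh, POOL_HEAP_FLAG_D3, FALSE);	// on exit
+ *	hnd_pktpool_heap_deregister_cb(my_heap_cb);
+ */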
+
+#else /* BCMPKTPOOL */
+#define SHARED_POOL ((struct pktpool *)NULL)
+#endif /* BCMPKTPOOL */
+
+#ifdef __cplusplus
+ }
+#endif
+
+#endif /* _hnd_pktpool_h_ */
diff --git a/bcmdhd.101.10.361.x/include/hnd_pktq.h b/bcmdhd.101.10.361.x/include/hnd_pktq.h
new file mode 100755
index 0000000..375ebd8
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/hnd_pktq.h
@@ -0,0 +1,330 @@
+/*
+ * HND generic pktq operation primitives
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _hnd_pktq_h_
+#define _hnd_pktq_h_
+
+#include <osl.h>
+#include <osl_ext.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* mutex macros for thread safety */
+#ifdef HND_PKTQ_THREAD_SAFE
+#define HND_PKTQ_MUTEX_DECL(mutex) OSL_EXT_MUTEX_DECL(mutex)
+#else
+#define HND_PKTQ_MUTEX_DECL(mutex)
+#endif
+
+/* osl multi-precedence packet queue */
+#define PKTQ_LEN_MAX 0xFFFFu /* Max uint16 65535 packets */
+#ifndef PKTQ_LEN_DEFAULT
+#define PKTQ_LEN_DEFAULT 128u /* Max 128 packets */
+#endif
+#ifndef PKTQ_MAX_PREC
+#define PKTQ_MAX_PREC 16 /* Maximum precedence levels */
+#endif
+
+/** Queue for a single precedence level */
+typedef struct pktq_prec {
+ void *head; /**< first packet to dequeue */
+ void *tail; /**< last packet to dequeue */
+ uint16 n_pkts; /**< number of queued packets */
+ uint16 max_pkts; /**< maximum number of queued packets */
+	uint16 stall_count;	/**< # of seconds during which no packets were dequeued */
+ uint16 dequeue_count; /**< # of packets dequeued in last 1 second */
+} pktq_prec_t;
+
+#ifdef PKTQ_LOG
+typedef struct {
+ uint32 requested; /**< packets requested to be stored */
+ uint32 stored; /**< packets stored */
+	uint32 saved;		/**< packets saved,
+				because a lower-priority queue has given away one packet
+				*/
+	uint32 selfsaved;	/**< packets saved,
+				because an older packet from the same queue has been dropped
+				*/
+	uint32 full_dropped;	/**< packets dropped,
+				because pktq is full with higher precedence packets
+				*/
+	uint32 dropped;		/**< packets dropped because pktq per that precedence is full */
+	uint32 sacrificed;	/**< packets dropped,
+				in order to save one from a higher-priority queue
+				*/
+	uint32 busy;		/**< packets dropped because of hardware/transmission error */
+	uint32 retry;		/**< packets re-sent because they were not received */
+	uint32 ps_retry;	/**< packets retried again prior to moving to power save mode */
+ uint32 suppress; /**< packets which were suppressed and not transmitted */
+ uint32 retry_drop; /**< packets finally dropped after retry limit */
+ uint32 max_avail; /**< the high-water mark of the queue capacity for packets -
+ goes to zero as queue fills
+ */
+ uint32 max_used; /**< the high-water mark of the queue utilisation for packets -
+ increases with use ('inverse' of max_avail)
+ */
+ uint32 queue_capacity; /**< the maximum capacity of the queue */
+ uint32 rtsfail; /**< count of rts attempts that failed to receive cts */
+ uint32 acked; /**< count of packets sent (acked) successfully */
+ uint32 txrate_succ; /**< running total of phy rate of packets sent successfully */
+	uint32 txrate_main;	/**< running total of primary phy rate of all packets */
+ uint32 throughput; /**< actual data transferred successfully */
+ uint32 airtime; /**< cumulative total medium access delay in useconds */
+ uint32 _logtime; /**< timestamp of last counter clear */
+} pktq_counters_t;
+
+#define PKTQ_LOG_COMMON \
+ uint32 pps_time; /**< time spent in ps pretend state */ \
+ uint32 _prec_log;
+
+typedef struct {
+ PKTQ_LOG_COMMON
+ pktq_counters_t* _prec_cnt[PKTQ_MAX_PREC]; /**< Counters per queue */
+} pktq_log_t;
+#else
+typedef struct pktq_log pktq_log_t;
+#endif /* PKTQ_LOG */
+
+/** multi-priority packet queue */
+struct pktq {
+ HND_PKTQ_MUTEX_DECL(mutex)
+ pktq_log_t *pktqlog;
+ uint16 num_prec; /**< number of precedences in use */
+ uint16 hi_prec; /**< rapid dequeue hint (>= highest non-empty prec) */
+ uint16 max_pkts; /**< max packets */
+	uint16 n_pkts_tot;	/**< total (cumulative over all precedences) number of packets */
+ /* q array must be last since # of elements can be either PKTQ_MAX_PREC or 1 */
+ struct pktq_prec q[PKTQ_MAX_PREC];
+};
+
+/** simple, non-priority packet queue */
+struct spktq {
+ HND_PKTQ_MUTEX_DECL(mutex)
+ struct pktq_prec q;
+};
+
+#define PKTQ_PREC_ITER(pq, prec) for (prec = (pq)->num_prec - 1; prec >= 0; prec--)
+
+/* fn(pkt, arg). Returns true if pkt belongs to bsscfg */
+typedef bool (*ifpkt_cb_t)(void*, int);
+
+/*
+ * pktq filter support
+ */
+
+/** filter function return values */
+typedef enum {
+ PKT_FILTER_NOACTION = 0, /**< restore the pkt to its position in the queue */
+ PKT_FILTER_DELETE = 1, /**< delete the pkt */
+ PKT_FILTER_REMOVE = 2, /**< do not restore the pkt to the queue,
+ * filter fn has taken ownership of the pkt
+ */
+} pktq_filter_result_t;
+
+/**
+ * Caller supplied filter function to pktq_pfilter(), pktq_filter().
+ * Function filter(ctx, pkt) is called with its ctx pointer on each pkt in the
+ * pktq. When the filter function is called, the supplied pkt will have been
+ * unlinked from the pktq. The filter function returns a pktq_filter_result_t
+ * result specifying the action pktq_filter()/pktq_pfilter() should take for
+ * the pkt.
+ * Here are the actions taken by pktq_filter/pfilter() based on the supplied
+ * filter function's return value:
+ *
+ * PKT_FILTER_NOACTION - The filter will re-link the pkt at its
+ * previous location.
+ *
+ * PKT_FILTER_DELETE - The filter will not relink the pkt and will
+ * call the user supplied defer_free_pkt fn on the packet.
+ *
+ * PKT_FILTER_REMOVE - The filter will not relink the pkt. The supplied
+ * filter fn took ownership (or deleted) the pkt.
+ *
+ * WARNING: pkts inserted by the user (in pkt_filter and/or flush callbacks
+ * and chains) in the prec queue will not be seen by the filter, and the prec
+ * queue is temporarily removed from the pktq while filtering. This has side
+ * effects: for example, pktq_n_pkts_tot() will not reflect the correct number
+ * of packets in the queue during the operation.
+ */
+
+typedef pktq_filter_result_t (*pktq_filter_t)(void* ctx, void* pkt);
+
+/**
+ * The defer_free_pkt callback is invoked when the pktq_filter callback
+ * returns a PKT_FILTER_DELETE decision, which allows the user to dispose of
+ * the packet appropriately based on the situation (free the packet or
+ * save it in a temporary queue, etc.).
+ */
+typedef void (*defer_free_pkt_fn_t)(void *ctx, void *pkt);
+
+/**
+ * The flush_free_pkt callback is invoked when all packets in the pktq
+ * are processed.
+ */
+typedef void (*flush_free_pkt_fn_t)(void *ctx);
+
+#if defined(PROP_TXSTATUS)
+/* this callback is invoked when, in low_txq_scb flush(),
+ * two back-to-back pkts have the same epoch value.
+ */
+typedef void (*flip_epoch_t)(void *ctx, void *pkt, uint8 *flipEpoch, uint8 *lastEpoch);
+#endif /* defined(PROP_TXSTATUS) */
+
+/** filter a pktq, using the caller supplied filter/deposition/flush functions */
+extern void pktq_filter(struct pktq *pq, pktq_filter_t fn, void* arg,
+ defer_free_pkt_fn_t defer, void *defer_ctx, flush_free_pkt_fn_t flush, void *flush_ctx);
+/** filter a particular precedence in pktq, using the caller supplied filter function */
+extern void pktq_pfilter(struct pktq *pq, int prec, pktq_filter_t fn, void* arg,
+ defer_free_pkt_fn_t defer, void *defer_ctx, flush_free_pkt_fn_t flush, void *flush_ctx);
+/** filter a simple non-precedence queue (spktq), using the caller supplied filter function */
+extern void spktq_filter(struct spktq *spq, pktq_filter_t fltr, void* fltr_ctx,
+ defer_free_pkt_fn_t defer, void *defer_ctx, flush_free_pkt_fn_t flush, void *flush_ctx);
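+
+/*
+ * Filter sketch (illustrative): delete every packet matching a caller-defined
+ * test; my_pkt_matches() is a hypothetical predicate, not part of this API.
+ *
+ *	static pktq_filter_result_t
+ *	my_filter(void *ctx, void *pkt)
+ *	{
+ *		return my_pkt_matches(ctx, pkt) ?
+ *		        PKT_FILTER_DELETE : PKT_FILTER_NOACTION;
+ *	}
+ *
+ *	pktq_filter(pq, my_filter, ctx, defer_fn, defer_ctx, flush_fn, flush_ctx);
+ */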
+
+/* operations on a specific precedence in packet queue */
+#define pktqprec_max_pkts(pq, prec) ((pq)->q[prec].max_pkts)
+#define pktqprec_n_pkts(pq, prec) ((pq)->q[prec].n_pkts)
+#define pktqprec_empty(pq, prec) ((pq)->q[prec].n_pkts == 0)
+#define pktqprec_peek(pq, prec) ((pq)->q[prec].head)
+#define pktqprec_peek_tail(pq, prec) ((pq)->q[prec].tail)
+#define spktq_peek_tail(pq) ((pq)->q.tail)
+#ifdef HND_PKTQ_THREAD_SAFE
+extern int pktqprec_avail_pkts(struct pktq *pq, int prec);
+extern bool pktqprec_full(struct pktq *pq, int prec);
+#else
+#define pktqprec_avail_pkts(pq, prec) ((pq)->q[prec].max_pkts - (pq)->q[prec].n_pkts)
+#define pktqprec_full(pq, prec) ((pq)->q[prec].n_pkts >= (pq)->q[prec].max_pkts)
+#endif /* HND_PKTQ_THREAD_SAFE */
+
+extern void pktq_append(struct pktq *pq, int prec, struct spktq *list);
+extern void spktq_append(struct spktq *spq, struct spktq *list);
+extern void pktq_prepend(struct pktq *pq, int prec, struct spktq *list);
+extern void spktq_prepend(struct spktq *spq, struct spktq *list);
+extern void *pktq_penq(struct pktq *pq, int prec, void *p);
+extern void *pktq_penq_head(struct pktq *pq, int prec, void *p);
+extern void *pktq_pdeq(struct pktq *pq, int prec);
+extern void *pktq_pdeq_prev(struct pktq *pq, int prec, void *prev_p);
+extern void *pktq_pdeq_with_fn(struct pktq *pq, int prec, ifpkt_cb_t fn, int arg);
+extern void *pktq_pdeq_tail(struct pktq *pq, int prec);
+/** Remove a specified packet from its queue */
+extern bool pktq_pdel(struct pktq *pq, void *p, int prec);
+
+/* For single precedence queues */
+extern void *spktq_enq_chain(struct spktq *dspq, struct spktq *sspq);
+extern void *spktq_enq(struct spktq *spq, void *p);
+extern void *spktq_enq_head(struct spktq *spq, void *p);
+extern void *spktq_deq(struct spktq *spq);
+extern void *spktq_deq_virt(struct spktq *spq);
+extern void *spktq_deq_tail(struct spktq *spq);
+
+/* operations on a set of precedences in packet queue */
+
+extern int pktq_mlen(struct pktq *pq, uint prec_bmp);
+extern void *pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out);
+extern void *pktq_mpeek(struct pktq *pq, uint prec_bmp, int *prec_out);
+
+/* operations on packet queue as a whole */
+
+#define pktq_n_pkts_tot(pq) ((int)(pq)->n_pkts_tot)
+#define pktq_max(pq) ((int)(pq)->max_pkts)
+#define pktq_empty(pq) ((pq)->n_pkts_tot == 0)
+#define spktq_n_pkts(spq) ((int)(spq)->q.n_pkts)
+#define spktq_empty(spq) ((spq)->q.n_pkts == 0)
+
+#define spktq_max(spq)		((int)(spq)->q.max_pkts)
+#ifdef HND_PKTQ_THREAD_SAFE
+extern int pktq_avail(struct pktq *pq);
+extern bool pktq_full(struct pktq *pq);
+extern int spktq_avail(struct spktq *spq);
+extern bool spktq_full(struct spktq *spq);
+#else
+#define pktq_avail(pq) ((int)((pq)->max_pkts - (pq)->n_pkts_tot))
+#define pktq_full(pq) ((pq)->n_pkts_tot >= (pq)->max_pkts)
+#define spktq_avail(spq) ((int)((spq)->q.max_pkts - (spq)->q.n_pkts))
+#define spktq_full(spq) ((spq)->q.n_pkts >= (spq)->q.max_pkts)
+#endif /* HND_PKTQ_THREAD_SAFE */
+
+/* operations for single precedence queues */
+#define pktenq(pq, p) pktq_penq((pq), 0, (p))
+#define pktenq_head(pq, p) pktq_penq_head((pq), 0, (p))
+#define pktdeq(pq) pktq_pdeq((pq), 0)
+#define pktdeq_tail(pq) pktq_pdeq_tail((pq), 0)
+#define pktqflush(osh, pq, dir) pktq_pflush(osh, (pq), 0, (dir))
+#define pktqinit(pq, max_pkts) pktq_init((pq), 1, (max_pkts))
+#define pktqdeinit(pq) pktq_deinit((pq))
+#define pktqavail(pq) pktq_avail((pq))
+#define pktqfull(pq) pktq_full((pq))
+#define pktqfilter(pq, fltr, fltr_ctx, defer, defer_ctx, flush, flush_ctx) \
+ pktq_pfilter((pq), 0, (fltr), (fltr_ctx), (defer), (defer_ctx), (flush), (flush_ctx))
+
+/* operations for simple non-precedence queues */
+#define spktenq(spq, p) spktq_enq((spq), (p))
+#define spktenq_head(spq, p) spktq_enq_head((spq), (p))
+#define spktdeq(spq) spktq_deq((spq))
+#define spktdeq_tail(spq) spktq_deq_tail((spq))
+#define spktqflush(osh, spq, dir) spktq_flush((osh), (spq), (dir))
+#define spktqinit(spq, max_pkts) spktq_init((spq), (max_pkts))
+#define spktqdeinit(spq) spktq_deinit((spq))
+#define spktqavail(spq) spktq_avail((spq))
+#define spktqfull(spq) spktq_full((spq))
+
+#define spktqfilter(spq, fltr, fltr_ctx, defer, defer_ctx, flush, flush_ctx) \
+ spktq_filter((spq), (fltr), (fltr_ctx), (defer), (defer_ctx), (flush), (flush_ctx))
+extern bool pktq_init(struct pktq *pq, int num_prec, uint max_pkts);
+extern bool pktq_deinit(struct pktq *pq);
+extern bool spktq_init(struct spktq *spq, uint max_pkts);
+extern bool spktq_init_list(struct spktq *spq, uint max_pkts,
+ void *head, void *tail, uint16 n_pkts);
+extern bool spktq_deinit(struct spktq *spq);
+
+extern void pktq_set_max_plen(struct pktq *pq, int prec, uint max_pkts);
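+
+/*
+ * Usage sketch (illustrative; the precedence count, limits and the packet
+ * pointer p are editorial assumptions): init a 4-precedence queue, enqueue
+ * at one precedence, then dequeue from the highest non-empty precedence.
+ *
+ *	struct pktq q;
+ *	int prec;
+ *	if (pktq_init(&q, 4, PKTQ_LEN_DEFAULT)) {
+ *		(void)pktq_penq(&q, 3, p);	// p: packet from the osl layer
+ *		void *d = pktq_deq(&q, &prec);	// prec returns 3 here
+ *		pktq_deinit(&q);
+ *	}
+ */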
+
+/* prec_out may be NULL if caller is not interested in return value */
+extern void *pktq_deq(struct pktq *pq, int *prec_out);
+extern void *pktq_deq_tail(struct pktq *pq, int *prec_out);
+extern void *pktq_peek(struct pktq *pq, int *prec_out);
+extern void *spktq_peek(struct spktq *spq);
+extern void *pktq_peek_tail(struct pktq *pq, int *prec_out);
+
+/** flush pktq */
+extern void pktq_flush(osl_t *osh, struct pktq *pq, bool dir);
+/* single precedence queue with callback before deleting a packet */
+extern void spktq_flush_ext(osl_t *osh, struct spktq *spq, bool dir,
+ void (*pktq_flush_cb)(void *ctx, void *pkt), void *pktq_flush_ctx);
+/* single precedence queue */
+#define spktq_flush(osh, spq, dir) spktq_flush_ext(osh, spq, dir, NULL, NULL)
+/** Empty the queue at particular precedence level */
+extern void pktq_pflush(osl_t *osh, struct pktq *pq, int prec, bool dir);
+
+typedef void (*spktq_cb_t)(void *arg, struct spktq *spq);
+extern void spktq_free_register(spktq_cb_t cb, void *arg);
+extern void spktq_cb(void *spq);
+#define SPKTQFREE spktq_cb
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _hnd_pktq_h_ */
diff --git a/bcmdhd.101.10.361.x/include/hnd_trap.h b/bcmdhd.101.10.361.x/include/hnd_trap.h
new file mode 100755
index 0000000..eded5da
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/hnd_trap.h
@@ -0,0 +1,33 @@
+/*
+ * HND Trap handling.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _hnd_trap_h_
+#define _hnd_trap_h_
+
+#if defined(__arm__) || defined(__thumb__) || defined(__thumb2__) || defined(WLETD)
+#include <hnd_armtrap.h>
+#else
+#error "unsupported CPU architecture"
+#endif
+
+#endif /* _hnd_trap_h_ */
diff --git a/bcmdhd.101.10.361.x/include/hndchipc.h b/bcmdhd.101.10.361.x/include/hndchipc.h
new file mode 100755
index 0000000..26e53b3
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/hndchipc.h
@@ -0,0 +1,47 @@
+/*
+ * HND SiliconBackplane chipcommon support - OS independent.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _hndchipc_h_
+#define _hndchipc_h_
+
+#include <typedefs.h>
+#include <siutils.h>
+
+#ifdef RTE_UART
+typedef void (*si_serial_init_fn)(si_t *sih, void *regs, uint irq, uint baud_base, uint reg_shift);
+#else
+typedef void (*si_serial_init_fn)(void *regs, uint irq, uint baud_base, uint reg_shift);
+#endif
+extern void si_serial_init(si_t *sih, si_serial_init_fn add);
+
+extern volatile void *hnd_jtagm_init(si_t *sih, uint clkd, bool exttap, uint32 *prev_jtagctrl);
+extern void hnd_jtagm_disable(si_t *sih, volatile void *h, uint32 *prev_jtagctrl);
+extern uint32 jtag_scan(si_t *sih, volatile void *h, uint irsz, uint32 ir0, uint32 ir1,
+ uint drsz, uint32 dr0, uint32 *dr1, bool rti);
+extern uint32 jtag_read_128(si_t *sih, volatile void *h, uint irsz, uint32 ir0, uint drsz,
+ uint32 dr0, uint32 *dr1, uint32 *dr2, uint32 *dr3);
+extern uint32 jtag_write_128(si_t *sih, volatile void *h, uint irsz, uint32 ir0, uint drsz,
+ uint32 dr0, uint32 *dr1, uint32 *dr2, uint32 *dr3);
+extern int jtag_setbit_128(si_t *sih, uint32 jtagureg_addr, uint8 bit_pos, uint8 bit_val);
+
+#endif /* _hndchipc_h_ */
diff --git a/bcmdhd.101.10.361.x/include/hndd11.h b/bcmdhd.101.10.361.x/include/hndd11.h
new file mode 100755
index 0000000..bd6f1da
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/hndd11.h
@@ -0,0 +1,121 @@
+/*
+ * Generic functions for d11 access
+ *
+ * Broadcom Proprietary and Confidential. Copyright (C) 2020,
+ * All Rights Reserved.
+ *
+ * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom;
+ * the contents of this file may not be disclosed to third parties,
+ * copied or duplicated in any form, in whole or in part, without
+ * the prior written permission of Broadcom.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Proprietary:>>
+ */
+
+#ifndef _hndd11_h_
+#define _hndd11_h_
+
+#include <typedefs.h>
+#include <osl_decl.h>
+#include <bcmutils.h>
+#include <siutils.h>
+#include <d11.h>
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+#ifndef WL_RSSI_ANT_MAX
+#define WL_RSSI_ANT_MAX 4 /**< max possible rx antennas */
+#elif WL_RSSI_ANT_MAX != 4
+#error "WL_RSSI_ANT_MAX does not match"
+#endif
+
+BWL_PRE_PACKED_STRUCT struct wl_d11rxrssi {
+ int8 dBm; /* number of full dBms */
+ /* sub-dbm resolution */
+ int8 decidBm; /* sub dBms : value after the decimal point */
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct wl_d11rxrssi wlc_d11rxrssi_t;
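+
+/*
+ * Formatting sketch (illustrative; assumes decidBm holds the digits after the
+ * decimal point as the field comments state, e.g. -67.5 dBm -> dBm=-67,
+ * decidBm=5 — verify the sign convention against the implementation):
+ *
+ *	printf("rssi: %d.%d dBm\n", rxpwr->dBm, rxpwr->decidBm);
+ */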
+
+BWL_PRE_PACKED_STRUCT struct wlc_d11rxhdr {
+ /* SW header */
+ uint32 tsf_l; /**< TSF_L reading */
+ int8 rssi; /**< computed instantaneous rssi */
+ int8 rssi_qdb; /**< qdB portion of the computed rssi */
+	int16  snr;		/**< computed instantaneous signal-to-noise ratio (snr) */
+ int8 rxpwr[ROUNDUP(WL_RSSI_ANT_MAX,2)]; /**< rssi for supported antennas */
+	/**
+	 * Even though rxhdr can be in short or long format, it is always declared
+	 * here in long format so that the offsets of the other fields stay the same.
+	 */
+ d11rxhdr_t rxhdr;
+} BWL_POST_PACKED_STRUCT;
+
+/* SW RXHDR + HW RXHDR */
+typedef struct wlc_d11rxhdr wlc_d11rxhdr_t;
+
+/* Extension of wlc_d11rxhdr.
+ * This extra block can be used to store internal information that cannot fit
+ * into wlc_d11rxhdr.
+ * At the moment, it is only used to store and possibly transmit the per-core
+ * quarter-dBm rssi information produced by the phy.
+ * NOTE: To avoid header overhead and amsdu handling complexities, this usage
+ * is limited to cases where the host needs the extra info, e.g. monitor mode
+ * packets.
+ */
+
+BWL_PRE_PACKED_STRUCT struct wlc_d11rxhdr_ext {
+#ifdef BCM_MON_QDBM_RSSI
+ wlc_d11rxrssi_t rxpwr[WL_RSSI_ANT_MAX];
+#endif
+ wlc_d11rxhdr_t wlc_d11rx;
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct wlc_d11rxhdr_ext wlc_d11rxhdr_ext_t;
+
+/* Length of software rx header extension */
+#define WLC_SWRXHDR_EXT_LEN (OFFSETOF(wlc_d11rxhdr_ext_t, wlc_d11rx))
+
+/* Length of SW header (12 bytes) */
+#define WLC_RXHDR_LEN (OFFSETOF(wlc_d11rxhdr_t, rxhdr))
+/* Length of RX headers - SW header + HW/ucode/PHY RX status */
+#define WL_RXHDR_LEN(corerev, corerev_minor) \
+ (WLC_RXHDR_LEN + D11_RXHDR_LEN(corerev, corerev_minor))
+#define WL_RXHDR_LEN_TMP(corerev, corerev_minor) \
+ (WLC_RXHDR_LEN + D11_RXHDR_LEN_TMP(corerev, corerev_minor))
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+/* Structure to hold d11 corerev information */
+typedef struct d11_info d11_info_t;
+struct d11_info {
+ uint major_revid;
+ uint minor_revid;
+};
+
+/* ulp dbg macro */
+#define HNDD11_DBG(x)
+#define HNDD11_ERR(x) printf x
+
+/* d11 slice index */
+#define DUALMAC_MAIN 0
+#define DUALMAC_AUX 1
+#define DUALMAC_SCAN 2
+
+extern void hndd11_read_shm(si_t *sih, uint coreunit, uint offset, void* buf);
+extern void hndd11_write_shm(si_t *sih, uint coreunit, uint offset, const void* buf);
+
+extern void hndd11_copyfrom_shm(si_t *sih, uint coreunit, uint offset, void* buf, int len);
+extern void hndd11_copyto_shm(si_t *sih, uint coreunit, uint offset, const void* buf, int len);
+
+extern uint32 hndd11_bm_read(osl_t *osh, d11regs_info_t *regsinfo, uint32 offset, uint32 len,
+ uint32 *buf);
+extern uint32 hndd11_bm_write(osl_t *osh, d11regs_info_t *regsinfo, uint32 offset, uint32 len,
+ const uint32 *buf);
+extern void hndd11_bm_dump(osl_t *osh, d11regs_info_t *regsinfo, uint32 offset, uint32 len);
+
+extern int hndd11_get_reginfo(si_t *sih, d11regs_info_t *regsinfo, uint coreunit);
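+
+/*
+ * Usage sketch (illustrative; assumes shm words are 16 bits wide, hence the
+ * uint16 buffer — verify against the implementation before relying on it):
+ *
+ *	uint16 val;
+ *	hndd11_read_shm(sih, 0, offset, &val);	// coreunit 0
+ */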
+
+#endif /* _hndd11_h_ */
diff --git a/bcmdhd.101.10.361.x/include/hnddma.h b/bcmdhd.101.10.361.x/include/hnddma.h
new file mode 100755
index 0000000..bbc8455
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/hnddma.h
@@ -0,0 +1,338 @@
+/*
+ * Generic Broadcom Home Networking Division (HND) DMA engine SW interface
+ * This supports the following chips: BCM42xx, 44xx, 47xx.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _hnddma_h_
+#define _hnddma_h_
+
+#include <typedefs.h>
+#include <osl_decl.h>
+#include <siutils.h>
+#include <sbhnddma.h>
+#include <hnd_pktq.h>
+#include <hnd_pktpool.h>
+
+#ifndef _hnddma_pub_
+#define _hnddma_pub_
+typedef const struct hnddma_pub hnddma_t;
+#endif /* _hnddma_pub_ */
+
+/* range param for dma_getnexttxp() and dma_txreclaim */
+typedef enum txd_range {
+ HNDDMA_RANGE_ALL = 1,
+ HNDDMA_RANGE_TRANSMITTED,
+ HNDDMA_RANGE_TRANSFERED
+} txd_range_t;
+
+/* dma parameters id */
+enum dma_param_id {
+ HNDDMA_PID_TX_MULTI_OUTSTD_RD = 0,
+ HNDDMA_PID_TX_PREFETCH_CTL,
+ HNDDMA_PID_TX_PREFETCH_THRESH,
+ HNDDMA_PID_TX_BURSTLEN,
+ HNDDMA_PID_TX_CHAN_SWITCH,
+
+ HNDDMA_PID_RX_PREFETCH_CTL = 0x100,
+ HNDDMA_PID_RX_PREFETCH_THRESH,
+ HNDDMA_PID_RX_BURSTLEN,
+ HNDDMA_PID_BURSTLEN_CAP,
+ HNDDMA_PID_BURSTLEN_WAR,
+ HNDDMA_SEP_RX_HDR, /**< SPLITRX related */
+ HNDDMA_SPLIT_FIFO,
+ HNDDMA_PID_D11RX_WAR,
+ HNDDMA_PID_RX_WAIT_CMPL,
+ HNDDMA_NRXPOST,
+ HNDDMA_NRXBUFSZ,
+ HNDDMA_PID_RXCTL_MOW,
+ HNDDMA_M2M_RXBUF_RAW /* rx buffers are raw buffers, not lbufs/lfrags */
+};
+
+#define SPLIT_FIFO_0 1
+#define SPLIT_FIFO_1 2
+
+typedef void (*setup_context_t)(void *ctx, void *p, uint8 **desc0, uint16 *len0,
+ uint8 **desc1, uint16 *len1);
+
+/**
+ * Exported data structure (read-only)
+ */
+/* export structure */
+struct hnddma_pub {
+ uint dmastflags; /* dma status flags */
+ uint dmactrlflags; /**< dma control flags */
+
+ /* rx error counters */
+ uint rxgiants; /**< rx giant frames */
+ uint rxnobuf; /**< rx out of dma descriptors */
+ /* tx error counters */
+ uint txnobuf; /**< tx out of dma descriptors */
+ uint txnodesc; /**< tx out of dma descriptors running count */
+};
+
+/* DMA status flags */
+#define BCM_DMA_STF_RX (1u << 0u) /* the channel is RX DMA */
+
+typedef struct dma_common dma_common_t;
+typedef struct dma_dd_pool dma_dd_pool_t;
+
+/* Flags for dma_attach_ext function */
+#define BCM_DMA_IND_INTF_FLAG 0x00000001 /* set for using INDIRECT DMA INTERFACE */
+#define BCM_DMA_DESC_ONLY_FLAG 0x00000002 /* For DMA that posts descriptors only and
+ * no packets
+ */
+#define BCM_DMA_CHAN_SWITCH_EN 0x00000008 /* for d11 corerev 64+ to help arbitrate
+ * btw dma channels.
+ */
+#define BCM_DMA_ROEXT_SUPPORT 0x00000010 /* for d11 corerev 128+ to support receive
+ * frame offset >=128B and <= 255B
+ */
+#define BCM_DMA_RX_ALIGN_8BYTE 0x00000020 /* RXDMA address 8-byte aligned */
+#define BCM_DMA_DESC_SHARED_POOL 0x00000100 /* For TX DMA that uses shared desc pool */
+#define BCM_DMA_RXP_LIST 0x00000200 /* linked list for RXP instead of array */
+
+typedef int (*rxpkt_error_check_t)(const void* ctx, void* pkt);
+
+extern dma_common_t * dma_common_attach(osl_t *osh, volatile uint32 *indqsel,
+ volatile uint32 *suspreq, volatile uint32 *flushreq, rxpkt_error_check_t cb, void *ctx);
+extern void dma_common_detach(dma_common_t *dmacommon);
+extern void dma_common_set_ddpool_ctx(dma_common_t *dmacommon, void *desc_pool);
+extern void * dma_common_get_ddpool_ctx(dma_common_t *dmacommon, void **va);
+extern bool dma_check_last_desc(hnddma_t *dmah);
+extern void dma_txfrwd(hnddma_t *dmah);
+
+#ifdef BCM_DMA_INDIRECT
+/* Use indirect registers for non-ctmode */
+#define DMA_INDQSEL_IA (1 << 31)
+extern void dma_set_indqsel(hnddma_t *di, bool force);
+extern bool dma_is_indirect(hnddma_t *dmah);
+#else
+#define dma_set_indqsel(a, b)
+#define dma_is_indirect(a) FALSE
+#endif /* #ifdef BCM_DMA_INDIRECT */
+
+extern hnddma_t * dma_attach_ext(dma_common_t *dmac, osl_t *osh, const char *name, si_t *sih,
+ volatile void *dmaregstx, volatile void *dmaregsrx, uint32 flags, uint8 qnum,
+ uint ntxd, uint nrxd, uint rxbufsize, int rxextheadroom, uint nrxpost, uint rxoffset,
+ uint *msg_level, uint coreunit);
+
+extern hnddma_t * dma_attach(osl_t *osh, const char *name, si_t *sih,
+ volatile void *dmaregstx, volatile void *dmaregsrx,
+ uint ntxd, uint nrxd, uint rxbufsize, int rxextheadroom, uint nrxpost,
+ uint rxoffset, uint *msg_level);
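+
+/*
+ * Attach sketch (illustrative): the ring sizes, buffer size, headroom and
+ * offset below are editorial assumptions (-1 rxextheadroom presumed to mean
+ * "default"), not requirements of this interface.
+ *
+ *	hnddma_t *di = dma_attach(osh, "wl0", sih, dmaregstx, dmaregsrx,
+ *	                          64, 64, 2048, -1, 32, 0, &msg_level);
+ *	if (di != NULL) {
+ *		dma_txinit(di);
+ *		dma_rxinit(di);
+ *	}
+ */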
+
+void dma_rx_desc_init(hnddma_t *dmah, uint rxfifo);
+void dma_detach(hnddma_t *dmah);
+bool dma_txreset(hnddma_t *dmah);
+bool dma_rxreset(hnddma_t *dmah);
+bool dma_rxidle(hnddma_t *dmah);
+void dma_txinit(hnddma_t *dmah);
+bool dma_txenabled(hnddma_t *dmah);
+void dma_rxinit(hnddma_t *dmah);
+void dma_txsuspend(hnddma_t *dmah);
+void dma_txresume(hnddma_t *dmah);
+bool dma_txsuspended(hnddma_t *dmah);
+bool dma_txsuspendedidle(hnddma_t *dmah);
+void dma_txflush(hnddma_t *dmah);
+void dma_txflush_clear(hnddma_t *dmah);
+int dma_txfast_ext(hnddma_t *dmah, void *p0, bool commit, uint16 *pre_txout, uint16 *numd);
+int dma_txfast_alfrag(hnddma_t *dmah, hnddma_t *aqm_dmah, void *p, bool commit, dma64dd_t *aqmdesc,
+ uint d11_txh_len, bool ptxd_hw_enab);
+#define dma_txfast(dmah, p0, commit) \
+ dma_txfast_ext((dmah), (p0), (commit), NULL, NULL)
+void dma_txcommit(hnddma_t *dmah);
+int dma_txunframed(hnddma_t *dmah, void *buf, uint len, bool commit);
+void *dma_getpos(hnddma_t *dmah, bool direction);
+void dma_fifoloopbackenable(hnddma_t *dmah);
+void dma_fifoloopbackdisable(hnddma_t *dmah);
+bool dma_txstopped(hnddma_t *dmah);
+bool dma_rxstopped(hnddma_t *dmah);
+void dma_rxenable(hnddma_t *dmah);
+bool dma_rxenabled(hnddma_t *dmah);
+void *dma_rx(hnddma_t *dmah);
+#ifdef APP_RX
+void dma_getnextrxp_app(hnddma_t *dmah, bool forceall, uint *pktcnt,
+ void **head, void **tail);
+void dma_rxfill_haddr_getparams(hnddma_t *dmah, uint *nrxd, uint16 *rxout,
+ dma64dd_t **ddring, uint *rxextrahdrroom, uint32 **rxpktid);
+void dma_rxfill_haddr_setparams(hnddma_t *dmah, uint16 rxout);
+#endif /* APP_RX */
+uint dma_rx_get_rxoffset(hnddma_t *dmah);
+bool dma_rxfill(hnddma_t *dmah);
+bool dma_rxfill_required(hnddma_t *dmah);
+void dma_txreclaim(hnddma_t *dmah, txd_range_t range);
+void dma_rxreclaim(hnddma_t *dmah);
+#define _DMA_GETUINTVARPTR_
+uint *dma_getuintvarptr(hnddma_t *dmah, const char *name);
+uint8 dma_getuint8var(hnddma_t *dmah, const char *name);
+uint16 dma_getuint16var(hnddma_t *dmah, const char *name);
+uint32 dma_getuint32var(hnddma_t *dmah, const char *name);
+void * dma_getnexttxp(hnddma_t *dmah, txd_range_t range);
+void * dma_getnextp(hnddma_t *dmah);
+void * dma_getnextrxp(hnddma_t *dmah, bool forceall);
+void * dma_peeknexttxp(hnddma_t *dmah, txd_range_t range);
+int dma_peekntxp(hnddma_t *dmah, int *len, void *txps[], txd_range_t range);
+void * dma_peeknextrxp(hnddma_t *dmah);
+void dma_rxparam_get(hnddma_t *dmah, uint16 *rxoffset, uint16 *rxbufsize);
+bool dma_is_rxfill_suspend(hnddma_t *dmah);
+void dma_txblock(hnddma_t *dmah);
+void dma_txunblock(hnddma_t *dmah);
+uint dma_txactive(hnddma_t *dmah);
+uint dma_rxactive(hnddma_t *dmah);
+void dma_txrotate(hnddma_t *dmah);
+void dma_counterreset(hnddma_t *dmah);
+uint dma_ctrlflags(hnddma_t *dmah, uint mask, uint flags);
+uint dma_txpending(hnddma_t *dmah);
+uint dma_txcommitted(hnddma_t *dmah);
+int dma_pktpool_set(hnddma_t *dmah, pktpool_t *pool);
+int dma_rxdatapool_set(hnddma_t *dmah, pktpool_t *pktpool);
+pktpool_t *dma_rxdatapool_get(hnddma_t *dmah);
+
+void dma_dump_txdmaregs(hnddma_t *dmah, uint32 **buf);
+void dma_dump_rxdmaregs(hnddma_t *dmah, uint32 **buf);
+#if defined(BCMDBG) || defined(BCMDBG_DUMP) || defined(BCMDBG_DMA)
+void dma_dump(hnddma_t *dmah, struct bcmstrbuf *b, bool dumpring);
+void dma_dumptx(hnddma_t *dmah, struct bcmstrbuf *b, bool dumpring);
+void dma_dumprx(hnddma_t *dmah, struct bcmstrbuf *b, bool dumpring);
+#endif
+bool dma_rxtxerror(hnddma_t *dmah, bool istx);
+void dma_burstlen_set(hnddma_t *dmah, uint8 rxburstlen, uint8 txburstlen);
+uint dma_avoidance_cnt(hnddma_t *dmah);
+void dma_param_set(hnddma_t *dmah, uint16 paramid, uint16 paramval);
+void dma_param_get(hnddma_t *dmah, uint16 paramid, uint *paramval);
+void dma_context(hnddma_t *dmah, setup_context_t fn, void *ctx);
+
+bool dma_glom_enable(hnddma_t *dmah, uint32 val);
+uint dma_activerxbuf(hnddma_t *dmah);
+bool dma_rxidlestatus(hnddma_t *dmah);
+uint dma_get_rxpost(hnddma_t *dmah);
+
+/* Return the address width allowed.
+ * This needs to be done after SB attach but before dma attach;
+ * SB attach provides the ability to probe backplane and dma core capabilities.
+ * This info is needed by DMA_ALLOC_CONSISTENT in dma attach.
+ */
+extern uint dma_addrwidth(si_t *sih, void *dmaregs);
+
+/* count the number of tx packets that are queued to the dma ring */
+extern uint dma_txp(hnddma_t *di);
+
+extern void dma_txrewind(hnddma_t *di);
+
+/* pio helpers */
+extern int dma_msgbuf_txfast(hnddma_t *di, dma64addr_t p0, bool com, uint32 ln, bool fst, bool lst);
+extern int dma_ptrbuf_txfast(hnddma_t *dmah, dma64addr_t p0, void *p, bool commit,
+ uint32 len, bool first, bool last);
+
+extern int dma_rxfast(hnddma_t *di, dma64addr_t p, uint32 len);
+extern int dma_rxfill_suspend(hnddma_t *dmah, bool suspended);
+extern void dma_link_handle(hnddma_t *dmah1, hnddma_t *dmah2);
+extern void dma_unlink_handle(hnddma_t *dmah1, hnddma_t *dmah2);
+extern int dma_rxfill_unframed(hnddma_t *di, void *buf, uint len, bool commit);
+
+extern uint16 dma_get_next_txd_idx(hnddma_t *di, bool txout);
+extern uint16 dma_get_txd_count(hnddma_t *dmah, uint16 start, bool txout);
+extern uintptr dma_get_txd_addr(hnddma_t *di, uint16 idx);
+
+/* returns the memory address (hi and low) of the buffer associated with the dma descriptor
+ * having index idx.
+ */
+extern void dma_get_txd_memaddr(hnddma_t *dmah, uint32 *addrlo, uint32 *addrhi, uint idx);
+
+extern int dma_txdesc(hnddma_t *dmah, dma64dd_t *dd, bool commit);
+extern int dma_nexttxdd(hnddma_t *dmah, txd_range_t range, uint32 *flags1, uint32 *flags2,
+ bool advance);
+
+extern void dma_update_rxfill(hnddma_t *dmah);
+extern void dma_rxchan_reset(hnddma_t *di);
+extern void dma_txchan_reset(hnddma_t *di);
+extern void dma_chan_reset(hnddma_t *dmah);
+extern pktpool_t* dma_pktpool_get(hnddma_t *dmah);
+extern void dma_clearrxp(hnddma_t *dmah);
+extern void dma_cleartxp(hnddma_t *dmah);
+
+#define dma_getnexttxdd(dmah, range, flags1, flags2) \
+ dma_nexttxdd((dmah), (range), (flags1), (flags2), TRUE)
+
+#define dma_peeknexttxdd(dmah, range, flags1, flags2) \
+ dma_nexttxdd((dmah), (range), (flags1), (flags2), FALSE)
+
+#define NUM_VEC_PCIE 4
+
+#define XFER_FROM_LBUF 0x1
+#define XFER_TO_LBUF 0x2
+#define XFER_INJ_ERR 0x4
+
+typedef struct m2m_vec_s {
+ dma64addr_t addr;
+ uint32 len;
+} m2m_vec_t;
+
+typedef struct m2m_desc_s {
+ uint8 num_rx_vec;
+ uint8 num_tx_vec;
+ uint8 flags;
+ bool commit;
+ m2m_vec_t vec[];
+} m2m_desc_t;
+
+#define INIT_M2M_DESC(desc) \
+{\
+ desc->num_rx_vec = 0; \
+ desc->num_tx_vec = 0; \
+ desc->flags = 0; \
+ desc->commit = TRUE; \
+}
+
+#define SETUP_RX_DESC(desc, rxaddr, rxlen) \
+{\
+ ASSERT(desc->num_tx_vec == 0); \
+ desc->vec[desc->num_rx_vec].addr = rxaddr; \
+ desc->vec[desc->num_rx_vec].len = rxlen; \
+ desc->num_rx_vec++; \
+}
+
+#define SETUP_TX_DESC(desc, txaddr, txlen) \
+{\
+ desc->vec[desc->num_tx_vec + desc->num_rx_vec].addr = txaddr; \
+ desc->vec[desc->num_tx_vec + desc->num_rx_vec].len = txlen; \
+ desc->num_tx_vec++; \
+}
+
+#define SETUP_XFER_FLAGS(desc, flag) \
+{\
+ desc->flags |= flag; \
+}
+
+#define DD_IS_SHARED_POOL(di) ((di)->dmactrlflags & DMA_CTRL_SHARED_POOL)
+
+extern int dma_m2m_submit(hnddma_t *dmah, m2m_desc_t *desc, bool implicit);
+extern void dma_chan_enable(hnddma_t *dmah, bool enable);
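+
+/*
+ * M2M sketch (illustrative): build a descriptor with one rx-side and one
+ * tx-side vector, then submit it. MALLOCZ() is the usual osl allocator;
+ * treating 0 as success for dma_m2m_submit() is an assumption.
+ *
+ *	m2m_desc_t *d = MALLOCZ(osh, sizeof(*d) + 2 * sizeof(m2m_vec_t));
+ *	if (d != NULL) {
+ *		INIT_M2M_DESC(d);
+ *		SETUP_RX_DESC(d, rx_addr, len);	// rx vectors must come first
+ *		SETUP_TX_DESC(d, tx_addr, len);
+ *		if (dma_m2m_submit(dmah, d, FALSE) != 0)
+ *			;	// handle error
+ *	}
+ */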
+
+extern bool dma_rxfill_p(hnddma_t *dmah, void *p);
+extern void dma_aqm_di_link(hnddma_t *dmah_aqm, hnddma_t *dmah_hw);
+extern void dma_dump_aqminfo(hnddma_t * dmah, struct bcmstrbuf *b, uint16 fifonum);
+
+/* To dump ntxd and nrxd from the DMA ring */
+void dma_dump_info(hnddma_t *dmah, uint16 fifonum, struct bcmstrbuf *b);
+
+#endif /* _hnddma_h_ */
diff --git a/bcmdhd.101.10.361.x/include/hndlhl.h b/bcmdhd.101.10.361.x/include/hndlhl.h
new file mode 100755
index 0000000..e2068a3
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/hndlhl.h
@@ -0,0 +1,94 @@
+/*
+ * HND SiliconBackplane PMU support.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _hndlhl_h_
+#define _hndlhl_h_
+
+enum {
+ LHL_MAC_TIMER = 0,
+ LHL_ARM_TIMER = 1
+};
+
+typedef struct {
+ uint16 offset;
+ uint32 mask;
+ uint32 val;
+} lhl_reg_set_t;
+
+#define LHL_REG_OFF(reg) OFFSETOF(gciregs_t, reg)
+
+extern void si_lhl_timer_config(si_t *sih, osl_t *osh, int timer_type);
+extern void si_lhl_timer_enable(si_t *sih);
+extern void si_lhl_timer_reset(si_t *sih, uint coreid, uint coreunit);
+
+extern void si_lhl_setup(si_t *sih, osl_t *osh);
+extern void si_lhl_enable(si_t *sih, osl_t *osh, bool enable);
+extern void si_lhl_ilp_config(si_t *sih, osl_t *osh, uint32 ilp_period);
+extern void si_lhl_enable_sdio_wakeup(si_t *sih, osl_t *osh);
+extern void si_lhl_disable_sdio_wakeup(si_t *sih);
+extern int si_lhl_set_lpoclk(si_t *sih, osl_t *osh, uint32 lpo_force);
+extern void si_set_lv_sleep_mode_lhl_config_4369(si_t *sih);
+extern void si_set_lv_sleep_mode_lhl_config_4362(si_t *sih);
+extern void si_set_lv_sleep_mode_lhl_config_4378(si_t *sih);
+extern void si_set_lv_sleep_mode_lhl_config_4387(si_t *sih);
+extern void si_set_lv_sleep_mode_lhl_config_4389(si_t *sih);
+
+#define HIB_EXT_WAKEUP_CAP(sih) (PMUREV(sih->pmurev) >= 33)
+
+#ifdef WL_FWSIGN
+#define LHL_IS_PSMODE_0(sih) (1)
+#define LHL_IS_PSMODE_1(sih) (0)
+#else
+#define LHL_IS_PSMODE_0(sih) (si_lhl_ps_mode(sih) == LHL_PS_MODE_0)
+#define LHL_IS_PSMODE_1(sih) (si_lhl_ps_mode(sih) == LHL_PS_MODE_1)
+#endif /* WL_FWSIGN */
+
+/* LHL revid in capabilities register */
+#define LHL_CAP_REV_MASK 0x000000ff
+
+/* LHL rev 6 requires this bit to be set first */
+#define LHL_PWRSEQCTL_WL_FLLPU_EN (1 << 7)
+
+#define LHL_CBUCK_VOLT_SLEEP_SHIFT 12u
+#define LHL_CBUCK_VOLT_SLEEP_MASK 0x0000F000
+
+#define LHL_ABUCK_VOLT_SLEEP_SHIFT 0u
+#define LHL_ABUCK_VOLT_SLEEP_MASK 0x0000000F
+
+extern void si_lhl_mactim0_set(si_t *sih, uint32 val);
+
+/* LHL Chip Control 1 Register */
+#define LHL_1MHZ_FLL_DAC_EXT_SHIFT (9u)
+#define LHL_1MHZ_FLL_DAC_EXT_MASK (0xffu << 9u)
+#define LHL_1MHZ_FLL_PRELOAD_MASK (1u << 17u)
+
+/* LHL Top Level Power Sequence Control Register */
+#define LHL_TOP_PWRSEQ_SLEEP_ENAB_MASK (1u << 0)
+#define LHL_TOP_PWRSEQ_TOP_ISO_EN_MASK (1u << 3u)
+#define LHL_TOP_PWRSEQ_TOP_SLB_EN_MASK (1u << 4u)
+#define LHL_TOP_PWRSEQ_TOP_PWRSW_EN_MASK (1u << 5u)
+#define LHL_TOP_PWRSEQ_MISCLDO_PU_EN_MASK (1u << 6u)
+#define LHL_TOP_PWRSEQ_SERDES_SLB_EN_MASK (1u << 9u)
+#define LHL_TOP_PWRSEQ_SERDES_CLK_DIS_EN_MASK (1u << 10u)
+
+#endif /* _hndlhl_h_ */
diff --git a/bcmdhd.101.10.361.x/include/hndmem.h b/bcmdhd.101.10.361.x/include/hndmem.h
new file mode 100755
index 0000000..b77b751
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/hndmem.h
@@ -0,0 +1,74 @@
+/*
+ * Utility routines for configuring different memories in Broadcom chips.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _HNDMEM_H_
+#define _HNDMEM_H_
+
+typedef enum {
+ MEM_SOCRAM = 0,
+ MEM_BM = 1,
+ MEM_UCM = 2,
+ MEM_SHM = 3,
+ MEM_MAX = 4
+} hndmem_type_t;
+
+/* PDA (Power Down Array) configuration */
+typedef enum {
+	PDA_CONFIG_CLEAR = 0,		/* Clear PDA, i.e. turns on the memory bank */
+	PDA_CONFIG_SET_FULL = 1,	/* Set PDA, i.e. turns off the memory bank */
+	PDA_CONFIG_SET_PARTIAL = 2,	/* Set PDA partially, i.e. turns off part of the memory bank */
+ PDA_CONFIG_MAX = 3
+} hndmem_config_t;
+
+/* Returns the number of banks in a given memory */
+extern int hndmem_num_banks(si_t *sih, int mem);
+
+/* Returns the size of a given bank in a given memory */
+extern int hndmem_bank_size(si_t *sih, hndmem_type_t mem, int bank_num);
+
+/* Returns the start address of given memory */
+extern uint32 hndmem_mem_base(si_t *sih, hndmem_type_t mem);
+
+#ifdef BCMDEBUG
+/* Dumps the complete memory information */
+extern void hndmem_dump_meminfo_all(si_t *sih);
+#endif /* BCMDEBUG */
+
+/* Configures the Sleep PDA for a particular bank for a given memory type */
+extern int hndmem_sleeppda_bank_config(si_t *sih, hndmem_type_t mem,
+ int bank_num, hndmem_config_t config, uint32 pda);
+/* Configures the Active PDA for a particular bank for a given memory type */
+extern int hndmem_activepda_bank_config(si_t *sih, hndmem_type_t mem,
+ int bank_num, hndmem_config_t config, uint32 pda);
+
+/* Configures the Sleep PDA for all the banks for a given memory type */
+extern int hndmem_sleeppda_config(si_t *sih, hndmem_type_t mem,
+ hndmem_config_t config);
+/* Configures the Active PDA for all the banks for a given memory type */
+extern int hndmem_activepda_config(si_t *sih, hndmem_type_t mem,
+ hndmem_config_t config);
+
+/* Turn off/on all the possible banks in a given memory range */
+extern int hndmem_activepda_mem_config(si_t *sih, hndmem_type_t mem,
+ uint32 mem_start, uint32 size, hndmem_config_t config);
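+
+/* Usage sketch (illustrative, not part of the original header): powering
+ * down all SOCRAM banks during sleep, then turning one bank back on, could
+ * look like
+ *
+ *   hndmem_sleeppda_config(sih, MEM_SOCRAM, PDA_CONFIG_SET_FULL);
+ *   hndmem_activepda_bank_config(sih, MEM_SOCRAM, 0, PDA_CONFIG_CLEAR, 0);
+ *
+ * where the trailing 0 is the per-bank PDA pattern (presumably ignored for
+ * PDA_CONFIG_CLEAR).
+ */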
+#endif /* _HNDMEM_H_ */
diff --git a/bcmdhd.101.10.361.x/include/hndoobr.h b/bcmdhd.101.10.361.x/include/hndoobr.h
new file mode 100755
index 0000000..c27070e
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/hndoobr.h
@@ -0,0 +1,93 @@
+/*
+ * HND OOBR interface header
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _hndoobr_h_
+#define _hndoobr_h_
+
+#include <typedefs.h>
+#include <siutils.h>
+
+/* for 'srcpidx' of hnd_oobr_get_intr_config() */
+#define HND_CORE_MAIN_INTR 0
+#define HND_CORE_ALT_INTR 1
+
+uint32 hnd_oobr_get_clkpwrreq(si_t *sih, uint coreid);
+uint32 hnd_oobr_get_intstatus(si_t *sih);
+int hnd_oobr_get_intr_config(si_t *sih, uint srccidx, uint srcpidx, uint dstcidx, uint *dstpidx);
+int hnd_oobr_set_intr_src(si_t *sih, uint dstcidx, uint dstpidx, uint intrnum);
+void hnd_oobr_init(si_t *sih);
+
+#ifdef BCMDBG
+/* dump oobr registers values to console */
+void hnd_oobr_dump(si_t *sih);
+#endif
+
+#define OOBR_INVALID_PORT 0xFFu
+
+/* per core source/dest sel reg */
+#define OOBR_INTR_PER_CONFREG 4u /* 4 interrupts per configure reg */
+#define OOBR_INTR_NUM_MASK 0x7Fu
+#define OOBR_INTR_EN 0x80u
+/* per core config reg */
+#define OOBR_CORECNF_OUTPUT_MASK 0x0000FF00u
+#define OOBR_CORECNF_OUTPUT_SHIFT 8u
+#define OOBR_CORECNF_INPUT_MASK 0x00FF0000u
+#define OOBR_CORECNF_INPUT_SHIFT 16u
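+
+/* Field-extraction sketch (illustrative): given a per-core 'config' word,
+ * the interrupt output/input counts follow from the masks above:
+ *
+ *   uint outputs = (config & OOBR_CORECNF_OUTPUT_MASK) >> OOBR_CORECNF_OUTPUT_SHIFT;
+ *   uint inputs  = (config & OOBR_CORECNF_INPUT_MASK) >> OOBR_CORECNF_INPUT_SHIFT;
+ */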
+
+#define OOBR_EXT_RSRC_REQ_PERCORE_OFFSET 0x34u
+#define OOBR_EXT_RSRC_OFFSET 0x100u
+#define OOBR_EXT_RSRC_SHIFT 7u
+#define OOBR_EXT_RSRC_REQ_ADDR(oobr_base, core_idx) (uint32)((uintptr)(oobr_base) +\
+ OOBR_EXT_RSRC_OFFSET + ((core_idx) << OOBR_EXT_RSRC_SHIFT) +\
+ OOBR_EXT_RSRC_REQ_PERCORE_OFFSET)
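+
+/* Address-computation sketch (illustrative): for core index 2 the macro
+ * expands to oobr_base + 0x100 + (2 << 7) + 0x34 = oobr_base + 0x234:
+ *
+ *   uint32 addr = OOBR_EXT_RSRC_REQ_ADDR(oobr_base, 2);
+ *
+ * 'oobr_base' here stands for the OOBR core's backplane base address.
+ */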
+
+typedef volatile struct hndoobr_percore_reg {
+ uint32 sourcesel[OOBR_INTR_PER_CONFREG]; /* 0x00 - 0x0c */
+ uint32 destsel[OOBR_INTR_PER_CONFREG]; /* 0x10 - 0x1c */
+ uint32 reserved[4];
+ uint32 clkpwrreq; /* 0x30 */
+ uint32 extrsrcreq; /* 0x34 */
+ uint32 config; /* 0x38 */
+ uint32 reserved1[17]; /* 0x3c to 0x7c */
+} hndoobr_percore_reg_t;
+
+/* capability reg */
+#define OOBR_CAP_CORECNT_MASK 0x0000001Fu
+#define OOBR_CAP_MAX_INT2CORE_MASK 0x00F00000u
+#define OOBR_CAP_MAX_INT2CORE_SHIFT 20u
+
+#define OOBR_MAX_INT_PER_REG 4u
+
+/* CoreNConfig reg */
+#define OOBR_PERCORE_CORENCONFIG_INTOUTPUTS_MASK 0x0000FF00u
+#define OOBR_PERCORE_CORENCONFIG_INTOUTPUTS_SHIFT 8u
+
+typedef volatile struct hndoobr_reg {
+ uint32 capability; /* 0x00 */
+ uint32 reserved[3];
+ uint32 intstatus[4]; /* 0x10 - 0x1c */
+ uint32 reserved1[56]; /* 0x20 - 0xfc */
+ hndoobr_percore_reg_t percore_reg[1]; /* 0x100 */
+} hndoobr_reg_t;
+
+#endif /* _hndoobr_h_ */
diff --git a/bcmdhd.101.10.361.x/include/hndpmu.h b/bcmdhd.101.10.361.x/include/hndpmu.h
new file mode 100755
index 0000000..63d14f9
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/hndpmu.h
@@ -0,0 +1,348 @@
+/*
+ * HND SiliconBackplane PMU support.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _hndpmu_h_
+#define _hndpmu_h_
+
+#include <typedefs.h>
+#include <osl_decl.h>
+#include <siutils.h>
+#include <sbchipc.h>
+#if defined(BTOVERPCIE) || defined(BT_WLAN_REG_ON_WAR)
+#include <hnd_gcisem.h>
+#endif /* BTOVERPCIE || BT_WLAN_REG_ON_WAR */
+
+#if !defined(BCMDONGLEHOST)
+
+#define SET_LDO_VOLTAGE_LDO1 1
+#define SET_LDO_VOLTAGE_LDO2 2
+#define SET_LDO_VOLTAGE_LDO3 3
+#define SET_LDO_VOLTAGE_PAREF 4
+#define SET_LDO_VOLTAGE_CLDO_PWM 5
+#define SET_LDO_VOLTAGE_CLDO_BURST 6
+#define SET_LDO_VOLTAGE_CBUCK_PWM 7
+#define SET_LDO_VOLTAGE_CBUCK_BURST 8
+#define SET_LDO_VOLTAGE_LNLDO1 9
+#define SET_LDO_VOLTAGE_LNLDO2_SEL 10
+#define SET_LNLDO_PWERUP_LATCH_CTRL 11
+#define SET_LDO_VOLTAGE_LDO3P3 12
+
+#define BBPLL_NDIV_FRAC_BITS 24
+#define P1_DIV_SCALE_BITS 12
+
+#define PMUREQTIMER (1 << 0)
+
+#define XTAL_FREQ_40MHZ 40000
+#define XTAL_FREQ_54MHZ 54000
+
+/* selects core based on AOB_ENAB() */
+#define PMUREGADDR(sih, pmur, ccr, member) \
+ (AOB_ENAB(sih) ? (&(pmur)->member) : (&(ccr)->member))
+
+/* prevents backplane stall caused by subsequent writes to 'ilp domain' PMU registers */
+#define HND_PMU_SYNC_WR(sih, pmur, ccr, osh, r, v) do { \
+ if ((sih) && (sih)->pmurev >= 22) { \
+ while (R_REG(osh, PMUREGADDR(sih, pmur, ccr, pmustatus)) & \
+ PST_SLOW_WR_PENDING) { \
+ ; /* empty */ \
+ } \
+ } \
+ W_REG(osh, r, v); \
+ (void)R_REG(osh, r); \
+} while (0)
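+
+/* Usage sketch (illustrative): writing an ILP-domain register such as
+ * 'pmutimer' through the stall-safe macro, with 'pmu'/'cc' register views
+ * obtained from si_setcore():
+ *
+ *   HND_PMU_SYNC_WR(sih, pmu, cc, osh,
+ *                   PMUREGADDR(sih, pmu, cc, pmutimer), val);
+ */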
+
+/* PMU Stat Timer */
+
+/* for count mode */
+enum {
+ PMU_STATS_LEVEL_HIGH = 0,
+ PMU_STATS_LEVEL_LOW,
+ PMU_STATS_EDGE_RISE,
+ PMU_STATS_EDGE_FALL
+};
+
+typedef struct {
+ uint8 src_num; /* predefined source hw signal num to map timer */
+ bool enable; /* timer enable/disable */
+ bool int_enable; /* overflow interrupts enable/disable */
+ uint8 cnt_mode;
+} pmu_stats_timer_t;
+
+/* internal hw signal source number for Timer */
+#define SRC_PMU_RESRC_OFFSET 0x40
+
+#define SRC_LINK_IN_L12 0
+#define SRC_LINK_IN_L23 1
+#define SRC_PM_ST_IN_D0 2
+#define SRC_PM_ST_IN_D3 3
+
+#define SRC_XTAL_PU (SRC_PMU_RESRC_OFFSET + RES4347_XTAL_PU)
+#define SRC_CORE_RDY_MAIN (SRC_PMU_RESRC_OFFSET + RES4347_CORE_RDY_MAIN)
+#define SRC_CORE_RDY_AUX (SRC_PMU_RESRC_OFFSET + RES4347_CORE_RDY_AUX)
+
+#ifdef BCMPMU_STATS
+extern bool _pmustatsenab;
+#if defined(ROM_ENAB_RUNTIME_CHECK)
+ #define PMU_STATS_ENAB() (_pmustatsenab)
+#elif defined(BCMPMU_STATS_DISABLED)
+ #define PMU_STATS_ENAB() (0)
+#else
+ #define PMU_STATS_ENAB() (1)
+#endif
+#else
+ #define PMU_STATS_ENAB() (0)
+#endif /* BCMPMU_STATS */
+
+#define RES4369_HTAVAIL_VAL 0x00a80022
+
+#if defined(BTOVERPCIE) && defined(BT_WLAN_REG_ON_WAR)
+#error "'BT over PCIe' and 'WLAN/BT REG_ON WAR' are mutually exclusive as both share the same GCI semaphore - THREAD_0_GCI_SEM_3_ID"
+#endif /* BTOVERPCIE && BT_WLAN_REG_ON_WAR */
+
+#if defined(BTOVERPCIE)
+#define GCI_PLL_LOCK_SEM THREAD_0_GCI_SEM_3_ID
+/* changed from msec to usec */
+#define GCI_PLL_LOCK_SEM_TIMEOUT (GCI_SEM_TIMEOUT_AFTER_RESERVE * 1000)
+#endif /* BTOVERPCIE */
+
+#if defined(BT_WLAN_REG_ON_WAR)
+#define GCI_BT_WLAN_REG_ON_WAR_SEM THREAD_0_GCI_SEM_3_ID
+#define GCI_BT_WLAN_REG_ON_WAR_SEM_TIMEOUT (GCI_SEM_TIMEOUT_AFTER_RESERVE * 1000)
+#endif /* BT_WLAN_REG_ON_WAR */
+
+#define GCI_INDIRECT_ACCESS_SEM THREAD_0_GCI_SEM_2_ID
+#define GCI_INDIRECT_ACCESS_SEM_TIMEOUT (GCI_SEM_TIMEOUT_AFTER_RESERVE * 1000)
+
+#define GCI_TREFUP_DS_SEM THREAD_0_GCI_SEM_5_ID
+#define GCI_TREFUP_DS_SEM_TIMEOUT (GCI_SEM_TIMEOUT_AFTER_RESERVE * 1000)
+
+#define GCI_BT_BOOTSTAGE_MEMOFFSET (0x570u)
+#define GCI_BT_BOOTSTAGE_FW_WAIT 0u /* BT ROM code waiting on FW boot */
+#define GCI_BT_BOOTSTAGE_FW_BOOT 2u /* upon FW boot/start */
+#define GCI_BT_BOOTSTAGE_FW_TRAP 3u /* upon a trap */
+#define GCI_BT_BOOTSTAGE_FW_INVALID 0xFFu
+
+#define GCI_TREFUP_DS_MEMOFFSET (0x57Cu)
+#define GCI_TREFUP_DS_WLAN (1u << 0u)
+#define GCI_TREFUP_DS_BT (1u << 1u)
+#define GCI_SHARED_SFLASH_RSVD (1u << 2u)
+
+#define GCI_SHARED_SFLASH_SEM THREAD_0_GCI_SEM_6_ID
+#define GCI_SHARED_SFLASH_SEM_TIMEOUT (GCI_SEM_TIMEOUT_AFTER_RESERVE * 1000)
+#define GCI_SHARED_SFLASH_SEM_ERASE_RSVD_TIMEOUT (50 + 30) /* 50 us + headroom */
+
+#define SLEW_RATE_VALUE_REG_4369 (PMU_VREG_6)
+#define SLEW_RATE_SHIFT_4369(x) (9u + (x * 8u))
+#define SLEW_RATE_SIZE_4369 (3u)
+#define SLEW_RATE_MASK_4369 ((1u << SLEW_RATE_SIZE_4369) - 1u)
+#define SOFT_START_EN_REG_4369 (PMU_VREG_5)
+#define SOFT_START_EN_SHIFT_4369(x) (4u + x)
+#define SOFT_START_EN_SIZE_4369 (1u)
+#define SOFT_START_EN_MASK_4369 ((1u << SOFT_START_EN_SIZE_4369) - 1u)
+#define SOFT_START_EN_VALUE_4369 (1u)
+
+#define SLEW_RATE_VALUE_REG_4378 (PMU_VREG_6)
+#define SLEW_RATE_SHIFT_4378(x) (9u + (x * 8u))
+#define SLEW_RATE_SIZE_4378 (3u)
+#define SLEW_RATE_MASK_4378 ((1u << SLEW_RATE_SIZE_4378) - 1u)
+#define SOFT_START_EN_REG_4378 (PMU_VREG_5)
+#define SOFT_START_EN_SHIFT_4378(x) (4u + x)
+#define SOFT_START_EN_SIZE_4378 (1u)
+#define SOFT_START_EN_MASK_4378 ((1u << SOFT_START_EN_SIZE_4378) - 1u)
+#define SOFT_START_EN_VALUE_4378 (1u)
+#define SOFT_START_EN_VALUE_4378_REV37 (0u)
+
+#define SLEW_RATE_VALUE_REG_4387 (PMU_VREG_6)
+#define SLEW_RATE_SHIFT_4387(x) (18u)
+#define SLEW_RATE_SIZE_4387 (2u)
+#define SLEW_RATE_MASK_4387 ((1u << SLEW_RATE_SIZE_4387) - 1u)
+#define SOFT_START_EN_REG_4387 (PMU_VREG_6)
+#define SOFT_START_EN_SHIFT_4387(x) (17u)
+#define SOFT_START_EN_SIZE_4387 (1u)
+#define SOFT_START_EN_MASK_4387 ((1u << SOFT_START_EN_SIZE_4387) - 1u)
+#define SOFT_START_EN_VALUE_4387 (0u)
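+
+/* Read-modify-write sketch (illustrative, assuming si_pmu_vreg_control(),
+ * declared below, applies 'val' under 'mask'): setting the 4369 slew-rate
+ * field for regulator index x could look like
+ *
+ *   uint32 mask = SLEW_RATE_MASK_4369 << SLEW_RATE_SHIFT_4369(x);
+ *   si_pmu_vreg_control(sih, SLEW_RATE_VALUE_REG_4369, mask,
+ *                       rate << SLEW_RATE_SHIFT_4369(x));
+ */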
+
+extern void si_pmu_init(si_t *sih, osl_t *osh);
+extern void si_pmu_chip_init(si_t *sih, osl_t *osh);
+extern void si_pmu_pll_init(si_t *sih, osl_t *osh, uint32 xtalfreq);
+extern void si_pmu_res_init(si_t *sih, osl_t *osh);
+extern void si_pmu_swreg_init(si_t *sih, osl_t *osh);
+extern void si_pmu_res_minmax_update(si_t *sih, osl_t *osh);
+extern void si_pmu_clear_intmask(si_t *sih);
+
+extern uint32 si_pmu_si_clock(si_t *sih, osl_t *osh); /* returns [Hz] units */
+extern uint32 si_pmu_cpu_clock(si_t *sih, osl_t *osh); /* returns [Hz] units */
+extern uint32 si_pmu_mem_clock(si_t *sih, osl_t *osh); /* returns [Hz] units */
+extern uint32 si_pmu_alp_clock(si_t *sih, osl_t *osh); /* returns [Hz] units */
+extern void si_pmu_ilp_clock_set(uint32 cycles);
+extern uint32 si_pmu_ilp_clock(si_t *sih, osl_t *osh); /* returns [Hz] units */
+
+extern void si_pmu_set_ldo_voltage(si_t *sih, osl_t *osh, uint8 ldo, uint8 voltage);
+extern uint16 si_pmu_fast_pwrup_delay(si_t *sih, osl_t *osh);
+extern uint si_pmu_fast_pwrup_delay_dig(si_t *sih, osl_t *osh);
+extern void si_pmu_pllupd(si_t *sih);
+extern void si_pmu_spuravoid(si_t *sih, osl_t *osh, uint8 spuravoid);
+extern void si_pmu_pll_off_PARR(si_t *sih, osl_t *osh, uint32 *min_res_mask,
+ uint32 *max_res_mask, uint32 *clk_ctl_st);
+extern uint32 si_pmu_pll28nm_fvco(si_t *sih);
+/* The function below is only for BBPLL parallel purposes */
+extern void si_pmu_gband_spurwar(si_t *sih, osl_t *osh);
+
+extern bool si_pmu_is_otp_powered(si_t *sih, osl_t *osh);
+extern uint32 si_pmu_measure_alpclk(si_t *sih, osl_t *osh);
+
+extern uint32 si_pmu_chipcontrol(si_t *sih, uint reg, uint32 mask, uint32 val);
+#if defined(SAVERESTORE)
+extern void si_set_abuck_mode_4362(si_t *sih, uint8 mode);
+#endif /* SAVERESTORE */
+
+#define si_pmu_regcontrol si_pmu_vreg_control /* prevents build errors due to usage in PHY code */
+extern uint32 si_pmu_vreg_control(si_t *sih, uint reg, uint32 mask, uint32 val);
+extern uint32 si_pmu_pllcontrol(si_t *sih, uint reg, uint32 mask, uint32 val);
+extern void si_pmu_pllupd(si_t *sih);
+
+extern uint32 si_pmu_waitforclk_on_backplane(si_t *sih, osl_t *osh, uint32 clk, uint32 delay);
+extern uint32 si_pmu_get_bb_vcofreq(si_t *sih, osl_t *osh, int xtalfreq);
+typedef void (*si_pmu_callback_t)(void* arg);
+
+extern uint32 si_mac_clk(si_t *sih, osl_t *osh);
+extern void si_pmu_switch_on_PARLDO(si_t *sih, osl_t *osh);
+extern void si_pmu_switch_off_PARLDO(si_t *sih, osl_t *osh);
+
+/* TODO: need a better fn name or better abstraction than the raw fvco
+ * and MAC clock channel divisor...
+ */
+extern int si_pmu_fvco_macdiv(si_t *sih, uint32 *fvco, uint32 *div);
+
+extern bool si_pmu_reset_ret_sleep_log(si_t *sih, osl_t *osh);
+extern bool si_pmu_reset_chip_sleep_log(si_t *sih, osl_t *osh);
+extern int si_pmu_openloop_cal(si_t *sih, uint16 currtemp);
+
+#ifdef LDO3P3_MIN_RES_MASK
+extern int si_pmu_min_res_ldo3p3_set(si_t *sih, osl_t *osh, bool on);
+extern int si_pmu_min_res_ldo3p3_get(si_t *sih, osl_t *osh, int *res);
+#endif /* LDO3P3_MIN_RES_MASK */
+
+void si_pmu_bt_ldo_pu(si_t *sih, bool up);
+
+int si_pmu_ldo3p3_soft_start_wl_get(si_t *sih, osl_t *osh, int *res);
+int si_pmu_ldo3p3_soft_start_wl_set(si_t *sih, osl_t *osh, uint32 slew_rate);
+int si_pmu_ldo3p3_soft_start_bt_get(si_t *sih, osl_t *osh, int *res);
+int si_pmu_ldo3p3_soft_start_bt_set(si_t *sih, osl_t *osh, uint32 slew_rate);
+extern int si_pmu_min_res_otp_pu_set(si_t *sih, osl_t *osh, bool on);
+#endif /* !defined(BCMDONGLEHOST) */
+
+#if defined(EDV)
+extern uint32 si_pmu_get_backplaneclkspeed(si_t *sih);
+extern void si_pmu_update_backplane_clock(si_t *sih, osl_t *osh, uint reg, uint32 mask, uint32 val);
+#endif
+
+extern uint32 si_pmu_rsrc_macphy_clk_deps(si_t *sih, osl_t *osh, int maccore_index);
+extern uint32 si_pmu_rsrc_ht_avail_clk_deps(si_t *sih, osl_t *osh);
+extern uint32 si_pmu_rsrc_cb_ready_deps(si_t *sih, osl_t *osh);
+
+extern void si_pmu_otp_power(si_t *sih, osl_t *osh, bool on, uint32* min_res_mask);
+extern void si_sdiod_drive_strength_init(si_t *sih, osl_t *osh, uint32 drivestrength);
+
+extern void si_pmu_slow_clk_reinit(si_t *sih, osl_t *osh);
+extern void si_pmu_avbtimer_enable(si_t *sih, osl_t *osh, bool set_flag);
+extern uint32 si_pmu_dump_pmucap_binary(si_t *sih, uchar *p);
+extern uint32 si_pmu_dump_buf_size_pmucap(si_t *sih);
+extern int si_pmu_wait_for_steady_state(si_t *sih, osl_t *osh, pmuregs_t *pmu);
+#ifdef ATE_BUILD
+extern void hnd_pmu_clr_int_sts_req_active(osl_t *hnd_osh, si_t *hnd_sih);
+#endif
+extern uint32 si_pmu_wake_bit_offset(si_t *sih);
+extern uint32 si_pmu_get_pmutimer(si_t *sih);
+extern void si_pmu_set_min_res_mask(si_t *sih, osl_t *osh, uint min_res_mask);
+extern void si_pmu_set_mac_rsrc_req(si_t *sih, int macunit);
+extern void si_pmu_set_mac_rsrc_req_sc(si_t *sih, osl_t *osh);
+extern bool si_pmu_fast_lpo_enable_pcie(si_t *sih);
+extern bool si_pmu_fast_lpo_enable_pmu(si_t *sih);
+extern uint32 si_cur_pmu_time(si_t *sih);
+extern bool si_pmu_cap_fast_lpo(si_t *sih);
+extern int si_pmu_fast_lpo_disable(si_t *sih);
+extern void si_pmu_dmn1_perst_wakeup(si_t *sih, bool set);
+#ifdef BCMPMU_STATS
+extern void si_pmustatstimer_init(si_t *sih);
+extern void si_pmustatstimer_dump(si_t *sih);
+extern void si_pmustatstimer_start(si_t *sih, uint8 timerid);
+extern void si_pmustatstimer_stop(si_t *sih, uint8 timerid);
+extern void si_pmustatstimer_clear(si_t *sih, uint8 timerid);
+extern void si_pmustatstimer_clear_overflow(si_t *sih);
+extern uint32 si_pmustatstimer_read(si_t *sih, uint8 timerid);
+extern void si_pmustatstimer_cfg_src_num(si_t *sih, uint8 src_num, uint8 timerid);
+extern void si_pmustatstimer_cfg_cnt_mode(si_t *sih, uint8 cnt_mode, uint8 timerid);
+extern void si_pmustatstimer_int_enable(si_t *sih);
+extern void si_pmustatstimer_int_disable(si_t *sih);
+#endif /* BCMPMU_STATS */
+extern int si_pmu_min_res_set(si_t *sih, osl_t *osh, uint min_mask, bool set);
+extern void si_pmu_disable_intr_pwrreq(si_t *sih);
+
+#ifdef DONGLEBUILD
+/* Get PMU registers in rodata */
+extern int si_pmu_regs_in_rodata_dump(void *sih, void *arg2, uint32 *bufptr, uint16 *len);
+#endif
+
+extern void si_pmu_fis_setup(si_t *sih);
+
+extern uint si_pmu_get_mac_rsrc_req_tmr_cnt(si_t *sih);
+extern uint si_pmu_get_pmu_interrupt_rcv_cnt(si_t *sih);
+
+extern bool _bcm_pwr_opt_dis;
+#define BCM_PWR_OPT_ENAB() (FALSE)
+
+extern int si_pmu_mem_pwr_off(si_t *sih, int core_idx);
+extern int si_pmu_mem_pwr_on(si_t *sih);
+extern int si_pmu_lvm_csr_update(si_t *sih, bool lvm);
+
+#if defined(BT_WLAN_REG_ON_WAR)
+#define REG_ON_WAR_PMU_EXT_WAKE_REQ_MASK0_VAL 0x060000CDu
+
+extern void si_pmu_reg_on_war_ext_wake_perst_set(si_t *sih);
+extern void si_pmu_reg_on_war_ext_wake_perst_clear(si_t *sih);
+#endif /* BT_WLAN_REG_ON_WAR */
+
+#if defined (BCMSRTOPOFF)
+ extern bool _srtopoff_enab;
+#if defined(ROM_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD)
+ #define BCMSRTOPOFF_ENAB() (_srtopoff_enab)
+#elif defined(BCMSRTOPOFF_DISABLED)
+ #define BCMSRTOPOFF_ENAB() (0)
+#else
+ #define BCMSRTOPOFF_ENAB() (_srtopoff_enab)
+#endif
+#else
+ #define BCMSRTOPOFF_ENAB() (0)
+#endif /* BCMSRTOPOFF */
+
+#ifdef BCM_PMU_FLL_PU_MANAGE
+#define PMU_FLL_PU_ENAB() (TRUE)
+#else
+#define PMU_FLL_PU_ENAB() (FALSE)
+#endif
+
+extern pmuregs_t *hnd_pmur; /* PMU core regs */
+extern void si_pmu_res_state_wait(si_t *sih, uint rsrc);
+#endif /* _hndpmu_h_ */
diff --git a/bcmdhd.101.10.361.x/include/hndsoc.h b/bcmdhd.101.10.361.x/include/hndsoc.h
new file mode 100755
index 0000000..7349586
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/hndsoc.h
@@ -0,0 +1,353 @@
+/*
+ * Broadcom HND chip & on-chip-interconnect-related definitions.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _HNDSOC_H
+#define _HNDSOC_H
+
+/* Include the soci specific files */
+#include <sbconfig.h>
+#include <aidmp.h>
+
+/*
+ * SOC Interconnect Address Map.
+ * All regions may not exist on all chips.
+ */
+#define SI_SDRAM_BASE 0x00000000 /* Physical SDRAM */
+#define SI_PCI_MEM 0x08000000 /* Host Mode sb2pcitranslation0 (64 MB) */
+#define SI_PCI_MEM_SZ (64 * 1024 * 1024)
+#define SI_PCI_CFG 0x0c000000 /* Host Mode sb2pcitranslation1 (64 MB) */
+#define SI_SDRAM_SWAPPED 0x10000000 /* Byteswapped Physical SDRAM */
+#define SI_SDRAM_R2 0x80000000 /* Region 2 for sdram (512 MB) */
+
+#ifndef SI_ENUM_BASE_DEFAULT
+#define SI_ENUM_BASE_DEFAULT 0x18000000 /* Enumeration space base */
+#endif
+
+#ifndef SI_WRAP_BASE_DEFAULT
+#define SI_WRAP_BASE_DEFAULT 0x18100000 /* Wrapper space base */
+#endif
+
+#define WL_BRIDGE1_S (0x18132000)
+#define WL_BRIDGE2_S (0x18133000)
+
+/** new(er) chips started locating their chipc core at a different BP address than 0x1800_0000 */
+#ifdef DONGLEBUILD
+// firmware is always compiled for a particular chip
+#define SI_ENUM_BASE(sih) SI_ENUM_BASE_DEFAULT
+#define SI_WRAP_BASE(sih) SI_WRAP_BASE_DEFAULT
+#else
+// NIC and DHD driver binaries should support both old(er) and new(er) chips at the same time
+#define SI_ENUM_BASE(sih) ((sih)->enum_base)
+#define SI_WRAP_BASE(sih) (SI_ENUM_BASE(sih) + 0x00100000)
+#endif /* DONGLEBUILD */
+
+#define SI_CORE_SIZE 0x1000 /* each core gets 4Kbytes for registers */
+
+#define SI_NIC400_GPV_BASE 0x18200000 /* NIC-400 Global Programmers View (GPV) */
+#define SI_GPV_WR_CAP_ADDR 0x4008 /* WR-CAP offset */
+#define SI_GPV_RD_CAP_EN 0x1 /* issue read */
+#define SI_GPV_WR_CAP_EN 0x2 /* issue write */
+
+#define SI_GPV_SL4_BM_ADDR 0x44024 /* NIC-400 Slave interface 4 Bypass merge */
+#define SI_GPV_SL6_BM_ADDR 0x46024 /* NIC-400 Slave interface 6 Bypass merge */
+#define SI_GPV_SL8_BM_ADDR 0x4a024 /* NIC-400 Slave interface 8 Bypass merge */
+#define SI_GPV_SL9_BM_ADDR 0x4b024 /* NIC-400 Slave interface 9 Bypass merge */
+
+/* AXI Slave Interface Block (ASIB) offsets */
+#define ASIB_FN_MOD2 0x24
+
+#ifndef SI_MAXCORES
+#ifdef _RTE_
+#define SI_MAXCORES 16 /* Max cores (this is arbitrary, for software
+ * convenience and could be changed if we
+ * make any larger chips)
+ */
+#else
+#define SI_MAXCORES 32 /* NorthStar has more cores */
+#endif /* _RTE_ */
+#endif /* SI_MAXCORES */
+
+#define SI_MAXBR 4 /* Max bridges (this is arbitrary, for software
+ * convenience and could be changed if we
+ * make any larger chips)
+ */
+
+#define SI_FASTRAM 0x19000000 /* On-chip RAM on chips that also have DDR */
+#define SI_FASTRAM_SWAPPED 0x19800000
+
+#define SI_FLASH2 0x1c000000 /* Flash Region 2 (region 1 shadowed here) */
+#define SI_FLASH2_SZ 0x02000000 /* Size of Flash Region 2 */
+#define SI_ARMCM3_ROM 0x1e000000 /* ARM Cortex-M3 ROM */
+#define SI_FLASH1 0x1fc00000 /* MIPS Flash Region 1 */
+#define SI_FLASH1_SZ 0x00400000 /* MIPS Size of Flash Region 1 */
+#define SI_FLASH_WINDOW 0x01000000 /* Flash XIP Window */
+
+#define SI_NS_NANDFLASH 0x1c000000 /* NorthStar NAND flash base */
+#define SI_NS_NORFLASH 0x1e000000 /* NorthStar NOR flash base */
+#define SI_NS_ROM 0xfffd0000 /* NorthStar ROM */
+#define SI_NS_FLASH_WINDOW 0x02000000 /* Flash XIP Window */
+
+#define SI_ARM7S_ROM 0x20000000 /* ARM7TDMI-S ROM */
+#define SI_ARMCR4_ROM 0x000f0000 /* ARM Cortex-R4 ROM */
+#define SI_ARMCM3_SRAM2 0x60000000 /* ARM Cortex-M3 SRAM Region 2 */
+#define SI_ARM7S_SRAM2 0x80000000 /* ARM7TDMI-S SRAM Region 2 */
+#define SI_ARMCA7_ROM 0x00000000 /* ARM Cortex-A7 ROM */
+#ifndef SI_ARMCA7_RAM
+#define SI_ARMCA7_RAM 0x00200000 /* ARM Cortex-A7 RAM */
+#endif
+#define SI_ARM_FLASH1 0xffff0000 /* ARM Flash Region 1 */
+#define SI_ARM_FLASH1_SZ 0x00010000 /* ARM Size of Flash Region 1 */
+
+#define SI_SFLASH 0x14000000
+#define SI_PCI_DMA 0x40000000 /* Client Mode sb2pcitranslation2 (1 GB) */
+#define SI_PCI_DMA2 0x80000000 /* Client Mode sb2pcitranslation2 (1 GB) */
+#define SI_PCI_DMA_SZ 0x40000000 /* Client Mode sb2pcitranslation2 size in bytes */
+#define SI_PCIE_DMA_L32 0x00000000 /* PCIE Client Mode sb2pcitranslation2
+ * (2 ZettaBytes), low 32 bits
+ */
+#define SI_PCIE_DMA_H32 0x80000000 /* PCIE Client Mode sb2pcitranslation2
+ * (2 ZettaBytes), high 32 bits
+ */
+
+/* APB bridge code */
+#define APB_BRIDGE_ID 0x135 /* APB Bridge 0, 1, etc. */
+
+/* ADB bridge code */
+#define ADB_BRIDGE_ID 0x031
+
+/* AXI-AHB bridge code */
+#define AXI2AHB_BRIDGE_ID 0x240 /* AXI_AHB Bridge */
+
+/* core codes */
+#define NODEV_CORE_ID 0x700 /* Invalid coreid */
+#define CC_CORE_ID 0x800 /* chipcommon core */
+#define ILINE20_CORE_ID 0x801 /* iline20 core */
+#define SRAM_CORE_ID 0x802 /* sram core */
+#define SDRAM_CORE_ID 0x803 /* sdram core */
+#define PCI_CORE_ID 0x804 /* pci core */
+#define MIPS_CORE_ID 0x805 /* mips core */
+#define ENET_CORE_ID 0x806 /* enet mac core */
+#define CODEC_CORE_ID 0x807 /* v90 codec core */
+#define USB_CORE_ID 0x808 /* usb 1.1 host/device core */
+#define ADSL_CORE_ID 0x809 /* ADSL core */
+#define ILINE100_CORE_ID 0x80a /* iline100 core */
+#define IPSEC_CORE_ID 0x80b /* ipsec core */
+#define UTOPIA_CORE_ID 0x80c /* utopia core */
+#define PCMCIA_CORE_ID 0x80d /* pcmcia core */
+#define SOCRAM_CORE_ID 0x80e /* internal memory core */
+#define MEMC_CORE_ID 0x80f /* memc sdram core */
+#define OFDM_CORE_ID 0x810 /* OFDM phy core */
+#define EXTIF_CORE_ID 0x811 /* external interface core */
+#define D11_CORE_ID 0x812 /* 802.11 MAC core */
+#define APHY_CORE_ID 0x813 /* 802.11a phy core */
+#define BPHY_CORE_ID 0x814 /* 802.11b phy core */
+#define GPHY_CORE_ID 0x815 /* 802.11g phy core */
+#define MIPS33_CORE_ID 0x816 /* mips3302 core */
+#define USB11H_CORE_ID 0x817 /* usb 1.1 host core */
+#define USB11D_CORE_ID 0x818 /* usb 1.1 device core */
+#define USB20H_CORE_ID 0x819 /* usb 2.0 host core */
+#define USB20D_CORE_ID 0x81a /* usb 2.0 device core */
+#define SDIOH_CORE_ID 0x81b /* sdio host core */
+#define ROBO_CORE_ID 0x81c /* roboswitch core */
+#define ATA100_CORE_ID 0x81d /* parallel ATA core */
+#define SATAXOR_CORE_ID 0x81e /* serial ATA & XOR DMA core */
+#define GIGETH_CORE_ID 0x81f /* gigabit ethernet core */
+#define PCIE_CORE_ID 0x820 /* pci express core */
+#define NPHY_CORE_ID 0x821 /* 802.11n 2x2 phy core */
+#define SRAMC_CORE_ID 0x822 /* SRAM controller core */
+#define MINIMAC_CORE_ID 0x823 /* MINI MAC/phy core */
+#define ARM11_CORE_ID 0x824 /* ARM 1176 core */
+#define ARM7S_CORE_ID 0x825 /* ARM7tdmi-s core */
+#define LPPHY_CORE_ID 0x826 /* 802.11a/b/g phy core */
+#define PMU_CORE_ID 0x827 /* PMU core */
+#define SSNPHY_CORE_ID 0x828 /* 802.11n single-stream phy core */
+#define SDIOD_CORE_ID 0x829 /* SDIO device core */
+#define ARMCM3_CORE_ID 0x82a /* ARM Cortex M3 core */
+#define HTPHY_CORE_ID 0x82b /* 802.11n 4x4 phy core */
+#define MIPS74K_CORE_ID 0x82c /* mips 74k core */
+#define GMAC_CORE_ID 0x82d /* Gigabit MAC core */
+#define DMEMC_CORE_ID 0x82e /* DDR1/2 memory controller core */
+#define PCIERC_CORE_ID 0x82f /* PCIE Root Complex core */
+#define OCP_CORE_ID 0x830 /* OCP2OCP bridge core */
+#define SC_CORE_ID 0x831 /* shared common core */
+#define AHB_CORE_ID 0x832 /* OCP2AHB bridge core */
+#define SPIH_CORE_ID 0x833 /* SPI host core */
+#define I2S_CORE_ID 0x834 /* I2S core */
+#define DMEMS_CORE_ID 0x835 /* SDR/DDR1 memory controller core */
+#define DEF_SHIM_COMP 0x837 /* SHIM component in ubus/6362 */
+
+#define ACPHY_CORE_ID 0x83b /* Dot11 ACPHY */
+#define PCIE2_CORE_ID 0x83c /* pci express Gen2 core */
+#define USB30D_CORE_ID 0x83d /* usb 3.0 device core */
+#define ARMCR4_CORE_ID 0x83e /* ARM CR4 CPU */
+#define GCI_CORE_ID 0x840 /* GCI Core */
+#define SR_CORE_ID 0x841 /* SR_CORE ID */
+#define M2MDMA_CORE_ID 0x844 /* memory to memory dma */
+#define CMEM_CORE_ID 0x846 /* CNDS DDR2/3 memory controller */
+#define ARMCA7_CORE_ID 0x847 /* ARM CA7 CPU */
+#define SYSMEM_CORE_ID 0x849 /* System memory core */
+#define HUB_CORE_ID 0x84b /* Hub core ID */
+#define HWA_CORE_ID 0x851 /* HWA Core ID */
+#define SPMI_SLAVE_CORE_ID 0x855 /* SPMI Slave Core ID */
+#define BT_CORE_ID 0x857 /* Bluetooth Core ID */
+#define HND_OOBR_CORE_ID 0x85c /* Hnd oob router core ID */
+#define SOE_CORE_ID 0x85d /* SOE core */
+#define APB_BRIDGE_CORE_ID 0x135 /* APB bridge core ID */
+#define AXI_CORE_ID 0x301 /* AXI/GPV core ID */
+#define EROM_CORE_ID 0x366 /* EROM core ID */
+#define OOB_ROUTER_CORE_ID 0x367 /* OOB router core ID */
+#define CCI400_CORE_ID 0x420 /* CCI-400 (Cache Coherent Interconnect) core ID */
+#define DEF_AI_COMP 0xfff /* Default component, in ai chips it maps all
+ * unused address ranges
+ */
+
+#define NS_PCIEG2_CORE_ID 0x501 /* PCIE Gen 2 core */
+#define NS_DMA_CORE_ID 0x502 /* DMA core */
+#define NS_SDIO3_CORE_ID 0x503 /* SDIO3 core */
+#define NS_USB20_CORE_ID 0x504 /* USB2.0 core */
+#define NS_USB30_CORE_ID 0x505 /* USB3.0 core */
+#define NS_A9JTAG_CORE_ID 0x506 /* ARM Cortex A9 JTAG core */
+#define NS_DDR23_CORE_ID 0x507 /* Denali DDR2/DDR3 memory controller */
+#define NS_ROM_CORE_ID 0x508 /* ROM core */
+#define NS_NAND_CORE_ID 0x509 /* NAND flash controller core */
+#define NS_QSPI_CORE_ID 0x50a /* SPI flash controller core */
+#define NS_CCB_CORE_ID 0x50b /* ChipcommonB core */
+#define NS_SOCRAM_CORE_ID 0x50e /* internal memory core */
+#define ARMCA9_CORE_ID 0x510 /* ARM Cortex A9 core (ihost) */
+#define NS_IHOST_CORE_ID ARMCA9_CORE_ID /* ARM Cortex A9 core (ihost) */
+#define AMEMC_CORE_ID 0x52e /* DDR1/2 memory controller core */
+#define ALTA_CORE_ID 0x534 /* I2S core */
+#define DDR23_PHY_CORE_ID 0x5dd
+
+#define SI_PCI1_MEM 0x40000000 /* Host Mode sb2pcitranslation0 (64 MB) */
+#define SI_PCI1_CFG 0x44000000 /* Host Mode sb2pcitranslation1 (64 MB) */
+#define SI_PCIE1_DMA_H32 0xc0000000 /* PCIE Client Mode sb2pcitranslation2
+ * (2 ZettaBytes), high 32 bits
+ */
+#define NS_PCIEG2_CORE_REV_B0 0x7 /* NS-B0 PCIE Gen 2 core rev */
+
+/* There are TWO constants on all HND chips: SI_ENUM_BASE_DEFAULT above,
+ * and chipcommon being the first core:
+ */
+#define SI_CC_IDX 0
+/* SOC Interconnect types (aka chip types) */
+#define SOCI_SB 0u
+#define SOCI_AI 1u
+#define SOCI_UBUS 2u
+#define SOCI_NAI 3u
+#define SOCI_DVTBUS 4u /* BCM7XXX Digital Video Tech bus */
+#define SOCI_NCI 6u /* NCI (non coherent interconnect) i.e. BOOKER */
+
+/* Common core control flags */
+#define SICF_BIST_EN 0x8000
+#define SICF_PME_EN 0x4000
+#define SICF_CORE_BITS 0x3ffc
+#define SICF_PCEN 0x0004
+#define SICF_FGC 0x0002
+#define SICF_CLOCK_EN 0x0001
+
+/* Common core status flags */
+#define SISF_BIST_DONE 0x8000
+#define SISF_BIST_ERROR 0x4000
+#define SISF_GATED_CLK 0x2000
+#define SISF_DMA64 0x1000
+#define SISF_CORE_BITS 0x0fff
+#define SISF_CORE_BITS_SCAN 0x0010 /* SCAN core */
+
+/* NorthStar core status flags */
+#define SISF_NS_BOOTDEV_MASK 0x0003 /* ROM core */
+#define SISF_NS_BOOTDEV_NOR 0x0000 /* ROM core */
+#define SISF_NS_BOOTDEV_NAND 0x0001 /* ROM core */
+#define SISF_NS_BOOTDEV_ROM 0x0002 /* ROM core */
+#define SISF_NS_BOOTDEV_OFFLOAD 0x0003 /* ROM core */
+#define SISF_NS_SKUVEC_MASK 0x000c /* ROM core */
+
+/* dot11 core-specific status flags */
+#define SISF_MINORREV_D11_SHIFT 16
+#define SISF_MINORREV_D11_MASK 0xF /**< minor corerev (corerev == 61) */
+
+/* A register common to all cores, used to
+ * communicate with the PMU regarding clock control.
+ */
+#define SI_CLK_CTL_ST 0x1e0 /* clock control and status */
+#define SI_PWR_CTL_ST 0x1e8 /* For memory clock gating */
+
+/* clk_ctl_st register */
+#define CCS_FORCEALP 0x00000001 /* force ALP request */
+#define CCS_FORCEHT 0x00000002 /* force HT request */
+#define CCS_FORCEILP 0x00000004 /* force ILP request */
+#define CCS_ALPAREQ 0x00000008 /* ALP Avail Request */
+#define CCS_HTAREQ 0x00000010 /* HT Avail Request */
+#define CCS_FORCEHWREQOFF 0x00000020 /* Force HW Clock Request Off */
+#define CCS_HQCLKREQ 0x00000040 /* HQ Clock Required */
+#define CCS_USBCLKREQ 0x00000100 /* USB Clock Req */
+#define CCS_SECICLKREQ 0x00000100 /* SECI Clock Req */
+#define CCS_ARMFASTCLOCKREQ 0x00000100 /* ARM CR4/CA7 fast clock request */
+#define CCS_SFLASH_CLKREQ 0x00000200 /* Sflash clk request */
+#define CCS_AVBCLKREQ 0x00000400 /* AVB Clock enable request */
+#define CCS_ERSRC_REQ_MASK 0x00000700 /* external resource requests */
+#define CCS_ERSRC_REQ_SHIFT 8
+#define CCS_ALPAVAIL 0x00010000 /* ALP is available */
+#define CCS_HTAVAIL 0x00020000 /* HT is available */
+#define CCS_BP_ON_APL 0x00040000 /* RO: Backplane is running on ALP clock */
+#define CCS_BP_ON_HT 0x00080000 /* RO: Backplane is running on HT clock */
+#define CCS_ARMFASTCLOCKSTATUS 0x01000000 /* Fast CPU clock is running */
+#define CCS_ERSRC_STS_MASK 0x07000000 /* external resource status */
+#define CCS_ERSRC_STS_SHIFT 24
+#define CCS_SECI_AVAIL 0x01000000 /* RO: SECI is available */
+
+/* Not really related to the SOC Interconnect, but a couple of software
+ * conventions for the use of the flash space:
+ */
+
+/* Minimum amount of flash we support */
+#define FLASH_MIN 0x00020000 /* Minimum flash size */
+
+/* A boot/binary may have an embedded block that describes its size */
+#define BISZ_OFFSET 0x3e0 /* At this offset into the binary */
+#define BISZ_MAGIC 0x4249535a /* Marked with this value: 'BISZ' */
+#define BISZ_MAGIC_IDX 0 /* Word 0: magic */
+#define BISZ_TXTST_IDX 1 /* 1: text start */
+#define BISZ_TXTEND_IDX 2 /* 2: text end */
+#define BISZ_DATAST_IDX 3 /* 3: data start */
+#define BISZ_DATAEND_IDX 4 /* 4: data end */
+#define BISZ_BSSST_IDX 5 /* 5: bss start */
+#define BISZ_BSSEND_IDX 6 /* 6: bss end */
+#define BISZ_SIZE 7 /* descriptor size in 32-bit integers */
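+
+/* Lookup sketch (illustrative, 'base' is assumed to address the start of
+ * the boot binary):
+ *
+ *   uint32 *bisz = (uint32 *)((uintptr)base + BISZ_OFFSET);
+ *   if (bisz[BISZ_MAGIC_IDX] == BISZ_MAGIC) {
+ *           uint32 text_bytes = bisz[BISZ_TXTEND_IDX] - bisz[BISZ_TXTST_IDX];
+ *   }
+ */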
+
+/* Boot/Kernel related definitions and functions */
+#define SOC_BOOTDEV_ROM 0x00000001
+#define SOC_BOOTDEV_PFLASH 0x00000002
+#define SOC_BOOTDEV_SFLASH 0x00000004
+#define SOC_BOOTDEV_NANDFLASH 0x00000008
+
+#define SOC_KNLDEV_NORFLASH 0x00000002
+#define SOC_KNLDEV_NANDFLASH 0x00000004
+
+#if !defined(_LANGUAGE_ASSEMBLY) && !defined(__ASSEMBLY__)
+int soc_boot_dev(void *sih);
+int soc_knl_dev(void *sih);
+#endif /* !defined(_LANGUAGE_ASSEMBLY) && !defined(__ASSEMBLY__) */
+
+#define PMU_BASE_OFFSET 0x00012000 /* PMU offset is changed for ccrev >= 56 */
+#endif /* _HNDSOC_H */
diff --git a/bcmdhd.101.10.361.x/include/ieee80211_radiotap.h b/bcmdhd.101.10.361.x/include/ieee80211_radiotap.h
new file mode 100755
index 0000000..1f08faa
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/ieee80211_radiotap.h
@@ -0,0 +1,400 @@
+/* $FreeBSD: src/sys/net80211/ieee80211_radiotap.h,v 1.11 2007/12/13 01:23:40 sam Exp $ */
+/* $NetBSD: ieee80211_radiotap.h,v 1.16 2007/01/06 05:51:15 dyoung Exp $ */
+/* FILE-CSTYLED */
+
+/*
+ * Copyright (c) 2003, 2004 David Young. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of David Young may not be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DAVID YOUNG ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DAVID
+ * YOUNG BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ */
+
+/*
+ * <<Broadcom-WL-IPTag/Open:>>
+ */
+
+#ifndef _NET80211_IEEE80211_RADIOTAP_H_
+#define _NET80211_IEEE80211_RADIOTAP_H_
+
+/* A generic radio capture format is desirable. It must be
+ * rigidly defined (e.g., units for fields should be given),
+ * and easily extensible.
+ *
+ * The following is an extensible radio capture format. It is
+ * based on a bitmap indicating which fields are present.
+ *
+ * I am trying to describe precisely what the application programmer
+ * should expect in the following, and for that reason I tell the
+ * units and origin of each measurement (where it applies), or else I
+ * use sufficiently weaselly language ("is a monotonically nondecreasing
+ * function of...") that I cannot set false expectations for lawyerly
+ * readers.
+ */
+#if defined(__KERNEL__) || defined(_KERNEL)
+#ifndef DLT_IEEE802_11_RADIO
+#define DLT_IEEE802_11_RADIO 127 /* 802.11 plus WLAN header */
+#endif
+#endif /* defined(__KERNEL__) || defined(_KERNEL) */
+
+#define IEEE80211_RADIOTAP_HDRLEN 64 /* deprecated */
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+/*
+ * The radio capture header precedes the 802.11 header.
+ *
+ * Note well: all radiotap fields are little-endian.
+ */
+BWL_PRE_PACKED_STRUCT struct ieee80211_radiotap_header {
+ uint8 it_version; /* Version 0. Only increases
+ * for drastic changes,
+ * introduction of compatible
+ * new fields does not count.
+ */
+ uint8 it_pad;
+ uint16 it_len; /* length of the whole
+ * header in bytes, including
+ * it_version, it_pad,
+ * it_len, and data fields.
+ */
+ uint32 it_present; /* A bitmap telling which
+ * fields are present. Set bit 31
+ * (0x80000000) to extend the
+ * bitmap by another 32 bits; each
+ * extension word again uses bit 31
+ * to chain further bitmaps.
+ */
+} BWL_POST_PACKED_STRUCT;
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+/*
+ * Name Data type Units
+ * ---- --------- -----
+ *
+ * IEEE80211_RADIOTAP_TSFT uint64_t microseconds
+ *
+ * Value in microseconds of the MAC's 64-bit 802.11 Time
+ * Synchronization Function timer when the first bit of the
+ * MPDU arrived at the MAC. For received frames, only.
+ *
+ * IEEE80211_RADIOTAP_CHANNEL 2 x uint16_t MHz, bitmap
+ *
+ * Tx/Rx frequency in MHz, followed by flags (see below).
+ *
+ * IEEE80211_RADIOTAP_FHSS uint16_t see below
+ *
+ * For frequency-hopping radios, the hop set (first byte)
+ * and pattern (second byte).
+ *
+ * IEEE80211_RADIOTAP_RATE uint8_t 500kb/s or index
+ *
+ * Tx/Rx data rate. If bit 0x80 is set then it represents an
+ * MCS index and not an IEEE rate.
+ *
+ * IEEE80211_RADIOTAP_DBM_ANTSIGNAL int8_t decibels from
+ * one milliwatt (dBm)
+ *
+ * RF signal power at the antenna, decibel difference from
+ * one milliwatt.
+ *
+ * IEEE80211_RADIOTAP_DBM_ANTNOISE int8_t decibels from
+ * one milliwatt (dBm)
+ *
+ * RF noise power at the antenna, decibel difference from one
+ * milliwatt.
+ *
+ * IEEE80211_RADIOTAP_DB_ANTSIGNAL uint8_t decibel (dB)
+ *
+ * RF signal power at the antenna, decibel difference from an
+ * arbitrary, fixed reference.
+ *
+ * IEEE80211_RADIOTAP_DB_ANTNOISE uint8_t decibel (dB)
+ *
+ * RF noise power at the antenna, decibel difference from an
+ * arbitrary, fixed reference point.
+ *
+ * IEEE80211_RADIOTAP_TXFLAGS uint16_t txflags
+ * Properties of Transmitted frames
+ *
+ * IEEE80211_RADIOTAP_RETRIES uint8_t retries
+ * Number of retries
+ *
+ * IEEE80211_RADIOTAP_LOCK_QUALITY uint16_t unitless
+ *
+ * Quality of Barker code lock. Unitless. Monotonically
+ * nondecreasing with "better" lock strength. Called "Signal
+ * Quality" in datasheets. (Is there a standard way to measure
+ * this?)
+ *
+ * IEEE80211_RADIOTAP_TX_ATTENUATION uint16_t unitless
+ *
+ * Transmit power expressed as unitless distance from max
+ * power set at factory calibration. 0 is max power.
+ * Monotonically nondecreasing with lower power levels.
+ *
+ * IEEE80211_RADIOTAP_DB_TX_ATTENUATION uint16_t decibels (dB)
+ *
+ * Transmit power expressed as decibel distance from max power
+ * set at factory calibration. 0 is max power. Monotonically
+ * nondecreasing with lower power levels.
+ *
+ * IEEE80211_RADIOTAP_DBM_TX_POWER int8_t decibels from
+ * one milliwatt (dBm)
+ *
+ * Transmit power expressed as dBm (decibels from a 1 milliwatt
+ * reference). This is the absolute power level measured at
+ * the antenna port.
+ *
+ * IEEE80211_RADIOTAP_FLAGS uint8_t bitmap
+ *
+ * Properties of transmitted and received frames. See flags
+ * defined below.
+ *
+ * IEEE80211_RADIOTAP_ANTENNA uint8_t antenna index
+ *
+ * Unitless indication of the Rx/Tx antenna for this packet.
+ * The first antenna is antenna 0.
+ *
+ * IEEE80211_RADIOTAP_XCHANNEL uint32_t bitmap
+ * uint16_t MHz
+ * uint8_t channel number
+ * int8_t .5 dBm
+ *
+ * Extended channel specification: flags (see below) followed by
+ * frequency in MHz, the corresponding IEEE channel number, and
+ * finally the maximum regulatory transmit power cap in .5 dBm
+ * units. This property supersedes IEEE80211_RADIOTAP_CHANNEL
+ * and only one of the two should be present.
+ *
+ * IEEE80211_RADIOTAP_MCS u8, u8, u8 unitless
+ *
+ * Contains a bitmap of known fields/flags, the flags, and
+ * the MCS index.
+ *
+ */
+enum ieee80211_radiotap_type {
+ IEEE80211_RADIOTAP_TSFT = 0,
+ IEEE80211_RADIOTAP_FLAGS = 1,
+ IEEE80211_RADIOTAP_RATE = 2,
+ IEEE80211_RADIOTAP_CHANNEL = 3,
+ IEEE80211_RADIOTAP_FHSS = 4,
+ IEEE80211_RADIOTAP_DBM_ANTSIGNAL = 5,
+ IEEE80211_RADIOTAP_DBM_ANTNOISE = 6,
+ IEEE80211_RADIOTAP_LOCK_QUALITY = 7,
+ IEEE80211_RADIOTAP_TX_ATTENUATION = 8,
+ IEEE80211_RADIOTAP_DB_TX_ATTENUATION = 9,
+ IEEE80211_RADIOTAP_DBM_TX_POWER = 10,
+ IEEE80211_RADIOTAP_ANTENNA = 11,
+ IEEE80211_RADIOTAP_DB_ANTSIGNAL = 12,
+ IEEE80211_RADIOTAP_DB_ANTNOISE = 13,
+ /* NB: gap for netbsd definitions */
+ IEEE80211_RADIOTAP_TXFLAGS = 15,
+ IEEE80211_RADIOTAP_RETRIES = 17,
+ IEEE80211_RADIOTAP_XCHANNEL = 18,
+ IEEE80211_RADIOTAP_MCS = 19,
+ IEEE80211_RADIOTAP_AMPDU = 20,
+ IEEE80211_RADIOTAP_VHT = 21,
+ IEEE80211_RADIOTAP_HE = 23,
+ IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE = 29,
+ IEEE80211_RADIOTAP_VENDOR_NAMESPACE = 30,
+ IEEE80211_RADIOTAP_EXT = 31,
+};
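+
+/* Presence-bitmap sketch (illustrative): a parser tests a field's bit in
+ * it_present before consuming it, e.g. for the TSFT field:
+ *
+ *   struct ieee80211_radiotap_header *rth = (struct ieee80211_radiotap_header *)pkt;
+ *   if (ltoh32(rth->it_present) & (1u << IEEE80211_RADIOTAP_TSFT))
+ *           tsft_present = TRUE;
+ *
+ * 'pkt' and the little-endian-to-host helper ltoh32() are assumed here.
+ */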
+
+#ifndef _KERNEL
+/* channel attributes */
+#define IEEE80211_CHAN_TURBO 0x00000010 /* Turbo channel */
+#define IEEE80211_CHAN_CCK 0x00000020 /* CCK channel */
+#define IEEE80211_CHAN_OFDM 0x00000040 /* OFDM channel */
+#define IEEE80211_CHAN_2GHZ 0x00000080 /* 2 GHz spectrum channel. */
+#define IEEE80211_CHAN_5GHZ 0x00000100 /* 5 GHz spectrum channel */
+#define IEEE80211_CHAN_PASSIVE 0x00000200 /* Only passive scan allowed */
+#define IEEE80211_CHAN_DYN 0x00000400 /* Dynamic CCK-OFDM channel */
+#define IEEE80211_CHAN_GFSK 0x00000800 /* GFSK channel (FHSS PHY) */
+#define IEEE80211_CHAN_GSM 0x00001000 /* 900 MHz spectrum channel */
+#define IEEE80211_CHAN_STURBO 0x00002000 /* 11a static turbo channel only */
+#define IEEE80211_CHAN_HALF 0x00004000 /* Half rate channel */
+#define IEEE80211_CHAN_QUARTER 0x00008000 /* Quarter rate channel */
+#define IEEE80211_CHAN_HT20 0x00010000 /* HT 20 channel */
+#define IEEE80211_CHAN_HT40U 0x00020000 /* HT 40 channel w/ ext above */
+#define IEEE80211_CHAN_HT40D 0x00040000 /* HT 40 channel w/ ext below */
+#endif /* !_KERNEL */
+
+/* For IEEE80211_RADIOTAP_FLAGS */
+#define IEEE80211_RADIOTAP_F_CFP 0x01 /* sent/received
+ * during CFP
+ */
+#define IEEE80211_RADIOTAP_F_SHORTPRE 0x02 /* sent/received
+ * with short
+ * preamble
+ */
+#define IEEE80211_RADIOTAP_F_WEP 0x04 /* sent/received
+ * with WEP encryption
+ */
+#define IEEE80211_RADIOTAP_F_FRAG 0x08 /* sent/received
+ * with fragmentation
+ */
+#define IEEE80211_RADIOTAP_F_FCS 0x10 /* frame includes FCS */
+#define IEEE80211_RADIOTAP_F_DATAPAD 0x20 /* frame has padding between
+ * 802.11 header and payload
+ * (to 32-bit boundary)
+ */
+#define IEEE80211_RADIOTAP_F_BADFCS 0x40 /* does not pass FCS check */
+
+/* For IEEE80211_RADIOTAP_MCS */
+#define IEEE80211_RADIOTAP_MCS_HAVE_BW 0x01
+#define IEEE80211_RADIOTAP_MCS_HAVE_MCS 0x02
+#define IEEE80211_RADIOTAP_MCS_HAVE_GI 0x04
+#define IEEE80211_RADIOTAP_MCS_HAVE_FMT 0x08
+#define IEEE80211_RADIOTAP_MCS_HAVE_FEC 0x10
+
+#define IEEE80211_RADIOTAP_MCS_BW_MASK 0x03
+#define IEEE80211_RADIOTAP_MCS_BW_20 0
+#define IEEE80211_RADIOTAP_MCS_BW_40 1
+#define IEEE80211_RADIOTAP_MCS_BW_20L 2
+#define IEEE80211_RADIOTAP_MCS_BW_20U 3
+#define IEEE80211_RADIOTAP_MCS_SGI 0x04
+#define IEEE80211_RADIOTAP_MCS_FMT_GF 0x08
+#define IEEE80211_RADIOTAP_MCS_FEC_LDPC 0x10
+
+/* remove, only used on macos */
+#define IEEE80211_RADIOTAP_MCS_BW_80 0x20
+#define IEEE80211_RADIOTAP_MCS_BW_20LL 0x40
+#define IEEE80211_RADIOTAP_MCS_BW_20LU 0x60
+#define IEEE80211_RADIOTAP_MCS_BW_20UL 0x80
+#define IEEE80211_RADIOTAP_MCS_BW_20UU 0xa0
+#define IEEE80211_RADIOTAP_MCS_BW_40L 0xc0
+#define IEEE80211_RADIOTAP_MCS_BW_40U 0xe0
+
+/* For IEEE80211_RADIOTAP_VHT */
+#define IEEE80211_RADIOTAP_VHT_HAVE_STBC 0x0001
+#define IEEE80211_RADIOTAP_VHT_HAVE_TXOP_PS 0x0002
+#define IEEE80211_RADIOTAP_VHT_HAVE_GI 0x0004
+#define IEEE80211_RADIOTAP_VHT_HAVE_SGI_NSYM_DA 0x0008
+#define IEEE80211_RADIOTAP_VHT_HAVE_LDPC_EXTRA 0x0010
+#define IEEE80211_RADIOTAP_VHT_HAVE_BF 0x0020
+#define IEEE80211_RADIOTAP_VHT_HAVE_BW 0x0040
+#define IEEE80211_RADIOTAP_VHT_HAVE_GID 0x0080
+#define IEEE80211_RADIOTAP_VHT_HAVE_PAID 0x0100
+
+#define IEEE80211_RADIOTAP_VHT_STBC 0x01
+#define IEEE80211_RADIOTAP_VHT_TXOP_PS 0x02
+#define IEEE80211_RADIOTAP_VHT_SGI 0x04
+#define IEEE80211_RADIOTAP_VHT_SGI_NSYM_DA 0x08
+#define IEEE80211_RADIOTAP_VHT_LDPC_EXTRA 0x10
+#define IEEE80211_RADIOTAP_VHT_BF 0x20
+
+#define IEEE80211_RADIOTAP_VHT_NSS 0x0f
+#define IEEE80211_RADIOTAP_VHT_MCS 0xf0
+
+#define IEEE80211_RADIOTAP_VHT_CODING_LDPC 0x01
+
+#define IEEE80211_RADIOTAP_VHT_BW_20 IEEE80211_RADIOTAP_MCS_BW_20
+#define IEEE80211_RADIOTAP_VHT_BW_40 IEEE80211_RADIOTAP_MCS_BW_40
+#define IEEE80211_RADIOTAP_VHT_BW_20L IEEE80211_RADIOTAP_MCS_BW_20L
+#define IEEE80211_RADIOTAP_VHT_BW_20U IEEE80211_RADIOTAP_MCS_BW_20U
+#define IEEE80211_RADIOTAP_VHT_BW_80 4
+#define IEEE80211_RADIOTAP_VHT_BW_40L 5
+#define IEEE80211_RADIOTAP_VHT_BW_40U 6
+#define IEEE80211_RADIOTAP_VHT_BW_20LL 7
+#define IEEE80211_RADIOTAP_VHT_BW_20LU 8
+#define IEEE80211_RADIOTAP_VHT_BW_20UL 9
+#define IEEE80211_RADIOTAP_VHT_BW_20UU 10
+#define IEEE80211_RADIOTAP_VHT_BW_160 11
+#define IEEE80211_RADIOTAP_VHT_BW_80L 12
+#define IEEE80211_RADIOTAP_VHT_BW_80U 13
+#define IEEE80211_RADIOTAP_VHT_BW_40LL 14
+#define IEEE80211_RADIOTAP_VHT_BW_40LU 15
+#define IEEE80211_RADIOTAP_VHT_BW_40UL 16
+#define IEEE80211_RADIOTAP_VHT_BW_40UU 17
+#define IEEE80211_RADIOTAP_VHT_BW_20LLL 18
+#define IEEE80211_RADIOTAP_VHT_BW_20LLU 19
+#define IEEE80211_RADIOTAP_VHT_BW_20LUL 20
+#define IEEE80211_RADIOTAP_VHT_BW_20LUU 21
+#define IEEE80211_RADIOTAP_VHT_BW_20ULL 22
+#define IEEE80211_RADIOTAP_VHT_BW_20ULU 23
+#define IEEE80211_RADIOTAP_VHT_BW_20UUL 24
+#define IEEE80211_RADIOTAP_VHT_BW_20UUU 25
+
+/* For IEEE80211_RADIOTAP_HE */
+#define IEEE80211_RADIOTAP_HE_SIGA_HAVE_FORMAT 0x0003
+#define IEEE80211_RADIOTAP_HE_SIGA_HAVE_BSS_COLOR 0x0004
+#define IEEE80211_RADIOTAP_HE_SIGA_HAVE_BEAM_CHANGE 0x0008
+#define IEEE80211_RADIOTAP_HE_SIGA_HAVE_DL_UL 0x0010
+#define IEEE80211_RADIOTAP_HE_SIGA_HAVE_MCS 0x0020
+#define IEEE80211_RADIOTAP_HE_SIGA_HAVE_DCM 0x0040
+#define IEEE80211_RADIOTAP_HE_SIGA_HAVE_CODING 0x0080
+#define IEEE80211_RADIOTAP_HE_SIGA_HAVE_LDPC 0x0100
+#define IEEE80211_RADIOTAP_HE_SIGA_HAVE_STBC 0x0200
+#define IEEE80211_RADIOTAP_HE_SIGA_HAVE_SR 0x0400
+#define IEEE80211_RADIOTAP_HE_SIGA_HAVE_BW 0x4000
+#define IEEE80211_RADIOTAP_HE_SIGA_HAVE_DOPPLER 0x8000
+#define IEEE80211_RADIOTAP_HE_SIGA_HAVE_LTF 0x0004
+#define IEEE80211_RADIOTAP_HE_SIGA_HAVE_TXBF 0x0010
+#define IEEE80211_RADIOTAP_HE_SIGA_HAVE_TXOP 0x0040
+
+#define IEEE80211_RADIOTAP_HE_SIGA1_HAVE_FORMAT 0x0001
+#define IEEE80211_RADIOTAP_HE_SIGA1_HAVE_BEAM_CHANGE 0x0002
+#define IEEE80211_RADIOTAP_HE_SIGA1_HAVE_DL_UL 0x0004
+#define IEEE80211_RADIOTAP_HE_SIGA1_HAVE_MCS 0x0008
+#define IEEE80211_RADIOTAP_HE_SIGA1_HAVE_DCM 0x0010
+#define IEEE80211_RADIOTAP_HE_SIGA1_HAVE_BSS_COLOR 0x0020
+#define IEEE80211_RADIOTAP_HE_SIGA1_HAVE_B14 0x0040
+#define IEEE80211_RADIOTAP_HE_SIGA1_HAVE_SR 0x0080
+#define IEEE80211_RADIOTAP_HE_SIGA1_HAVE_BW 0x0100
+#define IEEE80211_RADIOTAP_HE_SIGA1_HAVE_LTF 0x0200
+#define IEEE80211_RADIOTAP_HE_SIGA1_HAVE_NSTS 0x0400
+
+#define IEEE80211_RADIOTAP_HE_SIGA2_HAVE_TXOP 0x0001
+#define IEEE80211_RADIOTAP_HE_SIGA2_HAVE_CODING 0x0002
+#define IEEE80211_RADIOTAP_HE_SIGA2_HAVE_LDPC 0x0004
+#define IEEE80211_RADIOTAP_HE_SIGA2_HAVE_STBC 0x0008
+#define IEEE80211_RADIOTAP_HE_SIGA2_HAVE_TXBF 0x0010
+#define IEEE80211_RADIOTAP_HE_SIGA2_HAVE_FEC 0x0020
+#define IEEE80211_RADIOTAP_HE_SIGA2_HAVE_PED 0x0040
+#define IEEE80211_RADIOTAP_HE_SIGA2_HAVE_DOPPLER 0x0100
+#define IEEE80211_RADIOTAP_HE_SIGA2_HAVE_CRC 0x0400
+#define IEEE80211_RADIOTAP_HE_SIGA2_HAVE_TAIL 0x0800
+
+/* For IEEE80211_RADIOTAP_TXFLAGS */
+#define IEEE80211_RADIOTAP_TXF_FAIL 0x0001 /* TX failed due to excessive retries */
+#define IEEE80211_RADIOTAP_TXF_CTS 0x0002 /* TX used CTS-to-self protection */
+#define IEEE80211_RADIOTAP_TXF_RTSCTS 0x0004 /* TX used RTS/CTS */
+#define IEEE80211_RADIOTAP_TXF_NOACK 0x0008 /* For injected TX: don't expect ACK */
+#define IEEE80211_RADIOTAP_TXF_SEQOVR 0x0010 /* For injected TX: use pre-configured seq */
+
+/* For IEEE80211_RADIOTAP_AMPDU_STATUS */
+#define IEEE80211_RADIOTAP_AMPDU_REPORT_ZEROLEN 0x0001
+#define IEEE80211_RADIOTAP_AMPDU_IS_ZEROLEN 0x0002
+#define IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN 0x0004
+#define IEEE80211_RADIOTAP_AMPDU_IS_LAST 0x0008
+#define IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR 0x0010
+#define IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_KNOWN 0x0020
+#define IEEE80211_RADIOTAP_AMPDU_MPDU_ONLY 0x8000
+
+#endif /* !_NET80211_IEEE80211_RADIOTAP_H_ */
diff --git a/bcmdhd.101.10.361.x/include/linux_osl.h b/bcmdhd.101.10.361.x/include/linux_osl.h
new file mode 100755
index 0000000..a0a0937
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/linux_osl.h
@@ -0,0 +1,868 @@
+/*
+ * Linux OS Independent Layer
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _linux_osl_h_
+#define _linux_osl_h_
+
+#include <typedefs.h>
+#define DECLSPEC_ALIGN(x) __attribute__ ((aligned(x)))
+
+/* Linux Kernel: File Operations: start */
+extern void * osl_os_open_image(char * filename);
+extern int osl_os_get_image_block(char * buf, int len, void * image);
+extern void osl_os_close_image(void * image);
+extern int osl_os_image_size(void *image);
+/* Linux Kernel: File Operations: end */
+
+#ifdef BCMDRIVER
+
+/* OSL initialization */
+#ifdef SHARED_OSL_CMN
+extern osl_t *osl_attach(void *pdev, uint bustype, bool pkttag, void **osh_cmn);
+#else
+extern osl_t *osl_attach(void *pdev, uint bustype, bool pkttag);
+#endif /* SHARED_OSL_CMN */
+
+extern void osl_detach(osl_t *osh);
+extern int osl_static_mem_init(osl_t *osh, void *adapter);
+extern int osl_static_mem_deinit(osl_t *osh, void *adapter);
+extern void osl_set_bus_handle(osl_t *osh, void *bus_handle);
+extern void* osl_get_bus_handle(osl_t *osh);
+#ifdef DHD_MAP_LOGGING
+extern void osl_dma_map_dump(osl_t *osh);
+#define OSL_DMA_MAP_DUMP(osh) osl_dma_map_dump(osh)
+#else
+#define OSL_DMA_MAP_DUMP(osh) do {} while (0)
+#endif /* DHD_MAP_LOGGING */
+
+/* Global ASSERT type */
+extern uint32 g_assert_type;
+
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+#define PRI_FMT_x "llx"
+#define PRI_FMT_X "llX"
+#define PRI_FMT_o "llo"
+#define PRI_FMT_d "lld"
+#else
+#define PRI_FMT_x "x"
+#define PRI_FMT_X "X"
+#define PRI_FMT_o "o"
+#define PRI_FMT_d "d"
+#endif /* CONFIG_PHYS_ADDR_T_64BIT */
+/* ASSERT */
+#ifndef ASSERT
+#if (defined(BCMDBG_ASSERT) || defined(BCMASSERT_LOG)) && !defined(BINCMP)
+ #define ASSERT(exp) \
+ do { if (!(exp)) osl_assert(#exp, __FILE__, __LINE__); } while (0)
+extern void osl_assert(const char *exp, const char *file, int line);
+#else
+#ifdef __GNUC__
+ #define GCC_VERSION \
+ (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
+#if GCC_VERSION > 30100
+ #define ASSERT(exp) do {} while (0)
+#else
+ /* ASSERT could cause segmentation fault on GCC3.1, use empty instead */
+ #define ASSERT(exp)
+#endif /* GCC_VERSION > 30100 */
+#endif /* __GNUC__ */
+#endif /* (BCMDBG_ASSERT || BCMASSERT_LOG) && !BINCMP */
+#endif /* ASSERT */
+
+#define ASSERT_FP(exp) ASSERT(exp)
+
+/* microsecond delay */
+#define OSL_DELAY(usec) osl_delay(usec)
+extern void osl_delay(uint usec);
+
+#define OSL_SLEEP(ms) osl_sleep(ms)
+extern void osl_sleep(uint ms);
+
+/* PCI configuration space access macros */
+#define OSL_PCI_READ_CONFIG(osh, offset, size) \
+ osl_pci_read_config((osh), (offset), (size))
+#define OSL_PCI_WRITE_CONFIG(osh, offset, size, val) \
+ osl_pci_write_config((osh), (offset), (size), (val))
+extern uint32 osl_pci_read_config(osl_t *osh, uint offset, uint size);
+extern void osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val);
+
+#ifdef BCMPCIE
+/* PCI device bus # and slot # */
+#define OSL_PCI_BUS(osh) osl_pci_bus(osh)
+#define OSL_PCI_SLOT(osh) osl_pci_slot(osh)
+#define OSL_PCIE_DOMAIN(osh) osl_pcie_domain(osh)
+#define OSL_PCIE_BUS(osh) osl_pcie_bus(osh)
+extern uint osl_pci_bus(osl_t *osh);
+extern uint osl_pci_slot(osl_t *osh);
+extern uint osl_pcie_domain(osl_t *osh);
+extern uint osl_pcie_bus(osl_t *osh);
+extern struct pci_dev *osl_pci_device(osl_t *osh);
+#endif
+
+/* precommit failed when this is removed */
+/* BLAZAR_BRANCH_101_10_DHD_003/build/dhd/linux-fc30/brix-brcm */
+/* TBD: Revisit later */
+#if defined(BCMINTERNAL)
+/* Flags that can be used to handle OSL specifics */
+#define OSL_PHYS_MEM_LESS_THAN_16MB (1<<0L)
+#endif /* BCMINTERNAL */
+
+#define OSL_ACP_COHERENCE (1<<1L)
+#define OSL_FWDERBUF (1<<2L)
+
+/* Pkttag flag should be part of public information */
+typedef struct {
+ bool pkttag;
+ bool mmbus; /**< Bus supports memory-mapped register accesses */
+ pktfree_cb_fn_t tx_fn; /**< Callback function for PKTFREE */
+ void *tx_ctx; /**< Context to the callback function */
+#ifdef OSLREGOPS
+ osl_rreg_fn_t rreg_fn; /**< Read Register function */
+ osl_wreg_fn_t wreg_fn; /**< Write Register function */
+ void *reg_ctx; /**< Context to the reg callback functions */
+#else
+ void *unused[3]; /**< temp fix for USBAP cftpool handle corruption */
+#endif
+ void (*rx_fn)(void *rx_ctx, void *p);
+ void *rx_ctx;
+} osl_pubinfo_t;
+
+extern void osl_flag_set(osl_t *osh, uint32 mask);
+extern void osl_flag_clr(osl_t *osh, uint32 mask);
+extern bool osl_is_flag_set(osl_t *osh, uint32 mask);
+
+#define PKTFREESETCB(osh, _tx_fn, _tx_ctx) \
+ do { \
+ ((osl_pubinfo_t*)osh)->tx_fn = _tx_fn; \
+ ((osl_pubinfo_t*)osh)->tx_ctx = _tx_ctx; \
+ } while (0)
+
+#define PKTFREESETRXCB(osh, _rx_fn, _rx_ctx) \
+ do { \
+ ((osl_pubinfo_t*)osh)->rx_fn = _rx_fn; \
+ ((osl_pubinfo_t*)osh)->rx_ctx = _rx_ctx; \
+ } while (0)
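+
+/* Registration sketch (illustrative): a bus layer hooks its free callback
+ * once at attach time, e.g.
+ *
+ *   PKTFREESETCB(osh, my_txfree_cb, bus);
+ *
+ * where 'my_txfree_cb' (a name assumed here) matches pktfree_cb_fn_t and
+ * 'bus' is the opaque context handed back on each PKTFREE.
+ */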
+
+#ifdef OSLREGOPS
+#define REGOPSSET(osh, rreg, wreg, ctx) \
+ do { \
+ ((osl_pubinfo_t*)osh)->rreg_fn = rreg; \
+ ((osl_pubinfo_t*)osh)->wreg_fn = wreg; \
+ ((osl_pubinfo_t*)osh)->reg_ctx = ctx; \
+ } while (0)
+#endif /* OSLREGOPS */
+
+/* host/bus architecture-specific byte swap */
+#define BUS_SWAP32(v) (v)
+
+#if defined(BCMDBG_MEM) && !defined(BINCMP)
+ #define MALLOC(osh, size) osl_debug_malloc((osh), (size), __LINE__, __FILE__)
+ #define MALLOCZ(osh, size) osl_debug_mallocz((osh), (size), __LINE__, __FILE__)
+ #define MFREE(osh, addr, size) \
+ ({osl_debug_mfree((osh), ((void *)addr), (size), __LINE__, __FILE__);(addr) = NULL;})
+ #define VMALLOC(osh, size) osl_debug_vmalloc((osh), (size), __LINE__, __FILE__)
+ #define VMALLOCZ(osh, size) osl_debug_vmallocz((osh), (size), __LINE__, __FILE__)
+ #define VMFREE(osh, addr, size) osl_debug_vmfree((osh), (addr), (size), __LINE__, __FILE__)
+ #define MALLOCED(osh) osl_malloced((osh))
+ #define MEMORY_LEFTOVER(osh) osl_check_memleak(osh)
+ #define MALLOC_DUMP(osh, b) osl_debug_memdump((osh), (b))
+ extern void *osl_debug_malloc(osl_t *osh, uint size, int line, const char* file);
+ extern void *osl_debug_mallocz(osl_t *osh, uint size, int line, const char* file);
+ extern void osl_debug_mfree(osl_t *osh, void *addr, uint size, int line, const char* file);
+ extern void *osl_debug_vmalloc(osl_t *osh, uint size, int line, const char* file);
+ extern void *osl_debug_vmallocz(osl_t *osh, uint size, int line, const char* file);
+ extern void osl_debug_vmfree(osl_t *osh, void *addr, uint size, int line, const char* file);
+ extern uint osl_malloced(osl_t *osh);
+ struct bcmstrbuf;
+ extern int osl_debug_memdump(osl_t *osh, struct bcmstrbuf *b);
+ extern uint osl_check_memleak(osl_t *osh);
+#else /* BCMDBG_MEM && !BINCMP */
+ #define MALLOC(osh, size) osl_malloc((osh), (size))
+ #define MALLOCZ(osh, size) osl_mallocz((osh), (size))
+ #define MALLOC_RA(osh, size, callsite) osl_mallocz((osh), (size))
+ #define MFREE(osh, addr, size) ({osl_mfree((osh), ((void *)addr), (size));(addr) = NULL;})
+ #define VMALLOC(osh, size) osl_vmalloc((osh), (size))
+ #define VMALLOCZ(osh, size) osl_vmallocz((osh), (size))
+ #define VMFREE(osh, addr, size) osl_vmfree((osh), (addr), (size))
+ #define MALLOCED(osh) osl_malloced((osh))
+ #define MEMORY_LEFTOVER(osh) osl_check_memleak(osh)
+ extern void *osl_malloc(osl_t *osh, uint size);
+ extern void *osl_mallocz(osl_t *osh, uint size);
+ extern void osl_mfree(osl_t *osh, void *addr, uint size);
+ extern void *osl_vmalloc(osl_t *osh, uint size);
+ extern void *osl_vmallocz(osl_t *osh, uint size);
+ extern void osl_vmfree(osl_t *osh, void *addr, uint size);
+ extern uint osl_malloced(osl_t *osh);
+ extern uint osl_check_memleak(osl_t *osh);
+#endif /* BCMDBG_MEM && !BINCMP */
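+
+/* Illustrative allocation pattern for the macros above; my_ctx is a
+ * hypothetical structure. Note that MFREE() also assigns NULL to the
+ * lvalue passed in, which helps avoid dangling pointers.
+ */
+struct my_ctx { int state; };
+
+static struct my_ctx *my_ctx_create(osl_t *osh)
+{
+ /* MALLOCZ() returns zero-filled memory, or NULL on failure */
+ return (struct my_ctx *)MALLOCZ(osh, sizeof(struct my_ctx));
+}
+
+static void my_ctx_destroy(osl_t *osh, struct my_ctx *ctx)
+{
+ MFREE(osh, ctx, sizeof(struct my_ctx)); /* 'ctx' is NULL afterwards */
+}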
+
+extern int memcpy_s(void *dest, size_t destsz, const void *src, size_t n);
+extern int memset_s(void *dest, size_t destsz, int c, size_t n);
+#define MALLOC_FAILED(osh) osl_malloc_failed((osh))
+extern uint osl_malloc_failed(osl_t *osh);
+
+/* allocate/free shared (dma-able) consistent memory */
+#define DMA_CONSISTENT_ALIGN osl_dma_consistent_align()
+#define DMA_ALLOC_CONSISTENT(osh, size, align, tot, pap, dmah) \
+ osl_dma_alloc_consistent((osh), (size), (align), (tot), (pap))
+#define DMA_FREE_CONSISTENT(osh, va, size, pa, dmah) \
+ osl_dma_free_consistent((osh), (void*)(va), (size), (pa))
+
+#define DMA_ALLOC_CONSISTENT_FORCE32(osh, size, align, tot, pap, dmah) \
+ osl_dma_alloc_consistent((osh), (size), (align), (tot), (pap))
+#define DMA_FREE_CONSISTENT_FORCE32(osh, va, size, pa, dmah) \
+ osl_dma_free_consistent((osh), (void*)(va), (size), (pa))
+
+extern uint osl_dma_consistent_align(void);
+extern void *osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align,
+ uint *tot, dmaaddr_t *pap);
+extern void osl_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa);
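+
+/* Illustrative DMA ring allocation with the consistent-memory macros
+ * above; names are hypothetical. The dmah argument is unused by this
+ * OSL, so NULL is passed through.
+ */
+static void *my_ring_alloc(osl_t *osh, uint ring_sz, uint *alloced, dmaaddr_t *pa)
+{
+ void *va = DMA_ALLOC_CONSISTENT(osh, ring_sz, DMA_CONSISTENT_ALIGN,
+  alloced, pa, NULL);
+ /* pair with DMA_FREE_CONSISTENT(osh, va, *alloced, *pa, NULL) */
+ return va;
+}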
+
+/* map/unmap direction */
+#define DMA_NO 0 /* Used to skip cache op */
+#define DMA_TX 1 /* TX direction for DMA */
+#define DMA_RX 2 /* RX direction for DMA */
+
+/* map/unmap shared (dma-able) memory */
+#define DMA_UNMAP(osh, pa, size, direction, p, dmah) \
+ osl_dma_unmap((osh), (pa), (size), (direction))
+extern void osl_dma_flush(osl_t *osh, void *va, uint size, int direction, void *p,
+ hnddma_seg_map_t *txp_dmah);
+extern dmaaddr_t osl_dma_map(osl_t *osh, void *va, uint size, int direction, void *p,
+ hnddma_seg_map_t *txp_dmah);
+extern void osl_dma_unmap(osl_t *osh, dmaaddr_t pa, uint size, int direction);
+
+#ifndef PHYS_TO_VIRT
+#define PHYS_TO_VIRT(pa) osl_phys_to_virt(pa)
+#endif
+#ifndef VIRT_TO_PHYS
+#define VIRT_TO_PHYS(va) osl_virt_to_phys(va)
+#endif
+extern void * osl_phys_to_virt(void * pa);
+extern void * osl_virt_to_phys(void * va);
+
+/* API for DMA addressing capability */
+#define OSL_DMADDRWIDTH(osh, addrwidth) ({BCM_REFERENCE(osh); BCM_REFERENCE(addrwidth);})
+
+#define OSL_SMP_WMB() smp_wmb()
+
+/* API for CPU relax */
+extern void osl_cpu_relax(void);
+#define OSL_CPU_RELAX() osl_cpu_relax()
+
+extern void osl_preempt_disable(osl_t *osh);
+extern void osl_preempt_enable(osl_t *osh);
+#define OSL_DISABLE_PREEMPTION(osh) osl_preempt_disable(osh)
+#define OSL_ENABLE_PREEMPTION(osh) osl_preempt_enable(osh)
+
+#if (defined(BCMPCIE) && !defined(DHD_USE_COHERENT_MEM_FOR_RING) && defined(__ARM_ARCH_7A__))
+
+ extern void osl_cache_flush(void *va, uint size);
+ extern void osl_cache_inv(void *va, uint size);
+ extern void osl_prefetch(const void *ptr);
+ #define OSL_CACHE_FLUSH(va, len) osl_cache_flush((void *)(va), len)
+ #define OSL_CACHE_INV(va, len) osl_cache_inv((void *)(va), len)
+ #define OSL_PREFETCH(ptr) osl_prefetch(ptr)
+#else /* !(BCMPCIE && !DHD_USE_COHERENT_MEM_FOR_RING && __ARM_ARCH_7A__) */
+ #define OSL_CACHE_FLUSH(va, len) BCM_REFERENCE(va)
+ #define OSL_CACHE_INV(va, len) BCM_REFERENCE(va)
+ #define OSL_PREFETCH(ptr) BCM_REFERENCE(ptr)
+#endif /* BCMPCIE && !DHD_USE_COHERENT_MEM_FOR_RING && __ARM_ARCH_7A__ */
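+
+/* Illustrative streaming-DMA TX path using DMA_MAP()/DMA_UNMAP() (defined
+ * later in this header) together with the cache macros above. On builds
+ * other than non-coherent ARMv7 PCIe the cache ops compile away. Names
+ * are hypothetical.
+ */
+static dmaaddr_t my_tx_map(osl_t *osh, void *buf, uint len)
+{
+ OSL_CACHE_FLUSH(buf, len); /* write back before the device reads */
+ return DMA_MAP(osh, buf, len, DMA_TX, NULL, NULL);
+}
+
+static void my_tx_unmap(osl_t *osh, dmaaddr_t pa, uint len)
+{
+ DMA_UNMAP(osh, pa, len, DMA_TX, NULL, NULL);
+}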
+
+#ifdef AXI_TIMEOUTS_NIC
+extern void osl_set_bpt_cb(osl_t *osh, void *bpt_cb, void *bpt_ctx);
+extern void osl_bpt_rreg(osl_t *osh, ulong addr, volatile void *v, uint size);
+#endif /* AXI_TIMEOUTS_NIC */
+
+/* register access macros */
+#if defined(BCMSDIO)
+ #include <bcmsdh.h>
+ #define OSL_WRITE_REG(osh, r, v) (bcmsdh_reg_write(osl_get_bus_handle(osh), \
+ (uintptr)(r), sizeof(*(r)), (v)))
+ #define OSL_READ_REG(osh, r) (bcmsdh_reg_read(osl_get_bus_handle(osh), \
+ (uintptr)(r), sizeof(*(r))))
+#elif defined(AXI_TIMEOUTS_NIC)
+#define OSL_READ_REG(osh, r) \
+ ({\
+ __typeof(*(r)) __osl_v; \
+ osl_bpt_rreg(osh, (uintptr)(r), &__osl_v, sizeof(*(r))); \
+ __osl_v; \
+ })
+#endif
+
+#if defined(AXI_TIMEOUTS_NIC)
+ #define SELECT_BUS_WRITE(osh, mmap_op, bus_op) ({BCM_REFERENCE(osh); mmap_op;})
+ #define SELECT_BUS_READ(osh, mmap_op, bus_op) ({BCM_REFERENCE(osh); bus_op;})
+#else /* !AXI_TIMEOUTS_NIC */
+#if defined(BCMSDIO)
+ #define SELECT_BUS_WRITE(osh, mmap_op, bus_op) if (((osl_pubinfo_t*)(osh))->mmbus) \
+ mmap_op else bus_op
+ #define SELECT_BUS_READ(osh, mmap_op, bus_op) (((osl_pubinfo_t*)(osh))->mmbus) ? \
+ mmap_op : bus_op
+#else
+ #define SELECT_BUS_WRITE(osh, mmap_op, bus_op) ({BCM_REFERENCE(osh); mmap_op;})
+ #define SELECT_BUS_READ(osh, mmap_op, bus_op) ({BCM_REFERENCE(osh); mmap_op;})
+#endif /* defined(BCMSDIO) */
+#endif /* AXI_TIMEOUTS_NIC */
+
+#define OSL_ERROR(bcmerror) osl_error(bcmerror)
+extern int osl_error(int bcmerror);
+
+/* largest reasonable packet buffer the driver uses for an ethernet MTU, in bytes */
+#define PKTBUFSZ 2048
+
+#define OSH_NULL NULL
+
+/*
+ * BINOSL selects the slightly slower function-call-based binary compatible osl.
+ * Macros expand to calls to functions defined in linux_osl.c.
+ */
+#ifndef BINOSL
+#include <linuxver.h> /* use current 2.4.x calling conventions */
+#include <linux/kernel.h> /* for vsn/printf's */
+#include <linux/string.h> /* for mem*, str* */
+extern uint64 osl_sysuptime_us(void);
+#define OSL_SYSUPTIME() ((uint32)jiffies_to_msecs(jiffies))
+#define OSL_SYSUPTIME_US() osl_sysuptime_us()
+extern uint64 osl_localtime_ns(void);
+extern void osl_get_localtime(uint64 *sec, uint64 *usec);
+extern uint64 osl_systztime_us(void);
+#define OSL_LOCALTIME_NS() osl_localtime_ns()
+#define OSL_GET_LOCALTIME(sec, usec) osl_get_localtime((sec), (usec))
+#define OSL_SYSTZTIME_US() osl_systztime_us()
+#define printf(fmt, args...) printk(PERCENT_S DHD_LOG_PREFIXS fmt, PRINTF_SYSTEM_TIME, ## args)
+/* bcopy's: Linux kernel doesn't provide these (anymore) */
+#define bcopy_hw(src, dst, len) memcpy((dst), (src), (len))
+#define bcopy_hw_async(src, dst, len) memcpy((dst), (src), (len))
+#define bcopy_hw_poll_for_completion()
+#define bcopy(src, dst, len) memcpy((dst), (src), (len))
+#define bcmp(b1, b2, len) memcmp((b1), (b2), (len))
+#define bzero(b, len) memset((b), '\0', (len))
+
+#if defined(CONFIG_SOC_EXYNOS9810) || defined(CONFIG_SOC_EXYNOS9820) || \
+ defined(CONFIG_SOC_EXYNOS9830) || defined(CONFIG_SOC_GS101)
+extern int pcie_ch_num;
+extern int exynos_pcie_l1_exit(int ch_num);
+#endif /* CONFIG_SOC_EXYNOS9810 || CONFIG_SOC_EXYNOS9820
+ * CONFIG_SOC_EXYNOS9830 || CONFIG_SOC_GS101
+ */
+
+/* register access macros */
+#if defined(OSLREGOPS)
+#define R_REG(osh, r) (\
+ sizeof(*(r)) == sizeof(uint8) ? osl_readb((osh), (volatile uint8*)(r)) : \
+ sizeof(*(r)) == sizeof(uint16) ? osl_readw((osh), (volatile uint16*)(r)) : \
+ sizeof(*(r)) == sizeof(uint32) ? osl_readl((osh), (volatile uint32*)(r)) : \
+ osl_readq((osh), (volatile uint64*)(r)) \
+)
+
+#define W_REG(osh, r, v) do { \
+ switch (sizeof(*(r))) { \
+ case sizeof(uint8): osl_writeb((osh), (volatile uint8*)(r), (uint8)(v)); break; \
+ case sizeof(uint16): osl_writew((osh), (volatile uint16*)(r), (uint16)(v)); break; \
+ case sizeof(uint32): osl_writel((osh), (volatile uint32*)(r), (uint32)(v)); break; \
+ case sizeof(uint64): osl_writeq((osh), (volatile uint64*)(r), (uint64)(v)); break; \
+ } \
+} while (0)
+
+extern uint8 osl_readb(osl_t *osh, volatile uint8 *r);
+extern uint16 osl_readw(osl_t *osh, volatile uint16 *r);
+extern uint32 osl_readl(osl_t *osh, volatile uint32 *r);
+extern uint64 osl_readq(osl_t *osh, volatile uint64 *r);
+extern void osl_writeb(osl_t *osh, volatile uint8 *r, uint8 v);
+extern void osl_writew(osl_t *osh, volatile uint16 *r, uint16 v);
+extern void osl_writel(osl_t *osh, volatile uint32 *r, uint32 v);
+extern void osl_writeq(osl_t *osh, volatile uint64 *r, uint64 v);
+
+#else /* OSLREGOPS */
+
+#ifndef IL_BIGENDIAN
+#ifdef CONFIG_64BIT
+/* readq is defined only on 64-bit platforms */
+#if defined(CONFIG_SOC_EXYNOS9810) || defined(CONFIG_SOC_EXYNOS9820) || \
+ defined(CONFIG_SOC_EXYNOS9830) || defined(CONFIG_SOC_GS101)
+#define R_REG(osh, r) (\
+ SELECT_BUS_READ(osh, \
+ ({ \
+ __typeof(*(r)) __osl_v = 0; \
+ exynos_pcie_l1_exit(pcie_ch_num); \
+ BCM_REFERENCE(osh); \
+ switch (sizeof(*(r))) { \
+ case sizeof(uint8): __osl_v = \
+ readb((volatile uint8*)(r)); break; \
+ case sizeof(uint16): __osl_v = \
+ readw((volatile uint16*)(r)); break; \
+ case sizeof(uint32): __osl_v = \
+ readl((volatile uint32*)(r)); break; \
+ case sizeof(uint64): __osl_v = \
+ readq((volatile uint64*)(r)); break; \
+ } \
+ __osl_v; \
+ }), \
+ OSL_READ_REG(osh, r)) \
+)
+#else
+#define R_REG(osh, r) (\
+ SELECT_BUS_READ(osh, \
+ ({ \
+ __typeof(*(r)) __osl_v = 0; \
+ BCM_REFERENCE(osh); \
+ switch (sizeof(*(r))) { \
+ case sizeof(uint8): __osl_v = \
+ readb((volatile uint8*)(r)); break; \
+ case sizeof(uint16): __osl_v = \
+ readw((volatile uint16*)(r)); break; \
+ case sizeof(uint32): __osl_v = \
+ readl((volatile uint32*)(r)); break; \
+ case sizeof(uint64): __osl_v = \
+ readq((volatile uint64*)(r)); break; \
+ } \
+ __osl_v; \
+ }), \
+ OSL_READ_REG(osh, r)) \
+)
+#endif /* CONFIG_SOC_EXYNOS9810 || CONFIG_SOC_EXYNOS9820
+ * CONFIG_SOC_EXYNOS9830 || CONFIG_SOC_GS101
+ */
+#else /* !CONFIG_64BIT */
+#define R_REG(osh, r) (\
+ SELECT_BUS_READ(osh, \
+ ({ \
+ __typeof(*(r)) __osl_v = 0; \
+ switch (sizeof(*(r))) { \
+ case sizeof(uint8): __osl_v = \
+ readb((volatile uint8*)(r)); break; \
+ case sizeof(uint16): __osl_v = \
+ readw((volatile uint16*)(r)); break; \
+ case sizeof(uint32): __osl_v = \
+ readl((volatile uint32*)(r)); break; \
+ } \
+ __osl_v; \
+ }), \
+ OSL_READ_REG(osh, r)) \
+)
+#endif /* CONFIG_64BIT */
+
+#ifdef CONFIG_64BIT
+/* writeq is defined only on 64-bit platforms */
+#if defined(CONFIG_SOC_EXYNOS9810) || defined(CONFIG_SOC_EXYNOS9820) || \
+ defined(CONFIG_SOC_EXYNOS9830) || defined(CONFIG_SOC_GS101)
+#define W_REG(osh, r, v) do { \
+ SELECT_BUS_WRITE(osh, \
+ ({ \
+ exynos_pcie_l1_exit(pcie_ch_num); \
+ switch (sizeof(*(r))) { \
+ case sizeof(uint8): writeb((uint8)(v), \
+ (volatile uint8*)(r)); break; \
+ case sizeof(uint16): writew((uint16)(v), \
+ (volatile uint16*)(r)); break; \
+ case sizeof(uint32): writel((uint32)(v), \
+ (volatile uint32*)(r)); break; \
+ case sizeof(uint64): writeq((uint64)(v), \
+ (volatile uint64*)(r)); break; \
+ } \
+ }), \
+ (OSL_WRITE_REG(osh, r, v))); \
+ } while (0)
+#else
+#define W_REG(osh, r, v) do { \
+ SELECT_BUS_WRITE(osh, \
+ switch (sizeof(*(r))) { \
+ case sizeof(uint8): writeb((uint8)(v), (volatile uint8*)(r)); break; \
+ case sizeof(uint16): writew((uint16)(v), (volatile uint16*)(r)); break; \
+ case sizeof(uint32): writel((uint32)(v), (volatile uint32*)(r)); break; \
+ case sizeof(uint64): writeq((uint64)(v), (volatile uint64*)(r)); break; \
+ }, \
+ (OSL_WRITE_REG(osh, r, v))); \
+ } while (0)
+#endif /* CONFIG_SOC_EXYNOS9810 || CONFIG_SOC_EXYNOS9820
+ * CONFIG_SOC_EXYNOS9830 || CONFIG_SOC_GS101
+ */
+#else /* !CONFIG_64BIT */
+#define W_REG(osh, r, v) do { \
+ SELECT_BUS_WRITE(osh, \
+ switch (sizeof(*(r))) { \
+ case sizeof(uint8): writeb((uint8)(v), (volatile uint8*)(r)); break; \
+ case sizeof(uint16): writew((uint16)(v), (volatile uint16*)(r)); break; \
+ case sizeof(uint32): writel((uint32)(v), (volatile uint32*)(r)); break; \
+ }, \
+ (OSL_WRITE_REG(osh, r, v))); \
+ } while (0)
+#endif /* CONFIG_64BIT */
+
+#else /* IL_BIGENDIAN */
+
+#ifdef CONFIG_64BIT
+/* readq and writeq are defined only on 64-bit platforms */
+#define R_REG(osh, r) (\
+ SELECT_BUS_READ(osh, \
+ ({ \
+ __typeof(*(r)) __osl_v = 0; \
+ switch (sizeof(*(r))) { \
+ case sizeof(uint8): __osl_v = \
+ readb((volatile uint8*)((uintptr)(r)^3)); break; \
+ case sizeof(uint16): __osl_v = \
+ readw((volatile uint16*)((uintptr)(r)^2)); break; \
+ case sizeof(uint32): __osl_v = \
+ readl((volatile uint32*)(r)); break; \
+ case sizeof(uint64): __osl_v = \
+ readq((volatile uint64*)(r)); break; \
+ } \
+ __osl_v; \
+ }), \
+ OSL_READ_REG(osh, r)) \
+)
+#define W_REG(osh, r, v) do { \
+ SELECT_BUS_WRITE(osh, \
+ switch (sizeof(*(r))) { \
+ case sizeof(uint8): writeb((uint8)(v), \
+ (volatile uint8*)((uintptr)(r)^3)); break; \
+ case sizeof(uint16): writew((uint16)(v), \
+ (volatile uint16*)((uintptr)(r)^2)); break; \
+ case sizeof(uint32): writel((uint32)(v), \
+ (volatile uint32*)(r)); break; \
+ case sizeof(uint64): writeq((uint64)(v), \
+ (volatile uint64*)(r)); break; \
+ }, \
+ (OSL_WRITE_REG(osh, r, v))); \
+ } while (0)
+
+#else /* !CONFIG_64BIT */
+#define R_REG(osh, r) (\
+ SELECT_BUS_READ(osh, \
+ ({ \
+ __typeof(*(r)) __osl_v = 0; \
+ switch (sizeof(*(r))) { \
+ case sizeof(uint8): __osl_v = \
+ readb((volatile uint8*)((uintptr)(r)^3)); break; \
+ case sizeof(uint16): __osl_v = \
+ readw((volatile uint16*)((uintptr)(r)^2)); break; \
+ case sizeof(uint32): __osl_v = \
+ readl((volatile uint32*)(r)); break; \
+ } \
+ __osl_v; \
+ }), \
+ OSL_READ_REG(osh, r)) \
+)
+#define W_REG(osh, r, v) do { \
+ SELECT_BUS_WRITE(osh, \
+ switch (sizeof(*(r))) { \
+ case sizeof(uint8): writeb((uint8)(v), \
+ (volatile uint8*)((uintptr)(r)^3)); break; \
+ case sizeof(uint16): writew((uint16)(v), \
+ (volatile uint16*)((uintptr)(r)^2)); break; \
+ case sizeof(uint32): writel((uint32)(v), \
+ (volatile uint32*)(r)); break; \
+ }, \
+ (OSL_WRITE_REG(osh, r, v))); \
+ } while (0)
+#endif /* CONFIG_64BIT */
+#endif /* IL_BIGENDIAN */
+
+#endif /* OSLREGOPS */
+
+#define AND_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) & (v))
+#define OR_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) | (v))
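+
+/* Illustrative read-modify-write over the accessors above; my_regs_t is a
+ * hypothetical register layout, and 'regs' would come from REG_MAP().
+ */
+typedef volatile struct my_regs {
+ uint32 control;
+ uint32 status;
+} my_regs_t;
+
+static void my_enable_irq(osl_t *osh, my_regs_t *regs, uint32 irq_bit)
+{
+ OR_REG(osh, &regs->control, irq_bit); /* set the enable bit */
+ (void)R_REG(osh, &regs->control); /* read back to post the write */
+}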
+
+/* uncached/cached virtual address */
+#define OSL_UNCACHED(va) ((void *)va)
+#define OSL_CACHED(va) ((void *)va)
+
+#define OSL_PREF_RANGE_LD(va, sz) BCM_REFERENCE(va)
+#define OSL_PREF_RANGE_ST(va, sz) BCM_REFERENCE(va)
+
+/* get processor cycle count */
+#if defined(__i386__)
+#define OSL_GETCYCLES(x) rdtscl((x))
+#else
+#define OSL_GETCYCLES(x) ((x) = 0)
+#endif /* __i386__ */
+
+/* dereference an address that may cause a bus exception */
+#define BUSPROBE(val, addr) ({ (val) = R_REG(NULL, (addr)); 0; })
+
+/* map/unmap physical to virtual I/O */
+#if !defined(CONFIG_MMC_MSM7X00A)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0))
+#define REG_MAP(pa, size) ioremap((unsigned long)(pa), (unsigned long)(size))
+#else
+#define REG_MAP(pa, size) ioremap_nocache((unsigned long)(pa), (unsigned long)(size))
+#endif
+#else
+#define REG_MAP(pa, size) (void *)(0)
+#endif /* !defined(CONFIG_MMC_MSM7X00A) */
+#define REG_UNMAP(va) iounmap((va))
+
+/* shared (dma-able) memory access macros */
+#define R_SM(r) *(r)
+#define W_SM(r, v) (*(r) = (v))
+#define OR_SM(r, v) (*(r) |= (v))
+#define BZERO_SM(r, len) memset((r), '\0', (len))
+
+/* Because the non-BINOSL implementation of the PKT OSL routines consists of macros (for
+ * performance reasons), we need the Linux headers.
+ */
+#include <linuxver.h> /* use current 2.4.x calling conventions */
+
+#else /* BINOSL */
+
+/* Where to get the declarations for mem, str, printf, bcopy's? Two basic approaches.
+ *
+ * First, use the Linux header files and the C standard library replacement versions
+ * built into the kernel. Use this approach when compiling non-hybrid code or compiling
+ * the OS port files. The second approach is to use our own defines/prototypes and
+ * functions we have provided in the Linux OSL, i.e. linux_osl.c. Use this approach when
+ * compiling the files that make up the hybrid binary. We are ensuring we
+ * don't directly link to the kernel replacement routines from the hybrid binary.
+ *
+ * NOTE: The issue we are trying to avoid is any questioning of whether the
+ * hybrid binary is derived from Linux. The wireless common code (wlc) is designed
+ * to be OS independent through the use of the OSL API and thus the hybrid binary doesn't
+ * derive from the Linux kernel at all. But since we defined our OSL API to include
+ * a small collection of standard C library routines and these routines are provided in
+ * the kernel we want to avoid even the appearance of deriving at all even though clearly
+ * usage of a C standard library API doesn't represent a derivation from Linux. Lastly
+ * note at the time of this checkin 4 references to memcpy/memset could not be eliminated
+ * from the binary because they are created internally by GCC as part of things like
+ * structure assignment. I don't think the compiler should be doing this, but there is
+ * no option to disable it on Intel architectures (there is for MIPS, so somebody must
+ * agree with me). I may eventually be able to remove these references with
+ * a GNU binutil such as objcopy via a symbol rename (i.e. memcpy to osl_memcpy).
+ */
+ #define printf(fmt, args...) printk(fmt , ## args)
+ #include <linux/kernel.h> /* for vsn/printf's */
+ #include <linux/string.h> /* for mem*, str* */
+ /* bcopy's: Linux kernel doesn't provide these (anymore) */
+ #define bcopy(src, dst, len) memcpy((dst), (src), (len))
+ #define bcmp(b1, b2, len) memcmp((b1), (b2), (len))
+ #define bzero(b, len) memset((b), '\0', (len))
+
+ /* These are provided only because when compiling linux_osl.c there
+ * must be an explicit prototype (separate from the definition) because
+ * we are compiling with the GCC option -Wstrict-prototypes. Alternatively
+ * these could be placed directly in linux_osl.c.
+ */
+ extern int osl_printf(const char *format, ...);
+ extern int osl_sprintf(char *buf, const char *format, ...);
+ extern int osl_snprintf(char *buf, size_t n, const char *format, ...);
+ extern int osl_vsprintf(char *buf, const char *format, va_list ap);
+ extern int osl_vsnprintf(char *buf, size_t n, const char *format, va_list ap);
+ extern int osl_strcmp(const char *s1, const char *s2);
+ extern int osl_strncmp(const char *s1, const char *s2, uint n);
+ extern int osl_strlen(const char *s);
+ extern char* osl_strcpy(char *d, const char *s);
+ extern char* osl_strncpy(char *d, const char *s, uint n);
+ extern char* osl_strchr(const char *s, int c);
+ extern char* osl_strrchr(const char *s, int c);
+ extern void *osl_memset(void *d, int c, size_t n);
+ extern void *osl_memcpy(void *d, const void *s, size_t n);
+ extern void *osl_memmove(void *d, const void *s, size_t n);
+ extern int osl_memcmp(const void *s1, const void *s2, size_t n);
+
+/* register access macros */
+#if !defined(BCMSDIO)
+#define R_REG(osh, r) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ sizeof(*(r)) == sizeof(uint8) ? osl_readb((volatile uint8*)(r)) : \
+ sizeof(*(r)) == sizeof(uint16) ? osl_readw((volatile uint16*)(r)) : \
+ sizeof(*(r)) == sizeof(uint32) ? osl_readl((volatile uint32*)(r)) : \
+ osl_readq((volatile uint64*)(r)); \
+ })
+#define W_REG(osh, r, v) do { \
+ BCM_REFERENCE(osh); \
+ switch (sizeof(*(r))) { \
+ case sizeof(uint8): osl_writeb((uint8)(v), (volatile uint8*)(r)); break; \
+ case sizeof(uint16): osl_writew((uint16)(v), (volatile uint16*)(r)); break; \
+ case sizeof(uint32): osl_writel((uint32)(v), (volatile uint32*)(r)); break; \
+ case sizeof(uint64): osl_writeq((uint64)(v), (volatile uint64*)(r)); break; \
+ } \
+} while (0)
+
+#else
+#define R_REG(osh, r) OSL_READ_REG(osh, r)
+#define W_REG(osh, r, v) do { OSL_WRITE_REG(osh, r, v); } while (0)
+#endif /* !defined(BCMSDIO) */
+
+#define AND_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) & (v))
+#define OR_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) | (v))
+extern uint8 osl_readb(volatile uint8 *r);
+extern uint16 osl_readw(volatile uint16 *r);
+extern uint32 osl_readl(volatile uint32 *r);
+extern uint64 osl_readq(volatile uint64 *r);
+extern void osl_writeb(uint8 v, volatile uint8 *r);
+extern void osl_writew(uint16 v, volatile uint16 *r);
+extern void osl_writel(uint32 v, volatile uint32 *r);
+extern void osl_writeq(uint64 v, volatile uint64 *r);
+
+/* system up time in ms */
+#define OSL_SYSUPTIME() osl_sysuptime()
+extern uint32 osl_sysuptime(void);
+
+/* uncached/cached virtual address */
+#define OSL_UNCACHED(va) osl_uncached((va))
+extern void *osl_uncached(void *va);
+#define OSL_CACHED(va) osl_cached((va))
+extern void *osl_cached(void *va);
+
+#define OSL_PREF_RANGE_LD(va, sz)
+#define OSL_PREF_RANGE_ST(va, sz)
+
+/* get processor cycle count */
+#define OSL_GETCYCLES(x) ((x) = osl_getcycles())
+extern uint osl_getcycles(void);
+
+/* dereference an address that may target abort */
+#define BUSPROBE(val, addr) osl_busprobe(&(val), (addr))
+extern int osl_busprobe(uint32 *val, uint32 addr);
+
+/* map/unmap physical to virtual */
+#define REG_MAP(pa, size) osl_reg_map((pa), (size))
+#define REG_UNMAP(va) osl_reg_unmap((va))
+extern void *osl_reg_map(uint32 pa, uint size);
+extern void osl_reg_unmap(void *va);
+
+/* shared (dma-able) memory access macros */
+#define R_SM(r) *(r)
+#define W_SM(r, v) (*(r) = (v))
+#define OR_SM(r, v) (*(r) |= (v))
+#define BZERO_SM(r, len) bzero((r), (len))
+
+#endif /* BINOSL */
+
+#define OSL_RAND() osl_rand()
+extern uint32 osl_rand(void);
+
+#define DMA_FLUSH(osh, va, size, direction, p, dmah) \
+ osl_dma_flush((osh), (va), (size), (direction), (p), (dmah))
+#define DMA_MAP(osh, va, size, direction, p, dmah) \
+ osl_dma_map((osh), (va), (size), (direction), (p), (dmah))
+
+#else /* ! BCMDRIVER */
+
+/* ASSERT */
+#ifdef BCMDBG_ASSERT
+ #include <assert.h>
+ #define ASSERT assert
+#else /* BCMDBG_ASSERT */
+ #define ASSERT(exp) do {} while (0)
+#endif /* BCMDBG_ASSERT */
+
+#define ASSERT_FP(exp) ASSERT(exp)
+
+/* MALLOC and MFREE */
+#define MALLOC(o, l) malloc(l)
+#define MFREE(o, p, l) free(p)
+#include <stdlib.h>
+
+/* str* and mem* functions */
+#include <string.h>
+
+/* *printf functions */
+#include <stdio.h>
+
+/* bcopy, bcmp, and bzero */
+extern void bcopy(const void *src, void *dst, size_t len);
+extern int bcmp(const void *b1, const void *b2, size_t len);
+extern void bzero(void *b, size_t len);
+#endif /* ! BCMDRIVER */
+
+typedef struct sk_buff_head PKT_LIST;
+#define PKTLIST_INIT(x) skb_queue_head_init((x))
+#define PKTLIST_ENQ(x, y) skb_queue_head((struct sk_buff_head *)(x), (struct sk_buff *)(y))
+#define PKTLIST_DEQ(x) skb_dequeue((struct sk_buff_head *)(x))
+#define PKTLIST_UNLINK(x, y) skb_unlink((struct sk_buff *)(y), (struct sk_buff_head *)(x))
+#define PKTLIST_FINI(x) skb_queue_purge((struct sk_buff_head *)(x))
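+
+/* Illustrative use of the PKT_LIST wrappers above for a hypothetical
+ * receive hold queue.
+ */
+static void my_rx_hold_demo(void *pkt)
+{
+ PKT_LIST q;
+
+ PKTLIST_INIT(&q);
+ PKTLIST_ENQ(&q, pkt);
+ pkt = PKTLIST_DEQ(&q); /* NULL once the list is empty */
+ PKTLIST_FINI(&q); /* frees anything still queued */
+}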
+
+#ifndef _linuxver_h_
+typedef struct timer_list_compat timer_list_compat_t;
+#endif /* _linuxver_h_ */
+typedef struct osl_timer {
+ timer_list_compat_t *timer;
+ bool set;
+#ifdef BCMDBG
+ char *name; /* Description of the timer */
+#endif
+} osl_timer_t;
+
+typedef void (*linux_timer_fn)(ulong arg);
+
+extern osl_timer_t * osl_timer_init(osl_t *osh, const char *name, void (*fn)(void *arg), void *arg);
+extern void osl_timer_add(osl_t *osh, osl_timer_t *t, uint32 ms, bool periodic);
+extern void osl_timer_update(osl_t *osh, osl_timer_t *t, uint32 ms, bool periodic);
+extern bool osl_timer_del(osl_t *osh, osl_timer_t *t);
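+
+/* Illustrative one-shot watchdog over the osl_timer API above; the timer
+ * name and interval are hypothetical.
+ */
+static void my_watchdog_fn(void *arg)
+{
+ BCM_REFERENCE(arg); /* runs in timer context */
+}
+
+static osl_timer_t *my_watchdog_start(osl_t *osh)
+{
+ osl_timer_t *t = osl_timer_init(osh, "my_wd", my_watchdog_fn, NULL);
+
+ if (t != NULL)
+  osl_timer_add(osh, t, 1000u, FALSE); /* fire once after 1000 ms */
+ return t;
+}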
+
+#ifdef BCMDRIVER
+typedef atomic_t osl_atomic_t;
+#define OSL_ATOMIC_SET(osh, v, x) atomic_set(v, x)
+#define OSL_ATOMIC_INIT(osh, v) atomic_set(v, 0)
+#define OSL_ATOMIC_INC(osh, v) atomic_inc(v)
+#define OSL_ATOMIC_INC_RETURN(osh, v) atomic_inc_return(v)
+#define OSL_ATOMIC_DEC(osh, v) atomic_dec(v)
+#define OSL_ATOMIC_DEC_RETURN(osh, v) atomic_dec_return(v)
+#define OSL_ATOMIC_READ(osh, v) atomic_read(v)
+#define OSL_ATOMIC_ADD(osh, v, x) atomic_add(v, x)
+
+#ifndef atomic_set_mask
+#define OSL_ATOMIC_OR(osh, v, x) atomic_or(x, v)
+#define OSL_ATOMIC_AND(osh, v, x) atomic_and(x, v)
+#else
+#define OSL_ATOMIC_OR(osh, v, x) atomic_set_mask(x, v)
+#define OSL_ATOMIC_AND(osh, v, x) atomic_clear_mask(~x, v)
+#endif
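+
+/* Illustrative refcount built on the atomic wrappers above; 'refcnt'
+ * would live in some driver structure.
+ */
+static bool my_put_ref(osl_t *osh, osl_atomic_t *refcnt)
+{
+ /* TRUE when the last reference was just dropped */
+ return (OSL_ATOMIC_DEC_RETURN(osh, refcnt) == 0);
+}
+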
+#endif /* BCMDRIVER */
+
+extern void *osl_spin_lock_init(osl_t *osh);
+extern void osl_spin_lock_deinit(osl_t *osh, void *lock);
+extern unsigned long osl_spin_lock(void *lock);
+extern void osl_spin_unlock(void *lock, unsigned long flags);
+extern unsigned long osl_spin_lock_irq(void *lock);
+extern void osl_spin_unlock_irq(void *lock, unsigned long flags);
+extern unsigned long osl_spin_lock_bh(void *lock);
+extern void osl_spin_unlock_bh(void *lock, unsigned long flags);
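+
+/* Illustrative critical section: the osl spinlock wrappers return the
+ * saved flags, which must be handed back to the matching unlock.
+ */
+static void my_critical_update(void *lock, int *shared_counter)
+{
+ unsigned long flags = osl_spin_lock(lock);
+
+ (*shared_counter)++; /* shared state touched under the lock */
+ osl_spin_unlock(lock, flags);
+}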
+
+extern void *osl_mutex_lock_init(osl_t *osh);
+extern void osl_mutex_lock_deinit(osl_t *osh, void *lock);
+extern unsigned long osl_mutex_lock(void *lock);
+extern void osl_mutex_unlock(void *lock, unsigned long flags);
+
+typedef struct osl_timespec {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0))
+ __kernel_old_time_t tv_sec; /* seconds */
+#else
+ __kernel_time_t tv_sec; /* seconds */
+#endif
+ __kernel_suseconds_t tv_usec; /* microseconds */
+ long tv_nsec; /* nanoseconds */
+} osl_timespec_t;
+extern void osl_do_gettimeofday(struct osl_timespec *ts);
+extern void osl_get_monotonic_boottime(struct osl_timespec *ts);
+extern uint32 osl_do_gettimediff(struct osl_timespec *cur_ts, struct osl_timespec *old_ts);
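+
+/* Illustrative elapsed-time measurement with the helpers above; the
+ * return unit is whatever osl_do_gettimediff() yields (assumed to be
+ * microseconds).
+ */
+static uint32 my_elapsed(void)
+{
+ struct osl_timespec t0, t1;
+
+ osl_do_gettimeofday(&t0);
+ /* ... work being timed ... */
+ osl_do_gettimeofday(&t1);
+ return osl_do_gettimediff(&t1, &t0);
+}
+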
+#endif /* _linux_osl_h_ */
diff --git a/bcmdhd.101.10.361.x/include/linux_pkt.h b/bcmdhd.101.10.361.x/include/linux_pkt.h
new file mode 100755
index 0000000..f2fcf5f
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/linux_pkt.h
@@ -0,0 +1,421 @@
+/*
+ * Linux Packet (skb) interface
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _linux_pkt_h_
+#define _linux_pkt_h_
+
+#include <typedefs.h>
+
+#ifdef __ARM_ARCH_7A__
+#define PKT_HEADROOM_DEFAULT NET_SKB_PAD /**< NET_SKB_PAD is defined in a linux kernel header */
+#else
+#define PKT_HEADROOM_DEFAULT 16
+#endif /* __ARM_ARCH_7A__ */
+
+#ifdef BCMDRIVER
+/*
+ * BINOSL selects the slightly slower function-call-based binary compatible osl.
+ * Macros expand to calls to functions defined in linux_osl.c.
+ */
+#ifndef BINOSL
+/* Because the non-BINOSL implementation of the PKT OSL routines consists of macros (for
+ * performance reasons), we need the Linux headers.
+ */
+#include <linuxver.h>
+
+/* packet primitives */
+#ifndef BCMDBG_PKT
+#ifdef BCMDBG_CTRACE
+#define PKTGET(osh, len, send) linux_pktget((osh), (len), __LINE__, __FILE__)
+#define PKTDUP(osh, skb) osl_pktdup((osh), (skb), __LINE__, __FILE__)
+#else
+#ifdef BCM_OBJECT_TRACE
+#define PKTGET(osh, len, send) linux_pktget((osh), (len), __LINE__, __FUNCTION__)
+#define PKTDUP(osh, skb) osl_pktdup((osh), (skb), __LINE__, __FUNCTION__)
+#else
+#define PKTGET(osh, len, send) linux_pktget((osh), (len))
+#define PKTDUP(osh, skb) osl_pktdup((osh), (skb))
+#endif /* BCM_OBJECT_TRACE */
+#endif /* BCMDBG_CTRACE */
+#define PKTLIST_DUMP(osh, buf) BCM_REFERENCE(osh)
+#define PKTDBG_TRACE(osh, pkt, bit) BCM_REFERENCE(osh)
+#else /* BCMDBG_PKT pkt logging for debugging */
+#define PKTGET(osh, len, send) linux_pktget((osh), (len), __LINE__, __FILE__)
+#define PKTDUP(osh, skb) osl_pktdup((osh), (skb), __LINE__, __FILE__)
+#define PKTLIST_DUMP(osh, buf) osl_pktlist_dump(osh, buf)
+#define BCMDBG_PTRACE
+#define PKTLIST_IDX(skb) ((uint16 *)((char *)PKTTAG(skb) + \
+ sizeof(((struct sk_buff*)(skb))->cb) - sizeof(uint16)))
+#define PKTDBG_TRACE(osh, pkt, bit) osl_pkttrace(osh, pkt, bit)
+#endif /* BCMDBG_PKT */
+#if defined(BCM_OBJECT_TRACE)
+#define PKTFREE(osh, skb, send) linux_pktfree((osh), (skb), (send), __LINE__, __FUNCTION__)
+#else
+#define PKTFREE(osh, skb, send) linux_pktfree((osh), (skb), (send))
+#endif /* BCM_OBJECT_TRACE */
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+#define PKTGET_STATIC(osh, len, send) osl_pktget_static((osh), (len))
+#define PKTFREE_STATIC(osh, skb, send) osl_pktfree_static((osh), (skb), (send))
+#else
+#define PKTGET_STATIC PKTGET
+#define PKTFREE_STATIC PKTFREE
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+
+#define PKTDATA(osh, skb) ({BCM_REFERENCE(osh); (((struct sk_buff*)(skb))->data);})
+#define PKTLEN(osh, skb) ({BCM_REFERENCE(osh); (((struct sk_buff*)(skb))->len);})
+#define PKTHEAD(osh, skb) ({BCM_REFERENCE(osh); (((struct sk_buff*)(skb))->head);})
+#define PKTSOCK(osh, skb) ({BCM_REFERENCE(osh); (((struct sk_buff*)(skb))->sk);})
+#define PKTSETHEAD(osh, skb, h) ({BCM_REFERENCE(osh); \
+ (((struct sk_buff *)(skb))->head = (h));})
+#define PKTHEADROOM(osh, skb) (PKTDATA(osh, skb)-(((struct sk_buff*)(skb))->head))
+#define PKTEXPHEADROOM(osh, skb, b) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ skb_realloc_headroom((struct sk_buff*)(skb), (b)); \
+ })
+#define PKTTAILROOM(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ skb_tailroom((struct sk_buff*)(skb)); \
+ })
+#define PKTPADTAILROOM(osh, skb, padlen) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ skb_pad((struct sk_buff*)(skb), (padlen)); \
+ })
+#define PKTNEXT(osh, skb) ({BCM_REFERENCE(osh); (((struct sk_buff*)(skb))->next);})
+#define PKTSETNEXT(osh, skb, x) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ (((struct sk_buff*)(skb))->next = (struct sk_buff*)(x)); \
+ })
+#define PKTSETLEN(osh, skb, len) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ __skb_trim((struct sk_buff*)(skb), (len)); \
+ })
+#define PKTPUSH(osh, skb, bytes) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ skb_push((struct sk_buff*)(skb), (bytes)); \
+ })
+#define PKTPULL(osh, skb, bytes) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ skb_pull((struct sk_buff*)(skb), (bytes)); \
+ })
+#define PKTTAG(skb) ((void*)(((struct sk_buff*)(skb))->cb))
+#define PKTSETPOOL(osh, skb, x, y) BCM_REFERENCE(osh)
+#define PKTPOOL(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb); FALSE;})
+#define PKTFREELIST(skb) PKTLINK(skb)
+#define PKTSETFREELIST(skb, x) PKTSETLINK((skb), (x))
+#define PKTPTR(skb) (skb)
+#define PKTID(skb) ({BCM_REFERENCE(skb); 0;})
+#define PKTSETID(skb, id) ({BCM_REFERENCE(skb); BCM_REFERENCE(id);})
+#define PKTIDAVAIL() (0xFFFFFFFFu)
+#define PKTSHRINK(osh, m) ({BCM_REFERENCE(osh); m;})
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) && defined(TSQ_MULTIPLIER)
+#define PKTORPHAN(skb, tsq) osl_pkt_orphan_partial(skb, tsq)
+extern void osl_pkt_orphan_partial(struct sk_buff *skb, int tsq);
+#else
+#define PKTORPHAN(skb, tsq) ({BCM_REFERENCE(skb); 0;})
+#endif /* Linux Version >= 3.6 */
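+
+/* Illustrative packet construction with the skb-backed macros above:
+ * allocate with headroom, strip the reserve, fill, then free. The sizes
+ * are hypothetical.
+ */
+static void my_pkt_demo(osl_t *osh)
+{
+ void *p = PKTGET(osh, PKT_HEADROOM_DEFAULT + 64u, TRUE); /* TX alloc */
+
+ if (p == NULL)
+  return;
+ PKTPULL(osh, p, PKT_HEADROOM_DEFAULT); /* leave room to PKTPUSH headers later */
+ memset(PKTDATA(osh, p), 0, PKTLEN(osh, p));
+ PKTFREE(osh, p, TRUE);
+}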
+
+#ifdef BCMDBG_CTRACE
+#define DEL_CTRACE(zosh, zskb) { \
+ unsigned long zflags; \
+ OSL_CTRACE_LOCK(&(zosh)->ctrace_lock, zflags); \
+ list_del(&(zskb)->ctrace_list); \
+ (zosh)->ctrace_num--; \
+ (zskb)->ctrace_start = 0; \
+ (zskb)->ctrace_count = 0; \
+ OSL_CTRACE_UNLOCK(&(zosh)->ctrace_lock, zflags); \
+}
+
+#define UPDATE_CTRACE(zskb, zfile, zline) { \
+ struct sk_buff *_zskb = (struct sk_buff *)(zskb); \
+ if (_zskb->ctrace_count < CTRACE_NUM) { \
+ _zskb->func[_zskb->ctrace_count] = zfile; \
+ _zskb->line[_zskb->ctrace_count] = zline; \
+ _zskb->ctrace_count++; \
+ } \
+ else { \
+ _zskb->func[_zskb->ctrace_start] = zfile; \
+ _zskb->line[_zskb->ctrace_start] = zline; \
+ _zskb->ctrace_start++; \
+ if (_zskb->ctrace_start >= CTRACE_NUM) \
+ _zskb->ctrace_start = 0; \
+ } \
+}
+
+#define ADD_CTRACE(zosh, zskb, zfile, zline) { \
+ unsigned long zflags; \
+ OSL_CTRACE_LOCK(&(zosh)->ctrace_lock, zflags); \
+ list_add(&(zskb)->ctrace_list, &(zosh)->ctrace_list); \
+ (zosh)->ctrace_num++; \
+ UPDATE_CTRACE(zskb, zfile, zline); \
+ OSL_CTRACE_UNLOCK(&(zosh)->ctrace_lock, zflags); \
+}
+
+#define PKTCALLER(zskb) UPDATE_CTRACE((struct sk_buff *)zskb, (char *)__FUNCTION__, __LINE__)
+#endif /* BCMDBG_CTRACE */
+
+#define PKTSETFAST(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define PKTCLRFAST(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define PKTISFAST(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb); FALSE;})
+#define PKTLITIDX(skb) ({BCM_REFERENCE(skb); 0;})
+#define PKTSETLITIDX(skb, idx) ({BCM_REFERENCE(skb); BCM_REFERENCE(idx);})
+#define PKTRESETLITIDX(skb) ({BCM_REFERENCE(skb);})
+#define PKTRITIDX(skb) ({BCM_REFERENCE(skb); 0;})
+#define PKTSETRITIDX(skb, idx) ({BCM_REFERENCE(skb); BCM_REFERENCE(idx);})
+#define PKTRESETRITIDX(skb) ({BCM_REFERENCE(skb);})
+
+#define PKTSETSKIPCT(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define PKTCLRSKIPCT(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define PKTSKIPCT(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+
+#define PKTFRAGLEN(osh, lb, ix) (0)
+#define PKTSETFRAGLEN(osh, lb, ix, len) BCM_REFERENCE(osh)
+
+#define PKTSETTOBR(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define PKTCLRTOBR(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define PKTISTOBR(skb) ({BCM_REFERENCE(skb); FALSE;})
+
+#ifdef BCMFA
+#ifdef BCMFA_HW_HASH
+#define PKTSETFAHIDX(skb, idx) (((struct sk_buff*)(skb))->napt_idx = idx)
+#else
+#define PKTSETFAHIDX(skb, idx) ({BCM_REFERENCE(skb); BCM_REFERENCE(idx);})
+#endif /* BCMFA_HW_HASH */
+#define PKTGETFAHIDX(skb) (((struct sk_buff*)(skb))->napt_idx)
+#define PKTSETFADEV(skb, imp) (((struct sk_buff*)(skb))->dev = imp)
+#define PKTSETRXDEV(skb) (((struct sk_buff*)(skb))->rxdev = ((struct sk_buff*)(skb))->dev)
+
+#define AUX_TCP_FIN_RST (1 << 0)
+#define AUX_FREED (1 << 1)
+#define PKTSETFAAUX(skb) (((struct sk_buff*)(skb))->napt_flags |= AUX_TCP_FIN_RST)
+#define PKTCLRFAAUX(skb) (((struct sk_buff*)(skb))->napt_flags &= (~AUX_TCP_FIN_RST))
+#define PKTISFAAUX(skb) (((struct sk_buff*)(skb))->napt_flags & AUX_TCP_FIN_RST)
+#define PKTSETFAFREED(skb) (((struct sk_buff*)(skb))->napt_flags |= AUX_FREED)
+#define PKTCLRFAFREED(skb) (((struct sk_buff*)(skb))->napt_flags &= (~AUX_FREED))
+#define PKTISFAFREED(skb) (((struct sk_buff*)(skb))->napt_flags & AUX_FREED)
+#define PKTISFABRIDGED(skb) PKTISFAAUX(skb)
+#else
+#define PKTISFAAUX(skb) ({BCM_REFERENCE(skb); FALSE;})
+#define PKTISFABRIDGED(skb) ({BCM_REFERENCE(skb); FALSE;})
+#define PKTISFAFREED(skb) ({BCM_REFERENCE(skb); FALSE;})
+
+#define PKTCLRFAAUX(skb) BCM_REFERENCE(skb)
+#define PKTSETFAFREED(skb) BCM_REFERENCE(skb)
+#define PKTCLRFAFREED(skb) BCM_REFERENCE(skb)
+#endif /* BCMFA */
+
+#if defined(BCM_OBJECT_TRACE)
+extern void linux_pktfree(osl_t *osh, void *skb, bool send, int line, const char *caller);
+#else
+extern void linux_pktfree(osl_t *osh, void *skb, bool send);
+#endif /* BCM_OBJECT_TRACE */
+extern void *osl_pktget_static(osl_t *osh, uint len);
+extern void osl_pktfree_static(osl_t *osh, void *skb, bool send);
+extern void osl_pktclone(osl_t *osh, void **pkt);
+
+#ifdef BCMDBG_PKT /* pkt logging for debugging */
+extern void *linux_pktget(osl_t *osh, uint len, int line, char *file);
+extern void *osl_pkt_frmnative(osl_t *osh, void *skb, int line, char *file);
+extern void *osl_pktdup(osl_t *osh, void *skb, int line, char *file);
+extern void osl_pktlist_add(osl_t *osh, void *p, int line, char *file);
+extern void osl_pktlist_remove(osl_t *osh, void *p);
+extern char *osl_pktlist_dump(osl_t *osh, char *buf);
+#ifdef BCMDBG_PTRACE
+extern void osl_pkttrace(osl_t *osh, void *pkt, uint16 bit);
+#endif /* BCMDBG_PTRACE */
+#else /* BCMDBG_PKT */
+#ifdef BCMDBG_CTRACE
+#define PKT_CTRACE_DUMP(osh, b) osl_ctrace_dump((osh), (b))
+extern void *linux_pktget(osl_t *osh, uint len, int line, char *file);
+extern void *osl_pkt_frmnative(osl_t *osh, void *skb, int line, char *file);
+extern int osl_pkt_is_frmnative(osl_t *osh, struct sk_buff *pkt);
+extern void *osl_pktdup(osl_t *osh, void *skb, int line, char *file);
+struct bcmstrbuf;
+extern void osl_ctrace_dump(osl_t *osh, struct bcmstrbuf *b);
+#else
+#ifdef BCM_OBJECT_TRACE
+extern void *linux_pktget(osl_t *osh, uint len, int line, const char *caller);
+extern void *osl_pktdup(osl_t *osh, void *skb, int line, const char *caller);
+#else
+extern void *linux_pktget(osl_t *osh, uint len);
+extern void *osl_pktdup(osl_t *osh, void *skb);
+#endif /* BCM_OBJECT_TRACE */
+extern void *osl_pkt_frmnative(osl_t *osh, void *skb);
+#endif /* BCMDBG_CTRACE */
+#endif /* BCMDBG_PKT */
+extern struct sk_buff *osl_pkt_tonative(osl_t *osh, void *pkt);
+#ifdef BCMDBG_PKT
+#define PKTFRMNATIVE(osh, skb) osl_pkt_frmnative(((osl_t *)osh), \
+ (struct sk_buff*)(skb), __LINE__, __FILE__)
+#else /* BCMDBG_PKT */
+#ifdef BCMDBG_CTRACE
+#define PKTFRMNATIVE(osh, skb) osl_pkt_frmnative(((osl_t *)osh), \
+ (struct sk_buff*)(skb), __LINE__, __FILE__)
+#define PKTISFRMNATIVE(osh, skb) osl_pkt_is_frmnative((osl_t *)(osh), (struct sk_buff *)(skb))
+#else
+#define PKTFRMNATIVE(osh, skb) osl_pkt_frmnative(((osl_t *)osh), (struct sk_buff*)(skb))
+#endif /* BCMDBG_CTRACE */
+#endif /* BCMDBG_PKT */
+#define PKTTONATIVE(osh, pkt) osl_pkt_tonative((osl_t *)(osh), (pkt))
+
+#define PKTLINK(skb) (((struct sk_buff*)(skb))->prev)
+#define PKTSETLINK(skb, x) (((struct sk_buff*)(skb))->prev = (struct sk_buff*)(x))
+#define PKTPRIO(skb) (((struct sk_buff*)(skb))->priority)
+#define PKTSETPRIO(skb, x) (((struct sk_buff*)(skb))->priority = (x))
+#define PKTSUMNEEDED(skb) (((struct sk_buff*)(skb))->ip_summed == CHECKSUM_HW)
+#define PKTSETSUMGOOD(skb, x) (((struct sk_buff*)(skb))->ip_summed = \
+ ((x) ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE))
+/* PKTSETSUMNEEDED and PKTSUMGOOD are not possible because skb->ip_summed is overloaded */
+#define PKTSHARED(skb) (((struct sk_buff*)(skb))->cloned)
+
+#ifdef CONFIG_NF_CONNTRACK_MARK
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#define PKTMARK(p) (((struct sk_buff *)(p))->mark)
+#define PKTSETMARK(p, m) ((struct sk_buff *)(p))->mark = (m)
+#else /* !2.6.0 */
+#define PKTMARK(p) (((struct sk_buff *)(p))->nfmark)
+#define PKTSETMARK(p, m) ((struct sk_buff *)(p))->nfmark = (m)
+#endif /* 2.6.0 */
+#else /* CONFIG_NF_CONNTRACK_MARK */
+#define PKTMARK(p) 0
+#define PKTSETMARK(p, m)
+#endif /* CONFIG_NF_CONNTRACK_MARK */
+
+#else /* BINOSL */
+
+#define OSL_PREF_RANGE_LD(va, sz)
+#define OSL_PREF_RANGE_ST(va, sz)
+
+/* packet primitives */
+#ifdef BCMDBG_PKT
+#define PKTGET(osh, len, send) linux_pktget((osh), (len), __LINE__, __FILE__)
+#define PKTDUP(osh, skb) osl_pktdup((osh), (skb), __LINE__, __FILE__)
+#define PKTFRMNATIVE(osh, skb) osl_pkt_frmnative((osh), (skb), __LINE__, __FILE__)
+#define PKTLIST_DUMP(osh, buf) osl_pktlist_dump(osh, buf)
+#define PKTDBG_TRACE(osh, pkt, bit) BCM_REFERENCE(osh)
+#else /* BCMDBG_PKT */
+#ifdef BCMDBG_CTRACE
+#define PKTGET(osh, len, send) linux_pktget((osh), (len), __LINE__, __FILE__)
+#define PKTDUP(osh, skb) osl_pktdup((osh), (skb), __LINE__, __FILE__)
+#define PKTFRMNATIVE(osh, skb) osl_pkt_frmnative((osh), (skb), __LINE__, __FILE__)
+#else
+#ifdef BCM_OBJECT_TRACE
+#define PKTGET(osh, len, send) linux_pktget((osh), (len), __LINE__, __FUNCTION__)
+#define PKTDUP(osh, skb) osl_pktdup((osh), (skb), __LINE__, __FUNCTION__)
+#else
+#define PKTGET(osh, len, send) linux_pktget((osh), (len))
+#define PKTDUP(osh, skb) osl_pktdup((osh), (skb))
+#endif /* BCM_OBJECT_TRACE */
+#define PKTFRMNATIVE(osh, skb) osl_pkt_frmnative((osh), (skb))
+#endif /* BCMDBG_CTRACE */
+#define PKTLIST_DUMP(osh, buf) ({BCM_REFERENCE(osh); BCM_REFERENCE(buf);})
+#define PKTDBG_TRACE(osh, pkt, bit) ({BCM_REFERENCE(osh); BCM_REFERENCE(pkt);})
+#endif /* BCMDBG_PKT */
+#if defined(BCM_OBJECT_TRACE)
+#define PKTFREE(osh, skb, send) linux_pktfree((osh), (skb), (send), __LINE__, __FUNCTION__)
+#else
+#define PKTFREE(osh, skb, send) linux_pktfree((osh), (skb), (send))
+#endif /* BCM_OBJECT_TRACE */
+#define PKTDATA(osh, skb) osl_pktdata((osh), (skb))
+#define PKTLEN(osh, skb) osl_pktlen((osh), (skb))
+#define PKTHEADROOM(osh, skb) osl_pktheadroom((osh), (skb))
+#define PKTTAILROOM(osh, skb) osl_pkttailroom((osh), (skb))
+#define PKTNEXT(osh, skb) osl_pktnext((osh), (skb))
+#define PKTSETNEXT(osh, skb, x) ({BCM_REFERENCE(osh); osl_pktsetnext((skb), (x));})
+#define PKTSETLEN(osh, skb, len) osl_pktsetlen((osh), (skb), (len))
+#define PKTPUSH(osh, skb, bytes) osl_pktpush((osh), (skb), (bytes))
+#define PKTPULL(osh, skb, bytes) osl_pktpull((osh), (skb), (bytes))
+#define PKTTAG(skb) osl_pkttag((skb))
+#define PKTTONATIVE(osh, pkt) osl_pkt_tonative((osh), (pkt))
+#define PKTLINK(skb) osl_pktlink((skb))
+#define PKTSETLINK(skb, x) osl_pktsetlink((skb), (x))
+#define PKTPRIO(skb) osl_pktprio((skb))
+#define PKTSETPRIO(skb, x) osl_pktsetprio((skb), (x))
+#define PKTSHARED(skb) osl_pktshared((skb))
+#define PKTSETPOOL(osh, skb, x, y) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define PKTPOOL(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb); FALSE;})
+#define PKTFREELIST(skb) PKTLINK(skb)
+#define PKTSETFREELIST(skb, x) PKTSETLINK((skb), (x))
+#define PKTPTR(skb) (skb)
+#define PKTID(skb) ({BCM_REFERENCE(skb); 0;})
+#define PKTSETID(skb, id) ({BCM_REFERENCE(skb); BCM_REFERENCE(id);})
+#define PKTIDAVAIL() (0xFFFFFFFFu)
+
+#ifdef BCMDBG_PKT /* pkt logging for debugging */
+extern void *linux_pktget(osl_t *osh, uint len, int line, char *file);
+extern void *osl_pktdup(osl_t *osh, void *skb, int line, char *file);
+extern void *osl_pkt_frmnative(osl_t *osh, void *skb, int line, char *file);
+#else /* BCMDBG_PKT */
+#ifdef BCM_OBJECT_TRACE
+extern void *linux_pktget(osl_t *osh, uint len, int line, const char *caller);
+extern void *osl_pktdup(osl_t *osh, void *skb, int line, const char *caller);
+#else
+extern void *linux_pktget(osl_t *osh, uint len);
+extern void *osl_pktdup(osl_t *osh, void *skb);
+#endif /* BCM_OBJECT_TRACE */
+extern void *osl_pkt_frmnative(osl_t *osh, void *skb);
+#endif /* BCMDBG_PKT */
+#if defined(BCM_OBJECT_TRACE)
+extern void linux_pktfree(osl_t *osh, void *skb, bool send, int line, const char *caller);
+#else
+extern void linux_pktfree(osl_t *osh, void *skb, bool send);
+#endif /* BCM_OBJECT_TRACE */
+extern uchar *osl_pktdata(osl_t *osh, void *skb);
+extern uint osl_pktlen(osl_t *osh, void *skb);
+extern uint osl_pktheadroom(osl_t *osh, void *skb);
+extern uint osl_pkttailroom(osl_t *osh, void *skb);
+extern void *osl_pktnext(osl_t *osh, void *skb);
+extern void osl_pktsetnext(void *skb, void *x);
+extern void osl_pktsetlen(osl_t *osh, void *skb, uint len);
+extern uchar *osl_pktpush(osl_t *osh, void *skb, int bytes);
+extern uchar *osl_pktpull(osl_t *osh, void *skb, int bytes);
+extern void *osl_pkttag(void *skb);
+extern void *osl_pktlink(void *skb);
+extern void osl_pktsetlink(void *skb, void *x);
+extern uint osl_pktprio(void *skb);
+extern void osl_pktsetprio(void *skb, uint x);
+extern struct sk_buff *osl_pkt_tonative(osl_t *osh, void *pkt);
+extern bool osl_pktshared(void *skb);
+
+#ifdef BCMDBG_PKT /* pkt logging for debugging */
+extern char *osl_pktlist_dump(osl_t *osh, char *buf);
+extern void osl_pktlist_add(osl_t *osh, void *p, int line, char *file);
+extern void osl_pktlist_remove(osl_t *osh, void *p);
+#endif /* BCMDBG_PKT */
+
+#endif /* BINOSL */
+
+#define PKTALLOCED(osh) osl_pktalloced(osh)
+extern uint osl_pktalloced(osl_t *osh);
+
+#define PKTPOOLHEAPCOUNT() (0u)
+
+#endif /* BCMDRIVER */
+
+#endif /* _linux_pkt_h_ */
diff --git a/bcmdhd.101.10.361.x/include/linuxver.h b/bcmdhd.101.10.361.x/include/linuxver.h
new file mode 100755
index 0000000..44f32ce
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/linuxver.h
@@ -0,0 +1,945 @@
+/*
+ * Linux-specific abstractions to gain some independence from linux kernel versions.
+ * Pave over some 2.2 versus 2.4 versus 2.6 kernel differences.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _linuxver_h_
+#define _linuxver_h_
+
+/*
+ * The pragmas below are added as a workaround for errors caused by updating
+ * GCC to 4.8.2. GCC 4.6 adds -Wunused-but-set-variable and
+ * -Wunused-but-set-parameter to -Wall, and for some configurations those
+ * warnings are produced in the linux kernel. So for now the pragmas below
+ * disable the offending warnings. The permanent solution is to use -isystem,
+ * but that change causes a performance problem on RHEL5 servers.
+ *
+ */
+#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-but-set-variable"
+#pragma GCC diagnostic ignored "-Wunused-but-set-parameter"
+#endif
+
+#include <typedefs.h>
+#include <linux/version.h>
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+#include <linux/config.h>
+#else
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 33))
+#include <generated/autoconf.h>
+#else
+#include <linux/autoconf.h>
+#endif
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0))
+#include <linux/kconfig.h>
+#endif
+#include <linux/module.h>
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0))
+/* __NO_VERSION__ must be defined for all linkables except one in 2.2 */
+#ifdef __UNDEF_NO_VERSION__
+#undef __NO_VERSION__
+#else
+#define __NO_VERSION__
+#endif
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0) */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
+#define module_param(_name_, _type_, _perm_) MODULE_PARM(_name_, "i")
+#define module_param_string(_name_, _string_, _size_, _perm_) \
+ MODULE_PARM(_string_, "c" __MODULE_STRING(_size_))
+#endif
+
+/* linux/malloc.h is deprecated, use linux/slab.h instead. */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 9))
+#include <linux/malloc.h>
+#else
+#include <linux/slab.h>
+#endif
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include <linux/time.h>
+#include <linux/rtc.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+#include <linux/semaphore.h>
+#else
+#include <asm/semaphore.h>
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28))
+#undef IP_TOS
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)) */
+#include <asm/io.h>
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41))
+#include <linux/workqueue.h>
+#else
+#include <linux/tqueue.h>
+#ifndef work_struct
+#define work_struct tq_struct
+#endif
+#ifndef INIT_WORK
+#define INIT_WORK(_work, _func, _data) INIT_TQUEUE((_work), (_func), (_data))
+#endif
+#ifndef schedule_work
+#define schedule_work(_work) schedule_task((_work))
+#endif
+#ifndef flush_scheduled_work
+#define flush_scheduled_work() flush_scheduled_tasks()
+#endif
+#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41) */
+
+/*
+ * TODO:
+ * The daemonize() API is deprecated from kernel 3.8 onwards. More
+ * investigation is needed into whether this can cause any issue when the
+ * driver is loaded as a module from userspace.
+ */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+#define DAEMONIZE(a) do { \
+ allow_signal(SIGKILL); \
+ allow_signal(SIGTERM); \
+ } while (0)
+#elif ((LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) && \
+ (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)))
+#define DAEMONIZE(a) daemonize(a); \
+ allow_signal(SIGKILL); \
+ allow_signal(SIGTERM);
+#else /* Linux 2.4 (w/o preemption patch) */
+#define RAISE_RX_SOFTIRQ() \
+ cpu_raise_softirq(smp_processor_id(), NET_RX_SOFTIRQ)
+#define DAEMONIZE(a) daemonize(); \
+ do { if (a) \
+ strncpy(current->comm, a, MIN(sizeof(current->comm), (strlen(a)))); \
+ } while (0);
+#endif /* LINUX_VERSION_CODE */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
+#define MY_INIT_WORK(_work, _func) INIT_WORK(_work, _func)
+#else
+#define MY_INIT_WORK(_work, _func) INIT_WORK(_work, _func, _work)
+#if (!(LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 18) && defined(RHEL_MAJOR) && (RHEL_MAJOR == 5)))
+/* Exclude RHEL 5 */
+typedef void (*work_func_t)(void *work);
+#endif
+#endif /* >= 2.6.19 */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+/* Some distributions have their own 2.6.x compatibility layers */
+#ifndef IRQ_NONE
+typedef void irqreturn_t;
+#define IRQ_NONE
+#define IRQ_HANDLED
+#define IRQ_RETVAL(x)
+#endif
+#else
+typedef irqreturn_t(*FN_ISR) (int irq, void *dev_id, struct pt_regs *ptregs);
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
+#define IRQF_SHARED SA_SHIRQ
+#endif /* < 2.6.18 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
+#ifdef CONFIG_NET_RADIO
+#endif
+#endif /* < 2.6.17 */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 67)
+#define MOD_INC_USE_COUNT
+#define MOD_DEC_USE_COUNT
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 67) */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
+#include <linux/sched.h>
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
+#include <linux/sched/rt.h>
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
+#include <uapi/linux/sched/types.h>
+#endif /* LINUX_VERS >= 4.11.0 */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
+#include <net/lib80211.h>
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
+#include <linux/ieee80211.h>
+#else
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
+#include <net/ieee80211.h>
+#endif
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29) */
+
+#ifndef __exit
+#define __exit
+#endif
+#ifndef __devexit
+#define __devexit
+#endif
+#ifndef __devinit
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+ #define __devinit __init
+#else
+/* All devices are hotpluggable since linux 3.8.0 */
+ #define __devinit
+#endif
+#endif /* !__devinit */
+#ifndef __devinitdata
+#define __devinitdata
+#endif
+#ifndef __devexit_p
+#define __devexit_p(x) x
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0))
+
+#define pci_get_drvdata(dev) (dev)->sysdata
+#define pci_set_drvdata(dev, value) (dev)->sysdata = (value)
+
+/*
+ * New-style (2.4.x) PCI/hot-pluggable PCI/CardBus registration
+ */
+
+struct pci_device_id {
+ unsigned int vendor, device; /* Vendor and device ID or PCI_ANY_ID */
+ unsigned int subvendor, subdevice; /* Subsystem IDs or PCI_ANY_ID */
+ unsigned int class, class_mask; /* (class,subclass,prog-if) triplet */
+ unsigned long driver_data; /* Data private to the driver */
+};
+
+struct pci_driver {
+ struct list_head node;
+ char *name;
+ const struct pci_device_id *id_table; /* NULL if wants all devices */
+ int (*probe)(struct pci_dev *dev,
+ const struct pci_device_id *id); /* New device inserted */
+ void (*remove)(struct pci_dev *dev); /* Device removed (NULL if not a hot-plug
+ * capable driver)
+ */
+ void (*suspend)(struct pci_dev *dev); /* Device suspended */
+ void (*resume)(struct pci_dev *dev); /* Device woken up */
+};
+
+#define MODULE_DEVICE_TABLE(type, name)
+#define PCI_ANY_ID (~0)
+
+/* compatpci.c */
+#define pci_module_init pci_register_driver
+extern int pci_register_driver(struct pci_driver *drv);
+extern void pci_unregister_driver(struct pci_driver *drv);
+
+#endif /* PCI registration */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18))
+#define pci_module_init pci_register_driver
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18))
+#ifdef MODULE
+#define module_init(x) int init_module(void) { return x(); }
+#define module_exit(x) void cleanup_module(void) { x(); }
+#else
+#define module_init(x) __initcall(x);
+#define module_exit(x) __exitcall(x);
+#endif
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18) */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)
+#define WL_USE_NETDEV_OPS
+#else
+#undef WL_USE_NETDEV_OPS
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) && defined(CONFIG_RFKILL)
+#define WL_CONFIG_RFKILL
+#else
+#undef WL_CONFIG_RFKILL
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 48))
+#define list_for_each(pos, head) \
+ for (pos = (head)->next; pos != (head); pos = pos->next)
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 13))
+#define pci_resource_start(dev, bar) ((dev)->base_address[(bar)])
+#elif (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 44))
+#define pci_resource_start(dev, bar) ((dev)->resource[(bar)].start)
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 23))
+#define pci_enable_device(dev) do { } while (0)
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 14))
+#define net_device device
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 42))
+
+/*
+ * DMA mapping
+ *
+ * See linux/Documentation/DMA-mapping.txt
+ */
+
+#ifndef PCI_DMA_TODEVICE
+#define PCI_DMA_TODEVICE 1
+#define PCI_DMA_FROMDEVICE 2
+#endif
+
+typedef u32 dma_addr_t;
+
+/* Pure 2^n version of get_order */
+static inline int get_order(unsigned long size)
+{
+ int order;
+
+ size = (size-1) >> (PAGE_SHIFT-1);
+ order = -1;
+ do {
+ size >>= 1;
+ order++;
+ } while (size);
+ return order;
+}
+
+static inline void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
+ dma_addr_t *dma_handle)
+{
+ void *ret;
+ int gfp = GFP_ATOMIC | GFP_DMA;
+
+ ret = (void *)__get_free_pages(gfp, get_order(size));
+
+ if (ret != NULL) {
+ bzero(ret, size);
+ *dma_handle = virt_to_bus(ret);
+ }
+ return ret;
+}
+static inline void pci_free_consistent(struct pci_dev *hwdev, size_t size,
+ void *vaddr, dma_addr_t dma_handle)
+{
+ free_pages((unsigned long)vaddr, get_order(size));
+}
+#ifdef ILSIM
+extern uint pci_map_single(void *dev, void *va, uint size, int direction);
+extern void pci_unmap_single(void *dev, uint pa, uint size, int direction);
+#else
+#define pci_map_single(cookie, address, size, dir) virt_to_bus(address)
+#define pci_unmap_single(cookie, address, size, dir)
+#endif
+
+#endif /* DMA mapping */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)
+
+typedef struct timer_list timer_list_compat_t;
+
+#define init_timer_compat(timer_compat, cb, priv) \
+ do { \
+  init_timer(timer_compat); \
+  (timer_compat)->data = (ulong)(priv); \
+  (timer_compat)->function = (cb); \
+ } while (0)
+#define timer_set_private(timer_compat, priv) (timer_compat)->data = (ulong)priv
+#define timer_expires(timer_compat) (timer_compat)->expires
+
+#else /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) */
+
+typedef struct timer_list_compat {
+ struct timer_list timer;
+ void *arg;
+ void (*callback)(ulong arg);
+} timer_list_compat_t;
+
+extern void timer_cb_compat(struct timer_list *tl);
+
+#define init_timer_compat(timer_compat, cb, priv) \
+ (timer_compat)->arg = priv; \
+ (timer_compat)->callback = cb; \
+ timer_setup(&(timer_compat)->timer, timer_cb_compat, 0);
+#define timer_set_private(timer_compat, priv) (timer_compat)->arg = priv
+#define timer_expires(timer_compat) (timer_compat)->timer.expires
+
+#define del_timer(t) del_timer(&((t)->timer))
+#ifndef del_timer_sync
+#define del_timer_sync(t) del_timer_sync(&((t)->timer))
+#endif
+#define timer_pending(t) timer_pending(&((t)->timer))
+#define add_timer(t) add_timer(&((t)->timer))
+#define mod_timer(t, j) mod_timer(&((t)->timer), j)
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) */
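+
+/*
+ * Illustrative use of the timer compat layer above ("my_timeout" and
+ * "ctx" are hypothetical caller names); the same code builds both
+ * before and after the 4.15 timer API change:
+ *
+ *	static void my_timeout(ulong arg)
+ *	{
+ *		void *priv = (void *)arg;
+ *		... handle the timeout for priv ...
+ *	}
+ *
+ *	timer_list_compat_t t;
+ *	init_timer_compat(&t, my_timeout, ctx);
+ *	mod_timer(&t, jiffies + msecs_to_jiffies(100));
+ *	...
+ *	del_timer_sync(&t);
+ */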
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)
+#define rtc_time_to_tm(a, b) rtc_time64_to_tm(a, b)
+#else
+#define rtc_time_to_tm(a, b) rtc_time_to_tm(a, b)
+#endif /* LINUX_VER >= 3.19.0 */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
+#define time_to_tm(a, b, c) time64_to_tm(a, b, c)
+#else
+#define time_to_tm(a, b, c) time_to_tm(a, b, c)
+#endif /* LINUX_VER >= 4.20.0 */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 43))
+
+#define dev_kfree_skb_any(a) dev_kfree_skb(a)
+#define netif_down(dev) do { (dev)->start = 0; } while (0)
+
+/* pcmcia-cs provides its own netdevice compatibility layer */
+#ifndef _COMPAT_NETDEVICE_H
+
+/*
+ * SoftNet
+ *
+ * For pre-softnet kernels we need to tell the upper layer not to
+ * re-enter start_xmit() while we are in it. Softnet itself guarantees
+ * not to re-enter, so the netif_stop_queue() dance is only needed if
+ * the transmit queue really gets stuck. This should also improve
+ * performance, according to tests done by Aman Singla.
+ */
+
+#define dev_kfree_skb_irq(a) dev_kfree_skb(a)
+#define netif_wake_queue(dev) \
+ do { clear_bit(0, &(dev)->tbusy); mark_bh(NET_BH); } while (0)
+#define netif_stop_queue(dev) set_bit(0, &(dev)->tbusy)
+
+static inline void netif_start_queue(struct net_device *dev)
+{
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ dev->start = 1;
+}
+
+#define netif_queue_stopped(dev) (dev)->tbusy
+#define netif_running(dev) (dev)->start
+
+#endif /* _COMPAT_NETDEVICE_H */
+
+#define netif_device_attach(dev) netif_start_queue(dev)
+#define netif_device_detach(dev) netif_stop_queue(dev)
+
+/* 2.4.x renamed bottom halves to tasklets */
+#define tasklet_struct tq_struct
+static inline void tasklet_schedule(struct tasklet_struct *tasklet)
+{
+ queue_task(tasklet, &tq_immediate);
+ mark_bh(IMMEDIATE_BH);
+}
+
+static inline void tasklet_init(struct tasklet_struct *tasklet,
+ void (*func)(unsigned long),
+ unsigned long data)
+{
+ tasklet->next = NULL;
+ tasklet->sync = 0;
+ tasklet->routine = (void (*)(void *))func;
+ tasklet->data = (void *)data;
+}
+#define tasklet_kill(tasklet) do {} while (0)
+
+/* 2.4.x introduced del_timer_sync() */
+#define del_timer_sync(timer) del_timer(timer)
+
+#else
+
+#define netif_down(dev)
+
+#endif /* SoftNet */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 3))
+
+/*
+ * Emit code to initialise a tq_struct's routine and data pointers
+ */
+#define PREPARE_TQUEUE(_tq, _routine, _data) \
+ do { \
+ (_tq)->routine = _routine; \
+ (_tq)->data = _data; \
+ } while (0)
+
+/*
+ * Emit code to initialise all of a tq_struct
+ */
+#define INIT_TQUEUE(_tq, _routine, _data) \
+ do { \
+ INIT_LIST_HEAD(&(_tq)->list); \
+ (_tq)->sync = 0; \
+ PREPARE_TQUEUE((_tq), (_routine), (_data)); \
+ } while (0)
+
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 3) */
+
+/* Power management related macro & routines */
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 9)
+#define PCI_SAVE_STATE(a, b) pci_save_state(a)
+#define PCI_RESTORE_STATE(a, b) pci_restore_state(a)
+#else
+#define PCI_SAVE_STATE(a, b) pci_save_state(a, b)
+#define PCI_RESTORE_STATE(a, b) pci_restore_state(a, b)
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 6))
+static inline int
+pci_save_state(struct pci_dev *dev, u32 *buffer)
+{
+ int i;
+ if (buffer) {
+ /* 100% dword access ok here? */
+ for (i = 0; i < 16; i++)
+ pci_read_config_dword(dev, i * 4, &buffer[i]);
+ }
+ return 0;
+}
+
+static inline int
+pci_restore_state(struct pci_dev *dev, u32 *buffer)
+{
+ int i;
+
+ if (buffer) {
+ for (i = 0; i < 16; i++)
+ pci_write_config_dword(dev, i * 4, buffer[i]);
+ }
+ /*
+ * otherwise, write the context information we know from bootup.
+ * This works around a problem where warm-booting from Windows
+ * combined with a D3(hot)->D0 transition causes PCI config
+ * header data to be forgotten.
+ */
+ else {
+ for (i = 0; i < 6; i++)
+ pci_write_config_dword(dev,
+ PCI_BASE_ADDRESS_0 + (i * 4),
+ pci_resource_start(dev, i));
+ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
+ }
+ return 0;
+}
+#endif /* PCI power management */
+
+/* Old cp0 access macros deprecated in 2.4.19 */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 19))
+#define read_c0_count() read_32bit_cp0_register(CP0_COUNT)
+#endif
+
+/* Module refcount handled internally in 2.6.x */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
+#ifndef SET_MODULE_OWNER
+#define SET_MODULE_OWNER(dev) do {} while (0)
+#define OLD_MOD_INC_USE_COUNT MOD_INC_USE_COUNT
+#define OLD_MOD_DEC_USE_COUNT MOD_DEC_USE_COUNT
+#else
+#define OLD_MOD_INC_USE_COUNT do {} while (0)
+#define OLD_MOD_DEC_USE_COUNT do {} while (0)
+#endif
+#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24) */
+#ifndef SET_MODULE_OWNER
+#define SET_MODULE_OWNER(dev) do {} while (0)
+#endif
+#ifndef MOD_INC_USE_COUNT
+#define MOD_INC_USE_COUNT do {} while (0)
+#endif
+#ifndef MOD_DEC_USE_COUNT
+#define MOD_DEC_USE_COUNT do {} while (0)
+#endif
+#define OLD_MOD_INC_USE_COUNT MOD_INC_USE_COUNT
+#define OLD_MOD_DEC_USE_COUNT MOD_DEC_USE_COUNT
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24) */
+
+#ifndef SET_NETDEV_DEV
+#define SET_NETDEV_DEV(net, pdev) do {} while (0)
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0))
+#ifndef HAVE_FREE_NETDEV
+#define free_netdev(dev) kfree(dev)
+#endif
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0) */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+/* struct packet_type redefined in 2.6.x */
+#define af_packet_priv data
+#endif
+
+/* suspend args */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
+#define DRV_SUSPEND_STATE_TYPE pm_message_t
+#else
+#define DRV_SUSPEND_STATE_TYPE uint32
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
+#define CHECKSUM_HW CHECKSUM_PARTIAL
+#endif
+
+typedef struct {
+ void *parent; /* some external entity that the thread is supposed to work for */
+ char *proc_name;
+ struct task_struct *p_task;
+ long thr_pid;
+ int prio; /* priority */
+ struct semaphore sema;
+ int terminated;
+ struct completion completed;
+ int flush_ind;
+ struct completion flushed;
+ spinlock_t spinlock;
+ int up_cnt;
+} tsk_ctl_t;
+
+/* ANDREY: new macros to start/stop threads (old kthread API style) */
+/* requires a tsk_ctl_t tsk argument; the caller's priv data is passed in the owner ptr */
+/* note: these macros assume there may be only one context waiting on the thread's completion */
+#ifdef KERNEL_TIMESTAMP
+extern char *dhd_log_dump_get_timestamp(void);
+#ifdef SYSTEM_TIMESTAMP
+extern char* dhd_dbg_get_system_timestamp(void);
+#define PRINTF_SYSTEM_TIME dhd_log_dump_get_timestamp(), dhd_dbg_get_system_timestamp()
+#define PERCENT_S "[%s][%s]"
+#else
+#define PRINTF_SYSTEM_TIME dhd_log_dump_get_timestamp()
+#define PERCENT_S "[%s]"
+#endif
+#else
+#define PRINTF_SYSTEM_TIME ""
+#define PERCENT_S "%s"
+#endif
+#ifndef DHD_LOG_PREFIX
+#define DHD_LOG_PREFIX "[dhd]"
+#endif
+#define DHD_LOG_PREFIXS DHD_LOG_PREFIX" "
+#ifdef DHD_DEBUG
+#define printf_thr(fmt, args...) printk(PERCENT_S DHD_LOG_PREFIXS fmt, PRINTF_SYSTEM_TIME, ## args)
+#define DBG_THR(args) do {printf_thr args;} while (0)
+#else
+#define DBG_THR(x)
+#endif
+
+extern unsigned long osl_spin_lock(void *lock);
+extern void osl_spin_unlock(void *lock, unsigned long flags);
+
+#define TSK_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define TSK_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
+
+static inline bool binary_sema_down(tsk_ctl_t *tsk)
+{
+ if (down_interruptible(&tsk->sema) == 0) {
+ unsigned long flags = 0;
+ TSK_LOCK(&tsk->spinlock, flags);
+ if (tsk->up_cnt == 1)
+ tsk->up_cnt--;
+ else {
+ DBG_THR(("dhd_dpc_thread: Unexpected up_cnt %d\n", tsk->up_cnt));
+ }
+ TSK_UNLOCK(&tsk->spinlock, flags);
+ return false;
+ } else
+ return true;
+}
+
+static inline bool binary_sema_up(tsk_ctl_t *tsk)
+{
+ bool sem_up = false;
+ unsigned long flags = 0;
+
+ TSK_LOCK(&tsk->spinlock, flags);
+ if (tsk->up_cnt == 0) {
+ tsk->up_cnt++;
+ sem_up = true;
+ } else if (tsk->up_cnt == 1) {
+ /* dhd_sched_dpc: dpc is already up! */
+ } else
+ DBG_THR(("dhd_sched_dpc: unexpected up cnt %d!\n", tsk->up_cnt));
+
+ TSK_UNLOCK(&tsk->spinlock, flags);
+
+ if (sem_up)
+ up(&tsk->sema);
+
+ return sem_up;
+}
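+
+/*
+ * Sketch of the producer/consumer pattern these helpers implement
+ * (thread body is illustrative): a worker blocks in binary_sema_down()
+ * and a scheduler wakes it at most once via binary_sema_up(), so
+ * redundant wakeups collapse into a single pending one.
+ *
+ *	while (!tsk->terminated) {
+ *		if (binary_sema_down(tsk))
+ *			continue;	// interrupted by a signal
+ *		... process the pending work ...
+ *	}
+ */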
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0))
+#define SMP_RD_BARRIER_DEPENDS(x)
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#define SMP_RD_BARRIER_DEPENDS(x) smp_read_barrier_depends(x)
+#else
+#define SMP_RD_BARRIER_DEPENDS(x) smp_rmb(x)
+#endif
+
+#define PROC_START(thread_func, owner, tsk_ctl, flags, name) \
+{ \
+ sema_init(&((tsk_ctl)->sema), 0); \
+ init_completion(&((tsk_ctl)->completed)); \
+ init_completion(&((tsk_ctl)->flushed)); \
+ (tsk_ctl)->parent = owner; \
+ (tsk_ctl)->proc_name = name; \
+ (tsk_ctl)->terminated = FALSE; \
+ (tsk_ctl)->flush_ind = FALSE; \
+ (tsk_ctl)->up_cnt = 0; \
+ (tsk_ctl)->p_task = kthread_run(thread_func, tsk_ctl, (char*)name); \
+ if (IS_ERR((tsk_ctl)->p_task)) { \
+ (tsk_ctl)->thr_pid = -1; \
+ DBG_THR(("%s(): thread:%s create failed\n", __FUNCTION__, \
+ (tsk_ctl)->proc_name)); \
+ } else { \
+ (tsk_ctl)->thr_pid = (tsk_ctl)->p_task->pid; \
+ spin_lock_init(&((tsk_ctl)->spinlock)); \
+ DBG_THR(("%s(): thread:%s:%lx started\n", __FUNCTION__, \
+ (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
+ }; \
+}
+
+#define PROC_WAIT_TIMEOUT_MSEC 5000 /* 5 seconds */
+
+#define PROC_STOP(tsk_ctl) \
+{ \
+ uint timeout = (uint)msecs_to_jiffies(PROC_WAIT_TIMEOUT_MSEC); \
+ (tsk_ctl)->terminated = TRUE; \
+ smp_wmb(); \
+ up(&((tsk_ctl)->sema)); \
+ DBG_THR(("%s(): thread:%s:%lx wait for terminate\n", __FUNCTION__, \
+ (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
+ timeout = (uint)wait_for_completion_timeout(&((tsk_ctl)->completed), timeout); \
+ if (timeout == 0) \
+ DBG_THR(("%s(): thread:%s:%lx terminate timeout\n", __FUNCTION__, \
+ (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
+ else \
+ DBG_THR(("%s(): thread:%s:%lx terminated OK\n", __FUNCTION__, \
+ (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
+ (tsk_ctl)->parent = NULL; \
+ (tsk_ctl)->proc_name = NULL; \
+ (tsk_ctl)->thr_pid = -1; \
+ (tsk_ctl)->up_cnt = 0; \
+}
+
+#define PROC_STOP_USING_BINARY_SEMA(tsk_ctl) \
+{ \
+ uint timeout = (uint)msecs_to_jiffies(PROC_WAIT_TIMEOUT_MSEC); \
+ (tsk_ctl)->terminated = TRUE; \
+ smp_wmb(); \
+ binary_sema_up(tsk_ctl); \
+ DBG_THR(("%s(): thread:%s:%lx wait for terminate\n", __FUNCTION__, \
+ (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
+ timeout = (uint)wait_for_completion_timeout(&((tsk_ctl)->completed), timeout); \
+ if (timeout == 0) \
+ DBG_THR(("%s(): thread:%s:%lx terminate timeout\n", __FUNCTION__, \
+ (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
+ else \
+ DBG_THR(("%s(): thread:%s:%lx terminated OK\n", __FUNCTION__, \
+ (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
+ (tsk_ctl)->parent = NULL; \
+ (tsk_ctl)->proc_name = NULL; \
+ (tsk_ctl)->thr_pid = -1; \
+}
+
+/*
+* Flush is non-reentrant, so callers must make sure
+* there is no race condition.
+* For a safer exit, wait_for_completion_timeout is used
+* with a PROC_WAIT_TIMEOUT_MSEC timeout.
+*/
+#define PROC_FLUSH_USING_BINARY_SEMA(tsk_ctl) \
+{ \
+ uint timeout = (uint)msecs_to_jiffies(PROC_WAIT_TIMEOUT_MSEC); \
+ (tsk_ctl)->flush_ind = TRUE; \
+ smp_wmb(); \
+ binary_sema_up(tsk_ctl); \
+ DBG_THR(("%s(): thread:%s:%lx wait for flush\n", __FUNCTION__, \
+ (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
+ timeout = (uint)wait_for_completion_timeout(&((tsk_ctl)->flushed), timeout); \
+ if (timeout == 0) \
+ DBG_THR(("%s(): thread:%s:%lx flush timeout\n", __FUNCTION__, \
+ (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
+ else \
+ DBG_THR(("%s(): thread:%s:%lx flushed OK\n", __FUNCTION__, \
+ (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
+}
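+
+/*
+ * Putting the pieces together (illustrative; "my_thread" and "drv" are
+ * hypothetical, and complete_and_exit() is the pre-5.17 name). The
+ * thread must signal "completed" on exit so PROC_STOP*() can wait:
+ *
+ *	static int my_thread(void *data)
+ *	{
+ *		tsk_ctl_t *tsk = (tsk_ctl_t *)data;
+ *
+ *		while (!tsk->terminated) {
+ *			if (binary_sema_down(tsk))
+ *				continue;
+ *			... do work for tsk->parent ...
+ *		}
+ *		complete_and_exit(&tsk->completed, 0);
+ *	}
+ *
+ *	tsk_ctl_t ctl;
+ *	PROC_START(my_thread, drv, &ctl, 0, "my_thread");
+ *	if (ctl.thr_pid >= 0)
+ *		PROC_STOP_USING_BINARY_SEMA(&ctl);
+ */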
+
+/* ----------------------- */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
+#define KILL_PROC(nr, sig) \
+{ \
+struct task_struct *tsk; \
+struct pid *pid; \
+pid = find_get_pid((pid_t)nr); \
+tsk = pid_task(pid, PIDTYPE_PID); \
+if (tsk) send_sig(sig, tsk, 1); \
+}
+#else
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
+ KERNEL_VERSION(2, 6, 30))
+#define KILL_PROC(pid, sig) \
+{ \
+ struct task_struct *tsk; \
+ tsk = find_task_by_vpid(pid); \
+ if (tsk) send_sig(sig, tsk, 1); \
+}
+#else
+#define KILL_PROC(pid, sig) \
+{ \
+ kill_proc(pid, sig, 1); \
+}
+#endif
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#include <linux/time.h>
+#include <linux/wait.h>
+#else
+#include <linux/sched.h>
+
+#define __wait_event_interruptible_timeout(wq, condition, ret) \
+do { \
+ wait_queue_t __wait; \
+ init_waitqueue_entry(&__wait, current); \
+ \
+ add_wait_queue(&wq, &__wait); \
+ for (;;) { \
+ set_current_state(TASK_INTERRUPTIBLE); \
+ if (condition) \
+ break; \
+ if (!signal_pending(current)) { \
+ ret = schedule_timeout(ret); \
+ if (!ret) \
+ break; \
+ continue; \
+ } \
+ ret = -ERESTARTSYS; \
+ break; \
+ } \
+ current->state = TASK_RUNNING; \
+ remove_wait_queue(&wq, &__wait); \
+} while (0)
+
+#define wait_event_interruptible_timeout(wq, condition, timeout) \
+({ \
+ long __ret = timeout; \
+ if (!(condition)) \
+ __wait_event_interruptible_timeout(wq, condition, __ret); \
+ __ret; \
+})
+
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */
+
+/*
+ * For < 2.6.24, wl creates its own netdev but doesn't align the priv
+ * area like the genuine alloc_netdev(). Since netdev_priv() always
+ * returns the aligned address, it will not match our unaligned address
+ * for < 2.6.24.
+ */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
+#define DEV_PRIV(dev) (dev->priv)
+#else
+#define DEV_PRIV(dev) netdev_priv(dev)
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
+#define WL_ISR(i, d, p) wl_isr((i), (d))
+#else
+#define WL_ISR(i, d, p) wl_isr((i), (d), (p))
+#endif /* < 2.6.20 */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+#define netdev_priv(dev) dev->priv
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+#define CAN_SLEEP() ((!in_atomic() && !irqs_disabled()))
+#else
+#define CAN_SLEEP() (FALSE)
+#endif
+
+#define KMALLOC_FLAG (CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC)
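+
+/*
+ * Illustrative use: pick GFP_KERNEL only when the current context may
+ * sleep, GFP_ATOMIC otherwise (the size below is hypothetical):
+ *
+ *	buf = kmalloc(256, KMALLOC_FLAG);
+ */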
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+#define RANDOM32 prandom_u32
+#define RANDOM_BYTES prandom_bytes
+#else
+#define RANDOM32 random32
+#define RANDOM_BYTES get_random_bytes
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+#define SRANDOM32(entropy) prandom_seed(entropy)
+#else
+#define SRANDOM32(entropy) srandom32(entropy)
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) */
+
+/*
+ * Override the latest kfifo functions with their older versions
+ * so the code also works on older kernels
+ */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) && !defined(WL_COMPAT_WIRELESS)
+#define kfifo_in_spinlocked(a, b, c, d) kfifo_put(a, (u8 *)b, c)
+#define kfifo_out_spinlocked(a, b, c, d) kfifo_get(a, (u8 *)b, c)
+#define kfifo_esize(a) 1
+#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 32)) && \
+ (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)) && !defined(WL_COMPAT_WIRELESS)
+#define kfifo_in_spinlocked(a, b, c, d) kfifo_in_locked(a, b, c, d)
+#define kfifo_out_spinlocked(a, b, c, d) kfifo_out_locked(a, b, c, d)
+#define kfifo_esize(a) 1
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) */
+
+#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
+#pragma GCC diagnostic pop
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0))
+#include <linux/fs.h>
+static inline struct inode *file_inode(const struct file *f)
+{
+ return f->f_dentry->d_inode;
+}
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
+#define vfs_write(fp, buf, len, pos) kernel_write(fp, buf, len, pos)
+#define vfs_read(fp, buf, len, pos) kernel_read(fp, buf, len, pos)
+int kernel_read_compat(struct file *file, loff_t offset, char *addr, unsigned long count);
+#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) */
+#define kernel_read_compat(file, offset, addr, count) kernel_read(file, offset, addr, count)
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)
+#define netdev_tx_t int
+#endif
+
+#endif /* _linuxver_h_ */
diff --git a/bcmdhd.101.10.361.x/include/lpflags.h b/bcmdhd.101.10.361.x/include/lpflags.h
new file mode 100755
index 0000000..f284bbb
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/lpflags.h
@@ -0,0 +1,39 @@
+/*
+ * Chip related low power flags
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+#ifndef _lpflags_h_
+#define _lpflags_h_
+
+/* Chip related low power flags (lpflags) */
+#define LPFLAGS_SI_GLOBAL_DISABLE (1 << 0)
+#define LPFLAGS_SI_MEM_STDBY_DISABLE (1 << 1)
+#define LPFLAGS_SI_SFLASH_DISABLE (1 << 2)
+#define LPFLAGS_SI_BTLDO3P3_DISABLE (1 << 3)
+#define LPFLAGS_SI_GCI_FORCE_REGCLK_DISABLE (1 << 4)
+#define LPFLAGS_SI_FORCE_PWM_WHEN_RADIO_ON (1 << 5)
+#define LPFLAGS_SI_DS0_SLEEP_PDA_DISABLE (1 << 6)
+#define LPFLAGS_SI_DS1_SLEEP_PDA_DISABLE (1 << 7)
+#define LPFLAGS_PHY_GLOBAL_DISABLE (1 << 16)
+#define LPFLAGS_PHY_LP_DISABLE (1 << 17)
+#define LPFLAGS_PSM_PHY_CTL (1 << 18)
+
+#endif /* _lpflags_h_ */
diff --git a/bcmdhd.101.10.361.x/include/mbo.h b/bcmdhd.101.10.361.x/include/mbo.h
new file mode 100755
index 0000000..14bd92b
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/mbo.h
@@ -0,0 +1,279 @@
+/*
+ * Fundamental types and constants relating to WFA MBO
+ * (Multiband Operation)
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _MBO_H_
+#define _MBO_H_
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+/* WiFi MBO OUI values */
+#define MBO_OUI WFA_OUI /* WiFi OUI 50:6F:9A */
+/* oui_type field identifying the type and version of the MBO IE. */
+#define MBO_OUI_TYPE WFA_OUI_TYPE_MBO /* OUI Type/Version */
+/* IEEE 802.11 vendor specific information element. */
+#define MBO_IE_ID 0xdd
+
+/* MBO ATTR related macros */
+#define MBO_ATTR_ID_OFF 0
+#define MBO_ATTR_LEN_OFF 1
+#define MBO_ATTR_DATA_OFF 2
+
+#define MBO_ATTR_ID_LEN 1 /* Attr ID field length */
+#define MBO_ATTR_LEN_LEN 1 /* Attr Length field length */
+#define MBO_ATTR_HDR_LEN 2 /* ID + 1-byte length field */
+
+/* MBO subelements related */
+#define MBO_SUBELEM_ID 0xdd
+#define MBO_SUBELEM_OUI WFA_OUI
+
+#define MBO_SUBELEM_ID_LEN 1 /* SubElement ID field length */
+#define MBO_SUBELEM_LEN_LEN 1 /* SubElement length field length */
+#define MBO_SUBELEM_HDR_LEN 6 /* ID + length + OUI + OUI TYPE */
+
+#define MBO_NON_PREF_CHAN_SUBELEM_LEN_LEN(L) (7 + (L)) /* value of length field */
+#define MBO_NON_PREF_CHAN_SUBELEM_TOT_LEN(L) \
+ (MBO_SUBELEM_ID_LEN + MBO_SUBELEM_LEN_LEN + MBO_NON_PREF_CHAN_SUBELEM_LEN_LEN(L))
+/* MBO attributes as defined in the MBO spec */
+enum {
+ MBO_ATTR_MBO_AP_CAPABILITY = 1,
+ MBO_ATTR_NON_PREF_CHAN_REPORT = 2,
+ MBO_ATTR_CELL_DATA_CAP = 3,
+ MBO_ATTR_ASSOC_DISALLOWED = 4,
+ MBO_ATTR_CELL_DATA_CONN_PREF = 5,
+ MBO_ATTR_TRANS_REASON_CODE = 6,
+ MBO_ATTR_TRANS_REJ_REASON_CODE = 7,
+ MBO_ATTR_ASSOC_RETRY_DELAY = 8
+};
+
+typedef BWL_PRE_PACKED_STRUCT struct wifi_mbo_ie_s {
+ uint8 id; /* IE ID: MBO_IE_ID 0xDD */
+ uint8 len; /* IE length */
+ uint8 oui[WFA_OUI_LEN]; /* MBO_OUI 50:6F:9A */
+ uint8 oui_type; /* MBO_OUI_TYPE 0x16 */
+ uint8 attr[1]; /* var len attributes */
+} BWL_POST_PACKED_STRUCT wifi_mbo_ie_t;
+
+#define MBO_IE_HDR_SIZE (OFFSETOF(wifi_mbo_ie_t, attr))
+/* oui:3 bytes + oui type:1 byte */
+#define MBO_IE_NO_ATTR_LEN 4
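+
+/*
+ * Sketch of walking the variable-length attribute list of an MBO IE
+ * (illustrative; the caller is responsible for bounds-checking "ie"
+ * against the containing frame):
+ *
+ *	const uint8 *p = ie->attr;
+ *	const uint8 *end = p + ie->len - MBO_IE_NO_ATTR_LEN;
+ *	while (p + MBO_ATTR_HDR_LEN <= end) {
+ *		uint8 attr_id = p[MBO_ATTR_ID_OFF];
+ *		uint8 attr_len = p[MBO_ATTR_LEN_OFF];
+ *		... dispatch on attr_id ...
+ *		p += MBO_ATTR_HDR_LEN + attr_len;
+ *	}
+ */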
+
+/* MBO AP Capability Attribute */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_mbo_ap_cap_ind_attr_s {
+ /* Attribute ID - 0x01. */
+ uint8 id;
+ /* Length of the following fields in the attribute */
+ uint8 len;
+ /* AP capability bitmap */
+ uint8 cap_ind;
+} BWL_POST_PACKED_STRUCT wifi_mbo_ap_cap_ind_attr_t;
+
+/* MBO AP Capability Indication Field Values */
+#define MBO_AP_CAP_IND_CELLULAR_AWARE 0x40
+
+/* Non-preferred Channel Report Attribute */
+#define MBO_NON_PREF_CHAN_ATTR_OPCALSS_OFF 2
+#define MBO_NON_PREF_CHAN_ATTR_CHANLIST_OFF 3
+#define MBO_NON_PREF_CHAN_ATTR_PREF_OFF(L) \
+ (MBO_NON_PREF_CHAN_ATTR_CHANLIST_OFF + (L))
+
+#define MBO_NON_PREF_CHAN_ATTR_OPCALSS_LEN 1
+#define MBO_NON_PREF_CHAN_ATTR_PREF_LEN 1
+#define MBO_NON_PREF_CHAN_ATTR_REASON_LEN 1
+
+#define MBO_NON_PREF_CHAN_ATTR_LEN(L) ((L) + 3)
+#define MBO_NON_PREF_CHAN_ATTR_TOT_LEN(L) (MBO_ATTR_HDR_LEN + (L) + 3)
+
+/* attribute len - (opclass + Pref + Reason) */
+#define MBO_NON_PREF_CHAN_ATTR_CHANLIST_LEN(L) ((L) - 3)
+
+/* MBO Non-preferred Channel Report: "Preference" field value */
+enum {
+ MBO_STA_NON_OPERABLE_BAND_CHAN = 0,
+ MBO_STA_NON_PREFERRED_BAND_CHAN = 1,
+ MBO_STA_PREFERRED_BAND_CHAN = 255
+};
+
+/* MBO Non-preferred Channel Report: "Reason Code" field value */
+enum {
+ MBO_NON_PREF_CHAN_RC_UNSPECIFIED = 0,
+ MBO_NON_PREF_CHAN_RC_BCN_STRENGTH = 1,
+ MBO_NON_PREF_CHAN_RC_CO_LOC_INTERFERENCE = 2,
+ MBO_NON_PREF_CHAN_RC_IN_DEV_INTERFERENCE = 3
+};
+
+/* Cellular Data Capability Attribute */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_mbo_cell_data_cap_attr_s {
+ /* Attribute ID - 0x03. */
+ uint8 id;
+ /* Length of the following fields in the attribute */
+ uint8 len;
+ /* MBO STA's cellular capability */
+ uint8 cell_conn;
+} BWL_POST_PACKED_STRUCT wifi_mbo_cell_data_cap_attr_t;
+
+/* MBO Cellular Data Capability: "Cellular Connectivity" field value */
+enum {
+ MBO_CELL_DATA_CONN_AVAILABLE = 1,
+ MBO_CELL_DATA_CONN_NOT_AVAILABLE = 2,
+ MBO_CELL_DATA_CONN_NOT_CAPABLE = 3
+};
+
+/* Association Disallowed attribute */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_mbo_assoc_disallowed_attr_s {
+ /* Attribute ID - 0x04. */
+ uint8 id;
+ /* Length of the following fields in the attribute */
+ uint8 len;
+ /* Reason of not accepting new association */
+ uint8 reason_code;
+} BWL_POST_PACKED_STRUCT wifi_mbo_assoc_disallowed_attr_t;
+
+/* Association Disallowed attr Reason code field values */
+enum {
+ MBO_ASSOC_DISALLOWED_RC_UNSPECIFIED = 1,
+ MBO_ASSOC_DISALLOWED_RC_MAX_STA_REACHED = 2,
+ MBO_ASSOC_DISALLOWED_RC_AIR_IFACE_OVERLOADED = 3,
+ MBO_ASSOC_DISALLOWED_RC_AUTH_SRVR_OVERLOADED = 4,
+ MBO_ASSOC_DISALLOWED_RC_INSUFFIC_RSSI = 5
+};
+
+/* Cellular Data Conn Pref attribute */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_mbo_cell_data_conn_pref_attr_s {
+ /* Attribute ID - 0x05. */
+ uint8 id;
+ /* Length of the following fields in the attribute */
+ uint8 len;
+ /* Preference value of cellular connection */
+ uint8 cell_pref;
+} BWL_POST_PACKED_STRUCT wifi_mbo_cell_data_conn_pref_attr_t;
+
+/* Cellular Data Conn Pref attr: Cellular Pref field values */
+enum {
+ MBO_CELLULAR_DATA_CONN_EXCLUDED = 1,
+ MBO_CELLULAR_DATA_CONN_NOT_PREFERRED = 2,
+ MBO_CELLULAR_DATA_CONN_PREFERRED = 255
+};
+
+/* Transition Reason Code Attribute */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_mbo_trans_reason_code_attr_s {
+ /* Attribute ID - 0x06. */
+ uint8 id;
+ /* Length of the following fields in the attribute */
+ uint8 len;
+ /* Reason of transition recommendation */
+ uint8 trans_reason_code;
+} BWL_POST_PACKED_STRUCT wifi_mbo_trans_reason_code_attr_t;
+
+/* Transition Reason Code Attr: trans reason code field values */
+enum {
+ MBO_TRANS_REASON_UNSPECIFIED = 0,
+ MBO_TRANS_REASON_EXCESSV_FRM_LOSS_RATE = 1,
+ MBO_TRANS_REASON_EXCESSV_TRAFFIC_DELAY = 2,
+ MBO_TRANS_REASON_INSUFF_BW = 3,
+ MBO_TRANS_REASON_LOAD_BALANCING = 4,
+ MBO_TRANS_REASON_LOW_RSSI = 5,
+ MBO_TRANS_REASON_EXCESSV_RETRANS_RCVD = 6,
+ MBO_TRANS_REASON_HIGH_INTERFERENCE = 7,
+ MBO_TRANS_REASON_GRAY_ZONE = 8,
+ MBO_TRANS_REASON_PREMIUM_AP_TRANS = 9
+};
+
+/* Transition Rejection Reason Code Attribute */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_mbo_trans_rej_reason_code_attr_s {
+ /* Attribute ID - 0x07. */
+ uint8 id;
+ /* Length of the following fields in the attribute */
+ uint8 len;
+ /* Reason of transition rejection */
+ uint8 trans_rej_reason_code;
+} BWL_POST_PACKED_STRUCT wifi_mbo_trans_rej_reason_code_attr_t;
+
+/* Transition Rej Reason Code Attr: trans rej reason code field values */
+enum {
+ MBO_TRANS_REJ_REASON_UNSPECIFIED = 0,
+ MBO_TRANS_REJ_REASON_EXSSIV_FRM_LOSS_RATE = 1,
+ MBO_TRANS_REJ_REASON_EXSSIV_TRAFFIC_DELAY = 2,
+ MBO_TRANS_REJ_REASON_INSUFF_QOS_CAPACITY = 3,
+ MBO_TRANS_REJ_REASON_LOW_RSSI = 4,
+ MBO_TRANS_REJ_REASON_HIGH_INTERFERENCE = 5,
+ MBO_TRANS_REJ_REASON_SERVICE_UNAVAIL = 6
+};
+
+/* Assoc Retry Delay Attribute */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_mbo_assoc_retry_delay_attr_s {
+ /* Attribute ID - 0x08. */
+ uint8 id;
+ /* Length of the following fields in the attribute */
+ uint8 len;
+ /* No of Seconds before next assoc attempt */
+ uint16 reassoc_delay;
+} BWL_POST_PACKED_STRUCT wifi_mbo_assoc_retry_delay_attr_t;
+
+#define MBO_ANQP_OUI_TYPE 0x12 /* OUI Type/Version */
+
+/* MBO ANQP Element */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_mbo_anqp_elem_s {
+ /* ID - 56797 */
+ uint16 info_id;
+ /* Length of the OUI + Vendor Specific content */
+ uint16 len;
+ /* WFA_OUI 50:6F:9A */
+ uint8 oui[WFA_OUI_LEN];
+ /* MBO_ANQP_OUI_TYPE 0x12 */
+ uint8 oui_type;
+ /* MBO ANQP element type */
+ uint8 sub_type;
+ /* variable len payload */
+ uint8 payload[1];
+} BWL_POST_PACKED_STRUCT wifi_mbo_anqp_elem_t;
+
+#define MBO_ANQP_ELEM_HDR_SIZE (OFFSETOF(wifi_mbo_anqp_elem_t, payload))
+
+/* oui:3 bytes + oui type:1 byte + sub type:1 byte */
+#define MBO_ANQP_ELEM_NO_PAYLOAD_LEN 5
+
+/* MBO ANQP Subtype Values */
+enum {
+ MBO_ANQP_ELEM_MBO_QUERY_LIST = 1,
+ MBO_ANQP_ELEM_CELL_DATA_CONN_PREF = 2
+};
+
+/* MBO sub-elements */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_mbo_cell_cap_subelem_s {
+ /* 0xDD */
+ uint8 sub_elem_id;
+ /* Length of the following fields in sub-element */
+ uint8 len;
+ /* WFA_OUI 50:6F:9A */
+ uint8 oui[WFA_OUI_LEN];
+ /* OUI_TYPE 0x03 */
+ uint8 oui_type;
+ /* STA cellular capability */
+ uint8 cell_conn;
+} BWL_POST_PACKED_STRUCT wifi_mbo_cell_cap_subelem_t;
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _MBO_H_ */
diff --git a/bcmdhd.101.10.361.x/include/miniopt.h b/bcmdhd.101.10.361.x/include/miniopt.h
new file mode 100755
index 0000000..b486c07
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/miniopt.h
@@ -0,0 +1,73 @@
+/*
+ * Command line options parser.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef MINI_OPT_H
+#define MINI_OPT_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* ---- Include Files ---------------------------------------------------- */
+
+/* ---- Constants and Types ---------------------------------------------- */
+
+#define MINIOPT_MAXKEY 128 /* Max key buffer length */
+typedef struct miniopt {
+
+ /* These are persistent after miniopt_init() */
+ const char* name; /* name for prompt in error strings */
+ const char* flags; /* option chars that take no args */
+ bool longflags; /* long options may be flags */
+ bool opt_end; /* at end of options (passed a "--") */
+
+ /* These are per-call to miniopt() */
+
+ int consumed; /* number of argv entries consumed in
+ * the most recent call to miniopt()
+ */
+ bool positional;
+ bool good_int; /* 'val' member is the result of a successful
+ * strtol conversion of the option value
+ */
+ char opt;
+ char key[MINIOPT_MAXKEY];
+ char* valstr; /* positional param, or value for the option,
+ * or null if the option had
+ * no accompanying value
+ */
+ uint uval; /* unsigned strtoul translation of valstr */
+ int val; /* strtol translation of valstr */
+} miniopt_t;
+
+void miniopt_init(miniopt_t *t, const char* name, const char* flags, bool longflags);
+int miniopt(miniopt_t *t, char **argv);
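+
+/*
+ * A typical parse loop, in the style used by Broadcom command-line
+ * utilities (illustrative; check miniopt.c for the exact return
+ * conventions assumed here - negative on end-of-options, 1 on a
+ * parse error):
+ *
+ *	miniopt_t mo;
+ *	int opt_err;
+ *
+ *	miniopt_init(&mo, "mycmd", NULL, FALSE);
+ *	while ((opt_err = miniopt(&mo, argv)) != -1) {
+ *		if (opt_err == 1)
+ *			break;			// parse error
+ *		argv += mo.consumed;
+ *		if (mo.opt == 'i' && mo.good_int)
+ *			... use mo.val ...
+ *	}
+ */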
+
+/* ---- Variable Externs ------------------------------------------------- */
+/* ---- Function Prototypes ---------------------------------------------- */
+
+#ifdef __cplusplus
+ }
+#endif
+
+#endif /* MINI_OPT_H */
diff --git a/bcmdhd.101.10.361.x/include/monitor.h b/bcmdhd.101.10.361.x/include/monitor.h
new file mode 100755
index 0000000..4b92cda
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/monitor.h
@@ -0,0 +1,230 @@
+/*
+ * Monitor Mode definitions.
+ * This header file houses the defines and function prototypes used by
+ * both the wl firmware and drivers.
+ *
+ * Broadcom Proprietary and Confidential. Copyright (C) 2020,
+ * All Rights Reserved.
+ *
+ * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom;
+ * the contents of this file may not be disclosed to third parties,
+ * copied or duplicated in any form, in whole or in part, without
+ * the prior written permission of Broadcom.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Proprietary:>>
+ */
+#ifndef _MONITOR_H_
+#define _MONITOR_H_
+
+#include <bcmwifi_channels.h>
+
+#include <packed_section_start.h>
+/* wl_monitor rx status per packet */
+typedef struct BWL_PRE_PACKED_STRUCT wl_rxsts {
+ uint pkterror; /* error flags per pkt */
+ uint phytype; /* 802.11 A/B/G /N */
+ chanspec_t chanspec; /* channel spec */
+ uint16 datarate; /* rate in 500kbps */
+ uint8 mcs; /* MCS for HT frame */
+ uint8 htflags; /* HT modulation flags */
+ uint antenna; /* antenna pkts received on */
+ uint pktlength; /* pkt length minus bcm phy hdr */
+ uint32 mactime; /* time stamp from mac, count per 1us */
+ uint sq; /* signal quality */
+ int32 signal; /* in dBm */
+ int32 noise; /* in dBm */
+ uint preamble; /* Unknown, short, long */
+ uint encoding; /* Unknown, CCK, PBCC, OFDM, HT, VHT */
+ uint nfrmtype; /* special 802.11n frames(AMPDU, AMSDU) */
+ uint8 nss; /* Number of spatial streams for VHT frame */
+ uint8 coding;
+ uint16 aid; /* Partial AID for VHT frame */
+ uint8 gid; /* Group ID for VHT frame */
+ uint8 bw; /* Bandwidth for VHT frame */
+ uint16 vhtflags; /* VHT modulation flags */
+ uint16 bw_nonht; /* non-HT bw advertised in rts/cts */
+ uint32 ampdu_counter; /* AMPDU counter for sniffer */
+ uint32 sig_a1; /* TODO: this unused field needs to be removed */
+ uint32 sig_a2; /* TODO: this unused field needs to be removed */
+ uint16 data1;
+ uint16 data2;
+ uint16 data3;
+ uint16 data4;
+ uint16 data5;
+ uint16 data6;
+ uint8 ru_channel1[4];
+ uint8 ru_channel2[4];
+ uint16 flag1;
+ uint16 flag2;
+} BWL_POST_PACKED_STRUCT wl_rxsts_t, wl_mon_rxsts_t;
+#include <packed_section_end.h>
+
+#define WLMONRXSTS_SIZE sizeof(wl_rxsts_t)
+
+/* phy type */
+#define WL_RXS_PHY_N 0x00000004 /* N phy type */
+
+/* encoding */
+#define WL_RXS_ENCODING_UNKNOWN 0x00000000
+#define WL_RXS_ENCODING_DSSS_CCK 0x00000001 /* DSSS/CCK encoding (1, 2, 5.5, 11) */
+#define WL_RXS_ENCODING_OFDM 0x00000002 /* OFDM encoding */
+#define WL_RXS_ENCODING_HT 0x00000003 /* HT encoding */
+#define WL_RXS_ENCODING_VHT 0x00000004 /* VHT encoding */
+#define WL_RXS_ENCODING_HE 0x00000005 /* HE encoding */
+#define WL_RXS_ENCODING_EHT 0x00000006 /* EHT encoding */
+
+/* status per error RX pkt */
+#define WL_RXS_CRC_ERROR 0x00000001 /* CRC Error in packet */
+#define WL_RXS_RUNT_ERROR 0x00000002 /* Runt packet */
+#define WL_RXS_ALIGN_ERROR 0x00000004 /* Misaligned packet */
+#define WL_RXS_OVERSIZE_ERROR 0x00000008 /* packet bigger than RX_LENGTH (usually 1518) */
+#define WL_RXS_WEP_ICV_ERROR 0x00000010 /* Integrity Check Value error */
+#define WL_RXS_WEP_ENCRYPTED 0x00000020 /* Encrypted with WEP */
+#define WL_RXS_PLCP_SHORT 0x00000040 /* Short PLCP error */
+#define WL_RXS_DECRYPT_ERR 0x00000080 /* Decryption error */
+#define WL_RXS_OTHER_ERR 0x80000000 /* Other errors */
+
+/* preamble */
+#define WL_RXS_UNUSED_STUB 0x0 /**< stub to match with wlc_ethereal.h */
+#define WL_RXS_PREAMBLE_SHORT 0x00000001 /**< Short preamble */
+#define WL_RXS_PREAMBLE_LONG 0x00000002 /**< Long preamble */
+#define WL_RXS_PREAMBLE_HT_MM 0x00000003 /**< HT mixed mode preamble */
+#define WL_RXS_PREAMBLE_HT_GF 0x00000004 /**< HT green field preamble */
+
+/* htflags */
+#define WL_RXS_HTF_BW_MASK 0x07
+#define WL_RXS_HTF_40 0x01
+#define WL_RXS_HTF_20L 0x02
+#define WL_RXS_HTF_20U 0x04
+#define WL_RXS_HTF_SGI 0x08
+#define WL_RXS_HTF_STBC_MASK 0x30
+#define WL_RXS_HTF_STBC_SHIFT 4
+#define WL_RXS_HTF_LDPC 0x40
+
+#ifdef WLTXMONITOR
+/* reuse bw-bits in ht for vht */
+#define WL_RXS_VHTF_BW_MASK 0x87
+#define WL_RXS_VHTF_40 0x01
+#define WL_RXS_VHTF_20L WL_RXS_VHTF_20LL
+#define WL_RXS_VHTF_20U WL_RXS_VHTF_20LU
+#define WL_RXS_VHTF_80 0x02
+#define WL_RXS_VHTF_20LL 0x03
+#define WL_RXS_VHTF_20LU 0x04
+#define WL_RXS_VHTF_20UL 0x05
+#define WL_RXS_VHTF_20UU 0x06
+#define WL_RXS_VHTF_40L 0x07
+#define WL_RXS_VHTF_40U 0x80
+#endif /* WLTXMONITOR */
+
+/* vhtflags */
+#define WL_RXS_VHTF_STBC 0x01
+#define WL_RXS_VHTF_TXOP_PS 0x02
+#define WL_RXS_VHTF_SGI 0x04
+#define WL_RXS_VHTF_SGI_NSYM_DA 0x08
+#define WL_RXS_VHTF_LDPC_EXTRA 0x10
+#define WL_RXS_VHTF_BF 0x20
+#define WL_RXS_VHTF_DYN_BW_NONHT 0x40
+#define WL_RXS_VHTF_CODING_LDCP 0x01
+
+#define WL_RXS_VHT_BW_20 0
+#define WL_RXS_VHT_BW_40 1
+#define WL_RXS_VHT_BW_20L 2
+#define WL_RXS_VHT_BW_20U 3
+#define WL_RXS_VHT_BW_80 4
+#define WL_RXS_VHT_BW_40L 5
+#define WL_RXS_VHT_BW_40U 6
+#define WL_RXS_VHT_BW_20LL 7
+#define WL_RXS_VHT_BW_20LU 8
+#define WL_RXS_VHT_BW_20UL 9
+#define WL_RXS_VHT_BW_20UU 10
+#define WL_RXS_VHT_BW_160 11
+#define WL_RXS_VHT_BW_80L 12
+#define WL_RXS_VHT_BW_80U 13
+#define WL_RXS_VHT_BW_40LL 14
+#define WL_RXS_VHT_BW_40LU 15
+#define WL_RXS_VHT_BW_40UL 16
+#define WL_RXS_VHT_BW_40UU 17
+#define WL_RXS_VHT_BW_20LLL 18
+#define WL_RXS_VHT_BW_20LLU 19
+#define WL_RXS_VHT_BW_20LUL 20
+#define WL_RXS_VHT_BW_20LUU 21
+#define WL_RXS_VHT_BW_20ULL 22
+#define WL_RXS_VHT_BW_20ULU 23
+#define WL_RXS_VHT_BW_20UUL 24
+#define WL_RXS_VHT_BW_20UUU 25
+
+#define WL_RXS_NFRM_AMPDU_FIRST 0x00000001 /* first MPDU in A-MPDU */
+#define WL_RXS_NFRM_AMPDU_SUB 0x00000002 /* subsequent MPDU(s) in A-MPDU */
+#define WL_RXS_NFRM_AMSDU_FIRST 0x00000004 /* first MSDU in A-MSDU */
+#define WL_RXS_NFRM_AMSDU_SUB 0x00000008 /* subsequent MSDU(s) in A-MSDU */
+
+/* HE flags */
+#define WL_RXS_HEF_SIGA_PPDU_SU 0x0000
+#define WL_RXS_HEF_SIGA_PPDU_EXT_SU 0x0001
+#define WL_RXS_HEF_SIGA_PPDU_MU 0x0002
+#define WL_RXS_HEF_SIGA_PPDU_TRIG 0x0003
+#define WL_RXS_HEF_SIGA_BSS_COLOR 0x0004
+#define WL_RXS_HEF_SIGA_BEAM_CHANGE 0x0008
+#define WL_RXS_HEF_SIGA_DL_UL 0x0010
+#define WL_RXS_HEF_SIGA_MCS 0x0020
+#define WL_RXS_HEF_SIGA_DCM 0x0040
+#define WL_RXS_HEF_SIGA_CODING 0x0080
+#define WL_RXS_HEF_SIGA_LDPC 0x0100
+#define WL_RXS_HEF_SIGA_STBC 0x0200
+#define WL_RXS_HEF_SIGA_SPATIAL_REUSE 0x0400
+#define WL_RXS_HEF_SIGA_STA_ID 0x0800
+#define WL_RXS_HEF_SIGA_SPATIAL_REUSE2 0x0800
+#define WL_RXS_HEF_SIGA_SPATIAL_REUSE3 0x1000
+#define WL_RXS_HEF_SIGA_SPATIAL_REUSE4 0x2000
+#define WL_RXS_HEF_SIGA_BW 0x4000
+#define WL_RXS_HEF_SIGA_RU_ALLOC 0x4000
+#define WL_RXS_HEF_SIGA_DOPPLER 0x8000
+#define WL_RXS_HEF_SIGA_GI 0x0002
+#define WL_RXS_HEF_SIGA_LTF_SIZE 0x0004 /* no explicit known field */
+#define WL_RXS_HEF_SIGA_NUM_LTF 0x0004
+#define WL_RXS_HEF_SIGA_PADDING 0x0008
+#define WL_RXS_HEF_SIGA_TXBF 0x0010
+#define WL_RXS_HEF_SIGA_PE 0x0020
+#define WL_RXS_HEF_SIGA_TXOP 0x0040
+#define WL_RXS_HEF_SIGA_MIDAMBLE 0x0080
+
+/* https://www.radiotap.org/fields/HE-MU.html */
+#define WL_RXS_HEF_SIGB_MCS_KNOWN 0x0010
+#define WL_RXS_HEF_SIGB_DCM_KNOWN 0x0040
+#define WL_RXS_HEF_CH2_26TONE_RU_KNOWN 0x0080
+#define WL_RXS_HEF_CH1_RU_KNOWN 0x0100
+#define WL_RXS_HEF_CH2_RU_KNOWN 0x0200
+#define WL_RXS_HEF_CH1_26TONE_RU_KNOWN 0x1000
+#define WL_RXS_HEF_SIGB_COMP_KNOWN 0x4000
+#define WL_RXS_HEF_NUM_SIGB_SYMB_KNOWN 0x8000
+#define WL_RXS_HEF_BW_SIGA_KNOWN 0x0004
+#define WL_RXS_HEF_PREPUNCR_SIGA_KNOWN 0x0400
+#define WL_RXS_HEF_SIGB_SYMB_KNOWN 0x8000
+#define WL_RXS_HEF_PREPUNCR_KNOWN 0x0400
+
+#include <packed_section_start.h>
+typedef struct BWL_PRE_PACKED_STRUCT wl_txsts {
+ uint pkterror; /**< error flags per pkt */
+ uint phytype; /**< 802.11 A/B/G /N */
+ chanspec_t chanspec; /**< channel spec */
+ uint16 datarate; /**< rate in 500kbps */
+ uint8 mcs; /**< MCS for HT frame */
+ uint8 htflags; /**< HT modulation flags */
+ uint antenna; /**< antenna pkt transmitted on */
+ uint pktlength; /**< pkt length minus bcm phy hdr */
+ uint32 mactime; /**< ? time stamp from mac, count per 1us */
+ uint preamble; /**< Unknown, short, long */
+ uint encoding; /**< Unknown, CCK, PBCC, OFDM, HT */
+ uint nfrmtype; /**< special 802.11n frames(AMPDU, AMSDU) */
+ uint txflags; /**< As defined in radiotap field 15 */
+ uint retries; /**< Number of retries */
+ struct wl_if *wlif; /**< wl interface */
+} BWL_POST_PACKED_STRUCT wl_txsts_t;
+#include <packed_section_end.h>
+
+#define WL_TXS_TXF_FAIL 0x01 /**< TX failed due to excessive retries */
+#define WL_TXS_TXF_CTS 0x02 /**< TX used CTS-to-self protection */
+#define WL_TXS_TXF_RTSCTS 0x04 /**< TX used RTS/CTS */
+
+#endif /* _MONITOR_H_ */
diff --git a/bcmdhd.101.10.361.x/include/msf.h b/bcmdhd.101.10.361.x/include/msf.h
new file mode 100755
index 0000000..1511ef1
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/msf.h
@@ -0,0 +1,60 @@
+/*
+ * Common interface to MSF (multi-segment format) definitions.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _WLC_MSF_H_
+#define _WLC_MSF_H_
+
+struct wl_segment {
+ uint32 type;
+ uint32 offset;
+ uint32 length;
+ uint32 crc32;
+ uint32 flags;
+};
+typedef struct wl_segment wl_segment_t;
+
+struct wl_segment_info {
+ uint8 magic[4];
+ uint32 hdr_len;
+ uint32 crc32;
+ uint32 file_type;
+ uint32 num_segments;
+ wl_segment_t segments[1];
+};
+typedef struct wl_segment_info wl_segment_info_t;
+
+typedef struct wlc_blob_segment {
+ uint32 type;
+ uint8 *data;
+ uint32 length;
+} wlc_blob_segment_t;
+
+/** Segment types in Binary Eventlog Archive file */
+enum bea_seg_type_e {
+ MSF_SEG_TYP_RTECDC_BIN = 1,
+ MSF_SEG_TYP_LOGSTRS_BIN = 2,
+ MSF_SEG_TYP_FW_SYMBOLS = 3,
+ MSF_SEG_TYP_ROML_BIN = 4
+};
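+
+/*
+ * Sketch of walking the segment table of a multi-segment file image
+ * (illustrative; callers should validate magic, hdr_len and crc32
+ * before trusting the table):
+ *
+ *	const wl_segment_info_t *si = (const wl_segment_info_t *)buf;
+ *	uint32 i;
+ *
+ *	for (i = 0; i < si->num_segments; i++) {
+ *		const wl_segment_t *seg = &si->segments[i];
+ *		... segment data: buf + seg->offset, seg->length bytes ...
+ *	}
+ */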
+
+#endif /* _WLC_MSF_H_ */
diff --git a/bcmdhd.101.10.361.x/include/msgtrace.h b/bcmdhd.101.10.361.x/include/msgtrace.h
new file mode 100755
index 0000000..f564999
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/msgtrace.h
@@ -0,0 +1,56 @@
+/*
+ * Trace messages sent over HBUS
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _MSGTRACE_H
+#define _MSGTRACE_H
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+#define MSGTRACE_VERSION 1
+
+/* Message trace header */
+typedef BWL_PRE_PACKED_STRUCT struct msgtrace_hdr {
+ uint8 version;
+ uint8 trace_type;
+#define MSGTRACE_HDR_TYPE_MSG 0
+#define MSGTRACE_HDR_TYPE_LOG 1
+ uint16 len; /* Len of the trace */
+ uint32 seqnum; /* Sequence number of message. Useful if the message has been lost
+ * because of DMA error or a bus reset (ex: SDIO Func2)
+ */
+ /* Msgtrace type only */
+ uint32 discarded_bytes; /* Number of discarded bytes because of trace overflow */
+ uint32 discarded_printf; /* Number of discarded printf because of trace overflow */
+} BWL_POST_PACKED_STRUCT msgtrace_hdr_t;
+
+#define MSGTRACE_HDRLEN sizeof(msgtrace_hdr_t)
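+
+/*
+ * Illustrative sanity check when consuming a trace buffer; the byte
+ * order of the multi-octet fields is assumed to match the sender:
+ *
+ *	const msgtrace_hdr_t *hdr = (const msgtrace_hdr_t *)pkt;
+ *	if (hdr->version != MSGTRACE_VERSION)
+ *		... bail out: incompatible trace header ...
+ */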
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _MSGTRACE_H */
diff --git a/bcmdhd.101.10.361.x/include/nan.h b/bcmdhd.101.10.361.x/include/nan.h
new file mode 100755
index 0000000..dbbb8ee
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/nan.h
@@ -0,0 +1,1562 @@
+/*
+ * Fundamental types and constants relating to WFA NAN
+ * (Neighbor Awareness Networking)
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+#ifndef _NAN_H_
+#define _NAN_H_
+
+#include <typedefs.h>
+#include <802.11.h>
+
+/* Do we want to include p2p.h for constants like P2P_WFDS_HASH_LEN and
+ * maybe P2P_WFDS_MAX_SVC_NAME_LEN etc.?
+ */
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+/* WiFi NAN OUI values */
+#define NAN_OUI "\x50\x6F\x9A" /* WFA OUI. WiFi-Alliance OUI */
+/* For oui_type field identifying the type and version of the NAN IE. */
+#define NAN_OUI_TYPE 0x13 /* Type/Version */
+#define NAN_AF_OUI_TYPE 0x18 /* Type/Version */
+/* IEEE 802.11 vendor specific information element. (Same as P2P_IE_ID.) */
+#define NAN_IE_ID 0xdd
+
+/* Same as P2P_PUB_AF_CATEGORY and DOT11_ACTION_CAT_PUBLIC */
+#define NAN_PUB_AF_CATEGORY DOT11_ACTION_CAT_PUBLIC
+/* Protected dual public action frame category */
+#define NAN_PROT_DUAL_PUB_AF_CATEGORY DOT11_ACTION_CAT_PDPA
+/* IEEE 802.11 Public Action Frame Vendor Specific. (Same as P2P_PUB_AF_ACTION.) */
+#define NAN_PUB_AF_ACTION DOT11_PUB_ACTION_VENDOR_SPEC
+/* Number of octets in hash of service name. (Same as P2P_WFDS_HASH_LEN.) */
+#define NAN_SVC_HASH_LEN 6
+/* Size of fixed length part of nan_pub_act_frame_t before attributes. */
+#define NAN_PUB_ACT_FRAME_FIXED_LEN 6
+/* Number of octets in master rank value. */
+#define NAN_MASTER_RANK_LEN 8
+/* NAN public action frame header size */
+#define NAN_PUB_ACT_FRAME_HDR_SIZE (OFFSETOF(nan_pub_act_frame_t, data))
+/* NAN network ID */
+#define NAN_NETWORK_ID "\x51\x6F\x9A\x01\x00\x00"
+/* Service Control Type length */
+#define NAN_SVC_CONTROL_TYPE_LEN 2
+/* Binding Bitmap length */
+#define NAN_BINDING_BITMAP_LEN 2
+/* Service Response Filter (SRF) control field masks */
+#define NAN_SRF_BLOOM_MASK 0x01
+#define NAN_SRF_INCLUDE_MASK 0x02
+#define NAN_SRF_INDEX_MASK 0x0C
+/* SRF Bloom Filter index shift */
+#define NAN_SRF_BLOOM_SHIFT 2
+#define NAN_SRF_INCLUDE_SHIFT 1
+/* Mask for CRC32 output, used in hash function for NAN bloom filter */
+#define NAN_BLOOM_CRC32_MASK 0xFFFF
+
+/* Attribute TLV header size */
+#define NAN_ATTR_ID_OFF 0
+#define NAN_ATTR_LEN_OFF 1
+#define NAN_ATTR_DATA_OFF 3
+
+#define NAN_ATTR_ID_LEN 1u /* ID field length */
+#define NAN_ATTR_LEN_LEN 2u /* Length field length */
+#define NAN_ATTR_HDR_LEN (NAN_ATTR_ID_LEN + NAN_ATTR_LEN_LEN)
+#define NAN_ENTRY_CTRL_LEN 1 /* Entry control field length from FAM attribute */
+#define NAN_MAP_ID_LEN 1 /* MAP ID length to signify band */
+#define NAN_OPERATING_CLASS_LEN 1 /* operating class field length from NAN FAM */
+#define NAN_CHANNEL_NUM_LEN 1 /* channel number field length 1 byte */
+
+/* generic nan attribute total length */
+#define NAN_ATTR_TOT_LEN(_nan_attr) (ltoh16_ua(((const uint8 *)(_nan_attr)) + \
+ NAN_ATTR_ID_LEN) + NAN_ATTR_HDR_LEN)
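+
+/*
+ * Sketch of stepping through a NAN attribute list with the macro above
+ * (illustrative; NAN attributes carry a 1-byte ID and a 2-byte
+ * little-endian length, hence ltoh16_ua()):
+ *
+ *	const uint8 *attr = body;
+ *	while (attr + NAN_ATTR_HDR_LEN <= body + body_len) {
+ *		uint16 tot_len = NAN_ATTR_TOT_LEN(attr);
+ *		... dispatch on attr[NAN_ATTR_ID_OFF] ...
+ *		attr += tot_len;
+ *	}
+ */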
+
+/* NAN slot duration / period */
+#define NAN_MIN_TU 16
+#define NAN_TU_PER_DW 512
+#define NAN_MAX_DW 16
+#define NAN_MAX_TU (NAN_MAX_DW * NAN_TU_PER_DW)
+
+#define NAN_SLOT_DUR_0TU 0
+#define NAN_SLOT_DUR_16TU 16
+#define NAN_SLOT_DUR_32TU 32
+#define NAN_SLOT_DUR_64TU 64
+#define NAN_SLOT_DUR_128TU 128
+#define NAN_SLOT_DUR_256TU 256
+#define NAN_SLOT_DUR_512TU 512
+#define NAN_SLOT_DUR_1024TU 1024
+#define NAN_SLOT_DUR_2048TU 2048
+#define NAN_SLOT_DUR_4096TU 4096
+#define NAN_SLOT_DUR_8192TU 8192
+
+#define NAN_SOC_CHAN_2G 6 /* NAN 2.4G discovery channel */
+#define NAN_SOC_CHAN_5G_CH149 149 /* NAN 5G discovery channel if upper band allowed */
+#define NAN_SOC_CHAN_5G_CH44 44 /* NAN 5G discovery channel if only lower band allowed */
+
+/* size of ndc id */
+#define NAN_DATA_NDC_ID_SIZE 6
+
+#define NAN_AVAIL_ENTRY_LEN_RES0 7 /* Avail entry len in FAM attribute for resolution 16TU */
+#define NAN_AVAIL_ENTRY_LEN_RES1 5 /* Avail entry len in FAM attribute for resolution 32TU */
+#define NAN_AVAIL_ENTRY_LEN_RES2 4 /* Avail entry len in FAM attribute for resolution 64TU */
+
+/* map id field */
+#define NAN_MAPID_SPECIFIC_MAP_MASK 0x01 /* apply to specific map */
+#define NAN_MAPID_MAPID_MASK 0x1E
+#define NAN_MAPID_MAPID_SHIFT 1
+#define NAN_MAPID_SPECIFIC_MAP(_mapid) ((_mapid) & NAN_MAPID_SPECIFIC_MAP_MASK)
+#define NAN_MAPID_ALL_MAPS(_mapid) (!NAN_MAPID_SPECIFIC_MAP(_mapid))
+#define NAN_MAPID_MAPID(_mapid) (((_mapid) & NAN_MAPID_MAPID_MASK) \
+ >> NAN_MAPID_MAPID_SHIFT)
+#define NAN_MAPID_SET_SPECIFIC_MAPID(map_id) ((((map_id) << NAN_MAPID_MAPID_SHIFT) \
+ & NAN_MAPID_MAPID_MASK) | NAN_MAPID_SPECIFIC_MAP_MASK)
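+
+/*
+ * Encode/decode examples for the map id field (values illustrative):
+ *
+ *	uint8 f = NAN_MAPID_SET_SPECIFIC_MAPID(3);	// 0x07
+ *	if (NAN_MAPID_SPECIFIC_MAP(f))
+ *		map = NAN_MAPID_MAPID(f);		// 3
+ */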
+
+/* Vendor-specific public action frame for NAN */
+typedef BWL_PRE_PACKED_STRUCT struct nan_pub_act_frame_s {
+ /* NAN_PUB_AF_CATEGORY 0x04 */
+ uint8 category_id;
+ /* NAN_PUB_AF_ACTION 0x09 */
+ uint8 action_field;
+ /* NAN_OUI 0x50-6F-9A */
+ uint8 oui[DOT11_OUI_LEN];
+ /* NAN_OUI_TYPE 0x13 */
+ uint8 oui_type;
+ /* One or more NAN Attributes follow */
+ uint8 data[];
+} BWL_POST_PACKED_STRUCT nan_pub_act_frame_t;
+
+/* NAN attributes as defined in the nan spec */
+enum {
+ NAN_ATTR_MASTER_IND = 0,
+ NAN_ATTR_CLUSTER = 1,
+ NAN_ATTR_SVC_ID_LIST = 2,
+ NAN_ATTR_SVC_DESCRIPTOR = 3,
+ NAN_ATTR_CONN_CAP = 4,
+ NAN_ATTR_INFRA = 5,
+ NAN_ATTR_P2P = 6,
+ NAN_ATTR_IBSS = 7,
+ NAN_ATTR_MESH = 8,
+ NAN_ATTR_FURTHER_NAN_SD = 9,
+ NAN_ATTR_FURTHER_AVAIL = 10,
+ NAN_ATTR_COUNTRY_CODE = 11,
+ NAN_ATTR_RANGING = 12,
+ NAN_ATTR_CLUSTER_DISC = 13,
+ /* nan 2.0 */
+ NAN_ATTR_SVC_DESC_EXTENSION = 14,
+ NAN_ATTR_NAN_DEV_CAP = 15,
+ NAN_ATTR_NAN_NDP = 16,
+ NAN_ATTR_NAN_NMSG = 17,
+ NAN_ATTR_NAN_AVAIL = 18,
+ NAN_ATTR_NAN_NDC = 19,
+ NAN_ATTR_NAN_NDL = 20,
+ NAN_ATTR_NAN_NDL_QOS = 21,
+ NAN_ATTR_MCAST_SCHED = 22,
+ NAN_ATTR_UNALIGN_SCHED = 23,
+ NAN_ATTR_PAGING_UCAST = 24,
+ NAN_ATTR_PAGING_MCAST = 25,
+ NAN_ATTR_RANGING_INFO = 26,
+ NAN_ATTR_RANGING_SETUP = 27,
+ NAN_ATTR_FTM_RANGE_REPORT = 28,
+ NAN_ATTR_ELEMENT_CONTAINER = 29,
+ NAN_ATTR_WLAN_INFRA_EXT = 30,
+ NAN_ATTR_EXT_P2P_OPER = 31,
+ NAN_ATTR_EXT_IBSS = 32,
+ NAN_ATTR_EXT_MESH = 33,
+ NAN_ATTR_CIPHER_SUITE_INFO = 34,
+ NAN_ATTR_SEC_CTX_ID_INFO = 35,
+ NAN_ATTR_SHARED_KEY_DESC = 36,
+ NAN_ATTR_MCAST_SCHED_CHANGE = 37,
+ NAN_ATTR_MCAST_SCHED_OWNER_CHANGE = 38,
+ NAN_ATTR_PUBLIC_AVAILABILITY = 39,
+ NAN_ATTR_SUB_SVC_ID_LIST = 40,
+ NAN_ATTR_NDPE = 41,
+ /* change NAN_ATTR_MAX_ID to max ids + 1, excluding NAN_ATTR_VENDOR_SPECIFIC.
+ * This is used in nan_parse.c
+ */
+ NAN_ATTR_MAX_ID = NAN_ATTR_NDPE + 1,
+
+ NAN_ATTR_VENDOR_SPECIFIC = 221
+};
+
+enum wifi_nan_avail_resolution {
+ NAN_AVAIL_RES_16_TU = 0,
+ NAN_AVAIL_RES_32_TU = 1,
+ NAN_AVAIL_RES_64_TU = 2,
+ NAN_AVAIL_RES_INVALID = 255
+};
+
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ie_s {
+ uint8 id; /* IE ID: NAN_IE_ID 0xDD */
+ uint8 len; /* IE length */
+ uint8 oui[DOT11_OUI_LEN]; /* NAN_OUI 50:6F:9A */
+ uint8 oui_type; /* NAN_OUI_TYPE 0x13 */
+ uint8 attr[]; /* var len attributes */
+} BWL_POST_PACKED_STRUCT wifi_nan_ie_t;
+
+#define NAN_IE_HDR_SIZE (OFFSETOF(wifi_nan_ie_t, attr))
+
+/* master indication record */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_master_ind_attr_s {
+ uint8 id;
+ uint16 len;
+ uint8 master_preference;
+ uint8 random_factor;
+} BWL_POST_PACKED_STRUCT wifi_nan_master_ind_attr_t;
+
+/* cluster attr record */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_cluster_attr_s {
+ uint8 id;
+ uint16 len;
+ uint8 amr[NAN_MASTER_RANK_LEN];
+ uint8 hop_count;
+ /* Anchor Master Beacon Transmission Time */
+ uint32 ambtt;
+} BWL_POST_PACKED_STRUCT wifi_nan_cluster_attr_t;
+
+/* container for service ID records */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_svc_id_attr_s {
+ uint8 id;
+ uint16 len;
+ uint8 svcid[0]; /* list of 6-byte service IDs */
+} BWL_POST_PACKED_STRUCT wifi_nan_svc_id_attr_t;
+
+/* service_control bitmap for wifi_nan_svc_descriptor_attr_t below */
+#define NAN_SC_PUBLISH 0x0
+#define NAN_SC_SUBSCRIBE 0x1
+#define NAN_SC_FOLLOWUP 0x2
+/* Set to 1 if a Matching Filter field is included in descriptors. */
+#define NAN_SC_MATCHING_FILTER_PRESENT 0x4
+/* Set to 1 if a Service Response Filter field is included in descriptors. */
+#define NAN_SC_SR_FILTER_PRESENT 0x8
+/* Set to 1 if a Service Info field is included in descriptors. */
+#define NAN_SC_SVC_INFO_PRESENT 0x10
+/* range is close proximity only */
+#define NAN_SC_RANGE_LIMITED 0x20
+/* Set to 1 if binding bitmap is present in descriptors */
+#define NAN_SC_BINDING_BITMAP_PRESENT 0x40
+
+/* Service descriptor */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_svc_descriptor_attr_s {
+ /* Attribute ID - 0x03. */
+ uint8 id;
+ /* Length of the following fields in the attribute */
+ uint16 len;
+ /* Hash of the Service Name */
+ uint8 svc_hash[NAN_SVC_HASH_LEN];
+ /* Publish or subscribe instance id */
+ uint8 instance_id;
+ /* Requestor Instance ID */
+ uint8 requestor_id;
+ /* Service Control Bitmask. Also determines what data follows. */
+ uint8 svc_control;
+ /* Optional fields follow */
+} BWL_POST_PACKED_STRUCT wifi_nan_svc_descriptor_attr_t;
+
+/* IBSS attribute */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ibss_attr_s {
+ /* Attribute ID - 0x07. */
+ uint8 id;
+ /* Length of the following fields in the attribute */
+ uint16 len;
+ /* BSSID of the ibss */
+ struct ether_addr bssid;
+ /*
+ map control bits:
+ [0-3]: Id for associated further avail map attribute
+ [4-5]: avail interval duration: 0:16ms; 1:32ms; 2:64ms; 3:reserved
+ [6] : repeat : 0 - applies to next DW only, 1 - repeats (16 intervals max?)
+ [7] : reserved
+ */
+ uint8 map_ctrl;
+ /* avail. intervals bitmap, var len */
+ uint8 avail_bmp[1];
+} BWL_POST_PACKED_STRUCT wifi_nan_ibss_attr_t;
+
+/* Country code attribute */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_country_code_attr_s {
+ /* Attribute ID - 0x0B. */
+ uint8 id;
+ /* Length of the following fields in the attribute */
+ uint16 len;
+ /* Condensed Country String first two octets */
+ uint8 country_str[2];
+} BWL_POST_PACKED_STRUCT wifi_nan_country_code_attr_t;
+
+/* Further Availability MAP attr */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_favail_attr_s {
+ /* Attribute ID - 0x0A. */
+ uint8 id;
+ /* Length of the following fields in the attribute */
+ uint16 len;
+ /* MAP id: val [0..15], values[16-255] reserved */
+ uint8 map_id;
+ /* availability entry, var len */
+ uint8 avil_entry[1];
+} BWL_POST_PACKED_STRUCT wifi_nan_favail_attr_t;
+
+/* Availability entry within a Further Availability MAP */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_avail_entry_s {
+ /*
+ entry control
+ [0-1]: avail interval duration: 0:16ms; 1:32ms; 2:64ms;
+ [2-7]: reserved
+ */
+ uint8 entry_ctrl;
+ /* operating class: freq band etc IEEE 802.11 */
+ uint8 opclass;
+ /* channel number */
+ uint8 chan;
+ /* avail bmp, var len */
+ uint8 avail_bmp[1];
+} BWL_POST_PACKED_STRUCT wifi_nan_avail_entry_t;
+
+/* Map control Field */
+#define NAN_MAPCTRL_IDMASK 0x7
+#define NAN_MAPCTRL_DURSHIFT 4
+#define NAN_MAPCTRL_DURMASK 0x30
+#define NAN_MAPCTRL_REPEAT 0x40
+#define NAN_MAPCTRL_REPEATSHIFT 6
+
+#define NAN_VENDOR_TYPE_RTT 0
+#define NAN_VENDOR_TYPE_P2P 1
+
+/* Vendor Specific Attribute - old definition */
+/* TODO remove */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_vendor_attr_s {
+ uint8 id; /* 0xDD */
+ uint16 len; /* IE length */
+ uint8 oui[DOT11_OUI_LEN]; /* 00-90-4C */
+ uint8 type; /* attribute type */
+ uint8 attr[1]; /* var len attributes */
+} BWL_POST_PACKED_STRUCT wifi_nan_vendor_attr_t;
+
+#define NAN_VENDOR_HDR_SIZE (OFFSETOF(wifi_nan_vendor_attr_t, attr))
+
+/* vendor specific attribute */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_vndr_attr_s {
+ uint8 id; /* 0xDD */
+ uint16 len; /* length of following fields */
+ uint8 oui[DOT11_OUI_LEN]; /* vendor specific OUI */
+ uint8 body[];
+} BWL_POST_PACKED_STRUCT wifi_nan_vndr_attr_t;
+
+/* p2p operation attribute */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_p2p_op_attr_s {
+ /* Attribute ID - 0x06. */
+ uint8 id;
+ /* Length of the following fields in the attribute */
+ uint16 len;
+ /* P2P device role */
+ uint8 dev_role;
+ /* BSSID of the ibss */
+ struct ether_addr p2p_dev_addr;
+	/*
+	map control bits:
+	[0-3]: Id for associated further avail map attribute
+	[4-5]: avail interval duration: 0:16ms; 1:32ms; 2:64ms; 3:reserved
+	[6] : repeat : 0 - applies to next DW only, 1 - repeats, 16 intervals max
+	[7] : reserved
+	*/
+ uint8 map_ctrl;
+ /* avail. intervals bitmap */
+ uint8 avail_bmp[1];
+} BWL_POST_PACKED_STRUCT wifi_nan_p2p_op_attr_t;
+
+/* ranging attribute */
+#define NAN_RANGING_MAP_CTRL_ID_SHIFT 0
+#define NAN_RANGING_MAP_CTRL_ID_MASK 0x0F
+#define NAN_RANGING_MAP_CTRL_DUR_SHIFT 4
+#define NAN_RANGING_MAP_CTRL_DUR_MASK 0x30
+#define NAN_RANGING_MAP_CTRL_REPEAT_SHIFT 6
+#define NAN_RANGING_MAP_CTRL_REPEAT_MASK 0x40
+#define NAN_RANGING_MAP_CTRL_REPEAT_DW(_ctrl) (((_ctrl) & \
+	NAN_RANGING_MAP_CTRL_REPEAT_MASK) ? 16 : 1)
+#define NAN_RANGING_MAP_CTRL(_id, _dur, _repeat) (\
+ (((_id) << NAN_RANGING_MAP_CTRL_ID_SHIFT) & \
+ NAN_RANGING_MAP_CTRL_ID_MASK) | \
+ (((_dur) << NAN_RANGING_MAP_CTRL_DUR_SHIFT) & \
+ NAN_RANGING_MAP_CTRL_DUR_MASK) | \
+ (((_repeat) << NAN_RANGING_MAP_CTRL_REPEAT_SHIFT) & \
+ NAN_RANGING_MAP_CTRL_REPEAT_MASK))
+
+enum {
+ NAN_RANGING_PROTO_FTM = 0
+};
+
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ranging_attr_s {
+ uint8 id; /* 0x0C */
+ uint16 len; /* length that follows */
+ struct ether_addr dev_addr; /* device mac address */
+
+	/*
+	map control bits:
+	[0-3]: Id for associated further avail map attribute
+	[4-5]: avail interval duration: 0:16ms; 1:32ms; 2:64ms; 3:reserved
+	[6] : repeat : 0 - applies to next DW only, 1 - repeats, 16 intervals max
+	[7] : reserved
+	*/
+ uint8 map_ctrl;
+
+ uint8 protocol; /* FTM = 0 */
+ uint32 avail_bmp; /* avail interval bitmap */
+} BWL_POST_PACKED_STRUCT wifi_nan_ranging_attr_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ranging_info_attr_s {
+ uint8 id; /* 0x1A */
+ uint16 len; /* length that follows */
+ /*
+ location info availability bit map
+ 0: LCI Local Coordinates
+ 1: Geospatial LCI WGS84
+	2: Civic Location
+ 3: Last Movement Indication
+ [4-7]: reserved
+ */
+ uint8 lc_info_avail;
+ /*
+ Last movement indication
+ present if bit 3 is set in lc_info_avail
+ cluster TSF[29:14] at the last detected platform movement
+ */
+ uint16 last_movement;
+
+} BWL_POST_PACKED_STRUCT wifi_nan_ranging_info_attr_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ranging_setup_attr_hdr_s {
+ uint8 id; /* 0x1B */
+ uint16 len; /* length that follows */
+ uint8 dialog_token; /* Identify req and resp */
+ uint8 type_status; /* bits 0-3 type, 4-7 status */
+ /* reason code
+ i. when frm type = response & status = reject
+ ii. frm type = termination
+ */
+ uint8 reason;
+} BWL_POST_PACKED_STRUCT wifi_nan_ranging_setup_attr_hdr_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ranging_setup_attr_s {
+
+ wifi_nan_ranging_setup_attr_hdr_t setup_attr_hdr;
+ /* Below fields not required when frm type = termination */
+ uint8 ranging_ctrl; /* Bit 0: ranging report required or not */
+ uint8 ftm_params[3];
+ uint8 data[]; /* schedule entry list */
+} BWL_POST_PACKED_STRUCT wifi_nan_ranging_setup_attr_t;
+
+#define NAN_RANGE_SETUP_ATTR_OFFSET_TBM_INFO (OFFSETOF(wifi_nan_ranging_setup_attr_t, data))
+
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ranging_report_attr_s {
+ uint8 id; /* 0x1C */
+ uint16 len; /* length that follows */
+ /* FTM report format in spec.
+ See definition in 9.4.2.22.18 in 802.11mc D5.0
+ */
+ uint8 entry_count;
+ uint8 data[]; /* Variable size range entry */
+ /*
+ dot11_ftm_range_entry_t entries[entry_count];
+ uint8 error_count;
+ dot11_ftm_error_entry_t errors[error_count];
+ */
+} BWL_POST_PACKED_STRUCT wifi_nan_ranging_report_attr_t;
+
+/* Ranging control flags */
+#define NAN_RNG_REPORT_REQUIRED 0x01
+#define NAN_RNG_FTM_PARAMS_PRESENT 0x02
+#define NAN_RNG_SCHED_ENTRY_PRESENT 0X04
+
+/* Location info flags */
+#define NAN_RNG_LOCATION_FLAGS_LOCAL_CORD 0x1
+#define NAN_RNG_LOCATION_FLAGS_GEO_SPATIAL 0x2
+#define NAN_RNG_LOCATION_FLAGS_CIVIC 0x4
+#define NAN_RNG_LOCATION_FLAGS_LAST_MVMT 0x8
+
+/* Last movement mask and shift value */
+#define NAN_RNG_LOCATION_MASK_LAST_MVT_TSF 0x3FFFC000
+#define NAN_RNG_LOCATION_SHIFT_LAST_MVT_TSF 14
+
+/* FTM params shift values */
+#define NAN_FTM_MAX_BURST_DUR_SHIFT 0
+#define NAN_FTM_MIN_FTM_DELTA_SHIFT 4
+#define NAN_FTM_NUM_FTM_SHIFT 10
+#define NAN_FTM_FORMAT_BW_SHIFT 15
+
+/* FTM params mask */
+#define NAN_FTM_MAX_BURST_DUR_MASK 0x00000F
+#define NAN_FTM_MIN_FTM_DELTA_MASK 0x00003F
+#define NAN_FTM_NUM_FTM_MASK 0x00001F
+#define NAN_FTM_FORMAT_BW_MASK 0x00003F
+
+#define FTM_PARAMS_BURSTTMO_FACTOR 250
+
+/* set param values into a uint32 */
+#define NAN_FTM_SET_BURST_DUR(ftm, dur) (ftm |= (((dur + 2) & NAN_FTM_MAX_BURST_DUR_MASK) <<\
+ NAN_FTM_MAX_BURST_DUR_SHIFT))
+#define NAN_FTM_SET_FTM_DELTA(ftm, delta) (ftm |= (((delta/100) & NAN_FTM_MIN_FTM_DELTA_MASK) <<\
+ NAN_FTM_MIN_FTM_DELTA_SHIFT))
+#define NAN_FTM_SET_NUM_FTM(ftm, delta) (ftm |= ((delta & NAN_FTM_NUM_FTM_MASK) <<\
+ NAN_FTM_NUM_FTM_SHIFT))
+#define NAN_FTM_SET_FORMAT_BW(ftm, delta) (ftm |= ((delta & NAN_FTM_FORMAT_BW_MASK) <<\
+ NAN_FTM_FORMAT_BW_SHIFT))
+/* set uint32 to attribute */
+#define NAN_FTM_PARAMS_UINT32_TO_ATTR(ftm_u32, ftm_attr) {ftm_attr[0] = ftm_u32 & 0xFF; \
+ ftm_attr[1] = (ftm_u32 >> 8) & 0xFF; ftm_attr[2] = (ftm_u32 >> 16) & 0xFF;}
+
+/* get attribute into a uint32 */
+#define NAN_FTM_PARAMS_ATTR_TO_UINT32(ftm_p, ftm_u32) (ftm_u32 = ftm_p[0] | ftm_p[1] << 8 | \
+ ftm_p[2] << 16)
+/* get param values from uint32 */
+#define NAN_FTM_GET_BURST_DUR(ftm) (((ftm >> NAN_FTM_MAX_BURST_DUR_SHIFT) &\
+ NAN_FTM_MAX_BURST_DUR_MASK))
+#define NAN_FTM_GET_BURST_DUR_USEC(_val) ((1 << ((_val)-2)) * FTM_PARAMS_BURSTTMO_FACTOR)
+#define NAN_FTM_GET_FTM_DELTA(ftm) (((ftm >> NAN_FTM_MIN_FTM_DELTA_SHIFT) &\
+ NAN_FTM_MIN_FTM_DELTA_MASK)*100)
+#define NAN_FTM_GET_NUM_FTM(ftm) ((ftm >> NAN_FTM_NUM_FTM_SHIFT) &\
+ NAN_FTM_NUM_FTM_MASK)
+#define NAN_FTM_GET_FORMAT_BW(ftm) ((ftm >> NAN_FTM_FORMAT_BW_SHIFT) &\
+ NAN_FTM_FORMAT_BW_MASK)
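+
+/*
+ * Illustrative sketch (not part of the original header): packing FTM
+ * parameters into the 3-octet attribute encoding and reading them back
+ * with the macros above. The values and helper name are hypothetical.
+ */
+static INLINE uint32
+nan_ftm_params_example(uint8 ftm_attr[3])
+{
+	uint32 ftm = 0;
+	NAN_FTM_SET_BURST_DUR(ftm, 6);		/* stored as (dur + 2) per macro */
+	NAN_FTM_SET_FTM_DELTA(ftm, 200);	/* usec, stored in 100us units */
+	NAN_FTM_SET_NUM_FTM(ftm, 8);		/* FTMs per burst */
+	NAN_FTM_PARAMS_UINT32_TO_ATTR(ftm, ftm_attr);
+	NAN_FTM_PARAMS_ATTR_TO_UINT32(ftm_attr, ftm);
+	return ftm;				/* same 24 bits round-trip */
+}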
+
+#define NAN_CONN_CAPABILITY_WFD 0x0001
+#define NAN_CONN_CAPABILITY_WFDS 0x0002
+#define NAN_CONN_CAPABILITY_TDLS 0x0004
+#define NAN_CONN_CAPABILITY_INFRA 0x0008
+#define NAN_CONN_CAPABILITY_IBSS 0x0010
+#define NAN_CONN_CAPABILITY_MESH 0x0020
+
+#define NAN_DEFAULT_MAP_ID 0 /* nan default map id */
+#define NAN_DEFAULT_MAP_CTRL 0 /* nan default map control */
+
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_conn_cap_attr_s {
+ /* Attribute ID - 0x04. */
+ uint8 id;
+ /* Length of the following fields in the attribute */
+ uint16 len;
+ uint16 conn_cap_bmp; /* Connection capability bitmap */
+} BWL_POST_PACKED_STRUCT wifi_nan_conn_cap_attr_t;
+
+/* NAN Element container Attribute */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_container_attr_s {
+ uint8 id; /* id - 0x20 */
+ uint16 len; /* Total length of following IEs */
+ uint8 map_id; /* map id */
+ uint8 data[1]; /* Data pointing to one or more IEs */
+} BWL_POST_PACKED_STRUCT wifi_nan_container_attr_t;
+
+/* NAN 2.0 NAN avail attribute */
+
+/* Availability Attribute */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_avail_attr_s {
+ uint8 id; /* id - 0x12 */
+ uint16 len; /* total length */
+ uint8 seqid; /* sequence id */
+ uint16 ctrl; /* attribute control */
+ uint8 entry[1]; /* availability entry list */
+} BWL_POST_PACKED_STRUCT wifi_nan_avail_attr_t;
+
+/* for processing/building time bitmap info in nan_avail_entry */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_time_bitmap_s {
+ uint16 ctrl; /* Time bitmap control */
+ uint8 len; /* Time bitmap length */
+ uint8 bitmap[]; /* Time bitmap */
+} BWL_POST_PACKED_STRUCT wifi_nan_time_bitmap_t;
+
+/* Availability Entry format */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_avail_entry_attr_s {
+ uint16 len; /* Length */
+ uint16 entry_cntrl; /* Entry Control */
+ uint8 var[]; /* Time bitmap and channel entry list */
+} BWL_POST_PACKED_STRUCT wifi_nan_avail_entry_attr_t;
+
+/* FAC Channel Entry (section 10.7.19.1.5) */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_chan_entry_s {
+ uint8 oper_class; /* Operating Class */
+ uint16 chan_bitmap; /* Channel Bitmap */
+ uint8 primary_chan_bmp; /* Primary Channel Bitmap */
+ uint8 aux_chan[0]; /* Auxiliary Channel bitmap */
+} BWL_POST_PACKED_STRUCT wifi_nan_chan_entry_t;
+
+/* Channel entry */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_channel_entry_s {
+ uint8 opclass; /* Operating class */
+ uint16 chan_bitmap; /* Channel bitmap */
+ uint8 prim_bitmap; /* Primary channel bitmap */
+	uint16 aux_bitmap;	/* Auxiliary channel bitmap */
+} BWL_POST_PACKED_STRUCT wifi_nan_channel_entry_t;
+
+/* Type of Availability: committed */
+#define NAN_ENTRY_CNTRL_TYPE_COMM_AVAIL_MASK 0x1
+/* Type of Availability: potential */
+#define NAN_ENTRY_CNTRL_TYPE_POTEN_AVAIL_MASK 0x2
+/* Type of Availability: conditional */
+#define NAN_ENTRY_CNTRL_TYPE_COND_AVAIL_MASK 0x4
+
+#define NAN_AVAIL_CTRL_MAP_ID_MASK 0x000F
+#define NAN_AVAIL_CTRL_MAP_ID(_ctrl) ((_ctrl) & NAN_AVAIL_CTRL_MAP_ID_MASK)
+#define NAN_AVAIL_CTRL_COMM_CHANGED_MASK 0x0010
+#define NAN_AVAIL_CTRL_COMM_CHANGED(_ctrl) ((_ctrl) & NAN_AVAIL_CTRL_COMM_CHANGED_MASK)
+#define NAN_AVAIL_CTRL_POTEN_CHANGED_MASK 0x0020
+#define NAN_AVAIL_CTRL_POTEN_CHANGED(_ctrl) ((_ctrl) & NAN_AVAIL_CTRL_POTEN_CHANGED_MASK)
+#define NAN_AVAIL_CTRL_PUBLIC_CHANGED_MASK 0x0040
+#define NAN_AVAIL_CTRL_PUBLIC_CHANGED(_ctrl) ((_ctrl) & NAN_AVAIL_CTRL_PUBLIC_CHANGED_MASK)
+#define NAN_AVAIL_CTRL_NDC_CHANGED_MASK 0x0080
+#define NAN_AVAIL_CTRL_NDC_CHANGED(_ctrl) ((_ctrl) & NAN_AVAIL_CTRL_NDC_CHANGED_MASK)
+#define NAN_AVAIL_CTRL_MCAST_CHANGED_MASK 0x0100
+#define NAN_AVAIL_CTRL_MCAST_CHANGED(_ctrl) ((_ctrl) & NAN_AVAIL_CTRL_MCAST_CHANGED_MASK)
+#define NAN_AVAIL_CTRL_MCAST_CHG_CHANGED_MASK 0x0200
+#define NAN_AVAIL_CTRL_MCAST_CHG_CHANGED(_ctrl) ((_ctrl) & NAN_AVAIL_CTRL_MCAST_CHG_CHANGED_MASK)
+#define NAN_AVAIL_CTRL_CHANGED_FLAGS_MASK 0x03f0
+
+#define NAN_AVAIL_ENTRY_CTRL_AVAIL_TYPE_MASK 0x07
+#define NAN_AVAIL_ENTRY_CTRL_AVAIL_TYPE(_flags) ((_flags) & NAN_AVAIL_ENTRY_CTRL_AVAIL_TYPE_MASK)
+#define NAN_AVAIL_ENTRY_CTRL_USAGE_MASK 0x18
+#define NAN_AVAIL_ENTRY_CTRL_USAGE_SHIFT 3
+#define NAN_AVAIL_ENTRY_CTRL_USAGE(_flags) (((_flags) & NAN_AVAIL_ENTRY_CTRL_USAGE_MASK) \
+ >> NAN_AVAIL_ENTRY_CTRL_USAGE_SHIFT)
+#define NAN_AVAIL_ENTRY_CTRL_UTIL_MASK 0xE0
+#define NAN_AVAIL_ENTRY_CTRL_UTIL_SHIFT 5
+#define NAN_AVAIL_ENTRY_CTRL_UTIL(_flags) (((_flags) & NAN_AVAIL_ENTRY_CTRL_UTIL_MASK) \
+ >> NAN_AVAIL_ENTRY_CTRL_UTIL_SHIFT)
+#define NAN_AVAIL_ENTRY_CTRL_RX_NSS_MASK 0xF00
+#define NAN_AVAIL_ENTRY_CTRL_RX_NSS_SHIFT 8
+#define NAN_AVAIL_ENTRY_CTRL_RX_NSS(_flags) (((_flags) & NAN_AVAIL_ENTRY_CTRL_RX_NSS_MASK) \
+ >> NAN_AVAIL_ENTRY_CTRL_RX_NSS_SHIFT)
+#define NAN_AVAIL_ENTRY_CTRL_BITMAP_PRESENT_MASK 0x1000
+#define NAN_AVAIL_ENTRY_CTRL_BITMAP_PRESENT_SHIFT 12
+#define NAN_AVAIL_ENTRY_CTRL_BITMAP_PRESENT(_flags) (((_flags) & \
+ NAN_AVAIL_ENTRY_CTRL_BITMAP_PRESENT_MASK) >> NAN_AVAIL_ENTRY_CTRL_BITMAP_PRESENT_SHIFT)
+#define NAN_AVAIL_ENTRY_CTRL_TIME_BITMAP_PRESENT 1
+#define NAN_AVAIL_ENTRY_CTRL_USAGE_PREFERENCE 0x3
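+
+/*
+ * Illustrative sketch (not part of the original header): decoding the
+ * 16-bit entry_cntrl field of an availability entry with the accessors
+ * above. Helper name is hypothetical.
+ */
+static INLINE void
+nan_avail_entry_ctrl_unpack(uint16 ctrl, uint8 *avail_type, uint8 *usage, uint8 *rx_nss)
+{
+	*avail_type = (uint8)NAN_AVAIL_ENTRY_CTRL_AVAIL_TYPE(ctrl);
+	*usage = (uint8)NAN_AVAIL_ENTRY_CTRL_USAGE(ctrl);
+	*rx_nss = (uint8)NAN_AVAIL_ENTRY_CTRL_RX_NSS(ctrl);
+}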
+
+#define NAN_TIME_BMAP_CTRL_BITDUR_MASK 0x07
+#define NAN_TIME_BMAP_CTRL_BITDUR(_flags) ((_flags) & NAN_TIME_BMAP_CTRL_BITDUR_MASK)
+#define NAN_TIME_BMAP_CTRL_PERIOD_MASK 0x38
+#define NAN_TIME_BMAP_CTRL_PERIOD_SHIFT 3
+#define NAN_TIME_BMAP_CTRL_PERIOD(_flags) (((_flags) & NAN_TIME_BMAP_CTRL_PERIOD_MASK) \
+ >> NAN_TIME_BMAP_CTRL_PERIOD_SHIFT)
+#define NAN_TIME_BMAP_CTRL_OFFSET_MASK 0x7FC0
+#define NAN_TIME_BMAP_CTRL_OFFSET_SHIFT 6
+#define NAN_TIME_BMAP_CTRL_OFFSET(_flags) (((_flags) & NAN_TIME_BMAP_CTRL_OFFSET_MASK) \
+ >> NAN_TIME_BMAP_CTRL_OFFSET_SHIFT)
+#define NAN_TIME_BMAP_LEN(avail_entry) \
+ (*(uint8 *)(((wifi_nan_avail_entry_attr_t *)avail_entry)->var + 2))
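+
+/*
+ * Illustrative sketch (not part of the original header): decoding a time
+ * bitmap control field with the accessors above. Helper name is hypothetical.
+ */
+static INLINE void
+nan_time_bmap_ctrl_unpack(uint16 ctrl, uint8 *bit_dur, uint8 *period, uint16 *offset)
+{
+	*bit_dur = (uint8)NAN_TIME_BMAP_CTRL_BITDUR(ctrl);
+	*period = (uint8)NAN_TIME_BMAP_CTRL_PERIOD(ctrl);
+	*offset = (uint16)NAN_TIME_BMAP_CTRL_OFFSET(ctrl);
+}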
+
+#define NAN_AVAIL_CHAN_LIST_HDR_LEN 1
+#define NAN_AVAIL_CHAN_LIST_TYPE_BAND 0x00
+#define NAN_AVAIL_CHAN_LIST_TYPE_CHANNEL 0x01
+#define NAN_AVAIL_CHAN_LIST_NON_CONTIG_BW 0x02
+#define NAN_AVAIL_CHAN_LIST_NUM_ENTRIES_MASK 0xF0
+#define NAN_AVAIL_CHAN_LIST_NUM_ENTRIES_SHIFT 4
+#define NAN_AVAIL_CHAN_LIST_NUM_ENTRIES(_ctrl) (((_ctrl) & NAN_AVAIL_CHAN_LIST_NUM_ENTRIES_MASK) \
+ >> NAN_AVAIL_CHAN_LIST_NUM_ENTRIES_SHIFT)
+
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_channel_entry_list_s {
+ uint8 chan_info;
+ uint8 var[0];
+} BWL_POST_PACKED_STRUCT wifi_nan_channel_entry_list_t;
+
+/* define for chan_info */
+#define NAN_CHAN_OP_CLASS_MASK 0x01
+#define NAN_CHAN_NON_CONT_BW_MASK 0x02
+#define NAN_CHAN_RSVD_MASK 0x0C
+#define NAN_CHAN_NUM_ENTRIES_MASK 0xF0
+
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_band_entry_s {
+ uint8 band[1];
+} BWL_POST_PACKED_STRUCT wifi_nan_band_entry_t;
+
+/* Type of Availability: committed */
+#define NAN_ENTRY_CNTRL_TYPE_COMM_AVAIL 0x1
+/* Type of Availability: potential */
+#define NAN_ENTRY_CNTRL_TYPE_POTEN_AVAIL 0x2
+/* Type of Availability: conditional */
+#define NAN_ENTRY_CNTRL_TYPE_COND_AVAIL 0x4
+/* Committed + Potential */
+#define NAN_ENTRY_CNTRL_TYPE_COMM_POTEN \
+ (NAN_ENTRY_CNTRL_TYPE_COMM_AVAIL | NAN_ENTRY_CNTRL_TYPE_POTEN_AVAIL)
+/* Conditional + Potential */
+#define NAN_ENTRY_CNTRL_TYPE_COND_POTEN \
+ (NAN_ENTRY_CNTRL_TYPE_COND_AVAIL | NAN_ENTRY_CNTRL_TYPE_POTEN_AVAIL)
+
+/* Type of Availability */
+#define NAN_ENTRY_CNTRL_TYPE_OF_AVAIL_MASK 0x07
+#define NAN_ENTRY_CNTRL_TYPE_OF_AVAIL_SHIFT 0
+/* Usage Preference */
+#define NAN_ENTRY_CNTRL_USAGE_PREF_MASK 0x18
+#define NAN_ENTRY_CNTRL_USAGE_PREF_SHIFT 3
+/* Utilization */
+#define NAN_ENTRY_CNTRL_UTIL_MASK 0x1E0
+#define NAN_ENTRY_CNTRL_UTIL_SHIFT 5
+
+/* Time Bitmap Control field (section 5.7.18.2.3) */
+
+/* Reserved */
+#define NAN_TIME_BMP_CNTRL_RSVD_MASK 0x01
+#define NAN_TIME_BMP_CNTRL_RSVD_SHIFT 0
+/* Bitmap Len */
+#define NAN_TIME_BMP_CNTRL_BMP_LEN_MASK 0x7E
+#define NAN_TIME_BMP_CNTRL_BMP_LEN_SHIFT 1
+/* Bit Duration */
+#define NAN_TIME_BMP_CNTRL_BIT_DUR_MASK 0x380
+#define NAN_TIME_BMP_CNTRL_BIT_DUR_SHIFT 7
+/* Period */
+#define NAN_TIME_BMP_CNTRL_PERIOD_MASK 0x1C00
+#define NAN_TIME_BMP_CNTRL_PERIOD_SHIFT 10
+/* Start Offset */
+#define NAN_TIME_BMP_CNTRL_START_OFFSET_MASK 0x3FE000
+#define NAN_TIME_BMP_CNTRL_START_OFFSET_SHIFT 13
+/* Reserved */
+#define NAN_TIME_BMP_CNTRL_RESERVED_MASK 0xC00000
+#define NAN_TIME_BMP_CNTRL_RESERVED_SHIFT 22
+
+/* Time Bitmap Control field: Period */
+typedef enum
+{
+ NAN_TIME_BMP_CTRL_PERIOD_128TU = 1,
+ NAN_TIME_BMP_CTRL_PERIOD_256TU = 2,
+ NAN_TIME_BMP_CTRL_PERIOD_512TU = 3,
+ NAN_TIME_BMP_CTRL_PERIOD_1024TU = 4,
+ NAN_TIME_BMP_CTRL_PERIOD_2048U = 5,
+ NAN_TIME_BMP_CTRL_PERIOD_4096U = 6,
+ NAN_TIME_BMP_CTRL_PERIOD_8192U = 7
+} nan_time_bmp_ctrl_repeat_interval_t;
+
+enum
+{
+ NAN_TIME_BMP_BIT_DUR_16TU_IDX = 0,
+ NAN_TIME_BMP_BIT_DUR_32TU_IDX = 1,
+ NAN_TIME_BMP_BIT_DUR_64TU_IDX = 2,
+ NAN_TIME_BMP_BIT_DUR_128TU_IDX = 3
+};
+
+enum
+{
+ NAN_TIME_BMP_BIT_DUR_IDX_0 = 16,
+ NAN_TIME_BMP_BIT_DUR_IDX_1 = 32,
+ NAN_TIME_BMP_BIT_DUR_IDX_2 = 64,
+ NAN_TIME_BMP_BIT_DUR_IDX_3 = 128
+};
+
+enum
+{
+ NAN_TIME_BMP_CTRL_PERIOD_IDX_1 = 128,
+ NAN_TIME_BMP_CTRL_PERIOD_IDX_2 = 256,
+ NAN_TIME_BMP_CTRL_PERIOD_IDX_3 = 512,
+ NAN_TIME_BMP_CTRL_PERIOD_IDX_4 = 1024,
+ NAN_TIME_BMP_CTRL_PERIOD_IDX_5 = 2048,
+ NAN_TIME_BMP_CTRL_PERIOD_IDX_6 = 4096,
+ NAN_TIME_BMP_CTRL_PERIOD_IDX_7 = 8192
+};
+
+/* Channel Entries List field */
+
+/* Type */
+#define NAN_CHAN_ENTRY_TYPE_MASK 0x01
+#define NAN_CHAN_ENTRY_TYPE_SHIFT 0
+/* Channel Entry Length Indication */
+#define NAN_CHAN_ENTRY_LEN_IND_MASK 0x02
+#define NAN_CHAN_ENTRY_LEN_IND_SHIFT 1
+/* Reserved */
+#define NAN_CHAN_ENTRY_RESERVED_MASK 0x0C
+#define NAN_CHAN_ENTRY_RESERVED_SHIFT 2
+/* Number of FAC Band or Channel Entries */
+#define NAN_CHAN_ENTRY_NO_OF_CHAN_ENTRY_MASK 0xF0
+#define NAN_CHAN_ENTRY_NO_OF_CHAN_ENTRY_SHIFT 4
+
+#define NAN_CHAN_ENTRY_TYPE_BANDS 0
+#define NAN_CHAN_ENTRY_TYPE_OPCLASS_CHANS 1
+
+#define NAN_CHAN_ENTRY_BW_LT_80MHZ 0
+#define NAN_CHAN_ENTRY_BW_EQ_160MHZ 1
+
+/*
+ * NDL Attribute WFA Tech. Spec ver 1.0.r12 (section 10.7.19.2)
+ */
+#define NDL_ATTR_IM_MAP_ID_LEN 1
+#define NDL_ATTR_IM_TIME_BMP_CTRL_LEN 2
+#define NDL_ATTR_IM_TIME_BMP_LEN_LEN 1
+
+/*
+ * NDL Control field - Table xx
+ */
+#define NDL_ATTR_CTRL_PEER_ID_PRESENT_MASK 0x01
+#define NDL_ATTR_CTRL_PEER_ID_PRESENT_SHIFT 0
+#define NDL_ATTR_CTRL_IM_SCHED_PRESENT_MASK 0x02
+#define NDL_ATTR_CTRL_IM_SCHED_PRESENT_SHIFT 1
+#define NDL_ATTR_CTRL_NDC_ATTR_PRESENT_MASK 0x04
+#define NDL_ATTR_CTRL_NDC_ATTR_PRESENT_SHIFT 2
+#define NDL_ATTR_CTRL_QOS_ATTR_PRESENT_MASK 0x08
+#define NDL_ATTR_CTRL_QOS_ATTR_PRESENT_SHIFT 3
+#define NDL_ATTR_CTRL_MAX_IDLE_PER_PRESENT_MASK 0x10 /* max idle period */
+#define NDL_ATTR_CTRL_MAX_IDLE_PER_PRESENT_SHIFT 4
+#define NDL_ATTR_CTRL_NDL_TYPE_MASK 0x20 /* NDL type */
+#define NDL_ATTR_CTRL_NDL_TYPE_SHIFT 5
+#define NDL_ATTR_CTRL_NDL_SETUP_REASON_MASK 0xC0 /* NDL Setup Reason */
+#define NDL_ATTR_CTRL_NDL_SETUP_REASON_SHIFT 6
+
+/* NDL Type */
+#define NDL_ATTR_CTRL_NDL_TYPE_S_NDL 0x0 /* S-NDL */
+#define NDL_ATTR_CTRL_NDL_TYPE_P_NDL 0x1 /* P-NDL */
+
+/* NDL setup Reason */
+#define NDL_ATTR_CTRL_NDL_SETUP_REASON_NDP_RANG 0x0 /* NDP or Ranging */
+#define NDL_ATTR_CTRL_NDL_SETUP_REASON_FSD_GAS 0x1 /* FSD using GAS */
+
+#define NAN_NDL_TYPE_MASK 0x0F
+#define NDL_ATTR_TYPE_STATUS_REQUEST 0x00
+#define NDL_ATTR_TYPE_STATUS_RESPONSE 0x01
+#define NDL_ATTR_TYPE_STATUS_CONFIRM 0x02
+#define NDL_ATTR_TYPE_STATUS_CONTINUED 0x00
+#define NDL_ATTR_TYPE_STATUS_ACCEPTED 0x10
+#define NDL_ATTR_TYPE_STATUS_REJECTED 0x20
+
+#define NAN_NDL_TYPE_CHECK(_ndl, x) (((_ndl)->type_status & NAN_NDL_TYPE_MASK) == (x))
+#define NAN_NDL_REQUEST(_ndl) (((_ndl)->type_status & NAN_NDL_TYPE_MASK) == \
+ NDL_ATTR_TYPE_STATUS_REQUEST)
+#define NAN_NDL_RESPONSE(_ndl) (((_ndl)->type_status & NAN_NDL_TYPE_MASK) == \
+ NDL_ATTR_TYPE_STATUS_RESPONSE)
+#define NAN_NDL_CONFIRM(_ndl) (((_ndl)->type_status & NAN_NDL_TYPE_MASK) == \
+ NDL_ATTR_TYPE_STATUS_CONFIRM)
+
+#define NAN_NDL_STATUS_SHIFT 4
+#define NAN_NDL_STATUS_MASK 0xF0
+#define NAN_NDL_CONT(_ndl) (((_ndl)->type_status & NAN_NDL_STATUS_MASK) == \
+ NDL_ATTR_TYPE_STATUS_CONTINUED)
+#define NAN_NDL_ACCEPT(_ndl) (((_ndl)->type_status & NAN_NDL_STATUS_MASK) == \
+ NDL_ATTR_TYPE_STATUS_ACCEPTED)
+#define NAN_NDL_REJECT(_ndl) (((_ndl)->type_status & NAN_NDL_STATUS_MASK) == \
+ NDL_ATTR_TYPE_STATUS_REJECTED)
+#define NAN_NDL_FRM_STATUS(_ndl) \
+ (((_ndl)->type_status & NAN_NDL_STATUS_MASK) >> NAN_NDL_STATUS_SHIFT)
+
+#define NDL_ATTR_CTRL_NONE 0
+#define NDL_ATTR_CTRL_PEER_ID_PRESENT (1 << NDL_ATTR_CTRL_PEER_ID_PRESENT_SHIFT)
+#define NDL_ATTR_CTRL_IMSCHED_PRESENT (1 << NDL_ATTR_CTRL_IM_SCHED_PRESENT_SHIFT)
+#define NDL_ATTR_CTRL_NDC_PRESENT (1 << NDL_ATTR_CTRL_NDC_ATTR_PRESENT_SHIFT)
+#define NDL_ATTR_CTRL_NDL_QOS_PRESENT (1 << NDL_ATTR_CTRL_QOS_ATTR_PRESENT_SHIFT)
+#define NDL_ATTR_CTRL_MAX_IDLE_PER_PRESENT (1 << NDL_ATTR_CTRL_MAX_IDLE_PER_PRESENT_SHIFT)
+
+#define NA_NDL_IS_IMMUT_PRESENT(ndl) (((ndl)->ndl_ctrl) & NDL_ATTR_CTRL_IMSCHED_PRESENT)
+#define NA_NDL_IS_PEER_ID_PRESENT(ndl) (((ndl)->ndl_ctrl) & NDL_ATTR_CTRL_PEER_ID_PRESENT)
+#define NA_NDL_IS_MAX_IDLE_PER_PRESENT(ndl) (((ndl)->ndl_ctrl) & NDL_ATTR_CTRL_MAX_IDLE_PER_PRESENT)
+
+#define NDL_ATTR_PEERID_LEN 1
+#define NDL_ATTR_MAX_IDLE_PERIOD_LEN 2
+
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ndl_attr_s {
+ uint8 id; /* NAN_ATTR_NAN_NDL = 0x17 */
+ uint16 len; /* Length of the fields in the attribute */
+ uint8 dialog_token; /* Identify req and resp */
+ uint8 type_status; /* Bits[3-0] type subfield, Bits[7-4] status subfield */
+ uint8 reason; /* Identifies reject reason */
+ uint8 ndl_ctrl; /* NDL control field */
+ uint8 var[]; /* Optional fields follow */
+} BWL_POST_PACKED_STRUCT wifi_nan_ndl_attr_t;
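+
+/*
+ * Illustrative sketch (not part of the original header): classifying a
+ * received NDL attribute with the type/status macros above. Helper name
+ * is hypothetical.
+ */
+static INLINE bool
+nan_ndl_is_accepted_response(const wifi_nan_ndl_attr_t *ndl)
+{
+	return NAN_NDL_RESPONSE(ndl) && NAN_NDL_ACCEPT(ndl);
+}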
+
+/*
+ * NDL QoS Attribute WFA Tech. Spec ver r26
+ */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ndl_qos_attr_s {
+ uint8 id; /* NAN_ATTR_NAN_NDL_QOS = 24 */
+ uint16 len; /* Length of the attribute field following */
+ uint8 min_slots; /* Min. number of FAW slots needed per DW interval */
+ uint16 max_latency; /* Max interval between non-cont FAW */
+} BWL_POST_PACKED_STRUCT wifi_nan_ndl_qos_attr_t;
+
+/* no preference to min time slots */
+#define NAN_NDL_QOS_MIN_SLOT_NO_PREF 0
+/* no preference to no. of slots between two non-contiguous slots */
+#define NAN_NDL_QOS_MAX_LAT_NO_PREF 0xFFFF
+
+/* Device Capability Attribute */
+
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_dev_cap_s {
+ uint8 id; /* 0x0F */
+ uint16 len; /* Length */
+ uint8 map_id; /* map id */
+ uint16 commit_dw_info; /* Committed DW Info */
+ uint8 bands_supported; /* Supported Bands */
+ uint8 op_mode; /* Operation Mode */
+ uint8 num_antennas; /* Bit 0-3 tx, 4-7 rx */
+ uint16 chan_switch_time; /* Max channel switch time in us */
+ uint8 capabilities; /* DFS Master, Extended key id etc */
+} BWL_POST_PACKED_STRUCT wifi_nan_dev_cap_t;
+
+/* map id related */
+
+/* all maps */
+#define NAN_DEV_CAP_ALL_MAPS_FLAG_MASK 0x1 /* nan default map control */
+#define NAN_DEV_CAP_ALL_MAPS_FLAG_SHIFT 0
+/* map id */
+#define NAN_DEV_CAP_MAPID_MASK 0x1E
+#define NAN_DEV_CAP_MAPID_SHIFT 1
+
+/* Awake DW Info field format */
+
+/* 2.4GHz DW */
+#define NAN_DEV_CAP_AWAKE_DW_2G_MASK 0x07
+/* 5GHz DW */
+#define NAN_DEV_CAP_AWAKE_DW_5G_MASK 0x38
+/* Reserved */
+#define NAN_DEV_CAP_AWAKE_DW_RSVD_MASK 0xC0
+
+/* bit shift for dev cap */
+#define NAN_DEV_CAP_AWAKE_DW_2G_SHIFT 0
+#define NAN_DEV_CAP_AWAKE_DW_5G_SHIFT 3
+
+/* Device Capability Attribute Format */
+
+/* Committed DW Info field format */
+/* 2.4GHz DW */
+#define NAN_DEV_CAP_COMMIT_DW_2G_MASK 0x07
+#define NAN_DEV_CAP_COMMIT_DW_2G_OVERWRITE_MASK 0x3C0
+/* 5GHz DW */
+#define NAN_DEV_CAP_COMMIT_DW_5G_MASK 0x38
+#define NAN_DEV_CAP_COMMIT_DW_5G_OVERWRITE_MASK 0x3C00
+/* Reserved */
+#define NAN_DEV_CAP_COMMIT_DW_RSVD_MASK 0xC000
+/* Committed DW bit shift for dev cap */
+#define NAN_DEV_CAP_COMMIT_DW_2G_SHIFT 0
+#define NAN_DEV_CAP_COMMIT_DW_5G_SHIFT 3
+#define NAN_DEV_CAP_COMMIT_DW_2G_OVERWRITE_SHIFT 6
+#define NAN_DEV_CAP_COMMIT_DW_5G_OVERWRITE_SHIFT 10
+/* Operation Mode */
+#define NAN_DEV_CAP_OP_PHY_MODE_HT_ONLY 0x00
+#define NAN_DEV_CAP_OP_PHY_MODE_VHT 0x01
+#define NAN_DEV_CAP_OP_PHY_MODE_VHT_8080 0x02
+#define NAN_DEV_CAP_OP_PHY_MODE_VHT_160 0x04
+#define NAN_DEV_CAP_OP_PAGING_NDL 0x08
+
+#define NAN_DEV_CAP_OP_MODE_VHT_MASK 0x01
+#define NAN_DEV_CAP_OP_MODE_VHT_SHIFT 0
+#define NAN_DEV_CAP_OP_MODE_VHT8080_MASK 0x02
+#define NAN_DEV_CAP_OP_MODE_VHT8080_SHIFT 1
+#define NAN_DEV_CAP_OP_MODE_VHT160_MASK 0x04
+#define NAN_DEV_CAP_OP_MODE_VHT160_SHIFT 2
+#define NAN_DEV_CAP_OP_MODE_PAGING_NDL_MASK 0x08
+#define NAN_DEV_CAP_OP_MODE_PAGING_NDL_SHIFT 3
+
+#define NAN_DEV_CAP_RX_ANT_SHIFT 4
+#define NAN_DEV_CAP_TX_ANT_MASK 0x0F
+#define NAN_DEV_CAP_RX_ANT_MASK 0xF0
+#define NAN_DEV_CAP_TX_ANT(_ant) ((_ant) & NAN_DEV_CAP_TX_ANT_MASK)
+#define NAN_DEV_CAP_RX_ANT(_ant) (((_ant) & NAN_DEV_CAP_RX_ANT_MASK) \
+ >> NAN_DEV_CAP_RX_ANT_SHIFT)
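+
+/*
+ * Illustrative sketch (not part of the original header): splitting the
+ * num_antennas octet of wifi_nan_dev_cap_t with the accessors above.
+ * Helper name is hypothetical.
+ */
+static INLINE void
+nan_dev_cap_ant_unpack(uint8 num_antennas, uint8 *tx, uint8 *rx)
+{
+	*tx = NAN_DEV_CAP_TX_ANT(num_antennas);
+	*rx = (uint8)NAN_DEV_CAP_RX_ANT(num_antennas);
+}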
+
+/* Device capabilities */
+
+/* DFS master capability */
+#define NAN_DEV_CAP_DFS_MASTER_MASK 0x01
+#define NAN_DEV_CAP_DFS_MASTER_SHIFT 0
+/* extended iv cap */
+#define NAN_DEV_CAP_EXT_KEYID_MASK 0x02
+#define NAN_DEV_CAP_EXT_KEYID_SHIFT 1
+/* NDPE attribute support */
+#define NAN_DEV_CAP_NDPE_ATTR_SUPPORT_MASK 0x08
+#define NAN_DEV_CAP_NDPE_ATTR_SUPPORT(_cap) ((_cap) & NAN_DEV_CAP_NDPE_ATTR_SUPPORT_MASK)
+
+/* Band IDs */
+enum {
+ NAN_BAND_ID_TVWS = 0,
+ NAN_BAND_ID_SIG = 1, /* Sub 1 GHz */
+ NAN_BAND_ID_2G = 2, /* 2.4 GHz */
+ NAN_BAND_ID_3G = 3, /* 3.6 GHz */
+ NAN_BAND_ID_5G = 4, /* 4.9 & 5 GHz */
+ NAN_BAND_ID_60G = 5, /* 60 GHz */
+ NAN_BAND_ID_6G = 6 /* 6 GHz (proprietary) */
+};
+typedef uint8 nan_band_id_t;
+
+/* NAN supported band in device capability */
+#define NAN_DEV_CAP_SUPPORTED_BANDS_2G (1 << NAN_BAND_ID_2G)
+#define NAN_DEV_CAP_SUPPORTED_BANDS_5G (1 << NAN_BAND_ID_5G)
+
+/*
+ * Unaligned schedule attribute section 10.7.19.6 spec. ver r15
+ */
+#define NAN_ULW_ATTR_CTRL_SCHED_ID_MASK 0x000F
+#define NAN_ULW_ATTR_CTRL_SCHED_ID_SHIFT 0
+#define NAN_ULW_ATTR_CTRL_SEQ_ID_MASK 0xFF00
+#define NAN_ULW_ATTR_CTRL_SEQ_ID_SHIFT 8
+
+#define NAN_ULW_OVWR_ALL_MASK 0x01
+#define NAN_ULW_OVWR_ALL_SHIFT 0
+#define NAN_ULW_OVWR_MAP_ID_MASK 0x1E
+#define NAN_ULW_OVWR_MAP_ID_SHIFT 1
+
+#define NAN_ULW_CTRL_TYPE_MASK 0x03
+#define NAN_ULW_CTRL_TYPE_SHIFT 0
+#define NAN_ULW_CTRL_TYPE(ctrl) ((ctrl) & NAN_ULW_CTRL_TYPE_MASK)
+#define NAN_ULW_CTRL_CHAN_AVAIL_MASK 0x04
+#define NAN_ULW_CTRL_CHAN_AVAIL_SHIFT 2
+#define NAN_ULW_CTRL_CHAN_AVAIL(ctrl) (((ctrl) & NAN_ULW_CTRL_CHAN_AVAIL_MASK) \
+	>> NAN_ULW_CTRL_CHAN_AVAIL_SHIFT)
+#define NAN_ULW_CTRL_RX_NSS_MASK 0x78
+#define NAN_ULW_CTRL_RX_NSS_SHIFT 3
+
+#define NAN_ULW_CTRL_TYPE_BAND 0
+#define NAN_ULW_CTRL_TYPE_CHAN_NOAUX 1
+#define NAN_ULW_CTRL_TYPE_CHAN_AUX 2
+
+#define NAN_ULW_CNT_DOWN_NO_EXPIRE 0xFF /* ULWs don't end until next sched update */
+#define NAN_ULW_CNT_DOWN_CANCEL 0x0 /* cancel remaining ulws */
+
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ulw_attr_s {
+ uint8 id;
+ uint16 len;
+ uint16 ctrl;
+ uint32 start; /* low 32 bits of tsf */
+ uint32 dur;
+ uint32 period;
+ uint8 count_down;
+ uint8 overwrite;
+ /*
+ * ulw[0] == optional field ULW control when present.
+ * band ID or channel follows
+ */
+ uint8 ulw_entry[];
+} BWL_POST_PACKED_STRUCT wifi_nan_ulw_attr_t;
+
+/* NAN2 Management Frame (section 5.6) */
+
+/* Public action frame for NAN2 */
+typedef BWL_PRE_PACKED_STRUCT struct nan2_pub_act_frame_s {
+ /* NAN_PUB_AF_CATEGORY 0x04 */
+ uint8 category_id;
+ /* NAN_PUB_AF_ACTION 0x09 */
+ uint8 action_field;
+ /* NAN_OUI 0x50-6F-9A */
+ uint8 oui[DOT11_OUI_LEN];
+ /* NAN_OUI_TYPE TBD */
+ uint8 oui_type;
+ /* NAN_OUI_SUB_TYPE TBD */
+ uint8 oui_sub_type;
+ /* One or more NAN Attributes follow */
+ uint8 data[];
+} BWL_POST_PACKED_STRUCT nan2_pub_act_frame_t;
+
+#define NAN2_PUB_ACT_FRM_SIZE (OFFSETOF(nan2_pub_act_frame_t, data))
+
+/* NAN Action Frame Subtypes */
+/* Subtype-0 is Reserved */
+#define NAN_MGMT_FRM_SUBTYPE_RESERVED 0
+#define NAN_MGMT_FRM_SUBTYPE_INVALID 0
+/* NAN Ranging Request */
+#define NAN_MGMT_FRM_SUBTYPE_RANGING_REQ 1
+/* NAN Ranging Response */
+#define NAN_MGMT_FRM_SUBTYPE_RANGING_RESP 2
+/* NAN Ranging Termination */
+#define NAN_MGMT_FRM_SUBTYPE_RANGING_TERM 3
+/* NAN Ranging Report */
+#define NAN_MGMT_FRM_SUBTYPE_RANGING_RPT 4
+/* NDP Request */
+#define NAN_MGMT_FRM_SUBTYPE_NDP_REQ 5
+/* NDP Response */
+#define NAN_MGMT_FRM_SUBTYPE_NDP_RESP 6
+/* NDP Confirm */
+#define NAN_MGMT_FRM_SUBTYPE_NDP_CONFIRM 7
+/* NDP Key Installment */
+#define NAN_MGMT_FRM_SUBTYPE_NDP_KEY_INST 8
+/* NDP Termination */
+#define NAN_MGMT_FRM_SUBTYPE_NDP_END 9
+/* Schedule Request */
+#define NAN_MGMT_FRM_SUBTYPE_SCHED_REQ 10
+/* Schedule Response */
+#define NAN_MGMT_FRM_SUBTYPE_SCHED_RESP 11
+/* Schedule Confirm */
+#define NAN_MGMT_FRM_SUBTYPE_SCHED_CONF 12
+/* Schedule Update */
+#define NAN_MGMT_FRM_SUBTYPE_SCHED_UPD 13
+
+/* Vendor specific NAN OOB AF subtype */
+#define NAN_MGMT_FRM_SUBTYPE_NAN_OOB_AF 0xDD
+
+#define NAN_SCHEDULE_AF(_naf_subtype) \
+	(((_naf_subtype) >= NAN_MGMT_FRM_SUBTYPE_SCHED_REQ) && \
+	((_naf_subtype) <= NAN_MGMT_FRM_SUBTYPE_SCHED_UPD))
+
+/* Reason code defines */
+#define NAN_REASON_RESERVED 0x0
+#define NAN_REASON_UNSPECIFIED 0x1
+#define NAN_REASON_RESOURCE_LIMIT 0x2
+#define NAN_REASON_INVALID_PARAMS 0x3
+#define NAN_REASON_FTM_PARAM_INCAP 0x4
+#define NAN_REASON_NO_MOVEMENT 0x5
+#define NAN_REASON_INVALID_AVAIL 0x6
+#define NAN_REASON_IMMUT_UNACCEPT 0x7
+#define NAN_REASON_SEC_POLICY 0x8
+#define NAN_REASON_QOS_UNACCEPT 0x9
+#define NAN_REASON_NDP_REJECT 0xa
+#define NAN_REASON_NDL_UNACCEPTABLE 0xb
+
+/* nan 2.0 qos (not attribute) */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ndp_qos_s {
+ uint8 tid; /* traffic identifier */
+ uint16 pkt_size; /* service data pkt size */
+ uint8 data_rate; /* mean data rate */
+ uint8 svc_interval; /* max service interval */
+} BWL_POST_PACKED_STRUCT wifi_nan_ndp_qos_t;
+
+/* NDP control bitmap defines */
+#define NAN_NDP_CTRL_CONFIRM_REQUIRED 0x01
+#define NAN_NDP_CTRL_SECURTIY_PRESENT 0x04
+#define NAN_NDP_CTRL_PUB_ID_PRESENT 0x08
+#define NAN_NDP_CTRL_RESP_NDI_PRESENT 0x10
+#define NAN_NDP_CTRL_SPEC_INFO_PRESENT 0x20
+#define NAN_NDP_CTRL_RESERVED 0xA0
+
+/* Used for both NDP Attribute and NDPE Attribute, since the structures are identical */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ndp_attr_s {
+ uint8 id; /* NDP: 0x10, NDPE: 0x29 */
+ uint16 len; /* length */
+ uint8 dialog_token; /* dialog token */
+ uint8 type_status; /* bits 0-3 type, 4-7 status */
+ uint8 reason; /* reason code */
+ struct ether_addr init_ndi; /* ndp initiator's data interface address */
+	uint8 ndp_id;		/* ndp identifier (created by initiator) */
+ uint8 control; /* ndp control field */
+ uint8 var[]; /* Optional fields follow */
+} BWL_POST_PACKED_STRUCT wifi_nan_ndp_attr_t;
+/* NDP attribute type and status macros */
+#define NAN_NDP_TYPE_MASK 0x0F
+#define NAN_NDP_TYPE_REQUEST 0x0
+#define NAN_NDP_TYPE_RESPONSE 0x1
+#define NAN_NDP_TYPE_CONFIRM 0x2
+#define NAN_NDP_TYPE_SECURITY 0x3
+#define NAN_NDP_TYPE_TERMINATE 0x4
+#define NAN_NDP_REQUEST(_ndp) (((_ndp)->type_status & NAN_NDP_TYPE_MASK) == NAN_NDP_TYPE_REQUEST)
+#define NAN_NDP_RESPONSE(_ndp) (((_ndp)->type_status & NAN_NDP_TYPE_MASK) == NAN_NDP_TYPE_RESPONSE)
+#define NAN_NDP_CONFIRM(_ndp) (((_ndp)->type_status & NAN_NDP_TYPE_MASK) == NAN_NDP_TYPE_CONFIRM)
+#define NAN_NDP_SECURITY_INST(_ndp) (((_ndp)->type_status & NAN_NDP_TYPE_MASK) == \
+ NAN_NDP_TYPE_SECURITY)
+#define NAN_NDP_TERMINATE(_ndp) (((_ndp)->type_status & NAN_NDP_TYPE_MASK) == \
+ NAN_NDP_TYPE_TERMINATE)
+#define NAN_NDP_STATUS_SHIFT 4
+#define NAN_NDP_STATUS_MASK 0xF0
+#define NAN_NDP_STATUS_CONT (0 << NAN_NDP_STATUS_SHIFT)
+#define NAN_NDP_STATUS_ACCEPT (1 << NAN_NDP_STATUS_SHIFT)
+#define NAN_NDP_STATUS_REJECT (2 << NAN_NDP_STATUS_SHIFT)
+#define NAN_NDP_CONT(_ndp) (((_ndp)->type_status & NAN_NDP_STATUS_MASK) == NAN_NDP_STATUS_CONT)
+#define NAN_NDP_ACCEPT(_ndp) (((_ndp)->type_status & NAN_NDP_STATUS_MASK) == \
+ NAN_NDP_STATUS_ACCEPT)
+#define NAN_NDP_REJECT(_ndp) (((_ndp)->type_status & NAN_NDP_STATUS_MASK) == \
+ NAN_NDP_STATUS_REJECT)
+
+#define NAN_NDP_FRM_STATUS(_ndp) \
+ (((_ndp)->type_status & NAN_NDP_STATUS_MASK) >> NAN_NDP_STATUS_SHIFT)
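+
+/*
+ * Illustrative sketch (not part of the original header): extracting the
+ * type subfield of an NDP/NDPE attribute with the macros above. Helper
+ * name is hypothetical.
+ */
+static INLINE uint8
+nan_ndp_frame_type(const wifi_nan_ndp_attr_t *ndp)
+{
+	return (uint8)(ndp->type_status & NAN_NDP_TYPE_MASK);
+}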
+
+/* NDP Setup Status */
+#define NAN_NDP_SETUP_STATUS_OK 1
+#define NAN_NDP_SETUP_STATUS_FAIL 0
+#define NAN_NDP_SETUP_STATUS_REJECT 2
+
+/* NDPE TLV list */
+#define NDPE_TLV_TYPE_IPV6 0x00
+#define NDPE_TLV_TYPE_SVC_INFO 0x01
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ndpe_tlv_s {
+	uint8 type;		/* NDPE TLV type */
+	uint16 length;		/* length of the following data */
+ uint8 data[];
+} BWL_POST_PACKED_STRUCT wifi_nan_ndpe_tlv_t;
+
+/* Rng setup attribute type and status macros */
+#define NAN_RNG_TYPE_MASK 0x0F
+#define NAN_RNG_TYPE_REQUEST 0x0
+#define NAN_RNG_TYPE_RESPONSE 0x1
+#define NAN_RNG_TYPE_TERMINATE 0x2
+
+#define NAN_RNG_STATUS_SHIFT 4
+#define NAN_RNG_STATUS_MASK 0xF0
+#define NAN_RNG_STATUS_ACCEPT (0 << NAN_RNG_STATUS_SHIFT)
+#define NAN_RNG_STATUS_REJECT (1 << NAN_RNG_STATUS_SHIFT)
+
+#define NAN_RNG_ACCEPT(_rsua) (((_rsua)->type_status & NAN_RNG_STATUS_MASK) == \
+ NAN_RNG_STATUS_ACCEPT)
+#define NAN_RNG_REJECT(_rsua) (((_rsua)->type_status & NAN_RNG_STATUS_MASK) == \
+ NAN_RNG_STATUS_REJECT)
+
+/* schedule entry */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_sched_entry_s {
+ uint8 map_id; /* map id */
+ uint16 tbmp_ctrl; /* time bitmap control */
+ uint8 tbmp_len; /* time bitmap len */
+ uint8 tbmp[]; /* time bitmap - Optional */
+} BWL_POST_PACKED_STRUCT wifi_nan_sched_entry_t;
+
+#define NAN_SCHED_ENTRY_MAPID_MASK 0x0F
+#define NAN_SCHED_ENTRY_MIN_SIZE OFFSETOF(wifi_nan_sched_entry_t, tbmp)
+#define NAN_SCHED_ENTRY_SIZE(_entry) (NAN_SCHED_ENTRY_MIN_SIZE + (_entry)->tbmp_len)
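+
+/*
+ * Illustrative sketch (not part of the original header): advancing to the
+ * next entry in a packed schedule entry list via NAN_SCHED_ENTRY_SIZE.
+ * Helper name is hypothetical; bounds checks are the caller's job.
+ */
+static INLINE const wifi_nan_sched_entry_t *
+nan_sched_entry_next(const wifi_nan_sched_entry_t *entry)
+{
+	return (const wifi_nan_sched_entry_t *)
+		((const uint8 *)entry + NAN_SCHED_ENTRY_SIZE(entry));
+}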
+
+/* for dev cap, element container etc. */
+#define NAN_DEV_ELE_MAPID_CTRL_MASK 0x1
+#define NAN_DEV_ELE_MAPID_CTRL_SHIFT 0
+#define NAN_DEV_ELE_MAPID_MASK 0x1E
+#define NAN_DEV_ELE_MAPID_SHIFT 1
+
+#define NAN_DEV_ELE_MAPID_CTRL_SET(_mapid_field, value) \
+ do {(_mapid_field) &= ~NAN_DEV_ELE_MAPID_CTRL_MASK; \
+ (_mapid_field) |= ((value << NAN_DEV_ELE_MAPID_CTRL_SHIFT) & \
+ NAN_DEV_ELE_MAPID_CTRL_MASK); \
+	} while (0)
+
+#define NAN_DEV_ELE_MAPID_CTRL_GET(_mapid_field) \
+ (((_mapid_field) & NAN_DEV_ELE_MAPID_CTRL_MASK) >> \
+ NAN_DEV_ELE_MAPID_CTRL_SHIFT)
+
+#define NAN_DEV_ELE_MAPID_SET(_mapid_field, value) \
+ do {(_mapid_field) &= ~NAN_DEV_ELE_MAPID_MASK; \
+ (_mapid_field) |= ((value << NAN_DEV_ELE_MAPID_SHIFT) & \
+ NAN_DEV_ELE_MAPID_MASK); \
+	} while (0)
+
+#define NAN_DEV_ELE_MAPID_GET(_mapid_field) \
+ (((_mapid_field) & NAN_DEV_ELE_MAPID_MASK) >> \
+ NAN_DEV_ELE_MAPID_SHIFT)
+
+/* schedule entry map id handling */
+#define NAN_SCHED_ENTRY_MAPID_MASK 0x0F
+#define NAN_SCHED_ENTRY_MAPID_SHIFT 0
+
+#define NAN_SCHED_ENTRY_MAPID_SET(_mapid_field, value) \
+ do {(_mapid_field) &= ~NAN_SCHED_ENTRY_MAPID_MASK; \
+ (_mapid_field) |= ((value << NAN_SCHED_ENTRY_MAPID_SHIFT) & \
+ NAN_SCHED_ENTRY_MAPID_MASK); \
+	} while (0)
+
+#define NAN_SCHED_ENTRY_MAPID_GET(_mapid_field) \
+ (((_mapid_field) & NAN_SCHED_ENTRY_MAPID_MASK) >> \
+ NAN_SCHED_ENTRY_MAPID_SHIFT)
+
+/* NDC attribute */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ndc_attr_s {
+ uint8 id;
+ uint16 len;
+ uint8 ndc_id[NAN_DATA_NDC_ID_SIZE];
+ uint8 attr_cntrl;
+ uint8 var[];
+} BWL_POST_PACKED_STRUCT wifi_nan_ndc_attr_t;
+
+/* Attribute control subfield of NDC attr */
+/* Proposed NDC */
+#define NAN_NDC_ATTR_PROPOSED_NDC_MASK 0x1
+#define NAN_NDC_ATTR_PROPOSED_NDC_SHIFT 0
+
+/* get & set */
+#define NAN_NDC_GET_PROPOSED_FLAG(_attr) \
+ (((_attr)->attr_cntrl & NAN_NDC_ATTR_PROPOSED_NDC_MASK) >> \
+ NAN_NDC_ATTR_PROPOSED_NDC_SHIFT)
+#define NAN_NDC_SET_PROPOSED_FLAG(_attr, value) \
+ do {((_attr)->attr_cntrl &= ~NAN_NDC_ATTR_PROPOSED_NDC_MASK); \
+ ((_attr)->attr_cntrl |= \
+ (((value) << NAN_NDC_ATTR_PROPOSED_NDC_SHIFT) & NAN_NDC_ATTR_PROPOSED_NDC_MASK)); \
+ } while (0)
+
+/* Service descriptor extension attribute */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_svc_desc_ext_attr_s {
+ /* Attribute ID - 0x11 */
+ uint8 id;
+ /* Length of the following fields in the attribute */
+ uint16 len;
+ /* Instance id of associated service descriptor attribute */
+ uint8 instance_id;
+ /* SDE control field */
+ uint16 control;
+ /* range limit, svc upd indicator etc. */
+ uint8 var[];
+} BWL_POST_PACKED_STRUCT wifi_nan_svc_desc_ext_attr_t;
+
+#define NAN_SDE_ATTR_MIN_LEN OFFSETOF(wifi_nan_svc_desc_ext_attr_t, var)
+#define NAN_SDE_ATTR_RANGE_LEN 4
+#define NAN_SDE_ATTR_SUI_LEN 1
+#define NAN_SDE_ATTR_INFO_LEN_PARAM_LEN 2
+#define NAN_SDE_ATTR_RANGE_INGRESS_LEN 2
+#define NAN_SDE_ATTR_RANGE_EGRESS_LEN 2
+#define NAN_SDE_ATTR_CTRL_LEN 2
+/* max length of variable length field (matching filter, service response filter,
+ * or service info) in service descriptor attribute
+ */
+#define NAN_DISC_SDA_FIELD_MAX_LEN 255
+
+/* SDEA control field bit definitions and access macros */
+#define NAN_SDE_CF_FSD_REQUIRED (1 << 0)
+#define NAN_SDE_CF_FSD_GAS (1 << 1)
+#define NAN_SDE_CF_DP_REQUIRED (1 << 2)
+#define NAN_SDE_CF_DP_TYPE (1 << 3)
+#define NAN_SDE_CF_MULTICAST_TYPE (1 << 4)
+#define NAN_SDE_CF_QOS_REQUIRED (1 << 5)
+#define NAN_SDE_CF_SECURITY_REQUIRED (1 << 6)
+#define NAN_SDE_CF_RANGING_REQUIRED (1 << 7)
+#define NAN_SDE_CF_RANGE_PRESENT (1 << 8)
+#define NAN_SDE_CF_SVC_UPD_IND_PRESENT (1 << 9)
+/* Using Reserved Bits as per Spec */
+#define NAN_SDE_CF_LIFE_CNT_PUB_RX (1 << 15)
+#define NAN_SDE_FSD_REQUIRED(_sde) ((_sde)->control & NAN_SDE_CF_FSD_REQUIRED)
+#define NAN_SDE_FSD_GAS(_sde) ((_sde)->control & NAN_SDE_CF_FSD_GAS)
+#define NAN_SDE_DP_REQUIRED(_sde) ((_sde)->control & NAN_SDE_CF_DP_REQUIRED)
+#define NAN_SDE_DP_MULTICAST(_sde) ((_sde)->control & NAN_SDE_CF_DP_TYPE)
+#define NAN_SDE_MULTICAST_M_TO_M(_sde) ((_sde)->control & NAN_SDE_CF_MULTICAST_TYPE)
+#define NAN_SDE_QOS_REQUIRED(_sde) ((_sde)->control & NAN_SDE_CF_QOS_REQUIRED)
+#define NAN_SDE_SECURITY_REQUIRED(_sde) ((_sde)->control & NAN_SDE_CF_SECURITY_REQUIRED)
+#define NAN_SDE_RANGING_REQUIRED(_sde) ((_sde)->control & NAN_SDE_CF_RANGING_REQUIRED)
+#define NAN_SDE_RANGE_PRESENT(_sde) ((_sde)->control & NAN_SDE_CF_RANGE_PRESENT)
+#define NAN_SDE_SVC_UPD_IND_PRESENT(_sde) ((_sde)->control & NAN_SDE_CF_SVC_UPD_IND_PRESENT)
+#define NAN_SDE_LIFE_COUNT_FOR_PUB_RX(_sde) ((_sde) & NAN_SDE_CF_LIFE_CNT_PUB_RX)
+
+/* nan2 security */
+
+/*
+ * Cipher suite information Attribute.
+ * WFA Tech. Spec ver 1.0.r21 (section 10.7.24.2)
+ */
+#define NAN_SEC_CIPHER_SUITE_CAP_REPLAY_4 0
+#define NAN_SEC_CIPHER_SUITE_CAP_REPLAY_16 (1 << 0)
+
+/* enum security algo */
+enum nan_sec_csid {
+ NAN_SEC_ALGO_NONE = 0,
+ NAN_SEC_ALGO_NCS_SK_CCM_128 = 1, /* CCMP 128 */
+ NAN_SEC_ALGO_NCS_SK_GCM_256 = 2, /* GCMP 256 */
+ NAN_SEC_ALGO_LAST = 3
+};
+typedef int8 nan_sec_csid_e;
+
+/* nan2 cipher suite attribute field */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_sec_cipher_suite_field_s {
+ uint8 cipher_suite_id;
+ uint8 inst_id; /* Instance Id */
+} BWL_POST_PACKED_STRUCT wifi_nan_sec_cipher_suite_field_t;
+
+/* nan2 cipher suite information attribute field */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_sec_cipher_suite_info_attr_s {
+ uint8 attr_id; /* 0x22 - NAN_ATTR_CIPHER_SUITE_INFO */
+ uint16 len;
+ uint8 capabilities;
+ uint8 var[]; /* cipher suite list */
+} BWL_POST_PACKED_STRUCT wifi_nan_sec_cipher_suite_info_attr_t;
+
+/*
+ * Security context identifier attribute
+ * WFA Tech. Spec ver 1.0.r21 (section 10.7.24.4)
+ */
+
+#define NAN_SEC_CTX_ID_TYPE_PMKID (1 << 0)
+
+/* nan2 security context identifier attribute field */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_sec_ctx_id_field_s {
+ uint16 sec_ctx_id_type_len; /* length of security ctx identifier */
+ uint8 sec_ctx_id_type;
+ uint8 inst_id; /* Instance Id */
+ uint8 var[]; /* security ctx identifier */
+} BWL_POST_PACKED_STRUCT wifi_nan_sec_ctx_id_field_t;
+
+/* nan2 security context identifier info attribute field */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_sec_ctx_id_info_attr_s {
+ uint8 attr_id; /* 0x23 - NAN_ATTR_SEC_CTX_ID_INFO */
+ uint16 len;
+ uint8 var[]; /* security context identifier list */
+} BWL_POST_PACKED_STRUCT wifi_nan_sec_ctx_id_info_attr_t;
+
+/*
+ * Nan shared key descriptor attribute
+ * WFA Tech. Spec ver 23
+ */
+
+#define NAN_SEC_NCSSK_DESC_REPLAY_CNT_LEN 8
+#define NAN_SEC_NCSSK_DESC_KEY_NONCE_LEN 32
+
+/* nan shared key descriptor attr field */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_sec_ncssk_key_desc_attr_s {
+ uint8 attr_id; /* 0x24 - NAN_ATTR_SHARED_KEY_DESC */
+ uint16 len;
+ uint8 inst_id; /* Publish service instance ID */
+ uint8 desc_type;
+ uint16 key_info;
+ uint16 key_len;
+ uint8 key_replay_cntr[NAN_SEC_NCSSK_DESC_REPLAY_CNT_LEN];
+ uint8 key_nonce[NAN_SEC_NCSSK_DESC_KEY_NONCE_LEN];
+ uint8 reserved[32]; /* EAPOL IV + Key RSC + Rsvd fields in EAPOL Key */
+ uint8 mic[]; /* mic + key data len + key data */
+} BWL_POST_PACKED_STRUCT wifi_nan_sec_ncssk_key_desc_attr_t;
+
+/* Key Info fields */
+#define NAN_SEC_NCSSK_DESC_MASK 0x7
+#define NAN_SEC_NCSSK_DESC_SHIFT 0
+#define NAN_SEC_NCSSK_DESC_KEY_TYPE_MASK 0x8
+#define NAN_SEC_NCSSK_DESC_KEY_TYPE_SHIFT 3
+#define NAN_SEC_NCSSK_DESC_KEY_INSTALL_MASK 0x40
+#define NAN_SEC_NCSSK_DESC_KEY_INSTALL_SHIFT 6
+#define NAN_SEC_NCSSK_DESC_KEY_ACK_MASK 0x80
+#define NAN_SEC_NCSSK_DESC_KEY_ACK_SHIFT 7
+#define NAN_SEC_NCSSK_DESC_KEY_MIC_MASK 0x100
+#define NAN_SEC_NCSSK_DESC_KEY_MIC_SHIFT 8
+#define NAN_SEC_NCSSK_DESC_KEY_SEC_MASK 0x200
+#define NAN_SEC_NCSSK_DESC_KEY_SEC_SHIFT 9
+#define NAN_SEC_NCSSK_DESC_KEY_ERR_MASK 0x400
+#define NAN_SEC_NCSSK_DESC_KEY_ERR_SHIFT 10
+#define NAN_SEC_NCSSK_DESC_KEY_REQ_MASK 0x800
+#define NAN_SEC_NCSSK_DESC_KEY_REQ_SHIFT 11
+#define NAN_SEC_NCSSK_DESC_KEY_ENC_KEY_MASK 0x1000
+#define NAN_SEC_NCSSK_DESC_KEY_ENC_KEY_SHIFT 12
+#define NAN_SEC_NCSSK_DESC_KEY_SMK_MSG_MASK 0x2000
+#define NAN_SEC_NCSSK_DESC_KEY_SMK_MSG_SHIFT 13
+
+/* Key Info get & set macros */
+#define NAN_SEC_NCSSK_KEY_DESC_VER_GET(_key_info) \
+ (((_key_info) & NAN_SEC_NCSSK_DESC_MASK) >> NAN_SEC_NCSSK_DESC_SHIFT)
+#define NAN_SEC_NCSSK_KEY_DESC_VER_SET(_val, _key_info) \
+ do {(_key_info) &= ~NAN_SEC_NCSSK_DESC_MASK; \
+ (_key_info) |= (((_val) << NAN_SEC_NCSSK_DESC_SHIFT) & \
+ NAN_SEC_NCSSK_DESC_MASK);} while (0)
+#define NAN_SEC_NCSSK_DESC_KEY_TYPE_GET(_key_info) \
+ (((_key_info) & NAN_SEC_NCSSK_DESC_KEY_TYPE_MASK) >> NAN_SEC_NCSSK_DESC_KEY_TYPE_SHIFT)
+#define NAN_SEC_NCSSK_DESC_KEY_TYPE_SET(_val, _key_info) \
+ do {(_key_info) &= ~NAN_SEC_NCSSK_DESC_KEY_TYPE_MASK; \
+ (_key_info) |= (((_val) << NAN_SEC_NCSSK_DESC_KEY_TYPE_SHIFT) & \
+ NAN_SEC_NCSSK_DESC_KEY_TYPE_MASK);} while (0)
+#define NAN_SEC_NCSSK_DESC_KEY_INSTALL_GET(_key_info) \
+ (((_key_info) & NAN_SEC_NCSSK_DESC_KEY_INSTALL_MASK) >> \
+ NAN_SEC_NCSSK_DESC_KEY_INSTALL_SHIFT)
+#define NAN_SEC_NCSSK_DESC_KEY_INSTALL_SET(_val, _key_info) \
+ do {(_key_info) &= ~NAN_SEC_NCSSK_DESC_KEY_INSTALL_MASK; \
+ (_key_info) |= (((_val) << NAN_SEC_NCSSK_DESC_KEY_INSTALL_SHIFT) & \
+ NAN_SEC_NCSSK_DESC_KEY_INSTALL_MASK);} while (0)
+#define NAN_SEC_NCSSK_DESC_KEY_ACK_GET(_key_info) \
+ (((_key_info) & NAN_SEC_NCSSK_DESC_KEY_ACK_MASK) >> NAN_SEC_NCSSK_DESC_KEY_ACK_SHIFT)
+#define NAN_SEC_NCSSK_DESC_KEY_ACK_SET(_val, _key_info) \
+ do {(_key_info) &= ~NAN_SEC_NCSSK_DESC_KEY_ACK_MASK; \
+ (_key_info) |= (((_val) << NAN_SEC_NCSSK_DESC_KEY_ACK_SHIFT) & \
+ NAN_SEC_NCSSK_DESC_KEY_ACK_MASK);} while (0)
+#define NAN_SEC_NCSSK_DESC_KEY_MIC_GET(_key_info) \
+ (((_key_info) & NAN_SEC_NCSSK_DESC_KEY_MIC_MASK) >> NAN_SEC_NCSSK_DESC_KEY_MIC_SHIFT)
+#define NAN_SEC_NCSSK_DESC_KEY_MIC_SET(_val, _key_info) \
+ do {(_key_info) &= ~NAN_SEC_NCSSK_DESC_KEY_MIC_MASK; \
+ (_key_info) |= (((_val) << NAN_SEC_NCSSK_DESC_KEY_MIC_SHIFT) & \
+ NAN_SEC_NCSSK_DESC_KEY_MIC_MASK);} while (0)
+#define NAN_SEC_NCSSK_DESC_KEY_SEC_GET(_key_info) \
+ (((_key_info) & NAN_SEC_NCSSK_DESC_KEY_SEC_MASK) >> NAN_SEC_NCSSK_DESC_KEY_SEC_SHIFT)
+#define NAN_SEC_NCSSK_DESC_KEY_SEC_SET(_val, _key_info) \
+ do {(_key_info) &= ~NAN_SEC_NCSSK_DESC_KEY_SEC_MASK; \
+ (_key_info) |= (((_val) << NAN_SEC_NCSSK_DESC_KEY_SEC_SHIFT) & \
+ NAN_SEC_NCSSK_DESC_KEY_SEC_MASK);} while (0)
+#define NAN_SEC_NCSSK_DESC_KEY_ERR_GET(_key_info) \
+ (((_key_info) & NAN_SEC_NCSSK_DESC_KEY_ERR_MASK) >> NAN_SEC_NCSSK_DESC_KEY_ERR_SHIFT)
+#define NAN_SEC_NCSSK_DESC_KEY_ERR_SET(_val, _key_info) \
+ do {(_key_info) &= ~NAN_SEC_NCSSK_DESC_KEY_ERR_MASK; \
+ (_key_info) |= (((_val) << NAN_SEC_NCSSK_DESC_KEY_ERR_SHIFT) & \
+ NAN_SEC_NCSSK_DESC_KEY_ERR_MASK);} while (0)
+#define NAN_SEC_NCSSK_DESC_KEY_REQ_GET(_key_info) \
+ (((_key_info) & NAN_SEC_NCSSK_DESC_KEY_REQ_MASK) >> NAN_SEC_NCSSK_DESC_KEY_REQ_SHIFT)
+#define NAN_SEC_NCSSK_DESC_KEY_REQ_SET(_val, _key_info) \
+ do {(_key_info) &= ~NAN_SEC_NCSSK_DESC_KEY_REQ_MASK; \
+ (_key_info) |= (((_val) << NAN_SEC_NCSSK_DESC_KEY_REQ_SHIFT) & \
+ NAN_SEC_NCSSK_DESC_KEY_REQ_MASK);} while (0)
+#define NAN_SEC_NCSSK_DESC_KEY_ENC_KEY_GET(_key_info) \
+ (((_key_info) & NAN_SEC_NCSSK_DESC_KEY_ENC_KEY_MASK) >> \
+ NAN_SEC_NCSSK_DESC_KEY_ENC_KEY_SHIFT)
+#define NAN_SEC_NCSSK_DESC_KEY_ENC_KEY_SET(_val, _key_info) \
+ do {(_key_info) &= ~NAN_SEC_NCSSK_DESC_KEY_ENC_KEY_MASK; \
+ (_key_info) |= (((_val) << NAN_SEC_NCSSK_DESC_KEY_ENC_KEY_SHIFT) & \
+ NAN_SEC_NCSSK_DESC_KEY_ENC_KEY_MASK);} while (0)
+#define NAN_SEC_NCSSK_DESC_KEY_SMK_MSG_GET(_key_info) \
+ (((_key_info) & NAN_SEC_NCSSK_DESC_KEY_SMK_MSG_MASK) >> \
+ NAN_SEC_NCSSK_DESC_KEY_SMK_MSG_SHIFT)
+#define NAN_SEC_NCSSK_DESC_KEY_SMK_MSG_SET(_val, _key_info) \
+ do {(_key_info) &= ~NAN_SEC_NCSSK_DESC_KEY_SMK_MSG_MASK; \
+ (_key_info) |= (((_val) << NAN_SEC_NCSSK_DESC_KEY_SMK_MSG_SHIFT) & \
+ NAN_SEC_NCSSK_DESC_KEY_SMK_MSG_MASK);} while (0)
+
+#define NAN_SEC_NCSSK_IEEE80211_KDESC_TYPE 2 /* IEEE 802.11 Key Descriptor Type */
+#define NAN_SEC_NCSSK_KEY_DESC_VER 0 /* NCSSK-128/256 */
+#define NAN_SEC_NCSSK_KEY_TYPE_PAIRWISE 1 /* Pairwise */
+#define NAN_SEC_NCSSK_LIFETIME_KDE 7 /* Lifetime KDE type */
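+
+/*
+ * Illustrative sketch (not part of the original header): composing a
+ * key_info value with the SET macros above, using the descriptor version
+ * and pairwise key type defined above. The helper name and chosen bits
+ * (only Key Ack set, M1-style) are hypothetical.
+ */
+static INLINE uint16
+nan_sec_ncssk_key_info_example(void)
+{
+	uint16 key_info = 0;
+	NAN_SEC_NCSSK_KEY_DESC_VER_SET(NAN_SEC_NCSSK_KEY_DESC_VER, key_info);
+	NAN_SEC_NCSSK_DESC_KEY_TYPE_SET(NAN_SEC_NCSSK_KEY_TYPE_PAIRWISE, key_info);
+	NAN_SEC_NCSSK_DESC_KEY_ACK_SET(1, key_info);
+	return key_info;
+}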
+
+/* TODO include MTK related attributes */
+
+/* NAN Multicast service group(NMSG) definitions */
+/* Length of NMSG_ID -- (NDI * 2^16 + pub_id * 2^8 + Random_factor) */
+#define NAN_NMSG_ID_LEN 8
+
+#define NAN_NMSG_TYPE_MASK 0x0F
+#define NMSG_ATTR_TYPE_STATUS_REQUEST 0x00
+#define NMSG_ATTR_TYPE_STATUS_RESPONSE 0x01
+#define NMSG_ATTR_TYPE_STATUS_CONFIRM 0x02
+#define NMSG_ATTR_TYPE_STATUS_SEC_INSTALL 0x03
+#define NMSG_ATTR_TYPE_STATUS_TERMINATE 0x04
+#define NMSG_ATTR_TYPE_STATUS_IMPLICIT_ENROL 0x05
+
+#define NMSG_ATTR_TYPE_STATUS_CONTINUED 0x00
+#define NMSG_ATTR_TYPE_STATUS_ACCEPTED 0x10
+#define NMSG_ATTR_TYPE_STATUS_REJECTED 0x20
+
+#define NMSG_CTRL_PUB_ID_PRESENT 0x0001
+#define NMSG_CTRL_NMSG_ID_PRESENT 0x0002
+#define NMSG_CTRL_SECURITY_PRESENT 0x0004
+#define NMSG_CTRL_MANY_TO_MANY_PRESENT 0x0008
+#define NMSG_CTRL_SVC_INFO_PRESENT 0x0010
+
+/* NMSG attribute */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_nmsg_attr_s {
+ uint8 id; /* Attribute ID - 0x11 */
+ uint16 len; /* Length including pubid, NMSGID and svc info */
+ uint8 dialog_token;
+ uint8 type_status; /* Type and Status field byte */
+ uint8 reason_code;
+ uint8 mc_id; /* Multicast id similar to NDPID */
+ uint8 nmsg_ctrl; /* NMSG control field */
+ /* Optional publish id, NMSGID and svc info are included in var[] */
+ uint8 var[0];
+} BWL_POST_PACKED_STRUCT wifi_nan_nmsg_attr_t;
+
+#define NMSG_ATTR_MCAST_SCHED_MAP_ID_MASK 0x1E
+#define NMSG_ATTR_MCAST_SCHED_MAP_ID_SHIFT 1
+#define NMSG_ATTR_MCAST_SCHED_TIME_MAP_MASK 0x20
+#define NMSG_ATTR_MCAST_SCHED_TIME_MAP_SHIFT 5
+
+/* NAN Multicast Schedule attribute structure */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_mcast_sched_attr_s {
+ uint8 id; /* 0x16 */
+ uint16 len;
+ uint8 nmsg_id[NAN_NMSG_ID_LEN];
+ uint8 attr_cntrl;
+ uint8 sched_own[ETHER_ADDR_LEN];
+ uint8 var[]; /* multicast sched entry list (schedule_entry_list) */
+} BWL_POST_PACKED_STRUCT wifi_nan_mcast_sched_attr_t;
+
+/* FAC Channel Entry (section 10.7.19.1.5) */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_fac_chan_entry_s {
+ uint8 oper_class; /* Operating Class */
+ uint16 chan_bitmap; /* Channel Bitmap */
+ uint8 primary_chan_bmp; /* Primary Channel Bitmap */
+ uint16 aux_chan; /* Auxiliary Channel bitmap */
+} BWL_POST_PACKED_STRUCT wifi_nan_fac_chan_entry_t;
+
+/* TODO move this from nan.h */
+#define NAN_ALL_NAN_MGMT_FRAMES (NAN_FRM_SCHED_AF | \
+ NAN_FRM_NDP_AF | NAN_FRM_NDL_AF | \
+ NAN_FRM_DISC_BCN | NAN_FRM_SYNC_BCN | \
+ NAN_FRM_SVC_DISC | NAN_FRM_RNG_REQ_AF | \
+ NAN_FRM_RNG_RESP_AF | NAN_FRM_RNG_REPORT_AF | \
+ NAN_FRM_RNG_TERM_AF)
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _NAN_H_ */
diff --git a/bcmdhd.101.10.361.x/include/nci.h b/bcmdhd.101.10.361.x/include/nci.h
new file mode 100755
index 0000000..188016a
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/nci.h
@@ -0,0 +1,96 @@
+/*
+ * Misc utility routines for accessing chip-specific features
+ * of the BOOKER NCI (non coherent interconnect) based Broadcom chips.
+ *
+ * Broadcom Proprietary and Confidential. Copyright (C) 2020,
+ * All Rights Reserved.
+ *
+ * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom;
+ * the contents of this file may not be disclosed to third parties,
+ * copied or duplicated in any form, in whole or in part, without
+ * the prior written permission of Broadcom.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Proprietary:>>
+ *
+ */
+
+#ifndef _NCI_H
+#define _NCI_H
+
+#include <siutils.h>
+
+#ifdef SOCI_NCI_BUS
+void nci_uninit(void *nci);
+uint32 nci_scan(si_t *sih);
+void nci_dump_erom(void *nci);
+void* nci_init(si_t *sih, chipcregs_t *cc, uint bustype);
+volatile void *nci_setcore(si_t *sih, uint coreid, uint coreunit);
+volatile void *nci_setcoreidx(si_t *sih, uint coreidx);
+uint nci_findcoreidx(const si_t *sih, uint coreid, uint coreunit);
+volatile uint32 *nci_corereg_addr(si_t *sih, uint coreidx, uint regoff);
+uint nci_corereg_writeonly(si_t *sih, uint coreidx, uint regoff, uint mask, uint val);
+uint nci_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val);
+uint nci_corerev_minor(const si_t *sih);
+uint nci_corerev(const si_t *sih);
+uint nci_corevendor(const si_t *sih);
+uint nci_get_wrap_reg(const si_t *sih, uint32 offset, uint32 mask, uint32 val);
+void nci_core_reset(const si_t *sih, uint32 bits, uint32 resetbits);
+void nci_core_disable(const si_t *sih, uint32 bits);
+bool nci_iscoreup(const si_t *sih);
+uint32 nci_coreid(const si_t *sih, uint coreidx);
+uint nci_numcoreunits(const si_t *sih, uint coreid);
+uint32 nci_addr_space(const si_t *sih, uint spidx, uint baidx);
+uint32 nci_addr_space_size(const si_t *sih, uint spidx, uint baidx);
+bool nci_iscoreup(const si_t *sih);
+uint nci_intflag(si_t *sih);
+uint nci_flag(si_t *sih);
+uint nci_flag_alt(const si_t *sih);
+void nci_setint(const si_t *sih, int siflag);
+uint32 nci_oobr_baseaddr(const si_t *sih, bool second);
+uint nci_coreunit(const si_t *sih);
+uint nci_corelist(const si_t *sih, uint coreid[]);
+int nci_numaddrspaces(const si_t *sih);
+uint32 nci_addrspace(const si_t *sih, uint spidx, uint baidx);
+uint32 nci_addrspacesize(const si_t *sih, uint spidx, uint baidx);
+void nci_coreaddrspaceX(const si_t *sih, uint asidx, uint32 *addr, uint32 *size);
+uint32 nci_core_cflags(const si_t *sih, uint32 mask, uint32 val);
+void nci_core_cflags_wo(const si_t *sih, uint32 mask, uint32 val);
+uint32 nci_core_sflags(const si_t *sih, uint32 mask, uint32 val);
+uint nci_wrapperreg(const si_t *sih, uint32 offset, uint32 mask, uint32 val);
+void nci_invalidate_second_bar0win(si_t *sih);
+int nci_backplane_access(si_t *sih, uint addr, uint size, uint *val, bool read);
+int nci_backplane_access_64(si_t *sih, uint addr, uint size, uint64 *val, bool read);
+uint nci_num_slaveports(const si_t *sih, uint coreid);
+#if defined(BCMDBG) || defined(BCMDBG_DUMP) || defined(BCMDBG_PHYDUMP)
+void nci_dumpregs(const si_t *sih, struct bcmstrbuf *b);
+#endif /* BCMDBG || BCMDBG_DUMP || BCMDBG_PHYDUMP */
+#ifdef BCMDBG
+void nci_view(si_t *sih, bool verbose);
+void nci_viewall(si_t *sih, bool verbose);
+#endif /* BCMDBG */
+uint32 nci_get_nth_wrapper(const si_t *sih, int32 wrap_pos);
+uint32 nci_get_axi_addr(const si_t *sih, uint32 *size);
+uint32* nci_wrapper_dump_binary_one(const si_info_t *sii, uint32 *p32, uint32 wrap_ba);
+uint32 nci_wrapper_dump_binary(const si_t *sih, uchar *p);
+uint32 nci_wrapper_dump_last_timeout(const si_t *sih, uint32 *error,
+ uint32 *core, uint32 *ba, uchar *p);
+bool nci_check_enable_backplane_log(const si_t *sih);
+uint32 nci_get_core_baaddr(const si_t *sih, uint32 *size, int32 baidx);
+uint32 nci_clear_backplane_to(si_t *sih);
+uint32 nci_clear_backplane_to_per_core(si_t *sih, uint coreid, uint coreunit, void *wrap);
+bool nci_ignore_errlog(const si_info_t *sii, const aidmp_t *ai,
+ uint32 lo_addr, uint32 hi_addr, uint32 err_axi_id, uint32 errsts);
+void nci_wrapper_get_last_error(const si_t *sih, uint32 *error_status, uint32 *core, uint32 *lo,
+ uint32 *hi, uint32 *id);
+uint32 nci_get_axi_timeout_reg(void);
+uint32 nci_findcoreidx_by_axiid(const si_t *sih, uint32 axiid);
+uint32* nci_wrapper_dump_binary_one(const si_info_t *sii, uint32 *p32, uint32 wrap_ba);
+uint32 nci_wrapper_dump_binary(const si_t *sih, uchar *p);
+uint32 nci_wrapper_dump_last_timeout(const si_t *sih, uint32 *error,
+ uint32 *core, uint32 *ba, uchar *p);
+bool nci_check_enable_backplane_log(const si_t *sih);
+uint32 ai_wrapper_dump_buf_size(const si_t *sih);
+uint32 nci_wrapper_dump_buf_size(const si_t *sih);
+#endif /* SOCI_NCI_BUS */
+#endif /* _NCI_H */
diff --git a/bcmdhd.101.10.361.x/include/osl.h b/bcmdhd.101.10.361.x/include/osl.h
new file mode 100755
index 0000000..7ea182e
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/osl.h
@@ -0,0 +1,482 @@
+/*
+ * OS Abstraction Layer
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _osl_h_
+#define _osl_h_
+
+#include <osl_decl.h>
+
+enum {
+ TAIL_BYTES_TYPE_FCS = 1,
+ TAIL_BYTES_TYPE_ICV = 2,
+ TAIL_BYTES_TYPE_MIC = 3
+};
+
+#ifdef DHD_EFI
+#define OSL_PKTTAG_SZ 40 /* Size of PktTag */
+#elif defined(MACOSX)
+#define OSL_PKTTAG_SZ 56
+#elif defined(__linux__)
+#define OSL_PKTTAG_SZ 48 /* standard linux pkttag size is 48 bytes */
+#else
+#ifndef OSL_PKTTAG_SZ
+#define OSL_PKTTAG_SZ 32 /* Size of PktTag */
+#endif /* !OSL_PKTTAG_SZ */
+#endif /* DHD_EFI */
+
+/* Drivers use PKTFREESETCB to register a callback function when a packet is freed by OSL */
+typedef void (*pktfree_cb_fn_t)(void *ctx, void *pkt, unsigned int status);
+
+/* Drivers use REGOPSSET() to register register read/write functions */
+typedef unsigned int (*osl_rreg_fn_t)(void *ctx, volatile void *reg, unsigned int size);
+typedef void (*osl_wreg_fn_t)(void *ctx, volatile void *reg, unsigned int val, unsigned int size);
+
+#if defined(EFI)
+#include <efi_osl.h>
+#elif defined(WL_UNITTEST)
+#include <utest_osl.h>
+#elif defined(__linux__)
+#include <linux_osl.h>
+#include <linux_pkt.h>
+#elif defined(NDIS)
+#include <ndis_osl.h>
+#elif defined(_RTE_)
+#include <rte_osl.h>
+#include <hnd_pkt.h>
+#elif defined(MACOSX)
+#include <macosx_osl.h>
+#else
+#error "Unsupported OSL requested"
+#endif /* defined(DOS) */
+
+#ifndef PKTDBG_TRACE
+#define PKTDBG_TRACE(osh, pkt, bit) BCM_REFERENCE(osh)
+#endif
+
+#ifndef BCM_UPTIME_PROFILE
+#define OSL_GETCYCLES_PROF(x)
+#endif
+
+/* --------------------------------------------------------------------------
+** Register manipulation macros.
+*/
+
+#define SET_REG(osh, r, mask, val) W_REG((osh), (r), ((R_REG((osh), r) & ~(mask)) | (val)))
+
+#ifndef AND_REG
+#define AND_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) & (v))
+#endif /* !AND_REG */
+
+#ifndef OR_REG
+#define OR_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) | (v))
+#endif /* !OR_REG */
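+
+/*
+ * Illustrative sketch (not part of the original header): SET_REG is a
+ * read-modify-write; clear the `mask` bits, then OR in `val`. Usage with
+ * hypothetical register/field names:
+ *
+ *	SET_REG(osh, &regs->control, CTRL_MODE_MASK,
+ *	        mode << CTRL_MODE_SHIFT);
+ */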
+
+#if !defined(OSL_SYSUPTIME)
+#define OSL_SYSUPTIME() (0)
+#define OSL_SYSUPTIME_NOT_DEFINED 1
+#endif /* !defined(OSL_SYSUPTIME) */
+
+#if !defined(OSL_SYSUPTIME_US)
+#define OSL_SYSUPTIME_US() (0)
+#define OSL_SYSUPTIME_US_NOT_DEFINED 1
+#endif /* !defined(OSL_SYSUPTIME_US) */
+
+#if defined(OSL_SYSUPTIME_NOT_DEFINED) && defined(OSL_SYSUPTIME_US_NOT_DEFINED)
+#define OSL_SYSUPTIME_SUPPORT FALSE
+#else
+#define OSL_SYSUPTIME_SUPPORT TRUE
+#endif /* OSL_SYSUPTIME */
+
+#ifndef OSL_GET_LOCALTIME
+#define OSL_GET_LOCALTIME(sec, usec) \
+ do { \
+ BCM_REFERENCE(sec); \
+ BCM_REFERENCE(usec); \
+ } while (0)
+#endif /* OSL_GET_LOCALTIME */
+
+#ifndef OSL_LOCALTIME_NS
+#define OSL_LOCALTIME_NS() (OSL_SYSUPTIME_US() * NSEC_PER_USEC)
+#endif /* OSL_LOCALTIME_NS */
+
+#ifndef OSL_SYSTZTIME_US
+#define OSL_SYSTZTIME_US() OSL_SYSUPTIME_US()
+#endif /* OSL_SYSTZTIME_US */
+
+#if !defined(OSL_CPU_COUNTS_PER_US)
+#define OSL_CPU_COUNTS_PER_US() (0)
+#define OSL_CPU_COUNTS_PER_US_NOT_DEFINED 1
+#endif /* !defined(OSL_CPU_COUNTS_PER_US) */
+
+#ifndef OSL_SYS_HALT
+#ifdef __COVERITY__
+/*
+ * For Coverity builds, provide a definition that allows Coverity
+ * to model the lack of return. This avoids Coverity False Positive
+ * defects associated with data inconsistency being detected after
+ * we otherwise would have halted.
+ */
+#define OSL_SYS_HALT() __coverity_panic__()
+#else /* __COVERITY__ */
+#define OSL_SYS_HALT() do {} while (0)
+#endif /* __COVERITY__ */
+#endif /* OSL_SYS_HALT */
+
+#ifndef DMB
+#define DMB() do {} while (0)
+#endif /* DMB */
+
+#ifndef OSL_MEM_AVAIL
+#define OSL_MEM_AVAIL() (0xffffffff)
+#endif
+
+#ifndef OSL_OBFUSCATE_BUF
+#if defined (_RTE_)
+#define OSL_OBFUSCATE_BUF(x) osl_obfuscate_ptr(x)
+#else
+#define OSL_OBFUSCATE_BUF(x) (x)
+#endif /* _RTE_ */
+#endif /* OSL_OBFUSCATE_BUF */
+
+#ifndef OSL_GET_HCAPISTIMESYNC
+#if defined (_RTE_)
+#define OSL_GET_HCAPISTIMESYNC() osl_get_hcapistimesync()
+#else
+#define OSL_GET_HCAPISTIMESYNC()
+#endif /* _RTE_ */
+#endif /* OSL_GET_HCAPISTIMESYNC */
+
+#ifndef OSL_GET_HCAPISPKTTXS
+#if defined (_RTE_)
+#define OSL_GET_HCAPISPKTTXS() osl_get_hcapispkttxs()
+#else
+#define OSL_GET_HCAPISPKTTXS()
+#endif /* _RTE_ */
+#endif /* OSL_GET_HCAPISPKTTXS */
+
+#if !defined(PKTC_DONGLE)
+#define PKTCGETATTR(skb) (0)
+#define PKTCSETATTR(skb, f, p, b) BCM_REFERENCE(skb)
+#define PKTCCLRATTR(skb) BCM_REFERENCE(skb)
+#define PKTCCNT(skb) (1)
+#define PKTCLEN(skb) PKTLEN(NULL, skb)
+#define PKTCGETFLAGS(skb) (0)
+#define PKTCSETFLAGS(skb, f) BCM_REFERENCE(skb)
+#define PKTCCLRFLAGS(skb) BCM_REFERENCE(skb)
+#define PKTCFLAGS(skb) (0)
+#define PKTCSETCNT(skb, c) BCM_REFERENCE(skb)
+#define PKTCINCRCNT(skb) BCM_REFERENCE(skb)
+#define PKTCADDCNT(skb, c) BCM_REFERENCE(skb)
+#define PKTCSETLEN(skb, l) BCM_REFERENCE(skb)
+#define PKTCADDLEN(skb, l) BCM_REFERENCE(skb)
+#define PKTCSETFLAG(skb, fb) BCM_REFERENCE(skb)
+#define PKTCCLRFLAG(skb, fb) BCM_REFERENCE(skb)
+#define PKTCLINK(skb) NULL
+#define PKTSETCLINK(skb, x) BCM_REFERENCE(skb)
+#define FOREACH_CHAINED_PKT(skb, nskb) \
+ for ((nskb) = NULL; (skb) != NULL; (skb) = (nskb))
+#define PKTCFREE PKTFREE
+#define PKTCENQTAIL(h, t, p) \
+do { \
+ if ((t) == NULL) { \
+ (h) = (t) = (p); \
+ } \
+} while (0)
+#endif /* !PKTC_DONGLE */
+
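+/*
+ * Illustrative sketch (not part of the driver): FOREACH_CHAINED_PKT lets the
+ * loop body consume the current packet safely because the next link is held
+ * in 'nskb' (in builds with packet chaining; an assumption here). With
+ * chaining compiled out, the fallback above keeps 'nskb' NULL and the body
+ * runs exactly once:
+ *
+ *   FOREACH_CHAINED_PKT(skb, nskb) {
+ *       PKTCLRCHAINED(osh, skb);
+ *       dhd_process_pkt(dhd, skb);   // hypothetical consumer
+ *   }
+ */
+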
+#ifndef PKTSETCHAINED
+#define PKTSETCHAINED(osh, skb) BCM_REFERENCE(osh)
+#endif
+#ifndef PKTCLRCHAINED
+#define PKTCLRCHAINED(osh, skb) BCM_REFERENCE(osh)
+#endif
+#ifndef PKTISCHAINED
+#define PKTISCHAINED(skb) FALSE
+#endif
+
+#ifndef PKTGETPROFILEIDX
+#define PKTGETPROFILEIDX(p) (-1)
+#endif
+
+#ifndef PKTCLRPROFILEIDX
+#define PKTCLRPROFILEIDX(p)
+#endif
+
+#ifndef PKTSETPROFILEIDX
+#define PKTSETPROFILEIDX(p, idx) BCM_REFERENCE(idx)
+#endif
+
+#ifndef _RTE_
+/* Lbuf with fraglist */
+#ifndef PKTFRAGPKTID
+#define PKTFRAGPKTID(osh, lb) (0)
+#endif
+#ifndef PKTSETFRAGPKTID
+#define PKTSETFRAGPKTID(osh, lb, id) BCM_REFERENCE(osh)
+#endif
+#ifndef PKTFRAGTOTNUM
+#define PKTFRAGTOTNUM(osh, lb) (0)
+#endif
+#ifndef PKTSETFRAGTOTNUM
+#define PKTSETFRAGTOTNUM(osh, lb, tot) BCM_REFERENCE(osh)
+#endif
+#ifndef PKTFRAGTOTLEN
+#define PKTFRAGTOTLEN(osh, lb) (0)
+#endif
+#ifndef PKTSETFRAGTOTLEN
+#define PKTSETFRAGTOTLEN(osh, lb, len) BCM_REFERENCE(osh)
+#endif
+#ifndef PKTIFINDEX
+#define PKTIFINDEX(osh, lb) (0)
+#endif
+#ifndef PKTSETIFINDEX
+#define PKTSETIFINDEX(osh, lb, idx) BCM_REFERENCE(osh)
+#endif
+#ifndef PKTGETLF
+#define PKTGETLF(osh, len, send, lbuf_type) (0)
+#endif
+
+/* in rx path, reuse totlen as used len */
+#ifndef PKTFRAGUSEDLEN
+#define PKTFRAGUSEDLEN(osh, lb) (0)
+#endif
+#ifndef PKTSETFRAGUSEDLEN
+#define PKTSETFRAGUSEDLEN(osh, lb, len) BCM_REFERENCE(osh)
+#endif
+#ifndef PKTFRAGLEN
+#define PKTFRAGLEN(osh, lb, ix) (0)
+#endif
+#ifndef PKTSETFRAGLEN
+#define PKTSETFRAGLEN(osh, lb, ix, len) BCM_REFERENCE(osh)
+#endif
+#ifndef PKTFRAGDATA_LO
+#define PKTFRAGDATA_LO(osh, lb, ix) (0)
+#endif
+#ifndef PKTSETFRAGDATA_LO
+#define PKTSETFRAGDATA_LO(osh, lb, ix, addr) BCM_REFERENCE(osh)
+#endif
+#ifndef PKTFRAGDATA_HI
+#define PKTFRAGDATA_HI(osh, lb, ix) (0)
+#endif
+#ifndef PKTSETFRAGDATA_HI
+#define PKTSETFRAGDATA_HI(osh, lb, ix, addr) BCM_REFERENCE(osh)
+#endif
+
+#ifndef PKTFRAGMOVE
+#define PKTFRAGMOVE(osh, dst, src) (BCM_REFERENCE(osh), BCM_REFERENCE(dst), BCM_REFERENCE(src))
+#endif
+
+/* RX FRAG */
+#ifndef PKTISRXFRAG
+#define PKTISRXFRAG(osh, lb) (0)
+#endif
+#ifndef PKTSETRXFRAG
+#define PKTSETRXFRAG(osh, lb) BCM_REFERENCE(osh)
+#endif
+#ifndef PKTRESETRXFRAG
+#define PKTRESETRXFRAG(osh, lb) BCM_REFERENCE(osh)
+#endif
+
+/* TX FRAG */
+#ifndef PKTISTXFRAG
+#define PKTISTXFRAG(osh, lb) (0)
+#endif
+#ifndef PKTSETTXFRAG
+#define PKTSETTXFRAG(osh, lb) BCM_REFERENCE(osh)
+#endif
+
+/* TX ALFRAG */
+#ifndef PKTISTXALFRAG
+#define PKTISTXALFRAG(osh, lb) (0)
+#endif
+#ifndef PKTSETTXALFRAG
+#define PKTSETTXALFRAG(osh, lb) BCM_REFERENCE(osh)
+#endif
+#ifndef PKTRESETTXALFRAG
+#define PKTRESETTXALFRAG(osh, lb) BCM_REFERENCE(osh)
+#endif
+
+#ifndef PKTNUMMPDUS
+#define PKTNUMMPDUS(osh, lb) (1)
+#endif
+#ifndef PKTNUMPKTS
+#define PKTNUMPKTS(osh, lb) (1)
+#endif
+
+#ifndef PKTISHWCSO
+#define PKTISHWCSO(osh, lb) (FALSE)
+#endif
+
+#ifndef PKTISSUBMSDUTOEHDR
+#define PKTISSUBMSDUTOEHDR(osh, lb) (FALSE)
+#endif
+
+#ifndef PKT_IS_HOST_SFHLLC
+#define PKT_IS_HOST_SFHLLC(osh, lb) (FALSE)
+#endif
+
+#ifndef PKT_SET_HOST_SFHLLC
+#define PKT_SET_HOST_SFHLLC(osh, lb) BCM_REFERENCE(osh)
+#endif
+
+#ifndef PKT_IS_HOST_SFHLLC_DONE
+#define PKT_IS_HOST_SFHLLC_DONE(osh, lb) (FALSE)
+#endif
+
+#ifndef PKT_SET_HOST_SFHLLC_DONE
+#define PKT_SET_HOST_SFHLLC_DONE(osh, lb) BCM_REFERENCE(osh)
+#endif
+
+/* Need Rx completion used for AMPDU reordering */
+#ifndef PKTNEEDRXCPL
+#define PKTNEEDRXCPL(osh, lb) (TRUE)
+#endif
+#ifndef PKTSETNORXCPL
+#define PKTSETNORXCPL(osh, lb) BCM_REFERENCE(osh)
+#endif
+#ifndef PKTRESETNORXCPL
+#define PKTRESETNORXCPL(osh, lb) BCM_REFERENCE(osh)
+#endif
+#ifndef PKTISFRAG
+#define PKTISFRAG(osh, lb) (0)
+#endif
+#ifndef PKTFRAGISCHAINED
+#define PKTFRAGISCHAINED(osh, i) (0)
+#endif
+#ifndef PKTISHDRCONVTD
+#define PKTISHDRCONVTD(osh, lb) (0)
+#endif
+
+/* Forwarded pkt indication */
+#ifndef PKTISFRWDPKT
+#define PKTISFRWDPKT(osh, lb) 0
+#endif
+#ifndef PKTSETFRWDPKT
+#define PKTSETFRWDPKT(osh, lb) BCM_REFERENCE(osh)
+#endif
+#ifndef PKTRESETFRWDPKT
+#define PKTRESETFRWDPKT(osh, lb) BCM_REFERENCE(osh)
+#endif
+
+/* PKT consumed for totlen calculation */
+#ifndef PKTISUSEDTOTLEN
+#define PKTISUSEDTOTLEN(osh, lb) 0
+#endif
+#ifndef PKTSETUSEDTOTLEN
+#define PKTSETUSEDTOTLEN(osh, lb) BCM_REFERENCE(osh)
+#endif
+#ifndef PKTRESETUSEDTOTLEN
+#define PKTRESETUSEDTOTLEN(osh, lb) BCM_REFERENCE(osh)
+#endif
+
+/* UDR Packet Indication */
+#ifndef PKTISUDR
+#define PKTISUDR(osh, lb) 0
+#endif
+
+#ifndef PKTSETUDR
+#define PKTSETUDR(osh, lb) BCM_REFERENCE(osh)
+#endif
+
+#ifndef PKTRESETUDR
+#define PKTRESETUDR(osh, lb) BCM_REFERENCE(osh)
+#endif
+#endif /* _RTE_ */
+
+#if !(defined(__linux__))
+#define PKTLIST_INIT(x) BCM_REFERENCE(x)
+#define PKTLIST_ENQ(x, y) BCM_REFERENCE(x)
+#define PKTLIST_DEQ(x) BCM_REFERENCE(x)
+#define PKTLIST_UNLINK(x, y) BCM_REFERENCE(x)
+#define PKTLIST_FINI(x) BCM_REFERENCE(x)
+#endif
+
+#ifndef ROMMABLE_ASSERT
+#define ROMMABLE_ASSERT(exp) ASSERT(exp)
+#endif /* ROMMABLE_ASSERT */
+
+#ifndef MALLOC_NOPERSIST
+ #define MALLOC_NOPERSIST MALLOC
+#endif /* !MALLOC_NOPERSIST */
+
+#ifndef MALLOC_PERSIST
+ #define MALLOC_PERSIST MALLOC
+#endif /* !MALLOC_PERSIST */
+
+#ifndef MALLOC_RA
+ #define MALLOC_RA(osh, size, callsite) MALLOCZ(osh, size)
+#endif /* !MALLOC_RA */
+
+#ifndef MALLOC_PERSIST_ATTACH
+ #define MALLOC_PERSIST_ATTACH MALLOC
+#endif /* !MALLOC_PERSIST_ATTACH */
+
+#ifndef MALLOCZ_PERSIST_ATTACH
+ #define MALLOCZ_PERSIST_ATTACH MALLOCZ
+#endif /* !MALLOCZ_PERSIST_ATTACH */
+
+#ifndef MALLOCZ_NOPERSIST
+ #define MALLOCZ_NOPERSIST MALLOCZ
+#endif /* !MALLOCZ_NOPERSIST */
+
+#ifndef MALLOCZ_PERSIST
+ #define MALLOCZ_PERSIST MALLOCZ
+#endif /* !MALLOCZ_PERSIST */
+
+#ifndef MFREE_PERSIST
+ #define MFREE_PERSIST MFREE
+#endif /* !MFREE_PERSIST */
+
+#ifndef MALLOC_SET_NOPERSIST
+ #define MALLOC_SET_NOPERSIST(osh) do { } while (0)
+#endif /* !MALLOC_SET_NOPERSIST */
+
+#ifndef MALLOC_CLEAR_NOPERSIST
+ #define MALLOC_CLEAR_NOPERSIST(osh) do { } while (0)
+#endif /* !MALLOC_CLEAR_NOPERSIST */
+
+#if defined(OSL_MEMCHECK)
+#define MEMCHECK(f, l) osl_memcheck(f, l)
+#else
+#define MEMCHECK(f, l)
+#endif /* OSL_MEMCHECK */
+
+#ifndef BCMDBGPERF
+#define PERF_TRACE_START(id) do {} while (0)
+#define PERF_TRACE_END(id) do {} while (0)
+#define PERF_TRACE_END2(id, mycounters) do {} while (0)
+#define PERF_TRACE_END3(id, mycounters, coreunit) do {} while (0)
+#define UPDATE_PERF_TRACE_COUNTER(counter, val) do {} while (0)
+#define ADD_PERF_TRACE_COUNTER(counter, val) do {} while (0)
+#endif /* !BCMDBGPERF */
+
+/* Virtual/physical address translation. */
+#if !defined(OSL_VIRT_TO_PHYS_ADDR)
+ #define OSL_VIRT_TO_PHYS_ADDR(va) ((void*)(uintptr)(va))
+#endif
+
+#if !defined(OSL_PHYS_TO_VIRT_ADDR)
+ #define OSL_PHYS_TO_VIRT_ADDR(pa) ((void*)(uintptr)(pa))
+#endif
+
+#endif /* _osl_h_ */
diff --git a/bcmdhd.101.10.361.x/include/osl_decl.h b/bcmdhd.101.10.361.x/include/osl_decl.h
new file mode 100755
index 0000000..a86805a
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/osl_decl.h
@@ -0,0 +1,31 @@
+/*
+ * osl forward declarations
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _osl_decl_h_
+#define _osl_decl_h_
+
+/* osl handle type forward declaration */
+typedef struct osl_info osl_t;
+typedef struct osl_dmainfo osldma_t;
+extern unsigned int lmtest; /* low memory test */
+#endif
diff --git a/bcmdhd.101.10.361.x/include/osl_ext.h b/bcmdhd.101.10.361.x/include/osl_ext.h
new file mode 100755
index 0000000..460bc3a
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/osl_ext.h
@@ -0,0 +1,759 @@
+/*
+ * OS Abstraction Layer Extension - the APIs defined by the "extension" API
+ * are only supported by a subset of all operating systems.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _osl_ext_h_
+#define _osl_ext_h_
+
+/* ---- Include Files ---------------------------------------------------- */
+
+#if defined(THREADX)
+ #include <threadx_osl_ext.h>
+#else
+ #define OSL_EXT_DISABLED
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* ---- Constants and Types ---------------------------------------------- */
+
+/* -----------------------------------------------------------------------
+ * Generic OS types.
+ */
+typedef enum osl_ext_status_t
+{
+ OSL_EXT_SUCCESS,
+ OSL_EXT_ERROR,
+ OSL_EXT_TIMEOUT
+
+} osl_ext_status_t;
+
+#define OSL_EXT_STATUS_DECL(status) osl_ext_status_t status;
+
+#define OSL_EXT_TIME_FOREVER ((osl_ext_time_ms_t)(-1))
+
+typedef unsigned int osl_ext_time_ms_t;
+typedef unsigned int osl_ext_time_us_t;
+
+typedef unsigned int osl_ext_event_bits_t;
+
+typedef unsigned int osl_ext_interrupt_state_t;
+
+/* -----------------------------------------------------------------------
+ * Timers.
+ */
+typedef enum
+{
+ /* One-shot timer. */
+ OSL_EXT_TIMER_MODE_ONCE,
+
+ /* Periodic timer. */
+ OSL_EXT_TIMER_MODE_REPEAT
+
+} osl_ext_timer_mode_t;
+
+/* User registered callback and parameter to invoke when timer expires. */
+typedef void* osl_ext_timer_arg_t;
+typedef void (*osl_ext_timer_callback)(osl_ext_timer_arg_t arg);
+
+/* -----------------------------------------------------------------------
+ * Tasks.
+ */
+
+/* Task entry argument. */
+typedef void* osl_ext_task_arg_t;
+
+/* Task entry function. */
+typedef void (*osl_ext_task_entry)(osl_ext_task_arg_t arg);
+
+/* Abstract task priority levels. */
+typedef enum
+{
+ OSL_EXT_TASK_IDLE_PRIORITY,
+ OSL_EXT_TASK_CPUUTIL_PRIORITY,
+ OSL_EXT_TASK_LOW_PRIORITY,
+ OSL_EXT_TASK_LOW_NORMAL_PRIORITY,
+ OSL_EXT_TASK_NORMAL_PRIORITY,
+ OSL_EXT_TASK_HIGH_NORMAL_PRIORITY,
+ OSL_EXT_TASK_HIGHEST_PRIORITY,
+ OSL_EXT_TASK_TIME_CRITICAL_PRIORITY,
+
+ /* This must be last. */
+ OSL_EXT_TASK_NUM_PRIORITES
+} osl_ext_task_priority_t;
+
+#ifndef OSL_EXT_DISABLED
+
+/* ---- Variable Externs ------------------------------------------------- */
+/* ---- Function Prototypes ---------------------------------------------- */
+
+/* --------------------------------------------------------------------------
+** Semaphore
+*/
+
+/****************************************************************************
+* Function: osl_ext_sem_create
+*
+* Purpose: Creates a counting semaphore object, which can subsequently be
+* used for thread notification.
+*
+* Parameters: name (in) Name to assign to the semaphore (must be unique).
+* init_cnt (in) Initial count that the semaphore should have.
+* sem (out) Newly created semaphore.
+*
+* Returns: OSL_EXT_SUCCESS if the semaphore was created successfully, or an
+* error code if the semaphore could not be created.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_sem_create(char *name, int init_cnt, osl_ext_sem_t *sem);
+
+/****************************************************************************
+* Function: osl_ext_sem_delete
+*
+* Purpose: Destroys a previously created semaphore object.
+*
+* Parameters: sem (mod) Semaphore object to destroy.
+*
+* Returns: OSL_EXT_SUCCESS if the semaphore was deleted successfully, or an
+* error code if the semaphore could not be deleted.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_sem_delete(osl_ext_sem_t *sem);
+
+/****************************************************************************
+* Function: osl_ext_sem_give
+*
+* Purpose: Increments the count associated with the semaphore. This will
+* cause one thread blocked on a take to wake up.
+*
+* Parameters: sem (mod) Semaphore object to give.
+*
+* Returns: OSL_EXT_SUCCESS if the semaphore was given successfully, or an
+* error code if the semaphore could not be given.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_sem_give(osl_ext_sem_t *sem);
+
+/****************************************************************************
+* Function: osl_ext_sem_take
+*
+* Purpose: Decrements the count associated with the semaphore. If the count
+* is less than zero, then the calling task will become blocked until
+* another thread does a give on the semaphore. This function will only
+* block the calling thread for timeout_msec milliseconds before
+* returning with OSL_EXT_TIMEOUT.
+*
+* Parameters: sem (mod) Semaphore object to take.
+* timeout_msec (in) Number of milliseconds to wait for the
+* semaphore to enter a state where it can be
+* taken.
+*
+* Returns: OSL_EXT_SUCCESS if the semaphore was taken successfully, or an
+* error code if the semaphore could not be taken.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_sem_take(osl_ext_sem_t *sem, osl_ext_time_ms_t timeout_msec);
+
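+/*
+ * Illustrative sketch (not part of the driver): thread notification with a
+ * counting semaphore. The OSL_EXT_EXAMPLE_USAGE guard is hypothetical and
+ * keeps the sketch out of real builds; osl_ext_sem_t comes from the per-OS
+ * header (e.g. threadx_osl_ext.h).
+ */
+#ifdef OSL_EXT_EXAMPLE_USAGE
+static void
+example_sem_usage(void)
+{
+	osl_ext_sem_t sem;
+
+	if (osl_ext_sem_create("example", 0, &sem) != OSL_EXT_SUCCESS)
+		return;
+	osl_ext_sem_give(&sem);			/* signal: count 0 -> 1 */
+	if (osl_ext_sem_take(&sem, 100) == OSL_EXT_TIMEOUT) {
+		/* nothing signalled within 100 ms */
+	}
+	osl_ext_sem_delete(&sem);
+}
+#endif /* OSL_EXT_EXAMPLE_USAGE */
+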
+/* --------------------------------------------------------------------------
+** Mutex
+*/
+
+/****************************************************************************
+* Function: osl_ext_mutex_create
+*
+* Purpose: Creates a mutex object, which can subsequently be used to control
+* mutually exclusive access to resources.
+*
+* Parameters: name (in) Name to assign to the mutex (must be unique).
+* mutex (out) Mutex object to initialize.
+*
+* Returns: OSL_EXT_SUCCESS if the mutex was created successfully, or an
+* error code if the mutex could not be created.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_mutex_create(char *name, osl_ext_mutex_t *mutex);
+
+/****************************************************************************
+* Function: osl_ext_mutex_delete
+*
+* Purpose: Destroys a previously created mutex object.
+*
+* Parameters: mutex (mod) Mutex object to destroy.
+*
+* Returns: OSL_EXT_SUCCESS if the mutex was deleted successfully, or an
+* error code if the mutex could not be deleted.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_mutex_delete(osl_ext_mutex_t *mutex);
+
+/****************************************************************************
+* Function: osl_ext_mutex_acquire
+*
+* Purpose: Acquires the indicated mutual exclusion object. If the object is
+* currently acquired by another task, then this function will wait
+* for timeout_msec milliseconds before returning with OSL_EXT_TIMEOUT.
+*
+* Parameters: mutex (mod) Mutex object to acquire.
+* timeout_msec (in) Number of milliseconds to wait for the mutex.
+*
+* Returns: OSL_EXT_SUCCESS if the mutex was acquired successfully, or an
+* error code if the mutex could not be acquired.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_mutex_acquire(osl_ext_mutex_t *mutex, osl_ext_time_ms_t timeout_msec);
+
+/****************************************************************************
+* Function: osl_ext_mutex_release
+*
+* Purpose: Releases the indicated mutual exclusion object. This makes it
+* available for another task to acquire.
+*
+* Parameters: mutex (mod) Mutex object to release.
+*
+* Returns: OSL_EXT_SUCCESS if the mutex was released successfully, or an
+* error code if the mutex could not be released.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_mutex_release(osl_ext_mutex_t *mutex);
+
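+/*
+ * Illustrative sketch (not part of the driver): a critical section. The
+ * OSL_EXT_EXAMPLE_USAGE guard is hypothetical and keeps the sketch out of
+ * real builds; osl_ext_mutex_t comes from the per-OS header.
+ */
+#ifdef OSL_EXT_EXAMPLE_USAGE
+static void
+example_mutex_usage(osl_ext_mutex_t *mutex)
+{
+	/* Block indefinitely; pass a finite timeout to bound the wait. */
+	if (osl_ext_mutex_acquire(mutex, OSL_EXT_TIME_FOREVER) == OSL_EXT_SUCCESS) {
+		/* ... access the shared resource ... */
+		osl_ext_mutex_release(mutex);
+	}
+}
+#endif /* OSL_EXT_EXAMPLE_USAGE */
+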
+/* --------------------------------------------------------------------------
+** Timers
+*/
+
+/****************************************************************************
+* Function: osl_ext_timer_create
+*
+* Purpose: Creates a timer object.
+*
+* Parameters: name (in) Name of timer.
+* timeout_msec (in) Invoke callback after this number of milliseconds.
+* mode (in) One-shot or periodic timer.
+* func (in) Callback function to invoke on timer expiry.
+* arg (in) Argument to callback function.
+* timer (out) Timer object to create.
+*
+* Note: The callback is invoked in interrupt context. The application must
+* arrange its own context switch (e.g. defer work to a task) if one is needed.
+*
+* Returns: OSL_EXT_SUCCESS if the timer was created successfully, or an
+* error code if the timer could not be created.
+*****************************************************************************
+*/
+osl_ext_status_t
+osl_ext_timer_create(char *name, osl_ext_time_ms_t timeout_msec, osl_ext_timer_mode_t mode,
+ osl_ext_timer_callback func, osl_ext_timer_arg_t arg, osl_ext_timer_t *timer);
+
+/****************************************************************************
+* Function: osl_ext_timer_delete
+*
+* Purpose: Destroys a previously created timer object.
+*
+* Parameters: timer (mod) Timer object to destroy.
+*
+* Returns: OSL_EXT_SUCCESS if the timer was deleted successfully, or an
+* error code if the timer could not be deleted.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_timer_delete(osl_ext_timer_t *timer);
+
+/****************************************************************************
+* Function: osl_ext_timer_start
+*
+* Purpose: Start a previously created timer object.
+*
+* Parameters: timer (in) Timer object.
+* timeout_msec (in) Invoke callback after this number of milliseconds.
+* mode (in) One-shot or periodic timer.
+*
+* Returns: OSL_EXT_SUCCESS if the timer was started successfully, or an
+* error code if the timer could not be started.
+*****************************************************************************
+*/
+osl_ext_status_t
+osl_ext_timer_start(osl_ext_timer_t *timer,
+ osl_ext_time_ms_t timeout_msec, osl_ext_timer_mode_t mode);
+
+/****************************************************************************
+* Function: osl_ext_timer_start_us
+*
+* Purpose: Start a previously created timer object.
+*
+* Parameters: timer (in) Timer object.
+* timeout_usec (in) Invoke callback after this number of microseconds.
+* mode (in) One-shot or periodic timer.
+*
+* Returns: OSL_EXT_SUCCESS if the timer was started successfully, or an
+* error code if the timer could not be started.
+*****************************************************************************
+*/
+osl_ext_status_t
+osl_ext_timer_start_us(osl_ext_timer_t *timer,
+ osl_ext_time_us_t timeout_usec, osl_ext_timer_mode_t mode);
+
+/****************************************************************************
+* Function: osl_ext_timer_stop
+*
+* Purpose: Stop a previously created timer object.
+*
+* Parameters: timer (in) Timer object.
+*
+* Returns: OSL_EXT_SUCCESS if the timer was stopped successfully, or an
+* error code if the timer could not be stopped.
+*****************************************************************************
+*/
+osl_ext_status_t
+osl_ext_timer_stop(osl_ext_timer_t *timer);
+
+/****************************************************************************
+* Function: osl_ext_time_get
+*
+* Purpose: Returns an incrementing time counter.
+*
+* Parameters: None.
+*
+* Returns: Incrementing time counter in msec.
+*****************************************************************************
+*/
+osl_ext_time_ms_t osl_ext_time_get(void);
+
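+/*
+ * Illustrative sketch (not part of the driver, hypothetical guard): a
+ * periodic 500 ms timer. The callback runs in interrupt context, so it
+ * should only flag work for a task to pick up.
+ */
+#ifdef OSL_EXT_EXAMPLE_USAGE
+static void
+example_timer_cb(osl_ext_timer_arg_t arg)
+{
+	/* Interrupt context: keep this short, defer real work to a task. */
+	(void)arg;
+}
+
+static void
+example_timer_usage(osl_ext_timer_t *timer)
+{
+	if (osl_ext_timer_create("example", 500, OSL_EXT_TIMER_MODE_REPEAT,
+		example_timer_cb, NULL, timer) == OSL_EXT_SUCCESS) {
+		/* ... later ... */
+		osl_ext_timer_stop(timer);
+		osl_ext_timer_delete(timer);
+	}
+}
+#endif /* OSL_EXT_EXAMPLE_USAGE */
+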
+/* --------------------------------------------------------------------------
+** Tasks
+*/
+
+/****************************************************************************
+* Function: osl_ext_task_create
+*
+* Purpose: Create a task.
+*
+* Parameters: name (in) Pointer to task string descriptor.
+* stack (in) Pointer to stack. NULL to allocate.
+* stack_size (in) Stack size - in bytes.
+* priority (in) Abstract task priority.
+* func (in) A pointer to the task entry point function.
+* arg (in) Value passed into task entry point function.
+* task (out) Task to create.
+*
+* Returns: OSL_EXT_SUCCESS if the task was created successfully, or an
+* error code if the task could not be created.
+*****************************************************************************
+*/
+
+#define osl_ext_task_create(name, stack, stack_size, priority, func, arg, task) \
+ osl_ext_task_create_ex((name), (stack), (stack_size), (priority), 0, (func), \
+ (arg), TRUE, (task))
+
+/****************************************************************************
+* Function: osl_ext_task_create_ex
+*
+* Purpose: Create a task with autostart option.
+*
+* Parameters: name (in) Pointer to task string descriptor.
+* stack (in) Pointer to stack. NULL to allocate.
+* stack_size (in) Stack size - in bytes.
+* priority (in) Abstract task priority.
+* timslice_msec (in) Task time slice in milliseconds.
+* func (in) A pointer to the task entry point function.
+* arg (in) Value passed into task entry point function.
+* autostart (in) TRUE to start task after creation.
+* task (out) Task to create.
+*
+* Returns: OSL_EXT_SUCCESS if the task was created successfully, or an
+* error code if the task could not be created.
+*****************************************************************************
+*/
+
+osl_ext_status_t osl_ext_task_create_ex(char* name,
+ void *stack, unsigned int stack_size, osl_ext_task_priority_t priority,
+ osl_ext_time_ms_t timslice_msec, osl_ext_task_entry func, osl_ext_task_arg_t arg,
+ bool autostart, osl_ext_task_t *task);
+
+/****************************************************************************
+* Function: osl_ext_task_change_priority
+*
+* Purpose: Change priority of a task.
+*
+* Parameters: task (mod) Task whose priority is changed.
+* new_priority (in) New task priority.
+* old_priority (out) Old task priority.
+*
+* Returns: OSL_EXT_SUCCESS if the priority was changed successfully, or an
+* error code if the priority could not be changed.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_task_change_priority(osl_ext_task_t *task,
+ osl_ext_task_priority_t new_priority, osl_ext_task_priority_t *old_priority);
+
+/****************************************************************************
+* Function: osl_ext_task_delete
+*
+* Purpose: Destroy a task.
+*
+* Parameters: task (mod) Task to destroy.
+*
+* Returns: OSL_EXT_SUCCESS if the task was deleted successfully, or an
+* error code if the task could not be deleted.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_task_delete(osl_ext_task_t *task);
+
+/****************************************************************************
+* Function: osl_ext_task_current
+*
+* Purpose: Returns the currently running task.
+*
+* Parameters: None.
+*
+* Returns: Pointer to the osl_ext_task_t of the currently running task.
+*****************************************************************************
+*/
+osl_ext_task_t *osl_ext_task_current(void);
+
+/****************************************************************************
+* Function: osl_ext_task_yield
+*
+* Purpose: Yield the CPU to other tasks of the same priority that are
+* ready-to-run.
+*
+* Parameters: None.
+*
+* Returns: OSL_EXT_SUCCESS if successful, else error code.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_task_yield(void);
+
+/****************************************************************************
+* Function: osl_ext_task_suspend
+*
+* Purpose: Suspend a task.
+*
+* Parameters: task (mod) Task to suspend.
+*
+* Returns: OSL_EXT_SUCCESS if the task was suspended successfully, or an
+* error code if the task could not be suspended.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_task_suspend(osl_ext_task_t *task);
+
+/****************************************************************************
+* Function: osl_ext_task_resume
+*
+* Purpose: Resume a task.
+*
+* Parameters: task (mod) Task to resume.
+*
+* Returns: OSL_EXT_SUCCESS if the task was resumed successfully, or an
+* error code if the task could not be resumed.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_task_resume(osl_ext_task_t *task);
+
+/****************************************************************************
+* Function: osl_ext_task_enable_stack_check
+*
+* Purpose: Enable task stack checking.
+*
+* Parameters: None.
+*
+* Returns: OSL_EXT_SUCCESS if successful, else error code.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_task_enable_stack_check(void);
+
+/* --------------------------------------------------------------------------
+** Queue
+*/
+
+/****************************************************************************
+* Function: osl_ext_queue_create
+*
+* Purpose: Create a queue.
+*
+* Parameters: name (in) Name to assign to the queue (must be unique).
+* queue_buffer (in) Queue buffer. NULL to allocate.
+* queue_size (in) Size of the queue.
+* queue (out) Newly created queue.
+*
+* Returns: OSL_EXT_SUCCESS if the queue was created successfully, or an
+* error code if the queue could not be created.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_queue_create(char *name,
+ void *queue_buffer, unsigned int queue_size,
+ osl_ext_queue_t *queue);
+
+/****************************************************************************
+* Function: osl_ext_queue_delete
+*
+* Purpose: Destroys a previously created queue object.
+*
+* Parameters: queue (mod) Queue object to destroy.
+*
+* Returns: OSL_EXT_SUCCESS if the queue was deleted successfully, or an
+* error code if the queue could not be deleted.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_queue_delete(osl_ext_queue_t *queue);
+
+/****************************************************************************
+* Function: osl_ext_queue_send
+*
+* Purpose: Send/add data to the queue. This function will not block the
+* calling thread if the queue is full.
+*
+* Parameters: queue (mod) Queue object.
+* data (in) Data pointer to be queued.
+*
+* Returns: OSL_EXT_SUCCESS if the data was queued successfully, or an
+* error code if the data could not be queued.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_queue_send(osl_ext_queue_t *queue, void *data);
+
+/****************************************************************************
+* Function: osl_ext_queue_send_synchronous
+*
+* Purpose: Send/add data to the queue. This function will block the
+* calling thread until the data is dequeued.
+*
+* Parameters: queue (mod) Queue object.
+* data (in) Data pointer to be queued.
+*
+* Returns: OSL_EXT_SUCCESS if the data was queued successfully, or an
+* error code if the data could not be queued.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_queue_send_synchronous(osl_ext_queue_t *queue, void *data);
+
+/****************************************************************************
+* Function: osl_ext_queue_receive
+*
+* Purpose: Receive/remove data from the queue. This function will only
+* block the calling thread for timeout_msec milliseconds before
+* returning with OSL_EXT_TIMEOUT.
+*
+* Parameters: queue (mod) Queue object.
+* timeout_msec (in) Number of milliseconds to wait for the
+* data from the queue.
+* data (out) Data pointer received/removed from the queue.
+*
+* Returns: OSL_EXT_SUCCESS if the data was dequeued successfully, or an
+* error code if the data could not be dequeued.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_queue_receive(osl_ext_queue_t *queue,
+ osl_ext_time_ms_t timeout_msec, void **data);
+
+/****************************************************************************
+* Function: osl_ext_queue_count
+*
+* Purpose: Returns the number of items in the queue.
+*
+* Parameters: queue (mod) Queue object.
+* count (out) Number of items in the queue.
+*
+* Returns: OSL_EXT_SUCCESS if the count was returned successfully, or an
+* error code if the count is invalid.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_queue_count(osl_ext_queue_t *queue, int *count);
+
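+/*
+ * Illustrative sketch (not part of the driver, hypothetical guard): queue a
+ * pointer-sized work item and drain it with a bounded wait.
+ */
+#ifdef OSL_EXT_EXAMPLE_USAGE
+static void
+example_queue_usage(osl_ext_queue_t *queue, void *item)
+{
+	void *data = NULL;
+
+	(void)osl_ext_queue_send(queue, item);	/* non-blocking enqueue */
+	if (osl_ext_queue_receive(queue, 10, &data) == OSL_EXT_SUCCESS) {
+		/* 'data' now holds the dequeued pointer */
+	}
+}
+#endif /* OSL_EXT_EXAMPLE_USAGE */
+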
+/* --------------------------------------------------------------------------
+** Event
+*/
+
+/****************************************************************************
+* Function: osl_ext_event_create
+*
+* Purpose: Creates an event object, which can subsequently be used to
+* notify and trigger tasks.
+*
+* Parameters: name (in) Name to assign to the event (must be unique).
+* event (out) Event object to initialize.
+*
+* Returns: OSL_EXT_SUCCESS if the event was created successfully, or an
+* error code if the event could not be created.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_event_create(char *name, osl_ext_event_t *event);
+
+/****************************************************************************
+* Function: osl_ext_event_delete
+*
+* Purpose: Destroys a previously created event object.
+*
+* Parameters: event (mod) Event object to destroy.
+*
+* Returns: OSL_EXT_SUCCESS if the event was deleted successfully, or an
+* error code if the event could not be deleted.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_event_delete(osl_ext_event_t *event);
+
+/****************************************************************************
+* Function: osl_ext_event_get
+*
+* Purpose: Get event from specified event object.
+*
+* Parameters: event (mod) Event object to get.
+* requested (in) Requested event to get.
+* timeout_msec (in) Number of milliseconds to wait for the event.
+* event_bits (out) Event bits retrieved.
+*
+* Returns: OSL_EXT_SUCCESS if the event was retrieved successfully, or an
+* error code if the event could not be retrieved.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_event_get(osl_ext_event_t *event,
+ osl_ext_event_bits_t requested, osl_ext_time_ms_t timeout_msec,
+ osl_ext_event_bits_t *event_bits);
+
+/****************************************************************************
+* Function: osl_ext_event_set
+*
+* Purpose: Set event of specified event object.
+*
+* Parameters: event (mod) Event object to set.
+* event_bits (in) Event bits to set.
+*
+* Returns: OSL_EXT_SUCCESS if the event was set successfully, or an
+* error code if the event could not be set.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_event_set(osl_ext_event_t *event,
+ osl_ext_event_bits_t event_bits);
+
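+/*
+ * Illustrative sketch (not part of the driver; hypothetical guard and bit
+ * assignments): wait for any of a set of event bits with a timeout.
+ */
+#ifdef OSL_EXT_EXAMPLE_USAGE
+#define EXAMPLE_EVT_RX 0x1u
+#define EXAMPLE_EVT_TX 0x2u
+
+static void
+example_event_usage(osl_ext_event_t *event)
+{
+	osl_ext_event_bits_t bits = 0;
+
+	(void)osl_ext_event_set(event, EXAMPLE_EVT_RX);
+	if (osl_ext_event_get(event, EXAMPLE_EVT_RX | EXAMPLE_EVT_TX, 100,
+		&bits) == OSL_EXT_SUCCESS) {
+		/* 'bits' reports which of the requested bits fired */
+	}
+}
+#endif /* OSL_EXT_EXAMPLE_USAGE */
+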
+/* --------------------------------------------------------------------------
+** Interrupt
+*/
+
+/****************************************************************************
+* Function: osl_ext_interrupt_disable
+*
+* Purpose: Disable CPU interrupt.
+*
+* Parameters: None.
+*
+* Returns: The interrupt state prior to disabling, for use with
+* osl_ext_interrupt_restore().
+*****************************************************************************
+*/
+osl_ext_interrupt_state_t osl_ext_interrupt_disable(void);
+
+/****************************************************************************
+* Function: osl_ext_interrupt_restore
+*
+* Purpose: Restore CPU interrupt state.
+*
+* Parameters: state (in) Interrupt state to restore returned from
+* osl_ext_interrupt_disable().
+*
+* Returns: None.
+*****************************************************************************
+*/
+void osl_ext_interrupt_restore(osl_ext_interrupt_state_t state);
+
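+/*
+ * Illustrative sketch (not part of the driver, hypothetical guard): a short
+ * critical section against interrupt handlers.
+ */
+#ifdef OSL_EXT_EXAMPLE_USAGE
+static void
+example_irq_critical_section(void)
+{
+	osl_ext_interrupt_state_t state = osl_ext_interrupt_disable();
+	/* ... touch state shared with an ISR ... */
+	osl_ext_interrupt_restore(state);
+}
+#endif /* OSL_EXT_EXAMPLE_USAGE */
+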
+#else
+
+/* ---- Constants and Types ---------------------------------------------- */
+
+/* Interrupt control */
+#define OSL_INTERRUPT_SAVE_AREA
+#define OSL_DISABLE
+#define OSL_RESTORE
+
+/* Semaphore. */
+#define osl_ext_sem_t
+#define OSL_EXT_SEM_DECL(sem)
+
+/* Mutex. */
+#define osl_ext_mutex_t
+#define OSL_EXT_MUTEX_DECL(mutex)
+
+/* Timer. */
+#define osl_ext_timer_t
+#define OSL_EXT_TIMER_DECL(timer)
+
+/* Task. */
+#define osl_ext_task_t void
+#define OSL_EXT_TASK_DECL(task)
+
+/* Queue. */
+#define osl_ext_queue_t
+#define OSL_EXT_QUEUE_DECL(queue)
+
+/* Event. */
+#define osl_ext_event_t
+#define OSL_EXT_EVENT_DECL(event)
+
+/* ---- Variable Externs ------------------------------------------------- */
+/* ---- Function Prototypes ---------------------------------------------- */
+
+#define osl_ext_sem_create(name, init_cnt, sem) (OSL_EXT_SUCCESS)
+#define osl_ext_sem_delete(sem) (OSL_EXT_SUCCESS)
+#define osl_ext_sem_give(sem) (OSL_EXT_SUCCESS)
+#define osl_ext_sem_take(sem, timeout_msec) (OSL_EXT_SUCCESS)
+
+#define osl_ext_mutex_create(name, mutex) (OSL_EXT_SUCCESS)
+#define osl_ext_mutex_delete(mutex) (OSL_EXT_SUCCESS)
+#define osl_ext_mutex_acquire(mutex, timeout_msec) (OSL_EXT_SUCCESS)
+#define osl_ext_mutex_release(mutex) (OSL_EXT_SUCCESS)
+
+#define osl_ext_timer_create(name, timeout_msec, mode, func, arg, timer) \
+ (OSL_EXT_SUCCESS)
+#define osl_ext_timer_delete(timer) (OSL_EXT_SUCCESS)
+#define osl_ext_timer_start(timer, timeout_msec, mode) (OSL_EXT_SUCCESS)
+#define osl_ext_timer_stop(timer) (OSL_EXT_SUCCESS)
+#define osl_ext_time_get() (0)
+
+#define osl_ext_task_create(name, stack, stack_size, priority, func, arg, task) \
+ (OSL_EXT_SUCCESS)
+#define osl_ext_task_delete(task) (OSL_EXT_SUCCESS)
+#define osl_ext_task_current() (NULL)
+#define osl_ext_task_yield() (OSL_EXT_SUCCESS)
+#define osl_ext_task_enable_stack_check() (OSL_EXT_SUCCESS)
+
+#define osl_ext_queue_create(name, queue_buffer, queue_size, queue) \
+ (OSL_EXT_SUCCESS)
+#define osl_ext_queue_delete(queue) (OSL_EXT_SUCCESS)
+#define osl_ext_queue_send(queue, data) (OSL_EXT_SUCCESS)
+#define osl_ext_queue_send_synchronous(queue, data) (OSL_EXT_SUCCESS)
+#define osl_ext_queue_receive(queue, timeout_msec, data) \
+ (OSL_EXT_SUCCESS)
+#define osl_ext_queue_count(queue, count) (OSL_EXT_SUCCESS)
+
+#define osl_ext_event_create(name, event) (OSL_EXT_SUCCESS)
+#define osl_ext_event_delete(event) (OSL_EXT_SUCCESS)
+#define osl_ext_event_get(event, requested, timeout_msec, event_bits) \
+ (OSL_EXT_SUCCESS)
+#define osl_ext_event_set(event, event_bits) (OSL_EXT_SUCCESS)
+
+#define osl_ext_interrupt_disable(void) (0)
+#define osl_ext_interrupt_restore(state)
+
+#endif /* OSL_EXT_DISABLED */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _osl_ext_h_ */
diff --git a/bcmdhd.101.10.361.x/include/p2p.h b/bcmdhd.101.10.361.x/include/p2p.h
new file mode 100755
index 0000000..727fe96
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/p2p.h
@@ -0,0 +1,695 @@
+/*
+ * Fundamental types and constants relating to WFA P2P (aka WiFi Direct)
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _P2P_H_
+#define _P2P_H_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+#include <wlioctl.h>
+#include <802.11.h>
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+/* WiFi P2P OUI values */
+#define P2P_VER WFA_OUI_TYPE_P2P /* P2P version: 9=WiFi P2P v1.0 */
+
+#define P2P_IE_ID 0xdd /* P2P IE element ID */
+
+/* WiFi P2P IE */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_ie {
+ uint8 id; /* IE ID: 0xDD */
+ uint8 len; /* IE length */
+ uint8 OUI[3]; /* WiFi P2P specific OUI: P2P_OUI */
+ uint8 oui_type; /* Identifies P2P version: P2P_VER */
+ uint8 subelts[1]; /* variable length subelements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_ie wifi_p2p_ie_t;
+
+#define P2P_IE_FIXED_LEN 6
+
+#define P2P_ATTR_ID_OFF 0
+#define P2P_ATTR_LEN_OFF 1
+#define P2P_ATTR_DATA_OFF 3
+
+#define P2P_ATTR_ID_LEN 1 /* ID field length */
+#define P2P_ATTR_LEN_LEN 2 /* length field length */
+#define P2P_ATTR_HDR_LEN 3 /* ID + 2-byte length field spec 1.02 */
+
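+/*
+ * Illustrative sketch (not part of the driver, hypothetical guard): stepping
+ * through P2P IE attributes, each a 1-byte ID followed by a 2-byte
+ * little-endian length (per the offsets above; little-endian is assumed
+ * from the P2P spec).
+ */
+#ifdef P2P_EXAMPLE_USAGE
+static const uint8 *
+example_p2p_next_attr(const uint8 *attr, const uint8 *end)
+{
+	uint16 attr_len;
+
+	if (attr + P2P_ATTR_HDR_LEN > end)
+		return NULL;
+	attr_len = attr[P2P_ATTR_LEN_OFF] |
+		(attr[P2P_ATTR_LEN_OFF + 1] << 8);
+	attr += P2P_ATTR_HDR_LEN + attr_len;
+	return (attr <= end) ? attr : NULL;
+}
+#endif /* P2P_EXAMPLE_USAGE */
+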
+#define P2P_WFDS_HASH_LEN 6
+#define P2P_WFDS_MAX_SVC_NAME_LEN 32
+
+/* P2P IE Subelement IDs from WiFi P2P Technical Spec 1.00 */
+#define P2P_SEID_STATUS 0 /* Status */
+#define P2P_SEID_MINOR_RC 1 /* Minor Reason Code */
+#define P2P_SEID_P2P_INFO 2 /* P2P Capability (capabilities info) */
+#define P2P_SEID_DEV_ID 3 /* P2P Device ID */
+#define P2P_SEID_INTENT 4 /* Group Owner Intent */
+#define P2P_SEID_CFG_TIMEOUT 5 /* Configuration Timeout */
+#define P2P_SEID_CHANNEL 6 /* Listen channel */
+#define P2P_SEID_GRP_BSSID 7 /* P2P Group BSSID */
+#define P2P_SEID_XT_TIMING 8 /* Extended Listen Timing */
+#define P2P_SEID_INTINTADDR 9 /* Intended P2P Interface Address */
+#define P2P_SEID_P2P_MGBTY 10 /* P2P Manageability */
+#define P2P_SEID_CHAN_LIST 11 /* Channel List */
+#define P2P_SEID_ABSENCE 12 /* Notice of Absence */
+#define P2P_SEID_DEV_INFO 13 /* Device Info */
+#define P2P_SEID_GROUP_INFO 14 /* Group Info */
+#define P2P_SEID_GROUP_ID 15 /* Group ID */
+#define P2P_SEID_P2P_IF 16 /* P2P Interface */
+#define P2P_SEID_OP_CHANNEL 17 /* Operating Channel */
+#define P2P_SEID_INVITE_FLAGS 18 /* Invitation Flags */
+#define P2P_SEID_SERVICE_HASH 21 /* Service hash */
+#define P2P_SEID_SESSION 22 /* Session information */
+#define P2P_SEID_CONNECT_CAP 23 /* Connection capability */
+#define P2P_SEID_ADVERTISE_ID 24 /* Advertisement ID */
+#define P2P_SEID_ADVERTISE_SERVICE 25 /* Advertised service */
+#define P2P_SEID_SESSION_ID 26 /* Session ID */
+#define P2P_SEID_FEATURE_CAP 27 /* Feature capability */
+#define P2P_SEID_PERSISTENT_GROUP 28 /* Persistent group */
+#define P2P_SEID_SESSION_INFO_RESP 29 /* Session Information Response */
+#define P2P_SEID_VNDR 221 /* Vendor-specific subelement */
+
+#define P2P_SE_VS_ID_SERVICES 0x1b /* BRCM proprietary subel: L2 Services */
+
+/* WiFi P2P IE subelement: P2P Capability (capabilities info) */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_info_se_s {
+ uint8 eltId; /* SE ID: P2P_SEID_P2P_INFO */
+ uint8 len[2]; /* SE length not including eltId, len fields */
+ uint8 dev; /* Device Capability Bitmap */
+ uint8 group; /* Group Capability Bitmap */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_info_se_s wifi_p2p_info_se_t;
+
+/* P2P Capability subelement's Device Capability Bitmap bit values */
+#define P2P_CAPSE_DEV_SERVICE_DIS 0x1 /* Service Discovery */
+#define P2P_CAPSE_DEV_CLIENT_DIS 0x2 /* Client Discoverability */
+#define P2P_CAPSE_DEV_CONCURRENT 0x4 /* Concurrent Operation */
+#define P2P_CAPSE_DEV_INFRA_MAN 0x8 /* P2P Infrastructure Managed */
+#define P2P_CAPSE_DEV_LIMIT 0x10 /* P2P Device Limit */
+#define P2P_CAPSE_INVITE_PROC 0x20 /* P2P Invitation Procedure */
+
+/* P2P Capability subelement's Group Capability Bitmap bit values */
+#define P2P_CAPSE_GRP_OWNER 0x1 /* P2P Group Owner */
+#define P2P_CAPSE_PERSIST_GRP 0x2 /* Persistent P2P Group */
+#define P2P_CAPSE_GRP_LIMIT 0x4 /* P2P Group Limit */
+#define P2P_CAPSE_GRP_INTRA_BSS 0x8 /* Intra-BSS Distribution */
+#define P2P_CAPSE_GRP_X_CONNECT 0x10 /* Cross Connection */
+#define P2P_CAPSE_GRP_PERSISTENT 0x20 /* Persistent Reconnect */
+#define P2P_CAPSE_GRP_FORMATION 0x40 /* Group Formation */
+
+/* WiFi P2P IE subelement: Group Owner Intent */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_intent_se_s {
+ uint8 eltId; /* SE ID: P2P_SEID_INTENT */
+ uint8 len[2]; /* SE length not including eltId, len fields */
+ uint8 intent; /* Intent Value 0...15 (0=legacy 15=master only) */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_intent_se_s wifi_p2p_intent_se_t;
+
+/* WiFi P2P IE subelement: Configuration Timeout */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_cfg_tmo_se_s {
+ uint8 eltId; /* SE ID: P2P_SEID_CFG_TIMEOUT */
+ uint8 len[2]; /* SE length not including eltId, len fields */
+ uint8 go_tmo; /* GO config timeout in units of 10 ms */
+ uint8 client_tmo; /* Client config timeout in units of 10 ms */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_cfg_tmo_se_s wifi_p2p_cfg_tmo_se_t;
+
+/* WiFi P2P IE subelement: Listen Channel */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_listen_channel_se_s {
+ uint8 eltId; /* SE ID: P2P_SEID_CHANNEL */
+ uint8 len[2]; /* SE length not including eltId, len fields */
+ uint8 country[3]; /* Country String */
+ uint8 op_class; /* Operating Class */
+ uint8 channel; /* Channel */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_listen_channel_se_s wifi_p2p_listen_channel_se_t;
+
+/* WiFi P2P IE subelement: P2P Group BSSID */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_grp_bssid_se_s {
+ uint8 eltId; /* SE ID: P2P_SEID_GRP_BSSID */
+ uint8 len[2]; /* SE length not including eltId, len fields */
+ uint8 mac[6]; /* P2P group bssid */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_grp_bssid_se_s wifi_p2p_grp_bssid_se_t;
+
+/* WiFi P2P IE subelement: P2P Group ID */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_grp_id_se_s {
+ uint8 eltId; /* SE ID: P2P_SEID_GROUP_ID */
+ uint8 len[2]; /* SE length not including eltId, len fields */
+ uint8 mac[6]; /* P2P device address */
+ uint8 ssid[1]; /* ssid. device id. variable length */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_grp_id_se_s wifi_p2p_grp_id_se_t;
+
+/* WiFi P2P IE subelement: P2P Interface */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_intf_se_s {
+ uint8 eltId; /* SE ID: P2P_SEID_P2P_IF */
+ uint8 len[2]; /* SE length not including eltId, len fields */
+ uint8 mac[6]; /* P2P device address */
+ uint8 ifaddrs; /* P2P Interface Address count */
+ uint8 ifaddr[1][6]; /* P2P Interface Address list */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_intf_se_s wifi_p2p_intf_se_t;
+
+/* WiFi P2P IE subelement: Status */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_status_se_s {
+ uint8 eltId; /* SE ID: P2P_SEID_STATUS */
+ uint8 len[2]; /* SE length not including eltId, len fields */
+ uint8 status; /* Status Code: P2P_STATSE_* */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_status_se_s wifi_p2p_status_se_t;
+
+/* Status subelement Status Code definitions */
+#define P2P_STATSE_SUCCESS 0
+ /* Success */
+#define P2P_STATSE_FAIL_INFO_CURR_UNAVAIL 1
+ /* Failed, information currently unavailable */
+#define P2P_STATSE_PASSED_UP P2P_STATSE_FAIL_INFO_CURR_UNAVAIL
+ /* Old name for above in P2P spec 1.08 and older */
+#define P2P_STATSE_FAIL_INCOMPAT_PARAMS 2
+ /* Failed, incompatible parameters */
+#define P2P_STATSE_FAIL_LIMIT_REACHED 3
+ /* Failed, limit reached */
+#define P2P_STATSE_FAIL_INVALID_PARAMS 4
+ /* Failed, invalid parameters */
+#define P2P_STATSE_FAIL_UNABLE_TO_ACCOM 5
+ /* Failed, unable to accommodate request */
+#define P2P_STATSE_FAIL_PROTO_ERROR 6
+ /* Failed, previous protocol error or disruptive behaviour */
+#define P2P_STATSE_FAIL_NO_COMMON_CHAN 7
+ /* Failed, no common channels */
+#define P2P_STATSE_FAIL_UNKNOWN_GROUP 8
+ /* Failed, unknown P2P Group */
+#define P2P_STATSE_FAIL_INTENT 9
+ /* Failed, both peers indicated Intent 15 in GO Negotiation */
+#define P2P_STATSE_FAIL_INCOMPAT_PROVIS 10
+ /* Failed, incompatible provisioning method */
+#define P2P_STATSE_FAIL_USER_REJECT 11
+ /* Failed, rejected by user */
+#define P2P_STATSE_SUCCESS_USER_ACCEPT 12
+ /* Success, accepted by user */
+
+/* WiFi P2P IE attribute: Extended Listen Timing */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_ext_se_s {
+ uint8 eltId; /* ID: P2P_SEID_XT_TIMING */
+ uint8 len[2]; /* length not including eltId, len fields */
+ uint8 avail[2]; /* availability period */
+ uint8 interval[2]; /* availability interval */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_ext_se_s wifi_p2p_ext_se_t;
+
+#define P2P_EXT_MIN 10 /* minimum 10ms */
+
+/* WiFi P2P IE subelement: Intended P2P Interface Address */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_intintad_se_s {
+ uint8 eltId; /* SE ID: P2P_SEID_INTINTADDR */
+ uint8 len[2]; /* SE length not including eltId, len fields */
+ uint8 mac[6]; /* intended P2P interface MAC address */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_intintad_se_s wifi_p2p_intintad_se_t;
+
+/* WiFi P2P IE subelement: Channel */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_channel_se_s {
+ uint8 eltId; /* SE ID: P2P_SEID_CHANNEL */
+ uint8 len[2]; /* SE length not including eltId, len fields */
+ uint8 band; /* Regulatory Class (band) */
+ uint8 channel; /* Channel */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_channel_se_s wifi_p2p_channel_se_t;
+
+/* Channel Entry structure within the Channel List SE */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_chanlist_entry_s {
+ uint8 band; /* Regulatory Class (band) */
+ uint8 num_channels; /* # of channels in the channel list */
+ uint8 channels[WL_NUMCHANNELS]; /* Channel List */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_chanlist_entry_s wifi_p2p_chanlist_entry_t;
+#define WIFI_P2P_CHANLIST_SE_MAX_ENTRIES 2
+
+/* WiFi P2P IE subelement: Channel List */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_chanlist_se_s {
+ uint8 eltId; /* SE ID: P2P_SEID_CHAN_LIST */
+ uint8 len[2]; /* SE length not including eltId, len fields */
+ uint8 country[3]; /* Country String */
+ uint8 num_entries; /* # of channel entries */
+ wifi_p2p_chanlist_entry_t entries[WIFI_P2P_CHANLIST_SE_MAX_ENTRIES];
+ /* Channel Entry List */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_chanlist_se_s wifi_p2p_chanlist_se_t;
+
+/* WiFi Primary Device Type structure */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_pri_devtype_s {
+ uint16 cat_id; /* Category ID */
+ uint8 OUI[3]; /* WFA OUI: 0x0050F2 */
+ uint8 oui_type; /* WPS_OUI_TYPE */
+ uint16 sub_cat_id; /* Sub Category ID */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_pri_devtype_s wifi_p2p_pri_devtype_t;
+
+/* WiFi P2P Device Info Sub Element Primary Device Type Sub Category
+ * maximum values for each category
+ */
+#define P2P_DISE_SUBCATEGORY_MINVAL 1
+#define P2P_DISE_CATEGORY_COMPUTER 1
+#define P2P_DISE_SUBCATEGORY_COMPUTER_MAXVAL 8
+#define P2P_DISE_CATEGORY_INPUT_DEVICE 2
+#define P2P_DISE_SUBCATEGORY_INPUT_DEVICE_MAXVAL 9
+#define P2P_DISE_CATEGORY_PRINTER 3
+#define P2P_DISE_SUBCATEGORY_PRINTER_MAXVAL 5
+#define P2P_DISE_CATEGORY_CAMERA 4
+#define P2P_DISE_SUBCATEGORY_CAMERA_MAXVAL 4
+#define P2P_DISE_CATEGORY_STORAGE 5
+#define P2P_DISE_SUBCATEGORY_STORAGE_MAXVAL 1
+#define P2P_DISE_CATEGORY_NETWORK_INFRA 6
+#define P2P_DISE_SUBCATEGORY_NETWORK_INFRA_MAXVAL 4
+#define P2P_DISE_CATEGORY_DISPLAY 7
+#define P2P_DISE_SUBCATEGORY_DISPLAY_MAXVAL 4
+#define P2P_DISE_CATEGORY_MULTIMEDIA 8
+#define P2P_DISE_SUBCATEGORY_MULTIMEDIA_MAXVAL 6
+#define P2P_DISE_CATEGORY_GAMING 9
+#define P2P_DISE_SUBCATEGORY_GAMING_MAXVAL 5
+#define P2P_DISE_CATEGORY_TELEPHONE 10
+#define P2P_DISE_SUBCATEGORY_TELEPHONE_MAXVAL 5
+#define P2P_DISE_CATEGORY_AUDIO 11
+#define P2P_DISE_SUBCATEGORY_AUDIO_MAXVAL 6
+
+/* WiFi P2P IE's Device Info subelement */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_devinfo_se_s {
+ uint8 eltId; /* SE ID: P2P_SEID_DEV_INFO */
+ uint8 len[2]; /* SE length not including eltId, len fields */
+ uint8 mac[6]; /* P2P Device MAC address */
+ uint16 wps_cfg_meths; /* Config Methods: reg_prototlv.h WPS_CONFMET_* */
+ uint8 pri_devtype[8]; /* Primary Device Type */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_devinfo_se_s wifi_p2p_devinfo_se_t;
+
+#define P2P_DEV_TYPE_LEN 8
+
+/* WiFi P2P IE's Group Info subelement Client Info Descriptor */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_cid_fixed_s {
+ uint8 len;
+ uint8 devaddr[ETHER_ADDR_LEN]; /* P2P Device Address */
+ uint8 ifaddr[ETHER_ADDR_LEN]; /* P2P Interface Address */
+ uint8 devcap; /* Device Capability */
+ uint8 cfg_meths[2]; /* Config Methods: reg_prototlv.h WPS_CONFMET_* */
+ uint8 pridt[P2P_DEV_TYPE_LEN]; /* Primary Device Type */
+ uint8 secdts; /* Number of Secondary Device Types */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_cid_fixed_s wifi_p2p_cid_fixed_t;
+
+/* WiFi P2P IE's Device ID subelement */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_devid_se_s {
+ uint8 eltId;
+ uint8 len[2];
+ struct ether_addr addr; /* P2P Device MAC address */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_devid_se_s wifi_p2p_devid_se_t;
+
+/* WiFi P2P IE subelement: P2P Manageability */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_mgbt_se_s {
+ uint8 eltId; /* SE ID: P2P_SEID_P2P_MGBTY */
+ uint8 len[2]; /* SE length not including eltId, len fields */
+ uint8 mg_bitmap; /* manageability bitmap */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_mgbt_se_s wifi_p2p_mgbt_se_t;
+/* mg_bitmap field bit values */
+#define P2P_MGBTSE_P2PDEVMGMT_FLAG 0x1 /* AP supports Managed P2P Device */
+
+/* WiFi P2P IE subelement: Group Info */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_grpinfo_se_s {
+ uint8 eltId; /* SE ID: P2P_SEID_GROUP_INFO */
+ uint8 len[2]; /* SE length not including eltId, len fields */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_grpinfo_se_s wifi_p2p_grpinfo_se_t;
+
+/* WiFi IE subelement: Operating Channel */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_op_channel_se_s {
+ uint8 eltId; /* SE ID: P2P_SEID_OP_CHANNEL */
+ uint8 len[2]; /* SE length not including eltId, len fields */
+ uint8 country[3]; /* Country String */
+ uint8 op_class; /* Operating Class */
+ uint8 channel; /* Channel */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_op_channel_se_s wifi_p2p_op_channel_se_t;
+
+/* WiFi IE subelement: INVITATION FLAGS */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_invite_flags_se_s {
+ uint8 eltId; /* SE ID: P2P_SEID_INVITE_FLAGS */
+ uint8 len[2]; /* SE length not including eltId, len fields */
+ uint8 flags; /* Flags */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_invite_flags_se_s wifi_p2p_invite_flags_se_t;
+
+/* WiFi P2P IE subelement: Service Hash */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_serv_hash_se_s {
+ uint8 eltId; /* SE ID: P2P_SEID_SERVICE_HASH */
+ uint8 len[2]; /* SE length not including eltId, len fields
+ * in multiple of 6 Bytes
+ */
+ uint8 hash[1]; /* Variable length - SHA256 hash of
+ * service names (may contain more than one hash)
+ */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_serv_hash_se_s wifi_p2p_serv_hash_se_t;
+
+/* WiFi P2P IE subelement: Service Instance Data */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_serv_inst_data_se_s {
+ uint8 eltId; /* SE ID: P2P_SEID_SESSION */
+ uint8 len[2]; /* SE length not including eltId, len */
+ uint8 ssn_info[1]; /* Variable length - Session information as specified by
+ * the service layer, type matches serv. name
+ */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_serv_inst_data_se_s wifi_p2p_serv_inst_data_se_t;
+
+/* WiFi P2P IE subelement: Connection capability */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_conn_cap_data_se_s {
+ uint8 eltId; /* SE ID: P2P_SEID_CONNECT_CAP */
+ uint8 len[2]; /* SE length not including eltId, len */
+ uint8 conn_cap; /* 1byte capability as specified by the
+ * service layer, valid bitmask/values
+ */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_conn_cap_data_se_s wifi_p2p_conn_cap_data_se_t;
+
+/* WiFi P2P IE subelement: Advertisement ID */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_advt_id_se_s {
+ uint8 eltId; /* SE ID: P2P_SEID_ADVERTISE_ID */
+ uint8 len[2]; /* SE length not including eltId, len fixed 4 Bytes */
+ uint8 advt_id[4]; /* 4byte Advertisement ID of the peer device sent in
+ * PROV Disc in Network byte order
+ */
+ uint8 advt_mac[6]; /* P2P device address of the service advertiser */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_advt_id_se_s wifi_p2p_advt_id_se_t;
+
+/* WiFi P2P IE subelement: Advertised Service Info */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_adv_serv_info_s {
+ uint8 advt_id[4]; /* SE Advertise ID for the service */
+ uint16 nw_cfg_method; /* SE Network Config method for the service */
+ uint8 serv_name_len; /* SE length of the service name */
+ uint8 serv_name[1]; /* Variable length service name field */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_adv_serv_info_s wifi_p2p_adv_serv_info_t;
+
+/* WiFi P2P IE subelement: Advertised Service */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_advt_serv_se_s {
+ uint8 eltId; /* SE ID: P2P_SEID_ADVERTISE_SERVICE */
+ uint8 len[2]; /* SE length not including eltId, len fields; a multiple of
+ * the wifi_p2p_adv_serv_info_t entry length
+ */
+ wifi_p2p_adv_serv_info_t p_advt_serv_info[1]; /* Variable-length list of
+ * advertise service info
+ * entries
+ */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_advt_serv_se_s wifi_p2p_advt_serv_se_t;
+
+/* WiFi P2P IE subelement: Session ID */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_ssn_id_se_s {
+ uint8 eltId; /* SE ID: P2P_SEID_SESSION_ID */
+ uint8 len[2]; /* SE length not including eltId, len fixed 4 Bytes */
+ uint8 ssn_id[4]; /* 4byte Session ID of the peer device sent in
+ * PROV Disc in Network byte order
+ */
+ uint8 ssn_mac[6]; /* P2P device address of the seeker - session mac */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_ssn_id_se_s wifi_p2p_ssn_id_se_t;
+
+#define P2P_ADVT_SERV_SE_FIXED_LEN 3 /* Includes only the element ID and len */
+#define P2P_ADVT_SERV_INFO_FIXED_LEN 7 /* Per ADV Service Instance advt_id +
+ * nw_config_method + serv_name_len
+ */
+
+/* WiFi P2P Action Frame */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_action_frame {
+ uint8 category; /* P2P_AF_CATEGORY */
+ uint8 OUI[3]; /* OUI - P2P_OUI */
+ uint8 type; /* OUI Type - P2P_VER */
+ uint8 subtype; /* OUI Subtype - P2P_AF_* */
+ uint8 dialog_token; /* nonzero, identifies req/resp transaction */
+ uint8 elts[1]; /* Variable length information elements. Max size =
+ * ACTION_FRAME_SIZE - sizeof(this structure) - 1
+ */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_action_frame wifi_p2p_action_frame_t;
+#define P2P_AF_CATEGORY 0x7f
+
+#define P2P_AF_FIXED_LEN 7
+
+/* WiFi P2P Action Frame OUI Subtypes */
+#define P2P_AF_NOTICE_OF_ABSENCE 0 /* Notice of Absence */
+#define P2P_AF_PRESENCE_REQ 1 /* P2P Presence Request */
+#define P2P_AF_PRESENCE_RSP 2 /* P2P Presence Response */
+#define P2P_AF_GO_DISC_REQ 3 /* GO Discoverability Request */
+
+/* WiFi P2P Public Action Frame */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_pub_act_frame {
+ uint8 category; /* P2P_PUB_AF_CATEGORY */
+ uint8 action; /* P2P_PUB_AF_ACTION */
+ uint8 oui[3]; /* P2P_OUI */
+ uint8 oui_type; /* OUI type - P2P_VER */
+ uint8 subtype; /* OUI subtype - P2P_TYPE_* */
+ uint8 dialog_token; /* nonzero, identifies req/rsp transaction */
+ uint8 elts[1]; /* Variable length information elements. Max size =
+ * ACTION_FRAME_SIZE - sizeof(this structure) - 1
+ */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_pub_act_frame wifi_p2p_pub_act_frame_t;
+#define P2P_PUB_AF_FIXED_LEN 8
+#define P2P_PUB_AF_CATEGORY 0x04
+#define P2P_PUB_AF_ACTION 0x09
+
+/* WiFi P2P Public Action Frame OUI Subtypes */
+#define P2P_PAF_GON_REQ 0 /* Group Owner Negotiation Req */
+#define P2P_PAF_GON_RSP 1 /* Group Owner Negotiation Rsp */
+#define P2P_PAF_GON_CONF 2 /* Group Owner Negotiation Confirm */
+#define P2P_PAF_INVITE_REQ 3 /* P2P Invitation Request */
+#define P2P_PAF_INVITE_RSP 4 /* P2P Invitation Response */
+#define P2P_PAF_DEVDIS_REQ 5 /* Device Discoverability Request */
+#define P2P_PAF_DEVDIS_RSP 6 /* Device Discoverability Response */
+#define P2P_PAF_PROVDIS_REQ 7 /* Provision Discovery Request */
+#define P2P_PAF_PROVDIS_RSP 8 /* Provision Discovery Response */
+#define P2P_PAF_SUBTYPE_INVALID 255 /* Invalid Subtype */
+
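+/* Illustrative sketch (editor's example, not part of the original source):
+ * a minimal validity check for a received P2P public action frame body,
+ * assuming P2P_OUI and P2P_VER are defined earlier in this header and that
+ * memcmp() is available:
+ *
+ *	static bool
+ *	is_p2p_pub_act_frame(const uint8 *body, uint len)
+ *	{
+ *		const wifi_p2p_pub_act_frame_t *af = (const void *)body;
+ *		return (len >= P2P_PUB_AF_FIXED_LEN &&
+ *			af->category == P2P_PUB_AF_CATEGORY &&
+ *			af->action == P2P_PUB_AF_ACTION &&
+ *			memcmp(af->oui, P2P_OUI, 3) == 0 &&
+ *			af->oui_type == P2P_VER &&
+ *			af->subtype <= P2P_PAF_PROVDIS_RSP);
+ *	}
+ */
+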
+/* TODO: Stop using these obsolete aliases for P2P_PAF_GON_* */
+#define P2P_TYPE_MNREQ P2P_PAF_GON_REQ
+#define P2P_TYPE_MNRSP P2P_PAF_GON_RSP
+#define P2P_TYPE_MNCONF P2P_PAF_GON_CONF
+
+/* WiFi P2P IE subelement: Notice of Absence */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_noa_desc {
+ uint8 cnt_type; /* Count/Type */
+ uint32 duration; /* Duration */
+ uint32 interval; /* Interval */
+ uint32 start; /* Start Time */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_noa_desc wifi_p2p_noa_desc_t;
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_noa_se {
+ uint8 eltId; /* Subelement ID */
+ uint8 len[2]; /* Length */
+ uint8 index; /* Index */
+ uint8 ops_ctw_parms; /* CTWindow and OppPS Parameters */
+ wifi_p2p_noa_desc_t desc[1]; /* Notice of Absence Descriptor(s) */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_noa_se wifi_p2p_noa_se_t;
+
+#define P2P_NOA_SE_FIXED_LEN 5
+
+#define P2P_NOA_SE_MAX_DESC 2 /* max NoA descriptors in presence request */
+
+/* cnt_type field values */
+#define P2P_NOA_DESC_CNT_RESERVED 0 /* reserved and should not be used */
+#define P2P_NOA_DESC_CNT_REPEAT 255 /* continuous schedule */
+#define P2P_NOA_DESC_TYPE_PREFERRED 1 /* preferred values */
+#define P2P_NOA_DESC_TYPE_ACCEPTABLE 2 /* acceptable limits */
+
+/* ctw_ops_parms field values */
+#define P2P_NOA_CTW_MASK 0x7f
+#define P2P_NOA_OPS_MASK 0x80
+#define P2P_NOA_OPS_SHIFT 7
+
+#define P2P_CTW_MIN 10 /* minimum 10TU */
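+
+/* Illustrative sketch (editor's example, not part of the original source):
+ * decoding the CTWindow/OppPS byte of a NoA subelement and computing the
+ * value of its 2-byte len field for n descriptors, assuming the len field
+ * follows the same "not including eltId, len" convention as the other
+ * subelements above:
+ *
+ *	uint8 ctw = se->ops_ctw_parms & P2P_NOA_CTW_MASK;
+ *	uint8 opps = (se->ops_ctw_parms & P2P_NOA_OPS_MASK) >> P2P_NOA_OPS_SHIFT;
+ *	uint16 se_len = 2 + n * sizeof(wifi_p2p_noa_desc_t);   (n <= P2P_NOA_SE_MAX_DESC)
+ */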
+
+/*
+ * P2P Service Discovery related
+ */
+#define P2PSD_ACTION_CATEGORY 0x04
+ /* Public action frame */
+#define P2PSD_ACTION_ID_GAS_IREQ 0x0a
+ /* Action value for GAS Initial Request AF */
+#define P2PSD_ACTION_ID_GAS_IRESP 0x0b
+ /* Action value for GAS Initial Response AF */
+#define P2PSD_ACTION_ID_GAS_CREQ 0x0c
+ /* Action value for GAS Comeback Request AF */
+#define P2PSD_ACTION_ID_GAS_CRESP 0x0d
+ /* Action value for GAS Comeback Response AF */
+#define P2PSD_AD_EID 0x6c
+ /* Advertisement Protocol IE ID */
+#define P2PSD_ADP_TUPLE_QLMT_PAMEBI 0x00
+ /* Query Response Length Limit 7 bits plus PAME-BI 1 bit */
+#define P2PSD_ADP_PROTO_ID 0x00
+ /* Advertisement Protocol ID. Always 0 for P2P SD */
+#define P2PSD_GAS_OUI P2P_OUI
+ /* WFA OUI */
+#define P2PSD_GAS_OUI_SUBTYPE P2P_VER
+ /* OUI Subtype for GAS IE */
+#define P2PSD_GAS_NQP_INFOID 0xDDDD
+ /* NQP Query Info ID: 56797 */
+#define P2PSD_GAS_COMEBACKDEALY 0x00
+ /* Not used in the Native GAS protocol */
+
+/* Service Protocol Type */
+typedef enum p2psd_svc_protype {
+ SVC_RPOTYPE_ALL = 0,
+ SVC_RPOTYPE_BONJOUR = 1,
+ SVC_RPOTYPE_UPNP = 2,
+ SVC_RPOTYPE_WSD = 3,
+ SVC_RPOTYPE_WFDS = 11,
+ SVC_RPOTYPE_VENDOR = 255
+} p2psd_svc_protype_t;
+
+/* Service Discovery response status code */
+typedef enum {
+ P2PSD_RESP_STATUS_SUCCESS = 0,
+ P2PSD_RESP_STATUS_PROTYPE_NA = 1,
+ P2PSD_RESP_STATUS_DATA_NA = 2,
+ P2PSD_RESP_STATUS_BAD_REQUEST = 3
+} p2psd_resp_status_t;
+
+/* Advertisement Protocol IE tuple field */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_adp_tpl {
+ uint8 llm_pamebi; /* Query Response Length Limit (bits 0-6, set to 0) plus
+ * Pre-Associated Message Exchange BSSID Independent (bit 7, set to 0)
+ */
+ uint8 adp_id; /* Advertisement Protocol ID: 0 for NQP Native Query Protocol */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_adp_tpl wifi_p2psd_adp_tpl_t;
+
+/* Advertisement Protocol IE */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_adp_ie {
+ uint8 id; /* IE ID: 0x6c - 108 */
+ uint8 len; /* IE length */
+ wifi_p2psd_adp_tpl_t adp_tpl; /* Advertisement Protocol Tuple field. Only one
+ * tuple is defined for P2P Service Discovery
+ */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_adp_ie wifi_p2psd_adp_ie_t;
+
+/* NQP Vendor-specific Content */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_nqp_query_vsc {
+ uint8 oui_subtype; /* OUI Subtype: 0x09 */
+ uint16 svc_updi; /* Service Update Indicator */
+ uint8 svc_tlvs[1]; /* wifi_p2psd_qreq_tlv_t type for service request,
+ * wifi_p2psd_qresp_tlv_t type for service response
+ */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_nqp_query_vsc wifi_p2psd_nqp_query_vsc_t;
+
+/* Service Request TLV */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_qreq_tlv {
+ uint16 len; /* Length: 5 plus size of Query Data */
+ uint8 svc_prot; /* Service Protocol Type */
+ uint8 svc_tscid; /* Service Transaction ID */
+ uint8 query_data[1]; /* Query Data, passed in from above Layer 2 */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_qreq_tlv wifi_p2psd_qreq_tlv_t;
+
+/* Query Request Frame, defined in generic format, instead of NQP specific */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_qreq_frame {
+ uint16 info_id; /* Info ID: 0xDDDD */
+ uint16 len; /* Length of service request TLV, 5 plus the size of request data */
+ uint8 oui[3]; /* WFA OUI: 0x0050F2 */
+ uint8 qreq_vsc[1]; /* Vendor-specific Content: wifi_p2psd_nqp_query_vsc_t type for NQP */
+
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_qreq_frame wifi_p2psd_qreq_frame_t;
+
+/* GAS Initial Request AF body, "elts" in wifi_p2p_pub_act_frame */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_gas_ireq_frame {
+ wifi_p2psd_adp_ie_t adp_ie; /* Advertisement Protocol IE */
+ uint16 qreq_len; /* Query Request Length */
+ uint8 qreq_frm[1]; /* Query Request Frame wifi_p2psd_qreq_frame_t */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_gas_ireq_frame wifi_p2psd_gas_ireq_frame_t;
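+
+/* Illustrative sketch (editor's example, not part of the original source):
+ * the GAS Initial Request body is self-describing, so its total size can be
+ * computed from the fixed part plus the query request length, assuming the
+ * ltoh16 little-endian helper from bcmendian.h:
+ *
+ *	uint total = sizeof(wifi_p2psd_adp_ie_t) + sizeof(uint16) +
+ *	             ltoh16(ireq->qreq_len);
+ */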
+
+/* Service Response TLV */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_qresp_tlv {
+ uint16 len; /* Length: 5 plus size of Query Data */
+ uint8 svc_prot; /* Service Protocol Type */
+ uint8 svc_tscid; /* Service Transaction ID */
+ uint8 status; /* Value defined in Table 57 of P2P spec. */
+ uint8 query_data[1]; /* Response Data, passed in from above Layer 2 */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_qresp_tlv wifi_p2psd_qresp_tlv_t;
+
+/* Query Response Frame, defined in generic format, instead of NQP specific */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_qresp_frame {
+ uint16 info_id; /* Info ID: 0xDDDD */
+ uint16 len; /* Length of service response TLV, 6 plus the size of resp data */
+ uint8 oui[3]; /* WFA OUI: 0x0050F2 */
+ uint8 qresp_vsc[1]; /* Vendor-specific Content: wifi_p2psd_qresp_tlv_t type for NQP */
+
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_qresp_frame wifi_p2psd_qresp_frame_t;
+
+/* GAS Initial Response AF body, "elts" in wifi_p2p_pub_act_frame */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_gas_iresp_frame {
+ uint16 status; /* Value defined in Table 7-23 of IEEE P802.11u */
+ uint16 cb_delay; /* GAS Comeback Delay */
+ wifi_p2psd_adp_ie_t adp_ie; /* Advertisement Protocol IE */
+ uint16 qresp_len; /* Query Response Length */
+ uint8 qresp_frm[1]; /* Query Response Frame wifi_p2psd_qresp_frame_t */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_gas_iresp_frame wifi_p2psd_gas_iresp_frame_t;
+
+/* GAS Comeback Response AF body, "elts" in wifi_p2p_pub_act_frame */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_gas_cresp_frame {
+ uint16 status; /* Value defined in Table 7-23 of IEEE P802.11u */
+ uint8 fragment_id; /* Fragmentation ID */
+ uint16 cb_delay; /* GAS Comeback Delay */
+ wifi_p2psd_adp_ie_t adp_ie; /* Advertisement Protocol IE */
+ uint16 qresp_len; /* Query Response Length */
+ uint8 qresp_frm[1]; /* Query Response Frame wifi_p2psd_qresp_frame_t */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_gas_cresp_frame wifi_p2psd_gas_cresp_frame_t;
+
+/* Wi-Fi GAS Public Action Frame */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_gas_pub_act_frame {
+ uint8 category; /* 0x04 Public Action Frame */
+ uint8 action; /* GAS action: one of P2PSD_ACTION_ID_GAS_* */
+ uint8 dialog_token; /* nonzero, identifies req/rsp transaction */
+ uint8 query_data[1]; /* Query Data. wifi_p2psd_gas_ireq_frame_t
+ * or wifi_p2psd_gas_iresp_frame_t format
+ */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_gas_pub_act_frame wifi_p2psd_gas_pub_act_frame_t;
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _P2P_H_ */
diff --git a/bcmdhd.101.10.361.x/include/packed_section_end.h b/bcmdhd.101.10.361.x/include/packed_section_end.h
new file mode 100755
index 0000000..fcdad85
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/packed_section_end.h
@@ -0,0 +1,62 @@
+/*
+ * Declare directives for structure packing. No padding will be provided
+ * between the members of packed structures, and therefore, there is no
+ * guarantee that structure members will be aligned.
+ *
+ * Declaring packed structures is compiler specific. In order to handle all
+ * cases, packed structures should be declared as:
+ *
+ * #include <packed_section_start.h>
+ *
+ * typedef BWL_PRE_PACKED_STRUCT struct foobar_t {
+ * some_struct_members;
+ * } BWL_POST_PACKED_STRUCT foobar_t;
+ *
+ * #include <packed_section_end.h>
+ *
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+/* Error check - BWL_PACKED_SECTION is defined in packed_section_start.h
+ * and undefined in packed_section_end.h. If it is NOT defined at this
+ * point, then there is a missing include of packed_section_start.h.
+ */
+#ifdef BWL_PACKED_SECTION
+ #undef BWL_PACKED_SECTION
+#else
+ #error "BWL_PACKED_SECTION is NOT defined!"
+#endif
+
+#if defined(_MSC_VER)
+#pragma warning(disable:4103)
+#pragma pack(pop)
+#endif
+
+#if defined(__GNUC__) && defined(EFI)
+#pragma pack(pop)
+#endif
+
+/* Compiler-specific directives for structure packing are declared in
+ * packed_section_start.h. This marks the end of the structure packing section,
+ * so, undef them here.
+ */
+#undef BWL_PRE_PACKED_STRUCT
+#undef BWL_POST_PACKED_STRUCT
diff --git a/bcmdhd.101.10.361.x/include/packed_section_start.h b/bcmdhd.101.10.361.x/include/packed_section_start.h
new file mode 100755
index 0000000..d6c35a2
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/packed_section_start.h
@@ -0,0 +1,117 @@
+/*
+ * Declare directives for structure packing. No padding will be provided
+ * between the members of packed structures, and therefore, there is no
+ * guarantee that structure members will be aligned.
+ *
+ * Declaring packed structures is compiler specific. In order to handle all
+ * cases, packed structures should be declared as:
+ *
+ * #include <packed_section_start.h>
+ *
+ * typedef BWL_PRE_PACKED_STRUCT struct foobar_t {
+ * some_struct_members;
+ * } BWL_POST_PACKED_STRUCT foobar_t;
+ *
+ * #include <packed_section_end.h>
+ *
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+/* EFI does not support STATIC_ASSERT */
+#if defined(EFI)
+#define _alignment_test_
+#endif /* EFI */
+
+#ifndef _alignment_test_
+#define _alignment_test_
+
+/* ASSERT default packing */
+typedef struct T4 {
+ uint8 a;
+ uint32 b;
+ uint16 c;
+ uint8 d;
+} T4_t;
+
+/* 4 byte alignment support */
+/*
+* a . . .
+* b b b b
+* c c d .
+*/
+
+/*
+ * The function below verifies that this file is compiled with the default
+ * alignment of 4; it will fail to compile if the condition is not met.
+ */
+#ifdef __GNUC__
+#define VARIABLE_IS_NOT_USED __attribute__ ((unused))
+#else
+#define VARIABLE_IS_NOT_USED
+#endif
+static void alignment_test(void);
+static void
+VARIABLE_IS_NOT_USED alignment_test(void)
+{
+ /* verify 4 byte alignment support */
+ STATIC_ASSERT(sizeof(T4_t) == 12);
+}
+#endif /* _alignment_test_ */
+
+/* Error check - BWL_PACKED_SECTION is defined in packed_section_start.h
+ * and undefined in packed_section_end.h. If it is already defined at this
+ * point, then there is a missing include of packed_section_end.h.
+ */
+#ifdef BWL_PACKED_SECTION
+ #error "BWL_PACKED_SECTION is already defined!"
+#else
+ #define BWL_PACKED_SECTION
+#endif
+
+#if defined(BWL_DEFAULT_PACKING)
+ /* generate an error if BWL_DEFAULT_PACKING is defined */
+ #error "BWL_DEFAULT_PACKING not supported any more."
+#endif /* BWL_DEFAULT_PACKING */
+
+#if defined(_MSC_VER)
+#pragma warning(disable:4103)
+#pragma pack(push)
+#pragma pack(1)
+#endif
+
+#if defined(__GNUC__) && defined(EFI)
+#pragma pack(push)
+#pragma pack(1)
+#endif
+
+/* Declare compiler-specific directives for structure packing. */
+#if defined(_MSC_VER)
+ #define BWL_PRE_PACKED_STRUCT
+ #define BWL_POST_PACKED_STRUCT
+#elif defined(__GNUC__) || defined(__lint)
+ #define BWL_PRE_PACKED_STRUCT
+ #define BWL_POST_PACKED_STRUCT __attribute__ ((packed))
+#elif defined(__CC_ARM)
+ #define BWL_PRE_PACKED_STRUCT __packed
+ #define BWL_POST_PACKED_STRUCT
+#else
+ #error "Unknown compiler!"
+#endif
diff --git a/bcmdhd.101.10.361.x/include/pcicfg.h b/bcmdhd.101.10.361.x/include/pcicfg.h
new file mode 100755
index 0000000..663be79
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/pcicfg.h
@@ -0,0 +1,730 @@
+/*
+ * pcicfg.h: PCI configuration constants and structures.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _h_pcicfg_
+#define _h_pcicfg_
+
+/* The following inside ifndef's so we don't collide with NTDDK.H */
+#ifndef PCI_MAX_BUS
+#define PCI_MAX_BUS 0x100
+#endif
+#ifndef PCI_MAX_DEVICES
+#define PCI_MAX_DEVICES 0x20
+#endif
+#ifndef PCI_MAX_FUNCTION
+#define PCI_MAX_FUNCTION 0x8
+#endif
+
+#ifndef PCI_INVALID_VENDORID
+#define PCI_INVALID_VENDORID 0xffff
+#endif
+#ifndef PCI_INVALID_DEVICEID
+#define PCI_INVALID_DEVICEID 0xffff
+#endif
+
+/* Convert between bus-slot-function-register and config addresses */
+
+#define PCICFG_BUS_SHIFT 16 /* Bus shift */
+#define PCICFG_SLOT_SHIFT 11 /* Slot shift */
+#define PCICFG_FUN_SHIFT 8 /* Function shift */
+#define PCICFG_OFF_SHIFT 0 /* Register shift */
+
+#define PCICFG_BUS_MASK 0xff /* Bus mask */
+#define PCICFG_SLOT_MASK 0x1f /* Slot mask */
+#define PCICFG_FUN_MASK 7 /* Function mask */
+#define PCICFG_OFF_MASK 0xff /* Register mask */
+
+#define PCI_CONFIG_ADDR(b, s, f, o) \
+ ((((b) & PCICFG_BUS_MASK) << PCICFG_BUS_SHIFT) \
+ | (((s) & PCICFG_SLOT_MASK) << PCICFG_SLOT_SHIFT) \
+ | (((f) & PCICFG_FUN_MASK) << PCICFG_FUN_SHIFT) \
+ | (((o) & PCICFG_OFF_MASK) << PCICFG_OFF_SHIFT))
+
+#define PCI_CONFIG_BUS(a) (((a) >> PCICFG_BUS_SHIFT) & PCICFG_BUS_MASK)
+#define PCI_CONFIG_SLOT(a) (((a) >> PCICFG_SLOT_SHIFT) & PCICFG_SLOT_MASK)
+#define PCI_CONFIG_FUN(a) (((a) >> PCICFG_FUN_SHIFT) & PCICFG_FUN_MASK)
+#define PCI_CONFIG_OFF(a) (((a) >> PCICFG_OFF_SHIFT) & PCICFG_OFF_MASK)
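+
+/* Illustrative sketch (editor's example, not part of the original source):
+ * composing and then decomposing a legacy PCI config address; 0x10 is the
+ * BAR0 offset (PCI_CFG_BAR0, defined further down in this file):
+ *
+ *	uint32 addr = PCI_CONFIG_ADDR(1, 0, 0, 0x10);
+ *	then PCI_CONFIG_BUS(addr) == 1 and PCI_CONFIG_OFF(addr) == 0x10
+ */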
+
+/* PCIE Config space accessing MACROS */
+
+#define PCIECFG_BUS_SHIFT 24 /* Bus shift */
+#define PCIECFG_SLOT_SHIFT 19 /* Slot/Device shift */
+#define PCIECFG_FUN_SHIFT 16 /* Function shift */
+#define PCIECFG_OFF_SHIFT 0 /* Register shift */
+
+#define PCIECFG_BUS_MASK 0xff /* Bus mask */
+#define PCIECFG_SLOT_MASK 0x1f /* Slot/Device mask */
+#define PCIECFG_FUN_MASK 7 /* Function mask */
+#define PCIECFG_OFF_MASK 0xfff /* Register mask */
+
+#define PCIE_CONFIG_ADDR(b, s, f, o) \
+ ((((b) & PCIECFG_BUS_MASK) << PCIECFG_BUS_SHIFT) \
+ | (((s) & PCIECFG_SLOT_MASK) << PCIECFG_SLOT_SHIFT) \
+ | (((f) & PCIECFG_FUN_MASK) << PCIECFG_FUN_SHIFT) \
+ | (((o) & PCIECFG_OFF_MASK) << PCIECFG_OFF_SHIFT))
+
+#define PCIE_CONFIG_BUS(a) (((a) >> PCIECFG_BUS_SHIFT) & PCIECFG_BUS_MASK)
+#define PCIE_CONFIG_SLOT(a) (((a) >> PCIECFG_SLOT_SHIFT) & PCIECFG_SLOT_MASK)
+#define PCIE_CONFIG_FUN(a) (((a) >> PCIECFG_FUN_SHIFT) & PCIECFG_FUN_MASK)
+#define PCIE_CONFIG_OFF(a) (((a) >> PCIECFG_OFF_SHIFT) & PCIECFG_OFF_MASK)
+
+/* The actual config space */
+
+#define PCI_BAR_MAX 6
+
+#define PCI_ROM_BAR 8
+
+#define PCR_RSVDA_MAX 2
+
+/* Bits in PCI bars' flags */
+
+#define PCIBAR_FLAGS 0xf
+#define PCIBAR_IO 0x1
+#define PCIBAR_MEM1M 0x2
+#define PCIBAR_MEM64 0x4
+#define PCIBAR_PREFETCH 0x8
+#define PCIBAR_MEM32_MASK 0xFFFFFF80
+
+typedef struct _pci_config_regs {
+ uint16 vendor;
+ uint16 device;
+ uint16 command;
+ uint16 status;
+ uint8 rev_id;
+ uint8 prog_if;
+ uint8 sub_class;
+ uint8 base_class;
+ uint8 cache_line_size;
+ uint8 latency_timer;
+ uint8 header_type;
+ uint8 bist;
+ uint32 base[PCI_BAR_MAX];
+ uint32 cardbus_cis;
+ uint16 subsys_vendor;
+ uint16 subsys_id;
+ uint32 baserom;
+ uint32 rsvd_a[PCR_RSVDA_MAX];
+ uint8 int_line;
+ uint8 int_pin;
+ uint8 min_gnt;
+ uint8 max_lat;
+ uint8 dev_dep[192];
+} pci_config_regs;
+
+#define SZPCR (sizeof (pci_config_regs))
+#define MINSZPCR 64 /* offsetof(dev_dep[0]) */
+
+/* pci config status reg has a bit to indicate that capability ptr is present */
+
+#define PCI_CAPPTR_PRESENT 0x0010
+
+/* A structure for the config registers is nice, but in most
+ * systems the config space is not memory mapped, so we need
+ * field offsets. :-(
+ */
+#define PCI_CFG_VID 0
+#define PCI_CFG_DID 2
+#define PCI_CFG_CMD 4
+#define PCI_CFG_STAT 6
+#define PCI_CFG_REV 8
+#define PCI_CFG_PROGIF 9
+#define PCI_CFG_SUBCL 0xa
+#define PCI_CFG_BASECL 0xb
+#define PCI_CFG_CLSZ 0xc
+#define PCI_CFG_LATTIM 0xd
+#define PCI_CFG_HDR 0xe
+#define PCI_CFG_BIST 0xf
+#define PCI_CFG_BAR0 0x10
+#define PCI_CFG_BAR1 0x18
+#define PCI_CFG_BAR2 0x20
+#define PCI_CFG_CIS 0x28
+#define PCI_CFG_SVID 0x2c
+#define PCI_CFG_SSID 0x2e
+#define PCI_CFG_ROMBAR 0x30
+#define PCI_CFG_CAPPTR 0x34
+#define PCI_CFG_INT 0x3c
+#define PCI_CFG_PIN 0x3d
+#define PCI_CFG_MINGNT 0x3e
+#define PCI_CFG_MAXLAT 0x3f
+#define PCI_CFG_DEVCTRL 0xd8
+#define PCI_CFG_TLCNTRL_5 0x814
+#define PCI_CFG_ERRATTN_MASK_FN0 0x8a0
+#define PCI_CFG_ERRATTN_STATUS_FN0 0x8a4
+#define PCI_CFG_ERRATTN_MASK_FN1 0x8a8
+#define PCI_CFG_ERRATTN_STATUS_FN1 0x8ac
+#define PCI_CFG_ERRATTN_MASK_CMN 0x8b0
+#define PCI_CFG_ERRATTN_STATUS_CMN 0x8b4
+
+#ifdef EFI
+#undef PCI_CLASS_BRIDGE
+#undef PCI_CLASS_OLD
+#undef PCI_CLASS_DISPLAY
+#undef PCI_CLASS_SERIAL
+#undef PCI_CLASS_SATELLITE
+#endif /* EFI */
+
+/* Classes and subclasses */
+
+typedef enum {
+ PCI_CLASS_OLD = 0,
+ PCI_CLASS_DASDI,
+ PCI_CLASS_NET,
+ PCI_CLASS_DISPLAY,
+ PCI_CLASS_MMEDIA,
+ PCI_CLASS_MEMORY,
+ PCI_CLASS_BRIDGE,
+ PCI_CLASS_COMM,
+ PCI_CLASS_BASE,
+ PCI_CLASS_INPUT,
+ PCI_CLASS_DOCK,
+ PCI_CLASS_CPU,
+ PCI_CLASS_SERIAL,
+ PCI_CLASS_INTELLIGENT = 0xe,
+ PCI_CLASS_SATELLITE,
+ PCI_CLASS_CRYPT,
+ PCI_CLASS_DSP,
+ PCI_CLASS_XOR = 0xfe
+} pci_classes;
+
+typedef enum {
+ PCI_DASDI_SCSI,
+ PCI_DASDI_IDE,
+ PCI_DASDI_FLOPPY,
+ PCI_DASDI_IPI,
+ PCI_DASDI_RAID,
+ PCI_DASDI_OTHER = 0x80
+} pci_dasdi_subclasses;
+
+typedef enum {
+ PCI_NET_ETHER,
+ PCI_NET_TOKEN,
+ PCI_NET_FDDI,
+ PCI_NET_ATM,
+ PCI_NET_OTHER = 0x80
+} pci_net_subclasses;
+
+typedef enum {
+ PCI_DISPLAY_VGA,
+ PCI_DISPLAY_XGA,
+ PCI_DISPLAY_3D,
+ PCI_DISPLAY_OTHER = 0x80
+} pci_display_subclasses;
+
+typedef enum {
+ PCI_MMEDIA_VIDEO,
+ PCI_MMEDIA_AUDIO,
+ PCI_MMEDIA_PHONE,
+ PCI_MEDIA_OTHER = 0x80
+} pci_mmedia_subclasses;
+
+typedef enum {
+ PCI_MEMORY_RAM,
+ PCI_MEMORY_FLASH,
+ PCI_MEMORY_OTHER = 0x80
+} pci_memory_subclasses;
+
+typedef enum {
+ PCI_BRIDGE_HOST,
+ PCI_BRIDGE_ISA,
+ PCI_BRIDGE_EISA,
+ PCI_BRIDGE_MC,
+ PCI_BRIDGE_PCI,
+ PCI_BRIDGE_PCMCIA,
+ PCI_BRIDGE_NUBUS,
+ PCI_BRIDGE_CARDBUS,
+ PCI_BRIDGE_RACEWAY,
+ PCI_BRIDGE_OTHER = 0x80
+} pci_bridge_subclasses;
+
+typedef enum {
+ PCI_COMM_UART,
+ PCI_COMM_PARALLEL,
+ PCI_COMM_MULTIUART,
+ PCI_COMM_MODEM,
+ PCI_COMM_OTHER = 0x80
+} pci_comm_subclasses;
+
+typedef enum {
+ PCI_BASE_PIC,
+ PCI_BASE_DMA,
+ PCI_BASE_TIMER,
+ PCI_BASE_RTC,
+ PCI_BASE_PCI_HOTPLUG,
+ PCI_BASE_OTHER = 0x80
+} pci_base_subclasses;
+
+typedef enum {
+ PCI_INPUT_KBD,
+ PCI_INPUT_PEN,
+ PCI_INPUT_MOUSE,
+ PCI_INPUT_SCANNER,
+ PCI_INPUT_GAMEPORT,
+ PCI_INPUT_OTHER = 0x80
+} pci_input_subclasses;
+
+typedef enum {
+ PCI_DOCK_GENERIC,
+ PCI_DOCK_OTHER = 0x80
+} pci_dock_subclasses;
+
+typedef enum {
+ PCI_CPU_386,
+ PCI_CPU_486,
+ PCI_CPU_PENTIUM,
+ PCI_CPU_ALPHA = 0x10,
+ PCI_CPU_POWERPC = 0x20,
+ PCI_CPU_MIPS = 0x30,
+ PCI_CPU_COPROC = 0x40,
+ PCI_CPU_OTHER = 0x80
+} pci_cpu_subclasses;
+
+typedef enum {
+ PCI_SERIAL_IEEE1394,
+ PCI_SERIAL_ACCESS,
+ PCI_SERIAL_SSA,
+ PCI_SERIAL_USB,
+ PCI_SERIAL_FIBER,
+ PCI_SERIAL_SMBUS,
+ PCI_SERIAL_OTHER = 0x80
+} pci_serial_subclasses;
+
+typedef enum {
+ PCI_INTELLIGENT_I2O
+} pci_intelligent_subclasses;
+
+typedef enum {
+ PCI_SATELLITE_TV,
+ PCI_SATELLITE_AUDIO,
+ PCI_SATELLITE_VOICE,
+ PCI_SATELLITE_DATA,
+ PCI_SATELLITE_OTHER = 0x80
+} pci_satellite_subclasses;
+
+typedef enum {
+ PCI_CRYPT_NETWORK,
+ PCI_CRYPT_ENTERTAINMENT,
+ PCI_CRYPT_OTHER = 0x80
+} pci_crypt_subclasses;
+
+typedef enum {
+ PCI_DSP_DPIO,
+ PCI_DSP_OTHER = 0x80
+} pci_dsp_subclasses;
+
+typedef enum {
+ PCI_XOR_QDMA,
+ PCI_XOR_OTHER = 0x80
+} pci_xor_subclasses;
+
+/* Overlay for a PCI-to-PCI bridge */
+
+#define PPB_RSVDA_MAX 2
+#define PPB_RSVDD_MAX 8
+
+typedef struct _ppb_config_regs {
+ uint16 vendor;
+ uint16 device;
+ uint16 command;
+ uint16 status;
+ uint8 rev_id;
+ uint8 prog_if;
+ uint8 sub_class;
+ uint8 base_class;
+ uint8 cache_line_size;
+ uint8 latency_timer;
+ uint8 header_type;
+ uint8 bist;
+ uint32 rsvd_a[PPB_RSVDA_MAX];
+ uint8 prim_bus;
+ uint8 sec_bus;
+ uint8 sub_bus;
+ uint8 sec_lat;
+ uint8 io_base;
+ uint8 io_lim;
+ uint16 sec_status;
+ uint16 mem_base;
+ uint16 mem_lim;
+ uint16 pf_mem_base;
+ uint16 pf_mem_lim;
+ uint32 pf_mem_base_hi;
+ uint32 pf_mem_lim_hi;
+ uint16 io_base_hi;
+ uint16 io_lim_hi;
+ uint16 subsys_vendor;
+ uint16 subsys_id;
+ uint32 rsvd_b;
+ uint8 rsvd_c;
+ uint8 int_pin;
+ uint16 bridge_ctrl;
+ uint8 chip_ctrl;
+ uint8 diag_ctrl;
+ uint16 arb_ctrl;
+ uint32 rsvd_d[PPB_RSVDD_MAX];
+ uint8 dev_dep[192];
+} ppb_config_regs;
+
+/* Everything below is BRCM HND proprietary */
+
+/* Brcm PCI configuration registers */
+#define cap_list rsvd_a[0]
+#define bar0_window dev_dep[0x80 - 0x40]
+#define bar1_window dev_dep[0x84 - 0x40]
+#define sprom_control dev_dep[0x88 - 0x40]
+
+/* PCI CAPABILITY DEFINES */
+#define PCI_CAP_POWERMGMTCAP_ID 0x01
+#define PCI_CAP_MSICAP_ID 0x05
+#define PCI_CAP_VENDSPEC_ID 0x09
+#define PCI_CAP_PCIECAP_ID 0x10
+#define PCI_CAP_MSIXCAP_ID 0x11
+
+/* Data structure to define the Message Signalled Interrupt facility
+ * Valid for PCI and PCIE configurations
+ */
+typedef struct _pciconfig_cap_msi {
+ uint8 capID;
+ uint8 nextptr;
+ uint16 msgctrl;
+ uint32 msgaddr;
+} pciconfig_cap_msi;
+#define MSI_ENABLE 0x1 /* bit 0 of msgctrl */
+
+/* Data structure to define the Power management facility
+ * Valid for PCI and PCIE configurations
+ */
+typedef struct _pciconfig_cap_pwrmgmt {
+ uint8 capID;
+ uint8 nextptr;
+ uint16 pme_cap;
+ uint16 pme_sts_ctrl;
+ uint8 pme_bridge_ext;
+ uint8 data;
+} pciconfig_cap_pwrmgmt;
+
+#define PME_CAP_PM_STATES (0x1f << 27) /* Bits 31:27 states that can generate PME */
+#define PME_CSR_OFFSET 0x4 /* 4-bytes offset */
+#define PME_CSR_PME_EN (1 << 8) /* Bit 8 Enable generating of PME */
+#define PME_CSR_PME_STAT (1 << 15) /* Bit 15 PME got asserted */
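+
+/* Illustrative sketch (editor's example, not part of the original source):
+ * checking and clearing PME status through the power management capability,
+ * assuming pm_cap_off holds the config offset of the capability and using
+ * the read/write_pci_cfg_word helpers defined later in this file
+ * (PME_Status is write-1-to-clear):
+ *
+ *	uint16 csr = read_pci_cfg_word(pm_cap_off + PME_CSR_OFFSET);
+ *	if (csr & PME_CSR_PME_STAT)
+ *		write_pci_cfg_word(pm_cap_off + PME_CSR_OFFSET, csr);
+ */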
+
+/* Data structure to define the PCIE capability */
+typedef struct _pciconfig_cap_pcie {
+ uint8 capID;
+ uint8 nextptr;
+ uint16 pcie_cap;
+ uint32 dev_cap;
+ uint16 dev_ctrl;
+ uint16 dev_status;
+ uint32 link_cap;
+ uint16 link_ctrl;
+ uint16 link_status;
+ uint32 slot_cap;
+ uint16 slot_ctrl;
+ uint16 slot_status;
+ uint16 root_ctrl;
+ uint16 root_cap;
+ uint32 root_status;
+} pciconfig_cap_pcie;
+
+/* PCIE Enhanced CAPABILITY DEFINES */
+#define PCIE_EXTCFG_OFFSET 0x100
+#define PCIE_ADVERRREP_CAPID 0x0001
+#define PCIE_VC_CAPID 0x0002
+#define PCIE_DEVSNUM_CAPID 0x0003
+#define PCIE_PWRBUDGET_CAPID 0x0004
+
+/* PCIE Extended configuration */
+#define PCIE_ADV_CORR_ERR_MASK 0x114
+#define PCIE_ADV_CORR_ERR_MASK_OFFSET 0x14
+#define CORR_ERR_RE (1 << 0) /* Receiver */
+#define CORR_ERR_BT (1 << 6) /* Bad TLP */
+#define CORR_ERR_BD (1 << 7) /* Bad DLLP */
+#define CORR_ERR_RR (1 << 8) /* REPLAY_NUM rollover */
+#define CORR_ERR_RT (1 << 12) /* Replay timer timeout */
+#define CORR_ERR_AE (1 << 13) /* Advisory Non-Fatal Error Mask */
+#define ALL_CORR_ERRORS (CORR_ERR_RE | CORR_ERR_BT | CORR_ERR_BD | \
+ CORR_ERR_RR | CORR_ERR_RT)
+
+/* PCIE Root Control Register bits (Host mode only) */
+#define PCIE_RC_CORR_SERR_EN 0x0001
+#define PCIE_RC_NONFATAL_SERR_EN 0x0002
+#define PCIE_RC_FATAL_SERR_EN 0x0004
+#define PCIE_RC_PME_INT_EN 0x0008
+#define PCIE_RC_CRS_EN 0x0010
+
+/* PCIE Root Capability Register bits (Host mode only) */
+#define PCIE_RC_CRS_VISIBILITY 0x0001
+
+/* PCIe PMCSR Register bits */
+#define PCIE_PMCSR_PMESTAT 0x8000
+
+/* Header to define the PCIE specific capabilities in the extended config space */
+typedef struct _pcie_enhanced_caphdr {
+ uint16 capID;
+ uint16 cap_ver : 4;
+ uint16 next_ptr : 12;
+} pcie_enhanced_caphdr;
+
+#define PCIE_CFG_PMCSR 0x4C
+#define PCI_BAR0_WIN 0x80 /* backplane address space accessed by BAR0 */
+#define PCI_BAR1_WIN 0x84 /* backplane address space accessed by BAR1 */
+#define PCI_SPROM_CONTROL 0x88 /* sprom property control */
+#define PCIE_CFG_SUBSYSTEM_CONTROL 0x88 /* used as subsystem control in PCIE devices */
+#define PCI_BAR1_CONTROL 0x8c /* BAR1 region burst control */
+#define PCI_INT_STATUS 0x90 /* PCI and other cores interrupts */
+#define PCI_INT_MASK 0x94 /* mask of PCI and other cores interrupts */
+#define PCI_TO_SB_MB 0x98 /* signal backplane interrupts */
+#define PCI_BACKPLANE_ADDR 0xa0 /* address an arbitrary location on the system backplane */
+#define PCI_BACKPLANE_DATA 0xa4 /* data at the location specified by above address */
+#define PCI_CLK_CTL_ST 0xa8 /* pci config space clock control/status (>=rev14) */
+#define PCI_BAR0_WIN2 0xac /* backplane address space accessed by second 4KB of BAR0 */
+#define PCI_GPIO_IN 0xb0 /* pci config space gpio input (>=rev3) */
+#define PCIE_CFG_DEVICE_CAPABILITY 0xb0 /* used as device capability in PCIE devices */
+#define PCI_GPIO_OUT 0xb4 /* pci config space gpio output (>=rev3) */
+#define PCIE_CFG_DEVICE_CONTROL 0xb4 /* 0xb4 is used as device control in PCIE devices */
+#define PCIE_DC_AER_CORR_EN (1u << 0u)
+#define PCIE_DC_AER_NON_FATAL_EN (1u << 1u)
+#define PCIE_DC_AER_FATAL_EN (1u << 2u)
+#define PCIE_DC_AER_UNSUP_EN (1u << 3u)
+
+#define PCI_BAR0_WIN2_OFFSET 0x1000u
+#define PCIE2_BAR0_CORE2_WIN2_OFFSET 0x5000u
+
+#define PCI_GPIO_OUTEN 0xb8 /* pci config space gpio output enable (>=rev3) */
+#define PCI_L1SS_CTRL2 0x24c /* The L1 PM Substates Control register */
+
+/* Private Registers */
+#define PCI_STAT_CTRL 0xa80
+#define PCI_L0_EVENTCNT 0xa84
+#define PCI_L0_STATETMR 0xa88
+#define PCI_L1_EVENTCNT 0xa8c
+#define PCI_L1_STATETMR 0xa90
+#define PCI_L1_1_EVENTCNT 0xa94
+#define PCI_L1_1_STATETMR 0xa98
+#define PCI_L1_2_EVENTCNT 0xa9c
+#define PCI_L1_2_STATETMR 0xaa0
+#define PCI_L2_EVENTCNT 0xaa4
+#define PCI_L2_STATETMR 0xaa8
+
+#define PCI_LINK_STATUS 0x4dc
+#define PCI_LINK_SPEED_MASK (15u << 0u)
+#define PCI_LINK_SPEED_SHIFT (0)
+#define PCIE_LNK_SPEED_GEN1 0x1
+#define PCIE_LNK_SPEED_GEN2 0x2
+#define PCIE_LNK_SPEED_GEN3 0x3
+
+#define PCI_PL_SPARE 0x1808 /* Config to Increase external clkreq deasserted minimum time */
+#define PCI_CONFIG_EXT_CLK_MIN_TIME_MASK (1u << 31u)
+#define PCI_CONFIG_EXT_CLK_MIN_TIME_SHIFT (31)
+
+#define PCI_ADV_ERR_CAP 0x100
+#define PCI_UC_ERR_STATUS 0x104
+#define PCI_UNCORR_ERR_MASK 0x108
+#define PCI_UCORR_ERR_SEVR 0x10c
+#define PCI_CORR_ERR_STATUS 0x110
+#define PCI_CORR_ERR_MASK 0x114
+#define PCI_ERR_CAP_CTRL 0x118
+#define PCI_TLP_HDR_LOG1 0x11c
+#define PCI_TLP_HDR_LOG2 0x120
+#define PCI_TLP_HDR_LOG3 0x124
+#define PCI_TLP_HDR_LOG4 0x128
+#define PCI_TL_CTRL_5 0x814
+#define PCI_TL_HDR_FC_ST 0x980
+#define PCI_TL_TGT_CRDT_ST 0x990
+#define PCI_TL_SMLOGIC_ST 0x998
+#define PCI_DL_ATTN_VEC 0x1040
+#define PCI_DL_STATUS 0x1048
+
+#define PCI_PHY_CTL_0 0x1800
+#define PCI_SLOW_PMCLK_EXT_RLOCK (1 << 7)
+#define PCI_REG_TX_DEEMPH_3_5_DB (1 << 21)
+
+#define PCI_LINK_STATE_DEBUG 0x1c24
+#define PCI_RECOVERY_HIST 0x1ce4
+#define PCI_PHY_LTSSM_HIST_0 0x1cec
+#define PCI_PHY_LTSSM_HIST_1 0x1cf0
+#define PCI_PHY_LTSSM_HIST_2 0x1cf4
+#define PCI_PHY_LTSSM_HIST_3 0x1cf8
+#define PCI_PHY_DBG_CLKREG_0 0x1e10
+#define PCI_PHY_DBG_CLKREG_1 0x1e14
+#define PCI_PHY_DBG_CLKREG_2 0x1e18
+#define PCI_PHY_DBG_CLKREG_3 0x1e1c
+
+#define PCI_TL_CTRL_0 0x800u
+#define PCI_BEACON_DIS (1u << 20u) /* Disable Beacon Generation */
+
+/* Bit settings for PCIE_CFG_SUBSYSTEM_CONTROL register */
+#define PCIE_BAR1COHERENTACCEN_BIT 8
+#define PCIE_BAR2COHERENTACCEN_BIT 9
+#define PCIE_SSRESET_STATUS_BIT 13
+#define PCIE_SSRESET_DISABLE_BIT 14
+#define PCIE_SSRESET_DIS_ENUM_RST_BIT 15
+
+#define PCIE_BARCOHERENTACCEN_MASK 0x300
+
+/* Bit settings for PCI_UC_ERR_STATUS register */
+#define PCI_UC_ERR_URES (1 << 20) /* Unsupported Request Error Status */
+#define PCI_UC_ERR_ECRCS (1 << 19) /* ECRC Error Status */
+#define PCI_UC_ERR_MTLPS (1 << 18) /* Malformed TLP Status */
+#define PCI_UC_ERR_ROS (1 << 17) /* Receiver Overflow Status */
+#define PCI_UC_ERR_UCS (1 << 16) /* Unexpected Completion Status */
+#define PCI_UC_ERR_CAS (1 << 15) /* Completer Abort Status */
+#define PCI_UC_ERR_CTS (1 << 14) /* Completer Timeout Status */
+#define PCI_UC_ERR_FCPES (1 << 13) /* Flow Control Protocol Error Status */
+#define PCI_UC_ERR_PTLPS (1 << 12) /* Poisoned TLP Status */
+#define PCI_UC_ERR_DLPES (1 << 4) /* Data Link Protocol Error Status */
+
+#define PCI_DL_STATUS_PHY_LINKUP (1 << 13) /* Status of LINK */
+
+#define PCI_PMCR_REFUP 0x1814 /* Trefup time */
+#define PCI_PMCR_TREFUP_LO_MASK 0x3f
+#define PCI_PMCR_TREFUP_LO_SHIFT 24
+#define PCI_PMCR_TREFUP_LO_BITS 6
+#define PCI_PMCR_TREFUP_HI_MASK 0xf
+#define PCI_PMCR_TREFUP_HI_SHIFT 5
+#define PCI_PMCR_TREFUP_HI_BITS 4
+#define PCI_PMCR_TREFUP_MAX 0x400
+#define PCI_PMCR_TREFUP_MAX_SCALE 0x2000
+
+#define PCI_PMCR_REFUP_EXT 0x1818 /* Trefup extend Max */
+#define PCI_PMCR_TREFUP_EXT_SHIFT 22
+#define PCI_PMCR_TREFUP_EXT_SCALE 3
+#define PCI_PMCR_TREFUP_EXT_ON 1
+#define PCI_PMCR_TREFUP_EXT_OFF 0
+
+#define PCI_TPOWER_SCALE_MASK 0x3
+#define PCI_TPOWER_SCALE_SHIFT 3 /* 0:1 is scale and 2 is rsvd */
+
+#define PCI_BAR0_SHADOW_OFFSET (2 * 1024) /* bar0 + 2K accesses sprom shadow (in pci core) */
+#define PCI_BAR0_SPROM_OFFSET (4 * 1024) /* bar0 + 4K accesses external sprom */
+#define PCI_BAR0_PCIREGS_OFFSET (6 * 1024) /* bar0 + 6K accesses pci core registers */
+#define PCI_BAR0_PCISBR_OFFSET (4 * 1024) /* pci core SB registers are at the end of the
+ * 8KB window, so their address is the "regular"
+ * address plus 4K
+ */
+/*
+ * PCIE GEN2 changed some of the above locations for
+ * Bar0WrapperBase, SecondaryBAR0Window and SecondaryBAR0WrapperBase
+ * BAR0 maps 32K of register space
+*/
+#define PCIE2_BAR0_WIN2 0x70 /* config register to map 2nd 4KB of BAR0 */
+#define PCIE2_BAR0_CORE2_WIN 0x74 /* config register to map 5th 4KB of BAR0 */
+#define PCIE2_BAR0_CORE2_WIN2 0x78 /* config register to map 6th 4KB of BAR0 */
+
+/* PCIE GEN2 BAR0 window size */
+#define PCIE2_BAR0_WINSZ 0x8000
+
+#define PCI_BAR0_WIN2_OFFSET 0x1000u
+#define PCI_CORE_ENUM_OFFSET 0x2000u
+#define PCI_CC_CORE_ENUM_OFFSET 0x3000u
+#define PCI_SEC_BAR0_WIN_OFFSET 0x4000u
+#define PCI_SEC_BAR0_WRAP_OFFSET 0x5000u
+#define PCI_CORE_ENUM2_OFFSET 0x6000u
+#define PCI_CC_CORE_ENUM2_OFFSET 0x7000u
+#define PCI_TER_BAR0_WIN_OFFSET 0x9000u
+#define PCI_TER_BAR0_WRAP_OFFSET 0xa000u
+
+#define PCI_BAR0_WINSZ (16 * 1024) /* bar0 window size, matches corerev >= 13 */
+/* On pci corerev >= 13 and all pcie, the bar0 is now 16KB and it maps: */
+#define PCI_16KB0_PCIREGS_OFFSET (8 * 1024) /* bar0 + 8K accesses pci/pcie core registers */
+#define PCI_16KB0_CCREGS_OFFSET (12 * 1024) /* bar0 + 12K accesses chipc core registers */
+#define PCI_16KBB0_WINSZ (16 * 1024) /* bar0 window size */
+#define PCI_SECOND_BAR0_OFFSET (16 * 1024) /* secondary bar 0 window */
+
+/* On AI chips we have a second window where DMP regs are mapped: */
+#define PCI_16KB0_WIN2_OFFSET (4 * 1024) /* bar0 + 4K is "Window 2" */
+
+/* PCI_INT_STATUS */
+#define PCI_SBIM_STATUS_SERR 0x4 /* backplane SBErr interrupt status */
+
+/* PCI_INT_MASK */
+#define PCI_SBIM_SHIFT 8 /* backplane core interrupt mask bits offset */
+#define PCI_SBIM_MASK 0xff00 /* backplane core interrupt mask */
+#define PCI_SBIM_MASK_SERR 0x4 /* backplane SBErr interrupt mask */
+#define PCI_CTO_INT_SHIFT 16 /* completion timeout interrupt shift */
+#define PCI_CTO_INT_MASK (1 << PCI_CTO_INT_SHIFT) /* completion timeout interrupt mask */
+
+/* PCI_SPROM_CONTROL */
+#define SPROM_SZ_MSK 0x02 /* SPROM Size Mask */
+#define SPROM_LOCKED 0x08 /* SPROM Locked */
+#define SPROM_BLANK 0x04 /* indicating a blank SPROM */
+#define SPROM_WRITEEN 0x10 /* SPROM write enable */
+#define SPROM_BOOTROM_WE 0x20 /* external bootrom write enable */
+#define SPROM_BACKPLANE_EN 0x40 /* Enable indirect backplane access */
+#define SPROM_OTPIN_USE 0x80 /* device OTP In use */
+#define SPROM_BAR1_COHERENT_ACC_EN 0x100 /* PCIe accesses through BAR1 are coherent */
+#define SPROM_BAR2_COHERENT_ACC_EN 0x200 /* PCIe accesses through BAR2 are coherent */
+#define SPROM_CFG_TO_SB_RST 0x400 /* backplane reset */
+
+/* Bits in PCI command and status regs */
+#define PCI_CMD_IO 0x00000001 /* I/O enable */
+#define PCI_CMD_MEMORY 0x00000002 /* Memory enable */
+#define PCI_CMD_MASTER 0x00000004 /* Master enable */
+#define PCI_CMD_SPECIAL 0x00000008 /* Special cycles enable */
+#define PCI_CMD_INVALIDATE 0x00000010 /* Memory Write and Invalidate enable */
+#define PCI_CMD_VGA_PAL 0x00000040 /* VGA palette snoop */
+#define PCI_STAT_TA 0x08000000 /* target abort status */
+
+/* Header types */
+#define PCI_HEADER_MULTI 0x80
+#define PCI_HEADER_MASK 0x7f
+typedef enum {
+ PCI_HEADER_NORMAL,
+ PCI_HEADER_BRIDGE,
+ PCI_HEADER_CARDBUS
+} pci_header_types;
+
+#define PCI_CONFIG_SPACE_SIZE 256
+
+#define DWORD_ALIGN(x) ((x) & ~(0x03))
+#define BYTE_POS(x) ((x) & 0x3)
+#define WORD_POS(x) ((x) & 0x1)
+
+#define BYTE_SHIFT(x) (8 * BYTE_POS(x))
+#define WORD_SHIFT(x) (16 * WORD_POS(x))
+
+#define BYTE_VAL(a, x) ((a >> BYTE_SHIFT(x)) & 0xFF)
+#define WORD_VAL(a, x) ((a >> WORD_SHIFT(x)) & 0xFFFF)
+
+#define read_pci_cfg_byte(a) \
+ BYTE_VAL(OSL_PCI_READ_CONFIG(osh, DWORD_ALIGN(a), 4), a)
+
+#define read_pci_cfg_word(a) \
+ WORD_VAL(OSL_PCI_READ_CONFIG(osh, DWORD_ALIGN(a), 4), a)
+
+#define write_pci_cfg_byte(a, val) do { \
+ uint32 tmpval; \
+ tmpval = OSL_PCI_READ_CONFIG(osh, DWORD_ALIGN(a), 4); \
+ tmpval &= ~(0xFF << BYTE_SHIFT(a)); \
+ tmpval |= ((uint8)(val)) << BYTE_SHIFT(a); \
+ OSL_PCI_WRITE_CONFIG(osh, DWORD_ALIGN(a), 4, tmpval); \
+ } while (0)
+
+#define write_pci_cfg_word(a, val) do { \
+ uint32 tmpval; \
+ tmpval = OSL_PCI_READ_CONFIG(osh, DWORD_ALIGN(a), 4); \
+ tmpval &= ~(0xFFFF << WORD_SHIFT(a)); \
+ tmpval |= ((uint16)(val)) << WORD_SHIFT(a); \
+ OSL_PCI_WRITE_CONFIG(osh, DWORD_ALIGN(a), 4, tmpval); \
+ } while (0)
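+
+/* Illustrative sketch (editor's example, not part of the original source):
+ * the accessors above expand to 32-bit read-modify-write operations on the
+ * dword containing the target offset, and assume an osl handle named osh
+ * is in scope:
+ *
+ *	uint8 rev = read_pci_cfg_byte(PCI_CFG_REV);
+ *	write_pci_cfg_word(PCI_CFG_CMD, PCI_CMD_MEMORY | PCI_CMD_MASTER);
+ */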
+
+#endif /* _h_pcicfg_ */
diff --git a/bcmdhd.101.10.361.x/include/pcie_core.h b/bcmdhd.101.10.361.x/include/pcie_core.h
new file mode 100755
index 0000000..80bc4c2
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/pcie_core.h
@@ -0,0 +1,1485 @@
+/*
+ * BCM43XX PCIE core hardware definitions.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+#ifndef _PCIE_CORE_H
+#define _PCIE_CORE_H
+
+#include <sbhnddma.h>
+#include <siutils.h>
+
+#define REV_GE_73(rev) (PCIECOREREV((rev)) >= 73)
+#define REV_GE_69(rev) (PCIECOREREV((rev)) >= 69)
+#define REV_GE_68(rev) (PCIECOREREV((rev)) >= 68)
+#define REV_GE_64(rev) (PCIECOREREV((rev)) >= 64)
+#define REV_GE_15(rev) (PCIECOREREV((rev)) >= 15)
+
+/* cpp contortions to concatenate w/arg prescan */
+#ifndef PAD
+#define _PADLINE(line) pad ## line
+#define _XSTR(line) _PADLINE(line)
+#define PAD _XSTR(__LINE__)
+#endif
+
+/* PCIE Enumeration space offsets */
+#define PCIE_CORE_CONFIG_OFFSET 0x0
+#define PCIE_FUNC0_CONFIG_OFFSET 0x400
+#define PCIE_FUNC1_CONFIG_OFFSET 0x500
+#define PCIE_FUNC2_CONFIG_OFFSET 0x600
+#define PCIE_FUNC3_CONFIG_OFFSET 0x700
+#define PCIE_SPROM_SHADOW_OFFSET 0x800
+#define PCIE_SBCONFIG_OFFSET 0xE00
+
+#define PCIEDEV_MAX_DMAS 4
+
+/* PCIE Bar0 Address Mapping. Each function maps 16KB config space */
+#define PCIE_DEV_BAR0_SIZE 0x4000
+#define PCIE_BAR0_WINMAPCORE_OFFSET 0x0
+#define PCIE_BAR0_EXTSPROM_OFFSET 0x1000
+#define PCIE_BAR0_PCIECORE_OFFSET 0x2000
+#define PCIE_BAR0_CCCOREREG_OFFSET 0x3000
+
+/* different register spaces to access through pcie indirect access */
+#define PCIE_CONFIGREGS 1 /* Access to config space */
+#define PCIE_PCIEREGS 2 /* Access to pcie registers */
+
+#define PCIEDEV_HOSTADDR_MAP_BASE 0x8000000
+#define PCIEDEV_HOSTADDR_MAP_WIN_MASK 0xFE000000
+
+#define PCIEDEV_TR0_WINDOW_START 0x08000000
+#define PCIEDEV_TR0_WINDOW_END 0x09FFFFFF
+
+#define PCIEDEV_TR1_WINDOW_START 0x0A000000
+#define PCIEDEV_TR1_WINDOW_END 0x0BFFFFFF
+
+#define PCIEDEV_TR2_WINDOW_START 0x0C000000
+#define PCIEDEV_TR2_WINDOW_END 0x0DFFFFFF
+
+#define PCIEDEV_TR3_WINDOW_START 0x0E000000
+#define PCIEDEV_TR3_WINDOW_END 0x0FFFFFFF
+
+#define PCIEDEV_TRANS_WIN_LEN 0x2000000
+#define PCIEDEV_ARM_ADDR_SPACE 0x0FFFFFFF
+
+/* PCIe translation windows */
+#define PCIEDEV_TRANS_WIN_0 0
+#define PCIEDEV_TRANS_WIN_1 1
+#define PCIEDEV_TRANS_WIN_2 2
+#define PCIEDEV_TRANS_WIN_3 3
+
+#define PCIEDEV_ARM_ADDR(host_addr, win) \
+ (((host_addr) & 0x1FFFFFF) | ((win) << 25) | PCIEDEV_HOSTADDR_MAP_BASE)
+
+/* Current mapping of PCIe translation windows to SW features */
+
+#define PCIEDEV_TRANS_WIN_TRAP_HANDLER PCIEDEV_TRANS_WIN_0
+#define PCIEDEV_TRANS_WIN_HOSTMEM PCIEDEV_TRANS_WIN_1
+#define PCIEDEV_TRANS_WIN_SWPAGING PCIEDEV_TRANS_WIN_1
+#define PCIEDEV_TRANS_WIN_BT PCIEDEV_TRANS_WIN_2
+#define PCIEDEV_TRANS_WIN_FWTRACE PCIEDEV_TRANS_WIN_3
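+
+/* Illustrative sketch (editor's example, not part of the original source):
+ * mapping a 32-bit host address into the device address space through the
+ * host-memory translation window. Each window spans PCIEDEV_TRANS_WIN_LEN
+ * (32 MB), so only the low 25 bits of the host address are preserved:
+ *
+ *	uint32 dev_addr = PCIEDEV_ARM_ADDR(host_addr, PCIEDEV_TRANS_WIN_HOSTMEM);
+ */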
+
+/* dma regs to control the flow between host2dev and dev2host */
+typedef volatile struct pcie_devdmaregs {
+ dma64regs_t tx;
+ uint32 PAD[2];
+ dma64regs_t rx;
+ uint32 PAD[2];
+} pcie_devdmaregs_t;
+
+#define PCIE_DB_HOST2DEV_0 0x1
+#define PCIE_DB_HOST2DEV_1 0x2
+#define PCIE_DB_DEV2HOST_0 0x3
+#define PCIE_DB_DEV2HOST_1 0x4
+#define PCIE_DB_DEV2HOST1_0 0x5
+
+/* door bell register sets */
+typedef struct pcie_doorbell {
+ uint32 host2dev_0;
+ uint32 host2dev_1;
+ uint32 dev2host_0;
+ uint32 dev2host_1;
+} pcie_doorbell_t;
+
+/* Flow Ring Manager */
+#define IFRM_FR_IDX_MAX 256
+#define IFRM_FR_CONFIG_GID 2
+#define IFRM_FR_GID_MAX 4
+#define IFRM_FR_DEV_MAX 8
+#define IFRM_FR_TID_MAX 8
+#define IFRM_FR_DEV_VALID 2
+
+#define IFRM_VEC_REG_BITS 32
+
+#define IFRM_FR_PER_VECREG 4
+#define IFRM_FR_PER_VECREG_SHIFT 2
+#define IFRM_FR_PER_VECREG_MASK ((0x1 << IFRM_FR_PER_VECREG_SHIFT) - 1)
+
+#define IFRM_VEC_BITS_PER_FR (IFRM_VEC_REG_BITS/IFRM_FR_PER_VECREG)
+
+/* IFRM_DEV_0 : d11AC, IFRM_DEV_1 : d11AD */
+#define IFRM_DEV_0 0
+#define IFRM_DEV_1 1
+#define IHRM_FR_SW_MASK (1u << IFRM_DEV_0)
+#define IHRM_FR_HW_MASK (1u << IFRM_DEV_1)
+
+#define IFRM_FR_GID_0 0
+#define IFRM_FR_GID_1 1
+#define IFRM_FR_GID_2 2
+#define IFRM_FR_GID_3 3
+
+#define IFRM_TIDMASK 0xffffffff
+
+/* ifrm_ctrlst register */
+#define IFRM_EN (1<<0)
+#define IFRM_BUFF_INIT_DONE (1<<1)
+#define IFRM_COMPARE_EN0 (1<<4)
+#define IFRM_COMPARE_EN1 (1<<5)
+#define IFRM_COMPARE_EN2 (1<<6)
+#define IFRM_COMPARE_EN3 (1<<7)
+#define IFRM_INIT_DV0 (1<<8)
+#define IFRM_INIT_DV1 (1<<9)
+#define IFRM_INIT_DV2 (1<<10)
+#define IFRM_INIT_DV3 (1<<11)
+
+/* ifrm_msk_arr.addr, ifrm_tid_arr.addr register */
+#define IFRM_ADDR_SHIFT 0
+#define IFRM_FRG_ID_SHIFT 8
+
+/* ifrm_vec.diff_lat register */
+#define IFRM_DV_LAT (1<<0)
+#define IFRM_DV_LAT_DONE (1<<1)
+#define IFRM_SDV_OFFSET_SHIFT 4
+#define IFRM_SDV_FRGID_SHIFT 8
+#define IFRM_VECSTAT_MASK 0x3
+#define IFRM_VEC_MASK 0xff
+
+/* HMAP Windows */
+#define HMAP_MAX_WINDOWS 8
+
+/* idma frm array */
+typedef struct pcie_ifrm_array {
+ uint32 addr;
+ uint32 data;
+} pcie_ifrm_array_t;
+
+/* idma frm vector */
+typedef struct pcie_ifrm_vector {
+ uint32 diff_lat;
+ uint32 sav_tid;
+ uint32 sav_diff;
+ uint32 PAD[1];
+} pcie_ifrm_vector_t;
+
+/* idma frm interrupt */
+typedef struct pcie_ifrm_intr {
+ uint32 intstat;
+ uint32 intmask;
+} pcie_ifrm_intr_t;
+
+/* HMAP window register set */
+typedef volatile struct pcie_hmapwindow {
+ uint32 baseaddr_lo; /* BaseAddrLower */
+ uint32 baseaddr_hi; /* BaseAddrUpper */
+ uint32 windowlength; /* Window Length */
+ uint32 PAD[1];
+} pcie_hmapwindow_t;
+
+typedef struct pcie_hmapviolation {
+ uint32 hmap_violationaddr_lo; /* violating address lo */
+ uint32 hmap_violationaddr_hi; /* violating addr hi */
+ uint32 hmap_violation_info; /* violation info */
+ uint32 PAD[1];
+} pcie_hmapviolation_t;
+
+#if !defined(DONGLEBUILD) || defined(BCMSTANDALONE_TEST) || \
+ defined(ATE_BUILD) || defined(BCMDVFS)
+/* SB side: PCIE core and host control registers */
+typedef volatile struct sbpcieregs {
+ uint32 control; /* host mode only */
+ uint32 iocstatus; /* PCIE2: iostatus */
+ uint32 PAD[1];
+ uint32 biststatus; /* bist Status: 0x00C */
+ uint32 gpiosel; /* PCIE gpio sel: 0x010 */
+ uint32 gpioouten; /* PCIE gpio outen: 0x14 */
+ uint32 gpioout; /* PCIE gpio out: 0x18 */
+ uint32 PAD;
+ uint32 intstatus; /* Interrupt status: 0x20 */
+ uint32 intmask; /* Interrupt mask: 0x24 */
+ uint32 sbtopcimailbox; /* sb to pcie mailbox: 0x028 */
+ uint32 obffcontrol; /* PCIE2: 0x2C */
+ uint32 obffintstatus; /* PCIE2: 0x30 */
+ uint32 obffdatastatus; /* PCIE2: 0x34 */
+ uint32 PAD[1];
+ uint32 ctoctrl; /* PCIE2: 0x3C */
+ uint32 errlog; /* PCIE2: 0x40 */
+ uint32 errlogaddr; /* PCIE2: 0x44 */
+ uint32 mailboxint; /* PCIE2: 0x48 */
+ uint32 mailboxintmsk; /* PCIE2: 0x4c */
+ uint32 ltrspacing; /* PCIE2: 0x50 */
+ uint32 ltrhysteresiscnt; /* PCIE2: 0x54 */
+ uint32 msivectorassign; /* PCIE2: 0x58 */
+ uint32 intmask2; /* PCIE2: 0x5C */
+ uint32 PAD[40];
+ uint32 sbtopcie0; /* sb to pcie translation 0: 0x100 */
+ uint32 sbtopcie1; /* sb to pcie translation 1: 0x104 */
+ uint32 sbtopcie2; /* sb to pcie translation 2: 0x108 */
+ uint32 sbtopcie0upper; /* sb to pcie translation 0: 0x10C */
+ uint32 sbtopcie1upper; /* sb to pcie translation 1: 0x110 */
+ uint32 PAD[3];
+
+ /* pcie core supports in direct access to config space */
+ uint32 configaddr; /* pcie config space access: Address field: 0x120 */
+ uint32 configdata; /* pcie config space access: Data field: 0x124 */
+ union {
+ struct {
+ /* mdio access to serdes */
+ uint32 mdiocontrol; /* controls the mdio access: 0x128 */
+ uint32 mdiodata; /* Data to the mdio access: 0x12c */
+ /* pcie protocol phy/dllp/tlp register indirect access mechanism */
+ uint32 pcieindaddr; /* indirect access to the internal register: 0x130 */
+ uint32 pcieinddata; /* Data to/from the internal register: 0x134 */
+ uint32 clkreqenctrl; /* >= rev 6, Clkreq rdma control : 0x138 */
+ uint32 PAD[177]; /* last 0x3FC */
+ /* 0x400 - 0x7FF, PCIE Cfg Space, note: not used anymore in PcieGen2 */
+ uint32 pciecfg[4][64];
+ } pcie1;
+ struct {
+ /* mdio access to serdes */
+ uint32 mdiocontrol; /* controls the mdio access: 0x128 */
+ uint32 mdiowrdata; /* write data to mdio 0x12C */
+ uint32 mdiorddata; /* read data to mdio 0x130 */
+ uint32 PAD[3]; /* 0x134-0x138-0x13c */
+ /* door bell registers available from gen2 rev5 onwards */
+ pcie_doorbell_t dbls[PCIEDEV_MAX_DMAS]; /* 0x140 - 0x17F */
+ uint32 dataintf; /* 0x180 */
+ uint32 PAD[1]; /* 0x184 */
+ uint32 d2h_intrlazy_0; /* 0x188 */
+ uint32 h2d_intrlazy_0; /* 0x18c */
+ uint32 h2d_intstat_0; /* 0x190 */
+ uint32 h2d_intmask_0; /* 0x194 */
+ uint32 d2h_intstat_0; /* 0x198 */
+ uint32 d2h_intmask_0; /* 0x19c */
+ uint32 ltr_state; /* 0x1A0 */
+ uint32 pwr_int_status; /* 0x1A4 */
+ uint32 pwr_int_mask; /* 0x1A8 */
+ uint32 pme_source; /* 0x1AC */
+ uint32 err_hdr_logreg1; /* 0x1B0 */
+ uint32 err_hdr_logreg2; /* 0x1B4 */
+ uint32 err_hdr_logreg3; /* 0x1B8 */
+ uint32 err_hdr_logreg4; /* 0x1BC */
+ uint32 err_code_logreg; /* 0x1C0 */
+ uint32 axi_dbg_ctl; /* 0x1C4 */
+ uint32 axi_dbg_data0; /* 0x1C8 */
+ uint32 axi_dbg_data1; /* 0x1CC */
+ uint32 PAD[4]; /* 0x1D0 - 0x1DF */
+ uint32 clk_ctl_st; /* 0x1E0 */
+ uint32 PAD[1]; /* 0x1E4 */
+ uint32 powerctl; /* 0x1E8 */
+ uint32 powerctl2; /* 0x1EC */
+ uint32 PAD[4]; /* 0x1F0 - 0x1FF */
+ pcie_devdmaregs_t h2d0_dmaregs; /* 0x200 - 0x23c */
+ pcie_devdmaregs_t d2h0_dmaregs; /* 0x240 - 0x27c */
+ pcie_devdmaregs_t h2d1_dmaregs; /* 0x280 - 0x2bc */
+ pcie_devdmaregs_t d2h1_dmaregs; /* 0x2c0 - 0x2fc */
+ pcie_devdmaregs_t h2d2_dmaregs; /* 0x300 - 0x33c */
+ pcie_devdmaregs_t d2h2_dmaregs; /* 0x340 - 0x37c */
+ pcie_devdmaregs_t h2d3_dmaregs; /* 0x380 - 0x3bc */
+ pcie_devdmaregs_t d2h3_dmaregs; /* 0x3c0 - 0x3fc */
+ uint32 d2h_intrlazy_1; /* 0x400 */
+ uint32 h2d_intrlazy_1; /* 0x404 */
+ uint32 h2d_intstat_1; /* 0x408 */
+ uint32 h2d_intmask_1; /* 0x40c */
+ uint32 d2h_intstat_1; /* 0x410 */
+ uint32 d2h_intmask_1; /* 0x414 */
+ uint32 PAD[2]; /* 0x418 - 0x41C */
+ uint32 d2h_intrlazy_2; /* 0x420 */
+ uint32 h2d_intrlazy_2; /* 0x424 */
+ uint32 h2d_intstat_2; /* 0x428 */
+ uint32 h2d_intmask_2; /* 0x42c */
+ uint32 d2h_intstat_2; /* 0x430 */
+ uint32 d2h_intmask_2; /* 0x434 */
+ uint32 PAD[10]; /* 0x438 - 0x45F */
+ uint32 ifrm_ctrlst; /* 0x460 */
+ uint32 PAD[1]; /* 0x464 */
+ pcie_ifrm_array_t ifrm_msk_arr; /* 0x468 - 0x46F */
+ pcie_ifrm_array_t ifrm_tid_arr[IFRM_FR_DEV_VALID];
+ /* 0x470 - 0x47F */
+ pcie_ifrm_vector_t ifrm_vec[IFRM_FR_DEV_MAX];
+ /* 0x480 - 0x4FF */
+ pcie_ifrm_intr_t ifrm_intr[IFRM_FR_DEV_MAX];
+ /* 0x500 - 0x53F */
+ /* HMAP regs for PCIE corerev >= 24 [0x540 - 0x5DF] */
+ pcie_hmapwindow_t hmapwindow[HMAP_MAX_WINDOWS]; /* 0x540 - 0x5BF */
+ pcie_hmapviolation_t hmapviolation; /* 0x5C0 - 0x5CF */
+ uint32 hmap_window_config; /* 0x5D0 */
+ uint32 PAD[3]; /* 0x5D4 - 0x5DF */
+ uint32 idma_hwa_status; /* 0x5E0 */
+ uint32 PAD[7]; /* 0x5E4 - 0x5FF */
+ uint32 PAD[2][64]; /* 0x600 - 0x7FF */
+ } pcie2;
+ } u;
+ uint16 sprom[64]; /* SPROM shadow Area : 0x800 - 0x880 */
+ uint32 PAD[96]; /* 0x880 - 0x9FF */
+ /* direct memory access (pcie2 rev19 and after) : 0xA00 - 0xAFF */
+ union {
+ /* corerev < 64 */
+ struct {
+ uint32 dar_ctrl; /* 0xA00 */
+ uint32 PAD[7]; /* 0xA04-0xA1F */
+ uint32 intstatus; /* 0xA20 */
+ uint32 PAD[1]; /* 0xA24 */
+ uint32 h2d_db_0_0; /* 0xA28 */
+ uint32 h2d_db_0_1; /* 0xA2C */
+ uint32 h2d_db_1_0; /* 0xA30 */
+ uint32 h2d_db_1_1; /* 0xA34 */
+ uint32 h2d_db_2_0; /* 0xA38 */
+ uint32 h2d_db_2_1; /* 0xA3C */
+ uint32 errlog; /* 0xA40 */
+ uint32 erraddr; /* 0xA44 */
+ uint32 mbox_int; /* 0xA48 */
+ uint32 fis_ctrl; /* 0xA4C */
+ uint32 PAD[36]; /* 0xA50 - 0xADC */
+ uint32 clk_ctl_st; /* 0xAE0 */
+ uint32 PAD[1]; /* 0xAE4 */
+ uint32 powerctl; /* 0xAE8 */
+ uint32 PAD[5]; /* 0xAEC-0xAFF */
+ } dar;
+ /* corerev > = 64 */
+ struct {
+ uint32 dar_ctrl; /* 0xA00 */
+ uint32 dar_cap; /* 0xA04 */
+ uint32 clk_ctl_st; /* 0xA08 */
+ uint32 powerctl; /* 0xA0C */
+ uint32 intstatus; /* 0xA10 */
+ uint32 PAD[3]; /* 0xA14-0xA1F */
+ uint32 h2d_db_0_0; /* 0xA20 */
+ uint32 h2d_db_0_1; /* 0xA24 */
+ uint32 h2d_db_1_0; /* 0xA28 */
+ uint32 h2d_db_1_1; /* 0xA2C */
+ uint32 h2d_db_2_0; /* 0xA30 */
+ uint32 h2d_db_2_1; /* 0xA34 */
+ uint32 h2d_db_3_0; /* 0xA38 */
+ uint32 h2d_db_3_1; /* 0xA3C */
+ uint32 h2d_db_4_0; /* 0xA40 */
+ uint32 h2d_db_4_1; /* 0xA44 */
+ uint32 h2d_db_5_0; /* 0xA48 */
+ uint32 h2d_db_5_1; /* 0xA4C */
+ uint32 h2d_db_6_0; /* 0xA50 */
+ uint32 h2d_db_6_1; /* 0xA54 */
+ uint32 h2d_db_7_0; /* 0xA58 */
+ uint32 h2d_db_7_1; /* 0xA5C */
+ uint32 errlog; /* 0xA60 */
+ uint32 erraddr; /* 0xA64 */
+ uint32 mbox_int; /* 0xA68 */
+ uint32 fis_ctrl; /* 0xA6C */
+ uint32 PAD[36]; /* 0xA70-0xAFF */
+ } dar_64;
+ } u1;
+ uint32 PAD[64]; /* 0xB00-0xBFF */
+ /* Function Control/Status Registers for corerev >= 64 */
+ /* 0xC00 - 0xCFF */
+ struct {
+ uint32 control; /* 0xC00 */
+ uint32 iostatus; /* 0xC04 */
+ uint32 capability; /* 0xC08 */
+ uint32 PAD[1]; /* 0xC0C */
+ uint32 intstatus; /* 0xC10 */
+ uint32 intmask; /* 0xC14 */
+ uint32 pwr_intstatus; /* 0xC18 */
+ uint32 pwr_intmask; /* 0xC1C */
+ uint32 msi_vector; /* 0xC20 */
+ uint32 msi_intmask; /* 0xC24 */
+ uint32 msi_intstatus; /* 0xC28 */
+ uint32 msi_pend_cnt; /* 0xC2C */
+ uint32 mbox_intstatus; /* 0xC30 */
+ uint32 mbox_intmask; /* 0xC34 */
+ uint32 ltr_state; /* 0xC38 */
+ uint32 PAD[1]; /* 0xC3C */
+ uint32 intr_vector; /* 0xC40 */
+ uint32 intr_addrlow; /* 0xC44 */
+ uint32 intr_addrhigh; /* 0xC48 */
+ uint32 PAD[45]; /* 0xC4C-0xCFF */
+ } ftn_ctrl;
+} sbpcieregs_t;
+#endif /* !defined(DONGLEBUILD) || defined(BCMSTANDALONE_TEST) || */
+ /* defined(ATE_BUILD) || defined(BCMDVFS) */
+
+#define PCIE_CFG_DA_OFFSET 0x400 /* direct access register offset for configuration space */
+
+/* 10th and 11th 4KB BAR0 windows */
+#define PCIE_TER_BAR0_WIN 0xc50
+#define PCIE_TER_BAR0_WRAPPER 0xc54
+
+/* PCI control */
+#define PCIE_RST_OE 0x01 /* When set, drives PCI_RESET out to pin */
+#define PCIE_RST 0x02 /* Value driven out to pin */
+#define PCIE_SPERST 0x04 /* SurvivePeRst */
+#define PCIE_FORCECFGCLKON_ALP 0x08
+#define PCIE_DISABLE_L1CLK_GATING 0x10
+#define PCIE_DLYPERST 0x100 /* Delay PeRst to CoE Core */
+#define PCIE_DISSPROMLD 0x200 /* DisableSpromLoadOnPerst */
+#define PCIE_WakeModeL2 0x1000 /* Wake on L2 */
+#define PCIE_MULTIMSI_EN 0x2000 /* enable multi-vector MSI messages */
+#define PCIE_PipeIddqDisable0 0x8000 /* Disable assertion of pcie_pipe_iddq during L1.2 and L2 */
+#define PCIE_PipeIddqDisable1 0x10000 /* Disable assertion of pcie_pipe_iddq during L2 */
+#define PCIE_EN_MDIO_IN_PERST 0x20000 /* enable access to internal registers when PERST */
+#define PCIE_HWDisableL1EntryEnable 0x40000 /* when set, HW requests can enter/exit L1 ASPM */
+#define PCIE_MSI_B2B_EN 0x100000 /* enable back-to-back MSI messages */
+#define PCIE_MSI_FIFO_CLEAR 0x200000 /* reset MSI FIFO */
+#define PCIE_IDMA_MODE_EN(rev) (REV_GE_64(rev) ? 0x1 : 0x800000) /* implicit M2M DMA mode */
+#define PCIE_TL_CLK_DETCT 0x4000000 /* enable TL clk detection */
+#define PCIE_REQ_PEND_DIS_L1 0x1000000 /* prevents entering L1 on pending requests from host */
+#define PCIE_DIS_L23CLK_GATE 0x10000000 /* disable clk gating in L23(pcie_tl_clk) */
+
+/* Function control (corerev > 64) */
+#define PCIE_CPLCA_ENABLE 0x01
+/* 1: send CPL with CA on BP error, 0: send CPLD with SC and data is FFFF */
+#define PCIE_DLY_PERST_TO_COE 0x02
+/* when set, PERST is held asserted until sprom-related register updates have completed */
+
+#define PCIE_CFGADDR 0x120 /* offsetof(configaddr) */
+#define PCIE_CFGDATA 0x124 /* offsetof(configdata) */
+#define PCIE_SWPME_FN0 0x10000
+#define PCIE_SWPME_FN0_SHF 16
+
+/* Interrupt status/mask */
+#define PCIE_INTA 0x01 /* PCIE INTA message is received */
+#define PCIE_INTB 0x02 /* PCIE INTB message is received */
+#define PCIE_INTFATAL 0x04 /* PCIE INTFATAL message is received */
+#define PCIE_INTNFATAL 0x08 /* PCIE INTNONFATAL message is received */
+#define PCIE_INTCORR 0x10 /* PCIE INTCORR message is received */
+#define PCIE_INTPME 0x20 /* PCIE INTPME message is received */
+#define PCIE_PERST 0x40 /* PCIE Reset Interrupt */
+
+#define PCIE_INT_MB_FN0_0 0x0100 /* PCIE to SB Mailbox int Fn0.0 is received */
+#define PCIE_INT_MB_FN0_1 0x0200 /* PCIE to SB Mailbox int Fn0.1 is received */
+#define PCIE_INT_MB_FN1_0 0x0400 /* PCIE to SB Mailbox int Fn1.0 is received */
+#define PCIE_INT_MB_FN1_1 0x0800 /* PCIE to SB Mailbox int Fn1.1 is received */
+#define PCIE_INT_MB_FN2_0 0x1000 /* PCIE to SB Mailbox int Fn2.0 is received */
+#define PCIE_INT_MB_FN2_1 0x2000 /* PCIE to SB Mailbox int Fn2.1 is received */
+#define PCIE_INT_MB_FN3_0 0x4000 /* PCIE to SB Mailbox int Fn3.0 is received */
+#define PCIE_INT_MB_FN3_1 0x8000 /* PCIE to SB Mailbox int Fn3.1 is received */
+
+/* PCIE MSI Vector Assignment register */
+#define MSIVEC_MB_0 (0x1 << 1) /* MSI Vector offset for mailbox0 is 2 */
+#define MSIVEC_MB_1 (0x1 << 2) /* MSI Vector offset for mailbox1 is 3 */
+#define MSIVEC_D2H0_DB0 (0x1 << 3) /* MSI Vector offset for interface0 door bell 0 is 4 */
+#define MSIVEC_D2H0_DB1 (0x1 << 4) /* MSI Vector offset for interface0 door bell 1 is 5 */
+
+/* PCIE MailboxInt/MailboxIntMask register */
+#define PCIE_MB_TOSB_FN0_0 0x0001 /* write to assert PCIEtoSB Mailbox interrupt */
+#define PCIE_MB_TOSB_FN0_1 0x0002
+#define PCIE_MB_TOSB_FN1_0 0x0004
+#define PCIE_MB_TOSB_FN1_1 0x0008
+#define PCIE_MB_TOSB_FN2_0 0x0010
+#define PCIE_MB_TOSB_FN2_1 0x0020
+#define PCIE_MB_TOSB_FN3_0 0x0040
+#define PCIE_MB_TOSB_FN3_1 0x0080
+#define PCIE_MB_TOPCIE_FN0_0 0x0100 /* int status/mask for SBtoPCIE Mailbox interrupts */
+#define PCIE_MB_TOPCIE_FN0_1 0x0200
+#define PCIE_MB_TOPCIE_FN1_0 0x0400
+#define PCIE_MB_TOPCIE_FN1_1 0x0800
+#define PCIE_MB_TOPCIE_FN2_0 0x1000
+#define PCIE_MB_TOPCIE_FN2_1 0x2000
+#define PCIE_MB_TOPCIE_FN3_0 0x4000
+#define PCIE_MB_TOPCIE_FN3_1 0x8000
+
+#define PCIE_MB_TOPCIE_DB0_D2H0(rev) (REV_GE_64(rev) ? 0x0001 : 0x010000)
+#define PCIE_MB_TOPCIE_DB0_D2H1(rev) (REV_GE_64(rev) ? 0x0002 : 0x020000)
+#define PCIE_MB_TOPCIE_DB1_D2H0(rev) (REV_GE_64(rev) ? 0x0004 : 0x040000)
+#define PCIE_MB_TOPCIE_DB1_D2H1(rev) (REV_GE_64(rev) ? 0x0008 : 0x080000)
+#define PCIE_MB_TOPCIE_DB2_D2H0(rev) (REV_GE_64(rev) ? 0x0010 : 0x100000)
+#define PCIE_MB_TOPCIE_DB2_D2H1(rev) (REV_GE_64(rev) ? 0x0020 : 0x200000)
+#define PCIE_MB_TOPCIE_DB3_D2H0(rev) (REV_GE_64(rev) ? 0x0040 : 0x400000)
+#define PCIE_MB_TOPCIE_DB3_D2H1(rev) (REV_GE_64(rev) ? 0x0080 : 0x800000)
+#define PCIE_MB_TOPCIE_DB4_D2H0(rev) (REV_GE_64(rev) ? 0x0100 : 0x0)
+#define PCIE_MB_TOPCIE_DB4_D2H1(rev) (REV_GE_64(rev) ? 0x0200 : 0x0)
+#define PCIE_MB_TOPCIE_DB5_D2H0(rev) (REV_GE_64(rev) ? 0x0400 : 0x0)
+#define PCIE_MB_TOPCIE_DB5_D2H1(rev) (REV_GE_64(rev) ? 0x0800 : 0x0)
+#define PCIE_MB_TOPCIE_DB6_D2H0(rev) (REV_GE_64(rev) ? 0x1000 : 0x0)
+#define PCIE_MB_TOPCIE_DB6_D2H1(rev) (REV_GE_64(rev) ? 0x2000 : 0x0)
+#define PCIE_MB_TOPCIE_DB7_D2H0(rev) (REV_GE_64(rev) ? 0x4000 : 0x0)
+#define PCIE_MB_TOPCIE_DB7_D2H1(rev) (REV_GE_64(rev) ? 0x8000 : 0x0)
+
+#define PCIE_MB_D2H_MB_MASK(rev) \
+ (PCIE_MB_TOPCIE_DB0_D2H0(rev) | PCIE_MB_TOPCIE_DB0_D2H1(rev) | \
+ PCIE_MB_TOPCIE_DB1_D2H0(rev) | PCIE_MB_TOPCIE_DB1_D2H1(rev) | \
+ PCIE_MB_TOPCIE_DB2_D2H0(rev) | PCIE_MB_TOPCIE_DB2_D2H1(rev) | \
+ PCIE_MB_TOPCIE_DB3_D2H0(rev) | PCIE_MB_TOPCIE_DB3_D2H1(rev) | \
+ PCIE_MB_TOPCIE_DB4_D2H0(rev) | PCIE_MB_TOPCIE_DB4_D2H1(rev) | \
+ PCIE_MB_TOPCIE_DB5_D2H0(rev) | PCIE_MB_TOPCIE_DB5_D2H1(rev) | \
+ PCIE_MB_TOPCIE_DB6_D2H0(rev) | PCIE_MB_TOPCIE_DB6_D2H1(rev) | \
+ PCIE_MB_TOPCIE_DB7_D2H0(rev) | PCIE_MB_TOPCIE_DB7_D2H1(rev))
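+
+/*
+ * Usage sketch (illustrative, not part of the original header): testing
+ * whether any device-to-host doorbell mailbox bit is pending in a
+ * MailboxInt status value, given the rev-dependent layouts above.
+ */
+#if 0 /* example only */
+static inline int
+pcie_d2h_db_pending(uint32 mbintstatus, uint corerev)
+{
+	/* the mask macro picks the rev >= 64 or legacy bit positions */
+	return (mbintstatus & PCIE_MB_D2H_MB_MASK(corerev)) != 0;
+}
+#endif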
+
+#define SBTOPCIE0_BASE 0x08000000
+#define SBTOPCIE1_BASE 0x0c000000
+
+/* Protection Control register */
+#define PROTECT_CFG (1 << 0)
+#define PROTECT_DMABADDR (1 << 1)
+
+#define PROTECT_FN_CFG_WRITE (1 << 0)
+#define PROTECT_FN_CFG_READ (1 << 1)
+#define PROTECT_FN_ENUM_WRITE (1 << 2)
+#define PROTECT_FN_ENUM_READ (1 << 3)
+#define PROTECT_FN_DMABADDR (1 << 4)
+
+/* On chips with CCI-400, the small pcie 128 MB region base has shifted */
+#define CCI400_SBTOPCIE0_BASE 0x20000000
+#define CCI400_SBTOPCIE1_BASE 0x24000000
+
+/* SB to PCIE translation masks */
+#define SBTOPCIE0_MASK 0xfc000000
+#define SBTOPCIE1_MASK 0xfc000000
+#define SBTOPCIE2_MASK 0xc0000000
+
+/* Access type bits (0:1) */
+#define SBTOPCIE_MEM 0
+#define SBTOPCIE_IO 1
+#define SBTOPCIE_CFG0 2
+#define SBTOPCIE_CFG1 3
+
+/* Prefetch enable bit 2 */
+#define SBTOPCIE_PF 4
+
+/* Write Burst enable for memory write bit 3 */
+#define SBTOPCIE_WR_BURST 8
+
+/* config access */
+#define CONFIGADDR_FUNC_MASK 0x7000
+#define CONFIGADDR_FUNC_SHF 12
+#define CONFIGADDR_REG_MASK 0x0FFF
+#define CONFIGADDR_REG_SHF 0
+
+#define PCIE_CONFIG_INDADDR(f, r) ((((f) & CONFIGADDR_FUNC_MASK) << CONFIGADDR_FUNC_SHF) | \
+ (((r) & CONFIGADDR_REG_MASK) << CONFIGADDR_REG_SHF))
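+
+/*
+ * Access sketch (illustrative, not part of the original header): an
+ * indirect config read goes through the configaddr/configdata pair at
+ * PCIE_CFGADDR/PCIE_CFGDATA. R_REG/W_REG/osh are assumed to be the usual
+ * osl accessors and 'regs' a mapped sbpcieregs_t pointer.
+ *
+ *	W_REG(osh, &regs->configaddr, PCIE_CONFIG_INDADDR(func, offset));
+ *	val = R_REG(osh, &regs->configdata);
+ */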
+
+/* PCIE protocol regs Indirect Address */
+#define PCIEADDR_PROT_MASK 0x300
+#define PCIEADDR_PROT_SHF 8
+#define PCIEADDR_PL_TLP 0
+#define PCIEADDR_PL_DLLP 1
+#define PCIEADDR_PL_PLP 2
+
+#define PCIE_CORE_REG_CONTROL 0x00u /* Control */
+#define PCIE_CORE_REG_IOSTATUS 0x04u /* IO status */
+#define PCIE_CORE_REG_BITSTATUS 0x0Cu /* bitstatus */
+#define PCIE_CORE_REG_GPIO_SEL 0x10u /* gpio sel */
+#define PCIE_CORE_REG_GPIO_OUT_EN 0x14u /* gpio out en */
+#define PCIE_CORE_REG_INT_STATUS 0x20u /* int status */
+#define PCIE_CORE_REG_INT_MASK 0x24u /* int mask */
+#define PCIE_CORE_REG_SB_PCIE_MB 0x28u /* sbpcie mb */
+#define PCIE_CORE_REG_ERRLOG 0x40u /* errlog */
+#define PCIE_CORE_REG_ERR_ADDR 0x44u /* errlog addr */
+#define PCIE_CORE_REG_MB_INTR 0x48u /* MB intr */
+#define PCIE_CORE_REG_SB_PCIE_0 0x100u /* sbpcie0 map */
+#define PCIE_CORE_REG_SB_PCIE_1 0x104u /* sbpcie1 map */
+#define PCIE_CORE_REG_SB_PCIE_2 0x108u /* sbpcie2 map */
+
+/* PCIE Config registers */
+#define PCIE_CFG_DEV_STS_CTRL_2		0x0d4u	/* dev_sts_control_2 */
+#define PCIE_CFG_ADV_ERR_CAP 0x100u /* adv_err_cap */
+#define PCIE_CFG_UC_ERR_STS 0x104u /* uc_err_status */
+#define PCIE_CFG_UC_ERR_MASK 0x108u /* ucorr_err_mask */
+#define PCIE_CFG_UNCOR_ERR_SERV 0x10cu /* ucorr_err_sevr */
+#define PCIE_CFG_CORR_ERR_STS 0x110u /* corr_err_status */
+#define PCIE_CFG_CORR_ERR_MASK 0x114u /* corr_err_mask */
+#define PCIE_CFG_ADV_ERR_CTRL 0x118u /* adv_err_cap_control */
+#define PCIE_CFG_HDR_LOG1 0x11Cu /* header_log1 */
+#define PCIE_CFG_HDR_LOG2 0x120u /* header_log2 */
+#define PCIE_CFG_HDR_LOG3 0x124u /* header_log3 */
+#define PCIE_CFG_HDR_LOG4 0x128u /* header_log4 */
+#define PCIE_CFG_PML1_SUB_CAP_ID 0x240u /* PML1sub_capID */
+#define PCIE_CFG_PML1_SUB_CAP_REG 0x244u /* PML1_sub_Cap_reg */
+#define PCIE_CFG_PML1_SUB_CTRL1 0x248u /* PML1_sub_control1 */
+#define PCIE_CFG_PML1_SUB_CTRL3 0x24Cu /* PML1_sub_control2 */
+#define PCIE_CFG_TL_CTRL_5 0x814u /* tl_control_5 */
+#define PCIE_CFG_PHY_ERR_ATT_VEC 0x1820u /* phy_err_attn_vec */
+#define PCIE_CFG_PHY_ERR_ATT_MASK 0x1824u /* phy_err_attn_mask */
+
+/* PCIE protocol PHY diagnostic registers */
+#define PCIE_PLP_MODEREG 0x200u /* Mode */
+#define PCIE_PLP_STATUSREG 0x204u /* Status */
+#define PCIE_PLP_LTSSMCTRLREG 0x208u /* LTSSM control */
+#define PCIE_PLP_LTLINKNUMREG 0x20cu /* Link Training Link number */
+#define PCIE_PLP_LTLANENUMREG 0x210u /* Link Training Lane number */
+#define PCIE_PLP_LTNFTSREG 0x214u /* Link Training N_FTS */
+#define PCIE_PLP_ATTNREG 0x218u /* Attention */
+#define PCIE_PLP_ATTNMASKREG 0x21Cu /* Attention Mask */
+#define PCIE_PLP_RXERRCTR 0x220u /* Rx Error */
+#define PCIE_PLP_RXFRMERRCTR 0x224u /* Rx Framing Error */
+#define PCIE_PLP_RXERRTHRESHREG 0x228u /* Rx Error threshold */
+#define PCIE_PLP_TESTCTRLREG 0x22Cu /* Test Control reg */
+#define PCIE_PLP_SERDESCTRLOVRDREG 0x230u /* SERDES Control Override */
+#define PCIE_PLP_TIMINGOVRDREG 0x234u /* Timing param override */
+#define PCIE_PLP_RXTXSMDIAGREG 0x238u /* RXTX State Machine Diag */
+#define PCIE_PLP_LTSSMDIAGREG 0x23Cu /* LTSSM State Machine Diag */
+
+/* PCIE protocol DLLP diagnostic registers */
+#define PCIE_DLLP_LCREG 0x100u /* Link Control */
+#define PCIE_DLLP_LSREG 0x104u /* Link Status */
+#define PCIE_DLLP_LAREG 0x108u /* Link Attention */
+#define PCIE_DLLP_LAMASKREG 0x10Cu /* Link Attention Mask */
+#define PCIE_DLLP_NEXTTXSEQNUMREG 0x110u /* Next Tx Seq Num */
+#define PCIE_DLLP_ACKEDTXSEQNUMREG 0x114u /* Acked Tx Seq Num */
+#define PCIE_DLLP_PURGEDTXSEQNUMREG 0x118u /* Purged Tx Seq Num */
+#define PCIE_DLLP_RXSEQNUMREG 0x11Cu /* Rx Sequence Number */
+#define PCIE_DLLP_LRREG 0x120u /* Link Replay */
+#define PCIE_DLLP_LACKTOREG 0x124u /* Link Ack Timeout */
+#define PCIE_DLLP_PMTHRESHREG 0x128u /* Power Management Threshold */
+#define PCIE_DLLP_RTRYWPREG 0x12Cu /* Retry buffer write ptr */
+#define PCIE_DLLP_RTRYRPREG 0x130u /* Retry buffer Read ptr */
+#define PCIE_DLLP_RTRYPPREG 0x134u /* Retry buffer Purged ptr */
+#define PCIE_DLLP_RTRRWREG 0x138u /* Retry buffer Read/Write */
+#define PCIE_DLLP_ECTHRESHREG 0x13Cu /* Error Count Threshold */
+#define PCIE_DLLP_TLPERRCTRREG 0x140u /* TLP Error Counter */
+#define PCIE_DLLP_ERRCTRREG 0x144u /* Error Counter */
+#define PCIE_DLLP_NAKRXCTRREG 0x148u /* NAK Received Counter */
+#define PCIE_DLLP_TESTREG 0x14Cu /* Test */
+#define PCIE_DLLP_PKTBIST 0x150u /* Packet BIST */
+#define PCIE_DLLP_PCIE11 0x154u /* DLLP PCIE 1.1 reg */
+
+#define PCIE_DLLP_LSREG_LINKUP (1u << 16u)
+
+/* PCIE protocol TLP diagnostic registers */
+#define PCIE_TLP_CONFIGREG 0x000u /* Configuration */
+#define PCIE_TLP_WORKAROUNDSREG 0x004u /* TLP Workarounds */
+#define PCIE_TLP_WRDMAUPPER 0x010u /* Write DMA Upper Address */
+#define PCIE_TLP_WRDMALOWER 0x014u /* Write DMA Lower Address */
+#define PCIE_TLP_WRDMAREQ_LBEREG 0x018u /* Write DMA Len/ByteEn Req */
+#define PCIE_TLP_RDDMAUPPER 0x01Cu /* Read DMA Upper Address */
+#define PCIE_TLP_RDDMALOWER 0x020u /* Read DMA Lower Address */
+#define PCIE_TLP_RDDMALENREG 0x024u /* Read DMA Len Req */
+#define PCIE_TLP_MSIDMAUPPER 0x028u /* MSI DMA Upper Address */
+#define PCIE_TLP_MSIDMALOWER 0x02Cu /* MSI DMA Lower Address */
+#define PCIE_TLP_MSIDMALENREG 0x030u /* MSI DMA Len Req */
+#define PCIE_TLP_SLVREQLENREG 0x034u /* Slave Request Len */
+#define PCIE_TLP_FCINPUTSREQ 0x038u /* Flow Control Inputs */
+#define PCIE_TLP_TXSMGRSREQ 0x03Cu /* Tx StateMachine and Gated Req */
+#define PCIE_TLP_ADRACKCNTARBLEN 0x040u /* Address Ack XferCnt and ARB Len */
+#define PCIE_TLP_DMACPLHDR0 0x044u /* DMA Completion Hdr 0 */
+#define PCIE_TLP_DMACPLHDR1 0x048u /* DMA Completion Hdr 1 */
+#define PCIE_TLP_DMACPLHDR2 0x04Cu /* DMA Completion Hdr 2 */
+#define PCIE_TLP_DMACPLMISC0 0x050u /* DMA Completion Misc0 */
+#define PCIE_TLP_DMACPLMISC1 0x054u /* DMA Completion Misc1 */
+#define PCIE_TLP_DMACPLMISC2 0x058u /* DMA Completion Misc2 */
+#define PCIE_TLP_SPTCTRLLEN 0x05Cu /* Split Controller Req len */
+#define PCIE_TLP_SPTCTRLMSIC0 0x060u /* Split Controller Misc 0 */
+#define PCIE_TLP_SPTCTRLMSIC1 0x064u /* Split Controller Misc 1 */
+#define PCIE_TLP_BUSDEVFUNC 0x068u /* Bus/Device/Func */
+#define PCIE_TLP_RESETCTR 0x06Cu /* Reset Counter */
+#define PCIE_TLP_RTRYBUF 0x070u /* Retry Buffer value */
+#define PCIE_TLP_TGTDEBUG1 0x074u /* Target Debug Reg1 */
+#define PCIE_TLP_TGTDEBUG2 0x078u /* Target Debug Reg2 */
+#define PCIE_TLP_TGTDEBUG3 0x07Cu /* Target Debug Reg3 */
+#define PCIE_TLP_TGTDEBUG4 0x080u /* Target Debug Reg4 */
+
+/* PCIE2 MDIO register offsets */
+#define PCIE2_MDIO_CONTROL 0x128
+#define PCIE2_MDIO_WR_DATA 0x12C
+#define PCIE2_MDIO_RD_DATA 0x130
+
+/* MDIO control */
+#define MDIOCTL_DIVISOR_MASK 0x7fu /* clock to be used on MDIO */
+#define MDIOCTL_DIVISOR_VAL 0x2u
+#define MDIOCTL_PREAM_EN	0x80u	/* Enable preamble sequence */
+#define MDIOCTL_ACCESS_DONE	0x100u	/* Transaction complete */
+
+/* MDIO Data */
+#define MDIODATA_MASK 0x0000ffff /* data 2 bytes */
+#define MDIODATA_TA 0x00020000 /* Turnaround */
+#define MDIODATA_REGADDR_SHF_OLD 18 /* Regaddr shift (rev < 10) */
+#define MDIODATA_REGADDR_MASK_OLD 0x003c0000 /* Regaddr Mask (rev < 10) */
+#define MDIODATA_DEVADDR_SHF_OLD 22 /* Physmedia devaddr shift (rev < 10) */
+#define MDIODATA_DEVADDR_MASK_OLD 0x0fc00000 /* Physmedia devaddr Mask (rev < 10) */
+#define MDIODATA_REGADDR_SHF 18 /* Regaddr shift */
+#define MDIODATA_REGADDR_MASK 0x007c0000 /* Regaddr Mask */
+#define MDIODATA_DEVADDR_SHF 23 /* Physmedia devaddr shift */
+#define MDIODATA_DEVADDR_MASK 0x0f800000 /* Physmedia devaddr Mask */
+#define MDIODATA_WRITE 0x10000000 /* write Transaction */
+#define MDIODATA_READ 0x20000000 /* Read Transaction */
+#define MDIODATA_START 0x40000000 /* start of Transaction */
+
+#define MDIODATA_DEV_ADDR 0x0 /* dev address for serdes */
+#define MDIODATA_BLK_ADDR 0x1F /* blk address for serdes */
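+
+/*
+ * Composition sketch (illustrative, not part of the original header):
+ * building the 32-bit word for an MDIO write transaction to a serdes
+ * register from the fields above (current, i.e. rev >= 10, layout).
+ */
+#if 0 /* example only */
+static inline uint32
+mdiodata_write_word(uint32 devaddr, uint32 regaddr, uint16 data)
+{
+	return MDIODATA_START | MDIODATA_WRITE | MDIODATA_TA |
+		((devaddr << MDIODATA_DEVADDR_SHF) & MDIODATA_DEVADDR_MASK) |
+		((regaddr << MDIODATA_REGADDR_SHF) & MDIODATA_REGADDR_MASK) |
+		((uint32)data & MDIODATA_MASK);
+}
+#endif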
+
+/* MDIO control/wrData/rdData register defines for PCIE Gen 2 */
+#define MDIOCTL2_DIVISOR_MASK 0x7f /* clock to be used on MDIO */
+#define MDIOCTL2_DIVISOR_VAL 0x2
+#define MDIOCTL2_REGADDR_SHF 8 /* Regaddr shift */
+#define MDIOCTL2_REGADDR_MASK 0x00FFFF00 /* Regaddr Mask */
+#define MDIOCTL2_DEVADDR_SHF 24 /* Physmedia devaddr shift */
+#define MDIOCTL2_DEVADDR_MASK 0x0f000000 /* Physmedia devaddr Mask */
+#define MDIOCTL2_SLAVE_BYPASS 0x10000000 /* IP slave bypass */
+#define MDIOCTL2_READ		0x20000000	/* Read Transaction */
+
+#define MDIODATA2_DONE 0x80000000u /* rd/wr transaction done */
+#define MDIODATA2_MASK 0x7FFFFFFF /* rd/wr transaction data */
+#define MDIODATA2_DEVADDR_SHF 4 /* Physmedia devaddr shift */
+
+/* MDIO devices (SERDES modules)
+ * Unlike old pcie cores (rev < 10), the rev10 pcie serdes organizes registers into a few blocks,
+ * so a two-level mapping (blockidx, register offset) is required.
+ */
+#define MDIO_DEV_IEEE0 0x000
+#define MDIO_DEV_IEEE1 0x001
+#define MDIO_DEV_BLK0 0x800
+#define MDIO_DEV_BLK1 0x801
+#define MDIO_DEV_BLK2 0x802
+#define MDIO_DEV_BLK3 0x803
+#define MDIO_DEV_BLK4 0x804
+#define MDIO_DEV_TXPLL 0x808 /* TXPLL register block idx */
+#define MDIO_DEV_TXCTRL0 0x820
+#define MDIO_DEV_SERDESID 0x831
+#define MDIO_DEV_RXCTRL0 0x840
+
+/* XgxsBlk1_A Register Offsets */
+#define BLK1_PWR_MGMT0 0x16
+#define BLK1_PWR_MGMT1 0x17
+#define BLK1_PWR_MGMT2 0x18
+#define BLK1_PWR_MGMT3 0x19
+#define BLK1_PWR_MGMT4 0x1A
+
+/* serdes regs (rev < 10) */
+#define MDIODATA_DEV_PLL 0x1d /* SERDES PLL Dev */
+#define MDIODATA_DEV_TX 0x1e /* SERDES TX Dev */
+#define MDIODATA_DEV_RX 0x1f /* SERDES RX Dev */
+ /* SERDES RX registers */
+#define SERDES_RX_CTRL 1 /* Rx cntrl */
+#define SERDES_RX_TIMER1 2 /* Rx Timer1 */
+#define SERDES_RX_CDR 6 /* CDR */
+#define SERDES_RX_CDRBW 7 /* CDR BW */
+
+ /* SERDES RX control register */
+#define SERDES_RX_CTRL_FORCE 0x80 /* rxpolarity_force */
+#define SERDES_RX_CTRL_POLARITY 0x40 /* rxpolarity_value */
+
+ /* SERDES PLL registers */
+#define SERDES_PLL_CTRL 1 /* PLL control reg */
+#define PLL_CTRL_FREQDET_EN 0x4000 /* bit 14 is FREQDET on */
+
+/* Power management threshold */
+#define PCIE_L0THRESHOLDTIME_MASK	0x00FFu	/* bits 0 - 7 */
+#define PCIE_L1THRESHOLDTIME_MASK 0xFF00u /* bits 8 - 15 */
+#define PCIE_L1THRESHOLDTIME_SHIFT 8 /* PCIE_L1THRESHOLDTIME_SHIFT */
+#define PCIE_L1THRESHOLD_WARVAL 0x72 /* WAR value */
+#define PCIE_ASPMTIMER_EXTEND	0x01000000	/* > rev7: enable extended ASPM timer */
+
+/* SPROM offsets */
+#define SRSH_ASPM_OFFSET 4 /* word 4 */
+#define SRSH_ASPM_ENB 0x18 /* bit 3, 4 */
+#define SRSH_ASPM_L1_ENB 0x10 /* bit 4 */
+#define SRSH_ASPM_L0s_ENB 0x8 /* bit 3 */
+#define SRSH_PCIE_MISC_CONFIG 5 /* word 5 */
+#define SRSH_L23READY_EXIT_NOPERST 0x8000u /* bit 15 */
+#define SRSH_CLKREQ_OFFSET_REV5 20 /* word 20 for srom rev <= 5 */
+#define SRSH_CLKREQ_OFFSET_REV8 52 /* word 52 for srom rev 8 */
+#define SRSH_CLKREQ_ENB 0x0800 /* bit 11 */
+#define SRSH_BD_OFFSET 6 /* word 6 */
+#define SRSH_AUTOINIT_OFFSET 18 /* auto initialization enable */
+
+/* PCI Capability ID's
+ * Reference include/linux/pci_regs.h
+ * #define PCI_CAP_LIST_ID 0 // Capability ID
+ * #define PCI_CAP_ID_PM 0x01 // Power Management
+ * #define PCI_CAP_ID_AGP 0x02 // Accelerated Graphics Port
+ * #define PCI_CAP_ID_VPD 0x03 // Vital Product Data
+ * #define PCI_CAP_ID_SLOTID 0x04 // Slot Identification
+ * #define PCI_CAP_ID_MSI 0x05 // Message Signalled Interrupts
+ * #define PCI_CAP_ID_CHSWP 0x06 // CompactPCI HotSwap
+ * #define PCI_CAP_ID_PCIX 0x07 // PCI-X
+ * #define PCI_CAP_ID_HT 0x08 // HyperTransport
+ * #define PCI_CAP_ID_VNDR 0x09 // Vendor-Specific
+ * #define PCI_CAP_ID_DBG 0x0A // Debug port
+ * #define PCI_CAP_ID_CCRC 0x0B // CompactPCI Central Resource Control
+ * #define PCI_CAP_ID_SHPC 0x0C // PCI Standard Hot-Plug Controller
+ * #define PCI_CAP_ID_SSVID 0x0D // Bridge subsystem vendor/device ID
+ * #define PCI_CAP_ID_AGP3 0x0E // AGP Target PCI-PCI bridge
+ * #define PCI_CAP_ID_SECDEV 0x0F // Secure Device
+ * #define PCI_CAP_ID_MSIX 0x11 // MSI-X
+ * #define PCI_CAP_ID_SATA 0x12 // SATA Data/Index Conf.
+ * #define PCI_CAP_ID_AF 0x13 // PCI Advanced Features
+ * #define PCI_CAP_ID_EA 0x14 // PCI Enhanced Allocation
+ * #define PCI_CAP_ID_MAX PCI_CAP_ID_EA
+ */
+
+#define PCIE_CAP_ID_EXP 0x10 // PCI Express
+
+/* PCIe Capabilities Offsets
+ * Reference include/linux/pci_regs.h
+ * #define PCIE_CAP_FLAGS 2 // Capabilities register
+ * #define PCIE_CAP_DEVCAP 4 // Device capabilities
+ * #define PCIE_CAP_DEVCTL 8 // Device Control
+ * #define PCIE_CAP_DEVSTA 10 // Device Status
+ * #define PCIE_CAP_LNKCAP 12 // Link Capabilities
+ * #define PCIE_CAP_LNKCTL 16 // Link Control
+ * #define PCIE_CAP_LNKSTA 18 // Link Status
+ * #define PCI_CAP_EXP_ENDPOINT_SIZEOF_V1 20 // v1 endpoints end here
+ * #define PCIE_CAP_SLTCAP 20 // Slot Capabilities
+ * #define PCIE_CAP_SLTCTL 24 // Slot Control
+ * #define PCIE_CAP_SLTSTA 26 // Slot Status
+ * #define PCIE_CAP_RTCTL 28 // Root Control
+ * #define PCIE_CAP_RTCAP 30 // Root Capabilities
+ * #define PCIE_CAP_RTSTA 32 // Root Status
+ */
+
+/* Linkcapability reg offset in PCIE Cap */
+#define PCIE_CAP_LINKCAP_OFFSET 12 /* linkcap offset in pcie cap */
+#define PCIE_CAP_LINKCAP_LNKSPEED_MASK 0xf /* Supported Link Speeds */
+#define PCIE_CAP_LINKCAP_GEN2 0x2 /* Value for GEN2 */
+
+/* Uc_Err reg offset in AER Cap */
+#define PCIE_EXTCAP_ID_ERR 0x01 /* Advanced Error Reporting */
+#define PCIE_EXTCAP_AER_UCERR_OFFSET 4 /* Uc_Err reg offset in AER Cap */
+#define PCIE_EXTCAP_ERR_HEADER_LOG_0 28
+#define PCIE_EXTCAP_ERR_HEADER_LOG_1 32
+#define PCIE_EXTCAP_ERR_HEADER_LOG_2 36
+#define PCIE_EXTCAP_ERR_HEADER_LOG_3 40
+
+/* L1SS reg offset in L1SS Ext Cap */
+#define PCIE_EXTCAP_ID_L1SS 0x1e /* PCI Express L1 PM Substates Capability */
+#define PCIE_EXTCAP_L1SS_CAP_OFFSET 4 /* L1SSCap reg offset in L1SS Cap */
+#define PCIE_EXTCAP_L1SS_CONTROL_OFFSET 8 /* L1SSControl reg offset in L1SS Cap */
+#define PCIE_EXTCAP_L1SS_CONTROL2_OFFSET 0xc /* L1SSControl reg offset in L1SS Cap */
+
+/* Linkcontrol reg offset in PCIE Cap */
+#define PCIE_CAP_LINKCTRL_OFFSET 16 /* linkctrl offset in pcie cap */
+#define PCIE_CAP_LCREG_ASPML0s 0x01 /* ASPM L0s in linkctrl */
+#define PCIE_CAP_LCREG_ASPML1 0x02 /* ASPM L1 in linkctrl */
+#define PCIE_CLKREQ_ENAB 0x100 /* CLKREQ Enab in linkctrl */
+#define PCIE_LINKSPEED_MASK 0xF0000u /* bits 0 - 3 of high word */
+#define PCIE_LINKSPEED_SHIFT 16 /* PCIE_LINKSPEED_SHIFT */
+#define PCIE_LINK_STS_LINKSPEED_5Gbps (0x2 << PCIE_LINKSPEED_SHIFT) /* PCIE_LINKSPEED 5Gbps */
+
+/* Devcontrol reg offset in PCIE Cap */
+#define PCIE_CAP_DEVCTRL_OFFSET 8 /* devctrl offset in pcie cap */
+#define PCIE_CAP_DEVCTRL_MRRS_MASK 0x7000 /* Max read request size mask */
+#define PCIE_CAP_DEVCTRL_MRRS_SHIFT 12 /* Max read request size shift */
+#define PCIE_CAP_DEVCTRL_MRRS_128B 0 /* 128 Byte */
+#define PCIE_CAP_DEVCTRL_MRRS_256B 1 /* 256 Byte */
+#define PCIE_CAP_DEVCTRL_MRRS_512B 2 /* 512 Byte */
+#define PCIE_CAP_DEVCTRL_MRRS_1024B 3 /* 1024 Byte */
+#define PCIE_CAP_DEVCTRL_MPS_MASK 0x00e0 /* Max payload size mask */
+#define PCIE_CAP_DEVCTRL_MPS_SHIFT 5 /* Max payload size shift */
+#define PCIE_CAP_DEVCTRL_MPS_128B 0 /* 128 Byte */
+#define PCIE_CAP_DEVCTRL_MPS_256B 1 /* 256 Byte */
+#define PCIE_CAP_DEVCTRL_MPS_512B 2 /* 512 Byte */
+#define PCIE_CAP_DEVCTRL_MPS_1024B 3 /* 1024 Byte */
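+
+/*
+ * Decode sketch (illustrative, not part of the original header): the
+ * MRRS/MPS fields above use the standard PCIe encoding where 0 means
+ * 128 bytes and each increment doubles the size, so bytes == 128 << enc.
+ */
+#if 0 /* example only */
+static inline uint32
+pcie_devctrl_mrrs_bytes(uint32 devctrl)
+{
+	return 128u << ((devctrl & PCIE_CAP_DEVCTRL_MRRS_MASK) >>
+		PCIE_CAP_DEVCTRL_MRRS_SHIFT);
+}
+#endif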
+
+#define PCIE_ASPM_CTRL_MASK 3 /* bit 0 and 1 */
+#define PCIE_ASPM_ENAB 3 /* ASPM L0s & L1 in linkctrl */
+#define PCIE_ASPM_L1_ENAB 2 /* ASPM L0s & L1 in linkctrl */
+#define PCIE_ASPM_L0s_ENAB 1 /* ASPM L0s & L1 in linkctrl */
+#define PCIE_ASPM_DISAB 0 /* ASPM L0s & L1 in linkctrl */
+
+#define PCIE_ASPM_L11_ENAB 8 /* ASPM L1.1 in PML1_sub_control2 */
+#define PCIE_ASPM_L12_ENAB 4 /* ASPM L1.2 in PML1_sub_control2 */
+
+#define PCIE_EXT_L1SS_MASK 0xf /* Bits [3:0] of L1SSControl 0x248 */
+#define PCIE_EXT_L1SS_ENAB 0xf /* Bits [3:0] of L1SSControl 0x248 */
+
+/* NumMsg and NumMsgEn in PCIE MSI Cap */
+#define MSICAP_NUM_MSG_SHF 17
+#define MSICAP_NUM_MSG_MASK (0x7 << MSICAP_NUM_MSG_SHF)
+#define MSICAP_NUM_MSG_EN_SHF 20
+#define MSICAP_NUM_MSG_EN_MASK (0x7 << MSICAP_NUM_MSG_EN_SHF)
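+
+/*
+ * Decode sketch (illustrative, not part of the original header): per the
+ * standard MSI capability layout, NumMsg/NumMsgEn encode log2 of the
+ * message count, so the number of enabled MSI vectors is 1 << field.
+ */
+#if 0 /* example only */
+static inline uint32
+msicap_num_msg_enabled(uint32 msicap)
+{
+	return 1u << ((msicap & MSICAP_NUM_MSG_EN_MASK) >> MSICAP_NUM_MSG_EN_SHF);
+}
+#endif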
+
+/* Devcontrol2 reg offset in PCIE Cap */
+#define PCIE_CAP_DEVCTRL2_OFFSET 0x28 /* devctrl2 offset in pcie cap */
+#define PCIE_CAP_DEVCTRL2_LTR_ENAB_MASK 0x400 /* Latency Tolerance Reporting Enable */
+#define PCIE_CAP_DEVCTRL2_OBFF_ENAB_SHIFT 13 /* Enable OBFF mechanism, select signaling method */
+#define PCIE_CAP_DEVCTRL2_OBFF_ENAB_MASK 0x6000 /* Enable OBFF mechanism, select signaling method */
+
+/* LTR registers in PCIE Cap */
+#define PCIE_LTR0_REG_OFFSET 0x844u /* ltr0_reg offset in pcie cap */
+#define PCIE_LTR1_REG_OFFSET 0x848u /* ltr1_reg offset in pcie cap */
+#define PCIE_LTR2_REG_OFFSET 0x84cu /* ltr2_reg offset in pcie cap */
+#define PCIE_LTR0_REG_DEFAULT_60 0x883c883cu /* active latency default to 60usec */
+#define PCIE_LTR0_REG_DEFAULT_150 0x88968896u /* active latency default to 150usec */
+#define PCIE_LTR1_REG_DEFAULT 0x88648864u /* idle latency default to 100usec */
+#define PCIE_LTR2_REG_DEFAULT 0x90039003u /* sleep latency default to 3msec */
+#define PCIE_LTR_LAT_VALUE_MASK 0x3FFu /* LTR Latency mask */
+#define PCIE_LTR_LAT_SCALE_SHIFT 10u /* LTR Scale shift */
+#define PCIE_LTR_LAT_SCALE_MASK 0x1C00u /* LTR Scale mask */
+#define PCIE_LTR_SNOOP_REQ_SHIFT 15u /* LTR SNOOP REQ shift */
+#define PCIE_LTR_SNOOP_REQ_MASK 0x8000u /* LTR SNOOP REQ mask */
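+
+/*
+ * Decode sketch (illustrative, not part of the original header):
+ * splitting an LTR register half into its latency value and scale
+ * fields. Per the PCIe LTR definition the scale selects a multiplier of
+ * 2^(5*scale) ns, but only the raw field extraction is shown here.
+ */
+#if 0 /* example only */
+static inline uint32
+ltr_lat_value(uint32 ltr_half)
+{
+	return ltr_half & PCIE_LTR_LAT_VALUE_MASK;
+}
+
+static inline uint32
+ltr_lat_scale(uint32 ltr_half)
+{
+	return (ltr_half & PCIE_LTR_LAT_SCALE_MASK) >> PCIE_LTR_LAT_SCALE_SHIFT;
+}
+#endif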
+
+/* Status reg PCIE_PLP_STATUSREG */
+#define PCIE_PLP_POLARITYINV_STAT 0x10u
+
+/* PCIE BRCM Vendor CAP REVID reg bits */
+#define BRCMCAP_PCIEREV_CT_MASK 0xF00u
+#define BRCMCAP_PCIEREV_CT_SHIFT 8u
+#define BRCMCAP_PCIEREV_REVID_MASK 0xFFu
+#define BRCMCAP_PCIEREV_REVID_SHIFT 0
+
+#define PCIE_REVREG_CT_PCIE1 0
+#define PCIE_REVREG_CT_PCIE2 1
+
+/* PCIE GEN2 specific defines */
+/* PCIE BRCM Vendor Cap offsets w.r.t to vendor cap ptr */
+#define PCIE2R0_BRCMCAP_REVID_OFFSET 4
+#define PCIE2R0_BRCMCAP_BAR0_WIN0_WRAP_OFFSET 8
+#define PCIE2R0_BRCMCAP_BAR0_WIN2_OFFSET 12
+#define PCIE2R0_BRCMCAP_BAR0_WIN2_WRAP_OFFSET 16
+#define PCIE2R0_BRCMCAP_BAR0_WIN_OFFSET 20
+#define PCIE2R0_BRCMCAP_BAR1_WIN_OFFSET 24
+#define PCIE2R0_BRCMCAP_SPROM_CTRL_OFFSET 28
+#define PCIE2R0_BRCMCAP_BAR2_WIN_OFFSET 32
+#define PCIE2R0_BRCMCAP_INTSTATUS_OFFSET 36
+#define PCIE2R0_BRCMCAP_INTMASK_OFFSET 40
+#define PCIE2R0_BRCMCAP_PCIE2SB_MB_OFFSET 44
+#define PCIE2R0_BRCMCAP_BPADDR_OFFSET 48
+#define PCIE2R0_BRCMCAP_BPDATA_OFFSET 52
+#define PCIE2R0_BRCMCAP_CLKCTLSTS_OFFSET 56
+
+/*
+ * definition of configuration space registers of PCIe gen2
+ */
+#define PCIECFGREG_STATUS_CMD 0x4
+#define PCIECFGREG_PM_CSR 0x4C
+#define PCIECFGREG_MSI_CAP 0x58
+#define PCIECFGREG_MSI_ADDR_L 0x5C
+#define PCIECFGREG_MSI_ADDR_H 0x60
+#define PCIECFGREG_MSI_DATA 0x64
+#define PCIECFGREG_SPROM_CTRL 0x88
+#define PCIECFGREG_LINK_STATUS_CTRL 0xBCu
+#define PCIECFGREG_LINK_STATUS_CTRL2 0xDCu
+#define PCIECFGREG_DEV_STATUS_CTRL 0xB4u
+#define PCIECFGGEN_DEV_STATUS_CTRL2 0xD4
+#define PCIECFGREG_RBAR_CTRL 0x228
+#define PCIECFGREG_PML1_SUB_CTRL1 0x248
+#define PCIECFGREG_PML1_SUB_CTRL2 0x24C
+#define PCIECFGREG_REG_BAR2_CONFIG 0x4E0
+#define PCIECFGREG_REG_BAR3_CONFIG 0x4F4
+#define PCIECFGREG_PDL_CTRL1 0x1004
+#define PCIECFGREG_PDL_CTRL5 (0x1014u)
+#define PCIECFGREG_PDL_IDDQ 0x1814
+#define PCIECFGREG_REG_PHY_CTL7 0x181c
+#define PCIECFGREG_PHY_DBG_CLKREQ0 0x1E10
+#define PCIECFGREG_PHY_DBG_CLKREQ1 0x1E14
+#define PCIECFGREG_PHY_DBG_CLKREQ2 0x1E18
+#define PCIECFGREG_PHY_DBG_CLKREQ3 0x1E1C
+#define PCIECFGREG_PHY_LTSSM_HIST_0 0x1CEC
+#define PCIECFGREG_PHY_LTSSM_HIST_1 0x1CF0
+#define PCIECFGREG_PHY_LTSSM_HIST_2 0x1CF4
+#define PCIECFGREG_PHY_LTSSM_HIST_3 0x1CF8
+#define PCIECFGREG_TREFUP 0x1814
+#define PCIECFGREG_TREFUP_EXT 0x1818
+
+/* PCIECFGREG_STATUS_CMD reg bit definitions */
+#define PCIECFG_STS_CMD_MEM_SPACE_SHIFT (1u)
+#define PCIECFG_STS_CMD_BUS_MASTER_SHIFT (2u)
+/* PCIECFGREG_PML1_SUB_CTRL1 Bit Definition */
+#define PCI_PM_L1_2_ENA_MASK 0x00000001 /* PCI-PM L1.2 Enabled */
+#define PCI_PM_L1_1_ENA_MASK 0x00000002 /* PCI-PM L1.1 Enabled */
+#define ASPM_L1_2_ENA_MASK 0x00000004 /* ASPM L1.2 Enabled */
+#define ASPM_L1_1_ENA_MASK 0x00000008 /* ASPM L1.1 Enabled */
+
+/* PCIECFGREG_PDL_CTRL1 reg bit definitions */
+#define PCIECFG_PDL_CTRL1_RETRAIN_REQ_MASK (0x4000u)
+#define PCIECFG_PDL_CTRL1_RETRAIN_REQ_SHIFT (14u)
+#define PCIECFG_PDL_CTRL1_MAX_DLP_L1_ENTER_MASK (0x7Fu)
+#define PCIECFG_PDL_CTRL1_MAX_DLP_L1_ENTER_SHIFT (16u)
+#define PCIECFG_PDL_CTRL1_MAX_DLP_L1_ENTER_VAL (0x6Fu)
+
+/* PCIECFGREG_PDL_CTRL5 reg bit definitions */
+#define PCIECFG_PDL_CTRL5_DOWNSTREAM_PORT_SHIFT (8u)
+#define PCIECFG_PDL_CTRL5_GLOOPBACK_SHIFT (9u)
+
+/* PCIe gen2 mailbox interrupt masks */
+#define I_MB 0x3
+#define I_BIT0 0x1
+#define I_BIT1 0x2
+
+/* PCIE gen2 config regs */
+#define PCIIntstatus 0x090
+#define PCIIntmask 0x094
+#define PCISBMbx 0x98
+
+/* enumeration Core regs */
+#define PCIH2D_MailBox 0x140
+#define PCIH2D_DB1 0x144
+#define PCID2H_MailBox 0x148
+#define PCIH2D_MailBox_1 0x150 /* for dma channel1 */
+#define PCIH2D_DB1_1 0x154
+#define PCID2H_MailBox_1 0x158
+#define PCIH2D_MailBox_2 0x160 /* for dma channel2 which will be used for Implicit DMA */
+#define PCIH2D_DB1_2 0x164
+#define PCID2H_MailBox_2 0x168
+#define PCIH2D_DB1_3 0x174
+#define PCIE_CLK_CTRL 0x1E0
+#define PCIE_PWR_CTRL 0x1E8
+
+#define PCIControl(rev) (REV_GE_64(rev) ? 0xC00 : 0x00)
+/* for corerev < 64 idma_en is in PCIControl register */
+#define IDMAControl(rev) (REV_GE_64(rev) ? 0x480 : 0x00)
+#define PCIMailBoxInt(rev) (REV_GE_64(rev) ? 0xC30 : 0x48)
+#define PCIMailBoxMask(rev) (REV_GE_64(rev) ? 0xC34 : 0x4C)
+#define PCIFunctionIntstatus(rev) (REV_GE_64(rev) ? 0xC10 : 0x20)
+#define PCIFunctionIntmask(rev) (REV_GE_64(rev) ? 0xC14 : 0x24)
+#define PCIPowerIntstatus(rev) (REV_GE_64(rev) ? 0xC18 : 0x1A4)
+#define PCIPowerIntmask(rev) (REV_GE_64(rev) ? 0xC1C : 0x1A8)
+#define PCIDARClkCtl(rev) (REV_GE_64(rev) ? 0xA08 : 0xAE0)
+#define PCIDARPwrCtl(rev) (REV_GE_64(rev) ? 0xA0C : 0xAE8)
+#define PCIDARFunctionIntstatus(rev) (REV_GE_64(rev) ? 0xA10 : 0xA20)
+#define PCIDARH2D_DB0(rev) (REV_GE_64(rev) ? 0xA20 : 0xA28)
+#define PCIDARErrlog(rev) (REV_GE_64(rev) ? 0xA60 : 0xA40)
+#define PCIDARErrlog_Addr(rev) (REV_GE_64(rev) ? 0xA64 : 0xA44)
+#define PCIDARMailboxint(rev) (REV_GE_64(rev) ? 0xA68 : 0xA48)
+
+#define PCIMSIVecAssign 0x58
+
+/* base of all HMAP window registers */
+#define PCI_HMAP_WINDOW_BASE(rev) (REV_GE_64(rev) ? 0x580u : 0x540u)
+#define PCI_HMAP_VIOLATION_ADDR_L(rev) (REV_GE_64(rev) ? 0x600u : 0x5C0u)
+#define PCI_HMAP_VIOLATION_ADDR_U(rev) (REV_GE_64(rev) ? 0x604u : 0x5C4u)
+#define PCI_HMAP_VIOLATION_INFO(rev) (REV_GE_64(rev) ? 0x608u : 0x5C8u)
+#define PCI_HMAP_WINDOW_CONFIG(rev) (REV_GE_64(rev) ? 0x610u : 0x5D0u)
+
+/* HMAP Register related offsets */
+#define PCI_HMAP_NWINDOWS_SHIFT 8U
+#define PCI_HMAP_NWINDOWS_MASK 0x0000ff00U /* bits 8:15 */
+#define PCI_HMAP_VIO_ID_MASK	0x0000007fU	/* bits 0:6 */
+#define PCI_HMAP_VIO_ID_SHIFT 0U
+#define PCI_HMAP_VIO_SRC_MASK 0x00008000U /* bit 15 */
+#define PCI_HMAP_VIO_SRC_SHIFT 15U
+#define PCI_HMAP_VIO_TYPE_MASK 0x00010000U /* bit 16 */
+#define PCI_HMAP_VIO_TYPE_SHIFT 16U
+#define PCI_HMAP_VIO_ERR_MASK 0x00060000U /* bit 17:18 */
+#define PCI_HMAP_VIO_ERR_SHIFT 17U
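+
+/*
+ * Decode sketch (illustrative, not part of the original header):
+ * unpacking the HMAP violation info register into its requester-ID and
+ * error-code fields using the masks/shifts above.
+ */
+#if 0 /* example only */
+static inline uint32
+pci_hmap_vio_id(uint32 vio_info)
+{
+	return (vio_info & PCI_HMAP_VIO_ID_MASK) >> PCI_HMAP_VIO_ID_SHIFT;
+}
+
+static inline uint32
+pci_hmap_vio_err(uint32 vio_info)
+{
+	return (vio_info & PCI_HMAP_VIO_ERR_MASK) >> PCI_HMAP_VIO_ERR_SHIFT;
+}
+#endif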
+
+#define I_F0_B0 (0x1 << 8) /* Mail box interrupt Function 0 interrupt, bit 0 */
+#define I_F0_B1 (0x1 << 9) /* Mail box interrupt Function 0 interrupt, bit 1 */
+
+#define PCIECFGREG_DEVCONTROL 0xB4
+#define PCIECFGREG_BASEADDR0 0x10
+#define PCIECFGREG_BASEADDR1 0x18
+#define PCIECFGREG_DEVCONTROL_MRRS_SHFT 12
+#define PCIECFGREG_DEVCONTROL_MRRS_MASK (0x7 << PCIECFGREG_DEVCONTROL_MRRS_SHFT)
+#define PCIECFGREG_DEVCTRL_MPS_SHFT 5
+#define PCIECFGREG_DEVCTRL_MPS_MASK (0x7 << PCIECFGREG_DEVCTRL_MPS_SHFT)
+#define PCIECFGREG_PM_CSR_STATE_MASK 0x00000003
+#define PCIECFGREG_PM_CSR_STATE_D0 0
+#define PCIECFGREG_PM_CSR_STATE_D1 1
+#define PCIECFGREG_PM_CSR_STATE_D2 2
+#define PCIECFGREG_PM_CSR_STATE_D3_HOT 3
+#define PCIECFGREG_PM_CSR_STATE_D3_COLD 4
+
+/* Direct Access regs */
+#define DAR_ERRLOG(rev) (REV_GE_64(rev) ? \
+ OFFSETOF(sbpcieregs_t, u1.dar_64.errlog) : \
+ OFFSETOF(sbpcieregs_t, u1.dar.errlog))
+#define DAR_ERRADDR(rev) (REV_GE_64(rev) ? \
+ OFFSETOF(sbpcieregs_t, u1.dar_64.erraddr) : \
+ OFFSETOF(sbpcieregs_t, u1.dar.erraddr))
+#define DAR_CLK_CTRL(rev) (REV_GE_64(rev) ? \
+ OFFSETOF(sbpcieregs_t, u1.dar_64.clk_ctl_st) : \
+ OFFSETOF(sbpcieregs_t, u1.dar.clk_ctl_st))
+#define DAR_INTSTAT(rev) (REV_GE_64(rev) ? \
+ OFFSETOF(sbpcieregs_t, u1.dar_64.intstatus) : \
+ OFFSETOF(sbpcieregs_t, u1.dar.intstatus))
+#define DAR_PCIH2D_DB0_0(rev) (REV_GE_64(rev) ? \
+ OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_0_0) : \
+ OFFSETOF(sbpcieregs_t, u1.dar.h2d_db_0_0))
+#define DAR_PCIH2D_DB0_1(rev) (REV_GE_64(rev) ? \
+ OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_0_1) : \
+ OFFSETOF(sbpcieregs_t, u1.dar.h2d_db_0_1))
+#define DAR_PCIH2D_DB1_0(rev) (REV_GE_64(rev) ? \
+ OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_1_0) : \
+ OFFSETOF(sbpcieregs_t, u1.dar.h2d_db_1_0))
+#define DAR_PCIH2D_DB1_1(rev) (REV_GE_64(rev) ? \
+ OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_1_1) : \
+ OFFSETOF(sbpcieregs_t, u1.dar.h2d_db_1_1))
+#define DAR_PCIH2D_DB2_0(rev) (REV_GE_64(rev) ? \
+ OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_2_0) : \
+ OFFSETOF(sbpcieregs_t, u1.dar.h2d_db_2_0))
+#define DAR_PCIH2D_DB2_1(rev) (REV_GE_64(rev) ? \
+ OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_2_1) : \
+ OFFSETOF(sbpcieregs_t, u1.dar.h2d_db_2_1))
+#define DAR_PCIH2D_DB3_0(rev) OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_3_0)
+#define DAR_PCIH2D_DB3_1(rev) OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_3_1)
+#define DAR_PCIH2D_DB4_0(rev) OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_4_0)
+#define DAR_PCIH2D_DB4_1(rev) OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_4_1)
+#define DAR_PCIH2D_DB5_0(rev) OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_5_0)
+#define DAR_PCIH2D_DB5_1(rev) OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_5_1)
+#define DAR_PCIH2D_DB6_0(rev) OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_6_0)
+#define DAR_PCIH2D_DB6_1(rev) OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_6_1)
+#define DAR_PCIH2D_DB7_0(rev) OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_7_0)
+#define DAR_PCIH2D_DB7_1(rev) OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_7_1)
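+
+/*
+ * Usage sketch (illustrative, not part of the original header): ringing
+ * host-to-device doorbell 0 of DMA channel 0 through the direct-access
+ * window. W_REG/osh are assumed to be the usual osl accessors and 'regs'
+ * a mapped sbpcieregs_t pointer.
+ *
+ *	W_REG(osh, (volatile uint32 *)((volatile uint8 *)regs +
+ *		DAR_PCIH2D_DB0_0(rev)), 0x1);
+ */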
+
+#if !defined(DONGLEBUILD) || defined(BCMSTANDALONE_TEST)
+#define DAR_PCIMailBoxInt(rev) (REV_GE_64(rev) ? \
+ OFFSETOF(sbpcieregs_t, u1.dar_64.mbox_int) : \
+ OFFSETOF(sbpcieregs_t, u1.dar.mbox_int))
+#define DAR_PCIE_PWR_CTRL(rev) (REV_GE_64(rev) ? \
+ OFFSETOF(sbpcieregs_t, u1.dar_64.powerctl) : \
+ OFFSETOF(sbpcieregs_t, u1.dar.powerctl))
+#define DAR_PCIE_DAR_CTRL(rev) (REV_GE_64(rev) ? \
+ OFFSETOF(sbpcieregs_t, u1.dar_64.dar_ctrl) : \
+ OFFSETOF(sbpcieregs_t, u1.dar.dar_ctrl))
+#else
+#define DAR_PCIMailBoxInt(rev) PCIE_dar_mailboxint_OFFSET(rev)
+#define DAR_PCIE_PWR_CTRL(rev) PCIE_dar_power_control_OFFSET(rev)
+#define DAR_PCIE_DAR_CTRL(rev) PCIE_dar_control_OFFSET(rev)
+#endif
+
+#define DAR_FIS_CTRL(rev) OFFSETOF(sbpcieregs_t, u1.dar_64.fis_ctrl)
+
+#define DAR_FIS_START_SHIFT 0u
+#define DAR_FIS_START_MASK (1u << DAR_FIS_START_SHIFT)
+
+#define PCIE_PWR_REQ_PCIE (0x1 << 8)
+
+/* SROM hardware region */
+#define SROM_OFFSET_BAR1_CTRL 52
+
+#define BAR1_ENC_SIZE_MASK 0x000e
+#define BAR1_ENC_SIZE_SHIFT 1
+
+#define BAR1_ENC_SIZE_1M 0
+#define BAR1_ENC_SIZE_2M 1
+#define BAR1_ENC_SIZE_4M 2
+
+#define PCIEGEN2_CAP_DEVSTSCTRL2_OFFSET 0xD4
+#define PCIEGEN2_CAP_DEVSTSCTRL2_LTRENAB 0x400
+
+/*
+ * Latency Tolerance Reporting (LTR) states
+ * Active has the least tolerant latency requirement
+ * Sleep is most tolerant
+ */
+#define LTR_ACTIVE 2
+#define LTR_ACTIVE_IDLE 1
+#define LTR_SLEEP 0
+#define LTR_FINAL_MASK 0x300
+#define LTR_FINAL_SHIFT 8
+
+/* pwrinstatus, pwrintmask regs */
+#define PCIEGEN2_PWRINT_D0_STATE_SHIFT 0
+#define PCIEGEN2_PWRINT_D1_STATE_SHIFT 1
+#define PCIEGEN2_PWRINT_D2_STATE_SHIFT 2
+#define PCIEGEN2_PWRINT_D3_STATE_SHIFT 3
+#define PCIEGEN2_PWRINT_L0_LINK_SHIFT 4
+#define PCIEGEN2_PWRINT_L0s_LINK_SHIFT 5
+#define PCIEGEN2_PWRINT_L1_LINK_SHIFT 6
+#define PCIEGEN2_PWRINT_L2_L3_LINK_SHIFT 7
+#define PCIEGEN2_PWRINT_OBFF_CHANGE_SHIFT 8
+
+#define PCIEGEN2_PWRINT_D0_STATE_MASK (1 << PCIEGEN2_PWRINT_D0_STATE_SHIFT)
+#define PCIEGEN2_PWRINT_D1_STATE_MASK (1 << PCIEGEN2_PWRINT_D1_STATE_SHIFT)
+#define PCIEGEN2_PWRINT_D2_STATE_MASK (1 << PCIEGEN2_PWRINT_D2_STATE_SHIFT)
+#define PCIEGEN2_PWRINT_D3_STATE_MASK (1 << PCIEGEN2_PWRINT_D3_STATE_SHIFT)
+#define PCIEGEN2_PWRINT_L0_LINK_MASK (1 << PCIEGEN2_PWRINT_L0_LINK_SHIFT)
+#define PCIEGEN2_PWRINT_L0s_LINK_MASK (1 << PCIEGEN2_PWRINT_L0s_LINK_SHIFT)
+#define PCIEGEN2_PWRINT_L1_LINK_MASK (1 << PCIEGEN2_PWRINT_L1_LINK_SHIFT)
+#define PCIEGEN2_PWRINT_L2_L3_LINK_MASK (1 << PCIEGEN2_PWRINT_L2_L3_LINK_SHIFT)
+#define PCIEGEN2_PWRINT_OBFF_CHANGE_MASK (1 << PCIEGEN2_PWRINT_OBFF_CHANGE_SHIFT)
+
+/* sbtopcie mail box */
+#define SBTOPCIE_MB_FUNC0_SHIFT 8
+#define SBTOPCIE_MB_FUNC1_SHIFT 10
+#define SBTOPCIE_MB_FUNC2_SHIFT 12
+#define SBTOPCIE_MB_FUNC3_SHIFT 14
+
+#define SBTOPCIE_MB1_FUNC0_SHIFT 9
+#define SBTOPCIE_MB1_FUNC1_SHIFT 11
+#define SBTOPCIE_MB1_FUNC2_SHIFT 13
+#define SBTOPCIE_MB1_FUNC3_SHIFT 15
+
+/* pcieiocstatus */
+#define PCIEGEN2_IOC_D0_STATE_SHIFT 8
+#define PCIEGEN2_IOC_D1_STATE_SHIFT 9
+#define PCIEGEN2_IOC_D2_STATE_SHIFT 10
+#define PCIEGEN2_IOC_D3_STATE_SHIFT 11
+#define PCIEGEN2_IOC_L0_LINK_SHIFT 12
+#define PCIEGEN2_IOC_L1_LINK_SHIFT 13
+#define PCIEGEN2_IOC_L1L2_LINK_SHIFT 14
+#define PCIEGEN2_IOC_L2_L3_LINK_SHIFT 15
+#define PCIEGEN2_IOC_BME_SHIFT 20
+
+#define PCIEGEN2_IOC_D0_STATE_MASK (1 << PCIEGEN2_IOC_D0_STATE_SHIFT)
+#define PCIEGEN2_IOC_D1_STATE_MASK (1 << PCIEGEN2_IOC_D1_STATE_SHIFT)
+#define PCIEGEN2_IOC_D2_STATE_MASK (1 << PCIEGEN2_IOC_D2_STATE_SHIFT)
+#define PCIEGEN2_IOC_D3_STATE_MASK (1 << PCIEGEN2_IOC_D3_STATE_SHIFT)
+#define PCIEGEN2_IOC_L0_LINK_MASK (1 << PCIEGEN2_IOC_L0_LINK_SHIFT)
+#define PCIEGEN2_IOC_L1_LINK_MASK (1 << PCIEGEN2_IOC_L1_LINK_SHIFT)
+#define PCIEGEN2_IOC_L1L2_LINK_MASK (1 << PCIEGEN2_IOC_L1L2_LINK_SHIFT)
+#define PCIEGEN2_IOC_L2_L3_LINK_MASK (1 << PCIEGEN2_IOC_L2_L3_LINK_SHIFT)
+#define PCIEGEN2_IOC_BME_MASK (1 << PCIEGEN2_IOC_BME_SHIFT)
+
+/* stat_ctrl */
+#define PCIE_STAT_CTRL_RESET 0x1
+#define PCIE_STAT_CTRL_ENABLE 0x2
+#define PCIE_STAT_CTRL_INTENABLE 0x4
+#define PCIE_STAT_CTRL_INTSTATUS 0x8
+
+/* cpl_timeout_ctrl_reg */
+#define PCIE_CTO_TO_THRESHOLD_SHIFT 0
+#define PCIE_CTO_TO_THRESHHOLD_MASK (0xfffff << PCIE_CTO_TO_THRESHOLD_SHIFT)
+
+#define PCIE_CTO_CLKCHKCNT_SHIFT 24
+#define PCIE_CTO_CLKCHKCNT_MASK (0xf << PCIE_CTO_CLKCHKCNT_SHIFT)
+
+#define PCIE_CTO_ENAB_SHIFT 31
+#define PCIE_CTO_ENAB_MASK (0x1 << PCIE_CTO_ENAB_SHIFT)
+
+/*
+ * For corerev >= 69, core_fref is always 29.9MHz instead of 37.4MHz.
+ * Use different default threshold value to have 10ms timeout (0x49FB6 * 33ns).
+ */
+#define PCIE_CTO_TO_THRESH_DEFAULT 0x58000
+#define PCIE_CTO_TO_THRESH_DEFAULT_REV69 0x49FB6
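+
+/*
+ * Sanity check on the defaults above (illustrative note, not original):
+ * 0x49FB6 is 303030 ticks, which at the ~33ns period of a 29.9MHz
+ * core_fref comes to ~10ms; 0x58000 is 360448 ticks, which at the ~27ns
+ * period of a 37.4MHz core_fref is ~9.6ms, so both defaults target an
+ * approximately 10ms completion timeout.
+ */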
+
+#define PCIE_CTO_CLKCHKCNT_VAL 0xA
+
+/* ErrLog */
+#define PCIE_SROMRD_ERR_SHIFT 5
+#define PCIE_SROMRD_ERR_MASK (0x1 << PCIE_SROMRD_ERR_SHIFT)
+
+#define PCIE_CTO_ERR_SHIFT 8
+#define PCIE_CTO_ERR_MASK (0x1 << PCIE_CTO_ERR_SHIFT)
+
+#define PCIE_CTO_ERR_CODE_SHIFT 9
+#define PCIE_CTO_ERR_CODE_MASK (0x3 << PCIE_CTO_ERR_CODE_SHIFT)
+
+#define PCIE_BP_CLK_OFF_ERR_SHIFT 12
+#define PCIE_BP_CLK_OFF_ERR_MASK (0x1 << PCIE_BP_CLK_OFF_ERR_SHIFT)
+
+#define PCIE_BP_IN_RESET_ERR_SHIFT 13
+#define PCIE_BP_IN_RESET_ERR_MASK (0x1 << PCIE_BP_IN_RESET_ERR_SHIFT)
+
+/* PCIE control per Function */
+#define PCIE_FTN_DLYPERST_SHIFT 1
+#define PCIE_FTN_DLYPERST_MASK (1 << PCIE_FTN_DLYPERST_SHIFT)
+
+#define PCIE_FTN_WakeModeL2_SHIFT 3
+#define PCIE_FTN_WakeModeL2_MASK (1 << PCIE_FTN_WakeModeL2_SHIFT)
+
+#define PCIE_FTN_MSI_B2B_EN_SHIFT 4
+#define PCIE_FTN_MSI_B2B_EN_MASK (1 << PCIE_FTN_MSI_B2B_EN_SHIFT)
+
+#define PCIE_FTN_MSI_FIFO_CLEAR_SHIFT 5
+#define PCIE_FTN_MSI_FIFO_CLEAR_MASK (1 << PCIE_FTN_MSI_FIFO_CLEAR_SHIFT)
+
+#define PCIE_FTN_SWPME_SHIFT 6
+#define PCIE_FTN_SWPME_MASK (1 << PCIE_FTN_SWPME_SHIFT)
+
+#ifdef BCMDRIVER
+#if !defined(DONGLEBUILD) || defined(BCMSTANDALONE_TEST)
+void pcie_watchdog_reset(osl_t *osh, si_t *sih, uint32 wd_mask, uint32 wd_val);
+void pcie_serdes_iddqdisable(osl_t *osh, si_t *sih, sbpcieregs_t *sbpcieregs);
+void pcie_set_trefup_time_100us(si_t *sih);
+uint32 pcie_cto_to_thresh_default(uint corerev);
+uint32 pcie_corereg(osl_t *osh, volatile void *regs, uint32 offset, uint32 mask, uint32 val);
+#endif /* !defined(DONGLEBUILD) || defined(BCMSTANDALONE_TEST) */
+#if defined(DONGLEBUILD)
+void pcie_coherent_accenable(osl_t *osh, si_t *sih);
+#endif /* DONGLEBUILD */
+#endif /* BCMDRIVER */
+
+/* DMA intstatus and intmask */
+#define I_PC (1 << 10) /* pci descriptor error */
+#define I_PD (1 << 11) /* pci data error */
+#define I_DE (1 << 12) /* descriptor protocol error */
+#define I_RU (1 << 13) /* receive descriptor underflow */
+#define I_RO (1 << 14) /* receive fifo overflow */
+#define I_XU (1 << 15) /* transmit fifo underflow */
+#define I_RI (1 << 16) /* receive interrupt */
+#define I_XI (1 << 24) /* transmit interrupt */
+
+#define PD_DMA_INT_MASK_H2D 0x1DC00
+#define PD_DMA_INT_MASK_D2H 0x1DC00
+#define PD_DB_INT_MASK 0xFF0000
+
+#if defined(DONGLEBUILD)
+#if REV_GE_64(BCMPCIEREV)
+#define PD_DEV0_DB_INTSHIFT 8u
+#define PD_DEV1_DB_INTSHIFT 10u
+#define PD_DEV2_DB_INTSHIFT 12u
+#define PD_DEV3_DB_INTSHIFT 14u
+#else
+#define PD_DEV0_DB_INTSHIFT 16u
+#define PD_DEV1_DB_INTSHIFT 18u
+#define PD_DEV2_DB_INTSHIFT 20u
+#define PD_DEV3_DB_INTSHIFT 22u
+#endif /* BCMPCIEREV */
+#endif /* DONGLEBUILD */
+
+#define PCIE_INVALID_OFFSET 0x18003ffc /* Invalid Register Offset for Induce Error */
+#define PCIE_INVALID_DATA 0x55555555 /* Invalid Data for Induce Error */
+
+#define PD_DEV0_DB0_INTMASK (0x1 << PD_DEV0_DB_INTSHIFT)
+#define PD_DEV0_DB1_INTMASK (0x2 << PD_DEV0_DB_INTSHIFT)
+#define PD_DEV0_DB_INTMASK ((PD_DEV0_DB0_INTMASK) | (PD_DEV0_DB1_INTMASK))
+
+#define PD_DEV1_DB0_INTMASK (0x1 << PD_DEV1_DB_INTSHIFT)
+#define PD_DEV1_DB1_INTMASK (0x2 << PD_DEV1_DB_INTSHIFT)
+#define PD_DEV1_DB_INTMASK ((PD_DEV1_DB0_INTMASK) | (PD_DEV1_DB1_INTMASK))
+
+#define PD_DEV2_DB0_INTMASK (0x1 << PD_DEV2_DB_INTSHIFT)
+#define PD_DEV2_DB1_INTMASK (0x2 << PD_DEV2_DB_INTSHIFT)
+#define PD_DEV2_DB_INTMASK ((PD_DEV2_DB0_INTMASK) | (PD_DEV2_DB1_INTMASK))
+
+#define PD_DEV3_DB0_INTMASK (0x1 << PD_DEV3_DB_INTSHIFT)
+#define PD_DEV3_DB1_INTMASK (0x2 << PD_DEV3_DB_INTSHIFT)
+#define PD_DEV3_DB_INTMASK ((PD_DEV3_DB0_INTMASK) | (PD_DEV3_DB1_INTMASK))
+
+#define PD_DEV0_DMA_INTMASK 0x80
+
+#define PD_FUNC0_MB_INTSHIFT 8u
+#define PD_FUNC0_MB_INTMASK (0x3 << PD_FUNC0_MB_INTSHIFT)
+
+#define PD_FUNC0_PCIE_SB_INTSHIFT 0u
+#define PD_FUNC0_PCIE_SB__INTMASK (0x3 << PD_FUNC0_PCIE_SB_INTSHIFT)
+
+#define PD_DEV0_PWRSTATE_INTSHIFT 24u
+#define PD_DEV0_PWRSTATE_INTMASK (0x1 << PD_DEV0_PWRSTATE_INTSHIFT)
+
+#define PD_DEV0_PERST_INTSHIFT 6u
+#define PD_DEV0_PERST_INTMASK (0x1 << PD_DEV0_PERST_INTSHIFT)
+
+#define PD_MSI_FIFO_OVERFLOW_INTSHIFT 28u
+#define PD_MSI_FIFO_OVERFLOW_INTMASK (0x1 << PD_MSI_FIFO_OVERFLOW_INTSHIFT)
+
+#if defined(BCMPCIE_IFRM)
+#define PD_IFRM_INTSHIFT 5u
+#define PD_IFRM_INTMASK (0x1 << PD_IFRM_INTSHIFT)
+#endif /* BCMPCIE_IFRM */
+
+/* HMAP related constants */
+#define PD_HMAP_VIO_INTSHIFT 3u
+#define PD_HMAP_VIO_INTMASK (0x1 << PD_HMAP_VIO_INTSHIFT)
+#define PD_HMAP_VIO_CLR_VAL 0x3 /* write 0b11 to clear HMAP violation */
+#define PD_HMAP_VIO_SHIFT_VAL 17u /* bits 17:18 clear HMAP violation */
+
+#define PD_FLR0_IN_PROG_INTSHIFT 0u
+#define PD_FLR0_IN_PROG_INTMASK (0x1 << PD_FLR0_IN_PROG_INTSHIFT)
+#define PD_FLR1_IN_PROG_INTSHIFT 1u
+#define PD_FLR1_IN_PROG_INTMASK (0x1 << PD_FLR1_IN_PROG_INTSHIFT)
+
+/* DMA channel 2 datapath use case
+ * Implicit DMA uses DMA channel 2 (outbound only)
+ */
+#if defined(BCMPCIE_IDMA) && !defined(BCMPCIE_IDMA_DISABLED)
+#define PD_DEV2_INTMASK PD_DEV2_DB0_INTMASK
+#elif defined(BCMPCIE_IFRM) && !defined(BCMPCIE_IFRM_DISABLED)
+#define PD_DEV2_INTMASK PD_DEV2_DB0_INTMASK
+#elif defined(BCMPCIE_DMA_CH2)
+#define PD_DEV2_INTMASK PD_DEV2_DB0_INTMASK
+#else
+#define PD_DEV2_INTMASK 0u
+#endif /* BCMPCIE_IDMA || BCMPCIE_DMA_CH2 || BCMPCIE_IFRM */
+/* DMA channel 1 datapath use case */
+#ifdef BCMPCIE_DMA_CH1
+#define PD_DEV1_INTMASK PD_DEV1_DB0_INTMASK
+#else
+#define PD_DEV1_INTMASK 0u
+#endif /* BCMPCIE_DMA_CH1 */
+#if defined(BCMPCIE_IDMA) || defined(BCMPCIE_IFRM)
+#define PD_DEV1_IDMA_DW_INTMASK PD_DEV1_DB1_INTMASK
+#else
+#define PD_DEV1_IDMA_DW_INTMASK 0u
+#endif /* BCMPCIE_IDMA || BCMPCIE_IFRM */
+
+#define PD_DEV0_INTMASK \
+ (PD_DEV0_DMA_INTMASK | PD_DEV0_DB0_INTMASK | PD_DEV0_PWRSTATE_INTMASK | \
+ PD_DEV0_PERST_INTMASK | PD_DEV1_INTMASK | PD_DEV2_INTMASK | PD_DEV0_DB1_INTMASK | \
+ PD_DEV1_IDMA_DW_INTMASK)
+
+/* implicit DMA index */
+#define PD_IDMA_COMP 0xf /* implicit dma complete */
+#define PD_IDMA_IDX0_COMP ((uint32)1 << 0) /* implicit dma index0 complete */
+#define PD_IDMA_IDX1_COMP ((uint32)1 << 1) /* implicit dma index1 complete */
+#define PD_IDMA_IDX2_COMP ((uint32)1 << 2) /* implicit dma index2 complete */
+#define PD_IDMA_IDX3_COMP ((uint32)1 << 3) /* implicit dma index3 complete */
+
+#define PCIE_D2H_DB0_VAL (0x12345678)
+
+#define PD_ERR_ATTN_INTMASK (1u << 29)
+#define PD_LINK_DOWN_INTMASK (1u << 27)
+
+#define PD_ERR_TTX_REQ_DURING_D3 (1u << 31) /* Tx mem req on iface when in non-D0 */
+#define PD_PRI_SIG_TARGET_ABORT_F1 (1u << 19) /* Rcvd target Abort Err Status (CA) F1 */
+#define PD_ERR_UNSPPORT_F1 (1u << 18) /* Unsupported Request Error Status. F1 */
+#define PD_ERR_ECRC_F1 (1u << 17) /* ECRC Error TLP Status. F1 */
+#define PD_ERR_MALF_TLP_F1 (1u << 16) /* Malformed TLP Status. F1 */
+#define PD_ERR_RX_OFLOW_F1 (1u << 15) /* Receiver Overflow Status. */
+#define PD_ERR_UNEXP_CPL_F1 (1u << 14) /* Unexpected Completion Status. F1 */
+#define PD_ERR_MASTER_ABRT_F1 (1u << 13) /* Receive UR Completion Status. F1 */
+#define PD_ERR_CPL_TIMEOUT_F1 (1u << 12) /* Completer Timeout Status F1 */
+#define PD_ERR_FC_PRTL_F1 (1u << 11) /* Flow Control Protocol Error Status F1 */
+#define PD_ERR_PSND_TLP_F1 (1u << 10) /* Poisoned Error Status F1 */
+#define PD_PRI_SIG_TARGET_ABORT (1u << 9) /* Received target Abort Error Status(CA) */
+#define PD_ERR_UNSPPORT (1u << 8) /* Unsupported Request Error Status. */
+#define PD_ERR_ECRC (1u << 7) /* ECRC Error TLP Status. */
+#define PD_ERR_MALF_TLP (1u << 6) /* Malformed TLP Status. */
+#define PD_ERR_RX_OFLOW (1u << 5) /* Receiver Overflow Status. */
+#define PD_ERR_UNEXP_CPL (1u << 4) /* Unexpected Completion Status. */
+#define PD_ERR_MASTER_ABRT (1u << 3) /* Receive UR Completion Status. */
+#define PD_ERR_CPL_TIMEOUT (1u << 2) /* Completer Timeout Status */
+#define PD_ERR_FC_PRTL (1u << 1) /* Flow Control Protocol Error Status */
+#define PD_ERR_PSND_TLP (1u << 0) /* Poisoned Error Status */
+
+/* All ERR_ATTN of F1 */
+#define PD_ERR_FUNCTION1 \
+ (PD_ERR_PSND_TLP_F1 | PD_ERR_FC_PRTL_F1 | PD_ERR_CPL_TIMEOUT_F1 | PD_ERR_MASTER_ABRT_F1 | \
+ PD_ERR_UNEXP_CPL_F1 | PD_ERR_RX_OFLOW_F1 | PD_ERR_MALF_TLP_F1 | PD_ERR_ECRC_F1 | \
+ PD_ERR_UNSPPORT_F1 | PD_PRI_SIG_TARGET_ABORT_F1)
+
+#define PD_ERR_TTX_REQ_DURING_D3_FN0 (1u << 10) /* Tx mem req on iface when in non-D0 */
+
+/* H2D Doorbell Fields for IDMA / PWI */
+#define PD_DB_FRG_ID_SHIFT (0u)
+#define PD_DB_FRG_ID_MASK (0xFu) /* bits 3:0 */
+#define PD_DB_DMA_TYPE_SHIFT (4u)
+#define PD_DB_DMA_TYPE_MASK (0xFu) /* bits 7:4 */
+#define PD_DB_RINGIDX_NUM_SHIFT (8u)
+#define PD_DB_RINGIDX_NUM_MASK (0xFFu) /* bits 15:8 */
+#define PD_DB_INDEX_VAL_SHIFT (16u)
+#define PD_DB_INDEX_VAL_MASK (0xFFFFu) /* bits 31:16 */
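+
+/*
+ * Packing sketch (illustrative, not part of the original header):
+ * composing an H2D doorbell value for IDMA/PWI from the fields above
+ * (index in 31:16, ring index in 15:8, DMA type in 7:4, frag id in 3:0).
+ */
+#if 0 /* example only */
+static inline uint32
+pd_db_pack(uint32 index_val, uint32 ringidx, uint32 dma_type, uint32 frg_id)
+{
+	return ((index_val & PD_DB_INDEX_VAL_MASK) << PD_DB_INDEX_VAL_SHIFT) |
+		((ringidx & PD_DB_RINGIDX_NUM_MASK) << PD_DB_RINGIDX_NUM_SHIFT) |
+		((dma_type & PD_DB_DMA_TYPE_MASK) << PD_DB_DMA_TYPE_SHIFT) |
+		((frg_id & PD_DB_FRG_ID_MASK) << PD_DB_FRG_ID_SHIFT);
+}
+#endif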
+
+/* PWI LUT entry fields */
+#define PWI_FLOW_VALID_MASK (0x1u)
+#define PWI_FLOW_VALID_SHIFT (22u)
+#define PWI_FLOW_RING_GROUP_ID_MASK (0x3u)
+#define PWI_FLOW_RING_GROUP_ID_SHIFT (20u)
+#define PWI_HOST_RINGIDX_MASK (0xFFu) /* Host Ring Index Number[19:12] */
+#define PWI_HOST_RINGIDX_SHIFT (12u)
+
+/* DMA_TYPE Values */
+#define PD_DB_DMA_TYPE_NO_IDMA (0u)
+#define PD_DB_DMA_TYPE_IDMA (1u)
+#define PD_DB_DMA_TYPE_PWI (2u)
+#define PD_DB_DMA_TYPE_RXPOST(rev) (REV_GE_73((rev)) ? (1u) : (5u))
+#define PD_DB_DMA_TYPE_TXCPL(rev) (REV_GE_73((rev)) ? (2u) : (6u))
+#define PD_DB_DMA_TYPE_RXCPL(rev) (REV_GE_73((rev)) ? (3u) : (7u))
+
+/* All ERR_ATTN of F0 */
+#define PD_ERR_FUNCTION0 \
+ (PD_ERR_PSND_TLP | PD_ERR_FC_PRTL | PD_ERR_CPL_TIMEOUT | PD_ERR_MASTER_ABRT | \
+ PD_ERR_UNEXP_CPL | PD_ERR_RX_OFLOW | PD_ERR_MALF_TLP | PD_ERR_ECRC | \
+ PD_ERR_UNSPPORT | PD_PRI_SIG_TARGET_ABORT)
+/* Shift of F1 bits */
+#define PD_ERR_FUNCTION1_SHIFT 10u
+
+#endif /* _PCIE_CORE_H */
diff --git a/bcmdhd.101.10.361.x/include/sbchipc.h b/bcmdhd.101.10.361.x/include/sbchipc.h
new file mode 100755
index 0000000..94f3c70
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/sbchipc.h
@@ -0,0 +1,5282 @@
+/*
+ * SiliconBackplane Chipcommon core hardware definitions.
+ *
+ * The chipcommon core provides chip identification, SB control,
+ * JTAG, 0/1/2 UARTs, clock frequency control, a watchdog interrupt timer,
+ * GPIO interface, extbus, and support for serial and parallel flashes.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _SBCHIPC_H
+#define _SBCHIPC_H
+
+#if !defined(_LANGUAGE_ASSEMBLY) && !defined(__ASSEMBLY__)
+
+/* cpp contortions to concatenate w/arg prescan */
+#ifndef PAD
+#define _PADLINE(line) pad ## line
+#define _XSTR(line) _PADLINE(line)
+#define PAD _XSTR(__LINE__)
+#endif /* PAD */
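+
+/*
+ * Illustration (not in the original header): PAD expands to a member name
+ * derived from the current source line, e.g. 'uint32 PAD;' appearing on
+ * line 137 becomes 'uint32 pad137;'. This is what lets the register
+ * structs below declare many pad fields without name collisions.
+ */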
+
+#define BCM_MASK32(msb, lsb) ((~0u >> (32u - (msb) - 1u)) & (~0u << (lsb)))
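+/*
+ * Worked example (illustrative): BCM_MASK32(15, 8) evaluates to
+ * ((~0u >> 16) & (~0u << 8)) == (0x0000FFFF & 0xFFFFFF00) == 0x0000FF00,
+ * i.e. a mask covering bits 15:8 inclusive.
+ */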
+#include <bcmutils.h>
+#ifdef WL_INITVALS
+#include <wl_initvals.h>
+#endif
+
+/**
+ * In chipcommon rev 49 the pmu registers have been moved from chipc to the pmu core if the
+ * 'AOBPresent' bit of 'CoreCapabilitiesExt' is set. If this field is set, the traditional chipc to
+ * [pmu|gci|sreng] register interface is deprecated and removed. These register blocks would instead
+ * be assigned their respective chipc-specific address space and connected to the Always On
+ * Backplane via the APB interface.
+ */
+typedef volatile struct {
+ uint32 PAD[384];
+ uint32 pmucontrol; /* 0x600 */
+ uint32 pmucapabilities; /* 0x604 */
+ uint32 pmustatus; /* 0x608 */
+ uint32 res_state; /* 0x60C */
+ uint32 res_pending; /* 0x610 */
+ uint32 pmutimer; /* 0x614 */
+ uint32 min_res_mask; /* 0x618 */
+ uint32 max_res_mask; /* 0x61C */
+ uint32 res_table_sel; /* 0x620 */
+ uint32 res_dep_mask;
+ uint32 res_updn_timer;
+ uint32 res_timer;
+ uint32 clkstretch;
+ uint32 pmuwatchdog;
+ uint32 gpiosel; /* 0x638, rev >= 1 */
+ uint32 gpioenable; /* 0x63c, rev >= 1 */
+ uint32 res_req_timer_sel; /* 0x640 */
+ uint32 res_req_timer; /* 0x644 */
+ uint32 res_req_mask; /* 0x648 */
+ uint32 core_cap_ext; /* 0x64C */
+ uint32 chipcontrol_addr; /* 0x650 */
+ uint32 chipcontrol_data; /* 0x654 */
+ uint32 regcontrol_addr;
+ uint32 regcontrol_data;
+ uint32 pllcontrol_addr;
+ uint32 pllcontrol_data;
+ uint32 pmustrapopt; /* 0x668, corerev >= 28 */
+ uint32 pmu_xtalfreq; /* 0x66C, pmurev >= 10 */
+ uint32 retention_ctl; /* 0x670 */
+ uint32 ILPPeriod; /* 0x674 */
+ uint32 PAD[2];
+ uint32 retention_grpidx; /* 0x680 */
+ uint32 retention_grpctl; /* 0x684 */
+ uint32 mac_res_req_timer; /* 0x688 */
+ uint32 mac_res_req_mask; /* 0x68c */
+ uint32 spm_ctrl; /* 0x690 */
+ uint32 spm_cap; /* 0x694 */
+ uint32 spm_clk_ctrl; /* 0x698 */
+ uint32 int_hi_status; /* 0x69c */
+ uint32 int_lo_status; /* 0x6a0 */
+ uint32 mon_table_addr; /* 0x6a4 */
+ uint32 mon_ctrl_n; /* 0x6a8 */
+ uint32 mon_status_n; /* 0x6ac */
+ uint32 int_treshold_n; /* 0x6b0 */
+ uint32 watermarks_n; /* 0x6b4 */
+ uint32 spm_debug; /* 0x6b8 */
+ uint32 PAD[1];
+ uint32 vtrim_ctrl; /* 0x6c0 */
+ uint32 vtrim_status; /* 0x6c4 */
+ uint32 usec_timer; /* 0x6c8 */
+ uint32 usec_timer_frac; /* 0x6cc */
+ uint32 pcie_tpower_on; /* 0x6d0 */
+ uint32 pcie_tport_cnt; /* 0x6d4 */
+ uint32 pmucontrol_ext; /* 0x6d8 */
+ uint32 slowclkperiod; /* 0x6dc */
+ uint32 pmu_statstimer_addr; /* 0x6e0 */
+ uint32 pmu_statstimer_ctrl; /* 0x6e4 */
+ uint32 pmu_statstimer_N; /* 0x6e8 */
+ uint32 PAD[1];
+ uint32 mac_res_req_timer1; /* 0x6f0 */
+ uint32 mac_res_req_mask1; /* 0x6f4 */
+ uint32 PAD[2];
+ uint32 pmuintmask0; /* 0x700 */
+ uint32 pmuintmask1; /* 0x704 */
+ uint32 PAD[2];
+ uint32 fis_start_min_res_mask; /* 0x710 */
+ uint32 PAD[3];
+ uint32 rsrc_event0; /* 0x720 */
+ uint32 PAD[3];
+ uint32 slowtimer2; /* 0x730 */
+ uint32 slowtimerfrac2; /* 0x734 */
+ uint32 mac_res_req_timer2; /* 0x738 */
+ uint32 mac_res_req_mask2; /* 0x73c */
+ uint32 pmuintstatus; /* 0x740 */
+ uint32 extwakeupstatus; /* 0x744 */
+ uint32 watchdog_res_mask; /* 0x748 */
+ uint32 PAD[1]; /* 0x74C */
+ uint32 swscratch; /* 0x750 */
+ uint32 PAD[3]; /* 0x754-0x75C */
+ uint32 extwakemask0; /* 0x760 */
+ uint32 extwakemask1; /* 0x764 */
+ uint32 PAD[2]; /* 0x768-0x76C */
+ uint32 extwakereqmask[2]; /* 0x770-0x774 */
+ uint32 PAD[2]; /* 0x778-0x77C */
+ uint32 pmuintctrl0; /* 0x780 */
+ uint32 pmuintctrl1; /* 0x784 */
+ uint32 PAD[2];
+ uint32 extwakectrl[2]; /* 0x790 */
+ uint32 PAD[7];
+ uint32 fis_ctrl_status; /* 0x7b4 */
+ uint32 fis_min_res_mask; /* 0x7b8 */
+ uint32 PAD[1];
+ uint32 precision_tmr_ctrl_status; /* 0x7c0 */
+ uint32 precision_tmr_capture_low; /* 0x7c4 */
+ uint32 precision_tmr_capture_high; /* 0x7c8 */
+ uint32 precision_tmr_capture_frac; /* 0x7cc */
+ uint32 precision_tmr_running_low; /* 0x7d0 */
+ uint32 precision_tmr_running_high; /* 0x7d4 */
+ uint32 precision_tmr_running_frac; /* 0x7d8 */
+ uint32 PAD[3];
+ uint32 core_cap_ext1; /* 0x7e8 */
+ uint32 PAD[5];
+ uint32 rsrc_substate_ctl_sts; /* 0x800 */
+ uint32 rsrc_substate_trans_tmr; /* 0x804 */
+ uint32 PAD[2];
+ uint32 dvfs_ctrl1; /* 0x810 */
+ uint32 dvfs_ctrl2; /* 0x814 */
+ uint32 dvfs_voltage; /* 0x818 */
+ uint32 dvfs_status; /* 0x81c */
+ uint32 dvfs_core_table_address; /* 0x820 */
+ uint32 dvfs_core_ctrl; /* 0x824 */
+} pmuregs_t;
+
+typedef struct eci_prerev35 {
+ uint32 eci_output;
+ uint32 eci_control;
+ uint32 eci_inputlo;
+ uint32 eci_inputmi;
+ uint32 eci_inputhi;
+ uint32 eci_inputintpolaritylo;
+ uint32 eci_inputintpolaritymi;
+ uint32 eci_inputintpolarityhi;
+ uint32 eci_intmasklo;
+ uint32 eci_intmaskmi;
+ uint32 eci_intmaskhi;
+ uint32 eci_eventlo;
+ uint32 eci_eventmi;
+ uint32 eci_eventhi;
+ uint32 eci_eventmasklo;
+ uint32 eci_eventmaskmi;
+ uint32 eci_eventmaskhi;
+ uint32 PAD[3];
+} eci_prerev35_t;
+
+typedef struct eci_rev35 {
+ uint32 eci_outputlo;
+ uint32 eci_outputhi;
+ uint32 eci_controllo;
+ uint32 eci_controlhi;
+ uint32 eci_inputlo;
+ uint32 eci_inputhi;
+ uint32 eci_inputintpolaritylo;
+ uint32 eci_inputintpolarityhi;
+ uint32 eci_intmasklo;
+ uint32 eci_intmaskhi;
+ uint32 eci_eventlo;
+ uint32 eci_eventhi;
+ uint32 eci_eventmasklo;
+ uint32 eci_eventmaskhi;
+ uint32 eci_auxtx;
+ uint32 eci_auxrx;
+ uint32 eci_datatag;
+ uint32 eci_uartescvalue;
+ uint32 eci_autobaudctr;
+ uint32 eci_uartfifolevel;
+} eci_rev35_t;
+
+typedef struct flash_config {
+ uint32 PAD[19];
+ /* Flash struct configuration registers (0x18c) for BCM4706 (corerev = 31) */
+ uint32 flashstrconfig;
+} flash_config_t;
+
+typedef volatile struct {
+ uint32 chipid; /* 0x0 */
+ uint32 capabilities;
+ uint32 corecontrol; /* corerev >= 1 */
+ uint32 bist;
+
+ /* OTP */
+ uint32 otpstatus; /* 0x10, corerev >= 10 */
+ uint32 otpcontrol;
+ uint32 otpprog;
+ uint32 otplayout; /* corerev >= 23 */
+
+ /* Interrupt control */
+ uint32 intstatus; /* 0x20 */
+ uint32 intmask;
+
+ /* Chip specific regs */
+ uint32 chipcontrol; /* 0x28, rev >= 11 */
+ uint32 chipstatus; /* 0x2c, rev >= 11 */
+
+ /* Jtag Master */
+ uint32 jtagcmd; /* 0x30, rev >= 10 */
+ uint32 jtagir;
+ uint32 jtagdr;
+ uint32 jtagctrl;
+
+ /* serial flash interface registers */
+ uint32 flashcontrol; /* 0x40 */
+ uint32 flashaddress;
+ uint32 flashdata;
+ uint32 otplayoutextension; /* rev >= 35 */
+
+ /* Silicon backplane configuration broadcast control */
+ uint32 broadcastaddress; /* 0x50 */
+ uint32 broadcastdata;
+
+ /* gpio - cleared only by power-on-reset */
+ uint32 gpiopullup; /* 0x58, corerev >= 20 */
+ uint32 gpiopulldown; /* 0x5c, corerev >= 20 */
+ uint32 gpioin; /* 0x60 */
+ uint32 gpioout; /* 0x64 */
+ uint32 gpioouten; /* 0x68 */
+ uint32 gpiocontrol; /* 0x6C */
+ uint32 gpiointpolarity; /* 0x70 */
+ uint32 gpiointmask; /* 0x74 */
+
+ /* GPIO events corerev >= 11 */
+ uint32 gpioevent;
+ uint32 gpioeventintmask;
+
+ /* Watchdog timer */
+ uint32 watchdog; /* 0x80 */
+
+ /* GPIO events corerev >= 11 */
+ uint32 gpioeventintpolarity;
+
+ /* GPIO based LED powersave regs corerev >= 16 */
+ uint32 gpiotimerval; /* 0x88 */ /* Obsolete and unused now */
+ uint32 gpiotimeroutmask; /* Obsolete and unused now */
+
+ /* clock control */
+ uint32 clockcontrol_n; /* 0x90 */
+ uint32 clockcontrol_sb; /* aka m0 */
+ uint32 clockcontrol_pci; /* aka m1 */
+ uint32 clockcontrol_m2; /* mii/uart/mipsref */
+ uint32 clockcontrol_m3; /* cpu */
+ uint32 clkdiv; /* corerev >= 3 */
+ uint32 gpiodebugsel; /* corerev >= 28 */
+ uint32 capabilities_ext; /* 0xac */
+
+ /* pll delay registers (corerev >= 4) */
+ uint32 pll_on_delay; /* 0xb0 */
+ uint32 fref_sel_delay;
+ uint32 slow_clk_ctl; /* 5 < corerev < 10 */
+ uint32 PAD;
+
+ /* Instaclock registers (corerev >= 10) */
+ uint32 system_clk_ctl; /* 0xc0 */
+ uint32 clkstatestretch;
+ uint32 PAD[2];
+
+ /* Indirect backplane access (corerev >= 22) */
+ uint32 bp_addrlow; /* 0xd0 */
+ uint32 bp_addrhigh;
+ uint32 bp_data;
+ uint32 PAD;
+ uint32 bp_indaccess;
+ /* SPI registers, corerev >= 37 */
+ uint32 gsioctrl;
+ uint32 gsioaddress;
+ uint32 gsiodata;
+
+ /* More clock dividers (corerev >= 32) */
+ uint32 clkdiv2;
+ /* FAB ID (corerev >= 40) */
+ uint32 otpcontrol1;
+ uint32 fabid; /* 0xf8 */
+
+ /* In AI chips, pointer to erom */
+ uint32 eromptr; /* 0xfc */
+
+ /* ExtBus control registers (corerev >= 3) */
+ uint32 pcmcia_config; /* 0x100 */
+ uint32 pcmcia_memwait;
+ uint32 pcmcia_attrwait;
+ uint32 pcmcia_iowait;
+ uint32 ide_config;
+ uint32 ide_memwait;
+ uint32 ide_attrwait;
+ uint32 ide_iowait;
+ uint32 prog_config;
+ uint32 prog_waitcount;
+ uint32 flash_config;
+ uint32 flash_waitcount;
+ uint32 SECI_config; /* 0x130 SECI configuration */
+ uint32 SECI_status;
+ uint32 SECI_statusmask;
+ uint32 SECI_rxnibchanged;
+
+#if !defined(BCMDONGLEHOST)
+ union { /* 0x140 */
+ /* Enhanced Coexistence Interface (ECI) registers (corerev >= 21) */
+ struct eci_prerev35 lt35;
+ struct eci_rev35 ge35;
+ /* Other interfaces */
+ struct flash_config flashconf;
+ uint32 PAD[20];
+ } eci;
+#else
+ uint32 PAD[20];
+#endif /* !defined(BCMDONGLEHOST) */
+
+ /* SROM interface (corerev >= 32) */
+ uint32 sromcontrol; /* 0x190 */
+ uint32 sromaddress;
+ uint32 sromdata;
+ uint32 PAD[1]; /* 0x19C */
+ /* NAND flash registers for BCM4706 (corerev = 31) */
+ uint32 nflashctrl; /* 0x1a0 */
+ uint32 nflashconf;
+ uint32 nflashcoladdr;
+ uint32 nflashrowaddr;
+ uint32 nflashdata;
+ uint32 nflashwaitcnt0; /* 0x1b4 */
+ uint32 PAD[2];
+
+ uint32 seci_uart_data; /* 0x1C0 */
+ uint32 seci_uart_bauddiv;
+ uint32 seci_uart_fcr;
+ uint32 seci_uart_lcr;
+ uint32 seci_uart_mcr;
+ uint32 seci_uart_lsr;
+ uint32 seci_uart_msr;
+ uint32 seci_uart_baudadj;
+ /* Clock control and hardware workarounds (corerev >= 20) */
+ uint32 clk_ctl_st; /* 0x1e0 */
+ uint32 hw_war;
+ uint32 powerctl; /* 0x1e8 */
+ uint32 powerctl2; /* 0x1ec */
+ uint32 PAD[68];
+
+ /* UARTs */
+ uint8 uart0data; /* 0x300 */
+ uint8 uart0imr;
+ uint8 uart0fcr;
+ uint8 uart0lcr;
+ uint8 uart0mcr;
+ uint8 uart0lsr;
+ uint8 uart0msr;
+ uint8 uart0scratch;
+ uint8 PAD[184]; /* corerev >= 65 */
+ uint32 rng_ctrl_0; /* 0x3c0 */
+ uint32 rng_rng_soft_reset; /* 0x3c4 */
+ uint32 rng_rbg_soft_reset; /* 0x3c8 */
+ uint32 rng_total_bit_cnt; /* 0x3cc */
+ uint32 rng_total_bit_thrshld; /* 0x3d0 */
+ uint32 rng_rev_id; /* 0x3d4 */
+ uint32 rng_int_status_0; /* 0x3d8 */
+ uint32 rng_int_enable_0; /* 0x3dc */
+ uint32 rng_fifo_data; /* 0x3e0 */
+ uint32 rng_fifo_cnt; /* 0x3e4 */
+ uint8 PAD[24]; /* corerev >= 65 */
+
+ uint8 uart1data; /* 0x400 */
+ uint8 uart1imr;
+ uint8 uart1fcr;
+ uint8 uart1lcr;
+ uint8 uart1mcr;
+ uint8 uart1lsr;
+ uint8 uart1msr;
+ uint8 uart1scratch; /* 0x407 */
+ uint32 PAD[50];
+ uint32 sr_memrw_addr; /* 0x4d0 */
+ uint32 sr_memrw_data; /* 0x4d4 */
+ uint32 etbmemctrl; /* 0x4d8 */
+ uint32 PAD[9];
+
+ /* save/restore, corerev >= 48 */
+ uint32 sr_capability; /* 0x500 */
+ uint32 sr_control0; /* 0x504 */
+ uint32 sr_control1; /* 0x508 */
+ uint32 gpio_control; /* 0x50C */
+ uint32 PAD[29];
+ /* 2 SR engines case */
+ uint32 sr1_control0; /* 0x584 */
+ uint32 sr1_control1; /* 0x588 */
+ uint32 PAD[29];
+ /* PMU registers (corerev >= 20) */
+ /* Note: all timers driven by ILP clock are updated asynchronously to HT/ALP.
+ * The CPU must read them twice, compare, and retry if different.
+ */
+ uint32 pmucontrol; /* 0x600 */
+ uint32 pmucapabilities;
+ uint32 pmustatus;
+ uint32 res_state;
+ uint32 res_pending;
+ uint32 pmutimer;
+ uint32 min_res_mask;
+ uint32 max_res_mask;
+ uint32 res_table_sel;
+ uint32 res_dep_mask;
+ uint32 res_updn_timer;
+ uint32 res_timer;
+ uint32 clkstretch;
+ uint32 pmuwatchdog;
+ uint32 gpiosel; /* 0x638, rev >= 1 */
+ uint32 gpioenable; /* 0x63c, rev >= 1 */
+ uint32 res_req_timer_sel;
+ uint32 res_req_timer;
+ uint32 res_req_mask;
+ uint32 core_cap_ext; /* 0x64c */
+ uint32 chipcontrol_addr; /* 0x650 */
+ uint32 chipcontrol_data; /* 0x654 */
+ uint32 regcontrol_addr;
+ uint32 regcontrol_data;
+ uint32 pllcontrol_addr;
+ uint32 pllcontrol_data;
+ uint32 pmustrapopt; /* 0x668, corerev >= 28 */
+ uint32 pmu_xtalfreq; /* 0x66C, pmurev >= 10 */
+ uint32 retention_ctl; /* 0x670 */
+ uint32 ILPPeriod; /* 0x674 */
+ uint32 PAD[2];
+ uint32 retention_grpidx; /* 0x680 */
+ uint32 retention_grpctl; /* 0x684 */
+ uint32 mac_res_req_timer; /* 0x688 */
+ uint32 mac_res_req_mask; /* 0x68c */
+ uint32 PAD[18];
+ uint32 pmucontrol_ext; /* 0x6d8 */
+ uint32 slowclkperiod; /* 0x6dc */
+ uint32 pmu_statstimer_addr; /* 0x6e0 */
+ uint32 pmu_statstimer_ctrl; /* 0x6e4 */
+ uint32 pmu_statstimer_N; /* 0x6e8 */
+ uint32 PAD[1];
+ uint32 mac_res_req_timer1; /* 0x6f0 */
+ uint32 mac_res_req_mask1; /* 0x6f4 */
+ uint32 PAD[2];
+ uint32 pmuintmask0; /* 0x700 */
+ uint32 pmuintmask1; /* 0x704 */
+ uint32 PAD[14];
+ uint32 pmuintstatus; /* 0x740 */
+ uint32 extwakeupstatus; /* 0x744 */
+ uint32 PAD[6];
+ uint32 extwakemask0; /* 0x760 */
+ uint32 extwakemask1; /* 0x764 */
+ uint32 PAD[2]; /* 0x768-0x76C */
+ uint32 extwakereqmask[2]; /* 0x770-0x774 */
+ uint32 PAD[2]; /* 0x778-0x77C */
+ uint32 pmuintctrl0; /* 0x780 */
+ uint32 PAD[3]; /* 0x784 - 0x78c */
+ uint32 extwakectrl[1]; /* 0x790 */
+ uint32 PAD[PADSZ(0x794u, 0x7b0u)]; /* 0x794 - 0x7b0 */
+ uint32 fis_ctrl_status; /* 0x7b4 */
+ uint32 fis_min_res_mask; /* 0x7b8 */
+ uint32 PAD[PADSZ(0x7bcu, 0x7bcu)]; /* 0x7bc */
+ uint32 precision_tmr_ctrl_status; /* 0x7c0 */
+ uint32 precision_tmr_capture_low; /* 0x7c4 */
+ uint32 precision_tmr_capture_high; /* 0x7c8 */
+ uint32 precision_tmr_capture_frac; /* 0x7cc */
+ uint32 precision_tmr_running_low; /* 0x7d0 */
+ uint32 precision_tmr_running_high; /* 0x7d4 */
+ uint32 precision_tmr_running_frac; /* 0x7d8 */
+ uint32 PAD[PADSZ(0x7dcu, 0x7e4u)]; /* 0x7dc - 0x7e4 */
+ uint32 core_cap_ext1; /* 0x7e8 */
+ uint32 PAD[PADSZ(0x7ecu, 0x7fcu)]; /* 0x7ec - 0x7fc */
+
+ uint16 sromotp[512]; /* 0x800 */
+#ifdef CCNFLASH_SUPPORT
+ /* Nand flash MLC controller registers (corerev >= 38) */
+ uint32 nand_revision; /* 0xC00 */
+ uint32 nand_cmd_start;
+ uint32 nand_cmd_addr_x;
+ uint32 nand_cmd_addr;
+ uint32 nand_cmd_end_addr;
+ uint32 nand_cs_nand_select;
+ uint32 nand_cs_nand_xor;
+ uint32 PAD;
+ uint32 nand_spare_rd0;
+ uint32 nand_spare_rd4;
+ uint32 nand_spare_rd8;
+ uint32 nand_spare_rd12;
+ uint32 nand_spare_wr0;
+ uint32 nand_spare_wr4;
+ uint32 nand_spare_wr8;
+ uint32 nand_spare_wr12;
+ uint32 nand_acc_control;
+ uint32 PAD;
+ uint32 nand_config;
+ uint32 PAD;
+ uint32 nand_timing_1;
+ uint32 nand_timing_2;
+ uint32 nand_semaphore;
+ uint32 PAD;
+ uint32 nand_devid;
+ uint32 nand_devid_x;
+ uint32 nand_block_lock_status;
+ uint32 nand_intfc_status;
+ uint32 nand_ecc_corr_addr_x;
+ uint32 nand_ecc_corr_addr;
+ uint32 nand_ecc_unc_addr_x;
+ uint32 nand_ecc_unc_addr;
+ uint32 nand_read_error_count;
+ uint32 nand_corr_stat_threshold;
+ uint32 PAD[2];
+ uint32 nand_read_addr_x;
+ uint32 nand_read_addr;
+ uint32 nand_page_program_addr_x;
+ uint32 nand_page_program_addr;
+ uint32 nand_copy_back_addr_x;
+ uint32 nand_copy_back_addr;
+ uint32 nand_block_erase_addr_x;
+ uint32 nand_block_erase_addr;
+ uint32 nand_inv_read_addr_x;
+ uint32 nand_inv_read_addr;
+ uint32 PAD[2];
+ uint32 nand_blk_wr_protect;
+ uint32 PAD[3];
+ uint32 nand_acc_control_cs1;
+ uint32 nand_config_cs1;
+ uint32 nand_timing_1_cs1;
+ uint32 nand_timing_2_cs1;
+ uint32 PAD[20];
+ uint32 nand_spare_rd16;
+ uint32 nand_spare_rd20;
+ uint32 nand_spare_rd24;
+ uint32 nand_spare_rd28;
+ uint32 nand_cache_addr;
+ uint32 nand_cache_data;
+ uint32 nand_ctrl_config;
+ uint32 nand_ctrl_status;
+#endif /* CCNFLASH_SUPPORT */
+	/* Note: the GCI registers clash with the NFLASH registers (both
+	 * start at offset 0xC00), so the layout below overlaps them. Any
+	 * function that accesses the clashing registers has to be protected
+	 * with NFLASH_SUPPORT and will assert if the clash actually happens.
+	 */
+ uint32 gci_corecaps0; /* GCI starting at 0xC00 */
+ uint32 gci_corecaps1;
+ uint32 gci_corecaps2;
+ uint32 gci_corectrl;
+ uint32 gci_corestat; /* 0xC10 */
+ uint32 gci_intstat; /* 0xC14 */
+ uint32 gci_intmask; /* 0xC18 */
+ uint32 gci_wakemask; /* 0xC1C */
+ uint32 gci_levelintstat; /* 0xC20 */
+ uint32 gci_eventintstat; /* 0xC24 */
+ uint32 PAD[6];
+ uint32 gci_indirect_addr; /* 0xC40 */
+ uint32 gci_gpioctl; /* 0xC44 */
+ uint32 gci_gpiostatus;
+ uint32 gci_gpiomask; /* 0xC4C */
+ uint32 gci_eventsummary; /* 0xC50 */
+ uint32 gci_miscctl; /* 0xC54 */
+ uint32 gci_gpiointmask;
+ uint32 gci_gpiowakemask;
+	uint32	gci_input[32];		/* 0xC60 */
+	uint32	gci_event[32];		/* 0xCE0 */
+	uint32	gci_output[4];		/* 0xD60 */
+ uint32 gci_control_0; /* 0xD70 */
+ uint32 gci_control_1; /* 0xD74 */
+ uint32 gci_intpolreg; /* 0xD78 */
+ uint32 gci_levelintmask; /* 0xD7C */
+ uint32 gci_eventintmask; /* 0xD80 */
+ uint32 PAD[3];
+ uint32 gci_inbandlevelintmask; /* 0xD90 */
+ uint32 gci_inbandeventintmask; /* 0xD94 */
+ uint32 PAD[2];
+ uint32 gci_seciauxtx; /* 0xDA0 */
+ uint32 gci_seciauxrx; /* 0xDA4 */
+ uint32 gci_secitx_datatag; /* 0xDA8 */
+ uint32 gci_secirx_datatag; /* 0xDAC */
+ uint32 gci_secitx_datamask; /* 0xDB0 */
+ uint32 gci_seciusef0tx_reg; /* 0xDB4 */
+ uint32 gci_secif0tx_offset; /* 0xDB8 */
+ uint32 gci_secif0rx_offset; /* 0xDBC */
+ uint32 gci_secif1tx_offset; /* 0xDC0 */
+ uint32 gci_rxfifo_common_ctrl; /* 0xDC4 */
+ uint32 gci_rxfifoctrl; /* 0xDC8 */
+	uint32	gci_uartreadid;		/* 0xDCC */
+	uint32	gci_seciuartescval;	/* 0xDD0 */
+	uint32	PAD;
+	uint32	gci_secififolevel;	/* 0xDD8 */
+	uint32	gci_seciuartdata;	/* 0xDDC */
+	uint32	gci_secibauddiv;	/* 0xDE0 */
+	uint32	gci_secifcr;		/* 0xDE4 */
+	uint32	gci_secilcr;		/* 0xDE8 */
+	uint32	gci_secimcr;		/* 0xDEC */
+	uint32	gci_secilsr;		/* 0xDF0 */
+	uint32	gci_secimsr;		/* 0xDF4 */
+	uint32	gci_baudadj;		/* 0xDF8 */
+ uint32 PAD;
+ uint32 gci_chipctrl; /* 0xE00 */
+ uint32 gci_chipsts; /* 0xE04 */
+ uint32 gci_gpioout; /* 0xE08 */
+ uint32 gci_gpioout_read; /* 0xE0C */
+ uint32 gci_mpwaketx; /* 0xE10 */
+ uint32 gci_mpwakedetect; /* 0xE14 */
+ uint32 gci_seciin_ctrl; /* 0xE18 */
+ uint32 gci_seciout_ctrl; /* 0xE1C */
+ uint32 gci_seciin_auxfifo_en; /* 0xE20 */
+ uint32 gci_seciout_txen_txbr; /* 0xE24 */
+ uint32 gci_seciin_rxbrstatus; /* 0xE28 */
+ uint32 gci_seciin_rxerrstatus; /* 0xE2C */
+ uint32 gci_seciin_fcstatus; /* 0xE30 */
+ uint32 gci_seciout_txstatus; /* 0xE34 */
+ uint32 gci_seciout_txbrstatus; /* 0xE38 */
+
+} chipcregs_t;
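+
+/* Illustrative sketch (not part of the original header): the PMU note
+ * above says ILP-driven timers update asynchronously to HT/ALP, so a
+ * consistent value requires the read-twice-and-compare protocol. The
+ * CHIPC_USAGE_EXAMPLES guard is hypothetical and never defined by the
+ * build; the driver itself reads registers through its OSL accessors
+ * rather than the raw volatile dereference shown here.
+ */
+#ifdef CHIPC_USAGE_EXAMPLES
+static uint32
+chipc_example_read_ilp_timer(volatile chipcregs_t *cc)
+{
+	uint32 prev, cur;
+
+	cur = cc->pmutimer;
+	do {
+		prev = cur;
+		cur = cc->pmutimer;	/* re-read until two reads agree */
+	} while (cur != prev);
+
+	return cur;
+}
+#endif /* CHIPC_USAGE_EXAMPLES */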
+
+#endif /* !_LANGUAGE_ASSEMBLY && !__ASSEMBLY__ */
+
+#if !defined(IL_BIGENDIAN)
+#define CC_CHIPID 0
+#define CC_CAPABILITIES 4
+#define CC_CHIPST 0x2c
+#define CC_EROMPTR 0xfc
+#endif /* IL_BIGENDIAN */
+
+#define CC_OTPST 0x10
+#define CC_INTSTATUS 0x20
+#define CC_INTMASK 0x24
+#define CC_JTAGCMD 0x30
+#define CC_JTAGIR 0x34
+#define CC_JTAGDR 0x38
+#define CC_JTAGCTRL 0x3c
+#define CC_GPIOPU 0x58
+#define CC_GPIOPD 0x5c
+#define CC_GPIOIN 0x60
+#define CC_GPIOOUT 0x64
+#define CC_GPIOOUTEN 0x68
+#define CC_GPIOCTRL 0x6c
+#define CC_GPIOPOL 0x70
+#define CC_GPIOINTM 0x74
+#define CC_GPIOEVENT 0x78
+#define CC_GPIOEVENTMASK 0x7c
+#define CC_WATCHDOG 0x80
+#define CC_GPIOEVENTPOL 0x84
+#define CC_CLKC_N 0x90
+#define CC_CLKC_M0 0x94
+#define CC_CLKC_M1 0x98
+#define CC_CLKC_M2 0x9c
+#define CC_CLKC_M3 0xa0
+#define CC_CLKDIV 0xa4
+#define CC_CAP_EXT 0xac
+#define CC_SYS_CLK_CTL 0xc0
+#define CC_BP_ADRLOW 0xd0
+#define CC_BP_ADRHI 0xd4
+#define CC_BP_DATA 0xd8
+#define CC_SCR_DHD_TO_BL CC_BP_ADRHI
+#define CC_SCR_BL_TO_DHD CC_BP_ADRLOW
+#define CC_CLKDIV2 0xf0
+#define CC_CLK_CTL_ST SI_CLK_CTL_ST
+#define PMU_CTL 0x600
+#define PMU_CAP 0x604
+#define PMU_ST 0x608
+#define PMU_RES_STATE 0x60c
+#define PMU_RES_PENDING 0x610
+#define PMU_TIMER 0x614
+#define PMU_MIN_RES_MASK 0x618
+#define PMU_MAX_RES_MASK 0x61c
+#define CC_CHIPCTL_ADDR 0x650
+#define CC_CHIPCTL_DATA 0x654
+#define PMU_REG_CONTROL_ADDR 0x658
+#define PMU_REG_CONTROL_DATA 0x65C
+#define PMU_PLL_CONTROL_ADDR 0x660
+#define PMU_PLL_CONTROL_DATA 0x664
+#define PMU_RSRC_CONTROL_MASK 0x7B0
+
+#define CC_SROM_CTRL 0x190
+#define CC_SROM_ADDRESS 0x194u
+#define CC_SROM_DATA 0x198u
+#define CC_SROM_OTP 0x0800
+#define CC_GCI_INDIRECT_ADDR_REG 0xC40
+#define CC_GCI_CHIP_CTRL_REG 0xE00
+#define CC_GCI_CC_OFFSET_2 2
+#define CC_GCI_CC_OFFSET_5 5
+#define CC_SWD_CTRL 0x380
+#define CC_SWD_REQACK 0x384
+#define CC_SWD_DATA 0x388
+#define GPIO_SEL_0 0x00001111
+#define GPIO_SEL_1 0x11110000
+#define GPIO_SEL_8 0x00001111
+#define GPIO_SEL_9 0x11110000
+
+#define CHIPCTRLREG0 0x0
+#define CHIPCTRLREG1 0x1
+#define CHIPCTRLREG2 0x2
+#define CHIPCTRLREG3 0x3
+#define CHIPCTRLREG4 0x4
+#define CHIPCTRLREG5 0x5
+#define CHIPCTRLREG6 0x6
+#define CHIPCTRLREG13 0xd
+#define CHIPCTRLREG16 0x10
+#define REGCTRLREG4 0x4
+#define REGCTRLREG5 0x5
+#define REGCTRLREG6 0x6
+#define MINRESMASKREG 0x618
+#define MAXRESMASKREG 0x61c
+#define CHIPCTRLADDR 0x650
+#define CHIPCTRLDATA 0x654
+#define RSRCTABLEADDR 0x620
+#define PMU_RES_DEP_MASK 0x624
+#define RSRCUPDWNTIME 0x628
+#define PMUREG_RESREQ_MASK 0x68c
+#define PMUREG_RESREQ_TIMER 0x688
+#define PMUREG_RESREQ_MASK1 0x6f4
+#define PMUREG_RESREQ_TIMER1 0x6f0
+#define EXT_LPO_AVAIL 0x100
+#define LPO_SEL (1 << 0)
+#define CC_EXT_LPO_PU 0x200000
+#define GC_EXT_LPO_PU 0x2
+#define CC_INT_LPO_PU 0x100000
+#define GC_INT_LPO_PU 0x1
+#define EXT_LPO_SEL 0x8
+#define INT_LPO_SEL 0x4
+#define ENABLE_FINE_CBUCK_CTRL (1 << 30)
+#define REGCTRL5_PWM_AUTO_CTRL_MASK 0x007e0000
+#define REGCTRL5_PWM_AUTO_CTRL_SHIFT 17
+#define REGCTRL6_PWM_AUTO_CTRL_MASK 0x3fff0000
+#define REGCTRL6_PWM_AUTO_CTRL_SHIFT 16
+#define CC_BP_IND_ACCESS_START_SHIFT 9
+#define CC_BP_IND_ACCESS_START_MASK (1 << CC_BP_IND_ACCESS_START_SHIFT)
+#define CC_BP_IND_ACCESS_RDWR_SHIFT 8
+#define CC_BP_IND_ACCESS_RDWR_MASK (1 << CC_BP_IND_ACCESS_RDWR_SHIFT)
+#define CC_BP_IND_ACCESS_ERROR_SHIFT 10
+#define CC_BP_IND_ACCESS_ERROR_MASK (1 << CC_BP_IND_ACCESS_ERROR_SHIFT)
+#define GC_BT_CTRL_UARTPADS_OVRD_EN (1u << 1)
+
+#define LPO_SEL_TIMEOUT 1000
+
+#define LPO_FINAL_SEL_SHIFT 18
+
+#define LHL_LPO1_SEL 0
+#define LHL_LPO2_SEL 0x1
+#define LHL_32k_SEL 0x2
+#define LHL_EXT_SEL 0x3
+
+#define EXTLPO_BUF_PD 0x40
+#define LPO1_PD_EN 0x1
+#define LPO1_PD_SEL 0x6
+#define LPO1_PD_SEL_VAL 0x4
+#define LPO2_PD_EN 0x8
+#define LPO2_PD_SEL 0x30
+#define LPO2_PD_SEL_VAL 0x20
+#define OSC_32k_PD 0x80
+
+#define LHL_CLK_DET_CTL_AD_CNTR_CLK_SEL 0x3
+
+#define LHL_LPO_AUTO 0x0
+#define LHL_LPO1_ENAB 0x1
+#define LHL_LPO2_ENAB 0x2
+#define LHL_OSC_32k_ENAB 0x3
+#define LHL_EXT_LPO_ENAB 0x4
+#define RADIO_LPO_ENAB 0x5
+
+#define LHL_CLK_DET_CTL_ADR_LHL_CNTR_EN 0x4
+#define LHL_CLK_DET_CTL_ADR_LHL_CNTR_CLR 0x8
+#define LHL_CLK_DET_CNT 0xF0
+#define LHL_CLK_DET_CNT_SHIFT 4
+#define LPO_SEL_SHIFT 9
+
+#define LHL_MAIN_CTL_ADR_FINAL_CLK_SEL 0x3C0000
+#define LHL_MAIN_CTL_ADR_LHL_WLCLK_SEL 0x600
+
+#define CLK_DET_CNT_THRESH 8
+
+#ifdef SR_DEBUG
+#define SUBCORE_POWER_ON 0x0001
+#define PHY_POWER_ON 0x0010
+#define VDDM_POWER_ON 0x0100
+#define MEMLPLDO_POWER_ON 0x1000
+#define SUBCORE_POWER_ON_CHK 0x00040000
+#define PHY_POWER_ON_CHK 0x00080000
+#define VDDM_POWER_ON_CHK 0x00100000
+#define MEMLPLDO_POWER_ON_CHK 0x00200000
+#endif /* SR_DEBUG */
+
+#ifdef CCNFLASH_SUPPORT
+/* NAND flash support */
+#define CC_NAND_REVISION 0xC00
+#define CC_NAND_CMD_START 0xC04
+#define CC_NAND_CMD_ADDR 0xC0C
+#define CC_NAND_SPARE_RD_0 0xC20
+#define CC_NAND_SPARE_RD_4 0xC24
+#define CC_NAND_SPARE_RD_8 0xC28
+#define CC_NAND_SPARE_RD_C 0xC2C
+#define CC_NAND_CONFIG 0xC48
+#define CC_NAND_DEVID 0xC60
+#define CC_NAND_DEVID_EXT 0xC64
+#define CC_NAND_INTFC_STATUS 0xC6C
+#endif /* CCNFLASH_SUPPORT */
+
+/* chipid */
+#define CID_ID_MASK 0x0000ffff /**< Chip Id mask */
+#define CID_REV_MASK 0x000f0000 /**< Chip Revision mask */
+#define CID_REV_SHIFT 16 /**< Chip Revision shift */
+#define CID_PKG_MASK 0x00f00000 /**< Package Option mask */
+#define CID_PKG_SHIFT 20 /**< Package Option shift */
+#define CID_CC_MASK 0x0f000000 /**< CoreCount (corerev >= 4) */
+#define CID_CC_SHIFT 24
+#define CID_TYPE_MASK 0xf0000000 /**< Chip Type */
+#define CID_TYPE_SHIFT 28
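+
+/* Illustrative sketch (not part of the original header): decoding the
+ * chipid register with the CID_* fields above. The helper name and the
+ * CHIPC_USAGE_EXAMPLES guard are hypothetical (the guard is never
+ * defined by the build).
+ */
+#ifdef CHIPC_USAGE_EXAMPLES
+static void
+chipc_example_decode_chipid(uint32 regval, uint32 *id, uint32 *rev, uint32 *pkg)
+{
+	*id = regval & CID_ID_MASK;				/* chip id */
+	*rev = (regval & CID_REV_MASK) >> CID_REV_SHIFT;	/* chip revision */
+	*pkg = (regval & CID_PKG_MASK) >> CID_PKG_SHIFT;	/* package option */
+}
+#endif /* CHIPC_USAGE_EXAMPLES */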
+
+/* capabilities */
+#define CC_CAP_UARTS_MASK 0x00000003u /**< Number of UARTs */
+#define CC_CAP_MIPSEB 0x00000004u /**< MIPS is in big-endian mode */
+#define CC_CAP_UCLKSEL 0x00000018u /**< UARTs clock select */
+#define CC_CAP_UINTCLK 0x00000008u /**< UARTs are driven by internal divided clock */
+#define CC_CAP_UARTGPIO 0x00000020u /**< UARTs own GPIOs 15:12 */
+#define CC_CAP_EXTBUS_MASK 0x000000c0u /**< External bus mask */
+#define CC_CAP_EXTBUS_NONE 0x00000000u /**< No ExtBus present */
+#define CC_CAP_EXTBUS_FULL 0x00000040u /**< ExtBus: PCMCIA, IDE & Prog */
+#define CC_CAP_EXTBUS_PROG 0x00000080u /**< ExtBus: ProgIf only */
+#define CC_CAP_FLASH_MASK 0x00000700u /**< Type of flash */
+#define CC_CAP_PLL_MASK 0x00038000u /**< Type of PLL */
+#define CC_CAP_PWR_CTL 0x00040000u /**< Power control */
+#define CC_CAP_OTPSIZE 0x00380000u /**< OTP Size (0 = none) */
+#define CC_CAP_OTPSIZE_SHIFT 19 /**< OTP Size shift */
+#define CC_CAP_OTPSIZE_BASE 5 /**< OTP Size base */
+#define CC_CAP_JTAGP 0x00400000u /**< JTAG Master Present */
+#define CC_CAP_ROM 0x00800000u /**< Internal boot rom active */
+#define CC_CAP_BKPLN64 0x08000000u /**< 64-bit backplane */
+#define CC_CAP_PMU 0x10000000u /**< PMU Present, rev >= 20 */
+#define CC_CAP_ECI 0x20000000u /**< ECI Present, rev >= 21 */
+#define CC_CAP_SROM 0x40000000u /**< Srom Present, rev >= 32 */
+#define CC_CAP_NFLASH 0x80000000u /**< Nand flash present, rev >= 35 */
+
+#define CC_CAP2_SECI 0x00000001u /**< SECI Present, rev >= 36 */
+#define CC_CAP2_GSIO 0x00000002u /**< GSIO (spi/i2c) present, rev >= 37 */
+
+/* capabilities extension */
+#define CC_CAP_EXT_SECI_PRESENT 0x00000001u /**< SECI present */
+#define CC_CAP_EXT_GSIO_PRESENT 0x00000002u /**< GSIO present */
+#define CC_CAP_EXT_GCI_PRESENT 0x00000004u /**< GCI present */
+#define CC_CAP_EXT_SECI_PUART_PRESENT 0x00000008u /**< UART present */
+#define CC_CAP_EXT_AOB_PRESENT 0x00000040u /**< AOB present */
+#define CC_CAP_EXT_SWD_PRESENT 0x00000400u /**< SWD present */
+#define	CC_CAP_SR_AON_PRESENT	0x0001E000u	/**< SR AON present */
+#define CC_CAP_EXT1_DVFS_PRESENT 0x00001000u /**< DVFS present */
+
+/* DVFS core count */
+#define CC_CAP_EXT1_CORE_CNT_SHIFT (7u)
+#define CC_CAP_EXT1_CORE_CNT_MASK ((0x1Fu) << CC_CAP_EXT1_CORE_CNT_SHIFT)
+
+/* SpmCtrl (Chipcommon Offset 0x690)
+ * Bits 27:16 AlpDiv
+ * Clock divider control for dividing ALP or TCK clock
+ * (bit 8 determines ALP vs TCK)
+ * Bits 8 UseDivTck
+ * See UseDivAlp (bit 1) for more details
+ * Bits 7:6 DebugMuxSel
+ * Controls the debug mux for SpmDebug register
+ * Bits 5 IntPending
+ * This field is set to 1 when any of the bits in IntHiStatus or IntLoStatus
+ * is set. It is automatically cleared after reading and clearing the
+ * IntHiStatus and IntLoStatus registers. This bit is Read only.
+ * Bits 4 SpmIdle
+ * Indicates whether the spm controller is running (SpmIdle=0) or in idle
+ * state (SpmIdle=1); Note that after setting Spmen=1 (or 0), it takes a
+ * few clock cycles (ILP or divided ALP) for SpmIdle to go to 0 (or 1).
+ * This bit is Read only.
+ * Bits 3 RoDisOutput
+ * Debug register - gate off all the SPM ring oscillator clock outputs
+ * Bits 2 RstSpm
+ * Reset spm controller.
+ * Put spm in reset before changing UseDivAlp and AlpDiv
+ * Bits 1 UseDivAlp
+ * This field, along with UseDivTck, selects the clock as the reference clock
+ * Bits [UseDivTck,UseDivAlp]:
+ * 00 - Use ILP clock as reference clock
+ * 01 - Use divided ALP clock
+ * 10 - Use divided jtag TCK
+ * Bits 0 Spmen
+ * 0 - SPM disabled
+ * 1 - SPM enabled
+ * Program all the SPM controls before enabling spm. For one-shot operation,
+ * SpmIdle indicates when the one-shot run has completed. After one-shot
+ * completion, spmen needs to be disabled first before enabling again.
+ */
+#define SPMCTRL_ALPDIV_FUNC 0x1ffu
+#define SPMCTRL_ALPDIV_RO 0xfffu
+#define SPMCTRL_ALPDIV_SHIFT 16u
+#define SPMCTRL_ALPDIV_MASK (0xfffu << SPMCTRL_ALPDIV_SHIFT)
+#define SPMCTRL_RSTSPM 0x1u
+#define SPMCTRL_RSTSPM_SHIFT 2u
+#define SPMCTRL_RSTSPM_MASK (0x1u << SPMCTRL_RSTSPM_SHIFT)
+#define SPMCTRL_USEDIVALP 0x1u
+#define SPMCTRL_USEDIVALP_SHIFT 1u
+#define SPMCTRL_USEDIVALP_MASK (0x1u << SPMCTRL_USEDIVALP_SHIFT)
+#define SPMCTRL_SPMEN 0x1u
+#define SPMCTRL_SPMEN_SHIFT 0u
+#define SPMCTRL_SPMEN_MASK (0x1u << SPMCTRL_SPMEN_SHIFT)
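+
+/* Illustrative sketch (not part of the original header) of the SpmCtrl
+ * programming order described above: hold RstSpm while changing
+ * UseDivAlp/AlpDiv, then release reset and set Spmen last. Hypothetical
+ * example guard; a real driver would use its register accessors and the
+ * documented wait for SpmIdle.
+ */
+#ifdef CHIPC_USAGE_EXAMPLES
+static void
+chipc_example_spm_enable(volatile uint32 *spmctrl, uint32 alpdiv)
+{
+	uint32 val;
+
+	/* Put the SPM in reset before changing UseDivAlp and AlpDiv */
+	*spmctrl = SPMCTRL_RSTSPM_MASK;
+
+	/* Program the divided-ALP reference while still in reset */
+	val = SPMCTRL_RSTSPM_MASK | SPMCTRL_USEDIVALP_MASK |
+		((alpdiv << SPMCTRL_ALPDIV_SHIFT) & SPMCTRL_ALPDIV_MASK);
+	*spmctrl = val;
+
+	/* Release reset, then enable the SPM as the final step */
+	val &= ~SPMCTRL_RSTSPM_MASK;
+	*spmctrl = val;
+	*spmctrl = val | SPMCTRL_SPMEN_MASK;
+}
+#endif /* CHIPC_USAGE_EXAMPLES */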
+
+/* SpmClkCtrl (Chipcommon Offset 0x698)
+ * Bits 31 OneShot
+ * 0 - take periodic measurements based on IntervalValue
+ * 1 - take a one-shot measurement
+ * When OneShot=1, IntervalValue determines the amount of time to wait
+ * before taking the measurement
+ * Bits 30:28 ROClkprediv1
+ * ROClkprediv1 and ROClkprediv2 controls the clock dividers of the RO clk
+ * before it goes to the monitor
+ * The RO clk goes through prediv1, followed by prediv2
+ * prediv1:
+ * 0 - no divide
+ * 1 - divide by 2
+ * 2 - divide by 4
+ * 3 - divide by 8
+ * 4 - divide by 16
+ * 5 - divide by 32
+ * prediv2:
+ * 0 - no divide
+ * 1 to 15 - divide by (prediv2+1)
+ */
+#define SPMCLKCTRL_SAMPLETIME 0x2u
+#define SPMCLKCTRL_SAMPLETIME_SHIFT 24u
+#define SPMCLKCTRL_SAMPLETIME_MASK (0xfu << SPMCLKCTRL_SAMPLETIME_SHIFT)
+#define SPMCLKCTRL_ONESHOT 0x1u
+#define SPMCLKCTRL_ONESHOT_SHIFT 31u
+#define SPMCLKCTRL_ONESHOT_MASK (0x1u << SPMCLKCTRL_ONESHOT_SHIFT)
+
+/* MonCtrlN (Chipcommon Offset 0x6a8)
+ * Bits 15:8 TargetRo
+ * The target ring oscillator to observe
+ * Bits 7:6 TargetRoExt
+ * Extended select option to choose the target clock to monitor;
+ * 00 - selects ring oscillator clock;
+ * 10 - selects functional clock;
+ * 11 - selects DFT clocks;
+ * Bits 15:8 (TargetRO) are used to select the specific RO, functional or
+ * DFT clock
+ * Bits 3 intHiEn
+ * Interrupt hi enable (MonEn should be 1)
+ * Bits 2 intLoEn
+ * Interrupt lo enable (MonEn should be 1)
+ * Bits 1 HwEnable
+ * TBD
+ * Bits 0 MonEn
+ * Enable monitor, interrupt and watermark functions
+ */
+#define MONCTRLN_TARGETRO_PMU_ALP_CLK 0u
+#define MONCTRLN_TARGETRO_PCIE_ALP_CLK 1u
+#define MONCTRLN_TARGETRO_CB_BP_CLK 2u
+#define MONCTRLN_TARGETRO_ARMCR4_CLK_4387B0 3u
+#define MONCTRLN_TARGETRO_ARMCR4_CLK_4387C0 20u
+#define MONCTRLN_TARGETRO_SHIFT 8u
+#define MONCTRLN_TARGETRO_MASK (0xffu << MONCTRLN_TARGETRO_SHIFT)
+#define MONCTRLN_TARGETROMAX 64u
+#define MONCTRLN_TARGETROHI 32u
+#define MONCTRLN_TARGETROEXT_RO 0x0u
+#define MONCTRLN_TARGETROEXT_FUNC 0x2u
+#define MONCTRLN_TARGETROEXT_DFT 0x3u
+#define MONCTRLN_TARGETROEXT_SHIFT 6u
+#define MONCTRLN_TARGETROEXT_MASK (0x3u << MONCTRLN_TARGETROEXT_SHIFT)
+#define MONCTRLN_MONEN 0x1u
+#define MONCTRLN_MONEN_SHIFT 0u
+#define MONCTRLN_MONEN_MASK		(0x1u << MONCTRLN_MONEN_SHIFT)
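+
+/* Illustrative sketch (not part of the original header): composing a
+ * MonCtrlN value that monitors one of the ring oscillators listed
+ * above. Hypothetical example guard and helper name.
+ */
+#ifdef CHIPC_USAGE_EXAMPLES
+static uint32
+chipc_example_monctrl_val(uint32 target_ro)
+{
+	uint32 val = 0;
+
+	/* Select the target RO and the "ring oscillator" extended source */
+	val |= (target_ro << MONCTRLN_TARGETRO_SHIFT) & MONCTRLN_TARGETRO_MASK;
+	val |= (MONCTRLN_TARGETROEXT_RO << MONCTRLN_TARGETROEXT_SHIFT) &
+		MONCTRLN_TARGETROEXT_MASK;
+	val |= MONCTRLN_MONEN_MASK;	/* MonEn: enable monitor functions */
+
+	return val;
+}
+#endif /* CHIPC_USAGE_EXAMPLES */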
+
+/* DvfsCoreCtrlN
+ * Bits 10 Request_override_PDn
+ * When set, the dvfs_request logic for this core is overridden with the
+ * content in Request_val_PDn. This field is ignored when
+ * DVFSCtrl1.dvfs_req_override is set.
+ * Bits 9:8 Request_val_PDn
+ * see Request_override_PDn description
+ * Bits 4:0 DVFS_RsrcTrig_PDn
+ * Specifies the pmu resource that is used to trigger the DVFS request for
+ * this core. The current plan is to use the appropriate PWRSW_* pmu
+ * resource for each power domain / core
+ */
+#define CTRLN_REQUEST_OVERRIDE_SHIFT 10u
+#define CTRLN_REQUEST_OVERRIDE_MASK (0x1u << CTRLN_REQUEST_OVERRIDE_SHIFT)
+#define CTRLN_REQUEST_VAL_SHIFT 8u
+#define CTRLN_REQUEST_VAL_MASK (0x3u << CTRLN_REQUEST_VAL_SHIFT)
+#define CTRLN_RSRC_TRIG_SHIFT 0u
+#define CTRLN_RSRC_TRIG_MASK (0x1Fu << CTRLN_RSRC_TRIG_SHIFT)
+#define CTRLN_RSRC_TRIG_CHIPC 0x1Au
+#define CTRLN_RSRC_TRIG_PCIE 0x1Au
+#define CTRLN_RSRC_TRIG_ARM 0x8u
+#define CTRLN_RSRC_TRIG_D11_MAIN 0xEu
+#define CTRLN_RSRC_TRIG_D11_AUX 0xBu
+#define CTRLN_RSRC_TRIG_D11_SCAN 0xCu
+#define CTRLN_RSRC_TRIG_HWA 0x8u
+#define CTRLN_RSRC_TRIG_BT_MAIN 0x9u
+#define CTRLN_RSRC_TRIG_BT_SCAN 0xAu
+
+/* DVFS core FW index */
+#define DVFS_CORE_CHIPC 0u
+#define DVFS_CORE_PCIE 1u
+#define DVFS_CORE_ARM 2u
+#define DVFS_CORE_D11_MAIN 3u
+#define DVFS_CORE_D11_AUX 4u
+#define DVFS_CORE_D11_SCAN 5u
+#define DVFS_CORE_BT_MAIN 6u
+#define DVFS_CORE_BT_SCAN 7u
+#define DVFS_CORE_HWA 8u
+#define DVFS_CORE_SYSMEM ((PMUREV((sih)->pmurev) < 43u) ? \
+ 9u : 8u)
+#define DVFS_CORE_MASK 0xFu
+
+#define DVFS_CORE_INVALID_IDX 0xFFu
+
+/* DVFS_Ctrl2 (PMU_BASE + 0x814)
+ * Bits 31:28 Voltage ramp down step
+ * Voltage decrement amount during ramp down (10mv units)
+ * Bits 27:24 Voltage ramp up step
+ * Voltage increment amount during ramp up (10mv units)
+ * Bits 23:16 Voltage ramp down interval
+ * Number of clocks to wait during each voltage decrement
+ * Bits 15:8 Voltage ramp up interval
+ * Number of clocks to wait during each voltage increment
+ * Bits 7:0 Clock stable time
+ * Number of clocks to wait after dvfs_clk_sel is asserted
+ */
+#define DVFS_VOLTAGE_RAMP_DOWN_STEP 1u
+#define DVFS_VOLTAGE_RAMP_DOWN_STEP_SHIFT 28u
+#define DVFS_VOLTAGE_RAMP_DOWN_STEP_MASK (0xFu << DVFS_VOLTAGE_RAMP_DOWN_STEP_SHIFT)
+#define DVFS_VOLTAGE_RAMP_UP_STEP 1u
+#define DVFS_VOLTAGE_RAMP_UP_STEP_SHIFT 24u
+#define DVFS_VOLTAGE_RAMP_UP_STEP_MASK (0xFu << DVFS_VOLTAGE_RAMP_UP_STEP_SHIFT)
+#define DVFS_VOLTAGE_RAMP_DOWN_INTERVAL 1u
+#define DVFS_VOLTAGE_RAMP_DOWN_INTERVAL_SHIFT 16u
+#define DVFS_VOLTAGE_RAMP_DOWN_INTERVAL_MASK (0xFFu << DVFS_VOLTAGE_RAMP_DOWN_INTERVAL_SHIFT)
+#define DVFS_VOLTAGE_RAMP_UP_INTERVAL 1u
+#define DVFS_VOLTAGE_RAMP_UP_INTERVAL_SHIFT 8u
+#define DVFS_VOLTAGE_RAMP_UP_INTERVAL_MASK (0xFFu << DVFS_VOLTAGE_RAMP_UP_INTERVAL_SHIFT)
+#define DVFS_CLOCK_STABLE_TIME 3u
+#define DVFS_CLOCK_STABLE_TIME_SHIFT 0
+#define DVFS_CLOCK_STABLE_TIME_MASK (0xFFu << DVFS_CLOCK_STABLE_TIME_SHIFT)
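+
+/* Illustrative sketch (not part of the original header): packing the
+ * DVFS_Ctrl2 ramp parameters defined above into a single register
+ * value. Hypothetical example guard and helper name.
+ */
+#ifdef CHIPC_USAGE_EXAMPLES
+static uint32
+chipc_example_dvfs_ctrl2_val(void)
+{
+	return ((DVFS_VOLTAGE_RAMP_DOWN_STEP << DVFS_VOLTAGE_RAMP_DOWN_STEP_SHIFT) &
+			DVFS_VOLTAGE_RAMP_DOWN_STEP_MASK) |
+		((DVFS_VOLTAGE_RAMP_UP_STEP << DVFS_VOLTAGE_RAMP_UP_STEP_SHIFT) &
+			DVFS_VOLTAGE_RAMP_UP_STEP_MASK) |
+		((DVFS_VOLTAGE_RAMP_DOWN_INTERVAL << DVFS_VOLTAGE_RAMP_DOWN_INTERVAL_SHIFT) &
+			DVFS_VOLTAGE_RAMP_DOWN_INTERVAL_MASK) |
+		((DVFS_VOLTAGE_RAMP_UP_INTERVAL << DVFS_VOLTAGE_RAMP_UP_INTERVAL_SHIFT) &
+			DVFS_VOLTAGE_RAMP_UP_INTERVAL_MASK) |
+		((DVFS_CLOCK_STABLE_TIME << DVFS_CLOCK_STABLE_TIME_SHIFT) &
+			DVFS_CLOCK_STABLE_TIME_MASK);
+}
+#endif /* CHIPC_USAGE_EXAMPLES */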
+
+/* DVFS_Voltage (PMU_BASE + 0x818)
+ * Bits 22:16 HDV Voltage
+ * Specifies the target HDV voltage in 10mv units
+ * Bits 14:8 NDV Voltage
+ * Specifies the target NDV voltage in 10mv units
+ * Bits 6:0 LDV Voltage
+ * Specifies the target LDV voltage in 10mv units
+ */
+#define DVFS_VOLTAGE_XDV 0u /* Reserved */
+#ifdef WL_INITVALS
+#define DVFS_VOLTAGE_HDV (wliv_pmu_dvfs_voltage_hdv) /* 0.72V */
+#define DVFS_VOLTAGE_HDV_MAX (wliv_pmu_dvfs_voltage_hdv_max) /* 0.80V */
+#else
+#define DVFS_VOLTAGE_HDV 72u /* 0.72V */
+#define DVFS_VOLTAGE_HDV_MAX 80u /* 0.80V */
+#endif
+#define DVFS_VOLTAGE_HDV_PWR_OPT 68u /* 0.68V */
+#define DVFS_VOLTAGE_HDV_SHIFT 16u
+#define DVFS_VOLTAGE_HDV_MASK (0x7Fu << DVFS_VOLTAGE_HDV_SHIFT)
+#ifdef WL_INITVALS
+#define DVFS_VOLTAGE_NDV (wliv_pmu_dvfs_voltage_ndv) /* 0.72V */
+#define DVFS_VOLTAGE_NDV_NON_LVM (wliv_pmu_dvfs_voltage_ndv_non_lvm) /* 0.76V */
+#define DVFS_VOLTAGE_NDV_MAX (wliv_pmu_dvfs_voltage_ndv_max) /* 0.80V */
+#else
+#define DVFS_VOLTAGE_NDV 72u /* 0.72V */
+#define DVFS_VOLTAGE_NDV_NON_LVM 76u /* 0.76V */
+#define DVFS_VOLTAGE_NDV_MAX 80u /* 0.80V */
+#endif
+#define DVFS_VOLTAGE_NDV_PWR_OPT 68u /* 0.68V */
+#define DVFS_VOLTAGE_NDV_SHIFT 8u
+#define DVFS_VOLTAGE_NDV_MASK (0x7Fu << DVFS_VOLTAGE_NDV_SHIFT)
+#ifdef WL_INITVALS
+#define DVFS_VOLTAGE_LDV (wliv_pmu_dvfs_voltage_ldv) /* 0.65V */
+#else
+#define DVFS_VOLTAGE_LDV 65u /* 0.65V */
+#endif
+#define DVFS_VOLTAGE_LDV_PWR_OPT 65u /* 0.65V */
+#define DVFS_VOLTAGE_LDV_SHIFT 0u
+#define DVFS_VOLTAGE_LDV_MASK (0x7Fu << DVFS_VOLTAGE_LDV_SHIFT)
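+
+/* Illustrative sketch (not part of the original header): composing the
+ * DVFS_Voltage register from the LDV/NDV/HDV targets above (all in
+ * 10mv units). Hypothetical example guard; note that under WL_INITVALS
+ * the DVFS_VOLTAGE_* macros expand to tunable values rather than
+ * constants.
+ */
+#ifdef CHIPC_USAGE_EXAMPLES
+static uint32
+chipc_example_dvfs_voltage_val(void)
+{
+	return ((DVFS_VOLTAGE_HDV << DVFS_VOLTAGE_HDV_SHIFT) & DVFS_VOLTAGE_HDV_MASK) |
+		((DVFS_VOLTAGE_NDV << DVFS_VOLTAGE_NDV_SHIFT) & DVFS_VOLTAGE_NDV_MASK) |
+		((DVFS_VOLTAGE_LDV << DVFS_VOLTAGE_LDV_SHIFT) & DVFS_VOLTAGE_LDV_MASK);
+}
+#endif /* CHIPC_USAGE_EXAMPLES */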
+
+/* DVFS_Status (PMU_BASE + 0x81C)
+ * Bits 27:26 Raw_Core_Reqn
+ * Bits 25:24 Active_Core_Reqn
+ * Bits 12:11 Core_dvfs_status
+ * Bits 9:8 Dvfs_clk_sel
+ * 00 - LDV
+ * 01 - NDV
+ * Bits 6:0 Dvfs Voltage
+ * The real time voltage that is being output from the dvfs controller
+ */
+#define DVFS_RAW_CORE_REQ_SHIFT 26u
+#define DVFS_RAW_CORE_REQ_MASK (0x3u << DVFS_RAW_CORE_REQ_SHIFT)
+#define DVFS_ACT_CORE_REQ_SHIFT 24u
+#define DVFS_ACT_CORE_REQ_MASK (0x3u << DVFS_ACT_CORE_REQ_SHIFT)
+#define DVFS_CORE_STATUS_SHIFT 11u
+#define DVFS_CORE_STATUS_MASK (0x3u << DVFS_CORE_STATUS_SHIFT)
+#define DVFS_CLK_SEL_SHIFT 8u
+#define DVFS_CLK_SEL_MASK (0x3u << DVFS_CLK_SEL_SHIFT)
+#define DVFS_VOLTAGE_SHIFT 0u
+#define DVFS_VOLTAGE_MASK (0x7Fu << DVFS_VOLTAGE_SHIFT)
+
+/* DVFS_Ctrl1 (PMU_BASE + 0x810)
+ * Bits 0 Enable DVFS
+ * This bit will enable DVFS operation. When cleared, the complete DVFS
+ * controller is bypassed and DVFS_voltage output will be the contents of
+ * controller is bypassed and the DVFS_voltage output will be the contents
+ * of the NDV voltage register
+#define DVFS_DISABLE_DVFS 0u
+#define DVFS_ENABLE_DVFS 1u
+#define DVFS_ENABLE_DVFS_SHIFT 0u
+#define DVFS_ENABLE_DVFS_MASK (1u << DVFS_ENABLE_DVFS_SHIFT)
+
+#define DVFS_LPO_DELAY 40u /* usec (1 LPO clock + margin) */
+#define DVFS_FASTLPO_DELAY 2u /* usec (1 FAST_LPO clock + margin) */
+#define DVFS_NDV_LPO_DELAY 1500u
+#define DVFS_NDV_FASTLPO_DELAY 50u
+
+#if defined(BCM_FASTLPO) && !defined(BCM_FASTLPO_DISABLED)
+#define DVFS_DELAY DVFS_FASTLPO_DELAY
+#define DVFS_NDV_DELAY DVFS_NDV_FASTLPO_DELAY
+#else
+#define DVFS_DELAY DVFS_LPO_DELAY
+#define DVFS_NDV_DELAY DVFS_NDV_LPO_DELAY
+#endif /* BCM_FASTLPO && !BCM_FASTLPO_DISABLED */
+
+#define DVFS_LDV 0u
+#define DVFS_NDV 1u
+#define DVFS_HDV 2u
+
+/* PowerControl2 (Core Offset 0x1EC)
+ * Bits 17:16 DVFSStatus
+ * This 2-bit field is the DVFS voltage status mapped as
+ * 00 - LDV
+ * 01 - NDV
+ * 10 - HDV
+ * Bits 1:0 DVFSRequest
+ * This 2-bit field is used to request DVFS voltage mapped as shown above
+ */
+#define DVFS_REQ_LDV DVFS_LDV
+#define DVFS_REQ_NDV DVFS_NDV
+#define DVFS_REQ_HDV DVFS_HDV
+#define DVFS_REQ_SHIFT 0u
+#define DVFS_REQ_MASK (0x3u << DVFS_REQ_SHIFT)
+#define DVFS_STATUS_SHIFT 16u
+#define DVFS_STATUS_MASK (0x3u << DVFS_STATUS_SHIFT)
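+
+/* Illustrative sketch (not part of the original header): requesting a
+ * DVFS level through PowerControl2 and polling DVFSStatus until it
+ * matches, as described above. A real driver bounds the wait (e.g.
+ * using DVFS_DELAY between polls) instead of spinning forever;
+ * hypothetical example guard.
+ */
+#ifdef CHIPC_USAGE_EXAMPLES
+static void
+chipc_example_dvfs_request(volatile uint32 *powerctl2, uint32 level)
+{
+	uint32 val = *powerctl2;
+
+	/* Replace the 2-bit DVFSRequest field with the new level */
+	val = (val & ~DVFS_REQ_MASK) | ((level << DVFS_REQ_SHIFT) & DVFS_REQ_MASK);
+	*powerctl2 = val;
+
+	/* Wait until DVFSStatus reflects the requested level */
+	while (((*powerctl2 & DVFS_STATUS_MASK) >> DVFS_STATUS_SHIFT) != level)
+		;
+}
+#endif /* CHIPC_USAGE_EXAMPLES */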
+
+/* GCI Chip Control 16 Register
+ * Bits 0 CB Clock sel
+ * 0 - 160MHz
+ * 1 - 80MHz - BT can force CB backplane clock to 80MHz when wl is down
+ */
+#define GCI_CC16_CB_CLOCK_SEL_160 0u
+#define GCI_CC16_CB_CLOCK_SEL_80 1u
+#define GCI_CC16_CB_CLOCK_SEL_SHIFT 0u
+#define GCI_CC16_CB_CLOCK_SEL_MASK (0x1u << GCI_CC16_CB_CLOCK_SEL_SHIFT)
+#define GCI_CHIPCTRL_16_PRISEL_ANT_MASK_PSM_OVR (1 << 8)
+
+/* WL Channel Info to BT via GCI - bits 40 - 47 */
+#define GCI_WL_CHN_INFO_MASK (0xFF00)
+/* WL indication of MCHAN enabled/disabled to BT - bit 36 */
+#define GCI_WL_MCHAN_BIT_MASK (0x0010)
+
+#ifdef WLC_SW_DIVERSITY
+/* WL indication of SWDIV enabled/disabled to BT - bit 33 */
+#define GCI_WL_SWDIV_ANT_VALID_BIT_MASK (0x0002)
+#define GCI_SWDIV_ANT_VALID_SHIFT 0x1
+#define GCI_SWDIV_ANT_VALID_DISABLE 0x0
+#endif
+
+/* Indication to BT that WL is scheduling an ACL-based BLE scan grant */
+#define GCI_WL2BT_ACL_BSD_BLE_SCAN_GRNT_MASK	0x8000000
+/* Indication to BT that WLAN is awake */
+#define GCI_WL2BT_2G_AWAKE_MASK	(1u << 28u)
+
+/* WL indication of Aux Core 2G hibernate status - bit 50 */
+#define GCI_WL2BT_2G_HIB_STATE_MASK	(0x0040000u)
+
+/* WL Traffic Indication to BT */
+#define GCI_WL2BT_TRAFFIC_IND_SHIFT (12)
+#define GCI_WL2BT_TRAFFIC_IND_MASK (0x3 << GCI_WL2BT_TRAFFIC_IND_SHIFT)
+
+/* WL Strobe to BT */
+#define GCI_WL_STROBE_BIT_MASK (0x0020)
+/* bits [51:48] - reserved for wlan TX pwr index */
+/* bits [55:52] btc mode indication */
+#define GCI_WL_BTC_MODE_SHIFT (20)
+#define GCI_WL_BTC_MODE_MASK (0xF << GCI_WL_BTC_MODE_SHIFT)
+#define GCI_WL_ANT_BIT_MASK (0x00c0)
+#define GCI_WL_ANT_SHIFT_BITS (6)
+
+/* bit [40] - to indicate RC2CX mode to BT */
+#define GCI_WL_RC2CX_PERCTS_MASK 0x00000100u
+
+/* PLL type */
+#define PLL_NONE 0x00000000
+#define PLL_TYPE1 0x00010000 /**< 48MHz base, 3 dividers */
+#define PLL_TYPE2 0x00020000 /**< 48MHz, 4 dividers */
+#define PLL_TYPE3 0x00030000 /**< 25MHz, 2 dividers */
+#define PLL_TYPE4 0x00008000 /**< 48MHz, 4 dividers */
+#define PLL_TYPE5 0x00018000 /**< 25MHz, 4 dividers */
+#define PLL_TYPE6 0x00028000 /**< 100/200 or 120/240 only */
+#define PLL_TYPE7 0x00038000 /**< 25MHz, 4 dividers */
+
+/* ILP clock */
+#define ILP_CLOCK 32000
+
+/* ALP clock on pre-PMU chips */
+#define ALP_CLOCK 20000000
+
+#ifdef CFG_SIM
+#define NS_ALP_CLOCK 84922
+#define NS_SLOW_ALP_CLOCK 84922
+#define NS_CPU_CLOCK 534500
+#define NS_SLOW_CPU_CLOCK 534500
+#define NS_SI_CLOCK 271750
+#define NS_SLOW_SI_CLOCK 271750
+#define NS_FAST_MEM_CLOCK 271750
+#define NS_MEM_CLOCK 271750
+#define NS_SLOW_MEM_CLOCK 271750
+#else
+#define NS_ALP_CLOCK 125000000
+#define NS_SLOW_ALP_CLOCK 100000000
+#define NS_CPU_CLOCK 1000000000
+#define NS_SLOW_CPU_CLOCK 800000000
+#define NS_SI_CLOCK 250000000
+#define NS_SLOW_SI_CLOCK 200000000
+#define NS_FAST_MEM_CLOCK 800000000
+#define NS_MEM_CLOCK 533000000
+#define NS_SLOW_MEM_CLOCK 400000000
+#endif /* CFG_SIM */
+
+/* HT clock */
+#define HT_CLOCK 80000000
+
+/* corecontrol */
+#define CC_UARTCLKO 0x00000001 /**< Drive UART with internal clock */
+#define CC_SE 0x00000002 /**< sync clk out enable (corerev >= 3) */
+#define CC_ASYNCGPIO 0x00000004 /**< 1=generate GPIO interrupt without backplane clock */
+#define	CC_UARTCLKEN	0x00000008	/**< enable UART Clock (corerev >= 21) */
+#define	CC_RBG_RESET	0x00000040	/**< Reset RBG block (corerev >= 65) */
+
+/* retention_ctl */
+#define RCTL_MEM_RET_SLEEP_LOG_SHIFT 29
+#define RCTL_MEM_RET_SLEEP_LOG_MASK (1 << RCTL_MEM_RET_SLEEP_LOG_SHIFT)
+
+/* 4321 chipcontrol */
+#define CHIPCTRL_4321_PLL_DOWN 0x800000 /**< serdes PLL down override */
+
+/* Fields in the otpstatus register in rev >= 21 */
+#define OTPS_OL_MASK 0x000000ff
+#define OTPS_OL_MFG 0x00000001 /**< manuf row is locked */
+#define OTPS_OL_OR1 0x00000002 /**< otp redundancy row 1 is locked */
+#define OTPS_OL_OR2 0x00000004 /**< otp redundancy row 2 is locked */
+#define OTPS_OL_GU 0x00000008 /**< general use region is locked */
+#define OTPS_GUP_MASK 0x00000f00
+#define OTPS_GUP_SHIFT 8
+#define OTPS_GUP_HW 0x00000100 /**< h/w subregion is programmed */
+#define OTPS_GUP_SW 0x00000200 /**< s/w subregion is programmed */
+#define OTPS_GUP_CI 0x00000400 /**< chipid/pkgopt subregion is programmed */
+#define OTPS_GUP_FUSE 0x00000800 /**< fuse subregion is programmed */
+#define OTPS_READY 0x00001000
+#define OTPS_RV(x) (1 << (16 + (x))) /**< redundancy entry valid */
+#define OTPS_RV_MASK 0x0fff0000
+#define OTPS_PROGOK 0x40000000
+
+/* Fields in the otpcontrol register in rev >= 21 */
+#define OTPC_PROGSEL 0x00000001
+#define OTPC_PCOUNT_MASK 0x0000000e
+#define OTPC_PCOUNT_SHIFT 1
+#define OTPC_VSEL_MASK 0x000000f0
+#define OTPC_VSEL_SHIFT 4
+#define OTPC_TMM_MASK 0x00000700
+#define OTPC_TMM_SHIFT 8
+#define OTPC_ODM 0x00000800
+#define OTPC_PROGEN 0x80000000
+
+/* Fields in the 40nm otpcontrol register in rev >= 40 */
+#define OTPC_40NM_PROGSEL_SHIFT 0
+#define OTPC_40NM_PCOUNT_SHIFT 1
+#define OTPC_40NM_PCOUNT_WR 0xA
+#define OTPC_40NM_PCOUNT_V1X 0xB
+#define OTPC_40NM_REGCSEL_SHIFT 5
+#define OTPC_40NM_REGCSEL_DEF 0x4
+#define OTPC_40NM_PROGIN_SHIFT 8
+#define OTPC_40NM_R2X_SHIFT 10
+#define OTPC_40NM_ODM_SHIFT 11
+#define OTPC_40NM_DF_SHIFT 15
+#define OTPC_40NM_VSEL_SHIFT 16
+#define OTPC_40NM_VSEL_WR 0xA
+#define OTPC_40NM_VSEL_V1X 0xA
+#define OTPC_40NM_VSEL_R1X 0x5
+#define OTPC_40NM_COFAIL_SHIFT 30
+
+#define OTPC1_CPCSEL_SHIFT 0
+#define OTPC1_CPCSEL_DEF 6
+#define OTPC1_TM_SHIFT 8
+#define OTPC1_TM_WR 0x84
+#define OTPC1_TM_V1X 0x84
+#define OTPC1_TM_R1X 0x4
+#define OTPC1_CLK_EN_MASK 0x00020000
+#define OTPC1_CLK_DIV_MASK 0x00FC0000
+
+/* Fields in otpprog in rev >= 21 and HND OTP */
+#define OTPP_COL_MASK 0x000000ff
+#define OTPP_COL_SHIFT 0
+#define OTPP_ROW_MASK 0x0000ff00
+#define OTPP_ROW_MASK9 0x0001ff00 /* for ccrev >= 49 */
+#define OTPP_ROW_SHIFT 8
+#define OTPP_OC_MASK 0x0f000000
+#define OTPP_OC_SHIFT 24
+#define OTPP_READERR 0x10000000
+#define OTPP_VALUE_MASK 0x20000000
+#define OTPP_VALUE_SHIFT 29
+#define OTPP_START_BUSY 0x80000000
+#define OTPP_READ 0x40000000 /* HND OTP */
+
+/* Fields in otplayout register */
+#define OTPL_HWRGN_OFF_MASK 0x00000FFF
+#define OTPL_HWRGN_OFF_SHIFT 0
+#define OTPL_WRAP_REVID_MASK 0x00F80000
+#define OTPL_WRAP_REVID_SHIFT 19
+#define OTPL_WRAP_TYPE_MASK 0x00070000
+#define OTPL_WRAP_TYPE_SHIFT 16
+#define OTPL_WRAP_TYPE_65NM 0
+#define OTPL_WRAP_TYPE_40NM 1
+#define OTPL_WRAP_TYPE_28NM 2
+#define OTPL_WRAP_TYPE_16NM 3
+#define OTPL_WRAP_TYPE_7NM 4
+#define OTPL_ROW_SIZE_MASK 0x0000F000
+#define OTPL_ROW_SIZE_SHIFT 12
+
+/* otplayout reg corerev >= 36 */
+#define OTP_CISFORMAT_NEW 0x80000000
+
+/* Opcodes for OTPP_OC field */
+#define OTPPOC_READ 0
+#define OTPPOC_BIT_PROG 1
+#define OTPPOC_VERIFY 3
+#define OTPPOC_INIT 4
+#define OTPPOC_SET 5
+#define OTPPOC_RESET 6
+#define OTPPOC_OCST 7
+#define OTPPOC_ROW_LOCK 8
+#define OTPPOC_PRESCN_TEST 9
+
+/* Opcodes for OTPP_OC field (40NM) */
+#define OTPPOC_READ_40NM 0
+#define OTPPOC_PROG_ENABLE_40NM 1
+#define OTPPOC_PROG_DISABLE_40NM 2
+#define OTPPOC_VERIFY_40NM 3
+#define OTPPOC_WORD_VERIFY_1_40NM 4
+#define OTPPOC_ROW_LOCK_40NM 5
+#define OTPPOC_STBY_40NM 6
+#define OTPPOC_WAKEUP_40NM 7
+#define OTPPOC_WORD_VERIFY_0_40NM 8
+#define OTPPOC_PRESCN_TEST_40NM 9
+#define OTPPOC_BIT_PROG_40NM 10
+#define OTPPOC_WORDPROG_40NM 11
+#define OTPPOC_BURNIN_40NM 12
+#define OTPPOC_AUTORELOAD_40NM 13
+#define OTPPOC_OVST_READ_40NM 14
+#define OTPPOC_OVST_PROG_40NM 15
+
+/* Opcodes for OTPP_OC field (28NM) */
+#define OTPPOC_READ_28NM 0
+#define OTPPOC_READBURST_28NM 1
+#define OTPPOC_PROG_ENABLE_28NM 2
+#define OTPPOC_PROG_DISABLE_28NM 3
+#define OTPPOC_PRESCREEN_28NM 4
+#define OTPPOC_PRESCREEN_RP_28NM 5
+#define OTPPOC_FLUSH_28NM 6
+#define OTPPOC_NOP_28NM 7
+#define OTPPOC_PROG_ECC_28NM 8
+#define OTPPOC_PROG_ECC_READ_28NM 9
+#define OTPPOC_PROG_28NM 10
+#define OTPPOC_PROGRAM_RP_28NM 11
+#define OTPPOC_PROGRAM_OVST_28NM 12
+#define OTPPOC_RELOAD_28NM 13
+#define OTPPOC_ERASE_28NM 14
+#define OTPPOC_LOAD_RF_28NM 15
+#define OTPPOC_CTRL_WR_28NM 16
+#define OTPPOC_CTRL_RD_28NM 17
+#define OTPPOC_READ_HP_28NM 18
+#define OTPPOC_READ_OVST_28NM 19
+#define OTPPOC_READ_VERIFY0_28NM 20
+#define OTPPOC_READ_VERIFY1_28NM 21
+#define OTPPOC_READ_FORCE0_28NM 22
+#define OTPPOC_READ_FORCE1_28NM 23
+#define OTPPOC_BURNIN_28NM 24
+#define OTPPOC_PROGRAM_LOCK_28NM 25
+#define OTPPOC_PROGRAM_TESTCOL_28NM 26
+#define OTPPOC_READ_TESTCOL_28NM 27
+#define OTPPOC_READ_FOUT_28NM 28
+#define OTPPOC_SFT_RESET_28NM 29
+
+#define OTPP_OC_MASK_28NM 0x0f800000
+#define OTPP_OC_SHIFT_28NM 23
+
+/* OTPControl bitmap for GCI rev >= 7 */
+#define OTPC_PROGEN_28NM 0x8
+#define OTPC_DBLERRCLR 0x20
+#define OTPC_CLK_EN_MASK 0x00000040
+#define OTPC_CLK_DIV_MASK 0x00000F80
+#define OTPC_FORCE_OTP_PWR_DIS 0x00008000
+
+/* Fields in otplayoutextension */
+#define OTPLAYOUTEXT_FUSE_MASK 0x3FF
+
+/* Jtagm characteristics that appeared at a given corerev */
+#define JTAGM_CREV_OLD 10 /**< Old command set, 16bit max IR */
+#define JTAGM_CREV_IRP 22 /**< Able to do pause-ir */
+#define JTAGM_CREV_RTI 28 /**< Able to do return-to-idle */
+
+/* jtagcmd */
+#define JCMD_START 0x80000000
+#define JCMD_BUSY 0x80000000
+#define JCMD_STATE_MASK 0x60000000
+#define JCMD_STATE_TLR 0x00000000 /**< Test-logic-reset */
+#define JCMD_STATE_PIR 0x20000000 /**< Pause IR */
+#define JCMD_STATE_PDR 0x40000000 /**< Pause DR */
+#define JCMD_STATE_RTI 0x60000000 /**< Run-test-idle */
+#define JCMD0_ACC_MASK 0x0000f000
+#define JCMD0_ACC_IRDR 0x00000000
+#define JCMD0_ACC_DR 0x00001000
+#define JCMD0_ACC_IR 0x00002000
+#define JCMD0_ACC_RESET 0x00003000
+#define JCMD0_ACC_IRPDR 0x00004000
+#define JCMD0_ACC_PDR 0x00005000
+#define JCMD0_IRW_MASK 0x00000f00
+#define JCMD_ACC_MASK 0x000f0000 /**< Changes for corerev 11 */
+#define JCMD_ACC_IRDR 0x00000000
+#define JCMD_ACC_DR 0x00010000
+#define JCMD_ACC_IR 0x00020000
+#define JCMD_ACC_RESET 0x00030000
+#define JCMD_ACC_IRPDR 0x00040000
+#define JCMD_ACC_PDR 0x00050000
+#define JCMD_ACC_PIR 0x00060000
+#define JCMD_ACC_IRDR_I 0x00070000 /**< rev 28: return to run-test-idle */
+#define JCMD_ACC_DR_I 0x00080000 /**< rev 28: return to run-test-idle */
+#define JCMD_IRW_MASK 0x00001f00
+#define JCMD_IRW_SHIFT 8
+#define JCMD_DRW_MASK 0x0000003f
+
+/* jtagctrl */
+#define JCTRL_FORCE_CLK 4 /**< Force clock */
+#define JCTRL_EXT_EN 2 /**< Enable external targets */
+#define JCTRL_EN 1 /**< Enable Jtag master */
+#define JCTRL_TAPSEL_BIT 0x00000008 /**< JtagMasterCtrl tap_sel bit */
+
+/* swdmasterctrl */
+#define SWDCTRL_INT_EN 8 /**< Enable internal targets */
+#define SWDCTRL_FORCE_CLK 4 /**< Force clock */
+#define SWDCTRL_OVJTAG 2 /**< Enable shared SWD/JTAG pins */
+#define SWDCTRL_EN 1 /**< Enable Jtag master */
+
+/* Fields in clkdiv */
+#define CLKD_SFLASH 0x1f000000
+#define CLKD_SFLASH_SHIFT 24
+#define CLKD_OTP 0x000f0000
+#define CLKD_OTP_SHIFT 16
+#define CLKD_JTAG 0x00000f00
+#define CLKD_JTAG_SHIFT 8
+#define CLKD_UART 0x000000ff
+
+#define CLKD2_SROM 0x00000007
+#define CLKD2_SROMDIV_32 0
+#define CLKD2_SROMDIV_64 1
+#define CLKD2_SROMDIV_96 2
+#define CLKD2_SROMDIV_128 3
+#define CLKD2_SROMDIV_192 4
+#define CLKD2_SROMDIV_256 5
+#define CLKD2_SROMDIV_384 6
+#define CLKD2_SROMDIV_512 7
+#define CLKD2_SWD 0xf8000000
+#define CLKD2_SWD_SHIFT 27
+
+/* intstatus/intmask */
+#define CI_GPIO 0x00000001 /**< gpio intr */
+#define CI_EI 0x00000002 /**< extif intr (corerev >= 3) */
+#define CI_TEMP 0x00000004 /**< temp. ctrl intr (corerev >= 15) */
+#define CI_SIRQ 0x00000008 /**< serial IRQ intr (corerev >= 15) */
+#define CI_ECI 0x00000010 /**< eci intr (corerev >= 21) */
+#define CI_PMU 0x00000020 /**< pmu intr (corerev >= 21) */
+#define CI_UART 0x00000040 /**< uart intr (corerev >= 21) */
+#define CI_WECI 0x00000080 /* eci wakeup intr (corerev >= 21) */
+#define CI_SPMI 0x00100000 /* SPMI (corerev >= 65) */
+#define CI_RNG 0x00200000 /**< rng intr (corerev >= 65) */
+#define CI_SSRESET_F0 0x10000000 /**< ss reset occurred */
+#define CI_SSRESET_F1 0x20000000 /**< ss reset occurred */
+#define CI_SSRESET_F2 0x40000000 /**< ss reset occurred */
+#define CI_WDRESET 0x80000000 /**< watchdog reset occurred */
+
+/* slow_clk_ctl */
+#define SCC_SS_MASK 0x00000007 /**< slow clock source mask */
+#define SCC_SS_LPO 0x00000000 /**< source of slow clock is LPO */
+#define SCC_SS_XTAL 0x00000001 /**< source of slow clock is crystal */
+#define SCC_SS_PCI 0x00000002 /**< source of slow clock is PCI */
+#define	SCC_LF		0x00000200	/**< LPOFreqSel, 1: 160kHz, 0: 32kHz */
+#define SCC_LP 0x00000400 /**< LPOPowerDown, 1: LPO is disabled,
+ * 0: LPO is enabled
+ */
+#define SCC_FS 0x00000800 /**< ForceSlowClk, 1: sb/cores running on slow clock,
+ * 0: power logic control
+ */
+#define SCC_IP 0x00001000 /**< IgnorePllOffReq, 1/0: power logic ignores/honors
+ * PLL clock disable requests from core
+ */
+#define SCC_XC 0x00002000 /**< XtalControlEn, 1/0: power logic does/doesn't
+ * disable crystal when appropriate
+ */
+#define SCC_XP 0x00004000 /**< XtalPU (RO), 1/0: crystal running/disabled */
+#define SCC_CD_MASK 0xffff0000 /**< ClockDivider (SlowClk = 1/(4+divisor)) */
+#define SCC_CD_SHIFT 16
+
+/* system_clk_ctl */
+#define SYCC_IE 0x00000001 /**< ILPen: Enable Idle Low Power */
+#define SYCC_AE 0x00000002 /**< ALPen: Enable Active Low Power */
+#define SYCC_FP 0x00000004 /**< ForcePLLOn */
+#define	SYCC_AR		0x00000008	/**< Force ALP (or HT if ALPen is not set) */
+#define SYCC_HR 0x00000010 /**< Force HT */
+#define	SYCC_CD_MASK	0xffff0000	/**< ClkDiv (ILP = 1/(4 * (divisor + 1))) */
+#define SYCC_CD_SHIFT 16
+
+/* watchdogcounter */
+/* WL sub-system reset */
+#define WD_SSRESET_PCIE_F0_EN 0x10000000
+/* BT sub-system reset */
+#define WD_SSRESET_PCIE_F1_EN 0x20000000
+#define WD_SSRESET_PCIE_F2_EN 0x40000000
+/* Both WL and BT sub-system reset */
+#define WD_SSRESET_PCIE_ALL_FN_EN 0x80000000
+#define WD_COUNTER_MASK 0x0fffffff
+#define WD_ENABLE_MASK \
+ (WD_SSRESET_PCIE_F0_EN | WD_SSRESET_PCIE_F1_EN | \
+ WD_SSRESET_PCIE_F2_EN | WD_SSRESET_PCIE_ALL_FN_EN)
+
+/* Indirect backplane access */
+#define BPIA_BYTEEN 0x0000000f
+#define BPIA_SZ1 0x00000001
+#define BPIA_SZ2 0x00000003
+#define BPIA_SZ4 0x00000007
+#define BPIA_SZ8 0x0000000f
+#define BPIA_WRITE 0x00000100
+#define BPIA_START 0x00000200
+#define BPIA_BUSY 0x00000200
+#define BPIA_ERROR 0x00000400
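+
+/* Illustrative sketch (not part of the original header): a 4-byte
+ * indirect backplane read using the bp_* registers in chipcregs_t and
+ * the BPIA_* bits above - program the address, start the access, poll
+ * BUSY, then check ERROR before consuming the data. Hypothetical
+ * example guard; real code bounds the busy wait.
+ */
+#ifdef CHIPC_USAGE_EXAMPLES
+static int
+chipc_example_bp_read32(volatile chipcregs_t *cc,
+	uint32 addrhigh, uint32 addrlow, uint32 *data)
+{
+	cc->bp_addrhigh = addrhigh;
+	cc->bp_addrlow = addrlow;
+	cc->bp_indaccess = BPIA_SZ4 | BPIA_START;	/* 4-byte read */
+
+	while (cc->bp_indaccess & BPIA_BUSY)
+		;	/* spin; a real driver would time out */
+
+	if (cc->bp_indaccess & BPIA_ERROR)
+		return -1;
+
+	*data = cc->bp_data;
+	return 0;
+}
+#endif /* CHIPC_USAGE_EXAMPLES */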
+
+/* pcmcia/prog/flash_config */
+#define CF_EN 0x00000001 /**< enable */
+#define CF_EM_MASK 0x0000000e /**< mode */
+#define CF_EM_SHIFT 1
+#define CF_EM_FLASH 0 /**< flash/asynchronous mode */
+#define CF_EM_SYNC 2 /**< synchronous mode */
+#define CF_EM_PCMCIA 4 /**< pcmcia mode */
+#define CF_DS 0x00000010 /**< destsize: 0=8bit, 1=16bit */
+#define CF_BS 0x00000020 /**< byteswap */
+#define CF_CD_MASK 0x000000c0 /**< clock divider */
+#define CF_CD_SHIFT 6
+#define CF_CD_DIV2 0x00000000 /**< backplane/2 */
+#define CF_CD_DIV3 0x00000040 /**< backplane/3 */
+#define CF_CD_DIV4 0x00000080 /**< backplane/4 */
+#define CF_CE 0x00000100 /**< clock enable */
+#define CF_SB 0x00000200 /**< size/bytestrobe (synch only) */
+
+/* pcmcia_memwait */
+#define PM_W0_MASK 0x0000003f /**< waitcount0 */
+#define PM_W1_MASK 0x00001f00 /**< waitcount1 */
+#define PM_W1_SHIFT 8
+#define PM_W2_MASK 0x001f0000 /**< waitcount2 */
+#define PM_W2_SHIFT 16
+#define PM_W3_MASK 0x1f000000 /**< waitcount3 */
+#define PM_W3_SHIFT 24
+
+/* pcmcia_attrwait */
+#define PA_W0_MASK 0x0000003f /**< waitcount0 */
+#define PA_W1_MASK 0x00001f00 /**< waitcount1 */
+#define PA_W1_SHIFT 8
+#define PA_W2_MASK 0x001f0000 /**< waitcount2 */
+#define PA_W2_SHIFT 16
+#define PA_W3_MASK 0x1f000000 /**< waitcount3 */
+#define PA_W3_SHIFT 24
+
+/* pcmcia_iowait */
+#define PI_W0_MASK 0x0000003f /**< waitcount0 */
+#define PI_W1_MASK 0x00001f00 /**< waitcount1 */
+#define PI_W1_SHIFT 8
+#define PI_W2_MASK 0x001f0000 /**< waitcount2 */
+#define PI_W2_SHIFT 16
+#define PI_W3_MASK 0x1f000000 /**< waitcount3 */
+#define PI_W3_SHIFT 24
+
+/* prog_waitcount */
+#define PW_W0_MASK 0x0000001f /**< waitcount0 */
+#define PW_W1_MASK 0x00001f00 /**< waitcount1 */
+#define PW_W1_SHIFT 8
+#define PW_W2_MASK 0x001f0000 /**< waitcount2 */
+#define PW_W2_SHIFT 16
+#define PW_W3_MASK 0x1f000000 /**< waitcount3 */
+#define PW_W3_SHIFT 24
+
+#define PW_W0 0x0000000c
+#define PW_W1 0x00000a00
+#define PW_W2 0x00020000
+#define PW_W3 0x01000000
+
+/* flash_waitcount */
+#define FW_W0_MASK 0x0000003f /**< waitcount0 */
+#define FW_W1_MASK 0x00001f00 /**< waitcount1 */
+#define FW_W1_SHIFT 8
+#define FW_W2_MASK 0x001f0000 /**< waitcount2 */
+#define FW_W2_SHIFT 16
+#define FW_W3_MASK 0x1f000000 /**< waitcount3 */
+#define FW_W3_SHIFT 24
+
+/* When Srom support present, fields in sromcontrol */
+#define SRC_START 0x80000000
+#define SRC_BUSY 0x80000000
+#define SRC_OPCODE 0x60000000
+#define SRC_OP_READ 0x00000000
+#define SRC_OP_WRITE 0x20000000
+#define SRC_OP_WRDIS 0x40000000
+#define SRC_OP_WREN 0x60000000
+#define SRC_OTPSEL 0x00000010
+#define SRC_OTPPRESENT 0x00000020
+#define SRC_LOCK 0x00000008
+#define SRC_SIZE_MASK 0x00000006
+#define SRC_SIZE_1K 0x00000000
+#define SRC_SIZE_4K 0x00000002
+#define SRC_SIZE_16K 0x00000004
+#define SRC_SIZE_SHIFT 1
+#define SRC_PRESENT 0x00000001
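+
+/* Illustrative sketch (not part of the original header): a single SROM
+ * word read using the sromcontrol/sromaddress/sromdata registers and
+ * the SRC_* bits above. Hypothetical example guard; real code bounds
+ * the busy wait and checks SRC_PRESENT first.
+ */
+#ifdef CHIPC_USAGE_EXAMPLES
+static uint16
+chipc_example_srom_read16(volatile chipcregs_t *cc, uint32 byteoff)
+{
+	cc->sromaddress = byteoff;
+	/* SRC_OP_READ is the all-zeros opcode; OR'd in for readability */
+	cc->sromcontrol |= SRC_OP_READ | SRC_START;
+
+	while (cc->sromcontrol & SRC_BUSY)
+		;	/* spin; a real driver would time out */
+
+	return (uint16)(cc->sromdata & 0xffffu);
+}
+#endif /* CHIPC_USAGE_EXAMPLES */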
+
+/* Fields in pmucontrol */
+#define PCTL_ILP_DIV_MASK 0xffff0000
+#define PCTL_ILP_DIV_SHIFT 16
+#define PCTL_LQ_REQ_EN 0x00008000
+#define PCTL_PLL_PLLCTL_UPD 0x00000400 /**< rev 2 */
+#define PCTL_NOILP_ON_WAIT 0x00000200 /**< rev 1 */
+#define PCTL_HT_REQ_EN 0x00000100
+#define PCTL_ALP_REQ_EN 0x00000080
+#define PCTL_XTALFREQ_MASK 0x0000007c
+#define PCTL_XTALFREQ_SHIFT 2
+#define PCTL_ILP_DIV_EN 0x00000002
+#define PCTL_LPO_SEL 0x00000001
+
+/* Fields in pmucontrol_ext */
+#define PCTL_EXT_FAST_TRANS_ENAB 0x00000001u
+#define PCTL_EXT_USE_LHL_TIMER 0x00000010u
+#define PCTL_EXT_FASTLPO_ENAB 0x00000080u
+#define PCTL_EXT_FASTLPO_SWENAB 0x00000200u
+#define PCTL_EXT_FASTSEQ_ENAB 0x00001000u
+#define PCTL_EXT_FASTLPO_PCIE_SWENAB 0x00004000u /**< rev33 for FLL1M */
+#define PCTL_EXT_FASTLPO_SB_SWENAB 0x00008000u /**< rev36 for FLL1M */
+#define PCTL_EXT_REQ_MIRROR_ENAB 0x00010000u /**< rev36 for ReqMirrorEn */
+
+#define DEFAULT_43012_MIN_RES_MASK 0x0f8bfe77
+
+/* Retention Control */
+#define PMU_RCTL_CLK_DIV_SHIFT 0
+#define PMU_RCTL_CHAIN_LEN_SHIFT 12
+#define PMU_RCTL_MACPHY_DISABLE_SHIFT 26
+#define PMU_RCTL_MACPHY_DISABLE_MASK (1 << 26)
+#define PMU_RCTL_LOGIC_DISABLE_SHIFT 27
+#define PMU_RCTL_LOGIC_DISABLE_MASK (1 << 27)
+#define PMU_RCTL_MEMSLP_LOG_SHIFT 28
+#define PMU_RCTL_MEMSLP_LOG_MASK (1 << 28)
+#define PMU_RCTL_MEMRETSLP_LOG_SHIFT 29
+#define PMU_RCTL_MEMRETSLP_LOG_MASK (1 << 29)
+
+/* Retention Group Control */
+#define PMU_RCTLGRP_CHAIN_LEN_SHIFT 0
+#define PMU_RCTLGRP_RMODE_ENABLE_SHIFT 14
+#define PMU_RCTLGRP_RMODE_ENABLE_MASK (1 << 14)
+#define PMU_RCTLGRP_DFT_ENABLE_SHIFT 15
+#define PMU_RCTLGRP_DFT_ENABLE_MASK (1 << 15)
+#define PMU_RCTLGRP_NSRST_DISABLE_SHIFT 16
+#define PMU_RCTLGRP_NSRST_DISABLE_MASK (1 << 16)
+
+/* Fields in clkstretch */
+#define CSTRETCH_HT 0xffff0000
+#define CSTRETCH_ALP 0x0000ffff
+#define CSTRETCH_REDUCE_8 0x00080008
+
+/* gpiotimerval */
+#define GPIO_ONTIME_SHIFT 16
+
+/* clockcontrol_n */
+/* Some pll types use fewer bits than are present in some of these (n or m) masks */
+#define CN_N1_MASK 0x3f /**< n1 control */
+#define CN_N2_MASK 0x3f00 /**< n2 control */
+#define CN_N2_SHIFT 8
+#define CN_PLLC_MASK 0xf0000 /**< pll control */
+#define CN_PLLC_SHIFT 16
+
+/* clockcontrol_sb/pci/uart */
+#define CC_M1_MASK 0x3f /**< m1 control */
+#define CC_M2_MASK 0x3f00 /**< m2 control */
+#define CC_M2_SHIFT 8
+#define CC_M3_MASK 0x3f0000 /**< m3 control */
+#define CC_M3_SHIFT 16
+#define CC_MC_MASK 0x1f000000 /**< mux control */
+#define CC_MC_SHIFT 24
+
+/* N3M Clock control magic field values */
+#define CC_F6_2 0x02 /**< A factor of 2 in */
+#define CC_F6_3 0x03 /**< 6-bit fields like */
+#define CC_F6_4 0x05 /**< N1, M1 or M3 */
+#define CC_F6_5 0x09
+#define CC_F6_6 0x11
+#define CC_F6_7 0x21
+
+#define CC_F5_BIAS 5 /**< 5-bit fields get this added */
+
+#define CC_MC_BYPASS 0x08
+#define CC_MC_M1 0x04
+#define CC_MC_M1M2 0x02
+#define CC_MC_M1M2M3 0x01
+#define CC_MC_M1M3 0x11
+
+/* Type 2 Clock control magic field values */
+#define CC_T2_BIAS 2 /**< n1, n2, m1 & m3 bias */
+#define CC_T2M2_BIAS 3 /**< m2 bias */
+
+#define CC_T2MC_M1BYP 1
+#define CC_T2MC_M2BYP 2
+#define CC_T2MC_M3BYP 4
+
+/* Type 6 Clock control magic field values */
+#define CC_T6_MMASK 1 /**< bits of interest in m */
+#define CC_T6_M0 120000000 /**< sb clock for m = 0 */
+#define CC_T6_M1 100000000 /**< sb clock for m = 1 */
+#define SB2MIPS_T6(sb) (2 * (sb))
+
+/* Common clock base */
+#define CC_CLOCK_BASE1 24000000 /**< Half the clock freq */
+#define CC_CLOCK_BASE2 12500000 /**< Alternate crystal on some PLLs */
+
+/* Flash types in the chipcommon capabilities register */
+#define FLASH_NONE 0x000 /**< No flash */
+#define SFLASH_ST 0x100 /**< ST serial flash */
+#define SFLASH_AT 0x200 /**< Atmel serial flash */
+#define NFLASH 0x300 /**< NAND flash */
+#define PFLASH 0x700 /**< Parallel flash */
+#define QSPIFLASH_ST 0x800
+#define QSPIFLASH_AT 0x900
+
+/* Bits in the ExtBus config registers */
+#define CC_CFG_EN 0x0001 /**< Enable */
+#define CC_CFG_EM_MASK 0x000e /**< Extif Mode */
+#define CC_CFG_EM_ASYNC 0x0000 /**< Async/Parallel flash */
+#define CC_CFG_EM_SYNC 0x0002 /**< Synchronous */
+#define CC_CFG_EM_PCMCIA 0x0004 /**< PCMCIA */
+#define CC_CFG_EM_IDE 0x0006 /**< IDE */
+#define CC_CFG_DS 0x0010 /**< Data size, 0=8bit, 1=16bit */
+#define CC_CFG_CD_MASK 0x00e0 /**< Sync: Clock divisor, rev >= 20 */
+#define CC_CFG_CE 0x0100 /**< Sync: Clock enable, rev >= 20 */
+#define CC_CFG_SB 0x0200 /**< Sync: Size/Bytestrobe, rev >= 20 */
+#define CC_CFG_IS 0x0400 /**< Extif Sync Clk Select, rev >= 20 */
+
+/* ExtBus address space */
+#define CC_EB_BASE 0x1a000000 /**< Chipc ExtBus base address */
+#define CC_EB_PCMCIA_MEM 0x1a000000 /**< PCMCIA 0 memory base address */
+#define CC_EB_PCMCIA_IO 0x1a200000 /**< PCMCIA 0 I/O base address */
+#define CC_EB_PCMCIA_CFG 0x1a400000 /**< PCMCIA 0 config base address */
+#define CC_EB_IDE 0x1a800000 /**< IDE memory base */
+#define CC_EB_PCMCIA1_MEM 0x1a800000 /**< PCMCIA 1 memory base address */
+#define CC_EB_PCMCIA1_IO 0x1aa00000 /**< PCMCIA 1 I/O base address */
+#define CC_EB_PCMCIA1_CFG 0x1ac00000 /**< PCMCIA 1 config base address */
+#define CC_EB_PROGIF 0x1b000000 /**< ProgIF Async/Sync base address */
+
+/* Start/busy bit in flashcontrol */
+#define SFLASH_OPCODE 0x000000ff
+#define SFLASH_ACTION 0x00000700
+#define SFLASH_CS_ACTIVE 0x00001000 /**< Chip Select Active, rev >= 20 */
+#define SFLASH_START 0x80000000
+#define SFLASH_BUSY SFLASH_START
+
+/* flashcontrol action codes */
+#define SFLASH_ACT_OPONLY 0x0000 /**< Issue opcode only */
+#define SFLASH_ACT_OP1D 0x0100 /**< opcode + 1 data byte */
+#define SFLASH_ACT_OP3A 0x0200 /**< opcode + 3 addr bytes */
+#define SFLASH_ACT_OP3A1D 0x0300 /**< opcode + 3 addr & 1 data bytes */
+#define SFLASH_ACT_OP3A4D 0x0400 /**< opcode + 3 addr & 4 data bytes */
+#define SFLASH_ACT_OP3A4X4D 0x0500 /**< opcode + 3 addr, 4 don't care & 4 data bytes */
+#define SFLASH_ACT_OP3A1X4D 0x0700 /**< opcode + 3 addr, 1 don't care & 4 data bytes */
+
+/* flashcontrol action+opcodes for ST flashes */
+#define SFLASH_ST_WREN 0x0006 /**< Write Enable */
+#define SFLASH_ST_WRDIS 0x0004 /**< Write Disable */
+#define SFLASH_ST_RDSR 0x0105 /**< Read Status Register */
+#define SFLASH_ST_WRSR 0x0101 /**< Write Status Register */
+#define SFLASH_ST_READ 0x0303 /**< Read Data Bytes */
+#define SFLASH_ST_PP 0x0302 /**< Page Program */
+#define SFLASH_ST_SE 0x02d8 /**< Sector Erase */
+#define SFLASH_ST_BE 0x00c7 /**< Bulk Erase */
+#define SFLASH_ST_DP 0x00b9 /**< Deep Power-down */
+#define SFLASH_ST_RES 0x03ab /**< Read Electronic Signature */
+#define SFLASH_ST_CSA 0x1000 /**< Keep chip select asserted */
+#define SFLASH_ST_SSE 0x0220 /**< Sub-sector Erase */
+
+#define SFLASH_ST_READ4B 0x6313 /* Read Data Bytes in 4Byte address */
+#define SFLASH_ST_PP4B 0x6312 /* Page Program in 4Byte address */
+#define SFLASH_ST_SE4B 0x62dc /* Sector Erase in 4Byte address */
+#define SFLASH_ST_SSE4B 0x6221 /* Sub-sector Erase */
+
+#define SFLASH_MXIC_RDID 0x0390 /* Read Manufacture ID */
+#define SFLASH_MXIC_MFID 0xc2 /* MXIC Manufacture ID */
+
+#define SFLASH_WINBOND_RDID 0x0390 /* Read Manufacture ID */
+#define SFLASH_WINBOND_MFID 0xef /* Winbond Manufacture ID */
+
+/* Status register bits for ST flashes */
+#define SFLASH_ST_WIP 0x01 /**< Write In Progress */
+#define SFLASH_ST_WEL 0x02 /**< Write Enable Latch */
+#define SFLASH_ST_BP_MASK 0x1c /**< Block Protect */
+#define SFLASH_ST_BP_SHIFT 2
+#define SFLASH_ST_SRWD 0x80 /**< Status Register Write Disable */
+
+/* flashcontrol action+opcodes for Atmel flashes */
+#define SFLASH_AT_READ 0x07e8
+#define SFLASH_AT_PAGE_READ 0x07d2
+/* PR9631: impossible to specify Atmel Buffer Read command */
+#define SFLASH_AT_BUF1_READ
+#define SFLASH_AT_BUF2_READ
+#define SFLASH_AT_STATUS 0x01d7
+#define SFLASH_AT_BUF1_WRITE 0x0384
+#define SFLASH_AT_BUF2_WRITE 0x0387
+#define SFLASH_AT_BUF1_ERASE_PROGRAM 0x0283
+#define SFLASH_AT_BUF2_ERASE_PROGRAM 0x0286
+#define SFLASH_AT_BUF1_PROGRAM 0x0288
+#define SFLASH_AT_BUF2_PROGRAM 0x0289
+#define SFLASH_AT_PAGE_ERASE 0x0281
+#define SFLASH_AT_BLOCK_ERASE 0x0250
+#define SFLASH_AT_BUF1_WRITE_ERASE_PROGRAM 0x0382
+#define SFLASH_AT_BUF2_WRITE_ERASE_PROGRAM 0x0385
+#define SFLASH_AT_BUF1_LOAD 0x0253
+#define SFLASH_AT_BUF2_LOAD 0x0255
+#define SFLASH_AT_BUF1_COMPARE 0x0260
+#define SFLASH_AT_BUF2_COMPARE 0x0261
+#define SFLASH_AT_BUF1_REPROGRAM 0x0258
+#define SFLASH_AT_BUF2_REPROGRAM 0x0259
+
+/* Status register bits for Atmel flashes */
+#define SFLASH_AT_READY 0x80
+#define SFLASH_AT_MISMATCH 0x40
+#define SFLASH_AT_ID_MASK 0x38
+#define SFLASH_AT_ID_SHIFT 3
+
+/* SPI register bits, corerev >= 37 */
+#define GSIO_START 0x80000000u
+#define GSIO_BUSY GSIO_START
+
+/* UART Function sel related */
+#define MUXENAB_DEF_UART_MASK 0x0000000fu
+#define MUXENAB_DEF_UART_SHIFT 0
+
+/* HOST_WAKE Function sel related */
+#define MUXENAB_DEF_HOSTWAKE_MASK 0x000000f0u /**< configure GPIO for host_wake */
+#define MUXENAB_DEF_HOSTWAKE_SHIFT 4u
+
+/* GCI UART Function sel related */
+#define MUXENAB_GCI_UART_MASK 0x00000f00u
+#define MUXENAB_GCI_UART_SHIFT 8u
+#define MUXENAB_GCI_UART_FNSEL_MASK 0x00003000u
+#define MUXENAB_GCI_UART_FNSEL_SHIFT 12u
+
+/* Mask used to decide whether MUX to be performed or not */
+#define MUXENAB_DEF_GETIX(val, name) \
+ ((((val) & MUXENAB_DEF_ ## name ## _MASK) >> MUXENAB_DEF_ ## name ## _SHIFT) - 1)
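+/* For example, MUXENAB_DEF_GETIX(0x2, UART) evaluates to 1: the 4-bit
+ * UART field holds 2, and GETIX converts the 1-based field encoding to
+ * a 0-based mux index. A field value of 0 means no mux is requested.
+ */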
+
+/*
+ * These are the UART port assignments, expressed as offsets from the base
+ * register. These assignments should hold for any serial port based on
+ * an 8250, 16450, or 16550(A).
+ */
+
+#define UART_RX 0 /**< In: Receive buffer (DLAB=0) */
+#define UART_TX 0 /**< Out: Transmit buffer (DLAB=0) */
+#define UART_DLL 0 /**< Out: Divisor Latch Low (DLAB=1) */
+#define UART_IER 1 /**< In/Out: Interrupt Enable Register (DLAB=0) */
+#define UART_DLM 1 /**< Out: Divisor Latch High (DLAB=1) */
+#define UART_IIR 2 /**< In: Interrupt Identity Register */
+#define UART_FCR 2 /**< Out: FIFO Control Register */
+#define UART_LCR 3 /**< Out: Line Control Register */
+#define UART_MCR 4 /**< Out: Modem Control Register */
+#define UART_LSR 5 /**< In: Line Status Register */
+#define UART_MSR 6 /**< In: Modem Status Register */
+#define UART_SCR 7 /**< I/O: Scratch Register */
+#define UART_LCR_DLAB 0x80 /**< Divisor latch access bit */
+#define UART_LCR_WLEN8 0x03 /**< Word length: 8 bits */
+#define UART_MCR_OUT2 0x08 /**< MCR GPIO out 2 */
+#define UART_MCR_LOOP 0x10 /**< Enable loopback test mode */
+#define UART_LSR_RX_FIFO 0x80 /**< Receive FIFO error */
+#define UART_LSR_TDHR 0x40 /**< Data-hold-register empty */
+#define UART_LSR_THRE 0x20 /**< Transmit-hold-register empty */
+#define UART_LSR_BREAK 0x10 /**< Break interrupt */
+#define UART_LSR_FRAMING 0x08 /**< Framing error */
+#define UART_LSR_PARITY 0x04 /**< Parity error */
+#define UART_LSR_OVERRUN 0x02 /**< Overrun error */
+#define UART_LSR_RXRDY 0x01 /**< Receiver ready */
+#define UART_FCR_FIFO_ENABLE 1 /**< FIFO control register bit controlling FIFO enable/disable */
+
+/* Interrupt Identity Register (IIR) bits */
+#define UART_IIR_FIFO_MASK 0xc0 /**< IIR FIFO disable/enabled mask */
+#define UART_IIR_INT_MASK 0xf /**< IIR interrupt ID source */
+#define UART_IIR_MDM_CHG 0x0 /**< Modem status changed */
+#define UART_IIR_NOINT 0x1 /**< No interrupt pending */
+#define UART_IIR_THRE 0x2 /**< THR empty */
+#define UART_IIR_RCVD_DATA 0x4 /**< Received data available */
+#define UART_IIR_RCVR_STATUS 0x6 /**< Receiver status */
+#define UART_IIR_CHAR_TIME 0xc /**< Character time */
+
+/* Interrupt Enable Register (IER) bits */
+#define UART_IER_PTIME 128 /**< Programmable THRE Interrupt Mode Enable */
+#define UART_IER_EDSSI 8 /**< enable modem status interrupt */
+#define UART_IER_ELSI 4 /**< enable receiver line status interrupt */
+#define UART_IER_ETBEI 2 /**< enable transmitter holding register empty interrupt */
+#define UART_IER_ERBFI 1 /**< enable data available interrupt */
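+
+/* Illustrative sketch (not part of the original header): a polled
+ * transmit using the 8250-style offsets above - wait for the transmit
+ * holding register to empty, then write the byte. Hypothetical example
+ * guard; uart_base would point at uart0data/uart1data in chipcregs_t.
+ */
+#ifdef CHIPC_USAGE_EXAMPLES
+static void
+chipc_example_uart_putc(volatile uint8 *uart_base, uint8 c)
+{
+	while (!(uart_base[UART_LSR] & UART_LSR_THRE))
+		;	/* spin until THR empty; real code would time out */
+	uart_base[UART_TX] = c;
+}
+#endif /* CHIPC_USAGE_EXAMPLES */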
+
+/* pmustatus */
+#define PST_SLOW_WR_PENDING 0x0400
+#define PST_EXTLPOAVAIL 0x0100
+#define PST_WDRESET 0x0080
+#define PST_INTPEND 0x0040
+#define PST_SBCLKST 0x0030
+#define PST_SBCLKST_ILP 0x0010
+#define PST_SBCLKST_ALP 0x0020
+#define PST_SBCLKST_HT 0x0030
+#define PST_ALPAVAIL 0x0008
+#define PST_HTAVAIL 0x0004
+#define PST_RESINIT 0x0003
+#define PST_ILPFASTLPO 0x00010000
+
+/* pmucapabilities */
+#define PCAP_REV_MASK 0x000000ff
+#define PCAP_RC_MASK 0x00001f00
+#define PCAP_RC_SHIFT 8
+#define PCAP_TC_MASK 0x0001e000
+#define PCAP_TC_SHIFT 13
+#define PCAP_PC_MASK 0x001e0000
+#define PCAP_PC_SHIFT 17
+#define PCAP_VC_MASK 0x01e00000
+#define PCAP_VC_SHIFT 21
+#define PCAP_CC_MASK 0x1e000000
+#define PCAP_CC_SHIFT 25
+#define PCAP5_PC_MASK 0x003e0000 /**< PMU corerev >= 5 */
+#define PCAP5_PC_SHIFT 17
+#define PCAP5_VC_MASK 0x07c00000
+#define PCAP5_VC_SHIFT 22
+#define PCAP5_CC_MASK 0xf8000000
+#define PCAP5_CC_SHIFT 27
+
+/* pmucapabilities ext */
+#define PCAP_EXT_ST_NUM_SHIFT (8) /* stat timer number */
+#define PCAP_EXT_ST_NUM_MASK (0xf << PCAP_EXT_ST_NUM_SHIFT)
+#define PCAP_EXT_ST_SRC_NUM_SHIFT (12) /* stat timer source number */
+#define PCAP_EXT_ST_SRC_NUM_MASK (0xf << PCAP_EXT_ST_SRC_NUM_SHIFT)
+#define PCAP_EXT_MAC_RSRC_REQ_TMR_CNT_SHIFT (20u) /* # of MAC rsrc req timers */
+#define PCAP_EXT_MAC_RSRC_REQ_TMR_CNT_MASK (7u << PCAP_EXT_MAC_RSRC_REQ_TMR_CNT_SHIFT)
+#define PCAP_EXT_PMU_INTR_RCVR_CNT_SHIFT (23u) /* pmu int rcvr cnt */
+#define PCAP_EXT_PMU_INTR_RCVR_CNT_MASK (7u << PCAP_EXT_PMU_INTR_RCVR_CNT_SHIFT)
+
+/* pmustattimer ctrl */
+#define PMU_ST_SRC_SHIFT (0) /* stat timer source number */
+#define PMU_ST_SRC_MASK (0xff << PMU_ST_SRC_SHIFT)
+#define PMU_ST_CNT_MODE_SHIFT (10) /* stat timer count mode */
+#define PMU_ST_CNT_MODE_MASK (0x3 << PMU_ST_CNT_MODE_SHIFT)
+#define PMU_ST_EN_SHIFT (8) /* stat timer enable */
+#define PMU_ST_EN_MASK (0x1 << PMU_ST_EN_SHIFT)
+#define PMU_ST_ENAB 1
+#define PMU_ST_DISAB 0
+#define PMU_ST_INT_EN_SHIFT (9) /* stat timer interrupt enable */
+#define PMU_ST_INT_EN_MASK (0x1 << PMU_ST_INT_EN_SHIFT)
+#define PMU_ST_INT_ENAB 1
+#define PMU_ST_INT_DISAB 0
+
+/* CoreCapabilitiesExtension */
+#define PCAP_EXT_USE_MUXED_ILP_CLK_MASK 0x04000000
+
+/* PMU Resource Request Timer registers */
+/* This is based on PmuRev0 */
+#define PRRT_TIME_MASK 0x03ff
+#define PRRT_INTEN 0x0400
+/* ReqActive 25
+ * The hardware sets this field to 1 when the timer expires.
+ * Software writes this field to 1 to make immediate resource requests.
+ */
+#define PRRT_REQ_ACTIVE 0x0800 /* To check h/w status */
+#define PRRT_IMMEDIATE_RES_REQ 0x0800 /* macro for sw immediate res req */
+#define PRRT_ALP_REQ 0x1000
+#define PRRT_HT_REQ 0x2000
+#define PRRT_HQ_REQ 0x4000
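+
+/*
+ * Usage note (sketch, based on the field descriptions above): software can
+ * poll PRRT_REQ_ACTIVE to see whether a hardware request is outstanding,
+ * or write PRRT_IMMEDIATE_RES_REQ (the same bit position) to raise a
+ * resource request without waiting for the timer; PRRT_ALP_REQ and
+ * PRRT_HT_REQ presumably select the clock level being requested.
+ */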
+
+/* PMU Int Control register bits */
+#define PMU_INTC_ALP_REQ 0x1
+#define PMU_INTC_HT_REQ 0x2
+#define PMU_INTC_HQ_REQ 0x4
+
+/* bit 0 of the PMU interrupt vector is asserted if this mask is enabled */
+#define RSRC_INTR_MASK_TIMER_INT_0 1
+#define PMU_INTR_MASK_EXTWAKE_REQ_ACTIVE_0 (1 << 20)
+
+#define PMU_INT_STAT_RSRC_EVENT_INT0_SHIFT (8u)
+#define PMU_INT_STAT_RSRC_EVENT_INT0_MASK (1u << PMU_INT_STAT_RSRC_EVENT_INT0_SHIFT)
+
+/* bit 16 of the PMU interrupt vector - Stats Timer Interrupt */
+#define PMU_INT_STAT_TIMER_INT_SHIFT (16u)
+#define PMU_INT_STAT_TIMER_INT_MASK (1u << PMU_INT_STAT_TIMER_INT_SHIFT)
+
+/*
+ * bit 18 of the PMU interrupt vector - S/R self test fails
+ */
+#define PMU_INT_STAT_SR_ERR_SHIFT (18u)
+#define PMU_INT_STAT_SR_ERR_MASK (1u << PMU_INT_STAT_SR_ERR_SHIFT)
+
+/* PMU resource bit position */
+#define PMURES_BIT(bit) (1u << (bit))
+
+/* PMU resource number limit */
+#define PMURES_MAX_RESNUM 30
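+
+/*
+ * Usage sketch (illustrative): resource dependency masks are built by
+ * OR-ing PMURES_BIT() values, e.g.
+ *   uint32 min_mask = PMURES_BIT(RES4369_XTAL_PU) |
+ *                     PMURES_BIT(RES4369_ALP_AVAIL);
+ * with the per-chip RES* resource numbers defined later in this file.
+ */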
+
+/* PMU chip control0 register */
+#define PMU_CHIPCTL0 0
+
+#define PMU_CC0_4369_XTALCORESIZE_BIAS_ADJ_START_VAL (0x20 << 0)
+#define PMU_CC0_4369_XTALCORESIZE_BIAS_ADJ_START_MASK (0x3F << 0)
+#define PMU_CC0_4369_XTALCORESIZE_BIAS_ADJ_NORMAL_VAL (0xF << 6)
+#define PMU_CC0_4369B0_XTALCORESIZE_BIAS_ADJ_NORMAL_VAL (0x1A << 6)
+#define PMU_CC0_4369_XTALCORESIZE_BIAS_ADJ_NORMAL_MASK (0x3F << 6)
+#define PMU_CC0_4369_XTAL_RES_BYPASS_START_VAL (0 << 12)
+#define PMU_CC0_4369_XTAL_RES_BYPASS_START_MASK (0x7 << 12)
+#define PMU_CC0_4369_XTAL_RES_BYPASS_NORMAL_VAL (0x1 << 15)
+#define PMU_CC0_4369_XTAL_RES_BYPASS_NORMAL_MASK (0x7 << 15)
+
+/* This is not used, so it retains the reset value */
+#define PMU_CC0_4362_XTALCORESIZE_BIAS_ADJ_START_VAL (0x20u << 0u)
+
+#define PMU_CC0_4362_XTALCORESIZE_BIAS_ADJ_START_MASK (0x3Fu << 0u)
+#define PMU_CC0_4362_XTALCORESIZE_BIAS_ADJ_NORMAL_VAL (0x1Au << 6u)
+#define PMU_CC0_4362_XTALCORESIZE_BIAS_ADJ_NORMAL_MASK (0x3Fu << 6u)
+#define PMU_CC0_4362_XTAL_RES_BYPASS_START_VAL (0x00u << 12u)
+#define PMU_CC0_4362_XTAL_RES_BYPASS_START_MASK (0x07u << 12u)
+#define PMU_CC0_4362_XTAL_RES_BYPASS_NORMAL_VAL (0x02u << 15u)
+#define PMU_CC0_4362_XTAL_RES_BYPASS_NORMAL_MASK (0x07u << 15u)
+
+#define PMU_CC0_4378_XTALCORESIZE_BIAS_ADJ_START_VAL (0x20 << 0)
+#define PMU_CC0_4378_XTALCORESIZE_BIAS_ADJ_START_MASK (0x3F << 0)
+#define PMU_CC0_4378_XTALCORESIZE_BIAS_ADJ_NORMAL_VAL (0x1A << 6)
+#define PMU_CC0_4378_XTALCORESIZE_BIAS_ADJ_NORMAL_MASK (0x3F << 6)
+#define PMU_CC0_4378_XTAL_RES_BYPASS_START_VAL (0 << 12)
+#define PMU_CC0_4378_XTAL_RES_BYPASS_START_MASK (0x7 << 12)
+#define PMU_CC0_4378_XTAL_RES_BYPASS_NORMAL_VAL (0x2 << 15)
+#define PMU_CC0_4378_XTAL_RES_BYPASS_NORMAL_MASK (0x7 << 15)
+
+#define PMU_CC0_4387_XTALCORESIZE_BIAS_ADJ_START_VAL (0x20 << 0)
+#define PMU_CC0_4387_XTALCORESIZE_BIAS_ADJ_START_MASK (0x3F << 0)
+#define PMU_CC0_4387_XTALCORESIZE_BIAS_ADJ_NORMAL_VAL (0x1A << 6)
+#define PMU_CC0_4387_XTALCORESIZE_BIAS_ADJ_NORMAL_MASK (0x3F << 6)
+#define PMU_CC0_4387_XTAL_RES_BYPASS_START_VAL (0 << 12)
+#define PMU_CC0_4387_XTAL_RES_BYPASS_START_MASK (0x7 << 12)
+#define PMU_CC0_4387_XTAL_RES_BYPASS_NORMAL_VAL (0x2 << 15)
+#define PMU_CC0_4387_XTAL_RES_BYPASS_NORMAL_MASK (0x7 << 15)
+#define PMU_CC0_4387_BT_PU_WAKE_MASK (0x3u << 30u)
+
+/* clock req types */
+#define PMU_CC1_CLKREQ_TYPE_SHIFT 19
+#define PMU_CC1_CLKREQ_TYPE_MASK (1 << PMU_CC1_CLKREQ_TYPE_SHIFT)
+
+#define CLKREQ_TYPE_CONFIG_OPENDRAIN 0
+#define CLKREQ_TYPE_CONFIG_PUSHPULL 1
+
+/* Power Control */
+#define PWRCTL_ENAB_MEM_CLK_GATE_SHIFT 5
+#define PWRCTL_FORCE_HW_PWR_REQ_OFF_SHIFT 6
+#define PWRCTL_AUTO_MEM_STBYRET 28
+
+/* PMU chip control1 register */
+#define PMU_CHIPCTL1 1
+#define PMU_CC1_RXC_DLL_BYPASS 0x00010000
+#define PMU_CC1_ENABLE_BBPLL_PWR_DOWN 0x00000010
+
+#define PMU_CC1_IF_TYPE_MASK 0x00000030
+#define PMU_CC1_IF_TYPE_RMII 0x00000000
+#define PMU_CC1_IF_TYPE_MII 0x00000010
+#define PMU_CC1_IF_TYPE_RGMII 0x00000020
+
+#define PMU_CC1_SW_TYPE_MASK 0x000000c0
+#define PMU_CC1_SW_TYPE_EPHY 0x00000000
+#define PMU_CC1_SW_TYPE_EPHYMII 0x00000040
+#define PMU_CC1_SW_TYPE_EPHYRMII 0x00000080
+#define PMU_CC1_SW_TYPE_RGMII 0x000000c0
+
+#define PMU_CC1_ENABLE_CLOSED_LOOP_MASK 0x00000080
+#define PMU_CC1_ENABLE_CLOSED_LOOP 0x00000000
+
+#define PMU_CC1_PWRSW_CLKSTRSTP_DELAY_MASK 0x00003F00u
+#ifdef BCM_FASTLPO_PMU
+#define PMU_CC1_PWRSW_CLKSTRSTP_DELAY 0x00002000u
+#else
+#define PMU_CC1_PWRSW_CLKSTRSTP_DELAY 0x00000400u
+#endif /* BCM_FASTLPO_PMU */
+
+/* PMU chip control2 register */
+#define PMU_CC2_CB2WL_INTR_PWRREQ_EN (1u << 13u)
+#define PMU_CC2_RFLDO3P3_PU_FORCE_ON (1u << 15u)
+#define PMU_CC2_RFLDO3P3_PU_CLEAR 0x00000000u
+
+#define PMU_CC2_WL2CDIG_I_PMU_SLEEP (1u << 16u)
+#define PMU_CHIPCTL2 2u
+#define PMU_CC2_FORCE_SUBCORE_PWR_SWITCH_ON (1u << 18u)
+#define PMU_CC2_FORCE_PHY_PWR_SWITCH_ON (1u << 19u)
+#define PMU_CC2_FORCE_VDDM_PWR_SWITCH_ON (1u << 20u)
+#define PMU_CC2_FORCE_MEMLPLDO_PWR_SWITCH_ON (1u << 21u)
+#define PMU_CC2_MASK_WL_DEV_WAKE (1u << 22u)
+#define PMU_CC2_INV_GPIO_POLARITY_PMU_WAKE (1u << 25u)
+#define PMU_CC2_GCI2_WAKE (1u << 31u)
+
+#define PMU_CC2_4369_XTALCORESIZE_BIAS_ADJ_START_VAL (0x3u << 26u)
+#define PMU_CC2_4369_XTALCORESIZE_BIAS_ADJ_START_MASK (0x3u << 26u)
+#define PMU_CC2_4369_XTALCORESIZE_BIAS_ADJ_NORMAL_VAL (0x0u << 28u)
+#define PMU_CC2_4369_XTALCORESIZE_BIAS_ADJ_NORMAL_MASK (0x3u << 28u)
+
+#define PMU_CC2_4362_XTALCORESIZE_BIAS_ADJ_START_VAL (0x3u << 26u)
+#define PMU_CC2_4362_XTALCORESIZE_BIAS_ADJ_START_MASK (0x3u << 26u)
+#define PMU_CC2_4362_XTALCORESIZE_BIAS_ADJ_NORMAL_VAL (0x0u << 28u)
+#define PMU_CC2_4362_XTALCORESIZE_BIAS_ADJ_NORMAL_MASK (0x3u << 28u)
+
+#define PMU_CC2_4378_XTALCORESIZE_BIAS_ADJ_START_VAL (0x3u << 26u)
+#define PMU_CC2_4378_XTALCORESIZE_BIAS_ADJ_START_MASK (0x3u << 26u)
+#define PMU_CC2_4378_XTALCORESIZE_BIAS_ADJ_NORMAL_VAL (0x0u << 28u)
+#define PMU_CC2_4378_XTALCORESIZE_BIAS_ADJ_NORMAL_MASK (0x3u << 28u)
+
+#define PMU_CC2_4387_XTALCORESIZE_BIAS_ADJ_START_VAL (0x3u << 26u)
+#define PMU_CC2_4387_XTALCORESIZE_BIAS_ADJ_START_MASK (0x3u << 26u)
+#define PMU_CC2_4387_XTALCORESIZE_BIAS_ADJ_NORMAL_VAL (0x0u << 28u)
+#define PMU_CC2_4387_XTALCORESIZE_BIAS_ADJ_NORMAL_MASK (0x3u << 28u)
+
+/* PMU chip control3 register */
+#define PMU_CHIPCTL3 3u
+#define PMU_CC3_ENABLE_SDIO_WAKEUP_SHIFT 19u
+#define PMU_CC3_ENABLE_RF_SHIFT 22u
+#define PMU_CC3_RF_DISABLE_IVALUE_SHIFT 23u
+
+#define PMU_CC3_4369_XTALCORESIZE_PMOS_START_VAL (0x3Fu << 0u)
+#define PMU_CC3_4369_XTALCORESIZE_PMOS_START_MASK (0x3Fu << 0u)
+#define PMU_CC3_4369_XTALCORESIZE_PMOS_NORMAL_VAL (0x3Fu << 15u)
+#define PMU_CC3_4369_XTALCORESIZE_PMOS_NORMAL_MASK (0x3Fu << 15u)
+#define PMU_CC3_4369_XTALCORESIZE_NMOS_START_VAL (0x3Fu << 6u)
+#define PMU_CC3_4369_XTALCORESIZE_NMOS_START_MASK (0x3Fu << 6u)
+#define PMU_CC3_4369_XTALCORESIZE_NMOS_NORMAL_VAL (0x3Fu << 21)
+#define PMU_CC3_4369_XTALCORESIZE_NMOS_NORMAL_MASK (0x3Fu << 21)
+#define PMU_CC3_4369_XTALSEL_BIAS_RES_START_VAL (0x2u << 12u)
+#define PMU_CC3_4369_XTALSEL_BIAS_RES_START_MASK (0x7u << 12u)
+#define PMU_CC3_4369_XTALSEL_BIAS_RES_NORMAL_VAL (0x2u << 27u)
+#define PMU_CC3_4369_XTALSEL_BIAS_RES_NORMAL_MASK (0x7u << 27u)
+
+#define PMU_CC3_4362_XTALCORESIZE_PMOS_START_VAL (0x3Fu << 0u)
+#define PMU_CC3_4362_XTALCORESIZE_PMOS_START_MASK (0x3Fu << 0u)
+#define PMU_CC3_4362_XTALCORESIZE_PMOS_NORMAL_VAL (0x3Fu << 15u)
+#define PMU_CC3_4362_XTALCORESIZE_PMOS_NORMAL_MASK (0x3Fu << 15u)
+#define PMU_CC3_4362_XTALCORESIZE_NMOS_START_VAL (0x3Fu << 6u)
+#define PMU_CC3_4362_XTALCORESIZE_NMOS_START_MASK (0x3Fu << 6u)
+#define PMU_CC3_4362_XTALCORESIZE_NMOS_NORMAL_VAL (0x3Fu << 21u)
+#define PMU_CC3_4362_XTALCORESIZE_NMOS_NORMAL_MASK (0x3Fu << 21u)
+#define PMU_CC3_4362_XTALSEL_BIAS_RES_START_VAL (0x02u << 12u)
+#define PMU_CC3_4362_XTALSEL_BIAS_RES_START_MASK (0x07u << 12u)
+/* Changed from 6 to 4 for wlan PHN and to 2 for BT PER issues */
+#define PMU_CC3_4362_XTALSEL_BIAS_RES_NORMAL_VAL (0x02u << 27u)
+#define PMU_CC3_4362_XTALSEL_BIAS_RES_NORMAL_MASK (0x07u << 27u)
+
+#define PMU_CC3_4378_XTALCORESIZE_PMOS_START_VAL (0x3F << 0)
+#define PMU_CC3_4378_XTALCORESIZE_PMOS_START_MASK (0x3F << 0)
+#define PMU_CC3_4378_XTALCORESIZE_PMOS_NORMAL_VAL (0x3F << 15)
+#define PMU_CC3_4378_XTALCORESIZE_PMOS_NORMAL_MASK (0x3F << 15)
+#define PMU_CC3_4378_XTALCORESIZE_NMOS_START_VAL (0x3F << 6)
+#define PMU_CC3_4378_XTALCORESIZE_NMOS_START_MASK (0x3F << 6)
+#define PMU_CC3_4378_XTALCORESIZE_NMOS_NORMAL_VAL (0x3F << 21)
+#define PMU_CC3_4378_XTALCORESIZE_NMOS_NORMAL_MASK (0x3F << 21)
+#define PMU_CC3_4378_XTALSEL_BIAS_RES_START_VAL (0x2 << 12)
+#define PMU_CC3_4378_XTALSEL_BIAS_RES_START_MASK (0x7 << 12)
+#define PMU_CC3_4378_XTALSEL_BIAS_RES_NORMAL_VAL (0x2 << 27)
+#define PMU_CC3_4378_XTALSEL_BIAS_RES_NORMAL_MASK (0x7 << 27)
+
+#define PMU_CC3_4387_XTALCORESIZE_PMOS_START_VAL (0x3F << 0)
+#define PMU_CC3_4387_XTALCORESIZE_PMOS_START_MASK (0x3F << 0)
+#define PMU_CC3_4387_XTALCORESIZE_PMOS_NORMAL_VAL (0x3F << 15)
+#define PMU_CC3_4387_XTALCORESIZE_PMOS_NORMAL_MASK (0x3F << 15)
+#define PMU_CC3_4387_XTALCORESIZE_NMOS_START_VAL (0x3F << 6)
+#define PMU_CC3_4387_XTALCORESIZE_NMOS_START_MASK (0x3F << 6)
+#define PMU_CC3_4387_XTALCORESIZE_NMOS_NORMAL_VAL (0x3F << 21)
+#define PMU_CC3_4387_XTALCORESIZE_NMOS_NORMAL_MASK (0x3F << 21)
+#define PMU_CC3_4387_XTALSEL_BIAS_RES_START_VAL (0x2 << 12)
+#define PMU_CC3_4387_XTALSEL_BIAS_RES_START_MASK (0x7 << 12)
+#define PMU_CC3_4387_XTALSEL_BIAS_RES_NORMAL_VAL (0x5 << 27)
+#define PMU_CC3_4387_XTALSEL_BIAS_RES_NORMAL_MASK (0x7 << 27)
+
+/* PMU chip control4 register */
+#define PMU_CHIPCTL4 4
+
+/* 53537 series moved switch_type and gmac_if_type to CC4 [15:14] and [13:12] */
+#define PMU_CC4_IF_TYPE_MASK 0x00003000
+#define PMU_CC4_IF_TYPE_RMII 0x00000000
+#define PMU_CC4_IF_TYPE_MII 0x00001000
+#define PMU_CC4_IF_TYPE_RGMII 0x00002000
+
+#define PMU_CC4_SW_TYPE_MASK 0x0000c000
+#define PMU_CC4_SW_TYPE_EPHY 0x00000000
+#define PMU_CC4_SW_TYPE_EPHYMII 0x00004000
+#define PMU_CC4_SW_TYPE_EPHYRMII 0x00008000
+#define PMU_CC4_SW_TYPE_RGMII 0x0000c000
+#define PMU_CC4_DISABLE_LQ_AVAIL (1<<27)
+
+#define PMU_CC4_4369_MAIN_PD_CBUCK2VDDB_ON (1u << 15u)
+#define PMU_CC4_4369_MAIN_PD_CBUCK2VDDRET_ON (1u << 16u)
+#define PMU_CC4_4369_MAIN_PD_MEMLPLDO2VDDB_ON (1u << 17u)
+#define PMU_CC4_4369_MAIN_PD_MEMLPDLO2VDDRET_ON (1u << 18u)
+
+#define PMU_CC4_4369_AUX_PD_CBUCK2VDDB_ON (1u << 21u)
+#define PMU_CC4_4369_AUX_PD_CBUCK2VDDRET_ON (1u << 22u)
+#define PMU_CC4_4369_AUX_PD_MEMLPLDO2VDDB_ON (1u << 23u)
+#define PMU_CC4_4369_AUX_PD_MEMLPLDO2VDDRET_ON (1u << 24u)
+
+#define PMU_CC4_4362_PD_CBUCK2VDDB_ON (1u << 15u)
+#define PMU_CC4_4362_PD_CBUCK2VDDRET_ON (1u << 16u)
+#define PMU_CC4_4362_PD_MEMLPLDO2VDDB_ON (1u << 17u)
+#define PMU_CC4_4362_PD_MEMLPDLO2VDDRET_ON (1u << 18u)
+
+#define PMU_CC4_4378_MAIN_PD_CBUCK2VDDB_ON (1u << 15u)
+#define PMU_CC4_4378_MAIN_PD_CBUCK2VDDRET_ON (1u << 16u)
+#define PMU_CC4_4378_MAIN_PD_MEMLPLDO2VDDB_ON (1u << 17u)
+#define PMU_CC4_4378_MAIN_PD_MEMLPDLO2VDDRET_ON (1u << 18u)
+
+#define PMU_CC4_4378_AUX_PD_CBUCK2VDDB_ON (1u << 21u)
+#define PMU_CC4_4378_AUX_PD_CBUCK2VDDRET_ON (1u << 22u)
+#define PMU_CC4_4378_AUX_PD_MEMLPLDO2VDDB_ON (1u << 23u)
+#define PMU_CC4_4378_AUX_PD_MEMLPLDO2VDDRET_ON (1u << 24u)
+
+#define PMU_CC4_4387_MAIN_PD_CBUCK2VDDB_ON (1u << 15u)
+#define PMU_CC4_4387_MAIN_PD_CBUCK2VDDRET_ON (1u << 16u)
+#define PMU_CC4_4387_MAIN_PD_MEMLPLDO2VDDB_ON (1u << 17u)
+#define PMU_CC4_4387_MAIN_PD_MEMLPDLO2VDDRET_ON (1u << 18u)
+
+#define PMU_CC4_4387_AUX_PD_CBUCK2VDDB_ON (1u << 21u)
+#define PMU_CC4_4387_AUX_PD_CBUCK2VDDRET_ON (1u << 22u)
+#define PMU_CC4_4387_AUX_PD_MEMLPLDO2VDDB_ON (1u << 23u)
+#define PMU_CC4_4387_AUX_PD_MEMLPLDO2VDDRET_ON (1u << 24u)
+
+/* PMU chip control5 register */
+#define PMU_CHIPCTL5 5
+
+#define PMU_CC5_4369_SUBCORE_CBUCK2VDDB_ON (1u << 9u)
+#define PMU_CC5_4369_SUBCORE_CBUCK2VDDRET_ON (1u << 10u)
+#define PMU_CC5_4369_SUBCORE_MEMLPLDO2VDDB_ON (1u << 11u)
+#define PMU_CC5_4369_SUBCORE_MEMLPLDO2VDDRET_ON (1u << 12u)
+
+#define PMU_CC5_4362_SUBCORE_CBUCK2VDDB_ON (1u << 9u)
+#define PMU_CC5_4362_SUBCORE_CBUCK2VDDRET_ON (1u << 10u)
+#define PMU_CC5_4362_SUBCORE_MEMLPLDO2VDDB_ON (1u << 11u)
+#define PMU_CC5_4362_SUBCORE_MEMLPLDO2VDDRET_ON (1u << 12u)
+
+#define PMU_CC5_4378_SUBCORE_CBUCK2VDDB_ON (1u << 9u)
+#define PMU_CC5_4378_SUBCORE_CBUCK2VDDRET_ON (1u << 10u)
+#define PMU_CC5_4378_SUBCORE_MEMLPLDO2VDDB_ON (1u << 11u)
+#define PMU_CC5_4378_SUBCORE_MEMLPLDO2VDDRET_ON (1u << 12u)
+
+#define PMU_CC5_4387_SUBCORE_CBUCK2VDDB_ON (1u << 9u)
+#define PMU_CC5_4387_SUBCORE_CBUCK2VDDRET_ON (1u << 10u)
+#define PMU_CC5_4387_SUBCORE_MEMLPLDO2VDDB_ON (1u << 11u)
+#define PMU_CC5_4387_SUBCORE_MEMLPLDO2VDDRET_ON (1u << 12u)
+
+#define PMU_CC5_4388_SUBCORE_SDTCCLK0_ON (1u << 3u)
+#define PMU_CC5_4388_SUBCORE_SDTCCLK1_ON (1u << 4u)
+
+#define PMU_CC5_4389_SUBCORE_SDTCCLK0_ON (1u << 3u)
+#define PMU_CC5_4389_SUBCORE_SDTCCLK1_ON (1u << 4u)
+
+/* PMU chip control6 register */
+#define PMU_CHIPCTL6 6
+#define PMU_CC6_RX4_CLK_SEQ_SELECT_MASK BCM_MASK32(1u, 0u)
+#define PMU_CC6_ENABLE_DMN1_WAKEUP (1 << 3)
+#define PMU_CC6_ENABLE_CLKREQ_WAKEUP (1 << 4)
+#define PMU_CC6_ENABLE_PMU_WAKEUP_ALP (1 << 6)
+#define PMU_CC6_ENABLE_PCIE_RETENTION (1 << 12)
+#define PMU_CC6_ENABLE_PMU_EXT_PERST (1 << 13)
+#define PMU_CC6_ENABLE_PMU_WAKEUP_PERST (1 << 14)
+#define PMU_CC6_ENABLE_LEGACY_WAKEUP (1 << 16)
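+
+/*
+ * Note: BCM_MASK32(msb, lsb), defined elsewhere in this driver, is assumed
+ * to build a 32-bit mask covering bits msb..lsb inclusive; under that
+ * reading, BCM_MASK32(1u, 0u) above is 0x3 and BCM_MASK32(23u, 20u) (used
+ * by the VREG definitions below) is 0x00F00000.
+ */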
+
+/* PMU chip control7 register */
+#define PMU_CHIPCTL7 7
+#define PMU_CC7_ENABLE_L2REFCLKPAD_PWRDWN (1 << 25)
+#define PMU_CC7_ENABLE_MDIO_RESET_WAR (1 << 27)
+/* 53537 series have gmac1 gmac_if_type in cc7 [7:6] (default 0b01) */
+#define PMU_CC7_IF_TYPE_MASK 0x000000c0
+#define PMU_CC7_IF_TYPE_RMII 0x00000000
+#define PMU_CC7_IF_TYPE_MII 0x00000040
+#define PMU_CC7_IF_TYPE_RGMII 0x00000080
+
+#define PMU_CHIPCTL8 8
+#define PMU_CHIPCTL9 9
+
+#define PMU_CHIPCTL10 10
+#define PMU_CC10_PCIE_PWRSW_RESET0_CNT_SHIFT 0
+#define PMU_CC10_PCIE_PWRSW_RESET0_CNT_MASK 0x000000ff
+#define PMU_CC10_PCIE_PWRSW_RESET1_CNT_SHIFT 8
+#define PMU_CC10_PCIE_PWRSW_RESET1_CNT_MASK 0x0000ff00
+#define PMU_CC10_PCIE_PWRSW_UP_DLY_SHIFT 16
+#define PMU_CC10_PCIE_PWRSW_UP_DLY_MASK 0x000f0000
+#define PMU_CC10_PCIE_PWRSW_FORCE_PWROK_DLY_SHIFT 20
+#define PMU_CC10_PCIE_PWRSW_FORCE_PWROK_DLY_MASK 0x00f00000
+#define PMU_CC10_FORCE_PCIE_ON (1 << 24)
+#define PMU_CC10_FORCE_PCIE_SW_ON (1 << 25)
+#define PMU_CC10_FORCE_PCIE_RETNT_ON (1 << 26)
+
+#define PMU_CC10_PCIE_PWRSW_RESET_CNT_4US 1
+#define PMU_CC10_PCIE_PWRSW_RESET_CNT_8US 2
+
+#define PMU_CC10_PCIE_PWRSW_UP_DLY_0US 0
+
+#define PMU_CC10_PCIE_PWRSW_FORCE_PWROK_DLY_4US 1
+#define PMU_CC10_PCIE_RESET0_CNT_SLOW_MASK (0xFu << 4u)
+#define PMU_CC10_PCIE_RESET1_CNT_SLOW_MASK (0xFu << 12u)
+
+#define PMU_CHIPCTL11 11
+
+/* PMU chip control12 register */
+#define PMU_CHIPCTL12 12
+#define PMU_CC12_DISABLE_LQ_CLK_ON (1u << 31u) /* HW4387-254 */
+
+/* PMU chip control13 register */
+#define PMU_CHIPCTL13 13
+
+#define PMU_CC13_SUBCORE_CBUCK2VDDB_OFF (1u << 0u)
+#define PMU_CC13_SUBCORE_CBUCK2VDDRET_OFF (1u << 1u)
+#define PMU_CC13_SUBCORE_MEMLPLDO2VDDB_OFF (1u << 2u)
+#define PMU_CC13_SUBCORE_MEMLPLDO2VDDRET_OFF (1u << 3u)
+
+#define PMU_CC13_MAIN_CBUCK2VDDB_OFF (1u << 4u)
+#define PMU_CC13_MAIN_CBUCK2VDDRET_OFF (1u << 5u)
+#define PMU_CC13_MAIN_MEMLPLDO2VDDB_OFF (1u << 6u)
+#define PMU_CC13_MAIN_MEMLPLDO2VDDRET_OFF (1u << 7u)
+
+#define PMU_CC13_AUX_CBUCK2VDDB_OFF (1u << 8u)
+#define PMU_CC13_AUX_MEMLPLDO2VDDB_OFF (1u << 10u)
+#define PMU_CC13_AUX_MEMLPLDO2VDDRET_OFF (1u << 11u)
+#define PMU_CC13_AUX_CBUCK2VDDRET_OFF (1u << 12u)
+#define PMU_CC13_CMN_MEMLPLDO2VDDRET_ON (1u << 18u)
+
+/* HW4368-331 */
+#define PMU_CC13_MAIN_ALWAYS_USE_COHERENT_IF0 (1u << 13u)
+#define PMU_CC13_MAIN_ALWAYS_USE_COHERENT_IF1 (1u << 14u)
+#define PMU_CC13_AUX_ALWAYS_USE_COHERENT_IF0 (1u << 15u)
+#define PMU_CC13_AUX_ALWAYS_USE_COHERENT_IF1 (1u << 19u)
+
+#define PMU_CC13_LHL_TIMER_SELECT (1u << 23u)
+
+#define PMU_CC13_4369_LHL_TIMER_SELECT (1u << 23u)
+#define PMU_CC13_4378_LHL_TIMER_SELECT (1u << 23u)
+
+#define PMU_CC13_4387_ENAB_RADIO_REG_CLK (1u << 9u)
+#define PMU_CC13_4387_LHL_TIMER_SELECT (1u << 23u)
+
+#define PMU_CHIPCTL14 14
+#define PMU_CHIPCTL15 15
+#define PMU_CHIPCTL16 16
+#define PMU_CC16_CLK4M_DIS (1 << 4)
+#define PMU_CC16_FF_ZERO_ADJ (4 << 5)
+
+/* PMU chip control17 register */
+#define PMU_CHIPCTL17 17u
+
+#define PMU_CC17_SCAN_DIG_SR_CLK_SHIFT (2u)
+#define PMU_CC17_SCAN_DIG_SR_CLK_MASK (3u << 2u)
+#define PMU_CC17_SCAN_CBUCK2VDDB_OFF (1u << 8u)
+#define PMU_CC17_SCAN_MEMLPLDO2VDDB_OFF (1u << 10u)
+#define PMU_CC17_SCAN_MEMLPLDO2VDDRET_OFF (1u << 11u)
+#define PMU_CC17_SCAN_CBUCK2VDDB_ON (1u << 24u)
+#define PMU_CC17_SCAN_MEMLPLDO2VDDB_ON (1u << 26u)
+#define PMU_CC17_SCAN_MEMLPLDO2VDDRET_ON (1u << 27u)
+
+#define SCAN_DIG_SR_CLK_80_MHZ (0) /* 80 MHz */
+#define SCAN_DIG_SR_CLK_53P35_MHZ (1u) /* 53.35 MHz */
+#define SCAN_DIG_SR_CLK_40_MHZ (2u) /* 40 MHz */
+
+/* PMU chip control18 register */
+#define PMU_CHIPCTL18 18u
+
+/* Expiry time for wl_SSReset if the P channel sleep handshake does not complete */
+#define PMU_CC18_WL_P_CHAN_TIMER_SEL_OFF (1u << 1u)
+#define PMU_CC18_WL_P_CHAN_TIMER_SEL_MASK (7u << 1u)
+
+#define PMU_CC18_WL_P_CHAN_TIMER_SEL_8ms 7u /* (2^(7+1))*32us = 8ms */
+
+/* Enable wl booker to force a P channel sleep handshake upon assertion of wl_SSReset */
+#define PMU_CC18_WL_BOOKER_FORCEPWRDWN_EN (1u << 4u)
+
+/* PMU chip control 19 register */
+#define PMU_CHIPCTL19 19u
+
+#define PMU_CC19_ASYNC_ATRESETMN (1u << 9u)
+
+#define PMU_CHIPCTL23 23
+#define PMU_CC23_MACPHYCLK_MASK (1u << 31u)
+
+#define PMU_CC23_AT_CLK0_ON (1u << 14u)
+#define PMU_CC23_AT_CLK1_ON (1u << 15u)
+
+/* PMU chip control14 register */
+#define PMU_CC14_MAIN_VDDB2VDDRET_UP_DLY_MASK (0xF)
+#define PMU_CC14_MAIN_VDDB2VDD_UP_DLY_MASK (0xF << 4)
+#define PMU_CC14_AUX_VDDB2VDDRET_UP_DLY_MASK (0xF << 8)
+#define PMU_CC14_AUX_VDDB2VDD_UP_DLY_MASK (0xF << 12)
+#define PMU_CC14_PCIE_VDDB2VDDRET_UP_DLY_MASK (0xF << 16)
+#define PMU_CC14_PCIE_VDDB2VDD_UP_DLY_MASK (0xF << 20)
+
+/* PMU chip control15 register */
+#define PMU_CC15_PCIE_VDDB_CURRENT_LIMIT_DELAY_MASK (0xFu << 4u)
+#define PMU_CC15_PCIE_VDDB_FORCE_RPS_PWROK_DELAY_MASK (0xFu << 8u)
+
+/* PMU corerev and chip specific PLL controls.
+ * PMU<rev>_PLL<num>_XX where <rev> is PMU corerev and <num> is an arbitrary number
+ * to differentiate different PLLs controlled by the same PMU rev.
+ */
+/* pllcontrol registers */
+/* PDIV, div_phy, div_arm, div_adc, dith_sel, ioff, kpd_scale, lsb_sel, mash_sel, lf_c & lf_r */
+#define PMU0_PLL0_PLLCTL0 0
+#define PMU0_PLL0_PC0_PDIV_MASK 1
+#define PMU0_PLL0_PC0_PDIV_FREQ 25000
+#define PMU0_PLL0_PC0_DIV_ARM_MASK 0x00000038
+#define PMU0_PLL0_PC0_DIV_ARM_SHIFT 3
+#define PMU0_PLL0_PC0_DIV_ARM_BASE 8
+
+/* PC0_DIV_ARM for PLLOUT_ARM */
+#define PMU0_PLL0_PC0_DIV_ARM_110MHZ 0
+#define PMU0_PLL0_PC0_DIV_ARM_97_7MHZ 1
+#define PMU0_PLL0_PC0_DIV_ARM_88MHZ 2
+#define PMU0_PLL0_PC0_DIV_ARM_80MHZ 3 /* Default */
+#define PMU0_PLL0_PC0_DIV_ARM_73_3MHZ 4
+#define PMU0_PLL0_PC0_DIV_ARM_67_7MHZ 5
+#define PMU0_PLL0_PC0_DIV_ARM_62_9MHZ 6
+#define PMU0_PLL0_PC0_DIV_ARM_58_6MHZ 7
+
+/* Wildcard base, stop_mod, en_lf_tp, en_cal & lf_r2 */
+#define PMU0_PLL0_PLLCTL1 1
+#define PMU0_PLL0_PC1_WILD_INT_MASK 0xf0000000
+#define PMU0_PLL0_PC1_WILD_INT_SHIFT 28
+#define PMU0_PLL0_PC1_WILD_FRAC_MASK 0x0fffff00
+#define PMU0_PLL0_PC1_WILD_FRAC_SHIFT 8
+#define PMU0_PLL0_PC1_STOP_MOD 0x00000040
+
+/* Wildcard base, vco_calvar, vco_swc, vco_var_selref, vco_ical & vco_sel_avdd */
+#define PMU0_PLL0_PLLCTL2 2
+#define PMU0_PLL0_PC2_WILD_INT_MASK 0xf
+#define PMU0_PLL0_PC2_WILD_INT_SHIFT 4
+
+/* pllcontrol registers */
+/* ndiv_pwrdn, pwrdn_ch<x>, refcomp_pwrdn, dly_ch<x>, p1div, p2div, _bypass_sdmod */
+#define PMU1_PLL0_PLLCTL0 0
+#define PMU1_PLL0_PC0_P1DIV_MASK 0x00f00000
+#define PMU1_PLL0_PC0_P1DIV_SHIFT 20
+#define PMU1_PLL0_PC0_P2DIV_MASK 0x0f000000
+#define PMU1_PLL0_PC0_P2DIV_SHIFT 24
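+
+/*
+ * Field-extraction sketch (illustrative; pllctl0 stands for the value read
+ * back from PLL control word 0 through the platform register accessor):
+ *   p1div = (pllctl0 & PMU1_PLL0_PC0_P1DIV_MASK) >> PMU1_PLL0_PC0_P1DIV_SHIFT;
+ *   p2div = (pllctl0 & PMU1_PLL0_PC0_P2DIV_MASK) >> PMU1_PLL0_PC0_P2DIV_SHIFT;
+ * The same MASK/SHIFT pairing applies to every PMU<rev>_PLL<num>_* field in
+ * this section.
+ */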
+
+/* m<x>div */
+#define PMU1_PLL0_PLLCTL1 1
+#define PMU1_PLL0_PC1_M1DIV_MASK 0x000000ff
+#define PMU1_PLL0_PC1_M1DIV_SHIFT 0
+#define PMU1_PLL0_PC1_M2DIV_MASK 0x0000ff00
+#define PMU1_PLL0_PC1_M2DIV_SHIFT 8
+#define PMU1_PLL0_PC1_M3DIV_MASK 0x00ff0000
+#define PMU1_PLL0_PC1_M3DIV_SHIFT 16
+#define PMU1_PLL0_PC1_M4DIV_MASK 0xff000000
+#define PMU1_PLL0_PC1_M4DIV_SHIFT 24
+#define PMU1_PLL0_PC1_M4DIV_BY_9 9
+#define PMU1_PLL0_PC1_M4DIV_BY_18 0x12
+#define PMU1_PLL0_PC1_M4DIV_BY_36 0x24
+#define PMU1_PLL0_PC1_M4DIV_BY_60 0x3C
+#define PMU1_PLL0_PC1_M2_M4DIV_MASK 0xff00ff00
+#define PMU1_PLL0_PC1_HOLD_LOAD_CH 0x28
+
+#define DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT 8
+#define DOT11MAC_880MHZ_CLK_DIVISOR_MASK (0xFF << DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT)
+#define DOT11MAC_880MHZ_CLK_DIVISOR_VAL (0xE << DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT)
+
+/* m<x>div, ndiv_dither_mfb, ndiv_mode, ndiv_int */
+#define PMU1_PLL0_PLLCTL2 2
+#define PMU1_PLL0_PC2_M5DIV_MASK 0x000000ff
+#define PMU1_PLL0_PC2_M5DIV_SHIFT 0
+#define PMU1_PLL0_PC2_M5DIV_BY_12 0xc
+#define PMU1_PLL0_PC2_M5DIV_BY_18 0x12
+#define PMU1_PLL0_PC2_M5DIV_BY_31 0x1f
+#define PMU1_PLL0_PC2_M5DIV_BY_36 0x24
+#define PMU1_PLL0_PC2_M5DIV_BY_42 0x2a
+#define PMU1_PLL0_PC2_M5DIV_BY_60 0x3c
+#define PMU1_PLL0_PC2_M6DIV_MASK 0x0000ff00
+#define PMU1_PLL0_PC2_M6DIV_SHIFT 8
+#define PMU1_PLL0_PC2_M6DIV_BY_18 0x12
+#define PMU1_PLL0_PC2_M6DIV_BY_36 0x24
+#define PMU1_PLL0_PC2_NDIV_MODE_MASK 0x000e0000
+#define PMU1_PLL0_PC2_NDIV_MODE_SHIFT 17
+#define PMU1_PLL0_PC2_NDIV_MODE_MASH 1
+#define PMU1_PLL0_PC2_NDIV_MODE_MFB 2
+#define PMU1_PLL0_PC2_NDIV_INT_MASK 0x1ff00000
+#define PMU1_PLL0_PC2_NDIV_INT_SHIFT 20
+
+/* ndiv_frac */
+#define PMU1_PLL0_PLLCTL3 3
+#define PMU1_PLL0_PC3_NDIV_FRAC_MASK 0x00ffffff
+#define PMU1_PLL0_PC3_NDIV_FRAC_SHIFT 0
+
+/* pll_ctrl */
+#define PMU1_PLL0_PLLCTL4 4
+
+/* pll_ctrl, vco_rng, clkdrive_ch<x> */
+#define PMU1_PLL0_PLLCTL5 5
+#define PMU1_PLL0_PC5_CLK_DRV_MASK 0xffffff00
+#define PMU1_PLL0_PC5_CLK_DRV_SHIFT 8
+#define PMU1_PLL0_PC5_ASSERT_CH_MASK 0x3f000000
+#define PMU1_PLL0_PC5_ASSERT_CH_SHIFT 24
+#define PMU1_PLL0_PC5_DEASSERT_CH_MASK 0xff000000
+
+#define PMU1_PLL0_PLLCTL6 6
+#define PMU1_PLL0_PLLCTL7 7
+#define PMU1_PLL0_PLLCTL8 8
+
+#define PMU1_PLLCTL8_OPENLOOP_MASK (1 << 1)
+
+#define PMU1_PLL0_PLLCTL9 9
+
+#define PMU1_PLL0_PLLCTL10 10
+
+/* PMU rev 2 control words */
+#define PMU2_PHY_PLL_PLLCTL 4
+#define PMU2_SI_PLL_PLLCTL 10
+
+/* PMU rev 2 */
+/* pllcontrol registers */
+/* ndiv_pwrdn, pwrdn_ch<x>, refcomp_pwrdn, dly_ch<x>, p1div, p2div, _bypass_sdmod */
+#define PMU2_PLL_PLLCTL0 0
+#define PMU2_PLL_PC0_P1DIV_MASK 0x00f00000
+#define PMU2_PLL_PC0_P1DIV_SHIFT 20
+#define PMU2_PLL_PC0_P2DIV_MASK 0x0f000000
+#define PMU2_PLL_PC0_P2DIV_SHIFT 24
+
+/* m<x>div */
+#define PMU2_PLL_PLLCTL1 1
+#define PMU2_PLL_PC1_M1DIV_MASK 0x000000ff
+#define PMU2_PLL_PC1_M1DIV_SHIFT 0
+#define PMU2_PLL_PC1_M2DIV_MASK 0x0000ff00
+#define PMU2_PLL_PC1_M2DIV_SHIFT 8
+#define PMU2_PLL_PC1_M3DIV_MASK 0x00ff0000
+#define PMU2_PLL_PC1_M3DIV_SHIFT 16
+#define PMU2_PLL_PC1_M4DIV_MASK 0xff000000
+#define PMU2_PLL_PC1_M4DIV_SHIFT 24
+
+/* m<x>div, ndiv_dither_mfb, ndiv_mode, ndiv_int */
+#define PMU2_PLL_PLLCTL2 2
+#define PMU2_PLL_PC2_M5DIV_MASK 0x000000ff
+#define PMU2_PLL_PC2_M5DIV_SHIFT 0
+#define PMU2_PLL_PC2_M6DIV_MASK 0x0000ff00
+#define PMU2_PLL_PC2_M6DIV_SHIFT 8
+#define PMU2_PLL_PC2_NDIV_MODE_MASK 0x000e0000
+#define PMU2_PLL_PC2_NDIV_MODE_SHIFT 17
+#define PMU2_PLL_PC2_NDIV_INT_MASK 0x1ff00000
+#define PMU2_PLL_PC2_NDIV_INT_SHIFT 20
+
+/* ndiv_frac */
+#define PMU2_PLL_PLLCTL3 3
+#define PMU2_PLL_PC3_NDIV_FRAC_MASK 0x00ffffff
+#define PMU2_PLL_PC3_NDIV_FRAC_SHIFT 0
+
+/* pll_ctrl */
+#define PMU2_PLL_PLLCTL4 4
+
+/* pll_ctrl, vco_rng, clkdrive_ch<x> */
+#define PMU2_PLL_PLLCTL5 5
+#define PMU2_PLL_PC5_CLKDRIVE_CH1_MASK 0x00000f00
+#define PMU2_PLL_PC5_CLKDRIVE_CH1_SHIFT 8
+#define PMU2_PLL_PC5_CLKDRIVE_CH2_MASK 0x0000f000
+#define PMU2_PLL_PC5_CLKDRIVE_CH2_SHIFT 12
+#define PMU2_PLL_PC5_CLKDRIVE_CH3_MASK 0x000f0000
+#define PMU2_PLL_PC5_CLKDRIVE_CH3_SHIFT 16
+#define PMU2_PLL_PC5_CLKDRIVE_CH4_MASK 0x00f00000
+#define PMU2_PLL_PC5_CLKDRIVE_CH4_SHIFT 20
+#define PMU2_PLL_PC5_CLKDRIVE_CH5_MASK 0x0f000000
+#define PMU2_PLL_PC5_CLKDRIVE_CH5_SHIFT 24
+#define PMU2_PLL_PC5_CLKDRIVE_CH6_MASK 0xf0000000
+#define PMU2_PLL_PC5_CLKDRIVE_CH6_SHIFT 28
+
+/* PMU rev 5 (& 6) */
+#define PMU5_PLL_P1P2_OFF 0
+#define PMU5_PLL_P1_MASK 0x0f000000
+#define PMU5_PLL_P1_SHIFT 24
+#define PMU5_PLL_P2_MASK 0x00f00000
+#define PMU5_PLL_P2_SHIFT 20
+#define PMU5_PLL_M14_OFF 1
+#define PMU5_PLL_MDIV_MASK 0x000000ff
+#define PMU5_PLL_MDIV_WIDTH 8
+#define PMU5_PLL_NM5_OFF 2
+#define PMU5_PLL_NDIV_MASK 0xfff00000
+#define PMU5_PLL_NDIV_SHIFT 20
+#define PMU5_PLL_NDIV_MODE_MASK 0x000e0000
+#define PMU5_PLL_NDIV_MODE_SHIFT 17
+#define PMU5_PLL_FMAB_OFF 3
+#define PMU5_PLL_MRAT_MASK 0xf0000000
+#define PMU5_PLL_MRAT_SHIFT 28
+#define PMU5_PLL_ABRAT_MASK 0x08000000
+#define PMU5_PLL_ABRAT_SHIFT 27
+#define PMU5_PLL_FDIV_MASK 0x07ffffff
+#define PMU5_PLL_PLLCTL_OFF 4
+#define PMU5_PLL_PCHI_OFF 5
+#define PMU5_PLL_PCHI_MASK 0x0000003f
+
+/* pmu XtalFreqRatio */
+#define PMU_XTALFREQ_REG_ILPCTR_MASK 0x00001FFF
+#define PMU_XTALFREQ_REG_MEASURE_MASK 0x80000000
+#define PMU_XTALFREQ_REG_MEASURE_SHIFT 31
+
+/* Divider allocation in 5357 */
+#define PMU5_MAINPLL_CPU 1
+#define PMU5_MAINPLL_MEM 2
+#define PMU5_MAINPLL_SI 3
+
+#define PMU7_PLL_PLLCTL7 7
+#define PMU7_PLL_CTL7_M4DIV_MASK 0xff000000
+#define PMU7_PLL_CTL7_M4DIV_SHIFT 24
+#define PMU7_PLL_CTL7_M4DIV_BY_6 6
+#define PMU7_PLL_CTL7_M4DIV_BY_12 0xc
+#define PMU7_PLL_CTL7_M4DIV_BY_24 0x18
+#define PMU7_PLL_PLLCTL8 8
+#define PMU7_PLL_CTL8_M5DIV_MASK 0x000000ff
+#define PMU7_PLL_CTL8_M5DIV_SHIFT 0
+#define PMU7_PLL_CTL8_M5DIV_BY_8 8
+#define PMU7_PLL_CTL8_M5DIV_BY_12 0xc
+#define PMU7_PLL_CTL8_M5DIV_BY_24 0x18
+#define PMU7_PLL_CTL8_M6DIV_MASK 0x0000ff00
+#define PMU7_PLL_CTL8_M6DIV_SHIFT 8
+#define PMU7_PLL_CTL8_M6DIV_BY_12 0xc
+#define PMU7_PLL_CTL8_M6DIV_BY_24 0x18
+#define PMU7_PLL_PLLCTL11 11
+#define PMU7_PLL_PLLCTL11_MASK 0xffffff00
+#define PMU7_PLL_PLLCTL11_VAL 0x22222200
+
+/* PMU rev 15 */
+#define PMU15_PLL_PLLCTL0 0
+#define PMU15_PLL_PC0_CLKSEL_MASK 0x00000003
+#define PMU15_PLL_PC0_CLKSEL_SHIFT 0
+#define PMU15_PLL_PC0_FREQTGT_MASK 0x003FFFFC
+#define PMU15_PLL_PC0_FREQTGT_SHIFT 2
+#define PMU15_PLL_PC0_PRESCALE_MASK 0x00C00000
+#define PMU15_PLL_PC0_PRESCALE_SHIFT 22
+#define PMU15_PLL_PC0_KPCTRL_MASK 0x07000000
+#define PMU15_PLL_PC0_KPCTRL_SHIFT 24
+#define PMU15_PLL_PC0_FCNTCTRL_MASK 0x38000000
+#define PMU15_PLL_PC0_FCNTCTRL_SHIFT 27
+#define PMU15_PLL_PC0_FDCMODE_MASK 0x40000000
+#define PMU15_PLL_PC0_FDCMODE_SHIFT 30
+#define PMU15_PLL_PC0_CTRLBIAS_MASK 0x80000000
+#define PMU15_PLL_PC0_CTRLBIAS_SHIFT 31
+
+#define PMU15_PLL_PLLCTL1 1
+#define PMU15_PLL_PC1_BIAS_CTLM_MASK 0x00000060
+#define PMU15_PLL_PC1_BIAS_CTLM_SHIFT 5
+#define PMU15_PLL_PC1_BIAS_CTLM_RST_MASK 0x00000040
+#define PMU15_PLL_PC1_BIAS_CTLM_RST_SHIFT 6
+#define PMU15_PLL_PC1_BIAS_SS_DIVR_MASK 0x0001FF80
+#define PMU15_PLL_PC1_BIAS_SS_DIVR_SHIFT 7
+#define PMU15_PLL_PC1_BIAS_SS_RSTVAL_MASK 0x03FE0000
+#define PMU15_PLL_PC1_BIAS_SS_RSTVAL_SHIFT 17
+#define PMU15_PLL_PC1_BIAS_INTG_BW_MASK 0x0C000000
+#define PMU15_PLL_PC1_BIAS_INTG_BW_SHIFT 26
+#define PMU15_PLL_PC1_BIAS_INTG_BYP_MASK 0x10000000
+#define PMU15_PLL_PC1_BIAS_INTG_BYP_SHIFT 28
+#define PMU15_PLL_PC1_OPENLP_EN_MASK 0x40000000
+#define PMU15_PLL_PC1_OPENLP_EN_SHIFT 30
+
+#define PMU15_PLL_PLLCTL2 2
+#define PMU15_PLL_PC2_CTEN_MASK 0x00000001
+#define PMU15_PLL_PC2_CTEN_SHIFT 0
+
+#define PMU15_PLL_PLLCTL3 3
+#define PMU15_PLL_PC3_DITHER_EN_MASK 0x00000001
+#define PMU15_PLL_PC3_DITHER_EN_SHIFT 0
+#define PMU15_PLL_PC3_DCOCTLSP_MASK 0xFE000000
+#define PMU15_PLL_PC3_DCOCTLSP_SHIFT 25
+#define PMU15_PLL_PC3_DCOCTLSP_DIV2EN_MASK 0x01
+#define PMU15_PLL_PC3_DCOCTLSP_DIV2EN_SHIFT 0
+#define PMU15_PLL_PC3_DCOCTLSP_CH0EN_MASK 0x02
+#define PMU15_PLL_PC3_DCOCTLSP_CH0EN_SHIFT 1
+#define PMU15_PLL_PC3_DCOCTLSP_CH1EN_MASK 0x04
+#define PMU15_PLL_PC3_DCOCTLSP_CH1EN_SHIFT 2
+#define PMU15_PLL_PC3_DCOCTLSP_CH0SEL_MASK 0x18
+#define PMU15_PLL_PC3_DCOCTLSP_CH0SEL_SHIFT 3
+#define PMU15_PLL_PC3_DCOCTLSP_CH1SEL_MASK 0x60
+#define PMU15_PLL_PC3_DCOCTLSP_CH1SEL_SHIFT 5
+#define PMU15_PLL_PC3_DCOCTLSP_CHSEL_OUTP_DIV1 0
+#define PMU15_PLL_PC3_DCOCTLSP_CHSEL_OUTP_DIV2 1
+#define PMU15_PLL_PC3_DCOCTLSP_CHSEL_OUTP_DIV3 2
+#define PMU15_PLL_PC3_DCOCTLSP_CHSEL_OUTP_DIV5 3
+
+#define PMU15_PLL_PLLCTL4 4
+#define PMU15_PLL_PC4_FLLCLK1_DIV_MASK 0x00000007
+#define PMU15_PLL_PC4_FLLCLK1_DIV_SHIFT 0
+#define PMU15_PLL_PC4_FLLCLK2_DIV_MASK 0x00000038
+#define PMU15_PLL_PC4_FLLCLK2_DIV_SHIFT 3
+#define PMU15_PLL_PC4_FLLCLK3_DIV_MASK 0x000001C0
+#define PMU15_PLL_PC4_FLLCLK3_DIV_SHIFT 6
+#define PMU15_PLL_PC4_DBGMODE_MASK 0x00000E00
+#define PMU15_PLL_PC4_DBGMODE_SHIFT 9
+#define PMU15_PLL_PC4_FLL480_CTLSP_LK_MASK 0x00001000
+#define PMU15_PLL_PC4_FLL480_CTLSP_LK_SHIFT 12
+#define PMU15_PLL_PC4_FLL480_CTLSP_MASK 0x000FE000
+#define PMU15_PLL_PC4_FLL480_CTLSP_SHIFT 13
+#define PMU15_PLL_PC4_DINPOL_MASK 0x00100000
+#define PMU15_PLL_PC4_DINPOL_SHIFT 20
+#define PMU15_PLL_PC4_CLKOUT_PD_MASK 0x00200000
+#define PMU15_PLL_PC4_CLKOUT_PD_SHIFT 21
+#define PMU15_PLL_PC4_CLKDIV2_PD_MASK 0x00400000
+#define PMU15_PLL_PC4_CLKDIV2_PD_SHIFT 22
+#define PMU15_PLL_PC4_CLKDIV4_PD_MASK 0x00800000
+#define PMU15_PLL_PC4_CLKDIV4_PD_SHIFT 23
+#define PMU15_PLL_PC4_CLKDIV8_PD_MASK 0x01000000
+#define PMU15_PLL_PC4_CLKDIV8_PD_SHIFT 24
+#define PMU15_PLL_PC4_CLKDIV16_PD_MASK 0x02000000
+#define PMU15_PLL_PC4_CLKDIV16_PD_SHIFT 25
+#define PMU15_PLL_PC4_TEST_EN_MASK 0x04000000
+#define PMU15_PLL_PC4_TEST_EN_SHIFT 26
+
+#define PMU15_PLL_PLLCTL5 5
+#define PMU15_PLL_PC5_FREQTGT_MASK 0x000FFFFF
+#define PMU15_PLL_PC5_FREQTGT_SHIFT 0
+#define PMU15_PLL_PC5_DCOCTLSP_MASK 0x07F00000
+#define PMU15_PLL_PC5_DCOCTLSP_SHIFT 20
+#define PMU15_PLL_PC5_PRESCALE_MASK 0x18000000
+#define PMU15_PLL_PC5_PRESCALE_SHIFT 27
+
+#define PMU15_PLL_PLLCTL6 6
+#define PMU15_PLL_PC6_FREQTGT_MASK 0x000FFFFF
+#define PMU15_PLL_PC6_FREQTGT_SHIFT 0
+#define PMU15_PLL_PC6_DCOCTLSP_MASK 0x07F00000
+#define PMU15_PLL_PC6_DCOCTLSP_SHIFT 20
+#define PMU15_PLL_PC6_PRESCALE_MASK 0x18000000
+#define PMU15_PLL_PC6_PRESCALE_SHIFT 27
+
+#define PMU15_FREQTGT_480_DEFAULT 0x19AB1
+#define PMU15_FREQTGT_492_DEFAULT 0x1A4F5
+#define PMU15_ARM_96MHZ 96000000 /**< 96 MHz */
+#define PMU15_ARM_98MHZ 98400000 /**< 98.4 MHz */
+#define PMU15_ARM_97MHZ 97000000 /**< 97 MHz */
+
+#define PMU17_PLLCTL2_NDIVTYPE_MASK 0x00000070
+#define PMU17_PLLCTL2_NDIVTYPE_SHIFT 4
+
+#define PMU17_PLLCTL2_NDIV_MODE_INT 0
+#define PMU17_PLLCTL2_NDIV_MODE_INT1B8 1
+#define PMU17_PLLCTL2_NDIV_MODE_MASH111 2
+#define PMU17_PLLCTL2_NDIV_MODE_MASH111B8 3
+
+#define PMU17_PLLCTL0_BBPLL_PWRDWN 0
+#define PMU17_PLLCTL0_BBPLL_DRST 3
+#define PMU17_PLLCTL0_BBPLL_DISBL_CLK 8
+
+/* PLL usage in 4716/47162 */
+#define PMU4716_MAINPLL_PLL0 12
+
+/* PLL Usages for 4368 */
+#define PMU4368_P1DIV_LO_SHIFT 0
+#define PMU4368_P1DIV_HI_SHIFT 2
+
+#define PMU4368_PLL1_PC4_P1DIV_MASK 0xC0000000
+#define PMU4368_PLL1_PC4_P1DIV_SHIFT 30
+#define PMU4368_PLL1_PC5_P1DIV_MASK 0x00000003
+#define PMU4368_PLL1_PC5_P1DIV_SHIFT 0
+#define PMU4368_PLL1_PC5_NDIV_INT_MASK 0x00000ffc
+#define PMU4368_PLL1_PC5_NDIV_INT_SHIFT 2
+#define PMU4368_PLL1_PC5_NDIV_FRAC_MASK 0xfffff000
+#define PMU4368_PLL1_PC5_NDIV_FRAC_SHIFT 12
+
+/* PLL usage in 4369 */
+#define PMU4369_PLL0_PC2_PDIV_MASK 0x000f0000
+#define PMU4369_PLL0_PC2_PDIV_SHIFT 16
+#define PMU4369_PLL0_PC2_NDIV_INT_MASK 0x3ff00000
+#define PMU4369_PLL0_PC2_NDIV_INT_SHIFT 20
+#define PMU4369_PLL0_PC3_NDIV_FRAC_MASK 0x000fffff
+#define PMU4369_PLL0_PC3_NDIV_FRAC_SHIFT 0
+#define PMU4369_PLL1_PC5_P1DIV_MASK 0xc0000000
+#define PMU4369_PLL1_PC5_P1DIV_SHIFT 30
+#define PMU4369_PLL1_PC6_P1DIV_MASK 0x00000003
+#define PMU4369_PLL1_PC6_P1DIV_SHIFT 0
+#define PMU4369_PLL1_PC6_NDIV_INT_MASK 0x00000ffc
+#define PMU4369_PLL1_PC6_NDIV_INT_SHIFT 2
+#define PMU4369_PLL1_PC6_NDIV_FRAC_MASK 0xfffff000
+#define PMU4369_PLL1_PC6_NDIV_FRAC_SHIFT 12
+
+#define PMU4369_P1DIV_LO_SHIFT 0
+#define PMU4369_P1DIV_HI_SHIFT 2
+
+#define PMU4369_PLL6VAL_P1DIV 4
+#define PMU4369_PLL6VAL_P1DIV_BIT3_2 1
+#define PMU4369_PLL6VAL_PRE_SCALE (1 << 17)
+#define PMU4369_PLL6VAL_POST_SCALE (1 << 3)
+
+/* PLL usage in 4378
+* Temporary setting; an update is needed.
+*/
+#define PMU4378_PLL0_PC2_P1DIV_MASK 0x000f0000
+#define PMU4378_PLL0_PC2_P1DIV_SHIFT 16
+#define PMU4378_PLL0_PC2_NDIV_INT_MASK 0x3ff00000
+#define PMU4378_PLL0_PC2_NDIV_INT_SHIFT 20
+
+/* PLL usage in 4387 */
+#define PMU4387_PLL0_PC1_ICH2_MDIV_SHIFT 18
+#define PMU4387_PLL0_PC1_ICH2_MDIV_MASK 0x07FC0000
+#define PMU4387_PLL0_PC2_ICH3_MDIV_MASK 0x000001ff
+
+/* PLL usage in 4388 */
+#define PMU4388_APLL_NDIV_P 0x154u
+#define PMU4388_APLL_NDIV_Q 0x1ffu
+#define PMU4388_APLL_PDIV 0x3u
+#define PMU4388_ARMPLL_I_NDIV_INT_MASK 0x01ff8000u
+#define PMU4388_ARMPLL_I_NDIV_INT_SHIFT 15u
+
+/* PLL usage in 4389 */
+#define PMU4389_APLL_NDIV_P 0x154u
+#define PMU4389_APLL_NDIV_Q 0x1ffu
+#define PMU4389_APLL_PDIV 0x3u
+#define PMU4389_ARMPLL_I_NDIV_INT_MASK 0x01ff8000u
+#define PMU4389_ARMPLL_I_NDIV_INT_SHIFT 15u
+
+/* 5357 Chip specific ChipControl register bits */
+#define CCTRL5357_EXTPA (1<<14) /* extPA in ChipControl 1, bit 14 */
+#define CCTRL5357_ANT_MUX_2o3 (1<<15) /* 2o3 in ChipControl 1, bit 15 */
+#define CCTRL5357_NFLASH (1<<16) /* Nandflash in ChipControl 1, bit 16 */
+/* 43217 Chip specific ChipControl register bits */
+#define CCTRL43217_EXTPA_C0 (1<<13) /* core0 extPA in ChipControl 1, bit 13 */
+#define CCTRL43217_EXTPA_C1 (1<<8) /* core1 extPA in ChipControl 1, bit 8 */
+
+#define PMU1_PLL0_CHIPCTL0 0
+#define PMU1_PLL0_CHIPCTL1 1
+#define PMU1_PLL0_CHIPCTL2 2
+
+#define SOCDEVRAM_BP_ADDR 0x1E000000
+#define SOCDEVRAM_ARM_ADDR 0x00800000
+
+#define PMU_VREG0_I_SR_CNTL_EN_SHIFT 0
+#define PMU_VREG0_DISABLE_PULLD_BT_SHIFT 2
+#define PMU_VREG0_DISABLE_PULLD_WL_SHIFT 3
+#define PMU_VREG0_CBUCKFSW_ADJ_SHIFT 7
+#define PMU_VREG0_CBUCKFSW_ADJ_MASK 0x1F
+#define PMU_VREG0_RAMP_SEL_SHIFT 13
+#define PMU_VREG0_RAMP_SEL_MASK 0x7
+#define PMU_VREG0_VFB_RSEL_SHIFT 17
+#define PMU_VREG0_VFB_RSEL_MASK 3
+
+#define PMU_VREG4_ADDR 4
+
+#define PMU_VREG4_CLDO_PWM_SHIFT 4
+#define PMU_VREG4_CLDO_PWM_MASK 0x7
+
+#define PMU_VREG4_LPLDO1_SHIFT 15
+#define PMU_VREG4_LPLDO1_MASK 0x7
+#define PMU_VREG4_LPLDO1_1p20V 0
+#define PMU_VREG4_LPLDO1_1p15V 1
+#define PMU_VREG4_LPLDO1_1p10V 2
+#define PMU_VREG4_LPLDO1_1p25V 3
+#define PMU_VREG4_LPLDO1_1p05V 4
+#define PMU_VREG4_LPLDO1_1p00V 5
+#define PMU_VREG4_LPLDO1_0p95V 6
+#define PMU_VREG4_LPLDO1_0p90V 7
+
+#define PMU_VREG4_LPLDO2_LVM_SHIFT 18
+#define PMU_VREG4_LPLDO2_LVM_MASK 0x7
+#define PMU_VREG4_LPLDO2_HVM_SHIFT 21
+#define PMU_VREG4_LPLDO2_HVM_MASK 0x7
+#define PMU_VREG4_LPLDO2_LVM_HVM_MASK 0x3f
+#define PMU_VREG4_LPLDO2_1p00V 0
+#define PMU_VREG4_LPLDO2_1p15V 1
+#define PMU_VREG4_LPLDO2_1p20V 2
+#define PMU_VREG4_LPLDO2_1p10V 3
+#define PMU_VREG4_LPLDO2_0p90V 4 /**< 4 - 7 is 0.90V */
+
+#define PMU_VREG4_HSICLDO_BYPASS_SHIFT 27
+#define PMU_VREG4_HSICLDO_BYPASS_MASK 0x1
+
+#define PMU_VREG5_ADDR 5
+#define PMU_VREG5_HSICAVDD_PD_SHIFT 6
+#define PMU_VREG5_HSICAVDD_PD_MASK 0x1
+#define PMU_VREG5_HSICDVDD_PD_SHIFT 11
+#define PMU_VREG5_HSICDVDD_PD_MASK 0x1
+
+/* 43228 chipstatus reg bits */
+#define CST43228_OTP_PRESENT 0x2
+
+/* 4360 Chip specific ChipControl register bits */
+/* 43602 uses these ChipControl definitions as well */
+#define CCTRL4360_I2C_MODE (1 << 0)
+#define CCTRL4360_UART_MODE (1 << 1)
+#define CCTRL4360_SECI_MODE (1 << 2)
+#define CCTRL4360_BTSWCTRL_MODE (1 << 3)
+#define CCTRL4360_DISCRETE_FEMCTRL_MODE (1 << 4)
+#define CCTRL4360_DIGITAL_PACTRL_MODE (1 << 5)
+#define CCTRL4360_BTSWCTRL_AND_DIGPA_PRESENT (1 << 6)
+#define CCTRL4360_EXTRA_GPIO_MODE (1 << 7)
+#define CCTRL4360_EXTRA_FEMCTRL_MODE (1 << 8)
+#define CCTRL4360_BT_LGCY_MODE (1 << 9)
+#define CCTRL4360_CORE2FEMCTRL4_ON (1 << 21)
+#define CCTRL4360_SECI_ON_GPIO01 (1 << 24)
+
+/* 4360 Chip specific Regulator Control register bits */
+#define RCTRL4360_RFLDO_PWR_DOWN (1 << 1)
+
+/* 4360 PMU resources and chip status bits */
+#define RES4360_REGULATOR 0
+#define RES4360_ILP_AVAIL 1
+#define RES4360_ILP_REQ 2
+#define RES4360_XTAL_LDO_PU 3
+#define RES4360_XTAL_PU 4
+#define RES4360_ALP_AVAIL 5
+#define RES4360_BBPLLPWRSW_PU 6
+#define RES4360_HT_AVAIL 7
+#define RES4360_OTP_PU 8
+#define RES4360_AVB_PLL_PWRSW_PU 9
+#define RES4360_PCIE_TL_CLK_AVAIL 10
+
+#define CST4360_XTAL_40MZ 0x00000001
+#define CST4360_SFLASH 0x00000002
+#define CST4360_SPROM_PRESENT 0x00000004
+#define CST4360_SFLASH_TYPE 0x00000004
+#define CST4360_OTP_ENABLED 0x00000008
+#define CST4360_REMAP_ROM 0x00000010
+#define CST4360_RSRC_INIT_MODE_MASK 0x00000060
+#define CST4360_RSRC_INIT_MODE_SHIFT 5
+#define CST4360_ILP_DIVEN 0x00000080
+#define CST4360_MODE_USB 0x00000100
+#define CST4360_SPROM_SIZE_MASK 0x00000600
+#define CST4360_SPROM_SIZE_SHIFT 9
+#define CST4360_BBPLL_LOCK 0x00000800
+#define CST4360_AVBBPLL_LOCK 0x00001000
+#define CST4360_USBBBPLL_LOCK 0x00002000
+#define CST4360_RSRC_INIT_MODE(cs) (((cs) & CST4360_RSRC_INIT_MODE_MASK) >> \
+ CST4360_RSRC_INIT_MODE_SHIFT)
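+
+/*
+ * Example (illustrative): given a chipstatus word cs read from the chip,
+ * CST4360_RSRC_INIT_MODE(cs) extracts bits [6:5] as the resource init mode.
+ */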
+
+#define CCTRL_4360_UART_SEL 0x2
+
+#define PMU4360_CC1_GPIO7_OVRD (1<<23) /* GPIO7 override */
+
+/* 43602 PMU resources based on pmu_params.xls version v0.95 */
+#define RES43602_LPLDO_PU 0
+#define RES43602_REGULATOR 1
+#define RES43602_PMU_SLEEP 2
+#define RES43602_RSVD_3 3
+#define RES43602_XTALLDO_PU 4
+#define RES43602_SERDES_PU 5
+#define RES43602_BBPLL_PWRSW_PU 6
+#define RES43602_SR_CLK_START 7
+#define RES43602_SR_PHY_PWRSW 8
+#define RES43602_SR_SUBCORE_PWRSW 9
+#define RES43602_XTAL_PU 10
+#define RES43602_PERST_OVR 11
+#define RES43602_SR_CLK_STABLE 12
+#define RES43602_SR_SAVE_RESTORE 13
+#define RES43602_SR_SLEEP 14
+#define RES43602_LQ_START 15
+#define RES43602_LQ_AVAIL 16
+#define RES43602_WL_CORE_RDY 17
+#define RES43602_ILP_REQ 18
+#define RES43602_ALP_AVAIL 19
+#define RES43602_RADIO_PU 20
+#define RES43602_RFLDO_PU 21
+#define RES43602_HT_START 22
+#define RES43602_HT_AVAIL 23
+#define RES43602_MACPHY_CLKAVAIL 24
+#define RES43602_PARLDO_PU 25
+#define RES43602_RSVD_26 26
+
+/* 43602 chip status bits */
+#define CST43602_SPROM_PRESENT (1<<1)
+#define CST43602_SPROM_SIZE (1<<10) /* 0 = 16K, 1 = 4K */
+#define CST43602_BBPLL_LOCK (1<<11)
+#define CST43602_RF_LDO_OUT_OK (1<<15) /* RF LDO output OK */
+
+#define PMU43602_CC1_GPIO12_OVRD (1<<28) /* GPIO12 override */
+
+#define PMU43602_CC2_PCIE_CLKREQ_L_WAKE_EN (1<<1) /* creates gated_pcie_wake, pmu_wakeup logic */
+#define PMU43602_CC2_PCIE_PERST_L_WAKE_EN (1<<2) /* creates gated_pcie_wake, pmu_wakeup logic */
+#define PMU43602_CC2_ENABLE_L2REFCLKPAD_PWRDWN (1<<3)
+#define PMU43602_CC2_PMU_WAKE_ALP_AVAIL_EN (1<<5) /* enable pmu_wakeup to request for ALP_AVAIL */
+#define PMU43602_CC2_PERST_L_EXTEND_EN (1<<9) /* extend perst_l until rsc PERST_OVR comes up */
+#define PMU43602_CC2_FORCE_EXT_LPO (1<<19) /* 1=ext LPO clock is the final LPO clock */
+#define PMU43602_CC2_XTAL32_SEL (1<<30) /* 0=ext_clock, 1=xtal */
+
+#define CC_SR1_43602_SR_ASM_ADDR (0x0)
+
+/* PLL CTL register values for open loop, used during S/R operation */
+#define PMU43602_PLL_CTL6_VAL 0x68000528
+#define PMU43602_PLL_CTL7_VAL 0x6
+
+#define PMU43602_CC3_ARMCR4_DBG_CLK (1 << 29)
+
+#define CC_SR0_43602_SR_ENG_EN_MASK 0x1
+#define CC_SR0_43602_SR_ENG_EN_SHIFT 0
+
+/* GCI function sel values */
+#define CC_FNSEL_HWDEF (0u)
+#define CC_FNSEL_SAMEASPIN (1u)
+#define CC_FNSEL_GPIO0 (2u)
+#define CC_FNSEL_GPIO1 (3u)
+#define CC_FNSEL_GCI0 (4u)
+#define CC_FNSEL_GCI1 (5u)
+#define CC_FNSEL_UART (6u)
+#define CC_FNSEL_SFLASH (7u)
+#define CC_FNSEL_SPROM (8u)
+#define CC_FNSEL_MISC0 (9u)
+#define CC_FNSEL_MISC1 (10u)
+#define CC_FNSEL_MISC2 (11u)
+#define CC_FNSEL_IND (12u)
+#define CC_FNSEL_PDN (13u)
+#define CC_FNSEL_PUP (14u)
+#define CC_FNSEL_TRI (15u)
+
+/* 4387 GCI function sel values */
+#define CC4387_FNSEL_FUART (3u)
+#define CC4387_FNSEL_DBG_UART (6u)
+#define CC4387_FNSEL_SPI (7u)
+
+/* Indices of PMU voltage regulator registers */
+#define PMU_VREG_0 (0u)
+#define PMU_VREG_1 (1u)
+#define PMU_VREG_2 (2u)
+#define PMU_VREG_3 (3u)
+#define PMU_VREG_4 (4u)
+#define PMU_VREG_5 (5u)
+#define PMU_VREG_6 (6u)
+#define PMU_VREG_7 (7u)
+#define PMU_VREG_8 (8u)
+#define PMU_VREG_9 (9u)
+#define PMU_VREG_10 (10u)
+#define PMU_VREG_11 (11u)
+#define PMU_VREG_12 (12u)
+#define PMU_VREG_13 (13u)
+#define PMU_VREG_14 (14u)
+#define PMU_VREG_15 (15u)
+#define PMU_VREG_16 (16u)
+
+/* 43012 Chipcommon ChipStatus bits */
+#define CST43012_FLL_LOCK (1 << 13)
+/* 43012 resources - End */
+
+/* 43012 related Cbuck modes */
+#define PMU_43012_VREG8_DYNAMIC_CBUCK_MODE0 0x00001c03
+#define PMU_43012_VREG9_DYNAMIC_CBUCK_MODE0 0x00492490
+#define PMU_43012_VREG8_DYNAMIC_CBUCK_MODE1 0x00001c03
+#define PMU_43012_VREG9_DYNAMIC_CBUCK_MODE1 0x00490410
+
+/* 43012 related dynamic cbuck mode mask */
+#define PMU_43012_VREG8_DYNAMIC_CBUCK_MODE_MASK 0xFFFFFC07
+#define PMU_43012_VREG9_DYNAMIC_CBUCK_MODE_MASK 0xFFFFFFFF
+
+/* 4369 related VREG masks */
+#define PMU_4369_VREG_5_MISCLDO_POWER_UP_MASK (1u << 11u)
+#define PMU_4369_VREG_5_MISCLDO_POWER_UP_SHIFT 11u
+#define PMU_4369_VREG_5_LPLDO_POWER_UP_MASK (1u << 27u)
+#define PMU_4369_VREG_5_LPLDO_POWER_UP_SHIFT 27u
+#define PMU_4369_VREG_5_LPLDO_OP_VLT_ADJ_CTRL_MASK BCM_MASK32(23u, 20u)
+#define PMU_4369_VREG_5_LPLDO_OP_VLT_ADJ_CTRL_SHIFT 20u
+#define PMU_4369_VREG_5_MEMLPLDO_OP_VLT_ADJ_CTRL_MASK BCM_MASK32(31, 28)
+#define PMU_4369_VREG_5_MEMLPLDO_OP_VLT_ADJ_CTRL_SHIFT 28u
+
+#define PMU_4369_VREG_6_MEMLPLDO_POWER_UP_MASK (1u << 3u)
+#define PMU_4369_VREG_6_MEMLPLDO_POWER_UP_SHIFT 3u
+
+#define PMU_4369_VREG_7_PMU_FORCE_HP_MODE_MASK (1u << 27u)
+#define PMU_4369_VREG_7_PMU_FORCE_HP_MODE_SHIFT 27u
+#define PMU_4369_VREG_7_WL_PMU_LP_MODE_MASK (1u << 28u)
+#define PMU_4369_VREG_7_WL_PMU_LP_MODE_SHIFT 28u
+#define PMU_4369_VREG_7_WL_PMU_LV_MODE_MASK (1u << 29u)
+#define PMU_4369_VREG_7_WL_PMU_LV_MODE_SHIFT 29u
+
+#define PMU_4369_VREG8_ASR_OVADJ_LPPFM_MASK BCM_MASK32(4, 0)
+#define PMU_4369_VREG8_ASR_OVADJ_LPPFM_SHIFT 0u
+
+#define PMU_4369_VREG13_RSRC_EN0_ASR_MASK BCM_MASK32(9, 9)
+#define PMU_4369_VREG13_RSRC_EN0_ASR_SHIFT 9u
+#define PMU_4369_VREG13_RSRC_EN1_ASR_MASK BCM_MASK32(10, 10)
+#define PMU_4369_VREG13_RSRC_EN1_ASR_SHIFT 10u
+#define PMU_4369_VREG13_RSRC_EN2_ASR_MASK BCM_MASK32(11, 11)
+#define PMU_4369_VREG13_RSRC_EN2_ASR_SHIFT 11u
+
+#define PMU_4369_VREG14_RSRC_EN_CSR_MASK0_MASK (1u << 23u)
+#define PMU_4369_VREG14_RSRC_EN_CSR_MASK0_SHIFT 23u
+
+#define PMU_4369_VREG16_RSRC0_CBUCK_MODE_MASK BCM_MASK32(2, 0)
+#define PMU_4369_VREG16_RSRC0_CBUCK_MODE_SHIFT 0u
+#define PMU_4369_VREG16_RSRC0_ABUCK_MODE_MASK BCM_MASK32(17, 15)
+#define PMU_4369_VREG16_RSRC0_ABUCK_MODE_SHIFT 15u
+#define PMU_4369_VREG16_RSRC1_ABUCK_MODE_MASK BCM_MASK32(20, 18)
+#define PMU_4369_VREG16_RSRC1_ABUCK_MODE_SHIFT 18u
+#define PMU_4369_VREG16_RSRC2_ABUCK_MODE_MASK BCM_MASK32(23, 21)
+#define PMU_4369_VREG16_RSRC2_ABUCK_MODE_SHIFT 21u
+
+/* 4362 related VREG masks */
+#define PMU_4362_VREG_5_MISCLDO_POWER_UP_MASK (1u << 11u)
+#define PMU_4362_VREG_5_MISCLDO_POWER_UP_SHIFT (11u)
+#define PMU_4362_VREG_5_LPLDO_POWER_UP_MASK (1u << 27u)
+#define PMU_4362_VREG_5_LPLDO_POWER_UP_SHIFT (27u)
+#define PMU_4362_VREG_5_MEMLPLDO_OP_VLT_ADJ_CTRL_MASK BCM_MASK32(31, 28)
+#define PMU_4362_VREG_5_MEMLPLDO_OP_VLT_ADJ_CTRL_SHIFT (28u)
+#define PMU_4362_VREG_6_MEMLPLDO_POWER_UP_MASK (1u << 3u)
+#define PMU_4362_VREG_6_MEMLPLDO_POWER_UP_SHIFT (3u)
+
+#define PMU_4362_VREG_7_PMU_FORCE_HP_MODE_MASK (1u << 27u)
+#define PMU_4362_VREG_7_PMU_FORCE_HP_MODE_SHIFT (27u)
+#define PMU_4362_VREG_7_WL_PMU_LP_MODE_MASK (1u << 28u)
+#define PMU_4362_VREG_7_WL_PMU_LP_MODE_SHIFT (28u)
+#define PMU_4362_VREG_7_WL_PMU_LV_MODE_MASK (1u << 29u)
+#define PMU_4362_VREG_7_WL_PMU_LV_MODE_SHIFT (29u)
+
+#define PMU_4362_VREG8_ASR_OVADJ_LPPFM_MASK BCM_MASK32(4, 0)
+#define PMU_4362_VREG8_ASR_OVADJ_LPPFM_SHIFT (0u)
+
+#define PMU_4362_VREG8_ASR_OVADJ_PFM_MASK BCM_MASK32(9, 5)
+#define PMU_4362_VREG8_ASR_OVADJ_PFM_SHIFT (5u)
+
+#define PMU_4362_VREG8_ASR_OVADJ_PWM_MASK BCM_MASK32(14, 10)
+#define PMU_4362_VREG8_ASR_OVADJ_PWM_SHIFT (10u)
+
+#define PMU_4362_VREG13_RSRC_EN0_ASR_MASK BCM_MASK32(9, 9)
+#define PMU_4362_VREG13_RSRC_EN0_ASR_SHIFT 9u
+#define PMU_4362_VREG13_RSRC_EN1_ASR_MASK BCM_MASK32(10, 10)
+#define PMU_4362_VREG13_RSRC_EN1_ASR_SHIFT 10u
+#define PMU_4362_VREG13_RSRC_EN2_ASR_MASK BCM_MASK32(11, 11)
+#define PMU_4362_VREG13_RSRC_EN2_ASR_SHIFT 11u
+
+#define PMU_4362_VREG14_RSRC_EN_CSR_MASK0_MASK (1u << 23u)
+#define PMU_4362_VREG14_RSRC_EN_CSR_MASK0_SHIFT (23u)
+
+#define PMU_4362_VREG16_RSRC0_CBUCK_MODE_MASK BCM_MASK32(2, 0)
+#define PMU_4362_VREG16_RSRC0_CBUCK_MODE_SHIFT (0u)
+#define PMU_4362_VREG16_RSRC0_ABUCK_MODE_MASK BCM_MASK32(17, 15)
+#define PMU_4362_VREG16_RSRC0_ABUCK_MODE_SHIFT (15u)
+#define PMU_4362_VREG16_RSRC1_ABUCK_MODE_MASK BCM_MASK32(20, 18)
+#define PMU_4362_VREG16_RSRC1_ABUCK_MODE_SHIFT (18u)
+#define PMU_4362_VREG16_RSRC2_ABUCK_MODE_MASK BCM_MASK32(23, 21)
+#define PMU_4362_VREG16_RSRC2_ABUCK_MODE_SHIFT 21u
+
+#define VREG0_4378_CSR_VOLT_ADJ_PWM_MASK 0x00001F00u
+#define VREG0_4378_CSR_VOLT_ADJ_PWM_SHIFT 8u
+#define VREG0_4378_CSR_VOLT_ADJ_PFM_MASK 0x0003E000u
+#define VREG0_4378_CSR_VOLT_ADJ_PFM_SHIFT 13u
+#define VREG0_4378_CSR_VOLT_ADJ_LP_PFM_MASK 0x007C0000u
+#define VREG0_4378_CSR_VOLT_ADJ_LP_PFM_SHIFT 18u
+#define VREG0_4378_CSR_OUT_VOLT_TRIM_ADJ_MASK 0x07800000u
+#define VREG0_4378_CSR_OUT_VOLT_TRIM_ADJ_SHIFT 23u
+
+#define PMU_4387_VREG1_CSR_OVERI_DIS_MASK (1u << 22u)
+#define PMU_4387_VREG6_WL_PMU_LV_MODE_MASK (0x00000002u)
+#define PMU_4387_VREG6_MEMLDO_PU_MASK (0x00000008u)
+#define PMU_4387_VREG8_ASR_OVERI_DIS_MASK (1u << 7u)
+
+#define PMU_4388_VREG6_WL_PMU_LV_MODE_SHIFT (1u)
+#define PMU_4388_VREG6_WL_PMU_LV_MODE_MASK (1u << PMU_4388_VREG6_WL_PMU_LV_MODE_SHIFT)
+#define PMU_4388_VREG6_MEMLDO_PU_SHIFT (3u)
+#define PMU_4388_VREG6_MEMLDO_PU_MASK (1u << PMU_4388_VREG6_MEMLDO_PU_SHIFT)
+
+#define PMU_4389_VREG6_WL_PMU_LV_MODE_SHIFT (1u)
+#define PMU_4389_VREG6_WL_PMU_LV_MODE_MASK (1u << PMU_4389_VREG6_WL_PMU_LV_MODE_SHIFT)
+#define PMU_4389_VREG6_MEMLDO_PU_SHIFT (3u)
+#define PMU_4389_VREG6_MEMLDO_PU_MASK (1u << PMU_4389_VREG6_MEMLDO_PU_SHIFT)
+
+#define PMU_VREG13_ASR_OVADJ_PWM_MASK (0x001F0000u)
+#define PMU_VREG13_ASR_OVADJ_PWM_SHIFT (16u)
+
+#define PMU_VREG14_RSRC_EN_ASR_PWM_PFM_MASK (1u << 18u)
+#define PMU_VREG14_RSRC_EN_ASR_PWM_PFM_SHIFT (18u)
+
+#define CSR_VOLT_ADJ_PWM_4378 (0x17u)
+#define CSR_VOLT_ADJ_PFM_4378 (0x17u)
+#define CSR_VOLT_ADJ_LP_PFM_4378 (0x17u)
+#define CSR_OUT_VOLT_TRIM_ADJ_4378 (0xEu)
+
+#ifdef WL_INITVALS
+#define ABUCK_VOLT_SW_DEFAULT_4387 (wliv_pmu_abuck_volt) /* 1.00V */
+#define CBUCK_VOLT_SW_DEFAULT_4387 (wliv_pmu_cbuck_volt) /* 0.68V */
+#define CBUCK_VOLT_NON_LVM (wliv_pmu_cbuck_volt_non_lvm) /* 0.76V */
+#else
+#define ABUCK_VOLT_SW_DEFAULT_4387 (0x1Fu) /* 1.00V */
+#define CBUCK_VOLT_SW_DEFAULT_4387 (0xFu) /* 0.68V */
+#define CBUCK_VOLT_NON_LVM (0x13u) /* 0.76V */
+#endif
+
+#define CC_GCI1_REG (0x1)
+
+#define FORCE_CLK_ON 1
+#define FORCE_CLK_OFF 0
+
+#define PMU1_PLL0_SWITCH_MACCLOCK_120MHZ (0)
+#define PMU1_PLL0_SWITCH_MACCLOCK_160MHZ (1)
+#define PMU1_PLL0_PC1_M2DIV_VALUE_120MHZ 8
+#define PMU1_PLL0_PC1_M2DIV_VALUE_160MHZ 6
+
+/* 4369 Related */
+
+/*
+ * PMU VREG Definitions:
+ * http://confluence.broadcom.com/display/WLAN/BCM4369+PMU+Vreg+Control+Register
+ */
+/* PMU VREG4 */
+#define PMU_28NM_VREG4_WL_LDO_CNTL_EN (0x1 << 10)
+
+/* PMU VREG6 */
+#define PMU_28NM_VREG6_BTLDO3P3_PU (0x1 << 12)
+#define PMU_4387_VREG6_BTLDO3P3_PU (0x1 << 8)
+
+/* PMU resources */
+#define RES4347_XTAL_PU 6
+#define RES4347_CORE_RDY_DIG 17
+#define RES4347_CORE_RDY_AUX 18
+#define RES4347_CORE_RDY_MAIN 22
+
+/* 4369 PMU Resources */
+#define RES4369_DUMMY 0
+#define RES4369_ABUCK 1
+#define RES4369_PMU_SLEEP 2
+#define RES4369_MISCLDO 3
+#define RES4369_LDO3P3 4
+#define RES4369_FAST_LPO_AVAIL 5
+#define RES4369_XTAL_PU 6
+#define RES4369_XTAL_STABLE 7
+#define RES4369_PWRSW_DIG 8
+#define RES4369_SR_DIG 9
+#define RES4369_SLEEP_DIG 10
+#define RES4369_PWRSW_AUX 11
+#define RES4369_SR_AUX 12
+#define RES4369_SLEEP_AUX 13
+#define RES4369_PWRSW_MAIN 14
+#define RES4369_SR_MAIN 15
+#define RES4369_SLEEP_MAIN 16
+#define RES4369_DIG_CORE_RDY 17
+#define RES4369_CORE_RDY_AUX 18
+#define RES4369_ALP_AVAIL 19
+#define RES4369_RADIO_AUX_PU 20
+#define RES4369_MINIPMU_AUX_PU 21
+#define RES4369_CORE_RDY_MAIN 22
+#define RES4369_RADIO_MAIN_PU 23
+#define RES4369_MINIPMU_MAIN_PU 24
+#define RES4369_PCIE_EP_PU 25
+#define RES4369_COLD_START_WAIT 26
+#define RES4369_ARMHTAVAIL 27
+#define RES4369_HT_AVAIL 28
+#define RES4369_MACPHY_AUX_CLK_AVAIL 29
+#define RES4369_MACPHY_MAIN_CLK_AVAIL 30
+
+/*
+* 4378 PMU Resources
+*/
+#define RES4378_DUMMY 0
+#define RES4378_ABUCK 1
+#define RES4378_PMU_SLEEP 2
+#define RES4378_MISC_LDO 3
+#define RES4378_LDO3P3_PU 4
+#define RES4378_FAST_LPO_AVAIL 5
+#define RES4378_XTAL_PU 6
+#define RES4378_XTAL_STABLE 7
+#define RES4378_PWRSW_DIG 8
+#define RES4378_SR_DIG 9
+#define RES4378_SLEEP_DIG 10
+#define RES4378_PWRSW_AUX 11
+#define RES4378_SR_AUX 12
+#define RES4378_SLEEP_AUX 13
+#define RES4378_PWRSW_MAIN 14
+#define RES4378_SR_MAIN 15
+#define RES4378_SLEEP_MAIN 16
+#define RES4378_CORE_RDY_DIG 17
+#define RES4378_CORE_RDY_AUX 18
+#define RES4378_ALP_AVAIL 19
+#define RES4378_RADIO_AUX_PU 20
+#define RES4378_MINIPMU_AUX_PU 21
+#define RES4378_CORE_RDY_MAIN 22
+#define RES4378_RADIO_MAIN_PU 23
+#define RES4378_MINIPMU_MAIN_PU 24
+#define RES4378_CORE_RDY_CB 25
+#define RES4378_PWRSW_CB 26
+#define RES4378_ARMHTAVAIL 27
+#define RES4378_HT_AVAIL 28
+#define RES4378_MACPHY_AUX_CLK_AVAIL 29
+#define RES4378_MACPHY_MAIN_CLK_AVAIL 30
+#define RES4378_RESERVED_31 31
+
+/*
+* 4387 PMU Resources
+*/
+#define RES4387_DUMMY 0
+#define RES4387_RESERVED_1 1
+#define RES4387_FAST_LPO_AVAIL 1 /* C0 */
+#define RES4387_PMU_SLEEP 2
+#define RES4387_PMU_LP 2 /* C0 */
+#define RES4387_MISC_LDO 3
+#define RES4387_RESERVED_4 4
+#define RES4387_SERDES_AFE_RET 4 /* C0 */
+#define RES4387_XTAL_HQ 5
+#define RES4387_XTAL_PU 6
+#define RES4387_XTAL_STABLE 7
+#define RES4387_PWRSW_DIG 8
+#define RES4387_CORE_RDY_BTMAIN 9
+#define RES4387_CORE_RDY_BTSC 10
+#define RES4387_PWRSW_AUX 11
+#define RES4387_PWRSW_SCAN 12
+#define RES4387_CORE_RDY_SCAN 13
+#define RES4387_PWRSW_MAIN 14
+#define RES4387_RESERVED_15 15
+#define RES4387_XTAL_PM_CLK 15 /* C0 */
+#define RES4387_RESERVED_16 16
+#define RES4387_CORE_RDY_DIG 17
+#define RES4387_CORE_RDY_AUX 18
+#define RES4387_ALP_AVAIL 19
+#define RES4387_RADIO_PU_AUX 20
+#define RES4387_RADIO_PU_SCAN 21
+#define RES4387_CORE_RDY_MAIN 22
+#define RES4387_RADIO_PU_MAIN 23
+#define RES4387_MACPHY_CLK_SCAN 24
+#define RES4387_CORE_RDY_CB 25
+#define RES4387_PWRSW_CB 26
+#define RES4387_ARMCLK_AVAIL 27
+#define RES4387_HT_AVAIL 28
+#define RES4387_MACPHY_CLK_AUX 29
+#define RES4387_MACPHY_CLK_MAIN 30
+#define RES4387_RESERVED_31 31
+
+/* 4388 PMU Resources */
+#define RES4388_DUMMY 0u
+#define RES4388_FAST_LPO_AVAIL 1u
+#define RES4388_PMU_LP 2u
+#define RES4388_MISC_LDO 3u
+#define RES4388_SERDES_AFE_RET 4u
+#define RES4388_XTAL_HQ 5u
+#define RES4388_XTAL_PU 6u
+#define RES4388_XTAL_STABLE 7u
+#define RES4388_PWRSW_DIG 8u
+#define RES4388_BTMC_TOP_RDY 9u
+#define RES4388_BTSC_TOP_RDY 10u
+#define RES4388_PWRSW_AUX 11u
+#define RES4388_PWRSW_SCAN 12u
+#define RES4388_CORE_RDY_SCAN 13u
+#define RES4388_PWRSW_MAIN 14u
+#define RES4388_RESERVED_15 15u
+#define RES4388_RESERVED_16 16u
+#define RES4388_CORE_RDY_DIG 17u
+#define RES4388_CORE_RDY_AUX 18u
+#define RES4388_ALP_AVAIL 19u
+#define RES4388_RADIO_PU_AUX 20u
+#define RES4388_RADIO_PU_SCAN 21u
+#define RES4388_CORE_RDY_MAIN 22u
+#define RES4388_RADIO_PU_MAIN 23u
+#define RES4388_MACPHY_CLK_SCAN 24u
+#define RES4388_CORE_RDY_CB 25u
+#define RES4388_PWRSW_CB 26u
+#define RES4388_ARMCLKAVAIL 27u
+#define RES4388_HT_AVAIL 28u
+#define RES4388_MACPHY_CLK_AUX 29u
+#define RES4388_MACPHY_CLK_MAIN 30u
+#define RES4388_RESERVED_31 31u
+
+/* 4389 PMU Resources */
+#define RES4389_DUMMY 0u
+#define RES4389_FAST_LPO_AVAIL 1u
+#define RES4389_PMU_LP 2u
+#define RES4389_MISC_LDO 3u
+#define RES4389_SERDES_AFE_RET 4u
+#define RES4389_XTAL_HQ 5u
+#define RES4389_XTAL_PU 6u
+#define RES4389_XTAL_STABLE 7u
+#define RES4389_PWRSW_DIG 8u
+#define RES4389_BTMC_TOP_RDY 9u
+#define RES4389_BTSC_TOP_RDY 10u
+#define RES4389_PWRSW_AUX 11u
+#define RES4389_PWRSW_SCAN 12u
+#define RES4389_CORE_RDY_SCAN 13u
+#define RES4389_PWRSW_MAIN 14u
+#define RES4389_RESERVED_15 15u
+#define RES4389_RESERVED_16 16u
+#define RES4389_CORE_RDY_DIG 17u
+#define RES4389_CORE_RDY_AUX 18u
+#define RES4389_ALP_AVAIL 19u
+#define RES4389_RADIO_PU_AUX 20u
+#define RES4389_RADIO_PU_SCAN 21u
+#define RES4389_CORE_RDY_MAIN 22u
+#define RES4389_RADIO_PU_MAIN 23u
+#define RES4389_MACPHY_CLK_SCAN 24u
+#define RES4389_CORE_RDY_CB 25u
+#define RES4389_PWRSW_CB 26u
+#define RES4389_ARMCLKAVAIL 27u
+#define RES4389_HT_AVAIL 28u
+#define RES4389_MACPHY_CLK_AUX 29u
+#define RES4389_MACPHY_CLK_MAIN 30u
+#define RES4389_RESERVED_31 31u
+
+/* 4397 PMU Resources */
+#define RES4397_DUMMY 0u
+#define RES4397_FAST_LPO_AVAIL 1u
+#define RES4397_PMU_LP 2u
+#define RES4397_MISC_LDO 3u
+#define RES4397_SERDES_AFE_RET 4u
+#define RES4397_XTAL_HQ 5u
+#define RES4397_XTAL_PU 6u
+#define RES4397_XTAL_STABLE 7u
+#define RES4397_PWRSW_DIG 8u
+#define RES4397_BTMC_TOP_RDY 9u
+#define RES4397_BTSC_TOP_RDY 10u
+#define RES4397_PWRSW_AUX 11u
+#define RES4397_PWRSW_SCAN 12u
+#define RES4397_CORE_RDY_SCAN 13u
+#define RES4397_PWRSW_MAIN 14u
+#define RES4397_XTAL_PM_CLK 15u
+#define RES4397_PWRSW_DRR2 16u
+#define RES4397_CORE_RDY_DIG 17u
+#define RES4397_CORE_RDY_AUX 18u
+#define RES4397_ALP_AVAIL 19u
+#define RES4397_RADIO_PU_AUX 20u
+#define RES4397_RADIO_PU_SCAN 21u
+#define RES4397_CORE_RDY_MAIN 22u
+#define RES4397_RADIO_PU_MAIN 23u
+#define RES4397_MACPHY_CLK_SCAN 24u
+#define RES4397_CORE_RDY_CB 25u
+#define RES4397_PWRSW_CB 26u
+#define RES4397_ARMCLKAVAIL 27u
+#define RES4397_HT_AVAIL 28u
+#define RES4397_MACPHY_CLK_AUX 29u
+#define RES4397_MACPHY_CLK_MAIN 30u
+#define RES4397_RESERVED_31 31u
+
+/* 0: BToverPCIe, 1: BToverUART */
+#define CST4378_CHIPMODE_BTOU(cs) (((cs) & (1 << 6)) != 0)
+#define CST4378_CHIPMODE_BTOP(cs) (((cs) & (1 << 6)) == 0)
+#define CST4378_SPROM_PRESENT 0x00000010
+
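+/* Editor's sketch (not part of the original patch): a bring-up path can
+ * key the BT transport off the raw chip-status word; how that word is
+ * read is outside this header. uint32 is the codebase typedef.
+ */
+#ifdef BCMDHD_DOC_EXAMPLES /* hypothetical guard; illustrative only */
+static int
+example_bt_over_uart(uint32 cs)
+{
+	/* bit 6 of chip status selects BT-over-UART vs BT-over-PCIe */
+	return CST4378_CHIPMODE_BTOU(cs) ? 1 : 0;
+}
+#endif /* BCMDHD_DOC_EXAMPLES */
+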
+#define CST4387_SFLASH_PRESENT 0x00000010U
+
+#define CST4387_CHIPMODE_BTOU(cs) (((cs) & (1 << 6)) != 0)
+#define CST4387_CHIPMODE_BTOP(cs) (((cs) & (1 << 6)) == 0)
+#define CST4387_SPROM_PRESENT 0x00000010
+
+/* GCI chip status */
+#define GCI_CS_4369_FLL1MHZ_LOCK_MASK (1 << 1)
+#define GCI_CS_4387_FLL1MHZ_LOCK_MASK (1 << 1)
+
+#define GCI_CS_4387_FLL1MHZ_DAC_OUT_SHIFT (16u)
+#define GCI_CS_4387_FLL1MHZ_DAC_OUT_MASK (0x00ff0000u)
+#define GCI_CS_4389_FLL1MHZ_DAC_OUT_SHIFT (16u)
+#define GCI_CS_4389_FLL1MHZ_DAC_OUT_MASK (0x00ff0000u)
+
+/* GCI chip control registers */
+#define GCI_CC7_AAON_BYPASS_PWRSW_SEL 13
+#define GCI_CC7_AAON_BYPASS_PWRSW_SEQ_ON 14
+
+/* 4368 GCI chip control registers */
+#define GCI_CC7_PRISEL_MASK (1 << 8 | 1 << 9)
+#define GCI_CC12_PRISEL_MASK (1 << 0 | 1 << 1)
+#define GCI_CC12_PRISEL_SHIFT 0
+#define GCI_CC12_DMASK_MASK (0x3ff << 10)
+#define GCI_CC16_ANT_SHARE_MASK (1 << 16 | 1 << 17)
+
+#define CC2_4362_SDIO_AOS_WAKEUP_MASK (1u << 24u)
+#define CC2_4362_SDIO_AOS_WAKEUP_SHIFT 24u
+
+#define CC2_4378_MAIN_MEMLPLDO_VDDB_OFF_MASK (1u << 12u)
+#define CC2_4378_MAIN_MEMLPLDO_VDDB_OFF_SHIFT 12u
+#define CC2_4378_AUX_MEMLPLDO_VDDB_OFF_MASK (1u << 13u)
+#define CC2_4378_AUX_MEMLPLDO_VDDB_OFF_SHIFT 13u
+#define CC2_4378_MAIN_VDDRET_ON_MASK (1u << 15u)
+#define CC2_4378_MAIN_VDDRET_ON_SHIFT 15u
+#define CC2_4378_AUX_VDDRET_ON_MASK (1u << 16u)
+#define CC2_4378_AUX_VDDRET_ON_SHIFT 16u
+#define CC2_4378_GCI2WAKE_MASK (1u << 31u)
+#define CC2_4378_GCI2WAKE_SHIFT 31u
+#define CC2_4378_SDIO_AOS_WAKEUP_MASK (1u << 24u)
+#define CC2_4378_SDIO_AOS_WAKEUP_SHIFT 24u
+#define CC4_4378_LHL_TIMER_SELECT (1u << 0u)
+#define CC6_4378_PWROK_WDT_EN_IN_MASK (1u << 6u)
+#define CC6_4378_PWROK_WDT_EN_IN_SHIFT 6u
+#define CC6_4378_SDIO_AOS_CHIP_WAKEUP_MASK (1u << 24u)
+#define CC6_4378_SDIO_AOS_CHIP_WAKEUP_SHIFT 24u
+
+#define CC2_4378_USE_WLAN_BP_CLK_ON_REQ_MASK (1u << 15u)
+#define CC2_4378_USE_WLAN_BP_CLK_ON_REQ_SHIFT 15u
+#define CC2_4378_USE_CMN_BP_CLK_ON_REQ_MASK (1u << 16u)
+#define CC2_4378_USE_CMN_BP_CLK_ON_REQ_SHIFT 16u
+
+#define CC2_4387_MAIN_MEMLPLDO_VDDB_OFF_MASK (1u << 12u)
+#define CC2_4387_MAIN_MEMLPLDO_VDDB_OFF_SHIFT 12u
+#define CC2_4387_AUX_MEMLPLDO_VDDB_OFF_MASK (1u << 13u)
+#define CC2_4387_AUX_MEMLPLDO_VDDB_OFF_SHIFT 13u
+#define CC2_4387_MAIN_VDDRET_ON_MASK (1u << 15u)
+#define CC2_4387_MAIN_VDDRET_ON_SHIFT 15u
+#define CC2_4387_AUX_VDDRET_ON_MASK (1u << 16u)
+#define CC2_4387_AUX_VDDRET_ON_SHIFT 16u
+#define CC2_4387_GCI2WAKE_MASK (1u << 31u)
+#define CC2_4387_GCI2WAKE_SHIFT 31u
+#define CC2_4387_SDIO_AOS_WAKEUP_MASK (1u << 24u)
+#define CC2_4387_SDIO_AOS_WAKEUP_SHIFT 24u
+#define CC4_4387_LHL_TIMER_SELECT (1u << 0u)
+#define CC6_4387_PWROK_WDT_EN_IN_MASK (1u << 6u)
+#define CC6_4387_PWROK_WDT_EN_IN_SHIFT 6u
+#define CC6_4387_SDIO_AOS_CHIP_WAKEUP_MASK (1u << 24u)
+#define CC6_4387_SDIO_AOS_CHIP_WAKEUP_SHIFT 24u
+
+#define CC2_4387_USE_WLAN_BP_CLK_ON_REQ_MASK (1u << 15u)
+#define CC2_4387_USE_WLAN_BP_CLK_ON_REQ_SHIFT 15u
+#define CC2_4387_USE_CMN_BP_CLK_ON_REQ_MASK (1u << 16u)
+#define CC2_4387_USE_CMN_BP_CLK_ON_REQ_SHIFT 16u
+
+#define CC2_4388_MAIN_MEMLPLDO_VDDB_OFF_MASK (1u << 12u)
+#define CC2_4388_MAIN_MEMLPLDO_VDDB_OFF_SHIFT (12u)
+#define CC2_4388_AUX_MEMLPLDO_VDDB_OFF_MASK (1u << 13u)
+#define CC2_4388_AUX_MEMLPLDO_VDDB_OFF_SHIFT (13u)
+#define CC2_4388_MAIN_VDDRET_ON_MASK (1u << 15u)
+#define CC2_4388_MAIN_VDDRET_ON_SHIFT (15u)
+#define CC2_4388_AUX_VDDRET_ON_MASK (1u << 16u)
+#define CC2_4388_AUX_VDDRET_ON_SHIFT (16u)
+#define CC2_4388_GCI2WAKE_MASK (1u << 31u)
+#define CC2_4388_GCI2WAKE_SHIFT (31u)
+#define CC2_4388_SDIO_AOS_WAKEUP_MASK (1u << 24u)
+#define CC2_4388_SDIO_AOS_WAKEUP_SHIFT (24u)
+#define CC4_4388_LHL_TIMER_SELECT (1u << 0u)
+#define CC6_4388_PWROK_WDT_EN_IN_MASK (1u << 6u)
+#define CC6_4388_PWROK_WDT_EN_IN_SHIFT (6u)
+#define CC6_4388_SDIO_AOS_CHIP_WAKEUP_MASK (1u << 24u)
+#define CC6_4388_SDIO_AOS_CHIP_WAKEUP_SHIFT (24u)
+
+#define CC2_4388_USE_WLAN_BP_CLK_ON_REQ_MASK (1u << 15u)
+#define CC2_4388_USE_WLAN_BP_CLK_ON_REQ_SHIFT (15u)
+#define CC2_4388_USE_CMN_BP_CLK_ON_REQ_MASK (1u << 16u)
+#define CC2_4388_USE_CMN_BP_CLK_ON_REQ_SHIFT (16u)
+
+#define CC2_4389_MAIN_MEMLPLDO_VDDB_OFF_MASK (1u << 12u)
+#define CC2_4389_MAIN_MEMLPLDO_VDDB_OFF_SHIFT (12u)
+#define CC2_4389_AUX_MEMLPLDO_VDDB_OFF_MASK (1u << 13u)
+#define CC2_4389_AUX_MEMLPLDO_VDDB_OFF_SHIFT (13u)
+#define CC2_4389_MAIN_VDDRET_ON_MASK (1u << 15u)
+#define CC2_4389_MAIN_VDDRET_ON_SHIFT (15u)
+#define CC2_4389_AUX_VDDRET_ON_MASK (1u << 16u)
+#define CC2_4389_AUX_VDDRET_ON_SHIFT (16u)
+#define CC2_4389_GCI2WAKE_MASK (1u << 31u)
+#define CC2_4389_GCI2WAKE_SHIFT (31u)
+#define CC2_4389_SDIO_AOS_WAKEUP_MASK (1u << 24u)
+#define CC2_4389_SDIO_AOS_WAKEUP_SHIFT (24u)
+#define CC4_4389_LHL_TIMER_SELECT (1u << 0u)
+#define CC6_4389_PWROK_WDT_EN_IN_MASK (1u << 6u)
+#define CC6_4389_PWROK_WDT_EN_IN_SHIFT (6u)
+#define CC6_4389_SDIO_AOS_CHIP_WAKEUP_MASK (1u << 24u)
+#define CC6_4389_SDIO_AOS_CHIP_WAKEUP_SHIFT (24u)
+
+#define CC2_4389_USE_WLAN_BP_CLK_ON_REQ_MASK (1u << 15u)
+#define CC2_4389_USE_WLAN_BP_CLK_ON_REQ_SHIFT (15u)
+#define CC2_4389_USE_CMN_BP_CLK_ON_REQ_MASK (1u << 16u)
+#define CC2_4389_USE_CMN_BP_CLK_ON_REQ_SHIFT (16u)
+
+#define PCIE_GPIO1_GPIO_PIN CC_GCI_GPIO_0
+#define PCIE_PERST_GPIO_PIN CC_GCI_GPIO_1
+#define PCIE_CLKREQ_GPIO_PIN CC_GCI_GPIO_2
+
+#define VREG5_4378_MEMLPLDO_ADJ_MASK 0xF0000000
+#define VREG5_4378_MEMLPLDO_ADJ_SHIFT 28
+#define VREG5_4378_LPLDO_ADJ_MASK 0x00F00000
+#define VREG5_4378_LPLDO_ADJ_SHIFT 20
+
+#define VREG5_4387_MISCLDO_PU_MASK (0x00000800u)
+#define VREG5_4387_MISCLDO_PU_SHIFT (11u)
+
+#define VREG5_4387_MEMLPLDO_ADJ_MASK 0xF0000000
+#define VREG5_4387_MEMLPLDO_ADJ_SHIFT 28
+#define VREG5_4387_LPLDO_ADJ_MASK 0x00F00000
+#define VREG5_4387_LPLDO_ADJ_SHIFT 20
+#define VREG5_4387_MISC_LDO_ADJ_MASK (0xfu)
+#define VREG5_4387_MISC_LDO_ADJ_SHIFT (0)
+
+/* misc ldo voltage
+ * https://drive.google.com/file/d/1JjvNhp-RIXJBtw99M4w5ww4MmDsBJbpD
+ */
+#define PMU_VREG5_MISC_LDO_VOLT_0p931 (0x7u) /* 0.93125 v */
+#define PMU_VREG5_MISC_LDO_VOLT_0p912 (0x6u) /* 0.91250 v */
+#define PMU_VREG5_MISC_LDO_VOLT_0p893 (0x5u) /* 0.89375 v */
+#define PMU_VREG5_MISC_LDO_VOLT_0p875 (0x4u) /* 0.87500 v */
+#define PMU_VREG5_MISC_LDO_VOLT_0p856 (0x3u) /* 0.85625 v */
+#define PMU_VREG5_MISC_LDO_VOLT_0p837 (0x2u) /* 0.83750 v */
+#define PMU_VREG5_MISC_LDO_VOLT_0p818 (0x1u) /* 0.81875 v */
+#define PMU_VREG5_MISC_LDO_VOLT_0p800 (0) /* 0.80000 v */
+#define PMU_VREG5_MISC_LDO_VOLT_0p781 (0xfu) /* 0.78125 v */
+#define PMU_VREG5_MISC_LDO_VOLT_0p762 (0xeu) /* 0.76250 v */
+#define PMU_VREG5_MISC_LDO_VOLT_0p743 (0xdu) /* 0.74375 v */
+#define PMU_VREG5_MISC_LDO_VOLT_0p725 (0xcu) /* 0.72500 v */
+#define PMU_VREG5_MISC_LDO_VOLT_0p706 (0xbu) /* 0.70625 v */
+#define PMU_VREG5_MISC_LDO_VOLT_0p687 (0xau) /* 0.68750 v */
+#define PMU_VREG5_MISC_LDO_VOLT_0p668 (0x9u) /* 0.66875 v */
+#define PMU_VREG5_MISC_LDO_VOLT_0p650 (0x8u) /* 0.65000 v */
+
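+/* Editor's sketch (not part of the original patch): programming the misc
+ * LDO voltage is a read-modify-write of the 4-bit VREG5_4387_MISC_LDO_ADJ
+ * field with one of the codes above; the caller still has to fetch and
+ * write back vreg5 through the driver's PMU regulator accessors.
+ */
+#ifdef BCMDHD_DOC_EXAMPLES /* hypothetical guard */
+static uint32
+example_vreg5_set_misc_ldo(uint32 vreg5, uint32 volt_code)
+{
+	vreg5 &= ~VREG5_4387_MISC_LDO_ADJ_MASK;
+	vreg5 |= (volt_code << VREG5_4387_MISC_LDO_ADJ_SHIFT) &
+	         VREG5_4387_MISC_LDO_ADJ_MASK;
+	return vreg5;	/* e.g. volt_code == PMU_VREG5_MISC_LDO_VOLT_0p875 */
+}
+#endif /* BCMDHD_DOC_EXAMPLES */
+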
+/* lpldo/memlpldo voltage */
+#define PMU_VREG5_LPLDO_VOLT_0_88 0xf /* 0.88v */
+#define PMU_VREG5_LPLDO_VOLT_0_86 0xe /* 0.86v */
+#define PMU_VREG5_LPLDO_VOLT_0_84 0xd /* 0.84v */
+#define PMU_VREG5_LPLDO_VOLT_0_82 0xc /* 0.82v */
+#define PMU_VREG5_LPLDO_VOLT_0_80 0xb /* 0.80v */
+#define PMU_VREG5_LPLDO_VOLT_0_78 0xa /* 0.78v */
+#define PMU_VREG5_LPLDO_VOLT_0_76 0x9 /* 0.76v */
+#define PMU_VREG5_LPLDO_VOLT_0_74 0x8 /* 0.74v */
+#define PMU_VREG5_LPLDO_VOLT_0_72 0x7 /* 0.72v */
+#define PMU_VREG5_LPLDO_VOLT_1_10 0x6 /* 1.10v */
+#define PMU_VREG5_LPLDO_VOLT_1_00 0x5 /* 1.00v */
+#define PMU_VREG5_LPLDO_VOLT_0_98 0x4 /* 0.98v */
+#define PMU_VREG5_LPLDO_VOLT_0_96 0x3 /* 0.96v */
+#define PMU_VREG5_LPLDO_VOLT_0_94 0x2 /* 0.94v */
+#define PMU_VREG5_LPLDO_VOLT_0_92 0x1 /* 0.92v */
+#define PMU_VREG5_LPLDO_VOLT_0_90 0x0 /* 0.90v */
+
+/* Save/Restore engine */
+
+/* 512-byte block */
+#define SR_ASM_ADDR_BLK_SIZE_SHIFT (9u)
+
+#define BM_ADDR_TO_SR_ADDR(bmaddr) ((bmaddr) >> SR_ASM_ADDR_BLK_SIZE_SHIFT)
+#define SR_ADDR_TO_BM_ADDR(sraddr) ((sraddr) << SR_ASM_ADDR_BLK_SIZE_SHIFT)
+
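+/* Editor's sketch (not part of the original patch): the engine addresses
+ * buffer memory in 512-byte blocks, so the two macros above are inverse
+ * shifts. E.g. BM byte address 0xC00 (3KB) maps to SR block address 0x6,
+ * and converting back yields 0xC00 (exact whenever the address is
+ * 512-byte aligned).
+ */
+#ifdef BCMDHD_DOC_EXAMPLES /* hypothetical guard */
+static uint32
+example_sr_blk_roundtrip(uint32 bm_byte_addr)
+{
+	return SR_ADDR_TO_BM_ADDR(BM_ADDR_TO_SR_ADDR(bm_byte_addr));
+}
+#endif /* BCMDHD_DOC_EXAMPLES */
+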
+/* Txfifo is 512KB for the main core and 128KB for the aux core.
+ * The first 12KB (0x3000) of the BMC buffer hold the template in the
+ * main core (6.5KB, 0x1A00, in the aux core), followed by the ASM code.
+ */
+#define SR_ASM_ADDR_MAIN_4369 BM_ADDR_TO_SR_ADDR(0xC00)
+#define SR_ASM_ADDR_AUX_4369 BM_ADDR_TO_SR_ADDR(0xC00)
+#define SR_ASM_ADDR_DIG_4369 (0x0)
+
+#define SR_ASM_ADDR_MAIN_4362 BM_ADDR_TO_SR_ADDR(0xc00u)
+#define SR_ASM_ADDR_DIG_4362 (0x0u)
+
+#define SR_ASM_ADDR_MAIN_4378 (0x18)
+#define SR_ASM_ADDR_AUX_4378 (0xd)
+/* backplane address, use last 16k of BTCM for s/r */
+#define SR_ASM_ADDR_DIG_4378A0 (0x51c000)
+
+/* backplane address, use last 32k of BTCM for s/r */
+#define SR_ASM_ADDR_DIG_4378B0 (0x518000)
+
+#define SR_ASM_ADDR_MAIN_4387 (0x18)
+#define SR_ASM_ADDR_AUX_4387 (0xd)
+#define SR_ASM_ADDR_SCAN_4387 (0)
+/* backplane address */
+#define SR_ASM_ADDR_DIG_4387 (0x800000)
+
+#define SR_ASM_ADDR_MAIN_4387C0 BM_ADDR_TO_SR_ADDR(0xC00)
+#define SR_ASM_ADDR_AUX_4387C0 BM_ADDR_TO_SR_ADDR(0xC00)
+#define SR_ASM_ADDR_DIG_4387C0 (0x931000)
+#define SR_ASM_ADDR_DIG_4387_C0 (0x931000)
+
+#define SR_ASM_ADDR_MAIN_4388 BM_ADDR_TO_SR_ADDR(0xC00)
+#define SR_ASM_ADDR_AUX_4388 BM_ADDR_TO_SR_ADDR(0xC00)
+#define SR_ASM_ADDR_SCAN_4388 BM_ADDR_TO_SR_ADDR(0)
+#define SR_ASM_ADDR_DIG_4388 (0x18520000)
+#define SR_ASM_SIZE_DIG_4388 (65536u)
+#define FIS_CMN_SUBCORE_ADDR_4388 (0x1640u)
+
+#define SR_ASM_ADDR_MAIN_4389C0 BM_ADDR_TO_SR_ADDR(0xC00)
+#define SR_ASM_ADDR_AUX_4389C0 BM_ADDR_TO_SR_ADDR(0xC00)
+#define SR_ASM_ADDR_SCAN_4389C0 BM_ADDR_TO_SR_ADDR(0x000)
+#define SR_ASM_ADDR_DIG_4389C0 (0x18520000)
+#define SR_ASM_SIZE_DIG_4389C0 (8192u * 8u)
+
+#define SR_ASM_ADDR_MAIN_4389 BM_ADDR_TO_SR_ADDR(0xC00)
+#define SR_ASM_ADDR_AUX_4389 BM_ADDR_TO_SR_ADDR(0xC00)
+#define SR_ASM_ADDR_SCAN_4389 BM_ADDR_TO_SR_ADDR(0x000)
+#define SR_ASM_ADDR_DIG_4389 (0x18520000)
+#define SR_ASM_SIZE_DIG_4389 (8192u * 8u)
+#define FIS_CMN_SUBCORE_ADDR_4389 (0x1640u)
+
+#define SR_ASM_ADDR_DIG_4397 (0x18520000)
+
+/* SR Control0 bits */
+#define SR0_SR_ENG_EN_MASK 0x1
+#define SR0_SR_ENG_EN_SHIFT 0
+#define SR0_SR_ENG_CLK_EN (1 << 1)
+#define SR0_RSRC_TRIGGER (0xC << 2)
+#define SR0_WD_MEM_MIN_DIV (0x3 << 6)
+#define SR0_INVERT_SR_CLK (1 << 11)
+#define SR0_MEM_STBY_ALLOW (1 << 16)
+#define SR0_ENABLE_SR_ILP (1 << 17)
+#define SR0_ENABLE_SR_ALP (1 << 18)
+#define SR0_ENABLE_SR_HT (1 << 19)
+#define SR0_ALLOW_PIC (3 << 20)
+#define SR0_ENB_PMU_MEM_DISABLE (1 << 30)
+
+/* SR Control0 bits for 4369 */
+#define SR0_4369_SR_ENG_EN_MASK 0x1
+#define SR0_4369_SR_ENG_EN_SHIFT 0
+#define SR0_4369_SR_ENG_CLK_EN (1 << 1)
+#define SR0_4369_RSRC_TRIGGER (0xC << 2)
+#define SR0_4369_WD_MEM_MIN_DIV (0x2 << 6)
+#define SR0_4369_INVERT_SR_CLK (1 << 11)
+#define SR0_4369_MEM_STBY_ALLOW (1 << 16)
+#define SR0_4369_ENABLE_SR_ILP (1 << 17)
+#define SR0_4369_ENABLE_SR_ALP (1 << 18)
+#define SR0_4369_ENABLE_SR_HT (1 << 19)
+#define SR0_4369_ALLOW_PIC (3 << 20)
+#define SR0_4369_ENB_PMU_MEM_DISABLE (1 << 30)
+
+/* SR Control0 bits for 4378 */
+#define SR0_4378_SR_ENG_EN_MASK 0x1
+#define SR0_4378_SR_ENG_EN_SHIFT 0
+#define SR0_4378_SR_ENG_CLK_EN (1 << 1)
+#define SR0_4378_RSRC_TRIGGER (0xC << 2)
+#define SR0_4378_WD_MEM_MIN_DIV (0x2 << 6)
+#define SR0_4378_INVERT_SR_CLK (1 << 11)
+#define SR0_4378_MEM_STBY_ALLOW (1 << 16)
+#define SR0_4378_ENABLE_SR_ILP (1 << 17)
+#define SR0_4378_ENABLE_SR_ALP (1 << 18)
+#define SR0_4378_ENABLE_SR_HT (1 << 19)
+#define SR0_4378_ALLOW_PIC (3 << 20)
+#define SR0_4378_ENB_PMU_MEM_DISABLE (1 << 30)
+
+/* SR Control0 bits for 4387 */
+#define SR0_4387_SR_ENG_EN_MASK 0x1
+#define SR0_4387_SR_ENG_EN_SHIFT 0
+#define SR0_4387_SR_ENG_CLK_EN (1 << 1)
+#define SR0_4387_RSRC_TRIGGER (0xC << 2)
+#define SR0_4387_WD_MEM_MIN_DIV (0x2 << 6)
+#define SR0_4387_WD_MEM_MIN_DIV_AUX (0x4 << 6)
+#define SR0_4387_INVERT_SR_CLK (1 << 11)
+#define SR0_4387_MEM_STBY_ALLOW (1 << 16)
+#define SR0_4387_ENABLE_SR_ILP (1 << 17)
+#define SR0_4387_ENABLE_SR_ALP (1 << 18)
+#define SR0_4387_ENABLE_SR_HT (1 << 19)
+#define SR0_4387_ALLOW_PIC (3 << 20)
+#define SR0_4387_ENB_PMU_MEM_DISABLE (1 << 30)
+
+/* SR Control0 bits for 4388 */
+#define SR0_4388_SR_ENG_EN_MASK 0x1u
+#define SR0_4388_SR_ENG_EN_SHIFT 0
+#define SR0_4388_SR_ENG_CLK_EN (1u << 1u)
+#define SR0_4388_RSRC_TRIGGER (0xCu << 2u)
+#define SR0_4388_WD_MEM_MIN_DIV (0x2u << 6u)
+#define SR0_4388_INVERT_SR_CLK (1u << 11u)
+#define SR0_4388_MEM_STBY_ALLOW (1u << 16u)
+#define SR0_4388_ENABLE_SR_ILP (1u << 17u)
+#define SR0_4388_ENABLE_SR_ALP (1u << 18u)
+#define SR0_4388_ENABLE_SR_HT (1u << 19u)
+#define SR0_4388_ALLOW_PIC (3u << 20u)
+#define SR0_4388_ENB_PMU_MEM_DISABLE (1u << 30u)
+
+/* SR Control0 bits for 4389 */
+#define SR0_4389_SR_ENG_EN_MASK 0x1
+#define SR0_4389_SR_ENG_EN_SHIFT 0
+#define SR0_4389_SR_ENG_CLK_EN (1 << 1)
+#define SR0_4389_RSRC_TRIGGER (0xC << 2)
+#define SR0_4389_WD_MEM_MIN_DIV (0x2 << 6)
+#define SR0_4389_INVERT_SR_CLK (1 << 11)
+#define SR0_4389_MEM_STBY_ALLOW (1 << 16)
+#define SR0_4389_ENABLE_SR_ILP (1 << 17)
+#define SR0_4389_ENABLE_SR_ALP (1 << 18)
+#define SR0_4389_ENABLE_SR_HT (1 << 19)
+#define SR0_4389_ALLOW_PIC (3 << 20)
+#define SR0_4389_ENB_PMU_MEM_DISABLE (1 << 30)
+
+/* SR Control1 bits */
+#define SR1_INIT_ADDR_MASK (0x000003FFu)
+#define SR1_SELFTEST_ENB_MASK (0x00004000u)
+#define SR1_SELFTEST_ERR_INJCT_ENB_MASK (0x00008000u)
+#define SR1_SELFTEST_ERR_INJCT_PRD_MASK (0xFFFF0000u)
+#define SR1_SELFTEST_ERR_INJCT_PRD_SHIFT (16u)
+
+/* SR Control2 bits */
+#define SR2_INIT_ADDR_LONG_MASK (0x00003FFFu)
+
+#define SR_SELFTEST_ERR_INJCT_PRD (0x10u)
+
+/* SR Status1 bits */
+#define SR_STS1_SR_ERR_MASK (0x00000001u)
+
+/* =========== LHL regs =========== */
+/* 4369 LHL register settings */
+#define LHL4369_UP_CNT 0
+#define LHL4369_DN_CNT 2
+#define LHL4369_PWRSW_EN_DWN_CNT (LHL4369_DN_CNT + 2)
+#define LHL4369_ISO_EN_DWN_CNT (LHL4369_PWRSW_EN_DWN_CNT + 3)
+#define LHL4369_SLB_EN_DWN_CNT (LHL4369_ISO_EN_DWN_CNT + 1)
+#define LHL4369_ASR_CLK4M_DIS_DWN_CNT (LHL4369_DN_CNT)
+#define LHL4369_ASR_LPPFM_MODE_DWN_CNT (LHL4369_DN_CNT)
+#define LHL4369_ASR_MODE_SEL_DWN_CNT (LHL4369_DN_CNT)
+#define LHL4369_ASR_MANUAL_MODE_DWN_CNT (LHL4369_DN_CNT)
+#define LHL4369_ASR_ADJ_DWN_CNT (LHL4369_DN_CNT)
+#define LHL4369_ASR_OVERI_DIS_DWN_CNT (LHL4369_DN_CNT)
+#define LHL4369_ASR_TRIM_ADJ_DWN_CNT (LHL4369_DN_CNT)
+#define LHL4369_VDDC_SW_DIS_DWN_CNT (LHL4369_SLB_EN_DWN_CNT + 1)
+#define LHL4369_VMUX_ASR_SEL_DWN_CNT (LHL4369_VDDC_SW_DIS_DWN_CNT + 1)
+#define LHL4369_CSR_ADJ_DWN_CNT (LHL4369_VMUX_ASR_SEL_DWN_CNT + 1)
+#define LHL4369_CSR_MODE_DWN_CNT (LHL4369_VMUX_ASR_SEL_DWN_CNT + 1)
+#define LHL4369_CSR_OVERI_DIS_DWN_CNT (LHL4369_VMUX_ASR_SEL_DWN_CNT + 1)
+#define LHL4369_HPBG_CHOP_DIS_DWN_CNT (LHL4369_VMUX_ASR_SEL_DWN_CNT + 1)
+#define LHL4369_SRBG_REF_SEL_DWN_CNT (LHL4369_VMUX_ASR_SEL_DWN_CNT + 1)
+#define LHL4369_PFM_PWR_SLICE_DWN_CNT (LHL4369_VMUX_ASR_SEL_DWN_CNT + 1)
+#define LHL4369_CSR_TRIM_ADJ_DWN_CNT (LHL4369_VMUX_ASR_SEL_DWN_CNT + 1)
+#define LHL4369_CSR_VOLTAGE_DWN_CNT (LHL4369_VMUX_ASR_SEL_DWN_CNT + 1)
+#define LHL4369_HPBG_PU_EN_DWN_CNT (LHL4369_CSR_MODE_DWN_CNT + 1)
+
+#define LHL4369_HPBG_PU_EN_UP_CNT (LHL4369_UP_CNT + 1)
+#define LHL4369_CSR_ADJ_UP_CNT (LHL4369_HPBG_PU_EN_UP_CNT + 1)
+#define LHL4369_CSR_MODE_UP_CNT (LHL4369_HPBG_PU_EN_UP_CNT + 1)
+#define LHL4369_CSR_OVERI_DIS_UP_CNT (LHL4369_HPBG_PU_EN_UP_CNT + 1)
+#define LHL4369_HPBG_CHOP_DIS_UP_CNT (LHL4369_HPBG_PU_EN_UP_CNT + 1)
+#define LHL4369_SRBG_REF_SEL_UP_CNT (LHL4369_HPBG_PU_EN_UP_CNT + 1)
+#define LHL4369_PFM_PWR_SLICE_UP_CNT (LHL4369_HPBG_PU_EN_UP_CNT + 1)
+#define LHL4369_CSR_TRIM_ADJ_UP_CNT (LHL4369_HPBG_PU_EN_UP_CNT + 1)
+#define LHL4369_CSR_VOLTAGE_UP_CNT (LHL4369_HPBG_PU_EN_UP_CNT + 1)
+#define LHL4369_VMUX_ASR_SEL_UP_CNT (LHL4369_CSR_MODE_UP_CNT + 1)
+#define LHL4369_VDDC_SW_DIS_UP_CNT (LHL4369_VMUX_ASR_SEL_UP_CNT + 1)
+#define LHL4369_SLB_EN_UP_CNT (LHL4369_VDDC_SW_DIS_UP_CNT + 8)
+#define LHL4369_ISO_EN_UP_CNT (LHL4369_SLB_EN_UP_CNT + 1)
+#define LHL4369_PWRSW_EN_UP_CNT (LHL4369_ISO_EN_UP_CNT + 3)
+#define LHL4369_ASR_ADJ_UP_CNT (LHL4369_PWRSW_EN_UP_CNT + 1)
+#define LHL4369_ASR_CLK4M_DIS_UP_CNT (LHL4369_PWRSW_EN_UP_CNT + 1)
+#define LHL4369_ASR_LPPFM_MODE_UP_CNT (LHL4369_PWRSW_EN_UP_CNT + 1)
+#define LHL4369_ASR_MODE_SEL_UP_CNT (LHL4369_PWRSW_EN_UP_CNT + 1)
+#define LHL4369_ASR_MANUAL_MODE_UP_CNT (LHL4369_PWRSW_EN_UP_CNT + 1)
+#define LHL4369_ASR_OVERI_DIS_UP_CNT (LHL4369_PWRSW_EN_UP_CNT + 1)
+#define LHL4369_ASR_TRIM_ADJ_UP_CNT (LHL4369_PWRSW_EN_UP_CNT + 1)
+
+/* 4362 LHL register settings */
+#define LHL4362_UP_CNT (0u)
+#define LHL4362_DN_CNT (2u)
+#define LHL4362_PWRSW_EN_DWN_CNT (LHL4362_DN_CNT + 2)
+#define LHL4362_ISO_EN_DWN_CNT (LHL4362_PWRSW_EN_DWN_CNT + 3)
+#define LHL4362_SLB_EN_DWN_CNT (LHL4362_ISO_EN_DWN_CNT + 1)
+#define LHL4362_ASR_CLK4M_DIS_DWN_CNT (LHL4362_DN_CNT)
+#define LHL4362_ASR_LPPFM_MODE_DWN_CNT (LHL4362_DN_CNT)
+#define LHL4362_ASR_MODE_SEL_DWN_CNT (LHL4362_DN_CNT)
+#define LHL4362_ASR_MANUAL_MODE_DWN_CNT (LHL4362_DN_CNT)
+#define LHL4362_ASR_ADJ_DWN_CNT (LHL4362_DN_CNT)
+#define LHL4362_ASR_OVERI_DIS_DWN_CNT (LHL4362_DN_CNT)
+#define LHL4362_ASR_TRIM_ADJ_DWN_CNT (LHL4362_DN_CNT)
+#define LHL4362_VDDC_SW_DIS_DWN_CNT (LHL4362_SLB_EN_DWN_CNT + 1)
+#define LHL4362_VMUX_ASR_SEL_DWN_CNT (LHL4362_VDDC_SW_DIS_DWN_CNT + 1)
+#define LHL4362_CSR_ADJ_DWN_CNT (LHL4362_VMUX_ASR_SEL_DWN_CNT + 1)
+#define LHL4362_CSR_MODE_DWN_CNT (LHL4362_VMUX_ASR_SEL_DWN_CNT + 1)
+#define LHL4362_CSR_OVERI_DIS_DWN_CNT (LHL4362_VMUX_ASR_SEL_DWN_CNT + 1)
+#define LHL4362_HPBG_CHOP_DIS_DWN_CNT (LHL4362_VMUX_ASR_SEL_DWN_CNT + 1)
+#define LHL4362_SRBG_REF_SEL_DWN_CNT (LHL4362_VMUX_ASR_SEL_DWN_CNT + 1)
+#define LHL4362_PFM_PWR_SLICE_DWN_CNT (LHL4362_VMUX_ASR_SEL_DWN_CNT + 1)
+#define LHL4362_CSR_TRIM_ADJ_DWN_CNT (LHL4362_VMUX_ASR_SEL_DWN_CNT + 1)
+#define LHL4362_CSR_VOLTAGE_DWN_CNT (LHL4362_VMUX_ASR_SEL_DWN_CNT + 1)
+#define LHL4362_HPBG_PU_EN_DWN_CNT (LHL4362_CSR_MODE_DWN_CNT + 1)
+
+#define LHL4362_HPBG_PU_EN_UP_CNT (LHL4362_UP_CNT + 1)
+#define LHL4362_CSR_ADJ_UP_CNT (LHL4362_HPBG_PU_EN_UP_CNT + 1)
+#define LHL4362_CSR_MODE_UP_CNT (LHL4362_HPBG_PU_EN_UP_CNT + 1)
+#define LHL4362_CSR_OVERI_DIS_UP_CNT (LHL4362_HPBG_PU_EN_UP_CNT + 1)
+#define LHL4362_HPBG_CHOP_DIS_UP_CNT (LHL4362_HPBG_PU_EN_UP_CNT + 1)
+#define LHL4362_SRBG_REF_SEL_UP_CNT (LHL4362_HPBG_PU_EN_UP_CNT + 1)
+#define LHL4362_PFM_PWR_SLICE_UP_CNT (LHL4362_HPBG_PU_EN_UP_CNT + 1)
+#define LHL4362_CSR_TRIM_ADJ_UP_CNT (LHL4362_HPBG_PU_EN_UP_CNT + 1)
+#define LHL4362_CSR_VOLTAGE_UP_CNT (LHL4362_HPBG_PU_EN_UP_CNT + 1)
+#define LHL4362_VMUX_ASR_SEL_UP_CNT (LHL4362_CSR_MODE_UP_CNT + 1)
+#define LHL4362_VDDC_SW_DIS_UP_CNT (LHL4362_VMUX_ASR_SEL_UP_CNT + 1)
+#define LHL4362_SLB_EN_UP_CNT (LHL4362_VDDC_SW_DIS_UP_CNT + 8)
+#define LHL4362_ISO_EN_UP_CNT (LHL4362_SLB_EN_UP_CNT + 1)
+#define LHL4362_PWRSW_EN_UP_CNT (LHL4362_ISO_EN_UP_CNT + 3)
+#define LHL4362_ASR_ADJ_UP_CNT (LHL4362_PWRSW_EN_UP_CNT + 1)
+#define LHL4362_ASR_CLK4M_DIS_UP_CNT (LHL4362_PWRSW_EN_UP_CNT + 1)
+#define LHL4362_ASR_LPPFM_MODE_UP_CNT (LHL4362_PWRSW_EN_UP_CNT + 1)
+#define LHL4362_ASR_MODE_SEL_UP_CNT (LHL4362_PWRSW_EN_UP_CNT + 1)
+#define LHL4362_ASR_MANUAL_MODE_UP_CNT (LHL4362_PWRSW_EN_UP_CNT + 1)
+#define LHL4362_ASR_OVERI_DIS_UP_CNT (LHL4362_PWRSW_EN_UP_CNT + 1)
+#define LHL4362_ASR_TRIM_ADJ_UP_CNT (LHL4362_PWRSW_EN_UP_CNT + 1)
+
+/* 4378 LHL register settings */
+#define LHL4378_CSR_OVERI_DIS_DWN_CNT 5u
+#define LHL4378_CSR_MODE_DWN_CNT 5u
+#define LHL4378_CSR_ADJ_DWN_CNT 5u
+
+#define LHL4378_CSR_OVERI_DIS_UP_CNT 1u
+#define LHL4378_CSR_MODE_UP_CNT 1u
+#define LHL4378_CSR_ADJ_UP_CNT 1u
+
+#define LHL4378_VDDC_SW_DIS_DWN_CNT 3u
+#define LHL4378_ASR_ADJ_DWN_CNT 3u
+#define LHL4378_HPBG_CHOP_DIS_DWN_CNT 0
+
+#define LHL4378_VDDC_SW_DIS_UP_CNT 3u
+#define LHL4378_ASR_ADJ_UP_CNT 1u
+#define LHL4378_HPBG_CHOP_DIS_UP_CNT 0
+
+#define LHL4378_ASR_MANUAL_MODE_DWN_CNT 5u
+#define LHL4378_ASR_MODE_SEL_DWN_CNT 5u
+#define LHL4378_ASR_LPPFM_MODE_DWN_CNT 5u
+#define LHL4378_ASR_CLK4M_DIS_DWN_CNT 0
+
+#define LHL4378_ASR_MANUAL_MODE_UP_CNT 1u
+#define LHL4378_ASR_MODE_SEL_UP_CNT 1u
+#define LHL4378_ASR_LPPFM_MODE_UP_CNT 1u
+#define LHL4378_ASR_CLK4M_DIS_UP_CNT 0
+
+#define LHL4378_PFM_PWR_SLICE_DWN_CNT 5u
+#define LHL4378_ASR_OVERI_DIS_DWN_CNT 5u
+#define LHL4378_SRBG_REF_SEL_DWN_CNT 5u
+#define LHL4378_HPBG_PU_EN_DWN_CNT 6u
+
+#define LHL4378_PFM_PWR_SLICE_UP_CNT 1u
+#define LHL4378_ASR_OVERI_DIS_UP_CNT 1u
+#define LHL4378_SRBG_REF_SEL_UP_CNT 1u
+#define LHL4378_HPBG_PU_EN_UP_CNT 0
+
+#define LHL4378_CSR_TRIM_ADJ_CNT_SHIFT (16u)
+#define LHL4378_CSR_TRIM_ADJ_CNT_MASK (0x3Fu << LHL4378_CSR_TRIM_ADJ_CNT_SHIFT)
+#define LHL4378_CSR_TRIM_ADJ_DWN_CNT 0
+#define LHL4378_CSR_TRIM_ADJ_UP_CNT 0
+
+#define LHL4378_ASR_TRIM_ADJ_CNT_SHIFT (0u)
+#define LHL4378_ASR_TRIM_ADJ_CNT_MASK (0x3Fu << LHL4378_ASR_TRIM_ADJ_CNT_SHIFT)
+#define LHL4378_ASR_TRIM_ADJ_UP_CNT 0
+#define LHL4378_ASR_TRIM_ADJ_DWN_CNT 0
+
+#define LHL4378_PWRSW_EN_DWN_CNT 0
+#define LHL4378_SLB_EN_DWN_CNT 2u
+#define LHL4378_ISO_EN_DWN_CNT 1u
+
+#define LHL4378_VMUX_ASR_SEL_DWN_CNT 4u
+
+#define LHL4378_PWRSW_EN_UP_CNT 6u
+#define LHL4378_SLB_EN_UP_CNT 4u
+#define LHL4378_ISO_EN_UP_CNT 5u
+
+#define LHL4378_VMUX_ASR_SEL_UP_CNT 2u
+
+#define LHL4387_VMUX_ASR_SEL_DWN_CNT (8u)
+#define LHL4387_VMUX_ASR_SEL_UP_CNT (0x14u)
+
+/* 4387 LHL register settings for top off mode */
+#define LHL4387_TO_CSR_OVERI_DIS_DWN_CNT 3u
+#define LHL4387_TO_CSR_MODE_DWN_CNT 3u
+#define LHL4387_TO_CSR_ADJ_DWN_CNT 0
+
+#define LHL4387_TO_CSR_OVERI_DIS_UP_CNT 1u
+#define LHL4387_TO_CSR_MODE_UP_CNT 1u
+#define LHL4387_TO_CSR_ADJ_UP_CNT 0
+
+#define LHL4387_TO_VDDC_SW_DIS_DWN_CNT 4u
+#define LHL4387_TO_ASR_ADJ_DWN_CNT 3u
+#define LHL4387_TO_LP_MODE_DWN_CNT 6u
+#define LHL4387_TO_HPBG_CHOP_DIS_DWN_CNT 3u
+
+#define LHL4387_TO_VDDC_SW_DIS_UP_CNT 0
+#define LHL4387_TO_ASR_ADJ_UP_CNT 1u
+#define LHL4387_TO_LP_MODE_UP_CNT 0
+#define LHL4387_TO_HPBG_CHOP_DIS_UP_CNT 1u
+
+#define LHL4387_TO_ASR_MANUAL_MODE_DWN_CNT 3u
+#define LHL4387_TO_ASR_MODE_SEL_DWN_CNT 3u
+#define LHL4387_TO_ASR_LPPFM_MODE_DWN_CNT 3u
+#define LHL4387_TO_ASR_CLK4M_DIS_DWN_CNT 3u
+
+#define LHL4387_TO_ASR_MANUAL_MODE_UP_CNT 1u
+#define LHL4387_TO_ASR_MODE_SEL_UP_CNT 1u
+#define LHL4387_TO_ASR_LPPFM_MODE_UP_CNT 1u
+#define LHL4387_TO_ASR_CLK4M_DIS_UP_CNT 1u
+
+#define LHL4387_TO_PFM_PWR_SLICE_DWN_CNT 3u
+#define LHL4387_TO_ASR_OVERI_DIS_DWN_CNT 3u
+#define LHL4387_TO_SRBG_REF_SEL_DWN_CNT 3u
+#define LHL4387_TO_HPBG_PU_EN_DWN_CNT 4u
+
+#define LHL4387_TO_PFM_PWR_SLICE_UP_CNT 1u
+#define LHL4387_TO_ASR_OVERI_DIS_UP_CNT 1u
+#define LHL4387_TO_SRBG_REF_SEL_UP_CNT 1u
+#define LHL4387_TO_HPBG_PU_EN_UP_CNT 1u
+
+#define LHL4387_TO_PWRSW_EN_DWN_CNT 0
+#define LHL4387_TO_SLB_EN_DWN_CNT 4u
+#define LHL4387_TO_ISO_EN_DWN_CNT 2u
+#define LHL4387_TO_TOP_SLP_EN_DWN_CNT 0
+
+#define LHL4387_TO_PWRSW_EN_UP_CNT 0x16u
+#define LHL4387_TO_SLB_EN_UP_CNT 0xeu
+#define LHL4387_TO_ISO_EN_UP_CNT 0x10u
+#define LHL4387_TO_TOP_SLP_EN_UP_CNT 2u
+
+/* MacResourceReqTimer0/1 */
+#define MAC_RSRC_REQ_TIMER_INT_ENAB_SHIFT 24
+#define MAC_RSRC_REQ_TIMER_FORCE_ALP_SHIFT 26
+#define MAC_RSRC_REQ_TIMER_FORCE_HT_SHIFT 27
+#define MAC_RSRC_REQ_TIMER_FORCE_HQ_SHIFT 28
+#define MAC_RSRC_REQ_TIMER_CLKREQ_GRP_SEL_SHIFT 29
+
+/* for pmu rev32 and higher */
+#define PMU32_MAC_MAIN_RSRC_REQ_TIMER ((1 << MAC_RSRC_REQ_TIMER_INT_ENAB_SHIFT) | \
+ (1 << MAC_RSRC_REQ_TIMER_FORCE_ALP_SHIFT) | \
+ (1 << MAC_RSRC_REQ_TIMER_FORCE_HT_SHIFT) | \
+ (1 << MAC_RSRC_REQ_TIMER_FORCE_HQ_SHIFT) | \
+ (0 << MAC_RSRC_REQ_TIMER_CLKREQ_GRP_SEL_SHIFT))
+
+#define PMU32_MAC_AUX_RSRC_REQ_TIMER ((1 << MAC_RSRC_REQ_TIMER_INT_ENAB_SHIFT) | \
+ (1 << MAC_RSRC_REQ_TIMER_FORCE_ALP_SHIFT) | \
+ (1 << MAC_RSRC_REQ_TIMER_FORCE_HT_SHIFT) | \
+ (1 << MAC_RSRC_REQ_TIMER_FORCE_HQ_SHIFT) | \
+ (0 << MAC_RSRC_REQ_TIMER_CLKREQ_GRP_SEL_SHIFT))
+
+/* for pmu rev38 and higher */
+#define PMU32_MAC_SCAN_RSRC_REQ_TIMER ((1u << MAC_RSRC_REQ_TIMER_INT_ENAB_SHIFT) | \
+ (1u << MAC_RSRC_REQ_TIMER_FORCE_ALP_SHIFT) | \
+ (1u << MAC_RSRC_REQ_TIMER_FORCE_HT_SHIFT) | \
+ (1u << MAC_RSRC_REQ_TIMER_FORCE_HQ_SHIFT) | \
+ (0u << MAC_RSRC_REQ_TIMER_CLKREQ_GRP_SEL_SHIFT))
+
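+/* Editor's note (not part of the original patch): with INT_ENAB (bit 24),
+ * FORCE_ALP (26), FORCE_HT (27) and FORCE_HQ (28) set and CLKREQ_GRP_SEL
+ * clear, the composites above evaluate to 0x1D000000, matching the
+ * literal PMU_MACCORE_0_RES_REQ_TIMER value further below; the
+ * compile-time check makes that arithmetic explicit.
+ */
+#ifdef BCMDHD_DOC_EXAMPLES /* hypothetical guard */
+typedef char pmu32_mac_rsrc_req_timer_check[
+	(PMU32_MAC_MAIN_RSRC_REQ_TIMER == 0x1D000000) ? 1 : -1];
+#endif /* BCMDHD_DOC_EXAMPLES */
+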
+/* 4369 PMU resources; parameters taken from:
+ * http://www.sj.broadcom.com/projects/BCM4369/gallery_backend.RC6.0/design/backplane/pmu_params.xls
+ */
+#define RES4369_DUMMY 0
+#define RES4369_ABUCK 1
+#define RES4369_PMU_SLEEP 2
+#define RES4369_MISCLDO_PU 3
+#define RES4369_LDO3P3_PU 4
+#define RES4369_FAST_LPO_AVAIL 5
+#define RES4369_XTAL_PU 6
+#define RES4369_XTAL_STABLE 7
+#define RES4369_PWRSW_DIG 8
+#define RES4369_SR_DIG 9
+#define RES4369_SLEEP_DIG 10
+#define RES4369_PWRSW_AUX 11
+#define RES4369_SR_AUX 12
+#define RES4369_SLEEP_AUX 13
+#define RES4369_PWRSW_MAIN 14
+#define RES4369_SR_MAIN 15
+#define RES4369_SLEEP_MAIN 16
+#define RES4369_DIG_CORE_RDY 17
+#define RES4369_CORE_RDY_AUX 18
+#define RES4369_ALP_AVAIL 19
+#define RES4369_RADIO_AUX_PU 20
+#define RES4369_MINIPMU_AUX_PU 21
+#define RES4369_CORE_RDY_MAIN 22
+#define RES4369_RADIO_MAIN_PU 23
+#define RES4369_MINIPMU_MAIN_PU 24
+#define RES4369_PCIE_EP_PU 25
+#define RES4369_COLD_START_WAIT 26
+#define RES4369_ARMHTAVAIL 27
+#define RES4369_HT_AVAIL 28
+#define RES4369_MACPHY_AUX_CLK_AVAIL 29
+#define RES4369_MACPHY_MAIN_CLK_AVAIL 30
+#define RES4369_RESERVED_31 31
+
+#define CST4369_CHIPMODE_SDIOD(cs) (((cs) & (1 << 6)) != 0) /* SDIO */
+#define CST4369_CHIPMODE_PCIE(cs) (((cs) & (1 << 7)) != 0) /* PCIE */
+#define CST4369_SPROM_PRESENT 0x00000010
+
+#define PMU_4369_MACCORE_0_RES_REQ_MASK 0x3FCBF7FF
+#define PMU_4369_MACCORE_1_RES_REQ_MASK 0x7FFB3647
+
+/* 4362 PMU resources; parameters taken from:
+ * http://www.sj.broadcom.com/projects/BCM4362/gallery_backend.RC1.mar_15_2017/design/backplane/
+ * pmu_params.xls
+ */
+#define RES4362_DUMMY (0u)
+#define RES4362_ABUCK (1u)
+#define RES4362_PMU_SLEEP (2u)
+#define RES4362_MISCLDO_PU (3u)
+#define RES4362_LDO3P3_PU (4u)
+#define RES4362_FAST_LPO_AVAIL (5u)
+#define RES4362_XTAL_PU (6u)
+#define RES4362_XTAL_STABLE (7u)
+#define RES4362_PWRSW_DIG (8u)
+#define RES4362_SR_DIG (9u)
+#define RES4362_SLEEP_DIG (10u)
+#define RES4362_PWRSW_AUX (11u)
+#define RES4362_SR_AUX (12u)
+#define RES4362_SLEEP_AUX (13u)
+#define RES4362_PWRSW_MAIN (14u)
+#define RES4362_SR_MAIN (15u)
+#define RES4362_SLEEP_MAIN (16u)
+#define RES4362_DIG_CORE_RDY (17u)
+#define RES4362_CORE_RDY_AUX (18u)
+#define RES4362_ALP_AVAIL (19u)
+#define RES4362_RADIO_AUX_PU (20u)
+#define RES4362_MINIPMU_AUX_PU (21u)
+#define RES4362_CORE_RDY_MAIN (22u)
+#define RES4362_RADIO_MAIN_PU (23u)
+#define RES4362_MINIPMU_MAIN_PU (24u)
+#define RES4362_PCIE_EP_PU (25u)
+#define RES4362_COLD_START_WAIT (26u)
+#define RES4362_ARMHTAVAIL (27u)
+#define RES4362_HT_AVAIL (28u)
+#define RES4362_MACPHY_AUX_CLK_AVAIL (29u)
+#define RES4362_MACPHY_MAIN_CLK_AVAIL (30u)
+#define RES4362_RESERVED_31 (31u)
+
+#define CST4362_CHIPMODE_SDIOD(cs) (((cs) & (1 << 6)) != 0) /* SDIO */
+#define CST4362_CHIPMODE_PCIE(cs) (((cs) & (1 << 7)) != 0) /* PCIE */
+#define CST4362_SPROM_PRESENT (0x00000010u)
+
+#define PMU_4362_MACCORE_0_RES_REQ_MASK (0x3FCBF7FFu)
+#define PMU_4362_MACCORE_1_RES_REQ_MASK (0x7FFB3647u)
+
+#define PMU_MACCORE_0_RES_REQ_TIMER 0x1d000000
+#define PMU_MACCORE_0_RES_REQ_MASK 0x5FF2364F
+
+#define PMU43012_MAC_RES_REQ_TIMER 0x1D000000
+#define PMU43012_MAC_RES_REQ_MASK 0x3FBBF7FF
+
+#define PMU_MACCORE_1_RES_REQ_TIMER 0x1d000000
+#define PMU_MACCORE_1_RES_REQ_MASK 0x5FF2364F
+
+/* defines to detect active host interface in use */
+#define CHIP_HOSTIF_PCIEMODE 0x1
+#define CHIP_HOSTIF_USBMODE 0x2
+#define CHIP_HOSTIF_SDIOMODE 0x4
+#define CHIP_HOSTIF_PCIE(sih) (si_chip_hostif(sih) == CHIP_HOSTIF_PCIEMODE)
+#define CHIP_HOSTIF_USB(sih) (si_chip_hostif(sih) == CHIP_HOSTIF_USBMODE)
+#define CHIP_HOSTIF_SDIO(sih) (si_chip_hostif(sih) == CHIP_HOSTIF_SDIOMODE)
+
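+/* Editor's sketch (not part of the original patch): the host-interface
+ * checks are equality tests on si_chip_hostif(), so bus-specific setup
+ * can branch on them directly; si_t is the codebase's silicon handle.
+ */
+#ifdef BCMDHD_DOC_EXAMPLES /* hypothetical guard */
+static void
+example_hostif_dispatch(si_t *sih)
+{
+	if (CHIP_HOSTIF_PCIE(sih)) {
+		/* PCIe-specific bring-up would go here */
+	} else if (CHIP_HOSTIF_SDIO(sih)) {
+		/* SDIO-specific bring-up would go here */
+	} else if (CHIP_HOSTIF_USB(sih)) {
+		/* USB-specific bring-up would go here */
+	}
+}
+#endif /* BCMDHD_DOC_EXAMPLES */
+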
+#define PATCHTBL_SIZE (0x800)
+#define CR4_4335_RAM_BASE (0x180000)
+#define CR4_4345_LT_C0_RAM_BASE (0x1b0000)
+#define CR4_4345_GE_C0_RAM_BASE (0x198000)
+#define CR4_4349_RAM_BASE (0x180000)
+#define CR4_4349_RAM_BASE_FROM_REV_9 (0x160000)
+#define CR4_4350_RAM_BASE (0x180000)
+#define CR4_4360_RAM_BASE (0x0)
+#define CR4_43602_RAM_BASE (0x180000)
+
+#define CR4_4347_RAM_BASE (0x170000)
+#define CR4_4362_RAM_BASE (0x170000)
+#define CR4_4364_RAM_BASE (0x160000)
+#define CR4_4369_RAM_BASE (0x170000)
+#define CR4_4377_RAM_BASE (0x170000)
+#define CR4_43751_RAM_BASE (0x170000)
+#define CR4_43752_RAM_BASE (0x170000)
+#define CR4_4376_RAM_BASE (0x352000)
+#define CR4_4378_RAM_BASE (0x352000)
+#define CR4_4387_RAM_BASE (0x740000)
+#define CR4_4385_RAM_BASE (0x740000)
+#define CA7_4388_RAM_BASE (0x200000)
+#define CA7_4389_RAM_BASE (0x200000)
+#define CA7_4385_RAM_BASE (0x200000)
+
+/* Physical memory in 4388a0 HWA is 64KB (8192 x 64 bits) even though
+ * the memory space allows 192KB (0x1850_0000 - 0x1852_FFFF)
+ */
+#define HWA_MEM_BASE_4388 (0x18520000u)
+#define HWA_MEM_SIZE_4388 (0x10000u)
+
+/* 43012 PMU resources based on pmu_params.xls - Start */
+#define RES43012_MEMLPLDO_PU 0
+#define RES43012_PMU_SLEEP 1
+#define RES43012_FAST_LPO 2
+#define RES43012_BTLPO_3P3 3
+#define RES43012_SR_POK 4
+#define RES43012_DUMMY_PWRSW 5
+#define RES43012_DUMMY_LDO3P3 6
+#define RES43012_DUMMY_BT_LDO3P3 7
+#define RES43012_DUMMY_RADIO 8
+#define RES43012_VDDB_VDDRET 9
+#define RES43012_HV_LDO3P3 10
+#define RES43012_OTP_PU 11
+#define RES43012_XTAL_PU 12
+#define RES43012_SR_CLK_START 13
+#define RES43012_XTAL_STABLE 14
+#define RES43012_FCBS 15
+#define RES43012_CBUCK_MODE 16
+#define RES43012_CORE_READY 17
+#define RES43012_ILP_REQ 18
+#define RES43012_ALP_AVAIL 19
+#define RES43012_RADIOLDO_1P8 20
+#define RES43012_MINI_PMU 21
+#define RES43012_UNUSED 22
+#define RES43012_SR_SAVE_RESTORE 23
+#define RES43012_PHY_PWRSW 24
+#define RES43012_VDDB_CLDO 25
+#define RES43012_SUBCORE_PWRSW 26
+#define RES43012_SR_SLEEP 27
+#define RES43012_HT_START 28
+#define RES43012_HT_AVAIL 29
+#define RES43012_MACPHY_CLK_AVAIL 30
+#define CST43012_SPROM_PRESENT 0x00000010
+
+/* SR Control0 bits */
+#define SR0_43012_SR_ENG_EN_MASK 0x1u
+#define SR0_43012_SR_ENG_EN_SHIFT 0u
+#define SR0_43012_SR_ENG_CLK_EN (1u << 1u)
+#define SR0_43012_SR_RSRC_TRIGGER (0xCu << 2u)
+#define SR0_43012_SR_WD_MEM_MIN_DIV (0x3u << 6u)
+#define SR0_43012_SR_MEM_STBY_ALLOW_MSK (1u << 16u)
+#define SR0_43012_SR_MEM_STBY_ALLOW_SHIFT 16u
+#define SR0_43012_SR_ENABLE_ILP (1u << 17u)
+#define SR0_43012_SR_ENABLE_ALP (1u << 18u)
+#define SR0_43012_SR_ENABLE_HT (1u << 19u)
+#define SR0_43012_SR_ALLOW_PIC (3u << 20u)
+#define SR0_43012_SR_PMU_MEM_DISABLE (1u << 30u)
+#define CC_43012_VDDM_PWRSW_EN_MASK (1u << 20u)
+#define CC_43012_VDDM_PWRSW_EN_SHIFT (20u)
+#define CC_43012_SDIO_AOS_WAKEUP_MASK (1u << 24u)
+#define CC_43012_SDIO_AOS_WAKEUP_SHIFT (24u)
+
+/* 43012 - SR ASM offset at 5KB (0xA 512-byte blocks) */
+#define SR1_43012_SR_INIT_ADDR_MASK 0x3ffu
+#define SR1_43012_SR_ASM_ADDR 0xAu
+
+/* PLL usage in 43012 */
+#define PMU43012_PLL0_PC0_NDIV_INT_MASK 0x0000003fu
+#define PMU43012_PLL0_PC0_NDIV_INT_SHIFT 0u
+#define PMU43012_PLL0_PC0_NDIV_FRAC_MASK 0xfffffc00u
+#define PMU43012_PLL0_PC0_NDIV_FRAC_SHIFT 10u
+#define PMU43012_PLL0_PC3_PDIV_MASK 0x00003c00u
+#define PMU43012_PLL0_PC3_PDIV_SHIFT 10u
+#define PMU43012_PLL_NDIV_FRAC_BITS 20u
+#define PMU43012_PLL_P_DIV_SCALE_BITS 10u
+
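+/* Editor's sketch (not part of the original patch): the pllcontrol
+ * fields follow the usual mask/shift pattern, with the NDIV fraction
+ * held as fixed point scaled by 2^PMU43012_PLL_NDIV_FRAC_BITS; reading
+ * the fields back is just:
+ */
+#ifdef BCMDHD_DOC_EXAMPLES /* hypothetical guard */
+static uint32
+example_pll0_ndiv_int(uint32 pc0)
+{
+	return (pc0 & PMU43012_PLL0_PC0_NDIV_INT_MASK) >>
+		PMU43012_PLL0_PC0_NDIV_INT_SHIFT;
+}
+static uint32
+example_pll0_pdiv(uint32 pc3)
+{
+	return (pc3 & PMU43012_PLL0_PC3_PDIV_MASK) >>
+		PMU43012_PLL0_PC3_PDIV_SHIFT;
+}
+#endif /* BCMDHD_DOC_EXAMPLES */
+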
+#define CCTL_43012_ARM_OFFCOUNT_MASK 0x00000003u
+#define CCTL_43012_ARM_OFFCOUNT_SHIFT 0u
+#define CCTL_43012_ARM_ONCOUNT_MASK 0x0000000cu
+#define CCTL_43012_ARM_ONCOUNT_SHIFT 2u
+
+/* PMU Rev >= 30 */
+#define PMU30_ALPCLK_ONEMHZ_ENAB 0x80000000u
+
+/* 43012 PMU Chip Control Registers */
+#define PMUCCTL02_43012_SUBCORE_PWRSW_FORCE_ON 0x00000010u
+#define PMUCCTL02_43012_PHY_PWRSW_FORCE_ON 0x00000040u
+#define PMUCCTL02_43012_LHL_TIMER_SELECT 0x00000800u
+#define PMUCCTL02_43012_RFLDO3P3_PU_FORCE_ON 0x00008000u
+#define PMUCCTL02_43012_WL2CDIG_I_PMU_SLEEP_ENAB 0x00010000u
+#define PMUCCTL02_43012_BTLDO3P3_PU_FORCE_OFF (1u << 12u)
+
+#define PMUCCTL04_43012_BBPLL_ENABLE_PWRDN 0x00100000u
+#define PMUCCTL04_43012_BBPLL_ENABLE_PWROFF 0x00200000u
+#define PMUCCTL04_43012_FORCE_BBPLL_ARESET 0x00400000u
+#define PMUCCTL04_43012_FORCE_BBPLL_DRESET 0x00800000u
+#define PMUCCTL04_43012_FORCE_BBPLL_PWRDN 0x01000000u
+#define PMUCCTL04_43012_FORCE_BBPLL_ISOONHIGH 0x02000000u
+#define PMUCCTL04_43012_FORCE_BBPLL_PWROFF 0x04000000u
+#define PMUCCTL04_43012_DISABLE_LQ_AVAIL 0x08000000u
+#define PMUCCTL04_43012_DISABLE_HT_AVAIL 0x10000000u
+#define PMUCCTL04_43012_USE_LOCK 0x20000000u
+#define PMUCCTL04_43012_OPEN_LOOP_ENABLE 0x40000000u
+#define PMUCCTL04_43012_FORCE_OPEN_LOOP 0x80000000u
+#define PMUCCTL05_43012_DISABLE_SPM_CLK (1u << 8u)
+#define PMUCCTL05_43012_RADIO_DIG_CLK_GATING_EN (1u << 14u)
+#define PMUCCTL06_43012_GCI2RDIG_USE_ASYNCAPB (1u << 31u)
+#define PMUCCTL08_43012_XTAL_CORE_SIZE_PMOS_NORMAL_MASK 0x00000FC0u
+#define PMUCCTL08_43012_XTAL_CORE_SIZE_PMOS_NORMAL_SHIFT 6u
+#define PMUCCTL08_43012_XTAL_CORE_SIZE_NMOS_NORMAL_MASK 0x00FC0000u
+#define PMUCCTL08_43012_XTAL_CORE_SIZE_NMOS_NORMAL_SHIFT 18u
+#define PMUCCTL08_43012_XTAL_SEL_BIAS_RES_NORMAL_MASK 0x07000000u
+#define PMUCCTL08_43012_XTAL_SEL_BIAS_RES_NORMAL_SHIFT 24u
+#define PMUCCTL09_43012_XTAL_CORESIZE_BIAS_ADJ_NORMAL_MASK 0x0003F000u
+#define PMUCCTL09_43012_XTAL_CORESIZE_BIAS_ADJ_NORMAL_SHIFT 12u
+#define PMUCCTL09_43012_XTAL_CORESIZE_RES_BYPASS_NORMAL_MASK 0x00000038u
+#define PMUCCTL09_43012_XTAL_CORESIZE_RES_BYPASS_NORMAL_SHIFT 3u
+
+#define PMUCCTL09_43012_XTAL_CORESIZE_BIAS_ADJ_STARTUP_MASK 0x00000FC0u
+#define PMUCCTL09_43012_XTAL_CORESIZE_BIAS_ADJ_STARTUP_SHIFT 6u
+/* startup value; the bias is reduced from this during normal operation for optimized power */
+#define PMUCCTL09_43012_XTAL_CORESIZE_BIAS_ADJ_STARTUP_VAL 0x1Fu
+
+#define PMUCCTL13_43012_FCBS_UP_TRIG_EN 0x00000400
+
+#define PMUCCTL14_43012_ARMCM3_RESET_INITVAL 0x00000001
+#define PMUCCTL14_43012_DOT11MAC_CLKEN_INITVAL 0x00000020
+#define PMUCCTL14_43012_DOT11MAC_PHY_CLK_EN_INITVAL 0x00000080
+#define PMUCCTL14_43012_DOT11MAC_PHY_CNTL_EN_INITVAL 0x00000200
+#define PMUCCTL14_43012_SDIOD_RESET_INIVAL 0x00000400
+#define PMUCCTL14_43012_SDIO_CLK_DMN_RESET_INITVAL 0x00001000
+#define PMUCCTL14_43012_SOCRAM_CLKEN_INITVAL 0x00004000
+#define PMUCCTL14_43012_M2MDMA_RESET_INITVAL 0x00008000
+#define PMUCCTL14_43012_DISABLE_LQ_AVAIL 0x08000000
+
+#define VREG6_43012_MEMLPLDO_ADJ_MASK 0x0000F000
+#define VREG6_43012_MEMLPLDO_ADJ_SHIFT 12
+
+#define VREG6_43012_LPLDO_ADJ_MASK 0x000000F0
+#define VREG6_43012_LPLDO_ADJ_SHIFT 4
+
+#define VREG7_43012_PWRSW_1P8_PU_MASK 0x00400000
+#define VREG7_43012_PWRSW_1P8_PU_SHIFT 22
+
+/* 4378 PMU Chip Control Registers */
+#define PMUCCTL03_4378_XTAL_CORESIZE_PMOS_NORMAL_MASK 0x001F8000
+#define PMUCCTL03_4378_XTAL_CORESIZE_PMOS_NORMAL_SHIFT 15
+#define PMUCCTL03_4378_XTAL_CORESIZE_PMOS_NORMAL_VAL 0x3F
+
+#define PMUCCTL03_4378_XTAL_CORESIZE_NMOS_NORMAL_MASK 0x07E00000
+#define PMUCCTL03_4378_XTAL_CORESIZE_NMOS_NORMAL_SHIFT 21
+#define PMUCCTL03_4378_XTAL_CORESIZE_NMOS_NORMAL_VAL 0x3F
+
+#define PMUCCTL03_4378_XTAL_SEL_BIAS_RES_NORMAL_MASK 0x38000000
+#define PMUCCTL03_4378_XTAL_SEL_BIAS_RES_NORMAL_SHIFT 27
+#define PMUCCTL03_4378_XTAL_SEL_BIAS_RES_NORMAL_VAL 0x0
+
+#define PMUCCTL00_4378_XTAL_CORESIZE_BIAS_ADJ_NORMAL_MASK 0x00000FC0
+#define PMUCCTL00_4378_XTAL_CORESIZE_BIAS_ADJ_NORMAL_SHIFT 6
+#define PMUCCTL00_4378_XTAL_CORESIZE_BIAS_ADJ_NORMAL_VAL 0x5
+
+#define PMUCCTL00_4378_XTAL_RES_BYPASS_NORMAL_MASK 0x00038000
+#define PMUCCTL00_4378_XTAL_RES_BYPASS_NORMAL_SHIFT 15
+#define PMUCCTL00_4378_XTAL_RES_BYPASS_NORMAL_VAL 0x7
+
+/* 4387 PMU Chip Control Registers */
+#define PMUCCTL03_4387_XTAL_CORESIZE_PMOS_NORMAL_MASK 0x001F8000
+#define PMUCCTL03_4387_XTAL_CORESIZE_PMOS_NORMAL_SHIFT 15
+#define PMUCCTL03_4387_XTAL_CORESIZE_PMOS_NORMAL_VAL 0x3F
+
+#define PMUCCTL03_4387_XTAL_CORESIZE_NMOS_NORMAL_MASK 0x07E00000
+#define PMUCCTL03_4387_XTAL_CORESIZE_NMOS_NORMAL_SHIFT 21
+#define PMUCCTL03_4387_XTAL_CORESIZE_NMOS_NORMAL_VAL 0x3F
+
+#define PMUCCTL03_4387_XTAL_SEL_BIAS_RES_NORMAL_MASK 0x38000000
+#define PMUCCTL03_4387_XTAL_SEL_BIAS_RES_NORMAL_SHIFT 27
+#define PMUCCTL03_4387_XTAL_SEL_BIAS_RES_NORMAL_VAL 0x0
+
+#define PMUCCTL00_4387_XTAL_CORESIZE_BIAS_ADJ_NORMAL_MASK 0x00000FC0
+#define PMUCCTL00_4387_XTAL_CORESIZE_BIAS_ADJ_NORMAL_SHIFT 6
+#define PMUCCTL00_4387_XTAL_CORESIZE_BIAS_ADJ_NORMAL_VAL 0x5
+
+#define PMUCCTL00_4387_XTAL_RES_BYPASS_NORMAL_MASK 0x00038000
+#define PMUCCTL00_4387_XTAL_RES_BYPASS_NORMAL_SHIFT 15
+#define PMUCCTL00_4387_XTAL_RES_BYPASS_NORMAL_VAL 0x7
+
+/* GPIO pins */
+#define CC_PIN_GPIO_00 (0u)
+#define CC_PIN_GPIO_01 (1u)
+#define CC_PIN_GPIO_02 (2u)
+#define CC_PIN_GPIO_03 (3u)
+#define CC_PIN_GPIO_04 (4u)
+#define CC_PIN_GPIO_05 (5u)
+#define CC_PIN_GPIO_06 (6u)
+#define CC_PIN_GPIO_07 (7u)
+#define CC_PIN_GPIO_08 (8u)
+#define CC_PIN_GPIO_09 (9u)
+#define CC_PIN_GPIO_10 (10u)
+#define CC_PIN_GPIO_11 (11u)
+#define CC_PIN_GPIO_12 (12u)
+#define CC_PIN_GPIO_13 (13u)
+#define CC_PIN_GPIO_14 (14u)
+#define CC_PIN_GPIO_15 (15u)
+#define CC_PIN_GPIO_16 (16u)
+#define CC_PIN_GPIO_17 (17u)
+#define CC_PIN_GPIO_18 (18u)
+#define CC_PIN_GPIO_19 (19u)
+#define CC_PIN_GPIO_20 (20u)
+#define CC_PIN_GPIO_21 (21u)
+#define CC_PIN_GPIO_22 (22u)
+#define CC_PIN_GPIO_23 (23u)
+#define CC_PIN_GPIO_24 (24u)
+#define CC_PIN_GPIO_25 (25u)
+#define CC_PIN_GPIO_26 (26u)
+#define CC_PIN_GPIO_27 (27u)
+#define CC_PIN_GPIO_28 (28u)
+#define CC_PIN_GPIO_29 (29u)
+#define CC_PIN_GPIO_30 (30u)
+#define CC_PIN_GPIO_31 (31u)
+
+/* Last GPIO Pad */
+#define CC_PIN_GPIO_LAST CC_PIN_GPIO_31
+
+/* GCI chipcontrol register indices */
+#define CC_GCI_CHIPCTRL_00 (0)
+#define CC_GCI_CHIPCTRL_01 (1)
+#define CC_GCI_CHIPCTRL_02 (2)
+#define CC_GCI_CHIPCTRL_03 (3)
+#define CC_GCI_CHIPCTRL_04 (4)
+#define CC_GCI_CHIPCTRL_05 (5)
+#define CC_GCI_CHIPCTRL_06 (6)
+#define CC_GCI_CHIPCTRL_07 (7)
+#define CC_GCI_CHIPCTRL_08 (8)
+#define CC_GCI_CHIPCTRL_09 (9)
+#define CC_GCI_CHIPCTRL_10 (10)
+#define CC_GCI_CHIPCTRL_11 (11)
+#define CC_GCI_CHIPCTRL_12 (12)
+#define CC_GCI_CHIPCTRL_13 (13)
+#define CC_GCI_CHIPCTRL_14 (14)
+#define CC_GCI_CHIPCTRL_15 (15)
+#define CC_GCI_CHIPCTRL_16 (16)
+#define CC_GCI_CHIPCTRL_17 (17)
+#define CC_GCI_CHIPCTRL_18 (18)
+#define CC_GCI_CHIPCTRL_19 (19)
+#define CC_GCI_CHIPCTRL_20 (20)
+#define CC_GCI_CHIPCTRL_21 (21)
+#define CC_GCI_CHIPCTRL_22 (22)
+#define CC_GCI_CHIPCTRL_23 (23)
+#define CC_GCI_CHIPCTRL_24 (24)
+#define CC_GCI_CHIPCTRL_25 (25)
+#define CC_GCI_CHIPCTRL_26 (26)
+#define CC_GCI_CHIPCTRL_27 (27)
+#define CC_GCI_CHIPCTRL_28 (28)
+
+/* GCI chip ctrl SDTC Soft reset */
+#define GCI_CHIP_CTRL_SDTC_SOFT_RESET (1 << 31)
+
+#define CC_GCI_XTAL_BUFSTRG_NFC (0xff << 12)
+
+#define CC_GCI_04_SDIO_DRVSTR_SHIFT 15
+#define CC_GCI_04_SDIO_DRVSTR_MASK (0x0f << CC_GCI_04_SDIO_DRVSTR_SHIFT) /* 0x00078000 */
+#define CC_GCI_04_SDIO_DRVSTR_OVERRIDE_BIT (1 << 18)
+#define CC_GCI_04_SDIO_DRVSTR_DEFAULT_MA 14
+#define CC_GCI_04_SDIO_DRVSTR_MIN_MA 2
+#define CC_GCI_04_SDIO_DRVSTR_MAX_MA 16
+
+#define CC_GCI_04_4387C0_XTAL_PM_CLK (1u << 20u)
+
+#define CC_GCI_05_4387C0_AFE_RET_ENB_MASK (1u << 7u)
+
+#define CC_GCI_CHIPCTRL_07_BTDEFLO_ANT0_NBIT 2u
+#define CC_GCI_CHIPCTRL_07_BTDEFLO_ANT0_MASK 0xFu
+#define CC_GCI_CHIPCTRL_07_BTDEFHI_ANT0_NBIT 11u
+#define CC_GCI_CHIPCTRL_07_BTDEFHI_ANT0_MASK 1u
+
+#define CC_GCI_CHIPCTRL_18_BTDEF_ANT0_NBIT 10u
+#define CC_GCI_CHIPCTRL_18_BTDEF_ANT0_MASK 0x1Fu
+#define CC_GCI_CHIPCTRL_18_BTDEFLO_ANT1_NBIT 15u
+#define CC_GCI_CHIPCTRL_18_BTDEFLO_ANT1_MASK 1u
+#define CC_GCI_CHIPCTRL_18_BTDEFHI_ANT1_NBIT 26u
+#define CC_GCI_CHIPCTRL_18_BTDEFHI_ANT1_MASK 0x3Fu
+
+#define CC_GCI_CHIPCTRL_19_BTDEF_ANT1_NBIT 10u
+#define CC_GCI_CHIPCTRL_19_BTDEF_ANT1_MASK 0x7u
+
+#define CC_GCI_CHIPCTRL_23_MAIN_WLSC_PRISEL_FORCE_NBIT 16u
+#define CC_GCI_CHIPCTRL_23_MAIN_WLSC_PRISEL_VAL_NBIT 17u
+#define CC_GCI_CHIPCTRL_23_AUX_WLSC_PRISEL_FORCE_NBIT 18u
+#define CC_GCI_CHIPCTRL_23_AUX_WLSC_PRISEL_VAL_NBIT 19u
+#define CC_GCI_CHIPCTRL_23_WLSC_BTSC_PRISEL_FORCE_NBIT 20u
+#define CC_GCI_CHIPCTRL_23_WLSC_BTSC_PRISEL_VAL_NBIT 21u
+#define CC_GCI_CHIPCTRL_23_WLSC_BTMAIN_PRISEL_FORCE_NBIT 22u
+#define CC_GCI_CHIPCTRL_23_WLSC_BTMAIN_PRISEL_VAL_NBIT 23u
+#define CC_GCI_CHIPCTRL_23_BTMAIN_BTSC_PRISEL_FORCE_NBIT 24u
+#define CC_GCI_CHIPCTRL_23_BTMAIN_BTSC_PRISEL_VAL_NBIT 25u
+#define CC_GCI_CHIPCTRL_23_LVM_MODE_DISABLE_NBIT 26u
+
+#define CC_GCI_CHIPCTRL_23_MAIN_WLSC_PRISEL_FORCE_MASK (1u <<\
+ CC_GCI_CHIPCTRL_23_MAIN_WLSC_PRISEL_FORCE_NBIT)
+#define CC_GCI_CHIPCTRL_23_MAIN_WLSC_PRISEL_VAL_MASK (1u <<\
+ CC_GCI_CHIPCTRL_23_MAIN_WLSC_PRISEL_VAL_NBIT)
+#define CC_GCI_CHIPCTRL_23_AUX_WLSC_PRISEL_FORCE_MASK (1u <<\
+ CC_GCI_CHIPCTRL_23_AUX_WLSC_PRISEL_FORCE_NBIT)
+#define CC_GCI_CHIPCTRL_23_AUX_WLSC_PRISEL_VAL_MASK (1u <<\
+ CC_GCI_CHIPCTRL_23_AUX_WLSC_PRISEL_VAL_NBIT)
+#define CC_GCI_CHIPCTRL_23_WLSC_BTSC_PRISEL_FORCE_MASK (1u <<\
+ CC_GCI_CHIPCTRL_23_WLSC_BTSC_PRISEL_FORCE_NBIT)
+#define CC_GCI_CHIPCTRL_23_WLSC_BTSC_PRISEL_VAL_MASK (1u <<\
+ CC_GCI_CHIPCTRL_23_WLSC_BTSC_PRISEL_VAL_NBIT)
+#define CC_GCI_CHIPCTRL_23_WLSC_BTMAIN_PRISEL_FORCE_MASK (1u <<\
+ CC_GCI_CHIPCTRL_23_WLSC_BTMAIN_PRISEL_FORCE_NBIT)
+#define CC_GCI_CHIPCTRL_23_WLSC_BTMAIN_PRISEL_VAL_MASK (1u <<\
+ CC_GCI_CHIPCTRL_23_WLSC_BTMAIN_PRISEL_VAL_NBIT)
+#define CC_GCI_CHIPCTRL_23_BTMAIN_BTSC_PRISEL_FORCE_MASK (1u <<\
+ CC_GCI_CHIPCTRL_23_BTMAIN_BTSC_PRISEL_FORCE_NBIT)
+#define CC_GCI_CHIPCTRL_23_BTMAIN_BTSC_PRISEL_VAL_MASK (1u <<\
+ CC_GCI_CHIPCTRL_23_BTMAIN_BTSC_PRISEL_VAL_NBIT)
+#define CC_GCI_CHIPCTRL_23_LVM_MODE_DISABLE_MASK (1u <<\
+ CC_GCI_CHIPCTRL_23_LVM_MODE_DISABLE_NBIT)
+
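+/* Editor's sketch (not part of the original patch): each PRISEL override
+ * is a force/value bit pair, so forcing a selection sets the *_FORCE bit
+ * and then drives the paired *_VAL bit to the wanted level:
+ */
+#ifdef BCMDHD_DOC_EXAMPLES /* hypothetical guard */
+static uint32
+example_force_main_wlsc_prisel(uint32 cc23, int val)
+{
+	cc23 |= CC_GCI_CHIPCTRL_23_MAIN_WLSC_PRISEL_FORCE_MASK;
+	if (val)
+		cc23 |= CC_GCI_CHIPCTRL_23_MAIN_WLSC_PRISEL_VAL_MASK;
+	else
+		cc23 &= ~CC_GCI_CHIPCTRL_23_MAIN_WLSC_PRISEL_VAL_MASK;
+	return cc23;
+}
+#endif /* BCMDHD_DOC_EXAMPLES */
+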
+/* 2G core0/core1 Pulse width register (offset : 0x47C)
+* wl_rx_long_pulse_width_2g_core0 [4:0];
+* wl_rx_short_pulse_width_2g_core0 [9:5];
+* wl_rx_long_pulse_width_2g_core1 [20:16];
+* wl_rx_short_pulse_width_2g_core1 [25:21];
+*/
+#define CC_GCI_CNCB_RESET_PULSE_WIDTH_2G_CORE1_NBIT (16u)
+#define CC_GCI_CNCB_RESET_PULSE_WIDTH_2G_CORE0_MASK (0x1Fu)
+#define CC_GCI_CNCB_RESET_PULSE_WIDTH_2G_CORE1_MASK (0x1Fu <<\
+ CC_GCI_CNCB_RESET_PULSE_WIDTH_2G_CORE1_NBIT)
+
+#define CC_GCI_CNCB_SHORT_RESET_PULSE_WIDTH_2G_CORE0_NBIT (5u)
+#define CC_GCI_CNCB_LONG_RESET_PULSE_WIDTH_2G_CORE1_NBIT (16u)
+#define CC_GCI_CNCB_SHORT_RESET_PULSE_WIDTH_2G_CORE1_NBIT (21u)
+
+#define CC_GCI_CNCB_LONG_RESET_PULSE_WIDTH_2G_CORE0_MASK (0x1Fu)
+#define CC_GCI_CNCB_LONG_RESET_PULSE_WIDTH_2G_CORE1_MASK (0x1Fu <<\
+ CC_GCI_CNCB_LONG_RESET_PULSE_WIDTH_2G_CORE1_NBIT)
+#define CC_GCI_CNCB_SHORT_RESET_PULSE_WIDTH_2G_CORE0_MASK (0x1Fu <<\
+ CC_GCI_CNCB_SHORT_RESET_PULSE_WIDTH_2G_CORE0_NBIT)
+#define CC_GCI_CNCB_SHORT_RESET_PULSE_WIDTH_2G_CORE1_MASK (0x1Fu <<\
+ CC_GCI_CNCB_SHORT_RESET_PULSE_WIDTH_2G_CORE1_NBIT)
+
+/* 5G core0/Core1 (offset : 0x480)
+* wl_rx_long_pulse_width_5g[4:0];
+* wl_rx_short_pulse_width_5g[9:5]
+*/
+
+#define CC_GCI_CNCB_SHORT_RESET_PULSE_WIDTH_5G_NBIT (5u)
+
+#define CC_GCI_CNCB_LONG_RESET_PULSE_WIDTH_5G_MASK (0x1Fu)
+#define CC_GCI_CNCB_SHORT_RESET_PULSE_WIDTH_5G_MASK (0x1Fu <<\
+ CC_GCI_CNCB_SHORT_RESET_PULSE_WIDTH_5G_NBIT)
+
+#define CC_GCI_CNCB_GLITCH_FILTER_WIDTH_MASK (0xFFu)
+
+#define CC_GCI_RESET_OVERRIDE_NBIT 0x1u
+#define CC_GCI_RESET_OVERRIDE_MASK (0x1u << \
+ CC_GCI_RESET_OVERRIDE_NBIT)
+
+#define CC_GCI_06_JTAG_SEL_SHIFT 4u
+#define CC_GCI_06_JTAG_SEL_MASK (1u << 4u)
+
+#define CC_GCI_NUMCHIPCTRLREGS(cap1)	(((cap1) & 0xF00u) >> 8u)
+
+#define CC_GCI_03_LPFLAGS_SFLASH_MASK (0xFFFFFFu << 8u)
+#define CC_GCI_03_LPFLAGS_SFLASH_VAL (0xCCCCCCu << 8u)
+
+#define CC_GCI_13_INSUFF_TREFUP_FIX_SHIFT 31u
+/* Note: For 4368 B0 onwards, the shift offset remains the same,
+* but the Chip Common Ctrl GCI register is 16
+*/
+#define CC_GCI_16_INSUFF_TREFUP_FIX_SHIFT 31u
+
+#define GPIO_CTRL_REG_DISABLE_INTERRUPT (3u << 9u)
+#define GPIO_CTRL_REG_COUNT 40
+
+#ifdef WL_INITVALS
+#define XTAL_HQ_SETTING_4387 (wliv_pmu_xtal_HQ)
+#define XTAL_LQ_SETTING_4387 (wliv_pmu_xtal_LQ)
+#else
+#define XTAL_HQ_SETTING_4387 (0xFFF94D30u)
+#define XTAL_LQ_SETTING_4387 (0xFFF94380u)
+#endif
+
+#define CC_GCI_16_BBPLL_CH_CTRL_GRP_PD_TRIG_1_MASK (0x00000200u)
+#define CC_GCI_16_BBPLL_CH_CTRL_GRP_PD_TRIG_1_SHIFT (9u)
+#define CC_GCI_16_BBPLL_CH_CTRL_GRP_PD_TRIG_24_3_MASK (0xFFFFFC00u)
+#define CC_GCI_16_BBPLL_CH_CTRL_GRP_PD_TRIG_24_3_SHIFT (10u)
+
+#define CC_GCI_17_BBPLL_CH_CTRL_GRP_PD_TRIG_30_25_MASK (0x0000FC00u)
+#define CC_GCI_17_BBPLL_CH_CTRL_GRP_PD_TRIG_30_25_SHIFT (10u)
+#define CC_GCI_17_BBPLL_CH_CTRL_EN_MASK (0x04000000u)
+
+#define CC_GCI_20_BBPLL_CH_CTRL_GRP_MASK (0xFC000000u)
+#define CC_GCI_20_BBPLL_CH_CTRL_GRP_SHIFT (26u)
+
+/* GCI Chip Ctrl Regs */
+#define GCI_CC28_IHRP_SEL_MASK (7 << 24)
+#define GCI_CC28_IHRP_SEL_SHIFT (24u)
+
+/* 30=MACPHY_CLK_MAIN, 29=MACPHY_CLK_AUX, 23=RADIO_PU_MAIN, 22=CORE_RDY_MAIN
+ * 20=RADIO_PU_AUX, 18=CORE_RDY_AUX, 14=PWRSW_MAIN, 11=PWRSW_AUX
+ */
+#define GRP_PD_TRIGGER_MASK_4387 (0x60d44800u)
+
+/* power down ch0=MAIN/AUX PHY_clk, ch2=MAIN/AUX MAC_clk, ch5=RFFE_clk */
+#define GRP_PD_MASK_4387 (0x25u)
+
+#define CC_GCI_CHIPCTRL_11_2x2_ANT_MASK 0x03
+#define CC_GCI_CHIPCTRL_11_SHIFT_ANT_MASK 26
+
+/* GCI chipstatus register indices */
+#define GCI_CHIPSTATUS_00 (0)
+#define GCI_CHIPSTATUS_01 (1)
+#define GCI_CHIPSTATUS_02 (2)
+#define GCI_CHIPSTATUS_03 (3)
+#define GCI_CHIPSTATUS_04 (4)
+#define GCI_CHIPSTATUS_05 (5)
+#define GCI_CHIPSTATUS_06 (6)
+#define GCI_CHIPSTATUS_07 (7)
+#define GCI_CHIPSTATUS_08 (8)
+#define GCI_CHIPSTATUS_09 (9)
+#define GCI_CHIPSTATUS_10 (10)
+#define GCI_CHIPSTATUS_11 (11)
+#define GCI_CHIPSTATUS_12 (12)
+#define GCI_CHIPSTATUS_13 (13)
+#define GCI_CHIPSTATUS_15 (15)
+
+/* 43012 GCI chipstatus registers */
+#define GCI43012_CHIPSTATUS_07_BBPLL_LOCK_MASK (1 << 3)
+
+/* GCI Core Control Reg */
+#define GCI_CORECTRL_SR_MASK (1 << 0) /**< SECI block Reset */
+#define GCI_CORECTRL_RSL_MASK (1 << 1) /**< ResetSECILogic */
+#define GCI_CORECTRL_ES_MASK (1 << 2) /**< EnableSECI */
+#define GCI_CORECTRL_FSL_MASK (1 << 3) /**< Force SECI Out Low */
+#define GCI_CORECTRL_SOM_MASK (7 << 4) /**< SECI Op Mode */
+#define GCI_CORECTRL_US_MASK (1 << 7) /**< Update SECI */
+#define GCI_CORECTRL_BOS_MASK (1 << 8) /**< Break On Sleep */
+#define GCI_CORECTRL_FORCEREGCLK_MASK (1 << 18) /* ForceRegClk */
+
+/* 4378 & 4387 GCI AVS function */
+#define GCI6_AVS_ENAB 1u
+#define GCI6_AVS_ENAB_SHIFT 31u
+#define GCI6_AVS_ENAB_MASK (1u << GCI6_AVS_ENAB_SHIFT)
+#define GCI6_AVS_CBUCK_VOLT_SHIFT 25u
+#define GCI6_AVS_CBUCK_VOLT_MASK (0x1Fu << GCI6_AVS_CBUCK_VOLT_SHIFT)
+
+/* GCI GPIO for function sel GCI-0/GCI-1 */
+#define CC_GCI_GPIO_0 (0)
+#define CC_GCI_GPIO_1 (1)
+#define CC_GCI_GPIO_2 (2)
+#define CC_GCI_GPIO_3 (3)
+#define CC_GCI_GPIO_4 (4)
+#define CC_GCI_GPIO_5 (5)
+#define CC_GCI_GPIO_6 (6)
+#define CC_GCI_GPIO_7 (7)
+#define CC_GCI_GPIO_8 (8)
+#define CC_GCI_GPIO_9 (9)
+#define CC_GCI_GPIO_10 (10)
+#define CC_GCI_GPIO_11 (11)
+#define CC_GCI_GPIO_12 (12)
+#define CC_GCI_GPIO_13 (13)
+#define CC_GCI_GPIO_14 (14)
+#define CC_GCI_GPIO_15 (15)
+
+/* indicates Invalid GPIO, e.g. when PAD GPIO doesn't map to GCI GPIO */
+#define CC_GCI_GPIO_INVALID 0xFF
+
+/* 4378 LHL GPIO configuration */
+#define LHL_IOCFG_P_ADDR_LHL_GPIO_DOUT_SEL_SHIFT (3u)
+#define LHL_IOCFG_P_ADDR_LHL_GPIO_DOUT_SEL_MASK (1u << LHL_IOCFG_P_ADDR_LHL_GPIO_DOUT_SEL_SHIFT)
+
+/* 4378 LHL SPMI bit definitions */
+#define LHL_LP_CTL5_SPMI_DATA_SEL_SHIFT (8u)
+#define LHL_LP_CTL5_SPMI_DATA_SEL_MASK (0x3u << LHL_LP_CTL5_SPMI_DATA_SEL_SHIFT)
+#define LHL_LP_CTL5_SPMI_CLK_SEL_SHIFT (6u)
+#define LHL_LP_CTL5_SPMI_CLK_SEL_MASK (0x3u << LHL_LP_CTL5_SPMI_CLK_SEL_SHIFT)
+#define LHL_LP_CTL5_SPMI_CLK_DATA_GPIO0 (0u)
+#define LHL_LP_CTL5_SPMI_CLK_DATA_GPIO1 (1u)
+#define LHL_LP_CTL5_SPMI_CLK_DATA_GPIO2 (2u)
+
+/* Please note the following defines */
+/* find the 4 bit mask given the bit position */
+#define GCIMASK(pos)  (((uint32)0xF) << (pos))
+/* get the value which can be used to directly OR with chipcontrol reg */
+#define GCIPOSVAL(val, pos)  ((((uint32)(val)) << (pos)) & GCIMASK(pos))
+/* Extract nibble from a given position */
+#define GCIGETNBL(val, pos)	(((val) >> (pos)) & 0xF)
+
+/* find the 8 bit mask given the bit position */
+#define GCIMASK_8B(pos)  (((uint32)0xFF) << (pos))
+/* get the value which can be used to directly OR with chipcontrol reg */
+#define GCIPOSVAL_8B(val, pos)  ((((uint32)(val)) << (pos)) & GCIMASK_8B(pos))
+/* Extract byte from a given position */
+#define GCIGETNBL_8B(val, pos)	(((val) >> (pos)) & 0xFF)
+
+/* find the 4 bit mask given the bit position */
+#define GCIMASK_4B(pos)  (((uint32)0xF) << (pos))
+/* get the value which can be used to directly OR with chipcontrol reg */
+#define GCIPOSVAL_4B(val, pos)  ((((uint32)(val)) << (pos)) & GCIMASK_4B(pos))
+/* Extract nibble from a given position */
+#define GCIGETNBL_4B(val, pos)	(((val) >> (pos)) & 0xF)
+
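+/* Editor's sketch (not part of the original patch): GCIPOSVAL and
+ * GCIGETNBL pair up for field insert/extract; placing nibble 0xA at bit
+ * position 8 yields 0x00000A00, and reading it back returns 0xA.
+ */
+#ifdef BCMDHD_DOC_EXAMPLES /* hypothetical guard */
+static uint32
+example_gci_nibble_roundtrip(void)
+{
+	uint32 regval = GCIPOSVAL(0xA, 8);	/* 0x00000A00 */
+	return GCIGETNBL(regval, 8);		/* 0xA */
+}
+#endif /* BCMDHD_DOC_EXAMPLES */
+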
+/* GCI Intstatus(Mask)/WakeMask Register bits. */
+#define GCI_INTSTATUS_RBI (1 << 0) /**< Rx Break Interrupt */
+#define GCI_INTSTATUS_UB (1 << 1) /**< UART Break Interrupt */
+#define GCI_INTSTATUS_SPE (1 << 2) /**< SECI Parity Error Interrupt */
+#define GCI_INTSTATUS_SFE (1 << 3) /**< SECI Framing Error Interrupt */
+#define GCI_INTSTATUS_SRITI (1 << 9) /**< SECI Rx Idle Timer Interrupt */
+#define GCI_INTSTATUS_STFF (1 << 10) /**< SECI Tx FIFO Full Interrupt */
+#define GCI_INTSTATUS_STFAE (1 << 11) /**< SECI Tx FIFO Almost Empty Intr */
+#define GCI_INTSTATUS_SRFAF (1 << 12) /**< SECI Rx FIFO Almost Full */
+#define GCI_INTSTATUS_SRFNE (1 << 14) /**< SECI Rx FIFO Not Empty */
+#define GCI_INTSTATUS_SRFOF (1 << 15) /**< SECI Rx FIFO Not Empty Timeout */
+#define GCI_INTSTATUS_EVENT (1 << 21) /* GCI Event Interrupt */
+#define GCI_INTSTATUS_LEVELWAKE (1 << 22) /* GCI Wake Level Interrupt */
+#define GCI_INTSTATUS_EVENTWAKE (1 << 23) /* GCI Wake Event Interrupt */
+#define GCI_INTSTATUS_GPIOINT (1 << 25) /**< GCIGpioInt */
+#define GCI_INTSTATUS_GPIOWAKE (1 << 26) /**< GCIGpioWake */
+#define GCI_INTSTATUS_LHLWLWAKE (1 << 30) /* LHL WL wake */
+
+/* GCI IntMask Register bits. */
+#define GCI_INTMASK_RBI (1 << 0) /**< Rx Break Interrupt */
+#define GCI_INTMASK_UB (1 << 1) /**< UART Break Interrupt */
+#define GCI_INTMASK_SPE (1 << 2) /**< SECI Parity Error Interrupt */
+#define GCI_INTMASK_SFE (1 << 3) /**< SECI Framing Error Interrupt */
+#define GCI_INTMASK_SRITI (1 << 9) /**< SECI Rx Idle Timer Interrupt */
+#define GCI_INTMASK_STFF (1 << 10) /**< SECI Tx FIFO Full Interrupt */
+#define GCI_INTMASK_STFAE (1 << 11) /**< SECI Tx FIFO Almost Empty Intr */
+#define GCI_INTMASK_SRFAF (1 << 12) /**< SECI Rx FIFO Almost Full */
+#define GCI_INTMASK_SRFNE (1 << 14) /**< SECI Rx FIFO Not Empty */
+#define GCI_INTMASK_SRFOF (1 << 15) /**< SECI Rx FIFO Not Empty Timeout */
+#define GCI_INTMASK_EVENT (1 << 21) /* GCI Event Interrupt */
+#define GCI_INTMASK_LEVELWAKE (1 << 22) /* GCI Wake Level Interrupt */
+#define GCI_INTMASK_EVENTWAKE (1 << 23) /* GCI Wake Event Interrupt */
+#define GCI_INTMASK_GPIOINT (1 << 25) /**< GCIGpioInt */
+#define GCI_INTMASK_GPIOWAKE (1 << 26) /**< GCIGpioWake */
+#define GCI_INTMASK_LHLWLWAKE (1 << 30) /* LHL WL wake */
+
+/* GCI WakeMask Register bits. */
+#define GCI_WAKEMASK_RBI (1 << 0) /**< Rx Break Interrupt */
+#define GCI_WAKEMASK_UB (1 << 1) /**< UART Break Interrupt */
+#define GCI_WAKEMASK_SPE (1 << 2) /**< SECI Parity Error Interrupt */
+#define GCI_WAKEMASK_SFE (1 << 3) /**< SECI Framing Error Interrupt */
+#define GCI_WAKE_SRITI (1 << 9) /**< SECI Rx Idle Timer Interrupt */
+#define GCI_WAKEMASK_STFF (1 << 10) /**< SECI Tx FIFO Full Interrupt */
+#define GCI_WAKEMASK_STFAE (1 << 11) /**< SECI Tx FIFO Almost Empty Intr */
+#define GCI_WAKEMASK_SRFAF (1 << 12) /**< SECI Rx FIFO Almost Full */
+#define GCI_WAKEMASK_SRFNE (1 << 14) /**< SECI Rx FIFO Not Empty */
+#define GCI_WAKEMASK_SRFOF (1 << 15) /**< SECI Rx FIFO Not Empty Timeout */
+#define GCI_WAKEMASK_EVENT (1 << 21) /* GCI Event Interrupt */
+#define GCI_WAKEMASK_LEVELWAKE (1 << 22) /* GCI Wake Level Interrupt */
+#define GCI_WAKEMASK_EVENTWAKE (1 << 23) /* GCI Wake Event Interrupt */
+#define GCI_WAKEMASK_GPIOINT (1 << 25) /**< GCIGpioInt */
+#define GCI_WAKEMASK_GPIOWAKE (1 << 26) /**< GCIGpioWake */
+#define GCI_WAKEMASK_LHLWLWAKE (1 << 30) /* LHL WL wake */
+
+#define GCI_WAKE_ON_GCI_GPIO1 1
+#define GCI_WAKE_ON_GCI_GPIO2 2
+#define GCI_WAKE_ON_GCI_GPIO3 3
+#define GCI_WAKE_ON_GCI_GPIO4 4
+#define GCI_WAKE_ON_GCI_GPIO5 5
+#define GCI_WAKE_ON_GCI_GPIO6 6
+#define GCI_WAKE_ON_GCI_GPIO7 7
+#define GCI_WAKE_ON_GCI_GPIO8 8
+#define GCI_WAKE_ON_GCI_SECI_IN 9
+
+#define PMU_EXT_WAKE_MASK_0_SDIO (1u << 2u)
+#define PMU_EXT_WAKE_MASK_0_PCIE_PERST (1u << 5u)
+
+#define PMU_4362_EXT_WAKE_MASK_0_SDIO (1u << 1u | 1u << 2u)
+
+/* =========== LHL regs =========== */
+#define LHL_PWRSEQCTL_SLEEP_EN (1 << 0)
+#define LHL_PWRSEQCTL_PMU_SLEEP_MODE (1 << 1)
+#define LHL_PWRSEQCTL_PMU_FINAL_PMU_SLEEP_EN (1 << 2)
+#define LHL_PWRSEQCTL_PMU_TOP_ISO_EN (1 << 3)
+#define LHL_PWRSEQCTL_PMU_TOP_SLB_EN (1 << 4)
+#define LHL_PWRSEQCTL_PMU_TOP_PWRSW_EN (1 << 5)
+#define LHL_PWRSEQCTL_PMU_CLDO_PD (1 << 6)
+#define LHL_PWRSEQCTL_PMU_LPLDO_PD (1 << 7)
+#define LHL_PWRSEQCTL_PMU_RSRC6_EN (1 << 8)
+
+#define PMU_SLEEP_MODE_0 (LHL_PWRSEQCTL_SLEEP_EN |\
+ LHL_PWRSEQCTL_PMU_FINAL_PMU_SLEEP_EN)
+
+#define PMU_SLEEP_MODE_1 (LHL_PWRSEQCTL_SLEEP_EN |\
+ LHL_PWRSEQCTL_PMU_SLEEP_MODE |\
+ LHL_PWRSEQCTL_PMU_FINAL_PMU_SLEEP_EN |\
+ LHL_PWRSEQCTL_PMU_TOP_ISO_EN |\
+ LHL_PWRSEQCTL_PMU_TOP_SLB_EN |\
+ LHL_PWRSEQCTL_PMU_TOP_PWRSW_EN |\
+ LHL_PWRSEQCTL_PMU_CLDO_PD |\
+ LHL_PWRSEQCTL_PMU_RSRC6_EN)
+
+#define PMU_SLEEP_MODE_2 (LHL_PWRSEQCTL_SLEEP_EN |\
+ LHL_PWRSEQCTL_PMU_SLEEP_MODE |\
+ LHL_PWRSEQCTL_PMU_FINAL_PMU_SLEEP_EN |\
+ LHL_PWRSEQCTL_PMU_TOP_ISO_EN |\
+ LHL_PWRSEQCTL_PMU_TOP_SLB_EN |\
+ LHL_PWRSEQCTL_PMU_TOP_PWRSW_EN |\
+ LHL_PWRSEQCTL_PMU_CLDO_PD |\
+ LHL_PWRSEQCTL_PMU_LPLDO_PD |\
+ LHL_PWRSEQCTL_PMU_RSRC6_EN)
+
+#define LHL_PWRSEQ_CTL (0x000000ff)
+
+/* LHL Top Level Power Up Control Register (lhl_top_pwrup_ctl_adr, Offset 0xE78)
+* Top Level Counter values for isolation, retention, Power Switch control
+*/
+#define LHL_PWRUP_ISOLATION_CNT (0x6 << 8)
+#define LHL_PWRUP_RETENTION_CNT (0x5 << 16)
+#define LHL_PWRUP_PWRSW_CNT (0x7 << 24)
+/* Mask covers only isolation bits 8:13, retention bits 16:21, and
+* power switch control bits 24:29
+*/
+#define LHL_PWRUP_CTL_MASK (0x3F3F3F00)
+#define LHL_PWRUP_CTL (LHL_PWRUP_ISOLATION_CNT |\
+ LHL_PWRUP_RETENTION_CNT |\
+ LHL_PWRUP_PWRSW_CNT)
+
+#define LHL_PWRUP2_CLDO_DN_CNT (0x0)
+#define LHL_PWRUP2_LPLDO_DN_CNT (0x0 << 8)
+#define LHL_PWRUP2_RSRC6_DN_CN (0x4 << 16)
+#define LHL_PWRUP2_RSRC7_DN_CN (0x0 << 24)
+#define LHL_PWRUP2_CTL_MASK (0x3F3F3F3F)
+#define LHL_PWRUP2_CTL (LHL_PWRUP2_CLDO_DN_CNT |\
+ LHL_PWRUP2_LPLDO_DN_CNT |\
+ LHL_PWRUP2_RSRC6_DN_CN |\
+ LHL_PWRUP2_RSRC7_DN_CN)
+
+/* LHL Top Level Power Down Control Register (lhl_top_pwrdn_ctl_adr, Offset 0xE74) */
+#define LHL_PWRDN_SLEEP_CNT (0x4)
+#define LHL_PWRDN_CTL_MASK (0x3F)
+
+/* LHL Top Level Power Down Control 2 Register (lhl_top_pwrdn2_ctl_adr, Offset 0xE80) */
+#define LHL_PWRDN2_CLDO_DN_CNT (0x4)
+#define LHL_PWRDN2_LPLDO_DN_CNT (0x4 << 8)
+#define LHL_PWRDN2_RSRC6_DN_CN (0x3 << 16)
+#define LHL_PWRDN2_RSRC7_DN_CN (0x0 << 24)
+#define LHL_PWRDN2_CTL (LHL_PWRDN2_CLDO_DN_CNT |\
+ LHL_PWRDN2_LPLDO_DN_CNT |\
+ LHL_PWRDN2_RSRC6_DN_CN |\
+ LHL_PWRDN2_RSRC7_DN_CN)
+#define LHL_PWRDN2_CTL_MASK (0x3F3F3F3F)
+
+#define LHL_FAST_WRITE_EN (1 << 14)
+
+#define LHL_WL_MACTIMER_MASK 0xFFFFFFFF
+/* Write 1 to clear */
+#define LHL_WL_MACTIMER_INT_ST_MASK (0x1u)
+
+/* WL ARM Timer0 Interrupt Mask (lhl_wl_armtim0_intrp_adr) */
+#define LHL_WL_ARMTIM0_INTRP_EN 0x00000001
+#define LHL_WL_ARMTIM0_INTRP_EDGE_TRIGGER 0x00000002
+
+/* WL ARM Timer0 Interrupt Status (lhl_wl_armtim0_st_adr) */
+#define LHL_WL_ARMTIM0_ST_WL_ARMTIM_INT_ST 0x00000001
+
+/* WL MAC TimerX Interrupt Mask (lhl_wl_mactimX_intrp_adr) */
+#define LHL_WL_MACTIM_INTRP_EN 0x00000001
+#define LHL_WL_MACTIM_INTRP_EDGE_TRIGGER 0x00000002
+
+/* WL MAC TimerX Interrupt Status (lhl_wl_mactimX_st_adr) */
+#define LHL_WL_MACTIM_ST_WL_MACTIM_INT_ST 0x00000001
+
+/* LHL Wakeup Status (lhl_wkup_status_adr) */
+#define LHL_WKUP_STATUS_WR_PENDING_ARMTIM0 0x00100000
+
+#define LHL_PS_MODE_0 0
+#define LHL_PS_MODE_1 1
+
+/* GCI EventIntMask Register SW bits */
+#define GCI_MAILBOXDATA_TOWLAN (1 << 0)
+#define GCI_MAILBOXDATA_TOBT (1 << 1)
+#define GCI_MAILBOXDATA_TONFC (1 << 2)
+#define GCI_MAILBOXDATA_TOGPS (1 << 3)
+#define GCI_MAILBOXDATA_TOLTE (1 << 4)
+#define GCI_MAILBOXACK_TOWLAN (1 << 8)
+#define GCI_MAILBOXACK_TOBT (1 << 9)
+#define GCI_MAILBOXACK_TONFC (1 << 10)
+#define GCI_MAILBOXACK_TOGPS (1 << 11)
+#define GCI_MAILBOXACK_TOLTE (1 << 12)
+#define GCI_WAKE_TOWLAN (1 << 16)
+#define GCI_WAKE_TOBT (1 << 17)
+#define GCI_WAKE_TONFC (1 << 18)
+#define GCI_WAKE_TOGPS (1 << 19)
+#define GCI_WAKE_TOLTE (1 << 20)
+#define GCI_SWREADY (1 << 24)
+
+/* GCI SECI_OUT TX Status Register bits */
+#define GCI_SECIOUT_TXSTATUS_TXHALT (1 << 0)
+#define GCI_SECIOUT_TXSTATUS_TI (1 << 16)
+
+/* 43012 MUX options */
+#define MUXENAB43012_HOSTWAKE_MASK (0x00000001)
+#define MUXENAB43012_GETIX(val, name) ((val) - 1)
+
+/*
+* Maximum delay for the PMU state transition in us.
+* This is an upper bound intended for spinwaits etc.
+*/
+#if defined(BCMQT) && defined(BCMDONGLEHOST)
+#define PMU_MAX_TRANSITION_DLY 1500000
+#else
+#define PMU_MAX_TRANSITION_DLY 15000
+#endif /* BCMQT && BCMDONGLEHOST */
+
+/* PMU resource up transition time in ILP cycles */
+#define PMURES_UP_TRANSITION 2
+
+#if !defined(BCMDONGLEHOST)
+/*
+* Information from BT to WLAN over eci_inputlo, eci_inputmi &
+* eci_inputhi register. Rev >=21
+*/
+/* Fields in eci_inputlo register - [0:31] */
+#define ECI_INLO_TASKTYPE_MASK 0x0000000f /* [3:0] - 4 bits */
+#define ECI_INLO_TASKTYPE_SHIFT 0
+#define ECI_INLO_PKTDUR_MASK 0x000000f0 /* [7:4] - 4 bits */
+#define ECI_INLO_PKTDUR_SHIFT 4
+#define ECI_INLO_ROLE_MASK 0x00000100 /* [8] - 1 bits */
+#define ECI_INLO_ROLE_SHIFT 8
+#define ECI_INLO_MLP_MASK 0x00000e00 /* [11:9] - 3 bits */
+#define ECI_INLO_MLP_SHIFT 9
+#define ECI_INLO_TXPWR_MASK 0x000ff000 /* [19:12] - 8 bits */
+#define ECI_INLO_TXPWR_SHIFT 12
+#define ECI_INLO_RSSI_MASK 0x0ff00000 /* [27:20] - 8 bits */
+#define ECI_INLO_RSSI_SHIFT 20
+#define ECI_INLO_VAD_MASK 0x10000000 /* [28] - 1 bits */
+#define ECI_INLO_VAD_SHIFT 28
+
+/*
+* Register eci_inputlo bitfield values.
+* - BT packet type information bits [7:0]
+*/
+/* [3:0] - Task (link) type */
+#define BT_ACL 0x00
+#define BT_SCO 0x01
+#define BT_eSCO 0x02
+#define BT_A2DP 0x03
+#define BT_SNIFF 0x04
+#define BT_PAGE_SCAN 0x05
+#define BT_INQUIRY_SCAN 0x06
+#define BT_PAGE 0x07
+#define BT_INQUIRY 0x08
+#define BT_MSS 0x09
+#define BT_PARK 0x0a
+#define BT_RSSISCAN 0x0b
+#define BT_MD_ACL 0x0c
+#define BT_MD_eSCO 0x0d
+#define BT_SCAN_WITH_SCO_LINK 0x0e
+#define BT_SCAN_WITHOUT_SCO_LINK 0x0f
+/* [7:4] = packet duration code */
+/* [8] - Master / Slave */
+#define BT_MASTER 0
+#define BT_SLAVE 1
+/* [11:9] - multi-level priority */
+#define BT_LOWEST_PRIO 0x0
+#define BT_HIGHEST_PRIO 0x3
+/* [19:12] - BT transmit power */
+/* [27:20] - BT RSSI */
+/* [28] - VAD silence */
+/* [31:29] - Undefined */
+/* Register eci_inputmi values - [32:63] - none defined */
+/* [63:32] - Undefined */
+
+/* Information from WLAN to BT over eci_output register. */
+/* Fields in eci_output register - [0:31] */
+#define ECI48_OUT_MASKMAGIC_HIWORD 0x55550000
+#define ECI_OUT_CHANNEL_MASK(ccrev) ((ccrev) < 35 ? 0xf : (ECI48_OUT_MASKMAGIC_HIWORD | 0xf000))
+#define ECI_OUT_CHANNEL_SHIFT(ccrev) ((ccrev) < 35 ? 0 : 12)
+#define ECI_OUT_BW_MASK(ccrev) ((ccrev) < 35 ? 0x70 : (ECI48_OUT_MASKMAGIC_HIWORD | 0xe00))
+#define ECI_OUT_BW_SHIFT(ccrev) ((ccrev) < 35 ? 4 : 9)
+#define ECI_OUT_ANTENNA_MASK(ccrev) ((ccrev) < 35 ? 0x80 : (ECI48_OUT_MASKMAGIC_HIWORD | 0x100))
+#define ECI_OUT_ANTENNA_SHIFT(ccrev) ((ccrev) < 35 ? 7 : 8)
+#define ECI_OUT_SIMUL_TXRX_MASK(ccrev) \
+ ((ccrev) < 35 ? 0x10000 : (ECI48_OUT_MASKMAGIC_HIWORD | 0x80))
+#define ECI_OUT_SIMUL_TXRX_SHIFT(ccrev) ((ccrev) < 35 ? 16 : 7)
+#define ECI_OUT_FM_DISABLE_MASK(ccrev) \
+ ((ccrev) < 35 ? 0x40000 : (ECI48_OUT_MASKMAGIC_HIWORD | 0x40))
+#define ECI_OUT_FM_DISABLE_SHIFT(ccrev) ((ccrev) < 35 ? 18 : 6)
+
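These ccrev-conditional masks let a single code path serve both eci_output layouts. A minimal sketch of rewriting the WLAN channel field with them; the function name, `regval`, and `chan` are illustrative, not part of this header:

/* Sketch: replace the WLAN channel field in a cached eci_output value.
 * The ECI48_OUT_MASKMAGIC_HIWORD pattern embedded in the ccrev >= 35
 * masks is stripped here, since it is a marker rather than a field mask.
 */
static uint32
eci_out_set_channel(uint32 regval, uint32 ccrev, uint32 chan)
{
	uint32 mask = ECI_OUT_CHANNEL_MASK(ccrev) & ~ECI48_OUT_MASKMAGIC_HIWORD;
	uint32 shift = ECI_OUT_CHANNEL_SHIFT(ccrev);

	return (regval & ~mask) | ((chan << shift) & mask);
}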
+/* Indicate control of ECI bits between s/w and dot11mac.
+ * 0 => FW control, 1 => MAC/ucode control
+ *
+ * Current assignment (ccrev >= 35):
+ * 0 - TxConf (ucode)
+ * 38 - FM disable (wl)
+ * 39 - Allow sim rx (ucode)
+ * 40 - Num antennas (wl)
+ * 43:41 - WLAN channel exclusion BW (wl)
+ * 47:44 - WLAN channel (wl)
+ *
+ * (ccrev < 35)
+ * 15:0 - wl
+ * 16 -
+ * 18 - FM disable
+ * 30 - wl interrupt
+ * 31 - ucode interrupt
+ * others - unassigned (presumed to be with dot11mac/ucode)
+ */
+#define ECI_MACCTRL_BITS 0xbffb0000
+#define ECI_MACCTRLLO_BITS 0x1
+#define ECI_MACCTRLHI_BITS 0xFF
+
+#endif /* !defined(BCMDONGLEHOST) */
+
+/* SECI Status (0x134) & Mask (0x138) bits - Rev 35 */
+#define SECI_STAT_BI (1 << 0) /* Break Interrupt */
+#define SECI_STAT_SPE (1 << 1) /* Parity Error */
+#define SECI_STAT_SFE (1 << 2) /* Framing Error */
+#define SECI_STAT_SDU (1 << 3) /* Data Updated */
+#define SECI_STAT_SADU (1 << 4) /* Auxiliary Data Updated */
+#define SECI_STAT_SAS (1 << 6) /* AUX State */
+#define SECI_STAT_SAS2 (1 << 7) /* AUX2 State */
+#define SECI_STAT_SRITI (1 << 8) /* Idle Timer Interrupt */
+#define SECI_STAT_STFF (1 << 9) /* Tx FIFO Full */
+#define SECI_STAT_STFAE (1 << 10) /* Tx FIFO Almost Empty */
+#define SECI_STAT_SRFE (1 << 11) /* Rx FIFO Empty */
+#define SECI_STAT_SRFAF (1 << 12) /* Rx FIFO Almost Full */
+#define SECI_STAT_SFCE (1 << 13) /* Flow Control Event */
+
+/* SECI configuration */
+#define SECI_MODE_UART 0x0
+#define SECI_MODE_SECI 0x1
+#define SECI_MODE_LEGACY_3WIRE_BT 0x2
+#define SECI_MODE_LEGACY_3WIRE_WLAN 0x3
+#define SECI_MODE_HALF_SECI 0x4
+
+#define SECI_RESET (1 << 0)
+#define SECI_RESET_BAR_UART (1 << 1)
+#define SECI_ENAB_SECI_ECI (1 << 2)
+#define SECI_ENAB_SECIOUT_DIS (1 << 3)
+#define SECI_MODE_MASK 0x7
+#define SECI_MODE_SHIFT 4 /* (bits 5, 6, 7) */
+#define SECI_UPD_SECI (1 << 7)
+
+#define SECI_AUX_TX_START (1 << 31)
+#define SECI_SLIP_ESC_CHAR 0xDB
+#define SECI_SIGNOFF_0 SECI_SLIP_ESC_CHAR
+#define SECI_SIGNOFF_1 0
+#define SECI_REFRESH_REQ 0xDA
+
+/* seci clk_ctl_st bits */
+#define CLKCTL_STS_HT_AVAIL_REQ (1 << 4)
+#define CLKCTL_STS_SECI_CLK_REQ (1 << 8)
+#define CLKCTL_STS_SECI_CLK_AVAIL (1 << 24)
+
+#define SECI_UART_MSR_CTS_STATE (1 << 0)
+#define SECI_UART_MSR_RTS_STATE (1 << 1)
+#define SECI_UART_SECI_IN_STATE (1 << 2)
+#define SECI_UART_SECI_IN2_STATE (1 << 3)
+
+/* GCI RX FIFO Control Register */
+#define GCI_RXF_LVL_MASK (0xFF << 0)
+#define GCI_RXF_TIMEOUT_MASK (0xFF << 8)
+
+/* GCI UART Registers' Bit definitions */
+/* Seci Fifo Level Register */
+#define SECI_TXF_LVL_MASK (0x3F << 8)
+#define TXF_AE_LVL_DEFAULT 0x4
+#define SECI_RXF_LVL_FC_MASK (0x3F << 16)
+
+/* SeciUARTFCR Bit definitions */
+#define SECI_UART_FCR_RFR (1 << 0)
+#define SECI_UART_FCR_TFR (1 << 1)
+#define SECI_UART_FCR_SR (1 << 2)
+#define SECI_UART_FCR_THP (1 << 3)
+#define SECI_UART_FCR_AB (1 << 4)
+#define SECI_UART_FCR_ATOE (1 << 5)
+#define SECI_UART_FCR_ARTSOE (1 << 6)
+#define SECI_UART_FCR_ABV (1 << 7)
+#define SECI_UART_FCR_ALM (1 << 8)
+
+/* SECI UART LCR register bits */
+#define SECI_UART_LCR_STOP_BITS (1 << 0) /* 0 - 1bit, 1 - 2bits */
+#define SECI_UART_LCR_PARITY_EN (1 << 1)
+#define SECI_UART_LCR_PARITY (1 << 2) /* 0 - odd, 1 - even */
+#define SECI_UART_LCR_RX_EN (1 << 3)
+#define SECI_UART_LCR_LBRK_CTRL (1 << 4) /* 1 => SECI_OUT held low */
+#define SECI_UART_LCR_TXO_EN (1 << 5)
+#define SECI_UART_LCR_RTSO_EN (1 << 6)
+#define SECI_UART_LCR_SLIPMODE_EN (1 << 7)
+#define SECI_UART_LCR_RXCRC_CHK (1 << 8)
+#define SECI_UART_LCR_TXCRC_INV (1 << 9)
+#define SECI_UART_LCR_TXCRC_LSBF (1 << 10)
+#define SECI_UART_LCR_TXCRC_EN (1 << 11)
+#define SECI_UART_LCR_RXSYNC_EN (1 << 12)
+
+#define SECI_UART_MCR_TX_EN (1 << 0)
+#define SECI_UART_MCR_PRTS (1 << 1)
+#define SECI_UART_MCR_SWFLCTRL_EN (1 << 2)
+#define SECI_UART_MCR_HIGHRATE_EN (1 << 3)
+#define SECI_UART_MCR_LOOPBK_EN (1 << 4)
+#define SECI_UART_MCR_AUTO_RTS (1 << 5)
+#define SECI_UART_MCR_AUTO_TX_DIS (1 << 6)
+#define SECI_UART_MCR_BAUD_ADJ_EN (1 << 7)
+#define SECI_UART_MCR_XONOFF_RPT (1 << 9)
+
+/* SeciUARTLSR Bit Mask */
+#define SECI_UART_LSR_RXOVR_MASK (1 << 0)
+#define SECI_UART_LSR_RFF_MASK (1 << 1)
+#define SECI_UART_LSR_TFNE_MASK (1 << 2)
+#define SECI_UART_LSR_TI_MASK (1 << 3)
+#define SECI_UART_LSR_TPR_MASK (1 << 4)
+#define SECI_UART_LSR_TXHALT_MASK (1 << 5)
+
+/* SeciUARTMSR Bit Mask */
+#define SECI_UART_MSR_CTSS_MASK (1 << 0)
+#define SECI_UART_MSR_RTSS_MASK (1 << 1)
+#define SECI_UART_MSR_SIS_MASK (1 << 2)
+#define SECI_UART_MSR_SIS2_MASK (1 << 3)
+
+/* SeciUARTData Bits */
+#define SECI_UART_DATA_RF_NOT_EMPTY_BIT (1 << 12)
+#define SECI_UART_DATA_RF_FULL_BIT (1 << 13)
+#define SECI_UART_DATA_RF_OVRFLOW_BIT (1 << 14)
+#define SECI_UART_DATA_FIFO_PTR_MASK 0xFF
+#define SECI_UART_DATA_RF_RD_PTR_SHIFT 16
+#define SECI_UART_DATA_RF_WR_PTR_SHIFT 24
+
+/* LTECX: ltecxmux */
+#define LTECX_EXTRACT_MUX(val, idx) (getbit4(&(val), (idx)))
+
+/* LTECX: ltecxmux MODE */
+#define LTECX_MUX_MODE_IDX 0
+#define LTECX_MUX_MODE_WCI2 0x0
+#define LTECX_MUX_MODE_GPIO 0x1
+
+/* LTECX GPIO Information Index */
+#define LTECX_NVRAM_FSYNC_IDX 0
+#define LTECX_NVRAM_LTERX_IDX 1
+#define LTECX_NVRAM_LTETX_IDX 2
+#define LTECX_NVRAM_WLPRIO_IDX 3
+
+/* LTECX WCI2 Information Index */
+#define LTECX_NVRAM_WCI2IN_IDX 0
+#define LTECX_NVRAM_WCI2OUT_IDX 1
+
+/* LTECX: Macros to get GPIO/FNSEL/GCIGPIO */
+#define LTECX_EXTRACT_PADNUM(val, idx) (getbit8(&(val), (idx)))
+#define LTECX_EXTRACT_FNSEL(val, idx) (getbit4(&(val), (idx)))
+#define LTECX_EXTRACT_GCIGPIO(val, idx) (getbit4(&(val), (idx)))
+
+/* WLAN channel numbers - used from wifi.h */
+
+/* WLAN BW */
+#define ECI_BW_20 0x0
+#define ECI_BW_25 0x1
+#define ECI_BW_30 0x2
+#define ECI_BW_35 0x3
+#define ECI_BW_40 0x4
+#define ECI_BW_45 0x5
+#define ECI_BW_50 0x6
+#define ECI_BW_ALL 0x7
+
+/* WLAN - number of antenna */
+#define WLAN_NUM_ANT1 TXANT_0
+#define WLAN_NUM_ANT2 TXANT_1
+
+/* otpctrl1 0xF4 */
+#define OTPC_FORCE_PWR_OFF 0x02000000
+/* chipcommon s/r registers introduced with cc rev >= 48 */
+#define CC_SR_CTL0_ENABLE_MASK 0x1
+#define CC_SR_CTL0_ENABLE_SHIFT 0
+#define CC_SR_CTL0_EN_SR_ENG_CLK_SHIFT 1 /* sr_clk to sr_memory enable */
+#define CC_SR_CTL0_RSRC_TRIGGER_SHIFT 2 /* Rising edge resource trigger 0 to sr_engine */
+#define CC_SR_CTL0_MIN_DIV_SHIFT 6 /* Min division value for fast clk in sr_engine */
+#define CC_SR_CTL0_EN_SBC_STBY_SHIFT 16 /* Allow Subcore mem StandBy? */
+#define CC_SR_CTL0_EN_SR_ALP_CLK_MASK_SHIFT 18
+#define CC_SR_CTL0_EN_SR_HT_CLK_SHIFT 19
+#define CC_SR_CTL0_ALLOW_PIC_SHIFT 20 /* Allow pic to separate power domains */
+#define CC_SR_CTL0_MAX_SR_LQ_CLK_CNT_SHIFT 25
+#define CC_SR_CTL0_EN_MEM_DISABLE_FOR_SLEEP 30
+
+#define CC_SR_CTL1_SR_INIT_MASK 0x3FF
+#define CC_SR_CTL1_SR_INIT_SHIFT 0
+
+#define ECI_INLO_PKTDUR_MASK 0x000000f0 /* [7:4] - 4 bits */
+#define ECI_INLO_PKTDUR_SHIFT 4
+
+/* gci chip control bits */
+#define GCI_GPIO_CHIPCTRL_ENAB_IN_BIT 0
+#define GCI_GPIO_CHIPCTRL_ENAB_OP_BIT 1
+#define GCI_GPIO_CHIPCTRL_INVERT_BIT 2
+#define GCI_GPIO_CHIPCTRL_PULLUP_BIT 3
+#define GCI_GPIO_CHIPCTRL_PULLDN_BIT 4
+#define GCI_GPIO_CHIPCTRL_ENAB_BTSIG_BIT 5
+#define GCI_GPIO_CHIPCTRL_ENAB_OD_OP_BIT 6
+#define GCI_GPIO_CHIPCTRL_ENAB_EXT_GPIO_BIT 7
+
+/* gci GPIO input status bits */
+#define GCI_GPIO_STS_VALUE_BIT 0
+#define GCI_GPIO_STS_POS_EDGE_BIT 1
+#define GCI_GPIO_STS_NEG_EDGE_BIT 2
+#define GCI_GPIO_STS_FAST_EDGE_BIT 3
+#define GCI_GPIO_STS_CLEAR 0xF
+
+#define GCI_GPIO_STS_EDGE_TRIG_BIT 0
+#define GCI_GPIO_STS_NEG_EDGE_TRIG_BIT 1
+#define GCI_GPIO_STS_DUAL_EDGE_TRIG_BIT 2
+#define GCI_GPIO_STS_WL_DIN_SELECT 6
+
+#define GCI_GPIO_STS_VALUE (1 << GCI_GPIO_STS_VALUE_BIT)
+
+/* SR Power Control */
+#define SRPWR_DMN0_PCIE (0) /* PCIE */
+#define SRPWR_DMN0_PCIE_SHIFT (SRPWR_DMN0_PCIE) /* PCIE */
+#define SRPWR_DMN0_PCIE_MASK (1 << SRPWR_DMN0_PCIE_SHIFT) /* PCIE */
+#define SRPWR_DMN1_ARMBPSD (1) /* ARM/BP/SDIO */
+#define SRPWR_DMN1_ARMBPSD_SHIFT (SRPWR_DMN1_ARMBPSD) /* ARM/BP/SDIO */
+#define SRPWR_DMN1_ARMBPSD_MASK (1 << SRPWR_DMN1_ARMBPSD_SHIFT) /* ARM/BP/SDIO */
+#define SRPWR_DMN2_MACAUX (2) /* MAC/Phy Aux */
+#define SRPWR_DMN2_MACAUX_SHIFT (SRPWR_DMN2_MACAUX) /* MAC/Phy Aux */
+#define SRPWR_DMN2_MACAUX_MASK (1 << SRPWR_DMN2_MACAUX_SHIFT) /* MAC/Phy Aux */
+#define SRPWR_DMN3_MACMAIN (3) /* MAC/Phy Main */
+#define SRPWR_DMN3_MACMAIN_SHIFT (SRPWR_DMN3_MACMAIN) /* MAC/Phy Main */
+#define SRPWR_DMN3_MACMAIN_MASK (1 << SRPWR_DMN3_MACMAIN_SHIFT) /* MAC/Phy Main */
+
+#define SRPWR_DMN4_MACSCAN (4) /* MAC/Phy Scan */
+#define SRPWR_DMN4_MACSCAN_SHIFT (SRPWR_DMN4_MACSCAN) /* MAC/Phy Scan */
+#define SRPWR_DMN4_MACSCAN_MASK (1 << SRPWR_DMN4_MACSCAN_SHIFT) /* MAC/Phy Scan */
+
+#define SRPWR_DMN_MAX (5)
+/* all power domain mask */
+#define SRPWR_DMN_ALL_MASK(sih) si_srpwr_domain_all_mask(sih)
+
+#define SRPWR_REQON_SHIFT (8) /* PowerOnRequest[11:8] */
+#define SRPWR_REQON_MASK(sih) (SRPWR_DMN_ALL_MASK(sih) << SRPWR_REQON_SHIFT)
+
+#define SRPWR_STATUS_SHIFT (16) /* ExtPwrStatus[19:16], RO */
+#define SRPWR_STATUS_MASK(sih) (SRPWR_DMN_ALL_MASK(sih) << SRPWR_STATUS_SHIFT)
+
+#define SRPWR_BT_STATUS_SHIFT (20) /* PowerDomain[21:20], RO */
+#define SRPWR_BT_STATUS_MASK (0x3)
+
+#define SRPWR_DMN_ID_SHIFT (28) /* PowerDomain[31:28], RO */
+#define SRPWR_DMN_ID_MASK (0xF)
+
+#define SRPWR_UP_DOWN_DELAY 100 /* more than 3 ILP clocks */
+
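The SRPWR fields compose mechanically: a power-on request is the domain mask shifted into PowerOnRequest[11:8], and readiness shows up in ExtPwrStatus[19:16]. A hedged polling sketch; powerctl_read()/powerctl_write() are illustrative stand-ins for the driver's register accessors and are not defined in this header:

/* Sketch: request the MAC/Phy Main domain on, then wait for its
 * external power status bit to assert.
 */
void srpwr_request_macmain(void)
{
	uint32 req = SRPWR_DMN3_MACMAIN_MASK << SRPWR_REQON_SHIFT;
	uint32 sts = SRPWR_DMN3_MACMAIN_MASK << SRPWR_STATUS_SHIFT;

	powerctl_write(powerctl_read() | req);
	while ((powerctl_read() & sts) == 0)
		udelay(SRPWR_UP_DOWN_DELAY); /* more than 3 ILP clocks */
}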
+/* PMU Precision Usec Timer */
+#define PMU_PREC_USEC_TIMER_ENABLE 0x1
+
+/* Random Number/Bit Generator defines */
+#define MASK_1BIT(offset) (0x1u << (offset))
+
+#define CC_RNG_CTRL_0_RBG_EN_SHIFT (0u)
+#define CC_RNG_CTRL_0_RBG_EN_MASK (0x1FFFu << CC_RNG_CTRL_0_RBG_EN_SHIFT)
+#define CC_RNG_CTRL_0_RBG_EN (0x1FFFu)
+#define CC_RNG_CTRL_0_RBG_DEV_CTRL_SHIFT (12u)
+#define CC_RNG_CTRL_0_RBG_DEV_CTRL_MASK (0x3u << CC_RNG_CTRL_0_RBG_DEV_CTRL_SHIFT)
+#define CC_RNG_CTRL_0_RBG_DEV_CTRL_1MHz (0x3u << CC_RNG_CTRL_0_RBG_DEV_CTRL_SHIFT)
+#define CC_RNG_CTRL_0_RBG_DEV_CTRL_2MHz (0x2u << CC_RNG_CTRL_0_RBG_DEV_CTRL_SHIFT)
+#define CC_RNG_CTRL_0_RBG_DEV_CTRL_4MHz (0x1u << CC_RNG_CTRL_0_RBG_DEV_CTRL_SHIFT)
+#define CC_RNG_CTRL_0_RBG_DEV_CTRL_8MHz (0x0u << CC_RNG_CTRL_0_RBG_DEV_CTRL_SHIFT)
+
+/* RNG_FIFO_COUNT */
+/* RFC - RNG FIFO COUNT */
+#define CC_RNG_FIFO_COUNT_RFC_SHIFT (0u)
+#define CC_RNG_FIFO_COUNT_RFC_MASK (0xFFu << CC_RNG_FIFO_COUNT_RFC_SHIFT)
+
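The FIFO count field is a plain mask-and-shift extraction; for instance, given a raw register read in a variable fifo_count (an assumed name):

/* Sketch: words currently available in the RNG FIFO. */
uint32 avail = (fifo_count & CC_RNG_FIFO_COUNT_RFC_MASK) >>
               CC_RNG_FIFO_COUNT_RFC_SHIFT;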
+/* RNG interrupt */
+#define CC_RNG_TOT_BITS_CNT_IRQ_SHIFT (0u)
+#define CC_RNG_TOT_BITS_CNT_IRQ_MASK (0x1u << CC_RNG_TOT_BITS_CNT_IRQ_SHIFT)
+#define CC_RNG_TOT_BITS_MAX_IRQ_SHIFT (1u)
+#define CC_RNG_TOT_BITS_MAX_IRQ_MASK (0x1u << CC_RNG_TOT_BITS_MAX_IRQ_SHIFT)
+#define CC_RNG_FIFO_FULL_IRQ_SHIFT (2u)
+#define CC_RNG_FIFO_FULL_IRQ_MASK (0x1u << CC_RNG_FIFO_FULL_IRQ_SHIFT)
+#define CC_RNG_FIFO_OVER_RUN_IRQ_SHIFT (3u)
+#define CC_RNG_FIFO_OVER_RUN_IRQ_MASK (0x1u << CC_RNG_FIFO_OVER_RUN_IRQ_SHIFT)
+#define CC_RNG_FIFO_UNDER_RUN_IRQ_SHIFT (4u)
+#define CC_RNG_FIFO_UNDER_RUN_IRQ_MASK (0x1u << CC_RNG_FIFO_UNDER_RUN_IRQ_SHIFT)
+#define CC_RNG_NIST_FAIL_IRQ_SHIFT (5u)
+#define CC_RNG_NIST_FAIL_IRQ_MASK (0x1u << CC_RNG_NIST_FAIL_IRQ_SHIFT)
+#define CC_RNG_STARTUP_TRANSITION_MET_IRQ_SHIFT (17u)
+#define CC_RNG_STARTUP_TRANSITION_MET_IRQ_MASK (0x1u << \
+ CC_RNG_STARTUP_TRANSITION_MET_IRQ_SHIFT)
+#define CC_RNG_MASTER_FAIL_LOCKOUT_IRQ_SHIFT (31u)
+#define CC_RNG_MASTER_FAIL_LOCKOUT_IRQ_MASK (0x1u << \
+ CC_RNG_MASTER_FAIL_LOCKOUT_IRQ_SHIFT)
+
+/* FISCtrlStatus */
+#define PMU_CLEAR_FIS_DONE_SHIFT 1u
+#define PMU_CLEAR_FIS_DONE_MASK (1u << PMU_CLEAR_FIS_DONE_SHIFT)
+
+#define PMU_FIS_FORCEON_ALL_SHIFT 4u
+#define PMU_FIS_FORCEON_ALL_MASK (1u << PMU_FIS_FORCEON_ALL_SHIFT)
+
+#define PMU_FIS_DN_TIMER_VAL_SHIFT 16u
+#define PMU_FIS_DN_TIMER_VAL_MASK 0x7FFF0000u
+
+#define PMU_FIS_DN_TIMER_VAL_4378 0x2f80u /* micro second */
+#define PMU_FIS_DN_TIMER_VAL_4388 0x3f80u /* micro second */
+#define PMU_FIS_DN_TIMER_VAL_4389 0x3f80u /* micro second */
+
+#define PMU_FIS_PCIE_SAVE_EN_SHIFT 5u
+#define PMU_FIS_PCIE_SAVE_EN_VALUE (1u << PMU_FIS_PCIE_SAVE_EN_SHIFT)
+
+#define PMU_REG6_RFLDO_CTRL 0x000000E0
+#define PMU_REG6_RFLDO_CTRL_SHFT 5
+
+#define PMU_REG6_BTLDO_CTRL 0x0000E000
+#define PMU_REG6_BTLDO_CTRL_SHFT 13
+
+/* ETBMemCtrl */
+#define CC_ETBMEMCTRL_FORCETMCINTFTOETB_SHIFT 1u
+#define CC_ETBMEMCTRL_FORCETMCINTFTOETB_MASK (1u << CC_ETBMEMCTRL_FORCETMCINTFTOETB_SHIFT)
+
+/* SSSR dumps locations on the backplane space */
+#define BCM4387_SSSR_DUMP_AXI_MAIN 0xE8C00000u
+#define BCM4387_SSSR_DUMP_MAIN_SIZE 160000u
+#define BCM4387_SSSR_DUMP_AXI_AUX 0xE8400000u
+#define BCM4387_SSSR_DUMP_AUX_SIZE 160000u
+#define BCM4387_SSSR_DUMP_AXI_SCAN 0xE9400000u
+#define BCM4387_SSSR_DUMP_SCAN_SIZE 32768u
+
+#endif /* _SBCHIPC_H */
diff --git a/bcmdhd.101.10.361.x/include/sbconfig.h b/bcmdhd.101.10.361.x/include/sbconfig.h
new file mode 100755
index 0000000..283eb0e
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/sbconfig.h
@@ -0,0 +1,279 @@
+/*
+ * Broadcom SiliconBackplane hardware register definitions.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _SBCONFIG_H
+#define _SBCONFIG_H
+
+/* cpp contortions to concatenate w/arg prescan */
+#ifndef PAD
+#define _PADLINE(line) pad ## line
+#define _XSTR(line) _PADLINE(line)
+#define PAD _XSTR(__LINE__)
+#endif
+
+/* enumeration in SB is based on the premise that cores are contiguous in the
+ * enumeration space.
+ */
+#define SB_BUS_SIZE 0x10000 /**< Each bus gets 64Kbytes for cores */
+#define SB_BUS_BASE(sih, b) (SI_ENUM_BASE(sih) + (b) * SB_BUS_SIZE)
+#define SB_BUS_MAXCORES (SB_BUS_SIZE / SI_CORE_SIZE) /**< Max cores per bus */
+
+/*
+ * Sonics Configuration Space Registers.
+ */
+#define SBCONFIGOFF 0xf00 /**< core sbconfig regs are top 256bytes of regs */
+#define SBCONFIGSIZE 256 /**< sizeof (sbconfig_t) */
+
+#define SBIPSFLAG 0x08
+#define SBTPSFLAG 0x18
+#define SBTMERRLOGA 0x48 /**< sonics >= 2.3 */
+#define SBTMERRLOG 0x50 /**< sonics >= 2.3 */
+#define SBADMATCH3 0x60
+#define SBADMATCH2 0x68
+#define SBADMATCH1 0x70
+#define SBIMSTATE 0x90
+#define SBINTVEC 0x94
+#define SBTMSTATELOW 0x98
+#define SBTMSTATEHIGH 0x9c
+#define SBBWA0 0xa0
+#define SBIMCONFIGLOW 0xa8
+#define SBIMCONFIGHIGH 0xac
+#define SBADMATCH0 0xb0
+#define SBTMCONFIGLOW 0xb8
+#define SBTMCONFIGHIGH 0xbc
+#define SBBCONFIG 0xc0
+#define SBBSTATE 0xc8
+#define SBACTCNFG 0xd8
+#define SBFLAGST 0xe8
+#define SBIDLOW 0xf8
+#define SBIDHIGH 0xfc
+
+/* All the previous registers are above SBCONFIGOFF, but with Sonics 2.3, we have
+ * a few registers *below* that line. I think it would be very confusing to try
+ * and change the value of SBCONFIGOFF, so I'm defining them as absolute offsets here.
+ */
+
+#define SBIMERRLOGA 0xea8
+#define SBIMERRLOG 0xeb0
+#define SBTMPORTCONNID0 0xed8
+#define SBTMPORTLOCK0 0xef8
+
+#if !defined(_LANGUAGE_ASSEMBLY) && !defined(__ASSEMBLY__)
+
+typedef volatile struct _sbconfig {
+ uint32 PAD[2];
+ uint32 sbipsflag; /**< initiator port ocp slave flag */
+ uint32 PAD[3];
+ uint32 sbtpsflag; /**< target port ocp slave flag */
+ uint32 PAD[11];
+ uint32 sbtmerrloga; /**< (sonics >= 2.3) */
+ uint32 PAD;
+ uint32 sbtmerrlog; /**< (sonics >= 2.3) */
+ uint32 PAD[3];
+ uint32 sbadmatch3; /**< address match3 */
+ uint32 PAD;
+ uint32 sbadmatch2; /**< address match2 */
+ uint32 PAD;
+ uint32 sbadmatch1; /**< address match1 */
+ uint32 PAD[7];
+ uint32 sbimstate; /**< initiator agent state */
+ uint32 sbintvec; /**< interrupt mask */
+ uint32 sbtmstatelow; /**< target state */
+ uint32 sbtmstatehigh; /**< target state */
+ uint32 sbbwa0; /**< bandwidth allocation table0 */
+ uint32 PAD;
+ uint32 sbimconfiglow; /**< initiator configuration */
+ uint32 sbimconfighigh; /**< initiator configuration */
+ uint32 sbadmatch0; /**< address match0 */
+ uint32 PAD;
+ uint32 sbtmconfiglow; /**< target configuration */
+ uint32 sbtmconfighigh; /**< target configuration */
+ uint32 sbbconfig; /**< broadcast configuration */
+ uint32 PAD;
+ uint32 sbbstate; /**< broadcast state */
+ uint32 PAD[3];
+ uint32 sbactcnfg; /**< activate configuration */
+ uint32 PAD[3];
+ uint32 sbflagst; /**< current sbflags */
+ uint32 PAD[3];
+ uint32 sbidlow; /**< identification */
+ uint32 sbidhigh; /**< identification */
+} sbconfig_t;
+
+#endif /* !_LANGUAGE_ASSEMBLY && !__ASSEMBLY__ */
+
+/* sbipsflag */
+#define SBIPS_INT1_MASK 0x3f /**< which sbflags get routed to mips interrupt 1 */
+#define SBIPS_INT1_SHIFT 0
+#define SBIPS_INT2_MASK 0x3f00 /**< which sbflags get routed to mips interrupt 2 */
+#define SBIPS_INT2_SHIFT 8
+#define SBIPS_INT3_MASK 0x3f0000 /**< which sbflags get routed to mips interrupt 3 */
+#define SBIPS_INT3_SHIFT 16
+#define SBIPS_INT4_MASK 0x3f000000 /**< which sbflags get routed to mips interrupt 4 */
+#define SBIPS_INT4_SHIFT 24
+
+/* sbtpsflag */
+#define SBTPS_NUM0_MASK 0x3f /**< interrupt sbFlag # generated by this core */
+#define SBTPS_F0EN0 0x40 /**< interrupt is always sent on the backplane */
+
+/* sbtmerrlog */
+#define SBTMEL_CM 0x00000007 /**< command */
+#define SBTMEL_CI 0x0000ff00 /**< connection id */
+#define SBTMEL_EC 0x0f000000 /**< error code */
+#define SBTMEL_ME 0x80000000 /**< multiple error */
+
+/* sbimstate */
+#define SBIM_PC 0xf /**< pipecount */
+#define SBIM_AP_MASK 0x30 /**< arbitration policy */
+#define SBIM_AP_BOTH 0x00 /**< use both timeslices and token */
+#define SBIM_AP_TS 0x10 /**< use timeslices only */
+#define SBIM_AP_TK 0x20 /**< use token only */
+#define SBIM_AP_RSV 0x30 /**< reserved */
+#define SBIM_IBE 0x20000 /**< inbanderror */
+#define SBIM_TO 0x40000 /**< timeout */
+#define SBIM_BY 0x01800000 /**< busy (sonics >= 2.3) */
+#define SBIM_RJ 0x02000000 /**< reject (sonics >= 2.3) */
+
+/* sbtmstatelow */
+#define SBTML_RESET 0x0001 /**< reset */
+#define SBTML_REJ_MASK 0x0006 /**< reject field */
+#define SBTML_REJ 0x0002 /**< reject */
+#define SBTML_TMPREJ 0x0004 /**< temporary reject, for error recovery */
+
+#define SBTML_SICF_SHIFT 16 /**< Shift to locate the SI control flags in sbtml */
+
+/* sbtmstatehigh */
+#define SBTMH_SERR 0x0001 /**< serror */
+#define SBTMH_INT 0x0002 /**< interrupt */
+#define SBTMH_BUSY 0x0004 /**< busy */
+#define SBTMH_TO 0x0020 /**< timeout (sonics >= 2.3) */
+
+#define SBTMH_SISF_SHIFT 16 /**< Shift to locate the SI status flags in sbtmh */
+
+/* sbbwa0 */
+#define SBBWA_TAB0_MASK 0xffff /**< lookup table 0 */
+#define SBBWA_TAB1_MASK 0xffff /**< lookup table 1 */
+#define SBBWA_TAB1_SHIFT 16
+
+/* sbimconfiglow */
+#define SBIMCL_STO_MASK 0x7 /**< service timeout */
+#define SBIMCL_RTO_MASK 0x70 /**< request timeout */
+#define SBIMCL_RTO_SHIFT 4
+#define SBIMCL_CID_MASK 0xff0000 /**< connection id */
+#define SBIMCL_CID_SHIFT 16
+
+/* sbimconfighigh */
+#define SBIMCH_IEM_MASK 0xc /**< inband error mode */
+#define SBIMCH_TEM_MASK 0x30 /**< timeout error mode */
+#define SBIMCH_TEM_SHIFT 4
+#define SBIMCH_BEM_MASK 0xc0 /**< bus error mode */
+#define SBIMCH_BEM_SHIFT 6
+
+/* sbadmatch0 */
+#define SBAM_TYPE_MASK 0x3 /**< address type */
+#define SBAM_AD64 0x4 /**< reserved */
+#define SBAM_ADINT0_MASK 0xf8 /**< type0 size */
+#define SBAM_ADINT0_SHIFT 3
+#define SBAM_ADINT1_MASK 0x1f8 /**< type1 size */
+#define SBAM_ADINT1_SHIFT 3
+#define SBAM_ADINT2_MASK 0x1f8 /**< type2 size */
+#define SBAM_ADINT2_SHIFT 3
+#define SBAM_ADEN 0x400 /**< enable */
+#define SBAM_ADNEG 0x800 /**< negative decode */
+#define SBAM_BASE0_MASK 0xffffff00 /**< type0 base address */
+#define SBAM_BASE0_SHIFT 8
+#define SBAM_BASE1_MASK 0xfffff000 /**< type1 base address for the core */
+#define SBAM_BASE1_SHIFT 12
+#define SBAM_BASE2_MASK 0xffff0000 /**< type2 base address for the core */
+#define SBAM_BASE2_SHIFT 16
+
+/* sbtmconfiglow */
+#define SBTMCL_CD_MASK 0xff /**< clock divide */
+#define SBTMCL_CO_MASK 0xf800 /**< clock offset */
+#define SBTMCL_CO_SHIFT 11
+#define SBTMCL_IF_MASK 0xfc0000 /**< interrupt flags */
+#define SBTMCL_IF_SHIFT 18
+#define SBTMCL_IM_MASK 0x3000000 /**< interrupt mode */
+#define SBTMCL_IM_SHIFT 24
+
+/* sbtmconfighigh */
+#define SBTMCH_BM_MASK 0x3 /**< busy mode */
+#define SBTMCH_RM_MASK 0x3 /**< retry mode */
+#define SBTMCH_RM_SHIFT 2
+#define SBTMCH_SM_MASK 0x30 /**< stop mode */
+#define SBTMCH_SM_SHIFT 4
+#define SBTMCH_EM_MASK 0x300 /**< sb error mode */
+#define SBTMCH_EM_SHIFT 8
+#define SBTMCH_IM_MASK 0xc00 /**< int mode */
+#define SBTMCH_IM_SHIFT 10
+
+/* sbbconfig */
+#define SBBC_LAT_MASK 0x3 /**< sb latency */
+#define SBBC_MAX0_MASK 0xf0000 /**< maxccntr0 */
+#define SBBC_MAX0_SHIFT 16
+#define SBBC_MAX1_MASK 0xf00000 /**< maxccntr1 */
+#define SBBC_MAX1_SHIFT 20
+
+/* sbbstate */
+#define SBBS_SRD 0x1 /**< st reg disable */
+#define SBBS_HRD 0x2 /**< hold reg disable */
+
+/* sbidlow */
+#define SBIDL_CS_MASK 0x3 /**< config space */
+#define SBIDL_AR_MASK 0x38 /**< # address ranges supported */
+#define SBIDL_AR_SHIFT 3
+#define SBIDL_SYNCH 0x40 /**< sync */
+#define SBIDL_INIT 0x80 /**< initiator */
+#define SBIDL_MINLAT_MASK 0xf00 /**< minimum backplane latency */
+#define SBIDL_MINLAT_SHIFT 8
+#define SBIDL_MAXLAT 0xf000 /**< maximum backplane latency */
+#define SBIDL_MAXLAT_SHIFT 12
+#define SBIDL_FIRST 0x10000 /**< this initiator is first */
+#define SBIDL_CW_MASK 0xc0000 /**< cycle counter width */
+#define SBIDL_CW_SHIFT 18
+#define SBIDL_TP_MASK 0xf00000 /**< target ports */
+#define SBIDL_TP_SHIFT 20
+#define SBIDL_IP_MASK 0xf000000 /**< initiator ports */
+#define SBIDL_IP_SHIFT 24
+#define SBIDL_RV_MASK 0xf0000000 /**< sonics backplane revision code */
+#define SBIDL_RV_SHIFT 28
+#define SBIDL_RV_2_2 0x00000000 /**< version 2.2 or earlier */
+#define SBIDL_RV_2_3 0x10000000 /**< version 2.3 */
+
+/* sbidhigh */
+#define SBIDH_RC_MASK 0x000f /**< revision code */
+#define SBIDH_RCE_MASK 0x7000 /**< revision code extension field */
+#define SBIDH_RCE_SHIFT 8
+#define SBCOREREV(sbidh) \
+ ((((sbidh) & SBIDH_RCE_MASK) >> SBIDH_RCE_SHIFT) | ((sbidh) & SBIDH_RC_MASK))
+#define SBIDH_CC_MASK 0x8ff0 /**< core code */
+#define SBIDH_CC_SHIFT 4
+#define SBIDH_VC_MASK 0xffff0000 /**< vendor code */
+#define SBIDH_VC_SHIFT 16
+
+#define SB_COMMIT 0xfd8 /**< update buffered registers value */
+
+/* vendor codes */
+#define SB_VEND_BCM 0x4243 /**< Broadcom's SB vendor code */
+
+#endif /* _SBCONFIG_H */
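The split revision encoding is the one non-obvious part of sbidhigh: SBCOREREV() recombines the 3-bit extension field with the 4-bit base code. A worked sketch against an assumed register value:

/* Sketch: decode a hypothetical sbidhigh read of 0x4243d512.
 * vendor  = (0x4243d512 & SBIDH_VC_MASK) >> SBIDH_VC_SHIFT = 0x4243 (SB_VEND_BCM)
 * corerev = ((0xd512 & SBIDH_RCE_MASK) >> SBIDH_RCE_SHIFT) | (0xd512 & SBIDH_RC_MASK)
 *         = 0x50 | 0x2 = 0x52
 */
uint32 sbidh = 0x4243d512;	/* assumed value, for illustration only */
uint32 vendor = (sbidh & SBIDH_VC_MASK) >> SBIDH_VC_SHIFT;
uint32 corerev = SBCOREREV(sbidh);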
diff --git a/bcmdhd.101.10.361.x/include/sbgci.h b/bcmdhd.101.10.361.x/include/sbgci.h
new file mode 100755
index 0000000..0b265b6
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/sbgci.h
@@ -0,0 +1,424 @@
+/*
+ * SiliconBackplane GCI core hardware definitions
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _SBGCI_H
+#define _SBGCI_H
+
+#include <bcmutils.h>
+
+#if !defined(_LANGUAGE_ASSEMBLY) && !defined(__ASSEMBLY__)
+
+/* cpp contortions to concatenate w/arg prescan */
+#ifndef PAD
+#define _PADLINE(line) pad ## line
+#define _XSTR(line) _PADLINE(line)
+#define PAD _XSTR(__LINE__)
+#endif /* PAD */
+
+#define GCI_OFFSETOF(sih, reg) \
+ (AOB_ENAB(sih) ? OFFSETOF(gciregs_t, reg) : OFFSETOF(chipcregs_t, reg))
+#define GCI_CORE_IDX(sih) (AOB_ENAB(sih) ? si_findcoreidx(sih, GCI_CORE_ID, 0) : SI_CC_IDX)
+
+typedef volatile struct {
+ uint32 gci_corecaps0; /* 0x000 */
+ uint32 gci_corecaps1; /* 0x004 */
+ uint32 gci_corecaps2; /* 0x008 */
+ uint32 gci_corectrl; /* 0x00c */
+ uint32 gci_corestat; /* 0x010 */
+ uint32 gci_intstat; /* 0x014 */
+ uint32 gci_intmask; /* 0x018 */
+ uint32 gci_wakemask; /* 0x01c */
+ uint32 gci_levelintstat; /* 0x020 */
+ uint32 gci_eventintstat; /* 0x024 */
+ uint32 gci_wakelevelintstat; /* 0x028 */
+ uint32 gci_wakeeventintstat; /* 0x02c */
+ uint32 semaphoreintstatus; /* 0x030 */
+ uint32 semaphoreintmask; /* 0x034 */
+ uint32 semaphorerequest; /* 0x038 */
+ uint32 semaphorereserve; /* 0x03c */
+ uint32 gci_indirect_addr; /* 0x040 */
+ uint32 gci_gpioctl; /* 0x044 */
+ uint32 gci_gpiostatus; /* 0x048 */
+ uint32 gci_gpiomask; /* 0x04c */
+ uint32 gci_eventsummary; /* 0x050 */
+ uint32 gci_miscctl; /* 0x054 */
+ uint32 gci_gpiointmask; /* 0x058 */
+ uint32 gci_gpiowakemask; /* 0x05c */
+ uint32 gci_input[32]; /* 0x060 */
+ uint32 gci_event[32]; /* 0x0e0 */
+ uint32 gci_output[4]; /* 0x160 */
+ uint32 gci_control_0; /* 0x170 */
+ uint32 gci_control_1; /* 0x174 */
+ uint32 gci_intpolreg; /* 0x178 */
+ uint32 gci_levelintmask; /* 0x17c */
+ uint32 gci_eventintmask; /* 0x180 */
+ uint32 wakelevelintmask; /* 0x184 */
+ uint32 wakeeventintmask; /* 0x188 */
+ uint32 hwmask; /* 0x18c */
+ uint32 PAD;
+ uint32 gci_inbandeventintmask; /* 0x194 */
+ uint32 PAD;
+ uint32 gci_inbandeventstatus; /* 0x19c */
+ uint32 gci_seciauxtx; /* 0x1a0 */
+ uint32 gci_seciauxrx; /* 0x1a4 */
+ uint32 gci_secitx_datatag; /* 0x1a8 */
+ uint32 gci_secirx_datatag; /* 0x1ac */
+ uint32 gci_secitx_datamask; /* 0x1b0 */
+ uint32 gci_seciusef0tx_reg; /* 0x1b4 */
+ uint32 gci_secif0tx_offset; /* 0x1b8 */
+ uint32 gci_secif0rx_offset; /* 0x1bc */
+ uint32 gci_secif1tx_offset; /* 0x1c0 */
+ uint32 gci_rxfifo_common_ctrl; /* 0x1c4 */
+ uint32 gci_rxfifoctrl; /* 0x1c8 */
+ uint32 gci_hw_sema_status; /* 0x1cc */
+ uint32 gci_seciuartescval; /* 0x1d0 */
+ uint32 gic_seciuartautobaudctr; /* 0x1d4 */
+ uint32 gci_secififolevel; /* 0x1d8 */
+ uint32 gci_seciuartdata; /* 0x1dc */
+ uint32 gci_secibauddiv; /* 0x1e0 */
+ uint32 gci_secifcr; /* 0x1e4 */
+ uint32 gci_secilcr; /* 0x1e8 */
+ uint32 gci_secimcr; /* 0x1ec */
+ uint32 gci_secilsr; /* 0x1f0 */
+ uint32 gci_secimsr; /* 0x1f4 */
+ uint32 gci_baudadj; /* 0x1f8 */
+ uint32 gci_inbandintmask; /* 0x1fc */
+ uint32 gci_chipctrl; /* 0x200 */
+ uint32 gci_chipsts; /* 0x204 */
+ uint32 gci_gpioout; /* 0x208 */
+ uint32 gci_gpioout_read; /* 0x20C */
+ uint32 gci_mpwaketx; /* 0x210 */
+ uint32 gci_mpwakedetect; /* 0x214 */
+ uint32 gci_seciin_ctrl; /* 0x218 */
+ uint32 gci_seciout_ctrl; /* 0x21C */
+ uint32 gci_seciin_auxfifo_en; /* 0x220 */
+ uint32 gci_seciout_txen_txbr; /* 0x224 */
+ uint32 gci_seciin_rxbrstatus; /* 0x228 */
+ uint32 gci_seciin_rxerrstatus; /* 0x22C */
+ uint32 gci_seciin_fcstatus; /* 0x230 */
+ uint32 gci_seciout_txstatus; /* 0x234 */
+ uint32 gci_seciout_txbrstatus; /* 0x238 */
+ uint32 wlan_mem_info; /* 0x23C */
+ uint32 wlan_bankxinfo; /* 0x240 */
+ uint32 bt_smem_select; /* 0x244 */
+ uint32 bt_smem_stby; /* 0x248 */
+ uint32 bt_smem_status; /* 0x24C */
+ uint32 wlan_bankxactivepda; /* 0x250 */
+ uint32 wlan_bankxsleeppda; /* 0x254 */
+ uint32 wlan_bankxkill; /* 0x258 */
+ uint32 reset_override; /* 0x25C */
+ uint32 ip_id; /* 0x260 */
+ uint32 lpo_safe_zone; /* 0x264 */
+ uint32 function_sel_control_and_status; /* 0x268 */
+ uint32 bt_smem_control0; /* 0x26C */
+ uint32 bt_smem_control1; /* 0x270 */
+ uint32 PAD[PADSZ(0x274, 0x2fc)]; /* 0x274-0x2fc */
+ uint32 gci_chipid; /* 0x300 */
+ uint32 PAD[PADSZ(0x304, 0x30c)]; /* 0x304-0x30c */
+ uint32 otpstatus; /* 0x310 */
+ uint32 otpcontrol; /* 0x314 */
+ uint32 otpprog; /* 0x318 */
+ uint32 otplayout; /* 0x31c */
+ uint32 otplayoutextension; /* 0x320 */
+ uint32 otpcontrol1; /* 0x324 */
+ uint32 otpprogdata; /* 0x328 */
+ uint32 PAD[PADSZ(0x32c, 0x3f8)]; /* 0x32c-0x3f8 */
+ uint32 otpECCstatus; /* 0x3FC */
+ uint32 gci_rffe_rfem_data0; /* 0x400 */
+ uint32 gci_rffe_rfem_data1; /* 0x404 */
+ uint32 gci_rffe_rfem_data2; /* 0x408 */
+ uint32 gci_rffe_rfem_data3; /* 0x40c */
+ uint32 gci_rffe_rfem_addr; /* 0x410 */
+ uint32 gci_rffe_config; /* 0x414 */
+ uint32 gci_rffe_clk_ctrl; /* 0x418 */
+ uint32 gci_rffe_ctrl; /* 0x41c */
+ uint32 gci_rffe_misc_ctrl; /* 0x420 */
+ uint32 gci_rffe_rfem_reg0_field_ctrl; /* 0x424 */
+ uint32 PAD[PADSZ(0x428, 0x438)]; /* 0x428-0x438 */
+ uint32 gci_rffe_rfem_mapping_mux0; /* 0x43c */
+ uint32 gci_rffe_rfem_mapping_mux1; /* 0x440 */
+ uint32 gci_rffe_rfem_mapping_mux2; /* 0x444 */
+ uint32 gci_rffe_rfem_mapping_mux3; /* 0x448 */
+ uint32 gci_rffe_rfem_mapping_mux4; /* 0x44c */
+ uint32 gci_rffe_rfem_mapping_mux5; /* 0x450 */
+ uint32 gci_rffe_rfem_mapping_mux6; /* 0x454 */
+ uint32 gci_rffe_rfem_mapping_mux7; /* 0x458 */
+ uint32 gci_rffe_change_detect_ovr_wlmc; /* 0x45c */
+ uint32 gci_rffe_change_detect_ovr_wlac; /* 0x460 */
+ uint32 gci_rffe_change_detect_ovr_wlsc; /* 0x464 */
+ uint32 gci_rffe_change_detect_ovr_btmc; /* 0x468 */
+ uint32 gci_rffe_change_detect_ovr_btsc; /* 0x46c */
+ uint32 gci_cncb_ctrl_status; /* 0x470 */
+ uint32 gci_cncb_2g_force_unlock; /* 0x474 */
+ uint32 gci_cncb_5g_force_unlock; /* 0x478 */
+ uint32 gci_cncb_2g_reset_pulse_width; /* 0x47c */
+ uint32 gci_cncb_5g_reset_pulse_width; /* 0x480 */
+ uint32 gci_cncb_lut_indirect_addr; /* 0x484 */
+ uint32 gci_cncb_2g_lut; /* 0x488 */
+ uint32 gci_cncb_5g_lut; /* 0x48c */
+ uint32 gci_cncb_glitch_filter_width; /* 0x490 */
+ uint32 PAD[PADSZ(0x494, 0x5fc)]; /* 0x494-0x5fc */
+ uint32 sgr_fifo_control_reg_5g; /* 0x600 */
+ uint32 sgr_fifo_control_reg_2g; /* 0x604 */
+ uint32 sgr_fifo_control_reg_bt; /* 0x608 */
+ uint32 PAD; /* 0x60c */
+ uint32 sgr_rx_fifo0_read_reg0; /* 0x610 */
+ uint32 sgr_rx_fifo0_read_reg1; /* 0x614 */
+ uint32 sgr_rx_fifo0_read_reg2; /* 0x618 */
+ uint32 sgr_rx_fifo1_read_reg0; /* 0x61c */
+ uint32 sgr_rx_fifo1_read_reg1; /* 0x620 */
+ uint32 sgr_rx_fifo1_read_reg2; /* 0x624 */
+ uint32 sgr_rx_fifo2_read_reg0; /* 0x628 */
+ uint32 sgr_rx_fifo2_read_reg1; /* 0x62c */
+ uint32 sgr_rx_fifo2_read_reg2; /* 0x630 */
+ uint32 sgr_rx_fifo3_read_reg0; /* 0x634 */
+ uint32 sgr_rx_fifo3_read_reg1; /* 0x638 */
+ uint32 sgr_rx_fifo3_read_reg2; /* 0x63c */
+ uint32 sgr_rx_fifo4_read_reg0; /* 0x640 */
+ uint32 sgr_rx_fifo4_read_reg1; /* 0x644 */
+ uint32 sgr_rx_fifo4_read_reg2; /* 0x648 */
+ uint32 sgr_rx_fifo5_read_reg0; /* 0x64c */
+ uint32 sgr_rx_fifo5_read_reg1; /* 0x650 */
+ uint32 sgr_rx_fifo5_read_reg2; /* 0x654 */
+ uint32 sgr_rx_fifo6_read_reg0; /* 0x658 */
+ uint32 sgr_rx_fifo6_read_reg1; /* 0x65c */
+ uint32 sgr_rx_fifo6_read_reg2; /* 0x660 */
+ uint32 sgr_rx_fifo7_read_reg0; /* 0x664 */
+ uint32 sgr_rx_fifo7_read_reg1; /* 0x668 */
+ uint32 sgr_rx_fifo7_read_reg2; /* 0x66c */
+ uint32 sgr_rx_fifo8_read_reg0; /* 0x670 */
+ uint32 sgr_rx_fifo8_read_reg1; /* 0x674 */
+ uint32 sgr_rx_fifo8_read_reg2; /* 0x678 */
+ uint32 sgr_rx_fifo0_read_status; /* 0x67c */
+ uint32 sgr_rx_fifo1_read_status; /* 0x680 */
+ uint32 sgr_rx_fifo2_read_status; /* 0x684 */
+ uint32 sgr_rx_fifo3_read_status; /* 0x688 */
+ uint32 sgr_rx_fifo4_read_status; /* 0x68c */
+ uint32 sgr_rx_fifo5_read_status; /* 0x690 */
+ uint32 sgr_rx_fifo6_read_status; /* 0x694 */
+ uint32 sgr_rx_fifo7_read_status; /* 0x698 */
+ uint32 sgr_rx_fifo8_read_status; /* 0x69c */
+ uint32 wl_tx_fifo_data_idx_reg; /* 0x6a0 */
+ uint32 wl_tx_fifo_data_reg0; /* 0x6a4 */
+ uint32 wl_tx_fifo_data_reg1; /* 0x6a8 */
+ uint32 wl_tx_fifo_data_reg2; /* 0x6ac */
+ uint32 mac_main_core_tx_fifo_data_idx_reg; /* 0x6b0 */
+ uint32 mac_main_core_tx_fifo_data_reg0; /* 0x6b4 */
+ uint32 mac_main_core_tx_fifo_data_reg1; /* 0x6b8 */
+ uint32 mac_main_core_tx_fifo_data_reg2; /* 0x6bc */
+ uint32 mac_aux_core_tx_fifo_data_idx_reg; /* 0x6c0 */
+ uint32 mac_aux_core_tx_fifo_data_reg0; /* 0x6c4 */
+ uint32 mac_aux_core_tx_fifo_data_reg1; /* 0x6c8 */
+ uint32 mac_aux_core_tx_fifo_data_reg2; /* 0x6cc */
+ uint32 bt_tx_fifo_data_idx_reg; /* 0x6d0 */
+ uint32 bt_tx_fifo_data_reg0; /* 0x6d4 */
+ uint32 bt_tx_fifo_data_reg1; /* 0x6d8 */
+ uint32 bt_tx_fifo_data_reg2; /* 0x6dc */
+ uint32 wci2_tx_fifo_data_reg0; /* 0x6e0 */
+ uint32 wci2_tx_fifo_data_reg1; /* 0x6e4 */
+ uint32 sgt_tx_fifo_ctrl; /* 0x6e8 */
+ uint32 sgt_fifo_status_hpri; /* 0x6ec */
+ uint32 sgt_fifo_status_norm; /* 0x6f0 */
+ uint32 sgt_fifo_status_lpri; /* 0x6f4 */
+ uint32 PAD[PADSZ(0x6f8, 0x7a0)]; /* 0x6f8-0x7a0 */
+ uint32 sg_timestamp_fifo_ctrl; /* 0x7a4 */
+ uint32 sgr_timestamp_data_rx; /* 0x7a8 */
+ uint32 sgr_timestamp_data_tx; /* 0x7ac */
+ uint32 sgr_fifo_int_reg; /* 0x7b0 */
+ uint32 sgr_fifo_int_mask_reg; /* 0x7b4 */
+ uint32 sgt_fifo_int_reg; /* 0x7b8 */
+ uint32 sgt_fifo_int_mask_reg; /* 0x7bc */
+ uint32 sg_fifo_debug_bus; /* 0x7c0 */
+ uint32 PAD[PADSZ(0x7c4, 0xbfc)]; /* 0x7c4-0xbfc */
+ uint32 lhl_core_capab_adr; /* 0xC00 */
+ uint32 lhl_main_ctl_adr; /* 0xC04 */
+ uint32 lhl_pmu_ctl_adr; /* 0xC08 */
+ uint32 lhl_extlpo_ctl_adr; /* 0xC0C */
+ uint32 lpo_ctl_adr; /* 0xC10 */
+ uint32 lhl_lpo2_ctl_adr; /* 0xC14 */
+ uint32 lhl_osc32k_ctl_adr; /* 0xC18 */
+ uint32 lhl_clk_status_adr; /* 0xC1C */
+ uint32 lhl_clk_det_ctl_adr; /* 0xC20 */
+ uint32 lhl_clk_sel_adr; /* 0xC24 */
+ uint32 hidoff_cnt_adr[2]; /* 0xC28-0xC2C */
+ uint32 lhl_autoclk_ctl_adr; /* 0xC30 */
+ uint32 PAD; /* reserved */
+ uint32 lhl_hibtim_adr; /* 0xC38 */
+ uint32 lhl_wl_ilp_val_adr; /* 0xC3C */
+ uint32 lhl_wl_armtim0_intrp_adr; /* 0xC40 */
+ uint32 lhl_wl_armtim0_st_adr; /* 0xC44 */
+ uint32 lhl_wl_armtim0_adr; /* 0xC48 */
+ uint32 PAD[PADSZ(0xc4c, 0xc6c)]; /* 0xC4C-0xC6C */
+ uint32 lhl_wl_mactim0_intrp_adr; /* 0xC70 */
+ uint32 lhl_wl_mactim0_st_adr; /* 0xC74 */
+ uint32 lhl_wl_mactim_int0_adr; /* 0xC78 */
+ uint32 lhl_wl_mactim_frac0_adr; /* 0xC7C */
+ uint32 lhl_wl_mactim1_intrp_adr; /* 0xC80 */
+ uint32 lhl_wl_mactim1_st_adr; /* 0xC84 */
+ uint32 lhl_wl_mactim_int1_adr; /* 0xC88 */
+ uint32 lhl_wl_mactim_frac1_adr; /* 0xC8C */
+ uint32 lhl_wl_mactim2_intrp_adr; /* 0xC90 */
+ uint32 lhl_wl_mactim2_st_adr; /* 0xC94 */
+ uint32 lhl_wl_mactim_int2_adr; /* 0xC98 */
+ uint32 lhl_wl_mactim_frac2_adr; /* 0xC9C */
+ uint32 PAD[PADSZ(0xca0, 0xcac)]; /* 0xCA0-0xCAC */
+ uint32 gpio_int_en_port_adr[4]; /* 0xCB0-0xCBC */
+ uint32 gpio_int_st_port_adr[4]; /* 0xCC0-0xCCC */
+ uint32 gpio_ctrl_iocfg_p_adr[40]; /* 0xCD0-0xD6C */
+ uint32 lhl_lp_up_ctl1_adr; /* 0xd70 */
+ uint32 lhl_lp_dn_ctl1_adr; /* 0xd74 */
+ uint32 PAD[PADSZ(0xd78, 0xdb4)]; /* 0xd78-0xdb4 */
+ uint32 lhl_sleep_timer_adr; /* 0xDB8 */
+ uint32 lhl_sleep_timer_ctl_adr; /* 0xDBC */
+ uint32 lhl_sleep_timer_load_val_adr; /* 0xDC0 */
+ uint32 lhl_lp_main_ctl_adr; /* 0xDC4 */
+ uint32 lhl_lp_up_ctl_adr; /* 0xDC8 */
+ uint32 lhl_lp_dn_ctl_adr; /* 0xDCC */
+ uint32 gpio_gctrl_iocfg_p0_p39_adr; /* 0xDD0 */
+ uint32 gpio_gdsctrl_iocfg_p0_p25_p30_p39_adr; /* 0xDD4 */
+ uint32 gpio_gdsctrl_iocfg_p26_p29_adr; /* 0xDD8 */
+ uint32 PAD[PADSZ(0xddc, 0xdf8)]; /* 0xDDC-0xDF8 */
+ uint32 lhl_gpio_din0_adr; /* 0xDFC */
+ uint32 lhl_gpio_din1_adr; /* 0xE00 */
+ uint32 lhl_wkup_status_adr; /* 0xE04 */
+ uint32 lhl_ctl_adr; /* 0xE08 */
+ uint32 lhl_adc_ctl_adr; /* 0xE0C */
+ uint32 lhl_qdxyz_in_dly_adr; /* 0xE10 */
+ uint32 lhl_optctl_adr; /* 0xE14 */
+ uint32 lhl_optct2_adr; /* 0xE18 */
+ uint32 lhl_scanp_cntr_init_val_adr; /* 0xE1C */
+ uint32 lhl_opt_togg_val_adr[6]; /* 0xE20-0xE34 */
+ uint32 lhl_optx_smp_val_adr; /* 0xE38 */
+ uint32 lhl_opty_smp_val_adr; /* 0xE3C */
+ uint32 lhl_optz_smp_val_adr; /* 0xE40 */
+ uint32 lhl_hidoff_keepstate_adr[3]; /* 0xE44-0xE4C */
+ uint32 lhl_bt_slmboot_ctl0_adr[4]; /* 0xE50-0xE5C */
+ uint32 lhl_wl_fw_ctl; /* 0xE60 */
+ uint32 lhl_wl_hw_ctl_adr[2]; /* 0xE64-0xE68 */
+ uint32 lhl_bt_hw_ctl_adr; /* 0xE6C */
+ uint32 lhl_top_pwrseq_en_adr; /* 0xE70 */
+ uint32 lhl_top_pwrdn_ctl_adr; /* 0xE74 */
+ uint32 lhl_top_pwrup_ctl_adr; /* 0xE78 */
+ uint32 lhl_top_pwrseq_ctl_adr; /* 0xE7C */
+ uint32 lhl_top_pwrdn2_ctl_adr; /* 0xE80 */
+ uint32 lhl_top_pwrup2_ctl_adr; /* 0xE84 */
+ uint32 wpt_regon_intrp_cfg_adr; /* 0xE88 */
+ uint32 bt_regon_intrp_cfg_adr; /* 0xE8C */
+ uint32 wl_regon_intrp_cfg_adr; /* 0xE90 */
+ uint32 regon_intrp_st_adr; /* 0xE94 */
+ uint32 regon_intrp_en_adr; /* 0xE98 */
+ uint32 PAD[PADSZ(0xe9c, 0xeb4)]; /* 0xe9c-0xeb4 */
+ uint32 lhl_lp_main_ctl1_adr; /* 0xeb8 */
+ uint32 lhl_lp_up_ctl2_adr; /* 0xebc */
+ uint32 lhl_lp_dn_ctl2_adr; /* 0xec0 */
+ uint32 lhl_lp_up_ctl3_adr; /* 0xec4 */
+ uint32 lhl_lp_dn_ctl3_adr; /* 0xec8 */
+ uint32 PAD[PADSZ(0xecc, 0xed8)]; /* 0xecc-0xed8 */
+ uint32 lhl_lp_main_ctl2_adr; /* 0xedc */
+ uint32 lhl_lp_up_ctl4_adr; /* 0xee0 */
+ uint32 lhl_lp_dn_ctl4_adr; /* 0xee4 */
+ uint32 lhl_lp_up_ctl5_adr; /* 0xee8 */
+ uint32 lhl_lp_dn_ctl5_adr; /* 0xeec */
+ uint32 lhl_top_pwrdn3_ctl_adr; /* 0xEF0 */
+ uint32 lhl_top_pwrup3_ctl_adr; /* 0xEF4 */
+ uint32 PAD[PADSZ(0xef8, 0xf00)]; /* 0xEF8 - 0xF00 */
+ uint32 error_status; /* 0xF04 */
+ uint32 error_parity; /* 0xF08 */
+ uint32 PAD; /* 0xF0C */
+ uint32 msg_buf_0[8]; /* 0xF10 - 0xF2C */
+ uint32 PAD[PADSZ(0xf30, 0xf3c)]; /* 0xF30 - 0xF3C */
+ uint32 CTRL_REG0; /* 0xF40 */
+ uint32 CTRL_REG1; /* 0xF44 */
+ uint32 chipID; /* 0xF48 */
+ uint32 PAD[PADSZ(0xf4c, 0xf54)]; /* 0xF4C - 0xF54 */
+ uint32 timestamp_mask0; /* 0xf58 */
+ uint32 timestamp_mask1; /* 0xf5c */
+ uint32 wl_event_rdAddress; /* 0xF60 */
+ uint32 bt_event_rdAddress; /* 0xF64 */
+ uint32 interrupt_Address; /* 0xF68 */
+ uint32 PAD[PADSZ(0xf6c, 0xf70)]; /* 0xF6c - 0xF70 */
+ uint32 coex_error_status; /* 0xF74 */
+ uint32 coex_error_parity; /* 0xF78 */
+ uint32 PAD; /* 0xF7C */
+ uint32 ar_buf_01[4]; /* 0xF80 - 0xF8C */
+ uint32 PAD[PADSZ(0xf90, 0xfac)]; /* 0xF90 - 0xFAC */
+ uint32 coex_ctrl_reg0; /* 0xFB0 */
+ uint32 coex_ctrl_reg1; /* 0xFB4 */
+ uint32 coex_chip_id; /* 0xFB8 */
+ uint32 PAD[PADSZ(0xfbc, 0xfcc)]; /* 0xFBC - 0xFCC */
+ uint32 coex_wl_event_rd; /* 0xFD0 */
+ uint32 coex_bt_event_rd; /* 0xFD4 */
+ uint32 coex_interrupt; /* 0xFD8 */
+ uint32 PAD; /* 0xFDC */
+ uint32 spmi_shared_reg_status_intMask_adr; /* 0xFE0 */
+ uint32 spmi_shared_reg_status_intStatus_adr; /* 0xFE4 */
+ uint32 spmi_shared_reg_status_wakeMask_adr; /* 0xFE8 */
+ uint32 spmi_shared_event_map_idx_adr; /* 0xFEC */
+ uint32 spmi_shared_event_map_data_adr; /* 0xFF0 */
+ uint32 spmi_coex_event_gpr_status_adr; /* 0xFF4 */
+} gciregs_t;
+
+#define GCI_CAP0_REV_MASK 0x000000ff
+
+/* GCI Capabilities registers */
+#define GCI_CORE_CAP_0_COREREV_MASK 0xFF
+#define GCI_CORE_CAP_0_COREREV_SHIFT 0
+
+#define GCI_INDIRECT_ADDRESS_REG_REGINDEX_MASK 0x3F
+#define GCI_INDIRECT_ADDRESS_REG_REGINDEX_SHIFT 0
+#define GCI_INDIRECT_ADDRESS_REG_GPIOINDEX_MASK 0xF
+#define GCI_INDIRECT_ADDRESS_REG_GPIOINDEX_SHIFT 16
+
+#define WLAN_BANKX_SLEEPPDA_REG_SLEEPPDA_MASK 0xFFFF
+
+#define WLAN_BANKX_PKILL_REG_SLEEPPDA_MASK 0x1
+
+/* WLAN BankXInfo Register */
+#define WLAN_BANKXINFO_BANK_SIZE_MASK 0x00FFF000
+#define WLAN_BANKXINFO_BANK_SIZE_SHIFT 12
+
+/* WLAN Mem Info Register */
+#define WLAN_MEM_INFO_REG_NUMSOCRAMBANKS_MASK 0x000000FF
+#define WLAN_MEM_INFO_REG_NUMSOCRAMBANKS_SHIFT 0
+
+#define WLAN_MEM_INFO_REG_NUMD11MACBM_MASK 0x0000FF00
+#define WLAN_MEM_INFO_REG_NUMD11MACBM_SHIFT 8
+
+#define WLAN_MEM_INFO_REG_NUMD11MACUCM_MASK 0x00FF0000
+#define WLAN_MEM_INFO_REG_NUMD11MACUCM_SHIFT 16
+
+#define WLAN_MEM_INFO_REG_NUMD11MACSHM_MASK 0xFF000000
+#define WLAN_MEM_INFO_REG_NUMD11MACSHM_SHIFT 24
+
+/* GCI chip status register 9 */
+#define GCI_CST9_SCAN_DIS (1u << 31u) /* scan core disable */
+
+/* GCI Output register indices */
+#define GCI_OUTPUT_IDX_0 0
+#define GCI_OUTPUT_IDX_1 1
+#define GCI_OUTPUT_IDX_2 2
+#define GCI_OUTPUT_IDX_3 3
+
+#endif /* !_LANGUAGE_ASSEMBLY && !__ASSEMBLY__ */
+
+#endif /* _SBGCI_H */
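The offset comments on gciregs_t are load-bearing — the PAD arrays exist only to keep them true — so a compile-time check is a cheap guard against drift. A sketch assuming the driver provides a STATIC_ASSERT-style helper (OFFSETOF is already used by GCI_OFFSETOF above):

/* Sketch: pin a few gciregs_t members to their documented offsets.
 * STATIC_ASSERT is assumed to be the driver's compile-time assert.
 */
STATIC_ASSERT(OFFSETOF(gciregs_t, gci_chipid) == 0x300);
STATIC_ASSERT(OFFSETOF(gciregs_t, lhl_top_pwrdn_ctl_adr) == 0xE74);
STATIC_ASSERT(OFFSETOF(gciregs_t, lhl_top_pwrup_ctl_adr) == 0xE78);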
diff --git a/bcmdhd.101.10.361.x/include/sbhndarm.h b/bcmdhd.101.10.361.x/include/sbhndarm.h
new file mode 100755
index 0000000..bdddbce
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/sbhndarm.h
@@ -0,0 +1,414 @@
+/*
+ * Broadcom SiliconBackplane ARM definitions
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _sbhndarm_h_
+#define _sbhndarm_h_
+
+#ifdef DONGLEBUILD
+
+#include <arminc.h>
+#include <sbconfig.h>
+
+/* register offsets */
+#define ARM7_CORECTL 0
+
+/* bits in corecontrol */
+#define ACC_FORCED_RST 0x1
+#define ACC_SERRINT 0x2
+#define ACC_WFICLKSTOP 0x4
+
+#if !defined(__ARM_ARCH_7A__)
+#define ACC_NOTSLEEPINGCLKREQ_SHIFT 24
+#endif /* !__ARM_ARCH_7A__ */
+
+#if defined(__ARM_ARCH_7A__)
+
+#define ACC_FORCECLOCKRATIO (0x1u << 8)
+#define ACC_CLOCKRATIO_SHIFT (9u)
+#define ACC_CLOCKRATIO_MASK (0xFu << ACC_CLOCKRATIO_SHIFT)
+
+#define ACC_CLOCKRATIO_1_TO_1 (0u)
+#define ACC_CLOCKRATIO_2_TO_1 (1u)
+#define ACC_CLOCKRATIO_3_TO_1 (2u)
+#define ACC_CLOCKRATIO_4_TO_1 (3u)
+
+#define ACC_FASTCLOCKCHANNEL_SHIFT (24u)
+#define ACC_FASTCLOCKCHANNEL_MASK (0x3u << ACC_FASTCLOCKCHANNEL_SHIFT)
+#define ACC_NUM_FASTCLOCKS_SHIFT (2u)
+#define ACC_NUM_FASTCLOCKS_MASK (0x3u << ACC_NUM_FASTCLOCKS_SHIFT)
+
+#define ACC_NOTSLEEPINGCLKREQ_SHIFT (4u)
+#define ACC_NOTSLEEPINGCLKREQ_MASK (0x3u << ACC_NOTSLEEPINGCLKREQ_SHIFT)
+#define ACC_NOTSLEEPING_ALP (0u)
+#define ACC_NOTSLEEPING_HT (1u)
+#define ACC_NOTSLEEPING_ALP_HT_AVAIL (2u)
+#define ACC_NOTSLEEPING_HT_AVAIL (3u)
+
+#elif defined(__ARM_ARCH_7R__) /* CR4 */
+
+#define ACC_FORCECLOCKRATIO (1u << 7u)
+#define ACC_CLOCKRATIO_SHIFT 8u
+#define ACC_CLOCKRATIO_MASK (0xFu << ACC_CLOCKRATIO_SHIFT)
+#define ACC_CLOCKMODE_SHIFT 12u
+#define ACC_CLOCKMODE_MASK (7u << ACC_CLOCKMODE_SHIFT)
+#define ACC_NOTSLEEPCLKREQ0 3u
+#define ACC_NOTSLEEPCLKREQ1 18u
+#define ACC_FLOPSPROTECT (1u << 20u)
+
+#define ACC_CLOCKRATIO_1_TO_1 (0u)
+#define ACC_CLOCKRATIO_2_TO_1 (4u)
+
+#endif /* __ARM_ARCH_7A__ */
+
+#define ACC_CLOCKMODE_SAME (0) /**< BP and CPU clock are the same */
+#define ACC_CLOCKMODE_ASYNC (1) /**< BP and CPU clock are asynchronous */
+#define ACC_CLOCKMODE_SYNCH (2) /**< BP and CPU clock are synch, ratio 1:1 or 1:2 */
+
+/* Request ALP on backplane bit 3 and 18 */
+#define ACC_REQALP ((1<<ACC_NOTSLEEPCLKREQ0) | (1<<ACC_NOTSLEEPCLKREQ1))
+
+#define ACC_MPU_SHIFT 25u
+#define ACC_MPU_MASK (0x1u << ACC_MPU_SHIFT)
+
+#define ACC_MPU_REGION_CNT_MASK 0x7u
+#define ACC_MPU_REGION_CNT_SHIFT 3u
+
+#define ACC_MPU_SECURE_SHIFT 27u
+#define ACC_MPU_SECURE_MASK (0x1u << ACC_MPU_SECURE_SHIFT)
+#define ACC_MPU_READ_SHIFT 30u
+#define ACC_MPU_READ_MASK (0x1u << ACC_MPU_READ_SHIFT)
+#define ACC_MPU_WRITE_SHIFT 29u
+#define ACC_MPU_WRITE_MASK (0x1u << ACC_MPU_WRITE_SHIFT)
+#define ACC_MPU_VALID_SHIFT 31u
+#define ACC_MPU_VALID_MASK (0x1u << ACC_MPU_VALID_SHIFT)
+
+/* arm resetlog */
+#define SBRESETLOG 0x1
+#define SERRORLOG 0x2
+
+/* arm core-specific control flags */
+#define SICF_REMAP_MSK 0x001c
+#define SICF_REMAP_NONE 0
+#define SICF_REMAP_ROM 0x0004
+#define SIFC_REMAP_FLASH 0x0008
+
+/* misc core-specific defines */
+#if defined(__ARM_ARCH_7M__)
+/* cortex-m3 */
+/* backplane related stuff */
+#define ARM_CORE_ID ARMCM3_CORE_ID /**< arm coreid */
+#define SI_ARM_ROM SI_ARMCM3_ROM /**< ROM backplane/system address */
+#define SI_ARM_SRAM2 SI_ARMCM3_SRAM2 /**< RAM backplane address when remap is 1 or 2 */
+/* core registers offsets */
+#define ARMCM3_CYCLECNT 0x90 /**< Cortex-M3 core registers offsets */
+#define ARMCM3_INTTIMER 0x94
+#define ARMCM3_INTMASK 0x98
+#define ARMCM3_INTSTATUS 0x9c
+/* interrupt/exception */
+#define ARMCM3_NUMINTS 16 /**< # of external interrupts */
+#define ARMCM3_INTALL ((1 << ARMCM3_NUMINTS) - 1) /**< Interrupt mask */
+#define ARMCM3_SHARED_INT 0 /**< Interrupt shared by multiple cores */
+#define ARMCM3_INT(i) (1 << (i)) /**< Individual interrupt enable/disable */
+/* intmask/intstatus bits */
+#define ARMCM3_INTMASK_TIMER 0x1
+#define ARMCM3_INTMASK_SYSRESET 0x4
+#define ARMCM3_INTMASK_LOCKUP 0x8
+
+/*
+ * Overlay Support in Rev 5
+ */
+#define ARMCM3_OVL_VALID_SHIFT 0
+#define ARMCM3_OVL_VALID 1
+#define ARMCM3_OVL_SZ_SHIFT 1
+#define ARMCM3_OVL_SZ_MASK 0x0000000e
+#define ARMCM3_OVL_SZ_512B 0 /* 512B */
+#define ARMCM3_OVL_SZ_1KB 1 /* 1KB */
+#define ARMCM3_OVL_SZ_2KB 2 /* 2KB */
+#define ARMCM3_OVL_SZ_4KB 3 /* 4KB */
+#define ARMCM3_OVL_SZ_8KB 4 /* 8KB */
+#define ARMCM3_OVL_SZ_16KB 5 /* 16KB */
+#define ARMCM3_OVL_SZ_32KB 6 /* 32KB */
+#define ARMCM3_OVL_SZ_64KB 7 /* 64KB */
+#define ARMCM3_OVL_ADDR_SHIFT 9
+#define ARMCM3_OVL_ADDR_MASK 0x003FFE00
+#define ARMCM3_OVL_MAX 16
+
+#elif defined(__ARM_ARCH_7R__)
+/* cortex-r4 */
+/* backplane related stuff */
+#define ARM_CORE_ID ARMCR4_CORE_ID /**< arm coreid */
+#define SI_ARM_ROM SI_ARMCR4_ROM /**< ROM backplane/system address */
+#define SI_ARM_SRAM2 0x0 /**< In the cr4 the RAM is just not available
+ * when remap is 1
+ */
+
+/* core registers offsets */
+#define ARMCR4_CORECTL 0
+#define ARMCR4_CORECAP 4
+#define ARMCR4_COREST 8
+
+#define ARMCR4_FIQRSTATUS 0x10
+#define ARMCR4_FIQMASK 0x14
+#define ARMCR4_IRQMASK 0x18
+
+#define ARMCR4_INTSTATUS 0x20
+#define ARMCR4_INTMASK 0x24
+#define ARMCR4_CYCLECNT 0x28
+#define ARMCR4_INTTIMER 0x2c
+
+#define ARMCR4_GPIOSEL 0x30
+#define ARMCR4_GPIOEN 0x34
+
+#define ARMCR4_BANKIDX 0x40
+#define ARMCR4_BANKINFO 0x44
+#define ARMCR4_BANKSTBY 0x48
+#define ARMCR4_BANKPDA 0x4c
+
+#define ARMCR4_TCAMPATCHCTRL 0x68
+#define ARMCR4_TCAMPATCHTBLBASEADDR 0x6C
+#define ARMCR4_TCAMCMDREG 0x70
+#define ARMCR4_TCAMDATAREG 0x74
+#define ARMCR4_TCAMBANKXMASKREG 0x78
+
+#define ARMCR4_ROMNB_MASK 0xf00
+#define ARMCR4_ROMNB_SHIFT 8
+#define ARMCR4_MSB_ROMNB_MASK 0x1E00000
+/* adjusted shift to fit 4-LSB (21 - 4 = 17) */
+#define ARMCR4_MSB_ROMNB_SHIFT 17
+#define ARMCR4_TCBBNB_MASK 0xf0
+#define ARMCR4_TCBBNB_SHIFT 4
+#define ARMCR4_TCBANB_MASK 0xf
+#define ARMCR4_TCBANB_SHIFT 0
+
+#define ARMCR4_MT_MASK 0x300
+#define ARMCR4_MT_SHIFT 8
+#define ARMCR4_MT_ROM 0x100
+#define ARMCR4_MT_RAM 0
+
+#define ARMCR4_BSZ_MASK 0x7f
+#define ARMCR4_BUNITSZ_MASK 0x200
+#define ARMCR4_BSZ_8K 8192
+#define ARMCR4_BSZ_1K 1024
+
+#define ARMCR4_STBY_SUPPORTED 0x400
+#define ARMCR4_STBY_TIMER_PRESENT 0x800
+
+#define ARMCR4_TIMER_VAL_MASK 0xfffff
+#define ARMCR4_STBY_TIMER_ENABLE (1 << 24)
+#define ARMCR4_STBY_OVERRIDE (1 << 31)
+
+#define ARMCR4_TCAM_ENABLE (1u << 31u)
+#define ARMCR4_TCAM_CLKENAB (1u << 30u)
+#define ARMCR4_TCAM_WRITEPROT (1u << 29u)
+#define ARMCR4_TCAM_PATCHCNT_MASK 0xfu
+
+#define ARMCR4_TCAM_CMD_DONE (1u << 31)
+#define ARMCR4_TCAM_MATCH (1u << 24)
+#define ARMCR4_TCAM_OPCODE_MASK (3 << 16)
+#define ARMCR4_TCAM_OPCODE_SHIFT 16
+#define ARMCR4_TCAM_ADDR_MASK 0xffff
+#define ARMCR4_TCAM_NONE (0 << ARMCR4_TCAM_OPCODE_SHIFT)
+#define ARMCR4_TCAM_READ (1 << ARMCR4_TCAM_OPCODE_SHIFT)
+#define ARMCR4_TCAM_WRITE (2 << ARMCR4_TCAM_OPCODE_SHIFT)
+#define ARMCR4_TCAM_COMPARE (3 << ARMCR4_TCAM_OPCODE_SHIFT)
+#define ARMCR4_TCAM_CMD_DONE_DLY 1000
+
+#define ARMCR4_DATA_MASK (~0x7)
+#define ARMCR4_DATA_VALID (1u << 0)
+
+/* intmask/intstatus bits */
+#define ARMCR4_INTMASK_TIMER (0x1)
+#define ARMCR4_INTMASK_CLOCKSTABLE (0x20000000)
+
+#define CHIP_SDRENABLE(sih) (sih->boardflags2 & BFL2_SDR_EN)
+#define CHIP_TCMPROTENAB(sih) (si_arm_sflags(sih) & SISF_TCMPROT)
+
+/* Power Control */
+#define ARM_ENAB_MEM_CLK_GATE_SHIFT 5
+
+#define ROM_STBY_TIMER_4378 0xb0
+#define RAM_STBY_TIMER_4378 0x64
+
+#define ROM_STBY_TIMER_4387 0x10
+#define RAM_STBY_TIMER_4387 0x100
+
+#define RAM_STBY_DEFAULT_WAIT_TIME (3u)
+#define ROM_STBY_DEFAULT_WAIT_TIME (4u)
+#define DEFAULT_FORCE_STBY_IN_WFI (1u)
+#define ARMCR4_DYN_STBY_CTRL_RAM_STBY_WAIT_TIMER_SHIFT (0u)
+#define ARMCR4_DYN_STBY_CTRL_RAM_STBY_WAIT_TIMER_MASK (0xF << \
+ ARMCR4_DYN_STBY_CTRL_RAM_STBY_WAIT_TIMER_SHIFT)
+#define ARMCR4_DYN_STBY_CTRL_ROM_STBY_WAIT_TIMER_SHIFT (8u)
+#define ARMCR4_DYN_STBY_CTRL_ROM_STBY_WAIT_TIMER_MASK (0x3F << \
+ ARMCR4_DYN_STBY_CTRL_ROM_STBY_WAIT_TIMER_SHIFT)
+#define ARMCR4_DYN_STBY_CTRL_FORCE_STBY_IN_WFI_SHIFT (16u)
+#define ARMCR4_DYN_STBY_CTRL_FORCE_STBY_IN_WFI_MASK (0x1 << \
+ ARMCR4_DYN_STBY_CTRL_FORCE_STBY_IN_WFI_SHIFT)
+
+/* using CHIPID because no capabilities bit */
+#define ARM_CLKGATING_CAP(sih) ((void)(sih), (BCM4378_CHIP(sih->chip) ||\
+ BCM4387_CHIP(sih->chip)))
+
+#define ARM_CLKGATING_ENAB(sih) (ARM_CLKGATING_CAP(sih) && 1)
+
+#elif defined(__ARM_ARCH_7A__)
+
+#if defined(CA7)
+/* backplane related stuff */
+#define ARM_CORE_ID ARMCA7_CORE_ID
+#define SI_ARM_ROM SI_ARMCA7_ROM /**< ROM backplane/system address */
+
+#else
+/* backplane related stuff */
+#define ARM_CORE_ID ARMCA9_CORE_ID /* arm coreid */
+#endif /* CA7 */
+#else /* !__ARM_ARCH_7M__ && !__ARM_ARCH_7R__ && !__ARM_ARCH_7A__ */
+#error Unrecognized ARM Architecture
+#endif /* !__ARM_ARCH_7M__ && !__ARM_ARCH_7R__ && !__ARM_ARCH_7A__ */
+
+#endif /* DONGLEBUILD */
+
+#ifndef _LANGUAGE_ASSEMBLY
+
+/* cpp contortions to concatenate w/arg prescan */
+#ifndef PAD
+#define _PADLINE(line) pad ## line
+#define _XSTR(line) _PADLINE(line)
+#define PAD _XSTR(__LINE__)
+#endif /* PAD */
+
+/* cortex-m3 */
+typedef volatile struct {
+ uint32 corecontrol; /* 0x0 */
+ uint32 corestatus; /* 0x4 */
+ uint32 PAD[1];
+ uint32 biststatus; /* 0xc */
+ uint32 nmiisrst; /* 0x10 */
+ uint32 nmimask; /* 0x14 */
+ uint32 isrmask; /* 0x18 */
+ uint32 PAD[1];
+ uint32 resetlog; /* 0x20 */
+ uint32 gpioselect; /* 0x24 */
+ uint32 gpioenable; /* 0x28 */
+ uint32 PAD[1];
+ uint32 bpaddrlo; /* 0x30 */
+ uint32 bpaddrhi; /* 0x34 */
+ uint32 bpdata; /* 0x38 */
+ uint32 bpindaccess; /* 0x3c */
+ uint32 ovlidx; /* 0x40 */
+ uint32 ovlmatch; /* 0x44 */
+ uint32 ovladdr; /* 0x48 */
+ uint32 PAD[13];
+ uint32 bwalloc; /* 0x80 */
+ uint32 PAD[3];
+ uint32 cyclecnt; /* 0x90 */
+ uint32 inttimer; /* 0x94 */
+ uint32 intmask; /* 0x98 */
+ uint32 intstatus; /* 0x9c */
+ uint32 PAD[80];
+ uint32 clk_ctl_st; /* 0x1e0 */
+ uint32 PAD[1];
+ uint32 powerctl; /* 0x1e8 */
+} cm3regs_t;
+#define ARM_CM3_REG(regs, reg) (&((cm3regs_t *)regs)->reg)
+
+/* cortex-R4 */
+typedef volatile struct {
+ uint32 corecontrol; /* 0x0 */
+ uint32 corecapabilities; /* 0x4 */
+ uint32 corestatus; /* 0x8 */
+ uint32 biststatus; /* 0xc */
+ uint32 nmiisrst; /* 0x10 */
+ uint32 nmimask; /* 0x14 */
+ uint32 isrmask; /* 0x18 */
+ uint32 swintreg; /* 0x1C */
+ uint32 intstatus; /* 0x20 */
+ uint32 intmask; /* 0x24 */
+ uint32 cyclecnt; /* 0x28 */
+ uint32 inttimer; /* 0x2c */
+ uint32 gpioselect; /* 0x30 */
+ uint32 gpioenable; /* 0x34 */
+ uint32 PAD[2];
+ uint32 bankidx; /* 0x40 */
+ uint32 bankinfo; /* 0x44 */
+ uint32 bankstbyctl; /* 0x48 */
+ uint32 bankpda; /* 0x4c */
+ uint32 dyn_stby_control; /* 0x50 */
+ uint32 PAD[5];
+ uint32 tcampatchctrl; /* 0x68 */
+ uint32 tcampatchtblbaseaddr; /* 0x6c */
+ uint32 tcamcmdreg; /* 0x70 */
+ uint32 tcamdatareg; /* 0x74 */
+ uint32 tcambankxmaskreg; /* 0x78 */
+ uint32 PAD[5];
+ uint32 mpucontrol; /* 0x90 */
+ uint32 mpucapabilities; /* 0x94 */
+ uint32 rom_reloc_addr; /* 0x98 */
+ uint32 PAD[1];
+ uint32 region_n_regs[16]; /* 0xa0 - 0xdc */
+ uint32 PAD[16];
+ uint32 initiat_n_masks[16]; /* 0x120 - 0x15c */
+ uint32 PAD[32];
+ uint32 clk_ctl_st; /* 0x1e0 */
+ uint32 hw_war; /* 0x1e4 */
+ uint32 powerctl; /* 0x1e8 */
+ uint32 powerctl2; /* 0x1ec */
+} cr4regs_t;
+#define ARM_CR4_REG(regs, reg) (&((cr4regs_t *)regs)->reg)
+
+#define SBRESETLOG_CR4 0x4
+
+/* cortex-A7 */
+typedef volatile struct {
+ uint32 corecontrol; /* 0x0 */
+ uint32 corecapabilities; /* 0x4 */
+ uint32 corestatus; /* 0x8 */
+ uint32 tracecontrol; /* 0xc */
+ uint32 gpioselect; /* 0x10 */
+ uint32 gpioenable; /* 0x14 */
+ uint32 PAD[114];
+ uint32 clk_ctl_st; /* 0x1e0 */
+ uint32 workaround; /* 0x1e4 */
+ uint32 powerctl; /* 0x1e8 */
+ uint32 powerctl2; /* 0x1ec */
+} ca7regs_t;
+#define ARM_CA7_REG(regs, reg) (&((ca7regs_t *)regs)->reg)
+
+#if defined(__ARM_ARCH_7M__)
+#define ARMREG(regs, reg) ARM_CM3_REG(regs, reg)
+#endif /* __ARM_ARCH_7M__ */
+
+#if defined(__ARM_ARCH_7R__)
+#define ARMREG(regs, reg) ARM_CR4_REG(regs, reg)
+#endif /* __ARM_ARCH_7R__ */
+
+#if defined(__ARM_ARCH_7A__)
+#define ARMREG(regs, reg) ARM_CA7_REG(regs, reg)
+#endif /* __ARM_ARCH_7A__ */
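+
+/*
+ * Usage sketch (illustrative; R_REG is the usual Broadcom OSL register-read
+ * macro and "osh" an OSL handle): with the per-architecture aliases above,
+ * common code can touch a register without naming the core type, e.g.
+ *
+ *   uint32 ccs = R_REG(osh, ARMREG(regs, clk_ctl_st));
+ *
+ * which resolves to the cm3/cr4/ca7 register struct selected at compile time.
+ */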
+
+/* MPU is present mask of ca7regs_t->corecapabilities */
+#define CAP_MPU_MASK 0x2000000u
+
+#endif /* _LANGUAGE_ASSEMBLY */
+
+#endif /* _sbhndarm_h_ */
diff --git a/bcmdhd.101.10.361.x/include/sbhnddma.h b/bcmdhd.101.10.361.x/include/sbhnddma.h
new file mode 100755
index 0000000..bf7f3ba
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/sbhnddma.h
@@ -0,0 +1,481 @@
+/*
+ * Generic Broadcom Home Networking Division (HND) DMA engine HW interface
+ * This supports the following chips: BCM42xx, 44xx, 47xx.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _sbhnddma_h_
+#define _sbhnddma_h_
+
+/* DMA structure:
+ * supports two DMA engines: 32-bit and 64-bit addressing
+ * the basic DMA register set is per channel (transmit or receive)
+ * a pair of channels is defined for convenience
+ */
+
+/* 32 bits addressing */
+
+/** dma registers per channel(xmt or rcv) */
+typedef volatile struct {
+ uint32 control; /**< enable, et al */
+ uint32 addr; /**< descriptor ring base address (4K aligned) */
+ uint32 ptr; /**< last descriptor posted to chip */
+ uint32 status; /**< current active descriptor, et al */
+} dma32regs_t;
+
+typedef volatile struct {
+ dma32regs_t xmt; /**< dma tx channel */
+ dma32regs_t rcv; /**< dma rx channel */
+} dma32regp_t;
+
+typedef volatile struct { /* diag access */
+ uint32 fifoaddr; /**< diag address */
+ uint32 fifodatalow; /**< low 32bits of data */
+ uint32 fifodatahigh; /**< high 32bits of data */
+ uint32 pad; /**< reserved */
+} dma32diag_t;
+
+/**
+ * DMA Descriptor
+ * Descriptors are only read by the hardware, never written back.
+ */
+typedef volatile struct {
+ uint32 ctrl; /**< misc control bits & bufcount */
+ uint32 addr; /**< data buffer address */
+} dma32dd_t;
+
+/** Each descriptor ring must be 4096-byte aligned and fit within a single 4096-byte page. */
+#define D32RINGALIGN_BITS 12
+#define D32MAXRINGSZ (1 << D32RINGALIGN_BITS)
+#define D32RINGALIGN (1 << D32RINGALIGN_BITS)
+
+#define D32MAXDD (D32MAXRINGSZ / sizeof (dma32dd_t))
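+
+/*
+ * Sizing example (illustrative): dma32dd_t is 8 bytes, so a maximally sized
+ * 32-bit ring holds D32MAXDD = 4096 / 8 = 512 descriptors, and the ring base
+ * programmed into dma32regs_t->addr must itself be D32RINGALIGN (4 KB)
+ * aligned so the whole ring sits inside one 4 KB page.
+ */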
+
+/* transmit channel control */
+#define XC_XE ((uint32)1 << 0) /**< transmit enable */
+#define XC_SE ((uint32)1 << 1) /**< transmit suspend request */
+#define XC_LE ((uint32)1 << 2) /**< loopback enable */
+#define XC_FL ((uint32)1 << 4) /**< flush request */
+#define XC_MR_MASK 0x000001C0 /**< Multiple outstanding reads */
+#define XC_MR_SHIFT 6
+#define XC_PD ((uint32)1 << 11) /**< parity check disable */
+#define XC_AE ((uint32)3 << 16) /**< address extension bits */
+#define XC_AE_SHIFT 16
+#define XC_BL_MASK 0x001C0000 /**< BurstLen bits */
+#define XC_BL_SHIFT 18
+#define XC_PC_MASK 0x00E00000 /**< Prefetch control */
+#define XC_PC_SHIFT 21
+#define XC_PT_MASK 0x03000000 /**< Prefetch threshold */
+#define XC_PT_SHIFT 24
+
+/** Multiple outstanding reads */
+#define DMA_MR_1 0
+#define DMA_MR_2 1
+#define DMA_MR_4 2
+#define DMA_MR_8 3
+#define DMA_MR_12 4
+#define DMA_MR_16 5
+#define DMA_MR_20 6
+#define DMA_MR_32 7
+
+/** DMA Burst Length in bytes */
+#define DMA_BL_16 0
+#define DMA_BL_32 1
+#define DMA_BL_64 2
+#define DMA_BL_128 3
+#define DMA_BL_256 4
+#define DMA_BL_512 5
+#define DMA_BL_1024 6
+#define DMA_BL_INVALID 0xFF
+
+/** Prefetch control */
+#define DMA_PC_0 0
+#define DMA_PC_4 1
+#define DMA_PC_8 2
+#define DMA_PC_16 3
+#define DMA_PC_32 4
+/* others: reserved */
+
+/** Prefetch threshold */
+#define DMA_PT_1 0
+#define DMA_PT_2 1
+#define DMA_PT_4 2
+#define DMA_PT_8 3
+
+/** Channel Switch */
+#define DMA_CS_OFF 0
+#define DMA_CS_ON 1
+
+/* transmit descriptor table pointer */
+#define XP_LD_MASK 0xfff /**< last valid descriptor */
+
+/* transmit channel status */
+#define XS_CD_MASK 0x0fff /**< current descriptor pointer */
+#define XS_XS_MASK 0xf000 /**< transmit state */
+#define XS_XS_SHIFT 12
+#define XS_XS_DISABLED 0x0000 /**< disabled */
+#define XS_XS_ACTIVE 0x1000 /**< active */
+#define XS_XS_IDLE 0x2000 /**< idle wait */
+#define XS_XS_STOPPED 0x3000 /**< stopped */
+#define XS_XS_SUSP 0x4000 /**< suspend pending */
+#define XS_XE_MASK 0xf0000 /**< transmit errors */
+#define XS_XE_SHIFT 16
+#define XS_XE_NOERR 0x00000 /**< no error */
+#define XS_XE_DPE 0x10000 /**< descriptor protocol error */
+#define XS_XE_DFU 0x20000 /**< data fifo underrun */
+#define XS_XE_BEBR 0x30000 /**< bus error on buffer read */
+#define XS_XE_BEDA 0x40000 /**< bus error on descriptor access */
+#define XS_AD_MASK 0xfff00000 /**< active descriptor */
+#define XS_AD_SHIFT 20
+
+/* receive channel control */
+#define RC_RE ((uint32)1 << 0) /**< receive enable */
+#define RC_RO_MASK 0xfe /**< receive frame offset */
+#define RC_RO_SHIFT 1
+#define RC_FM ((uint32)1 << 8) /**< direct fifo receive (pio) mode */
+#define RC_SH ((uint32)1 << 9) /**< separate rx header descriptor enable */
+#define RC_OC ((uint32)1 << 10) /**< overflow continue */
+#define RC_PD ((uint32)1 << 11) /**< parity check disable */
+#define RC_AE ((uint32)3 << 16) /**< address extension bits */
+#define RC_AE_SHIFT 16
+#define RC_BL_MASK 0x001C0000 /**< BurstLen bits */
+#define RC_BL_SHIFT 18
+#define RC_PC_MASK 0x00E00000 /**< Prefetch control */
+#define RC_PC_SHIFT 21
+#define RC_PT_MASK 0x03000000 /**< Prefetch threshold */
+#define RC_PT_SHIFT 24
+#define RC_WAITCMP_MASK 0x00001000
+#define RC_WAITCMP_SHIFT 12
+/* receive descriptor table pointer */
+#define RP_LD_MASK 0xfff /**< last valid descriptor */
+
+/* receive channel status */
+#define RS_CD_MASK 0x0fff /**< current descriptor pointer */
+#define RS_RS_MASK 0xf000 /**< receive state */
+#define RS_RS_SHIFT 12
+#define RS_RS_DISABLED 0x0000 /**< disabled */
+#define RS_RS_ACTIVE 0x1000 /**< active */
+#define RS_RS_IDLE 0x2000 /**< idle wait */
+#define RS_RS_STOPPED 0x3000 /**< reserved */
+#define RS_RE_MASK 0xf0000 /**< receive errors */
+#define RS_RE_SHIFT 16
+#define RS_RE_NOERR 0x00000 /**< no error */
+#define RS_RE_DPE 0x10000 /**< descriptor protocol error */
+#define RS_RE_DFO 0x20000 /**< data fifo overflow */
+#define RS_RE_BEBW 0x30000 /**< bus error on buffer write */
+#define RS_RE_BEDA 0x40000 /**< bus error on descriptor access */
+#define RS_AD_MASK 0xfff00000 /**< active descriptor */
+#define RS_AD_SHIFT 20
+
+/* fifoaddr */
+#define FA_OFF_MASK 0xffff /**< offset */
+#define FA_SEL_MASK 0xf0000 /**< select */
+#define FA_SEL_SHIFT 16
+#define FA_SEL_XDD 0x00000 /**< transmit dma data */
+#define FA_SEL_XDP 0x10000 /**< transmit dma pointers */
+#define FA_SEL_RDD 0x40000 /**< receive dma data */
+#define FA_SEL_RDP 0x50000 /**< receive dma pointers */
+#define FA_SEL_XFD 0x80000 /**< transmit fifo data */
+#define FA_SEL_XFP 0x90000 /**< transmit fifo pointers */
+#define FA_SEL_RFD 0xc0000 /**< receive fifo data */
+#define FA_SEL_RFP 0xd0000 /**< receive fifo pointers */
+#define FA_SEL_RSD 0xe0000 /**< receive frame status data */
+#define FA_SEL_RSP 0xf0000 /**< receive frame status pointers */
+
+/* descriptor control flags */
+#define CTRL_BC_MASK 0x00001fff /**< buffer byte count; real data length must be <= 4 KB */
+#define CTRL_AE ((uint32)3 << 16) /**< address extension bits */
+#define CTRL_AE_SHIFT 16
+#define CTRL_PARITY ((uint32)3 << 18) /**< parity bit */
+#define CTRL_EOT ((uint32)1 << 28) /**< end of descriptor table */
+#define CTRL_IOC ((uint32)1 << 29) /**< interrupt on completion */
+#define CTRL_EOF ((uint32)1 << 30) /**< end of frame */
+#define CTRL_SOF ((uint32)1 << 31) /**< start of frame */
+
+/** control flags in the range [27:20] are core-specific and not defined here */
+#define CTRL_CORE_MASK 0x0ff00000
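+
+/*
+ * Descriptor sketch (illustrative; "dd", "len" and "pa" are hypothetical
+ * caller variables): a single-buffer frame that should raise an interrupt on
+ * completion sets both frame delimiters in one descriptor:
+ *
+ *   dd->ctrl = CTRL_SOF | CTRL_EOF | CTRL_IOC | (len & CTRL_BC_MASK);
+ *   dd->addr = (uint32)pa;
+ *
+ * with CTRL_EOT additionally set on the last descriptor of the ring.
+ */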
+
+/* 64 bits addressing */
+
+/** dma registers per channel(xmt or rcv) */
+typedef volatile struct {
+ uint32 control; /**< enable, et al */
+ uint32 ptr; /**< last descriptor posted to chip */
+ uint32 addrlow; /**< descriptor ring base address low 32-bits (8K aligned) */
+ uint32 addrhigh; /**< descriptor ring base address bits 63:32 (8K aligned) */
+ uint32 status0; /**< current descriptor, xmt state */
+ uint32 status1; /**< active descriptor, xmt error */
+} dma64regs_t;
+
+typedef volatile struct {
+ dma64regs_t tx; /**< dma64 tx channel */
+ dma64regs_t rx; /**< dma64 rx channel */
+} dma64regp_t;
+
+typedef volatile struct { /**< diag access */
+ uint32 fifoaddr; /**< diag address */
+ uint32 fifodatalow; /**< low 32bits of data */
+ uint32 fifodatahigh; /**< high 32bits of data */
+ uint32 pad; /**< reserved */
+} dma64diag_t;
+
+/**
+ * DMA Descriptor
+ * Descriptors are only read by the hardware, never written back.
+ */
+typedef volatile struct {
+ uint32 ctrl1; /**< misc control bits */
+ uint32 ctrl2; /**< buffer count and address extension */
+ uint32 addrlow; /**< memory address of the data buffer, bits 31:0 */
+ uint32 addrhigh; /**< memory address of the data buffer, bits 63:32 */
+} dma64dd_t;
+
+/**
+ * Pool implementation: each pool is 64 KB max; align it to maximize the ability to grow.
+ */
+#define D64POOLALIGN_BITS 15u
+#define D64POOLALIGN_BITS_MAX 16u
+/**
+ * Each descriptor ring must be 8 KB aligned and fit within a contiguous 8 KB physical address range.
+ */
+#define D64RINGALIGN_BITS 13
+#define D64MAXRINGSZ (1 << D64RINGALIGN_BITS)
+#define D64RINGBOUNDARY (1 << D64RINGALIGN_BITS)
+
+#define D64MAXDD (D64MAXRINGSZ / sizeof (dma64dd_t))
+
+/** for cores with large descriptor ring support, the ring size can be up to 4096 descriptors */
+#define D64MAXDD_LARGE ((1 << 16) / sizeof (dma64dd_t))
+
+/**
+ * for cores with large descriptor ring support (4k descriptors), the descriptor ring cannot
+ * cross a 64 KB boundary
+ */
+#define D64RINGBOUNDARY_LARGE (1 << 16)
+
+/*
+ * Default DMA Burstlen values for USBRev >= 12 and SDIORev >= 11.
+ * When this field contains the value N, the burst length is 2**(N + 4) bytes.
+ */
+#define D64_DEF_USBBURSTLEN 2
+#define D64_DEF_SDIOBURSTLEN 1
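+
+/*
+ * Worked example of the encoding above: a field value N selects a burst of
+ * 2^(N + 4) bytes, which agrees with the DMA_BL_* names:
+ *
+ *   N = D64_DEF_USBBURSTLEN  (2) -> 2^6 = 64 bytes (DMA_BL_64)
+ *   N = D64_DEF_SDIOBURSTLEN (1) -> 2^5 = 32 bytes (DMA_BL_32)
+ */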
+
+#ifndef D64_USBBURSTLEN
+#define D64_USBBURSTLEN DMA_BL_64
+#endif
+#ifndef D64_SDIOBURSTLEN
+#define D64_SDIOBURSTLEN DMA_BL_32
+#endif
+
+/* transmit channel control */
+#define D64_XC_XE 0x00000001 /**< transmit enable */
+#define D64_XC_SE 0x00000002 /**< transmit suspend request */
+#define D64_XC_LE 0x00000004 /**< loopback enable */
+#define D64_XC_FL 0x00000010 /**< flush request */
+#define D64_XC_MR_MASK 0x000001C0 /**< Multiple outstanding reads */
+#define D64_XC_MR_SHIFT 6
+#define D64_XC_CS_SHIFT 9 /**< channel switch enable */
+#define D64_XC_CS_MASK 0x00000200 /**< channel switch enable */
+#define D64_XC_PD 0x00000800 /**< parity check disable */
+#define D64_XC_AE 0x00030000 /**< address extension bits */
+#define D64_XC_AE_SHIFT 16
+#define D64_XC_BL_MASK 0x001C0000 /**< BurstLen bits */
+#define D64_XC_BL_SHIFT 18
+#define D64_XC_PC_MASK 0x00E00000 /**< Prefetch control */
+#define D64_XC_PC_SHIFT 21
+#define D64_XC_PT_MASK 0x03000000 /**< Prefetch threshold */
+#define D64_XC_PT_SHIFT 24
+#define D64_XC_CO_MASK 0x04000000 /**< coherent transactions for descriptors */
+#define D64_XC_CO_SHIFT 26
+
+/* transmit descriptor table pointer */
+#define D64_XP_LD_MASK 0x00001fff /**< last valid descriptor */
+
+/* transmit channel status */
+#define D64_XS0_CD_MASK (di->d64_xs0_cd_mask) /**< current descriptor pointer */
+#define D64_XS0_XS_MASK 0xf0000000 /**< transmit state */
+#define D64_XS0_XS_SHIFT 28
+#define D64_XS0_XS_DISABLED 0x00000000 /**< disabled */
+#define D64_XS0_XS_ACTIVE 0x10000000 /**< active */
+#define D64_XS0_XS_IDLE 0x20000000 /**< idle wait */
+#define D64_XS0_XS_STOPPED 0x30000000 /**< stopped */
+#define D64_XS0_XS_SUSP 0x40000000 /**< suspend pending */
+
+#define D64_XS1_AD_MASK (di->d64_xs1_ad_mask) /**< active descriptor */
+#define D64_XS1_XE_MASK 0xf0000000 /**< transmit errors */
+#define D64_XS1_XE_SHIFT 28
+#define D64_XS1_XE_NOERR 0x00000000 /**< no error */
+#define D64_XS1_XE_DPE 0x10000000 /**< descriptor protocol error */
+#define D64_XS1_XE_DFU 0x20000000 /**< data fifo underrun */
+#define D64_XS1_XE_DTE 0x30000000 /**< data transfer error */
+#define D64_XS1_XE_DESRE 0x40000000 /**< descriptor read error */
+#define D64_XS1_XE_COREE 0x50000000 /**< core error */
+
+/* receive channel control */
+#define D64_RC_RE 0x00000001 /**< receive enable */
+#define D64_RC_RO_MASK 0x000000fe /**< receive frame offset */
+#define D64_RC_RO_SHIFT 1
+#define D64_RC_FM 0x00000100 /**< direct fifo receive (pio) mode */
+#define D64_RC_SH 0x00000200 /**< separate rx header descriptor enable */
+#define D64_RC_SHIFT 9 /**< separate rx header descriptor enable */
+#define D64_RC_OC 0x00000400 /**< overflow continue */
+#define D64_RC_PD 0x00000800 /**< parity check disable */
+#define D64_RC_WAITCMP_MASK 0x00001000
+#define D64_RC_WAITCMP_SHIFT 12
+#define D64_RC_SA 0x00002000 /**< select active */
+#define D64_RC_GE 0x00004000 /**< Glom enable */
+#define D64_RC_AE 0x00030000 /**< address extension bits */
+#define D64_RC_AE_SHIFT 16
+#define D64_RC_BL_MASK 0x001C0000 /**< BurstLen bits */
+#define D64_RC_BL_SHIFT 18
+#define D64_RC_PC_MASK 0x00E00000 /**< Prefetch control */
+#define D64_RC_PC_SHIFT 21
+#define D64_RC_PT_MASK 0x03000000 /**< Prefetch threshold */
+#define D64_RC_PT_SHIFT 24
+#define D64_RC_CO_MASK 0x04000000 /**< coherent transactions for descriptors */
+#define D64_RC_CO_SHIFT 26
+#define D64_RC_ROEXT_MASK 0x08000000 /**< receive frame offset extension bit */
+#define D64_RC_ROEXT_SHIFT 27
+#define D64_RC_MOW_SHIFT (28u) /**< multiple outstanding write */
+#define D64_RC_MOW_MASK ((0x3u) << D64_RC_MOW_SHIFT)
+
+/* receive control values */
+/* RcvCtrl.MultipleOutstandingWrites (MOW) valid values (N) are listed below;
+ * a field value of N means (N + 1) outstanding write(s) are supported.
+ */
+#define D64_RC_MOW_1 (0u) /**< 1 outstanding write */
+#define D64_RC_MOW_2 (1u) /**< 2 outstanding writes */
+#define D64_RC_MOW_3 (2u) /**< 3 outstanding writes */
+#define D64_RC_MOW_4 (3u) /**< 4 outstanding writes */
+
+/* flags for dma controller */
+#define DMA_CTRL_PEN (1u << 0u) /**< parity enable */
+#define DMA_CTRL_ROC (1u << 1u) /**< rx overflow continue */
+#define DMA_CTRL_RXMULTI (1u << 2u) /**< allow rx scatter to multiple descriptors */
+#define DMA_CTRL_UNFRAMED (1u << 3u) /**< Unframed Rx/Tx data */
+#define DMA_CTRL_USB_BOUNDRY4KB_WAR (1u << 4u) /**< USB core REV9's SETUP dma channel's
+ * buffer cannot cross a 4K boundary (PR80468)
+ */
+#define DMA_CTRL_DMA_AVOIDANCE_WAR (1u << 5u) /**< DMA avoidance WAR for 4331 */
+#define DMA_CTRL_RXSINGLE (1u << 6u) /**< always single buffer */
+#define DMA_CTRL_SDIO_RXGLOM (1u << 7u) /**< DMA Rx glom is enabled */
+#define DMA_CTRL_DESC_ONLY_FLAG (1u << 8u) /**< For DMA which posts only descriptors,
+ * no packets
+ */
+#define DMA_CTRL_DESC_CD_WAR (1u << 9u) /**< WAR for descriptor only DMA's CD not being
+ * updated correctly by HW in CT mode.
+ */
+#define DMA_CTRL_CS (1u << 10u) /* channel switch enable */
+#define DMA_CTRL_ROEXT (1u << 11u) /* receive frame offset extension support */
+#define DMA_CTRL_RX_ALIGN_8BYTE (1u << 12u) /* RXDMA address 8-byte aligned */
+#define DMA_CTRL_SHARED_POOL (1u << 15u) /** shared descriptor pool */
+#define DMA_CTRL_COREUNIT_SHIFT (17u) /* Core unit shift */
+#define DMA_CTRL_COREUNIT_MASK (0x3u << 17u) /* Core unit mask */
+
+#define DMA_CTRL_SET_COREUNIT(di, coreunit) \
+ ((di)->hnddma.dmactrlflags |= \
+ (((coreunit) << DMA_CTRL_COREUNIT_SHIFT) & DMA_CTRL_COREUNIT_MASK))
+
+#define DMA_CTRL_GET_COREUNIT(di) \
+ (((di)->hnddma.dmactrlflags & DMA_CTRL_COREUNIT_MASK) >> DMA_CTRL_COREUNIT_SHIFT)
+
+/* receive descriptor table pointer */
+#define D64_RP_LD_MASK 0x00001fff /**< last valid descriptor */
+
+/* receive channel status */
+#define D64_RS0_CD_MASK (di->d64_rs0_cd_mask) /**< current descriptor pointer */
+#define D64_RS0_RS_MASK 0xf0000000 /**< receive state */
+#define D64_RS0_RS_SHIFT 28
+#define D64_RS0_RS_DISABLED 0x00000000 /**< disabled */
+#define D64_RS0_RS_ACTIVE 0x10000000 /**< active */
+#define D64_RS0_RS_IDLE 0x20000000 /**< idle wait */
+#define D64_RS0_RS_STOPPED 0x30000000 /**< stopped */
+#define D64_RS0_RS_SUSP 0x40000000 /**< suspend pending */
+
+#define D64_RS1_AD_MASK (di->d64_rs1_ad_mask) /* active descriptor pointer */
+#define D64_RS1_RE_MASK 0xf0000000 /* receive errors */
+#define D64_RS1_RE_SHIFT 28
+#define D64_RS1_RE_NOERR 0x00000000 /**< no error */
+#define D64_RS1_RE_DPO 0x10000000 /**< descriptor protocol error */
+#define D64_RS1_RE_DFU 0x20000000 /**< data fifo overflow */
+#define D64_RS1_RE_DTE 0x30000000 /**< data transfer error */
+#define D64_RS1_RE_DESRE 0x40000000 /**< descriptor read error */
+#define D64_RS1_RE_COREE 0x50000000 /**< core error */
+
+/* fifoaddr */
+#define D64_FA_OFF_MASK 0xffff /**< offset */
+#define D64_FA_SEL_MASK 0xf0000 /**< select */
+#define D64_FA_SEL_SHIFT 16
+#define D64_FA_SEL_XDD 0x00000 /**< transmit dma data */
+#define D64_FA_SEL_XDP 0x10000 /**< transmit dma pointers */
+#define D64_FA_SEL_RDD 0x40000 /**< receive dma data */
+#define D64_FA_SEL_RDP 0x50000 /**< receive dma pointers */
+#define D64_FA_SEL_XFD 0x80000 /**< transmit fifo data */
+#define D64_FA_SEL_XFP 0x90000 /**< transmit fifo pointers */
+#define D64_FA_SEL_RFD 0xc0000 /**< receive fifo data */
+#define D64_FA_SEL_RFP 0xd0000 /**< receive fifo pointers */
+#define D64_FA_SEL_RSD 0xe0000 /**< receive frame status data */
+#define D64_FA_SEL_RSP 0xf0000 /**< receive frame status pointers */
+
+/* descriptor control flags 1 */
+#define D64_CTRL_COREFLAGS 0x0ff00000 /**< core specific flags */
+
+/**< bzero operation for receive channels or a compare-to-zero operation for transmit engines */
+#define D64_CTRL1_BIT_BZEROBCMP (15u)
+/* WAR for JIRA CRWLDMA-245 */
+#define D64_DMA_COREFLAGS_WAR_BIT (25u)
+
+#define D64_CTRL1_COHERENT ((uint32)1 << 17) /**< cache coherent per transaction */
+#define D64_CTRL1_NOTPCIE ((uint32)1 << 18) /**< burst size control */
+#define D64_CTRL1_EOT ((uint32)1 << 28) /**< end of descriptor table */
+#define D64_CTRL1_IOC ((uint32)1 << 29) /**< interrupt on completion */
+#define D64_CTRL1_EOF ((uint32)1 << 30) /**< end of frame */
+#define D64_CTRL1_SOF ((uint32)1 << 31) /**< start of frame */
+#define D64_CTRL1_SOFPTR 0x0000FFFFu
+#define D64_CTRL1_NUMD_MASK 0x00F00000u
+#define D64_CTRL1_NUMD_SHIFT 20u
+
+/* descriptor control flags 2 */
+#define D64_CTRL2_MAX_LEN 0x0000fff7 /* Max transfer length (buffer byte count) <= 65527 */
+#define D64_CTRL2_BC_MASK 0x0000ffff /**< mask for buffer byte count */
+#define D64_CTRL2_AE 0x00030000 /**< address extension bits */
+#define D64_CTRL2_AE_SHIFT 16
+#define D64_CTRL2_PARITY 0x00040000 /* parity bit */
+
+/** control flags in the range [27:20] are core-specific and not defined here */
+#define D64_CTRL_CORE_MASK 0x0ff00000
+
+#define D64_RX_FRM_STS_LEN 0x0000ffff /**< frame length mask */
+#define D64_RX_FRM_STS_OVFL 0x00800000 /**< RxOverFlow */
+#define D64_RX_FRM_STS_DSCRCNT 0x0f000000 /**< no. of descriptors used - 1, d11corerev >= 22 */
+#define D64_RX_FRM_STS_DSCRCNT_SHIFT 24 /* Shift for no. of dma descriptor field */
+#define D64_RX_FRM_STS_DATATYPE 0xf0000000 /**< core-dependent data type */
+
+#define BCM_D64_CTRL2_BOUND_DMA_LENGTH(len) \
+(((len) > D64_CTRL2_MAX_LEN) ? D64_CTRL2_MAX_LEN : (len))
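+
+/*
+ * Usage sketch (hypothetical caller variables): clamp a buffer length before
+ * packing it into ctrl2 so it can never exceed D64_CTRL2_MAX_LEN:
+ *
+ *   uint32 bc = BCM_D64_CTRL2_BOUND_DMA_LENGTH(pktlen);
+ *   dd->ctrl2 = (bc & D64_CTRL2_BC_MASK) | ((ae << D64_CTRL2_AE_SHIFT) & D64_CTRL2_AE);
+ */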
+
+/** receive frame status */
+typedef volatile struct {
+ uint16 len;
+ uint16 flags;
+} dma_rxh_t;
+
+#endif /* _sbhnddma_h_ */
diff --git a/bcmdhd.101.10.361.x/include/sbhndpio.h b/bcmdhd.101.10.361.x/include/sbhndpio.h
new file mode 100755
index 0000000..f4038f3
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/sbhndpio.h
@@ -0,0 +1,60 @@
+/*
+ * Generic Broadcom Home Networking Division (HND) PIO engine HW interface
+ * This supports the following chips: BCM42xx, 44xx, 47xx.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _sbhndpio_h_
+#define _sbhndpio_h_
+
+/* PIO structure:
+ * supports two PIO formats: 2-byte access and 4-byte access
+ * the basic FIFO register set is per channel (transmit or receive)
+ * a pair of channels is defined for convenience
+ */
+
+/* 2byte-wide pio register set per channel(xmt or rcv) */
+typedef volatile struct {
+ uint16 fifocontrol;
+ uint16 fifodata;
+ uint16 fifofree; /* only valid in xmt channel, not in rcv channel */
+ uint16 PAD;
+} pio2regs_t;
+
+/* a pair of pio channels(tx and rx) */
+typedef volatile struct {
+ pio2regs_t tx;
+ pio2regs_t rx;
+} pio2regp_t;
+
+/* 4byte-wide pio register set per channel(xmt or rcv) */
+typedef volatile struct {
+ uint32 fifocontrol;
+ uint32 fifodata;
+} pio4regs_t;
+
+/* a pair of pio channels(tx and rx) */
+typedef volatile struct {
+ pio4regs_t tx;
+ pio4regs_t rx;
+} pio4regp_t;
+
+#endif /* _sbhndpio_h_ */
diff --git a/bcmdhd.101.10.361.x/include/sbpcmcia.h b/bcmdhd.101.10.361.x/include/sbpcmcia.h
new file mode 100755
index 0000000..77f65f4
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/sbpcmcia.h
@@ -0,0 +1,415 @@
+/*
+ * BCM43XX Sonics SiliconBackplane PCMCIA core hardware definitions.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _SBPCMCIA_H
+#define _SBPCMCIA_H
+
+/* All the addresses that are offsets in attribute space are divided
+ * by two to account for the fact that odd bytes are invalid in
+ * attribute space and our read/write routines make the space appear
+ * as if they didn't exist. Still, we want to show the original numbers
+ * as documented in the hnd_pcmcia core manual.
+ */
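+
+/*
+ * Worked example: the FCR block is documented at attribute offset 0x700, so
+ * after halving it appears here as PCMCIA_FCR = 0x700 / 2 = 0x380; likewise
+ * MEM_ADDR0 (0x728) is stored as 0x394.
+ */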
+
+/* PCMCIA Function Configuration Registers */
+#define PCMCIA_FCR (0x700 / 2)
+
+#define FCR0_OFF 0
+#define FCR1_OFF (0x40 / 2)
+#define FCR2_OFF (0x80 / 2)
+#define FCR3_OFF (0xc0 / 2)
+
+#define PCMCIA_FCR0 (0x700 / 2)
+#define PCMCIA_FCR1 (0x740 / 2)
+#define PCMCIA_FCR2 (0x780 / 2)
+#define PCMCIA_FCR3 (0x7c0 / 2)
+
+/* Standard PCMCIA FCR registers */
+
+#define PCMCIA_COR 0
+
+#define COR_RST 0x80
+#define COR_LEV 0x40
+#define COR_IRQEN 0x04
+#define COR_BLREN 0x01
+#define COR_FUNEN 0x01
+
+#define PCICIA_FCSR (2 / 2)
+#define PCICIA_PRR (4 / 2)
+#define PCICIA_SCR (6 / 2)
+#define PCICIA_ESR (8 / 2)
+
+#define PCM_MEMOFF 0x0000
+#define F0_MEMOFF 0x1000
+#define F1_MEMOFF 0x2000
+#define F2_MEMOFF 0x3000
+#define F3_MEMOFF 0x4000
+
+/* Memory base in the function fcr's */
+#define MEM_ADDR0 (0x728 / 2)
+#define MEM_ADDR1 (0x72a / 2)
+#define MEM_ADDR2 (0x72c / 2)
+
+/* PCMCIA base plus Srom access in fcr0: */
+#define PCMCIA_ADDR0 (0x072e / 2)
+#define PCMCIA_ADDR1 (0x0730 / 2)
+#define PCMCIA_ADDR2 (0x0732 / 2)
+
+#define MEM_SEG (0x0734 / 2)
+#define SROM_CS (0x0736 / 2)
+#define SROM_DATAL (0x0738 / 2)
+#define SROM_DATAH (0x073a / 2)
+#define SROM_ADDRL (0x073c / 2)
+#define SROM_ADDRH (0x073e / 2)
+#define SROM_INFO2 (0x0772 / 2) /* Corerev >= 2 && <= 5 */
+#define SROM_INFO (0x07be / 2) /* Corerev >= 6 */
+
+/* Values for srom_cs: */
+#define SROM_IDLE 0
+#define SROM_WRITE 1
+#define SROM_READ 2
+#define SROM_WEN 4
+#define SROM_WDS 7
+#define SROM_DONE 8
+
+/* Fields in srom_info: */
+#define SRI_SZ_MASK 0x03
+#define SRI_BLANK 0x04
+#define SRI_OTP 0x80
+
+#define SROM16K_BANK_SEL_MASK (3 << 11)
+#define SROM16K_BANK_SHFT_MASK 11
+#define SROM16K_ADDR_SEL_MASK ((1 << SROM16K_BANK_SHFT_MASK) - 1)
+#define SROM_PRSNT_MASK 0x1
+#define SROM_SUPPORT_SHIFT_MASK 30
+#define SROM_SUPPORTED (0x1 << SROM_SUPPORT_SHIFT_MASK)
+#define SROM_SIZE_MASK 0x00000006
+#define SROM_SIZE_2K 2
+#define SROM_SIZE_512 1
+#define SROM_SIZE_128 0
+#define SROM_SIZE_SHFT_MASK 1
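+
+/*
+ * Decoding sketch (hypothetical helper variables): a 16 KB-SROM word address
+ * splits into a 2-bit bank select and an 11-bit in-bank offset:
+ *
+ *   bank = (addr & SROM16K_BANK_SEL_MASK) >> SROM16K_BANK_SHFT_MASK;
+ *   off  =  addr & SROM16K_ADDR_SEL_MASK;
+ */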
+
+/* CIS stuff */
+
+/* The CIS stops where the FCRs start */
+#define CIS_SIZE PCMCIA_FCR
+#define CIS_SIZE_12K 1154 /* Maximum h/w + s/w sub region size for 12k OTP */
+
+/* CIS tuple length field max */
+#define CIS_TUPLE_LEN_MAX 0xff
+
+/* Standard tuples we know about */
+
+#define CISTPL_NULL 0x00
+#define CISTPL_END 0xff /* End of the CIS tuple chain */
+
+#define CISTPL_VERS_1 0x15 /* CIS ver, manf, dev & ver strings */
+#define CISTPL_MANFID 0x20 /* Manufacturer and device id */
+#define CISTPL_FUNCID 0x21 /* Function identification */
+#define CISTPL_FUNCE 0x22 /* Function extensions */
+#define CISTPL_CFTABLE 0x1b /* Config table entry */
+
+/* Function identifier provides context for the function extensions tuple */
+#define CISTPL_FID_SDIO 0x0c /* Extensions defined by SDIO spec */
+
+/* Function extensions for LANs (assumed for extensions other than SDIO) */
+#define LAN_TECH 1 /* Technology type */
+#define LAN_SPEED 2 /* Raw bit rate */
+#define LAN_MEDIA 3 /* Transmission media */
+#define LAN_NID 4 /* Node identification (aka MAC addr) */
+#define LAN_CONN 5 /* Connector standard */
+
+/* CFTable */
+#define CFTABLE_REGWIN_2K 0x08 /* 2k reg window size */
+#define CFTABLE_REGWIN_4K 0x10 /* 4k reg window size */
+#define CFTABLE_REGWIN_8K 0x20 /* 8k reg window size */
+
+/* Vendor unique tuples are 0x80-0x8f. Within Broadcom we'll
+ * take one for HNBU, and use "extensions" (a la FUNCE) within it.
+ */
+
+#define CISTPL_BRCM_HNBU 0x80
+
+/* Subtypes of BRCM_HNBU: */
+
+#define HNBU_SROMREV 0x00 /* A byte with sromrev, 1 if not present */
+#define HNBU_CHIPID 0x01 /* Two 16bit values: PCI vendor & device id */
+
+#define HNBU_BOARDREV 0x02 /* One byte board revision */
+
+#define HNBU_PAPARMS 0x03 /* PA parameters: 8 (sromrev == 1)
+ * or 9 (sromrev > 1) bytes
+ */
+#define HNBU_OEM 0x04 /* Eight bytes OEM data (sromrev == 1) */
+#define HNBU_CC 0x05 /* Default country code (sromrev == 1) */
+#define HNBU_AA 0x06 /* Antennas available */
+#define HNBU_AG 0x07 /* Antenna gain */
+#define HNBU_BOARDFLAGS 0x08 /* board flags (2 or 4 bytes) */
+#define HNBU_UNUSED 0x09 /* UNUSED (was LEDs) */
+#define HNBU_CCODE 0x0a /* Country code (2 bytes ascii + 1 byte cctl)
+ * in rev 2
+ */
+#define HNBU_CCKPO 0x0b /* 2 byte cck power offsets in rev 3 */
+#define HNBU_OFDMPO 0x0c /* 4 byte 11g ofdm power offsets in rev 3 */
+#define HNBU_GPIOTIMER 0x0d /* 2 bytes with on/off values in rev 3 */
+#define HNBU_PAPARMS5G 0x0e /* 5G PA params */
+#define HNBU_ANT5G 0x0f /* 4328 5G antennas available/gain */
+#define HNBU_RDLID 0x10 /* 2 byte USB remote downloader (RDL) product Id */
+#define HNBU_RSSISMBXA2G 0x11 /* 4328 2G RSSI mid pt sel & board switch arch,
+ * 2 bytes, rev 3.
+ */
+#define HNBU_RSSISMBXA5G 0x12 /* 4328 5G RSSI mid pt sel & board switch arch,
+ * 2 bytes, rev 3.
+ */
+#define HNBU_XTALFREQ 0x13 /* 4 byte Crystal frequency in kilohertz */
+#define HNBU_TRI2G 0x14 /* 4328 2G TR isolation, 1 byte */
+#define HNBU_TRI5G 0x15 /* 4328 5G TR isolation, 3 bytes */
+#define HNBU_RXPO2G 0x16 /* 4328 2G RX power offset, 1 byte */
+#define HNBU_RXPO5G 0x17 /* 4328 5G RX power offset, 1 byte */
+#define HNBU_BOARDNUM 0x18 /* board serial number, independent of mac addr */
+#define HNBU_MACADDR 0x19 /* mac addr override for the standard CIS LAN_NID */
+#define HNBU_RDLSN 0x1a /* 2 bytes; serial # advertised in USB descriptor */
+
+#define HNBU_BOARDTYPE 0x1b /* 2 bytes; boardtype */
+
+#define HNBU_UNUSED2 0x1c /* was LEDs duty cycle */
+
+#define HNBU_HNBUCIS 0x1d /* what follows is proprietary HNBU CIS format */
+
+#define HNBU_PAPARMS_SSLPNPHY 0x1e /* SSLPNPHY PA params */
+#define HNBU_RSSISMBXA2G_SSLPNPHY 0x1f /* SSLPNPHY RSSI mid pt sel & board switch arch */
+#define HNBU_RDLRNDIS 0x20 /* 1 byte; 1 = RDL advertises RNDIS config */
+#define HNBU_CHAINSWITCH 0x21 /* 2 byte; txchain, rxchain */
+#define HNBU_REGREV 0x22 /* 1 byte; */
+#define HNBU_FEM 0x23 /* 2 or 4 byte: 11n frontend specification */
+#define HNBU_PAPARMS_C0 0x24 /* 8 or 30 bytes: 11n pa parameter for chain 0 */
+#define HNBU_PAPARMS_C1 0x25 /* 8 or 30 bytes: 11n pa parameter for chain 1 */
+#define HNBU_PAPARMS_C2 0x26 /* 8 or 30 bytes: 11n pa parameter for chain 2 */
+#define HNBU_PAPARMS_C3 0x27 /* 8 or 30 bytes: 11n pa parameter for chain 3 */
+#define HNBU_PO_CCKOFDM 0x28 /* 6 or 18 bytes: cck2g/ofdm2g/ofdm5g power offset */
+#define HNBU_PO_MCS2G 0x29 /* 8 bytes: mcs2g power offset */
+#define HNBU_PO_MCS5GM 0x2a /* 8 bytes: mcs5g mid band power offset */
+#define HNBU_PO_MCS5GLH 0x2b /* 16 bytes: mcs5g low-high band power offset */
+#define HNBU_PO_CDD 0x2c /* 2 bytes: cdd2g/5g power offset */
+#define HNBU_PO_STBC 0x2d /* 2 bytes: stbc2g/5g power offset */
+#define HNBU_PO_40M 0x2e /* 2 bytes: 40Mhz channel 2g/5g power offset */
+#define HNBU_PO_40MDUP 0x2f /* 2 bytes: 40Mhz channel dup 2g/5g power offset */
+
+#define HNBU_RDLRWU 0x30 /* 1 byte; 1 = RDL advertises Remote Wake-up */
+#define HNBU_WPS 0x31 /* 1 byte; GPIO pin for WPS button */
+#define HNBU_USBFS 0x32 /* 1 byte; 1 = USB advertises FS mode only */
+#define HNBU_BRMIN 0x33 /* 4 byte bootloader min resource mask */
+#define HNBU_BRMAX 0x34 /* 4 byte bootloader max resource mask */
+#define HNBU_PATCH 0x35 /* bootloader patch addr(2b) & data(4b) pair */
+#define HNBU_CCKFILTTYPE 0x36 /* CCK digital filter selection options */
+#define HNBU_OFDMPO5G 0x37 /* 4 * 3 = 12 byte 11a ofdm power offsets in rev 3 */
+#define HNBU_ELNA2G 0x38
+#define HNBU_ELNA5G 0x39
+#define HNBU_TEMPTHRESH 0x3A /* 2 bytes
+ * byte1 tempthresh
+ * byte2 period(msb 4 bits) | hysteresis(lsb 4 bits)
+ */
+#define HNBU_UUID 0x3B /* 16 Bytes Hex */
+
+#define HNBU_USBEPNUM 0x40 /* USB endpoint numbers */
+
+/* POWER PER RATE for SROM V9 */
+#define HNBU_CCKBW202GPO 0x41 /* 2 bytes each
+ * CCK Power offsets for 20 MHz rates (11, 5.5, 2, 1Mbps)
+ * cckbw202gpo cckbw20ul2gpo
+ */
+
+#define HNBU_LEGOFDMBW202GPO 0x42 /* 4 bytes each
+ * OFDM power offsets for 20 MHz Legacy rates
+ * (54, 48, 36, 24, 18, 12, 9, 6 Mbps)
+ * legofdmbw202gpo legofdmbw20ul2gpo
+ */
+
+#define HNBU_LEGOFDMBW205GPO 0x43 /* 4 bytes each
+ * 5G band: OFDM power offsets for 20 MHz Legacy rates
+ * (54, 48, 36, 24, 18, 12, 9, 6 Mbps)
+ * low subband : legofdmbw205glpo legofdmbw20ul2glpo
+ * mid subband :legofdmbw205gmpo legofdmbw20ul2gmpo
+ * high subband :legofdmbw205ghpo legofdmbw20ul2ghpo
+ */
+
+#define HNBU_MCS2GPO 0x44 /* 4 bytes each
+ * mcs 0-7 power-offset. LSB nibble: m0, MSB nibble: m7
+ * mcsbw202gpo mcsbw20ul2gpo mcsbw402gpo
+ */
+#define HNBU_MCS5GLPO 0x45 /* 4 bytes each
+ * 5G low subband mcs 0-7 power-offset.
+ * LSB nibble: m0, MSB nibble: m7
+ * mcsbw205glpo mcsbw20ul5glpo mcsbw405glpo
+ */
+#define HNBU_MCS5GMPO 0x46 /* 4 bytes each
+ * 5G mid subband mcs 0-7 power-offset.
+ * LSB nibble: m0, MSB nibble: m7
+ * mcsbw205gmpo mcsbw20ul5gmpo mcsbw405gmpo
+ */
+#define HNBU_MCS5GHPO 0x47 /* 4 bytes each
+ * 5G high subband mcs 0-7 power-offset.
+ * LSB nibble: m0, MSB nibble: m7
+ * mcsbw205ghpo mcsbw20ul5ghpo mcsbw405ghpo
+ */
+#define HNBU_MCS32PO 0x48 /* 2 bytes total
+ * mcs-32 power offset for each band/subband.
+ * LSB nibble: 2G band, MSB nibble:
+ * mcs322ghpo, mcs325gmpo, mcs325glpo, mcs322gpo
+ */
+#define HNBU_LEG40DUPPO 0x49 /* 2 bytes total
+ * Additional power offset for Legacy Dup40 transmissions.
+ * Applied in addition to legofdmbw20ulXpo, X=2g, 5gl, 5gm, or 5gh.
+ * LSB nibble: 2G band, MSB nibble: 5G band high subband.
+ * leg40dup5ghpo, leg40dup5gmpo, leg40dup5glpo, leg40dup2gpo
+ */
+
+#define HNBU_PMUREGS 0x4a /* Variable length (5 bytes for each register)
+ * The setting of the ChipCtrl, PLL, RegulatorCtrl, Up/Down Timer and
+ * ResourceDependency Table registers.
+ */
+
+#define HNBU_PATCH2 0x4b /* bootloader TCAM patch addr(4b) & data(4b) pair.
+ * This is required for socram rev 15 onwards.
+ */
+
+#define HNBU_USBRDY 0x4c /* Variable length (up to 5 bytes)
+ * This indicates to the USB/HSIC host controller
+ * that the device is ready for enumeration.
+ */
+
+#define HNBU_USBREGS 0x4d /* Variable length
+ * The setting of the devcontrol, HSICPhyCtrl1 and HSICPhyCtrl2
+ * registers during the USB initialization.
+ */
+
+#define HNBU_BLDR_TIMEOUT 0x4e /* 2 bytes used for HSIC bootloader to reset chip
+ * on connect timeout.
+ * The delay after USBConnect before timing out if the dongle
+ * has not received a get_descriptor request.
+ */
+#define HNBU_USBFLAGS 0x4f
+#define HNBU_PATCH_AUTOINC 0x50
+#define HNBU_MDIO_REGLIST 0x51
+#define HNBU_MDIOEX_REGLIST 0x52
+/* Unified OTP: tuple to embed USB manfid inside SDIO CIS */
+#define HNBU_UMANFID 0x53
+#define HNBU_PUBKEY 0x54 /* 128 byte; public key to validate downloaded FW */
+#define HNBU_WOWLGPIO 0x55 /* 1 byte bit 7 initial polarity, bit 6..0 gpio pin */
+#define HNBU_MUXENAB 0x56 /* 1 byte to enable mux options */
+#define HNBU_GCI_CCR 0x57 /* GCI Chip control register */
+
+#define HNBU_FEM_CFG 0x58 /* FEM config */
+#define HNBU_ACPA_C0 0x59 /* ACPHY PA parameters: chain 0 */
+#define HNBU_ACPA_C1 0x5a /* ACPHY PA parameters: chain 1 */
+#define HNBU_ACPA_C2 0x5b /* ACPHY PA parameters: chain 2 */
+#define HNBU_MEAS_PWR 0x5c
+#define HNBU_PDOFF 0x5d
+#define HNBU_ACPPR_2GPO 0x5e /* ACPHY Power-per-rate 2gpo */
+#define HNBU_ACPPR_5GPO 0x5f /* ACPHY Power-per-rate 5gpo */
+#define HNBU_ACPPR_SBPO 0x60 /* ACPHY Power-per-rate sbpo */
+#define HNBU_NOISELVL 0x61
+#define HNBU_RXGAIN_ERR 0x62
+#define HNBU_AGBGA 0x63
+#define HNBU_USBDESC_COMPOSITE 0x64 /* USB WLAN/BT composite descriptor */
+#define HNBU_PATCH_AUTOINC8 0x65 /* Auto increment patch entry for 8 byte patching */
+#define HNBU_PATCH8 0x66 /* Patch entry for 8 byte patching */
+#define HNBU_ACRXGAINS_C0 0x67 /* ACPHY rxgains: chain 0 */
+#define HNBU_ACRXGAINS_C1 0x68 /* ACPHY rxgains: chain 1 */
+#define HNBU_ACRXGAINS_C2 0x69 /* ACPHY rxgains: chain 2 */
+#define HNBU_TXDUTY 0x6a /* Tx duty cycle for ACPHY 5g 40/80 Mhz */
+#define HNBU_USBUTMI_CTL 0x6b /* 2 byte USB UTMI/LDO Control */
+#define HNBU_PDOFF_2G 0x6c
+#define HNBU_USBSSPHY_UTMI_CTL0 0x6d /* 4 byte USB SSPHY UTMI Control */
+#define HNBU_USBSSPHY_UTMI_CTL1 0x6e /* 4 byte USB SSPHY UTMI Control */
+#define HNBU_USBSSPHY_UTMI_CTL2 0x6f /* 4 byte USB SSPHY UTMI Control */
+#define HNBU_USBSSPHY_SLEEP0 0x70 /* 2 byte USB SSPHY sleep */
+#define HNBU_USBSSPHY_SLEEP1 0x71 /* 2 byte USB SSPHY sleep */
+#define HNBU_USBSSPHY_SLEEP2 0x72 /* 2 byte USB SSPHY sleep */
+#define HNBU_USBSSPHY_SLEEP3 0x73 /* 2 byte USB SSPHY sleep */
+#define HNBU_USBSSPHY_MDIO 0x74 /* USB SSPHY INIT regs setting */
+#define HNBU_USB30PHY_NOSS 0x75 /* USB30 NO Super Speed */
+#define HNBU_USB30PHY_U1U2 0x76 /* USB30 PHY U1U2 Enable */
+#define HNBU_USB30PHY_REGS 0x77 /* USB30 PHY REGs update */
+#define HNBU_GPIO_PULL_DOWN 0x78 /* 4 byte GPIO pull down mask */
+
+#define HNBU_SROM3SWRGN 0x80 /* 78 bytes; srom rev 3 s/w region without crc8
+ * plus extra info appended.
+ */
+#define HNBU_RESERVED 0x81
+#define HNBU_CUSTOM1 0x82 /* 4 byte; For non-BRCM post-mfg additions */
+#define HNBU_CUSTOM2 0x83 /* Reserved; For non-BRCM post-mfg additions */
+#define HNBU_ACPAPARAM 0x84 /* ACPHY PAPARAM */
+#define HNBU_ACPA_CCK_C0 0x86 /* ACPHY PA trimming parameters: CCK */
+#define HNBU_ACPA_40 0x87 /* ACPHY PA trimming parameters: 40 */
+#define HNBU_ACPA_80 0x88 /* ACPHY PA trimming parameters: 80 */
+#define HNBU_ACPA_4080 0x89 /* ACPHY PA trimming parameters: 40/80 */
+#define HNBU_SUBBAND5GVER 0x8a /* subband5gver */
+#define HNBU_PAPARAMBWVER 0x8b /* paparambwver */
+
+#define HNBU_MCS5Gx1PO 0x8c
+#define HNBU_ACPPR_SB8080_PO 0x8d
+#define HNBU_TXBFRPCALS 0x8f /* phy txbf rpcalvars */
+#define HNBU_MACADDR2 0x90 /* (optional) 2nd mac-addr for RSDB chips */
+
+#define HNBU_ACPA_4X4C0 0x91
+#define HNBU_ACPA_4X4C1 0x92
+#define HNBU_ACPA_4X4C2 0x93
+#define HNBU_ACPA_4X4C3 0x94
+#define HNBU_ACPA_BW20_4X4C0 0x95
+#define HNBU_ACPA_BW40_4X4C0 0x96
+#define HNBU_ACPA_BW80_4X4C0 0x97
+#define HNBU_ACPA_BW20_4X4C1 0x98
+#define HNBU_ACPA_BW40_4X4C1 0x99
+#define HNBU_ACPA_BW80_4X4C1 0x9a
+#define HNBU_ACPA_BW20_4X4C2 0x9b
+#define HNBU_ACPA_BW40_4X4C2 0x9c
+#define HNBU_ACPA_BW80_4X4C2 0x9d
+#define HNBU_ACPA_BW20_4X4C3 0x9e
+#define HNBU_ACPA_BW40_4X4C3 0x9f
+#define HNBU_ACPA_BW80_4X4C3 0xa0
+#define HNBU_ACPA_CCK_C1 0xa1 /* ACPHY PA trimming parameters: CCK */
+
+#define HNBU_GAIN_CAL_TEMP 0xa2 /* RSSI Cal temperature parameter */
+#define HNBU_RSSI_DELTA_2G_B0 0xa3 /* RSSI Cal parameter for 2G channel group 0 */
+#define HNBU_RSSI_DELTA_2G_B1 0xa4 /* RSSI Cal parameter for 2G channel group 1 */
+#define HNBU_RSSI_DELTA_2G_B2 0xa5 /* RSSI Cal parameter for 2G channel group 2 */
+#define HNBU_RSSI_DELTA_2G_B3 0xa6 /* RSSI Cal parameter for 2G channel group 3 */
+#define HNBU_RSSI_DELTA_2G_B4 0xa7 /* RSSI Cal parameter for 2G channel group 4 */
+#define HNBU_RSSI_CAL_FREQ_GRP_2G 0xa8 /* RSSI Cal parameter for channel group def. */
+#define HNBU_RSSI_DELTA_5GL 0xa9 /* RSSI Cal parameter for 5G low channel */
+#define HNBU_RSSI_DELTA_5GML 0xaa /* RSSI Cal parameter for 5G mid lower channel */
+#define HNBU_RSSI_DELTA_5GMU 0xab /* RSSI Cal parameter for 5G mid upper channel */
+#define HNBU_RSSI_DELTA_5GH 0xac /* RSSI Cal parameter for 5G high channel */
+
+#define HNBU_ACPA_6G_C0 0xad /* paparams for 6G Core0 */
+#define HNBU_ACPA_6G_C1 0xae /* paparams for 6G Core1 */
+#define HNBU_ACPA_6G_C2 0xaf /* paparams for 6G Core2 */
+
+/* sbtmstatelow */
+#define SBTML_INT_ACK 0x40000 /* ack the sb interrupt */
+#define SBTML_INT_EN 0x20000 /* enable sb interrupt */
+
+/* sbtmstatehigh */
+#define SBTMH_INT_STATUS 0x40000 /* sb interrupt status */
+#endif /* _SBPCMCIA_H */
diff --git a/bcmdhd.101.10.361.x/include/sbsdio.h b/bcmdhd.101.10.361.x/include/sbsdio.h
new file mode 100755
index 0000000..0a2c227
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/sbsdio.h
@@ -0,0 +1,185 @@
+/*
+ * SDIO device core hardware definitions.
+ * sdio is a portion of the pcmcia core in core rev 3 - rev 8
+ *
+ * The SDIO core supports 1-bit and 4-bit SDIO modes as well as SPI mode.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _SBSDIO_H
+#define _SBSDIO_H
+
+#define SBSDIO_NUM_FUNCTION 3 /* as of sdiod rev 0, supports 3 functions */
+
+/* function 1 miscellaneous registers */
+#define SBSDIO_SPROM_CS 0x10000 /* sprom command and status */
+#define SBSDIO_SPROM_INFO 0x10001 /* sprom info register */
+#define SBSDIO_SPROM_DATA_LOW 0x10002 /* sprom indirect access data byte 0 */
+#define SBSDIO_SPROM_DATA_HIGH 0x10003 /* sprom indirect access data byte 1 */
+#define SBSDIO_SPROM_ADDR_LOW 0x10004 /* sprom indirect access addr byte 0 */
+#define SBSDIO_SPROM_ADDR_HIGH 0x10005 /* sprom indirect access addr byte 1 */
+#define SBSDIO_CHIP_CTRL_DATA 0x10006 /* xtal_pu (gpio) output */
+#define SBSDIO_CHIP_CTRL_EN 0x10007 /* xtal_pu (gpio) enable */
+#define SBSDIO_WATERMARK 0x10008 /* rev < 7, watermark for sdio device */
+#define SBSDIO_DEVICE_CTL 0x10009 /* control busy signal generation */
+
+/* registers introduced in rev 8, some content (mask/bits) defs in sbsdpcmdev.h */
+#define SBSDIO_FUNC1_SBADDRLOW 0x1000A /* SB Address Window Low (b15) */
+#define SBSDIO_FUNC1_SBADDRMID 0x1000B /* SB Address Window Mid (b23:b16) */
+#define SBSDIO_FUNC1_SBADDRHIGH 0x1000C /* SB Address Window High (b31:b24) */
+#define SBSDIO_FUNC1_FRAMECTRL 0x1000D /* Frame Control (frame term/abort) */
+#define SBSDIO_FUNC1_CHIPCLKCSR 0x1000E /* ChipClockCSR (ALP/HT ctl/status) */
+#define SBSDIO_FUNC1_SDIOPULLUP 0x1000F /* SdioPullUp (on cmd, d0-d2) */
+#define SBSDIO_FUNC1_WFRAMEBCLO 0x10019 /* Write Frame Byte Count Low */
+#define SBSDIO_FUNC1_WFRAMEBCHI 0x1001A /* Write Frame Byte Count High */
+#define SBSDIO_FUNC1_RFRAMEBCLO 0x1001B /* Read Frame Byte Count Low */
+#define SBSDIO_FUNC1_RFRAMEBCHI 0x1001C /* Read Frame Byte Count High */
+#define SBSDIO_FUNC1_MESBUSYCTRL 0x1001D /* MesBusyCtl at 0x1001D (rev 11) */
+
+#define SBSDIO_FUNC1_MISC_REG_START 0x10000 /* f1 misc register start */
+#define SBSDIO_FUNC1_MISC_REG_LIMIT 0x1001C /* f1 misc register end */
+
+/* Sdio Core Rev 12 */
+#define SBSDIO_FUNC1_WAKEUPCTRL 0x1001E
+#define SBSDIO_FUNC1_WCTRL_ALPWAIT_MASK 0x1
+#define SBSDIO_FUNC1_WCTRL_ALPWAIT_SHIFT 0
+#define SBSDIO_FUNC1_WCTRL_HTWAIT_MASK 0x2
+#define SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT 1
+#define SBSDIO_FUNC1_SLEEPCSR 0x1001F
+#define SBSDIO_FUNC1_SLEEPCSR_KSO_MASK 0x1
+#define SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT 0
+#define SBSDIO_FUNC1_SLEEPCSR_KSO_EN 1
+#define SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK 0x2
+#define SBSDIO_FUNC1_SLEEPCSR_DEVON_SHIFT 1
+
+/* SBSDIO_SPROM_CS */
+#define SBSDIO_SPROM_IDLE 0
+#define SBSDIO_SPROM_WRITE 1
+#define SBSDIO_SPROM_READ 2
+#define SBSDIO_SPROM_WEN 4
+#define SBSDIO_SPROM_WDS 7
+#define SBSDIO_SPROM_DONE 8
+
+/* SBSDIO_SPROM_INFO */
+#define SROM_SZ_MASK 0x03 /* SROM size, 1: 4k, 2: 16k */
+#define SROM_BLANK 0x04 /* deprecated in corerev 6 */
+#define SROM_OTP 0x80 /* OTP present */
+
+/* SBSDIO_WATERMARK */
+#define SBSDIO_WATERMARK_MASK 0x7f /* number of words - 1 for sd device
+ * to wait before sending data to host
+ */
+
+/* SBSDIO_MESBUSYCTRL */
+/* When the RX FIFO has fewer entries than this and MBE is set,
+ * the busy signal is asserted between data blocks.
+ */
+#define SBSDIO_MESBUSYCTRL_MASK 0x7f
+#define SBSDIO_MESBUSYCTRL_ENAB 0x80 /* Enable busy capability for MES access */
+
+/* SBSDIO_DEVICE_CTL */
+#define SBSDIO_DEVCTL_SETBUSY 0x01 /* 1: device will assert busy signal when
+ * receiving CMD53
+ */
+#define SBSDIO_DEVCTL_SPI_INTR_SYNC 0x02 /* 1: assertion of sdio interrupt is
+ * synchronous to the sdio clock
+ */
+#define SBSDIO_DEVCTL_CA_INT_ONLY 0x04 /* 1: mask all interrupts to host
+ * except the chipActive (rev 8)
+ */
+#define SBSDIO_DEVCTL_PADS_ISO 0x08 /* 1: isolate internal sdio signals, put
+ * external pads in tri-state; requires
+ * sdio bus power cycle to clear (rev 9)
+ */
+#define SBSDIO_DEVCTL_EN_F2_BLK_WATERMARK 0x10 /* Enable function 2 tx for each block */
+#define SBSDIO_DEVCTL_F2WM_ENAB 0x10 /* Enable F2 Watermark */
+#define SBSDIO_DEVCTL_NONDAT_PADS_ISO 0x20 /* Isolate sdio clk and cmd (non-data) */
+
+/* SBSDIO_FUNC1_CHIPCLKCSR */
+#define SBSDIO_FORCE_ALP 0x01 /* Force ALP request to backplane */
+#define SBSDIO_FORCE_HT 0x02 /* Force HT request to backplane */
+#define SBSDIO_FORCE_ILP 0x04 /* Force ILP request to backplane */
+#define SBSDIO_ALP_AVAIL_REQ 0x08 /* Make ALP ready (power up xtal) */
+#define SBSDIO_HT_AVAIL_REQ 0x10 /* Make HT ready (power up PLL) */
+#define SBSDIO_FORCE_HW_CLKREQ_OFF 0x20 /* Squelch clock requests from HW */
+#define SBSDIO_ALP_AVAIL 0x40 /* Status: ALP is ready */
+#define SBSDIO_HT_AVAIL 0x80 /* Status: HT is ready */
+/* In rev8, actual avail bits followed original docs */
+#define SBSDIO_Rev8_HT_AVAIL 0x40
+#define SBSDIO_Rev8_ALP_AVAIL 0x80
+#define SBSDIO_CSR_MASK 0x1F
+
+/* WAR for PR 40695: determine HT/ALP regardless of actual bit order. Need to use
+ * before we know corerev. (Can drop if all supported revs have same bit order.)
+ */
+#define SBSDIO_AVBITS (SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL)
+#define SBSDIO_ALPAV(regval) ((regval) & SBSDIO_AVBITS)
+#define SBSDIO_HTAV(regval) (((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS)
+#define SBSDIO_ALPONLY(regval) (SBSDIO_ALPAV(regval) && !SBSDIO_HTAV(regval))
+#define SBSDIO_CLKAV(regval, alponly) (SBSDIO_ALPAV(regval) && \
+ (alponly ? 1 : SBSDIO_HTAV(regval)))
+
+/* SBSDIO_FUNC1_SDIOPULLUP */
+#define SBSDIO_PULLUP_D0 0x01 /* Enable D0/MISO pullup */
+#define SBSDIO_PULLUP_D1 0x02 /* Enable D1/INT# pullup */
+#define SBSDIO_PULLUP_D2 0x04 /* Enable D2 pullup */
+#define SBSDIO_PULLUP_CMD 0x08 /* Enable CMD/MOSI pullup */
+#define SBSDIO_PULLUP_ALL 0x0f /* All valid bits */
+
+/* function 1 OCP space */
+#define SBSDIO_SB_OFT_ADDR_MASK 0x07FFF /* sb offset addr is <= 15 bits, 32k */
+#define SBSDIO_SB_OFT_ADDR_LIMIT 0x08000
+#define SBSDIO_SB_ACCESS_2_4B_FLAG 0x08000 /* with b15, maps to 32-bit SB access */
+
+/* some duplication with sbsdpcmdev.h here */
+/* valid bits in SBSDIO_FUNC1_SBADDRxxx regs */
+#define SBSDIO_SBADDRLOW_MASK 0x80 /* Valid bits in SBADDRLOW */
+#define SBSDIO_SBADDRMID_MASK 0xff /* Valid bits in SBADDRMID */
+#define SBSDIO_SBADDRHIGH_MASK 0xffU /* Valid bits in SBADDRHIGH */
+#define SBSDIO_SBWINDOW_MASK 0xffff8000 /* Address bits from SBADDR regs */
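+
+/*
+ * Window sketch (illustrative): to reach a 32-bit backplane address "addr"
+ * through function 1, split it into a window base and an in-window offset:
+ *
+ *   base = addr & SBSDIO_SBWINDOW_MASK;   (programmed via SBADDRLOW/MID/HIGH)
+ *   off  = (addr & SBSDIO_SB_OFT_ADDR_MASK) | SBSDIO_SB_ACCESS_2_4B_FLAG;
+ *
+ * where OR-ing in SBSDIO_SB_ACCESS_2_4B_FLAG (b15) selects a 32-bit access.
+ */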
+
+/* direct(mapped) cis space */
+#define SBSDIO_CIS_BASE_COMMON 0x1000 /* MAPPED common CIS address */
+#ifdef BCMSPI
+#define SBSDIO_CIS_SIZE_LIMIT 0x100 /* maximum bytes in one spi CIS */
+#else
+#define SBSDIO_CIS_SIZE_LIMIT 0x200 /* maximum bytes in one CIS */
+#endif /* !BCMSPI */
+#define SBSDIO_OTP_CIS_SIZE_LIMIT 0x078 /* maximum bytes OTP CIS */
+
+#define SBSDIO_CIS_OFT_ADDR_MASK 0x1FFFF /* cis offset addr is < 17 bits */
+
+#define SBSDIO_CIS_MANFID_TUPLE_LEN 6 /* manfid tuple length, include tuple,
+ * link bytes
+ */
+
+/* indirect cis access (in sprom) */
+#define SBSDIO_SPROM_CIS_OFFSET 0x8 /* 8 control bytes first, CIS starts from
+ * 8th byte
+ */
+
+#define SBSDIO_BYTEMODE_DATALEN_MAX 64 /* sdio byte mode: maximum length of one
+ * data command
+ */
+
+#define SBSDIO_CORE_ADDR_MASK 0x1FFFF /* sdio core function one address mask */
+
+#endif /* _SBSDIO_H */
diff --git a/bcmdhd.101.10.361.x/include/sbsdpcmdev.h b/bcmdhd.101.10.361.x/include/sbsdpcmdev.h
new file mode 100755
index 0000000..ced0aff
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/sbsdpcmdev.h
@@ -0,0 +1,307 @@
+/*
+ * Broadcom SiliconBackplane SDIO/PCMCIA hardware-specific
+ * device core support
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _sbsdpcmdev_h_
+#define _sbsdpcmdev_h_
+
+/* cpp contortions to concatenate w/arg prescan */
+#ifndef PAD
+#define _PADLINE(line) pad ## line
+#define _XSTR(line) _PADLINE(line)
+#define PAD _XSTR(__LINE__)
+#endif /* PAD */
+
+typedef volatile struct {
+ dma64regs_t xmt; /* dma tx */
+ uint32 PAD[2];
+ dma64regs_t rcv; /* dma rx */
+ uint32 PAD[2];
+} dma64p_t;
+
+/* dma64 sdiod corerev >= 1 */
+typedef volatile struct {
+ dma64p_t dma64regs[2];
+ dma64diag_t dmafifo; /* DMA Diagnostic Regs, 0x280-0x28c */
+ uint32 PAD[92];
+} sdiodma64_t;
+
+/* dma32 sdiod corerev == 0 */
+typedef volatile struct {
+ dma32regp_t dma32regs[2]; /* dma tx & rx, 0x200-0x23c */
+ dma32diag_t dmafifo; /* DMA Diagnostic Regs, 0x240-0x24c */
+ uint32 PAD[108];
+} sdiodma32_t;
+
+/* dma32 regs for pcmcia core */
+typedef volatile struct {
+ dma32regp_t dmaregs; /* DMA Regs, 0x200-0x21c, rev8 */
+ dma32diag_t dmafifo; /* DMA Diagnostic Regs, 0x220-0x22c */
+ uint32 PAD[116];
+} pcmdma32_t;
+
+/* core registers */
+typedef volatile struct {
+ uint32 corecontrol; /* CoreControl, 0x000, rev8 */
+ uint32 corestatus; /* CoreStatus, 0x004, rev8 */
+ uint32 PAD[1];
+ uint32 biststatus; /* BistStatus, 0x00c, rev8 */
+
+ /* PCMCIA access */
+ uint16 pcmciamesportaladdr; /* PcmciaMesPortalAddr, 0x010, rev8 */
+ uint16 PAD[1];
+ uint16 pcmciamesportalmask; /* PcmciaMesPortalMask, 0x014, rev8 */
+ uint16 PAD[1];
+ uint16 pcmciawrframebc; /* PcmciaWrFrameBC, 0x018, rev8 */
+ uint16 PAD[1];
+ uint16 pcmciaunderflowtimer; /* PcmciaUnderflowTimer, 0x01c, rev8 */
+ uint16 PAD[1];
+
+ /* interrupt */
+ uint32 intstatus; /* IntStatus, 0x020, rev8 */
+ uint32 hostintmask; /* IntHostMask, 0x024, rev8 */
+ uint32 intmask; /* IntSbMask, 0x028, rev8 */
+ uint32 sbintstatus; /* SBIntStatus, 0x02c, rev8 */
+ uint32 sbintmask; /* SBIntMask, 0x030, rev8 */
+ uint32 funcintmask; /* SDIO Function Interrupt Mask, SDIO rev4 */
+ uint32 PAD[2];
+ uint32 tosbmailbox; /* ToSBMailbox, 0x040, rev8 */
+ uint32 tohostmailbox; /* ToHostMailbox, 0x044, rev8 */
+ uint32 tosbmailboxdata; /* ToSbMailboxData, 0x048, rev8 */
+ uint32 tohostmailboxdata; /* ToHostMailboxData, 0x04c, rev8 */
+
+ /* synchronized access to registers in SDIO clock domain */
+ uint32 sdioaccess; /* SdioAccess, 0x050, rev8 */
+ uint32 PAD[1];
+ uint32 MiscHostAccessIntEn; /* MiscHostAccessIntEn, 0x058 */
+ uint32 PAD[1];
+
+ /* PCMCIA frame control */
+ uint8 pcmciaframectrl; /* pcmciaFrameCtrl, 0x060, rev8 */
+ uint8 PAD[3];
+ uint8 pcmciawatermark; /* pcmciaWaterMark, 0x064, rev8 */
+ uint8 PAD[155];
+
+ /* interrupt batching control */
+ uint32 intrcvlazy; /* IntRcvLazy, 0x100, rev8 */
+ uint32 PAD[3];
+
+ /* counters */
+ uint32 cmd52rd; /* Cmd52RdCount, 0x110, rev8, SDIO: cmd52 reads */
+ uint32 cmd52wr; /* Cmd52WrCount, 0x114, rev8, SDIO: cmd52 writes */
+ uint32 cmd53rd; /* Cmd53RdCount, 0x118, rev8, SDIO: cmd53 reads */
+ uint32 cmd53wr; /* Cmd53WrCount, 0x11c, rev8, SDIO: cmd53 writes */
+ uint32 abort; /* AbortCount, 0x120, rev8, SDIO: aborts */
+ uint32 datacrcerror; /* DataCrcErrorCount, 0x124, rev8, SDIO: frames w/bad CRC */
+ uint32 rdoutofsync; /* RdOutOfSyncCount, 0x128, rev8, SDIO/PCMCIA: Rd Frm OOS */
+ uint32 wroutofsync; /* WrOutOfSyncCount, 0x12c, rev8, SDIO/PCMCIA: Wr Frm OOS */
+ uint32 writebusy; /* WriteBusyCount, 0x130, rev8, SDIO: dev asserted "busy" */
+ uint32 readwait; /* ReadWaitCount, 0x134, rev8, SDIO: read: no data avail */
+ uint32 readterm; /* ReadTermCount, 0x138, rev8, SDIO: rd frm terminates */
+ uint32 writeterm; /* WriteTermCount, 0x13c, rev8, SDIO: wr frm terminates */
+ uint32 PAD[40];
+ uint32 clockctlstatus; /* ClockCtlStatus, 0x1e0, rev8 */
+ uint32 PAD[1];
+ uint32 powerctl; /* 0x1e8 */
+ uint32 PAD[5];
+
+ /* DMA engines */
+ volatile union {
+ pcmdma32_t pcm32;
+ sdiodma32_t sdiod32;
+ sdiodma64_t sdiod64;
+ } dma;
+
+ /* SDIO/PCMCIA CIS region */
+ char cis[512]; /* 512 byte CIS, 0x400-0x5ff, rev6 */
+
+ /* PCMCIA function control registers */
+ char pcmciafcr[256]; /* PCMCIA FCR, 0x600-6ff, rev6 */
+ uint16 PAD[55];
+
+ /* PCMCIA backplane access */
+ uint16 backplanecsr; /* BackplaneCSR, 0x76E, rev6 */
+ uint16 backplaneaddr0; /* BackplaneAddr0, 0x770, rev6 */
+ uint16 backplaneaddr1; /* BackplaneAddr1, 0x772, rev6 */
+ uint16 backplaneaddr2; /* BackplaneAddr2, 0x774, rev6 */
+ uint16 backplaneaddr3; /* BackplaneAddr3, 0x776, rev6 */
+ uint16 backplanedata0; /* BackplaneData0, 0x778, rev6 */
+ uint16 backplanedata1; /* BackplaneData1, 0x77a, rev6 */
+ uint16 backplanedata2; /* BackplaneData2, 0x77c, rev6 */
+ uint16 backplanedata3; /* BackplaneData3, 0x77e, rev6 */
+ uint16 PAD[31];
+
+ /* sprom "size" & "blank" info */
+ uint16 spromstatus; /* SPROMStatus, 0x7BE, rev2 */
+ uint32 PAD[464];
+
+ /* Sonics SiliconBackplane registers */
+ sbconfig_t sbconfig; /* SbConfig Regs, 0xf00-0xfff, rev8 */
+} sdpcmd_regs_t;
+
+/* corecontrol */
+#define CC_CISRDY (1 << 0) /* CIS Ready */
+#define CC_BPRESEN (1 << 1) /* CCCR RES signal causes backplane reset */
+#define CC_F2RDY (1 << 2) /* set CCCR IOR2 bit */
+#define CC_CLRPADSISO (1 << 3) /* clear SDIO pads isolation bit (rev 11) */
+#define CC_XMTDATAAVAIL_MODE (1 << 4) /* data avail generates an interrupt */
+#define CC_XMTDATAAVAIL_CTRL (1 << 5) /* data avail interrupt ctrl */
+
+/* corestatus */
+#define CS_PCMCIAMODE (1 << 0) /* Device Mode; 0=SDIO, 1=PCMCIA */
+#define CS_SMARTDEV (1 << 1) /* 1=smartDev enabled */
+#define CS_F2ENABLED (1 << 2) /* 1=host has enabled the device */
+
+#define PCMCIA_MES_PA_MASK 0x7fff /* PCMCIA Message Portal Address Mask */
+#define PCMCIA_MES_PM_MASK 0x7fff /* PCMCIA Message Portal Mask Mask */
+#define PCMCIA_WFBC_MASK 0xffff /* PCMCIA Write Frame Byte Count Mask */
+#define PCMCIA_UT_MASK 0x07ff /* PCMCIA Underflow Timer Mask */
+
+/* intstatus */
+#define I_SMB_SW0 (1 << 0) /* To SB Mail S/W interrupt 0 */
+#define I_SMB_SW1 (1 << 1) /* To SB Mail S/W interrupt 1 */
+#define I_SMB_SW2 (1 << 2) /* To SB Mail S/W interrupt 2 */
+#define I_SMB_SW3 (1 << 3) /* To SB Mail S/W interrupt 3 */
+#define I_SMB_SW_MASK 0x0000000f /* To SB Mail S/W interrupts mask */
+#define I_SMB_SW_SHIFT 0 /* To SB Mail S/W interrupts shift */
+#define I_HMB_SW0 (1 << 4) /* To Host Mail S/W interrupt 0 */
+#define I_HMB_SW1 (1 << 5) /* To Host Mail S/W interrupt 1 */
+#define I_HMB_SW2 (1 << 6) /* To Host Mail S/W interrupt 2 */
+#define I_HMB_SW3 (1 << 7) /* To Host Mail S/W interrupt 3 */
+#define I_HMB_SW_MASK 0x000000f0 /* To Host Mail S/W interrupts mask */
+#define I_HMB_SW_SHIFT 4 /* To Host Mail S/W interrupts shift */
+#define I_WR_OOSYNC (1 << 8) /* Write Frame Out Of Sync */
+#define I_RD_OOSYNC (1 << 9) /* Read Frame Out Of Sync */
+#define I_PC (1 << 10) /* descriptor error */
+#define I_PD (1 << 11) /* data error */
+#define I_DE (1 << 12) /* Descriptor protocol Error */
+#define I_RU (1 << 13) /* Receive descriptor Underflow */
+#define I_RO (1 << 14) /* Receive fifo Overflow */
+#define I_XU (1 << 15) /* Transmit fifo Underflow */
+#define I_RI (1 << 16) /* Receive Interrupt */
+#define I_BUSPWR (1 << 17) /* SDIO Bus Power Change (rev 9) */
+#define I_XMTDATA_AVAIL (1 << 23) /* bits in fifo */
+#define I_XI (1 << 24) /* Transmit Interrupt */
+#define I_RF_TERM (1 << 25) /* Read Frame Terminate */
+#define I_WF_TERM (1 << 26) /* Write Frame Terminate */
+#define I_PCMCIA_XU (1 << 27) /* PCMCIA Transmit FIFO Underflow */
+#define I_SBINT (1 << 28) /* sbintstatus Interrupt */
+#define I_CHIPACTIVE (1 << 29) /* chip transitioned from doze to active state */
+#define I_SRESET (1 << 30) /* CCCR RES interrupt */
+#define I_IOE2 (1U << 31) /* CCCR IOE2 Bit Changed */
+#define I_ERRORS (I_PC | I_PD | I_DE | I_RU | I_RO | I_XU) /* DMA Errors */
+#define I_DMA (I_RI | I_XI | I_ERRORS)
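+
+/*
+ * Illustrative sketch (not part of the original header): a consumer of
+ * these definitions typically masks intstatus against the groups above,
+ * along the lines of
+ *
+ *   uint32 intstatus = R_REG(osh, &regs->intstatus);
+ *   if (intstatus & I_ERRORS)
+ *           recover_dma();    // hypothetical error handler
+ *   if (intstatus & I_HMB_SW_MASK)
+ *           mbdata = (intstatus & I_HMB_SW_MASK) >> I_HMB_SW_SHIFT;
+ *
+ * where R_REG is the register access macro used elsewhere in this driver
+ * and recover_dma() is a placeholder.
+ */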
+
+/* sbintstatus */
+#define I_SB_SERR (1 << 8) /* Backplane SError (write) */
+#define I_SB_RESPERR (1 << 9) /* Backplane Response Error (read) */
+#define I_SB_SPROMERR (1 << 10) /* Error accessing the sprom */
+
+/* sdioaccess */
+#define SDA_DATA_MASK 0x000000ff /* Read/Write Data Mask */
+#define SDA_ADDR_MASK 0x000fff00 /* Read/Write Address Mask */
+#define SDA_ADDR_SHIFT 8 /* Read/Write Address Shift */
+#define SDA_WRITE 0x01000000 /* Write bit */
+#define SDA_READ 0x00000000 /* Write bit cleared for Read */
+#define SDA_BUSY 0x80000000 /* Busy bit */
+
+/* sdioaccess-accessible register address spaces */
+#define SDA_CCCR_SPACE 0x000 /* sdioAccess CCCR register space */
+#define SDA_F1_FBR_SPACE 0x100 /* sdioAccess F1 FBR register space */
+#define SDA_F2_FBR_SPACE 0x200 /* sdioAccess F2 FBR register space */
+#define SDA_F1_REG_SPACE 0x300 /* sdioAccess F1 core-specific register space */
+#define SDA_F3_FBR_SPACE 0x400 /* sdioAccess F3 FBR register space */
+
+/* SDA_F1_REG_SPACE sdioaccess-accessible F1 reg space register offsets */
+#define SDA_CHIPCONTROLDATA 0x006 /* ChipControlData */
+#define SDA_CHIPCONTROLENAB 0x007 /* ChipControlEnable */
+#define SDA_F2WATERMARK 0x008 /* Function 2 Watermark */
+#define SDA_DEVICECONTROL 0x009 /* DeviceControl */
+#define SDA_SBADDRLOW 0x00a /* SbAddrLow */
+#define SDA_SBADDRMID 0x00b /* SbAddrMid */
+#define SDA_SBADDRHIGH 0x00c /* SbAddrHigh */
+#define SDA_FRAMECTRL 0x00d /* FrameCtrl */
+#define SDA_CHIPCLOCKCSR 0x00e /* ChipClockCSR */
+#define SDA_SDIOPULLUP 0x00f /* SdioPullUp */
+#define SDA_SDIOWRFRAMEBCLOW 0x019 /* SdioWrFrameBCLow */
+#define SDA_SDIOWRFRAMEBCHIGH 0x01a /* SdioWrFrameBCHigh */
+#define SDA_SDIORDFRAMEBCLOW 0x01b /* SdioRdFrameBCLow */
+#define SDA_SDIORDFRAMEBCHIGH 0x01c /* SdioRdFrameBCHigh */
+#define SDA_MESBUSYCNTRL 0x01d /* mesBusyCntrl */
+#define SDA_WAKEUPCTRL 0x01e /* WakeupCtrl */
+#define SDA_SLEEPCSR 0x01f /* sleepCSR */
+
+/* SDA_F1_REG_SPACE register bits */
+/* sleepCSR register */
+#define SDA_SLEEPCSR_KEEP_SDIO_ON 0x1
+
+/* SDA_F2WATERMARK */
+#define SDA_F2WATERMARK_MASK 0x7f /* F2Watermark Mask */
+
+/* SDA_SBADDRLOW */
+#define SDA_SBADDRLOW_MASK 0x80 /* SbAddrLow Mask */
+
+/* SDA_SBADDRMID */
+#define SDA_SBADDRMID_MASK 0xff /* SbAddrMid Mask */
+
+/* SDA_SBADDRHIGH */
+#define SDA_SBADDRHIGH_MASK 0xff /* SbAddrHigh Mask */
+
+/* SDA_FRAMECTRL */
+#define SFC_RF_TERM (1 << 0) /* Read Frame Terminate */
+#define SFC_WF_TERM (1 << 1) /* Write Frame Terminate */
+#define SFC_CRC4WOOS (1 << 2) /* HW reports CRC error for write out of sync */
+#define SFC_ABORTALL (1 << 3) /* Abort cancels all in-progress frames */
+
+/* pcmciaframectrl */
+#define PFC_RF_TERM (1 << 0) /* Read Frame Terminate */
+#define PFC_WF_TERM (1 << 1) /* Write Frame Terminate */
+
+/* intrcvlazy */
+#define IRL_TO_MASK 0x00ffffff /* timeout */
+#define IRL_FC_MASK 0xff000000 /* frame count */
+#define IRL_FC_SHIFT 24 /* frame count */
+
+/* rx header */
+typedef volatile struct {
+ uint16 len;
+ uint16 flags;
+} sdpcmd_rxh_t;
+
+/* rx header flags */
+#define RXF_CRC 0x0001 /* CRC error detected */
+#define RXF_WOOS 0x0002 /* write frame out of sync */
+#define RXF_WF_TERM 0x0004 /* write frame terminated */
+#define RXF_ABORT 0x0008 /* write frame aborted */
+#define RXF_DISCARD (RXF_CRC | RXF_WOOS | RXF_WF_TERM | RXF_ABORT) /* bad frame */
+
+/* HW frame tag */
+#define SDPCM_FRAMETAG_LEN 4 /* HW frametag: 2 bytes len, 2 bytes check val */
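+
+/*
+ * Illustrative sketch (an assumption about the tag layout, inferred from
+ * how received frames are validated): the tag is a little-endian length
+ * followed by its ones-complement check value, so
+ *
+ *   uint16 len   = ltoh16_ua(frame);
+ *   uint16 check = ltoh16_ua(frame + 2);
+ *   bool   ok    = ((uint16)~check == len);
+ */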
+
+#if !defined(NDISVER) || (NDISVER < 0x0630)
+#define SDPCM_HWEXT_LEN 8
+#else
+#define SDPCM_HWEXT_LEN 0
+#endif /* !defined(NDISVER) || (NDISVER < 0x0630) */
+
+#endif /* _sbsdpcmdev_h_ */
diff --git a/bcmdhd.101.10.361.x/include/sbsocram.h b/bcmdhd.101.10.361.x/include/sbsocram.h
new file mode 100755
index 0000000..f8d6b0d
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/sbsocram.h
@@ -0,0 +1,198 @@
+/*
+ * BCM47XX Sonics SiliconBackplane embedded ram core
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _SBSOCRAM_H
+#define _SBSOCRAM_H
+
+#ifndef _LANGUAGE_ASSEMBLY
+
+/* cpp contortions to concatenate w/arg prescan */
+#ifndef PAD
+#define _PADLINE(line) pad ## line
+#define _XSTR(line) _PADLINE(line)
+#define PAD _XSTR(__LINE__)
+#endif /* PAD */
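+
+/*
+ * Illustrative note (not part of the original header): PAD expands to an
+ * identifier derived from the source line number, so repeated filler
+ * members do not collide; e.g. on line 57,
+ *
+ *   uint32 PAD[2];   // preprocesses to: uint32 pad57[2];
+ */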
+
+/* Memcsocram core registers */
+typedef volatile struct sbsocramregs {
+ uint32 coreinfo;
+ uint32 bwalloc;
+ uint32 extracoreinfo;
+ uint32 biststat;
+ uint32 bankidx;
+ uint32 standbyctrl;
+
+ uint32 errlogstatus; /* rev 6 */
+ uint32 errlogaddr; /* rev 6 */
+ /* used for patching rev 3 & 5 */
+ uint32 cambankidx;
+ uint32 cambankstandbyctrl;
+ uint32 cambankpatchctrl;
+ uint32 cambankpatchtblbaseaddr;
+ uint32 cambankcmdreg;
+ uint32 cambankdatareg;
+ uint32 cambankmaskreg;
+ uint32 PAD[1];
+ uint32 bankinfo; /* corerev 8 */
+ uint32 bankpda;
+ uint32 PAD[14];
+ uint32 extmemconfig;
+ uint32 extmemparitycsr;
+ uint32 extmemparityerrdata;
+ uint32 extmemparityerrcnt;
+ uint32 extmemwrctrlandsize;
+ uint32 PAD[84];
+ uint32 workaround;
+ uint32 pwrctl; /* corerev >= 2 */
+ uint32 PAD[133];
+ uint32 sr_control; /* corerev >= 15 */
+ uint32 sr_status; /* corerev >= 15 */
+ uint32 sr_address; /* corerev >= 15 */
+ uint32 sr_data; /* corerev >= 15 */
+} sbsocramregs_t;
+
+#endif /* _LANGUAGE_ASSEMBLY */
+
+/* Register offsets */
+#define SR_COREINFO 0x00
+#define SR_BWALLOC 0x04
+#define SR_BISTSTAT 0x0c
+#define SR_BANKINDEX 0x10
+#define SR_BANKSTBYCTL 0x14
+#define SR_PWRCTL 0x1e8
+
+/* Coreinfo register */
+#define SRCI_PT_MASK 0x00070000 /* corerev >= 6; port type[18:16] */
+#define SRCI_PT_SHIFT 16
+/* port types : SRCI_PT_<processorPT>_<backplanePT> */
+#define SRCI_PT_OCP_OCP 0
+#define SRCI_PT_AXI_OCP 1
+#define SRCI_PT_ARM7AHB_OCP 2
+#define SRCI_PT_CM3AHB_OCP 3
+#define SRCI_PT_AXI_AXI 4
+#define SRCI_PT_AHB_AXI 5
+/* corerev >= 3 */
+#define SRCI_LSS_MASK 0x00f00000
+#define SRCI_LSS_SHIFT 20
+#define SRCI_LRS_MASK 0x0f000000
+#define SRCI_LRS_SHIFT 24
+
+/* In corerev 0, the memory size is 2 to the power of
+ * (the base of 16, plus the contents of the memsize field, plus 1).
+ */
+#define SRCI_MS0_MASK 0xf
+#define SR_MS0_BASE 16
+
+/*
+ * In corerev 1 the bank size is 2 ^ (the bank size field plus 14);
+ * the memory size is the number of banks times the bank size.
+ * The same applies to the ROM size.
+ */
+#define SRCI_ROMNB_MASK 0xf000
+#define SRCI_ROMNB_SHIFT 12
+#define SRCI_ROMBSZ_MASK 0xf00
+#define SRCI_ROMBSZ_SHIFT 8
+#define SRCI_SRNB_MASK 0xf0
+#define SRCI_SRNB_SHIFT 4
+#define SRCI_SRBSZ_MASK 0xf
+#define SRCI_SRBSZ_SHIFT 0
+
+#define SRCI_SRNB_MASK_EXT 0x100
+
+#define SR_BSZ_BASE 14
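+
+/*
+ * Worked example (illustrative): with an extracted SRCI_SRBSZ field of 2
+ * and SRCI_SRNB of 4, each bank is 1 << (2 + SR_BSZ_BASE) = 64 KB, giving
+ * 4 * 64 KB = 256 KB of RAM.
+ */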
+
+/* Standby control register */
+#define SRSC_SBYOVR_MASK 0x80000000
+#define SRSC_SBYOVR_SHIFT 31
+#define SRSC_SBYOVRVAL_MASK 0x60000000
+#define SRSC_SBYOVRVAL_SHIFT 29
+#define SRSC_SBYEN_MASK 0x01000000 /* rev >= 3 */
+#define SRSC_SBYEN_SHIFT 24
+
+/* Power control register */
+#define SRPC_PMU_STBYDIS_MASK 0x00000010 /* rev >= 3 */
+#define SRPC_PMU_STBYDIS_SHIFT 4
+#define SRPC_STBYOVRVAL_MASK 0x00000008
+#define SRPC_STBYOVRVAL_SHIFT 3
+#define SRPC_STBYOVR_MASK 0x00000007
+#define SRPC_STBYOVR_SHIFT 0
+
+/* Extra core capability register */
+#define SRECC_NUM_BANKS_MASK 0x000000F0
+#define SRECC_NUM_BANKS_SHIFT 4
+#define SRECC_BANKSIZE_MASK 0x0000000F
+#define SRECC_BANKSIZE_SHIFT 0
+
+#define SRECC_BANKSIZE(value) (1 << (value))
+
+/* CAM bank patch control */
+#define SRCBPC_PATCHENABLE 0x80000000
+
+#define SRP_ADDRESS 0x0001FFFC
+#define SRP_VALID 0x8000
+
+/* CAM bank command reg */
+#define SRCMD_WRITE 0x00020000
+#define SRCMD_READ 0x00010000
+#define SRCMD_DONE 0x80000000
+
+#define SRCMD_DONE_DLY 1000
+
+/* bankidx and bankinfo reg defines corerev >= 8 */
+#define SOCRAM_BANKINFO_SZMASK 0x7f
+#define SOCRAM_BANKIDX_ROM_MASK 0x100
+
+#define SOCRAM_BANKIDX_MEMTYPE_SHIFT 8
+/* socram bankinfo memtype */
+#define SOCRAM_MEMTYPE_RAM 0
+#define SOCRAM_MEMTYPE_ROM 1
+#define SOCRAM_MEMTYPE_DEVRAM 2
+
+#define SOCRAM_BANKINFO_REG 0x40
+#define SOCRAM_BANKIDX_REG 0x10
+#define SOCRAM_BANKINFO_STDBY_MASK 0x400
+#define SOCRAM_BANKINFO_STDBY_TIMER 0x800
+
+/* bankinfo rev >= 10 */
+#define SOCRAM_BANKINFO_DEVRAMSEL_SHIFT 13
+#define SOCRAM_BANKINFO_DEVRAMSEL_MASK 0x2000
+#define SOCRAM_BANKINFO_DEVRAMPRO_SHIFT 14
+#define SOCRAM_BANKINFO_DEVRAMPRO_MASK 0x4000
+#define SOCRAM_BANKINFO_SLPSUPP_SHIFT 15
+#define SOCRAM_BANKINFO_SLPSUPP_MASK 0x8000
+#define SOCRAM_BANKINFO_RETNTRAM_SHIFT 16
+#define SOCRAM_BANKINFO_RETNTRAM_MASK 0x00010000
+#define SOCRAM_BANKINFO_PDASZ_SHIFT 17
+#define SOCRAM_BANKINFO_PDASZ_MASK 0x003E0000
+#define SOCRAM_BANKINFO_DEVRAMREMAP_SHIFT 24
+#define SOCRAM_BANKINFO_DEVRAMREMAP_MASK 0x01000000
+
+/* extracoreinfo register */
+#define SOCRAM_DEVRAMBANK_MASK 0xF000
+#define SOCRAM_DEVRAMBANK_SHIFT 12
+
+/* bank info to calculate bank size */
+#define SOCRAM_BANKINFO_SZBASE 8192
+#define SOCRAM_BANKSIZE_SHIFT 13 /* SOCRAM_BANKINFO_SZBASE */
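+
+/*
+ * Illustrative sketch (an assumption, mirroring how such bankinfo fields
+ * are commonly consumed): for corerev >= 8 the bank size follows from
+ *
+ *   uint32 banksize = ((bankinfo & SOCRAM_BANKINFO_SZMASK) + 1)
+ *                     << SOCRAM_BANKSIZE_SHIFT;
+ *
+ * i.e. (szfield + 1) * SOCRAM_BANKINFO_SZBASE bytes.
+ */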
+
+#endif /* _SBSOCRAM_H */
diff --git a/bcmdhd.101.10.361.x/include/sbsprom.h b/bcmdhd.101.10.361.x/include/sbsprom.h
new file mode 100755
index 0000000..f43da2d
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/sbsprom.h
@@ -0,0 +1,236 @@
+/*
+ * SPROM format definitions for the Broadcom 47xx and 43xx chip family.
+ * Broadcom Proprietary and Confidential. Copyright (C) 2020,
+ * All Rights Reserved.
+ *
+ * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom;
+ * the contents of this file may not be disclosed to third parties,
+ * copied or duplicated in any form, in whole or in part, without
+ * the prior written permission of Broadcom.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Proprietary:>>
+ */
+
+#ifndef _SBSPROM_H
+#define _SBSPROM_H
+
+#include "typedefs.h"
+#include "bcmdevs.h"
+
+/* A word is this many bytes */
+#define SRW 2
+
+/* offset into PCI config space for write enable bit */
+#define CFG_SROM_WRITABLE_OFFSET 0x88
+#define SROM_WRITEABLE 0x10
+
+/* enumeration space consists of N contiguous 4Kbyte core register sets */
+#define SBCORES_BASE 0x18000000
+#define SBCORES_EACH 0x1000
+
+/* offset from BAR0 for srom space */
+#define SROM_BASE 4096
+
+/* number of 2-byte words in srom */
+#define SROM_SIZE 64
+
+#define SROM_BYTES (SROM_SIZE * SRW)
+
+#define MAX_FN 4
+
+/* Word 0, Hardware control */
+#define SROM_HWCTL 0
+#define HW_FUNMSK 0x000f
+#define HW_FCLK 0x0200
+#define HW_CBM 0x0400
+#define HW_PIMSK 0xf000
+#define HW_PISHIFT 12
+#define HW_PI4402 0x2
+#define HW_FUN4401 0x0001
+#define HW_FCLK4402 0x0000
+
+/* Word 1, common-power/boot-rom */
+#define SROM_COMMPW 1
+/* boot rom present bit */
+#define BR_PRESSHIFT 8
+/* 15:9 for n; boot rom size is 2^(14 + n) bytes */
+#define BR_SIZESHIFT 9
+
+/* Word 2, SubsystemId */
+#define SROM_SSID 2
+
+/* Word 3, VendorId */
+#define SROM_VID 3
+
+/* Function 0 info, function info length */
+#define SROM_FN0 4
+#define SROM_FNSZ 8
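+
+/*
+ * Worked example (illustrative): each function's info block is SROM_FNSZ
+ * words long, so function n's deviceID word is at
+ * SROM_FN0 + n * SROM_FNSZ + SRFN_DID, e.g. word 20 for function 2.
+ */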
+
+/* Within each function: */
+/* Word 0, deviceID */
+#define SRFN_DID 0
+
+/* Words 1-2, ClassCode */
+#define SRFN_CCL 1
+/* Word 2, D0 Power */
+#define SRFN_CCHD0 2
+
+/* Word 3, PME and D1D2D3 power */
+#define SRFN_PMED123 3
+
+#define PME_IL 0
+#define PME_ENET0 1
+#define PME_ENET1 2
+#define PME_CODEC 3
+
+#define PME_4402_ENET 0
+#define PME_4402_CODEC 1
+#define PMEREP_4402_ENET (PMERD3CV | PMERD3CA | PMERD3H | PMERD2 | PMERD1 | PMERD0 | PME)
+
+/* Word 4, Bar1 enable, pme reports */
+#define SRFN_B1PMER 4
+#define B1E 1
+#define B1SZMSK 0xe
+#define B1SZSH 1
+#define PMERMSK 0x0ff0
+#define PME 0x0010
+#define PMERD0 0x0020
+#define PMERD1 0x0040
+#define PMERD2 0x0080
+#define PMERD3H 0x0100
+#define PMERD3CA 0x0200
+#define PMERD3CV 0x0400
+#define IGNCLKRR 0x0800
+#define B0LMSK 0xf000
+
+/* Words 4-5, Bar0 Sonics value */
+#define SRFN_B0H 5
+/* Words 6-7, CIS Pointer */
+#define SRFN_CISL 6
+#define SRFN_CISH 7
+
+/* Words 36-38: iLine MAC address */
+#define SROM_I_MACHI 36
+#define SROM_I_MACMID 37
+#define SROM_I_MACLO 38
+
+/* Words 36-38: wireless0 MAC address on 43xx */
+#define SROM_W0_MACHI 36
+#define SROM_W0_MACMID 37
+#define SROM_W0_MACLO 38
+
+/* Words 39-41: enet0 MAC address */
+#define SROM_E0_MACHI 39
+#define SROM_E0_MACMID 40
+#define SROM_E0_MACLO 41
+
+/* Words 42-44: enet1 MAC address */
+#define SROM_E1_MACHI 42
+#define SROM_E1_MACMID 43
+#define SROM_E1_MACLO 44
+
+#define SROM_EPHY 45
+
+/* Words 47-51 wl0 PA bx */
+#define SROM_WL0_PAB0 47
+#define SROM_WL0_PAB1 48
+#define SROM_WL0_PAB2 49
+#define SROM_WL0_PAB3 50
+#define SROM_WL0_PAB4 51
+
+/* Word 52: wl0/wl1 MaxPower */
+#define SROM_WL_MAXPWR 52
+
+/* Words 53-55 wl1 PA bx */
+#define SROM_WL1_PAB0 53
+#define SROM_WL1_PAB1 54
+#define SROM_WL1_PAB2 55
+
+/* Word 56: itt */
+#define SROM_ITT 56
+
+/* Words 59-62: OEM Space */
+#define SROM_WL_OEM 59
+#define SROM_OEM_SIZE 4
+
+/* Contents for the srom */
+
+#define BU4710_SSID 0x0400
+#define VSIM4710_SSID 0x0401
+#define QT4710_SSID 0x0402
+
+#define BU4610_SSID 0x0403
+#define VSIM4610_SSID 0x0404
+
+#define BU4402_SSID 0x4402
+
+#define CLASS_OTHER 0x8000
+#define CLASS_ETHER 0x0000
+#define CLASS_NET 0x0002
+#define CLASS_COMM 0x0007
+#define CLASS_MODEM 0x0300
+#define CLASS_MIPS 0x3000
+#define CLASS_PROC 0x000b
+#define CLASS_FLASH 0x0100
+#define CLASS_MEM 0x0005
+#define CLASS_SERIALBUS 0x000c
+#define CLASS_OHCI 0x0310
+
+/* Broadcom IEEE MAC addresses are 00:90:4c:xx:xx:xx */
+#define MACHI 0x90
+
+#define MACMID_BU4710I 0x4c17
+#define MACMID_BU4710E0 0x4c18
+#define MACMID_BU4710E1 0x4c19
+
+#define MACMID_94710R1I 0x4c1a
+#define MACMID_94710R1E0 0x4c1b
+#define MACMID_94710R1E1 0x4c1c
+
+#define MACMID_94710R4I 0x4c1d
+#define MACMID_94710R4E0 0x4c1e
+#define MACMID_94710R4E1 0x4c1f
+
+#define MACMID_94710DEVI 0x4c20
+#define MACMID_94710DEVE0 0x4c21
+#define MACMID_94710DEVE1 0x4c22
+
+#define MACMID_BU4402 0x4c23
+
+#define MACMID_BU4610I 0x4c24
+#define MACMID_BU4610E0 0x4c25
+#define MACMID_BU4610E1 0x4c26
+
+#define MACMID_BU4401 0x4c37
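+
+/*
+ * Illustrative sketch (assuming each word stores its two octets
+ * high-byte-first, consistent with the 00:90:4c OUI note above): the
+ * BU4402 address assembles as 00:90:4c:23:xx:xx, e.g.
+ *
+ *   mac[0] = (MACHI >> 8) & 0xff;           // 0x00
+ *   mac[1] = MACHI & 0xff;                  // 0x90
+ *   mac[2] = (MACMID_BU4402 >> 8) & 0xff;   // 0x4c
+ *   mac[3] = MACMID_BU4402 & 0xff;          // 0x23
+ */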
+
+/* Enet phy settings: one or two single phys, or a dual phy */
+/* Bits 4-0 : MII address for enet0 (0x1f for not there) */
+/* Bits 9-5 : MII address for enet1 (0x1f for not there) */
+/* Bit 14 : Mdio for enet0 */
+/* Bit 15 : Mdio for enet1 */
+
+/* bu4710 with only one phy on enet1 with address 7: */
+#define SROM_EPHY_ONE 0x80ff
+
+/* bu4710 with two individual phys, at 6 and 7, */
+/* each mdio connected to its own mac: */
+#define SROM_EPHY_TWO 0x80e6
+
+/* bu4710 with a dual phy addresses 0 & 1, mdio-connected to enet0 */
+/* bringup board has phyaddr0 and phyaddr1 swapped */
+#define SROM_EPHY_DUAL 0x0001
+
+/* r1 board with a dual phy at 0, 1 (NOT swapped) and mdc0 */
+#define SROM_EPHY_R1 0x0010
+
+/* r4 board with a single phy on enet0 at address 5 and a switch */
+/* chip on enet1 (special case: 0x1e) */
+#define SROM_EPHY_R4 0x83e5
+
+/* 4402 uses an internal phy at phyaddr 1; want mdcport == coreunit == 0 */
+#define SROM_EPHY_INTERNAL 0x0001
+
+#define SROM_VERS 0x0001
+
+#endif /* _SBSPROM_H */
diff --git a/bcmdhd.101.10.361.x/include/sbsysmem.h b/bcmdhd.101.10.361.x/include/sbsysmem.h
new file mode 100755
index 0000000..5c86c0c
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/sbsysmem.h
@@ -0,0 +1,191 @@
+/*
+ * SiliconBackplane System Memory core
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _SBSYSMEM_H
+#define _SBSYSMEM_H
+
+#ifndef _LANGUAGE_ASSEMBLY
+
+/* cpp contortions to concatenate w/arg prescan */
+#ifndef PAD
+#define _PADLINE(line) pad ## line
+#define _XSTR(line) _PADLINE(line)
+#define PAD _XSTR(__LINE__)
+#endif /* PAD */
+
+/* sysmem core registers */
+typedef volatile struct sysmemregs {
+ uint32 coreinfo;
+ uint32 bwalloc;
+ uint32 extracoreinfo;
+ uint32 biststat;
+ uint32 bankidx;
+ uint32 standbyctrl;
+
+ uint32 errlogstatus;
+ uint32 errlogaddr;
+
+ uint32 cambankidx;
+ uint32 cambankstandbyctrl;
+ uint32 cambankpatchctrl;
+ uint32 cambankpatchtblbaseaddr;
+ uint32 cambankcmdreg;
+ uint32 cambankdatareg;
+ uint32 standbywait;
+ uint32 PAD[1];
+ uint32 bankinfo;
+ uint32 PAD[7];
+ uint32 region_n_regs[32];
+ uint32 initiat_n_masks[31];
+ uint32 PAD[1];
+ uint32 mpucontrol;
+ uint32 mpucapabilities;
+ uint32 PAD[31];
+ uint32 workaround;
+ uint32 pwrctl;
+ uint32 PAD[133];
+ uint32 sr_control;
+ uint32 sr_status;
+ uint32 sr_address;
+ uint32 sr_data;
+} sysmemregs_t;
+
+/* bus MPU region count mask of sysmemregs_t->mpucapabilities */
+#define ACC_MPU_REGION_CNT_MASK 0x7u
+/* bus MPU disable mask of sysmemregs_t->mpucontrol */
+#define BUSMPU_DISABLE_MASK 0xfu
+
+#endif /* _LANGUAGE_ASSEMBLY */
+
+/* Register offsets */
+#define SR_COREINFO 0x00
+#define SR_BWALLOC 0x04
+#define SR_BISTSTAT 0x0c
+#define SR_BANKINDEX 0x10
+#define SR_BANKSTBYCTL 0x14
+#define SR_PWRCTL 0x1e8
+
+/* Coreinfo register */
+#define SRCI_PT_MASK 0x00070000 /* port type[18:16] */
+#define SRCI_PT_SHIFT 16
+/* port types : SRCI_PT_<processorPT>_<backplanePT> */
+#define SRCI_PT_OCP_OCP 0
+#define SRCI_PT_AXI_OCP 1
+#define SRCI_PT_ARM7AHB_OCP 2
+#define SRCI_PT_CM3AHB_OCP 3
+#define SRCI_PT_AXI_AXI 4
+#define SRCI_PT_AHB_AXI 5
+
+#define SRCI_LSS_MASK 0x00f00000
+#define SRCI_LSS_SHIFT 20
+#define SRCI_LRS_MASK 0x0f000000
+#define SRCI_LRS_SHIFT 24
+
+/* In corerev 0, the memory size is 2 to the power of
+ * (the base of 16, plus the contents of the memsize field, plus 1).
+ */
+#define SRCI_MS0_MASK 0xf
+#define SR_MS0_BASE 16
+
+/*
+ * In corerev 1 the bank size is 2 ^ (the bank size field plus 14);
+ * the memory size is the number of banks times the bank size.
+ * The same applies to the ROM size.
+ */
+#define SYSMEM_SRCI_ROMNB_MASK 0x3e0
+#define SYSMEM_SRCI_ROMNB_SHIFT 5
+#define SYSMEM_SRCI_SRNB_MASK 0x1f
+#define SYSMEM_SRCI_SRNB_SHIFT 0
+/* The above bits are obsolete and replaced by the ones below in rev 12 */
+#define SYSMEM_SRCI_NEW_ROMNB_MASK 0xff000000u
+#define SYSMEM_SRCI_NEW_ROMNB_SHIFT 24u
+#define SYSMEM_SRCI_NEW_SRNB_MASK 0xff0000u
+#define SYSMEM_SRCI_NEW_SRNB_SHIFT 16u
+
+/* Standby control register */
+#define SRSC_SBYOVR_MASK 0x80000000
+#define SRSC_SBYOVR_SHIFT 31
+#define SRSC_SBYOVRVAL_MASK 0x60000000
+#define SRSC_SBYOVRVAL_SHIFT 29
+#define SRSC_SBYEN_MASK 0x01000000
+#define SRSC_SBYEN_SHIFT 24
+
+/* Power control register */
+#define SRPC_PMU_STBYDIS_MASK 0x00000010
+#define SRPC_PMU_STBYDIS_SHIFT 4
+#define SRPC_STBYOVRVAL_MASK 0x00000008
+#define SRPC_STBYOVRVAL_SHIFT 3
+#define SRPC_STBYOVR_MASK 0x00000007
+#define SRPC_STBYOVR_SHIFT 0
+
+/* Extra core capability register */
+#define SRECC_NUM_BANKS_MASK 0x000000F0
+#define SRECC_NUM_BANKS_SHIFT 4
+#define SRECC_BANKSIZE_MASK 0x0000000F
+#define SRECC_BANKSIZE_SHIFT 0
+
+#define SRECC_BANKSIZE(value) (1 << (value))
+
+/* CAM bank patch control */
+#define SRCBPC_PATCHENABLE 0x80000000
+
+#define SRP_ADDRESS 0x0001FFFC
+#define SRP_VALID 0x8000
+
+/* CAM bank command reg */
+#define SRCMD_WRITE 0x00020000
+#define SRCMD_READ 0x00010000
+#define SRCMD_DONE 0x80000000
+
+#define SRCMD_DONE_DLY 1000
+
+/* bankidx and bankinfo reg defines */
+#define SYSMEM_BANKINFO_SZMASK 0x7f
+#define SYSMEM_BANKIDX_ROM_MASK 0x80
+
+#define SYSMEM_BANKINFO_REG 0x40
+#define SYSMEM_BANKIDX_REG 0x10
+#define SYSMEM_BANKINFO_STDBY_MASK 0x200
+#define SYSMEM_BANKINFO_STDBY_TIMER 0x400
+
+#define SYSMEM_BANKINFO_SLPSUPP_SHIFT 14
+#define SYSMEM_BANKINFO_SLPSUPP_MASK 0x4000
+#define SYSMEM_BANKINFO_PDASZ_SHIFT 16
+#define SYSMEM_BANKINFO_PDASZ_MASK 0x001F0000
+
+/* extracoreinfo register */
+#define SYSMEM_DEVRAMBANK_MASK 0xF000
+#define SYSMEM_DEVRAMBANK_SHIFT 12
+
+/* bank info to calculate bank size */
+#define SYSMEM_BANKINFO_SZBASE 8192
+#define SYSMEM_BANKSIZE_SHIFT 13 /* SYSMEM_BANKINFO_SZBASE */
+
+/* standbycontrol register default values */
+#define SYSMEM_SBYCNTRL_TIMEVAL 0x100000u /* standbycontrol timeval[23:0] */
+#define SYSMEM_SBYCNTRL_TIMEVAL_MASK 0xffffffu
+
+/* sbywaitcycle register default values (sysmem rev 8) */
+#define SYSMEM_SBYWAIT_RAM_TIMEVAL 0xau /* RAM memory access after standby exit */
+
+#endif /* _SBSYSMEM_H */
diff --git a/bcmdhd.101.10.361.x/include/sdio.h b/bcmdhd.101.10.361.x/include/sdio.h
new file mode 100755
index 0000000..b0343f0
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/sdio.h
@@ -0,0 +1,644 @@
+/*
+ * SDIO spec header file
+ * Protocol and standard (common) device definitions
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _SDIO_H
+#define _SDIO_H
+
+#ifdef BCMSDIO
+/*
+ * Standard SD Device Register Map.
+ *
+ * Reference definitions from:
+ * SD Specifications, Part E1: SDIO Specification
+ * Version 1.10
+ * August 18, 2004
+ * http://www.sdcard.org
+ *
+ * EXCEPTION: The speed_control register defined here is based on a
+ * draft of the next version, and is thus nonstandard.
+ */
+
+/* CCCR structure for function 0 */
+typedef volatile struct {
+ uint8 cccr_sdio_rev; /* RO, cccr and sdio revision */
+ uint8 sd_rev; /* RO, sd spec revision */
+ uint8 io_en; /* I/O enable */
+ uint8 io_rdy; /* I/O ready reg */
+ uint8 intr_ctl; /* Master and per function interrupt enable control */
+ uint8 intr_status; /* RO, interrupt pending status */
+ uint8 io_abort; /* read/write abort or reset all functions */
+ uint8 bus_inter; /* bus interface control */
+ uint8 capability; /* RO, card capability */
+
+ uint8 cis_base_low; /* 0x9 RO, common CIS base address, LSB */
+ uint8 cis_base_mid;
+ uint8 cis_base_high; /* 0xB RO, common CIS base address, MSB */
+
+ /* suspend/resume registers */
+ uint8 bus_suspend; /* 0xC */
+ uint8 func_select; /* 0xD */
+ uint8 exec_flag; /* 0xE */
+ uint8 ready_flag; /* 0xF */
+
+ uint8 fn0_blk_size[2]; /* 0x10(LSB), 0x11(MSB) */
+
+ uint8 power_control; /* 0x12 (SDIO version 1.10) */
+
+ uint8 speed_control; /* 0x13 */
+} sdio_regs_t;
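+
+/*
+ * Illustrative sketch (not part of the original header): the 24-bit common
+ * CIS base address is split across three byte-wide registers and assembles
+ * as
+ *
+ *   uint32 cisptr = regs->cis_base_low |
+ *                   (regs->cis_base_mid << 8) |
+ *                   (regs->cis_base_high << 16);
+ */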
+
+/* SDIO Device CCCR offsets */
+#define SDIOD_CCCR_REV 0x00
+#define SDIOD_CCCR_SDREV 0x01
+#define SDIOD_CCCR_IOEN 0x02
+#define SDIOD_CCCR_IORDY 0x03
+#define SDIOD_CCCR_INTEN 0x04
+#define SDIOD_CCCR_INTPEND 0x05
+#define SDIOD_CCCR_IOABORT 0x06
+#define SDIOD_CCCR_BICTRL 0x07
+#define SDIOD_CCCR_CAPABLITIES 0x08
+#define SDIOD_CCCR_CISPTR_0 0x09
+#define SDIOD_CCCR_CISPTR_1 0x0A
+#define SDIOD_CCCR_CISPTR_2 0x0B
+#define SDIOD_CCCR_BUSSUSP 0x0C
+#define SDIOD_CCCR_FUNCSEL 0x0D
+#define SDIOD_CCCR_EXECFLAGS 0x0E
+#define SDIOD_CCCR_RDYFLAGS 0x0F
+#define SDIOD_CCCR_BLKSIZE_0 0x10
+#define SDIOD_CCCR_BLKSIZE_1 0x11
+#define SDIOD_CCCR_POWER_CONTROL 0x12
+#define SDIOD_CCCR_SPEED_CONTROL 0x13
+#define SDIOD_CCCR_UHSI_SUPPORT 0x14
+#define SDIOD_CCCR_DRIVER_STRENGTH 0x15
+#define SDIOD_CCCR_INTR_EXTN 0x16
+
+/* Broadcom extensions (corerev >= 1) */
+#define SDIOD_CCCR_BRCM_CARDCAP 0xf0
+#define SDIOD_CCCR_BRCM_CARDCAP_CMD14_SUPPORT 0x02
+#define SDIOD_CCCR_BRCM_CARDCAP_CMD14_EXT 0x04
+#define SDIOD_CCCR_BRCM_CARDCAP_CMD_NODEC 0x08
+#define SDIOD_CCCR_BRCM_CARDCTL 0xf1
+#define SDIOD_CCCR_BRCM_SEPINT 0xf2
+
+/* cccr_sdio_rev */
+#define SDIO_REV_SDIOID_MASK 0xf0 /* SDIO spec revision number */
+#define SDIO_REV_CCCRID_MASK 0x0f /* CCCR format version number */
+#define SDIO_SPEC_VERSION_3_0 0x40 /* SDIO spec version 3.0 */
+
+/* sd_rev */
+#define SD_REV_PHY_MASK 0x0f /* SD format version number */
+
+/* io_en */
+#define SDIO_FUNC_ENABLE_1 0x02 /* function 1 I/O enable */
+#define SDIO_FUNC_ENABLE_2 0x04 /* function 2 I/O enable */
+#if defined (BT_OVER_SDIO)
+#define SDIO_FUNC_ENABLE_3 0x08 /* function 3 I/O enable */
+#define SDIO_FUNC_DISABLE_3 0xF0 /* function 3 I/O disable */
+#endif /* defined (BT_OVER_SDIO) */
+
+/* io_rdy */
+#define SDIO_FUNC_READY_1 0x02 /* function 1 I/O ready */
+#define SDIO_FUNC_READY_2 0x04 /* function 2 I/O ready */
+
+/* intr_ctl */
+#define INTR_CTL_MASTER_EN 0x1 /* interrupt enable master */
+#define INTR_CTL_FUNC1_EN 0x2 /* interrupt enable for function 1 */
+#define INTR_CTL_FUNC2_EN 0x4 /* interrupt enable for function 2 */
+#if defined (BT_OVER_SDIO)
+#define INTR_CTL_FUNC3_EN 0x8 /* interrupt enable for function 3 */
+#endif /* defined (BT_OVER_SDIO) */
+/* intr_status */
+#define INTR_STATUS_FUNC1 0x2 /* interrupt pending for function 1 */
+#define INTR_STATUS_FUNC2 0x4 /* interrupt pending for function 2 */
+
+/* io_abort */
+#define IO_ABORT_RESET_ALL 0x08 /* I/O card reset */
+#define IO_ABORT_FUNC_MASK 0x07 /* abort selection: function x */
+
+/* bus_inter */
+#define BUS_CARD_DETECT_DIS 0x80 /* Card Detect disable */
+#define BUS_SPI_CONT_INTR_CAP 0x40 /* support continuous SPI interrupt */
+#define BUS_SPI_CONT_INTR_EN 0x20 /* continuous SPI interrupt enable */
+#define BUS_SD_DATA_WIDTH_MASK 0x03 /* bus width mask */
+#define BUS_SD_DATA_WIDTH_4BIT 0x02 /* bus width 4-bit mode */
+#define BUS_SD_DATA_WIDTH_1BIT 0x00 /* bus width 1-bit mode */
+
+/* capability */
+#define SDIO_CAP_4BLS 0x80 /* 4-bit support for low speed card */
+#define SDIO_CAP_LSC 0x40 /* low speed card */
+#define SDIO_CAP_E4MI 0x20 /* enable interrupt between block of data in 4-bit mode */
+#define SDIO_CAP_S4MI 0x10 /* support interrupt between block of data in 4-bit mode */
+#define SDIO_CAP_SBS 0x08 /* support suspend/resume */
+#define SDIO_CAP_SRW 0x04 /* support read wait */
+#define SDIO_CAP_SMB 0x02 /* support multi-block transfer */
+#define SDIO_CAP_SDC 0x01 /* Support Direct commands during multi-byte transfer */
+
+/* power_control */
+#define SDIO_POWER_SMPC 0x01 /* supports master power control (RO) */
+#define SDIO_POWER_EMPC 0x02 /* enable master power control (allow > 200mA) (RW) */
+
+/* speed_control (control device entry into high-speed clocking mode) */
+#define SDIO_SPEED_SHS 0x01 /* supports high-speed [clocking] mode (RO) */
+#define SDIO_SPEED_EHS 0x02 /* enable high-speed [clocking] mode (RW) */
+#define SDIO_SPEED_UHSI_DDR50 0x08
+
+/* for setting bus speed in card: 0x13h */
+#define SDIO_BUS_SPEED_UHSISEL_M BITFIELD_MASK(3)
+#define SDIO_BUS_SPEED_UHSISEL_S 1
+
+/* for getting bus speed cap in card: 0x14h */
+#define SDIO_BUS_SPEED_UHSICAP_M BITFIELD_MASK(3)
+#define SDIO_BUS_SPEED_UHSICAP_S 0
+
+/* for getting driver type CAP in card: 0x15h */
+#define SDIO_BUS_DRVR_TYPE_CAP_M BITFIELD_MASK(3)
+#define SDIO_BUS_DRVR_TYPE_CAP_S 0
+
+/* for setting driver type selection in card: 0x15h */
+#define SDIO_BUS_DRVR_TYPE_SEL_M BITFIELD_MASK(2)
+#define SDIO_BUS_DRVR_TYPE_SEL_S 4
+
+/* for getting async int support in card: 0x16h */
+#define SDIO_BUS_ASYNCINT_CAP_M BITFIELD_MASK(1)
+#define SDIO_BUS_ASYNCINT_CAP_S 0
+
+/* for setting async int selection in card: 0x16h */
+#define SDIO_BUS_ASYNCINT_SEL_M BITFIELD_MASK(1)
+#define SDIO_BUS_ASYNCINT_SEL_S 1
+
+/* brcm sepint */
+#define SDIO_SEPINT_MASK 0x01 /* route sdpcmdev intr onto separate pad (chip-specific) */
+#define SDIO_SEPINT_OE 0x02 /* 1 asserts output enable for above pad */
+#define SDIO_SEPINT_ACT_HI 0x04 /* use active high interrupt level instead of active low */
+
+/* FBR structure for function 1-7, FBR addresses and register offsets */
+typedef volatile struct {
+ uint8 devctr; /* device interface, CSA control */
+ uint8 ext_dev; /* extended standard I/O device type code */
+ uint8 pwr_sel; /* power selection support */
+ uint8 PAD[6]; /* reserved */
+
+ uint8 cis_low; /* CIS LSB */
+ uint8 cis_mid;
+ uint8 cis_high; /* CIS MSB */
+ uint8 csa_low; /* code storage area, LSB */
+ uint8 csa_mid;
+ uint8 csa_high; /* code storage area, MSB */
+ uint8 csa_dat_win; /* data access window to function */
+
+ uint8 fnx_blk_size[2]; /* block size, little endian */
+} sdio_fbr_t;
+
+/* Maximum number of I/O funcs */
+#define SDIOD_MAX_FUNCS 8
+#define SDIOD_MAX_IOFUNCS 7
+
+/* SDIO Device FBR Start Address */
+#define SDIOD_FBR_STARTADDR 0x100
+
+/* SDIO Device FBR Size */
+#define SDIOD_FBR_SIZE 0x100
+
+/* Macro to calculate FBR register base */
+#define SDIOD_FBR_BASE(n) ((n) * 0x100)
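+
+/*
+ * Worked example (illustrative): function 2's CIS pointer LSB lives at
+ * SDIOD_FBR_BASE(2) + SDIOD_FBR_CISPTR_0 = 0x200 + 0x09 = 0x209.
+ */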
+
+/* Function register offsets */
+#define SDIOD_FBR_DEVCTR 0x00 /* basic info for function */
+#define SDIOD_FBR_EXT_DEV 0x01 /* extended I/O device code */
+#define SDIOD_FBR_PWR_SEL 0x02 /* power selection bits */
+
+/* SDIO Function CIS ptr offset */
+#define SDIOD_FBR_CISPTR_0 0x09
+#define SDIOD_FBR_CISPTR_1 0x0A
+#define SDIOD_FBR_CISPTR_2 0x0B
+
+/* Code Storage Area pointer */
+#define SDIOD_FBR_CSA_ADDR_0 0x0C
+#define SDIOD_FBR_CSA_ADDR_1 0x0D
+#define SDIOD_FBR_CSA_ADDR_2 0x0E
+#define SDIOD_FBR_CSA_DATA 0x0F
+
+/* SDIO Function I/O Block Size */
+#define SDIOD_FBR_BLKSIZE_0 0x10
+#define SDIOD_FBR_BLKSIZE_1 0x11
+
+/* devctr */
+#define SDIOD_FBR_DEVCTR_DIC 0x0f /* device interface code */
+#define SDIOD_FBR_DECVTR_CSA 0x40 /* CSA support flag */
+#define SDIOD_FBR_DEVCTR_CSA_EN 0x80 /* CSA enabled */
+/* interface codes */
+#define SDIOD_DIC_NONE 0 /* SDIO standard interface is not supported */
+#define SDIOD_DIC_UART 1
+#define SDIOD_DIC_BLUETOOTH_A 2
+#define SDIOD_DIC_BLUETOOTH_B 3
+#define SDIOD_DIC_GPS 4
+#define SDIOD_DIC_CAMERA 5
+#define SDIOD_DIC_PHS 6
+#define SDIOD_DIC_WLAN 7
+#define SDIOD_DIC_EXT 0xf /* extended device interface, read ext_dev register */
+
+/* pwr_sel */
+#define SDIOD_PWR_SEL_SPS 0x01 /* supports power selection */
+#define SDIOD_PWR_SEL_EPS 0x02 /* enable power selection (low-current mode) */
+
+/* misc defines */
+#define SDIO_FUNC_0 0
+#define SDIO_FUNC_1 1
+#define SDIO_FUNC_2 2
+#define SDIO_FUNC_4 4
+#define SDIO_FUNC_5 5
+#define SDIO_FUNC_6 6
+#define SDIO_FUNC_7 7
+
+#define SD_CARD_TYPE_UNKNOWN 0 /* bad type or unrecognized */
+#define SD_CARD_TYPE_IO 1 /* IO only card */
+#define SD_CARD_TYPE_MEMORY 2 /* memory only card */
+#define SD_CARD_TYPE_COMBO 3 /* IO and memory combo card */
+
+#define SDIO_MAX_BLOCK_SIZE 2048 /* maximum block size for block mode operation */
+#define SDIO_MIN_BLOCK_SIZE 1 /* minimum block size for block mode operation */
+
+/* Card registers: status bit position */
+#define CARDREG_STATUS_BIT_OUTOFRANGE 31
+#define CARDREG_STATUS_BIT_COMCRCERROR 23
+#define CARDREG_STATUS_BIT_ILLEGALCOMMAND 22
+#define CARDREG_STATUS_BIT_ERROR 19
+#define CARDREG_STATUS_BIT_IOCURRENTSTATE3 12
+#define CARDREG_STATUS_BIT_IOCURRENTSTATE2 11
+#define CARDREG_STATUS_BIT_IOCURRENTSTATE1 10
+#define CARDREG_STATUS_BIT_IOCURRENTSTATE0 9
+#define CARDREG_STATUS_BIT_FUN_NUM_ERROR 4
+
+/* ----------------------------------------------------
+ * SDIO Protocol Definitions -- commands and responses
+ *
+ * Reference definitions from SDIO Specification v1.10
+ * of August 18, 2004; and SD Physical Layer v1.10 of
+ * October 15, 2004.
+ * ----------------------------------------------------
+ */
+
+/* Straight defines, mostly used by older driver(s). */
+
+#define SD_CMD_GO_IDLE_STATE 0 /* mandatory for SDIO */
+#define SD_CMD_SEND_OPCOND 1
+#define SD_CMD_MMC_SET_RCA 3
+#define SD_CMD_IO_SEND_OP_COND 5 /* mandatory for SDIO */
+#define SD_CMD_SELECT_DESELECT_CARD 7
+#define SD_CMD_SEND_CSD 9
+#define SD_CMD_SEND_CID 10
+#define SD_CMD_STOP_TRANSMISSION 12
+#define SD_CMD_SEND_STATUS 13
+#define SD_CMD_GO_INACTIVE_STATE 15
+#define SD_CMD_SET_BLOCKLEN 16
+#define SD_CMD_READ_SINGLE_BLOCK 17
+#define SD_CMD_READ_MULTIPLE_BLOCK 18
+#define SD_CMD_WRITE_BLOCK 24
+#define SD_CMD_WRITE_MULTIPLE_BLOCK 25
+#define SD_CMD_PROGRAM_CSD 27
+#define SD_CMD_SET_WRITE_PROT 28
+#define SD_CMD_CLR_WRITE_PROT 29
+#define SD_CMD_SEND_WRITE_PROT 30
+#define SD_CMD_ERASE_WR_BLK_START 32
+#define SD_CMD_ERASE_WR_BLK_END 33
+#define SD_CMD_ERASE 38
+#define SD_CMD_LOCK_UNLOCK 42
+#define SD_CMD_IO_RW_DIRECT 52 /* mandatory for SDIO */
+#define SD_CMD_IO_RW_EXTENDED 53 /* mandatory for SDIO */
+#define SD_CMD_APP_CMD 55
+#define SD_CMD_GEN_CMD 56
+#define SD_CMD_READ_OCR 58
+#define SD_CMD_CRC_ON_OFF 59 /* mandatory for SDIO */
+#define SD_ACMD_SD_STATUS 13
+#define SD_ACMD_SEND_NUM_WR_BLOCKS 22
+#define SD_ACMD_SET_WR_BLOCK_ERASE_CNT 23
+#define SD_ACMD_SD_SEND_OP_COND 41
+#define SD_ACMD_SET_CLR_CARD_DETECT 42
+#define SD_ACMD_SEND_SCR 51
+
+/* argument for SD_CMD_IO_RW_DIRECT and SD_CMD_IO_RW_EXTENDED */
+#define SD_IO_OP_READ 0 /* Read_Write: Read */
+#define SD_IO_OP_WRITE 1 /* Read_Write: Write */
+#define SD_IO_RW_NORMAL 0 /* no RAW */
+#define SD_IO_RW_RAW 1 /* RAW */
+#define SD_IO_BYTE_MODE 0 /* Byte Mode */
+#define SD_IO_BLOCK_MODE 1 /* BlockMode */
+#define SD_IO_FIXED_ADDRESS 0 /* fixed address */
+#define SD_IO_INCREMENT_ADDRESS 1 /* IncrementAddress */
+
+/* build SD_CMD_IO_RW_DIRECT Argument */
+#define SDIO_IO_RW_DIRECT_ARG(rw, raw, func, addr, data) \
+ ((((rw) & 1) << 31) | (((func) & 0x7) << 28) | (((raw) & 1) << 27) | \
+ (((addr) & 0x1FFFF) << 9) | ((data) & 0xFF))
+
+/* build SD_CMD_IO_RW_EXTENDED Argument */
+#define SDIO_IO_RW_EXTENDED_ARG(rw, blk, func, addr, inc_addr, count) \
+ ((((rw) & 1) << 31) | (((func) & 0x7) << 28) | (((blk) & 1) << 27) | \
+ (((inc_addr) & 1) << 26) | (((addr) & 0x1FFFF) << 9) | ((count) & 0x1FF))
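+
+/*
+ * Illustrative sketch (not part of the original header): a CMD52 argument
+ * that reads function 0's interrupt-pending CCCR register is built as
+ *
+ *   uint32 arg52 = SDIO_IO_RW_DIRECT_ARG(SD_IO_OP_READ, SD_IO_RW_NORMAL,
+ *                                        SDIO_FUNC_0, SDIOD_CCCR_INTPEND, 0);
+ *
+ * and a 64-byte, byte-mode, incrementing-address CMD53 read from function 2
+ * at address 0x8000 as
+ *
+ *   uint32 arg53 = SDIO_IO_RW_EXTENDED_ARG(SD_IO_OP_READ, SD_IO_BYTE_MODE,
+ *                                          SDIO_FUNC_2, 0x8000,
+ *                                          SD_IO_INCREMENT_ADDRESS, 64);
+ */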
+
+/* SDIO response parameters */
+#define SD_RSP_NO_NONE 0
+#define SD_RSP_NO_1 1
+#define SD_RSP_NO_2 2
+#define SD_RSP_NO_3 3
+#define SD_RSP_NO_4 4
+#define SD_RSP_NO_5 5
+#define SD_RSP_NO_6 6
+
+ /* Modified R6 response (to CMD3) */
+#define SD_RSP_MR6_COM_CRC_ERROR 0x8000
+#define SD_RSP_MR6_ILLEGAL_COMMAND 0x4000
+#define SD_RSP_MR6_ERROR 0x2000
+
+ /* Modified R1 in R4 Response (to CMD5) */
+#define SD_RSP_MR1_SBIT 0x80
+#define SD_RSP_MR1_PARAMETER_ERROR 0x40
+#define SD_RSP_MR1_RFU5 0x20
+#define SD_RSP_MR1_FUNC_NUM_ERROR 0x10
+#define SD_RSP_MR1_COM_CRC_ERROR 0x08
+#define SD_RSP_MR1_ILLEGAL_COMMAND 0x04
+#define SD_RSP_MR1_RFU1 0x02
+#define SD_RSP_MR1_IDLE_STATE 0x01
+
+ /* R5 response (to CMD52 and CMD53) */
+#define SD_RSP_R5_COM_CRC_ERROR 0x80
+#define SD_RSP_R5_ILLEGAL_COMMAND 0x40
+#define SD_RSP_R5_IO_CURRENTSTATE1 0x20
+#define SD_RSP_R5_IO_CURRENTSTATE0 0x10
+#define SD_RSP_R5_ERROR 0x08
+#define SD_RSP_R5_RFU 0x04
+#define SD_RSP_R5_FUNC_NUM_ERROR 0x02
+#define SD_RSP_R5_OUT_OF_RANGE 0x01
+
+#define SD_RSP_R5_ERRBITS 0xCB
+
+/* Mask/shift form, commonly used in newer driver(s) */
+
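+/*
+ * Illustrative sketch (an assumption: accessor macros of this shape are
+ * used elsewhere in this driver; shown only to make the _M/_S convention
+ * concrete):
+ *
+ *   #define GFIELD(val, field)  (((val) >> field ## _S) & field ## _M)
+ *
+ *   uint8 flags = GFIELD(rsp5, RSP5_FLAGS);   // extract R5 response flags
+ */
+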
+/* ------------------------------------------------
+ * SDIO Commands and responses
+ *
+ * I/O only commands are:
+ * CMD0, CMD3, CMD5, CMD7, CMD14, CMD15, CMD52, CMD53
+ * ------------------------------------------------
+ */
+
+/* SDIO Commands */
+#define SDIOH_CMD_0 0
+#define SDIOH_CMD_3 3
+#define SDIOH_CMD_5 5
+#define SDIOH_CMD_7 7
+#define SDIOH_CMD_11 11
+#define SDIOH_CMD_14 14
+#define SDIOH_CMD_15 15
+#define SDIOH_CMD_19 19
+#define SDIOH_CMD_52 52
+#define SDIOH_CMD_53 53
+#define SDIOH_CMD_59 59
+
+/* SDIO Command Responses */
+#define SDIOH_RSP_NONE 0
+#define SDIOH_RSP_R1 1
+#define SDIOH_RSP_R2 2
+#define SDIOH_RSP_R3 3
+#define SDIOH_RSP_R4 4
+#define SDIOH_RSP_R5 5
+#define SDIOH_RSP_R6 6
+
+/*
+ * SDIO Response Error flags
+ */
+#define SDIOH_RSP5_ERROR_FLAGS 0xCB
+
+/* ------------------------------------------------
+ * SDIO Command structures. I/O only commands are:
+ *
+ * CMD0, CMD3, CMD5, CMD7, CMD15, CMD52, CMD53
+ * ------------------------------------------------
+ */
+
+#define CMD5_OCR_M BITFIELD_MASK(24)
+#define CMD5_OCR_S 0
+
+#define CMD5_S18R_M BITFIELD_MASK(1)
+#define CMD5_S18R_S 24
+
+#define CMD7_RCA_M BITFIELD_MASK(16)
+#define CMD7_RCA_S 16
+
+#define CMD14_RCA_M BITFIELD_MASK(16)
+#define CMD14_RCA_S 16
+#define CMD14_SLEEP_M BITFIELD_MASK(1)
+#define CMD14_SLEEP_S 15
+
+#define CMD_15_RCA_M BITFIELD_MASK(16)
+#define CMD_15_RCA_S 16
+
+#define CMD52_DATA_M BITFIELD_MASK(8) /* Bits [7:0] - Write Data/Stuff bits of CMD52
+ */
+#define CMD52_DATA_S 0
+#define CMD52_REG_ADDR_M BITFIELD_MASK(17) /* Bits [25:9] - register address */
+#define CMD52_REG_ADDR_S 9
+#define CMD52_RAW_M BITFIELD_MASK(1) /* Bit 27 - Read after Write flag */
+#define CMD52_RAW_S 27
+#define CMD52_FUNCTION_M BITFIELD_MASK(3) /* Bits [30:28] - Function number */
+#define CMD52_FUNCTION_S 28
+#define CMD52_RW_FLAG_M BITFIELD_MASK(1) /* Bit 31 - R/W flag */
+#define CMD52_RW_FLAG_S 31
+
+#define CMD53_BYTE_BLK_CNT_M BITFIELD_MASK(9) /* Bits [8:0] - Byte/Block Count of CMD53 */
+#define CMD53_BYTE_BLK_CNT_S 0
+#define CMD53_REG_ADDR_M BITFIELD_MASK(17) /* Bits [25:9] - register address */
+#define CMD53_REG_ADDR_S 9
+#define CMD53_OP_CODE_M BITFIELD_MASK(1) /* Bit 26 - R/W Operation Code */
+#define CMD53_OP_CODE_S 26
+#define CMD53_BLK_MODE_M BITFIELD_MASK(1) /* Bit 27 - Block Mode */
+#define CMD53_BLK_MODE_S 27
+#define CMD53_FUNCTION_M BITFIELD_MASK(3) /* Bits [30:28] - Function number */
+#define CMD53_FUNCTION_S 28
+#define CMD53_RW_FLAG_M BITFIELD_MASK(1) /* Bit 31 - R/W flag */
+#define CMD53_RW_FLAG_S 31
+
+/* ------------------------------------------------------
+ * SDIO Command Response structures for SD1 and SD4 modes
+ * -----------------------------------------------------
+ */
+#define RSP4_IO_OCR_M BITFIELD_MASK(24) /* Bits [23:0] - Card's OCR Bits [23:0] */
+#define RSP4_IO_OCR_S 0
+
+#define RSP4_S18A_M BITFIELD_MASK(1) /* Bits [23:0] - Card's OCR Bits [23:0] */
+#define RSP4_S18A_S 24
+
+#define RSP4_STUFF_M BITFIELD_MASK(3) /* Bits [26:24] - Stuff bits */
+#define RSP4_STUFF_S 24
+#define RSP4_MEM_PRESENT_M BITFIELD_MASK(1) /* Bit 27 - Memory present */
+#define RSP4_MEM_PRESENT_S 27
+#define RSP4_NUM_FUNCS_M BITFIELD_MASK(3) /* Bits [30:28] - Number of I/O funcs */
+#define RSP4_NUM_FUNCS_S 28
+#define RSP4_CARD_READY_M BITFIELD_MASK(1) /* Bit 31 - SDIO card ready */
+#define RSP4_CARD_READY_S 31
+
+#define RSP6_STATUS_M BITFIELD_MASK(16) /* Bits [15:0] - Card status bits [19,22,23,12:0]
+ */
+#define RSP6_STATUS_S 0
+#define RSP6_IO_RCA_M BITFIELD_MASK(16) /* Bits [31:16] - RCA bits[31-16] */
+#define RSP6_IO_RCA_S 16
+
+#define RSP1_AKE_SEQ_ERROR_M BITFIELD_MASK(1) /* Bit 3 - Authentication seq error */
+#define RSP1_AKE_SEQ_ERROR_S 3
+#define RSP1_APP_CMD_M BITFIELD_MASK(1) /* Bit 5 - Card expects ACMD */
+#define RSP1_APP_CMD_S 5
+#define RSP1_READY_FOR_DATA_M BITFIELD_MASK(1) /* Bit 8 - Ready for data (buff empty) */
+#define RSP1_READY_FOR_DATA_S 8
+#define RSP1_CURR_STATE_M BITFIELD_MASK(4) /* Bits [12:9] - State of card
+ * when Cmd was received
+ */
+#define RSP1_CURR_STATE_S 9
+#define RSP1_EARSE_RESET_M BITFIELD_MASK(1) /* Bit 13 - Erase seq cleared */
+#define RSP1_EARSE_RESET_S 13
+#define RSP1_CARD_ECC_DISABLE_M BITFIELD_MASK(1) /* Bit 14 - Card ECC disabled */
+#define RSP1_CARD_ECC_DISABLE_S 14
+#define RSP1_WP_ERASE_SKIP_M BITFIELD_MASK(1) /* Bit 15 - Partial blocks erased due to W/P */
+#define RSP1_WP_ERASE_SKIP_S 15
+#define RSP1_CID_CSD_OVERW_M BITFIELD_MASK(1) /* Bit 16 - Illegal write to CID or R/O bits
+ * of CSD
+ */
+#define RSP1_CID_CSD_OVERW_S 16
+#define RSP1_ERROR_M BITFIELD_MASK(1) /* Bit 19 - General/Unknown error */
+#define RSP1_ERROR_S 19
+#define RSP1_CC_ERROR_M BITFIELD_MASK(1) /* Bit 20 - Internal Card Control error */
+#define RSP1_CC_ERROR_S 20
+#define RSP1_CARD_ECC_FAILED_M BITFIELD_MASK(1) /* Bit 21 - Card internal ECC failed
+ * to correct data
+ */
+#define RSP1_CARD_ECC_FAILED_S 21
+#define RSP1_ILLEGAL_CMD_M BITFIELD_MASK(1) /* Bit 22 - Cmd not legal for the card state */
+#define RSP1_ILLEGAL_CMD_S 22
+#define RSP1_COM_CRC_ERROR_M BITFIELD_MASK(1) /* Bit 23 - CRC check of previous command failed
+ */
+#define RSP1_COM_CRC_ERROR_S 23
+#define RSP1_LOCK_UNLOCK_FAIL_M BITFIELD_MASK(1) /* Bit 24 - Card lock-unlock Cmd Seq error */
+#define RSP1_LOCK_UNLOCK_FAIL_S 24
+#define RSP1_CARD_LOCKED_M BITFIELD_MASK(1) /* Bit 25 - Card locked by the host */
+#define RSP1_CARD_LOCKED_S 25
+#define RSP1_WP_VIOLATION_M BITFIELD_MASK(1) /* Bit 26 - Attempt to program
+ * write-protected blocks
+ */
+#define RSP1_WP_VIOLATION_S 26
+#define RSP1_ERASE_PARAM_M BITFIELD_MASK(1) /* Bit 27 - Invalid erase blocks */
+#define RSP1_ERASE_PARAM_S 27
+#define RSP1_ERASE_SEQ_ERR_M BITFIELD_MASK(1) /* Bit 28 - Erase Cmd seq error */
+#define RSP1_ERASE_SEQ_ERR_S 28
+#define RSP1_BLK_LEN_ERR_M BITFIELD_MASK(1) /* Bit 29 - Block length error */
+#define RSP1_BLK_LEN_ERR_S 29
+#define RSP1_ADDR_ERR_M BITFIELD_MASK(1) /* Bit 30 - Misaligned address */
+#define RSP1_ADDR_ERR_S 30
+#define RSP1_OUT_OF_RANGE_M BITFIELD_MASK(1) /* Bit 31 - Cmd arg was out of range */
+#define RSP1_OUT_OF_RANGE_S 31
+
+#define RSP5_DATA_M BITFIELD_MASK(8) /* Bits [0:7] - data */
+#define RSP5_DATA_S 0
+#define RSP5_FLAGS_M BITFIELD_MASK(8) /* Bit [15:8] - Rsp flags */
+#define RSP5_FLAGS_S 8
+#define RSP5_STUFF_M BITFIELD_MASK(16) /* Bits [31:16] - Stuff bits */
+#define RSP5_STUFF_S 16
+
+/* ----------------------------------------------
+ * SDIO Command Response structures for SPI mode
+ * ----------------------------------------------
+ */
+#define SPIRSP4_IO_OCR_M BITFIELD_MASK(16) /* Bits [15:0] - Card's OCR Bits [23:8] */
+#define SPIRSP4_IO_OCR_S 0
+#define SPIRSP4_STUFF_M BITFIELD_MASK(3) /* Bits [18:16] - Stuff bits */
+#define SPIRSP4_STUFF_S 16
+#define SPIRSP4_MEM_PRESENT_M BITFIELD_MASK(1) /* Bit 19 - Memory present */
+#define SPIRSP4_MEM_PRESENT_S 19
+#define SPIRSP4_NUM_FUNCS_M BITFIELD_MASK(3) /* Bits [22:20] - Number of I/O funcs */
+#define SPIRSP4_NUM_FUNCS_S 20
+#define SPIRSP4_CARD_READY_M BITFIELD_MASK(1) /* Bit 23 - SDIO card ready */
+#define SPIRSP4_CARD_READY_S 23
+#define SPIRSP4_IDLE_STATE_M BITFIELD_MASK(1) /* Bit 24 - idle state */
+#define SPIRSP4_IDLE_STATE_S 24
+#define SPIRSP4_ILLEGAL_CMD_M BITFIELD_MASK(1) /* Bit 26 - Illegal Cmd error */
+#define SPIRSP4_ILLEGAL_CMD_S 26
+#define SPIRSP4_COM_CRC_ERROR_M BITFIELD_MASK(1) /* Bit 27 - COM CRC error */
+#define SPIRSP4_COM_CRC_ERROR_S 27
+#define SPIRSP4_FUNC_NUM_ERROR_M BITFIELD_MASK(1) /* Bit 28 - Function number error
+ */
+#define SPIRSP4_FUNC_NUM_ERROR_S 28
+#define SPIRSP4_PARAM_ERROR_M BITFIELD_MASK(1) /* Bit 30 - Parameter Error Bit */
+#define SPIRSP4_PARAM_ERROR_S 30
+#define SPIRSP4_START_BIT_M BITFIELD_MASK(1) /* Bit 31 - Start Bit */
+#define SPIRSP4_START_BIT_S 31
+
+#define SPIRSP5_DATA_M BITFIELD_MASK(8) /* Bits [23:16] - R/W Data */
+#define SPIRSP5_DATA_S 16
+#define SPIRSP5_IDLE_STATE_M BITFIELD_MASK(1) /* Bit 24 - Idle state */
+#define SPIRSP5_IDLE_STATE_S 24
+#define SPIRSP5_ILLEGAL_CMD_M BITFIELD_MASK(1) /* Bit 26 - Illegal Cmd error */
+#define SPIRSP5_ILLEGAL_CMD_S 26
+#define SPIRSP5_COM_CRC_ERROR_M BITFIELD_MASK(1) /* Bit 27 - COM CRC error */
+#define SPIRSP5_COM_CRC_ERROR_S 27
+#define SPIRSP5_FUNC_NUM_ERROR_M BITFIELD_MASK(1) /* Bit 28 - Function number error
+ */
+#define SPIRSP5_FUNC_NUM_ERROR_S 28
+#define SPIRSP5_PARAM_ERROR_M BITFIELD_MASK(1) /* Bit 30 - Parameter Error Bit */
+#define SPIRSP5_PARAM_ERROR_S 30
+#define SPIRSP5_START_BIT_M BITFIELD_MASK(1) /* Bit 31 - Start Bit */
+#define SPIRSP5_START_BIT_S 31
+
+/* RSP6 card status format; Pg 68 Physical Layer spec v 1.10 */
+#define RSP6STAT_AKE_SEQ_ERROR_M BITFIELD_MASK(1) /* Bit 3 - Authentication seq error
+ */
+#define RSP6STAT_AKE_SEQ_ERROR_S 3
+#define RSP6STAT_APP_CMD_M BITFIELD_MASK(1) /* Bit 5 - Card expects ACMD */
+#define RSP6STAT_APP_CMD_S 5
+#define RSP6STAT_READY_FOR_DATA_M BITFIELD_MASK(1) /* Bit 8 - Ready for data
+ * (buff empty)
+ */
+#define RSP6STAT_READY_FOR_DATA_S 8
+#define RSP6STAT_CURR_STATE_M BITFIELD_MASK(4) /* Bits [12:9] - Card state at
+ * Cmd reception
+ */
+#define RSP6STAT_CURR_STATE_S 9
+#define RSP6STAT_ERROR_M BITFIELD_MASK(1) /* Bit 13 - General/Unknown error Bit 19
+ */
+#define RSP6STAT_ERROR_S 13
+#define RSP6STAT_ILLEGAL_CMD_M BITFIELD_MASK(1) /* Bit 14 - Illegal cmd for
+ * card state Bit 22
+ */
+#define RSP6STAT_ILLEGAL_CMD_S 14
+#define RSP6STAT_COM_CRC_ERROR_M BITFIELD_MASK(1) /* Bit 15 - CRC previous command
+ * failed Bit 23
+ */
+#define RSP6STAT_COM_CRC_ERROR_S 15
+
+#define SDIOH_XFER_TYPE_READ SD_IO_OP_READ
+#define SDIOH_XFER_TYPE_WRITE SD_IO_OP_WRITE
+
+/* command issue options */
+#define CMD_OPTION_DEFAULT 0
+#define CMD_OPTION_TUNING 1
+
+#endif /* def BCMSDIO */
+#endif /* _SDIO_H */
diff --git a/bcmdhd.101.10.361.x/include/sdioh.h b/bcmdhd.101.10.361.x/include/sdioh.h
new file mode 100755
index 0000000..805f061
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/sdioh.h
@@ -0,0 +1,459 @@
+/*
+ * SDIO Host Controller Spec header file
+ * Register map and definitions for the Standard Host Controller
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _SDIOH_H
+#define _SDIOH_H
+
+/*
+ * Standard SD Host Control Register Map.
+ *
+ * Reference definitions from:
+ * SD Specification, Part A2: SD Host Controller Standard Specification
+ * Version 1.00
+ * February, 2004
+ * http://www.sdcard.org
+ *
+ * One set for each SDIO slot on the controller board.
+ * In PCI, each set is mapped into a BAR. Since PCI only
+ * has six BARs, spec-compliant PCI SDIO host controllers are
+ * limited to 6 slots.
+ */
+#define SD_SysAddr 0x000
+#define SD_BlockSize 0x004
+#define SD_BlockCount 0x006
+#define SD_Arg0 0x008
+#define SD_Arg1 0x00A /* Not really in spec, remove? */
+#define SD_TransferMode 0x00C
+#define SD_Command 0x00E
+#define SD_Response0 0x010
+#define SD_Response1 0x012
+#define SD_Response2 0x014
+#define SD_Response3 0x016
+#define SD_Response4 0x018
+#define SD_Response5 0x01A
+#define SD_Response6 0x01C
+#define SD_Response7 0x01E
+#define SD_BufferDataPort0 0x020
+#define SD_BufferDataPort1 0x022
+#define SD_PresentState 0x024
+#define SD_HostCntrl 0x028
+#define SD_PwrCntrl 0x029
+#define SD_BlockGapCntrl 0x02A
+#define SD_WakeupCntrl 0x02B
+#define SD_ClockCntrl 0x02C /* Add (and use) bitdefs */
+#define SD_TimeoutCntrl 0x02E /* Add (and use) bitdefs */
+#define SD_SoftwareReset 0x02F
+#define SD_IntrStatus 0x030
+#define SD_ErrorIntrStatus 0x032 /* Add (and use) bitdefs */
+#define SD_IntrStatusEnable 0x034
+#define SD_ErrorIntrStatusEnable 0x036
+#define SD_IntrSignalEnable 0x038
+#define SD_ErrorIntrSignalEnable 0x03A
+#define SD_CMD12ErrorStatus 0x03C
+#define SD_Capabilities 0x040
+#define SD_Capabilities3 0x044
+#define SD_MaxCurCap 0x048
+#define SD_MaxCurCap_Reserved 0x04C
+#define SD_ADMA_ErrStatus 0x054
+#define SD_ADMA_SysAddr 0x58
+#define SD_SlotInterruptStatus 0x0FC
+#define SD_HostControllerVersion 0x0FE
+#define SD_GPIO_Reg 0x100
+#define SD_GPIO_OE 0x104
+#define SD_GPIO_Enable 0x108
+
+/* SD specific registers in PCI config space */
+#define SD_SlotInfo 0x40
+
+/* HC 3.0 specific registers and offsets */
+#define SD3_HostCntrl2 0x03E
+/* preset regs start and count */
+#define SD3_PresetValStart 0x060
+#define SD3_PresetValCount 8
+/* preset-indiv regs */
+#define SD3_PresetVal_init 0x060
+#define SD3_PresetVal_default 0x062
+#define SD3_PresetVal_HS 0x064
+#define SD3_PresetVal_SDR12 0x066
+#define SD3_PresetVal_SDR25 0x068
+#define SD3_PresetVal_SDR50 0x06a
+#define SD3_PresetVal_SDR104 0x06c
+#define SD3_PresetVal_DDR50 0x06e
+/* SDIO3.0 Revx specific Registers */
+#define SD3_Tuning_Info_Register 0x0EC
+#define SD3_WL_BT_reset_register 0x0F0
+
+/* preset value indices */
+#define SD3_PRESETVAL_INITIAL_IX 0
+#define SD3_PRESETVAL_DESPEED_IX 1
+#define SD3_PRESETVAL_HISPEED_IX 2
+#define SD3_PRESETVAL_SDR12_IX 3
+#define SD3_PRESETVAL_SDR25_IX 4
+#define SD3_PRESETVAL_SDR50_IX 5
+#define SD3_PRESETVAL_SDR104_IX 6
+#define SD3_PRESETVAL_DDR50_IX 7
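+
+/*
+ * Illustrative note (not part of the original header): each preset value
+ * register is 2 bytes wide, so index i maps to SD3_PresetValStart + 2 * i;
+ * e.g. SD3_PRESETVAL_SDR50_IX yields 0x060 + 2 * 5 = 0x06a = SD3_PresetVal_SDR50.
+ */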
+
+/* SD_Capabilities reg (0x040) */
+#define CAP_TO_CLKFREQ_M BITFIELD_MASK(6)
+#define CAP_TO_CLKFREQ_S 0
+#define CAP_TO_CLKUNIT_M BITFIELD_MASK(1)
+#define CAP_TO_CLKUNIT_S 7
+/* Note: for the sdio-2.0 case this mask should be 6 bits (the 2 msbs are
+   reserved), but we go ahead with 8 bits, as that is required for 3.0
+*/
+#define CAP_BASECLK_M BITFIELD_MASK(8)
+#define CAP_BASECLK_S 8
+#define CAP_MAXBLOCK_M BITFIELD_MASK(2)
+#define CAP_MAXBLOCK_S 16
+#define CAP_ADMA2_M BITFIELD_MASK(1)
+#define CAP_ADMA2_S 19
+#define CAP_ADMA1_M BITFIELD_MASK(1)
+#define CAP_ADMA1_S 20
+#define CAP_HIGHSPEED_M BITFIELD_MASK(1)
+#define CAP_HIGHSPEED_S 21
+#define CAP_DMA_M BITFIELD_MASK(1)
+#define CAP_DMA_S 22
+#define CAP_SUSPEND_M BITFIELD_MASK(1)
+#define CAP_SUSPEND_S 23
+#define CAP_VOLT_3_3_M BITFIELD_MASK(1)
+#define CAP_VOLT_3_3_S 24
+#define CAP_VOLT_3_0_M BITFIELD_MASK(1)
+#define CAP_VOLT_3_0_S 25
+#define CAP_VOLT_1_8_M BITFIELD_MASK(1)
+#define CAP_VOLT_1_8_S 26
+#define CAP_64BIT_HOST_M BITFIELD_MASK(1)
+#define CAP_64BIT_HOST_S 28
+
+#define SDIO_OCR_READ_FAIL (2)
+
+#define CAP_ASYNCINT_SUP_M BITFIELD_MASK(1)
+#define CAP_ASYNCINT_SUP_S 29
+
+#define CAP_SLOTTYPE_M BITFIELD_MASK(2)
+#define CAP_SLOTTYPE_S 30
+
+#define CAP3_MSBits_OFFSET (32)
+/* note: the following are the caps' upper 32 bits, so their bit
+   positions restart from 0 instead of 32; that is why
+   CAP3_MSBits_OFFSET is subtracted.
+*/
+#define CAP3_SDR50_SUP_M BITFIELD_MASK(1)
+#define CAP3_SDR50_SUP_S (32 - CAP3_MSBits_OFFSET)
+
+#define CAP3_SDR104_SUP_M BITFIELD_MASK(1)
+#define CAP3_SDR104_SUP_S (33 - CAP3_MSBits_OFFSET)
+
+#define CAP3_DDR50_SUP_M BITFIELD_MASK(1)
+#define CAP3_DDR50_SUP_S (34 - CAP3_MSBits_OFFSET)
+
+/* for knowing the clk caps in a single read */
+#define CAP3_30CLKCAP_M BITFIELD_MASK(3)
+#define CAP3_30CLKCAP_S (32 - CAP3_MSBits_OFFSET)
+
+#define CAP3_DRIVTYPE_A_M BITFIELD_MASK(1)
+#define CAP3_DRIVTYPE_A_S (36 - CAP3_MSBits_OFFSET)
+
+#define CAP3_DRIVTYPE_C_M BITFIELD_MASK(1)
+#define CAP3_DRIVTYPE_C_S (37 - CAP3_MSBits_OFFSET)
+
+#define CAP3_DRIVTYPE_D_M BITFIELD_MASK(1)
+#define CAP3_DRIVTYPE_D_S (38 - CAP3_MSBits_OFFSET)
+
+#define CAP3_RETUNING_TC_M BITFIELD_MASK(4)
+#define CAP3_RETUNING_TC_S (40 - CAP3_MSBits_OFFSET)
+
+#define CAP3_TUNING_SDR50_M BITFIELD_MASK(1)
+#define CAP3_TUNING_SDR50_S (45 - CAP3_MSBits_OFFSET)
+
+#define CAP3_RETUNING_MODES_M BITFIELD_MASK(2)
+#define CAP3_RETUNING_MODES_S (46 - CAP3_MSBits_OFFSET)
+
+#define CAP3_RETUNING_TC_DISABLED (0x0)
+#define CAP3_RETUNING_TC_1024S (0xB)
+#define CAP3_RETUNING_TC_OTHER (0xF)
+
+#define CAP3_CLK_MULT_M BITFIELD_MASK(8)
+#define CAP3_CLK_MULT_S (48 - CAP3_MSBits_OFFSET)
+
+#define PRESET_DRIVR_SELECT_M BITFIELD_MASK(2)
+#define PRESET_DRIVR_SELECT_S 14
+
+#define PRESET_CLK_DIV_M BITFIELD_MASK(10)
+#define PRESET_CLK_DIV_S 0
+
+/* SD_MaxCurCap reg (0x048) */
+#define CAP_CURR_3_3_M BITFIELD_MASK(8)
+#define CAP_CURR_3_3_S 0
+#define CAP_CURR_3_0_M BITFIELD_MASK(8)
+#define CAP_CURR_3_0_S 8
+#define CAP_CURR_1_8_M BITFIELD_MASK(8)
+#define CAP_CURR_1_8_S 16
+
+/* SD_SysAddr: Offset 0x0000, Size 4 bytes */
+
+/* SD_BlockSize: Offset 0x004, Size 2 bytes */
+#define BLKSZ_BLKSZ_M BITFIELD_MASK(12)
+#define BLKSZ_BLKSZ_S 0
+#define BLKSZ_BNDRY_M BITFIELD_MASK(3)
+#define BLKSZ_BNDRY_S 12
+
+/* SD_BlockCount: Offset 0x006, size 2 bytes */
+
+/* SD_Arg0: Offset 0x008, size = 4 bytes */
+/* SD_TransferMode Offset 0x00C, size = 2 bytes */
+#define XFER_DMA_ENABLE_M BITFIELD_MASK(1)
+#define XFER_DMA_ENABLE_S 0
+#define XFER_BLK_COUNT_EN_M BITFIELD_MASK(1)
+#define XFER_BLK_COUNT_EN_S 1
+#define XFER_CMD_12_EN_M BITFIELD_MASK(1)
+#define XFER_CMD_12_EN_S 2
+#define XFER_DATA_DIRECTION_M BITFIELD_MASK(1)
+#define XFER_DATA_DIRECTION_S 4
+#define XFER_MULTI_BLOCK_M BITFIELD_MASK(1)
+#define XFER_MULTI_BLOCK_S 5
+
+/* SD_Command: Offset 0x00E, size = 2 bytes */
+/* resp_type field */
+#define RESP_TYPE_NONE 0
+#define RESP_TYPE_136 1
+#define RESP_TYPE_48 2
+#define RESP_TYPE_48_BUSY 3
+/* type field */
+#define CMD_TYPE_NORMAL 0
+#define CMD_TYPE_SUSPEND 1
+#define CMD_TYPE_RESUME 2
+#define CMD_TYPE_ABORT 3
+
+#define CMD_RESP_TYPE_M BITFIELD_MASK(2) /* Bits [0-1] - Response type */
+#define CMD_RESP_TYPE_S 0
+#define CMD_CRC_EN_M BITFIELD_MASK(1) /* Bit 3 - CRC enable */
+#define CMD_CRC_EN_S 3
+#define CMD_INDEX_EN_M BITFIELD_MASK(1) /* Bit 4 - Enable index checking */
+#define CMD_INDEX_EN_S 4
+#define CMD_DATA_EN_M BITFIELD_MASK(1) /* Bit 5 - Using DAT line */
+#define CMD_DATA_EN_S 5
+#define CMD_TYPE_M BITFIELD_MASK(2) /* Bit [6-7] - Normal, abort, resume, etc
+ */
+#define CMD_TYPE_S 6
+#define CMD_INDEX_M BITFIELD_MASK(6) /* Bits [8-13] - Command number */
+#define CMD_INDEX_S 8
+
+/* SD_BufferDataPort0 : Offset 0x020, size = 2 or 4 bytes */
+/* SD_BufferDataPort1 : Offset 0x022, size = 2 bytes */
+/* SD_PresentState : Offset 0x024, size = 4 bytes */
+#define PRES_CMD_INHIBIT_M BITFIELD_MASK(1) /* Bit 0 May use CMD */
+#define PRES_CMD_INHIBIT_S 0
+#define PRES_DAT_INHIBIT_M BITFIELD_MASK(1) /* Bit 1 May use DAT */
+#define PRES_DAT_INHIBIT_S 1
+#define PRES_DAT_BUSY_M BITFIELD_MASK(1) /* Bit 2 DAT is busy */
+#define PRES_DAT_BUSY_S 2
+#define PRES_PRESENT_RSVD_M BITFIELD_MASK(5) /* Bit [3-7] rsvd */
+#define PRES_PRESENT_RSVD_S 3
+#define PRES_WRITE_ACTIVE_M BITFIELD_MASK(1) /* Bit 8 Write is active */
+#define PRES_WRITE_ACTIVE_S 8
+#define PRES_READ_ACTIVE_M BITFIELD_MASK(1) /* Bit 9 Read is active */
+#define PRES_READ_ACTIVE_S 9
+#define PRES_WRITE_DATA_RDY_M BITFIELD_MASK(1) /* Bit 10 Write buf is avail */
+#define PRES_WRITE_DATA_RDY_S 10
+#define PRES_READ_DATA_RDY_M BITFIELD_MASK(1) /* Bit 11 Read buf data avail */
+#define PRES_READ_DATA_RDY_S 11
+#define PRES_CARD_PRESENT_M BITFIELD_MASK(1) /* Bit 16 Card present - debounced */
+#define PRES_CARD_PRESENT_S 16
+#define PRES_CARD_STABLE_M BITFIELD_MASK(1) /* Bit 17 Debugging */
+#define PRES_CARD_STABLE_S 17
+#define PRES_CARD_PRESENT_RAW_M BITFIELD_MASK(1) /* Bit 18 Not debounced */
+#define PRES_CARD_PRESENT_RAW_S 18
+#define PRES_WRITE_ENABLED_M BITFIELD_MASK(1) /* Bit 19 Write protected? */
+#define PRES_WRITE_ENABLED_S 19
+#define PRES_DAT_SIGNAL_M BITFIELD_MASK(4) /* Bit [20-23] Debugging */
+#define PRES_DAT_SIGNAL_S 20
+#define PRES_CMD_SIGNAL_M BITFIELD_MASK(1) /* Bit 24 Debugging */
+#define PRES_CMD_SIGNAL_S 24
+
+/* SD_HostCntrl: Offset 0x028, size = 1 bytes */
+#define HOST_LED_M BITFIELD_MASK(1) /* Bit 0 LED On/Off */
+#define HOST_LED_S 0
+#define HOST_DATA_WIDTH_M BITFIELD_MASK(1) /* Bit 1 4 bit enable */
+#define HOST_DATA_WIDTH_S 1
+#define HOST_HI_SPEED_EN_M	BITFIELD_MASK(1)	/* Bit 2	High speed vs low speed */
+#define HOST_HI_SPEED_EN_S	2
+#define HOST_DMA_SEL_M		BITFIELD_MASK(2)	/* Bits [4:3]	DMA Select */
+#define HOST_DMA_SEL_S		3
+
+/* Host Control2: */
+#define HOSTCtrl2_PRESVAL_EN_M BITFIELD_MASK(1) /* 1 bit */
+#define HOSTCtrl2_PRESVAL_EN_S 15 /* bit# */
+
+#define HOSTCtrl2_ASYINT_EN_M BITFIELD_MASK(1) /* 1 bit */
+#define HOSTCtrl2_ASYINT_EN_S 14 /* bit# */
+
+#define HOSTCtrl2_SAMPCLK_SEL_M BITFIELD_MASK(1) /* 1 bit */
+#define HOSTCtrl2_SAMPCLK_SEL_S 7 /* bit# */
+
+#define HOSTCtrl2_EXEC_TUNING_M BITFIELD_MASK(1) /* 1 bit */
+#define HOSTCtrl2_EXEC_TUNING_S 6 /* bit# */
+
+#define HOSTCtrl2_DRIVSTRENGTH_SEL_M BITFIELD_MASK(2) /* 2 bit */
+#define HOSTCtrl2_DRIVSTRENGTH_SEL_S 4 /* bit# */
+
+#define HOSTCtrl2_1_8SIG_EN_M BITFIELD_MASK(1) /* 1 bit */
+#define HOSTCtrl2_1_8SIG_EN_S 3 /* bit# */
+
+#define HOSTCtrl2_UHSMODE_SEL_M BITFIELD_MASK(3) /* 3 bit */
+#define HOSTCtrl2_UHSMODE_SEL_S 0 /* bit# */
+
+#define HOST_CONTR_VER_2 (1)
+#define HOST_CONTR_VER_3 (2)
+
+/* misc defines */
+/* Driver uses of these should be replaced! */
+#define SD1_MODE 0x1 /* SD Host Cntrlr Spec */
+#define SD4_MODE 0x2 /* SD Host Cntrlr Spec */
+
+/* SD_PwrCntrl: Offset 0x029, size = 1 bytes */
+#define PWR_BUS_EN_M BITFIELD_MASK(1) /* Bit 0 Power the bus */
+#define PWR_BUS_EN_S 0
+#define PWR_VOLTS_M BITFIELD_MASK(3) /* Bit [1-3] Voltage Select */
+#define PWR_VOLTS_S 1
+
+/* SD_SoftwareReset: Offset 0x02F, size = 1 byte */
+#define SW_RESET_ALL_M BITFIELD_MASK(1) /* Bit 0 Reset All */
+#define SW_RESET_ALL_S 0
+#define SW_RESET_CMD_M BITFIELD_MASK(1) /* Bit 1 CMD Line Reset */
+#define SW_RESET_CMD_S 1
+#define SW_RESET_DAT_M BITFIELD_MASK(1) /* Bit 2 DAT Line Reset */
+#define SW_RESET_DAT_S 2
+
+/* SD_IntrStatus: Offset 0x030, size = 2 bytes */
+/* Defs also serve SD_IntrStatusEnable and SD_IntrSignalEnable */
+#define INTSTAT_CMD_COMPLETE_M BITFIELD_MASK(1) /* Bit 0 */
+#define INTSTAT_CMD_COMPLETE_S 0
+#define INTSTAT_XFER_COMPLETE_M BITFIELD_MASK(1)
+#define INTSTAT_XFER_COMPLETE_S 1
+#define INTSTAT_BLOCK_GAP_EVENT_M BITFIELD_MASK(1)
+#define INTSTAT_BLOCK_GAP_EVENT_S 2
+#define INTSTAT_DMA_INT_M BITFIELD_MASK(1)
+#define INTSTAT_DMA_INT_S 3
+#define INTSTAT_BUF_WRITE_READY_M BITFIELD_MASK(1)
+#define INTSTAT_BUF_WRITE_READY_S 4
+#define INTSTAT_BUF_READ_READY_M BITFIELD_MASK(1)
+#define INTSTAT_BUF_READ_READY_S 5
+#define INTSTAT_CARD_INSERTION_M BITFIELD_MASK(1)
+#define INTSTAT_CARD_INSERTION_S 6
+#define INTSTAT_CARD_REMOVAL_M BITFIELD_MASK(1)
+#define INTSTAT_CARD_REMOVAL_S 7
+#define INTSTAT_CARD_INT_M BITFIELD_MASK(1)
+#define INTSTAT_CARD_INT_S 8
+#define INTSTAT_RETUNING_INT_M BITFIELD_MASK(1) /* Bit 12 */
+#define INTSTAT_RETUNING_INT_S 12
+#define INTSTAT_ERROR_INT_M BITFIELD_MASK(1) /* Bit 15 */
+#define INTSTAT_ERROR_INT_S 15
+
+/* SD_ErrorIntrStatus: Offset 0x032, size = 2 bytes */
+/* Defs also serve SD_ErrorIntrStatusEnable and SD_ErrorIntrSignalEnable */
+#define ERRINT_CMD_TIMEOUT_M BITFIELD_MASK(1)
+#define ERRINT_CMD_TIMEOUT_S 0
+#define ERRINT_CMD_CRC_M BITFIELD_MASK(1)
+#define ERRINT_CMD_CRC_S 1
+#define ERRINT_CMD_ENDBIT_M BITFIELD_MASK(1)
+#define ERRINT_CMD_ENDBIT_S 2
+#define ERRINT_CMD_INDEX_M BITFIELD_MASK(1)
+#define ERRINT_CMD_INDEX_S 3
+#define ERRINT_DATA_TIMEOUT_M BITFIELD_MASK(1)
+#define ERRINT_DATA_TIMEOUT_S 4
+#define ERRINT_DATA_CRC_M BITFIELD_MASK(1)
+#define ERRINT_DATA_CRC_S 5
+#define ERRINT_DATA_ENDBIT_M BITFIELD_MASK(1)
+#define ERRINT_DATA_ENDBIT_S 6
+#define ERRINT_CURRENT_LIMIT_M BITFIELD_MASK(1)
+#define ERRINT_CURRENT_LIMIT_S 7
+#define ERRINT_AUTO_CMD12_M BITFIELD_MASK(1)
+#define ERRINT_AUTO_CMD12_S 8
+#define ERRINT_ADMA_M		BITFIELD_MASK(1)
+#define ERRINT_ADMA_S		9
+#define ERRINT_VENDOR_M		BITFIELD_MASK(4)
+#define ERRINT_VENDOR_S		12
+
+/* Also provide definitions in "normal" form to allow combined masks */
+#define ERRINT_CMD_TIMEOUT_BIT 0x0001
+#define ERRINT_CMD_CRC_BIT 0x0002
+#define ERRINT_CMD_ENDBIT_BIT 0x0004
+#define ERRINT_CMD_INDEX_BIT 0x0008
+#define ERRINT_DATA_TIMEOUT_BIT 0x0010
+#define ERRINT_DATA_CRC_BIT 0x0020
+#define ERRINT_DATA_ENDBIT_BIT 0x0040
+#define ERRINT_CURRENT_LIMIT_BIT 0x0080
+#define ERRINT_AUTO_CMD12_BIT 0x0100
+#define ERRINT_ADMA_BIT 0x0200
+
+/* Masks to select CMD vs. DATA errors */
+#define ERRINT_CMD_ERRS (ERRINT_CMD_TIMEOUT_BIT | ERRINT_CMD_CRC_BIT |\
+ ERRINT_CMD_ENDBIT_BIT | ERRINT_CMD_INDEX_BIT)
+#define ERRINT_DATA_ERRS (ERRINT_DATA_TIMEOUT_BIT | ERRINT_DATA_CRC_BIT |\
+ ERRINT_DATA_ENDBIT_BIT | ERRINT_ADMA_BIT)
+#define ERRINT_TRANSFER_ERRS (ERRINT_CMD_ERRS | ERRINT_DATA_ERRS)
+
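+/* Illustrative sketch (example only, guarded out by a hypothetical macro):
+ * splitting an SD_ErrorIntrStatus value into CMD vs. DATA error classes
+ * with the combined masks above, which is what motivates keeping the
+ * "normal" _BIT forms alongside the _M/_S pairs.
+ */
+#ifdef SDIOH_ERRINT_EXAMPLE
+static void sdioh_classify_errint(uint16 errint_status)
+{
+	if (errint_status & ERRINT_CMD_ERRS) {
+		/* command-line failure: recover via SW_RESET_CMD */
+	}
+	if (errint_status & ERRINT_DATA_ERRS) {
+		/* data-line failure: recover via SW_RESET_DAT */
+	}
+}
+#endif /* SDIOH_ERRINT_EXAMPLE */
+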
+/* SD_WakeupCntr_BlockGapCntrl : Offset 0x02A , size = bytes */
+/* SD_ClockCntrl : Offset 0x02C , size = bytes */
+/* SD_SoftwareReset_TimeoutCntrl : Offset 0x02E , size = bytes */
+/* SD_IntrStatus : Offset 0x030 , size = bytes */
+/* SD_ErrorIntrStatus : Offset 0x032 , size = bytes */
+/* SD_IntrStatusEnable : Offset 0x034 , size = bytes */
+/* SD_ErrorIntrStatusEnable : Offset 0x036 , size = bytes */
+/* SD_IntrSignalEnable : Offset 0x038 , size = bytes */
+/* SD_ErrorIntrSignalEnable : Offset 0x03A , size = bytes */
+/* SD_CMD12ErrorStatus : Offset 0x03C , size = bytes */
+/* SD_Capabilities : Offset 0x040 , size = bytes */
+/* SD_MaxCurCap : Offset 0x048 , size = bytes */
+/* SD_MaxCurCap_Reserved: Offset 0x04C , size = bytes */
+/* SD_SlotInterruptStatus: Offset 0x0FC , size = bytes */
+/* SD_HostControllerVersion : Offset 0x0FE , size = bytes */
+
+/* SDIO Host Control Register DMA Mode Definitions */
+#define SDIOH_SDMA_MODE 0
+#define SDIOH_ADMA1_MODE 1
+#define SDIOH_ADMA2_MODE 2
+#define SDIOH_ADMA2_64_MODE 3
+
+#define ADMA2_ATTRIBUTE_VALID (1 << 0) /* ADMA Descriptor line valid */
+#define ADMA2_ATTRIBUTE_END (1 << 1) /* End of Descriptor */
+#define ADMA2_ATTRIBUTE_INT (1 << 2) /* Interrupt when line is done */
+#define ADMA2_ATTRIBUTE_ACT_NOP (0 << 4) /* Skip current line, go to next. */
+#define ADMA2_ATTRIBUTE_ACT_RSV (1 << 4) /* Same as NOP */
+#define ADMA1_ATTRIBUTE_ACT_SET (1 << 4) /* ADMA1 Only - set transfer length */
+#define ADMA2_ATTRIBUTE_ACT_TRAN (2 << 4) /* Transfer Data of one descriptor line. */
+#define ADMA2_ATTRIBUTE_ACT_LINK (3 << 4) /* Link Descriptor */
+
+/* ADMA2 Descriptor Table Entry for 32-bit Address */
+typedef struct adma2_dscr_32b {
+ uint32 len_attr;
+ uint32 phys_addr;
+} adma2_dscr_32b_t;
+
+/* ADMA1 Descriptor Table Entry */
+typedef struct adma1_dscr {
+ uint32 phys_addr_attr;
+} adma1_dscr_t;
+
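+/* Illustrative sketch (example only, guarded out by a hypothetical macro):
+ * filling one 32-bit ADMA2 descriptor line. The transfer length living in
+ * the upper 16 bits of len_attr follows the SD Host Controller spec and is
+ * an assumption documented here, not something this header defines.
+ */
+#ifdef SDIOH_ADMA2_EXAMPLE
+static void adma2_fill_dscr(adma2_dscr_32b_t *d, uint32 pa, uint16 len, bool last)
+{
+	d->phys_addr = pa;
+	d->len_attr = ((uint32)len << 16) |
+		ADMA2_ATTRIBUTE_ACT_TRAN | ADMA2_ATTRIBUTE_VALID |
+		(last ? ADMA2_ATTRIBUTE_END : 0);
+}
+#endif /* SDIOH_ADMA2_EXAMPLE */
+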
+#endif /* _SDIOH_H */
diff --git a/bcmdhd.101.10.361.x/include/sdiovar.h b/bcmdhd.101.10.361.x/include/sdiovar.h
new file mode 100755
index 0000000..33c8825
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/sdiovar.h
@@ -0,0 +1,124 @@
+/*
+ * Structures used by applications that talk to the SDIO driver.
+ * Pulled out separately so dhdu and wlu can both use it.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _sdiovar_h_
+#define _sdiovar_h_
+
+#include <typedefs.h>
+
+typedef struct sdreg {
+ int func;
+ int offset;
+ int value;
+} sdreg_t;
+
+typedef struct sdreg_64 {
+ int func;
+ int offset;
+ uint64 value;
+} sdreg_64_t;
+
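+/* Illustrative sketch (example only, guarded out by a hypothetical macro):
+ * an app fills an sdreg_t and passes it to the driver through an iovar;
+ * the exact iovar plumbing is driver-specific and not shown here.
+ */
+#ifdef SDIOVAR_SDREG_EXAMPLE
+static void sdreg_fill_read(sdreg_t *r, int func, int offset)
+{
+	r->func = func;		/* SDIO function number, e.g. 1 */
+	r->offset = offset;	/* register offset within that function */
+	r->value = 0;		/* filled in by the driver on read */
+}
+#endif /* SDIOVAR_SDREG_EXAMPLE */
+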
+/* Common msglevel constants */
+#define SDH_ERROR_VAL 0x0001 /* Error */
+#define SDH_TRACE_VAL 0x0002 /* Trace */
+#define SDH_INFO_VAL 0x0004 /* Info */
+#define SDH_DEBUG_VAL 0x0008 /* Debug */
+#define SDH_DATA_VAL 0x0010 /* Data */
+#define SDH_CTRL_VAL 0x0020 /* Control Regs */
+#define SDH_LOG_VAL 0x0040 /* Enable bcmlog */
+#define SDH_DMA_VAL 0x0080 /* DMA */
+#define SDH_COST_VAL 0x8000 /* Control Regs */
+
+#define NUM_PREV_TRANSACTIONS 16
+
+#ifdef BCMSPI
+/* Error statistics for gSPI */
+struct spierrstats_t {
+ uint32 dna; /* The requested data is not available. */
+ uint32 rdunderflow; /* FIFO underflow happened due to current (F2, F3) rd command */
+	uint32 wroverflow;	/* FIFO overflow happened due to current (F1, F2, F3) wr command */
+
+ uint32 f2interrupt; /* OR of all F2 related intr status bits. */
+ uint32 f3interrupt; /* OR of all F3 related intr status bits. */
+
+ uint32 f2rxnotready; /* F2 FIFO is not ready to receive data (FIFO empty) */
+ uint32 f3rxnotready; /* F3 FIFO is not ready to receive data (FIFO empty) */
+
+ uint32 hostcmddataerr; /* Error in command or host data, detected by CRC/checksum
+ * (optional)
+ */
+ uint32 f2pktavailable; /* Packet is available in F2 TX FIFO */
+	uint32 f3pktavailable;	/* Packet is available in F3 TX FIFO */
+
+ uint32 dstatus[NUM_PREV_TRANSACTIONS]; /* dstatus bits of last 16 gSPI transactions */
+	uint32 spicmd[NUM_PREV_TRANSACTIONS];	/* spi commands of last 16 gSPI transactions */
+};
+#endif /* BCMSPI */
+
+typedef struct sdio_bus_metrics {
+ uint32 active_dur; /* msecs */
+
+ /* Generic */
+ uint32 data_intr_cnt; /* data interrupt counter */
+ uint32 mb_intr_cnt; /* mailbox interrupt counter */
+ uint32 error_intr_cnt; /* error interrupt counter */
+ uint32 wakehost_cnt; /* counter for OOB wakehost */
+
+ /* DS forcewake */
+ uint32 ds_wake_on_cnt; /* counter for (clock) ON */
+	uint32 ds_wake_on_dur;	/* duration for (clock) ON */
+ uint32 ds_wake_off_cnt; /* counter for (clock) OFF */
+ uint32 ds_wake_off_dur; /* duration for (clock) OFF */
+
+ /* DS_D0 state */
+ uint32 ds_d0_cnt; /* counter for DS_D0 state */
+ uint32 ds_d0_dur; /* duration for DS_D0 state */
+
+ /* DS_D3 state */
+ uint32 ds_d3_cnt; /* counter for DS_D3 state */
+ uint32 ds_d3_dur; /* duration for DS_D3 state */
+
+ /* DS DEV_WAKE */
+ uint32 ds_dw_assrt_cnt; /* counter for DW_ASSERT */
+ uint32 ds_dw_dassrt_cnt; /* counter for DW_DASSERT */
+
+ /* DS mailbox signals */
+ uint32 ds_tx_dsreq_cnt; /* counter for tx HMB_DATA_DSREQ */
+ uint32 ds_tx_dsexit_cnt; /* counter for tx HMB_DATA_DSEXIT */
+ uint32 ds_tx_d3ack_cnt; /* counter for tx HMB_DATA_D3ACK */
+ uint32 ds_tx_d3exit_cnt; /* counter for tx HMB_DATA_D3EXIT */
+ uint32 ds_rx_dsack_cnt; /* counter for rx SMB_DATA_DSACK */
+ uint32 ds_rx_dsnack_cnt; /* counter for rx SMB_DATA_DSNACK */
+ uint32 ds_rx_d3inform_cnt; /* counter for rx SMB_DATA_D3INFORM */
+} sdio_bus_metrics_t;
+
+/* Bus interface info for SDIO */
+typedef struct wl_pwr_sdio_stats {
+ uint16 type; /* WL_PWRSTATS_TYPE_SDIO */
+ uint16 len; /* Up to 4K-1, top 4 bits are reserved */
+
+ sdio_bus_metrics_t sdio; /* stats from SDIO bus driver */
+} wl_pwr_sdio_stats_t;
+
+#endif /* _sdiovar_h_ */
diff --git a/bcmdhd.101.10.361.x/include/sdspi.h b/bcmdhd.101.10.361.x/include/sdspi.h
new file mode 100755
index 0000000..b030c69
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/sdspi.h
@@ -0,0 +1,72 @@
+/*
+ * SD-SPI Protocol Standard
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+#ifndef _SD_SPI_H
+#define _SD_SPI_H
+
+#define SPI_START_M BITFIELD_MASK(1) /* Bit [31] - Start Bit */
+#define SPI_START_S 31
+#define SPI_DIR_M BITFIELD_MASK(1) /* Bit [30] - Direction */
+#define SPI_DIR_S 30
+#define SPI_CMD_INDEX_M BITFIELD_MASK(6) /* Bits [29:24] - Command number */
+#define SPI_CMD_INDEX_S 24
+#define SPI_RW_M BITFIELD_MASK(1) /* Bit [23] - Read=0, Write=1 */
+#define SPI_RW_S 23
+#define SPI_FUNC_M BITFIELD_MASK(3) /* Bits [22:20] - Function Number */
+#define SPI_FUNC_S 20
+#define SPI_RAW_M BITFIELD_MASK(1) /* Bit [19] - Read After Wr */
+#define SPI_RAW_S 19
+#define SPI_STUFF_M BITFIELD_MASK(1) /* Bit [18] - Stuff bit */
+#define SPI_STUFF_S 18
+#define SPI_BLKMODE_M BITFIELD_MASK(1) /* Bit [19] - Blockmode 1=blk */
+#define SPI_BLKMODE_S 19
+#define SPI_OPCODE_M BITFIELD_MASK(1) /* Bit [18] - OP Code */
+#define SPI_OPCODE_S 18
+#define SPI_ADDR_M BITFIELD_MASK(17) /* Bits [17:1] - Address */
+#define SPI_ADDR_S 1
+#define SPI_STUFF0_M BITFIELD_MASK(1) /* Bit [0] - Stuff bit */
+#define SPI_STUFF0_S 0
+
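+/* Illustrative sketch (example only, guarded out by a hypothetical macro):
+ * packing a 32-bit gSPI command word from the fields above, assuming the
+ * SFIELD() helper this driver defines elsewhere (see sdio.h).
+ */
+#ifdef SDSPI_CMD_EXAMPLE
+static uint32 sdspi_build_cmd(uint func, uint32 addr, bool write, uint cmd_index)
+{
+	uint32 w = 0;
+	w = SFIELD(w, SPI_START, 1);
+	w = SFIELD(w, SPI_DIR, 1);			/* host to card */
+	w = SFIELD(w, SPI_CMD_INDEX, cmd_index);
+	w = SFIELD(w, SPI_RW, write ? 1 : 0);
+	w = SFIELD(w, SPI_FUNC, func);
+	w = SFIELD(w, SPI_ADDR, addr);
+	return w;
+}
+#endif /* SDSPI_CMD_EXAMPLE */
+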
+#define SPI_RSP_START_M BITFIELD_MASK(1) /* Bit [7] - Start Bit (always 0) */
+#define SPI_RSP_START_S 7
+#define SPI_RSP_PARAM_ERR_M BITFIELD_MASK(1) /* Bit [6] - Parameter Error */
+#define SPI_RSP_PARAM_ERR_S 6
+#define SPI_RSP_RFU5_M BITFIELD_MASK(1) /* Bit [5] - RFU (Always 0) */
+#define SPI_RSP_RFU5_S 5
+#define SPI_RSP_FUNC_ERR_M BITFIELD_MASK(1) /* Bit [4] - Function number error */
+#define SPI_RSP_FUNC_ERR_S 4
+#define SPI_RSP_CRC_ERR_M BITFIELD_MASK(1) /* Bit [3] - COM CRC Error */
+#define SPI_RSP_CRC_ERR_S 3
+#define SPI_RSP_ILL_CMD_M BITFIELD_MASK(1) /* Bit [2] - Illegal Command error */
+#define SPI_RSP_ILL_CMD_S 2
+#define SPI_RSP_RFU1_M BITFIELD_MASK(1) /* Bit [1] - RFU (Always 0) */
+#define SPI_RSP_RFU1_S 1
+#define SPI_RSP_IDLE_M BITFIELD_MASK(1) /* Bit [0] - In idle state */
+#define SPI_RSP_IDLE_S 0
+
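+/* Illustrative sketch (example only, guarded out by a hypothetical macro):
+ * decoding the response flags above with plain mask/shift arithmetic.
+ */
+#ifdef SDSPI_RSP_EXAMPLE
+static bool sdspi_rsp_has_error(uint8 rsp)
+{
+	return (((rsp >> SPI_RSP_PARAM_ERR_S) & SPI_RSP_PARAM_ERR_M) ||
+		((rsp >> SPI_RSP_FUNC_ERR_S) & SPI_RSP_FUNC_ERR_M) ||
+		((rsp >> SPI_RSP_CRC_ERR_S) & SPI_RSP_CRC_ERR_M) ||
+		((rsp >> SPI_RSP_ILL_CMD_S) & SPI_RSP_ILL_CMD_M));
+}
+#endif /* SDSPI_RSP_EXAMPLE */
+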
+/* SD-SPI Protocol Definitions */
+#define SDSPI_COMMAND_LEN 6 /* Number of bytes in an SD command */
+#define SDSPI_START_BLOCK 0xFE /* SD Start Block Token */
+#define SDSPI_IDLE_PAD 0xFF /* SD-SPI idle value for MOSI */
+#define SDSPI_START_BIT_MASK 0x80
+
+#endif /* _SD_SPI_H */
diff --git a/bcmdhd.101.10.361.x/include/siutils.h b/bcmdhd.101.10.361.x/include/siutils.h
new file mode 100755
index 0000000..0078bbd
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/siutils.h
@@ -0,0 +1,1057 @@
+/*
+ * Misc utility routines for accessing the SOC Interconnects
+ * of Broadcom HNBU chips.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _siutils_h_
+#define _siutils_h_
+
+#include <osl_decl.h>
+
+/* Make the d11 core(s) selectable by the user config... */
+#ifndef D11_CORE_UNIT_MASK
+/* By default we allow all d11 cores to be used */
+#define D11_CORE_UNIT_MASK 0xFFFFFFFFu
+#endif
+
+/* Generic interrupt bit mask definitions */
+enum bcm_int_reg_idx {
+ BCM_INT_REG_IDX_0 = 0,
+ BCM_INT_REG_IDX_1 = 1,
+	/* temporary workaround to avoid > 50K invalidation on 4388a0-roml */
+#ifndef ROM_COMPAT_INT_REG_IDX
+ BCM_INT_REG_IDX_2 = 2,
+#endif /* ROM_COMPAT_INT_REG_IDX */
+ BCM_INT_REGS_NUM
+};
+
+typedef struct bcm_int_bitmask {
+ uint32 bits[BCM_INT_REGS_NUM];
+} bcm_int_bitmask_t;
+
+#ifndef ROM_COMPAT_INT_REG_IDX
+
+#define BCM_INT_BITMASK_IS_EQUAL(b, cmp) (\
+ (b)->bits[BCM_INT_REG_IDX_0] == (cmp)->bits[BCM_INT_REG_IDX_0] && \
+ (b)->bits[BCM_INT_REG_IDX_1] == (cmp)->bits[BCM_INT_REG_IDX_1] && \
+ (b)->bits[BCM_INT_REG_IDX_2] == (cmp)->bits[BCM_INT_REG_IDX_2])
+
+#define BCM_INT_BITMASK_IS_ZERO(b) (\
+ (b)->bits[BCM_INT_REG_IDX_0] == 0 && \
+ (b)->bits[BCM_INT_REG_IDX_1] == 0 && \
+ (b)->bits[BCM_INT_REG_IDX_2] == 0)
+
+#define BCM_INT_BITMASK_SET(to, from) do { \
+ (to)->bits[BCM_INT_REG_IDX_0] = (from)->bits[BCM_INT_REG_IDX_0]; \
+ (to)->bits[BCM_INT_REG_IDX_1] = (from)->bits[BCM_INT_REG_IDX_1]; \
+ (to)->bits[BCM_INT_REG_IDX_2] = (from)->bits[BCM_INT_REG_IDX_2]; \
+} while (0)
+#define BCM_INT_BITMASK_OR(to, from) do { \
+ (to)->bits[BCM_INT_REG_IDX_0] |= (from)->bits[BCM_INT_REG_IDX_0]; \
+ (to)->bits[BCM_INT_REG_IDX_1] |= (from)->bits[BCM_INT_REG_IDX_1]; \
+ (to)->bits[BCM_INT_REG_IDX_2] |= (from)->bits[BCM_INT_REG_IDX_2]; \
+} while (0)
+
+#define BCM_INT_BITMASK_AND(to, mask) do { \
+ (to)->bits[BCM_INT_REG_IDX_0] &= (mask)->bits[BCM_INT_REG_IDX_0]; \
+ (to)->bits[BCM_INT_REG_IDX_1] &= (mask)->bits[BCM_INT_REG_IDX_1]; \
+ (to)->bits[BCM_INT_REG_IDX_2] &= (mask)->bits[BCM_INT_REG_IDX_2]; \
+} while (0)
+
+#else
+
+#define BCM_INT_BITMASK_IS_EQUAL(b, cmp) (\
+ (b)->bits[BCM_INT_REG_IDX_0] == (cmp)->bits[BCM_INT_REG_IDX_0] && \
+ (b)->bits[BCM_INT_REG_IDX_1] == (cmp)->bits[BCM_INT_REG_IDX_1]) \
+
+#define BCM_INT_BITMASK_IS_ZERO(b) (\
+ (b)->bits[BCM_INT_REG_IDX_0] == 0 && \
+ (b)->bits[BCM_INT_REG_IDX_1] == 0)
+
+#define BCM_INT_BITMASK_SET(to, from) do { \
+ (to)->bits[BCM_INT_REG_IDX_0] = (from)->bits[BCM_INT_REG_IDX_0]; \
+ (to)->bits[BCM_INT_REG_IDX_1] = (from)->bits[BCM_INT_REG_IDX_1]; \
+} while (0)
+
+#define BCM_INT_BITMASK_OR(to, from) do { \
+ (to)->bits[BCM_INT_REG_IDX_0] |= (from)->bits[BCM_INT_REG_IDX_0]; \
+ (to)->bits[BCM_INT_REG_IDX_1] |= (from)->bits[BCM_INT_REG_IDX_1]; \
+} while (0)
+
+#define BCM_INT_BITMASK_AND(to, mask) do { \
+ (to)->bits[BCM_INT_REG_IDX_0] &= (mask)->bits[BCM_INT_REG_IDX_0]; \
+ (to)->bits[BCM_INT_REG_IDX_1] &= (mask)->bits[BCM_INT_REG_IDX_1]; \
+} while (0)
+
+#endif /* ROM_COMPAT_INT_REG_IDX */
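+
+/* Illustrative sketch (example only, guarded out by a hypothetical macro):
+ * the accessor macros above let callers fold interrupt state together
+ * without caring how many index registers the build compiled in.
+ */
+#ifdef SI_INT_BITMASK_EXAMPLE
+static bool si_intstatus_merge(bcm_int_bitmask_t *acc, const bcm_int_bitmask_t *cur)
+{
+	BCM_INT_BITMASK_OR(acc, cur);		/* fold new status into the accumulator */
+	return !BCM_INT_BITMASK_IS_ZERO(acc);	/* TRUE if anything is pending */
+}
+#endif /* SI_INT_BITMASK_EXAMPLE */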
+
+#define WARM_BOOT 0xA0B0C0D0
+
+typedef struct si_axi_error_info si_axi_error_info_t;
+
+#ifdef AXI_TIMEOUTS_NIC
+#define SI_MAX_ERRLOG_SIZE 4
+typedef struct si_axi_error
+{
+ uint32 error;
+ uint32 coreid;
+ uint32 errlog_lo;
+ uint32 errlog_hi;
+ uint32 errlog_id;
+ uint32 errlog_flags;
+ uint32 errlog_status;
+} si_axi_error_t;
+
+struct si_axi_error_info
+{
+ uint32 count;
+ si_axi_error_t axi_error[SI_MAX_ERRLOG_SIZE];
+};
+#endif /* AXI_TIMEOUTS_NIC */
+
+/**
+ * Data structure to export all chip specific common variables
+ * public (read-only) portion of siutils handle returned by si_attach()/si_kattach()
+ */
+struct si_pub {
+ bool issim; /**< chip is in simulation or emulation */
+
+ uint16 socitype; /**< SOCI_SB, SOCI_AI */
+ int16 socirev; /**< SOC interconnect rev */
+
+ uint16 bustype; /**< SI_BUS, PCI_BUS */
+ uint16 buscoretype; /**< PCI_CORE_ID, PCIE_CORE_ID */
+ int16 buscorerev; /**< buscore rev */
+ uint16 buscoreidx; /**< buscore index */
+
+ int16 ccrev; /**< chip common core rev */
+ uint32 cccaps; /**< chip common capabilities */
+ uint32 cccaps_ext; /**< chip common capabilities extension */
+ int16 pmurev; /**< pmu core rev */
+ uint32 pmucaps; /**< pmu capabilities */
+
+ uint32 boardtype; /**< board type */
+	uint32	boardrev;	/**< board rev */
+ uint32 boardvendor; /**< board vendor */
+ uint32 boardflags; /**< board flags */
+ uint32 boardflags2; /**< board flags2 */
+ uint32 boardflags4; /**< board flags4 */
+
+ uint32 chip; /**< chip number */
+ uint16 chiprev; /**< chip revision */
+ uint16 chippkg; /**< chip package option */
+ uint32 chipst; /**< chip status */
+
+ int16 gcirev; /**< gci core rev */
+	int16	lhlrev;		/**< lhl core rev */
+
+ uint32 lpflags; /**< low power flags */
+ uint32 enum_base; /**< backplane address where the chipcommon core resides */
+ bool _multibp_enable;
+ bool rffe_debug_mode;
+ bool rffe_elnabyp_mode;
+
+ si_axi_error_info_t * err_info;
+};
+
+/* For the HIGH_ONLY driver, the si_t must be writable to allow state sync from BMAC to the HIGH
+ * driver; for the monolithic driver, it is read-only to prevent accidental changes.
+ */
+typedef struct si_pub si_t;
+
+/*
+ * Many of the routines below take an 'sih' handle as their first arg.
+ * Allocate this by calling si_attach(). Free it by calling si_detach().
+ * At any one time, the sih is logically focused on one particular si core
+ * (the "current core").
+ * Use si_setcore() or si_setcoreidx() to change the association to another core.
+ */
+#define SI_OSH NULL /**< Use for si_kattach when no osh is available */
+
+#ifndef SOCI_NCI_BUS
+#define BADIDX (SI_MAXCORES + 1)
+#else
+#define BADIDX (0xffffu) /* MAXCORES will be dynamically calculated for NCI. */
+#endif /* SOCI_NCI_BUS */
+
+/* clkctl xtal what flags */
+#define XTAL 0x1 /**< primary crystal oscillator (2050) */
+#define PLL 0x2 /**< main chip pll */
+
+/* clkctl clk mode */
+#define CLK_FAST 0 /**< force fast (pll) clock */
+#define CLK_DYNAMIC 2 /**< enable dynamic clock control */
+
+/* GPIO usage priorities */
+#define GPIO_DRV_PRIORITY 0 /**< Driver */
+#define GPIO_APP_PRIORITY 1 /**< Application */
+#define GPIO_HI_PRIORITY 2 /**< Highest priority. Ignore GPIO reservation */
+
+/* GPIO pull up/down */
+#define GPIO_PULLUP 0
+#define GPIO_PULLDN 1
+
+/* GPIO event regtype */
+#define GPIO_REGEVT 0 /**< GPIO register event */
+#define GPIO_REGEVT_INTMSK 1 /**< GPIO register event int mask */
+#define GPIO_REGEVT_INTPOL 2 /**< GPIO register event int polarity */
+
+/* device path */
+#define SI_DEVPATH_BUFSZ 16 /**< min buffer size in bytes */
+
+/* SI routine enumeration: to be used by update function with multiple hooks */
+#define SI_DOATTACH 1
+#define SI_PCIDOWN 2 /**< wireless interface is down */
+#define SI_PCIUP 3 /**< wireless interface is up */
+
+#ifdef SR_DEBUG
+#define PMU_RES 31
+#endif /* SR_DEBUG */
+
+/* "access" param defines for si_seci_access() below */
+#define SECI_ACCESS_STATUSMASK_SET 0
+#define SECI_ACCESS_INTRS 1
+#define SECI_ACCESS_UART_CTS 2
+#define SECI_ACCESS_UART_RTS 3
+#define SECI_ACCESS_UART_RXEMPTY 4
+#define SECI_ACCESS_UART_GETC 5
+#define SECI_ACCESS_UART_TXFULL 6
+#define SECI_ACCESS_UART_PUTC 7
+#define SECI_ACCESS_STATUSMASK_GET 8
+
+#if defined(BCMQT)
+#define ISSIM_ENAB(sih) TRUE
+#else /* !defined(BCMQT) */
+#define ISSIM_ENAB(sih) FALSE
+#endif /* defined(BCMQT) */
+
+#if defined(ATE_BUILD)
+#define ATE_BLD_ENAB(sih) TRUE
+#else
+#define ATE_BLD_ENAB(sih) FALSE
+#endif
+
+#define INVALID_ADDR (0xFFFFFFFFu)
+
+/* PMU clock/power control */
+#if defined(BCMPMUCTL)
+#define PMUCTL_ENAB(sih) (BCMPMUCTL)
+#else
+#define PMUCTL_ENAB(sih) ((sih)->cccaps & CC_CAP_PMU)
+#endif
+
+#if defined(BCMAOBENAB)
+#define AOB_ENAB(sih) (BCMAOBENAB)
+#else
+#define AOB_ENAB(sih) ((sih)->ccrev >= 35 ? \
+ ((sih)->cccaps_ext & CC_CAP_EXT_AOB_PRESENT) : 0)
+#endif /* BCMAOBENAB */
+
+/* chipcommon clock/power control (exclusive with PMU's) */
+#if defined(BCMPMUCTL) && BCMPMUCTL
+#define CCCTL_ENAB(sih) (0)
+#define CCPLL_ENAB(sih) (0)
+#else
+#define CCCTL_ENAB(sih) ((sih)->cccaps & CC_CAP_PWR_CTL)
+#define CCPLL_ENAB(sih) ((sih)->cccaps & CC_CAP_PLL_MASK)
+#endif
+
+typedef void (*gci_gpio_handler_t)(uint32 stat, void *arg);
+
+typedef void (*wci2_handler_t)(void *ctx, char *buf, int len);
+
+/* External BT Coex enable mask */
+#define CC_BTCOEX_EN_MASK 0x01
+/* External PA enable mask */
+#define GPIO_CTRL_EPA_EN_MASK 0x40
+/* WL/BT control enable mask */
+#define GPIO_CTRL_5_6_EN_MASK 0x60
+#define GPIO_CTRL_7_6_EN_MASK 0xC0
+#define GPIO_OUT_7_EN_MASK 0x80
+
+#define UCODE_WAKE_STATUS_BIT 1
+
+#if defined(BCMDONGLEHOST)
+
+/* CR4 specific defines used by the host driver */
+#define SI_CR4_CAP (0x04)
+#define SI_CR4_BANKIDX (0x40)
+#define SI_CR4_BANKINFO (0x44)
+#define SI_CR4_BANKPDA (0x4C)
+
+#define ARMCR4_TCBBNB_MASK 0xf0
+#define ARMCR4_TCBBNB_SHIFT 4
+#define ARMCR4_TCBANB_MASK 0xf
+#define ARMCR4_TCBANB_SHIFT 0
+
+#define SICF_CPUHALT (0x0020)
+#define ARMCR4_BSZ_MASK 0x7f
+#define ARMCR4_BUNITSZ_MASK 0x200
+#define ARMCR4_BSZ_8K 8192
+#define ARMCR4_BSZ_1K 1024
+#endif /* BCMDONGLEHOST */
+#define SI_BPIND_1BYTE 0x1
+#define SI_BPIND_2BYTE 0x3
+#define SI_BPIND_4BYTE 0xF
+
+#define GET_GCI_OFFSET(sih, gci_reg) \
+ (AOB_ENAB(sih)? OFFSETOF(gciregs_t, gci_reg) : OFFSETOF(chipcregs_t, gci_reg))
+
+#define GET_GCI_CORE(sih) \
+ (AOB_ENAB(sih)? si_findcoreidx(sih, GCI_CORE_ID, 0) : SI_CC_IDX)
+
+#define VARBUF_PRIO_INVALID 0u
+#define VARBUF_PRIO_NVRAM 1u
+#define VARBUF_PRIO_SROM 2u
+#define VARBUF_PRIO_OTP 3u
+#define VARBUF_PRIO_SH_SFLASH 4u
+
+#define BT_IN_RESET_BIT_SHIFT 19u
+#define BT_IN_PDS_BIT_SHIFT 10u
+
+/* === exported functions === */
+extern si_t *si_attach(uint pcidev, osl_t *osh, volatile void *regs, uint bustype,
+ void *sdh, char **vars, uint *varsz);
+extern si_t *si_kattach(osl_t *osh);
+extern void si_detach(si_t *sih);
+extern volatile void *si_d11_switch_addrbase(si_t *sih, uint coreunit);
+extern uint si_corelist(const si_t *sih, uint coreid[]);
+extern uint si_coreid(const si_t *sih);
+extern uint si_flag(si_t *sih);
+extern uint si_flag_alt(const si_t *sih);
+extern uint si_intflag(si_t *sih);
+extern uint si_coreidx(const si_t *sih);
+extern uint si_get_num_cores(const si_t *sih);
+extern uint si_coreunit(const si_t *sih);
+extern uint si_corevendor(const si_t *sih);
+extern uint si_corerev(const si_t *sih);
+extern uint si_corerev_minor(const si_t *sih);
+extern void *si_osh(si_t *sih);
+extern void si_setosh(si_t *sih, osl_t *osh);
+extern int si_backplane_access(si_t *sih, uint addr, uint size, uint *val, bool read);
+
+/* precommit failed when this is removed */
+/* BLAZAR_BRANCH_101_10_DHD_002/build/dhd/linux-fc30/brix-brcm */
+/* TBD: Revisit later */
+#ifdef BCMINTERNAL
+extern int si_backplane_access_64(si_t *sih, uint addr, uint size,
+ uint64 *val, bool read);
+#endif /* BCMINTERNAL */
+
+extern uint si_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val);
+extern uint si_corereg_writeonly(si_t *sih, uint coreidx, uint regoff, uint mask, uint val);
+extern uint si_pmu_corereg(si_t *sih, uint32 idx, uint regoff, uint mask, uint val);
+extern volatile uint32 *si_corereg_addr(si_t *sih, uint coreidx, uint regoff);
+extern volatile void *si_coreregs(const si_t *sih);
+extern uint si_wrapperreg(const si_t *sih, uint32 offset, uint32 mask, uint32 val);
+extern uint si_core_wrapperreg(si_t *sih, uint32 coreidx, uint32 offset, uint32 mask, uint32 val);
+extern void *si_wrapperregs(const si_t *sih);
+extern uint32 si_core_cflags(const si_t *sih, uint32 mask, uint32 val);
+extern void si_core_cflags_wo(const si_t *sih, uint32 mask, uint32 val);
+extern uint32 si_core_sflags(const si_t *sih, uint32 mask, uint32 val);
+extern void si_commit(si_t *sih);
+extern bool si_iscoreup(const si_t *sih);
+extern uint si_numcoreunits(const si_t *sih, uint coreid);
+extern uint si_numd11coreunits(const si_t *sih);
+extern uint si_findcoreidx(const si_t *sih, uint coreid, uint coreunit);
+extern uint si_findcoreid(const si_t *sih, uint coreidx);
+extern volatile void *si_setcoreidx(si_t *sih, uint coreidx);
+extern volatile void *si_setcore(si_t *sih, uint coreid, uint coreunit);
+extern uint32 si_oobr_baseaddr(const si_t *sih, bool second);
+#if !defined(BCMDONGLEHOST)
+extern uint si_corereg_ifup(si_t *sih, uint core_id, uint regoff, uint mask, uint val);
+extern void si_lowpwr_opt(si_t *sih);
+#endif /* !defined(BCMDONGLEHOST) */
+extern volatile void *si_switch_core(si_t *sih, uint coreid, uint *origidx,
+ bcm_int_bitmask_t *intr_val);
+extern void si_restore_core(si_t *sih, uint coreid, bcm_int_bitmask_t *intr_val);
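+
+/* Illustrative sketch (example only, guarded out by a hypothetical macro) of
+ * the attach/focus pattern described above: attach once, point the handle at
+ * a core with si_setcore(), query it, then detach. D11_CORE_ID is assumed
+ * from the core-id definitions elsewhere in this driver.
+ */
+#ifdef SIUTILS_ATTACH_EXAMPLE
+static void si_usage_sketch(uint devid, osl_t *osh, volatile void *regs,
+	uint bustype, void *sdh)
+{
+	char *vars = NULL;
+	uint varsz = 0;
+	si_t *sih = si_attach(devid, osh, regs, bustype, sdh, &vars, &varsz);
+	if (sih != NULL) {
+		if (si_setcore(sih, D11_CORE_ID, 0) != NULL) {
+			(void)si_corerev(sih);	/* sih is now focused on d11 unit 0 */
+		}
+		si_detach(sih);
+	}
+}
+#endif /* SIUTILS_ATTACH_EXAMPLE */
+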
+#ifdef USE_NEW_COREREV_API
+extern uint si_corerev_ext(si_t *sih, uint coreid, uint coreunit);
+#else
+uint si_get_corerev(si_t *sih, uint core_id);
+#endif
+extern int si_numaddrspaces(const si_t *sih);
+extern uint32 si_addrspace(const si_t *sih, uint spidx, uint baidx);
+extern uint32 si_addrspacesize(const si_t *sih, uint spidx, uint baidx);
+extern void si_coreaddrspaceX(const si_t *sih, uint asidx, uint32 *addr, uint32 *size);
+extern int si_corebist(const si_t *sih);
+extern void si_core_reset(si_t *sih, uint32 bits, uint32 resetbits);
+extern void si_core_disable(const si_t *sih, uint32 bits);
+extern uint32 si_clock_rate(uint32 pll_type, uint32 n, uint32 m);
+extern uint si_chip_hostif(const si_t *sih);
+extern uint32 si_clock(si_t *sih);
+extern uint32 si_alp_clock(si_t *sih); /* returns [Hz] units */
+extern uint32 si_ilp_clock(si_t *sih); /* returns [Hz] units */
+extern void si_pci_setup(si_t *sih, uint coremask);
+extern int si_pcie_setup(si_t *sih, uint coreidx);
+extern void si_setint(const si_t *sih, int siflag);
+extern bool si_backplane64(const si_t *sih);
+extern void si_register_intr_callback(si_t *sih, void *intrsoff_fn, void *intrsrestore_fn,
+ void *intrsenabled_fn, void *intr_arg);
+extern void si_deregister_intr_callback(si_t *sih);
+extern void si_clkctl_init(si_t *sih);
+extern uint16 si_clkctl_fast_pwrup_delay(si_t *sih);
+extern bool si_clkctl_cc(si_t *sih, uint mode);
+extern int si_clkctl_xtal(si_t *sih, uint what, bool on);
+extern void si_btcgpiowar(si_t *sih);
+extern bool si_deviceremoved(const si_t *sih);
+extern void si_set_device_removed(si_t *sih, bool status);
+extern uint32 si_sysmem_size(si_t *sih);
+extern uint32 si_socram_size(si_t *sih);
+extern uint32 si_socram_srmem_size(si_t *sih);
+extern void si_socram_set_bankpda(si_t *sih, uint32 bankidx, uint32 bankpda);
+extern bool si_is_bus_mpu_present(si_t *sih);
+
+extern void si_watchdog(si_t *sih, uint ticks);
+extern void si_watchdog_ms(si_t *sih, uint32 ms);
+extern uint32 si_watchdog_msticks(void);
+extern volatile void *si_gpiosetcore(si_t *sih);
+extern uint32 si_gpiocontrol(si_t *sih, uint32 mask, uint32 val, uint8 priority);
+extern uint32 si_gpioouten(si_t *sih, uint32 mask, uint32 val, uint8 priority);
+extern uint32 si_gpioout(si_t *sih, uint32 mask, uint32 val, uint8 priority);
+extern uint32 si_gpioin(si_t *sih);
+extern uint32 si_gpiointpolarity(si_t *sih, uint32 mask, uint32 val, uint8 priority);
+extern uint32 si_gpiointmask(si_t *sih, uint32 mask, uint32 val, uint8 priority);
+extern uint32 si_gpioeventintmask(si_t *sih, uint32 mask, uint32 val, uint8 priority);
+extern uint32 si_gpioled(si_t *sih, uint32 mask, uint32 val);
+extern uint32 si_gpioreserve(const si_t *sih, uint32 gpio_num, uint8 priority);
+extern uint32 si_gpiorelease(const si_t *sih, uint32 gpio_num, uint8 priority);
+extern uint32 si_gpiopull(si_t *sih, bool updown, uint32 mask, uint32 val);
+extern uint32 si_gpioevent(si_t *sih, uint regtype, uint32 mask, uint32 val);
+extern uint32 si_gpio_int_enable(si_t *sih, bool enable);
+extern void si_gci_uart_init(si_t *sih, osl_t *osh, uint8 seci_mode);
+extern void si_gci_enable_gpio(si_t *sih, uint8 gpio, uint32 mask, uint32 value);
+extern uint8 si_gci_host_wake_gpio_init(si_t *sih);
+extern uint8 si_gci_time_sync_gpio_init(si_t *sih);
+extern void si_gci_host_wake_gpio_enable(si_t *sih, uint8 gpio, bool state);
+extern void si_gci_time_sync_gpio_enable(si_t *sih, uint8 gpio, bool state);
+extern void si_gci_host_wake_gpio_tristate(si_t *sih, uint8 gpio, bool state);
+extern int si_gpio_enable(si_t *sih, uint32 mask);
+
+extern void si_invalidate_second_bar0win(si_t *sih);
+
+extern void si_gci_shif_config_wake_pin(si_t *sih, uint8 gpio_n,
+ uint8 wake_events, bool gci_gpio);
+extern void si_shif_int_enable(si_t *sih, uint8 gpio_n, uint8 wake_events, bool enable);
+
+/* GCI interrupt handlers */
+extern void si_gci_handler_process(si_t *sih);
+
+extern void si_enable_gpio_wake(si_t *sih, uint8 *wake_mask, uint8 *cur_status, uint8 gci_gpio,
+ uint32 pmu_cc2_mask, uint32 pmu_cc2_value);
+
+/* GCI GPIO event handlers */
+extern void *si_gci_gpioint_handler_register(si_t *sih, uint8 gpio, uint8 sts,
+ gci_gpio_handler_t cb, void *arg);
+extern void si_gci_gpioint_handler_unregister(si_t *sih, void* gci_i);
+
+extern void si_gci_gpio_chipcontrol_ex(si_t *si, uint8 gpoi, uint8 opt);
+extern uint8 si_gci_gpio_status(si_t *sih, uint8 gci_gpio, uint8 mask, uint8 value);
+extern void si_gci_config_wake_pin(si_t *sih, uint8 gpio_n, uint8 wake_events,
+ bool gci_gpio);
+extern void si_gci_free_wake_pin(si_t *sih, uint8 gpio_n);
+#if !defined(BCMDONGLEHOST)
+extern uint8 si_gci_gpio_wakemask(si_t *sih, uint8 gpio, uint8 mask, uint8 value);
+extern uint8 si_gci_gpio_intmask(si_t *sih, uint8 gpio, uint8 mask, uint8 value);
+#endif /* !defined(BCMDONGLEHOST) */
+
+/* Wake-on-wireless-LAN (WOWL) */
+extern bool si_pci_pmestat(const si_t *sih);
+extern void si_pci_pmeclr(const si_t *sih);
+extern void si_pci_pmeen(const si_t *sih);
+extern void si_pci_pmestatclr(const si_t *sih);
+extern uint si_pcie_readreg(void *sih, uint addrtype, uint offset);
+extern uint si_pcie_writereg(void *sih, uint addrtype, uint offset, uint val);
+
+#ifdef BCMSDIO
+extern void si_sdio_init(si_t *sih);
+#endif
+
+extern uint16 si_d11_devid(si_t *sih);
+extern int si_corepciid(si_t *sih, uint func, uint16 *pcivendor, uint16 *pcidevice,
+ uint8 *pciclass, uint8 *pcisubclass, uint8 *pciprogif, uint8 *pciheader);
+
+extern uint32 si_seci_access(si_t *sih, uint32 val, int access);
+extern volatile void* si_seci_init(si_t *sih, uint8 seci_mode);
+extern void si_seci_clk_force(si_t *sih, bool val);
+extern bool si_seci_clk_force_status(si_t *sih);
+
+#if (defined(BCMECICOEX) && !defined(BCMDONGLEHOST))
+extern bool si_eci(const si_t *sih);
+extern int si_eci_init(si_t *sih);
+extern void si_eci_notify_bt(si_t *sih, uint32 mask, uint32 val, bool interrupt);
+extern bool si_seci(const si_t *sih);
+extern void* si_gci_init(si_t *sih);
+extern void si_seci_down(si_t *sih);
+extern void si_seci_upd(si_t *sih, bool enable);
+extern bool si_gci(const si_t *sih);
+extern bool si_sraon(const si_t *sih);
+#else
+#define si_eci(sih) 0
+#define si_eci_init(sih) 0
+#define si_eci_notify_bt(sih, type, val) (0)
+#define si_seci(sih) 0
+#define si_seci_upd(sih, a) do {} while (0)
+#define si_gci_init(sih) NULL
+#define si_seci_down(sih) do {} while (0)
+#define si_gci(sih) 0
+#define si_sraon(sih) 0
+#endif /* BCMECICOEX */
+
+/* OTP status */
+extern bool si_is_otp_disabled(const si_t *sih);
+extern bool si_is_otp_powered(si_t *sih);
+extern void si_otp_power(si_t *sih, bool on, uint32* min_res_mask);
+
+/* SPROM availability */
+extern bool si_is_sprom_available(si_t *sih);
+#ifdef SI_SPROM_PROBE
+extern void si_sprom_init(si_t *sih);
+#endif /* SI_SPROM_PROBE */
+
+/* SFlash availability */
+bool si_is_sflash_available(const si_t *sih);
+
+/* OTP/SROM CIS stuff */
+extern int si_cis_source(const si_t *sih);
+#define CIS_DEFAULT 0
+#define CIS_SROM 1
+#define CIS_OTP 2
+
+/* Fab-id information */
+#define DEFAULT_FAB 0x0 /**< Original/first fab used for this chip */
+#define CSM_FAB7 0x1 /**< CSM Fab7 chip */
+#define TSMC_FAB12 0x2 /**< TSMC Fab12/Fab14 chip */
+#define SMIC_FAB4 0x3 /**< SMIC Fab4 chip */
+
+/* bp_ind_access default timeout */
+#define BP_ACCESS_TO (500u * 1000u)
+
+extern uint16 BCMATTACHFN(si_fabid)(si_t *sih);
+extern uint16 BCMINITFN(si_chipid)(const si_t *sih);
+
+/*
+ * Build device path. Path size must be >= SI_DEVPATH_BUFSZ.
+ * The returned path is NUL-terminated and has a trailing '/'.
+ * Return 0 on success, nonzero otherwise.
+ */
+extern int si_devpath(const si_t *sih, char *path, int size);
+extern int si_devpath_pcie(const si_t *sih, char *path, int size);
+/* Read variable with prepending the devpath to the name */
+extern char *si_getdevpathvar(const si_t *sih, const char *name);
+extern int si_getdevpathintvar(const si_t *sih, const char *name);
+extern char *si_coded_devpathvar(const si_t *sih, char *varname, int var_len, const char *name);
+
+/* === HW PR WARs === */
+extern uint8 si_pcieclkreq(const si_t *sih, uint32 mask, uint32 val);
+extern uint32 si_pcielcreg(const si_t *sih, uint32 mask, uint32 val);
+extern uint8 si_pcieltrenable(const si_t *sih, uint32 mask, uint32 val);
+extern uint8 si_pcieobffenable(const si_t *sih, uint32 mask, uint32 val);
+extern uint32 si_pcieltr_reg(const si_t *sih, uint32 reg, uint32 mask, uint32 val);
+extern uint32 si_pcieltrspacing_reg(const si_t *sih, uint32 mask, uint32 val);
+extern uint32 si_pcieltrhysteresiscnt_reg(const si_t *sih, uint32 mask, uint32 val);
+extern void si_pcie_set_error_injection(const si_t *sih, uint32 mode);
+extern void si_pcie_set_L1substate(const si_t *sih, uint32 substate);
+#ifndef BCM_BOOTLOADER
+extern uint32 si_pcie_get_L1substate(const si_t *sih);
+#endif /* BCM_BOOTLOADER */
+extern void si_pci_down(const si_t *sih);
+extern void si_pci_up(const si_t *sih);
+extern void si_pci_sleep(const si_t *sih);
+extern void si_pcie_war_ovr_update(const si_t *sih, uint8 aspm);
+extern void si_pcie_power_save_enable(const si_t *sih, bool enable);
+extern int si_pci_fixcfg(si_t *sih);
+extern bool si_is_warmboot(void);
+
+extern void si_chipcontrl_restore(si_t *sih, uint32 val);
+extern uint32 si_chipcontrl_read(si_t *sih);
+extern void si_chipcontrl_srom4360(si_t *sih, bool on);
+extern void si_srom_clk_set(si_t *sih); /**< for chips with fast BP clock */
+extern void si_btc_enable_chipcontrol(si_t *sih);
+extern void si_pmu_avb_clk_set(si_t *sih, osl_t *osh, bool set_flag);
+/* === debug routines === */
+
+extern bool si_taclear(si_t *sih, bool details);
+
+#ifdef BCMDBG
+extern void si_view(si_t *sih, bool verbose);
+extern void si_viewall(si_t *sih, bool verbose);
+#endif /* BCMDBG */
+#if defined(BCMDBG) || defined(BCMDBG_DUMP) || defined(BCMDBG_PHYDUMP) || \
+ defined(WLTEST)
+struct bcmstrbuf;
+extern int si_dump_pcieinfo(const si_t *sih, struct bcmstrbuf *b);
+extern void si_dump_pmuregs(si_t *sih, struct bcmstrbuf *b);
+extern int si_dump_pcieregs(const si_t *sih, struct bcmstrbuf *b);
+#endif /* BCMDBG || BCMDBG_DUMP || BCMDBG_PHYDUMP || WLTEST */
+
+#if defined(BCMDBG) || defined(BCMDBG_DUMP) || defined(BCMDBG_PHYDUMP)
+extern void si_dump(const si_t *sih, struct bcmstrbuf *b);
+extern void si_ccreg_dump(si_t *sih, struct bcmstrbuf *b);
+extern void si_clkctl_dump(si_t *sih, struct bcmstrbuf *b);
+extern int si_gpiodump(si_t *sih, struct bcmstrbuf *b);
+
+extern void si_dumpregs(si_t *sih, struct bcmstrbuf *b);
+#endif /* BCMDBG || BCMDBG_DUMP || BCMDBG_PHYDUMP */
+
+extern uint32 si_ccreg(si_t *sih, uint32 offset, uint32 mask, uint32 val);
+extern uint32 si_pciereg(const si_t *sih, uint32 offset, uint32 mask, uint32 val, uint type);
+extern int si_bpind_access(si_t *sih, uint32 addr_high, uint32 addr_low,
+ int32* data, bool read, uint32 us_timeout);
+extern void sih_write_sraon(si_t *sih, int offset, int len, const uint32* data);
+#ifdef SR_DEBUG
+extern void si_dump_pmu(si_t *sih, void *pmu_var);
+extern void si_pmu_keep_on(const si_t *sih, int32 int_val);
+extern uint32 si_pmu_keep_on_get(const si_t *sih);
+extern uint32 si_power_island_set(si_t *sih, uint32 int_val);
+extern uint32 si_power_island_get(si_t *sih);
+#endif /* SR_DEBUG */
+
+extern uint32 si_pcieserdesreg(const si_t *sih, uint32 mdioslave, uint32 offset,
+ uint32 mask, uint32 val);
+extern void si_pcie_set_request_size(const si_t *sih, uint16 size);
+extern uint16 si_pcie_get_request_size(const si_t *sih);
+extern void si_pcie_set_maxpayload_size(const si_t *sih, uint16 size);
+extern uint16 si_pcie_get_maxpayload_size(const si_t *sih);
+extern uint16 si_pcie_get_ssid(const si_t *sih);
+extern uint32 si_pcie_get_bar0(const si_t *sih);
+extern int si_pcie_configspace_cache(const si_t *sih);
+extern int si_pcie_configspace_restore(const si_t *sih);
+extern int si_pcie_configspace_get(const si_t *sih, uint8 *buf, uint size);
+
+#ifndef BCMDONGLEHOST
+extern void si_muxenab(si_t *sih, uint32 w);
+extern uint32 si_clear_backplane_to(si_t *sih);
+extern void si_slave_wrapper_add(si_t *sih);
+
+#ifdef AXI_TIMEOUTS_NIC
+extern uint32 si_clear_backplane_to_fast(void *sih, void *addr);
+#endif /* AXI_TIMEOUTS_NIC */
+
+#if defined(AXI_TIMEOUTS) || defined(AXI_TIMEOUTS_NIC)
+extern uint32 si_clear_backplane_to_per_core(si_t *sih, uint coreid, uint coreunit, void *wrap);
+#endif /* AXI_TIMEOUTS || AXI_TIMEOUTS_NIC */
+#endif /* !BCMDONGLEHOST */
+
+extern uint32 si_findcoreidx_by_axiid(const si_t *sih, uint32 axiid);
+extern void si_wrapper_get_last_error(const si_t *sih, uint32 *error_status, uint32 *core,
+ uint32 *lo, uint32 *hi, uint32 *id);
+extern uint32 si_get_axi_timeout_reg(const si_t *sih);
+
+#ifdef AXI_TIMEOUTS_NIC
+extern const si_axi_error_info_t * si_get_axi_errlog_info(const si_t *sih);
+extern void si_reset_axi_errlog_info(const si_t * sih);
+#endif /* AXI_TIMEOUTS_NIC */
+
+extern void si_update_backplane_timeouts(const si_t *sih, bool enable, uint32 timeout, uint32 cid);
+
+#if defined(BCMDONGLEHOST)
+extern uint32 si_tcm_size(si_t *sih);
+extern bool si_has_flops(si_t *sih);
+#endif /* BCMDONGLEHOST */
+
+extern int si_set_sromctl(si_t *sih, uint32 value);
+extern uint32 si_get_sromctl(si_t *sih);
+
+extern uint32 si_gci_direct(si_t *sih, uint offset, uint32 mask, uint32 val);
+extern uint32 si_gci_indirect(si_t *sih, uint regidx, uint offset, uint32 mask, uint32 val);
+extern uint32 si_gci_output(si_t *sih, uint reg, uint32 mask, uint32 val);
+extern uint32 si_gci_input(si_t *sih, uint reg);
+extern uint32 si_gci_int_enable(si_t *sih, bool enable);
+extern void si_gci_reset(si_t *sih);
+#ifdef BCMLTECOEX
+extern void si_ercx_init(si_t *sih, uint32 ltecx_mux, uint32 ltecx_padnum,
+ uint32 ltecx_fnsel, uint32 ltecx_gcigpio);
+#endif /* BCMLTECOEX */
+#if defined(BCMLTECOEX) && !defined(WLTEST)
+extern int si_wci2_rxfifo_handler_register(si_t *sih, wci2_handler_t rx_cb, void *ctx);
+extern void si_wci2_rxfifo_handler_unregister(si_t *sih);
+#endif /* BCMLTECOEX && !WLTEST */
+extern void si_gci_seci_init(si_t *sih);
+extern void si_wci2_init(si_t *sih, uint8 baudrate, uint32 ltecx_mux, uint32 ltecx_padnum,
+ uint32 ltecx_fnsel, uint32 ltecx_gcigpio, uint32 xtalfreq);
+
+extern bool si_btcx_wci2_init(si_t *sih);
+
+extern void si_gci_set_functionsel(si_t *sih, uint32 pin, uint8 fnsel);
+extern uint32 si_gci_get_functionsel(si_t *sih, uint32 pin);
+extern void si_gci_clear_functionsel(si_t *sih, uint8 fnsel);
+extern uint8 si_gci_get_chipctrlreg_idx(uint32 pin, uint32 *regidx, uint32 *pos);
+extern uint32 si_gci_chipcontrol(si_t *sih, uint reg, uint32 mask, uint32 val);
+extern uint32 si_gci_chipstatus(si_t *sih, uint reg);
+extern uint8 si_enable_device_wake(si_t *sih, uint8 *wake_status, uint8 *cur_status);
+extern uint8 si_get_device_wake_opt(si_t *sih);
+extern void si_swdenable(si_t *sih, uint32 swdflag);
+extern uint8 si_enable_perst_wake(si_t *sih, uint8 *perst_wake_mask, uint8 *perst_cur_status);
+
+extern uint32 si_get_pmu_reg_addr(si_t *sih, uint32 offset);
+#define CHIPCTRLREG1 0x1
+#define CHIPCTRLREG2 0x2
+#define CHIPCTRLREG3 0x3
+#define CHIPCTRLREG4 0x4
+#define CHIPCTRLREG5 0x5
+#define MINRESMASKREG 0x618
+#define MAXRESMASKREG 0x61c
+#define CHIPCTRLADDR 0x650
+#define CHIPCTRLDATA 0x654
+#define RSRCTABLEADDR 0x620
+#define RSRCUPDWNTIME 0x628
+#define PMUREG_RESREQ_MASK 0x68c
+
+void si_update_masks(si_t *sih);
+void si_force_islanding(si_t *sih, bool enable);
+extern uint32 si_pmu_res_req_timer_clr(si_t *sih);
+extern void si_pmu_rfldo(si_t *sih, bool on);
+extern void si_pcie_ltr_war(const si_t *sih);
+extern void si_pcie_hw_LTR_war(const si_t *sih);
+extern void si_pcie_hw_L1SS_war(const si_t *sih);
+extern void si_pciedev_crwlpciegen2(const si_t *sih);
+extern void si_pcie_prep_D3(const si_t *sih, bool enter_D3);
+extern void si_pciedev_reg_pm_clk_period(const si_t *sih);
+extern void si_pcie_disable_oobselltr(const si_t *sih);
+extern uint32 si_raw_reg(const si_t *sih, uint32 reg, uint32 val, uint32 write_req);
+
+/* Macro to enable clock gating changes in different cores */
+#define MEM_CLK_GATE_BIT 5
+#define GCI_CLK_GATE_BIT 18
+
+#define USBAPP_CLK_BIT 0
+#define PCIE_CLK_BIT 3
+#define ARMCR4_DBG_CLK_BIT 4
+#define SAMPLE_SYNC_CLK_BIT 17
+#define PCIE_TL_CLK_BIT 18
+#define HQ_REQ_BIT 24
+#define PLL_DIV2_BIT_START 9
+#define PLL_DIV2_MASK (0x37 << PLL_DIV2_BIT_START)
+#define PLL_DIV2_DIS_OP (0x37 << PLL_DIV2_BIT_START)
+
+#define pmu_corereg(si, cc_idx, member, mask, val) \
+ (AOB_ENAB(si) ? \
+ si_pmu_corereg(si, si_findcoreidx(si, PMU_CORE_ID, 0), \
+ OFFSETOF(pmuregs_t, member), mask, val): \
+ si_pmu_corereg(si, cc_idx, OFFSETOF(chipcregs_t, member), mask, val))
+
+#define PMU_REG(si, member, mask, val) \
+ (AOB_ENAB(si) ? \
+ si_corereg(si, si_findcoreidx(si, PMU_CORE_ID, 0), \
+ OFFSETOF(pmuregs_t, member), mask, val): \
+ si_corereg(si, SI_CC_IDX, OFFSETOF(chipcregs_t, member), mask, val))
+
+/* Used only for the regs present in the pmu core and not present in the old cc core */
+#define PMU_REG_NEW(si, member, mask, val) \
+ si_corereg(si, si_findcoreidx(si, PMU_CORE_ID, 0), \
+ OFFSETOF(pmuregs_t, member), mask, val)
+
+#define GCI_REG(si, offset, mask, val) \
+ (AOB_ENAB(si) ? \
+ si_corereg(si, si_findcoreidx(si, GCI_CORE_ID, 0), \
+ offset, mask, val): \
+ si_corereg(si, SI_CC_IDX, offset, mask, val))
+
+/* Used only for the regs present in the gci core and not present in the old cc core */
+#define GCI_REG_NEW(si, member, mask, val) \
+ si_corereg(si, si_findcoreidx(si, GCI_CORE_ID, 0), \
+ OFFSETOF(gciregs_t, member), mask, val)
+
+#define LHL_REG(si, member, mask, val) \
+ si_corereg(si, si_findcoreidx(si, GCI_CORE_ID, 0), \
+ OFFSETOF(gciregs_t, member), mask, val)
+
+#define CHIPC_REG(si, member, mask, val) \
+ si_corereg(si, SI_CC_IDX, OFFSETOF(chipcregs_t, member), mask, val)
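+
+/* Illustrative sketch (example only, guarded out by a hypothetical macro):
+ * PMU_REG() picks the pmu core register file when AOB is present and falls
+ * back to the legacy chipcommon layout otherwise; mask 0 / val 0 turns the
+ * access into a pure read. The pmucontrol member name is assumed from the
+ * register layouts defined elsewhere.
+ */
+#ifdef SI_PMU_REG_EXAMPLE
+static uint si_read_pmucontrol(si_t *sih)
+{
+	return PMU_REG(sih, pmucontrol, 0, 0);
+}
+#endif /* SI_PMU_REG_EXAMPLE */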
+
+/* GCI Macros */
+#define ALLONES_32 0xFFFFFFFF
+#define GCI_CCTL_SECIRST_OFFSET 0 /**< SeciReset */
+#define GCI_CCTL_RSTSL_OFFSET 1 /**< ResetSeciLogic */
+#define GCI_CCTL_SECIEN_OFFSET 2 /**< EnableSeci */
+#define GCI_CCTL_FSL_OFFSET 3 /**< ForceSeciOutLow */
+#define GCI_CCTL_SMODE_OFFSET 4 /**< SeciOpMode, 6:4 */
+#define GCI_CCTL_US_OFFSET 7 /**< UpdateSeci */
+#define GCI_CCTL_BRKONSLP_OFFSET 8 /**< BreakOnSleep */
+#define GCI_CCTL_SILOWTOUT_OFFSET 9 /**< SeciInLowTimeout, 10:9 */
+#define GCI_CCTL_RSTOCC_OFFSET 11 /**< ResetOffChipCoex */
+#define GCI_CCTL_ARESEND_OFFSET 12 /**< AutoBTSigResend */
+#define GCI_CCTL_FGCR_OFFSET 16 /**< ForceGciClkReq */
+#define GCI_CCTL_FHCRO_OFFSET 17 /**< ForceHWClockReqOff */
+#define GCI_CCTL_FREGCLK_OFFSET 18 /**< ForceRegClk */
+#define GCI_CCTL_FSECICLK_OFFSET 19 /**< ForceSeciClk */
+#define GCI_CCTL_FGCA_OFFSET 20 /**< ForceGciClkAvail */
+#define GCI_CCTL_FGCAV_OFFSET 21 /**< ForceGciClkAvailValue */
+#define GCI_CCTL_SCS_OFFSET 24 /**< SeciClkStretch, 31:24 */
+#define GCI_CCTL_SCS 25 /* SeciClkStretch */
+
+#define GCI_MODE_UART 0x0
+#define GCI_MODE_SECI 0x1
+#define GCI_MODE_BTSIG 0x2
+#define GCI_MODE_GPIO 0x3
+#define GCI_MODE_MASK 0x7
+
+#define GCI_CCTL_LOWTOUT_DIS 0x0
+#define GCI_CCTL_LOWTOUT_10BIT 0x1
+#define GCI_CCTL_LOWTOUT_20BIT 0x2
+#define GCI_CCTL_LOWTOUT_30BIT 0x3
+#define GCI_CCTL_LOWTOUT_MASK 0x3
+
+#define GCI_CCTL_SCS_DEF 0x19
+#define GCI_CCTL_SCS_MASK 0xFF
+
+#define GCI_SECIIN_MODE_OFFSET 0
+#define GCI_SECIIN_GCIGPIO_OFFSET 4
+#define GCI_SECIIN_RXID2IP_OFFSET 8
+
+#define GCI_SECIIN_MODE_MASK 0x7
+#define GCI_SECIIN_GCIGPIO_MASK 0xF
+
+#define GCI_SECIOUT_MODE_OFFSET 0
+#define GCI_SECIOUT_GCIGPIO_OFFSET 4
+#define GCI_SECIOUT_LOOPBACK_OFFSET 8
+#define GCI_SECIOUT_SECIINRELATED_OFFSET 16
+
+#define GCI_SECIOUT_MODE_MASK 0x7
+#define GCI_SECIOUT_GCIGPIO_MASK 0xF
+#define GCI_SECIOUT_SECIINRELATED_MASK 0x1
+
+#define GCI_SECIOUT_SECIINRELATED 0x1
+
+#define GCI_SECIAUX_RXENABLE_OFFSET 0
+#define GCI_SECIFIFO_RXENABLE_OFFSET 16
+
+#define GCI_SECITX_ENABLE_OFFSET 0
+
+#define GCI_GPIOCTL_INEN_OFFSET 0
+#define GCI_GPIOCTL_OUTEN_OFFSET 1
+#define GCI_GPIOCTL_PDN_OFFSET 4
+
+#define GCI_GPIOIDX_OFFSET 16
+
+#define GCI_LTECX_SECI_ID 0 /**< SECI port for LTECX */
+#define GCI_LTECX_TXCONF_EN_OFFSET 2
+#define GCI_LTECX_PRISEL_EN_OFFSET 3
+
+/* To access per GCI bit registers */
+#define GCI_REG_WIDTH 32
+
+/* number of event summary bits */
+#define GCI_EVENT_NUM_BITS 32
+
+/* gci event bits per core */
+#define GCI_EVENT_BITS_PER_CORE 4
+#define GCI_EVENT_HWBIT_1 1
+#define GCI_EVENT_HWBIT_2 2
+#define GCI_EVENT_SWBIT_1 3
+#define GCI_EVENT_SWBIT_2 4
+
+#define GCI_MBDATA_TOWLAN_POS 96
+#define GCI_MBACK_TOWLAN_POS 104
+#define GCI_WAKE_TOWLAN_PO 112
+#define GCI_SWREADY_POS 120
+
+/* GCI bit positions */
+/* GCI [127:000] = WLAN [127:0] */
+#define GCI_WLAN_IP_ID 0
+#define GCI_WLAN_BEGIN 0
+#define GCI_WLAN_PRIO_POS (GCI_WLAN_BEGIN + 4)
+#define GCI_WLAN_PERST_POS (GCI_WLAN_BEGIN + 15)
+
+/* GCI [255:128] = BT [127:0] */
+#define GCI_BT_IP_ID 1
+#define GCI_BT_BEGIN 128
+#define GCI_BT_MBDATA_TOWLAN_POS (GCI_BT_BEGIN + GCI_MBDATA_TOWLAN_POS)
+#define GCI_BT_MBACK_TOWLAN_POS (GCI_BT_BEGIN + GCI_MBACK_TOWLAN_POS)
+#define GCI_BT_WAKE_TOWLAN_POS (GCI_BT_BEGIN + GCI_WAKE_TOWLAN_PO)
+#define GCI_BT_SWREADY_POS (GCI_BT_BEGIN + GCI_SWREADY_POS)
+
+/* GCI [639:512] = LTE [127:0] */
+#define GCI_LTE_IP_ID 4
+#define GCI_LTE_BEGIN 512
+#define GCI_LTE_FRAMESYNC_POS (GCI_LTE_BEGIN + 0)
+#define GCI_LTE_RX_POS (GCI_LTE_BEGIN + 1)
+#define GCI_LTE_TX_POS (GCI_LTE_BEGIN + 2)
+#define GCI_LTE_WCI2TYPE_POS (GCI_LTE_BEGIN + 48)
+#define GCI_LTE_WCI2TYPE_MASK 7
+#define GCI_LTE_AUXRXDVALID_POS (GCI_LTE_BEGIN + 56)
+
+/* Reg Index corresponding to ECI bit no x of ECI space */
+#define GCI_REGIDX(x) ((x)/GCI_REG_WIDTH)
+/* Bit offset of ECI bit no x in 32-bit words */
+#define GCI_BITOFFSET(x) ((x)%GCI_REG_WIDTH)
+
+#define GCI_ECI_HW0(ip_id) (((ip_id) * GCI_EVENT_BITS_PER_CORE) + 0)
+#define GCI_ECI_HW1(ip_id) (((ip_id) * GCI_EVENT_BITS_PER_CORE) + 1)
+#define GCI_ECI_SW0(ip_id) (((ip_id) * GCI_EVENT_BITS_PER_CORE) + 2)
+#define GCI_ECI_SW1(ip_id) (((ip_id) * GCI_EVENT_BITS_PER_CORE) + 3)
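+
+/* Illustrative sketch (example only, guarded out by a hypothetical macro):
+ * turning a flat GCI/ECI bit position such as GCI_BT_SWREADY_POS into a
+ * 32-bit word index plus bit offset. Assumes si_gci_input() takes the word
+ * index computed by GCI_REGIDX().
+ */
+#ifdef SI_GCI_BITPOS_EXAMPLE
+static uint32 si_gci_read_bit(si_t *sih, uint bitpos)
+{
+	uint32 word = si_gci_input(sih, GCI_REGIDX(bitpos));
+	return (word >> GCI_BITOFFSET(bitpos)) & 1u;
+}
+#endif /* SI_GCI_BITPOS_EXAMPLE */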
+
+/* BT SMEM Control Register 0 */
+#define GCI_BT_SMEM_CTRL0_SUBCORE_ENABLE_PKILL (1 << 28)
+
+/* GCI RXFIFO Common control */
+#define GCI_RXFIFO_CTRL_AUX_EN 0xFF
+#define GCI_RXFIFO_CTRL_FIFO_EN 0xFF00
+#define GCI_RXFIFO_CTRL_FIFO_TYPE2_EN 0x400
+
+/* End - GCI Macros */
+
+extern void si_pll_sr_reinit(si_t *sih);
+extern void si_pll_closeloop(si_t *sih);
+extern uint si_num_slaveports(const si_t *sih, uint coreid);
+extern uint32 si_get_slaveport_addr(si_t *sih, uint spidx, uint baidx,
+ uint core_id, uint coreunit);
+extern uint32 si_get_d11_slaveport_addr(si_t *sih, uint spidx,
+ uint baidx, uint coreunit);
+void si_introff(const si_t *sih, bcm_int_bitmask_t *intr_val);
+void si_intrrestore(const si_t *sih, bcm_int_bitmask_t *intr_val);
+bool si_get_nvram_rfldo3p3_war(const si_t *sih);
+void si_nvram_res_masks(const si_t *sih, uint32 *min_mask, uint32 *max_mask);
+extern uint32 si_xtalfreq(const si_t *sih);
+extern uint8 si_getspurmode(const si_t *sih);
+extern uint32 si_get_openloop_dco_code(const si_t *sih);
+extern void si_set_openloop_dco_code(si_t *sih, uint32 openloop_dco_code);
+extern uint32 si_wrapper_dump_buf_size(const si_t *sih);
+extern uint32 si_wrapper_dump_binary(const si_t *sih, uchar *p);
+extern uint32 si_wrapper_dump_last_timeout(const si_t *sih, uint32 *error, uint32 *core,
+ uint32 *ba, uchar *p);
+
+/* SR Power Control */
+extern uint32 si_srpwr_request(const si_t *sih, uint32 mask, uint32 val);
+extern uint32 si_srpwr_request_on_rev80(si_t *sih, uint32 mask, uint32 val,
+ uint32 ucode_awake);
+extern uint32 si_srpwr_stat_spinwait(const si_t *sih, uint32 mask, uint32 val);
+extern uint32 si_srpwr_stat(si_t *sih);
+extern uint32 si_srpwr_domain(si_t *sih);
+extern uint32 si_srpwr_domain_all_mask(const si_t *sih);
+extern uint8 si_srpwr_domain_wl(si_t *sih);
+extern uint32 si_srpwr_bt_status(si_t *sih);
+/* SR Power Control */
+bool si_srpwr_cap(si_t *sih);
+#define SRPWR_CAP(sih) (si_srpwr_cap(sih))
+
+#ifdef BCMSRPWR
+ extern bool _bcmsrpwr;
+#if defined(ROM_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD)
+ #define SRPWR_ENAB() (_bcmsrpwr)
+#elif defined(BCMSRPWR_DISABLED)
+ #define SRPWR_ENAB() (0)
+#else
+ #define SRPWR_ENAB() (1)
+#endif
+#else
+ #define SRPWR_ENAB() (0)
+#endif /* BCMSRPWR */
+
+/*
+ * Multi-BackPlane architecture. Each can power up/down independently.
+ * Common backplane: shared between BT and WL
+ * ChipC, PCIe, GCI, PMU, SRs
+ * HW powers up as needed
+ * WL BackPlane (WLBP):
+ * ARM, TCM, Main, Aux
+ * Host needs to power up
+ */
+#define MULTIBP_CAP(sih) (BCM4378_CHIP(sih->chip) || \
+ BCM4387_CHIP(sih->chip) || BCM4388_CHIP(sih->chip) || \
+ BCM4389_CHIP(sih->chip) || BCM4385_CHIP(sih->chip) || \
+ BCM4376_CHIP(sih->chip) || BCM4397_CHIP(sih->chip))
+#define MULTIBP_ENAB(sih) ((sih) && (sih)->_multibp_enable)
+
+#ifdef DONGLEBUILD
+extern bool si_check_enable_backplane_log(const si_t *sih);
+#endif /* DONGLEBUILD */
+
+uint32 si_enum_base(uint devid);
+
+/* Default ARM PLL freq 4369/4368 */
+#define ARMPLL_FREQ_400MHZ (400u)
+#define ARMPLL_FREQ_800MHZ (800u)
+/* ARM PLL freq computed using chip defaults is 1002.8235 MHz */
+#define ARMPLL_FREQ_1000MHZ (1003u)
+
+extern uint8 si_lhl_ps_mode(const si_t *sih);
+extern uint32 si_get_armpllclkfreq(const si_t *sih);
+uint8 si_get_ccidiv(const si_t *sih);
+extern uint8 si_hib_ext_wakeup_isenab(const si_t *sih);
+
+#ifdef UART_TRAP_DBG
+void si_dump_APB_Bridge_registers(const si_t *sih);
+#endif /* UART_TRAP_DBG */
+void si_force_clocks(const si_t *sih, uint clock_state);
+
+#if defined(BCMSDIODEV_ENABLED) && defined(ATE_BUILD)
+bool si_chipcap_sdio_ate_only(const si_t *sih);
+#endif /* BCMSDIODEV_ENABLED && ATE_BUILD */
+
+/* Indicates to siutils how the PCIe BAR0 is mapped.
+ * Here is the current scheme; all ids use BAR0:
+ *	id	enum		wrapper
+ *	====	=========	=========
+ *	0	0000-0FFF	1000-1FFF
+ *	1	4000-4FFF	5000-5FFF
+ *	2	9000-9FFF	A000-AFFF
+ *	>= 3	not supported
+ */
+void si_set_slice_id(si_t *sih, uint8 slice);
+uint8 si_get_slice_id(const si_t *sih);
+
+/* query the d11 core type */
+#define D11_CORE_TYPE_NORM 0u
+#define D11_CORE_TYPE_SCAN 1u
+uint si_core_d11_type(si_t *sih, uint coreunit);
+
+/* check if the package option allows the d11 core */
+bool si_pkgopt_d11_allowed(si_t *sih, uint coreunit);
+
+/* return if scan core is present */
+bool si_scan_core_present(const si_t *sih);
+void si_configure_pwrthrottle_gpio(si_t *sih, uint8 pwrthrottle_gpio_pin);
+void si_configure_onbody_gpio(si_t *sih, uint8 onbody_gpio_pin);
+
+/* check if HWA core present */
+bool si_hwa_present(const si_t *sih);
+
+/* check if SYSMEM present */
+bool si_sysmem_present(const si_t *sih);
+
+/* return BT state */
+bool si_btc_bt_status_in_reset(si_t *sih);
+bool si_btc_bt_status_in_pds(si_t *sih);
+int si_btc_bt_pds_wakeup_force(si_t *sih, bool force);
+
+/* RFFE RFEM Functions */
+#ifndef BCMDONGLEHOST
+void si_rffe_rfem_init(si_t *sih);
+void si_rffe_set_debug_mode(si_t *sih, bool enable);
+bool si_rffe_get_debug_mode(si_t *sih);
+int si_rffe_set_elnabyp_mode(si_t *sih, uint8 mode);
+int8 si_rffe_get_elnabyp_mode(si_t *sih);
+int si_rffe_rfem_read(si_t *sih, uint8 dev_id, uint8 antenna, uint16 reg_addr, uint32 *val);
+int si_rffe_rfem_write(si_t *sih, uint8 dev_id, uint8 antenna, uint16 reg_addr, uint32 data);
+#endif /* !BCMDONGLEHOST */
+extern void si_jtag_udr_pwrsw_main_toggle(si_t *sih, bool on);
+extern int si_pmu_res_state_pwrsw_main_wait(si_t *sih);
+extern uint32 si_d11_core_sssr_addr(si_t *sih, uint unit, uint32 *sssr_dmp_sz);
+
+#ifdef USE_LHL_TIMER
+/* Get current HIB time API */
+uint32 si_cur_hib_time(si_t *sih);
+#endif
+
+#endif /* _siutils_h_ */
diff --git a/bcmdhd.101.10.361.x/include/spid.h b/bcmdhd.101.10.361.x/include/spid.h
new file mode 100755
index 0000000..0fbbb23
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/spid.h
@@ -0,0 +1,164 @@
+/*
+ * SPI device spec header file
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _SPI_H
+#define _SPI_H
+
+/*
+ * Brcm SPI Device Register Map.
+ *
+ */
+
+typedef volatile struct {
+ uint8 config; /* 0x00, len, endian, clock, speed, polarity, wakeup */
+ uint8 response_delay; /* 0x01, read response delay in bytes (corerev < 3) */
+ uint8 status_enable; /* 0x02, status-enable, intr with status, response_delay
+ * function selection, command/data error check
+ */
+ uint8 reset_bp; /* 0x03, reset on wlan/bt backplane reset (corerev >= 1) */
+ uint16 intr_reg; /* 0x04, Intr status register */
+ uint16 intr_en_reg; /* 0x06, Intr mask register */
+ uint32 status_reg; /* 0x08, RO, Status bits of last spi transfer */
+ uint16 f1_info_reg; /* 0x0c, RO, enabled, ready for data transfer, blocksize */
+ uint16 f2_info_reg; /* 0x0e, RO, enabled, ready for data transfer, blocksize */
+ uint16 f3_info_reg; /* 0x10, RO, enabled, ready for data transfer, blocksize */
+ uint32 test_read; /* 0x14, RO 0xfeedbead signature */
+ uint32 test_rw; /* 0x18, RW */
+ uint8 resp_delay_f0; /* 0x1c, read resp delay bytes for F0 (corerev >= 3) */
+ uint8 resp_delay_f1; /* 0x1d, read resp delay bytes for F1 (corerev >= 3) */
+ uint8 resp_delay_f2; /* 0x1e, read resp delay bytes for F2 (corerev >= 3) */
+ uint8 resp_delay_f3; /* 0x1f, read resp delay bytes for F3 (corerev >= 3) */
+} spi_regs_t;
+
+/* SPI device register offsets */
+#define SPID_CONFIG 0x00
+#define SPID_RESPONSE_DELAY 0x01
+#define SPID_STATUS_ENABLE 0x02
+#define SPID_RESET_BP 0x03 /* (corerev >= 1) */
+#define SPID_INTR_REG 0x04 /* 16 bits - Interrupt status */
+#define SPID_INTR_EN_REG 0x06 /* 16 bits - Interrupt mask */
+#define SPID_STATUS_REG 0x08 /* 32 bits */
+#define SPID_F1_INFO_REG 0x0C /* 16 bits */
+#define SPID_F2_INFO_REG 0x0E /* 16 bits */
+#define SPID_F3_INFO_REG 0x10 /* 16 bits */
+#define SPID_TEST_READ 0x14 /* 32 bits */
+#define SPID_TEST_RW 0x18 /* 32 bits */
+#define SPID_RESP_DELAY_F0 0x1c /* 8 bits (corerev >= 3) */
+#define SPID_RESP_DELAY_F1 0x1d /* 8 bits (corerev >= 3) */
+#define SPID_RESP_DELAY_F2 0x1e /* 8 bits (corerev >= 3) */
+#define SPID_RESP_DELAY_F3 0x1f /* 8 bits (corerev >= 3) */
+
+/* Bit masks for SPID_CONFIG device register */
+#define WORD_LENGTH_32 0x1 /* 0/1 16/32 bit word length */
+#define ENDIAN_BIG 0x2 /* 0/1 Little/Big Endian */
+#define CLOCK_PHASE 0x4 /* 0/1 clock phase delay */
+#define CLOCK_POLARITY 0x8 /* 0/1 Idle state clock polarity is low/high */
+#define HIGH_SPEED_MODE 0x10 /* 1/0 High Speed mode / Normal mode */
+#define INTR_POLARITY 0x20 /* 1/0 Interrupt active polarity is high/low */
+#define WAKE_UP 0x80 /* 0/1 Wake-up command from Host to WLAN */
+
+/* Bit mask for SPID_RESPONSE_DELAY device register */
+#define RESPONSE_DELAY_MASK 0xFF /* Configurable read response delay in multiples of 8 bits */
+
+/* Bit mask for SPID_STATUS_ENABLE device register */
+#define STATUS_ENABLE 0x1 /* 1/0 Status sent/not sent to host after read/write */
+#define INTR_WITH_STATUS 0x2 /* 0/1 Do-not / do-interrupt if status is sent */
+#define RESP_DELAY_ALL 0x4 /* Apply resp delay to F1 only or to all functions' reads */
+#define DWORD_PKT_LEN_EN 0x8 /* Packet len denoted in dwords instead of bytes */
+#define CMD_ERR_CHK_EN 0x20 /* Command error check enable */
+#define DATA_ERR_CHK_EN 0x40 /* Data error check enable */
+
+/* Bit mask for SPID_RESET_BP device register */
+#define RESET_ON_WLAN_BP_RESET 0x4 /* enable reset for WLAN backplane */
+#define RESET_ON_BT_BP_RESET 0x8 /* enable reset for BT backplane */
+#define RESET_SPI 0x80 /* reset the above enabled logic */
+
+/* Bit mask for SPID_INTR_REG device register */
+#define DATA_UNAVAILABLE 0x0001 /* Requested data not available; Clear by writing a "1" */
+#define F2_F3_FIFO_RD_UNDERFLOW 0x0002
+#define F2_F3_FIFO_WR_OVERFLOW 0x0004
+#define COMMAND_ERROR 0x0008 /* Cleared by writing 1 */
+#define DATA_ERROR 0x0010 /* Cleared by writing 1 */
+#define F2_PACKET_AVAILABLE 0x0020
+#define F3_PACKET_AVAILABLE 0x0040
+#define F1_OVERFLOW 0x0080 /* Due to last write. Backplane has pending write requests */
+#define MISC_INTR0 0x0100
+#define MISC_INTR1 0x0200
+#define MISC_INTR2 0x0400
+#define MISC_INTR3 0x0800
+#define MISC_INTR4 0x1000
+#define F1_INTR 0x2000
+#define F2_INTR 0x4000
+#define F3_INTR 0x8000
+
+/* Bit mask for 32bit SPID_STATUS_REG device register */
+#define STATUS_DATA_NOT_AVAILABLE 0x00000001
+#define STATUS_UNDERFLOW 0x00000002
+#define STATUS_OVERFLOW 0x00000004
+#define STATUS_F2_INTR 0x00000008
+#define STATUS_F3_INTR 0x00000010
+#define STATUS_F2_RX_READY 0x00000020
+#define STATUS_F3_RX_READY 0x00000040
+#define STATUS_HOST_CMD_DATA_ERR 0x00000080
+#define STATUS_F2_PKT_AVAILABLE 0x00000100
+#define STATUS_F2_PKT_LEN_MASK 0x000FFE00
+#define STATUS_F2_PKT_LEN_SHIFT 9
+#define STATUS_F3_PKT_AVAILABLE 0x00100000
+#define STATUS_F3_PKT_LEN_MASK 0xFFE00000
+#define STATUS_F3_PKT_LEN_SHIFT 21
+
+/* Bit mask for 16 bits SPID_F1_INFO_REG device register */
+#define F1_ENABLED 0x0001
+#define F1_RDY_FOR_DATA_TRANSFER 0x0002
+#define F1_MAX_PKT_SIZE 0x01FC
+
+/* Bit mask for 16 bits SPID_F2_INFO_REG device register */
+#define F2_ENABLED 0x0001
+#define F2_RDY_FOR_DATA_TRANSFER 0x0002
+#define F2_MAX_PKT_SIZE 0x3FFC
+
+/* Bit mask for 16 bits SPID_F3_INFO_REG device register */
+#define F3_ENABLED 0x0001
+#define F3_RDY_FOR_DATA_TRANSFER 0x0002
+#define F3_MAX_PKT_SIZE 0x3FFC
+
+/* Bit mask for 32 bits SPID_TEST_READ device register read in 16bit LE mode */
+#define TEST_RO_DATA_32BIT_LE 0xFEEDBEAD
+
+/* Maximum number of I/O funcs */
+#define SPI_MAX_IOFUNCS 4
+
+#define SPI_MAX_PKT_LEN (2048*4)
+
+/* Misc defines */
+#define SPI_FUNC_0 0
+#define SPI_FUNC_1 1
+#define SPI_FUNC_2 2
+#define SPI_FUNC_3 3
+
+/* In gSPI mode the device was observed to take almost 110 ms to come up, */
+/* so poll the register every 20 ms, up to 100 times; 2 seconds is ample */
+#define WAIT_F2RXFIFORDY 100
+#define WAIT_F2RXFIFORDY_DELAY 20
+
+#endif /* _SPI_H */
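
As a worked example of the status-word layout above, a host-side helper can
unpack F2 packet availability and length from a 32-bit gSPI status word read
at SPID_STATUS_REG; a sketch against the masks just defined, not code taken
from the driver:

    #include <typedefs.h>
    #include <spid.h>

    /* Returns TRUE and fills *len (bits [19:9]) if an F2 packet is pending. */
    static bool f2_pkt_info(uint32 status, uint16 *len)
    {
        if (!(status & STATUS_F2_PKT_AVAILABLE))
            return FALSE;
        *len = (uint16)((status & STATUS_F2_PKT_LEN_MASK) >>
                        STATUS_F2_PKT_LEN_SHIFT);
        return TRUE;
    }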
diff --git a/bcmdhd.101.10.361.x/include/trxhdr.h b/bcmdhd.101.10.361.x/include/trxhdr.h
new file mode 100755
index 0000000..5af956c
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/trxhdr.h
@@ -0,0 +1,93 @@
+/*
+ * TRX image file header format.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _TRX_HDR_H
+#define _TRX_HDR_H
+
+#include <typedefs.h>
+
+#define TRX_MAGIC 0x30524448 /* "HDR0" */
+#define TRX_MAX_LEN 0x3B0000 /* Max length */
+#define TRX_NO_HEADER 1 /* Do not write TRX header */
+#define TRX_GZ_FILES 0x2 /* Contains up to TRX_MAX_OFFSET individual gzip files */
+#define TRX_EMBED_UCODE 0x8 /* Trx contains embedded ucode image */
+#define TRX_ROMSIM_IMAGE 0x10 /* Trx contains ROM simulation image */
+#define TRX_UNCOMP_IMAGE 0x20 /* Trx contains uncompressed rtecdc.bin image */
+#define TRX_BOOTLOADER 0x40 /* the image is a bootloader */
+
+#define TRX_V1 1
+#define TRX_V1_MAX_OFFSETS 3 /* V1: Max number of individual files */
+
+#ifndef BCMTRXV2
+#define TRX_VERSION TRX_V1 /* Version 1 */
+#define TRX_MAX_OFFSET TRX_V1_MAX_OFFSETS
+#endif
+
+/* BMAC host drivers/applications such as bcmdl need to support both Ver 1 and
+ * Ver 2 of the trx header. To keep it generic, the trx_header structure is
+ * defined as below, where the size of the "offsets" field varies with the TRX
+ * version. The BMAC host driver and bcmdl have been updated to support TRXV2.
+ * To avoid breaking applications such as "dhdl" that have not yet been
+ * enhanced for TRXV2, the new macro and structure definitions take effect
+ * only when BCMTRXV2 is defined.
+ */
+struct trx_header {
+ uint32 magic; /* "HDR0" */
+ uint32 len; /* Length of file including header */
+ uint32 crc32; /* 32-bit CRC from flag_version to end of file */
+ uint32 flag_version; /* 0:15 flags, 16:31 version */
+#ifndef BCMTRXV2
+ uint32 offsets[TRX_MAX_OFFSET]; /* Offsets of partitions from start of header */
+#else
+ uint32 offsets[1]; /* Offsets of partitions from start of header */
+#endif
+};
+
+#ifdef BCMTRXV2
+#define TRX_VERSION TRX_V2 /* Version 2 */
+#define TRX_MAX_OFFSET TRX_V2_MAX_OFFSETS
+
+#define TRX_V2 2
+/* V2: Max number of individual files
+ * To support SDR signature + Config data region
+ */
+#define TRX_V2_MAX_OFFSETS 5
+#define SIZEOF_TRXHDR_V1 (sizeof(struct trx_header)+(TRX_V1_MAX_OFFSETS-1)*sizeof(uint32))
+#define SIZEOF_TRXHDR_V2 (sizeof(struct trx_header)+(TRX_V2_MAX_OFFSETS-1)*sizeof(uint32))
+#ifdef IL_BIGENDIAN
+#define TRX_VER(trx) (ltoh32((trx)->flag_version>>16))
+#else
+#define TRX_VER(trx) ((trx)->flag_version>>16)
+#endif
+#define ISTRX_V1(trx) (TRX_VER(trx) == TRX_V1)
+#define ISTRX_V2(trx) (TRX_VER(trx) == TRX_V2)
+/* For V2, return the V2 header size; otherwise return the V1 size */
+#define SIZEOF_TRX(trx) (ISTRX_V2(trx) ? SIZEOF_TRXHDR_V2: SIZEOF_TRXHDR_V1)
+#else
+#define SIZEOF_TRX(trx) (sizeof(struct trx_header))
+#endif /* BCMTRXV2 */
+
+/* Compatibility */
+typedef struct trx_header TRXHDR, *PTRXHDR;
+
+#endif /* _TRX_HDR_H */
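
To illustrate the version handling above, a loader can validate the magic and
pick the header size with SIZEOF_TRX(); a sketch assuming a little-endian host
(on big-endian hosts the fields would need ltoh32() conversion first):

    #include <typedefs.h>
    #include <trxhdr.h>

    /* Return the byte offset of the image payload, or -1 on a bad header. */
    static int trx_payload_offset(const struct trx_header *trx)
    {
        if (trx->magic != TRX_MAGIC)
            return -1;                  /* not "HDR0" */
    #ifdef BCMTRXV2
        if (!ISTRX_V1(trx) && !ISTRX_V2(trx))
            return -1;                  /* unknown version */
    #endif
        return (int)SIZEOF_TRX(trx);    /* V1 or V2 header length */
    }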
diff --git a/bcmdhd.101.10.361.x/include/typedefs.h b/bcmdhd.101.10.361.x/include/typedefs.h
new file mode 100755
index 0000000..6a25130
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/typedefs.h
@@ -0,0 +1,408 @@
+/*
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _TYPEDEFS_H_
+#define _TYPEDEFS_H_
+
+#if (!defined(EDK_RELEASE_VERSION) || (EDK_RELEASE_VERSION < 0x00020000)) || \
+ !defined(BWL_NO_INTERNAL_STDLIB_SUPPORT)
+
+#ifdef SITE_TYPEDEFS
+
+/*
+ * Define SITE_TYPEDEFS in the compile to include a site-specific
+ * typedef file "site_typedefs.h".
+ *
+ * If SITE_TYPEDEFS is not defined, then the code section below makes
+ * inferences about the compile environment based on defined symbols and
+ * possibly compiler pragmas.
+ *
+ * Following these two sections is the Default Typedefs section.
+ * This section is only processed if USE_TYPEDEF_DEFAULTS is
+ * defined. This section has a default set of typedefs and a few
+ * preprocessor symbols (TRUE, FALSE, NULL, ...).
+ */
+
+#include "site_typedefs.h"
+
+#else
+
+/*
+ * Infer the compile environment based on preprocessor symbols and pragmas.
+ * Override type definitions as needed, and include configuration-dependent
+ * header files to define types.
+ */
+
+#ifdef __cplusplus
+
+#define TYPEDEF_BOOL
+#ifndef FALSE
+#define FALSE false
+#endif
+#ifndef TRUE
+#define TRUE true
+#endif
+
+#else /* ! __cplusplus */
+
+#if defined(_WIN32)
+
+#define TYPEDEF_BOOL
+typedef unsigned char bool; /* consistent w/BOOL */
+
+#endif /* _WIN32 */
+
+#endif /* ! __cplusplus */
+
+#if defined(EFI) && !defined(EFI_WINBLD) && !defined(__size_t__)
+typedef long unsigned int size_t;
+#endif /* EFI */
+
+#if !defined(TYPEDEF_UINTPTR)
+#if defined(_WIN64) && !defined(EFI)
+/* use the Windows ULONG_PTR type when compiling for 64 bit */
+#include <basetsd.h>
+#define TYPEDEF_UINTPTR
+typedef ULONG_PTR uintptr;
+#elif defined(__LP64__)
+#define TYPEDEF_UINTPTR
+typedef unsigned long long int uintptr;
+#endif
+#endif /* TYPEDEF_UINTPTR */
+
+#if defined(_RTE_)
+#define _NEED_SIZE_T_
+#endif
+
+/* float_t types conflict with the same typedefs from the standard ANSI-C
+** math.h header file. Don't re-typedef them here.
+*/
+
+#if defined(MACOSX)
+#define TYPEDEF_FLOAT_T
+#endif /* MACOSX */
+
+#if defined(_NEED_SIZE_T_)
+typedef long unsigned int size_t;
+#endif
+
+#ifdef _MSC_VER /* Microsoft C */
+#define TYPEDEF_INT64
+#define TYPEDEF_UINT64
+typedef signed __int64 int64;
+typedef unsigned __int64 uint64;
+#endif
+
+#if defined(__sparc__)
+#define TYPEDEF_ULONG
+#endif
+
+#if defined(__linux__) && !defined(EFI)
+/*
+ * If this is either a Linux hybrid build or the per-port code of a hybrid build
+ * then use the Linux header files to get some of the typedefs. Otherwise, define
+ * them entirely in this file. We can't always define the types because we get
+ * a duplicate typedef error; there is no way to "undefine" a typedef.
+ * We know when it's per-port code because each file defines LINUX_PORT at the top.
+ */
+#define TYPEDEF_UINT
+#ifndef TARGETENV_android
+#define TYPEDEF_USHORT
+#define TYPEDEF_ULONG
+#endif /* TARGETENV_android */
+#ifdef __KERNEL__
+#include <linux/version.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19))
+#define TYPEDEF_BOOL
+#endif /* >= 2.6.19 */
+/* special detection for 2.6.18-128.7.1.0.1.el5 */
+#if (LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 18))
+#include <linux/compiler.h>
+#ifdef noinline_for_stack
+#define TYPEDEF_BOOL
+#endif
+#endif /* == 2.6.18 */
+#endif /* __KERNEL__ */
+#endif /* linux && !EFI */
+
+#if !defined(__linux__) && !defined(_WIN32) && \
+ !defined(_RTE_) && !defined(__DJGPP__) && \
+ !defined(__BOB__) && !defined(EFI)
+#define TYPEDEF_UINT
+#define TYPEDEF_USHORT
+#endif
+
+/* Do not support the (u)int64 types with strict ANSI for GNU C */
+#if defined(__GNUC__) && defined(__STRICT_ANSI__)
+#define TYPEDEF_INT64
+#define TYPEDEF_UINT64
+#endif /* defined(__GNUC__) && defined(__STRICT_ANSI__) */
+
+/* ICL accepts unsigned 64 bit type only, and complains in ANSI mode
+ * for signed or unsigned
+ */
+#if defined(__ICL)
+
+#define TYPEDEF_INT64
+
+#if defined(__STDC__)
+#define TYPEDEF_UINT64
+#endif
+
+#endif /* __ICL */
+
+#if !defined(_WIN32) && !defined(_RTE_) && \
+ !defined(__DJGPP__) && !defined(__BOB__) && !defined(EFI)
+
+/* pick up ushort & uint from standard types.h */
+#if defined(__linux__) && defined(__KERNEL__)
+
+/* See note above */
+#ifdef USER_MODE
+#include <sys/types.h>
+#else
+#include <linux/types.h> /* sys/types.h and linux/types.h are oil and water */
+#endif /* USER_MODE */
+
+#else
+
+#include <sys/types.h>
+
+#endif /* linux && __KERNEL__ */
+
+#endif /* !_WIN32 && !_RTE_ && !__DJGPP__ */
+
+/* use the default typedefs in the next section of this file */
+#define USE_TYPEDEF_DEFAULTS
+
+#endif /* SITE_TYPEDEFS */
+
+/*
+ * Default Typedefs
+ */
+
+#ifdef USE_TYPEDEF_DEFAULTS
+#undef USE_TYPEDEF_DEFAULTS
+
+#ifndef TYPEDEF_BOOL
+typedef /* @abstract@ */ unsigned char bool;
+#endif /* TYPEDEF_BOOL */
+
+/* define uchar, ushort, uint, ulong */
+
+#ifndef TYPEDEF_UCHAR
+typedef unsigned char uchar;
+#endif
+
+#ifndef TYPEDEF_USHORT
+typedef unsigned short ushort;
+#endif
+
+#ifndef TYPEDEF_UINT
+typedef unsigned int uint;
+#endif
+
+#ifndef TYPEDEF_ULONG
+typedef unsigned long ulong;
+#endif
+
+/* define [u]int8/16/32/64, uintptr */
+
+#ifndef TYPEDEF_UINT8
+typedef unsigned char uint8;
+#endif
+
+#ifndef TYPEDEF_UINT16
+typedef unsigned short uint16;
+#endif
+
+#ifndef TYPEDEF_UINT32
+typedef unsigned int uint32;
+#endif
+
+#ifndef TYPEDEF_UINT64
+typedef unsigned long long uint64;
+#endif
+
+#ifndef TYPEDEF_UINTPTR
+typedef unsigned int uintptr;
+#endif
+
+#ifndef TYPEDEF_INT8
+typedef signed char int8;
+#endif
+
+#ifndef TYPEDEF_INT16
+typedef signed short int16;
+#endif
+
+#ifndef TYPEDEF_INT32
+typedef signed int int32;
+#endif
+
+#ifndef TYPEDEF_INT64
+typedef signed long long int64;
+#endif
+
+/* define float32/64, float_t */
+
+#ifndef TYPEDEF_FLOAT32
+typedef float float32;
+#endif
+
+#ifndef TYPEDEF_FLOAT64
+typedef double float64;
+#endif
+
+/*
+ * abstracted floating point type allows for compile time selection of
+ * single or double precision arithmetic. Compiling with -DFLOAT32
+ * selects single precision; the default is double precision.
+ */
+
+#ifndef TYPEDEF_FLOAT_T
+
+#if defined(FLOAT32)
+typedef float32 float_t;
+#else /* default to double precision floating point */
+typedef float64 float_t;
+#endif
+
+#endif /* TYPEDEF_FLOAT_T */
+
+/* define macro values */
+
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+#ifndef TRUE
+#define TRUE 1 /* TRUE */
+#endif
+
+#ifndef NULL
+#define NULL 0
+#endif
+
+#ifndef OFF
+#define OFF 0
+#endif
+
+#ifndef ON
+#define ON 1 /* ON = 1 */
+#endif
+
+#define AUTO (-1) /* Auto = -1 */
+
+/* define PTRSZ, INLINE */
+
+#ifndef PTRSZ
+#define PTRSZ sizeof(char*)
+#endif
+
+/* Detect compiler type. */
+#ifdef _MSC_VER
+ #define BWL_COMPILER_MICROSOFT
+#elif defined(__GNUC__) || defined(__lint)
+ #define BWL_COMPILER_GNU
+#elif defined(__CC_ARM) && __CC_ARM
+ #define BWL_COMPILER_ARMCC
+#else
+ #error "Unknown compiler!"
+#endif /* _MSC_VER */
+
+#ifndef INLINE
+#if defined(BWL_COMPILER_MICROSOFT)
+ #define INLINE __inline
+#elif defined(BWL_COMPILER_GNU)
+ #define INLINE __inline__
+#elif defined(BWL_COMPILER_ARMCC)
+ #define INLINE __inline
+#else
+ #define INLINE
+#endif /* _MSC_VER */
+#endif /* INLINE */
+
+#undef TYPEDEF_BOOL
+#undef TYPEDEF_UCHAR
+#undef TYPEDEF_USHORT
+#undef TYPEDEF_UINT
+#undef TYPEDEF_ULONG
+#undef TYPEDEF_UINT8
+#undef TYPEDEF_UINT16
+#undef TYPEDEF_UINT32
+#undef TYPEDEF_UINT64
+#undef TYPEDEF_UINTPTR
+#undef TYPEDEF_INT8
+#undef TYPEDEF_INT16
+#undef TYPEDEF_INT32
+#undef TYPEDEF_INT64
+#undef TYPEDEF_FLOAT32
+#undef TYPEDEF_FLOAT64
+#undef TYPEDEF_FLOAT_T
+
+#endif /* USE_TYPEDEF_DEFAULTS */
+
+/* Suppress unused parameter warning */
+#define UNUSED_PARAMETER(x) (void)(x)
+
+/* Avoid warning for discarded const or volatile qualifier in special cases (-Wcast-qual) */
+#define DISCARD_QUAL(ptr, type) ((type *)(uintptr)(ptr))
+
+#else /* !EDK_RELEASE_VERSION || (EDK_RELEASE_VERSION < 0x00020000) */
+
+#include <sys/types.h>
+#include <strings.h>
+#include <stdlib.h>
+
+#ifdef stderr
+#undef stderr
+#define stderr stdout
+#endif
+
+typedef UINT8 uint8;
+typedef UINT16 uint16;
+typedef UINT32 uint32;
+typedef UINT64 uint64;
+typedef INT8 int8;
+typedef INT16 int16;
+typedef INT32 int32;
+typedef INT64 int64;
+
+typedef BOOLEAN bool;
+typedef unsigned char uchar;
+typedef UINTN uintptr;
+
+#define UNUSED_PARAMETER(x) (void)(x)
+#define DISCARD_QUAL(ptr, type) ((type *)(uintptr)(ptr))
+#define INLINE
+#define AUTO (-1) /* Auto = -1 */
+#define ON 1 /* ON = 1 */
+#define OFF 0
+
+#endif /* !EDK_RELEASE_VERSION || (EDK_RELEASE_VERSION < 0x00020000) */
+
+/*
+ * Including the bcmdefs.h here, to make sure everyone including typedefs.h
+ * gets this automatically
+*/
+#include <bcmdefs.h>
+#endif /* _TYPEDEFS_H_ */
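
Because much of this tree depends on these fixed-width typedefs, a
compile-time width check is a cheap guard; a sketch using a C89-style
negative-array-size assertion (the STATIC_ASSERT_SIZE name here is
illustrative, not a macro from this tree):

    #include <typedefs.h>

    /* Fails to compile if a typedef does not have the expected width. */
    #define STATIC_ASSERT_SIZE(t, n) \
        typedef char assert_##t##_size[(sizeof(t) == (n)) ? 1 : -1]

    STATIC_ASSERT_SIZE(uint8, 1);
    STATIC_ASSERT_SIZE(uint16, 2);
    STATIC_ASSERT_SIZE(uint32, 4);
    STATIC_ASSERT_SIZE(uint64, 8);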
diff --git a/bcmdhd.101.10.361.x/include/usbrdl.h b/bcmdhd.101.10.361.x/include/usbrdl.h
new file mode 100755
index 0000000..be5bd69
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/usbrdl.h
@@ -0,0 +1,134 @@
+/*
+ * Broadcom USB remote download definitions
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: usbrdl.h 597933 2015-11-06 18:52:06Z $
+ */
+
+#ifndef _USB_RDL_H
+#define _USB_RDL_H
+
+/* Control messages: bRequest values */
+#define DL_GETSTATE 0 /* returns the rdl_state_t struct */
+#define DL_CHECK_CRC 1 /* currently unused */
+#define DL_GO 2 /* execute downloaded image */
+#define DL_START 3 /* initialize dl state */
+#define DL_REBOOT 4 /* reboot the device in 2 seconds */
+#define DL_GETVER 5 /* returns the bootrom_id_t struct */
+#define DL_GO_PROTECTED 6 /* execute the downloaded code and set reset event
+ * to occur in 2 seconds. It is the responsibility
+ * of the downloaded code to clear this event
+ */
+#define DL_EXEC 7 /* jump to a supplied address */
+#define DL_RESETCFG 8 /* To support single enum on dongle
+ * - Not used by bootloader
+ */
+#define DL_DEFER_RESP_OK 9 /* Potentially defer the response to setup
+ * if resp unavailable
+ */
+#define DL_CHGSPD 0x0A
+
+#define DL_HWCMD_MASK 0xfc /* Mask for hardware read commands: */
+#define DL_RDHW 0x10 /* Read a hardware address (Ctl-in) */
+#define DL_RDHW32 0x10 /* Read a 32 bit word */
+#define DL_RDHW16 0x11 /* Read 16 bits */
+#define DL_RDHW8 0x12 /* Read an 8 bit byte */
+#define DL_WRHW 0x14 /* Write a hardware address (Ctl-out) */
+#define DL_WRHW_BLK 0x13 /* Block write to hardware access */
+
+#define DL_CMD_WRHW 2
+
+
+/* states */
+#define DL_WAITING 0 /* waiting to rx first pkt that includes the hdr info */
+#define DL_READY 1 /* hdr was good, waiting for more of the compressed image */
+#define DL_BAD_HDR 2 /* hdr was corrupted */
+#define DL_BAD_CRC 3 /* compressed image was corrupted */
+#define DL_RUNNABLE 4 /* download was successful, waiting for go cmd */
+#define DL_START_FAIL 5 /* failed to initialize correctly */
+#define DL_NVRAM_TOOBIG 6 /* host specified nvram data exceeds DL_NVRAM value */
+#define DL_IMAGE_TOOBIG 7 /* download image too big (exceeds DATA_START for rdl) */
+
+#define TIMEOUT 5000 /* Timeout for usb commands */
+
+struct bcm_device_id {
+ char *name;
+ uint32 vend;
+ uint32 prod;
+};
+
+typedef struct {
+ uint32 state;
+ uint32 bytes;
+} rdl_state_t;
+
+typedef struct {
+ uint32 chip; /* Chip id */
+ uint32 chiprev; /* Chip rev */
+ uint32 ramsize; /* Size of RAM */
+ uint32 remapbase; /* Current remap base address */
+ uint32 boardtype; /* Type of board */
+ uint32 boardrev; /* Board revision */
+} bootrom_id_t;
+
+/* struct for backplane & jtag accesses */
+typedef struct {
+ uint32 cmd; /* tag to identify the cmd */
+ uint32 addr; /* backplane address for write */
+ uint32 len; /* length of data: 1, 2, 4 bytes */
+ uint32 data; /* data to write */
+} hwacc_t;
+
+
+/* struct for querying nvram params from bootloader */
+#define QUERY_STRING_MAX 32
+typedef struct {
+ uint32 cmd; /* tag to identify the cmd */
+ char var[QUERY_STRING_MAX]; /* param name */
+} nvparam_t;
+
+typedef void (*exec_fn_t)(void *sih);
+
+#define USB_CTRL_IN (USB_TYPE_VENDOR | 0x80 | USB_RECIP_INTERFACE)
+#define USB_CTRL_OUT (USB_TYPE_VENDOR | 0 | USB_RECIP_INTERFACE)
+
+#define USB_CTRL_EP_TIMEOUT 500 /* Timeout used in USB control_msg transactions. */
+#define USB_BULK_EP_TIMEOUT 500 /* Timeout used in USB bulk transactions. */
+
+#define RDL_CHUNK_MAX (64 * 1024) /* max size of each dl transfer */
+#define RDL_CHUNK 1500 /* size of each dl transfer */
+
+/* bootloader makes special use of trx header "offsets" array */
+#define TRX_OFFSETS_DLFWLEN_IDX 0 /* Size of the fw; used in uncompressed case */
+#define TRX_OFFSETS_JUMPTO_IDX 1 /* RAM address for jumpto after download */
+#define TRX_OFFSETS_NVM_LEN_IDX 2 /* Length of appended NVRAM data */
+#ifdef BCMTRXV2
+#define TRX_OFFSETS_DSG_LEN_IDX 3 /* Length of digital signature for the first image */
+#define TRX_OFFSETS_CFG_LEN_IDX 4 /* Length of config region, which is not digitally signed */
+#endif /* BCMTRXV2 */
+
+#define TRX_OFFSETS_DLBASE_IDX 0 /* RAM start address for download */
+
+#endif /* _USB_RDL_H */
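
As an illustration of the control-message protocol above, a Linux host can
fetch the bootloader state with DL_GETSTATE over endpoint 0; a sketch only,
assuming a kernel USB context (usb_control_msg() and the usb_device handle
come from the Linux USB core, not from this header), with error handling
trimmed for brevity:

    #include <linux/usb.h>
    #include <typedefs.h>
    #include <usbrdl.h>

    /* Query the remote-download state machine; returns 0 on success. */
    static int rdl_get_state(struct usb_device *usb, rdl_state_t *state)
    {
        int ret = usb_control_msg(usb, usb_rcvctrlpipe(usb, 0),
                                  DL_GETSTATE, USB_CTRL_IN,
                                  0, 0, state, sizeof(*state),
                                  USB_CTRL_EP_TIMEOUT);
        if (ret < 0)
            return ret;
        /* fields arrive little-endian; swap on big-endian hosts */
        state->state = le32_to_cpu(state->state);
        state->bytes = le32_to_cpu(state->bytes);
        return 0;
    }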
diff --git a/bcmdhd.101.10.361.x/include/vlan.h b/bcmdhd.101.10.361.x/include/vlan.h
new file mode 100755
index 0000000..bbf5e50
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/vlan.h
@@ -0,0 +1,91 @@
+/*
+ * 802.1Q VLAN protocol definitions
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _vlan_h_
+#define _vlan_h_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+#ifndef VLAN_VID_MASK
+#define VLAN_VID_MASK 0xfff /* low 12 bits are vlan id */
+#endif
+
+#define VLAN_CFI_SHIFT 12 /* canonical format indicator bit */
+#define VLAN_PRI_SHIFT 13 /* user priority */
+
+#define VLAN_PRI_MASK 7 /* 3 bits of priority */
+
+#define VLAN_TPID_OFFSET 12 /* offset of tag protocol id field */
+#define VLAN_TCI_OFFSET 14 /* offset of tag ctrl info field */
+
+#define VLAN_TAG_LEN 4
+#define VLAN_TAG_OFFSET (2 * ETHER_ADDR_LEN) /* offset in Ethernet II packet only */
+
+#define VLAN_TPID 0x8100 /* VLAN ethertype/Tag Protocol ID */
+
+struct vlan_header {
+ uint16 vlan_type; /* 0x8100 */
+ uint16 vlan_tag; /* priority, cfi and vid */
+};
+
+BWL_PRE_PACKED_STRUCT struct ethervlan_header {
+ uint8 ether_dhost[ETHER_ADDR_LEN];
+ uint8 ether_shost[ETHER_ADDR_LEN];
+ uint16 vlan_type; /* 0x8100 */
+ uint16 vlan_tag; /* priority, cfi and vid */
+ uint16 ether_type;
+} BWL_POST_PACKED_STRUCT;
+
+struct dot3_mac_llc_snapvlan_header {
+ uint8 ether_dhost[ETHER_ADDR_LEN]; /* dest mac */
+ uint8 ether_shost[ETHER_ADDR_LEN]; /* src mac */
+ uint16 length; /* frame length incl header */
+ uint8 dsap; /* always 0xAA */
+ uint8 ssap; /* always 0xAA */
+ uint8 ctl; /* always 0x03 */
+ uint8 oui[3]; /* RFC1042: 0x00 0x00 0x00
+ * Bridge-Tunnel: 0x00 0x00 0xF8
+ */
+ uint16 vlan_type; /* 0x8100 */
+ uint16 vlan_tag; /* priority, cfi and vid */
+ uint16 ether_type; /* ethertype */
+};
+
+#define ETHERVLAN_HDR_LEN (ETHER_HDR_LEN + VLAN_TAG_LEN)
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#define ETHERVLAN_MOVE_HDR(d, s) \
+do { \
+ struct ethervlan_header t; \
+ t = *(struct ethervlan_header *)(s); \
+ *(struct ethervlan_header *)(d) = t; \
+} while (0)
+
+#endif /* _vlan_h_ */
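
A short worked example of the tag layout above: for an Ethernet II frame
carrying an 802.1Q tag, the VID and priority come out of the TCI with the
masks and shifts just defined; a sketch, assuming ETHER_ADDR_LEN from this
tree's ethernet.h and ntoh16() from bcmendian.h:

    #include <typedefs.h>
    #include <bcmendian.h>
    #include <ethernet.h>
    #include <vlan.h>

    /* Returns FALSE if the frame is not 802.1Q tagged. */
    static bool vlan_parse(const struct ethervlan_header *evh,
                           uint16 *vid, uint16 *prio)
    {
        uint16 tci;

        if (ntoh16(evh->vlan_type) != VLAN_TPID)
            return FALSE;
        tci = ntoh16(evh->vlan_tag);
        *vid = tci & VLAN_VID_MASK;
        *prio = (tci >> VLAN_PRI_SHIFT) & VLAN_PRI_MASK;
        return TRUE;
    }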
diff --git a/bcmdhd.101.10.361.x/include/wl_bam.h b/bcmdhd.101.10.361.x/include/wl_bam.h
new file mode 100755
index 0000000..2c7d59c
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/wl_bam.h
@@ -0,0 +1,74 @@
+/*
+ * Bad AP Manager for ADPS
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+#ifndef _WL_BAM_H_
+#define _WL_BAM_H_
+#include <typedefs.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <net/cfg80211.h>
+
+#include <wl_cfgp2p.h>
+#include <dhd.h>
+
+#define WL_BAD_AP_MAX_ENTRY_NUM 20u
+
+typedef struct wl_bad_ap_mngr {
+ osl_t *osh;
+
+ uint32 num;
+ spinlock_t lock;
+#if !defined(DHD_ADPS_BAM_EXPORT)
+ struct mutex fs_lock; /* lock for bad ap file list */
+#endif /* !DHD_ADPS_BAM_EXPORT */
+ struct list_head list;
+} wl_bad_ap_mngr_t;
+
+typedef struct wl_bad_ap_info {
+ struct ether_addr bssid;
+#if !defined(DHD_ADPS_BAM_EXPORT)
+ struct tm tm;
+ uint32 status;
+ uint32 reason;
+ uint32 connect_count;
+#endif /* !DHD_ADPS_BAM_EXPORT */
+} wl_bad_ap_info_t;
+
+typedef struct wl_bad_ap_info_entry {
+ wl_bad_ap_info_t bad_ap;
+ struct list_head list;
+} wl_bad_ap_info_entry_t;
+
+void wl_bad_ap_mngr_init(struct bcm_cfg80211 *cfg);
+void wl_bad_ap_mngr_deinit(struct bcm_cfg80211 *cfg);
+
+int wl_bad_ap_mngr_add(wl_bad_ap_mngr_t *bad_ap_mngr, wl_bad_ap_info_t *bad_ap_info);
+wl_bad_ap_info_entry_t* wl_bad_ap_mngr_find(wl_bad_ap_mngr_t *bad_ap_mngr,
+ const struct ether_addr *bssid);
+
+bool wl_adps_bad_ap_check(struct bcm_cfg80211 *cfg, const struct ether_addr *bssid);
+int wl_adps_enabled(struct bcm_cfg80211 *cfg, struct net_device *ndev);
+int wl_adps_set_suspend(struct bcm_cfg80211 *cfg, struct net_device *ndev, uint8 suspend);
+
+s32 wl_adps_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data);
+#endif /* _WL_BAM_H_ */
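
For orientation, a caller can walk the bad-AP list declared above under the
manager's spinlock; a sketch only (the locking discipline shown here is an
assumption, not taken from the driver sources):

    #include <wl_bam.h>

    /* Count entries currently tracked by the bad AP manager. */
    static uint32 bad_ap_count(wl_bad_ap_mngr_t *mngr)
    {
        wl_bad_ap_info_entry_t *entry;
        uint32 n = 0;
        unsigned long flags;

        spin_lock_irqsave(&mngr->lock, flags);
        list_for_each_entry(entry, &mngr->list, list) {
            n++;
        }
        spin_unlock_irqrestore(&mngr->lock, flags);
        return n;
    }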
diff --git a/bcmdhd.101.10.361.x/include/wl_bigdata.h b/bcmdhd.101.10.361.x/include/wl_bigdata.h
new file mode 100755
index 0000000..bdd4019
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/wl_bigdata.h
@@ -0,0 +1,81 @@
+/*
+ * Bigdata logging and reporting for non-EWP and Hang events.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+#ifndef __WL_BIGDATA_H_
+#define __WL_BIGDATA_H_
+
+#include <802.11.h>
+#include <bcmevent.h>
+#include <bcmwifi_channels.h>
+
+#define MAX_STA_INFO_AP_CNT 20
+
+#define DOT11_11B_MAX_RATE 11
+#define DOT11_2GHZ_MAX_CH_NUM 14
+#define DOT11_HT_MCS_RATE_MASK 0xFF
+
+enum {
+ BIGDATA_DOT11_11B_MODE = 0,
+ BIGDATA_DOT11_11G_MODE = 1,
+ BIGDATA_DOT11_11N_MODE = 2,
+ BIGDATA_DOT11_11A_MODE = 3,
+ BIGDATA_DOT11_11AC_MODE = 4,
+ BIGDATA_DOT11_11AX_MODE = 5,
+ BIGDATA_DOT11_MODE_MAX
+};
+
+typedef struct wl_ap_sta_data
+{
+ struct ether_addr mac;
+ uint32 mode_80211;
+ uint32 nss;
+ chanspec_t chanspec;
+ int16 rssi;
+ uint32 rate;
+ uint8 channel;
+ uint32 mimo;
+ uint32 disconnected;
+ uint32 is_empty;
+ uint32 reason_code;
+} wl_ap_sta_data_t;
+
+typedef struct ap_sta_wq_data
+{
+ wl_event_msg_t e;
+ void *dhdp;
+ void *bcm_cfg;
+ void *ndev;
+} ap_sta_wq_data_t;
+
+typedef struct wl_ap_sta_info
+{
+ wl_ap_sta_data_t *ap_sta_data;
+ uint32 sta_list_cnt;
+ struct mutex wq_data_sync;
+} wl_ap_sta_info_t;
+
+int wl_attach_ap_stainfo(void *bcm_cfg);
+int wl_detach_ap_stainfo(void *bcm_cfg);
+int wl_ap_stainfo_init(void *bcm_cfg);
+void wl_gather_ap_stadata(void *handle, void *event_info, u8 event);
+int wl_get_ap_stadata(void *bcm_cfg, struct ether_addr *sta_mac, void **data);
+#endif /* __WL_BIGDATA_H_ */
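
A hedged sketch of how the lookup API above might be consumed; the zero
return value for success and the cast of the out-pointer to wl_ap_sta_data_t
are assumptions for illustration:

    #include <wl_bigdata.h>

    /* Print a station's 802.11 mode, NSS and RSSI if it is being tracked. */
    static void dump_sta_mode(void *bcm_cfg, struct ether_addr *mac)
    {
        void *data = NULL;

        if (wl_get_ap_stadata(bcm_cfg, mac, &data) == 0 && data != NULL) {
            wl_ap_sta_data_t *sta = (wl_ap_sta_data_t *)data;
            printf("mode=%u nss=%u rssi=%d\n",
                   sta->mode_80211, sta->nss, sta->rssi);
        }
    }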
diff --git a/bcmdhd.101.10.361.x/include/wldev_common.h b/bcmdhd.101.10.361.x/include/wldev_common.h
new file mode 100755
index 0000000..529704e
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/wldev_common.h
@@ -0,0 +1,135 @@
+/*
+ * Common function shared by Linux WEXT, cfg80211 and p2p drivers
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+#ifndef __WLDEV_COMMON_H__
+#define __WLDEV_COMMON_H__
+
+#include <wlioctl.h>
+
+/* wl_dev_ioctl - get/set IOCTLs, will call net_device's do_ioctl (or
+ * netdev_ops->ndo_do_ioctl in new kernels)
+ * @dev: the net_device handle
+ */
+s32 wldev_ioctl(
+ struct net_device *dev, u32 cmd, void *arg, u32 len, u32 set);
+
+s32 wldev_ioctl_get(
+ struct net_device *dev, u32 cmd, void *arg, u32 len);
+
+s32 wldev_ioctl_set(
+ struct net_device *dev, u32 cmd, const void *arg, u32 len);
+
+/** Retrieve named IOVARs, this function calls wl_dev_ioctl with
+ * WLC_GET_VAR IOCTL code
+ */
+s32 wldev_iovar_getbuf(
+ struct net_device *dev, s8 *iovar_name,
+ const void *param, u32 paramlen, void *buf, u32 buflen, struct mutex* buf_sync);
+
+/** Set named IOVARs, this function calls wl_dev_ioctl with
+ * WLC_SET_VAR IOCTL code
+ */
+s32 wldev_iovar_setbuf(
+ struct net_device *dev, s8 *iovar_name,
+ const void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync);
+
+s32 wldev_iovar_setint(
+ struct net_device *dev, s8 *iovar, s32 val);
+
+s32 wldev_iovar_getint(
+ struct net_device *dev, s8 *iovar, s32 *pval);
+
+/** The following function can be implemented if there is a need for bsscfg
+ * indexed IOVARs
+ */
+
+s32 wldev_mkiovar_bsscfg(
+ const s8 *iovar_name, const s8 *param, s32 paramlen,
+ s8 *iovar_buf, s32 buflen, s32 bssidx);
+
+/** Retrieve named and bsscfg indexed IOVARs, this function calls wl_dev_ioctl with
+ * WLC_GET_VAR IOCTL code
+ */
+s32 wldev_iovar_getbuf_bsscfg(
+ struct net_device *dev, s8 *iovar_name, void *param, s32 paramlen,
+ void *buf, s32 buflen, s32 bsscfg_idx, struct mutex* buf_sync);
+
+/** Set named and bsscfg indexed IOVARs, this function calls wl_dev_ioctl with
+ * WLC_SET_VAR IOCTL code
+ */
+s32 wldev_iovar_setbuf_bsscfg(
+ struct net_device *dev, const s8 *iovar_name, const void *param, s32 paramlen,
+ void *buf, s32 buflen, s32 bsscfg_idx, struct mutex* buf_sync);
+
+s32 wldev_iovar_getint_bsscfg(
+ struct net_device *dev, s8 *iovar, s32 *pval, s32 bssidx);
+
+s32 wldev_iovar_setint_bsscfg(
+ struct net_device *dev, s8 *iovar, s32 val, s32 bssidx);
+
+extern int dhd_net_set_fw_path(struct net_device *dev, char *fw);
+extern int dhd_net_bus_suspend(struct net_device *dev);
+extern int dhd_net_bus_resume(struct net_device *dev, uint8 stage);
+extern int dhd_net_wifi_platform_set_power(struct net_device *dev, bool on,
+ unsigned long delay_msec);
+extern void dhd_get_customized_country_code(struct net_device *dev, char *country_iso_code,
+ wl_country_t *cspec);
+extern void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec, bool notify);
+
+#ifdef OEM_ANDROID
+extern bool dhd_force_country_change(struct net_device *dev);
+#endif
+
+extern void dhd_bus_band_set(struct net_device *dev, uint band);
+extern int wldev_set_country(struct net_device *dev, char *country_code, bool notify,
+ int revinfo);
+extern int net_os_wake_lock(struct net_device *dev);
+extern int net_os_wake_unlock(struct net_device *dev);
+extern int net_os_wake_lock_timeout(struct net_device *dev);
+extern int net_os_wake_lock_timeout_enable(struct net_device *dev, int val);
+extern int net_os_set_dtim_skip(struct net_device *dev, int val);
+extern int net_os_set_suspend_disable(struct net_device *dev, int val);
+extern int net_os_set_suspend(struct net_device *dev, int val, int force);
+extern int net_os_set_suspend_bcn_li_dtim(struct net_device *dev, int val);
+extern int net_os_set_max_dtim_enable(struct net_device *dev, int val);
+#ifdef DISABLE_DTIM_IN_SUSPEND
+extern int net_os_set_disable_dtim_in_suspend(struct net_device *dev, int val);
+#endif /* DISABLE_DTIM_IN_SUSPEND */
+
+#if defined(OEM_ANDROID)
+extern int wl_parse_ssid_list_tlv(char** list_str, wlc_ssid_ext_t* ssid,
+ int max, int *bytes_left);
+#endif /* defined(OEM_ANDROID) */
+
+/* Get the link speed from the dongle; speed is in kbps */
+int wldev_get_link_speed(struct net_device *dev, int *plink_speed);
+
+int wldev_get_rssi(struct net_device *dev, scb_val_t *prssi);
+
+int wldev_get_ssid(struct net_device *dev, wlc_ssid_t *pssid);
+
+int wldev_get_band(struct net_device *dev, uint *pband);
+int wldev_get_mode(struct net_device *dev, uint8 *pband, uint8 caplen);
+int wldev_get_datarate(struct net_device *dev, int *datarate);
+int wldev_set_band(struct net_device *dev, uint band);
+
+#endif /* __WLDEV_COMMON_H__ */
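
The typical call pattern for the IOVAR helpers above, as used throughout the
cfg80211 glue; a sketch with a hypothetical net_device pointer, using the
well-known "mpc" (minimum power consumption) IOVAR:

    #include <wldev_common.h>

    /* Disable MPC and read the value back to confirm. */
    static int example_set_mpc(struct net_device *ndev)
    {
        s32 val = -1;
        int err;

        err = wldev_iovar_setint(ndev, "mpc", 0);
        if (err)
            return err;
        err = wldev_iovar_getint(ndev, "mpc", &val);
        return err ? err : ((val == 0) ? 0 : -1);
    }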
diff --git a/bcmdhd.101.10.361.x/include/wlfc_proto.h b/bcmdhd.101.10.361.x/include/wlfc_proto.h
new file mode 100755
index 0000000..d8d1009
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/wlfc_proto.h
@@ -0,0 +1,496 @@
+/*
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ *
+ */
+
+/** WL flow control for PROP_TXSTATUS. Related to host AMPDU reordering. */
+
+#ifndef __wlfc_proto_definitions_h__
+#define __wlfc_proto_definitions_h__
+
+ /* Use TLV to convey WLFC information.
+ ---------------------------------------------------------------------------
+ | Type | Len | value | Description
+ ---------------------------------------------------------------------------
+ | 1 | 1 | (handle) | MAC OPEN
+ ---------------------------------------------------------------------------
+ | 2 | 1 | (handle) | MAC CLOSE
+ ---------------------------------------------------------------------------
+ | 3 | 2 | (count, handle, prec_bmp)| Set the credit depth for a MAC dstn
+ ---------------------------------------------------------------------------
+ | 4 | 4+ | see pkttag comments | TXSTATUS
+ | | 12 | TX status & timestamps | Present only when pkt timestamp is enabled
+ ---------------------------------------------------------------------------
+ | 5 | 4 | see pkttag comments | PKTTAG [host->firmware]
+ ---------------------------------------------------------------------------
+ | 6 | 8 | (handle, ifid, MAC) | MAC ADD
+ ---------------------------------------------------------------------------
+ | 7 | 8 | (handle, ifid, MAC) | MAC DEL
+ ---------------------------------------------------------------------------
+ | 8 | 1 | (rssi) | RSSI - RSSI value for the packet.
+ ---------------------------------------------------------------------------
+ | 9 | 1 | (interface ID) | Interface OPEN
+ ---------------------------------------------------------------------------
+ | 10 | 1 | (interface ID) | Interface CLOSE
+ ---------------------------------------------------------------------------
+ | 11 | 8 | fifo credit returns map | FIFO credits back to the host
+ | | | |
+ | | | | --------------------------------------
+ | | | | | ac0 | ac1 | ac2 | ac3 | bcmc | atim |
+ | | | | --------------------------------------
+ | | | |
+ ---------------------------------------------------------------------------
+ | 12 | 2 | MAC handle, | Host provides a bitmap of pending
+ | | | AC[0-3] traffic bitmap | unicast traffic for MAC-handle dstn.
+ | | | | [host->firmware]
+ ---------------------------------------------------------------------------
+ | 13 | 3 | (count, handle, prec_bmp)| One-time request for a packet to a specific
+ | | | | MAC destination.
+ ---------------------------------------------------------------------------
+ | 15 | 12 | (pkttag, timestamps) | Send TX timestamp at reception from host
+ ---------------------------------------------------------------------------
+ | 16 | 12 | (pkttag, timestamps) | Send WLAN RX timestamp along with RX frame
+ ---------------------------------------------------------------------------
+ | 255 | N/A | N/A | FILLER - This is a special type
+ | | | | that has no length or value.
+ | | | | Typically used for padding.
+ ---------------------------------------------------------------------------
+ */
+
+typedef enum {
+ WLFC_CTL_TYPE_MAC_OPEN = 1,
+ WLFC_CTL_TYPE_MAC_CLOSE = 2,
+ WLFC_CTL_TYPE_MAC_REQUEST_CREDIT = 3,
+ WLFC_CTL_TYPE_TXSTATUS = 4,
+ WLFC_CTL_TYPE_PKTTAG = 5, /** host<->dongle */
+
+ WLFC_CTL_TYPE_MACDESC_ADD = 6,
+ WLFC_CTL_TYPE_MACDESC_DEL = 7,
+ WLFC_CTL_TYPE_RSSI = 8,
+
+ WLFC_CTL_TYPE_INTERFACE_OPEN = 9,
+ WLFC_CTL_TYPE_INTERFACE_CLOSE = 10,
+
+ WLFC_CTL_TYPE_FIFO_CREDITBACK = 11,
+
+ WLFC_CTL_TYPE_PENDING_TRAFFIC_BMP = 12, /** host->dongle */
+ WLFC_CTL_TYPE_MAC_REQUEST_PACKET = 13,
+ WLFC_CTL_TYPE_HOST_REORDER_RXPKTS = 14,
+
+ WLFC_CTL_TYPE_TX_ENTRY_STAMP = 15,
+ WLFC_CTL_TYPE_RX_STAMP = 16,
+
+ WLFC_CTL_TYPE_UPD_FLR_FETCH = 17, /* PCIE_FLOWCTL: Update Flowring Fetch */
+
+ WLFC_CTL_TYPE_TRANS_ID = 18,
+ WLFC_CTL_TYPE_COMP_TXSTATUS = 19,
+
+ WLFC_CTL_TYPE_TID_OPEN = 20, /* open flowring/s with tid */
+ WLFC_CTL_TYPE_TID_CLOSE = 21, /* close flowring/s with tid */
+ WLFC_CTL_TYPE_UPD_FLR_WEIGHT = 22, /* WLATF_DONGLE */
+ WLFC_CTL_TYPE_ENAB_FFSCH = 23, /* WLATF_DONGLE */
+
+ WLFC_CTL_TYPE_UPDATE_FLAGS = 24, /* clear the flags set in flowring */
+ WLFC_CTL_TYPE_CLEAR_SUPPR = 25, /* free the suppression info in the flowring */
+
+ WLFC_CTL_TYPE_FLOWID_OPEN = 26, /* open flowring with flowid */
+ WLFC_CTL_TYPE_FLOWID_CLOSE = 27, /* close flowring with flowid */
+
+ WLFC_CTL_TYPE_PENDING_TX_PKTS = 28, /* Get the outstanding packets in host
+ * flowring for the given interface.
+ */
+ WLFC_CTL_TYPE_UPD_SCB_RATESEL_CHANGE = 29, /* Upd flow's max rate dynamically */
+ WLFC_CTL_TYPE_AMSDU_STATE = 30, /* Upd flow's AMSDU state(Enabled/Disabled) */
+ WLFC_CTL_TYPE_APP_STATE = 31, /* Upd flow's APP state, enable/disable APP */
+ WLFC_CTL_TYPE_HP2P_EXT_TXSTATUS = 32, /* Hp2p extended tx status */
+ WLFC_CTL_TYPE_HP2P_ACTIVE_STATE = 33, /* Get status of HP2P ring active or not */
+ WLFC_CTL_TYPE_HP2P_QUERY_LIFETIME = 34, /* Query lifetime for last unacked */
+ WLFC_CTL_TYPE_FILLER = 255
+} wlfc_ctl_type_t;
+
+#define WLFC_CTL_VALUE_LEN_FLOWID 2u /* flowid length in TLV */
+
+#define WLFC_CTL_VALUE_LEN_MACDESC 8u /** handle, interface, MAC */
+
+#define WLFC_CTL_VALUE_LEN_MAC 1u /** MAC-handle */
+#define WLFC_CTL_VALUE_LEN_RSSI 1u
+
+#define WLFC_CTL_VALUE_LEN_INTERFACE 1u
+#define WLFC_CTL_VALUE_LEN_PENDING_TRAFFIC_BMP 2u
+
+#define WLFC_CTL_VALUE_LEN_TXSTATUS 4u
+#define WLFC_CTL_VALUE_LEN_PKTTAG 4u
+#define WLFC_CTL_VALUE_LEN_TIMESTAMP 12u /** 4-byte rate info + 2 TSF */
+
+#define WLFC_CTL_VALUE_LEN_SEQ 2u
+#define WLFC_CTL_VALUE_LEN_TID 3u /* interface index, TID */
+
+#define WLFC_CTL_EXT_TXSTATUS_PAYLOAD_LEN 8u /* Payload length of extended tx status */
+
+/* Reset the flags set for the corresponding flowring of the SCB which is de-inited */
+/* FLOW_RING_FLAG_LAST_TIM | FLOW_RING_FLAG_INFORM_PKTPEND | FLOW_RING_FLAG_PKT_REQ */
+#define WLFC_RESET_ALL_FLAGS 0
+#define WLFC_CTL_VALUE_LEN_FLAGS 7 /** flags, MAC */
+
+/* free the data stored to be used for suppressed packets in future */
+#define WLFC_CTL_VALUE_LEN_SUPR 8 /** ifindex, tid, MAC */
+
+#define WLFC_CTL_VALUE_LEN_SCB_RATESEL_CHANGE 7 /* ifindex, MAC */
+/* enough space to hold all 4 ACs, bc/mc and atim fifo credits */
+#define WLFC_CTL_VALUE_LEN_FIFO_CREDITBACK 6
+
+#define WLFC_CTL_VALUE_LEN_REQUEST_CREDIT 3 /* credit, MAC-handle, prec_bitmap */
+#define WLFC_CTL_VALUE_LEN_REQUEST_PACKET 3 /* credit, MAC-handle, prec_bitmap */
+
+/*
+ WLFC packet identifier: b[31:0] (WLFC_CTL_TYPE_PKTTAG)
+
+ Generation : b[31] => generation number for this packet [host->fw]
+ OR, current generation number [fw->host]
+ Flags : b[30:27] => command, status flags
+ FIFO-AC : b[26:24] => AC-FIFO id
+
+ h-slot : b[23:8] => hanger-slot
+ freerun : b[7:0] => A free running counter?
+
+ As far as the firmware is concerned, the host-generated b[23:0] should simply be
+ reflected back in the txstatus.
+*/
+
+#ifndef WLFC_PKTFLAG_COMPAT
+#define WLFC_PKTFLAG_PKTFROMHOST 0x01
+#define WLFC_PKTFLAG_PKT_REQUESTED 0x02
+#define WLFC_PKTFLAG_PKT_SENDTOHOST 0x04
+#define WLFC_PKTFLAG_PKT_FLUSHED 0x08
+#else
+#define WLFC_PKTFLAG_PKTFROMHOST_MASK 0x01
+#define WLFC_PKTFLAG_PKT_REQUESTED_MASK 0x02
+#define WLFC_PKTFLAG_PKT_SENDTOHOST_MASK 0x04
+#define WLFC_PKTFLAG_PKT_FLUSHED_MASK 0x08
+#endif /* WLFC_PKTFLAG_COMPAT */
+
+#define WL_TXSTATUS_STATUS_MASK 0xff /* allow 8 bits */
+#define WL_TXSTATUS_STATUS_SHIFT 24
+
+#define WL_TXSTATUS_SET_STATUS(x, status) ((x) = \
+ ((x) & ~(WL_TXSTATUS_STATUS_MASK << WL_TXSTATUS_STATUS_SHIFT)) | \
+ (((status) & WL_TXSTATUS_STATUS_MASK) << WL_TXSTATUS_STATUS_SHIFT))
+#define WL_TXSTATUS_GET_STATUS(x) (((x) >> WL_TXSTATUS_STATUS_SHIFT) & \
+ WL_TXSTATUS_STATUS_MASK)
+
+/**
+ * Bit 31 of the 32-bit packet tag is defined as 'generation ID'. It is set by the host to the
+ * "current" generation, and by the firmware to the "expected" generation, toggling on suppress. The
+ * firmware accepts a packet when the generation matches; on reset (startup) both "current" and
+ * "expected" are set to 0.
+ */
+#define WL_TXSTATUS_GENERATION_MASK 1 /* allow 1 bit */
+#define WL_TXSTATUS_GENERATION_SHIFT 31
+
+#define WL_TXSTATUS_SET_GENERATION(x, gen) ((x) = \
+ ((x) & ~(WL_TXSTATUS_GENERATION_MASK << WL_TXSTATUS_GENERATION_SHIFT)) | \
+ (((gen) & WL_TXSTATUS_GENERATION_MASK) << WL_TXSTATUS_GENERATION_SHIFT))
+
+#define WL_TXSTATUS_GET_GENERATION(x) (((x) >> WL_TXSTATUS_GENERATION_SHIFT) & \
+ WL_TXSTATUS_GENERATION_MASK)
+
+#define WL_TXSTATUS_FLAGS_MASK 0xf /* allow 4 bits only */
+#define WL_TXSTATUS_FLAGS_SHIFT 27
+
+#define WL_TXSTATUS_SET_FLAGS(x, flags) ((x) = \
+ ((x) & ~(WL_TXSTATUS_FLAGS_MASK << WL_TXSTATUS_FLAGS_SHIFT)) | \
+ (((flags) & WL_TXSTATUS_FLAGS_MASK) << WL_TXSTATUS_FLAGS_SHIFT))
+#define WL_TXSTATUS_GET_FLAGS(x) (((x) >> WL_TXSTATUS_FLAGS_SHIFT) & \
+ WL_TXSTATUS_FLAGS_MASK)
+#define WL_TXSTATUS_CLEAR_FLAGS(x, flags) ((x) = \
+ ((x) & ~(((flags) & WL_TXSTATUS_FLAGS_MASK) << WL_TXSTATUS_FLAGS_SHIFT)))
+
+#define WL_TXSTATUS_FIFO_MASK 0x7 /* allow 3 bits for FIFO ID */
+#define WL_TXSTATUS_FIFO_SHIFT 24
+
+#define WL_TXSTATUS_SET_FIFO(x, flags) ((x) = \
+ ((x) & ~(WL_TXSTATUS_FIFO_MASK << WL_TXSTATUS_FIFO_SHIFT)) | \
+ (((flags) & WL_TXSTATUS_FIFO_MASK) << WL_TXSTATUS_FIFO_SHIFT))
+#define WL_TXSTATUS_GET_FIFO(x) (((x) >> WL_TXSTATUS_FIFO_SHIFT) & WL_TXSTATUS_FIFO_MASK)
+
+#define WL_TXSTATUS_PKTID_MASK 0xffffff /* allow 24 bits */
+#define WL_TXSTATUS_SET_PKTID(x, num) ((x) = \
+ ((x) & ~WL_TXSTATUS_PKTID_MASK) | (num))
+#define WL_TXSTATUS_GET_PKTID(x) ((x) & WL_TXSTATUS_PKTID_MASK)
+
+#define WL_TXSTATUS_HSLOT_MASK 0xffff /* allow 16 bits */
+#define WL_TXSTATUS_HSLOT_SHIFT 8
+
+#define WL_TXSTATUS_SET_HSLOT(x, hslot) ((x) = \
+ ((x) & ~(WL_TXSTATUS_HSLOT_MASK << WL_TXSTATUS_HSLOT_SHIFT)) | \
+ (((hslot) & WL_TXSTATUS_HSLOT_MASK) << WL_TXSTATUS_HSLOT_SHIFT))
+#define WL_TXSTATUS_GET_HSLOT(x) (((x) >> WL_TXSTATUS_HSLOT_SHIFT)& \
+ WL_TXSTATUS_HSLOT_MASK)
+
+#define WL_TXSTATUS_FREERUNCTR_MASK 0xff /* allow 8 bits */
+
+#define WL_TXSTATUS_SET_FREERUNCTR(x, ctr) ((x) = \
+ ((x) & ~(WL_TXSTATUS_FREERUNCTR_MASK)) | \
+ ((ctr) & WL_TXSTATUS_FREERUNCTR_MASK))
+#define WL_TXSTATUS_GET_FREERUNCTR(x) ((x)& WL_TXSTATUS_FREERUNCTR_MASK)
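+
+/* Illustrative example (not part of the original header): a host-side tag
+ * would typically be composed and decoded with the accessors above; the
+ * values below are hypothetical.
+ *
+ *   uint32 pkttag = 0;
+ *   WL_TXSTATUS_SET_GENERATION(pkttag, 1);
+ *   WL_TXSTATUS_SET_FIFO(pkttag, 2);
+ *   WL_TXSTATUS_SET_HSLOT(pkttag, 0x1234);
+ *   WL_TXSTATUS_SET_FREERUNCTR(pkttag, 0x56);
+ *   ASSERT(WL_TXSTATUS_GET_HSLOT(pkttag) == 0x1234);
+ */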
+
+/* packet prio phase bit updated */
+#define WL_SEQ_PKTPRIO_PHASE_MASK 0x1
+#define WL_SEQ_PKTPRIO_PHASE_SHIFT 15
+#define WL_SEQ_SET_PKTPRIO_PHASE(x, val) ((x) = \
+ ((x) & ~(WL_SEQ_PKTPRIO_PHASE_MASK << WL_SEQ_PKTPRIO_PHASE_SHIFT)) | \
+ (((val) & WL_SEQ_PKTPRIO_PHASE_MASK) << WL_SEQ_PKTPRIO_PHASE_SHIFT))
+#define WL_SEQ_PKTPRIO_PHASE(x) (((x) >> WL_SEQ_PKTPRIO_PHASE_SHIFT) & \
+ WL_SEQ_PKTPRIO_PHASE_MASK)
+
+/* AMSDU part of d11 seq number */
+#define WL_SEQ_AMSDU_MASK 0x1 /* allow 1 bit */
+#define WL_SEQ_AMSDU_SHIFT 14
+#define WL_SEQ_SET_AMSDU(x, val) ((x) = \
+ ((x) & ~(WL_SEQ_AMSDU_MASK << WL_SEQ_AMSDU_SHIFT)) | \
+ (((val) & WL_SEQ_AMSDU_MASK) << WL_SEQ_AMSDU_SHIFT)) /**< sets a single AMSDU bit */
+/** returns TRUE if ring item is AMSDU (seq = d11 seq nr) */
+#define WL_SEQ_IS_AMSDU(x) (((x) >> WL_SEQ_AMSDU_SHIFT) & \
+ WL_SEQ_AMSDU_MASK)
+
+/* indicates last_suppr_seq is valid */
+#define WL_SEQ_VALIDSUPPR_MASK 0x1 /* allow 1 bit */
+#define WL_SEQ_VALIDSUPPR_SHIFT 12
+#define WL_SEQ_SET_VALIDSUPPR(x, val) ((x) = \
+ ((x) & ~(WL_SEQ_VALIDSUPPR_MASK << WL_SEQ_VALIDSUPPR_SHIFT)) | \
+ (((val) & WL_SEQ_VALIDSUPPR_MASK) << WL_SEQ_VALIDSUPPR_SHIFT))
+#define WL_SEQ_GET_VALIDSUPPR(x) (((x) >> WL_SEQ_VALIDSUPPR_SHIFT) & \
+ WL_SEQ_VALIDSUPPR_MASK)
+
+#define WL_SEQ_FROMFW_MASK 0x1 /* allow 1 bit */
+#define WL_SEQ_FROMFW_SHIFT 13
+#define WL_SEQ_SET_FROMFW(x, val) ((x) = \
+ ((x) & ~(WL_SEQ_FROMFW_MASK << WL_SEQ_FROMFW_SHIFT)) | \
+ (((val) & WL_SEQ_FROMFW_MASK) << WL_SEQ_FROMFW_SHIFT))
+/** Set when firmware assigns D11 sequence number to packet */
+#define SET_WL_HAS_ASSIGNED_SEQ(x) WL_SEQ_SET_FROMFW((x), 1)
+
+/** returns TRUE if packet has been assigned a d11 seq number by the WL firmware layer */
+#define GET_WL_HAS_ASSIGNED_SEQ(x) (((x) >> WL_SEQ_FROMFW_SHIFT) & WL_SEQ_FROMFW_MASK)
+
+#ifdef WLFC_PKTFLAG_COMPAT
+/* Helper macros for WLFC pktflags */
+#define WLFC_PKTFLAG_PKTFROMHOST(p) \
+ (WL_TXSTATUS_GET_FLAGS(WLPKTTAG(p)->wl_hdr_information) & WLFC_PKTFLAG_PKTFROMHOST_MASK)
+#define WLFC_PKTFLAG_PKT_REQUESTED(p) \
+ (WL_TXSTATUS_GET_FLAGS(WLPKTTAG(p)->wl_hdr_information) & WLFC_PKTFLAG_PKT_REQUESTED_MASK)
+#define WLFC_PKTFLAG_PKT_SENDTOHOST(p) \
+ (WL_TXSTATUS_GET_FLAGS(WLPKTTAG(p)->wl_hdr_information) & WLFC_PKTFLAG_PKT_SENDTOHOST_MASK)
+#define WLFC_PKTFLAG_PKT_FLUSHED(p) \
+ (WL_TXSTATUS_GET_FLAGS(WLPKTTAG(p)->wl_hdr_information) & WLFC_PKTFLAG_PKT_FLUSHED_MASK)
+#endif /* WLFC_PKTFLAG_COMPAT */
+
+/**
+ * Proptxstatus related.
+ *
+ * When a packet is suppressed by WL or the D11 core, the packet has to be retried. Assigning
+ * a new d11 sequence number to the packet on retry would leave the peer unable to reorder
+ * the packets within an AMPDU. So a suppressed packet from the bus layer (DHD for SDIO,
+ * pciedev for PCIe) reuses its d11 seq number, and the FW should not assign a new one.
+ */
+#define WL_SEQ_FROMDRV_MASK 0x1 /* allow 1 bit */
+#define WL_SEQ_FROMDRV_SHIFT 12
+
+/**
+ * Proptxstatus, host or fw PCIe layer requests WL layer to reuse d11 seq no. Bit is reset by WL
+ * subsystem when it reuses the seq number.
+ */
+#define WL_SEQ_SET_REUSE(x, val) ((x) = \
+ ((x) & ~(WL_SEQ_FROMDRV_MASK << WL_SEQ_FROMDRV_SHIFT)) | \
+ (((val) & WL_SEQ_FROMDRV_MASK) << WL_SEQ_FROMDRV_SHIFT))
+#define SET_WL_TO_REUSE_SEQ(x) WL_SEQ_SET_REUSE((x), 1)
+#define RESET_WL_TO_REUSE_SEQ(x) WL_SEQ_SET_REUSE((x), 0)
+
+/** Proptxstatus, related to reuse of d11 seq numbers when retransmitting */
+#define IS_WL_TO_REUSE_SEQ(x) (((x) >> WL_SEQ_FROMDRV_SHIFT) & \
+ WL_SEQ_FROMDRV_MASK)
+
+#define WL_SEQ_NUM_MASK 0xfff /* allow 12 bit */
+#define WL_SEQ_NUM_SHIFT 0
+/** Proptxstatus, sets d11seq no in pkt tag, related to reuse of d11seq no when retransmitting */
+#define WL_SEQ_SET_NUM(x, val) ((x) = \
+ ((x) & ~(WL_SEQ_NUM_MASK << WL_SEQ_NUM_SHIFT)) | \
+ (((val) & WL_SEQ_NUM_MASK) << WL_SEQ_NUM_SHIFT))
+/** Proptxstatus, gets d11seq no from pkt tag, related to reuse of d11seq no when retransmitting */
+#define WL_SEQ_GET_NUM(x) (((x) >> WL_SEQ_NUM_SHIFT) & \
+ WL_SEQ_NUM_MASK)
+
+#define WL_SEQ_AMSDU_SUPPR_MASK ((WL_SEQ_FROMDRV_MASK << WL_SEQ_FROMDRV_SHIFT) | \
+ (WL_SEQ_AMSDU_MASK << WL_SEQ_AMSDU_SHIFT) | \
+ (WL_SEQ_NUM_MASK << WL_SEQ_NUM_SHIFT))
+
+/* 32 STAs should be enough (6 bits); must be a power of 2 */
+#define WLFC_MAC_DESC_TABLE_SIZE 32
+#define WLFC_MAX_IFNUM 16
+#define WLFC_MAC_DESC_ID_INVALID 0xff
+
+/* b[7:5] -reuse guard, b[4:0] -value */
+#define WLFC_MAC_DESC_GET_LOOKUP_INDEX(x) ((x) & 0x1f)
+
+#define WLFC_PKTFLAG_SET_PKTREQUESTED(x) (x) |= \
+ (WLFC_PKTFLAG_PKT_REQUESTED << WL_TXSTATUS_FLAGS_SHIFT)
+
+#define WLFC_PKTFLAG_CLR_PKTREQUESTED(x) (x) &= \
+ ~(WLFC_PKTFLAG_PKT_REQUESTED << WL_TXSTATUS_FLAGS_SHIFT)
+
+#define WLFC_MAX_PENDING_DATALEN 120
+
+/* host is free to discard the packet */
+#define WLFC_CTL_PKTFLAG_DISCARD 0
+/* D11 suppressed a packet */
+#define WLFC_CTL_PKTFLAG_D11SUPPRESS 1
+/* WL firmware suppressed a packet because the MAC is
+ * already in PS mode (short time window)
+ */
+#define WLFC_CTL_PKTFLAG_WLSUPPRESS 2
+/* Firmware tossed this packet */
+#define WLFC_CTL_PKTFLAG_TOSSED_BYWLC 3
+/* Firmware tossed after retries */
+#define WLFC_CTL_PKTFLAG_DISCARD_NOACK 4
+/* Firmware wrongly reported suppressed previously; now fixed up to acked */
+#define WLFC_CTL_PKTFLAG_SUPPRESS_ACKED 5
+/* Firmware sent this packet as expired (lifetime expiration) */
+#define WLFC_CTL_PKTFLAG_EXPIRED 6
+/* Firmware dropped this packet for any other reason */
+#define WLFC_CTL_PKTFLAG_DROPPED 7
+/* Firmware freed this packet */
+#define WLFC_CTL_PKTFLAG_MKTFREE 8
+/* Firmware dropped the frame after suppress retries reached max */
+#define WLFC_CTL_PKTFLAG_MAX_SUP_RETR 9
+
+/* Firmware forced packet lifetime expiry */
+#define WLFC_CTL_PKTFLAG_FORCED_EXPIRED 10
+
+#define WLFC_CTL_PKTFLAG_MASK (0x0f) /* For 4-bit mask with one extra bit */
+
+#if defined(PROP_TXSTATUS_DEBUG) && !defined(BINCMP)
+#define WLFC_DBGMESG(x) printf x
+/* wlfc-breadcrumb */
+#define WLFC_BREADCRUMB(x) do {if ((x) == NULL) \
+ {printf("WLFC :%d:caller:%p\n", \
+ __LINE__, CALL_SITE);}} while (0)
+#define WLFC_WHEREIS(s) printf("WLFC: at %d, %s\n", __LINE__, (s))
+#else
+#define WLFC_DBGMESG(x)
+#define WLFC_BREADCRUMB(x)
+#define WLFC_WHEREIS(s)
+#endif /* PROP_TXSTATUS_DEBUG && !BINCMP */
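+
+/*
+ * Usage sketch (illustrative): WLFC_DBGMESG() expands to 'printf x', so the
+ * argument list needs an extra set of parentheses; 'credits' is hypothetical.
+ *
+ *   WLFC_DBGMESG(("wlfc: credits %d\n", credits));
+ *   WLFC_WHEREIS("after credit update");
+ */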
+
+/* AMPDU host reorder packet flags */
+#define WLHOST_REORDERDATA_MAXFLOWS 256
+#define WLHOST_REORDERDATA_LEN 10
+#define WLHOST_REORDERDATA_TOTLEN (WLHOST_REORDERDATA_LEN + 1 + 1) /* +tag +len */
+
+#define WLHOST_REORDERDATA_FLOWID_OFFSET 0
+#define WLHOST_REORDERDATA_MAXIDX_OFFSET 2
+#define WLHOST_REORDERDATA_FLAGS_OFFSET 4
+#define WLHOST_REORDERDATA_CURIDX_OFFSET 6
+#define WLHOST_REORDERDATA_EXPIDX_OFFSET 8
+
+#define WLHOST_REORDERDATA_DEL_FLOW 0x01
+#define WLHOST_REORDERDATA_FLUSH_ALL 0x02
+#define WLHOST_REORDERDATA_CURIDX_VALID 0x04
+#define WLHOST_REORDERDATA_EXPIDX_VALID 0x08
+#define WLHOST_REORDERDATA_NEW_HOLE 0x10
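+
+/*
+ * Parsing sketch (illustrative, assuming single-byte fields as in DHD's
+ * reorder handling); 'buf' is a hypothetical pointer to the reorder data bytes.
+ *
+ *   uint8 flow_id = buf[WLHOST_REORDERDATA_FLOWID_OFFSET];
+ *   uint8 flags = buf[WLHOST_REORDERDATA_FLAGS_OFFSET];
+ *   if (flags & WLHOST_REORDERDATA_CURIDX_VALID) {
+ *       uint8 cur_idx = buf[WLHOST_REORDERDATA_CURIDX_OFFSET];
+ *   }
+ */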
+
+/* transaction id data length: byte 0: reserved, byte 1: sequence number, bytes 2-5: timestamp */
+#define WLFC_CTL_TRANS_ID_LEN 6
+#define WLFC_TYPE_TRANS_ID_LEN 6
+
+#define WLFC_MODE_HANGER 1 /* use hanger */
+#define WLFC_MODE_AFQ 2 /* use afq (At Firmware Queue) */
+#define WLFC_IS_OLD_DEF(x) (((x) & 1) || ((x) & 2))
+
+#define WLFC_MODE_AFQ_SHIFT 2 /* afq bit */
+#define WLFC_SET_AFQ(x, val) ((x) = \
+ ((x) & ~(1 << WLFC_MODE_AFQ_SHIFT)) | \
+ (((val) & 1) << WLFC_MODE_AFQ_SHIFT))
+/** returns TRUE if firmware supports 'at firmware queue' feature */
+#define WLFC_GET_AFQ(x) (((x) >> WLFC_MODE_AFQ_SHIFT) & 1)
+
+#define WLFC_MODE_REUSESEQ_SHIFT 3 /* seq reuse bit */
+#define WLFC_SET_REUSESEQ(x, val) ((x) = \
+ ((x) & ~(1 << WLFC_MODE_REUSESEQ_SHIFT)) | \
+ (((val) & 1) << WLFC_MODE_REUSESEQ_SHIFT))
+
+/** returns TRUE if 'd11 sequence reuse' has been agreed upon between host and dongle */
+#if defined(BCMPCIEDEV_ENABLED) && !defined(ROM_ENAB_RUNTIME_CHECK)
+/* GET_REUSESEQ is always TRUE in pciedev */
+#define WLFC_GET_REUSESEQ(x) (TRUE)
+#else
+#define WLFC_GET_REUSESEQ(x) (((x) >> WLFC_MODE_REUSESEQ_SHIFT) & 1)
+#endif /* defined(BCMPCIEDEV_ENABLED) && !defined(ROM_ENAB_RUNTIME_CHECK) */
+
+#define WLFC_MODE_REORDERSUPP_SHIFT 4 /* host reorder suppress pkt bit */
+#define WLFC_SET_REORDERSUPP(x, val) ((x) = \
+ ((x) & ~(1 << WLFC_MODE_REORDERSUPP_SHIFT)) | \
+ (((val) & 1) << WLFC_MODE_REORDERSUPP_SHIFT))
+/** returns TRUE if 'reorder suppress' has been agreed upon between host and dongle */
+#define WLFC_GET_REORDERSUPP(x) (((x) >> WLFC_MODE_REORDERSUPP_SHIFT) & 1)
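+
+/*
+ * Negotiation sketch (illustrative): the host proposes wlfc mode bits and the
+ * dongle echoes back what it accepted; 'mode' is a hypothetical local.
+ *
+ *   uint32 mode = 0;
+ *   WLFC_SET_AFQ(mode, 1);          // request 'at firmware queue'
+ *   WLFC_SET_REUSESEQ(mode, 1);     // request d11 seq number reuse
+ *   ... exchange 'mode' with the dongle, then ...
+ *   if (WLFC_GET_REUSESEQ(mode)) {
+ *       ... both sides agreed on seq reuse ...
+ *   }
+ */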
+
+#define FLOW_RING_CREATE 1u
+#define FLOW_RING_DELETE 2u
+#define FLOW_RING_FLUSH 3u
+#define FLOW_RING_OPEN 4u
+#define FLOW_RING_CLOSED 5u
+#define FLOW_RING_FLUSHED 6u
+#define FLOW_RING_TIM_SET 7u
+#define FLOW_RING_TIM_RESET 8u
+#define FLOW_RING_FLUSH_TXFIFO 9u
+#define FLOW_RING_GET_PKT_MAX 10u
+#define FLOW_RING_RESET_WEIGHT 11u
+#define FLOW_RING_UPD_PRIOMAP 12u
+#define FLOW_RING_HP2P_CREATE 13u
+#define FLOW_RING_HP2P_DELETE 14u
+#define FLOW_RING_GET_BUFFERED_TIME 15u
+#define FLOW_RING_HP2P_TXQ_STRT 16u
+#define FLOW_RING_HP2P_TXQ_STOP 17u
+
+/* bit 7 indicates whether the tid field carries TID (1) or AC (0) mapped info */
+#define PCIEDEV_IS_AC_TID_MAP_MASK 0x80
+
+#define WLFC_PCIEDEV_AC_PRIO_MAP 0
+#define WLFC_PCIEDEV_TID_PRIO_MAP 1
+#define WLFC_PCIEDEV_LLR_PRIO_MAP 2
+
+void wlc_wlfc_set_pkttime(void* pkt, uint16 time);
+
+/* reason for disabling APP, when none are set, APP will be enabled */
+typedef enum {
+ APP_STS_FLOWRING_NO_APP = 0u, /* Reason code used by pciedev */
+ APP_STS_FLOWRING_CLOSED = 1u, /* Disable APP as flowring is closed */
+ APP_STS_CRYPTO_UNSUPPORTED = 2u, /* Security type doesn't support APP */
+ APP_STS_80211_FRAGMENTATION = 3u, /* 802.11 fragmentation enabled */
+ APP_STS_MAX = 4u /* MAX */
+} app_disable_reason_s;
+
+/* shared structure between wlc and pciedev layer to set/reset a reason code */
+typedef struct app_upd_sts {
+ bool set; /* if set, app is disabled for reason rsn */
+ bool sta; /* set if scb/flowring belong to sta */
+ app_disable_reason_s rsn; /* APP disable reason codes. */
+} app_upd_sts_t;
+
+#endif /* __wlfc_proto_definitions_h__ */
diff --git a/bcmdhd.101.10.361.x/include/wlioctl.h b/bcmdhd.101.10.361.x/include/wlioctl.h
new file mode 100755
index 0000000..97f0148
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/wlioctl.h
@@ -0,0 +1,25850 @@
+/*
+ * Custom OID/ioctl definitions for the
+ * Broadcom 802.11abg Networking Device Driver
+ *
+ * Definitions subject to change without notice.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _wlioctl_h_
+#define _wlioctl_h_
+
+#include <typedefs.h>
+#include <ethernet.h>
+#include <bcmip.h>
+#include <bcmeth.h>
+#include <bcmipv6.h>
+#include <bcmevent.h>
+#include <802.11.h>
+#include <802.11s.h>
+#include <802.1d.h>
+#include <bcmwifi_channels.h>
+#ifdef WL11AX
+#include <802.11ax.h>
+#endif /* WL11AX */
+#include <bcmwifi_rates.h>
+#include <wlioctl_defs.h>
+
+#include <bcm_mpool_pub.h>
+#include <bcmcdc.h>
+#define SSSR_NEW_API
+
+/* Include bcmerror.h for error codes or aliases */
+#ifdef BCMUTILS_ERR_CODES
+#include <bcmerror.h>
+#endif /* BCMUTILS_ERR_CODES */
+#include <bcmtlv.h>
+
+/* NOTE re: Module specific error codes.
+ *
+ * BCME_.. error codes are extended by various features - e.g. FTM, NAN, SAE etc.
+ * The current process is to allocate a range of 1024 negative 32 bit integers to
+ * each module that extends the error codes to indicate a module specific status.
+ *
+ * The next range to use is below. If that range is used for a new feature, please
+ * update the range to be used by the next feature.
+ *
+ * The error codes -4096 ... -5119 are reserved for firmware signing.
+ *
+ * Next available (inclusive) range: [-8*1024 + 1, -7*1024]
+ *
+ * End Note
+ */
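+
+/*
+ * Sketch (illustrative): a hypothetical new module claiming the next range
+ * noted above would define its bounds from that range and then bump the note:
+ *
+ *   #define BCME_MYFEATURE_FIRST   (-7*1024)       // highest code in range
+ *   #define BCME_MYFEATURE_LAST    (-8*1024 + 1)   // lowest code in range
+ */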
+
+/* 11ax trigger frame format - versioning info */
+#define TRIG_FRAME_FORMAT_11AX_DRAFT_1P1 0
+
+typedef struct {
+ uint32 num;
+ chanspec_t list[1];
+} chanspec_list_t;
+
+#define RSN_KCK_LENGTH 16
+#define RSN_KEK_LENGTH 16
+#define TPK_FTM_LEN 16
+#ifndef INTF_NAME_SIZ
+#define INTF_NAME_SIZ 16
+#endif
+
+#define WL_ASSOC_START_EVT_DATA_VERSION 1
+
+typedef struct assoc_event_data {
+ uint32 version;
+ uint32 flags;
+ chanspec_t join_chspec;
+} assoc_event_data_t;
+
+/**Used to send ioctls over the transport pipe */
+typedef struct remote_ioctl {
+ cdc_ioctl_t msg;
+ uint32 data_len;
+ char intf_name[INTF_NAME_SIZ];
+} rem_ioctl_t;
+#define REMOTE_SIZE sizeof(rem_ioctl_t)
+
+#define BCM_IOV_XTLV_VERSION 0
+
+#define MAX_NUM_D11CORES 2
+
+/**DFS Forced param */
+typedef struct wl_dfs_forced_params {
+ chanspec_t chspec;
+ uint16 version;
+ chanspec_list_t chspec_list;
+} wl_dfs_forced_t;
+
+#define DFS_PREFCHANLIST_VER 0x01
+#define WL_CHSPEC_LIST_FIXED_SIZE OFFSETOF(chanspec_list_t, list)
+/* size of dfs forced param size given n channels are in the list */
+#define WL_DFS_FORCED_PARAMS_SIZE(n) \
+ (sizeof(wl_dfs_forced_t) + (((n) < 1) ? (0) : (((n) - 1)* sizeof(chanspec_t))))
+#define WL_DFS_FORCED_PARAMS_FIXED_SIZE \
+ (WL_CHSPEC_LIST_FIXED_SIZE + OFFSETOF(wl_dfs_forced_t, chspec_list))
+#define WL_DFS_FORCED_PARAMS_MAX_SIZE \
+ (WL_DFS_FORCED_PARAMS_FIXED_SIZE + (WL_NUMCHANNELS * sizeof(chanspec_t)))
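+
+/*
+ * Sizing sketch (illustrative): allocating a wl_dfs_forced_t for 'n' preferred
+ * chanspecs; 'n', 'osh' and the MALLOCZ() usage are assumptions here.
+ *
+ *   wl_dfs_forced_t *params =
+ *       (wl_dfs_forced_t *)MALLOCZ(osh, WL_DFS_FORCED_PARAMS_SIZE(n));
+ *   params->version = DFS_PREFCHANLIST_VER;
+ *   params->chspec_list.num = n;
+ */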
+
+/**association decision information */
+typedef struct {
+ uint8 assoc_approved; /**< (re)association approved */
+ uint8 pad;
+ uint16 reject_reason; /**< reason code for rejecting association */
+ struct ether_addr da;
+ uint8 pad1[6];
+#if defined(NDIS) && (NDISVER >= 0x0620)
+ LARGE_INTEGER sys_time; /**< current system time */
+#else
+ int64 sys_time; /**< current system time */
+#endif
+} assoc_decision_t;
+
+#define DFS_SCAN_S_IDLE -1
+#define DFS_SCAN_S_RADAR_FREE 0
+#define DFS_SCAN_S_RADAR_FOUND 1
+#define DFS_SCAN_S_INPROGESS 2
+#define DFS_SCAN_S_SCAN_ABORTED 3
+#define DFS_SCAN_S_SCAN_MODESW_INPROGRESS 4
+#define DFS_SCAN_S_MAX 5
+
+#define ACTION_FRAME_SIZE 1800
+
+typedef struct wl_action_frame {
+ struct ether_addr da;
+ uint16 len;
+ uint32 packetId;
+ uint8 data[ACTION_FRAME_SIZE];
+} wl_action_frame_t;
+
+#define WL_WIFI_ACTION_FRAME_SIZE sizeof(struct wl_action_frame)
+
+typedef struct ssid_info
+{
+ uint8 ssid_len; /**< the length of SSID */
+ uint8 ssid[32]; /**< SSID string */
+} ssid_info_t;
+
+typedef struct wl_af_params {
+ uint32 channel;
+ int32 dwell_time;
+ struct ether_addr BSSID;
+ uint8 PAD[2];
+ wl_action_frame_t action_frame;
+} wl_af_params_t;
+
+#define WL_WIFI_AF_PARAMS_SIZE sizeof(struct wl_af_params)
+
+#define MFP_TEST_FLAG_NORMAL 0
+#define MFP_TEST_FLAG_ANY_KEY 1
+typedef struct wl_sa_query {
+ uint32 flag;
+ uint8 action;
+ uint8 PAD;
+ uint16 id;
+ struct ether_addr da;
+ uint16 PAD;
+} wl_sa_query_t;
+
+/* EXT_STA */
+/**association information */
+typedef struct {
+ uint32 assoc_req; /**< offset to association request frame */
+ uint32 assoc_req_len; /**< association request frame length */
+ uint32 assoc_rsp; /**< offset to association response frame */
+ uint32 assoc_rsp_len; /**< association response frame length */
+ uint32 bcn; /**< offset to AP beacon */
+ uint32 bcn_len; /**< AP beacon length */
+ uint32 wsec; /**< ucast security algo */
+ uint32 wpaie; /**< offset to WPA ie */
+ uint8 auth_alg; /**< 802.11 authentication mode */
+ uint8 WPA_auth; /**< WPA: authenticated key management */
+ uint8 ewc_cap; /**< EWC (MIMO) capable */
+ uint8 ofdm; /**< OFDM */
+} assoc_info_t;
+/* defined(EXT_STA) */
+
+/* Flags for OBSS IOVAR Parameters */
+#define WL_OBSS_DYN_BWSW_FLAG_ACTIVITY_PERIOD (0x01)
+#define WL_OBSS_DYN_BWSW_FLAG_NOACTIVITY_PERIOD (0x02)
+#define WL_OBSS_DYN_BWSW_FLAG_NOACTIVITY_INCR_PERIOD (0x04)
+#define WL_OBSS_DYN_BWSW_FLAG_PSEUDO_SENSE_PERIOD (0x08)
+#define WL_OBSS_DYN_BWSW_FLAG_RX_CRS_PERIOD (0x10)
+#define WL_OBSS_DYN_BWSW_FLAG_DUR_THRESHOLD (0x20)
+#define WL_OBSS_DYN_BWSW_FLAG_TXOP_PERIOD (0x40)
+
+/* OBSS IOVAR Version information */
+#define WL_PROT_OBSS_CONFIG_PARAMS_VERSION 1
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint8 obss_bwsw_activity_cfm_count_cfg; /**< configurable count in
+ * seconds before we confirm that OBSS is present and
+ * dynamically activate dynamic bwswitch.
+ */
+ uint8 obss_bwsw_no_activity_cfm_count_cfg; /**< configurable count in
+ * seconds before we confirm that OBSS is GONE and
+ * dynamically start pseudo upgrade. If OBSS is seen during the pseudo
+ * sense time [i.e. we falsely detected OBSS-is-gone in the watchdog],
+ * this count is incremented in steps of
+ * obss_bwsw_no_activity_cfm_count_incr_cfg before confirming OBSS
+ * detection again. Note that, at present, a max of 30 seconds is
+ * allowed. [OBSS_BWSW_NO_ACTIVITY_MAX_INCR_DEFAULT]
+ */
+ uint8 obss_bwsw_no_activity_cfm_count_incr_cfg; /* see above
+ */
+ uint16 obss_bwsw_pseudo_sense_count_cfg; /**< number of msecs/cnt to be in
+ * pseudo state. This is used to sense/measure the stats from lq.
+ */
+ uint8 obss_bwsw_rx_crs_threshold_cfg; /**< RX CRS default threshold */
+ uint8 obss_bwsw_dur_thres; /**< OBSS dyn bwsw trigger/RX CRS Sec */
+ uint8 obss_bwsw_txop_threshold_cfg; /**< TXOP default threshold */
+} BWL_POST_PACKED_STRUCT wlc_obss_dynbwsw_config_t;
+#include <packed_section_end.h>
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint32 version; /**< version field */
+ uint32 config_mask;
+ uint32 reset_mask;
+ wlc_obss_dynbwsw_config_t config_params;
+} BWL_POST_PACKED_STRUCT obss_config_params_t;
+#include <packed_section_end.h>
+
+/**bsscfg type */
+typedef enum bsscfg_type {
+ BSSCFG_TYPE_GENERIC = 0, /**< Generic AP/STA/IBSS BSS */
+ BSSCFG_TYPE_P2P = 1, /**< P2P BSS */
+ /* index 2 earlier used for BTAMP */
+ BSSCFG_TYPE_PSTA = 3,
+ BSSCFG_TYPE_TDLS = 4,
+ BSSCFG_TYPE_SLOTTED_BSS = 5,
+ BSSCFG_TYPE_PROXD = 6,
+ BSSCFG_TYPE_NAN = 7,
+ BSSCFG_TYPE_MESH = 8,
+ BSSCFG_TYPE_AIBSS = 9
+} bsscfg_type_t;
+
+/* bsscfg subtype */
+typedef enum bsscfg_subtype {
+ BSSCFG_SUBTYPE_NONE = 0,
+ BSSCFG_GENERIC_STA = 1, /* GENERIC */
+ BSSCFG_GENERIC_AP = 2,
+ BSSCFG_GENERIC_IBSS = 6,
+ BSSCFG_P2P_GC = 3, /* P2P */
+ BSSCFG_P2P_GO = 4,
+ BSSCFG_P2P_DISC = 5,
+ /* Index 7 & 8 earlier used for BTAMP */
+ BSSCFG_SUBTYPE_AWDL = 9, /* SLOTTED_BSS_TYPE */
+ BSSCFG_SUBTYPE_NAN_MGMT = 10,
+ BSSCFG_SUBTYPE_NAN_DATA = 11,
+ BSSCFG_SUBTYPE_NAN_MGMT_DATA = 12
+} bsscfg_subtype_t;
+
+typedef struct wlc_bsscfg_info {
+ uint32 type;
+ uint32 subtype;
+} wlc_bsscfg_info_t;
+
+/* ULP SHM Offsets info */
+typedef struct ulp_shm_info {
+ uint32 m_ulp_ctrl_sdio;
+ uint32 m_ulp_wakeevt_ind;
+ uint32 m_ulp_wakeind;
+} ulp_shm_info_t;
+
+/* Note: Due to unpredictable size, bool type should not be used in any ioctl argument structure
+ * Cf PR53622
+ */
+
+#define WL_BSS_INFO_VERSION 109 /**< current version of wl_bss_info struct */
+
+/**
+ * BSS info structure
+ * Applications MUST CHECK ie_offset field and length field to access IEs and
+ * next bss_info structure in a vector (in wl_scan_results_t)
+ */
+typedef struct wl_bss_info {
+ uint32 version; /**< version field */
+ uint32 length; /**< byte length of data in this record,
+ * starting at version and including IEs
+ */
+ struct ether_addr BSSID;
+ uint16 beacon_period; /**< units are Kusec */
+ uint16 capability; /**< Capability information */
+ uint8 SSID_len;
+ uint8 SSID[32];
+ uint8 bcnflags; /* additional flags w.r.t. beacon */
+ struct {
+ uint32 count; /**< # rates in this set */
+ uint8 rates[16]; /**< rates in 500kbps units w/hi bit set if basic */
+ } rateset; /**< supported rates */
+ chanspec_t chanspec; /**< chanspec for bss */
+ uint16 atim_window; /**< units are Kusec */
+ uint8 dtim_period; /**< DTIM period */
+ uint8 accessnet; /* from beacon interwork IE (if bcnflags) */
+ int16 RSSI; /**< receive signal strength (in dBm) */
+ int8 phy_noise; /**< noise (in dBm) */
+ uint8 n_cap; /**< BSS is 802.11N Capable */
+ uint8 freespace1; /* make implicit padding explicit */
+ uint8 load; /**< BSS Load from QBSS load IE if available */
+ uint32 nbss_cap; /**< 802.11N+AC BSS Capabilities */
+ uint8 ctl_ch; /**< 802.11N BSS control channel number */
+ uint8 padding1[3]; /**< explicit struct alignment padding */
+ uint16 vht_rxmcsmap; /**< VHT rx mcs map (802.11ac IE, VHT_CAP_MCS_MAP_*) */
+ uint16 vht_txmcsmap; /**< VHT tx mcs map (802.11ac IE, VHT_CAP_MCS_MAP_*) */
+ uint8 flags; /**< flags */
+ uint8 vht_cap; /**< BSS is vht capable */
+ uint8 reserved[2]; /**< Reserved for expansion of BSS properties */
+ uint8 basic_mcs[MCSSET_LEN]; /**< 802.11N BSS required MCS set */
+
+ uint16 ie_offset; /**< offset at which IEs start, from beginning */
+ uint16 freespace2; /* making implicit padding explicit */
+ uint32 ie_length; /**< byte length of Information Elements */
+ int16 SNR; /**< average SNR during frame reception */
+ uint16 vht_mcsmap; /**< STA's Associated vhtmcsmap */
+ uint16 vht_mcsmap_prop; /**< STA's Associated prop vhtmcsmap */
+ uint16 vht_txmcsmap_prop; /**< prop VHT tx mcs prop */
+} wl_bss_info_v109_t;
+
+/**
+ * BSS info structure
+ * Applications MUST CHECK ie_offset field and length field to access IEs and
+ * next bss_info structure in a vector (in wl_scan_results_t)
+ */
+typedef struct wl_bss_info_v109_1 {
+ uint32 version; /**< version field */
+ uint32 length; /**< byte length of data in this record,
+ * starting at version and including IEs
+ */
+ struct ether_addr BSSID;
+ uint16 beacon_period; /**< units are Kusec */
+ uint16 capability; /**< Capability information */
+ uint8 SSID_len;
+ uint8 SSID[32];
+ uint8 bcnflags; /* additional flags w.r.t. beacon */
+ struct {
+ uint32 count; /**< # rates in this set */
+ uint8 rates[16]; /**< rates in 500kbps units w/hi bit set if basic */
+ } rateset; /**< supported rates */
+ chanspec_t chanspec; /**< chanspec for bss */
+ uint16 atim_window; /**< units are Kusec */
+ uint8 dtim_period; /**< DTIM period */
+ uint8 accessnet; /* from beacon interwork IE (if bcnflags) */
+ int16 RSSI; /**< receive signal strength (in dBm) */
+ int8 phy_noise; /**< noise (in dBm) */
+ uint8 n_cap; /**< BSS is 802.11N Capable */
+ uint8 he_cap; /**< BSS is he capable */
+ uint8 load; /**< BSS Load from QBSS load IE if available */
+ uint32 nbss_cap; /**< 802.11N+AC BSS Capabilities */
+ uint8 ctl_ch; /**< 802.11N BSS control channel number */
+ uint8 padding1[3]; /**< explicit struct alignment padding */
+ uint16 vht_rxmcsmap; /**< VHT rx mcs map (802.11ac IE, VHT_CAP_MCS_MAP_*) */
+ uint16 vht_txmcsmap; /**< VHT tx mcs map (802.11ac IE, VHT_CAP_MCS_MAP_*) */
+ uint8 flags; /**< flags */
+ uint8 vht_cap; /**< BSS is vht capable */
+ uint8 reserved[2]; /**< Reserved for expansion of BSS properties */
+ uint8 basic_mcs[MCSSET_LEN]; /**< 802.11N BSS required MCS set */
+
+ uint16 ie_offset; /**< offset at which IEs start, from beginning */
+ uint16 freespace2; /* making implicit padding explicit */
+ uint32 ie_length; /**< byte length of Information Elements */
+ int16 SNR; /**< average SNR during frame reception */
+ uint16 vht_mcsmap; /**< STA's Associated vhtmcsmap */
+ uint16 vht_mcsmap_prop; /**< STA's Associated prop vhtmcsmap */
+ uint16 vht_txmcsmap_prop; /**< prop VHT tx mcs prop */
+ uint32 he_mcsmap; /**< STA's Associated hemcsmap */
+ uint32 he_rxmcsmap; /**< HE rx mcs map (802.11ax IE, HE_CAP_MCS_MAP_*) */
+ uint32 he_txmcsmap; /**< HE tx mcs map (802.11ax IE, HE_CAP_MCS_MAP_*) */
+} wl_bss_info_v109_1_t;
+
+/**
+ * BSS info structure
+ * Applications MUST CHECK ie_offset field and length field to access IEs and
+ * next bss_info structure in a vector (in wl_scan_results_t)
+ */
+typedef struct wl_bss_info_v109_2 {
+ uint32 version; /**< version field */
+ uint32 length; /**< byte length of data in this record,
+ * starting at version and including IEs
+ */
+ struct ether_addr BSSID;
+ uint16 beacon_period; /**< units are Kusec */
+ uint16 capability; /**< Capability information */
+ uint8 SSID_len;
+ uint8 SSID[32];
+ uint8 bcnflags; /* additional flags w.r.t. beacon */
+ struct {
+ uint32 count; /**< # rates in this set */
+ uint8 rates[16]; /**< rates in 500kbps units w/hi bit set if basic */
+ } rateset; /**< supported rates */
+ chanspec_t chanspec; /**< chanspec for bss */
+ uint16 atim_window; /**< units are Kusec */
+ uint8 dtim_period; /**< DTIM period */
+ uint8 accessnet; /* from beacon interwork IE (if bcnflags) */
+ int16 RSSI; /**< receive signal strength (in dBm) */
+ int8 phy_noise; /**< noise (in dBm) */
+ uint8 n_cap; /**< BSS is 802.11N Capable */
+ uint8 he_cap; /**< BSS is he capable */
+ uint8 load; /**< BSS Load from QBSS load IE if available */
+ uint32 nbss_cap; /**< 802.11N+AC BSS Capabilities */
+ uint8 ctl_ch; /**< 802.11N BSS control channel number */
+ uint8 RSVD1[3];
+ uint16 vht_rxmcsmap; /**< VHT rx mcs map (802.11ac IE, VHT_CAP_MCS_MAP_*) */
+ uint16 vht_txmcsmap; /**< VHT tx mcs map (802.11ac IE, VHT_CAP_MCS_MAP_*) */
+ uint8 flags; /**< flags */
+ uint8 vht_cap; /**< BSS is vht capable */
+ uint8 RSVD2[2];
+ uint8 basic_mcs[MCSSET_LEN]; /**< 802.11N BSS required MCS set */
+
+ uint16 ie_offset; /**< offset at which IEs start, from beginning */
+ uint16 freespace2; /* making implicit padding explicit */
+ uint32 ie_length; /**< byte length of Information Elements */
+ int16 SNR; /**< average SNR during frame reception */
+ uint16 vht_mcsmap; /**< STA's Associated vhtmcsmap */
+ uint16 vht_mcsmap_prop; /**< STA's Associated prop vhtmcsmap */
+ uint16 vht_txmcsmap_prop; /**< prop VHT tx mcs prop */
+ uint32 he_mcsmap; /**< STA's Associated hemcsmap */
+ uint32 he_rxmcsmap; /**< HE rx mcs map (802.11ax IE, HE_CAP_MCS_MAP_*) */
+ uint32 he_txmcsmap; /**< HE tx mcs map (802.11ax IE, HE_CAP_MCS_MAP_*) */
+ uint32 timestamp[2]; /* Beacon Timestamp for FAKEAP req */
+} wl_bss_info_v109_2_t;
+
+/**
+ * BSS info structure
+ * Applications MUST CHECK ie_offset field and length field to access IEs and
+ * next bss_info structure in a vector (in wl_scan_results_t)
+ */
+typedef struct wl_bss_info_v112 {
+ uint32 version; /**< version field */
+ uint32 length; /**< byte length of data in this record,
+ * starting at version and including IEs
+ */
+ struct ether_addr BSSID;
+ uint16 beacon_period; /**< units are Kusec */
+ uint16 capability; /**< Capability information */
+ uint8 SSID_len;
+ uint8 SSID[32];
+ uint8 bcnflags; /* additional flags w.r.t. beacon */
+ struct {
+ uint32 count; /**< # rates in this set */
+ uint8 rates[16]; /**< rates in 500kbps units w/hi bit set if basic */
+ } rateset; /**< supported rates */
+ chanspec_t chanspec; /**< chanspec for bss */
+ uint16 atim_window; /**< units are Kusec */
+ uint8 dtim_period; /**< DTIM period */
+ uint8 accessnet; /* from beacon interwork IE (if bcnflags) */
+ int16 RSSI; /**< receive signal strength (in dBm) */
+ int8 phy_noise; /**< noise (in dBm) */
+ uint8 n_cap; /**< BSS is 802.11N Capable */
+ uint8 he_cap; /**< BSS is he capable */
+ uint8 load; /**< BSS Load from QBSS load IE if available */
+ uint32 nbss_cap; /**< 802.11N+AC BSS Capabilities */
+ uint8 ctl_ch; /**< 802.11N BSS control channel number */
+ uint8 RSVD1[3];
+ uint16 vht_rxmcsmap; /**< VHT rx mcs map (802.11ac IE, VHT_CAP_MCS_MAP_*) */
+ uint16 vht_txmcsmap; /**< VHT tx mcs map (802.11ac IE, VHT_CAP_MCS_MAP_*) */
+ uint8 flags; /**< flags */
+ uint8 vht_cap; /**< BSS is vht capable */
+ uint8 RSVD2[2];
+ uint8 basic_mcs[MCSSET_LEN]; /**< 802.11N BSS required MCS set */
+ uint16 ie_offset; /**< offset at which IEs start, from beginning */
+ uint16 freespace2; /* making implicit padding explicit */
+ uint32 ie_length; /**< byte length of Information Elements */
+ int16 SNR; /**< average SNR during frame reception */
+ uint16 vht_mcsmap; /**< STA's Associated vhtmcsmap */
+ uint16 vht_mcsmap_prop; /**< STA's Associated prop vhtmcsmap */
+ uint16 vht_txmcsmap_prop; /**< prop VHT tx mcs prop */
+ uint32 he_mcsmap; /**< STA's Associated hemcsmap */
+ uint32 he_rxmcsmap; /**< HE rx mcs map (802.11ax IE, HE_CAP_MCS_MAP_*) */
+ uint32 he_txmcsmap; /**< HE tx mcs map (802.11ax IE, HE_CAP_MCS_MAP_*) */
+ uint32 timestamp[2]; /* Beacon Timestamp for FAKEAP req */
+ uint8 eht_cap; /* BSS is EHT capable */
+ uint8 RSVD3[3];
+ /* The spec allows a maximum of 16 streams, so the mcs codes for all nss may
+ * not fit in a 32-bit mcs/nss map. But since this field only reflects the
+ * mcs/nss map common to the peer and our device, it is probably ok to keep
+ * it 32 bit and carry only a limited number of nss (e.g. up to 8) in the
+ * map, given that our device is unlikely to exceed 4 streams anyway.
+ */
+ uint32 eht_mcsmap; /* STA's associated EHT mcs code map */
+ /* FIXME: change the following mcs code map to uint32 if all mcs+nss can fit in */
+ uint8 eht_rxmcsmap[6]; /* EHT rx mcs code map */
+ uint8 eht_txmcsmap[6]; /* EHT tx mcs code map */
+} wl_bss_info_v112_t;
+
+#ifndef WL_BSS_INFO_TYPEDEF_HAS_ALIAS
+typedef wl_bss_info_v109_t wl_bss_info_t;
+#endif
+
+#define WL_GSCAN_FULL_RESULT_VERSION 2 /* current version of wl_gscan_result_t struct */
+
+typedef struct wl_gscan_bss_info_v2 {
+ uint32 timestamp[2];
+ wl_bss_info_v109_t info;
+ /* Do not add any more members below, fixed */
+ /* and variable length Information Elements to follow */
+} wl_gscan_bss_info_v2_t;
+
+typedef struct wl_gscan_bss_info_v3 {
+ uint32 timestamp[2];
+ uint8 info[]; /* var length wl_bss_info_X structures */
+ /* Do not add any more members below, fixed */
+ /* and variable length Information Elements to follow */
+} wl_gscan_bss_info_v3_t;
+
+#ifndef WL_BSS_INFO_TYPEDEF_HAS_ALIAS
+typedef wl_gscan_bss_info_v2_t wl_gscan_bss_info_t;
+#define WL_GSCAN_INFO_FIXED_FIELD_SIZE (sizeof(wl_gscan_bss_info_t) - sizeof(wl_bss_info_t))
+#endif
+
+typedef struct wl_bsscfg {
+ uint32 bsscfg_idx;
+ uint32 wsec;
+ uint32 WPA_auth;
+ uint32 wsec_index;
+ uint32 associated;
+ uint32 BSS;
+ uint32 phytest_on;
+ struct ether_addr prev_BSSID;
+ struct ether_addr BSSID;
+ uint32 targetbss_wpa2_flags;
+ uint32 assoc_type;
+ uint32 assoc_state;
+} wl_bsscfg_t;
+
+typedef struct wl_if_add {
+ uint32 bsscfg_flags;
+ uint32 if_flags;
+ uint32 ap;
+ struct ether_addr mac_addr;
+ uint16 PAD;
+ uint32 wlc_index;
+} wl_if_add_t;
+
+typedef struct wl_bss_config {
+ uint32 atim_window;
+ uint32 beacon_period;
+ uint32 chanspec;
+} wl_bss_config_t;
+
+/* Number of Bsscolor supported per core */
+#ifndef HE_MAX_BSSCOLOR_RES
+#define HE_MAX_BSSCOLOR_RES 2
+#endif
+
+#ifndef HE_MAX_STAID_PER_BSSCOLOR
+#define HE_MAX_STAID_PER_BSSCOLOR 4
+#endif
+
+/* BSSColor indices */
+#define BSSCOLOR0_IDX 0
+#define BSSCOLOR1_IDX 1
+#define HE_BSSCOLOR0 0
+#define HE_BSSCOLOR_MAX_VAL 63
+
+/* STAID indices */
+#define STAID0_IDX 0
+#define STAID1_IDX 1
+#define STAID2_IDX 2
+#define STAID3_IDX 3
+#define HE_STAID_MAX_VAL 0x07FF
+
+typedef struct wl_bsscolor_info {
+ uint16 version; /**< structure version */
+ uint16 length; /**< length of the bsscolor info */
+ uint8 bsscolor_index; /**< bsscolor index 0-1 */
+ uint8 bsscolor; /**<bsscolor value from 0 to 63 */
+ uint8 partial_bsscolor_ind;
+ uint8 disable_bsscolor_ind; /**< To disable particular bsscolor */
+ /* bsscolor_disable to be added as part of D1.0 */
+ uint16 staid_info[HE_MAX_STAID_PER_BSSCOLOR]; /**< 0-3 staid info of each bsscolor */
+} wl_bsscolor_info_t;
+
+#define WL_BSS_USER_RADAR_CHAN_SELECT 0x1 /**< User application will randomly select
+ * radar channel.
+ */
+
+#define DLOAD_HANDLER_VER 1 /**< Downloader version */
+#define DLOAD_FLAG_VER_MASK 0xf000 /**< Downloader version mask */
+#define DLOAD_FLAG_VER_SHIFT 12 /**< Downloader version shift */
+
+#define DL_CRC_NOT_INUSE 0x0001
+#define DL_BEGIN 0x0002
+#define DL_END 0x0004
+
+/* Flags for Major/Minor/Date number shift and mask */
+#define EPI_VER_SHIFT 16
+#define EPI_VER_MASK 0xFFFF
+/** generic download types & flags */
+enum {
+ DL_TYPE_UCODE = 1,
+ DL_TYPE_CLM = 2
+};
+
+/** ucode type values */
+enum {
+ UCODE_FW,
+ INIT_VALS,
+ BS_INIT_VALS
+};
+
+struct wl_dload_data {
+ uint16 flag;
+ uint16 dload_type;
+ uint32 len;
+ uint32 crc;
+ uint8 data[1];
+};
+typedef struct wl_dload_data wl_dload_data_t;
+
+struct wl_ucode_info {
+ uint32 ucode_type;
+ uint32 num_chunks;
+ uint32 chunk_len;
+ uint32 chunk_num;
+ uint8 data_chunk[1];
+};
+typedef struct wl_ucode_info wl_ucode_info_t;
+
+struct wl_clm_dload_info {
+ uint32 ds_id;
+ uint32 clm_total_len;
+ uint32 num_chunks;
+ uint32 chunk_len;
+ uint32 chunk_offset;
+ uint8 data_chunk[1];
+};
+typedef struct wl_clm_dload_info wl_clm_dload_info_t;
+
+typedef struct wlc_ssid {
+ uint32 SSID_len;
+ uint8 SSID[DOT11_MAX_SSID_LEN];
+} wlc_ssid_t;
+
+typedef struct wlc_ssid_ext {
+ uint8 hidden;
+ uint8 PAD;
+ uint16 flags;
+ uint8 SSID_len;
+ int8 rssi_thresh;
+ uint8 SSID[DOT11_MAX_SSID_LEN];
+} wlc_ssid_ext_t;
+
+#define MAX_PREFERRED_AP_NUM 5
+typedef struct wlc_fastssidinfo {
+ uint32 SSID_channel[MAX_PREFERRED_AP_NUM];
+ wlc_ssid_t SSID_info[MAX_PREFERRED_AP_NUM];
+} wlc_fastssidinfo_t;
+
+typedef struct wnm_url {
+ uint8 len;
+ uint8 data[1];
+} wnm_url_t;
+
+typedef struct chan_scandata {
+ uint8 txpower;
+ uint8 pad;
+ chanspec_t channel; /**< Channel num, bw, ctrl_sb and band */
+ uint32 channel_mintime;
+ uint32 channel_maxtime;
+} chan_scandata_t;
+
+typedef enum wl_scan_type {
+ EXTDSCAN_FOREGROUND_SCAN,
+ EXTDSCAN_BACKGROUND_SCAN,
+ EXTDSCAN_FORCEDBACKGROUND_SCAN
+} wl_scan_type_t;
+
+#define WLC_EXTDSCAN_MAX_SSID 5
+
+typedef struct wl_extdscan_params {
+ int8 nprobes; /**< 0: passive, otherwise active */
+ int8 split_scan; /**< split scan */
+ int8 band; /**< band */
+ int8 pad;
+ wlc_ssid_t ssid[WLC_EXTDSCAN_MAX_SSID]; /**< ssid list */
+ uint32 tx_rate; /**< in 500kbps units */
+ wl_scan_type_t scan_type; /**< enum */
+ int32 channel_num;
+ chan_scandata_t channel_list[1]; /**< list of chandata structs */
+} wl_extdscan_params_t;
+
+#define WL_EXTDSCAN_PARAMS_FIXED_SIZE (sizeof(wl_extdscan_params_t) - sizeof(chan_scandata_t))
+
+#define WL_SCAN_PARAMS_SSID_MAX 10
+
+struct wl_scan_params {
+ wlc_ssid_t ssid; /**< default: {0, ""} */
+ struct ether_addr bssid; /**< default: bcast */
+ int8 bss_type; /**< default: any,
+ * DOT11_BSSTYPE_ANY/INFRASTRUCTURE/INDEPENDENT
+ */
+ uint8 scan_type; /**< flags, 0 use default */
+ int32 nprobes; /**< -1 use default, number of probes per channel */
+ int32 active_time; /**< -1 use default, dwell time per channel for
+ * active scanning
+ */
+ int32 passive_time; /**< -1 use default, dwell time per channel
+ * for passive scanning
+ */
+ int32 home_time; /**< -1 use default, dwell time for the home channel
+ * between channel scans
+ */
+ int32 channel_num; /**< count of channels and ssids that follow
+ *
+ * low half is count of channels in channel_list, 0
+ * means default (use all available channels)
+ *
+ * high half is entries in wlc_ssid_t array that
+ * follows channel_list, aligned for int32 (4 bytes)
+ * meaning an odd channel count implies a 2-byte pad
+ * between end of channel_list and first ssid
+ *
+ * if ssid count is zero, single ssid in the fixed
+ * parameter portion is assumed, otherwise ssid in
+ * the fixed portion is ignored
+ */
+ uint16 channel_list[1]; /**< list of chanspecs */
+};
+
+/* changes in wl_scan_params_v2 as compared to wl_scan_params (v1):
+ * uint8 scan_type widened to uint32
+ */
+typedef struct wl_scan_params_v2 {
+ uint16 version; /* Version of wl_scan_params, change value of
+ * WL_SCAN_PARAM_VERSION on version update
+ */
+ uint16 length; /* length of structure wl_scan_params_v1_t
+ * without implicit pad
+ */
+ wlc_ssid_t ssid; /**< default: {0, ""} */
+ struct ether_addr bssid; /**< default: bcast */
+ int8 bss_type; /**< default: any,
+ * DOT11_BSSTYPE_ANY/INFRASTRUCTURE/INDEPENDENT
+ */
+ uint8 PAD;
+ uint32 scan_type; /**< flags, 0 use default, and flags specified in
+ * WL_SCANFLAGS_XXX
+ */
+ int32 nprobes; /**< -1 use default, number of probes per channel */
+ int32 active_time; /**< -1 use default, dwell time per channel for
+ * active scanning
+ */
+ int32 passive_time; /**< -1 use default, dwell time per channel
+ * for passive scanning
+ */
+ int32 home_time; /**< -1 use default, dwell time for the home channel
+ * between channel scans
+ */
+ int32 channel_num; /**< count of channels and ssids that follow
+ *
+ * low half is count of channels in channel_list, 0
+ * means default (use all available channels)
+ *
+ * high half is entries in wlc_ssid_t array that
+ * follows channel_list, aligned for int32 (4 bytes)
+ * meaning an odd channel count implies a 2-byte pad
+ * between end of channel_list and first ssid
+ *
+ * if ssid count is zero, single ssid in the fixed
+ * parameter portion is assumed, otherwise ssid in
+ * the fixed portion is ignored
+ */
+ uint16 channel_list[1]; /**< list of chanspecs */
+} wl_scan_params_v2_t;
+
+#define WL_SCAN_PARAMS_VERSION_V2 2
+
+/** size of wl_scan_params not including variable length array */
+#define WL_SCAN_PARAMS_V2_FIXED_SIZE (OFFSETOF(wl_scan_params_v2_t, channel_list))
+#define WL_MAX_ROAMSCAN_DATSZ \
+ (WL_SCAN_PARAMS_FIXED_SIZE + (WL_NUMCHANNELS * sizeof(uint16)))
+#define WL_MAX_ROAMSCAN_V2_DATSZ \
+ (WL_SCAN_PARAMS_V2_FIXED_SIZE + (WL_NUMCHANNELS * sizeof(uint16)))
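+
+/*
+ * Packing sketch (illustrative): channel_num carries the chanspec count in its
+ * low 16 bits and the ssid count in its high 16 bits; 'n_chan'/'n_ssid' are
+ * hypothetical. SSIDs follow the int32-aligned channel list.
+ *
+ *   params->channel_num = (int32)((n_ssid << 16) | (n_chan & 0xffff));
+ */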
+
+/* changes in wl_scan_params_v3 as compared to wl_scan_params (v2):
+ * pad byte used to differentiate Short SSID and Regular SSID
+ */
+typedef struct wl_scan_params_v3 {
+ uint16 version; /* Version of wl_scan_params, change value of
+ * WL_SCAN_PARAM_VERSION on version update
+ */
+ uint16 length; /* length of structure wl_scan_params_v1_t
+ * without implicit pad
+ */
+ wlc_ssid_t ssid; /**< default: {0, ""} */
+ struct ether_addr bssid; /**< default: bcast */
+ int8 bss_type; /**< default: any,
+ * DOT11_BSSTYPE_ANY/INFRASTRUCTURE/INDEPENDENT
+ */
+ uint8 ssid_type; /**< ssid_type_flag ,0 use default, and flags specified
+ * WL_SCAN_SSID_FLAGS
+ */
+ uint32 scan_type; /**< flags, 0 use default, and flags specified in
+ * WL_SCANFLAGS_XXX
+ */
+ int32 nprobes; /**< -1 use default, number of probes per channel */
+ int32 active_time; /**< -1 use default, dwell time per channel for
+ * active scanning
+ */
+ int32 passive_time; /**< -1 use default, dwell time per channel
+ * for passive scanning
+ */
+ int32 home_time; /**< -1 use default, dwell time for the home channel
+ * between channel scans
+ */
+ int32 channel_num; /**< count of channels and ssids that follow
+ *
+ * low half is count of channels in channel_list, 0
+ * means default (use all available channels)
+ *
+ * high half is entries in wlc_ssid_t array that
+ * follows channel_list, aligned for int32 (4 bytes)
+ * meaning an odd channel count implies a 2-byte pad
+ * between end of channel_list and first ssid
+ *
+ * if ssid count is zero, single ssid in the fixed
+ * parameter portion is assumed, otherwise ssid in
+ * the fixed portion is ignored
+ */
+ uint16 channel_list[]; /**< list of chanspecs */
+} wl_scan_params_v3_t;
+
+#define WL_SCAN_PARAMS_VERSION_V3 3
+
+/** size of wl_scan_params not including variable length array */
+#define WL_SCAN_PARAMS_V3_FIXED_SIZE (OFFSETOF(wl_scan_params_v3_t, channel_list))
+#define WL_MAX_ROAMSCAN_V3_DATSZ \
+ (WL_SCAN_PARAMS_V3_FIXED_SIZE + (WL_NUMCHANNELS * sizeof(uint16)))
+
+#define ISCAN_REQ_VERSION 1
+#define ISCAN_REQ_VERSION_V2 2
+
+/** incremental scan struct */
+struct wl_iscan_params {
+ uint32 version;
+ uint16 action;
+ uint16 scan_duration;
+ struct wl_scan_params params;
+};
+
+/** incremental scan struct */
+typedef struct wl_iscan_params_v2 {
+ uint32 version;
+ uint16 action;
+ uint16 scan_duration;
+ wl_scan_params_v2_t params;
+} wl_iscan_params_v2_t;
+
+/** incremental scan struct */
+typedef struct wl_iscan_params_v3 {
+ uint32 version;
+ uint16 action;
+ uint16 scan_duration;
+ wl_scan_params_v3_t params;
+} wl_iscan_params_v3_t;
+
+/** 3 fields + size of wl_scan_params, not including variable length array */
+#define WL_ISCAN_PARAMS_FIXED_SIZE (OFFSETOF(wl_iscan_params_t, params) + sizeof(wlc_ssid_t))
+#define WL_ISCAN_PARAMS_V2_FIXED_SIZE \
+ (OFFSETOF(wl_iscan_params_v2_t, params) + sizeof(wlc_ssid_t))
+#define WL_ISCAN_PARAMS_V3_FIXED_SIZE \
+ (OFFSETOF(wl_iscan_params_v3_t, params) + sizeof(wlc_ssid_t))
+
+typedef struct wl_scan_results_v109 {
+ uint32 buflen;
+ uint32 version;
+ uint32 count;
+ wl_bss_info_v109_t bss_info[1];
+} wl_scan_results_v109_t;
+
+typedef struct wl_scan_results_v2 {
+ uint32 buflen;
+ uint32 version;
+ uint32 count;
+ uint8 bss_info[]; /* var length wl_bss_info_X structures */
+} wl_scan_results_v2_t;
+
+#ifndef WL_BSS_INFO_TYPEDEF_HAS_ALIAS
+typedef wl_scan_results_v109_t wl_scan_results_t;
+/** size of wl_scan_results not including variable length array */
+#define WL_SCAN_RESULTS_FIXED_SIZE (sizeof(wl_scan_results_t) - sizeof(wl_bss_info_t))
+#endif
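+
+/*
+ * Iteration sketch (illustrative): walking the bss_info vector by each
+ * record's 'length' field, as the struct comments above require; 'results'
+ * and 'i' are hypothetical.
+ *
+ *   wl_bss_info_v109_t *bi = results->bss_info;
+ *   for (i = 0; i < results->count; i++) {
+ *       // IEs start at ((uint8 *)bi + bi->ie_offset), bi->ie_length bytes
+ *       bi = (wl_bss_info_v109_t *)((uint8 *)bi + bi->length);
+ *   }
+ */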
+
+#if defined(SIMPLE_ISCAN)
+/** the buf length can be WLC_IOCTL_MAXLEN (8K) to reduce iteration */
+#define WLC_IW_ISCAN_MAXLEN 2048
+typedef struct iscan_buf {
+ struct iscan_buf * next;
+ int8 iscan_buf[WLC_IW_ISCAN_MAXLEN];
+} iscan_buf_t;
+#endif /* SIMPLE_ISCAN */
+#define ESCAN_REQ_VERSION 1
+#define ESCAN_REQ_VERSION_V2 2
+
+/** event scan reduces amount of SOC memory needed to store scan results */
+struct wl_escan_params {
+ uint32 version;
+ uint16 action;
+ uint16 sync_id;
+ struct wl_scan_params params;
+};
+
+typedef struct wl_escan_params_v2 {
+ uint32 version;
+ uint16 action;
+ uint16 sync_id;
+ wl_scan_params_v2_t params;
+} wl_escan_params_v2_t;
+
+typedef struct wl_escan_params_v3 {
+ uint32 version;
+ uint16 action;
+ uint16 sync_id;
+ wl_scan_params_v3_t params;
+} wl_escan_params_v3_t;
+
+#define WL_ESCAN_PARAMS_FIXED_SIZE (OFFSETOF(wl_escan_params_t, params) + sizeof(wlc_ssid_t))
+#define WL_ESCAN_PARAMS_V2_FIXED_SIZE (OFFSETOF(wl_escan_params_v2_t, params) + sizeof(wlc_ssid_t))
+#define WL_ESCAN_PARAMS_V3_FIXED_SIZE (OFFSETOF(wl_escan_params_v3_t, params) + sizeof(wlc_ssid_t))
+/* When a new scan version is defined, change the old scan structs to
+ * wl_scan_params_v1_t and the new ones to wl_scan_params_t
+ */
+#if defined(WL_SCAN_PARAMS_V3)
+typedef struct wl_scan_params wl_scan_params_v1_t;
+typedef struct wl_escan_params wl_escan_params_v1_t;
+typedef struct wl_iscan_params wl_iscan_params_v1_t;
+typedef struct wl_scan_params_v3 wl_scan_params_t;
+typedef struct wl_escan_params_v3 wl_escan_params_t;
+typedef struct wl_iscan_params_v3 wl_iscan_params_t;
+#define WL_SCAN_PARAMS_FIXED_SIZE (OFFSETOF(wl_scan_params_t, channel_list))
+#elif defined(WL_SCAN_PARAMS_V2)
+typedef struct wl_scan_params wl_scan_params_v1_t;
+typedef struct wl_escan_params wl_escan_params_v1_t;
+typedef struct wl_iscan_params wl_iscan_params_v1_t;
+typedef struct wl_scan_params_v2 wl_scan_params_t;
+typedef struct wl_escan_params_v2 wl_escan_params_t;
+typedef struct wl_iscan_params_v2 wl_iscan_params_t;
+#define WL_SCAN_PARAMS_FIXED_SIZE (OFFSETOF(wl_scan_params_t, channel_list))
+#else
+typedef struct wl_scan_params wl_scan_params_t;
+typedef struct wl_escan_params wl_escan_params_t;
+typedef struct wl_iscan_params wl_iscan_params_t;
+#define WL_SCAN_PARAMS_FIXED_SIZE 64
+#endif /* WL_SCAN_PARAMS_V3 */
+
+/** event scan reduces amount of SOC memory needed to store scan results */
+typedef struct wl_escan_result_v109 {
+ uint32 buflen;
+ uint32 version;
+ uint16 sync_id;
+ uint16 bss_count;
+ wl_bss_info_v109_t bss_info[1];
+} wl_escan_result_v109_t;
+
+/** event scan reduces amount of SOC memory needed to store scan results */
+typedef struct wl_escan_result_v2 {
+ uint32 buflen;
+ uint32 version;
+ uint16 sync_id;
+ uint16 bss_count;
+ uint8 bss_info[]; /* var length wl_bss_info_X structures */
+} wl_escan_result_v2_t;
+
+#ifndef WL_BSS_INFO_TYPEDEF_HAS_ALIAS
+typedef wl_escan_result_v109_t wl_escan_result_t;
+#define WL_ESCAN_RESULTS_FIXED_SIZE (sizeof(wl_escan_result_t) - sizeof(wl_bss_info_t))
+#endif
+
+typedef struct wl_gscan_result_v2 {
+ uint32 buflen;
+ uint32 version;
+ uint32 scan_ch_bucket;
+ wl_gscan_bss_info_v2_t bss_info[1];
+} wl_gscan_result_v2_t;
+
+typedef struct wl_gscan_result_v2_1 {
+ uint32 buflen;
+ uint32 version;
+ uint32 scan_ch_bucket;
+ uint8 bss_info[]; /* var length wl_bss_info_X structures */
+} wl_gscan_result_v2_1_t;
+
+#ifndef WL_BSS_INFO_TYPEDEF_HAS_ALIAS
+typedef wl_gscan_result_v2_t wl_gscan_result_t;
+#define WL_GSCAN_RESULTS_FIXED_SIZE (sizeof(wl_gscan_result_t) - sizeof(wl_gscan_bss_info_t))
+#endif
+
+/** incremental scan results struct */
+typedef struct wl_iscan_results {
+ uint32 status;
+ wl_scan_results_v109_t results;
+} wl_iscan_results_v109_t;
+
+/** incremental scan results struct */
+typedef struct wl_iscan_results_v2 {
+ uint32 status;
+ wl_scan_results_v2_t results;
+} wl_iscan_results_v2_t;
+
+#ifndef WL_BSS_INFO_TYPEDEF_HAS_ALIAS
+typedef wl_iscan_results_v109_t wl_iscan_results_t;
+/** size of wl_iscan_results not including variable length array */
+#define WL_ISCAN_RESULTS_FIXED_SIZE \
+ (WL_SCAN_RESULTS_FIXED_SIZE + OFFSETOF(wl_iscan_results_t, results))
+#endif
+
+typedef struct wl_probe_params {
+ wlc_ssid_t ssid;
+ struct ether_addr bssid;
+ struct ether_addr mac;
+} wl_probe_params_t;
+
+#define WL_MAXRATES_IN_SET 16 /**< max # of rates in a rateset */
+
+typedef struct wl_rateset {
+ uint32 count; /**< # rates in this set */
+ uint8 rates[WL_MAXRATES_IN_SET]; /**< rates in 500kbps units w/hi bit set if basic */
+} wl_rateset_t;
+
+#define WL_VHT_CAP_MCS_MAP_NSS_MAX 8
+
+typedef struct wl_rateset_args_v1 {
+ uint32 count; /**< # rates in this set */
+ uint8 rates[WL_MAXRATES_IN_SET]; /**< rates in 500kbps units w/hi bit set if basic */
+ uint8 mcs[MCSSET_LEN]; /**< supported mcs index bit map */
+ uint16 vht_mcs[WL_VHT_CAP_MCS_MAP_NSS_MAX]; /**< supported mcs index bit map per nss */
+} wl_rateset_args_v1_t;
+
+#define RATESET_ARGS_V1 (1)
+#define RATESET_ARGS_V2 (2)
+#define RATESET_ARGS_V3 (3)
+
+/* RATESET_VERSION_ENABLED is defined in wl.mk post J branch.
+ * Guidelines to use wl_rateset_args_t:
+ * [a] in wlioctl.h: Add macro RATESET_ARGS_VX where X is the new version number.
+ * [b] in wlioctl.h: Add a new structure with wl_rateset_args_vX_t
+ * [c] in wlu.c app: Add support to parse new structure under RATESET_ARGS_VX
+ * [d] in wlc_types.h: in respective branch and trunk: redefine wl_rateset_args_t with
+ * new wl_rateset_args_vX_t
+ */
+#ifndef RATESET_VERSION_ENABLED
+/* rateset structure before versioning. Legacy; DO NOT update it here anymore */
+#define RATESET_ARGS_VERSION (RATESET_ARGS_V1)
+typedef wl_rateset_args_v1_t wl_rateset_args_t;
+#endif /* RATESET_VERSION_ENABLED */
+
+/* Note: dependent structures: sta_info_vX_t. When any update to this structure happens,
+ * update sta_info_vX_t also.
+ */
+#define WL_HE_CAP_MCS_MAP_NSS_MAX 8
+
+typedef struct wl_rateset_args_v2 {
+ uint16 version; /**< version. */
+ uint16 len; /**< length */
+ uint32 count; /**< # rates in this set */
+ uint8 rates[WL_MAXRATES_IN_SET]; /**< rates in 500kbps units w/hi bit set if basic */
+ uint8 mcs[MCSSET_LEN]; /**< supported mcs index bit map */
+ uint16 vht_mcs[WL_VHT_CAP_MCS_MAP_NSS_MAX]; /**< supported mcs index bit map per nss */
+ uint16 he_mcs[WL_HE_CAP_MCS_MAP_NSS_MAX]; /**< supported he mcs index bit map per nss */
+} wl_rateset_args_v2_t;
+
+/* HE Rates BITMAP */
+#define WL_HE_CAP_MCS_0_7_MAP 0x00ff
+#define WL_HE_CAP_MCS_0_9_MAP 0x03ff
+#define WL_HE_CAP_MCS_0_11_MAP 0x0fff
+
+/* Note: dependent structures: sta_info_vX_t. When any update to this structure happens,
+ * update sta_info_vX_t also.
+ */
+#define WL_EHT_CAP_MCS_MAP_NSS_MAX 8u /* the spec allows up to 16 streams,
+ * but this controls our own rateset,
+ * which is unlikely to exceed
+ * 4 streams anyway
+ */
+
+typedef struct wl_rateset_args_v3 {
+ uint16 version; /**< version. */
+ uint16 len; /**< length */
+ uint32 count; /**< # rates in 'rates' */
+ uint8 rates[WL_MAXRATES_IN_SET]; /**< rates in 500kbps units w/hi bit set if basic */
+ uint8 mcs[MCSSET_LEN]; /**< supported mcs index bit map */
+ uint16 vht_mcs[WL_VHT_CAP_MCS_MAP_NSS_MAX]; /**< supported VHT mcs per nss */
+ uint16 he_mcs[WL_HE_CAP_MCS_MAP_NSS_MAX]; /**< supported HE mcs per nss */
+ uint16 eht_mcs[WL_EHT_CAP_MCS_MAP_NSS_MAX]; /**< supported EHT mcs bitmap per nss */
+} wl_rateset_args_v3_t;
+
+/* EHT MCS BITMAP */
+#define WL_EHT_CAP_MCS_0_7_MAP 0x00ffu
+#define WL_EHT_CAP_MCS_0_9_MAP 0x03ffu
+#define WL_EHT_CAP_MCS_0_11_MAP 0x0fffu
+#define WL_EHT_CAP_MCS_0_13_MAP 0x3fffu
+
+#define TXBF_RATE_MCS_ALL 4
+#define TXBF_RATE_VHT_ALL 4
+#define TXBF_RATE_OFDM_ALL 8
+
+typedef struct wl_txbf_rateset {
+ uint8 txbf_rate_mcs[TXBF_RATE_MCS_ALL]; /**< one for each stream */
+ uint8 txbf_rate_mcs_bcm[TXBF_RATE_MCS_ALL]; /**< one for each stream */
+ uint16 txbf_rate_vht[TXBF_RATE_VHT_ALL]; /**< one for each stream */
+ uint16 txbf_rate_vht_bcm[TXBF_RATE_VHT_ALL]; /**< one for each stream */
+ uint8 txbf_rate_ofdm[TXBF_RATE_OFDM_ALL]; /**< bitmap of ofdm rates that enables txbf */
+ uint8 txbf_rate_ofdm_bcm[TXBF_RATE_OFDM_ALL]; /* bitmap of ofdm rates that enables txbf */
+ uint8 txbf_rate_ofdm_cnt;
+ uint8 txbf_rate_ofdm_cnt_bcm;
+} wl_txbf_rateset_t;
+
+#define NUM_BFGAIN_ARRAY_1RX 2
+#define NUM_BFGAIN_ARRAY_2RX 3
+#define NUM_BFGAIN_ARRAY_3RX 4
+#define NUM_BFGAIN_ARRAY_4RX 5
+
+typedef struct wl_txbf_expgainset {
+ /* bitmap for each element: B[4:0]=>c0, B[9:5]=>c1, B[14:10]=>c2, B[19:15]=>c[3-7]
+ * B[24:20]=>c[8-9], B[29:25]=>c[10-11]
+ */
+ uint32 bfgain_2x1[NUM_BFGAIN_ARRAY_1RX]; /* exp 1ss, imp 1ss */
+ uint32 bfgain_2x2[NUM_BFGAIN_ARRAY_2RX]; /* exp [1-2]ss, imp 1ss */
+ uint32 bfgain_3x1[NUM_BFGAIN_ARRAY_1RX];
+ uint32 bfgain_3x2[NUM_BFGAIN_ARRAY_2RX];
+ uint32 bfgain_3x3[NUM_BFGAIN_ARRAY_3RX]; /* exp [1-3]ss, imp 1ss */
+ uint32 bfgain_4x1[NUM_BFGAIN_ARRAY_1RX];
+ uint32 bfgain_4x2[NUM_BFGAIN_ARRAY_2RX];
+ uint32 bfgain_4x3[NUM_BFGAIN_ARRAY_3RX];
+ uint32 bfgain_4x4[NUM_BFGAIN_ARRAY_4RX]; /* exp [1-4]ss, imp 1ss */
+} wl_txbf_expgainset_t;
+
+#define OFDM_RATE_MASK 0x0000007f
+typedef uint8 ofdm_rates_t;
+
+typedef struct wl_rates_info {
+ wl_rateset_t rs_tgt;
+ uint32 phy_type;
+ int32 bandtype;
+ uint8 cck_only;
+ uint8 rate_mask;
+ uint8 mcsallow;
+ uint8 bw;
+ uint8 txstreams;
+ uint8 PAD[3];
+} wl_rates_info_t;
+
+/**uint32 list */
+typedef struct wl_uint32_list {
+ /** in - # of elements, out - # of entries */
+ uint32 count;
+ /** variable length uint32 list */
+ uint32 element[1];
+} wl_uint32_list_t;
+/* Size in bytes for wl_uint32_list_t with 'count' elements */
+#define WL_UINT32_LIST_SIZE(count) (((count) + 1) * sizeof(uint32))
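+
+/*
+ * Allocation sketch (illustrative): sizing a list for 'count' elements;
+ * 'osh' and the MALLOCZ() usage are assumptions here.
+ *
+ *   wl_uint32_list_t *list =
+ *       (wl_uint32_list_t *)MALLOCZ(osh, WL_UINT32_LIST_SIZE(count));
+ *   list->count = count;
+ */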
+
+#define CHAN_INFO_LIST_ALL_V1 1
+
+typedef struct wl_chanspec_attr_s_v1 {
+ uint32 chaninfo;
+ uint32 chanspec;
+} wl_chanspec_attr_v1_t;
+
+/**chanspecs list */
+typedef struct wl_chanspec_list_s_v1 {
+ uint16 version;
+ /** in - # of chanspecs, out - # of entries */
+ uint16 count;
+ /** variable length chanspecs list */
+ wl_chanspec_attr_v1_t chspecs[1];
+} wl_chanspec_list_v1_t;
+
+/* WLC_SET_ALLOW_MODE values */
+#define ALLOW_MODE_ANY_BSSID 0
+#define ALLOW_MODE_ONLY_DESIRED_BSSID 1
+#define ALLOW_MODE_NO_BSSID 2
+
+/** used for association with a specific BSSID and chanspec list */
+typedef struct wl_assoc_params {
+ struct ether_addr bssid; /**< 00:00:00:00:00:00: broadcast scan */
+ uint16 bssid_cnt; /**< 0: use chanspec_num, and the single bssid,
+ * otherwise count of chanspecs in chanspec_list
+ * AND paired bssids following chanspec_list
+ * also, chanspec_num has to be set to zero
+ * for bssid list to be used
+ */
+ int32 chanspec_num; /**< 0: all available channels,
+ * otherwise count of chanspecs in chanspec_list
+ */
+ chanspec_t chanspec_list[1]; /**< list of chanspecs */
+
+} wl_assoc_params_t;
+
+typedef struct wl_assoc_params_v1 {
+ uint16 version;
+ uint16 flags;
+ struct ether_addr bssid; /**< 00:00:00:00:00:00: broadcast scan */
+ uint16 bssid_cnt; /**< 0: use chanspec_num, and the single bssid,
+ * otherwise count of chanspecs in chanspec_list
+ * AND paired bssids following chanspec_list
+ * also, chanspec_num has to be set to zero
+ * for bssid list to be used
+ */
+ int32 chanspec_num; /**< 0: all available channels,
+ * otherwise count of chanspecs in chanspec_list
+ */
+ chanspec_t chanspec_list[1]; /**< list of chanspecs */
+} wl_assoc_params_v1_t;
+
+#define ASSOC_HINT_BSSID_PRESENT (1 << 0)
+
+#define WL_ASSOC_PARAMS_FIXED_SIZE OFFSETOF(wl_assoc_params_t, chanspec_list)
+#define WL_ASSOC_PARAMS_FIXED_SIZE_V1 OFFSETOF(wl_assoc_params_v1_t, chanspec_list)
+/** used for reassociation/roam to a specific BSSID and channel */
+typedef wl_assoc_params_t wl_reassoc_params_t;
+#define WL_REASSOC_PARAMS_FIXED_SIZE WL_ASSOC_PARAMS_FIXED_SIZE
+#define WL_EXT_REASSOC_VER 1
+
+typedef struct wl_ext_reassoc_params {
+ uint16 version;
+ uint16 length;
+ uint32 flags;
+ wl_reassoc_params_t params;
+} wl_ext_reassoc_params_t;
+
+/* Flags field defined above in wl_ext_reassoc_params
+ * The value in bits [2:0] is used to specify the type
+ * of scan to be used for reassoc
+ */
+
+#define WL_SCAN_MODE_HIGH_ACC 0u /**< use high accuracy scans for roam */
+#define WL_SCAN_MODE_LOW_SPAN 1u /**< use low span scans for roam */
+#define WL_SCAN_MODE_LOW_POWER 2u /**< use low power scans for roam */
+
+#define WL_EXTREASSOC_PARAMS_FIXED_SIZE (OFFSETOF(wl_ext_reassoc_params_t, params) + \
+ WL_REASSOC_PARAMS_FIXED_SIZE)
+
+/** used for association to a specific BSSID and channel */
+typedef wl_assoc_params_t wl_join_assoc_params_t;
+typedef wl_assoc_params_v1_t wl_join_assoc_params_v1_t;
+#define WL_JOIN_ASSOC_PARAMS_FIXED_SIZE WL_ASSOC_PARAMS_FIXED_SIZE
+#define WL_JOIN_ASSOC_PARAMS_FIXED_SIZE_V1 WL_ASSOC_PARAMS_FIXED_SIZE_V1
+/** used for join with or without a specific bssid and channel list */
+typedef struct wl_join_params {
+ wlc_ssid_t ssid;
+ wl_assoc_params_t params; /**< optional field, but it must include the fixed portion
+ * of the wl_assoc_params_t struct when it is present.
+ */
+} wl_join_params_t;
+
+/** used for join with or without a specific bssid and channel list */
+typedef struct wl_join_params_v1 {
+ wlc_ssid_t ssid;
+ wl_assoc_params_v1_t params; /**< optional field, but it must include the fixed portion
+ * of the wl_assoc_params_t struct when it is present.
+ */
+} wl_join_params_v1_t;
+
+#define WL_JOIN_PARAMS_FIXED_SIZE (OFFSETOF(wl_join_params_t, params) + \
+ WL_ASSOC_PARAMS_FIXED_SIZE)
+#define WL_JOIN_PARAMS_FIXED_SIZE_V1 (OFFSETOF(wl_join_params_v1_t, params) + \
+ WL_ASSOC_PARAMS_FIXED_SIZE_V1)
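+
+/*
+ * Sizing sketch (illustrative): a join request carrying 'n' chanspecs, where
+ * the variable-length chanspec_list replaces the one-element placeholder;
+ * 'n', 'osh' and the MALLOCZ() usage are assumptions here.
+ *
+ *   uint16 join_len = WL_JOIN_PARAMS_FIXED_SIZE + (n * sizeof(chanspec_t));
+ *   wl_join_params_t *join = (wl_join_params_t *)MALLOCZ(osh, join_len);
+ *   join->params.chanspec_num = n;
+ */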
+typedef struct wlc_roam_exp_params {
+ int8 a_band_boost_threshold;
+ int8 a_band_penalty_threshold;
+ int8 a_band_boost_factor;
+ int8 a_band_penalty_factor;
+ int8 cur_bssid_boost;
+ int8 alert_roam_trigger_threshold;
+ int16 a_band_max_boost;
+} wlc_roam_exp_params_t;
+
+#define ROAM_EXP_CFG_VERSION 1
+
+#define ROAM_EXP_ENABLE_FLAG (1 << 0)
+
+#define ROAM_EXP_CFG_PRESENT (1 << 1)
+
+typedef struct wl_roam_exp_cfg {
+ uint16 version;
+ uint16 flags;
+ wlc_roam_exp_params_t params;
+} wl_roam_exp_cfg_t;
+
+typedef struct wl_bssid_pref_list {
+ struct ether_addr bssid;
+ /* Add this to modify rssi */
+ int8 rssi_factor;
+ int8 flags;
+} wl_bssid_pref_list_t;
+
+#define BSSID_PREF_LIST_VERSION 1
+#define ROAM_EXP_CLEAR_BSSID_PREF (1 << 0)
+
+typedef struct wl_bssid_pref_cfg {
+ uint16 version;
+ uint16 flags;
+ uint16 count;
+ uint16 reserved;
+ wl_bssid_pref_list_t bssids[];
+} wl_bssid_pref_cfg_t;
+
+#define SSID_WHITELIST_VERSION 1
+
+#define ROAM_EXP_CLEAR_SSID_WHITELIST (1 << 0)
+
+/* Roam SSID whitelist, ssids in this list are ok to */
+/* be considered as targets to join when considering a roam */
+
+typedef struct wl_ssid_whitelist {
+
+ uint16 version;
+ uint16 flags;
+
+ uint8 ssid_count;
+ uint8 reserved[3];
+ wlc_ssid_t ssids[];
+} wl_ssid_whitelist_t;
+
+#define ROAM_EXP_EVENT_VERSION 1
+
+typedef struct wl_roam_exp_event {
+
+ uint16 version;
+ uint16 flags;
+ wlc_ssid_t cur_ssid;
+} wl_roam_exp_event_t;
+
+/** scan params for extended join */
+typedef struct wl_join_scan_params {
+ uint8 scan_type; /**< 0 use default, active or passive scan */
+ uint8 PAD[3];
+ int32 nprobes; /**< -1 use default, number of probes per channel */
+ int32 active_time; /**< -1 use default, dwell time per channel for
+ * active scanning
+ */
+ int32 passive_time; /**< -1 use default, dwell time per channel
+ * for passive scanning
+ */
+ int32 home_time; /**< -1 use default, dwell time for the home channel
+ * between channel scans
+ */
+} wl_join_scan_params_t;
+
+#define wl_join_assoc_params_t wl_assoc_params_t
+#define wl_join_assoc_params_v1_t wl_assoc_params_v1_t
+/** extended join params */
+typedef struct wl_extjoin_params {
+ wlc_ssid_t ssid; /**< {0, ""}: wildcard scan */
+ wl_join_scan_params_t scan;
+ wl_join_assoc_params_t assoc; /**< optional field, but it must include the fixed portion
+ * of the wl_join_assoc_params_t struct when it is
+ * present.
+ */
+} wl_extjoin_params_t;
+
+typedef struct wl_extjoin_params_v1 {
+ uint16 version;
+ wlc_ssid_t ssid; /**< {0, ""}: wildcard scan */
+ wl_join_scan_params_t scan;
+ wl_join_assoc_params_v1_t assoc; /**< optional field, but it must include the fixed portion
+ * of the wl_join_assoc_params_t struct when it is
+ * present.
+ */
+} wl_extjoin_params_v1_t;
+
+#define WL_EXTJOIN_PARAMS_FIXED_SIZE (OFFSETOF(wl_extjoin_params_t, assoc) + \
+ WL_JOIN_ASSOC_PARAMS_FIXED_SIZE)
+#define WL_EXTJOIN_PARAMS_FIXED_SIZE_V1 (OFFSETOF(wl_extjoin_params_v1_t, assoc) + \
+ WL_JOIN_ASSOC_PARAMS_FIXED_SIZE_V1)
+#define ANT_SELCFG_MAX 4 /**< max number of antenna configurations */
+#define MAX_STREAMS_SUPPORTED 4 /**< max number of streams supported */
+typedef struct {
+ uint8 ant_config[ANT_SELCFG_MAX]; /**< antenna configuration */
+ uint8 num_antcfg; /**< number of available antenna configurations */
+} wlc_antselcfg_t;
+
+/* This is obsolete. Please add new fields by extending the versioned structure
+ * cca_congest_ext_vX_t [X is latest version]
+ */
+typedef struct cca_congest {
+ uint32 duration; /**< millisecs spent sampling this channel */
+ union {
+ uint32 congest_ibss; /**< millisecs in our bss (presumably this traffic will */
+ /**< move if cur bss moves channels) */
+ uint32 congest_me; /**< millisecs in my own traffic */
+ };
+ union {
+ uint32 congest_obss; /**< traffic not in our bss */
+ uint32 congest_notme; /**< traffic not from/to me (including bc/mc) */
+ };
+ uint32 interference; /**< millisecs detecting a non 802.11 interferer. */
+ uint32 timestamp; /**< second timestamp */
+} cca_congest_t;
+
+/* This is obsolete. Please add new fields by extending the versioned structure
+ * cca_congest_ext_channel_req_vX_t [X is latest version]
+ */
+typedef struct cca_congest_channel_req {
+ chanspec_t chanspec; /**< Which channel? */
+ uint16 num_secs; /**< How many secs worth of data */
+ cca_congest_t secs[1]; /**< Data */
+} cca_congest_channel_req_t;
+
+typedef struct cca_congest_ext {
+ uint32 timestamp; /**< second timestamp */
+
+ /* Base structure of cca_congest_t: CCA statistics all inclusive */
+ uint32 duration; /**< millisecs spent sampling this channel */
+ uint32 congest_meonly; /**< millisecs in my own traffic (TX + RX) */
+ uint32 congest_ibss; /**< millisecs in our bss (presumably this traffic will */
+ /**< move if cur bss moves channels) */
+ uint32 congest_obss; /**< traffic not in our bss */
+ uint32 interference; /**< millisecs detecting a non 802.11 interferer. */
+
+ /* CCA statistics for non PM only */
+ uint32 duration_nopm; /**< millisecs spent sampling this channel */
+ uint32 congest_meonly_nopm; /**< millisecs in my own traffic (TX + RX) */
+ uint32 congest_ibss_nopm; /**< millisecs in our bss (presumably this traffic will */
+ /**< move if cur bss moves channels) */
+ uint32 congest_obss_nopm; /**< traffic not in our bss */
+ uint32 interference_nopm; /**< millisecs detecting a non 802.11 interferer. */
+
+ /* CCA statistics for during PM only */
+ uint32 duration_pm; /**< millisecs spent sampling this channel */
+ uint32 congest_meonly_pm; /**< millisecs in my own traffic (TX + RX) */
+ uint32 congest_ibss_pm; /**< millisecs in our bss (presumably this traffic will */
+ /**< move if cur bss moves channels) */
+ uint32 congest_obss_pm; /**< traffic not in our bss */
+ uint32 interference_pm; /**< millisecs detecting a non 802.11 interferer. */
+} cca_congest_ext_t;
+
+typedef struct cca_congest_ext_v2 {
+ uint32 timestamp; /**< second timestamp */
+
+ /* Base structure of cca_congest_t: CCA statistics all inclusive */
+ uint32 duration; /**< millisecs spent sampling this channel */
+ uint32 congest_meonly; /**< millisecs in my own traffic (TX + RX) */
+ uint32 congest_ibss; /**< millisecs in our bss (presumably this traffic will */
+ /**< move if cur bss moves channels) */
+ uint32 congest_obss; /**< traffic not in our bss */
+ uint32 interference; /**< millisecs detecting a non 802.11 interferer. */
+
+ /* CCA statistics for non PM only */
+ uint32 duration_nopm; /**< millisecs spent sampling this channel */
+ uint32 congest_meonly_nopm; /**< millisecs in my own traffic (TX + RX) */
+ uint32 congest_ibss_nopm; /**< millisecs in our bss (presumably this traffic will */
+ /**< move if cur bss moves channels) */
+ uint32 congest_obss_nopm; /**< traffic not in our bss */
+ uint32 interference_nopm; /**< millisecs detecting a non 802.11 interferer. */
+
+ /* CCA statistics for during PM only */
+ uint32 duration_pm; /**< millisecs spent sampling this channel */
+ uint32 congest_meonly_pm; /**< millisecs in my own traffic (TX + RX) */
+ uint32 congest_ibss_pm; /**< millisecs in our bss (presumably this traffic will */
+ /**< move if cur bss moves channels) */
+ uint32 congest_obss_pm; /**< traffic not in our bss */
+ uint32 interference_pm; /**< millisecs detecting a non 802.11 interferer. */
+ uint32 radio_on_time; /* Awake time on this channel */
+ uint32 cca_busy_time; /* CCA is held busy on this channel */
+} cca_congest_ext_v2_t;
+
+#define WL_CCA_EXT_REQ_VER 0u
+#define WL_CCA_EXT_REQ_VER_V2 2u
+#define WL_CCA_EXT_REQ_VER_V3 3u
+
+typedef struct cca_congest_ext_channel_req {
+ uint16 ver; /**< version of this struct */
+ uint16 len; /**< len of this structure */
+ chanspec_t chanspec; /**< Which channel? */
+ uint16 num_secs; /**< How many secs worth of data */
+ struct cca_congest_ext secs[1]; /**< Data - 3 sets for ALL - non-PM - PM */
+} cca_congest_ext_channel_req_t;
+
+typedef struct cca_congest_ext_channel_req_v2 {
+ uint16 ver; /**< version of this struct */
+ uint16 len; /**< len of this structure */
+ chanspec_t chanspec; /**< Which channel? */
+ uint16 num_secs; /* How many secs worth of data */
+ cca_congest_ext_v2_t secs[1]; /* Data - 3 sets for ALL - non-PM - PM */
+} cca_congest_ext_channel_req_v2_t;
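+
+/* Illustrative sketch: preparing the fixed header of a v2 extended CCA
+ * request before issuing the corresponding iovar (the exact iovar name
+ * varies by wl build and is not assumed here):
+ *
+ *	cca_congest_ext_channel_req_v2_t req;
+ *	memset(&req, 0, sizeof(req));
+ *	req.ver = WL_CCA_EXT_REQ_VER_V2;
+ *	req.len = sizeof(req);
+ *	req.chanspec = 0x1001;	// hypothetical chanspec value
+ *	req.num_secs = 1;	// one cca_congest_ext_v2_t already in sizeof()
+ */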
+
+/* Struct holding all channels cca statistics */
+typedef struct cca_congest_ext_channel_req_v3 {
+ uint16 ver;
+ uint16 len;
+ uint8 PAD[2];
+ uint16 num_of_entries;
+ cca_congest_ext_channel_req_v2_t per_chan_stats[1];
+} cca_congest_ext_channel_req_v3_t;
+
+typedef struct {
+ uint32 duration; /**< millisecs spent sampling this channel */
+ uint32 congest; /**< millisecs detecting busy CCA */
+ uint32 timestamp; /**< second timestamp */
+} cca_congest_simple_t;
+
+/* The following two structures must have the same first 4 fields.
+ * The cca_chan_qual_event_t is used to report CCA in older formats and NF.
+ * The cca_only_chan_qual_event_t is used to report CCA only with newer format.
+ */
+typedef struct {
+ uint16 status;
+ uint16 id;
+ chanspec_t chanspec; /**< Which channel? */
+ uint16 len;
+ union {
+ cca_congest_simple_t cca_busy; /**< CCA busy */
+ cca_congest_t cca_busy_ext; /**< Extended CCA report */
+ int32 noise; /**< noise floor */
+ };
+} cca_chan_qual_event_t;
+
+typedef struct {
+ uint16 status;
+ uint16 id;
+ chanspec_t chanspec; /**< Which channel? */
+ uint16 len;
+ union {
+ cca_congest_simple_t cca_busy; /**< CCA busy */
+ struct {
+ cca_congest_t cca_busy_ext; /**< Extended CCA report */
+ cca_congest_t cca_busy_nopm; /**< Extended CCA report (PM awake time) */
+ cca_congest_t cca_busy_pm; /**< Extended CCA report (PM sleep time) */
+ };
+ };
+} cca_only_chan_qual_event_t;
+
+typedef struct {
+ uint16 status; /* Indicates the status of event */
+ uint16 id;
+ /* id is used to indicate the number of bytes to read */
+ chanspec_t chanspec; /**< Which channel? */
+ uint16 len;
+ union {
+ cca_congest_simple_t cca_busy; /**< CCA busy */
+ struct {
+ cca_congest_t cca_busy_ext; /**< Extended CCA report */
+ cca_congest_t cca_busy_nopm; /**< Extended CCA report (PM awake time) */
+ cca_congest_t cca_busy_pm; /**< Extended CCA report (PM sleep time) */
+ };
+ };
+ int32 ofdm_desense;
+} cca_only_chan_qual_event_v2_t;
+
+typedef struct {
+ uint32 msrmnt_time; /**< Time for Measurement (msec) */
+ uint32 msrmnt_done; /**< flag set when measurement complete */
+ char buf[];
+} cca_stats_n_flags;
+
+typedef struct {
+ uint32 msrmnt_query; /* host to driver query for measurement done */
+ uint32 time_req; /* time required for measurement */
+ uint8 report_opt; /* option to print different stats in report */
+ uint8 PAD[3];
+} cca_msrmnt_query;
+
+/* interference sources */
+enum interference_source {
+ ITFR_NONE = 0, /**< no interference */
+ ITFR_PHONE, /**< wireless phone */
+ ITFR_VIDEO_CAMERA, /**< wireless video camera */
+ ITFR_MICROWAVE_OVEN, /**< microwave oven */
+ ITFR_BABY_MONITOR, /**< wireless baby monitor */
+ ITFR_BLUETOOTH, /**< bluetooth */
+ ITFR_VIDEO_CAMERA_OR_BABY_MONITOR, /**< wireless camera or baby monitor */
+ ITFR_BLUETOOTH_OR_BABY_MONITOR, /**< bluetooth or baby monitor */
+ ITFR_VIDEO_CAMERA_OR_PHONE, /**< video camera or phone */
+ ITFR_UNIDENTIFIED /**< interference from unidentified source */
+};
+
+/** structure for interference source report */
+typedef struct {
+ uint32 flags; /**< flags. bit definitions below */
+ uint32 source; /**< last detected interference source */
+ uint32 timestamp; /**< second timestamp on interference flag change */
+} interference_source_rep_t;
+
+#define WLC_CNTRY_BUF_SZ 4 /**< Country string is 3 bytes + NUL */
+
+typedef struct wl_country {
+ char country_abbrev[WLC_CNTRY_BUF_SZ]; /**< nul-terminated country code used in
+ * the Country IE
+ */
+ int32 rev; /**< revision specifier for ccode
+ * on set, -1 indicates unspecified.
+ * on get, rev >= 0
+ */
+ char ccode[WLC_CNTRY_BUF_SZ]; /**< nul-terminated built-in country code.
+ * variable length, but fixed size in
+ * struct allows simple allocation for
+ * expected country strings <= 3 chars.
+ */
+} wl_country_t;
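+
+/* Illustrative sketch: requesting country "US" with an unspecified revision
+ * (per the field comments above, rev = -1 on set means "unspecified"). How
+ * the structure is delivered (conventionally the "country" iovar) is assumed:
+ *
+ *	wl_country_t cspec;
+ *	memset(&cspec, 0, sizeof(cspec));
+ *	strncpy(cspec.country_abbrev, "US", WLC_CNTRY_BUF_SZ - 1);
+ *	strncpy(cspec.ccode, "US", WLC_CNTRY_BUF_SZ - 1);
+ *	cspec.rev = -1;	// let the driver pick the revision
+ */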
+
+#define CCODE_INFO_VERSION 1
+
+typedef enum wl_ccode_role {
+ WLC_CCODE_ROLE_ACTIVE = 0,
+ WLC_CCODE_ROLE_HOST,
+ WLC_CCODE_ROLE_80211D_ASSOC,
+ WLC_CCODE_ROLE_80211D_SCAN,
+ WLC_CCODE_ROLE_DEFAULT,
+ WLC_CCODE_ROLE_DEFAULT_SROM_BKUP,
+ WLC_CCODE_LAST
+} wl_ccode_role_t;
+#define WLC_NUM_CCODE_INFO WLC_CCODE_LAST
+
+typedef struct wl_ccode_entry {
+ uint16 reserved;
+ uint8 band;
+ uint8 role;
+ char ccode[WLC_CNTRY_BUF_SZ];
+} wl_ccode_entry_t;
+
+typedef struct wl_ccode_info {
+ uint16 version;
+ uint16 count; /**< Number of ccodes entries in the set */
+ wl_ccode_entry_t ccodelist[1];
+} wl_ccode_info_t;
+#define WL_CCODE_INFO_FIXED_LEN OFFSETOF(wl_ccode_info_t, ccodelist)
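+
+/* Illustrative sketch: total buffer length for a wl_ccode_info_t carrying
+ * 'count' entries, using the fixed-length macro above:
+ *
+ *	uint32 len = WL_CCODE_INFO_FIXED_LEN + count * sizeof(wl_ccode_entry_t);
+ */
+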
+typedef struct wl_channels_in_country {
+ uint32 buflen;
+ uint32 band;
+ char country_abbrev[WLC_CNTRY_BUF_SZ];
+ uint32 count;
+ uint32 channel[1];
+} wl_channels_in_country_t;
+
+typedef struct wl_country_list {
+ uint32 buflen;
+ uint32 band_set;
+ uint32 band;
+ uint32 count;
+ char country_abbrev[1];
+} wl_country_list_t;
+
+typedef struct wl_rm_req_elt {
+ int8 type;
+ int8 flags;
+ chanspec_t chanspec;
+ uint32 token; /**< token for this measurement */
+ uint32 tsf_h; /**< TSF high 32-bits of Measurement start time */
+ uint32 tsf_l; /**< TSF low 32-bits */
+ uint32 dur; /**< TUs */
+} wl_rm_req_elt_t;
+
+typedef struct wl_rm_req {
+ uint32 token; /**< overall measurement set token */
+ uint32 count; /**< number of measurement requests */
+ void *cb; /**< completion callback function: may be NULL */
+ void *cb_arg; /**< arg to completion callback function */
+ wl_rm_req_elt_t req[1]; /**< variable length block of requests */
+} wl_rm_req_t;
+#define WL_RM_REQ_FIXED_LEN OFFSETOF(wl_rm_req_t, req)
+
+typedef struct wl_rm_rep_elt {
+ int8 type;
+ int8 flags;
+ chanspec_t chanspec;
+ uint32 token; /**< token for this measurement */
+ uint32 tsf_h; /**< TSF high 32-bits of Measurement start time */
+ uint32 tsf_l; /**< TSF low 32-bits */
+ uint32 dur; /**< TUs */
+ uint32 len; /**< byte length of data block */
+ uint8 data[1]; /**< variable length data block */
+} wl_rm_rep_elt_t;
+#define WL_RM_REP_ELT_FIXED_LEN 24 /**< length excluding data block */
+
+#define WL_RPI_REP_BIN_NUM 8
+typedef struct wl_rm_rpi_rep {
+ uint8 rpi[WL_RPI_REP_BIN_NUM];
+ int8 rpi_max[WL_RPI_REP_BIN_NUM];
+} wl_rm_rpi_rep_t;
+
+typedef struct wl_rm_rep {
+ uint32 token; /**< overall measurement set token */
+ uint32 len; /**< length of measurement report block */
+ wl_rm_rep_elt_t rep[1]; /**< variable length block of reports */
+} wl_rm_rep_t;
+#define WL_RM_REP_FIXED_LEN 8
+
+#ifdef BCMCCX
+#define LEAP_USER_MAX 32
+#define LEAP_DOMAIN_MAX 32
+#define LEAP_PASSWORD_MAX 32
+
+typedef struct wl_leap_info {
+ wlc_ssid_t ssid;
+ uint8 user_len;
+ uint8 user[LEAP_USER_MAX];
+ uint8 password_len;
+ uint8 password[LEAP_PASSWORD_MAX];
+ uint8 domain_len;
+ uint8 domain[LEAP_DOMAIN_MAX];
+ uint8 PAD;
+} wl_leap_info_t;
+
+typedef struct wl_leap_list {
+ uint32 buflen;
+ uint32 version;
+ uint32 count;
+ wl_leap_info_t leap_info[1];
+} wl_leap_list_t;
+#endif /* BCMCCX */
+
+typedef enum sup_auth_status {
+ /* Basic supplicant authentication states */
+ WLC_SUP_DISCONNECTED = 0,
+ WLC_SUP_CONNECTING,
+ WLC_SUP_IDREQUIRED,
+ WLC_SUP_AUTHENTICATING,
+ WLC_SUP_AUTHENTICATED,
+ WLC_SUP_KEYXCHANGE,
+ WLC_SUP_KEYED,
+ WLC_SUP_TIMEOUT,
+ WLC_SUP_LAST_BASIC_STATE,
+
+ /* Extended supplicant authentication states */
+ /** Waiting to receive handshake msg M1 */
+ WLC_SUP_KEYXCHANGE_WAIT_M1 = WLC_SUP_AUTHENTICATED,
+ /** Preparing to send handshake msg M2 */
+ WLC_SUP_KEYXCHANGE_PREP_M2 = WLC_SUP_KEYXCHANGE,
+ /* Waiting to receive handshake msg M3 */
+ WLC_SUP_KEYXCHANGE_WAIT_M3 = WLC_SUP_LAST_BASIC_STATE,
+ WLC_SUP_KEYXCHANGE_PREP_M4, /**< Preparing to send handshake msg M4 */
+ WLC_SUP_KEYXCHANGE_WAIT_G1, /**< Waiting to receive handshake msg G1 */
+ WLC_SUP_KEYXCHANGE_PREP_G2 /**< Preparing to send handshake msg G2 */
+} sup_auth_status_t;
+
+typedef struct wl_wsec_key {
+ uint32 index; /**< key index */
+ uint32 len; /**< key length */
+ uint8 data[DOT11_MAX_KEY_SIZE]; /**< key data */
+ uint32 pad_1[18];
+ uint32 algo; /**< CRYPTO_ALGO_AES_CCM, CRYPTO_ALGO_WEP128, etc */
+ uint32 flags; /**< misc flags */
+ uint32 pad_2[2];
+ int32 pad_3;
+ int32 iv_initialized; /**< has IV been initialized already? */
+ int32 pad_4;
+ /* Rx IV */
+ struct {
+ uint32 hi; /**< upper 32 bits of IV */
+ uint16 lo; /**< lower 16 bits of IV */
+ uint16 PAD;
+ } rxiv;
+ uint32 pad_5[2];
+ struct ether_addr ea; /**< per station */
+ uint16 PAD;
+} wl_wsec_key_t;
+
+/* Min length for PSK passphrase */
+#define WSEC_MIN_PSK_LEN 8
+/* Max length of supported passphrases for PSK */
+#define WSEC_MAX_PSK_LEN 64
+/* Max length of supported passphrases for SAE */
+#define WSEC_MAX_PASSPHRASE_LEN 256u
+/* Max length of SAE password ID */
+#define WSEC_MAX_SAE_PASSWORD_ID 255u
+
+/* Flag for key material that still needs passphrase hashing */
+#define WSEC_PASSPHRASE 1u
+/* Flag indicating an SAE passphrase */
+#define WSEC_SAE_PASSPHRASE 2u
+
+/**receptacle for WLC_SET_WSEC_PMK parameter */
+
+typedef struct wsec_pmk {
+ ushort key_len; /* octets in key material */
+ ushort flags; /* key handling qualification */
+ uint8 key[WSEC_MAX_PASSPHRASE_LEN]; /* PMK material */
+ uint16 opt_len; /* optional field length */
+ uint8 opt_tlvs[1]; /* optional field in bcm_xtlv_t format */
+} wsec_pmk_t;
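+
+/* Illustrative host-side sketch: loading an ASCII WPA2-PSK passphrase into
+ * wsec_pmk_t for WLC_SET_WSEC_PMK. The WSEC_PASSPHRASE flag marks the key
+ * material as a passphrase that still needs hashing:
+ *
+ *	const char *pass = "example-passphrase";	// hypothetical secret
+ *	wsec_pmk_t pmk;
+ *	memset(&pmk, 0, sizeof(pmk));
+ *	pmk.key_len = (ushort)strlen(pass);	// WSEC_MIN_PSK_LEN..WSEC_MAX_PSK_LEN
+ *	pmk.flags = WSEC_PASSPHRASE;
+ *	memcpy(pmk.key, pass, pmk.key_len);
+ */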
+
+typedef enum {
+ WL_PMK_TLV_PASSWORD_ID = 1,
+ WL_PMK_TLV_SSID = 2,
+ WL_PMK_TLV_BSSID = 3
+} wl_pmk_tlv_types_t;
+
+#define WL_AUTH_EVENT_DATA_V1 0x1
+#define WL_AUTH_EVENT_DATA_V2 0x2
+
+/* tlv ids for auth event */
+#define WL_AUTH_PMK_TLV_ID 1u
+#define WL_AUTH_PMKID_TLV_ID 2u
+#define WL_AUTH_PMKID_TYPE_TLV_ID 3u
+#define WL_AUTH_SSID_TLV_ID 4u
+
+#define WL_AUTH_PMKID_TYPE_BSSID 1u
+#define WL_AUTH_PMKID_TYPE_SSID 2u
+/* AUTH event data
+* pmk and pmkid in case of SAE auth
+* xtlvs will be 32 bit aligned
+*/
+typedef struct wl_auth_event {
+ uint16 version;
+ uint16 length;
+ uint8 xtlvs[];
+} wl_auth_event_t;
+
+#define WL_AUTH_EVENT_FIXED_LEN_V1 OFFSETOF(wl_auth_event_t, xtlvs)
+#define WL_AUTH_EVENT_FIXED_LEN_V2 OFFSETOF(wl_auth_event_t, xtlvs)
+
+#define WL_PMKSA_EVENT_DATA_V1 1u
+
+/* tlv ids for PMKSA event */
+#define WL_PMK_TLV_ID 1u
+#define WL_PMKID_TLV_ID 2u
+#define WL_PEER_ADDR_TLV_ID 3u
+
+/* PMKSA event data structure */
+typedef struct wl_pmksa_event {
+ uint16 version;
+ uint16 length;
+ uint8 xtlvs[];
+} wl_pmksa_event_t;
+
+#define WL_PMKSA_EVENT_FIXED_LEN_V1 OFFSETOF(wl_pmksa_event_t, xtlvs)
+
+#define FILS_CACHE_ID_LEN 2u
+#define PMK_LEN_MAX 48u
+
+typedef struct _pmkid_v1 {
+ struct ether_addr BSSID;
+ uint8 PMKID[WPA2_PMKID_LEN];
+} pmkid_v1_t;
+
+#define PMKID_ELEM_V2_LENGTH (sizeof(struct ether_addr) + WPA2_PMKID_LEN + PMK_LEN_MAX + \
+ sizeof(ssid_info_t) + FILS_CACHE_ID_LEN)
+
+typedef struct _pmkid_v2 {
+ uint16 length; /* Should match PMKID_ELEM_VX_LENGTH */
+ struct ether_addr BSSID;
+ uint8 PMKID[WPA2_PMKID_LEN];
+ uint8 pmk[PMK_LEN_MAX]; /* for FILS key derivation */
+ uint16 pmk_len;
+ ssid_info_t ssid;
+ uint8 fils_cache_id[FILS_CACHE_ID_LEN];
+} pmkid_v2_t;
+
+#define PMKID_LIST_VER_2 2
+
+typedef struct _pmkid_v3 {
+ struct ether_addr bssid;
+ uint8 pmkid[WPA2_PMKID_LEN];
+ uint8 pmkid_len;
+ uint8 pmk[PMK_LEN_MAX];
+ uint8 pmk_len;
+ uint16 fils_cache_id; /* 2-byte FILS cache id */
+ uint8 akm;
+ uint8 ssid_len;
+ uint8 ssid[DOT11_MAX_SSID_LEN]; /* For FILS, to save ESSID */
+ /* one pmkid used in whole ESS */
+ uint32 time_left; /* remaining time until expiry, in sec. */
+ /* 0 means expired, all 0xFF means never expire */
+} pmkid_v3_t;
+
+#define PMKID_LIST_VER_3 3
+typedef struct _pmkid_list_v1 {
+ uint32 npmkid;
+ pmkid_v1_t pmkid[1];
+} pmkid_list_v1_t;
+
+typedef struct _pmkid_list_v2 {
+ uint16 version;
+ uint16 length;
+ pmkid_v2_t pmkid[1];
+} pmkid_list_v2_t;
+
+#define PMKDB_SET_IOVAR 1u
+#define PMKDB_GET_IOVAR 2u
+#define PMKDB_CLEAR_IOVAR 4u
+
+typedef struct _pmkid_list_v3 {
+ uint16 version;
+ uint16 length;
+ uint16 count;
+ uint16 flag;
+ pmkid_v3_t pmkid[];
+} pmkid_list_v3_t;
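+
+/* Illustrative sketch: walking a returned pmkid_list_v3_t. 'count' gives the
+ * number of pmkid_v3_t entries in the trailing flexible array:
+ *
+ *	const pmkid_list_v3_t *list = (const pmkid_list_v3_t *)buf;	// iovar output
+ *	uint16 i;
+ *	for (i = 0; i < list->count; i++) {
+ *		const pmkid_v3_t *p = &list->pmkid[i];
+ *		// use p->bssid, p->pmkid, p->time_left, ...
+ *	}
+ */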
+
+#ifndef PMKID_VERSION_ENABLED
+/* pmkid structure before versioning. legacy. DO NOT update here anymore */
+typedef pmkid_v1_t pmkid_t;
+typedef pmkid_list_v1_t pmkid_list_t;
+#endif /* PMKID_VERSION_ENABLED */
+
+typedef struct _pmkid_cand {
+ struct ether_addr BSSID;
+ uint8 preauth;
+} pmkid_cand_t;
+
+typedef struct _pmkid_cand_list {
+ uint32 npmkid_cand;
+ pmkid_cand_t pmkid_cand[1];
+} pmkid_cand_list_t;
+
+#define WL_STA_ANT_MAX 4 /**< max possible rx antennas */
+
+typedef struct wl_assoc_info {
+ uint32 req_len;
+ uint32 resp_len;
+ uint32 flags;
+ struct dot11_assoc_req req;
+ struct ether_addr reassoc_bssid; /**< used in reassociations */
+ struct dot11_assoc_resp resp;
+ uint32 state;
+} wl_assoc_info_t;
+
+/** srom read/write struct passed through ioctl */
+typedef struct {
+ uint32 byteoff; /**< byte offset */
+ uint32 nbytes; /**< number of bytes */
+ uint16 buf[];
+} srom_rw_t;
+
+#define CISH_FLAG_PCIECIS (1 << 15) /**< write CIS format bit for PCIe CIS */
+
+/** similar cis (srom or otp) struct [iovar: may not be aligned] */
+typedef struct {
+ uint16 source; /**< cis source */
+ uint16 flags; /**< flags */
+ uint32 byteoff; /**< byte offset */
+ uint32 nbytes; /**< number of bytes */
+ /* data follows here */
+} cis_rw_t;
+
+/** R_REG and W_REG struct passed through ioctl */
+typedef struct {
+ uint32 byteoff; /**< byte offset of the field in d11regs_t */
+ uint32 val; /**< read/write value of the field */
+ uint32 size; /**< sizeof the field */
+ uint32 band; /**< band (optional) */
+} rw_reg_t;
+
+/**
+ * Structure used by GET/SET_ATTEN ioctls - it controls power in b/g-band
+ * PCL - Power Control Loop
+ */
+typedef struct {
+ uint16 auto_ctrl; /**< WL_ATTEN_XX */
+ uint16 bb; /**< Baseband attenuation */
+ uint16 radio; /**< Radio attenuation */
+ uint16 txctl1; /**< Radio TX_CTL1 value */
+} atten_t;
+
+/** Per-AC retry parameters */
+struct wme_tx_params_s {
+ uint8 short_retry;
+ uint8 short_fallback;
+ uint8 long_retry;
+ uint8 long_fallback;
+ uint16 max_rate; /**< In units of 512 Kbps */
+};
+
+typedef struct wme_tx_params_s wme_tx_params_t;
+
+#define WL_WME_TX_PARAMS_IO_BYTES (sizeof(wme_tx_params_t) * AC_COUNT)
+
+/**Used to get specific link/ac parameters */
+typedef struct {
+ int32 ac;
+ uint8 val;
+ struct ether_addr ea;
+ uint8 PAD;
+} link_val_t;
+
+#define WL_PM_MUTE_TX_VER 1
+
+typedef struct wl_pm_mute_tx {
+ uint16 version; /**< version */
+ uint16 len; /**< length */
+ uint16 deadline; /**< deadline timer (in milliseconds) */
+ uint8 enable; /**< set to 1 to enable mode; set to 0 to disable it */
+ uint8 PAD;
+} wl_pm_mute_tx_t;
+
+/*
+ * Pay attention to version if structure changes.
+ */
+
+/* sta_info_t version 4 */
+typedef struct {
+ uint16 ver; /**< version of this struct */
+ uint16 len; /**< length in bytes of this structure */
+ uint16 cap; /**< sta's advertised capabilities */
+ uint16 PAD;
+ uint32 flags; /**< flags defined below */
+ uint32 idle; /**< time since data pkt rx'd from sta */
+ struct ether_addr ea; /**< Station address */
+ uint16 PAD;
+ wl_rateset_t rateset; /**< rateset in use */
+ uint32 in; /**< seconds elapsed since associated */
+ uint32 listen_interval_inms; /**< Min Listen interval in ms for this STA */
+ uint32 tx_pkts; /**< # of user packets transmitted (unicast) */
+ uint32 tx_failures; /**< # of user packets failed */
+ uint32 rx_ucast_pkts; /**< # of unicast packets received */
+ uint32 rx_mcast_pkts; /**< # of multicast packets received */
+ uint32 tx_rate; /**< Rate used by last tx frame */
+ uint32 rx_rate; /**< Rate of last successful rx frame */
+ uint32 rx_decrypt_succeeds; /**< # of packet decrypted successfully */
+ uint32 rx_decrypt_failures; /**< # of packet decrypted unsuccessfully */
+ uint32 tx_tot_pkts; /**< # of user tx pkts (ucast + mcast) */
+ uint32 rx_tot_pkts; /**< # of data packets recvd (uni + mcast) */
+ uint32 tx_mcast_pkts; /**< # of mcast pkts txed */
+ uint64 tx_tot_bytes; /**< data bytes txed (ucast + mcast) */
+ uint64 rx_tot_bytes; /**< data bytes recvd (ucast + mcast) */
+ uint64 tx_ucast_bytes; /**< data bytes txed (ucast) */
+ uint64 tx_mcast_bytes; /**< # data bytes txed (mcast) */
+ uint64 rx_ucast_bytes; /**< data bytes recvd (ucast) */
+ uint64 rx_mcast_bytes; /**< data bytes recvd (mcast) */
+ int8 rssi[WL_STA_ANT_MAX]; /**< average rssi per antenna
+ * of data frames
+ */
+ int8 nf[WL_STA_ANT_MAX]; /**< per antenna noise floor */
+ uint16 aid; /**< association ID */
+ uint16 ht_capabilities; /**< advertised ht caps */
+ uint16 vht_flags; /**< converted vht flags */
+ uint16 PAD;
+ uint32 tx_pkts_retried; /**< # of frames where a retry was
+ * necessary
+ */
+ uint32 tx_pkts_retry_exhausted; /**< # of user frames where a retry
+ * was exhausted
+ */
+ int8 rx_lastpkt_rssi[WL_STA_ANT_MAX]; /**< Per antenna RSSI of last
+ * received data frame.
+ */
+ /* TX WLAN retry/failure statistics:
+ * Separated for host requested frames and WLAN locally generated frames.
+ * Include unicast frame only where the retries/failures can be counted.
+ */
+ uint32 tx_pkts_total; /**< # user frames sent successfully */
+ uint32 tx_pkts_retries; /**< # user frames retries */
+ uint32 tx_pkts_fw_total; /**< # FW generated sent successfully */
+ uint32 tx_pkts_fw_retries; /**< # retries for FW generated frames */
+ uint32 tx_pkts_fw_retry_exhausted; /**< # FW generated where a retry
+ * was exhausted
+ */
+ uint32 rx_pkts_retried; /**< # rx with retry bit set */
+ uint32 tx_rate_fallback; /**< lowest fallback TX rate */
+ /* Fields above this line are common to sta_info_t versions 4 and 5 */
+
+ uint32 rx_dur_total; /* total user RX duration (estimated) */
+
+ chanspec_t chanspec; /**< chanspec this sta is on */
+ uint16 PAD;
+ wl_rateset_args_v1_t rateset_adv; /* rateset along with mcs index bitmap */
+ uint32 PAD;
+} sta_info_v4_t;
+
+/* Note: Version 4 is the latest version of sta_info_t. Version 5 is abandoned.
+ * Please add new fields to version 4, not version 5.
+ */
+/* sta_info_t version 5 */
+typedef struct {
+ uint16 ver; /**< version of this struct */
+ uint16 len; /**< length in bytes of this structure */
+ uint16 cap; /**< sta's advertised capabilities */
+ uint16 PAD;
+ uint32 flags; /**< flags defined below */
+ uint32 idle; /**< time since data pkt rx'd from sta */
+ struct ether_addr ea; /**< Station address */
+ uint16 PAD;
+ wl_rateset_t rateset; /**< rateset in use */
+ uint32 in; /**< seconds elapsed since associated */
+ uint32 listen_interval_inms; /**< Min Listen interval in ms for this STA */
+ uint32 tx_pkts; /**< # of user packets transmitted (unicast) */
+ uint32 tx_failures; /**< # of user packets failed */
+ uint32 rx_ucast_pkts; /**< # of unicast packets received */
+ uint32 rx_mcast_pkts; /**< # of multicast packets received */
+ uint32 tx_rate; /**< Rate used by last tx frame */
+ uint32 rx_rate; /**< Rate of last successful rx frame */
+ uint32 rx_decrypt_succeeds; /**< # of packet decrypted successfully */
+ uint32 rx_decrypt_failures; /**< # of packet decrypted unsuccessfully */
+ uint32 tx_tot_pkts; /**< # of user tx pkts (ucast + mcast) */
+ uint32 rx_tot_pkts; /**< # of data packets recvd (uni + mcast) */
+ uint32 tx_mcast_pkts; /**< # of mcast pkts txed */
+ uint64 tx_tot_bytes; /**< data bytes txed (ucast + mcast) */
+ uint64 rx_tot_bytes; /**< data bytes recvd (ucast + mcast) */
+ uint64 tx_ucast_bytes; /**< data bytes txed (ucast) */
+ uint64 tx_mcast_bytes; /**< # data bytes txed (mcast) */
+ uint64 rx_ucast_bytes; /**< data bytes recvd (ucast) */
+ uint64 rx_mcast_bytes; /**< data bytes recvd (mcast) */
+ int8 rssi[WL_STA_ANT_MAX]; /**< average rssi per antenna
+ * of data frames
+ */
+ int8 nf[WL_STA_ANT_MAX]; /**< per antenna noise floor */
+ uint16 aid; /**< association ID */
+ uint16 ht_capabilities; /**< advertised ht caps */
+ uint16 vht_flags; /**< converted vht flags */
+ uint16 PAD;
+ uint32 tx_pkts_retried; /**< # of frames where a retry was
+ * necessary
+ */
+ uint32 tx_pkts_retry_exhausted; /**< # of user frames where a retry
+ * was exhausted
+ */
+ int8 rx_lastpkt_rssi[WL_STA_ANT_MAX]; /**< Per antenna RSSI of last
+ * received data frame.
+ */
+ /* TX WLAN retry/failure statistics:
+ * Separated for host requested frames and WLAN locally generated frames.
+ * Include unicast frame only where the retries/failures can be counted.
+ */
+ uint32 tx_pkts_total; /**< # user frames sent successfully */
+ uint32 tx_pkts_retries; /**< # user frames retries */
+ uint32 tx_pkts_fw_total; /**< # FW generated sent successfully */
+ uint32 tx_pkts_fw_retries; /**< # retries for FW generated frames */
+ uint32 tx_pkts_fw_retry_exhausted; /**< # FW generated where a retry
+ * was exhausted
+ */
+ uint32 rx_pkts_retried; /**< # rx with retry bit set */
+ uint32 tx_rate_fallback; /**< lowest fallback TX rate */
+ /* Fields above this line are common to sta_info_t versions 4 and 5 */
+
+ chanspec_t chanspec; /**< chanspec this sta is on */
+ uint16 PAD;
+ wl_rateset_args_v1_t rateset_adv; /* rateset along with mcs index bitmap */
+} sta_info_v5_t;
+
+/*
+ * Pay attention to version if structure changes.
+ */
+
+/* sta_info_t version 6
+ * Changes to wl_rateset_args_t led to updating this struct version as well.
+ */
+typedef struct sta_info_v6 {
+ uint16 ver; /**< version of this struct */
+ uint16 len; /**< length in bytes of this structure */
+ uint16 cap; /**< sta's advertised capabilities */
+ uint16 PAD;
+ uint32 flags; /**< flags defined below */
+ uint32 idle; /**< time since data pkt rx'd from sta */
+ struct ether_addr ea; /**< Station address */
+ uint16 PAD;
+ wl_rateset_t rateset; /**< rateset in use */
+ uint32 in; /**< seconds elapsed since associated */
+ uint32 listen_interval_inms; /**< Min Listen interval in ms for this STA */
+ uint32 tx_pkts; /**< # of user packets transmitted (unicast) */
+ uint32 tx_failures; /**< # of user packets failed */
+ uint32 rx_ucast_pkts; /**< # of unicast packets received */
+ uint32 rx_mcast_pkts; /**< # of multicast packets received */
+ uint32 tx_rate; /**< Rate used by last tx frame */
+ uint32 rx_rate; /**< Rate of last successful rx frame */
+ uint32 rx_decrypt_succeeds; /**< # of packet decrypted successfully */
+ uint32 rx_decrypt_failures; /**< # of packet decrypted unsuccessfully */
+ uint32 tx_tot_pkts; /**< # of user tx pkts (ucast + mcast) */
+ uint32 rx_tot_pkts; /**< # of data packets recvd (uni + mcast) */
+ uint32 tx_mcast_pkts; /**< # of mcast pkts txed */
+ uint64 tx_tot_bytes; /**< data bytes txed (ucast + mcast) */
+ uint64 rx_tot_bytes; /**< data bytes recvd (ucast + mcast) */
+ uint64 tx_ucast_bytes; /**< data bytes txed (ucast) */
+ uint64 tx_mcast_bytes; /**< # data bytes txed (mcast) */
+ uint64 rx_ucast_bytes; /**< data bytes recvd (ucast) */
+ uint64 rx_mcast_bytes; /**< data bytes recvd (mcast) */
+ int8 rssi[WL_STA_ANT_MAX]; /**< average rssi per antenna
+ * of data frames
+ */
+ int8 nf[WL_STA_ANT_MAX]; /**< per antenna noise floor */
+ uint16 aid; /**< association ID */
+ uint16 ht_capabilities; /**< advertised ht caps */
+ uint16 vht_flags; /**< converted vht flags */
+ uint16 PAD;
+ uint32 tx_pkts_retried; /**< # of frames where a retry was
+ * necessary
+ */
+ uint32 tx_pkts_retry_exhausted; /**< # of user frames where a retry
+ * was exhausted
+ */
+ int8 rx_lastpkt_rssi[WL_STA_ANT_MAX]; /**< Per antenna RSSI of last
+ * received data frame.
+ */
+ /* TX WLAN retry/failure statistics:
+ * Separated for host requested frames and WLAN locally generated frames.
+ * Include unicast frame only where the retries/failures can be counted.
+ */
+ uint32 tx_pkts_total; /**< # user frames sent successfully */
+ uint32 tx_pkts_retries; /**< # user frames retries */
+ uint32 tx_pkts_fw_total; /**< # FW generated sent successfully */
+ uint32 tx_pkts_fw_retries; /**< # retries for FW generated frames */
+ uint32 tx_pkts_fw_retry_exhausted; /**< # FW generated where a retry
+ * was exhausted
+ */
+ uint32 rx_pkts_retried; /**< # rx with retry bit set */
+ uint32 tx_rate_fallback; /**< lowest fallback TX rate */
+ /* Fields above this line are common to sta_info_t versions 4 and 5 */
+
+ uint32 rx_dur_total; /* total user RX duration (estimated) */
+
+ chanspec_t chanspec; /**< chanspec this sta is on */
+ uint16 PAD;
+ wl_rateset_args_v2_t rateset_adv; /* rateset along with mcs index bitmap */
+} sta_info_v6_t;
+
+/*
+ * Pay attention to version if structure changes.
+ */
+
+/* sta_info_t version 7
+ * Changes to wl_rateset_args_t led to updating this struct version as well.
+ */
+typedef struct sta_info_v7 {
+ uint16 ver; /**< version of this struct */
+ uint16 len; /**< length in bytes of this structure */
+ uint16 cap; /**< sta's advertised capabilities */
+ uint16 PAD;
+ uint32 flags; /**< flags defined below */
+ uint32 idle; /**< time since data pkt rx'd from sta */
+ struct ether_addr ea; /**< Station address */
+ uint16 PAD;
+ wl_rateset_t rateset; /**< rateset in use */
+ uint32 in; /**< seconds elapsed since associated */
+ uint32 listen_interval_inms; /**< Min Listen interval in ms for this STA */
+ uint32 tx_pkts; /**< # of user packets transmitted (unicast) */
+ uint32 tx_failures; /**< # of user packets failed */
+ uint32 rx_ucast_pkts; /**< # of unicast packets received */
+ uint32 rx_mcast_pkts; /**< # of multicast packets received */
+ uint32 tx_rate; /**< Rate used by last tx frame */
+ uint32 rx_rate; /**< Rate of last successful rx frame */
+ uint32 rx_decrypt_succeeds; /**< # of packet decrypted successfully */
+ uint32 rx_decrypt_failures; /**< # of packet decrypted unsuccessfully */
+ uint32 tx_tot_pkts; /**< # of user tx pkts (ucast + mcast) */
+ uint32 rx_tot_pkts; /**< # of data packets recvd (uni + mcast) */
+ uint32 tx_mcast_pkts; /**< # of mcast pkts txed */
+ uint64 tx_tot_bytes; /**< data bytes txed (ucast + mcast) */
+ uint64 rx_tot_bytes; /**< data bytes recvd (ucast + mcast) */
+ uint64 tx_ucast_bytes; /**< data bytes txed (ucast) */
+ uint64 tx_mcast_bytes; /**< # data bytes txed (mcast) */
+ uint64 rx_ucast_bytes; /**< data bytes recvd (ucast) */
+ uint64 rx_mcast_bytes; /**< data bytes recvd (mcast) */
+ int8 rssi[WL_STA_ANT_MAX]; /**< average rssi per antenna
+ * of data frames
+ */
+ int8 nf[WL_STA_ANT_MAX]; /**< per antenna noise floor */
+ uint16 aid; /**< association ID */
+ uint16 ht_capabilities; /**< advertised ht caps */
+ uint16 vht_flags; /**< converted vht flags */
+ uint16 PAD;
+ uint32 tx_pkts_retried; /**< # of frames where a retry was
+ * necessary
+ */
+ uint32 tx_pkts_retry_exhausted; /**< # of user frames where a retry
+ * was exhausted
+ */
+ int8 rx_lastpkt_rssi[WL_STA_ANT_MAX]; /**< Per antenna RSSI of last
+ * received data frame.
+ */
+ /* TX WLAN retry/failure statistics:
+ * Separated for host requested frames and WLAN locally generated frames.
+ * Include unicast frame only where the retries/failures can be counted.
+ */
+ uint32 tx_pkts_total; /**< # user frames sent successfully */
+ uint32 tx_pkts_retries; /**< # user frames retries */
+ uint32 tx_pkts_fw_total; /**< # FW generated sent successfully */
+ uint32 tx_pkts_fw_retries; /**< # retries for FW generated frames */
+ uint32 tx_pkts_fw_retry_exhausted; /**< # FW generated where a retry
+ * was exhausted
+ */
+ uint32 rx_pkts_retried; /**< # rx with retry bit set */
+ uint32 tx_rate_fallback; /**< lowest fallback TX rate */
+ /* Fields above this line are common to sta_info_t versions 4 and 5 */
+
+ uint32 rx_dur_total; /* total user RX duration (estimated) */
+
+ chanspec_t chanspec; /**< chanspec this sta is on */
+ uint16 PAD;
+ wl_rateset_args_v3_t rateset_adv; /* rateset along with mcs index bitmap */
+} sta_info_v7_t;
+
+/* define to help user-level applications support a sta_info_t that is one
+ * version older.
+ */
+#define WL_OLD_STAINFO_SIZE OFFSETOF(sta_info_t, tx_tot_pkts)
+
+#define WL_STA_VER_4 4
+#define WL_STA_VER_5 5
+/* FIXME: the user/branch should make the selection! */
+#define WL_STA_VER WL_STA_VER_4
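+
+/* Illustrative host-side sketch: the first two uint16 fields (ver, len) are
+ * common to all sta_info versions, so a caller can peek at the version before
+ * casting (byte-order conversion such as dtoh16() is omitted for brevity):
+ *
+ *	const uint16 *hdr = (const uint16 *)buf;	// buf: "sta_info" iovar output
+ *	uint16 ver = hdr[0];
+ *	if (ver == WL_STA_VER_4) {
+ *		const sta_info_v4_t *si = (const sta_info_v4_t *)buf;
+ *		// use si->rssi, si->tx_pkts, si->rx_dur_total, ...
+ *	} else if (ver == WL_STA_VER_5) {
+ *		const sta_info_v5_t *si5 = (const sta_info_v5_t *)buf;
+ *		// v5 lacks rx_dur_total; see the note above
+ *	}
+ */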
+
+#define SWDIV_STATS_VERSION_2 2
+#define SWDIV_STATS_CURRENT_VERSION SWDIV_STATS_VERSION_2
+
+struct wlc_swdiv_stats_v1 {
+ uint32 auto_en;
+ uint32 active_ant;
+ uint32 rxcount;
+ int32 avg_snr_per_ant0;
+ int32 avg_snr_per_ant1;
+ int32 avg_snr_per_ant2;
+ uint32 swap_ge_rxcount0;
+ uint32 swap_ge_rxcount1;
+ uint32 swap_ge_snrthresh0;
+ uint32 swap_ge_snrthresh1;
+ uint32 swap_txfail0;
+ uint32 swap_txfail1;
+ uint32 swap_timer0;
+ uint32 swap_timer1;
+ uint32 swap_alivecheck0;
+ uint32 swap_alivecheck1;
+ uint32 rxcount_per_ant0;
+ uint32 rxcount_per_ant1;
+ uint32 acc_rxcount;
+ uint32 acc_rxcount_per_ant0;
+ uint32 acc_rxcount_per_ant1;
+ uint32 tx_auto_en;
+ uint32 tx_active_ant;
+ uint32 rx_policy;
+ uint32 tx_policy;
+ uint32 cell_policy;
+ uint32 swap_snrdrop0;
+ uint32 swap_snrdrop1;
+ uint32 mws_antsel_ovr_tx;
+ uint32 mws_antsel_ovr_rx;
+ uint8 swap_trig_event_id;
+};
+
+struct wlc_swdiv_stats_v2 {
+ uint16 version; /* version of the structure
+ * as defined by SWDIV_STATS_CURRENT_VERSION
+ */
+ uint16 length; /* length of the entire structure */
+ uint32 auto_en;
+ uint32 active_ant;
+ uint32 rxcount;
+ int32 avg_snr_per_ant0;
+ int32 avg_snr_per_ant1;
+ int32 avg_snr_per_ant2;
+ uint32 swap_ge_rxcount0;
+ uint32 swap_ge_rxcount1;
+ uint32 swap_ge_snrthresh0;
+ uint32 swap_ge_snrthresh1;
+ uint32 swap_txfail0;
+ uint32 swap_txfail1;
+ uint32 swap_timer0;
+ uint32 swap_timer1;
+ uint32 swap_alivecheck0;
+ uint32 swap_alivecheck1;
+ uint32 rxcount_per_ant0;
+ uint32 rxcount_per_ant1;
+ uint32 acc_rxcount;
+ uint32 acc_rxcount_per_ant0;
+ uint32 acc_rxcount_per_ant1;
+ uint32 tx_auto_en;
+ uint32 tx_active_ant;
+ uint32 rx_policy;
+ uint32 tx_policy;
+ uint32 cell_policy;
+ uint32 swap_snrdrop0;
+ uint32 swap_snrdrop1;
+ uint32 mws_antsel_ovr_tx;
+ uint32 mws_antsel_ovr_rx;
+ uint32 swap_trig_event_id;
+};
+
+#define WLC_NUMRATES 16 /**< max # of rates in a rateset */
+
+/**Used to get specific STA parameters */
+typedef struct {
+ uint32 val;
+ struct ether_addr ea;
+ uint16 PAD;
+} scb_val_t;
+
+/**Used by iovar versions of some ioctls, i.e. WLC_SCB_AUTHORIZE et al */
+typedef struct {
+ uint32 code;
+ scb_val_t ioctl_args;
+} authops_t;
+
+/** channel encoding */
+typedef struct channel_info {
+ int32 hw_channel;
+ int32 target_channel;
+ int32 scan_channel;
+} channel_info_t;
+
+/** For ioctls that take a list of MAC addresses */
+typedef struct maclist {
+ uint32 count; /**< number of MAC addresses */
+ struct ether_addr ea[1]; /**< variable length array of MAC addresses */
+} maclist_t;
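+
+/* Illustrative host-side sketch: building a maclist_t for n addresses. As
+ * with the other [1]-sized arrays in this file, sizeof() already covers one
+ * entry:
+ *
+ *	uint32 n = 3;	// hypothetical count
+ *	uint32 len = sizeof(maclist_t) + (n - 1) * sizeof(struct ether_addr);
+ *	maclist_t *ml = malloc(len);	// host-side allocation
+ *	ml->count = n;
+ *	// fill ml->ea[0..n-1] before issuing the ioctl
+ */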
+
+typedef struct wds_client_info {
+ char ifname[INTF_NAME_SIZ]; /* WDS ifname */
+ struct ether_addr ea; /* WDS client MAC address */
+} wds_client_info_t;
+
+#define WDS_MACLIST_MAGIC 0xFFFFFFFF
+#define WDS_MACLIST_VERSION 1
+
+/* For wds MAC list ioctls */
+typedef struct wds_maclist {
+ uint32 count; /* Number of WDS clients */
+ uint32 magic; /* Magic number */
+ uint32 version; /* Version number */
+ struct wds_client_info client_list[1]; /* Variable length array of WDS clients */
+} wds_maclist_t;
+
+/**get pkt count struct passed through ioctl */
+typedef struct get_pktcnt {
+ uint32 rx_good_pkt;
+ uint32 rx_bad_pkt;
+ uint32 tx_good_pkt;
+ uint32 tx_bad_pkt;
+ uint32 rx_ocast_good_pkt; /**< unicast packets destined for others */
+} get_pktcnt_t;
+
+/* NINTENDO2 */
+#define LQ_IDX_MIN 0
+#define LQ_IDX_MAX 1
+#define LQ_IDX_AVG 2
+#define LQ_IDX_SUM 2
+#define LQ_IDX_LAST 3
+#define LQ_STOP_MONITOR 0
+#define LQ_START_MONITOR 1
+
+/** Get averages RSSI, Rx PHY rate and SNR values */
+/* Link Quality */
+typedef struct {
+ int32 rssi[LQ_IDX_LAST]; /**< Array to keep min, max, avg rssi */
+ int32 snr[LQ_IDX_LAST]; /**< Array to keep min, max, avg snr */
+ int32 isvalid; /**< Flag indicating whether above data is valid */
+} wl_lq_t;
+
+typedef enum wl_wakeup_reason_type {
+ LCD_ON = 1,
+ LCD_OFF,
+ DRC1_WAKE,
+ DRC2_WAKE,
+ REASON_LAST
+} wl_wr_type_t;
+
+typedef struct {
+ /** Unique filter id */
+ uint32 id;
+ /** stores the reason for the last wake up */
+ uint8 reason;
+ uint8 PAD[3];
+} wl_wr_t;
+
+/** Get MAC specific rate histogram command */
+typedef struct {
+ struct ether_addr ea; /**< MAC Address */
+ uint8 ac_cat; /**< Access Category */
+ uint8 num_pkts; /**< Number of packet entries to be averaged */
+} wl_mac_ratehisto_cmd_t;
+/** Get MAC rate histogram response */
+/* deprecated after JAGUAR branch */
+typedef struct {
+ uint32 rate[DOT11_RATE_MAX + 1]; /**< Rates */
+ uint32 mcs[WL_RATESET_SZ_HT_IOCTL * WL_TX_CHAINS_MAX]; /**< MCS counts */
+ uint32 vht[WL_RATESET_SZ_VHT_MCS][WL_TX_CHAINS_MAX]; /**< VHT counts */
+ uint32 tsf_timer[2][2]; /**< Start and End time for 8bytes value */
+ uint32 prop11n_mcs[WLC_11N_LAST_PROP_MCS - WLC_11N_FIRST_PROP_MCS + 1]; /**< MCS counts */
+} wl_mac_ratehisto_res_t;
+
+/* sta_info ecounters */
+typedef struct {
+ struct ether_addr ea; /* Station MAC addr */
+ struct ether_addr BSSID; /* BSSID of the BSS */
+ uint32 tx_pkts_fw_total; /* # FW generated sent successfully */
+ uint32 tx_pkts_fw_retries; /* # retries for FW generated frames */
+ uint32 tx_pkts_fw_retry_exhausted; /* # FW generated which
+ * failed after retry
+ */
+} sta_info_ecounters_t;
+
+#define STAMON_MODULE_VER 1
+
+/**Linux network driver ioctl encoding */
+typedef struct wl_ioctl {
+ uint32 cmd; /**< common ioctl definition */
+ void *buf; /**< pointer to user buffer */
+ uint32 len; /**< length of user buffer */
+ uint8 set; /**< 1=set IOCTL; 0=query IOCTL */
+ uint32 used; /**< bytes read or written (optional) */
+ uint32 needed; /**< bytes needed (optional) */
+} wl_ioctl_t;
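+
+/* Illustrative sketch: a query (GET) expressed through wl_ioctl_t. 'buf' and
+ * 'len' describe the user buffer; set = 0 marks the operation as a query.
+ * WLC_GET_MAGIC is used here as a representative command code (assumed to be
+ * defined elsewhere in this interface):
+ *
+ *	char out[256];
+ *	wl_ioctl_t ioc;
+ *	memset(&ioc, 0, sizeof(ioc));
+ *	ioc.cmd = WLC_GET_MAGIC;	// example command code
+ *	ioc.buf = out;
+ *	ioc.len = sizeof(out);
+ *	ioc.set = 0;	// 0 = query, 1 = set
+ */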
+
+#ifdef CONFIG_COMPAT
+typedef struct compat_wl_ioctl {
+ uint32 cmd; /**< common ioctl definition */
+ uint32 buf; /**< pointer to user buffer */
+ uint32 len; /**< length of user buffer */
+ uint8 set; /**< 1=set IOCTL; 0=query IOCTL */
+ uint32 used; /**< bytes read or written (optional) */
+ uint32 needed; /**< bytes needed (optional) */
+} compat_wl_ioctl_t;
+#endif /* CONFIG_COMPAT */
+
+#define WL_NUM_RATES_CCK 4 /**< 1, 2, 5.5, 11 Mbps */
+#define WL_NUM_RATES_OFDM 8 /**< 6, 9, 12, 18, 24, 36, 48, 54 Mbps SISO/CDD */
+#define WL_NUM_RATES_MCS_1STREAM 8 /**< MCS 0-7 1-stream rates - SISO/CDD/STBC/MCS */
+#define WL_NUM_RATES_EXTRA_VHT 2 /**< Additional VHT 11AC rates */
+#define WL_NUM_RATES_VHT 10
+#define WL_NUM_RATES_VHT_ALL (WL_NUM_RATES_VHT + WL_NUM_RATES_EXTRA_VHT)
+#define WL_NUM_RATES_HE 12
+#define WL_NUM_RATES_EHT 14
+#define WL_NUM_RATES_MCS32 1
+#define UC_PATH_LEN 128u /**< uCode path length */
+
+/*
+ * Structure for passing hardware and software
+ * revision info up from the driver.
+ */
+typedef struct wlc_rev_info {
+ uint32 vendorid; /**< PCI vendor id */
+ uint32 deviceid; /**< device id of chip */
+ uint32 radiorev; /**< radio revision */
+ uint32 chiprev; /**< chip revision */
+ uint32 corerev; /**< core revision */
+ uint32 boardid; /**< board identifier (usu. PCI sub-device id) */
+ uint32 boardvendor; /**< board vendor (usu. PCI sub-vendor id) */
+ uint32 boardrev; /**< board revision */
+ uint32 driverrev; /**< driver version */
+ uint32 ucoderev; /**< uCode version */
+ uint32 bus; /**< bus type */
+ uint32 chipnum; /**< chip number */
+ uint32 phytype; /**< phy type */
+ uint32 phyrev; /**< phy revision */
+ uint32 anarev; /**< anacore rev */
+ uint32 chippkg; /**< chip package info */
+ uint32 nvramrev; /**< nvram revision number */
+ uint32 phyminorrev; /**< phy minor rev */
+ uint32 coreminorrev; /**< core minor rev */
+ uint32 drvrev_major; /**< driver version: major */
+ uint32 drvrev_minor; /**< driver version: minor */
+ uint32 drvrev_rc; /**< driver version: rc */
+ uint32 drvrev_rc_inc; /**< driver version: rc incremental */
+ uint16 ucodeprebuilt; /**< uCode prebuilt flag */
+ uint16 ucodediffct; /**< uCode diff count */
+ uchar ucodeurl[128u]; /* obsolete, kept for ROM compatibility */
+ uchar ucodepath[UC_PATH_LEN]; /**< uCode URL or path */
+} wlc_rev_info_t;
+
+#define WL_REV_INFO_LEGACY_LENGTH 48
+
+#define WL_BRAND_MAX 10
+typedef struct wl_instance_info {
+ uint32 instance;
+ int8 brand[WL_BRAND_MAX];
+ int8 PAD[4-(WL_BRAND_MAX%4)];
+} wl_instance_info_t;
+
+/** structure to change size of tx fifo */
+typedef struct wl_txfifo_sz {
+ uint16 magic;
+ uint16 fifo;
+ uint16 size;
+} wl_txfifo_sz_t;
+
+/* Transfer info about an IOVar from the driver */
+/**Max supported IOV name size in bytes, + 1 for nul termination */
+#define WLC_IOV_NAME_LEN (32 + 1)
+
+typedef struct wlc_iov_trx_s {
+ uint8 module;
+ uint8 type;
+ char name[WLC_IOV_NAME_LEN];
+} wlc_iov_trx_t;
+
+/** bump this number if you change the ioctl interface */
+#define WLC_IOCTL_VERSION 2
+#define WLC_IOCTL_VERSION_LEGACY_IOTYPES 1
+/* ifdef EXT_STA */
+typedef struct _wl_assoc_result {
+ ulong associated;
+ ulong NDIS_auth;
+ ulong NDIS_infra;
+} wl_assoc_result_t;
+/* EXT_STA */
+
+#define WL_PHY_PAVARS_LEN 64 /**< Phytype, Bandrange, chain, a[0], b[0], c[0], d[0] .. */
+
+#define WL_PHY_PAVAR_VER 1 /**< pavars version */
+#define WL_PHY_PAVARS2_NUM 3 /**< a1, b0, b1 */
+typedef struct wl_pavars2 {
+ uint16 ver; /**< version of this struct */
+ uint16 len; /**< len of this structure */
+ uint16 inuse; /**< driver returns 1 for a1,b0,b1 in current band range */
+ uint16 phy_type; /**< phy type */
+ uint16 bandrange;
+ uint16 chain;
+ uint16 inpa[WL_PHY_PAVARS2_NUM]; /**< phy pavars for one band range */
+} wl_pavars2_t;
+
+typedef struct wl_po {
+ uint16 phy_type; /**< Phy type */
+ uint16 band;
+ uint16 cckpo;
+ uint16 PAD;
+ uint32 ofdmpo;
+ uint16 mcspo[8];
+} wl_po_t;
+
+#define WL_NUM_RPCALVARS 5 /**< number of rpcal vars */
+
+typedef struct wl_rpcal {
+ uint16 value;
+ uint16 update;
+} wl_rpcal_t;
+
+#define WL_NUM_RPCALPHASEVARS 5 /* number of rpcal phase vars */
+
+typedef struct wl_rpcal_phase {
+ uint16 value;
+ uint16 update;
+} wl_rpcal_phase_t;
+
+typedef struct wl_aci_args {
+ int32 enter_aci_thresh; /* Trigger level to start detecting ACI */
+ int32 exit_aci_thresh; /* Trigger level to exit ACI mode */
+ int32 usec_spin; /* microsecs to delay between rssi samples */
+ int32 glitch_delay; /* interval between ACI scans when glitch count is consistently high */
+ uint16 nphy_adcpwr_enter_thresh; /**< ADC power to enter ACI mitigation mode */
+ uint16 nphy_adcpwr_exit_thresh; /**< ADC power to exit ACI mitigation mode */
+ uint16 nphy_repeat_ctr; /**< Number of tries per channel to compute power */
+ uint16 nphy_num_samples; /**< Number of samples to compute power on one channel */
+ uint16 nphy_undetect_window_sz; /**< num of undetects to exit ACI Mitigation mode */
+ uint16 nphy_b_energy_lo_aci; /**< low ACI power energy threshold for bphy */
+ uint16 nphy_b_energy_md_aci; /**< mid ACI power energy threshold for bphy */
+ uint16 nphy_b_energy_hi_aci; /**< high ACI power energy threshold for bphy */
+ uint16 nphy_noise_noassoc_glitch_th_up; /**< wl interference 4 */
+ uint16 nphy_noise_noassoc_glitch_th_dn;
+ uint16 nphy_noise_assoc_glitch_th_up;
+ uint16 nphy_noise_assoc_glitch_th_dn;
+ uint16 nphy_noise_assoc_aci_glitch_th_up;
+ uint16 nphy_noise_assoc_aci_glitch_th_dn;
+ uint16 nphy_noise_assoc_enter_th;
+ uint16 nphy_noise_noassoc_enter_th;
+ uint16 nphy_noise_assoc_rx_glitch_badplcp_enter_th;
+ uint16 nphy_noise_noassoc_crsidx_incr;
+ uint16 nphy_noise_assoc_crsidx_incr;
+ uint16 nphy_noise_crsidx_decr;
+} wl_aci_args_t;
+
+#define WL_ACI_ARGS_LEGACY_LENGTH 16 /**< bytes of pre NPHY aci args */
+
+#define WL_MACFIFO_PLAY_ARGS_T_VERSION 1u /* version of wl_macfifo_play_args_t struct */
+
+enum wl_macfifo_play_flags {
+ WL_MACFIFO_PLAY_STOP = 0x00u, /* stop playing samples */
+ WL_MACFIFO_PLAY_START = 0x01u, /* start playing samples */
+ WL_MACFIFO_PLAY_LOAD = 0x02u, /* for set: load samples
+ for get: samples are loaded
+ */
+ WL_MACFIFO_PLAY_GET_MAX_SIZE = 0x10u, /* get the macfifo buffer size */
+ WL_MACFIFO_PLAY_GET_STATUS = 0x20u, /* get macfifo play status */
+};
+
+typedef struct wl_macfifo_play_args {
+ uint16 version; /* structure version */
+ uint16 len; /* size of structure */
+ uint16 flags;
+ uint8 PAD[2];
+ uint32 data_len; /* data length */
+} wl_macfifo_play_args_t;
+
+#define WL_MACFIFO_PLAY_DATA_T_VERSION 1u /* version of wl_macfifo_play_data_t struct */
+
+typedef struct wl_macfifo_play_data {
+ uint16 version; /* structure version */
+ uint16 len; /* size of structure */
+ uint32 data_len; /* data length */
+} wl_macfifo_play_data_t;
+
+#define WL_SAMPLECOLLECT_T_VERSION 2 /**< version of wl_samplecollect_args_t struct */
+typedef struct wl_samplecollect_args {
+ /* version 0 fields */
+ uint8 coll_us;
+ uint8 PAD[3];
+ int32 cores;
+ /* add'l version 1 fields */
+ uint16 version; /**< see definition of WL_SAMPLECOLLECT_T_VERSION */
+ uint16 length; /**< length of entire structure */
+ int8 trigger;
+ uint8 PAD;
+ uint16 timeout;
+ uint16 mode;
+ uint16 PAD;
+ uint32 pre_dur;
+ uint32 post_dur;
+ uint8 gpio_sel;
+ uint8 downsamp;
+ uint8 be_deaf;
+ uint8 agc; /**< loop from init gain and going down */
+ uint8 filter; /**< override high pass corners to lowest */
+ /* add'l version 2 fields */
+ uint8 trigger_state;
+ uint8 module_sel1;
+ uint8 module_sel2;
+ uint16 nsamps;
+ uint16 PAD;
+ int32 bitStart;
+ uint32 gpioCapMask;
+ uint8 gpio_collection;
+ uint8 PAD[3];
+} wl_samplecollect_args_t;
+
+#define WL_SAMPLEDATA_T_VERSION 1 /**< version of wl_sampledata_t struct */
+/* version for unpacked sample data, int16 {(I,Q),Core(0..N)} */
+#define WL_SAMPLEDATA_T_VERSION_SPEC_AN 2
+
+typedef struct wl_sampledata {
+ uint16 version; /**< structure version */
+ uint16 size; /**< size of structure */
+ uint16 tag; /**< Header/Data */
+ uint16 length; /**< data length */
+ uint32 flag; /**< bit def */
+} wl_sampledata_t;
+
+/* WL_OTA START */
+/* OTA Test Status */
+enum {
+ WL_OTA_TEST_IDLE = 0, /**< Default Idle state */
+ WL_OTA_TEST_ACTIVE = 1, /**< Test Running */
+ WL_OTA_TEST_SUCCESS = 2, /**< Successfully Finished Test */
+ WL_OTA_TEST_FAIL = 3 /**< Test Failed in the Middle */
+};
+
+/* OTA SYNC Status */
+enum {
+ WL_OTA_SYNC_IDLE = 0, /**< Idle state */
+ WL_OTA_SYNC_ACTIVE = 1, /**< Waiting for Sync */
+ WL_OTA_SYNC_FAIL = 2 /**< Sync pkt not received */
+};
+
+/* Various error states dut can get stuck during test */
+enum {
+ WL_OTA_SKIP_TEST_CAL_FAIL = 1, /**< Phy calibration failed */
+ WL_OTA_SKIP_TEST_SYNCH_FAIL = 2, /**< Sync packet not received */
+ WL_OTA_SKIP_TEST_FILE_DWNLD_FAIL = 3, /**< Cmd flow file download failed */
+ WL_OTA_SKIP_TEST_NO_TEST_FOUND = 4, /**< No test found in Flow file */
+ WL_OTA_SKIP_TEST_WL_NOT_UP = 5, /**< WL UP failed */
+ WL_OTA_SKIP_TEST_UNKNOWN_CALL /**< Unintentional scheduling of an OTA test */
+};
+
+/* Differentiator for ota_tx and ota_rx */
+enum {
+ WL_OTA_TEST_TX = 0, /**< ota_tx */
+ WL_OTA_TEST_RX = 1, /**< ota_rx */
+};
+
+/* Modes of operation: 20 MHz, 40 MHz, 20-in-40 MHz and 80 MHz */
+enum {
+ WL_OTA_TEST_BW_20_IN_40MHZ = 0, /**< 20-in-40 MHz operation */
+ WL_OTA_TEST_BW_20MHZ = 1, /**< 20 MHz operation */
+ WL_OTA_TEST_BW_40MHZ = 2, /**< full 40 MHz operation */
+ WL_OTA_TEST_BW_80MHZ = 3 /**< full 80 MHz operation */
+};
+#define HT_MCS_INUSE 0x00000080 /* HT MCS in use, indicates b0-6 holds an mcs */
+#define VHT_MCS_INUSE 0x00000100 /* VHT MCS in use, indicates b0-6 holds an mcs */
+#define OTA_RATE_MASK 0x0000007f /* rate/mcs value */
+#define OTA_STF_SISO 0
+#define OTA_STF_CDD 1
+#define OTA_STF_STBC 2
+#define OTA_STF_SDM 3
+
+typedef struct ota_rate_info {
+ uint8 rate_cnt; /**< Total number of rates */
+ uint8 PAD;
+ uint16 rate_val_mbps[WL_OTA_TEST_MAX_NUM_RATE]; /**< array of rates from 1 Mbps to 130 Mbps */
+ /**< for legacy rates: rate in Mbps * 2 */
+ /**< for HT rates: MCS index */
+} ota_rate_info_t;
+
+typedef struct ota_power_info {
+ int8 pwr_ctrl_on; /**< power control on/off */
+ int8 start_pwr; /**< starting power/index */
+ int8 delta_pwr; /**< delta power/index */
+ int8 end_pwr; /**< end power/index */
+} ota_power_info_t;
+
+typedef struct ota_packetengine {
+ uint16 delay; /**< Inter-packet delay */
+ /**< for ota_tx, delay is tx ifs in microseconds */
+ /* for ota_rx, delay is wait time in milliseconds */
+ uint16 nframes; /**< Number of frames */
+ uint16 length; /**< Packet length */
+} ota_packetengine_t;
+
+/*
+ * OTA txant/rxant parameter
+ * bit7-4: 4 bits swdiv_tx/rx_policy bitmask, specify antenna-policy for SW diversity
+ * bit3-0: 4 bits TxCore bitmask, specify cores used for transmit frames
+ * (maximum spatial expansion)
+ */
+#define WL_OTA_TEST_ANT_MASK 0xF0
+#define WL_OTA_TEST_CORE_MASK 0x0F
+
+/* OTA txant/rxant 'ant_mask' field; map to Tx/Rx antenna policy for SW diversity */
+enum {
+ WL_OTA_TEST_FORCE_ANT0 = 0x10, /* force antenna to Ant 0 */
+ WL_OTA_TEST_FORCE_ANT1 = 0x20, /* force antenna to Ant 1 */
+};
+
+/* antenna/core fields access */
+#define WL_OTA_TEST_GET_ANT(_txant) ((_txant) & WL_OTA_TEST_ANT_MASK)
+#define WL_OTA_TEST_GET_CORE(_txant) ((_txant) & WL_OTA_TEST_CORE_MASK)
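+
+/* Illustrative sketch: composing and decoding the OTA txant byte with the
+ * masks and accessors above:
+ *
+ *	uint8 txant = WL_OTA_TEST_FORCE_ANT0 | 0x01;	// ant policy Ant0, core 0 bitmap
+ *	uint8 ant  = WL_OTA_TEST_GET_ANT(txant);	// yields 0x10
+ *	uint8 core = WL_OTA_TEST_GET_CORE(txant);	// yields 0x01
+ */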
+
+/** Test info vector */
+typedef struct wl_ota_test_args {
+ uint8 cur_test; /**< test phase */
+ uint8 chan; /**< channel */
+ uint8 bw; /**< bandwidth */
+ uint8 control_band; /**< control band */
+ uint8 stf_mode; /**< stf mode */
+ uint8 PAD;
+ ota_rate_info_t rt_info; /**< Rate info */
+ ota_packetengine_t pkteng; /**< packeteng info */
+ uint8 txant; /**< tx antenna */
+ uint8 rxant; /**< rx antenna */
+ ota_power_info_t pwr_info; /**< power sweep info */
+ uint8 wait_for_sync; /**< wait for sync or not */
+ uint8 ldpc;
+ uint8 sgi;
+ uint8 PAD;
+ /* Update WL_OTA_TESTVEC_T_VERSION for adding new members to this structure */
+} wl_ota_test_args_t;
+
+#define WL_OTA_TESTVEC_T_VERSION 1 /* version of wl_ota_test_vector_t struct */
+typedef struct wl_ota_test_vector {
+ uint16 version;
+ wl_ota_test_args_t test_arg[WL_OTA_TEST_MAX_NUM_SEQ]; /**< Test argument struct */
+ uint16 test_cnt; /**< Total number of tests */
+ uint8 file_dwnld_valid; /**< File successfully downloaded */
+ uint8 sync_timeout; /**< sync packet timeout */
+ int8 sync_fail_action; /**< sync fail action */
+ struct ether_addr sync_mac; /**< macaddress for sync pkt */
+ struct ether_addr tx_mac; /**< macaddress for tx */
+ struct ether_addr rx_mac; /**< macaddress for rx */
+ int8 loop_test; /**< dbg feature to loop the test */
+ uint16 test_rxcnt;
+ /* Update WL_OTA_TESTVEC_T_VERSION for adding new members to this structure */
+} wl_ota_test_vector_t;
+
+/** struct copied back form dongle to host to query the status */
+typedef struct wl_ota_test_status {
+ int16 cur_test_cnt; /**< test phase */
+ int8 skip_test_reason; /**< skip test reason */
+ uint8 PAD;
+ wl_ota_test_args_t test_arg; /**< cur test arg details */
+ uint16 test_cnt; /**< total number of tests downloaded */
+ uint8 file_dwnld_valid; /**< file successfully downloaded ? */
+ uint8 sync_timeout; /**< sync timeout */
+ int8 sync_fail_action; /**< sync fail action */
+ struct ether_addr sync_mac; /**< macaddress for sync pkt */
+ struct ether_addr tx_mac; /**< tx mac address */
+ struct ether_addr rx_mac; /**< rx mac address */
+ uint8 test_stage; /**< check the test status */
+ int8 loop_test; /**< Debug feature that puts the test engine in a loop */
+ uint8 sync_status; /**< sync status */
+} wl_ota_test_status_t;
+
+/* FOR ioctl that take the sta monitor information */
+typedef struct stamon_data {
+ struct ether_addr ea;
+ uint8 PAD[2];
+ int32 rssi;
+} stamon_data_t;
+
+typedef struct stamon_info {
+ int32 version;
+ uint32 count;
+ stamon_data_t sta_data[1];
+} stamon_info_t;
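+
+/* Illustrative sketch: iterating the sta monitor results returned by the
+ * driver; 'count' gives the number of stamon_data_t entries:
+ *
+ *	const stamon_info_t *info = (const stamon_info_t *)buf;	// ioctl output
+ *	uint32 i;
+ *	for (i = 0; i < info->count; i++) {
+ *		// use info->sta_data[i].ea and info->sta_data[i].rssi
+ *	}
+ */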
+
+typedef struct wl_ota_rx_rssi {
+ uint16 pktcnt; /* Pkt count used for this rx test */
+ chanspec_t chanspec; /* Channel info on which the packets are received */
+ int16 rssi; /* Average RSSI of the first 50% of packets received */
+} wl_ota_rx_rssi_t;
+
+#define WL_OTARSSI_T_VERSION 1 /* version of wl_ota_test_rssi_t struct */
+#define WL_OTA_TEST_RSSI_FIXED_SIZE OFFSETOF(wl_ota_test_rssi_t, rx_rssi)
+
+typedef struct wl_ota_test_rssi {
+ uint8 version;
+ uint8 testcnt; /* total measured RSSI values, valid on output only */
+ wl_ota_rx_rssi_t rx_rssi[1]; /* Variable length array of wl_ota_rx_rssi_t */
+} wl_ota_test_rssi_t;
+
+/* WL_OTA END */
+
+/**wl_radar_args_t */
+typedef struct {
+ int32 npulses; /**< required number of pulses at n * t_int */
+ int32 ncontig; /**< required number of pulses at t_int */
+ int32 min_pw; /**< minimum pulse width (20 MHz clocks) */
+ int32 max_pw; /**< maximum pulse width (20 MHz clocks) */
+ uint16 thresh0; /**< Radar detection, thresh 0 */
+ uint16 thresh1; /**< Radar detection, thresh 1 */
+ uint16 blank; /**< Radar detection, blank control */
+ uint16 fmdemodcfg; /**< Radar detection, fmdemod config */
+ int32 npulses_lp; /**< Radar detection, minimum long pulses */
+ int32 min_pw_lp; /**< Minimum pulsewidth for long pulses */
+ int32 max_pw_lp; /**< Maximum pulsewidth for long pulses */
+ int32 min_fm_lp; /**< Minimum fm for long pulses */
+ int32 max_span_lp; /**< Maximum deltat for long pulses */
+ int32 min_deltat; /**< Minimum spacing between pulses */
+ int32 max_deltat; /**< Maximum spacing between pulses */
+ uint16 autocorr; /**< Radar detection, autocorr on or off */
+ uint16 st_level_time; /**< Radar detection, start_timing level */
+ uint16 t2_min; /**< minimum clocks needed to remain in state 2 */
+ uint8 PAD[2];
+ uint32 version; /**< version */
+ uint32 fra_pulse_err; /**< sample error margin for detecting French radar pulses */
+ int32 npulses_fra; /**< Radar detection, minimum French pulses set */
+ int32 npulses_stg2; /**< Radar detection, minimum staggered-2 pulses set */
+ int32 npulses_stg3; /**< Radar detection, minimum staggered-3 pulses set */
+ uint16 percal_mask; /**< defines which period cal is masked from radar detection */
+ uint8 PAD[2];
+ int32 quant; /**< quantization resolution to pulse positions */
+ uint32 min_burst_intv_lp; /**< minimum burst to burst interval for bin3 radar */
+ uint32 max_burst_intv_lp; /**< maximum burst to burst interval for bin3 radar */
+ int32 nskip_rst_lp; /**< number of skipped pulses before resetting lp buffer */
+ int32 max_pw_tol; /* maximum tolerance allowed in detected pulse width for radar detection */
+ uint16 feature_mask; /**< 16-bit mask to specify enabled features */
+ uint16 thresh0_sc; /**< Radar detection, thresh 0 */
+ uint16 thresh1_sc; /**< Radar detection, thresh 1 */
+ uint8 PAD[2];
+} wl_radar_args_t;
+
+#define WL_RADAR_ARGS_VERSION 2
+
+typedef struct {
+ uint32 version; /**< version */
+ uint16 thresh0_20_lo; /**< Radar detection, thresh 0 (range 5250-5350MHz) for BW 20MHz */
+ uint16 thresh1_20_lo; /**< Radar detection, thresh 1 (range 5250-5350MHz) for BW 20MHz */
+ uint16 thresh0_40_lo; /**< Radar detection, thresh 0 (range 5250-5350MHz) for BW 40MHz */
+ uint16 thresh1_40_lo; /**< Radar detection, thresh 1 (range 5250-5350MHz) for BW 40MHz */
+ uint16 thresh0_80_lo; /**< Radar detection, thresh 0 (range 5250-5350MHz) for BW 80MHz */
+ uint16 thresh1_80_lo; /**< Radar detection, thresh 1 (range 5250-5350MHz) for BW 80MHz */
+ uint16 thresh0_20_hi; /**< Radar detection, thresh 0 (range 5470-5725MHz) for BW 20MHz */
+ uint16 thresh1_20_hi; /**< Radar detection, thresh 1 (range 5470-5725MHz) for BW 20MHz */
+ uint16 thresh0_40_hi; /**< Radar detection, thresh 0 (range 5470-5725MHz) for BW 40MHz */
+ uint16 thresh1_40_hi; /**< Radar detection, thresh 1 (range 5470-5725MHz) for BW 40MHz */
+ uint16 thresh0_80_hi; /**< Radar detection, thresh 0 (range 5470-5725MHz) for BW 80MHz */
+ uint16 thresh1_80_hi; /**< Radar detection, thresh 1 (range 5470-5725MHz) for BW 80MHz */
+ uint16 thresh0_160_lo; /**< Radar detection, thresh 0 (range 5250-5350MHz) for BW 160MHz */
+ uint16 thresh1_160_lo; /**< Radar detection, thresh 1 (range 5250-5350MHz) for BW 160MHz */
+ uint16 thresh0_160_hi; /**< Radar detection, thresh 0 (range 5470-5725MHz) for BW 160MHz */
+ uint16 thresh1_160_hi; /**< Radar detection, thresh 1 (range 5470-5725MHz) for BW 160MHz */
+} wl_radar_thr_t;
+
+typedef struct {
+ uint32 version; /* version */
+ uint16 thresh0_sc_20_lo;
+ uint16 thresh1_sc_20_lo;
+ uint16 thresh0_sc_40_lo;
+ uint16 thresh1_sc_40_lo;
+ uint16 thresh0_sc_80_lo;
+ uint16 thresh1_sc_80_lo;
+ uint16 thresh0_sc_20_hi;
+ uint16 thresh1_sc_20_hi;
+ uint16 thresh0_sc_40_hi;
+ uint16 thresh1_sc_40_hi;
+ uint16 thresh0_sc_80_hi;
+ uint16 thresh1_sc_80_hi;
+ uint16 fc_varth_sb;
+ uint16 fc_varth_bin5_sb;
+ uint16 notradar_enb;
+ uint16 max_notradar_lp;
+ uint16 max_notradar;
+ uint16 max_notradar_lp_sc;
+ uint16 max_notradar_sc;
+ uint16 highpow_war_enb;
+ uint16 highpow_sp_ratio; /* unit is 0.5 */
+} wl_radar_thr2_t;
+
+#define WL_RADAR_THR_VERSION 2
+
+typedef struct {
+ uint32 ver;
+ uint32 len;
+ int32 rssi_th[3];
+ uint8 rssi_gain_80[4];
+ uint8 rssi_gain_160[4];
+} wl_dyn_switch_th_t;
+
+#define WL_PHY_DYN_SWITCH_TH_VERSION 1
+
+/** RSSI per antenna */
+typedef struct {
+ uint32 version; /**< version field */
+ uint32 count; /**< number of valid antenna rssi */
+ int8 rssi_ant[WL_RSSI_ANT_MAX]; /**< rssi per antenna */
+ int8 rssi_sum; /**< summed rssi across all antennas */
+ int8 PAD[3];
+} wl_rssi_ant_t;
+
+/* SNR per antenna */
+typedef struct {
+ uint32 version; /* version field */
+ uint32 count; /* number of valid antenna snr */
+ int8 snr_ant[WL_RSSI_ANT_MAX]; /* snr per antenna */
+} wl_snr_ant_t;
+
+/* Weighted average support */
+#define WL_WA_VER 0 /* Initial version - Basic WA algorithm only */
+
+#define WL_WA_ALGO_BASIC 0 /* Basic weighted average algorithm (all 4 metrics) */
+#define WL_WA_TYPE_RSSI 0
+#define WL_WA_TYPE_SNR 1
+#define WL_WA_TYPE_TXRATE 2
+#define WL_WA_TYPE_RXRATE 3
+#define WL_WA_TYPE_MAX 4
+
+typedef struct { /* payload of subcmd in xtlv */
+ uint8 id;
+ uint8 n_total; /* Total number of samples (n_total >= n_recent) */
+ uint8 n_recent; /* Number of samples denoted as recent */
+ uint8 w_recent; /* Total weight for the recent samples (as percentage) */
+} wl_wa_basic_params_t;
+
+typedef struct {
+ uint16 ver;
+ uint16 len;
+ uint8 subcmd[]; /* sub-cmd in bcm_xtlv_t */
+} wl_wa_cmd_t;
+
+/** data structure used in 'dfs_status' wl interface, which is used to query dfs status */
+typedef struct {
+ uint32 state; /**< noted by WL_DFS_CACSTATE_XX. */
+ uint32 duration; /**< time spent in ms in state. */
+ /**
+ * as dfs enters ISM state, it removes the operational channel from quiet channel
+ * list and notes the channel in channel_cleared. set to 0 if no channel is cleared
+ */
+ chanspec_t chanspec_cleared;
+ /** chanspec_cleared used to be a uint32; a uint16 pad follows to maintain the size */
+ uint16 pad;
+} wl_dfs_status_t;
+
+typedef struct {
+ uint32 state; /* noted by WL_DFS_CACSTATE_XX */
+ uint32 duration; /* time spent in ms in state */
+ chanspec_t chanspec; /* chanspec of this core */
+ chanspec_t chanspec_last_cleared; /* chanspec last cleared for operation by scanning */
+ uint16 sub_type; /* currently just the index of the core or the respective PLL */
+ uint16 pad;
+} wl_dfs_sub_status_t;
+
+#define WL_DFS_STATUS_ALL_VERSION (1)
+typedef struct {
+ uint16 version; /* version field; current max version 1 */
+ uint16 num_sub_status;
+ wl_dfs_sub_status_t dfs_sub_status[1]; /* struct array of length num_sub_status */
+} wl_dfs_status_all_t;
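+
+/* Illustrative sketch: walking the per-core DFS status entries after a
+ * 'dfs_status_all' query:
+ *
+ *	const wl_dfs_status_all_t *all = (const wl_dfs_status_all_t *)buf;
+ *	uint16 i;
+ *	for (i = 0; i < all->num_sub_status; i++) {
+ *		const wl_dfs_sub_status_t *sub = &all->dfs_sub_status[i];
+ *		// use sub->state, sub->duration, sub->chanspec
+ *	}
+ */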
+
+#define WL_DFS_AP_MOVE_VERSION (1)
+
+struct wl_dfs_ap_move_status_v1 {
+ int16 dfs_status; /* DFS scan status */
+ chanspec_t chanspec; /* New AP Chanspec */
+ wl_dfs_status_t cac_status; /* CAC status */
+};
+
+typedef struct wl_dfs_ap_move_status_v2 {
+ int8 version; /* version field; current max version 1 */
+ int8 move_status; /* DFS move status */
+ chanspec_t chanspec; /* New AP Chanspec */
+ wl_dfs_status_all_t scan_status; /* status; see dfs_status_all for wl_dfs_status_all_t */
+} wl_dfs_ap_move_status_v2_t;
+
+#define WL_DFS_AP_MOVE_ABORT -1 /* Abort any dfs_ap_move in progress immediately */
+#define WL_DFS_AP_MOVE_STUNT -2 /* Stunt move but continue background CSA if in progress */
+
+/** data structure used in 'radar_status' wl interface, which is used to query radar detection status */
+typedef struct {
+ uint8 detected;
+ uint8 PAD[3];
+ int32 count;
+ uint8 pretended;
+ uint8 PAD[3];
+ uint32 radartype;
+ uint32 timenow;
+ uint32 timefromL;
+ int32 lp_csect_single;
+ int32 detected_pulse_index;
+ int32 nconsecq_pulses;
+ chanspec_t ch;
+ uint8 PAD[2];
+ int32 pw[10];
+ int32 intv[10];
+ int32 fm[10];
+} wl_radar_status_t;
+
+#define NUM_PWRCTRL_RATES 12
+
+typedef struct {
+ uint8 txpwr_band_max[NUM_PWRCTRL_RATES]; /**< User set target */
+ uint8 txpwr_limit[NUM_PWRCTRL_RATES]; /**< reg and local power limit */
+ uint8 txpwr_local_max; /**< local max according to the AP */
+ uint8 txpwr_local_constraint; /**< local constraint according to the AP */
+ uint8 txpwr_chan_reg_max; /**< Regulatory max for this channel */
+	uint8 txpwr_target[2][NUM_PWRCTRL_RATES];	/**< Latest target for 2.4 and 5 GHz */
+	uint8 txpwr_est_Pout[2];			/**< Latest estimate for 2.4 and 5 GHz */
+ uint8 txpwr_opo[NUM_PWRCTRL_RATES]; /**< On G phy, OFDM power offset */
+ uint8 txpwr_bphy_cck_max[NUM_PWRCTRL_RATES]; /**< Max CCK power for this band (SROM) */
+ uint8 txpwr_bphy_ofdm_max; /**< Max OFDM power for this band (SROM) */
+ uint8 txpwr_aphy_max[NUM_PWRCTRL_RATES]; /**< Max power for A band (SROM) */
+ int8 txpwr_antgain[2]; /**< Ant gain for each band - from SROM */
+ uint8 txpwr_est_Pout_gofdm; /**< Pwr estimate for 2.4 OFDM */
+} tx_power_legacy_t;
+
+#define WL_TX_POWER_RATES_LEGACY 45
+#define WL_TX_POWER_MCS20_FIRST 12
+#define WL_TX_POWER_MCS20_NUM 16
+#define WL_TX_POWER_MCS40_FIRST 28
+#define WL_TX_POWER_MCS40_NUM 17
+
+typedef struct {
+ uint32 flags;
+ chanspec_t chanspec; /**< txpwr report for this channel */
+ chanspec_t local_chanspec; /**< channel on which we are associated */
+ uint8 local_max; /**< local max according to the AP */
+ uint8 local_constraint; /**< local constraint according to the AP */
+ int8 antgain[2]; /**< Ant gain for each band - from SROM */
+ uint8 rf_cores; /**< count of RF Cores being reported */
+ uint8 est_Pout[4]; /**< Latest tx power out estimate per RF
+ * chain without adjustment
+ */
+ uint8 est_Pout_cck; /**< Latest CCK tx power out estimate */
+ uint8 user_limit[WL_TX_POWER_RATES_LEGACY]; /**< User limit */
+ uint8 reg_limit[WL_TX_POWER_RATES_LEGACY]; /**< Regulatory power limit */
+ uint8 board_limit[WL_TX_POWER_RATES_LEGACY]; /**< Max power board can support (SROM) */
+ uint8 target[WL_TX_POWER_RATES_LEGACY]; /**< Latest target power */
+ uint8 PAD[2];
+} tx_power_legacy2_t;
+
+#define WL_NUM_2x2_ELEMENTS 4
+#define WL_NUM_3x3_ELEMENTS 6
+#define WL_NUM_4x4_ELEMENTS 10
+
+typedef struct {
+ uint16 ver; /**< version of this struct */
+ uint16 len; /**< length in bytes of this structure */
+ uint32 flags;
+ chanspec_t chanspec; /**< txpwr report for this channel */
+ chanspec_t local_chanspec; /**< channel on which we are associated */
+ uint32 buflen; /**< ppr buffer length */
+ uint8 pprbuf[1]; /**< Latest target power buffer */
+} wl_txppr_t;
+
+#define WL_TXPPR_VERSION 1
+#define WL_TXPPR_LENGTH (sizeof(wl_txppr_t))
+#define TX_POWER_T_VERSION 45
+#define TX_POWER_T_VERSION_V2 46
+
+/* curpower ppr types */
+enum {
+ PPRTYPE_TARGETPOWER = 1,
+ PPRTYPE_BOARDLIMITS = 2,
+ PPRTYPE_REGLIMITS = 3,
+ PPRTYPE_RU_REGLIMITS = 4,
+ PPRTYPE_RU_BOARDLIMITS = 5,
+ PPRTYPE_RU_TARGETPOWER = 6,
+ PPRTYPE_DYNAMIC_INFO = 7,
+ PPRTYPE_LAST
+};
+
+/** number of ppr serialization buffers: reg, board and target */
+#define WL_TXPPR_SER_BUF_NUM (PPRTYPE_LAST - 1)
+
+typedef struct chanspec_txpwr_max {
+ chanspec_t chanspec; /**< chanspec */
+ uint8 txpwr_max; /**< max txpwr in all the rates */
+ uint8 padding;
+} chanspec_txpwr_max_t;
+
+typedef struct wl_chanspec_txpwr_max {
+ uint16 ver; /**< version of this struct */
+ uint16 len; /**< length in bytes of this structure */
+ uint32 count; /**< number of elements of (chanspec, txpwr_max) pair */
+ chanspec_txpwr_max_t txpwr[1]; /**< array of (chanspec, max_txpwr) pair */
+} wl_chanspec_txpwr_max_t;
+
+#define WL_CHANSPEC_TXPWR_MAX_VER 1
+#define WL_CHANSPEC_TXPWR_MAX_LEN (sizeof(wl_chanspec_txpwr_max_t))
+
+typedef struct tx_inst_power {
+	uint8 txpwr_est_Pout[2];	/**< Latest estimate for 2.4 and 5 GHz */
+ uint8 txpwr_est_Pout_gofdm; /**< Pwr estimate for 2.4 OFDM */
+} tx_inst_power_t;
+
+#define WL_NUM_TXCHAIN_MAX 4
+typedef struct wl_txchain_pwr_offsets {
+ int8 offset[WL_NUM_TXCHAIN_MAX]; /**< quarter dBm signed offset for each chain */
+} wl_txchain_pwr_offsets_t;
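+
+/* Example (illustrative sketch, not part of this header): the per-chain
+ * offsets are signed quarter-dBm steps, so an offset of -6 means -1.5 dB on
+ * that chain.
+ */
+#if 0
+static int
+txchain_offset_qdbm_to_mdb(int8 offset)
+{
+	return (int)offset * 250;	/* 0.25 dB == 250 milli-dB */
+}
+#endif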
+
+/** maximum channels returned by the get valid channels iovar */
+#define WL_NUMCHANNELS 64
+#define WL_NUMCHANNELS_MANY_CHAN 10
+#define WL_ITER_LIMIT_MANY_CHAN 5
+
+#define WL_MIMO_PS_CFG_VERSION_1 1
+
+typedef struct wl_mimops_cfg {
+ uint8 version;
+ /* active_chains: 0 for all, 1 for 1 chain. */
+ uint8 active_chains;
+	/* static (0), dynamic (1), or disabled (3). Mode applies only when active_chains = 0. */
+ uint8 mode;
+ /* bandwidth = Full (0), 20M (1), 40M (2), 80M (3). */
+ uint8 bandwidth;
+ uint8 applychangesafterlearning;
+ uint8 pad[3];
+} wl_mimops_cfg_t;
+
+/* This event is for tracing MIMO PS metrics snapshot calls.
+ * It is helpful for debugging out-of-sync issues between
+ * ucode SHM values and FW snapshot calculation.
+ * It is part of the EVENT_LOG_TAG_MIMO_PS_TRACE.
+ */
+#define WL_MIMO_PS_METRICS_SNAPSHOT_TRACE_TYPE 0
+typedef struct wl_mimo_ps_metrics_snapshot_trace {
+ /* type field for this TLV: */
+ uint16 type;
+ /* length field for this TLV */
+ uint16 len;
+ uint32 idle_slotcnt_mimo; /* MIMO idle slotcnt raw SHM value */
+ uint32 last_idle_slotcnt_mimo; /* stored value snapshot */
+ uint32 idle_slotcnt_siso; /* SISO idle slotcnt raw SHM value */
+ uint32 last_idle_slotcnt_siso; /* stored value snapshot */
+ uint32 rx_time_mimo; /* Rx MIMO raw SHM value */
+ uint32 last_rx_time_mimo; /* stored value snapshot */
+ uint32 rx_time_siso; /* RX SISO raw SHM value */
+ uint32 last_rx_time_siso; /* stored value snapshot */
+ uint32 tx_time_1chain; /* Tx 1-chain raw SHM value */
+ uint32 last_tx_time_1chain; /* stored value snapshot */
+ uint32 tx_time_2chain; /* Tx 2-chain raw SHM value */
+ uint32 last_tx_time_2chain; /* stored value snapshot */
+ uint32 tx_time_3chain; /* Tx 3-chain raw SHM value */
+ uint32 last_tx_time_3chain; /* stored value snapshot */
+ uint16 reason; /* reason for snapshot call, see below */
+ /* Does the call reset last values after delta calculation */
+ uint16 reset_last;
+} wl_mimo_ps_metrics_snapshot_trace_t;
+/* reason codes for mimo ps metrics snapshot function calls */
+#define WL_MIMOPS_METRICS_SNAPSHOT_REPORT 1
+#define WL_MIMOPS_METRICS_SNAPSHOT_RXCHAIN_SET 2
+#define WL_MIMOPS_METRICS_SNAPSHOT_ARBI 3
+#define WL_MIMOPS_METRICS_SNAPSHOT_SLOTUPD 4
+#define WL_MIMOPS_METRICS_SNAPSHOT_PMBCNRX 5
+#define WL_MIMOPS_METRICS_SNAPSHOT_BMACINIT 6
+#define WL_MIMOPS_METRICS_SNAPSHOT_HT_COMPLETE 7
+#define WL_MIMOPS_METRICS_SNAPSHOT_OCL 8
+
+#define WL_MIMO_PS_STATUS_VERSION_2 2
+typedef struct wl_mimo_ps_status {
+ uint8 version;
+ uint8 ap_cap; /* The associated AP's capability (BW, MIMO/SISO). */
+ uint8 association_status; /* How we are associated to the AP (MIMO/SISO). */
+ uint8 mimo_ps_state; /* mimo_ps_cfg states: [0-5]. See below for values */
+ uint8 mrc_state; /* MRC state: NONE (0), ACTIVE(1) */
+ uint8 bss_rxchain; /* bss rxchain bitmask */
+ uint8 bss_txchain; /* bss txchain bitmask */
+ uint8 bss_bw; /* bandwidth: Full (0), 20M (1), 40M (2), 80M (3), etc */
+ uint16 hw_state; /* bitmask of hw state. See below for values */
+ uint8 hw_rxchain; /* actual HW rxchain bitmask */
+ uint8 hw_txchain; /* actual HW txchain bitmask */
+ uint8 hw_bw; /* bandwidth: Full (0), 20M (1), 40M (2), 80M (3), etc */
+ uint8 pm_bcnrx_state; /* actual state of ucode flag */
+ uint8 basic_rates_present; /* internal flag to trigger siso bcmc rx */
+ uint8 siso_bcmc_rx_state; /* actual state of ucode flag */
+} wl_mimo_ps_status_t;
+
+#define WL_MIMO_PS_STATUS_VERSION_1 1
+typedef struct wl_mimo_ps_status_v1 {
+ uint8 version;
+ uint8 ap_cap; /* The associated AP's capability (BW, MIMO/SISO). */
+ uint8 association_status; /* How we are associated to the AP (MIMO/SISO). */
+ uint8 mimo_ps_state; /* mimo_ps_cfg states: [0-5]. See below for values */
+ uint8 mrc_state; /* MRC state: NONE (0), ACTIVE(1) */
+ uint8 bss_rxchain; /* bss rxchain bitmask */
+ uint8 bss_txchain; /* bss txchain bitmask */
+ uint8 bss_bw; /* bandwidth: Full (0), 20M (1), 40M (2), 80M (3), etc */
+ uint16 hw_state; /* bitmask of hw state. See below for values */
+ uint8 hw_rxchain; /* actual HW rxchain bitmask */
+ uint8 hw_txchain; /* actual HW txchain bitmask */
+ uint8 hw_bw; /* bandwidth: Full (0), 20M (1), 40M (2), 80M (3), etc */
+ uint8 pad[3];
+} wl_mimo_ps_status_v1_t;
+
+#define WL_MIMO_PS_STATUS_AP_CAP(ap_cap) (ap_cap & 0x0F)
+#define WL_MIMO_PS_STATUS_AP_CAP_BW(ap_cap) (ap_cap >> 4)
+#define WL_MIMO_PS_STATUS_ASSOC_BW_SHIFT 4
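+
+/* Example (illustrative sketch, not part of this header): decoding the packed
+ * ap_cap field with the macros above; the low nibble is the MIMO/SISO
+ * capability enum, the high nibble is the bandwidth.
+ */
+#if 0
+static void
+mimo_ps_decode_ap_cap(const wl_mimo_ps_status_t *st)
+{
+	uint8 cap = WL_MIMO_PS_STATUS_AP_CAP(st->ap_cap);	/* assoc enum */
+	uint8 bw = WL_MIMO_PS_STATUS_AP_CAP_BW(st->ap_cap);	/* bandwidth */
+	(void)cap; (void)bw;
+}
+#endif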
+
+/* version 3: assoc status: low nibble is the status enum, high nibble holds other flags */
+#define WL_MIMO_PS_STATUS_VERSION_3 3
+#define WL_MIMO_PS_STATUS_ASSOC_STATUS_MASK 0x0F
+#define WL_MIMO_PS_STATUS_ASSOC_STATUS_VHT_WITHOUT_OMN 0x80
+
+/* mimo_ps_status: ap_cap/association status */
+enum {
+ WL_MIMO_PS_STATUS_ASSOC_NONE = 0,
+ WL_MIMO_PS_STATUS_ASSOC_SISO = 1,
+ WL_MIMO_PS_STATUS_ASSOC_MIMO = 2,
+ WL_MIMO_PS_STATUS_ASSOC_LEGACY = 3
+};
+
+/* mimo_ps_status: mimo_ps_cfg states */
+enum {
+ WL_MIMO_PS_CFG_STATE_NONE = 0,
+ WL_MIMO_PS_CFG_STATE_INFORM_AP_INPROGRESS = 1,
+ WL_MIMO_PS_CFG_STATE_INFORM_AP_DONE = 2,
+ WL_MIMO_PS_CFG_STATE_LEARNING = 3,
+ WL_MIMO_PS_CFG_STATE_HW_CONFIGURE = 4,
+ WL_MIMO_PS_CFG_STATE_INFORM_AP_PENDING = 5
+};
+
+/* mimo_ps_status: hw_state values */
+#define WL_MIMO_PS_STATUS_HW_STATE_NONE 0
+#define WL_MIMO_PS_STATUS_HW_STATE_LTECOEX (0x1 << 0)
+#define WL_MIMO_PS_STATUS_HW_STATE_MIMOPS_BSS (0x1 << 1)
+
+#ifdef WLAWDL
+#define WL_MIMO_PS_STATUS_HW_STATE_AWDL_BSS (0x1 << 2)
+#endif /* WLAWDL */
+
+#define WL_MIMO_PS_STATUS_HW_STATE_SCAN (0x1 << 3)
+#define WL_MIMO_PS_STATUS_HW_STATE_TXPPR (0x1 << 4)
+#define WL_MIMO_PS_STATUS_HW_STATE_PWRTHOTTLE (0x1 << 5)
+#define WL_MIMO_PS_STATUS_HW_STATE_TMPSENSE (0x1 << 6)
+#define WL_MIMO_PS_STATUS_HW_STATE_IOVAR (0x1 << 7)
+#define WL_MIMO_PS_STATUS_HW_STATE_AP_BSS (0x1 << 8)
+
+/* mimo_ps_status: mrc states */
+#define WL_MIMO_PS_STATUS_MRC_NONE 0
+#define WL_MIMO_PS_STATUS_MRC_ACTIVE 1
+
+/* mimo_ps_status: core flag states for single-core beacon and siso-bcmc rx */
+#define WL_MIMO_PS_STATUS_MHF_FLAG_NONE 0
+#define WL_MIMO_PS_STATUS_MHF_FLAG_ACTIVE 1
+#define WL_MIMO_PS_STATUS_MHF_FLAG_COREDOWN 2
+#define WL_MIMO_PS_STATUS_MHF_FLAG_INVALID 3
+
+/* Type values for the REASON */
+#define WL_MIMO_PS_PS_LEARNING_ABORTED (1 << 0)
+#define WL_MIMO_PS_PS_LEARNING_COMPLETED (1 << 1)
+#define WL_MIMO_PS_PS_LEARNING_ONGOING (1 << 2)
+
+typedef struct wl_mimo_ps_learning_event_data {
+ uint32 startTimeStamp;
+ uint32 endTimeStamp;
+ uint16 reason;
+ struct ether_addr BSSID;
+ uint32 totalSISO_below_rssi_threshold;
+ uint32 totalMIMO_below_rssi_threshold;
+ uint32 totalSISO_above_rssi_threshold;
+ uint32 totalMIMO_above_rssi_threshold;
+} wl_mimo_ps_learning_event_data_t;
+
+#define WL_MIMO_PS_PS_LEARNING_CFG_ABORT (1 << 0)
+#define WL_MIMO_PS_PS_LEARNING_CFG_STATUS (1 << 1)
+#define WL_MIMO_PS_PS_LEARNING_CFG_CONFIG (1 << 2)
+#define WL_MIMO_PS_PS_LEARNING_CFG_MASK (0x7)
+
+#define WL_MIMO_PS_PS_LEARNING_CFG_V1 1
+
+typedef struct wl_mimops_learning_cfg {
+ /* flag: bit 0 for abort */
+ /* flag: bit 1 for status */
+ /* flag: bit 2 for configuring no of packets and rssi */
+ uint8 flag;
+ /* mimo ps learning version, compatible version is 0 */
+ uint8 version;
+ /* if version is 0 or rssi is 0, ignored */
+ int8 learning_rssi_threshold;
+ uint8 reserved;
+ uint32 no_of_packets_for_learning;
+ wl_mimo_ps_learning_event_data_t mimops_learning_data;
+} wl_mimops_learning_cfg_t;
+
+#define WL_OCL_STATUS_VERSION 1
+typedef struct ocl_status_info {
+ uint8 version;
+ uint8 len;
+ uint16 fw_status; /* Bits representing FW disable reasons */
+ uint8 hw_status; /* Bits for actual HW config and SISO/MIMO coremask */
+ uint8 coremask; /* The ocl core mask (indicating listening core) */
+} ocl_status_info_t;
+
+/* MWS OCL map */
+#define WL_MWS_OCL_OVERRIDE_VERSION 1
+typedef struct wl_mws_ocl_override {
+ uint16 version; /* Structure version */
+	uint16 bitmap_2g;	/* bitmap for 2.4G channels, bits 1-13 */
+	uint16 bitmap_5g_lo;	/* bitmap for 5G low channels by 2:
+				 * 34-48, 52-56, 60-64, 100-102
+				 */
+	uint16 bitmap_5g_mid;	/* bitmap for 5G mid channels by 2:
+				 * 104, 108-112, 116-120, 124-128,
+				 * 132-136, 140, 149-151
+				 */
+	uint16 bitmap_5g_high;	/* bitmap for 5G high channels by 2:
+				 * 153, 157-161, 165
+				 */
+} wl_mws_ocl_override_t;
+
+/* Bits for fw_status */
+#define OCL_DISABLED_HOST 0x01 /* Host has disabled through ocl_enable */
+#define OCL_DISABLED_RSSI 0x02 /* Disabled because of ocl_rssi_threshold */
+#define OCL_DISABLED_LTEC 0x04 /* Disabled due to LTE Coex activity */
+#define OCL_DISABLED_SISO 0x08 /* Disabled while in SISO mode */
+#define OCL_DISABLED_CAL 0x10 /* Disabled during active calibration */
+#define OCL_DISABLED_CHANSWITCH 0x20 /* Disabled during active channel switch */
+#define OCL_DISABLED_ASPEND 0x40 /* Disabled due to assoc pending */
+#define OCL_DISABLED_SEQ_RANGE 0x80 /* Disabled during SEQ Ranging */
+#define OCL_DISABLED_RXIQ_EST_BTLOWAR 0x100 /* Disabled if the bt-lo-war is active */
+#define OCL_DISABLED_IDLE_TSSICAL 0x200
+#define OCL_DISABLED_TONE 0x400 /* Disabled if the tone is active */
+#define OCL_DISABLED_NOISECAL 0x800 /* Disabled if the noise cal is active */
+
+/* Bits for hw_status */
+#define OCL_HWCFG 0x01 /* State of OCL config bit in phy HW */
+#define OCL_HWMIMO 0x02 /* Set if current coremask is > 1 bit */
+#define OCL_COREDOWN 0x80 /* Set if core is currently down */
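+
+/* Example (illustrative sketch, not part of this header): fw_status is a
+ * bitmask of the OCL_DISABLED_* reasons above, so OCL is active only when the
+ * whole mask is zero.
+ */
+#if 0
+static bool
+ocl_is_active(const ocl_status_info_t *info)
+{
+	return (info->fw_status == 0);
+}
+#endif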
+
+#define WL_OPS_CFG_VERSION_1 1
+/* Common IOVAR struct */
+typedef struct wl_ops_cfg_v1 {
+ uint16 version;
+ uint16 len; /* total length includes fixed fields and variable data[] */
+ uint16 subcmd_id; /* subcommand id */
+ uint16 padding; /* reserved / padding for 4 byte align */
+ uint8 data[]; /* subcommand data; could be empty */
+} wl_ops_cfg_v1_t;
+
+/* subcommands ids */
+enum {
+ WL_OPS_CFG_SUBCMD_ENABLE = 0, /* OPS enable/disable mybss and obss
+ * for nav and plcp options
+ */
+ WL_OPS_CFG_SUBCMD_MAX_SLEEP_DUR = 1, /* Max sleep duration used for OPS */
+ WL_OPS_CFG_SUBCMD_RESET_STATS = 2 /* Reset stats part of ops_status
+ * on both slices
+ */
+};
+
+#define WL_OPS_CFG_MASK 0xffff
+#define WL_OPS_CFG_CAP_MASK 0xffff0000
+#define WL_OPS_CFG_CAP_SHIFT 16 /* Shift bits to locate the OPS CAP */
+#define WL_OPS_MAX_SLEEP_DUR 12500 /* max ops duration in us */
+#define WL_OPS_MINOF_MAX_SLEEP_DUR	512	/* minimum allowed max ops duration in us */
+#define WL_OPS_SUPPORTED_CFG (WL_OPS_MYBSS_PLCP_DUR | WL_OPS_MYBSS_NAV_DUR \
+ | WL_OPS_OBSS_PLCP_DUR | WL_OPS_OBSS_NAV_DUR)
+#define WL_OPS_DEFAULT_CFG WL_OPS_SUPPORTED_CFG
+
+/* WL_OPS_CFG_SUBCMD_ENABLE */
+typedef struct wl_ops_cfg_enable {
+ uint32 bits; /* selectively enable ops for mybss and obss */
+} wl_ops_cfg_enable_t;
+/* Bits for WL_OPS_CFG_SUBCMD_ENABLE Parameter */
+#define WL_OPS_MYBSS_PLCP_DUR 0x1 /* OPS based on mybss 11b & 11n mixed HT frames
+ * PLCP header duration
+ */
+#define WL_OPS_MYBSS_NAV_DUR 0x2 /* OPS based on mybss RTS-CTS duration */
+#define WL_OPS_OBSS_PLCP_DUR 0x4 /* OPS based on obss 11b & 11n mixed HT frames
+ * PLCP header duration
+ */
+#define WL_OPS_OBSS_NAV_DUR 0x8 /* OPS based on obss RTS-CTS duration */
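+
+/* Example (illustrative sketch, not part of this header): composing the
+ * WL_OPS_CFG_SUBCMD_ENABLE payload to enable OPS for mybss frames only, using
+ * both the PLCP- and NAV-based triggers defined above.
+ */
+#if 0
+static void
+ops_enable_mybss_only(wl_ops_cfg_enable_t *en)
+{
+	en->bits = WL_OPS_MYBSS_PLCP_DUR | WL_OPS_MYBSS_NAV_DUR;
+}
+#endif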
+
+/* WL_OPS_CFG_SUBCMD_MAX_SLEEP_DUR */
+typedef struct wl_ops_cfg_max_sleep_dur {
+ uint32 val; /* maximum sleep duration (us) used for OPS */
+} wl_ops_cfg_max_sleep_dur_t;
+
+/* WL_OPS_CFG_SUBCMD_RESET_STATS */
+typedef struct wl_ops_cfg_reset_stats {
+ uint32 val; /* bitmap of slices, 0 means all slices */
+} wl_ops_cfg_reset_stats_t;
+
+#define WL_OPS_STATUS_VERSION_1 1
+#define OPS_DUR_HIST_BINS 5 /* number of bins used, 0-1, 1-2, 2-4, 4-8, >8 msec */
+typedef struct wl_ops_status_v1 {
+ uint16 version;
+ uint16 len; /* Total length including all fixed fields */
+ uint8 slice_index; /* Slice for which status is reported */
+ uint8 disable_obss; /* indicate if obss cfg is disabled */
+ uint8 pad[2]; /* 4-byte alignment */
+ uint32 disable_reasons; /* FW disable reasons */
+ uint32 disable_duration; /* ops disable time(ms) due to disable reasons */
+ uint32 applied_ops_config; /* currently applied ops config */
+ uint32 partial_ops_dur; /* Total time (in usec) of partial ops duration */
+ uint32 full_ops_dur; /* Total time (in usec) of full ops duration */
+ uint32 count_dur_hist[OPS_DUR_HIST_BINS]; /* ops occurrence histogram */
+	uint32 nav_cnt;			/* number of times ops triggered based on NAV duration */
+	uint32 plcp_cnt;		/* number of times ops triggered based on PLCP duration */
+	uint32 mybss_cnt;		/* number of times mybss ops triggered */
+	uint32 obss_cnt;		/* number of times obss ops triggered */
+ uint32 miss_dur_cnt; /* number of times ops couldn't happen
+ * due to insufficient duration
+ */
+ uint32 miss_premt_cnt; /* number of times ops couldn't happen due
+ * to not meeting Phy preemption thresh
+ */
+ uint32 max_dur_cnt; /* number of times ops did not trigger due to
+ * frames exceeding max sleep duration
+ */
+ uint32 wake_cnt; /* number of ops miss due to wake reason */
+ uint32 bcn_wait_cnt; /* number of ops miss due to waiting for bcn */
+} wl_ops_status_v1_t;
+/* Bits for disable_reasons */
+#define OPS_DISABLED_HOST 0x01 /* Host has disabled through ops_cfg */
+#define OPS_DISABLED_UNASSOC 0x02 /* Disabled because the slice is in unassociated state */
+#define OPS_DISABLED_SCAN 0x04 /* Disabled because the slice is in scan state */
+#define OPS_DISABLED_BCN_MISS 0x08 /* Disabled because beacon missed for a duration */
+
+#define WL_PSBW_CFG_VERSION_1 1
+/* Common IOVAR struct */
+typedef struct wl_psbw_cfg_v1 {
+ uint16 version;
+ uint16 len; /* total length includes fixed fields and variable data[] */
+ uint16 subcmd_id; /* subcommand id */
+ uint16 pad; /* reserved / padding for 4 byte align */
+ uint8 data[]; /* subcommand data */
+} wl_psbw_cfg_v1_t;
+
+/* subcommands ids */
+enum {
+ /* PSBW enable/disable */
+ WL_PSBW_CFG_SUBCMD_ENABLE = 0,
+ /* override psbw disable requests */
+ WL_PSBW_CFG_SUBCMD_OVERRIDE_DISABLE_MASK = 1,
+ /* Reset stats part of psbw status */
+ WL_PSBW_CFG_SUBCMD_RESET_STATS = 2
+};
+
+#define WL_PSBW_OVERRIDE_DISA_CFG_MASK 0x0000ffff
+#define WL_PSBW_OVERRIDE_DISA_CAP_MASK 0xffff0000
+#define WL_PSBW_OVERRIDE_DISA_CAP_SHIFT 16 /* shift bits for cap */
+
+/* WL_PSBW_CFG_SUBCMD_ENABLE */
+typedef struct wl_psbw_cfg_enable {
+ bool enable; /* enable or disable */
+} wl_psbw_cfg_enable_t;
+
+/* WL_PSBW_CFG_SUBCMD_OVERRIDE_DISABLE_MASK */
+typedef struct wl_psbw_cfg_override_disable_mask {
+ uint32 mask; /* disable requests to override, cap and current cfg */
+} wl_psbw_cfg_override_disable_mask_t;
+
+/* WL_PSBW_CFG_SUBCMD_RESET_STATS */
+typedef struct wl_psbw_cfg_reset_stats {
+ uint32 val; /* infra interface index, 0 */
+} wl_psbw_cfg_reset_stats_t;
+
+#define WL_PSBW_STATUS_VERSION_1 1
+typedef struct wl_psbw_status_v1 {
+ uint16 version;
+ uint16 len; /* total length including all fixed fields */
+ uint8 curr_slice_index; /* current slice index of the interface */
+	uint8 associated;	/* interface associated */
+ chanspec_t chspec; /* radio chspec */
+ uint32 state; /* psbw state */
+ uint32 disable_reasons; /* FW disable reasons */
+ uint32 slice_enable_dur; /* time(ms) psbw remains enabled on this slice */
+ uint32 total_enable_dur; /* time(ms) psbw remains enabled total */
+ uint32 enter_cnt; /* total cnt entering PSBW active */
+ uint32 exit_cnt; /* total cnt exiting PSBW active */
+	uint32 exit_imd_cnt;	/* total cnt of immediate exits after waiting N TBTTs */
+ uint32 enter_skip_cnt; /* total cnt entering PSBW active skipped */
+} wl_psbw_status_v1_t;
+
+/* Bits for state */
+#define PSBW_ACTIVE 0x1 /* active 20MHz */
+#define PSBW_TTTT_PEND 0x2 /* waiting for TTTT intr */
+#define PSBW_WAIT_ENTER 0x4 /* in wait period before entering */
+#define PSBW_CAL_DONE 0x8 /* 20M channel cal done */
+
+/* Bits for disable_reasons */
+#define WL_PSBW_DISA_HOST 0x00000001 /* Host has disabled through psbw_cfg */
+#define WL_PSBW_DISA_AP20M 0x00000002 /* AP is operating on 20 MHz */
+#define WL_PSBW_DISA_SLOTTED_BSS 0x00000004 /* slot_bss active */
+#define WL_PSBW_DISA_NOT_PMFAST 0x00000008 /* Not PM_FAST */
+#define WL_PSBW_DISA_BASICRATESET 0x00000010 /* BasicRateSet is empty */
+#define WL_PSBW_DISA_NOT_D3 0x00000020 /* PCIe not in D3 */
+#define WL_PSBW_DISA_CSA 0x00000040 /* CSA IE is present */
+#define WL_PSBW_DISA_ASSOC		0x00000080 /* assoc state is active or unassociated */
+#define WL_PSBW_DISA_SCAN 0x00000100 /* scan state is active */
+#define WL_PSBW_DISA_CAL 0x00000200 /* cal pending or active */
+#define WL_PSBW_DISA_BCN_OFFLOAD 0x00000400 /* PSBW disabled due to scan
+ * core beacon offload
+ */
+#define WL_PSBW_DISA_DISASSOC 0x00000800 /* STA is disassociated */
+/* following are not part of disable reasons */
+#define WL_PSBW_EXIT_PM 0x00001000 /* Out of PM */
+#define WL_PSBW_EXIT_TIM 0x00002000 /* unicast TIM bit present */
+#define WL_PSBW_EXIT_DATA 0x00004000 /* Data for transmission */
+#define WL_PSBW_EXIT_MGMTDATA 0x00008000 /* management frame for transmission */
+#define WL_PSBW_EXIT_BW_UPD 0x00010000 /* BW being updated */
+#define WL_PSBW_DISA_NONE 0x80000000 /* reserved for internal use only */
+
+/* DVFS */
+#define DVFS_CMND_VERSION_1 1
+/* Common IOVAR struct */
+typedef struct dvfs_cmnd_v1 {
+ uint16 ver; /* version of this structure */
+ uint16 len; /* includes both fixed and variable data[] fields */
+ uint32 subcmd; /* subcommand id */
+ uint8 data[]; /* subcommand data */
+} dvfs_cmnd_v1_t;
+
+/* subcommand ids */
+enum {
+ DVFS_SUBCMD_ENABLE = 0, /* DVFS enable/disable, 1-byte data
+ * DVFS enable:1, disable:0
+ */
+ DVFS_SUBCMD_LDV = 1, /* DVFS force arm state to LDV, 1-byte data
+ * DVFS force LDV ON 1, LDV OFF 0
+ */
+ DVFS_SUBCMD_STATUS = 2, /* DVFS status, data[] contains dvfs_status */
+ DVFS_SUBCMD_HIST = 3, /* DVFS history, data[] contains
+ * history of dvfs state change
+ */
+ DVFS_SUBCMD_LAST
+};
+
+/* DVFS Status */
+/* current DVFS state request for ARM */
+#define DVFS_STATE_BIT_MASK 0x0Fu
+#define DVFS_STATE_BIT_SHIFT 0u
+/* Bit value for DVFS state request */
+#define DVFS_STATE_LDV 0u
+#define DVFS_STATE_NDV 1u
+/* current DVFS status */
+#define DVFS_STATUS_BIT_MASK 0xF0u
+#define DVFS_STATUS_BIT_SHIFT 4u
+/* Bit value for DVFS status */
+#define DVFS_STATUS_LDV 0u
+#define DVFS_STATUS_NDV 1u
+/* DVFS bits are for status, raw request and active request */
+/* 4387b0 supports only status bits for aux, main, and bt */
+/* 4387c0 supports all eight status and request bits */
+#define DVFS_BIT_AUX_MASK 0x0001u
+#define DVFS_BIT_AUX_SHIFT 0u
+#define DVFS_BIT_AUX_VAL(_val) (((_val) & DVFS_BIT_AUX_MASK) \
+ >> DVFS_BIT_AUX_SHIFT)
+#define DVFS_BIT_MAIN_MASK 0x0002u
+#define DVFS_BIT_MAIN_SHIFT 1u
+#define DVFS_BIT_MAIN_VAL(_val) (((_val) & DVFS_BIT_MAIN_MASK) \
+ >> DVFS_BIT_MAIN_SHIFT)
+#define DVFS_BIT_BT_MASK 0x0004u
+#define DVFS_BIT_BT_SHIFT 2u
+#define DVFS_BIT_BT_VAL(_val) (((_val) & DVFS_BIT_BT_MASK) \
+ >> DVFS_BIT_BT_SHIFT)
+#define DVFS_BIT_CHIPC_MASK 0x0008u
+#define DVFS_BIT_CHIPC_SHIFT 3u
+#define DVFS_BIT_CHIPC_VAL(_val) (((_val) & DVFS_BIT_CHIPC_MASK) \
+ >> DVFS_BIT_CHIPC_SHIFT)
+#define DVFS_BIT_PCIE_MASK 0x0010u
+#define DVFS_BIT_PCIE_SHIFT 4u
+#define DVFS_BIT_PCIE_VAL(_val) (((_val) & DVFS_BIT_PCIE_MASK) \
+ >> DVFS_BIT_PCIE_SHIFT)
+#define DVFS_BIT_ARM_MASK 0x0020u
+#define DVFS_BIT_ARM_SHIFT 5u
+#define DVFS_BIT_ARM_VAL(_val) (((_val) & DVFS_BIT_ARM_MASK) \
+ >> DVFS_BIT_ARM_SHIFT)
+#define DVFS_BIT_SCAN_MASK 0x0040u
+#define DVFS_BIT_SCAN_SHIFT 6u
+#define DVFS_BIT_SCAN_VAL(_val) (((_val) & DVFS_BIT_SCAN_MASK) \
+ >> DVFS_BIT_SCAN_SHIFT)
+#define DVFS_BIT_BTSCAN_MASK 0x0080u
+#define DVFS_BIT_BTSCAN_SHIFT 7u
+#define DVFS_BIT_BTSCAN_VAL(_val) (((_val) & DVFS_BIT_BTSCAN_MASK) \
+ >> DVFS_BIT_BTSCAN_SHIFT)
+#define DVFS_BIT_HWA_MASK 0x0100u
+#define DVFS_BIT_HWA_SHIFT 8u
+#define DVFS_BIT_HWA_VAL(_val) (((_val) & DVFS_BIT_HWA_MASK) \
+ >> DVFS_BIT_HWA_SHIFT)
+#define DVFS_BIT_SYSMEM_MASK 0x0200u
+#define DVFS_BIT_SYSMEM_SHIFT 9u
+#define DVFS_BIT_SYSMEM_VAL(_val) (((_val) & DVFS_BIT_SYSMEM_MASK) \
+ >> DVFS_BIT_SYSMEM_SHIFT)
+/* divisor to convert the voltage field (in units of 10 mV) to volts */
+#define DVFS_CONVERT_TO_VOLT 100u
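+
+/* Example (illustrative sketch, not part of this header): decoding the packed
+ * 'info' byte and the 'voltage' byte of a DVFS status report; voltage is in
+ * 10 mV units, so dividing by DVFS_CONVERT_TO_VOLT (100) yields volts.
+ */
+#if 0
+static void
+dvfs_decode(uint8 info, uint8 voltage)
+{
+	uint8 req = (info & DVFS_STATE_BIT_MASK) >> DVFS_STATE_BIT_SHIFT;	/* requested */
+	uint8 cur = (info & DVFS_STATUS_BIT_MASK) >> DVFS_STATUS_BIT_SHIFT;	/* current */
+	uint32 millivolt = (uint32)voltage * 10u;
+	(void)req; (void)cur; (void)millivolt;
+}
+#endif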
+
+/* status version for 4387b0 */
+#define DVFS_STATUS_VERSION_1 1
+typedef struct dvfs_status_v1 {
+ uint16 version; /* version of dvfs_status */
+ uint16 len; /* total length including all fixed fields */
+ uint8 info; /* current dvfs state request and status */
+ uint8 voltage; /* voltage (multiple of 10mV) */
+ uint16 freq; /* arm clock frequency (in MHz) */
+ uint32 state_change_count; /* total state (LDV/NDV) transition count */
+ uint32 ldv_duration; /* total time (ms) in LDV */
+ uint32 ndv_duration; /* total time (ms) in NDV */
+ uint16 status; /* status bits */
+ uint16 pad; /* word aligned for size */
+} dvfs_status_v1_t;
+#define DVFS_STATUS_VER_1_LEN (sizeof(dvfs_status_v1_t))
+/* status version for 4387c0 */
+#define DVFS_STATUS_VERSION_2 2
+#define DVFS_STATUS_VERSION_3 3
+typedef struct dvfs_status_v2 {
+ uint16 version; /* version of dvfs_status */
+ uint16 len; /* total length including all fixed fields */
+ uint8 info; /* current dvfs state request and status */
+ uint8 voltage; /* voltage (multiple of 10mV) */
+ uint16 freq; /* arm clock frequency (in MHz) */
+ uint32 state_change_count; /* total state (LDV/NDV) transition count */
+ uint32 ldv_duration; /* total time (ms) in LDV */
+ uint32 ndv_duration; /* total time (ms) in NDV */
+ uint16 status; /* status bits */
+ uint16 raw_request; /* raw request bits */
+ uint16 active_request; /* active request bits */
+ /* DVFS_STATUS_VERSION_3 for pmurev >= 40 */
+ uint16 valid_cores; /* bitmap to indicate valid cores status */
+} dvfs_status_v2_t;
+#define DVFS_STATUS_V2_VALID_CORES (0xFFu)
+#define DVFS_STATUS_VER_3_LEN (sizeof(dvfs_status_v2_t))
+#define DVFS_STATUS_VER_2_LEN (DVFS_STATUS_VER_3_LEN - (sizeof(uint16)))
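+
+/* Example (illustrative sketch, not part of this header): a host-side check
+ * that tolerates both layouts of dvfs_status_v2_t; v3 appends valid_cores, so
+ * v2 responses are one uint16 shorter.
+ */
+#if 0
+static bool
+dvfs_status_has_valid_cores(const dvfs_status_v2_t *st)
+{
+	return (st->version >= DVFS_STATUS_VERSION_3) &&
+		(st->len >= DVFS_STATUS_VER_3_LEN);
+}
+#endif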
+
+/* DVFS_SUBCMD_HIST */
+#define DVFS_HIST_CMD_VERSION_1 1
+typedef struct dvfs_hist_cmd_v1 {
+ uint16 version; /* version of this structure */
+ uint16 len; /* includes both fixed and variable data[] fields */
+ uint8 data[]; /* subcommand data : array of dvfs_hist_v1_t */
+} dvfs_hist_cmd_v1_t;
+
+/* DVFS_SUBCMD_HIST data[] payload */
+typedef struct dvfs_hist_v1 {
+ uint8 old_state; /* old state */
+ uint8 new_state; /* new state */
+ uint16 reason; /* reason for state change */
+ uint32 timestamp; /* timestamp of state change */
+} dvfs_hist_v1_t;
+
+/* Bits for DVFS state change reason */
+#define WL_DVFS_REASON_NOTPM 0x0001u /* Not PM */
+#define WL_DVFS_REASON_MPC 0x0002u /* MPC */
+#define WL_DVFS_REASON_TX_ACTIVE 0x0004u /* TX Active */
+#define WL_DVFS_REASON_DBGST_ACTIVE 0x0008u /* Power state active */
+#define WL_DVFS_REASON_DBGST_ASLEEP 0x0010u /* Power state asleep */
+#define WL_DVFS_REASON_LTR_ACTIVE 0x0020u /* LTR Active */
+#define WL_DVFS_REASON_HOST 0x0040u /* Host disabled */
+#define WL_DVFS_REASON_SCAN 0x0080u /* Scan */
+#define WL_DVFS_REASON_SLOTTED_BSS 0x0100u /* Slotted BSS */
+#define WL_DVFS_REASON_CHAN 0x0200u /* Channel Change */
+#define WL_DVFS_REASON_CAL 0x0400u /* CAL */
+#define WL_DVFS_REASON_ASSOC 0x0800u /* ASSOC */
+#define WL_DVFS_REASON_WD 0x1000u /* WD */
+#define WL_DVFS_REASON_SOFTAP 0x2000u /* SoftAP */
+
+/*
+ * The join preference iovar value is an array of tuples. Each tuple has a one-byte type,
+ * a one-byte length, and a variable-length value. The RSSI type tuple must be present
+ * in the array.
+ *
+ * Types are defined in "join preference types" section.
+ *
+ * Length is the value size in octets. It is reserved for WL_JOIN_PREF_WPA type tuple
+ * and must be set to zero.
+ *
+ * Values are defined below.
+ *
+ * 1. RSSI - 2 octets
+ * offset 0: reserved
+ * offset 1: reserved
+ *
+ * 2. WPA - 2 + 12 * n octets (n is # tuples defined below)
+ * offset 0: reserved
+ * offset 1: # of tuples
+ * offset 2: tuple 1
+ * offset 14: tuple 2
+ * ...
+ * offset 2 + 12 * (n - 1) octets: tuple n
+ *
+ * struct wpa_cfg_tuple {
+ * uint8 akm[DOT11_OUI_LEN+1]; akm suite
+ * uint8 ucipher[DOT11_OUI_LEN+1]; unicast cipher suite
+ * uint8 mcipher[DOT11_OUI_LEN+1]; multicast cipher suite
+ * };
+ *
+ * multicast cipher suite can be specified as a specific cipher suite or WL_WPA_ACP_MCS_ANY.
+ *
+ * 3. BAND - 2 octets
+ * offset 0: reserved
+ * offset 1: see "band preference" and "band types"
+ *
+ * 4. BAND RSSI - 2 octets
+ * offset 0: band types
+ * offset 1: +ve RSSI boost value in dB
+ */
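+
+/* Example (illustrative sketch, not part of this header): serializing a
+ * BAND RSSI tuple per the layout above. 'type' is the BAND RSSI tuple type
+ * from the "join preference types" section (not reproduced here).
+ */
+#if 0
+static int
+join_pref_put_band_rssi(uint8 *buf, uint8 type, uint8 band, uint8 boost_db)
+{
+	buf[0] = type;		/* one-byte type */
+	buf[1] = 2;		/* one-byte length: value is 2 octets */
+	buf[2] = band;		/* offset 0: band type */
+	buf[3] = boost_db;	/* offset 1: +ve RSSI boost in dB */
+	return 4;		/* bytes written */
+}
+#endif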
+
+struct tsinfo_arg {
+ uint8 octets[3];
+};
+
+#define RATE_CCK_1MBPS 0
+#define RATE_CCK_2MBPS 1
+#define RATE_CCK_5_5MBPS 2
+#define RATE_CCK_11MBPS 3
+
+#define RATE_LEGACY_OFDM_6MBPS 0
+#define RATE_LEGACY_OFDM_9MBPS 1
+#define RATE_LEGACY_OFDM_12MBPS 2
+#define RATE_LEGACY_OFDM_18MBPS 3
+#define RATE_LEGACY_OFDM_24MBPS 4
+#define RATE_LEGACY_OFDM_36MBPS 5
+#define RATE_LEGACY_OFDM_48MBPS 6
+#define RATE_LEGACY_OFDM_54MBPS 7
+
+#define WL_BSSTRANS_RSSI_RATE_MAP_VERSION 1
+#define WL_BSSTRANS_RSSI_RATE_MAP_VERSION_V1 1
+#define WL_BSSTRANS_RSSI_RATE_MAP_VERSION_V2 2
+#define WL_BSSTRANS_RSSI_RATE_MAP_VERSION_V3 3
+
+typedef struct wl_bsstrans_rssi {
+	int8 rssi_2g;	/**< RSSI in dBm for 2.4 GHz */
+	int8 rssi_5g;	/**< RSSI in dBm for 5 GHz, unused for cck */
+} wl_bsstrans_rssi_t;
+
+#define RSSI_RATE_MAP_MAX_STREAMS 4 /**< max streams supported */
+
+/** RSSI to rate mapping, all 20Mhz, no SGI */
+typedef struct wl_bsstrans_rssi_rate_map_v3 {
+ uint16 ver;
+ uint16 len; /**< length of entire structure */
+ wl_bsstrans_rssi_t cck[WL_NUM_RATES_CCK]; /**< 2.4G only */
+ wl_bsstrans_rssi_t ofdm[WL_NUM_RATES_OFDM]; /**< 6 to 54mbps */
+ wl_bsstrans_rssi_t phy_n[RSSI_RATE_MAP_MAX_STREAMS][WL_NUM_RATES_MCS_1STREAM]; /* MCS0-7 */
+ wl_bsstrans_rssi_t phy_ac[RSSI_RATE_MAP_MAX_STREAMS][WL_NUM_RATES_VHT_ALL]; /* MCS0-11 */
+ wl_bsstrans_rssi_t phy_ax[RSSI_RATE_MAP_MAX_STREAMS][WL_NUM_RATES_HE]; /* MCS0-11 */
+ wl_bsstrans_rssi_t phy_be[RSSI_RATE_MAP_MAX_STREAMS][WL_NUM_RATES_EHT]; /* MCS0-13 */
+} wl_bsstrans_rssi_rate_map_v3_t;
+
+/** RSSI to rate mapping, all 20Mhz, no SGI */
+typedef struct wl_bsstrans_rssi_rate_map_v2 {
+ uint16 ver;
+ uint16 len; /**< length of entire structure */
+ wl_bsstrans_rssi_t cck[WL_NUM_RATES_CCK]; /**< 2.4G only */
+ wl_bsstrans_rssi_t ofdm[WL_NUM_RATES_OFDM]; /**< 6 to 54mbps */
+ wl_bsstrans_rssi_t phy_n[RSSI_RATE_MAP_MAX_STREAMS][WL_NUM_RATES_MCS_1STREAM]; /* MCS0-7 */
+ wl_bsstrans_rssi_t phy_ac[RSSI_RATE_MAP_MAX_STREAMS][WL_NUM_RATES_VHT_ALL]; /**< MCS0-11 */
+ wl_bsstrans_rssi_t phy_ax[RSSI_RATE_MAP_MAX_STREAMS][WL_NUM_RATES_HE]; /**< MCS0-11 */
+} wl_bsstrans_rssi_rate_map_v2_t;
+
+/** RSSI to rate mapping, all 20Mhz, no SGI */
+typedef struct wl_bsstrans_rssi_rate_map_v1 {
+ uint16 ver;
+ uint16 len; /**< length of entire structure */
+ wl_bsstrans_rssi_t cck[WL_NUM_RATES_CCK]; /**< 2.4G only */
+ wl_bsstrans_rssi_t ofdm[WL_NUM_RATES_OFDM]; /**< 6 to 54mbps */
+ wl_bsstrans_rssi_t phy_n[RSSI_RATE_MAP_MAX_STREAMS][WL_NUM_RATES_MCS_1STREAM]; /* MCS0-7 */
+ wl_bsstrans_rssi_t phy_ac[RSSI_RATE_MAP_MAX_STREAMS][WL_NUM_RATES_VHT]; /**< MCS0-9 */
+} wl_bsstrans_rssi_rate_map_v1_t;
+
+/** RSSI to rate mapping, all 20Mhz, no SGI */
+typedef struct wl_bsstrans_rssi_rate_map {
+ uint16 ver;
+ uint16 len; /**< length of entire structure */
+ wl_bsstrans_rssi_t cck[WL_NUM_RATES_CCK]; /**< 2.4G only */
+ wl_bsstrans_rssi_t ofdm[WL_NUM_RATES_OFDM]; /**< 6 to 54mbps */
+ wl_bsstrans_rssi_t phy_n[RSSI_RATE_MAP_MAX_STREAMS][WL_NUM_RATES_MCS_1STREAM]; /* MCS0-7 */
+ wl_bsstrans_rssi_t phy_ac[RSSI_RATE_MAP_MAX_STREAMS][WL_NUM_RATES_VHT]; /**< MCS0-9 */
+} wl_bsstrans_rssi_rate_map_t;
+
+#define WL_BSSTRANS_ROAMTHROTTLE_VERSION 1
+
+/** Configure number of scans allowed per throttle period */
+typedef struct wl_bsstrans_roamthrottle {
+ uint16 ver;
+ uint16 period;
+ uint16 scans_allowed;
+} wl_bsstrans_roamthrottle_t;
+
+#define NFIFO			6	/**< # tx/rx FIFO pairs */
+
+#ifndef NFIFO_EXT
+#if defined(BCM_AQM_DMA_DESC) && !defined(BCM_AQM_DMA_DESC_DISABLED)
+#ifdef WL_LLW
+#define NFIFO_EXT 11 /* 4EDCA + 4 TWT + 1 Mcast/Bcast + 1 Spare + 1 LLQ */
+#else
+#define NFIFO_EXT 10 /* 4EDCA + 4 TWT + 1 Mcast/Bcast + 1 Spare */
+#endif
+#elif defined(WL11AX_TRIGGERQ) && !defined(WL11AX_TRIGGERQ_DISABLED)
+#define NFIFO_EXT 10
+#else
+#define NFIFO_EXT NFIFO
+#endif /* BCM_AQM_DMA_DESC && !BCM_AQM_DMA_DESC_DISABLED */
+#endif /* NFIFO_EXT */
+
+/* When new reason codes are added to the list, please also update wl_reinit_names */
+/* Reinit reason codes */
+enum {
+ WL_REINIT_RC_NONE = 0,
+ WL_REINIT_RC_PS_SYNC = 1,
+ WL_REINIT_RC_PSM_WD = 2,
+ WL_REINIT_RC_MAC_WAKE = 3,
+ WL_REINIT_RC_MAC_SUSPEND = 4,
+ WL_REINIT_RC_MAC_SPIN_WAIT = 5,
+ WL_REINIT_RC_AXI_BUS_ERROR = 6,
+ WL_REINIT_RC_DEVICE_REMOVED = 7,
+ WL_REINIT_RC_PCIE_FATAL_ERROR = 8,
+ WL_REINIT_RC_OL_FW_TRAP = 9,
+ WL_REINIT_RC_FIFO_ERR = 10,
+ WL_REINIT_RC_INV_TX_STATUS = 11,
+ WL_REINIT_RC_MQ_ERROR = 12,
+ WL_REINIT_RC_PHYTXERR_THRESH = 13,
+ WL_REINIT_RC_USER_FORCED = 14,
+ WL_REINIT_RC_FULL_RESET = 15,
+ WL_REINIT_RC_AP_BEACON = 16,
+ WL_REINIT_RC_PM_EXCESSED = 17,
+ WL_REINIT_RC_NO_CLK = 18,
+ WL_REINIT_RC_SW_ASSERT = 19,
+ WL_REINIT_RC_PSM_JMP0 = 20,
+ WL_REINIT_RC_PSM_RUN = 21,
+ WL_REINIT_RC_ENABLE_MAC = 22,
+ WL_REINIT_RC_SCAN_TIMEOUT = 23,
+ WL_REINIT_RC_JOIN_TIMEOUT = 24,
+ /* Below error codes are generated during D3 exit validation */
+ WL_REINIT_RC_LINK_NOT_ACTIVE = 25,
+ WL_REINIT_RC_PCI_CFG_RD_FAIL = 26,
+ WL_REINIT_RC_INV_VEN_ID = 27,
+ WL_REINIT_RC_INV_DEV_ID = 28,
+ WL_REINIT_RC_INV_BAR0 = 29,
+ WL_REINIT_RC_INV_BAR2 = 30,
+ WL_REINIT_RC_AER_UC_FATAL = 31,
+ WL_REINIT_RC_AER_UC_NON_FATAL = 32,
+ WL_REINIT_RC_AER_CORR = 33,
+ WL_REINIT_RC_AER_DEV_STS = 34,
+ WL_REINIT_RC_PCIe_STS = 35,
+ WL_REINIT_RC_MMIO_RD_FAIL = 36,
+ WL_REINIT_RC_MMIO_RD_INVAL = 37,
+ WL_REINIT_RC_MMIO_ARM_MEM_RD_FAIL = 38,
+ WL_REINIT_RC_MMIO_ARM_MEM_INVAL = 39,
+ WL_REINIT_RC_SROM_LOAD_FAILED = 40,
+ WL_REINIT_RC_PHY_CRASH = 41,
+ WL_REINIT_TX_STALL = 42,
+ WL_REINIT_RC_TX_FLOW_CONTROL_BLOCKED = 43,
+ WL_REINIT_RC_RX_HC_FAIL = 44,
+ WL_REINIT_RC_RX_DMA_STALL = 45,
+ WL_REINIT_UTRACE_BUF_OVERLAP_SR = 46,
+ WL_REINIT_UTRACE_TPL_OUT_BOUNDS = 47,
+ WL_REINIT_UTRACE_TPL_OSET_STRT0 = 48,
+ WL_REINIT_RC_PHYTXERR = 49,
+ WL_REINIT_RC_PSM_FATAL_SUSP = 50,
+ WL_REINIT_RC_TX_FIFO_SUSP = 51,
+ WL_REINIT_RC_MAC_ENABLE = 52,
+ WL_REINIT_RC_SCAN_STALLED = 53,
+ WL_REINIT_RC_PHY_HC = 54,
+	WL_REINIT_RC_LAST,	/* DO NOT use this any more; kept for legacy reasons */
+ WL_REINIT_RC_RADIO_CRASH = 55,
+	WL_REINIT_RC_SUPPORTED_LAST	/* For app use ONLY; DO NOT use this in wlc code.
+					 * For wlc, use WL_REINIT_RC_VERSIONED_LAST.
+					 */
+};
+
+#define WL_REINIT_RC_V2 (2u)
+#define WL_REINIT_RC_LAST_V2 (WL_REINIT_RC_RADIO_CRASH)
+
+#define WL_REINIT_RC_INVALID 255
+
+#define NREINITREASONCOUNT 8
+/* NREINITREASONCOUNT is 8 in other branches.
+ * Any change to this will break wl tool compatibility with other branches
+ * #define NREINITREASONCOUNT WL_REINIT_RC_LAST
+ */
+/* REINITRSNIDX is kept for legacy reasons. Use REINIT_RSN_IDX for new versioned structure */
+#define REINITRSNIDX(_x) (((_x) < WL_REINIT_RC_LAST) ? (_x) : 0)
+#define REINIT_RSN_IDX(_x) (((_x) < WL_REINIT_RC_SUPPORTED_LAST) ? (_x) : 0) /* TBD: move
+ * this to src
+ */
+#define REINIT_RSN_IDX_V2(_x) (((_x) <= WL_REINIT_RC_LAST_V2) ? (_x) : 0)
+
+#define WL_CNT_T_VERSION 30 /**< current version of wl_cnt_t struct */
+#define WL_CNT_VERSION_6 6
+#define WL_CNT_VERSION_7 7
+#define WL_CNT_VERSION_11 11
+#define WL_CNT_VERSION_XTLV 30
+
+#define WL_COUNTERS_IOV_VERSION_1 1
+#define WL_SUBCNTR_IOV_VER WL_COUNTERS_IOV_VERSION_1
+/* The first two uint16 fields are version and length, so the offset of the first counter is 4 */
+#define FIRST_COUNTER_OFFSET 0x04
+
+/* needed for now since src/wl/ndis is automerged to other branches, e.g. BISON */
+#define WLC_WITH_XTLV_CNT
+
+/* Number of xtlv info as required to calculate subcounter offsets */
+#define WL_CNT_XTLV_ID_NUM 12
+#define WL_TLV_IOV_VER 1
+
+/**
+ * tlv IDs uniquely identifies counter component
+ * packed into wl_cmd_t container
+ */
+enum wl_cnt_xtlv_id {
+ WL_CNT_XTLV_SLICE_IDX = 0x1, /**< Slice index */
+ WL_CNT_XTLV_WLC = 0x100, /**< WLC layer counters */
+ WL_CNT_XTLV_WLC_RINIT_RSN = 0x101, /**< WLC layer reinitreason extension: LEGACY */
+ WL_CNT_XTLV_WLC_HE = 0x102, /* he counters */
+ WL_CNT_XTLV_WLC_SECVLN = 0x103, /* security vulnerabilities counters */
+ WL_CNT_XTLV_WLC_HE_OMI = 0x104, /* he omi counters */
+ WL_CNT_XTLV_WLC_RINIT_RSN_V2 = 0x105, /**< WLC layer reinitreason extension */
+ WL_CNT_XTLV_CNTV_LE10_UCODE = 0x200, /**< wl counter ver < 11 UCODE MACSTAT */
+ WL_CNT_XTLV_LT40_UCODE_V1 = 0x300, /**< corerev < 40 UCODE MACSTAT */
+ WL_CNT_XTLV_GE40_UCODE_V1 = 0x400, /**< corerev >= 40 UCODE MACSTAT */
+ WL_CNT_XTLV_GE64_UCODEX_V1 = 0x800, /* corerev >= 64 UCODEX MACSTAT */
+ WL_CNT_XTLV_GE80_UCODE_V1 = 0x900, /* corerev >= 80 UCODEX MACSTAT */
+ WL_CNT_XTLV_GE80_TXFUNFL_UCODE_V1 = 0x1000 /* corerev >= 80 UCODEX MACSTAT */
+};
+
+/* tlv IDs uniquely identifies periodic state component */
+enum wl_periodic_slice_state_xtlv_id {
+ WL_STATE_COMPACT_COUNTERS = 0x1,
+ WL_STATE_TXBF_COUNTERS = 0x2,
+ WL_STATE_COMPACT_HE_COUNTERS = 0x3
+};
+
+/* Sub tlvs for chan_counters */
+enum wl_periodic_chan_xtlv_id {
+ WL_CHAN_GENERIC_COUNTERS = 0x1,
+ WL_CHAN_PERIODIC_COUNTERS = 0x2
+};
+
+#ifdef WLC_CHAN_ECNTR_TEST
+#define WL_CHAN_PERIODIC_CNTRS_VER_1 1
+typedef struct wlc_chan_periodic_cntr
+{
+ uint16 version;
+ uint16 pad;
+ uint32 rxstrt;
+} wlc_chan_periodic_cntr_t;
+#endif /* WLC_CHAN_ECNTR_TEST */
+
+#define WL_CHANCNTR_HDR_VER_1 1
+typedef struct wlc_chan_cntr_hdr_v1
+{
+ uint16 version;
+ uint16 pad;
+	chanspec_t chanspec;	/* Don't add any fields above this */
+ uint16 pad1;
+ uint32 total_time;
+ uint32 chan_entry_cnt;
+} wlc_chan_cntr_hdr_v1_t;
+
+/* tlv IDs uniquely identifies periodic state component */
+enum wl_periodic_if_state_xtlv_id {
+ WL_STATE_IF_COMPACT_STATE = 0x1,
+ WL_STATE_IF_ADPS_STATE = 0x02,
+ WL_STATE_IF_ADPS_ENERGY_GAIN = 0x03
+};
+
+enum wl_periodic_tdls_if_state_xtlv_id {
+ WL_STATE_IF_TDLS_STATE = 0x1
+};
+
+#define TDMTX_CNT_VERSION_V1 1
+#define TDMTX_CNT_VERSION_V2 2
+
+/* structure holding tdm counters that interface to iovar */
+typedef struct tdmtx_cnt_v1 {
+ uint16 ver;
+ uint16 length; /* length of this structure */
+ uint16 wlc_idx; /* index for wlc */
+ uint16 enabled; /* tdmtx is enabled on slice */
+ uint32 tdmtx_txa_on; /* TXA on requests */
+	uint32 tdmtx_txa_tmcnt;	/* Total number of TXA timeouts */
+ uint32 tdmtx_por_on; /* TXA POR requests */
+ uint32 tdmtx_txpuen; /* Path enable requests */
+ uint32 tdmtx_txpudis; /* Total number of times Tx path is muted on the slice */
+ uint32 tdmtx_txpri_on; /* Total number of times Tx priority was obtained by the slice */
+ uint32 tdmtx_txdefer; /* Total number of times Tx was deferred on the slice */
+ uint32 tdmtx_txmute; /* Total number of times active Tx muted on the slice */
+ uint32 tdmtx_actpwrboff; /* Total number of times TX power is backed off by the slice */
+ uint32 tdmtx_txa_dur; /* Total time txa on */
+ uint32 tdmtx_txpri_dur; /* Total time TXPri */
+ uint32 tdmtx_txdefer_dur; /* Total time txdefer */
+ /* TDMTX input fields */
+ uint32 tdmtx_txpri;
+ uint32 tdmtx_defer;
+ uint32 tdmtx_threshold;
+ uint32 tdmtx_rssi_threshold;
+ uint32 tdmtx_txpwrboff;
+ uint32 tdmtx_txpwrboff_dt;
+} tdmtx_cnt_v1_t;
+
+typedef struct {
+ uint16 ver;
+ uint16 length; /* length of the data portion */
+ uint16 cnt;
+ uint16 pad; /* pad to align to 32 bit */
+ uint8 data[]; /* array of tdmtx_cnt_v1_t */
+} tdmtx_status_t;
+
+/* structure holding counters that match exactly shm field sizes */
+typedef struct tdmtx_cnt_shm_v1 {
+ uint16 tdmtx_txa_on; /* TXA on requests */
+	uint16 tdmtx_tmcnt;	/* Total number of TXA timeouts */
+ uint16 tdmtx_por_on; /* TXA POR requests */
+ uint16 tdmtx_txpuen; /* Path enable requests */
+ uint16 tdmtx_txpudis; /* Total number of times Tx path is muted on the slice */
+ uint16 tdmtx_txpri_on; /* Total number of times Tx priority was obtained by the slice */
+	uint16 tdmtx_txdefer;	/* Total number of times Tx was deferred by the slice */
+ uint16 tdmtx_txmute; /* Total number of times active Tx muted on the slice */
+ uint16 tdmtx_actpwrboff; /* Total number of times TX power is backed off by the slice */
+ uint16 tdmtx_txa_dur_l; /* Total time (low 16 bits) txa on */
+	uint16 tdmtx_txa_dur_h;	/* Total time (high 16 bits) txa on */
+ uint16 tdmtx_txpri_dur_l; /* Total time (low 16 bits) TXPri */
+ uint16 tdmtx_txpri_dur_h; /* Total time (high 16 bits) TXPri */
+ uint16 tdmtx_txdefer_dur_l; /* Total time (low 16 bits) txdefer */
+ uint16 tdmtx_txdefer_dur_h; /* Total time (high 16 bits) txdefer */
+} tdmtx_cnt_shm_v1_t;
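+
+/* Example (illustrative sketch, not part of this header): the SHM duration
+ * counters above are split into 16-bit halves; a host reader recombines them
+ * into a 32-bit value like this.
+ */
+#if 0
+static uint32
+tdmtx_dur32(uint16 dur_l, uint16 dur_h)
+{
+	return ((uint32)dur_h << 16) | dur_l;
+}
+#endif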
+
+/* structure holding tdm counters that interface to iovar for version 2 */
+typedef struct tdmtx_cnt_v2 {
+ uint16 ver;
+ uint16 length; /* length of this structure */
+ uint16 wlc_idx; /* index for wlc */
+ uint16 enabled; /* tdmtx is enabled on slice */
+ uint32 tdmtx_txa_on; /* TXA on requests */
+	uint32 tdmtx_txa_tmcnt;	/* Total number of TXA timeouts */
+ uint32 tdmtx_porhi_on; /* TXA PORHI requests */
+ uint32 tdmtx_porlo_on; /* TXA PORLO requests */
+ uint32 tdmtx_txpuen; /* Path enable requests */
+ uint32 tdmtx_txpudis; /* Total number of times Tx path is muted on the slice */
+ uint32 tdmtx_txpri_on; /* Total number of times Tx priority was obtained by the slice */
+ uint32 tdmtx_txdefer; /* Total number of times Tx was deferred on the slice */
+ uint32 tdmtx_txmute; /* Total number of times active Tx muted on the slice */
+ uint32 tdmtx_actpwrboff; /* Total number of times TX power is backed off by the slice */
+ uint32 tdmtx_txa_dur; /* Total time txa on */
+ uint32 tdmtx_txpri_dur; /* Total time TXPri */
+ uint32 tdmtx_txdefer_dur; /* Total time txdefer */
+ /* TDMTX input fields */
+ uint32 tdmtx_txpri;
+ uint32 tdmtx_defer;
+ uint32 tdmtx_threshold;
+ uint32 tdmtx_rssi_threshold;
+ uint32 tdmtx_txpwrboff;
+ uint32 tdmtx_txpwrboff_dt;
+} tdmtx_cnt_v2_t;
+
+/* structure holding counters that match exactly shm field sizes */
+typedef struct tdmtx_cnt_shm_v2 {
+ uint16 tdmtx_txa_on; /* TXA on requests */
+	uint16 tdmtx_tmcnt;	/* Total number of TXA timeouts */
+ uint16 tdmtx_porhi_on; /* TXA PORHI requests */
+ uint16 tdmtx_porlo_on; /* TXA PORLO requests */
+ uint16 tdmtx_txpuen; /* Path enable requests */
+ uint16 tdmtx_txpudis; /* Total number of times Tx path is muted on the slice */
+ uint16 tdmtx_txpri_on; /* Total number of times Tx priority was obtained by the slice */
+	uint16 tdmtx_txdefer;	/* Total number of times Tx was deferred by the slice */
+ uint16 tdmtx_txmute; /* Total number of times active Tx muted on the slice */
+ uint16 tdmtx_actpwrboff; /* Total number of times TX power is backed off by the slice */
+ uint16 tdmtx_txa_dur_l; /* Total time (low 16 bits) txa on */
+	uint16 tdmtx_txa_dur_h;	/* Total time (high 16 bits) txa on */
+ uint16 tdmtx_txpri_dur_l; /* Total time (low 16 bits) TXPri */
+ uint16 tdmtx_txpri_dur_h; /* Total time (high 16 bits) TXPri */
+ uint16 tdmtx_txdefer_dur_l; /* Total time (low 16 bits) txdefer */
+ uint16 tdmtx_txdefer_dur_h; /* Total time (high 16 bits) txdefer */
+} tdmtx_cnt_shm_v2_t;
+
+typedef struct wl_tdmtx_ioc {
+ uint16 id; /* ID of the sub-command */
+ uint16 len; /* total length of all data[] */
+ uint8 data[]; /* var len payload */
+} wl_tdmtx_ioc_t;
+
+/*
+ * iovar subcommand ids
+ */
+enum {
+ IOV_TDMTX_ENB = 1,
+ IOV_TDMTX_STATUS = 2,
+ IOV_TDMTX_TXPRI = 3,
+ IOV_TDMTX_DEFER = 4,
+ IOV_TDMTX_TXA = 5,
+ IOV_TDMTX_CFG = 6,
+ IOV_TDMTX_LAST
+};
+
+/* iovar structure for beacon simulator */
+typedef struct wl_bcnsim_ioc {
+ uint16 id; /* ID of the sub-command */
+ uint16 len; /* total length of all data[] */
+ uint8 data[]; /* var len payload */
+} wl_bcnsim_ioc_t;
+
+/* iovar subcmd ids */
+enum {
+ IOV_BCNSIM_ENB = 1,
+ IOV_BCNSIM_ERRMAX = 2,
+ IOV_BCNSIM_ERRDSTRB = 3,
+ IOV_BCNSIM_DRIFT = 4,
+ IOV_BCNSIM_RNDLYMAX = 5,
+ IOV_BCNSIM_RNDDLY_DSTRB = 6,
+ IOV_BCNSIM_CONSDLY = 7,
+ IOV_BCNSIM_OMT_PROB = 8,
+ IOV_BCNSIM_OMT_MIN_N = 9,
+ IOV_BCNSIM_OMT_MAX_N = 10,
+ IOV_BCNSIM_OMT_DSTRB = 11,
+ IOV_BCNSIM_TSF_JUMP = 12,
+ IOV_BCNSIM_PATTERN = 13,
+ IOV_BCNSIM_STATUS = 14,
+ IOV_BCNSIM_AUTH = 15,
+ IOV_BCNSIM_RNDDLY_PROB = 16,
+ IOV_BCNSIM_LAST
+};
+
+/* tlv id for beacon simulator */
+enum wl_bcnsim_xtlv_id {
+ WL_BCNSIM_XTLV_ENABLE = 0x1,
+ WL_BCNSIM_XTLV_ERRMAX = 0x2,
+ WL_BCNSIM_XTLV_ERRDSTRB = 0x3,
+ WL_BCNSIM_XTLV_DRIFT = 0x4,
+ WL_BCNSIM_XTLV_RNDLYMAX = 0x5,
+ WL_BCNSIM_XTLV_RNDDLY_DSTRB = 0x6,
+ WL_BCNSIM_XTLV_CONSDLY = 0x7,
+ WL_BCNSIM_XTLV_OMT_PROB = 0x8,
+ WL_BCNSIM_XTLV_OMT_MIN_N = 0x9,
+ WL_BCNSIM_XTLV_OMT_MAX_N = 0xa,
+ WL_BCNSIM_XTLV_OMT_DSTRB = 0xb,
+ WL_BCNSIM_XTLV_TSF_JUMP = 0xc,
+ WL_BCNSIM_XTLV_PATTERN = 0xd,
+ WL_BCNSIM_XTLV_STATUS = 0xe,
+ WL_BCNSIM_XTLV_AUTH = 0xf,
+ WL_BCNSIM_XTLV_RNDDLY_PROB = 0x10
+};
+
+/* structure to store different pattern params */
+typedef struct wlc_bcnsim_bcn_diff_v1 {
+ uint16 version;
+ uint16 dtim_cnt;
+ uint32 tx_delta;
+ uint32 ts_delta;
+} wlc_bcnsim_bcn_diff_v1_t;
+
+/* structure to store/pass pattern */
+typedef struct wlc_bcnsim_pattern_info_v1 {
+ uint16 version;
+ uint16 pattern_count;
+ uint16 current_pos;
+ wlc_bcnsim_bcn_diff_v1_t bcnsim_bcn_diff[];
+} wlc_bcnsim_pattern_info_v1_t;
+
+/* struct to store bcn sim status */
+typedef struct bcnsim_status_v1 {
+ uint16 ver;
+ uint16 length; /* length of this structure */
+ uint32 rnd_delay_max; /* random delay */
+ int32 const_delay; /* cons delay */
+ int32 tsf_jump; /* change tsf */
+ int16 drift; /* add drift */
+ uint16 error_max; /* max error */
+ uint8 error_dstrb; /* error dstrb */
+ uint8 rnd_delay_dstrb; /* rnd delay distr */
+ uint8 rnd_delay_prob; /* random delay prob */
+ uint8 omit_prob; /* control omit prob */
+ uint8 omit_min_n; /* min omit */
+ uint8 omit_max_n; /* max omit */
+ uint8 omit_dstrb; /* omit dstrb % */
+ uint8 padding1;
+} bcnsim_status_v1_t;
+
+/**
+ * The number of variables in wl macstat cnt struct.
+ * (wl_cnt_ge40mcst_v1_t, wl_cnt_lt40mcst_v1_t, wl_cnt_v_le10_mcst_t)
+ */
+#define WL_CNT_MCST_VAR_NUM 64
+/* sizeof(wl_cnt_ge40mcst_v1_t), sizeof(wl_cnt_lt40mcst_v1_t), and sizeof(wl_cnt_v_le10_mcst_t) */
+#define WL_CNT_MCST_STRUCT_SZ ((uint32)sizeof(uint32) * WL_CNT_MCST_VAR_NUM)
+#define WL_CNT_REV80_MCST_STRUCT_SZ ((uint32)sizeof(wl_cnt_ge80mcst_v1_t))
+#define WL_CNT_REV80_MCST_TXFUNFlW_STRUCT_FIXED_SZ \
+ ((uint32)OFFSETOF(wl_cnt_ge80_txfunfl_v1_t, txfunfl))
+#define WL_CNT_REV80_MCST_TXFUNFl_STRUCT_SZ(fcnt) \
+ (WL_CNT_REV80_MCST_TXFUNFlW_STRUCT_FIXED_SZ + (fcnt * sizeof(uint32)))
+#define WL_CNT_REV80_MCST_TXFUNFlW_STRUCT_SZ (WL_CNT_REV80_MCST_TXFUNFl_STRUCT_SZ(NFIFO_EXT))
+
+#define WL_CNT_MCXST_STRUCT_SZ ((uint32)sizeof(wl_cnt_ge64mcxst_v1_t))
+
+#define WL_CNT_HE_STRUCT_SZ ((uint32)sizeof(wl_he_cnt_wlc_t))
+
+#define WL_CNT_SECVLN_STRUCT_SZ ((uint32)sizeof(wl_secvln_cnt_t))
+
+#define WL_CNT_HE_OMI_STRUCT_SZ ((uint32)sizeof(wl_he_omi_cnt_wlc_v1_t))
+#define INVALID_CNT_VAL (uint32)(-1)
+
+#define WL_XTLV_CNTBUF_MAX_SIZE ((uint32)(OFFSETOF(wl_cnt_info_t, data)) + \
+ (uint32)BCM_XTLV_HDR_SIZE + (uint32)sizeof(wl_cnt_wlc_t) + \
+ (uint32)BCM_XTLV_HDR_SIZE + WL_CNT_MCST_STRUCT_SZ + \
+ (uint32)BCM_XTLV_HDR_SIZE + WL_CNT_MCXST_STRUCT_SZ)
+
+#define WL_CNTBUF_MAX_SIZE MAX(WL_XTLV_CNTBUF_MAX_SIZE, (uint32)sizeof(wl_cnt_ver_11_t))
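+
+/* Example (illustrative sketch, not part of this header): WL_CNTBUF_MAX_SIZE
+ * bounds the response for both the XTLV (ver 30) and the legacy ver 11
+ * layouts, so a host tool can size one buffer for either and key the parse
+ * off the leading version field, which both layouts share.
+ */
+#if 0
+static bool
+cntbuf_is_xtlv(const void *buf)
+{
+	const wl_cnt_info_t *ci = (const wl_cnt_info_t *)buf;
+	return (ci->version == WL_CNT_VERSION_XTLV);	/* else legacy, e.g. ver 11 */
+}
+#endif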
+
+/** Top structure of counters IOVar buffer */
+typedef struct {
+ uint16 version; /**< see definition of WL_CNT_T_VERSION */
+ uint16 datalen; /**< length of data including all paddings. */
+ uint8 data []; /**< variable length payload:
+ * 1 or more bcm_xtlv_t type of tuples.
+ * each tuple is padded to multiple of 4 bytes.
+ * 'datalen' field of this structure includes all paddings.
+ */
+} wl_cnt_info_t;
+
+/* Top structure of subcounters IOVar buffer.
+ * Whenever we make any change in this structure,
+ * WL_SUBCNTR_IOV_VER should be updated accordingly.
+ * The structure definition should remain consistent between
+ * FW and the wl/WLM app.
+ */
+typedef struct {
+ uint16 version; /* Version of IOVAR structure. Used for backward
+ * compatibility in future. Whenever we make any
+ * changes to this structure then value of WL_SUBCNTR_IOV_VER
+ * needs to be updated properly.
+ */
+ uint16 length; /* length in bytes of this structure */
+	uint16 counters_version; /* see definition of WL_CNT_T_VERSION
+				  * wl app will send the version of counters
+				  * which is used to calculate the offset of counters.
+				  * It must match the version of counters FW is using,
+				  * else FW will return an error with its version of
+				  * counters set in this field.
+				  */
+	uint16 num_subcounters;	/* Number of counter offsets passed by the wl app to FW. */
+	uint32 data[1];		/* variable length payload:
+				 * Offsets to the counters are passed to FW
+				 * through this data field. FW returns the counter values
+				 * at the offsets passed by the wl app in this same field.
+				 */
+} wl_subcnt_info_t;
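+
+/* Example (illustrative sketch, not part of this header): filling a
+ * subcounter request for n counters given their byte offsets within the
+ * versioned counter block (the version/length header accounts for
+ * FIRST_COUNTER_OFFSET). Assumes the caller sized the buffer for n offsets.
+ */
+#if 0
+static uint16
+subcnt_fill_req(wl_subcnt_info_t *req, const uint32 *offsets, uint16 n)
+{
+	uint16 i;
+	req->version = WL_SUBCNTR_IOV_VER;
+	req->counters_version = WL_CNT_T_VERSION;	/* must match FW */
+	req->num_subcounters = n;
+	req->length = (uint16)(sizeof(*req) + (n - 1) * sizeof(uint32));
+	for (i = 0; i < n; i++)
+		req->data[i] = offsets[i];	/* e.g. FIRST_COUNTER_OFFSET + ... */
+	return req->length;
+}
+#endif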
+
+/* Top structure of counters TLV version IOVar buffer.
+ * The structure definition should remain consistent between
+ * FW and the wl/WLM app.
+ */
+typedef struct {
+ uint16 version; /* Version of IOVAR structure. Added for backward
+ * compatibility feature. If any changes are done,
+ * WL_TLV_IOV_VER need to be updated.
+ */
+ uint16 length; /* total len in bytes of this structure + payload */
+	uint16 counters_version; /* See definition of WL_CNT_VERSION_XTLV.
+				  * The wl app updates the counter tlv version to be used
+				  * so as to calculate the offsets of supported TLVs.
+				  * If there is a version mismatch, FW will return an error.
+				  */
+ uint16 num_tlv; /* Max number of TLV info passed by FW to WL app.
+ * and vice-versa
+ */
+	uint32 data[];	/* variable length payload:
+			 * This carries the TLVs supported by the F/W to the wl app.
+			 * This table is required to compute subcounter offsets at the wl app end.
+			 */
+} wl_cntr_tlv_info_t;
+
+/** wlc layer counters */
+typedef struct {
+ /* transmit stat counters */
+ uint32 txframe; /**< tx data frames */
+ uint32 txbyte; /**< tx data bytes */
+ uint32 txretrans; /**< tx mac retransmits */
+ uint32 txerror; /**< tx data errors (derived: sum of others) */
+ uint32 txctl; /**< tx management frames */
+ uint32 txprshort; /**< tx short preamble frames */
+ uint32 txserr; /**< tx status errors */
+ uint32 txnobuf; /**< tx out of buffers errors */
+ uint32 txnoassoc; /**< tx discard because we're not associated */
+ uint32 txrunt; /**< tx runt frames */
+ uint32 txchit; /**< tx header cache hit (fastpath) */
+ uint32 txcmiss; /**< tx header cache miss (slowpath) */
+
+ /* transmit chip error counters */
+ uint32 txuflo; /**< tx fifo underflows */
+ uint32 txphyerr; /**< tx phy errors (indicated in tx status) */
+ uint32 txphycrs; /**< PR8861/8963 counter */
+
+ /* receive stat counters */
+ uint32 rxframe; /**< rx data frames */
+ uint32 rxbyte; /**< rx data bytes */
+ uint32 rxerror; /**< rx data errors (derived: sum of others) */
+ uint32 rxctl; /**< rx management frames */
+ uint32 rxnobuf; /**< rx out of buffers errors */
+ uint32 rxnondata; /**< rx non data frames in the data channel errors */
+ uint32 rxbadds; /**< rx bad DS errors */
+ uint32 rxbadcm; /**< rx bad control or management frames */
+ uint32 rxfragerr; /**< rx fragmentation errors */
+ uint32 rxrunt; /**< rx runt frames */
+ uint32 rxgiant; /**< rx giant frames */
+ uint32 rxnoscb; /**< rx no scb error */
+ uint32 rxbadproto; /**< rx invalid frames */
+ uint32 rxbadsrcmac; /**< rx frames with Invalid Src Mac */
+ uint32 rxbadda; /**< rx frames tossed for invalid da */
+ uint32 rxfilter; /**< rx frames filtered out */
+
+ /* receive chip error counters */
+ uint32 rxoflo; /**< rx fifo overflow errors */
+ uint32 rxuflo[NFIFO]; /**< rx dma descriptor underflow errors */
+
+ uint32 d11cnt_txrts_off; /**< d11cnt txrts value when reset d11cnt */
+ uint32 d11cnt_rxcrc_off; /**< d11cnt rxcrc value when reset d11cnt */
+ uint32 d11cnt_txnocts_off; /**< d11cnt txnocts value when reset d11cnt */
+
+ /* misc counters */
+ uint32 dmade; /**< tx/rx dma descriptor errors */
+ uint32 dmada; /**< tx/rx dma data errors */
+ uint32 dmape; /**< tx/rx dma descriptor protocol errors */
+ uint32 reset; /**< reset count */
+	uint32 tbtt;		/**< counts the TBTT interrupts */
+ uint32 txdmawar; /**< # occurrences of PR15420 workaround */
+ uint32 pkt_callback_reg_fail; /**< callbacks register failure */
+
+ /* 802.11 MIB counters, pp. 614 of 802.11 reaff doc. */
+ uint32 txfrag; /**< dot11TransmittedFragmentCount */
+ uint32 txmulti; /**< dot11MulticastTransmittedFrameCount */
+ uint32 txfail; /**< dot11FailedCount */
+ uint32 txretry; /**< dot11RetryCount */
+ uint32 txretrie; /**< dot11MultipleRetryCount */
+ uint32 rxdup; /**< dot11FrameduplicateCount */
+ uint32 txrts; /**< dot11RTSSuccessCount */
+ uint32 txnocts; /**< dot11RTSFailureCount */
+ uint32 txnoack; /**< dot11ACKFailureCount */
+ uint32 rxfrag; /**< dot11ReceivedFragmentCount */
+ uint32 rxmulti; /**< dot11MulticastReceivedFrameCount */
+ uint32 rxcrc; /**< dot11FCSErrorCount */
+ uint32 txfrmsnt; /**< dot11TransmittedFrameCount (bogus MIB?) */
+ uint32 rxundec; /**< dot11WEPUndecryptableCount */
+
+ /* WPA2 counters (see rxundec for DecryptFailureCount) */
+ uint32 tkipmicfaill; /**< TKIPLocalMICFailures */
+ uint32 tkipcntrmsr; /**< TKIPCounterMeasuresInvoked */
+ uint32 tkipreplay; /**< TKIPReplays */
+ uint32 ccmpfmterr; /**< CCMPFormatErrors */
+ uint32 ccmpreplay; /**< CCMPReplays */
+ uint32 ccmpundec; /**< CCMPDecryptErrors */
+ uint32 fourwayfail; /**< FourWayHandshakeFailures */
+ uint32 wepundec; /**< dot11WEPUndecryptableCount */
+ uint32 wepicverr; /**< dot11WEPICVErrorCount */
+ uint32 decsuccess; /**< DecryptSuccessCount */
+ uint32 tkipicverr; /**< TKIPICVErrorCount */
+ uint32 wepexcluded; /**< dot11WEPExcludedCount */
+
+ uint32 txchanrej; /**< Tx frames suppressed due to channel rejection */
+ uint32 psmwds; /**< Count PSM watchdogs */
+ uint32 phywatchdog; /**< Count Phy watchdogs (triggered by ucode) */
+
+ /* MBSS counters, AP only */
+ uint32 prq_entries_handled; /**< PRQ entries read in */
+ uint32 prq_undirected_entries; /**< which were bcast bss & ssid */
+ uint32 prq_bad_entries; /**< which could not be translated to info */
+ uint32 atim_suppress_count; /**< TX suppressions on ATIM fifo */
+ uint32 bcn_template_not_ready; /**< Template marked in use on send bcn ... */
+ uint32 bcn_template_not_ready_done; /**< ...but "DMA done" interrupt rcvd */
+ uint32 late_tbtt_dpc; /**< TBTT DPC did not happen in time */
+
+ /* per-rate receive stat counters */
+ uint32 rx1mbps; /**< packets rx at 1Mbps */
+ uint32 rx2mbps; /**< packets rx at 2Mbps */
+ uint32 rx5mbps5; /**< packets rx at 5.5Mbps */
+ uint32 rx6mbps; /**< packets rx at 6Mbps */
+ uint32 rx9mbps; /**< packets rx at 9Mbps */
+ uint32 rx11mbps; /**< packets rx at 11Mbps */
+ uint32 rx12mbps; /**< packets rx at 12Mbps */
+ uint32 rx18mbps; /**< packets rx at 18Mbps */
+ uint32 rx24mbps; /**< packets rx at 24Mbps */
+ uint32 rx36mbps; /**< packets rx at 36Mbps */
+ uint32 rx48mbps; /**< packets rx at 48Mbps */
+ uint32 rx54mbps; /**< packets rx at 54Mbps */
+ uint32 rx108mbps; /**< packets rx at 108mbps */
+ uint32 rx162mbps; /**< packets rx at 162mbps */
+ uint32 rx216mbps; /**< packets rx at 216 mbps */
+ uint32 rx270mbps; /**< packets rx at 270 mbps */
+ uint32 rx324mbps; /**< packets rx at 324 mbps */
+ uint32 rx378mbps; /**< packets rx at 378 mbps */
+ uint32 rx432mbps; /**< packets rx at 432 mbps */
+ uint32 rx486mbps; /**< packets rx at 486 mbps */
+ uint32 rx540mbps; /**< packets rx at 540 mbps */
+
+ uint32 rfdisable; /**< count of radio disables */
+
+ uint32 txexptime; /**< Tx frames suppressed due to timer expiration */
+
+ uint32 txmpdu_sgi; /**< count for sgi transmit */
+ uint32 rxmpdu_sgi; /**< count for sgi received */
+ uint32 txmpdu_stbc; /**< count for stbc transmit */
+ uint32 rxmpdu_stbc; /**< count for stbc received */
+
+ uint32 rxundec_mcst; /**< dot11WEPUndecryptableCount */
+
+ /* WPA2 counters (see rxundec for DecryptFailureCount) */
+ uint32 tkipmicfaill_mcst; /**< TKIPLocalMICFailures */
+ uint32 tkipcntrmsr_mcst; /**< TKIPCounterMeasuresInvoked */
+ uint32 tkipreplay_mcst; /**< TKIPReplays */
+ uint32 ccmpfmterr_mcst; /**< CCMPFormatErrors */
+ uint32 ccmpreplay_mcst; /**< CCMPReplays */
+ uint32 ccmpundec_mcst; /**< CCMPDecryptErrors */
+ uint32 fourwayfail_mcst; /**< FourWayHandshakeFailures */
+ uint32 wepundec_mcst; /**< dot11WEPUndecryptableCount */
+ uint32 wepicverr_mcst; /**< dot11WEPICVErrorCount */
+ uint32 decsuccess_mcst; /**< DecryptSuccessCount */
+ uint32 tkipicverr_mcst; /**< TKIPICVErrorCount */
+ uint32 wepexcluded_mcst; /**< dot11WEPExcludedCount */
+
+ uint32 dma_hang; /**< count for dma hang */
+ uint32 reinit; /**< count for reinit */
+
+ uint32 pstatxucast; /**< count of ucast frames xmitted on all psta assoc */
+ uint32 pstatxnoassoc; /**< count of txnoassoc frames xmitted on all psta assoc */
+ uint32 pstarxucast; /**< count of ucast frames received on all psta assoc */
+ uint32 pstarxbcmc; /**< count of bcmc frames received on all psta */
+ uint32 pstatxbcmc; /**< count of bcmc frames transmitted on all psta */
+
+ uint32 cso_passthrough; /**< hw cso required but passthrough */
+ uint32 cso_normal; /**< hw cso hdr for normal process */
+ uint32 chained; /**< number of frames chained */
+ uint32 chainedsz1; /**< number of chain size 1 frames */
+ uint32 unchained; /**< number of frames not chained */
+ uint32 maxchainsz; /**< max chain size so far */
+ uint32 currchainsz; /**< current chain size */
+ uint32 pciereset; /**< Secondary Bus Reset issued by driver */
+ uint32 cfgrestore; /**< configspace restore by driver */
+ uint32 reinitreason[NREINITREASONCOUNT]; /**< reinitreason counters; 0: Unknown reason */
+ uint32 rxrtry; /**< num of received packets with retry bit on */
+ uint32 rxmpdu_mu; /**< Number of MU MPDUs received */
+
+ /* detailed control/management frames */
+ uint32 txbar; /**< Number of TX BAR */
+ uint32 rxbar; /**< Number of RX BAR */
+ uint32 txpspoll; /**< Number of TX PS-poll */
+ uint32 rxpspoll; /**< Number of RX PS-poll */
+ uint32 txnull; /**< Number of TX NULL_DATA */
+ uint32 rxnull; /**< Number of RX NULL_DATA */
+ uint32 txqosnull; /**< Number of TX NULL_QoSDATA */
+ uint32 rxqosnull; /**< Number of RX NULL_QoSDATA */
+ uint32 txassocreq; /**< Number of TX ASSOC request */
+ uint32 rxassocreq; /**< Number of RX ASSOC request */
+ uint32 txreassocreq; /**< Number of TX REASSOC request */
+ uint32 rxreassocreq; /**< Number of RX REASSOC request */
+ uint32 txdisassoc; /**< Number of TX DISASSOC */
+ uint32 rxdisassoc; /**< Number of RX DISASSOC */
+ uint32 txassocrsp; /**< Number of TX ASSOC response */
+ uint32 rxassocrsp; /**< Number of RX ASSOC response */
+ uint32 txreassocrsp; /**< Number of TX REASSOC response */
+ uint32 rxreassocrsp; /**< Number of RX REASSOC response */
+ uint32 txauth; /**< Number of TX AUTH */
+ uint32 rxauth; /**< Number of RX AUTH */
+ uint32 txdeauth; /**< Number of TX DEAUTH */
+ uint32 rxdeauth; /**< Number of RX DEAUTH */
+ uint32 txprobereq; /**< Number of TX probe request */
+ uint32 rxprobereq; /**< Number of RX probe request */
+ uint32 txprobersp; /**< Number of TX probe response */
+ uint32 rxprobersp; /**< Number of RX probe response */
+ uint32 txaction; /**< Number of TX action frame */
+ uint32 rxaction; /**< Number of RX action frame */
+ uint32 ampdu_wds; /**< Number of AMPDU watchdogs */
+ uint32 txlost; /**< Number of lost packets reported in txs */
+ uint32 txdatamcast; /**< Number of TX multicast data packets */
+ uint32 txdatabcast; /**< Number of TX broadcast data packets */
+ uint32 psmxwds; /**< Number of PSMx watchdogs */
+ uint32 rxback; /**< blockack rxcnt */
+ uint32 txback; /**< blockack txcnt */
+ uint32 p2p_tbtt; /**< Number of P2P TBTT Events */
+ uint32 p2p_tbtt_miss; /**< Number of P2P TBTT Events Miss */
+ uint32 txqueue_start;
+ uint32 txqueue_end;
+ uint32 txbcast; /**< Broadcast TransmittedFrameCount */
+ uint32 txdropped; /**< tx dropped pkts */
+ uint32 rxbcast; /**< BroadcastReceivedFrameCount */
+ uint32 rxdropped; /**< rx dropped pkts (derived: sum of others) */
+ uint32 txq_end_assoccb; /**< forced txqueue_end callback fired in assoc */
+ uint32 tx_toss_cnt; /**< number of tx packets tossed */
+ uint32 rx_toss_cnt; /**< number of rx packets tossed */
+ uint32 last_tx_toss_rsn; /**< reason the last tx pkt was tossed */
+ uint32 last_rx_toss_rsn; /**< reason the last rx pkt was tossed */
+ uint32 pmk_badlen_cnt; /**< number of invalid pmk len */
+ uint32 txbar_notx; /**< number of TX BAR not sent (may be suppressed or muted) */
+ uint32 txbar_noack; /**< number of TX BAR sent, but not acknowledged by peer */
+ uint32 rxfrag_agedout; /**< # of rx fragments aged out */
+ uint32 pmkid_mismatch_cnt; /**< number of EAPOL msg1 PMKID mismatches */
+ uint32 txaction_vndr_attempt; /**< number of vendor-specific AFs scheduled successfully for Tx */
+ uint32 txaction_vndr_fail; /**< number of vendor-specific AFs not sent or not acked */
+ uint32 rxnofrag; /**< # of nobuf failures due to no pkt availability */
+ uint32 rxnocmplid; /**< # of nobuf failures due to rxcmplid non-availability */
+ uint32 rxnohaddr; /**< # of nobuf failures due to host address non-availability */
+
+ /* Do not remove or rename in the middle of this struct.
+ * All counter variables have to be of uint32.
+ */
+} wl_cnt_wlc_t;
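+
+/* Editorial note: the sketch below is illustrative only and is not part of
+ * the driver. It shows one way a consumer might fold the per-rate legacy rx
+ * counters of wl_cnt_wlc_t into a single total; the helper name is
+ * hypothetical, so the block is compiled out.
+ */
+#if 0
+static uint32
+wl_cnt_legacy_rx_total(const wl_cnt_wlc_t *cnt)
+{
+	/* 802.11b/g base rates only; the higher "turbo" buckets
+	 * (rx108mbps..rx540mbps) are deliberately left out here.
+	 */
+	return cnt->rx1mbps + cnt->rx2mbps + cnt->rx5mbps5 + cnt->rx6mbps +
+	       cnt->rx9mbps + cnt->rx11mbps + cnt->rx12mbps + cnt->rx18mbps +
+	       cnt->rx24mbps + cnt->rx36mbps + cnt->rx48mbps + cnt->rx54mbps;
+}
+#endif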
+
+/* HE counters, version 1 */
+#define HE_COUNTERS_V1 (1)
+typedef struct wl_he_cnt_wlc_v1 {
+ uint32 he_rxtrig_myaid; /**< rxed valid trigger frame with myaid */
+ uint32 he_rxtrig_rand; /**< rxed valid trigger frame with random aid */
+ uint32 he_colormiss_cnt; /**< for bss color mismatch cases */
+ uint32 he_txmampdu; /**< for multi-TID AMPDU transmission */
+ uint32 he_txmtid_back; /**< for multi-TID BACK transmission */
+ uint32 he_rxmtid_back; /**< reception of multi-TID BACK */
+ uint32 he_rxmsta_back; /**< reception of multi-STA BACK */
+ uint32 he_txfrag; /**< transmission of Dynamic fragmented packets */
+ uint32 he_rxdefrag; /**< reception of dynamic fragmented packets */
+ uint32 he_txtrig; /**< transmission of trigger frames */
+ uint32 he_rxtrig_basic; /**< reception of basic trigger frame */
+ uint32 he_rxtrig_murts; /**< reception of MU-RTS trigger frame */
+ uint32 he_rxtrig_bsrp; /**< reception of BSR poll trigger frame */
+ uint32 he_rxdlmu; /**< reception of DL MU PPDU */
+ uint32 he_physu_rx; /**< reception of SU frame */
+ uint32 he_phyru_rx; /**< reception of RU frame */
+ uint32 he_txtbppdu; /**< increments on transmission of every TB PPDU */
+} wl_he_cnt_wlc_v1_t;
+
+/* HE counters, version 2 */
+#define HE_COUNTERS_V2 (2)
+typedef struct wl_he_cnt_wlc_v2 {
+ uint16 version;
+ uint16 len;
+ uint32 he_rxtrig_myaid; /**< rxed valid trigger frame with myaid */
+ uint32 he_rxtrig_rand; /**< rxed valid trigger frame with random aid */
+ uint32 he_colormiss_cnt; /**< for bss color mismatch cases */
+ uint32 he_txmampdu; /**< for multi-TID AMPDU transmission */
+ uint32 he_txmtid_back; /**< for multi-TID BACK transmission */
+ uint32 he_rxmtid_back; /**< reception of multi-TID BACK */
+ uint32 he_rxmsta_back; /**< reception of multi-STA BACK */
+ uint32 he_txfrag; /**< transmission of Dynamic fragmented packets */
+ uint32 he_rxdefrag; /**< reception of dynamic fragmented packets */
+ uint32 he_txtrig; /**< transmission of trigger frames */
+ uint32 he_rxtrig_basic; /**< reception of basic trigger frame */
+ uint32 he_rxtrig_murts; /**< reception of MU-RTS trigger frame */
+ uint32 he_rxtrig_bsrp; /**< reception of BSR poll trigger frame */
+ uint32 he_rxdlmu; /**< reception of DL MU PPDU */
+ uint32 he_physu_rx; /**< reception of SU frame */
+ uint32 he_phyru_rx; /**< reception of RU frame */
+ uint32 he_txtbppdu; /**< increments on transmission of every TB PPDU */
+ uint32 he_null_tbppdu; /**< null TB PPDUs sent as a response to basic trigger frame */
+} wl_he_cnt_wlc_v2_t;
+
+/* HE counters, version 3 */
+#define WL_RU_TYPE_MAX 6
+#define HE_COUNTERS_V3 (3)
+
+typedef struct wl_he_cnt_wlc_v3 {
+ uint16 version;
+ uint16 len;
+ uint32 he_rxtrig_myaid; /**< rxed valid trigger frame with myaid */
+ uint32 he_rxtrig_rand; /**< rxed valid trigger frame with random aid */
+ uint32 he_colormiss_cnt; /**< for bss color mismatch cases */
+ uint32 he_txmampdu; /**< for multi-TID AMPDU transmission */
+ uint32 he_txmtid_back; /**< for multi-TID BACK transmission */
+ uint32 he_rxmtid_back; /**< reception of multi-TID BACK */
+ uint32 he_rxmsta_back; /**< reception of multi-STA BACK */
+ uint32 he_txfrag; /**< transmission of Dynamic fragmented packets */
+ uint32 he_rxdefrag; /**< reception of dynamic fragmented packets */
+ uint32 he_txtrig; /**< transmission of trigger frames */
+ uint32 he_rxtrig_basic; /**< reception of basic trigger frame */
+ uint32 he_rxtrig_murts; /**< reception of MU-RTS trigger frame */
+ uint32 he_rxtrig_bsrp; /**< reception of BSR poll trigger frame */
+ uint32 he_rxhemuppdu_cnt; /**< rxing HE MU PPDU */
+ uint32 he_physu_rx; /**< reception of SU frame */
+ uint32 he_phyru_rx; /**< reception of RU frame */
+ uint32 he_txtbppdu; /**< increments on transmission of every TB PPDU */
+ uint32 he_null_tbppdu; /**< null TB PPDUs sent as a response to basic trigger frame */
+ uint32 he_rxhesuppdu_cnt; /**< rxing SU PPDU */
+ uint32 he_rxhesureppdu_cnt; /**< rxing Range Extension (RE) SU PPDU */
+ uint32 he_null_zero_agg; /**< null AMPDUs transmitted in response to basic trigger
+ * because of zero aggregation
+ */
+ uint32 he_null_bsrp_rsp; /**< null AMPDUs txed in response to BSR poll */
+ uint32 he_null_fifo_empty; /**< null AMPDUs in response to basic trigger
+ * because of no frames in fifos
+ */
+ uint32 he_myAID_cnt;
+ uint32 he_rxtrig_bfm_cnt;
+ uint32 he_rxtrig_mubar;
+ uint32 rxheru[WL_RU_TYPE_MAX]; /**< rx pkt count per HE RU size */
+ uint32 txheru[WL_RU_TYPE_MAX]; /**< tx pkt count per HE RU size */
+ uint32 he_mgmt_tbppdu;
+ uint32 he_cs_req_tx_cancel;
+ uint32 he_wrong_nss;
+ uint32 he_trig_unsupp_rate;
+ uint32 he_rxtrig_nfrp;
+ uint32 he_rxtrig_bqrp;
+ uint32 he_rxtrig_gcrmubar;
+} wl_he_cnt_wlc_v3_t;
+
+/* HE counters, version 4 */
+#define HE_COUNTERS_V4 (4)
+typedef struct wl_he_cnt_wlc_v4 {
+ uint16 version;
+ uint16 len;
+ uint32 he_rxtrig_myaid; /**< rxed valid trigger frame with myaid */
+ uint32 he_rxtrig_rand; /**< rxed valid trigger frame with random aid */
+ uint32 he_colormiss_cnt; /**< for bss color mismatch cases */
+ uint32 he_txmampdu; /**< for multi-TID AMPDU transmission */
+ uint32 he_txmtid_back; /**< for multi-TID BACK transmission */
+ uint32 he_rxmtid_back; /**< reception of multi-TID BACK */
+ uint32 he_rxmsta_back; /**< reception of multi-STA BACK */
+ uint32 he_txfrag; /**< transmission of Dynamic fragmented packets */
+ uint32 he_rxdefrag; /**< reception of dynamic fragmented packets */
+ uint32 he_txtrig; /**< transmission of trigger frames */
+ uint32 he_rxtrig_basic; /**< reception of basic trigger frame */
+ uint32 he_rxtrig_murts; /**< reception of MU-RTS trigger frame */
+ uint32 he_rxtrig_bsrp; /**< reception of BSR poll trigger frame */
+ uint32 he_rxtsrt_hemuppdu_cnt; /**< rxing HE MU PPDU */
+ uint32 he_physu_rx; /**< reception of SU frame */
+ uint32 he_phyru_rx; /**< reception of RU frame */
+ uint32 he_txtbppdu; /**< increments on transmission of every TB PPDU */
+ uint32 he_null_tbppdu; /**< null TB PPDUs sent as a response to basic trigger frame */
+ uint32 he_rxstrt_hesuppdu_cnt; /**< rxing SU PPDU */
+ uint32 he_rxstrt_hesureppdu_cnt; /**< rxing Range Extension (RE) SU PPDU */
+ uint32 he_null_zero_agg; /**< null AMPDUs transmitted in response to basic trigger
+ * because of zero aggregation
+ */
+ uint32 he_null_bsrp_rsp; /**< null AMPDUs txed in response to BSR poll */
+ uint32 he_null_fifo_empty; /**< null AMPDUs in response to basic trigger
+ * because of no frames in fifos
+ */
+ uint32 he_myAID_cnt;
+ uint32 he_rxtrig_bfm_cnt;
+ uint32 he_rxtrig_mubar;
+ uint32 rxheru[WL_RU_TYPE_MAX]; /**< rx pkt count per HE RU size */
+ uint32 txheru[WL_RU_TYPE_MAX]; /**< tx pkt count per HE RU size */
+ uint32 he_mgmt_tbppdu;
+ uint32 he_cs_req_tx_cancel;
+ uint32 he_wrong_nss;
+ uint32 he_trig_unsupp_rate;
+ uint32 he_rxtrig_nfrp;
+ uint32 he_rxtrig_bqrp;
+ uint32 he_rxtrig_gcrmubar;
+ uint32 he_rxtrig_basic_htpack; /**< triggers received with HTP ack policy */
+ uint32 he_rxtrig_ed_cncl; /**< count of cancelled packets
+ * because of cs_req in trigger frame
+ */
+ uint32 he_rxtrig_suppr_null_tbppdu; /**< count of null frames sent because of
+ * suppression scenarios
+ */
+ uint32 he_ulmu_disable; /**< number of UL MU disable scenarios handled in ucode */
+ uint32 he_ulmu_data_disable; /**< number of UL MU data disable scenarios
+ * handled in ucode
+ */
+} wl_he_cnt_wlc_v4_t;
+
+/* HE counters, version 5 */
+#define HE_COUNTERS_V5 (5)
+typedef struct wl_he_cnt_wlc_v5 {
+ uint16 version;
+ uint16 len;
+ uint32 he_rxtrig_myaid; /* rxed valid trigger frame with myaid */
+ uint32 he_rxtrig_rand; /* rxed valid trigger frame with random aid */
+ uint32 he_colormiss_cnt; /* for bss color mismatch cases */
+ uint32 he_txmampdu; /* for multi-TID AMPDU transmission */
+ uint32 he_txmtid_back; /* for multi-TID BACK transmission */
+ uint32 he_rxmtid_back; /* reception of multi-TID BACK */
+ uint32 he_rxmsta_back; /* reception of multi-STA BACK */
+ uint32 he_txfrag; /* transmission of Dynamic fragmented packets */
+ uint32 he_rxdefrag; /* reception of dynamic fragmented packets */
+ uint32 he_txtrig; /* transmission of trigger frames */
+ uint32 he_rxtrig_basic; /* reception of basic trigger frame */
+ uint32 he_rxtrig_murts; /* reception of MU-RTS trigger frame */
+ uint32 he_rxtrig_bsrp; /* reception of BSR poll trigger frame */
+ uint32 he_rxtsrt_hemuppdu_cnt; /* rxing HE MU PPDU */
+ uint32 he_physu_rx; /* reception of SU frame */
+ uint32 he_phyru_rx; /* reception of RU frame */
+ uint32 he_txtbppdu; /* increments on transmission of every TB PPDU */
+ uint32 he_null_tbppdu; /* null TB PPDUs sent as a response to
+ * basic trigger frame
+ */
+ uint32 he_rxstrt_hesuppdu_cnt; /* rxing SU PPDU */
+ uint32 he_rxstrt_hesureppdu_cnt; /* rxing Range Extension (RE) SU PPDU */
+ uint32 he_null_zero_agg; /* null AMPDUs transmitted in response to
+ * basic trigger because of zero aggregation
+ */
+ uint32 he_null_bsrp_rsp; /* null AMPDUs txed in response to BSR poll */
+ uint32 he_null_fifo_empty; /* null AMPDUs in response to basic trigger
+ * because of no frames in fifos
+ */
+ uint32 he_rxtrig_bfm_cnt;
+ uint32 he_rxtrig_mubar;
+ uint32 rxheru[WL_RU_TYPE_MAX]; /* rx pkt count per HE RU size */
+ uint32 txheru[WL_RU_TYPE_MAX]; /* tx pkt count per HE RU size */
+ uint32 he_mgmt_tbppdu;
+ uint32 he_cs_req_tx_cancel;
+ uint32 he_wrong_nss;
+ uint32 he_trig_unsupp_rate;
+ uint32 he_rxtrig_nfrp;
+ uint32 he_rxtrig_bqrp;
+ uint32 he_rxtrig_gcrmubar;
+ uint32 he_rxtrig_basic_htpack; /* triggers received with HTP ack policy */
+ uint32 he_rxtrig_suppr_null_tbppdu; /* count of null frames sent because of
+ * suppression scenarios
+ */
+ uint32 he_ulmu_disable; /* number of UL MU disable scenarios handled in ucode */
+ uint32 he_ulmu_data_disable; /* number of UL MU data disable scenarios
+ * handled in ucode
+ */
+ uint32 rxheru_2x996T;
+} wl_he_cnt_wlc_v5_t;
+
+#ifndef HE_COUNTERS_VERSION_ENABLED
+#define HE_COUNTERS_VERSION (HE_COUNTERS_V1)
+typedef wl_he_cnt_wlc_v1_t wl_he_cnt_wlc_t;
+#endif /* HE_COUNTERS_VERSION_ENABLED */
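+
+/* Editorial note: illustrative sketch only, not part of the driver. From v2
+ * on, the HE counter structures are self-describing via version/len, so a
+ * consumer can sanity-check a buffer before casting; v1 has no header and
+ * must be assumed by build-time agreement. The helper name and the -1 error
+ * convention are hypothetical.
+ */
+#if 0
+static int
+wl_he_cnt_check_hdr(const void *buf, uint16 buflen)
+{
+	const wl_he_cnt_wlc_v2_t *hdr = (const wl_he_cnt_wlc_v2_t *)buf;
+
+	if ((buflen < 4u) || (hdr->len > buflen))
+		return -1;	/* truncated or inconsistent buffer */
+	switch (hdr->version) {
+	case HE_COUNTERS_V2:
+		return (hdr->len >= sizeof(wl_he_cnt_wlc_v2_t)) ? 0 : -1;
+	case HE_COUNTERS_V3:
+		return (hdr->len >= sizeof(wl_he_cnt_wlc_v3_t)) ? 0 : -1;
+	case HE_COUNTERS_V4:
+		return (hdr->len >= sizeof(wl_he_cnt_wlc_v4_t)) ? 0 : -1;
+	case HE_COUNTERS_V5:
+		return (hdr->len >= sizeof(wl_he_cnt_wlc_v5_t)) ? 0 : -1;
+	default:
+		return -1;	/* unknown version */
+	}
+}
+#endif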
+
+/* HE OMI counters, version 1 */
+#define HE_OMI_COUNTERS_V1 (1)
+typedef struct wl_he_omi_cnt_wlc_v1 {
+ uint16 version;
+ uint16 len;
+ uint32 he_omitx_sched; /* Count for total number of OMIs scheduled */
+ uint32 he_omitx_success; /* Count for OMI Tx success */
+ uint32 he_omitx_retries; /* Count for OMI retries as TxDone not set */
+ uint32 he_omitx_dur; /* Accumulated duration of OMI completion time */
+ uint32 he_omitx_ulmucfg; /* count for UL MU enable/disable change req */
+ uint32 he_omitx_ulmucfg_ack; /* count for UL MU enable/disable req txed successfully */
+ uint32 he_omitx_txnsts; /* count for Txnsts change req */
+ uint32 he_omitx_txnsts_ack; /* count for Txnsts change req txed successfully */
+ uint32 he_omitx_rxnss; /* count for Rxnss change req */
+ uint32 he_omitx_rxnss_ack; /* count for Rxnss change req txed successfully */
+ uint32 he_omitx_bw; /* count for BW change req */
+ uint32 he_omitx_bw_ack; /* count for BW change req txed successfully */
+ uint32 he_omitx_ersudis; /* count for ER SU enable/disable req */
+ uint32 he_omitx_ersudis_ack; /* count for ER SU enable/disable req txed successfully */
+ uint32 he_omitx_dlmursdrec; /* count for Resound recommendation change req */
+ uint32 he_omitx_dlmursdrec_ack; /* count for Resound recommendation req txed successfully */
+} wl_he_omi_cnt_wlc_v1_t;
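+
+/* Editorial note: illustrative sketch only, not part of the driver. Each OMI
+ * change-request counter above is paired with an "_ack" counter, so a
+ * per-field delivery ratio is a one-liner; the helper name is hypothetical.
+ */
+#if 0
+static uint32
+wl_he_omi_ack_pct(uint32 acked, uint32 sent)
+{
+	/* e.g. wl_he_omi_ack_pct(cnt->he_omitx_bw_ack, cnt->he_omitx_bw) */
+	return (sent != 0u) ? ((acked * 100u) / sent) : 0u;
+}
+#endif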
+
+/* WL_IFSTATS_XTLV_WL_SLICE_TXBF */
+/* beamforming counters version 1 */
+#define TXBF_ECOUNTERS_V1 (1u)
+#define WL_TXBF_CNT_ARRAY_SZ (8u)
+typedef struct wl_txbf_ecounters_v1 {
+ uint16 version;
+ uint16 len;
+ /* transmit beamforming stats */
+ uint16 txndpa; /* null data packet announcements */
+ uint16 txndp; /* null data packets */
+ uint16 txbfpoll; /* beamforming report polls */
+ uint16 txsf; /* subframes */
+ uint16 txcwrts; /* contention window rts */
+ uint16 txcwcts; /* contention window cts */
+ uint16 txbfm;
+ /* receive beamforming stats */
+ uint16 rxndpa_u; /* unicast NDPAs */
+ uint16 rxndpa_m; /* multicast NDPAs */
+ uint16 rxbfpoll; /* unicast bf-polls */
+ uint16 bferpt; /* beamforming reports */
+ uint16 rxsf; /* subframes */
+ uint16 rxcwrts; /* contention window rts */
+ uint16 rxcwcts; /* contention window cts */
+ uint16 rxtrig_bfpoll;
+ uint16 unused_uint16; /* pad */
+ /* sounding stats - interval capture */
+ uint16 rxnontb_sound[WL_TXBF_CNT_ARRAY_SZ]; /* non-TB sounding for last 8 captures */
+ uint16 rxtb_sound[WL_TXBF_CNT_ARRAY_SZ]; /* TB sounding count for last 8 captures */
+ uint32 cap_dur_ms[WL_TXBF_CNT_ARRAY_SZ]; /* last 8 capture durations (in ms) */
+ uint32 cap_last_ts; /* timestamp of last sample capture */
+} wl_txbf_ecounters_v1_t;
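+
+/* Editorial note: illustrative sketch only, not part of the driver.
+ * cap_dur_ms[] holds the durations of the last WL_TXBF_CNT_ARRAY_SZ sounding
+ * captures, so a mean over that window is a simple fold; the helper name is
+ * hypothetical.
+ */
+#if 0
+static uint32
+wl_txbf_avg_cap_dur_ms(const wl_txbf_ecounters_v1_t *cnt)
+{
+	uint32 i, sum = 0u;
+
+	for (i = 0u; i < WL_TXBF_CNT_ARRAY_SZ; i++)
+		sum += cnt->cap_dur_ms[i];
+	return sum / WL_TXBF_CNT_ARRAY_SZ;
+}
+#endif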
+
+/* security vulnerabilities counters */
+typedef struct {
+ uint32 ie_unknown; /* number of unknown IEs */
+ uint32 ie_invalid_length; /* number of IEs with invalid length */
+ uint32 ie_invalid_data; /* number of IEs with invalid data */
+ uint32 ipv6_invalid_length; /* number of IPv6 packets with invalid payload length */
+} wl_secvln_cnt_t;
+
+/* Reinit reasons - do not put anything other than reinit reasons here */
+/* LEGACY STRUCTURE, DO NOT MODIFY, SEE reinit_rsns_v1_t and further versions */
+typedef struct {
+ uint32 rsn[WL_REINIT_RC_LAST];
+} reinit_rsns_t;
+
+typedef struct {
+ uint16 version;
+ uint16 len;
+ uint32 rsn[WL_REINIT_RC_LAST_V2 + 1u]; /* Note: WL_REINIT_RC_LAST_V2 is the last value */
+} reinit_rsns_v2_t;
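+
+/* Editorial note: illustrative sketch only, not part of the driver. rsn[] is
+ * indexed by reinit reason code (0 = unknown reason), so the dominant reason
+ * is an argmax over the array; the helper name is hypothetical.
+ */
+#if 0
+static uint32
+wl_reinit_top_reason(const reinit_rsns_v2_t *r)
+{
+	uint32 i, top = 0u;
+
+	for (i = 1u; i <= (uint32)WL_REINIT_RC_LAST_V2; i++) {
+		if (r->rsn[i] > r->rsn[top])
+			top = i;
+	}
+	return top;	/* reason code with the highest count */
+}
+#endif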
+
+/* MACXSTAT counters for ucodex (corerev >= 64) */
+typedef struct {
+ uint32 macxsusp;
+ uint32 m2vmsg;
+ uint32 v2mmsg;
+ uint32 mboxout;
+ uint32 musnd;
+ uint32 sfb2v;
+} wl_cnt_ge64mcxst_v1_t;
+
+/** MACSTAT counters for ucode (corerev >= 40) */
+typedef struct {
+ /* MAC counters: 32-bit version of d11.h's macstat_t */
+ uint32 txallfrm; /**< total number of frames sent, incl. Data, ACK, RTS, CTS,
+ * Control Management (includes retransmissions)
+ */
+ uint32 txrtsfrm; /**< number of RTS sent out by the MAC */
+ uint32 txctsfrm; /**< number of CTS sent out by the MAC */
+ uint32 txackfrm; /**< number of ACK frames sent out */
+ uint32 txdnlfrm; /**< number of Null-Data transmission generated from template */
+ uint32 txbcnfrm; /**< beacons transmitted */
+ uint32 txfunfl[6]; /**< per-fifo tx underflows */
+ uint32 txampdu; /**< number of AMPDUs transmitted */
+ uint32 txmpdu; /**< number of MPDUs transmitted */
+ uint32 txtplunfl; /**< Template underflows (mac was too slow to transmit ACK/CTS
+ * or BCN)
+ */
+ uint32 txphyerror; /**< Transmit phy error, type of error is reported in tx-status for
+ * driver enqueued frames
+ */
+ uint32 pktengrxducast; /**< unicast frames rxed by the pkteng code */
+ uint32 pktengrxdmcast; /**< multicast frames rxed by the pkteng code */
+ uint32 rxfrmtoolong; /**< Received frame longer than legal limit (2346 bytes) */
+ uint32 rxfrmtooshrt; /**< Received frame did not contain enough bytes for its frame type */
+ uint32 rxanyerr; /**< Any RX error that is not counted by other counters. */
+ uint32 rxbadfcs; /**< number of frames for which the CRC check failed in the MAC */
+ uint32 rxbadplcp; /**< parity check of the PLCP header failed */
+ uint32 rxcrsglitch; /**< PHY was able to correlate the preamble but not the header */
+ uint32 rxstrt; /**< Number of received frames with a good PLCP
+ * (i.e. passing parity check)
+ */
+ uint32 rxdtucastmbss; /**< number of received DATA frames with good FCS and matching RA */
+ uint32 rxmgucastmbss; /**< number of received mgmt frames with good FCS and matching RA */
+ uint32 rxctlucast; /**< number of received CNTRL frames with good FCS and matching RA */
+ uint32 rxrtsucast; /**< number of unicast RTS addressed to the MAC (good FCS) */
+ uint32 rxctsucast; /**< number of unicast CTS addressed to the MAC (good FCS) */
+ uint32 rxackucast; /**< number of ucast ACKS received (good FCS) */
+ uint32 rxdtocast; /**< number of received DATA frames (good FCS and not matching RA) */
+ uint32 rxmgocast; /**< number of received MGMT frames (good FCS and not matching RA) */
+ uint32 rxctlocast; /**< number of received CNTRL frame (good FCS and not matching RA) */
+ uint32 rxrtsocast; /**< number of received RTS not addressed to the MAC */
+ uint32 rxctsocast; /**< number of received CTS not addressed to the MAC */
+ uint32 rxdtmcast; /**< number of RX Data multicast frames received by the MAC */
+ uint32 rxmgmcast; /**< number of RX Management multicast frames received by the MAC */
+ uint32 rxctlmcast; /**< number of RX Control multicast frames received by the MAC
+ * (unlikely to see these)
+ */
+ uint32 rxbeaconmbss; /**< beacons received from member of BSS */
+ uint32 rxdtucastobss; /**< number of unicast frames addressed to the MAC from
+ * other BSS (WDS FRAME)
+ */
+ uint32 rxbeaconobss; /**< beacons received from other BSS */
+ uint32 rxrsptmout; /**< number of response timeouts for transmitted frames
+ * expecting a response
+ */
+ uint32 bcntxcancl; /**< transmit beacons canceled due to receipt of beacon (IBSS) */
+ uint32 rxnodelim; /**< number of occasions that no valid delimiter is detected by the ampdu parser */
+ uint32 rxf0ovfl; /**< number of receive fifo 0 overflows */
+ uint32 rxf1ovfl; /**< number of receive fifo 1 overflows */
+ uint32 rxhlovfl; /**< number of length / header fifo overflows */
+ uint32 missbcn_dbg; /**< number of beacons missed */
+ uint32 pmqovfl; /**< number of PMQ overflows */
+ uint32 rxcgprqfrm; /**< number of received Probe requests that made it into
+ * the PRQ fifo
+ */
+ uint32 rxcgprsqovfl; /**< Rx Probe Request Queue overflow in the AP */
+ uint32 txcgprsfail; /**< Tx Probe Response Fail. AP sent probe response but did
+ * not get ACK
+ */
+ uint32 txcgprssuc; /**< Tx Probe Response Success (ACK was received) */
+ uint32 prs_timeout; /**< number of probe requests that were dropped from the PRQ
+ * fifo because a probe response could not be sent out within
+ * the time limit defined in M_PRS_MAXTIME
+ */
+ uint32 txrtsfail; /**< number of rts transmission failure that reach retry limit */
+ uint32 txucast; /**< number of unicast tx expecting response other than cts/cwcts */
+ uint32 txinrtstxop; /**< number of data frame transmissions during rts txop */
+ uint32 rxback; /**< blockack rxcnt */
+ uint32 txback; /**< blockack txcnt */
+ uint32 bphy_rxcrsglitch; /**< PHY count of bphy glitches */
+ uint32 rxdrop20s; /**< drop secondary cnt */
+ uint32 rxtoolate; /**< receive too late */
+ uint32 bphy_badplcp; /**< number of bad PLCP reception on BPHY rate */
+ /* All counter variables have to be of uint32. */
+} wl_cnt_ge40mcst_v1_t;
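+
+/* Editorial note: illustrative sketch only, not part of the driver. A rough
+ * rx FCS-error ratio can be read off rxbadfcs against rxstrt (frames with a
+ * good PLCP); the helper name is hypothetical and, being a sketch, ignores
+ * 32-bit overflow in the scaling multiply.
+ */
+#if 0
+static uint32
+wl_macstat_badfcs_permille(const wl_cnt_ge40mcst_v1_t *cnt)
+{
+	return (cnt->rxstrt != 0u) ?
+		((cnt->rxbadfcs * 1000u) / cnt->rxstrt) : 0u;
+}
+#endif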
+
+/** MACSTAT counters for ucode (corerev < 40) */
+typedef struct {
+ /* MAC counters: 32-bit version of d11.h's macstat_t */
+ uint32 txallfrm; /**< total number of frames sent, incl. Data, ACK, RTS, CTS,
+ * Control Management (includes retransmissions)
+ */
+ uint32 txrtsfrm; /**< number of RTS sent out by the MAC */
+ uint32 txctsfrm; /**< number of CTS sent out by the MAC */
+ uint32 txackfrm; /**< number of ACK frames sent out */
+ uint32 txdnlfrm; /**< number of Null-Data transmission generated from template */
+ uint32 txbcnfrm; /**< beacons transmitted */
+ uint32 txfunfl[6]; /**< per-fifo tx underflows */
+ uint32 txampdu; /**< number of AMPDUs transmitted */
+ uint32 txmpdu; /**< number of MPDUs transmitted */
+ uint32 txtplunfl; /**< Template underflows (mac was too slow to transmit ACK/CTS
+ * or BCN)
+ */
+ uint32 txphyerror; /**< Transmit phy error, type of error is reported in tx-status for
+ * driver enqueued frames
+ */
+ uint32 pktengrxducast; /**< unicast frames rxed by the pkteng code */
+ uint32 pktengrxdmcast; /**< multicast frames rxed by the pkteng code */
+ uint32 rxfrmtoolong; /**< Received frame longer than legal limit (2346 bytes) */
+ uint32 rxfrmtooshrt; /**< Received frame did not contain enough bytes for its frame type */
+ uint32 rxanyerr; /**< Any RX error that is not counted by other counters. */
+ uint32 rxbadfcs; /**< number of frames for which the CRC check failed in the MAC */
+ uint32 rxbadplcp; /**< parity check of the PLCP header failed */
+ uint32 rxcrsglitch; /**< PHY was able to correlate the preamble but not the header */
+ uint32 rxstrt; /**< Number of received frames with a good PLCP
+ * (i.e. passing parity check)
+ */
+ uint32 rxdtucastmbss; /**< number of received DATA frames with good FCS and matching RA */
+ uint32 rxmgucastmbss; /**< number of received mgmt frames with good FCS and matching RA */
+ uint32 rxctlucast; /**< number of received CNTRL frames with good FCS and matching RA */
+ uint32 rxrtsucast; /**< number of unicast RTS addressed to the MAC (good FCS) */
+ uint32 rxctsucast; /**< number of unicast CTS addressed to the MAC (good FCS) */
+ uint32 rxackucast; /**< number of ucast ACKS received (good FCS) */
+ uint32 rxdtocast; /**< number of received DATA frames (good FCS and not matching RA) */
+ uint32 rxmgocast; /**< number of received MGMT frames (good FCS and not matching RA) */
+ uint32 rxctlocast; /**< number of received CNTRL frame (good FCS and not matching RA) */
+ uint32 rxrtsocast; /**< number of received RTS not addressed to the MAC */
+ uint32 rxctsocast; /**< number of received CTS not addressed to the MAC */
+ uint32 rxdtmcast; /**< number of RX Data multicast frames received by the MAC */
+ uint32 rxmgmcast; /**< number of RX Management multicast frames received by the MAC */
+ uint32 rxctlmcast; /**< number of RX Control multicast frames received by the MAC
+ * (unlikely to see these)
+ */
+ uint32 rxbeaconmbss; /**< beacons received from member of BSS */
+ uint32 rxdtucastobss; /**< number of unicast frames addressed to the MAC from
+ * other BSS (WDS FRAME)
+ */
+ uint32 rxbeaconobss; /**< beacons received from other BSS */
+ uint32 rxrsptmout; /**< number of response timeouts for transmitted frames
+ * expecting a response
+ */
+ uint32 bcntxcancl; /**< transmit beacons canceled due to receipt of beacon (IBSS) */
+ uint32 rxnodelim; /**< number of occasions that no valid delimiter is detected by the ampdu parser */
+ uint32 rxf0ovfl; /**< number of receive fifo 0 overflows */
+ uint32 dbgoff46; /**< BTCX protection failure count,
+ * getting RX antenna in PHY DEBUG,
+ * PR84273 timeout count
+ */
+ uint32 dbgoff47; /**< BTCX preemption failure count,
+ * getting RX antenna in PHY DEBUG,
+ * PR84273 reset CCA count,
+ * RATEENGDBG
+ */
+ uint32 dbgoff48; /**< Used for counting txstatus queue overflow (corerev <= 4) */
+ uint32 pmqovfl; /**< number of PMQ overflows */
+ uint32 rxcgprqfrm; /**< number of received Probe requests that made it into
+ * the PRQ fifo
+ */
+ uint32 rxcgprsqovfl; /**< Rx Probe Request Queue overflow in the AP */
+ uint32 txcgprsfail; /**< Tx Probe Response Fail. AP sent probe response but did
+ * not get ACK
+ */
+ uint32 txcgprssuc; /**< Tx Probe Response Success (ACK was received) */
+ uint32 prs_timeout; /**< number of probe requests that were dropped from the PRQ
+ * fifo because a probe response could not be sent out within
+ * the time limit defined in M_PRS_MAXTIME
+ */
+ uint32 txrtsfail; /**< number of rts transmission failure that reach retry limit */
+ uint32 txucast; /**< number of unicast tx expecting response other than cts/cwcts */
+ uint32 txinrtstxop; /**< number of data frame transmissions during rts txop */
+ uint32 rxback; /**< blockack rxcnt */
+ uint32 txback; /**< blockack txcnt */
+ uint32 bphy_rxcrsglitch; /**< PHY count of bphy glitches */
+ uint32 phywatch; /**< number of phywatchdog to kill any pending transmissions.
+ * (PR 38187 corerev == 11)
+ */
+ uint32 rxtoolate; /**< receive too late */
+ uint32 bphy_badplcp; /**< number of bad PLCP reception on BPHY rate */
+ /* All counter variables have to be of uint32. */
+} wl_cnt_lt40mcst_v1_t;
+
+/** MACSTAT counters for ucode (corerev >= 80) */
+typedef struct {
+ /* MAC counters: 32-bit version of d11.h's macstat_t */
+ /* Start of the PSM2HOST stats (72) block */
+ uint32 txallfrm; /**< total number of frames sent, incl. Data, ACK, RTS, CTS,
+ * Control Management (includes retransmissions)
+ */
+ uint32 txrtsfrm; /**< number of RTS sent out by the MAC */
+ uint32 txctsfrm; /**< number of CTS sent out by the MAC */
+ uint32 txackfrm; /**< number of ACK frames sent out */
+ uint32 txdnlfrm; /**< number of Null-Data transmission generated from template */
+ uint32 txbcnfrm; /**< beacons transmitted */
+ uint32 txampdu; /**< number of AMPDUs transmitted */
+ uint32 txmpdu; /**< number of MPDUs transmitted */
+ uint32 txtplunfl; /**< Template underflows (mac was too slow to transmit ACK/CTS
+ * or BCN)
+ */
+ uint32 txphyerror; /**< Transmit phy error, type of error is reported in tx-status for
+ * driver enqueued frames
+ */
+ uint32 pktengrxducast; /**< unicast frames rxed by the pkteng code */
+ uint32 pktengrxdmcast; /**< multicast frames rxed by the pkteng code */
+ uint32 rxfrmtoolong; /**< Received frame longer than legal limit (2346 bytes) */
+ uint32 rxfrmtooshrt; /**< Received frame did not contain enough bytes for its frame type */
+ uint32 rxanyerr; /**< Any RX error that is not counted by other counters. */
+ uint32 rxbadfcs; /**< number of frames for which the CRC check failed in the MAC */
+ uint32 rxbadplcp; /**< parity check of the PLCP header failed */
+ uint32 rxcrsglitch; /**< PHY was able to correlate the preamble but not the header */
+ uint32 rxstrt; /**< Number of received frames with a good PLCP
+ * (i.e. passing parity check)
+ */
+ uint32 rxdtucastmbss; /**< number of received DATA frames with good FCS and matching RA */
+ uint32 rxmgucastmbss; /**< number of received mgmt frames with good FCS and matching RA */
+ uint32 rxctlucast; /**< number of received CNTRL frames with good FCS and matching RA */
+ uint32 rxrtsucast; /**< number of unicast RTS addressed to the MAC (good FCS) */
+ uint32 rxctsucast; /**< number of unicast CTS addressed to the MAC (good FCS) */
+ uint32 rxackucast; /**< number of ucast ACKS received (good FCS) */
+ uint32 rxdtocast; /**< number of received DATA frames (good FCS and not matching RA) */
+ uint32 rxmgocast; /**< number of received MGMT frames (good FCS and not matching RA) */
+ uint32 rxctlocast; /**< number of received CNTRL frame (good FCS and not matching RA) */
+ uint32 rxrtsocast; /**< number of received RTS not addressed to the MAC */
+ uint32 rxctsocast; /**< number of received CTS not addressed to the MAC */
+ uint32 rxdtmcast; /**< number of RX Data multicast frames received by the MAC */
+ uint32 rxmgmcast; /**< number of RX Management multicast frames received by the MAC */
+ uint32 rxctlmcast; /**< number of RX Control multicast frames received by the MAC
+ * (unlikely to see these)
+ */
+ uint32 rxbeaconmbss; /**< beacons received from member of BSS */
+ uint32 rxdtucastobss; /**< number of unicast frames addressed to the MAC from
+ * other BSS (WDS FRAME)
+ */
+ uint32 rxbeaconobss; /**< beacons received from other BSS */
+ uint32 rxrsptmout; /**< number of response timeouts for transmitted frames
+ * expecting a response
+ */
+ uint32 bcntxcancl; /**< transmit beacons canceled due to receipt of beacon (IBSS) */
+ uint32 rxnodelim; /**< number of occasions that no valid delimiter is detected by the ampdu parser */
+ uint32 missbcn_dbg; /**< number of beacons missed */
+ uint32 pmqovfl; /**< number of PMQ overflows */
+ uint32 rxcgprqfrm; /**< number of received Probe requests that made it into
+ * the PRQ fifo
+ */
+ uint32 rxcgprsqovfl; /**< Rx Probe Request Queue overflow in the AP */
+ uint32 txcgprsfail; /**< Tx Probe Response Fail. AP sent probe response but did
+ * not get ACK
+ */
+ uint32 txcgprssuc; /**< Tx Probe Response Success (ACK was received) */
+ uint32 prs_timeout; /**< number of probe requests that were dropped from the PRQ
+ * fifo because a probe response could not be sent out within
+ * the time limit defined in M_PRS_MAXTIME
+ */
+ uint32 txrtsfail; /**< number of rts transmission failure that reach retry limit */
+ uint32 txucast; /**< number of unicast tx expecting response other than cts/cwcts */
+ uint32 txinrtstxop; /**< number of data frame transmissions during rts txop */
+ uint32 rxback; /**< blockack rxcnt */
+ uint32 txback; /**< blockack txcnt */
+ uint32 bphy_rxcrsglitch; /**< PHY count of bphy glitches */
+ uint32 rxdrop20s; /**< drop secondary cnt */
+ uint32 rxtoolate; /**< receive too late */
+ uint32 bphy_badplcp; /**< number of bad PLCP reception on BPHY rate */
+ uint32 rxtrig_myaid; /* New counters added in corerev 80 */
+ uint32 rxtrig_rand;
+ uint32 goodfcs;
+ uint32 colormiss;
+ uint32 txmampdu;
+ uint32 rxmtidback;
+ uint32 rxmstaback;
+ uint32 txfrag;
+ /* End of PSM2HOST stats block */
+ /* Start of the rx error/overflow counter (24) block; these were modified/added in corerev 80 */
+ uint32 phyovfl;
+ uint32 rxf0ovfl; /**< number of receive fifo 0 overflows */
+ uint32 rxf1ovfl; /**< number of receive fifo 1 overflows */
+ uint32 lenfovfl;
+ uint32 weppeof;
+ uint32 badplcp;
+ uint32 msduthresh;
+ uint32 strmeof;
+ uint32 stsfifofull;
+ uint32 stsfifoerr;
+ uint32 PAD[6];
+ uint32 rxerr_stat;
+ uint32 ctx_fifo_full;
+ uint32 PAD0[9];
+ uint32 ctmode_ufc_cnt;
+ uint32 PAD1[28]; /* PAD added for counter elements to be added soon */
+} wl_cnt_ge80mcst_v1_t;
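+
+/* Editorial note: illustrative sketch only, not part of the driver. The
+ * three MACSTAT layouts above are selected by d11 core revision, per their
+ * leading comments (< 40, >= 40, >= 80); the helper below merely encodes
+ * that mapping and is hypothetical.
+ */
+#if 0
+static uint32
+wl_macstat_expected_len(uint32 corerev)
+{
+	if (corerev >= 80u)
+		return (uint32)sizeof(wl_cnt_ge80mcst_v1_t);
+	else if (corerev >= 40u)
+		return (uint32)sizeof(wl_cnt_ge40mcst_v1_t);
+	else
+		return (uint32)sizeof(wl_cnt_lt40mcst_v1_t);
+}
+#endif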
+
+typedef struct {
+ uint32 fifocount;
+ uint32 txfunfl[];
+} wl_cnt_ge80_txfunfl_v1_t;
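+
+/* Editorial note: illustrative sketch only, not part of the driver.
+ * txfunfl[] is a flexible array sized at runtime by fifocount, so the
+ * allocation/wire length is the fixed header plus fifocount elements; the
+ * macro name is hypothetical.
+ */
+#if 0
+#define WL_CNT_GE80_TXFUNFL_LEN(nfifo) \
+	(sizeof(wl_cnt_ge80_txfunfl_v1_t) + (nfifo) * sizeof(uint32))
+#endif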
+
+/** MACSTAT counters for "wl counter" version <= 10 */
+/* With ucode before its macstat cnts cleaned up */
+typedef struct {
+ /* MAC counters: 32-bit version of d11.h's macstat_t */
+ uint32 txallfrm; /**< total number of frames sent, incl. Data, ACK, RTS, CTS,
+ * Control Management (includes retransmissions)
+ */
+ uint32 txrtsfrm; /**< number of RTS sent out by the MAC */
+ uint32 txctsfrm; /**< number of CTS sent out by the MAC */
+ uint32 txackfrm; /**< number of ACK frames sent out */
+ uint32 txdnlfrm; /**< number of Null-Data transmission generated from template */
+ uint32 txbcnfrm; /**< beacons transmitted */
+ uint32 txfunfl[6]; /**< per-fifo tx underflows */
+ uint32 txfbw; /**< transmit at fallback bw (dynamic bw) */
+ uint32 PAD0; /**< padding; previously the "number of MPDUs transmitted" counter */
+ uint32 txtplunfl; /**< Template underflows (mac was too slow to transmit ACK/CTS
+ * or BCN)
+ */
+ uint32 txphyerror; /**< Transmit phy error, type of error is reported in tx-status for
+ * driver enqueued frames
+ */
+ uint32 pktengrxducast; /**< unicast frames rxed by the pkteng code */
+ uint32 pktengrxdmcast; /**< multicast frames rxed by the pkteng code */
+ uint32 rxfrmtoolong; /**< Received frame longer than legal limit (2346 bytes) */
+ uint32 rxfrmtooshrt; /**< Received frame did not contain enough bytes for its frame type */
+ uint32 rxinvmachdr; /**< Either the protocol version != 0 or frame type not
+ * data/control/management
+ */
+ uint32 rxbadfcs; /**< number of frames for which the CRC check failed in the MAC */
+ uint32 rxbadplcp; /**< parity check of the PLCP header failed */
+ uint32 rxcrsglitch; /**< PHY was able to correlate the preamble but not the header */
+ uint32 rxstrt; /**< Number of received frames with a good PLCP
+ * (i.e. passing parity check)
+ */
+ uint32 rxdfrmucastmbss; /**< number of received DATA frames with good FCS and matching RA */
+ uint32 rxmfrmucastmbss; /**< number of received mgmt frames with good FCS and matching RA */
+ uint32 rxcfrmucast; /**< number of received CNTRL frames with good FCS and matching RA */
+ uint32 rxrtsucast; /**< number of unicast RTS addressed to the MAC (good FCS) */
+ uint32 rxctsucast; /**< number of unicast CTS addressed to the MAC (good FCS) */
+ uint32 rxackucast; /**< number of ucast ACKS received (good FCS) */
+ uint32 rxdfrmocast; /**< number of received DATA frames (good FCS and not matching RA) */
+ uint32 rxmfrmocast; /**< number of received MGMT frames (good FCS and not matching RA) */
+ uint32 rxcfrmocast; /**< number of received CNTRL frame (good FCS and not matching RA) */
+ uint32 rxrtsocast; /**< number of received RTS not addressed to the MAC */
+ uint32 rxctsocast; /**< number of received CTS not addressed to the MAC */
+ uint32 rxdfrmmcast; /**< number of RX Data multicast frames received by the MAC */
+ uint32 rxmfrmmcast; /**< number of RX Management multicast frames received by the MAC */
+ uint32 rxcfrmmcast; /**< number of RX Control multicast frames received by the MAC
+ * (unlikely to see these)
+ */
+ uint32 rxbeaconmbss; /**< beacons received from member of BSS */
+ uint32 rxdfrmucastobss; /**< number of unicast frames addressed to the MAC from
+ * other BSS (WDS FRAME)
+ */
+ uint32 rxbeaconobss; /**< beacons received from other BSS */
+ uint32 rxrsptmout; /**< number of response timeouts for transmitted frames
+ * expecting a response
+ */
+ uint32 bcntxcancl; /**< transmit beacons canceled due to receipt of beacon (IBSS) */
+ uint32 PAD1;
+ uint32 rxf0ovfl; /**< number of receive fifo 0 overflows */
+ uint32 rxf1ovfl; /**< Number of receive fifo 1 overflows (obsolete) */
+ uint32 rxf2ovfl; /**< Number of receive fifo 2 overflows (obsolete) */
+ uint32 txsfovfl; /**< Number of transmit status fifo overflows (obsolete) */
+ uint32 pmqovfl; /**< number of PMQ overflows */
+ uint32 rxcgprqfrm; /**< number of received Probe requests that made it into
+ * the PRQ fifo
+ */
+ uint32 rxcgprsqovfl; /**< Rx Probe Request Queue overflow in the AP */
+ uint32 txcgprsfail; /**< Tx Probe Response Fail. AP sent probe response but did
+ * not get ACK
+ */
+ uint32 txcgprssuc; /**< Tx Probe Response Success (ACK was received) */
+ uint32 prs_timeout; /**< number of probe requests that were dropped from the PRQ
+ * fifo because a probe response could not be sent out within
+ * the time limit defined in M_PRS_MAXTIME
+ */
+ uint32 rxnack; /**< obsolete */
+ uint32 frmscons; /**< obsolete */
+ uint32 txnack; /**< obsolete */
+ uint32 rxback; /**< blockack rxcnt */
+ uint32 txback; /**< blockack txcnt */
+ uint32 bphy_rxcrsglitch; /**< PHY count of bphy glitches */
+ uint32 rxdrop20s; /**< drop secondary cnt */
+ uint32 rxtoolate; /**< receive too late */
+ uint32 bphy_badplcp; /**< number of bad PLCP reception on BPHY rate */
+ /* All counter variables have to be of uint32. */
+} wl_cnt_v_le10_mcst_t;
+
+#define MAX_RX_FIFO 3
+#define WL_RXFIFO_CNT_VERSION 1 /* current version of wl_rxfifo_cnt_t */
+typedef struct {
+ /* Counters for frames received from rx fifos */
+ uint16 version;
+ uint16 length; /* length of entire structure */
+ uint32 rxf_data[MAX_RX_FIFO]; /* data frames from rx fifo */
+ uint32 rxf_mgmtctl[MAX_RX_FIFO]; /* mgmt/ctl frames from rx fifo */
+} wl_rxfifo_cnt_t;
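+
+/* Editorial note: illustrative sketch only, not part of the driver. Per-fifo
+ * totals are a fold over the fixed-size arrays; the helper name is
+ * hypothetical.
+ */
+#if 0
+static uint32
+wl_rxfifo_data_total(const wl_rxfifo_cnt_t *cnt)
+{
+	uint32 i, sum = 0u;
+
+	for (i = 0u; i < (uint32)MAX_RX_FIFO; i++)
+		sum += cnt->rxf_data[i];
+	return sum;
+}
+#endif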
+
+typedef struct {
+ uint16 version; /**< see definition of WL_CNT_T_VERSION */
+ uint16 length; /**< length of entire structure */
+
+ /* transmit stat counters */
+ uint32 txframe; /**< tx data frames */
+ uint32 txbyte; /**< tx data bytes */
+ uint32 txretrans; /**< tx mac retransmits */
+ uint32 txerror; /**< tx data errors (derived: sum of others) */
+ uint32 txctl; /**< tx management frames */
+ uint32 txprshort; /**< tx short preamble frames */
+ uint32 txserr; /**< tx status errors */
+ uint32 txnobuf; /**< tx out of buffers errors */
+ uint32 txnoassoc; /**< tx discard because we're not associated */
+ uint32 txrunt; /**< tx runt frames */
+ uint32 txchit; /**< tx header cache hit (fastpath) */
+ uint32 txcmiss; /**< tx header cache miss (slowpath) */
+
+ /* transmit chip error counters */
+ uint32 txuflo; /**< tx fifo underflows */
+ uint32 txphyerr; /**< tx phy errors (indicated in tx status) */
+ uint32 txphycrs; /**< PR8861/8963 counter */
+
+ /* receive stat counters */
+ uint32 rxframe; /**< rx data frames */
+ uint32 rxbyte; /**< rx data bytes */
+ uint32 rxerror; /**< rx data errors (derived: sum of others) */
+ uint32 rxctl; /**< rx management frames */
+ uint32 rxnobuf; /**< rx out of buffers errors */
+ uint32 rxnondata; /**< rx non data frames in the data channel errors */
+ uint32 rxbadds; /**< rx bad DS errors */
+ uint32 rxbadcm; /**< rx bad control or management frames */
+ uint32 rxfragerr; /**< rx fragmentation errors */
+ uint32 rxrunt; /**< rx runt frames */
+ uint32 rxgiant; /**< rx giant frames */
+ uint32 rxnoscb; /**< rx no scb error */
+ uint32 rxbadproto; /**< rx invalid frames */
+ uint32 rxbadsrcmac; /**< rx frames with Invalid Src Mac */
+ uint32 rxbadda; /**< rx frames tossed for invalid da */
+ uint32 rxfilter; /**< rx frames filtered out */
+
+ /* receive chip error counters */
+ uint32 rxoflo; /**< rx fifo overflow errors */
+ uint32 rxuflo[NFIFO]; /**< rx dma descriptor underflow errors */
+
+ uint32 d11cnt_txrts_off; /**< d11cnt txrts value when reset d11cnt */
+ uint32 d11cnt_rxcrc_off; /**< d11cnt rxcrc value when reset d11cnt */
+ uint32 d11cnt_txnocts_off; /**< d11cnt txnocts value when reset d11cnt */
+
+ /* misc counters */
+ uint32 dmade; /**< tx/rx dma descriptor errors */
+ uint32 dmada; /**< tx/rx dma data errors */
+ uint32 dmape; /**< tx/rx dma descriptor protocol errors */
+ uint32 reset; /**< reset count */
+ uint32 tbtt; /**< cnts the TBTT int's */
+ uint32 txdmawar; /**< # occurrences of PR15420 workaround */
+ uint32 pkt_callback_reg_fail; /**< callbacks register failure */
+
+ /* MAC counters: 32-bit version of d11.h's macstat_t */
+ uint32 txallfrm; /**< total number of frames sent, incl. Data, ACK, RTS, CTS,
+ * Control Management (includes retransmissions)
+ */
+ uint32 txrtsfrm; /**< number of RTS sent out by the MAC */
+ uint32 txctsfrm; /**< number of CTS sent out by the MAC */
+ uint32 txackfrm; /**< number of ACK frames sent out */
+ uint32 txdnlfrm; /**< Not used */
+ uint32 txbcnfrm; /**< beacons transmitted */
+ uint32 txfunfl[6]; /**< per-fifo tx underflows */
+ uint32 rxtoolate; /**< receive too late */
+ uint32 txfbw; /**< transmit at fallback bw (dynamic bw) */
+ uint32 txtplunfl; /**< Template underflows (mac was too slow to transmit ACK/CTS
+ * or BCN)
+ */
+ uint32 txphyerror; /**< Transmit phy error, type of error is reported in tx-status for
+ * driver enqueued frames
+ */
+ uint32 rxfrmtoolong; /**< Received frame longer than legal limit (2346 bytes) */
+ uint32 rxfrmtooshrt; /**< Received frame did not contain enough bytes for its frame type */
+ uint32 rxinvmachdr; /**< Either the protocol version != 0 or frame type not
+ * data/control/management
+ */
+ uint32 rxbadfcs; /**< number of frames for which the CRC check failed in the MAC */
+ uint32 rxbadplcp; /**< parity check of the PLCP header failed */
+ uint32 rxcrsglitch; /**< PHY was able to correlate the preamble but not the header */
+ uint32 rxstrt; /**< Number of received frames with a good PLCP
+ * (i.e. passing parity check)
+ */
+ uint32 rxdfrmucastmbss; /**< number of received DATA frames with good FCS and matching RA */
+ uint32 rxmfrmucastmbss; /**< number of received mgmt frames with good FCS and matching RA */
+ uint32 rxcfrmucast; /**< number of received CNTRL frames with good FCS and matching RA */
+ uint32 rxrtsucast; /**< number of unicast RTS addressed to the MAC (good FCS) */
+ uint32 rxctsucast; /**< number of unicast CTS addressed to the MAC (good FCS) */
+ uint32 rxackucast; /**< number of ucast ACKS received (good FCS) */
+ uint32 rxdfrmocast; /**< number of received DATA frames (good FCS and not matching RA) */
+ uint32 rxmfrmocast; /**< number of received MGMT frames (good FCS and not matching RA) */
+ uint32 rxcfrmocast; /**< number of received CNTRL frame (good FCS and not matching RA) */
+ uint32 rxrtsocast; /**< number of received RTS not addressed to the MAC */
+ uint32 rxctsocast; /**< number of received CTS not addressed to the MAC */
+ uint32 rxdfrmmcast; /**< number of RX Data multicast frames received by the MAC */
+ uint32 rxmfrmmcast; /**< number of RX Management multicast frames received by the MAC */
+ uint32 rxcfrmmcast; /**< number of RX Control multicast frames received by the MAC
+ * (unlikely to see these)
+ */
+ uint32 rxbeaconmbss; /**< beacons received from member of BSS */
+ uint32 rxdfrmucastobss; /**< number of unicast frames addressed to the MAC from
+ * other BSS (WDS FRAME)
+ */
+ uint32 rxbeaconobss; /**< beacons received from other BSS */
+ uint32 rxrsptmout; /**< Number of response timeouts for transmitted frames
+ * expecting a response
+ */
+ uint32 bcntxcancl; /**< transmit beacons canceled due to receipt of beacon (IBSS) */
+ uint32 rxf0ovfl; /**< Number of receive fifo 0 overflows */
+ uint32 rxf1ovfl; /**< Number of receive fifo 1 overflows (obsolete) */
+ uint32 rxf2ovfl; /**< Number of receive fifo 2 overflows (obsolete) */
+ uint32 txsfovfl; /**< Number of transmit status fifo overflows (obsolete) */
+ uint32 pmqovfl; /**< Number of PMQ overflows */
+ uint32 rxcgprqfrm; /**< Number of received Probe requests that made it into
+ * the PRQ fifo
+ */
+ uint32 rxcgprsqovfl; /**< Rx Probe Request Queue overflow in the AP */
+ uint32 txcgprsfail; /**< Tx Probe Response Fail. AP sent probe response but did
+ * not get ACK
+ */
+ uint32 txcgprssuc; /**< Tx Probe Response Success (ACK was received) */
+ uint32 prs_timeout; /**< Number of probe requests that were dropped from the PRQ
+ * fifo because a probe response could not be sent out within
+ * the time limit defined in M_PRS_MAXTIME
+ */
+ uint32 rxnack; /**< obsolete */
+ uint32 frmscons; /**< obsolete */
+ uint32 txnack; /**< obsolete */
+ uint32 rxback; /**< blockack rxcnt */
+ uint32 txback; /**< blockack txcnt */
+
+ /* 802.11 MIB counters, pp. 614 of 802.11 reaff doc. */
+ uint32 txfrag; /**< dot11TransmittedFragmentCount */
+ uint32 txmulti; /**< dot11MulticastTransmittedFrameCount */
+ uint32 txfail; /**< dot11FailedCount */
+ uint32 txretry; /**< dot11RetryCount */
+ uint32 txretrie; /**< dot11MultipleRetryCount */
+ uint32 rxdup; /**< dot11FrameDuplicateCount */
+ uint32 txrts; /**< dot11RTSSuccessCount */
+ uint32 txnocts; /**< dot11RTSFailureCount */
+ uint32 txnoack; /**< dot11ACKFailureCount */
+ uint32 rxfrag; /**< dot11ReceivedFragmentCount */
+ uint32 rxmulti; /**< dot11MulticastReceivedFrameCount */
+ uint32 rxcrc; /**< dot11FCSErrorCount */
+ uint32 txfrmsnt; /**< dot11TransmittedFrameCount (bogus MIB?) */
+ uint32 rxundec; /**< dot11WEPUndecryptableCount */
+
+ /* WPA2 counters (see rxundec for DecryptFailureCount) */
+ uint32 tkipmicfaill; /**< TKIPLocalMICFailures */
+ uint32 tkipcntrmsr; /**< TKIPCounterMeasuresInvoked */
+ uint32 tkipreplay; /**< TKIPReplays */
+ uint32 ccmpfmterr; /**< CCMPFormatErrors */
+ uint32 ccmpreplay; /**< CCMPReplays */
+ uint32 ccmpundec; /**< CCMPDecryptErrors */
+ uint32 fourwayfail; /**< FourWayHandshakeFailures */
+ uint32 wepundec; /**< dot11WEPUndecryptableCount */
+ uint32 wepicverr; /**< dot11WEPICVErrorCount */
+ uint32 decsuccess; /**< DecryptSuccessCount */
+ uint32 tkipicverr; /**< TKIPICVErrorCount */
+ uint32 wepexcluded; /**< dot11WEPExcludedCount */
+
+ uint32 txchanrej; /**< Tx frames suppressed due to channel rejection */
+ uint32 psmwds; /**< Count PSM watchdogs */
+ uint32 phywatchdog; /**< Count Phy watchdogs (triggered by ucode) */
+
+ /* MBSS counters, AP only */
+ uint32 prq_entries_handled; /**< PRQ entries read in */
+ uint32 prq_undirected_entries; /**< which were bcast bss & ssid */
+ uint32 prq_bad_entries; /**< which could not be translated to info */
+ uint32 atim_suppress_count; /**< TX suppressions on ATIM fifo */
+ uint32 bcn_template_not_ready; /**< Template marked in use on send bcn ... */
+ uint32 bcn_template_not_ready_done; /**< ...but "DMA done" interrupt rcvd */
+ uint32 late_tbtt_dpc; /**< TBTT DPC did not happen in time */
+
+ /* per-rate receive stat counters */
+ uint32 rx1mbps; /**< packets rx at 1 Mbps */
+ uint32 rx2mbps; /**< packets rx at 2 Mbps */
+ uint32 rx5mbps5; /**< packets rx at 5.5 Mbps */
+ uint32 rx6mbps; /**< packets rx at 6 Mbps */
+ uint32 rx9mbps; /**< packets rx at 9 Mbps */
+ uint32 rx11mbps; /**< packets rx at 11 Mbps */
+ uint32 rx12mbps; /**< packets rx at 12 Mbps */
+ uint32 rx18mbps; /**< packets rx at 18 Mbps */
+ uint32 rx24mbps; /**< packets rx at 24 Mbps */
+ uint32 rx36mbps; /**< packets rx at 36 Mbps */
+ uint32 rx48mbps; /**< packets rx at 48 Mbps */
+ uint32 rx54mbps; /**< packets rx at 54 Mbps */
+ uint32 rx108mbps; /**< packets rx at 108 Mbps */
+ uint32 rx162mbps; /**< packets rx at 162 Mbps */
+ uint32 rx216mbps; /**< packets rx at 216 Mbps */
+ uint32 rx270mbps; /**< packets rx at 270 Mbps */
+ uint32 rx324mbps; /**< packets rx at 324 Mbps */
+ uint32 rx378mbps; /**< packets rx at 378 Mbps */
+ uint32 rx432mbps; /**< packets rx at 432 Mbps */
+ uint32 rx486mbps; /**< packets rx at 486 Mbps */
+ uint32 rx540mbps; /**< packets rx at 540 Mbps */
+
+ /* pkteng rx frame stats */
+ uint32 pktengrxducast; /**< unicast frames rxed by the pkteng code */
+ uint32 pktengrxdmcast; /**< multicast frames rxed by the pkteng code */
+
+ uint32 rfdisable; /**< count of radio disables */
+ uint32 bphy_rxcrsglitch; /**< PHY count of bphy glitches */
+ uint32 bphy_badplcp; /**< number of bad PLCP reception on BPHY rate */
+
+ uint32 txexptime; /**< Tx frames suppressed due to timer expiration */
+
+ uint32 txmpdu_sgi; /**< count for sgi transmit */
+ uint32 rxmpdu_sgi; /**< count for sgi received */
+ uint32 txmpdu_stbc; /**< count for stbc transmit */
+ uint32 rxmpdu_stbc; /**< count for stbc received */
+
+ uint32 rxundec_mcst; /**< dot11WEPUndecryptableCount */
+
+ /* WPA2 counters (see rxundec for DecryptFailureCount) */
+ uint32 tkipmicfaill_mcst; /**< TKIPLocalMICFailures */
+ uint32 tkipcntrmsr_mcst; /**< TKIPCounterMeasuresInvoked */
+ uint32 tkipreplay_mcst; /**< TKIPReplays */
+ uint32 ccmpfmterr_mcst; /**< CCMPFormatErrors */
+ uint32 ccmpreplay_mcst; /**< CCMPReplays */
+ uint32 ccmpundec_mcst; /**< CCMPDecryptErrors */
+ uint32 fourwayfail_mcst; /**< FourWayHandshakeFailures */
+ uint32 wepundec_mcst; /**< dot11WEPUndecryptableCount */
+ uint32 wepicverr_mcst; /**< dot11WEPICVErrorCount */
+ uint32 decsuccess_mcst; /**< DecryptSuccessCount */
+ uint32 tkipicverr_mcst; /**< TKIPICVErrorCount */
+ uint32 wepexcluded_mcst; /**< dot11WEPExcludedCount */
+
+ uint32 dma_hang; /**< count for dma hang */
+ uint32 reinit; /**< count for reinit */
+
+ uint32 pstatxucast; /**< count of ucast frames xmitted on all psta assoc */
+ uint32 pstatxnoassoc; /**< count of txnoassoc frames xmitted on all psta assoc */
+ uint32 pstarxucast; /**< count of ucast frames received on all psta assoc */
+ uint32 pstarxbcmc; /**< count of bcmc frames received on all psta */
+ uint32 pstatxbcmc; /**< count of bcmc frames transmitted on all psta */
+
+ uint32 cso_passthrough; /**< hw cso required but passthrough */
+ uint32 cso_normal; /**< hw cso hdr for normal process */
+ uint32 chained; /**< number of frames chained */
+ uint32 chainedsz1; /**< number of chain size 1 frames */
+ uint32 unchained; /**< number of frames not chained */
+ uint32 maxchainsz; /**< max chain size so far */
+ uint32 currchainsz; /**< current chain size */
+ uint32 rxdrop20s; /**< drop secondary cnt */
+ uint32 pciereset; /**< Secondary Bus Reset issued by driver */
+ uint32 cfgrestore; /**< configspace restore by driver */
+ uint32 reinitreason[NREINITREASONCOUNT]; /**< reinitreason counters; 0: Unknown reason */
+ uint32 rxrtry; /**< num of received packets with retry bit on */
+ uint32 txmpdu; /**< macstat cnt only valid in ver 11. number of MPDUs txed. */
+ uint32 rxnodelim; /**< macstat cnt only valid in ver 11.
+ * number of occasions that no valid delimiter is detected
+ * by ampdu parser.
+ */
+ uint32 rxmpdu_mu; /**< Number of MU MPDUs received */
+
+ /* detailed control/management frames */
+ uint32 txbar; /**< Number of TX BAR */
+ uint32 rxbar; /**< Number of RX BAR */
+ uint32 txpspoll; /**< Number of TX PS-poll */
+ uint32 rxpspoll; /**< Number of RX PS-poll */
+ uint32 txnull; /**< Number of TX NULL_DATA */
+ uint32 rxnull; /**< Number of RX NULL_DATA */
+ uint32 txqosnull; /**< Number of TX NULL_QoSDATA */
+ uint32 rxqosnull; /**< Number of RX NULL_QoSDATA */
+ uint32 txassocreq; /**< Number of TX ASSOC request */
+ uint32 rxassocreq; /**< Number of RX ASSOC request */
+ uint32 txreassocreq; /**< Number of TX REASSOC request */
+ uint32 rxreassocreq; /**< Number of RX REASSOC request */
+ uint32 txdisassoc; /**< Number of TX DISASSOC */
+ uint32 rxdisassoc; /**< Number of RX DISASSOC */
+ uint32 txassocrsp; /**< Number of TX ASSOC response */
+ uint32 rxassocrsp; /**< Number of RX ASSOC response */
+ uint32 txreassocrsp; /**< Number of TX REASSOC response */
+ uint32 rxreassocrsp; /**< Number of RX REASSOC response */
+ uint32 txauth; /**< Number of TX AUTH */
+ uint32 rxauth; /**< Number of RX AUTH */
+ uint32 txdeauth; /**< Number of TX DEAUTH */
+ uint32 rxdeauth; /**< Number of RX DEAUTH */
+ uint32 txprobereq; /**< Number of TX probe request */
+ uint32 rxprobereq; /**< Number of RX probe request */
+ uint32 txprobersp; /**< Number of TX probe response */
+ uint32 rxprobersp; /**< Number of RX probe response */
+ uint32 txaction; /**< Number of TX action frame */
+ uint32 rxaction; /**< Number of RX action frame */
+ uint32 ampdu_wds; /**< Number of AMPDU watchdogs */
+ uint32 txlost; /**< Number of lost packets reported in txs */
+ uint32 txdatamcast; /**< Number of TX multicast data packets */
+ uint32 txdatabcast; /**< Number of TX broadcast data packets */
+ uint32 txbcast; /**< Broadcast TransmittedFrameCount */
+ uint32 txdropped; /**< tx dropped pkts */
+ uint32 rxbcast; /**< BroadcastReceivedFrameCount */
+ uint32 rxdropped; /**< rx dropped pkts (derived: sum of others) */
+
+ /* This structure is deprecated and used only for ver <= 11.
+ * All counter variables have to be of uint32.
+ */
+} wl_cnt_ver_11_t;
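+
+/* Editorial note: illustrative sketch only, not part of the driver. Because
+ * this layout is fixed and deprecated, a consumer should gate on both the
+ * version field and the reported length before touching any member. The
+ * helper is hypothetical and assumes 11 is the WL_CNT_T_VERSION value that
+ * matches this struct.
+ */
+#if 0
+static int
+wl_cnt_ver11_valid(const wl_cnt_ver_11_t *cnt, uint16 buflen)
+{
+	return ((buflen >= sizeof(*cnt)) &&
+		(cnt->version == 11u) &&
+		(cnt->length >= sizeof(*cnt))) ? 1 : 0;
+}
+#endif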
+
+typedef struct {
+ uint16 version; /* see definition of WL_CNT_T_VERSION */
+ uint16 length; /* length of entire structure */
+
+ /* transmit stat counters */
+ uint32 txframe; /* tx data frames */
+ uint32 txbyte; /* tx data bytes */
+ uint32 txretrans; /* tx mac retransmits */
+ uint32 txerror; /* tx data errors (derived: sum of others) */
+ uint32 txctl; /* tx management frames */
+ uint32 txprshort; /* tx short preamble frames */
+ uint32 txserr; /* tx status errors */
+ uint32 txnobuf; /* tx out of buffers errors */
+ uint32 txnoassoc; /* tx discard because we're not associated */
+ uint32 txrunt; /* tx runt frames */
+ uint32 txchit; /* tx header cache hit (fastpath) */
+ uint32 txcmiss; /* tx header cache miss (slowpath) */
+
+ /* transmit chip error counters */
+ uint32 txuflo; /* tx fifo underflows */
+ uint32 txphyerr; /* tx phy errors (indicated in tx status) */
+ uint32 txphycrs; /* PR8861/8963 counter */
+
+ /* receive stat counters */
+ uint32 rxframe; /* rx data frames */
+ uint32 rxbyte; /* rx data bytes */
+ uint32 rxerror; /* rx data errors (derived: sum of others) */
+ uint32 rxctl; /* rx management frames */
+ uint32 rxnobuf; /* rx out of buffers errors */
+ uint32 rxnondata; /* rx non data frames in the data channel errors */
+ uint32 rxbadds; /* rx bad DS errors */
+ uint32 rxbadcm; /* rx bad control or management frames */
+ uint32 rxfragerr; /* rx fragmentation errors */
+ uint32 rxrunt; /* rx runt frames */
+ uint32 rxgiant; /* rx giant frames */
+ uint32 rxnoscb; /* rx no scb error */
+ uint32 rxbadproto; /* rx invalid frames */
+ uint32 rxbadsrcmac; /* rx frames with Invalid Src Mac */
+ uint32 rxbadda; /* rx frames tossed for invalid da */
+ uint32 rxfilter; /* rx frames filtered out */
+
+ /* receive chip error counters */
+ uint32 rxoflo; /* rx fifo overflow errors */
+ uint32 rxuflo[NFIFO]; /* rx dma descriptor underflow errors */
+
+ uint32 d11cnt_txrts_off; /* d11cnt txrts value when reset d11cnt */
+ uint32 d11cnt_rxcrc_off; /* d11cnt rxcrc value when reset d11cnt */
+ uint32 d11cnt_txnocts_off; /* d11cnt txnocts value when reset d11cnt */
+
+ /* misc counters */
+ uint32 dmade; /* tx/rx dma descriptor errors */
+ uint32 dmada; /* tx/rx dma data errors */
+ uint32 dmape; /* tx/rx dma descriptor protocol errors */
+ uint32 reset; /* reset count */
+ uint32 tbtt; /* cnts the TBTT int's */
+ uint32 txdmawar; /* # occurrences of PR15420 workaround */
+ uint32 pkt_callback_reg_fail; /* callbacks register failure */
+
+ /* MAC counters: 32-bit version of d11.h's macstat_t */
+ uint32 txallfrm; /* total number of frames sent, incl. Data, ACK, RTS, CTS,
+ * Control Management (includes retransmissions)
+ */
+ uint32 txrtsfrm; /* number of RTS sent out by the MAC */
+ uint32 txctsfrm; /* number of CTS sent out by the MAC */
+ uint32 txackfrm; /* number of ACK frames sent out */
+ uint32 txdnlfrm; /* Not used */
+ uint32 txbcnfrm; /* beacons transmitted */
+ uint32 txfunfl[8]; /* per-fifo tx underflows */
+ uint32 txtplunfl; /* Template underflows (mac was too slow to transmit ACK/CTS
+ * or BCN)
+ */
+ uint32 txphyerror; /* Transmit phy error, type of error is reported in tx-status for
+ * driver enqueued frames
+ */
+ uint32 rxfrmtoolong; /* Received frame longer than legal limit (2346 bytes) */
+ uint32 rxfrmtooshrt; /* Received frame did not contain enough bytes for its frame type */
+ uint32 rxinvmachdr; /* Either the protocol version != 0 or frame type not
+ * data/control/management
+ */
+ uint32 rxbadfcs; /* number of frames for which the CRC check failed in the MAC */
+ uint32 rxbadplcp; /* parity check of the PLCP header failed */
+ uint32 rxcrsglitch; /* PHY was able to correlate the preamble but not the header */
+ uint32 rxstrt; /* Number of received frames with a good PLCP
+ * (i.e. passing parity check)
+ */
+ uint32 rxdfrmucastmbss; /* Number of received DATA frames with good FCS and matching RA */
+ uint32 rxmfrmucastmbss; /* number of received mgmt frames with good FCS and matching RA */
+ uint32 rxcfrmucast; /* number of received CNTRL frames with good FCS and matching RA */
+ uint32 rxrtsucast; /* number of unicast RTS addressed to the MAC (good FCS) */
+ uint32 rxctsucast; /* number of unicast CTS addressed to the MAC (good FCS) */
+ uint32 rxackucast; /* number of ucast ACKS received (good FCS) */
+ uint32 rxdfrmocast; /* number of received DATA frames (good FCS and not matching RA) */
+ uint32 rxmfrmocast; /* number of received MGMT frames (good FCS and not matching RA) */
+ uint32 rxcfrmocast; /* number of received CNTRL frame (good FCS and not matching RA) */
+ uint32 rxrtsocast; /* number of received RTS not addressed to the MAC */
+ uint32 rxctsocast; /* number of received CTS not addressed to the MAC */
+ uint32 rxdfrmmcast; /* number of RX Data multicast frames received by the MAC */
+ uint32 rxmfrmmcast; /* number of RX Management multicast frames received by the MAC */
+ uint32 rxcfrmmcast; /* number of RX Control multicast frames received by the MAC
+ * (unlikely to see these)
+ */
+ uint32 rxbeaconmbss; /* beacons received from member of BSS */
+ uint32 rxdfrmucastobss; /* number of unicast frames addressed to the MAC from
+ * other BSS (WDS FRAME)
+ */
+ uint32 rxbeaconobss; /* beacons received from other BSS */
+ uint32 rxrsptmout; /* Number of response timeouts for transmitted frames
+ * expecting a response
+ */
+ uint32 bcntxcancl; /* transmit beacons canceled due to receipt of beacon (IBSS) */
+ uint32 rxf0ovfl; /* Number of receive fifo 0 overflows */
+ uint32 rxf1ovfl; /* Number of receive fifo 1 overflows (obsolete) */
+ uint32 rxf2ovfl; /* Number of receive fifo 2 overflows (obsolete) */
+ uint32 txsfovfl; /* Number of transmit status fifo overflows (obsolete) */
+ uint32 pmqovfl; /* Number of PMQ overflows */
+ uint32 rxcgprqfrm; /* Number of received Probe requests that made it into
+ * the PRQ fifo
+ */
+	uint32 rxcgprsqovfl;	/* Rx Probe Request Queue overflow in the AP */
+ uint32 txcgprsfail; /* Tx Probe Response Fail. AP sent probe response but did
+ * not get ACK
+ */
+ uint32 txcgprssuc; /* Tx Probe Response Success (ACK was received) */
+ uint32 prs_timeout; /* Number of probe requests that were dropped from the PRQ
+ * fifo because a probe response could not be sent out within
+ * the time limit defined in M_PRS_MAXTIME
+ */
+ uint32 rxnack; /* obsolete */
+ uint32 frmscons; /* obsolete */
+ uint32 txnack; /* obsolete */
+ uint32 txglitch_nack; /* obsolete */
+ uint32 txburst; /* obsolete */
+
+ /* 802.11 MIB counters, pp. 614 of 802.11 reaff doc. */
+ uint32 txfrag; /* dot11TransmittedFragmentCount */
+ uint32 txmulti; /* dot11MulticastTransmittedFrameCount */
+ uint32 txfail; /* dot11FailedCount */
+ uint32 txretry; /* dot11RetryCount */
+ uint32 txretrie; /* dot11MultipleRetryCount */
+ uint32 rxdup; /* dot11FrameduplicateCount */
+ uint32 txrts; /* dot11RTSSuccessCount */
+ uint32 txnocts; /* dot11RTSFailureCount */
+ uint32 txnoack; /* dot11ACKFailureCount */
+ uint32 rxfrag; /* dot11ReceivedFragmentCount */
+ uint32 rxmulti; /* dot11MulticastReceivedFrameCount */
+ uint32 rxcrc; /* dot11FCSErrorCount */
+ uint32 txfrmsnt; /* dot11TransmittedFrameCount (bogus MIB?) */
+ uint32 rxundec; /* dot11WEPUndecryptableCount */
+
+ /* WPA2 counters (see rxundec for DecryptFailureCount) */
+ uint32 tkipmicfaill; /* TKIPLocalMICFailures */
+ uint32 tkipcntrmsr; /* TKIPCounterMeasuresInvoked */
+ uint32 tkipreplay; /* TKIPReplays */
+ uint32 ccmpfmterr; /* CCMPFormatErrors */
+ uint32 ccmpreplay; /* CCMPReplays */
+ uint32 ccmpundec; /* CCMPDecryptErrors */
+ uint32 fourwayfail; /* FourWayHandshakeFailures */
+ uint32 wepundec; /* dot11WEPUndecryptableCount */
+ uint32 wepicverr; /* dot11WEPICVErrorCount */
+ uint32 decsuccess; /* DecryptSuccessCount */
+ uint32 tkipicverr; /* TKIPICVErrorCount */
+ uint32 wepexcluded; /* dot11WEPExcludedCount */
+
+ uint32 txchanrej; /* Tx frames suppressed due to channel rejection */
+ uint32 psmwds; /* Count PSM watchdogs */
+ uint32 phywatchdog; /* Count Phy watchdogs (triggered by ucode) */
+
+ /* MBSS counters, AP only */
+ uint32 prq_entries_handled; /* PRQ entries read in */
+ uint32 prq_undirected_entries; /* which were bcast bss & ssid */
+ uint32 prq_bad_entries; /* which could not be translated to info */
+ uint32 atim_suppress_count; /* TX suppressions on ATIM fifo */
+ uint32 bcn_template_not_ready; /* Template marked in use on send bcn ... */
+ uint32 bcn_template_not_ready_done; /* ...but "DMA done" interrupt rcvd */
+ uint32 late_tbtt_dpc; /* TBTT DPC did not happen in time */
+
+ /* per-rate receive stat counters */
+ uint32 rx1mbps; /* packets rx at 1Mbps */
+ uint32 rx2mbps; /* packets rx at 2Mbps */
+ uint32 rx5mbps5; /* packets rx at 5.5Mbps */
+ uint32 rx6mbps; /* packets rx at 6Mbps */
+ uint32 rx9mbps; /* packets rx at 9Mbps */
+ uint32 rx11mbps; /* packets rx at 11Mbps */
+ uint32 rx12mbps; /* packets rx at 12Mbps */
+ uint32 rx18mbps; /* packets rx at 18Mbps */
+ uint32 rx24mbps; /* packets rx at 24Mbps */
+ uint32 rx36mbps; /* packets rx at 36Mbps */
+ uint32 rx48mbps; /* packets rx at 48Mbps */
+ uint32 rx54mbps; /* packets rx at 54Mbps */
+	uint32 rx108mbps;	/* packets rx at 108Mbps */
+	uint32 rx162mbps;	/* packets rx at 162Mbps */
+	uint32 rx216mbps;	/* packets rx at 216Mbps */
+	uint32 rx270mbps;	/* packets rx at 270Mbps */
+	uint32 rx324mbps;	/* packets rx at 324Mbps */
+	uint32 rx378mbps;	/* packets rx at 378Mbps */
+	uint32 rx432mbps;	/* packets rx at 432Mbps */
+	uint32 rx486mbps;	/* packets rx at 486Mbps */
+	uint32 rx540mbps;	/* packets rx at 540Mbps */
+
+ /* pkteng rx frame stats */
+ uint32 pktengrxducast; /* unicast frames rxed by the pkteng code */
+ uint32 pktengrxdmcast; /* multicast frames rxed by the pkteng code */
+
+ uint32 rfdisable; /* count of radio disables */
+ uint32 bphy_rxcrsglitch; /* PHY count of bphy glitches */
+
+ uint32 txexptime; /* Tx frames suppressed due to timer expiration */
+
+ uint32 txmpdu_sgi; /* count for sgi transmit */
+ uint32 rxmpdu_sgi; /* count for sgi received */
+ uint32 txmpdu_stbc; /* count for stbc transmit */
+ uint32 rxmpdu_stbc; /* count for stbc received */
+
+ uint32 rxundec_mcst; /* dot11WEPUndecryptableCount */
+
+ /* WPA2 counters (see rxundec for DecryptFailureCount) */
+ uint32 tkipmicfaill_mcst; /* TKIPLocalMICFailures */
+ uint32 tkipcntrmsr_mcst; /* TKIPCounterMeasuresInvoked */
+ uint32 tkipreplay_mcst; /* TKIPReplays */
+ uint32 ccmpfmterr_mcst; /* CCMPFormatErrors */
+ uint32 ccmpreplay_mcst; /* CCMPReplays */
+ uint32 ccmpundec_mcst; /* CCMPDecryptErrors */
+ uint32 fourwayfail_mcst; /* FourWayHandshakeFailures */
+ uint32 wepundec_mcst; /* dot11WEPUndecryptableCount */
+ uint32 wepicverr_mcst; /* dot11WEPICVErrorCount */
+ uint32 decsuccess_mcst; /* DecryptSuccessCount */
+ uint32 tkipicverr_mcst; /* TKIPICVErrorCount */
+ uint32 wepexcluded_mcst; /* dot11WEPExcludedCount */
+
+	uint32 dma_hang;	/* count of DMA hang occurrences */
+ uint32 rxrtry; /* number of packets with retry bit set to 1 */
+} wl_cnt_ver_7_t;
+
+typedef struct {
+ uint16 version; /**< see definition of WL_CNT_T_VERSION */
+ uint16 length; /**< length of entire structure */
+
+ /* transmit stat counters */
+ uint32 txframe; /**< tx data frames */
+ uint32 txbyte; /**< tx data bytes */
+ uint32 txretrans; /**< tx mac retransmits */
+ uint32 txerror; /**< tx data errors (derived: sum of others) */
+ uint32 txctl; /**< tx management frames */
+ uint32 txprshort; /**< tx short preamble frames */
+ uint32 txserr; /**< tx status errors */
+ uint32 txnobuf; /**< tx out of buffers errors */
+ uint32 txnoassoc; /**< tx discard because we're not associated */
+ uint32 txrunt; /**< tx runt frames */
+ uint32 txchit; /**< tx header cache hit (fastpath) */
+ uint32 txcmiss; /**< tx header cache miss (slowpath) */
+
+ /* transmit chip error counters */
+ uint32 txuflo; /**< tx fifo underflows */
+ uint32 txphyerr; /**< tx phy errors (indicated in tx status) */
+ uint32 txphycrs; /**< PR8861/8963 counter */
+
+ /* receive stat counters */
+ uint32 rxframe; /**< rx data frames */
+ uint32 rxbyte; /**< rx data bytes */
+ uint32 rxerror; /**< rx data errors (derived: sum of others) */
+ uint32 rxctl; /**< rx management frames */
+ uint32 rxnobuf; /**< rx out of buffers errors */
+ uint32 rxnondata; /**< rx non data frames in the data channel errors */
+ uint32 rxbadds; /**< rx bad DS errors */
+ uint32 rxbadcm; /**< rx bad control or management frames */
+ uint32 rxfragerr; /**< rx fragmentation errors */
+ uint32 rxrunt; /**< rx runt frames */
+ uint32 rxgiant; /**< rx giant frames */
+ uint32 rxnoscb; /**< rx no scb error */
+ uint32 rxbadproto; /**< rx invalid frames */
+ uint32 rxbadsrcmac; /**< rx frames with Invalid Src Mac */
+ uint32 rxbadda; /**< rx frames tossed for invalid da */
+ uint32 rxfilter; /**< rx frames filtered out */
+
+ /* receive chip error counters */
+ uint32 rxoflo; /**< rx fifo overflow errors */
+ uint32 rxuflo[NFIFO]; /**< rx dma descriptor underflow errors */
+
+ uint32 d11cnt_txrts_off; /**< d11cnt txrts value when reset d11cnt */
+ uint32 d11cnt_rxcrc_off; /**< d11cnt rxcrc value when reset d11cnt */
+ uint32 d11cnt_txnocts_off; /**< d11cnt txnocts value when reset d11cnt */
+
+ /* misc counters */
+ uint32 dmade; /**< tx/rx dma descriptor errors */
+ uint32 dmada; /**< tx/rx dma data errors */
+ uint32 dmape; /**< tx/rx dma descriptor protocol errors */
+ uint32 reset; /**< reset count */
+	uint32 tbtt;		/**< counts the TBTT interrupts */
+ uint32 txdmawar; /**< # occurrences of PR15420 workaround */
+ uint32 pkt_callback_reg_fail; /**< callbacks register failure */
+
+ /* MAC counters: 32-bit version of d11.h's macstat_t */
+	uint32 txallfrm;	/**< total number of frames sent, incl. Data, ACK, RTS, CTS,
+				 * Control, Management (includes retransmissions)
+				 */
+ uint32 txrtsfrm; /**< number of RTS sent out by the MAC */
+ uint32 txctsfrm; /**< number of CTS sent out by the MAC */
+ uint32 txackfrm; /**< number of ACK frames sent out */
+ uint32 txdnlfrm; /**< Not used */
+ uint32 txbcnfrm; /**< beacons transmitted */
+ uint32 txfunfl[6]; /**< per-fifo tx underflows */
+ uint32 rxtoolate; /**< receive too late */
+ uint32 txfbw; /**< transmit at fallback bw (dynamic bw) */
+ uint32 txtplunfl; /**< Template underflows (mac was too slow to transmit ACK/CTS
+ * or BCN)
+ */
+ uint32 txphyerror; /**< Transmit phy error, type of error is reported in tx-status for
+ * driver enqueued frames
+ */
+ uint32 rxfrmtoolong; /**< Received frame longer than legal limit (2346 bytes) */
+ uint32 rxfrmtooshrt; /**< Received frame did not contain enough bytes for its frame type */
+ uint32 rxinvmachdr; /**< Either the protocol version != 0 or frame type not
+ * data/control/management
+ */
+ uint32 rxbadfcs; /**< number of frames for which the CRC check failed in the MAC */
+ uint32 rxbadplcp; /**< parity check of the PLCP header failed */
+ uint32 rxcrsglitch; /**< PHY was able to correlate the preamble but not the header */
+ uint32 rxstrt; /**< Number of received frames with a good PLCP
+ * (i.e. passing parity check)
+ */
+ uint32 rxdfrmucastmbss; /**< # of received DATA frames with good FCS and matching RA */
+ uint32 rxmfrmucastmbss; /**< # of received mgmt frames with good FCS and matching RA */
+ uint32 rxcfrmucast; /**< # of received CNTRL frames with good FCS and matching RA */
+ uint32 rxrtsucast; /**< number of unicast RTS addressed to the MAC (good FCS) */
+ uint32 rxctsucast; /**< number of unicast CTS addressed to the MAC (good FCS) */
+ uint32 rxackucast; /**< number of ucast ACKS received (good FCS) */
+ uint32 rxdfrmocast; /**< # of received DATA frames (good FCS and not matching RA) */
+ uint32 rxmfrmocast; /**< # of received MGMT frames (good FCS and not matching RA) */
+ uint32 rxcfrmocast; /**< # of received CNTRL frame (good FCS and not matching RA) */
+ uint32 rxrtsocast; /**< number of received RTS not addressed to the MAC */
+ uint32 rxctsocast; /**< number of received CTS not addressed to the MAC */
+ uint32 rxdfrmmcast; /**< number of RX Data multicast frames received by the MAC */
+ uint32 rxmfrmmcast; /**< number of RX Management multicast frames received by the MAC */
+ uint32 rxcfrmmcast; /**< number of RX Control multicast frames received by the MAC
+ * (unlikely to see these)
+ */
+ uint32 rxbeaconmbss; /**< beacons received from member of BSS */
+ uint32 rxdfrmucastobss; /**< number of unicast frames addressed to the MAC from
+ * other BSS (WDS FRAME)
+ */
+ uint32 rxbeaconobss; /**< beacons received from other BSS */
+ uint32 rxrsptmout; /**< Number of response timeouts for transmitted frames
+ * expecting a response
+ */
+ uint32 bcntxcancl; /**< transmit beacons canceled due to receipt of beacon (IBSS) */
+ uint32 rxf0ovfl; /**< Number of receive fifo 0 overflows */
+ uint32 rxf1ovfl; /**< Number of receive fifo 1 overflows (obsolete) */
+ uint32 rxf2ovfl; /**< Number of receive fifo 2 overflows (obsolete) */
+ uint32 txsfovfl; /**< Number of transmit status fifo overflows (obsolete) */
+ uint32 pmqovfl; /**< Number of PMQ overflows */
+ uint32 rxcgprqfrm; /**< Number of received Probe requests that made it into
+ * the PRQ fifo
+ */
+	uint32 rxcgprsqovfl;	/**< Rx Probe Request Queue overflow in the AP */
+ uint32 txcgprsfail; /**< Tx Probe Response Fail. AP sent probe response but did
+ * not get ACK
+ */
+ uint32 txcgprssuc; /**< Tx Probe Response Success (ACK was received) */
+ uint32 prs_timeout; /**< Number of probe requests that were dropped from the PRQ
+ * fifo because a probe response could not be sent out within
+ * the time limit defined in M_PRS_MAXTIME
+ */
+ uint32 rxnack; /**< Number of NACKS received (Afterburner) */
+ uint32 frmscons; /**< Number of frames completed without transmission because of an
+ * Afterburner re-queue
+ */
+ uint32 txnack; /**< obsolete */
+ uint32 rxback; /**< blockack rxcnt */
+ uint32 txback; /**< blockack txcnt */
+
+ /* 802.11 MIB counters, pp. 614 of 802.11 reaff doc. */
+ uint32 txfrag; /**< dot11TransmittedFragmentCount */
+ uint32 txmulti; /**< dot11MulticastTransmittedFrameCount */
+ uint32 txfail; /**< dot11FailedCount */
+ uint32 txretry; /**< dot11RetryCount */
+ uint32 txretrie; /**< dot11MultipleRetryCount */
+ uint32 rxdup; /**< dot11FrameduplicateCount */
+ uint32 txrts; /**< dot11RTSSuccessCount */
+ uint32 txnocts; /**< dot11RTSFailureCount */
+ uint32 txnoack; /**< dot11ACKFailureCount */
+ uint32 rxfrag; /**< dot11ReceivedFragmentCount */
+ uint32 rxmulti; /**< dot11MulticastReceivedFrameCount */
+ uint32 rxcrc; /**< dot11FCSErrorCount */
+ uint32 txfrmsnt; /**< dot11TransmittedFrameCount (bogus MIB?) */
+ uint32 rxundec; /**< dot11WEPUndecryptableCount */
+
+ /* WPA2 counters (see rxundec for DecryptFailureCount) */
+ uint32 tkipmicfaill; /**< TKIPLocalMICFailures */
+ uint32 tkipcntrmsr; /**< TKIPCounterMeasuresInvoked */
+ uint32 tkipreplay; /**< TKIPReplays */
+ uint32 ccmpfmterr; /**< CCMPFormatErrors */
+ uint32 ccmpreplay; /**< CCMPReplays */
+ uint32 ccmpundec; /**< CCMPDecryptErrors */
+ uint32 fourwayfail; /**< FourWayHandshakeFailures */
+ uint32 wepundec; /**< dot11WEPUndecryptableCount */
+ uint32 wepicverr; /**< dot11WEPICVErrorCount */
+ uint32 decsuccess; /**< DecryptSuccessCount */
+ uint32 tkipicverr; /**< TKIPICVErrorCount */
+ uint32 wepexcluded; /**< dot11WEPExcludedCount */
+
+ uint32 rxundec_mcst; /**< dot11WEPUndecryptableCount */
+
+ /* WPA2 counters (see rxundec for DecryptFailureCount) */
+ uint32 tkipmicfaill_mcst; /**< TKIPLocalMICFailures */
+ uint32 tkipcntrmsr_mcst; /**< TKIPCounterMeasuresInvoked */
+ uint32 tkipreplay_mcst; /**< TKIPReplays */
+ uint32 ccmpfmterr_mcst; /**< CCMPFormatErrors */
+ uint32 ccmpreplay_mcst; /**< CCMPReplays */
+ uint32 ccmpundec_mcst; /**< CCMPDecryptErrors */
+ uint32 fourwayfail_mcst; /**< FourWayHandshakeFailures */
+ uint32 wepundec_mcst; /**< dot11WEPUndecryptableCount */
+ uint32 wepicverr_mcst; /**< dot11WEPICVErrorCount */
+ uint32 decsuccess_mcst; /**< DecryptSuccessCount */
+ uint32 tkipicverr_mcst; /**< TKIPICVErrorCount */
+ uint32 wepexcluded_mcst; /**< dot11WEPExcludedCount */
+
+ uint32 txchanrej; /**< Tx frames suppressed due to channel rejection */
+ uint32 txexptime; /**< Tx frames suppressed due to timer expiration */
+ uint32 psmwds; /**< Count PSM watchdogs */
+ uint32 phywatchdog; /**< Count Phy watchdogs (triggered by ucode) */
+
+ /* MBSS counters, AP only */
+ uint32 prq_entries_handled; /**< PRQ entries read in */
+ uint32 prq_undirected_entries; /**< which were bcast bss & ssid */
+ uint32 prq_bad_entries; /**< which could not be translated to info */
+ uint32 atim_suppress_count; /**< TX suppressions on ATIM fifo */
+ uint32 bcn_template_not_ready; /**< Template marked in use on send bcn ... */
+ uint32 bcn_template_not_ready_done; /**< ...but "DMA done" interrupt rcvd */
+ uint32 late_tbtt_dpc; /**< TBTT DPC did not happen in time */
+
+ /* per-rate receive stat counters */
+ uint32 rx1mbps; /**< packets rx at 1Mbps */
+ uint32 rx2mbps; /**< packets rx at 2Mbps */
+ uint32 rx5mbps5; /**< packets rx at 5.5Mbps */
+ uint32 rx6mbps; /**< packets rx at 6Mbps */
+ uint32 rx9mbps; /**< packets rx at 9Mbps */
+ uint32 rx11mbps; /**< packets rx at 11Mbps */
+ uint32 rx12mbps; /**< packets rx at 12Mbps */
+ uint32 rx18mbps; /**< packets rx at 18Mbps */
+ uint32 rx24mbps; /**< packets rx at 24Mbps */
+ uint32 rx36mbps; /**< packets rx at 36Mbps */
+ uint32 rx48mbps; /**< packets rx at 48Mbps */
+ uint32 rx54mbps; /**< packets rx at 54Mbps */
+	uint32 rx108mbps;	/**< packets rx at 108Mbps */
+	uint32 rx162mbps;	/**< packets rx at 162Mbps */
+	uint32 rx216mbps;	/**< packets rx at 216Mbps */
+	uint32 rx270mbps;	/**< packets rx at 270Mbps */
+	uint32 rx324mbps;	/**< packets rx at 324Mbps */
+	uint32 rx378mbps;	/**< packets rx at 378Mbps */
+	uint32 rx432mbps;	/**< packets rx at 432Mbps */
+	uint32 rx486mbps;	/**< packets rx at 486Mbps */
+	uint32 rx540mbps;	/**< packets rx at 540Mbps */
+
+ /* pkteng rx frame stats */
+ uint32 pktengrxducast; /**< unicast frames rxed by the pkteng code */
+ uint32 pktengrxdmcast; /**< multicast frames rxed by the pkteng code */
+
+ uint32 rfdisable; /**< count of radio disables */
+ uint32 bphy_rxcrsglitch; /**< PHY count of bphy glitches */
+	uint32 bphy_badplcp;	/**< bad PLCP receptions on bphy rates */
+
+ uint32 txmpdu_sgi; /**< count for sgi transmit */
+ uint32 rxmpdu_sgi; /**< count for sgi received */
+ uint32 txmpdu_stbc; /**< count for stbc transmit */
+ uint32 rxmpdu_stbc; /**< count for stbc received */
+
+ uint32 rxdrop20s; /**< drop secondary cnt */
+ /* All counter variables have to be of uint32. */
+} wl_cnt_ver_6_t;
+
+#define WL_DELTA_STATS_T_VERSION 2 /**< current version of wl_delta_stats_t struct */
+
+typedef struct {
+ uint16 version; /**< see definition of WL_DELTA_STATS_T_VERSION */
+ uint16 length; /**< length of entire structure */
+
+ /* transmit stat counters */
+ uint32 txframe; /**< tx data frames */
+ uint32 txbyte; /**< tx data bytes */
+ uint32 txretrans; /**< tx mac retransmits */
+ uint32 txfail; /**< tx failures */
+
+ /* receive stat counters */
+ uint32 rxframe; /**< rx data frames */
+ uint32 rxbyte; /**< rx data bytes */
+
+ /* per-rate receive stat counters */
+ uint32 rx1mbps; /**< packets rx at 1Mbps */
+ uint32 rx2mbps; /**< packets rx at 2Mbps */
+ uint32 rx5mbps5; /**< packets rx at 5.5Mbps */
+ uint32 rx6mbps; /**< packets rx at 6Mbps */
+ uint32 rx9mbps; /**< packets rx at 9Mbps */
+ uint32 rx11mbps; /**< packets rx at 11Mbps */
+ uint32 rx12mbps; /**< packets rx at 12Mbps */
+ uint32 rx18mbps; /**< packets rx at 18Mbps */
+ uint32 rx24mbps; /**< packets rx at 24Mbps */
+ uint32 rx36mbps; /**< packets rx at 36Mbps */
+ uint32 rx48mbps; /**< packets rx at 48Mbps */
+ uint32 rx54mbps; /**< packets rx at 54Mbps */
+	uint32 rx108mbps;	/**< packets rx at 108Mbps */
+	uint32 rx162mbps;	/**< packets rx at 162Mbps */
+	uint32 rx216mbps;	/**< packets rx at 216Mbps */
+	uint32 rx270mbps;	/**< packets rx at 270Mbps */
+	uint32 rx324mbps;	/**< packets rx at 324Mbps */
+	uint32 rx378mbps;	/**< packets rx at 378Mbps */
+	uint32 rx432mbps;	/**< packets rx at 432Mbps */
+	uint32 rx486mbps;	/**< packets rx at 486Mbps */
+	uint32 rx540mbps;	/**< packets rx at 540Mbps */
+
+ /* phy stats */
+	uint32 rxbadplcp;	/**< PLCP header parity check failures */
+	uint32 rxcrsglitch;	/**< PHY correlated the preamble but not the header */
+	uint32 bphy_rxcrsglitch;	/**< PHY count of bphy glitches */
+	uint32 bphy_badplcp;	/**< bad PLCP receptions on bphy rates */
+
+ uint32 slice_index; /**< Slice for which stats are reported */
+
+} wl_delta_stats_t;
+
+/* Partial statistics counter report */
+#define WL_CNT_CTL_MGT_FRAMES 0
+
+typedef struct {
+ uint16 type;
+ uint16 len;
+
+ /* detailed control/management frames */
+ uint32 txnull;
+ uint32 rxnull;
+ uint32 txqosnull;
+ uint32 rxqosnull;
+ uint32 txassocreq;
+ uint32 rxassocreq;
+ uint32 txreassocreq;
+ uint32 rxreassocreq;
+ uint32 txdisassoc;
+ uint32 rxdisassoc;
+ uint32 txassocrsp;
+ uint32 rxassocrsp;
+ uint32 txreassocrsp;
+ uint32 rxreassocrsp;
+ uint32 txauth;
+ uint32 rxauth;
+ uint32 txdeauth;
+ uint32 rxdeauth;
+ uint32 txprobereq;
+ uint32 rxprobereq;
+ uint32 txprobersp;
+ uint32 rxprobersp;
+ uint32 txaction;
+ uint32 rxaction;
+ uint32 txrts;
+ uint32 rxrts;
+ uint32 txcts;
+ uint32 rxcts;
+ uint32 txack;
+ uint32 rxack;
+ uint32 txbar;
+ uint32 rxbar;
+ uint32 txback;
+ uint32 rxback;
+ uint32 txpspoll;
+ uint32 rxpspoll;
+} wl_ctl_mgt_cnt_t;
+
+typedef struct {
+ uint32 packets;
+ uint32 bytes;
+} wl_traffic_stats_t;
+
+typedef struct {
+ uint16 version; /**< see definition of WL_WME_CNT_VERSION */
+ uint16 length; /**< length of entire structure */
+
+ wl_traffic_stats_t tx[AC_COUNT]; /**< Packets transmitted */
+ wl_traffic_stats_t tx_failed[AC_COUNT]; /**< Packets dropped or failed to transmit */
+ wl_traffic_stats_t rx[AC_COUNT]; /**< Packets received */
+ wl_traffic_stats_t rx_failed[AC_COUNT]; /**< Packets failed to receive */
+
+ wl_traffic_stats_t forward[AC_COUNT]; /**< Packets forwarded by AP */
+
+ wl_traffic_stats_t tx_expired[AC_COUNT]; /**< packets dropped due to lifetime expiry */
+
+} wl_wme_cnt_t;
+
+struct wl_msglevel2 {
+ uint32 low;
+ uint32 high;
+};
+
+/* A versioned structure for setting and retrieving debug message levels. */
+#define WL_MSGLEVEL_STRUCT_VERSION_1 1
+
+typedef struct wl_msglevel_v1 {
+ uint16 version;
+ uint16 length;
+ uint32 msglevel1;
+ uint32 msglevel2;
+ uint32 msglevel3;
+ /* add another uint32 when full */
+} wl_msglevel_v1_t;
+
+#define WL_ICMP_IPV6_CFG_VERSION 1
+#define WL_ICMP_IPV6_CLEAR_ALL (1 << 0)
+
+typedef struct wl_icmp_ipv6_cfg {
+ uint16 version;
+ uint16 length;
+ uint16 fixed_length;
+ uint16 flags;
+ uint32 num_ipv6;
+ /* num_ipv6 to follow */
+ struct ipv6_addr host_ipv6[];
+} wl_icmp_ipv6_cfg_t;
+
+#define WL_ICMP_CFG_IPV6_FIXED_LEN OFFSETOF(wl_icmp_ipv6_cfg_t, host_ipv6)
+#define WL_ICMP_CFG_IPV6_LEN(count) (WL_ICMP_CFG_IPV6_FIXED_LEN + \
+ ((count) * sizeof(struct ipv6_addr)))
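+
+/* Illustrative sketch (hypothetical helper, not part of the driver API):
+ * sizing and initializing the variable-length ICMPv6 offload config using
+ * the macros above. The caller fills host_ipv6[] and issues the IOVAR.
+ */
+#if defined(WL_EXAMPLE_SNIPPETS) /* hypothetical guard: examples only */
+static int
+example_init_icmp_ipv6_cfg(wl_icmp_ipv6_cfg_t *cfg, uint32 buf_len,
+	uint32 num_ipv6)
+{
+	uint32 need = WL_ICMP_CFG_IPV6_LEN(num_ipv6);
+
+	if (buf_len < need)
+		return -1;	/* caller's buffer is too small */
+	cfg->version = WL_ICMP_IPV6_CFG_VERSION;
+	cfg->length = (uint16)need;
+	cfg->fixed_length = (uint16)WL_ICMP_CFG_IPV6_FIXED_LEN;
+	cfg->flags = 0;
+	cfg->num_ipv6 = num_ipv6;
+	return 0;
+}
+#endif /* WL_EXAMPLE_SNIPPETS */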
+
+typedef struct wl_mkeep_alive_pkt {
+ uint16 version; /* Version for mkeep_alive */
+ uint16 length; /* length of fixed parameters in the structure */
+ uint32 period_msec; /* high bit on means immediate send */
+ uint16 len_bytes;
+ uint8 keep_alive_id; /* 0 - 3 for N = 4 */
+ uint8 data[1];
+} wl_mkeep_alive_pkt_t;
+
+#define WL_MKEEP_ALIVE_VERSION 1
+#define WL_MKEEP_ALIVE_FIXED_LEN OFFSETOF(wl_mkeep_alive_pkt_t, data)
+/* 1/2 second precision since idle time is a seconds counter anyway */
+#define WL_MKEEP_ALIVE_PRECISION 500
+#define WL_MKEEP_ALIVE_PERIOD_MASK 0x7FFFFFFF
+#define WL_MKEEP_ALIVE_IMMEDIATE 0x80000000
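+
+/* Illustrative sketch (hypothetical helper, examples only): encoding
+ * period_msec for wl_mkeep_alive_pkt_t. The top bit requests an immediate
+ * first transmission; the low 31 bits carry the period in milliseconds.
+ */
+#if defined(WL_EXAMPLE_SNIPPETS) /* hypothetical guard: examples only */
+static uint32
+example_mkeep_alive_period(uint32 period_msec, bool send_now)
+{
+	uint32 v = period_msec & WL_MKEEP_ALIVE_PERIOD_MASK;
+
+	if (send_now)
+		v |= WL_MKEEP_ALIVE_IMMEDIATE;
+	return v;
+}
+#endif /* WL_EXAMPLE_SNIPPETS */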
+
+typedef struct wl_mkeep_alive_hist_info_v1 {
+ uint32 first_pktsend_ts; /**< timestamp(ms): packet was sent */
+ uint32 first_txs_ts; /**< timestamp(ms): received the first txstatus */
+ uint32 last_retry_txs_ts; /**< timestamp(ms): received the last txstatus */
+ uint32 first_retry_ts; /**< timestamp(ms): resent the packet first time */
+ uint32 last_retry_ts; /**< timestamp(ms): resent the packet last time */
+ uint32 first_txs; /**< txstatus when dongle received first time */
+ uint32 last_retry_txs; /**< txstatus when dongle received last time */
+ uint32 retry_cnt; /**< number of retries for the packet */
+} wl_mkeep_alive_hist_info_v1_t;
+
+typedef struct wl_mkeep_alive_hist_req_v1 {
+ uint16 version; /**< version of structure */
+ uint16 length; /**< length of this structure */
+ uint16 flags; /**< mkeepalive idx, operation codes */
+ uint16 count; /**< number of results */
+ uint16 max; /**< maximum number of history */
+ wl_mkeep_alive_hist_info_v1_t info[]; /**< struct array of length count */
+} wl_mkeep_alive_hist_req_v1_t;
+
+/* version of the mkeep_alive_hist IOVAR */
+#define WL_MKEEP_ALIVE_HIST_REQ_VER_1 1u
+/* Fixed length of wl_mkeep_alive_hist_req_v1_t */
+#define WL_MKEEP_ALIVE_HIST_REQ_FIXED_LEN_VER_1 OFFSETOF(wl_mkeep_alive_hist_req_v1_t, info)
+/* Keepalive ID */
+#define WL_MKEEP_ALIVE_HIST_ID_MASK 0xFF00u /**< ID mask */
+#define WL_MKEEP_ALIVE_HIST_ID_SHIFT 8u /**< Offset of keepalive ID */
+/* OP Codes */
+#define WL_MKEEP_ALIVE_HIST_OP_MASK 0x00FFu /**< OP code mask */
+#define WL_MKEEP_ALIVE_HIST_RESET (0x1u << 0u) /**< Clear history of specified ID */
+#define WL_MKEEP_ALIVE_HIST_RESET_ALL (0x1u << 1u) /**< Clear all history */
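+
+/* Illustrative sketch (hypothetical helper, examples only): packing a
+ * keepalive ID and an operation code into the 'flags' field of
+ * wl_mkeep_alive_hist_req_v1_t, using the ID/OP masks defined above.
+ */
+#if defined(WL_EXAMPLE_SNIPPETS) /* hypothetical guard: examples only */
+static uint16
+example_mkeep_alive_hist_flags(uint8 keep_alive_id, uint16 op)
+{
+	return (uint16)(((keep_alive_id << WL_MKEEP_ALIVE_HIST_ID_SHIFT) &
+		WL_MKEEP_ALIVE_HIST_ID_MASK) |
+		(op & WL_MKEEP_ALIVE_HIST_OP_MASK));
+}
+#endif /* WL_EXAMPLE_SNIPPETS */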
+
+/** TCP Keep-Alive conn struct */
+typedef struct wl_mtcpkeep_alive_conn_pkt {
+ struct ether_addr saddr; /**< src mac address */
+ struct ether_addr daddr; /**< dst mac address */
+ struct ipv4_addr sipaddr; /**< source IP addr */
+ struct ipv4_addr dipaddr; /**< dest IP addr */
+ uint16 sport; /**< src port */
+ uint16 dport; /**< dest port */
+ uint32 seq; /**< seq number */
+ uint32 ack; /**< ACK number */
+ uint16 tcpwin; /**< TCP window */
+ uint16 PAD;
+} wl_mtcpkeep_alive_conn_pkt_t;
+
+/** TCP Keep-Alive interval struct */
+typedef struct wl_mtcpkeep_alive_timers_pkt {
+ uint16 interval; /**< interval timer */
+ uint16 retry_interval; /**< retry_interval timer */
+ uint16 retry_count; /**< retry_count */
+} wl_mtcpkeep_alive_timers_pkt_t;
+
+typedef struct wake_info {
+ uint32 wake_reason;
+ uint32 wake_info_len; /**< size of packet */
+ uint8 packet[];
+} wake_info_t;
+
+typedef struct wake_pkt {
+ uint32 wake_pkt_len; /**< size of packet */
+ uint8 packet[];
+} wake_pkt_t;
+
+#define WL_MTCPKEEP_ALIVE_VERSION 1
+
+/* #ifdef WLBA */
+
+#define WLC_BA_CNT_VERSION 1 /**< current version of wlc_ba_cnt_t */
+
+/** block ack related stats */
+typedef struct wlc_ba_cnt {
+ uint16 version; /**< WLC_BA_CNT_VERSION */
+ uint16 length; /**< length of entire structure */
+
+ /* transmit stat counters */
+ uint32 txpdu; /**< pdus sent */
+ uint32 txsdu; /**< sdus sent */
+ uint32 txfc; /**< tx side flow controlled packets */
+ uint32 txfci; /**< tx side flow control initiated */
+ uint32 txretrans; /**< retransmitted pdus */
+ uint32 txbatimer; /**< ba resend due to timer */
+ uint32 txdrop; /**< dropped packets */
+ uint32 txaddbareq; /**< addba req sent */
+ uint32 txaddbaresp; /**< addba resp sent */
+ uint32 txdelba; /**< delba sent */
+ uint32 txba; /**< ba sent */
+ uint32 txbar; /**< bar sent */
+ uint32 txpad[4]; /**< future */
+
+ /* receive side counters */
+ uint32 rxpdu; /**< pdus recd */
+ uint32 rxqed; /**< pdus buffered before sending up */
+ uint32 rxdup; /**< duplicate pdus */
+ uint32 rxnobuf; /**< pdus discarded due to no buf */
+ uint32 rxaddbareq; /**< addba req recd */
+ uint32 rxaddbaresp; /**< addba resp recd */
+ uint32 rxdelba; /**< delba recd */
+ uint32 rxba; /**< ba recd */
+ uint32 rxbar; /**< bar recd */
+ uint32 rxinvba; /**< invalid ba recd */
+ uint32 rxbaholes; /**< ba recd with holes */
+ uint32 rxunexp; /**< unexpected packets */
+ uint32 rxpad[4]; /**< future */
+} wlc_ba_cnt_t;
+/* #endif WLBA */
+
+/** structure for per-tid ampdu control */
+struct ampdu_tid_control {
+ uint8 tid; /* tid */
+ uint8 enable; /* enable/disable */
+};
+
+/** Support for ampdu_tx_ba_window_cfg */
+#define WL_AMPDU_TX_BA_WINDOW_CFG_VER_1 1u
+#define WL_AMPDU_TX_BA_WINDOW_CFG_CUR_VER WL_AMPDU_TX_BA_WINDOW_CFG_VER_1
+
+/* 16 bits Config (5 bits reserved) and Status (2 bits reserved) */
+#define WL_AMPDU_TX_BA_WINDOW_CFG_BA_WSIZE_IDX 0u
+#define WL_AMPDU_TX_BA_WINDOW_CFG_BA_WSIZE_FSZ 9u
+
+#define WL_AMPDU_TX_BA_WINDOW_CFG_ASSOC_TYPE_IDX 9u
+#define WL_AMPDU_TX_BA_WINDOW_CFG_ASSOC_TYPE_FSZ 2u
+
+#define WL_AMPDU_TX_BA_WINDOW_CFG_STATE_IDX 11u
+#define WL_AMPDU_TX_BA_WINDOW_CFG_STATE_FSZ 3u
+
+#define WL_AMPDU_TX_BA_WINDOW_CFG_BA_WSIZE_MASK \
+ (MAXBITVAL(WL_AMPDU_TX_BA_WINDOW_CFG_BA_WSIZE_FSZ) << \
+ WL_AMPDU_TX_BA_WINDOW_CFG_BA_WSIZE_IDX)
+
+#define WL_AMPDU_TX_BA_WINDOW_CFG_ASSOC_TYPE_MASK \
+ (MAXBITVAL(WL_AMPDU_TX_BA_WINDOW_CFG_ASSOC_TYPE_FSZ) << \
+ WL_AMPDU_TX_BA_WINDOW_CFG_ASSOC_TYPE_IDX)
+
+#define WL_AMPDU_TX_BA_WINDOW_CFG_STATE_MASK \
+ (MAXBITVAL(WL_AMPDU_TX_BA_WINDOW_CFG_STATE_FSZ) << \
+ WL_AMPDU_TX_BA_WINDOW_CFG_STATE_IDX)
+
+/* code for config assoc_type */
+enum {
+ WL_AMPDU_TX_BA_WINDOW_CFG_ASSOC_TYPE_AX = 0,
+ WL_AMPDU_TX_BA_WINDOW_CFG_ASSOC_TYPE_UNIVERSAL = 1,
+ WL_AMPDU_TX_BA_WINDOW_CFG_ASSOC_TYPE_MAX = 2
+};
+
+/* ampdu_tx_ba_window_cfg states */
+enum {
+ WL_AMPDU_TX_BA_WINDOW_CFG_STATE_OFF = 0,
+ WL_AMPDU_TX_BA_WINDOW_CFG_STATE_NEGOTIATING = 1,
+ WL_AMPDU_TX_BA_WINDOW_CFG_STATE_NEGOTIATED = 2,
+ WL_AMPDU_TX_BA_WINDOW_CFG_STATE_MAX = 3
+};
+
+/** structure for per tid ampdu BA window configuration */
+typedef struct wl_ampdu_tx_ba_window_cfg_v1 {
+ uint16 version;
+ uint16 length; /* length of the entire structure ver+len+payload. */
+ /* tid bitmap:
+ * input (SET): select tid to configure.
+ * output (GET): TID that is currently configured.
+ */
+ uint8 tidbmp;
+ uint8 flag; /* currently not used. Reserved. 32-bit alignment. */
+ uint16 PAD;
+
+ /* Per-tid configuration tuple (tx_ba_wsize, assoctype). Used for GET and SET.
+ * bit0 - bit8: User configured TX BA window size. Range {0, max. FW supported}.
+ * bit9 - bit10: User configured association type. 0: 11ax association, 1: universal.
+ * bit11 - bit15: Reserved.
+ */
+ uint16 config[NUMPRIO];
+
+ /* Status of the per-tid configuration: GET only
+ * bit0 - bit8: Resulted TX BA window size.
+ * bit9 - bit10: Reserved.
+ * bit11 - bit13: TX BA configuration state.
+ * bit14 - bit15: Reserved.
+ */
+ uint16 status[NUMPRIO];
+} wl_ampdu_tx_ba_window_cfg_v1_t;
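+
+/* Illustrative sketch (hypothetical helper, examples only): packing one
+ * per-TID entry of wl_ampdu_tx_ba_window_cfg_v1_t.config[] from the bit
+ * layout documented above (bits 0-8 window size, bits 9-10 assoc type).
+ */
+#if defined(WL_EXAMPLE_SNIPPETS) /* hypothetical guard: examples only */
+static uint16
+example_ba_window_config(uint16 ba_wsize, uint16 assoc_type)
+{
+	return (uint16)
+		(((ba_wsize << WL_AMPDU_TX_BA_WINDOW_CFG_BA_WSIZE_IDX) &
+		  WL_AMPDU_TX_BA_WINDOW_CFG_BA_WSIZE_MASK) |
+		 ((assoc_type << WL_AMPDU_TX_BA_WINDOW_CFG_ASSOC_TYPE_IDX) &
+		  WL_AMPDU_TX_BA_WINDOW_CFG_ASSOC_TYPE_MASK));
+}
+#endif /* WL_EXAMPLE_SNIPPETS */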
+
+/** struct for ampdu tx/rx aggregation control */
+struct ampdu_aggr {
+	int8 aggr_override;	/**< aggr overridden by dongle. Not to be set by host. */
+ uint16 conf_TID_bmap; /**< bitmap of TIDs to configure */
+ uint16 enab_TID_bmap; /**< enable/disable per TID */
+};
+
+/** structure for identifying ea/tid for sending addba/delba */
+struct ampdu_ea_tid {
+ struct ether_addr ea; /**< Station address */
+ uint8 tid; /**< tid */
+ uint8 initiator; /**< 0 is recipient, 1 is originator */
+};
+
+/** structure for identifying retry/tid for retry_limit_tid/rr_retry_limit_tid */
+struct ampdu_retry_tid {
+ uint8 tid; /**< tid */
+ uint8 retry; /**< retry value */
+};
+
+#define BDD_FNAME_LEN 32 /**< Max length of friendly name */
+typedef struct bdd_fname {
+ uint8 len; /**< length of friendly name */
+ uchar name[BDD_FNAME_LEN]; /**< friendly name */
+} bdd_fname_t;
+
+/** For ioctls (e.g. addts) that take a list of TSPECs */
+struct tslist {
+ int32 count; /**< number of tspecs */
+ struct tsinfo_arg tsinfo[]; /**< variable length array of tsinfo */
+};
+
+/* WLTDLS */
+/**structure for tdls iovars */
+typedef struct tdls_iovar {
+ struct ether_addr ea; /**< Station address */
+ uint8 mode; /**< mode: depends on iovar */
+ uint8 PAD;
+ chanspec_t chanspec;
+ uint8 PAD[6];
+} tdls_iovar_t;
+
+#define TDLS_WFD_IE_SIZE 512
+/**structure for tdls wfd ie */
+typedef struct tdls_wfd_ie_iovar {
+ struct ether_addr ea; /**< Station address */
+ uint8 mode;
+ uint8 PAD;
+ uint16 length;
+ uint8 data[TDLS_WFD_IE_SIZE];
+} tdls_wfd_ie_iovar_t;
+/* #endif WLTDLS */
+
+/** structure for addts/delts arguments */
+typedef struct tspec_arg {
+ uint16 version; /**< see definition of TSPEC_ARG_VERSION */
+ uint16 length; /**< length of entire structure */
+ uint32 flag; /**< bit field */
+ /* TSPEC Arguments */
+ struct tsinfo_arg tsinfo; /**< TS Info bit field */
+ uint8 PAD;
+ uint16 nom_msdu_size; /**< (Nominal or fixed) MSDU Size (bytes) */
+ uint16 max_msdu_size; /**< Maximum MSDU Size (bytes) */
+ uint32 min_srv_interval; /**< Minimum Service Interval (us) */
+ uint32 max_srv_interval; /**< Maximum Service Interval (us) */
+ uint32 inactivity_interval; /**< Inactivity Interval (us) */
+ uint32 suspension_interval; /**< Suspension Interval (us) */
+ uint32 srv_start_time; /**< Service Start Time (us) */
+ uint32 min_data_rate; /**< Minimum Data Rate (bps) */
+ uint32 mean_data_rate; /**< Mean Data Rate (bps) */
+ uint32 peak_data_rate; /**< Peak Data Rate (bps) */
+ uint32 max_burst_size; /**< Maximum Burst Size (bytes) */
+ uint32 delay_bound; /**< Delay Bound (us) */
+ uint32 min_phy_rate; /**< Minimum PHY Rate (bps) */
+ uint16 surplus_bw; /**< Surplus Bandwidth Allowance (range 1.0 to 8.0) */
+ uint16 medium_time; /**< Medium Time (32 us/s periods) */
+ uint8 dialog_token; /**< dialog token */
+ uint8 PAD[3];
+} tspec_arg_t;
+
+/** tspec arg for desired station */
+typedef struct tspec_per_sta_arg {
+ struct ether_addr ea;
+ uint8 PAD[2];
+ struct tspec_arg ts;
+} tspec_per_sta_arg_t;
+
+/** structure for max bandwidth for each access category */
+typedef struct wme_max_bandwidth {
+ uint32 ac[AC_COUNT]; /**< max bandwidth for each access category */
+} wme_max_bandwidth_t;
+
+#define WL_WME_MBW_PARAMS_IO_BYTES (sizeof(wme_max_bandwidth_t))
+
+#define TSPEC_ARG_VERSION 2 /**< current version of wl_tspec_arg_t struct */
+#define TSPEC_ARG_LENGTH 55 /**< argument length from tsinfo to medium_time */
+#define TSPEC_DEFAULT_DIALOG_TOKEN 42 /**< default dialog token */
+#define TSPEC_DEFAULT_SBW_FACTOR 0x3000 /**< default surplus bw */
+
+#define WL_WOWL_KEEPALIVE_MAX_PACKET_SIZE 80
+#define WLC_WOWL_MAX_KEEPALIVE 2
+
+/** Packet lifetime configuration per ac */
+typedef struct wl_lifetime {
+ uint32 ac; /**< access class */
+ uint32 lifetime; /**< Packet lifetime value in ms */
+} wl_lifetime_t;
+
+/** Management time configuration */
+typedef struct wl_lifetime_mg {
+ uint32 mgmt_bitmap; /**< Mgmt subtype */
+ uint32 lifetime; /**< Packet lifetime value in us */
+} wl_lifetime_mg_t;
+
+/* MAC Sample Capture related */
+#define WL_MACCAPTR_DEFSTART_PTR 0xA00
+#define WL_MACCAPTR_DEFSTOP_PTR 0xA3F
+#define WL_MACCAPTR_DEFSZ 0x3F
+
+#define WL_MACCAPTR_DEF_MASK 0xFFFFFFFF
+
+typedef enum {
+ WL_MACCAPT_TRIG = 0,
+ WL_MACCAPT_STORE = 1,
+ WL_MACCAPT_TRANS = 2,
+ WL_MACCAPT_MATCH = 3
+} maccaptr_optn;
+
+typedef enum {
+ WL_MACCAPT_STRT = 1,
+ WL_MACCAPT_STOP = 2,
+ WL_MACCAPT_RST = 3
+} maccaptr_cmd_t;
+
+/* MAC Sample Capture Set-up Parameters */
+typedef struct wl_maccapture_params {
+ uint8 gpio_sel;
+ uint8 la_mode; /* TRUE: GPIO Out Enabled */
+ uint8 PAD[2];
+ uint32 start_ptr; /* Start address to store */
+ uint32 stop_ptr; /* Stop address to store */
+ uint8 optn_bmp; /* Options */
+ uint8 PAD[3];
+	/* Do not change the order of the fields below, nor add anything
+	 * between them: code uses fixed offsets to populate the registers.
+	 */
+ uint32 tr_mask; /* Trigger Mask */
+ uint32 tr_val; /* Trigger Value */
+ uint32 s_mask; /* Store Mode Mask */
+ uint32 x_mask; /* Trans. Mode Mask */
+ uint32 m_mask; /* Match Mode Mask */
+ uint32 m_val; /* Match Value */
+ maccaptr_cmd_t cmd; /* Start / Stop */
+} wl_maccapture_params_t;
+
+/** Channel Switch Announcement param */
+typedef struct wl_chan_switch {
+ uint8 mode; /**< value 0 or 1 */
+ uint8 count; /**< count # of beacons before switching */
+ chanspec_t chspec; /**< chanspec */
+ uint8 reg; /**< regulatory class */
+ uint8 frame_type; /**< csa frame type, unicast or broadcast */
+} wl_chan_switch_t;
+
+enum {
+ PFN_LIST_ORDER,
+ PFN_RSSI
+};
+
+enum {
+ DISABLE,
+ ENABLE
+};
+
+enum {
+ OFF_ADAPT,
+ SMART_ADAPT,
+ STRICT_ADAPT,
+ SLOW_ADAPT
+};
+
+#define SORT_CRITERIA_BIT 0
+#define AUTO_NET_SWITCH_BIT 1
+#define ENABLE_BKGRD_SCAN_BIT 2
+#define IMMEDIATE_SCAN_BIT 3
+#define AUTO_CONNECT_BIT 4
+#define ENABLE_BD_SCAN_BIT 5
+#define ENABLE_ADAPTSCAN_BIT 6
+#define IMMEDIATE_EVENT_BIT 8
+#define SUPPRESS_SSID_BIT 9
+#define ENABLE_NET_OFFLOAD_BIT 10
+/** report found/lost events for SSID and BSSID networks separately */
+#define REPORT_SEPERATELY_BIT 11
+
+#define SORT_CRITERIA_MASK 0x0001
+#define AUTO_NET_SWITCH_MASK 0x0002
+#define ENABLE_BKGRD_SCAN_MASK 0x0004
+#define IMMEDIATE_SCAN_MASK 0x0008
+#define AUTO_CONNECT_MASK 0x0010
+
+#define ENABLE_BD_SCAN_MASK 0x0020
+#define ENABLE_ADAPTSCAN_MASK 0x00c0
+#define IMMEDIATE_EVENT_MASK 0x0100
+#define SUPPRESS_SSID_MASK 0x0200
+#define ENABLE_NET_OFFLOAD_MASK 0x0400
+/** report found/lost events for SSID and BSSID networks separately */
+#define REPORT_SEPERATELY_MASK 0x0800
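+
+/* Illustrative sketch (hypothetical value, examples only): a typical PFN
+ * 'flags' word composed from the bit masks above - RSSI sort criteria,
+ * an immediate first scan, and separate SSID/BSSID reporting. Which bits
+ * a given firmware honors is determined by the firmware, not this header.
+ */
+#if defined(WL_EXAMPLE_SNIPPETS) /* hypothetical guard: examples only */
+#define EXAMPLE_PFN_FLAGS \
+	(SORT_CRITERIA_MASK | IMMEDIATE_SCAN_MASK | REPORT_SEPERATELY_MASK)
+#endif /* WL_EXAMPLE_SNIPPETS */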
+
+#define PFN_COMPLETE 1
+#define PFN_INCOMPLETE 0
+
+#define DEFAULT_BESTN 2
+#define DEFAULT_MSCAN 0
+#define DEFAULT_REPEAT 10
+#define DEFAULT_EXP 2
+
+#define PFN_PARTIAL_SCAN_BIT 0
+#define PFN_PARTIAL_SCAN_MASK 1
+
+#define PFN_SWC_RSSI_WINDOW_MAX 8
+#define PFN_SWC_MAX_NUM_APS 16
+#define PFN_HOTLIST_MAX_NUM_APS 64
+
+#define MAX_EPNO_HIDDEN_SSID 8
+#define MAX_WHITELIST_SSID 2
+
+/* Versions 1 through 4 of the scan results structures defined below */
+#define PFN_SCANRESULTS_VERSION_V1 1u
+#define PFN_SCANRESULTS_VERSION_V2 2u
+#define PFN_SCANRESULTS_VERSION_V3 3u
+#define PFN_SCANRESULTS_VERSION_V4 4u
+
+/** PFN network info structure */
+typedef struct wl_pfn_subnet_info_v1 {
+ struct ether_addr BSSID;
+ uint8 channel; /**< channel number only */
+ uint8 SSID_len;
+ uint8 SSID[32];
+} wl_pfn_subnet_info_v1_t;
+
+typedef struct wl_pfn_subnet_info_v2 {
+ struct ether_addr BSSID;
+ uint8 channel; /**< channel number only */
+ uint8 SSID_len;
+ union {
+ uint8 SSID[32];
+ uint16 index;
+ } u;
+} wl_pfn_subnet_info_v2_t;
+
+typedef struct wl_pfn_subnet_info_v3 {
+ struct ether_addr BSSID;
+ chanspec_t chanspec; /**< with 6G chanspec only */
+ uint8 SSID_len;
+ uint8 PAD[3];
+ union {
+ uint8 SSID[32];
+ uint16 index;
+ } u;
+} wl_pfn_subnet_info_v3_t;
+
+typedef struct wl_pfn_net_info_v1 {
+ wl_pfn_subnet_info_v1_t pfnsubnet;
+ int16 RSSI; /**< receive signal strength (in dBm) */
+ uint16 timestamp; /**< age in seconds */
+} wl_pfn_net_info_v1_t;
+
+typedef struct wl_pfn_net_info_v2 {
+ wl_pfn_subnet_info_v2_t pfnsubnet;
+ int16 RSSI; /**< receive signal strength (in dBm) */
+ uint16 timestamp; /**< age in seconds */
+} wl_pfn_net_info_v2_t;
+
+typedef struct wl_pfn_net_info_v3 {
+ wl_pfn_subnet_info_v3_t pfnsubnet;
+ int16 RSSI; /**< receive signal strength (in dBm) */
+ uint16 timestamp; /**< age in seconds */
+} wl_pfn_net_info_v3_t;
+
+/* Versions 1 through 3 of the lbest scan results structures below */
+#define PFN_LBEST_SCAN_RESULT_VERSION_V1 1
+#define PFN_LBEST_SCAN_RESULT_VERSION_V2 2
+#define PFN_LBEST_SCAN_RESULT_VERSION_V3 3
+
+#define MAX_CHBKT_PER_RESULT 4
+
+typedef struct wl_pfn_lnet_info_v1 {
+ wl_pfn_subnet_info_v1_t pfnsubnet; /**< BSSID + channel + SSID len + SSID */
+ uint16 flags; /**< partial scan, etc */
+ int16 RSSI; /**< receive signal strength (in dBm) */
+	uint32 timestamp;	/**< age in milliseconds */
+ uint16 rtt0; /**< estimated distance to this AP in centimeters */
+ uint16 rtt1; /**< standard deviation of the distance to this AP in centimeters */
+} wl_pfn_lnet_info_v1_t;
+
+typedef struct wl_pfn_lnet_info_v2 {
+ wl_pfn_subnet_info_v2_t pfnsubnet; /**< BSSID + channel + SSID len + SSID */
+ uint16 flags; /**< partial scan, etc */
+ int16 RSSI; /**< receive signal strength (in dBm) */
+	uint32 timestamp;	/**< age in milliseconds */
+ uint16 rtt0; /**< estimated distance to this AP in centimeters */
+ uint16 rtt1; /**< standard deviation of the distance to this AP in centimeters */
+} wl_pfn_lnet_info_v2_t;
+
+typedef struct wl_pfn_lnet_info_v3 {
+ wl_pfn_subnet_info_v3_t pfnsubnet; /**< BSSID + channel + SSID len + SSID */
+ uint16 flags; /**< partial scan, etc */
+ int16 RSSI; /**< receive signal strength (in dBm) */
+	uint32 timestamp;	/**< age in milliseconds */
+ uint16 rtt0; /**< estimated distance to this AP in centimeters */
+ uint16 rtt1; /**< standard deviation of the distance to this AP in centimeters */
+} wl_pfn_lnet_info_v3_t;
+
+typedef struct wl_pfn_lscanresults_v1 {
+ uint32 version;
+ uint32 status;
+ uint32 count;
+ wl_pfn_lnet_info_v1_t netinfo[1];
+} wl_pfn_lscanresults_v1_t;
+
+typedef struct wl_pfn_lscanresults_v2 {
+ uint32 version;
+ uint16 status;
+ uint16 count;
+ uint32 scan_ch_buckets[MAX_CHBKT_PER_RESULT];
+ wl_pfn_lnet_info_v2_t netinfo[1];
+} wl_pfn_lscanresults_v2_t;
+
+typedef struct wl_pfn_lscanresults_v3 {
+ uint32 version;
+ uint16 status;
+ uint16 count;
+ uint32 scan_ch_buckets[MAX_CHBKT_PER_RESULT];
+ wl_pfn_lnet_info_v3_t netinfo[1];
+} wl_pfn_lscanresults_v3_t;
+
+/** used to report on one or more pfn scan results */
+typedef struct wl_pfn_scanresults_v1 {
+ uint32 version;
+ uint32 status;
+ uint32 count;
+ wl_pfn_net_info_v1_t netinfo[1];
+} wl_pfn_scanresults_v1_t;
+
+typedef struct wl_pfn_scanresults_v2 {
+ uint32 version;
+ uint32 status;
+ uint32 count;
+ uint32 scan_ch_bucket;
+ wl_pfn_net_info_v2_t netinfo[1];
+} wl_pfn_scanresults_v2_t;
+
+typedef struct wl_pfn_scanresults_v3 {
+ uint32 version;
+ uint32 status;
+ uint32 count;
+ uint32 scan_ch_bucket;
+ wl_pfn_net_info_v3_t netinfo[1];
+} wl_pfn_scanresults_v3_t;
+
+#define WL_PFN_SCANRESULTS_SCAN_TYPE_HA 0u
+#define WL_PFN_SCANRESULTS_SCAN_TYPE_LP 1u
+
+/* In version 4, the v2 status field is split into separate status and flags
+ * fields. Changes from version 3 are not included.
+ */
+typedef struct wl_pfn_scanresults_v4 {
+ uint32 version;
+ uint16 status;
+ uint16 flags;
+ uint32 count;
+ uint32 scan_ch_bucket;
+ wl_pfn_net_info_v2_t netinfo[1];
+} wl_pfn_scanresults_v4_t;
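+
+/* Illustrative sketch (hypothetical helper, examples only): walking the
+ * variable-length netinfo array of a v2 scan result. 'count' entries
+ * follow the fixed header; bounds checking against the containing buffer
+ * is the caller's responsibility and is omitted here.
+ */
+#if defined(WL_EXAMPLE_SNIPPETS) /* hypothetical guard: examples only */
+static void
+example_walk_pfn_results(const wl_pfn_scanresults_v2_t *res)
+{
+	uint32 i;
+
+	for (i = 0; i < res->count; i++) {
+		const wl_pfn_net_info_v2_t *ni = &res->netinfo[i];
+		(void)ni;	/* e.g. read ni->RSSI and ni->pfnsubnet */
+	}
+}
+#endif /* WL_EXAMPLE_SNIPPETS */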
+
+typedef struct wl_pfn_significant_net {
+ uint16 flags;
+ uint16 channel;
+ struct ether_addr BSSID;
+ int8 rssi[PFN_SWC_RSSI_WINDOW_MAX];
+} wl_pfn_significant_net_t;
+
+#define PFN_SWC_SCANRESULT_VERSION 1
+
+typedef struct wl_pfn_swc_results {
+ uint32 version;
+ uint32 pkt_count; /**< No. of results in current frame */
+ uint32 total_count; /**< Total expected results */
+ wl_pfn_significant_net_t list[];
+} wl_pfn_swc_results_t;
+typedef struct wl_pfn_net_info_bssid_v1 {
+ struct ether_addr BSSID;
+ uint8 channel; /**< channel number only */
+ int8 RSSI; /**< receive signal strength (in dBm) */
+ uint16 flags; /**< (e.g. partial scan, off channel) */
+ uint16 timestamp; /**< age in seconds */
+} wl_pfn_net_info_bssid_v1_t;
+
+typedef struct wl_pfn_scanhist_bssid_v1 {
+ uint32 version;
+ uint32 status;
+ uint32 count;
+ wl_pfn_net_info_bssid_v1_t netinfo[1];
+} wl_pfn_scanhist_bssid_v1_t;
+
+/* v2 of this struct was skipped so the version numbering matches the other v3 structs */
+typedef struct wl_pfn_net_info_bssid_v3 {
+ struct ether_addr BSSID;
+ chanspec_t chanspec; /**<with 6G chanspec only */
+ uint16 flags; /**< (e.g. partial scan, off channel) */
+ uint16 timestamp; /**< age in seconds */
+ int8 RSSI; /**< receive signal strength (in dBm) */
+ uint8 PAD[2];
+} wl_pfn_net_info_bssid_v3_t;
+
+typedef struct wl_pfn_scanhist_bssid_v3 {
+ uint32 version;
+ uint32 status;
+ uint32 count;
+ wl_pfn_net_info_bssid_v3_t netinfo[1];
+} wl_pfn_scanhist_bssid_v3_t;
+
+#ifndef WL_PFN_NET_INFO_BSSID_TYPEDEF_HAS_ALIAS
+typedef wl_pfn_net_info_bssid_v1_t wl_pfn_net_info_bssid_t;
+typedef wl_pfn_scanhist_bssid_v1_t wl_pfn_scanhist_bssid_t;
+#endif /* WL_PFN_NET_INFO_BSSID_TYPEDEF_HAS_ALIAS */
+
+/* Versions 1 through 3 of the single scan result structures */
+#define PFN_SCANRESULT_VERSION_V1 1
+#define PFN_SCANRESULT_VERSION_V2 2
+#define PFN_SCANRESULT_VERSION_V3 3
+
+/** used to report exactly one scan result, plus detailed scan info in bss_info */
+typedef struct wl_pfn_scanresult_v1 {
+ uint32 version;
+ uint32 status;
+ uint32 count;
+ wl_pfn_net_info_v1_t netinfo;
+ wl_bss_info_v109_t bss_info;
+} wl_pfn_scanresult_v1_t;
+
+typedef struct wl_pfn_scanresult_v2 {
+ uint32 version;
+ uint32 status;
+ uint32 count;
+ wl_pfn_net_info_v2_t netinfo;
+ wl_bss_info_v109_t bss_info;
+} wl_pfn_scanresult_v2_t;
+
+typedef struct wl_pfn_scanresult_v2_1 {
+ uint32 version;
+ uint32 status;
+ uint32 count;
+ wl_pfn_net_info_v2_t netinfo;
+ uint8 bss_info[]; /* var length wl_bss_info_X structures */
+} wl_pfn_scanresult_v2_1_t;
+
+typedef struct wl_pfn_scanresult_v3_1 {
+ uint32 version;
+ uint32 status;
+ uint32 count;
+ wl_pfn_net_info_v3_t netinfo;
+ uint8 bss_info[]; /* var length wl_bss_info_X structures */
+} wl_pfn_scanresult_v3_1_t;
+
+#define PFN_SCAN_ALLGONE_VERSION_V1 1u
+
+typedef struct wl_pfn_scan_all_gone_event_v1 {
+ uint16 version;
+ uint16 length;
+ uint16 flags;
+ uint16 pad;
+} wl_pfn_scan_all_gone_event_v1_t;
+
+#define WL_PFN_MAX_RAND_LIMIT 20u
+#define PFN_VERSION_V2 2u
+/**PFN data structure */
+typedef struct wl_pfn_param_v2 {
+ int32 version; /**< PNO parameters version */
+ int32 scan_freq; /**< Scan frequency */
+ int32 lost_network_timeout; /**< Timeout in sec. to declare
+ * discovered network as lost
+ */
+ int16 flags; /**< Bit field to control features
+ * of PFN such as sort criteria auto
+ * enable switch and background scan
+ */
+ int16 rssi_margin; /**< Margin to avoid jitter for choosing a
+ * PFN based on RSSI sort criteria
+ */
+ uint8 bestn; /**< number of best networks in each scan */
+ uint8 mscan; /**< number of scans recorded */
+ uint8 repeat; /**< Minimum number of scan intervals
+ *before scan frequency changes in adaptive scan
+ */
+ uint8 exp; /**< Exponent of 2 for maximum scan interval */
+ int32 slow_freq; /**< slow scan period */
+} wl_pfn_param_v2_t;
+
+#define PFN_VERSION_V3 3u
+typedef struct wl_pfn_param_v3 {
+ int16 version; /**< PNO parameters version */
+ int16 length; /* length of the structure */
+ int32 scan_freq; /**< Scan frequency */
+ int32 lost_network_timeout; /**< Timeout in sec. to declare
+ * discovered network as lost
+ */
+ int16 flags; /**< Bit field to control features
+ * of PFN such as sort criteria auto
+ * enable switch and background scan
+ */
+ int16 rssi_margin; /**< Margin to avoid jitter for choosing a
+ * PFN based on RSSI sort criteria
+ */
+ uint8 bestn; /**< number of best networks in each scan */
+ uint8 mscan; /**< number of scans recorded */
+ uint8 repeat; /**< Minimum number of scan intervals
+ *before scan frequency changes in adaptive scan
+ */
+ uint8 exp; /**< Exponent of 2 for maximum scan interval */
+ int32 slow_freq; /**< slow scan period */
+ uint8 min_bound; /**< pfn scan period randomization - lower bound % */
+ uint8 max_bound; /**< pfn scan period randomization - upper bound % */
+	uint8 pfn_lp_scan_disable;	/* enable/disable the scan-core (low power) scan for PNO */
+ uint8 PAD[1]; /**< Pad to 32-bit alignment */
+} wl_pfn_param_v3_t;
+
+#ifndef PFN_PARAM_HAS_ALIAS
+typedef wl_pfn_param_v2_t wl_pfn_param_t;
+#define PFN_VERSION PFN_VERSION_V2
+#endif
+
+typedef struct wl_pfn_bssid {
+ struct ether_addr macaddr;
+ /* Bit4: suppress_lost, Bit3: suppress_found */
+ uint16 flags;
+} wl_pfn_bssid_t;
+typedef struct wl_pfn_significant_bssid {
+ struct ether_addr macaddr;
+ int8 rssi_low_threshold;
+ int8 rssi_high_threshold;
+} wl_pfn_significant_bssid_t;
+#define WL_PFN_SUPPRESSFOUND_MASK 0x08
+#define WL_PFN_SUPPRESSLOST_MASK 0x10
+#define WL_PFN_SSID_IMPRECISE_MATCH 0x80
+#define WL_PFN_SSID_SAME_NETWORK 0x10000
+#define WL_PFN_SUPPRESS_AGING_MASK 0x20000
+#define WL_PFN_FLUSH_ALL_SSIDS 0x40000
+
+#define WL_PFN_IOVAR_FLAG_MASK 0xFFFF00FF
+#define WL_PFN_RSSI_MASK 0xff00
+#define WL_PFN_RSSI_SHIFT 8
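+
+/* Illustrative sketch (hypothetical helper, examples only): extracting the
+ * signed RSSI threshold carried in bits 8-15 of a combined PFN iovar word,
+ * per the mask/shift definitions above.
+ */
+#if defined(WL_EXAMPLE_SNIPPETS) /* hypothetical guard: examples only */
+static int8
+example_pfn_rssi(uint32 pfn_flags)
+{
+	return (int8)((pfn_flags & WL_PFN_RSSI_MASK) >> WL_PFN_RSSI_SHIFT);
+}
+#endif /* WL_EXAMPLE_SNIPPETS */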
+
+typedef struct wl_pfn_cfg {
+ uint32 reporttype;
+ int32 channel_num;
+ uint16 channel_list[WL_NUMCHANNELS];
+ uint32 flags;
+} wl_pfn_cfg_t;
+
+#define WL_PFN_SSID_CFG_VERSION 1
+#define WL_PFN_SSID_CFG_CLEAR 0x1
+
+typedef struct wl_pfn_ssid_params {
+ int8 min5G_rssi; /* minimum 5GHz RSSI for a BSSID to be considered */
+ int8 min2G_rssi; /* minimum 2.4GHz RSSI for a BSSID to be considered */
+ int16 init_score_max; /* The maximum score that a network can have before bonuses */
+
+ int16 cur_bssid_bonus; /* Add to current bssid */
+ int16 same_ssid_bonus; /* score bonus for all networks with the same network flag */
+ int16 secure_bonus; /* score bonus for networks that are not open */
+ int16 band_5g_bonus;
+} wl_pfn_ssid_params_t;
+
+typedef struct wl_ssid_ext_params {
+ int8 min5G_rssi; /* minimum 5GHz RSSI for a BSSID to be considered */
+ int8 min2G_rssi; /* minimum 2.4GHz RSSI for a BSSID to be considered */
+ int16 init_score_max; /* The maximum score that a network can have before bonuses */
+ int16 cur_bssid_bonus; /* Add to current bssid */
+ int16 same_ssid_bonus; /* score bonus for all networks with the same network flag */
+ int16 secure_bonus; /* score bonus for networks that are not open */
+ int16 band_5g_bonus;
+} wl_ssid_ext_params_t;
+
+typedef struct wl_pfn_ssid_cfg {
+ uint16 version;
+ uint16 flags;
+ wl_ssid_ext_params_t params;
+} wl_pfn_ssid_cfg_t;
+
+#define CH_BUCKET_REPORT_NONE 0
+#define CH_BUCKET_REPORT_SCAN_COMPLETE_ONLY 1
+#define CH_BUCKET_REPORT_FULL_RESULT 2
+#define CH_BUCKET_REPORT_SCAN_COMPLETE (CH_BUCKET_REPORT_SCAN_COMPLETE_ONLY | \
+ CH_BUCKET_REPORT_FULL_RESULT)
+#define CH_BUCKET_REPORT_REGULAR 0
+#define CH_BUCKET_GSCAN 4
+
+typedef struct wl_pfn_gscan_ch_bucket_cfg {
+ uint8 bucket_end_index;
+ uint8 bucket_freq_multiple;
+ uint8 flag;
+ uint8 reserved;
+ uint16 repeat;
+ uint16 max_freq_multiple;
+} wl_pfn_gscan_ch_bucket_cfg_t;
+
+typedef struct wl_pfn_capabilities {
+ uint16 max_mscan;
+ uint16 max_bestn;
+ uint16 max_swc_bssid;
+ uint16 max_hotlist_bssid;
+} wl_pfn_capabilities_t;
+
+#define GSCAN_SEND_ALL_RESULTS_MASK (1 << 0)
+#define GSCAN_ALL_BUCKETS_IN_FIRST_SCAN_MASK (1 << 3)
+#define GSCAN_CFG_FLAGS_ONLY_MASK (1 << 7)
+#define WL_GSCAN_CFG_VERSION 1
+typedef struct wl_pfn_gscan_cfg {
+ uint16 version;
+ /**
+ * BIT0 1 = send probes/beacons to HOST
+ * BIT1 Reserved
+ * BIT2 Reserved
+ * Add any future flags here
+ * BIT7 1 = no other useful cfg sent
+ */
+ uint8 flags;
+ /** Buffer filled threshold in % to generate an event */
+ uint8 buffer_threshold;
+ /**
+ * No. of BSSIDs with "change" to generate an evt
+ * change - crosses rssi threshold/lost
+ */
+ uint8 swc_nbssid_threshold;
+	/* Size of rssi cache buffer, max 8 (for now) */
+ uint8 swc_rssi_window_size;
+ uint8 count_of_channel_buckets;
+ uint8 retry_threshold;
+ uint16 lost_ap_window;
+ wl_pfn_gscan_ch_bucket_cfg_t channel_bucket[1];
+} wl_pfn_gscan_cfg_t;
+
+#define WL_PFN_REPORT_ALLNET 0
+#define WL_PFN_REPORT_SSIDNET 1
+#define WL_PFN_REPORT_BSSIDNET 2
+
+#define WL_PFN_CFG_FLAGS_PROHIBITED 0x00000001 /* Accept and use prohibited channels */
+#define WL_PFN_CFG_FLAGS_RESERVED 0xfffffffe /**< Remaining reserved for future use */
+
+typedef struct wl_pfn {
+ wlc_ssid_t ssid; /**< ssid name and its length */
+ int32 flags; /**< bit2: hidden */
+ int32 infra; /**< BSS Vs IBSS */
+ int32 auth; /**< Open Vs Closed */
+ int32 wpa_auth; /**< WPA type */
+ int32 wsec; /**< wsec value */
+} wl_pfn_t;
+
+typedef struct wl_pfn_list {
+ uint32 version;
+ uint32 enabled;
+ uint32 count;
+ wl_pfn_t pfn[1];
+} wl_pfn_list_t;
+
+#define PFN_SSID_EXT_VERSION 1
+
+typedef struct wl_pfn_ext {
+ uint8 flags;
+ int8 rssi_thresh; /* RSSI threshold, track only if RSSI > threshold */
+ uint16 wpa_auth; /* Match the wpa auth type defined in wlioctl_defs.h */
+ uint8 ssid[DOT11_MAX_SSID_LEN];
+ uint8 ssid_len;
+ uint8 pad;
+} wl_pfn_ext_t;
+typedef struct wl_pfn_ext_list {
+ uint16 version;
+ uint16 count;
+ wl_pfn_ext_t pfn_ext[1];
+} wl_pfn_ext_list_t;
+
+#define WL_PFN_SSID_EXT_FOUND 0x1
+#define WL_PFN_SSID_EXT_LOST 0x2
+typedef struct wl_pfn_result_ssid {
+ uint8 flags;
+ int8 rssi;
+ /* channel number */
+ uint16 channel;
+ /* Assume idx in order of cfg */
+ uint32 index;
+} wl_pfn_result_ssid_crc32_t;
+
+typedef struct wl_pfn_ssid_ext_result {
+ uint16 version;
+ uint16 count;
+ wl_pfn_result_ssid_crc32_t net[1];
+} wl_pfn_ssid_ext_result_t;
+
+#define PFN_EXT_AUTH_CODE_OPEN 1 /* open */
+#define PFN_EXT_AUTH_CODE_PSK 2 /* WPA_PSK or WPA2PSK */
+#define PFN_EXT_AUTH_CODE_EAPOL 4 /* any EAPOL */
+
+#define WL_PFN_HIDDEN_BIT 2
+#define WL_PFN_HIDDEN_MASK 0x4
+
+#ifndef BESTN_MAX
+#define BESTN_MAX 10
+#endif
+
+#ifndef MSCAN_MAX
+#define MSCAN_MAX 32
+#endif
+
+/* Dynamic scan configuration for motion profiles */
+
+#define WL_PFN_MPF_VERSION 1
+
+/* Valid group IDs, may be expanded in the future */
+#define WL_PFN_MPF_GROUP_SSID 0
+#define WL_PFN_MPF_GROUP_BSSID 1
+#define WL_PFN_MPF_MAX_GROUPS 2
+
+/* Max number of MPF states supported at this time */
+#define WL_PFN_MPF_STATES_MAX 4u
+#define WL_PFN_MPF_LP_CNT_MAX 7u
+
+/* Flags for the MPF-specific parameters */
+#define WL_PFN_MPF_ADAPT_ON_BIT 0u
+#define WL_PFN_MPF_ADAPTSCAN_BIT 1u
+#define WL_PFN_MPF_LP_SCAN_BIT 3u
+
+#define WL_PFN_MPF_ADAPT_ON_MASK 0x0001 /* Bit 0 */
+#define WL_PFN_MPF_ADAPTSCAN_MASK 0x0006 /* Bits [2:1] */
+#define WL_PFN_MPF_LP_SCAN_CNT_MASK 0x0038 /* Bits [5:3] */
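+
+/* Illustrative sketch (hypothetical helper, examples only): decoding the
+ * low-power scan count from a per-state MPF flags word using the
+ * bit/mask definitions above.
+ */
+#if defined(WL_EXAMPLE_SNIPPETS) /* hypothetical guard: examples only */
+static uint16
+example_mpf_lp_scan_cnt(uint16 flags)
+{
+	return (uint16)((flags & WL_PFN_MPF_LP_SCAN_CNT_MASK) >>
+		WL_PFN_MPF_LP_SCAN_BIT);
+}
+#endif /* WL_EXAMPLE_SNIPPETS */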
+
+/* Per-state timing values */
+typedef struct wl_pfn_mpf_state_params {
+ int32 scan_freq; /* Scan frequency (secs) */
+ int32 lost_network_timeout; /* Timeout to declare net lost (secs) */
+ int16 flags; /* Space for flags: ADAPT, LP_SCAN cnt etc */
+ uint8 exp; /* Exponent of 2 for max interval for SMART/STRICT_ADAPT */
+ uint8 repeat; /* Number of scans before changing adaptation level */
+ int32 slow_freq; /* Slow scan period for SLOW_ADAPT */
+} wl_pfn_mpf_state_params_t;
+
+typedef struct wl_pfn_mpf_param {
+ uint16 version; /* Structure version */
+ uint16 groupid; /* Group ID: 0 (SSID), 1 (BSSID), other: reserved */
+ wl_pfn_mpf_state_params_t state[WL_PFN_MPF_STATES_MAX];
+} wl_pfn_mpf_param_t;
+
+/* Structure for setting pfn_override iovar */
+typedef struct wl_pfn_override_param {
+ uint16 version; /* Structure version */
+ uint16 start_offset; /* Seconds from now to apply new params */
+ uint16 duration; /* Seconds to keep new params applied */
+ uint16 reserved;
+ wl_pfn_mpf_state_params_t override;
+} wl_pfn_override_param_t;
+#define WL_PFN_OVERRIDE_VERSION 1
+
+/*
+ * Definitions for base MPF configuration
+ */
+
+#define WL_MPF_VERSION 1
+#define WL_MPF_MAX_BITS 3
+#define WL_MPF_MAX_STATES (1 << WL_MPF_MAX_BITS)
+
+#define WL_MPF_STATE_NAME_MAX 12
+
+typedef struct wl_mpf_val {
+ uint16 val; /* Value of GPIO bits */
+ uint16 state; /* State identifier */
+ char name[WL_MPF_STATE_NAME_MAX]; /* Optional name */
+} wl_mpf_val_t;
+
+typedef struct wl_mpf_map {
+ uint16 version;
+ uint16 type;
+ uint16 mask; /* Which GPIO bits to use */
+ uint8 count; /* Count of state/value mappings */
+ uint8 PAD;
+ wl_mpf_val_t vals[WL_MPF_MAX_STATES];
+} wl_mpf_map_t;
+
+#define WL_MPF_STATE_AUTO	(0xFFFF) /* ((uint16)-1) */
+
+typedef struct wl_mpf_state {
+ uint16 version;
+ uint16 type;
+ uint16 state; /* Get/Set */
+ uint8 force; /* 0 - auto (HW) state, 1 - forced state */
+ char name[WL_MPF_STATE_NAME_MAX]; /* Get/Set: Optional/actual name */
+ uint8 PAD;
+} wl_mpf_state_t;
+/*
+ * WLFCTS definition
+ */
+typedef struct wl_txstatus_additional_info {
+ uint32 rspec;
+ uint32 enq_ts;
+ uint32 last_ts;
+ uint32 entry_ts;
+ uint16 seq;
+ uint8 rts_cnt;
+ uint8 tx_cnt;
+} wl_txstatus_additional_info_t;
+
+/** Service discovery */
+typedef struct {
+ uint8 transaction_id; /**< Transaction id */
+ uint8 protocol; /**< Service protocol type */
+ uint16 query_len; /**< Length of query */
+ uint16 response_len; /**< Length of response */
+ uint8 qrbuf[];
+} wl_p2po_qr_t;
+
+typedef struct {
+ uint16 period; /**< extended listen period */
+ uint16 interval; /**< extended listen interval */
+ uint16 count; /* count to repeat */
+ uint16 pad; /* pad for 32bit align */
+} wl_p2po_listen_t;
+
+/** GAS state machine tunable parameters. Structure field values of 0 means use the default. */
+typedef struct wl_gas_config {
+ uint16 max_retransmit; /**< Max # of firmware/driver retransmits on no Ack
+ * from peer (on top of the ucode retries).
+ */
+ uint16 response_timeout; /**< Max time to wait for a GAS-level response
+ * after sending a packet.
+ */
+ uint16 max_comeback_delay; /**< Max GAS response comeback delay.
+ * Exceeding this fails the GAS exchange.
+ */
+ uint16 max_retries; /**< Max # of GAS state machine retries on failure
+ * of a GAS frame exchange.
+ */
+} wl_gas_config_t;
+
+/** P2P Find Offload parameters */
+typedef struct wl_p2po_find_config {
+ uint16 version; /**< Version of this struct */
+ uint16 length; /**< sizeof(wl_p2po_find_config_t) */
+ int32 search_home_time; /**< P2P search state home time when concurrent
+ * connection exists. -1 for default.
+ */
+ uint8 num_social_channels;
+ /**< Number of social channels up to WL_P2P_SOCIAL_CHANNELS_MAX.
+ * 0 means use default social channels.
+ */
+ uint8 flags;
+ uint16 social_channels[1]; /**< Variable length array of social channels */
+} wl_p2po_find_config_t;
+#define WL_P2PO_FIND_CONFIG_VERSION 2 /**< value for version field */
+
+/** wl_p2po_find_config_t flags */
+#define P2PO_FIND_FLAG_SCAN_ALL_APS 0x01 /**< Whether to scan for all APs in the p2po_find
+ * periodic scans of all channels.
+ * 0 means scan for only P2P devices.
+ * 1 means scan for P2P devices plus non-P2P APs.
+ */
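+
+/*
+ * Illustrative sketch: sizing and filling the variable-length find-config
+ * above. Not part of the driver source; calloc() stands in for whatever
+ * allocator the host uses, and per the field comment the length field
+ * carries sizeof(wl_p2po_find_config_t).
+ */
+#if 0 /* example only */
+static wl_p2po_find_config_t *
+example_alloc_find_config(uint8 nchan, const uint16 *chans)
+{
+	/* the struct already holds one social_channels element */
+	size_t len = sizeof(wl_p2po_find_config_t) +
+		((nchan > 1) ? (nchan - 1) * sizeof(uint16) : 0);
+	wl_p2po_find_config_t *cfg = (wl_p2po_find_config_t *)calloc(1, len);
+	if (cfg == NULL)
+		return NULL;
+	cfg->version = WL_P2PO_FIND_CONFIG_VERSION;
+	cfg->length = (uint16)sizeof(wl_p2po_find_config_t);
+	cfg->search_home_time = -1;	/* -1 selects the default */
+	cfg->num_social_channels = nchan;
+	cfg->flags = P2PO_FIND_FLAG_SCAN_ALL_APS;
+	memcpy(cfg->social_channels, chans, nchan * sizeof(uint16));
+	return cfg;
+}
+#endif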
+
+/** For adding a WFDS service to seek */
+typedef struct {
+ uint32 seek_hdl; /**< unique id chosen by host */
+ uint8 addr[6]; /**< Seek service from a specific device with this
+ * MAC address, all 1's for any device.
+ */
+ uint8 service_hash[P2P_WFDS_HASH_LEN];
+ uint8 service_name_len;
+ uint8 service_name[MAX_WFDS_SEEK_SVC_NAME_LEN];
+ /**< Service name to seek, not null terminated */
+ uint8 service_info_req_len;
+ uint8 service_info_req[1]; /**< Service info request, not null terminated.
+ * Variable length specified by service_info_req_len.
+ * Maximum length is MAX_WFDS_SEEK_SVC_INFO_LEN.
+ */
+} wl_p2po_wfds_seek_add_t;
+
+/** For deleting a WFDS service to seek */
+typedef struct {
+ uint32 seek_hdl; /**< delete service specified by id */
+} wl_p2po_wfds_seek_del_t;
+
+/** For adding a WFDS service to advertise */
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint32 advertise_hdl; /**< unique id chosen by host */
+ uint8 service_hash[P2P_WFDS_HASH_LEN];
+ uint32 advertisement_id;
+ uint16 service_config_method;
+ uint8 service_name_len;
+ uint8 service_name[MAX_WFDS_SVC_NAME_LEN];
+	/**< Service name, not null terminated */
+ uint8 service_status;
+ uint16 service_info_len;
+ uint8 service_info[1]; /**< Service info, not null terminated.
+ * Variable length specified by service_info_len.
+ * Maximum length is MAX_WFDS_ADV_SVC_INFO_LEN.
+ */
+} BWL_POST_PACKED_STRUCT wl_p2po_wfds_advertise_add_t;
+#include <packed_section_end.h>
+
+/** For deleting a WFDS service to advertise */
+typedef struct {
+ uint32 advertise_hdl; /**< delete service specified by hdl */
+} wl_p2po_wfds_advertise_del_t;
+
+/** P2P Offload discovery mode for the p2po_state iovar */
+typedef enum {
+ WL_P2PO_DISC_STOP,
+ WL_P2PO_DISC_LISTEN,
+ WL_P2PO_DISC_DISCOVERY
+} disc_mode_t;
+
+/* ANQP offload */
+
+#define ANQPO_MAX_QUERY_SIZE 256
+typedef struct {
+ uint16 max_retransmit; /**< ~0 use default, max retransmit on no ACK from peer */
+ uint16 response_timeout; /**< ~0 use default, msec to wait for resp after tx packet */
+ uint16 max_comeback_delay; /**< ~0 use default, max comeback delay in resp else fail */
+ uint16 max_retries; /**< ~0 use default, max retries on failure */
+ uint16 query_len; /**< length of ANQP query */
+ uint8 query_data[1]; /**< ANQP encoded query (max ANQPO_MAX_QUERY_SIZE) */
+} wl_anqpo_set_t;
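+
+/*
+ * Illustrative sketch: building an ANQP query buffer. Not part of the
+ * driver source; calloc() stands in for the host allocator, and ~0 in
+ * each tunable selects the default as documented above.
+ */
+#if 0 /* example only */
+static wl_anqpo_set_t *
+example_build_anqpo_set(const uint8 *query, uint16 query_len)
+{
+	wl_anqpo_set_t *set;
+	size_t len;
+	if (query_len > ANQPO_MAX_QUERY_SIZE)
+		return NULL;
+	len = OFFSETOF(wl_anqpo_set_t, query_data) + query_len;
+	set = (wl_anqpo_set_t *)calloc(1, len);
+	if (set == NULL)
+		return NULL;
+	set->max_retransmit = (uint16)~0;	/* use default */
+	set->response_timeout = (uint16)~0;	/* use default */
+	set->max_comeback_delay = (uint16)~0;	/* use default */
+	set->max_retries = (uint16)~0;		/* use default */
+	set->query_len = query_len;
+	memcpy(set->query_data, query, query_len);
+	return set;
+}
+#endif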
+
+#define WL_ANQPO_FLAGS_BSSID_WILDCARD 0x0001
+#define WL_ANQPO_PEER_LIST_VERSION_2 2
+typedef struct {
+ uint16 channel; /**< channel of the peer */
+ struct ether_addr addr; /**< addr of the peer */
+} wl_anqpo_peer_v1_t;
+typedef struct {
+ uint16 channel; /**< channel of the peer */
+ struct ether_addr addr; /**< addr of the peer */
+ uint32 flags; /**< 0x01-Peer is MBO Capable */
+} wl_anqpo_peer_v2_t;
+
+#define WL_ANQPO_PEER_LIST_VERSION_3 3
+typedef struct {
+ uint16 chanspec; /**< chanspec of the peer */
+ struct ether_addr addr; /**< addr of the peer */
+ uint32 flags; /**< 0x01-Peer is MBO Capable */
+} wl_anqpo_peer_v3_t;
+
+#define ANQPO_MAX_PEER_LIST 64
+typedef struct {
+ uint16 count; /**< number of peers in list */
+ wl_anqpo_peer_v1_t peer[1]; /**< max ANQPO_MAX_PEER_LIST */
+} wl_anqpo_peer_list_v1_t;
+
+typedef struct {
+	uint16 version; /**< VERSION */
+ uint16 length; /**< length of entire structure */
+ uint16 count; /**< number of peers in list */
+ wl_anqpo_peer_v2_t peer[1]; /**< max ANQPO_MAX_PEER_LIST */
+} wl_anqpo_peer_list_v2_t;
+
+typedef struct {
+ uint16 version; /**< VERSION */
+ uint16 length; /**< length of entire structure */
+ uint16 count; /**< number of peers in list */
+ wl_anqpo_peer_v3_t peer[]; /**< max ANQPO_MAX_PEER_LIST */
+} wl_anqpo_peer_list_v3_t;
+
+#ifndef WL_ANQPO_PEER_LIST_TYPEDEF_HAS_ALIAS
+typedef wl_anqpo_peer_list_v1_t wl_anqpo_peer_list_t;
+typedef wl_anqpo_peer_v1_t wl_anqpo_peer_t;
+#endif /* WL_ANQPO_PEER_LIST_TYPEDEF_HAS_ALIAS */
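+
+/*
+ * Illustrative sketch: buffer sizing for the peer-list variants differs
+ * because v1/v2 declare peer[1] while v3 uses a flexible array member.
+ * Not part of the driver source.
+ */
+#if 0 /* example only */
+static size_t example_anqpo_peer_list_v3_len(uint16 count)
+{
+	/* fixed header plus 'count' v3 peer entries */
+	return sizeof(wl_anqpo_peer_list_v3_t) + count * sizeof(wl_anqpo_peer_v3_t);
+}
+
+static size_t example_anqpo_peer_list_v1_len(uint16 count)
+{
+	/* peer[1] is built into the struct, so one entry is already counted */
+	return sizeof(wl_anqpo_peer_list_v1_t) +
+		((count > 1) ? (count - 1) * sizeof(wl_anqpo_peer_v1_t) : 0);
+}
+#endif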
+
+#define ANQPO_MAX_IGNORE_SSID 64
+typedef struct {
+ uint8 is_clear; /**< set to clear list (not used on GET) */
+ uint8 PAD;
+ uint16 count; /**< number of SSID in list */
+ wlc_ssid_t ssid[1]; /**< max ANQPO_MAX_IGNORE_SSID */
+} wl_anqpo_ignore_ssid_list_t;
+
+#define ANQPO_MAX_IGNORE_BSSID 64
+typedef struct {
+ uint8 is_clear; /**< set to clear list (not used on GET) */
+ uint8 PAD;
+ uint16 count; /**< number of addr in list */
+ struct ether_addr bssid[]; /**< max ANQPO_MAX_IGNORE_BSSID */
+} wl_anqpo_ignore_bssid_list_t;
+
+struct toe_ol_stats_t {
+ /** Num of tx packets that don't need to be checksummed */
+ uint32 tx_summed;
+
+ /* Num of tx packets where checksum is filled by offload engine */
+ uint32 tx_iph_fill;
+ uint32 tx_tcp_fill;
+ uint32 tx_udp_fill;
+ uint32 tx_icmp_fill;
+
+ /* Num of rx packets where toe finds out if checksum is good or bad */
+ uint32 rx_iph_good;
+ uint32 rx_iph_bad;
+ uint32 rx_tcp_good;
+ uint32 rx_tcp_bad;
+ uint32 rx_udp_good;
+ uint32 rx_udp_bad;
+ uint32 rx_icmp_good;
+ uint32 rx_icmp_bad;
+
+ /* Num of tx packets in which csum error is injected */
+ uint32 tx_tcp_errinj;
+ uint32 tx_udp_errinj;
+ uint32 tx_icmp_errinj;
+
+ /* Num of rx packets in which csum error is injected */
+ uint32 rx_tcp_errinj;
+ uint32 rx_udp_errinj;
+ uint32 rx_icmp_errinj;
+};
+
+/** Arp offload statistic counts */
+struct arp_ol_stats_t {
+ uint32 host_ip_entries; /**< Host IP table addresses (more than one if multihomed) */
+ uint32 host_ip_overflow; /**< Host IP table additions skipped due to overflow */
+
+ uint32 arp_table_entries; /**< ARP table entries */
+ uint32 arp_table_overflow; /**< ARP table additions skipped due to overflow */
+
+ uint32 host_request; /**< ARP requests from host */
+ uint32 host_reply; /**< ARP replies from host */
+ uint32 host_service; /**< ARP requests from host serviced by ARP Agent */
+
+ uint32 peer_request; /**< ARP requests received from network */
+ uint32 peer_request_drop; /**< ARP requests from network that were dropped */
+ uint32 peer_reply; /**< ARP replies received from network */
+ uint32 peer_reply_drop; /**< ARP replies from network that were dropped */
+ uint32 peer_service; /**< ARP request from host serviced by ARP Agent */
+};
+
+/** NS offload statistic counts */
+struct nd_ol_stats_t {
+ uint32 host_ip_entries; /**< Host IP table addresses (more than one if multihomed) */
+ uint32 host_ip_overflow; /**< Host IP table additions skipped due to overflow */
+ uint32 peer_request; /**< NS requests received from network */
+ uint32 peer_request_drop; /**< NS requests from network that were dropped */
+ uint32 peer_reply_drop; /**< NA replies from network that were dropped */
+ uint32 peer_service; /**< NS request from host serviced by firmware */
+};
+
+/*
+ * Neighbor Discovery Offloading
+ */
+enum {
+ WL_ND_IPV6_ADDR_TYPE_UNICAST = 0,
+ WL_ND_IPV6_ADDR_TYPE_ANYCAST
+};
+
+typedef struct wl_nd_host_ip_addr {
+ struct ipv6_addr ip_addr; /* host ip address */
+ uint8 type; /* type of address */
+ uint8 pad[3];
+} wl_nd_host_ip_addr_t;
+
+typedef struct wl_nd_host_ip_list {
+ uint32 count;
+ wl_nd_host_ip_addr_t host_ip[1];
+} wl_nd_host_ip_list_t;
+
+#define WL_ND_HOSTIP_IOV_VER 1
+
+enum {
+ WL_ND_HOSTIP_OP_VER = 0, /* get version */
+ WL_ND_HOSTIP_OP_ADD, /* add address */
+ WL_ND_HOSTIP_OP_DEL, /* delete specified address */
+ WL_ND_HOSTIP_OP_DEL_UC, /* delete all unicast address */
+ WL_ND_HOSTIP_OP_DEL_AC, /* delete all anycast address */
+ WL_ND_HOSTIP_OP_DEL_ALL, /* delete all addresses */
+ WL_ND_HOSTIP_OP_LIST, /* get list of host ip address */
+ WL_ND_HOSTIP_OP_MAX
+};
+
+typedef struct wl_nd_hostip {
+ uint16 version; /* version of iovar buf */
+ uint16 op_type; /* operation type */
+ uint32 length; /* length of entire structure */
+ union {
+ wl_nd_host_ip_addr_t host_ip; /* set param for add */
+ uint16 version; /* get return for ver */
+ } u;
+} wl_nd_hostip_t;
+
+#define WL_ND_HOSTIP_FIXED_LEN OFFSETOF(wl_nd_hostip_t, u)
+#define WL_ND_HOSTIP_WITH_ADDR_LEN (WL_ND_HOSTIP_FIXED_LEN + sizeof(wl_nd_host_ip_addr_t))
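+
+/*
+ * Illustrative sketch: filling an "add host IP" request with the structures
+ * above. Not part of the driver source; the iovar send itself is omitted.
+ */
+#if 0 /* example only */
+static void
+example_fill_nd_hostip_add(wl_nd_hostip_t *req, const struct ipv6_addr *ip)
+{
+	memset(req, 0, sizeof(*req));
+	req->version = WL_ND_HOSTIP_IOV_VER;
+	req->op_type = WL_ND_HOSTIP_OP_ADD;
+	req->length = WL_ND_HOSTIP_WITH_ADDR_LEN;
+	req->u.host_ip.ip_addr = *ip;
+	req->u.host_ip.type = WL_ND_IPV6_ADDR_TYPE_UNICAST;
+}
+#endif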
+
+/*
+ * Keep-alive packet offloading.
+ */
+
+/**
+ * NAT keep-alive packets format: specifies the re-transmission period, the packet
+ * length, and packet contents.
+ */
+typedef struct wl_keep_alive_pkt {
+	uint32 period_msec; /**< Retransmission period (0 to disable packet re-transmits) */
+	uint16 len_bytes; /**< Size of packet to transmit (0 to disable packet re-transmits) */
+	uint8 data[1]; /**< Variable length packet to transmit. Contents should include
+			* entire ethernet packet (enet header, IP header, UDP header,
+			* and UDP payload) in network byte order.
+			*/
+} wl_keep_alive_pkt_t;
+
+#define WL_KEEP_ALIVE_FIXED_LEN OFFSETOF(wl_keep_alive_pkt_t, data)
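+
+/*
+ * Illustrative sketch: a keep-alive entry is the fixed header plus the raw
+ * ethernet frame to retransmit. Not part of the driver source; calloc()
+ * stands in for the host allocator.
+ */
+#if 0 /* example only */
+static wl_keep_alive_pkt_t *
+example_build_keep_alive(const uint8 *frame, uint16 frame_len, uint32 period_msec)
+{
+	size_t len = WL_KEEP_ALIVE_FIXED_LEN + frame_len;
+	wl_keep_alive_pkt_t *ka = (wl_keep_alive_pkt_t *)calloc(1, len);
+	if (ka == NULL)
+		return NULL;
+	ka->period_msec = period_msec;	/* 0 would disable retransmission */
+	ka->len_bytes = frame_len;
+	/* complete frame (enet + IP + UDP headers and payload), network byte order */
+	memcpy(ka->data, frame, frame_len);
+	return ka;
+}
+#endif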
+
+#define MAX_RSSI_COUNT 8
+typedef struct rssi_struct {
+ int8 val[MAX_RSSI_COUNT]; /**< rssi values in AFs */
+ int16 sum; /**< total rssi sum */
+ uint8 cnt; /**< number rssi samples */
+ uint8 idx; /**< next rssi location */
+} rssi_struct_t;
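+
+/*
+ * Illustrative sketch: rssi_struct_t behaves as a fixed-size ring of samples
+ * with a running sum, so the average is sum/cnt. Not part of the driver
+ * source; this is one plausible update routine, not the firmware's.
+ */
+#if 0 /* example only */
+static void example_rssi_add_sample(rssi_struct_t *r, int8 sample)
+{
+	if (r->cnt == MAX_RSSI_COUNT)
+		r->sum -= r->val[r->idx];	/* ring full: retire the oldest sample */
+	else
+		r->cnt++;
+	r->val[r->idx] = sample;
+	r->sum += sample;
+	r->idx = (uint8)((r->idx + 1) % MAX_RSSI_COUNT);
+}
+#endif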
+
+#ifdef WLDFSP
+#define DFSP_EVT_OFFSET OFFSETOF(dfsp_event_data_t, ie)
+#define DFSP_EVT_FLAGS_AP_ASSOC (1 << 0)
+#define DFSP_EVT_FLAGS_AP_BCNMON (1 << 1)
+#define DFSP_EVT_FLAGS_PROXY_BCSA (1 << 2)
+#define DFSP_EVT_FLAGS_PROXY_UCSA (1 << 3)
+#define DFSP_EVT_FLAGS_PROXY_PCSA (1 << 4)
+
+typedef struct dfsp_event_data {
+ uint16 flags; /* indicate what triggers the event */
+ uint16 ie_len;
+ uint8 ie[]; /* variable length */
+} dfsp_event_data_t;
+
+/* Proxy Channel Switch Announcement is a collection of IEs */
+typedef struct dfsp_pcsa {
+ dot11_ext_csa_ie_t ecsa;
+ dot11_mesh_csp_ie_t mcsp;
+ dot11_wide_bw_chan_switch_ie_t wbcs;
+} dfsp_pcsa_t;
+
+/* DFS Proxy */
+#define DFSP_CFG_VERSION 1
+#define DFSP_FLAGS_ENAB 0x1
+typedef struct dfsp_cfg {
+ uint16 version;
+ uint16 len;
+	uint16 flags; /**< bit 0 (0x1) enables/disables the feature */
+ uint16 max_bcn_miss_dur; /**< maximum beacon miss duration before ceasing data tx */
+ uint8 mcsp_ttl; /**< remaining number of hops allowed for pcsa message */
+ uint8 bcsa_cnt; /**< repeat numbers of broadcast CSA */
+ chanspec_t mon_chan; /**< passive monitoring channel spec */
+ struct ether_addr mon_bssid; /**< broadcast means monitoring all */
+ uint16 max_bcn_miss_dur_af; /**< maximum beacon miss duration before ceasing AF tx */
+} dfsp_cfg_t;
+
+#define DFSP_UCSA_VERSION 1
+typedef struct dfsp_ucsa {
+ uint16 version;
+ uint16 len;
+ struct ether_addr address;
+ uint8 enable;
+ uint8 retry_cnt; /**< just in case host needs to control the value */
+} dfsp_ucsa_t;
+
+typedef struct dfsp_ucsa_tbl {
+ uint8 tbl_num;
+ uint8 tbl[];
+} dfsp_ucsa_tbl_t;
+
+typedef struct dfsp_stats {
+ uint32 dfsp_csainfra;
+ uint32 dfsp_csabcnmon;
+ uint32 dfsp_bcsarx;
+ uint32 dfsp_ucsarx;
+ uint32 dfsp_pcsarx;
+ uint32 dfsp_bcsatx;
+ uint32 dfsp_ucsatx;
+ uint32 dfsp_pcsatx;
+ uint32 dfsp_ucsatxfail;
+ uint32 dfsp_evtnotif;
+ uint32 dfsp_evtsuspect;
+ uint32 dfsp_evtresume;
+} dfsp_stats_t;
+#endif /* WLDFSP */
+
+#ifdef WLAWDL
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct awdl_dfsp_params_tlv {
+ uint8 type;
+ uint16 len;
+ uint8 ie[]; /* variable length */
+} BWL_POST_PACKED_STRUCT awdl_dfsp_params_tlv_t;
+#include <packed_section_end.h>
+
+#define AWDL_DFSP_BCN_TLV_VALUE_OFFSET OFFSETOF(awdl_dfsp_bcn_tlv_t, time_since_bcn)
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct awdl_dfsp_bcn_tlv {
+ uint8 type;
+ uint16 len;
+ uint16 time_since_bcn; /* last time beacon seen in ms */
+} BWL_POST_PACKED_STRUCT awdl_dfsp_bcn_tlv_t;
+#include <packed_section_end.h>
+
+#if !defined(WLDFSP)
+#define AWDL_DFSP_EVT_OFFSET OFFSETOF(awdl_dfsp_event_data_t, ie)
+#define AWDL_DFSP_EVT_FLAGS_AP_ASSOC (1 << 0)
+#define AWDL_DFSP_EVT_FLAGS_AP_BCNMON (1 << 1)
+#define AWDL_DFSP_EVT_FLAGS_PROXY_BCSA (1 << 2)
+#define AWDL_DFSP_EVT_FLAGS_PROXY_UCSA (1 << 3)
+#define AWDL_DFSP_EVT_FLAGS_PROXY_PCSA (1 << 4)
+
+typedef struct awdl_dfsp_event_data {
+ uint16 flags; /* indicate what triggers the event */
+ uint16 ie_len;
+ uint8 ie[]; /* variable length */
+} awdl_dfsp_event_data_t;
+
+typedef struct awdl_dfsp_stats {
+ uint32 dfsp_csainfra;
+ uint32 dfsp_csabcnmon;
+ uint32 dfsp_bcsarx;
+ uint32 dfsp_ucsarx;
+ uint32 dfsp_pcsarx;
+ uint32 dfsp_bcsatx;
+ uint32 dfsp_ucsatx;
+ uint32 dfsp_pcsatx;
+ uint32 dfsp_ucsatxfail;
+ uint32 dfsp_evtnotif;
+ uint32 dfsp_evtsuspect;
+ uint32 dfsp_evtresume;
+} awdl_dfsp_stats_t;
+
+/* awdl peer as a DFS Proxy */
+#define AWDL_DFSP_CFG_VERSION 1
+#define AWDL_DFSP_FLAGS_ENAB 0x1
+typedef struct awdl_dfsp_cfg {
+ uint16 version;
+ uint16 len;
+	uint16 flags; /**< bit 0 (0x1) enables/disables the feature */
+ uint16 max_bcn_miss_dur; /**< maximum beacon miss duration before ceasing data tx */
+ uint8 mcsp_ttl; /**< remaining number of hops allowed for pcsa message */
+ uint8 bcsa_cnt; /**< repeat numbers of broadcast CSA */
+ chanspec_t mon_chan; /**< passive monitoring channel spec */
+ struct ether_addr mon_bssid; /**< broadcast means monitoring all */
+ uint16 max_bcn_miss_dur_af; /**< maximum beacon miss duration before ceasing AF tx */
+} awdl_dfsp_cfg_t;
+
+#define AWDL_DFSP_UCSA_VERSION 1
+typedef struct awdl_dfsp_ucsa {
+ uint16 version;
+ uint16 len;
+ struct ether_addr address;
+ uint8 enable;
+ uint8 retry_cnt; /**< just in case host needs to control the value */
+} awdl_dfsp_ucsa_t;
+
+typedef struct awdl_dfsp_ucsa_tbl {
+ uint8 tbl_num;
+ uint8 tbl[];
+} awdl_dfsp_ucsa_tbl_t;
+#endif /* !defined(WLDFSP) */
+
+#if defined(WLDFSP)
+/* backward compatibility */
+typedef dfsp_stats_t awdl_dfsp_stats_t;
+#define AWDL_DFSP_EVT_OFFSET DFSP_EVT_OFFSET
+#define AWDL_DFSP_EVT_FLAGS_AP_ASSOC DFSP_EVT_FLAGS_AP_ASSOC
+#define AWDL_DFSP_EVT_FLAGS_AP_BCNMON DFSP_EVT_FLAGS_AP_BCNMON
+#define AWDL_DFSP_EVT_FLAGS_PROXY_BCSA DFSP_EVT_FLAGS_PROXY_BCSA
+#define AWDL_DFSP_EVT_FLAGS_PROXY_UCSA DFSP_EVT_FLAGS_PROXY_UCSA
+#define AWDL_DFSP_EVT_FLAGS_PROXY_PCSA DFSP_EVT_FLAGS_PROXY_PCSA
+#define AWDL_DFSP_CFG_VERSION 1
+#define AWDL_DFSP_FLAGS_ENAB DFSP_FLAGS_ENAB
+typedef dfsp_cfg_t awdl_dfsp_cfg_t;
+#define AWDL_DFSP_UCSA_VERSION 1
+typedef dfsp_ucsa_t awdl_dfsp_ucsa_t;
+typedef dfsp_ucsa_tbl_t awdl_dfsp_ucsa_tbl_t;
+#endif /* WLDFSP */
+
+/* AWDL additional capability indicator */
+typedef uint32 awdl_cap_mask_t;
+
+#define WLC_AWDL_CAP_SEC_PAYLOAD 0x1u
+#define WLC_AWDL_CAP_CCA_STATS 0x2u
+#define WLC_AWDL_CAP_VERSION 2u
+#define WLC_AWDL_CAP_BMAP_SIZE sizeof(awdl_cap_mask_t)
+
+typedef struct awdl_cap_info {
+ uint16 version; /* Cap structure version */
+	uint16 length; /* Length: includes version + length
+			* + variable data - 1 byte (uint8)
+			*/
+ uint8 awdl_cap[0]; /* Variable size data */
+} awdl_cap_info_t;
+
+typedef struct awdl_af_sec_payload {
+ uint16 version; /* Version of this structure */
+ uint16 length; /* Length of this entire struct including payload */
+ uint16 pri_pload_bm; /* Primary payload bitmask */
+ uint16 sec_pload_bm; /* Secondary payload bitmask */
+ uint8 payload[]; /* Secondary Payload */
+} awdl_af_sec_payload_t;
+
+typedef struct awdl_config_params {
+ uint32 version;
+ uint8 awdl_chan; /**< awdl channel */
+ uint8 guard_time; /**< Guard Time */
+ uint16 aw_period; /**< AW interval period */
+ uint16 aw_cmn_length; /**< Radio on Time AW */
+ uint16 action_frame_period; /**< awdl action frame period */
+ uint16 awdl_pktlifetime; /**< max packet life time in msec for awdl action frames */
+ uint16 awdl_maxnomaster; /**< max master missing time */
+ uint16 awdl_extcount; /**< Max extended period count for traffic */
+ uint16 aw_ext_length; /**< AW ext period */
+	uint16 awdl_nmode; /**< Operation mode of awdl interface:
+			 * 0 - Legacy mode
+			 * 1 - 11n rate only
+			 * 2 - 11n + ampdu rx/tx
+			 */
+ struct ether_addr ea; /**< destination bcast/mcast address to which action frame
+ * need to be sent
+ */
+} awdl_config_params_t;
+
+typedef struct wl_awdl_action_frame {
+ uint16 len_bytes;
+ uint8 awdl_action_frame_data[1];
+} wl_awdl_action_frame_t;
+
+#define WL_AWDL_ACTION_FRAME_FIXED_LEN OFFSETOF(wl_awdl_action_frame_t, awdl_action_frame_data)
+
+typedef struct awdl_peer_node {
+ uint32 type_state; /**< Master, slave , etc.. */
+ uint16 aw_counter; /**< avail window counter */
+ int8 rssi; /**< rssi last af was received at */
+ int8 last_rssi; /**< rssi in the last AF */
+	uint16 tx_counter; /**< count down timer to next AW */
+ uint16 tx_delay; /**< ts_hw - ts_fw */
+ uint16 period_tu;
+ uint16 aw_period; /**< AW period - aw_cmn + ext * ext_len */
+ uint16 aw_cmn_length; /**< Common AW length */
+ uint16 aw_ext_length; /**< AW_EXT length */
+ uint32 self_metrics; /**< Election Metric */
+ uint32 top_master_metrics; /**< Top Master Metric */
+ struct ether_addr addr;
+ struct ether_addr top_master;
+ uint8 dist_top; /**< Distance from Top */
+ uint8 has_private_election_params;
+ struct ether_addr private_top_master;
+ uint32 private_top_master_metric;
+ uint32 private_election_ID;
+ uint8 private_distance_from_top;
+ uint8 PAD[3];
+} awdl_peer_node_t;
+
+typedef struct awdl_peer_table {
+ uint16 version;
+ uint16 len;
+ uint8 peer_nodes[1];
+} awdl_peer_table_t;
+
+/* structure for adding advertisers from host */
+typedef struct awdl_peer_advet_add {
+ uint16 aw_counter; /* avail window counter */
+ uint16 tx_counter; /* Down counter */
+ uint16 tx_delay; /* ts_hw - ts_fw */
+ uint16 period_tu;
+ uint16 aw_cmn_length;
+ uint16 aw_ext_length;
+ uint16 aw_period;
+ int8 rssi; /* RSSI value */
+ uint8 guard_time;
+ uint8 presence_mode;
+ uint8 age; /* age of this record */
+ uint8 dist_top; /* Distance from Top */
+ uint8 PAD;
+ uint32 ms; /* time at which we received the AF */
+ uint32 self_metrics; /* election metric */
+ uint32 top_master_metrics;
+ struct ether_addr top_master; /* Top Master address */
+ struct ether_addr addr; /* Peer address to be added */
+} awdl_peer_advet_add_t;
+
+typedef struct awdl_af_hdr {
+ struct ether_addr dst_mac;
+ uint8 action_hdr[4]; /**< Category + OUI[3] */
+} awdl_af_hdr_t;
+
+typedef struct awdl_oui {
+ uint8 oui[3]; /**< default: 0x00 0x17 0xf2 */
+ uint8 oui_type; /**< AWDL: 0x08 */
+} awdl_oui_t;
+
+typedef struct awdl_hdr {
+ uint8 type; /**< 0x08 AWDL */
+ uint8 version;
+ uint8 sub_type; /**< Sub type */
+ uint8 rsvd; /**< Reserved */
+ uint32 phy_timestamp; /**< PHY Tx time */
+ uint32 fw_timestamp; /**< Target Tx time */
+} awdl_hdr_t;
+
+#ifndef AWDL_DEFAULT_MAX_PEERS
+#define AWDL_DEFAULT_MAX_PEERS 8
+#endif /* AWDL_DEFAULT_MAX_PEERS */
+
+/* AWDL AF flags for awdl_oob_af iovar */
+#define AWDL_OOB_AF_FILL_TSF_PARAMS 0x00000001
+#define AWDL_OOB_AF_FILL_SYNC_PARAMS 0x00000002
+#define AWDL_OOB_AF_FILL_ELECT_PARAMS 0x00000004
+#define AWDL_OOB_AF_PARAMS_SIZE 38
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct awdl_oob_af_params {
+ struct ether_addr bssid;
+ struct ether_addr dst_mac;
+ uint32 channel;
+ uint32 dwell_time;
+ uint32 flags;
+ uint32 pkt_lifetime;
+ uint32 tx_rate;
+ uint32 max_retries; /**< for unicast frames only */
+ uint16 payload_len;
+ uint8 payload[1]; /**< complete AF payload */
+} BWL_POST_PACKED_STRUCT awdl_oob_af_params_t;
+#include <packed_section_end.h>
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct awdl_oob_af_params_async {
+ uint32 tx_time; /**< tsf time to transmit, in usec */
+ uint16 tag; /**< packet tag */
+ struct ether_addr bssid;
+ struct ether_addr dst_mac;
+ uint32 channel;
+ uint32 dwell_time;
+ uint32 flags;
+ uint32 pkt_lifetime;
+ uint32 tx_rate;
+ uint32 max_retries; /**< for unicast frames only */
+ uint16 payload_len;
+ uint8 payload[1]; /**< complete AF payload */
+} BWL_POST_PACKED_STRUCT awdl_oob_af_params_async_t;
+#include <packed_section_end.h>
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct awdl_oob_af_params_auto {
+ uint32 tx_chan_map; /**< bitmap for the channels in the chan seq to transmit the af */
+ uint32 tx_aws_offset; /**< time to transmit from the aw start, in usec */
+ struct ether_addr bssid;
+ struct ether_addr dst_mac;
+ uint32 channel;
+ uint32 dwell_time;
+ uint32 flags;
+ uint32 pkt_lifetime;
+ uint32 tx_rate;
+ uint32 max_retries; /**< for unicast frames only */
+ uint16 payload_len;
+ uint8 payload[1]; /**< complete AF payload */
+} BWL_POST_PACKED_STRUCT awdl_oob_af_params_auto_t;
+#include <packed_section_end.h>
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct awdl_sync_params {
+ uint8 type; /**< Type */
+ uint16 param_len; /**< sync param length */
+ uint8 tx_chan; /**< tx channel */
+ uint16 tx_counter; /**< tx down counter */
+ uint8 master_chan; /**< master home channel */
+ uint8 guard_time; /**< Guard Time */
+ uint16 aw_period; /**< AW period */
+ uint16 action_frame_period; /**< awdl action frame period */
+ uint16 awdl_flags; /**< AWDL Flags */
+	uint16 aw_ext_length; /**< AW extension len */
+	uint16 aw_cmn_length; /**< AW common len */
+	uint16 aw_remaining; /**< Remaining AW length */
+	uint8 min_ext; /**< Minimum Extension count */
+	uint8 max_ext_multi; /**< Max multicast Extension count */
+	uint8 max_ext_uni; /**< Max unicast Extension count */
+	uint8 max_ext_af; /**< Max af Extension count */
+ struct ether_addr current_master; /**< Current Master mac addr */
+ uint8 presence_mode; /**< Presence mode */
+ uint8 reserved;
+ uint16 aw_counter; /**< AW seq# */
+ uint16 ap_bcn_alignment_delta; /**< AP Beacon alignment delta */
+} BWL_POST_PACKED_STRUCT awdl_sync_params_t;
+#include <packed_section_end.h>
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct awdl_channel_sequence {
+ uint8 aw_seq_len; /**< AW seq length */
+ uint8 aw_seq_enc; /**< AW seq encoding */
+	uint8 aw_seq_duplicate_cnt; /**< AW seq duplicate count */
+	uint8 seq_step_cnt; /**< Seq step count */
+ uint16 seq_fill_chan; /**< channel to fill in; 0xffff repeat current channel */
+ uint8 chan_sequence[1]; /**< Variable list of channel Sequence */
+} BWL_POST_PACKED_STRUCT awdl_channel_sequence_t;
+#include <packed_section_end.h>
+#define WL_AWDL_CHAN_SEQ_FIXED_LEN OFFSETOF(awdl_channel_sequence_t, chan_sequence)
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct awdl_election_info {
+ uint8 election_flags; /**< Election Flags */
+ uint16 election_ID; /**< Election ID */
+ uint32 self_metrics;
+} BWL_POST_PACKED_STRUCT awdl_election_info_t;
+#include <packed_section_end.h>
+
+/* This is the super set of the one above. Will retire that one once this one is established */
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct awdl_election_tree_info {
+ uint8 election_flags; /**< Election Flags */
+ uint16 election_ID; /**< Election ID */
+ uint32 self_metrics;
+ int8 close_sync_rssi_thld;
+ int8 master_rssi_boost;
+ int8 edge_sync_rssi_thld;
+ int8 close_range_rssi_thld;
+ int8 mid_range_rssi_thld;
+ uint8 max_higher_masters_close_range;
+ uint8 max_higher_masters_mid_range;
+ uint8 max_tree_depth;
+ /* read only */
+ struct ether_addr top_master; /**< top Master mac addr */
+ uint32 top_master_self_metric;
+ uint8 current_tree_depth;
+
+ uint8 edge_master_dwell_cnt;
+ struct ether_addr private_top_master; /**< private top Master mac addr */
+ uint32 private_top_master_metric;
+ uint32 private_election_ID;
+ uint8 private_distance_from_top;
+} BWL_POST_PACKED_STRUCT awdl_election_tree_info_t;
+#include <packed_section_end.h>
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct awdl_election_params_tlv {
+ uint8 type; /**< Type */
+ uint16 param_len; /**< Election param length */
+ uint8 election_flags; /**< Election Flags */
+ uint16 election_ID; /**< Election ID */
+ uint8 dist_top; /**< Distance from Top */
+ uint8 rsvd; /**< Reserved */
+ struct ether_addr top_master; /**< Top Master mac addr */
+ uint32 top_master_metrics;
+ uint32 self_metrics;
+ uint8 pad[2]; /**< Padding */
+} BWL_POST_PACKED_STRUCT awdl_election_params_tlv_t;
+#include <packed_section_end.h>
+
+/*
+ * Definition in DINGO
+ */
+typedef struct awdl_opmode_v2 {
+ uint8 mode; /* 0 - Auto; 1 - Fixed; 2 - Forced */
+ uint8 role; /* 0 - slave; 1 - non-elect master; 2 - master */
+ uint16 bcast_tu; /* Bcasting period(TU) for non-elect master */
+ struct ether_addr master; /* Address of master to sync to */
+ uint16 cur_bcast_tu; /* Current Bcasting Period(TU) */
+ uint8 master_type;
+ uint8 dist_top;
+ uint16 cluster_id;
+ uint32 tsf_offset_h;
+ uint32 tsf_offset_l;
+} awdl_opmode_v2_t;
+
+typedef struct awdl_payload {
+ uint16 len; /**< Payload length */
+ uint8 payload[1]; /**< Payload */
+} awdl_payload_t;
+
+typedef struct awdl_long_payload {
+	uint8 long_psf_period; /**< transmit every long_psf_period AWs */
+ uint8 long_psf_tx_offset; /**< delay from aw_start */
+ uint16 len; /**< Payload length */
+ uint8 payload[1]; /**< Payload */
+} awdl_long_payload_t;
+
+/* Values for awdl_opmode_t.role */
+#define AWDL_ROLE_SLAVE 0
+#define AWDL_ROLE_NE_MASTER 1
+#define AWDL_ROLE_MASTER 2
+
+#define SYNC_ROLE(role) (role & 0x0f)
+
+/* For NAN-AWDL concurrent master type */
+/*
+ * SELF: self master; mac address can be NULL, and the mgmt interface mac is used.
+ * AWDL: align to the AWDL master; the desired AWDL master mac address must be provided.
+ * NAN:  align to the NAN master; the target NAN master mac address must be provided.
+ * BOTH: align to both the NAN and AWDL masters; both mac addresses must be provided.
+ *       The caller has to guarantee that the NAN and AWDL masters are in the same tree.
+ */
+#define SYNC_MASTER_SELF 1
+#define SYNC_MASTER_AWDL 2
+#define SYNC_MASTER_NAN 3
+#define SYNC_MASTER_BOTH 4
+
+/* use the upper 4 bits as the SYNC_MASTER_TYPE */
+#define SYNC_MASTER_TYPE(role) ((role & 0xf0) >> 4)
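+
+/*
+ * Illustrative sketch: the role byte carries the sync role in the low
+ * nibble and the SYNC_MASTER_x type in the high nibble, matching the
+ * SYNC_ROLE()/SYNC_MASTER_TYPE() accessors above. Not part of the
+ * driver source.
+ */
+#if 0 /* example only */
+static uint8 example_pack_role(uint8 role, uint8 master_type)
+{
+	return (uint8)(((master_type & 0x0f) << 4) | (role & 0x0f));
+}
+/* e.g. for x = example_pack_role(AWDL_ROLE_MASTER, SYNC_MASTER_AWDL):
+ * SYNC_ROLE(x) == AWDL_ROLE_MASTER and SYNC_MASTER_TYPE(x) == SYNC_MASTER_AWDL
+ */
+#endif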
+
+/*
+ * Definitions on PHO, BIS, TRUNK & IGU branches
+ */
+typedef struct awdl_opmode_v1 {
+ uint8 mode; /* 0 - Auto; 1 - Fixed */
+ uint8 role; /* 0 - slave; 1 - non-elect master; 2 - master */
+ uint16 bcast_tu; /* Bcasting period(TU) for non-elect master */
+ struct ether_addr master; /* Address of master to sync to */
+ uint16 cur_bcast_tu; /* Current Bcasting Period(TU) */
+} awdl_opmode_v1_t;
+
+typedef awdl_opmode_v1_t awdl_opmode_t;
+
+typedef union awdl_opmode_un {
+ awdl_opmode_v1_t opmode_v1;
+ awdl_opmode_v2_t opmode_v2;
+} awdl_opmode_un_t;
+
+typedef struct awdl_extcount {
+ uint8 minExt; /**< Min extension count */
+ uint8 maxExtMulti; /**< Max extension count for mcast packets */
+ uint8 maxExtUni; /**< Max extension count for unicast packets */
+ uint8 maxAfExt; /**< Max extension count */
+} awdl_extcount_t;
+
+#define AWDL_OPMODE_AUTO 0
+#define AWDL_OPMODE_FIXED 1
+#define AWDL_OPMODE_FORCED 2 /* Fixed Mode with Forced_mode on */
+
+/** peer add/del operation */
+typedef struct awdl_peer_op {
+ uint8 version;
+ uint8 opcode; /**< see opcode definition */
+ struct ether_addr addr;
+ uint8 mode;
+ /* add other fixed fields here and increase the version number */
+ /* 0 or more TLVs at the end */
+} awdl_peer_op_t;
+
+/** peer op table */
+typedef struct awdl_peer_op_tbl {
+ uint16 len; /**< length */
+ uint8 tbl[1]; /**< Peer table */
+} awdl_peer_op_tbl_t;
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct awdl_peer_op_node {
+ struct ether_addr addr;
+ uint32 flags; /**< Flags to indicate various states */
+ uint16 chanseq_len;
+ uint8 chanseq[1];
+} BWL_POST_PACKED_STRUCT awdl_peer_op_node_t;
+#include <packed_section_end.h>
+
+/* awdl_peer_op_node_t flags */
+#define AWDL_PEER_NODE_OP_FLAG_HT 0x01
+#define AWDL_PEER_NODE_OP_FLAG_AMPDU 0x02
+#define AWDL_PEER_NODE_OP_FLAG_PM 0x04
+#define AWDL_PEER_NODE_OP_FLAG_ABAND 0x08
+#define AWDL_PEER_NODE_OP_FLAG_QOS 0x10
+#define AWDL_PEER_NODE_OP_FLAG_AWDL 0x20
+#define AWDL_PEER_NODE_OP_FLAG_VHT 0x40
+#define AWDL_PEER_OP_CUR_VER 0
+
+#define AWDL_STATS_VERSION_3 3
+#define AWDL_STATS_CURRENT_VERSION AWDL_STATS_VERSION_3
+
+/** AWDL related statistics */
+typedef struct awdl_stats_core_v3 {
+ uint32 slotstart; /* AW slot_start */
+ uint32 slotend; /* AW slot_end */
+ uint32 slotskip; /* AW slot_skip */
+ uint32 slotstart_partial; /* AW slot resume */
+ uint32 slotend_partial; /* AW slot pre-empt */
+ uint32 psfstart; /* PSF slot_start */
+ uint32 psfend; /* PSF slot_end */
+ uint32 psfskip; /* PSF slot_skip */
+ uint32 psfreqfail; /* PSF timeslot register fail */
+ uint32 psfcnt; /* Number of PSFs */
+ uint32 micnt; /* Number of MI frames */
+ uint32 chansw; /* Total number of chan switches */
+ uint32 awrealignfail; /* No of awrealign failures */
+ uint32 datatx;
+ uint32 datarx;
+ uint32 txdrop;
+ uint32 rxdrop;
+ uint32 monrx;
+ uint32 txsupr;
+ uint32 rx80211;
+} awdl_stats_core_v3_t;
+
+typedef struct awdl_stats_cmn_v3 {
+ uint32 afrx;
+ uint32 aftx;
+ uint32 lostmaster;
+ uint32 misalign;
+ uint32 aws;
+ uint32 aw_dur;
+ uint32 debug;
+ uint32 afrxdrop;
+ uint32 awdrop;
+ uint32 noawchansw;
+ uint32 peeropdrop;
+ uint16 chancal; /* Used as a counter to track AWDL slots < 60TU */
+ uint16 nopreawint;
+ uint32 awdropchsw;
+ uint32 nopreawchsw;
+ uint32 nopreawprep;
+ uint32 aws_misalign;
+ uint32 txeval_fail;
+ uint32 infra_reqrcq;
+ uint32 awdl_reqtxq;
+ uint32 psfchanswtchskip; /* # chan. sw skipped during PSF xmit */
+ uint32 psfstateupdskip; /* # of PM updates skipped */
+ uint32 infra_offchpsf;
+ uint32 awdl_offchpsf;
+ uint32 chseqreq;
+ uint32 peerdelreq;
+ uint32 awend;
+ uint32 awrealign; /* No of AW realigns */
+	uint32 awchmismatch; /* Channel mismatch between expected ch and scheduled ch */
+} awdl_stats_cmn_v3_t;
+
+typedef struct awdl_stats_v3 {
+ uint16 version;
+ uint16 length;
+ awdl_stats_cmn_v3_t cmnstats;
+ awdl_stats_core_v3_t corestats[MAX_NUM_D11CORES];
+ awdl_dfsp_stats_t dfspstats;
+} awdl_stats_v3_t;
+
+typedef struct awdl_stats_v2 {
+ uint32 afrx;
+ uint32 aftx;
+ uint32 datatx;
+ uint32 datarx;
+ uint32 txdrop;
+ uint32 rxdrop;
+ uint32 monrx;
+ uint32 lostmaster;
+ uint32 misalign;
+ uint32 aws;
+ uint32 aw_dur;
+ uint32 debug;
+ uint32 txsupr;
+ uint32 afrxdrop;
+ uint32 awdrop;
+ uint32 noawchansw;
+ uint32 rx80211;
+ uint32 peeropdrop;
+ uint16 chancal;
+ uint16 nopreawint;
+ uint32 awdropchsw;
+ uint32 nopreawchsw;
+ uint32 nopreawprep;
+ uint32 aws_misalign;
+ uint32 txeval_fail;
+ uint32 infra_reqrcq;
+ uint32 awdl_reqtxq;
+ uint32 psfchanswtchskip; /* # chan. sw skipped during PSF xmit */
+ uint32 psfstateupdskip; /* # of PM updates skipped */
+ uint32 dfsp_csainfra;
+ uint32 dfsp_csabcnmon;
+ uint32 dfsp_bcsarx;
+ uint32 dfsp_ucsarx;
+ uint32 dfsp_pcsarx;
+ uint32 dfsp_bcsatx;
+ uint32 dfsp_ucsatx;
+ uint32 dfsp_pcsatx;
+ uint32 dfsp_ucsatxfail;
+ uint32 dfsp_evtnotif;
+ uint32 dfsp_evtsuspect;
+ uint32 dfsp_evtresume;
+} awdl_stats_v2_t;
+
+typedef struct awdl_stats_v1 {
+ uint32 afrx;
+ uint32 aftx;
+ uint32 datatx;
+ uint32 datarx;
+ uint32 txdrop;
+ uint32 rxdrop;
+ uint32 monrx;
+ uint32 lostmaster;
+ uint32 misalign;
+ uint32 aws;
+ uint32 aw_dur;
+ uint32 debug;
+ uint32 txsupr;
+ uint32 afrxdrop;
+ uint32 awdrop;
+ uint32 noawchansw;
+ uint32 rx80211;
+ uint32 peeropdrop;
+ uint16 chancal;
+ uint16 nopreawint;
+ uint32 awdropchsw;
+ uint32 nopreawchsw;
+ uint32 nopreawprep;
+ uint32 infra_offchpsf;
+ uint32 awdl_offchpsf;
+ uint32 pmnoack;
+ uint32 scanreq;
+ uint32 chseqreq;
+ uint32 peerdelreq;
+ uint32 aws_misalign;
+ uint32 txeval_fail;
+ uint32 infra_reqrcq;
+ uint32 awdl_reqtxq;
+ uint32 psfchanswtchskip; /* # chan. sw skipped during PSF xmit */
+ uint32 psfstateupdskip; /* # of PM updates skipped */
+ uint32 dfsp_csainfra;
+ uint32 dfsp_csabcnmon;
+ uint32 dfsp_bcsarx;
+ uint32 dfsp_ucsarx;
+ uint32 dfsp_pcsarx;
+ uint32 dfsp_bcsatx;
+ uint32 dfsp_ucsatx;
+ uint32 dfsp_pcsatx;
+ uint32 dfsp_ucsatxfail;
+ uint32 dfsp_evtnotif;
+ uint32 dfsp_evtsuspect;
+ uint32 dfsp_evtresume;
+} awdl_stats_v1_t;
+
+typedef struct awdl_uct_stats {
+ uint32 aw_proc_in_aw_sched;
+ uint32 aw_upd_in_pre_aw_proc;
+ uint32 pre_aw_proc_in_aw_set;
+ uint32 ignore_pre_aw_proc;
+ uint32 miss_pre_aw_intr;
+ uint32 aw_dur_zero;
+ uint32 aw_sched;
+ uint32 aw_proc;
+ uint32 pre_aw_proc;
+ uint32 not_init;
+ uint32 null_awdl;
+} awdl_uct_stats_t;
+
+/* peer opcode */
+#define AWDL_PEER_OP_ADD 0
+#define AWDL_PEER_OP_DEL 1
+#define AWDL_PEER_OP_INFO 2
+#define AWDL_PEER_OP_UPD 3
+
+/**AWDL Piggy backed scan */
+typedef struct wl_awdl_pscan_params {
+ wlc_ssid_t ssid; /**< default: {0, ""} */
+ struct ether_addr bssid; /**< default: bcast */
+ uint8 scan_type; /**< active or passive, 0 use default */
+ uint8 pad; /**< pad */
+ int32 nprobes; /**< -1 use default, number of probes per channel */
+	int32 aw_seq_num; /**< count of AW sequence numbers to be piggy-backed for scan */
+ int32 nssid; /**< count of ssid in list */
+ int32 rsvd; /**< Reserved */
+	uint16 aw_counter_list[1]; /**< List containing, in the following order:
+					* - the AW seq numbers
+					* - the SSIDs, 4 byte aligned
+					*/
+} wl_awdl_pscan_params_t;
+
+typedef struct wl_pscan_params {
+ uint32 version;
+ uint16 action; /**< PSCAN action type: FW or Host initiated pscan or abort pscan */
+ uint16 sync_id;
+ wl_awdl_pscan_params_t params;
+} wl_pscan_params_t;
+
+#define WL_AWDL_PSCAN_PARAMS_FIXED_SIZE (OFFSETOF(wl_awdl_pscan_params_t, aw_counter_list))
+#define WL_AWDL_MAX_NUM_AWSEQ 64
+#define AWDL_PSCAN_REQ_VERSION 1
+
+/** awdl pscan action type values */
+#define AWDL_HOST_PSCAN 0 /**< Host Initiated PSCAN */
+#define AWDL_FW_PSCAN 1 /**< Firmware Initiated PSCAN */
+#define AWDL_ABORT_PSCAN 2 /**< Abort any PSCAN */
+
+/* "aftxmode" iovar values */
+#define AWDL_AFTXMODE_AUTO 0 /**< Send AF on AWDL channel best effort while outside AW */
+
+/* --- Deprecated ---- */
+#define AWDL_AFTXMODE_INFRA 1 /**< Send AF on Infra channel while outside AW */
+#define AWDL_AFTXMODE_CUR_CHAN 2 /**< Send AF on Current channel while outside AW */
+/* --- Deprecated ---- */
+
+#define AWDL_AFTXMODE_SUPPRESS 3 /**< Suppress AF Tx */
+#define AWDL_AFTXMODE_SYNC_PREAW 4 /**< Send AF on master channel/s always in pre AW time */
+#define AWDL_AFTXMODE_LAST 4 /**< Last AWDL_AFTXMODE_XXX */
+
+typedef struct awdl_pw_opmode {
+ struct ether_addr top_master; /**< Peer mac addr */
+ uint8 mode; /**< 0 - normal; 1 - fast mode */
+} awdl_pw_opmode_t;
+
+/** i/f request */
+typedef struct wl_awdl_if2 {
+ int32 cfg_idx;
+ int32 up;
+ struct ether_addr bssid;
+ struct ether_addr if_addr;
+} wl_awdl_if2_t;
+
+typedef struct _aw_start {
+ uint8 role;
+ struct ether_addr master;
+ uint8 aw_seq_num;
+} aw_start_t;
+
+typedef struct _aw_extension_start {
+ uint8 aw_ext_num;
+} aw_extension_start_t;
+
+typedef struct _awdl_peer_state {
+ struct ether_addr peer;
+ uint8 state;
+} awdl_peer_state_t;
+#define AWDL_PEER_STATE_OPEN 0
+#define AWDL_PEER_STATE_CLOSE 1
+
+typedef struct _awdl_sync_state_changed {
+ uint8 new_role;
+ struct ether_addr master;
+} awdl_sync_state_changed_t;
+
+typedef struct _awdl_sync_state {
+ uint8 role;
+ struct ether_addr master;
+ uint8 PAD;
+ uint32 continuous_election_enable;
+} awdl_sync_state_t;
+
+typedef struct _awdl_aw_ap_alignment {
+ uint32 enabled;
+ int32 offset;
+ uint32 align_on_dtim;
+} awdl_aw_ap_alignment_t;
+
+typedef struct _awdl_peer_stats {
+ uint32 version;
+ struct ether_addr address;
+ uint8 clear;
+ int8 rssi;
+ int8 avg_rssi;
+ uint8 txRate;
+ uint8 rxRate;
+ uint8 PAD;
+ uint32 numTx;
+ uint32 numTxRetries;
+ uint32 numTxFailures;
+} awdl_peer_stats_t;
+
+#define MAX_NUM_AWDL_KEYS 4
+typedef struct _awdl_aes_key {
+ uint32 version;
+ int32 enable;
+ struct ether_addr awdl_peer;
+ uint8 keys[MAX_NUM_AWDL_KEYS][16];
+ uint8 PAD[2];
+} awdl_aes_key_t;
+
+/* AWDL CCA Stats */
+
+/* CCA Counters Delta @ Chan. boundary sent by FW to host */
+
+typedef struct wlc_awdl_cca_stats {
+ chanspec_t chanspec; /* Chanspec when CCA stats were read */
+ uint16 pad;
+ uint32 sample_dur; /* Duration in MS for which stats were sampled */
+ uint32 congest_ibss; /* Delta between IBSS - TxDUR */
+ uint32 congest_obss; /* Delta between OBSS - noctg */
+ uint32 interference; /* Delta between nopkt */
+} wlc_awdl_cca_stats_t;
+
+/* AWDL TLVs */
+typedef enum wl_awdl_tlv {
+ WL_AWDL_XTLV_CCA_STATS = 0x1u /* CCA Stats sent to host on chan. boundary */
+} wl_awdl_tlv_t;
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct awdl_scan_event_data {
+ uint8 scan_usage; /**< Kind of scan in progress */
+ uint8 nscan_chans; /**< number of channels to be scanned */
+ uint8 ncached_chans; /**< number of cached channels */
+ uint8 flags;
+ uint8 chan_list[1]; /**< List of cached channels followed by
+ * channels to be scanned
+ */
+} BWL_POST_PACKED_STRUCT awdl_scan_event_data_t;
+
+/* Flags
+ * bit-0 : Used to indicate if the flags/pad is valid
+ * bit-1 : Slice-0 is blocked on scan.
+ * bit-2 : Slice-1 is blocked on scan.
+ * bit-7 : reserved
+ */
+
+#define AWDL_SCAN_EVT_DATA_FLAGS_VALID (1<<0)
+#define AWDL_SCAN_EVT_DATA_FLAGS_SLICE0_BLOCKED (1<<1)
+#define AWDL_SCAN_EVT_DATA_FLAGS_SLICE1_BLOCKED (1<<2)
+
+#include <packed_section_end.h>
+
+/*
+ * This structure will be supported only for pre-Koala builds.
+ * From Koala onwards use local adv_struct_master_local_t structure
+ * defined in wlc_awdl.c
+ */
+
+#ifndef WLAWDL_USE_MASTER_LOCAL
+typedef struct adv_struct_norm {
+ uint16 aw_counter; /**< avail window counter */
+ uint16 tx_counter;
+ uint16 tx_delay; /**< ts_hw - ts_fw */
+ uint16 period_tu;
+ uint16 aw_cmn_length;
+ uint16 aw_ext_length;
+ uint16 aw_period;
+ uint16 chan_seq0;
+ int8 rssi; /**< averaged RSSI value */
+ uint8 guard_time;
+ uint8 presence_mode;
+ uint8 age; /**< age of this record */
+ uint8 dist_top; /**< Distance from Top */
+ uint8 PAD[3];
+ uint32 ms; /**< time at which we received the AF */
+ uint32 self_metrics; /**< election metric */
+ uint32 top_master_metrics;
+ struct ether_addr top_master; /**< Top Master address */
+ struct ether_addr addr;
+ struct rssi_struct rssi_win; /**< RSSI values */
+} adv_struct_norm_t;
+#endif /* WLAWDL_USE_MASTER_LOCAL */
+
+/* AWDL configuration/operation flags */
+#define AWDL_CONFIG_NON_AWDL_INTERFACE_FLOW_CONTROL 0x00000001
+#define AWDL_CONFIG_AWDL_INTERFACE_UPDATE 0x00000002
+/* While scanning configure channel from chan seq */
+#define AWDL_CONFIG_CHANNEL_HOP_FROM_CHAN_SEQ 0x00000004
+#define AWDL_CONFIG_TX_OFF_BEFORE_INFRA_BEACON 0x00000008
+#define AWDL_CONFIG_SUPP_PSFTX_PREAW 0x00000010
+#define AWDL_CONFIG_SUPP_PSFTX_NOACK 0x00000020
+#define AWDL_CONFIG_SUPP_PSFTX_CHANSW 0x00000040
+/*
+ * awdl ranging
+ * all multi-byte fields are in little-endian order
+ */
+
+/* Bit defines for global flags */
+#define AWDL_RANGING_ENABLE (1<<0) /**< Global enable bit */
+#define AWDL_RANGING_RESPOND (1<<1) /**< Enable responding to peer's range req */
+#define AWDL_RANGING_RANGED (1<<2) /**< V2: Report to host if ranged as target */
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct awdl_ranging_config {
+ uint16 flags;
+ uint8 sounding_count; /**< self initiated ranging: number of probes per peer */
+ uint8 reserved;
+ struct ether_addr allow_mac;
+ /**< peer initiated ranging: the allowed peer mac
+ * address, a unicast (for one peer) or
+ * a broadcast for all. Setting it to all zeros
+	 * means responding to none, same as not setting
+ * the flag bit AWDL_RANGING_RESPOND
+ */
+} BWL_POST_PACKED_STRUCT awdl_ranging_config_t;
+#include <packed_section_end.h>
+
+/* list of peers for self initiated ranging */
+/* Bit defines for per peer flags */
+#define AWDL_RANGING_REPORT (1<<0) /**< V2: Enable reporting range to target */
+#define AWDL_SEQ_EN (1<<1)
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct awdl_ranging_peer {
+ chanspec_t ranging_chanspec; /**< desired chanspec for this peer */
+ uint16 flags; /**< per peer flags, report or not */
+ struct ether_addr ea; /**< peer MAC address */
+} BWL_POST_PACKED_STRUCT awdl_ranging_peer_t;
+#include <packed_section_end.h>
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct awdl_ranging_list {
+ uint8 count; /**< number of MAC addresses */
+ uint8 num_peers_done; /**< host set to 0, when read, shows number of peers
+ * completed, success or fail
+ */
+ uint8 num_aws; /**< time period to do the ranging, specified in aws */
+ awdl_ranging_peer_t rp[1]; /**< variable length array of peers */
+} BWL_POST_PACKED_STRUCT awdl_ranging_list_t;
+#include <packed_section_end.h>
+
+/* ranging results, a list for self initiated ranging and one for peer initiated ranging */
+/* There will be one structure for each peer */
+#define AWDL_RANGING_STATUS_SUCCESS 1
+#define AWDL_RANGING_STATUS_FAIL 2
+#define AWDL_RANGING_STATUS_TIMEOUT 3
+#define AWDL_RANGING_STATUS_ABORT 4 /**< with partial results if sounding count > 0 */
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct awdl_ranging_result {
+ uint8 status; /**< 1: Success, 2: Fail 3: Timeout 4: Aborted */
+ uint8 sounding_count; /**< number of measurements completed (0 = failure) */
+ struct ether_addr ea; /**< peer MAC address */
+ chanspec_t ranging_chanspec; /**< Chanspec where the ranging was done */
+ uint32 timestamp; /**< 32bits of the TSF timestamp ranging was completed at */
+ uint32 distance; /**< mean distance in meters expressed as Q4 number.
+ * Only valid when sounding_count > 0. Examples:
+ * 0x08 = 0.5m
+ * 0x10 = 1m
+ * 0x18 = 1.5m
+ * set to 0xffffffff to indicate invalid number
+ */
+ int32 rtt_var; /**< standard deviation in 10th of ns of RTTs measured.
+ * Only valid when sounding_count > 0
+ */
+} BWL_POST_PACKED_STRUCT awdl_ranging_result_t;
+#include <packed_section_end.h>
+#define AWDL_RANGING_TYPE_HOST 1
+#define AWDL_RANGING_TYPE_PEER 2
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct awdl_ranging_event_data {
+	uint8 type; /**< 1: Result of host initiated ranging;
+		     * V2: 2: Result of peer initiated ranging
+		     */
+ uint8 reserved;
+ uint8 success_count; /**< number of peers completed successfully */
+ uint8 count; /**< number of peers in the list */
+ awdl_ranging_result_t rr[1]; /**< variable array of ranging peers */
+} BWL_POST_PACKED_STRUCT awdl_ranging_event_data_t;
+typedef BWL_PRE_PACKED_STRUCT struct awdl_ftm_ranging_config {
+ uint16 flags; /* config flags */
+ uint8 num_aws; /* time period to do the ranging, specified in aws */
+} BWL_POST_PACKED_STRUCT awdl_ftm_ranging_config_t;
+#include <packed_section_end.h>
+
+/** awdl event config bit mask definitions */
+#define AWDL_EVENT_AW_EXT 0x01
+#define AWDL_EVENT_RANGING 0x02
+#define AWDL_RANGING_MAX_PEERS 8
+#endif /* WLAWDL */
+
+/*
+ * ptk_start: iovar to start 4-way handshake for secured ranging
+*/
+
+/* ptk negotiation security type - determines negotiation parameters */
+typedef enum {
+ WL_PTK_START_SEC_TYPE_PMK = 1
+} wl_ptk_start_sec_type_t;
+
+/* ptk negotiation role */
+typedef enum {
+ ROLE_NONE = 0x0,
+ ROLE_AUTH = 0x1,
+ ROLE_SUP = 0x2,
+ ROLE_STATIC = 0x3,
+ ROLE_INVALID = 0xff,
+ WL_PTK_START_ROLE_NONE = ROLE_NONE,
+ WL_PTK_START_ROLE_AUTH = ROLE_AUTH,
+ WL_PTK_START_ROLE_SUP = ROLE_SUP,
+ WL_PTK_START_ROLE_STATIC = ROLE_STATIC,
+ WL_PTK_START_ROLE_INVALID = ROLE_INVALID
+} wl_ptk_start_role_t;
+
+typedef struct wl_ptk_start_tlv {
+ uint16 id;
+ uint16 len;
+ uint8 data[1];
+} wl_ptk_start_tlv_t;
+
+typedef enum {
+ WL_PTK_START_TLV_PMK = 1 /* uint8[] */
+} wl_ptk_start_tlv_type;
+
+typedef enum {
+ WL_PTK_START_FLAG_NO_DATA_PROT = 1, /* data frame protection disabled */
+ WL_PTK_START_FLAG_GEN_FTM_TPK = 2 /* Generate FTM Toast/Seq Protection Key */
+} wl_ptk_start_flags_t;
+
+typedef struct wl_ptk_start_iov {
+ uint16 version;
+ uint16 len; /* length of entire iov from version */
+ wl_ptk_start_flags_t flags;
+ wl_ptk_start_sec_type_t sec_type;
+ wl_ptk_start_role_t role;
+ struct ether_addr peer_addr;
+ uint16 pad; /* reserved/32 bit alignment */
+ wl_ptk_start_tlv_t tlvs[1];
+} wl_ptk_start_iov_t;
+
+/*
+ * Dongle pattern matching filter.
+ */
+
+#define MAX_WAKE_PACKET_CACHE_BYTES 128 /**< Maximum cached wake packet */
+
+#define MAX_WAKE_PACKET_BYTES (DOT11_A3_HDR_LEN + \
+ DOT11_QOS_LEN + \
+ sizeof(struct dot11_llc_snap_header) + \
+ ETHER_MAX_DATA)
+
+typedef struct pm_wake_packet {
+	uint32 status; /**< Is the wake reason a packet (if all the other fields are valid) */
+ uint32 pattern_id; /**< Pattern ID that matched */
+ uint32 original_packet_size;
+ uint32 saved_packet_size;
+ uint8 packet[MAX_WAKE_PACKET_CACHE_BYTES];
+} pm_wake_packet_t;
+
+/* Packet filter types. Currently, only pattern matching is supported. */
+typedef enum wl_pkt_filter_type {
+ WL_PKT_FILTER_TYPE_PATTERN_MATCH=0, /**< Pattern matching filter */
+ WL_PKT_FILTER_TYPE_MAGIC_PATTERN_MATCH=1, /**< Magic packet match */
+ WL_PKT_FILTER_TYPE_PATTERN_LIST_MATCH=2, /**< A pattern list (match all to match filter) */
+ WL_PKT_FILTER_TYPE_ENCRYPTED_PATTERN_MATCH=3, /**< SECURE WOWL magic / net pattern match */
+ WL_PKT_FILTER_TYPE_APF_MATCH=4, /* Android packet filter match */
+ WL_PKT_FILTER_TYPE_PATTERN_MATCH_TIMEOUT=5, /* Pattern matching filter with timeout event */
+	WL_PKT_FILTER_TYPE_IMMEDIATE_PATTERN_MATCH=6, /* Immediate pattern matching filter */
+ WL_PKT_FILTYER_TYPE_MAX = 7, /* Pkt filter type MAX */
+} wl_pkt_filter_type_t;
+
+#define WL_PKT_FILTER_TYPE wl_pkt_filter_type_t /* backward compatibility; remove */
+
+/* String mapping for types that may be used by applications or debug */
+#define WL_PKT_FILTER_TYPE_NAMES \
+ { "PATTERN", WL_PKT_FILTER_TYPE_PATTERN_MATCH }, \
+ { "MAGIC", WL_PKT_FILTER_TYPE_MAGIC_PATTERN_MATCH }, \
+ { "PATLIST", WL_PKT_FILTER_TYPE_PATTERN_LIST_MATCH }, \
+ { "SECURE WOWL", WL_PKT_FILTER_TYPE_ENCRYPTED_PATTERN_MATCH }, \
+ { "APF", WL_PKT_FILTER_TYPE_APF_MATCH }, \
+ { "PATTERN TIMEOUT", WL_PKT_FILTER_TYPE_PATTERN_MATCH_TIMEOUT }, \
+ { "IMMEDIATE", WL_PKT_FILTER_TYPE_IMMEDIATE_PATTERN_MATCH }
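+
+/*
+ * Illustrative sketch: the name table above is an initializer list, so a
+ * debug helper can expand it into a lookup array. Not part of the driver
+ * source.
+ */
+#if 0 /* example only */
+static const char *example_pkt_filter_type_name(uint32 type)
+{
+	static const struct {
+		const char *name;
+		uint32 type;
+	} names[] = { WL_PKT_FILTER_TYPE_NAMES };
+	uint32 i;
+	for (i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
+		if (names[i].type == type)
+			return names[i].name;
+	}
+	return "UNKNOWN";
+}
+#endif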
+
+/** A secured WOWL packet is encrypted and must be decrypted before checking for a filter match */
+typedef struct wl_pkt_decrypter {
+ uint8* (*dec_cb)(void* dec_ctx, const void *sdu, int sending);
+ void* dec_ctx;
+} wl_pkt_decrypter_t;
+
+/**
+ * Pattern matching filter. Specifies an offset within received packets to
+ * start matching, the pattern to match, the size of the pattern, and a bitmask
+ * that indicates which bits within the pattern should be matched.
+ */
+typedef struct wl_pkt_filter_pattern {
+ uint32 offset; /**< Offset within received packet to start pattern matching.
+ * Offset '0' is the first byte of the ethernet header.
+ */
+ uint32 size_bytes; /**< Size of the pattern. Bitmask must be the same size. */
+	uint8 mask_and_pattern[]; /**< Variable length mask and pattern data. Mask starts
+				* at offset 0; the pattern immediately follows the mask.
+				* For a secured pattern, the decrypter pointer is placed
+				* at the beginning, with the mask and pattern shifted
+				* correspondingly.
+				*/
+} wl_pkt_filter_pattern_t;
+
+/** A pattern list is a numerically specified list of modified pattern structures. */
+typedef struct wl_pkt_filter_pattern_listel {
+ uint16 rel_offs; /**< Offset to begin match (relative to 'base' below) */
+ uint16 base_offs; /**< Base for offset (defined below) */
+ uint16 size_bytes; /**< Size of mask/pattern */
+ uint16 match_flags; /**< Addition flags controlling the match */
+ uint8 mask_and_data[]; /**< Variable length mask followed by data, each size_bytes */
+} wl_pkt_filter_pattern_listel_t;
+
+typedef struct wl_pkt_filter_pattern_list {
+ uint8 list_cnt; /**< Number of elements in the list */
+ uint8 PAD1[1]; /**< Reserved (possible version: reserved) */
+ uint16 totsize; /**< Total size of this pattern list (includes this struct) */
+ uint8 patterns[]; /**< Variable number of wl_pkt_filter_pattern_listel_t elements */
+} wl_pkt_filter_pattern_list_t;
+
+typedef struct wl_apf_program {
+ uint16 version;
+ uint16 instr_len; /* number of instruction blocks */
+ uint32 inst_ts; /* program installation timestamp */
+ uint8 instrs[]; /* variable length instructions */
+} wl_apf_program_t;
+
+typedef struct wl_pkt_filter_pattern_timeout {
+ uint32 offset; /* Offset within received packet to start pattern matching.
+ * Offset '0' is the first byte of the ethernet header.
+ */
+ uint32 size_bytes; /* Size of the pattern. Bitmask must be the same size. */
+ uint32 timeout; /* Timeout(seconds) */
+ uint8 mask_and_pattern[]; /* Variable length mask and pattern data.
+ * mask starts at offset 0. Pattern
+ * immediately follows mask.
+ */
+} wl_pkt_filter_pattern_timeout_t;
+
+/** IOVAR "pkt_filter_add" parameter. Used to install packet filters. */
+typedef struct wl_pkt_filter {
+ uint32 id; /**< Unique filter id, specified by app. */
+ uint32 type; /**< Filter type (WL_PKT_FILTER_TYPE_xxx). */
+ uint32 negate_match; /**< Negate the result of filter matches */
+ union { /* Filter definitions */
+ wl_pkt_filter_pattern_t pattern; /**< Pattern matching filter */
+ wl_pkt_filter_pattern_list_t patlist; /**< List of patterns to match */
+ wl_apf_program_t apf_program; /* apf program */
+ wl_pkt_filter_pattern_timeout_t pattern_timeout; /* Pattern timeout event filter */
+ } u;
+ /* Do NOT add structure members after the filter definitions, since they
+ * may include variable length arrays.
+ */
+} wl_pkt_filter_t;
+
+/** IOVAR "tcp_keep_set" parameter. Used to install tcp keep_alive stuff. */
+typedef struct wl_tcp_keep_set {
+ uint32 val1;
+ uint32 val2;
+} wl_tcp_keep_set_t;
+
+#define WL_PKT_FILTER_FIXED_LEN OFFSETOF(wl_pkt_filter_t, u)
+#define WL_PKT_FILTER_PATTERN_FIXED_LEN OFFSETOF(wl_pkt_filter_pattern_t, mask_and_pattern)
+#define WL_PKT_FILTER_PATTERN_LIST_FIXED_LEN OFFSETOF(wl_pkt_filter_pattern_list_t, patterns)
+#define WL_PKT_FILTER_PATTERN_LISTEL_FIXED_LEN \
+ OFFSETOF(wl_pkt_filter_pattern_listel_t, mask_and_data)
+#define WL_PKT_FILTER_PATTERN_TIMEOUT_FIXED_LEN \
+ OFFSETOF(wl_pkt_filter_pattern_timeout_t, mask_and_pattern)
+
+#define WL_APF_INTERNAL_VERSION 1
+#define WL_APF_PROGRAM_MAX_SIZE (2 * 1024)
+#define WL_APF_PROGRAM_FIXED_LEN OFFSETOF(wl_apf_program_t, instrs)
+#define WL_APF_PROGRAM_LEN(apf_program) \
+ ((apf_program)->instr_len * sizeof((apf_program)->instrs[0]))
+#define WL_APF_PROGRAM_TOTAL_LEN(apf_program) \
+ (WL_APF_PROGRAM_FIXED_LEN + WL_APF_PROGRAM_LEN(apf_program))
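+
+/*
+ * Illustrative sketch: composing a "pkt_filter_add" buffer for a simple
+ * pattern filter that matches the IPv4 ethertype (0x0800 at offset 12 of
+ * the ethernet header). Not part of the driver source; calloc() stands in
+ * for the host allocator and the filter id is arbitrary.
+ */
+#if 0 /* example only */
+static wl_pkt_filter_t *example_build_ipv4_filter(uint32 id)
+{
+	const uint8 mask[2] = { 0xff, 0xff };
+	const uint8 pattern[2] = { 0x08, 0x00 };
+	size_t len = WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_FIXED_LEN +
+		sizeof(mask) + sizeof(pattern);
+	wl_pkt_filter_t *f = (wl_pkt_filter_t *)calloc(1, len);
+	if (f == NULL)
+		return NULL;
+	f->id = id;
+	f->type = WL_PKT_FILTER_TYPE_PATTERN_MATCH;
+	f->negate_match = 0;
+	f->u.pattern.offset = 12;	/* ethertype field of the ethernet header */
+	f->u.pattern.size_bytes = sizeof(pattern);
+	/* mask first, then the pattern immediately after it */
+	memcpy(f->u.pattern.mask_and_pattern, mask, sizeof(mask));
+	memcpy(f->u.pattern.mask_and_pattern + sizeof(mask), pattern, sizeof(pattern));
+	return f;
+}
+#endif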
+
+/** IOVAR "pkt_filter_enable" parameter. */
+typedef struct wl_pkt_filter_enable {
+ uint32 id; /**< Unique filter id */
+ uint32 enable; /**< Enable/disable bool */
+} wl_pkt_filter_enable_t;
+
+/** IOVAR "pkt_filter_list" parameter. Used to retrieve a list of installed filters. */
+typedef struct wl_pkt_filter_list {
+ uint32 num; /**< Number of installed packet filters */
+ uint8 filter[]; /**< Variable array of packet filters. */
+} wl_pkt_filter_list_t;
+
+#define WL_PKT_FILTER_LIST_FIXED_LEN OFFSETOF(wl_pkt_filter_list_t, filter)
+
+/** IOVAR "pkt_filter_stats" parameter. Used to retrieve debug statistics. */
+typedef struct wl_pkt_filter_stats {
+ uint32 num_pkts_matched; /**< # filter matches for specified filter id */
+ uint32 num_pkts_forwarded; /**< # packets fwded from dongle to host for all filters */
+ uint32 num_pkts_discarded; /**< # packets discarded by dongle for all filters */
+} wl_pkt_filter_stats_t;
+
+/** IOVAR "pkt_filter_ports" parameter. Configure TCP/UDP port filters. */
+typedef struct wl_pkt_filter_ports {
+ uint8 version; /**< Be proper */
+ uint8 reserved; /**< Be really proper */
+ uint16 count; /**< Number of ports following */
+ /* End of fixed data */
+ uint16 ports[1]; /**< Placeholder for ports[<count>] */
+} wl_pkt_filter_ports_t;
+
+#define WL_PKT_FILTER_PORTS_FIXED_LEN OFFSETOF(wl_pkt_filter_ports_t, ports)
+
+#define WL_PKT_FILTER_PORTS_VERSION 0
+#if defined(WL_PKT_FLTR_EXT) && !defined(WL_PKT_FLTR_EXT_DISABLED)
+#define WL_PKT_FILTER_PORTS_MAX 256
+#else
+#define WL_PKT_FILTER_PORTS_MAX 128
+#endif /* WL_PKT_FLTR_EXT && !WL_PKT_FLTR_EXT_DISABLED */
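+
+/*
+ * Illustrative sketch: building a "pkt_filter_ports" buffer from a host
+ * port list. Not part of the driver source; calloc() stands in for the
+ * host allocator.
+ */
+#if 0 /* example only */
+static wl_pkt_filter_ports_t *
+example_build_port_filter(const uint16 *ports, uint16 count)
+{
+	wl_pkt_filter_ports_t *pf;
+	size_t len;
+	if (count > WL_PKT_FILTER_PORTS_MAX)
+		return NULL;
+	len = WL_PKT_FILTER_PORTS_FIXED_LEN + count * sizeof(uint16);
+	pf = (wl_pkt_filter_ports_t *)calloc(1, len);
+	if (pf == NULL)
+		return NULL;
+	pf->version = WL_PKT_FILTER_PORTS_VERSION;
+	pf->count = count;
+	memcpy(pf->ports, ports, count * sizeof(uint16));
+	return pf;
+}
+#endif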
+
+#define RSN_REPLAY_LEN 8
+typedef struct _gtkrefresh {
+ uint8 KCK[RSN_KCK_LENGTH];
+ uint8 KEK[RSN_KEK_LENGTH];
+ uint8 ReplayCounter[RSN_REPLAY_LEN];
+} gtk_keyinfo_t, *pgtk_keyinfo_t;
+
+/** Sequential Commands ioctl */
+typedef struct wl_seq_cmd_ioctl {
+ uint32 cmd; /**< common ioctl definition */
+ uint32 len; /**< length of user buffer */
+} wl_seq_cmd_ioctl_t;
+
+#define WL_SEQ_CMD_ALIGN_BYTES 4
+
+/**
+ * These are the set of get IOCTLs that should be allowed when using
+ * IOCTL sequence commands. These are issued implicitly by wl.exe each time
+ * it is invoked. We never want to buffer these, or else wl.exe will stop working.
+ */
+#define WL_SEQ_CMDS_GET_IOCTL_FILTER(cmd) \
+ (((cmd) == WLC_GET_MAGIC) || \
+ ((cmd) == WLC_GET_VERSION) || \
+ ((cmd) == WLC_GET_AP) || \
+ ((cmd) == WLC_GET_INSTANCE))
+
+#define MAX_PKTENG_SWEEP_STEPS 40
+typedef struct wl_pkteng {
+ uint32 flags;
+ uint32 delay; /**< Inter-packet delay */
+ uint32 nframes; /**< Number of frames */
+ uint32 length; /**< Packet length */
+ uint8 seqno; /**< Enable/disable sequence no. */
+ struct ether_addr dest; /**< Destination address */
+ struct ether_addr src; /**< Source address */
+	uint8 sweep_steps; /**< Number of sweep power steps */
+ uint8 est_pwr_cnt; /**< Number of packets to collect est. pwr */
+ uint8 PAD[1];
+} wl_pkteng_t;
+
+typedef struct wl_pkteng_est_pwr {
+ uint32 ratespec; /* Ratespec of the packets */
+ uint8 est_pwr_required; /* Requested Number of packets to collect est power */
+ uint8 est_pwr_collected; /* Actual collected packets */
+ uint8 core_num; /* Total number of Tx cores */
+ uint8 PAD;
+ int8 est_pwr[]; /* The est power buffer */
+} wl_pkteng_est_pwr_t;
+
+/* The wl_pkteng_est_pwr_t is encapsulated in a xtlv buffer with the following ID */
+enum wl_pkgeng_estpwr_id {
+ wl_pkteng_estpwr_data = 0u,
+ wl_pkteng_estpwr_clear = 1u
+};
+
+/* IOVAR pkteng_sweep_counters response structure */
+#define WL_PKTENG_SWEEP_COUNTERS_VERSION 1
+typedef struct wl_pkteng_sweep_ctrs {
+ uint16 version; /**< Version - 1 */
+ uint16 size; /**< Complete Size including sweep_counters */
+ uint16 sweep_steps; /**< Number of steps */
+ uint16 PAD;
+ uint16 sweep_counter[]; /**< Array of frame counters */
+} wl_pkteng_sweep_ctrs_t;
+
+/* IOVAR pkteng_rx_pkt response structure */
+#define WL_PKTENG_RX_PKT_VERSION 1
+typedef struct wl_pkteng_rx_pkt {
+ uint16 version; /**< Version - 1 */
+ uint16 size; /**< Complete Size including the packet */
+ uint8 payload[]; /**< Packet payload */
+} wl_pkteng_rx_pkt_t;
+
+#define WL_PKTENG_RU_FILL_VER_1 1u
+#define WL_PKTENG_RU_FILL_VER_2 2u
+#define WL_PKTENG_RU_FILL_VER_3 3u
+
+/* struct for ru packet engine */
+typedef struct wl_pkteng_ru_v1 {
+ uint16 version; /* ver is 1 */
+ uint16 length; /* size of complete structure */
+ uint8 bw; /* bandwidth info */
+ uint8 ru_alloc_val; /* ru allocation index number */
+ uint8 mcs_val; /* mcs allocated value */
+ uint8 nss_val; /* num of spatial streams */
+ uint32 num_bytes; /* approx num of bytes to calculate other required params */
+	uint8 cp_ltf_val; /* GI and LTF symbol size */
+	uint8 he_ltf_symb; /* num of HE-LTF symbols */
+ uint8 stbc; /* STBC support */
+ uint8 coding_val; /* BCC/LDPC coding support */
+ uint8 pe_category; /* PE duration 0/8/16usecs */
+ uint8 dcm; /* dual carrier modulation */
+ uint8 mumimo_ltfmode; /* ltf mode */
+ uint8 trig_tx; /* form and transmit the trigger frame */
+ uint8 trig_type; /* type of trigger frame */
+ uint8 trig_period; /* trigger tx periodicity TBD */
+ struct ether_addr dest; /* destination address for un-associated mode */
+} wl_pkteng_ru_v1_t;
+
+typedef struct wl_pkteng_ru_v2 {
+	uint16 version; /* ver is 2 */
+ uint16 length; /* size of complete structure */
+ uint8 bw; /* bandwidth info */
+ uint8 ru_alloc_val; /* ru allocation index number */
+ uint8 mcs_val; /* mcs allocated value */
+ uint8 nss_val; /* num of spatial streams */
+ uint32 num_bytes; /* approx num of bytes to calculate other required params */
+ struct ether_addr dest; /* destination address for un-associated mode */
+	uint8 cp_ltf_val; /* GI and LTF symbol size */
+	uint8 he_ltf_symb; /* num of HE-LTF symbols */
+ uint8 stbc; /* STBC support */
+ uint8 coding_val; /* BCC/LDPC coding support */
+ uint8 pe_category; /* PE duration 0/8/16usecs */
+ uint8 dcm; /* dual carrier modulation */
+ uint8 mumimo_ltfmode; /* ltf mode */
+ uint8 trig_tx; /* form and transmit the trigger frame */
+ uint8 trig_type; /* type of trigger frame */
+ uint8 trig_period; /* trigger tx periodicity TBD */
+ uint8 tgt_rssi; /* target rssi value in encoded format */
+	uint8 pad[3]; /* 3 byte padding to make structure size a multiple of 32 bits */
+} wl_pkteng_ru_v2_t;
+
+typedef struct wl_pkteng_ru_v3 {
+ uint16 version; /* ver is 3 */
+ uint16 length; /* size of complete structure */
+ uint8 bw; /* bandwidth info */
+ uint8 ru_alloc_val; /* ru allocation index number */
+ uint8 mcs_val; /* mcs allocated value */
+ uint8 nss_val; /* num of spatial streams */
+ uint32 num_bytes; /* approx num of bytes to calculate other required params */
+ struct ether_addr dest; /* destination address for un-associated mode */
+	uint8 cp_ltf_val;	/* GI and LTF symbol size */
+	uint8 he_ltf_symb;	/* num of HE-LTF symbols */
+ uint8 stbc; /* STBC support */
+ uint8 coding_val; /* BCC/LDPC coding support */
+ uint8 pe_category; /* PE duration 0/8/16usecs */
+ uint8 dcm; /* dual carrier modulation */
+ uint8 mumimo_ltfmode; /* ltf mode */
+ uint8 trig_tx; /* form and transmit the trigger frame */
+ uint8 trig_type; /* type of trigger frame */
+ uint8 trig_period; /* trigger tx periodicity TBD */
+ uint8 tgt_rssi; /* target rssi value in encoded format */
+ uint8 sub_band; /* in 160MHz case, 80L, 80U */
+ uint8 pad[2]; /* 2 byte padding to make structure size a multiple of 32bits */
+} wl_pkteng_ru_v3_t;
+
+#ifndef WL_PKTENG_RU_VER
+/* App uses the latest version - source picks it up from wlc_types.h */
+typedef wl_pkteng_ru_v3_t wl_pkteng_ru_fill_t;
+#endif
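+
+/* Fill sketch for the latest RU request (hypothetical helper, placeholder
+ * values): whatever version is sent, the version/length fields must be
+ * stamped to match the structure actually used, per the convention above.
+ * Assumes memset is available in the including environment.
+ */
+static INLINE void
+wl_pkteng_ru_v3_fill_example(wl_pkteng_ru_v3_t *ru)
+{
+	memset(ru, 0, sizeof(*ru));
+	ru->version = WL_PKTENG_RU_FILL_VER_3;
+	ru->length = (uint16)sizeof(*ru);
+	ru->mcs_val = 7;	/* placeholder rate selection */
+	ru->nss_val = 1;	/* single spatial stream */
+	ru->num_bytes = 1500;	/* approx bytes used to derive other params */
+}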
+
+typedef struct wl_trig_frame_info {
+ /* Structure versioning and structure length params */
+ uint16 version;
+ uint16 length;
+ /* Below params are the fields related to trigger frame contents */
+ /* Common Info Params Figure 9-52d - 11ax Draft 1.1 */
+ uint16 lsig_len;
+ uint16 trigger_type;
+ uint16 cascade_indication;
+ uint16 cs_req;
+ uint16 bw;
+ uint16 cp_ltf_type;
+ uint16 mu_mimo_ltf_mode;
+ uint16 num_he_ltf_syms;
+ uint16 stbc;
+ uint16 ldpc_extra_symb;
+ uint16 ap_tx_pwr;
+ uint16 afactor;
+ uint16 pe_disambiguity;
+ uint16 spatial_resuse;
+ uint16 doppler;
+ uint16 he_siga_rsvd;
+ uint16 cmn_info_rsvd;
+ /* User Info Params Figure 9-52e - 11ax Draft 1.1 */
+ uint16 aid12;
+ uint16 ru_alloc;
+ uint16 coding_type;
+ uint16 mcs;
+ uint16 dcm;
+ uint16 ss_alloc;
+ uint16 tgt_rssi;
+ uint16 usr_info_rsvd;
+} wl_trig_frame_info_t;
+
+/* wl pkteng_stats related definitions */
+#define WL_PKTENG_STATS_V1 (1)
+#define WL_PKTENG_STATS_V2 (2)
+
+typedef struct wl_pkteng_stats_v1 {
+ uint32 lostfrmcnt; /**< RX PER test: no of frames lost (skip seqno) */
+ int32 rssi; /**< RSSI */
+ int32 snr; /**< signal to noise ratio */
+ uint16 rxpktcnt[NUM_80211_RATES+1];
+ uint8 rssi_qdb; /**< qdB portion of the computed rssi */
+ uint8 version;
+} wl_pkteng_stats_v1_t;
+
+typedef struct wl_pkteng_stats_v2 {
+ uint32 lostfrmcnt; /**< RX PER test: no of frames lost (skip seqno) */
+ int32 rssi; /**< RSSI */
+ int32 snr; /**< signal to noise ratio */
+ uint16 rxpktcnt[NUM_80211_RATES+1];
+ uint8 rssi_qdb; /**< qdB portion of the computed rssi */
+ uint8 version;
+ uint16 length;
+ uint16 pad;
+ int32 rssi_per_core[WL_RSSI_ANT_MAX];
+ int32 rssi_per_core_qdb[WL_RSSI_ANT_MAX];
+} wl_pkteng_stats_v2_t;
+
+#ifndef WL_PKTENG_STATS_TYPEDEF_HAS_ALIAS
+typedef wl_pkteng_stats_v1_t wl_pkteng_stats_t;
+#endif /* WL_PKTENG_STATS_TYPEDEF_HAS_ALIAS */
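+
+/* Version-dispatch sketch (illustrative): v1 and v2 share their leading
+ * fields up to and including 'version', so the version can be read through
+ * the v1 layout before choosing the full view of the response.
+ */
+static INLINE int
+wl_pkteng_stats_is_v2(const void *buf)
+{
+	const wl_pkteng_stats_v1_t *v1 = (const wl_pkteng_stats_v1_t *)buf;
+	return (v1->version == WL_PKTENG_STATS_V2);
+}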
+
+typedef struct wl_txcal_params {
+ wl_pkteng_t pkteng;
+ uint8 gidx_start;
+ int8 gidx_step;
+ uint8 gidx_stop;
+ uint8 PAD;
+} wl_txcal_params_t;
+
+typedef struct wl_txcal_gainidx {
+ uint8 num_actv_cores;
+ uint8 gidx_start_percore[WL_STA_ANT_MAX];
+ uint8 gidx_stop_percore[WL_STA_ANT_MAX];
+ uint8 PAD[3];
+} wl_txcal_gainidx_t;
+
+typedef struct wl_txcal_params_v2 {
+ wl_pkteng_t pkteng;
+ int8 gidx_step;
+ uint8 pwr_start[WL_STA_ANT_MAX];
+ uint8 pwr_stop[WL_STA_ANT_MAX];
+ uint8 init_start_idx;
+ uint8 gidx_start_percore[WL_STA_ANT_MAX];
+ uint8 gidx_stop_percore[WL_STA_ANT_MAX];
+ uint16 version;
+} wl_txcal_params_v2_t;
+
+typedef struct wl_txtone_idxsweep_params {
+ int8 gidx_step;
+ uint8 gidx_start_percore[WL_STA_ANT_MAX];
+ uint8 gidx_stop_percore[WL_STA_ANT_MAX];
+ uint32 dwell_time;
+} wl_txtone_idxsweep_params;
+
+typedef wl_txcal_params_t wl_txcal_params_v1_t;
+
+typedef struct wl_rssilog_params {
+ uint8 enable;
+ uint8 rssi_threshold;
+ uint8 time_threshold;
+ uint8 pad;
+} wl_rssilog_params_t;
+
+typedef struct wl_sslpnphy_papd_debug_data {
+ uint8 psat_pwr;
+ uint8 psat_indx;
+ uint8 final_idx;
+ uint8 start_idx;
+ int32 min_phase;
+ int32 voltage;
+ int8 temperature;
+ uint8 PAD[3];
+} wl_sslpnphy_papd_debug_data_t;
+typedef struct wl_sslpnphy_debug_data {
+ int16 papdcompRe [64];
+ int16 papdcompIm [64];
+} wl_sslpnphy_debug_data_t;
+typedef struct wl_sslpnphy_spbdump_data {
+ uint16 tbl_length;
+ int16 spbreal[256];
+ int16 spbimg[256];
+} wl_sslpnphy_spbdump_data_t;
+typedef struct wl_sslpnphy_percal_debug_data {
+ uint32 cur_idx;
+ uint32 tx_drift;
+ uint8 prev_cal_idx;
+ uint8 PAD[3];
+ uint32 percal_ctr;
+ int32 nxt_cal_idx;
+ uint32 force_1idxcal;
+ uint32 onedxacl_req;
+ int32 last_cal_volt;
+ int8 last_cal_temp;
+ uint8 PAD[3];
+ uint32 vbat_ripple;
+ uint32 exit_route;
+ int32 volt_winner;
+} wl_sslpnphy_percal_debug_data_t;
+
+typedef enum {
+ wowl_pattern_type_bitmap = 0,
+ wowl_pattern_type_arp,
+ wowl_pattern_type_na
+} wowl_pattern_type_t;
+
+typedef struct wl_wowl_pattern {
+ uint32 masksize; /**< Size of the mask in #of bytes */
+ uint32 offset; /**< Pattern byte offset in packet */
+ uint32 patternoffset; /**< Offset of start of pattern in the structure */
+ uint32 patternsize; /**< Size of the pattern itself in #of bytes */
+ uint32 id; /**< id */
+ uint32 reasonsize; /**< Size of the wakeup reason code */
+ wowl_pattern_type_t type; /**< Type of pattern */
+ /* Mask follows the structure above */
+	/* Pattern follows the mask and is at 'patternoffset' from the start */
+} wl_wowl_pattern_t;
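+
+/* Layout sketch (illustrative): a complete wl_wowl_pattern buffer is the
+ * fixed header, immediately followed by 'masksize' mask bytes, with the
+ * pattern itself starting 'patternoffset' bytes from the start of the
+ * structure. Hypothetical sizing helper:
+ */
+static INLINE uint32
+wl_wowl_pattern_bufsize(const wl_wowl_pattern_t *p)
+{
+	/* total buffer = offset of the pattern + the pattern itself */
+	return p->patternoffset + p->patternsize;
+}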
+
+typedef struct wl_wowl_pattern_list {
+ uint32 count;
+ wl_wowl_pattern_t pattern[1];
+} wl_wowl_pattern_list_t;
+
+typedef struct wl_wowl_wakeind {
+ uint8 pci_wakeind; /**< Whether PCI PMECSR PMEStatus bit was set */
+ uint32 ucode_wakeind; /**< What wakeup-event indication was set by ucode */
+} wl_wowl_wakeind_t;
+
+/** per AC rate control related data structure */
+typedef struct wl_txrate_class {
+ uint8 init_rate;
+ uint8 min_rate;
+ uint8 max_rate;
+} wl_txrate_class_t;
+
+/** structure for Overlap BSS scan arguments */
+typedef struct wl_obss_scan_arg {
+ int16 passive_dwell;
+ int16 active_dwell;
+ int16 bss_widthscan_interval;
+ int16 passive_total;
+ int16 active_total;
+ int16 chanwidth_transition_delay;
+ int16 activity_threshold;
+} wl_obss_scan_arg_t;
+
+#define WL_OBSS_SCAN_PARAM_LEN sizeof(wl_obss_scan_arg_t)
+
+/** RSSI event notification configuration. */
+typedef struct wl_rssi_event {
+ uint32 rate_limit_msec; /**< # of events posted to application will be limited to
+ * one per specified period (0 to disable rate limit).
+ */
+ uint8 num_rssi_levels; /**< Number of entries in rssi_levels[] below */
+ int8 rssi_levels[MAX_RSSI_LEVELS]; /**< Variable number of RSSI levels. An event
+ * will be posted each time the RSSI of received
+ * beacons/packets crosses a level.
+ */
+ int8 pad[3];
+} wl_rssi_event_t;
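+
+/* Configuration sketch (illustrative, placeholder values): post at most one
+ * event per second and report crossings of -80 and -70 dBm. Assumes
+ * MAX_RSSI_LEVELS (defined earlier in this header) is at least 2 and that
+ * memset is available.
+ */
+static INLINE void
+wl_rssi_event_cfg_example(wl_rssi_event_t *cfg)
+{
+	memset(cfg, 0, sizeof(*cfg));
+	cfg->rate_limit_msec = 1000;	/* at most one event per second */
+	cfg->num_rssi_levels = 2;
+	cfg->rssi_levels[0] = -80;
+	cfg->rssi_levels[1] = -70;
+}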
+
+#define RSSI_MONITOR_VERSION 1
+#define RSSI_MONITOR_STOP (1 << 0)
+typedef struct wl_rssi_monitor_cfg {
+ uint8 version;
+ uint8 flags;
+ int8 max_rssi;
+ int8 min_rssi;
+} wl_rssi_monitor_cfg_t;
+
+typedef struct wl_rssi_monitor_evt {
+ uint8 version;
+ int8 cur_rssi;
+ uint16 pad;
+} wl_rssi_monitor_evt_t;
+
+/* CCA based channel quality event configuration (ID values for both config and report) */
+#define WL_CHAN_QUAL_CCA 0
+#define WL_CHAN_QUAL_NF 1
+#define WL_CHAN_QUAL_NF_LTE 2
+#define WL_CHAN_QUAL_TOTAL 3 /* The total IDs supported in both config and report */
+/* Additional channel quality event support in report only (>= 0x100)
+ * Notice that uint8 is used in configuration struct wl_chan_qual_metric_t, but uint16 is
+ * used for report in struct cca_chan_qual_event_t. So the ID values beyond 8-bit are used
+ * for reporting purposes only.
+ */
+#define WL_CHAN_QUAL_FULL_CCA (0x100u | WL_CHAN_QUAL_CCA) /* CCA: ibss vs. obss */
+#define WL_CHAN_QUAL_FULLPM_CCA (0x200u | WL_CHAN_QUAL_CCA) /* CCA: me vs. notme, PM vs. !PM */
+#define WL_CHAN_QUAL_FULLPM_CCA_OFDM_DESENSE (0x400u | WL_CHAN_QUAL_CCA)
+/* CCA: me vs. notme, PM vs. !PM with OFDM Desense */
+
+#define MAX_CHAN_QUAL_LEVELS 8
+
+typedef struct wl_chan_qual_metric {
+ uint8 id; /**< metric ID */
+	uint8 num_levels;	/**< Number of entries in the htol[]/ltoh[] arrays below */
+ uint16 flags;
+ int16 htol[MAX_CHAN_QUAL_LEVELS]; /**< threshold level array: hi-to-lo */
+ int16 ltoh[MAX_CHAN_QUAL_LEVELS]; /**< threshold level array: lo-to-hi */
+} wl_chan_qual_metric_t;
+
+typedef struct wl_chan_qual_event {
+ uint32 rate_limit_msec; /**< # of events posted to application will be limited to
+ * one per specified period (0 to disable rate limit).
+ */
+ uint16 flags;
+ uint16 num_metrics;
+ wl_chan_qual_metric_t metric[WL_CHAN_QUAL_TOTAL]; /**< metric array */
+} wl_chan_qual_event_t;
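+
+/* Configuration sketch for a single metric (illustrative; one reading of the
+ * hi-to-lo/lo-to-hi pairing is threshold hysteresis, where keeping ltoh above
+ * htol for the same level index avoids event flapping). Values are
+ * placeholders; assumes memset is available.
+ */
+static INLINE void
+wl_chan_qual_metric_example(wl_chan_qual_metric_t *m)
+{
+	memset(m, 0, sizeof(*m));
+	m->id = WL_CHAN_QUAL_NF;	/* noise-floor metric */
+	m->num_levels = 1;
+	m->htol[0] = -85;	/* falling-edge threshold */
+	m->ltoh[0] = -80;	/* rising-edge threshold */
+}
+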
+typedef struct wl_action_obss_coex_req {
+ uint8 info;
+ uint8 num;
+ uint8 ch_list[1];
+} wl_action_obss_coex_req_t;
+
+/** IOVar parameter block for small MAC address array with type indicator */
+#define WL_IOV_MAC_PARAM_LEN 4
+
+/** This value is hardcoded to be 16 and MUST match PKTQ_MAX_PREC value defined elsewhere */
+#define WL_IOV_PKTQ_LOG_PRECS 16
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint32 num_addrs;
+ uint8 addr_type[WL_IOV_MAC_PARAM_LEN];
+ struct ether_addr ea[WL_IOV_MAC_PARAM_LEN];
+} BWL_POST_PACKED_STRUCT wl_iov_mac_params_t;
+#include <packed_section_end.h>
+
+/** This is extra info that follows wl_iov_mac_params_t */
+typedef struct {
+ uint32 addr_info[WL_IOV_MAC_PARAM_LEN];
+} wl_iov_mac_extra_params_t;
+
+/** Combined structure */
+typedef struct {
+ wl_iov_mac_params_t params;
+ wl_iov_mac_extra_params_t extra_params;
+} wl_iov_mac_full_params_t;
+
+/** Parameter block for PKTQ_LOG statistics */
+/* NOTE: this structure cannot change! It is exported to wlu as a binary format
+ * A new format revision number must be created if the interface changes
+ * The latest is v05; previous v01...v03 are no longer supported, v04 has
+ * common base with v05
+*/
+#define PKTQ_LOG_COUNTERS_V4 \
+ /* packets requested to be stored */ \
+ uint32 requested; \
+ /* packets stored */ \
+ uint32 stored; \
+	/* packets saved, because a lower priority queue has given away one packet */ \
+ uint32 saved; \
+ /* packets saved, because an older packet from the same queue has been dropped */ \
+ uint32 selfsaved; \
+ /* packets dropped, because pktq is full with higher precedence packets */ \
+ uint32 full_dropped; \
+ /* packets dropped because pktq per that precedence is full */ \
+ uint32 dropped; \
+	/* packets dropped, in order to save one from a queue of a higher priority */ \
+ uint32 sacrificed; \
+	/* packets dropped because of hardware/transmission error */ \
+ uint32 busy; \
+ /* packets re-sent because they were not received */ \
+ uint32 retry; \
+	/* packets retried again (ps pretend) prior to moving to power save mode */ \
+ uint32 ps_retry; \
+ /* suppressed packet count */ \
+ uint32 suppress; \
+ /* packets finally dropped after retry limit */ \
+ uint32 retry_drop; \
+ /* the high-water mark of the queue capacity for packets - goes to zero as queue fills */ \
+ uint32 max_avail; \
+ /* the high-water mark of the queue utilisation for packets - ('inverse' of max_avail) */ \
+ uint32 max_used; \
+ /* the maximum capacity of the queue */ \
+ uint32 queue_capacity; \
+ /* count of rts attempts that failed to receive cts */ \
+ uint32 rtsfail; \
+ /* count of packets sent (acked) successfully */ \
+ uint32 acked; \
+ /* running total of phy rate of packets sent successfully */ \
+ uint32 txrate_succ; \
+ /* running total of phy 'main' rate */ \
+ uint32 txrate_main; \
+ /* actual data transferred successfully */ \
+ uint32 throughput; \
+ /* time difference since last pktq_stats */ \
+ uint32 time_delta;
+
+typedef struct {
+ PKTQ_LOG_COUNTERS_V4
+} pktq_log_counters_v04_t;
+
+/** v5 is the same as v4 with an extra parameter */
+typedef struct {
+ PKTQ_LOG_COUNTERS_V4
+ /** cumulative time to transmit */
+ uint32 airtime;
+} pktq_log_counters_v05_t;
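+
+/* Derived-metric sketch (illustrative): terminal-outcome drop ratio for one
+ * queue from the common V4 counters, in percent. 'acked' counts frames sent
+ * successfully and 'retry_drop' counts frames finally dropped after the
+ * retry limit, so their sum is the number of terminal outcomes.
+ */
+static INLINE uint32
+pktq_log_drop_pct(const pktq_log_counters_v04_t *c)
+{
+	uint32 total = c->acked + c->retry_drop;
+	return (total != 0) ? (uint32)(((uint64)c->retry_drop * 100u) / total) : 0u;
+}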
+
+typedef struct {
+ uint8 num_prec[WL_IOV_MAC_PARAM_LEN];
+ pktq_log_counters_v04_t counters[WL_IOV_MAC_PARAM_LEN][WL_IOV_PKTQ_LOG_PRECS];
+ uint32 counter_info[WL_IOV_MAC_PARAM_LEN];
+ uint32 pspretend_time_delta[WL_IOV_MAC_PARAM_LEN];
+ char headings[];
+} pktq_log_format_v04_t;
+
+typedef struct {
+ uint8 num_prec[WL_IOV_MAC_PARAM_LEN];
+ pktq_log_counters_v05_t counters[WL_IOV_MAC_PARAM_LEN][WL_IOV_PKTQ_LOG_PRECS];
+ uint32 counter_info[WL_IOV_MAC_PARAM_LEN];
+ uint32 pspretend_time_delta[WL_IOV_MAC_PARAM_LEN];
+ char headings[];
+} pktq_log_format_v05_t;
+
+typedef struct {
+ uint32 version;
+ wl_iov_mac_params_t params;
+ union {
+ pktq_log_format_v04_t v04;
+ pktq_log_format_v05_t v05;
+ } pktq_log;
+} wl_iov_pktq_log_t;
+
+/* PKTQ_LOG_AUTO, PKTQ_LOG_DEF_PREC flags introduced in v05, they are ignored by v04 */
+#define PKTQ_LOG_AUTO (1 << 31)
+#define PKTQ_LOG_DEF_PREC (1 << 30)
+
+typedef struct wl_pfn_macaddr_cfg_0 {
+ uint8 version;
+ uint8 reserved;
+ struct ether_addr macaddr;
+} wl_pfn_macaddr_cfg_0_t;
+#define LEGACY1_WL_PFN_MACADDR_CFG_VER 0
+#define WL_PFN_MAC_OUI_ONLY_MASK 1
+#define WL_PFN_SET_MAC_UNASSOC_MASK 2
+#define WL_PFN_RESTRICT_LA_MAC_MASK 4
+#define WL_PFN_MACADDR_FLAG_MASK 0x7
+/** To configure pfn_macaddr */
+typedef struct wl_pfn_macaddr_cfg {
+ uint8 version;
+ uint8 flags;
+ struct ether_addr macaddr;
+} wl_pfn_macaddr_cfg_t;
+#define WL_PFN_MACADDR_CFG_VER 1
+
+/*
+ * SCB_BS_DATA iovar definitions start.
+ */
+#define SCB_BS_DATA_STRUCT_VERSION 1
+
+/** The actual counters maintained for each station */
+typedef struct {
+ /* The following counters are a subset of what pktq_stats provides per precedence. */
+ uint32 retry; /**< packets re-sent because they were not received */
+ uint32 retry_drop; /**< packets finally dropped after retry limit */
+ uint32 rtsfail; /**< count of rts attempts that failed to receive cts */
+ uint32 acked; /**< count of packets sent (acked) successfully */
+ uint32 txrate_succ; /**< running total of phy rate of packets sent successfully */
+ uint32 txrate_main; /**< running total of phy 'main' rate */
+ uint32 throughput; /**< actual data transferred successfully */
+ uint32 time_delta; /**< time difference since last pktq_stats */
+ uint32 airtime; /**< cumulative total medium access delay in useconds */
+} iov_bs_data_counters_t;
+
+/** The structure for individual station information. */
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct {
+ struct ether_addr station_address; /**< The station MAC address */
+ uint16 station_flags; /**< Bit mask of flags, for future use. */
+ iov_bs_data_counters_t station_counters; /**< The actual counter values */
+} BWL_POST_PACKED_STRUCT iov_bs_data_record_t;
+#include <packed_section_end.h>
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint16 structure_version; /**< Structure version number (for wl/wlu matching) */
+ uint16 structure_count; /**< Number of iov_bs_data_record_t records following */
+ iov_bs_data_record_t structure_record[1]; /**< 0 - structure_count records */
+} BWL_POST_PACKED_STRUCT iov_bs_data_struct_t;
+#include <packed_section_end.h>
+
+/* Bitmask of options that can be passed in to the iovar. */
+enum {
+ SCB_BS_DATA_FLAG_NO_RESET = (1<<0) /**< Do not clear the counters after reading */
+};
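+
+/* Iteration sketch (illustrative): 'structure_count' packed records follow
+ * the header; as with other [1]-sized arrays in this file, the records are
+ * laid out contiguously. A real consumer should also bounds-check against
+ * the response length and verify structure_version.
+ */
+static INLINE uint32
+iov_bs_data_total_acked(const iov_bs_data_struct_t *bs)
+{
+	uint32 total = 0;
+	uint16 i;
+	for (i = 0; i < bs->structure_count; i++) {
+		total += bs->structure_record[i].station_counters.acked;
+	}
+	return total;
+}
+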
+/*
+ * SCB_BS_DATA iovar definitions end.
+ */
+
+typedef struct wlc_extlog_cfg {
+ int32 max_number;
+ uint16 module; /**< bitmap */
+ uint8 level;
+ uint8 flag;
+ uint16 version;
+ uint16 PAD;
+} wlc_extlog_cfg_t;
+
+typedef struct log_record {
+ uint32 time;
+ uint16 module;
+ uint16 id;
+ uint8 level;
+ uint8 sub_unit;
+ uint8 seq_num;
+ uint8 pad;
+ int32 arg;
+ char str[MAX_ARGSTR_LEN];
+ char PAD[4-MAX_ARGSTR_LEN%4];
+} log_record_t;
+
+typedef struct wlc_extlog_req {
+ uint32 from_last;
+ uint32 num;
+} wlc_extlog_req_t;
+
+typedef struct wlc_extlog_results {
+ uint16 version;
+ uint16 record_len;
+ uint32 num;
+ log_record_t logs[1];
+} wlc_extlog_results_t;
+
+typedef struct log_idstr {
+ uint16 id;
+ uint16 flag;
+ uint8 arg_type;
+ const char *fmt_str;
+} log_idstr_t;
+
+#define FMTSTRF_USER 1
+
+/* flat ID definitions
+ * New definitions HAVE TO BE ADDED at the end of the table. Otherwise, it will
+ * affect backward compatibility with pre-existing apps
+ */
+typedef enum {
+ FMTSTR_DRIVER_UP_ID = 0,
+ FMTSTR_DRIVER_DOWN_ID = 1,
+ FMTSTR_SUSPEND_MAC_FAIL_ID = 2,
+ FMTSTR_NO_PROGRESS_ID = 3,
+ FMTSTR_RFDISABLE_ID = 4,
+ FMTSTR_REG_PRINT_ID = 5,
+ FMTSTR_EXPTIME_ID = 6,
+ FMTSTR_JOIN_START_ID = 7,
+ FMTSTR_JOIN_COMPLETE_ID = 8,
+ FMTSTR_NO_NETWORKS_ID = 9,
+ FMTSTR_SECURITY_MISMATCH_ID = 10,
+ FMTSTR_RATE_MISMATCH_ID = 11,
+ FMTSTR_AP_PRUNED_ID = 12,
+ FMTSTR_KEY_INSERTED_ID = 13,
+ FMTSTR_DEAUTH_ID = 14,
+ FMTSTR_DISASSOC_ID = 15,
+ FMTSTR_LINK_UP_ID = 16,
+ FMTSTR_LINK_DOWN_ID = 17,
+ FMTSTR_RADIO_HW_OFF_ID = 18,
+ FMTSTR_RADIO_HW_ON_ID = 19,
+ FMTSTR_EVENT_DESC_ID = 20,
+ FMTSTR_PNP_SET_POWER_ID = 21,
+ FMTSTR_RADIO_SW_OFF_ID = 22,
+ FMTSTR_RADIO_SW_ON_ID = 23,
+ FMTSTR_PWD_MISMATCH_ID = 24,
+ FMTSTR_FATAL_ERROR_ID = 25,
+ FMTSTR_AUTH_FAIL_ID = 26,
+ FMTSTR_ASSOC_FAIL_ID = 27,
+ FMTSTR_IBSS_FAIL_ID = 28,
+ FMTSTR_EXTAP_FAIL_ID = 29,
+ FMTSTR_MAX_ID
+} log_fmtstr_id_t;
+
+/** 11k Neighbor Report element (unversioned, deprecated) */
+typedef struct nbr_element {
+ uint8 id;
+ uint8 len;
+ struct ether_addr bssid;
+ uint32 bssid_info;
+ uint8 reg;
+ uint8 channel;
+ uint8 phytype;
+ uint8 pad;
+} nbr_element_t;
+#define NBR_ADD_STATIC 0
+#define NBR_ADD_DYNAMIC 1
+
+#define WL_RRM_NBR_RPT_VER 1
+
+#define WL_NBR_RPT_FLAG_BSS_PREF_FROM_AP 0x01
+/** 11k Neighbor Report element */
+typedef struct nbr_rpt_elem {
+ uint8 version;
+ uint8 id;
+ uint8 len;
+ uint8 pad;
+ struct ether_addr bssid;
+ uint8 pad_1[2];
+ uint32 bssid_info;
+ uint8 reg;
+ uint8 channel;
+ uint8 phytype;
+	uint8 addtype;	/* static for manual add or dynamic for auto-learned neighbors */
+ wlc_ssid_t ssid;
+ chanspec_t chanspec;
+ uint8 bss_trans_preference;
+ uint8 flags;
+} nbr_rpt_elem_t;
+
+typedef enum event_msgs_ext_command {
+ EVENTMSGS_NONE = 0,
+ EVENTMSGS_SET_BIT = 1,
+ EVENTMSGS_RESET_BIT = 2,
+ EVENTMSGS_SET_MASK = 3
+} event_msgs_ext_command_t;
+
+#define EVENTMSGS_VER 1
+#define EVENTMSGS_EXT_STRUCT_SIZE ((uint)(sizeof(eventmsgs_ext_t) - 1))
+
+/* len - for SET it is the mask size sent from the application to the firmware; */
+/* for GET it is the actual firmware mask size */
+/* maxgetsize - used only for GET; indicates the max mask size that the */
+/* application can read from the firmware */
+typedef struct eventmsgs_ext
+{
+ uint8 ver;
+ uint8 command;
+ uint8 len;
+ uint8 maxgetsize;
+ uint8 mask[1];
+} eventmsgs_ext_t;
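+
+/* Request-building sketch for a SET (illustrative): the iovar payload is the
+ * fixed header (EVENTMSGS_EXT_STRUCT_SIZE) followed by 'len' mask bytes, so
+ * the caller's buffer must hold EVENTMSGS_EXT_STRUCT_SIZE + mask_len bytes.
+ */
+static INLINE void
+eventmsgs_ext_set_bit_example(eventmsgs_ext_t *req, uint8 mask_len)
+{
+	req->ver = EVENTMSGS_VER;
+	req->command = EVENTMSGS_SET_BIT;
+	req->len = mask_len;
+	req->maxgetsize = 0;	/* only meaningful for GET */
+	/* caller then fills req->mask[0..mask_len-1] with the bits to set */
+}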
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct pcie_bus_tput_params {
+ /** no of host dma descriptors programmed by the firmware before a commit */
+ uint16 max_dma_descriptors;
+
+ uint16 host_buf_len; /**< length of host buffer */
+ dmaaddr_t host_buf_addr; /**< physical address for bus_throughput_buf */
+} BWL_POST_PACKED_STRUCT pcie_bus_tput_params_t;
+#include <packed_section_end.h>
+
+typedef struct pcie_bus_tput_stats {
+ uint16 time_taken; /**< no of secs the test is run */
+	uint16 nbytes_per_descriptor;	/**< no of bytes of data DMA'ed per descriptor */
+
+	/** no of descriptors for which dma is successfully completed within the test time */
+ uint32 count;
+} pcie_bus_tput_stats_t;
+
+#define HOST_WAKEUP_DATA_VER 1
+#include <packed_section_start.h>
+/* Bus interface host wakeup data */
+typedef BWL_PRE_PACKED_STRUCT struct wl_host_wakeup_data {
+ uint16 ver;
+ uint16 len;
+ uchar data[1]; /* wakeup data */
+} BWL_POST_PACKED_STRUCT wl_host_wakeup_data_t;
+#include <packed_section_end.h>
+
+#define HOST_WAKEUP_DATA_VER_2 2
+#include <packed_section_start.h>
+/* Bus interface host wakeup data */
+typedef BWL_PRE_PACKED_STRUCT struct wl_host_wakeup_data_v2 {
+ uint16 ver;
+ uint16 len;
+ uint32 gpio_toggle_time; /* gpio toggle time in ms */
+ uchar data[1]; /* wakeup data */
+} BWL_POST_PACKED_STRUCT wl_host_wakeup_data_v2_t;
+#include <packed_section_end.h>
+
+typedef struct keepalives_max_idle {
+	uint16 keepalive_count;		/**< number of keepalives per bss_max_idle period */
+ uint8 mkeepalive_index; /**< mkeepalive_index for keepalive frame to be used */
+ uint8 PAD; /**< to align next field */
+ uint16 max_interval; /**< seconds */
+} keepalives_max_idle_t;
+
+#define PM_IGNORE_BCMC_PROXY_ARP (1 << 0)
+#define PM_IGNORE_BCMC_ALL_DMS_ACCEPTED (1 << 1)
+
+/* ##### HMAP section ##### */
+#define PCIE_MAX_HMAP_WINDOWS 8
+#define PCIE_HMAPTEST_VERSION 2
+#define HMAPTEST_INVALID_OFFSET 0xFFFFFFFFu
+#define HMAPTEST_DEFAULT_WRITE_PATTERN 0xBABECAFEu
+#define HMAPTEST_ACCESS_ARM 0
+#define HMAPTEST_ACCESS_M2M 1
+#define HMAPTEST_ACCESS_D11 2
+#define HMAPTEST_ACCESS_NONE 3
+
+typedef struct pcie_hmaptest {
+ uint16 version; /* Version */
+ uint16 length; /* Length of entire structure */
+ uint32 xfer_len;
+ uint32 accesstype;
+ uint32 is_write;
+ uint32 is_invalid;
+ uint32 host_addr_hi;
+ uint32 host_addr_lo;
+ uint32 host_offset;
+ uint32 value; /* 4 byte value to be filled in case of write access test */
+ uint32 delay; /* wait time in seconds before initiating access from dongle */
+} pcie_hmaptest_t;
+
+/* HMAP window register set */
+typedef struct hmapwindow {
+ uint32 baseaddr_lo; /* BaseAddrLower */
+ uint32 baseaddr_hi; /* BaseAddrUpper */
+ uint32 windowlength; /* Window Length */
+} hmapwindow_t;
+
+#define PCIE_HMAP_VERSION 1
+typedef struct pcie_hmap {
+ uint16 version; /**< Version */
+ uint16 length; /**< Length of entire structure */
+ uint32 enable; /**< status of HMAP enabled/disabled */
+ uint32 nwindows; /* no. of HMAP windows enabled */
+ uint32 window_config; /* HMAP window_config register */
+ uint32 hmap_violationaddr_lo; /* violating address lo */
+ uint32 hmap_violationaddr_hi; /* violating addr hi */
+ uint32 hmap_violation_info; /* violation info */
+ hmapwindow_t hwindows[]; /* Multiple hwindows */
+} pcie_hmap_t;
+
+/* ##### Power Stats section ##### */
+
+#define WL_PWRSTATS_VERSION 2
+
+/** Input structure for pwrstats IOVAR */
+typedef struct wl_pwrstats_query {
+ uint16 length; /**< Number of entries in type array. */
+ uint16 type[1]; /**< Types (tags) to retrieve.
+ * Length 0 (no types) means get all.
+ */
+} wl_pwrstats_query_t;
+
+/** This structure is for version 2; version 1 will be deprecated by FW */
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct wl_pwrstats {
+ uint16 version; /**< Version = 2 is TLV format */
+ uint16 length; /**< Length of entire structure */
+ uint8 data[1]; /**< TLV data, a series of structures,
+ * each starting with type and length.
+ *
+ * Padded as necessary so each section
+ * starts on a 4-byte boundary.
+ *
+ * Both type and len are uint16, but the
+ * upper nibble of length is reserved so
+ * valid len values are 0-4095.
+ */
+} BWL_POST_PACKED_STRUCT wl_pwrstats_t;
+#include <packed_section_end.h>
+#define WL_PWR_STATS_HDRLEN OFFSETOF(wl_pwrstats_t, data)
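+
+/* TLV-walk sketch (illustrative): each section in 'data' starts with a
+ * uint16 type and uint16 len; the upper nibble of len is reserved, so it is
+ * masked to 12 bits, and sections are padded so the next one starts on a
+ * 4-byte boundary. Assumption: 'len' covers the whole section including its
+ * type/len header, since the per-type structs below embed type/len as their
+ * first two fields. Buffer bounds are assumed to be validated by the caller.
+ */
+static INLINE const uint8 *
+wl_pwrstats_next_section(const uint8 *sect)
+{
+	uint16 len = (uint16)(((const uint16 *)(const void *)sect)[1] & 0xFFFu);
+	return sect + (((uint32)len + 3u) & ~3u);
+}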
+
+/* Bits for wake reasons */
+#define WLC_PMD_WAKE_SET 0x1u
+#define WLC_PMD_PM_AWAKE_BCN 0x2u
+/* BIT:3 is no longer being used */
+#define WLC_PMD_SCAN_IN_PROGRESS 0x8u
+#define WLC_PMD_RM_IN_PROGRESS 0x10u
+#define WLC_PMD_AS_IN_PROGRESS 0x20u
+#define WLC_PMD_PM_PEND 0x40u
+#define WLC_PMD_PS_POLL 0x80u
+#define WLC_PMD_CHK_UNALIGN_TBTT 0x100u
+#define WLC_PMD_APSD_STA_UP 0x200u
+#define WLC_PMD_TX_PEND_WAR 0x400u /* obsolete, can be reused */
+#define WLC_PMD_NAN_AWAKE 0x400u /* Reusing for NAN */
+#define WLC_PMD_GPTIMER_STAY_AWAKE 0x800u
+
+#ifdef WLAWDL
+#define WLC_PMD_AWDL_AWAKE 0x1000u
+#endif /* WLAWDL */
+
+#define WLC_PMD_PM2_RADIO_SOFF_PEND 0x2000u
+#define WLC_PMD_NON_PRIM_STA_UP 0x4000u
+#define WLC_PMD_AP_UP 0x8000u
+#define WLC_PMD_TX_IN_PROGRESS 0x10000u /* Dongle awake due to packet TX */
+#define WLC_PMD_4WAYHS_IN_PROGRESS 0x20000u /* Dongle awake due to 4 way handshake */
+#define WLC_PMD_PM_OVERRIDE 0x40000u /* Dongle awake due to PM override */
+#define WLC_PMD_WAKE_OTHER 0x80000u
+
+typedef struct wlc_pm_debug {
+ uint32 timestamp; /**< timestamp in millisecond */
+ uint32 reason; /**< reason(s) for staying awake */
+} wlc_pm_debug_t;
+
+/** WL_PWRSTATS_TYPE_PM_AWAKE1 structures (for 6.25 firmware) */
+#define WLC_STA_AWAKE_STATES_MAX_V1 30
+#define WLC_PMD_EVENT_MAX_V1 32
+/** Data sent as part of pwrstats IOVAR (and EXCESS_PM_WAKE event) */
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct pm_awake_data_v1 {
+ uint32 curr_time; /**< ms */
+ uint32 hw_macc; /**< HW maccontrol */
+ uint32 sw_macc; /**< SW maccontrol */
+ uint32 pm_dur; /**< Total sleep time in PM, msecs */
+ uint32 mpc_dur; /**< Total sleep time in MPC, msecs */
+
+ /* int32 drifts = remote - local; +ve drift => local-clk slow */
+ int32 last_drift; /**< Most recent TSF drift from beacon */
+ int32 min_drift; /**< Min TSF drift from beacon in magnitude */
+ int32 max_drift; /**< Max TSF drift from beacon in magnitude */
+
+ uint32 avg_drift; /**< Avg TSF drift from beacon */
+
+ /* Wake history tracking */
+ uint8 pmwake_idx; /**< for stepping through pm_state */
+ wlc_pm_debug_t pm_state[WLC_STA_AWAKE_STATES_MAX_V1]; /**< timestamped wake bits */
+ uint32 pmd_event_wake_dur[WLC_PMD_EVENT_MAX_V1]; /**< cumulative usecs per wake reason */
+ uint32 drift_cnt; /**< Count of drift readings over which avg_drift was computed */
+} BWL_POST_PACKED_STRUCT pm_awake_data_v1_t;
+#include <packed_section_end.h>
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct wl_pwr_pm_awake_stats_v1 {
+ uint16 type; /**< WL_PWRSTATS_TYPE_PM_AWAKE */
+ uint16 len; /**< Up to 4K-1, top 4 bits are reserved */
+
+ pm_awake_data_v1_t awake_data;
+ uint32 frts_time; /**< Cumulative ms spent in frts since driver load */
+ uint32 frts_end_cnt; /**< No of times frts ended since driver load */
+} BWL_POST_PACKED_STRUCT wl_pwr_pm_awake_stats_v1_t;
+#include <packed_section_end.h>
+
+/** WL_PWRSTATS_TYPE_PM_AWAKE2 structures. Data sent as part of pwrstats IOVAR */
+typedef struct pm_awake_data_v2 {
+ uint32 curr_time; /**< ms */
+ uint32 hw_macc; /**< HW maccontrol */
+ uint32 sw_macc; /**< SW maccontrol */
+ uint32 pm_dur; /**< Total sleep time in PM, msecs */
+ uint32 mpc_dur; /**< Total sleep time in MPC, msecs */
+
+ /* int32 drifts = remote - local; +ve drift => local-clk slow */
+ int32 last_drift; /**< Most recent TSF drift from beacon */
+ int32 min_drift; /**< Min TSF drift from beacon in magnitude */
+ int32 max_drift; /**< Max TSF drift from beacon in magnitude */
+
+ uint32 avg_drift; /**< Avg TSF drift from beacon */
+
+ /* Wake history tracking */
+
+ /* pmstate array (type wlc_pm_debug_t) start offset */
+ uint16 pm_state_offset;
+ /** pmstate number of array entries */
+ uint16 pm_state_len;
+
+ /** array (type uint32) start offset */
+ uint16 pmd_event_wake_dur_offset;
+ /** pmd_event_wake_dur number of array entries */
+ uint16 pmd_event_wake_dur_len;
+
+ uint32 drift_cnt; /**< Count of drift readings over which avg_drift was computed */
+ uint8 pmwake_idx; /**< for stepping through pm_state */
+	uint8 flags;		/**< bit0: 1-sleep, 0-wake. bit1: 0-bit0 invalid, 1-bit0 valid */
+ uint8 pad[2];
+ uint32 frts_time; /**< Cumulative ms spent in frts since driver load */
+ uint32 frts_end_cnt; /**< No of times frts ended since driver load */
+} pm_awake_data_v2_t;
+
+typedef struct wl_pwr_pm_awake_stats_v2 {
+ uint16 type; /**< WL_PWRSTATS_TYPE_PM_AWAKE */
+ uint16 len; /**< Up to 4K-1, top 4 bits are reserved */
+
+ pm_awake_data_v2_t awake_data;
+} wl_pwr_pm_awake_stats_v2_t;
+
+/* bit0: 1-sleep, 0-wake. bit1: 0-bit0 invalid, 1-bit0 valid */
+#define WL_PWR_PM_AWAKE_STATS_WAKE 0x02
+#define WL_PWR_PM_AWAKE_STATS_ASLEEP 0x03
+#define WL_PWR_PM_AWAKE_STATS_WAKE_MASK 0x03
+
+/* WL_PWRSTATS_TYPE_PM_AWAKE Version 2 structures taken from 4324/43342 */
+/* These structures are only to be used with 4324/43342 devices */
+
+#define WL_STA_AWAKE_STATES_MAX_V2 30
+#define WL_PMD_EVENT_MAX_V2 32
+#define MAX_P2P_BSS_DTIM_PRD 4
+
+/** WL_PWRSTATS_TYPE_PM_ACCUMUL structures. Data sent as part of pwrstats IOVAR */
+typedef struct pm_accum_data_v1 {
+ uint64 current_ts;
+ uint64 pm_cnt;
+ uint64 pm_dur;
+ uint64 pm_last_entry_us;
+ uint64 awake_cnt;
+ uint64 awake_dur;
+ uint64 awake_last_entry_us;
+} pm_accum_data_v1_t;
+
+typedef struct wl_pwr_pm_accum_stats_v1 {
+ uint16 type; /**< WL_PWRSTATS_TYPE_PM_ACCUMUL */
+ uint16 len; /**< Up to 4K-1, top 4 bits are reserved */
+ uint8 PAD[4];
+ pm_accum_data_v1_t accum_data;
+} wl_pwr_pm_accum_stats_v1_t;
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct ucode_dbg_v2 {
+ uint32 macctrl;
+ uint16 m_p2p_hps;
+ uint16 m_p2p_bss_dtim_prd[MAX_P2P_BSS_DTIM_PRD];
+ uint32 psmdebug[20];
+ uint32 phydebug[20];
+ uint32 psm_brc;
+ uint32 ifsstat;
+} BWL_POST_PACKED_STRUCT ucode_dbg_v2_t;
+#include <packed_section_end.h>
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct pmalert_awake_data_v2 {
+ uint32 curr_time; /* ms */
+ uint32 hw_macc; /* HW maccontrol */
+ uint32 sw_macc; /* SW maccontrol */
+ uint32 pm_dur; /* Total sleep time in PM, msecs */
+ uint32 mpc_dur; /* Total sleep time in MPC, msecs */
+
+ /* int32 drifts = remote - local; +ve drift => local-clk slow */
+ int32 last_drift; /* Most recent TSF drift from beacon */
+ int32 min_drift; /* Min TSF drift from beacon in magnitude */
+ int32 max_drift; /* Max TSF drift from beacon in magnitude */
+
+ uint32 avg_drift; /* Avg TSF drift from beacon */
+
+ /* Wake history tracking */
+ uint8 pmwake_idx; /* for stepping through pm_state */
+ wlc_pm_debug_t pm_state[WL_STA_AWAKE_STATES_MAX_V2]; /* timestamped wake bits */
+ uint32 pmd_event_wake_dur[WL_PMD_EVENT_MAX_V2]; /* cumulative usecs per wake reason */
+ uint32 drift_cnt; /* Count of drift readings over which avg_drift was computed */
+ uint32 start_event_dur[WL_PMD_EVENT_MAX_V2]; /* start event-duration */
+ ucode_dbg_v2_t ud;
+ uint32 frts_time; /* Cumulative ms spent in frts since driver load */
+ uint32 frts_end_cnt; /* No of times frts ended since driver load */
+} BWL_POST_PACKED_STRUCT pmalert_awake_data_v2_t;
+#include <packed_section_end.h>
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct pm_alert_data_v2 {
+ uint32 version;
+ uint32 length; /* Length of entire structure */
+ uint32 reasons; /* reason(s) for pm_alert */
+ /* Following fields are present only for reasons
+ * PM_DUR_EXCEEDED, MPC_DUR_EXCEEDED & CONST_AWAKE_DUR_EXCEEDED
+ */
+ uint32 prev_stats_time; /* msecs */
+ uint32 prev_pm_dur; /* msecs */
+ uint32 prev_mpc_dur; /* msecs */
+ pmalert_awake_data_v2_t awake_data;
+} BWL_POST_PACKED_STRUCT pm_alert_data_v2_t;
+#include <packed_section_end.h>
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct wl_pwr_pm_awake_status_v2 {
+ uint16 type; /* WL_PWRSTATS_TYPE_PM_AWAKE */
+ uint16 len; /* Up to 4K-1, top 4 bits are reserved */
+
+ pmalert_awake_data_v2_t awake_data;
+ uint32 frts_time; /* Cumulative ms spent in frts since driver load */
+ uint32 frts_end_cnt; /* No of times frts ended since driver load */
+} BWL_POST_PACKED_STRUCT wl_pwr_pm_awake_status_v2_t;
+#include <packed_section_end.h>
+
+/* Below are latest definitions from PHO25178RC100_BRANCH_6_50 */
+/* wl_pwr_pm_awake_stats_v1_t is used for WL_PWRSTATS_TYPE_PM_AWAKE */
+/* Use regs from d11.h instead of raw addresses for */
+/* (at least) the chip independent registers */
+typedef struct ucode_dbg_ext {
+ uint32 x120;
+ uint32 x124;
+ uint32 x154;
+ uint32 x158;
+ uint32 x15c;
+ uint32 x180;
+ uint32 x184;
+ uint32 x188;
+ uint32 x18c;
+ uint32 x1a0;
+ uint32 x1a8;
+ uint32 x1e0;
+ uint32 scr_x14;
+ uint32 scr_x2b;
+ uint32 scr_x2c;
+ uint32 scr_x2d;
+ uint32 scr_x2e;
+
+ uint16 x40a;
+ uint16 x480;
+ uint16 x490;
+ uint16 x492;
+ uint16 x4d8;
+ uint16 x4b8;
+ uint16 x4ba;
+ uint16 x4bc;
+ uint16 x4be;
+ uint16 x500;
+ uint16 x50e;
+ uint16 x522;
+ uint16 x546;
+ uint16 x578;
+ uint16 x602;
+ uint16 x646;
+ uint16 x648;
+ uint16 x666;
+ uint16 x670;
+ uint16 x690;
+ uint16 x692;
+ uint16 x6a0;
+ uint16 x6a2;
+ uint16 x6a4;
+ uint16 x6b2;
+ uint16 x7c0;
+
+ uint16 shm_x20;
+ uint16 shm_x4a;
+ uint16 shm_x5e;
+ uint16 shm_x5f;
+ uint16 shm_xaab;
+ uint16 shm_x74a;
+ uint16 shm_x74b;
+ uint16 shm_x74c;
+ uint16 shm_x74e;
+ uint16 shm_x756;
+ uint16 shm_x75b;
+ uint16 shm_x7b9;
+ uint16 shm_x7d4;
+
+ uint16 shm_P2P_HPS;
+ uint16 shm_P2P_intr[16];
+ uint16 shm_P2P_perbss[48];
+} ucode_dbg_ext_t;
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct pm_alert_data_v1 {
+ uint32 version;
+ uint32 length; /**< Length of entire structure */
+ uint32 reasons; /**< reason(s) for pm_alert */
+ /* Following fields are present only for reasons
+ * PM_DUR_EXCEEDED, MPC_DUR_EXCEEDED & CONST_AWAKE_DUR_EXCEEDED
+ */
+ uint32 prev_stats_time; /**< msecs */
+ uint32 prev_pm_dur; /**< msecs */
+ uint32 prev_mpc_dur; /**< msecs */
+ pm_awake_data_v1_t awake_data;
+ uint32 start_event_dur[WLC_PMD_EVENT_MAX_V1]; /**< start event-duration */
+ ucode_dbg_v2_t ud;
+ uint32 frts_time; /**< Cumulative ms spent in frts since driver load */
+ uint32 frts_end_cnt; /**< No of times frts ended since driver load */
+ ucode_dbg_ext_t ud_ext;
+ uint32 prev_frts_dur; /**< ms */
+} BWL_POST_PACKED_STRUCT pm_alert_data_v1_t;
+#include <packed_section_end.h>
+
+/* End of 43342/4324 v2 structure definitions */
+
+/* Original bus structure is for HSIC */
+
+typedef struct bus_metrics {
+ uint32 suspend_ct; /**< suspend count */
+ uint32 resume_ct; /**< resume count */
+ uint32 disconnect_ct; /**< disconnect count */
+ uint32 reconnect_ct; /**< reconnect count */
+ uint32 active_dur; /**< msecs in bus, usecs for user */
+ uint32 suspend_dur; /**< msecs in bus, usecs for user */
+ uint32 disconnect_dur; /**< msecs in bus, usecs for user */
+} bus_metrics_t;
+
+/** Bus interface info for USB/HSIC */
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct wl_pwr_usb_hsic_stats {
+ uint16 type; /**< WL_PWRSTATS_TYPE_USB_HSIC */
+ uint16 len; /**< Up to 4K-1, top 4 bits are reserved */
+
+ bus_metrics_t hsic; /**< stats from hsic bus driver */
+} BWL_POST_PACKED_STRUCT wl_pwr_usb_hsic_stats_t;
+#include <packed_section_end.h>
+
+/* PCIe Event counter tlv IDs */
+enum pcie_cnt_xtlv_id {
+ PCIE_CNT_XTLV_METRICS = 0x1, /**< PCIe Bus Metrics */
+ PCIE_CNT_XTLV_BUS_CNT = 0x2 /**< PCIe Bus counters */
+};
+
+typedef struct pcie_bus_metrics {
+ uint32 d3_suspend_ct; /**< suspend count */
+ uint32 d0_resume_ct; /**< resume count */
+ uint32 perst_assrt_ct; /**< PERST# assert count */
+ uint32 perst_deassrt_ct; /**< PERST# de-assert count */
+ uint32 active_dur; /**< msecs */
+ uint32 d3_suspend_dur; /**< msecs */
+ uint32 perst_dur; /**< msecs */
+ uint32 l0_cnt; /**< L0 entry count */
+ uint32 l0_usecs; /**< L0 duration in usecs */
+ uint32 l1_cnt; /**< L1 entry count */
+ uint32 l1_usecs; /**< L1 duration in usecs */
+ uint32 l1_1_cnt; /**< L1_1ss entry count */
+ uint32 l1_1_usecs; /**< L1_1ss duration in usecs */
+ uint32 l1_2_cnt; /**< L1_2ss entry count */
+ uint32 l1_2_usecs; /**< L1_2ss duration in usecs */
+ uint32 l2_cnt; /**< L2 entry count */
+ uint32 l2_usecs; /**< L2 duration in usecs */
+ uint32 timestamp; /**< Timestamp on when stats are collected */
+ uint32 num_h2d_doorbell; /**< # of doorbell interrupts - h2d */
+ uint32 num_d2h_doorbell; /**< # of doorbell interrupts - d2h */
+ uint32 num_submissions; /**< # of submissions */
+ uint32 num_completions; /**< # of completions */
+ uint32 num_rxcmplt; /**< # of rx completions */
+	uint32 num_rxcmplt_drbl;	/**< # of drbl interrupts for rx complt. */
+ uint32 num_txstatus; /**< # of tx completions */
+	uint32 num_txstatus_drbl;	/**< # of drbl interrupts for tx complt. */
+ uint32 deepsleep_count; /**< # of times chip went to deepsleep */
+ uint32 deepsleep_dur; /**< # of msecs chip was in deepsleep */
+ uint32 ltr_active_ct; /**< # of times chip went to LTR ACTIVE */
+ uint32 ltr_active_dur; /**< # of msecs chip was in LTR ACTIVE */
+ uint32 ltr_sleep_ct; /**< # of times chip went to LTR SLEEP */
+ uint32 ltr_sleep_dur; /**< # of msecs chip was in LTR SLEEP */
+} pcie_bus_metrics_t;
+
+typedef struct pcie_cnt {
+ uint32 ltr_state; /**< Current LTR state */
+ uint32 l0_sr_cnt; /**< SR count during L0 */
+ uint32 l2l3_sr_cnt; /**< SR count during L2L3 */
+	uint32 d3_ack_sr_cnt;	/**< SR count during last D3-ACK */
+ uint32 d3_sr_cnt; /**< SR count during D3 */
+ uint32 d3_info_start; /**< D3 INFORM received time */
+ uint32 d3_info_enter_cnt; /**< # of D3 INFORM received */
+ uint32 d3_cnt; /**< # of real D3 */
+ uint32 d3_ack_sent_cnt; /**< # of D3 ACK sent count */
+ uint32 d3_drop_cnt_event; /**< # of events dropped during D3 */
+ uint32 d2h_req_q_len; /**< # of Packet pending in D2H request queue */
+ uint32 hw_reason; /**< Last Host wake assert reason */
+ uint32 hw_assert_cnt; /**< # of times Host wake Asserted */
+ uint32 host_ready_cnt; /**< # of Host ready interrupts */
+ uint32 hw_assert_reason_0; /**< timestamp when hw_reason is TRAP */
+ uint32 hw_assert_reason_1; /**< timestamp when hw_reason is WL_EVENT */
+ uint32 hw_assert_reason_2; /**< timestamp when hw_reason is DATA */
+ uint32 hw_assert_reason_3; /**< timestamp when hw_reason is DELAYED_WAKE */
+ uint32 last_host_ready; /**< Timestamp of last Host ready */
+ bool hw_asserted; /**< Flag to indicate if Host wake is Asserted */
+ bool event_delivery_pend; /**< No resources to send event */
+ uint16 pad; /**< Word alignment for scripts */
+} pcie_cnt_t;
+
+/** Bus interface info for PCIE */
+typedef struct wl_pwr_pcie_stats {
+ uint16 type; /**< WL_PWRSTATS_TYPE_PCIE */
+ uint16 len; /**< Up to 4K-1, top 4 bits are reserved */
+ pcie_bus_metrics_t pcie; /**< stats from pcie bus driver */
+} wl_pwr_pcie_stats_t;
+
+/** Scan information history per category */
+typedef struct scan_data {
+ uint32 count; /**< Number of scans performed */
+ uint32 dur; /**< Total time (in us) used */
+} scan_data_t;
+
+typedef struct wl_pwr_scan_stats {
+ uint16 type; /**< WL_PWRSTATS_TYPE_SCAN */
+ uint16 len; /**< Up to 4K-1, top 4 bits are reserved */
+
+ /* Scan history */
+ scan_data_t user_scans; /**< User-requested scans: (i/e/p)scan */
+ scan_data_t assoc_scans; /**< Scans initiated by association requests */
+ scan_data_t roam_scans; /**< Scans initiated by the roam engine */
+ scan_data_t pno_scans[8]; /**< For future PNO bucketing (BSSID, SSID, etc) */
+ scan_data_t other_scans; /**< Scan engine usage not assigned to the above */
+} wl_pwr_scan_stats_t;
+
+typedef struct wl_pwr_connect_stats {
+	uint16 type;	     /**< WL_PWRSTATS_TYPE_CONNECTION */
+ uint16 len; /**< Up to 4K-1, top 4 bits are reserved */
+
+ /* Connection (Association + Key exchange) data */
+ uint32 count; /**< Number of connections performed */
+ uint32 dur; /**< Total time (in ms) used */
+} wl_pwr_connect_stats_t;
+
+typedef struct wl_pwr_phy_stats {
+ uint16 type; /**< WL_PWRSTATS_TYPE_PHY */
+ uint16 len; /**< Up to 4K-1, top 4 bits are reserved */
+ uint32 tx_dur; /**< TX Active duration in us */
+ uint32 rx_dur; /**< RX Active duration in us */
+} wl_pwr_phy_stats_t;
+
+#ifdef WLAWDL
+typedef struct wl_pwr_awdl_stats {
+ uint16 type; /**< WL_PWRSTATS_TYPE_AWDL */
+ uint16 len; /**< Up to 4K-1, top 4 bits are reserved */
+ uint32 tx_dur; /**< AWDL TX Active duration in usec */
+ uint32 rx_dur; /**< AWDL RX Active duration in usec */
+ uint32 aw_dur; /**< AWDL AW duration in msec */
+ uint32 awpscan_dur; /**< AWDL pscans dur in msec */
+} wl_pwr_awdl_stats_t;
+#endif /* WLAWDL */
+
+typedef struct wl_mimo_meas_metrics_v1 {
+ uint16 type;
+ uint16 len;
+ /* Total time(us) idle in MIMO RX chain configuration */
+ uint32 total_idle_time_mimo;
+ /* Total time(us) idle in SISO RX chain configuration */
+ uint32 total_idle_time_siso;
+ /* Total receive time (us) in SISO RX chain configuration */
+ uint32 total_rx_time_siso;
+ /* Total receive time (us) in MIMO RX chain configuration */
+ uint32 total_rx_time_mimo;
+ /* Total 1-chain transmit time(us) */
+ uint32 total_tx_time_1chain;
+ /* Total 2-chain transmit time(us) */
+ uint32 total_tx_time_2chain;
+ /* Total 3-chain transmit time(us) */
+ uint32 total_tx_time_3chain;
+} wl_mimo_meas_metrics_v1_t;
+
+typedef struct wl_mimo_meas_metrics {
+ uint16 type;
+ uint16 len;
+ /* Total time(us) idle in MIMO RX chain configuration */
+ uint32 total_idle_time_mimo;
+ /* Total time(us) idle in SISO RX chain configuration */
+ uint32 total_idle_time_siso;
+ /* Total receive time (us) in SISO RX chain configuration */
+ uint32 total_rx_time_siso;
+ /* Total receive time (us) in MIMO RX chain configuration */
+ uint32 total_rx_time_mimo;
+ /* Total 1-chain transmit time(us) */
+ uint32 total_tx_time_1chain;
+ /* Total 2-chain transmit time(us) */
+ uint32 total_tx_time_2chain;
+ /* Total 3-chain transmit time(us) */
+ uint32 total_tx_time_3chain;
+ /* End of original, OCL fields start here */
+ /* Total time(us) idle in ocl mode */
+ uint32 total_idle_time_ocl;
+ /* Total receive time (us) in ocl mode */
+ uint32 total_rx_time_ocl;
+ /* End of OCL fields, internal adjustment fields here */
+ /* Total SIFS idle time in MIMO mode */
+ uint32 total_sifs_time_mimo;
+ /* Total SIFS idle time in SISO mode */
+ uint32 total_sifs_time_siso;
+} wl_mimo_meas_metrics_t;
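+
+/* Derived-metric sketch (illustrative): percentage of total receive time
+ * spent in the SISO chain configuration, from the cumulative microsecond
+ * counters above. The uint64 intermediate avoids overflowing the product.
+ */
+static INLINE uint32
+wl_mimo_rx_siso_pct(const wl_mimo_meas_metrics_t *m)
+{
+	uint32 total_rx = m->total_rx_time_siso + m->total_rx_time_mimo;
+	return (total_rx != 0) ?
+		(uint32)(((uint64)m->total_rx_time_siso * 100u) / total_rx) : 0u;
+}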
+
+typedef struct wl_pwr_slice_index {
+ uint16 type; /* WL_PWRSTATS_TYPE_SLICE_INDEX */
+ uint16 len;
+
+	uint32 slice_index;	/* Slice index the stats apply to */
+} wl_pwr_slice_index_t;
+
+typedef struct wl_pwr_tsync_stats {
+ uint16 type; /**< WL_PWRSTATS_TYPE_TSYNC */
+ uint16 len;
+ uint32 avb_uptime; /**< AVB uptime in msec */
+} wl_pwr_tsync_stats_t;
+
+typedef struct wl_pwr_ops_stats {
+ uint16 type; /* WL_PWRSTATS_TYPE_OPS_STATS */
+ uint16 len; /* total length includes fixed fields */
+ uint32 partial_ops_dur; /* Total time(in usec) partial ops duration */
+ uint32 full_ops_dur; /* Total time(in usec) full ops duration */
+} wl_pwr_ops_stats_t;
+
+typedef struct wl_pwr_bcntrim_stats {
+ uint16 type; /* WL_PWRSTATS_TYPE_BCNTRIM_STATS */
+ uint16 len; /* total length includes fixed fields */
+	uint8 associated;	/* whether the STA is associated */
+ uint8 slice_idx; /* on which slice STA is associated */
+ uint16 pad; /* padding */
+ uint32 slice_beacon_seen; /* number of beacons seen on the Infra
+ * interface on this slice
+ */
+	uint32 slice_beacon_trimmed;	/* number of beacons actually trimmed on this slice */
+ uint32 total_beacon_seen; /* total number of beacons seen on the Infra interface */
+ uint32 total_beacon_trimmed; /* total beacons actually trimmed */
+} wl_pwr_bcntrim_stats_t;
+
+typedef struct wl_pwr_slice_index_band {
+ uint16 type; /* WL_PWRSTATS_TYPE_SLICE_INDEX_BAND_INFO */
+ uint16 len; /* Total length includes fixed fields */
+ uint16 index; /* Slice Index */
+ int16 bandtype; /* Slice Bandtype */
+} wl_pwr_slice_index_band_t;
+
+typedef struct wl_pwr_psbw_stats {
+ uint16 type; /* WL_PWRSTATS_TYPE_PSBW_STATS */
+ uint16 len; /* total length includes fixed fields */
+ uint8 slice_idx; /* on which slice STA is associated */
+ uint8 pad[3];
+ uint32 slice_enable_dur; /* time(ms) psbw remains enabled on this slice */
+ uint32 total_enable_dur; /* time(ms) psbw remains enabled total */
+} wl_pwr_psbw_stats_t;
+
+/* ##### End of Power Stats section ##### */
+
+/** IPv4 ARP offloads for NDIS context */
+#include <packed_section_start.h>
+BWL_PRE_PACKED_STRUCT struct hostip_id {
+ struct ipv4_addr ipa;
+ uint8 id;
+} BWL_POST_PACKED_STRUCT;
+#include <packed_section_end.h>
+
+/* Return values */
+#define ND_REPLY_PEER 0x1 /**< Reply was sent to service NS request from peer */
+#define ND_REQ_SINK 0x2 /**< Input packet should be discarded */
+#define ND_FORCE_FORWARD 0x3	/**< For the dongle to forward req to HOST */
+
+/** Neighbor Solicitation Response Offload IOVAR param */
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct nd_param {
+ struct ipv6_addr host_ip[2];
+ struct ipv6_addr solicit_ip;
+ struct ipv6_addr remote_ip;
+ uint8 host_mac[ETHER_ADDR_LEN];
+ uint32 offload_id;
+} BWL_POST_PACKED_STRUCT nd_param_t;
+#include <packed_section_end.h>
+
+typedef struct wl_pfn_roam_thresh {
+ uint32 pfn_alert_thresh; /**< time in ms */
+ uint32 roam_alert_thresh; /**< time in ms */
+} wl_pfn_roam_thresh_t;
+
+/* Reasons for wl_pmalert_t */
+#define PM_DUR_EXCEEDED (1<<0)
+#define MPC_DUR_EXCEEDED (1<<1)
+#define ROAM_ALERT_THRESH_EXCEEDED (1<<2)
+#define PFN_ALERT_THRESH_EXCEEDED (1<<3)
+#define CONST_AWAKE_DUR_ALERT (1<<4)
+#define CONST_AWAKE_DUR_RECOVERY (1<<5)
+
+#define MIN_PM_ALERT_LEN 9
+
+/** Data sent in EXCESS_PM_WAKE event */
+#define WL_PM_ALERT_VERSION 3
+
+/** This structure is for version 3; version 2 will be deprecated by FW */
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct wl_pmalert {
+ uint16 version; /**< Version = 3 is TLV format */
+ uint16 length; /**< Length of entire structure */
+ uint32 reasons; /**< reason(s) for pm_alert */
+ uint8 data[1]; /**< TLV data, a series of structures,
+ * each starting with type and length.
+ *
+ * Padded as necessary so each section
+ * starts on a 4-byte boundary.
+ *
+ * Both type and len are uint16, but the
+ * upper nibble of length is reserved so
+ * valid len values are 0-4095.
+ */
+} BWL_POST_PACKED_STRUCT wl_pmalert_t;
+#include <packed_section_end.h>
+
+#define NUM_P2P_BSS_UCODE_DBG 3
+#define IDX_P2P_BSS_UCODE_DBG 12
+#define NUM_PSM_PHY_DBG 20
+
+/* Type values for the data section */
+#define WL_PMALERT_FIXED 0 /**< struct wl_pmalert_fixed_t, fixed fields */
+#define WL_PMALERT_PMSTATE 1 /**< struct wl_pmalert_pmstate_t, variable */
+#define WL_PMALERT_EVENT_DUR 2 /**< struct wl_pmalert_event_dur_t, variable */
+#define WL_PMALERT_UCODE_DBG 3 /**< struct wl_pmalert_ucode_dbg_v1, variable */
+#define WL_PMALERT_PS_ALLOWED_HIST 4 /**< struct wl_pmalert_ps_allowed_history, variable */
+#define WL_PMALERT_EXT_UCODE_DBG 5 /**< struct wl_pmalert_ext_ucode_dbg_t, variable */
+#define WL_PMALERT_EPM_START_EVENT_DUR 6 /**< struct wl_pmalert_event_dur_t, variable */
+#define WL_PMALERT_UCODE_DBG_V2 7 /**< struct wl_pmalert_ucode_dbg_v2, variable */
+#define WL_PMALERT_FIXED_SC 8 /**< struct wl_pmalert_fixed_sc_t, fixed fields */
+#define WL_PMALERT_UCODE_DBG_SC 9 /**< struct wl_pmalert_ucode_dbg_v2, variable */
+
+typedef struct wl_pmalert_fixed {
+ uint16 type; /**< WL_PMALERT_FIXED */
+ uint16 len; /**< Up to 4K-1, top 4 bits are reserved */
+ uint32 prev_stats_time; /**< msecs */
+ uint32 curr_time; /**< ms */
+ uint32 prev_pm_dur; /**< msecs */
+ uint32 pm_dur; /**< Total sleep time in PM, msecs */
+ uint32 prev_mpc_dur; /**< msecs */
+ uint32 mpc_dur; /**< Total sleep time in MPC, msecs */
+ uint32 hw_macc; /**< HW maccontrol */
+ uint32 sw_macc; /**< SW maccontrol */
+
+ /* int32 drifts = remote - local; +ve drift -> local-clk slow */
+ int32 last_drift; /**< Most recent TSF drift from beacon */
+ int32 min_drift; /**< Min TSF drift from beacon in magnitude */
+ int32 max_drift; /**< Max TSF drift from beacon in magnitude */
+
+ uint32 avg_drift; /**< Avg TSF drift from beacon */
+ uint32 drift_cnt; /**< Count of drift readings over which avg_drift was computed */
+ uint32 frts_time; /**< Cumulative ms spent in data frts since driver load */
+ uint32 frts_end_cnt; /**< No of times frts ended since driver load */
+ uint32 prev_frts_dur; /**< Data frts duration at start of pm-period */
+ uint32 cal_dur; /**< Cumulative ms spent in calibration */
+ uint32 prev_cal_dur; /**< cal duration at start of pm-period */
+} wl_pmalert_fixed_t;
+
+#define WL_PMALERT_SC_FLAG_SC_DOWN (1u << 0u)
+#define WL_PMALERT_SC_FLAG_SC_HIBERNATE (1u << 1u)
+
+typedef struct wl_pmalert_fixed_sc {
+ uint16 type; /* WL_PMALERT_FIXED_SC */
+ uint16 len; /* Up to 4K-1, top 4 bits are reserved */
+ uint32 flags;
+ uint32 prev_stats_time; /* msecs */
+ uint32 curr_time; /* msecs */
+ uint32 prev_pm_dur; /* msecs */
+ uint32 pm_dur; /* Total sleep time in PM, msecs */
+ uint32 win_down_time; /* Total down time in the window in ms */
+ uint32 hw_macc; /* HW maccontrol */
+ uint32 sw_macc; /* SW maccontrol */
+
+ /* int32 drifts = remote - local; +ve drift -> local-clk slow */
+ int32 last_drift; /* Most recent TSF drift from beacon */
+ int32 min_drift; /* Min TSF drift from beacon in magnitude */
+ int32 max_drift; /* Max TSF drift from beacon in magnitude */
+ uint32 avg_drift; /* Avg TSF drift from beacon */
+ uint32 drift_cnt; /* Count of drift readings over which avg_drift was computed */
+
+ uint32 cal_dur; /* Cumulative ms spent in calibration */
+ uint32 prev_cal_dur; /* cal duration at start of pm-period */
+} wl_pmalert_fixed_sc_t;
+
+typedef struct wl_pmalert_pmstate {
+ uint16 type; /**< WL_PMALERT_PMSTATE */
+ uint16 len; /**< Up to 4K-1, top 4 bits are reserved */
+
+ uint8 pmwake_idx; /**< for stepping through pm_state */
+ uint8 pad[3];
+ /* Array of pmstate; len of array is based on tlv len */
+ wlc_pm_debug_t pmstate[1];
+} wl_pmalert_pmstate_t;
+
+typedef struct wl_pmalert_event_dur {
+ uint16 type; /**< WL_PMALERT_EVENT_DUR */
+ uint16 len; /**< Up to 4K-1, top 4 bits are reserved */
+
+ /* Array of event_dur, len of array is based on tlv len */
+ uint32 event_dur[1];
+} wl_pmalert_event_dur_t;
+
+#include <packed_section_start.h>
+BWL_PRE_PACKED_STRUCT struct wl_pmalert_ucode_dbg_v1 {
+ uint16 type; /* WL_PMALERT_UCODE_DBG */
+ uint16 len; /* Up to 4K-1, top 4 bits are reserved */
+ uint32 macctrl;
+ uint16 m_p2p_hps;
+ uint32 psm_brc;
+ uint32 ifsstat;
+ uint16 m_p2p_bss_dtim_prd[MAX_P2P_BSS_DTIM_PRD];
+ uint32 psmdebug[NUM_PSM_PHY_DBG];
+ uint32 phydebug[NUM_PSM_PHY_DBG];
+ uint16 M_P2P_BSS[NUM_P2P_BSS_UCODE_DBG][IDX_P2P_BSS_UCODE_DBG];
+ uint16 M_P2P_PRE_TBTT[NUM_P2P_BSS_UCODE_DBG];
+
+ /* Following is valid only for corerevs<40 */
+ uint16 xmtfifordy;
+
+ /* Following 3 are valid only for 11ac corerevs (>=40) */
+ uint16 psm_maccommand;
+ uint16 txe_status1;
+ uint16 AQMFifoReady;
+} BWL_POST_PACKED_STRUCT;
+#include <packed_section_end.h>
+
+#include <packed_section_start.h>
+BWL_PRE_PACKED_STRUCT struct wl_pmalert_ucode_dbg_v2 {
+ uint16 type; /**< WL_PMALERT_UCODE_DBG_V2 */
+ uint16 len; /**< Up to 4K-1, top 4 bits are reserved */
+ uint32 macctrl;
+ uint16 m_p2p_hps;
+ uint32 psm_brc;
+ uint32 ifsstat;
+ uint16 m_p2p_bss_dtim_prd[MAX_P2P_BSS_DTIM_PRD];
+ uint32 psmdebug[NUM_PSM_PHY_DBG];
+ uint32 phydebug[NUM_PSM_PHY_DBG];
+ uint16 M_P2P_BSS[NUM_P2P_BSS_UCODE_DBG][IDX_P2P_BSS_UCODE_DBG];
+ uint16 M_P2P_PRE_TBTT[NUM_P2P_BSS_UCODE_DBG];
+
+ /* Following is valid only for corerevs<40 */
+ uint16 xmtfifordy;
+
+ /* Following 3 are valid only for 11ac corerevs (>=40) */
+ uint16 psm_maccommand;
+ uint16 txe_status1;
+ uint32 AQMFifoReady;
+} BWL_POST_PACKED_STRUCT;
+#include <packed_section_end.h>
+
+#include <packed_section_start.h>
+BWL_PRE_PACKED_STRUCT struct wl_pmalert_ucode_dbg_sc_v1 {
+ uint16 type; /**< WL_PMALERT_UCODE_DBG_SC */
+ uint16 len; /**< Up to 4K-1, top 4 bits are reserved */
+ /* ucode SHM and registers */
+ uint32 macctrl;
+ uint16 m_p2p_hps;
+ uint32 psm_brc;
+ uint32 ifsstat;
+ uint16 m_p2p_bss_dtim_prd[MAX_P2P_BSS_DTIM_PRD];
+ uint32 psmdebug[NUM_PSM_PHY_DBG];
+ uint32 phydebug[NUM_PSM_PHY_DBG];
+ uint16 M_P2P_BSS[NUM_P2P_BSS_UCODE_DBG][IDX_P2P_BSS_UCODE_DBG];
+ uint16 M_P2P_PRE_TBTT[NUM_P2P_BSS_UCODE_DBG];
+ uint16 psm_maccommand;
+} BWL_POST_PACKED_STRUCT;
+#include <packed_section_end.h>
+
+typedef struct wlc_ps_debug {
+ uint32 timestamp; /**< timestamp in millisecond */
+ uint32 ps_mask; /**< reason(s) for disallowing ps */
+} wlc_ps_debug_t;
+
+typedef struct wl_pmalert_ps_allowed_hist {
+ uint16 type; /**< WL_PMALERT_PS_ALLOWED_HIST */
+ uint16 len; /**< Up to 4K-1, top 4 bits are reserved */
+ uint32 ps_allowed_start_idx;
+ /* Array of ps_debug, len of array is based on tlv len */
+ wlc_ps_debug_t ps_debug[1];
+} wl_pmalert_ps_allowed_hist_t;
+
+/* Structures and constants used for "vndr_ie" IOVar interface */
+#define VNDR_IE_CMD_LEN 4 /**< length of the set command string:
+ * "add", "del" (+ NUL)
+ */
+
+#define VNDR_IE_INFO_HDR_LEN (sizeof(uint32))
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint32 pktflag; /**< bitmask indicating which packet(s) contain this IE */
+ vndr_ie_t vndr_ie_data; /**< vendor IE data */
+} BWL_POST_PACKED_STRUCT vndr_ie_info_t;
+#include <packed_section_end.h>
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct {
+ int32 iecount; /**< number of entries in the vndr_ie_list[] array */
+ vndr_ie_info_t vndr_ie_list[1]; /**< variable size list of vndr_ie_info_t structs */
+} BWL_POST_PACKED_STRUCT vndr_ie_buf_t;
+#include <packed_section_end.h>
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct {
+ char cmd[VNDR_IE_CMD_LEN]; /**< vndr_ie IOVar set command : "add", "del" + NUL */
+ vndr_ie_buf_t vndr_ie_buffer; /**< buffer containing Vendor IE list information */
+} BWL_POST_PACKED_STRUCT vndr_ie_setbuf_t;
+#include <packed_section_end.h>
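+
+/* Request-building sketch for "vndr_ie" (illustrative): the set buffer
+ * carries the command string, an IE count, and one or more vndr_ie_info_t
+ * entries. The vndr_ie_t layout and the pktflag bit meanings are defined
+ * elsewhere in this header; bit 0 below is a placeholder packet flag.
+ */
+static INLINE void
+vndr_ie_setbuf_add_example(vndr_ie_setbuf_t *sb)
+{
+	sb->cmd[0] = 'a'; sb->cmd[1] = 'd'; sb->cmd[2] = 'd'; sb->cmd[3] = '\0';
+	sb->vndr_ie_buffer.iecount = 1;
+	sb->vndr_ie_buffer.vndr_ie_list[0].pktflag = (1 << 0); /* placeholder */
+	/* caller fills vndr_ie_list[0].vndr_ie_data (id/len/OUI/payload) */
+}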
+
+/** tag_ID/length/value_buffer tuple */
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint8 id;
+ uint8 len;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT tlv_t;
+#include <packed_section_end.h>
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint32 pktflag; /**< bitmask indicating which packet(s) contain this IE */
+ tlv_t ie_data; /**< IE data */
+} BWL_POST_PACKED_STRUCT ie_info_t;
+#include <packed_section_end.h>
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct {
+ int32 iecount; /**< number of entries in the ie_list[] array */
+ ie_info_t ie_list[1]; /**< variable size list of ie_info_t structs */
+} BWL_POST_PACKED_STRUCT ie_buf_t;
+#include <packed_section_end.h>
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct {
+ char cmd[VNDR_IE_CMD_LEN]; /**< ie IOVar set command : "add" + NUL */
+ ie_buf_t ie_buffer; /**< buffer containing IE list information */
+} BWL_POST_PACKED_STRUCT ie_setbuf_t;
+#include <packed_section_end.h>
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint32 pktflag; /**< bitmask indicating which packet(s) contain this IE */
+ uint8 id; /**< IE type */
+} BWL_POST_PACKED_STRUCT ie_getbuf_t;
+#include <packed_section_end.h>
+
+/* structures used to define format of wps ie data from probe requests */
+/* passed up to applications via iovar "prbreq_wpsie" */
+typedef struct sta_prbreq_wps_ie_hdr {
+ struct ether_addr staAddr;
+ uint16 ieLen;
+} sta_prbreq_wps_ie_hdr_t;
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct sta_prbreq_wps_ie_data {
+ sta_prbreq_wps_ie_hdr_t hdr;
+ uint8 ieData[1];
+} BWL_POST_PACKED_STRUCT sta_prbreq_wps_ie_data_t;
+#include <packed_section_end.h>
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct sta_prbreq_wps_ie_list {
+ uint32 totLen;
+ uint8 ieDataList[1];
+} BWL_POST_PACKED_STRUCT sta_prbreq_wps_ie_list_t;
+#include <packed_section_end.h>
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint32 flags;
+ chanspec_t chanspec; /**< txpwr report for this channel */
+ chanspec_t local_chanspec; /**< channel on which we are associated */
+ uint8 local_max; /**< local max according to the AP */
+ uint8 local_constraint; /**< local constraint according to the AP */
+ int8 antgain[2]; /**< Ant gain for each band - from SROM */
+ uint8 rf_cores; /**< count of RF Cores being reported */
+ uint8 est_Pout[4]; /**< Latest tx power out estimate per RF chain */
+ uint8 est_Pout_act[4]; /**< Latest tx power out estimate per RF chain w/o adjustment */
+ uint8 est_Pout_cck; /**< Latest CCK tx power out estimate */
+ uint8 tx_power_max[4]; /**< Maximum target power among all rates */
+ uint32 tx_power_max_rate_ind[4]; /**< Index of the rate with the max target power */
+ int8 sar; /**< SAR limit for display by wl executable */
+ int8 channel_bandwidth; /**< 20, 40 or 80 MHz bandwidth? */
+ uint8 version; /**< Version of the data format wlu <--> driver */
+ uint8 display_core; /**< Displayed curpower core */
+ int8 target_offsets[4]; /**< Target power offsets for current rate per core */
+	uint32 last_tx_ratespec;	/**< Ratespec for last transmission */
+ uint32 user_target; /**< user limit */
+ uint32 ppr_len; /**< length of each ppr serialization buffer */
+ int8 SARLIMIT[MAX_STREAMS_SUPPORTED];
+ uint8 pprdata[1]; /**< ppr serialization buffer */
+} BWL_POST_PACKED_STRUCT tx_pwr_rpt_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint32 flags;
+ chanspec_t chanspec; /**< txpwr report for this channel */
+ chanspec_t local_chanspec; /**< channel on which we are associated */
+ uint8 local_max; /**< local max according to the AP */
+ uint8 local_constraint; /**< local constraint according to the AP */
+ int8 pad[2]; /**< unused */
+ uint8 rf_cores; /**< count of RF Cores being reported */
+ uint8 est_Pout[4]; /**< Latest tx power out estimate per RF chain */
+ uint8 est_Pout_act[4]; /**< Latest tx power out estimate per RF chain w/o adjustment */
+ uint8 est_Pout_cck; /**< Latest CCK tx power out estimate */
+ uint8 tx_power_max[4]; /**< Maximum target power among all rates */
+ uint32 tx_power_max_rate_ind[4]; /**< Index of the rate with the max target power */
+ int8 sar; /**< SAR limit for display by wl executable */
+ int8 channel_bandwidth; /**< 20, 40 or 80 MHz bandwidth? */
+ uint8 version; /**< Version of the data format wlu <--> driver */
+ uint8 display_core; /**< Displayed curpower core */
+ int8 target_offsets[4]; /**< Target power offsets for current rate per core */
+ uint32 last_tx_ratespec; /**< Ratespec for last transmission */
+ uint32 user_target; /**< user limit */
+ uint32 ppr_len; /**< length of each ppr serialization buffer */
+ int8 SARLIMIT[MAX_STREAMS_SUPPORTED];
+ int8 antgain[3]; /**< Ant gain for each band - from SROM */
+ uint8 pprdata[1]; /**< ppr serialization buffer */
+} BWL_POST_PACKED_STRUCT tx_pwr_rpt_v2_t;
+#include <packed_section_end.h>
+
+typedef struct tx_pwr_ru_rate_info {
+ uint16 version;
+ uint16 ru_alloc;
+ uint16 mcs;
+ uint16 nss;
+ uint16 num_he_ltf_syms;
+ uint16 ldpc;
+ uint16 gi;
+ uint16 txmode;
+ uint16 dcm;
+ uint16 tx_chain;
+} tx_pwr_ru_rate_info_t;
+
+#define TX_PWR_RU_RATE_INFO_VER 1
+
+/* TLV ID for curpower report, ID < 63 is reserved for ppr module */
+typedef enum tx_pwr_tlv_id {
+ TX_PWR_RPT_RU_RATE_INFO_ID = 64
+} tx_pwr_tlv_id_t;
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct {
+ struct ipv4_addr ipv4_addr;
+ struct ether_addr nexthop;
+} BWL_POST_PACKED_STRUCT ibss_route_entry_t;
+#include <packed_section_end.h>
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint32 num_entry;
+ ibss_route_entry_t route_entry[1];
+} BWL_POST_PACKED_STRUCT ibss_route_tbl_t;
+#include <packed_section_end.h>
+
+#define MAX_IBSS_ROUTE_TBL_ENTRY 64
+
+#define TXPWR_TARGET_VERSION 0
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct {
+ int32 version; /**< version number */
+ chanspec_t chanspec; /**< txpwr report for this channel */
+ int8 txpwr[WL_STA_ANT_MAX]; /**< Max tx target power, in qdb */
+ uint8 rf_cores; /**< count of RF Cores being reported */
+} BWL_POST_PACKED_STRUCT txpwr_target_max_t;
+#include <packed_section_end.h>
+
+#define BSS_PEER_INFO_PARAM_CUR_VER 0
+/** Input structure for IOV_BSS_PEER_INFO */
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint16 version;
+ struct ether_addr ea; /**< peer MAC address */
+} BWL_POST_PACKED_STRUCT bss_peer_info_param_t;
+#include <packed_section_end.h>
+
+#define BSS_PEER_INFO_CUR_VER 0
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint16 version;
+ struct ether_addr ea;
+ int32 rssi;
+ uint32 tx_rate; /**< current tx rate */
+ uint32 rx_rate; /**< current rx rate */
+ wl_rateset_t rateset; /**< rateset in use */
+ uint32 age; /**< age in seconds */
+} BWL_POST_PACKED_STRUCT bss_peer_info_t;
+#include <packed_section_end.h>
+
+#define BSS_PEER_LIST_INFO_CUR_VER 0
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint16 version;
+ uint16 bss_peer_info_len; /**< length of bss_peer_info_t */
+ uint32 count; /**< number of peer info */
+ bss_peer_info_t peer_info[1]; /**< peer info */
+} BWL_POST_PACKED_STRUCT bss_peer_list_info_t;
+#include <packed_section_end.h>
+
+#define BSS_PEER_LIST_INFO_FIXED_LEN OFFSETOF(bss_peer_list_info_t, peer_info)
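+
+/* Usage sketch (illustrative only): walking the variable-length list
+ * returned for IOV_BSS_PEER_INFO. 'buf' is a hypothetical response buffer;
+ * dongle fields are little-endian, hence the dtoh conversions.
+ *
+ *	bss_peer_list_info_t *list = (bss_peer_list_info_t *)buf;
+ *	uint32 i, count = dtoh32(list->count);
+ *	for (i = 0; i < count; i++) {
+ *		bss_peer_info_t *peer = &list->peer_info[i];
+ *		printf("rssi %d tx_rate %u\n", (int32)dtoh32(peer->rssi),
+ *			dtoh32(peer->tx_rate));
+ *	}
+ */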
+
+#define AIBSS_BCN_FORCE_CONFIG_VER_0 0
+
+/** structure used to configure AIBSS beacon force xmit */
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint16 version;
+ uint16 len;
+ uint32 initial_min_bcn_dur; /**< dur in ms to check a bcn in bcn_flood period */
+ uint32 min_bcn_dur; /**< dur in ms to check a bcn after bcn_flood period */
+ uint32 bcn_flood_dur; /**< Initial bcn xmit period in ms */
+} BWL_POST_PACKED_STRUCT aibss_bcn_force_config_t;
+#include <packed_section_end.h>
+
+#define AIBSS_TXFAIL_CONFIG_VER_0 0
+#define AIBSS_TXFAIL_CONFIG_VER_1 1
+#define AIBSS_TXFAIL_CONFIG_CUR_VER AIBSS_TXFAIL_CONFIG_VER_1
+
+/** structure used to configure aibss tx fail event */
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint16 version;
+ uint16 len;
+ uint32 bcn_timeout; /**< dur in seconds to receive 1 bcn */
+ uint32 max_tx_retry; /**< no of consecutive no acks to send txfail event */
+ uint32 max_atim_failure; /**< no of consecutive atim failure */
+} BWL_POST_PACKED_STRUCT aibss_txfail_config_t;
+#include <packed_section_end.h>
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct wl_aibss_if {
+ uint16 version;
+ uint16 len;
+ uint32 flags;
+ struct ether_addr addr;
+ chanspec_t chspec;
+} BWL_POST_PACKED_STRUCT wl_aibss_if_t;
+#include <packed_section_end.h>
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct wlc_ipfo_route_entry {
+ struct ipv4_addr ip_addr;
+ struct ether_addr nexthop;
+} BWL_POST_PACKED_STRUCT wlc_ipfo_route_entry_t;
+#include <packed_section_end.h>
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct wlc_ipfo_route_tbl {
+ uint32 num_entry;
+ wlc_ipfo_route_entry_t route_entry[1];
+} BWL_POST_PACKED_STRUCT wlc_ipfo_route_tbl_t;
+#include <packed_section_end.h>
+
+/* Version of wlc_btc_stats_t structure.
+ * Increment whenever a change is made to wlc_btc_stats_t
+ */
+#define BTCX_STATS_VER_4 4
+typedef struct wlc_btc_stats_v4 {
+ uint16 version; /* version number of struct */
+ uint16 valid; /* Size of this struct */
+ uint32 stats_update_timestamp; /* tStamp when data is updated. */
+ uint32 btc_status; /* Hybrid/TDM indicator: Bit2:Hybrid, Bit1:TDM, Bit0:CoexEnabled */
+ uint32 bt_req_type_map; /* BT Antenna Req types since last stats sample */
+ uint32 bt_req_cnt; /* #BT antenna requests since last stats sample */
+ uint32 bt_gnt_cnt; /* #BT antenna grants since last stats sample */
+ uint32 bt_gnt_dur; /* usec BT owns antenna since last stats sample */
+ uint16 bt_abort_cnt; /* #Times WL was preempted due to BT since WL up */
+ uint16 bt_rxf1ovfl_cnt; /* #Time PSNULL retry count exceeded since WL up */
+ uint16 bt_latency_cnt; /* #Time ucode high latency detected since WL up */
+ uint16 bt_succ_pm_protect_cnt; /* successful PM protection */
+ uint16 bt_succ_cts_cnt; /* successful CTS2A protection */
+ uint16 bt_wlan_tx_preempt_cnt; /* WLAN TX Preemption */
+ uint16 bt_wlan_rx_preempt_cnt; /* WLAN RX Preemption */
+ uint16 bt_ap_tx_after_pm_cnt; /* AP TX even after PM protection */
+ uint16 bt_peraud_cumu_gnt_cnt; /* Grant cnt for periodic audio */
+ uint16 bt_peraud_cumu_deny_cnt; /* Deny cnt for periodic audio */
+ uint16 bt_a2dp_cumu_gnt_cnt; /* Grant cnt for A2DP */
+ uint16 bt_a2dp_cumu_deny_cnt; /* Deny cnt for A2DP */
+ uint16 bt_sniff_cumu_gnt_cnt; /* Grant cnt for Sniff */
+ uint16 bt_sniff_cumu_deny_cnt; /* Deny cnt for Sniff */
+ uint16 bt_dcsn_map; /* Accumulated decision bitmap once Ant grant */
+ uint16 bt_dcsn_cnt; /* Accumulated decision bitmap counters once Ant grant */
+ uint16 bt_a2dp_hiwat_cnt; /* Ant grant by a2dp high watermark */
+ uint16 bt_datadelay_cnt; /* Ant grant by acl/a2dp datadelay */
+ uint16 bt_crtpri_cnt; /* Ant grant by critical BT task */
+ uint16 bt_pri_cnt; /* Ant grant by high BT task */
+ uint16 a2dpbuf1cnt; /* Ant request with a2dp buffercnt 1 */
+ uint16 a2dpbuf2cnt; /* Ant request with a2dp buffercnt 2 */
+ uint16 a2dpbuf3cnt; /* Ant request with a2dp buffercnt 3 */
+ uint16 a2dpbuf4cnt; /* Ant request with a2dp buffercnt 4 */
+ uint16 a2dpbuf5cnt; /* Ant request with a2dp buffercnt 5 */
+ uint16 a2dpbuf6cnt; /* Ant request with a2dp buffercnt 6 */
+ uint16 a2dpbuf7cnt; /* Ant request with a2dp buffercnt 7 */
+ uint16 a2dpbuf8cnt; /* Ant request with a2dp buffercnt 8 */
+ uint16 antgrant_lt10ms; /* Ant grant duration cnt 0~10ms */
+ uint16 antgrant_lt30ms; /* Ant grant duration cnt 10~30ms */
+ uint16 antgrant_lt60ms; /* Ant grant duration cnt 30~60ms */
+ uint16 antgrant_ge60ms; /* Ant grant duration cnt 60~ms */
+} wlc_btc_stats_v4_t;
+
+#define BTCX_STATS_VER_3 3
+
+typedef struct wlc_btc_stats_v3 {
+ uint16 version; /* version number of struct */
+ uint16 valid; /* Size of this struct */
+ uint32 stats_update_timestamp; /* tStamp when data is updated. */
+ uint32 btc_status; /* Hybrid/TDM indicator: Bit2:Hybrid, Bit1:TDM, Bit0:CoexEnabled */
+ uint32 bt_req_type_map; /* BT Antenna Req types since last stats sample */
+ uint32 bt_req_cnt; /* #BT antenna requests since last stats sample */
+ uint32 bt_gnt_cnt; /* #BT antenna grants since last stats sample */
+ uint32 bt_gnt_dur; /* usec BT owns antenna since last stats sample */
+ uint16 bt_abort_cnt; /* #Times WL was preempted due to BT since WL up */
+ uint16 bt_rxf1ovfl_cnt; /* #Time PSNULL retry count exceeded since WL up */
+ uint16 bt_latency_cnt; /* #Time ucode high latency detected since WL up */
+ uint16 rsvd; /* pad to align struct to 32bit bndry */
+ uint16 bt_succ_pm_protect_cnt; /* successful PM protection */
+ uint16 bt_succ_cts_cnt; /* successful CTS2A protection */
+ uint16 bt_wlan_tx_preempt_cnt; /* WLAN TX Preemption */
+ uint16 bt_wlan_rx_preempt_cnt; /* WLAN RX Preemption */
+ uint16 bt_ap_tx_after_pm_cnt; /* AP TX even after PM protection */
+ uint16 bt_peraud_cumu_gnt_cnt; /* Grant cnt for periodic audio */
+ uint16 bt_peraud_cumu_deny_cnt; /* Deny cnt for periodic audio */
+ uint16 bt_a2dp_cumu_gnt_cnt; /* Grant cnt for A2DP */
+ uint16 bt_a2dp_cumu_deny_cnt; /* Deny cnt for A2DP */
+ uint16 bt_sniff_cumu_gnt_cnt; /* Grant cnt for Sniff */
+ uint16 bt_sniff_cumu_deny_cnt; /* Deny cnt for Sniff */
+ uint8 pad; /* Padding */
+ uint8 slice_index; /* Slice to report */
+} wlc_btc_stats_v3_t;
+
+#define BTCX_STATS_VER_2 2
+
+typedef struct wlc_btc_stats_v2 {
+ uint16 version; /* version number of struct */
+ uint16 valid; /* Size of this struct */
+ uint32 stats_update_timestamp; /* tStamp when data is updated. */
+ uint32 btc_status; /* Hybrid/TDM indicator: Bit2:Hybrid, Bit1:TDM, Bit0:CoexEnabled */
+ uint32 bt_req_type_map; /* BT Antenna Req types since last stats sample */
+ uint32 bt_req_cnt; /* #BT antenna requests since last stats sample */
+ uint32 bt_gnt_cnt; /* #BT antenna grants since last stats sample */
+ uint32 bt_gnt_dur; /* usec BT owns antenna since last stats sample */
+ uint16 bt_abort_cnt; /* #Times WL was preempted due to BT since WL up */
+ uint16 bt_rxf1ovfl_cnt; /* #Time PSNULL retry count exceeded since WL up */
+ uint16 bt_latency_cnt; /* #Time ucode high latency detected since WL up */
+ uint16 rsvd; /* pad to align struct to 32bit bndry */
+} wlc_btc_stats_v2_t;
+
+/* Durations for each bt task in millisecond */
+#define WL_BTCX_DURSTATS_VER_1 (1u)
+typedef struct wlc_btcx_durstats_v1 {
+ uint16 version; /* version number of struct */
+ uint16 valid; /* validity of this struct */
+ uint32 stats_update_timestamp; /* tStamp when data is updated */
+ uint16 bt_acl_dur; /* acl duration in ms */
+ uint16 bt_sco_dur; /* sco duration in ms */
+ uint16 bt_esco_dur; /* esco duration in ms */
+ uint16 bt_a2dp_dur; /* a2dp duration in ms */
+ uint16 bt_sniff_dur; /* sniff duration in ms */
+ uint16 bt_pscan_dur; /* page scan duration in ms */
+ uint16 bt_iscan_dur; /* inquiry scan duration in ms */
+ uint16 bt_page_dur; /* paging duration in ms */
+ uint16 bt_inquiry_dur; /* inquiry duration in ms */
+ uint16 bt_mss_dur; /* mss duration in ms */
+ uint16 bt_park_dur; /* park duration in ms */
+ uint16 bt_rssiscan_dur; /* rssiscan duration in ms */
+ uint16 bt_iscan_sco_dur; /* inquiry scan sco duration in ms */
+ uint16 bt_pscan_sco_dur; /* page scan sco duration in ms */
+ uint16 bt_tpoll_dur; /* tpoll duration in ms */
+ uint16 bt_sacq_dur; /* sacq duration in ms */
+ uint16 bt_sdata_dur; /* sdata duration in ms */
+ uint16 bt_rs_listen_dur; /* rs listen duration in ms */
+ uint16 bt_rs_burst_dur; /* rs burst duration in ms */
+ uint16 bt_ble_adv_dur; /* ble adv duration in ms */
+ uint16 bt_ble_scan_dur; /* ble scan duration in ms */
+ uint16 bt_ble_init_dur; /* ble init duration in ms */
+ uint16 bt_ble_conn_dur; /* ble connection duration in ms */
+ uint16 bt_task_lmp_dur; /* lmp duration in ms */
+ uint16 bt_esco_retran_dur; /* esco retransmission duration in ms */
+ uint16 bt_task26_dur; /* task26 duration in ms */
+ uint16 bt_task27_dur; /* task27 duration in ms */
+ uint16 bt_task28_dur; /* task28 duration in ms */
+ uint16 bt_task_pred_dur; /* prediction task duration in ms */
+ uint16 bt_multihid_dur; /* multihid duration in ms */
+} wlc_btcx_durstats_v1_t;
+
+#define WL_IPFO_ROUTE_TBL_FIXED_LEN 4
+#define WL_MAX_IPFO_ROUTE_TBL_ENTRY 64
+
+/* Global ASSERT Logging */
+#define ASSERTLOG_CUR_VER 0x0100
+#define MAX_ASSRTSTR_LEN 64
+
+typedef struct assert_record {
+ uint32 time;
+ uint8 seq_num;
+ int8 str[MAX_ASSRTSTR_LEN];
+} assert_record_t;
+
+typedef struct assertlog_results {
+ uint16 version;
+ uint16 record_len;
+ uint32 num;
+ assert_record_t logs[1];
+} assertlog_results_t;
+
+#define LOGRRC_FIX_LEN 8
+#define IOBUF_ALLOWED_NUM_OF_LOGREC(type, len) ((len - LOGRRC_FIX_LEN)/sizeof(type))
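+
+/* Usage sketch (illustrative only): bounding the record count claimed by a
+ * response against what actually fits in a 'len'-byte buffer after the
+ * LOGRRC_FIX_LEN header; 'results' and 'len' are hypothetical.
+ *
+ *	if (dtoh32(results->num) > IOBUF_ALLOWED_NUM_OF_LOGREC(assert_record_t, len))
+ *		return BCME_BUFTOOSHORT;
+ */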
+/* BCMWAPI_WAI */
+#define IV_LEN 16 /* same as SMS4_WPI_PN_LEN */
+struct wapi_sta_msg_t
+{
+ uint16 msg_type;
+ uint16 datalen;
+ uint8 vap_mac[6];
+ uint8 reserve_data1[2];
+ uint8 sta_mac[6];
+ uint8 reserve_data2[2];
+ uint8 gsn[IV_LEN];
+ uint8 wie[TLV_BODY_LEN_MAX + TLV_HDR_LEN]; /* 257 */
+ uint8 pad[3]; /* padding for alignment */
+};
+/* #endif BCMWAPI_WAI */
+/* chanim acs record */
+typedef struct {
+ uint8 valid;
+ uint8 trigger;
+ chanspec_t selected_chspc;
+ int8 bgnoise;
+ uint32 glitch_cnt;
+ uint8 ccastats;
+ uint8 chan_idle;
+ uint32 timestamp;
+} chanim_acs_record_t;
+
+typedef struct {
+ chanim_acs_record_t acs_record[CHANIM_ACS_RECORD];
+ uint8 count;
+ uint32 timestamp;
+} wl_acs_record_t;
+
+#define WL_CHANIM_STATS_V2 2
+#define CCASTATS_V2_MAX 9
+typedef struct chanim_stats_v2 {
+ uint32 glitchcnt; /**< normalized as per second count */
+ uint32 badplcp; /**< normalized as per second count */
+ uint8 ccastats[CCASTATS_V2_MAX]; /**< normalized as 0-255 */
+ int8 bgnoise; /**< background noise level (in dBm) */
+ chanspec_t chanspec; /**< ctrl chanspec of the interface */
+ uint32 timestamp; /**< time stamp at which the stats are collected */
+ uint32 bphy_glitchcnt; /**< normalized as per second count */
+ uint32 bphy_badplcp; /**< normalized as per second count */
+ uint8 chan_idle; /**< normalized as 0~255 */
+ uint8 PAD[3];
+} chanim_stats_v2_t;
+
+typedef struct chanim_stats {
+ uint32 glitchcnt; /**< normalized as per second count */
+ uint32 badplcp; /**< normalized as per second count */
+ uint8 ccastats[CCASTATS_MAX]; /**< normalized as 0-255 */
+ int8 bgnoise; /**< background noise level (in dBm) */
+ uint8 pad_1[11 - CCASTATS_MAX];
+ chanspec_t chanspec; /**< ctrl chanspec of the interface */
+ uint8 pad_2[2];
+ uint32 timestamp; /**< time stamp at which the stats are collected */
+ uint32 bphy_glitchcnt; /**< normalized as per second count */
+ uint32 bphy_badplcp; /**< normalized as per second count */
+ uint8 chan_idle; /**< normalized as 0~255 */
+ uint8 PAD[3];
+} chanim_stats_t;
+
+#define WL_CHANIM_STATS_VERSION 3
+typedef struct {
+ uint32 buflen;
+ uint32 version;
+ uint32 count;
+ chanim_stats_t stats[1];
+} wl_chanim_stats_t;
+
+#define WL_SC_CHANIM_STATS_V1 1u
+/* sc chanim interface stats */
+typedef struct sc_chanim_stats_v1 {
+ uint32 stats_ms; /* duration for which stats are collected, in ms */
+ chanspec_t chanspec;
+ uint16 PAD;
+ uint32 sc_only_rx_dur; /* rx only on sc, in ms */
+ uint32 sc_rx_mc_rx_dur; /* Rx on SC when MC is active, in ms */
+ uint32 sc_rx_ac_rx_dur; /* Rx on SC when AC is active, in ms */
+ uint32 sc_rx_mc_tx_dur; /* sc rx with MC tx, in ms */
+ uint32 sc_rx_ac_bt_tx_dur; /* sc rx with AC-BT tx, in ms */
+ uint32 sc_rx_bt_rx_dur; /* sc rx when BT Main is active, in ms */
+ uint32 sc_rx_btsc_rx_dur; /* BT sc and wlan SC rx is active, in ms */
+} sc_chanim_stats_v1_t;
+
+typedef struct {
+ uint32 version;
+ uint32 length;
+ uint8 flags; /* flags: to print the stats,
+ * WL_CHANIM_COUNT_ONE ==> Query stats for Home channel,
+ * WL_CHANIM_COUNT_ALL ==> Query stats for all channels
+ */
+ uint8 id; /* Module id, to know which module has sent the stats
+ * SC_CHANIM_ID_SCAN ==> For SCAN
+ * SC_CHANIM_ID_STA ==> For STA
+ */
+ uint8 count; /* o/p: count of channels for which stats need to be displayed.
+ * This value is the number of channels supported in the particular locale
+ * when flags is WL_CHANIM_COUNT_ALL, and one when flags is
+ * WL_CHANIM_COUNT_ONE
+ */
+ uint8 PAD;
+ sc_chanim_stats_v1_t sc_stats[1];
+} wl_chanim_sc_stats_v1_t;
+
+/* WL_CHANIM_SC_STATS_FIXED_LEN: if the size of any field above sc_stats changes, this macro
+ * needs versioning. At present it uses the offset of v1, which is the same for all versions so far
+ */
+#define WL_CHANIM_SC_STATS_FIXED_LEN OFFSETOF(wl_chanim_sc_stats_v1_t, sc_stats)
+#define WL_CHANIM_STATS_FIXED_LEN OFFSETOF(wl_chanim_stats_t, stats)
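+
+/* Usage sketch (illustrative only): sizing a "chanim_stats" GET request for
+ * up to 'nchan' channels; 'osh' and 'nchan' are hypothetical.
+ *
+ *	uint buflen = WL_CHANIM_STATS_FIXED_LEN + nchan * sizeof(chanim_stats_t);
+ *	wl_chanim_stats_t *req = (wl_chanim_stats_t *)MALLOCZ(osh, buflen);
+ *	if (req != NULL) {
+ *		req->buflen = htod32(buflen);
+ *		req->version = htod32(WL_CHANIM_STATS_VERSION);
+ *	}
+ */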
+
+#define WL_SC_CHANIM_STATS_V2 2u
+/* sc chanim interface stats */
+typedef struct sc_chanim_stats_v2 {
+ uint32 stats_ms; /* duration for which stats are collected, in ms */
+ chanspec_t chanspec;
+ uint16 PAD;
+ uint32 sc_only_rx_dur; /* rx only on sc, in ms */
+ uint32 sc_rx_mc_rx_dur; /* Rx on SC when MC is active, in ms */
+ uint32 sc_rx_ac_rx_dur; /* Rx on SC when AC is active, in ms */
+ uint32 sc_rx_mc_tx_dur; /* sc rx with MC tx, in ms */
+ uint32 sc_rx_ac_bt_tx_dur; /* sc rx with AC-BT tx, in ms */
+ uint32 sc_rx_bt_rx_dur; /* sc rx when BT Main is active, in ms */
+ uint32 sc_btle_overlap_dur; /* wlsc was awake and btsc le scan overlapped, in ms */
+ uint32 sc_btpage_overlap_dur; /* wlsc was awake and btsc page scan overlapped, in ms */
+ uint32 sc_btle_blnk_dur; /* wlauxtx blanked btsc le scan, in ms */
+ uint32 sc_btpage_blnk_dur; /* wlauxtx blanked btsc page scan, in ms */
+} sc_chanim_stats_v2_t;
+
+typedef struct {
+ uint32 version;
+ uint32 length;
+ uint8 flags; /* flags: to print the stats,
+ * WL_CHANIM_COUNT_ONE ==> Query stats for Home channel,
+ * WL_CHANIM_COUNT_ALL ==> Query stats for all channels
+ */
+ uint8 id; /* Module id, to know which module has sent the stats
+ * SC_CHANIM_ID_SCAN ==> For SCAN
+ * SC_CHANIM_ID_STA ==> For STA
+ */
+ uint8 count; /* o/p: count of channels for which stats need to be displayed.
+ * This value is the number of channels supported in the particular locale
+ * when flags is WL_CHANIM_COUNT_ALL, and one when flags is
+ * WL_CHANIM_COUNT_ONE
+ */
+ uint8 PAD;
+ sc_chanim_stats_v2_t sc_stats[1];
+} wl_chanim_sc_stats_v2_t;
+
+#define WL_SC_CHANIM_STATS_V3 3u
+/* sc chanim interface stats */
+typedef struct sc_chanim_stats_v3 {
+ uint32 stats_ms; /* duration for which stats are collected, in ms */
+ chanspec_t chanspec;
+ uint16 PAD;
+ uint32 sc_only_rx_dur; /* rx only on sc, in ms */
+ uint32 sc_rx_mc_rx_dur; /* Rx on SC when MC is active, in ms */
+ uint32 sc_rx_ac_rx_dur; /* Rx on SC when AC is active, in ms */
+ uint32 sc_rx_mc_tx_dur; /* sc rx with MC tx, in ms */
+ uint32 sc_rx_ac_bt_tx_dur; /* sc rx with AC-BT tx, in ms */
+ uint32 sc_rx_bt_rx_dur; /* sc rx when BT Main is active, in ms */
+ uint32 sc_btle_overlap_dur; /* wlsc was awake and btsc le scan overlapped, in ms */
+ uint32 sc_btpage_overlap_dur; /* wlsc was awake and btsc page scan overlapped, in ms */
+ uint32 sc_btle_blnk_dur; /* wlauxtx blanked btsc le scan, in ms */
+ uint32 sc_btpage_blnk_dur; /* wlauxtx blanked btsc page scan, in ms */
+ uint32 ac_btle_overlap_dur; /* wlaux was awake and btsc le scan overlapped, in ms */
+ uint32 ac_btpage_overlap_dur; /* wlaux was awake and btsc page scan overlapped, in ms */
+} sc_chanim_stats_v3_t;
+
+typedef struct {
+ uint32 version;
+ uint32 length;
+ uint8 flags; /* flags: to print the stats,
+ * WL_CHANIM_COUNT_ONE ==> Query stats for Home channel,
+ * WL_CHANIM_COUNT_ALL ==> Query stats for all channels
+ */
+ uint8 id; /* Module id, to know which module has sent the stats
+ * SC_CHANIM_ID_SCAN ==> For SCAN
+ * SC_CHANIM_ID_STA ==> For STA
+ */
+ uint8 count; /* o/p: count of channels for which stats need to be displayed.
+ * This value is the number of channels supported in the particular locale
+ * when flags is WL_CHANIM_COUNT_ALL, and one when flags is
+ * WL_CHANIM_COUNT_ONE
+ */
+ uint8 PAD;
+ sc_chanim_stats_v3_t sc_stats[1];
+} wl_chanim_sc_stats_v3_t;
+
+#define WL_SC_CHANIM_STATS_V4 4u
+/* sc chanim interface stats */
+typedef struct sc_chanim_stats_v4 {
+ uint32 stats_ms; /* duration for which stats are collected, in ms */
+ chanspec_t chanspec;
+ uint16 PAD;
+ uint32 sc_only_rx_dur; /* rx only on sc, in ms */
+ uint32 sc_rx_mc_rx_dur; /* Rx on SC when MC is active, in ms */
+ uint32 sc_rx_ac_rx_dur; /* Rx on SC when AC is active, in ms */
+ uint32 sc_rx_mc_tx_dur; /* sc rx with MC tx, in ms */
+ uint32 sc_rx_ac_bt_tx_dur; /* sc rx with AC-BT tx, in ms */
+ uint32 sc_rx_bt_rx_dur; /* sc rx when BT Main is active, in ms */
+ uint32 sc_btle_overlap_dur; /* wlsc was awake and btsc le scan overlapped, in ms */
+ uint32 sc_btpage_overlap_dur; /* wlsc was awake and btsc page scan overlapped, in ms */
+ uint32 ac_btle_blnk_dur; /* wlauxtx blanked btsc le scan, in ms */
+ uint32 ac_btpage_blnk_dur; /* wlauxtx blanked btsc page scan, in ms */
+ uint32 ac_btle_overlap_dur; /* wlaux was awake and btsc le scan overlapped, in ms */
+ uint32 ac_btpage_overlap_dur; /* wlaux was awake and btsc page scan overlapped, in ms */
+ uint32 timestamp; /* Time when stats last updated */
+} sc_chanim_stats_v4_t;
+
+typedef struct {
+ uint32 version;
+ uint32 length;
+ uint8 flags; /* flags: to print the stats,
+ * WL_CHANIM_COUNT_ONE ==> Query stats for Home channel,
+ * WL_CHANIM_COUNT_ALL ==> Query stats for all channels
+ */
+ uint8 id; /* Module id, to know which module has sent the stats
+ * SC_CHANIM_ID_SCAN ==> For SCAN
+ * SC_CHANIM_ID_STA ==> For STA
+ */
+ uint8 count; /* o/p: count of channels for which stats need to be displayed.
+ * This value is the number of channels supported in the particular locale
+ * when flags is WL_CHANIM_COUNT_ALL, and one when flags is
+ * WL_CHANIM_COUNT_ONE
+ */
+ uint8 PAD;
+ sc_chanim_stats_v4_t sc_stats[1];
+} wl_chanim_sc_stats_v4_t;
+
+#define WL_SC_CHANIM_STATS_V5 5u
+/* sc chanim interface stats */
+typedef struct sc_chanim_stats_v5 {
+ uint32 stats_ms; /* duration for which stats are collected, in ms */
+ chanspec_t chanspec;
+ uint16 sc_btrx_trans_cnt; /* BT RX transitions */
+ uint32 sc_only_rx_dur; /* rx only on sc, in ms */
+ uint32 sc_rx_mc_rx_dur; /* Rx on SC when MC is active, in ms */
+ uint32 sc_rx_ac_rx_dur; /* Rx on SC when AC is active, in ms */
+ uint32 sc_rx_mc_tx_dur; /* sc rx with MC tx, in ms */
+ uint32 sc_rx_ac_bt_tx_dur; /* sc rx with AC-BT tx, in ms */
+ uint32 sc_rx_bt_rx_dur; /* sc rx when BT Main is active, in ms */
+ uint32 sc_btle_overlap_dur; /* wlsc was awake and btsc le scan overlapped, in ms */
+ uint32 sc_btpage_overlap_dur; /* wlsc was awake and btsc page scan overlapped, in ms */
+ uint32 ac_btle_blnk_dur; /* wlauxtx blanked btsc le scan, in ms */
+ uint32 ac_btpage_blnk_dur; /* wlauxtx blanked btsc page scan, in ms */
+ uint32 ac_btle_overlap_dur; /* wlaux was awake and btsc le scan overlapped, in ms */
+ uint32 ac_btpage_overlap_dur; /* wlaux was awake and btsc page scan overlapped, in ms */
+ uint32 timestamp; /* Time when stats last updated */
+} sc_chanim_stats_v5_t;
+
+typedef struct {
+ uint32 version;
+ uint32 length;
+ uint8 flags; /* flags: to print the stats,
+ * WL_CHANIM_COUNT_ONE ==> Query stats for Home channel,
+ * WL_CHANIM_COUNT_ALL ==> Query stats for all channels
+ */
+ uint8 id; /* Module id, to know which module has sent the stats
+ * SC_CHANIM_ID_SCAN ==> For SCAN
+ * SC_CHANIM_ID_STA ==> For STA
+ */
+ uint8 count; /* o/p: count of channels for which stats need to be displayed.
+ * This value is the number of channels supported in the particular locale
+ * when flags is WL_CHANIM_COUNT_ALL, and one when flags is
+ * WL_CHANIM_COUNT_ONE
+ */
+ uint8 PAD;
+ sc_chanim_stats_v5_t sc_stats[1];
+} wl_chanim_sc_stats_v5_t;
+
+/* sc_chanim periodic ecounters structs for WL_IFSTATS_XTLV_SC_CHANIM_PERIODIC_STATS
+ * [similar to wl_chanim_sc_stats_vX_t, but constrained in size due to its known periodicity
+ * of reporting]
+ */
+#define WLC_SC_CHANIM_PERIODIC_STATS_V2 (2u)
+#define WLC_SC_STATS_MAX_BANDS_V1 2u
+
+typedef struct sc_chanim_stats_perband_periodic_v2 {
+ uint8 pad[3];
+ uint8 band_id; /* band for which stats reported; 0:5G,1:2G */
+ uint16 sc_only_rx_dur; /* rx only on sc, in ms */
+ uint16 sc_rx_mc_rx_dur; /* Rx on SC when MC is active, in ms */
+ uint16 sc_rx_ac_rx_dur; /* Rx on SC when AC is active, in ms */
+ uint16 sc_rx_mc_tx_dur; /* sc rx with MC tx, in ms */
+ uint16 sc_rx_ac_bt_tx_dur; /* sc rx with AC-BT tx, in ms */
+ uint16 sc_rx_bt_rx_dur; /* sc rx when BT Main is active, in ms */
+ uint16 sc_btle_overlap_dur; /* wlsc was awake and btsc le scan overlapped, in ms */
+ uint16 sc_btpage_overlap_dur; /* wlsc was awake and btsc page scan overlapped, in ms */
+ uint16 sc_btle_blnk_dur; /* wlauxtx blanked btsc le scan, in ms */
+ uint16 sc_btpage_blnk_dur; /* wlauxtx blanked btsc page scan, in ms */
+} sc_chanim_stats_perband_periodic_v2_t;
+
+typedef struct wl_chanim_sc_periodic_stats_v2 {
+ uint16 ver;
+ uint16 len;
+ uint8 pad[3];
+ uint8 count; /* number of sc_stats below */
+ /* per band accum stats */
+ sc_chanim_stats_perband_periodic_v2_t sc_stats[WLC_SC_STATS_MAX_BANDS_V1];
+} wl_chanim_sc_periodic_stats_v2_t;
+
+#define WLC_SC_CHANIM_PERIODIC_STATS_V3 (3u)
+
+typedef struct sc_chanim_stats_perband_periodic_v3 {
+ uint8 pad[3];
+ uint8 band_id; /* band for which stats reported; 0:5G,1:2G */
+ uint16 sc_only_rx_dur; /* rx only on sc, in ms */
+ uint16 sc_rx_mc_rx_dur; /* Rx on SC when MC is active, in ms */
+ uint16 sc_rx_ac_rx_dur; /* Rx on SC when AC is active, in ms */
+ uint16 sc_rx_mc_tx_dur; /* sc rx with MC tx, in ms */
+ uint16 sc_rx_ac_bt_tx_dur; /* sc rx with AC-BT tx, in ms */
+ uint16 sc_rx_bt_rx_dur; /* sc rx when BT Main is active, in ms */
+ uint16 sc_btle_overlap_dur; /* wlsc was awake and btsc le scan overlapped, in ms */
+ uint16 sc_btpage_overlap_dur; /* wlsc was awake and btsc page scan overlapped, in ms */
+ uint16 sc_btle_blnk_dur; /* wlauxtx blanked btsc le scan, in ms */
+ uint16 sc_btpage_blnk_dur; /* wlauxtx blanked btsc page scan, in ms */
+ uint16 ac_btle_overlap_dur; /* wlaux was awake and btsc le scan overlapped, in ms */
+ uint16 ac_btpage_overlap_dur; /* wlaux was awake and btsc page scan overlapped, in ms */
+} sc_chanim_stats_perband_periodic_v3_t;
+
+typedef struct wl_chanim_sc_periodic_stats_v3 {
+ uint16 ver;
+ uint16 len;
+ uint8 pad[3];
+ uint8 count; /* number of sc_stats below */
+ /* per band accum stats */
+ sc_chanim_stats_perband_periodic_v3_t sc_stats[WLC_SC_STATS_MAX_BANDS_V1];
+} wl_chanim_sc_periodic_stats_v3_t;
+
+/** Noise measurement metrics. */
+#define NOISE_MEASURE_KNOISE 0x1
+
+/** scb probe parameter */
+typedef struct {
+ uint32 scb_timeout;
+ uint32 scb_activity_time;
+ uint32 scb_max_probe;
+} wl_scb_probe_t;
+
+/* structure/defines for selective mgmt frame (smf) stats support */
+
+#define SMFS_VERSION 1
+/** selected mgmt frame (smf) stats element */
+typedef struct wl_smfs_elem {
+ uint32 count;
+ uint16 code; /**< SC or RC code */
+ uint8 PAD[2];
+} wl_smfs_elem_t;
+
+typedef struct wl_smf_stats {
+ uint32 version;
+ uint16 length; /**< reserved for future usage */
+ uint8 type;
+ uint8 codetype;
+ uint32 ignored_cnt;
+ uint32 malformed_cnt;
+ uint32 count_total; /**< count included the interested group */
+ wl_smfs_elem_t elem[1];
+} wl_smf_stats_t;
+
+#define WL_SMFSTATS_FIXED_LEN OFFSETOF(wl_smf_stats_t, elem)
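+
+/* Usage sketch (illustrative only): iterating the per-code elements of one
+ * wl_smf_stats_t record; 'stats' and 'nelem' are hypothetical, with 'nelem'
+ * derived by the caller from the response length.
+ *
+ *	uint32 i;
+ *	for (i = 0; i < nelem; i++)
+ *		printf("code 0x%x cnt %u\n", dtoh16(stats->elem[i].code),
+ *			dtoh32(stats->elem[i].count));
+ */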
+
+enum {
+ SMFS_CODETYPE_SC,
+ SMFS_CODETYPE_RC
+};
+
+typedef enum smfs_type {
+ SMFS_TYPE_AUTH,
+ SMFS_TYPE_ASSOC,
+ SMFS_TYPE_REASSOC,
+ SMFS_TYPE_DISASSOC_TX,
+ SMFS_TYPE_DISASSOC_RX,
+ SMFS_TYPE_DEAUTH_TX,
+ SMFS_TYPE_DEAUTH_RX,
+ SMFS_TYPE_MAX
+} smfs_type_t;
+
+/* #ifdef PHYMON */
+
+#define PHYMON_VERSION 1
+
+typedef struct wl_phycal_core_state {
+ /* Tx IQ/LO calibration coeffs */
+ int16 tx_iqlocal_a;
+ int16 tx_iqlocal_b;
+ int8 tx_iqlocal_ci;
+ int8 tx_iqlocal_cq;
+ int8 tx_iqlocal_di;
+ int8 tx_iqlocal_dq;
+ int8 tx_iqlocal_ei;
+ int8 tx_iqlocal_eq;
+ int8 tx_iqlocal_fi;
+ int8 tx_iqlocal_fq;
+
+ /** Rx IQ calibration coeffs */
+ int16 rx_iqcal_a;
+ int16 rx_iqcal_b;
+
+ uint8 tx_iqlocal_pwridx; /**< Tx Power Index for Tx IQ/LO calibration */
+ uint8 PAD[3];
+ uint32 papd_epsilon_table[64]; /**< PAPD epsilon table */
+ int16 papd_epsilon_offset; /**< PAPD epsilon offset */
+ uint8 curr_tx_pwrindex; /**< Tx power index */
+ int8 idle_tssi; /**< Idle TSSI */
+ int8 est_tx_pwr; /**< Estimated Tx Power (dB) */
+ int8 est_rx_pwr; /**< Estimated Rx Power (dB) from RSSI */
+ uint16 rx_gaininfo; /**< Rx gain applied on last Rx pkt */
+ uint16 init_gaincode; /**< initgain required for ACI */
+ int8 estirr_tx;
+ int8 estirr_rx;
+} wl_phycal_core_state_t;
+
+typedef struct wl_phycal_state {
+ int32 version;
+ int8 num_phy_cores; /**< number of cores */
+ int8 curr_temperature; /**< on-chip temperature sensor reading */
+ chanspec_t chspec; /**< chanspec for this state */
+ uint8 aci_state; /**< ACI state: ON/OFF */
+ uint8 PAD;
+ uint16 crsminpower; /**< crsminpower required for ACI */
+ uint16 crsminpowerl; /**< crsminpowerl required for ACI */
+ uint16 crsminpoweru; /**< crsminpoweru required for ACI */
+ wl_phycal_core_state_t phycal_core[1];
+} wl_phycal_state_t;
+
+#define WL_PHYCAL_STAT_FIXED_LEN OFFSETOF(wl_phycal_state_t, phycal_core)
+/* #endif PHYMON */
+
+/** discovery state */
+typedef struct wl_p2p_disc_st {
+ uint8 state; /**< see state */
+ uint8 PAD;
+ chanspec_t chspec; /**< valid in listen state */
+ uint16 dwell; /**< valid in listen state, in ms */
+} wl_p2p_disc_st_t;
+
+/** scan request */
+typedef struct wl_p2p_scan {
+ uint8 type; /**< 'S' for WLC_SCAN, 'E' for "escan" */
+ uint8 reserved[3];
+ /* scan or escan parms... */
+} wl_p2p_scan_t;
+
+/** i/f request */
+typedef struct wl_p2p_if {
+ struct ether_addr addr;
+ uint8 type; /**< see i/f type */
+ uint8 PAD;
+ chanspec_t chspec; /**< for p2p_ifadd GO */
+} wl_p2p_if_t;
+
+/** i/f query */
+typedef struct wl_p2p_ifq {
+ uint32 bsscfgidx;
+ char ifname[BCM_MSG_IFNAME_MAX];
+} wl_p2p_ifq_t;
+
+/** OppPS & CTWindow */
+typedef struct wl_p2p_ops {
+ uint8 ops; /**< 0: disable 1: enable */
+ uint8 ctw; /**< >= 10 */
+} wl_p2p_ops_t;
+
+/** absence and presence request */
+typedef struct wl_p2p_sched_desc {
+ uint32 start;
+ uint32 interval;
+ uint32 duration;
+ uint32 count; /**< see count */
+} wl_p2p_sched_desc_t;
+
+typedef struct wl_p2p_sched {
+ uint8 type; /**< see schedule type */
+ uint8 action; /**< see schedule action */
+ uint8 option; /**< see schedule option */
+ uint8 PAD;
+ wl_p2p_sched_desc_t desc[1];
+} wl_p2p_sched_t;
+
+typedef struct wl_p2p_wfds_hash {
+ uint32 advt_id;
+ uint16 nw_cfg_method;
+ uint8 wfds_hash[6];
+ uint8 name_len;
+ uint8 service_name[MAX_WFDS_SVC_NAME_LEN];
+ uint8 PAD[3];
+} wl_p2p_wfds_hash_t;
+
+typedef struct wl_p2p_config_params {
+ uint16 enable; /**< 0: disable 1: enable */
+ uint16 chanspec; /* GO chanspec */
+ wlc_ssid_t ssid; /* SSID */
+} wl_p2p_config_params_t;
+
+typedef struct wl_bcmdcs_data {
+ uint32 reason;
+ chanspec_t chspec;
+ uint8 PAD[2];
+} wl_bcmdcs_data_t;
+/* #ifdef EXT_STA */
+/**
+ * Format of IHV data passed to OID_DOT11_NIC_SPECIFIC_EXTENSION.
+ */
+typedef struct _IHV_NIC_SPECIFIC_EXTENSION {
+ uint8 oui[4]; /**< vendor specific OUI value */
+ uint32 event; /**< event code */
+ uint8 ihvData[1]; /**< ihv data */
+} IHV_NIC_SPECIFIC_EXTENSION, *PIHV_NIC_SPECIFIC_EXTENSION;
+#define IHV_NIC_SPECIFIC_EXTENTION_HEADER OFFSETOF(IHV_NIC_SPECIFIC_EXTENSION, ihvData[0])
+/* #endif EXT_STA */
+/** NAT configuration */
+typedef struct {
+ uint32 ipaddr; /**< interface ip address */
+ uint32 ipaddr_mask; /**< interface ip address mask */
+ uint32 ipaddr_gateway; /**< gateway ip address */
+ uint8 mac_gateway[6]; /**< gateway mac address */
+ uint8 PAD[2];
+ uint32 ipaddr_dns; /**< DNS server ip address, valid only for public if */
+ uint8 mac_dns[6]; /**< DNS server mac address, valid only for public if */
+ uint8 GUID[38]; /**< interface GUID */
+} nat_if_info_t;
+
+typedef struct {
+ uint32 op; /**< operation code */
+ uint8 pub_if; /**< set for public if, clear for private if */
+ uint8 PAD[3];
+ nat_if_info_t if_info; /**< interface info */
+} nat_cfg_t;
+
+typedef struct {
+ int32 state; /**< NAT state returned */
+} nat_state_t;
+
+typedef struct flush_txfifo {
+ uint32 txfifobmp;
+ uint32 hwtxfifoflush;
+ struct ether_addr ea;
+ uint8 PAD[2];
+} flush_txfifo_t;
+
+enum {
+ SPATIAL_MODE_2G_IDX = 0,
+ SPATIAL_MODE_5G_LOW_IDX,
+ SPATIAL_MODE_5G_MID_IDX,
+ SPATIAL_MODE_5G_HIGH_IDX,
+ SPATIAL_MODE_5G_UPPER_IDX,
+ SPATIAL_MODE_MAX_IDX
+};
+
+#define WLC_TXCORE_MAX 4 /**< max number of txcore supports */
+#define WLC_TXCORE_MAX_OLD 2 /**< backward compatibility for TXCAL */
+#define WLC_SUBBAND_MAX 4 /**< max number of sub-band supports */
+typedef struct {
+ uint8 band2g[WLC_TXCORE_MAX];
+ uint8 band5g[WLC_SUBBAND_MAX][WLC_TXCORE_MAX];
+} sar_limit_t;
+
+#define MAX_NUM_TXCAL_MEAS 128
+#define MAX_NUM_PWR_STEP 40
+#define TXCAL_IOVAR_VERSION 0x1
+
+#define TXCAL_GAINSWEEP_VER (TXCAL_GAINSWEEP_VERSION_V2)
+#define TXCAL_GAINSWEEP_VERSION_V2 2
+
+/* Below macro defines the latest txcal iovar version updated */
+/* This macro also reflects in the 'txcal_ver' iovar */
+#define TXCAL_IOVAR_LATEST TXCAL_GAINSWEEP_VER
+
+/* below are used for bphy/ofdm separated LSC */
+#define TXCAL_PWR_BPHY 0
+#define TXCAL_PWR_OFDM 1
+
+typedef struct wl_txcal_meas_percore {
+ uint16 tssi[MAX_NUM_TXCAL_MEAS];
+ int16 pwr[MAX_NUM_TXCAL_MEAS];
+} wl_txcal_meas_percore_t;
+
+typedef struct wl_txcal_meas_ncore {
+ uint16 version;
+ uint8 valid_cnt;
+ uint8 num_core;
+ wl_txcal_meas_percore_t txcal_percore[1];
+} wl_txcal_meas_ncore_t;
+
+typedef struct wl_txcal_power_tssi_percore {
+ int16 tempsense;
+ int16 pwr_start;
+ uint8 pwr_start_idx;
+ uint8 num_entries;
+ uint16 pad;
+ uint8 tssi[MAX_NUM_PWR_STEP];
+} wl_txcal_power_tssi_percore_t;
+
+typedef struct wl_txcal_power_tssi_ncore {
+ uint16 version;
+ uint8 set_core;
+ uint8 channel;
+ uint8 num_core;
+ uint8 gen_tbl;
+ uint8 ofdm;
+ uint8 pad;
+ wl_txcal_power_tssi_percore_t tssi_percore[WLC_TXCORE_MAX];
+} wl_txcal_power_tssi_ncore_t;
+
+typedef struct wl_txcal_meas {
+ uint16 tssi[WLC_TXCORE_MAX][MAX_NUM_TXCAL_MEAS];
+ int16 pwr[WLC_TXCORE_MAX][MAX_NUM_TXCAL_MEAS];
+ uint8 valid_cnt;
+ uint8 PAD;
+} wl_txcal_meas_t;
+
+typedef struct wl_txcal_meas_old {
+ uint16 tssi[WLC_TXCORE_MAX_OLD][MAX_NUM_TXCAL_MEAS];
+ int16 pwr[WLC_TXCORE_MAX_OLD][MAX_NUM_TXCAL_MEAS];
+ uint8 valid_cnt;
+ uint8 PAD;
+} wl_txcal_meas_old_t;
+
+typedef struct wl_txcal_power_tssi {
+ uint8 set_core;
+ uint8 channel;
+ int16 tempsense[WLC_TXCORE_MAX];
+ int16 pwr_start[WLC_TXCORE_MAX];
+ uint8 pwr_start_idx[WLC_TXCORE_MAX];
+ uint8 num_entries[WLC_TXCORE_MAX];
+ uint8 tssi[WLC_TXCORE_MAX][MAX_NUM_PWR_STEP];
+ uint8 gen_tbl;
+ uint8 ofdm;
+} wl_txcal_power_tssi_t;
+
+typedef struct wl_txcal_power_tssi_old {
+ uint8 set_core;
+ uint8 channel;
+ int16 tempsense[WLC_TXCORE_MAX_OLD];
+ int16 pwr_start[WLC_TXCORE_MAX_OLD];
+ uint8 pwr_start_idx[WLC_TXCORE_MAX_OLD];
+ uint8 num_entries[WLC_TXCORE_MAX_OLD];
+ uint8 tssi[WLC_TXCORE_MAX_OLD][MAX_NUM_PWR_STEP];
+ uint8 gen_tbl;
+ uint8 ofdm;
+} wl_txcal_power_tssi_old_t;
+
+typedef struct wl_olpc_pwr {
+ uint16 version;
+ uint8 core;
+ uint8 channel;
+ int16 tempsense;
+ uint8 olpc_idx;
+ uint8 ofdm;
+} wl_olpc_pwr_t;
+
+typedef struct wl_rfem_temp_vdet_temp {
+ uint8 vdet_fem_t1;
+ int8 rfem_temp_t1;
+ uint8 vdet_fem_t2;
+ int8 rfem_temp_t2;
+} wl_rfem_temp_vdet_temp_t;
+
+typedef struct wl_rfem_temp_vin_tssi {
+ uint16 vin_chip_v1;
+ int16 tssi_chip_v1;
+ uint16 vin_chip_v2;
+ int16 tssi_chip_v2;
+} wl_rfem_temp_vin_tssi_t;
+
+typedef struct wl_txcal_tempsense {
+ uint16 version;
+ uint8 valid_cnt;
+ uint8 core;
+ int16 ref_temperature;
+ int16 meas_temperature;
+ wl_rfem_temp_vdet_temp_t vdet_temp;
+ wl_rfem_temp_vin_tssi_t vin_tssi;
+} wl_txcal_tempsense_t;
+
+/** IOVAR "mempool" parameter. Used to retrieve a list of memory pool statistics. */
+typedef struct wl_mempool_stats {
+ int32 num; /**< Number of memory pools */
+ bcm_mp_stats_t s[1]; /**< Variable array of memory pool stats. */
+} wl_mempool_stats_t;
+
+typedef struct {
+ uint32 ipaddr;
+ uint32 ipaddr_netmask;
+ uint32 ipaddr_gateway;
+} nwoe_ifconfig_t;
+
+/* Both powersel_params and lpc_params are used by the IOVAR lpc_params.
+ * powersel_params is replaced by lpc_params in later WLC versions.
+ */
+typedef struct powersel_params {
+ /* LPC Params exposed via IOVAR */
+ int32 tp_ratio_thresh; /**< Throughput ratio threshold */
+ uint8 rate_stab_thresh; /**< Thresh for rate stability based on nupd */
+ uint8 pwr_stab_thresh; /**< Number of successes before power step down */
+ uint8 pwr_sel_exp_time; /**< Time lapse for expiry of database */
+ uint8 PAD;
+} powersel_params_t;
+
+#define WL_LPC_PARAMS_VER_2 2
+#define WL_LPC_PARAMS_CURRENT_VERSION WL_LPC_PARAMS_VER_2
+
+typedef struct lpc_params {
+ uint16 version;
+ uint16 length;
+ /* LPC Params exposed via IOVAR */
+ uint8 rate_stab_thresh; /**< Thresh for rate stability based on nupd */
+ uint8 pwr_stab_thresh; /**< Number of successes before power step down */
+ uint8 lpc_exp_time; /**< Time lapse for expiry of database */
+ uint8 pwrup_slow_step; /**< Step size for slow step up */
+ uint8 pwrup_fast_step; /**< Step size for fast step up */
+ uint8 pwrdn_slow_step; /**< Step size for slow step down */
+} lpc_params_t;
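+
+/* Usage sketch (illustrative only): versioned IOVARs such as "lpc_params"
+ * expect version/length to be filled before the set; threshold values here
+ * are hypothetical.
+ *
+ *	lpc_params_t lpc;
+ *	memset(&lpc, 0, sizeof(lpc));
+ *	lpc.version = htod16(WL_LPC_PARAMS_CURRENT_VERSION);
+ *	lpc.length = htod16(sizeof(lpc));
+ *	lpc.rate_stab_thresh = 5;
+ */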
+
+/* tx pkt delay statistics */
+#define SCB_RETRY_SHORT_DEF 7 /**< Default Short retry Limit */
+#define WLPKTDLY_HIST_NBINS 16 /**< number of bins used in the Delay histogram */
+
+/** structure to store per-AC delay statistics */
+typedef struct scb_delay_stats {
+ uint32 txmpdu_lost; /**< number of MPDUs lost */
+ uint32 txmpdu_cnt[SCB_RETRY_SHORT_DEF]; /**< retry times histogram */
+ uint32 delay_sum[SCB_RETRY_SHORT_DEF]; /**< cumulative packet latency */
+ uint32 delay_min; /**< minimum packet latency observed */
+ uint32 delay_max; /**< maximum packet latency observed */
+ uint32 delay_avg; /**< packet latency average */
+ uint32 delay_hist[WLPKTDLY_HIST_NBINS]; /**< delay histogram */
+ uint32 delay_count; /**< minimum number of time period units before
+ subsequent packet delay events can be generated
+ */
+ uint32 prev_txmpdu_cnt; /**< Previous value of txmpdu_cnt[] during last iteration */
+ uint32 prev_delay_sum; /**< Previous value of delay_sum[] during last iteration */
+} scb_delay_stats_t;
+
+/** structure for txdelay event */
+typedef struct txdelay_event {
+ uint8 status;
+ uint8 PAD[3];
+ int32 rssi;
+ chanim_stats_t chanim_stats;
+ scb_delay_stats_t delay_stats[AC_COUNT];
+} txdelay_event_t;
+
+/** structure for txdelay parameters */
+typedef struct txdelay_params {
+ uint16 ratio; /**< Avg Txdelay Delta */
+ uint8 cnt; /**< Sample cnt */
+ uint8 period; /**< Sample period */
+ uint8 tune; /**< Debug */
+ uint8 PAD;
+} txdelay_params_t;
+#define MAX_TXDELAY_STATS_SCBS 6
+#define TXDELAY_STATS_VERSION 1
+
+enum {
+ TXDELAY_STATS_PARTIAL_RESULT = 0,
+ TXDELAY_STATS_FULL_RESULT = 1
+};
+
+typedef struct scb_total_delay_stats {
+ struct ether_addr ea;
+ uint8 pad[2];
+ scb_delay_stats_t dlystats[AC_COUNT];
+} scb_total_delay_stats_t;
+
+typedef struct txdelay_stats {
+ uint32 version;
+ uint32 full_result; /* 0:Partial, 1:full */
+ uint32 scb_cnt; /* in:requested, out:returned */
+ scb_total_delay_stats_t scb_delay_stats[1];
+} txdelay_stats_t;
+
+#define WL_TXDELAY_STATS_FIXED_SIZE \
+ (sizeof(txdelay_stats_t)+(MAX_TXDELAY_STATS_SCBS-1)*sizeof(scb_total_delay_stats_t))
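+
+/* Sizing note (illustrative): txdelay_stats_t already embeds one
+ * scb_total_delay_stats_t via its [1] placeholder, so the macro above adds
+ * only the remaining (MAX_TXDELAY_STATS_SCBS - 1) entries. A request for a
+ * hypothetical 'n' SCBs (n <= MAX_TXDELAY_STATS_SCBS) is sized the same way:
+ *
+ *	uint buflen = sizeof(txdelay_stats_t) +
+ *		(n - 1) * sizeof(scb_total_delay_stats_t);
+ */
+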
+enum {
+ WNM_SERVICE_DMS = 1,
+ WNM_SERVICE_FMS = 2,
+ WNM_SERVICE_TFS = 3
+};
+
+/** Definitions for WNM/NPS TCLAS */
+typedef struct wl_tclas {
+ uint8 user_priority;
+ uint8 fc_len;
+ dot11_tclas_fc_t fc;
+} wl_tclas_t;
+
+#define WL_TCLAS_FIXED_SIZE OFFSETOF(wl_tclas_t, fc)
+
+typedef struct wl_tclas_list {
+ uint32 num;
+ wl_tclas_t tclas[];
+} wl_tclas_list_t;
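+
+/* Usage sketch (illustrative only): each wl_tclas_t carries a variable-length
+ * frame classifier (fc_len bytes), so entries in wl_tclas_list_t must be
+ * walked byte-wise rather than indexed; 'list' is hypothetical.
+ *
+ *	uint8 *ptr = (uint8 *)list->tclas;
+ *	uint32 i;
+ *	for (i = 0; i < list->num; i++) {
+ *		wl_tclas_t *tc = (wl_tclas_t *)ptr;
+ *		ptr += WL_TCLAS_FIXED_SIZE + tc->fc_len;
+ *	}
+ */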
+
+/** Definitions for WNM/NPS Traffic Filter Service */
+typedef struct wl_tfs_req {
+ uint8 tfs_id;
+ uint8 tfs_actcode;
+ uint8 tfs_subelem_id;
+ uint8 send;
+} wl_tfs_req_t;
+
+typedef struct wl_tfs_filter {
+ uint8 status; /**< Status returned by the AP */
+ uint8 tclas_proc; /**< TCLAS processing value (0:and, 1:or) */
+ uint8 tclas_cnt; /**< count of all wl_tclas_t in tclas array */
+ uint8 tclas[1]; /**< VLA of wl_tclas_t */
+} wl_tfs_filter_t;
+#define WL_TFS_FILTER_FIXED_SIZE OFFSETOF(wl_tfs_filter_t, tclas)
+
+typedef struct wl_tfs_fset {
+ struct ether_addr ea; /**< Address of AP/STA involved with this filter set */
+ uint8 tfs_id; /**< TFS ID field chosen by STA host */
+ uint8 status; /**< Internal status TFS_STATUS_xxx */
+ uint8 actcode; /**< Action code DOT11_TFS_ACTCODE_xxx */
+ uint8 token; /**< Token used in last request frame */
+ uint8 notify; /**< Notify frame sent/received because of this set */
+ uint8 filter_cnt; /**< count of all wl_tfs_filter_t in filter array */
+ uint8 filter[1]; /**< VLA of wl_tfs_filter_t */
+} wl_tfs_fset_t;
+#define WL_TFS_FSET_FIXED_SIZE OFFSETOF(wl_tfs_fset_t, filter)
+
+enum {
+ TFS_STATUS_DISABLED = 0, /**< TFS filter set disabled by user */
+ TFS_STATUS_DISABLING = 1, /**< Empty request just sent to AP */
+ TFS_STATUS_VALIDATED = 2, /**< Filter set validated by AP (but maybe not enabled!) */
+ TFS_STATUS_VALIDATING = 3, /**< Filter set just sent to AP */
+ TFS_STATUS_NOT_ASSOC = 4, /**< STA not associated */
+ TFS_STATUS_NOT_SUPPORT = 5, /**< TFS not supported by AP */
+ TFS_STATUS_DENIED = 6, /**< Filter set refused by AP (=> all sets are disabled!) */
+};
+
+typedef struct wl_tfs_status {
+ uint8 fset_cnt; /**< count of all wl_tfs_fset_t in fset array */
+ wl_tfs_fset_t fset[1]; /**< VLA of wl_tfs_fset_t */
+} wl_tfs_status_t;
+
+typedef struct wl_tfs_set {
+ uint8 send; /**< Immediately register the sets on the AP side */
+ uint8 tfs_id; /**< ID of a specific set (existing or new), or null for all */
+ uint8 actcode; /**< Action code for this filter set */
+ uint8 tclas_proc; /**< TCLAS processing operator for this filter set */
+} wl_tfs_set_t;
+
+typedef struct wl_tfs_term {
+ uint8 del; /**< Delete internal set once confirmation received */
+ uint8 tfs_id; /**< ID of a specific set (existing), or null for all */
+} wl_tfs_term_t;
+
+#define DMS_DEP_PROXY_ARP (1 << 0)
+
+/* Definitions for WNM/NPS Directed Multicast Service */
+enum {
+ DMS_STATUS_DISABLED = 0, /**< DMS desc disabled by user */
+ DMS_STATUS_ACCEPTED = 1, /**< Request accepted by AP */
+ DMS_STATUS_NOT_ASSOC = 2, /**< STA not associated */
+ DMS_STATUS_NOT_SUPPORT = 3, /**< DMS not supported by AP */
+ DMS_STATUS_DENIED = 4, /**< Request denied by AP */
+ DMS_STATUS_TERM = 5, /**< Request terminated by AP */
+ DMS_STATUS_REMOVING = 6, /**< Remove request just sent */
+ DMS_STATUS_ADDING = 7, /**< Add request just sent */
+ DMS_STATUS_ERROR = 8, /**< Non-compliant AP behavior */
+ DMS_STATUS_IN_PROGRESS = 9, /**< Request just sent */
+ DMS_STATUS_REQ_MISMATCH = 10, /**< Conditions for sending DMS req not met */
+ DMS_STATUS_TIMEOUT = 11 /**< Request Time out */
+};
+
+typedef struct wl_dms_desc {
+ uint8 user_id;
+ uint8 status;
+ uint8 token;
+ uint8 dms_id;
+ uint8 tclas_proc;
+ uint8 mac_len; /**< length of all ether_addr in data array, 0 if STA */
+ uint8 tclas_len; /**< length of all wl_tclas_t in data array */
+ uint8 data[1]; /**< VLA of 'ether_addr' and 'wl_tclas_t' (in this order ) */
+} wl_dms_desc_t;
+
+#define WL_DMS_DESC_FIXED_SIZE OFFSETOF(wl_dms_desc_t, data)
+
+typedef struct wl_dms_status {
+ uint32 cnt;
+ wl_dms_desc_t desc[1];
+} wl_dms_status_t;
+
+typedef struct wl_dms_set {
+ uint8 send;
+ uint8 user_id;
+ uint8 tclas_proc;
+} wl_dms_set_t;
+
+typedef struct wl_dms_term {
+ uint8 del;
+ uint8 user_id;
+} wl_dms_term_t;
+
+typedef struct wl_service_term {
+ uint8 service;
+ union {
+ wl_dms_term_t dms;
+ } u;
+} wl_service_term_t;
+
+/** Definitions for WNM/NPS BSS Transition */
+#define WL_BSSTRANS_QUERY_VERSION_1 1
+typedef struct wl_bsstrans_query {
+ uint16 version; /* structure version */
+ uint16 pad0; /* padding for 4-byte alignment */
+ wlc_ssid_t ssid; /* SSID of NBR elem to be queried for */
+ uint8 reason; /* Reason code of the BTQ */
+ uint8 pad1[3]; /* padding for 4-byte alignment */
+} wl_bsstrans_query_t;
+
+#define BTM_QUERY_NBR_COUNT_MAX 16
+
+#define WL_BTQ_NBR_LIST_VERSION_1 1
+typedef struct wl_btq_nbr_list {
+ uint16 version; /* structure version */
+ uint8 count; /* No. of BTQ NBRs returned */
+ uint8 pad; /* padding for 4-byte alignment */
+ nbr_rpt_elem_t btq_nbt_elem[]; /* BTQ NBR elem in a BTQ NBR list */
+} wl_btq_nbr_list_t;
+
+typedef struct wl_bsstrans_req {
+ uint16 tbtt; /**< time of BSS to end of life, in unit of TBTT */
+ uint16 dur; /**< time of BSS to keep off, in unit of minute */
+ uint8 reqmode; /**< request mode of BSS transition request */
+ uint8 unicast; /**< request by unicast or by broadcast */
+} wl_bsstrans_req_t;
+
+enum {
+ BSSTRANS_RESP_AUTO = 0, /**< Currently equivalent to ENABLE */
+ BSSTRANS_RESP_DISABLE = 1, /**< Never answer BSS Trans Req frames */
+ BSSTRANS_RESP_ENABLE = 2, /**< Always answer Req frames with preset data */
+ BSSTRANS_RESP_WAIT = 3, /**< Send ind, wait and/or send preset data (NOT IMPL) */
+ BSSTRANS_RESP_IMMEDIATE = 4 /**< After an ind, set data and send resp (NOT IMPL) */
+};
+
+typedef struct wl_bsstrans_resp {
+ uint8 policy;
+ uint8 status;
+ uint8 delay;
+ struct ether_addr target;
+} wl_bsstrans_resp_t;
+
+/* "wnm_bsstrans_policy" argument programs behavior after BSSTRANS Req reception.
+ * BSS-Transition feature is used by multiple programs such as NPS-PF, VE-PF,
+ * Band-steering, Hotspot 2.0 and customer requirements. Each PF and its test plan
+ * mandates different behavior on receiving BSS-transition request. To accomodate
+ * such divergent behaviors these policies have been created.
+ */
+typedef enum {
+ WL_BSSTRANS_POLICY_ROAM_ALWAYS = 0, /**< Roam (or disassociate) in all cases */
+ WL_BSSTRANS_POLICY_ROAM_IF_MODE = 1, /**< Roam only if requested by Request Mode field */
+ WL_BSSTRANS_POLICY_ROAM_IF_PREF = 2, /**< Roam only if Preferred BSS provided */
+ WL_BSSTRANS_POLICY_WAIT = 3, /**< Wait for deauth and send Accepted status */
+ WL_BSSTRANS_POLICY_PRODUCT = 4, /**< Policy for real product use cases (Olympic) */
+ WL_BSSTRANS_POLICY_PRODUCT_WBTEXT = 5, /**< Policy for real product use cases (SS) */
+ WL_BSSTRANS_POLICY_MBO = 6, /**< Policy for MBO certification */
+ WL_BSSTRANS_POLICY_MAX = 7
+} wnm_bsstrans_policy_type_t;
+
+/** Definitions for WNM/NPS TIM Broadcast */
+typedef struct wl_timbc_offset {
+ int16 offset; /**< offset in us */
+ uint16 fix_intv; /**< override interval sent from STA */
+ uint16 rate_override; /**< use rate override to send high rate TIM broadcast frame */
+ uint8 tsf_present; /**< show timestamp in TIM broadcast frame */
+ uint8 PAD;
+} wl_timbc_offset_t;
+
+typedef struct wl_timbc_set {
+ uint8 interval; /**< Interval in DTIM wished or required. */
+ uint8 flags; /**< Bitfield described below */
+ uint16 rate_min; /**< Minimum rate required for High/Low TIM frames. Optional */
+ uint16 rate_max; /**< Maximum rate required for High/Low TIM frames. Optional */
+} wl_timbc_set_t;
+
+enum {
+ WL_TIMBC_SET_TSF_REQUIRED = 1, /**< Enable TIMBC only if TSF in TIM frames */
+ WL_TIMBC_SET_NO_OVERRIDE = 2, /**< ... if AP does not override interval */
+ WL_TIMBC_SET_PROXY_ARP = 4, /**< ... if AP support Proxy ARP */
+ WL_TIMBC_SET_DMS_ACCEPTED = 8 /**< ... if all DMS desc have been accepted */
+};
+
+typedef struct wl_timbc_status {
+ uint8 status_sta; /**< Status from internal state machine (check below) */
+ uint8 status_ap; /**< From AP response frame (check 8.4.2.86 from 802.11) */
+ uint8 interval;
+ uint8 pad;
+ int32 offset;
+ uint16 rate_high;
+ uint16 rate_low;
+} wl_timbc_status_t;
+
+enum {
+ WL_TIMBC_STATUS_DISABLE = 0, /**< TIMBC disabled by user */
+ WL_TIMBC_STATUS_REQ_MISMATCH = 1, /**< AP settings do not match user requirements */
+ WL_TIMBC_STATUS_NOT_ASSOC = 2, /**< STA not associated */
+ WL_TIMBC_STATUS_NOT_SUPPORT = 3, /**< TIMBC not supported by AP */
+ WL_TIMBC_STATUS_DENIED = 4, /**< Req to disable TIMBC sent to AP */
+ WL_TIMBC_STATUS_ENABLE = 5 /**< TIMBC enabled */
+};
+
+/** Definitions for PM2 Dynamic Fast Return To Sleep */
+typedef struct wl_pm2_sleep_ret_ext {
+ uint8 logic; /**< DFRTS logic: see WL_DFRTS_LOGIC_* below */
+ uint8 PAD;
+ uint16 low_ms; /**< Low FRTS timeout */
+ uint16 high_ms; /**< High FRTS timeout */
+ uint16 rx_pkts_threshold; /**< switching threshold: # rx pkts */
+ uint16 tx_pkts_threshold; /**< switching threshold: # tx pkts */
+ uint16 txrx_pkts_threshold; /**< switching threshold: # (tx+rx) pkts */
+ uint32 rx_bytes_threshold; /**< switching threshold: # rx bytes */
+ uint32 tx_bytes_threshold; /**< switching threshold: # tx bytes */
+ uint32 txrx_bytes_threshold; /**< switching threshold: # (tx+rx) bytes */
+} wl_pm2_sleep_ret_ext_t;
+
+#define WL_DFRTS_LOGIC_OFF 0 /**< Feature is disabled */
+#define WL_DFRTS_LOGIC_OR 1 /**< OR all non-zero threshold conditions */
+#define WL_DFRTS_LOGIC_AND 2 /**< AND all non-zero threshold conditions */
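+
+/* Usage sketch (illustrative only): a PM2 fast-return-to-sleep config that
+ * switches to the high timeout once either packet threshold trips; the
+ * timeout and threshold values are hypothetical.
+ *
+ *	wl_pm2_sleep_ret_ext_t dfrts;
+ *	memset(&dfrts, 0, sizeof(dfrts));
+ *	dfrts.logic = WL_DFRTS_LOGIC_OR;
+ *	dfrts.low_ms = htod16(50);
+ *	dfrts.high_ms = htod16(200);
+ *	dfrts.rx_pkts_threshold = htod16(10);
+ *	dfrts.tx_pkts_threshold = htod16(10);
+ */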
+
+/* Values for the passive_on_restricted_mode iovar. When set to non-zero, this iovar
+ * disables automatic conversions of a channel from passively scanned to
+ * actively scanned. These values only have an effect for country codes such
+ * as XZ where some 5 GHz channels are defined to be passively scanned.
+ */
+#define WL_PASSACTCONV_DISABLE_NONE 0 /**< Enable permanent and temporary conversions */
+#define WL_PASSACTCONV_DISABLE_ALL 1 /**< Disable permanent and temporary conversions */
+#define WL_PASSACTCONV_DISABLE_PERM 2 /**< Disable only permanent conversions */
+
+/* Definitions for Reliable Multicast */
+#define WL_RMC_CNT_VERSION 1
+#define WL_RMC_TR_VERSION 1
+#define WL_RMC_MAX_CLIENT 32
+#define WL_RMC_FLAG_INBLACKLIST 1
+#define WL_RMC_FLAG_ACTIVEACKER 2
+#define WL_RMC_FLAG_RELMCAST 4
+#define WL_RMC_MAX_TABLE_ENTRY 4
+
+#define WL_RMC_VER 1
+#define WL_RMC_INDEX_ACK_ALL 255
+#define WL_RMC_NUM_OF_MC_STREAMS 4
+#define WL_RMC_MAX_TRS_PER_GROUP 1
+#define WL_RMC_MAX_TRS_IN_ACKALL 1
+#define WL_RMC_ACK_MCAST0 0x02
+#define WL_RMC_ACK_MCAST_ALL 0x01
+#define WL_RMC_ACTF_TIME_MIN 300 /**< time in ms */
+#define WL_RMC_ACTF_TIME_MAX 20000 /**< time in ms */
+#define WL_RMC_MAX_NUM_TRS 32 /**< maximum transmitters allowed */
+#define WL_RMC_ARTMO_MIN 350 /**< time in ms */
+#define WL_RMC_ARTMO_MAX 40000 /**< time in ms */
+
+/* RMC events in action frames */
+enum rmc_opcodes {
+ RELMCAST_ENTRY_OP_DISABLE = 0, /**< Disable multi-cast group */
+ RELMCAST_ENTRY_OP_DELETE = 1, /**< Delete multi-cast group */
+ RELMCAST_ENTRY_OP_ENABLE = 2, /**< Enable multi-cast group */
+ RELMCAST_ENTRY_OP_ACK_ALL = 3 /**< Enable ACK ALL bit in AMT */
+};
+
+/* RMC operational modes */
+enum rmc_modes {
+ WL_RMC_MODE_RECEIVER = 0, /**< Receiver mode by default */
+ WL_RMC_MODE_TRANSMITTER = 1, /**< Transmitter mode using wl ackreq */
+ WL_RMC_MODE_INITIATOR = 2 /**< Initiator mode using wl ackreq */
+};
+
+/** Each RMC mcast client info */
+typedef struct wl_relmcast_client {
+ uint8 flag; /**< status of client such as AR, R, or blacklisted */
+ uint8 PAD;
+ int16 rssi; /**< rssi value of RMC client */
+ struct ether_addr addr; /**< mac address of RMC client */
+} wl_relmcast_client_t;
+
+/** RMC Counters */
+typedef struct wl_rmc_cnts {
+ uint16 version; /**< see definition of WL_CNT_T_VERSION */
+ uint16 length; /**< length of entire structure */
+ uint16 dupcnt; /**< counter for duplicate rmc MPDU */
+ uint16 ackreq_err; /**< counter for wl ackreq error */
+ uint16 af_tx_err; /**< error count for action frame transmit */
+ uint16 null_tx_err; /**< error count for rmc null frame transmit */
+ uint16 af_unicast_tx_err; /**< error count for rmc unicast frame transmit */
+ uint16 mc_no_amt_slot; /**< No mcast AMT entry available */
+ /* Unused. Keep for rom compatibility */
+ uint16 mc_no_glb_slot; /**< No mcast entry available in global table */
+ uint16 mc_not_mirrored; /**< mcast group is not mirrored */
+ uint16 mc_existing_tr; /**< mcast group is already taken by transmitter */
+ uint16 mc_exist_in_amt; /**< mcast group is already programmed in amt */
+ /* Unused. Keep for rom compatibility */
+ uint16 mc_not_exist_in_gbl; /**< mcast group is not in global table */
+ uint16 mc_not_exist_in_amt; /**< mcast group is not in AMT table */
+ uint16 mc_utilized; /**< mcast address is already taken */
+ uint16 mc_taken_other_tr; /**< mcast address already taken by another transmitter */
+ uint32 rmc_rx_frames_mac; /**< no of mc frames received from mac */
+ uint32 rmc_tx_frames_mac; /**< no of mc frames transmitted to mac */
+ uint32 mc_null_ar_cnt; /**< no. of times NULL AR is received */
+ uint32 mc_ar_role_selected; /**< no. of times took AR role */
+ uint32 mc_ar_role_deleted; /**< no. of times AR role cancelled */
+ uint32 mc_noacktimer_expired; /**< no. of times noack timer expired */
+ uint16 mc_no_wl_clk; /**< no wl clk detected when trying to access amt */
+ uint16 mc_tr_cnt_exceeded; /**< No of transmitters in the network exceeded */
+} wl_rmc_cnts_t;
+
+/** RMC Status */
+typedef struct wl_relmcast_st {
+ uint8 ver; /**< version of RMC */
+ uint8 num; /**< number of clients detected by transmitter */
+ wl_relmcast_client_t clients[WL_RMC_MAX_CLIENT];
+ uint16 err; /**< error status (used in infra) */
+ uint16 actf_time; /**< action frame time period */
+} wl_relmcast_status_t;
+
+/** Entry for each STA/node */
+typedef struct wl_rmc_entry {
+ /* operation on multi-cast entry such add,
+ * delete, ack-all
+ */
+ int8 flag;
+ struct ether_addr addr; /**< multi-cast group mac address */
+} wl_rmc_entry_t;
+
+/** RMC table */
+typedef struct wl_rmc_entry_table {
+ uint8 index; /**< index to a particular mac entry in table */
+ uint8 opcode; /**< opcodes or operation on entry */
+ wl_rmc_entry_t entry[WL_RMC_MAX_TABLE_ENTRY];
+} wl_rmc_entry_table_t;
+
+typedef struct wl_rmc_trans_elem {
+ struct ether_addr tr_mac; /**< transmitter mac */
+ struct ether_addr ar_mac; /**< ar mac */
+ uint16 artmo; /**< AR timeout */
+ uint8 amt_idx; /**< amt table entry */
+ uint8 PAD;
+ uint16 flag; /**< entry will be acked, not acked, programmed, full etc */
+} wl_rmc_trans_elem_t;
+
+/** RMC transmitters */
+typedef struct wl_rmc_trans_in_network {
+ uint8 ver; /**< version of RMC */
+ uint8 num_tr; /**< number of transmitters in the network */
+ wl_rmc_trans_elem_t trs[WL_RMC_MAX_NUM_TRS];
+} wl_rmc_trans_in_network_t;
+
+/** To update vendor specific ie for RMC */
+typedef struct wl_rmc_vsie {
+ uint8 oui[DOT11_OUI_LEN];
+ uint8 PAD;
+ uint16 payload; /**< IE Data Payload */
+} wl_rmc_vsie_t;
+
+/* structures & defines for proximity detection */
+enum proxd_method {
+ PROXD_UNDEFINED_METHOD = 0,
+ PROXD_RSSI_METHOD = 1,
+ PROXD_TOF_METHOD = 2
+};
+
+/* structures for proximity detection device role */
+#define WL_PROXD_MODE_DISABLE 0
+#define WL_PROXD_MODE_NEUTRAL 1
+#define WL_PROXD_MODE_INITIATOR 2
+#define WL_PROXD_MODE_TARGET 3
+
+#define WL_PROXD_ACTION_STOP 0
+#define WL_PROXD_ACTION_START 1
+
+#define WL_PROXD_FLAG_TARGET_REPORT 0x1
+#define WL_PROXD_FLAG_REPORT_FAILURE 0x2
+#define WL_PROXD_FLAG_INITIATOR_REPORT 0x4
+#define WL_PROXD_FLAG_NOCHANSWT 0x8
+#define WL_PROXD_FLAG_NETRUAL 0x10
+#define WL_PROXD_FLAG_INITIATOR_RPTRTT 0x20
+#define WL_PROXD_FLAG_ONEWAY 0x40
+#define WL_PROXD_FLAG_SEQ_EN 0x80
+
+#define WL_PROXD_SETFLAG_K 0x1
+#define WL_PROXD_SETFLAG_N 0x2
+#define WL_PROXD_SETFLAG_S 0x4
+
+#define WL_PROXD_RANDOM_WAKEUP 0x8000
+#define WL_PROXD_MAXREPORT 8
+
+typedef struct wl_proxd_iovar {
+ uint16 method; /**< Proximity Detection method */
+ uint16 mode; /**< Mode (neutral, initiator, target) */
+} wl_proxd_iovar_t;
+
+/*
+ * structures for proximity detection parameters
+ * consists of two parts, common and method specific params
+ * common params should be placed at the beginning
+ */
+
+typedef struct wl_proxd_params_common {
+ chanspec_t chanspec; /**< channel spec */
+ int16 tx_power; /**< tx power of Proximity Detection(PD) frames (in dBm) */
+ uint16 tx_rate; /**< tx rate of PD frames (in 500kbps units) */
+ uint16 timeout; /**< timeout value */
+ uint16 interval; /**< interval between neighbor finding attempts (in TU) */
+ uint16 duration; /**< duration of neighbor finding attempts (in ms) */
+} wl_proxd_params_common_t;
+
+typedef struct wl_proxd_params_rssi_method {
+ chanspec_t chanspec; /**< chanspec for home channel */
+ int16 tx_power; /**< tx power of Proximity Detection frames (in dBm) */
+ uint16 tx_rate; /**< tx rate of PD frames, 500kbps units */
+ uint16 timeout; /**< state machine wait timeout of the frames (in ms) */
+ uint16 interval; /**< interval between neighbor finding attempts (in TU) */
+ uint16 duration; /**< duration of neighbor finding attempts (in ms) */
+ /* method specific ones go after this line */
+ int16 rssi_thresh; /**< RSSI threshold (in dBm) */
+ uint16 maxconvergtmo; /**< max wait converge timeout (in ms) */
+} wl_proxd_params_rssi_method_t;
+
+#define Q1_NS 25 /**< Q1 time units */
+
+/* Number of bandwidths that the TOF can support */
+#define TOF_BW_NUM 3
+/* Total number of indices, including the seq tx/rx indices */
+#define TOF_BW_SEQ_NUM 5
+
+enum tof_bw_index {
+ TOF_BW_20MHZ_INDEX = 0,
+ TOF_BW_40MHZ_INDEX = 1,
+ TOF_BW_80MHZ_INDEX = 2,
+ TOF_BW_SEQTX_INDEX = 3,
+ TOF_BW_SEQRX_INDEX = 4
+};
+
+/*
+ * Version 2 of the above bandwidth defines
+ * and enum tof_bw_index, extended with the
+ * advent of 160 MHz
+ */
+#define TOF_BW_NUM_V2 4u
+#define TOF_BW_SEQ_NUM_V2 6u
+
+enum tof_bw_index_v2 {
+ TOF_BW_20MHZ_INDEX_V2 = 0,
+ TOF_BW_40MHZ_INDEX_V2 = 1,
+ TOF_BW_80MHZ_INDEX_V2 = 2,
+ TOF_BW_160MHZ_INDEX_V2 = 3,
+ TOF_BW_SEQTX_INDEX_V2 = 4,
+ TOF_BW_SEQRX_INDEX_V2 = 5
+};
+
+#define BANDWIDTH_BASE 20 /**< base value of bandwidth */
+#define TOF_BW_20MHZ (BANDWIDTH_BASE << TOF_BW_20MHZ_INDEX_V2)
+#define TOF_BW_40MHZ (BANDWIDTH_BASE << TOF_BW_40MHZ_INDEX_V2)
+#define TOF_BW_80MHZ (BANDWIDTH_BASE << TOF_BW_80MHZ_INDEX_V2)
+#define TOF_BW_160MHZ (BANDWIDTH_BASE << TOF_BW_160MHZ_INDEX_V2)
+
+#define TOF_BW_10MHZ 10
+
+#define NFFT_BASE 64 /**< base size of fft */
+#define TOF_NFFT_20MHZ (NFFT_BASE << TOF_BW_20MHZ_INDEX_V2)
+#define TOF_NFFT_40MHZ (NFFT_BASE << TOF_BW_40MHZ_INDEX_V2)
+#define TOF_NFFT_80MHZ (NFFT_BASE << TOF_BW_80MHZ_INDEX_V2)
+#define TOF_NFFT_160MHZ (NFFT_BASE << TOF_BW_160MHZ_INDEX_V2)
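+
+/*
+ * Illustrative compile-time check (a sketch, not part of the driver): the
+ * bandwidth and FFT-size macros above are the base values shifted left by
+ * the v2 bandwidth index, e.g. TOF_BW_160MHZ = 20 << 3 = 160 and
+ * TOF_NFFT_160MHZ = 64 << 3 = 512.
+ */
+#if 0	/* example only */
+typedef char tof_bw_160_check[(TOF_BW_160MHZ == 160) ? 1 : -1];
+typedef char tof_nfft_160_check[(TOF_NFFT_160MHZ == 512) ? 1 : -1];
+#endif	/* example only */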
+
+typedef struct wl_proxd_params_tof_method {
+ chanspec_t chanspec; /**< chanspec for home channel */
+ int16 tx_power; /**< tx power of Proximity Detection(PD) frames (in dBm) */
+	uint16 tx_rate; /**< tx rate of PD frames (in 500kbps units) */
+ uint16 timeout; /**< state machine wait timeout of the frames (in ms) */
+ uint16 interval; /**< interval between neighbor finding attempts (in TU) */
+ uint16 duration; /**< duration of neighbor finding attempts (in ms) */
+ /* specific for the method go after this line */
+ struct ether_addr tgt_mac; /**< target mac addr for TOF method */
+ uint16 ftm_cnt; /**< number of the frames txed by initiator */
+	uint16 retry_cnt; /**< number of retransmit attempts for ftm frames */
+ int16 vht_rate; /**< ht or vht rate */
+	/* more params required for other methods can be added here */
+} wl_proxd_params_tof_method_t;
+
+typedef struct wl_proxd_seq_config
+{
+ int16 N_tx_log2;
+ int16 N_rx_log2;
+ int16 N_tx_scale;
+ int16 N_rx_scale;
+ int16 w_len;
+ int16 w_offset;
+} wl_proxd_seq_config_t;
+
+#define WL_PROXD_TUNE_VERSION_1 1
+#define WL_PROXD_TUNE_VERSION_2 2
+#include <packed_section_start.h>
+/* For legacy ranging target (e.g. 43430, 43342) */
+typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_params_tof_tune_v1 {
+ uint32 version;
+ uint32 Ki; /**< h/w delay K factor for initiator */
+ uint32 Kt; /**< h/w delay K factor for target */
+ int16 vhtack; /**< enable/disable VHT ACK */
+ int16 N_log2[TOF_BW_SEQ_NUM]; /**< simple threshold crossing */
+ int16 w_offset[TOF_BW_NUM]; /**< offset of threshold crossing window(per BW) */
+ int16 w_len[TOF_BW_NUM]; /**< length of threshold crossing window(per BW) */
+ int32 maxDT; /**< max time difference of T4/T1 or T3/T2 */
+ int32 minDT; /**< min time difference of T4/T1 or T3/T2 */
+	uint8 totalfrmcnt; /**< total count of transferred measurement frames */
+ uint16 rsv_media; /**< reserve media value for TOF */
+ uint32 flags; /**< flags */
+ uint8 core; /**< core to use for tx */
+	uint8 setflags; /* set flags of K, N, S values */
+ int16 N_scale[TOF_BW_SEQ_NUM]; /**< simple threshold crossing */
+ uint8 sw_adj; /**< enable sw assisted timestamp adjustment */
+ uint8 hw_adj; /**< enable hw assisted timestamp adjustment */
+ uint8 seq_en; /**< enable ranging sequence */
+ uint8 ftm_cnt[TOF_BW_SEQ_NUM]; /**< number of ftm frames based on bandwidth */
+ int16 N_log2_2g; /**< simple threshold crossing for 2g channel */
+ int16 N_scale_2g; /**< simple threshold crossing for 2g channel */
+ wl_proxd_seq_config_t seq_5g20;
+ wl_proxd_seq_config_t seq_2g20; /* Thresh crossing params for 2G Sequence */
+ uint16 bitflip_thresh; /* bitflip threshold */
+ uint16 snr_thresh; /* SNR threshold */
+	int8 recv_2g_thresh; /* 2g receive sensitivity threshold */
+ uint32 acs_gdv_thresh;
+ int8 acs_rssi_thresh;
+ uint8 smooth_win_en;
+ int32 emu_delay;
+} BWL_POST_PACKED_STRUCT wl_proxd_params_tof_tune_v1_t;
+#include <packed_section_end.h>
+
+#include <packed_section_start.h>
+/* For legacy ranging initiator (including 4364) */
+typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_params_tof_tune_v2 {
+ uint32 version;
+ uint32 Ki; /**< h/w delay K factor for initiator */
+ uint32 Kt; /**< h/w delay K factor for target */
+ int16 vhtack; /**< enable/disable VHT ACK */
+ int16 N_log2[TOF_BW_SEQ_NUM]; /**< simple threshold crossing */
+ int16 w_offset[TOF_BW_NUM]; /**< offset of threshold crossing window(per BW) */
+ int16 w_len[TOF_BW_NUM]; /**< length of threshold crossing window(per BW) */
+ int32 maxDT; /**< max time difference of T4/T1 or T3/T2 */
+ int32 minDT; /**< min time difference of T4/T1 or T3/T2 */
+	uint8 totalfrmcnt; /**< total count of transferred measurement frames */
+ uint16 rsv_media; /**< reserve media value for TOF */
+ uint32 flags; /**< flags */
+ uint8 core; /**< core to use for tx */
+	uint8 setflags; /* set flags of K, N, S values */
+ int16 N_scale[TOF_BW_SEQ_NUM]; /**< simple threshold crossing */
+ uint8 sw_adj; /**< enable sw assisted timestamp adjustment */
+ uint8 hw_adj; /**< enable hw assisted timestamp adjustment */
+ uint8 seq_en; /**< enable ranging sequence */
+ uint8 ftm_cnt[TOF_BW_SEQ_NUM]; /**< number of ftm frames based on bandwidth */
+ int16 N_log2_2g; /**< simple threshold crossing for 2g channel */
+ int16 N_scale_2g; /**< simple threshold crossing for 2g channel */
+ wl_proxd_seq_config_t seq_5g20;
+ wl_proxd_seq_config_t seq_2g20; /* Thresh crossing params for 2G Sequence */
+ uint16 bitflip_thresh; /* bitflip threshold */
+ uint16 snr_thresh; /* SNR threshold */
+	int8 recv_2g_thresh; /* 2g receive sensitivity threshold */
+ uint32 acs_gdv_thresh;
+ int8 acs_rssi_thresh;
+ uint8 smooth_win_en;
+ int32 acs_gdmm_thresh;
+ int8 acs_delta_rssi_thresh;
+ int32 emu_delay;
+ uint8 core_mask; /* core mask selection */
+} BWL_POST_PACKED_STRUCT wl_proxd_params_tof_tune_v2_t;
+#include <packed_section_end.h>
+
+#define WL_PROXD_TUNE_VERSION_3 3
+/* Future ranging support */
+typedef struct wl_proxd_params_tof_tune_v3 {
+ uint16 version;
+ uint16 len;
+ uint32 Ki; /**< h/w delay K factor for initiator */
+ uint32 Kt; /**< h/w delay K factor for target */
+ int16 vhtack; /**< enable/disable VHT ACK */
+ uint16 PAD;
+ int16 N_log2[TOF_BW_SEQ_NUM]; /**< simple threshold crossing */
+ uint16 PAD;
+ int16 w_offset[TOF_BW_NUM]; /**< offset of threshold crossing window(per BW) */
+ uint16 PAD;
+ int16 w_len[TOF_BW_NUM]; /**< length of threshold crossing window(per BW) */
+ uint16 PAD;
+ int32 maxDT; /**< max time difference of T4/T1 or T3/T2 */
+ int32 minDT; /**< min time difference of T4/T1 or T3/T2 */
+	uint8 totalfrmcnt; /**< total count of transferred measurement frames */
+ uint8 PAD[3];
+ uint16 rsv_media; /**< reserve media value for TOF */
+ uint16 PAD;
+ uint32 flags; /**< flags */
+ uint8 core; /**< core to use for tx */
+	uint8 setflags; /* set flags of K, N, S values */
+ uint16 PAD;
+ int16 N_scale[TOF_BW_SEQ_NUM]; /**< simple threshold crossing */
+ uint8 sw_adj; /**< enable sw assisted timestamp adjustment */
+ uint8 hw_adj; /**< enable hw assisted timestamp adjustment */
+ uint8 seq_en; /**< enable ranging sequence */
+ uint8 PAD[3];
+ uint8 ftm_cnt[TOF_BW_SEQ_NUM]; /**< number of ftm frames based on bandwidth */
+ uint8 PAD[3];
+ int16 N_log2_2g; /**< simple threshold crossing for 2g channel */
+ int16 N_scale_2g; /**< simple threshold crossing for 2g channel */
+ wl_proxd_seq_config_t seq_5g20;
+ wl_proxd_seq_config_t seq_2g20; /* Thresh crossing params for 2G Sequence */
+ uint16 bitflip_thresh; /* bitflip threshold */
+ uint16 snr_thresh; /* SNR threshold */
+	int8 recv_2g_thresh; /* 2g receive sensitivity threshold */
+ uint8 PAD[3];
+ uint32 acs_gdv_thresh;
+ int8 acs_rssi_thresh;
+ uint8 smooth_win_en;
+ uint16 PAD;
+ int32 acs_gdmm_thresh;
+ int8 acs_delta_rssi_thresh;
+ uint8 PAD[3];
+ int32 emu_delay;
+ uint8 core_mask; /* core mask selection */
+ uint8 PAD[3];
+} wl_proxd_params_tof_tune_v3_t;
+
+/*
+ * tof tune with 160 MHz support
+ */
+#define WL_PROXD_TUNE_VERSION_4 4u
+typedef struct wl_proxd_params_tof_tune_v4 {
+ uint16 version;
+ uint16 len;
+ uint8 core; /**< core to use for tx */
+	uint8 setflags; /* set flags of K, N, S values */
+	uint8 totalfrmcnt; /**< total count of transferred measurement frames */
+ uint8 sw_adj; /**< enable sw assisted timestamp adjustment */
+ uint8 hw_adj; /**< enable hw assisted timestamp adjustment */
+ uint8 seq_en; /**< enable ranging sequence */
+ uint8 smooth_win_en;
+ uint8 core_mask; /* core mask selection */
+	int8 recv_2g_thresh; /* 2g receive sensitivity threshold */
+ int8 acs_rssi_thresh;
+ int8 acs_delta_rssi_thresh;
+ uint8 ftm_cnt[TOF_BW_SEQ_NUM_V2]; /**< no. of ftm frames based on bw */
+ uint8 PAD[3]; /* Use this for any int8/16 uint8/16 ext in future */
+ uint16 rsv_media; /**< reserve media value for TOF */
+ uint16 bitflip_thresh; /* bitflip threshold */
+ uint16 snr_thresh; /* SNR threshold */
+ int16 vhtack; /**< enable/disable VHT ACK */
+ int16 N_log2_2g; /**< simple threshold crossing for 2g channel */
+ int16 N_scale_2g; /**< simple threshold crossing for 2g channel */
+ int16 N_log2[TOF_BW_SEQ_NUM_V2]; /**< simple threshold crossing */
+ int16 w_offset[TOF_BW_NUM_V2]; /**< offset of thresh crossing window(per BW) */
+ int16 w_len[TOF_BW_NUM_V2]; /**< length of thresh crossing window(per BW) */
+ int16 N_scale[TOF_BW_SEQ_NUM_V2]; /**< simple threshold crossing */
+ uint32 Ki; /**< h/w delay K factor for initiator */
+ uint32 Kt; /**< h/w delay K factor for target */
+ uint32 flags; /**< flags */
+ uint32 acs_gdv_thresh;
+ int32 maxDT; /**< max time difference of T4/T1 or T3/T2 */
+ int32 minDT; /**< min time difference of T4/T1 or T3/T2 */
+ int32 acs_gdmm_thresh;
+ int32 emu_delay;
+	wl_proxd_seq_config_t seq_5g20; /* Thresh crossing params for 5G Sequence */
+ wl_proxd_seq_config_t seq_2g20; /* Thresh crossing params for 2G Sequence */
+} wl_proxd_params_tof_tune_v4_t;
+
+typedef struct wl_proxd_params_iovar {
+ uint16 method; /**< Proximity Detection method */
+ uint8 PAD[2];
+ union {
+ /* common params for pdsvc */
+ wl_proxd_params_common_t cmn_params; /**< common parameters */
+ /* method specific */
+ wl_proxd_params_rssi_method_t rssi_params; /**< RSSI method parameters */
+ wl_proxd_params_tof_method_t tof_params; /**< TOF method parameters */
+ /* tune parameters */
+ wl_proxd_params_tof_tune_v3_t tof_tune; /**< TOF tune parameters */
+ } u; /**< Method specific optional parameters */
+} wl_proxd_params_iovar_t;
+
+/*
+ * proxd param iov with 160 MHz support
+ */
+#define WL_PROXD_IOVAR_VERSION_2 2u
+typedef struct wl_proxd_params_iovar_v2 {
+ uint16 version;
+ uint16 len;
+ uint16 method; /**< Proximity Detection method */
+ uint16 PAD;
+ union {
+ /* common params for pdsvc */
+ wl_proxd_params_common_t cmn_params; /**< common parameters */
+ /* method specific */
+ wl_proxd_params_rssi_method_t rssi_params; /**< RSSI method parameters */
+ wl_proxd_params_tof_method_t tof_params; /**< TOF method parameters */
+ /* tune parameters */
+ wl_proxd_params_tof_tune_v4_t tof_tune; /**< TOF tune parameters */
+ } u; /**< Method specific optional parameters */
+ uint8 tlv_params[]; /* xtlvs for variable ext params */
+} wl_proxd_params_iovar_v2_t;
+
+#define PROXD_COLLECT_GET_STATUS 0
+#define PROXD_COLLECT_SET_STATUS 1
+#define PROXD_COLLECT_QUERY_HEADER 2
+#define PROXD_COLLECT_QUERY_DATA 3
+#define PROXD_COLLECT_QUERY_DEBUG 4
+#define PROXD_COLLECT_REMOTE_REQUEST 5
+#define PROXD_COLLECT_DONE 6
+
+typedef enum {
+ WL_PROXD_COLLECT_METHOD_TYPE_DISABLE = 0x0,
+ WL_PROXD_COLLECT_METHOD_TYPE_IOVAR = 0x1,
+ WL_PROXD_COLLECT_METHOD_TYPE_EVENT = 0x2,
+ WL_PROXD_COLLECT_METHOD_TYPE_EVENT_LOG = 0x4
+} wl_proxd_collect_method_type_t;
+
+typedef uint16 wl_proxd_collect_method_t; /* query status: method to send proxd collect */
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_collect_query {
+ uint32 method; /**< method */
+ uint8 request; /**< Query request. */
+ uint8 status; /**< bitmask 0 -- disable, 0x1 -- enable collection, */
+ /* 0x2 -- Use generic event, 0x4 -- use event log */
+ uint16 index; /**< The current frame index [0 to total_frames - 1]. */
+ uint16 mode; /**< Initiator or Target */
+ uint8 busy; /**< tof sm is busy */
+ uint8 remote; /**< Remote collect data */
+} BWL_POST_PACKED_STRUCT wl_proxd_collect_query_t;
+#include <packed_section_end.h>
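+
+/*
+ * Illustrative sketch (assumed usage, not confirmed by the source): requesting
+ * the collect header of a TOF session through wl_proxd_collect_query_t above.
+ */
+#if 0	/* example only */
+static void
+proxd_query_collect_header(wl_proxd_collect_query_t *q)
+{
+	q->method = PROXD_TOF_METHOD;
+	q->request = PROXD_COLLECT_QUERY_HEADER;
+	q->index = 0;	/* first frame */
+}
+#endif	/* example only */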
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_collect_header {
+ uint16 total_frames; /**< The total frames for this collect. */
+ uint16 nfft; /**< nfft value */
+ uint16 bandwidth; /**< bandwidth */
+ uint16 channel; /**< channel number */
+ uint32 chanspec; /**< channel spec */
+ uint32 fpfactor; /**< avb timer value factor */
+ uint16 fpfactor_shift; /**< avb timer value shift bits */
+ int32 distance; /**< distance calculated by fw */
+ uint32 meanrtt; /**< mean of RTTs */
+ uint32 modertt; /**< mode of RTTs */
+ uint32 medianrtt; /**< median of RTTs */
+ uint32 sdrtt; /**< standard deviation of RTTs */
+ uint32 clkdivisor; /**< clock divisor */
+ uint16 chipnum; /**< chip type */
+ uint8 chiprev; /**< chip revision */
+ uint8 phyver; /**< phy version */
+ struct ether_addr localMacAddr; /**< local mac address */
+ struct ether_addr remoteMacAddr; /**< remote mac address */
+ wl_proxd_params_tof_tune_v3_t params;
+} BWL_POST_PACKED_STRUCT wl_proxd_collect_header_t;
+#include <packed_section_end.h>
+
+/*
+ * proxd collect header with 160 MHz support
+ */
+#define WL_PROXD_COLLECT_HEADER_VERSION_2 2u
+typedef struct wl_proxd_collect_header_v2 {
+ uint16 version;
+ uint16 len;
+ uint8 chiprev; /**< chip revision */
+ uint8 phyver; /**< phy version */
+ uint8 PAD[2]; /* Use this for any int8/16 uint8/16 ext in future */
+ uint16 total_frames; /**< The total frames for this collect. */
+ uint16 nfft; /**< nfft value */
+ uint16 bandwidth; /**< bandwidth */
+ uint16 channel; /**< channel number */
+ uint16 fpfactor_shift; /**< avb timer value shift bits */
+ uint16 chipnum; /**< chip type */
+ uint32 chanspec; /**< channel spec */
+ uint32 fpfactor; /**< avb timer value factor */
+ uint32 meanrtt; /**< mean of RTTs */
+ uint32 modertt; /**< mode of RTTs */
+ uint32 medianrtt; /**< median of RTTs */
+ uint32 sdrtt; /**< standard deviation of RTTs */
+ uint32 clkdivisor; /**< clock divisor */
+ int32 distance; /**< distance calculated by fw */
+ struct ether_addr localMacAddr; /**< local mac address */
+ uint16 PAD; /* Use this for any int8/16 uint8/16 ext in future */
+ struct ether_addr remoteMacAddr; /**< remote mac address */
+ uint16 PAD; /* Use this for any int8/16 uint8/16 ext in future */
+ wl_proxd_params_tof_tune_v4_t params; /* TOF tune params */
+ uint8 tlv_params[]; /* xtlvs for variable ext params */
+} wl_proxd_collect_header_v2_t;
+
+/* ifdef WL_NAN */
+/* ********************** NAN wl interface struct types and defs ******************** */
+/*
+ * Uses new common IOVAR batch processing mechanism
+ */
+
+/*
+ * NAN config control
+ * Bits 0 - 23 can be set by host
+ * Bits 24 - 31 - Internal use for firmware, host cannot set it
+ */
+
+/*
+ * Bit 0 : If set to 1, means event uses nan bsscfg,
+ * otherwise uses infra bsscfg. Default is using infra bsscfg
+ */
+#define WL_NAN_CTRL_ROUTE_EVENT_VIA_NAN_BSSCFG 0x000001
+/* If set, discovery beacons are transmitted on 2G band */
+#define WL_NAN_CTRL_DISC_BEACON_TX_2G 0x000002
+/* If set, sync beacons are transmitted on 2G band */
+#define WL_NAN_CTRL_SYNC_BEACON_TX_2G 0x000004
+/* If set, discovery beacons are transmitted on 5G band */
+#define WL_NAN_CTRL_DISC_BEACON_TX_5G 0x000008
+/* If set, sync beacons are transmitted on 5G band */
+#define WL_NAN_CTRL_SYNC_BEACON_TX_5G 0x000010
+/* If set, auto datapath responses will be sent by FW */
+#define WL_NAN_CTRL_AUTO_DPRESP 0x000020
+/* If set, auto datapath confirms will be sent by FW */
+#define WL_NAN_CTRL_AUTO_DPCONF 0x000040
+/* If set, auto schedule responses will be sent by FW */
+#define WL_NAN_CTRL_AUTO_SCHEDRESP 0x000080
+/* If set, auto schedule confirms will be sent by FW */
+#define WL_NAN_CTRL_AUTO_SCHEDCONF 0x000100
+/* If set, proprietary rates are supported by FW */
+#define WL_NAN_CTRL_PROP_RATE 0x000200
+/* If set, service awake_dw overrides global dev awake_dw */
+#define WL_NAN_CTRL_SVC_OVERRIDE_DEV_AWAKE_DW 0x000400
+/* If set, merge scan will be disabled */
+#define WL_NAN_CTRL_SCAN_DISABLE 0x000800
+/* If set, power save will be disabled */
+#define WL_NAN_CTRL_POWER_SAVE_DISABLE 0x001000
+/* If set, device will merge to configured CID only */
+#define WL_NAN_CTRL_MERGE_CONF_CID_ONLY 0x002000
+/* If set, 5g core will be brought down in single band NAN */
+#define WL_NAN_CTRL_5G_SLICE_POWER_OPT 0x004000
+#define WL_NAN_CTRL_DUMP_HEAP 0x008000
+/* If set, host generates and assign ndp id for ndp sessions */
+#define WL_NAN_CTRL_HOST_GEN_NDPID 0x010000
+/* If set, nan ndp inactivity watchdog will be activated */
+#define WL_NAN_CTRL_DELETE_INACTIVE_PEERS 0x020000
+/* If set, nan assoc coex will be activated */
+#define WL_NAN_CTRL_INFRA_ASSOC_COEX 0x040000
+/* If set, dam will accept all NDP/RNG request from the peer including counter */
+#define WL_NAN_CTRL_DAM_ACCEPT_ALL 0x080000
+/* If set, nan mac ignores role for tx discovery beacon for periodic config */
+#define WL_NAN_CTRL_FASTDISC_IGNO_ROLE 0x100000
+/* If set, include NA in NAN beacons (disc beacons for now) */
+#define WL_NAN_CTRL_INCL_NA_IN_BCNS 0x200000
+/* If set, host assist will be enabled */
+#define WL_NAN_CTRL_HOST_ASSIST 0x400000
+/* If set, host configures NDI associated with the service */
+#define WL_NAN_CTRL_HOST_CFG_SVC_NDI 0x800000
+
+/* Value when all host-configurable bits set */
+#define WL_NAN_CTRL_MAX_MASK 0xFFFFFF
+#define WL_NAN_CFG_CTRL_FW_BITS 8
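+
+/*
+ * Illustrative sketch (an assumed host configuration, not from the source):
+ * any OR-combination of the bits above that stays within WL_NAN_CTRL_MAX_MASK
+ * is a valid host-settable control word.
+ */
+#if 0	/* example only */
+#define EXAMPLE_NAN_CTRL	(WL_NAN_CTRL_DISC_BEACON_TX_2G | \
+	WL_NAN_CTRL_SYNC_BEACON_TX_2G | WL_NAN_CTRL_AUTO_DPRESP)
+#endif	/* example only */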
+
+/* Last 8 bits are firmware-controlled bits.
+ * Bit 31:
+ * If set - indicates that NAN initialization is successful
+ * Bit 30:
+ * If set - indicates that NAN MAC cfg creation is successful
+ *
+ * NOTE: These bits are read-only for the host.
+ * Any writes to them from the host are masked off
+ */
+#define WL_NAN_PROTO_INIT_DONE (1u << 31u)
+#define WL_NAN_CFG_CREATE_DONE (1u << 30u)
+
+#define WL_NAN_GET_PROTO_INIT_STATUS(x) \
+ (((x) & WL_NAN_PROTO_INIT_DONE) ? TRUE:FALSE)
+#define WL_NAN_CLEAR_PROTO_INIT_STATUS(x) \
+ ((x) &= ~WL_NAN_PROTO_INIT_DONE)
+#define WL_NAN_SET_PROTO_INIT_STATUS(x) \
+ ((x) |= (WL_NAN_PROTO_INIT_DONE))
+
+#define WL_NAN_GET_CFG_CREATE_STATUS(x) \
+ (((x) & WL_NAN_CFG_CREATE_DONE) ? TRUE:FALSE)
+#define WL_NAN_CLEAR_CFG_CREATE_STATUS(x) \
+ ((x) &= ~WL_NAN_CFG_CREATE_DONE)
+#define WL_NAN_SET_CFG_CREATE_STATUS(x) \
+ ((x) |= (WL_NAN_CFG_CREATE_DONE))
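+
+/*
+ * Illustrative compile-time check (a sketch, not part of the driver): the
+ * firmware-owned status bits sit above bit 23, so they never collide with
+ * the host-settable mask.
+ */
+#if 0	/* example only */
+typedef char nan_fw_bits_check[(((WL_NAN_PROTO_INIT_DONE |
+	WL_NAN_CFG_CREATE_DONE) & WL_NAN_CTRL_MAX_MASK) == 0) ? 1 : -1];
+#endif	/* example only */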
+
+#define WL_NAN_IOCTL_VERSION 0x2
+/* some sufficient ioc buff size for our module */
+#define WL_NAN_IOC_BUFSZ 256
+/* some sufficient ioc buff size for dump commands */
+#define WL_NAN_IOC_BUFSZ_EXT 1024
+#define WL_NAN_MAX_SIDS_IN_BEACONS 127 /* Max allowed SIDs */
+#define WL_NAN_MASTER_RANK_LEN 8
+#define WL_NAN_RANGE_LIMITED 0x0040 /* Publish/Subscribe flags */
+
+/** The service hash (service id) is exactly this many bytes. */
+#define WL_NAN_SVC_HASH_LEN 6
+#define WL_NAN_HASHES_PER_BLOOM 4 /** Number of hash functions per bloom filter */
+
+/* no. of max last disc results */
+#define WL_NAN_MAX_DISC_RESULTS 3
+
+/* Max len of Rx and Tx filters */
+#define WL_NAN_MAX_SVC_MATCH_FILTER_LEN 255
+
+/* Max service name len */
+#define WL_NAN_MAX_SVC_NAME_LEN 32
+
+/* Type of Data path connection */
+#define WL_NAN_DP_TYPE_UNICAST 0
+#define WL_NAN_DP_TYPE_MULTICAST 1
+
+/* MAX security params length PMK field */
+#define WL_NAN_NCS_SK_PMK_LEN 32
+
+/* Post disc attr ID type */
+typedef uint8 wl_nan_post_disc_attr_id_t;
+
+/*
+ * Component IDs
+ */
+typedef enum {
+ WL_NAN_COMPID_CONFIG = 1,
+ WL_NAN_COMPID_ELECTION = 2,
+ WL_NAN_COMPID_SD = 3,
+ WL_NAN_COMPID_TIMESYNC = 4,
+ WL_NAN_COMPID_DATA_PATH = 5,
+ WL_NAN_COMPID_DEBUG = 15 /* Keep this at the end */
+} wl_nan_comp_id_t;
+
+#define WL_NAN_COMP_SHIFT 8
+#define WL_NAN_COMP_MASK(_c) (0x0F & ((uint8)(_c)))
+#define WL_NAN_COMP_ID(_c) (WL_NAN_COMP_MASK(_c) << WL_NAN_COMP_SHIFT)
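+
+/*
+ * Illustrative compile-time check (a sketch): WL_NAN_COMP_ID() keeps only the
+ * low 4 bits of the component ID and shifts them into the upper byte, e.g.
+ * WL_NAN_COMP_ID(WL_NAN_COMPID_DEBUG) = (0x0F & 15) << 8 = 0x0F00.
+ */
+#if 0	/* example only */
+typedef char nan_comp_id_check[(WL_NAN_COMP_ID(WL_NAN_COMPID_DEBUG) == 0x0F00) ? 1 : -1];
+#endif	/* example only */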
+
+/* NAN Events */
+
+/** Instance ID type (unique identifier) */
+typedef uint8 wl_nan_instance_id_t;
+
+/* Publish sent for a subscribe */
+/* WL_NAN_EVENT_REPLIED */
+
+typedef struct wl_nan_ev_replied {
+ struct ether_addr sub_mac; /* Subscriber MAC */
+ wl_nan_instance_id_t pub_id; /* Publisher Instance ID */
+ uint8 sub_id; /* Subscriber ID */
+ int8 sub_rssi; /* Subscriber RSSI */
+ uint8 pad[3];
+} wl_nan_ev_replied_t;
+
+typedef struct wl_nan_event_replied {
+ struct ether_addr sub_mac; /* Subscriber MAC */
+ wl_nan_instance_id_t pub_id; /* Publisher Instance ID */
+ uint8 sub_id; /* Subscriber ID */
+ int8 sub_rssi; /* Subscriber RSSI */
+ uint8 attr_num;
+ uint16 attr_list_len; /* sizeof attributes attached to payload */
+ uint8 attr_list[0]; /* attributes payload */
+} wl_nan_event_replied_t;
+
+/* NAN Tx status of transmitted frames */
+#define WL_NAN_TXS_FAILURE 0
+#define WL_NAN_TXS_SUCCESS 1
+
+/* NAN frame types */
+enum wl_nan_frame_type {
+ /* discovery frame types */
+ WL_NAN_FRM_TYPE_PUBLISH = 1,
+ WL_NAN_FRM_TYPE_SUBSCRIBE = 2,
+ WL_NAN_FRM_TYPE_FOLLOWUP = 3,
+
+ /* datapath frame types */
+ WL_NAN_FRM_TYPE_DP_REQ = 4,
+ WL_NAN_FRM_TYPE_DP_RESP = 5,
+ WL_NAN_FRM_TYPE_DP_CONF = 6,
+ WL_NAN_FRM_TYPE_DP_INSTALL = 7,
+ WL_NAN_FRM_TYPE_DP_END = 8,
+
+ /* schedule frame types */
+ WL_NAN_FRM_TYPE_SCHED_REQ = 9,
+ WL_NAN_FRM_TYPE_SCHED_RESP = 10,
+ WL_NAN_FRM_TYPE_SCHED_CONF = 11,
+ WL_NAN_FRM_TYPE_SCHED_UPD = 12,
+
+ /* ranging frame types */
+ WL_NAN_FRM_TYPE_RNG_REQ = 13,
+ WL_NAN_FRM_TYPE_RNG_RESP = 14,
+ WL_NAN_FRM_TYPE_RNG_TERM = 15,
+ WL_NAN_FRM_TYPE_RNG_REPORT = 16,
+
+ WL_NAN_FRM_TYPE_UNSOLICIT_SDF = 17,
+ WL_NAN_FRM_TYPE_INVALID
+};
+typedef uint8 wl_nan_frame_type_t;
+
+/* NAN Reason codes for tx status */
+enum wl_nan_txs_reason_codes {
+ WL_NAN_REASON_SUCCESS = 1, /* NAN status success */
+ WL_NAN_REASON_TIME_OUT = 2, /* timeout reached */
+ WL_NAN_REASON_DROPPED = 3, /* pkt dropped due to internal failure */
+ WL_NAN_REASON_MAX_RETRIES_DONE = 4 /* Max retries exceeded */
+};
+
+/* For NAN TX status */
+typedef struct wl_nan_event_txs {
+ uint8 status; /* For TX status, success or failure */
+ uint8 reason_code; /* to identify reason when status is failure */
+ uint16 host_seq; /* seq num to keep track of pkts sent by host */
+ uint8 type; /* wl_nan_frame_type_t */
+ uint8 pad;
+ uint16 opt_tlvs_len;
+ uint8 opt_tlvs[];
+} wl_nan_event_txs_t;
+
+/* SD transmit pkt's event status is sent as optional tlv in wl_nan_event_txs_t */
+typedef struct wl_nan_event_sd_txs {
+ uint8 inst_id; /* Publish or subscribe instance id */
+ uint8 req_id; /* Requestor instance id */
+} wl_nan_event_sd_txs_t;
+
+/* nanho fsm tlv WL_NAN_XTLV_NANHO_OOB_TXS(0x0b0a) */
+typedef struct wl_nan_event_nanho_txs {
+ uint32 fsm_id; /* nho fsm id */
+ uint16 seq_id; /* nho seq id */
+ uint16 pad;
+} wl_nan_event_nanho_txs_t;
+
+/* Subscribe or Publish instance Terminated */
+
+/* WL_NAN_EVENT_TERMINATED */
+
+#define NAN_SD_TERM_REASON_TIMEOUT 1
+#define NAN_SD_TERM_REASON_HOSTREQ 2
+#define NAN_SD_TERM_REASON_FWTERM 3
+#define NAN_SD_TERM_REASON_FAIL 4
+
+typedef struct wl_nan_ev_terminated {
+ uint8 instance_id; /* publish / subscribe instance id */
+	uint8 reason; /* 1=timeout, 2=host/IOVAR, 3=FW terminated, 4=failure */
+	uint8 svctype; /* 0=publish, 1=subscribe */
+ uint8 pad; /* Align */
+ uint32 tx_cnt; /* Number of SDFs sent */
+} wl_nan_ev_terminated_t;
+
+/* Follow up received against a pub / subscr */
+/* WL_NAN_EVENT_RECEIVE */
+
+typedef struct wl_nan_ev_receive {
+ struct ether_addr remote_addr; /* Peer NAN device MAC */
+ uint8 local_id; /* Local subscribe or publish ID */
+ uint8 remote_id; /* Remote subscribe or publish ID */
+ int8 fup_rssi;
+ uint8 attr_num;
+ uint16 attr_list_len; /* sizeof attributes attached to payload */
+ uint8 attr_list[0]; /* attributes payload */
+} wl_nan_ev_receive_t;
+
+/* WL_NAN_EVENT_DISC_CACHE_TIMEOUT */
+#define WL_NAN_DISC_CACHE_EXPIRY_ENTRIES_MAX 8
+
+typedef struct wl_nan_disc_expired_cache_entry {
+ uint8 l_sub_id; /* local sub instance_id */
+ uint8 r_pub_id; /* remote-matched pub instance_id */
+ struct ether_addr r_nmi_addr; /* remote-matched pub nmi addr */
+} wl_nan_disc_expired_cache_entry_t;
+
+typedef struct wl_nan_ev_disc_cache_timeout {
+ uint16 count; /* no. of expired cache entries */
+ uint16 pad;
+ wl_nan_disc_expired_cache_entry_t cache_exp_list[];
+} wl_nan_ev_disc_cache_timeout_t;
+
+/* For NAN event mask extension */
+#define WL_NAN_EVMASK_EXTN_VER 1
+#define WL_NAN_EVMASK_EXTN_LEN 16 /* 16*8 = 128 masks supported */
+
+typedef struct wl_nan_event_extn {
+ uint8 ver;
+ uint8 pad;
+ uint16 len;
+ uint8 evmask[];
+} wl_nan_evmask_extn_t;
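+
+/*
+ * Illustrative sketch (the bit layout is an assumption, not confirmed by the
+ * source): with WL_NAN_EVMASK_EXTN_LEN bytes at 8 event bits per byte, the
+ * 128 maskable events noted above fit; a host helper might set event "e" so:
+ */
+#if 0	/* example only */
+static void
+nan_evmask_set(uint8 *evmask, uint8 e)	/* e: 0..127 */
+{
+	evmask[e >> 3] |= (uint8)(1u << (e & 7));
+}
+#endif	/* example only */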
+
+/* WL_NAN_XTLV_DATA_DP_TXS */
+
+typedef struct wl_nan_data_dp_txs {
+ uint8 ndp_id;
+ uint8 pad;
+ struct ether_addr indi; /* initiator ndi */
+} wl_nan_data_dp_txs_t;
+
+/* WL_NAN_XTLV_RNG_TXS */
+
+typedef struct wl_nan_range_txs {
+ uint8 range_id;
+ uint8 pad[3];
+} wl_nan_range_txs_t;
+
+#define NAN_MAX_BANDS 2
+
+enum wl_nan_oob_af_txs_reason_code {
+ WL_NAN_OOB_AF_TX_REASON_BMAP = 0x01, /* Bitmap schedule */
+ WL_NAN_OOB_AF_TX_REASON_TIMEOUT = 0x02, /* OOB AF session timeout */
+	WL_NAN_OOB_AF_TX_REASON_DISABLED = 0x03, /* OOB disabled while it was scheduled */
+};
+
+/* WL_NAN_EVENT_OOB_AF_TXS event sent to host after NAN OOB AF transmit attempted */
+typedef struct wl_nan_event_oob_af_txs {
+ uint8 result; /* For TX status, success or failure */
+ uint8 reason; /* wl_nan_oob_af_txs_reason_code */
+ uint16 token; /* Token from the host */
+} wl_nan_event_oob_af_txs_t;
+
+/* WL_NAN_EVENT_OOB_AF_TXS status */
+#define NAN_OOB_AF_TXS_ACK 0x1
+#define NAN_OOB_AF_TXS_NO_ACK 0x2
+#define NAN_OOB_AF_TXS_EXPIRED 0x3
+#define NAN_OOB_AF_TXS_DISABLED 0x4
+
+/* WL_NAN_EVENT_OOB_AF_RX event sent to host upon receiving a NAN OOB AF frame */
+typedef struct wl_nan_event_oob_af_rx {
+ struct ether_addr sa;
+ struct ether_addr da;
+ struct ether_addr bssid;
+ uint16 payload_len;
+	uint8 payload[]; /* AF hdr + NAN attributes in TLV format */
+} wl_nan_event_oob_af_rx_t;
+
+#define WL_NAN_CFG_OOB_AF_BODY_MAX_SIZE 1024u /* Max size of AF payload */
+#define WL_NAN_CFG_OOB_AF_MAX_TIMEOUT	8000u	/* max timeout (in ms) for an OOB AF session: 8 sec */
+
+/* Max size of the NAN OOB AF information in the IOVAR */
+#define WL_NAN_CFG_OOB_AF_MAX_INFO_SIZE \
+ (sizeof(bcm_xtlv_t) + sizeof(wl_nan_oob_af_t) + \
+ WL_NAN_CFG_OOB_AF_BODY_MAX_SIZE)
+/*
+ * TLVs - Below XTLV definitions will be deprecated
+ * in due course (as soon as all other branches update
+ * to the comp-ID-based XTLVs listed below).
+ */
+enum wl_nan_cmd_xtlv_id {
+ WL_NAN_XTLV_MAC_ADDR = 0x120,
+ WL_NAN_XTLV_MATCH_RX = 0x121,
+ WL_NAN_XTLV_MATCH_TX = 0x122,
+ WL_NAN_XTLV_SVC_INFO = 0x123,
+ WL_NAN_XTLV_SVC_NAME = 0x124,
+ WL_NAN_XTLV_SR_FILTER = 0x125,
+ WL_NAN_XTLV_FOLLOWUP = 0x126,
+ WL_NAN_XTLV_SVC_LIFE_COUNT = 0x127,
+ WL_NAN_XTLV_AVAIL = 0x128,
+ WL_NAN_XTLV_SDF_RX = 0x129,
+ WL_NAN_XTLV_SDE_CONTROL = 0x12a,
+ WL_NAN_XTLV_SDE_RANGE_LIMIT = 0x12b,
+ WL_NAN_XTLV_NAN_AF = 0x12c,
+ WL_NAN_XTLV_SD_TERMINATE = 0x12d,
+ WL_NAN_XTLV_CLUSTER_ID = 0x12e,
+ WL_NAN_XTLV_PEER_RSSI = 0x12f,
+ WL_NAN_XTLV_BCN_RX = 0x130,
+ WL_NAN_XTLV_REPLIED = 0x131, /* Publish sent for a subscribe */
+ WL_NAN_XTLV_RECEIVED = 0x132, /* FUP Received */
+ WL_NAN_XTLV_DISC_RESULTS = 0x133, /* Discovery results */
+ WL_NAN_XTLV_TXS = 0x134 /* TX status */
+};
+
+#define WL_NAN_CMD_GLOBAL 0x00
+#define WL_NAN_CMD_CFG_COMP_ID 0x01
+#define WL_NAN_CMD_ELECTION_COMP_ID 0x02
+#define WL_NAN_CMD_SD_COMP_ID 0x03
+#define WL_NAN_CMD_SYNC_COMP_ID 0x04
+#define WL_NAN_CMD_DATA_COMP_ID 0x05
+#define WL_NAN_CMD_DAM_COMP_ID 0x06
+#define WL_NAN_CMD_RANGE_COMP_ID 0x07
+#define WL_NAN_CMD_GENERIC_COMP_ID 0x08
+#define WL_NAN_CMD_SCHED_COMP_ID 0x09
+#define WL_NAN_CMD_NSR_COMP_ID 0x0a /* NAN Save Restore */
+#define WL_NAN_CMD_NANHO_COMP_ID 0x0b /* NAN Host offload */
+#define WL_NAN_CMD_DBG_COMP_ID 0x0f
+
+#define WL_NAN_CMD_COMP_SHIFT 8
+#define NAN_CMD(x, y) (((x) << WL_NAN_CMD_COMP_SHIFT) | (y))
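+
+/*
+ * Illustrative compile-time check (a sketch): NAN_CMD() packs an 8-bit
+ * component ID above an 8-bit sub-command/TLV ID, e.g.
+ * NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x02) = (0x03 << 8) | 0x02 = 0x0302.
+ */
+#if 0	/* example only */
+typedef char nan_cmd_pack_check[(NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x02) == 0x0302) ? 1 : -1];
+#endif	/* example only */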
+
+/*
+ * Module based NAN TLV IDs
+ */
+typedef enum wl_nan_tlv {
+
+ WL_NAN_XTLV_CFG_MATCH_RX = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x01),
+ WL_NAN_XTLV_CFG_MATCH_TX = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x02),
+ WL_NAN_XTLV_CFG_SR_FILTER = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x03),
+ WL_NAN_XTLV_CFG_SVC_NAME = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x04),
+ WL_NAN_XTLV_CFG_NAN_STATUS = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x05),
+ WL_NAN_XTLV_CFG_SVC_LIFE_COUNT = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x06),
+ WL_NAN_XTLV_CFG_SVC_HASH = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x07),
+ WL_NAN_XTLV_CFG_SEC_CSID = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x08), /* Security CSID */
+ WL_NAN_XTLV_CFG_SEC_PMK = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x09), /* Security PMK */
+ WL_NAN_XTLV_CFG_SEC_PMKID = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x0A),
+ WL_NAN_XTLV_CFG_SEC_SCID = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x0B),
+ WL_NAN_XTLV_CFG_VNDR_PAYLOAD = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x0C),
+ WL_NAN_XTLV_CFG_HOST_INDPID = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x0D),
+ /* when host ndpid is used */
+ WL_NAN_XTLV_CFG_MAC_ADDR = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x0E),
+ /* fast disc time bitmap config */
+ WL_NAN_XTLV_CFG_FDISC_TBMP = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x0F),
+
+ WL_NAN_XTLV_SD_SVC_INFO = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x01),
+ WL_NAN_XTLV_SD_FOLLOWUP = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x02),
+ WL_NAN_XTLV_SD_SDF_RX = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x03),
+ WL_NAN_XTLV_SD_SDE_CONTROL = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x04),
+ WL_NAN_XTLV_SD_SDE_RANGE_LIMIT = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x05),
+ WL_NAN_XTLV_SD_NAN_AF = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x06),
+ WL_NAN_XTLV_SD_TERM = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x07),
+ WL_NAN_XTLV_SD_REPLIED = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x08), /* Pub sent */
+ WL_NAN_XTLV_SD_FUP_RECEIVED = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x09), /* FUP Received */
+ WL_NAN_XTLV_SD_DISC_RESULTS = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x0A), /* Pub RX */
+ WL_NAN_XTLV_SD_TXS = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x0B), /* Tx status */
+ WL_NAN_XTLV_SD_SDE_SVC_INFO = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x0C),
+ WL_NAN_XTLV_SD_SDE_SVC_UPD_IND = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x0D),
+ WL_NAN_XTLV_SD_SVC_NDI = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x0E),
+ WL_NAN_XTLV_SD_NDP_SPEC_INFO = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x0F),
+ WL_NAN_XTLV_SD_NDPE_TLV_LIST = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x10),
+ WL_NAN_XTLV_SD_NDL_QOS_UPD = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x11),
+ WL_NAN_XTLV_SD_DISC_CACHE_TIMEOUT = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x12),
+ WL_NAN_XTLV_SD_PEER_NMI = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x13),
+
+ WL_NAN_XTLV_SYNC_BCN_RX = NAN_CMD(WL_NAN_CMD_SYNC_COMP_ID, 0x01),
+ WL_NAN_XTLV_EV_MR_CHANGED = NAN_CMD(WL_NAN_CMD_SYNC_COMP_ID, 0x02),
+
+ WL_NAN_XTLV_DATA_DP_END = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x01),
+ WL_NAN_XTLV_DATA_DP_INFO = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x02),
+ WL_NAN_XTLV_DATA_DP_SEC_INST = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x03),
+ WL_NAN_XTLV_DATA_DP_TXS = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x04), /* txs for dp */
+ WL_NAN_XTLV_DATA_DP_OPAQUE_INFO = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x05),
+ WL_NAN_XTLV_RANGE_INFO = NAN_CMD(WL_NAN_CMD_RANGE_COMP_ID, 0x01),
+ WL_NAN_XTLV_RNG_TXS = NAN_CMD(WL_NAN_CMD_RANGE_COMP_ID, 0x02),
+
+ WL_NAN_XTLV_EV_SLOT_INFO = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x01),
+ WL_NAN_XTLV_EV_GEN_INFO = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x02),
+ WL_NAN_XTLV_CCA_STATS = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x03),
+ WL_NAN_XTLV_PER_STATS = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x04),
+ WL_NAN_XTLV_CHBOUND_INFO = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x05),
+ WL_NAN_XTLV_SLOT_STATS = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x06),
+
+ WL_NAN_XTLV_DAM_NA_ATTR = NAN_CMD(WL_NAN_CMD_DAM_COMP_ID, 0x01), /* na attr */
+ WL_NAN_XTLV_HOST_ASSIST_REQ = NAN_CMD(WL_NAN_CMD_DAM_COMP_ID, 0x02), /* host assist */
+
+ /* wl_nan_fw_cap_t */
+ WL_NAN_XTLV_GEN_FW_CAP = NAN_CMD(WL_NAN_CMD_GENERIC_COMP_ID, 0x01),
+ /* wl_nan_fw_cap_v2_t */
+ WL_NAN_XTLV_GEN_FW_CAP_V2 = NAN_CMD(WL_NAN_CMD_GENERIC_COMP_ID, 0x02),
+
+ /* NAN OOB AF tlv */
+ WL_NAN_XTLV_OOB_AF = NAN_CMD(WL_NAN_CMD_GENERIC_COMP_ID, 0x03),
+
+ WL_NAN_XTLV_SCHED_INFO = NAN_CMD(WL_NAN_CMD_SCHED_COMP_ID, 0x01),
+
+ /* Nan Save-Restore XTLVs */
+ WL_NAN_XTLV_NSR2_PEER = NAN_CMD(WL_NAN_CMD_NSR_COMP_ID, 0x21),
+ WL_NAN_XTLV_NSR2_NDP = NAN_CMD(WL_NAN_CMD_NSR_COMP_ID, 0x22),
+
+ /* Host offload XTLVs */
+ WL_NAN_XTLV_NANHO_PEER_ENTRY = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x01),
+ WL_NAN_XTLV_NANHO_DCAPLIST = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x02),
+ WL_NAN_XTLV_NANHO_DCSLIST = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x03),
+ WL_NAN_XTLV_NANHO_BLOB = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x04),
+ WL_NAN_XTLV_NANHO_NDP_STATE = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x05),
+ WL_NAN_XTLV_NANHO_FRM_TPLT = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x06),
+ WL_NAN_XTLV_NANHO_OOB_NAF = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x07),
+ WL_NAN_XTLV_NANHO_LOG_ERR_CTRL = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x08),
+ WL_NAN_XTLV_NANHO_LOG_DBG_CTRL = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x09),
+ WL_NAN_XTLV_NANHO_OOB_TXS = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x0A),
+ WL_NAN_XTLV_NANHO_DCAP_ATTR = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x0B),
+ WL_NAN_XTLV_NANHO_ELEM_ATTR = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x0C),
+ WL_NAN_XTLV_NANHO_SEC_SA = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x0D)
+} wl_nan_tlv_t;
+
+/* Sub Module ID's for NAN */
+enum {
+ NAN_MAC = 0, /* nan mac */
+ NAN_DISC = 1, /* nan discovery */
+ NAN_DBG = 2, /* nan debug */
+ NAN_SCHED = 3, /* nan sched */
+ NAN_PEER_ENTRY = 4, /* nan peer entry */
+ NAN_AVAIL = 5, /* nan avail */
+ NAN_DAM = 6, /* nan dam */
+ NAN_FSM = 7, /* nan fsm registry */
+ NAN_NDP = 8, /* nan ndp */
+ NAN_NDL = 9, /* nan ndl */
+ NAN_DP = 10, /* nan dp core */
+ NAN_RNG = 11, /* nan ranging */
+ NAN_SEC = 12, /* nan sec */
+ NAN_LAST = 13
+};
+
+enum wl_nan_sub_cmd_xtlv_id {
+
+ /* Special command - Tag zero */
+ WL_NAN_CMD_GLB_NAN_VER = NAN_CMD(WL_NAN_CMD_GLOBAL, 0x00),
+
+ /* nan cfg sub-commands */
+
+ WL_NAN_CMD_CFG_NAN_INIT = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x01),
+ WL_NAN_CMD_CFG_ROLE = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x02),
+ WL_NAN_CMD_CFG_HOP_CNT = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x03),
+ WL_NAN_CMD_CFG_HOP_LIMIT = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x04),
+ WL_NAN_CMD_CFG_WARMUP_TIME = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x05),
+ WL_NAN_CMD_CFG_STATUS = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x06),
+ WL_NAN_CMD_CFG_OUI = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x07),
+ WL_NAN_CMD_CFG_COUNT = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x08),
+ WL_NAN_CMD_CFG_CLEARCOUNT = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x09),
+ WL_NAN_CMD_CFG_CHANNEL = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x0A),
+ WL_NAN_CMD_CFG_BAND = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x0B),
+ WL_NAN_CMD_CFG_CID = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x0C),
+ WL_NAN_CMD_CFG_IF_ADDR = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x0D),
+ WL_NAN_CMD_CFG_BCN_INTERVAL = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x0E),
+ WL_NAN_CMD_CFG_SDF_TXTIME = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x0F),
+ WL_NAN_CMD_CFG_SID_BEACON = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x10),
+ WL_NAN_CMD_CFG_DW_LEN = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x11),
+ WL_NAN_CMD_CFG_AVAIL = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x12),
+ WL_NAN_CMD_CFG_WFA_TM = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x13),
+ WL_NAN_CMD_CFG_EVENT_MASK = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x14),
+ WL_NAN_CMD_CFG_NAN_CONFIG = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x15), /* ctrl */
+ WL_NAN_CMD_CFG_NAN_ENAB = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x16),
+ WL_NAN_CMD_CFG_ULW = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x17),
+ WL_NAN_CMD_CFG_NAN_CONFIG2 = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x18), /* ctrl2 */
+ WL_NAN_CMD_CFG_DEV_CAP = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x19),
+ WL_NAN_CMD_CFG_SCAN_PARAMS = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x1A),
+ WL_NAN_CMD_CFG_VNDR_PAYLOAD = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x1B),
+ WL_NAN_CMD_CFG_FASTDISC = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x1C),
+ WL_NAN_CMD_CFG_MIN_TX_RATE = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x1D),
+ WL_NAN_CMD_CFG_FSM_TIMEOUT = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x1E),
+ WL_NAN_CMD_CFG_MAX = WL_NAN_CMD_CFG_FSM_TIMEOUT,
+
+ /* Add new commands before and update */
+
+ /* nan election sub-commands */
+ WL_NAN_CMD_ELECTION_HOST_ENABLE = NAN_CMD(WL_NAN_CMD_ELECTION_COMP_ID, 0x01),
+ WL_NAN_CMD_ELECTION_METRICS_CONFIG = NAN_CMD(WL_NAN_CMD_ELECTION_COMP_ID, 0x02),
+ WL_NAN_CMD_ELECTION_METRICS_STATE = NAN_CMD(WL_NAN_CMD_ELECTION_COMP_ID, 0x03),
+ WL_NAN_CMD_ELECTION_LEAVE = NAN_CMD(WL_NAN_CMD_ELECTION_COMP_ID, 0x03),
+ WL_NAN_CMD_ELECTION_MERGE = NAN_CMD(WL_NAN_CMD_ELECTION_COMP_ID, 0x04),
+ WL_NAN_CMD_ELECTION_ADVERTISERS = NAN_CMD(WL_NAN_CMD_ELECTION_COMP_ID, 0x05),
+ WL_NAN_CMD_ELECTION_RSSI_THRESHOLD = NAN_CMD(WL_NAN_CMD_ELECTION_COMP_ID, 0x06),
+ WL_NAN_CMD_ELECTION_MAX = WL_NAN_CMD_ELECTION_RSSI_THRESHOLD,
+ /* New commands go before and update */
+
+ /* nan SD sub-commands */
+ WL_NAN_CMD_SD_PARAMS = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x01),
+ WL_NAN_CMD_SD_PUBLISH = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x02),
+ WL_NAN_CMD_SD_PUBLISH_LIST = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x03),
+ WL_NAN_CMD_SD_CANCEL_PUBLISH = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x04),
+ WL_NAN_CMD_SD_SUBSCRIBE = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x05),
+ WL_NAN_CMD_SD_SUBSCRIBE_LIST = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x06),
+ WL_NAN_CMD_SD_CANCEL_SUBSCRIBE = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x07),
+ WL_NAN_CMD_SD_VND_INFO = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x08),
+ WL_NAN_CMD_SD_STATS = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x09),
+ WL_NAN_CMD_SD_TRANSMIT = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x0A),
+ WL_NAN_CMD_SD_FUP_TRANSMIT = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x0B),
+ WL_NAN_CMD_SD_CONNECTION = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x0C),
+ WL_NAN_CMD_SD_SHOW = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x0D),
+ WL_NAN_CMD_SD_DISC_CACHE_TIMEOUT = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x0E),
+ WL_NAN_CMD_SD_DISC_CACHE_CLEAR = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x0F),
+ WL_NAN_CMD_SD_MAX = WL_NAN_CMD_SD_DISC_CACHE_CLEAR,
+
+ /* nan time sync sub-commands */
+
+ WL_NAN_CMD_SYNC_SOCIAL_CHAN = NAN_CMD(WL_NAN_CMD_SYNC_COMP_ID, 0x01),
+ WL_NAN_CMD_SYNC_AWAKE_DWS = NAN_CMD(WL_NAN_CMD_SYNC_COMP_ID, 0x02),
+ WL_NAN_CMD_SYNC_BCN_RSSI_NOTIF_THRESHOLD = NAN_CMD(WL_NAN_CMD_SYNC_COMP_ID, 0x03),
+ WL_NAN_CMD_SYNC_MAX = WL_NAN_CMD_SYNC_BCN_RSSI_NOTIF_THRESHOLD,
+
+ /* nan2 commands */
+ WL_NAN_CMD_DATA_CONFIG = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x01),
+ WL_NAN_CMD_DATA_RSVD02 = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x02),
+ WL_NAN_CMD_DATA_RSVD03 = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x03),
+ WL_NAN_CMD_DATA_DATAREQ = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x04),
+ WL_NAN_CMD_DATA_DATARESP = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x05),
+ WL_NAN_CMD_DATA_DATAEND = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x06),
+ WL_NAN_CMD_DATA_SCHEDUPD = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x07),
+ WL_NAN_CMD_DATA_RSVD08 = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x08),
+	WL_NAN_CMD_DATA_CAP = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x09),
+ WL_NAN_CMD_DATA_STATUS = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x0A),
+ WL_NAN_CMD_DATA_STATS = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x0B),
+ WL_NAN_CMD_DATA_RSVD0C = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x0C),
+ WL_NAN_CMD_DATA_NDP_SHOW = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x0D),
+ WL_NAN_CMD_DATA_DATACONF = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x0E),
+ WL_NAN_CMD_DATA_MIN_TX_RATE = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x0F),
+ WL_NAN_CMD_DATA_MAX_PEERS = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x10),
+ WL_NAN_CMD_DATA_DP_IDLE_PERIOD = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x11),
+ WL_NAN_CMD_DATA_DP_OPAQUE_INFO = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x12),
+ WL_NAN_CMD_DATA_DP_HB_DURATION = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x13),
+ WL_NAN_CMD_DATA_DATAEND_V2 = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x14),
+ WL_NAN_CMD_DATA_PATH_MAX = WL_NAN_CMD_DATA_DATAEND_V2, /* New ones before and update */
+
+ /* nan dam sub-commands */
+ WL_NAN_CMD_DAM_CFG = NAN_CMD(WL_NAN_CMD_DAM_COMP_ID, 0x01),
+ WL_NAN_CMD_DAM_MAX = WL_NAN_CMD_DAM_CFG, /* New ones before and update */
+
+ /* nan2.0 ranging commands */
+ WL_NAN_CMD_RANGE_REQUEST = NAN_CMD(WL_NAN_CMD_RANGE_COMP_ID, 0x01),
+ WL_NAN_CMD_RANGE_AUTO = NAN_CMD(WL_NAN_CMD_RANGE_COMP_ID, 0x02),
+ WL_NAN_CMD_RANGE_RESPONSE = NAN_CMD(WL_NAN_CMD_RANGE_COMP_ID, 0x03),
+ WL_NAN_CMD_RANGE_CANCEL = NAN_CMD(WL_NAN_CMD_RANGE_COMP_ID, 0x04),
+ WL_NAN_CMD_RANGE_IDLE_COUNT = NAN_CMD(WL_NAN_CMD_RANGE_COMP_ID, 0x05),
+ WL_NAN_CMD_RANGE_CANCEL_EXT = NAN_CMD(WL_NAN_CMD_RANGE_COMP_ID, 0x06),
+
+ /* nan debug sub-commands */
+ WL_NAN_CMD_DBG_SCAN_PARAMS = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x01),
+ WL_NAN_CMD_DBG_SCAN = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x02),
+ WL_NAN_CMD_DBG_SCAN_RESULTS = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x03),
+ /* This is now moved under CFG */
+ WL_NAN_CMD_DBG_EVENT_MASK = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x04),
+ WL_NAN_CMD_DBG_EVENT_CHECK = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x05),
+ WL_NAN_CMD_DBG_DUMP = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x06),
+ WL_NAN_CMD_DBG_CLEAR = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x07),
+ WL_NAN_CMD_DBG_RSSI = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x08),
+ WL_NAN_CMD_DBG_DEBUG = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x09),
+ WL_NAN_CMD_DBG_TEST1 = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x0A),
+ WL_NAN_CMD_DBG_TEST2 = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x0B),
+ WL_NAN_CMD_DBG_TEST3 = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x0C),
+ WL_NAN_CMD_DBG_DISC_RESULTS = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x0D),
+ WL_NAN_CMD_DBG_STATS = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x0E),
+ WL_NAN_CMD_DBG_LEVEL = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x0F),
+ WL_NAN_CMD_DBG_MAX = WL_NAN_CMD_DBG_LEVEL, /* New ones before and update */
+
+	/* Generic component */
+ WL_NAN_CMD_GEN_STATS = NAN_CMD(WL_NAN_CMD_GENERIC_COMP_ID, 0x01),
+ WL_NAN_CMD_GEN_FW_CAP = NAN_CMD(WL_NAN_CMD_GENERIC_COMP_ID, 0x02),
+ WL_NAN_CMD_CFG_OOB_AF = NAN_CMD(WL_NAN_CMD_GENERIC_COMP_ID, 0x03),
+ WL_NAN_CMD_GEN_MAX = WL_NAN_CMD_CFG_OOB_AF,
+
+ /* NAN Save-Restore */
+ WL_NAN_CMD_NSR2 = NAN_CMD(WL_NAN_CMD_NSR_COMP_ID, 0x20),
+ WL_NAN_CMD_NSR2_MAX = WL_NAN_CMD_NSR2,
+
+ /* Host offload sub-commands */
+ WL_NAN_CMD_NANHO_UPDATE = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x01), /* WILL BE REMOVED */
+ WL_NAN_CMD_NANHO_INFO = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x01),
+ WL_NAN_CMD_NANHO_FRM_TPLT = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x02), /* unused */
+ WL_NAN_CMD_NANHO_OOB_NAF = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x03), /* unused */
+ WL_NAN_CMD_NANHO_LOG_CTRL = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x04),
+ WL_NAN_CMD_NANHO_VER = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x05),
+ WL_NAN_CMD_NANHO_MAX = WL_NAN_CMD_NANHO_VER,
+
+ /* Add submodules below, and update WL_NAN_CMD_MAX */
+
+ /* ROML check for this enum should use WL_NAN_CMD_MAX */
+ WL_NAN_CMD_MAX = WL_NAN_CMD_NANHO_MAX
+};
+
+/*
+ * Component/Module based NAN TLV IDs for NAN stats
+ */
+typedef enum wl_nan_stats_tlv {
+ WL_NAN_XTLV_SYNC_MAC_STATS = NAN_CMD(WL_NAN_CMD_SYNC_COMP_ID, 0x01),
+
+ WL_NAN_XTLV_SD_DISC_STATS = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x01),
+
+ WL_NAN_XTLV_DATA_NDP_STATS = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x01),
+ WL_NAN_XTLV_DATA_NDL_STATS = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x02),
+ WL_NAN_XTLV_DATA_SEC_STATS = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x03),
+
+ WL_NAN_XTLV_GEN_SCHED_STATS = NAN_CMD(WL_NAN_CMD_GENERIC_COMP_ID, 0x01),
+ WL_NAN_XTLV_GEN_PEER_STATS = NAN_CMD(WL_NAN_CMD_GENERIC_COMP_ID, 0x02),
+ WL_NAN_XTLV_GEN_PEER_STATS_DEVCAP = NAN_CMD(WL_NAN_CMD_GENERIC_COMP_ID, 0x03),
+ WL_NAN_XTLV_GEN_PEER_STATS_NDP = NAN_CMD(WL_NAN_CMD_GENERIC_COMP_ID, 0x04),
+ WL_NAN_XTLV_GEN_PEER_STATS_SCHED = NAN_CMD(WL_NAN_CMD_GENERIC_COMP_ID, 0x05),
+ WL_NAN_XTLV_GEN_AVAIL_STATS_SCHED = NAN_CMD(WL_NAN_CMD_GENERIC_COMP_ID, 0x06),
+ WL_NAN_XTLV_GEN_NDP_STATS = NAN_CMD(WL_NAN_CMD_GENERIC_COMP_ID, 0x07),
+ WL_NAN_XTLV_GEN_PARSE_STATS = NAN_CMD(WL_NAN_CMD_GENERIC_COMP_ID, 0x08),
+
+ WL_NAN_XTLV_DAM_STATS = NAN_CMD(WL_NAN_CMD_DAM_COMP_ID, 0x01),
+ WL_NAN_XTLV_DAM_AVAIL_STATS = NAN_CMD(WL_NAN_CMD_DAM_COMP_ID, 0x02),
+
+ WL_NAN_XTLV_RANGE_STATS = NAN_CMD(WL_NAN_CMD_RANGE_COMP_ID, 0x01)
+} wl_nan_stats_tlv_t;
+
+/* NAN stats WL_NAN_CMD_GEN_STATS command */
+/* Input data */
+typedef struct wl_nan_cmn_get_stat {
+ uint32 modules_btmap; /* Bitmap to indicate module stats are needed:
+ * See NAN Sub Module ID's above
+ */
+ uint8 operation; /* Get, Get and Clear */
+ uint8 arg1; /* Submodule control variable1 */
+ uint8 arg2; /* Submodule control variable2 */
+	uint8 pad; /* May not be needed as TLVs are aligned; added to pass compile check */
+} wl_nan_cmn_get_stat_t;
+
+/* Output for Stats container */
+typedef struct wl_nan_cmn_stat {
+ uint32 n_stats; /* Number of different sub TLV stats present in the container */
+ uint32 totlen; /* Total Length of stats data in container */
+ uint8 stats_tlvs []; /* Stat TLV's container */
+} wl_nan_cmn_stat_t;
+
+/* Defines for operation */
+#define WLA_NAN_STATS_GET 0
+#define WLA_NAN_STATS_GET_CLEAR 1
+
+#define WL_NAN_STAT_ALL 0xFFFFFFFF
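+
+/*
+ * Illustrative sketch (the bitmap-to-submodule mapping is an assumption based
+ * on the comment in wl_nan_cmn_get_stat_t): requesting MAC and scheduler
+ * stats without clearing them.
+ */
+#if 0	/* example only */
+static void
+nan_stats_req(wl_nan_cmn_get_stat_t *gs)
+{
+	gs->modules_btmap = (1u << NAN_MAC) | (1u << NAN_SCHED);
+	gs->operation = WLA_NAN_STATS_GET;
+}
+#endif	/* example only */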
+
+/* NAN Mac stats */
+
+typedef struct wl_nan_mac_band_stats {
+ uint32 bcn_tx; /* 2g/5g disc/sync beacon tx count */
+ uint32 bcn_rx; /* 2g/5g disc/sync beacon rx count */
+ uint32 dws; /* Number of 2g/5g DW's */
+} wl_nan_mac_band_stats_t;
+
+/* Note: if this struct is changing update wl_nan_slot_ecounters_vX_t version,
+ * as this struct is sent as payload in wl_nan_slot_ecounters_vX_t
+ */
+typedef struct wl_nan_mac_stats {
+ wl_nan_mac_band_stats_t band[NAN_MAX_BANDS]; /* MAC sync band specific stats */
+ uint32 naf_tx; /* NAN AF tx */
+ uint32 naf_rx; /* NAN AF rx */
+ uint32 sdf_tx; /* SDF tx */
+ uint32 sdf_rx; /* SDF rx */
+ uint32 cnt_sync_bcn_rx_tu[3]; /* delta bw */
+ uint32 cnt_bcn_tx_out_dw; /* TX sync beacon outside dw */
+ uint32 cnt_role_am_dw; /* anchor master role due to dw */
+ uint32 cnt_am_hop_err; /* wrong hopcount set for AM */
+} wl_nan_mac_stats_t;
+
+typedef struct wl_nan_mac_stats_v1 {
+ wl_nan_mac_band_stats_t band[NAN_MAX_BANDS]; /* MAC sync band specific stats */
+ uint32 naf_tx; /* NAN AF tx */
+ uint32 naf_rx; /* NAN AF rx */
+ uint32 sdf_tx; /* SDF tx */
+ uint32 sdf_rx; /* SDF rx */
+ uint32 cnt_sync_bcn_rx_tu[3]; /* delta bw */
+ uint32 cnt_bcn_tx_out_dw; /* TX sync beacon outside dw */
+ uint32 cnt_role_am_dw; /* anchor master role due to dw */
+ uint32 cnt_am_hop_err; /* wrong hopcount set for AM */
+ uint32 merge_scan_cnt_2g; /* 2G band merge scan cnt */
+ uint32 merge_scan_cnt_5g; /* 5G band merge scan cnt */
+} wl_nan_mac_stats_v1_t;
+
+/* NAN Sched stats */
+/* Per core Sched stats */
+typedef struct nan_sched_stats_core {
+ uint32 slotstart; /* slot_start */
+ uint32 slotend; /* slot_end */
+ uint32 slotskip; /* slot_skip */
+ uint32 slotstart_partial; /* slot resume */
+ uint32 slotend_partial; /* slot pre-empt */
+ uint8 avail_upd_cnt; /* count to track num of times avail has been updated */
+ uint8 pad[3];
+} nan_sched_stats_core_t;
+/* Common Sched stats */
+typedef struct nan_sched_stats_cmn {
+ uint32 slot_adj_dw; /* Slot adjusts due to DW changes */
+ uint32 slot_dur; /* Total slot duration in TU's */
+} nan_sched_stats_cmn_t;
+
+/* Note: if this struct is changing update wl_nan_slot_ecounters_vX_t version,
+ * as this struct is sent as payload in wl_nan_slot_ecounters_vX_t
+ */
+typedef struct nan_sched_stats {
+ nan_sched_stats_cmn_t cmn;
+ nan_sched_stats_core_t slice[MAX_NUM_D11CORES];
+} nan_sched_stats_t;
+/* End NAN Sched stats */
+
+/* NAN Discovery stats */
+typedef struct nan_disc_stats {
+ uint32 pub_tx; /* Publish tx */
+ uint32 pub_rx; /* Publish rx */
+ uint32 sub_tx; /* Subscribe tx */
+ uint32 sub_rx; /* Subscribe rx */
+ uint32 fup_tx; /* Followup tx */
+ uint32 fup_rx; /* Followup rx */
+ uint32 pub_resp_ignored; /* response to incoming publish ignored */
+ uint32 sub_resp_ignored; /* response to incoming subscribe ignored */
+} nan_disc_stats_t;
+/* NAN Discovery stats end */
+
+/* statistics for nan sec */
+typedef struct nan_sec_stats_s {
+ uint32 mic_fail; /* rx mic fail */
+ uint32 replay_fail; /* replay counter */
+ uint32 tx_fail; /* tx fail (from txstatus) */
+ uint32 key_info_err; /* key info field err */
+ uint32 ok_sessions; /* successful mx negotiations */
+ uint32 fail_sessions; /* failed sessions */
+ uint32 keydesc_err; /* key desc error */
+ uint32 invalid_cipher; /* cipher suite not valid */
+ uint32 pmk_not_found; /* no pmk found for given service or for any reason */
+	uint32 no_pmk_for_pmkid; /* no pmk found for given pmkid */
+ uint32 key_install_err; /* failed to install keys */
+ uint32 no_keydesc_attr; /* key desc attr missing */
+ uint32 nonce_mismatch; /* nonce mismatch */
+} nan_sec_stats_t;
+
+/* WL_NAN_XTLV_GEN_PEER_STATS */
+typedef struct wl_nan_peer_stats {
+ struct ether_addr nmi;
+ uint8 pad[2];
+ uint32 pkt_enq; /* counter for queued pkt of peer */
+
+ /* NDL */
+ bool ndl_exist;
+ uint8 ndl_state;
+ bool counter_proposed;
+ uint8 pad1;
+
+ /* NDL QoS */
+ uint16 local_max_latency;
+ uint16 peer_max_latency;
+ uint8 local_min_slots;
+ uint8 peer_min_slots;
+
+ /* security association */
+ struct ether_addr sec_laddr; /* local mac addr */
+ struct ether_addr sec_raddr; /* remote mac addr */
+ uint8 sec_csid;
+ uint8 pad2;
+} wl_nan_peer_stats_t;
+
+/* WL_NAN_XTLV_GEN_PEER_STATS_DEVCAP */
+typedef struct wl_nan_peer_stats_dev_cap {
+ uint8 mapid;
+ uint8 awake_dw_2g;
+ uint8 awake_dw_5g;
+ uint8 bands_supported;
+ uint8 op_mode;
+ uint8 num_antennas;
+ uint16 chan_switch_time;
+ uint8 capabilities;
+ uint8 pad[3];
+} wl_nan_peer_stats_dev_cap_t;
+
+/* WL_NAN_XTLV_GEN_PEER_STATS_NDP */
+typedef struct wl_nan_peer_stats_ndp {
+ uint8 peer_role;
+ uint8 ndp_state;
+ uint8 indp_id; /* initiator ndp id */
+ uint8 ndp_ctrl; /* ndp control field */
+ struct ether_addr peer_nmi;
+ struct ether_addr peer_ndi;
+ struct ether_addr local_ndi;
+
+ /* peer scb info */
+ bool scb_allocated;
+ bool scb_found;
+ uint32 scb_flags;
+ uint32 scb_flags2;
+ uint32 scb_flags3;
+} wl_nan_peer_stats_ndp_t;
+
+enum {
+ WL_NAN_SCHED_STAT_SLOT_COMM = 0x01, /* Committed slot */
+ WL_NAN_SCHED_STAT_SLOT_COND = 0x02, /* Conditional slot(proposal/counter) */
+ WL_NAN_SCHED_STAT_SLOT_NDC = 0x04, /* NDC slot */
+ WL_NAN_SCHED_STAT_SLOT_IMMUT = 0x08, /* Immutable slot */
+ WL_NAN_SCHED_STAT_SLOT_RANGE = 0x10, /* Ranging slot */
+};
+typedef uint16 wl_nan_stats_sched_slot_info_t;
+
+typedef struct wl_nan_stats_sched_slot {
+ wl_nan_stats_sched_slot_info_t info; /* capture slot type and more info */
+ chanspec_t chanspec;
+} wl_nan_stats_sched_slot_t;
+
+/* WL_NAN_XTLV_GEN_PEER_STATS_SCHED, WL_NAN_XTLV_GEN_AVAIL_STATS_SCHED */
+typedef struct wl_nan_stats_sched {
+ uint8 map_id;
+ uint8 seq_id; /* seq id from NA attr */
+ uint8 slot_dur;
+ uint8 pad;
+ uint16 period;
+ uint16 num_slot;
+ wl_nan_stats_sched_slot_t slot[];
+} wl_nan_stats_sched_t;
+
+/* WL_NAN_XTLV_GEN_PEER_STATS_SCHED */
+typedef struct wl_nan_peer_stats_sched {
+ uint8 map_id;
+ uint8 seq_id; /* seq id from NA attr */
+ uint8 slot_dur;
+ uint8 pad;
+ uint16 period;
+ uint16 num_slot;
+ wl_nan_stats_sched_slot_t slot[];
+} wl_nan_peer_stats_sched_t;
+
+/* WL_NAN_XTLV_RANGE_STATS */
+typedef struct wl_nan_range_stats {
+ uint16 rng_ssn_estb;
+ uint16 rng_ssn_fail;
+ uint16 rng_sched_start;
+ uint16 rng_sched_end;
+	uint16 ftm_ssn_success; /* number of successful ftm sessions */
+ uint16 ftm_ssn_fail;
+ uint16 num_meas; /* number of ftm frames */
+ uint16 num_valid_meas; /* number of ftm frames with valid timestamp */
+} wl_nan_range_stats_t;
+
+/* defines for ndp stats flag */
+
+#define NAN_NDP_STATS_FLAG_ROLE_MASK 0x01
+#define NAN_NDP_STATS_FLAG_ROLE_INIT 0x00
+#define NAN_NDP_STATS_FLAG_ROLE_RESP 0x01
+
+#define NAN_NDP_STATS_STATE_BIT_SHIFT 1
+#define NAN_NDP_STATS_FLAG_STATE_MASK 0x07
+#define NAN_NDP_STATS_FLAG_STATE_IN_PROG 0x00
+#define NAN_NDP_STATS_FLAG_STATE_ESTB 0x01
+#define NAN_NDP_STATS_FLAG_STATE_TEARDOWN_WAIT 0x02
+/* More states can be added here, when needed */
+
+/* WL_NAN_XTLV_GEN_NDP_STATS */
+typedef struct wl_nan_ndp_stats_s {
+ uint8 ndp_id;
+ uint8 indp_id;
+ uint8 flags;
+ uint8 nan_sec_csid;
+ struct ether_addr lndi_addr;
+ struct ether_addr pnmi_addr;
+ struct ether_addr pndi_addr;
+ uint8 PAD[2];
+} wl_nan_ndp_stats_t;
+
+/* WL_NAN_XTLV_EV_SLOT_INFO */
+typedef struct wl_nan_slot_info_s {
+ /* dw slot start expected */
+ uint32 dwst_h;
+ uint32 dwst_l;
+ /* dw slot start actual */
+ uint32 act_dwst_h;
+ uint32 act_dwst_l;
+ uint16 cur_chan[MAX_NUM_D11CORES]; /* sdb channels */
+ uint16 dw_chan; /* dw channel */
+ uint8 dw_no; /* dw number */
+ uint8 slot_seq_no; /* slot seq no. */
+} wl_nan_slot_info_t;
+
+/* WL_NAN_XTLV_DAM_STATS */
+typedef struct wl_nan_dam_stats {
+ uint32 cnt_rej_crb_inuse; /* DAM rejections because of crb in use already */
+} wl_nan_dam_stats_t;
+
+/* WL_NAN_EVENT_MR_CHANGED */
+typedef uint8 wl_nan_mr_changed_t;
+#define WL_NAN_AMR_CHANGED 1
+#define WL_NAN_IMR_CHANGED 2
+
+/*
+ * The macro BCMUTILS_ERR_CODES is defined only
+ * when using the common header file(a new approach) bcmutils/include/bcmerror.h.
+ * Otherwise, use the error codes from this file.
+ */
+#ifndef BCMUTILS_ERR_CODES
+
+/** status - TBD BCME_ vs NAN status - range reserved for BCME_ */
+enum {
+ /* add new status here... */
+ WL_NAN_E_INVALID_TOKEN = -2135, /* invalid token or mismatch */
+ WL_NAN_E_INVALID_ATTR = -2134, /* generic invalid attr error */
+ WL_NAN_E_INVALID_NDL_ATTR = -2133, /* invalid NDL attribute */
+ WL_NAN_E_SCB_NORESOURCE = -2132, /* no more peer scb available */
+ WL_NAN_E_PEER_NOTAVAIL = -2131,
+ WL_NAN_E_SCB_EXISTS = -2130,
+ WL_NAN_E_INVALID_PEER_NDI = -2129,
+ WL_NAN_E_INVALID_LOCAL_NDI = -2128,
+ WL_NAN_E_ALREADY_EXISTS = -2127, /* generic NAN error for duplication */
+ WL_NAN_E_EXCEED_MAX_NUM_MAPS = -2126,
+ WL_NAN_E_INVALID_DEV_CHAN_SCHED = -2125,
+ WL_NAN_E_INVALID_PEER_BLOB_TYPE = -2124,
+ WL_NAN_E_INVALID_LCL_BLOB_TYPE = -2123,
+ WL_NAN_E_BCMC_PDPA = -2122, /* BCMC NAF PDPA */
+ WL_NAN_E_TIMEOUT = -2121,
+ WL_NAN_E_HOST_CFG = -2120,
+ WL_NAN_E_NO_ACK = -2119,
+ WL_NAN_E_SECINST_FAIL = -2118,
+ WL_NAN_E_REJECT_NDL = -2117, /* generic NDL rejection error */
+ WL_NAN_E_INVALID_NDP_ATTR = -2116,
+ WL_NAN_E_HOST_REJECTED = -2115,
+ WL_NAN_E_PCB_NORESOURCE = -2114,
+ WL_NAN_E_NDC_EXISTS = -2113,
+ WL_NAN_E_NO_NDC_ENTRY_AVAIL = -2112,
+ WL_NAN_E_INVALID_NDC_ENTRY = -2111,
+ WL_NAN_E_SD_TX_LIST_FULL = -2110,
+ WL_NAN_E_SVC_SUB_LIST_FULL = -2109,
+ WL_NAN_E_SVC_PUB_LIST_FULL = -2108,
+ WL_NAN_E_SDF_MAX_LEN_EXCEEDED = -2107,
+ WL_NAN_E_ZERO_CRB = -2106, /* no CRB between local and peer */
+ WL_NAN_E_PEER_NDC_NOT_SELECTED = -2105, /* peer ndc not selected */
+ WL_NAN_E_DAM_CHAN_CONFLICT = -2104, /* dam schedule channel conflict */
+ WL_NAN_E_DAM_SCHED_PERIOD = -2103, /* dam schedule period mismatch */
+ WL_NAN_E_LCL_NDC_NOT_SELECTED = -2102, /* local selected ndc not configured */
+ WL_NAN_E_NDL_QOS_INVALID_NA = -2101, /* na doesn't comply with ndl qos */
+ WL_NAN_E_CLEAR_NAF_WITH_SA_AS_RNDI = -2100, /* rx clear naf with peer rndi */
+ WL_NAN_E_SEC_CLEAR_PKT = -2099, /* rx clear pkt from a peer with sec_sa */
+ WL_NAN_E_PROT_NON_PDPA_NAF = -2098, /* rx protected non PDPA frame */
+ WL_NAN_E_DAM_DOUBLE_REMOVE = -2097, /* remove peer schedule already removed */
+ WL_NAN_E_DAM_DOUBLE_MERGE = -2096, /* merge peer schedule already merged */
+ WL_NAN_E_DAM_REJECT_INVALID = -2095, /* reject for invalid schedule */
+ WL_NAN_E_DAM_REJECT_RANGE = -2094,
+ WL_NAN_E_DAM_REJECT_QOS = -2093,
+ WL_NAN_E_DAM_REJECT_NDC = -2092,
+ WL_NAN_E_DAM_REJECT_PEER_IMMUT = -2091,
+ WL_NAN_E_DAM_REJECT_LCL_IMMUT = -2090,
+ WL_NAN_E_DAM_EXCEED_NUM_SCHED = -2089,
+ WL_NAN_E_DAM_INVALID_SCHED_MAP = -2088, /* invalid schedule map list */
+ WL_NAN_E_DAM_INVALID_LCL_SCHED = -2087,
+ WL_NAN_E_INVALID_MAP_ID = -2086,
+ WL_NAN_E_CHAN_OVERLAP_ACROSS_MAP = -2085,
+ WL_NAN_E_INVALID_CHAN_LIST = -2084,
+ WL_NAN_E_INVALID_RANGE_TBMP = -2083,
+ WL_NAN_E_INVALID_IMMUT_SCHED = -2082,
+ WL_NAN_E_INVALID_NDC_ATTR = -2081,
+ WL_NAN_E_INVALID_TIME_BITMAP = -2080,
+ WL_NAN_E_INVALID_NA_ATTR = -2079,
+ WL_NAN_E_NO_NA_ATTR_IN_AVAIL_MAP = -2078, /* no na attr saved in avail map */
+ WL_NAN_E_INVALID_MAP_IDX = -2077,
+ WL_NAN_E_SEC_SA_NOTFOUND = -2076,
+ WL_NAN_E_BSSCFG_NOTFOUND = -2075,
+ WL_NAN_E_SCB_NOTFOUND = -2074,
+ WL_NAN_E_NCS_SK_KDESC_TYPE = -2073,
+ WL_NAN_E_NCS_SK_KEY_DESC_VER = -2072, /* key descr ver */
+ WL_NAN_E_NCS_SK_KEY_TYPE = -2071, /* key descr type */
+ WL_NAN_E_NCS_SK_KEYINFO_FAIL = -2070, /* key info (generic) */
+ WL_NAN_E_NCS_SK_KEY_LEN = -2069, /* key len */
+ WL_NAN_E_NCS_SK_KDESC_NOT_FOUND = -2068, /* key desc not found */
+ WL_NAN_E_NCS_SK_INVALID_PARAMS = -2067, /* invalid args */
+ WL_NAN_E_NCS_SK_KDESC_INVALID = -2066, /* key descr is not valid */
+ WL_NAN_E_NCS_SK_NONCE_MISMATCH = -2065,
+ WL_NAN_E_NCS_SK_KDATA_SAVE_FAIL = -2064, /* not able to save key data */
+ WL_NAN_E_NCS_SK_AUTH_TOKEN_CALC_FAIL = -2063,
+ WL_NAN_E_NCS_SK_PTK_CALC_FAIL = -2062,
+ WL_NAN_E_INVALID_STARTOFFSET = -2061,
+ WL_NAN_E_BAD_NA_ENTRY_TYPE = -2060,
+ WL_NAN_E_INVALID_CHANBMP = -2059,
+ WL_NAN_E_INVALID_OP_CLASS = -2058,
+ WL_NAN_E_NO_IES = -2057,
+ WL_NAN_E_NO_PEER_ENTRY_AVAIL = -2056,
+ WL_NAN_E_INVALID_PEER = -2055,
+ WL_NAN_E_PEER_EXISTS = -2054,
+ WL_NAN_E_PEER_NOTFOUND = -2053,
+ WL_NAN_E_NO_MEM = -2052,
+ WL_NAN_E_INVALID_OPTION = -2051,
+ WL_NAN_E_INVALID_BAND = -2050,
+ WL_NAN_E_INVALID_MAC = -2049,
+ WL_NAN_E_BAD_INSTANCE = -2048,
+ /* NAN status code reserved from -2048 to -3071 */
+ /* Do NOT add new status below -2048 */
+ WL_NAN_E_ERROR = -1,
+ WL_NAN_E_OK = 0
+};
+
+#endif /* BCMUTILS_ERR_CODES */
+
+/* Error codes used in vendor specific attribute in Data Path Termination frames */
+enum {
+ WL_NAN_DPEND_E_OK = 0,
+ WL_NAN_DPEND_E_ERROR = 1,
+ WL_NAN_DPEND_E_HOST_CMD = 2,
+ WL_NAN_DPEND_E_HOST_REJECTED = 3, /* host rejected rx frame */
+ WL_NAN_DPEND_E_RESOURCE_LIMIT = 4,
+ WL_NAN_DPEND_E_NO_ACK_RCV = 5,
+ WL_NAN_DPEND_E_TIMEOUT = 6,
+ WL_NAN_DPEND_E_NO_ELT = 7, /* rx frame missing element container */
+ WL_NAN_DPEND_E_NO_NDP_ATTR = 8,
+ WL_NAN_DPEND_E_NO_AVAIL_ATTR = 9,
+ WL_NAN_DPEND_E_NO_NDC_ATTR = 10,
+ WL_NAN_DPEND_E_NO_RANGE_BM = 11,
+ WL_NAN_DPEND_E_INVALID_NDP_ATTR = 12,
+ WL_NAN_DPEND_E_INVALID_NDC_ATTR = 13,
+ WL_NAN_DPEND_E_INVALID_IMMUT = 14,
+ WL_NAN_DPEND_E_INVALID_NDL_QOS = 15,
+ WL_NAN_DPEND_E_INVALID_SEC_PARAMS = 16,
+ WL_NAN_DPEND_E_REJECT_AVAIL = 17,
+ WL_NAN_DPEND_E_REJECT_NDL = 18,
+ WL_NAN_DPEND_E_SCB_NORESOURCE = 19
+};
+
+typedef int32 wl_nan_status_t;
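+
+/*
+ * Usage sketch (illustrative only, not part of this header): NAN-specific
+ * status codes occupy the reserved range [-3071, -2048] and WL_NAN_E_OK (0)
+ * means success, so a caller could classify a status as below.
+ */
+#if 0 /* example only, not compiled */
+static bool
+wl_nan_status_is_nan_err(wl_nan_status_t status)
+{
+ return (status <= WL_NAN_E_BAD_INSTANCE && status >= -3071);
+}
+#endif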
+
+/** nan cmd list entry */
+enum wl_nan_sub_cmd_input_flags {
+ WL_NAN_SUB_CMD_FLAG_NONE = 0,
+ WL_NAN_SUB_CMD_FLAG_SKIP = 1, /* Skip to next sub-command on error */
+ WL_NAN_SUB_CMD_FLAG_TERMINATE = 2, /* Terminate processing and return */
+ WL_NAN_SUB_CMD_FLAG_LAST /* Keep this at the end */
+};
+
+/** container for nan events */
+typedef struct wl_nan_ioc {
+ uint16 version; /**< interface command or event version */
+ uint16 id; /**< nan ioctl cmd ID */
+ uint16 len; /**< total length of all tlv records in data[] */
+ uint16 pad; /**< pad for 32-bit alignment */
+ uint8 data []; /**< var len payload of bcm_xtlv_t type */
+} wl_nan_ioc_t;
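+
+/*
+ * Usage sketch (illustrative only): wrapping a caller-built xtlv blob in
+ * wl_nan_ioc_t before issuing the "nan" iovar. The version/cmd_id/xtlv
+ * arguments are hypothetical inputs supplied by the caller.
+ */
+#if 0 /* example only, not compiled */
+static void
+nan_ioc_fill(wl_nan_ioc_t *ioc, uint16 version, uint16 cmd_id,
+ const uint8 *xtlvs, uint16 xtlvs_len)
+{
+ ioc->version = version; /* interface command version */
+ ioc->id = cmd_id; /* e.g. WL_NAN_CMD_ENABLE */
+ ioc->len = xtlvs_len; /* total length of tlv records in data[] */
+ ioc->pad = 0;
+ (void)memcpy(ioc->data, xtlvs, xtlvs_len); /* bcm_xtlv_t records */
+}
+#endif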
+
+/*
+ * NAN sub-command data structures
+ */
+
+/*
+ * Config component WL_NAN_CMD_CFG_XXXX sub-commands
+ * WL_NAN_CMD_CFG_ENABLE
+ */
+enum wl_nan_config_state {
+ WL_NAN_CONFIG_STATE_DISABLE = 0,
+ WL_NAN_CONFIG_STATE_ENABLE = 1
+};
+
+typedef int8 wl_nan_config_state_t;
+
+/* WL_NAN_CMD_CFG_NAN_INIT */
+
+typedef uint8 wl_nan_init_t;
+
+/* WL_NAN_CMD_CFG_NAN_VERSION */
+typedef uint16 wl_nan_ver_t;
+
+/* WL_NAN_CMD_CFG_NAN_CONFIG */
+typedef uint32 wl_nan_cfg_ctrl_t;
+
+/* WL_NAN_CMD_CFG_NAN_CONFIG2 */
+typedef struct wl_nan_cfg_ctrl2 {
+ uint32 flags1; /* wl_nan_cfg_ctrl2_flags1 */
+ uint32 flags2; /* wl_nan_cfg_ctrl2_flags2 */
+} wl_nan_cfg_ctrl2_t;
+
+enum wl_nan_cfg_ctrl2_flags1 {
+ /* Allows unicast SDF TX while the local device is under NDP/NDL negotiation,
+ * but not with the peer the SDF is destined to.
+ */
+ WL_NAN_CTRL2_FLAG1_ALLOW_SDF_TX_UCAST_IN_PROG = 0x00000001,
+ /* Allows broadcast SDF TX while local device is under NDP/NDL negotiation */
+ WL_NAN_CTRL2_FLAG1_ALLOW_SDF_TX_BCAST_IN_PROG = 0x00000002,
+ /* Allows the device to send schedule update automatically on local schedule change */
+ WL_NAN_CTRL2_FLAG1_AUTO_SCHEDUPD = 0x00000004,
+ /* Allows the device to handle slot pre_close operations */
+ WL_NAN_CTRL2_FLAG1_SLOT_PRE_CLOSE = 0x00000008,
+ /* Control flag to enable/disable NDPE capability */
+ WL_NAN_CTRL2_FLAG1_NDPE_CAP = 0x00000010,
+ /* Control flag to enable/disable AUTO DAM LWT mode */
+ WL_NAN_CTRL2_FLAG1_AUTODAM_LWT_MODE = 0x00000020,
+ /* Control flag to enable/disable PMK per NDP */
+ WL_NAN_CTRL2_FLAG1_PMK_PER_NDP = 0x00000040,
+ /* Control flag to enable/disable allowing clear Schedule Update on Secured connection */
+ WL_NAN_CTRL2_FLAG1_SEC_ALLOW_CLEAR_SCHED_UPD_PKT = 0x00000080,
+ /* Control flag to disable/enable 5G FAW */
+ WL_NAN_CTRL2_FLAG1_DISABLE_5G_FAW = 0x00000100,
+ /* Control flag to disable/enable AUTO DAM 6G CAP */
+ WL_NAN_CTRL2_FLAG1_DISABLE_AUTODAM_6G_CAP = 0x00000200,
+ /* Control flag to disable/enable allowing of unsecured OOB AF in a secured connection */
+ WL_NAN_CTRL2_FLAG1_ALLOW_UNSECURED_OOB_AF = 0x00000400,
+ /* Control flag to enable/disable 6G FULL avail */
+ WL_NAN_CTRL2_FLAG1_6G_FULL_AVAIL = 0x00000800
+};
+#define WL_NAN_CTRL2_FLAGS1_MASK 0x00000FFF
+
+#define WL_NAN_CTRL2_FLAGS2_MASK 0x00000000
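+
+/*
+ * Usage sketch (illustrative only): composing cfg_ctrl2 flags and checking
+ * them against the masks above; bits outside a mask are undefined.
+ */
+#if 0 /* example only, not compiled */
+static int
+nan_cfg_ctrl2_fill(wl_nan_cfg_ctrl2_t *ctrl2)
+{
+ uint32 flags1 = WL_NAN_CTRL2_FLAG1_AUTO_SCHEDUPD |
+ WL_NAN_CTRL2_FLAG1_NDPE_CAP;
+
+ if (flags1 & ~WL_NAN_CTRL2_FLAGS1_MASK)
+ return -1; /* undefined flags1 bits */
+ ctrl2->flags1 = flags1;
+ ctrl2->flags2 = 0; /* no flags2 bits defined yet (mask is 0) */
+ return 0;
+}
+#endif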
+
+/*
+ * WL_NAN_CMD_CFG_BAND, WL_NAN_CMD_CFG_RSSI_THRESHOLD (Get only)
+ */
+typedef uint8 wl_nan_band_t;
+
+/*
+ * WL_NAN_CMD_CFG_ROLE
+ */
+enum wl_nan_role {
+ WL_NAN_ROLE_AUTO = 0,
+ WL_NAN_ROLE_NON_MASTER_NON_SYNC = 1,
+ WL_NAN_ROLE_NON_MASTER_SYNC = 2,
+ WL_NAN_ROLE_MASTER = 3,
+ WL_NAN_ROLE_ANCHOR_MASTER = 4
+};
+
+typedef uint8 wl_nan_role_t;
+
+typedef struct wl_nan_device_state
+{
+ wl_nan_role_t role; /* Sync Master, Non-Sync Master */
+ uint8 state; /* TBD */
+ uint8 hopcount; /* Hops to the Anchor Master */
+ struct ether_addr immediate_master; /* Master MAC */
+ struct ether_addr anchor_master; /* Anchor Master MAC */
+ struct ether_addr cluster_id; /* Cluster ID to which this device belongs */
+ uint8 PAD[3];
+ uint32 tsf_high; /* NAN Cluster TSFs */
+ uint32 tsf_low;
+} wl_nan_device_state_t;
+
+/*
+ * WL_NAN_CMD_CFG_HOP_CNT, WL_NAN_CMD_CFG_HOP_LIMIT
+ */
+typedef uint8 wl_nan_hop_count_t;
+
+/*
+ * WL_NAN_CMD_CFG_WARMUP_TIME
+ */
+typedef uint32 wl_nan_warmup_time_ticks_t;
+
+/*
+ * WL_NAN_CMD_CFG_RSSI_THRESHOLD
+ * rssi_close and rssi_mid are used by the NAN state machine to transition
+ * from the master to the non-master role. The rssi thresholds corresponding
+ * to the band will be updated.
+ */
+/* To be deprecated */
+typedef struct wl_nan_rssi_threshold {
+ wl_nan_band_t band;
+ int8 rssi_close;
+ int8 rssi_mid;
+ uint8 pad;
+} wl_nan_rssi_threshold_t;
+
+/* WL_NAN_CMD_ELECTION_RSSI_THRESHOLD */
+
+typedef struct wl_nan_rssi_thld {
+ int8 rssi_close_2g;
+ int8 rssi_mid_2g;
+ int8 rssi_close_5g;
+ int8 rssi_mid_5g;
+} wl_nan_rssi_thld_t;
+
+/* WL_NAN_CMD_DATA_MAX_PEERS */
+
+typedef uint8 wl_nan_max_peers_t;
+
+/*
+ * WL_NAN_CMD_CFG_STATUS
+ */
+
+typedef enum wl_nan_election_mode {
+ WL_NAN_ELECTION_RUN_BY_HOST = 1,
+ WL_NAN_ELECTION_RUN_BY_FW = 2
+} wl_nan_election_mode_t;
+
+typedef struct wl_nan_conf_status {
+ struct ether_addr nmi; /* NAN mgmt interface address */
+ uint8 enabled; /* NAN is enabled */
+ uint8 role; /* Current nan sync role */
+ struct ether_addr cid; /* Current Cluster id */
+ uint8 social_chans[2]; /* Social channels */
+ uint8 mr[8]; /* Self Master Rank */
+ uint8 amr[8]; /* Anchor Master Rank */
+ uint32 ambtt; /* Anchor master beacon target time */
+ uint32 cluster_tsf_h; /* Current Cluster TSF High */
+ uint32 cluster_tsf_l; /* Current Cluster TSF Low */
+ uint8 election_mode; /* Election mode, host or firmware */
+ uint8 hop_count; /* Current Hop count */
+ uint8 imr[8]; /* Immediate Master Rank */
+ uint8 pad[4]; /* remove after precommit */
+ uint16 opt_tlvs_len;
+ uint8 opt_tlvs[];
+} wl_nan_conf_status_t;
+
+/*
+ * WL_NAN_CMD_CFG_OUI
+ */
+typedef struct wl_nan_oui_type {
+ uint8 nan_oui[DOT11_OUI_LEN];
+ uint8 type;
+} wl_nan_oui_type_t;
+
+/*
+ * WL_NAN_CMD_CFG_COUNT
+ */
+typedef struct wl_nan_count {
+ uint32 cnt_bcn_tx; /**< TX disc/sync beacon count */
+ uint32 cnt_bcn_rx; /**< RX disc/sync beacon count */
+ uint32 cnt_svc_disc_tx; /**< TX svc disc frame count */
+ uint32 cnt_svc_disc_rx; /**< RX svc disc frame count */
+} wl_nan_count_t;
+/*
+ * Election component WL_NAN_CMD_ELECTION_XXXX sub-commands
+ * WL_NAN_CMD_ELECTION_HOST_ENABLE
+ */
+enum wl_nan_enable_flags {
+ WL_NAN_DISABLE_FLAG_HOST_ELECTION = 0,
+ WL_NAN_ENABLE_FLAG_HOST_ELECTION = 1
+};
+
+/*
+ * 0 - disable host based election
+ * 1 - enable host based election
+ */
+typedef uint8 wl_nan_host_enable_t;
+
+/*
+ * WL_NAN_CMD_ELECTION_METRICS_CONFIG
+ */
+/* Set only */
+typedef struct wl_nan_election_metric_config {
+ uint8 random_factor; /* Configured random factor */
+ uint8 master_pref; /* configured master preference */
+ uint8 pad[2];
+} wl_nan_election_metric_config_t;
+
+/*
+ * WL_NAN_CMD_ELECTION_METRICS_STATE
+ */
+/* Get only */
+typedef struct wl_nan_election_metric_state {
+ uint8 random_factor; /* random factor used in MIs */
+ uint8 master_pref; /* Master advertised in MIs */
+ uint8 pad[2];
+} wl_nan_election_metric_state_t;
+
+/*
+ * WL_NAN_CMD_ELECTION_LEAVE
+ * WL_NAN_CMD_ELECTION_STOP
+ */
+typedef struct ether_addr wl_nan_cluster_id_t;
+
+#define NHO_SEC_NCS_SK_REPLAY_CNT_LEN 8u
+/* kck */
+#define NHO_SEC_NCS_SK_256_KCK_LEN 24u /* refer nan2 r21 7.1.4.1 */
+/* kek */
+#define NHO_SEC_NCS_SK_256_KEK_LEN 32u /* refer nan2 r21 7.1.4.1 */
+/* tk */
+#define NHO_SEC_NCS_SK_256_TK_LEN 32u /* refer nan2 r21 section 7 */
+
+#define NHO_SEC_NCS_SK_MAX_KEY_LEN (NHO_SEC_NCS_SK_256_KCK_LEN \
+ + NHO_SEC_NCS_SK_256_KEK_LEN \
+ + NHO_SEC_NCS_SK_256_TK_LEN)
+
+#define NHO_SEC_KEY_INSTALL_FLAG (1 << 0)
+#define NHO_SEC_KEY_UNINSTALL_FLAG (1 << 1)
+
+/* WL_NAN_XTLV_NANHO_SEC_SA */
+typedef struct nanho_sec_sa {
+ int8 csid; /* Cipher suite ID to identify the security type */
+ uint8 kck_len; /* KCK len in key_buf */
+ uint8 kek_len; /* KEK len in key_buf */
+ uint8 tk_len; /* Transient key len in key_buf */
+ uint16 flags;
+ uint16 pad;
+ struct ether_addr laddr; /* local mac addr */
+ struct ether_addr raddr; /* remote mac addr */
+ uint8 key_krc[NHO_SEC_NCS_SK_REPLAY_CNT_LEN]; /* Key Replay ctr */
+ uint8 key_buf[NHO_SEC_NCS_SK_MAX_KEY_LEN]; /* PTK = KCK + KEK + TK */
+} nanho_sec_sa_t;
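+
+/*
+ * Layout sketch (illustrative only): key_buf carries the PTK as
+ * KCK | KEK | TK, with component lengths in kck_len/kek_len/tk_len.
+ */
+#if 0 /* example only, not compiled */
+static void
+nanho_sec_sa_key_ptrs(const nanho_sec_sa_t *sa,
+ const uint8 **kck, const uint8 **kek, const uint8 **tk)
+{
+ *kck = sa->key_buf; /* first kck_len bytes */
+ *kek = sa->key_buf + sa->kck_len; /* next kek_len bytes */
+ *tk = sa->key_buf + sa->kck_len + sa->kek_len; /* last tk_len bytes */
+}
+#endif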
+
+/*
+ * WL_NAN_CMD_ELECTION_MERGE
+ * 0 - disable cluster merge
+ * 1 - enable cluster merge
+ */
+typedef uint8 wl_nan_merge_enable_t;
+
+/*
+ * WL_NAN_CMD_CFG_ROLE
+ * role = 0 means configuration by firmware (obsolete); otherwise by host.
+ * When the host configures the role, it also needs to provide the target
+ * master address to sync to.
+ */
+#define NAN_SYNC_MASTER_SELF 1
+#define NAN_SYNC_MASTER_USE_TIMING 2 /* Use the tsf timing provided */
+#define NAN_SYNC_MASTER_AMREC_UPD 4 /* provide AM record update */
+
+/*
+ struct ether_addr addr:
+ when NAN_SYNC_MASTER_USE_TIMING is set, addr is the mac of Rx NAN beacon
+ providing the timing info
+ ltsf_h, ltsf_l:
+ The local TSF timestamp filled in by FW in the WL_NAN_EVENT_BCN_RX event;
+ rtsf_h, rtsf_l:
+ The timestamp in the Rx beacon frame, filled in by host
+ uint32 ambtt:
+ the ambtt in the cluster ID attribute in the Rx beacon frame
+*/
+
+typedef struct nan_sync_master {
+ uint8 flag; /* 1: self; 2: use TSF timing; 4: AMR update */
+ uint8 hop_count;
+ struct ether_addr addr;
+ struct ether_addr cluster_id;
+ chanspec_t channel; /* bcn reception channel */
+ uint32 ltsf_h;
+ uint32 ltsf_l;
+ uint32 rtsf_h;
+ uint32 rtsf_l;
+ uint8 amr[WL_NAN_MASTER_RANK_LEN];
+ uint32 ambtt;
+} nan_sync_master_t;
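+
+/*
+ * Usage sketch (illustrative only): filling nan_sync_master_t from a received
+ * beacon per the field notes above. The bcn_* parameters are hypothetical
+ * names for values taken from a WL_NAN_EVENT_BCN_RX event.
+ */
+#if 0 /* example only, not compiled */
+static void
+nan_sync_master_from_bcn(nan_sync_master_t *m, const struct ether_addr *bcn_sa,
+ uint32 bcn_rtsf_h, uint32 bcn_rtsf_l, uint32 bcn_ambtt)
+{
+ (void)memset(m, 0, sizeof(*m));
+ m->flag = NAN_SYNC_MASTER_USE_TIMING; /* sync to the rx beacon timing */
+ m->addr = *bcn_sa; /* mac of the rx NAN beacon */
+ m->rtsf_h = bcn_rtsf_h; /* timestamp from the rx beacon, host-filled */
+ m->rtsf_l = bcn_rtsf_l;
+ m->ambtt = bcn_ambtt; /* from the cluster attribute in the rx beacon */
+}
+#endif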
+
+#ifdef WLAWDL
+/*
+* NAN Sync TLV (NSTLV):
+* Keeps NAN/AWDL concurrency time in sync.
+* It is generated at a hybrid device and propagated by AWDL-only devices.
+* It contains the information needed to run NAN election.
+*/
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct awdl_nan_sync_tlv {
+ uint16 hop_count; /* total hop_count */
+ struct ether_addr src_addr; /* macaddr of the hybrid originator of nstlv */
+ struct ether_addr cluster_id; /* NAN cluster ID of hybrid originator of nstlv */
+ uint32 nan_tsf_h; /* NAN cluster TSF of the hybrid originator of nstlv */
+ uint32 nan_tsf_l;
+ uint8 master_preference;
+ uint8 random_factor;
+ uint8 amr[WL_NAN_MASTER_RANK_LEN];
+ uint8 orig_hop_count; /* hop_count of the origin hybrid NAN device */
+ uint32 ambtt; /* Anchor Master Beacon Transmission Time */
+ uint8 opt_xtlv_len; /* xtlv len */
+} BWL_POST_PACKED_STRUCT awdl_nan_sync_tlv_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_awdl_nan_sync_tlv {
+ uint8 type; /* 23 for NTLV */
+ uint16 param_len;
+ awdl_nan_sync_tlv_t ntlv;
+} BWL_POST_PACKED_STRUCT wl_awdl_nan_sync_tlv_t;
+
+/* External NAN sync TLV which can be used by other modules to pass NAN sync info */
+typedef awdl_nan_sync_tlv_t external_nan_sync_tlv_t;
+#include <packed_section_end.h>
+#endif /* WLAWDL */
+
+/* NAN advertiser structure */
+/* TODO RSDB: add chanspec to indicate which core this entry corresponds to */
+typedef struct nan_adv_entry {
+ uint8 age; /* used to remove stale entries */
+ uint8 hop_count; /* for NTLV support, use bit7 for virtual NAN peer */
+ struct ether_addr addr;
+ struct ether_addr cluster_id;
+ chanspec_t channel; /* bcn reception channel */
+ uint32 ltsf_h;
+ uint32 ltsf_l;
+ uint32 rtsf_h;
+ uint32 rtsf_l;
+ uint8 amr[WL_NAN_MASTER_RANK_LEN];
+ uint32 ambtt;
+ int8 rssi[NAN_MAX_BANDS]; /* rssi last af was received at */
+ int8 last_rssi[NAN_MAX_BANDS]; /* rssi in the last AF */
+} nan_adv_entry_t;
+#define NAN_VIRTUAL_PEER_BIT 0x80
+
+typedef enum {
+ NAC_CNT_NTLV_AF_TX = 0, /* count of AF containing NTLV tx */
+ NAC_CNT_NTLV_AF_RX, /* count of AF containing NTLV rx */
+ NAC_CNT_NTLV_TMERR_TX, /* count of NTLV tx timing error */
+ NAC_CNT_NTLV_TMERR_RX, /* count of NTLV rx timing error */
+ NAC_CNT_NTLV_TM_MISMATCH, /* count of TopMaster mismatch in Rx NTLV processing */
+ NAC_CNT_NTLV_ADV_EXISTED, /* count of NTLV ignored because advertiser already existed from bcn */
+ NAC_CNT_NTLV_STALED_BCN, /* count of stale bcn from NTLV info */
+ NAC_CNT_NTLV_MERGE, /* count of NTLV used for NAN cluster merge */
+ NAC_CNT_NTLV_ELECTION_DROP, /* count of NTLV dropped in NAN election */
+ NAC_CNT_NTLV_TSF_ADOPT, /* count of NTLV used for NAN TSF adoption */
+ NAC_CNT_NTLV_LAST
+} nac_cnt_enum_t;
+
+#define NAC_MAX_CNT (NAC_CNT_NTLV_LAST)
+
+typedef struct nac_stats {
+ uint32 nac_cnt[NAC_MAX_CNT];
+} nac_stats_t;
+
+typedef struct nan_adv_table {
+ uint8 num_adv;
+ uint8 adv_size;
+ uint8 pad[2];
+ nan_adv_entry_t adv_nodes[0];
+} nan_adv_table_t;
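+
+/*
+ * Usage sketch (illustrative only): the table is followed in memory by
+ * num_adv nan_adv_entry_t records; bit7 of hop_count marks a virtual peer.
+ */
+#if 0 /* example only, not compiled */
+static uint8
+nan_adv_table_count_virtual(const nan_adv_table_t *tbl)
+{
+ uint8 i, cnt = 0;
+
+ for (i = 0; i < tbl->num_adv; i++) {
+ if (tbl->adv_nodes[i].hop_count & NAN_VIRTUAL_PEER_BIT)
+ cnt++;
+ }
+ return cnt;
+}
+#endif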
+
+typedef struct wl_nan_role_cfg {
+ wl_nan_role_t cfg_role;
+ wl_nan_role_t cur_role;
+ uint8 pad[2];
+ nan_sync_master_t target_master;
+} wl_nan_role_cfg_t;
+
+typedef struct wl_nan_role_config {
+ wl_nan_role_t role;
+ struct ether_addr target_master;
+ uint8 pad;
+} wl_nan_role_config_t;
+
+typedef int8 wl_nan_sd_optional_field_types_t;
+
+/* Flag bits for Publish and Subscribe (wl_nan_sd_params_t flags) */
+
+/* First 8 bits are blocked for mapping
+ * against svc_control flag bits which goes out
+ * as part of SDA attribute in air in SDF frames
+ */
+#define WL_NAN_RANGE_LIMITED 0x0040
+
+/* Event generation indicator (default is continuous) */
+
+#define WL_NAN_MATCH_ONCE 0x100000
+#define WL_NAN_MATCH_NEVER 0x200000
+
+/* Bits specific to Publish */
+
+#define WL_NAN_PUB_UNSOLICIT 0x1000 /* Unsolicited Tx */
+#define WL_NAN_PUB_SOLICIT 0x2000 /* Solicited Tx */
+#define WL_NAN_PUB_BOTH 0x3000 /* Both the above */
+
+#define WL_NAN_PUB_BCAST 0x4000 /* bcast solicited Tx only */
+#define WL_NAN_PUB_EVENT 0x8000 /* Event on each solicited Tx */
+#define WL_NAN_PUB_SOLICIT_PENDING 0x10000 /* Used for one-time solicited Publish */
+
+#define WL_NAN_FOLLOWUP 0x20000 /* Follow-up frames */
+#define WL_NAN_TX_FOLLOWUP 0x40000 /* host generated transmit Follow-up frames */
+
+/* Bits specific to Subscribe */
+
+#define WL_NAN_SUB_ACTIVE 0x1000 /* Active subscribe mode */
+#define WL_NAN_SUB_MATCH_IF_SVC_INFO 0x2000 /* Service info in publish */
+
+#define WL_NAN_TTL_UNTIL_CANCEL 0xFFFFFFFF /* Special values for time to live (ttl) parameter */
+
+/*
+ * Publish - runs until first transmission
+ * Subscribe - runs until first DiscoveryResult event
+ */
+#define WL_NAN_TTL_FIRST 0
+
+/* Nan Service Based control Flags */
+
+/* If set, dev will take care of dp_resp */
+#define WL_NAN_SVC_CTRL_AUTO_DPRESP 0x1000000
+
+/* If set, host won't receive event "receive" */
+#define WL_NAN_SVC_CTRL_SUPPRESS_EVT_RECEIVE 0x2000000
+
+/* If set, host won't receive event "replied" */
+#define WL_NAN_SVC_CTRL_SUPPRESS_EVT_REPLIED 0x4000000
+
+/* If set, host won't receive event "terminated" */
+#define WL_NAN_SVC_CTRL_SUPPRESS_EVT_TERMINATED 0x8000000
+
+/*
+ * WL_NAN_CMD_SD_PARAMS
+ */
+typedef struct wl_nan_sd_params
+{
+ uint16 length; /* length including options */
+ uint8 period; /* period of the unsolicited SDF xmission in DWs */
+ uint8 awake_dw; /* interval between two DWs where SDF tx/rx are done */
+ uint8 svc_hash[WL_NAN_SVC_HASH_LEN]; /* Hash for the service name */
+ uint8 instance_id; /* Instance of the current service */
+ int8 proximity_rssi; /* RSSI limit to Rx subscribe or pub SDF; 0 means no effect */
+ uint32 flags; /* bitmap representing aforesaid optional flags */
+ int32 ttl; /* TTL for this instance id, -1 will run till cancelled */
+ tlv_t optional[1]; /* optional fields in the SDF as appropriate */
+} wl_nan_sd_params_t;
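+
+/*
+ * Usage sketch (illustrative only): flag/ttl setup for an unsolicited publish
+ * that runs until cancelled and suppresses "replied" events. Length, service
+ * hash and the optional tlvs are left to the caller.
+ */
+#if 0 /* example only, not compiled */
+static void
+nan_sd_params_publish_fill(wl_nan_sd_params_t *p, uint8 inst_id)
+{
+ p->instance_id = inst_id;
+ p->period = 1; /* one unsolicited SDF per DW */
+ p->proximity_rssi = 0; /* no RSSI limit */
+ p->flags = WL_NAN_PUB_UNSOLICIT | WL_NAN_SVC_CTRL_SUPPRESS_EVT_REPLIED;
+ p->ttl = (int32)WL_NAN_TTL_UNTIL_CANCEL; /* i.e. -1, run until cancelled */
+}
+#endif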
+
+/*
+ * WL_NAN_CMD_SD_PUBLISH_LIST
+ * WL_NAN_CMD_SD_SUBSCRIBE_LIST
+ */
+typedef struct wl_nan_service_info
+{
+ uint8 instance_id; /* Publish instance ID */
+ uint8 service_hash[WL_NAN_SVC_HASH_LEN]; /* Hash for service name */
+} wl_nan_service_info_t;
+
+typedef struct wl_nan_service_list
+{
+ uint16 id_count; /* Number of registered publish/subscribe services */
+ wl_nan_service_info_t list[1]; /* service info defined by nan_service instance */
+} wl_nan_service_list_t;
+
+/*
+ * WL_NAN_CMD_CFG_BCN_INTERVAL
+ */
+typedef uint16 wl_nan_disc_bcn_interval_t;
+
+/*
+ * WL_NAN_CMD_CFG_SDF_TXTIME
+ */
+typedef uint16 wl_nan_svc_disc_txtime_t;
+
+/*
+ * WL_NAN_CMD_CFG_STOP_BCN_TX
+ */
+typedef uint16 wl_nan_stop_bcn_tx_t;
+
+/*
+ * WL_NAN_CMD_CFG_FSM_TIMEOUT
+ */
+typedef uint32 wl_nan_fsm_timeout_t;
+
+/*
+ * WL_NAN_CMD_CFG_SID_BEACON
+ */
+typedef struct wl_nan_sid_beacon_control {
+ uint8 sid_enable; /* Flag to indicate the inclusion of Service IDs in Beacons */
+ uint8 sid_count; /* Limit for number of publish SIDs to be included in Beacons */
+ uint8 sub_sid_count; /* Limit for number of subscribe SIDs to be included in Beacons */
+ uint8 pad;
+} wl_nan_sid_beacon_control_t;
+
+/*
+ * WL_NAN_CMD_CFG_DW_LEN
+ */
+typedef uint16 wl_nan_dw_len_t;
+
+/*
+ * WL_NAN_CMD_CFG_AWAKE_DW. Will be deprecated.
+ */
+typedef struct wl_nan_awake_dw {
+ wl_nan_band_t band; /* 0 - b mode, 1 - a mode */
+ uint8 interval; /* 1 or 2 or 4 or 8 or 16 */
+ uint16 pad;
+} wl_nan_awake_dw_t;
+
+/*
+ * WL_NAN_CMD_CFG_AWAKE_DWS
+ */
+typedef struct wl_nan_awake_dws {
+ uint8 dw_interval_2g; /* 2G DW interval */
+ uint8 dw_interval_5g; /* 5G DW interval */
+ uint16 pad;
+} wl_nan_awake_dws_t;
+
+/* WL_NAN_CMD_SYNC_BCN_RSSI_NOTIF_THRESHOLD */
+
+typedef struct wl_nan_rssi_notif_thld {
+ int8 bcn_rssi_2g;
+ int8 bcn_rssi_5g;
+ int16 pad;
+} wl_nan_rssi_notif_thld_t;
+
+/*
+ * WL_NAN_CMD_CFG_SOCIAL_CHAN
+ */
+typedef struct wl_nan_social_channels {
+ uint8 soc_chan_2g; /* 2G social channel */
+ uint8 soc_chan_5g; /* 5G social channel */
+ uint16 pad;
+} wl_nan_social_channels_t;
+
+/*
+ * WL_NAN_CMD_SD_CANCEL_PUBLISH
+ * WL_NAN_CMD_SD_CANCEL_SUBSCRIBE
+ */
+typedef uint8 wl_nan_instance_id; /* Instance ID of an active publish instance */
+
+/*
+ * WL_NAN_CMD_SD_VND_INFO
+ */
+typedef struct wl_nan_sd_vendor_info
+{
+ uint16 length; /* Size in bytes of the payload following this field */
+ uint8 data[]; /* Vendor Information */
+} wl_nan_sd_vendor_info_t;
+
+/*
+ * WL_NAN_CMD_SD_STATS
+ */
+typedef struct wl_nan_sd_stats {
+ uint32 sdftx;
+ uint32 sdfrx;
+ uint32 sdsrffail;
+ uint32 sdrejrssi;
+ uint32 sdfollowuprx;
+ uint32 sdsubmatch;
+ uint32 sdpubreplied;
+ uint32 sdmftfail1;
+ uint32 sdmftfail2;
+ uint32 sdmftfail3;
+ uint32 sdmftfail4;
+} wl_nan_sd_stats_t;
+
+/* Flag bits for sd transmit message (wl_nan_sd_transmit_t flags) */
+
+/* If set, host won't receive "tx status" event for tx-followup msg */
+#define WL_NAN_FUP_SUPR_EVT_TXS 0x01
+/* more flags can be added here */
+
+/*
+ * WL_NAN_CMD_SD_TRANSMIT
+ * WL_NAN_CMD_SD_FUP_TRANSMIT
+ */
+typedef struct wl_nan_sd_transmit {
+ uint8 local_service_id; /* Sender Service ID */
+ uint8 requestor_service_id; /* Destination Service ID */
+ struct ether_addr destination_addr; /* Destination MAC */
+ uint16 token; /* follow_up_token when a follow-up
+ * msg is queued successfully
+ */
+ uint8 priority; /* requested relative prio */
+ uint8 flags; /* Flags for tx follow-up msg */
+ uint16 opt_len; /* total length of optional tlvs */
+ uint8 opt_tlv[]; /* optional tlvs in bcm_xtlv_t type */
+} wl_nan_sd_transmit_t;
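+
+/*
+ * Sizing sketch (illustrative only): the command carries opt_len bytes of
+ * xtlvs after the fixed fields, so the iovar payload is sizeof(struct) plus
+ * opt_len. The osh handle and MALLOCZ usage here are assumptions.
+ */
+#if 0 /* example only, not compiled */
+uint16 len = (uint16)(sizeof(wl_nan_sd_transmit_t) + opt_len);
+wl_nan_sd_transmit_t *tx = MALLOCZ(osh, len);
+#endif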
+
+/* disc cache timeout for a cache entry */
+typedef uint16 wl_nan_disc_cache_timeout_t;
+
+/*
+ * WL_NAN_CMD_SYNC_TSRESERVE
+ */
+/** time slot */
+#define NAN_MAX_TIMESLOT 32
+typedef struct wl_nan_timeslot {
+ uint32 abitmap; /**< available bitmap */
+ uint32 chanlist[NAN_MAX_TIMESLOT];
+} wl_nan_timeslot_t;
+
+/*
+ * Deprecated
+ *
+ * WL_NAN_CMD_SYNC_TSRELEASE
+ */
+typedef uint32 wl_nan_ts_bitmap_t;
+
+/* nan passive scan params */
+#define NAN_SCAN_MAX_CHCNT 8
+/* nan merge scan params */
+typedef struct wl_nan_scan_params {
+ /* dwell time of the discovery channel corresponding to band_index.
+ * If set to 0 then the fw default will be used.
+ */
+ uint16 dwell_time;
+ /* scan period of the discovery channel corresponding to band_index.
+ * If set to 0 then the fw default will be used.
+ */
+ uint16 scan_period;
+ /* band index of discovery channel */
+ uint8 band_index;
+} wl_nan_scan_params_t;
+
+/*
+ * WL_NAN_CMD_DBG_SCAN
+ */
+typedef struct wl_nan_dbg_scan {
+ struct ether_addr cid;
+ uint8 pad[2];
+} wl_nan_dbg_scan_t;
+
+/* NAN_DBG_LEVEL */
+typedef struct wl_nan_dbg_level {
+ uint32 nan_err_level; /* for Error levels */
+ uint32 nan_dbg_level; /* for debug logs and trace */
+ uint32 nan_info_level; /* for dumps like prhex */
+} wl_nan_dbg_level_t;
+
+/*
+ * WL_NAN_CMD_DBG_EVENT_MASK
+ */
+typedef uint32 wl_nan_event_mask_t;
+
+/*
+ * WL_NAN_CMD_DBG_EVENT_CHECK
+ */
+typedef uint8 wl_nan_dbg_ifname[BCM_MSG_IFNAME_MAX];
+
+/*
+ * WL_NAN_CMD_DBG_DUMP
+ * WL_NAN_CMD_DBG_CLEAR
+ */
+enum wl_nan_dbg_dump_type {
+ WL_NAN_DBG_DT_RSSI_DATA = 1,
+ WL_NAN_DBG_DT_STATS_DATA = 2,
+ /*
+ * Additional enums before this line
+ */
+ WL_NAN_DBG_DT_INVALID
+};
+typedef int8 wl_nan_dbg_dump_type_t;
+
+/** various params and control switches for the nan_debug instance */
+/*
+ * WL_NAN_CMD_DBG_DEBUG
+ */
+typedef struct wl_nan_debug_params {
+ uint16 cmd; /**< debug cmd to perform a debug action */
+ uint16 status;
+ uint32 msglevel; /**< msg level if enabled */
+ uint8 enabled; /**< runtime debugging enabled */
+ uint8 collect;
+ uint8 PAD[2];
+} wl_nan_debug_params_t;
+
+typedef struct wl_nan_sched_svc_timeslot_s {
+ uint32 abitmap; /* availability bitmap */
+ uint32 chanlist[NAN_MAX_TIMESLOT];
+ uint8 res; /* resolution: 0 = 16ms, 1 = 32ms, 2 = 64ms, 3 = reserved. Refer to NAN spec */
+ uint8 mapid; /* mapid from NAN spec. Used to differentiate 2G vs 5G band */
+ uint8 PAD[2];
+} wl_nan_sched_svc_timeslot_t;
+
+/*
+ * WL_NAN_CMD_DATA_DP_IDLE_PERIOD
+ */
+typedef uint16 wl_nan_ndp_idle_period_t;
+
+/*
+ * WL_NAN_CMD_DATA_DP_HB_DURATION
+ */
+typedef uint16 wl_nan_ndp_hb_duration_t;
+
+/* nan cmd IDs */
+enum wl_nan_cmds {
+ /* nan cfg /disc & dbg ioctls */
+ WL_NAN_CMD_ENABLE = 1,
+ WL_NAN_CMD_ATTR = 2,
+ WL_NAN_CMD_NAN_JOIN = 3,
+ WL_NAN_CMD_LEAVE = 4,
+ WL_NAN_CMD_MERGE = 5,
+ WL_NAN_CMD_STATUS = 6,
+ WL_NAN_CMD_TSRESERVE = 7,
+ WL_NAN_CMD_TSSCHEDULE = 8,
+ WL_NAN_CMD_TSRELEASE = 9,
+ WL_NAN_CMD_OUI = 10,
+ WL_NAN_CMD_OOB_AF = 11,
+ WL_NAN_CMD_SCAN_PARAMS = 12,
+
+ WL_NAN_CMD_COUNT = 15,
+ WL_NAN_CMD_CLEARCOUNT = 16,
+
+ /* discovery engine commands */
+ WL_NAN_CMD_PUBLISH = 20,
+ WL_NAN_CMD_SUBSCRIBE = 21,
+ WL_NAN_CMD_CANCEL_PUBLISH = 22,
+ WL_NAN_CMD_CANCEL_SUBSCRIBE = 23,
+ WL_NAN_CMD_TRANSMIT = 24,
+ WL_NAN_CMD_CONNECTION = 25,
+ WL_NAN_CMD_SHOW = 26,
+ WL_NAN_CMD_STOP = 27, /* stop nan for a given cluster ID */
+ /* nan debug iovars & cmds */
+ WL_NAN_CMD_SCAN = 47,
+ WL_NAN_CMD_SCAN_RESULTS = 48,
+ WL_NAN_CMD_EVENT_MASK = 49,
+ WL_NAN_CMD_EVENT_CHECK = 50,
+ WL_NAN_CMD_DUMP = 51,
+ WL_NAN_CMD_CLEAR = 52,
+ WL_NAN_CMD_RSSI = 53,
+
+ WL_NAN_CMD_DEBUG = 60,
+ WL_NAN_CMD_TEST1 = 61,
+ WL_NAN_CMD_TEST2 = 62,
+ WL_NAN_CMD_TEST3 = 63,
+ WL_NAN_CMD_DISC_RESULTS = 64,
+ /* nan 2.0 data path commands */
+ WL_NAN_CMD_DATAPATH = 65
+};
+
+/* NAN DP interface commands */
+enum wl_nan_dp_cmds {
+ /* nan 2.0 ioctls */
+ WL_NAN_CMD_DP_CAP = 1000,
+ WL_NAN_CMD_DP_CONFIG = 1001,
+ WL_NAN_CMD_DP_CREATE = 1002,
+ WL_NAN_CMD_DP_AUTO_CONNECT = 1003,
+ WL_NAN_CMD_DP_DATA_REQ = 1004,
+ WL_NAN_CMD_DP_DATA_RESP = 1005,
+ WL_NAN_CMD_DP_SCHED_UPD = 1006,
+ WL_NAN_CMD_DP_END = 1007,
+ WL_NAN_CMD_DP_CONNECT = 1008,
+ WL_NAN_CMD_DP_STATUS = 1009
+};
+
+/* TODO Should remove this fixed length */
+#define WL_NAN_DATA_SVC_SPEC_INFO_LEN 32 /* arbitrary */
+#define WL_NAN_DP_MAX_SVC_INFO 0xFF
+#define WL_NAN_DATA_NDP_INST_SUPPORT 16
+
+/* Nan flags (16 bits) */
+#define WL_NAN_DP_FLAG_SVC_INFO 0x0001
+#define WL_NAN_DP_FLAG_CONFIRM 0x0002
+#define WL_NAN_DP_FLAG_EXPLICIT_CFM 0x0004
+#define WL_NAN_DP_FLAG_SECURITY 0x0008
+#define WL_NAN_DP_FLAG_HAST_NDL_COUNTER 0x0010 /* Host assisted NDL counter */
+
+/* NAN Datapath host status */
+#define WL_NAN_DP_STATUS_ACCEPTED 1
+#define WL_NAN_DP_STATUS_REJECTED 0
+
+/* to be done */
+typedef struct wl_nan_dp_cap {
+ uint8 tbd;
+} wl_nan_dp_cap_t;
+
+/** The service hash (service id) is exactly this many bytes. */
+#define WL_NAN_SVC_HASH_LEN 6
+/** Number of hash functions per bloom filter */
+#define WL_NAN_HASHES_PER_BLOOM 4
+/* no. of max last disc results */
+#define WL_NAN_MAX_DISC_RESULTS 3
+
+/* NAN security related defines */
+/* NCS-SK related */
+#define WL_NAN_NCS_SK_PMK_LEN 32
+#define WL_NAN_NCS_SK_PMKID_LEN 16
+
+/* recent discovery results */
+typedef struct wl_nan_disc_result_s
+{
+ wl_nan_instance_id_t instance_id; /* instance id of pub/sub req */
+ wl_nan_instance_id_t peer_instance_id; /* peer instance id of pub/sub req/resp */
+ uint8 svc_hash[WL_NAN_SVC_HASH_LEN]; /* hash of the service name */
+ struct ether_addr peer_mac; /* peer mac address */
+} wl_nan_disc_result_t;
+
+/* list of recent discovery results */
+typedef struct wl_nan_disc_results_s
+{
+ wl_nan_disc_result_t disc_result[WL_NAN_MAX_DISC_RESULTS];
+} wl_nan_disc_results_list_t;
+
+/* nan 1.0 events */
+/* To be deprecated - will be replaced by event_disc_result */
+typedef struct wl_nan_ev_disc_result {
+ wl_nan_instance_id_t pub_id;
+ wl_nan_instance_id_t sub_id;
+ struct ether_addr pub_mac;
+ uint8 opt_tlvs[0];
+} wl_nan_ev_disc_result_t;
+
+typedef struct wl_nan_event_disc_result {
+ wl_nan_instance_id_t pub_id;
+ wl_nan_instance_id_t sub_id;
+ struct ether_addr pub_mac;
+ int8 publish_rssi; /* publisher RSSI */
+ uint8 attr_num;
+ uint16 attr_list_len; /* length of the all the attributes in the SDF */
+ uint8 attr_list[0]; /* list of NAN attributes */
+} wl_nan_event_disc_result_t;
+
+typedef struct wl_nan_ev_p2p_avail {
+ struct ether_addr sender;
+ struct ether_addr p2p_dev_addr;
+ uint8 dev_role;
+ uint8 resolution;
+ uint8 repeat;
+ uint8 pad[3];
+ chanspec_t chanspec;
+ uint32 avail_bmap;
+} wl_nan_ev_p2p_avail_t;
+
+/*
+* discovery interface event structures *
+*/
+
+enum wl_nan_oob_af_flags {
+ WL_NAN_OOB_AF_FLAG_SEND_EVENT = 0x0001, /* send tx status event */
+ WL_NAN_OOB_AF_FLAG_FLUSH_PCB = 0x0002, /* flush PCB */
+ WL_NAN_OOB_AF_FLAG_ADD_DCAP = 0x0004, /* add dev cap attr into NAF body */
+ WL_NAN_OOB_AF_FLAG_ADD_ELMT = 0x0008, /* add elmt container attr into NAF body */
+ WL_NAN_OOB_AF_FLAG_MFP_REQUIRED = 0x0010 /* MFP required */
+};
+typedef uint16 wl_nan_oob_af_flags_t;
+
+/* mandatory parameters for OOB action frame */
+typedef struct wl_nan_oob_af_params_s
+{
+ uint8 fup_lcl_id; /* local instance ID of follow-up SDF */
+ uint8 fup_peer_id; /* peer instance ID of follow-up SDF */
+ uint8 sdf_type; /* represented by service control type NAN_SC_XXX */
+ uint8 unused_uint8;
+ uint32 unused_uint32;
+ struct ether_addr bssid;
+ struct ether_addr dest;
+ uint32 pkt_lifetime;
+ uint8 n2af_sub_type; /* NAN2 AF sub type */
+ uint8 retry_cnt; /* NAF tx retry (not 802.11 re-tx) */
+ uint16 token; /* NAN host seq num */
+ uint16 flags; /* wl_nan_oob_af_flags_t */
+ uint32 fsm_id; /* unique fsm id */
+ uint16 payload_len;
+ uint8 payload[1];
+} wl_nan_oob_af_params_t;
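+
+/*
+ * Sizing sketch (illustrative only): payload[1] is a variable-length tail,
+ * so an allocation should be based on the payload offset rather than
+ * sizeof(); OFFSETOF as used elsewhere in this codebase is assumed here.
+ */
+#if 0 /* example only, not compiled */
+uint16 af_len = (uint16)(OFFSETOF(wl_nan_oob_af_params_t, payload) +
+ payload_len);
+#endif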
+
+/* NAN Ranging */
+
+/* Bit defines for global flags */
+#define WL_NAN_RANGING_ENABLE 1 /**< enable RTT */
+#define WL_NAN_RANGING_RANGED 2 /**< Report to host if ranged as target */
+typedef struct nan_ranging_config {
+ uint32 chanspec; /**< Ranging chanspec */
+ uint16 timeslot; /**< NAN RTT start time slot 1-511 */
+ uint16 duration; /**< NAN RTT duration in ms */
+ struct ether_addr allow_mac; /**< peer initiated ranging: the allowed peer mac
+ * address, a unicast (for one peer) or
+ * a broadcast for all. Setting it to all zeros
+ * means responding to none, same as not setting
+ * the flag bit NAN_RANGING_RESPOND
+ */
+ uint16 flags;
+} wl_nan_ranging_config_t;
+
+/** list of peers for self initiated ranging */
+/** Bit defines for per peer flags */
+#define WL_NAN_RANGING_REPORT (1<<0) /**< Enable reporting range to target */
+typedef struct nan_ranging_peer {
+ uint32 chanspec; /**< desired chanspec for this peer */
+ uint32 abitmap; /**< available bitmap */
+ struct ether_addr ea; /**< peer MAC address */
+ uint8 frmcnt; /**< frame count */
+ uint8 retrycnt; /**< retry count */
+ uint16 flags; /**< per peer flags, report or not */
+ uint16 PAD;
+} wl_nan_ranging_peer_t;
+typedef struct nan_ranging_list {
+ uint8 count; /**< number of MAC addresses */
+ uint8 num_peers_done; /**< host set to 0, when read, shows number of peers
+ * completed, success or fail
+ */
+ uint8 num_dws; /**< time period to do the ranging, specified in dws */
+ uint8 reserve; /**< reserved field */
+ wl_nan_ranging_peer_t rp[1]; /**< variable length array of peers */
+} wl_nan_ranging_list_t;
+
+/* ranging results, a list for self initiated ranging and one for peer initiated ranging */
+/* There will be one structure for each peer */
+#define WL_NAN_RANGING_STATUS_SUCCESS 1
+#define WL_NAN_RANGING_STATUS_FAIL 2
+#define WL_NAN_RANGING_STATUS_TIMEOUT 3
+#define WL_NAN_RANGING_STATUS_ABORT 4 /**< with partial results if sounding count > 0 */
+typedef struct nan_ranging_result {
+ uint8 status; /**< 1: Success, 2: Fail, 3: Timeout, 4: Aborted */
+ uint8 sounding_count; /**< number of measurements completed (0 = failure) */
+ struct ether_addr ea; /**< initiator MAC address */
+ uint32 chanspec; /**< Chanspec where the ranging was done */
+ uint32 timestamp; /**< 32bits of the TSF timestamp ranging was completed at */
+ uint32 distance; /**< mean distance in meters expressed as Q4 number.
+ * Only valid when sounding_count > 0. Examples:
+ * 0x08 = 0.5m
+ * 0x10 = 1m
+ * 0x18 = 1.5m
+ * set to 0xffffffff to indicate invalid number
+ */
+ int32 rtt_var; /**< standard deviation in 10th of ns of RTTs measured.
+ * Only valid when sounding_count > 0
+ */
+ struct ether_addr tgtea; /**< target MAC address */
+ uint8 PAD[2];
+} wl_nan_ranging_result_t;
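+
+/*
+ * Conversion sketch (illustrative only): distance is Q4 fixed point
+ * (meters * 16), so 0x08 is 0.5m and 0x18 is 1.5m as noted above. In
+ * integer centimeters (overflow ignored for a sketch):
+ */
+#if 0 /* example only, not compiled */
+static uint32
+nan_rng_dist_cm(uint32 distance_q4)
+{
+ if (distance_q4 == 0xffffffff)
+ return 0; /* invalid-distance marker */
+ return (distance_q4 * 100u) >> 4; /* e.g. 0x18 -> 150 cm */
+}
+#endif
+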
+typedef struct nan_ranging_event_data {
+ uint8 mode; /**< 1: Result of host initiated ranging */
+ /* 2: Result of peer initiated ranging */
+ uint8 reserved;
+ uint8 success_count; /**< number of peers completed successfully */
+ uint8 count; /**< number of peers in the list */
+ wl_nan_ranging_result_t rr[1]; /**< variable array of ranging peers */
+} wl_nan_ranging_event_data_t;
+
+enum {
+ WL_NAN_STATS_RSSI = 1,
+ WL_NAN_STATS_DATA = 2,
+ WL_NAN_STATS_DP = 3,
+/*
+ * ***** ADD before this line ****
+ */
+ WL_NAN_STATS_INVALID
+};
+typedef struct wl_nan_dp_stats {
+ uint32 tbd; /* TBD */
+} wl_nan_dp_stats_t;
+
+typedef struct wl_nan_stats {
+ /* general */
+ uint32 cnt_dw; /* DW slots */
+ uint32 cnt_disc_bcn_sch; /* disc beacon slots */
+ uint32 cnt_amr_exp; /* count of ambtt expiries resetting roles */
+ uint32 cnt_bcn_upd; /* count of beacon template updates */
+ uint32 cnt_bcn_tx; /* count of sync & disc bcn tx */
+ uint32 cnt_bcn_rx; /* count of sync & disc bcn rx */
+ uint32 cnt_sync_bcn_tx; /* count of sync bcn tx within DW */
+ uint32 cnt_disc_bcn_tx; /* count of disc bcn tx */
+ uint32 cnt_sdftx_bcmc; /* count of bcast/mcast sdf tx */
+ uint32 cnt_sdftx_uc; /* count of unicast sdf tx */
+ uint32 cnt_sdftx_fail; /* count of unicast sdf tx fails */
+ uint32 cnt_sdf_rx; /* count of sdf rx */
+ /* NAN roles */
+ uint32 cnt_am; /* anchor master */
+ uint32 cnt_master; /* master */
+ uint32 cnt_nms; /* non master sync */
+ uint32 cnt_nmns; /* non master non sync */
+ /* TX */
+ uint32 cnt_err_txtime; /* txtime in sync bcn frame not a multiple of dw intv */
+ uint32 cnt_err_unsch_tx; /* tx while not in DW/ disc bcn slot */
+ uint32 cnt_err_bcn_tx; /* beacon tx error */
+ uint32 cnt_sync_bcn_tx_miss; /* no. of times time delta between 2 consecutive
+ * sync beacons is more than expected
+ */
+ /* MSCH */
+ uint32 cnt_err_msch_reg; /* errors in DW/disc registration with msch */
+ uint32 cnt_err_wrong_ch_cb; /* count of msch callbacks in wrong channel */
+ uint32 cnt_dw_skip; /* count of DW rejected */
+ uint32 cnt_disc_skip; /* count of disc bcn rejected */
+ uint32 cnt_dw_start_early; /* msch cb not at registered time */
+ uint32 cnt_dw_start_late; /* no. of delays in slot start */
+ /* SCANS */
+ uint32 cnt_mrg_scan; /* count of merge scans completed */
+ uint32 cnt_err_ms_rej; /* number of merge scan failed */
+ uint32 cnt_scan_results; /* no. of nan beacons scanned */
+ uint32 cnt_join_scan_rej; /* no. of join scans rejected */
+ uint32 cnt_nan_scan_abort; /* no. of nan scans aborted */
+ /* enable/disable */
+ uint32 cnt_nan_enab; /* no. of times nan feature got enabled */
+ uint32 cnt_nan_disab; /* no. of times nan feature got disabled */
+ uint32 cnt_sync_bcn_rx; /* count of sync bcn rx within DW */
+ uint32 cnt_sync_bcn_rx_tu[3]; /* Delta between the tsf in bcn & remote */
+ uint32 cnt_bcn_tx_out_dw; /* TX sync beacon outside dw */
+ uint32 cnt_role_am_dw; /* anchor master role due to dw */
+ uint32 cnt_am_hop_err; /* wrong hopcount set for AM */
+} wl_nan_stats_t;
+
+#define WL_NAN_MAC_MAX_NAN_PEERS 6
+#define WL_NAN_MAC_MAX_RSSI_DATA_PER_PEER 10
+
+typedef struct wl_nan_nbr_rssi {
+ uint8 rx_chan; /* channel number on which bcn rcvd */
+ uint8 PAD[3];
+ int32 rssi_raw; /* received rssi value */
+ int32 rssi_avg; /* normalized rssi value */
+} wl_nan_peer_rssi_t;
+
+typedef struct wl_nan_peer_rssi_entry {
+ struct ether_addr mac; /* peer mac address */
+ uint8 flags; /* TODO:rssi data order: latest first, oldest first etc */
+ uint8 rssi_cnt; /* rssi data sample present */
+ wl_nan_peer_rssi_t rssi[WL_NAN_MAC_MAX_RSSI_DATA_PER_PEER]; /* RSSI data from peer */
+} wl_nan_peer_rssi_entry_t;
+
+#define WL_NAN_PEER_RSSI 0x1
+#define WL_NAN_PEER_RSSI_LIST 0x2
+
+typedef struct wl_nan_nbr_rssi_data {
+ uint8 flags; /* this is a list or single rssi data */
+ uint8 peer_cnt; /* number of peers */
+ uint16 pad; /* padding */
+ wl_nan_peer_rssi_entry_t peers[1]; /* peers data list */
+} wl_nan_peer_rssi_data_t;
+
+/* WL_NAN_CMD_DBG_DUMP, GET Resp */
+typedef struct wl_nan_dbg_dump_rsp {
+ wl_nan_dbg_dump_type_t dump_type; /* dump data type */
+ uint8 pad[3];
+ union {
+ wl_nan_peer_rssi_data_t peer_rssi;
+ wl_nan_stats_t nan_stats;
+ } u;
+} wl_nan_dbg_dump_rsp_t;
+
+enum nan_termination_status {
+ NAN_TERM_REASON_INVALID = 1,
+ NAN_TERM_REASON_TIMEOUT = 2,
+ NAN_TERM_REASON_USER_REQ = 3,
+ NAN_TERM_REASON_FAILURE = 4,
+ NAN_TERM_REASON_COUNT_REACHED = 5,
+ NAN_TERM_REASON_DE_SHUTDOWN = 6,
+ NAN_TERM_REASON_DISABLE_IN_PROGRESS = 7
+};
+
+/* nan2 data iovar */
+/* nan2 qos */
+typedef struct wl_nan_dp_qos
+{
+ uint8 tid;
+ uint8 pad;
+ uint16 pkt_size;
+ uint16 mean_rate;
+ uint16 svc_interval;
+} wl_nan_dp_qos_t;
+
+#define WL_NAN_NDL_QOS_MAX_LAT_NO_PREF 0xFFFF
+
+/* nan2 qos */
+typedef struct wl_nan_ndl_qos
+{
+ uint8 min_slots; /* min slots per dw interval */
+ uint8 pad;
+ uint16 max_latency; /* max latency */
+} wl_nan_ndl_qos_t;
+
+/* ndp config */
+typedef struct wl_nan_ndp_config
+{
+ uint8 ndp_id;
+ uint8 pub_id;
+ struct ether_addr pub_addr;
+ struct ether_addr data_addr; /* configure local data addr */
+ struct ether_addr init_data_addr; /* initiator data addr */
+ uint8 svc_spec_info[WL_NAN_DATA_SVC_SPEC_INFO_LEN];
+ wl_nan_dp_qos_t qos;
+ uint16 avail_len;
+ uint8 pad[3];
+ uint8 data[1];
+} wl_nan_ndp_config_t;
+
+/* nan2 device capabilities */
+typedef struct wl_nan_ndp_oper_cfg {
+ uint8 awake_dw_2g;
+ uint8 awake_dw_5g;
+ uint8 bands_supported;
+ uint8 op_mode;
+} wl_nan_ndp_oper_cfg_t;
+
+typedef uint8 wl_nan_ndp_ndpid_t;
+typedef uint8 wl_nan_ndp_conn_t;
+
+#define WL_NAN_INVALID_NDPID 0 /* reserved ndp id */
+
+typedef struct wl_nan_dp_req {
+ uint8 type; /* 0 - unicast, 1 - multicast */
+ uint8 pub_id; /* Publisher ID */
+ uint16 flags;
+ struct ether_addr peer_mac; /* Peer's NMI addr */
+ struct ether_addr mcast_mac; /* Multicast addr */
+ struct ether_addr ndi;
+ wl_nan_dp_qos_t qos;
+ wl_nan_ndl_qos_t ndl_qos; /* ndl qos */
+ uint8 tlv_params[]; /* xtlv parameters for command */
+} wl_nan_dp_req_t;
+
+/* TODO Need to replace ndp_id with lndp_id */
+/* Return structure to data req IOVAR */
+typedef struct wl_nan_dp_req_ret {
+ struct ether_addr indi; /* Initiator's data mac addr */
+ uint8 ndp_id; /* Initiator's ndp id */
+ uint8 pad;
+} wl_nan_dp_req_ret_t;
+
+typedef struct wl_nan_dp_resp {
+ uint8 type; /* 0 - unicast, 1 - multicast */
+ uint8 status; /* Accepted or Rejected */
+ uint8 reason_code;
+ /* Local NDP ID for unicast, mc_id for multicast, 0 for implicit NMSG */
+ uint8 ndp_id; /* can be host indp id also */
+ wl_nan_dp_qos_t qos;
+ /* Initiator data address for unicast or multicast address for multicast */
+ struct ether_addr mac_addr;
+ struct ether_addr ndi;
+ uint16 flags;
+ wl_nan_ndl_qos_t ndl_qos; /* ndl qos */
+ uint8 tlv_params[]; /* xtlv parameters for command */
+} wl_nan_dp_resp_t;
+
+/* Return structure to data resp IOVAR */
+typedef struct wl_nan_dp_resp_ret {
+ uint8 nmsgid; /* NMSG ID for multicast, else 0 */
+ uint8 pad[3];
+} wl_nan_dp_resp_ret_t;
+
+typedef struct wl_nan_dp_conf {
+ uint8 lndp_id; /* can be host ndp id */
+ uint8 status; /* Accepted or Rejected */
+ uint8 pad[2];
+} wl_nan_dp_conf_t;
+
+/* WL_NAN_CMD_DATA_DATAEND */
+typedef struct wl_nan_dp_end
+{
+ uint8 lndp_id; /* can be host ndp id */
+ uint8 status;
+ struct ether_addr mac_addr; /* initiator's ndi */
+} wl_nan_dp_end_t;
+
+/* wl_nan_dp_end_v2_t flags */
+#define WL_NAN_DP_END_V2_FLAG_NO_TX 0x0001u
+#define WL_NAN_DP_END_V2_FLAG_NO_RETRY 0x0002u
+
+/* WL_NAN_CMD_DATA_DATAEND_V2 */
+typedef struct wl_nan_dp_end_v2
+{
+ uint8 ndp_id; /* initiator's NDP ID or local NDP ID */
+ uint8 status;
+ struct ether_addr indi; /* initiator's ndi */
+ uint16 flags; /* flags to enable/disable retry, etc. */
+ uint16 opt_len; /* total length of optional tlvs */
+ uint8 opt_tlv[]; /* optional tlvs in bcm_xtlv_t type */
+} wl_nan_dp_end_v2_t;
+
+typedef struct wl_nan_dp_schedupd {
+ uint8 type; /* 0: unicast, 1: multicast */
+ uint8 flags;
+ struct ether_addr addr; /* peer NMI or multicast addr */
+ wl_nan_dp_qos_t qos;
+ wl_nan_ndl_qos_t ndl_qos; /* ndl qos */
+ uint8 map_id;
+ uint8 pad;
+ uint16 hostseq;
+} wl_nan_dp_schedupd_t;
+
+/* set: update with notification, unset: NDL setup handshake */
+#define WL_NAN_DP_SCHEDUPD_NOTIF (1 << 0)
+
+/* list ndp ids */
+typedef struct wl_nan_ndp_id_list {
+ uint16 ndp_count;
+ uint8 lndp_id[];
+} wl_nan_ndp_id_list_t;
+
+/* nan2 status */
+typedef struct ndp_session {
+ uint8 lndp_id;
+ uint8 state;
+ uint8 pub_id;
+ uint8 pad;
+} ndp_session_t;
+
+typedef struct wl_nan_ndp_status {
+ struct ether_addr peer_nmi;
+ struct ether_addr peer_ndi;
+ ndp_session_t session;
+ struct ether_addr lndi;
+ uint8 pad[2];
+} wl_nan_ndp_status_t;
+
+#define NAN_DP_OPAQUE_INFO_DP_RESP 0x01
+#define NAN_DP_OPAQUE_INFO_DP_CONF 0x02
+
+typedef struct wl_nan_dp_opaque_info {
+ uint8 frm_mask; /* dp_resp / dp_conf as defined above. */
+ struct ether_addr initiator_ndi; /* NDI to match in the dp_req. */
+ uint8 pub_id; /* publish id where the opaque data is included. */
+ uint8 len; /* len of opaque_info[]. */
+ uint8 pad[3];
+ uint8 opaque_info[0];
+} wl_nan_dp_opaque_info_t;
+
+/* events */
+#define NAN_DP_SESSION_UNICAST 0
+#define NAN_DP_SESSION_MULTICAST 1
+#define NAN_DP_SECURITY_NONE 0
+#define NAN_DP_SECURITY_CSID 1
+#define NAN_DP_SECURITY_MK 2
+#define WL_NAN_DATA_NMSGID_LEN 8 /* 8 bytes as per nan spec */
+
+/* DP TERM event causes */
+#define WL_NAN_DP_TERM_WITH_INACTIVITY 1u
+#define WL_NAN_DP_TERM_WITH_FSM_DESTROY 2u
+#define WL_NAN_DP_TERM_WITH_PEER_DP_END 3u
+#define WL_NAN_DP_TERM_WITH_STALE_NDP 4u
+#define WL_NAN_DP_TERM_WITH_DISABLE 5u
+#define WL_NAN_DP_TERM_WITH_NDI_DEL 6u
+#define WL_NAN_DP_TERM_WITH_PEER_HB_FAIL 7u
+#define WL_NAN_DP_TERM_WITH_HOST_IOVAR 8u
+#define WL_NAN_DP_TERM_WITH_ESTB_FAIL 9u
+#define WL_NAN_DP_TERM_WITH_SCHED_REJECT 10u
+
+/* Common event structure for Nan Datapath
+ * Used for sending NDP Indication, Response, Confirmation, Security Install and Establish events
+ */
+typedef struct wl_nan_ev_datapath_cmn {
+ uint8 type;
+ /* ndp_id is valid only if type is unicast */
+ uint8 ndp_id;
+ uint8 pub_id;
+ uint8 security;
+ /* Following two fields are valid only if type is unicast */
+ struct ether_addr initiator_ndi;
+ struct ether_addr responder_ndi;
+ struct ether_addr peer_nmi;
+ uint8 status;
+ uint8 role;
+ /* Following two fields are valid only if type is multicast */
+ uint8 nmsg_id[WL_NAN_DATA_NMSGID_LEN];
+ uint8 mc_id;
+ uint8 event_cause;
+ uint16 opt_tlv_len;
+ uint8 opt_tlvs[];
+} wl_nan_ev_datapath_cmn_t;
+
+/* this is obsolete - DON'T USE */
+typedef struct wl_nan_ev_datapath_end {
+ uint8 ndp_id;
+ uint8 status;
+ uint8 pad[2];
+ struct ether_addr peer_nmi;
+ struct ether_addr peer_ndi;
+} wl_nan_ev_datapath_end_t;
+
+typedef struct wl_tsf {
+ uint32 tsf_l;
+ uint32 tsf_h;
+} wl_tsf_t;
+
+typedef struct wl_nan_ev_rx_bcn {
+ wl_tsf_t tsf;
+ uint16 bcn_len;
+ uint8 pad[2];
+ uint8 bcn[0];
+} wl_nan_ev_rx_bcn_t;
+
+/* reason of host assist request */
+enum wl_nan_host_assist_reason {
+ WL_NAN_HAST_REASON_NONE = 0,
+
+ /* reason for host assist request */
+ WL_NAN_HAST_REASON_NO_CRB = 1, /* NDL: no common NA */
+ WL_NAN_HAST_REASON_NDC = 2, /* NDL: NDC not compliant */
+ WL_NAN_HAST_REASON_IMMUT = 3, /* NDL: peer immutable schedule */
+ WL_NAN_HAST_REASON_RNG = 4, /* NDL: ranging schedule */
+ WL_NAN_HAST_REASON_QOS = 5, /* NDL: QoS not satisfied */
+ WL_NAN_HAST_REASON_SVC_NDI_MISSING = 6, /* SD: SVC NDI missing */
+ WL_NAN_HAST_REASON_PEER_SCB_NORESOURCE = 7, /* NDP: no more peer scb available */
+ WL_NAN_HAST_REASON_NDP_PMK_MISSING = 8, /* NDP: PMK needed from host */
+ WL_NAN_HAST_REASON_SVC_NDI_AND_PMK_MISSING = 9 /* PMK and SVC NDI needed from host */
+};
+typedef uint8 wl_nan_host_assist_reason_t;
+
+/* WL_NAN_XTLV_HOST_ASSIST_REQ */
+typedef struct wl_nan_host_assist_req {
+ struct ether_addr peer_nmi; /* peer nmi */
+ struct ether_addr initiator_ndi; /* initiator ndi */
+ uint8 indp_id; /* initiator NDP ID */
+ wl_nan_frame_type_t frm_type; /* received NAF type */
+ wl_nan_host_assist_reason_t reason; /* reason of host assist request */
+ uint8 pub_id; /* Publish ID (valid for WL_NAN_FRM_TYPE_DP_REQ) */
+ uint8 pad[2];
+} wl_nan_host_assist_req_t;
+
+/* nan sub-features */
+enum wl_nan_fw_cap_flag1 {
+ WL_NAN_FW_CAP_FLAG_NONE = 0x00000000, /* dummy */
+ WL_NAN_FW_CAP_FLAG1_AVAIL = 0x00000001,
+ WL_NAN_FW_CAP_FLAG1_DISC = 0x00000002,
+ WL_NAN_FW_CAP_FLAG1_DATA = 0x00000004,
+ WL_NAN_FW_CAP_FLAG1_SEC = 0x00000008,
+ WL_NAN_FW_CAP_FLAG1_RANGE = 0x00000010,
+ WL_NAN_FW_CAP_FLAG1_WFA_TB = 0x00000020,
+ WL_NAN_FW_CAP_FLAG1_DAM = 0x00000040,
+ WL_NAN_FW_CAP_FLAG1_DAM_STRICT = 0x00000080,
+ WL_NAN_FW_CAP_FLAG1_DAM_AUTO = 0x00000100,
+ WL_NAN_FW_CAP_FLAG1_DBG = 0x00000200,
+ WL_NAN_FW_CAP_FLAG1_BCMC_IN_NDC = 0x00000400,
+ WL_NAN_FW_CAP_FLAG1_CHSTATS = 0x00000800,
+ WL_NAN_FW_CAP_FLAG1_ASSOC_COEX = 0x00001000,
+ WL_NAN_FW_CAP_FLAG1_FASTDISC = 0x00002000,
+ WL_NAN_FW_CAP_FLAG1_NO_ID_GEN = 0x00004000,
+ WL_NAN_FW_CAP_FLAG1_DP_OPAQUE_DATA = 0x00008000,
+ WL_NAN_FW_CAP_FLAG1_NSR2 = 0x00010000,
+ WL_NAN_FW_CAP_FLAG1_NSR2_SAVE = 0x00020000,
+ WL_NAN_FW_CAP_FLAG1_NANHO = 0x00040000,
+ WL_NAN_FW_CAP_FLAG1_NDPE = 0x00080000,
+ WL_NAN_FW_CAP_FLAG1_OOB_AF = 0x00100000,
+ WL_NAN_FW_CAP_FLAG1_PMK_PER_NDP = 0x00200000
+};
+
+/* WL_NAN_XTLV_GEN_FW_CAP */
+typedef struct wl_nan_fw_cap {
+ uint32 flags1; /* nan sub-features compiled in firmware */
+ uint32 flags2; /* for more sub-features in future */
+ uint8 max_svc_publishes; /* max num of service publish */
+ uint8 max_svc_subscribes; /* max num of service subscribe */
+ uint8 max_lcl_sched_maps; /* max num of local schedule map */
+ uint8 max_lcl_ndc_entries; /* max num of local NDC entry */
+ uint8 max_lcl_ndi_interfaces; /* max num of local NDI interface */
+ uint8 max_peer_entries; /* max num of peer entry */
+ uint8 max_ndp_sessions; /* max num of NDP session */
+ uint8 max_concurrent_nan_clusters; /* max num of concurrent clusters */
+ uint16 max_service_name_len; /* max service name length */
+ uint16 max_match_filter_len; /* max match filter length */
+ uint16 max_total_match_filter_len; /* max total match filter length */
+ uint16 max_service_specific_info_len; /* max service specific info length */
+ uint16 max_vsa_data_len; /* max vendor specific attrib data length */
+ uint16 max_mesh_data_len; /* max mesh data length */
+ uint16 max_app_info_len; /* max app info length */
+ uint16 max_sdea_svc_specific_info_len; /* max sdea svc specific info length */
+ uint8 max_queued_tx_followup_msgs; /* max no. of queued tx followup msgs */
+ uint8 max_subscribe_address; /* max subscribe addresses supported */
+ uint8 ndp_supported_bands; /* number of ndp supported bands */
+ uint8 is_ndp_security_supported; /* if secure ndp is supported */
+ uint8 cipher_suites_supported_mask; /* bitmask for suites supported */
+ uint8 pad[3];
+} wl_nan_fw_cap_t;
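+
+/*
+ * Usage sketch (illustrative only): testing a sub-feature bit reported via
+ * WL_NAN_XTLV_GEN_FW_CAP.
+ */
+#if 0 /* example only, not compiled */
+static bool
+nan_fw_cap_has_ndpe(const wl_nan_fw_cap_t *cap)
+{
+ return (cap->flags1 & WL_NAN_FW_CAP_FLAG1_NDPE) != 0;
+}
+#endif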
+
+/* WL_NAN_XTLV_GEN_FW_CAP_V2 */
+typedef struct wl_nan_fw_cap_v2 {
+ uint32 flags1; /* nan sub-features compiled in firmware */
+ uint32 flags2; /* for more sub-features in future */
+ uint8 max_svc_publishes; /* max num of service publish */
+ uint8 max_svc_subscribes; /* max num of service subscribe */
+ uint8 max_lcl_sched_maps; /* max num of local schedule map */
+ uint8 max_lcl_ndc_entries; /* max num of local NDC entry */
+ uint8 max_lcl_ndi_interfaces; /* max num of local NDI interface */
+ uint8 max_peer_entries; /* max num of peer entry */
+ uint8 max_peer_sched_maps; /* max num of peer schedule maps */
+ uint8 max_ndp_sessions; /* max num of NDP session */
+ uint32 cipher_suites_supported_mask; /* bitmask for supported cipher suites */
+ uint32 reserved_uint32_1; /* reserved for future sub-features */
+ uint32 reserved_uint32_2; /* reserved for future sub-features */
+ uint32 reserved_uint32_3; /* reserved for future sub-features */
+ uint32 reserved_uint32_4; /* reserved for future sub-features */
+} wl_nan_fw_cap_v2_t;
+
+/* nan cipher suite support mask bits */
+#define WL_NAN_CIPHER_SUITE_SHARED_KEY_128_MASK 0x01
+#define WL_NAN_CIPHER_SUITE_SHARED_KEY_256_MASK 0x02
+
+/* NAN Save Restore */
+#define WL_NAN_NSR2_INFO_MAX_SIZE 2048 /* arbitrary */
+
+/* WL_NAN_XTLV_NSR2_PEER */
+typedef struct wl_nan_nsr_peer_info {
+ struct ether_addr nmi;
+ uint8 l_min_slots; /* local QoS min slots */
+ uint8 p_min_slots; /* peer QoS min slots */
+ uint16 l_max_latency; /* local QoS max latency */
+ uint16 p_max_latency; /* peer QoS max latency */
+ uint8 num_map; /* num of NA map */
+ uint8 pad;
+ uint16 attrs_len; /* total len of following attrs */
+ uint8 attrs[]; /* peer attributes (NA/NDC/ULW/DevCap/Element container) */
+} wl_nan_nsr_peer_info_t;
+
+enum wl_nan_nsr_ndp_flag {
+ WL_NAN_NSR_NDP_FLAG_LCL_INITATOR = 0x0001,
+ WL_NAN_NSR_NDP_FLAG_MCAST = 0x0002
+};
+typedef uint16 wl_nan_nsr_ndp_flag_t;
+
+/* WL_NAN_XTLV_NSR2_NDP */
+typedef struct wl_nan_nsr_ndp_info {
+ struct ether_addr peer_nmi;
+ struct ether_addr peer_ndi;
+ struct ether_addr lcl_ndi;
+ uint16 flags; /* wl_nan_nsr_ndp_flag_t */
+ uint8 pub_id; /* publish id */
+ uint8 indp_id; /* initiator's ndp id */
+ uint8 last_token; /* last NDP dialog token */
+ uint8 pad;
+} wl_nan_nsr_ndp_info_t;
+
+/* NAN2.0 Ranging definitions */
+
+/* result indication bit map */
+#define NAN_RANGE_INDICATION_NONE 0
+#define NAN_RANGE_INDICATION_CONT (1<<0)
+#define NAN_RANGE_INDICATION_INGRESS (1<<1)
+#define NAN_RANGE_INDICATION_EGRESS (1<<2)
+
+/* responder flags */
+#define NAN_RANGE_FLAG_AUTO_ACCEPT (1 << 0)
+#define NAN_RANGE_FLAG_RESULT_REQUIRED (1 << 1)
+
+typedef struct wl_nan_range_req {
+ struct ether_addr peer;
+ uint8 publisher_id;
+ uint8 indication; /* bit map for result event */
+ uint32 resolution; /* default millimeters */
+ uint32 ingress; /* ingress limit in mm */
+ uint32 egress; /* egress limit in mm */
+ uint32 interval; /* max interval(in TU) b/w two ranging measurements */
+} wl_nan_range_req_t;
+
+#define NAN_RNG_REQ_IOV_LEN 24
+
+typedef uint8 wl_nan_range_id;
+
+typedef struct wl_nan_range_resp {
+ wl_nan_range_id range_id;
+ uint8 flags; /* auto response, range result required */
+ uint8 status; /* accept, reject */
+ uint8 indication; /* bit map for result event */
+ uint32 resolution; /* default millimeters */
+ uint32 ingress; /* ingress limit in mm */
+ uint32 egress; /* egress limit in mm */
+ uint32 interval; /* max interval(in TU) b/w two ranging measurements */
+} wl_nan_range_resp_t;
+
+#define NAN_RNG_RESP_IOV_LEN 20
+
+#define NAN_RNG_TERM_FLAG_IMMEDIATE (1u << 0u) /* Do not wait for TXS */
+#define NAN_RNG_TERM_FLAG_SILENT_TEARDOWN (1u << 1u) /* Do not TX rng_term */
+#define NAN_RNG_TERM_FLAG_EVENT_HOST (1u << 2u) /* Notify event to host */
+#define NAN_RNG_TERM_FLAG_OPT_TLVS (1u << 3u) /* opt tlvs present */
+
+typedef struct wl_nan_range_cancel_ext {
+ wl_nan_range_id range_id;
+ uint8 flags;
+ uint8 pad[2];
+} wl_nan_range_cancel_ext_t;
+
+#define NAN_RNG_CANCEL_IOV_FIXED_LEN 4u
+
+#define NAN_RNG_MAX_IOV_LEN 255
+
+typedef struct wl_nan_ev_rng_req_ind {
+ struct ether_addr peer_m_addr;
+ uint8 rng_id;
+ /* ftm parameters */
+ uint8 max_burst_dur;
+ uint8 min_ftm_delta;
+ uint8 max_num_ftm;
+ uint8 ftm_format_bw;
+ /* location info availability bit map */
+ uint8 lc_info_avail;
+ /* Last movement indication */
+ uint16 last_movement;
+ uint8 pad[2];
+} wl_nan_ev_rng_req_ind_t;
+
+#define NAN_RNG_REQ_IND_SIZE 14
+
+typedef struct wl_nan_ev_rng_rpt_ind {
+ uint32 dist_mm; /* in millimeter */
+ struct ether_addr peer_m_addr;
+ uint8 indication; /* indication definitions mentioned above */
+ uint8 rng_id;
+} wl_nan_ev_rng_rpt_ind_t;
+
+#define NAN_RNG_RPT_IND_SIZE 12
+
+/* number of continuous ranging crbs which can be idle,
+* after which the ranging session will be terminated.
+* Default value is 5. Set to zero to disable the
+* idle timeout functionality.
+*/
+typedef uint8 wl_nan_range_idle_count_t;
+
+/* nan ranging termination reason codes */
+#define NAN_RNG_TERM_REASON_CODE_BASE 100u
+#define NAN_RNG_TERM_REASON_CODE(reason_code) \
+ (NAN_RNG_TERM_REASON_CODE_BASE + (reason_code))
+
+/* Reason Code Unspecified */
+#define NAN_RNG_TERM_UNSPECIFIED NAN_RNG_TERM_REASON_CODE(0u)
+/* no ftms from peer */
+#define NAN_RNG_TERM_IDLE_TIMEOUT NAN_RNG_TERM_REASON_CODE(1u)
+/* On Peer Termination */
+#define NAN_RNG_TERM_PEER_REQ NAN_RNG_TERM_REASON_CODE(2u)
+/* On User or Host Termination */
+#define NAN_RNG_TERM_USER_REQ NAN_RNG_TERM_REASON_CODE(3u)
+/* On FSM Timeout, waiting for Resp from peer */
+#define NAN_RNG_TERM_RNG_RESP_TIMEOUT NAN_RNG_TERM_REASON_CODE(4u)
+/* On range resp, reject from peer */
+#define NAN_RNG_TERM_RNG_RESP_REJ NAN_RNG_TERM_REASON_CODE(5u)
+/* On range req/resp txs fail */
+#define NAN_RNG_TERM_RNG_TXS_FAIL NAN_RNG_TERM_REASON_CODE(6u)
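+
+/*
+ * Usage sketch (illustrative only): the reason codes above are offsets from
+ * NAN_RNG_TERM_REASON_CODE_BASE, e.g. NAN_RNG_TERM_PEER_REQ expands to 102.
+ */
+#if 0 /* example only, not compiled */
+static const char *
+nan_rng_term_reason_str(uint8 reason_code)
+{
+ switch (reason_code) {
+ case NAN_RNG_TERM_IDLE_TIMEOUT: return "idle timeout";
+ case NAN_RNG_TERM_PEER_REQ: return "peer request";
+ case NAN_RNG_TERM_USER_REQ: return "host request";
+ case NAN_RNG_TERM_RNG_RESP_TIMEOUT: return "resp timeout";
+ case NAN_RNG_TERM_RNG_RESP_REJ: return "peer reject";
+ case NAN_RNG_TERM_RNG_TXS_FAIL: return "txs fail";
+ default: return "unspecified";
+ }
+}
+#endif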
+
+typedef struct wl_nan_ev_rng_term_ind {
+ struct ether_addr peer_m_addr;
+ uint8 reason_code;
+ uint8 rng_id;
+} wl_nan_ev_rng_term_ind_t;
+
+#define NAN_RNG_TERM_IND_SIZE 8
+
+typedef struct wl_nan_ev_rng_resp {
+ struct ether_addr peer_m_addr;
+ uint8 status;
+ uint8 rng_id;
+} wl_nan_ev_rng_resp_t;
+
+/* Used by NDL schedule events -
+ * WL_NAN_EVENT_PEER_SCHED_UPD_NOTIF, WL_NAN_EVENT_PEER_SCHED_REQ
+ * WL_NAN_EVENT_PEER_SCHED_RESP, WL_NAN_EVENT_PEER_SCHED_CONF
+ */
+typedef struct wl_nan_ev_sched_info {
+ struct ether_addr peer_nmi;
+ uint8 ndl_status; /* applies only to sched resp/conf */
+ uint8 pad;
+ uint16 opt_tlv_len;
+ uint8 opt_tlvs[];
+} wl_nan_ev_sched_info_t;
+
+/* WL_NAN_EVENT_CHAN_BOUNDARY */
+typedef struct wl_nan_chbound_info {
+ uint32 cluster_tsf_h; /* Current Cluster TSF High */
+ uint32 cluster_tsf_l; /* Current Cluster TSF Low */
+ uint16 cur_chspec;
+ uint16 opt_tlvs_len;
+ uint8 opt_tlvs[];
+} wl_nan_chbound_info_t;
+
+/* channel stats (includes nan & non-nan) */
+
+/* WL_NAN_XTLV_CCA_STATS */
+typedef struct wl_nan_cca_stats {
+ uint16 chanspec;
+ uint8 pad[2];
+ uint32 sample_dur;
+
+ uint32 congest_ibss;
+ uint32 congest_obss;
+ uint32 interference;
+} wl_nan_cca_stats_t;
+
+/* WL_NAN_XTLV_PER_STATS */
+typedef struct wl_nan_per_stats_s {
+ uint16 chanspec;
+ uint8 pad[2];
+ uint32 sample_dur;
+
+ uint32 txframe; /* tx data frames */
+ uint32 txretrans; /* tx mac retransmits */
+ uint32 txerror; /* tx data errors */
+ uint32 txctl; /* tx management frames */
+ uint32 txserr; /* tx status errors */
+
+ uint32 rxframe; /* rx data frames */
+ uint32 rxerror; /* rx data errors */
+ uint32 rxctl; /* rx management frames */
+
+ uint32 txbar; /* tx bar */
+ uint32 rxbar; /* rx bar */
+ uint32 txaction; /* tx action frame */
+ uint32 rxaction; /* rx action frame */
+ uint32 txlost; /* lost packets reported in txs */
+ uint32 rxback; /* rx block ack */
+ uint32 txback; /* tx block ack */
+} wl_nan_per_stats_t;
+
+/* fast discovery beacon config
+ * WL_NAN_XTLV_CFG_FDISC_TBMP
+*/
+typedef struct wl_nan_fastdisc_s {
+ uint8 id;
+ uint8 bitmap_len;
+ uint8 pad[2];
+ uint8 bitmap[];
+} wl_nan_fastdisc_t;
+
+#define WL_NAN_FASTDISC_CFG_SIZE 1024 /* arbitrary */
+
+#ifdef WL_NANHO
+/* ****************** NAN Host offload specific structures ****************** */
+
+enum wl_nan_rx_mgmt_frm_type {
+ WL_NAN_RX_MGMT_FRM_BCN = 0,
+ WL_NAN_RX_MGMT_FRM_SDF = 1,
+ WL_NAN_RX_MGMT_FRM_NAF = 2
+};
+typedef uint8 wl_nan_rx_mgmt_frm_type_t;
+
+/* WL_NAN_EVENT_RX_MGMT_FRM */
+typedef struct wl_nan_event_rx_mgmt_frm {
+ uint8 frm_type; /* wl_nan_rx_mgmt_frm_type_t */
+ uint8 pad;
+ uint16 frm_len;
+ uint8 frm[];
+} wl_nan_event_rx_mgmt_frm_t;
+
+#define WL_NAN_NANHO_UPDATE_MAX_SIZE 2048 /* arbitrary */
+
+enum wl_nan_peer_entry_action {
+ WL_NAN_PEER_ENTRY_ACT_ADD = 0, /* add peer entry */
+ WL_NAN_PEER_ENTRY_ACT_REMOVE = 1 /* remove peer entry */
+};
+typedef uint8 wl_nan_peer_entry_action_t;
+
+/* WL_NAN_XTLV_NANHO_PEER_ENTRY */
+typedef struct wl_nan_peer_entry
+{
+ struct ether_addr nmi; /* nmi of peer device */
+ uint8 action; /* wl_nan_peer_entry_action_t */
+ uint8 pad;
+} wl_nan_peer_entry_t;
+
+enum wl_nan_dcaplist_action {
+ WL_NAN_DCAPLIST_ACT_UPDATE = 0, /* update or add */
+ WL_NAN_DCAPLIST_ACT_REMOVE = 1 /* remove (only for peer dcap cache entry) */
+};
+typedef uint8 wl_nan_dcaplist_action_t;
+
+/* WL_NAN_XTLV_NANHO_DCAPLIST */
+typedef struct wl_nan_dev_cap_list
+{
+ struct ether_addr nmi; /* null for local device */
+ uint8 action; /* wl_nan_dcaplist_action_t */
+ /* optional fields for WL_NAN_DCAPLIST_ACT_UPDATE */
+ uint8 num_maps;
+ uint8 dcap[]; /* list of nan_dev_cap_t */
+} wl_nan_dev_cap_list_t;
+
+typedef struct wl_nan_dev_chan_sched {
+ uint16 num_slots; /* number of slots in schedule */
+ uint16 period; /* period of channel schedule (TU) */
+ uint8 slot_dur; /* slot duration (TU) */
+ uint8 map_id; /* map id (TBD) */
+ uint8 pad[2];
+ uint8 data[];
+ /* chanspec_t chan_sched[num_slot] */
+ /* uint8 slot_info[num_slot] */
+} wl_nan_dev_chan_sched_t;
+
+/* WL_NAN_XTLV_NANHO_DCSLIST */
+typedef struct wl_nan_dev_chan_sched_list {
+ struct ether_addr nmi; /* null for local device */
+ uint8 num_maps;
+ uint8 pad;
+ wl_nan_dev_chan_sched_t dcs[];
+} wl_nan_dev_chan_sched_list_t;
+
+/* WL_NAN_XTLV_NANHO_BLOB */
+typedef struct wl_nan_dev_blob {
+ struct ether_addr nmi; /* null for local device */
+ uint16 blob_len; /* blob len in blob[] buffer */
+ uint8 blob_type;
+ uint8 pad[3];
+ uint8 blob[];
+} wl_nan_dev_blob_t;
+
+typedef struct wl_nan_peer_ndl_state {
+ struct ether_addr nmi;
+ uint8 ndl_state; /* nan_peer_ndl_state_t */
+ uint8 pad;
+} wl_nan_peer_ndl_state_t;
+
+enum wl_nan_ndp_state_action {
+ WL_NAN_NDP_STATE_ACT_ESTABLISHED = 0,
+ WL_NAN_NDP_STATE_ACT_TERMINATED = 1
+};
+typedef uint8 wl_nan_ndp_state_action_t;
+
+/* WL_NAN_XTLV_NANHO_NDP_STATE */
+typedef struct wl_nan_ndp_state {
+ struct ether_addr peer_nmi;
+ struct ether_addr peer_ndi;
+ struct ether_addr lcl_ndi;
+ uint8 action; /* wl_nan_ndp_state_action_t */
+ uint8 pad;
+ /* TODO: secured NDP information */
+} wl_nan_ndp_state_t;
+
+/* *************** end of NAN Host offload specific structures ************** */
+#endif /* WL_NANHO */
+
+/* ********************* end of NAN section ******************************** */
+/* endif WL_NAN */
+
+#define P2P_NAN_IOC_BUFSZ 512 /* some sufficient ioc buff size */
+#define WL_P2P_NAN_IOCTL_VERSION 0x1
+
+/* container for p2p nan iovars & events */
+typedef struct wl_p2p_nan_ioc {
+ uint16 version; /* interface command or event version */
+ uint16 id; /* p2p nan ioctl cmd ID */
+ uint16 len; /* total length of data[] */
+ uint16 pad; /* padding */
+ uint8 data[]; /* var len payload of bcm_xtlv_t type */
+} wl_p2p_nan_ioc_t;
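+
+/*
+ * Illustrative sketch (not part of the driver): sizing and filling a
+ * wl_p2p_nan_ioc_t before issuing it. The use of malloc and the choice
+ * of command below are assumptions for the example only.
+ *
+ *	uint16 payload_len = 0;  // length of packed bcm_xtlv_t records
+ *	wl_p2p_nan_ioc_t *ioc =
+ *	        malloc(OFFSETOF(wl_p2p_nan_ioc_t, data) + payload_len);
+ *	ioc->version = WL_P2P_NAN_IOCTL_VERSION;
+ *	ioc->id = WL_P2P_NAN_CMD_ENABLE;
+ *	ioc->len = payload_len;  // covers data[] only
+ *	ioc->pad = 0;
+ *	// pack bcm_xtlv_t records into ioc->data before sending
+ */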
+
+/* p2p nan cmd IDs */
+enum wl_p2p_nan_cmds {
+ /* p2p nan cfg ioctls */
+ WL_P2P_NAN_CMD_ENABLE = 1,
+ WL_P2P_NAN_CMD_CONFIG = 2,
+ WL_P2P_NAN_CMD_DEL_CONFIG = 3,
+ WL_P2P_NAN_CMD_GET_INSTS = 4
+};
+
+#define WL_P2P_NAN_CONFIG_VERSION 1
+
+#define WL_P2P_NAN_DEVICE_P2P 0x0
+#define WL_P2P_NAN_DEVICE_GO 0x1
+#define WL_P2P_NAN_DEVICE_GC 0x2
+#define WL_P2P_NAN_DEVICE_INVAL 0xFF
+
+/* NAN P2P operation */
+typedef struct p2p_nan_config {
+ uint16 version; /* wl_p2p_nan_config_t structure version */
+ uint16 len; /* total length including version and variable IE */
+ uint32 flags; /* 0x1 = NEW, 0x2 = ADD, 0x4 = DEL */
+ uint8 inst_id; /* publisher/subscriber id */
+ uint8 inst_type; /* publisher/subscriber */
+ uint8 dev_role; /* P2P device role: 'P2P','GO' or 'GC' */
+ uint8 pad1; /* padding */
+ uint8 resolution; /* Availability bitmap resolution */
+ uint8 repeat; /* whether availability repeats across DWs */
+ uint16 ie_len; /* variable ie len */
+ struct ether_addr dev_mac; /* P2P device address */
+ uint16 pad2; /* Padding */
+ uint32 avail_bmap; /* availability interval bitmap */
+ uint32 chanspec; /* Chanspec */
+ uint8 ie[]; /* hex ie data */
+} wl_p2p_nan_config_t;
+
+#define WL_P2P_NAN_SERVICE_LIST_VERSION 1
+typedef enum wl_nan_service_type {
+ WL_NAN_SVC_INST_PUBLISHER = 1,
+ WL_NAN_SVC_INST_SUBSCRIBER = 2
+} wl_nan_service_type_t;
+
+#define WL_P2P_NAN_CONFIG_NEW 0x1
+#define WL_P2P_NAN_CONFIG_ADD 0x2
+#define WL_P2P_NAN_CONFIG_DEL 0x4
+
+typedef struct wl_nan_svc_inst {
+ uint8 inst_id; /* publisher/subscriber id */
+ uint8 inst_type; /* publisher/subscriber */
+} wl_nan_svc_inst_t;
+
+typedef struct wl_nan_svc_inst_list {
+ uint16 version; /* this structure version */
+ uint16 len; /* total length including version and variable svc list */
+ uint16 count; /* service instance count */
+ uint16 pad; /* padding */
+ wl_nan_svc_inst_t svc[1]; /* service instance list */
+} wl_nan_svc_inst_list_t;
+
+#define NAN_POST_DISC_P2P_DATA_VER 1
+/* This structure is used to send peer p2p data with the
+ * NAN discovery result
+ */
+typedef struct nan_post_disc_p2p_data {
+ uint8 ver; /* this structure version */
+ uint8 dev_role; /* P2P Device role */
+ uint8 resolution; /* Availability bitmap resolution */
+ uint8 repeat; /* whether availability repeats across DWs */
+ struct ether_addr dev_mac; /* P2P device address */
+ uint16 pad1; /* Padding */
+ uint32 chanspec; /* Chanspec */
+ uint32 avl_bmp; /* availability interval bitmap */
+} nan_post_disc_p2p_data_t;
+
+enum {
+ WL_AVAIL_NONE = 0x0000,
+ WL_AVAIL_LOCAL = 0x0001,
+ WL_AVAIL_PEER = 0x0002,
+ WL_AVAIL_NDC = 0x0003,
+ WL_AVAIL_IMMUTABLE = 0x0004,
+ WL_AVAIL_RESPONSE = 0x0005,
+ WL_AVAIL_COUNTER = 0x0006,
+ WL_AVAIL_RANGING = 0x0007,
+ WL_AVAIL_UPD_POT = 0x0008, /* modify potential, keep committed/conditional */
+ WL_AVAIL_UPD_COM_COND = 0x0009, /* modify committed/conditional, keep potential */
+ WL_AVAIL_REMOVE_MAP = 0x000A, /* remove map */
+ WL_AVAIL_FRM_TYPE = 0x000B, /* specify frame types containing NA */
+ WL_AVAIL_TYPE_MAX = WL_AVAIL_FRM_TYPE /* add new types above and update this */
+};
+#define WL_AVAIL_TYPE_MASK 0x000F
+#define WL_AVAIL_FLAG_REMOVE 0x2000 /* remove schedule attr of given type & map id */
+#define WL_AVAIL_FLAG_SELECTED_NDC 0x4000
+#define WL_AVAIL_FLAG_RAW_MODE 0x8000
+#define WL_AVAIL_FLAGS_MASK 0xFF00
+#define WL_AVAIL_FLAGS_SHIFT 8
+
+typedef int16 wl_avail_flags_t;
+
+/* availability entry flags */
+enum {
+ WL_AVAIL_ENTRY_NONE = 0x0000,
+ WL_AVAIL_ENTRY_COM = 0x0001, /* committed */
+ WL_AVAIL_ENTRY_POT = 0x0002, /* potential */
+ WL_AVAIL_ENTRY_COND = 0x0004, /* conditional */
+ WL_AVAIL_ENTRY_PAGED = 0x0008, /* P-NDL */
+ WL_AVAIL_ENTRY_USAGE = 0x0030, /* usage preference */
+ WL_AVAIL_ENTRY_BIT_DUR = 0x00C0, /* bit duration */
+ WL_AVAIL_ENTRY_BAND_PRESENT = 0x0100, /* band present */
+ WL_AVAIL_ENTRY_CHAN_PRESENT = 0x0200, /* channel information present */
+ WL_AVAIL_ENTRY_CHAN_ENTRY_PRESENT = 0x0400, /* channel entry (opclass+bitmap) */
+ /* free to use 0x0800 */
+ WL_AVAIL_ENTRY_RXNSS = 0xF000 /* max num of spatial stream RX */
+};
+
+/* bit duration */
+enum {
+ WL_AVAIL_BIT_DUR_16 = 0, /* 16TU */
+ WL_AVAIL_BIT_DUR_32 = 1, /* 32TU */
+ WL_AVAIL_BIT_DUR_64 = 2, /* 64TU */
+ WL_AVAIL_BIT_DUR_128 = 3, /* 128TU */
+};
+
+/* period */
+enum {
+ WL_AVAIL_PERIOD_0 = 0, /* 0TU */
+ WL_AVAIL_PERIOD_128 = 1, /* 128TU */
+ WL_AVAIL_PERIOD_256 = 2, /* 256TU */
+ WL_AVAIL_PERIOD_512 = 3, /* 512TU */
+ WL_AVAIL_PERIOD_1024 = 4, /* 1024TU */
+ WL_AVAIL_PERIOD_2048 = 5, /* 2048TU */
+ WL_AVAIL_PERIOD_4096 = 6, /* 4096TU */
+ WL_AVAIL_PERIOD_8192 = 7, /* 8192TU */
+};
+
+/* band */
+enum {
+ WL_AVAIL_BAND_NONE = 0, /* reserved */
+ WL_AVAIL_BAND_SUB1G = 1, /* sub-1 GHz */
+ WL_AVAIL_BAND_2G = 2, /* 2.4 GHz */
+ WL_AVAIL_BAND_3G = 3, /* reserved (for 3.6 GHz) */
+ WL_AVAIL_BAND_5G = 4, /* 4.9 and 5 GHz */
+ WL_AVAIL_BAND_60G = 5, /* reserved (for 60 GHz) */
+};
+
+#define WL_AVAIL_ENTRY_TYPE_MASK 0x000F
+#define WL_AVAIL_ENTRY_USAGE_MASK 0x0030 /* up to 4 usage preferences */
+#define WL_AVAIL_ENTRY_USAGE_SHIFT 4
+#define WL_AVAIL_ENTRY_USAGE_VAL(_flags) (((_flags) & WL_AVAIL_ENTRY_USAGE_MASK) \
+ >> WL_AVAIL_ENTRY_USAGE_SHIFT)
+
+#define WL_AVAIL_ENTRY_BIT_DUR_MASK 0x00C0 /* 0:16TU, 1:32TU, 2:64TU, 3:128TU */
+#define WL_AVAIL_ENTRY_BIT_DUR_SHIFT 6
+#define WL_AVAIL_ENTRY_BIT_DUR_VAL(_flags) (((_flags) & WL_AVAIL_ENTRY_BIT_DUR_MASK) \
+ >> WL_AVAIL_ENTRY_BIT_DUR_SHIFT)
+
+#define WL_AVAIL_ENTRY_BAND_MASK 0x0100 /* 0=band not present, 1=present */
+#define WL_AVAIL_ENTRY_BAND_SHIFT 8
+
+#define WL_AVAIL_ENTRY_CHAN_MASK 0x0200 /* 0=channel info not present, 1=present */
+#define WL_AVAIL_ENTRY_CHAN_SHIFT 9
+
+#define WL_AVAIL_ENTRY_CHAN_ENTRY_MASK 0x0400 /* 0=chanspec, 1=hex channel entry */
+#define WL_AVAIL_ENTRY_CHAN_ENTRY_SHIFT 10
+
+#define WL_AVAIL_ENTRY_RXNSS_MASK 0xF000
+#define WL_AVAIL_ENTRY_RXNSS_SHIFT 12
+#define WL_AVAIL_ENTRY_RXNSS_VAL(_flags) (((_flags) & WL_AVAIL_ENTRY_RXNSS_MASK) \
+ >> WL_AVAIL_ENTRY_RXNSS_SHIFT)
+#define WL_AVAIL_ENTRY_RXNSS_MAX 15 /* 0-15 */
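+
+/*
+ * Illustrative sketch (assumed usage): decoding the flags word of a
+ * wl_avail_entry_t (defined below) with the masks and helper macros above.
+ *
+ *	uint16 flags = entry->flags;
+ *	uint16 type  = flags & WL_AVAIL_ENTRY_TYPE_MASK;   // COM/POT/COND/...
+ *	uint16 usage = WL_AVAIL_ENTRY_USAGE_VAL(flags);    // 0..3
+ *	uint16 bdur  = WL_AVAIL_ENTRY_BIT_DUR_VAL(flags);  // WL_AVAIL_BIT_DUR_*
+ *	uint16 rxnss = WL_AVAIL_ENTRY_RXNSS_VAL(flags);    // 0..15
+ *	int has_chan = (flags & WL_AVAIL_ENTRY_CHAN_PRESENT) != 0;
+ */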
+
+/* mask for channel_entry (to be obsoleted) */
+#define WL_AVAIL_ENTRY_OPCLASS_MASK 0xFF
+#define WL_AVAIL_ENTRY_CHAN_BITMAP_MASK 0xFF00
+#define WL_AVAIL_ENTRY_CHAN_BITMAP_SHIFT 8
+#define WL_AVAIL_ENTRY_CHAN_BITMAP_VAL(_info) (((_info) & WL_AVAIL_ENTRY_CHAN_BITMAP_MASK) \
+ >> WL_AVAIL_ENTRY_CHAN_BITMAP_SHIFT)
+
+/* Used for raw channel entry field input */
+#define MAX_CHAN_ENTRY_LEN 6
+
+typedef struct wl_avail_entry {
+ uint16 length; /* total length */
+ uint16 start_offset; /* in TUs, multiply by 16 for total offset */
+ union {
+ uint32 channel_info; /* either chanspec or hex channel entry (opclass +
+ * bitmap per NAN spec), as indicated by setting
+ * WL_AVAIL_ENTRY_HEX_CHAN_ENTRY flag
+ */
+ uint32 band; /* defined by WL_BAND enum, 2=2.4GHz, 4=5GHz */
+ uint8 channel_entry[MAX_CHAN_ENTRY_LEN];
+ uint8 align[8]; /* aligned len of union in structure (not for use)
+ * if member of union is changed,
+ * update length of align[] accordingly.
+ */
+ } u; /* band or channel value, 0=all band/channels */
+ uint8 sched_map_id; /* avail map id associated with sched entry */
+ uint8 pad;
+ uint8 period; /* in TUs, defined by WL_AVAIL_PERIOD enum
+ * 1:128, 2:256, 3:512, 4:1024, 5:2048, 6:4096,
+ * 7:8192
+ */
+ uint8 bitmap_len;
+ uint16 flags; /* defined by avail entry flags enum:
+ * type, usage pref, bit duration, rx nss,
+ * and band, channel or channel entry
+ */
+ uint8 bitmap[]; /* time bitmap */
+} wl_avail_entry_t;
+
+#define WL_AVAIL_VERSION 1 /* current wl_avail version */
+
+typedef struct wl_avail {
+ uint16 length; /* total length */
+ uint16 flags; /* LSB - avail type (defined by WL_AVAIL enum)
+ * MSB - avail flags
+ */
+ uint8 id; /* id used for multiple maps/avail */
+ uint8 lndc_id; /* ndc id used in multi-ndc case */
+ uint8 version;
+ uint8 pad;
+ struct ether_addr addr; /* peer mac address or ndc id */
+ uint8 num_entries;
+ uint8 unused_byte;
+ /* add additional fields above this line */
+ uint8 entry[];
+} wl_avail_t;
+
+#define WL_AVAIL_MIN_LEN(n) ((n) ? OFFSETOF(wl_avail_t, entry) + \
+ ((n) * OFFSETOF(wl_avail_entry_t, bitmap)) : 0)
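+
+/*
+ * Example (assumed usage): WL_AVAIL_MIN_LEN(n) gives the minimum buffer
+ * length for a wl_avail_t carrying n entries whose time bitmaps are
+ * still empty, e.g.:
+ *
+ *	uint16 min_len = WL_AVAIL_MIN_LEN(2); // header + two entry headers
+ */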
+
+/* unaligned schedule (window) */
+typedef struct wl_avail_ulw {
+ uint8 id; /* schedule ID */
+ uint8 overwrite; /* bit 0: overwrite all
+ * 1-4: map ID if overwrite all is 0
+ */
+ uint16 flags;
+ uint32 start; /* start time of first ULW, in us */
+ uint32 dur; /* duration of ULW, in us */
+ uint32 period; /* time between consecutive ULWs, in us */
+ union {
+ uint32 chanspec;
+ uint32 band;
+ uint8 chan_entry[MAX_CHAN_ENTRY_LEN];
+ uint8 pad[8];
+ } u;
+ uint8 cntdwn; /* remaining ULWs before schedule ends */
+ uint8 pad[3];
+} wl_avail_ulw_t;
+
+/* unset: NAN is not available during ULW, set: NAN is avail depending on ctrl flags */
+#define WL_NAN_ULW_CTRL_PRESENT (1 << 0)
+/* unset: band, set: channel */
+#define WL_NAN_ULW_CTRL_TYPE (1 << 1)
+/* set: NAN is available on specified band/channel */
+#define WL_NAN_ULW_CTRL_AVAIL (1 << 2)
+/* channel is provided in raw attribute format */
+#define WL_NAN_ULW_CTRL_RAW_CHAN (1 << 3)
+
+/* nan wfa testmode operations */
+enum {
+ WL_NAN_WFA_TM_IGNORE_TERMINATE_NAF = 0x00000001,
+ WL_NAN_WFA_TM_IGNORE_RX_DATA_OUTSIDE_CRB = 0x00000002,
+ WL_NAN_WFA_TM_ALLOW_TX_DATA_OUTSIDE_CRB = 0x00000004,
+ WL_NAN_WFA_TM_ENFORCE_NDL_COUNTER = 0x00000008,
+ WL_NAN_WFA_TM_BYPASS_NDL_PROPOSAL_VALIDATION = 0x00000010,
+ /* allow data(pings) tx while ndp sec negotiation */
+ WL_NAN_WFA_TM_SEC_SEND_PINGS_BYPASS_NDP_SM = 0x00000020,
+ /* generate and insert incorrect mic */
+ WL_NAN_WFA_TM_SEC_INCORRECT_MIC = 0x00000040,
+ /* send m4 reject deliberately */
+ WL_NAN_WFA_TM_SEC_REJECT_STATUS4M4 = 0x00000080,
+ /* send mgmt frame (for eg. ndp terminate) in clear txt (bypass security) */
+ WL_NAN_WFA_TM_SEC_SEND_MGMT_CLEAR = 0x00000100,
+ /* validate qos */
+ WL_NAN_WFA_TM_NDL_QOS_VALIDATE = 0x00000200,
+ /* firmware generated schedule update */
+ WL_NAN_WFA_TM_GEN_SCHED_UPD = 0x00000400,
+ /* add lower 4-bytes of TSF to configured start time */
+ WL_NAN_WFA_TM_ULW_START_TIME = 0x00000800,
+ /* enable schedule validation for SDF */
+ WL_NAN_WFA_TM_SDF_SCHED_VALIDATE = 0x00001000,
+ /* bypass raw NA iovar */
+ WL_NAN_WFA_TM_SKIP_RAW_NA_BLOB = 0x00002000,
+ /* overwrite local NA with peer NA in received frame */
+ WL_NAN_WFA_TM_LOCAL_NA_OVERWRITE = 0x00004000,
+ /* randomize and self configure ndl qos(needed at responder in auto mode) */
+ WL_NAN_WFA_TM_SELF_CFG_NDL_QOS = 0x00008000,
+ /* send NAF frames only in DW */
+ WL_NAN_WFA_TM_SEND_NAF_IN_DW = 0x00010000,
+ /* restrict channels used for countered slots to Ch 6/149 only */
+ WL_NAN_WFA_TM_RESTRICT_COUNTER_SLOTS_CHAN = 0x00020000,
+ /* NDPE negative test case (4.2.5 & 4.2.6) */
+ WL_NAN_WFA_TM_NDPE_NEGATIVE_TEST_TB = 0x00040000,
+ /* Set NDPE(NAN3.0) capable bit in dev cap attr */
+ WL_NAN_WFA_TM_ENABLE_NDPE_CAP = 0x00080000,
+ /* NDPE negative test case (4.2.5.2). Enable both NDP and NDPE attributes */
+ WL_NAN_WFA_TM_ENABLE_NDP_NDPE_ATTR = 0x00100000,
+
+ /* add above & update mask */
+ WL_NAN_WFA_TM_FLAG_MASK = 0x001FFFFF
+};
+typedef uint32 wl_nan_wfa_testmode_t;
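+
+/*
+ * Example (assumed usage): testmode operations are OR'ed into a single
+ * wl_nan_wfa_testmode_t word, constrained to the defined mask.
+ *
+ *	wl_nan_wfa_testmode_t tm = WL_NAN_WFA_TM_SEND_NAF_IN_DW |
+ *	        WL_NAN_WFA_TM_GEN_SCHED_UPD;
+ *	tm &= WL_NAN_WFA_TM_FLAG_MASK; // drop any undefined bits
+ */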
+
+/* To be removed; replaced by wl_nan_vndr_payload */
+typedef struct wl_nan_vndr_ie {
+ uint32 flags; /* bitmask indicating which packet(s) contain this IE */
+ uint16 body_len; /* length of body (does not include oui field) */
+ uint8 pad[2];
+ uint8 oui[DOT11_OUI_LEN];
+ uint8 pad2;
+ uint8 body[]; /* vendor IE payload */
+} wl_nan_vndr_ie_t;
+
+typedef struct wl_nan_vndr_payload {
+ uint32 flags; /* bitmask indicating which packet(s) contain payload */
+ uint16 payload_len; /* length of payload */
+ uint8 pad[2];
+ uint8 payload[]; /* payload to be appended to NAN frame */
+} wl_nan_vndr_payload_t;
+
+typedef struct wl_nan_dev_cap {
+ uint8 bands[NAN_MAX_BANDS];
+ uint8 awake_dw[NAN_MAX_BANDS];
+ uint8 overwrite_mapid[NAN_MAX_BANDS];
+ uint8 mapid; /* dev cap mapid */
+ uint8 all_maps; /* applies to device */
+ uint8 paging;
+ uint8 pad[3];
+} wl_nan_dev_cap_t;
+
+/* arbitrary max len for frame template */
+#define WL_NAN_FRM_TPLT_MAX_LEN 1024
+
+typedef struct wl_nan_frm_tplt {
+ wl_nan_frame_type_t type;
+ uint8 pad;
+ uint16 len; /* length of template */
+ uint8 data[]; /* template */
+} wl_nan_frm_tplt_t;
+
+#define RSSI_THRESHOLD_SIZE 16
+#define MAX_IMP_RESP_SIZE 256
+
+typedef struct wl_proxd_rssi_bias {
+ int32 version; /**< version */
+ int32 threshold[RSSI_THRESHOLD_SIZE]; /**< threshold */
+ int32 peak_offset; /**< peak offset */
+ int32 bias; /**< rssi bias */
+ int32 gd_delta; /**< GD - GD_ADJ */
+ int32 imp_resp[MAX_IMP_RESP_SIZE]; /**< (Hi*Hi)+(Hr*Hr) */
+} wl_proxd_rssi_bias_t;
+
+typedef struct wl_proxd_rssi_bias_avg {
+ int32 avg_threshold[RSSI_THRESHOLD_SIZE]; /**< avg threshold */
+ int32 avg_peak_offset; /**< avg peak offset */
+ int32 avg_rssi; /**< avg rssi */
+ int32 avg_bias; /**< avg bias */
+} wl_proxd_rssi_bias_avg_t;
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_collect_info {
+ uint16 type; /**< type: 0 channel table, 1 channel smoothing table, 2 and 3 seq */
+ uint16 index; /**< The current frame index, from 1 to total_frames. */
+ uint16 tof_cmd; /**< M_TOF_CMD */
+ uint16 tof_rsp; /**< M_TOF_RSP */
+ uint16 tof_avb_rxl; /**< M_TOF_AVB_RX_L */
+ uint16 tof_avb_rxh; /**< M_TOF_AVB_RX_H */
+ uint16 tof_avb_txl; /**< M_TOF_AVB_TX_L */
+ uint16 tof_avb_txh; /**< M_TOF_AVB_TX_H */
+ uint16 tof_id; /**< M_TOF_ID */
+ uint8 tof_frame_type;
+ uint8 tof_frame_bw;
+ int8 tof_rssi;
+ int32 tof_cfo;
+ int32 gd_adj_ns; /**< group delay */
+ int32 gd_h_adj_ns; /**< group delay + threshold crossing */
+ int16 nfft; /**< number of samples stored in H */
+ uint8 num_max_cores;
+
+} BWL_POST_PACKED_STRUCT wl_proxd_collect_info_t;
+#include <packed_section_end.h>
+
+#define K_TOF_COLLECT_H_PAD 1
+#define K_TOF_COLLECT_SC_20MHZ (64)
+/* Maximum possible size of sample capture */
+#define K_TOF_COLLECT_SC_80MHZ (2*K_TOF_COLLECT_SC_20MHZ)
+/* Maximum possible size of channel dump */
+#define K_TOF_COLLECT_CHAN_SIZE (2*K_TOF_COLLECT_SC_80MHZ)
+
+/*
+ * A few extra samples are required to estimate the frequency offset.
+ * Currently 16 samples are used; this may change in the future.
+ */
+#define K_TOF_COLLECT_SAMP_SIZE_20MHZ ((2u) * (K_TOF_COLLECT_SC_20MHZ) + \
+ (16u) + (K_TOF_COLLECT_H_PAD))
+#define K_TOF_COLLECT_RAW_SAMP_SIZE_20MHZ ((2u) * (K_TOF_COLLECT_SAMP_SIZE_20MHZ))
+#define K_TOF_COLLECT_H_SIZE_20MHZ (K_TOF_COLLECT_SAMP_SIZE_20MHZ)
+#define K_TOF_COLLECT_HRAW_SIZE_20MHZ (K_TOF_COLLECT_RAW_SAMP_SIZE_20MHZ)
+
+#define K_TOF_COLLECT_SAMP_SIZE_80MHZ ((2u) * (K_TOF_COLLECT_SC_80MHZ) + \
+ (16u) + (K_TOF_COLLECT_H_PAD))
+#define K_TOF_COLLECT_RAW_SAMP_SIZE_80MHZ ((2u) * K_TOF_COLLECT_SAMP_SIZE_80MHZ)
+#define K_TOF_COLLECT_H_SIZE_80MHZ (K_TOF_COLLECT_SAMP_SIZE_80MHZ)
+#define K_TOF_COLLECT_HRAW_SIZE_80MHZ (K_TOF_COLLECT_RAW_SAMP_SIZE_80MHZ)
+#define K_TOF_COLLECT_HRAW_SIZE_20MHZ_1FS (K_TOF_COLLECT_SAMP_SIZE_20MHZ)
+#define K_TOF_COLLECT_HRAW_SIZE_80MHZ_1FS ((2u) * (K_TOF_COLLECT_SAMP_SIZE_20MHZ))
+
+#define WL_PROXD_COLLECT_DATA_VERSION_1 1
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_collect_data_v1 {
+ wl_proxd_collect_info_t info;
+ uint8 ri_rr[FTM_TPK_RI_RR_LEN];
+ /**< raw data read from phy used to adjust timestamps */
+ uint32 H[K_TOF_COLLECT_H_SIZE_20MHZ];
+} BWL_POST_PACKED_STRUCT wl_proxd_collect_data_t_v1;
+#include <packed_section_end.h>
+
+#define WL_PROXD_COLLECT_DATA_VERSION_2 2
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_collect_data_v2 {
+ wl_proxd_collect_info_t info;
+ uint8 ri_rr[FTM_TPK_RI_RR_LEN_SECURE_2_0];
+ /**< raw data read from phy used to adjust timestamps */
+ uint32 H[K_TOF_COLLECT_H_SIZE_20MHZ];
+} BWL_POST_PACKED_STRUCT wl_proxd_collect_data_t_v2;
+#include <packed_section_end.h>
+
+#define WL_PROXD_COLLECT_DATA_VERSION_3 3
+typedef struct wl_proxd_collect_data_v3 {
+ uint16 version;
+ uint16 len;
+ wl_proxd_collect_info_t info;
+ uint8 ri_rr[FTM_TPK_RI_RR_LEN_SECURE_2_0];
+ /**< raw data read from phy used to adjust timestamps */
+ uint32 H[K_TOF_COLLECT_H_SIZE_20MHZ];
+ uint32 chan[4 * K_TOF_COLLECT_CHAN_SIZE];
+} wl_proxd_collect_data_t_v3;
+
+#define WL_PROXD_COLLECT_DATA_VERSION_4 4
+typedef struct wl_proxd_collect_data_v4 {
+ uint16 version;
+ uint16 len;
+ wl_proxd_collect_info_t info;
+ uint8 ri_rr[FTM_TPK_RI_RR_LEN_SECURE_2_0_5G];
+ /**< raw data read from phy used to adjust timestamps */
+ uint32 H[K_TOF_COLLECT_H_SIZE_20MHZ];
+ uint32 chan[4 * K_TOF_COLLECT_CHAN_SIZE];
+} wl_proxd_collect_data_t_v4;
+#define WL_PROXD_COLLECT_DATA_VERSION_MAX WL_PROXD_COLLECT_DATA_VERSION_4
+
+typedef struct wl_proxd_debug_data {
+ uint8 count; /**< number of packets */
+ uint8 stage; /**< state machine stage */
+ uint8 received; /**< received or txed */
+ uint8 paket_type; /**< packet type */
+ uint8 category; /**< category field */
+ uint8 action; /**< action field */
+ uint8 token; /**< token number */
+ uint8 follow_token; /**< following token number */
+ uint16 index; /**< index of the packet */
+ uint16 tof_cmd; /**< M_TOF_CMD */
+ uint16 tof_rsp; /**< M_TOF_RSP */
+ uint16 tof_avb_rxl; /**< M_TOF_AVB_RX_L */
+ uint16 tof_avb_rxh; /**< M_TOF_AVB_RX_H */
+ uint16 tof_avb_txl; /**< M_TOF_AVB_TX_L */
+ uint16 tof_avb_txh; /**< M_TOF_AVB_TX_H */
+ uint16 tof_id; /**< M_TOF_ID */
+ uint16 tof_status0; /**< M_TOF_STATUS_0 */
+ uint16 tof_status2; /**< M_TOF_STATUS_2 */
+ uint16 tof_chsm0; /**< M_TOF_CHNSM_0 */
+ uint16 tof_phyctl0; /**< M_TOF_PHYCTL0 */
+ uint16 tof_phyctl1; /**< M_TOF_PHYCTL1 */
+ uint16 tof_phyctl2; /**< M_TOF_PHYCTL2 */
+ uint16 tof_lsig; /**< M_TOF_LSIG */
+ uint16 tof_vhta0; /**< M_TOF_VHTA0 */
+ uint16 tof_vhta1; /**< M_TOF_VHTA1 */
+ uint16 tof_vhta2; /**< M_TOF_VHTA2 */
+ uint16 tof_vhtb0; /**< M_TOF_VHTB0 */
+ uint16 tof_vhtb1; /**< M_TOF_VHTB1 */
+ uint16 tof_apmductl; /**< M_TOF_AMPDU_CTL */
+ uint16 tof_apmdudlim; /**< M_TOF_AMPDU_DLIM */
+ uint16 tof_apmdulen; /**< M_TOF_AMPDU_LEN */
+} wl_proxd_debug_data_t;
+
+/** version of the wl_wsec_info structure */
+#define WL_WSEC_INFO_VERSION 0x01
+
+/** start enum value for BSS properties */
+#define WL_WSEC_INFO_BSS_BASE 0x0100
+/* for WFA testing (CTT testbed) */
+#define WL_WSEC_INFO_TEST_BASE 0x0300
+/** size of len and type fields of wl_wsec_info_tlv_t struct */
+#define WL_WSEC_INFO_TLV_HDR_LEN OFFSETOF(wl_wsec_info_tlv_t, data)
+
+/** Allowed wl_wsec_info properties; not all of them may be supported. */
+typedef enum {
+ WL_WSEC_INFO_NONE = 0,
+ WL_WSEC_INFO_MAX_KEYS = 1,
+ WL_WSEC_INFO_NUM_KEYS = 2,
+ WL_WSEC_INFO_NUM_HW_KEYS = 3,
+ WL_WSEC_INFO_MAX_KEY_IDX = 4,
+ WL_WSEC_INFO_NUM_REPLAY_CNTRS = 5,
+ WL_WSEC_INFO_SUPPORTED_ALGOS = 6,
+ WL_WSEC_INFO_MAX_KEY_LEN = 7,
+ WL_WSEC_INFO_FLAGS = 8,
+ /* add global/per-wlc properties above */
+ WL_WSEC_INFO_BSS_FLAGS = (WL_WSEC_INFO_BSS_BASE + 1),
+ WL_WSEC_INFO_BSS_WSEC = (WL_WSEC_INFO_BSS_BASE + 2),
+ WL_WSEC_INFO_BSS_TX_KEY_ID = (WL_WSEC_INFO_BSS_BASE + 3),
+ WL_WSEC_INFO_BSS_ALGO = (WL_WSEC_INFO_BSS_BASE + 4),
+ WL_WSEC_INFO_BSS_KEY_LEN = (WL_WSEC_INFO_BSS_BASE + 5),
+ WL_WSEC_INFO_BSS_ALGOS = (WL_WSEC_INFO_BSS_BASE + 6),
+ WL_WSEC_INFO_BSS_WPA_AP_RESTRICT = (WL_WSEC_INFO_BSS_BASE + 7),
+ WL_WSEC_INFO_BSS_PMK_PASSPHRASE = (WL_WSEC_INFO_BSS_BASE + 8),
+ WL_WSEC_INFO_BSS_SAE_PWE = (WL_WSEC_INFO_BSS_BASE + 9),
+ WL_WSEC_INFO_BSS_SAE_PK = (WL_WSEC_INFO_BSS_BASE + 0xA),
+ WL_WSEC_INFO_6G_LEGACY_SEC = (WL_WSEC_INFO_BSS_BASE + 0xB),
+ /*
+ * ADD NEW ENUM ABOVE HERE
+ */
+ /* WPA3 CTT testbed specific requirement. refer to WFA CAPI command list */
+ WL_WSEC_INFO_TEST_SAE_GROUPS = (WL_WSEC_INFO_TEST_BASE), /* supported SAE groups */
+ WL_WSEC_INFO_TEST_SAE_GROUP_REJ = (WL_WSEC_INFO_TEST_BASE + 1), /* rejected group ID */
+ WL_WSEC_INFO_TEST_SAE_INVALID_VEC = (WL_WSEC_INFO_TEST_BASE + 2), /* test SAE vector */
+ WL_WSEC_INFO_TEST_PMK = (WL_WSEC_INFO_TEST_BASE + 3), /* query PMK */
+ WL_WSEC_INFO_OCV = (WL_WSEC_INFO_TEST_BASE + 4), /* OCV enable/disable */
+ WL_WSEC_INFO_TEST_INVALID_OCI = (WL_WSEC_INFO_TEST_BASE + 5), /* OCV invalid OCI */
+ WL_WSEC_INFO_TEST_PMKSA_CACHE = (WL_WSEC_INFO_TEST_BASE + 6), /* PMKSA cache on/off */
+ WL_WSEC_INFO_TEST_IGNORE_CSA = (WL_WSEC_INFO_TEST_BASE + 7), /* Ignore CSA */
+ WL_WSEC_INFO_TEST_IGNORE_ASSOCRESP = (WL_WSEC_INFO_TEST_BASE + 8), /* Ignore reassoc_resp */
+ WL_WSEC_INFO_TEST_TD_POLICY = (WL_WSEC_INFO_TEST_BASE + 9), /* set TD policy */
+ WL_WSEC_INFO_TEST_DISASSOC_MFP_TMO = (WL_WSEC_INFO_TEST_BASE + 0xA),
+ /* sending disassoc frame when MFP query timed out */
+
+ /* add per-BSS properties above */
+ WL_WSEC_INFO_MAX = 0xffff
+} wl_wsec_info_type_t;
+
+#define WL_WSEC_PMK_INFO_VERSION 0x0100 /**< version 1.0 */
+
+typedef uint16 wl_wsec_info_pmk_info_flags_t;
+typedef uint32 wl_wsec_info_pmk_lifetime_t;
+typedef uint8 wl_wsec_info_akm_mask_t;
+typedef uint16 wl_wsec_info_pmk_info_flags;
+
+enum {
+ WL_WSEC_PMK_INFO_SSID_PRESENT = 0x1,
+ WL_WSEC_PMK_DEFAULT_LIFETIME = 0x2
+};
+
+struct bcm_xlo {
+ uint16 len;
+ uint16 off;
+};
+typedef struct bcm_xlo bcm_xlo_t;
+
+/*
+** All offsets are from the beginning of the structure, which starts
+** with the version field; the length field is the total length of the
+** structure, including the version and length fields.
+*/
+typedef struct wl_wsec_info_pmk_info {
+ uint16 version; /* WL_WSEC_PMK_INFO_VERSION */
+ uint16 len;
+ uint16 next_offset; /* If non-zero, specifies the offset of the next next_offset field */
+ wl_wsec_info_pmk_info_flags_t flags; /* Fill in the input based on the flags */
+ wl_wsec_info_pmk_lifetime_t pmk_lifetime;
+ wl_wsec_info_akm_mask_t akm_mask; /* RSN authenticated key management suite */
+ uint8 rsvd[3]; /* reserved for future use */
+ bcm_xlo_t ssid; /* ssid - key, zero length is allowed for SSID */
+ bcm_xlo_t bssid; /* bssid - key, zero length = broadcast/wildcard */
+ bcm_xlo_t pass_id; /* key - optional password id for SAE */
+ bcm_xlo_t pmk; /* pmk - either 32 or 48 byte for SuiteB-192 */
+ bcm_xlo_t passphrase; /* passphrase info */
+ /* data follows */
+} wl_wsec_info_pmk_info_t;
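+
+/*
+ * Illustrative sketch (assumed usage): describing the variable data of
+ * a wl_wsec_info_pmk_info_t with bcm_xlo_t {len, off} pairs. Per the
+ * comment above, offsets count from the start of the structure; the
+ * ssid_len value is an assumption for the example only.
+ *
+ *	uint16 off = sizeof(wl_wsec_info_pmk_info_t); // data follows struct
+ *	info->ssid.off = off;
+ *	info->ssid.len = ssid_len;
+ *	off += ssid_len;
+ *	info->pmk.off = off;
+ *	info->pmk.len = 32;  // or 48 for SuiteB-192
+ *	off += info->pmk.len;
+ *	info->len = off;     // total length incl. version/len fields
+ */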
+
+typedef struct {
+ uint32 algos; /* set algos to be enabled/disabled */
+ uint32 mask; /* algos outside mask unaltered */
+} wl_wsec_info_algos_t;
+
+/** tlv used to return wl_wsec_info properties */
+typedef struct {
+ uint16 type;
+ uint16 len; /**< data length */
+ uint8 data[1]; /**< data follows */
+} wl_wsec_info_tlv_t;
+
+/** input/output data type for wsec_info iovar */
+typedef struct wl_wsec_info {
+ uint8 version; /**< structure version */
+ uint8 pad[2];
+ uint8 num_tlvs;
+ wl_wsec_info_tlv_t tlvs[1]; /**< tlv data follows */
+} wl_wsec_info_t;
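+
+/*
+ * Illustrative sketch (assumed traversal): walking the TLVs of a
+ * wl_wsec_info_t. Real code must also bounds-check each step against
+ * the containing buffer length.
+ *
+ *	wl_wsec_info_tlv_t *tlv = info->tlvs;
+ *	uint8 i;
+ *	for (i = 0; i < info->num_tlvs; i++) {
+ *		// consume tlv->type, tlv->len bytes at tlv->data
+ *		tlv = (wl_wsec_info_tlv_t *)((uint8 *)tlv +
+ *		        WL_WSEC_INFO_TLV_HDR_LEN + tlv->len);
+ *	}
+ */
+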
+#define AP_BLOCK_NONE 0x0000 /* default: No restriction */
+#define AP_ALLOW_WPA2 0x0001 /* allow WPA2PSK AP */
+#define AP_ALLOW_TSN 0x0002 /* WPA3 transition AP */
+#define AP_ALLOW_WPA3_ONLY 0x0004 /* WPA3 only AP */
+#define AP_ALLOW_MAX (AP_ALLOW_WPA2 | AP_ALLOW_TSN | \
+ AP_ALLOW_WPA3_ONLY)
+typedef struct {
+ uint32 wpa_ap_restrict; /* set WPA2 / WPA3 AP restriction policy */
+} wl_wsec_info_wpa_ap_restrict_t;
+
+/* SAE PWE derivation method */
+#define SAE_PWE_LOOP 0x1u
+#define SAE_PWE_H2E 0x2u
+
+/* SAE PK modes */
+#define WSEC_SAE_PK_NONE 0u
+#define WSEC_SAE_PK_ENABLED 0x1u
+#define WSEC_SAE_PK_ONLY 0x2u
+
+/* HE 6Ghz security bitmap */
+#define WL_HE_6G_SEC_DISABLE 0x00u /* HE 6G Open Security support disable */
+#define WL_HE_6G_SEC_OPEN 0x01u /* HE 6G Open Security support */
+
+/*
+ * randmac definitions
+ */
+#define WL_RANDMAC_MODULE "randmac"
+#define WL_RANDMAC_API_VERSION 0x0100 /**< version 1.0 */
+#define WL_RANDMAC_API_MIN_VERSION 0x0100 /**< version 1.0 */
+
+/** subcommands that can apply to randmac */
+enum {
+ WL_RANDMAC_SUBCMD_NONE = 0,
+ WL_RANDMAC_SUBCMD_GET_VERSION = 1,
+ WL_RANDMAC_SUBCMD_ENABLE = 2,
+ WL_RANDMAC_SUBCMD_DISABLE = 3,
+ WL_RANDMAC_SUBCMD_CONFIG = 4,
+ WL_RANDMAC_SUBCMD_STATS = 5,
+ WL_RANDMAC_SUBCMD_CLEAR_STATS = 6,
+
+ WL_RANDMAC_SUBCMD_MAX
+};
+typedef int16 wl_randmac_subcmd_t;
+
+/* Common IOVAR struct */
+typedef struct wl_randmac {
+ uint16 version;
+ uint16 len; /* total length */
+ wl_randmac_subcmd_t subcmd_id; /* subcommand id */
+ uint8 data[0]; /* subcommand data */
+} wl_randmac_t;
+
+#define WL_RANDMAC_IOV_HDR_SIZE OFFSETOF(wl_randmac_t, data)
+
+/* randmac version subcommand */
+typedef struct wl_randmac_version {
+ uint16 version; /* Randmac method version info */
+ uint8 pad[2]; /* Align on 4 byte boundary */
+} wl_randmac_version_t;
+
+/*
+ * Bitmask for methods supporting MAC randomization feature
+ */
+#define WL_RANDMAC_USER_NONE 0x0000
+#define WL_RANDMAC_USER_FTM 0x0001
+#define WL_RANDMAC_USER_NAN 0x0002
+#define WL_RANDMAC_USER_SCAN 0x0004
+#define WL_RANDMAC_USER_ANQP 0x0008
+#define WL_RANDMAC_USER_ALL 0xFFFF
+typedef uint16 wl_randmac_method_t;
+
+enum {
+ WL_RANDMAC_FLAGS_NONE = 0x00,
+ WL_RANDMAC_FLAGS_ADDR = 0x01,
+ WL_RANDMAC_FLAGS_MASK = 0x02,
+ WL_RANDMAC_FLAGS_METHOD = 0x04,
+ WL_RANDMAC_FLAGS_ALL = 0xFF
+};
+typedef uint8 wl_randmac_flags_t;
+
+/* randmac statistics subcommand */
+typedef struct wl_randmac_stats {
+ uint32 set_ok; /* Set random addr success count */
+ uint32 set_fail; /* Set random addr failed count */
+ uint32 set_reqs; /* Set random addr count */
+ uint32 reset_reqs; /* Restore random addr count */
+ uint32 restore_ok; /* Restore random addr success count */
+ uint32 restore_fail; /* Restore random addr failed count */
+ uint32 events_sent; /* randmac module events count */
+ uint32 events_rcvd; /* randmac events received count */
+} wl_randmac_stats_t;
+
+/* randmac config subcommand */
+typedef struct wl_randmac_config {
+ struct ether_addr addr; /* Randomized MAC address */
+ struct ether_addr addr_mask; /* bitmask for randomization */
+ wl_randmac_method_t method; /* Enabled methods */
+ wl_randmac_flags_t flags; /* What config info changed */
+ uint8 PAD;
+} wl_randmac_config_t;
+
+enum {
+ WL_RANDMAC_EVENT_NONE = 0, /**< not an event, reserved */
+ WL_RANDMAC_EVENT_BSSCFG_ADDR_SET = 1, /* bsscfg addr randomized */
+ WL_RANDMAC_EVENT_BSSCFG_ADDR_RESTORE = 2, /* bsscfg addr restored */
+ WL_RANDMAC_EVENT_ENABLED = 3, /* randmac module enabled */
+ WL_RANDMAC_EVENT_DISABLE = 4, /* randmac module disabled */
+ WL_RANDMAC_EVENT_BSSCFG_STATUS = 5, /* bsscfg enable/disable */
+
+ WL_RANDMAC_EVENT_MAX
+};
+typedef int16 wl_randmac_event_type_t;
+typedef int32 wl_randmac_status_t;
+typedef uint32 wl_randmac_event_mask_t;
+
+#define WL_RANDMAC_EVENT_MASK_ALL 0xfffffffe
+#define WL_RANDMAC_EVENT_MASK_EVENT(_event_type) (1 << (_event_type))
+#define WL_RANDMAC_EVENT_ENABLED(_mask, _event_type) (\
+ ((_mask) & WL_RANDMAC_EVENT_MASK_EVENT(_event_type)) != 0)
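+
+/*
+ * Example (assumed usage): building an event mask and testing it with
+ * the macros above.
+ *
+ *	wl_randmac_event_mask_t mask =
+ *	    WL_RANDMAC_EVENT_MASK_EVENT(WL_RANDMAC_EVENT_BSSCFG_ADDR_SET) |
+ *	    WL_RANDMAC_EVENT_MASK_EVENT(WL_RANDMAC_EVENT_BSSCFG_ADDR_RESTORE);
+ *	if (WL_RANDMAC_EVENT_ENABLED(mask, WL_RANDMAC_EVENT_BSSCFG_ADDR_SET)) {
+ *		// address-set events are subscribed
+ *	}
+ */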
+
+/** tlv IDs - data length 4 bytes unless overridden by type, alignment 32 bits */
+enum {
+ WL_RANDMAC_TLV_NONE = 0,
+ WL_RANDMAC_TLV_METHOD = 1,
+ WL_RANDMAC_TLV_ADDR = 2,
+ WL_RANDMAC_TLV_MASK = 3
+};
+typedef uint16 wl_randmac_tlv_id_t;
+
+typedef struct wl_randmac_tlv {
+ wl_randmac_tlv_id_t id;
+ uint16 len; /* Length of variable */
+ uint8 data[1];
+} wl_randmac_tlv_t;
+
+/** randmac event */
+typedef struct wl_randmac_event {
+ uint16 version;
+ uint16 len; /* Length of all variables */
+ wl_randmac_event_type_t type;
+ wl_randmac_method_t method;
+ uint8 pad[2];
+ wl_randmac_tlv_t tlvs[1]; /**< variable */
+} wl_randmac_event_t;
+
+/*
+ * scan MAC definitions
+ */
+
+/** common iovar struct */
+typedef struct wl_scanmac {
+ uint16 subcmd_id; /**< subcommand id */
+ uint16 len; /**< total length of data[] */
+ uint8 data[]; /**< subcommand data */
+} wl_scanmac_t;
+
+/* subcommand ids */
+#define WL_SCANMAC_SUBCMD_ENABLE 0
+#define WL_SCANMAC_SUBCMD_BSSCFG 1u /**< only GET supported */
+#define WL_SCANMAC_SUBCMD_CONFIG 2u
+#define WL_SCANMAC_SUBCMD_MACADDR 3u
+
+/** scanmac enable data struct */
+typedef struct wl_scanmac_enable {
+ uint8 enable; /**< 1 - enable, 0 - disable */
+ uint8 pad[3]; /**< 4-byte struct alignment */
+} wl_scanmac_enable_t;
+
+/** scanmac bsscfg data struct */
+typedef struct wl_scanmac_bsscfg {
+ uint32 bsscfg; /**< bsscfg index */
+} wl_scanmac_bsscfg_t;
+
+/** scanmac config data struct */
+typedef struct wl_scanmac_config {
+ struct ether_addr mac; /**< 6 bytes of MAC address or MAC prefix (i.e. OUI) */
+ struct ether_addr random_mask; /**< randomized bits on each scan */
+ uint16 scan_bitmap; /**< scans to use this MAC address */
+ uint8 pad[2]; /**< 4-byte struct alignment */
+} wl_scanmac_config_t;
+
+/** scanmac mac addr data struct */
+typedef struct wl_scanmac_macaddr {
+ struct ether_addr mac; /* last mac address used for scan. either randomized or permanent */
+} wl_scanmac_macaddr_t;
+
+/* scan bitmap */
+#define WL_SCANMAC_SCAN_UNASSOC (0x01 << 0u) /**< unassociated scans */
+#define WL_SCANMAC_SCAN_ASSOC_ROAM (0x01 << 1u) /**< associated roam scans */
+#define WL_SCANMAC_SCAN_ASSOC_PNO (0x01 << 2u) /**< associated PNO scans */
+#define WL_SCANMAC_SCAN_ASSOC_HOST (0x01 << 3u) /**< associated host scans */
+#define WL_SCANMAC_SCAN_RAND_PERPRQ (0x01 << 4u) /* enable per probe rand */
+#define WL_SCANMAC_SCAN_RAND_SCANCH (0x01 << 5u) /* enable scan chan rand */
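+
+/*
+ * Illustrative sketch (assumed usage): a wl_scanmac_config_t that keeps
+ * a fixed OUI and randomizes the low three MAC bytes on unassociated
+ * scans. The OUI value and the ether_addr 'octet' member are
+ * assumptions for the example only.
+ *
+ *	wl_scanmac_config_t cfg;
+ *	memset(&cfg, 0, sizeof(cfg));
+ *	cfg.mac.octet[0] = 0x00;  // fixed OUI prefix
+ *	cfg.mac.octet[1] = 0x11;
+ *	cfg.mac.octet[2] = 0x22;
+ *	memset(&cfg.random_mask.octet[3], 0xFF, 3); // randomize low 3 bytes
+ *	cfg.scan_bitmap = WL_SCANMAC_SCAN_UNASSOC;
+ */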
+
+#define WL_SCAN_EVENT_VER1 1
+#define WL_SCAN_EVENT_VER2 2
+
+#define WL_SCAN_TYPE_ASSOC 0x1 /* Assoc scan */
+#define WL_SCAN_TYPE_ROAM 0x2 /* Roam scan */
+#define WL_SCAN_TYPE_FWSCAN 0x4 /* Other FW scan */
+#define WL_SCAN_TYPE_HOSTSCAN 0x8 /* Host scan */
+
+typedef struct scan_event_data {
+ uint32 version;
+ uint32 flags;
+ uint16 num_chan_slice0;
+ uint16 num_chan_slice1;
+ /* Will contain num_chan_slice0 followed by num_chan_slice1 chanspecs */
+ chanspec_t scan_chan_list[];
+} scan_event_data_v1_t;
+
+/** tlv used to return chanspec list of each slice */
+typedef struct scan_chan_tlv {
+ uint16 type; /* slice index */
+ uint16 len; /* data length */
+ uint8 data[1]; /* chanspec list */
+} scan_chan_tlv_t;
+
+typedef struct scan_event_data_v2 {
+ uint32 version;
+ uint32 flags;
+ uint32 num_tlvs; /* no of chanspec list tlvs */
+ scan_chan_tlv_t tlvs[1];
+} scan_event_data_v2_t;
+
+#ifdef WL_SCAN_EVENT_V2
+typedef scan_event_data_v2_t scan_event_data_t;
+#define WL_SCAN_EVENT_FIXED_LEN_V2 OFFSETOF(scan_event_data_t, tlvs)
+#define WL_SCAN_EVENT_VERSION WL_SCAN_EVENT_VER2
+#else
+#define WL_SCAN_EVENT_VERSION WL_SCAN_EVENT_VER1
+typedef scan_event_data_v1_t scan_event_data_t;
+#endif
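+
+/*
+ * Illustrative sketch (assumed traversal): walking the per-slice
+ * chanspec TLVs of a scan_event_data_v2_t; real code must bounds-check
+ * against the event payload length.
+ *
+ *	scan_chan_tlv_t *tlv = ev->tlvs;
+ *	uint32 i;
+ *	for (i = 0; i < ev->num_tlvs; i++) {
+ *		// tlv->type is the slice index; tlv->data holds
+ *		// tlv->len bytes of chanspecs for that slice
+ *		tlv = (scan_chan_tlv_t *)((uint8 *)tlv +
+ *		        OFFSETOF(scan_chan_tlv_t, data) + tlv->len);
+ *	}
+ */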
+
+/*
+ * bonjour dongle offload definitions
+ */
+
+/* common iovar struct */
+typedef struct wl_bdo {
+ uint16 subcmd_id; /* subcommand id */
+ uint16 len; /* total length of data[] */
+ uint8 data[]; /* subcommand data */
+} wl_bdo_t;
+
+/* subcommand ids */
+#define WL_BDO_SUBCMD_DOWNLOAD 0 /* Download flattened database */
+#define WL_BDO_SUBCMD_ENABLE 1 /* Start bonjour after download */
+#define WL_BDO_SUBCMD_MAX_DOWNLOAD 2 /* Get the max download size */
+
+/* maximum fragment size */
+#define BDO_MAX_FRAGMENT_SIZE 1024
+
+/* download flattened database
+ *
+ * BDO must be disabled before the database download, otherwise it fails.
+ *
+ * If database size is within BDO_MAX_FRAGMENT_SIZE then only a single fragment
+ * is required (i.e. frag_num = 0, total_size = frag_size).
+ * If database size exceeds BDO_MAX_FRAGMENT_SIZE then multiple fragments are required.
+ */
+typedef struct wl_bdo_download {
+ uint16 total_size; /* total database size */
+ uint16 frag_num; /* fragment number, 0 for first fragment, N-1 for last fragment */
+ uint16 frag_size; /* size of fragment (max BDO_MAX_FRAGMENT_SIZE) */
+ uint8 pad[2]; /* 4-byte struct alignment */
+ uint8 fragment[BDO_MAX_FRAGMENT_SIZE]; /* fragment data */
+} wl_bdo_download_t;
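+
+/*
+ * Illustrative sketch (assumed usage): splitting a flattened database
+ * into BDO_MAX_FRAGMENT_SIZE chunks per the rules above. The db/db_len
+ * names, MIN() and the send step are assumptions for the example only.
+ *
+ *	uint16 sent = 0, frag = 0;
+ *	while (sent < db_len) {
+ *		wl_bdo_download_t dl;
+ *		uint16 chunk = MIN(db_len - sent, BDO_MAX_FRAGMENT_SIZE);
+ *		memset(&dl, 0, sizeof(dl));
+ *		dl.total_size = db_len;
+ *		dl.frag_num = frag++;
+ *		dl.frag_size = chunk;
+ *		memcpy(dl.fragment, db + sent, chunk);
+ *		// issue WL_BDO_SUBCMD_DOWNLOAD with &dl here
+ *		sent += chunk;
+ *	}
+ */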
+
+/* enable
+ *
+ * Enabling requires a previously downloaded database, otherwise it fails.
+ */
+typedef struct wl_bdo_enable {
+ uint8 enable; /* 1 - enable, 0 - disable */
+ uint8 pad[3]; /* 4-byte struct alignment */
+} wl_bdo_enable_t;
+
+/*
+ * Get the max download size for Bonjour Offload.
+ */
+typedef struct wl_bdo_max_download {
+ uint16 size; /* Max download size in bytes */
+ uint8 pad[2]; /* 4-byte struct alignment */
+} wl_bdo_max_download_t;
+
+/*
+ * TCP keepalive offload definitions
+ */
+
+/* common iovar struct */
+typedef struct wl_tko {
+ uint16 subcmd_id; /* subcommand id */
+ uint16 len; /* total length of data[] */
+ uint8 data[]; /* subcommand data */
+} wl_tko_t;
+
+/* subcommand ids */
+#define WL_TKO_SUBCMD_MAX_TCP 0 /* max TCP connections supported */
+#define WL_TKO_SUBCMD_PARAM 1 /* configure offload common parameters */
+#define WL_TKO_SUBCMD_CONNECT 2 /* TCP connection info */
+#define WL_TKO_SUBCMD_ENABLE 3 /* enable/disable */
+#define WL_TKO_SUBCMD_STATUS 4 /* TCP connection status */
+
+/* WL_TKO_SUBCMD_MAX_CONNECT subcommand data */
+typedef struct wl_tko_max_tcp {
+ uint8 max; /* max TCP connections supported */
+ uint8 pad[3]; /* 4-byte struct alignment */
+} wl_tko_max_tcp_t;
+
+/* WL_TKO_SUBCMD_PARAM subcommand data */
+typedef struct wl_tko_param {
+ uint16 interval; /* keepalive tx interval (secs) */
+ uint16 retry_interval; /* keepalive retry interval (secs) */
+ uint16 retry_count; /* retry_count */
+ uint8 pad[2]; /* 4-byte struct alignment */
+} wl_tko_param_t;
+
+/* WL_TKO_SUBCMD_CONNECT subcommand data
+ * invoke with unique 'index' for each TCP connection
+ */
+typedef struct wl_tko_connect {
+ uint8 index; /* TCP connection index, 0 to max-1 */
+ uint8 ip_addr_type; /* 0 - IPv4, 1 - IPv6 */
+ uint16 local_port; /* local port */
+ uint16 remote_port; /* remote port */
+ uint16 PAD;
+ uint32 local_seq; /* local sequence number */
+ uint32 remote_seq; /* remote sequence number */
+ uint16 request_len; /* TCP keepalive request packet length */
+ uint16 response_len; /* TCP keepalive response packet length */
+ uint8 data[]; /* variable length field containing local/remote IPv4/IPv6,
+ * TCP keepalive request packet, TCP keepalive response packet
+ * For IPv4, length is 4 * 2 + request_length + response_length
+ * offset 0 - local IPv4
+ * offset 4 - remote IPv4
+ * offset 8 - TCP keepalive request packet
+ * offset 8+request_length - TCP keepalive response packet
+ * For IPv6, length is 16 * 2 + request_length + response_length
+ * offset 0 - local IPv6
+ * offset 16 - remote IPv6
+ * offset 32 - TCP keepalive request packet
+ * offset 32+request_length - TCP keepalive response packet
+ */
+} wl_tko_connect_t;
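+
+/*
+ * Illustrative sketch (assumed usage): filling the variable data[] of a
+ * wl_tko_connect_t for IPv4, following the offsets documented above.
+ * The source buffer names are assumptions for the example only.
+ *
+ *	uint8 *p = conn->data;
+ *	memcpy(p, local_ipv4, 4);               p += 4;
+ *	memcpy(p, remote_ipv4, 4);              p += 4;
+ *	memcpy(p, req_pkt, conn->request_len);  p += conn->request_len;
+ *	memcpy(p, rsp_pkt, conn->response_len);
+ */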
+
+/* WL_TKO_SUBCMD_CONNECT subcommand data to GET configured info for specific index */
+typedef struct wl_tko_get_connect {
+ uint8 index; /* TCP connection index, 0 to max-1 */
+ uint8 pad[3]; /* 4-byte struct alignment */
+} wl_tko_get_connect_t;
+
+typedef struct wl_tko_enable {
+ uint8 enable; /* 1 - enable, 0 - disable */
+ uint8 pad[3]; /* 4-byte struct alignment */
+} wl_tko_enable_t;
+
+/* WL_TKO_SUBCMD_STATUS subcommand data */
+/* must be invoked before tko is disabled, otherwise the status is unavailable */
+typedef struct wl_tko_status {
+ uint8 count; /* number of status entries (i.e. equals
+ * max TCP connections supported)
+ */
+ uint8 status[1]; /* variable length field containing the status for
+ * each TCP connection index
+ */
+} wl_tko_status_t;
+
+typedef enum {
+ TKO_STATUS_NORMAL = 0, /* TCP connection normal, no error */
+ TKO_STATUS_NO_RESPONSE = 1, /* no response to TCP keepalive */
+ TKO_STATUS_NO_TCP_ACK_FLAG = 2, /* TCP ACK flag not set */
+ TKO_STATUS_UNEXPECT_TCP_FLAG = 3, /* unexpected TCP flags set other than ACK */
+ TKO_STATUS_SEQ_NUM_INVALID = 4, /* ACK != sequence number */
+ TKO_STATUS_REMOTE_SEQ_NUM_INVALID = 5, /* SEQ > remote sequence number */
+ TKO_STATUS_TCP_DATA = 6, /* TCP data available */
+ TKO_STATUS_UNAVAILABLE = 255, /* not used/configured */
+} tko_status_t;
+
+enum rssi_reason {
+ RSSI_REASON_UNKNOW = 0,
+ RSSI_REASON_LOWRSSI = 1,
+ RSSI_REASON_NSYC = 2,
+ RSSI_REASON_TIMEOUT = 3
+};
+
+enum tof_reason {
+ TOF_REASON_OK = 0,
+ TOF_REASON_REQEND = 1,
+ TOF_REASON_TIMEOUT = 2,
+ TOF_REASON_NOACK = 3,
+ TOF_REASON_INVALIDAVB = 4,
+ TOF_REASON_INITIAL = 5,
+ TOF_REASON_ABORT = 6
+};
+
+enum rssi_state {
+ RSSI_STATE_POLL = 0,
+ RSSI_STATE_TPAIRING = 1,
+ RSSI_STATE_IPAIRING = 2,
+ RSSI_STATE_THANDSHAKE = 3,
+ RSSI_STATE_IHANDSHAKE = 4,
+ RSSI_STATE_CONFIRMED = 5,
+ RSSI_STATE_PIPELINE = 6,
+ RSSI_STATE_NEGMODE = 7,
+ RSSI_STATE_MONITOR = 8,
+ RSSI_STATE_LAST = 9
+};
+
+enum tof_state {
+ TOF_STATE_IDLE = 0,
+ TOF_STATE_IWAITM = 1,
+ TOF_STATE_TWAITM = 2,
+ TOF_STATE_ILEGACY = 3,
+ TOF_STATE_IWAITCL = 4,
+ TOF_STATE_TWAITCL = 5,
+ TOF_STATE_ICONFIRM = 6,
+ TOF_STATE_IREPORT = 7
+};
+
+enum tof_mode_type {
+ TOF_LEGACY_UNKNOWN = 0,
+ TOF_LEGACY_AP = 1,
+ TOF_NONLEGACY_AP = 2
+};
+
+enum tof_way_type {
+ TOF_TYPE_ONE_WAY = 0,
+ TOF_TYPE_TWO_WAY = 1,
+ TOF_TYPE_REPORT = 2
+};
+
+enum tof_rate_type {
+ TOF_FRAME_RATE_VHT = 0,
+ TOF_FRAME_RATE_LEGACY = 1
+};
+
+#define TOF_ADJ_TYPE_NUM 4 /**< number of assisted timestamp adjustment */
+enum tof_adj_mode {
+ TOF_ADJ_SOFTWARE = 0,
+ TOF_ADJ_HARDWARE = 1,
+ TOF_ADJ_SEQ = 2,
+ TOF_ADJ_NONE = 3
+};
+
+#define FRAME_TYPE_NUM 4 /**< number of frame type */
+enum frame_type {
+ FRAME_TYPE_CCK = 0,
+ FRAME_TYPE_OFDM = 1,
+ FRAME_TYPE_11N = 2,
+ FRAME_TYPE_11AC = 3
+};
+
+typedef struct wl_proxd_status_iovar {
+ uint16 method; /**< method */
+ uint8 mode; /**< mode */
+ uint8 peermode; /**< peer mode */
+ uint8 state; /**< state */
+ uint8 reason; /**< reason code */
+ uint8 PAD[2];
+ uint32 distance; /**< distance */
+ uint32 txcnt; /**< tx pkt counter */
+ uint32 rxcnt; /**< rx pkt counter */
+ struct ether_addr peer; /**< peer mac address */
+ int8 avg_rssi; /**< average rssi */
+ int8 hi_rssi; /**< highest rssi */
+ int8 low_rssi; /**< lowest rssi */
+ uint8 PAD[3];
+ uint32 dbgstatus; /**< debug status */
+ uint16 frame_type_cnt[FRAME_TYPE_NUM]; /**< frame types */
+ uint8 adj_type_cnt[TOF_ADJ_TYPE_NUM]; /**< adj types HW/SW */
+} wl_proxd_status_iovar_t;
+
+/* ifdef NET_DETECT */
+typedef struct net_detect_adapter_features {
+ uint8 wowl_enabled;
+ uint8 net_detect_enabled;
+ uint8 nlo_enabled;
+} net_detect_adapter_features_t;
+
+typedef enum net_detect_bss_type {
+ nd_bss_any = 0,
+ nd_ibss,
+ nd_ess
+} net_detect_bss_type_t;
+
+typedef struct net_detect_profile {
+ wlc_ssid_t ssid;
+ net_detect_bss_type_t bss_type; /**< Ignore for now since Phase 1 is only for ESS */
+ uint32 cipher_type; /**< DOT11_CIPHER_ALGORITHM enumeration values */
+ uint32 auth_type; /**< DOT11_AUTH_ALGORITHM enumeration values */
+} net_detect_profile_t;
+
+typedef struct net_detect_profile_list {
+ uint32 num_nd_profiles;
+ net_detect_profile_t nd_profile[];
+} net_detect_profile_list_t;
+
+typedef struct net_detect_config {
+ uint8 nd_enabled;
+ uint8 PAD[3];
+ uint32 scan_interval;
+ uint32 wait_period;
+ uint8 wake_if_connected;
+ uint8 wake_if_disconnected;
+ uint8 PAD[2];
+ net_detect_profile_list_t nd_profile_list;
+} net_detect_config_t;
+
+typedef enum net_detect_wake_reason {
+ nd_reason_unknown,
+ nd_net_detected,
+ nd_wowl_event,
+ nd_ucode_error
+} net_detect_wake_reason_t;
+
+typedef struct net_detect_wake_data {
+ net_detect_wake_reason_t nd_wake_reason;
+ uint32 nd_wake_date_length;
+ uint8 nd_wake_data[0]; /**< Wake data (currently unused) */
+} net_detect_wake_data_t;
+
+/* endif NET_DETECT */
+
+/* (unversioned, deprecated) */
+typedef struct bcnreq {
+ uint8 bcn_mode;
+ uint8 PAD[3];
+ int32 dur;
+ int32 channel;
+ struct ether_addr da;
+ uint16 random_int;
+ wlc_ssid_t ssid;
+ uint16 reps;
+ uint8 PAD[2];
+} bcnreq_t;
+
+#define WL_RRM_BCN_REQ_VER 1
+typedef struct bcn_req {
+ uint8 version;
+ uint8 bcn_mode;
+ uint8 pad_1[2];
+ int32 dur;
+ int32 channel;
+ struct ether_addr da;
+ uint16 random_int;
+ wlc_ssid_t ssid;
+ uint16 reps;
+ uint8 req_elements;
+ uint8 pad_2;
+ chanspec_list_t chspec_list;
+} bcn_req_t;
+
+typedef struct rrmreq {
+ struct ether_addr da;
+ uint8 reg;
+ uint8 chan;
+ uint16 random_int;
+ uint16 dur;
+ uint16 reps;
+} rrmreq_t;
+
+typedef struct framereq {
+ struct ether_addr da;
+ uint8 reg;
+ uint8 chan;
+ uint16 random_int;
+ uint16 dur;
+ struct ether_addr ta;
+ uint16 reps;
+} framereq_t;
+
+typedef struct statreq {
+ struct ether_addr da;
+ struct ether_addr peer;
+ uint16 random_int;
+ uint16 dur;
+ uint8 group_id;
+ uint8 PAD;
+ uint16 reps;
+} statreq_t;
+
+typedef struct txstrmreq {
+ struct ether_addr da; /* Destination address */
+ uint16 random_int; /* Random interval for measurement start */
+ uint16 dur; /* Measurement duration */
+ uint16 reps; /* number of repetitions */
+ struct ether_addr peer; /* Peer MAC address */
+ uint8 tid; /* Traffic ID */
+ uint8 bin0_range; /* Delay range of the first bin */
+} txstrmreq_t;
+
+typedef struct lcireq {
+ struct ether_addr da; /* Destination address */
+ uint16 reps; /* number of repetitions */
+ uint8 subj; /* Local/Remote/Third party */
+ uint8 lat_res; /* Latitude requested Resolution */
+ uint8 lon_res; /* Longitude requested Resolution */
+ uint8 alt_res; /* Altitude requested Resolution */
+} lcireq_t;
+
+typedef struct civicreq {
+ struct ether_addr da; /* Destination address */
+ uint16 reps; /* number of repetitions */
+ uint8 subj; /* Local/Remote/Third party */
+ uint8 civloc_type; /* Format of location info */
+ uint8 siu; /* Unit of Location service interval */
+ uint8 pad;
+ uint16 si; /* Location service interval */
+} civicreq_t;
+
+typedef struct locidreq {
+ struct ether_addr da; /* Destination address */
+ uint16 reps; /* number of repetitions */
+ uint8 subj; /* Local/Remote/Third party */
+ uint8 siu; /* Unit of Location service interval */
+ uint16 si; /* Location service interval */
+} locidreq_t;
+
+typedef struct wl_rrm_config_ioc {
+ uint16 version; /* command version */
+ uint16 id; /* subiovar cmd ID */
+ uint16 len; /* total length of all bytes in data[] */
+ uint16 pad; /* 4-byte boundary padding */
+ uint8 data[1]; /* payload */
+} wl_rrm_config_ioc_t;
+
+enum {
+ WL_RRM_CONFIG_NONE = 0, /* reserved */
+ WL_RRM_CONFIG_GET_LCI = 1, /* get LCI */
+ WL_RRM_CONFIG_SET_LCI = 2, /* set LCI */
+ WL_RRM_CONFIG_GET_CIVIC = 3, /* get civic location */
+ WL_RRM_CONFIG_SET_CIVIC = 4, /* set civic location */
+ WL_RRM_CONFIG_GET_LOCID = 5, /* get location identifier */
+ WL_RRM_CONFIG_SET_LOCID = 6, /* set location identifier */
+ WL_RRM_CONFIG_MAX = 7
+};
+
+#define WL_RRM_CONFIG_NAME "rrm_config"
+#define WL_RRM_CONFIG_MIN_LENGTH OFFSETOF(wl_rrm_config_ioc_t, data)
+
+enum {
+ WL_RRM_EVENT_NONE = 0, /* not an event, reserved */
+ WL_RRM_EVENT_FRNG_REQ = 1, /* Receipt of FRNG request frame */
+ WL_RRM_EVENT_FRNG_REP = 2, /* Receipt of FRNG report frame */
+
+ WL_RRM_EVENT_MAX
+};
+typedef int16 wl_rrm_event_type_t;
+
+typedef struct frngreq_target {
+ uint32 bssid_info;
+ uint8 channel;
+ uint8 phytype;
+ uint8 reg;
+ uint8 pad;
+ struct ether_addr bssid;
+ chanspec_t chanspec;
+ uint32 sid;
+} frngreq_target_t;
+
+typedef struct frngreq {
+ wl_rrm_event_type_t event; /* RRM event type */
+ struct ether_addr da;
+ uint16 max_init_delay; /* Upper bound of random delay, in TUs */
+ uint8 min_ap_count; /* Min FTM ranges requested (1-15) */
+ uint8 num_aps; /* Number of APs to range, at least min_ap_count */
+ uint16 max_age; /* Max elapsed time before FTM request, 0xFFFF = any */
+ uint16 reps; /* Number of repetitions of this measurement type */
+ frngreq_target_t targets[1]; /* Target BSSIDs to range */
+} frngreq_t;
+
+typedef struct frngrep_range {
+ uint32 start_tsf; /* 4 lsb of tsf */
+ struct ether_addr bssid;
+ uint8 pad[2];
+ uint32 range;
+ uint32 max_err;
+ uint8 rsvd;
+ uint8 pad2[3];
+} frngrep_range_t;
+
+typedef struct frngrep_error {
+ uint32 start_tsf; /* 4 lsb of tsf */
+ struct ether_addr bssid;
+ uint8 code;
+ uint8 pad[1];
+} frngrep_error_t;
+
+typedef struct frngrep {
+ wl_rrm_event_type_t event; /* RRM event type */
+ struct ether_addr da;
+ uint8 range_entry_count;
+ uint8 error_entry_count;
+ uint16 dialog_token; /* dialog token */
+ frngrep_range_t range_entries[DOT11_FTM_RANGE_ENTRY_MAX_COUNT];
+ frngrep_error_t error_entries[DOT11_FTM_RANGE_ERROR_ENTRY_MAX_COUNT];
+} frngrep_t;
+
+typedef struct wl_rrm_frng_ioc {
+ uint16 version; /* command version */
+ uint16 id; /* subiovar cmd ID */
+ uint16 len; /* total length of all bytes in data[] */
+ uint16 pad; /* 4-byte boundary padding */
+ uint8 data[]; /* payload */
+} wl_rrm_frng_ioc_t;
+
+enum {
+ WL_RRM_FRNG_NONE = 0, /* reserved */
+ WL_RRM_FRNG_SET_REQ = 1, /* send ftm ranging request */
+ WL_RRM_FRNG_MAX = 2
+};
+
+#define WL_RRM_FRNG_NAME "rrm_frng"
+#define WL_RRM_FRNG_MIN_LENGTH OFFSETOF(wl_rrm_frng_ioc_t, data)
+
+#define WL_RRM_RPT_VER 0
+#define WL_RRM_RPT_MAX_PAYLOAD 256
+#define WL_RRM_RPT_MIN_PAYLOAD 7
+#define WL_RRM_RPT_FALG_ERR 0
+#define WL_RRM_RPT_FALG_GRP_ID_PROPR (1 << 0)
+#define WL_RRM_RPT_FALG_GRP_ID_0 (1 << 1)
+typedef struct {
+ uint16 ver; /**< version */
+ struct ether_addr addr; /**< STA MAC addr */
+ uint32 timestamp; /**< timestamp of the report */
+ uint16 flag; /**< flag */
+ uint16 len; /**< length of payload data */
+ uint8 data[WL_RRM_RPT_MAX_PAYLOAD];
+} statrpt_t;
+
+typedef struct wlc_dwds_config {
+ uint32 enable;
+ uint32 mode; /**< STA/AP interface */
+ struct ether_addr ea;
+ uint8 PAD[2];
+} wlc_dwds_config_t;
+
+typedef struct wl_el_set_params_s {
+ uint8 set; /**< Set number */
+ uint8 PAD[3];
+ uint32 size; /**< Size to make/expand */
+} wl_el_set_params_t;
+
+typedef struct wl_el_tag_params_s {
+ uint16 tag;
+ uint8 set;
+ uint8 flags;
+} wl_el_tag_params_t;
+
+#define EVENT_LOG_SET_TYPE_CURRENT_VERSION 0
+typedef struct wl_el_set_type_s {
+ uint16 version;
+ uint16 len;
+ uint8 set; /* Set number */
+ uint8 type; /* Type- EVENT_LOG_SET_TYPE_DEFAULT or EVENT_LOG_SET_TYPE_PRSRV */
+ uint16 PAD;
+} wl_el_set_type_t;
+
+#define EVENT_LOG_SET_TYPE_ALL_V1 1
+
+typedef struct wl_el_set_type_s_v1 {
+ uint8 set_val;
+ uint8 type_val;
+} wl_el_set_type_v1_t;
+
+typedef struct wl_el_set_all_type_s_v1 {
+ uint16 version;
+ uint16 len;
+ uint32 max_sets;
+ wl_el_set_type_v1_t set_type[1]; /* set-Type Values Array */
+} wl_el_set_all_type_v1_t;
+
+typedef struct wl_staprio_cfg {
+ struct ether_addr ea; /**< mac addr */
+ uint8 prio; /**< scb priority */
+} wl_staprio_cfg_t;
+
+#define STAMON_STACONFIG_VER 1
+/* size of struct wlc_stamon_sta_config_t elements */
+#define STAMON_STACONFIG_LENGTH 20
+
+typedef enum wl_stamon_cfg_cmd_type {
+ STAMON_CFG_CMD_DEL = 0,
+ STAMON_CFG_CMD_ADD = 1,
+ STAMON_CFG_CMD_ENB = 2,
+ STAMON_CFG_CMD_DSB = 3,
+ STAMON_CFG_CMD_CNT = 4,
+ STAMON_CFG_CMD_RSTCNT = 5,
+ STAMON_CFG_CMD_GET_STATS = 6,
+ STAMON_CFG_CMD_SET_MONTIME = 7
+} wl_stamon_cfg_cmd_type_t;
+
+typedef struct wlc_stamon_sta_config {
+ wl_stamon_cfg_cmd_type_t cmd; /**< stamon command, see wl_stamon_cfg_cmd_type_t */
+ struct ether_addr ea;
+ uint16 version; /* Command structure version */
+ uint16 length; /* Command structure length */
+ uint8 pad[2];
+ /* Time (ms) for which STAs are monitored. Value ZERO indicates no time limit */
+ uint32 monitor_time;
+} wlc_stamon_sta_config_t;
+
+/* ifdef SR_DEBUG */
+typedef struct /* pmu_reg */{
+ uint32 pmu_control;
+ uint32 pmu_capabilities;
+ uint32 pmu_status;
+ uint32 res_state;
+ uint32 res_pending;
+ uint32 pmu_timer1;
+ uint32 min_res_mask;
+ uint32 max_res_mask;
+ uint32 pmu_chipcontrol1[4];
+ uint32 pmu_regcontrol[5];
+ uint32 pmu_pllcontrol[5];
+ uint32 pmu_rsrc_up_down_timer[31];
+ uint32 rsrc_dep_mask[31];
+} pmu_reg_t;
+/* endif SR_DEBUG */
+
+typedef struct wl_taf_define {
+ struct ether_addr ea; /**< STA MAC or 0xFF... */
+ uint16 version; /**< version */
+ uint32 sch; /**< method index */
+ uint32 prio; /**< priority */
+ uint32 misc; /**< used for return value */
+ uint8 text[]; /**< used to pass and return ascii text */
+} wl_taf_define_t;
+
+/** Received Beacons lengths information */
+#define WL_LAST_BCNS_INFO_FIXED_LEN OFFSETOF(wlc_bcn_len_hist_t, bcnlen_ring)
+typedef struct wlc_bcn_len_hist {
+ uint16 ver; /**< version field */
+ uint16 cur_index; /**< current pointed index in ring buffer */
+ uint32 max_bcnlen; /**< Max beacon length received */
+ uint32 min_bcnlen; /**< Min beacon length received */
+ uint32 ringbuff_len; /**< Length of the ring buffer 'bcnlen_ring' */
+ uint32 bcnlen_ring[1]; /**< ring buffer storing received beacon lengths */
+} wlc_bcn_len_hist_t;
+
+/* WDS net interface types */
+#define WL_WDSIFTYPE_NONE 0x0 /**< The interface type is neither WDS nor DWDS. */
+#define WL_WDSIFTYPE_WDS 0x1 /**< The interface is WDS type. */
+#define WL_WDSIFTYPE_DWDS 0x2 /**< The interface is DWDS type. */
+
+typedef struct wl_bssload_static {
+ uint8 is_static;
+ uint8 PAD;
+ uint16 sta_count;
+ uint8 chan_util;
+ uint8 PAD;
+ uint16 aac;
+} wl_bssload_static_t;
+
+/* Buffer of size WLC_SAMPLECOLLECT_MAXLEN (=10240 for 4345a0 ACPHY)
+ * gets copied to this, multiple times
+ */
+typedef enum wl_gpaio_option {
+ GPAIO_PMU_AFELDO,
+ GPAIO_PMU_TXLDO,
+ GPAIO_PMU_VCOLDO,
+ GPAIO_PMU_LNALDO,
+ GPAIO_PMU_ADCLDO,
+ GPAIO_ICTAT_CAL,
+ GPAIO_PMU_CLEAR,
+ GPAIO_OFF,
+ GPAIO_PMU_LOGENLDO,
+ GPAIO_PMU_RXLDO2G,
+ GPAIO_PMU_RXLDO5G,
+ GPAIO_PMU_LPFTXLDO,
+ GPAIO_PMU_LDO1P6,
+ GPAIO_RCAL,
+ GPAIO_IQDAC_BUF_DC_MEAS,
+ GPAIO_IQDAC_BUF_DC_CLEAR,
+ GPAIO_DAC_IQ_DC_RDBK,
+ GPAIO_DAC_IQ_DC_RDBK_CLEAR,
+ GPAIO_AFE_LDO_FOR_DAC_DC,
+ GPAIO_PA5G_VCAS_SOURCE,
+ GPAIO_BIQ2_DC_MEAS,
+ GPAIO_BIQ2_DC_CLEAR,
+ GPAIO_VBATMONITOR,
+ GPAIO_PA5G_VCAS_GMDRAIN,
+ GPAIO_PMU_ROLDO,
+ GPAIO_PMU_PFDLDO,
+ GPAIO_PMU_LCHLDO,
+ GPAIO_PMU_MMDLDO,
+ GPAIO_PMU_VCOCORELDO,
+ GPAIO_PMU_PLLLDO,
+ GPAIO_PMU_RXLDO,
+ GPAIO_IQDAC_DC_TP,
+ GPAIO_BG_ICTAT_CAL,
+ GPAIO_BG_ICTAT_UNCAL,
+ GPAIO_BG_CTAT_UNCAL,
+ GPAIO_RX_TIA_VDD,
+ GPAIO_RX_NBIAS_TIA,
+ GPAIO_RX_NBIAS_LPF,
+ GPAIO_RX_CMREF_BB,
+ GPAIO_RX_CMREF_RF,
+ GPAIO_RX_LDO_RF_V0P8,
+ GPAIO_RX_IBIAS_N,
+ GPAIO_RX_IBIAS_P,
+ GPAIO_DIG_LDO,
+ GPAIO_IPTAT_UNCAL,
+ GPAIO_IPTAT_CAL,
+ GPAIO_VPTAT_UNCAL,
+ GPAIO_VCTAT_CAL,
+ GPAIO_VBE,
+ GPAIO_IOUT_TEST_3U,
+ GPAIO_VCO_BIAS_N,
+ GPAIO_VCO_BIAS_P,
+ GPAIO_VCO_TEMPCO_N,
+ GPAIO_VCO_TEMPCO_P,
+ GPAIO_MMD_LDO,
+ GPAIO_VCO_LDO,
+ GPAIO_SRO_LDO,
+ GPAIO_TX_VCTRL,
+ GPAIO_TX_VCTRL_TC,
+ GPAIO_GPA_BUF,
+ GPAIO_LPBK_OUT,
+ GPAIO_ADC_LPBK_INN,
+ GPAIO_ADC_LPBK_INP,
+ GPAIO_ETSSI
+} wl_gpaio_option_t;
+
+/** IO Var Operations - the Value of iov_op In wlc_ap_doiovar */
+typedef enum wlc_ap_iov_bss_operation {
+ WLC_AP_IOV_OP_DELETE = -1,
+ WLC_AP_IOV_OP_DISABLE = 0,
+ WLC_AP_IOV_OP_ENABLE = 1,
+ WLC_AP_IOV_OP_MANUAL_AP_BSSCFG_CREATE = 2,
+ WLC_AP_IOV_OP_MANUAL_STA_BSSCFG_CREATE = 3,
+ WLC_AP_IOV_OP_MOVE = 4
+} wlc_ap_iov_bss_oper_t;
+
+/* LTE coex info */
+/* Analogue of HCI Set MWS Signaling cmd */
+typedef struct {
+ int16 mws_rx_assert_offset;
+ int16 mws_rx_assert_jitter;
+ int16 mws_rx_deassert_offset;
+ int16 mws_rx_deassert_jitter;
+ int16 mws_tx_assert_offset;
+ int16 mws_tx_assert_jitter;
+ int16 mws_tx_deassert_offset;
+ int16 mws_tx_deassert_jitter;
+ int16 mws_pattern_assert_offset;
+ int16 mws_pattern_assert_jitter;
+ int16 mws_inact_dur_assert_offset;
+ int16 mws_inact_dur_assert_jitter;
+ int16 mws_scan_freq_assert_offset;
+ int16 mws_scan_freq_assert_jitter;
+ int16 mws_prio_assert_offset_req;
+} wci2_config_t;
+
+/** Analogue of HCI MWS Channel Params */
+typedef struct {
+ uint16 mws_rx_center_freq; /**< MHz */
+ uint16 mws_tx_center_freq;
+ uint16 mws_rx_channel_bw; /**< KHz */
+ uint16 mws_tx_channel_bw;
+ uint8 mws_channel_en;
+ uint8 mws_channel_type; /**< Don't care for WLAN? */
+} mws_params_t;
+
+#define LTECX_MAX_NUM_PERIOD_TYPES 7
+
+/* LTE Frame params */
+typedef struct {
+ uint16 mws_frame_dur;
+ int16 mws_framesync_assert_offset;
+ uint16 mws_framesync_assert_jitter;
+ uint16 mws_period_dur[LTECX_MAX_NUM_PERIOD_TYPES];
+ uint8 mws_period_type[LTECX_MAX_NUM_PERIOD_TYPES];
+ uint8 mws_num_periods;
+} mws_frame_config_t;
+
+/** MWS wci2 message */
+typedef struct {
+ uint8 mws_wci2_data; /**< BT-SIG msg */
+ uint8 PAD;
+ uint16 mws_wci2_interval; /**< Interval in us */
+ uint16 mws_wci2_repeat; /**< No of msgs to send */
+} mws_wci2_msg_t;
+/* MWS ANT map */
+typedef struct {
+ uint16 combo1; /* mws ant selection 1 */
+ uint16 combo2; /* mws ant selection 2 */
+ uint16 combo3; /* mws ant selection 3 */
+ uint16 combo4; /* mws ant selection 4 */
+} mws_ant_map_t;
+
+/* MWS ANT map 2nd generation */
+typedef struct {
+ uint16 combo[16]; /* mws ant selection 2nd */
+} mws_ant_map_t_2nd;
+
+/* MWS Coex bitmap v2 map for Type0/Type6 */
+typedef struct {
+ uint16 bitmap_2G; /* 2G Bitmap */
+ uint16 bitmap_5G_lo; /* 5G lo bitmap */
+ uint16 bitmap_5G_mid; /* 5G mid bitmap */
+ uint16 bitmap_5G_hi; /* 5G hi bitmap */
+} mws_coex_bitmap_v2_t;
+
+/* MWS SCAN_REQ Bitmap */
+typedef struct mws_scanreq_params {
+ uint16 idx;
+ uint16 bm_2g;
+ uint16 bm_5g_lo;
+ uint16 bm_5g_mid;
+ uint16 bm_5g_hi;
+} mws_scanreq_params_t;
+
+/* MWS NR Coex Channel map */
+#define WL_MWS_NR_COEXMAP_VERSION 1
+typedef struct wl_mws_nr_coexmap {
+ uint16 version; /* Structure version */
+ uint16 bitmap_5g_lo; /* bitmap for 5G low channels by 2:
+ * 34-48, 52-56, 60-64, 100-102
+ */
+ uint16 bitmap_5g_mid; /* bitmap for 5G mid channels by 2:
+ * 104, 108-112, 116-120, 124-128,
+ * 132-136, 140, 149-151
+ */
+ uint16 bitmap_5g_high; /* bitmap for 5G high channels by 2
+ * 153, 157-161, 165
+ */
+} wl_mws_nr_coexmap_t;
+
+typedef struct {
+ uint32 config; /**< MODE: AUTO (-1), Disable (0), Enable (1) */
+ uint32 status; /**< Current state: Disabled (0), Enabled (1) */
+} wl_config_t;
+
+#define WLC_RSDB_MODE_AUTO_MASK 0x80
+#define WLC_RSDB_EXTRACT_MODE(val) ((int8)((val) & (~(WLC_RSDB_MODE_AUTO_MASK))))
+
+typedef struct {
+ uint16 request; /* type of sensor hub request */
+ uint16 enable; /* enable/disable response for specified request */
+ uint16 interval; /* interval between responses to the request */
+} shub_req_t;
+
+#define WL_IF_STATS_T_VERSION 1 /**< current version of wl_if_stats structure */
+
+/** per interface counters */
+typedef struct wl_if_stats {
+ uint16 version; /**< version of the structure */
+ uint16 length; /**< length of the entire structure */
+ uint32 PAD; /**< padding */
+
+ /* transmit stat counters */
+ uint64 txframe; /**< tx data frames */
+ uint64 txbyte; /**< tx data bytes */
+ uint64 txerror; /**< tx data errors (derived: sum of others) */
+ uint64 txnobuf; /**< tx out of buffer errors */
+ uint64 txrunt; /**< tx runt frames */
+ uint64 txfail; /**< tx failed frames */
+ uint64 txretry; /**< tx retry frames */
+ uint64 txretrie; /**< tx multiple retry frames */
+ uint64 txfrmsnt; /**< tx sent frames */
+ uint64 txmulti; /**< tx multicast sent frames */
+ uint64 txfrag; /**< tx fragments sent */
+
+ /* receive stat counters */
+ uint64 rxframe; /**< rx data frames */
+ uint64 rxbyte; /**< rx data bytes */
+ uint64 rxerror; /**< rx data errors (derived: sum of others) */
+ uint64 rxnobuf; /**< rx out of buffer errors */
+ uint64 rxrunt; /**< rx runt frames */
+ uint64 rxfragerr; /**< rx fragment errors */
+ uint64 rxmulti; /**< rx multicast frames */
+
+ uint64 txexptime; /* DATA Tx frames suppressed due to timer expiration */
+ uint64 txrts; /* RTS/CTS succeeded count */
+ uint64 txnocts; /* RTS/CTS failed count */
+
+ uint64 txretrans; /* Number of frame retransmissions */
+}
+wl_if_stats_t;
+
+typedef struct wl_band {
+ uint16 bandtype; /**< WL_BAND_2G, WL_BAND_5G */
+ uint16 bandunit; /**< bandstate[] index */
+ uint16 phytype; /**< phytype */
+ uint16 phyrev;
+}
+wl_band_t;
+
+#define WL_ROAM_STATS_VER_1 (1u) /**< current version of wl_roam_stats structure */
+
+/** roam statistics counters */
+typedef struct {
+ uint16 version; /**< version of the structure */
+ uint16 length; /**< length of the entire structure */
+ uint32 initial_assoc_time;
+ uint32 prev_roam_time;
+ uint32 last_roam_event_type;
+ uint32 last_roam_event_status;
+ uint32 last_roam_event_reason;
+ uint16 roam_success_cnt;
+ uint16 roam_fail_cnt;
+ uint16 roam_attempt_cnt;
+ uint16 max_roam_target_cnt;
+ uint16 min_roam_target_cnt;
+ uint16 max_cached_ch_cnt;
+ uint16 min_cached_ch_cnt;
+ uint16 partial_roam_scan_cnt;
+ uint16 full_roam_scan_cnt;
+ uint16 most_roam_reason;
+ uint16 most_roam_reason_cnt;
+} wl_roam_stats_v1_t;
+
+#define WL_WLC_VERSION_T_VERSION 1 /**< current version of wlc_version structure */
+
+/** wlc interface version */
+typedef struct wl_wlc_version {
+ uint16 version; /**< version of the structure */
+ uint16 length; /**< length of the entire structure */
+
+ /* epi version numbers */
+ uint16 epi_ver_major; /**< epi major version number */
+ uint16 epi_ver_minor; /**< epi minor version number */
+ uint16 epi_rc_num; /**< epi RC number */
+ uint16 epi_incr_num; /**< epi increment number */
+
+ /* wlc interface version numbers */
+ uint16 wlc_ver_major; /**< wlc interface major version number */
+ uint16 wlc_ver_minor; /**< wlc interface minor version number */
+}
+wl_wlc_version_t;
+
+#define WL_SCAN_VERSION_T_VERSION 1 /**< current version of scan_version structure */
+/** scan interface version */
+typedef struct wl_scan_version {
+ uint16 version; /**< version of the structure */
+ uint16 length; /**< length of the entire structure */
+
+ /* scan interface version numbers */
+ uint16 scan_ver_major; /**< scan interface major version number */
+} wl_scan_version_t;
+
+/* Highest version of WLC_API_VERSION supported */
+#define WLC_API_VERSION_MAJOR_MAX 8
+#define WLC_API_VERSION_MINOR_MAX 0
+
+/* begin proxd definitions */
+#include <packed_section_start.h>
+
+#define WL_PROXD_API_VERSION 0x0300u /**< version 3.0 */
+
+/* proxd version with 11az */
+#define WL_PROXD_11AZ_API_VERSION_1 0x0400u
+
+/** Minimum supported API version */
+#define WL_PROXD_API_MIN_VERSION 0x0300u
+
+/** proximity detection methods */
+enum {
+ WL_PROXD_METHOD_NONE = 0,
+ WL_PROXD_METHOD_RSVD1 = 1, /**< backward compatibility - RSSI, not supported */
+ WL_PROXD_METHOD_TOF = 2, /**< 11v+BCM proprietary */
+ WL_PROXD_METHOD_RSVD2 = 3, /**< 11v only - if needed */
+ WL_PROXD_METHOD_FTM = 4, /**< IEEE rev mc/2014 */
+ WL_PROXD_METHOD_MAX
+};
+typedef int16 wl_proxd_method_t;
+
+/** 11az ftm types */
+enum {
+ WL_FTM_TYPE_NONE = 0, /* ftm type unspecified */
+ WL_FTM_TYPE_MC = 1, /* Legacy MC ftm */
+ WL_FTM_TYPE_TB = 2, /* 11az Trigger based */
+ WL_FTM_TYPE_NTB = 3, /* 11az Non-trigger based */
+ WL_FTM_TYPE_MAX
+};
+typedef uint8 wl_ftm_type_t;
+
+/** global and method configuration flags */
+enum {
+ WL_PROXD_FLAG_NONE = 0x00000000,
+ WL_PROXD_FLAG_RX_ENABLED = 0x00000001, /**< respond to requests, per bss */
+ WL_PROXD_FLAG_RX_RANGE_REQ = 0x00000002, /**< 11mc range requests enabled */
+ WL_PROXD_FLAG_TX_LCI = 0x00000004, /**< tx lci, if known */
+ WL_PROXD_FLAG_TX_CIVIC = 0x00000008, /**< tx civic, if known */
+ WL_PROXD_FLAG_RX_AUTO_BURST = 0x00000010, /**< auto respond w/o host action */
+ WL_PROXD_FLAG_TX_AUTO_BURST = 0x00000020, /**< continue tx w/o host action */
+ WL_PROXD_FLAG_AVAIL_PUBLISH = 0x00000040, /**< publish availability */
+ WL_PROXD_FLAG_AVAIL_SCHEDULE = 0x00000080, /**< schedule using availability */
+ WL_PROXD_FLAG_ASAP_CAPABLE = 0x00000100, /* ASAP capable */
+ WL_PROXD_FLAG_MBURST_FOLLOWUP = 0x00000200, /* new multi-burst algorithm */
+ WL_PROXD_FLAG_SECURE = 0x00000400, /* per bsscfg option */
+ WL_PROXD_FLAG_NO_TSF_SYNC = 0x00000800, /* disable tsf sync */
+ WL_PROXD_FLAG_ALL = 0xffffffff
+};
+typedef uint32 wl_proxd_flags_t;
+
+#define WL_PROXD_FLAGS_AVAIL (WL_PROXD_FLAG_AVAIL_PUBLISH | \
+ WL_PROXD_FLAG_AVAIL_SCHEDULE)
+
+typedef enum wl_proxd_session_flags {
+ WL_PROXD_SESSION_FLAG_NONE = 0x00000000, /**< no flags */
+ WL_PROXD_SESSION_FLAG_INITIATOR = 0x00000001, /**< local device is initiator */
+ WL_PROXD_SESSION_FLAG_TARGET = 0x00000002, /**< local device is target */
+ WL_PROXD_SESSION_FLAG_ONE_WAY = 0x00000004, /**< (initiated) 1-way rtt */
+ WL_PROXD_SESSION_FLAG_AUTO_BURST = 0x00000008, /**< created w/ rx_auto_burst */
+ WL_PROXD_SESSION_FLAG_PERSIST = 0x00000010, /**< good until cancelled */
+ WL_PROXD_SESSION_FLAG_RTT_DETAIL = 0x00000020, /**< rtt detail in results */
+ WL_PROXD_SESSION_FLAG_SECURE = 0x00000040, /**< session is secure */
+ WL_PROXD_SESSION_FLAG_AOA = 0x00000080, /**< AOA along w/ RTT */
+ WL_PROXD_SESSION_FLAG_RX_AUTO_BURST = 0x00000100, /**< Same as proxd flags above */
+ WL_PROXD_SESSION_FLAG_TX_AUTO_BURST = 0x00000200, /**< Same as proxd flags above */
+ WL_PROXD_SESSION_FLAG_NAN_BSS = 0x00000400, /**< Use NAN BSS, if applicable */
+ WL_PROXD_SESSION_FLAG_TS1 = 0x00000800, /**< e.g. FTM1 - ASAP-capable */
+ WL_PROXD_SESSION_FLAG_RANDMAC = 0x00001000, /**< use random mac */
+ WL_PROXD_SESSION_FLAG_REPORT_FAILURE = 0x00002000, /**< report failure to target */
+ WL_PROXD_SESSION_FLAG_INITIATOR_RPT = 0x00004000, /**< report distance to target */
+ WL_PROXD_SESSION_FLAG_NOCHANSWT = 0x00008000,
+	WL_PROXD_SESSION_FLAG_NETRUAL		= 0x00010000, /**< neutral mode */
+ WL_PROXD_SESSION_FLAG_SEQ_EN = 0x00020000, /**< Toast */
+ WL_PROXD_SESSION_FLAG_NO_PARAM_OVRD = 0x00040000, /**< no param override from target */
+ WL_PROXD_SESSION_FLAG_ASAP = 0x00080000, /**< ASAP session */
+ WL_PROXD_SESSION_FLAG_REQ_LCI = 0x00100000, /**< transmit LCI req */
+ WL_PROXD_SESSION_FLAG_REQ_CIV = 0x00200000, /**< transmit civic loc req */
+ WL_PROXD_SESSION_FLAG_PRE_SCAN = 0x00400000, /* enable pre-scan for asap=1 */
+ WL_PROXD_SESSION_FLAG_AUTO_VHTACK = 0x00800000, /* use vhtack based on brcm ie */
+ WL_PROXD_SESSION_FLAG_VHTACK = 0x01000000, /* vht ack is in use - output only */
+ WL_PROXD_SESSION_FLAG_BDUR_NOPREF = 0x02000000, /* burst-duration: no preference */
+ WL_PROXD_SESSION_FLAG_NUM_FTM_NOPREF = 0x04000000, /* num of FTM frames: no preference */
+ WL_PROXD_SESSION_FLAG_FTM_SEP_NOPREF = 0x08000000, /* time btw FTM frams: no pref */
+ WL_PROXD_SESSION_FLAG_NUM_BURST_NOPREF = 0x10000000, /* num of bursts: no pref */
+ WL_PROXD_SESSION_FLAG_BURST_PERIOD_NOPREF = 0x20000000, /* burst period: no pref */
+ WL_PROXD_SESSION_FLAG_MBURST_FOLLOWUP = 0x40000000, /* new mburst algo - reserved */
+	WL_PROXD_SESSION_FLAG_MBURST_NODELAY	= 0x80000000, /**< no delay between bursts */
+ /* core rotation on initiator (reuse ONE_WAY bit) */
+ WL_PROXD_SESSION_FLAG_CORE_ROTATE = 0x00000004,
+ WL_PROXD_SESSION_FLAG_ALL = 0xffffffff
+} wl_proxd_session_flags_t;
+
+/** session flags for 11AZ */
+
+/** session flags */
+#define WL_FTM_SESSION_FLAG_NONE 0x0000000000000000llu /* no flags */
+#define WL_FTM_SESSION_FLAG_INITIATOR 0x0000000000000001llu /* local is initiator */
+#define WL_FTM_SESSION_FLAG_TARGET 0x0000000000000002llu /* local is target */
+#define WL_FTM_SESSION_FLAG_CORE_ROTATE 0x0000000000000004llu /* initiator core rotate */
+#define WL_FTM_SESSION_FLAG_AUTO_BURST 0x0000000000000008llu /* rx_auto_burst */
+#define WL_FTM_SESSION_FLAG_PERSIST 0x0000000000000010llu /* good until cancelled */
+#define WL_FTM_SESSION_FLAG_RTT_DETAIL 0x0000000000000020llu /* rtt detail results */
+#define WL_FTM_SESSION_FLAG_SECURE 0x0000000000000040llu /* session is secure */
+#define WL_FTM_SESSION_FLAG_AOA 0x0000000000000080llu /* AOA along w/ RTT */
+#define WL_FTM_SESSION_FLAG_RX_AUTO_BURST 0x0000000000000100llu /* see flags above */
+#define WL_FTM_SESSION_FLAG_TX_AUTO_BURST 0x0000000000000200llu /* see flags above */
+#define WL_FTM_SESSION_FLAG_NAN_BSS 0x0000000000000400llu /* NAN BSS */
+#define WL_FTM_SESSION_FLAG_ASAP_CAPABLE 0x0000000000000800llu /* ASAP-capable */
+#define WL_FTM_SESSION_FLAG_RANDMAC 0x0000000000001000llu /* use random mac */
+#define WL_FTM_SESSION_FLAG_REPORT_FAILURE 0x0000000000002000llu /* failure to target */
+#define WL_FTM_SESSION_FLAG_INITIATOR_RPT 0x0000000000004000llu /* distance to target */
+#define WL_FTM_SESSION_FLAG_NOCHANSWT 0x0000000000008000llu
+#define WL_FTM_SESSION_FLAG_NETRUAL 0x0000000000010000llu /* TODO: remove/reserved */
+#define WL_FTM_SESSION_FLAG_SEQ_EN 0x0000000000020000llu /* Toast */
+#define WL_FTM_SESSION_FLAG_NO_PARAM_OVRD 0x0000000000040000llu /* no override from tgt */
+#define WL_FTM_SESSION_FLAG_ASAP 0x0000000000080000llu /* ASAP session */
+#define WL_FTM_SESSION_FLAG_REQ_LCI 0x0000000000100000llu /* tx LCI req */
+#define WL_FTM_SESSION_FLAG_REQ_CIV 0x0000000000200000llu /* tx civic loc req */
+#define WL_FTM_SESSION_FLAG_PRE_SCAN 0x0000000000400000llu /* pre-scan for asap=1 */
+#define WL_FTM_SESSION_FLAG_AUTO_VHTACK 0x0000000000800000llu /* vhtack based on brcmie */
+#define WL_FTM_SESSION_FLAG_VHTACK 0x0000000001000000llu /* vht ack is in use */
+#define WL_FTM_SESSION_FLAG_BDUR_NOPREF 0x0000000002000000llu /* burst-duration no pref */
+#define WL_FTM_SESSION_FLAG_NUM_FTM_NOPREF 0x0000000004000000llu /* num of FTM: no pref */
+#define WL_FTM_SESSION_FLAG_FTM_SEP_NOPREF 0x0000000008000000llu /* time btw FTM: no pref */
+#define WL_FTM_SESSION_FLAG_NUM_BURST_NOPREF 0x0000000010000000llu /* num of bursts: no pref */
+#define WL_FTM_SESSION_FLAG_BURST_PERIOD_NOPREF 0x0000000020000000llu /* burst period: no pref */
+#define WL_FTM_SESSION_FLAG_MBURST_FOLLOWUP 0x0000000040000000llu /* new mburst algo */
+#define WL_FTM_SESSION_FLAG_MBURST_NODELAY	0x0000000080000000llu	/* no delay between bursts */
+#define WL_FTM_SESSION_FLAG_FULL_BW 0x0000000100000000llu /* use all bandwidth */
+#define WL_FTM_SESSION_FLAG_R2I_TOA_PHASE_SHIFT 0x0000000200000000llu /* phase shft average toa */
+#define WL_FTM_SESSION_FLAG_I2R_TOA_PHASE_SHIFT 0x0000000400000000llu /* phase shft average toa */
+#define WL_FTM_SESSION_FLAG_I2R_IMMEDIATE_RPT 0x0000000800000000llu /* immediate I2R feedback */
+#define WL_FTM_SESSION_FLAG_R2I_IMMEDIATE_RPT	0x0000001000000000llu	/* immediate R2I report */
+#define WL_FTM_SESSION_FLAG_DEV_CLASS_A 0x0000002000000000llu /* class A device */
+#define WL_FTM_SESSION_FLAG_ALL 0xffffffffffffffffllu
+typedef uint64 wl_ftm_session_flags_t;
+typedef uint64 wl_ftm_session_mask_t;
+
+/* Flags common across mc/ntb/tb sessions.
+ * Only the flags currently in use are listed explicitly;
+ * flags not currently used still reserve their bits above.
+ */
+#define FTM_COMMON_CONFIG_MASK \
+ (WL_FTM_SESSION_FLAG_INITIATOR \
+ | WL_FTM_SESSION_FLAG_INITIATOR_RPT \
+ | WL_FTM_SESSION_FLAG_TARGET \
+ | WL_FTM_SESSION_FLAG_SECURE \
+ | WL_FTM_SESSION_FLAG_CORE_ROTATE \
+ | WL_FTM_SESSION_FLAG_RANDMAC \
+ | WL_FTM_SESSION_FLAG_RX_AUTO_BURST \
+ | WL_FTM_SESSION_FLAG_TX_AUTO_BURST \
+ | WL_FTM_SESSION_FLAG_REQ_LCI \
+ | WL_FTM_SESSION_FLAG_REQ_CIV \
+ | WL_FTM_SESSION_FLAG_RTT_DETAIL \
+ | WL_FTM_SESSION_FLAG_NO_PARAM_OVRD \
+ | WL_FTM_SESSION_FLAG_AUTO_BURST)
+
+/* flags relevant to MC sessions */
+#define FTM_MC_CONFIG_MASK \
+	((FTM_COMMON_CONFIG_MASK) | \
+	(WL_FTM_SESSION_FLAG_AUTO_VHTACK \
+	| WL_FTM_SESSION_FLAG_MBURST_NODELAY \
+	| WL_FTM_SESSION_FLAG_ASAP_CAPABLE \
+	| WL_FTM_SESSION_FLAG_ASAP \
+	| WL_FTM_SESSION_FLAG_VHTACK \
+	| WL_FTM_SESSION_FLAG_BDUR_NOPREF \
+	| WL_FTM_SESSION_FLAG_NUM_FTM_NOPREF \
+	| WL_FTM_SESSION_FLAG_FTM_SEP_NOPREF \
+	| WL_FTM_SESSION_FLAG_NUM_BURST_NOPREF \
+	| WL_FTM_SESSION_FLAG_BURST_PERIOD_NOPREF \
+	| WL_FTM_SESSION_FLAG_SEQ_EN \
+	| WL_FTM_SESSION_FLAG_MBURST_FOLLOWUP))
+
+/* flags relevant to NTB sessions */
+#define FTM_NTB_CONFIG_MASK \
+	((FTM_COMMON_CONFIG_MASK) | \
+	(WL_FTM_SESSION_FLAG_R2I_TOA_PHASE_SHIFT \
+	| WL_FTM_SESSION_FLAG_I2R_TOA_PHASE_SHIFT \
+	| WL_FTM_SESSION_FLAG_I2R_IMMEDIATE_RPT \
+	| WL_FTM_SESSION_FLAG_R2I_IMMEDIATE_RPT))
+
+/* flags relevant to TB sessions. To be expanded */
+#define FTM_TB_CONFIG_MASK (FTM_NTB_CONFIG_MASK)
+
+/** time units - mc supports up to 0.1ns resolution */
+enum {
+ WL_PROXD_TMU_TU = 0, /**< 1024us */
+ WL_PROXD_TMU_SEC = 1,
+ WL_PROXD_TMU_MILLI_SEC = 2,
+ WL_PROXD_TMU_MICRO_SEC = 3,
+ WL_PROXD_TMU_NANO_SEC = 4,
+ WL_PROXD_TMU_PICO_SEC = 5
+};
+typedef int16 wl_proxd_tmu_t;
+
+/** time interval e.g. 10ns */
+typedef struct wl_proxd_intvl {
+ uint32 intvl;
+ wl_proxd_tmu_t tmu;
+ uint8 pad[2];
+} wl_proxd_intvl_t;
+
+/** commands that can apply to proxd, method or a session */
+enum {
+ WL_PROXD_CMD_NONE = 0,
+ WL_PROXD_CMD_GET_VERSION = 1,
+ WL_PROXD_CMD_ENABLE = 2,
+ WL_PROXD_CMD_DISABLE = 3,
+ WL_PROXD_CMD_CONFIG = 4,
+ WL_PROXD_CMD_START_SESSION = 5,
+ WL_PROXD_CMD_BURST_REQUEST = 6,
+ WL_PROXD_CMD_STOP_SESSION = 7,
+ WL_PROXD_CMD_DELETE_SESSION = 8,
+ WL_PROXD_CMD_GET_RESULT = 9,
+ WL_PROXD_CMD_GET_INFO = 10,
+ WL_PROXD_CMD_GET_STATUS = 11,
+ WL_PROXD_CMD_GET_SESSIONS = 12,
+ WL_PROXD_CMD_GET_COUNTERS = 13,
+ WL_PROXD_CMD_CLEAR_COUNTERS = 14,
+ WL_PROXD_CMD_COLLECT = 15, /* not supported, see 'wl proxd_collect' */
+ WL_PROXD_CMD_TUNE = 16, /* not supported, see 'wl proxd_tune' */
+ WL_PROXD_CMD_DUMP = 17,
+ WL_PROXD_CMD_START_RANGING = 18,
+ WL_PROXD_CMD_STOP_RANGING = 19,
+ WL_PROXD_CMD_GET_RANGING_INFO = 20,
+ WL_PROXD_CMD_IS_TLV_SUPPORTED = 21,
+
+ WL_PROXD_CMD_MAX
+};
+typedef int16 wl_proxd_cmd_t;
+
+/* session ids:
+ * id 0 is reserved
+ * ids 1..0x7fff - allocated by host/app
+ * ids 0x8000..0xffff - allocated by firmware, used for auto-rx
+ */
+enum {
+ WL_PROXD_SESSION_ID_GLOBAL = 0
+};
+
+/* Externally allocated sids */
+#define WL_PROXD_SID_EXT_MAX 0x7fff
+#define WL_PROXD_SID_EXT_ALLOC(_sid) ((_sid) > 0 && (_sid) <= WL_PROXD_SID_EXT_MAX)
+
+/* block size for reserved sid blocks */
+#define WL_PROXD_SID_EXT_BLKSZ 256
+#define WL_PROXD_SID_EXT_BLK_START(_i) (WL_PROXD_SID_EXT_MAX - (_i) * WL_PROXD_SID_EXT_BLKSZ + 1)
+#define WL_PROXD_SID_EXT_BLK_END(_start) ((_start) + WL_PROXD_SID_EXT_BLKSZ - 1)
+
+/* rrm block */
+#define WL_PROXD_SID_RRM_START WL_PROXD_SID_EXT_BLK_START(1)
+#define WL_PROXD_SID_RRM_END WL_PROXD_SID_EXT_BLK_END(WL_PROXD_SID_RRM_START)
+
+/* nan block */
+#define WL_PROXD_SID_NAN_START WL_PROXD_SID_EXT_BLK_START(2)
+#define WL_PROXD_SID_NAN_END WL_PROXD_SID_EXT_BLK_END(WL_PROXD_SID_NAN_START)
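+
+/* Compile-time sketch (illustrative only, under the hypothetical
+ * WL_HDR_EXAMPLES guard): the reserved blocks are carved downward from
+ * WL_PROXD_SID_EXT_MAX, so the rrm block spans 0x7f00..0x7fff and the nan
+ * block spans 0x7e00..0x7eff. The negative-array-size idiom fails the
+ * build if the macro arithmetic ever changes.
+ */
+#ifdef WL_HDR_EXAMPLES
+typedef char wl_proxd_sid_rrm_check[(WL_PROXD_SID_RRM_START == 0x7f00 &&
+	WL_PROXD_SID_RRM_END == 0x7fff) ? 1 : -1];
+typedef char wl_proxd_sid_nan_check[(WL_PROXD_SID_NAN_START == 0x7e00 &&
+	WL_PROXD_SID_NAN_END == 0x7eff) ? 1 : -1];
+#endif /* WL_HDR_EXAMPLES */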
+
+/** maximum number of sessions that can be allocated; may be fewer if tuned lower */
+#define WL_PROXD_MAX_SESSIONS 16
+
+typedef uint16 wl_proxd_session_id_t;
+
+/* Use the WL_PROXD_E_* error codes from this file if BCMUTILS_ERR_CODES is not defined */
+#ifndef BCMUTILS_ERR_CODES
+
+/** status - TBD BCME_ vs proxd status - range reserved for BCME_ */
+enum {
+ WL_PROXD_E_LAST = -1056,
+ WL_PROXD_E_NOAVAIL = -1056,
+ WL_PROXD_E_EXT_SCHED = -1055,
+ WL_PROXD_E_NOT_BCM = -1054,
+ WL_PROXD_E_FRAME_TYPE = -1053,
+ WL_PROXD_E_VERNOSUPPORT = -1052,
+ WL_PROXD_E_SEC_NOKEY = -1051,
+ WL_PROXD_E_SEC_POLICY = -1050,
+ WL_PROXD_E_SCAN_INPROCESS = -1049,
+ WL_PROXD_E_BAD_PARTIAL_TSF = -1048,
+ WL_PROXD_E_SCANFAIL = -1047,
+ WL_PROXD_E_NOTSF = -1046,
+ WL_PROXD_E_POLICY = -1045,
+ WL_PROXD_E_INCOMPLETE = -1044,
+ WL_PROXD_E_OVERRIDDEN = -1043,
+ WL_PROXD_E_ASAP_FAILED = -1042,
+ WL_PROXD_E_NOTSTARTED = -1041,
+ WL_PROXD_E_INVALIDMEAS = -1040,
+ WL_PROXD_E_INCAPABLE = -1039,
+ WL_PROXD_E_MISMATCH = -1038,
+ WL_PROXD_E_DUP_SESSION = -1037,
+ WL_PROXD_E_REMOTE_FAIL = -1036,
+ WL_PROXD_E_REMOTE_INCAPABLE = -1035,
+ WL_PROXD_E_SCHED_FAIL = -1034,
+ WL_PROXD_E_PROTO = -1033,
+ WL_PROXD_E_EXPIRED = -1032,
+ WL_PROXD_E_TIMEOUT = -1031,
+ WL_PROXD_E_NOACK = -1030,
+ WL_PROXD_E_DEFERRED = -1029,
+ WL_PROXD_E_INVALID_SID = -1028,
+ WL_PROXD_E_REMOTE_CANCEL = -1027,
+ WL_PROXD_E_CANCELED = -1026, /**< local */
+ WL_PROXD_E_INVALID_SESSION = -1025,
+ WL_PROXD_E_BAD_STATE = -1024,
+ WL_PROXD_E_START = -1024,
+ WL_PROXD_E_ERROR = -1,
+ WL_PROXD_E_OK = 0
+};
+typedef int32 wl_proxd_status_t;
+
+#endif /* BCMUTILS_ERR_CODES */
+
+/* proxd errors from phy */
+#define PROXD_TOF_INIT_ERR_BITS 16
+
+enum {
+ WL_PROXD_PHY_ERR_LB_CORR_THRESH = (1 << 0), /* Loopback Correlation threshold */
+ WL_PROXD_PHY_ERR_RX_CORR_THRESH = (1 << 1), /* Received Correlation threshold */
+ WL_PROXD_PHY_ERR_LB_PEAK_POWER = (1 << 2), /* Loopback Peak power */
+ WL_PROXD_PHY_ERR_RX_PEAK_POWER = (1 << 3), /* Received Peak power */
+ WL_PROXD_PHY_ERR_BITFLIP = (1 << 4), /* Bitflips */
+ WL_PROXD_PHY_ERR_SNR = (1 << 5), /* SNR */
+ WL_PROXD_PHY_RX_STRT_WIN_OFF = (1 << 6), /* Receive start window is off */
+ WL_PROXD_PHY_RX_END_WIN_OFF = (1 << 7), /* Receive End window is off */
+ WL_PROXD_PHY_ERR_LOW_CONFIDENCE = (1 << 15), /* Low confidence on meas distance */
+};
+typedef uint32 wl_proxd_phy_error_t;
+
+/** session states */
+enum {
+ WL_PROXD_SESSION_STATE_NONE = 0,
+ WL_PROXD_SESSION_STATE_CREATED = 1,
+ WL_PROXD_SESSION_STATE_CONFIGURED = 2,
+ WL_PROXD_SESSION_STATE_STARTED = 3,
+ WL_PROXD_SESSION_STATE_DELAY = 4,
+ WL_PROXD_SESSION_STATE_USER_WAIT = 5,
+ WL_PROXD_SESSION_STATE_SCHED_WAIT = 6,
+ WL_PROXD_SESSION_STATE_BURST = 7,
+ WL_PROXD_SESSION_STATE_STOPPING = 8,
+ WL_PROXD_SESSION_STATE_ENDED = 9,
+ WL_PROXD_SESSION_STATE_START_WAIT = 10,
+ WL_PROXD_SESSION_STATE_DESTROYING = -1
+};
+
+typedef enum wl_ftm_session_state {
+ WL_FTM_SESSION_STATE_NONE = 0,
+ WL_FTM_SESSION_STATE_CREATED = 1,
+ WL_FTM_SESSION_STATE_CONFIGURED = 2,
+ WL_FTM_SESSION_STATE_STARTED = 3,
+ WL_FTM_SESSION_STATE_DELAY = 4,
+ WL_FTM_SESSION_STATE_USER_WAIT = 5,
+ WL_FTM_SESSION_STATE_SCHED_WAIT = 6,
+ WL_FTM_SESSION_STATE_BURST = 7,
+ WL_FTM_SESSION_STATE_ENDED = 8
+} wl_ftm_session_state_t;
+
+typedef int16 wl_proxd_session_state_t;
+
+/** RTT sample flags */
+enum {
+ WL_PROXD_RTT_SAMPLE_NONE = 0x00,
+ WL_PROXD_RTT_SAMPLE_DISCARD = 0x01
+};
+typedef uint8 wl_proxd_rtt_sample_flags_t;
+typedef int16 wl_proxd_rssi_t;
+typedef uint16 wl_proxd_snr_t;
+typedef uint16 wl_proxd_bitflips_t;
+
+/** result flags */
+enum {
+	WL_PROXD_RESULT_FLAG_NONE	= 0x0000,
+	WL_PROXD_RESULT_FLAG_NLOS	= 0x0001,	/**< NLOS - if available */
+	WL_PROXD_RESULT_FLAG_LOS	= 0x0002,	/**< LOS - if available */
+ WL_PROXD_RESULT_FLAG_FATAL = 0x0004, /**< Fatal error during burst */
+ WL_PROXD_RESULT_FLAG_VHTACK = 0x0008, /* VHTACK or Legacy ACK used */
+ WL_PROXD_REQUEST_SENT = 0x0010, /* FTM request was sent */
+ WL_PROXD_REQUEST_ACKED = 0x0020, /* FTM request was acked */
+ WL_PROXD_LTFSEQ_STARTED = 0x0040, /* LTF sequence started */
+ WL_PROXD_RESULT_FLAG_ALL = 0xffff
+};
+typedef int16 wl_proxd_result_flags_t;
+
+#define WL_PROXD_RTT_SAMPLE_VERSION_1 1
+typedef struct wl_proxd_rtt_sample_v1 {
+ uint8 id; /**< id for the sample - non-zero */
+ wl_proxd_rtt_sample_flags_t flags;
+ wl_proxd_rssi_t rssi;
+ wl_proxd_intvl_t rtt; /**< round trip time */
+ uint32 ratespec;
+ wl_proxd_snr_t snr;
+ wl_proxd_bitflips_t bitflips;
+ wl_proxd_status_t status;
+ int32 distance;
+ wl_proxd_phy_error_t tof_phy_error;
+ wl_proxd_phy_error_t tof_tgt_phy_error; /* target phy error bit map */
+ wl_proxd_snr_t tof_tgt_snr;
+ wl_proxd_bitflips_t tof_tgt_bitflips;
+ uint8 coreid;
+ uint8 pad[3];
+} wl_proxd_rtt_sample_v1_t;
+
+#define WL_PROXD_RTT_RESULT_VERSION_1 1
+/** rtt measurement result */
+typedef struct wl_proxd_rtt_result_v1 {
+ wl_proxd_session_id_t sid;
+ wl_proxd_result_flags_t flags;
+ wl_proxd_status_t status;
+ struct ether_addr peer;
+ wl_proxd_session_state_t state; /**< current state */
+ union {
+ wl_proxd_intvl_t retry_after; /* hint for errors */
+ wl_proxd_intvl_t burst_duration; /* burst duration */
+ } u;
+ wl_proxd_rtt_sample_v1_t avg_rtt;
+ uint32 avg_dist; /* 1/256m units */
+ uint16 sd_rtt; /* RTT standard deviation */
+ uint8 num_valid_rtt; /* valid rtt cnt */
+	uint8		num_ftm;	/* actual number of FTM frames (configured) */
+ uint16 burst_num; /* in a session */
+ uint16 num_rtt; /* 0 if no detail */
+ uint16 num_meas; /* number of ftm frames seen OTA */
+ uint8 pad[2];
+ wl_proxd_rtt_sample_v1_t rtt[1]; /* variable */
+} wl_proxd_rtt_result_v1_t;
+
+#define WL_PROXD_RTT_SAMPLE_VERSION_2 2
+typedef struct wl_proxd_rtt_sample_v2 {
+ uint16 version;
+ uint16 length;
+ uint8 id; /**< id for the sample - non-zero */
+ wl_proxd_rtt_sample_flags_t flags;
+ wl_proxd_rssi_t rssi;
+ wl_proxd_intvl_t rtt; /**< round trip time */
+ uint32 ratespec;
+ wl_proxd_snr_t snr;
+ wl_proxd_bitflips_t bitflips;
+ wl_proxd_status_t status;
+ int32 distance;
+ wl_proxd_phy_error_t tof_phy_error;
+ wl_proxd_phy_error_t tof_tgt_phy_error; /* target phy error bit map */
+ wl_proxd_snr_t tof_tgt_snr;
+ wl_proxd_bitflips_t tof_tgt_bitflips;
+ uint8 coreid;
+ uint8 pad[3];
+ uint32 chanspec;
+} wl_proxd_rtt_sample_v2_t;
+
+#define WL_PROXD_RTT_RESULT_VERSION_2 2
+/** rtt measurement result */
+typedef struct wl_proxd_rtt_result_v2 {
+ uint16 version;
+ uint16 length; /* up to rtt[] */
+ wl_proxd_session_id_t sid;
+ wl_proxd_result_flags_t flags;
+ wl_proxd_status_t status;
+ struct ether_addr peer;
+ wl_proxd_session_state_t state; /**< current state */
+ union {
+ wl_proxd_intvl_t retry_after; /* hint for errors */
+ wl_proxd_intvl_t burst_duration; /* burst duration */
+ } u;
+ uint32 avg_dist; /* 1/256m units */
+ uint16 sd_rtt; /* RTT standard deviation */
+ uint8 num_valid_rtt; /* valid rtt cnt */
+	uint8		num_ftm;	/* actual number of FTM frames (configured) */
+ uint16 burst_num; /* in a session */
+ uint16 num_rtt; /* 0 if no detail */
+ uint16 num_meas; /* number of ftm frames seen OTA */
+ uint8 pad[2];
+ wl_proxd_rtt_sample_v2_t rtt[1]; /* variable, first element is avg_rtt */
+} wl_proxd_rtt_result_v2_t;
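+
+/* Unit-conversion sketch (illustrative only, under the hypothetical
+ * WL_HDR_EXAMPLES guard): avg_dist is reported in 1/256 m units, so
+ * millimetres = avg_dist * 1000 / 256.
+ */
+#ifdef WL_HDR_EXAMPLES
+static uint32
+wl_proxd_rtt_example_dist_mm(const wl_proxd_rtt_result_v2_t *res)
+{
+	/* assumes distances well under ~16 km so the 32-bit multiply fits */
+	return (res->avg_dist * 1000u) / 256u;
+}
+#endif /* WL_HDR_EXAMPLES */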
+
+/** aoa measurement result */
+typedef struct wl_proxd_aoa_result {
+ wl_proxd_session_id_t sid;
+ wl_proxd_result_flags_t flags;
+ wl_proxd_status_t status;
+ struct ether_addr peer;
+ wl_proxd_session_state_t state;
+ uint16 burst_num;
+ uint8 pad[2];
+ /* wl_proxd_aoa_sample_t sample_avg; TBD */
+} BWL_POST_PACKED_STRUCT wl_proxd_aoa_result_t;
+#include <packed_section_end.h>
+
+/** global stats */
+typedef struct wl_proxd_counters {
+ uint32 tx; /* tx frame count */
+ uint32 rx; /* rx frame count */
+ uint32 burst; /* total number of burst */
+ uint32 sessions; /* total number of sessions */
+ uint32 max_sessions; /* max concurrency */
+ uint32 sched_fail; /* scheduling failures */
+ uint32 timeouts; /* timeouts */
+ uint32 protoerr; /* protocol errors */
+ uint32 noack; /* tx w/o ack */
+	uint32 txfail;			/* any tx failure */
+ uint32 lci_req_tx; /* tx LCI requests */
+ uint32 lci_req_rx; /* rx LCI requests */
+ uint32 lci_rep_tx; /* tx LCI reports */
+ uint32 lci_rep_rx; /* rx LCI reports */
+ uint32 civic_req_tx; /* tx civic requests */
+ uint32 civic_req_rx; /* rx civic requests */
+ uint32 civic_rep_tx; /* tx civic reports */
+ uint32 civic_rep_rx; /* rx civic reports */
+ uint32 rctx; /* ranging contexts created */
+ uint32 rctx_done; /* count of ranging done */
+ uint32 publish_err; /* availability publishing errors */
+ uint32 on_chan; /* count of scheduler onchan */
+ uint32 off_chan; /* count of scheduler offchan */
+ uint32 tsf_lo; /* local tsf or session tsf */
+ uint32 tsf_hi;
+ uint32 num_meas;
+} wl_proxd_counters_t;
+
+typedef struct wl_proxd_counters wl_proxd_session_counters_t;
+
+enum {
+ WL_PROXD_CAP_NONE = 0x0000,
+ WL_PROXD_CAP_ALL = 0xffff
+};
+typedef int16 wl_proxd_caps_t;
+
+/** method capabilities */
+enum {
+ WL_PROXD_FTM_CAP_NONE = 0x0000,
+ WL_PROXD_FTM_CAP_FTM1 = 0x0001
+};
+typedef uint16 wl_proxd_ftm_caps_t;
+
+typedef struct wl_proxd_tlv_id_list {
+ uint16 num_ids;
+ uint16 ids[1];
+} wl_proxd_tlv_id_list_t;
+
+typedef struct wl_proxd_session_id_list {
+ uint16 num_ids;
+ wl_proxd_session_id_t ids[1];
+} wl_proxd_session_id_list_t;
+
+typedef struct wl_proxd_tpk {
+ struct ether_addr peer;
+ uint8 tpk[TPK_FTM_LEN];
+} wl_proxd_tpk_t;
+
+/* tlvs returned for get_info on ftm method
+ * configuration:
+ * proxd flags
+ * event mask
+ * debug mask
+ * session defaults (session tlvs)
+ * status tlv - not supported for ftm method
+ * info tlv
+ */
+typedef struct wl_proxd_ftm_info {
+ wl_proxd_ftm_caps_t caps;
+ uint16 max_sessions;
+ uint16 num_sessions;
+ uint16 rx_max_burst;
+} wl_proxd_ftm_info_t;
+
+enum {
+ WL_PROXD_WAIT_NONE = 0x0000,
+ WL_PROXD_WAIT_KEY = 0x0001,
+ WL_PROXD_WAIT_SCHED = 0x0002,
+ WL_PROXD_WAIT_TSF = 0x0004
+};
+typedef int16 wl_proxd_wait_reason_t;
+
+/* tlvs returned for get_info on session
+ * session config (tlvs)
+ * session info tlv
+ */
+typedef struct wl_proxd_ftm_session_info {
+ uint16 sid;
+ uint8 bss_index;
+ uint8 pad;
+ struct ether_addr bssid;
+ wl_proxd_session_state_t state;
+ wl_proxd_status_t status;
+ uint16 burst_num;
+ wl_proxd_wait_reason_t wait_reason;
+ uint32 meas_start_lo; /* sn tsf of 1st meas for cur/prev burst */
+ uint32 meas_start_hi;
+} wl_proxd_ftm_session_info_t;
+
+typedef struct wl_proxd_ftm_session_status {
+ uint16 sid;
+ wl_proxd_session_state_t state;
+ wl_proxd_status_t status;
+ uint16 burst_num;
+ uint16 core_info;
+} wl_proxd_ftm_session_status_t;
+
+/** rrm range request */
+typedef struct wl_proxd_range_req {
+ uint16 num_repeat;
+ uint16 init_delay_range; /**< in TUs */
+ uint8 pad;
+ uint8 num_nbr; /**< number of (possible) neighbors */
+ nbr_element_t nbr[1];
+} wl_proxd_range_req_t;
+
+#define WL_PROXD_LCI_LAT_OFF 0
+#define WL_PROXD_LCI_LONG_OFF 5
+#define WL_PROXD_LCI_ALT_OFF 10
+
+#define WL_PROXD_LCI_GET_LAT(_lci, _lat, _lat_err) { \
+ unsigned _off = WL_PROXD_LCI_LAT_OFF; \
+ _lat_err = (_lci)->data[(_off)] & 0x3f; \
+ _lat = (_lci)->data[(_off)+1]; \
+ _lat |= (_lci)->data[(_off)+2] << 8; \
+	_lat |= (_lci)->data[(_off)+3] << 16; \
+ _lat |= (_lci)->data[(_off)+4] << 24; \
+ _lat <<= 2; \
+ _lat |= (_lci)->data[(_off)] >> 6; \
+}
+
+#define WL_PROXD_LCI_GET_LONG(_lci, _lcilong, _long_err) { \
+ unsigned _off = WL_PROXD_LCI_LONG_OFF; \
+ _long_err = (_lci)->data[(_off)] & 0x3f; \
+ _lcilong = (_lci)->data[(_off)+1]; \
+ _lcilong |= (_lci)->data[(_off)+2] << 8; \
+	_lcilong |= (_lci)->data[(_off)+3] << 16; \
+	_lcilong |= (_lci)->data[(_off)+4] << 24; \
+	_lcilong <<= 2; \
+ _lcilong |= (_lci)->data[(_off)] >> 6; \
+}
+
+#define WL_PROXD_LCI_GET_ALT(_lci, _alt_type, _alt, _alt_err) { \
+ unsigned _off = WL_PROXD_LCI_ALT_OFF; \
+ _alt_type = (_lci)->data[_off] & 0x0f; \
+ _alt_err = (_lci)->data[(_off)] >> 4; \
+ _alt_err |= ((_lci)->data[(_off)+1] & 0x03) << 4; \
+ _alt = (_lci)->data[(_off)+2]; \
+ _alt |= (_lci)->data[(_off)+3] << 8; \
+	_alt |= (_lci)->data[(_off)+4] << 16; \
+ _alt <<= 6; \
+ _alt |= (_lci)->data[(_off) + 1] >> 2; \
+}
+
+#define WL_PROXD_LCI_VERSION(_lci) ((_lci)->data[15] >> 6)
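+
+/* Usage sketch (illustrative only, under the hypothetical WL_HDR_EXAMPLES
+ * guard): the GET macros above only require that their first argument
+ * point at an object with a byte array named 'data'; the struct below is
+ * a stand-in for whatever LCI container the caller actually holds.
+ */
+#ifdef WL_HDR_EXAMPLES
+struct wl_lci_example { uint8 data[16]; };
+
+static void
+wl_lci_example_decode(const struct wl_lci_example *lci)
+{
+	int32 lat = 0, lcilong = 0, alt = 0;
+	uint8 lat_err, long_err, alt_type, alt_err;
+
+	WL_PROXD_LCI_GET_LAT(lci, lat, lat_err);
+	WL_PROXD_LCI_GET_LONG(lci, lcilong, long_err);
+	WL_PROXD_LCI_GET_ALT(lci, alt_type, alt, alt_err);
+	/* lat/lcilong/alt now hold the fixed-point fields; the *_err
+	 * values carry the uncertainty bits from the element
+	 */
+}
+#endif /* WL_HDR_EXAMPLES */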
+
+/* availability - advertising mechanism, bss specific */
+/** availability flags */
+enum {
+ WL_PROXD_AVAIL_NONE = 0,
+ WL_PROXD_AVAIL_NAN_PUBLISHED = 0x0001,
+ WL_PROXD_AVAIL_SCHEDULED = 0x0002 /**< scheduled by proxd */
+};
+typedef int16 wl_proxd_avail_flags_t;
+
+/** time reference */
+enum {
+ WL_PROXD_TREF_NONE = 0,
+ WL_PROXD_TREF_DEV_TSF = 1,
+ WL_PROXD_TREF_NAN_DW = 2,
+ WL_PROXD_TREF_TBTT = 3,
+ WL_PROXD_TREF_MAX /* last entry */
+};
+typedef int16 wl_proxd_time_ref_t;
+
+/** proxd channel-time slot */
+typedef struct {
+ wl_proxd_intvl_t start; /**< from ref */
+ wl_proxd_intvl_t duration; /**< from start */
+ uint32 chanspec;
+} wl_proxd_time_slot_t;
+
+typedef struct wl_proxd_avail24 {
+ wl_proxd_avail_flags_t flags; /**< for query only */
+ wl_proxd_time_ref_t time_ref;
+ uint16 max_slots; /**< for query only */
+ uint16 num_slots;
+ wl_proxd_time_slot_t slots[1]; /**< ROM compat - not used */
+ wl_proxd_intvl_t repeat;
+ wl_proxd_time_slot_t ts0[1];
+} wl_proxd_avail24_t;
+#define WL_PROXD_AVAIL24_TIMESLOT(_avail24, _i) (&(_avail24)->ts0[(_i)])
+#define WL_PROXD_AVAIL24_TIMESLOT_OFFSET(_avail24) OFFSETOF(wl_proxd_avail24_t, ts0)
+#define WL_PROXD_AVAIL24_TIMESLOTS(_avail24) WL_PROXD_AVAIL24_TIMESLOT(_avail24, 0)
+#define WL_PROXD_AVAIL24_SIZE(_avail24, _num_slots) (\
+ WL_PROXD_AVAIL24_TIMESLOT_OFFSET(_avail24) + \
+ (_num_slots) * sizeof(*WL_PROXD_AVAIL24_TIMESLOT(_avail24, 0)))
+
+typedef struct wl_proxd_avail {
+ wl_proxd_avail_flags_t flags; /**< for query only */
+ wl_proxd_time_ref_t time_ref;
+ uint16 max_slots; /**< for query only */
+ uint16 num_slots;
+ wl_proxd_intvl_t repeat;
+ wl_proxd_time_slot_t slots[1];
+} wl_proxd_avail_t;
+#define WL_PROXD_AVAIL_TIMESLOT(_avail, _i) (&(_avail)->slots[(_i)])
+#define WL_PROXD_AVAIL_TIMESLOT_OFFSET(_avail) OFFSETOF(wl_proxd_avail_t, slots)
+
+#define WL_PROXD_AVAIL_TIMESLOTS(_avail) WL_PROXD_AVAIL_TIMESLOT(_avail, 0)
+#define WL_PROXD_AVAIL_SIZE(_avail, _num_slots) (\
+ WL_PROXD_AVAIL_TIMESLOT_OFFSET(_avail) + \
+ (_num_slots) * sizeof(*WL_PROXD_AVAIL_TIMESLOT(_avail, 0)))
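+
+/* Sizing sketch (illustrative only, under the hypothetical WL_HDR_EXAMPLES
+ * guard): the SIZE macro uses its first argument only for type context
+ * (OFFSETOF plus an unevaluated sizeof), so the pointer is never
+ * dereferenced here.
+ */
+#ifdef WL_HDR_EXAMPLES
+static uint32
+wl_proxd_avail_example_size(const wl_proxd_avail_t *avail, uint16 num_slots)
+{
+	return (uint32)WL_PROXD_AVAIL_SIZE(avail, num_slots);
+}
+#endif /* WL_HDR_EXAMPLES */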
+
+/* collect support TBD */
+
+/** debugging */
+enum {
+ WL_PROXD_DEBUG_NONE = 0x00000000,
+ WL_PROXD_DEBUG_LOG = 0x00000001,
+ WL_PROXD_DEBUG_IOV = 0x00000002,
+ WL_PROXD_DEBUG_EVENT = 0x00000004,
+ WL_PROXD_DEBUG_SESSION = 0x00000008,
+ WL_PROXD_DEBUG_PROTO = 0x00000010,
+ WL_PROXD_DEBUG_SCHED = 0x00000020,
+ WL_PROXD_DEBUG_RANGING = 0x00000040,
+ WL_PROXD_DEBUG_NAN = 0x00000080,
+ WL_PROXD_DEBUG_PKT = 0x00000100,
+ WL_PROXD_DEBUG_SEC = 0x00000200,
+	WL_PROXD_DEBUG_EVENTLOG		= 0x80000000,	/* map/enable EVENT_LOG_TAG_PROXD_INFO */
+ WL_PROXD_DEBUG_ALL = 0xffffffff
+};
+typedef uint32 wl_proxd_debug_mask_t;
+
+/** tlv IDs - data length 4 bytes unless overridden by type, alignment 32 bits */
+typedef enum {
+ WL_PROXD_TLV_ID_NONE = 0,
+ WL_PROXD_TLV_ID_METHOD = 1,
+ WL_PROXD_TLV_ID_FLAGS = 2,
+ WL_PROXD_TLV_ID_CHANSPEC = 3, /**< note: uint32 */
+ WL_PROXD_TLV_ID_TX_POWER = 4,
+ WL_PROXD_TLV_ID_RATESPEC = 5,
+ WL_PROXD_TLV_ID_BURST_DURATION = 6, /**< intvl - length of burst */
+ WL_PROXD_TLV_ID_BURST_PERIOD = 7, /**< intvl - between bursts */
+ WL_PROXD_TLV_ID_BURST_FTM_SEP = 8, /**< intvl - between FTMs */
+ WL_PROXD_TLV_ID_BURST_NUM_FTM = 9, /**< uint16 - per burst */
+ WL_PROXD_TLV_ID_NUM_BURST = 10, /**< uint16 */
+ WL_PROXD_TLV_ID_FTM_RETRIES = 11, /**< uint16 at FTM level */
+ WL_PROXD_TLV_ID_BSS_INDEX = 12, /**< uint8 */
+ WL_PROXD_TLV_ID_BSSID = 13,
+ WL_PROXD_TLV_ID_INIT_DELAY = 14, /**< intvl - optional,non-standalone only */
+ WL_PROXD_TLV_ID_BURST_TIMEOUT = 15, /**< expect response within - intvl */
+ WL_PROXD_TLV_ID_EVENT_MASK = 16, /**< interested events - in/out */
+ WL_PROXD_TLV_ID_FLAGS_MASK = 17, /**< interested flags - in only */
+ WL_PROXD_TLV_ID_PEER_MAC = 18, /**< mac address of peer */
+ WL_PROXD_TLV_ID_FTM_REQ = 19, /**< dot11_ftm_req */
+ WL_PROXD_TLV_ID_LCI_REQ = 20,
+ WL_PROXD_TLV_ID_LCI = 21,
+ WL_PROXD_TLV_ID_CIVIC_REQ = 22,
+ WL_PROXD_TLV_ID_CIVIC = 23,
+ WL_PROXD_TLV_ID_AVAIL24 = 24, /**< ROM compatibility */
+ WL_PROXD_TLV_ID_SESSION_FLAGS = 25,
+ WL_PROXD_TLV_ID_SESSION_FLAGS_MASK = 26, /**< in only */
+ WL_PROXD_TLV_ID_RX_MAX_BURST = 27, /**< uint16 - limit bursts per session */
+ WL_PROXD_TLV_ID_RANGING_INFO = 28, /**< ranging info */
+ WL_PROXD_TLV_ID_RANGING_FLAGS = 29, /**< uint16 */
+ WL_PROXD_TLV_ID_RANGING_FLAGS_MASK = 30, /**< uint16, in only */
+ WL_PROXD_TLV_ID_NAN_MAP_ID = 31,
+ WL_PROXD_TLV_ID_DEV_ADDR = 32,
+ WL_PROXD_TLV_ID_AVAIL = 33, /**< wl_proxd_avail_t */
+ WL_PROXD_TLV_ID_TLV_ID = 34, /* uint16 tlv-id */
+ WL_PROXD_TLV_ID_FTM_REQ_RETRIES = 35, /* uint16 FTM request retries */
+ WL_PROXD_TLV_ID_TPK = 36, /* 32byte TPK */
+ WL_PROXD_TLV_ID_RI_RR = 36, /* RI_RR */
+	WL_PROXD_TLV_ID_TUNE			= 37,	/* wl_proxd_params_tof_tune_t */
+ WL_PROXD_TLV_ID_CUR_ETHER_ADDR = 38, /* Source Address used for Tx */
+
+ /* output - 512 + x */
+ WL_PROXD_TLV_ID_STATUS = 512,
+ WL_PROXD_TLV_ID_COUNTERS = 513,
+ WL_PROXD_TLV_ID_INFO = 514,
+ WL_PROXD_TLV_ID_RTT_RESULT = 515,
+ WL_PROXD_TLV_ID_AOA_RESULT = 516,
+ WL_PROXD_TLV_ID_SESSION_INFO = 517,
+ WL_PROXD_TLV_ID_SESSION_STATUS = 518,
+ WL_PROXD_TLV_ID_SESSION_ID_LIST = 519,
+ WL_PROXD_TLV_ID_RTT_RESULT_V2 = 520,
+
+ /* debug tlvs can be added starting 1024 */
+ WL_PROXD_TLV_ID_DEBUG_MASK = 1024,
+ WL_PROXD_TLV_ID_COLLECT = 1025, /**< output only */
+ WL_PROXD_TLV_ID_STRBUF = 1026,
+
+ WL_PROXD_TLV_ID_COLLECT_HEADER = 1025, /* wl_proxd_collect_header_t */
+ WL_PROXD_TLV_ID_COLLECT_INFO = 1028, /* wl_proxd_collect_info_t */
+ WL_PROXD_TLV_ID_COLLECT_DATA = 1029, /* wl_proxd_collect_data_t */
+ WL_PROXD_TLV_ID_COLLECT_CHAN_DATA = 1030, /* wl_proxd_collect_data_t */
+ WL_PROXD_TLV_ID_MF_STATS_DATA = 1031, /* mf_stats_buffer */
+
+ WL_PROXD_TLV_ID_COLLECT_INLINE_HEADER = 1032,
+ WL_PROXD_TLV_ID_COLLECT_INLINE_FRAME_INFO = 1033,
+ WL_PROXD_TLV_ID_COLLECT_INLINE_FRAME_DATA = 1034,
+ WL_PROXD_TLV_ID_COLLECT_INLINE_RESULTS = 1035,
+
+ WL_PROXD_TLV_ID_MAX
+} wl_proxd_tlv_types_t;
+
+#define TOF_COLLECT_INLINE_HEADER_INFO_VER_1 1
+
+typedef struct wl_proxd_collect_inline_header_info_v1
+{
+ uint16 version;
+ uint16 pad1;
+ uint32 ratespec; /* override */
+ chanspec_t chanspec;
+ uint16 num_ftm;
+ struct ether_addr peer_mac;
+ struct ether_addr cur_ether_addr; /* source address for Tx */
+} wl_proxd_collect_inline_header_info_v1_t;
+
+#define TOF_COLLECT_INLINE_RESULTS_VER_1 1
+typedef struct wl_proxd_collect_inline_results_info_v1
+{
+ uint16 version;
+ uint16 pad1;
+ uint32 meanrtt;
+ uint32 distance;
+ uint16 num_rtt;
+ uint16 pad2;
+ int32 status;
+ uint32 ratespec;
+} wl_proxd_collect_inline_results_info_v1_t;
+
+#define TOF_COLLECT_INLINE_FRAME_INFO_VER_1 1
+typedef struct wl_proxd_collect_inline_frame_info_v1
+{
+ uint16 version;
+ uint16 pad1;
+ int32 gd;
+ uint32 T[4];
+ uint32 prev_t1;
+ uint32 prev_t4;
+ int32 hadj;
+ int8 rssi;
+ uint8 pad[3];
+} wl_proxd_collect_inline_frame_info_v1_t;
+
+#define TOF_COLLECT_INLINE_FRAME_INFO_VER_2 2
+typedef struct wl_proxd_collect_inline_frame_info_v2
+{
+ uint16 version;
+ uint16 pad1;
+ int32 gd;
+ uint32 T[4];
+ int32 hadj;
+ int8 rssi;
+ uint8 pad[3];
+} wl_proxd_collect_inline_frame_info_v2_t;
+
+typedef struct wl_proxd_tlv {
+ uint16 id;
+ uint16 len;
+ uint8 data[1];
+} wl_proxd_tlv_t;
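+
+/* Iteration sketch (illustrative only, under the hypothetical
+ * WL_HDR_EXAMPLES guard): stepping to the next TLV in a packed response.
+ * Rounding each record to 4 bytes is one plausible reading of the
+ * "alignment 32 bits" note above; the firmware's actual padding rules
+ * govern.
+ */
+#ifdef WL_HDR_EXAMPLES
+static const wl_proxd_tlv_t *
+wl_proxd_tlv_example_next(const wl_proxd_tlv_t *tlv)
+{
+	uint32 rec_len = OFFSETOF(wl_proxd_tlv_t, data) +
+		(((uint32)tlv->len + 3u) & ~3u);
+	return (const wl_proxd_tlv_t *)((const uint8 *)tlv + rec_len);
+}
+#endif /* WL_HDR_EXAMPLES */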
+
+/** proxd iovar - applies to proxd, method or session */
+typedef struct wl_proxd_iov {
+ uint16 version;
+ uint16 len;
+ wl_proxd_cmd_t cmd;
+ wl_proxd_method_t method;
+ wl_proxd_session_id_t sid;
+ wl_ftm_type_t ftm_type; /* 11az ftm type. Only valid with PROXD vers >= 0x0400 */
+ uint8 PAD[1];
+ wl_proxd_tlv_t tlvs[1]; /**< variable */
+} wl_proxd_iov_t;
+
+#define WL_PROXD_IOV_HDR_SIZE OFFSETOF(wl_proxd_iov_t, tlvs)
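+
+/* Buffer-sizing sketch (illustrative only, under the hypothetical
+ * WL_HDR_EXAMPLES guard): total length of a proxd iovar carrying one TLV
+ * is the fixed header up to tlvs[] plus the TLV's own id/len header plus
+ * its payload.
+ */
+#ifdef WL_HDR_EXAMPLES
+static uint16
+wl_proxd_iov_example_len(uint16 tlv_data_len)
+{
+	return (uint16)(WL_PROXD_IOV_HDR_SIZE +
+		OFFSETOF(wl_proxd_tlv_t, data) + tlv_data_len);
+}
+#endif /* WL_HDR_EXAMPLES */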
+
+/* The following event definitions may move to bcmevent.h, but sharing proxd types
+ * across needs more invasive changes unrelated to proxd
+ */
+enum {
+ WL_PROXD_EVENT_NONE = 0, /**< not an event, reserved */
+ WL_PROXD_EVENT_SESSION_CREATE = 1,
+ WL_PROXD_EVENT_SESSION_START = 2,
+ WL_PROXD_EVENT_FTM_REQ = 3,
+ WL_PROXD_EVENT_BURST_START = 4,
+ WL_PROXD_EVENT_BURST_END = 5,
+ WL_PROXD_EVENT_SESSION_END = 6,
+ WL_PROXD_EVENT_SESSION_RESTART = 7,
+ WL_PROXD_EVENT_BURST_RESCHED = 8, /**< burst rescheduled-e.g. partial TSF */
+ WL_PROXD_EVENT_SESSION_DESTROY = 9,
+ WL_PROXD_EVENT_RANGE_REQ = 10,
+ WL_PROXD_EVENT_FTM_FRAME = 11,
+ WL_PROXD_EVENT_DELAY = 12,
+ WL_PROXD_EVENT_VS_INITIATOR_RPT = 13, /**< (target) rx initiator-report */
+ WL_PROXD_EVENT_RANGING = 14,
+ WL_PROXD_EVENT_LCI_MEAS_REP = 15, /* LCI measurement report */
+ WL_PROXD_EVENT_CIVIC_MEAS_REP = 16, /* civic measurement report */
+ WL_PROXD_EVENT_COLLECT = 17,
+ WL_PROXD_EVENT_START_WAIT = 18, /* waiting to start */
+ WL_PROXD_EVENT_MF_STATS = 19, /* mf stats event */
+
+ WL_PROXD_EVENT_MAX
+};
+typedef int16 wl_proxd_event_type_t;
+
+/** proxd event mask - up to 32 events for now */
+typedef uint32 wl_proxd_event_mask_t;
+
+#define WL_PROXD_EVENT_MASK_ALL 0xfffffffe
+#define WL_PROXD_EVENT_MASK_EVENT(_event_type) (1 << (_event_type))
+#define WL_PROXD_EVENT_ENABLED(_mask, _event_type) (\
+ ((_mask) & WL_PROXD_EVENT_MASK_EVENT(_event_type)) != 0)
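+
+/* Mask-building sketch (illustrative only, under the hypothetical
+ * WL_HDR_EXAMPLES guard): enabling just the burst-start, burst-end and
+ * session-end events; WL_PROXD_EVENT_ENABLED() then tests membership.
+ */
+#ifdef WL_HDR_EXAMPLES
+static wl_proxd_event_mask_t
+wl_proxd_event_example_mask(void)
+{
+	return WL_PROXD_EVENT_MASK_EVENT(WL_PROXD_EVENT_BURST_START) |
+		WL_PROXD_EVENT_MASK_EVENT(WL_PROXD_EVENT_BURST_END) |
+		WL_PROXD_EVENT_MASK_EVENT(WL_PROXD_EVENT_SESSION_END);
+}
+#endif /* WL_HDR_EXAMPLES */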
+
+/** proxd event - applies to proxd, method or session */
+typedef struct wl_proxd_event {
+ uint16 version;
+ uint16 len;
+ wl_proxd_event_type_t type;
+ wl_proxd_method_t method;
+ wl_proxd_session_id_t sid;
+	uint8			pad[2];	/* This field is used for fragmentation purposes */
+ wl_proxd_tlv_t tlvs[1]; /**< variable */
+} wl_proxd_event_t;
+
+enum {
+ WL_PROXD_RANGING_STATE_NONE = 0,
+ WL_PROXD_RANGING_STATE_NOTSTARTED = 1,
+ WL_PROXD_RANGING_STATE_INPROGRESS = 2,
+ WL_PROXD_RANGING_STATE_DONE = 3
+};
+typedef int16 wl_proxd_ranging_state_t;
+
+/** proxd ranging flags */
+enum {
+ WL_PROXD_RANGING_FLAG_NONE = 0x0000, /**< no flags */
+ WL_PROXD_RANGING_FLAG_DEL_SESSIONS_ON_STOP = 0x0001,
+ WL_PROXD_RANGING_FLAG_ALL = 0xffff
+};
+typedef uint16 wl_proxd_ranging_flags_t;
+
+struct wl_proxd_ranging_info {
+ wl_proxd_status_t status;
+ wl_proxd_ranging_state_t state;
+ wl_proxd_ranging_flags_t flags;
+ uint16 num_sids;
+ uint16 num_done;
+};
+typedef struct wl_proxd_ranging_info wl_proxd_ranging_info_t;
+
+#include <packed_section_start.h>
+/* Legacy platform i.e. 43342/43430 */
+#define WL_PROXD_COLLECT_EVENT_DATA_VERSION_1 1
+typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_collect_event_data_v1 {
+ uint32 H_LB[K_TOF_COLLECT_H_SIZE_20MHZ];
+ uint32 H_RX[K_TOF_COLLECT_H_SIZE_20MHZ];
+ uint8 ri_rr[FTM_TPK_LEN];
+ wl_proxd_phy_error_t phy_err_mask;
+} BWL_POST_PACKED_STRUCT wl_proxd_collect_event_data_v1_t;
+
+/* Secured 2.0 supported devices i.e. 4364 */
+#define WL_PROXD_COLLECT_EVENT_DATA_VERSION_2 2
+typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_collect_event_data_v2 {
+ uint32 H_LB[K_TOF_COLLECT_H_SIZE_20MHZ];
+ uint32 H_RX[K_TOF_COLLECT_H_SIZE_20MHZ];
+ uint8 ri_rr[FTM_TPK_RI_RR_LEN_SECURE_2_0];
+ wl_proxd_phy_error_t phy_err_mask;
+} BWL_POST_PACKED_STRUCT wl_proxd_collect_event_data_v2_t;
+#include <packed_section_end.h>
+
+#define WL_PROXD_COLLECT_EVENT_DATA_VERSION_3 3
+typedef struct wl_proxd_collect_event_data_v3 {
+ uint16 version;
+ uint16 length;
+ uint32 H_LB[K_TOF_COLLECT_H_SIZE_20MHZ];
+ uint32 H_RX[K_TOF_COLLECT_H_SIZE_20MHZ];
+ uint8 ri_rr[FTM_TPK_RI_RR_LEN_SECURE_2_0];
+ wl_proxd_phy_error_t phy_err_mask;
+} wl_proxd_collect_event_data_v3_t;
+
+#define WL_PROXD_COLLECT_EVENT_DATA_VERSION_4 4
+typedef struct wl_proxd_collect_event_data_v4 {
+ uint16 version;
+ uint16 length;
+ uint32 H_LB[K_TOF_COLLECT_H_SIZE_20MHZ];
+ uint32 H_RX[K_TOF_COLLECT_H_SIZE_20MHZ];
+ uint8 ri_rr[FTM_TPK_RI_RR_LEN_SECURE_2_0_5G];
+ wl_proxd_phy_error_t phy_err_mask;
+} wl_proxd_collect_event_data_v4_t;
+
+#define WL_PROXD_COLLECT_EVENT_DATA_VERSION_MAX WL_PROXD_COLLECT_EVENT_DATA_VERSION_4
+
+/** Data returned by the bssload_report iovar. This is also the WLC_E_BSS_LOAD event data */
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct wl_bssload {
+ uint16 sta_count; /**< station count */
+ uint16 aac; /**< available admission capacity */
+ uint8 chan_util; /**< channel utilization */
+} BWL_POST_PACKED_STRUCT wl_bssload_t;
+#include <packed_section_end.h>
+
+/**
+ * Maximum number of configurable BSS Load levels. The number of BSS Load
+ * ranges is always 1 more than the number of configured levels. E.g. if
+ * 3 levels of 10, 20, 30 are configured then this defines 4 load ranges:
+ * 0-10, 11-20, 21-30, 31-255. A WLC_E_BSS_LOAD event is generated each time
+ * the utilization level crosses into another range, subject to the rate limit.
+ */
+#define MAX_BSSLOAD_LEVELS 8
+#define MAX_BSSLOAD_RANGES (MAX_BSSLOAD_LEVELS + 1)
+
+/** BSS Load event notification configuration. */
+typedef struct wl_bssload_cfg {
+ uint32 rate_limit_msec; /**< # of events posted to application will be limited to
+ * one per specified period (0 to disable rate limit).
+ */
+ uint8 num_util_levels; /**< Number of entries in util_levels[] below */
+ uint8 util_levels[MAX_BSSLOAD_LEVELS];
+ /**< Variable number of BSS Load utilization levels in
+ * low to high order. An event will be posted each time
+ * a received beacon's BSS Load IE channel utilization
+ * value crosses a level.
+ */
+ uint8 PAD[3];
+} wl_bssload_cfg_t;
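+
+/* Configuration sketch (illustrative only, under the hypothetical
+ * WL_HDR_EXAMPLES guard): the three levels 10/20/30 from the comment
+ * above, rate limited to one event per second. Values are examples, not
+ * recommendations.
+ */
+#ifdef WL_HDR_EXAMPLES
+static void
+wl_bssload_example_cfg(wl_bssload_cfg_t *cfg)
+{
+	cfg->rate_limit_msec = 1000;
+	cfg->num_util_levels = 3;
+	cfg->util_levels[0] = 10;
+	cfg->util_levels[1] = 20;
+	cfg->util_levels[2] = 30;
+}
+#endif /* WL_HDR_EXAMPLES */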
+
+/** User roam cache support */
+#define WL_USER_ROAM_CACHE_VER_1 1u
+#define WL_USER_ROAM_CACHE_VER WL_USER_ROAM_CACHE_VER_1
+
+#define WL_USER_ROAM_CACHE_GET 0u /**< Read back the chanspec[s] */
+#define WL_USER_ROAM_CACHE_ADD 1u /**< Add chanspec[s] */
+#define WL_USER_ROAM_CACHE_DEL 2u /**< Delete chanspec[s] */
+#define WL_USER_ROAM_CACHE_CLR 3u /**< Delete all chanspec[s] */
+#define WL_USER_ROAM_CACHE_OVERRIDE 4u /**< Set to use roam cached chanspec only */
+
+typedef struct wl_user_roamcache {
+ uint16 version;
+ uint16 length; /**< Total length including version and length */
+ uint32 subcmd; /**< Sub-command for chanspec add/rel etc.. */
+ union {
+ uint32 val; /**< Command value when applicable */
+ struct {
+ uint16 num_ch; /**< Number of chanspecs in the following array */
+ chanspec_t chanspecs[];
+ } chlist;
+ } u;
+} wl_user_roamcache_t;
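+
+/* Population sketch (illustrative only, under the hypothetical
+ * WL_HDR_EXAMPLES guard): adding chanspecs to the user roam cache. The
+ * length covers the fixed header plus the chanspec payload; the caller
+ * must have allocated the structure large enough for num_ch entries.
+ */
+#ifdef WL_HDR_EXAMPLES
+static void
+wl_user_roamcache_example_add(wl_user_roamcache_t *rc,
+	const chanspec_t *chspecs, uint16 num_ch)
+{
+	uint16 i;
+
+	rc->version = WL_USER_ROAM_CACHE_VER;
+	rc->subcmd = WL_USER_ROAM_CACHE_ADD;
+	rc->u.chlist.num_ch = num_ch;
+	for (i = 0; i < num_ch; i++)
+		rc->u.chlist.chanspecs[i] = chspecs[i];
+	rc->length = (uint16)(OFFSETOF(wl_user_roamcache_t, u.chlist.chanspecs) +
+		num_ch * sizeof(chanspec_t));
+}
+#endif /* WL_HDR_EXAMPLES */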
+
+/** Multiple roaming profile support */
+#define WL_MAX_ROAM_PROF_BRACKETS 4
+
+#define WL_ROAM_PROF_VER_0 0
+#define WL_ROAM_PROF_VER_1 1
+#define WL_ROAM_PROF_VER_2 2
+#define WL_ROAM_PROF_VER_3 3
+
+#define WL_MAX_ROAM_PROF_VER WL_ROAM_PROF_VER_1
+
+#define WL_ROAM_PROF_NONE (0 << 0)
+#define WL_ROAM_PROF_LAZY (1 << 0)
+#define WL_ROAM_PROF_NO_CI (1 << 1)
+#define WL_ROAM_PROF_SUSPEND (1 << 2)
+#define WL_ROAM_PROF_EXTSCAN (1 << 3)
+#define WL_ROAM_PROF_SYNC_DTIM (1 << 6)
+#define WL_ROAM_PROF_DEFAULT (1 << 7) /**< backward compatible single default profile */
+
+#define WL_FACTOR_TABLE_MAX_LIMIT 5
+
+#define WL_CU_2G_ROAM_TRIGGER (-60)
+#define WL_CU_5G_ROAM_TRIGGER (-70)
+
+#define WL_CU_SCORE_DELTA_DEFAULT 20
+
+#define WL_MAX_CHANNEL_USAGE 0x0FF
+#define WL_CU_PERCENTAGE_DISABLE 0
+#define WL_CU_PERCENTAGE_DEFAULT 70
+#define WL_CU_PERCENTAGE_MAX 100
+#define WL_CU_CALC_DURATION_DEFAULT 10 /* seconds */
+#define WL_CU_CALC_DURATION_MAX 60 /* seconds */
+
+#define WL_ESTM_LOW_TRIGGER_DISABLE 0
+#define WL_ESTM_LOW_TRIGGER_DEFAULT 5 /* Mbps */
+#define WL_ESTM_LOW_TRIGGER_MAX 250 /* Mbps */
+#define WL_ESTM_ROAM_DELTA_DEFAULT 10
+
+typedef struct wl_roam_prof_v4 {
+ uint8 roam_flags; /**< bit flags */
+ int8 roam_trigger; /**< RSSI trigger level per profile/RSSI bracket */
+ int8 rssi_lower;
+ int8 roam_delta;
+
+	/* If channel_usage is zero, roam_delta is the RSSI delta required for a new AP */
+	/* If channel_usage is non-zero, roam_delta is the score delta (%) required for a new AP */
+ int8 rssi_boost_thresh; /**< Min RSSI to qualify for RSSI boost */
+ int8 rssi_boost_delta; /**< RSSI boost for AP in the other band */
+ uint16 nfscan; /**< number of full scan to start with */
+ uint16 fullscan_period;
+ uint16 init_scan_period;
+ uint16 backoff_multiplier;
+ uint16 max_scan_period;
+ uint8 channel_usage;
+ uint8 cu_avg_calc_dur;
+ uint16 estm_low_trigger; /**< ESTM low throughput roam trigger */
+ int8 estm_roam_delta; /**< ESTM low throughput roam delta */
+ int8 pad[3];
+ uint16 lp_roamscan_period;
+ uint16 max_fullscan_period;
+} wl_roam_prof_v4_t;
+
+typedef struct wl_roam_prof_v3 {
+ uint8 roam_flags; /**< bit flags */
+ int8 roam_trigger; /**< RSSI trigger level per profile/RSSI bracket */
+ int8 rssi_lower;
+ int8 roam_delta;
+
+	/* If channel_usage is zero, roam_delta is the RSSI delta required for a new AP */
+	/* If channel_usage is non-zero, roam_delta is the score delta (%) required for a new AP */
+ int8 rssi_boost_thresh; /**< Min RSSI to qualify for RSSI boost */
+ int8 rssi_boost_delta; /**< RSSI boost for AP in the other band */
+ uint16 nfscan; /**< number of full scan to start with */
+ uint16 fullscan_period;
+ uint16 init_scan_period;
+ uint16 backoff_multiplier;
+ uint16 max_scan_period;
+ uint8 channel_usage;
+ uint8 cu_avg_calc_dur;
+ uint16 estm_low_trigger; /**< ESTM low throughput roam trigger */
+ int8 estm_roam_delta; /**< ESTM low throughput roam delta */
+ uint8 pad;
+} wl_roam_prof_v3_t;
+
+typedef struct wl_roam_prof_v2 {
+ int8 roam_flags; /**< bit flags */
+ int8 roam_trigger; /**< RSSI trigger level per profile/RSSI bracket */
+ int8 rssi_lower;
+ int8 roam_delta;
+
+	/* If channel_usage is zero, roam_delta is the RSSI delta required for a new AP */
+	/* If channel_usage is non-zero, roam_delta is the score delta (%) required for a new AP */
+ int8 rssi_boost_thresh; /**< Min RSSI to qualify for RSSI boost */
+ int8 rssi_boost_delta; /**< RSSI boost for AP in the other band */
+ uint16 nfscan; /**< number of full scan to start with */
+ uint16 fullscan_period;
+ uint16 init_scan_period;
+ uint16 backoff_multiplier;
+ uint16 max_scan_period;
+ uint8 channel_usage;
+ uint8 cu_avg_calc_dur;
+ uint8 pad[2];
+} wl_roam_prof_v2_t;
+
+typedef struct wl_roam_prof_v1 {
+ int8 roam_flags; /**< bit flags */
+ int8 roam_trigger; /**< RSSI trigger level per profile/RSSI bracket */
+ int8 rssi_lower;
+ int8 roam_delta;
+
+	/* If channel_usage is zero, roam_delta is the RSSI delta required for a new AP */
+	/* If channel_usage is non-zero, roam_delta is the score delta (%) required for a new AP */
+ int8 rssi_boost_thresh; /**< Min RSSI to qualify for RSSI boost */
+ int8 rssi_boost_delta; /**< RSSI boost for AP in the other band */
+ uint16 nfscan; /**< number of full scan to start with */
+ uint16 fullscan_period;
+ uint16 init_scan_period;
+ uint16 backoff_multiplier;
+ uint16 max_scan_period;
+} wl_roam_prof_v1_t;
+
+typedef struct wl_roam_prof_band_v4 {
+ uint32 band; /**< Must be just one band */
+ uint16 ver; /**< version of this struct */
+ uint16 len; /**< length in bytes of this structure */
+ wl_roam_prof_v4_t roam_prof[WL_MAX_ROAM_PROF_BRACKETS];
+} wl_roam_prof_band_v4_t;
+
+typedef struct wl_roam_prof_band_v3 {
+ uint32 band; /**< Must be just one band */
+ uint16 ver; /**< version of this struct */
+ uint16 len; /**< length in bytes of this structure */
+ wl_roam_prof_v3_t roam_prof[WL_MAX_ROAM_PROF_BRACKETS];
+} wl_roam_prof_band_v3_t;
+
+typedef struct wl_roam_prof_band_v2 {
+ uint32 band; /**< Must be just one band */
+ uint16 ver; /**< version of this struct */
+ uint16 len; /**< length in bytes of this structure */
+ wl_roam_prof_v2_t roam_prof[WL_MAX_ROAM_PROF_BRACKETS];
+} wl_roam_prof_band_v2_t;
+
+typedef struct wl_roam_prof_band_v1 {
+ uint32 band; /**< Must be just one band */
+ uint16 ver; /**< version of this struct */
+ uint16 len; /**< length in bytes of this structure */
+ wl_roam_prof_v1_t roam_prof[WL_MAX_ROAM_PROF_BRACKETS];
+} wl_roam_prof_band_v1_t;
+
+#define BSS_MAXTABLE_SIZE 10
+#define WNM_BSS_SELECT_FACTOR_VERSION 1
+typedef struct wnm_bss_select_factor_params {
+ uint8 low;
+ uint8 high;
+ uint8 factor;
+ uint8 pad;
+} wnm_bss_select_factor_params_t;
+
+#define WNM_BSS_SELECT_FIXED_SIZE OFFSETOF(wnm_bss_select_factor_cfg_t, params)
+typedef struct wnm_bss_select_factor_cfg {
+ uint8 version;
+ uint8 band;
+ uint16 type;
+ uint16 pad;
+ uint16 count;
+ wnm_bss_select_factor_params_t params[1];
+} wnm_bss_select_factor_cfg_t;
+
+#define WNM_BSS_SELECT_WEIGHT_VERSION 1
+typedef struct wnm_bss_select_weight_cfg {
+ uint8 version;
+ uint8 band;
+ uint16 type;
+	uint16 weight; /* weight for each type, between 0 and 100 */
+} wnm_bss_select_weight_cfg_t;
+
+/* For branches before Koala, wbtext is part of
+ * wnm; use the type below only
+ */
+typedef struct wnm_btm_default_score_cfg {
+ uint32 default_score; /* default score */
+ uint8 band;
+} wnm_btm_default_score_cfg_t;
+
+/* For branches from Koala onward, wbtext is a
+ * separate module; use the type below only
+ */
+typedef struct wbtext_btm_default_score_cfg {
+ uint32 default_score; /* default score */
+ uint8 band;
+} wbtext_btm_default_score_cfg_t;
+
+#define WNM_BSS_SELECT_TYPE_RSSI 0
+#define WNM_BSS_SELECT_TYPE_CU 1
+#define WNM_BSS_SELECT_TYPE_ESTM_DL 2
+
+#define WNM_BSSLOAD_MONITOR_VERSION 1
+typedef struct wnm_bssload_monitor_cfg {
+ uint8 version;
+ uint8 band;
+	uint8 duration; /* duration between 1 and 20 sec */
+} wnm_bssload_monitor_cfg_t;
+
+#define WNM_ROAM_TRIGGER_VERSION 1
+typedef struct wnm_roam_trigger_cfg {
+ uint8 version;
+ uint8 band;
+ uint16 type;
+ int16 trigger; /* trigger for each type in new roam algorithm */
+} wnm_roam_trigger_cfg_t;
+
+/* Data structures for Interface Create/Remove */
+
+#define WL_INTERFACE_CREATE_VER_0 0
+#define WL_INTERFACE_CREATE_VER_1 1
+#define WL_INTERFACE_CREATE_VER_2 2
+#define WL_INTERFACE_CREATE_VER_3 3
+
+/*
+ * The flags field of wl_interface_create is designed to be
+ * a bit mask. Bits 0 through 4 are currently used, as described below.
+ * The remaining bits can be used in case more information has to be
+ * provided to the dongle
+ */
+
+/*
+ * Bit 0 of flags field is used to inform whether the interface requested to
+ * be created is STA or AP.
+ * 0 - Create a STA interface
+ * 1 - Create an AP interface
+ * NOTE: This Bit 0 is applicable for the WL_INTERFACE_CREATE_VER < 2
+ */
+#define WL_INTERFACE_CREATE_STA (0 << 0)
+#define WL_INTERFACE_CREATE_AP (1 << 0)
+
+/*
+ * From revision >= 2, Bit 0 of the flags field is no longer used for STA or AP interface creation.
+ * "iftype" field shall be used for identifying the interface type.
+ */
+typedef enum wl_interface_type {
+ WL_INTERFACE_TYPE_STA = 0,
+ WL_INTERFACE_TYPE_AP = 1,
+
+#ifdef WLAWDL
+ WL_INTERFACE_TYPE_AWDL = 2,
+#endif /* WLAWDL */
+
+ WL_INTERFACE_TYPE_NAN = 3,
+ WL_INTERFACE_TYPE_P2P_GO = 4,
+ WL_INTERFACE_TYPE_P2P_GC = 5,
+ WL_INTERFACE_TYPE_P2P_DISC = 6,
+ WL_INTERFACE_TYPE_IBSS = 7,
+ WL_INTERFACE_TYPE_MESH = 8,
+ WL_INTERFACE_TYPE_MAX
+} wl_interface_type_t;
+
+/*
+ * Bit 1 of flags field is used to inform whether MAC is present in the
+ * data structure or not.
+ * 0 - Ignore mac_addr field
+ * 1 - Use the mac_addr field
+ */
+#define WL_INTERFACE_MAC_DONT_USE (0 << 1)
+#define WL_INTERFACE_MAC_USE (1 << 1)
+
+/*
+ * Bit 2 of flags field is used to inform whether core or wlc index
+ * is present in the data structure or not.
+ * 0 - Ignore wlc_index field
+ * 1 - Use the wlc_index field
+ */
+#define WL_INTERFACE_WLC_INDEX_DONT_USE (0 << 2)
+#define WL_INTERFACE_WLC_INDEX_USE (1 << 2)
+
+/*
+ * Bit 3 of flags field is used to create interface on the host requested interface index
+ * 0 - Ignore if_index field
+ * 1 - Use the if_index field
+ */
+#define WL_INTERFACE_IF_INDEX_USE (1 << 3)
+
+/*
+ * Bit 4 of flags field is used to assign BSSID
+ * 0 - Ignore bssid field
+ * 1 - Use the bssid field
+ */
+#define WL_INTERFACE_BSSID_INDEX_USE (1 << 4)
+
+typedef struct wl_interface_create_v0 {
+ uint16 ver; /**< version of this struct */
+ uint32 flags; /**< flags that defines the operation */
+ struct ether_addr mac_addr; /**< Optional Mac address */
+} wl_interface_create_v0_t;
+
+typedef struct wl_interface_create {
+ uint16 ver; /**< version of this struct */
+ uint8 pad1[2]; /**< Padding bytes */
+ uint32 flags; /**< flags that defines the operation */
+ struct ether_addr mac_addr; /**< Optional Mac address */
+ uint8 pad2[2]; /**< Padding bytes */
+ uint32 wlc_index; /**< Optional wlc index */
+} wl_interface_create_v1_t;
+
+typedef struct wl_interface_create_v2 {
+ uint16 ver; /**< version of this struct */
+ uint8 pad1[2]; /**< Padding bytes */
+ uint32 flags; /**< flags that defines the operation */
+ struct ether_addr mac_addr; /**< Optional Mac address */
+ uint8 iftype; /**< Type of interface created */
+ uint8 pad2; /**< Padding bytes */
+ uint32 wlc_index; /**< Optional wlc index */
+} wl_interface_create_v2_t;
+
+typedef struct wl_interface_create_v3 {
+ uint16 ver; /**< version of this struct */
+ uint16 len; /**< length of whole structure including variable length */
+ uint16 fixed_len; /**< Fixed length of this structure excluding data[] */
+ uint8 iftype; /**< Type of interface created */
+ uint8 wlc_index; /**< Optional wlc index */
+ uint32 flags; /**< flags that defines the operation */
+ struct ether_addr mac_addr; /**< Optional Mac address */
+ struct ether_addr bssid; /**< Optional BSSID */
+ uint8 if_index; /**< interface index requested by Host */
+ uint8 pad[3]; /**< Padding bytes to ensure data[] is at 32 bit aligned */
+ uint8 data[]; /**< Optional application/Module specific data */
+} wl_interface_create_v3_t;
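+
+/* Population sketch (illustrative only, under the hypothetical
+ * WL_HDR_EXAMPLES guard): requesting an AP interface through the v3
+ * structure with a caller-supplied MAC. Assumes the structure was zeroed
+ * by the caller; the ioctl/iovar plumbing is omitted.
+ */
+#ifdef WL_HDR_EXAMPLES
+static void
+wl_interface_create_example_v3(wl_interface_create_v3_t *req,
+	const struct ether_addr *mac)
+{
+	req->ver = WL_INTERFACE_CREATE_VER_3;
+	req->fixed_len = (uint16)OFFSETOF(wl_interface_create_v3_t, data);
+	req->len = req->fixed_len;		/* no variable data here */
+	req->iftype = WL_INTERFACE_TYPE_AP;	/* ver >= 2 uses iftype, not bit 0 */
+	req->flags = WL_INTERFACE_MAC_USE;	/* mac_addr field is valid */
+	req->mac_addr = *mac;
+}
+#endif /* WL_HDR_EXAMPLES */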
+
+#define WL_INTERFACE_INFO_VER_0 0
+#define WL_INTERFACE_INFO_VER_1 1
+#define WL_INTERFACE_INFO_VER_2 2
+
+typedef struct wl_interface_info_v0 {
+ uint16 ver; /**< version of this struct */
+ struct ether_addr mac_addr; /**< MAC address of the interface */
+ char ifname[BCM_MSG_IFNAME_MAX]; /**< name of interface */
+ uint8 bsscfgidx; /**< source bsscfg index */
+} wl_interface_info_v0_t;
+
+typedef struct wl_interface_info_v1 {
+ uint16 ver; /**< version of this struct */
+ struct ether_addr mac_addr; /**< MAC address of the interface */
+ char ifname[BCM_MSG_IFNAME_MAX]; /**< name of interface */
+ uint8 bsscfgidx; /**< source bsscfg index */
+ uint8 PAD;
+} wl_interface_info_v1_t;
+
+typedef struct wl_interface_info_v2 {
+ uint16 ver; /**< version of this struct */
+ uint16 length; /**< length of the whole structure */
+ struct ether_addr mac_addr; /**< MAC address of the interface */
+ uint8 bsscfgidx; /**< source bsscfg index */
+ uint8 if_index; /**< Interface index allocated by FW */
+ char ifname[BCM_MSG_IFNAME_MAX]; /**< name of interface */
+} wl_interface_info_v2_t;
+
+#define PHY_RXIQEST_AVERAGING_DELAY 10
+
+typedef struct wl_iqest_params {
+ uint32 rxiq;
+ uint8 niter;
+ uint8 delay;
+ uint8 PAD[2];
+} wl_iqest_params_t;
+
+typedef struct wl_iqest_sweep_params {
+ wl_iqest_params_t params;
+ uint8 nchannels;
+ uint8 channel[3]; /** variable */
+} wl_iqest_sweep_params_t;
+
+typedef struct wl_iqest_value {
+ uint8 channel;
+ uint8 PAD[3];
+ uint32 rxiq;
+} wl_iqest_value_t;
+
+typedef struct wl_iqest_result {
+ uint8 nvalues;
+ uint8 PAD[3];
+ wl_iqest_value_t value[1];
+} wl_iqest_result_t;
+
+#define WL_PRIO_ROAM_PROF_V1 (1u)
+
+typedef struct wl_prio_roam_prof_v1 {
+ uint16 version; /* Version info */
+ uint16 length; /* byte length of this structure */
+ uint8 prio_roam_mode; /* Roam mode RCC/RCC+Full Scan */
+ uint8 PAD[3];
+} wl_prio_roam_prof_v1_t;
+
+typedef enum wl_prio_roam_mode {
+ PRIO_ROAM_MODE_OFF = 0, /* Prio_Roam feature disable */
+ PRIO_ROAM_MODE_RCC_ONLY = 1, /* Scan RCC list only */
+ PRIO_ROAM_MODE_RCC_FULLSCAN = 2, /* Scan RCC list + Full scan */
+ PRIO_ROAM_MODE_FULLSCAN_ONLY = 3 /* Full Scan only */
+} wl_prio_roam_mode_t;
+
+/* BTCX AIBSS (Oxygen) Status */
+typedef struct wlc_btc_aibss_info {
+ uint32 prev_tsf_l; // Lower 32 bits of last read of TSF
+ uint32 prev_tsf_h; // Higher 32 bits of last read of TSF
+ uint32 last_btinfo; // Last read of BT info
+ uint32 local_btinfo; // Local BT INFO BitMap
+ uint8 bt_out_of_sync_cnt; // BT not in sync with strobe
+ uint8 esco_off_cnt; // Count incremented when ESCO is off
+ uint8 strobe_enabled; // Set only in AIBSS mode
+ uint8 strobe_on; // strobe to BT is on for Oxygen
+ uint8 local_bt_in_sync; // Sync status of local BT when strobe is on
+ uint8 other_bt_in_sync; // Sync state of BT in other devices in AIBSS
+ uint8 local_bt_is_master; // Local BT is master
+ uint8 sco_prot_on; // eSCO Protection on in local device
+ uint8 other_esco_present; // eSCO status in other devices in AIBSS
+ uint8 rx_agg_change; // Indicates Rx Agg size needs to change
+ uint8 rx_agg_modified; // Rx Agg size modified
+ uint8 acl_grant_set; // ACL grants on for speeding up sync
+ uint8 write_ie_err_cnt; // BTCX Ie write error cnt
+ uint8 parse_ie_err_cnt; // BTCX IE parse error cnt
+ uint8 wci2_fail_cnt; // WCI2 init failure cnt
+ uint8 strobe_enable_err_cnt; // Strobe enable err cnt
+ uint8 strobe_init_err_cnt; // Strobe init err cnt
+ uint8 tsf_jump_cnt; // TSF jump cnt
+	uint8 acl_grant_cnt;		// ACL grant cnt
+ uint8 pad1;
+ uint16 ibss_tsf_shm; // SHM address of strobe TSF
+ uint16 pad2;
+} wlc_btc_aibss_info_t;
+
+#define WLC_BTC_AIBSS_STATUS_VER 1
+#define WLC_BTC_AIBSS_STATUS_LEN (sizeof(wlc_btc_aibss_status_t) - 2 * (sizeof(uint16)))
+
+typedef struct wlc_btc_aibss_status {
+ uint16 version; // Version #
+	uint16 len;			// Length of the structure (excluding len & version)
+ int32 mode; // Current value of btc_mode
+ uint16 bth_period; // bt coex period. read from shm.
+ uint16 agg_off_bm; // AGG OFF BM read from SHM
+ uint8 bth_active; // bt active session
+ uint8 pad[3];
+ wlc_btc_aibss_info_t aibss_info; // Structure definition above
+} wlc_btc_aibss_status_t;
+
+typedef enum {
+ STATE_NONE = 0,
+
+ /* WLAN -> BT */
+ W2B_DATA_SET = 21,
+ B2W_ACK_SET = 22,
+ W2B_DATA_CLEAR = 23,
+ B2W_ACK_CLEAR = 24,
+
+ /* BT -> WLAN */
+ B2W_DATA_SET = 31,
+ W2B_ACK_SET = 32,
+ B2W_DATA_CLEAR = 33,
+ W2B_ACK_CLEAR = 34
+} bwte_gci_intstate_t;
+
+#define WL_BWTE_STATS_VERSION 1 /* version of bwte_stats_t */
+typedef struct {
+ uint32 version;
+
+ bwte_gci_intstate_t inttobt;
+ bwte_gci_intstate_t intfrombt;
+
+	uint32 bt2wl_intrcnt;	/* bt->wlan interrupt count */
+ uint32 wl2bt_intrcnt; /* wlan->bt interrupt count */
+
+ uint32 wl2bt_dset_cnt;
+ uint32 wl2bt_dclear_cnt;
+ uint32 wl2bt_aset_cnt;
+ uint32 wl2bt_aclear_cnt;
+
+ uint32 bt2wl_dset_cnt;
+ uint32 bt2wl_dclear_cnt;
+ uint32 bt2wl_aset_cnt;
+ uint32 bt2wl_aclear_cnt;
+
+ uint32 state_error_1;
+ uint32 state_error_2;
+ uint32 state_error_3;
+ uint32 state_error_4;
+} bwte_stats_t;
+
+#define TBOW_MAX_SSID_LEN 32
+#define TBOW_MAX_PASSPHRASE_LEN 63
+
+#define WL_TBOW_SETUPINFO_T_VERSION 1 /* version of tbow_setup_netinfo_t */
+typedef struct tbow_setup_netinfo {
+ uint32 version;
+ uint8 opmode;
+ uint8 pad;
+ uint8 macaddr[ETHER_ADDR_LEN];
+ uint32 ssid_len;
+ uint8 ssid[TBOW_MAX_SSID_LEN];
+ uint8 passphrase_len;
+ uint8 passphrase[TBOW_MAX_PASSPHRASE_LEN];
+ chanspec_t chanspec;
+ uint8 PAD[2];
+ uint32 channel;
+} tbow_setup_netinfo_t;
+
+typedef enum tbow_ho_opmode {
+ TBOW_HO_MODE_START_GO = 0,
+ TBOW_HO_MODE_START_STA,
+ TBOW_HO_MODE_START_GC,
+ TBOW_HO_MODE_TEST_GO,
+ TBOW_HO_MODE_STOP_GO = 0x10,
+ TBOW_HO_MODE_STOP_STA,
+ TBOW_HO_MODE_STOP_GC,
+ TBOW_HO_MODE_TEARDOWN
+} tbow_ho_opmode_t;
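+
+/* Usage sketch (illustrative only): minimal population of
+ * tbow_setup_netinfo_t for starting a GO. All values are examples; the
+ * filled structure would be handed to the corresponding SET IOVAR.
+ *
+ *	tbow_setup_netinfo_t netinfo;
+ *	memset(&netinfo, 0, sizeof(netinfo));
+ *	netinfo.version = WL_TBOW_SETUPINFO_T_VERSION;
+ *	netinfo.opmode = TBOW_HO_MODE_START_GO;
+ *	netinfo.ssid_len = 7;
+ *	memcpy(netinfo.ssid, "example", netinfo.ssid_len);
+ *	netinfo.passphrase_len = 10;
+ *	memcpy(netinfo.passphrase, "pass-12345", netinfo.passphrase_len);
+ */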
+
+/* Beacon trim feature statistics */
+/* configuration */
+#define BCNTRIMST_PER 0 /* Number of beacons to trim (0: disable) */
+#define BCNTRIMST_TIMEND 1 /* Number of bytes till TIM IE */
+#define BCNTRIMST_TSFLMT 2 /* TSF tolerance value (usecs) */
+/* internal use */
+#define BCNTRIMST_CUR 3 /* PSM's local beacon trim counter */
+#define BCNTRIMST_PREVLEN 4 /* Beacon length excluding the TIM IE */
+#define BCNTRIMST_TIMLEN 5 /* TIM IE Length */
+#define BCNTRIMST_RSSI 6 /* Partial beacon RSSI */
+#define BCNTRIMST_CHAN 7 /* Partial beacon channel */
+/* debug stat (off by default) */
+#define BCNTRIMST_DUR 8 /* RX duration until beacon trimmed */
+#define BCNTRIMST_RXMBSS 9 /* MYBSSID beacon received */
+#define BCNTRIMST_CANTRIM 10 /* # beacons which were trimmed */
+#define BCNTRIMST_LENCHG 11 /* # beacons not trimmed due to length change */
+#define BCNTRIMST_TSFDRF 12 /* # beacons not trimmed due to large TSF delta */
+#define BCNTRIMST_NOTIM 13 /* # beacons not trimmed due to TIM missing */
+
+#define BCNTRIMST_NUM 14
+
+#define WL_BCNTRIM_STATUS_VERSION_1 1
+#define WL_BCNTRIM_STATUS_VERSION_2 2 /* current version of
+ * struct wl_bcntrim_status_v2_t and
+ * struct wl_bcntrim_status_query_v2_t
+ * changes in v2: curr_slice_id also include
+ * beacon offload state
+ */
+
+typedef struct wl_bcntrim_status_query_v1 {
+ uint16 version;
+ uint16 len; /* Total length includes fixed fields */
+ uint8 reset; /* reset after reading the stats */
+ uint8 pad[3]; /* 4-byte alignment */
+} wl_bcntrim_status_query_v1_t;
+
+/* bits for curr_slice_id */
+#define WL_BCNTRIM_CURR_SLICE_ID_MASK 0x0Fu /* bits 0-3 for curr_slice_id */
+#define WL_BCNTRIM_SC_OFFLOAD_ACTIVE_MASK 0x80u /* mask for sc beacon offload */
+#define WL_BCNTRIM_SC_OFFLOAD_ACTIVE_FLAG (1u << 7u) /* MSB of curr_slice_id is used
+ * to indicate if the offload is
+ * currently active or not
+ */
+typedef struct wl_bcntrim_status_v1 {
+ uint16 version;
+ uint16 len; /* Total length includes fixed fields and variable data[] */
+ uint8 curr_slice_id; /* slice index of the interface */
+ uint8 applied_cfg; /* applied bcntrim N threshold */
+ uint8 pad[2]; /* 4-byte alignment */
+ uint32 fw_status; /* Bits representing bcntrim disable reason in FW */
+ uint32 total_disable_dur; /* total duration (msec) bcntrim remains
+ disabled due to FW disable reasons
+ */
+ uint32 data[]; /* variable length data containing stats */
+} wl_bcntrim_status_v1_t;
+
+/* v1 and v2 struct format for query and status are identical */
+typedef wl_bcntrim_status_v1_t wl_bcntrim_status_v2_t;
+typedef wl_bcntrim_status_query_v1_t wl_bcntrim_status_query_v2_t;
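+
+/* Decoding sketch (illustrative only): splitting curr_slice_id from a
+ * wl_bcntrim_status_v2_t into the slice index and the SC beacon-offload
+ * flag using the masks defined above.
+ *
+ *	uint8 slice = status->curr_slice_id & WL_BCNTRIM_CURR_SLICE_ID_MASK;
+ *	bool sc_offload_active =
+ *		(status->curr_slice_id & WL_BCNTRIM_SC_OFFLOAD_ACTIVE_FLAG) != 0;
+ */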
+
+#define BCNTRIM_STATS_MAX 10 /* Total stats part of the status data[] */
+
+/* Bits for FW status */
+#define WL_BCNTRIM_DISABLE_HOST 0x1 /* Host disabled bcntrim through bcntrim IOVar */
+#define WL_BCNTRIM_DISABLE_PHY_RATE 0x2 /* bcntrim disabled because beacon rx rate is
+ * higher than phy_rate_thresh
+ */
+#define WL_BCNTRIM_DISABLE_QUIET_IE 0x4 /* bcntrim disable when Quiet IE present */
+#define WL_BCNTRIM_DISABLE_QBSSLOAD_IE 0x8 /* bcntrim disable when QBSS Load IE present */
+#define WL_BCNTRIM_DISABLE_OPERMODE_IE 0x10 /* bcntrim disable when opermode IE is present */
+#define WL_BCNTRIM_DISABLE_CSA_IE	0x20    /* bcntrim disable when CSA IE is present */
+#define WL_BCNTRIM_DISABLE_SC_OFFLOAD 0x40 /* bcntrim disable on SC */
+
+#define BCNTRIM_DISABLE_THRESHOLD_TIME	(1000 * 10)	/* re-enable bcntrim after a threshold
+							 * (10 sec) when disabled due to the
+							 * above-mentioned IEs
+							 */
+#define WL_BCNTRIM_CFG_VERSION_1 1
+/* Common IOVAR struct */
+typedef struct wl_bcntrim_cfg_v1 {
+ uint16 version;
+ uint16 len; /* Total length includes fixed fields and variable data[] */
+ uint16 subcmd_id; /* subcommand id */
+ uint16 pad; /* pad/reserved */
+ uint8 data[]; /* subcommand data; could be empty */
+} wl_bcntrim_cfg_v1_t;
+
+/* subcommands ids */
+enum {
+ WL_BCNTRIM_CFG_SUBCMD_PHY_RATE_THRESH = 0, /* PHY rate threshold above
+ * which bcntrim is not applied
+ */
+ WL_BCNTRIM_CFG_SUBCMD_OVERRIDE_DISABLE_MASK = 1, /* Override bcntrim disable reasons */
+ WL_BCNTRIM_CFG_SUBCMD_TSF_DRIFT_LIMIT = 2, /* TSF drift limit to consider bcntrim */
+ WL_BCNTRIM_CFG_SUBCMD_SC_BCNTRIM = 3 /* config bcntrim on SC */
+};
+
+#define BCNTRIM_MAX_PHY_RATE 48 /* in 500Kbps */
+#define BCNTRIM_MAX_TSF_DRIFT 65535 /* in usec */
+#define WL_BCNTRIM_OVERRIDE_DISABLE_MASK \
+ (WL_BCNTRIM_DISABLE_QUIET_IE | WL_BCNTRIM_DISABLE_QBSSLOAD_IE)
+
+/* WL_BCNTRIM_CFG_SUBCMD_PHY_RATE_THRESH */
+typedef struct wl_bcntrim_cfg_phy_rate_thresh {
+ uint32 rate; /* beacon rate (in 500kbps units) */
+} wl_bcntrim_cfg_phy_rate_thresh_t;
+
+/* WL_BCNTRIM_CFG_SUBCMD_OVERRIDE_DISABLE_MASK */
+typedef struct wl_bcntrim_cfg_override_disable_mask {
+ uint32 mask; /* bits representing individual disable reason to override */
+} wl_bcntrim_cfg_override_disable_mask_t;
+
+/* WL_BCNTRIM_CFG_SUBCMD_TSF_DRIFT_LIMIT */
+typedef struct wl_bcntrim_cfg_tsf_drift_limit {
+ uint16 drift; /* tsf drift limit specified in usec */
+ uint8 pad[2]; /* 4-byte alignment */
+} wl_bcntrim_cfg_tsf_drift_limit_t;
+
+/* WL_BCNTRIM_CFG_SUBCMD_SC_BCNTRIM */
+typedef struct wl_bcntrim_cfg_sc_bcntrim {
+ uint32 sc_config; /* 0 disable or 1 enable sc bcntrim */
+} wl_bcntrim_cfg_sc_bcntrim_t;
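+
+/* Usage sketch (illustrative only): composing the common cfg IOVAR buffer
+ * with the PHY rate threshold subcommand. 'buf' is a hypothetical scratch
+ * buffer large enough for the header plus payload.
+ *
+ *	wl_bcntrim_cfg_v1_t *cfg = (wl_bcntrim_cfg_v1_t *)buf;
+ *	wl_bcntrim_cfg_phy_rate_thresh_t *thresh =
+ *		(wl_bcntrim_cfg_phy_rate_thresh_t *)cfg->data;
+ *	cfg->version = WL_BCNTRIM_CFG_VERSION_1;
+ *	cfg->len = (uint16)(sizeof(*cfg) + sizeof(*thresh));
+ *	cfg->subcmd_id = WL_BCNTRIM_CFG_SUBCMD_PHY_RATE_THRESH;
+ *	cfg->pad = 0;
+ *	thresh->rate = BCNTRIM_MAX_PHY_RATE;	// 48 * 500 kbps = 24 Mbps
+ */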
+
+/* -------------- TX Power Cap --------------- */
+#define TXPWRCAP_MAX_NUM_CORES 8
+#define TXPWRCAP_MAX_NUM_ANTENNAS (TXPWRCAP_MAX_NUM_CORES * 2)
+
+#define TXPWRCAP_MAX_NUM_CORES_V3 4
+#define TXPWRCAP_MAX_NUM_ANTENNAS_V3 (TXPWRCAP_MAX_NUM_CORES_V3 * 2)
+
+#define TXPWRCAP_NUM_SUBBANDS 5
+#define TXPWRCAP_MAX_NUM_SUBGRPS 10
+
+/* IOVAR txcapconfig enum's */
+#define TXPWRCAPCONFIG_WCI2 0u
+#define TXPWRCAPCONFIG_HOST 1u
+#define TXPWRCAPCONFIG_WCI2_AND_HOST 2u
+#define TXPWRCAPCONFIG_NONE 0xFFu
+
+/* IOVAR txcapstate enum's */
+#define TXPWRCAPSTATE_LOW_CAP 0
+#define TXPWRCAPSTATE_HIGH_CAP 1
+#define TXPWRCAPSTATE_HOST_LOW_WCI2_LOW_CAP 0
+#define TXPWRCAPSTATE_HOST_LOW_WCI2_HIGH_CAP 1
+#define TXPWRCAPSTATE_HOST_HIGH_WCI2_LOW_CAP 2
+#define TXPWRCAPSTATE_HOST_HIGH_WCI2_HIGH_CAP 3
+
+/* IOVAR txcapconfig and txcapstate structure is shared: SET and GET */
+#define TXPWRCAPCTL_VERSION 2
+#define TXPWRCAPCTL_VERSION_3 3
+
+typedef struct wl_txpwrcap_ctl {
+ uint8 version;
+ uint8 ctl[TXPWRCAP_NUM_SUBBANDS];
+} wl_txpwrcap_ctl_t;
+
+typedef struct wl_txpwrcap_ctl_v3 {
+ uint8 version;
+ uint8 ctl[TXPWRCAP_MAX_NUM_SUBGRPS];
+} wl_txpwrcap_ctl_v3_t;
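+
+/* Usage sketch (illustrative only): forcing the low cap on every subband
+ * through the shared txcapconfig/txcapstate control structure.
+ *
+ *	wl_txpwrcap_ctl_t ctl;
+ *	int i;
+ *	ctl.version = TXPWRCAPCTL_VERSION;
+ *	for (i = 0; i < TXPWRCAP_NUM_SUBBANDS; i++)
+ *		ctl.ctl[i] = TXPWRCAPSTATE_LOW_CAP;
+ */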
+
+/* IOVAR txcapdump structure: GET only */
+#define TXPWRCAP_DUMP_VERSION 2
+typedef struct wl_txpwrcap_dump {
+ uint8 version;
+ uint8 pad0;
+ uint8 current_country[2];
+ uint32 current_channel;
+ uint8 config[TXPWRCAP_NUM_SUBBANDS];
+ uint8 state[TXPWRCAP_NUM_SUBBANDS];
+ uint8 high_cap_state_enabled;
+ uint8 wci2_cell_status_last;
+ uint8 download_present;
+ uint8 num_subbands;
+ uint8 num_antennas;
+ uint8 num_antennas_per_core[TXPWRCAP_MAX_NUM_CORES];
+ uint8 num_cc_groups;
+ uint8 current_country_cc_group_info_index;
+ int8 low_cap[TXPWRCAP_MAX_NUM_ANTENNAS*TXPWRCAP_NUM_SUBBANDS];
+ int8 high_cap[TXPWRCAP_MAX_NUM_ANTENNAS*TXPWRCAP_NUM_SUBBANDS];
+ uint8 PAD[3];
+} wl_txpwrcap_dump_t;
+
+typedef struct wl_txpwrcap_dump_v3 {
+ uint8 version;
+ uint8 pad0;
+ uint8 current_country[2];
+ uint32 current_channel;
+ uint8 config[TXPWRCAP_NUM_SUBBANDS];
+ uint8 state[TXPWRCAP_NUM_SUBBANDS];
+ uint8 high_cap_state_enabled;
+ uint8 wci2_cell_status_last;
+ uint8 download_present;
+ uint8 num_subbands;
+ uint8 num_antennas;
+ uint8 num_antennas_per_core[TXPWRCAP_MAX_NUM_CORES];
+ uint8 num_cc_groups;
+ uint8 current_country_cc_group_info_index;
+ uint8 cap_states_per_cc_group;
+ int8 host_low_wci2_low_cap[TXPWRCAP_MAX_NUM_ANTENNAS*TXPWRCAP_NUM_SUBBANDS];
+ int8 host_low_wci2_high_cap[TXPWRCAP_MAX_NUM_ANTENNAS*TXPWRCAP_NUM_SUBBANDS];
+ int8 host_high_wci2_low_cap[TXPWRCAP_MAX_NUM_ANTENNAS*TXPWRCAP_NUM_SUBBANDS];
+ int8 host_high_wci2_high_cap[TXPWRCAP_MAX_NUM_ANTENNAS*TXPWRCAP_NUM_SUBBANDS];
+ uint8 PAD[2];
+} wl_txpwrcap_dump_v3_t;
+
+/*
+* Capability flag for wl_txpwrcap_tbl_v2_t and wl_txpwrcap_t
+* The index into pwrs will be: 0: onbody-cck, 1: onbody-ofdm, 2:offbody-cck, 3:offbody-ofdm
+*
+* For 5G power in SDB case as well as for non-SDB case, the value of flag will be: CAP_ONOFF_BODY
+* The index into pwrs will be: 0: onbody, 1: offbody-ofdm
+*/
+
+#define CAP_ONOFF_BODY (0x1) /* on/off body only */
+#define CAP_CCK_OFDM (0x2) /* cck/ofdm capability only */
+#define CAP_LTE_CELL (0x4) /* cell on/off capability; required for iOS builds */
+#define CAP_HEAD_BODY (0x8) /* head/body capability */
+#define CAP_2G_DEPON_5G (0x10) /* 2G pwr caps depend on other slice 5G subband */
+#define CAP_SISO_MIMO (0x20) /* Siso/Mimo Separate Power Caps */
+#define CAP_ANT_TX (0x40) /* Separate Power Caps based on cell ant tx value */
+#define CAP_LTE_PQBIT (0x100u) /* QPBit is enabled */
+#define CAP_ONOFF_BODY_CCK_OFDM (CAP_ONOFF_BODY | CAP_CCK_OFDM)
+#define CAP_TXPWR_ALL (CAP_ONOFF_BODY|CAP_CCK_OFDM|CAP_LTE_CELL|\
+ CAP_SISO_MIMO|CAP_HEAD_BODY|CAP_ANT_TX)
+
+#define TXHDR_SEC_MAX 5u /* Deprecated. Kept till removed in all branches */
+#define TXPWRCAP_MAX_STATES 4u
+#define TXPWRCAP_MAX_STATES_V3 10u
+#define TXPWRCAP_CCKOFDM_ONOFFBODY_MAX_STATES 4u
+#define TXPWRCAP_ONOFFBODY_MAX_STATES 2u
+#define TXPWRCAP_ONOFFCELL_MAX_STATES 2u
+
+#define TXHDR_SEC_NONSDB_MAIN_2G 0
+#define TXHDR_SEC_NONSDB_MAIN_5G 1
+#define TXHDR_SEC_NONSDB_AUX_2G 2
+#define TXHDR_SEC_NONSDB_AUX_5G 3
+#define TXHDR_SEC_SDB_MAIN_2G 4
+#define TXHDR_SEC_SDB_MAIN_5G 5
+#define TXHDR_SEC_SDB_AUX_2G 6
+#define TXHDR_SEC_SDB_AUX_5G 7
+#define TXHDR_MAX_SECTION 8
+
+#define WL_TXPWRCAP_MAX_SLICES 2
+#define WL_TXPWRCAPDUMP_VER 4
+
+#define WL_TXPWRCAP_VERSION_2 2
+#define WL_TXPWRCAP_VERSION_3 3
+
+typedef struct wl_txpwrcap {
+ uint8 capability;
+ uint8 num_cap_states;
+	uint8 section;	/* Index from above, e.g. TXHDR_SEC_NONSDB_MAIN_2G */
+ int8 pwrs[][TXPWRCAP_NUM_SUBBANDS][TXPWRCAP_MAX_NUM_CORES];
+} wl_txpwrcap_t;
+
+typedef struct {
+ uint8 capability;
+ uint8 num_cap_states;
+ uint8 num_subgrps;
+	uint8 section;	/* Index from above, e.g. TXHDR_SEC_NONSDB_MAIN_2G */
+ int8 pwrs[][TXPWRCAP_MAX_NUM_SUBGRPS][TXPWRCAP_MAX_NUM_ANTENNAS_V3];
+} wl_txpwrcap_v2_t;
+
+#define TXPWRCAP_DUMP_VERSION_4 4u
+#define TXPWRCAP_DUMP_VERSION_5 5u
+#define TXPWRCAP_DUMP_VERSION_6 6u
+
+typedef struct wl_txpwrcap_dump_v4 {
+ uint8 version;
+ uint8 num_pwrcap;
+ uint8 current_country[2];
+ uint32 current_channel;
+ uint8 download_present;
+	uint8 num_cores;	/* number of cores on the slice */
+	uint8 num_cc_groups;	/* number of cc groups */
+ uint8 current_country_cc_group_info_index;
+	/* The first power cap always exists.
+	 * On main: non-SDB first, followed by sdb2g and then sdb5g.
+	 * On the aux slice: aux2g, then aux5g.
+ */
+ wl_txpwrcap_t pwrcap; /* first power cap */
+} wl_txpwrcap_dump_v4_t;
+
+typedef struct wl_txpwrcap_dump_v5 {
+ uint8 version;
+ uint8 num_pwrcap;
+ uint8 current_country[2];
+ uint8 current_channel;
+ uint8 high_cap_state_enabled;
+ uint8 reserved[2];
+ uint8 download_present;
+	uint8 num_ants;		/* number of antennas on the slice */
+	uint8 num_cc_groups;	/* number of cc groups */
+ uint8 current_country_cc_group_info_index;
+ uint8 ant_tx; /* current value of ant_tx */
+ uint8 cell_status; /* current value of cell status */
+ int8 pwrcap[]; /* variable size power caps (wl_txpwrcap_v2_t) */
+} wl_txpwrcap_dump_v5_t;
+
+typedef struct wl_txpwrcap_dump_v6 {
+ uint8 version;
+ uint8 num_pwrcap;
+ uint8 current_country[2];
+ uint8 current_channel;
+ uint8 high_cap_state_enabled;
+ uint8 reserved[2];
+ uint8 download_present;
+	uint8 num_ants;		/* number of antennas on the slice */
+	uint8 num_cc_groups;	/* number of cc groups */
+ uint8 current_country_cc_group_info_index;
+ uint8 ant_tx; /* current value of ant_tx */
+ uint8 cell_status; /* current value of cell status */
+ uint16 capability[TXHDR_MAX_SECTION]; /* capabilities */
+ int8 pwrcap[]; /* variable size power caps (wl_txpwrcap_v2_t) */
+} wl_txpwrcap_dump_v6_t;
+
+#define TXCAPINFO_VERSION_1 1
+typedef struct wl_txpwrcap_ccgrp_info {
+ uint8 num_cc;
+ char cc_list[1][2]; /* 2 letters for each country. At least one country */
+} wl_txpwrcap_ccgrp_info_t;
+
+typedef struct {
+ uint16 version;
+ uint16 length; /* length in bytes */
+ uint8 num_ccgrp;
+ /* followed by one or more wl_txpwrcap_ccgrp_info_t */
+ wl_txpwrcap_ccgrp_info_t ccgrp_data[1];
+} wl_txpwrcap_info_t;
+
+typedef struct wl_txpwrcap_tbl {
+ uint8 num_antennas_per_core[TXPWRCAP_MAX_NUM_CORES];
+ /* Stores values for valid antennas */
+ int8 pwrcap_cell_on[TXPWRCAP_MAX_NUM_ANTENNAS]; /* qdBm units */
+ int8 pwrcap_cell_off[TXPWRCAP_MAX_NUM_ANTENNAS]; /* qdBm units */
+} wl_txpwrcap_tbl_t;
+
+typedef struct wl_txpwrcap_tbl_v2 {
+ uint8 version;
+ uint8 length; /* size of entire structure, including the pwrs */
+ uint8 capability; /* capability bitmap */
+ uint8 num_cores; /* number of cores i.e. entries in each cap state row */
+ /*
+ * pwrs array has TXPWRCAP_MAX_STATES rows - one for each cap state.
+ * Each row has up to TXPWRCAP_MAX_NUM_CORES entries - one for each core.
+ */
+ uint8 pwrs[][TXPWRCAP_MAX_NUM_CORES]; /* qdBm units */
+} wl_txpwrcap_tbl_v2_t;
+
+typedef struct wl_txpwrcap_tbl_v3 {
+ uint8 version;
+ uint8 length; /* size of entire structure, including the pwrs */
+ uint8 capability; /* capability bitmap */
+ uint8 num_cores; /* number of cores */
+ uint8 num_antennas_per_core[TXPWRCAP_MAX_NUM_CORES_V3];
+ /*
+ * pwrs array has TXPWRCAP_MAX_STATES rows - one for each cap state.
+ * Each row has up to TXPWRCAP_MAX_NUM_ANTENNAS entries - for each antenna.
+ * Included in the rows of powers are rows for fail safe.
+ */
+ int8 pwrs[][TXPWRCAP_MAX_NUM_ANTENNAS_V3]; /* qdBm units */
+} wl_txpwrcap_tbl_v3_t;
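+
+/* Sizing sketch (illustrative only): pwrs[] is a flexible array, so the
+ * v3 table allocation grows with the number of cap-state rows ('nrows' is
+ * a caller-chosen count, fail-safe rows included).
+ *
+ *	uint32 alloc_len = (uint32)sizeof(wl_txpwrcap_tbl_v3_t) +
+ *		nrows * TXPWRCAP_MAX_NUM_ANTENNAS_V3 * sizeof(int8);
+ */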
+
+/* dynamic sar iovar subcommand ids */
+enum {
+ IOV_DYNSAR_MODE = 1,
+ IOV_DYNSAR_PWR_OFF = 2,
+ IOV_DYNSAR_STAT_SUM = 3,
+ IOV_DYNSAR_STAT_DET = 4,
+ IOV_DYNSAR_TS = 5,
+ IOV_DYNSAR_OPT_DUR = 6,
+ IOV_DYNSAR_OPT_TXDC = 7,
+ IOV_DYNSAR_STATUS = 8,
+ IOV_DYNSAR_EVENT = 9,
+ IOV_DYNSAR_VAR = 10,
+ IOV_DYNSAR_SUM_AGG = 11,
+ IOV_DYNSAR_CMD_LAST
+};
+
+/* when subcommand is IOV_DYNSAR_MODE, the mode can be one of the below */
+enum {
+ IOV_DYNSAR_MODE_OFF = 0, /* DSA optimization turned off */
+ IOV_DYNSAR_MODE_PWR = 1, /* DSA Power optimization mode */
+ IOV_DYNSAR_MODE_HBR_NOMUTE = 2, /* DSA Hybrid power and nomute optimization mode */
+
+ IOV_DYNSAR_MODE_MAX
+};
+
+#define DYNSAR_CNT_VERSION_V1 1u
+#define DYNSAR_CNT_VERSION_V2 2u
+#define DYNSAR_STS_OBS_WIN 20u
+#define DYNSAR_MAX_ANT WL_STA_ANT_MAX
+#define DYNSAR_MAX_AGG_IDX (DYNSAR_MAX_ANT << 1u) /* max antenna aggregation index */
+#define DYNSAR_MAC_NUM 2u
+
+/* Error bits */
+#define DYNSAR_NO_TXCAP (1u << 0u)
+#define DYNSAR_NO_CLM (1u << 1u)
+#define DYNSAR_TDMTX_DISABLED (1u << 2u)
+#define DYNSAR_VIOLATION (1u << 3u)
+#define DYNSAR_ANT_NUM_MISMATCH (1u << 4u)
+#define DYNSAR_COUNTRY_DISABLED (1u << 5u)
+
+typedef struct wlc_dynsar_sts_mon_ctr_st {
+ uint32 tx_dur; /* in usec */
+ uint32 tx_dur_raw; /* in usec */
+ uint32 plim_avg; /* In uw. plim averaged over mon win. */
+ uint32 energy; /* pavg * dur in mw * ms */
+ uint32 qsar; /* plim * dur in mw * ms */
+ uint16 fs; /* failsafe duration in usec */
+ uint8 util_hist; /* utilization in past observe sec */
+ uint8 util_pred; /* utilization of past (observe - budget) & predicted budget sec */
+} wlc_dynsar_sts_mon_ctr_t;
+
+typedef struct wlc_dynsar_sts_obs_win {
+ uint8 opt;
+ uint8 valid;
+ uint16 pad;
+ uint32 dur; /* monitor duration in usec */
+ uint64 ts; /* timestamp in usec */
+} wlc_dynsar_sts_obs_win_t;
+
+typedef struct dynsar_agg_entry {
+ uint32 util;
+ uint32 util_sqr;
+ uint32 mean_util;
+ uint32 var;
+} dynsar_agg_ent_t;
+
+typedef struct dynsar_agg_stat {
+ /* variable length */
+ uint16 len; /* length of this structure including data */
+ uint16 num_ent; /* number of entries per aggregated slot */
+ uint16 num_agg; /* number of aggregated slots */
+ uint16 pad; /* pad */
+ uint64 buf[]; /* num_ent entries wlc_dynsar_sts_obs_win_t
+ * followed by num_ent entries dynsar_agg_ent_t
+ */
+} dynsar_agg_stat_t;
+
+/* structure holding dynsar per slice counters that interface to iovar */
+typedef struct dynsar_cnt_v1 {
+ uint16 ver;
+ uint16 len; /* length of this structure */
+ uint8 num_ant; /* num_antennas */
+ uint8 win; /* number of valid entries in the observe window */
+ uint8 slice;
+	uint8 pad;	/* pad */
+ uint64 sync_ts; /* time of first mon period collection after last sync */
+ wlc_dynsar_sts_obs_win_t obs[DYNSAR_STS_OBS_WIN];
+ wlc_dynsar_sts_mon_ctr_t mon_ctr[DYNSAR_STS_OBS_WIN][DYNSAR_MAX_ANT];
+} dynsar_cnt_v1_t;
+
+typedef struct dynsar_shared_ant_stats {
+ uint32 tx_dur; /* tx duration */
+ uint8 sar_util; /* sar utilization */
+ uint8 pad[3]; /* pad */
+} dynsar_shared_ant_stats_t;
+
+typedef struct dynsar_unshared_ant_stats {
+ uint32 qsar; /* mw * ms */
+ uint32 energy; /* mw * ms */
+ uint32 tx_dur; /* tx duration */
+} dynsar_unshared_ant_stats_t;
+
+typedef struct dynsar_sum_v1 {
+ uint16 ver;
+ uint16 len; /* length of this structure */
+ uint32 dur; /* duration in us */
+ uint64 ts; /* time stamp of report in us */
+ uint64 sync_ts; /* time of first mon period collection after last sync */
+ uint8 slice;
+ uint8 num_ant;
+ uint8 opt;
+ uint8 sync;
+ /* per antenna counters aggregated if shared between radios */
+ struct {
+ uint32 tx_dur; /* tx duration */
+ uint8 sar_util; /* sar utilization */
+ uint8 PAD[3]; /* pad */
+ } shared[DYNSAR_MAX_ANT];
+
+ /* per antenna counters not aggregated between radios */
+ struct {
+ uint32 qsar; /* mw * ms */
+ uint32 energy; /* mw * ms */
+ } unshared[DYNSAR_MAX_ANT];
+} dynsar_sum_v1_t;
+
+typedef struct dynsar_sum_v2 {
+ uint16 ver;
+ uint16 len; /* length of this structure */
+ uint32 dur; /* duration in us */
+ uint64 ts; /* time stamp of report in us */
+ uint64 sync_ts; /* time of first mon period collection after last sync */
+ uint8 num_ant; /* max number of antennas between 2 slices */
+ uint8 opt;
+ uint8 sync;
+ uint8 max_mac; /* number of slices */
+ uint8 num_agg; /* number of aggregated antennas */
+ uint8 offset_shared; /* offset from beginning of structure to shared antenna data */
+ uint8 offset_unshared; /* offset from beginning of structure to unshared antenna data */
+ uint8 pad;
+ /* Variable length data sections follow as per above offsets:
+ * dynsar_unshared_ant_stats_t [max_mac][num_ant]
+ * dynsar_shared_ant_stats_t [num_agg]
+ */
+} dynsar_sum_v2_t;
+
+typedef struct dynsar_status {
+ uint16 ver;
+ uint16 len; /* length of this structure */
+ uint8 slice; /* slice number */
+ uint8 mode; /* optimization mode */
+ uint8 util_thrhd; /* utilization threshold */
+ uint8 opt_txdc; /* txdc prediction percentage */
+ uint8 opt_dur; /* optimization prediction duration */
+ uint8 event; /* if wl event is configured */
+ uint8 time_sync; /* if gpio pulse is configured */
+	uint8 power_off;	/* power offset in dB */
+ uint8 num_ant; /* num antenna */
+ uint8 status; /* status bitmap. e.g. WL_DYNSAR_STS_PWR_OPT.
+ * These are same as status field in wl_event
+ */
+ uint8 error; /* error bits */
+ uint8 gpio_pin; /* gpio pin */
+ /* aggregation index array of num_ant entries */
+ uint8 agg[]; /* aggregation indices */
+} dynsar_status_t;
+
+typedef struct dynsar_var_info {
+ uint lim; /* variance limit */
+	uint off;	/* hysteresis offset applied to variance while optimized */
+} dynsar_var_info_t;
+
+typedef struct dynsar_status_v2 {
+ uint16 ver;
+ uint16 len; /* length of this structure */
+ uint8 slice; /* slice number */
+ uint8 mode; /* optimization mode */
+ uint8 util_thrhd; /* utilization threshold */
+ uint8 opt_txdc; /* txdc prediction percentage */
+ uint8 opt_dur; /* optimization prediction duration */
+ uint8 event; /* if wl event is configured */
+ uint8 time_sync; /* if gpio pulse is configured */
+	uint8 power_off;	/* power offset in dB */
+ uint8 num_ant; /* num antenna */
+ uint8 status; /* status bitmap. e.g. WL_DYNSAR_STS_PWR_OPT.
+ * These are same as status field in wl_event
+ */
+ uint8 error; /* error bits */
+ uint8 gpio_pin; /* gpio pin */
+ dynsar_var_info_t var; /* variance information */
+ /* aggregation index array of num_ant entries */
+ uint8 agg[]; /* aggregation indices */
+} dynsar_status_v2_t;
+
+typedef struct wl_dynsar_ioc {
+ uint16 id; /* ID of the sub-command */
+ uint16 len; /* total length of all data[] */
+ union { /* var len payload */
+ uint8 cnt;
+ dynsar_cnt_v1_t det;
+ dynsar_agg_stat_t agg_stat;
+ dynsar_sum_v1_t sum;
+ dynsar_sum_v2_t sumv2;
+ dynsar_status_t status;
+ dynsar_status_v2_t statusv2;
+ dynsar_var_info_t var;
+ } data;
+} wl_dynsar_ioc_t;
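+
+/* Usage sketch (illustrative only): inspecting a status response and the
+ * error bits defined above. 'ioc' is a hypothetical response already
+ * filled in by the firmware.
+ *
+ *	if (ioc->id == IOV_DYNSAR_STATUS) {
+ *		dynsar_status_t *sts = &ioc->data.status;
+ *		if (sts->error & (DYNSAR_NO_TXCAP | DYNSAR_NO_CLM)) {
+ *			// dynamic SAR cannot optimize without txcap/CLM data
+ *		}
+ *	}
+ */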
+
+typedef struct wlc_dynsar_status {
+ uint16 ver;
+ uint16 len; /* length of this structure */
+} wl_dynsar_status_t;
+
+/* ##### Ecounters section ##### */
+#define ECOUNTERS_VERSION_1 1
+
+/* Input structure for ecounters IOVAR */
+typedef struct ecounters_config_request {
+ uint16 version; /* config version */
+ uint16 set; /* Set where data will go. */
+ uint16 size; /* Size of the set. */
+ uint16 timeout; /* timeout in seconds. */
+ uint16 num_events; /* Number of events to report. */
+ uint16 ntypes; /* Number of entries in type array. */
+ uint16 type[1]; /* Statistics Types (tags) to retrieve. */
+} ecounters_config_request_t;
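+
+/* Usage sketch (illustrative only): a v1 request for a single statistics
+ * type. type[] grows with ntypes, so real callers size the buffer for the
+ * whole list; the tag values themselves are defined elsewhere.
+ *
+ *	ecounters_config_request_t req;
+ *	req.version = ECOUNTERS_VERSION_1;
+ *	req.set = 0;		// destination event log set
+ *	req.size = 0;		// example: let the set keep its size
+ *	req.timeout = 10;	// seconds
+ *	req.num_events = 1;
+ *	req.ntypes = 1;
+ *	req.type[0] = 0;	// placeholder tag id
+ */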
+
+#define ECOUNTERS_EVENTMSGS_VERSION_1 1
+#define ECOUNTERS_TRIGGER_CONFIG_VERSION_1 1
+
+#define ECOUNTERS_EVENTMSGS_EXT_MASK_OFFSET \
+ OFFSETOF(ecounters_eventmsgs_ext_t, mask[0])
+
+#define ECOUNTERS_TRIG_CONFIG_TYPE_OFFSET \
+ OFFSETOF(ecounters_trigger_config_t, type[0])
+
+typedef struct ecounters_eventmsgs_ext {
+ uint8 version;
+ uint8 len;
+ uint8 mask[1];
+} ecounters_eventmsgs_ext_t;
+
+typedef struct ecounters_trigger_config {
+ uint16 version; /* version */
+ uint16 set; /* set where data should go */
+ uint16 rsvd; /* reserved */
+ uint16 pad; /* pad/reserved */
+ uint16 ntypes; /* number of types/tags */
+ uint16 type[1]; /* list of types */
+} ecounters_trigger_config_t;
+
+#define ECOUNTERS_TRIGGER_REASON_VERSION_1 1
+typedef enum {
+ /* Triggered due to timer based ecounters */
+ ECOUNTERS_TRIGGER_REASON_TIMER = 0,
+ /* Triggered due to event based configuration */
+ ECOUNTERS_TRIGGER_REASON_EVENTS = 1,
+ ECOUNTERS_TRIGGER_REASON_D2H_EVENTS = 2,
+ ECOUNTERS_TRIGGER_REASON_H2D_EVENTS = 3,
+ ECOUNTERS_TRIGGER_REASON_USER_EVENTS = 4,
+ ECOUNTERS_TRIGGER_REASON_MAX = 5
+} ecounters_trigger_reasons_list_t;
+
+typedef struct ecounters_trigger_reason {
+ uint16 version; /* version */
+ uint16 trigger_reason; /* trigger reason */
+ uint32 sub_reason_code; /* sub reason code */
+ uint32 trigger_time_now; /* time in ms at trigger */
+ uint32 host_ref_time; /* host ref time */
+} ecounters_trigger_reason_t;
+
+#define WL_LQM_VERSION_1 1
+
+/* For wl_lqm_t flags field */
+#define WL_LQM_CURRENT_BSS_VALID 0x1
+#define WL_LQM_TARGET_BSS_VALID 0x2
+
+#define WL_PERIODIC_COMPACT_CNTRS_VER_1 (1)
+#define WL_PERIODIC_TXBF_CNTRS_VER_1 (1)
+typedef struct {
+ uint16 version;
+ uint16 pad;
+ /* taken from wl_wlc_cnt_t */
+ uint32 txfail;
+ /* taken from wl_cnt_ge40mcst_v1_t */
+ uint32 txallfrm; /**< total number of frames sent, incl. Data, ACK, RTS, CTS,
+ * Control Management (includes retransmissions)
+ */
+ uint32 txrtsfrm; /**< number of RTS sent out by the MAC */
+ uint32 txctsfrm; /**< number of CTS sent out by the MAC */
+ uint32 txback; /**< blockack txcnt */
+ uint32 txucast; /**< number of unicast tx expecting response other than cts/cwcts */
+ uint32 txnoack; /**< dot11ACKFailureCount */
+ uint32 txframe; /**< tx data frames */
+ uint32 txretrans; /**< tx mac retransmits */
+ uint32 txpspoll; /**< Number of TX PS-poll */
+
+ uint32 rxrsptmout; /**< number of response timeouts for transmitted frames
+ * expecting a response
+ */
+ uint32 txrtsfail; /**< number of rts transmission failure that reach retry limit */
+ uint32 rxstrt; /**< number of received frames with a good PLCP */
+ uint32 rxbadplcp; /**< number of parity check of the PLCP header failed */
+ uint32 rxcrsglitch; /**< PHY was able to correlate the preamble but not the header */
+ uint32 rxnodelim; /**< number of no valid delimiter detected by ampdu parser */
+ uint32 bphy_badplcp; /**< number of bad PLCP reception on BPHY rate */
+ uint32 bphy_rxcrsglitch; /**< PHY count of bphy glitches */
+ uint32 rxbadfcs; /**< number of frames for which the CRC check failed in the MAC */
+ uint32 rxf0ovfl; /**< number of receive fifo 0 overflows */
+	uint32	rxf1ovfl;	/**< number of receive fifo 1 overflows */
+ uint32 rxhlovfl; /**< number of length / header fifo overflows */
+ uint32 rxrtsucast; /**< number of unicast RTS addressed to the MAC (good FCS) */
+ uint32 rxctsucast; /**< number of unicast CTS addressed to the MAC (good FCS) */
+ uint32 rxackucast; /**< number of ucast ACKS received (good FCS) */
+ uint32 rxback; /**< blockack rxcnt */
+ uint32 rxbeaconmbss; /**< beacons received from member of BSS */
+ uint32 rxdtucastmbss; /**< number of received DATA frames with good FCS and matching RA */
+ uint32 rxbeaconobss; /**< beacons received from other BSS */
+ uint32 rxdtucastobss; /**< number of unicast frames addressed to the MAC from
+ * other BSS (WDS FRAME)
+ */
+ uint32 rxdtocast; /**< number of received DATA frames (good FCS and no matching RA) */
+ uint32 rxrtsocast; /**< number of received RTS not addressed to the MAC */
+ uint32 rxctsocast; /**< number of received CTS not addressed to the MAC */
+ uint32 rxdtmcast; /**< number of RX Data multicast frames received by the MAC */
+ uint32 rxmpdu_mu; /**< Number of MU MPDUs received */
+ uint32 rxtoolate; /**< receive too late */
+ uint32 rxframe; /**< rx data frames */
+ uint32 lqcm_report; /**< lqcm metric tx/rx idx */
+ uint32 tx_toss_cnt; /* number of tx packets tossed */
+ uint32 rx_toss_cnt; /* number of rx packets tossed */
+	uint32	last_tx_toss_rsn; /* reason the last tx pkt was tossed */
+	uint32	last_rx_toss_rsn; /* reason the last rx pkt was tossed */
+ uint32 txbcnfrm; /**< beacons transmitted */
+} wl_periodic_compact_cntrs_v1_t;
+
+#define WL_PERIODIC_COMPACT_CNTRS_VER_2 (2)
+typedef struct {
+ uint16 version;
+ uint16 pad;
+ /* taken from wl_wlc_cnt_t */
+ uint32 txfail;
+ /* taken from wl_cnt_ge40mcst_v1_t */
+ uint32 txallfrm; /**< total number of frames sent, incl. Data, ACK, RTS, CTS,
+ * Control Management (includes retransmissions)
+ */
+ uint32 txrtsfrm; /**< number of RTS sent out by the MAC */
+ uint32 txctsfrm; /**< number of CTS sent out by the MAC */
+ uint32 txback; /**< blockack txcnt */
+ uint32 txucast; /**< number of unicast tx expecting response other than cts/cwcts */
+ uint32 txnoack; /**< dot11ACKFailureCount */
+ uint32 txframe; /**< tx data frames */
+ uint32 txretrans; /**< tx mac retransmits */
+ uint32 txpspoll; /**< Number of TX PS-poll */
+
+ uint32 rxrsptmout; /**< number of response timeouts for transmitted frames
+ * expecting a response
+ */
+ uint32 txrtsfail; /**< number of rts transmission failure that reach retry limit */
+ uint32 rxstrt; /**< number of received frames with a good PLCP */
+ uint32 rxbadplcp; /**< number of parity check of the PLCP header failed */
+ uint32 rxcrsglitch; /**< PHY was able to correlate the preamble but not the header */
+ uint32 rxnodelim; /**< number of no valid delimiter detected by ampdu parser */
+ uint32 bphy_badplcp; /**< number of bad PLCP reception on BPHY rate */
+ uint32 bphy_rxcrsglitch; /**< PHY count of bphy glitches */
+ uint32 rxbadfcs; /**< number of frames for which the CRC check failed in the MAC */
+ uint32 rxf0ovfl; /**< number of receive fifo 0 overflows */
+	uint32	rxf1ovfl;	/**< number of receive fifo 1 overflows */
+ uint32 rxhlovfl; /**< number of length / header fifo overflows */
+ uint32 rxrtsucast; /**< number of unicast RTS addressed to the MAC (good FCS) */
+ uint32 rxctsucast; /**< number of unicast CTS addressed to the MAC (good FCS) */
+ uint32 rxackucast; /**< number of ucast ACKS received (good FCS) */
+ uint32 rxback; /**< blockack rxcnt */
+ uint32 rxbeaconmbss; /**< beacons received from member of BSS */
+ uint32 rxdtucastmbss; /**< number of received DATA frames with good FCS and matching RA */
+ uint32 rxbeaconobss; /**< beacons received from other BSS */
+ uint32 rxdtucastobss; /**< number of unicast frames addressed to the MAC from
+ * other BSS (WDS FRAME)
+ */
+ uint32 rxdtocast; /**< number of received DATA frames (good FCS and no matching RA) */
+ uint32 rxrtsocast; /**< number of received RTS not addressed to the MAC */
+ uint32 rxctsocast; /**< number of received CTS not addressed to the MAC */
+ uint32 rxdtmcast; /**< number of RX Data multicast frames received by the MAC */
+ uint32 rxmpdu_mu; /**< Number of MU MPDUs received */
+ uint32 rxtoolate; /**< receive too late */
+ uint32 rxframe; /**< rx data frames */
+ uint32 lqcm_report; /**< lqcm metric tx/rx idx */
+ uint32 tx_toss_cnt; /* number of tx packets tossed */
+ uint32 rx_toss_cnt; /* number of rx packets tossed */
+	uint32	last_tx_toss_rsn; /* reason the last tx pkt was tossed */
+	uint32	last_rx_toss_rsn; /* reason the last rx pkt was tossed */
+ uint32 txbcnfrm; /**< beacons transmitted */
+ uint32 rxretry; /* Number of rx packets received after retry */
+	uint32	rxdup;		/* Number of duplicate packets; indicates whether peer is receiving our ACKs */
+ uint32 chswitch_cnt; /* Number of channel switches */
+ uint32 pm_dur; /* Total sleep time in PM, msecs */
+} wl_periodic_compact_cntrs_v2_t;
+
+#define WL_PERIODIC_COMPACT_CNTRS_VER_3 (3)
+typedef struct {
+ uint16 version;
+ uint16 pad;
+ /* taken from wl_wlc_cnt_t */
+ uint32 txfail;
+ /* taken from wl_cnt_ge40mcst_v1_t */
+ uint32 txallfrm; /**< total number of frames sent, incl. Data, ACK, RTS, CTS,
+ * Control Management (includes retransmissions)
+ */
+ uint32 txrtsfrm; /**< number of RTS sent out by the MAC */
+ uint32 txctsfrm; /**< number of CTS sent out by the MAC */
+ uint32 txback; /**< blockack txcnt */
+ uint32 txucast; /**< number of unicast tx expecting response other than cts/cwcts */
+ uint32 txnoack; /**< dot11ACKFailureCount */
+ uint32 txframe; /**< tx data frames */
+ uint32 txretrans; /**< tx mac retransmits */
+ uint32 txpspoll; /**< Number of TX PS-poll */
+
+ uint32 rxrsptmout; /**< number of response timeouts for transmitted frames
+ * expecting a response
+ */
+ uint32 txrtsfail; /**< number of rts transmission failure that reach retry limit */
+ uint32 rxstrt; /**< number of received frames with a good PLCP */
+ uint32 rxbadplcp; /**< number of parity check of the PLCP header failed */
+ uint32 rxcrsglitch; /**< PHY was able to correlate the preamble but not the header */
+ uint32 rxnodelim; /**< number of no valid delimiter detected by ampdu parser */
+ uint32 bphy_badplcp; /**< number of bad PLCP reception on BPHY rate */
+ uint32 bphy_rxcrsglitch; /**< PHY count of bphy glitches */
+ uint32 rxbadfcs; /**< number of frames for which the CRC check failed in the MAC */
+ uint32 rxf0ovfl; /**< number of receive fifo 0 overflows */
+	uint32	rxf1ovfl;	/**< number of receive fifo 1 overflows */
+ uint32 rxhlovfl; /**< number of length / header fifo overflows */
+ uint32 rxrtsucast; /**< number of unicast RTS addressed to the MAC (good FCS) */
+ uint32 rxctsucast; /**< number of unicast CTS addressed to the MAC (good FCS) */
+ uint32 rxackucast; /**< number of ucast ACKS received (good FCS) */
+ uint32 rxback; /**< blockack rxcnt */
+ uint32 rxbeaconmbss; /**< beacons received from member of BSS */
+ uint32 rxdtucastmbss; /**< number of received DATA frames with good FCS and matching RA */
+ uint32 rxbeaconobss; /**< beacons received from other BSS */
+ uint32 rxdtucastobss; /**< number of unicast frames addressed to the MAC from
+ * other BSS (WDS FRAME)
+ */
+ uint32 rxdtocast; /**< number of received DATA frames (good FCS and no matching RA) */
+ uint32 rxrtsocast; /**< number of received RTS not addressed to the MAC */
+ uint32 rxctsocast; /**< number of received CTS not addressed to the MAC */
+ uint32 rxdtmcast; /**< number of RX Data multicast frames received by the MAC */
+ uint32 rxmpdu_mu; /**< Number of MU MPDUs received */
+ uint32 rxtoolate; /**< receive too late */
+ uint32 rxframe; /**< rx data frames */
+ uint32 lqcm_report; /**< lqcm metric tx/rx idx */
+ uint32 tx_toss_cnt; /* number of tx packets tossed */
+ uint32 rx_toss_cnt; /* number of rx packets tossed */
+	uint32	last_tx_toss_rsn; /* reason the last tx pkt was tossed */
+	uint32	last_rx_toss_rsn; /* reason the last rx pkt was tossed */
+ uint32 txbcnfrm; /**< beacons transmitted */
+ uint32 rxretry; /* Number of rx packets received after retry */
+	uint32	rxdup;		/* Number of duplicate packets; indicates whether peer is receiving our ACKs */
+ uint32 chswitch_cnt; /* Number of channel switches */
+ uint32 pm_dur; /* Total sleep time in PM, msecs */
+ uint32 rxholes; /* Count of missed packets from peer */
+} wl_periodic_compact_cntrs_v3_t;
+
+#define WL_PERIODIC_COMPACT_CNTRS_VER_4 (4)
+typedef struct {
+ uint16 version;
+ uint16 pad;
+ /* taken from wl_wlc_cnt_t */
+ uint32 txfail;
+ /* taken from wl_cnt_ge40mcst_v1_t */
+ uint32 txallfrm; /**< total number of frames sent, incl. Data, ACK, RTS, CTS,
+ * Control Management (includes retransmissions)
+ */
+ uint32 txrtsfrm; /**< number of RTS sent out by the MAC */
+ uint32 txctsfrm; /**< number of CTS sent out by the MAC */
+ uint32 txback; /**< blockack txcnt */
+ uint32 txucast; /**< number of unicast tx expecting response other than cts/cwcts */
+ uint32 txnoack; /**< dot11ACKFailureCount */
+ uint32 txframe; /**< tx data frames */
+ uint32 txretrans; /**< tx mac retransmits */
+ uint32 txpspoll; /**< Number of TX PS-poll */
+
+ uint32 rxrsptmout; /**< number of response timeouts for transmitted frames
+ * expecting a response
+ */
+ uint32 txrtsfail; /**< number of rts transmission failure that reach retry limit */
+ uint32 rxstrt; /**< number of received frames with a good PLCP */
+ uint32 rxbadplcp; /**< number of parity check of the PLCP header failed */
+ uint32 rxcrsglitch; /**< PHY was able to correlate the preamble but not the header */
+ uint32 rxnodelim; /**< number of no valid delimiter detected by ampdu parser */
+ uint32 bphy_badplcp; /**< number of bad PLCP reception on BPHY rate */
+ uint32 bphy_rxcrsglitch; /**< PHY count of bphy glitches */
+ uint32 rxbadfcs; /**< number of frames for which the CRC check failed in the MAC */
+ uint32 rxf0ovfl; /**< number of receive fifo 0 overflows */
+	uint32	rxf1ovfl;	/**< number of receive fifo 1 overflows */
+ uint32 rxhlovfl; /**< number of length / header fifo overflows */
+ uint32 rxrtsucast; /**< number of unicast RTS addressed to the MAC (good FCS) */
+ uint32 rxctsucast; /**< number of unicast CTS addressed to the MAC (good FCS) */
+ uint32 rxackucast; /**< number of ucast ACKS received (good FCS) */
+ uint32 rxback; /**< blockack rxcnt */
+ uint32 rxbeaconmbss; /**< beacons received from member of BSS */
+ uint32 rxdtucastmbss; /**< number of received DATA frames with good FCS and matching RA */
+ uint32 rxbeaconobss; /**< beacons received from other BSS */
+ uint32 rxdtucastobss; /**< number of unicast frames addressed to the MAC from
+ * other BSS (WDS FRAME)
+ */
+ uint32 rxdtocast; /**< number of received DATA frames (good FCS and no matching RA) */
+ uint32 rxrtsocast; /**< number of received RTS not addressed to the MAC */
+ uint32 rxctsocast; /**< number of received CTS not addressed to the MAC */
+ uint32 rxdtmcast; /**< number of RX Data multicast frames received by the MAC */
+ uint32 rxmpdu_mu; /**< Number of MU MPDUs received */
+ uint32 rxtoolate; /**< receive too late */
+ uint32 rxframe; /**< rx data frames */
+ uint32 lqcm_report; /**< lqcm metric tx/rx idx */
+ uint32 tx_toss_cnt; /* number of tx packets tossed */
+ uint32 rx_toss_cnt; /* number of rx packets tossed */
+	uint32	last_tx_toss_rsn; /* reason the last tx pkt was tossed */
+	uint32	last_rx_toss_rsn; /* reason the last rx pkt was tossed */
+ uint32 txbcnfrm; /**< beacons transmitted */
+ uint32 rxretry; /* Number of rx packets received after retry */
+	uint32	rxdup;		/* Number of duplicate packets; indicates whether peer is receiving our ACKs */
+ uint32 chswitch_cnt; /* Number of channel switches */
+ uint32 pm_dur; /* Total sleep time in PM, msecs */
+ uint32 rxholes; /* Count of missed packets from peer */
+
+ uint32 rxundec; /* Decrypt failures */
+ uint32 rxundec_mcst; /* Decrypt failures multicast */
+ uint16 replay; /* replay failures */
+ uint16 replay_mcst; /* ICV failures */
+
+	uint32	pktfilter_discard;	/* Packets discarded by pkt filter */
+	uint32	pktfilter_forward;	/* Packets forwarded by pkt filter */
+ uint32 mac_rxfilter; /* Pkts filtered due to class/auth state mismatch */
+
+} wl_periodic_compact_cntrs_v4_t;
+
+#define WL_PERIODIC_COMPACT_HE_CNTRS_VER_1 (1)
+typedef struct {
+ uint16 version;
+ uint16 len;
+ uint32 he_rxtrig_rand;
+ uint32 he_colormiss_cnt;
+ uint32 he_txmtid_back;
+ uint32 he_rxmtid_back;
+ uint32 he_rxmsta_back;
+ uint32 he_rxtrig_basic;
+ uint32 he_rxtrig_murts;
+ uint32 he_rxtrig_bsrp;
+ uint32 he_rxdlmu;
+ uint32 he_physu_rx;
+ uint32 he_txtbppdu;
+} wl_compact_he_cnt_wlc_v1_t;
+
+#define WL_PERIODIC_COMPACT_HE_CNTRS_VER_2 (2)
+typedef struct {
+ uint16 version;
+ uint16 len;
+ uint32 he_rxtrig_myaid;
+ uint32 he_rxtrig_rand;
+ uint32 he_colormiss_cnt;
+ uint32 he_txmampdu;
+ uint32 he_txmtid_back;
+ uint32 he_rxmtid_back;
+ uint32 he_rxmsta_back;
+ uint32 he_txfrag;
+ uint32 he_rxdefrag;
+ uint32 he_txtrig;
+ uint32 he_rxtrig_basic;
+ uint32 he_rxtrig_murts;
+ uint32 he_rxtrig_bsrp;
+ uint32 he_rxhemuppdu_cnt;
+ uint32 he_physu_rx;
+ uint32 he_phyru_rx;
+ uint32 he_txtbppdu;
+ uint32 he_null_tbppdu;
+ uint32 he_rxhesuppdu_cnt;
+ uint32 he_rxhesureppdu_cnt;
+ uint32 he_null_zero_agg;
+ uint32 he_null_bsrp_rsp;
+ uint32 he_null_fifo_empty;
+} wl_compact_he_cnt_wlc_v2_t;
+
+/* for future versions of this data structure, can consider wl_txbf_ecounters_t
+ * which contains the full list of txbf dump counters
+ */
+typedef struct {
+ uint16 version;
+ uint16 coreup;
+ uint32 txndpa;
+ uint32 txndp;
+ uint32 rxsf;
+ uint32 txbfm;
+ uint32 rxndpa_u;
+ uint32 rxndpa_m;
+ uint32 bferpt;
+ uint32 rxbfpoll;
+ uint32 txsf;
+} wl_periodic_txbf_cntrs_v1_t;
+
+typedef struct {
+ struct ether_addr BSSID;
+ chanspec_t chanspec;
+ int32 rssi;
+ int32 snr;
+} wl_rx_signal_metric_t;
+
+typedef struct {
+ uint8 version;
+ uint8 flags;
+ uint16 pad;
+ int32 noise_level; /* current noise level */
+ wl_rx_signal_metric_t current_bss;
+ wl_rx_signal_metric_t target_bss;
+} wl_lqm_t;
+
+#define WL_PERIODIC_IF_STATE_VER_1 (1)
+typedef struct wl_if_state_compact {
+ uint8 version;
+ uint8 assoc_state;
+ uint8 antenna_count; /**< number of valid antenna rssi */
+ int8 noise_level; /**< noise right after tx (in dBm) */
+	int8 snr;			/* current SNR */
+ int8 rssi_sum; /**< summed rssi across all antennas */
+ uint16 pad16;
+ int8 rssi_ant[WL_RSSI_ANT_MAX]; /**< rssi per antenna */
+ struct ether_addr BSSID;
+ chanspec_t chanspec;
+} wl_if_state_compact_t;
+
+#define WL_EVENT_STATISTICS_VER_1 (1)
+/* Event based statistics ecounters */
+typedef struct {
+ uint16 version;
+ uint16 pad;
+ struct ether_addr BSSID; /* BSSID of the BSS */
+ uint32 txdeauthivalclass;
+} wl_event_based_statistics_v1_t;
+
+#define WL_EVENT_STATISTICS_VER_2 (2)
+/* Event based statistics ecounters */
+typedef struct {
+ uint16 version;
+ uint16 pad;
+ struct ether_addr BSSID; /* BSSID of the BSS */
+ uint32 txdeauthivalclass;
+ /* addition for v2 */
+ int32 timestamp; /* last deauth time */
+ struct ether_addr last_deauth; /* wrong deauth MAC */
+ uint16 misdeauth; /* wrong deauth count every 1sec */
+ int16 cur_rssi; /* current bss rssi */
+ int16 deauth_rssi; /* deauth pkt rssi */
+} wl_event_based_statistics_v2_t;
+
+#define WL_EVENT_STATISTICS_VER_3 (3)
+/* Event based statistics ecounters */
+typedef struct {
+ uint16 version;
+ uint16 pad;
+ struct ether_addr BSSID; /* BSSID of the BSS */
+ uint16 PAD;
+ uint32 txdeauthivalclass;
+ /* addition for v2 */
+ int32 timestamp; /* last deauth time */
+ struct ether_addr last_deauth; /* wrong deauth MAC */
+ uint16 misdeauth; /* wrong deauth count every 1sec */
+ int16 cur_rssi; /* current bss rssi */
+ int16 deauth_rssi; /* deauth pkt rssi */
+ /* addition for v3 (roam statistics) */
+ uint32 initial_assoc_time;
+ uint32 prev_roam_time;
+ uint32 last_roam_event_type;
+ uint32 last_roam_event_status;
+ uint32 last_roam_event_reason;
+ uint16 roam_success_cnt;
+ uint16 roam_fail_cnt;
+ uint16 roam_attempt_cnt;
+ uint16 max_roam_target_cnt;
+ uint16 min_roam_target_cnt;
+ uint16 max_cached_ch_cnt;
+ uint16 min_cached_ch_cnt;
+ uint16 partial_roam_scan_cnt;
+ uint16 full_roam_scan_cnt;
+ uint16 most_roam_reason;
+ uint16 most_roam_reason_cnt;
+} wl_event_based_statistics_v3_t;
+
+#define WL_EVENT_STATISTICS_VER_4 (4u)
+/* Event based statistics ecounters */
+typedef struct {
+ uint16 version;
+ uint16 pad;
+ struct ether_addr BSSID; /* BSSID of the BSS */
+ uint16 PAD;
+ uint32 txdeauthivalclass;
+ /* addition for v2 */
+ int32 timestamp; /* last deauth time */
+ struct ether_addr last_deauth; /* wrong deauth MAC */
+ uint16 misdeauth; /* wrong deauth count every 1sec */
+ int16 cur_rssi; /* current bss rssi */
+ int16 deauth_rssi; /* deauth pkt rssi */
+} wl_event_based_statistics_v4_t;
+
+/* ##### SC/ Sc offload/ WBUS related ecounters */
+
+#define WL_SC_PERIODIC_COMPACT_CNTRS_VER_1 (1)
+typedef struct {
+ uint16 version;
+ uint16 pad;
+ uint32 rxstrt; /**< number of received frames with a good PLCP */
+ uint32 rxbadplcp; /**< number of parity check of the PLCP header failed */
+ uint32 rxcrsglitch; /**< PHY was able to correlate the preamble but not the header */
+ uint32 rxnodelim; /**< number of no valid delimiter detected by ampdu parser */
+ uint32 bphy_badplcp; /**< number of bad PLCP reception on BPHY rate */
+ uint32 bphy_rxcrsglitch; /**< PHY count of bphy glitches */
+ uint32 rxbadfcs; /**< number of frames for which the CRC check failed in the MAC */
+ uint16 rxrtsucast; /**< number of unicast RTS addressed to the MAC (good FCS) */
+ uint16 rxf0ovfl; /**< number of receive fifo 0 overflows */
+	uint16	rxf1ovfl;	/**< number of receive fifo 1 overflows */
+ uint16 rxhlovfl; /**< number of length / header fifo overflows */
+ uint16 rxbeaconmbss; /**< beacons received from member of BSS */
+ uint16 rxdtucastmbss; /**< number of received DATA frames with good FCS and matching RA */
+ uint16 rxbeaconobss; /**< beacons received from other BSS */
+ uint16 rxdtmcast; /**< number of RX Data multicast frames received by the MAC */
+ uint16 rxtoolate; /**< receive too late */
+ uint16 chswitch_cnt; /* Number of channel switches */
+ uint32 pm_dur; /* Total sleep time in PM, msecs */
+ uint16 hibernate_cnt; /* Number of times sc went to hibernate */
+ uint16 awake_cnt; /* Number of times sc awake is called */
+ uint16 sc_up_cnt; /* Number of times sc up/down happened */
+ uint16 sc_down_cnt; /* Number of times sc down happened */
+} wl_sc_periodic_compact_cntrs_v1_t;
+
+#define WL_WBUS_PERIODIC_CNTRS_VER_1 (1)
+typedef struct {
+ uint16 version;
+ uint16 pad;
+ uint16 num_register; /* Number of registrations */
+ uint16 num_deregister; /* Number of deregistrations */
+ uint8 num_pending; /* Number of pending non-bt */
+ uint8 num_active; /* Number of active non-bt */
+ uint8 num_bt; /* Number of bt users */
+ uint8 pad1;
+ uint16 num_rej; /* Number of reject */
+ uint16 num_rej_bt; /* Number of rejects for bt */
+	uint16 num_accept_attempt;	/* Number of accept attempts */
+ uint16 num_accept_ok; /* Number of accept ok */
+} wl_wbus_periodic_cntrs_v1_t;
+
+#define WL_STA_OFLD_CNTRS_VER_1 (1)
+typedef struct {
+ uint16 version;
+ uint16 pad;
+
+ uint16 sc_ofld_enter_cnt;
+ uint16 sc_ofld_exit_cnt;
+ uint16 sc_ofld_wbus_reject_cnt;
+ uint16 sc_ofld_wbus_cb_fail_cnt;
+ uint16 sc_ofld_missed_bcn_cnt;
+ uint8 sc_ofld_last_exit_reason;
+ uint8 sc_ofld_last_enter_fail_reason;
+} wl_sta_ofld_cntrs_v1_t;
+
+/* ##### Ecounters v2 section ##### */
+
+#define ECOUNTERS_VERSION_2 2
+
+/* Enumeration of various ecounters request types. This namespace is different from
+ * global reportable stats namespace.
+*/
+enum {
+ WL_ECOUNTERS_XTLV_REPORT_REQ = 1
+};
+
+/* Input structure for ecounters IOVAR */
+typedef struct ecounters_config_request_v2 {
+ uint16 version; /* config version */
+ uint16 len; /* Length of this struct including variable len */
+ uint16 logset; /* Set where data will go. */
+ uint16 reporting_period; /* reporting_period */
+ uint16 num_reports; /* Number of timer expirations to report on */
+ uint8 pad[2]; /* Reserved for future use */
+ uint8 ecounters_xtlvs[]; /* Statistics Types (tags) to retrieve. */
+} ecounters_config_request_v2_t;
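+
+/* Sizing sketch (illustrative only): in v2 the trailing ecounters_xtlvs[]
+ * is a packed XTLV list, so len must cover the fixed header plus the XTLV
+ * bytes ('xtlv_bytes' is caller-computed).
+ *
+ *	req->version = ECOUNTERS_VERSION_2;
+ *	req->len = (uint16)(sizeof(*req) + xtlv_bytes);
+ *	req->reporting_period = 5;	// example: report every 5 seconds
+ *	req->num_reports = 10;		// example: stop after 10 expirations
+ */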
+
+#define ECOUNTERS_STATS_TYPES_FLAG_SLICE 0x1
+#define ECOUNTERS_STATS_TYPES_FLAG_IFACE 0x2
+#define ECOUNTERS_STATS_TYPES_FLAG_GLOBAL 0x4
+#define ECOUNTERS_STATS_TYPES_DEFAULT 0x8
+
+/* Slice mask bits */
+#define ECOUNTERS_STATS_TYPES_SLICE_MASK_SLICE0 0x1u
+#define ECOUNTERS_STATS_TYPES_SLICE_MASK_SLICE1 0x2u
+#define ECOUNTERS_STATS_TYPES_SLICE_MASK_SLICE_SC 0x4u
+
+typedef struct ecounters_stats_types_report_req {
+ /* flags: bit0 = slice, bit1 = iface, bit2 = global,
+ * rest reserved
+ */
+ uint16 flags;
+ uint16 if_index; /* host interface index */
+ uint16 slice_mask; /* bit0 = slice0, bit1=slice1, rest reserved */
+ uint8 pad[2]; /* padding */
+ uint8 stats_types_req[]; /* XTLVs of requested types */
+} ecounters_stats_types_report_req_t;
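+
+/* Usage sketch (illustrative only): requesting slice-scoped stats from
+ * slice 0 only, using the flag and slice-mask bits above. 'r' points into
+ * a hypothetical request buffer.
+ *
+ *	r->flags = ECOUNTERS_STATS_TYPES_FLAG_SLICE;
+ *	r->if_index = 0;
+ *	r->slice_mask = ECOUNTERS_STATS_TYPES_SLICE_MASK_SLICE0;
+ */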
+
+/* ##### Ecounters_Eventmsgs v2 section ##### */
+
+#define ECOUNTERS_EVENTMSGS_VERSION_2 2
+
+typedef struct event_ecounters_config_request_v2 {
+ uint16 version; /* config version */
+ uint16 len; /* Length of this struct including variable len */
+ uint16 logset; /* Set where data will go. */
+ uint16 event_id; /* Event id for which this config is meant for */
+ uint8 flags; /* Config flags */
+ uint8 pad[3]; /* Reserved for future use */
+ uint8 ecounters_xtlvs[]; /* Statistics Types (tags) to retrieve. */
+} event_ecounters_config_request_v2_t;
+
+#define EVENT_ECOUNTERS_FLAGS_ADD (1 << 0) /* Add configuration for the event_id if set */
+#define EVENT_ECOUNTERS_FLAGS_DEL (1 << 1) /* Delete configuration for event_id if set */
+#define EVENT_ECOUNTERS_FLAGS_ANYIF (1 << 2) /* Interface filtering disable / off bit */
+#define EVENT_ECOUNTERS_FLAGS_BE (1 << 3) /* If cleared report stats of
+ * one event log buffer
+ */
+#define EVENT_ECOUNTERS_FLAGS_DEL_ALL (1 << 4) /* Delete all the configurations of
+ * event ecounters if set
+ */
+
+#define EVENT_ECOUNTERS_FLAGS_BUS (1 << 5) /* Add configuration for the bus events */
+#define EVENT_ECOUNTERS_FLAGS_BUS_H2D (1 << 6) /* Add configuration for the bus direction
+ * 0 - D2H and 1 - H2D
+ */
+
+#define EVENT_ECOUNTERS_FLAGS_DELAYED_FLUSH (1 << 7) /* Flush only when half of the total size
+ * of blocks gets filled. This is to avoid
+ * many interrupts to host.
+ */
+#define EVENT_ECOUNTERS_FLAGS_USER (1 << 6) /* Add configuration for user defined events
+ * Reuse the same flag as H2D
+ */
+
+/* Ecounters suspend resume */
+#define ECOUNTERS_SUSPEND_VERSION_V1 1
+/* To be used in populating suspend_mask and suspend_bitmap */
+#define ECOUNTERS_SUSPEND_TIMER (1 << ECOUNTERS_TRIGGER_REASON_TIMER)
+#define ECOUNTERS_SUSPEND_EVENTS (1 << ECOUNTERS_TRIGGER_REASON_EVENTS)
+
+typedef struct ecounters_suspend {
+ uint16 version;
+ uint16 len;
+ uint32 suspend_bitmap; /* type of ecounter reporting to be suspended */
+ uint32 suspend_mask; /* type of ecounter reporting to be suspended */
+} ecounters_suspend_t;
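+
+/* Usage sketch (illustrative only, field semantics assumed): suspend only
+ * timer-driven reporting, with suspend_mask selecting which bits of
+ * suspend_bitmap are being written.
+ *
+ *	ecounters_suspend_t susp;
+ *	susp.version = ECOUNTERS_SUSPEND_VERSION_V1;
+ *	susp.len = (uint16)sizeof(susp);
+ *	susp.suspend_mask = ECOUNTERS_SUSPEND_TIMER | ECOUNTERS_SUSPEND_EVENTS;
+ *	susp.suspend_bitmap = ECOUNTERS_SUSPEND_TIMER;
+ */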
+
+/* -------------- dynamic BTCOEX --------------- */
+#define DCTL_TROWS 2 /**< currently practical number of rows */
+#define DCTL_TROWS_MAX 4 /**< 2 extra rows RFU */
+/* DYNCTL profile flags */
+#define DCTL_FLAGS_DISABLED 0 /**< default value: all features disabled */
+#define DCTL_FLAGS_DYNCTL (1 << 0) /**< 1 - enabled, 0 - legacy only */
+#define DCTL_FLAGS_DESENSE (1 << 1) /**< auto desense is enabled */
+#define DCTL_FLAGS_MSWITCH (1 << 2) /**< mode switching is enabled */
+#define DCTL_FLAGS_PWRCTRL (1 << 3) /**< Tx power control is enabled */
+/* for now AGG on/off is handled separately */
+#define DCTL_FLAGS_TX_AGG_OFF (1 << 4) /**< TBD: allow TX agg Off */
+#define DCTL_FLAGS_RX_AGG_OFF (1 << 5) /**< TBD: allow RX agg Off */
+/* used for dry run testing only */
+#define DCTL_FLAGS_DRYRUN (1 << 7) /**< Enables dynctl dry run mode */
+#define IS_DYNCTL_ON(prof) ((prof->flags & DCTL_FLAGS_DYNCTL) != 0)
+#define IS_DESENSE_ON(prof) ((prof->flags & DCTL_FLAGS_DESENSE) != 0)
+#define IS_MSWITCH_ON(prof) ((prof->flags & DCTL_FLAGS_MSWITCH) != 0)
+#define IS_PWRCTRL_ON(prof) ((prof->flags & DCTL_FLAGS_PWRCTRL) != 0)
+/* desense level currently in use */
+#define DESENSE_OFF 0
+#define DFLT_DESENSE_MID 12
+#define DFLT_DESENSE_HIGH 2
+
+/**
+ * dynctl data points(a set of btpwr & wlrssi thresholds)
+ * for mode & desense switching
+ */
+typedef struct btc_thr_data {
+ int8 mode; /**< used by desense sw */
+ int8 bt_pwr; /**< BT tx power threshold */
+ int8 bt_rssi; /**< BT rssi threshold */
+ /* wl rssi range when mode or desense change may be needed */
+ int8 wl_rssi_high;
+ int8 wl_rssi_low;
+} btc_thr_data_t;
+
+/* dynctl. profile data structure */
+#define DCTL_PROFILE_VER 0x01
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct dctl_prof {
+ uint8 version; /**< dynctl profile version */
+ /* dynctl profile flags bit:0 - dynctl On, bit:1 dsns On, bit:2 mode sw On, */
+ uint8 flags; /**< bit[6:3] reserved, bit7 - Dryrun (sim) - On */
+ /** wl desense levels to apply */
+ uint8 dflt_dsns_level;
+ uint8 low_dsns_level;
+ uint8 mid_dsns_level;
+ uint8 high_dsns_level;
+ /** mode switching hysteresis in dBm */
+ int8 msw_btrssi_hyster;
+ /** default btcoex mode */
+ uint8 default_btc_mode;
+ /** num of active rows in mode switching table */
+ uint8 msw_rows;
+ /** num of rows in desense table */
+ uint8 dsns_rows;
+ /** dynctl mode switching data table */
+ btc_thr_data_t msw_data[DCTL_TROWS_MAX];
+ /** dynctl desense switching data table */
+ btc_thr_data_t dsns_data[DCTL_TROWS_MAX];
+} BWL_POST_PACKED_STRUCT dctl_prof_t;
+#include <packed_section_end.h>
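+
+/* Usage sketch (illustrative only): enabling dynctl with auto desense on a
+ * profile; thresholds and the msw/dsns tables are left to the caller.
+ *
+ *	prof->version = DCTL_PROFILE_VER;
+ *	prof->flags = DCTL_FLAGS_DYNCTL | DCTL_FLAGS_DESENSE;
+ *	prof->dflt_dsns_level = DESENSE_OFF;
+ *	prof->mid_dsns_level = DFLT_DESENSE_MID;
+ *	prof->high_dsns_level = DFLT_DESENSE_HIGH;
+ *	if (IS_DESENSE_ON(prof)) {
+ *		// dsns_data[] rows will drive desense switching
+ *	}
+ */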
+
+/** dynctl status info */
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct dynctl_status {
+ uint8 sim_on; /**< true if simulation is On */
+ uint16 bt_pwr_shm; /**< BT per/task power as read from ucode */
+ int8 bt_pwr; /**< BT pwr extracted & converted to dBm */
+ int8 bt_rssi; /**< BT rssi in dBm */
+ int8 wl_rssi; /**< last wl rssi reading used by btcoex */
+ uint8 dsns_level; /**< current desense level */
+ uint8 btc_mode; /**< current btcoex mode */
+	/* add more status items if needed, pad to a 4-byte boundary if needed */
+} BWL_POST_PACKED_STRUCT dynctl_status_t;
+#include <packed_section_end.h>
+
+/** dynctl simulation (dryrun data) */
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct dynctl_sim {
+ uint8 sim_on; /**< simulation mode on/off */
+ int8 btpwr; /**< simulated BT power in dBm */
+ int8 btrssi; /**< simulated BT rssi in dBm */
+ int8 wlrssi; /**< simulated WL rssi in dBm */
+} BWL_POST_PACKED_STRUCT dynctl_sim_t;
+/* no default structure packing */
+#include <packed_section_end.h>
+
+/** PTK key maintained per SCB */
+#define RSN_TEMP_ENCR_KEY_LEN 16
+typedef struct wpa_ptk {
+ uint8 kck[RSN_KCK_LENGTH]; /**< EAPOL-Key Key Confirmation Key (KCK) */
+ uint8 kek[RSN_KEK_LENGTH]; /**< EAPOL-Key Key Encryption Key (KEK) */
+ uint8 tk1[RSN_TEMP_ENCR_KEY_LEN]; /**< Temporal Key 1 (TK1) */
+ uint8 tk2[RSN_TEMP_ENCR_KEY_LEN]; /**< Temporal Key 2 (TK2) */
+} wpa_ptk_t;
+
+/** GTK key maintained per SCB */
+typedef struct wpa_gtk {
+ uint32 idx;
+ uint32 key_len;
+ uint8 key[DOT11_MAX_KEY_SIZE];
+} wpa_gtk_t;
+
+/** FBT Auth Response Data structure */
+typedef struct wlc_fbt_auth_resp {
+ uint8 macaddr[ETHER_ADDR_LEN]; /**< station mac address */
+ uint8 pad[2];
+ uint8 pmk_r1_name[WPA2_PMKID_LEN];
+ wpa_ptk_t ptk; /**< pairwise key */
+ wpa_gtk_t gtk; /**< group key */
+ uint32 ie_len;
+ uint8 status; /**< Status of parsing FBT authentication
+ Request in application
+ */
+ uint8 ies[1]; /**< IEs contains MDIE, RSNIE,
+ FBTIE (ANonce, SNonce,R0KH-ID, R1KH-ID)
+ */
+} wlc_fbt_auth_resp_t;
+
+/** FBT Action Response frame */
+typedef struct wlc_fbt_action_resp {
+ uint16 version; /**< structure version */
+ uint16 length; /**< length of structure */
+ uint8 macaddr[ETHER_ADDR_LEN]; /**< station mac address */
+ uint8 data_len; /**< len of ie from Category */
+ uint8 data[1]; /**< data contains category, action, sta address, target ap,
+					status code, fbt response frame body
+ */
+} wlc_fbt_action_resp_t;
+
+#define MACDBG_PMAC_ADDR_INPUT_MAXNUM 16
+#define MACDBG_PMAC_OBJ_TYPE_LEN 8
+
+typedef struct _wl_macdbg_pmac_param_t {
+ char type[MACDBG_PMAC_OBJ_TYPE_LEN];
+ uint8 step;
+ uint8 w_en;
+ uint16 num;
+ uint32 bitmap;
+ uint8 addr_raw;
+ uint8 addr_num;
+ uint16 addr[MACDBG_PMAC_ADDR_INPUT_MAXNUM];
+ uint8 pad0[2];
+ uint32 w_val;
+} wl_macdbg_pmac_param_t;
+
+/** IOVAR 'svmp_sampcol' parameter. Used to set and read SVMP_SAMPLE_COLLECT's setting */
+typedef struct wl_svmp_sampcol_param {
+ uint32 version; /* version */
+ uint8 enable;
+ uint8 trigger_mode; /* SVMP_SAMPCOL_TRIGGER */
+ uint8 trigger_mode_s[2]; /* SVMP_SAMPCOL_PKTPROC */
+ uint8 data_samplerate; /* SVMP_SAMPCOL_SAMPLERATE */
+ uint8 data_sel_phy1; /* SVMP_SAMPCOL_PHY1MUX */
+ uint8 data_sel_rx1; /* SVMP_SAMPCOL_RX1MUX without iqCompOut */
+ uint8 data_sel_dualcap; /* SVMP_SAMPCOL_RX1MUX */
+ uint8 pack_mode; /* SVMP_SAMPCOL_PACK */
+ uint8 pack_order;
+ uint8 pack_cfix_fmt;
+ uint8 pack_1core_sel;
+ uint16 waitcnt;
+ uint16 caplen;
+ uint32 buff_addr_start; /* in word-size (2-bytes) */
+ uint32 buff_addr_end; /* note: Tcl in byte-size, HW in vector-size (8-bytes) */
+ uint8 int2vasip;
+ uint8 PAD;
+ uint16 status;
+} wl_svmp_sampcol_t;
+
+#define WL_SVMP_SAMPCOL_PARAMS_VERSION 1
+
+enum {
+ SVMP_SAMPCOL_TRIGGER_PKTPROC_TRANSITION = 0,
+ SVMP_SAMPCOL_TRIGGER_FORCE_IMMEDIATE,
+ SVMP_SAMPCOL_TRIGGER_RADAR_DET
+};
+
+enum {
+ SVMP_SAMPCOL_PHY1MUX_GPIOOUT = 0,
+ SVMP_SAMPCOL_PHY1MUX_FFT,
+ SVMP_SAMPCOL_PHY1MUX_DBGHX,
+ SVMP_SAMPCOL_PHY1MUX_RX1MUX
+};
+
+enum {
+ SVMP_SAMPCOL_RX1MUX_FARROWOUT = 4,
+ SVMP_SAMPCOL_RX1MUX_IQCOMPOUT,
+ SVMP_SAMPCOL_RX1MUX_DCFILTEROUT,
+ SVMP_SAMPCOL_RX1MUX_RXFILTEROUT,
+ SVMP_SAMPCOL_RX1MUX_ACIFILTEROUT
+};
+
+enum {
+ SVMP_SAMPCOL_SAMPLERATE_1XBW = 0,
+ SVMP_SAMPCOL_SAMPLERATE_2XBW
+};
+
+enum {
+ SVMP_SAMPCOL_PACK_DUALCAP = 0,
+ SVMP_SAMPCOL_PACK_4CORE,
+ SVMP_SAMPCOL_PACK_2CORE,
+ SVMP_SAMPCOL_PACK_1CORE
+};
+
+enum {
+ SVMP_SAMPCOL_PKTPROC_RESET = 0,
+ SVMP_SAMPCOL_PKTPROC_CARRIER_SEARCH,
+ SVMP_SAMPCOL_PKTPROC_WAIT_FOR_NB_PWR,
+ SVMP_SAMPCOL_PKTPROC_WAIT_FOR_W1_PWR,
+ SVMP_SAMPCOL_PKTPROC_WAIT_FOR_W2_PWR,
+ SVMP_SAMPCOL_PKTPROC_OFDM_PHY,
+ SVMP_SAMPCOL_PKTPROC_TIMING_SEARCH,
+ SVMP_SAMPCOL_PKTPROC_CHAN_EST_1,
+ SVMP_SAMPCOL_PKTPROC_LEG_SIG_DEC,
+ SVMP_SAMPCOL_PKTPROC_SIG_DECODE_1,
+ SVMP_SAMPCOL_PKTPROC_SIG_DECODE_2,
+ SVMP_SAMPCOL_PKTPROC_HT_AGC,
+ SVMP_SAMPCOL_PKTPROC_CHAN_EST_2,
+ SVMP_SAMPCOL_PKTPROC_PAY_DECODE,
+ SVMP_SAMPCOL_PKTPROC_DSSS_CCK_PHY,
+ SVMP_SAMPCOL_PKTPROC_WAIT_ENERGY_DROP,
+ SVMP_SAMPCOL_PKTPROC_WAIT_NCLKS,
+ SVMP_SAMPCOL_PKTPROC_PAY_DEC_EXT,
+ SVMP_SAMPCOL_PKTPROC_SIG_FAIL_DELAY,
+ SVMP_SAMPCOL_PKTPROC_RIFS_SEARCH,
+ SVMP_SAMPCOL_PKTPROC_BOARD_SWITCH_DIV_SEARCH,
+ SVMP_SAMPCOL_PKTPROC_DSSS_CCK_BOARD_SWITCH_DIV_SEARCH,
+ SVMP_SAMPCOL_PKTPROC_CHAN_EST_3,
+ SVMP_SAMPCOL_PKTPROC_CHAN_EST_4,
+ SVMP_SAMPCOL_PKTPROC_FINE_TIMING_SEARCH,
+ SVMP_SAMPCOL_PKTPROC_SET_CLIP_GAIN,
+ SVMP_SAMPCOL_PKTPROC_NAP,
+ SVMP_SAMPCOL_PKTPROC_VHT_SIGA_DEC,
+ SVMP_SAMPCOL_PKTPROC_VHT_SIGB_DEC,
+ SVMP_SAMPCOL_PKTPROC_PKT_ABORT,
+ SVMP_SAMPCOL_PKTPROC_DCCAL
+};
+
+/** IOVAR 'svmp_mem' parameter. Used to read/clear svmp memory */
+typedef struct svmp_mem {
+ uint32 addr; /**< offset to read svmp memory from vasip base address */
+ uint16 len; /**< length in count of uint16's */
+ uint16 val; /**< set the range of addr/len with a value */
+} svmp_mem_t;
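+
+/* Usage sketch (illustrative only): clearing a 256-halfword region of SVMP
+ * memory by writing 'val' across the addr/len range.
+ *
+ *	svmp_mem_t mem;
+ *	mem.addr = 0x1000;	// example offset from the vasip base
+ *	mem.len = 256;		// counted in uint16 units
+ *	mem.val = 0;		// value applied across the range on SET
+ */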
+
+/** IOVAR 'mu_rate' parameter. read/set mu rate for upto four users */
+#define MU_RATE_CFG_VERSION 1
+typedef struct mu_rate {
+ uint16 version; /**< version of the structure as defined by MU_RATE_CFG_VERSION */
+ uint16 length; /**< length of entire structure */
+ uint8 auto_rate; /**< enable/disable auto rate */
+ uint8 PAD;
+ uint16 rate_user[4]; /**< rate per each of four users, set to -1 for no change */
+} mu_rate_t;
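+
+/* Illustrative sketch: filling mu_rate_t to enable auto rate for all four
+ * users. The -1 "no change" value from the comment above lands in the
+ * uint16 rate fields as 0xFFFF.
+ */
+static void
+example_mu_rate_auto(mu_rate_t *mu)
+{
+	int i;
+
+	mu->version = MU_RATE_CFG_VERSION;
+	mu->length = (uint16)sizeof(*mu);
+	mu->auto_rate = 1;	/* let the driver pick rates */
+	mu->PAD = 0;
+	for (i = 0; i < 4; i++) {
+		mu->rate_user[i] = 0xFFFFu;	/* -1: leave this user's rate unchanged */
+	}
+}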
+
+/** IOVAR 'mu_group' parameter. Used to set and read MU group recommendation setting */
+#define WL_MU_GROUP_AUTO_COMMAND -1
+#define WL_MU_GROUP_PARAMS_VERSION 3
+#define WL_MU_GROUP_METHOD_NAMELEN 64
+#define WL_MU_GROUP_NGROUP_MAX 15
+#define WL_MU_GROUP_NUSER_MAX 4
+#define WL_MU_GROUP_METHOD_MIN 0
+#define WL_MU_GROUP_NUMBER_AUTO_MIN 1
+#define WL_MU_GROUP_NUMBER_AUTO_MAX 15
+#define WL_MU_GROUP_NUMBER_FORCED_MAX 8
+#define WL_MU_GROUP_METHOD_OLD 0
+#define WL_MU_GROUP_MODE_AUTO 0
+#define WL_MU_GROUP_MODE_FORCED 1
+#define WL_MU_GROUP_FORCED_1GROUP 1
+#define WL_MU_GROUP_ENTRY_EMPTY -1
+typedef struct mu_group {
+ uint32 version; /* version */
+ int16 forced; /* forced group recommendation */
+ int16 forced_group_mcs; /* forced group with mcs */
+ int16 forced_group_num; /* forced group number */
+ int16 group_option[WL_MU_GROUP_NGROUP_MAX][WL_MU_GROUP_NUSER_MAX];
+ /* set mode for forced grouping and read mode for auto grouping */
+ int16 group_GID[WL_MU_GROUP_NGROUP_MAX];
+	int16 group_method;	/* method for VASIP group recommendation */
+ int16 group_number; /* requested number for VASIP group recommendation */
+ int16 auto_group_num; /* exact number from VASIP group recommendation */
+ int8 group_method_name[WL_MU_GROUP_METHOD_NAMELEN];
+ uint8 PAD[2];
+} mu_group_t;
+
+typedef struct mupkteng_sta {
+ struct ether_addr ea;
+ uint8 PAD[2];
+ int32 nrxchain;
+ int32 idx;
+} mupkteng_sta_t;
+
+typedef struct mupkteng_client {
+ int32 rspec;
+ int32 idx;
+ int32 flen;
+ int32 nframes;
+} mupkteng_client_t;
+
+typedef struct mupkteng_tx {
+ mupkteng_client_t client[8];
+ int32 nclients;
+ int32 ntx;
+} mupkteng_tx_t;
+
+/*
+ * MU Packet engine interface.
+ * The following two definitions will go into
+ * wlioctl_defs.h
+ * when wl utility changes are merged to EAGLE TOB & Trunk
+ */
+
+#define WL_MUPKTENG_PER_TX_START 0x10
+#define WL_MUPKTENG_PER_TX_STOP 0x20
+
+/** IOVAR 'mu_policy' parameter. Used to configure MU admission control policies */
+#define WL_MU_POLICY_PARAMS_VERSION 1
+#define WL_MU_POLICY_SCHED_DEFAULT 60
+#define WL_MU_POLICY_DISABLED 0
+#define WL_MU_POLICY_ENABLED 1
+#define WL_MU_POLICY_NRX_MIN 1
+#define WL_MU_POLICY_NRX_MAX 2
+typedef struct mu_policy {
+ uint16 version;
+ uint16 length;
+ uint32 sched_timer;
+ uint32 pfmon;
+ uint32 pfmon_gpos;
+ uint32 samebw;
+ uint32 nrx;
+ uint32 max_muclients;
+} mu_policy_t;
+
+#define WL_NAN_BAND_STR_SIZE 5 /* sizeof ("auto") */
+
+/** Definitions of different NAN Bands */
+/* do not change the order */
+enum {
+ NAN_BAND_B = 0,
+ NAN_BAND_A,
+ NAN_BAND_AUTO,
+ NAN_BAND_INVALID = 0xFF
+};
+
+/* ifdef WL11ULB */
+/* ULB Mode configured via "ulb_mode" IOVAR */
+enum {
+ ULB_MODE_DISABLED = 0,
+ ULB_MODE_STD_ALONE_MODE = 1, /* Standalone ULB Mode */
+ ULB_MODE_DYN_MODE = 2, /* Dynamic ULB Mode */
+ /* Add all other enums before this */
+ MAX_SUPP_ULB_MODES
+};
+
+/* ULB BWs configured via "ulb_bw" IOVAR during Standalone Mode Only.
+ * Values of this enumeration are also used to specify 'Current Operational Bandwidth'
+ * and 'Primary Operational Bandwidth' sub-fields in 'ULB Operations' field (used in
+ * 'ULB Operations' Attribute or 'ULB Mode Switch' Attribute)
+ */
+typedef enum {
+ ULB_BW_DISABLED = 0,
+ ULB_BW_10MHZ = 1, /* Standalone ULB BW in 10 MHz BW */
+ ULB_BW_5MHZ = 2, /* Standalone ULB BW in 5 MHz BW */
+ ULB_BW_2P5MHZ = 3, /* Standalone ULB BW in 2.5 MHz BW */
+ /* Add all other enums before this */
+ MAX_SUPP_ULB_BW
+} ulb_bw_type_t;
+/* endif WL11ULB */
+
+#define WL_MESH_IOCTL_VERSION 1
+#define MESH_IOC_BUFSZ 512 /* sufficient ioc buff size for mesh */
+
+/* container for mesh ioctls & events */
+typedef struct wl_mesh_ioc {
+ uint16 version; /* interface command or event version */
+ uint16 id; /* mesh ioctl cmd ID */
+ uint16 len; /* total length of all tlv records in data[] */
+	uint16 pad;	/* pad for 32-bit alignment */
+ uint8 data[]; /* var len payload of bcm_xtlv_t type */
+} wl_mesh_ioc_t;
+
+enum wl_mesh_cmds {
+ WL_MESH_CMD_ENABLE = 1,
+ WL_MESH_CMD_JOIN = 2,
+ WL_MESH_CMD_PEER_STATUS = 3,
+ WL_MESH_CMD_ADD_ROUTE = 4,
+ WL_MESH_CMD_DEL_ROUTE = 5,
+ WL_MESH_CMD_ADD_FILTER = 6,
+ WL_MESH_CMD_ENAB_AL_METRIC = 7,
+ WL_MESH_CMD_START_AUTOPEER = 8
+};
+
+enum wl_mesh_cmd_xtlv_id {
+ WL_MESH_XTLV_ENABLE = 1,
+ WL_MESH_XTLV_JOIN = 2,
+ WL_MESH_XTLV_STATUS = 3,
+ WL_MESH_XTLV_ADD_ROUTE = 4,
+ WL_MESH_XTLV_DEL_ROUTE = 5,
+ WL_MESH_XTLV_ADD_FILTER = 6,
+ WL_MESH_XTLV_ENAB_AIRLINK = 7,
+ WL_MESH_XTLV_START_AUTOPEER = 8
+};
+/* endif WLMESH */
+
+/* Fast BSS Transition parameter configuration */
+#define FBT_PARAM_CURRENT_VERSION 0
+
+typedef struct _wl_fbt_params {
+ uint16 version; /* version of the structure
+ * as defined by FBT_PARAM_CURRENT_VERSION
+ */
+ uint16 length; /* length of the entire structure */
+
+ uint16 param_type; /* type of parameter defined below */
+ uint16 param_len; /* length of the param_value */
+ uint8 param_value[1]; /* variable length */
+} wl_fbt_params_t;
+
+#define WL_FBT_PARAM_TYPE_RSNIE 0
+#define WL_FBT_PARAM_TYPE_FTIE 0x1
+#define WL_FBT_PARAM_TYPE_SNONCE 0x2
+#define WL_FBT_PARAM_TYPE_MDE 0x3
+#define WL_FBT_PARAM_TYPE_PMK_R0_NAME 0x4
+#define WL_FBT_PARAM_TYPE_R0_KHID 0x5
+#define WL_FBT_PARAM_TYPE_R1_KHID 0x6
+#define WL_FBT_PARAM_TYPE_FIRST_INVALID 0x7
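+
+/* Illustrative sketch: computing the buffer size needed for a
+ * wl_fbt_params_t carrying 'payload_len' bytes of parameter data. Because
+ * param_value[] is declared with a single placeholder byte, the fixed part
+ * is taken as the offset of the tail; OFFSETOF is assumed available from
+ * the driver's utility headers (it is used elsewhere in this file).
+ */
+static uint32
+example_fbt_params_bufsz(uint16 payload_len)
+{
+	return (uint32)OFFSETOF(wl_fbt_params_t, param_value) + payload_len;
+}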
+
+/* Assoc Mgr commands for fine control of assoc */
+#define WL_ASSOC_MGR_CURRENT_VERSION 0x0
+
+typedef struct {
+ uint16 version; /* version of the structure as
+ * defined by WL_ASSOC_MGR_CURRENT_VERSION
+ */
+ uint16 length; /* length of the entire structure */
+
+ uint16 cmd;
+ uint16 params;
+} wl_assoc_mgr_cmd_t;
+
+enum wl_sae_auth_xtlv_id {
+ WL_SAE_AUTH_XTLV_CONTAINER = 0xa1,
+ WL_SAE_AUTH_XTLV_BSSID = 0xa2,
+ WL_SAE_AUTH_XTLV_CYCLIC_GROUP = 0xa3,
+ WL_SAE_AUTH_XTLV_SCALAR = 0xa4,
+ WL_SAE_AUTH_XTLV_ELEMENTS = 0xa5,
+ WL_SAE_AUTH_XTLV_ANTI_CLOGGING = 0xa6,
+ WL_SAE_AUTH_XTLV_SEND_CONFIRM = 0xa7,
+ WL_SAE_AUTH_XTLV_CONFIRM = 0xa8,
+ WL_SAE_AUTH_XTLV_STATUS = 0xa9,
+ WL_SAE_AUTH_XTLV_LAST = 0xac
+};
+
+#define WL_ASSOC_MGR_CMD_PAUSE_ON_EVT 0 /* have assoc pause on certain events */
+#define WL_ASSOC_MGR_CMD_ABORT_ASSOC 1
+#define WL_ASSOC_MGR_CMD_SET_SAE_FRAME 2
+#define WL_ASSOC_MGR_CMD_SEND_AUTH 3
+
+#define WL_ASSOC_MGR_PARAMS_EVENT_NONE 0 /* use this to resume as well as clear */
+#define WL_ASSOC_MGR_PARAMS_PAUSE_EVENT_AUTH_RESP 1
+
+#define WL_WINVER_STRUCT_VER_1 (1)
+
+typedef struct wl_winver {
+
+ /* Version and length of this structure. Length includes all fields in wl_winver_t */
+ uint16 struct_version;
+ uint16 struct_length;
+
+ /* Windows operating system version info (Microsoft provided) */
+ struct {
+ uint32 major_ver;
+ uint32 minor_ver;
+ uint32 build;
+ } os_runtime;
+
+ /* NDIS runtime version (Microsoft provided) */
+ struct {
+ uint16 major_ver;
+ uint16 minor_ver;
+ } ndis_runtime;
+
+ /* NDIS Driver version (Broadcom provided) */
+ struct {
+ uint16 major_ver;
+ uint16 minor_ver;
+ } ndis_driver;
+
+ /* WDI Upper Edge (UE) Driver version (Microsoft provided) */
+ struct {
+ uint8 major_ver;
+ uint8 minor_ver;
+ uint8 suffix;
+ } wdi_ue;
+
+ /* WDI Lower Edge (LE) Driver version (Broadcom provided) */
+ struct {
+ uint8 major_ver;
+ uint8 minor_ver;
+ uint8 suffix;
+ } wdi_le;
+ uint8 PAD[2];
+} wl_winver_t;
+
+/* defined(WLRCC) || defined(ROAM_CHANNEL_CACHE) */
+#define MAX_ROAM_CHANNEL 20
+typedef struct {
+ int32 n;
+ chanspec_t channels[MAX_ROAM_CHANNEL];
+} wl_roam_channel_list_t;
+/* endif RCC || ROAM_CHANNEL_CACHE */
+
+/* values for IOV_MFP arg */
+enum {
+ WL_MFP_NONE = 0,
+ WL_MFP_CAPABLE,
+ WL_MFP_REQUIRED
+};
+
+typedef enum {
+ CHANSW_UNKNOWN = 0, /* channel switch due to unknown reason */
+ CHANSW_SCAN = 1, /* channel switch due to scan */
+ CHANSW_PHYCAL = 2, /* channel switch due to phy calibration */
+ CHANSW_INIT = 3, /* channel set at WLC up time */
+ CHANSW_ASSOC = 4, /* channel switch due to association */
+ CHANSW_ROAM = 5, /* channel switch due to roam */
+ CHANSW_MCHAN = 6, /* channel switch triggered by mchan module */
+ CHANSW_IOVAR = 7, /* channel switch due to IOVAR */
+ CHANSW_CSA_DFS = 8, /* channel switch due to chan switch announcement from AP */
+ CHANSW_APCS = 9, /* Channel switch from AP channel select module */
+
+#ifdef WLAWDL
+ CHANSW_AWDL = 10, /* channel switch due to AWDL */
+#endif /* WLAWDL */
+
+ CHANSW_FBT = 11, /* Channel switch from FBT module for action frame response */
+ CHANSW_UPDBW = 12, /* channel switch at update bandwidth */
+ CHANSW_ULB = 13, /* channel switch at ULB */
+ CHANSW_LAST = 14 /* last channel switch reason */
+} chansw_reason_t;
+
+/*
+ * WOWL unassociated mode power save pattern.
+ */
+typedef struct wowl_radio_duty_cycle {
+ uint16 wake_interval;
+ uint16 sleep_interval;
+} wowl_radio_duty_cycle_t;
+
+typedef struct nd_ra_ol_limits {
+ uint16 version; /* version of the iovar buffer */
+ uint16 type; /* type of data provided */
+ uint16 length; /* length of the entire structure */
+ uint16 pad1; /* pad union to 4 byte boundary */
+ union {
+ struct {
+ uint16 min_time; /* seconds, min time for RA offload hold */
+ uint16 lifetime_percent;
+ /* percent, lifetime percentage for offload hold time */
+ } lifetime_relative;
+ struct {
+ uint16 hold_time; /* seconds, RA offload hold time */
+ uint16 pad2; /* unused */
+ } fixed;
+ } limits;
+} nd_ra_ol_limits_t;
+
+#define ND_RA_OL_LIMITS_VER 1
+
+/* nd_ra_ol_limits sub-types */
+#define ND_RA_OL_LIMITS_REL_TYPE 0 /* relative, percent of RA lifetime */
+#define ND_RA_OL_LIMITS_FIXED_TYPE 1 /* fixed time */
+
+/* buffer lengths for the different nd_ra_ol_limits types */
+#define ND_RA_OL_LIMITS_REL_TYPE_LEN 12
+#define ND_RA_OL_LIMITS_FIXED_TYPE_LEN 10
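+
+/* Illustrative sketch: filling nd_ra_ol_limits_t for the fixed-hold-time
+ * sub-type. On one plausible reading of the defines above, the 10-byte
+ * fixed-type length covers the four header fields (8 bytes) plus hold_time,
+ * excluding the trailing pad.
+ */
+static void
+example_nd_ra_fixed(nd_ra_ol_limits_t *lim, uint16 hold_secs)
+{
+	lim->version = ND_RA_OL_LIMITS_VER;
+	lim->type = ND_RA_OL_LIMITS_FIXED_TYPE;
+	lim->length = ND_RA_OL_LIMITS_FIXED_TYPE_LEN;
+	lim->pad1 = 0;
+	lim->limits.fixed.hold_time = hold_secs;	/* seconds of RA offload hold */
+	lim->limits.fixed.pad2 = 0;
+}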
+
+/*
+ * Temperature Throttling control mode
+ */
+typedef struct wl_temp_control {
+ uint8 enable;
+ uint8 PAD;
+ uint16 control_bit;
+} wl_temp_control_t;
+
+/* SensorHub Interworking mode */
+
+#define SHUB_CONTROL_VERSION 1
+#define SHUB_CONTROL_LEN 12
+
+typedef struct {
+	uint16 version;
+ uint16 length;
+ uint16 cmd;
+ uint16 op_mode;
+ uint16 interval;
+ uint16 enable;
+} shub_control_t;
+
+/* WLC_MAJOR_VER <= 5 */
+/* Data structures for non-TLV format */
+
+/* Data structures for rsdb caps */
+/*
+ * The flags field of the rsdb_caps_response is designed to be
+ * a Bit Mask. As of now only Bit 0 is used as mentioned below.
+ */
+
+/* Bit-0 in flags is used to indicate if the cores can operate synchronously,
+* i.e. either as 2x2 MIMO or as 2x (1x1 SISO). This is true only for 4349 variants.
+* 0 - device can operate only in rsdb mode (e.g. 4364)
+* 1 - device can operate in both rsdb and mimo (e.g. 4359 variants)
+*/
+
+#define WL_RSDB_CAPS_VER 2
+#define SYNCHRONOUS_OPERATION_TRUE (1 << 0)
+#define WL_RSDB_CAPS_FIXED_LEN OFFSETOF(rsdb_caps_response_t, num_chains)
+
+typedef struct rsdb_caps_response {
+ uint8 ver; /* Version */
+ uint8 len; /* length of this structure excluding ver and len */
+ uint8 rsdb; /* TRUE for rsdb chip */
+ uint8 num_of_cores; /* no of d11 cores */
+ uint16 flags; /* Flags to indicate various capabilities */
+ uint8 num_chains[1]; /* Tx/Rx chains for each core */
+} rsdb_caps_response_t;
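+
+/* Illustrative sketch: walking the variable-length num_chains[] tail of a
+ * firmware-returned rsdb_caps_response_t. num_chains[] is declared with one
+ * placeholder entry (WL_RSDB_CAPS_FIXED_LEN marks where the tail starts),
+ * so indexing past 0 assumes the response buffer holds num_of_cores entries.
+ */
+static uint32
+example_rsdb_total_chains(const rsdb_caps_response_t *caps)
+{
+	uint32 total = 0;
+	uint8 i;
+
+	for (i = 0; i < caps->num_of_cores; i++) {
+		total += caps->num_chains[i];	/* Tx/Rx chains on d11 core i */
+	}
+	return total;
+}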
+
+/* Data structures for rsdb bands */
+
+#define WL_RSDB_BANDS_VER 2
+#define WL_RSDB_BANDS_FIXED_LEN OFFSETOF(rsdb_bands_t, band)
+
+typedef struct rsdb_bands
+{
+ uint8 ver;
+ uint8 len;
+ uint16 num_cores; /* num of D11 cores */
+ int16 band[1]; /* The band operating on each of the d11 cores */
+} rsdb_bands_t;
+
+/* rsdb config */
+
+#define WL_RSDB_CONFIG_VER 3
+#define ALLOW_SIB_PARALLEL_SCAN (1 << 0)
+#define MAX_BANDS 2
+
+#define WL_RSDB_CONFIG_LEN sizeof(rsdb_config_t)
+
+typedef uint8 rsdb_opmode_t;
+typedef uint32 rsdb_flags_t;
+
+typedef enum rsdb_modes {
+ WLC_SDB_MODE_NOSDB_MAIN = 1, /* 2X2 or MIMO mode (applicable only for 4355) */
+ WLC_SDB_MODE_NOSDB_AUX = 2,
+ WLC_SDB_MODE_SDB_MAIN = 3, /* This is RSDB mode(default) applicable only for 4364 */
+ WLC_SDB_MODE_SDB_AUX = 4,
+ WLC_SDB_MODE_SDB_AUTO = 5, /* Same as WLC_RSDB_MODE_RSDB(1+1) mode above */
+} rsdb_modes_t;
+
+typedef struct rsdb_config {
+ uint8 ver;
+ uint8 len;
+ uint16 reserved;
+ rsdb_opmode_t non_infra_mode;
+ rsdb_opmode_t infra_mode[MAX_BANDS];
+ rsdb_flags_t flags[MAX_BANDS];
+ rsdb_opmode_t current_mode; /* Valid only in GET, returns the current mode */
+ uint8 pad[3];
+} rsdb_config_t;
+
+/* WLC_MAJOR_VER > =5 */
+/* TLV definitions and data structures for rsdb subcmds */
+
+enum wl_rsdb_cmd_ids {
+ /* RSDB ioctls */
+ WL_RSDB_CMD_VER = 0,
+ WL_RSDB_CMD_CAPS = 1,
+ WL_RSDB_CMD_BANDS = 2,
+ WL_RSDB_CMD_CONFIG = 3,
+ /* Add before this !! */
+ WL_RSDB_CMD_LAST
+};
+#define WL_RSDB_IOV_VERSION 0x1
+
+typedef struct rsdb_caps_response_v1 {
+ uint8 rsdb; /* TRUE for rsdb chip */
+ uint8 num_of_cores; /* no of d11 cores */
+ uint16 flags; /* Flags to indicate various capabilities */
+ uint8 num_chains[MAX_NUM_D11CORES]; /* Tx/Rx chains for each core */
+ uint8 band_cap[MAX_NUM_D11CORES]; /* band cap bitmask per slice */
+} rsdb_caps_response_v1_t;
+
+typedef struct rsdb_bands_v1
+{
+ uint8 num_cores; /* num of D11 cores */
+ uint8 pad; /* padding bytes for 4 byte alignment */
+ int8 band[MAX_NUM_D11CORES]; /* The band operating on each of the d11 cores */
+} rsdb_bands_v1_t;
+
+typedef struct rsdb_config_xtlv {
+ rsdb_opmode_t reserved1; /* Non_infra mode is no more applicable */
+ rsdb_opmode_t infra_mode[MAX_BANDS]; /* Target mode for Infra association */
+ uint8 pad; /* pad bytes for 4 byte alignment */
+ rsdb_flags_t flags[MAX_BANDS];
+ rsdb_opmode_t current_mode; /* GET only; has current mode of operation */
+ uint8 pad1[3];
+} rsdb_config_xtlv_t;
+
+/* Definitions for slot_bss chanseq iovar */
+#define WL_SLOT_BSS_VERSION 1
+
+/* critical slots max size */
+#define WL_SLOTTED_BSS_CS_BMP_CFG_MAX_SZ 128 /* arbitrary */
+
+enum wl_slotted_bss_cmd_id {
+ WL_SLOTTED_BSS_CMD_VER = 0,
+ WL_SLOTTED_BSS_CMD_CHANSEQ = 1,
+ WL_SLOTTED_BSS_CMD_CS_BMP = 2 /* critical slots bitmap */
+};
+
+typedef uint16 chan_seq_type_t;
+enum chan_seq_type {
+ CHAN_SEQ_TYPE_AWDL = 1,
+ CHAN_SEQ_TYPE_SLICE = 2,
+ CHAN_SEQ_TYPE_NAN = 3, /* NAN avail XTLV */
+ CHAN_SEQ_TYPE_NANHO = 4 /* NANHO channel schedule XTLV */
+};
+
+typedef uint8 sched_flag_t;
+enum sched_flag {
+ NO_SDB_SCHED = 0x1,
+ SDB_TDM_SCHED = 0x2,
+ SDB_SPLIT_BAND_SCHED = 0x4, /* default mode for 4357 */
+ MAIN_ONLY = 0x8,
+ AUX_ONLY = 0x10,
+ SDB_DUAL_TIME = (MAIN_ONLY | AUX_ONLY),
+ NO_SDB_MAIN_ONLY = (NO_SDB_SCHED | MAIN_ONLY), /* default mode for 4364 */
+ SDB_TDM_SCHED_MAIN = (SDB_TDM_SCHED | MAIN_ONLY),
+ SDB_TDM_SCHED_AUX = (SDB_TDM_SCHED | AUX_ONLY),
+ SDB_TDM_SCHED_DUAL_TIME = (SDB_TDM_SCHED | SDB_DUAL_TIME),
+ SDB_SPLIT_BAND_SCHED_DUAL_TIME = (SDB_SPLIT_BAND_SCHED | SDB_DUAL_TIME)
+};
+
+typedef struct chan_seq_tlv_data {
+ uint32 flags;
+ uint8 data[1];
+} chan_seq_tlv_data_t;
+
+typedef struct chan_seq_tlv {
+ chan_seq_type_t type;
+ uint16 len;
+ chan_seq_tlv_data_t chanseq_data[1];
+} chan_seq_tlv_t;
+
+typedef struct sb_channel_sequence {
+ sched_flag_t sched_flags; /* (sdb-tdm or sdb-sb or Dual-Time) */
+ uint8 num_seq; /* number of chan_seq_tlv following */
+ uint16 pad;
+ chan_seq_tlv_t seq[1];
+} sb_channel_sequence_t;
+
+typedef struct slice_chan_seq {
+ uint8 slice_index; /* 0(Main) or 1 (Aux) */
+ uint8 num_chanspecs;
+ uint8 dur;
+ uint8 pad;
+ chanspec_t chanspecs[1];
+} slice_chan_seq_t;
+
+#define SLOT_BSS_SLICE_TYPE_DUR_MAX_RANGE 2u
+#define SLOTTED_BSS_AGGR_EN (1 << 0) /* Bitmap of mode */
+#define SLOTTED_BSS_AGGR_LIMIT_DUR (1 << 1) /* Jira 49554 */
+#define SLOTTED_BSS_HE_1024_QAM_SUPPORT (1 << 2) /* MCS10-11 Support */
+
+#define WL_SLICE_CHAN_SEQ_FIXED_LEN OFFSETOF(slice_chan_seq_t, chanspecs)
+/* Definitions for slotted_bss stats */
+#define SBSS_STATS_VERSION 1
+#define SBSS_STATS_CURRENT_VERSION SBSS_STATS_VERSION
+
+#define SBSS_MAX_CHAN_STATS 4
+
+typedef struct sbss_core_stats {
+ uint32 sb_slot_start;
+ uint32 sb_slot_end;
+ uint32 sb_slot_skip;
+ uint32 mismatch_count;
+} sbss_core_stats_t;
+
+typedef struct sbss_chan_stats {
+ chanspec_t chanspec;
+ uint32 slot_start;
+ uint32 slot_end;
+ uint32 slot_skip;
+} sbss_chan_stats_t;
+
+typedef struct sbss_stats_v1 {
+ uint16 version;
+ uint16 length;
+ sbss_core_stats_t corestats[MAX_NUM_D11CORES];
+ sbss_chan_stats_t sbss_chanstats[MAX_NUM_D11CORES][SBSS_MAX_CHAN_STATS];
+} sbss_stats_t;
+
+/* slotted bss critical slots */
+typedef struct wl_sbss_cs_bmp_s {
+ uint8 bitmap_len;
+ uint8 pad[3];
+ uint8 bitmap[];
+} wl_sbss_cs_bmp_t;
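+
+/* Illustrative sketch: sizing a buffer for the flexible critical-slots
+ * bitmap. This assumes bitmap_len is a byte count capped by
+ * WL_SLOTTED_BSS_CS_BMP_CFG_MAX_SZ; sizeof() covers only the fixed header
+ * since bitmap[] is a flexible array member.
+ */
+static uint32
+example_sbss_cs_bmp_bufsz(uint8 bitmap_len)
+{
+	if (bitmap_len > WL_SLOTTED_BSS_CS_BMP_CFG_MAX_SZ)
+		bitmap_len = WL_SLOTTED_BSS_CS_BMP_CFG_MAX_SZ;
+	return (uint32)sizeof(wl_sbss_cs_bmp_t) + bitmap_len;
+}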
+
+typedef struct sim_pm_params {
+ uint32 enabled;
+ uint16 cycle;
+ uint16 up;
+} sim_pm_params_t;
+
+/* Digital napping status */
+#define WL_NAP_STATUS_VERSION_1 1
+typedef struct wl_nap_status_v1 {
+ uint16 version; /* structure version */
+ uint16 len; /* length of returned data */
+ uint16 fw_status; /* bitmask of FW disable reasons */
+ uint8 hw_status; /* bitmask for actual HW state info */
+ uint8 slice_index; /* which slice this represents */
+ uint32 total_disable_dur; /* total time (ms) disabled for fw_status */
+} wl_nap_status_v1_t;
+
+/* Bits for fw_status */
+#define NAP_DISABLED_HOST 0x0001 /* Host has disabled through nap_enable */
+#define NAP_DISABLED_RSSI 0x0002 /* Disabled because of nap_rssi_threshold */
+#define NAP_DISABLED_SCAN 0x0004 /* Disabled because of scan */
+#define NAP_DISABLED_ASSOC 0x0008 /* Disabled because of association */
+#define NAP_DISABLED_LTE 0x0010 /* Disabled because of LTE */
+#define NAP_DISABLED_ACI 0x0020 /* Disabled because of ACI mitigation */
+#define NAP_DISABLED_SEQ_RANGE 0x0040 /* Disabled during SEQ Ranging */
+#define NAP_DISABLED_CHANSWITCH 0x0080 /* Disabled during channel switch */
+
+/* Bits for hw_status */
+#define NAP_HWCFG 0x01 /* State of NAP config bit in phy HW */
+#define NAP_NOCLK 0x80 /* No clock to read HW (e.g. core down) */
+
+/* ifdef WL_NATOE */
+#define WL_NATOE_IOCTL_VERSION 1
+#define WL_NATOE_IOC_BUFSZ 512 /* sufficient ioc buff size for natoe */
+#define WL_NATOE_DBG_STATS_BUFSZ 2048
+#define NATOE_FLAGS_ENAB_MASK 0x1
+#define NATOE_FLAGS_ACTIVE_MASK 0x2
+#define NATOE_FLAGS_PUBNW_MASK 0x4
+#define NATOE_FLAGS_PVTNW_MASK 0x8
+#define NATOE_FLAGS_ENAB_SHFT_MASK 0
+#define NATOE_FLAGS_ACTIVE_SHFT_MASK 1
+#define NATOE_FLAGS_PUBNW_SHFT_MASK 2
+#define NATOE_FLAGS_PVTNW_SHFT_MASK 3
+#define NATOE_FLAGS_PUB_NW_UP (1 << NATOE_FLAGS_PUBNW_SHFT_MASK)
+#define NATOE_FLAGS_PVT_NW_UP (1 << NATOE_FLAGS_PVTNW_SHFT_MASK)
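+
+/* Illustrative sketch: decoding the natoe_flags word reported in
+ * wl_natoe_dbg_stats_v1_t with the mask/shift pairs defined above.
+ */
+static void
+example_natoe_flags_decode(uint16 flags,
+	uint8 *enab, uint8 *active, uint8 *pubnw_up, uint8 *pvtnw_up)
+{
+	*enab = (uint8)((flags & NATOE_FLAGS_ENAB_MASK) >> NATOE_FLAGS_ENAB_SHFT_MASK);
+	*active = (uint8)((flags & NATOE_FLAGS_ACTIVE_MASK) >> NATOE_FLAGS_ACTIVE_SHFT_MASK);
+	*pubnw_up = (flags & NATOE_FLAGS_PUB_NW_UP) ? 1 : 0;
+	*pvtnw_up = (flags & NATOE_FLAGS_PVT_NW_UP) ? 1 : 0;
+}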
+
+#define PCIE_FRWDPKT_STATS_VERSION 1
+
+/* Module version is 1 for IGUANA */
+#define WL_NATOE_MODULE_VER_1 1
+/* Module version is 2 for Lemur */
+#define WL_NATOE_MODULE_VER_2 2
+
+/* WL_NATOE_CMD_MOD_VER */
+typedef uint16 wl_natoe_ver_t;
+/* config natoe STA and AP IP's structure */
+typedef struct {
+ uint32 sta_ip;
+ uint32 sta_netmask;
+ uint32 sta_router_ip;
+ uint32 sta_dnsip;
+ uint32 ap_ip;
+ uint32 ap_netmask;
+} wl_natoe_config_ips_t;
+
+/* natoe ports config structure */
+typedef struct {
+ uint16 start_port_num;
+ uint16 no_of_ports;
+} wl_natoe_ports_config_t;
+
+/* natoe ports exception info */
+typedef struct {
+ uint16 sta_port_num;
+ uint16 dst_port_num; /* for SIP type protocol, dst_port_num info can be ignored by FW */
+ uint32 ip; /* for SIP ip is APcli_ip and for port clash it is dst_ip */
+ uint8 entry_type; /* Create/Destroy */
+ uint8 pad[3];
+} wl_natoe_exception_port_t;
+
+/* container for natoe ioctls & events */
+typedef struct wl_natoe_ioc {
+ uint16 version; /* interface command or event version */
+ uint16 id; /* natoe ioctl cmd ID */
+ uint16 len; /* total length of all tlv records in data[] */
+	uint16 pad;	/* pad for 32-bit alignment */
+ uint8 data[]; /* var len payload of bcm_xtlv_t type */
+} wl_natoe_ioc_t;
+
+typedef struct wl_natoe_pool_stats_v1 {
+ /* For debug purposes */
+ uint16 poolreorg_cnt;
+ uint16 poolrevert_cnt;
+ uint16 txfrag_state;
+ uint16 rxfrag_state;
+ uint16 txfrag_plen;
+ uint16 rxfrag_plen;
+ uint16 tx_pavail;
+ uint16 rx_pavail;
+ uint16 txmin_bkup_bufs;
+ uint16 rxmin_bkup_bufs;
+ uint16 pktpool_sbuf_alloc;
+ uint16 pktpool_plen;
+ uint16 pktpool_pavail;
+ /* Peak shared buffer count in all iterations */
+ uint16 sbuf_peak;
+ /* Peak shared buffer count in current D3 iteration */
+ uint16 sbuf_peak_cur;
+} wl_natoe_pool_stats_v1_t;
+
+typedef struct wl_natoe_arp_entry_v1 {
+ struct ipv4_addr ip;
+ struct ether_addr mac_addr;
+ uint8 lifetime;
+ uint8 flags;
+} wl_natoe_arp_entry_v1_t;
+
+typedef struct wl_natoe_dbg_arp_tbl_info_v1 {
+ uint8 valid_arp_entries;
+ uint8 PAD[3];
+ wl_natoe_arp_entry_v1_t arp_ent[];
+} wl_natoe_dbg_arp_tbl_info_v1_t;
+
+typedef struct wl_natoe_skip_port_entry_v1 {
+ struct ipv4_addr srcip;
+ uint16 src_port;
+ uint16 lifetime;
+} wl_natoe_skip_port_entry_v1_t;
+
+typedef struct wl_natoe_skip_port_info_v1 {
+ uint8 valid_entries;
+ uint8 PAD[3];
+ wl_natoe_skip_port_entry_v1_t skip_port_ent[];
+} wl_natoe_skip_port_info_v1_t;
+
+typedef struct wl_natoe_dbg_stats_v1 {
+ uint16 active_nat_entries;
+ uint16 active_dns_entries;
+ uint16 active_icmp_entries;
+ uint16 valid_arp_entries;
+ uint16 prev_nat_entries;
+ uint16 prev_dns_entries;
+ uint16 tcp_fast_reclaim_cnt;
+ uint16 mcast_packets;
+ uint16 bcast_packets;
+ uint16 port_commands_rcvd;
+ uint16 unsupported_prot;
+ uint16 arp_req_sent;
+ uint16 arp_rsp_rcvd;
+ uint16 non_ether_frames;
+ uint16 port_alloc_fail;
+ uint16 srcip_tbl_full;
+ uint16 dstip_tbl_full;
+ uint16 nat_tbl_full;
+ uint16 icmp_error_cnt;
+ uint16 pkt_drops_resource;
+ uint32 frwd_nat_pkt_cnt;
+ uint32 reverse_nat_pkt_cnt;
+ uint16 pub_nw_chspec;
+ uint16 pvt_nw_chspec;
+ uint8 pubnw_cfg_idx;
+ uint8 pvtnw_cfg_idx;
+ uint8 pubnw_cfg_ID;
+ uint8 pvtnw_cfg_ID;
+ uint16 natoe_flags;
+} wl_natoe_dbg_stats_v1_t;
+
+typedef struct wl_natoe_exception_port_inf_v1 {
+ uint16 except_bmap_size;
+ uint8 port_except_bmap[];
+} wl_natoe_exception_port_inf_v1_t;
+
+typedef struct wl_natoe_dstnat_entry_v1 {
+ struct ipv4_addr clientip;
+ struct ether_addr client_mac_addr;
+ uint16 client_listenport;
+ uint8 opcode;
+} wl_natoe_dstnat_entry_v1_t;
+
+typedef struct wl_pcie_frwd_stats_v1 {
+ uint16 version;
+ uint16 len;
+ uint16 frwd_txfrag_q_cnt; /* no. of txfrags in frwd_txfrag_list */
+ /* no. of outstanding lbufs in txpath on if0/ifx */
+ uint16 tx_frwd_n_lb_if0;
+ uint16 tx_frwd_n_lb_ifx;
+ /* no. of outstanding lfrags in txpath on if0/ifx */
+ uint16 tx_frwd_n_lf_if0;
+ uint16 tx_frwd_n_lf_ifx;
+ /* no. of pending frwd pkts dropped upon d3 entry */
+ uint16 tx_frwd_d3_drop_cnt;
+ /* Total no. of lbufs frwded in txpath on if0/ifx */
+ uint32 tx_frwd_n_lb_if0_cnt;
+ uint32 tx_frwd_n_lb_ifx_cnt;
+ /* Total no. of lfrags frwded in txpath on if0/ifx */
+ uint32 tx_frwd_n_lf_if0_cnt;
+ uint32 tx_frwd_n_lf_ifx_cnt;
+ uint32 frwd_tx_drop_thr_cnt; /* no. of pkts dropped due to txfrag threshold */
+ uint32 frwd_tx_drop_err_cnt; /* no. of pkts dropped due to txfrags not avail / errors */
+} wl_pcie_frwd_stats_v1_t;
+
+enum wl_natoe_cmds {
+ WL_NATOE_CMD_MOD_VER = 0,
+ WL_NATOE_CMD_ENABLE = 1,
+ WL_NATOE_CMD_CONFIG_IPS = 2,
+ WL_NATOE_CMD_CONFIG_PORTS = 3,
+ WL_NATOE_CMD_DBG_STATS = 4,
+ WL_NATOE_CMD_EXCEPTION_PORT = 5,
+ WL_NATOE_CMD_SKIP_PORT = 6,
+ WL_NATOE_CMD_TBL_CNT = 7,
+ WL_NATOE_CMD_CONFIG_DSTNAT = 8,
+ WL_NATOE_CMD_CTRL = 9
+};
+
+enum wl_natoe_cmd_xtlv_id {
+ WL_NATOE_XTLV_MOD_VER = 0,
+ WL_NATOE_XTLV_ENABLE = 1,
+ WL_NATOE_XTLV_CONFIG_IPS = 2,
+ WL_NATOE_XTLV_CONFIG_PORTS = 3,
+ WL_NATOE_XTLV_DBG_STATS = 4,
+ WL_NATOE_XTLV_EXCEPTION_PORT = 5,
+ WL_NATOE_XTLV_SKIP_PORT = 6,
+ WL_NATOE_XTLV_TBL_CNT = 7,
+ WL_NATOE_XTLV_ARP_TBL = 8,
+ WL_NATOE_XTLV_POOLREORG = 9,
+ WL_NATOE_XTLV_CONFIG_DSTNAT = 10,
+ WL_NATOE_XTLV_CTRL = 11
+};
+
+/* endif WL_NATOE */
+
+enum wl_idauth_cmd_ids {
+ WL_IDAUTH_CMD_CONFIG = 1,
+ WL_IDAUTH_CMD_PEER_INFO = 2,
+ WL_IDAUTH_CMD_COUNTERS = 3,
+ WL_IDAUTH_CMD_LAST
+};
+enum wl_idauth_xtlv_id {
+ WL_IDAUTH_XTLV_AUTH_ENAB = 0x1,
+ WL_IDAUTH_XTLV_GTK_ROTATION = 0x2,
+ WL_IDAUTH_XTLV_EAPOL_COUNT = 0x3,
+ WL_IDAUTH_XTLV_EAPOL_INTRVL = 0x4,
+ WL_IDAUTH_XTLV_BLKLIST_COUNT = 0x5,
+ WL_IDAUTH_XTLV_BLKLIST_AGE = 0x6,
+ WL_IDAUTH_XTLV_PEERS_INFO = 0x7,
+ WL_IDAUTH_XTLV_COUNTERS = 0x8
+};
+enum wl_idauth_stats {
+ WL_AUTH_PEER_STATE_AUTHORISED = 0x01,
+ WL_AUTH_PEER_STATE_BLACKLISTED = 0x02,
+ WL_AUTH_PEER_STATE_4WAY_HS_ONGOING = 0x03,
+ WL_AUTH_PEER_STATE_LAST
+};
+typedef struct {
+ uint16 state; /* Peer State: Authorised or Blacklisted */
+ struct ether_addr peer_addr; /* peer Address */
+ uint32 blklist_end_time; /* Time of blacklist end */
+} auth_peer_t;
+typedef struct wl_idauth_counters {
+ uint32 auth_reqs; /* No of auth req recvd */
+ uint32 mic_fail; /* No of mic fails */
+ uint32 four_way_hs_fail; /* No of 4-way handshake fails */
+} wl_idauth_counters_t;
+
+#define WLC_UTRACE_LEN (1024u * 4u) // default length
+#define WLC_UTRACE_LEN_AUX (1024u * 3u) // reduced length to fit smaller AUX BM
+#define WLC_UTRACE_LEN_SC (1024u * 3u) // reduced length to fit smaller Scan core BM
+
+#define WLC_UTRACE_READ_END 0
+#define WLC_UTRACE_MORE_DATA 1
+
+typedef struct wl_utrace_capture_args_v1 {
+ uint32 length;
+ uint32 flag;
+} wl_utrace_capture_args_v1_t;
+
+#define UTRACE_CAPTURE_VER_2 2
+typedef struct wl_utrace_capture_args_v2 {
+ /* structure control */
+ uint16 version; /**< structure version */
+ uint16 length; /**< length of the response */
+ uint32 flag; /* Indicates if there is more data or not */
+} wl_utrace_capture_args_v2_t;
+
+/* Signal read end. */
+#define WLC_REGVAL_READ_END 0
+/* Signal more data pending. */
+#define WLC_REGVAL_MORE_DATA 1
+/* Internal read state. */
+#define WLC_REGVAL_READ_CONTINUE 2
+
+#define WLC_REGVAL_DUMP_PHYREG 0
+#define WLC_REGVAL_DUMP_RADREG 1
+
+#define PHYREGVAL_CAPTURE_BUFFER_LEN 2048
+
+typedef struct wl_regval_capture_args {
+ uint32 control_flag; /* Carries status information. */
+} wl_regval_capture_args_t;
+
+/* XTLV IDs for the Health Check "hc" iovar top level container */
+enum {
+ WL_HC_XTLV_ID_CAT_HC = 1, /* category for HC as a whole */
+ WL_HC_XTLV_ID_CAT_DATAPATH_TX = 2, /* Datapath Tx */
+ WL_HC_XTLV_ID_CAT_DATAPATH_RX = 3, /* Datapath Rx */
+ WL_HC_XTLV_ID_CAT_SCAN = 4, /* Scan */
+ WL_HC_XTLV_ID_CAT_EVENTMASK = 5, /* Health Check event mask. */
+};
+
+/* Health Check: Common XTLV IDs for sub-elements in the top level container.
+ * Numbering starts at 0x8000 to stay out of the way of category-specific IDs.
+ */
+enum {
+ WL_HC_XTLV_ID_ERR = 0x8000, /* for sub-command err return */
+ WL_HC_XTLV_ID_IDLIST = 0x8001, /* container for uint16 IDs */
+};
+
+/* Health Check: Datapath TX IDs */
+enum {
+ WL_HC_TX_XTLV_ID_VAL_STALL_THRESHOLD = 1, /* stall_threshold */
+ WL_HC_TX_XTLV_ID_VAL_STALL_SAMPLE_SIZE = 2, /* stall_sample_size */
+ WL_HC_TX_XTLV_ID_VAL_STALL_TIMEOUT = 3, /* stall_timeout */
+ WL_HC_TX_XTLV_ID_VAL_STALL_FORCE = 4, /* stall_force */
+ WL_HC_TX_XTLV_ID_VAL_STALL_EXCLUDE = 5, /* stall_exclude */
+ WL_HC_TX_XTLV_ID_VAL_FC_TIMEOUT = 6, /* flow ctl timeout */
+ WL_HC_TX_XTLV_ID_VAL_FC_FORCE = 7, /* flow ctl force failure */
+ WL_HC_TX_XTLV_ID_VAL_DELAY_TO_TRAP = 8, /* delay threshold for forced trap */
+ WL_HC_TX_XTLV_ID_VAL_DELAY_TO_RPT = 9, /* delay threshold for event log report */
+ WL_HC_TX_XTLV_ID_VAL_FAILURE_TO_RPT = 10, /* threshold for consecutive TX failures */
+};
+
+/* Health Check: Datapath RX IDs */
+enum {
+ WL_HC_RX_XTLV_ID_VAL_DMA_STALL_TIMEOUT = 1, /* dma_stall_timeout */
+ WL_HC_RX_XTLV_ID_VAL_DMA_STALL_FORCE = 2, /* dma_stall test trigger */
+ WL_HC_RX_XTLV_ID_VAL_STALL_THRESHOLD = 3, /* stall_threshold */
+ WL_HC_RX_XTLV_ID_VAL_STALL_SAMPLE_SIZE = 4, /* stall_sample_size */
+ WL_HC_RX_XTLV_ID_VAL_STALL_FORCE = 5, /* stall test trigger */
+ WL_HC_RX_XTLV_ID_VAL_STALL_UC_DECRYPT_FAIL = 6, /* trigger uc decrypt failures */
+ WL_HC_RX_XTLV_ID_VAL_STALL_BCMC_DECRYPT_FAIL = 7, /* trigger bcmc decrypt failures */
+};
+
+/* Health Check: Datapath SCAN IDs */
+enum {
+ WL_HC_XTLV_ID_VAL_SCAN_STALL_THRESHOLD = 1, /* scan stall threshold */
+};
+
+/* Health check: PHY IDs */
+/* Needed for iguana 13.35 branch */
+typedef enum {
+ PHY_HC_DD_ALL = 0,
+ PHY_HC_DD_TEMPSENSE = 1,
+ PHY_HC_DD_VCOCAL = 2,
+ PHY_HC_DD_RX = 3,
+ PHY_HC_DD_TX = 4,
+ PHY_HC_DD_LAST /* This must be the last entry */
+} phy_hc_dd_type_t;
+
+typedef enum {
+ PHY_HC_DD_TEMP_FAIL = 0,
+ PHY_HC_DD_VCO_FAIL = 1,
+ PHY_HC_DD_RXDSN_FAIL = 2,
+ PHY_HC_DD_TXPOW_FAIL = 3,
+ PHY_HC_DD_END /* This must be the last entry */
+} phy_hc_dd_type_v2_t;
+
+/* IDs of Health Check report structures for sub types of health checks within WL */
+typedef enum wl_hc_dd_type {
+ WL_HC_DD_PCIE = 0, /* PCIe */
+ WL_HC_DD_RX_DMA_STALL = 1, /* RX DMA stall check */
+ WL_HC_DD_RX_STALL = 2, /* RX stall check */
+ WL_HC_DD_TX_STALL = 3, /* TX stall check */
+ WL_HC_DD_SCAN_STALL = 4, /* SCAN stall check */
+ WL_HC_DD_PHY = 5, /* PHY health check */
+ WL_HC_DD_REINIT = 6, /* Reinit due to other reasons */
+ WL_HC_DD_TXQ_STALL = 7, /* TXQ stall */
+ WL_HC_DD_RX_STALL_V2 = 8, /* RX stall check v2 */
+
+#ifdef WLAWDL
+ WL_HC_DD_AWDL = 9, /* AWDL health check */
+#endif /* WLAWDL */
+
+	WL_HC_DD_SBSS = 10,	/* Slotted bss health check */
+	WL_HC_DD_NAN = 11,	/* NAN health check */
+ WL_HC_DD_MAX
+} wl_hc_dd_type_t;
+
+/* RX stall reason codes sent with wl_rx_hc_info_v2_t */
+typedef enum bcm_rx_hc_stall_reason {
+ BCM_RX_HC_RESERVED = 0,
+ BCM_RX_HC_UNSPECIFIED = 1, /* All other. Catch all */
+ BCM_RX_HC_UNICAST_DECRYPT_FAIL = 2, /* Unicast decrypt fail */
+ BCM_RX_HC_BCMC_DECRYPT_FAIL = 3, /* BCMC decrypt fail */
+ BCM_RX_HC_UNICAST_REPLAY = 4, /* Unicast replay */
+ BCM_RX_HC_BCMC_REPLAY = 5, /* BCMC replay */
+ BCM_RX_HC_AMPDU_DUP = 6, /* AMPDU DUP */
+ BCM_RX_HC_MAX
+} bcm_rx_hc_stall_reason_t;
+
+/*
+ * Health Check report structures for sub types of health checks within WL
+ */
+
+/* Health Check report structure for Rx DMA Stall check */
+typedef struct {
+ uint16 type;
+ uint16 length;
+ uint16 timeout;
+ uint16 stalled_dma_bitmap;
+} wl_rx_dma_hc_info_t;
+
+/* Health Check report structure for Tx packet failure check */
+typedef struct {
+ uint16 type;
+ uint16 length;
+ uint32 stall_bitmap;
+ uint32 stall_bitmap1;
+ uint32 failure_ac;
+ uint32 threshold;
+ uint32 tx_all;
+ uint32 tx_failure_all;
+} wl_tx_hc_info_t;
+
+/* Health Check report structure for Rx dropped packet failure check */
+typedef struct {
+ uint16 type; /* WL_HC_RX_DD_STALL */
+ uint16 length;
+ uint32 bsscfg_idx;
+ uint32 rx_hc_pkts;
+ uint32 rx_hc_dropped_all;
+ uint32 rx_hc_alert_th;
+} wl_rx_hc_info_t;
+
+/* Health Check report structure for Rx dropped packet failure check */
+typedef struct {
+ uint16 type; /* WL_HC_RX_DD_STALL_V2 */
+ uint16 length;
+ uint8 if_idx; /* interface index on which issue is reported */
+ uint8 ac; /* access category on which this problem is seen */
+ uint8 pad[2]; /* Reserved */
+ uint32 rx_hc_pkts;
+ uint32 rx_hc_dropped_all;
+ uint32 rx_hc_alert_th;
+ uint32 reason; /* refer to bcm_rx_hc_stall_reason_t above */
+ struct ether_addr peer_ea;
+} wl_rx_hc_info_v2_t;
+
+/* HE top level command IDs */
+enum {
+ WL_HE_CMD_ENAB = 0u,
+ WL_HE_CMD_FEATURES = 1u,
+ WL_HE_CMD_TWT_SETUP = 2u,
+ WL_HE_CMD_TWT_TEARDOWN = 3u,
+ WL_HE_CMD_TWT_INFO = 4u,
+ WL_HE_CMD_BSSCOLOR = 5u,
+ WL_HE_CMD_PARTIAL_BSSCOLOR = 6u,
+ WL_HE_CMD_CAP = 7u,
+ WL_HE_CMD_STAID = 8u,
+ WL_HE_CMD_MUEDCA = 9u,
+ WL_HE_CMD_RTSDURTHRESH = 10u,
+ WL_HE_CMD_PEDURATION = 11u,
+ WL_HE_CMD_TESTBED_MODE = 12u,
+ WL_HE_CMD_OMI_CONFIG = 13u,
+ WL_HE_CMD_OMI_STATUS = 14u,
+ WL_HE_CMD_OMI_ULMU_THROTTLE = 15u,
+ WL_HE_CMD_ULMU_DISABLE_POLICY = 16u,
+ WL_HE_CMD_ULMU_DISABLE_STATS = 17u,
+ WL_HE_CMD_OMI_DLMU_RSD_RCM_MPF_MAP = 18u,
+ WL_HE_CMD_SR_PROHIBIT = 19u,
+ WL_HE_CMD_LAST
+};
+
+enum {
+ WL_HE_MUEDCA_IE = 0,
+ WL_HE_MUEDCA_SHM = 1,
+ WL_HE_MUEDCA_LAST
+};
+
+#ifdef WL11AX
+
+/* struct for dumping MU EDCA IE/SHM parameters */
+typedef struct wl_he_muedca_ie_v1 {
+ uint16 version; /* structure version */
+ uint16 length; /* data length (starting after this field) */
+ uint8 mu_qos_info;
+ he_mu_ac_param_record_t param_ac[AC_COUNT];
+} wl_he_muedca_ie_v1_t;
+
+typedef wl_he_muedca_ie_v1_t wl_he_muedca_ie_t;
+
+#define WL_HE_MUEDCA_VER_1 1
+
+#endif /* WL11AX */
+
+/* TWT top level command IDs */
+enum {
+ WL_TWT_CMD_ENAB = 0,
+ WL_TWT_CMD_SETUP = 1,
+ WL_TWT_CMD_TEARDOWN = 2,
+ WL_TWT_CMD_INFO = 3,
+ WL_TWT_CMD_AUTOSCHED = 4,
+ WL_TWT_CMD_STATS = 5,
+ WL_TWT_CMD_EARLY_TERM_TIME = 6,
+ WL_TWT_CMD_RESP_CONFIG = 7,
+ WL_TWT_CMD_SPPS_ENAB = 8,
+ WL_TWT_CMD_CAP = 9,
+ WL_TWT_CMD_STATUS = 10,
+ WL_TWT_CMD_CONFIG = 11,
+ WL_TWT_CMD_LAST
+};
+
+#define WL_HEB_VER_1 1
+
+/* HEB top level command IDs */
+enum {
+ WL_HEB_CMD_ENAB = 0,
+ WL_HEB_CMD_NUM_HEB = 1,
+ WL_HEB_CMD_COUNTERS = 2,
+ WL_HEB_CMD_CLEAR_COUNTERS = 3,
+ WL_HEB_CMD_CONFIG = 4,
+ WL_HEB_CMD_STATUS = 5,
+ WL_HEB_CMD_LAST
+};
+
+/* HEB counters structures */
+typedef struct wl_heb_int_cnt_v1 {
+ uint16 pre_event;
+ uint16 start_event;
+ uint16 end_event;
+ uint16 missed;
+} wl_heb_int_cnt_v1_t;
+
+typedef struct wl_heb_cnt_v1 {
+ /* structure control */
+ uint16 version; /* structure version */
+ uint16 length; /* data length (starting after this field) */
+ wl_heb_int_cnt_v1_t heb_int_cnt[1];
+} wl_heb_cnt_v1_t;
+
+// struct for configuring HEB
+typedef struct wl_config_heb_fill_v1 {
+ uint16 version; /* structure version */
+ uint16 length; /* data length (starting after this field) */
+ uint32 duration;
+ uint32 periodicity;
+ uint16 heb_idx;
+ uint16 preeventtime;
+ uint8 count;
+ uint8 PAD[3];
+} wl_config_heb_fill_v1_t;
+
+typedef struct wl_heb_blk_params_v1 {
+ /* Don't change the order of following elements. This is as per the HEB HW spec */
+ uint32 event_int_val_l;
+ uint32 event_int_val_h;
+ uint32 param2;
+ uint32 param3;
+ uint32 pre_event_intmsk_bmp;
+ uint32 start_event_intmsk_bmp;
+ uint32 end_event_intmsk_bmp;
+ uint32 event_driver_info;
+ uint16 param1;
+ uint8 event_count;
+ uint8 noa_invert;
+} wl_heb_blk_params_v1_t;
+
+typedef struct wl_heb_int_status_v1 {
+ uint32 heb_idx;
+ wl_heb_blk_params_v1_t blk_params;
+} wl_heb_reg_status_v1_t;
+
+typedef struct wl_heb_status_v1 {
+ uint16 version; /* structure version */
+ uint16 length; /* data length (starting after this field) */
+ wl_heb_reg_status_v1_t heb_status[1];
+} wl_heb_status_v1_t;
+
+/* HWA */
+#define WL_HWA_VER_1 1
+
+/* HWA top level command IDs */
+typedef enum wl_hwa_cmd_type {
+ WL_HWA_CMD_ENAB = 0,
+ WL_HWA_CMD_CAPS = 1,
+ WL_HWA_CMD_COUNTERS = 2,
+ WL_HWA_CMD_CLRCNTS = 3,
+ WL_HWA_CMD_REGDUMP = 4,
+ WL_HWA_CMD_INDUCE_ERR = 5,
+ WL_HWA_CMD_LAST
+} wl_hwa_cmd_type_t;
+
+typedef struct wl_hwa_cnts_info_v1 {
+ uint16 cnt_rxs_filter; /* #filters added */
+ uint16 cnt_rxs_chainable; /* #rxchainable matched */
+} wl_hwa_cnts_info_v1_t;
+
+/* HWA dump info structures */
+typedef struct wl_hwa_hwcaps_info_v1 {
+ uint16 up; /* is hwa init'd/deint'd */
+ uint16 corerev; /* hwa core revision */
+ uint32 submodules_mask; /* mask for hwa submodules that are enabled */
+} wl_hwa_hwcaps_info_v1_t;
+
+typedef struct wl_hwa_cnts_v1 {
+ /* structure control */
+ uint16 version; /* structure version */
+ uint16 length; /* data length (starting after this field) */
+ wl_hwa_cnts_info_v1_t hwa_cnts_info[]; /* variable length array with hwa counters */
+} wl_hwa_cnts_v1_t;
+
+/* All submodules, order is important and define order of initialization. */
+/* Not use enumeration here because these defines are also used in macro */
+#define HWA_SUBMODULES_COMMON 0 /**< Common */
+#define HWA_SUBMODULES_TXPOST 1u /**< TxPost 3a */
+#define HWA_SUBMODULES_RXPOSTFILL 2u /**< RxPost and Fill 1a/1b */
+#define HWA_SUBMODULES_TXDMA 3u /**< TxDMA 3b */
+#define HWA_SUBMODULES_TXS 4u /**< TxStatus 4a */
+#define HWA_SUBMODULES_BUFMGR 5u /**< Buffer Manager, RX and TX. Do this last */
+#define HWA_SUBMODULES_CPL 6u /**< Completion 2b/4b */
+#define HWA_SUBMODULES_RXS 7u /**< RxStatus 2a */
+#define HWA_SUBMODULES_NUM 8u /**< number of submodules */
+
+#define HWA_SUBMODULES_ALL 0xFF /* Bitmaps for all submodules */
+#ifdef HWA
+#define HWA_SUBMODULE_MASK(submodule) (1u << (submodule))
+#else
+#define HWA_SUBMODULE_MASK(submodule) (0)
+#endif /* HWA */
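+
+/* Illustrative sketch: composing the submodules_mask reported in
+ * wl_hwa_hwcaps_info_v1_t for a hypothetical build with TxPost and
+ * RxPost/Fill enabled. With HWA undefined, HWA_SUBMODULE_MASK() is 0 for
+ * every submodule and the expression folds away.
+ */
+static uint32
+example_hwa_submodules_mask(void)
+{
+	return HWA_SUBMODULE_MASK(HWA_SUBMODULES_TXPOST) |
+	       HWA_SUBMODULE_MASK(HWA_SUBMODULES_RXPOSTFILL);
+}
+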
+/*
+ * NOTES:
+ * wl_twt_sdesc_t is used to support both broadcast TWT and individual TWT.
+ * Value in bit[0:2] in 'flow_id' field is interpreted differently:
+ * - flow id for individual TWT (when WL_TWT_FLOW_FLAG_BROADCAST bit is NOT set
+ * in 'flow_flags' field)
+ * - flow id as defined in Table 8-248l1 for broadcast TWT (when
+ * WL_TWT_FLOW_FLAG_BROADCAST bit is set)
+ * In latter case other bits could be used to differentiate different flows
+ * in order to support multiple broadcast TWTs with the same flow id.
+ */
+
+/* TWT Setup descriptor */
+typedef struct wl_twt_sdesc {
+ /* Setup Command. */
+ uint8 setup_cmd; /* See TWT_SETUP_CMD_XXXX in 802.11ah.h */
+ uint8 flow_flags; /* Flow attributes. See WL_TWT_FLOW_FLAG_XXXX below */
+ uint8 flow_id; /* must be between 0 and 7. Set 0xFF for auto assignment */
+ uint8 bid; /* must be between 0 and 31. Set 0xFF for auto assignment */
+ uint8 channel; /* Twt channel - Not used for now */
+ uint8 negotiation_type; /* Negotiation Type: See macros TWT_NEGO_TYPE_X */
+ uint8 frame_recomm; /* frame recommendation for broadcast TWTs - Not used for now */
+ uint8 wake_type; /* See WL_TWT_TIME_TYPE_XXXX below */
+ uint32 wake_time_h; /* target wake time - BSS TSF (us) */
+ uint32 wake_time_l;
+ uint32 wake_dur; /* target wake duration in unit of microseconds */
+ uint32 wake_int; /* target wake interval */
+ uint32 btwt_persistence; /* Broadcast TWT Persistence */
+ uint32 wake_int_max; /* max wake interval(uS) for TWT */
+ uint8 duty_cycle_min; /* min duty cycle for TWT(Percentage) */
+ uint8 pad;
+ /* deprecated - to be removed */
+ uint16 li;
+} wl_twt_sdesc_t;
+
+#define WL_TWT_SETUP_DESC_VER 1u
+
+/* TWT Setup descriptor (Version controlled) */
+typedef struct wl_twt_sdesc_v1 {
+ /* structure control */
+ uint16 version; /* structure version */
+ uint16 length; /* data length (starting after this field) */
+ uint8 setup_cmd; /* See TWT_SETUP_CMD_XXXX in 802.11ah.h */
+ uint8 flow_flags; /* Flow attributes. See WL_TWT_FLOW_FLAG_XXXX below */
+ uint8 flow_id; /* must be between 0 and 7. Set 0xFF for auto assignment */
+ uint8 bid; /* must be between 0 and 31. Set 0xFF for auto assignment */
+ uint8 channel; /* Twt channel - Not used for now */
+ uint8 negotiation_type; /* Negotiation Type: See macros TWT_NEGO_TYPE_X */
+ uint8 frame_recomm; /* frame recommendation for broadcast TWTs - Not used for now */
+ uint8 wake_type; /* See WL_TWT_TIME_TYPE_XXXX below */
+ uint32 wake_time_h; /* target wake time - BSS TSF (us) */
+ uint32 wake_time_l;
+ uint32 wake_dur; /* target wake duration in unit of microseconds */
+ uint32 wake_int; /* target wake interval */
+ uint32 btwt_persistence; /* Broadcast TWT Persistence */
+ uint32 wake_int_max; /* max wake interval(uS) for TWT */
+ uint32 wake_int_min; /* Min. wake interval allowed for TWT Setup */
+ uint32 wake_dur_min; /* Min. wake duration allowed for TWT Setup */
+ uint32 wake_dur_max; /* Max. wake duration allowed for TWT Setup */
+} wl_twt_sdesc_v1_t;
+
+#define WL_TWT_CONFIG_DESC_VER 1u
+
+/* TWT config descriptor */
+typedef struct wl_twt_cdesc {
+ /* structure control */
+ uint16 version; /* structure version */
+ uint16 length; /* data length (starting after this field) */
+ uint8 negotiation_type; /* Negotiation Type: See macros TWT_NEGO_TYPE_X */
+ uint8 PAD[3];
+ uint32 wake_time_h; /* target wake time - BSS TSF (us) */
+ uint32 wake_time_l;
+ uint32 wake_dur; /* target wake duration in unit of microseconds */
+ uint32 wake_int; /* target wake interval */
+ uint32 wake_int_max; /* max wake interval(uS) for TWT */
+ uint32 wake_int_min; /* Min. wake interval allowed for TWT Setup */
+ uint32 wake_dur_min; /* Min. wake duration allowed for TWT Setup */
+ uint32 wake_dur_max; /* Max. wake duration allowed for TWT Setup */
+ uint32 avg_pkt_num; /* Average Number of Packets per interval */
+} wl_twt_cdesc_t;
+
+/* Flow flags */
+#define WL_TWT_FLOW_FLAG_UNANNOUNCED (1u << 0u)
+#define WL_TWT_FLOW_FLAG_TRIGGER (1u << 1u)
+#define WL_TWT_FLOW_FLAG_REQUEST (1u << 2u)
+#define WL_TWT_FLOW_FLAG_PROTECT (1u << 3u)
+#define WL_TWT_FLOW_FLAG_RESPONDER_PM (1u << 4u)
+#define WL_TWT_FLOW_FLAG_UNSOLICITED (1u << 5u)
+#define WL_TWT_FLOW_FLAG_WAKEDUR_UNIT_1ms (1u << 6u)
+#define WL_TWT_FLOW_FLAG_INFO_FRM_DISABLED (1u << 7u)
+
+/* Deprecated - To be removed */
+#define WL_TWT_FLOW_FLAG_BROADCAST (1u << 5u)
+#define WL_TWT_FLOW_FLAG_IMPLICIT (1u << 7u)
+
+/* Flow id */
+#define WL_TWT_FLOW_ID_FID 0x07u /* flow id */
+#define WL_TWT_FLOW_ID_GID_MASK 0x70u /* group id - broadcast TWT only */
+#define WL_TWT_FLOW_ID_GID_SHIFT 4u
+
+#define WL_TWT_INV_BCAST_ID 0xFFu
+#define WL_TWT_INV_FLOW_ID 0xFFu
+
+/* auto flow_id */
+#define WL_TWT_SETUP_FLOW_ID_AUTO 0xFFu
+/* auto broadcast ID */
+#define WL_TWT_SETUP_BCAST_ID_AUTO 0xFFu
+/* Infinite persistence for broadcast schedule */
+#define WL_TWT_INFINITE_BTWT_PERSIST 0xFFFFFFFFu
+
+/* Maximum individual & broadcast TWT supported */
+#define WL_TWT_MAX_ITWT 4u
+#define WL_TWT_MAX_BTWT 4u
+
+/* should be larger than what chip supports */
+#define WL_TWT_STATS_MAX_BTWT WL_TWT_MAX_BTWT
+#define WL_TWT_STATS_MAX_ITWT WL_TWT_MAX_ITWT
+
+#define WL_TWT_INACTIVE 0u /* Resource is not allotted */
+#define WL_TWT_RESERVED 1u /* Resource is allotted but HEB is not yet programmed */
+#define WL_TWT_ACTIVE 2u /* Resource is allotted and HEB is programmed */
+#define WL_TWT_SUSPEND 3u /* Resource is suspended and HEB released */
+
+/* Wake type */
+/* TODO: not yet finalized */
+#define WL_TWT_TIME_TYPE_BSS 0u /* The time specified in wake_time_h/l is
+ * the BSS TSF time.
+ */
+#define WL_TWT_TIME_TYPE_OFFSET 1u /* The time specified in wake_time_h/l is an offset
+ * of the TSF time when the iovar is processed.
+ */
+#define WL_TWT_TIME_TYPE_AUTO 2u /* The target wake time is chosen internally by the FW */
+
+#define WL_TWT_SETUP_VER 0u
+
+/* HE TWT Setup command */
+typedef struct wl_twt_setup {
+ /* structure control */
+ uint16 version; /* structure version */
+ uint16 length; /* data length (starting after this field) */
+	struct ether_addr peer;	/* Peer address - leave it as all 0s for an AP */
+ uint8 pad[2];
+ wl_twt_sdesc_t desc; /* Setup Descriptor */
+ uint16 dialog; /* Deprecated - to be removed */
+ uint8 pad1[2];
+} wl_twt_setup_t;
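+
+/* Illustrative sketch: filling wl_twt_setup_t for an individual TWT with
+ * auto flow-id assignment and a firmware-chosen target wake time. memset is
+ * assumed with its usual <string.h> semantics; peer_ea is a caller-supplied
+ * STA address (all 0s when issued on an AP, per the comment above), and the
+ * wake duration/interval values are hypothetical.
+ */
+static void
+example_twt_setup_fill(wl_twt_setup_t *setup, const struct ether_addr *peer_ea)
+{
+	memset(setup, 0, sizeof(*setup));
+	setup->version = WL_TWT_SETUP_VER;
+	/* data length starts after the length field: drop version + length */
+	setup->length = (uint16)(sizeof(*setup) - 2u * sizeof(uint16));
+	setup->peer = *peer_ea;
+	setup->desc.setup_cmd = 0;	/* 0: request, per TWT_SETUP_CMD_XXXX in 802.11ah.h */
+	setup->desc.flow_id = WL_TWT_SETUP_FLOW_ID_AUTO;
+	setup->desc.bid = WL_TWT_SETUP_BCAST_ID_AUTO;
+	setup->desc.wake_type = WL_TWT_TIME_TYPE_AUTO;	/* FW picks the target wake time */
+	setup->desc.wake_dur = 8000u;	/* hypothetical: 8 ms wake duration (us) */
+	setup->desc.wake_int = 100000u;	/* hypothetical: 100 ms wake interval (us) */
+}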
+
+#define WL_TWT_CONFIG_VER 0u
+
+/* TWT Config command */
+typedef struct wl_twt_config {
+ /* structure control */
+ uint16 version; /* structure version */
+ uint16 length; /* data length (starting after this field) */
+	struct ether_addr peer;	/* Peer address. Leave it as all 0s for an AP */
+ uint8 pad[2];
+ wl_twt_cdesc_t desc; /* Config Descriptor */
+} wl_twt_config_t;
+
+#define WL_TWT_TEARDOWN_VER 0u
+
+/* twt teardown descriptor */
+typedef struct wl_twt_teardesc {
+ uint8 negotiation_type;
+ uint8 flow_id; /* must be between 0 and 7 */
+ uint8 bid; /* must be between 0 and 31 */
+ bool alltwt; /* all twt teardown - 0 or 1 */
+} wl_twt_teardesc_t;
+
+/* HE TWT Teardown command */
+typedef struct wl_twt_teardown {
+ /* structure control */
+ uint16 version; /* structure version */
+ uint16 length; /* data length (starting after this field) */
+ /* peer address */
+	struct ether_addr peer;	/* leave as all 0s for an AP */
+ wl_twt_teardesc_t teardesc; /* Teardown descriptor */
+
+ /* deprecated - to be removed */
+ uint8 flow_flags;
+ uint8 flow_id;
+ uint8 bid;
+ uint8 pad;
+} wl_twt_teardown_t;
+
+/* twt information descriptor */
+typedef struct wl_twt_infodesc {
+ uint8 flow_flags; /* See WL_TWT_INFO_FLAG_XXX below */
+ uint8 flow_id;
+ uint8 pad[2];
+ uint32 next_twt_h;
+ uint32 next_twt_l;
+ /* deprecated - to be removed */
+ uint8 wake_type;
+ uint8 pad1[3];
+} wl_twt_infodesc_t;
+
+/* Flow flags */
+#define WL_TWT_INFO_FLAG_ALL_TWT (1u << 0u) /* All TWT */
+#define WL_TWT_INFO_FLAG_RESUME (1u << 1u) /* 1 is TWT Resume, 0 is TWT Suspend */
+
+/* deprecated - to be removed */
+#define WL_TWT_INFO_FLAG_RESP_REQ (1 << 0) /* Response Requested */
+#define WL_TWT_INFO_FLAG_NEXT_TWT_REQ (1 << 1) /* Next TWT Request */
+#define WL_TWT_INFO_FLAG_BTWT_RESCHED (1 << 2) /* Broadcast Reschedule */
+typedef wl_twt_infodesc_t wl_twt_idesc_t;
+
+#define WL_TWT_INFO_VER 0u
+
+/* HE TWT Information command */
+typedef struct wl_twt_info {
+ /* structure control */
+ uint16 version; /* structure version */
+ uint16 length; /* data length (starting after this field) */
+ /* peer address */
+	struct ether_addr peer;	/* leave as all 0s for an AP */
+ uint8 pad[2];
+ wl_twt_infodesc_t infodesc; /* information descriptor */
+ /* deprecated - to be removed */
+ wl_twt_idesc_t desc;
+} wl_twt_info_t;
+
+/* wl twt status */
+#define WL_TWT_CMD_STATUS_VERSION_1 1u
+
+#define WL_TWT_STATUS_FLAG_SPPS_ENAB (1u << 0u)
+#define WL_TWT_STATUS_FLAG_WAKE_STATE (1u << 1u)
+#define WL_TWT_STATUS_FLAG_WAKE_OVERRIDE (1u << 2u)
+
+typedef struct wl_twt_status {
+ uint8 state; /* TWT State */
+ uint8 heb_id; /* HEB ID */
+ uint8 PAD[2];
+ struct ether_addr peer;
+ uint8 PAD[2];
+ wl_twt_sdesc_t desc; /* TWT Descriptor */
+} wl_twt_status_t;
+
+/* wl twt status output */
+typedef struct wl_twt_status_v1 {
+ uint16 version;
+ uint16 length;
+ uint8 num_fid; /* Number of individual TWT setup */
+ uint8 num_bid; /* Number of Broadcast TWT setup */
+	uint16 status_flags;	/* see WL_TWT_STATUS_FLAG_XXXX above */
+ wl_twt_status_t itwt_status[WL_TWT_MAX_ITWT];
+ wl_twt_status_t btwt_status[WL_TWT_MAX_BTWT];
+} wl_twt_status_v1_t;
+
+/* wl twt status command input */
+typedef struct wl_twt_status_cmd_v1 {
+ uint16 version;
+ uint16 length;
+ struct ether_addr peer;
+ uint8 PAD[2];
+} wl_twt_status_cmd_v1_t;
+
+#define WL_TWT_PEER_STATS_VERSION_1 1u
+typedef struct wl_twt_peer_stats_v1 {
+ uint16 version;
+ uint16 length;
+ struct ether_addr peer;
+ uint8 PAD[2];
+ uint8 id;
+ uint8 flow_flags;
+ uint8 PAD[2];
+ uint32 sp_seq; /* sequence number of the service period */
+ uint32 tx_ucast_pkts;
+ uint32 tx_pkts_min;
+ uint32 tx_pkts_max;
+ uint32 tx_pkts_avg;
+ uint32 tx_failures;
+ uint32 rx_ucast_pkts;
+ uint32 rx_pkts_min;
+ uint32 rx_pkts_max;
+ uint32 rx_pkts_avg;
+ uint32 rx_pkts_retried;
+} wl_twt_peer_stats_v1_t;
+
+#define WL_TWT_STATS_VERSION_1 1
+typedef struct wl_twt_stats_v1 {
+ uint16 version;
+ uint16 length;
+ uint32 num_stats; /* number of peer stats in the peer_stats_list */
+ wl_twt_peer_stats_v1_t peer_stats_list[];
+} wl_twt_stats_v1_t;
+
+#define WL_TWT_STATS_CMD_VERSION_1 1
+#define WL_TWT_STATS_CMD_FLAGS_RESET (1u << 0u)
+/* HE TWT stats command */
+typedef struct wl_twt_stats_cmd_v1 {
+ uint16 version;
+ uint16 length;
+ struct ether_addr peer;
+ uint8 PAD[2];
+ uint16 flags; /* see WL_TWT_STATS_CMD_FLAGS */
+ uint8 num_fid;
+ uint8 num_bid;
+ uint8 fid_list[WL_TWT_STATS_MAX_ITWT];
+ uint8 bid_list[WL_TWT_STATS_MAX_BTWT];
+} wl_twt_stats_cmd_v1_t;
+
+#define WL_TWT_RESP_CFG_VER 0u
+
+#define WL_TWT_CMD_RESP_CFG_TYPE_ALTERNATE 0u
+#define WL_TWT_CMD_RESP_CFG_TYPE_DICTATE 1u
+/* HE TWT resp command */
+typedef struct wl_twt_resp_cfg {
+ /* structure control */
+ uint16 version; /* Structure version */
+ uint16 length; /* Data length (starting after this field) */
+ uint8 dc_max; /* Max supported duty cycle for single TWT */
+	uint8 resp_type;	/* Resp. type (Alternate/Dictate) if duty cycle > max duty cycle */
+} wl_twt_resp_cfg_t;
+
+#define WL_TWT_CAP_CMD_VERSION_1 1u
+
+#define WL_TWT_CAP_FLAGS_REQ_SUPPORT (1u << 0u)
+#define WL_TWT_CAP_FLAGS_RESP_SUPPORT (1u << 1u)
+#define WL_TWT_CAP_FLAGS_BTWT_SUPPORT (1u << 2u)
+#define WL_TWT_CAP_FLAGS_FLEX_SUPPORT (1u << 3u)
+#define WL_TWT_CAP_FLAGS_TWT_REQUIRED (1u << 4u)
+
+/* HE TWT capabilities output */
+typedef struct wl_twt_cap {
+ uint16 version;
+ uint16 length;
+	uint16 device_cap;	/* see WL_TWT_CAP_FLAGS_XXXX above */
+	uint16 peer_cap;	/* see WL_TWT_CAP_FLAGS_XXXX above */
+} wl_twt_cap_t;
+
+/* HE TWT capabilities command input */
+typedef struct wl_twt_cap_cmd {
+ uint16 version;
+ uint16 length;
+ struct ether_addr peer;
+ uint8 PAD[2];
+} wl_twt_cap_cmd_t;
+
+/* EHT sub command IDs */
+enum {
+ WL_EHT_CMD_ENAB = 0u, /* enable/disable EHT feature as a whole */
+ WL_EHT_CMD_DBG = 1u, /* configure EHT debug facilities */
+ WL_EHT_CMD_FEATURES = 2u, /* configure EHT sub-features */
+ WL_EHT_CMD_LAST
+};
+
+/* Current version for wlc_clm_power_limits_req_t structure and flags */
+#define WLC_CLM_POWER_LIMITS_REQ_VERSION 1
+/* "clm_power_limits" iovar request structure */
+typedef struct wlc_clm_power_limits_req {
+ /* Input. Structure and flags version */
+ uint32 version;
+ /* Full length of buffer (includes this structure and space for TLV-encoded PPR) */
+ uint32 buflen;
+ /* Input. Flags (see WLC_CLM_POWER_LIMITS_INPUT_FLAG_... below) */
+ uint32 input_flags;
+ /* Input. CC of region whose data is being requested */
+ char cc[WLC_CNTRY_BUF_SZ];
+ /* Input. Channel/subchannel in chanspec_t format */
+ uint32 chanspec;
+ /* Subchannel encoded as clm_limits_type_t */
+ uint32 clm_subchannel;
+ /* Input. 0-based antenna index */
+ uint32 antenna_idx;
+ /* Output. General flags (see WLC_CLM_POWER_LIMITS_OUTPUT_FLAG_... below) */
+ uint32 output_flags;
+ /* Output. 2.4G country flags, encoded as clm_flags_t enum */
+ uint32 clm_country_flags_2g;
+ /* Output. 5G country flags, encoded as clm_flags_t enum */
+ uint32 clm_country_flags_5g;
+ /* Output. Length of TLV-encoded PPR data that follows this structure */
+ uint32 ppr_tlv_size;
+ /* Output. Beginning of buffer for TLV-encoded PPR data */
+ uint8 ppr_tlv[1];
+} wlc_clm_power_limits_req_t;
+
+/* Input. Do not apply SAR limits */
+#define WLC_CLM_POWER_LIMITS_INPUT_FLAG_NO_SAR 0x00000001
+/* Input. Do not apply board limits */
+#define WLC_CLM_POWER_LIMITS_INPUT_FLAG_NO_BOARD 0x00000002
+/* Output. Limits taken from product-specific country data */
+#define WLC_CLM_POWER_LIMITS_OUTPUT_FLAG_PRODUCT_LIMITS 0x00000001
+/* Output. Limits taken from product-specific worldwide data */
+#define WLC_CLM_POWER_LIMITS_OUTPUT_FLAG_WORLDWIDE_LIMITS 0x00000002
+/* Output. Limits taken from country-default (all-product) data */
+#define WLC_CLM_POWER_LIMITS_OUTPUT_FLAG_DEFAULT_COUNTRY_LIMITS 0x00000004
+
+#define WL_MBO_IOV_MAJOR_VER 1
+#define WL_MBO_IOV_MINOR_VER 1
+#define WL_MBO_IOV_MAJOR_VER_SHIFT 8
+#define WL_MBO_IOV_VERSION \
+ ((WL_MBO_IOV_MAJOR_VER << WL_MBO_IOV_MAJOR_VER_SHIFT)| WL_MBO_IOV_MINOR_VER)
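+
+/* Illustrative sketch: the major/minor packing used by WL_MBO_IOV_VERSION
+ * (and by the similar FILS/OCE/ESP version macros below) splits back out as
+ * major = version >> 8, minor = version & 0xFF.
+ */
+static void
+example_mbo_iov_version_split(uint16 version, uint8 *major, uint8 *minor)
+{
+	*major = (uint8)(version >> WL_MBO_IOV_MAJOR_VER_SHIFT);
+	*minor = (uint8)(version & 0xFFu);
+}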
+
+#define MBO_MAX_CHAN_PREF_ENTRIES 16
+
+enum wl_mbo_cmd_ids {
+ WL_MBO_CMD_ADD_CHAN_PREF = 1,
+ WL_MBO_CMD_DEL_CHAN_PREF = 2,
+ WL_MBO_CMD_LIST_CHAN_PREF = 3,
+ WL_MBO_CMD_CELLULAR_DATA_CAP = 4,
+ WL_MBO_CMD_DUMP_COUNTERS = 5,
+ WL_MBO_CMD_CLEAR_COUNTERS = 6,
+ WL_MBO_CMD_FORCE_ASSOC = 7,
+ WL_MBO_CMD_BSSTRANS_REJECT = 8,
+ WL_MBO_CMD_SEND_NOTIF = 9,
+	/* Unused command; this enum number can be reused
+	 * for the next new command
+	 */
+ WL_MBO_CMD_CLEAR_CHAN_PREF = 10,
+ WL_MBO_CMD_NBR_INFO_CACHE = 11,
+ WL_MBO_CMD_ANQPO_SUPPORT = 12,
+ WL_MBO_CMD_DBG_EVENT_CHECK = 13,
+ WL_MBO_CMD_EVENT_MASK = 14,
+ /* Add before this !! */
+ WL_MBO_CMD_LAST
+};
+
+enum wl_mbo_xtlv_id {
+ WL_MBO_XTLV_OPCLASS = 0x1,
+ WL_MBO_XTLV_CHAN = 0x2,
+ WL_MBO_XTLV_PREFERENCE = 0x3,
+ WL_MBO_XTLV_REASON_CODE = 0x4,
+ WL_MBO_XTLV_CELL_DATA_CAP = 0x5,
+ WL_MBO_XTLV_COUNTERS = 0x6,
+ WL_MBO_XTLV_ENABLE = 0x7,
+ WL_MBO_XTLV_SUB_ELEM_TYPE = 0x8,
+ WL_MBO_XTLV_BTQ_TRIG_START_OFFSET = 0x9,
+ WL_MBO_XTLV_BTQ_TRIG_RSSI_DELTA = 0xa,
+ WL_MBO_XTLV_ANQP_CELL_SUPP = 0xb,
+ WL_MBO_XTLV_BIT_MASK = 0xc
+};
+
+/* event bit mask flags for MBO */
+#define MBO_EVT_BIT_MASK_CELLULAR_SWITCH 0x0001 /* Evt bit mask to enab cellular switch */
+#define MBO_EVT_BIT_MASK_BTM_REQ_RCVD 0x0002 /* Evt bit mask to enab BTM req rcvd */
+
+typedef struct wl_mbo_counters {
+ /* No of transition req recvd */
+ uint16 trans_req_rcvd;
+ /* No of transition req with disassoc imminent */
+ uint16 trans_req_disassoc;
+ /* No of transition req with BSS Termination */
+ uint16 trans_req_bss_term;
+ /* No of trans req w/ unspecified reason */
+ uint16 trans_resn_unspec;
+ /* No of trans req w/ reason frame loss */
+ uint16 trans_resn_frm_loss;
+ /* No of trans req w/ reason traffic delay */
+ uint16 trans_resn_traffic_delay;
+	/* No of trans req w/ reason insufficient bandwidth */
+ uint16 trans_resn_insuff_bw;
+ /* No of trans req w/ reason load balance */
+ uint16 trans_resn_load_bal;
+ /* No of trans req w/ reason low rssi */
+ uint16 trans_resn_low_rssi;
+ /* No of trans req w/ reason excessive retransmission */
+ uint16 trans_resn_xcess_retransmn;
+ /* No of trans req w/ reason gray zone */
+ uint16 trans_resn_gray_zone;
+ /* No of trans req w/ reason switch to premium AP */
+ uint16 trans_resn_prem_ap_sw;
+ /* No of transition rejection sent */
+ uint16 trans_rejn_sent;
+ /* No of trans rejn reason excessive frame loss */
+ uint16 trans_rejn_xcess_frm_loss;
+ /* No of trans rejn reason excessive traffic delay */
+ uint16 trans_rejn_xcess_traffic_delay;
+ /* No of trans rejn reason insufficient QoS capability */
+ uint16 trans_rejn_insuffic_qos_cap;
+ /* No of trans rejn reason low RSSI */
+ uint16 trans_rejn_low_rssi;
+ /* No of trans rejn reason high interference */
+ uint16 trans_rejn_high_interference;
+	/* No of trans rejn reason service unavailable */
+ uint16 trans_rejn_service_unavail;
+ /* No of beacon request rcvd */
+ uint16 bcn_req_rcvd;
+ /* No of beacon report sent */
+ uint16 bcn_rep_sent;
+ /* No of null beacon report sent */
+ uint16 null_bcn_rep_sent;
+ /* No of wifi to cell switch */
+ uint16 wifi_to_cell;
+} wl_mbo_counters_t;
+
+#define WL_FILS_IOV_MAJOR_VER 1
+#define WL_FILS_IOV_MINOR_VER 1
+#define WL_FILS_IOV_MAJOR_VER_SHIFT 8
+#define WL_FILS_IOV_VERSION \
+ ((WL_FILS_IOV_MAJOR_VER << WL_FILS_IOV_MAJOR_VER_SHIFT)| WL_FILS_IOV_MINOR_VER)
+
+enum wl_fils_cmd_ids {
+ WL_FILS_CMD_ADD_IND_IE = 1,
+ WL_FILS_CMD_ADD_AUTH_DATA = 2, /* Deprecated, kept to prevent ROM invalidation */
+ WL_FILS_CMD_ADD_HLP_IE = 3,
+ WL_FILS_CMD_ADD_CONNECT_PARAMS = 4,
+ WL_FILS_CMD_GET_CONNECT_PARAMS = 5,
+ /* Add before this !! */
+ WL_FILS_CMD_LAST
+};
+
+enum wl_fils_xtlv_id {
+ WL_FILS_XTLV_IND_IE = 0x1,
+ WL_FILS_XTLV_AUTH_DATA = 0x2, /* Deprecated, kept to prevent ROM invalidation */
+ WL_FILS_XTLV_HLP_IE = 0x3,
+ WL_FILS_XTLV_ERP_USERNAME = 0x4,
+ WL_FILS_XTLV_ERP_REALM = 0x5,
+ WL_FILS_XTLV_ERP_RRK = 0x6,
+ WL_FILS_XTLV_ERP_NEXT_SEQ_NUM = 0x7,
+ WL_FILS_XTLV_KEK = 0x8,
+ WL_FILS_XTLV_PMK = 0x9,
+ WL_FILS_XTLV_TK = 0xa,
+ WL_FILS_XTLV_PMKID = 0xb
+};
+
+#define WL_OCE_IOV_MAJOR_VER 1
+#define WL_OCE_IOV_MINOR_VER 1
+#define WL_OCE_IOV_MAJOR_VER_SHIFT 8
+#define WL_OCE_IOV_VERSION \
+ ((WL_OCE_IOV_MAJOR_VER << WL_OCE_IOV_MAJOR_VER_SHIFT)| WL_OCE_IOV_MINOR_VER)
+
+enum wl_oce_cmd_ids {
+ WL_OCE_CMD_ENABLE = 1,
+ WL_OCE_CMD_PROBE_DEF_TIME = 2,
+ WL_OCE_CMD_FD_TX_PERIOD = 3,
+ WL_OCE_CMD_FD_TX_DURATION = 4,
+ WL_OCE_CMD_RSSI_TH = 5,
+ WL_OCE_CMD_RWAN_LINKS = 6,
+ WL_OCE_CMD_CU_TRIGGER = 7,
+ WL_OCE_CMD_TXPWR_USED = 8,
+ /* Add before this !! */
+ WL_OCE_CMD_LAST
+};
+
+enum wl_oce_xtlv_id {
+ WL_OCE_XTLV_ENABLE = 0x1,
+ WL_OCE_XTLV_PROBE_DEF_TIME = 0x2,
+ WL_OCE_XTLV_FD_TX_PERIOD = 0x3,
+ WL_OCE_XTLV_FD_TX_DURATION = 0x4,
+ WL_OCE_XTLV_RSSI_TH = 0x5,
+ WL_OCE_XTLV_RWAN_LINKS = 0x6,
+ WL_OCE_XTLV_CU_TRIGGER = 0x7,
+ WL_OCE_XTLV_TXPWR_USED = 0x8
+};
+
+/* Robust Audio Video (RAV), MSCS (Mirrored Stream Classification Service) commands */
+#define WL_RAV_MSCS_IOV_MAJOR_VER 1u
+#define WL_RAV_MSCS_IOV_MINOR_VER 1u
+#define WL_RAV_MSCS_IOV_MAJOR_VER_SHIFT 8u
+
+#define WL_RAV_MSCS_IOV_VERSION \
+ ((WL_RAV_MSCS_IOV_MAJOR_VER << WL_RAV_MSCS_IOV_MAJOR_VER_SHIFT)| WL_RAV_MSCS_IOV_MINOR_VER)
+
+enum wl_rav_mscs_cmd_ids {
+ WL_RAV_MSCS_CMD_CONFIG = 1u, /* MSCS configuration */
+ WL_RAV_MSCS_CMD_ENABLE = 2u, /* Activate/deactivate MSCS */
+ WL_RAV_MSCS_CMD_UP_BITMAP = 3u, /* User priority bitmap */
+ WL_RAV_MSCS_CMD_UP_LIMIT = 4u, /* User priority limit */
+ WL_RAV_MSCS_CMD_STREAM_TIMEOUT = 5u, /* Stream timeout for MSCS Request */
+ WL_RAV_MSCS_CMD_FC_TYPE = 6u, /* Frame classifier type, IPv4, IPv6, etc. */
+ WL_RAV_MSCS_CMD_FC_MASK = 7u, /* Specifies the frame classifier mask */
+ WL_RAV_MSCS_CMD_REQ_TYPE = 8u, /* Indicates the MSCS Request type (add/remove) */
+ WL_RAV_MSCS_CMD_ASSOC_NEG = 9u, /* MSCS negotiation in the association */
+
+ /* Add before this !! */
+ WL_RAV_MSCS_CMD_LAST
+};
+
+typedef enum wl_rav_mscs_xtlv_id {
+ WL_RAV_MSCS_XTLV_CONFIG = 1u,
+ WL_RAV_MSCS_XTLV_ENABLE = 2u,
+ WL_RAV_MSCS_XTLV_UP_BITMAP = 3u,
+ WL_RAV_MSCS_XTLV_UP_LIMIT = 4u,
+ WL_RAV_MSCS_XTLV_STREAM_TIMEOUT = 5u,
+ WL_RAV_MSCS_XTLV_FC_TYPE = 6u,
+ WL_RAV_MSCS_XTLV_FC_MASK = 7u,
+ WL_RAV_MSCS_XTLV_REQ_TYPE = 8u,
+ WL_RAV_MSCS_XTLV_ASSOC_NEG = 9u
+} wl_rav_mscs_xtlv_id_t;
+
+#define WL_ESP_IOV_MAJOR_VER 1
+#define WL_ESP_IOV_MINOR_VER 1
+#define WL_ESP_IOV_MAJOR_VER_SHIFT 8
+#define WL_ESP_IOV_VERSION \
+ ((WL_ESP_IOV_MAJOR_VER << WL_ESP_IOV_MAJOR_VER_SHIFT) | WL_ESP_IOV_MINOR_VER)
+
+enum wl_esp_cmd_ids {
+ WL_ESP_CMD_ENABLE = 1,
+ WL_ESP_CMD_STATIC = 2,
+ /* Add before this !! */
+ WL_ESP_CMD_LAST
+};
+
+enum wl_esp_xtlv_id {
+ WL_ESP_XTLV_ENABLE = 0x1,
+ WL_ESP_XTLV_STATIC_AC = 0x2, /* access category */
+ WL_ESP_XTLV_STATIC_TYPE = 0x3, /* data type */
+ WL_ESP_XTLV_STATIC_VAL = 0x4
+};
+
+#define WL_BCN_PROT_IOV_MAJOR_VER 1
+#define WL_BCN_PROT_IOV_MINOR_VER 1
+#define WL_BCN_PROT_IOV_MAJOR_VER_SHIFT 8
+#define WL_BCN_PROT_IOV_VERSION \
+ ((WL_BCN_PROT_IOV_MAJOR_VER << WL_BCN_PROT_IOV_MAJOR_VER_SHIFT) | WL_BCN_PROT_IOV_MINOR_VER)
+
+enum wl_bcn_prot_cmd_ids {
+ WL_BCN_PROT_CMD_ENABLE = 1,
+ WL_BCN_PROT_CMD_COUNTERS = 2,
+ /* Add before this !! */
+ WL_BCN_PROT_CMD_LAST
+};
+
+enum wl_bcn_prot_xtlv_id {
+ WL_BCN_PROT_XTLV_ENABLE = 0x1,
+ WL_BCN_PROT_XTLV_COUNTERS = 0x2
+};
+
+typedef struct wlc_bcn_prot_counters {
+ uint32 no_en_bit; /* counts beacons without bcn prot enable bit at ext cap */
+ uint32 no_mme_ie; /* counts beacons without mme ie */
+ uint32 mic_fails; /* counts beacons failed mic check */
+ uint32 replay_fails; /* counts beacons failed replay check */
+} wlc_bcn_prot_counters_t;
+#define BCN_PROT_COUNTERS_SIZE sizeof(wlc_bcn_prot_counters_t)
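+
+/* Illustrative sketch (not part of the driver): a host-side reader could
+ * copy the WL_BCN_PROT_XTLV_COUNTERS payload into the counters struct
+ * before use, since response buffers may be unaligned. The helper is
+ * hypothetical; BCME_* codes are the driver's usual error values.
+ */
+#if 0 /* usage sketch */
+static int
+bcn_prot_counters_decode(const uint8 *payload, uint16 payload_len,
+ wlc_bcn_prot_counters_t *out)
+{
+ if (payload_len < BCN_PROT_COUNTERS_SIZE)
+  return BCME_BUFTOOSHORT;
+ (void)memcpy(out, payload, BCN_PROT_COUNTERS_SIZE);
+ return BCME_OK;
+}
+#endif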
+
+#define WL_DRR_IOV_MAJOR_VER 1
+#define WL_DRR_IOV_MINOR_VER 1
+#define WL_DRR_IOV_MAJOR_VER_SHIFT 8
+#define WL_DRR_IOV_VERSION \
+ ((WL_DRR_IOV_MAJOR_VER << WL_DRR_IOV_MAJOR_VER_SHIFT) | WL_DRR_IOV_MINOR_VER)
+
+enum wl_drr_cmd_ids {
+ WL_DRR_CMD_ENABLE = 1u,
+ /* Add before this !! */
+ WL_DRR_CMD_LAST
+};
+
+enum wl_drr_xtlv_id {
+ WL_DRR_XTLV_ENABLE = 0x1u
+};
+
+/* otpread command */
+#define WL_OTPREAD_VER 1
+
+typedef struct {
+ uint16 version; /* cmd structure version */
+ uint16 cmd_len; /* cmd struct len */
+ uint32 rdmode; /* otp read mode */
+ uint32 rdoffset; /* byte offset into otp to start read */
+ uint32 rdsize; /* number of bytes to read */
+} wl_otpread_cmd_t;
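+
+/* Illustrative sketch (not part of the driver): populating the otpread
+ * command header before issuing the iovar. Field values are examples only.
+ */
+#if 0 /* usage sketch */
+ wl_otpread_cmd_t cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.version = WL_OTPREAD_VER;
+ cmd.cmd_len = sizeof(cmd);
+ cmd.rdmode = 0u; /* example read mode */
+ cmd.rdoffset = 0u; /* start at byte 0 of OTP */
+ cmd.rdsize = 64u; /* read 64 bytes */
+#endif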
+
+/* "otpecc_rows" command */
+typedef struct {
+ uint16 version; /* version of this structure */
+ uint16 len; /* len in bytes of this structure */
+ uint32 cmdtype; /* command type: 0: read row data, 1: ECC lock */
+ uint32 rowoffset; /* start row offset */
+ uint32 numrows; /* number of rows */
+ uint8 rowdata[]; /* read rows data */
+} wl_otpecc_rows_t;
+
+#define WL_OTPECC_ROWS_VER 1
+
+#define WL_OTPECC_ROWS_CMD_READ 0
+#define WL_OTPECC_ROWS_CMD_LOCK 1
+
+#define WL_OTPECC_ARGIDX_CMDTYPE 0 /* command type */
+#define WL_OTPECC_ARGIDX_ROWOFFSET 1 /* start row offset */
+#define WL_OTPECC_ARGIDX_NUMROWS 2 /* number of rows */
+
+/* "otpeccrows" raw data size per row */
+#define WL_ECCDUMP_ROW_SIZE_BYTE 6 /* 4 bytes row data + 2 bytes ECC status */
+#define WL_ECCDUMP_ROW_SIZE_WORD 3
+
+/* otpECCstatus */
+#define OTP_ECC_ENAB_SHIFT 13
+#define OTP_ECC_ENAB_MASK 0x7
+#define OTP_ECC_CORR_ST_SHIFT 12
+#define OTP_ECC_CORR_ST_MASK 0x1
+#define OTP_ECC_DBL_ERR_SHIFT 11
+#define OTP_ECC_DBL_ERR_MASK 0x1
+#define OTP_ECC_DED_ST_SHIFT 10
+#define OTP_ECC_DED_ST_MASK 0x1
+#define OTP_ECC_SEC_ST_SHIFT 9
+#define OTP_ECC_SEC_ST_MASK 0x1
+#define OTP_ECC_DATA_SHIFT 0
+#define OTP_ECC_DATA_MASK 0x7f
+
+/* OTP_ECC_CORR_ST field */
+#define OTP_ECC_MODE 1
+#define OTP_NO_ECC_MODE 0
+
+/* OTP_ECC_ENAB field (bits 15:13):
+ * When 2 or 3 bits are set,
+ * it indicates that OTP ECC is enabled on the last row read.
+ * Otherwise, ECC is disabled
+ */
+#define OTP_ECC_ENAB(val) \
+ (bcm_bitcount((uint8 *)&(val), sizeof(uint8)) > 1)
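+
+/* Illustrative sketch (not part of the driver): deciding whether ECC was
+ * enabled on the last row read, using the shift/mask pairs above. The
+ * helper name is hypothetical.
+ */
+#if 0 /* usage sketch */
+static bool
+otp_ecc_enabled(uint32 status)
+{
+ /* isolate the 3-bit enable field (bits 15:13) */
+ uint8 enab = (uint8)((status >> OTP_ECC_ENAB_SHIFT) & OTP_ECC_ENAB_MASK);
+
+ /* per the comment above: enabled when 2 or 3 of the bits are set */
+ return OTP_ECC_ENAB(enab);
+}
+#endif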
+
+/* otp command details */
+#define WL_OTP_IOV_MAJOR_VER 1u
+#define WL_OTP_IOV_MINOR_VER 1u
+#define WL_OTP_IOV_MAJOR_VER_SHIFT 8u
+#define WL_OTP_IOV_VERSION \
+ ((WL_OTP_IOV_MAJOR_VER << WL_OTP_IOV_MAJOR_VER_SHIFT) | WL_OTP_IOV_MINOR_VER)
+
+/* OTP Regions HW/SW */
+#define OTP_RGN_NONE 0u
+#define OTP_RGN_HW 1u
+#define OTP_RGN_SW 2u
+
+/* OTP Lock Regions */
+#define OTP_LOCK_RGN_NONE 0u
+#define OTP_LOCK_RGN_WAFER_SORT 1u
+#define OTP_LOCK_RGN_HASH_DATA 2u
+#define OTP_LOCK_RGN_FINAL_TEST 3u
+#define OTP_LOCK_RGN_AUTOLOAD 4u
+#define OTP_LOCK_RGN_UPPER_GU 5u
+#define OTP_LOCK_RGN_LOWER_GU 6u
+#define OTP_LOCK_RGN_HW_SW 7u
+#define OTP_LOCK_RGN_BT 8u
+#define OTP_LOCK_RGN_SECURE 9u
+#define OTP_LOCK_RGN_SECURE_V 10u
+#define OTP_LOCK_RGN_SECURE_VI_0 11u
+#define OTP_LOCK_RGN_SECURE_VI_1 12u
+
+enum wl_otp_cmd_ids {
+ WL_OTP_CMD_RGNSTATUS = 1u,
+ WL_OTP_CMD_RGNDUMP = 2u,
+ WL_OTP_CMD_RGNWRITE = 3u,
+ WL_OTP_CMD_SBOOT = 4u,
+ WL_OTP_CMD_RGNLOCK = 5u,
+
+ /* Add before this !!! */
+ WL_OTP_CMD_LAST
+};
+
+enum wl_otp_xtlv_id {
+ WL_OTP_XTLV_NONE = 0u, /* Not valid otp tag */
+ WL_OTP_XTLV_RGN = 1u, /* OTP region type */
+ WL_OTP_XTLV_ADDR = 2u, /* OTP region start address */
+ WL_OTP_XTLV_SIZE = 3u, /* OTP region size */
+ WL_OTP_XTLV_DATA = 4u, /* OTP dump data */
+
+ /* SBOOT TAGs: */
+ WL_OTP_XTLV_SBOOT_FW_SIG_ENABLE = 5u, /* FW signing enable bit */
+ WL_OTP_XTLV_SBOOT_FW_SIG_DISABLE = 6u, /* FW signing disable bit */
+ WL_OTP_XTLV_SBOOT_ROM_PROTECT_ENABLE = 7u, /* ROM protect enable bit */
+ WL_OTP_XTLV_SBOOT_ROM_PROTECT_PATCH = 8u, /* ROM protect from patch */
+ WL_OTP_XTLV_SBOOT_HOST_READ_NONSEC_EN = 9u, /* Host read non secure enable bit */
+ WL_OTP_XTLV_SBOOT_HOST_READ_NONSEC_DIS = 10u, /* Host read non secure disable bit */
+ WL_OTP_XTLV_SBOOT_HOST_WRITE_NONSEC_EN = 11u, /* Host write non secure enable bit */
+ WL_OTP_XTLV_SBOOT_HOST_WRITE_NONSEC_DIS = 12u, /* Host write non secure disable bit */
+ WL_OTP_XTLV_SBOOT_DBGREGS_PROTECT_ENAB = 13u, /* ARM DBG regs protect enable bit */
+ WL_OTP_XTLV_SBOOT_DBGREGS_PROTECT_DIS = 14u, /* ARM DBG regs protect disable bit */
+ WL_OTP_XTLV_SBOOT_JTAG_PROTECT_ENAB = 15u, /* JTAG protect enable bit */
+ WL_OTP_XTLV_SBOOT_JTAG_PROTECT_DIS = 16u, /* JTAG protect disable bit */
+ WL_OTP_XTLV_SBOOT_TCAM_PROTECT_SIZE = 17u, /* TCAM protect enable size field 8 bits */
+ WL_OTP_XTLV_SBOOT_ACTIVATE_SECURITY = 18u, /* Active security enable bit */
+ WL_OTP_XTLV_SBOOT_KEY_REVOC_BITS = 19u, /* Key revocation Bits field 16 bits */
+ WL_OTP_XTLV_SBOOT_CUSTOMER_PUBLIC_KEY_1 = 20u, /* Customer public key 1 field 257 bits */
+ WL_OTP_XTLV_SBOOT_CUSTOMER_PUBLIC_KEY_2 = 21u, /* Customer public key 2 field 257 bits */
+ WL_OTP_XTLV_SBOOT_LOT_NUM = 22u, /* Chip lot num 17 bits */
+ WL_OTP_XTLV_SBOOT_WAFER_NUM = 23u, /* Chip wafer num 5 bits */
+ WL_OTP_XTLV_SBOOT_WAFER_X = 24u, /* Chip wafer X 9 bits */
+ WL_OTP_XTLV_SBOOT_WAFER_Y = 25u, /* Chip wafer Y 9 bits */
+ WL_OTP_XTLV_SBOOT_UNLOCK_HASH_VAL = 26u, /* Unlock Hash Val 128 bits */
+};
+
+#define WL_LEAKY_AP_STATS_GT_TYPE 0
+#define WL_LEAKY_AP_STATS_PKT_TYPE 1
+typedef struct wlc_leaked_infra_guard_marker {
+ /* type field for this TLV: WL_LEAKY_AP_STATS_GT_TYPE */
+ uint16 type;
+ /* length field for this TLV */
+ uint16 len;
+ /* guard sample sequence number; incremented by 1 on every guard sample */
+ uint32 seq_number;
+ /* Guard time start time (tsf; PS indicated and acked) */
+ uint32 start_time;
+ /* tsf timestamp for the GT end event */
+ uint32 gt_tsf_l;
+ /* Guard time period in ms */
+ uint16 guard_duration;
+ /* Number PPDUs in the notification */
+ uint16 num_pkts;
+ /* Flags to indicate some states see below */
+ uint8 flag;
+ /* pad for 32-bit alignment */
+ uint8 reserved[3];
+} wlc_leaked_infra_guard_marker_t;
+
+/* Flag information */
+#define WL_LEAKED_GUARD_TIME_NONE 0 /* Not in any guard time */
+#define WL_LEAKED_GUARD_TIME_FRTS (0x01 << 0) /* Normal FRTS power save */
+#define WL_LEAKED_GUARD_TIME_SCAN (0x01 << 1) /* Channel switch due to scanning */
+
+#ifdef WLAWDL
+#define WL_LEAKED_GUARD_TIME_AWDL_PSF (0x01 << 2) /* Channel switch due to AWDL PSF */
+#define WL_LEAKED_GUARD_TIME_AWDL_AW (0x01 << 3) /* Channel switch due to AWDL AW */
+#endif /* WLAWDL */
+
+#define WL_LEAKED_GUARD_TIME_INFRA_STA (0x01 << 4) /* generic type infra sta channel switch */
+#define WL_LEAKED_GUARD_TIME_TERMINATED (0x01 << 7) /* indicate a GT is terminated early */
+
+typedef struct wlc_leaked_infra_packet_stat {
+ uint16 type; /* type field for this TLV: WL_LEAKY_AP_STATS_PKT_TYPE */
+ uint16 len; /* length field for this TLV */
+ uint16 ppdu_len_bytes; /* PPDU packet length in bytes */
+ uint16 num_mpdus; /* number of the MPDUs in the PPDU */
+ uint32 ppdu_time; /* PPDU arrival time at the beginning of the guard time */
+ uint32 rate; /* PPDU packet rate; Received packet's data rate */
+ uint16 seq_number; /* sequence number */
+ int8 rssi; /* RSSI */
+ uint8 tid; /* tid */
+} wlc_leaked_infra_packet_stat_t;
+
+/* Wake timer structure definition */
+#define WAKE_TIMER_VERSION 1
+#define WAKE_TIMER_NOLIMIT 0xFFFF
+
+typedef struct wake_timer {
+ uint16 ver;
+ uint16 len;
+ uint16 limit; /* number of events to deliver
+ * 0-disable, 0xffff-indefinite, num_events otherwise
+ */
+ uint16 count; /* number of events delivered since enable (get only) */
+ uint16 period; /* timeout/period in milliseconds */
+} wake_timer_t;
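+
+/* Illustrative sketch (not part of the driver): requesting an indefinite
+ * 100 ms wake timer, following the 'limit' semantics documented above.
+ */
+#if 0 /* usage sketch */
+ wake_timer_t wt;
+
+ memset(&wt, 0, sizeof(wt));
+ wt.ver = WAKE_TIMER_VERSION;
+ wt.len = sizeof(wt);
+ wt.limit = WAKE_TIMER_NOLIMIT; /* deliver events indefinitely */
+ wt.period = 100; /* ms */
+#endif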
+
+typedef struct wl_desense_restage_gain {
+ uint16 version;
+ uint16 length;
+ uint32 band;
+ uint8 num_cores;
+ uint8 desense_array[WL_TX_CHAINS_MAX];
+ uint8 PAD[3];
+} wl_desense_restage_gain_t;
+
+#define MAX_UCM_CHAINS 5
+#define MAX_UCM_PROFILES 10
+#define UCM_PROFILE_VERSION_1 1
+
+/* UCM per chain attribute struct */
+typedef struct wlc_btcx_chain_attr {
+ uint16 length; /* chain attr length, version is same as profile version */
+ int8 desense_level; /* per chain desense level */
+ int8 ack_pwr_strong_rssi; /* per chain ack power at strong rssi */
+ int8 ack_pwr_weak_rssi; /* per chain ack power at weak rssi */
+ int8 tx_pwr_strong_rssi; /* per chain tx power at strong rssi */
+ int8 tx_pwr_weak_rssi; /* per chain tx power at weak rssi */
+ uint8 PAD[1]; /* additional bytes for alignment */
+} wlc_btcx_chain_attr_t;
+
+typedef struct wlc_btcx_profile_v1 {
+ uint16 version; /* UCM profile version */
+ uint16 length; /* profile size */
+ uint16 fixed_length; /* size of the fixed portion of the profile */
+ uint8 init; /* profile initialized or not */
+ uint8 chain_attr_count; /* Number of elements in chain_attr array */
+ uint8 profile_index; /* profile index */
+ uint8 mode_strong_wl_bt; /* Mode under strong WLAN and BT RSSI */
+ uint8 mode_weak_wl; /* Mode under weak WLAN RSSI */
+ uint8 mode_weak_bt; /* Mode under weak BT RSSI */
+ uint8 mode_weak_wl_bt; /* Mode under weak BT and WLAN RSSI */
+ int8 mode_wl_hi_lo_rssi_thresh; /* Strong to weak WLAN RSSI threshold for mode selection */
+ int8 mode_wl_lo_hi_rssi_thresh; /* Weak to strong WLAN RSSI threshold for mode selection */
+ int8 mode_bt_hi_lo_rssi_thresh; /* Strong to weak BT RSSI threshold for mode selection */
+ int8 mode_bt_lo_hi_rssi_thresh; /* Weak to strong BT RSSI threshold for mode selection */
+ int8 desense_wl_hi_lo_rssi_thresh; /* Strong to weak RSSI threshold for desense */
+ int8 desense_wl_lo_hi_rssi_thresh; /* Weak to strong RSSI threshold for desense */
+ int8 ack_pwr_wl_hi_lo_rssi_thresh; /* Strong to weak RSSI threshold for ACK power */
+ int8 ack_pwr_wl_lo_hi_rssi_thresh; /* Weak to strong RSSI threshold for ACK power */
+ int8 tx_pwr_wl_hi_lo_rssi_thresh; /* Strong to weak RSSI threshold for Tx power */
+ int8 tx_pwr_wl_lo_hi_rssi_thresh; /* Weak to strong RSSI threshold for Tx power */
+ uint8 PAD[1]; /* additional bytes for 4 byte alignment */
+ wlc_btcx_chain_attr_t chain_attr[]; /* variable length array with chain attributes */
+} wlc_btcx_profile_v1_t;
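+
+/* Illustrative sketch (not part of the driver): sizing/initializing a v1
+ * UCM profile with a variable number of per-chain attributes. 'length'
+ * covers the whole profile; 'fixed_length' only the part before the
+ * flexible array. The helper is hypothetical; the caller must supply a
+ * buffer of at least 'length' bytes.
+ */
+#if 0 /* usage sketch */
+static void
+btcx_profile_v1_init(wlc_btcx_profile_v1_t *prof, uint8 nchains)
+{
+ prof->version = UCM_PROFILE_VERSION_1;
+ prof->fixed_length = (uint16)sizeof(*prof);
+ prof->length = (uint16)(sizeof(*prof) +
+  nchains * sizeof(wlc_btcx_chain_attr_t));
+ prof->chain_attr_count = nchains;
+}
+#endif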
+
+#define UCM_PROFILE_VERSION_2 2u
+
+typedef struct wlc_btcx_profile_v2 {
+ uint16 version; /* UCM profile version */
+ uint16 length; /* profile size */
+ uint16 fixed_length; /* size of the fixed portion of the profile */
+ uint8 init; /* profile initialized or not */
+ uint8 chain_attr_count; /* Number of elements in chain_attr array */
+ uint8 profile_index; /* profile index */
+ uint8 mode_strong_wl_bt; /* Mode under strong WLAN and BT RSSI */
+ uint8 mode_weak_wl; /* Mode under weak WLAN RSSI */
+ uint8 mode_weak_bt; /* Mode under weak BT RSSI */
+ uint8 mode_weak_wl_bt; /* Mode under weak BT and WLAN RSSI */
+ int8 mode_wl_hi_lo_rssi_thresh; /* Strong to weak WLAN RSSI threshold for mode selection */
+ int8 mode_wl_lo_hi_rssi_thresh; /* Weak to strong WLAN RSSI threshold for mode selection */
+ int8 mode_bt_hi_lo_rssi_thresh; /* Strong to weak BT RSSI threshold for mode selection */
+ int8 mode_bt_lo_hi_rssi_thresh; /* Weak to strong BT RSSI threshold for mode selection */
+ int8 desense_wl_hi_lo_rssi_thresh; /* Strong to weak RSSI threshold for desense */
+ int8 desense_wl_lo_hi_rssi_thresh; /* Weak to strong RSSI threshold for desense */
+ int8 ack_pwr_wl_hi_lo_rssi_thresh; /* Strong to weak RSSI threshold for ACK power */
+ int8 ack_pwr_wl_lo_hi_rssi_thresh; /* Weak to strong RSSI threshold for ACK power */
+ int8 tx_pwr_wl_hi_lo_rssi_thresh; /* Strong to weak RSSI threshold for Tx power */
+ int8 tx_pwr_wl_lo_hi_rssi_thresh; /* Weak to strong RSSI threshold for Tx power */
+ uint8 hybrid_ant_core_config; /* Select antenna configuration for hybrid */
+ wlc_btcx_chain_attr_t chain_attr[]; /* variable length array with chain attributes */
+} wlc_btcx_profile_v2_t;
+
+#define SSSR_D11_RESET_SEQ_STEPS 5u
+#define SSSR_HWA_RESET_SEQ_STEPS 8u
+
+#define SSSR_REG_INFO_VER_0 0u
+#define SSSR_REG_INFO_VER_1 1u
+#define SSSR_REG_INFO_VER_2 2u
+#define SSSR_REG_INFO_VER_3 3u
+
+typedef struct sssr_reg_info_v0 {
+ uint16 version;
+ uint16 length; /* length of the structure validated at host */
+ struct {
+ struct {
+ uint32 pmuintmask0;
+ uint32 pmuintmask1;
+ uint32 resreqtimer;
+ uint32 macresreqtimer;
+ uint32 macresreqtimer1;
+ } base_regs;
+ } pmu_regs;
+ struct {
+ struct {
+ uint32 intmask;
+ uint32 powerctrl;
+ uint32 clockcontrolstatus;
+ uint32 powerctrl_mask;
+ } base_regs;
+ } chipcommon_regs;
+ struct {
+ struct {
+ uint32 clockcontrolstatus;
+ uint32 clockcontrolstatus_val;
+ } base_regs;
+ struct {
+ uint32 resetctrl;
+ uint32 itopoobb;
+ } wrapper_regs;
+ } arm_regs;
+ struct {
+ struct {
+ uint32 ltrstate;
+ uint32 clockcontrolstatus;
+ uint32 clockcontrolstatus_val;
+ } base_regs;
+ struct {
+ uint32 itopoobb;
+ } wrapper_regs;
+ } pcie_regs;
+ struct {
+ struct {
+ uint32 ioctrl;
+ } wrapper_regs;
+ uint32 vasip_sr_addr;
+ uint32 vasip_sr_size;
+ } vasip_regs;
+ struct {
+ struct {
+ uint32 xmtaddress;
+ uint32 xmtdata;
+ uint32 clockcontrolstatus;
+ uint32 clockcontrolstatus_val;
+ } base_regs;
+ struct {
+ uint32 resetctrl;
+ uint32 itopoobb;
+ uint32 ioctrl;
+ uint32 ioctrl_resetseq_val[SSSR_D11_RESET_SEQ_STEPS];
+ } wrapper_regs;
+ uint32 sr_size;
+ } mac_regs[MAX_NUM_D11CORES];
+} sssr_reg_info_v0_t;
+
+typedef struct sssr_reg_info_v1 {
+ uint16 version;
+ uint16 length; /* length of the structure validated at host */
+ struct {
+ struct {
+ uint32 pmuintmask0;
+ uint32 pmuintmask1;
+ uint32 resreqtimer;
+ uint32 macresreqtimer;
+ uint32 macresreqtimer1;
+ } base_regs;
+ } pmu_regs;
+ struct {
+ struct {
+ uint32 intmask;
+ uint32 powerctrl;
+ uint32 clockcontrolstatus;
+ uint32 powerctrl_mask;
+ } base_regs;
+ } chipcommon_regs;
+ struct {
+ struct {
+ uint32 clockcontrolstatus;
+ uint32 clockcontrolstatus_val;
+ } base_regs;
+ struct {
+ uint32 resetctrl;
+ uint32 itopoobb;
+ } wrapper_regs;
+ } arm_regs;
+ struct {
+ struct {
+ uint32 ltrstate;
+ uint32 clockcontrolstatus;
+ uint32 clockcontrolstatus_val;
+ } base_regs;
+ struct {
+ uint32 itopoobb;
+ } wrapper_regs;
+ } pcie_regs;
+ struct {
+ struct {
+ uint32 ioctrl;
+ } wrapper_regs;
+ uint32 vasip_sr_addr;
+ uint32 vasip_sr_size;
+ } vasip_regs;
+ struct {
+ struct {
+ uint32 xmtaddress;
+ uint32 xmtdata;
+ uint32 clockcontrolstatus;
+ uint32 clockcontrolstatus_val;
+ } base_regs;
+ struct {
+ uint32 resetctrl;
+ uint32 itopoobb;
+ uint32 ioctrl;
+ uint32 ioctrl_resetseq_val[SSSR_D11_RESET_SEQ_STEPS];
+ } wrapper_regs;
+ uint32 sr_size;
+ } mac_regs[MAX_NUM_D11CORES];
+ struct {
+ uint32 dig_sr_addr;
+ uint32 dig_sr_size;
+ } dig_mem_info;
+} sssr_reg_info_v1_t;
+
+#define MAX_NUM_D11_CORES_WITH_SCAN 3u
+
+typedef struct sssr_reg_info_v2 {
+ uint16 version;
+ uint16 length; /* length of the structure validated at host */
+ struct {
+ struct {
+ uint32 pmuintmask0;
+ uint32 pmuintmask1;
+ uint32 resreqtimer;
+ uint32 macresreqtimer;
+ uint32 macresreqtimer1;
+ uint32 macresreqtimer2;
+ } base_regs;
+ } pmu_regs;
+ struct {
+ struct {
+ uint32 intmask;
+ uint32 powerctrl;
+ uint32 clockcontrolstatus;
+ uint32 powerctrl_mask;
+ } base_regs;
+ } chipcommon_regs;
+ struct {
+ struct {
+ uint32 clockcontrolstatus;
+ uint32 clockcontrolstatus_val;
+ } base_regs;
+ struct {
+ uint32 resetctrl;
+ uint32 extrsrcreq;
+ } wrapper_regs;
+ } arm_regs;
+ struct {
+ struct {
+ uint32 ltrstate;
+ uint32 clockcontrolstatus;
+ uint32 clockcontrolstatus_val;
+ } base_regs;
+ struct {
+ uint32 extrsrcreq;
+ } wrapper_regs;
+ } pcie_regs;
+ struct {
+ struct {
+ uint32 xmtaddress;
+ uint32 xmtdata;
+ uint32 clockcontrolstatus;
+ uint32 clockcontrolstatus_val;
+ } base_regs;
+ struct {
+ uint32 resetctrl;
+ uint32 extrsrcreq;
+ uint32 ioctrl;
+ uint32 ioctrl_resetseq_val[SSSR_D11_RESET_SEQ_STEPS];
+ } wrapper_regs;
+ uint32 sr_size;
+ } mac_regs[MAX_NUM_D11_CORES_WITH_SCAN];
+ struct {
+ uint32 dig_sr_addr;
+ uint32 dig_sr_size;
+ } dig_mem_info;
+} sssr_reg_info_v2_t;
+
+typedef struct sssr_reg_info_v3 {
+ uint16 version;
+ uint16 length; /* length of the structure validated at host */
+ struct {
+ struct {
+ uint32 pmuintmask0;
+ uint32 pmuintmask1;
+ uint32 resreqtimer;
+ uint32 macresreqtimer;
+ uint32 macresreqtimer1;
+ uint32 macresreqtimer2;
+ } base_regs;
+ } pmu_regs;
+ struct {
+ struct {
+ uint32 intmask;
+ uint32 powerctrl;
+ uint32 clockcontrolstatus;
+ uint32 powerctrl_mask;
+ } base_regs;
+ } chipcommon_regs;
+ struct {
+ struct {
+ uint32 clockcontrolstatus;
+ uint32 clockcontrolstatus_val;
+ } base_regs;
+ struct {
+ uint32 resetctrl;
+ uint32 extrsrcreq;
+ } wrapper_regs;
+ } arm_regs;
+ struct {
+ struct {
+ uint32 ltrstate;
+ uint32 clockcontrolstatus;
+ uint32 clockcontrolstatus_val;
+ } base_regs;
+ struct {
+ uint32 extrsrcreq;
+ } wrapper_regs;
+ } pcie_regs;
+ struct {
+ struct {
+ uint32 xmtaddress;
+ uint32 xmtdata;
+ uint32 clockcontrolstatus;
+ uint32 clockcontrolstatus_val;
+ } base_regs;
+ struct {
+ uint32 resetctrl;
+ uint32 extrsrcreq;
+ uint32 ioctrl;
+ uint32 ioctrl_resetseq_val[SSSR_D11_RESET_SEQ_STEPS];
+ } wrapper_regs;
+ uint32 sr_size;
+ } mac_regs[MAX_NUM_D11_CORES_WITH_SCAN];
+ struct {
+ uint32 dig_sr_addr;
+ uint32 dig_sr_size;
+ } dig_mem_info;
+ uint32 fis_enab;
+ struct {
+ struct {
+ uint32 clkenable;
+ uint32 clkgatingenable;
+ uint32 clkext;
+ uint32 clkctlstatus;
+ } base_regs;
+ struct {
+ uint32 ioctrl;
+ uint32 resetctrl;
+ } wrapper_regs;
+ uint32 hwa_resetseq_val[SSSR_HWA_RESET_SEQ_STEPS];
+ } hwa_regs;
+} sssr_reg_info_v3_t;
+
+#ifndef SSSR_REG_INFO_HAS_ALIAS
+typedef sssr_reg_info_v0_t sssr_reg_info_t;
+#define SSSR_REG_INFO_VER SSSR_REG_INFO_VER_0
+#endif
+
+/* A wrapper structure for all versions of SSSR register information structures */
+typedef union sssr_reg_info {
+ sssr_reg_info_v0_t rev0;
+ sssr_reg_info_v1_t rev1;
+ sssr_reg_info_v2_t rev2;
+ sssr_reg_info_v3_t rev3;
+} sssr_reg_info_cmn_t;
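+
+/* Illustrative sketch (not part of the driver): all SSSR revisions begin
+ * with (uint16 version, uint16 length), so a host can read the version
+ * through any member and then dispatch. The helper is hypothetical.
+ */
+#if 0 /* usage sketch */
+static uint16
+sssr_reg_info_length(const sssr_reg_info_cmn_t *info)
+{
+ switch (info->rev0.version) {
+ case SSSR_REG_INFO_VER_0: return info->rev0.length;
+ case SSSR_REG_INFO_VER_1: return info->rev1.length;
+ case SSSR_REG_INFO_VER_2: return info->rev2.length;
+ case SSSR_REG_INFO_VER_3: return info->rev3.length;
+ default: return 0;
+ }
+}
+#endif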
+
+/* ADaptive Power Save(ADPS) structure definition */
+#define WL_ADPS_IOV_MAJOR_VER 1
+#define WL_ADPS_IOV_MINOR_VER 0
+#define WL_ADPS_IOV_MAJOR_VER_SHIFT 8
+#define WL_ADPS_IOV_VER \
+ ((WL_ADPS_IOV_MAJOR_VER << WL_ADPS_IOV_MAJOR_VER_SHIFT) | WL_ADPS_IOV_MINOR_VER)
+
+#define ADPS_NUM_DIR 2
+#define ADPS_RX 0
+#define ADPS_TX 1
+
+#define WL_ADPS_IOV_MODE 0x0001
+#define WL_ADPS_IOV_RSSI 0x0002
+#define WL_ADPS_IOV_DUMP 0x0003
+#define WL_ADPS_IOV_DUMP_CLEAR 0x0004
+#define WL_ADPS_IOV_SUSPEND 0x0005
+#define WL_ADPS_IOV_GAIN 0x0006
+#define WL_ADPS_IOV_RESET_GAIN 0x0007
+
+#define ADPS_SUMMARY_STEP_NUM 2
+#define ADPS_SUMMARY_STEP_LOW 0
+#define ADPS_SUMMARY_STEP_HIGH 1
+
+#define ADPS_SUB_IOV_VERSION_1 1
+#define ADPS_SUB_IOV_VERSION_2 2
+
+/* suspend/resume ADPS by wl/private command from host */
+#define ADPS_RESUME 0u
+#define ADPS_SUSPEND 1u
+
+typedef struct wl_adps_params_v1 {
+ uint16 version;
+ uint16 length;
+ uint8 band; /* band - 2G or 5G */
+ uint8 mode; /* operation mode, default = 0 (ADPS disable) */
+ uint16 padding;
+} wl_adps_params_v1_t;
+
+typedef struct wl_adps_rssi {
+ int32 thresh_hi; /* rssi threshold to resume ADPS operation */
+ int32 thresh_lo; /* rssi threshold to suspend ADPS operation */
+} wl_adps_rssi_t;
+
+typedef struct wl_adps_rssi_params_v1 {
+ uint16 version;
+ uint16 length;
+ uint8 band;
+ uint8 padding[3];
+ wl_adps_rssi_t rssi;
+} wl_adps_rssi_params_v1_t;
+
+typedef struct adps_stat_elem {
+ uint32 duration; /* each step duration time (mSec) */
+ uint32 counts; /* each step hit count number */
+} adps_stat_elem_t;
+
+typedef struct wl_adps_dump_summary_v1 {
+ uint16 version;
+ uint16 length;
+ uint8 mode; /* operation mode: On/Off */
+ uint8 flags; /* restrict flags */
+ uint8 current_step; /* current step */
+ uint8 padding;
+ adps_stat_elem_t stat[ADPS_SUMMARY_STEP_NUM]; /* statistics */
+} wl_adps_dump_summary_v1_t;
+
+typedef struct wl_adps_dump_summary_v2 {
+ uint16 version;
+ uint16 length;
+ uint8 mode; /* operation mode: On/Off */
+ uint8 current_step; /* current step */
+ uint8 padding[2];
+ uint32 flags; /* restrict flags */
+ adps_stat_elem_t stat[ADPS_SUMMARY_STEP_NUM]; /* statistics */
+} wl_adps_dump_summary_v2_t;
+
+typedef struct wl_adps_suspend_v1 {
+ uint16 version;
+ uint16 length;
+ uint8 suspend; /* 1: suspend 0: resume */
+ uint8 padding[3];
+} wl_adps_suspend_v1_t;
+
+/* For ADPS energy gain */
+typedef struct wlc_adps_energy_gain_data {
+ uint32 pm_dur_gain; /* gain of pm duration by ADPS */
+ uint32 step0_dur; /* duration of ADPS STEP0(PM MAX + PSPOLL PRD) */
+ uint32 wake_up_dur; /* duration of wake up state */
+ uint32 pad;
+ uint64 tx_tot_bytes; /* Total Tx bytes */
+} wlc_adps_energy_gain_data_t;
+
+typedef struct wl_adps_energy_gain_v1 {
+ uint16 version;
+ uint16 length;
+ uint32 pad;
+ wlc_adps_energy_gain_data_t gain_data[MAX_BANDS];
+} wl_adps_energy_gain_v1_t;
+
+typedef struct wlc_btc_2gchain_dis {
+ uint16 ver;
+ uint16 len;
+ uint8 chain_dis;
+ uint8 flag;
+} wlc_btc_2gchain_dis_t;
+
+/* TDLS structure definition */
+#define WL_TDLS_T_VERSION_V1 1
+typedef struct wl_tdls_dump_summary_v1 {
+ uint16 version;
+ uint16 length; /* length of the entire structure */
+ uint32 txsetupreq; /* tdls setup req sent */
+ uint32 txsetupresp; /* tdls setup resp sent */
+ uint32 txsetupcfm; /* tdls setup confirm sent */
+ uint32 txteardown; /* tdls teardown frames sent */
+ uint32 txptireq; /* tdls pti req frames sent */
+ uint32 txptiresp; /* tdls pti resp frames sent */
+ uint32 txchswreq; /* tdls chsw req frames sent */
+ uint32 txchswresp; /* tdls chsw resp frame sent */
+ uint32 rxsetupreq; /* tdls setup req rcvd */
+ uint32 rxdsetupresp; /* tdls setup resp rcvd */
+ uint32 rxsetupcfm; /* tdls setup confirm rcvd */
+ uint32 rxteardown; /* tdls teardown frames rcvd */
+ uint32 rxptireq; /* tdls pti req frames rcvd */
+ uint32 rxptiresp; /* tdls pti resp frames rcvd */
+ uint32 rxchswreq; /* tdls chsw req frames rcvd */
+ uint32 rxchswresp; /* tdls chsw resp frames rcvd */
+ uint32 discard; /* frames discarded due to full buffer */
+ uint32 ubuffered; /* frames buffered by TDLS txmod */
+ uint32 buf_reinserted; /* frames reinserted */
+ uint32 idletime; /* time since no traffic on tdls link */
+ uint32 uptime; /* time since tdls link connected */
+ uint32 tx_cnt; /* frames txed over tdls link */
+ uint32 rx_cnt; /* frames rcvd over tdls link */
+ uint32 blist_cnt; /* number of tdls blacklist entries */
+ uint32 scb_flags; /* connected tdls scb flags */
+ struct ether_addr peer_addr; /* connected peer addr */
+ uint8 padding[2];
+} wl_tdls_dump_summary_v1_t;
+
+#define WLC_BTC_2GCHAIN_DIS_REASSOC 0x1
+#define WLC_BTC_2GCHAIN_DIS_VER1 0x1
+#define WLC_BTC_2GCHAIN_DIS_VER1_LEN 6
+
+/* --- BTCX WiFi Protection (btc_wifi_prot iovar) --- */
+
+/* Current iovar structure version: 1 */
+#define WL_BTC_WIFI_PROT_VER_1 1
+
+typedef struct wl_btc_wifi_prot_v1 {
+ uint16 ver; /* version */
+ uint16 len; /* total length */
+ uint8 data[]; /* bcm_xtlv_t payload */
+} wl_btc_wifi_prot_v1_t;
+
+/* Xtlv tags (protection type) and data */
+#define WL_BTC_WIFI_PROT_M1_M4 1
+typedef struct wl_btc_wifi_prot_m1_m4 {
+ uint32 enable; /* enable/disable m1-m4 protection */
+ uint32 timeout; /* maximum timeout in ms (0: default) */
+} wl_btc_wifi_prot_m1_m4_t;
+
+#define WL_BTC_WIFI_PROT_ENABLE 1
+#define WL_BTC_WIFI_PROT__DISABLE 0
+
+/* --- End BTCX WiFi Protection --- */
+
+/* --- BTCX ULMU disable (btc_ulmu_config iovar) --- */
+
+/* Version number */
+#define WL_BTC_ULMU_CONFIG_VER_1 1
+typedef struct wl_btc_ulmu_config_v1 {
+ uint16 version; /* btc_ulmu_config version */
+ uint16 len; /* Total length */
+ uint32 ulmu_bt_task_bm; /* BT Task bitmap for ULMU disable */
+ uint32 ulmu_bt_period_th; /* BT period thresh for ULMU disable */
+} wl_btc_ulmu_config_v1_t;
+
+/* --- End BTCX ULMU config --- */
+
+#define RPSNOA_IOV_MAJOR_VER 1
+#define RPSNOA_IOV_MINOR_VER 1
+#define RPSNOA_IOV_MAJOR_VER_SHIFT 8
+#define RPSNOA_IOV_VERSION \
+ ((RPSNOA_IOV_MAJOR_VER << RPSNOA_IOV_MAJOR_VER_SHIFT) | RPSNOA_IOV_MINOR_VER)
+
+enum wl_rpsnoa_cmd_ids {
+ WL_RPSNOA_CMD_ENABLE = 1,
+ WL_RPSNOA_CMD_STATUS,
+ WL_RPSNOA_CMD_PARAMS,
+ WL_RPSNOA_CMD_LAST
+};
+
+typedef struct rpsnoa_cmnhdr {
+ uint16 ver; /* cmd structure version */
+ uint16 len; /* cmd structure len */
+ uint32 subcmd;
+ uint32 cnt;
+} rpsnoa_cmnhdr_t;
+
+typedef struct rpsnoa_data {
+ int16 band;
+ int16 value;
+} rpsnoa_data_t;
+
+typedef struct rpsnoa_stats {
+ int16 band;
+ int16 state;
+ uint32 sleep_dur;
+ uint32 sleep_avail_dur;
+ uint32 last_pps;
+} rpsnoa_stats_t;
+
+typedef struct rpsnoa_param {
+ uint16 band;
+ uint8 level;
+ uint8 stas_assoc_check;
+ uint32 pps;
+ uint32 quiet_time;
+} rpsnoa_param_t;
+
+typedef struct rpsnoa_iovar {
+ rpsnoa_cmnhdr_t hdr;
+ rpsnoa_data_t data[1];
+} rpsnoa_iovar_t;
+
+typedef struct rpsnoa_iovar_status {
+ rpsnoa_cmnhdr_t hdr;
+ rpsnoa_stats_t stats[1];
+} rpsnoa_iovar_status_t;
+
+typedef struct rpsnoa_iovar_params {
+ rpsnoa_cmnhdr_t hdr;
+ rpsnoa_param_t param[1];
+} rpsnoa_iovar_params_t;
+
+/* Per-interface reportable stats types */
+enum wl_ifstats_xtlv_id {
+ /* global */
+ WL_IFSTATS_XTLV_SLICE_INDEX = 1,
+ WL_IFSTATS_XTLV_IF_INDEX = 2,
+ WL_IFSTATS_XTLV_MAC_ADDR = 3,
+ WL_IFSTATS_XTLV_REPORT_CMD = 4, /* Comes in an iovar */
+ WL_IFSTATS_XTLV_BUS_PCIE = 5,
+ WL_STATS_XTLV_BUS_PCIE_TX_HISTOGRAMS = 6,
+ WL_STATS_XTLV_BUS_PCIE_TX_QUEUE_DEPTH = 7,
+ /* history of blocks freed most recently */
+ WL_STATS_XTLV_FBINFO_STATS = 8,
+
+ /* Report data across all SCBs using ecounters */
+ /* STA_info ecounters */
+ WL_IFSTATS_XTLV_WL_STA_INFO_ECOUNTERS = 0x100,
+ /* For AMPDU stat sub-types requested in a different format:
+ * these could sum and report stats across slices, OR
+ * report sub-types in pairs so the host can sum and add.
+ * Information sent here is across slices, therefore global.
+ */
+ WL_IFSTATS_XTLV_TX_AMPDU_STATS = 0x101,
+ WL_IFSTATS_XTLV_RX_AMPDU_STATS = 0x102,
+ /* scb ecounter statistics */
+ WL_IFSTATS_XTLV_SCB_ECOUNTERS = 0x103,
+ /* Global NAN stats */
+ WL_IFSTATS_XTLV_NAN_STATS = 0x104,
+ WL_IFSTATS_XTLV_CHAN_STATS = 0x105,
+ /* TDLS state */
+ WL_IFSTATS_XTLV_IF_TDLS_STATE = 0x106,
+ WL_IFSTATS_XTLV_KEY_PLUMB_INFO = 0x107,
+ /* HE TX related stats */
+ WL_IFSTATS_XTLV_HE_TXMU_STATS = 0x108,
+
+ WL_IFSTATS_XTLV_SC_PERIODIC_STATE = 0x109,
+ WL_IFSTATS_XTLV_WBUS_PERIODIC_STATE = 0x10A,
+
+ /* Per-slice information
+ * Per-interface reporting could also include slice specific data
+ */
+ /* xtlv container for reporting */
+ WL_IFSTATS_XTLV_WL_SLICE = 0x301,
+ /* Per-slice AMPDU stats */
+ WL_IFSTATS_XTLV_WL_SLICE_TX_AMPDU_DUMP = 0x302,
+ WL_IFSTATS_XTLV_WL_SLICE_RX_AMPDU_DUMP = 0x303,
+ /* Per-slice BTCOEX stats */
+ WL_IFSTATS_XTLV_WL_SLICE_BTCOEX = 0x304,
+ /* V11_WLCNTRS used in ecounters */
+ WL_IFSTATS_XTLV_WL_SLICE_V11_WLCNTRS = 0x305,
+ /* V30_WLCNTRS Used in ecounters */
+ WL_IFSTATS_XTLV_WL_SLICE_V30_WLCNTRS = 0x306,
+ /* phy,ucode,scan pwrstats */
+ WL_IFSTATS_XTLV_WL_SLICE_PWRSTATS_PHY = 0x307,
+ WL_IFSTATS_XTLV_WL_SLICE_PWRSTATS_SCAN = 0x308,
+ WL_IFSTATS_XTLV_WL_SLICE_PWRSTATS_WAKE_V2 = 0x309,
+ /* Per-slice LTECOEX stats */
+ WL_IFSTATS_XTLV_WL_SLICE_LTECOEX = 0x30A,
+ /* TVPM ecounters */
+ WL_IFSTATS_XTLV_WL_SLICE_TVPM = 0x30B,
+ /* TDMTX ecounters */
+ WL_IFSTATS_XTLV_WL_SLICE_TDMTX = 0x30C,
+ /* Slice specific state capture in periodic fashion */
+ WL_SLICESTATS_XTLV_PERIODIC_STATE = 0x30D,
+ WL_SLICESTATS_XTLV_HIST_TX_STATS = 0x30E,
+ WL_SLICESTATS_XTLV_HIST_RX_STATS = 0x30F,
+ /* TX histograms */
+ WL_STATS_XTLV_WL_SLICE_TX_HISTOGRAMS = 0x310,
+ /* TX queue depth */
+ WL_STATS_XTLV_WL_SLICE_TX_QUEUE_DEPTH = 0x311,
+ /* Latency instrumentation debug */
+ WL_STATS_XTLV_WL_QUEUE_STOP = 0x312,
+ /* Beamforming counters */
+ WL_IFSTATS_XTLV_WL_SLICE_TXBF = 0x313,
+ /* Per-slice BTCOEX task duration stats */
+ WL_IFSTATS_XTLV_WL_SLICE_BTCOEX_TSKDUR_STATS = 0x314,
+ /* Per-slice RC1 COEX (NR5G Coex) stats */
+ WL_IFSTATS_XTLV_WL_SLICE_NR5GCX = 0x315,
+ /* Per-slice RC1 COEX (RC1 Coex) stats for trunk and future branches */
+ WL_IFSTATS_XTLV_WL_SLICE_RC1CX = 0x315,
+ /* Per-slice sta offload stats */
+ WL_IFSTATS_XTLV_WL_SLICE_STA_OFLD_STATS = 0x316,
+ /* Per-Slice [only aux] btcec sc stats */
+ WL_IFSTATS_XTLV_WL_SLICE_BTCEC_PERIODIC_STATS = 0x317,
+ /* Per-Slice sc lq stats */
+ WL_IFSTATS_XTLV_SC_CHANIM_PERIODIC_STATS = 0x318,
+ /* Per-slice RC2 COEX stats */
+ WL_IFSTATS_XTLV_WL_SLICE_RC2CX = 0x319,
+
+ /* Per-interface */
+ /* XTLV container for reporting */
+ WL_IFSTATS_XTLV_IF = 0x501,
+ /* Generic stats applicable to all IFs */
+ WL_IFSTATS_XTLV_GENERIC = 0x502,
+ /* Infra specific */
+ WL_IFSTATS_XTLV_INFRA_SPECIFIC = 0x503,
+ /* MGT counters infra and softAP */
+ WL_IFSTATS_XTLV_MGT_CNT = 0x504,
+ /* AMPDU stats on per-IF */
+ WL_IFSTATS_XTLV_AMPDU_DUMP = 0x505,
+ WL_IFSTATS_XTLV_IF_SPECIFIC = 0x506,
+
+#ifdef WLAWDL
+ WL_IFSTATS_XTLV_WL_PWRSTATS_AWDL = 0x507,
+#endif /* WLAWDL */
+
+ WL_IFSTATS_XTLV_IF_LQM = 0x508,
+ /* Interface specific state capture in periodic fashion */
+ WL_IFSTATS_XTLV_IF_PERIODIC_STATE = 0x509,
+ /* Event statistics on per-IF */
+ WL_IFSTATS_XTLV_IF_EVENT_STATS = 0x50A,
+ /* Infra HE specific */
+ WL_IFSTATS_XTLV_INFRA_SPECIFIC_HE = 0x50B,
+ /* Roam statistics */
+ WL_IFSTATS_XTLV_ROAM_STATS_PERIODIC = 0x50C,
+ WL_IFSTATS_XTLV_ROAM_STATS_EVENT = 0x50D,
+ /* ecounters for nan */
+ /* nan slot stats */
+ WL_IFSTATS_XTLV_NAN_SLOT_STATS = 0x601,
+ /* Ecounters for NDP session status */
+ WL_STATS_XTLV_NDP_SESSION_STATUS = 0x602,
+ /* NAN disc frame status ecounters */
+ WL_STATS_XTLV_NAN_DISC_FRM_STATUS = 0x603
+};
+
+/* current version of wl_stats_report_t structure for request */
+#define WL_STATS_REPORT_REQUEST_VERSION_V2 2
+
+/* current version of wl_stats_report_t structure for response */
+#define WL_STATS_REPORT_RESPONSE_VERSION_V2 2
+
+/** Top structure of if_counters IOVar buffer */
+typedef struct wl_stats_report {
+ uint16 version; /**< see version definitions above */
+ uint16 length; /**< length of data including all paddings. */
+ uint8 data []; /**< variable length payload:
+ * 1 or more bcm_xtlv_t type of tuples.
+ * each tuple is padded to multiple of 4 bytes.
+ * 'length' field of this structure includes all paddings.
+ */
+} wl_stats_report_t;
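+
+/* Illustrative sketch (not part of the driver): walking the xtlv tuples in
+ * a wl_stats_report_t response. Assumes the common bcm_xtlv layout
+ * (uint16 id, uint16 len, then len bytes of data), host byte order, and
+ * each tuple padded to a multiple of 4 bytes per the comment above.
+ */
+#if 0 /* usage sketch */
+static void
+stats_report_walk(const wl_stats_report_t *rpt)
+{
+ uint16 off = 0;
+
+ while ((uint16)(off + 4u) <= rpt->length) {
+  uint16 id, len;
+  (void)memcpy(&id, &rpt->data[off], sizeof(id));
+  (void)memcpy(&len, &rpt->data[off + 2], sizeof(len));
+  if ((uint16)(off + 4u + len) > rpt->length)
+   break; /* malformed tuple */
+  /* consume (id, len, &rpt->data[off + 4]) here */
+  off = (uint16)(off + 4u + ((len + 3u) & ~3u)); /* skip padded tuple */
+ }
+}
+#endif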
+
+/* interface specific mgt count */
+#define WL_MGT_STATS_VERSION_V1 1
+/* Associated stats type: WL_IFSTATS_XTLV_MGT_CNT */
+typedef struct {
+ uint16 version;
+ uint16 length;
+
+ /* detailed control/management frames */
+ uint32 txnull;
+ uint32 rxnull;
+ uint32 txqosnull;
+ uint32 rxqosnull;
+ uint32 txassocreq;
+ uint32 rxassocreq;
+ uint32 txreassocreq;
+ uint32 rxreassocreq;
+ uint32 txdisassoc;
+ uint32 rxdisassoc;
+ uint32 txassocrsp;
+ uint32 rxassocrsp;
+ uint32 txreassocrsp;
+ uint32 rxreassocrsp;
+ uint32 txauth;
+ uint32 rxauth;
+ uint32 txdeauth;
+ uint32 rxdeauth;
+ uint32 txprobereq;
+ uint32 rxprobereq;
+ uint32 txprobersp;
+ uint32 rxprobersp;
+ uint32 txaction;
+ uint32 rxaction;
+ uint32 txpspoll;
+ uint32 rxpspoll;
+} wl_if_mgt_stats_t;
+
+/* This structure (wl_if_infra_stats_t) is deprecated in favour of
+ * versioned structure (wl_if_infra_enh_stats_vxxx_t) defined below
+ */
+#define WL_INFRA_STATS_VERSION_V1 1
+/* Associated stats type: WL_IFSTATS_XTLV_INFRA_SPECIFIC */
+typedef struct wl_infra_stats {
+ uint16 version; /**< version of the structure */
+ uint16 length;
+ uint32 rxbeaconmbss;
+ uint32 tbtt;
+} wl_if_infra_stats_t;
+
+/* Starting the versioned structure with version as 2 to distinguish
+ * between legacy unversioned structure
+ */
+#define WL_INFRA_ENH_STATS_VERSION_V2 2u
+/* Associated stats type: WL_IFSTATS_XTLV_INFRA_SPECIFIC */
+typedef struct wl_infra_enh_stats_v2 {
+ uint16 version; /**< version of the structure */
+ uint16 length;
+ uint32 rxbeaconmbss;
+ uint32 tbtt;
+ uint32 tim_mcast_ind; /**< number of beacons with tim bits indicating multicast data */
+ uint32 tim_ucast_ind; /**< number of beacons with tim bits indicating unicast data */
+} wl_if_infra_enh_stats_v2_t;
+
+#define WL_INFRA_STATS_HE_VERSION_V1 (1u)
+/* Associated stats type: WL_IFSTATS_XTLV_INFRA_SPECIFIC_HE */
+typedef struct wl_infra_stats_he {
+ uint16 version; /**< version of the structure */
+ uint16 length;
+ uint32 PAD; /**< Explicit padding */
+
+ /* DL SU MPDUs and total number of bytes */
+ uint64 dlsu_mpdudata;
+ uint64 dlsu_mpdu_bytes;
+
+ /* DL MUMIMO MPDUs and total number of bytes */
+ uint64 dlmumimo_mpdudata;
+ uint64 dlmumimo_mpdu_bytes;
+
+ /* DL OFDMA MPDUs and total number of bytes */
+ uint64 dlofdma_mpdudata;
+ uint64 dlofdma_mpdu_bytes;
+
+ /* UL SU MPDUs and total number of bytes */
+ uint64 ulsu_mpdudata;
+ uint64 ulsu_mpdu_bytes;
+
+ /* UL OFDMA MPDUs and total number of bytes */
+ uint64 ulofdma_mpdudata;
+ uint64 ulofdma_mpdu_bytes;
+} wl_if_infra_stats_he_t;
+
+#define LTECOEX_STATS_VER 1
+
+typedef struct wlc_ltecoex_stats {
+ uint16 version; /**< WL_IFSTATS_XTLV_WL_SLICE_LTECOEX */
+ uint16 len; /* Length of wl_ltecx_stats structure */
+ uint8 slice_index; /* Slice unit of wl_ltecx_stats structure */
+ uint8 pad[3]; /* Padding */
+ /* LTE noise based eCounters bins:
+ * accumulate the wl_cnt_wlc_t and wl_ctl_mgt_cnt_t
+ * counter information based on LTE Coex interference level
+ */
+ uint32 txframe_no_LTE; /* txframe counter in no LTE Coex case */
+ uint32 rxframe_no_LTE; /* rxframe counter in no LTE Coex case */
+ uint32 rxrtry_no_LTE; /* rxrtry counter in no LTE Coex case */
+ uint32 txretrans_no_LTE; /* txretrans counter in no LTE Coex case */
+ uint32 txnocts_no_LTE; /* txnocts counter in no LTE Coex case */
+ uint32 txrts_no_LTE; /* txrts counter in no LTE Coex case */
+ uint32 txdeauth_no_LTE; /* txdeauth counter in no LTE Coex case */
+ uint32 txassocreq_no_LTE; /* txassocreq counter in no LTE Coex case */
+ uint32 txassocrsp_no_LTE; /* txassocrsp counter in no LTE Coex case */
+ uint32 txreassocreq_no_LTE; /* txreassocreq counter in no LTE Coex case */
+ uint32 txreassocrsp_no_LTE; /* txreassocrsp counter in no LTE Coex case */
+ uint32 txframe_light_LTE; /* txframe counter in light LTE Coex case */
+ uint32 txretrans_light_LTE; /* txretrans counter in light LTE Coex case */
+ uint32 rxframe_light_LTE; /* rxframe counter in light LTE Coex case */
+ uint32 rxrtry_light_LTE; /* rxrtry counter in light LTE Coex case */
+ uint32 txnocts_light_LTE; /* txnocts counter in light LTE Coex case */
+ uint32 txrts_light_LTE; /* txrts counter in light LTE Coex case */
+ uint32 txdeauth_light_LTE; /* txdeauth counter in light LTE Coex case */
+ uint32 txassocreq_light_LTE; /* txassocreq counter in light LTE Coex case */
+ uint32 txassocrsp_light_LTE; /* txassocrsp counter in light LTE Coex case */
+ uint32 txreassocreq_light_LTE; /* txreassocreq counter in light LTE Coex case */
+ uint32 txreassocrsp_light_LTE; /* txreassocrsp counter in light LTE Coex case */
+ uint32 txframe_heavy_LTE; /* txframe counter in heavy LTE Coex case */
+ uint32 txretrans_heavy_LTE; /* txretrans counter in heavy LTE Coex case */
+ uint32 rxframe_heavy_LTE; /* rxframe counter in heavy LTE Coex case */
+ uint32 rxrtry_heavy_LTE; /* rxrtry counter in heavy LTE Coex case */
+ uint32 txnocts_heavy_LTE; /* txnocts counter in heavy LTE Coex case */
+ uint32 txrts_heavy_LTE; /* txrts counter in heavy LTE Coex case */
+ uint32 txdeauth_heavy_LTE; /* txdeauth counter in heavy LTE Coex case */
+ uint32 txassocreq_heavy_LTE; /* txassocreq counter in heavy LTE Coex case */
+ uint32 txassocrsp_heavy_LTE; /* txassocrsp counter in heavy LTE Coex case */
+ uint32 txreassocreq_heavy_LTE; /* txreassocreq counter in heavy LTE Coex case */
+ uint32 txreassocrsp_heavy_LTE; /* txreassocrsp counter in heavy LTE Coex case */
+
+ /* LTE specific ecounters */
+ uint16 type4_txinhi_dur; /* Duration of tx inhibit (in ms) due to Type4 */
+ uint16 type4_nonzero_cnt; /* Counts of non-zero Type4 msgs */
+ uint16 type4_timeout_cnt; /* Counts of Type4 timeout */
+ uint16 rx_pri_dur; /* Duration of wlan_rx_pri assertions */
+ uint16 rx_pri_cnt; /* Count of wlan_rx_pri assertions */
+ uint16 type6_dur; /* duration of LTE Tx power limiting assertions */
+ uint16 type6_cnt; /* Count of LTE Tx power limiting assertions */
+ uint16 ts_prot_frm_cnt; /* count of WLAN protection frames triggered by LTE coex */
+ uint16 ts_gr_cnt; /* count of intervals granted to WLAN in timesharing */
+ uint16 ts_gr_dur; /* duration granted to WLAN in timesharing */
+} wlc_ltecoex_stats_t;
+
+#define CSA_EVT_CSA_RXED (1 << 0)
+#define CSA_EVT_CSA_TIMEOUT (1 << 1)
+#define CSA_EVT_FROM_INFRA (1 << 2)
+typedef struct csa_event_data {
+ chanspec_t chan_old;
+ dot11_ext_csa_ie_t ecsa;
+ dot11_mesh_csp_ie_t mcsp;
+ dot11_wide_bw_chan_switch_ie_t wbcs;
+ uint8 flags;
+ uint8 pad[3];
+} csa_event_data_t;
+
+/* ifdef (WL_ASSOC_BCN_RPT) */
+enum wl_bcn_report_cmd_id {
+ WL_BCN_RPT_CMD_VER = 0,
+ WL_BCN_RPT_CMD_CONFIG = 1,
+ WL_BCN_RPT_CMD_VENDOR_IE = 2,
+ WL_BCN_RPT_CMD_LAST
+};
+
+/* beacon report specific macros */
+#define WL_BCN_RPT_CCX_IE_OVERRIDE (1u << 0)
+
+/* beacon report specific macros */
+#define WL_BCN_RPT_ASSOC_SCAN_UNSOLICITED_MODE (1u << 1)
+#define WL_BCN_RPT_ASSOC_SCAN_SOLICITED_MODE (1u << 2)
+#define WL_BCN_RPT_ASSOC_SCAN_MODE_SHIFT (1)
+#define WL_BCN_RPT_ASSOC_SCAN_MODE_MASK (WL_BCN_RPT_ASSOC_SCAN_UNSOLICITED_MODE |\
+ WL_BCN_RPT_ASSOC_SCAN_SOLICITED_MODE)
+#define WL_BCN_RPT_ASSOC_SCAN_MODE_MAX (WL_BCN_RPT_ASSOC_SCAN_MODE_MASK >> \
+ WL_BCN_RPT_ASSOC_SCAN_MODE_SHIFT)
+/* beacon report mode specific macro */
+#define WL_BCN_RPT_ASSOC_SCAN_MODE_DEFAULT WL_BCN_RPT_ASSOC_SCAN_UNSOLICITED_MODE
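+
+/* Illustrative sketch (not part of the driver): extracting the assoc-scan
+ * mode from a flags word with the mask/shift pair above.
+ */
+#if 0 /* usage sketch */
+ uint32 flags = WL_BCN_RPT_ASSOC_SCAN_MODE_DEFAULT;
+ uint32 mode = (flags & WL_BCN_RPT_ASSOC_SCAN_MODE_MASK) >>
+  WL_BCN_RPT_ASSOC_SCAN_MODE_SHIFT; /* 1 == unsolicited */
+#endif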
+
+/* beacon report timeout config specific macros */
+#define WL_BCN_RPT_ASSOC_SCAN_CACHE_TIMEOUT_DEFAULT (120000)
+#define WL_BCN_RPT_ASSOC_SCAN_CACHE_TIMEOUT_MIN (60000)
+#define WL_BCN_RPT_ASSOC_SCAN_CACHE_TIMEOUT_MAX (0xFFFFFFFF)
+
+/* beacon report cache count specific macros */
+#define WL_BCN_RPT_ASSOC_SCAN_CACHE_COUNT_MIN (0)
+#define WL_BCN_RPT_ASSOC_SCAN_CACHE_COUNT_MAX (8)
+#define WL_BCN_RPT_ASSOC_SCAN_CACHE_COUNT_DEFAULT (WL_BCN_RPT_ASSOC_SCAN_CACHE_COUNT_MAX)
+
+#define WL_BCN_REPORT_CMD_VERSION 1
+struct wl_bcn_report_cfg {
+ uint32 flags; /**< Flags that defines the operation/setting information */
+ uint32 scan_cache_timeout; /**< scan cache timeout value in millisec */
+ uint32 scan_cache_timer_pend; /**< Read only pending time for timer expiry in millisec */
+ uint8 scan_cache_cnt; /**< scan cache count */
+};
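+
+/* Illustrative sketch (not part of the driver): clamping a beacon report
+ * config to the bounds defined above before issuing it. The helper is
+ * hypothetical.
+ */
+#if 0 /* usage sketch */
+static void
+bcn_report_cfg_clamp(struct wl_bcn_report_cfg *cfg)
+{
+ if (cfg->scan_cache_timeout < WL_BCN_RPT_ASSOC_SCAN_CACHE_TIMEOUT_MIN)
+  cfg->scan_cache_timeout = WL_BCN_RPT_ASSOC_SCAN_CACHE_TIMEOUT_MIN;
+ if (cfg->scan_cache_cnt > WL_BCN_RPT_ASSOC_SCAN_CACHE_COUNT_MAX)
+  cfg->scan_cache_cnt = WL_BCN_RPT_ASSOC_SCAN_CACHE_COUNT_MAX;
+}
+#endif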
+
+/* endif (WL_ASSOC_BCN_RPT) */
+
+/* Thermal, Voltage, and Power Mitigation */
+#define TVPM_REQ_VERSION_1 1
+#define TVPM_REQ_CURRENT_VERSION TVPM_REQ_VERSION_1
+
+/* tvpm iovar data */
+typedef struct {
+ uint16 version; /* TVPM request version */
+ uint16 length; /* Length of the entire structure */
+
+ uint16 req_type; /* Request type: wl_tvpm_req_type_t */
+ uint16 req_len; /* Length of the following value */
+ uint8 value[]; /* Variable length data depending on req_type */
+} wl_tvpm_req_t;
+
+/* tvpm iovar request types */
+typedef enum {
+ WL_TVPM_REQ_CLTM_INDEX, /* req_value: uint32, range 1...100 */
+ WL_TVPM_REQ_PPM_INDEX, /* req_value: uint32, range 1...100 */
+ WL_TVPM_REQ_ENABLE, /* req_value: uint32, range 0...1 */
+ WL_TVPM_REQ_STATUS, /* req_value: none */
+ WL_TVPM_REQ_PERIOD, /* req_value: int32, range {-1,1-10} */
+ WL_TVPM_REQ_TXDC, /* req_value: uint32, range 1...100 */
+ WL_TVPM_REQ_MAX
+} wl_tvpm_req_type_t;
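+
+/* Illustrative sketch (not part of the driver): building a TVPM request
+ * that carries a single uint32 value (here a CLTM index) in the flexible
+ * 'value' array. Buffer handling is hypothetical.
+ */
+#if 0 /* usage sketch */
+ uint8 buf[sizeof(wl_tvpm_req_t) + sizeof(uint32)];
+ wl_tvpm_req_t *req = (wl_tvpm_req_t *)buf;
+ uint32 cltm = 50; /* range 1...100 */
+
+ req->version = TVPM_REQ_CURRENT_VERSION;
+ req->length = sizeof(buf);
+ req->req_type = WL_TVPM_REQ_CLTM_INDEX;
+ req->req_len = sizeof(cltm);
+ (void)memcpy(req->value, &cltm, sizeof(cltm));
+#endif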
+
+/* structure for data returned by request type WL_TVPM_REQ_STATUS */
+typedef struct wl_tvpm_status {
+ uint16 enable; /* whether TVPM is enabled */
+ uint16 tx_dutycycle; /* a percentage: 1-100 */
+ int16 tx_power_backoff; /* 0...-6 */
+ uint16 num_active_chains; /* 1...3 */
+ int16 temp; /* local temperature in degrees C */
+ uint8 vbat; /* local voltage in units of 0.1V */
+ uint8 pad;
+} wl_tvpm_status_t;
+
+/* TVPM ecounters */
+typedef struct wl_tvpm_ecounters_t {
+ uint16 version; /* version field */
+ uint16 length; /* byte length in wl_tvpm_ecounters_t starting at version */
+ uint16 tx_dutycycle; /* a percentage: 1-100 */
+ int16 tx_power_backoff; /* 0...-6 */
+ uint16 num_active_chains; /* 1...3 */
+ int16 temp; /* local temperature */
+ uint8 vbat; /* local voltage */
+ uint8 cltm; /* CLTM index */
+ uint8 ppm; /* PPM index */
+ uint8 pad; /* pad to align to uint16 */
+} wl_tvpm_ecounters_t;
+
+#define TDMTX_ECOUNTERS_VERSION_V1 1
+#define TDMTX_ECOUNTERS_VERSION_V2 2
+
+/* TDMTX ecounters */
+typedef struct wl_tdmtx_ecounters_v1 {
+ uint16 version; /* version field */
+ uint16 length; /* byte length in wl_tdmtx_ecounters_t starting at version */
+ uint32 txa_on; /* TXA on requests */
+ uint32 txa_tmcnt; /* Total number of TXA timeout */
+ uint32 por_on; /* TXA POR requests */
+ uint32 txpuen; /* Path enable requests */
+ uint32 txpudis; /* Total number of times Tx path is muted on the slice */
+ uint32 txpri_on; /* Total number of times Tx priority was obtained by the slice */
+ uint32 txdefer; /* Total number of times Tx was deferred by the slice */
+ uint32 txmute; /* Total number of times active Tx muted on the slice */
+ uint32 actpwrboff; /* Total number of times TX power is backed off by the slice */
+ uint32 txa_dur; /* Total time txa on */
+ uint32 txpri_dur; /* Total time TXPri */
+ uint32 txdefer_dur; /* Total time txdefer */
+} wl_tdmtx_ecounters_v1_t;
+
+/* TDMTX ecounters for version 2 */
+typedef struct wl_tdmtx_ecounters_v2 {
+ uint16 version; /* version field */
+ uint16 length; /* byte length in wl_tdmtx_ecounters_t starting at version */
+ uint32 txa_on; /* TXA on requests */
+ uint32 txa_tmcnt; /* Total number of TXA timeout */
+ uint32 porhi_on; /* TXA PORHI requests */
+ uint32 porlo_on; /* TXA PORLO requests */
+ uint32 txpuen; /* Path enable requests */
+ uint32 txpudis; /* Total number of times Tx path is muted on the slice */
+ uint32 txpri_on; /* Total number of times Tx priority was obtained by the slice */
+ uint32 txdefer; /* Total number of times Tx was deferred by the slice */
+ uint32 txmute; /* Total number of times active Tx muted on the slice */
+ uint32 actpwrboff; /* Total number of times TX power is backed off by the slice */
+ uint32 txa_dur; /* Total time txa on */
+ uint32 txpri_dur; /* Total time TXPri */
+ uint32 txdefer_dur; /* Total time txdefer */
+} wl_tdmtx_ecounters_v2_t;
+
+/* Note: if this struct changes, update the wl_scb_ecounters_vX_t version,
+ * as this struct is sent as the payload in wl_scb_ecounters_vX_t
+ */
+typedef struct wlc_scb_stats_v1 {
+ uint32 tx_pkts; /* num of packets transmitted (ucast) */
+ uint32 tx_failures; /* num of packets failed */
+ uint32 rx_ucast_pkts; /* num of unicast packets received */
+ uint32 rx_mcast_pkts; /* num of multicast packets received */
+ uint32 tx_rate; /* Rate of last successful tx frame */
+ uint32 rx_rate; /* Rate of last successful rx frame */
+ uint32 rx_decrypt_succeeds; /* num of packets decrypted successfully */
+ uint32 rx_decrypt_failures; /* num of packets decrypted unsuccessfully */
+ uint32 tx_mcast_pkts; /* num of mcast pkts txed */
+ uint64 tx_ucast_bytes; /* data bytes txed (ucast) */
+ uint64 tx_mcast_bytes; /* data bytes txed (mcast) */
+ uint64 rx_ucast_bytes; /* data bytes recvd ucast */
+ uint64 rx_mcast_bytes; /* data bytes recvd mcast */
+ uint32 tx_pkts_retried; /* num of packets where a retry was necessary */
+ uint32 tx_pkts_retry_exhausted; /* num of packets where a retry was exhausted */
+ uint32 tx_rate_mgmt; /* Rate of last transmitted management frame */
+ uint32 tx_rate_fallback; /* last used lowest fallback TX rate */
+ uint32 rx_pkts_retried; /* # rx with retry bit set */
+ uint32 tx_pkts_total; /* total num of tx pkts */
+ uint32 tx_pkts_retries; /* total num of tx retries */
+ uint32 tx_pkts_fw_total; /* total num of tx pkts generated from fw */
+ uint32 tx_pkts_fw_retries; /* num of fw generated tx pkts retried */
+ uint32 tx_pkts_fw_retry_exhausted; /* num of fw generated tx pkts where retry exhausted */
+} wlc_scb_stats_v1_t;
+
+/* ecounters for scb stats
+ * XTLV ID: WL_IFSTATS_XTLV_SCB_ECOUNTERS
+ */
+
+#define WL_SCB_ECOUNTERS_VERSION_1 1
+#define WL_SCB_ECOUNTERS_VERSION_2 2
+
+typedef struct wl_scb_ecounters_v1 {
+ uint16 version; /* version field */
+ uint16 length; /* struct length starting from version */
+ uint32 chanspec; /* current chanspec where scb is operating */
+ struct ether_addr ea; /* peer ndi or sta ea */
+ uint8 peer_type; /* peer type */
+ uint8 pad;
+
+ /* scb tx and rx stats */
+ wlc_scb_stats_v1_t stats;
+} wl_scb_ecounters_v1_t;
+
+typedef struct wl_scb_ecounters_v2 {
+ uint16 version; /* version field */
+ uint16 length; /* struct length starting from version */
+ uint32 chanspec; /* current chanspec where scb is operating */
+ struct ether_addr ea; /* peer ndi or sta ea */
+ uint8 peer_type; /* peer type */
+ uint8 pad;
+
+ /* scb tx and rx stats */
+ uint16 tx_rate; /* Rate(in Mbps) of last successful tx frame */
+ uint16 rx_rate; /* Rate(in Mbps) of last successful rx frame */
+ uint16 tx_rate_fallback; /* last used lowest fallback TX rate(in Mbps) */
+ uint16 pad1;
+ uint32 rx_decrypt_succeeds; /* num of packets decrypted successfully */
+ uint32 rx_decrypt_failures; /* num of packets decrypted unsuccessfully */
+ uint32 rx_pkts_retried; /* # rx with retry bit set */
+ uint32 tx_pkts_retries; /* total num of tx retries */
+ uint32 tx_failures; /* num of packets failed */
+ uint32 tx_pkts_total; /* total num of tx pkts */
+ int8 rssi[WL_STA_ANT_MAX]; /* average rssi per antenna of data frames */
+} wl_scb_ecounters_v2_t;
+
+/* ecounters for nan slot stats
+ * XTLV ID: WL_IFSTATS_XTLV_NAN_SLOT_STATS
+ */
+
+#define WL_NAN_SLOT_ECOUNTERS_VERSION_1 1
+#define WL_NAN_SLOT_ECOUNTERS_VERSION_2 2
+#define WL_NAN_SLOT_ECOUNTERS_VERSION_3 3
+
+typedef struct wl_nan_slot_ecounters_v1 {
+ uint16 version; /* version field */
+ uint16 length; /* struct length starting from version */
+ uint32 chan[NAN_MAX_BANDS]; /* cur nan slot chanspec of both bands */
+ uint16 cur_slot_idx; /* cur nan slot index */
+ uint16 pad;
+ nan_sched_stats_t sched; /* sched stats */
+ wl_nan_mac_stats_t mac; /* mac stats */
+} wl_nan_slot_ecounters_v1_t;
+
+typedef struct wl_nan_slot_ecounters_v2 {
+ uint16 version; /* version field */
+ uint16 length; /* struct length starting from version */
+ uint32 chan[NAN_MAX_BANDS]; /* cur nan slot chanspec of both bands */
+ uint16 cur_slot_idx; /* cur nan slot index */
+ uint16 pad;
+ nan_sched_stats_t sched; /* sched stats */
+ wl_nan_mac_stats_t mac; /* mac stats */
+ /* for v2 */
+ uint16 bcn_rx_drop_rssi; /* Beacon received but ignored due to weak rssi */
+ uint16 bcn_rx_drop_rssi_5g; /* 5G Beacon received but ignored due to weak rssi */
+ uint16 cnt_rssi_close; /* cnt of beacon rssi > rssi_close received */
+ uint16 cnt_rssi_close_5g; /* cnt of 5G beacon rssi > rssi_close received */
+ uint16 cnt_rssi_mid; /* cnt of beacon rssi > rssi_middle received */
+ uint16 cnt_rssi_mid_5g; /* cnt of 5G beacon rssi > rssi_middle received */
+ uint16 bcn_txfail; /* Beacon sending failure count */
+ uint16 bcn_txfail_5g; /* sending 5G beacon failure count */
+} wl_nan_slot_ecounters_v2_t;
+
+typedef struct wl_nan_slot_ecounters_v3 {
+ uint16 version; /* version field */
+ uint16 length; /* struct length starting from version */
+ uint32 chan[NAN_MAX_BANDS]; /* cur nan slot chanspec of both bands */
+ uint16 cur_slot_idx; /* cur nan slot index */
+ uint16 pad;
+ nan_sched_stats_t sched; /* sched stats */
+ /* for v3 */
+ wl_nan_mac_stats_v1_t mac; /* mac stats */
+ uint16 bcn_rx_drop_rssi; /* Beacon received but ignored due to weak rssi */
+ uint16 bcn_rx_drop_rssi_5g; /* 5G Beacon received but ignored due to weak rssi */
+ uint16 cnt_rssi_close; /* cnt of beacon rssi > rssi_close received */
+ uint16 cnt_rssi_close_5g; /* cnt of 5G beacon rssi > rssi_close received */
+ uint16 cnt_rssi_mid; /* cnt of beacon rssi > rssi_middle received */
+ uint16 cnt_rssi_mid_5g; /* cnt of 5G beacon rssi > rssi_middle received */
+ uint16 bcn_txfail; /* Beacon sending failure count */
+ uint16 bcn_txfail_5g; /* sending 5G beacon failure count */
+} wl_nan_slot_ecounters_v3_t;
+
+/* WL_STATS_XTLV_NDP_SESSION_STATUS for ecounters */
+#define WL_NAN_SESSION_STATUS_EC_VERSION_1 1
+typedef struct wl_nan_ndp_session_status_v1_s {
+ uint16 version; /* version field */
+ uint16 length; /* struct length starting from version */
+ uint8 role; /* Role of NAN device */
+ uint8 ndp_id; /* local NDP ID */
+ uint8 state; /* NDP state */
+ uint8 nan_sec_csid; /* security csid */
+ struct ether_addr lndi_addr; /* Local NDI addr */
+ struct ether_addr pnmi_addr; /* Peer NMI addr */
+ struct ether_addr pndi_addr; /* Peer NDI addr */
+ uint8 dpe_state; /* DPE state, to identify where the timeout/DP end originated */
+ uint8 pad;
+} wl_nan_ndp_session_status_v1_t;
+
+/* WL_STATS_XTLV_NAN_DISC_FRM_STATUS for ecounters */
+#define WL_NAN_DISC_FRM_STATUS_EC_VERSION_1 1
+typedef struct wl_nan_disc_frame_status_v1_s {
+ uint16 version; /* version field */
+ uint16 length; /* struct length starting from version */
+ uint8 type; /* wl_nan_frame_type_t */
+ uint8 status; /* For TX status, success or failure */
+ uint8 reason_code; /* to identify reason when status is failure */
+ uint8 inst_id; /* Publish or subscribe instance id */
+ uint8 req_id; /* Requestor instance id */
+ uint8 pad;
+ uint16 token; /* seq num to keep track of pkts sent by host */
+} wl_nan_disc_frame_status_v1_t;
+
+typedef struct wl_nan_oob_af {
+ uint64 bitmap; /* 16 TU slots in 1024 TU window */
+ struct ether_addr sa; /* Optional SA. Default set to NMI */
+ struct ether_addr da;
+ struct ether_addr bssid;
+ bool secured; /* Optional. Default set to 0 (Open) */
+ uint8 map_id; /* Host selected map id. Default 0 */
+ uint16 timeout; /* OOB AF session timeout in milliseconds */
+ uint16 pad[3]; /* Structure padding. Can be used in future */
+ uint16 token; /* host generated. Used by FW in TX status event */
+ uint16 payload_len;
+ uint8 payload[]; /* AF hdr + NAN attributes in TLV format */
+} wl_nan_oob_af_t;
+
+/*
+ * BT log definitions
+ */
+
+/* common iovar struct */
+typedef struct wl_btl {
+ uint16 subcmd_id; /* subcommand id */
+ uint16 len; /* total length of data[] */
+ uint8 data[2]; /* subcommand data, variable length */
+} wl_btl_t;
+
+/* subcommand ids */
+#define WL_BTL_SUBCMD_ENABLE 0 /* enable/disable logging */
+#define WL_BTL_SUBCMD_STATS 1 /* statistics */
+
+/* WL_BTL_SUBCMD_ENABLE data */
+typedef struct wl_blt_enable {
+ uint8 enable; /* 1 - enable, 0 - disable */
+ uint8 pad[3]; /* 4-byte struct alignment */
+} wl_btl_enable_t;
+
+/* WL_BTL_SUBCMD_STATS data */
+typedef struct wl_blt_stats {
+ uint32 bt_interrupt; /* num BT interrupts */
+ uint32 config_req; /* num CONFIG_REQ */
+ uint32 config_res_success; /* num CONFIG_RES successful */
+ uint32 config_res_fail; /* num CONFIG_RES failed */
+ uint32 log_req; /* num LOG_REQ */
+ uint32 log_res_success; /* num LOG_RES successful */
+ uint32 log_res_fail; /* num LOG_RES failed */
+ uint32 indirect_read_fail; /* num indirect read fail */
+ uint32 indirect_write_fail; /* num indirect write fail */
+ uint32 dma_fail; /* num DMA failed */
+ uint32 min_log_req_duration; /* min log request duration in usec */
+ uint32 max_log_req_duration; /* max log request duration in usec */
+ uint16 mem_dump_req; /* num mem dump requests */
+ uint16 mem_dump_success; /* num mem dumps successful */
+ uint16 mem_dump_fail; /* num mem dumps failed */
+ uint16 bt_wake_success; /* num BT wakes successful */
+ uint16 bt_wake_fail; /* num BT wakes failed */
+ uint16 mem_dump_req_interrupt; /* num MEM_DUMP_REQ interrupt */
+ uint16 mem_dump_res_interrupt; /* num MEM_DUMP_RES interrupt */
+ uint16 mem_dump_res_timeout; /* num MEM_DUMP_RES timeout */
+ uint16 mem_dump_proc_no_bt_ready; /* num proceed if no BT ready */
+ uint16 mem_dump_proc_no_bt_response; /* num proceed if no BT response */
+ uint16 mem_dump_proc_no_bt_clock; /* num proceed if no BT clock */
+ uint16 pad; /* alignment */
+ uint32 last_failed_region; /* start addr of last failed region */
+ uint32 min_mem_dump_duration; /* min mem dump duration in usec */
+ uint32 max_mem_dump_duration; /* max mem dump duration in usec */
+} wl_btl_stats_t;
+
+/* IOV AWD DATA */
+
+/* AWD DATA structures */
+typedef struct {
+ uint8 version; /* Extended trap version info */
+ uint8 reserved; /* currently unused */
+ uint16 length; /* Length of data excluding this header */
+ uint8 data[]; /* this data is TLV of tags */
+} awd_data_v1_t;
+
+/* AWD TAG structure */
+typedef struct {
+ uint8 tagid; /* one of AWD DATA TAGs numbers */
+ uint8 length; /* the data size represented by this field must be aligned to 32 bits */
+ uint8 data[]; /* variable size, defined by length field */
+} awd_tag_data_v1_t;
+
+/* IOV ETD DATA */
+
+/* ETD DATA structures */
+typedef struct {
+ uint8 version; /* Extended trap version info */
+ uint8 reserved; /* currently unused */
+ uint16 length; /* Length of data excluding this header */
+ uint8 data[]; /* this data is TLV of tags */
+} etd_data_v1_t;
+
+/* ETD TAG structure */
+typedef struct {
+ uint8 tagid; /* one of ETD DATA TAGs numbers */
+ uint8 length; /* the data size represented by this field must be aligned to 32 bits */
+ uint8 data[]; /* variable size, defined by length field */
+} etd_tag_data_v1_t;
+
+/* ETD information structures associated with ETD_DATA_Tags */
+/* ETD_JOIN_CLASSIFICATION_INFO 10 */
+typedef struct {
+ uint8 assoc_type; /* assoc type */
+ uint8 assoc_state; /* current state of assoc state machine */
+ uint8 wpa_state; /* wpa->state */
+ uint8 wsec_portopen; /* shows if security port is open */
+ uint8 total_attempts_num; /* total number of join attempts (bss_retries) */
+ uint8 num_of_targets; /* up to 3, in current design */
+ uint8 reserved [2]; /* padding to get 32 bits alignment */
+ uint32 wsec; /* bsscfg->wsec */
+ uint32 wpa_auth; /* bsscfg->WPA_auth */
+ uint32 time_to_join; /* time duration to process WLC_SET_SSID request (ms) */
+} join_classification_info_v1_t;
+
+/* ETD_JOIN_TARGET_CLASSIFICATION_INFO 11 */
+typedef struct {
+ int8 rssi; /* RSSI on current channel */
+ uint8 cca; /* CCA on current channel */
+ uint8 channel; /* current channel */
+ uint8 num_of_attempts; /* (bss_retries) up to 5 */
+ uint8 oui[3]; /* the first three octets of the AP's address */
+ uint8 reserved; /* padding to get 32 bits alignment */
+ uint32 time_duration; /* time duration of current attempt (ms) */
+} join_target_classification_info_v1_t;
+
+/* ETD_ASSOC_STATE 12 */
+typedef struct {
+ uint8 assoc_state; /* assoc type */
+ uint8 reserved [3]; /* padding to get 32 bits alignment */
+} join_assoc_state_v1_t;
+
+/* ETD_CHANNEL 13 tag */
+typedef struct {
+ uint8 channel; /* last attempt channel */
+ uint8 reserved [3]; /* padding to get 32 bits alignment */
+} join_channel_v1_t;
+
+/* ETD_TOTAL_NUM_OF_JOIN_ATTEMPTS 14 */
+typedef struct {
+ uint8 total_attempts_num; /* total number of join attempts (bss_retries) */
+ uint8 reserved [3]; /* padding to get 32 bits alignment */
+} join_total_attempts_num_v1_t;
+
+/* IOV_ROAM_CACHE structures */
+
+enum wl_rmc_report_cmd_id {
+ WL_RMC_RPT_CMD_VER = 0,
+ WL_RMC_RPT_CMD_DATA = 1,
+ WL_RMC_RPT_CMD_LAST
+};
+
+enum wl_rmc_report_xtlv_id {
+ WL_RMC_RPT_XTLV_VER = 0x0,
+ WL_RMC_RPT_XTLV_BSS_INFO = 0x1,
+ WL_RMC_RPT_XTLV_CANDIDATE_INFO = 0x2,
+ WL_RMC_RPT_XTLV_USER_CACHE_INFO = 0x3
+};
+
+/* WL_RMC_RPT_XTLV_BSS_INFO */
+typedef struct {
+ int16 rssi; /* current BSS RSSI */
+ uint8 reason; /* reason code for last full scan */
+ uint8 status; /* last status code for not roaming */
+ uint32 fullscan_count; /* number of full scans performed on current BSS */
+ uint32 time_full_scan; /* delta time (in ms) between cur time and full scan timestamp */
+} rmc_bss_info_v1_t;
+
+/* WL_RMC_RPT_XTLV_CANDIDATE_INFO */
+typedef struct {
+ int16 rssi; /* last seen rssi */
+ uint16 ctl_channel; /* channel */
+ uint32 time_last_seen; /* delta time (in ms) between cur time and last seen timestamp */
+ uint16 bss_load; /* BSS load */
+ uint8 bssid [6]; /* BSSID of the candidate */
+} rmc_candidate_info_v1_t;
+
+#define WL_FILTER_IE_VERSION 1 /* deprecated */
+enum wl_filter_ie_options {
+ WL_FILTER_IE_CLEAR = 0, /* allow element id in packet. For suboption */
+ WL_FILTER_IE_SET = 1, /* filter element id in packet. For suboption */
+ WL_FILTER_IE_LIST = 2, /* list element IDs. Set as option */
+ WL_FILTER_IE_CLEAR_ALL = 3, /* clear all the elements. Set as option */
+ WL_FILTER_IE_CHECK_SUB_OPTION = 4 /* check for suboptions. Set only as option */
+};
+
+typedef struct wl_filter_ie_tlv {
+ uint16 id; /* element id [ + ext id ] */
+ uint16 len; /* sub option length + pattern length */
+ uint8 data[]; /* sub option + pattern matching(OUI,type,sub-type) */
+} wl_filter_ie_tlv_t;
+
+#define WL_FILTER_IE_VERSION_1 1 /* the latest version */
+typedef struct wl_filter_ie_iov_v1 {
+ uint16 version; /* Structure version */
+ uint16 len; /* Total length of the structure */
+ uint16 fixed_length; /* Total length of fixed fields */
+ uint8 option; /* Filter action - check for suboption */
+ uint8 pad[1]; /* Align to 4 bytes */
+ uint32 pktflag; /* frame type - FC_XXXX */
+ uint8 tlvs[]; /* variable data (empty for list, clear-all) */
+} wl_filter_ie_iov_v1_t;
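+
+/* Request sketch (illustrative; field semantics inferred from the comments
+ * above): a LIST request needs only the fixed fields, so tlvs[] stays empty
+ * and len equals fixed_length:
+ *
+ *	wl_filter_ie_iov_v1_t iov;
+ *	memset(&iov, 0, sizeof(iov));
+ *	iov.version = WL_FILTER_IE_VERSION_1;
+ *	iov.len = sizeof(iov);
+ *	iov.fixed_length = sizeof(iov);
+ *	iov.option = WL_FILTER_IE_LIST;
+ */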
+
+/* Event aggregation config */
+#define EVENT_AGGR_CFG_VERSION 1
+#define EVENT_AGGR_DISABLED 0x0
+#define EVENT_AGGR_ENABLED 0x1
+
+#define EVENT_AGGR_BUFSIZE_MAX 1512
+#define EVENT_AGGR_BUFSIZE_MIN 512
+
+#define EVENT_AGGR_FLUSH_TIMEOUT_DEFAULT 100
+#define EVENT_AGGR_FLUSH_TIMEOUT_MAX 2000
+#define EVENT_AGGR_NUM_EVENTS_FLUSH 5
+typedef struct event_aggr_config {
+ uint16 version;
+ uint16 len;
+ uint16 flags; /* bit 0 to enable/disable the feature */
+ uint16 bufsize; /* Aggregate buffer size */
+ uint16 flush_timeout; /* Timeout for event flush */
+ uint16 num_events_flush; /* Number of events aggregated before flush */
+} event_aggr_config_t;
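+
+/* A plausible host-side configuration (illustrative only), built from the
+ * bounds and defaults defined above:
+ *
+ *	event_aggr_config_t cfg = {
+ *		.version = EVENT_AGGR_CFG_VERSION,
+ *		.len = sizeof(event_aggr_config_t),
+ *		.flags = EVENT_AGGR_ENABLED,
+ *		.bufsize = EVENT_AGGR_BUFSIZE_MIN,
+ *		.flush_timeout = EVENT_AGGR_FLUSH_TIMEOUT_DEFAULT,
+ *		.num_events_flush = EVENT_AGGR_NUM_EVENTS_FLUSH,
+ *	};
+ */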
+
+#ifndef WL_TDMTX_TYPEDEF_HAS_ALIAS
+typedef tdmtx_cnt_v1_t tdmtx_cnt_t;
+typedef tdmtx_cnt_shm_v1_t tdmtx_cnt_shm_t;
+typedef wl_tdmtx_ecounters_v1_t wl_tdmtx_ecounters_t;
+#define WL_CNT_TDMTX_STRUCT_SZ (sizeof(tdmtx_cnt_t))
+#define WL_CNT_TDMTX_SHM_SZ (sizeof(tdmtx_cnt_shm_t))
+#endif
+
+/** chanctxt related statistics */
+#define CHANCTXT_STATS_VERSION_1 1
+#define CHANCTXT_STATS_CURRENT_VERSION CHANCTXT_STATS_VERSION_1
+typedef struct wlc_chanctxt_stats {
+ uint32 excursionq_end_miss;
+ uint32 activeq_end_miss;
+ uint32 no_chanctxt_count;
+ uint32 txqueue_end_incomplete;
+ uint32 txqueue_start_incomplete;
+} wlc_chanctxt_stats_core_t;
+
+typedef struct chanctxt_stats {
+ uint16 version;
+ uint16 length;
+ wlc_chanctxt_stats_core_t corestats[MAX_NUM_D11CORES];
+} wlc_chanctxt_stats_t;
+
+typedef struct wl_txdc_ioc {
+ uint8 ver;
+ uint8 id; /* ID of the sub-command */
+ uint16 len; /* total length of all data[] */
+ uint8 data[]; /* var len payload */
+} wl_txdc_ioc_t;
+
+/*
+ * iovar subcommand ids
+ */
+enum {
+ IOV_TXDC_ENB = 1,
+ IOV_TXDC_MODE = 2,
+ IOV_TXDC_DUMP = 3,
+ IOV_TXDC_LAST
+};
+
+/* WL_NAN_XTLV_SLOT_STATS */
+/* WL_NAN_EVENT_SLOT_START, WL_NAN_EVENT_SLOT_END */
+typedef struct nan_slot_event_data {
+ uint32 cur_slot_idx; /* current idx in channel schedule */
+ uint32 fw_time; /* target current time in microseconds */
+ uint32 band; /* current band (2G/5G) for which the event is received */
+} nan_slot_event_data_t;
+
+#ifndef BCMUTILS_ERR_CODES
+
+/* SAE (Simultaneous Authentication of Equals) error codes.
+ * These error codes are local.
+ */
+
+/* SAE status codes are reserved from -3072 to -4095 (1K) */
+
+enum wl_sae_status {
+ WL_SAE_E_AUTH_FAILURE = -3072,
+ /* Discard silently */
+ WL_SAE_E_AUTH_DISCARD = -3073,
+ /* Authentication in progress */
+ WL_SAE_E_AUTH_CONTINUE = -3074,
+ /* Invalid scalar/elt */
+ WL_SAE_E_AUTH_COMMIT_INVALID = -3075,
+ /* Invalid confirm token */
+ WL_SAE_E_AUTH_CONFIRM_INVALID = -3076,
+ /* Peer scalar validation failure */
+ WL_SAE_E_CRYPTO_SCALAR_VALIDATION = -3077,
+ /* Peer element prime validation failure */
+ WL_SAE_E_CRYPTO_ELE_PRIME_VALIDATION = -3078,
+ /* Peer element is not on the curve */
+ WL_SAE_E_CRYPTO_ELE_NOT_ON_CURVE = -3079,
+ /* Generic EC error (elliptic curve related) */
+ WL_SAE_E_CRYPTO_EC_ERROR = -3080,
+ /* Both local and peer mac addrs are same */
+ WL_SAE_E_CRYPTO_EQUAL_MACADDRS = -3081,
+ /* Loop exceeded in deriving the scalar */
+ WL_SAE_E_CRYPTO_SCALAR_ITER_EXCEEDED = -3082,
+ /* ECC group is unsupported */
+ WL_SAE_E_CRYPTO_UNSUPPORTED_GROUP = -3083,
+ /* Exceeded the hunting-and-pecking counter */
+ WL_SAE_E_CRYPTO_PWE_COUNTER_EXCEEDED = -3084,
+ /* SAE crypto component is not initialized */
+ WL_SAE_E_CRYPTO_NOT_INITED = -3085,
+ /* bn_get has failed */
+ WL_SAE_E_CRYPTO_BN_GET_ERROR = -3086,
+ /* bn_set has failed */
+ WL_SAE_E_CRYPTO_BN_SET_ERROR = -3087,
+ /* PMK is not computed yet */
+ WL_SAE_E_CRYPTO_PMK_UNAVAILABLE = -3088,
+ /* Peer confirm did not match */
+ WL_SAE_E_CRYPTO_CONFIRM_MISMATCH = -3089,
+ /* Element K is at infinity on the curve */
+ WL_SAE_E_CRYPTO_KEY_AT_INFINITY = -3090,
+ /* SAE Crypto private data magic number mismatch */
+ WL_SAE_E_CRYPTO_PRIV_MAGIC_MISMATCH = -3091,
+ /* Max retry exhausted */
+ WL_SAE_E_MAX_RETRY_LIMIT_REACHED = -3092
+};
+
+/* PMK manager block. Event codes from -5120 to -6143 */
+
+/* PSK hashing event codes */
+typedef enum wlc_pmk_psk_hash_status {
+ WL_PMK_E_PSK_HASH_FAILED = -5120,
+ WL_PMK_E_PSK_HASH_DONE = -5121,
+ WL_PMK_E_PSK_HASH_RUNNING = -5122,
+ WL_PMK_E_PSK_INVALID = -5123,
+ WL_PMK_E_PSK_NOMEM = -5124
+} wlc_pmk_psk_hash_status_t;
+
+#endif /* BCMUTILS_ERR_CODES */
+
+/* Block Channel */
+#define WL_BLOCK_CHANNEL_VER_1 1u
+
+typedef struct wl_block_ch_v1 {
+ uint16 version;
+ uint16 len;
+ uint32 band; /* Band select */
+ uint8 channel_num; /* The number of block channels in the selected band */
+ uint8 padding[3];
+ uint8 channel[]; /* Channel to block, Variable Length */
+} wl_block_ch_v1_t;
+
+typedef struct dma_wl_addr_region {
+ uint32 addr_low;
+ uint32 addr_high;
+} dma_wl_addr_region_t;
+
+#define WL_ROAMSTATS_IOV_VERSION 1
+
+#define MAX_PREV_ROAM_EVENTS 16u
+
+#define ROAMSTATS_UNKNOWN_CNT 0xFFFFu
+
+/* roaming statistics counter structures */
+typedef struct wlc_assoc_roamstats_event_msg_v1 {
+ uint32 event_type; /* Message (see below) */
+ uint32 status; /* Status code (see below) */
+ uint32 reason; /* Reason code (if applicable) */
+ uint32 timestamp; /* Timestamp of event */
+} wlc_assoc_roamstats_event_msg_v1_t;
+
+enum wl_roamstats_cmd_id {
+ WL_ROAMSTATS_XTLV_CMD_VER = 0,
+ WL_ROAMSTATS_XTLV_CMD_RESET = 1,
+ WL_ROAMSTATS_XTLV_CMD_STATUS = 2,
+ WL_ROAMSTATS_XTLV_CMD_LAST /* Keep this at the end */
+};
+
+enum wl_roamstats_xtlv_id {
+ WL_ROAMSTATS_XTLV_VER = 0x0,
+ WL_ROAMSTATS_XTLV_COUNTER_INFO = 0x1,
+ WL_ROAMSTATS_XTLV_PREV_ROAM_EVENTS = 0x2,
+ WL_ROAMSTATS_XTLV_REASON_INFO = 0x3
+};
+
+/* WL_ROAMSTATS_XTLV_COUNTER_INFO */
+typedef struct {
+ uint32 initial_assoc_time;
+ uint32 prev_roam_time;
+ uint32 host_access_time;
+ uint16 roam_success_cnt;
+ uint16 roam_fail_cnt;
+ uint16 roam_attempt_cnt;
+ uint16 max_roam_target_cnt;
+ uint16 min_roam_target_cnt;
+ uint16 max_cached_ch_cnt;
+ uint16 min_cached_ch_cnt;
+ uint16 partial_roam_scan_cnt;
+ uint16 full_roam_scan_cnt;
+} roamstats_counter_info_v1_t;
+
+/* WL_ROAMSTATS_XTLV_PREV_ROAM_EVENTS */
+typedef struct {
+ uint16 max;
+ uint16 pos;
+ wlc_assoc_roamstats_event_msg_v1_t roam_event[];
+} roamstats_prev_roam_events_v1_t;
+
+/* WL_ROAMSTATS_XTLV_REASON_INFO */
+typedef struct {
+ uint16 max;
+ uint16 reason_cnt[];
+} roamstats_reason_info_v1_t;
+
+#ifdef HEALTH_CHECK_WLIOCTL
+/* Health check status format:
+ * reporting status size = uint32
+ * 8 LSB bits are reserved for: WARN (0), ERROR (1), and other levels
+ * MSB 24 bits are reserved for client to fill in its specific status
+ */
+#define HEALTH_CHECK_STATUS_OK 0
+/* Bit positions. */
+#define HEALTH_CHECK_STATUS_WARN 0x1
+#define HEALTH_CHECK_STATUS_ERROR 0x2
+#define HEALTH_CHECK_STATUS_TRAP 0x4
+#define HEALTH_CHECK_STATUS_NOEVENT 0x8
+
+/* Indication that required information is populated in log buffers */
+#define HEALTH_CHECK_STATUS_INFO_LOG_BUF 0x80
+#define HEALTH_CHECK_STATUS_MASK (0xFF)
+
+#define HEALTH_CHECK_STATUS_MSB_SHIFT 8
+#endif /* HEALTH_CHECK_WLIOCTL */
+
+/** receive signal reporting module interface */
+
+#define WL_RXSIG_IOV_MAJOR_VER (1u)
+#define WL_RXSIG_IOV_MINOR_VER (1u)
+#define WL_RXSIG_IOV_MAJOR_VER_SHIFT (8u)
+#define WL_RXSIG_IOV_VERSION \
+ ((WL_RXSIG_IOV_MAJOR_VER << WL_RXSIG_IOV_MAJOR_VER_SHIFT) | WL_RXSIG_IOV_MINOR_VER)
+#define WL_RXSIG_IOV_GET_MAJOR(x) ((x) >> WL_RXSIG_IOV_MAJOR_VER_SHIFT)
+#define WL_RXSIG_IOV_GET_MINOR(x) ((x) & 0xFF)
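+
+/* For example, with the definitions above:
+ *	WL_RXSIG_IOV_VERSION == ((1u << 8u) | 1u) == 0x0101
+ *	WL_RXSIG_IOV_GET_MAJOR(0x0101) == 1
+ *	WL_RXSIG_IOV_GET_MINOR(0x0101) == 1
+ */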
+
+enum wl_rxsig_cmd_rssi_mode {
+ WL_RXSIG_MODE_DB = 0x0,
+ WL_RXSIG_MODE_QDB = 0x1,
+ WL_RXSIG_MODE_LAST
+};
+
+/* structure defs for 'wl rxsig [cmd]' iovars */
+enum wl_rxsig_iov_v1 {
+ WL_RXSIG_CMD_RSSI = 0x1, /**< combined rssi moving avg */
+ WL_RXSIG_CMD_SNR = 0x2, /**< combined snr moving avg */
+ WL_RXSIG_CMD_RSSIANT = 0x3, /**< rssi moving avg per-ant */
+ WL_RXSIG_CMD_SNRANT = 0x4, /**< snr moving avg per-ant */
+ WL_RXSIG_CMD_SMPLWIN = 0x5, /**< config for sampling window size */
+ WL_RXSIG_CMD_SMPLGRP = 0x7, /**< config for grouping of pkt type */
+ WL_RXSIG_CMD_STA_MA = 0x8,
+ WL_RXSIG_CMD_MAMODE = 0x9,
+ WL_RXSIG_CMD_MADIV = 0xa,
+ WL_RXSIG_CMD_DUMP = 0xb,
+ WL_RXSIG_CMD_DUMPWIN = 0xc,
+ WL_RXSIG_CMD_TOTAL
+};
+
+struct wl_rxsig_cfg_v1 {
+ uint16 version;
+ chanspec_t chan; /**< chanspec info for querying stats */
+ uint8 pmac[ETHER_ADDR_LEN]; /**< peer(link) mac address */
+};
+
+struct wl_rxsig_iov_rssi_v1 {
+ int8 rssi;
+ uint8 rssi_qdb;
+ uint8 pad[2];
+};
+
+struct wl_rxsig_iov_snr_v1 {
+ int16 snr;
+ uint16 pad;
+};
+
+struct wl_rxsig_iov_rssi_ant_v1 {
+ int8 deci[WL_RSSI_ANT_MAX];
+ uint8 frac[WL_RSSI_ANT_MAX];
+ uint8 rssi_mode; /**< MODE_DB or MODE_QDB */
+ uint8 num_of_ant; /**< total number of ants */
+ uint8 pad[2]; /**< padding for 32bit align */
+};
+
+#ifdef BCM_SDC
+
+#define SDC_TRIGGER_CONFIG_VER_1 1
+typedef struct {
+ uint16 version;
+ uint16 type;
+ uint8 activate;
+ uint8 pad;
+} sdc_trigger_cfg_t;
+
+typedef enum sdc_trigger_types {
+ SDC_TYPE_STA_ONBOARD_DEBUG = 1,
+ SDC_TYPE_SCAN_DEBUG = 2,
+#ifdef SDC_TEST
+ /*
+ * This is for test purpose only. Don't assign specific value.
+ * Keep at the end
+ */
+ SDC_TYPE_TEST1,
+ SDC_TYPE_TEST2,
+ SDC_TYPE_TEST3,
+#endif /* SDC_TEST */
+ SDC_TYPE_MAX_TRIGGER
+} sdc_trigger_types_t;
+
+/* *** SDC_TYPE_STA_ONBOARD_DEBUG specific ******* */
+
+/* tlv IDs uniquely identifies tx and rx stats component */
+enum wl_slice_hist_stats_xtlv_id {
+ WL_STATE_HIST_TX_TOSS_REASONS = 0x1,
+ WL_STATE_HIST_RX_TOSS_REASONS = 0x2
+};
+
+#ifndef WLC_HIST_TOSS_LEN
+#define WLC_HIST_TOSS_LEN (8u)
+#endif
+#define WL_HIST_COMPACT_TOSS_STATS_TX_VER_1 (1u)
+#define WL_HIST_COMPACT_TOSS_STATS_RX_VER_1 (1u)
+
+/* Format of running toss reasons with seq
+ * [see HIST_TOSS_xxxx macros]
+ * bits [7..0] : 8 bits : toss sts.
+ * [11..8] : cfgidx
+ * [15..12]: ac
+ * [31..16]: seq
+ */
+#define HIST_TOSS_STS_POS (0u)
+#define HIST_TOSS_STS_MASK (0x000000ffu)
+#define HIST_TOSS_CFGIDX_POS (8u)
+#define HIST_TOSS_CFGIDX_MASK (0x00000f00u)
+#define HIST_TOSS_AC_POS (12u)
+#define HIST_TOSS_AC_MASK (0x0000f000u)
+#define HIST_TOSS_SEQ_POS (16u)
+#define HIST_TOSS_SEQ_MASK (0xffff0000u)
+
+/* Format of toss reasons with count
+ * bits [15..0] : 16 bits : toss reason
+ * bits [31..16]: 16 bits : count
+ */
+#define HIST_TOSS_RC_REASON_POS (0u)
+#define HIST_TOSS_RC_REASON_MASK (0xffffu)
+#define HIST_TOSS_RC_COUNT_POS (16u)
+#define HIST_TOSS_RC_COUNT_MASK (0xffff0000u)
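+
+/* Decoding sketch (illustrative): given one packed word w from a running
+ * toss-reason array and one word rc from a reason/count array:
+ *
+ *	uint32 sts    = (w & HIST_TOSS_STS_MASK) >> HIST_TOSS_STS_POS;
+ *	uint32 cfgidx = (w & HIST_TOSS_CFGIDX_MASK) >> HIST_TOSS_CFGIDX_POS;
+ *	uint32 ac     = (w & HIST_TOSS_AC_MASK) >> HIST_TOSS_AC_POS;
+ *	uint32 seq    = (w & HIST_TOSS_SEQ_MASK) >> HIST_TOSS_SEQ_POS;
+ *
+ *	uint32 reason = (rc & HIST_TOSS_RC_REASON_MASK) >> HIST_TOSS_RC_REASON_POS;
+ *	uint32 count  = (rc & HIST_TOSS_RC_COUNT_MASK) >> HIST_TOSS_RC_COUNT_POS;
+ */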
+
+typedef struct {
+ uint16 version;
+ uint8 hist_toss_type; /* from wl_slice_hist_XX_stats_xtlv_id */
+ uint8 hist_toss_num; /* number of elements in hist_toss_xxx */
+ uint32 hist_toss_cur_idx; /* latest data is in this index */
+ uint32 hist_toss_reasons[WLC_HIST_TOSS_LEN]; /* last 8 reasons along with seq, etc as
+ * per HIST_TOSS_xxx format
+ */
+ uint32 hist_toss_counts[WLC_HIST_TOSS_LEN]; /* toss counts corr to reasons */
+} wl_hist_compact_toss_stats_v1_t;
+
+#define WL_HIST_COMPACT_TOSS_STATS_TX_VER_2 (2u)
+#define WL_HIST_COMPACT_TOSS_STATS_RX_VER_2 (2u)
+
+typedef struct {
+ uint16 version;
+ uint8 htr_type; /* from wl_slice_hist_XX_stats_xtlv_id */
+ uint8 htr_num; /* number of elements in htr_running or htr_rc */
+ uint16 htr_rnidx; /* htr_running[rnidx-1] has latest data */
+ uint16 htr_rcidx; /* htr_rc[rcidx-1] has latest data */
+ uint32 htr_running[WLC_HIST_TOSS_LEN]; /* last 8 reasons along with seq, etc as
+ * per WLC_SDC_COMPACT_TOSS_REASON() format
+ */
+ uint32 htr_rn_ts[WLC_HIST_TOSS_LEN]; /* time stamps corr to htr_running data */
+ uint32 htr_rc[WLC_HIST_TOSS_LEN]; /* last 8 toss reasons and counts in
+ * WLC_SDC_COMPACT_TOSS_RC() format
+ */
+ uint32 htr_rc_ts[WLC_HIST_TOSS_LEN]; /* time stamps corr to htr_rc */
+} wl_hist_compact_toss_stats_v2_t;
+
+#define WL_HIST_COMPACT_TOSS_STATS_TX_VER_3 (3u)
+#define WL_HIST_COMPACT_TOSS_STATS_RX_VER_3 (3u)
+
+typedef struct {
+ uint8 toss_reason;
+ uint8 cfg_ac;
+ uint16 toss_seq;
+} toss_info_t;
+
+typedef struct {
+ uint16 toss_reason;
+ uint16 toss_cnt;
+} toss_cnt_t;
+
+typedef struct {
+ uint16 version;
+ uint8 htr_type; /* from wl_slice_hist_XX_stats_xtlv_id */
+ uint8 htr_num; /* number of elements in htr_running or htr_rc */
+ uint16 htr_rnidx; /* htr_running[rnidx-1] has latest data */
+ uint16 htr_rcidx; /* htr_rc[rcidx-1] has latest data */
+ toss_info_t htr_running[WLC_HIST_TOSS_LEN]; /* last 8 reasons along with seq, etc as
+ * per WLC_SDC_COMPACT_TOSS_REASON() format
+ */
+ uint32 htr_rn_ts[WLC_HIST_TOSS_LEN]; /* time stamps corr to htr_running data */
+ toss_cnt_t htr_rc[WLC_HIST_TOSS_LEN]; /* last 8 toss reasons and counts in
+ * WLC_SDC_COMPACT_TOSS_RC() format
+ */
+ uint32 htr_rc_ts[WLC_HIST_TOSS_LEN]; /* time stamps corr to htr_rc */
+} wl_hist_compact_toss_stats_v3_t;
+
+/* ***END of SDC_TYPE_STA_ONBOARD_DEBUG specific ******* */
+
+#endif /* BCM_SDC */
+
+typedef struct wl_avs_info_v1 {
+ uint16 version; /* Structure version */
+ uint16 equ_version; /* Equation Version */
+ uint32 RO; /* RO in OTP */
+ uint32 equ_csr; /* Equated CSR */
+ uint32 read_csr; /* Read Back CSR */
+ uint32 aging; /* aging setting in nvram */
+} wl_avs_info_v1_t;
+
+#define WL_AVS_INFO_VER_1 1
+
+/* bitmap for clm_flags iovar */
+#define WL_CLM_TXBF 0x01u /**< Flag for Tx beam forming */
+#define WL_CLM_RED_EU 0x02u /* Flag for EU RED */
+#define WL_CLM_EDCRS_EU 0x04u /**< Use EU post-2015 energy detect */
+#define WL_CLM_DFS_TPC 0x08u /**< Flag for DFS TPC */
+#define WL_CLM_RADAR_TYPE_EU 0x10u /**< Flag for EU */
+#define WL_CLM_DSA 0x20u /**< Flag for DSA */
+#define WL_CLM_PER_ANTENNA 0x40u /**< Flag for PER_ANTENNA */
+#define WL_CLM_LO_GAIN_NBCAL 0x20u /**< Flag for LO_GAIN_NBCAL */
+#define WL_CLM_PSD 0x80u /**< Flag for PSD */
+#define WL_CLM_HE 0x100u /**< Flag for HE */
+#define WL_CLM_NO_80MHZ 0x200u /**< Flag for NO_80MHZ */
+#define WL_CLM_NO_40MHZ 0x400u /**< Flag for NO_40MHZ */
+#define WL_CLM_NO_MIMO 0x800u /**< Flag for NO_MIMO */
+#define WL_CLM_HAS_DSSS_EIRP 0x1000u /**< Flag for HAS_DSSS_EIRP */
+#define WL_CLM_HAS_OFDM_EIRP 0x2000u /**< Flag for HAS_OFDM_EIRP */
+#define WL_CLM_NO_160MHZ 0x4000u /**< Flag for NO_160MHZ */
+#define WL_CLM_NO_80_80MHZ 0x8000u /**< Flag for NO_80_80MHZ */
+#define WL_CLM_NO_240MHZ 0x10000u /**< Flag for NO_240MHZ */
+#define WL_CLM_NO_320MHZ 0x200000u /**< Flag for NO_320MHZ */
+#define WL_CLM_NO_160_160MHZ 0x400000u /**< Flag for NO_160_160MHZ */
+#define WL_CLM_DFS_FCC WL_CLM_DFS_TPC /**< Flag for DFS FCC */
+#define WL_CLM_DFS_EU (WL_CLM_DFS_TPC | WL_CLM_RADAR_TYPE_EU) /**< Flag for DFS EU */
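+
+/* Note: WL_CLM_DFS_EU is a composite mask (DFS_TPC | RADAR_TYPE_EU), so test
+ * for it with a full-mask compare rather than a non-zero AND, e.g.
+ *	if ((clm_flags & WL_CLM_DFS_EU) == WL_CLM_DFS_EU) { handle EU DFS }
+ */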
+
+/* SC (scan core) command IDs */
+enum wl_sc_cmd {
+ WL_SC_CMD_DBG = 0,
+ WL_SC_CMD_CNX = 1,
+ WL_SC_CMD_CAP = 2,
+ WL_SC_CMD_CONFIG = 3,
+ WL_SC_CMD_PMALERT_ADJ_FACTOR = 4,
+ WL_SC_CMD_LAST
+};
+
+/* WBUS sub-command IDs for unit test */
+#define WL_WBUS_INA_SLOT_START 0x01u /**< Inactive slot start sub command ID. */
+#define WL_WBUS_INA_SLOT_STOP 0x02u /**< Inactive slot stop sub command ID. */
+
+/* WBUS (WiFi BT uniform scheduler) command IDs */
+enum wl_wbus_cmd {
+ WL_WBUS_CMD_VER = 0,
+ WL_WBUS_CMD_STATS = 1,
+ WL_WBUS_CMD_UNIT_TEST = 2,
+ WL_WBUS_CMD_BT_TEST = 3,
+ WL_WBUS_CMD_CAP = 4,
+ WL_WBUS_CMD_LAST
+};
+
+#define WBUS_BT_SCHED_TEST_PARAMS_VER_1 1
+
+typedef struct wbus_bt_sched_test_params_v1 {
+ uint16 version;
+ uint8 min_duty_cycle;
+ uint8 type;
+ uint32 flags;
+ uint32 action;
+ uint32 duration;
+ uint32 interval;
+} wbus_bt_sched_test_params_v1_t;
+
+enum wl_wbus_bt_test_type {
+ WBUS_TEST_BT_USER_TYPE_LE_SCAN = 0u,
+ WBUS_TEST_BT_USER_TYPE_PAGE_SCAN = 1u,
+ WBUS_TEST_BT_USER_TYPE_MAX = 2u
+};
+
+#define WBUS_BT_SCHED_ADD 0u
+#define WBUS_BT_SCHED_REMOVE 1u
+#define WBUS_BT_SCHED_INVALID 0xFFu
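+
+/* Request sketch (illustrative; the meaning of type/action is an assumption
+ * based on the field names and the values defined above): add a BT LE-scan
+ * test user:
+ *
+ *	wbus_bt_sched_test_params_v1_t params;
+ *	memset(&params, 0, sizeof(params));
+ *	params.version = WBUS_BT_SCHED_TEST_PARAMS_VER_1;
+ *	params.type = WBUS_TEST_BT_USER_TYPE_LE_SCAN;
+ *	params.action = WBUS_BT_SCHED_ADD;
+ */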
+
+enum wlc_btcec_iocv_subcmds {
+ WL_BTCEC_SUBCMD_TEST = 0,
+ WL_BTCEC_SUBCMD_STATS = 1,
+ WL_BTCEC_SUBCMD_TEST_BTMC_MODE = 2,
+ WL_BTCEC_SUBCMD_LAST
+};
+
+/* btcec - bt schedule id's */
+typedef enum {
+ BTCEC_SCHED_ID_LE_SCAN = 1,
+ BTCEC_SCHED_ID_PAGE_SCAN = 2
+} BTCEC_SCHED_IDS;
+/* max schedule id must be equal to last valid schedule id */
+#define BTCEC_SCHED_MAX_V1 BTCEC_SCHED_ID_PAGE_SCAN
+
+/* meanings of flags */
+/* bit0: schedule with the given sch_id is present in btcec */
+#define BTCEC_SCHSTATS_FLAG_ACTIVE (0x1)
+/* bit1: schedule with the given sch_id is successfully registered with wbus */
+#define BTCEC_SCHSTATS_FLAG_WBUS_REG_OK (0x2)
+/* bit2: schedule with the given sch_id is being scheduled in wbus */
+#define BTCEC_SCHSTATS_FLAG_WBUS_SCHEDULED (0x4)
+
+/* v1 supports pmdur only */
+#define WLC_BTCEC_STATS_V1 1u
+typedef struct wlc_btcec_stats_v1 {
+ uint16 ver;
+ uint16 len;
+ uint32 pmdur; /* duration in ms granted to bt, during which wlsc slept */
+} wlc_btcec_stats_v1_t;
+
+/* v2 supports 1 page scan and 1 le scan */
+#define WLC_BTCEC_STATS_V2 2u
+/* btcec per schedule stats for general reporting */
+typedef struct wlc_btcec_sch_stats_v2 {
+ uint8 sch_id; /* schedule id */
+ uint8 flags; /* flags. see BTCEC_SCHSTATS_FLAG_xxx */
+ uint8 pad[2];
+ uint32 slcnt; /* num slots granted to bt */
+ uint32 skip; /* count of btsc le/page scan, skipped */
+ uint32 btdur; /* duration in ms granted to bt, during which wlsc slept */
+ uint32 overlap; /* wlsc was awake and btsc le/page scan overlapped, in ms */
+ uint32 txblnk; /* wlauxtx blanked btsc le/page scan, in ms */
+} wlc_btcec_sch_stats_v2_t;
+
+/* btcec stats for general reporting */
+typedef struct wlc_btcec_stats_v2 {
+ uint16 ver;
+ uint16 len;
+ uint32 rx_msg_cnt; /* received messages counter */
+ uint32 tx_msg_cnt; /* transmitted messages counter */
+ uint32 add_msg_cnt; /* rx add messages counter */
+ uint32 del_msg_cnt; /* rx del messages counter */
+ uint32 stats_dur; /* dur in sec for which stats is accumulated */
+ uint8 pad[3];
+ uint8 num_sstats; /* number of elements in sstats struct */
+ wlc_btcec_sch_stats_v2_t sstats[]; /* sch specific stats */
+} wlc_btcec_stats_v2_t;
+
+/* v3 = v2 + ac_overlap */
+#define WLC_BTCEC_STATS_V3 3u
+/* btcec per schedule stats for general reporting */
+typedef struct wlc_btcec_sch_stats_v3 {
+ uint8 sch_id; /* schedule id */
+ uint8 flags; /* flags. see BTCEC_SCHSTATS_FLAG_xxx */
+ uint8 pad[2];
+ uint32 slcnt; /* num slots granted to bt */
+ uint32 skip; /* count of btsc le/page scan, skipped */
+ uint32 btdur; /* duration in ms granted to bt, during which wlsc slept */
+ uint32 overlap; /* wlsc was awake and btsc le/page scan overlapped, in ms */
+ uint32 txblnk; /* wlauxtx blanked btsc le/page scan, in ms */
+ uint32 ac_overlap; /* wlaux overlapped btsc le/page scan, in ms */
+ uint16 sched_duration; /* duration finally scheduled in msch, in ms */
+ uint16 sched_interval; /* interval finally scheduled in msch, in ms */
+ uint16 req_duration; /* duration requested from msch, in ms */
+ uint16 req_interval; /* interval requested from msch, in ms */
+ uint16 min_duty_cycle; /* minimum duty cycle requested */
+ uint16 pad2;
+} wlc_btcec_sch_stats_v3_t;
+
+/* btcec stats for general reporting */
+typedef struct wlc_btcec_stats_v3 {
+ uint16 ver;
+ uint16 len;
+ uint32 rx_msg_cnt; /* received messages counter */
+ uint32 tx_msg_cnt; /* transmitted messages counter */
+ uint32 add_msg_cnt; /* rx add messages counter */
+ uint32 del_msg_cnt; /* rx del messages counter */
+ uint32 stats_dur; /* dur in sec for which stats is accumulated */
+ uint8 pad[3];
+ uint8 num_sstats; /* number of elements in sstats struct */
+ wlc_btcec_sch_stats_v3_t sstats[]; /* sch specific stats */
+} wlc_btcec_stats_v3_t;
+
+/* btcec periodic ecounters structs
+ * [similar to wlc_btcec_stats_vX_t, but constrained in size due to its periodicity of reporting]
+ */
+#define WLC_BTCEC_PERIODIC_CNTRS_V1 (1u)
+
+/* btcec per schedule stats for periodic ecounters reporting */
+typedef struct wlc_btcec_periodic_sch_stats_v1 {
+ uint8 sch_id; /* schedule id from BTCEC_SCHED_IDS */
+ uint8 flags; /* flags. see BTCEC_SCHSTATS_FLAG_xxx */
+ uint8 slcnt; /* num slots granted to bt */
+ uint8 skip; /* count of btsc le/page scan, skipped */
+ uint16 btdur; /* duration in ms granted to bt, during which wlsc slept */
+ uint16 overlap; /* wlsc was awake and btsc le/page scan overlapped, in ms */
+ uint16 txblnk; /* wlauxtx blanked btsc le/page scan, in ms */
+ uint16 ac_overlap; /* wlaux overlapped btsc le/page scan, in ms */
+ uint16 sched_duration; /* duration finally scheduled in msch, in ms */
+ uint16 sched_interval; /* interval finally scheduled in msch, in ms */
+ uint16 req_duration; /* duration requested from msch, in ms */
+ uint16 req_interval; /* interval requested from msch, in ms */
+ uint16 min_duty_cycle; /* minimum duty cycle requested */
+ uint16 pad2;
+} wlc_btcec_periodic_sch_stats_v1_t;
+
+/* btcec stats for periodic ecounters reporting */
+typedef struct {
+ uint16 ver;
+ uint16 len;
+ uint8 rx_msg_cnt; /* received messages counter */
+ uint8 tx_msg_cnt; /* transmitted messages counter */
+ uint8 add_msg_cnt; /* rx add messages counter */
+ uint8 del_msg_cnt; /* rx del messages counter */
+ uint8 pad[3];
+ uint8 num_sstats; /* number of elements in sstats struct */
+ wlc_btcec_periodic_sch_stats_v1_t sstats[BTCEC_SCHED_MAX_V1]; /* sch specific */
+} wlc_btcec_periodic_stats_v1_t;
+
+#define WBUS_OFFLOAD_STATS_V1 1u
+#define WBUS_OFFLOAD_USER_STATS_V1 1u
+
+typedef struct wbus_offload_user_stats_v1 {
+ uint16 version; /* version of this structure */
+ uint16 len; /* size of this structure */
+ uint8 type; /* Offload type */
+ uint8 pad[3];
+ uint32 num_accepted; /* num of times user got accepted */
+ uint32 num_rejected; /* num of times user got rejected */
+ uint32 num_failed; /* num of times user accept failed */
+} wbus_offload_user_stats_v1_t;
+
+typedef struct wbus_offload_stats_v1 {
+ uint16 version; /* version of this structure */
+ uint16 len; /* size of this structure */
+ uint32 num_accept_ok; /* num accept pass */
+ uint32 num_accept_fail; /* num accept fail */
+ uint32 num_rejected; /* num of rejected users so far */
+ uint32 num_rejected_bt; /* num of rejected users so far for BT */
+ uint32 num_rejected_all; /* num times all offloads are rejected */
+ uint8 pad[3];
+ uint8 num_user; /* num of users stats */
+ wbus_offload_user_stats_v1_t user_stats[]; /* per user stats */
+} wbus_offload_stats_v1_t;
+
+#define KEY_UPDATE_INFO_VER_V1 1
+typedef struct key_update_info_v1
+{
+ uint16 ver;
+ uint8 pad;
+ uint8 flags;
+ uint32 timestamp;
+ uint32 algo;
+ uint32 key_flags;
+ struct ether_addr ea;
+ struct ether_addr sa;
+} key_update_info_v1_t;
+
+/* Key update flag bit field */
+#define KEY_UPD_FLAG_ADD_KEY 0x1 /* 0 - Removal, 1 - Add key */
+
+#ifdef WLLLW
+/* LLW Session */
+#define LLW_VERSION 1
+#define LLW_STATS_VERSION 1
+
+/* LLW roles */
+#define LLW_ROLE_SCHEDULER 0
+#define LLW_ROLE_CLIENT 1
+
+/* LLW modes */
+#define LLW_MODE_GAPS 0
+#define LLW_MODE_BACK_TO_BACK 1
+
+/* LLW session max values */
+#define LLW_MAX_SESSION_ID 10
+#define LLW_MAX_FLOW_ID 40
+#define LLW_MAX_CLIENT_NUM 15
+#define LLW_MAX_GAPS_PERIOD 20
+#define LLW_MAX_GAPS_VAR 3
+#define LLW_MAX_RETX_CNT 10
+#define LLW_MAX_AIFSN EDCF_AIFSN_MAX
+#define LLW_MAX_CWMIN EDCF_ECW_MAX
+#define LLW_MAX_CWMAX EDCF_ECW_MAX
+#define LLW_MAX_PER_NUMERATOR 100
+#define LLW_MAX_PER_DENOM 10000
+#define LLW_MAX_CLIENT_ID 15
+#define LLW_MAX_PKT_SIZE 1500
+#define LLW_MAX_PKT_NUM 10
+#define LLW_MAX_MCS 9
+#define LLW_MAX_NUM_STREAMS 8
+#define LLW_MAX_IBS 32
+
+/* Per LLW session config */
+/* WL_LLW_CMD_SESSION_CREATE, WL_LLW_CMD_SESSION_UPDATE */
+typedef struct wl_llw_session_cfg {
+ uint8 session_id;
+ uint8 role;
+ uint8 mode;
+ uint8 client_id;
+ uint8 gaps_period;
+ uint8 gaps_var;
+ uint8 aifsn;
+ uint8 ecwmin; /* exponent value for minimum contention window */
+ uint8 ecwmax; /* exponent value for maximum contention window */
+ uint8 mcs;
+ uint8 num_streams;
+ uint8 ibs; /* interblock spacing in usecs, for spacing between Transaction Blocks */
+ uint16 ul_pkt_size;
+ uint16 dl_pkt_size;
+ uint16 per_denom; /* denominator for target PER */
+ uint8 per_numerator; /* this value divided by per_denom gives the target PER */
+ uint8 dl_pkt_num;
+ uint8 client_num;
+ uint8 retx_cnt;
+ uint8 pwr_save;
+ uint8 auto_ba; /* automatic RX/TX BA session setup (no negotiation needed) */
+ uint8 if_index;
+ uint8 padding[3];
+ struct ether_addr multicast_addr;
+ struct ether_addr scheduler_addr;
+} wl_llw_session_cfg_t;
+
+/* WL_LLW_CMD_SESSION_DELETE, WL_LLW_CMD_SESSION_ENABLE, WL_LLW_CMD_SESSION_DISABLE, */
+/* WL_LLW_CMD_SESSION_GET */
+typedef struct wl_llw_session_cmd {
+ uint8 session_id;
+ uint8 padding[3];
+} wl_llw_session_cmd_t;
+
+/* LLW client config */
+/* WL_LLW_CMD_CLIENT_ADD, WL_LLW_CMD_CLIENT_DELETE, WL_LLW_CMD_CLIENT_GET */
+typedef struct wl_llw_client_cfg {
+ uint8 session_id;
+ uint8 client_id;
+ struct ether_addr mac;
+} wl_llw_client_cfg_t;
+
+/* Get list of session IDs from FW */
+/* WL_LLW_CMD_SESSION_ID */
+typedef struct llw_session_id_list {
+ uint8 id_count; /* Number of session IDs */
+ uint8 list[]; /* list of session IDs */
+} llw_session_id_list_t;
+
+/* LLW XTLV structures */
+typedef struct wl_llw_iov_cmd {
+ uint16 version;
+ uint8 cmd_cnt;
+ uint8 pad;
+ uint8 cmds[];
+} wl_llw_iov_cmd_t;
+
+typedef struct wl_llw_iov_sub_cmd {
+ uint16 type;
+ uint16 len;
+ union {
+ int32 status; /* Processed status - Set by FW */
+ uint32 options; /* Command Process Options - Set by Host */
+ } u;
+ uint8 data[];
+} wl_llw_iov_sub_cmd_t;
+
+/* to be used in type field of wl_llw_iov_sub_cmd_t structure while issuing LLW commands */
+typedef enum wl_llw_sub_cmd_xtlv_id {
+ WL_LLW_CMD_SESSION_ID,
+ WL_LLW_CMD_SESSION_CREATE,
+ WL_LLW_CMD_SESSION_DELETE,
+ WL_LLW_CMD_SESSION_UPDATE,
+ WL_LLW_CMD_SESSION_ENABLE,
+ WL_LLW_CMD_SESSION_DISABLE,
+ WL_LLW_CMD_SESSION_GET,
+ WL_LLW_CMD_CLIENT_ADD,
+ WL_LLW_CMD_CLIENT_DELETE,
+ WL_LLW_CMD_CLIENT_GET,
+ WL_LLW_CMD_FLOW_ADD,
+ WL_LLW_CMD_FLOW_DELETE,
+ WL_LLW_CMD_FLOW_GET,
+ WL_LLW_CMD_STATS
+} wl_llw_sub_cmd_xtlv_id_t;
+
+/* LLW stats */
+typedef enum wl_llw_xtlv {
+ WL_LLW_XTLV_STATS
+} wl_llw_xtlv_t;
+
+typedef struct wl_llw_stats {
+ uint32 txpackets;
+ uint32 txbytes;
+ uint32 txrts;
+ uint32 txnocts;
+ uint32 txnoack;
+ uint32 txfail;
+ uint32 txretry;
+ uint32 txdropped;
+ uint32 tx_avg_q_time;
+ uint32 tx_min_q_time;
+ uint32 tx_max_q_time;
+ uint32 tx_avg_rem_lifetime;
+ uint32 tx_min_rem_lifetime;
+ uint32 tx_max_rem_lifetime;
+ uint32 rxpackets;
+ uint32 rxbytes;
+ uint32 rxfail;
+ uint32 rxretry;
+ uint32 txschedfrm;
+ uint32 retxschedfrm;
+} wl_llw_stats_t;
+
+typedef struct wl_llw_stats_hdr {
+ uint16 version;
+ uint16 stats_cnt;
+ uint32 tot_len;
+ uint8 stat_xtlvs[];
+} wl_llw_stats_hdr_t;
+
+/* WL_LLW_XTLV_STATS */
+typedef struct wl_llw_stats_xtlv {
+ uint16 type;
+ uint16 len;
+ uint8 stats[];
+} wl_llw_stats_xtlv_t;
+
+/* WL_LLW_CMD_STATS */
+typedef struct wl_llw_stats_cmd {
+ uint8 session_id;
+ uint8 client_id;
+ uint16 padding;
+} wl_llw_stats_cmd_t;
+
+/* LLW flow ring ID config */
+/* WL_LLW_CMD_FLOW_ADD, WL_LLW_CMD_FLOW_DELETE, WL_LLW_CMD_FLOW_GET */
+typedef struct wl_llw_flow_cfg {
+ uint8 session_id;
+ uint8 flow_id;
+ uint16 padding;
+} wl_llw_flow_cfg_t;
+#endif /* WLLLW */
+
+#define WL_OMI_CONFIG_VERSION_1 1u
+
+/* values for valid_bm */
+#define OMI_CONFIG_VALID_BMP_RXNSS 0x0001u
+#define OMI_CONFIG_VALID_BMP_BW 0x0002u
+#define OMI_CONFIG_VALID_BMP_ULMU_DISABLE 0x0004u
+#define OMI_CONFIG_VALID_BMP_TXNSTS 0x0008u
+#define OMI_CONFIG_VALID_BMP_ERSU_DISABLE 0x0010u
+#define OMI_CONFIG_VALID_BMP_DLMU_RSD_RCM 0x0020u
+#define OMI_CONFIG_VALID_BMP_ULMU_DATA_DISABLE 0x0040u
+#define OMI_CONFIG_VALID_BMP_ALL 0x0FFFu
+
+#define OMI_CONFIG_BW_MAX 3u
+
+typedef struct wl_omi_config {
+ uint16 valid_bm; /* validity bitmask for each config */
+ uint8 rxnss;
+ uint8 bw;
+ uint8 ulmu_disable;
+ uint8 txnsts;
+ uint8 ersu_disable;
+ uint8 dlmu_resound_rec;
+ uint8 ulmu_data_disable;
+ uint8 pad[3];
+} wl_omi_config_t;
+
+typedef struct wl_omi_req {
+ uint16 version;
+ uint16 len;
+ wl_omi_config_t config;
+} wl_omi_req_v1_t;
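+
+/* Request sketch (illustrative): a config field is meaningful only when its
+ * bit is set in valid_bm, so a request that changes just RX NSS and
+ * bandwidth would look like:
+ *
+ *	wl_omi_req_v1_t req;
+ *	memset(&req, 0, sizeof(req));
+ *	req.version = WL_OMI_CONFIG_VERSION_1;
+ *	req.len = sizeof(req);
+ *	req.config.valid_bm = OMI_CONFIG_VALID_BMP_RXNSS | OMI_CONFIG_VALID_BMP_BW;
+ *	req.config.rxnss = 1;
+ *	req.config.bw = 1;	/* presumably bounded by OMI_CONFIG_BW_MAX */
+ */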
+
+/* Bits for ULMU disable reason */
+#define OMI_ULMU_DISABLED_HOST 0x01u /* Host has disabled through the OMI */
+
+#ifdef WLAWDL
+#define OMI_ULMU_DISABLED_AWDL 0x02u /* Disabled due to AWDL enabled */
+#endif /* WLAWDL */
+
+#define OMI_ULMU_DISABLED_NAN 0x04u /* Disabled due to NAN enabled */
+#define OMI_ULMU_DISABLED_BTCOEX 0x08u /* Disabled while in BT Coex activity */
+#define OMI_ULMU_DISABLED_LTECOEX 0x10u /* Disabled due to LTE Coex activity */
+#define OMI_ULMU_DISABLED_NON11AX_CONN 0x20u /* Disabled when not associated to an 11ax AP */
+#define OMI_ULMU_DISABLED_THROTTLE_ENABLE 0x40u /* Disabled due to throttle timer running */
+#define OMI_ULMU_DISABLED_TXCHAIN_DOWNGRADE 0x80u /* Disabled due to Txchain downgrade */
+#define OMI_ULMU_DISABLED_TX_DUTY_CYCLE 0x100u /* Disabled due to tx duty cycle */
+
+/* Bits for DLMU Resound Recommendation reason */
+#define OMI_DLMU_RSD_RCM_HOST (0x1u << 0u) /* Host directly set the bit */
+#define OMI_DLMU_RSD_RCM_MPF (0x1u << 1u) /* Set on MPF state change */
+
+#define WL_OMI_STATUS_VERSION_1 1u
+typedef struct wl_omi_status {
+ uint16 version;
+ uint16 len;
+ wl_omi_config_t omi_pending; /* OMI requests pending */
+ uint16 omi_data; /* current OM Control field for completed OMI requests */
+ uint16 ulmu_disable_reason; /* Bits representing UL OFDMA disable reasons */
+ uint32 ulmu_disable_duration; /* Duration (ms) for which UL OFDMA is disabled */
+} wl_omi_status_v1_t;
+
+#define WL_OMI_STATUS_VERSION_2 2u
+typedef struct wl_omi_status_v2 {
+ uint16 version;
+ uint16 len;
+ wl_omi_config_t omi_pending; /* OMI requests pending */
+ uint16 omi_data; /* Current OM Control field for completed OMI requests */
+ uint16 ulmu_disable_reason; /* Bits representing UL OFDMA disable reasons */
+ uint32 ulmu_disable_duration; /* Duration (ms) for which UL OFDMA is disabled */
+ uint32 dlmu_rsd_rcm_duration; /* Dur (ms) for which Resound Recommendation is set */
+ uint16 dlmu_rsd_rcm_mpf_state; /* The MPF state value */
+ uint16 dlmu_rsd_rcm_reason; /* DL MU-MIMO recommendation reasons bitmap */
+} wl_omi_status_v2_t;
+
+#define WL_ULMU_DISABLE_STATS_VERSION_1 1u
+typedef struct wl_ulmu_disable_stats {
+ uint16 version;
+ uint16 len;
+ uint32 ulmu_disable_ts; /* UL OFDMA disabled timestamp (ms) */
+ uint16 ulmu_disable_reason; /* Bits representing UL OFDMA disable reasons */
+ uint16 ulmu_disable_count; /* UL MU disable count during current infra association */
+ uint32 last_trig_rx_ts; /* Last trigger frame received timestamp (ms) */
+ uint16 trig_rx_count; /* No of trigger frames received after last UL OFDMA disable */
+ uint16 max_latency; /* Max latency for AP to react to UL OFDMA disable request (ms) */
+ uint16 min_latency; /* Min latency for AP to react to UL OFDMA disable request (ms) */
+ uint16 avg_latency; /* Avg latency for AP to react to UL OFDMA disable request (ms) */
+} wl_ulmu_disable_stats_v1_t;
+
+/* sub-xtlv IDs within WL_STATS_XTLV_WL_SLICE_TX_HISTOGRAMS */
+enum wl_tx_histogram_id {
+ WL_TX_HIST_TXQ_ID = 1,
+ WL_TX_HIST_LOW_TXQ_ID = 2,
+ WL_TX_HIST_SCBQ_ID = 3,
+ WL_TX_HIST_EXCUR_TXQ_ID = 4,
+ WL_TX_HIST_EXCUR_LOW_TXQ_ID = 5
+};
+
+/* common tx histogram structure */
+typedef struct wl_tx_hist {
+ uint16 hist_bmap; /* bit N indicates histogram follows for priority or fifo N */
+ uint16 hist_count; /* count of histograms in var len array */
+ uint32 hist[1]; /* var len array of histograms each prefix by hist length */
+} wl_tx_hist_t;
+
+#define WL_TX_HIST_FIXED_LEN (OFFSETOF(wl_tx_hist_t, hist))
+#define WL_TX_HIST_FULL_LEN(num_hist, max_hist_size) \
+ (WL_TX_HIST_FIXED_LEN + (num_hist) * \
+ (max_hist_size + 1) * sizeof(uint32))
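+
+/* Sizing example: for one histogram with 8 buckets,
+ *	WL_TX_HIST_FULL_LEN(1, 8) == 4 + 1 * (8 + 1) * 4 == 40 bytes
+ * (fixed header, plus one uint32 length prefix and 8 uint32 buckets).
+ */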
+
+/* structure for WL_TX_HIST_TXQ, WL_TX_HIST_EXCUR_TXQ_ID */
+typedef struct wl_tx_hist_txq {
+ uint32 bsscfg_bmap; /* bitmap of bsscfg indexes associated with this queue */
+ wl_tx_hist_t tx_hist; /* tx histograms */
+} wl_tx_hist_txq_t;
+
+#define WL_TX_HIST_TXQ_FIXED_LEN \
+ (OFFSETOF(wl_tx_hist_txq_t, tx_hist) + WL_TX_HIST_FIXED_LEN)
+#define WL_TX_HIST_TXQ_FULL_LEN(num_hist, max_hist_size) \
+ (OFFSETOF(wl_tx_hist_txq_t, tx_hist) + \
+ WL_TX_HIST_FULL_LEN(num_hist, max_hist_size))
+
+/* sub-xtlv IDs within WL_STATS_XTLV_WL_SLICE_TX_HISTOGRAMS */
+enum wl_txq_stop_histogram_id {
+ WL_TXQ_STOP_HIST_SW = 1,
+ WL_TXQ_STOP_HIST_HW = 2,
+ WL_TXQ_STOP_HIST_PKTS_SW = 3,
+ WL_TXQ_STOP_HIST_PKTS_HW = 4,
+ WL_TXQ_STOP_HIST_MAX = WL_TXQ_STOP_HIST_PKTS_HW
+};
+
+/* common tx histogram structure */
+typedef struct wl_txq_stop_hist {
+ wl_tx_hist_t tx_hist; /* tx histograms */
+} wl_txq_stop_hist_t;
+
+#define WL_TXQ_STOP_HIST_FIXED_LEN \
+ (OFFSETOF(wl_txq_stop_hist_t, tx_hist) + WL_TX_HIST_FIXED_LEN)
+#define WL_TXQ_STOP_HIST_FULL_LEN(num_hist, max_hist_size) \
+ (OFFSETOF(wl_txq_stop_hist_t, tx_hist) + \
+ WL_TX_HIST_FULL_LEN(num_hist, max_hist_size))
+
+/* structure for WL_TX_HIST_LOW_TXQ, WL_TX_HIST_EXCUR_LOW_TXQ_ID */
+typedef struct wl_tx_hist_low_txq {
+ wl_tx_hist_t tx_hist; /* tx histograms */
+} wl_tx_hist_low_txq_t;
+
+#define WL_TX_HIST_LOW_TXQ_FIXED_LEN \
+ (OFFSETOF(wl_tx_hist_low_txq_t, tx_hist) + WL_TX_HIST_FIXED_LEN)
+#define WL_TX_HIST_LOW_TXQ_FULL_LEN(num_hist, max_hist_size) \
+ (OFFSETOF(wl_tx_hist_low_txq_t, tx_hist) + \
+ WL_TX_HIST_FULL_LEN(num_hist, max_hist_size))
+
+/* structure for WL_TX_HIST_SCBQ */
+typedef struct wl_tx_hist_scbq {
+ struct ether_addr ea; /* ether addr of peer */
+ uint16 bsscfg_idx; /* bsscfg index */
+ wl_tx_hist_t tx_hist; /* tx histograms */
+} wl_tx_hist_scbq_t;
+
+#define WL_TX_HIST_SCBQ_FIXED_LEN \
+ (OFFSETOF(wl_tx_hist_scbq_t, tx_hist) + WL_TX_HIST_FIXED_LEN)
+#define WL_TX_HIST_SCBQ_FULL_LEN(num_hist, max_hist_size) \
+ (OFFSETOF(wl_tx_hist_scbq_t, tx_hist) + \
+ WL_TX_HIST_FULL_LEN(num_hist, max_hist_size))
+
+/* sub-xtlv IDs within WL_STATS_XTLV_WL_SLICE_TX_QUEUE_DEPTH */
+enum wl_tx_queue_depth_id {
+ WL_TX_QUEUE_DEPTH_TXQ_ID = 1,
+ WL_TX_QUEUE_DEPTH_LOW_TXQ_ID = 2,
+ WL_TX_QUEUE_DEPTH_SCBQ_ID = 3,
+ WL_TX_QUEUE_DEPTH_EXCUR_TXQ_ID = 4,
+ WL_TX_QUEUE_DEPTH_EXCUR_LOW_TXQ_ID = 5
+};
+
+/* common tx queue depth structure */
+typedef struct wl_tx_queue_depth {
+ uint16 queue_depth_bmap; /* bitmap of queue depth in var len array */
+ uint16 queue_depth_count; /* count of queue depth in var len array */
+ uint16 queue_depth[1]; /* var len array of queue depth */
+} wl_tx_queue_depth_t;
+
+#define WL_TX_QUEUE_DEPTH_FIXED_LEN (OFFSETOF(wl_tx_queue_depth_t, queue_depth))
+#define WL_TX_QUEUE_DEPTH_FULL_LEN(num_queue_depth) \
+ (WL_TX_QUEUE_DEPTH_FIXED_LEN + (num_queue_depth) * \
+ sizeof(uint16))
+
+/* structure for WL_TX_QUEUE_DEPTH_TXQ_ID, WL_TX_QUEUE_DEPTH_EXCUR_TXQ_ID */
+typedef struct wl_tx_queue_depth_txq {
+ uint32 bsscfg_map; /* bitmap of bsscfg indexes associated with this queue */
+ wl_tx_queue_depth_t tx_queue_depth; /* queue depth */
+} wl_tx_queue_depth_txq_t;
+
+#define WL_TX_QUEUE_DEPTH_TXQ_FIXED_LEN \
+ (OFFSETOF(wl_tx_queue_depth_txq_t, tx_queue_depth) + WL_TX_QUEUE_DEPTH_FIXED_LEN)
+#define WL_TX_QUEUE_DEPTH_TXQ_FULL_LEN(num_queue_depth) \
+ (OFFSETOF(wl_tx_queue_depth_txq_t, tx_queue_depth) + \
+ WL_TX_QUEUE_DEPTH_FULL_LEN(num_queue_depth))
+
+/* structure for WL_TX_QUEUE_DEPTH_LOW_TXQ_ID, WL_TX_QUEUE_DEPTH_EXCUR_LOW_TXQ_ID */
+typedef struct wl_tx_queue_depth_low_txq {
+ wl_tx_queue_depth_t tx_queue_depth; /* queue depth */
+} wl_tx_queue_depth_low_txq_t;
+
+#define WL_TX_QUEUE_DEPTH_LOW_TXQ_FIXED_LEN \
+ (OFFSETOF(wl_tx_queue_depth_low_txq_t, tx_queue_depth) + WL_TX_QUEUE_DEPTH_FIXED_LEN)
+#define WL_TX_QUEUE_DEPTH_LOW_TXQ_FULL_LEN(num_queue_depth) \
+ (OFFSETOF(wl_tx_queue_depth_low_txq_t, tx_queue_depth) + \
+ WL_TX_QUEUE_DEPTH_FULL_LEN(num_queue_depth))
+
+/* structure for WL_TX_QUEUE_DEPTH_SCBQ_ID */
+typedef struct wl_tx_queue_depth_scbq {
+ struct ether_addr ea; /* ether addr of peer */
+ uint16 bsscfg_idx; /* bsscfg index */
+ wl_tx_queue_depth_t tx_queue_depth; /* queue depth */
+} wl_tx_queue_depth_scbq_t;
+
+#define WL_TX_QUEUE_DEPTH_SCBQ_FIXED_LEN \
+ (OFFSETOF(wl_tx_queue_depth_scbq_t, tx_queue_depth) + WL_TX_QUEUE_DEPTH_FIXED_LEN)
+#define WL_TX_QUEUE_DEPTH_SCBQ_FULL_LEN(num_queue_depth) \
+ (OFFSETOF(wl_tx_queue_depth_scbq_t, tx_queue_depth) + \
+ WL_TX_QUEUE_DEPTH_FULL_LEN(num_queue_depth))
+
+/* sub-xtlv IDs within WL_STATS_XTLV_BUS_PCIE_TX_HISTOGRAMS */
+enum wl_pcie_tx_histogram_id {
+ WL_PCIE_TX_HIST_ID = 1
+};
+
+/* structure for PCIE_TX_HIST_ID */
+typedef struct wl_pcie_tx_hist {
+ uint16 ring_id; /* PCIe ring id */
+ uint16 pad; /* 4-byte alignment */
+ wl_tx_hist_t tx_hist; /* hist_bmap:
+ * 0x1=tx histogram
+ * 0x2=tx status pending histogram
+ */
+} wl_pcie_tx_hist_t;
+
+#define WL_PCIE_TX_HIST_FIXED_LEN \
+ (OFFSETOF(wl_pcie_tx_hist_t, tx_hist) + WL_TX_HIST_FIXED_LEN)
+#define WL_PCIE_TX_HIST_FULL_LEN(num_hist, max_hist_size) \
+ (OFFSETOF(wl_pcie_tx_hist_t, tx_hist) + \
+ WL_TX_HIST_FULL_LEN(num_hist, max_hist_size))
+
+/* sub-xtlv IDs within WL_STATS_XTLV_BUS_PCIE_TX_QUEUE_DEPTH */
+enum wl_pcie_tx_queue_depth_id {
+ WL_PCIE_TX_QUEUE_DEPTH_ID = 1
+};
+
+/* structure for WL_PCIE_TX_QUEUE_DEPTH_ID */
+typedef struct wl_pcie_tx_queue_depth {
+ uint16 ring_id; /* PCIe ring id */
+ uint16 queue_depth; /* queue depth of ring id */
+ uint16 tx_status_pend; /* tx status pending of ring id */
+ uint16 pad; /* 4-byte alignment */
+} wl_pcie_tx_queue_depth_t;
+
+#define WL_PCIE_TX_QUEUE_DEPTH_FIXED_LEN sizeof(wl_pcie_tx_queue_depth_t)
+
+#define WL_WSEC_DEL_PMK_VER_V1 1u
+/* tlv ids for del pmk */
+#define WL_DEL_PMK_TLV_ID 1u
+#define WL_DEL_PMKID_TLV_ID 2u
+#define WL_DEL_PEER_ADDR_TLV_ID 3u
+typedef struct wl_wsec_del_pmk {
+ uint16 version;
+ uint16 length;
+ uint8 xtlvs[];
+} wl_wsec_del_pmk_t;
+#define WL_WSEC_DEL_PMK_FIXED_LEN_V1 OFFSETOF(wl_wsec_del_pmk_t, xtlvs)
+
+/* WTC */
+#define WLC_WTC_ROAM_VER_1 1
+
+#define WLC_WTC_ROAM_CUR_VER WLC_WTC_ROAM_VER_1
+#define WLC_WTC_ROAM_CONFIG_HDRLEN 4u
+
+typedef enum wtc_band_list {
+ WTC_BAND_2G = 0,
+ WTC_BAND_5G = 1,
+ WTC_MAX_BAND = 2
+} wtc_band_list_e;
+
+typedef struct wlc_wtcroam {
+ uint16 ver;
+ uint16 len;
+ uint8 data[];
+} wlc_wtc_args_t;
+
+typedef struct wlc_wtcconfig_info {
+ uint8 mode; /* enable or disable wtc. static config */
+ uint8 scantype; /* type of scan for wtc roam */
+ int8 rssithresh[WTC_MAX_BAND]; /* rssi value below which wtc is active */
+ int8 ap_rssithresh[WTC_MAX_BAND]; /* rssi value above which candidate AP is good */
+} wlc_wtcconfig_info_v1_t;
+
+/* RCROAM */
+#define WLC_RC_ROAM_VER_1 1
+
+typedef struct wlc_rcroam {
+ uint16 ver;
+ uint16 len;
+ uint8 data[];
+} wlc_rcroam_t;
+
+typedef struct wlc_rcroam_info_v1 {
+ uint16 inactivity_period; /* inactivity monitor period */
+ uint16 roam_scan_timeout;
+ uint16 periodic_roam_scan_timeout;
+ uint8 roam_trig_step; /* roaming trigger step value */
+ bool enab;
+} wlc_rcroam_info_v1_t;
+
+#define WLC_RC_ROAM_CUR_VER WLC_RC_ROAM_VER_1
+#define RCROAM_HDRLEN 4u
+#define MAX_RCSCAN_TIMER 300u
+
+/* Reasons for re-setting RC ROAM params */
+#define WLC_RCROAM_RESET_JOIN 0 /* New join */
+#define WLC_RCROAM_REASON_ROAM_SUCCESS 1 /* Successful roam */
+#define WLC_RCROAM_RESET_RSSI_CHANGE 2 /* RSSI of the link that was bad has recovered or worsened */
+#define WLC_RCROAM_RESET_BCN_LOSS 3 /* Beacon loss */
+#define WLC_RCROAM_RESET_DISASSOC_RX 4 /* Disassoc was received */
+#define WLC_RCROAM_RESET_DEAUTH_RX 5 /* Deauth was received */
+#define WLC_RCROAM_RESET_IOVAR 6 /* Iovar to disable rcroam was received from host */
+#define WLC_RCROAM_RESET_WTCREQ 7 /* WTC request overriding rcroam */
+#define WLC_RCROAM_RESET_RSN_ABORT 8 /* Reset RCROAM params due to roam abort */
+
+#define WLC_SILENT_ROAM_VER_1 1
+/* silent roam information struct */
+typedef struct wlc_sroam_info_v1 {
+ /* Silent roam Set/Get value */
+ uint8 sroam_on; /* sroam on/off */
+ int8 sroam_min_rssi; /* minimum rssi threshold to activate the feature */
+ uint8 sroam_rssi_range; /* rssi tolerance to determine stationary status */
+ uint8 sroam_score_delta; /* roam score delta value to prune candidate ap */
+ uint8 sroam_period_time; /* required monitoring period to trigger roaming scan */
+ uint8 sroam_band; /* band setting of roaming scan (all, 5g, 2g) */
+ uint8 sroam_inact_cnt; /* tx/rx frame count threshold for checking inactivity */
+ /* Silent roam monitor value */
+ int8 sroam_ref_rssi; /* reference rssi picked when monitoring is
+ * started; updated to the current rssi when it
+ * moves out of the rssi range
+ */
+ uint8 sroam_time_since; /* elapsed time since start monitoring */
+ uint8 pad[3];
+ uint32 sroam_txfrm_prev; /* save current tx frame counts */
+ uint32 sroam_rxfrm_prev; /* save current rx frame counts */
+} wlc_sroam_info_v1_t;
+
+typedef struct wlc_sroam {
+ uint16 ver;
+ uint16 len;
+ uint8 data[];
+} wlc_sroam_t;
+
+#define WLC_SILENT_ROAM_CUR_VER WLC_SILENT_ROAM_VER_1
+#define SROAM_HDRLEN 4u
+
+#define DEF_SROAM_OFF 0
+#define DEF_SROAM_MIN_RSSI -65
+#define DEF_SROAM_RSSI_RANGE 3u
+#define DEF_SROAM_SCORE_DELTA 1u
+#define DEF_SROAM_PERIOD_TIME 10u
+#define DEF_SROAM_INACT_CNT 5u
+#define MAX_SROAM_RSSI -70
+#define MAX_SROAM_RSSI_RANGE 5u
+#define MAX_SROAM_SCORE_DELTA 10u
+#define MAX_SROAM_PERIOD_TIME 250u
+#define SROAM_BAND_AUTO 3u
+
+/* MACSMPL IOVAR parameters */
+typedef enum wl_macdbg_macsmpl_iovar_id {
+ WL_MACSMPL_START = 0,
+ WL_MACSMPL_STOP = 1,
+ WL_MACSMPL_DUMP = 2,
+ WL_MACSMPL_STATUS = 3,
+ WL_MACSMPL_SIZE = 4
+} wl_macdbg_macsmpl_iovar_id_t;
+
+/* WL_MACSMPL_STATUS values */
+typedef enum wl_macdbg_macsmpl_status {
+ WL_MACSMPL_STATUS_IDLE = 0,
+ WL_MACSMPL_STATUS_ACTIVE = 1,
+ WL_MACSMPL_STATUS_WAIT_FOR_TRIG = 2,
+ WL_MACSMPL_STATUS_TRIGGERED = 3
+} wl_macdbg_macsmpl_status_t;
+
+/* WL_MACSMPL_START_PARAM subcommand data */
+typedef struct wl_macsmpl_start_param {
+ uint32 trig_condition; /* trigger condition */
+ uint16 gpio_mux; /* MACControl1 GPIOSel field */
+ uint8 pad[2]; /* 4-byte struct alignment */
+} wl_macsmpl_param_start_t;
+
+/* MAC SC fragment request data */
+typedef struct wl_macsmpl_frag_req_param {
+ uint32 offset; /* requested MAC SC fragment offset */
+ uint32 size; /* requested MAC SC fragment size, bytes */
+} wl_macsmpl_frag_req_param_t;
+
+/* MAC SC fragment response data */
+typedef struct wl_macsmpl_frag_resp_param {
+ uint32 offset; /* MAC SC response fragment offset */
+ uint32 size; /* MAC SC response fragment size, bytes */
+ uint8 data[]; /* MAC SC response fragment data, flexible array */
+} wl_macsmpl_frag_resp_param_t;
+
+/* MAC SC status data */
+typedef struct wl_macsmpl_status {
+ uint32 maccontrol1; /* MACControl1 register value */
+ uint32 macsc_flags; /* M_MACSC_FLAGS SHM register value */
+ uint16 sc_play_ctrl; /* TXE SampleCollectPlayCtrl register value */
+ uint16 sc_cur_ptr; /* TXE SampleCollectCurPtr register value */
+ uint16 sc_start_ptr; /* TXE SampleCollectStartPtr register value */
+ uint16 sc_stop_ptr; /* TXE SampleCollectStopPtr register value */
+} wl_macsmpl_status_t;
+
+/* WL_MACSMPL parameters data */
+typedef struct wl_macsmpl_param {
+ wl_macdbg_macsmpl_iovar_id_t subcmd_id;
+ union {
+ wl_macsmpl_param_start_t start;
+ wl_macsmpl_frag_req_param_t frag_req;
+ } u;
+} wl_macsmpl_param_t;
+
+/* High priority P2P */
+#define WL_HP2P_COUNTERS_VER 2u
+typedef struct hp2p_counters {
+ uint16 frames_queued;
+ uint16 frames_processed;
+ uint16 frames_exp;
+ uint16 frames_preempt;
+ uint16 frames_retried;
+ uint16 reserved; /* reserved, rsvd2 and rsvd3 are experimental counters */
+ uint16 rsvd2;
+ uint16 rsvd3;
+} hp2p_counters_t;
+
+typedef struct hp2p_counters_v2 {
+ uint32 frames_queued; /* Number of AMPDUs processed */
+ uint16 frames_exp; /* Number of Lifetime expiries */
+ uint16 edt_retry; /* Exceed due to - retry */
+ uint16 mpif_reconf; /* MPIF Reconfigure */
+ uint16 exceed_delay; /* Exceed delay threshold */
+ uint16 edt_nav_thresh; /* Exceed due to - NAV threshold */
+ uint16 edt_dc_def; /* Exceed due to - DC based deferral */
+ uint16 edt_tx_fifo_full; /* Exceed due to - Tx FIFO full */
+ uint16 edt_cts_thresh; /* Exceed due to - CTS threshold */
+ uint16 dbg1; /* dbgX are for internal debugging */
+ uint16 dbg2;
+ uint16 dbg3;
+ uint16 dbg4;
+ uint16 dbg5;
+ uint16 dbg6;
+ uint16 dbg7;
+ uint16 dbg8;
+ uint16 dbg9;
+ uint16 dbg10;
+} hp2p_counters_v2_t;
+
+typedef struct hp2p_counters_hdr {
+ uint16 version; /* version of hp2p_counters_t structure */
+ uint16 len;
+ uint16 slice_idx;
+ uint16 pad;
+ uint8 counters[];
+} hp2p_counters_hdr_t;
+
+/* HP2P RLLW Stats */
+#define WL_HP2P_RLLW_STATS_VER 1u
+typedef struct hp2p_rllw_stats_hdr {
+ uint16 version; /* version of hp2p_rllw_stats structure */
+ uint16 len; /* length of the payload */
+ uint8 slice_idx; /* Slice idx BAND_2G_INDEX - 0 / BAND_5G_INDEX - 1 */
+ uint8 pad[3];
+ uint8 stats[]; /* rllw_stats instance */
+} hp2p_rllw_stats_hdr_t;
+
+/*
+ * rllw stats common & per band
+ *
+ */
+typedef struct hp2p_rllw_stats_v1 {
+ /* Slice Specific Stats */
+ uint16 n_hnav_intrs; /* # of high nav interrupts */
+ uint16 n_switches; /* # of Switches to band n */
+ /* Stats on wlc_trig */
+ uint16 n_override_pkts; /* # of pkts enqd with ovrd bit set */
+ uint16 n_overrides; /* # of overrides - this is only trig_wlc */
+ uint16 n_override_pkts_acked; /* # of ovrd pkts that got an ACK */
+ uint16 n_override_pkts_not_acked; /* # of ovrd pkts that weren't ACKed */
+} hp2p_rllw_stats_v1_t;
+
+/* TX enable flags */
+#define WL_HP2P_TX_AMPDU 0x0001u
+#define WL_HP2P_TX_AMSDU 0x0002u
+#define WL_HP2P_TX_RDG 0x0004u
+#define WL_HP2P_TX_TXOP 0x0008u
+#define WL_HP2P_TX_TXOP_RTS 0x0010u
+
+/* RX enable flags */
+#define WL_HP2P_RX_AMPDU 0x0001u
+#define WL_HP2P_RX_AMSDU 0x0002u
+#define WL_HP2P_RX_RDG 0x0004u
+#define WL_HP2P_RX_AMPDU_REORDER 0x0008u
+#define WL_HP2P_RX_DELETE 0x0010u
+
+/* Max/min values for configuration parameters to check validity */
+#define WL_HP2P_MAX_RETRY_MAX 14u
+#define WL_HP2P_MAX_RETRY_MIN 6u
+#define WL_HP2P_LATENCY_TARGET_MAX 30u
+#define WL_HP2P_BURST_INTERVAL_MAX 64u
+#define WL_HP2P_MAX_FIFO 5u
+#define WL_HP2P_MAX_UCODE_LATENCY_THR 500u
+#define WL_HP2P_MAX_UCODE_RECOV_TO 500u
+#define WL_HP2P_MAX_UCODE_NAV_THR 50000u
+
+#define WLC_HP2P_DEF_NAV_THRESH 13u
+#define WLC_HP2P_MIN_NAV_THRESH 1u
+#define WLC_HP2P_MAX_NAV_THRESH 35u
+#define WLC_HP2P_MAX_HOF_WAIT_THRESH 65u
+
+#define WL_HP2P_VERSION 1u
+typedef struct hp2p_tx_config {
+ struct ether_addr peer_addr;
+ uint16 max_burst;
+ uint16 txop; /* stored in network order (ls octet first) */
+ uint16 flags; /* flags to enable/disable AMPDU, AMSDU, RDG, TXOP, TXOP_RTS */
+ uint8 aci;
+ uint8 ecw;
+ uint8 fifo;
+ uint8 tid;
+ uint8 burst_interval;
+ uint8 latency_target;
+ uint8 max_retry;
+ uint8 pad;
+} hp2p_tx_config_t;
+
+typedef struct hp2p_rx_config {
+ struct ether_addr peer_addr;
+ uint16 flags; /* flags to enable/disable AMPDU, AMSDU, RDG, AMPDU Reorder */
+ uint8 tid;
+ uint8 pad;
+ uint16 ba_wsize; /* AMPDU BA Window size */
+} hp2p_rx_config_t;
+
+typedef struct hp2p_udbg_config {
+ uint16 recovery_timeout; /* multiples of 256 usecs */
+ uint16 latency_thresh; /* multiples of 256 usecs */
+ uint16 enable_trap; /* trap if ucode delay exceeds latency_thresh */
+ uint16 nav_thresh; /* in usec */
+} hp2p_udbg_config_t;
+
+#define WLC_HP2P_RLLW_RETRY_LIMIT 7u
+#define WLC_HP2P_MAX_PKTLIFETIME_IN_MS 2000u /* 2 seconds */
+
+/*
+ * nav_thresh: 13 : By default set to 13ms
+ * retry_limit: 0 : Auto / Default retry limit to be applied
+ * Max retry limit is 7
+ * pkt_lifetime: 0 : Auto / Default pkt lifetime to be applied
+ * Max value can't exceed 2000u (2 seconds)
+ * floor_rate: 0 : Auto / Default min rate to be applied
+ * ceiling_rate: 0 : Auto / Default max rate to be applied
+ * hof_wait_thr: 0 : Disabled by default
+ * hof_switch_dur: 0 : Disabled by default
+ */
+typedef struct hp2p_rllw_cfg {
+ uint8 nav_thresh; /* NAV threshold in ms */
+ uint8 retry_limit; /* # max retries before pkt is discarded */
+ uint16 pkt_lifetime; /* Pkt lifetime in ms */
+ uint16 floor_rate; /* Min rate in Mbps */
+ uint16 ceiling_rate; /* Max rate in Mbps */
+ uint16 hof_wait_thr; /* HOF packet wait threshold in ms */
+ uint16 hof_switch_dur; /* How long to stay in the switched band in ms */
+ uint16 hof_pkt_life_thr; /* HOF remaining pkt lifetime threshold in ms */
+ uint16 pad;
+} hp2p_rllw_cfg_t;
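+
+/* Default-config sketch (illustrative), following the zero-means-auto
+ * convention documented above:
+ *
+ *	hp2p_rllw_cfg_t cfg;
+ *	memset(&cfg, 0, sizeof(cfg));	/* 0 == auto/default for the tunables */
+ *	cfg.nav_thresh = WLC_HP2P_DEF_NAV_THRESH;	/* 13 ms default */
+ */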
+
+#define WL_HP2P_CAP_MPEER 0x01u /* Multipeer Hp2p */
+#define WL_HP2P_CAP_MPEER_TXOP 0x02u /* Same txop transmit on mpeer Hp2p */
+#define WL_HP2P_CAP_RLLW 0x04u /* Band switching and override on NAV */
+#define WL_HP2P_CAP_SPLIT_TXS 0x08u /* Split tx status for rllw sub feature */
+
+typedef uint32 hp2p_cap_t; /* HP2P capabilities */
+
+typedef struct hp2p_cmd {
+ uint16 type;
+ uint16 len;
+ uint8 data[];
+} hp2p_cmd_t;
+
+#define WL_HP2P_CTRL_MPEER_TXOP_ENAB 0x01u /* Same txop transmit on mpeer Hp2p */
+/*
+ * Band switching on NAV. Control for band
+ * switching for HPP traffic, applies only
+ * for dual local/peer map availability.
+ */
+#define WL_HP2P_CTRL_RLLW_SWITCH_ENAB 0x02u /* RLLW Band switching enabled */
+#define WL_HP2P_CTRL_RLLW_PRIMARY_OVR 0x04u /* RLLW Override enab on primary band. */
+#define WL_HP2P_CTRL_RLLW_DENY_BT 0x08u /* RLLW Give WiFi priority over BT */
+#define WL_HP2P_CTRL_RLLW_PKTLT_ENAB 0x10u /* RLLW pkt lifetime based decision enabled */
+#define WL_HP2P_CTRL_RLLW_HOF_SW_ENAB 0x20u /* RLLW Band switching due to HOF timeout enabled */
+
+typedef uint32 hp2p_ctrl_t; /* HP2P Ctrl bits */
+
+/* TLVs for HP2P related IOVARs */
+typedef enum wl_hp2p_tlv {
+ HP2P_TLV_RLLW_SLICE_STATS = 1u,
+ HP2P_TLV_RLLW_PEER_STATS = 2u
+} wl_hp2p_tlv_t;
+
+/* Top level HP2P RLLW stats header */
+typedef struct wl_hp2p_stats_hdr {
+ uint32 n_stats; /* # of TLVs in the payload */
+ uint32 totlen; /* total length of TLVs following this field */
+ uint8 stats_tlvs[]; /* chain of TLVs for reader to decode based on length */
+} wl_hp2p_stats_hdr;
+
+/* Per Slice RLLW Stats */
+typedef struct wlc_hp2p_rllw_slice_stats {
+ uint8 slice; /* slice number */
+ uint8 pad;
+ uint16 n_hnav_intrs; /* # of high nav interrupts on slice n */
+ uint16 n_overrides; /* # of overrides */
+ uint16 n_switches; /* # of switches */
+ uint32 n_switched_pkts; /* # of packets after switching to slice n */
+ uint32 n_switched_pkts_not_acked; /* # of packets after switch & Not ACKed */
+ uint32 n_override_pkts; /* # of pkts enqd with ovrd bit set on slice n */
+ uint32 n_override_pkts_not_acked; /* # of ovrd pkts that weren't ACKed on slice n */
+ uint32 n_no_switch_in_pktlife; /* # of times no switch happened, pkts within lifetime */
+ uint32 n_no_override_in_pktlife; /* # of times no ovrd happened, pkts within lifetime */
+ uint16 n_no_hof_switch_in_pktlife; /* # of skipped HOF switches due to pkt lifetime */
+ uint16 n_no_hof_switch_null_pkt; /* # of skipped HOF switches due to NULL HOF pkt */
+ uint16 n_hof_switches; /* # of switches triggered by HOF timeout */
+ uint16 n_hof_intrs; /* # of HOF interrupts on slice n */
+} wlc_hp2p_rllw_slice_stats_t;
+
+/* Peer specific stats */
+typedef struct {
+ uint8 slice; /* slice number */
+ uint8 pad[3];
+ uint16 n_overrides; /* # of overrides */
+ uint16 n_switches; /* # of switches */
+ uint32 n_switched_pkts; /* # of packets after switching to slice n */
+ uint32 n_switched_pkts_not_acked; /* # of packets after switch & Not ACKed */
+ uint32 n_override_pkts; /* # of pkts enqd with ovrd bit set on slice n */
+ uint32 n_override_pkts_not_acked; /* # of ovrd pkts that weren't ACKed on slice n */
+ uint32 n_no_switch_in_pktlife; /* # of times no switch happened, pkts within lifetime */
+ uint32 n_no_override_in_pktlife; /* # of times no ovrd happened, pkts within lifetime */
+ uint16 n_no_hof_switch_in_pktlife; /* # of skipped HOF switches due to pkt lifetime */
+ uint16 n_hof_switches; /* # of switches triggered by HOF timeout */
+} wlc_hp2p_rllw_peer_stats_t;
+
+/* Peer Stats Info to host */
+typedef struct wlc_hp2p_peer_stats_info {
+ struct ether_addr peer_mac; /* Mac Address of the HP2P peer */
+ uint16 nslices; /* Number of slices */
+ wlc_hp2p_rllw_peer_stats_t peer_stats[MAX_NUM_D11CORES]; /* Peer specific stats */
+} wlc_hp2p_peer_stats_info_t;
+
+typedef struct hp2p_cmd_hdr {
+ uint16 version;
+ uint16 slice_idx;
+ uint8 cmd[];
+} hp2p_cmd_hdr_t;
+
+/* to be used in type field of hp2p_cmd_t structure while issuing HP2P commands */
+typedef enum hp2p_cmd_id {
+ WL_HP2P_CMD_ENABLE = 0, /* Enable HP2P */
+ WL_HP2P_CMD_TX_CONFIG = 1, /* Tx Configuration */
+ WL_HP2P_CMD_RX_CONFIG = 2, /* Rx Configuration */
+ WL_HP2P_CMD_COUNTERS = 3, /* HP2P Counters */
+ WL_HP2P_CMD_UDBG_CONFIG = 4, /* ucode debug config */
+ WL_HP2P_CMD_RLLW_CFG = 5, /* HP2P RLLW config */
+ WL_HP2P_CMD_RLLW_STATS = 6, /* HP2P RLLW Stats */
+ WL_HP2P_CMD_CAP = 7, /* HP2P Capability */
+ WL_HP2P_CMD_CTRL = 8, /* HP2P feature ctrl */
+ WL_HP2P_CMD_RLLW_CFG_V2 = 9, /* HP2P RLLW config v2 */
+ WL_HP2P_CMD_RLLW_STATS_V2 = 10 /* HP2P RLLW Stats v2 */
+} hp2p_cmd_id_t;
+
+/**
+ * TX Profile.
+ *
+ * TX Profile allows the host to configure frames with a specific profile. This
+ * includes but is not limited to transmit rate, number of retries, whether or
+ * not to use frame aggregation, whether or not to use AMSDU and RTS protection
+ * threshold. The original intent is for the host to be able to specify transmit
+ * "reliability".
+ */
+
+/* IOVAR */
+typedef struct wlc_tx_profile_ioc {
+ uint16 id;
+ uint16 length;
+ union {
+ uint32 options;
+ uint32 status;
+ } u;
+ uint8 data[];
+} wlc_tx_profile_ioc_t;
+
+#define TX_PROFILE_IOV_HDR_SIZE (OFFSETOF(wlc_tx_profile_ioc_t, u))
+#define TX_PROFILE_IOV_OPTION_SIZE (sizeof(wlc_tx_profile_ioc_t) - TX_PROFILE_IOV_HDR_SIZE)
+
+/* TX Profile parameter TLV */
+enum tx_profile_param_id {
+ WL_TX_PROFILE_PARAM_RATE = 0,
+ WL_TX_PROFILE_PARAM_RTS = 1,
+ WL_TX_PROFILE_PARAM_RETRY = 2,
+ WL_TX_PROFILE_PARAM_BW = 3,
+ WL_TX_PROFILE_PARAM_AGGRE = 4,
+ WL_TX_PROFILE_PARAM_AMSDU = 5,
+ WL_TX_PROFILE_PARAM_AMPDU = 6,
+ WL_TX_PROFILE_PARAM_LAST = 7
+};
+
+typedef enum tx_profile_param_id tx_profile_param_id_t;
+
+typedef struct tx_profile_param {
+ uint16 param_id;
+ uint16 length;
+ uint8 param[];
+} tx_profile_param_t;
+
+/* Subcommands */
+typedef enum tx_profile_cmd_id {
+ /* The IOVAR to enable/disable the TX Profile in general. An integer
+ * is passed to the firmware with value 0 or 1.
+ */
+ WL_TX_PROFILE_ENABLE = 0,
+ /* Create a TX Profile with provided parameters. */
+ WL_TX_PROFILE_CREATE = 1,
+ /* Update a TX profile with provided parameters. */
+ WL_TX_PROFILE_UPDATE = 2,
+ /* Delete a TX Profile. */
+ WL_TX_PROFILE_DELETE = 3,
+ /* Dump TX Profile related debug information. */
+ WL_TX_PROFILE_DUMP = 4,
+ /* Show TX Profile stats */
+ WL_TX_PROFILE_STATS = 5,
+ /* Show Tx Profile tagged packets log */
+ WL_TX_PROFILE_PKT_LOGS = 6
+} tx_profile_cmd_id_t;
+
+typedef struct tx_profile_index_params {
+ uint16 profile_index;
+ uint16 length;
+ uint8 params[];
+} tx_profile_index_params_t;
+
+typedef struct tx_profile_index_stats {
+ uint32 profile_index;
+ uint32 tx_finish_cnt;
+ uint32 tx_acked_cnt;
+ uint32 tx_phy_err_cnt;
+ uint32 tx_suspend_cnt;
+} tx_profile_index_stats_t;
+
+typedef struct tx_profile_pkt_log {
+ uint32 rate;
+ uint16 retries;
+ uint16 rts;
+} tx_profile_pkt_log_t;
+
+/* End TX Profile section */
+
+typedef enum wl_rffe_cmd_type {
+ WL_RFFE_CMD_DEBUG_MODE = 0,
+ WL_RFFE_CMD_ELNABYP_MODE = 1,
+ WL_RFFE_CMD_REG = 2,
+ WL_RFFE_CMD_ELNA_VDD_MODE = 3,
+ WL_RFFE_CMD_LAST
+} wl_rffe_cmd_type_t;
+
+/** RFFE struct passed through ioctl */
+typedef struct {
+ uint32 regaddr; /**< rFEM_RegAddr */
+ uint32 antnum; /**< rFEM AntNum */
+ uint32 slaveid; /**< rFEM SlaveID */
+ uint32 value; /**< read/write value */
+} rffe_reg_t;
+
+#ifndef BCMUTILS_ERR_CODES
+
+/*
+ * SOE (Security Offload Engine) status codes.
+ */
+
+/* SOE status codes are reserved from -6144 to -7167 (1K) */
+
+enum wl_soe_status {
+ /* Invalid operational context */
+ WL_SOE_E_BAD_OP_CONTEXT = -6144,
+
+ /* Invalid operational type */
+ WL_SOE_E_BAD_OP_TYPE = -6145,
+
+ /* Failure to get NAF3 encoded scalar */
+ WL_SOE_E_BN_GET_NAF3_ERROR = -6146,
+
+ /* Failure to get NAF3 params */
+ WL_SOE_E_ECG_GET_NAF3_PARAMS_ERROR = -6147,
+
+	/* Failure to get Montgomery params */
+ WL_SOE_E_MONT_PARAMS_GET_ERROR = -6148,
+
+ /* Invalid OSL handle */
+ WL_SOE_E_BAD_SI_OSH = -6149,
+
+ /* Invalid ECG group */
+ WL_SOE_E_BAD_ECG_GROUP = -6150,
+
+ /* Invalid BN context */
+ WL_SOE_E_BAD_BN_CTX = -6151,
+
+ /* Invalid SOE core register base address */
+ WL_SOE_E_BAD_SOE_REGBASE = -6152,
+
+ /* Invalid SOE context */
+ WL_SOE_E_BAD_SOE_CONTXT = -6153,
+
+	/* Number of words is too short (i.e., not enough
+	 * room to encode the PKA sequence)
+	 */
+ WL_SOE_E_PKA_SEQUENCE_WORDS_TOO_SHORT = -6154,
+
+ /* Generic bn_get error */
+ WL_SOE_E_PKA_BN_GET_ERROR = -6155,
+
+ /* Sequence buf too short for BN */
+ WL_SOE_E_PKA_BN_BUF_TOO_SHORT_BN = -6156,
+
+ /* Sequence buf too short for ECG prime */
+ WL_SOE_E_PKA_BN_BUF_TOO_SHORT_ECG_PRIME = -6157,
+
+ /* Sequence buf too short for Montgomery N' */
+ WL_SOE_E_PKA_SEQ_BUF_TOO_SHORT_MONT_PRIME = -6158,
+
+ /* Sequence buf too short for Accumulator registers */
+ WL_SOE_E_PKA_SEQ_BUF_TOO_SHORT_ACCM_REG = -6159,
+
+ /* Sequence buf too short for the point P */
+ WL_SOE_E_PKA_SEQ_BUF_TOO_SHORT_P = -6160,
+
+ /* Sequence buf too short for -P */
+ WL_SOE_E_PKA_SEQ_BUF_TOO_SHORT_PN = -6161,
+
+ /* Sequence buf too short for 3P */
+ WL_SOE_E_PKA_SEQ_BUF_TOO_SHORT_3P = -6162,
+
+ /* Sequence buf too short for -3P */
+ WL_SOE_E_PKA_SEQ_BUF_TOO_SHORT_3PN = -6163,
+
+ /* Sequence buf too short for NAF3 scalar */
+ WL_SOE_E_PKA_SEQ_BUF_TOO_SHORT_NAF3_SCALAR = -6164,
+
+ /* Sequence buf too short for load shift count */
+ WL_SOE_E_PKA_SEQ_BUF_TOO_SHORT_PRE_JMP = -6165,
+
+	/* SOE engine (SHA/PKA) failed to complete the operation */
+ WL_SOE_E_ENGINE_UNABLE_TO_COMPLETE = -6166,
+
+ /* Wrong LIR (Long Integer Register) type */
+ WL_SOE_E_PKA_BAD_LIR_TYPE = -6167,
+
+ /* Reference count has reached maximum */
+ WL_SOE_E_MAX_REF_COUNT_REACHED = -6168,
+
+ /* Failed to get the SOE context reference */
+ WL_SOE_E_GET_REF_FAILED = -6169,
+
+ /* Incoming digest length is invalid */
+ WL_SOE_E_SHA_WRONG_DIGEST_LEN = -6170
+};
+
+#endif /* BCMUTILS_ERR_CODES */
+
+#define NR5GCX_STATUS_VER_1 1
+/* NR coex status structures */
+typedef struct wlc_nr5gcx_status_v1 {
+ uint16 version; /* version info */
+ uint16 len; /* status length */
+ uint32 mode; /* NR coex status */
+ uint32 nr_req_cnt; /* NR req number since last read */
+ uint32 nr_dur; /* NR duration since last read, us */
+ uint32 nr_duty_cycle; /* NR duty cycle since last read */
+ uint32 nr_max_dur; /* NR max duration in a single request */
+ uint32 wlan_crit_cnt; /* aggregated # of WLAN critical events */
+ uint32 wlan_crit_dur; /* aggregated WLAN critical event duration, ms */
+	uint32 wlan_crit_max_dur;	/* Duration of the longest WLAN critical event */
+ uint16 wlan_crit_evt_bitmap; /* WLAN critical event occurrence bitmap,
+ * 1 event per bit.
+ */
+	uint16 wlan_crit_max_evt_type;	/* Event type of the longest WLAN
+					 * critical event
+					 */
+} wlc_nr5gcx_status_v1_t;
+
+#define NR5GCX_STATUS_VER_2 2
+/* NR coex status structures */
+typedef struct wlc_nr5gcx_status_v2 {
+ uint16 version; /* version info */
+ uint16 len; /* status length */
+ uint32 mode; /* NR coex status */
+ uint32 nr_req_cnt; /* NR req number since last read */
+ uint32 nr_dur; /* NR duration since last read, us */
+ uint32 nr_duty_cycle; /* NR duty cycle since last read */
+ uint32 nr_max_dur; /* NR max duration in a single request */
+ uint32 wlan_crit_cnt; /* aggregated # of WLAN critical events */
+ uint32 wlan_crit_dur; /* aggregated WLAN critical event duration, ms */
+	uint32 wlan_crit_max_dur;	/* Duration of the longest WLAN critical event */
+ uint16 wlan_crit_evt_bitmap; /* WLAN critical event occurrence bitmap,
+ * 1 event per bit.
+ */
+	uint16 wlan_crit_max_evt_type;	/* Event type of the longest WLAN
+					 * critical event
+					 */
+ uint16 data_stall_cnt; /* # of data stall, i.e., # of rate recovery. */
+ uint16 nr_deny_cnt_data_stall; /* # of NR deny due to data stall. */
+ uint16 nr_deny_dur_data_stall; /* Duration of NR deny due to data stall. */
+ uint16 succ_rr_cnt; /* # of successful rate recovery. */
+} wlc_nr5gcx_status_v2_t;
+
+#define RC1CX_STATUS_VER_1 1
+/* RC1 coex status structures */
+typedef struct wlc_rc1cx_status_v1 {
+ uint16 version; /* version info */
+ uint16 len; /* status length */
+ uint32 mode; /* RC1 coex status */
+ uint32 rc1_req_cnt; /* RC1 req number since last read */
+ uint32 rc1_dur; /* RC1 duration since last read, us */
+ uint32 rc1_duty_cycle; /* RC1 duty cycle since last read */
+ uint32 rc1_max_dur; /* RC1 max duration in a single request */
+ uint32 wlan_crit_cnt; /* aggregated # of WLAN critical events */
+ uint32 wlan_crit_dur; /* aggregated WLAN critical event duration, ms */
+	uint32 wlan_crit_max_dur;	/* Duration of the longest WLAN critical event */
+ uint16 wlan_crit_evt_bitmap; /* WLAN critical event occurrence bitmap,
+ * 1 event per bit.
+ */
+	uint16 wlan_crit_max_evt_type;	/* Event type of the longest WLAN
+					 * critical event
+					 */
+} wlc_rc1cx_status_v1_t;
+
+#define RC1CX_STATUS_VER_2 2
+/* RC1 coex status structures */
+typedef struct wlc_rc1cx_status_v2 {
+ uint16 version; /* version info */
+ uint16 len; /* status length */
+ uint32 mode; /* RC1 coex status */
+ uint32 rc1_req_cnt; /* RC1 req number since last read */
+ uint32 rc1_dur; /* RC1 duration since last read, us */
+ uint32 rc1_duty_cycle; /* RC1 duty cycle since last read */
+ uint32 rc1_max_dur; /* RC1 max duration in a single request */
+ uint32 wlan_crit_cnt; /* aggregated # of WLAN critical events */
+ uint32 wlan_crit_dur; /* aggregated WLAN critical event duration, ms */
+	uint32 wlan_crit_max_dur;	/* Duration of the longest WLAN critical event */
+ uint16 wlan_crit_evt_bitmap; /* WLAN critical event occurrence bitmap,
+ * 1 event per bit.
+ */
+	uint16 wlan_crit_max_evt_type;	/* Event type of the longest WLAN
+					 * critical event
+					 */
+ uint16 data_stall_cnt; /* # of data stall, i.e., # of rate recovery. */
+ uint16 rc1_deny_cnt_data_stall; /* # of RC1 deny due to data stall. */
+ uint16 rc1_deny_dur_data_stall; /* Duration of RC1 deny due to data stall. */
+ uint16 succ_rr_cnt; /* # of successful rate recovery. */
+} wlc_rc1cx_status_v2_t;
+
+/* ifdef (WLC_OBSS_HW) */
+/* OBSS HW specific Macros */
+#define WLC_OBSS_HW_CMD_VERSION_1 1u
+
+/* OBSS HW config sub command identification flag */
+#define OBSS_HW_CFG_SUB_CMD_ENABLE (1u << 0u)
+#define OBSS_HW_CFG_SUB_CMD_SW_CACHE_INTVL (1u << 1u)
+#define OBSS_HW_CFG_SUB_CMD_PHY_SENSE_DUR (1u << 2u)
+#define OBSS_HW_CFG_SUB_CMD_MASK (OBSS_HW_CFG_SUB_CMD_ENABLE | \
+ OBSS_HW_CFG_SUB_CMD_SW_CACHE_INTVL | \
+ OBSS_HW_CFG_SUB_CMD_PHY_SENSE_DUR)
+
+#define WLC_OBSS_HW_DEF_SW_CACHE_INTVL 1u /* 1 sec */
+#define WLC_OBSS_HW_MAX_SW_CACHE_INTVL 5u /* 5 sec */
+#define WLC_OBSS_HW_DEF_PHY_SENSE_DUR 30 /* 30 msec */
+#define WLC_OBSS_HW_MAX_PHY_SENSE_DUR 500 /* 500 msec */
+
+/* OBSS HW test sub command identification flag */
+#define WLC_OBSS_HW_TEST_SUB_CMD_TEST_MODE (1u << 0u)
+#define WLC_OBSS_HW_TEST_SUB_CMD_MITI_MODE (1u << 1u)
+#define WLC_OBSS_HW_TEST_SUB_CMD_MASK (WLC_OBSS_HW_TEST_SUB_CMD_TEST_MODE |\
+ WLC_OBSS_HW_TEST_SUB_CMD_MITI_MODE)
+
+/* Test mode values */
+#define WLC_OBSS_HW_TEST_MODE_STOP 0u /* Stop the test */
+#define WLC_OBSS_HW_TEST_MODE_TRIGGER 1u /* Trigger mode */
+#define WLC_OBSS_HW_TEST_MODE_FREE_RUNNING 2u /* Free running stats collection */
+
+#define WLC_OBSS_HW_TEST_MITI_TX_RX_DISABLE 0u /* Both Tx and Rx mitigation disabled */
+#define WLC_OBSS_HW_TEST_MITI_RX_FILT_PKTENG	1u	/* Rx mitigation via filter override;
+							 * all Tx mitigation disabled, pkteng only
+							 */
+#define WLC_OBSS_HW_TEST_MITI_TX_ONLY	2u	/* Rx mitigation disabled, Tx mitigation only */
+#define WLC_OBSS_HW_TEST_MITI_TX_RX_FILT 3u /* Rx Tx mitigation enabled */
+#define WLC_OBSS_HW_TEST_MITI_CHAN_CHANGE 4u /* Mitigation by chanspec change */
+
+#define WL_OBSS_ANT_MAX 2u /* Max Antennas */
+#define ACPHY_OBSS_STATS_BIN_CNT 8u /* min 1 for default */
+#define ACPHY_OBSS_SUBBAND_CNT 8u /* Max sub band counts i.e., 160Mhz = 8 * 20MHZ */
+
+enum wlc_obss_hw_cmd_id {
+ WLC_OBSS_HW_CMD_VER = 1u,
+ WLC_OBSS_HW_CMD_CONFIG = 2u,
+ WLC_OBSS_HW_CMD_DUMP_STATS = 3u,
+ WLC_OBSS_HW_CMD_TEST = 4u,
+ WLC_OBSS_HW_CMD_LAST
+};
+
+/* OBSS HW specific structures */
+typedef struct wlc_obss_hw_cfg_v1 {
+ uint16 sub_cmd_flags; /* Flag bits to Identify configuring sub command */
+ uint8 is_enable; /* Feature is enabled or not */
+ uint8 sw_cache_interval; /* SW cache interval to cache OBSS stats in sec */
+ uint16 phy_sensing_duration; /* PHY OBSS sensing duration in msec */
+} wlc_obss_hw_cfg_v1_t;
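+
+/* Illustrative sketch: setting only the SW cache interval. Sub-commands not
+ * selected in sub_cmd_flags are expected to keep their current FW values;
+ * the unselected fields here are zeroed purely for determinism.
+ */
+static inline void
+obss_hw_cfg_set_cache_intvl(wlc_obss_hw_cfg_v1_t *cfg, uint8 intvl_sec)
+{
+	cfg->sub_cmd_flags = OBSS_HW_CFG_SUB_CMD_SW_CACHE_INTVL;
+	cfg->is_enable = 0;			/* not selected; ignored */
+	cfg->sw_cache_interval = intvl_sec;	/* 1..WLC_OBSS_HW_MAX_SW_CACHE_INTVL sec */
+	cfg->phy_sensing_duration = 0;		/* not selected; ignored */
+}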
+
+typedef struct wlc_obss_hw_stats_v1 {
+ uint16 avg_obss_stats[WL_OBSS_ANT_MAX][ACPHY_OBSS_SUBBAND_CNT][ACPHY_OBSS_STATS_BIN_CNT];
+ uint16 obss_det_stats[ACPHY_OBSS_SUBBAND_CNT];
+ uint16 stats_cnt; /* Stats count */
+ uint8 obss_mit_status; /* OBSS mitigation status */
+ uint8 mit_bw; /* Mitigation BW that got selected */
+} wlc_obss_hw_stats_v1_t;
+
+typedef struct wlc_obss_hw_test_v1 {
+ uint16 sub_cmd_flags; /* Flag bits to Identify configuring sub command */
+ uint8 test_mode; /* To stop/start respective test mode */
+ uint8 mitigation_mode; /* mitigation enabling/disabling options */
+} wlc_obss_hw_test_v1_t;
+
+#define STA_PM_SC_OFLD_CFG_VER_V1 1u
+#define STA_PM_SC_OFLD_ENAB_FLAG (1u << 0u)
+
+#define STA_PM_SC_FLAG_MASK (1u << 0u)
+#define STA_PM_SC_CONS_EXP_BCN_RX_THR_MASK (1u << 1u)
+#define STA_PM_SC_MISSED_BCN_THR_MASK (1u << 2u)
+#define STA_PM_SC_OFLD_RSSI_THR_MASK (1u << 3u)
+#define STA_PM_SC_OFLD_RSSI_HYST_MASK (1u << 4u)
+#define STA_PM_SC_OFLD_TIM_EXIT_DLY_MASK (1u << 5u)
+#define STA_PM_SC_OFLD_FIELD_MASK_ALL (STA_PM_SC_FLAG_MASK | \
+ STA_PM_SC_CONS_EXP_BCN_RX_THR_MASK | \
+ STA_PM_SC_MISSED_BCN_THR_MASK | \
+ STA_PM_SC_OFLD_RSSI_THR_MASK | \
+ STA_PM_SC_OFLD_RSSI_HYST_MASK | \
+ STA_PM_SC_OFLD_TIM_EXIT_DLY_MASK)
+
+#define STA_PM_SC_OFLD_RSSI_HYST_MAX 80u
+
+typedef struct wlc_sta_pm_sc_ofld_cfg_v1 {
+ uint16 version;
+ uint16 len;
+ uint16 field_mask; /* Provides info on which of the following fields are valid */
+ uint16 flags; /* enable 0x1 */
+ uint8 cons_exp_bcn_rx_thr; /* Consecutive expected bcn in true slice to offload to SC */
+ uint8 sc_missed_bcn_thr; /* Missed beacons threshold in SC to exit offload */
+ int8 sc_offload_rssi_thr; /* Bcn RSSI threshold to exit offload, must be negative */
+ uint8 sc_offload_rssi_hyst; /* Hysteresis for the RSSI threshold, 0 - RSSI_HYST_MAX */
+ uint8 tim_exit_delay_ms; /* TIM exit delay when DTIM AID=0 is also set */
+ uint8 PAD[3];
+} wlc_sta_pm_sc_ofld_cfg_v1_t;
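+
+/* Illustrative sketch: enabling SC offload with only the enable flag and the
+ * RSSI threshold marked valid via field_mask; remaining fields are left to
+ * FW defaults. Assumes a memset implementation is available.
+ */
+static inline void
+sta_pm_sc_ofld_cfg_enable(wlc_sta_pm_sc_ofld_cfg_v1_t *cfg, int8 rssi_thr)
+{
+	memset(cfg, 0, sizeof(*cfg));
+	cfg->version = STA_PM_SC_OFLD_CFG_VER_V1;
+	cfg->len = (uint16)sizeof(*cfg);
+	cfg->field_mask = STA_PM_SC_FLAG_MASK | STA_PM_SC_OFLD_RSSI_THR_MASK;
+	cfg->flags = STA_PM_SC_OFLD_ENAB_FLAG;
+	cfg->sc_offload_rssi_thr = rssi_thr;	/* must be negative per the field note */
+}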
+
+typedef enum wlc_sta_pm_sc_ofld_fail_reason {
+ STA_PM_SC_OFLD_FAIL_RSSI = (1u << 0u), /* Low rssi */
+ STA_PM_SC_OFLD_FAIL_UNSUPPORTED = (1u << 1u), /* Unsupported by WBUS */
+ STA_PM_SC_OFLD_FAIL_MISSED_BCN = (1u << 2u), /* missed bcns on true slice */
+ STA_PM_SC_OFLD_FAIL_NO_PS = (1u << 3u), /* not in PM */
+ STA_PM_SC_OFLD_FAIL_TX_ACTIVE = (1u << 4u), /* Active TX in true slice */
+ STA_PM_SC_OFLD_FAIL_CSA = (1u << 5u), /* CSA */
+ STA_PM_SC_OFLD_FAIL_MRC = (1u << 6u), /* in MRC */
+ STA_PM_SC_OFLD_FAIL_AS_STATE = (1u << 7u), /* Assoc state not IDLE */
+ STA_PM_SC_OFLD_FAIL_NO_BASIC_RATESET = (1u << 8u), /* AP bcn has no basic rate */
+	STA_PM_SC_OFLD_FAIL_UNSUP_BCN_RATE_RX = (1u << 8u),	/* AP bcn at SC unsup rate
+								 * (note: same bit as NO_BASIC_RATESET)
+								 */
+ STA_PM_SC_OFLD_FAIL_DISABLED = (1u << 9u), /* Offload disabled */
+ STA_PM_SC_OFLD_FAIL_CAL = (1u << 10u), /* Cal on main/aux core */
+ STA_PM_SC_OFLD_FAIL_UNSUP_BCMC_RATE_RX = (1u << 11u), /* unsupported bcmc rate rx */
+ STA_PM_SC_OFLD_FAIL_TWT_ACTIVE = (1u << 12u), /* TWT is active */
+ STA_PM_SC_OFLD_FAIL_AP_ENAB = (1u << 13u), /* AP cfg is enabled */
+ STA_PM_SC_OFLD_FAIL_SLOTTED_BSS_ENAB = (1u << 14u), /* Slotted BSS is enabled */
+ STA_PM_SC_OFLD_FAIL_BTMC_ACTIVE = (1u << 15u), /* BT Main Core is active */
+ STA_PM_SC_OFLD_FAIL_UNSUP_BASIC_RATE = (1u << 16u) /* SC Unsupported basic rate */
+} wlc_sta_pm_sc_ofld_fail_reason_t;
+
+typedef enum wlc_sta_pm_sc_ofld_exit_reason {
+ STA_PM_SC_OFLD_EXIT_AS_STATE = 1u, /* Exit due to assoc state not IDLE */
+ STA_PM_SC_OFLD_EXIT_BCN_LOSS = 2u, /* Exit due to beacon loss */
+ STA_PM_SC_OFLD_EXIT_TIM = 3u, /* Exit due to TIM bit set */
+ STA_PM_SC_OFLD_EXIT_PM = 4u, /* Exit due to exit out of PM mode */
+ STA_PM_SC_OFLD_EXIT_TX = 5u, /* Exit due to tx on true slice */
+ STA_PM_SC_OFLD_EXIT_CSA = 6u, /* Exit due to CSA */
+ STA_PM_SC_OFLD_EXIT_LOW_RSSI = 7u, /* Exit due to rssi lower than threshold */
+ STA_PM_SC_OFLD_EXIT_MRC = 8u, /* Exit due to MRC being active */
+ STA_PM_SC_OFLD_EXIT_BSSCFG_DOWN = 9u, /* Exit due to bsscfg down */
+ STA_PM_SC_OFLD_EXIT_WLC_DOWN = 10u, /* Exit due to wlc down */
+ STA_PM_SC_OFLD_EXIT_WBUS_REJECT = 11u, /* Exit due to wbus reject */
+ STA_PM_SC_OFLD_EXIT_HOST_DISABLE = 12u, /* Exit due to disable by host */
+ STA_PM_SC_OFLD_EXIT_CAL_TVPM = 13u, /* Exit due to Cal/TVPM on main/aux core */
+ STA_PM_SC_OFLD_EXIT_DISASSOC = 14u, /* Exit due to disassoc */
+ STA_PM_SC_OFLD_EXIT_CAC = 15u, /* Exit due to CAC admit */
+ STA_PM_SC_OFLD_EXIT_CHN_OVERLAP = 16u, /* Exit due to true slice chan overlap */
+ STA_PM_SC_OFLD_EXIT_UNSUP_BCMC_RATE_RX = 17u, /* Exit due to unsupported bcmc rate rx */
+ STA_PM_SC_OFLD_EXIT_BCMC_LOSS = 18u, /* Exit due to bcmc pkt loss */
+ STA_PM_SC_OFLD_EXIT_TWT = 19u, /* Exit due to TWT active */
+ STA_PM_SC_OFLD_EXIT_SLOTTED_BSS = 20u, /* Exit due to Slotted BSS active */
+ STA_PM_SC_OFLD_EXIT_AP_BSS = 21u, /* Exit due to AP BSS active */
+ STA_PM_SC_OFLD_EXIT_MAX = 255u /* Max, uint8 for now */
+} wlc_sta_pm_sc_ofld_exit_reason_t;
+
+#define STA_PM_SC_OFLD_STATUS_VER_V1 1u
+
+#define STA_PM_SC_OFLD_STATUS_CLEAR (1u << 0u)
+#define STA_PM_SC_OFLD_STATUS_OFFLOADED (1u << 1u)
+#define STA_PM_SC_OFLD_STATUS_ASSOCIATED (1u << 2u)
+
+typedef struct wlc_sta_pm_sc_status_v1 {
+ uint16 version;
+ uint16 len;
+ uint32 flags;
+ uint32 sc_offload_enter_cnt; /* Offload enter cnt */
+ uint32 sc_offload_exit_cnt; /* Offload exit cnt */
+ uint32 sc_offload_wbus_reject_cnt; /* WBUS reject reg cnt */
+ uint32 sc_offload_exp_bcn_cnt; /* SC ofld expected bcn cnt */
+ uint32 sc_offload_exp_bcn_missed_cnt; /* SC ofld missed bcn cnt */
+ uint32 sc_offload_last_exit_reason; /* Previous exit reason */
+ uint32 sc_offload_enter_fail_reason; /* SC ofld enter fail reason */
+ uint32 sc_offload_total_dur_ms; /* Cumulative duration in offload (ms) */
+ uint32 sc_offload_tim_exit_dly_cnt; /* TIM exit delay cnt */
+} wlc_sta_pm_sc_status_v1_t;
+
+#define WL_SDTC_IOV_VERSION 1
+#define SDTC_SUB_IOV_VERSION_1 1
+#define SDTC_SUB_IOV_VERSION_1_1 1
+#define MAX_ATID_CORES 5u
+#define SDTC_ID_ALL 0xFFu
+
+/* SDTC IOV ID's */
+enum wl_sdtc_iov_id {
+ SDTC_ID_INVALID = 0x0,
+ SDTC_ID_CB = 0x1,
+ SDTC_ID_PCIE = 0x2,
+ SDTC_ID_SYSMEM = 0x3,
+ SDTC_ID_AUXMAC = 0x4,
+ SDTC_ID_MAINMAC = 0x5
+};
+
+/* SDTC Iovars */
+enum wl_sdtc_cmd_ids {
+ WL_SDTC_CMD_EN = 0x1,
+ WL_SDTC_CMD_CONFIG = 0x2,
+ WL_SDTC_CMD_CORE_ENAB = 0x3,
+ WL_SDTC_CMD_ETB_INFO = 0x4,
+ WL_SDTC_CMD_LAST
+};
+
+enum wl_sdtc_xtlv_version {
+ WL_SDTC_ENABLE_V1 = 0x1
+};
+
+typedef struct sdtc_regs {
+ uint32 addr;
+ uint32 val;
+} sdtc_regs_t;
+
+typedef struct sdtc_config {
+ uint16 version;
+ uint16 len;
+
+	uint8 sdtc_id;		/* 0xFF (SDTC_ID_ALL) enables all core IDs */
+	uint8 pad;		/* 32 bit alignment */
+	uint16 num_of_registers; /* if the number of registers is 0, the default SDTC config is used */
+ sdtc_regs_t regs[1]; /* Array of register list */
+} sdtc_config_t;
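+
+/* Illustrative sketch: regs[] is declared with one element, so a config
+ * carrying nregs register writes needs room for nregs - 1 extra entries
+ * (nregs == 0 requests the default SDTC config).
+ */
+#define SDTC_CONFIG_SIZE(nregs) \
+	(sizeof(sdtc_config_t) + \
+	 (((nregs) > 0u ? (nregs) - 1u : 0u) * sizeof(sdtc_regs_t)))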
+
+typedef struct sdtc_enab_atid_list {
+ uint16 version;
+ uint16 len;
+
+ uint32 sdtc_id_bmp;
+ uint32 sdtc_id_mask;
+} sdtc_enab_atid_list_t;
+
+typedef struct etb_addr_info {
+ uint16 version;
+ uint16 len;
+ uint32 etbinfo_addr; /* etb_info address */
+} etb_addr_info_t;
+
+typedef struct etb_info {
+ uint16 version;
+ uint16 len;
+ uint32 read_write_p; /* read write pointer address */
+ uint8 etb_full; /* status bit */
+ uint8 pad[3]; /* 32bit alignment */
+ uint32 addr; /* H/W Address */
+ uint32 read_bytes; /* Size of ETB(Embedded Trace Buffer) Memory */
+} etb_info_t;
+
+/* This ROAM RSSI limit is used to prune low-RSSI candidates for priority
+ * roam and beacon-lost roams:
+ * WLC_E_REASON_DEAUTH, WLC_E_REASON_DISASSOC, WLC_E_REASON_BCNS_LOST
+ */
+#define WLC_ROAM_RSSI_LMT_VER_1 1
+typedef struct wlc_roam_rssi_lmt_info_v1 {
+ int16 rssi_limit_2g;
+ int16 rssi_limit_5g;
+} wlc_roam_rssi_lmt_info_v1_t;
+
+typedef struct wlc_roam_rssi_limit {
+ uint16 ver;
+ uint16 len;
+ uint8 data[];
+} wlc_roam_rssi_limit_t;
+
+#define ROAMRSSI_HDRLEN 4u
+#define ROAMRSSI_2G_DEFAULT -128
+#define ROAMRSSI_2G_MAX -70
+#define ROAMRSSI_2G_MIN -128
+#define ROAMRSSI_5G_DEFAULT -128
+#define ROAMRSSI_5G_MAX -70
+#define ROAMRSSI_5G_MIN -128
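+
+/* Illustrative sketch: wrapping the v1 payload in the versioned container;
+ * ROAMRSSI_HDRLEN corresponds to the ver/len header. Clamping to the
+ * ROAMRSSI_*_MIN/MAX bounds is left to the caller.
+ */
+static inline void
+roam_rssi_lmt_pack_v1(wlc_roam_rssi_limit_t *iov, int16 lmt_2g, int16 lmt_5g)
+{
+	wlc_roam_rssi_lmt_info_v1_t *info = (wlc_roam_rssi_lmt_info_v1_t *)iov->data;
+
+	iov->ver = WLC_ROAM_RSSI_LMT_VER_1;
+	iov->len = (uint16)sizeof(*info);
+	info->rssi_limit_2g = lmt_2g;	/* [ROAMRSSI_2G_MIN, ROAMRSSI_2G_MAX] */
+	info->rssi_limit_5g = lmt_5g;	/* [ROAMRSSI_5G_MIN, ROAMRSSI_5G_MAX] */
+}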
+
+#define WLC_ROAM_MIN_DELTA_VER_1 1
+typedef struct wlc_roam_min_score_delta_info_v1 {
+ uint32 roam_min_delta_2g;
+ uint32 roam_min_delta_5g;
+} wlc_roam_min_delta_info_v1_t;
+
+typedef struct wlc_roam_min_delta {
+ uint16 ver;
+ uint16 len;
+ uint8 data[];
+} wlc_roam_min_delta_t;
+
+#define ROAM_MIN_DELTA_HDRLEN 4u
+#define ROAM_MIN_DELTA_DEFAULT 0u
+/* MAX score is (RSSI Factor MAX * RSSI weight) + (CU factor MAX * CU weight)
+ * 10000 = (100 * 65) + (100 * 35)
+ */
+#define ROAM_MIN_DELTA_MAX 10000u
+
+/* Beacon mute mitigation specific Macros */
+
+/* Macro flags to identify the specific config commands in IOVAR set operation */
+#define WLC_BCN_MUTE_MITI_CFG_CMD_ENABLE (1u << 0u)
+#define WLC_BCN_MUTE_MITI_CFG_CMD_PM0_PERIOD (1u << 1u)
+#define WLC_BCN_MUTE_MITI_CFG_CMD_MAX_MITI_TIMEOUT (1u << 2u)
+#define WLC_BCN_MUTE_MITI_CFG_CMD_MASK (WLC_BCN_MUTE_MITI_CFG_CMD_ENABLE |\
+ WLC_BCN_MUTE_MITI_CFG_CMD_PM0_PERIOD |\
+ WLC_BCN_MUTE_MITI_CFG_CMD_MAX_MITI_TIMEOUT)
+
+/* Configurable PM0 period range in sec */
+#define WLC_BCN_MUTE_MITI_CFG_PM0_PERIOD_MIN (0u) /* Minimum PM0 periodicity */
+#define WLC_BCN_MUTE_MITI_CFG_PM0_PERIOD_DEFAULT (1u) /* Default PM0 periodicity */
+#define WLC_BCN_MUTE_MITI_CFG_PM0_PERIOD_MAX	(10u)	/* Maximum PM0 periodicity */
+
+/* Configurable mitigation Timeout range */
+#define WLC_BCN_MUTE_MITI_CFG_TIMEOUT_MIN (30u) /* Minimum Timeout in sec */
+#define WLC_BCN_MUTE_MITI_CFG_TIMEOUT_DEFAULT (60u) /* Default Timeout in sec */
+#define WLC_BCN_MUTE_MITI_CFG_TIMEOUT_MAX (120u) /* Maximum Timeout in sec */
+
+#define WLC_BCN_MUTE_MITI_CMD_VER_1 1u /* Version number for wlc_bcn_mute_miti_cfg_v1 struct */
+typedef struct wlc_bcn_mute_miti_cfg_v1 {
+ uint16 version; /* Structure version */
+ uint16 length; /* Length of whole struct */
+ uint32 cmd_flags; /* Flag bits to Identify configuring command */
+ uint8 is_enabled; /* Feature is enabled or not */
+ uint8 pm0_periodicity; /* Interval between consecutive PM0 during mitigation
+ * period (in sec)
+ */
+ uint16 max_miti_timeout; /* Maximum mitigation timeout in sec to send
+ * Mitigation Timeout event.
+ */
+} wlc_bcn_mute_miti_cfg_v1_t;
+
+#define WLC_BCN_TBTT_CMD_VER_1 (1u) /* Ver num for wlc_bcn_tbtt_cfg struct */
+/* Configurable mitigation Timeout range */
+#define WLC_BCN_TBTT_CFG_EVENT_PERIOD (3u) /* Send event after this value of drift */
+#define WLC_BCN_TBTT_CFG_DRIFT_THRESH (3) /* in ms */
+#define WLC_BCN_TBTT_CFG_JITTER_THRESH (1) /* in ms */
+
+/* Macro flags to identify the specific config commands in IOVAR set operation */
+#define WLC_BCN_DRIFT_BCN_OFFSET (1u << 0u)
+#define WLC_BCN_DRIFT_EVENT_PERIOD (1u << 1u)
+#define WLC_BCN_DRIFT_DRIFT_THRESHOLD (1u << 2u)
+#define WLC_BCN_DRIFT_JITTER_THRESHOLD (1u << 3u)
+
+/* configurable range */
+#define WLC_BCN_DRIFT_BCN_OFFSET_MAX (15u)
+#define WLC_BCN_DRIFT_BCN_OFFSET_MIN (5u)
+
+#define WLC_BCN_DRIFT_DRIFT_THRESHOLD_MAX (10)
+#define WLC_BCN_DRIFT_DRIFT_THRESHOLD_MIN (-4)
+
+#define WLC_BCN_DRIFT_JITTER_THRESHOLD_MAX (2)
+#define WLC_BCN_DRIFT_JITTER_THRESHOLD_MIN (-2)
+
+typedef struct wlc_bcn_tbtt_cfg_v1 {
+ uint16 version; /* Structure version */
+ uint16 length; /* Length of whole struct */
+ uint32 cmd; /* type of cmd */
+	uint8 tbtt_bcn_off;	/* beacon offset for uatbtt, valid range 5-15 */
+	uint8 event_period;	/* continuous / periodic event */
+ int8 drift_thres; /* drift threshold for event in ms */
+ int8 jitter_thres; /* jitter threshold for event in ms */
+} wlc_bcn_tbtt_cfg_v1_t;
+
+/* SC scan retry config params */
+#define SC_SCAN_RETRY_CFG_VERSION_1 1u
+#define SC_SCAN_RETRY_CFG_VERSION_2 2u
+
+/* Bits indicating which are the valid params in the set command. */
+#define SC_SCAN_RETRY_CFG_PARAMS_THRESHOLD (1u << 0u)
+#define SC_SCAN_RETRY_CFG_PARAMS_MODE (1u << 1u)
+#define SC_SCAN_RETRY_CFG_PARAMS_BTMCRX_WEIGHT (1u << 2u)
+
+#ifndef SC_SCAN_RETRY_CFG_HAS_ALIAS
+#define SC_SCAN_RETRY_CFG_VERSION SC_SCAN_RETRY_CFG_VERSION_1
+#endif
+
+/* Input structure for sc_scan_retry_cfg IOVAR */
+typedef struct sc_scan_retry_cfg_params_v1 {
+ uint16 version; /* config version. */
+ uint16 len; /* Length of this struct. */
+ uint32 set_flag; /* Flag bits to Identify valid param type to be set */
+	uint8 threshold;	/* Percentage of Tx-blanking above which a scan is considered failed. */
+	uint8 scan_mode;	/* Scan mode in which the scan is to be re-scheduled. */
+ uint8 PAD[2]; /* 32bit alignment */
+} sc_scan_retry_cfg_params_v1_t;
+
+/* Input structure for sc_scan_retry_cfg v2 IOVAR */
+typedef struct sc_scan_retry_cfg_params_v2 {
+ uint16 version; /* config version. */
+ uint16 len; /* Length of this struct. */
+ uint32 set_flag; /* Flag bits to Identify valid param type to
+ * be set.
+ */
+	uint8 threshold;		/* Percentage of Tx-blanking plus the
+					 * weighted BTMC Rx overlap above which
+					 * a scan is considered failed.
+					 */
+	uint8 scan_mode;		/* Scan mode in which the scan is to be
+					 * re-scheduled.
+					 */
+ uint8 btmc_rx_overlap_weightage; /* weightage for btmc_rx_overlap duration in % */
+ uint8 PAD; /* 32bit alignment */
+} sc_scan_retry_cfg_params_v2_t;
+
+/* host queries RNG version from 'wl cap' iovar */
+#define BCM_RNG_VERSION_1 1u /* for initial "reseed" version */
+/* Supported entropy size, bytes */
+#define BCM_RNG_ENTROPY_SIZE_BYTES 64u
+
+/* RNG top level command IDs */
+typedef enum wl_rng_cmd_id {
+ WL_RNG_CMD_RESEED = 0u,
+ WL_RNG_CMD_LAST = 1u
+} wl_rng_cmd_id_t;
+
+/* RNG reseed host entropy */
+typedef struct bcm_rng_reseed {
+ uint16 entropy_size; /* host entropy size, bytes */
+ uint8 entropy[]; /* host entropy, flexible array */
+} bcm_rng_reseed_t;
+
+/* RNG IOVAR data */
+typedef struct wl_rng_iovar {
+ uint16 version; /* Version of this structure */
+ uint16 subcmd_id; /* RNG subcmd ID */
+ uint16 pad; /* padding for 32-bit struct alignment */
+ uint16 length; /* Length of data following this length field */
+ union {
+ bcm_rng_reseed_t reseed; /* RNG reseed data */
+ } u;
+} wl_rng_iovar_t;
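+
+/* Illustrative sketch: building a reseed request carrying host entropy.
+ * Returns the total IOVAR length in bytes, or 0 on bad input. Assumes a
+ * memcpy implementation is available.
+ */
+static inline uint
+wl_rng_pack_reseed(wl_rng_iovar_t *iov, const uint8 *entropy, uint16 entropy_size)
+{
+	if (entropy_size == 0u || entropy_size > BCM_RNG_ENTROPY_SIZE_BYTES)
+		return 0;
+	iov->version = BCM_RNG_VERSION_1;
+	iov->subcmd_id = WL_RNG_CMD_RESEED;
+	iov->pad = 0;
+	iov->length = (uint16)(sizeof(bcm_rng_reseed_t) + entropy_size);
+	iov->u.reseed.entropy_size = entropy_size;
+	memcpy(iov->u.reseed.entropy, entropy, entropy_size);
+	return (uint)(OFFSETOF(wl_rng_iovar_t, u) + iov->length);
+}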
+
+#ifdef RC2CX
+#define RC2CX_STATUS_VER_1 1
+/* RC2 coex status structures */
+typedef struct wlc_rc2cx_stats_v1 {
+ uint16 version; /* version info */
+ uint8 len; /* status length */
+ uint8 mode; /* RC2 coex mode */
+ uint16 rc2_req_cnt; /* RC2 req number since last read. */
+ uint16 rc2_grant_cnt; /* RC2 grant count since last read. */
+ uint32 rc2_dur; /* RC2 duration since last read, us. */
+ uint16 rc2_succ_pm_prot_cnt; /* RC2 number of successfully acked PM. */
+ uint16 rc2_succ_cts_prot_cnt; /* RC2 number of successfully TXed CTS2A. */
+ uint16 rc2_grant_delay_cnt; /* RC2 grant delay counter, delay > 4.5ms. */
+ uint16 rc2_crit_phycal_cnt; /* RC2 WLAN/BT critical: PHY cal. counter. */
+ uint16 rc2_crit_rate_cnt; /* RC2 WLAN/BT critical: rate recovery counter. */
+ uint16 rc2_crit_bcnloss_cnt; /* RC2 WLAN/BT critical: beacon loss counter. */
+ uint16 rc2_crit_hpp_cnt; /* RC2 WLAN/BT critical: HPP counter. */
+ uint16 rc2_crit_bt_cnt; /* RC2 WLAN/BT critical: BT counter. */
+ uint16 rc2_crit_slotbss_cnt; /* RC2 WLAN/BT critical: AWDL/NAN counter. */
+ uint16 rsvd;
+ uint32 rc2_crit_cnt; /* RC2 WLAN/BT critical counter, aggregate. */
+} wlc_rc2cx_stats_v1_t;
+
+/* Definitions for RC2 coex iovar */
+#define WL_RC2CX_VERSION 1
+
+/* RC2 coex IOV sub command IDs */
+typedef enum rc2cx_cmd_id {
+ WL_RC2CX_CMD_VER = 0, /* RC2CX version sub command */
+ WL_RC2CX_CMD_MODE = 1, /* RC2CX Mode sub command */
+ WL_RC2CX_CMD_PM_PROT = 2, /* RC2CX PM Protection sub command */
+ WL_RC2CX_CMD_PER_CTS = 3 /* RC2CX Periodic CTS sub command */
+} rc2cx_cmd_id_t;
+
+/* first byte of bcm_iov_batch_subcmd.data for the WL_RC2CX_CMD_MODE command */
+#define RC2CX_MODE_TDD 0x01u
+#define RC2CX_MODE_DISABLED 0x00u
+
+/* first byte of bcm_iov_batch_subcmd.data for the WL_RC2CX_CMD_PM_PROT command */
+#define RC2CX_PM_PROT_ENABLED 0x01u
+#define RC2CX_PM_PROT_DISABLED 0x00u
+
+/* first byte of bcm_iov_batch_subcmd.data for the WL_RC2CX_CMD_PER_CTS command */
+#define RC2CX_PER_CTS_ENABLED 0x01u
+#define RC2CX_PER_CTS_DISABLED 0x00u
+
+#define RC2CX_PER_CTS_DENY_BT 0x02u
+#define RC2CX_PER_CTS_GRANT_BT 0x01u
+
+/* payload for the WL_RC2CX_CMD_PER_CTS command */
+typedef struct rc2cx_per_cts_config {
+ uint8 enable_flag; /* 0: feature disabled;
+ * 1: grant BT when granting RC2;
+ * 2: deny BT when granting RC2
+ */
+ uint8 duration_val; /* Must be valid with enable command */
+ uint8 interval_val; /* Must be valid with enable command */
+ uint8 pad;
+} rc2cx_per_cts_config_t;
+
+#endif /* RC2CX */
+
+enum phy_rxgcrs_ed_enhncd_cmd_id {
+ PHY_RXGCRS_ED_ENHNCD_CMD_EN = 1u,
+ PHY_RXGCRS_ED_ENHNCD_CMD_STATUS = 2u,
+ PHY_RXGCRS_ED_ENHNCD_CMD_COREMASK = 3u,
+ PHY_RXGCRS_ED_ENHNCD_CMD_LAST
+};
+
+/* SAE command
+ * Only IOV batching support - see bcmiov.h
+ */
+
+/* SAE command version */
+#define WL_SAE_CMD_VERSION 1
+
+/* SAE sub-commands */
+typedef enum sae_cmd_id {
+ WL_SAE_CMD_AP_SESSION_HOLD_TIME = 1, /* AP rate limit session hold time
+ * Data: uint32,
+						 * milliseconds
+ */
+ WL_SAE_CMD_AP_MAX_ACTIVE_SESSIONS = 2, /* AP max sessions
+ * Data:
+ * uint32
+ */
+} sae_cmd_id_t;
+
+/* Frameburst COT IOVAR data */
+#define WL_FRAMEBURST_COT_VERSION 1u /* current version of frameburst_cot structure */
+#define WL_FRAMEBURST_MAX_AC 4u
+typedef struct frameburst_cot {
+ uint16 version; /* version of frameburst_cot_t structure */
+ uint16 length;
+ chanspec_t chspec;
+ uint16 pad; /* padding */
+ uint16 fbcot[WL_FRAMEBURST_MAX_AC]; /* per AC (BK, BE, VI, and VO) in us units */
+} frameburst_cot_t;
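+
+/* Illustrative sketch: applying a uniform channel occupancy time across all
+ * ACs; fbcot[] is indexed BK, BE, VI, VO per the field comment, in us.
+ */
+static inline void
+frameburst_cot_set_all(frameburst_cot_t *fb, chanspec_t chspec, uint16 cot_us)
+{
+	uint i;
+
+	fb->version = WL_FRAMEBURST_COT_VERSION;
+	fb->length = (uint16)sizeof(*fb);
+	fb->chspec = chspec;
+	fb->pad = 0;
+	for (i = 0; i < WL_FRAMEBURST_MAX_AC; i++)
+		fb->fbcot[i] = cot_us;
+}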
+
+typedef enum {
+ BCM_TRACE_VER = 1,
+ BCM_TRACE_ENAB = 2,
+ BCM_TRACE_EVENT_ENAB = 3
+} bcm_trace_sub_cmds_t;
+
+/* Trace events
+ * Max trace event allowed = 255
+ */
+enum {
+ BCM_TRACE_E_INVALID = 0,
+ BCM_TRACE_E_WBUS = 1,
+ BCM_TRACE_E_MSCH = 2,
+ BCM_TRACE_E_SC = 3,
+ BCM_TRACE_E_SCAN = 4,
+ BCM_TRACE_E_LAST
+};
+
+#define BCM_TRACE_VERSION_1 1u
+
+typedef struct bcm_trace_event_enab_v1 {
+ uint8 version;
+ uint8 event;
+ uint8 enab;
+ uint8 pad[1];
+} bcm_trace_event_enab_v1_t;
+
+/* rate_info command version */
+#define WL_RATE_INFO_VERSION 1
+typedef struct wl_rate_info {
+ uint16 version; /**< structure version */
+ uint16 length; /**< length of this struct */
+ uint32 mode_tx_rate; /**< the most used tx rate in tx_rate_histo */
+ uint32 mode_rx_rate; /**< the most used rx rate in rx_rate_histo */
+} wl_rate_info_t;
+
+/* "rng_test" IOVAR param */
+typedef struct {
+ uint32 rounds_no; /* IN number of generate cycles */
+ uint32 gen_no; /* IN number of buffers per cycle */
+ uint32 time_max; /* OUT max time of one cycle, us */
+ uint32 time_min; /* OUT min time of one cycle, us */
+ uint32 time_aver; /* OUT time of all cycles, us */
+} rng_test_t;
+
+/* latency_critical_data mode to reduce a latency */
+typedef enum {
+ LATENCY_CRT_DATA_MODE_OFF = 0, /* Turn off */
+ LATENCY_CRT_DATA_MODE_1 = 1u, /* Remap BLE scan window size */
+ LATENCY_CRT_DATA_MODE_2 = 2u, /* Remap BLE scan window size +
+ * Prevent full roam scan
+ */
+ LATENCY_CRT_DATA_MODE_LAST
+} latency_crt_mode_t;
+
+typedef struct wl_ext_auth_evt {
+ wlc_ssid_t ssid;
+ struct ether_addr bssid;
+ unsigned int key_mgmt_suite;
+ int status;
+} wl_ext_auth_evt_t;
+
+#define WL_AUTH_START_EVT_V1 1u
+typedef struct wl_auth_start_evt {
+ uint16 version;
+ uint16 len;
+ wlc_ssid_t ssid;
+ struct ether_addr bssid;
+ uint8 PAD[2];
+ uint32 key_mgmt_suite;
+ uint8 opt_tlvs[];
+} wl_auth_start_evt_t;
+#endif /* _wlioctl_h_ */
diff --git a/bcmdhd.101.10.361.x/include/wlioctl_defs.h b/bcmdhd.101.10.361.x/include/wlioctl_defs.h
new file mode 100755
index 0000000..6ab03aa
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/wlioctl_defs.h
@@ -0,0 +1,2514 @@
+/*
+ * Custom OID/ioctl definitions for
+ * Broadcom 802.11abg Networking Device Driver
+ *
+ * Definitions subject to change without notice.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef wlioctl_defs_h
+#define wlioctl_defs_h
+
+#ifdef EFI
+/*
+ * This is the Broadcom-specific guid selector for IOCTL handler in the 80211 Protocol
+ * define for EFI. However, we use the last 4 nibbles to communicate 'cmd' from tool to
+ * driver.
+ */
+#define BCMWL_IOCTL_GUID \
+ {0xB4910A35, 0x88C5, 0x4328, { 0x90, 0x08, 0x9F, 0xB2, 0x00, 0x00, 0x0, 0x0 } }
+#endif /* EFI */
+/* All builds use the new 11ac ratespec/chanspec */
+#undef D11AC_IOTYPES
+#define D11AC_IOTYPES
+
+#ifndef USE_NEW_RSPEC_DEFS
+/* Remove when no referencing branches exist.
+ * These macros will be used only in older branches (prior to K branch).
+ * Wl layer in newer branches and trunk use those defined in bcmwifi_rspec.h.
+ * Non-wl layer in newer branches and trunk may use these as well
+ * until they are removed.
+ */
+/* WL_RSPEC defines for rate information */
+#define WL_RSPEC_RATE_MASK 0x000000FF /* rate or HT MCS value */
+#define WL_RSPEC_VHT_MCS_MASK 0x0000000F /* VHT MCS value */
+#define WL_RSPEC_VHT_NSS_MASK 0x000000F0 /* VHT Nss value */
+#define WL_RSPEC_VHT_NSS_SHIFT 4 /* VHT Nss value shift */
+#define WL_RSPEC_TXEXP_MASK 0x00000300
+#define WL_RSPEC_TXEXP_SHIFT 8
+#define WL_RSPEC_BW_MASK 0x00070000 /* bandwidth mask */
+#define WL_RSPEC_BW_SHIFT 16 /* bandwidth shift */
+#define WL_RSPEC_STBC 0x00100000 /* STBC encoding, Nsts = 2 x Nss */
+#define WL_RSPEC_TXBF 0x00200000 /* bit indicates TXBF mode */
+#define WL_RSPEC_LDPC 0x00400000 /* bit indicates adv coding in use */
+#define WL_RSPEC_SGI 0x00800000 /* Short GI mode */
+#define WL_RSPEC_ENCODING_MASK 0x03000000 /* Encoding of Rate/MCS field */
+#define WL_RSPEC_OVERRIDE_RATE 0x40000000 /* bit indicate to override mcs only */
+#define WL_RSPEC_OVERRIDE_MODE 0x80000000 /* bit indicates override rate & mode */
+
+/* WL_RSPEC_ENCODING field defs */
+#define WL_RSPEC_ENCODE_RATE 0x00000000 /* Legacy rate is stored in RSPEC_RATE_MASK */
+#define WL_RSPEC_ENCODE_HT 0x01000000 /* HT MCS is stored in RSPEC_RATE_MASK */
+#define WL_RSPEC_ENCODE_VHT 0x02000000 /* VHT MCS and Nss is stored in RSPEC_RATE_MASK */
+
+/* WL_RSPEC_BW field defs */
+#define WL_RSPEC_BW_UNSPECIFIED 0
+#define WL_RSPEC_BW_20MHZ 0x00010000
+#define WL_RSPEC_BW_40MHZ 0x00020000
+#define WL_RSPEC_BW_80MHZ 0x00030000
+#define WL_RSPEC_BW_160MHZ 0x00040000
+#define WL_RSPEC_BW_10MHZ 0x00050000
+#define WL_RSPEC_BW_5MHZ 0x00060000
+#define WL_RSPEC_BW_2P5MHZ 0x00070000
+
+#define HIGHEST_SINGLE_STREAM_MCS 7 /* MCS values greater than this enable multiple streams */
+
+#ifndef OEM_ANDROID
+/* 'proprietary' string should not exist in open source(OEM_ANDROID) */
+/* given a proprietary MCS, get number of spatial streams */
+#define GET_PROPRIETARY_11N_MCS_NSS(mcs) (1 + ((mcs) - 85) / 8)
+
+#define GET_11N_MCS_NSS(mcs) ((mcs) < 32 ? (1 + ((mcs) / 8)) \
+ : ((mcs) == 32 ? 1 : GET_PROPRIETARY_11N_MCS_NSS(mcs)))
+#endif /* !OEM_ANDROID */
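+
+/* Illustrative sketch: decoding Nss from a legacy rspec using the masks
+ * above. VHT encodes Nss explicitly; for other encodings this sketch simply
+ * reports one stream (HT Nss derivation from the MCS is not shown here).
+ */
+static inline uint32
+wl_rspec_nss(uint32 rspec)
+{
+	if ((rspec & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_VHT)
+		return (rspec & WL_RSPEC_VHT_NSS_MASK) >> WL_RSPEC_VHT_NSS_SHIFT;
+	return 1u;	/* legacy/HT: single stream assumed in this sketch */
+}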
+#endif /* !USE_NEW_RSPEC_DEFS */
+
+/* Legacy defines for the nrate iovar */
+#define OLD_NRATE_MCS_INUSE	0x00000080	/* MCS in use; indicates b0-6 hold an MCS */
+#define OLD_NRATE_RATE_MASK 0x0000007f /* rate/mcs value */
+#define OLD_NRATE_STF_MASK 0x0000ff00 /* stf mode mask: siso, cdd, stbc, sdm */
+#define OLD_NRATE_STF_SHIFT 8 /* stf mode shift */
+#define OLD_NRATE_OVERRIDE 0x80000000 /* bit indicates override both rate & mode */
+#define OLD_NRATE_OVERRIDE_MCS_ONLY 0x40000000 /* bit indicate to override mcs only */
+#define OLD_NRATE_SGI 0x00800000 /* sgi mode */
+#define OLD_NRATE_LDPC_CODING 0x00400000 /* bit indicates adv coding in use */
+
+#define OLD_NRATE_STF_SISO 0 /* stf mode SISO */
+#define OLD_NRATE_STF_CDD 1 /* stf mode CDD */
+#define OLD_NRATE_STF_STBC 2 /* stf mode STBC */
+#define OLD_NRATE_STF_SDM 3 /* stf mode SDM */
+
+#define WLC_11N_N_PROP_MCS 6 /* number of proprietary 11n MCS'es */
+#define WLC_11N_FIRST_PROP_MCS 87 /* first Broadcom proprietary MCS */
+#define WLC_11N_LAST_PROP_MCS 102
+
+#define MAX_CCA_CHANNELS 38 /* Max number of 20 Mhz wide channels */
+#define MAX_CCA_SECS 1 /* CCA keeps this many seconds history - trimmed for dongle */
+
+#define IBSS_MED	15	/* Medium in-bss congestion percentage */
+#define IBSS_HI 25 /* Hi in-bss congestion percentage */
+#define OBSS_MED 12
+#define OBSS_HI 25
+#define INTERFER_MED 5
+#define INTERFER_HI 10
+
+#define CCA_FLAG_2G_ONLY	0x01	/* Return a channel from the 2.4 GHz band */
+#define CCA_FLAG_5G_ONLY	0x02	/* Return a channel from the 5 GHz band */
+#define CCA_FLAG_IGNORE_DURATION 0x04 /* Ignore dwell time for each channel */
+#define CCA_FLAGS_PREFER_1_6_11 0x10
+#define CCA_FLAG_IGNORE_INTERFER 0x20 /* do not exclude channel based on interfer level */
+
+#define CCA_ERRNO_BAND 1 /* After filtering for band pref, no choices left */
+#define CCA_ERRNO_DURATION 2 /* After filtering for duration, no choices left */
+#define CCA_ERRNO_PREF_CHAN 3 /* After filtering for chan pref, no choices left */
+#define CCA_ERRNO_INTERFER 4 /* After filtering for interference, no choices left */
+#define CCA_ERRNO_TOO_FEW 5 /* Only 1 channel was input */
+
+#define WL_STA_AID(a) ((a) &~ 0xc000)
+
+/* Flags for sta_info_t indicating properties of STA */
+#define WL_STA_BRCM 0x00000001 /* Running a Broadcom driver */
+#define WL_STA_WME 0x00000002 /* WMM association */
+#define WL_STA_NONERP 0x00000004 /* No ERP */
+#define WL_STA_AUTHE 0x00000008 /* Authenticated */
+#define WL_STA_ASSOC 0x00000010 /* Associated */
+#define WL_STA_AUTHO 0x00000020 /* Authorized */
+#define WL_STA_WDS 0x00000040 /* Wireless Distribution System */
+#define WL_STA_WDS_LINKUP 0x00000080 /* WDS traffic/probes flowing properly */
+#define WL_STA_PS 0x00000100 /* STA is in power save mode from AP's viewpoint */
+#define WL_STA_APSD_BE 0x00000200 /* APSD delv/trigger for AC_BE is default enabled */
+#define WL_STA_APSD_BK 0x00000400 /* APSD delv/trigger for AC_BK is default enabled */
+#define WL_STA_APSD_VI 0x00000800 /* APSD delv/trigger for AC_VI is default enabled */
+#define WL_STA_APSD_VO 0x00001000 /* APSD delv/trigger for AC_VO is default enabled */
+#define WL_STA_N_CAP 0x00002000 /* STA 802.11n capable */
+#define WL_STA_SCBSTATS 0x00004000 /* Per STA debug stats */
+#define WL_STA_AMPDU_CAP 0x00008000 /* STA AMPDU capable */
+#define WL_STA_AMSDU_CAP 0x00010000 /* STA AMSDU capable */
+#define WL_STA_MIMO_PS 0x00020000 /* mimo ps mode is enabled */
+#define WL_STA_MIMO_RTS 0x00040000 /* send rts in mimo ps mode */
+#define WL_STA_RIFS_CAP 0x00080000 /* rifs enabled */
+#define WL_STA_VHT_CAP 0x00100000 /* STA VHT(11ac) capable */
+#define WL_STA_WPS 0x00200000 /* WPS state */
+#define WL_STA_DWDS_CAP 0x01000000 /* DWDS CAP */
+#define WL_STA_DWDS 0x02000000 /* DWDS active */
+#define WL_WDS_LINKUP WL_STA_WDS_LINKUP /* deprecated */
+#define WL_STA_IS_2G 0x04000000 /* 2G channels supported */
+#define WL_STA_IS_5G 0x08000000 /* 5G channels supported */
+#define WL_STA_IS_6G 0x10000000 /* 6G channels supported */
+
+/* STA HT cap fields */
+#define WL_STA_CAP_LDPC_CODING 0x0001 /* Support for rx of LDPC coded pkts */
+#define WL_STA_CAP_40MHZ 0x0002 /* FALSE:20Mhz, TRUE:20/40MHZ supported */
+#define WL_STA_CAP_MIMO_PS_MASK 0x000C /* Mimo PS mask */
+#define WL_STA_CAP_MIMO_PS_SHIFT 0x0002 /* Mimo PS shift */
+#define WL_STA_CAP_MIMO_PS_OFF 0x0003 /* Mimo PS, no restriction */
+#define WL_STA_CAP_MIMO_PS_RTS 0x0001 /* Mimo PS, send RTS/CTS around MIMO frames */
+#define WL_STA_CAP_MIMO_PS_ON 0x0000 /* Mimo PS, MIMO disallowed */
+#define WL_STA_CAP_GF 0x0010 /* Greenfield preamble support */
+#define WL_STA_CAP_SHORT_GI_20 0x0020 /* 20MHZ short guard interval support */
+#define WL_STA_CAP_SHORT_GI_40 0x0040 /* 40Mhz short guard interval support */
+#define WL_STA_CAP_TX_STBC 0x0080 /* Tx STBC support */
+#define WL_STA_CAP_RX_STBC_MASK 0x0300 /* Rx STBC mask */
+#define WL_STA_CAP_RX_STBC_SHIFT 8 /* Rx STBC shift */
+#define WL_STA_CAP_DELAYED_BA 0x0400 /* delayed BA support */
+#define WL_STA_CAP_MAX_AMSDU	0x0800	/* Max AMSDU size in bytes, 0=3839, 1=7935 */
+#define WL_STA_CAP_DSSS_CCK 0x1000 /* DSSS/CCK supported by the BSS */
+#define WL_STA_CAP_PSMP 0x2000 /* Power Save Multi Poll support */
+#define WL_STA_CAP_40MHZ_INTOLERANT 0x4000 /* 40MHz Intolerant */
+#define WL_STA_CAP_LSIG_TXOP 0x8000 /* L-SIG TXOP protection support */
+
+#define WL_STA_CAP_RX_STBC_NO 0x0 /* no rx STBC support */
+#define WL_STA_CAP_RX_STBC_ONE_STREAM 0x1 /* rx STBC support of 1 spatial stream */
+#define WL_STA_CAP_RX_STBC_TWO_STREAM 0x2 /* rx STBC support of 1-2 spatial streams */
+#define WL_STA_CAP_RX_STBC_THREE_STREAM 0x3 /* rx STBC support of 1-3 spatial streams */
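+
+/* Illustrative sketch: extracting the multi-bit HT capability fields with
+ * the masks/shifts defined above.
+ */
+static inline uint16
+wl_sta_cap_mimo_ps(uint16 ht_cap)
+{
+	return (uint16)((ht_cap & WL_STA_CAP_MIMO_PS_MASK) >> WL_STA_CAP_MIMO_PS_SHIFT);
+}
+
+static inline uint16
+wl_sta_cap_rx_stbc(uint16 ht_cap)
+{
+	return (uint16)((ht_cap & WL_STA_CAP_RX_STBC_MASK) >> WL_STA_CAP_RX_STBC_SHIFT);
+}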
+
+/* scb vht flags */
+#define WL_STA_VHT_LDPCCAP 0x0001
+#define WL_STA_SGI80 0x0002
+#define WL_STA_SGI160 0x0004
+#define WL_STA_VHT_TX_STBCCAP 0x0008
+#define WL_STA_VHT_RX_STBCCAP 0x0010
+#define WL_STA_SU_BEAMFORMER 0x0020
+#define WL_STA_SU_BEAMFORMEE 0x0040
+#define WL_STA_MU_BEAMFORMER 0x0080
+#define WL_STA_MU_BEAMFORMEE 0x0100
+#define WL_STA_VHT_TXOP_PS 0x0200
+#define WL_STA_HTC_VHT_CAP 0x0400
+
+/* Values for TX Filter override mode */
+#define WLC_TXFILTER_OVERRIDE_DISABLED 0
+#define WLC_TXFILTER_OVERRIDE_ENABLED 1
+
+#define WL_IOCTL_ACTION_GET 0x0
+#define WL_IOCTL_ACTION_SET 0x1
+#define WL_IOCTL_ACTION_OVL_IDX_MASK 0x1e
+#define WL_IOCTL_ACTION_OVL_RSV 0x20
+#define WL_IOCTL_ACTION_OVL 0x40
+#define WL_IOCTL_ACTION_MASK 0x7e
+#define WL_IOCTL_ACTION_OVL_SHIFT 1
+
+/* For WLC_SET_INFRA ioctl & infra_configuration iovar SET/GET operations */
+#define WL_BSSTYPE_INDEP 0
+#define WL_BSSTYPE_INFRA 1
+#define WL_BSSTYPE_ANY 2 /* deprecated */
+#define WL_BSSTYPE_MESH 3
+
+/* Bit definitions of mws_active_scan_throttle iovar */
+
+#define WL_SCAN_THROTTLE_MASK 0xF
+
+#define WL_SCAN_THROTTLE_ASSOCSCAN (1U << 0)
+#define WL_SCAN_THROTTLE_ROAMSCAN (1U << 1)
+#define WL_SCAN_THROTTLE_OTHER_FW_SCAN (1U << 2) /* for other scans like pno etc */
+#define WL_SCAN_THROTTLE_HOSTSCAN (1U << 3)
+
+/* Mask bit for Assoc scan, Roam scan, Other FW scan, Host scan bit defines */
+#define WL_SCANFLAGS_CLIENT_MASK 0xF00u
+#define WL_SCANFLAGS_CLIENT_SHIFT 8
+
+/* Mask bit for LOW power scan, High accuracy scan, LOW span scan bit defines */
+#define WL_SCANFLAGS_SCAN_MODE_MASK 0x7000u
+#define WL_SCANFLAGS_SCAN_MODE_SHIFT 12u
+
+/* Bitmask for scan_type */
+/* Reserved flag precludes the use of 0xff for scan_type which is
+ * interpreted as default for backward compatibility.
+ * Low priority scan uses currently reserved bit,
+ * this should be changed as scan_type extended.
+ * So, reserved flag definition removed.
+ */
+/* Use lower 16 bit for scan flags, the upper 16 bits are for internal use */
+#define WL_SCANFLAGS_PASSIVE 0x01U /* force passive scan */
+#define WL_SCANFLAGS_LOW_PRIO 0x02U /* Low priority scan */
+#define WL_SCANFLAGS_PROHIBITED 0x04U /* allow scanning prohibited channels */
+#define WL_SCANFLAGS_OFFCHAN 0x08U /* allow scanning/reporting off-channel APs */
+#define WL_SCANFLAGS_HOTSPOT 0x10U /* automatic ANQP to hotspot APs */
+#define WL_SCANFLAGS_SWTCHAN	0x20U	/* Force channel switch for different bandwidth */
+#define WL_SCANFLAGS_FORCE_PARALLEL 0x40U	/* Force parallel scan even when actcb_fn_t is set;
+						 * by default parallel scan is disabled when
+						 * actcb_fn_t is provided.
+						 */
+#define WL_SCANFLAGS_SISO	0x40U	/* Use 1 RX chain for scanning (same bit as FORCE_PARALLEL) */
+#define WL_SCANFLAGS_MIMO 0x80U /* Force MIMO scanning */
+#define WL_SCANFLAGS_ASSOCSCAN 0x100U /* Assoc scan */
+#define WL_SCANFLAGS_ROAMSCAN 0x200U /* Roam scan */
+#define WL_SCANFLAGS_FWSCAN 0x400U /* Other FW scan */
+#define WL_SCANFLAGS_HOSTSCAN 0x800U /* Host scan */
+#define WL_SCANFLAGS_LOW_POWER_SCAN 0x1000U /* LOW power scan, scheduled scan
+ * only on scancore
+ */
+#define WL_SCANFLAGS_HIGH_ACCURACY 0x2000U /* High accuracy scan, which needs
+ * reliable scan results
+ */
+#define WL_SCANFLAGS_LOW_SPAN 0x4000U /* LOW span scan, which expects
+ * scan to be completed ASAP
+ */
+#define WL_SCANFLAGS_LISTEN 0x8000U /* Listen option in escan
+ * enable LISTEN along with PASSIVE flag
+ */
+
+/* BIT MASK for SSID TYPE */
+#define WL_SCAN_SSIDFLAGS_SHORT_SSID 0x01U /* Use as Regular SSID */
+
+/* Value to decide scan type based on scqs */
+#define WL_SC_RETRY_SCAN_MODE_NO_SCAN 0x0u /* Do not reschedule scan */
+#define WL_SC_RETRY_SCAN_MODE_HIGH_ACC 0x1u /* Reschedule scan as HighAccuracy */
+#define WL_SC_RETRY_SCAN_MODE_LOWPOWER 0x2u /* Reschedule scan as LOWPOWER */
+#define WL_SC_RETRY_SCAN_MODE_AUTO 0x3u /* Scan rescheduling type is decided
+ * dynamically.
+ */
+
+/* wl_iscan_results status values */
+#define WL_SCAN_RESULTS_SUCCESS 0
+#define WL_SCAN_RESULTS_PARTIAL 1
+#define WL_SCAN_RESULTS_PENDING 2
+#define WL_SCAN_RESULTS_ABORTED 3
+#define WL_SCAN_RESULTS_NO_MEM 4
+
+/* Flags for parallel scan */
+/* Bitmap to enable/disable rsdb parallel scan, 5g-5g/2g-2g parallel scan
+ * SCAN_PARALLEL_PASSIVE_5G ==> 5g-5g parallel scan
+ * SCAN_PARALLEL_PASSIVE_2G ==> 2g-2g parallel scan
+ */
+#define SCAN_PARALLEL_PASSIVE_5G (0x40)
+#define SCAN_PARALLEL_PASSIVE_2G (0x80)
+
+#define SCANOL_ENABLED (1 << 0)
+#define SCANOL_BCAST_SSID (1 << 1)
+#define SCANOL_NOTIFY_BCAST_SSID (1 << 2)
+#define SCANOL_RESULTS_PER_CYCLE (1 << 3)
+
+/* scan times in milliseconds */
+#define SCANOL_HOME_TIME 45 /* for home channel processing */
+#define SCANOL_ASSOC_TIME 20 /* dwell on a channel while associated */
+#define SCANOL_UNASSOC_TIME 40 /* dwell on a channel while unassociated */
+#define SCANOL_PASSIVE_TIME		110	/* listen on a channel for passive scan */
+#define SCANOL_AWAY_LIMIT 100 /* max time to be away from home channel */
+#define SCANOL_IDLE_REST_TIME 40
+#define SCANOL_IDLE_REST_MULTIPLIER 0
+#define SCANOL_ACTIVE_REST_TIME 20
+#define SCANOL_ACTIVE_REST_MULTIPLIER 0
+#define SCANOL_CYCLE_IDLE_REST_TIME 300000 /* Idle Rest Time between Scan Cycle (msec) */
+#define SCANOL_CYCLE_IDLE_REST_MULTIPLIER 0 /* Idle Rest Time Multiplier */
+#define SCANOL_CYCLE_ACTIVE_REST_TIME 200
+#define SCANOL_CYCLE_ACTIVE_REST_MULTIPLIER 0
+#define SCANOL_MAX_REST_TIME 3600000 /* max rest time between scan cycle (msec) */
+#define SCANOL_CYCLE_DEFAULT 0 /* default for Max Scan Cycle, 0 = forever */
+#define SCANOL_CYCLE_MAX 864000 /* Max Scan Cycle */
+ /* 10 sec/scan cycle => 100 days */
+#define SCANOL_NPROBES 2 /* for Active scan; send n probes on each channel */
+#define SCANOL_NPROBES_MAX 5 /* for Active scan; send n probes on each channel */
+#define SCANOL_SCAN_START_DLY 10 /* delay start of offload scan (sec) */
+#define SCANOL_SCAN_START_DLY_MAX 240 /* delay start of offload scan (sec) */
+#define SCANOL_MULTIPLIER_MAX 10 /* Max Multiplier */
+#define SCANOL_UNASSOC_TIME_MAX 100 /* max dwell on a channel while unassociated */
+#define SCANOL_PASSIVE_TIME_MAX 500 /* max listen on a channel for passive scan */
+#define SCANOL_SSID_MAX 16 /* max supported preferred SSID */
+
+/* masks for channel and ssid count */
+#define WL_SCAN_PARAMS_COUNT_MASK 0x0000ffff
+#define WL_SCAN_PARAMS_NSSID_SHIFT 16
+
+#define WL_SCAN_ACTION_START 1
+#define WL_SCAN_ACTION_CONTINUE 2
+#define WL_SCAN_ACTION_ABORT 3
+#if defined(SIMPLE_ISCAN)
+#define ISCAN_RETRY_CNT 5
+#define ISCAN_STATE_IDLE 0
+#define ISCAN_STATE_SCANING 1
+#define ISCAN_STATE_PENDING 2
+#endif /* SIMPLE_ISCAN */
+
+#define ANTENNA_NUM_1 1 /* total number of antennas to be used */
+#define ANTENNA_NUM_2 2
+#define ANTENNA_NUM_3 3
+#define ANTENNA_NUM_4 4
+
+#define ANT_SELCFG_AUTO 0x80 /* bit indicates antenna sel AUTO */
+#define ANT_SELCFG_MASK 0x33 /* antenna configuration mask */
+#define ANT_SELCFG_TX_UNICAST 0 /* unicast tx antenna configuration */
+#define ANT_SELCFG_RX_UNICAST 1 /* unicast rx antenna configuration */
+#define ANT_SELCFG_TX_DEF 2 /* default tx antenna configuration */
+#define ANT_SELCFG_RX_DEF 3 /* default rx antenna configuration */
+
+/* interference source detection and identification mode */
+#define ITFR_MODE_DISABLE 0 /* disable feature */
+#define ITFR_MODE_MANUAL_ENABLE 1 /* enable manual detection */
+#define ITFR_MODE_AUTO_ENABLE 2 /* enable auto detection */
+
+/* bit definitions for flags in interference source report */
+#define ITFR_INTERFERENCED 1 /* interference detected */
+#define ITFR_HOME_CHANNEL 2 /* home channel has interference */
+#define ITFR_NOISY_ENVIRONMENT	4	/* noisy environment so feature stopped */
+
+#define WL_NUM_RPI_BINS 8
+#define WL_RM_TYPE_BASIC 1
+#define WL_RM_TYPE_CCA 2
+#define WL_RM_TYPE_RPI 3
+#define WL_RM_TYPE_ABORT -1 /* ABORT any in-progress RM request */
+
+#define WL_RM_FLAG_PARALLEL (1<<0)
+
+#define WL_RM_FLAG_LATE (1<<1)
+#define WL_RM_FLAG_INCAPABLE (1<<2)
+#define WL_RM_FLAG_REFUSED (1<<3)
+
+/* flags */
+#define WLC_ASSOC_REQ_IS_REASSOC 0x01 /* assoc req was actually a reassoc */
+
+#define WLC_CIS_DEFAULT 0 /* built-in default */
+#define WLC_CIS_SROM 1 /* source is sprom */
+#define WLC_CIS_OTP 2 /* source is otp */
+
+/* PCL - Power Control Loop */
+/* current gain setting is replaced by user input */
+#define WL_ATTEN_APP_INPUT_PCL_OFF 0 /* turn off PCL, apply supplied input */
+#define WL_ATTEN_PCL_ON 1 /* turn on PCL */
+/* current gain setting is maintained */
+#define WL_ATTEN_PCL_OFF 2 /* turn off PCL. */
+
+/* defines used by poweridx iovar - it controls power in a-band */
+/* current gain setting is maintained */
+#define WL_PWRIDX_PCL_OFF -2 /* turn off PCL. */
+#define WL_PWRIDX_PCL_ON -1 /* turn on PCL */
+#define WL_PWRIDX_LOWER_LIMIT -2 /* lower limit */
+#define WL_PWRIDX_UPPER_LIMIT 63 /* upper limit */
+/* value >= 0 causes
+ * - input to be set to that value
+ * - PCL to be off
+ */
+
+#define BCM_MAC_STATUS_INDICATION (0x40010200L)
+
+/* magic pattern used for mismatch driver and wl */
+#define WL_TXFIFO_SZ_MAGIC 0xa5a5
+
+/* check this magic number */
+#define WLC_IOCTL_MAGIC 0x14e46c77
+
+/* bss_info_cap_t flags */
+#define WL_BSS_FLAGS_FROM_BEACON 0x01 /* bss_info derived from beacon */
+#define WL_BSS_FLAGS_FROM_CACHE 0x02 /* bss_info collected from cache */
+#define WL_BSS_FLAGS_RSSI_ONCHANNEL 0x04 /* rssi info received on channel (vs offchannel) */
+#define WL_BSS_FLAGS_HS20 0x08 /* hotspot 2.0 capable */
+#define WL_BSS_FLAGS_RSSI_INVALID 0x10 /* BSS contains invalid RSSI */
+#define WL_BSS_FLAGS_RSSI_INACCURATE 0x20 /* BSS contains inaccurate RSSI */
+#define WL_BSS_FLAGS_SNR_INVALID 0x40 /* BSS contains invalid SNR */
+#define WL_BSS_FLAGS_NF_INVALID 0x80 /* BSS contains invalid noise floor */
+
+/* bit definitions for bcnflags in wl_bss_info */
+#define WL_BSS_BCNFLAGS_INTERWORK_PRESENT	0x01	/* beacon had Interworking IE, accessnet valid */
+#define WL_BSS_BCNFLAGS_INTERWORK_PRESENT_VALID 0x02 /* on indicates support for this API */
+#define WL_BSS_BCNFLAGS_MULTIPLE_BSSID_SET 0x4 /* this AP belongs to a multiple BSSID set */
+#define WL_BSS_BCNFLAGS_NONTRANSMITTED_BSSID	0x8	/* this AP is a nontransmitted BSSID */
+
+/* bssinfo flag for nbss_cap */
+#define VHT_BI_SGI_80MHZ 0x00000100
+#define VHT_BI_80MHZ 0x00000200
+#define VHT_BI_160MHZ 0x00000400
+#define VHT_BI_8080MHZ 0x00000800
+
+/* reference to wl_ioctl_t struct used by usermode driver */
+#define ioctl_subtype set /* subtype param */
+#define ioctl_pid used /* pid param */
+#define ioctl_status needed /* status param */
+
+/* Enumerate crypto algorithms */
+#define CRYPTO_ALGO_OFF 0
+#define CRYPTO_ALGO_WEP1 1
+#define CRYPTO_ALGO_TKIP 2
+#define CRYPTO_ALGO_WEP128 3
+#define CRYPTO_ALGO_AES_CCM 4
+#define CRYPTO_ALGO_AES_OCB_MSDU 5
+#define CRYPTO_ALGO_AES_OCB_MPDU 6
+#if !defined(BCMCCX) && !defined(BCMEXTCCX)
+#define CRYPTO_ALGO_NALG 7
+#else
+#define CRYPTO_ALGO_CKIP 7
+#define CRYPTO_ALGO_CKIP_MMH 8
+#define CRYPTO_ALGO_WEP_MMH 9
+#define CRYPTO_ALGO_NALG 10
+#endif /* !BCMCCX && !BCMEXTCCX */
+
+#define CRYPTO_ALGO_SMS4 11
+#define CRYPTO_ALGO_PMK 12 /* for 802.1x supp to set PMK before 4-way */
+#define CRYPTO_ALGO_BIP 13 /* 802.11w BIP (aes cmac) */
+
+#define CRYPTO_ALGO_AES_GCM 14 /* 128 bit GCM */
+#define CRYPTO_ALGO_AES_CCM256 15 /* 256 bit CCM */
+#define CRYPTO_ALGO_AES_GCM256 16 /* 256 bit GCM */
+#define CRYPTO_ALGO_BIP_CMAC256 17 /* 256 bit BIP CMAC */
+#define CRYPTO_ALGO_BIP_GMAC 18 /* 128 bit BIP GMAC */
+#define CRYPTO_ALGO_BIP_GMAC256 19 /* 256 bit BIP GMAC */
+
+#define CRYPTO_ALGO_NONE CRYPTO_ALGO_OFF
+
+/* algo bit vector */
+#define KEY_ALGO_MASK(_algo) (1 << _algo)
+
+#if defined(BCMCCX) || defined(BCMEXTCCX)
+#define KEY_ALGO_MASK_CCX (KEY_ALGO_MASK(CRYPTO_ALGO_CKIP) | \
+ KEY_ALGO_MASK(CRYPTO_ALGO_CKIP_MMH) | \
+ KEY_ALGO_MASK(CRYPTO_ALGO_WEP_MMH))
+#endif /* defined(BCMCCX) || defined(BCMEXTCCX) */
+
+#define KEY_ALGO_MASK_WEP (KEY_ALGO_MASK(CRYPTO_ALGO_WEP1) | \
+ KEY_ALGO_MASK(CRYPTO_ALGO_WEP128) | \
+ KEY_ALGO_MASK(CRYPTO_ALGO_NALG))
+
+#define KEY_ALGO_MASK_AES (KEY_ALGO_MASK(CRYPTO_ALGO_AES_CCM) | \
+ KEY_ALGO_MASK(CRYPTO_ALGO_AES_CCM256) | \
+ KEY_ALGO_MASK(CRYPTO_ALGO_AES_GCM) | \
+ KEY_ALGO_MASK(CRYPTO_ALGO_AES_GCM256))
+#define KEY_ALGO_MASK_TKIP (KEY_ALGO_MASK(CRYPTO_ALGO_TKIP))
+#define KEY_ALGO_MASK_WAPI (KEY_ALGO_MASK(CRYPTO_ALGO_SMS4))
+
+#define WSEC_GEN_MIC_ERROR 0x0001
+#define WSEC_GEN_REPLAY 0x0002
+#define WSEC_GEN_ICV_ERROR 0x0004
+#define WSEC_GEN_MFP_ACT_ERROR 0x0008
+#define WSEC_GEN_MFP_DISASSOC_ERROR 0x0010
+#define WSEC_GEN_MFP_DEAUTH_ERROR 0x0020
+
+#define WL_SOFT_KEY (1 << 0) /* Indicates this key is using soft encrypt */
+#define WL_PRIMARY_KEY	(1 << 1)	/* Indicates this key is the primary (i.e., tx) key */
+#if defined(BCMCCX) || defined(BCMEXTCCX)
+#define WL_CKIP_KP (1 << 4) /* CMIC */
+#define WL_CKIP_MMH (1 << 5) /* CKIP */
+#else
+#define WL_KF_RES_4 (1 << 4) /* Reserved for backward compat */
+#define WL_KF_RES_5 (1 << 5) /* Reserved for backward compat */
+#endif /* BCMCCX || BCMEXTCCX */
+#define WL_IBSS_PEER_GROUP_KEY	(1 << 6)	/* Indicates a group key for an IBSS peer */
+#define WL_LINK_KEY (1 << 7) /* For linking keys of both cores */
+#define WL_UNLINK_KEY (1 << 8) /* For unlinking keys of both cores */
+
+/* wireless security bitvec */
+#define WSEC_NONE 0x0
+#define WEP_ENABLED 0x0001
+#define TKIP_ENABLED 0x0002
+#define AES_ENABLED 0x0004
+#define WSEC_SWFLAG 0x0008
+#ifdef BCMCCX
+#define CKIP_KP_ENABLED 0x0010
+#define CKIP_MIC_ENABLED 0x0020
+#endif /* BCMCCX */
+#define SES_OW_ENABLED 0x0040 /* to go into transition mode without setting wep */
+#ifdef WLFIPS
+#define FIPS_ENABLED 0x0080
+#endif /* WLFIPS */
+
+#ifdef BCMWAPI_WPI
+#define SMS4_ENABLED 0x0100
+#endif /* BCMWAPI_WPI */
+
+/* wsec macros for operating on the above definitions */
+#ifdef WLWSEC
+#define WSEC_WEP_ENABLED(wsec) ((wsec) & WEP_ENABLED)
+#define WSEC_TKIP_ENABLED(wsec) ((wsec) & TKIP_ENABLED)
+#define WSEC_AES_ENABLED(wsec) ((wsec) & AES_ENABLED)
+#else /* WLWSEC */
+#define WSEC_WEP_ENABLED(wsec) NULL
+#define WSEC_TKIP_ENABLED(wsec) NULL
+#define WSEC_AES_ENABLED(wsec) NULL
+#endif /* WLWSEC */
+
+/* Macros to check if algorithm is enabled */
+#define WSEC_INFO_ALGO_ENABLED(_wi, _algo) \
+ (_wi).cur_algos & (1 << CRYPTO_ALGO_##_algo)
+
+#define WSEC_INFO_ALGO_NONE(_wi) (((_wi).cur_algos) == 0)
+
+#ifdef WLWSEC
+#ifdef BCMCCX
+#define WSEC_CKIP_KP_ENABLED(wsec) ((wsec) & CKIP_KP_ENABLED)
+#define WSEC_CKIP_MIC_ENABLED(wsec) ((wsec) & CKIP_MIC_ENABLED)
+#define WSEC_CKIP_ENABLED(wsec) ((wsec) & (CKIP_KP_ENABLED|CKIP_MIC_ENABLED))
+
+#ifdef BCMWAPI_WPI
+#define WSEC_ENABLED(wsec) \
+ ((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED | CKIP_KP_ENABLED | \
+ CKIP_MIC_ENABLED | SMS4_ENABLED))
+#endif /* BCMWAPI_WPI */
+
+#ifndef BCMWAPI_WPI /* BCMWAPI_WPI */
+#define WSEC_ENABLED(wsec) \
+ ((wsec) & \
+ (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED | CKIP_KP_ENABLED | CKIP_MIC_ENABLED))
+#endif /* BCMWAPI_WPI */
+#else /* defined BCMCCX */
+
+#ifdef BCMWAPI_WPI
+#define WSEC_ENABLED(wsec) ((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED | SMS4_ENABLED))
+#endif /* BCMWAPI_WPI */
+
+#ifndef BCMWAPI_WPI /* BCMWAPI_WPI */
+#define WSEC_ENABLED(wsec) ((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED))
+#endif /* BCMWAPI_WPI */
+#endif /* BCMCCX */
+#else /* WLWSEC */
+#define WSEC_ENABLED(wsec) 0
+#endif /* WLWSEC */
+
+#define WSEC_SES_OW_ENABLED(wsec) ((wsec) & SES_OW_ENABLED)
+
+#ifdef BCMWAPI_WAI
+#define WSEC_SMS4_ENABLED(wsec) ((wsec) & SMS4_ENABLED)
+#endif /* BCMWAPI_WAI */
+
+/* Following macros are not used any more. Just kept here to
+ * avoid build issue in BISON/CARIBOU branch
+ */
+#define MFP_CAPABLE 0x0200
+#define MFP_REQUIRED 0x0400
+#define MFP_SHA256 0x0800 /* a special configuration for STA for WIFI test tool */
+
+/* WPA authentication mode bitvec */
+#define WPA_AUTH_DISABLED 0x0000 /* Legacy (i.e., non-WPA) */
+#define WPA_AUTH_NONE 0x0001 /* none (IBSS) */
+#define WPA_AUTH_UNSPECIFIED 0x0002 /* over 802.1x */
+#define WPA_AUTH_PSK 0x0004 /* Pre-shared key */
+#if defined(BCMCCX) || defined(BCMEXTCCX)
+#define WPA_AUTH_CCKM 0x0008 /* CCKM */
+#define WPA2_AUTH_CCKM 0x0010 /* CCKM2 */
+#endif /* BCMCCX || BCMEXTCCX */
+/* #define WPA_AUTH_8021X 0x0020 */ /* 802.1x, reserved */
+#define WPA2_AUTH_UNSPECIFIED 0x0040 /* over 802.1x */
+#define WPA2_AUTH_PSK 0x0080 /* Pre-shared key */
+#define BRCM_AUTH_PSK 0x0100 /* BRCM specific PSK */
+#define BRCM_AUTH_DPT 0x0200 /* DPT PSK without group keys */
+#if defined(BCMWAPI_WAI) || defined(BCMWAPI_WPI)
+#define WPA_AUTH_WAPI	0x0400	/* same value as WAPI_AUTH_UNSPECIFIED */
+#define WAPI_AUTH_NONE WPA_AUTH_NONE /* none (IBSS) */
+#define WAPI_AUTH_UNSPECIFIED 0x0400 /* over AS */
+#define WAPI_AUTH_PSK 0x0800 /* Pre-shared key */
+#endif /* BCMWAPI_WAI || BCMWAPI_WPI */
+#define WPA2_AUTH_1X_SHA256 0x1000 /* 1X with SHA256 key derivation */
+#define WPA2_AUTH_TPK 0x2000 /* TDLS Peer Key */
+#define WPA2_AUTH_FT 0x4000 /* Fast Transition. */
+#define WPA2_AUTH_PSK_SHA256 0x8000 /* PSK with SHA256 key derivation */
+#define WPA2_AUTH_FILS_SHA256 0x10000 /* FILS with SHA256 key derivation */
+#define WPA2_AUTH_FILS_SHA384 0x20000 /* FILS with SHA384 key derivation */
+#define WPA2_AUTH_IS_FILS(auth) ((auth) & (WPA2_AUTH_FILS_SHA256 | WPA2_AUTH_FILS_SHA384))
+#define WPA3_AUTH_SAE_PSK 0x40000 /* SAE with 4-way handshake */
+#define WPA3_AUTH_DPP_AKM 0x80000 /* Device Provisioning Protocol (DPP) */
+#define WPA3_AUTH_OWE 0x100000 /* OWE */
+#define WPA3_AUTH_1X_SUITE_B_SHA256 0x200000 /* Suite B SHA256 */
+#define WPA3_AUTH_1X_SUITE_B_SHA384 0x400000 /* Suite B-192 SHA384 */
+#define WPA3_AUTH_PSK_SHA384 0x800000 /* PSK with SHA384 key derivation */
+#define WPA3_AUTH_SAE_AP_ONLY 0x1000000 /* SAE restriction to connect to pure SAE APs */
+/* WPA2_AUTH_SHA256 not used anymore. Just kept here to avoid build issue in DINGO */
+#define WPA2_AUTH_SHA256 0x8000
+#define WPA_AUTH_PFN_ANY 0xffffffff /* for PFN, match only ssid */
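+
+/* Usage sketch (illustrative only): wpa_auth is a bitvec, so multiple AKMs
+ * may be enabled at once and are tested with plain mask operations, as
+ * WPA2_AUTH_IS_FILS() above already does. 'wpa_auth' is assumed to come
+ * from a prior WLC_GET_WPA_AUTH ioctl (defined below).
+ *
+ *   uint32 wpa_auth = WPA2_AUTH_PSK | WPA3_AUTH_SAE_PSK; // WPA2/WPA3 mixed PSK
+ *   if (wpa_auth & (WPA2_AUTH_PSK | WPA2_AUTH_PSK_SHA256)) {
+ *       // WPA2-Personal is one of the allowed AKMs
+ *   }
+ *   if (WPA2_AUTH_IS_FILS(wpa_auth)) {
+ *       // FILS (SHA256 or SHA384) key derivation is in use
+ *   }
+ */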
+
+/* pmkid */
+#define MAXPMKID 16 /* max # PMKID cache entries NDIS */
+
+#ifdef MACOSX
+/* Macos limits ioctl maxlen to 2k */
+#define WLC_IOCTL_MAXLEN 2048 /* max length ioctl buffer required */
+#else
+/* SROM12 changes */
+#define WLC_IOCTL_MAXLEN 8192 /* max length ioctl buffer required */
+#endif /* MACOSX */
+
+#define WLC_IOCTL_SMLEN 256 /* "small" length ioctl buffer required */
+#define WLC_IOCTL_MEDLEN 1896 /* "med" length ioctl buffer required */
+#if defined(LCNCONF) || defined(LCN40CONF) || defined(LCN20CONF)
+#define WLC_SAMPLECOLLECT_MAXLEN 8192 /* Max Sample Collect buffer */
+#else
+#define WLC_SAMPLECOLLECT_MAXLEN 10240 /* Max Sample Collect buffer for two cores */
+#endif
+#define WLC_SAMPLECOLLECT_MAXLEN_LCN40 8192
+
+#define WLC_IOCTL_NANRESP_MAXLEN 4096u /* "max" length nan ioctl resp buffer required */
+#define WLC_IOCTL_NANRESP_MEDLEN 800u /* "med" length nan ioctl resp buffer required */
+
+/* common ioctl definitions */
+#define WLC_GET_MAGIC 0
+#define WLC_GET_VERSION 1
+#define WLC_UP 2
+#define WLC_DOWN 3
+#define WLC_GET_LOOP 4
+#define WLC_SET_LOOP 5
+#define WLC_DUMP 6
+#define WLC_GET_MSGLEVEL 7
+#define WLC_SET_MSGLEVEL 8
+#define WLC_GET_PROMISC 9
+#define WLC_SET_PROMISC 10
+/* #define WLC_OVERLAY_IOCTL 11 */ /* not supported */
+#define WLC_GET_RATE 12
+#define WLC_GET_MAX_RATE 13
+#define WLC_GET_INSTANCE 14
+/* #define WLC_GET_FRAG 15 */ /* no longer supported */
+/* #define WLC_SET_FRAG 16 */ /* no longer supported */
+/* #define WLC_GET_RTS 17 */ /* no longer supported */
+/* #define WLC_SET_RTS 18 */ /* no longer supported */
+#define WLC_GET_INFRA 19
+#define WLC_SET_INFRA 20
+#define WLC_GET_AUTH 21
+#define WLC_SET_AUTH 22
+#define WLC_GET_BSSID 23
+#define WLC_SET_BSSID 24
+#define WLC_GET_SSID 25
+#define WLC_SET_SSID 26
+#define WLC_RESTART 27
+#define WLC_TERMINATED 28
+/* #define WLC_DUMP_SCB 28 */ /* no longer supported */
+#define WLC_GET_CHANNEL 29
+#define WLC_SET_CHANNEL 30
+#define WLC_GET_SRL 31
+#define WLC_SET_SRL 32
+#define WLC_GET_LRL 33
+#define WLC_SET_LRL 34
+#define WLC_GET_PLCPHDR 35
+#define WLC_SET_PLCPHDR 36
+#define WLC_GET_RADIO 37
+#define WLC_SET_RADIO 38
+#define WLC_GET_PHYTYPE 39
+#define WLC_DUMP_RATE 40
+#define WLC_SET_RATE_PARAMS 41
+#define WLC_GET_FIXRATE 42
+#define WLC_SET_FIXRATE 43
+/* #define WLC_GET_WEP 42 */ /* no longer supported */
+/* #define WLC_SET_WEP 43 */ /* no longer supported */
+#define WLC_GET_KEY 44
+#define WLC_SET_KEY 45
+#define WLC_GET_REGULATORY 46
+#define WLC_SET_REGULATORY 47
+#define WLC_GET_PASSIVE_SCAN 48
+#define WLC_SET_PASSIVE_SCAN 49
+#define WLC_SCAN 50
+#define WLC_SCAN_RESULTS 51
+#define WLC_DISASSOC 52
+#define WLC_REASSOC 53
+#define WLC_GET_ROAM_TRIGGER 54
+#define WLC_SET_ROAM_TRIGGER 55
+#define WLC_GET_ROAM_DELTA 56
+#define WLC_SET_ROAM_DELTA 57
+#define WLC_GET_ROAM_SCAN_PERIOD 58
+#define WLC_SET_ROAM_SCAN_PERIOD 59
+#define WLC_EVM 60 /* diag */
+#define WLC_GET_TXANT 61
+#define WLC_SET_TXANT 62
+#define WLC_GET_ANTDIV 63
+#define WLC_SET_ANTDIV 64
+/* #define WLC_GET_TXPWR 65 */ /* no longer supported */
+/* #define WLC_SET_TXPWR 66 */ /* no longer supported */
+#define WLC_GET_CLOSED 67
+#define WLC_SET_CLOSED 68
+#define WLC_GET_MACLIST 69
+#define WLC_SET_MACLIST 70
+#define WLC_GET_RATESET 71
+#define WLC_SET_RATESET 72
+/* #define WLC_GET_LOCALE 73 */ /* no longer supported */
+#define WLC_LONGTRAIN 74
+#define WLC_GET_BCNPRD 75
+#define WLC_SET_BCNPRD 76
+#define WLC_GET_DTIMPRD 77
+#define WLC_SET_DTIMPRD 78
+#define WLC_GET_SROM 79
+#define WLC_SET_SROM 80
+#define WLC_GET_WEP_RESTRICT 81
+#define WLC_SET_WEP_RESTRICT 82
+#define WLC_GET_COUNTRY 83
+#define WLC_SET_COUNTRY 84
+#define WLC_GET_PM 85
+#define WLC_SET_PM 86
+#define WLC_GET_WAKE 87
+#define WLC_SET_WAKE 88
+/* #define WLC_GET_D11CNTS 89 */ /* -> "counters" iovar */
+#define WLC_GET_FORCELINK 90 /* ndis only */
+#define WLC_SET_FORCELINK 91 /* ndis only */
+#define WLC_FREQ_ACCURACY 92 /* diag */
+#define WLC_CARRIER_SUPPRESS 93 /* diag */
+#define WLC_GET_PHYREG 94
+#define WLC_SET_PHYREG 95
+#define WLC_GET_RADIOREG 96
+#define WLC_SET_RADIOREG 97
+#define WLC_GET_REVINFO 98
+#define WLC_GET_UCANTDIV 99
+#define WLC_SET_UCANTDIV 100
+#define WLC_R_REG 101
+#define WLC_W_REG 102
+/* #define WLC_DIAG_LOOPBACK 103 old tray diag */
+/* #define WLC_RESET_D11CNTS 104 */ /* -> "reset_d11cnts" iovar */
+#define WLC_GET_MACMODE 105
+#define WLC_SET_MACMODE 106
+#define WLC_GET_MONITOR 107
+#define WLC_SET_MONITOR 108
+#define WLC_GET_GMODE 109
+#define WLC_SET_GMODE 110
+#define WLC_GET_LEGACY_ERP 111
+#define WLC_SET_LEGACY_ERP 112
+#define WLC_GET_RX_ANT 113
+#define WLC_GET_CURR_RATESET 114 /* current rateset */
+#define WLC_GET_SCANSUPPRESS 115
+#define WLC_SET_SCANSUPPRESS 116
+#define WLC_GET_AP 117
+#define WLC_SET_AP 118
+#define WLC_GET_EAP_RESTRICT 119
+#define WLC_SET_EAP_RESTRICT 120
+#define WLC_SCB_AUTHORIZE 121
+#define WLC_SCB_DEAUTHORIZE 122
+#define WLC_GET_WDSLIST 123
+#define WLC_SET_WDSLIST 124
+#define WLC_GET_ATIM 125
+#define WLC_SET_ATIM 126
+#define WLC_GET_RSSI 127
+#define WLC_GET_PHYANTDIV 128
+#define WLC_SET_PHYANTDIV 129
+#define WLC_AP_RX_ONLY 130
+#define WLC_GET_TX_PATH_PWR 131
+#define WLC_SET_TX_PATH_PWR 132
+#define WLC_GET_WSEC 133
+#define WLC_SET_WSEC 134
+#define WLC_GET_PHY_NOISE 135
+#define WLC_GET_BSS_INFO 136
+#define WLC_GET_PKTCNTS 137
+#define WLC_GET_LAZYWDS 138
+#define WLC_SET_LAZYWDS 139
+#define WLC_GET_BANDLIST 140
+#define WLC_GET_BAND 141
+#define WLC_SET_BAND 142
+#define WLC_SCB_DEAUTHENTICATE 143
+#define WLC_GET_SHORTSLOT 144
+#define WLC_GET_SHORTSLOT_OVERRIDE 145
+#define WLC_SET_SHORTSLOT_OVERRIDE 146
+#define WLC_GET_SHORTSLOT_RESTRICT 147
+#define WLC_SET_SHORTSLOT_RESTRICT 148
+#define WLC_GET_GMODE_PROTECTION 149
+#define WLC_GET_GMODE_PROTECTION_OVERRIDE 150
+#define WLC_SET_GMODE_PROTECTION_OVERRIDE 151
+#define WLC_UPGRADE 152
+/* #define WLC_GET_MRATE 153 */ /* no longer supported */
+/* #define WLC_SET_MRATE 154 */ /* no longer supported */
+#define WLC_GET_IGNORE_BCNS 155
+#define WLC_SET_IGNORE_BCNS 156
+#define WLC_GET_SCB_TIMEOUT 157
+#define WLC_SET_SCB_TIMEOUT 158
+#define WLC_GET_ASSOCLIST 159
+#define WLC_GET_CLK 160
+#define WLC_SET_CLK 161
+#define WLC_GET_UP 162
+#define WLC_OUT 163
+#define WLC_GET_WPA_AUTH 164
+#define WLC_SET_WPA_AUTH 165
+#define WLC_GET_UCFLAGS 166
+#define WLC_SET_UCFLAGS 167
+#define WLC_GET_PWRIDX 168
+#define WLC_SET_PWRIDX 169
+#define WLC_GET_TSSI 170
+#define WLC_GET_SUP_RATESET_OVERRIDE 171
+#define WLC_SET_SUP_RATESET_OVERRIDE 172
+/* #define WLC_SET_FAST_TIMER 173 */ /* no longer supported */
+/* #define WLC_GET_FAST_TIMER 174 */ /* no longer supported */
+/* #define WLC_SET_SLOW_TIMER 175 */ /* no longer supported */
+/* #define WLC_GET_SLOW_TIMER 176 */ /* no longer supported */
+/* #define WLC_DUMP_PHYREGS 177 */ /* no longer supported */
+#define WLC_GET_PROTECTION_CONTROL 178
+#define WLC_SET_PROTECTION_CONTROL 179
+#define WLC_GET_PHYLIST 180
+#define WLC_ENCRYPT_STRENGTH 181 /* ndis only */
+#define WLC_DECRYPT_STATUS 182 /* ndis only */
+#define WLC_GET_KEY_SEQ 183
+#define WLC_GET_SCAN_CHANNEL_TIME 184
+#define WLC_SET_SCAN_CHANNEL_TIME 185
+#define WLC_GET_SCAN_UNASSOC_TIME 186
+#define WLC_SET_SCAN_UNASSOC_TIME 187
+#define WLC_GET_SCAN_HOME_TIME 188
+#define WLC_SET_SCAN_HOME_TIME 189
+#define WLC_GET_SCAN_NPROBES 190
+#define WLC_SET_SCAN_NPROBES 191
+#define WLC_GET_PRB_RESP_TIMEOUT 192
+#define WLC_SET_PRB_RESP_TIMEOUT 193
+#define WLC_GET_ATTEN 194
+#define WLC_SET_ATTEN 195
+#define WLC_GET_SHMEM 196 /* diag */
+#define WLC_SET_SHMEM 197 /* diag */
+/* #define WLC_GET_GMODE_PROTECTION_CTS 198 */ /* no longer supported */
+/* #define WLC_SET_GMODE_PROTECTION_CTS 199 */ /* no longer supported */
+#define WLC_SET_WSEC_TEST 200
+#define WLC_SCB_DEAUTHENTICATE_FOR_REASON 201
+#define WLC_TKIP_COUNTERMEASURES 202
+#define WLC_GET_PIOMODE 203
+#define WLC_SET_PIOMODE 204
+#define WLC_SET_ASSOC_PREFER 205
+#define WLC_GET_ASSOC_PREFER 206
+#define WLC_SET_ROAM_PREFER 207
+#define WLC_GET_ROAM_PREFER 208
+/* #define WLC_SET_LED 209 */ /* no longer supported */
+/* #define WLC_GET_LED 210 */ /* no longer supported */
+#define WLC_GET_INTERFERENCE_MODE 211
+#define WLC_SET_INTERFERENCE_MODE 212
+#define WLC_GET_CHANNEL_QA 213
+#define WLC_START_CHANNEL_QA 214
+#define WLC_GET_CHANNEL_SEL 215
+#define WLC_START_CHANNEL_SEL 216
+#define WLC_GET_VALID_CHANNELS 217
+#define WLC_GET_FAKEFRAG 218
+#define WLC_SET_FAKEFRAG 219
+#define WLC_GET_PWROUT_PERCENTAGE 220
+#define WLC_SET_PWROUT_PERCENTAGE 221
+#define WLC_SET_BAD_FRAME_PREEMPT 222
+#define WLC_GET_BAD_FRAME_PREEMPT 223
+#define WLC_SET_LEAP_LIST 224
+#define WLC_GET_LEAP_LIST 225
+#define WLC_GET_CWMIN 226
+#define WLC_SET_CWMIN 227
+#define WLC_GET_CWMAX 228
+#define WLC_SET_CWMAX 229
+#define WLC_GET_WET 230
+#define WLC_SET_WET 231
+#define WLC_GET_PUB 232
+/* #define WLC_SET_GLACIAL_TIMER 233 */ /* no longer supported */
+/* #define WLC_GET_GLACIAL_TIMER 234 */ /* no longer supported */
+#define WLC_GET_KEY_PRIMARY 235
+#define WLC_SET_KEY_PRIMARY 236
+
+/* #define WLC_DUMP_RADIOREGS 237 */ /* no longer supported */
+#define WLC_GET_ACI_ARGS 238
+#define WLC_SET_ACI_ARGS 239
+#define WLC_UNSET_CALLBACK 240
+#define WLC_SET_CALLBACK 241
+#define WLC_GET_RADAR 242
+#define WLC_SET_RADAR 243
+#define WLC_SET_SPECT_MANAGMENT 244
+#define WLC_GET_SPECT_MANAGMENT 245
+#define WLC_WDS_GET_REMOTE_HWADDR 246 /* handled in wl_linux.c/wl_vx.c */
+#define WLC_WDS_GET_WPA_SUP 247
+#define WLC_SET_CS_SCAN_TIMER 248
+#define WLC_GET_CS_SCAN_TIMER 249
+#define WLC_MEASURE_REQUEST 250
+#define WLC_INIT 251
+#define WLC_SEND_QUIET 252
+#define WLC_KEEPALIVE 253
+#define WLC_SEND_PWR_CONSTRAINT 254
+#define WLC_UPGRADE_STATUS 255
+#define WLC_CURRENT_PWR 256
+#define WLC_GET_SCAN_PASSIVE_TIME 257
+#define WLC_SET_SCAN_PASSIVE_TIME 258
+#define WLC_LEGACY_LINK_BEHAVIOR 259
+#define WLC_GET_CHANNELS_IN_COUNTRY 260
+#define WLC_GET_COUNTRY_LIST 261
+#define WLC_GET_VAR 262 /* get value of named variable */
+#define WLC_SET_VAR 263 /* set named variable to value */
+#define WLC_NVRAM_GET 264 /* deprecated */
+#define WLC_NVRAM_SET 265
+#define WLC_NVRAM_DUMP 266
+#define WLC_REBOOT 267
+#define WLC_SET_WSEC_PMK 268
+#define WLC_GET_AUTH_MODE 269
+#define WLC_SET_AUTH_MODE 270
+#define WLC_GET_WAKEENTRY 271
+#define WLC_SET_WAKEENTRY 272
+#define WLC_NDCONFIG_ITEM 273 /* currently handled in wl_oid.c */
+#define WLC_NVOTPW 274
+#define WLC_OTPW 275
+#define WLC_IOV_BLOCK_GET 276
+#define WLC_IOV_MODULES_GET 277
+#define WLC_SOFT_RESET 278
+#define WLC_GET_ALLOW_MODE 279
+#define WLC_SET_ALLOW_MODE 280
+#define WLC_GET_DESIRED_BSSID 281
+#define WLC_SET_DESIRED_BSSID 282
+#define WLC_DISASSOC_MYAP 283
+#define WLC_GET_NBANDS 284 /* for Dongle EXT_STA support */
+#define WLC_GET_BANDSTATES 285 /* for Dongle EXT_STA support */
+#define WLC_GET_WLC_BSS_INFO 286 /* for Dongle EXT_STA support */
+#define WLC_GET_ASSOC_INFO 287 /* for Dongle EXT_STA support */
+#define WLC_GET_OID_PHY 288 /* for Dongle EXT_STA support */
+#define WLC_SET_OID_PHY 289 /* for Dongle EXT_STA support */
+#define WLC_SET_ASSOC_TIME 290 /* for Dongle EXT_STA support */
+#define WLC_GET_DESIRED_SSID 291 /* for Dongle EXT_STA support */
+#define WLC_GET_CHANSPEC 292 /* for Dongle EXT_STA support */
+#define WLC_GET_ASSOC_STATE 293 /* for Dongle EXT_STA support */
+#define WLC_SET_PHY_STATE 294 /* for Dongle EXT_STA support */
+#define WLC_GET_SCAN_PENDING 295 /* for Dongle EXT_STA support */
+#define WLC_GET_SCANREQ_PENDING 296 /* for Dongle EXT_STA support */
+#define WLC_GET_PREV_ROAM_REASON 297 /* for Dongle EXT_STA support */
+#define WLC_SET_PREV_ROAM_REASON 298 /* for Dongle EXT_STA support */
+#define WLC_GET_BANDSTATES_PI 299 /* for Dongle EXT_STA support */
+#define WLC_GET_PHY_STATE 300 /* for Dongle EXT_STA support */
+#define WLC_GET_BSS_WPA_RSN 301 /* for Dongle EXT_STA support */
+#define WLC_GET_BSS_WPA2_RSN 302 /* for Dongle EXT_STA support */
+#define WLC_GET_BSS_BCN_TS 303 /* for Dongle EXT_STA support */
+#define WLC_GET_INT_DISASSOC 304 /* for Dongle EXT_STA support */
+#define WLC_SET_NUM_PEERS 305 /* for Dongle EXT_STA support */
+#define WLC_GET_NUM_BSS 306 /* for Dongle EXT_STA support */
+#define WLC_PHY_SAMPLE_COLLECT 307 /* phy sample collect mode */
+/* #define WLC_UM_PRIV 308 */ /* Deprecated: usermode driver */
+#define WLC_GET_CMD 309
+/* #define WLC_LAST 310 */ /* Never used - can be reused */
+#define WLC_SET_INTERFERENCE_OVERRIDE_MODE 311 /* set inter mode override */
+#define WLC_GET_INTERFERENCE_OVERRIDE_MODE 312 /* get inter mode override */
+/* #define WLC_GET_WAI_RESTRICT 313 */ /* for WAPI, deprecated use iovar instead */
+/* #define WLC_SET_WAI_RESTRICT 314 */ /* for WAPI, deprecated use iovar instead */
+/* #define WLC_SET_WAI_REKEY 315 */ /* for WAPI, deprecated use iovar instead */
+#define WLC_SET_NAT_CONFIG 316 /* for configuring NAT filter driver */
+#define WLC_GET_NAT_STATE 317
+#define WLC_GET_TXBF_RATESET 318
+#define WLC_SET_TXBF_RATESET 319
+#define WLC_SCAN_CQ 320
+#define WLC_GET_RSSI_QDB 321 /* qdB portion of the RSSI */
+#define WLC_DUMP_RATESET 322
+#define WLC_ECHO 323
+#define WLC_SCB_AUTHENTICATE 325
+#define WLC_LAST 326 /* The last ioctl. Also push this
+ * number when adding new ioctls
+ */
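+
+/* Usage sketch (illustrative only): most of these ioctls carry a fixed-size
+ * value, while WLC_GET_VAR/WLC_SET_VAR carry a NUL-terminated iovar name
+ * followed by its value in the same buffer. The wl_ioctl() helper below is
+ * hypothetical -- the real transport is platform specific (e.g. an
+ * ifreq-based ioctl on Linux).
+ *
+ *   char buf[WLC_IOCTL_SMLEN];
+ *   int val;
+ *
+ *   // fixed-size GET: ioctl code plus an int-sized buffer
+ *   wl_ioctl(ifname, WLC_GET_PM, &val, sizeof(val));
+ *
+ *   // named iovar GET: buffer holds "name\0"; the result overwrites it
+ *   strncpy(buf, "mpc", sizeof(buf));
+ *   wl_ioctl(ifname, WLC_GET_VAR, buf, sizeof(buf));
+ */
+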
+/*
+ * Alert:
+ * Duplicate a few definitions that irelay requires from epiioctl.h here
+ * so callers don't have to include both this file and epiioctl.h.
+ * If this grows any more, it will be time to move these irelay-specific
+ * definitions out of epiioctl.h and into a separate driver common file.
+ */
+#define WLC_SPEC_FLAG 0x80000000 /* For some special IOCTL */
+#ifndef EPICTRL_COOKIE
+#define EPICTRL_COOKIE 0xABADCEDE
+#endif
+
+/* vx wlc ioctl offset */
+#define CMN_IOCTL_OFF 0x180
+
+/*
+ * custom OID support
+ *
+ * 0xFF - implementation specific OID
+ * 0xE4 - first byte of Broadcom PCI vendor ID
+ * 0x14 - second byte of Broadcom PCI vendor ID
+ * 0xXX - the custom OID number
+ */
+
+/* begin 0x1f values beyond the start of the ET driver range. */
+#define WL_OID_BASE 0xFFE41420
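+
+/* Worked example (illustrative only): an NDIS custom OID is the base plus
+ * the ioctl code, so the encoding above can be read off directly:
+ *
+ *   OID_WL_GETINSTANCE = 0xFFE41420 + WLC_GET_INSTANCE (14) = 0xFFE4142E
+ *
+ * i.e. 0xFF (implementation specific), 0xE4/0x14 (the Broadcom PCI vendor
+ * ID bytes), with the low bits carrying the custom OID number.
+ */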
+
+/* NDIS overrides */
+#define OID_WL_GETINSTANCE (WL_OID_BASE + WLC_GET_INSTANCE)
+#define OID_WL_GET_FORCELINK (WL_OID_BASE + WLC_GET_FORCELINK)
+#define OID_WL_SET_FORCELINK (WL_OID_BASE + WLC_SET_FORCELINK)
+#define OID_WL_ENCRYPT_STRENGTH (WL_OID_BASE + WLC_ENCRYPT_STRENGTH)
+#define OID_WL_DECRYPT_STATUS (WL_OID_BASE + WLC_DECRYPT_STATUS)
+#define OID_LEGACY_LINK_BEHAVIOR (WL_OID_BASE + WLC_LEGACY_LINK_BEHAVIOR)
+#define OID_WL_NDCONFIG_ITEM (WL_OID_BASE + WLC_NDCONFIG_ITEM)
+
+/* EXT_STA Dongle support */
+#define OID_STA_CHANSPEC (WL_OID_BASE + WLC_GET_CHANSPEC)
+#define OID_STA_NBANDS (WL_OID_BASE + WLC_GET_NBANDS)
+#define OID_STA_GET_PHY (WL_OID_BASE + WLC_GET_OID_PHY)
+#define OID_STA_SET_PHY (WL_OID_BASE + WLC_SET_OID_PHY)
+#define OID_STA_ASSOC_TIME (WL_OID_BASE + WLC_SET_ASSOC_TIME)
+#define OID_STA_DESIRED_SSID (WL_OID_BASE + WLC_GET_DESIRED_SSID)
+#define OID_STA_SET_PHY_STATE (WL_OID_BASE + WLC_SET_PHY_STATE)
+#define OID_STA_SCAN_PENDING (WL_OID_BASE + WLC_GET_SCAN_PENDING)
+#define OID_STA_SCANREQ_PENDING (WL_OID_BASE + WLC_GET_SCANREQ_PENDING)
+#define OID_STA_GET_ROAM_REASON (WL_OID_BASE + WLC_GET_PREV_ROAM_REASON)
+#define OID_STA_SET_ROAM_REASON (WL_OID_BASE + WLC_SET_PREV_ROAM_REASON)
+#define OID_STA_GET_PHY_STATE (WL_OID_BASE + WLC_GET_PHY_STATE)
+#define OID_STA_INT_DISASSOC (WL_OID_BASE + WLC_GET_INT_DISASSOC)
+#define OID_STA_SET_NUM_PEERS (WL_OID_BASE + WLC_SET_NUM_PEERS)
+#define OID_STA_GET_NUM_BSS (WL_OID_BASE + WLC_GET_NUM_BSS)
+
+/* NAT filter driver support */
+#define OID_NAT_SET_CONFIG (WL_OID_BASE + WLC_SET_NAT_CONFIG)
+#define OID_NAT_GET_STATE (WL_OID_BASE + WLC_GET_NAT_STATE)
+
+#define WL_DECRYPT_STATUS_SUCCESS 1
+#define WL_DECRYPT_STATUS_FAILURE 2
+#define WL_DECRYPT_STATUS_UNKNOWN 3
+
+/* allows user-mode app to poll the status of USB image upgrade */
+#define WLC_UPGRADE_SUCCESS 0
+#define WLC_UPGRADE_PENDING 1
+
+/* WLC_GET_AUTH, WLC_SET_AUTH values */
+#define WL_AUTH_OPEN_SYSTEM 0 /* d11 open authentication */
+#define WL_AUTH_SHARED_KEY 1 /* d11 shared authentication */
+#define WL_AUTH_OPEN_SHARED 2 /* try open, then shared if open failed w/rc 13 */
+#define WL_AUTH_SAE_KEY 3 /* d11 sae authentication */
+#define WL_AUTH_FILS_SHARED 4 /* d11 fils shared key authentication */
+#define WL_AUTH_FILS_SHARED_PFS 5 /* d11 fils shared key w/ pfs authentication */
+#define WL_AUTH_FILS_PUBLIC 6 /* d11 fils public key authentication */
+/* Some branches use a different value for WL_AUTH_OPEN_SHARED.
+ * For example, the PHOENIX2 branch defines WL_AUTH_OPEN_SHARED as 3,
+ * while other branches define it as 2; if the values mismatch, WEP
+ * association can fail.
+ * More information - RB:5320
+ */
+
+/* a large TX Power as an init value to factor out of MIN() calculations,
+ * keep low enough to fit in an int8, units are .25 dBm
+ */
+#define WLC_TXPWR_MAX (127) /* ~32 dBm = 1,500 mW */
+
+/* "diag" iovar argument and error code */
+#define WL_DIAG_INTERRUPT 1 /* d11 loopback interrupt test */
+#define WL_DIAG_LOOPBACK 2 /* d11 loopback data test */
+#define WL_DIAG_MEMORY 3 /* d11 memory test */
+#define WL_DIAG_LED 4 /* LED test */
+#define WL_DIAG_REG 5 /* d11/phy register test */
+#define WL_DIAG_SROM 6 /* srom read/crc test */
+#define WL_DIAG_DMA 7 /* DMA test */
+#define WL_DIAG_LOOPBACK_EXT 8 /* enhanced d11 loopback data test */
+
+#define WL_DIAGERR_SUCCESS 0
+#define WL_DIAGERR_FAIL_TO_RUN 1 /* unable to run requested diag */
+#define WL_DIAGERR_NOT_SUPPORTED 2 /* diag requested is not supported */
+#define WL_DIAGERR_INTERRUPT_FAIL 3 /* loopback interrupt test failed */
+#define WL_DIAGERR_LOOPBACK_FAIL 4 /* loopback data test failed */
+#define WL_DIAGERR_SROM_FAIL 5 /* srom read failed */
+#define WL_DIAGERR_SROM_BADCRC 6 /* srom crc failed */
+#define WL_DIAGERR_REG_FAIL 7 /* d11/phy register test failed */
+#define WL_DIAGERR_MEMORY_FAIL 8 /* d11 memory test failed */
+#define WL_DIAGERR_NOMEM 9 /* diag test failed due to no memory */
+#define WL_DIAGERR_DMA_FAIL 10 /* DMA test failed */
+
+#define WL_DIAGERR_MEMORY_TIMEOUT 11 /* d11 memory test didn't finish in time */
+#define WL_DIAGERR_MEMORY_BADPATTERN 12 /* d11 memory test result in bad pattern */
+
+/* band types */
+#define WLC_BAND_AUTO 0 /* auto-select */
+#define WLC_BAND_5G 1 /* 5 GHz */
+#define WLC_BAND_2G 2 /* 2.4 GHz */
+#define WLC_BAND_ALL 3 /* all bands */
+#define WLC_BAND_6G 4 /* 6 GHz */
+#define WLC_BAND_INVALID -1 /* Invalid band */
+
+#define WL_BAND_MAX_CNT 3 /* max number of bands supported */
+
+/* band range returned by band_range iovar */
+#define WL_CHAN_FREQ_RANGE_2G 0
+#define WL_CHAN_FREQ_RANGE_5GL 1
+#define WL_CHAN_FREQ_RANGE_5GM 2
+#define WL_CHAN_FREQ_RANGE_5GH 3
+
+#define WL_CHAN_FREQ_RANGE_5GLL_5BAND 4
+#define WL_CHAN_FREQ_RANGE_5GLH_5BAND 5
+#define WL_CHAN_FREQ_RANGE_5GML_5BAND 6
+#define WL_CHAN_FREQ_RANGE_5GMH_5BAND 7
+#define WL_CHAN_FREQ_RANGE_5GH_5BAND 8
+
+#define WL_CHAN_FREQ_RANGE_5G_BAND0 1
+#define WL_CHAN_FREQ_RANGE_5G_BAND1 2
+#define WL_CHAN_FREQ_RANGE_5G_BAND2 3
+#define WL_CHAN_FREQ_RANGE_5G_BAND3 4
+#define WL_CHAN_FREQ_RANGE_5G_4BAND 5
+#define WL_CHAN_FREQ_RANGE_6G_6BAND 6
+
+/* SROM12 */
+#define WL_CHAN_FREQ_RANGE_5G_BAND4 5
+#define WL_CHAN_FREQ_RANGE_2G_40 6
+#define WL_CHAN_FREQ_RANGE_5G_BAND0_40 7
+#define WL_CHAN_FREQ_RANGE_5G_BAND1_40 8
+#define WL_CHAN_FREQ_RANGE_5G_BAND2_40 9
+#define WL_CHAN_FREQ_RANGE_5G_BAND3_40 10
+#define WL_CHAN_FREQ_RANGE_5G_BAND4_40 11
+#define WL_CHAN_FREQ_RANGE_5G_BAND0_80 12
+#define WL_CHAN_FREQ_RANGE_5G_BAND1_80 13
+#define WL_CHAN_FREQ_RANGE_5G_BAND2_80 14
+#define WL_CHAN_FREQ_RANGE_5G_BAND3_80 15
+#define WL_CHAN_FREQ_RANGE_5G_BAND4_80 16
+
+#define WL_CHAN_FREQ_RANGE_5G_5BAND 18
+#define WL_CHAN_FREQ_RANGE_5G_5BAND_40 19
+#define WL_CHAN_FREQ_RANGE_5G_5BAND_80 20
+
+#define WLC_MACMODE_DISABLED 0 /* MAC list disabled */
+#define WLC_MACMODE_DENY 1 /* Deny specified (i.e. allow unspecified) */
+#define WLC_MACMODE_ALLOW 2 /* Allow specified (i.e. deny unspecified) */
+#define WLC_MACMODE_AVOID 3 /* Avoid specified (i.e. conditionally avoid unspecified) */
+
+/*
+ * 54g modes (basic bits may still be overridden)
+ *
+ * GMODE_LEGACY_B Rateset: 1b, 2b, 5.5, 11
+ * Preamble: Long
+ * Shortslot: Off
+ * GMODE_AUTO Rateset: 1b, 2b, 5.5b, 11b, 18, 24, 36, 54
+ * Extended Rateset: 6, 9, 12, 48
+ * Preamble: Long
+ * Shortslot: Auto
+ * GMODE_ONLY Rateset: 1b, 2b, 5.5b, 11b, 18, 24b, 36, 54
+ * Extended Rateset: 6b, 9, 12b, 48
+ * Preamble: Short required
+ * Shortslot: Auto
+ * GMODE_B_DEFERRED Rateset: 1b, 2b, 5.5b, 11b, 18, 24, 36, 54
+ * Extended Rateset: 6, 9, 12, 48
+ * Preamble: Long
+ * Shortslot: On
+ * GMODE_PERFORMANCE Rateset: 1b, 2b, 5.5b, 6b, 9, 11b, 12b, 18, 24b, 36, 48, 54
+ * Preamble: Short required
+ * Shortslot: On and required
+ * GMODE_LRS Rateset: 1b, 2b, 5.5b, 11b
+ * Extended Rateset: 6, 9, 12, 18, 24, 36, 48, 54
+ * Preamble: Long
+ * Shortslot: Auto
+ */
+#define GMODE_LEGACY_B 0
+#define GMODE_AUTO 1
+#define GMODE_ONLY 2
+#define GMODE_B_DEFERRED 3
+#define GMODE_PERFORMANCE 4
+#define GMODE_LRS 5
+#define GMODE_MAX 6
+
+/* values for PLCPHdr_override */
+#define WLC_PLCP_AUTO -1
+#define WLC_PLCP_SHORT 0
+#define WLC_PLCP_LONG 1
+
+/* values for g_protection_override and n_protection_override */
+#define WLC_PROTECTION_AUTO -1
+#define WLC_PROTECTION_OFF 0
+#define WLC_PROTECTION_ON 1
+#define WLC_PROTECTION_MMHDR_ONLY 2
+#define WLC_PROTECTION_CTS_ONLY 3
+
+/* values for g_protection_control and n_protection_control */
+#define WLC_PROTECTION_CTL_OFF 0
+#define WLC_PROTECTION_CTL_LOCAL 1
+#define WLC_PROTECTION_CTL_OVERLAP 2
+
+/* values for n_protection */
+#define WLC_N_PROTECTION_OFF 0
+#define WLC_N_PROTECTION_OPTIONAL 1
+#define WLC_N_PROTECTION_20IN40 2
+#define WLC_N_PROTECTION_MIXEDMODE 3
+
+/* values for n_preamble_type */
+#define WLC_N_PREAMBLE_MIXEDMODE 0
+#define WLC_N_PREAMBLE_GF 1
+#define WLC_N_PREAMBLE_GF_BRCM 2
+
+/* values for band specific 40MHz capabilities (deprecated) */
+#define WLC_N_BW_20ALL 0
+#define WLC_N_BW_40ALL 1
+#define WLC_N_BW_20IN2G_40IN5G 2
+
+#define WLC_BW_20MHZ_BIT (1<<0)
+#define WLC_BW_40MHZ_BIT (1<<1)
+#define WLC_BW_80MHZ_BIT (1<<2)
+#define WLC_BW_160MHZ_BIT (1<<3)
+#define WLC_BW_240MHZ_BIT (1<<4)
+#define WLC_BW_320MHZ_BIT (1u<<5u)
+
+/* Bandwidth capabilities */
+#define WLC_BW_CAP_20MHZ (WLC_BW_20MHZ_BIT)
+#define WLC_BW_CAP_40MHZ (WLC_BW_40MHZ_BIT|WLC_BW_20MHZ_BIT)
+#define WLC_BW_CAP_80MHZ (WLC_BW_80MHZ_BIT|WLC_BW_40MHZ_BIT| \
+ WLC_BW_20MHZ_BIT)
+#define WLC_BW_CAP_160MHZ (WLC_BW_160MHZ_BIT|WLC_BW_80MHZ_BIT| \
+ WLC_BW_40MHZ_BIT|WLC_BW_20MHZ_BIT)
+#define WLC_BW_CAP_320MHZ (WLC_BW_320MHZ_BIT| \
+ WLC_BW_160MHZ_BIT|WLC_BW_80MHZ_BIT| \
+ WLC_BW_40MHZ_BIT|WLC_BW_20MHZ_BIT)
+#define WLC_BW_CAP_240MHZ (WLC_BW_240MHZ_BIT| \
+ WLC_BW_160MHZ_BIT|WLC_BW_80MHZ_BIT| \
+ WLC_BW_40MHZ_BIT|WLC_BW_20MHZ_BIT)
+#define WLC_BW_CAP_UNRESTRICTED 0xFF
+
+#define WL_BW_CAP_20MHZ(bw_cap) (((bw_cap) & WLC_BW_20MHZ_BIT) ? TRUE : FALSE)
+#define WL_BW_CAP_40MHZ(bw_cap) (((bw_cap) & WLC_BW_40MHZ_BIT) ? TRUE : FALSE)
+#define WL_BW_CAP_80MHZ(bw_cap) (((bw_cap) & WLC_BW_80MHZ_BIT) ? TRUE : FALSE)
+#define WL_BW_CAP_160MHZ(bw_cap) (((bw_cap) & WLC_BW_160MHZ_BIT) ? TRUE : FALSE)
+#define WL_BW_CAP_240MHZ(bw_cap) (((bw_cap) & WLC_BW_240MHZ_BIT) ? TRUE : FALSE)
+#define WL_BW_CAP_320MHZ(bw_cap) (((bw_cap) & WLC_BW_320MHZ_BIT) ? TRUE : FALSE)
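+
+/* Usage sketch (illustrative only): each WLC_BW_CAP_xxx value includes all
+ * narrower widths, so a cap is tested per-width with the WL_BW_CAP_xxx()
+ * helpers rather than compared for equality:
+ *
+ *   uint8 bw_cap = WLC_BW_CAP_80MHZ;   // 20 + 40 + 80 MHz
+ *   if (WL_BW_CAP_40MHZ(bw_cap)) {
+ *       // 40 MHz operation permitted (implied by the 80 MHz cap)
+ *   }
+ *   if (!WL_BW_CAP_160MHZ(bw_cap)) {
+ *       // 160 MHz operation not permitted with this cap
+ *   }
+ */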
+
+/* values to force tx/rx chain */
+#define WLC_N_TXRX_CHAIN0 0
+#define WLC_N_TXRX_CHAIN1 1
+
+/* bitflags for SGI support (sgi_rx iovar) */
+#define WLC_N_SGI_20 0x01
+#define WLC_N_SGI_40 0x02
+#define WLC_VHT_SGI_80 0x04
+#define WLC_VHT_SGI_160 0x08
+
+/* when sgi_tx==WLC_SGI_ALL, bypass rate selection, enable sgi for all mcs */
+#define WLC_SGI_ALL 0x02
+
+#define LISTEN_INTERVAL 10
+/* interference mitigation options */
+#define INTERFERE_OVRRIDE_OFF -1 /* interference override off */
+#define INTERFERE_NONE 0 /* off */
+#define NON_WLAN 1 /* foreign/non-802.11 interference, no auto detect */
+#define WLAN_MANUAL 2 /* ACI: no auto detection */
+#define WLAN_AUTO 3 /* ACI: auto detect */
+#define WLAN_AUTO_W_NOISE 4 /* ACI: auto detect ACI and non-802.11 interference */
+#define AUTO_ACTIVE (1 << 7) /* Auto is currently active */
+
+/* interference mode bit-masks (ACPHY) */
+#define ACPHY_ACI_GLITCHBASED_DESENSE 1 /* bit 0 */
+#define ACPHY_ACI_HWACI_PKTGAINLMT 2 /* bit 1 */
+#define ACPHY_ACI_W2NB_PKTGAINLMT 4 /* bit 2 */
+#define ACPHY_ACI_PREEMPTION 8 /* bit 3 */
+#define ACPHY_HWACI_MITIGATION 16 /* bit 4 */
+#define ACPHY_LPD_PREEMPTION 32 /* bit 5 */
+#define ACPHY_HWOBSS_MITIGATION 64 /* bit 6 */
+#define ACPHY_ACI_MAX_MODE 127
+
+/* AP environment */
+#define AP_ENV_DETECT_NOT_USED 0 /* We aren't using AP environment detection */
+#define AP_ENV_DENSE 1 /* "Corporate" or other AP dense environment */
+#define AP_ENV_SPARSE 2 /* "Home" or other sparse environment */
+#define AP_ENV_INDETERMINATE 3 /* AP environment hasn't been identified */
+
+#define TRIGGER_NOW 0
+#define TRIGGER_CRS 0x01
+#define TRIGGER_CRSDEASSERT 0x02
+#define TRIGGER_GOODFCS 0x04
+#define TRIGGER_BADFCS 0x08
+#define TRIGGER_BADPLCP 0x10
+#define TRIGGER_CRSGLITCH 0x20
+#define TRIGGER_ASYNC 0x40
+
+#define WL_SAMPLEDATA_HEADER_TYPE 1
+#define WL_SAMPLEDATA_HEADER_SIZE 80 /* sample collect header size (bytes) */
+#define WL_SAMPLEDATA_TYPE 2
+#define WL_SAMPLEDATA_SEQ 0xff /* sequence # */
+#define WL_SAMPLEDATA_MORE_DATA 0x100 /* more data mask */
+
+/* WL_OTA START */
+#define WL_OTA_ARG_PARSE_BLK_SIZE 1200
+#define WL_OTA_TEST_MAX_NUM_RATE 30
+#define WL_OTA_TEST_MAX_NUM_SEQ 100
+#define WL_OTA_TEST_MAX_NUM_RSSI 85
+#define WL_THRESHOLD_LO_BAND 70 /* range from 5250 MHz to 5350 MHz */
+
+/* radar iovar SET defines */
+#define WL_RADAR_DETECTOR_OFF 0 /* radar detector off */
+#define WL_RADAR_DETECTOR_ON 1 /* radar detector on */
+#define WL_RADAR_SIMULATED 2 /* force radar detector to declare
+ * detection once
+ */
+#define WL_RADAR_SIMULATED_SC 3 /* force radar detector to declare
+ * detection once on scan core
+ * if available and active
+ */
+#define WL_RSSI_ANT_VERSION 1 /* current version of wl_rssi_ant_t */
+#define WL_ANT_RX_MAX 2 /* max 2 receive antennas */
+#define WL_ANT_HT_RX_MAX 4 /* max 4 receive antennas/cores */
+#define WL_ANT_IDX_1 0 /* antenna index 1 */
+#define WL_ANT_IDX_2 1 /* antenna index 2 */
+
+#ifndef WL_RSSI_ANT_MAX
+#define WL_RSSI_ANT_MAX 4 /* max possible rx antennas */
+#elif WL_RSSI_ANT_MAX != 4
+#error "WL_RSSI_ANT_MAX does not match"
+#endif
+
+/* dfs_status iovar-related defines */
+
+/* cac - channel availability check,
+ * ism - in-service monitoring
+ * csa - channel switching announcement
+ */
+
+/* cac state values */
+#define WL_DFS_CACSTATE_IDLE 0 /* state for operating in non-radar channel */
+#define WL_DFS_CACSTATE_PREISM_CAC 1 /* CAC in progress */
+#define WL_DFS_CACSTATE_ISM 2 /* ISM in progress */
+#define WL_DFS_CACSTATE_CSA 3 /* csa */
+#define WL_DFS_CACSTATE_POSTISM_CAC 4 /* ISM CAC */
+#define WL_DFS_CACSTATE_PREISM_OOC 5 /* PREISM OOC */
+#define WL_DFS_CACSTATE_POSTISM_OOC 6 /* POSTISM OOC */
+#define WL_DFS_CACSTATES 7 /* this many states exist */
+
+/* Defines used with channel_bandwidth for curpower */
+#define WL_BW_20MHZ 0
+#define WL_BW_40MHZ 1
+#define WL_BW_80MHZ 2
+#define WL_BW_160MHZ 3
+#define WL_BW_8080MHZ 4
+#define WL_BW_2P5MHZ 5
+#define WL_BW_5MHZ 6
+#define WL_BW_10MHZ 7
+#define WL_BW_320MHZ 8u
+
+/* tx_power_t.flags bits */
+#define WL_TX_POWER_F_ENABLED 1
+#define WL_TX_POWER_F_HW 2
+#define WL_TX_POWER_F_MIMO 4
+#define WL_TX_POWER_F_SISO 8
+#define WL_TX_POWER_F_HT 0x10
+#define WL_TX_POWER_F_VHT 0x20
+#define WL_TX_POWER_F_OPENLOOP 0x40
+#define WL_TX_POWER_F_PROP11NRATES 0x80
+#define WL_TX_POWER_F_UNIT_QDBM 0x100
+#define WL_TX_POWER_F_TXCAP 0x200
+#define WL_TX_POWER_F_HE 0x400
+#define WL_TX_POWER_F_RU_RATE 0x800
+
+/* Message levels */
+#define WL_ERROR_VAL 0x00000001
+#define WL_TRACE_VAL 0x00000002
+#define WL_PRHDRS_VAL 0x00000004
+#define WL_PRPKT_VAL 0x00000008
+#define WL_INFORM_VAL 0x00000010
+#define WL_TMP_VAL 0x00000020
+#define WL_OID_VAL 0x00000040
+#define WL_RATE_VAL 0x00000080
+#define WL_ASSOC_VAL 0x00000100
+#define WL_PRUSR_VAL 0x00000200
+#define WL_PS_VAL 0x00000400
+#define WL_TXPWR_VAL 0x00000000 /* retired in TOT on 6/10/2009 */
+#define WL_MODE_SWITCH_VAL 0x00000800 /* Using retired TXPWR val */
+#define WL_PORT_VAL 0x00001000
+#define WL_DUAL_VAL 0x00002000
+#define WL_WSEC_VAL 0x00004000
+#define WL_WSEC_DUMP_VAL 0x00008000
+#define WL_LOG_VAL 0x00010000
+#define WL_NRSSI_VAL 0x00000000 /* retired in TOT on 6/10/2009 */
+#define WL_BCNTRIM_VAL 0x00020000 /* Using retired NRSSI VAL */
+#define WL_LOFT_VAL 0x00000000 /* retired in TOT on 6/10/2009 */
+#define WL_PFN_VAL 0x00040000 /* Using retired LOFT_VAL */
+#define WL_REGULATORY_VAL 0x00080000
+#define WL_CSA_VAL 0x00080000 /* Reusing REGULATORY_VAL due to lack of bits */
+#define WL_TAF_VAL 0x00100000
+#define WL_RADAR_VAL 0x00000000 /* retired in TOT on 6/10/2009 */
+#define WL_WDI_VAL 0x00200000 /* Using retired WL_RADAR_VAL VAL */
+#define WL_MPC_VAL 0x00400000
+#define WL_APSTA_VAL 0x00800000
+#define WL_DFS_VAL 0x01000000
+#define WL_BA_VAL 0x00000000 /* retired in TOT on 6/14/2010 */
+#define WL_MUMIMO_VAL 0x02000000 /* Using retired WL_BA_VAL */
+#define WL_ACI_VAL 0x04000000
+#define WL_PRMAC_VAL 0x04000000
+#define WL_MBSS_VAL 0x04000000
+#define WL_CAC_VAL 0x08000000
+#define WL_AMSDU_VAL 0x10000000
+#define WL_AMPDU_VAL 0x20000000
+#define WL_FFPLD_VAL 0x40000000
+#define WL_ROAM_EXP_VAL 0x80000000
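+
+/* Usage sketch (illustrative only): wl_msg_level is a bitvec set via
+ * WLC_SET_MSGLEVEL, so categories are enabled by OR-ing their values:
+ *
+ *   uint32 msglevel = WL_ERROR_VAL | WL_ASSOC_VAL | WL_WSEC_VAL;
+ *   // ... pass &msglevel to WLC_SET_MSGLEVEL ...
+ *
+ * Note that the retired bits above are defined as 0, so legacy code that
+ * still ORs them in has no effect.
+ */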
+
+/* wl_msg_level is full. For new bits take the next one and AND with
+ * wl_msg_level2 in wl_dbg.h
+ */
+#define WL_DPT_VAL 0x00000001
+/* re-using WL_DPT_VAL */
+/* re-using WL_MESH_VAL */
+#define WL_NATOE_VAL 0x00000001
+#define WL_MESH_VAL 0x00000001
+#define WL_SCAN_VAL 0x00000002
+#define WL_WOWL_VAL 0x00000004
+#define WL_COEX_VAL 0x00000008
+#define WL_RTDC_VAL 0x00000010
+#define WL_PROTO_VAL 0x00000020
+#define WL_SWDIV_VAL 0x00000040
+#define WL_CHANINT_VAL 0x00000080
+#define WL_WMF_VAL 0x00000100
+#define WL_P2P_VAL 0x00000200
+#define WL_ITFR_VAL 0x00000400
+#define WL_MCHAN_VAL 0x00000800
+#define WL_TDLS_VAL 0x00001000
+#define WL_MCNX_VAL 0x00002000
+#define WL_PROT_VAL 0x00004000
+#define WL_TSO_VAL 0x00010000
+#define WL_TRF_MGMT_VAL 0x00020000
+#define WL_LPC_VAL 0x00040000
+#define WL_L2FILTER_VAL 0x00080000
+#define WL_TXBF_VAL 0x00100000
+#define WL_P2PO_VAL 0x00200000
+#define WL_TBTT_VAL 0x00400000
+#define WL_FBT_VAL 0x00800000
+#define WL_RRM_VAL 0x00800000 /* reuse */
+#define WL_MQ_VAL 0x01000000
+/* This level is currently used in Phoenix2 only */
+#define WL_SRSCAN_VAL 0x02000000
+#define WL_WNM_VAL 0x04000000
+/* re-using WL_WNM_VAL for MBO */
+#define WL_MBO_VAL 0x04000000
+/* re-using WL_SRSCAN_VAL */
+#define WL_RANDMAC_VAL 0x02000000
+
+#ifdef WLAWDL
+#define WL_AWDL_VAL 0x08000000
+#endif /* WLAWDL */
+
+#define WL_UNUSED_VAL 0x10000000 /* Was a duplicate for WL_LPC_VAL. Removed */
+#define WL_NET_DETECT_VAL 0x20000000
+#define WL_OCE_VAL 0x20000000 /* reuse */
+#define WL_PCIE_VAL 0x40000000
+#define WL_PMDUR_VAL 0x80000000
+/* use the top bit for WL_TIMESTAMP_VAL because this is a modifier
+ * rather than a message-type of its own
+ */
+#define WL_TIMESTAMP_VAL 0x80000000
+
+/* wl_msg_level2 is full. For new bits take the next one and AND with
+ * wl_msg_level3 in wl_dbg.h
+ */
+#define WL_ASSOC_AP_VAL 0x00000001
+#define WL_FILS_VAL 0x00000002
+#define WL_LATENCY_VAL 0x00000004
+#define WL_WBUS_VAL 0x00000008
+
+/* number of bytes needed to define a proper bit mask for MAC event reporting */
+#define BCMIO_ROUNDUP(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
+#define BCMIO_NBBY 8
+#define WL_EVENTING_MASK_LEN (16+4) /* Don't increase this without wl review */
+
+#define WL_EVENTING_MASK_EXT_LEN ROUNDUP(WLC_E_LAST, NBBY)/NBBY
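+
+/* Worked example (illustrative only): WL_EVENTING_MASK_EXT_LEN sizes a byte
+ * array with one bit per event, rounded up to whole bytes. With a
+ * hypothetical WLC_E_LAST of 150:
+ *
+ *   ROUNDUP(150, 8) / 8 = 152 / 8 = 19 bytes
+ *
+ * so the extended mask only outgrows the legacy 20-byte WL_EVENTING_MASK_LEN
+ * once WLC_E_LAST exceeds 160.
+ */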
+
+/* join preference types */
+#define WL_JOIN_PREF_RSSI 1u /* by RSSI */
+#define WL_JOIN_PREF_WPA 2u /* by akm and ciphers */
+#define WL_JOIN_PREF_BAND 3u /* by 802.11 band */
+#define WL_JOIN_PREF_RSSI_DELTA 4u /* by 802.11 band only if RSSI delta condition matches */
+#define WL_JOIN_PREF_TRANS_PREF 5u /* defined by requesting AP */
+#define WL_JOIN_PREF_RSN_PRIO 6u /* by RSNE/RSNXE related security priority */
+
+/* Join preference RSN priority */
+#define WL_JP_RSN_SAE_PK 1u /* prefer SAE-PK APs over non-SAE-PK APs */
+
+/* band preference */
+#define WLJP_BAND_ASSOC_PREF 255 /* use what WLC_SET_ASSOC_PREFER ioctl specifies */
+
+/* any multicast cipher suite */
+#define WL_WPA_ACP_MCS_ANY "\x00\x00\x00\x00"
+
+/* 802.11h measurement types */
+#define WLC_MEASURE_TPC 1
+#define WLC_MEASURE_CHANNEL_BASIC 2
+#define WLC_MEASURE_CHANNEL_CCA 3
+#define WLC_MEASURE_CHANNEL_RPI 4
+
+/* regulatory enforcement levels */
+#define SPECT_MNGMT_OFF 0 /* both 11h and 11d disabled */
+#define SPECT_MNGMT_LOOSE_11H 1 /* allow non-11h APs in scan lists */
+#define SPECT_MNGMT_STRICT_11H 2 /* prune out non-11h APs from scan list */
+#define SPECT_MNGMT_STRICT_11D 3 /* switch to 802.11D mode */
+/* SPECT_MNGMT_LOOSE_11H_D - same as SPECT_MNGMT_LOOSE with the exception that Country IE
+ * adoption is done regardless of capability spectrum_management
+ */
+#define SPECT_MNGMT_LOOSE_11H_D 4 /* operation defined above */
+
+/* bit position in per_chan_info; these depend on current country/regulatory domain */
+#define WL_CHAN_VALID_HW (1u << 0) /* valid with current HW */
+#define WL_CHAN_VALID_SW (1u << 1) /* valid with current country setting */
+#define WL_CHAN_BAND_5G (1u << 2) /* 5GHz-band channel */
+#define WL_CHAN_RADAR (1u << 3) /* radar sensitive channel */
+#define WL_CHAN_INACTIVE (1u << 4) /* temporarily inactive due to radar */
+#define WL_CHAN_PASSIVE (1u << 5) /* channel is in passive mode */
+#define WL_CHAN_RESTRICTED (1u << 6) /* restricted use channel */
+#define WL_CHAN_RADAR_EU_WEATHER (1u << 7) /* EU Radar weather channel.
+ * Implies an EU Radar channel.
+ */
+#define WL_CHAN_CLM_RESTRICTED (1u << 8) /* channel restricted in CLM (i.e. by default) */
+#define WL_CHAN_BAND_6G (1u << 9) /* 6GHz-band channel */
+#define WL_CHAN_OOS_SHIFT 24u /* shift for OOS field */
+#define WL_CHAN_OOS_MASK 0xFF000000u /* field specifying minutes remaining for this
+ * channel's out-of-service period due to radar
+ * detection
+ */
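+
+/* Usage sketch (illustrative only): decoding a per_chan_info word. The low
+ * bits are independent flags and the top byte is the OOS countdown:
+ *
+ *   uint32 ci;   // assumed filled in by the per_chan_info iovar
+ *   int usable = (ci & WL_CHAN_VALID_HW) && (ci & WL_CHAN_VALID_SW);
+ *   if (ci & WL_CHAN_INACTIVE) {
+ *       uint32 oos_min = (ci & WL_CHAN_OOS_MASK) >> WL_CHAN_OOS_SHIFT;
+ *       // channel is radar-disabled for another 'oos_min' minutes
+ *   }
+ */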
+
+/* BTC mode used by "btc_mode" iovar */
+#define WL_BTC_DISABLE 0 /* disable BT coexistence */
+#define WL_BTC_FULLTDM 1 /* full TDM COEX */
+#define WL_BTC_ENABLE 1 /* full TDM COEX; kept for backward compatibility */
+#define WL_BTC_PREMPT 2 /* full TDM COEX with preemption */
+#define WL_BTC_LITE 3 /* lightweight coex for high-isolation platforms */
+#define WL_BTC_PARALLEL 4 /* BT and WLAN run in parallel with separate antennas */
+#define WL_BTC_HYBRID 5 /* hybrid coex; only ACKs may be transmitted in the BT slot */
+#define WL_BTC_DEFAULT 8 /* set the default mode for the device */
+#define WL_INF_BTC_DISABLE 0
+#define WL_INF_BTC_ENABLE 1
+#define WL_INF_BTC_AUTO 3
+
+/* BTC wire used by "btc_wire" iovar */
+#define WL_BTC_DEFWIRE 0 /* use default wire setting */
+#define WL_BTC_2WIRE 2 /* use 2-wire BTC */
+#define WL_BTC_3WIRE 3 /* use 3-wire BTC */
+#define WL_BTC_4WIRE 4 /* use 4-wire BTC */
+
+/* BTC flags: BTC configuration that can be set by host */
+#define WL_BTC_FLAG_PREMPT (1 << 0)
+#define WL_BTC_FLAG_BT_DEF (1 << 1)
+#define WL_BTC_FLAG_ACTIVE_PROT (1 << 2)
+#define WL_BTC_FLAG_SIM_RSP (1 << 3)
+#define WL_BTC_FLAG_PS_PROTECT (1 << 4)
+#define WL_BTC_FLAG_SIM_TX_LP (1 << 5)
+#define WL_BTC_FLAG_ECI (1 << 6)
+#define WL_BTC_FLAG_LIGHT (1 << 7)
+#define WL_BTC_FLAG_PARALLEL (1 << 8)
+
+/* maximum channels returned by the get valid channels iovar */
+#define WL_NUMCHANNELS 64
+
+/* This constant is obsolete, not part of the ioctl/iovar interface, and should never be used.
+ * It is preserved only for compatibility with older branches that use it.
+ */
+#ifdef WL_BAND6G
+#ifdef WL11AC_80P80
+#define WL_NUMCHANSPECS 446
+#else
+#define WL_NUMCHANSPECS 350
+#endif
+#else
+#if defined(WL11AC_80P80)
+#define WL_NUMCHANSPECS 206
+#elif defined(WL_BW160MHZ)
+#define WL_NUMCHANSPECS 140
+#else
+#define WL_NUMCHANSPECS 110
+#endif
+#endif /* WL_BAND6G */
+
+/* WDS link local endpoint WPA role */
+#define WL_WDS_WPA_ROLE_AUTH 0 /* authenticator */
+#define WL_WDS_WPA_ROLE_SUP 1 /* supplicant */
+#define WL_WDS_WPA_ROLE_AUTO 255 /* auto, based on mac addr value */
+
+/* Base offset values */
+#define WL_PKT_FILTER_BASE_PKT 0
+#define WL_PKT_FILTER_BASE_END 1
+#define WL_PKT_FILTER_BASE_D11_H 2 /* May be removed */
+#define WL_PKT_FILTER_BASE_D11_D 3 /* May be removed */
+#define WL_PKT_FILTER_BASE_ETH_H 4
+#define WL_PKT_FILTER_BASE_ETH_D 5
+#define WL_PKT_FILTER_BASE_ARP_H 6
+#define WL_PKT_FILTER_BASE_ARP_D 7 /* May be removed */
+#define WL_PKT_FILTER_BASE_IP4_H 8
+#define WL_PKT_FILTER_BASE_IP4_D 9
+#define WL_PKT_FILTER_BASE_IP6_H 10
+#define WL_PKT_FILTER_BASE_IP6_D 11
+#define WL_PKT_FILTER_BASE_TCP_H 12
+#define WL_PKT_FILTER_BASE_TCP_D 13 /* May be removed */
+#define WL_PKT_FILTER_BASE_UDP_H 14
+#define WL_PKT_FILTER_BASE_UDP_D 15
+#define WL_PKT_FILTER_BASE_IP6_P 16
+#define WL_PKT_FILTER_BASE_COUNT 17 /* May be removed */
+
+/* String mapping for bases that may be used by applications or debug */
+#define WL_PKT_FILTER_BASE_NAMES \
+ { "START", WL_PKT_FILTER_BASE_PKT }, \
+ { "END", WL_PKT_FILTER_BASE_END }, \
+ { "ETH_H", WL_PKT_FILTER_BASE_ETH_H }, \
+ { "ETH_D", WL_PKT_FILTER_BASE_ETH_D }, \
+ { "D11_H", WL_PKT_FILTER_BASE_D11_H }, \
+ { "D11_D", WL_PKT_FILTER_BASE_D11_D }, \
+ { "ARP_H", WL_PKT_FILTER_BASE_ARP_H }, \
+ { "IP4_H", WL_PKT_FILTER_BASE_IP4_H }, \
+ { "IP4_D", WL_PKT_FILTER_BASE_IP4_D }, \
+ { "IP6_H", WL_PKT_FILTER_BASE_IP6_H }, \
+ { "IP6_D", WL_PKT_FILTER_BASE_IP6_D }, \
+ { "IP6_P", WL_PKT_FILTER_BASE_IP6_P }, \
+ { "TCP_H", WL_PKT_FILTER_BASE_TCP_H }, \
+ { "TCP_D", WL_PKT_FILTER_BASE_TCP_D }, \
+ { "UDP_H", WL_PKT_FILTER_BASE_UDP_H }, \
+ { "UDP_D", WL_PKT_FILTER_BASE_UDP_D }
+
+/* Flags for a pattern list element */
+#define WL_PKT_FILTER_MFLAG_NEG 0x0001
+
+/*
+ * Packet engine interface
+ */
+
+#define WL_PKTENG_PER_TX_START 0x01
+#define WL_PKTENG_PER_TX_STOP 0x02
+#define WL_PKTENG_PER_RX_START 0x04
+#define WL_PKTENG_PER_RX_WITH_ACK_START 0x05
+#define WL_PKTENG_PER_TX_WITH_ACK_START 0x06
+#define WL_PKTENG_PER_RX_STOP 0x08
+#define WL_PKTENG_PER_RU_TX_START 0x09
+#define WL_PKTENG_PER_TRIG_TX_START 0x0a
+#define WL_PKTENG_PER_MASK 0xff
+
+#define WL_PKTENG_SYNCHRONOUS 0x100 /* synchronous flag */
+#define WL_PKTENG_SYNCHRONOUS_UNBLK 0x200 /* synchronous unblock flag */
+#define WL_PKTENG_COLLECT 0x400 /* Save last Rx'ed packet */
+#ifdef PKTENG_LONGPKTSZ
+/* max pktsz limit for pkteng */
+#define WL_PKTENG_MAXPKTSZ PKTENG_LONGPKTSZ
+#else
+#define WL_PKTENG_MAXPKTSZ 16384
+#endif
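+
+/* Usage sketch (illustrative only): a pkteng command word combines one
+ * WL_PKTENG_PER_xxx opcode (low byte, see WL_PKTENG_PER_MASK) with the
+ * modifier flags above:
+ *
+ *   uint32 flags = WL_PKTENG_PER_TX_START | WL_PKTENG_SYNCHRONOUS;
+ *   // (flags & WL_PKTENG_PER_MASK) == WL_PKTENG_PER_TX_START
+ */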
+
+#define NUM_80211b_RATES 4
+#define NUM_80211ag_RATES 8
+#define NUM_80211n_RATES 32
+#define NUM_80211_RATES (NUM_80211b_RATES+NUM_80211ag_RATES+NUM_80211n_RATES)
+
+/*
+ * WOWL capability/override settings
+ */
+#define WL_WOWL_MAGIC (1 << 0) /* Wakeup on Magic packet */
+#define WL_WOWL_NET (1 << 1) /* Wakeup on Netpattern */
+#define WL_WOWL_DIS (1 << 2) /* Wakeup on loss-of-link due to Disassoc/Deauth */
+#define WL_WOWL_RETR (1 << 3) /* Wakeup on retrograde TSF */
+#define WL_WOWL_BCN (1 << 4) /* Wakeup on loss of beacon */
+#define WL_WOWL_TST (1 << 5) /* Wakeup after test */
+#define WL_WOWL_M1 (1 << 6) /* Wakeup after PTK refresh */
+#define WL_WOWL_EAPID (1 << 7) /* Wakeup after receipt of EAP-Identity Req */
+#define WL_WOWL_PME_GPIO (1 << 8) /* Wakeind via PME(0) or GPIO(1) */
+#define WL_WOWL_ULP_BAILOUT (1 << 8) /* wake indication via unknown pkt by basic ULP
+ * offloads; same bit as WL_WOWL_PME_GPIO, used
+ * only for dongle builds
+ */
+#define WL_WOWL_NEEDTKIP1 (1 << 9) /* need tkip phase 1 key to be updated by the driver */
+#define WL_WOWL_GTK_FAILURE (1 << 10) /* enable wakeup if GTK fails */
+#define WL_WOWL_EXTMAGPAT (1 << 11) /* support extended magic packets */
+#define WL_WOWL_ARPOFFLOAD (1 << 12) /* support ARP/NS/keepalive offloading */
+#define WL_WOWL_WPA2 (1 << 13) /* read protocol version for EAPOL frames */
+#define WL_WOWL_KEYROT (1 << 14) /* If the bit is set, use key rotation */
+#define WL_WOWL_BCAST (1 << 15) /* If the bit is set, the frame received was a bcast frame */
+#define WL_WOWL_SCANOL (1 << 16) /* If the bit is set, scan offload is enabled */
+#define WL_WOWL_TCPKEEP_TIME (1 << 17) /* Wakeup on tcpkeep alive timeout */
+#define WL_WOWL_MDNS_CONFLICT (1 << 18) /* Wakeup on mDNS Conflict Resolution */
+#define WL_WOWL_MDNS_SERVICE (1 << 19) /* Wakeup on mDNS Service Connect */
+#define WL_WOWL_TCPKEEP_DATA (1 << 20) /* tcp keepalive got data */
+#define WL_WOWL_FW_HALT (1 << 21) /* Firmware died in wowl mode */
+#define WL_WOWL_ENAB_HWRADIO (1 << 22) /* Enable detection of radio button changes */
+#define WL_WOWL_MIC_FAIL (1 << 23) /* Offloads detected MIC failure(s) */
+#define WL_WOWL_UNASSOC (1 << 24) /* Wakeup in Unassociated state (Net/Magic Pattern) */
+#define WL_WOWL_SECURE (1 << 25) /* Wakeup if received matched secured pattern */
+#define WL_WOWL_EXCESS_WAKE (1 << 26) /* Excess wake */
+#define WL_WOWL_LINKDOWN (1 << 31) /* Link Down indication in WoWL mode */
+
+#define WL_WOWL_TCPKEEP (1 << 20) /* temp copy to satisfy automerger */
+#define MAGIC_PKT_MINLEN 102 /* Magic pkt min length is 6 * 0xFF + 16 * ETHER_ADDR_LEN */
+
+#define WOWL_PATTEN_TYPE_ARP (1 << 0) /* ARP offload Pattern */
+#define WOWL_PATTEN_TYPE_NA (1 << 1) /* NA offload Pattern */
+
+#define MAGIC_PKT_NUM_MAC_ADDRS 16
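+
+/* Usage sketch (illustrative only): a host typically enables a handful of
+ * wake conditions by OR-ing the bits above before arming WOWL:
+ *
+ *   uint32 wowl = WL_WOWL_MAGIC | WL_WOWL_NET | WL_WOWL_DIS | WL_WOWL_BCN;
+ *
+ * The MAGIC_PKT_MINLEN arithmetic: 6 bytes of 0xFF plus 16 repetitions of
+ * the 6-byte MAC address = 6 + 16 * 6 = 102 bytes.
+ */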
+
+/* Overlap BSS Scan parameters default, minimum, maximum */
+#define WLC_OBSS_SCAN_PASSIVE_DWELL_DEFAULT 20 /* unit TU */
+#define WLC_OBSS_SCAN_PASSIVE_DWELL_MIN 5 /* unit TU */
+#define WLC_OBSS_SCAN_PASSIVE_DWELL_MAX 1000 /* unit TU */
+#define WLC_OBSS_SCAN_ACTIVE_DWELL_DEFAULT 10 /* unit TU */
+#define WLC_OBSS_SCAN_ACTIVE_DWELL_MIN 10 /* unit TU */
+#define WLC_OBSS_SCAN_ACTIVE_DWELL_MAX 1000 /* unit TU */
+#define WLC_OBSS_SCAN_WIDTHSCAN_INTERVAL_DEFAULT 300 /* unit Sec */
+#define WLC_OBSS_SCAN_WIDTHSCAN_INTERVAL_MIN 10 /* unit Sec */
+#define WLC_OBSS_SCAN_WIDTHSCAN_INTERVAL_MAX 900 /* unit Sec */
+#define WLC_OBSS_SCAN_CHANWIDTH_TRANSITION_DLY_DEFAULT 5
+#define WLC_OBSS_SCAN_CHANWIDTH_TRANSITION_DLY_MIN 5
+#define WLC_OBSS_SCAN_CHANWIDTH_TRANSITION_DLY_MAX 100
+#define WLC_OBSS_SCAN_PASSIVE_TOTAL_PER_CHANNEL_DEFAULT 200 /* unit TU */
+#define WLC_OBSS_SCAN_PASSIVE_TOTAL_PER_CHANNEL_MIN 200 /* unit TU */
+#define WLC_OBSS_SCAN_PASSIVE_TOTAL_PER_CHANNEL_MAX 10000 /* unit TU */
+#define WLC_OBSS_SCAN_ACTIVE_TOTAL_PER_CHANNEL_DEFAULT 20 /* unit TU */
+#define WLC_OBSS_SCAN_ACTIVE_TOTAL_PER_CHANNEL_MIN 20 /* unit TU */
+#define WLC_OBSS_SCAN_ACTIVE_TOTAL_PER_CHANNEL_MAX 10000 /* unit TU */
+#define WLC_OBSS_SCAN_ACTIVITY_THRESHOLD_DEFAULT 25 /* unit percent */
+#define WLC_OBSS_SCAN_ACTIVITY_THRESHOLD_MIN 0 /* unit percent */
+#define WLC_OBSS_SCAN_ACTIVITY_THRESHOLD_MAX 100 /* unit percent */
+
+#define WL_MIN_NUM_OBSS_SCAN_ARG 7 /* minimum number of arguments required for OBSS Scan */
+
+#define WL_COEX_INFO_MASK 0x07
+#define WL_COEX_INFO_REQ 0x01
+#define WL_COEX_40MHZ_INTOLERANT 0x02
+#define WL_COEX_WIDTH20 0x04
+
+#define WLC_RSSI_INVALID 0 /* invalid RSSI value */
+
+#define MAX_RSSI_LEVELS 8
+
+/* **** EXTLOG **** */
+#define EXTLOG_CUR_VER 0x0100
+
+#define MAX_ARGSTR_LEN 18 /* At least big enough for storing ETHER_ADDR_STR_LEN */
+
+/* log modules (bitmap) */
+#define LOG_MODULE_COMMON 0x0001
+#define LOG_MODULE_ASSOC 0x0002
+#define LOG_MODULE_EVENT 0x0004
+#define LOG_MODULE_MAX 3 /* Update when adding module */
+
+/* log levels */
+#define WL_LOG_LEVEL_DISABLE 0
+#define WL_LOG_LEVEL_ERR 1
+#define WL_LOG_LEVEL_WARN 2
+#define WL_LOG_LEVEL_INFO 3
+#define WL_LOG_LEVEL_MAX WL_LOG_LEVEL_INFO /* Update when adding level */
+
+/* flag */
+#define LOG_FLAG_EVENT 1
+
+/* log arg_type */
+#define LOG_ARGTYPE_NULL 0
+#define LOG_ARGTYPE_STR 1 /* %s */
+#define LOG_ARGTYPE_INT 2 /* %d */
+#define LOG_ARGTYPE_INT_STR 3 /* %d...%s */
+#define LOG_ARGTYPE_STR_INT 4 /* %s...%d */
+
+/* 802.11 Mgmt Packet flags */
+#define VNDR_IE_BEACON_FLAG 0x1
+#define VNDR_IE_PRBRSP_FLAG 0x2
+#define VNDR_IE_ASSOCRSP_FLAG 0x4
+#define VNDR_IE_AUTHRSP_FLAG 0x8
+#define VNDR_IE_PRBREQ_FLAG 0x10
+#define VNDR_IE_ASSOCREQ_FLAG 0x20
+#define VNDR_IE_IWAPID_FLAG 0x40 /* vendor IE in IW advertisement protocol ID field */
+#define VNDR_IE_AUTHREQ_FLAG 0x80
+#define VNDR_IE_CUSTOM_FLAG 0x100 /* allow custom IE id */
+#define VNDR_IE_DISASSOC_FLAG 0x200
+
+#if defined(WLP2P)
+/* P2P Action Frames flags (spec ordered) */
+#define VNDR_IE_GONREQ_FLAG 0x001000
+#define VNDR_IE_GONRSP_FLAG 0x002000
+#define VNDR_IE_GONCFM_FLAG 0x004000
+#define VNDR_IE_INVREQ_FLAG 0x008000
+#define VNDR_IE_INVRSP_FLAG 0x010000
+#define VNDR_IE_DISREQ_FLAG 0x020000
+#define VNDR_IE_DISRSP_FLAG 0x040000
+#define VNDR_IE_PRDREQ_FLAG 0x080000
+#define VNDR_IE_PRDRSP_FLAG 0x100000
+
+#define VNDR_IE_P2PAF_SHIFT 12
+#endif /* WLP2P */
+
+/* channel interference measurement (chanim) related defines */
+
+/* chanim mode */
+#define CHANIM_DISABLE 0 /* disabled */
+#define CHANIM_DETECT 1 /* detection only */
+#define CHANIM_EXT 2 /* external state machine */
+#define CHANIM_ACT 3 /* full internal state machine, detect + act */
+#define CHANIM_MODE_MAX 4
+
+/* define for apcs reason code */
+#define APCS_INIT 0
+#define APCS_IOCTL 1
+#define APCS_CHANIM 2
+#define APCS_CSTIMER 3
+#define APCS_TXDLY 5
+#define APCS_NONACSD 6
+#define APCS_DFS_REENTRY 7
+#define APCS_TXFAIL 8
+#define APCS_MAX 9
+
+/* number of ACS record entries */
+#define CHANIM_ACS_RECORD 10
+
+/* CHANIM */
+#define CCASTATS_TXDUR 0
+#define CCASTATS_INBSS 1
+#define CCASTATS_OBSS 2
+#define CCASTATS_NOCTG 3
+#define CCASTATS_NOPKT 4
+#define CCASTATS_DOZE 5
+#define CCASTATS_TXOP 6
+#define CCASTATS_GDTXDUR 7
+#define CCASTATS_BDTXDUR 8
+
+/* FIXME: CCASTATS_MAX is 9 for existing chips and 10 for new ones.
+ * This is to avoid ROM invalidation on existing chips.
+ */
+#ifndef WLCHANIM_V2
+#define CCASTATS_MAX 9
+#else /* WLCHANIM_V2 */
+#define CCASTATS_MYRX 9
+#define CCASTATS_MAX 10
+#endif /* WLCHANIM_V2 */
+
+#define WL_CHANIM_COUNT_ALL 0xff
+#define WL_CHANIM_COUNT_ONE 0x1
+
+/* Module id: to know which module has sent the stats */
+#define SC_CHANIM_ID_NULL 0u
+#define SC_CHANIM_ID_SCAN 1u /* Module Id of scan, used to report scqs */
+#define SC_CHANIM_ID_STA 2u /* Module Id of STA, used to report scqs */
+
+/* ap tpc modes */
+#define AP_TPC_OFF 0
+#define AP_TPC_BSS_PWR 1 /* BSS power control */
+#define AP_TPC_AP_PWR 2 /* AP power control */
+#define AP_TPC_AP_BSS_PWR 3 /* Both AP and BSS power control */
+#define AP_TPC_MAX_LINK_MARGIN 127
+
+/* tpc option bits */
+#define TPC_OPT_NO_11DH_TXPWR 1 /* Do not adopt 11d+11h AP power constraints when
+ * autocountry is 0
+ */
+
+/* state */
+#define WL_P2P_DISC_ST_SCAN 0
+#define WL_P2P_DISC_ST_LISTEN 1
+#define WL_P2P_DISC_ST_SEARCH 2
+
+/* i/f type */
+#define WL_P2P_IF_CLIENT 0
+#define WL_P2P_IF_GO 1
+#define WL_P2P_IF_DYNBCN_GO 2
+#define WL_P2P_IF_DEV 3
+
+/* p2p GO configuration */
+#define WL_P2P_ENABLE_CONF 1 /* configure */
+#define WL_P2P_DISABLE_CONF 0 /* un-configure */
+
+/* count */
+#define WL_P2P_SCHED_RSVD 0
+#define WL_P2P_SCHED_REPEAT 255 /* anything > 255 will be treated as 255 */
+
+#define WL_P2P_SCHED_FIXED_LEN 3
+
+/* schedule type */
+#define WL_P2P_SCHED_TYPE_ABS 0 /* Scheduled Absence */
+#define WL_P2P_SCHED_TYPE_REQ_ABS 1 /* Requested Absence */
+
+/* at some point we may need bitvec here (combination of actions) */
+/* schedule action during absence periods (for WL_P2P_SCHED_ABS type) */
+#define WL_P2P_SCHED_ACTION_NONE 0 /* no action */
+#define WL_P2P_SCHED_ACTION_DOZE 1 /* doze */
+/* schedule option - WL_P2P_SCHED_TYPE_REQ_ABS */
+#define WL_P2P_SCHED_ACTION_GOOFF 2 /* turn off GO beacon/prbrsp functions */
+/* schedule option - WL_P2P_SCHED_TYPE_XXX */
+#define WL_P2P_SCHED_ACTION_RESET 255 /* reset */
+
+/* at some point we may need bitvec here (combination of options) */
+/* schedule option - WL_P2P_SCHED_TYPE_ABS */
+#define WL_P2P_SCHED_OPTION_NORMAL 0 /* normal start/interval/duration/count */
+#define WL_P2P_SCHED_OPTION_BCNPCT 1 /* percentage of beacon interval */
+/* schedule option - WL_P2P_SCHED_TYPE_REQ_ABS */
+#define WL_P2P_SCHED_OPTION_TSFOFS 2 /* normal start/internal/duration/count with
+ * start being an offset of the 'current' TSF
+ */
+
+/* feature flags */
+#define WL_P2P_FEAT_GO_CSA (1 << 0) /* GO moves with the STA using CSA method */
+#define WL_P2P_FEAT_GO_NOLEGACY (1 << 1) /* GO does not probe respond to non-p2p probe
+ * requests
+ */
+#define WL_P2P_FEAT_RESTRICT_DEV_RESP (1 << 2) /* Restrict p2p dev interface from responding */
+
+/* n-mode support capability */
+/* 2x2 includes both 1x1 & 2x2 devices.
+ * The value 2 is reserved for the future, when 1x1 and 2x2 may need to be
+ * separated and controlled independently.
+ */
+#define WL_11N_2x2 1
+#define WL_11N_3x3 3
+#define WL_11N_4x4 4
+
+/* define 11n feature disable flags */
+#define WLFEATURE_DISABLE_11N 0x00000001
+#define WLFEATURE_DISABLE_11N_STBC_TX 0x00000002
+#define WLFEATURE_DISABLE_11N_STBC_RX 0x00000004
+#define WLFEATURE_DISABLE_11N_SGI_TX 0x00000008
+#define WLFEATURE_DISABLE_11N_SGI_RX 0x00000010
+#define WLFEATURE_DISABLE_11N_AMPDU_TX 0x00000020
+#define WLFEATURE_DISABLE_11N_AMPDU_RX 0x00000040
+#define WLFEATURE_DISABLE_11N_GF 0x00000080
+
+/* op code in nat_cfg */
+#define NAT_OP_ENABLE 1 /* enable NAT on given interface */
+#define NAT_OP_DISABLE 2 /* disable NAT on given interface */
+#define NAT_OP_DISABLE_ALL 3 /* disable NAT on all interfaces */
+
+/* NAT state */
+#define NAT_STATE_ENABLED 1 /* NAT is enabled */
+#define NAT_STATE_DISABLED 2 /* NAT is disabled */
+
+#define CHANNEL_5G_LOW_START 36 /* 5G low (36..48) CDD enable/disable bit mask */
+#define CHANNEL_5G_MID_START 52 /* 5G mid (52..64) CDD enable/disable bit mask */
+#define CHANNEL_5G_HIGH_START 100 /* 5G high (100..140) CDD enable/disable bit mask */
+#define CHANNEL_5G_UPPER_START 149 /* 5G upper (149..161) CDD enable/disable bit mask */
+
+/* D0 Coalescing */
+#define IPV4_ARP_FILTER 0x0001
+#define IPV4_NETBT_FILTER 0x0002
+#define IPV4_LLMNR_FILTER 0x0004
+#define IPV4_SSDP_FILTER 0x0008
+#define IPV4_WSD_FILTER 0x0010
+#define IPV6_NETBT_FILTER 0x0200
+#define IPV6_LLMNR_FILTER 0x0400
+#define IPV6_SSDP_FILTER 0x0800
+#define IPV6_WSD_FILTER 0x1000
+
+/* Network Offload Engine */
+#define NWOE_OL_ENABLE 0x00000001
+
+/*
+ * Traffic management structures/defines.
+ */
+
+/* Traffic management bandwidth parameters */
+#define TRF_MGMT_MAX_PRIORITIES 3
+
+#define TRF_MGMT_FLAG_ADD_DSCP 0x0001 /* Add DSCP to IP TOS field */
+#define TRF_MGMT_FLAG_DISABLE_SHAPING 0x0002 /* Don't shape traffic */
+#define TRF_MGMT_FLAG_MANAGE_LOCAL_TRAFFIC 0x0008 /* Manage traffic over our local subnet */
+#define TRF_MGMT_FLAG_FILTER_ON_MACADDR 0x0010 /* filter on MAC address */
+#define TRF_MGMT_FLAG_NO_RX 0x0020 /* do not apply filters to rx packets */
+
+#define TRF_FILTER_MAC_ADDR 0x0001 /* L2 filter use dst mac address for filtering */
+#define TRF_FILTER_IP_ADDR 0x0002 /* L3 filter use ip address for filtering */
+#define TRF_FILTER_L4 0x0004 /* L4 filter use tcp/udp for filtering */
+#define TRF_FILTER_DWM 0x0008 /* L3 filter use DSCP for filtering */
+#define TRF_FILTER_FAVORED 0x0010 /* Tag the packet FAVORED */
+
+/* WNM/NPS subfeatures mask */
+#define WL_WNM_BSSTRANS 0x00000001
+#define WL_WNM_PROXYARP 0x00000002
+#define WL_WNM_MAXIDLE 0x00000004
+#define WL_WNM_TIMBC 0x00000008
+#define WL_WNM_TFS 0x00000010
+#define WL_WNM_SLEEP 0x00000020
+#define WL_WNM_DMS 0x00000040
+#define WL_WNM_FMS 0x00000080
+#define WL_WNM_NOTIF 0x00000100
+#define WL_WNM_WBTEXT 0x00000200
+#define WL_WNM_ESTM 0x00000400
+#define WL_WNM_MAX 0x00000800
+#ifdef WLWNM_BRCM
+#define BRCM_WNM_FEATURE_SET\
+ (WL_WNM_PROXYARP | \
+ WL_WNM_SLEEP | \
+ WL_WNM_FMS | \
+ WL_WNM_TFS | \
+ WL_WNM_TIMBC | \
+ WL_WNM_BSSTRANS | \
+ WL_WNM_DMS | \
+ WL_WNM_NOTIF | \
+ 0)
+#endif /* WLWNM_BRCM */
+#ifndef ETHER_MAX_DATA
+#define ETHER_MAX_DATA 1500
+#endif /* ETHER_MAX_DATA */
+
+/* Different discovery modes for dpt */
+#define DPT_DISCOVERY_MANUAL 0x01 /* manual discovery mode */
+#define DPT_DISCOVERY_AUTO 0x02 /* auto discovery mode */
+#define DPT_DISCOVERY_SCAN 0x04 /* scan-based discovery mode */
+
+/* different path selection values */
+#define DPT_PATHSEL_AUTO 0 /* auto mode for path selection */
+#define DPT_PATHSEL_DIRECT 1 /* always use direct DPT path */
+#define DPT_PATHSEL_APPATH 2 /* always use AP path */
+
+/* different ops for deny list */
+#define DPT_DENY_LIST_ADD 1 /* add to dpt deny list */
+#define DPT_DENY_LIST_REMOVE 2 /* remove from dpt deny list */
+
+/* different ops for manual end point */
+#define DPT_MANUAL_EP_CREATE 1 /* create manual dpt endpoint */
+#define DPT_MANUAL_EP_MODIFY 2 /* modify manual dpt endpoint */
+#define DPT_MANUAL_EP_DELETE 3 /* delete manual dpt endpoint */
+
+/* flags to indicate DPT status */
+#define DPT_STATUS_ACTIVE 0x01 /* link active (though may be suspended) */
+#define DPT_STATUS_AES 0x02 /* link secured through AES encryption */
+#define DPT_STATUS_FAILED 0x04 /* DPT link failed */
+
+#ifdef WLTDLS
+/* different ops for manual end point */
+#define TDLS_MANUAL_EP_CREATE 1 /* create manual dpt endpoint */
+#define TDLS_MANUAL_EP_MODIFY 2 /* modify manual dpt endpoint */
+#define TDLS_MANUAL_EP_DELETE 3 /* delete manual dpt endpoint */
+#define TDLS_MANUAL_EP_PM 4 /* put dpt endpoint in PM mode */
+#define TDLS_MANUAL_EP_WAKE 5 /* wake up dpt endpoint from PM */
+#define TDLS_MANUAL_EP_DISCOVERY 6 /* discover if endpoint is TDLS capable */
+#define TDLS_MANUAL_EP_CHSW 7 /* channel switch */
+#define TDLS_MANUAL_EP_WFD_TPQ 8 /* WiFi-Display Tunneled Probe reQuest */
+
+/* modes */
+#define TDLS_WFD_IE_TX 0
+#define TDLS_WFD_IE_RX 1
+#define TDLS_WFD_PROBE_IE_TX 2
+#define TDLS_WFD_PROBE_IE_RX 3
+#endif /* WLTDLS */
+
+/* define for flag */
+#define TSPEC_PENDING 0 /* TSPEC pending */
+#define TSPEC_ACCEPTED 1 /* TSPEC accepted */
+#define TSPEC_REJECTED 2 /* TSPEC rejected */
+#define TSPEC_UNKNOWN 3 /* TSPEC unknown */
+#define TSPEC_STATUS_MASK 7 /* TSPEC status mask */
+
+#ifdef BCMCCX
+/* "wlan_reason" iovar interface */
+#define WL_WLAN_ASSOC_REASON_NORMAL_NETWORK 0 /* normal WLAN network setup */
+#define WL_WLAN_ASSOC_REASON_ROAM_FROM_CELLULAR_NETWORK 1 /* roam from Cellular network */
+#define WL_WLAN_ASSOC_REASON_ROAM_FROM_LAN 2 /* roam from LAN */
+#define WL_WLAN_ASSOC_REASON_MAX 2 /* largest value allowed */
+#endif /* BCMCCX */
+
+/* Software feature flag defines used by wlfeatureflag */
+#ifdef WLAFTERBURNER
+#define WL_SWFL_ABBFL 0x0001 /* Allow Afterburner on systems w/o hardware BFL */
+#define WL_SWFL_ABENCORE 0x0002 /* Allow AB on non-4318E chips */
+#endif /* WLAFTERBURNER */
+#define WL_SWFL_NOHWRADIO 0x0004 /* Disable HW Radio monitor (e.g., Cust Spec) */
+#define WL_SWFL_FLOWCONTROL 0x0008 /* Enable backpressure to OS stack */
+#define WL_SWFL_WLBSSSORT 0x0010 /* Per-port supports sorting of BSS */
+
+#define WL_LIFETIME_MAX 0xFFFF /* Max value in ms */
+
+#define CSA_BROADCAST_ACTION_FRAME 0 /* csa broadcast action frame */
+#define CSA_UNICAST_ACTION_FRAME 1 /* csa unicast action frame */
+
+/* Roaming trigger definitions for WLC_SET_ROAM_TRIGGER.
+ *
+ * (-100 < value < 0) value is used directly as a roaming trigger in dBm
+ * (0 <= value) value specifies a logical roaming trigger level from
+ * the list below
+ *
+ * WLC_GET_ROAM_TRIGGER always returns roaming trigger value in dBm, never
+ * the logical roam trigger value.
+ */
+#define WLC_ROAM_TRIGGER_DEFAULT 0 /* default roaming trigger */
+#define WLC_ROAM_TRIGGER_BANDWIDTH 1 /* optimize for bandwidth roaming trigger */
+#define WLC_ROAM_TRIGGER_DISTANCE 2 /* optimize for distance roaming trigger */
+#define WLC_ROAM_TRIGGER_AUTO 3 /* auto-detect environment */
+#define WLC_ROAM_TRIGGER_MAX_VALUE 3 /* max. valid value */
+
+#define WLC_ROAM_NEVER_ROAM_TRIGGER (-100) /* Avoid Roaming by setting a large value */
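+
+/* Usage sketch (illustrative only): the same WLC_SET_ROAM_TRIGGER ioctl
+ * accepts either encoding described above:
+ *
+ *   int trigger = -75;                    // literal dBm threshold
+ *   int policy = WLC_ROAM_TRIGGER_AUTO;   // logical level, resolved by FW
+ *
+ * and WLC_GET_ROAM_TRIGGER reads back the resolved dBm value in both cases.
+ */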
+
+/* Preferred Network Offload (PNO, formerly PFN) defines */
+#define WPA_AUTH_PFN_ANY 0xffffffff /* for PFN, match only ssid */
+
+#define SORT_CRITERIA_BIT 0
+#define AUTO_NET_SWITCH_BIT 1
+#define ENABLE_BKGRD_SCAN_BIT 2
+#define IMMEDIATE_SCAN_BIT 3
+#define AUTO_CONNECT_BIT 4
+#define ENABLE_BD_SCAN_BIT 5
+#define ENABLE_ADAPTSCAN_BIT 6
+#define IMMEDIATE_EVENT_BIT 8
+#define SUPPRESS_SSID_BIT 9
+#define ENABLE_NET_OFFLOAD_BIT 10
+/* report found/lost events for SSID and BSSID networks separately */
+#define REPORT_SEPERATELY_BIT 11
+#define BESTN_BSSID_ONLY_BIT 12
+
+#define SORT_CRITERIA_MASK 0x0001
+#define AUTO_NET_SWITCH_MASK 0x0002
+#define ENABLE_BKGRD_SCAN_MASK 0x0004
+#define IMMEDIATE_SCAN_MASK 0x0008
+#define AUTO_CONNECT_MASK 0x0010
+
+#define ENABLE_BD_SCAN_MASK 0x0020
+#define ENABLE_ADAPTSCAN_MASK 0x00c0
+#define IMMEDIATE_EVENT_MASK 0x0100
+#define SUPPRESS_SSID_MASK 0x0200
+#define ENABLE_NET_OFFLOAD_MASK 0x0400
+/* report found/lost events for SSID and BSSID networks separately */
+#define REPORT_SEPERATELY_MASK 0x0800
+#define BESTN_BSSID_ONLY_MASK 0x1000
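+
+/* Example (illustrative): each _BIT value above is a bit position and the
+ * matching _MASK is the shifted mask, so
+ *	flags |= (1 << IMMEDIATE_SCAN_BIT);
+ * is equivalent to
+ *	flags |= IMMEDIATE_SCAN_MASK;
+ * ENABLE_ADAPTSCAN is the exception: its mask (0x00c0) spans two bits.
+ */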
+
+#ifdef PFN_SCANRESULT_2
+#define PFN_SCANRESULT_VERSION 2
+#else
+#define PFN_SCANRESULT_VERSION 1
+#endif /* PFN_SCANRESULT_2 */
+#ifndef MAX_PFN_LIST_COUNT
+#define MAX_PFN_LIST_COUNT 16
+#endif /* MAX_PFN_LIST_COUNT */
+
+#define PFN_COMPLETE 1
+#define PFN_INCOMPLETE 0
+
+#define DEFAULT_BESTN 2
+#define DEFAULT_MSCAN 0
+#define DEFAULT_REPEAT 10
+#define DEFAULT_EXP 2
+
+#define PFN_PARTIAL_SCAN_BIT 0
+#define PFN_PARTIAL_SCAN_MASK 1
+
+#define WL_PFN_SUPPRESSFOUND_MASK 0x08
+#define WL_PFN_SUPPRESSLOST_MASK 0x10
+#define WL_PFN_SSID_A_BAND_TRIG 0x20
+#define WL_PFN_SSID_BG_BAND_TRIG 0x40
+#define WL_PFN_SSID_IMPRECISE_MATCH 0x80
+#define WL_PFN_SSID_SAME_NETWORK 0x10000
+#define WL_PFN_SUPPRESS_AGING_MASK 0x20000
+#define WL_PFN_FLUSH_ALL_SSIDS 0x40000
+#define WL_PFN_RSSI_MASK 0xff00
+#define WL_PFN_RSSI_SHIFT 8
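+
+/* Example (illustrative): extracting the RSSI field packed into a PFN flag
+ * word with the mask/shift pair above:
+ *	int8 rssi = (int8)((flags & WL_PFN_RSSI_MASK) >> WL_PFN_RSSI_SHIFT);
+ */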
+
+#define WL_PFN_REPORT_ALLNET 0
+#define WL_PFN_REPORT_SSIDNET 1
+#define WL_PFN_REPORT_BSSIDNET 2
+
+#define WL_PFN_CFG_FLAGS_PROHIBITED 0x00000001 /* Accept and use prohibited channels */
+#define WL_PFN_CFG_FLAGS_HISTORY_OFF 0x00000002 /* Scan history suppressed */
+
+#define WL_PFN_HIDDEN_BIT 2
+#define PNO_SCAN_MAX_FW 508*1000 /* max scan time in msec */
+#define PNO_SCAN_MAX_FW_SEC PNO_SCAN_MAX_FW/1000 /* max scan time in sec */
+#define PNO_SCAN_MIN_FW_SEC 10 /* min scan time in sec */
+#define WL_PFN_HIDDEN_MASK 0x4
+#define MAX_BSSID_PREF_LIST_NUM 32
+
+#ifdef CUSTOM_SSID_WHITELIST_NUM
+#define MAX_SSID_WHITELIST_NUM CUSTOM_SSID_WHITELIST_NUM
+#else
+#define MAX_SSID_WHITELIST_NUM 4
+#endif /* CUSTOM_SSID_WHITELIST_NUM */
+#ifdef CUSTOM_BSSID_BLACKLIST_NUM
+#define MAX_BSSID_BLACKLIST_NUM CUSTOM_BSSID_BLACKLIST_NUM
+#else
+#define MAX_BSSID_BLACKLIST_NUM 32
+#endif /* CUSTOM_BSSID_BLACKLIST_NUM */
+
+/* TCP Checksum Offload error injection for testing */
+#define TOE_ERRTEST_TX_CSUM 0x00000001
+#define TOE_ERRTEST_RX_CSUM 0x00000002
+#define TOE_ERRTEST_RX_CSUM2 0x00000004
+
+/* ARP Offload feature flags for arp_ol iovar */
+#define ARP_OL_AGENT 0x00000001
+#define ARP_OL_SNOOP 0x00000002
+#define ARP_OL_HOST_AUTO_REPLY 0x00000004
+#define ARP_OL_PEER_AUTO_REPLY 0x00000008
+#define ARP_OL_UPDATE_HOST_CACHE 0x00000010
+
+/* ARP Offload error injection */
+#define ARP_ERRTEST_REPLY_PEER 0x1
+#define ARP_ERRTEST_REPLY_HOST 0x2
+
+#define ARP_MULTIHOMING_MAX 8 /* Maximum local host IP addresses */
+#if defined(WL_PKT_FLTR_EXT) && !defined(WL_PKT_FLTR_EXT_DISABLED)
+#define ND_MULTIHOMING_MAX 32 /* Maximum local host IP addresses */
+#else
+#define ND_MULTIHOMING_MAX 10 /* Maximum local host IP addresses */
+#endif /* WL_PKT_FLTR_EXT && !WL_PKT_FLTR_EXT_DISABLED */
+#define ND_REQUEST_MAX 5 /* Max set of offload params */
+
+#ifdef WLAWDL
+/* AWDL AF flags for awdl_oob_af iovar */
+#define AWDL_OOB_AF_FILL_TSF_PARAMS 0x00000001
+#define AWDL_OOB_AF_FILL_SYNC_PARAMS 0x00000002
+#define AWDL_OOB_AF_FILL_ELECT_PARAMS 0x00000004
+#define AWDL_OOB_AF_PARAMS_SIZE 38
+
+#define AWDL_OPMODE_AUTO 0
+#define AWDL_OPMODE_FIXED 1
+
+#define AWDL_PEER_STATE_OPEN 0
+#define AWDL_PEER_STATE_CLOSE 1
+
+#define SYNC_ROLE_SLAVE 0
+#define SYNC_ROLE_NE_MASTER 1 /* Non-election master */
+#define SYNC_ROLE_MASTER 2
+
+/* peer opcode */
+#define AWDL_PEER_OP_ADD 0
+#define AWDL_PEER_OP_DEL 1
+#define AWDL_PEER_OP_INFO 2
+#define AWDL_PEER_OP_UPD 3
+#endif /* WLAWDL */
+
+/* AOAC wake event flag */
+#define WAKE_EVENT_NLO_DISCOVERY_BIT 1
+#define WAKE_EVENT_AP_ASSOCIATION_LOST_BIT 2
+#define WAKE_EVENT_GTK_HANDSHAKE_ERROR_BIT 4
+#define WAKE_EVENT_4WAY_HANDSHAKE_REQUEST_BIT 8
+#define WAKE_EVENT_NET_PACKET_BIT 0x10
+
+#define MAX_NUM_WOL_PATTERN 22 /* LOGO requirements min 22 */
+
+/* Packet filter operation mode: a bitmask in which each set bit selects the
+ * behavior described next to it
+ */
+#define PKT_FILTER_MODE_FORWARD_ON_MATCH 1
+/* Enable and disable pkt_filter as a whole */
+#define PKT_FILTER_MODE_DISABLE 2
+/* Cache first matched rx pkt(be queried by host later) */
+#define PKT_FILTER_MODE_PKT_CACHE_ON_MATCH 4
+/* If pkt_filter is enabled and no filter is set, don't forward anything */
+#define PKT_FILTER_MODE_PKT_FORWARD_OFF_DEFAULT 8
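+
+/* Example (illustrative, assuming the mode bits may be combined): forward on
+ * match while also caching the first matched packet:
+ *	uint32 mode = PKT_FILTER_MODE_FORWARD_ON_MATCH |
+ *		PKT_FILTER_MODE_PKT_CACHE_ON_MATCH;
+ */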
+
+#ifdef DONGLEOVERLAYS
+#define OVERLAY_IDX_MASK 0x000000ff
+#define OVERLAY_IDX_SHIFT 0
+#define OVERLAY_FLAGS_MASK 0xffffff00
+#define OVERLAY_FLAGS_SHIFT 8
+/* overlay written to device memory immediately after loading the base image */
+#define OVERLAY_FLAG_POSTLOAD 0x100
+/* defer overlay download until the device responds w/WLC_E_OVL_DOWNLOAD event */
+#define OVERLAY_FLAG_DEFER_DL 0x200
+/* overlay downloaded prior to the host going to sleep */
+#define OVERLAY_FLAG_PRESLEEP 0x400
+#define OVERLAY_DOWNLOAD_CHUNKSIZE 1024
+#endif /* DONGLEOVERLAYS */
+
+/* reuse two numbers in the sc/rc space */
+#define SMFS_CODE_MALFORMED 0xFFFE
+#define SMFS_CODE_IGNORED 0xFFFD
+
+/* RFAWARE def */
+#define BCM_ACTION_RFAWARE 0x77
+#define BCM_ACTION_RFAWARE_DCS 0x01
+
+/* DCS reason code define */
+#define BCM_DCS_IOVAR 0x1
+#define BCM_DCS_UNKNOWN 0xFF
+
+#ifdef EXT_STA
+#define IHV_OID_BCM 0x00181000 /* based on BRCM_OUI value */
+/* ---------------------------------------------------------------------------
+* Event codes
+* ---------------------------------------------------------------------------
+*/
+#ifdef BCMCCX
+#define IHV_CCX_EVENT_STATUS_INDICATION 0x00000001L /* from driver */
+#define IHV_CCX_EVENT_PACKET_RECEIVED 0x00000002L /* from driver */
+#define IHV_CCX_EVENT_PACKET_TRANSMITTED 0x00000003L /* from driver */
+#define IHV_CCX_EVENT_OID 0x00000004L /* to driver */
+#define IHV_CCX_EVENT_OK_TO_ASSOCIATE 0x00000005L /* to driver */
+#define IHV_CCX_EVENT_SEND_PACKET 0x00000006L /* to driver */
+#endif /* BCMCCX */
+
+#define IHV_DRIVER_EVENT_GEN_INDICATION 0x00000011L /* from driver */
+#endif /* EXT_STA */
+#ifdef PROP_TXSTATUS
+/* Bit definitions for tlv iovar */
+/*
+ * enable RSSI signals:
+ * WLFC_CTL_TYPE_RSSI
+ */
+#define WLFC_FLAGS_RSSI_SIGNALS 0x0001
+
+/* enable (if/mac_open, if/mac_close, mac_add, mac_del) signals:
+ *
+ * WLFC_CTL_TYPE_MAC_OPEN
+ * WLFC_CTL_TYPE_MAC_CLOSE
+ *
+ * WLFC_CTL_TYPE_INTERFACE_OPEN
+ * WLFC_CTL_TYPE_INTERFACE_CLOSE
+ *
+ * WLFC_CTL_TYPE_MACDESC_ADD
+ * WLFC_CTL_TYPE_MACDESC_DEL
+ *
+ */
+#define WLFC_FLAGS_XONXOFF_SIGNALS 0x0002
+
+/* enable (status, fifo_credit, mac_credit) signals
+ * WLFC_CTL_TYPE_MAC_REQUEST_CREDIT
+ * WLFC_CTL_TYPE_TXSTATUS
+ * WLFC_CTL_TYPE_FIFO_CREDITBACK
+ */
+#define WLFC_FLAGS_CREDIT_STATUS_SIGNALS 0x0004
+
+#define WLFC_FLAGS_HOST_PROPTXSTATUS_ACTIVE 0x0008
+#define WLFC_FLAGS_PSQ_GENERATIONFSM_ENABLE 0x0010
+#define WLFC_FLAGS_PSQ_ZERO_BUFFER_ENABLE 0x0020
+#define WLFC_FLAGS_HOST_RXRERODER_ACTIVE 0x0040
+#define WLFC_FLAGS_PKT_STAMP_SIGNALS 0x0080
+
+#endif /* PROP_TXSTATUS */
+
+#define WL_TIMBC_STATUS_AP_UNKNOWN 255 /* AP status for internal use only */
+
+#define WL_DFRTS_LOGIC_OFF 0 /* Feature is disabled */
+#define WL_DFRTS_LOGIC_OR 1 /* OR all non-zero threshold conditions */
+#define WL_DFRTS_LOGIC_AND 2 /* AND all non-zero threshold conditions */
+
+/* Definitions for Reliable Multicast */
+#define WL_RELMCAST_MAX_CLIENT 32
+#define WL_RELMCAST_FLAG_INBLACKLIST 1
+#define WL_RELMCAST_FLAG_ACTIVEACKER 2
+#define WL_RELMCAST_FLAG_RELMCAST 4
+
+/* structures for proximity detection device role */
+#define WL_PROXD_MODE_DISABLE 0
+#define WL_PROXD_MODE_NEUTRAL 1
+#define WL_PROXD_MODE_INITIATOR 2
+#define WL_PROXD_MODE_TARGET 3
+#define WL_PROXD_RANDOM_WAKEUP 0x8000
+
+#ifdef NET_DETECT
+#define NET_DETECT_MAX_WAKE_DATA_SIZE 2048
+#define NET_DETECT_MAX_PROFILES 16
+#define NET_DETECT_MAX_CHANNELS 50
+#endif /* NET_DETECT */
+
+/* Bit masks for radio disabled status - returned by WL_GET_RADIO */
+#define WL_RADIO_SW_DISABLE (1<<0)
+#define WL_RADIO_HW_DISABLE (1<<1)
+#define WL_RADIO_MPC_DISABLE (1<<2)
+#define WL_RADIO_COUNTRY_DISABLE (1<<3) /* some countries don't support any channel */
+#define WL_RADIO_PERCORE_DISABLE (1<<4) /* Radio disable per core for DVT */
+#define WL_RADIO_TSYNC_PWRSAVE_DISABLE (1<<5) /* Disable Radio in tsync mode for power saving */
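+
+/* Example (illustrative): WL_GET_RADIO returns a bitmask of the flags above;
+ * the radio is usable only when no disable bit is set, e.g.
+ *	bool radio_on = ((status & (WL_RADIO_SW_DISABLE | WL_RADIO_HW_DISABLE)) == 0);
+ */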
+
+#define WL_SPURAVOID_OFF 0
+#define WL_SPURAVOID_ON1 1
+#define WL_SPURAVOID_ON2 2
+
+#define WL_4335_SPURAVOID_ON1 1
+#define WL_4335_SPURAVOID_ON2 2
+#define WL_4335_SPURAVOID_ON3 3
+#define WL_4335_SPURAVOID_ON4 4
+#define WL_4335_SPURAVOID_ON5 5
+#define WL_4335_SPURAVOID_ON6 6
+#define WL_4335_SPURAVOID_ON7 7
+#define WL_4335_SPURAVOID_ON8 8
+#define WL_4335_SPURAVOID_ON9 9
+
+/* Override bit for WLC_SET_TXPWR. if set, ignore other level limits */
+#define WL_TXPWR_OVERRIDE (1U<<31)
+#define WL_TXPWR_2G (1U<<30)
+#define WL_TXPWR_5G (1U<<29)
+#define WL_TXPWR_NEG (1U<<28)
+
+#define WL_TXPWR_MASK (~(0x7<<29))
+#define WL_TXPWR_CORE_MAX (3)
+#define WL_TXPWR_CORE0_MASK (0x000000FF)
+#define WL_TXPWR_CORE0_SHIFT (0)
+#define WL_TXPWR_CORE1_MASK (0x0000FF00)
+#define WL_TXPWR_CORE1_SHIFT (8)
+#define WL_TXPWR_CORE2_MASK (0x00FF0000)
+#define WL_TXPWR_CORE2_SHIFT (16)
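+
+/* Example (illustrative): extracting the per-core power value for core 1 from
+ * a packed txpwr word:
+ *	uint8 core1 = (uint8)((txpwr & WL_TXPWR_CORE1_MASK) >> WL_TXPWR_CORE1_SHIFT);
+ */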
+
+/* phy types (returned by WLC_GET_PHYTYPE) */
+#define WLC_PHY_TYPE_A 0
+#define WLC_PHY_TYPE_B 1
+#define WLC_PHY_TYPE_G 2
+#define WLC_PHY_TYPE_N 4
+#define WLC_PHY_TYPE_LP 5
+#define WLC_PHY_TYPE_SSN 6
+#define WLC_PHY_TYPE_HT 7
+#define WLC_PHY_TYPE_LCN 8
+#define WLC_PHY_TYPE_LCN40 10
+#define WLC_PHY_TYPE_AC 11
+#define WLC_PHY_TYPE_LCN20 12
+#define WLC_PHY_TYPE_NULL 0xf
+
+/* Values for PM */
+#define PM_OFF 0
+#define PM_MAX 1
+#define PM_FAST 2
+#define PM_FORCE_OFF 3 /* use this bit to force PM off even when BT is active */
+
+#define WL_WME_CNT_VERSION 1 /* current version of wl_wme_cnt_t */
+
+/* fbt_cap: FBT assoc / reassoc modes. */
+#define WLC_FBT_CAP_DRV_4WAY_AND_REASSOC 1 /* Driver 4-way handshake & reassoc (WLFBT). */
+
+/* monitor_promisc_level bits */
+#define WL_MONPROMISC_PROMISC 0x0001
+#define WL_MONPROMISC_CTRL 0x0002
+#define WL_MONPROMISC_FCS 0x0004
+
+/* TCP Checksum Offload defines */
+#define TOE_TX_CSUM_OL 0x00000001
+#define TOE_RX_CSUM_OL 0x00000002
+
+/* Wi-Fi Display Services (WFDS) */
+#define WL_P2P_SOCIAL_CHANNELS_MAX WL_NUMCHANNELS
+#define MAX_WFDS_SEEK_SVC 4 /* Max # of wfds services to seek */
+#define MAX_WFDS_ADVERT_SVC 4 /* Max # of wfds services to advertise */
+#define MAX_WFDS_SVC_NAME_LEN 200 /* maximum service_name length */
+#define MAX_WFDS_ADV_SVC_INFO_LEN 65000 /* maximum adv service_info length */
+#define P2P_WFDS_HASH_LEN 6 /* Length of a WFDS service hash */
+#define MAX_WFDS_SEEK_SVC_INFO_LEN 255 /* maximum seek service_info req length */
+#define MAX_WFDS_SEEK_SVC_NAME_LEN 200 /* maximum service_name length */
+
+/* ap_isolate bitmaps */
+#define AP_ISOLATE_DISABLED 0x0
+#define AP_ISOLATE_SENDUP_ALL 0x01
+#define AP_ISOLATE_SENDUP_MCAST 0x02
+
+/* Type values for the wl_pwrstats_t data field */
+#define WL_PWRSTATS_TYPE_PHY 0 /**< struct wl_pwr_phy_stats */
+#define WL_PWRSTATS_TYPE_SCAN 1 /**< struct wl_pwr_scan_stats */
+#define WL_PWRSTATS_TYPE_USB_HSIC 2 /**< struct wl_pwr_usb_hsic_stats */
+#define WL_PWRSTATS_TYPE_PM_AWAKE1 3 /**< struct wl_pwr_pm_awake_stats_v1 */
+#define WL_PWRSTATS_TYPE_CONNECTION 4 /* struct wl_pwr_connect_stats; assoc and key-exch time */
+
+#ifdef WLAWDL
+#define WL_PWRSTATS_TYPE_AWDL 5 /**< struct wl_pwr_awdl_stats; */
+#endif /* WLAWDL */
+
+#define WL_PWRSTATS_TYPE_PCIE 6 /**< struct wl_pwr_pcie_stats */
+#define WL_PWRSTATS_TYPE_PM_AWAKE2 7 /**< struct wl_pwr_pm_awake_stats_v2 */
+#define WL_PWRSTATS_TYPE_SDIO 8 /* struct wl_pwr_sdio_stats */
+#define WL_PWRSTATS_TYPE_MIMO_PS_METRICS 9 /* struct wl_mimo_meas_metrics_t */
+#define WL_PWRSTATS_TYPE_SLICE_INDEX 10 /* slice index for which this report is meant */
+#define WL_PWRSTATS_TYPE_TSYNC 11 /**< struct wl_pwr_tsync_stats */
+#define WL_PWRSTATS_TYPE_OPS_STATS 12 /* struct wl_pwr_ops_stats_t */
+#define WL_PWRSTATS_TYPE_BCNTRIM_STATS 13 /* struct wl_pwr_bcntrim_stats_t */
+#define WL_PWRSTATS_TYPE_SLICE_INDEX_BAND_INFO 14 /* wl_pwr_slice_index_band_t */
+#define WL_PWRSTATS_TYPE_PSBW_STATS 15 /* struct wl_pwr_psbw_stats_t */
+#define WL_PWRSTATS_TYPE_PM_ACCUMUL 16 /* struct wl_pwr_pm_accum_stats_v1_t */
+
+/* IOV AWD DATA */
+#define AWD_DATA_JOIN_INFO 0
+#define AWD_DATA_VERSION_V1 1
+
+/* IOV ETD DATA */
+#define ETD_DATA_JOIN_INFO 0
+#define ETD_DATA_VERSION_V1 1
+
+/* CTMODE DBG */
+/* input param: [27:16] => BYTES_THRESHOLD
+ * [15:07] => MPDU_THRESHOLD
+ * [06:03] => RESERVED
+ * [02] => enable UFP
+ * [01] => enable UFC
+ * [00] => enable CTMODE
+ */
+#define CTMODE_DBG_CTMODE_EN (0x1u)
+#define CTMODE_DBG_UFC_EN (0x2u)
+#define CTMODE_DBG_UFP_EN (0x4u)
+#define CTMODE_DBG_MPDU_THRESHOLD_SHIFT (7u)
+#define CTMODE_DBG_MPDU_THRESHOLD_MASK ((0x1FFu) << CTMODE_DBG_MPDU_THRESHOLD_SHIFT)
+#define CTMODE_DBG_BYTES_THRESHOLD_SHIFT (16u)
+#define CTMODE_DBG_BYTES_THRESHOLD_MASK ((0xFFFu) << CTMODE_DBG_BYTES_THRESHOLD_SHIFT)
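+
+/* Example (illustrative): enabling CTMODE and UFC with an MPDU threshold of
+ * 64, composed from the masks above:
+ *	uint32 param = CTMODE_DBG_CTMODE_EN | CTMODE_DBG_UFC_EN |
+ *		((64u << CTMODE_DBG_MPDU_THRESHOLD_SHIFT) &
+ *		CTMODE_DBG_MPDU_THRESHOLD_MASK);
+ */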
+
+/* ====== SC use case configs ========= */
+/* SC user/use case request */
+#define WL_SC_REQ_SCAN 0u /* user scan */
+#define WL_SC_REQ_CNX 1u /* associated idle */
+#define WL_SC_REQ_NAN 2u /* NAN synchronization and discovery offload */
+
+/* === Per use case configuration === */
+/* scan cfgs */
+#define SC_SCAN_CFG_PASSIVE_MASK 0x01u /* Enable passive scan on sc */
+#define SC_SCAN_CFG_PASSIVE_SHIFT 0u
+#define SC_SCAN_CFG_LP_SCAN_MASK 0x02u /* Enable low prio scan on sc */
+#define SC_SCAN_CFG_LP_SCAN_SHIFT 1u
+#define SC_SCAN_CFG_REG_SCAN_MASK 0x04u /* Enable split scan using sc */
+#define SC_SCAN_CFG_REG_SCAN_SHIFT 2u
+#define SC_SCAN_CFG_FULL_SCAN_MASK 0x08u /* Enable full scan on sc */
+#define SC_SCAN_CFG_FULL_SCAN_SHIFT 3u
+/* Add get and set macros for each of the configs? */
+
+/* === Place holder for cnx and nan cfgs === */
+#endif /* wlioctl_defs_h */
diff --git a/bcmdhd.101.10.361.x/include/wlioctl_utils.h b/bcmdhd.101.10.361.x/include/wlioctl_utils.h
new file mode 100755
index 0000000..b419d17
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/wlioctl_utils.h
@@ -0,0 +1,60 @@
+/*
+ * Custom OID/ioctl related helper functions.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _wlioctl_utils_h_
+#define _wlioctl_utils_h_
+
+#include <wlioctl.h>
+
+#ifndef BCMDRIVER
+#define CCA_THRESH_MILLI 14
+#define CCA_THRESH_INTERFERE 6
+
+extern cca_congest_channel_req_t * cca_per_chan_summary(cca_congest_channel_req_t *input,
+ cca_congest_channel_req_t *avg, bool percent);
+
+extern int cca_analyze(cca_congest_channel_req_t *input[], int num_chans,
+ uint flags, chanspec_t *answer);
+#endif /* BCMDRIVER */
+
+extern int wl_cntbuf_to_xtlv_format(void *ctx, void *cntbuf,
+ int buflen, uint32 corerev);
+
+extern const char * wl_get_reinit_rc_name(int rc);
+
+/* Get data pointer of wlc layer counters tuple from xtlv formatted counters IOVar buffer. */
+#define GET_WLCCNT_FROM_CNTBUF(cntbuf) (const wl_cnt_wlc_t*) \
+ bcm_get_data_from_xtlv_buf(((const wl_cnt_info_t *)cntbuf)->data, \
+ ((const wl_cnt_info_t *)cntbuf)->datalen, WL_CNT_XTLV_WLC, \
+ NULL, BCM_XTLV_OPTION_ALIGN32)
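+
+/* Example (illustrative) use of the macro above; bcm_get_data_from_xtlv_buf()
+ * returns NULL when the WL_CNT_XTLV_WLC tuple is absent, so the result must be
+ * NULL-checked:
+ *	const wl_cnt_wlc_t *wlc_cnt = GET_WLCCNT_FROM_CNTBUF(cntbuf);
+ *	if (wlc_cnt != NULL)
+ *		... read the wlc counters ...
+ */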
+
+/* We keep adding new counters, so give warning in case we exceed the ioctl buf len
+ * and need to move on to larger ioctl length in the future.
+ */
+#define CHK_CNTBUF_DATALEN(cntbuf, ioctl_buflen) do { \
+ if (((wl_cnt_info_t *)cntbuf)->datalen + \
+ OFFSETOF(wl_cnt_info_t, data) > ioctl_buflen) \
+ printf("IOVAR buffer short!\n"); \
+} while (0)
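+
+/* Example (illustrative): the check is typically run right after the counters
+ * IOVar returns, with the same length that was passed to the ioctl:
+ *	CHK_CNTBUF_DATALEN(cntbuf, ioctl_buflen);
+ */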
+
+#endif /* _wlioctl_utils_h_ */
diff --git a/bcmdhd.101.10.361.x/include/wpa.h b/bcmdhd.101.10.361.x/include/wpa.h
new file mode 100755
index 0000000..331cbbb
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/wpa.h
@@ -0,0 +1,306 @@
+/*
+ * Fundamental types and constants relating to WPA
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _proto_wpa_h_
+#define _proto_wpa_h_
+
+#include <typedefs.h>
+#include <ethernet.h>
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+/* Reason Codes */
+
+/* 13 through 23 taken from IEEE Std 802.11i-2004 */
+#define DOT11_RC_INVALID_WPA_IE 13 /* Invalid info. element */
+#define DOT11_RC_MIC_FAILURE 14 /* Michael failure */
+#define DOT11_RC_4WH_TIMEOUT 15 /* 4-way handshake timeout */
+#define DOT11_RC_GTK_UPDATE_TIMEOUT 16 /* Group key update timeout */
+#define DOT11_RC_WPA_IE_MISMATCH 17 /* WPA IE in 4-way handshake differs from
+ * (re-)assoc. request/probe response
+ */
+#define DOT11_RC_INVALID_MC_CIPHER 18 /* Invalid multicast cipher */
+#define DOT11_RC_INVALID_UC_CIPHER 19 /* Invalid unicast cipher */
+#define DOT11_RC_INVALID_AKMP 20 /* Invalid authenticated key management protocol */
+#define DOT11_RC_BAD_WPA_VERSION 21 /* Unsupported WPA version */
+#define DOT11_RC_INVALID_WPA_CAP 22 /* Invalid WPA IE capabilities */
+#define DOT11_RC_8021X_AUTH_FAIL 23 /* 802.1X authentication failure */
+
+#define WPA2_PMKID_LEN 16
+
+/* WPA IE fixed portion */
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+ uint8 tag; /* TAG */
+ uint8 length; /* TAG length */
+ uint8 oui[3]; /* IE OUI */
+ uint8 oui_type; /* OUI type */
+ BWL_PRE_PACKED_STRUCT struct {
+ uint8 low;
+ uint8 high;
+ } BWL_POST_PACKED_STRUCT version; /* IE version */
+} BWL_POST_PACKED_STRUCT wpa_ie_fixed_t;
+#define WPA_IE_OUITYPE_LEN 4
+#define WPA_IE_FIXED_LEN 8
+#define WPA_IE_TAG_FIXED_LEN 6
+
+#define BIP_OUI_TYPE WPA2_OUI "\x06"
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint8 tag; /* TAG */
+ uint8 length; /* TAG length */
+ BWL_PRE_PACKED_STRUCT struct {
+ uint8 low;
+ uint8 high;
+ } BWL_POST_PACKED_STRUCT version; /* IE version */
+} BWL_POST_PACKED_STRUCT wpa_rsn_ie_fixed_t;
+#define WPA_RSN_IE_FIXED_LEN 4
+#define WPA_RSN_IE_TAG_FIXED_LEN 2
+typedef uint8 wpa_pmkid_t[WPA2_PMKID_LEN];
+
+#define WFA_OSEN_IE_FIXED_LEN 6
+
+/* WPA suite/multicast suite */
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+ uint8 oui[3];
+ uint8 type;
+} BWL_POST_PACKED_STRUCT wpa_suite_t, wpa_suite_mcast_t;
+#define WPA_SUITE_LEN 4
+
+/* WPA unicast suite list/key management suite list */
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+ BWL_PRE_PACKED_STRUCT struct {
+ uint8 low;
+ uint8 high;
+ } BWL_POST_PACKED_STRUCT count;
+ wpa_suite_t list[1];
+} BWL_POST_PACKED_STRUCT wpa_suite_ucast_t, wpa_suite_auth_key_mgmt_t;
+#define WPA_IE_SUITE_COUNT_LEN 2
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+ BWL_PRE_PACKED_STRUCT struct {
+ uint8 low;
+ uint8 high;
+ } BWL_POST_PACKED_STRUCT count;
+ wpa_pmkid_t list[1];
+} BWL_POST_PACKED_STRUCT wpa_pmkid_list_t;
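+
+/* Example (illustrative): the two-octet counts above are carried as separate
+ * low/high bytes (little-endian) and are reassembled as
+ *	uint16 n = (uint16)(ucast->count.low | (ucast->count.high << 8));
+ * before walking list[]; `ucast` here is a hypothetical wpa_suite_ucast_t *.
+ */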
+
+/* WPA cipher suites */
+#define WPA_CIPHER_NONE 0 /* None */
+#define WPA_CIPHER_WEP_40 1 /* WEP (40-bit) */
+#define WPA_CIPHER_TKIP 2 /* TKIP: default for WPA */
+#define WPA_CIPHER_AES_OCB 3 /* AES (OCB) */
+#define WPA_CIPHER_AES_CCM 4 /* AES (CCM) */
+#define WPA_CIPHER_WEP_104 5 /* WEP (104-bit) */
+#define WPA_CIPHER_BIP 6 /* BIP (Broadcast Integrity Protocol) */
+#define WPA_CIPHER_TPK 7 /* Group addressed traffic not allowed */
+#ifdef BCMCCX
+#define WPA_CIPHER_CKIP 8 /* KP with no MIC */
+#define WPA_CIPHER_CKIP_MMH 9 /* KP with MIC ("CKIP/MMH", "CKIP+CMIC") */
+#define WPA_CIPHER_WEP_MMH 10 /* MIC with no KP ("WEP/MMH", "CMIC") */
+
+#define IS_CCX_CIPHER(cipher) ((cipher) == WPA_CIPHER_CKIP || \
+ (cipher) == WPA_CIPHER_CKIP_MMH || \
+ (cipher) == WPA_CIPHER_WEP_MMH)
+#endif /* BCMCCX */
+
+#define WPA_CIPHER_AES_GCM 8 /* AES (GCM) */
+#define WPA_CIPHER_AES_GCM256 9 /* AES (GCM256) */
+#define WPA_CIPHER_CCMP_256 10 /* CCMP-256 */
+#define WPA_CIPHER_BIP_GMAC_128 11 /* BIP_GMAC_128 */
+#define WPA_CIPHER_BIP_GMAC_256 12 /* BIP_GMAC_256 */
+#define WPA_CIPHER_BIP_CMAC_256 13 /* BIP_CMAC_256 */
+
+#ifdef BCMWAPI_WAI
+#define WAPI_CIPHER_NONE WPA_CIPHER_NONE
+#define WAPI_CIPHER_SMS4 11
+
+#define WAPI_CSE_WPI_SMS4 1
+#endif /* BCMWAPI_WAI */
+
+#define IS_WPA_CIPHER(cipher) ((cipher) == WPA_CIPHER_NONE || \
+ (cipher) == WPA_CIPHER_WEP_40 || \
+ (cipher) == WPA_CIPHER_WEP_104 || \
+ (cipher) == WPA_CIPHER_TKIP || \
+ (cipher) == WPA_CIPHER_AES_OCB || \
+ (cipher) == WPA_CIPHER_AES_CCM || \
+ (cipher) == WPA_CIPHER_AES_GCM || \
+ (cipher) == WPA_CIPHER_AES_GCM256 || \
+ (cipher) == WPA_CIPHER_CCMP_256 || \
+ (cipher) == WPA_CIPHER_TPK)
+
+#define IS_WPA_BIP_CIPHER(cipher) ((cipher) == WPA_CIPHER_BIP || \
+ (cipher) == WPA_CIPHER_BIP_GMAC_128 || \
+ (cipher) == WPA_CIPHER_BIP_GMAC_256 || \
+ (cipher) == WPA_CIPHER_BIP_CMAC_256)
+
+#ifdef BCMWAPI_WAI
+#define IS_WAPI_CIPHER(cipher) ((cipher) == WAPI_CIPHER_NONE || \
+ (cipher) == WAPI_CSE_WPI_SMS4)
+
+/* convert WAPI_CSE_WPI_XXX to WAPI_CIPHER_XXX */
+#define WAPI_CSE_WPI_2_CIPHER(cse) ((cse) == WAPI_CSE_WPI_SMS4 ? \
+ WAPI_CIPHER_SMS4 : WAPI_CIPHER_NONE)
+
+#define WAPI_CIPHER_2_CSE_WPI(cipher) ((cipher) == WAPI_CIPHER_SMS4 ? \
+ WAPI_CSE_WPI_SMS4 : WAPI_CIPHER_NONE)
+#endif /* BCMWAPI_WAI */
+
+#define IS_VALID_AKM(akm) ((akm) == RSN_AKM_NONE || \
+ (akm) == RSN_AKM_UNSPECIFIED || \
+ (akm) == RSN_AKM_PSK || \
+ (akm) == RSN_AKM_FBT_1X || \
+ (akm) == RSN_AKM_FBT_PSK || \
+ (akm) == RSN_AKM_MFP_1X || \
+ (akm) == RSN_AKM_MFP_PSK || \
+ (akm) == RSN_AKM_SHA256_1X || \
+ (akm) == RSN_AKM_SHA256_PSK || \
+ (akm) == RSN_AKM_TPK || \
+ (akm) == RSN_AKM_SAE_PSK || \
+ (akm) == RSN_AKM_SAE_FBT || \
+ (akm) == RSN_AKM_FILS_SHA256 || \
+ (akm) == RSN_AKM_FILS_SHA384 || \
+ (akm) == RSN_AKM_OWE || \
+ (akm) == RSN_AKM_SUITEB_SHA256_1X || \
+ (akm) == RSN_AKM_SUITEB_SHA384_1X)
+
+#define IS_VALID_BIP_CIPHER(cipher) ((cipher) == WPA_CIPHER_BIP || \
+ (cipher) == WPA_CIPHER_BIP_GMAC_128 || \
+ (cipher) == WPA_CIPHER_BIP_GMAC_256 || \
+ (cipher) == WPA_CIPHER_BIP_CMAC_256)
+
+#define WPA_IS_FT_AKM(akm) ((akm) == RSN_AKM_FBT_SHA256 || \
+ (akm) == RSN_AKM_FBT_SHA384)
+
+#define WPA_IS_FILS_AKM(akm) ((akm) == RSN_AKM_FILS_SHA256 || \
+ (akm) == RSN_AKM_FILS_SHA384)
+
+#define WPA_IS_FILS_FT_AKM(akm) ((akm) == RSN_AKM_FBT_SHA256_FILS || \
+ (akm) == RSN_AKM_FBT_SHA384_FILS)
+
+/* WPA TKIP countermeasures parameters */
+#define WPA_TKIP_CM_DETECT 60 /* multiple MIC failure window (seconds) */
+#define WPA_TKIP_CM_BLOCK 60 /* countermeasures active window (seconds) */
+
+/* RSN IE defines */
+#define RSN_CAP_LEN 2 /* Length of RSN capabilities field (2 octets) */
+
+/* RSN Capabilities defined in 802.11i */
+#define RSN_CAP_PREAUTH 0x0001
+#define RSN_CAP_NOPAIRWISE 0x0002
+#define RSN_CAP_PTK_REPLAY_CNTR_MASK 0x000C
+#define RSN_CAP_PTK_REPLAY_CNTR_SHIFT 2
+#define RSN_CAP_GTK_REPLAY_CNTR_MASK 0x0030
+#define RSN_CAP_GTK_REPLAY_CNTR_SHIFT 4
+#define RSN_CAP_1_REPLAY_CNTR 0
+#define RSN_CAP_2_REPLAY_CNTRS 1
+#define RSN_CAP_4_REPLAY_CNTRS 2
+#define RSN_CAP_16_REPLAY_CNTRS 3
+#define RSN_CAP_MFPR 0x0040
+#define RSN_CAP_MFPC 0x0080
+#define RSN_CAP_SPPC 0x0400
+#define RSN_CAP_SPPR 0x0800
+#define RSN_CAP_OCVC 0x4000
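+
+/* Example (illustrative): decoding the PTK replay-counter field from the RSN
+ * capabilities word:
+ *	uint8 cntrs = (uint8)((caps & RSN_CAP_PTK_REPLAY_CNTR_MASK) >>
+ *		RSN_CAP_PTK_REPLAY_CNTR_SHIFT);
+ * yields one of RSN_CAP_1_REPLAY_CNTR .. RSN_CAP_16_REPLAY_CNTRS.
+ */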
+
+/* WPA capabilities defined in 802.11i */
+#define WPA_CAP_4_REPLAY_CNTRS RSN_CAP_4_REPLAY_CNTRS
+#define WPA_CAP_16_REPLAY_CNTRS RSN_CAP_16_REPLAY_CNTRS
+#define WPA_CAP_REPLAY_CNTR_SHIFT RSN_CAP_PTK_REPLAY_CNTR_SHIFT
+#define WPA_CAP_REPLAY_CNTR_MASK RSN_CAP_PTK_REPLAY_CNTR_MASK
+
+/* WPA capabilities defined in 802.11zD9.0 */
+#define WPA_CAP_PEER_KEY_ENABLE (0x1 << 1) /* bit 9 */
+
+/* WPA Specific defines */
+#define WPA_CAP_LEN RSN_CAP_LEN /* Length of RSN capabilities in RSN IE (2 octets) */
+#define WPA_PMKID_CNT_LEN 2 /* Length of RSN PMKID count (2 octets) */
+
+#define WPA_CAP_WPA2_PREAUTH RSN_CAP_PREAUTH
+
+#define WPA2_PMKID_COUNT_LEN 2
+
+/* RSN dev type in rsn_info struct */
+typedef enum {
+ DEV_NONE = 0,
+ DEV_STA = 1,
+ DEV_AP = 2
+} device_type_t;
+
+typedef uint32 rsn_akm_mask_t; /* RSN_AKM_... see 802.11.h */
+typedef uint8 rsn_cipher_t; /* WPA_CIPHER_xxx */
+typedef uint32 rsn_ciphers_t; /* mask of rsn_cipher_t */
+typedef uint8 rsn_akm_t;
+typedef uint8 auth_ie_type_mask_t;
+
+/* Old location for this structure. Moved to bcmwpa.h */
+#ifndef RSN_IE_INFO_STRUCT_RELOCATED
+typedef struct rsn_ie_info {
+ uint8 version;
+ rsn_cipher_t g_cipher;
+ uint8 p_count;
+ uint8 akm_count;
+ uint8 pmkid_count;
+ rsn_akm_t sta_akm; /* single STA akm */
+ uint16 caps;
+ rsn_ciphers_t p_ciphers;
+ rsn_akm_mask_t akms;
+ uint8 pmkids_offset; /* offset into the IE */
+ rsn_cipher_t g_mgmt_cipher;
+ device_type_t dev_type; /* AP or STA */
+ rsn_cipher_t sta_cipher; /* single STA cipher */
+ uint16 key_desc; /* key descriptor version as STA */
+ int parse_status;
+ uint16 mic_len; /* unused. keep for ROM compatibility. */
+ auth_ie_type_mask_t auth_ie_type; /* bit field of WPA, WPA2 and (not yet) CCX WAPI */
+ uint8 pmk_len; /* EAPOL PMK */
+ uint8 kck_mic_len; /* EAPOL MIC (by KCK) */
+ uint8 kck_len; /* EAPOL KCK */
+ uint8 kek_len; /* EAPOL KEK */
+ uint8 tk_len; /* EAPOL TK */
+ uint8 ptk_len; /* EAPOL PTK */
+ uint8 kck2_len; /* EAPOL KCK2 */
+ uint8 kek2_len; /* EAPOL KEK2 */
+} rsn_ie_info_t;
+#endif /* RSN_IE_INFO_STRUCT_RELOCATED */
+
+#ifdef BCMWAPI_WAI
+#define WAPI_CAP_PREAUTH RSN_CAP_PREAUTH
+
+/* Other WAI definition */
+#define WAPI_WAI_REQUEST 0x00F1
+#define WAPI_UNICAST_REKEY 0x00F2
+#define WAPI_STA_AGING 0x00F3
+#define WAPI_MUTIL_REKEY 0x00F4
+#define WAPI_STA_STATS 0x00F5
+
+#define WAPI_USK_REKEY_COUNT 0x4000000 /* 0xA00000 */
+#define WAPI_MSK_REKEY_COUNT 0x4000000 /* 0xA00000 */
+#endif /* BCMWAPI_WAI */
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _proto_wpa_h_ */
diff --git a/bcmdhd.101.10.361.x/include/wps.h b/bcmdhd.101.10.361.x/include/wps.h
new file mode 100755
index 0000000..81009ba
--- /dev/null
+++ b/bcmdhd.101.10.361.x/include/wps.h
@@ -0,0 +1,379 @@
+/*
+ * WPS IE definitions
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _WPS_
+#define _WPS_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Data Element Definitions */
+#define WPS_ID_AP_CHANNEL 0x1001
+#define WPS_ID_ASSOC_STATE 0x1002
+#define WPS_ID_AUTH_TYPE 0x1003
+#define WPS_ID_AUTH_TYPE_FLAGS 0x1004
+#define WPS_ID_AUTHENTICATOR 0x1005
+#define WPS_ID_CONFIG_METHODS 0x1008
+#define WPS_ID_CONFIG_ERROR 0x1009
+#define WPS_ID_CONF_URL4 0x100A
+#define WPS_ID_CONF_URL6 0x100B
+#define WPS_ID_CONN_TYPE 0x100C
+#define WPS_ID_CONN_TYPE_FLAGS 0x100D
+#define WPS_ID_CREDENTIAL 0x100E
+#define WPS_ID_DEVICE_NAME 0x1011
+#define WPS_ID_DEVICE_PWD_ID 0x1012
+#define WPS_ID_E_HASH1 0x1014
+#define WPS_ID_E_HASH2 0x1015
+#define WPS_ID_E_SNONCE1 0x1016
+#define WPS_ID_E_SNONCE2 0x1017
+#define WPS_ID_ENCR_SETTINGS 0x1018
+#define WPS_ID_ENCR_TYPE 0x100F
+#define WPS_ID_ENCR_TYPE_FLAGS 0x1010
+#define WPS_ID_ENROLLEE_NONCE 0x101A
+#define WPS_ID_FEATURE_ID 0x101B
+#define WPS_ID_IDENTITY 0x101C
+#define WPS_ID_IDENTITY_PROOF 0x101D
+#define WPS_ID_KEY_WRAP_AUTH 0x101E
+#define WPS_ID_KEY_IDENTIFIER 0x101F
+#define WPS_ID_MAC_ADDR 0x1020
+#define WPS_ID_MANUFACTURER 0x1021
+#define WPS_ID_MSG_TYPE 0x1022
+#define WPS_ID_MODEL_NAME 0x1023
+#define WPS_ID_MODEL_NUMBER 0x1024
+#define WPS_ID_NW_INDEX 0x1026
+#define WPS_ID_NW_KEY 0x1027
+#define WPS_ID_NW_KEY_INDEX 0x1028
+#define WPS_ID_NEW_DEVICE_NAME 0x1029
+#define WPS_ID_NEW_PWD 0x102A
+#define WPS_ID_OOB_DEV_PWD 0x102C
+#define WPS_ID_OS_VERSION 0x102D
+#define WPS_ID_POWER_LEVEL 0x102F
+#define WPS_ID_PSK_CURRENT 0x1030
+#define WPS_ID_PSK_MAX 0x1031
+#define WPS_ID_PUBLIC_KEY 0x1032
+#define WPS_ID_RADIO_ENABLED 0x1033
+#define WPS_ID_REBOOT 0x1034
+#define WPS_ID_REGISTRAR_CURRENT 0x1035
+#define WPS_ID_REGISTRAR_ESTBLSHD 0x1036
+#define WPS_ID_REGISTRAR_LIST 0x1037
+#define WPS_ID_REGISTRAR_MAX 0x1038
+#define WPS_ID_REGISTRAR_NONCE 0x1039
+#define WPS_ID_REQ_TYPE 0x103A
+#define WPS_ID_RESP_TYPE 0x103B
+#define WPS_ID_RF_BAND 0x103C
+#define WPS_ID_R_HASH1 0x103D
+#define WPS_ID_R_HASH2 0x103E
+#define WPS_ID_R_SNONCE1 0x103F
+#define WPS_ID_R_SNONCE2 0x1040
+#define WPS_ID_SEL_REGISTRAR 0x1041
+#define WPS_ID_SERIAL_NUM 0x1042
+#define WPS_ID_SC_STATE 0x1044
+#define WPS_ID_SSID 0x1045
+#define WPS_ID_TOT_NETWORKS 0x1046
+#define WPS_ID_UUID_E 0x1047
+#define WPS_ID_UUID_R 0x1048
+#define WPS_ID_VENDOR_EXT 0x1049
+#define WPS_ID_VERSION 0x104A
+#define WPS_ID_X509_CERT_REQ 0x104B
+#define WPS_ID_X509_CERT 0x104C
+#define WPS_ID_EAP_IDENTITY 0x104D
+#define WPS_ID_MSG_COUNTER 0x104E
+#define WPS_ID_PUBKEY_HASH 0x104F
+#define WPS_ID_REKEY_KEY 0x1050
+#define WPS_ID_KEY_LIFETIME 0x1051
+#define WPS_ID_PERM_CFG_METHODS 0x1052
+#define WPS_ID_SEL_REG_CFG_METHODS 0x1053
+#define WPS_ID_PRIM_DEV_TYPE 0x1054
+#define WPS_ID_SEC_DEV_TYPE_LIST 0x1055
+#define WPS_ID_PORTABLE_DEVICE 0x1056
+#define WPS_ID_AP_SETUP_LOCKED 0x1057
+#define WPS_ID_APP_LIST 0x1058
+#define WPS_ID_EAP_TYPE 0x1059
+#define WPS_ID_INIT_VECTOR 0x1060
+#define WPS_ID_KEY_PROVIDED_AUTO 0x1061
+#define WPS_ID_8021X_ENABLED 0x1062
+#define WPS_ID_WEP_TRANSMIT_KEY 0x1064
+#define WPS_ID_REQ_DEV_TYPE 0x106A
+
+/* WSC 2.0, WFA Vendor Extension Subelements */
+#define WFA_VENDOR_EXT_ID "\x00\x37\x2A"
+#define WPS_WFA_SUBID_VERSION2 0x00
+#define WPS_WFA_SUBID_AUTHORIZED_MACS 0x01
+#define WPS_WFA_SUBID_NW_KEY_SHAREABLE 0x02
+#define WPS_WFA_SUBID_REQ_TO_ENROLL 0x03
+#define WPS_WFA_SUBID_SETTINGS_DELAY_TIME 0x04
+#define WPS_WFA_SUBID_REG_CFG_METHODS 0x05
+
+/* WCN-NET Windows Rally Vertical Pairing Vendor Extensions */
+#define MS_VENDOR_EXT_ID "\x00\x01\x37"
+#define WPS_MS_ID_VPI 0x1001 /* Vertical Pairing Identifier TLV */
+#define WPS_MS_ID_TRANSPORT_UUID 0x1002 /* Transport UUID TLV */
+
+/* Vertical Pairing Identifier TLV Definitions */
+#define WPS_MS_VPI_TRANSPORT_NONE 0x00 /* None */
+#define WPS_MS_VPI_TRANSPORT_DPWS 0x01 /* Devices Profile for Web Services */
+#define WPS_MS_VPI_TRANSPORT_UPNP 0x02 /* uPnP */
+#define WPS_MS_VPI_TRANSPORT_SDNWS 0x03 /* Secure Devices Profile for Web Services */
+#define WPS_MS_VPI_NO_PROFILE_REQ 0x00 /* Wi-Fi profile not requested.
+ * Not supported in Windows 7
+ */
+#define WPS_MS_VPI_PROFILE_REQ 0x01 /* Wi-Fi profile requested. */
+
+/* sizes of the fixed size elements */
+#define WPS_ID_AP_CHANNEL_S 2
+#define WPS_ID_ASSOC_STATE_S 2
+#define WPS_ID_AUTH_TYPE_S 2
+#define WPS_ID_AUTH_TYPE_FLAGS_S 2
+#define WPS_ID_AUTHENTICATOR_S 8
+#define WPS_ID_CONFIG_METHODS_S 2
+#define WPS_ID_CONFIG_ERROR_S 2
+#define WPS_ID_CONN_TYPE_S 1
+#define WPS_ID_CONN_TYPE_FLAGS_S 1
+#define WPS_ID_DEVICE_PWD_ID_S 2
+#define WPS_ID_ENCR_TYPE_S 2
+#define WPS_ID_ENCR_TYPE_FLAGS_S 2
+#define WPS_ID_FEATURE_ID_S 4
+#define WPS_ID_MAC_ADDR_S 6
+#define WPS_ID_MSG_TYPE_S 1
+#define WPS_ID_SC_STATE_S 1
+#define WPS_ID_RF_BAND_S 1
+#define WPS_ID_OS_VERSION_S 4
+#define WPS_ID_VERSION_S 1
+#define WPS_ID_SEL_REGISTRAR_S 1
+#define WPS_ID_SEL_REG_CFG_METHODS_S 2
+#define WPS_ID_REQ_TYPE_S 1
+#define WPS_ID_RESP_TYPE_S 1
+#define WPS_ID_AP_SETUP_LOCKED_S 1
+
+/* WSC 2.0, WFA Vendor Extension Subelements */
+#define WPS_WFA_SUBID_VERSION2_S 1
+#define WPS_WFA_SUBID_NW_KEY_SHAREABLE_S 1
+#define WPS_WFA_SUBID_REQ_TO_ENROLL_S 1
+#define WPS_WFA_SUBID_SETTINGS_DELAY_TIME_S 1
+#define WPS_WFA_SUBID_REG_CFG_METHODS_S 2
+
+/* Association states */
+#define WPS_ASSOC_NOT_ASSOCIATED 0
+#define WPS_ASSOC_CONN_SUCCESS 1
+#define WPS_ASSOC_CONFIG_FAIL 2
+#define WPS_ASSOC_ASSOC_FAIL 3
+#define WPS_ASSOC_IP_FAIL 4
+
+/* Authentication types */
+#define WPS_AUTHTYPE_OPEN 0x0001
+#define WPS_AUTHTYPE_WPAPSK 0x0002 /* Deprecated in WSC 2.0 */
+#define WPS_AUTHTYPE_SHARED 0x0004 /* Deprecated in WSC 2.0 */
+#define WPS_AUTHTYPE_WPA 0x0008 /* Deprecated in WSC 2.0 */
+#define WPS_AUTHTYPE_WPA2 0x0010
+#define WPS_AUTHTYPE_WPA2PSK 0x0020
+
+/* Config methods */
+#define WPS_CONFMET_USBA 0x0001 /* Deprecated in WSC 2.0 */
+#define WPS_CONFMET_ETHERNET 0x0002 /* Deprecated in WSC 2.0 */
+#define WPS_CONFMET_LABEL 0x0004
+#define WPS_CONFMET_DISPLAY 0x0008
+#define WPS_CONFMET_EXT_NFC_TOK 0x0010
+#define WPS_CONFMET_INT_NFC_TOK 0x0020
+#define WPS_CONFMET_NFC_INTF 0x0040
+#define WPS_CONFMET_PBC 0x0080
+#define WPS_CONFMET_KEYPAD 0x0100
+/* WSC 2.0 */
+#define WPS_CONFMET_VIRT_PBC 0x0280
+#define WPS_CONFMET_PHY_PBC 0x0480
+#define WPS_CONFMET_VIRT_DISPLAY 0x2008
+#define WPS_CONFMET_PHY_DISPLAY 0x4008
+
+/* WPS error messages */
+#define WPS_ERROR_NO_ERROR 0
+#define WPS_ERROR_OOB_INT_READ_ERR 1
+#define WPS_ERROR_DECRYPT_CRC_FAIL 2
+#define WPS_ERROR_CHAN24_NOT_SUPP 3
+#define WPS_ERROR_CHAN50_NOT_SUPP 4
+#define WPS_ERROR_SIGNAL_WEAK 5 /* Deprecated in WSC 2.0 */
+#define WPS_ERROR_NW_AUTH_FAIL 6 /* Deprecated in WSC 2.0 */
+#define WPS_ERROR_NW_ASSOC_FAIL 7 /* Deprecated in WSC 2.0 */
+#define WPS_ERROR_NO_DHCP_RESP 8 /* Deprecated in WSC 2.0 */
+#define WPS_ERROR_FAILED_DHCP_CONF 9 /* Deprecated in WSC 2.0 */
+#define WPS_ERROR_IP_ADDR_CONFLICT 10 /* Deprecated in WSC 2.0 */
+#define WPS_ERROR_FAIL_CONN_REGISTRAR 11
+#define WPS_ERROR_MULTI_PBC_DETECTED 12
+#define WPS_ERROR_ROGUE_SUSPECTED 13
+#define WPS_ERROR_DEVICE_BUSY 14
+#define WPS_ERROR_SETUP_LOCKED 15
+#define WPS_ERROR_MSG_TIMEOUT 16 /* Deprecated in WSC 2.0 */
+#define WPS_ERROR_REG_SESSION_TIMEOUT 17 /* Deprecated in WSC 2.0 */
+#define WPS_ERROR_DEV_PWD_AUTH_FAIL 18
+#define WPS_ERROR_60GHZ_NOT_SUPPORT 19
+#define WPS_ERROR_PKH_MISMATCH 20 /* Public Key Hash Mismatch */
+
+/* Connection types */
+#define WPS_CONNTYPE_ESS 0x01
+#define WPS_CONNTYPE_IBSS 0x02
+
+/* Device password ID */
+#define WPS_DEVICEPWDID_DEFAULT 0x0000
+#define WPS_DEVICEPWDID_USER_SPEC 0x0001
+#define WPS_DEVICEPWDID_MACHINE_SPEC 0x0002
+#define WPS_DEVICEPWDID_REKEY 0x0003
+#define WPS_DEVICEPWDID_PUSH_BTN 0x0004
+#define WPS_DEVICEPWDID_REG_SPEC 0x0005
+#define WPS_DEVICEPWDID_IBSS 0x0006
+#define WPS_DEVICEPWDID_NFC_CHO 0x0007 /* NFC-Connection-Handover */
+#define WPS_DEVICEPWDID_WFDS 0x0008 /* Wi-Fi Direct Services Specification */
+
+/* Encryption type */
+#define WPS_ENCRTYPE_NONE 0x0001
+#define WPS_ENCRTYPE_WEP 0x0002 /* Deprecated in WSC 2.0 */
+#define WPS_ENCRTYPE_TKIP 0x0004 /* Deprecated in version 2.0. TKIP can only
+ * be advertised on the AP when Mixed Mode
+ * is enabled (Encryption Type is 0x000c).
+ */
+#define WPS_ENCRTYPE_AES 0x0008
+
+/* WPS Message Types */
+#define WPS_ID_BEACON 0x01
+#define WPS_ID_PROBE_REQ 0x02
+#define WPS_ID_PROBE_RESP 0x03
+#define WPS_ID_MESSAGE_M1 0x04
+#define WPS_ID_MESSAGE_M2 0x05
+#define WPS_ID_MESSAGE_M2D 0x06
+#define WPS_ID_MESSAGE_M3 0x07
+#define WPS_ID_MESSAGE_M4 0x08
+#define WPS_ID_MESSAGE_M5 0x09
+#define WPS_ID_MESSAGE_M6 0x0A
+#define WPS_ID_MESSAGE_M7 0x0B
+#define WPS_ID_MESSAGE_M8 0x0C
+#define WPS_ID_MESSAGE_ACK 0x0D
+#define WPS_ID_MESSAGE_NACK 0x0E
+#define WPS_ID_MESSAGE_DONE 0x0F
+
+/* WSP private ID for local use */
+#define WPS_PRIVATE_ID_IDENTITY (WPS_ID_MESSAGE_DONE + 1)
+#define WPS_PRIVATE_ID_WPS_START (WPS_ID_MESSAGE_DONE + 2)
+#define WPS_PRIVATE_ID_FAILURE (WPS_ID_MESSAGE_DONE + 3)
+#define WPS_PRIVATE_ID_FRAG (WPS_ID_MESSAGE_DONE + 4)
+#define WPS_PRIVATE_ID_FRAG_ACK (WPS_ID_MESSAGE_DONE + 5)
+#define WPS_PRIVATE_ID_EAPOL_START (WPS_ID_MESSAGE_DONE + 6)
+
+/* Device Type categories for primary and secondary device types */
+#define WPS_DEVICE_TYPE_CAT_COMPUTER 1
+#define WPS_DEVICE_TYPE_CAT_INPUT_DEVICE 2
+#define WPS_DEVICE_TYPE_CAT_PRINTER 3
+#define WPS_DEVICE_TYPE_CAT_CAMERA 4
+#define WPS_DEVICE_TYPE_CAT_STORAGE 5
+#define WPS_DEVICE_TYPE_CAT_NW_INFRA 6
+#define WPS_DEVICE_TYPE_CAT_DISPLAYS 7
+#define WPS_DEVICE_TYPE_CAT_MM_DEVICES 8
+#define WPS_DEVICE_TYPE_CAT_GAME_DEVICES 9
+#define WPS_DEVICE_TYPE_CAT_TELEPHONE 10
+#define WPS_DEVICE_TYPE_CAT_AUDIO_DEVICES 11 /* WSC 2.0 */
+
+/* Device Type sub categories for primary and secondary device types */
+#define WPS_DEVICE_TYPE_SUB_CAT_COMP_PC 1
+#define WPS_DEVICE_TYPE_SUB_CAT_COMP_SERVER 2
+#define WPS_DEVICE_TYPE_SUB_CAT_COMP_MEDIA_CTR 3
+#define WPS_DEVICE_TYPE_SUB_CAT_COMP_UM_PC 4 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_COMP_NOTEBOOK 5 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_COMP_DESKTOP 6 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_COMP_MID 7 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_COMP_NETBOOK 8 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_INP_Keyboard 1 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_INP_MOUSE 2 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_INP_JOYSTICK 3 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_INP_TRACKBALL 4 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_INP_GAM_CTRL 5 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_INP_REMOTE 6 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_INP_TOUCHSCREEN 7 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_INP_BIO_READER 8 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_INP_BAR_READER 9 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_PRTR_PRINTER 1
+#define WPS_DEVICE_TYPE_SUB_CAT_PRTR_SCANNER 2
+#define WPS_DEVICE_TYPE_SUB_CAT_PRTR_FAX 3 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_PRTR_COPIER 4 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_PRTR_ALLINONE 5 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_CAM_DGTL_STILL 1
+#define WPS_DEVICE_TYPE_SUB_CAT_CAM_VIDEO_CAM 2 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_CAM_WEB_CAM 3 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_CAM_SECU_CAM 4 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_STOR_NAS 1
+#define WPS_DEVICE_TYPE_SUB_CAT_NW_AP 1
+#define WPS_DEVICE_TYPE_SUB_CAT_NW_ROUTER 2
+#define WPS_DEVICE_TYPE_SUB_CAT_NW_SWITCH 3
+#define WPS_DEVICE_TYPE_SUB_CAT_NW_GATEWAY 4 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_NW_BRIDGE 5 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_DISP_TV 1
+#define WPS_DEVICE_TYPE_SUB_CAT_DISP_PIC_FRAME 2
+#define WPS_DEVICE_TYPE_SUB_CAT_DISP_PROJECTOR 3
+#define WPS_DEVICE_TYPE_SUB_CAT_DISP_MONITOR 4 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_MM_DAR 1
+#define WPS_DEVICE_TYPE_SUB_CAT_MM_PVR 2
+#define WPS_DEVICE_TYPE_SUB_CAT_MM_MCX 3
+#define WPS_DEVICE_TYPE_SUB_CAT_MM_STB 4 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_MM_MS_ME 5 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_MM_PVP 6 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_GAM_XBOX 1
+#define WPS_DEVICE_TYPE_SUB_CAT_GAM_XBOX_360 2
+#define WPS_DEVICE_TYPE_SUB_CAT_GAM_PS 3
+#define WPS_DEVICE_TYPE_SUB_CAT_GAM_GC 4 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_GAM_PGD 5 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_PHONE_WM 1
+#define WPS_DEVICE_TYPE_SUB_CAT_PHONE_PSM 2 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_PHONE_PDM 3 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_PHONE_SSM 4 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_PHONE_SDM 5 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_TUNER 1 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_SPEAKERS 2 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_PMP 3 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_HEADSET 4 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_HPHONE 5 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_MPHONE 6 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_HTS 7 /* WSC 2.0 */
+
+/* Device request/response type */
+#define WPS_MSGTYPE_ENROLLEE_INFO_ONLY 0x00
+#define WPS_MSGTYPE_ENROLLEE_OPEN_8021X 0x01
+#define WPS_MSGTYPE_REGISTRAR 0x02
+#define WPS_MSGTYPE_AP_WLAN_MGR 0x03
+
+/* RF Band */
+#define WPS_RFBAND_24GHZ 0x01
+#define WPS_RFBAND_50GHZ 0x02
+
+/* Simple Config state */
+#define WPS_SCSTATE_UNCONFIGURED 0x01
+#define WPS_SCSTATE_CONFIGURED 0x02
+#define WPS_SCSTATE_OFF 11
+
+/* WPS Vendor extension key */
+#define WPS_OUI_HEADER_LEN 2
+#define WPS_OUI_HEADER_SIZE 4
+#define WPS_OUI_FIXED_HEADER_OFF 16
+#define WPS_WFA_SUBID_V2_OFF 3
+#define WPS_WFA_V2_OFF 5
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _WPS_ */
diff --git a/bcmdhd.101.10.361.x/linux_osl.c b/bcmdhd.101.10.361.x/linux_osl.c
new file mode 100755
index 0000000..4e7607c
--- /dev/null
+++ b/bcmdhd.101.10.361.x/linux_osl.c
@@ -0,0 +1,2197 @@
+/*
+ * Linux OS Independent Layer
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#define LINUX_PORT
+
+#include <typedefs.h>
+#include <bcmendian.h>
+#include <linuxver.h>
+#include <bcmdefs.h>
+
+#if defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)
+#include <asm/cacheflush.h>
+#endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */
+
+#include <linux/random.h>
+
+#include <osl.h>
+#include <bcmutils.h>
+#include <linux/delay.h>
+#include <linux/vmalloc.h>
+#include <pcicfg.h>
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(4, 8, 0))
+#include <asm-generic/pci-dma-compat.h>
+#endif
+
+#if defined(BCMASSERT_LOG) && !defined(OEM_ANDROID)
+#include <bcm_assert_log.h>
+#endif
+
+#include <linux/fs.h>
+
+#ifdef BCM_OBJECT_TRACE
+#include <bcmutils.h>
+#endif /* BCM_OBJECT_TRACE */
+#include "linux_osl_priv.h"
+
+#define PCI_CFG_RETRY 10 /* PR15065: retry count for pci cfg accesses */
+
+#define DUMPBUFSZ 1024
+
+#ifdef CUSTOMER_HW4_DEBUG
+uint32 g_assert_type = 1; /* do not cause kernel panic by default */
+#else
+uint32 g_assert_type = 0; /* cause kernel panic by default */
+#endif /* CUSTOMER_HW4_DEBUG */
+
+module_param(g_assert_type, int, 0);
+
+#if defined(CUSTOMER_HW_AMLOGIC) && defined(USE_AML_PCIE_TEE_MEM)
+extern struct device *get_pcie_reserved_mem_dev(void);
+#endif
+
+#if defined(BCMSLTGT)
+/* !!!make sure htclkratio is not 0!!! */
+extern uint htclkratio;
+#endif
+
+#ifdef USE_DMA_LOCK
+static void osl_dma_lock(osl_t *osh);
+static void osl_dma_unlock(osl_t *osh);
+static void osl_dma_lock_init(osl_t *osh);
+
+#define DMA_LOCK(osh) osl_dma_lock(osh)
+#define DMA_UNLOCK(osh) osl_dma_unlock(osh)
+#define DMA_LOCK_INIT(osh) osl_dma_lock_init(osh);
+#else
+#define DMA_LOCK(osh) do { /* noop */ } while(0)
+#define DMA_UNLOCK(osh) do { /* noop */ } while(0)
+#define DMA_LOCK_INIT(osh) do { /* noop */ } while(0)
+#endif /* USE_DMA_LOCK */
+
+static int16 linuxbcmerrormap[] =
+{ 0, /* 0 */
+ -EINVAL, /* BCME_ERROR */
+ -EINVAL, /* BCME_BADARG */
+ -EINVAL, /* BCME_BADOPTION */
+ -EINVAL, /* BCME_NOTUP */
+ -EINVAL, /* BCME_NOTDOWN */
+ -EINVAL, /* BCME_NOTAP */
+ -EINVAL, /* BCME_NOTSTA */
+ -EINVAL, /* BCME_BADKEYIDX */
+ -EINVAL, /* BCME_RADIOOFF */
+ -EINVAL, /* BCME_NOTBANDLOCKED */
+ -EINVAL, /* BCME_NOCLK */
+ -EINVAL, /* BCME_BADRATESET */
+ -EINVAL, /* BCME_BADBAND */
+ -E2BIG, /* BCME_BUFTOOSHORT */
+ -E2BIG, /* BCME_BUFTOOLONG */
+ -EBUSY, /* BCME_BUSY */
+ -EINVAL, /* BCME_NOTASSOCIATED */
+ -EINVAL, /* BCME_BADSSIDLEN */
+ -EINVAL, /* BCME_OUTOFRANGECHAN */
+ -EINVAL, /* BCME_BADCHAN */
+ -EFAULT, /* BCME_BADADDR */
+ -ENOMEM, /* BCME_NORESOURCE */
+ -EOPNOTSUPP, /* BCME_UNSUPPORTED */
+ -EMSGSIZE, /* BCME_BADLENGTH */
+ -EINVAL, /* BCME_NOTREADY */
+ -EPERM, /* BCME_EPERM */
+ -ENOMEM, /* BCME_NOMEM */
+ -EINVAL, /* BCME_ASSOCIATED */
+ -ERANGE, /* BCME_RANGE */
+ -EINVAL, /* BCME_NOTFOUND */
+ -EINVAL, /* BCME_WME_NOT_ENABLED */
+ -EINVAL, /* BCME_TSPEC_NOTFOUND */
+ -EINVAL, /* BCME_ACM_NOTSUPPORTED */
+ -EINVAL, /* BCME_NOT_WME_ASSOCIATION */
+ -EIO, /* BCME_SDIO_ERROR */
+ -ENODEV, /* BCME_DONGLE_DOWN */
+ -EINVAL, /* BCME_VERSION */
+ -EIO, /* BCME_TXFAIL */
+ -EIO, /* BCME_RXFAIL */
+ -ENODEV, /* BCME_NODEVICE */
+ -EINVAL, /* BCME_NMODE_DISABLED */
+ -ENODATA, /* BCME_NONRESIDENT */
+ -EINVAL, /* BCME_SCANREJECT */
+ -EINVAL, /* BCME_USAGE_ERROR */
+ -EIO, /* BCME_IOCTL_ERROR */
+ -EIO, /* BCME_SERIAL_PORT_ERR */
+ -EOPNOTSUPP, /* BCME_DISABLED, BCME_NOTENABLED */
+ -EIO, /* BCME_DECERR */
+ -EIO, /* BCME_ENCERR */
+ -EIO, /* BCME_MICERR */
+ -ERANGE, /* BCME_REPLAY */
+ -EINVAL, /* BCME_IE_NOTFOUND */
+ -EINVAL, /* BCME_DATA_NOTFOUND */
+ -EINVAL, /* BCME_NOT_GC */
+ -EINVAL, /* BCME_PRS_REQ_FAILED */
+ -EINVAL, /* BCME_NO_P2P_SE */
+ -EINVAL, /* BCME_NOA_PND */
+ -EINVAL, /* BCME_FRAG_Q_FAILED */
+ -EINVAL, /* BCME_GET_AF_FAILED */
+ -EINVAL, /* BCME_MSCH_NOTREADY */
+ -EINVAL, /* BCME_IOV_LAST_CMD */
+ -EINVAL, /* BCME_MINIPMU_CAL_FAIL */
+ -EINVAL, /* BCME_RCAL_FAIL */
+ -EINVAL, /* BCME_LPF_RCCAL_FAIL */
+ -EINVAL, /* BCME_DACBUF_RCCAL_FAIL */
+ -EINVAL, /* BCME_VCOCAL_FAIL */
+ -EINVAL, /* BCME_BANDLOCKED */
+ -EINVAL, /* BCME_BAD_IE_DATA */
+ -EINVAL, /* BCME_REG_FAILED */
+ -EINVAL, /* BCME_NOCHAN */
+ -EINVAL, /* BCME_PKTTOSS */
+ -EINVAL, /* BCME_DNGL_DEVRESET */
+ -EINVAL, /* BCME_ROAM */
+ -EOPNOTSUPP, /* BCME_NO_SIG_FILE */
+
+/* When a new error code is added to bcmutils.h, add the OS-specific error
+ * translation here as well
+ */
+/* check if BCME_LAST changed since the last time this function was updated */
+#if BCME_LAST != BCME_NO_SIG_FILE
+#error "You need to add an OS error translation in the linuxbcmerrormap \
+	for new error codes defined in bcmutils.h"
+#endif
+};
+uint lmtest = FALSE;
+
+#ifdef DHD_MAP_LOGGING
+#define DHD_MAP_LOG_SIZE 2048
+
+typedef struct dhd_map_item {
+ dmaaddr_t pa; /* DMA address (physical) */
+ uint64 ts_nsec; /* timestamp: nsec */
+ uint32 size; /* mapping size */
+ uint8 rsvd[4]; /* reserved for future use */
+} dhd_map_item_t;
+
+typedef struct dhd_map_record {
+ uint32 items; /* number of total items */
+ uint32 idx; /* current index of metadata */
+ dhd_map_item_t map[0]; /* metadata storage */
+} dhd_map_log_t;
+
+void
+osl_dma_map_dump(osl_t *osh)
+{
+ dhd_map_log_t *map_log, *unmap_log;
+ uint64 ts_sec, ts_usec;
+
+ map_log = (dhd_map_log_t *)(osh->dhd_map_log);
+ unmap_log = (dhd_map_log_t *)(osh->dhd_unmap_log);
+ osl_get_localtime(&ts_sec, &ts_usec);
+
+ if (map_log && unmap_log) {
+ printf("%s: map_idx=%d unmap_idx=%d "
+ "current time=[%5lu.%06lu]\n", __FUNCTION__,
+ map_log->idx, unmap_log->idx, (unsigned long)ts_sec,
+ (unsigned long)ts_usec);
+ printf("%s: dhd_map_log(pa)=0x%llx size=%d,"
+ " dma_unmap_log(pa)=0x%llx size=%d\n", __FUNCTION__,
+ (uint64)__virt_to_phys((ulong)(map_log->map)),
+ (uint32)(sizeof(dhd_map_item_t) * map_log->items),
+ (uint64)__virt_to_phys((ulong)(unmap_log->map)),
+ (uint32)(sizeof(dhd_map_item_t) * unmap_log->items));
+ }
+}
+
+static void *
+osl_dma_map_log_init(uint32 item_len)
+{
+ dhd_map_log_t *map_log;
+ gfp_t flags;
+ uint32 alloc_size = (uint32)(sizeof(dhd_map_log_t) +
+ (item_len * sizeof(dhd_map_item_t)));
+
+ flags = CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC;
+ map_log = (dhd_map_log_t *)kmalloc(alloc_size, flags);
+ if (map_log) {
+ memset(map_log, 0, alloc_size);
+ map_log->items = item_len;
+ map_log->idx = 0;
+ }
+
+ return (void *)map_log;
+}
+
+static void
+osl_dma_map_log_deinit(osl_t *osh)
+{
+ if (osh->dhd_map_log) {
+ kfree(osh->dhd_map_log);
+ osh->dhd_map_log = NULL;
+ }
+
+ if (osh->dhd_unmap_log) {
+ kfree(osh->dhd_unmap_log);
+ osh->dhd_unmap_log = NULL;
+ }
+}
+
+static void
+osl_dma_map_logging(osl_t *osh, void *handle, dmaaddr_t pa, uint32 len)
+{
+ dhd_map_log_t *log = (dhd_map_log_t *)handle;
+ uint32 idx;
+
+ if (log == NULL) {
+ printf("%s: log is NULL\n", __FUNCTION__);
+ return;
+ }
+
+ idx = log->idx;
+ log->map[idx].ts_nsec = osl_localtime_ns();
+ log->map[idx].pa = pa;
+ log->map[idx].size = len;
+ log->idx = (idx + 1) % log->items;
+}
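+
+/* Note: the map/unmap logs filled above are fixed-size circular buffers;
+ * log->idx wraps at log->items, so only the most recent DHD_MAP_LOG_SIZE
+ * entries are retained.
+ */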
+#endif /* DHD_MAP_LOGGING */
+
+/* translate bcmerrors into linux errors */
+int
+osl_error(int bcmerror)
+{
+ if (bcmerror > 0)
+ bcmerror = 0;
+ else if (bcmerror < BCME_LAST)
+ bcmerror = BCME_ERROR;
+
+ /* Array bounds covered by ASSERT in osl_attach */
+ return linuxbcmerrormap[-bcmerror];
+}
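+
+/* Example (illustrative): mapping a BCME code to a Linux errno at an ioctl
+ * boundary using the table above:
+ *	return osl_error(BCME_NOMEM);	returns -ENOMEM
+ */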
+#ifdef SHARED_OSL_CMN
+osl_t *
+osl_attach(void *pdev, uint bustype, bool pkttag, void **osl_cmn)
+{
+#else
+osl_t *
+osl_attach(void *pdev, uint bustype, bool pkttag)
+{
+ void **osl_cmn = NULL;
+#endif /* SHARED_OSL_CMN */
+ osl_t *osh;
+ gfp_t flags;
+
+	flags = CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC;
+ if (!(osh = kmalloc(sizeof(osl_t), flags)))
+ return osh;
+
+ ASSERT(osh);
+
+ bzero(osh, sizeof(osl_t));
+
+ if (osl_cmn == NULL || *osl_cmn == NULL) {
+ if (!(osh->cmn = kmalloc(sizeof(osl_cmn_t), flags))) {
+ kfree(osh);
+ return NULL;
+ }
+ bzero(osh->cmn, sizeof(osl_cmn_t));
+ if (osl_cmn)
+ *osl_cmn = osh->cmn;
+ atomic_set(&osh->cmn->malloced, 0);
+ osh->cmn->dbgmem_list = NULL;
+ spin_lock_init(&(osh->cmn->dbgmem_lock));
+
+#ifdef BCMDBG_PKT
+ spin_lock_init(&(osh->cmn->pktlist_lock));
+#endif
+ spin_lock_init(&(osh->cmn->pktalloc_lock));
+
+ } else {
+ osh->cmn = *osl_cmn;
+ }
+ atomic_add(1, &osh->cmn->refcount);
+
+ bcm_object_trace_init();
+ /* Check that error map has the right number of entries in it */
+ ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(linuxbcmerrormap) - 1));
+ osh->failed = 0;
+ osh->pdev = pdev;
+ osh->pub.pkttag = pkttag;
+ osh->bustype = bustype;
+ osh->magic = OS_HANDLE_MAGIC;
+
+ switch (bustype) {
+ case PCI_BUS:
+ case SI_BUS:
+ osh->pub.mmbus = TRUE;
+ break;
+ case SDIO_BUS:
+ case USB_BUS:
+ case SPI_BUS:
+ case RPC_BUS:
+ osh->pub.mmbus = FALSE;
+ break;
+ default:
+ ASSERT(FALSE);
+ break;
+ }
+
+#ifdef BCMDBG_CTRACE
+ spin_lock_init(&osh->ctrace_lock);
+ INIT_LIST_HEAD(&osh->ctrace_list);
+ osh->ctrace_num = 0;
+#endif /* BCMDBG_CTRACE */
+
+ DMA_LOCK_INIT(osh);
+
+#ifdef BCMDBG_ASSERT
+ if (pkttag) {
+ struct sk_buff *skb;
+ BCM_REFERENCE(skb);
+ ASSERT(OSL_PKTTAG_SZ <= sizeof(skb->cb));
+ }
+#endif
+
+#ifdef DHD_MAP_LOGGING
+ osh->dhd_map_log = osl_dma_map_log_init(DHD_MAP_LOG_SIZE);
+ if (osh->dhd_map_log == NULL) {
+ printf("%s: Failed to alloc dhd_map_log\n", __FUNCTION__);
+ }
+
+ osh->dhd_unmap_log = osl_dma_map_log_init(DHD_MAP_LOG_SIZE);
+ if (osh->dhd_unmap_log == NULL) {
+ printf("%s: Failed to alloc dhd_unmap_log\n", __FUNCTION__);
+ }
+#endif /* DHD_MAP_LOGGING */
+
+#if defined(CUSTOMER_HW_AMLOGIC) && defined(USE_AML_PCIE_TEE_MEM)
+ osh->tee_mem_dev = get_pcie_reserved_mem_dev();
+ if (osh->tee_mem_dev) {
+ printf("####### use amlogic pcie TEE protect mem #######\n");
+ ((struct pci_dev *)osh->pdev)->dev.dma_mask = NULL;
+ }
+#endif
+
+ return osh;
+}
+
+void osl_set_bus_handle(osl_t *osh, void *bus_handle)
+{
+ osh->bus_handle = bus_handle;
+}
+
+void* osl_get_bus_handle(osl_t *osh)
+{
+ return osh->bus_handle;
+}
+
+#if defined(AXI_TIMEOUTS_NIC)
+void osl_set_bpt_cb(osl_t *osh, void *bpt_cb, void *bpt_ctx)
+{
+ if (osh) {
+ osh->bpt_cb = (bpt_cb_fn)bpt_cb;
+ osh->sih = bpt_ctx;
+ }
+}
+#endif /* AXI_TIMEOUTS_NIC */
+
+void
+osl_detach(osl_t *osh)
+{
+ if (osh == NULL)
+ return;
+
+#ifdef BCMDBG_MEM
+ if (MEMORY_LEFTOVER(osh)) {
+ static char dumpbuf[DUMPBUFSZ];
+ struct bcmstrbuf b;
+
+ printf("%s: MEMORY LEAK %d bytes\n", __FUNCTION__, MALLOCED(osh));
+ bcm_binit(&b, dumpbuf, DUMPBUFSZ);
+ MALLOC_DUMP(osh, &b);
+ printf("%s", b.origbuf);
+ }
+#endif
+
+ bcm_object_trace_deinit();
+
+#ifdef DHD_MAP_LOGGING
+ osl_dma_map_log_deinit(osh);
+#endif /* DHD_MAP_LOGGING */
+
+ ASSERT(osh->magic == OS_HANDLE_MAGIC);
+ atomic_sub(1, &osh->cmn->refcount);
+ if (atomic_read(&osh->cmn->refcount) == 0) {
+ kfree(osh->cmn);
+ }
+ kfree(osh);
+}
+
+/* APIs to set/get specific quirks in OSL layer */
+void
+BCMFASTPATH(osl_flag_set)(osl_t *osh, uint32 mask)
+{
+ osh->flags |= mask;
+}
+
+void
+osl_flag_clr(osl_t *osh, uint32 mask)
+{
+ osh->flags &= ~mask;
+}
+
+bool
+osl_is_flag_set(osl_t *osh, uint32 mask)
+{
+ return (osh->flags & mask);
+}
+
+#if (defined(BCMPCIE) && defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING))
+
+inline void
+BCMFASTPATH(osl_cache_flush)(void *va, uint size)
+{
+ if (size > 0)
+ dma_sync_single_for_device(OSH_NULL, virt_to_dma(OSH_NULL, va), size,
+ DMA_TO_DEVICE);
+}
+
+inline void
+BCMFASTPATH(osl_cache_inv)(void *va, uint size)
+{
+ dma_sync_single_for_cpu(OSH_NULL, virt_to_dma(OSH_NULL, va), size, DMA_FROM_DEVICE);
+}
+
+inline void
+BCMFASTPATH(osl_prefetch)(const void *ptr)
+{
+ __asm__ __volatile__("pld\t%0" :: "o"(*(const char *)ptr) : "cc");
+}
+
+#endif /* BCMPCIE && __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */
+
+uint32
+osl_pci_read_config(osl_t *osh, uint offset, uint size)
+{
+ uint val = 0;
+ uint retry = PCI_CFG_RETRY; /* PR15065: faulty cardbus controller bug */
+
+ ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+
+ /* only 4byte access supported */
+ ASSERT(size == 4);
+
+ do {
+ pci_read_config_dword(osh->pdev, offset, &val);
+ if (val != 0xffffffff)
+ break;
+ } while (retry--);
+
+#ifdef BCMDBG
+ if (retry < PCI_CFG_RETRY)
+ printf("PCI CONFIG READ access to %d required %d retries\n", offset,
+ (PCI_CFG_RETRY - retry));
+#endif /* BCMDBG */
+
+ return (val);
+}
+
+void
+osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val)
+{
+ uint retry = PCI_CFG_RETRY; /* PR15065: faulty cardbus controller bug */
+
+ ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+
+ /* only 4byte access supported */
+ ASSERT(size == 4);
+
+ do {
+ pci_write_config_dword(osh->pdev, offset, val);
+ /* PR15065: PCI_BAR0_WIN is believed to be the only pci cfg write that can occur
+ * when dma activity is possible
+ */
+ if (offset != PCI_BAR0_WIN)
+ break;
+ if (osl_pci_read_config(osh, offset, size) == val)
+ break;
+ } while (retry--);
+
+#ifdef BCMDBG
+ if (retry < PCI_CFG_RETRY)
+ printf("PCI CONFIG WRITE access to %d required %d retries\n", offset,
+ (PCI_CFG_RETRY - retry));
+#endif /* BCMDBG */
+}
+
+#ifdef BCMPCIE
+/* return bus # for the pci device pointed by osh->pdev */
+uint
+osl_pci_bus(osl_t *osh)
+{
+ ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
+
+#if defined(__ARM_ARCH_7A__)
+ return pci_domain_nr(((struct pci_dev *)osh->pdev)->bus);
+#else
+ return ((struct pci_dev *)osh->pdev)->bus->number;
+#endif
+}
+
+/* return slot # for the pci device pointed by osh->pdev */
+uint
+osl_pci_slot(osl_t *osh)
+{
+ ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
+
+#if defined(__ARM_ARCH_7A__)
+ return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn) + 1;
+#else
+ return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn);
+#endif
+}
+
+/* return domain # for the pci device pointed by osh->pdev */
+uint
+osl_pcie_domain(osl_t *osh)
+{
+ ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
+
+ return pci_domain_nr(((struct pci_dev *)osh->pdev)->bus);
+}
+
+/* return bus # for the pci device pointed by osh->pdev */
+uint
+osl_pcie_bus(osl_t *osh)
+{
+ ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
+
+ return ((struct pci_dev *)osh->pdev)->bus->number;
+}
+
+/* return the pci device pointed by osh->pdev */
+struct pci_dev *
+osl_pci_device(osl_t *osh)
+{
+ ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
+
+ return osh->pdev;
+}
+#endif /* BCMPCIE */
+
+#ifdef BCMDBG_MEM
+/* In BCMDBG_MEM configurations osl_malloc is only used internally in
+ * the implementation of osl_debug_malloc. Because we are using the GCC
+ * -Wstrict-prototypes compile option, we must always have a prototype
+ * for a global/external function. So make osl_malloc static in
+ * the BCMDBG_MEM case.
+ */
+static
+#endif
+void *
+osl_malloc(osl_t *osh, uint size)
+{
+ void *addr;
+ gfp_t flags;
+
+ /* only ASSERT if osh is defined */
+ if (osh)
+ ASSERT(osh->magic == OS_HANDLE_MAGIC);
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+ if (bcm_static_buf)
+ {
+ unsigned long irq_flags;
+ int i = 0;
+ if ((size >= PAGE_SIZE) && (size <= STATIC_BUF_SIZE))
+ {
+ OSL_STATIC_BUF_LOCK(&bcm_static_buf->static_lock, irq_flags);
+
+ for (i = 0; i < STATIC_BUF_MAX_NUM; i++)
+ {
+ if (bcm_static_buf->buf_use[i] == 0)
+ break;
+ }
+
+ if (i == STATIC_BUF_MAX_NUM)
+ {
+ OSL_STATIC_BUF_UNLOCK(&bcm_static_buf->static_lock, irq_flags);
+ printf("all static buff in use!\n");
+ goto original;
+ }
+
+ bcm_static_buf->buf_use[i] = 1;
+ OSL_STATIC_BUF_UNLOCK(&bcm_static_buf->static_lock, irq_flags);
+
+ bzero(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i, size);
+ if (osh)
+ atomic_add(size, &osh->cmn->malloced);
+
+ return ((void *)(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i));
+ }
+ }
+original:
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+
+ flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
+ if ((addr = kmalloc(size, flags)) == NULL) {
+ if (osh)
+ osh->failed++;
+ return (NULL);
+ }
+ if (osh && osh->cmn)
+ atomic_add(size, &osh->cmn->malloced);
+
+ return (addr);
+}
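+
+/* Usage sketch (illustrative): allocations are accounted in
+ * osh->cmn->malloced, so the matching free must pass the same size for
+ * the accounting to balance:
+ *
+ *   void *buf = osl_malloc(osh, 128);
+ *   if (buf != NULL)
+ *           osl_mfree(osh, buf, 128);
+ */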
+
+#ifndef BCMDBG_MEM
+void *
+osl_mallocz(osl_t *osh, uint size)
+{
+ void *ptr;
+
+ ptr = osl_malloc(osh, size);
+
+ if (ptr != NULL) {
+ bzero(ptr, size);
+ }
+
+ return ptr;
+}
+#endif
+
+#ifdef BCMDBG_MEM
+/* In BCMDBG_MEM configurations osl_mfree is only used internally in
+ * the implementation of osl_debug_mfree. Because we are using the GCC
+ * -Wstrict-prototypes compile option, we must always have a prototype
+ * for a global/external function. So make osl_mfree static in
+ * the BCMDBG_MEM case.
+ */
+static
+#endif
+void
+osl_mfree(osl_t *osh, void *addr, uint size)
+{
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+ unsigned long flags;
+
+ if (addr == NULL) {
+ return;
+ }
+
+ if (bcm_static_buf)
+ {
+ if ((addr > (void *)bcm_static_buf) && ((unsigned char *)addr
+ <= ((unsigned char *)bcm_static_buf + STATIC_BUF_TOTAL_LEN)))
+ {
+ int buf_idx = 0;
+
+ buf_idx = ((unsigned char *)addr - bcm_static_buf->buf_ptr)/STATIC_BUF_SIZE;
+
+ OSL_STATIC_BUF_LOCK(&bcm_static_buf->static_lock, flags);
+ bcm_static_buf->buf_use[buf_idx] = 0;
+ OSL_STATIC_BUF_UNLOCK(&bcm_static_buf->static_lock, flags);
+
+ if (osh && osh->cmn) {
+ ASSERT(osh->magic == OS_HANDLE_MAGIC);
+ atomic_sub(size, &osh->cmn->malloced);
+ }
+ return;
+ }
+ }
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+ if (osh && osh->cmn) {
+ ASSERT(osh->magic == OS_HANDLE_MAGIC);
+
+ ASSERT(size <= osl_malloced(osh));
+
+ atomic_sub(size, &osh->cmn->malloced);
+ }
+ kfree(addr);
+}
+
+#ifdef BCMDBG_MEM
+/* In BCMDBG_MEM configurations osl_vmalloc is only used internally in
+ * the implementation of osl_debug_vmalloc. Because we are using the GCC
+ * -Wstrict-prototypes compile option, we must always have a prototype
+ * for a global/external function. So make osl_vmalloc static in
+ * the BCMDBG_MEM case.
+ */
+static
+#endif
+void *
+osl_vmalloc(osl_t *osh, uint size)
+{
+ void *addr;
+
+ /* only ASSERT if osh is defined */
+ if (osh)
+ ASSERT(osh->magic == OS_HANDLE_MAGIC);
+ if ((addr = vmalloc(size)) == NULL) {
+ if (osh)
+ osh->failed++;
+ return (NULL);
+ }
+ if (osh && osh->cmn)
+ atomic_add(size, &osh->cmn->malloced);
+
+ return (addr);
+}
+
+#ifndef BCMDBG_MEM
+void *
+osl_vmallocz(osl_t *osh, uint size)
+{
+ void *ptr;
+
+ ptr = osl_vmalloc(osh, size);
+
+ if (ptr != NULL) {
+ bzero(ptr, size);
+ }
+
+ return ptr;
+}
+#endif
+
+#ifdef BCMDBG_MEM
+/* In BCMDBG_MEM configurations osl_vmfree is only used internally in
+ * the implementation of osl_debug_vmfree. Because we are using the GCC
+ * -Wstrict-prototypes compile option, we must always have a prototype
+ * for a global/external function. So make osl_vmfree static in
+ * the BCMDBG_MEM case.
+ */
+static
+#endif
+void
+osl_vmfree(osl_t *osh, void *addr, uint size)
+{
+ if (osh && osh->cmn) {
+ ASSERT(osh->magic == OS_HANDLE_MAGIC);
+
+ ASSERT(size <= osl_malloced(osh));
+
+ atomic_sub(size, &osh->cmn->malloced);
+ }
+ vfree(addr);
+}
+
+uint
+osl_check_memleak(osl_t *osh)
+{
+ ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+ if (atomic_read(&osh->cmn->refcount) == 1)
+ return (atomic_read(&osh->cmn->malloced));
+ else
+ return 0;
+}
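+
+/* Note: leftover memory is only meaningful once this is the last
+ * reference on the shared state (refcount == 1); while sibling handles
+ * still hold the cmn structure, their allocations are not leaks.
+ */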
+
+uint
+osl_malloced(osl_t *osh)
+{
+ ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+ return (atomic_read(&osh->cmn->malloced));
+}
+
+uint
+osl_malloc_failed(osl_t *osh)
+{
+ ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+ return (osh->failed);
+}
+
+#ifdef BCMDBG_MEM
+void *
+osl_debug_malloc(osl_t *osh, uint size, int line, const char* file)
+{
+ bcm_mem_link_t *p;
+ const char* basename;
+ unsigned long flags = 0;
+ if (!size) {
+ printf("%s: allocating zero sized mem at %s line %d\n", __FUNCTION__, file, line);
+ ASSERT(0);
+ }
+
+ if ((p = (bcm_mem_link_t*)osl_malloc(osh, sizeof(bcm_mem_link_t) + size)) == NULL) {
+ return (NULL);
+ }
+
+ if (osh) {
+ OSL_MEMLIST_LOCK(&osh->cmn->dbgmem_lock, flags);
+ }
+
+ p->size = size;
+ p->line = line;
+ p->osh = (void *)osh;
+
+ basename = strrchr(file, '/');
+ /* skip the '/' */
+ if (basename)
+ basename++;
+
+ if (!basename)
+ basename = file;
+
+ strlcpy(p->file, basename, sizeof(p->file));
+
+ /* link this block */
+ if (osh) {
+ p->prev = NULL;
+ p->next = osh->cmn->dbgmem_list;
+ if (p->next)
+ p->next->prev = p;
+ osh->cmn->dbgmem_list = p;
+ OSL_MEMLIST_UNLOCK(&osh->cmn->dbgmem_lock, flags);
+ }
+
+ return p + 1;
+}
+
+void *
+osl_debug_mallocz(osl_t *osh, uint size, int line, const char* file)
+{
+ void *ptr;
+
+ ptr = osl_debug_malloc(osh, size, line, file);
+
+ if (ptr != NULL) {
+ bzero(ptr, size);
+ }
+
+ return ptr;
+}
+
+void
+osl_debug_mfree(osl_t *osh, void *addr, uint size, int line, const char* file)
+{
+ bcm_mem_link_t *p;
+ unsigned long flags = 0;
+
+ ASSERT(osh == NULL || osh->magic == OS_HANDLE_MAGIC);
+
+ if (addr == NULL) {
+ return;
+ }
+
+ p = (bcm_mem_link_t *)((int8*)addr - sizeof(bcm_mem_link_t));
+ if (p->size == 0) {
+ printf("osl_debug_mfree: double free on addr %p size %d at line %d file %s\n",
+ addr, size, line, file);
+ prhex("bcm_mem_link_t", (void *)p, sizeof(*p));
+ ASSERT(p->size);
+ return;
+ }
+
+ if (p->size != size) {
+ printf("%s: dealloca size does not match alloc size\n", __FUNCTION__);
+ printf("Dealloc addr %p size %d at line %d file %s\n", addr, size, line, file);
+ printf("Alloc size %d line %d file %s\n", p->size, p->line, p->file);
+ prhex("bcm_mem_link_t", (void *)p, sizeof(*p));
+ ASSERT(p->size == size);
+ return;
+ }
+
+ if (osh && ((osl_t*)p->osh)->cmn != osh->cmn) {
+ printf("osl_debug_mfree: alloc osh %p does not match dealloc osh %p\n",
+ ((osl_t*)p->osh)->cmn, osh->cmn);
+ printf("Dealloc addr %p size %d at line %d file %s\n", addr, size, line, file);
+ printf("Alloc size %d line %d file %s\n", p->size, p->line, p->file);
+ prhex("bcm_mem_link_t", (void *)p, sizeof(*p));
+ ASSERT(((osl_t*)p->osh)->cmn == osh->cmn);
+ return;
+ }
+
+ /* unlink this block */
+ if (osh && osh->cmn) {
+ OSL_MEMLIST_LOCK(&osh->cmn->dbgmem_lock, flags);
+ if (p->prev)
+ p->prev->next = p->next;
+ if (p->next)
+ p->next->prev = p->prev;
+ if (osh->cmn->dbgmem_list == p)
+ osh->cmn->dbgmem_list = p->next;
+ p->next = p->prev = NULL;
+ }
+ p->size = 0;
+
+ if (osh && osh->cmn) {
+ OSL_MEMLIST_UNLOCK(&osh->cmn->dbgmem_lock, flags);
+ }
+ osl_mfree(osh, p, size + sizeof(bcm_mem_link_t));
+}
+
+void *
+osl_debug_vmalloc(osl_t *osh, uint size, int line, const char* file)
+{
+ bcm_mem_link_t *p;
+ const char* basename;
+ unsigned long flags = 0;
+ if (!size) {
+ printf("%s: allocating zero sized mem at %s line %d\n", __FUNCTION__, file, line);
+ ASSERT(0);
+ }
+
+ if ((p = (bcm_mem_link_t*)osl_vmalloc(osh, sizeof(bcm_mem_link_t) + size)) == NULL) {
+ return (NULL);
+ }
+
+ if (osh) {
+ OSL_MEMLIST_LOCK(&osh->cmn->dbgmem_lock, flags);
+ }
+
+ p->size = size;
+ p->line = line;
+ p->osh = (void *)osh;
+
+ basename = strrchr(file, '/');
+ /* skip the '/' */
+ if (basename)
+ basename++;
+
+ if (!basename)
+ basename = file;
+
+ strlcpy(p->file, basename, sizeof(p->file));
+
+ /* link this block */
+ if (osh) {
+ p->prev = NULL;
+ p->next = osh->cmn->dbgvmem_list;
+ if (p->next)
+ p->next->prev = p;
+ osh->cmn->dbgvmem_list = p;
+ OSL_MEMLIST_UNLOCK(&osh->cmn->dbgmem_lock, flags);
+ }
+
+ return p + 1;
+}
+
+void *
+osl_debug_vmallocz(osl_t *osh, uint size, int line, const char* file)
+{
+ void *ptr;
+
+ ptr = osl_debug_vmalloc(osh, size, line, file);
+
+ if (ptr != NULL) {
+ bzero(ptr, size);
+ }
+
+ return ptr;
+}
+
+void
+osl_debug_vmfree(osl_t *osh, void *addr, uint size, int line, const char* file)
+{
+ bcm_mem_link_t *p = (bcm_mem_link_t *)((int8*)addr - sizeof(bcm_mem_link_t));
+ unsigned long flags = 0;
+
+ ASSERT(osh == NULL || osh->magic == OS_HANDLE_MAGIC);
+
+ if (p->size == 0) {
+ printf("osl_debug_mfree: double free on addr %p size %d at line %d file %s\n",
+ addr, size, line, file);
+ ASSERT(p->size);
+ return;
+ }
+
+ if (p->size != size) {
+ printf("%s: dealloca size does not match alloc size\n", __FUNCTION__);
+ printf("Dealloc addr %p size %d at line %d file %s\n", addr, size, line, file);
+ printf("Alloc size %d line %d file %s\n", p->size, p->line, p->file);
+ ASSERT(p->size == size);
+ return;
+ }
+
+ if (osh && ((osl_t*)p->osh)->cmn != osh->cmn) {
+ printf("osl_debug_mfree: alloc osh %p does not match dealloc osh %p\n",
+ ((osl_t*)p->osh)->cmn, osh->cmn);
+ printf("Dealloc addr %p size %d at line %d file %s\n", addr, size, line, file);
+ printf("Alloc size %d line %d file %s\n", p->size, p->line, p->file);
+ ASSERT(((osl_t*)p->osh)->cmn == osh->cmn);
+ return;
+ }
+
+ /* unlink this block */
+ if (osh && osh->cmn) {
+ OSL_MEMLIST_LOCK(&osh->cmn->dbgmem_lock, flags);
+ if (p->prev)
+ p->prev->next = p->next;
+ if (p->next)
+ p->next->prev = p->prev;
+ if (osh->cmn->dbgvmem_list == p)
+ osh->cmn->dbgvmem_list = p->next;
+ p->next = p->prev = NULL;
+ }
+ p->size = 0;
+
+ if (osh && osh->cmn) {
+ OSL_MEMLIST_UNLOCK(&osh->cmn->dbgmem_lock, flags);
+ }
+ osl_vmfree(osh, p, size + sizeof(bcm_mem_link_t));
+}
+
+int
+osl_debug_memdump(osl_t *osh, struct bcmstrbuf *b)
+{
+ bcm_mem_link_t *p;
+ unsigned long flags = 0;
+
+ ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+
+ OSL_MEMLIST_LOCK(&osh->cmn->dbgmem_lock, flags);
+
+ if (osl_check_memleak(osh) && osh->cmn->dbgmem_list) {
+ if (b != NULL)
+ bcm_bprintf(b, " Address Size File:line\n");
+ else
+ printf(" Address Size File:line\n");
+
+ for (p = osh->cmn->dbgmem_list; p; p = p->next) {
+ if (b != NULL)
+ bcm_bprintf(b, "%p %6d %s:%d\n", (char*)p + sizeof(bcm_mem_link_t),
+ p->size, p->file, p->line);
+ else
+ printk("%p %6d %s:%d\n", (char*)p + sizeof(bcm_mem_link_t),
+ p->size, p->file, p->line);
+
+ /* Detect a loop-to-self so we don't enter an infinite loop */
+ if (p == p->next) {
+ if (b != NULL)
+ bcm_bprintf(b, "WARNING: loop-to-self "
+ "p %p p->next %p\n", p, p->next);
+ else
+ printk("WARNING: loop-to-self "
+ "p %p p->next %p\n", p, p->next);
+
+ break;
+ }
+ }
+ }
+ if (osl_check_memleak(osh) && osh->cmn->dbgvmem_list) {
+ if (b != NULL)
+ bcm_bprintf(b, "Vmem\n Address Size File:line\n");
+ else
+ printf("Vmem\n Address Size File:line\n");
+
+ for (p = osh->cmn->dbgvmem_list; p; p = p->next) {
+ if (b != NULL)
+ bcm_bprintf(b, "%p %6d %s:%d\n", (char*)p + sizeof(bcm_mem_link_t),
+ p->size, p->file, p->line);
+ else
+ printk("%p %6d %s:%d\n", (char*)p + sizeof(bcm_mem_link_t),
+ p->size, p->file, p->line);
+
+ /* Detect a loop-to-self so we don't enter an infinite loop */
+ if (p == p->next) {
+ if (b != NULL)
+ bcm_bprintf(b, "WARNING: loop-to-self "
+ "p %p p->next %p\n", p, p->next);
+ else
+ printk("WARNING: loop-to-self "
+ "p %p p->next %p\n", p, p->next);
+
+ break;
+ }
+ }
+ }
+
+ OSL_MEMLIST_UNLOCK(&osh->cmn->dbgmem_lock, flags);
+
+ return 0;
+}
+
+#endif /* BCMDBG_MEM */
+
+uint
+osl_dma_consistent_align(void)
+{
+ return (PAGE_SIZE);
+}
+
+void*
+osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, uint *alloced, dmaaddr_t *pap)
+{
+ void *va;
+ uint16 align = (1 << align_bits);
+ ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+
+ if (!ISALIGNED(DMA_CONSISTENT_ALIGN, align))
+ size += align;
+ *alloced = size;
+
+#if (defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING))
+ va = kmalloc(size, GFP_ATOMIC | __GFP_ZERO);
+ if (va)
+ *pap = (ulong)__virt_to_phys((ulong)va);
+#else
+ {
+ dma_addr_t pap_lin;
+ struct pci_dev *hwdev = osh->pdev;
+ gfp_t flags;
+#ifdef DHD_ALLOC_COHERENT_MEM_FROM_ATOMIC_POOL
+ flags = GFP_ATOMIC;
+#else
+ flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
+#endif /* DHD_ALLOC_COHERENT_MEM_FROM_ATOMIC_POOL */
+#ifdef DHD_ALLOC_COHERENT_MEM_WITH_GFP_COMP
+ flags |= __GFP_COMP;
+#endif /* DHD_ALLOC_COHERENT_MEM_WITH_GFP_COMP */
+#if defined(CUSTOMER_HW_AMLOGIC) && defined(USE_AML_PCIE_TEE_MEM)
+ if (osh->tee_mem_dev)
+ va = dma_alloc_coherent(osh->tee_mem_dev, size, &pap_lin, flags);
+ else
+#endif /* CUSTOMER_HW_AMLOGIC && USE_AML_PCIE_TEE_MEM */
+ va = dma_alloc_coherent(&hwdev->dev, size, &pap_lin, flags);
+#ifdef BCMDMA64OSL
+ PHYSADDRLOSET(*pap, pap_lin & 0xffffffff);
+ PHYSADDRHISET(*pap, (pap_lin >> 32) & 0xffffffff);
+#else
+ *pap = (dmaaddr_t)pap_lin;
+#endif /* BCMDMA64OSL */
+ }
+#endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */
+
+ return va;
+}
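+
+/* Note: when DMA_CONSISTENT_ALIGN does not already satisfy the requested
+ * (1 << align_bits) alignment, the allocation is padded by 'align' bytes
+ * so the caller can round the returned address up inside the buffer;
+ * *alloced reports the padded size so the matching
+ * osl_dma_free_consistent() call frees the true length.
+ */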
+
+void
+osl_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa)
+{
+#ifdef BCMDMA64OSL
+ dma_addr_t paddr;
+#endif /* BCMDMA64OSL */
+ ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+
+#if (defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING))
+ kfree(va);
+#else
+#ifdef BCMDMA64OSL
+ PHYSADDRTOULONG(pa, paddr);
+ pci_free_consistent(osh->pdev, size, va, paddr);
+#else
+ pci_free_consistent(osh->pdev, size, va, (dma_addr_t)pa);
+#endif /* BCMDMA64OSL */
+#endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */
+}
+
+void *
+osl_virt_to_phys(void *va)
+{
+ return (void *)(uintptr)virt_to_phys(va);
+}
+
+#include <asm/cacheflush.h>
+void
+BCMFASTPATH(osl_dma_flush)(osl_t *osh, void *va, uint size, int direction, void *p,
+ hnddma_seg_map_t *dmah)
+{
+ return;
+}
+
+dmaaddr_t
+BCMFASTPATH(osl_dma_map)(osl_t *osh, void *va, uint size, int direction, void *p,
+ hnddma_seg_map_t *dmah)
+{
+ int dir;
+ dmaaddr_t ret_addr;
+ dma_addr_t map_addr;
+ int ret;
+
+ DMA_LOCK(osh);
+
+ ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+ dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
+
+ map_addr = pci_map_single(osh->pdev, va, size, dir);
+
+ ret = pci_dma_mapping_error(osh->pdev, map_addr);
+
+ if (ret) {
+ printf("%s: Failed to map memory\n", __FUNCTION__);
+ PHYSADDRLOSET(ret_addr, 0);
+ PHYSADDRHISET(ret_addr, 0);
+ } else {
+ PHYSADDRLOSET(ret_addr, map_addr & 0xffffffff);
+ PHYSADDRHISET(ret_addr, (map_addr >> 32) & 0xffffffff);
+ }
+
+#ifdef DHD_MAP_LOGGING
+ osl_dma_map_logging(osh, osh->dhd_map_log, ret_addr, size);
+#endif /* DHD_MAP_LOGGING */
+
+ DMA_UNLOCK(osh);
+
+ return ret_addr;
+}
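+
+/* Sketch (illustrative, assuming the usual PHYSADDRLO()/PHYSADDRHI()
+ * accessors): dmaaddr_t carries the mapped address as explicit 32-bit
+ * halves so 64-bit DMA addresses survive on 32-bit builds; a consumer
+ * reassembles the kernel dma_addr_t as the inverse of the split above:
+ *
+ *   dma_addr_t paddr = ((dma_addr_t)PHYSADDRHI(pa) << 32) | PHYSADDRLO(pa);
+ */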
+
+void
+BCMFASTPATH(osl_dma_unmap)(osl_t *osh, dmaaddr_t pa, uint size, int direction)
+{
+ int dir;
+#ifdef BCMDMA64OSL
+ dma_addr_t paddr;
+#endif /* BCMDMA64OSL */
+
+ ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+
+ DMA_LOCK(osh);
+
+ dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
+
+#ifdef DHD_MAP_LOGGING
+ osl_dma_map_logging(osh, osh->dhd_unmap_log, pa, size);
+#endif /* DHD_MAP_LOGGING */
+
+#ifdef BCMDMA64OSL
+ PHYSADDRTOULONG(pa, paddr);
+ pci_unmap_single(osh->pdev, paddr, size, dir);
+#else /* BCMDMA64OSL */
+ pci_unmap_single(osh->pdev, (uint32)pa, size, dir);
+#endif /* BCMDMA64OSL */
+
+ DMA_UNLOCK(osh);
+}
+
+/* OSL function for CPU relax */
+inline void
+BCMFASTPATH(osl_cpu_relax)(void)
+{
+ cpu_relax();
+}
+
+extern void osl_preempt_disable(osl_t *osh)
+{
+ preempt_disable();
+}
+
+extern void osl_preempt_enable(osl_t *osh)
+{
+ preempt_enable();
+}
+
+#if defined(BCMDBG_ASSERT) || defined(BCMASSERT_LOG)
+void
+osl_assert(const char *exp, const char *file, int line)
+{
+ char tempbuf[256];
+ const char *basename;
+
+ basename = strrchr(file, '/');
+ /* skip the '/' */
+ if (basename)
+ basename++;
+
+ if (!basename)
+ basename = file;
+
+#ifdef BCMASSERT_LOG
+ snprintf(tempbuf, 64, "\"%s\": file \"%s\", line %d\n",
+ exp, basename, line);
+#ifndef OEM_ANDROID
+ bcm_assert_log(tempbuf);
+#endif /* OEM_ANDROID */
+#endif /* BCMASSERT_LOG */
+
+#ifdef BCMDBG_ASSERT
+ snprintf(tempbuf, 256, "assertion \"%s\" failed: file \"%s\", line %d\n",
+ exp, basename, line);
+
+ /* Print assert message and give it time to be written to /var/log/messages */
+ if (!in_interrupt() && g_assert_type != 1 && g_assert_type != 3) {
+ const int delay = 3;
+ printf("%s", tempbuf);
+ printf("panic in %d seconds\n", delay);
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(delay * HZ);
+ }
+#endif /* BCMDBG_ASSERT */
+
+ switch (g_assert_type) {
+ case 0:
+ printf("%s", tempbuf);
+ BUG();
+ break;
+ case 1:
+ /* fall through */
+ case 3:
+ printf("%s", tempbuf);
+ break;
+ case 2:
+ printf("%s", tempbuf);
+ BUG();
+ break;
+ default:
+ break;
+ }
+}
+#endif /* BCMDBG_ASSERT || BCMASSERT_LOG */
+
+void
+osl_delay(uint usec)
+{
+ uint d;
+
+#ifdef BCMSLTGT
+ usec *= htclkratio;
+#endif
+
+ while (usec > 0) {
+ d = MIN(usec, 1000);
+ udelay(d);
+ usec -= d;
+ }
+}
+
+void
+osl_sleep(uint ms)
+{
+#ifdef BCMSLTGT
+ ms *= htclkratio;
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
+ if (ms < 20)
+ usleep_range(ms*1000, ms*1000 + 1000);
+ else
+#endif
+ msleep(ms);
+}
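+
+/* Note: for sleeps below 20 ms usleep_range() is preferred because
+ * msleep() rounds up to whole jiffies and can oversleep badly at low HZ;
+ * the extra 1000 us of slack lets the scheduler coalesce wakeups.
+ * For example, osl_sleep(5) requests a wakeup in the 5000..6000 us window.
+ */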
+
+uint64
+osl_sysuptime_us(void)
+{
+ struct osl_timespec tv;
+ uint64 usec;
+
+ osl_do_gettimeofday(&tv);
+ /* tv_usec content is fraction of a second */
+ usec = (uint64)tv.tv_sec * 1000000ul + tv.tv_usec;
+#ifdef BCMSLTGT
+ /* scale down the time to match the slow target roughly */
+ usec /= htclkratio;
+#endif
+ return usec;
+}
+
+uint64
+osl_localtime_ns(void)
+{
+ uint64 ts_nsec = 0;
+
+#ifdef BCMDONGLEHOST
+ /* Some Linux-based platforms cannot use local_clock()
+ * since it is defined with EXPORT_SYMBOL_GPL();
+ * a GPL-incompatible module (NIC builds wl.ko)
+ * cannot use the GPL-only symbol.
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
+ ts_nsec = local_clock();
+#else
+ ts_nsec = cpu_clock(smp_processor_id());
+#endif
+#endif /* BCMDONGLEHOST */
+ return ts_nsec;
+}
+
+void
+osl_get_localtime(uint64 *sec, uint64 *usec)
+{
+ uint64 ts_nsec = 0;
+ unsigned long rem_nsec = 0;
+
+#ifdef BCMDONGLEHOST
+ /* Some Linux-based platforms cannot use local_clock()
+ * since it is defined with EXPORT_SYMBOL_GPL();
+ * a GPL-incompatible module (NIC builds wl.ko)
+ * cannot use the GPL-only symbol.
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
+ ts_nsec = local_clock();
+#else
+ ts_nsec = cpu_clock(smp_processor_id());
+#endif
+ rem_nsec = do_div(ts_nsec, NSEC_PER_SEC);
+#endif /* BCMDONGLEHOST */
+ *sec = (uint64)ts_nsec;
+ *usec = (uint64)(rem_nsec / MSEC_PER_SEC);
+}
+
+uint64
+osl_systztime_us(void)
+{
+ struct osl_timespec tv;
+ uint64 tzusec;
+
+ osl_do_gettimeofday(&tv);
+ /* apply timezone */
+ tzusec = (uint64)((tv.tv_sec - (sys_tz.tz_minuteswest * 60)) *
+ USEC_PER_SEC);
+ tzusec += tv.tv_usec;
+
+ return tzusec;
+}
+
+/*
+ * OSLREGOPS specifies the use of osl_XXX routines to be used for register access
+ */
+#ifdef OSLREGOPS
+uint8
+osl_readb(osl_t *osh, volatile uint8 *r)
+{
+ osl_rreg_fn_t rreg = ((osl_pubinfo_t*)osh)->rreg_fn;
+ void *ctx = ((osl_pubinfo_t*)osh)->reg_ctx;
+
+ return (uint8)((rreg)(ctx, (volatile void*)r, sizeof(uint8)));
+}
+
+uint16
+osl_readw(osl_t *osh, volatile uint16 *r)
+{
+ osl_rreg_fn_t rreg = ((osl_pubinfo_t*)osh)->rreg_fn;
+ void *ctx = ((osl_pubinfo_t*)osh)->reg_ctx;
+
+ return (uint16)((rreg)(ctx, (volatile void*)r, sizeof(uint16)));
+}
+
+uint32
+osl_readl(osl_t *osh, volatile uint32 *r)
+{
+ osl_rreg_fn_t rreg = ((osl_pubinfo_t*)osh)->rreg_fn;
+ void *ctx = ((osl_pubinfo_t*)osh)->reg_ctx;
+
+ return (uint32)((rreg)(ctx, (volatile void*)r, sizeof(uint32)));
+}
+
+void
+osl_writeb(osl_t *osh, volatile uint8 *r, uint8 v)
+{
+ osl_wreg_fn_t wreg = ((osl_pubinfo_t*)osh)->wreg_fn;
+ void *ctx = ((osl_pubinfo_t*)osh)->reg_ctx;
+
+ ((wreg)(ctx, (volatile void*)r, v, sizeof(uint8)));
+}
+
+void
+osl_writew(osl_t *osh, volatile uint16 *r, uint16 v)
+{
+ osl_wreg_fn_t wreg = ((osl_pubinfo_t*)osh)->wreg_fn;
+ void *ctx = ((osl_pubinfo_t*)osh)->reg_ctx;
+
+ ((wreg)(ctx, (volatile void*)r, v, sizeof(uint16)));
+}
+
+void
+osl_writel(osl_t *osh, volatile uint32 *r, uint32 v)
+{
+ osl_wreg_fn_t wreg = ((osl_pubinfo_t*)osh)->wreg_fn;
+ void *ctx = ((osl_pubinfo_t*)osh)->reg_ctx;
+
+ ((wreg)(ctx, (volatile void*)r, v, sizeof(uint32)));
+}
+#endif /* OSLREGOPS */
+
+/*
+ * BINOSL selects the slightly slower function-call-based binary compatible osl.
+ */
+#ifdef BINOSL
+
+uint32
+osl_sysuptime(void)
+{
+ uint32 msec = ((uint32)jiffies * (1000 / HZ));
+#ifdef BCMSLTGT
+ /* scale down the time to match the slow target roughly */
+ msec /= htclkratio;
+#endif
+ return msec;
+}
+
+int
+osl_printf(const char *format, ...)
+{
+ va_list args;
+ static char printbuf[1024];
+ int len;
+
+ /* sprintf into a local buffer because there *is* no "vprintk()".. */
+ va_start(args, format);
+ len = vsnprintf(printbuf, 1024, format, args);
+ va_end(args);
+
+ if (len >= sizeof(printbuf)) {
+ printf("osl_printf: output truncated\n");
+ return (0);
+ }
+
+ return (printf("%s", printbuf));
+}
+
+int
+osl_sprintf(char *buf, const char *format, ...)
+{
+ va_list args;
+ int rc;
+
+ va_start(args, format);
+ rc = vsprintf(buf, format, args);
+ va_end(args);
+ return (rc);
+}
+
+int
+osl_snprintf(char *buf, size_t n, const char *format, ...)
+{
+ va_list args;
+ int rc;
+
+ va_start(args, format);
+ rc = vsnprintf(buf, n, format, args);
+ va_end(args);
+ return (rc);
+}
+
+int
+osl_vsprintf(char *buf, const char *format, va_list ap)
+{
+ return (vsprintf(buf, format, ap));
+}
+
+int
+osl_vsnprintf(char *buf, size_t n, const char *format, va_list ap)
+{
+ return (vsnprintf(buf, n, format, ap));
+}
+
+int
+osl_strcmp(const char *s1, const char *s2)
+{
+ return (strcmp(s1, s2));
+}
+
+int
+osl_strncmp(const char *s1, const char *s2, uint n)
+{
+ return (strncmp(s1, s2, n));
+}
+
+int
+osl_strlen(const char *s)
+{
+ return (strlen(s));
+}
+
+char*
+osl_strcpy(char *d, const char *s)
+{
+ return (strcpy(d, s));
+}
+
+char*
+osl_strncpy(char *d, const char *s, uint n)
+{
+ return (strlcpy(d, s, n));
+}
+
+char*
+osl_strchr(const char *s, int c)
+{
+ return (strchr(s, c));
+}
+
+char*
+osl_strrchr(const char *s, int c)
+{
+ return (strrchr(s, c));
+}
+
+void*
+osl_memset(void *d, int c, size_t n)
+{
+ return memset(d, c, n);
+}
+
+void*
+osl_memcpy(void *d, const void *s, size_t n)
+{
+ return memcpy(d, s, n);
+}
+
+void*
+osl_memmove(void *d, const void *s, size_t n)
+{
+ return memmove(d, s, n);
+}
+
+int
+osl_memcmp(const void *s1, const void *s2, size_t n)
+{
+ return memcmp(s1, s2, n);
+}
+
+uint32
+osl_readl(volatile uint32 *r)
+{
+ return (readl(r));
+}
+
+uint16
+osl_readw(volatile uint16 *r)
+{
+ return (readw(r));
+}
+
+uint8
+osl_readb(volatile uint8 *r)
+{
+ return (readb(r));
+}
+
+void
+osl_writel(uint32 v, volatile uint32 *r)
+{
+ writel(v, r);
+}
+
+void
+osl_writew(uint16 v, volatile uint16 *r)
+{
+ writew(v, r);
+}
+
+void
+osl_writeb(uint8 v, volatile uint8 *r)
+{
+ writeb(v, r);
+}
+
+void *
+osl_uncached(void *va)
+{
+ return ((void*)va);
+}
+
+void *
+osl_cached(void *va)
+{
+ return ((void*)va);
+}
+
+uint
+osl_getcycles(void)
+{
+ uint cycles;
+
+#if defined(__i386__)
+ rdtscl(cycles);
+#else
+ cycles = 0;
+#endif /* __i386__ */
+ return cycles;
+}
+
+void *
+osl_reg_map(uint32 pa, uint size)
+{
+ return (ioremap_nocache((unsigned long)pa, (unsigned long)size));
+}
+
+void
+osl_reg_unmap(void *va)
+{
+ iounmap(va);
+}
+
+int
+osl_busprobe(uint32 *val, uint32 addr)
+{
+ *val = readl((uint32 *)(uintptr)addr);
+
+ return 0;
+}
+#endif /* BINOSL */
+
+uint32
+osl_rand(void)
+{
+ uint32 rand;
+
+ get_random_bytes(&rand, sizeof(rand));
+
+ return rand;
+}
+
+/* Linux Kernel: File Operations: start */
+void *
+osl_os_open_image(char *filename)
+{
+ struct file *fp;
+
+ fp = filp_open(filename, O_RDONLY, 0);
+ /*
+ * filp_open() availability has varied across kernel revisions
+ * (2.6.11/FC4 supported it). A possible alternative on kernels
+ * without it:
+ * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
+ */
+ if (IS_ERR(fp)) {
+ printf("ERROR %ld: Unable to open file %s\n", PTR_ERR(fp), filename);
+ fp = NULL;
+ }
+
+ return fp;
+}
+
+int
+osl_os_get_image_block(char *buf, int len, void *image)
+{
+ struct file *fp = (struct file *)image;
+ int rdlen;
+
+ if (fp == NULL) {
+ return 0;
+ }
+
+ rdlen = kernel_read_compat(fp, fp->f_pos, buf, len);
+ if (rdlen > 0) {
+ fp->f_pos += rdlen;
+ }
+
+ return rdlen;
+}
+
+void
+osl_os_close_image(void *image)
+{
+ struct file *fp = (struct file *)image;
+
+ if (fp != NULL) {
+ filp_close(fp, NULL);
+ }
+}
+
+int
+osl_os_image_size(void *image)
+{
+ int len = 0, curroffset;
+
+ if (image) {
+ /* store the current offset */
+ curroffset = generic_file_llseek(image, 0, 1);
+ /* goto end of file to get length */
+ len = generic_file_llseek(image, 0, 2);
+ /* restore back the offset */
+ generic_file_llseek(image, curroffset, 0);
+ }
+ return len;
+}
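+
+/* Note: the literal whence values passed to generic_file_llseek() are
+ * the standard lseek origins (0 = SEEK_SET, 1 = SEEK_CUR, 2 = SEEK_END):
+ * remember the current offset, seek to the end to learn the length,
+ * then seek back so the reader resumes where it left off.
+ */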
+
+/* Linux Kernel: File Operations: end */
+
+#if defined(AXI_TIMEOUTS_NIC)
+inline void osl_bpt_rreg(osl_t *osh, ulong addr, volatile void *v, uint size)
+{
+ bool poll_timeout = FALSE;
+ static int in_si_clear = FALSE;
+
+ switch (size) {
+ case sizeof(uint8):
+ *(volatile uint8*)v = readb((volatile uint8*)(addr));
+ if (*(volatile uint8*)v == 0xff)
+ poll_timeout = TRUE;
+ break;
+ case sizeof(uint16):
+ *(volatile uint16*)v = readw((volatile uint16*)(addr));
+ if (*(volatile uint16*)v == 0xffff)
+ poll_timeout = TRUE;
+ break;
+ case sizeof(uint32):
+ *(volatile uint32*)v = readl((volatile uint32*)(addr));
+ if (*(volatile uint32*)v == 0xffffffff)
+ poll_timeout = TRUE;
+ break;
+ case sizeof(uint64):
+ *(volatile uint64*)v = *((volatile uint64*)(addr));
+ if (*(volatile uint64*)v == 0xffffffffffffffff)
+ poll_timeout = TRUE;
+ break;
+ }
+
+ if (osh && osh->sih && (in_si_clear == FALSE) && poll_timeout && osh->bpt_cb) {
+ in_si_clear = TRUE;
+ osh->bpt_cb((void *)osh->sih, (void *)addr);
+ in_si_clear = FALSE;
+ }
+}
+#endif /* AXI_TIMEOUTS_NIC */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+void
+timer_cb_compat(struct timer_list *tl)
+{
+ timer_list_compat_t *t = container_of(tl, timer_list_compat_t, timer);
+ t->callback((ulong)t->arg);
+}
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) */
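+
+/* Note: since kernel 4.15, timer callbacks receive a struct timer_list *
+ * instead of an unsigned long argument. timer_cb_compat() recovers the
+ * enclosing timer_list_compat_t with container_of() and forwards the
+ * stored argument, so the legacy void (*fn)(ulong) callbacks used by
+ * the timer APIs below keep working unchanged.
+ */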
+
+/* timer apis */
+/* Note: the timer APIs are not thread-safe; callers must protect them with locks */
+
+osl_timer_t *
+osl_timer_init(osl_t *osh, const char *name, void (*fn)(void *arg), void *arg)
+{
+ osl_timer_t *t;
+ BCM_REFERENCE(fn);
+ if ((t = MALLOCZ(NULL, sizeof(osl_timer_t))) == NULL) {
+ printf(KERN_ERR "osl_timer_init: out of memory, malloced %d bytes\n",
+ (int)sizeof(osl_timer_t));
+ return (NULL);
+ }
+ bzero(t, sizeof(osl_timer_t));
+ if ((t->timer = MALLOCZ(NULL, sizeof(timer_list_compat_t))) == NULL) {
+ printf("osl_timer_init: malloc failed\n");
+ MFREE(NULL, t, sizeof(osl_timer_t));
+ return (NULL);
+ }
+
+ t->set = TRUE;
+#ifdef BCMDBG
+ if ((t->name = MALLOCZ(NULL, strlen(name) + 1)) != NULL) {
+ strcpy(t->name, name);
+ }
+#endif
+
+ init_timer_compat(t->timer, (linux_timer_fn)fn, arg);
+
+ return (t);
+}
+
+void
+osl_timer_add(osl_t *osh, osl_timer_t *t, uint32 ms, bool periodic)
+{
+ if (t == NULL) {
+ printf("%s: Timer handle is NULL\n", __FUNCTION__);
+ return;
+ }
+ ASSERT(!t->set);
+
+ t->set = TRUE;
+ if (periodic) {
+ printf("Periodic timers are not supported by Linux timer apis\n");
+ }
+#if defined(BCMSLTGT)
+ timer_expires(t->timer) = jiffies + ms*HZ/1000*htclkratio;
+#else
+ timer_expires(t->timer) = jiffies + ms*HZ/1000;
+#endif /* defined(BCMSLTGT) */
+
+ add_timer(t->timer);
+
+ return;
+}
+
+void
+osl_timer_update(osl_t *osh, osl_timer_t *t, uint32 ms, bool periodic)
+{
+ if (t == NULL) {
+ printf("%s: Timer handle is NULL\n", __FUNCTION__);
+ return;
+ }
+ if (periodic) {
+ printf("Periodic timers are not supported by Linux timer apis\n");
+ }
+ t->set = TRUE;
+#if defined(BCMSLTGT)
+ timer_expires(t->timer) = jiffies + ms*HZ/1000*htclkratio;
+#else
+ timer_expires(t->timer) = jiffies + ms*HZ/1000;
+#endif /* defined(BCMSLTGT) */
+
+ mod_timer(t->timer, timer_expires(t->timer));
+
+ return;
+}
+
+/*
+ * Return TRUE if timer successfully deleted, FALSE if still pending
+ */
+bool
+osl_timer_del(osl_t *osh, osl_timer_t *t)
+{
+ if (t == NULL) {
+ printf("%s: Timer handle is NULL\n", __FUNCTION__);
+ return (FALSE);
+ }
+ if (t->set) {
+ t->set = FALSE;
+ if (t->timer) {
+ del_timer(t->timer);
+ MFREE(NULL, t->timer, sizeof(timer_list_compat_t));
+ }
+#ifdef BCMDBG
+ if (t->name) {
+ MFREE(NULL, t->name, strlen(t->name) + 1);
+ }
+#endif
+ MFREE(NULL, t, sizeof(osl_timer_t));
+ }
+ return (TRUE);
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
+int
+kernel_read_compat(struct file *file, loff_t offset, char *addr, unsigned long count)
+{
+ return (int)kernel_read(file, addr, (size_t)count, &offset);
+}
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) */
+
+/* Linux specific multipurpose spinlock API */
+void *
+osl_spin_lock_init(osl_t *osh)
+{
+ /* Adding 4 bytes since the sizeof(spinlock_t) could be 0 */
+ /* if CONFIG_SMP and CONFIG_DEBUG_SPINLOCK are not defined */
+ /* and this results in kernel asserts in internal builds */
+ spinlock_t * lock = MALLOC(osh, sizeof(spinlock_t) + 4);
+ if (lock)
+ spin_lock_init(lock);
+ return ((void *)lock);
+}
+void
+osl_spin_lock_deinit(osl_t *osh, void *lock)
+{
+ if (lock)
+ MFREE(osh, lock, sizeof(spinlock_t) + 4);
+}
+
+unsigned long
+osl_spin_lock(void *lock)
+{
+ unsigned long flags = 0;
+
+ if (lock) {
+#ifdef DHD_USE_SPIN_LOCK_BH
+ /* Calling spin_lock_bh with both irq and non-irq context will lead to deadlock */
+ ASSERT(!in_irq());
+ spin_lock_bh((spinlock_t *)lock);
+#else
+ spin_lock_irqsave((spinlock_t *)lock, flags);
+#endif /* DHD_USE_SPIN_LOCK_BH */
+ }
+
+ return flags;
+}
+
+void
+osl_spin_unlock(void *lock, unsigned long flags)
+{
+ if (lock) {
+#ifdef DHD_USE_SPIN_LOCK_BH
+ /* Calling spin_lock_bh with both irq and non-irq context will lead to deadlock */
+ ASSERT(!in_irq());
+ spin_unlock_bh((spinlock_t *)lock);
+#else
+ spin_unlock_irqrestore((spinlock_t *)lock, flags);
+#endif /* DHD_USE_SPIN_LOCK_BH */
+ }
+}
+
+unsigned long
+osl_spin_lock_irq(void *lock)
+{
+ unsigned long flags = 0;
+
+ if (lock)
+ spin_lock_irqsave((spinlock_t *)lock, flags);
+
+ return flags;
+}
+
+void
+osl_spin_unlock_irq(void *lock, unsigned long flags)
+{
+ if (lock)
+ spin_unlock_irqrestore((spinlock_t *)lock, flags);
+}
+
+unsigned long
+osl_spin_lock_bh(void *lock)
+{
+ unsigned long flags = 0;
+
+ if (lock) {
+ /* Calling spin_lock_bh with both irq and non-irq context will lead to deadlock */
+ ASSERT(!in_irq());
+ spin_lock_bh((spinlock_t *)lock);
+ }
+
+ return flags;
+}
+
+void
+osl_spin_unlock_bh(void *lock, unsigned long flags)
+{
+ if (lock) {
+ /* Calling spin_lock_bh with both irq and non-irq context will lead to deadlock */
+ ASSERT(!in_irq());
+ spin_unlock_bh((spinlock_t *)lock);
+ }
+}
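+
+/* Pairing sketch (illustrative): the flags value returned by the lock
+ * call must be handed back to the matching unlock, mirroring
+ * spin_lock_irqsave()/spin_unlock_irqrestore():
+ *
+ *   unsigned long flags = osl_spin_lock(lock);
+ *   ... critical section ...
+ *   osl_spin_unlock(lock, flags);
+ */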
+
+void *
+osl_mutex_lock_init(osl_t *osh)
+{
+ struct mutex *mtx = NULL;
+
+ mtx = MALLOCZ(osh, sizeof(*mtx));
+ if (mtx)
+ mutex_init(mtx);
+
+ return mtx;
+}
+
+void
+osl_mutex_lock_deinit(osl_t *osh, void *mtx)
+{
+ if (mtx) {
+ mutex_destroy(mtx);
+ MFREE(osh, mtx, sizeof(struct mutex));
+ }
+}
+
+/* For mutex lock/unlock unsigned long flags is used,
+ * this is to keep in sync with spin lock apis, so that
+ * locks can be easily interchanged based on contexts
+ */
+unsigned long
+osl_mutex_lock(void *lock)
+{
+ if (lock)
+ mutex_lock((struct mutex *)lock);
+
+ return 0;
+}
+
+void
+osl_mutex_unlock(void *lock, unsigned long flags)
+{
+ if (lock)
+ mutex_unlock((struct mutex *)lock);
+ return;
+}
+
+#ifdef USE_DMA_LOCK
+static void
+osl_dma_lock(osl_t *osh)
+{
+ /* The conditional check avoids a scheduling bug:
+ * if spin_lock_bh() is taken under spin_lock_irqsave(), the kernel
+ * emits a warning, since spin_lock_irqsave() disables interrupts
+ * and spin_lock_bh() must not be used while interrupts are disabled.
+ * See __local_bh_enable_ip() in kernel/softirq.c for the condition.
+ */
+ if (likely(in_irq() || irqs_disabled())) {
+ spin_lock(&osh->dma_lock);
+ } else {
+ spin_lock_bh(&osh->dma_lock);
+ osh->dma_lock_bh = TRUE;
+ }
+}
+
+static void
+osl_dma_unlock(osl_t *osh)
+{
+ if (unlikely(osh->dma_lock_bh)) {
+ osh->dma_lock_bh = FALSE;
+ spin_unlock_bh(&osh->dma_lock);
+ } else {
+ spin_unlock(&osh->dma_lock);
+ }
+}
+
+static void
+osl_dma_lock_init(osl_t *osh)
+{
+ spin_lock_init(&osh->dma_lock);
+ osh->dma_lock_bh = FALSE;
+}
+#endif /* USE_DMA_LOCK */
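+
+/* Note: osl_dma_lock() picks the lock flavor by context -- plain
+ * spin_lock() when in hard IRQ or with interrupts already disabled,
+ * spin_lock_bh() otherwise -- and records the choice in dma_lock_bh so
+ * osl_dma_unlock() releases with the matching variant.
+ */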
+
+void
+osl_do_gettimeofday(struct osl_timespec *ts)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)
+ struct timespec64 curtime;
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)
+ struct timespec curtime;
+#else
+ struct timeval curtime;
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)
+ ktime_get_real_ts64(&curtime);
+ ts->tv_nsec = curtime.tv_nsec;
+ ts->tv_usec = curtime.tv_nsec / 1000;
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)
+ getnstimeofday(&curtime);
+ ts->tv_nsec = curtime.tv_nsec;
+ ts->tv_usec = curtime.tv_nsec / 1000;
+#else
+ do_gettimeofday(&curtime);
+ ts->tv_usec = curtime.tv_usec;
+ ts->tv_nsec = curtime.tv_usec * 1000;
+#endif
+ ts->tv_sec = curtime.tv_sec;
+}
+
+uint32
+osl_do_gettimediff(struct osl_timespec *cur_ts, struct osl_timespec *old_ts)
+{
+ uint32 diff_s, diff_us, total_diff_us;
+ bool pgc_g = FALSE;
+
+ diff_s = (uint32)cur_ts->tv_sec - (uint32)old_ts->tv_sec;
+ pgc_g = (cur_ts->tv_usec > old_ts->tv_usec) ? TRUE : FALSE;
+ diff_us = pgc_g ? (cur_ts->tv_usec - old_ts->tv_usec) : (old_ts->tv_usec - cur_ts->tv_usec);
+ total_diff_us = pgc_g ? (diff_s * 1000000 + diff_us) : (diff_s * 1000000 - diff_us);
+ return total_diff_us;
+}
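+
+/* Worked example (illustrative): with old = {10 s, 900000 us} and
+ * cur = {12 s, 100000 us}, tv_usec did not increase, so diff_s = 2,
+ * diff_us = 800000, and the result is 2 * 1000000 - 800000 =
+ * 1200000 us, i.e. 1.2 s.
+ */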
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)
+void
+osl_get_monotonic_boottime(struct osl_timespec *ts)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)
+ struct timespec64 curtime;
+#else
+ struct timespec curtime;
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)
+ curtime = ktime_to_timespec64(ktime_get_boottime());
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
+ curtime = ktime_to_timespec(ktime_get_boottime());
+#else
+ get_monotonic_boottime(&curtime);
+#endif
+ ts->tv_sec = curtime.tv_sec;
+ ts->tv_nsec = curtime.tv_nsec;
+ ts->tv_usec = curtime.tv_nsec / 1000;
+}
+#endif
diff --git a/bcmdhd.101.10.361.x/linux_osl_priv.h b/bcmdhd.101.10.361.x/linux_osl_priv.h
new file mode 100755
index 0000000..7f14a92
--- /dev/null
+++ b/bcmdhd.101.10.361.x/linux_osl_priv.h
@@ -0,0 +1,188 @@
+/*
+ * Private header file for Linux OS Independent Layer
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _LINUX_OSL_PRIV_H_
+#define _LINUX_OSL_PRIV_H_
+
+#include <osl.h>
+
+#define OS_HANDLE_MAGIC 0x1234abcd /* Magic # to recognize osh */
+#define BCM_MEM_FILENAME_LEN 24 /* Mem. filename length */
+
+/* dependency check */
+#if !defined(BCMPCIE) && defined(DHD_USE_STATIC_CTRLBUF)
+#error "DHD_USE_STATIC_CTRLBUF suppored PCIE target only"
+#endif /* !BCMPCIE && DHD_USE_STATIC_CTRLBUF */
+
+#define OSL_MEMLIST_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define OSL_MEMLIST_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
+
+#define OSL_STATIC_BUF_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define OSL_STATIC_BUF_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
+
+#define OSL_STATIC_PKT_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define OSL_STATIC_PKT_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
+
+#define OSL_PKTLIST_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define OSL_PKTLIST_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
+
+#define OSL_CTRACE_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define OSL_CTRACE_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
+
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+#ifdef DHD_USE_STATIC_CTRLBUF
+#define DHD_SKB_1PAGE_BUFSIZE (PAGE_SIZE*1)
+#define DHD_SKB_2PAGE_BUFSIZE (PAGE_SIZE*2)
+#define DHD_SKB_4PAGE_BUFSIZE (PAGE_SIZE*4)
+
+#define PREALLOC_FREE_MAGIC 0xFEDC
+#define PREALLOC_USED_MAGIC 0xFCDE
+#else
+#define DHD_SKB_HDRSIZE 336
+#define DHD_SKB_1PAGE_BUFSIZE ((PAGE_SIZE*1)-DHD_SKB_HDRSIZE)
+#define DHD_SKB_2PAGE_BUFSIZE ((PAGE_SIZE*2)-DHD_SKB_HDRSIZE)
+#define DHD_SKB_4PAGE_BUFSIZE ((PAGE_SIZE*4)-DHD_SKB_HDRSIZE)
+#endif /* DHD_USE_STATIC_CTRLBUF */
+
+#define STATIC_BUF_MAX_NUM 16
+#define STATIC_BUF_SIZE (PAGE_SIZE*2)
+#define STATIC_BUF_TOTAL_LEN (STATIC_BUF_MAX_NUM * STATIC_BUF_SIZE)
+
+typedef struct bcm_static_buf {
+ spinlock_t static_lock;
+ unsigned char *buf_ptr;
+ unsigned char buf_use[STATIC_BUF_MAX_NUM];
+} bcm_static_buf_t;
+
+extern bcm_static_buf_t *bcm_static_buf;
+
+#ifdef DHD_USE_STATIC_CTRLBUF
+#define STATIC_PKT_4PAGE_NUM 0
+#define DHD_SKB_MAX_BUFSIZE DHD_SKB_2PAGE_BUFSIZE
+#elif defined(ENHANCED_STATIC_BUF)
+#define STATIC_PKT_4PAGE_NUM 1
+#define DHD_SKB_MAX_BUFSIZE DHD_SKB_4PAGE_BUFSIZE
+#else
+#define STATIC_PKT_4PAGE_NUM 0
+#define DHD_SKB_MAX_BUFSIZE DHD_SKB_2PAGE_BUFSIZE
+#endif /* DHD_USE_STATIC_CTRLBUF */
+
+#ifdef DHD_USE_STATIC_CTRLBUF
+#define STATIC_PKT_1PAGE_NUM 0
+/* Should match DHD_SKB_2PAGE_BUF_NUM */
+#define STATIC_PKT_2PAGE_NUM 192
+#else
+#define STATIC_PKT_1PAGE_NUM 8
+#define STATIC_PKT_2PAGE_NUM 8
+#endif /* DHD_USE_STATIC_CTRLBUF */
+
+#define STATIC_PKT_1_2PAGE_NUM \
+ ((STATIC_PKT_1PAGE_NUM) + (STATIC_PKT_2PAGE_NUM))
+#define STATIC_PKT_MAX_NUM \
+ ((STATIC_PKT_1_2PAGE_NUM) + (STATIC_PKT_4PAGE_NUM))
+
+typedef struct bcm_static_pkt {
+#ifdef DHD_USE_STATIC_CTRLBUF
+ struct sk_buff *skb_8k[STATIC_PKT_2PAGE_NUM];
+ unsigned char pkt_invalid[STATIC_PKT_2PAGE_NUM];
+ spinlock_t osl_pkt_lock;
+ uint32 last_allocated_index;
+#else
+ struct sk_buff *skb_4k[STATIC_PKT_1PAGE_NUM];
+ struct sk_buff *skb_8k[STATIC_PKT_2PAGE_NUM];
+#ifdef ENHANCED_STATIC_BUF
+ struct sk_buff *skb_16k;
+#endif /* ENHANCED_STATIC_BUF */
+ struct semaphore osl_pkt_sem;
+#endif /* DHD_USE_STATIC_CTRLBUF */
+ unsigned char pkt_use[STATIC_PKT_MAX_NUM];
+} bcm_static_pkt_t;
+
+extern bcm_static_pkt_t *bcm_static_skb;
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+
+typedef struct bcm_mem_link {
+ struct bcm_mem_link *prev;
+ struct bcm_mem_link *next;
+ uint size;
+ int line;
+ void *osh;
+ char file[BCM_MEM_FILENAME_LEN];
+} bcm_mem_link_t;
+
+struct osl_cmn_info {
+ atomic_t malloced;
+ atomic_t pktalloced; /* Number of allocated packet buffers */
+ spinlock_t dbgmem_lock;
+ bcm_mem_link_t *dbgmem_list;
+ bcm_mem_link_t *dbgvmem_list;
+#ifdef BCMDBG_PKT /* pkt logging for debugging */
+ spinlock_t pktlist_lock;
+ pktlist_info_t pktlist;
+#endif /* BCMDBG_PKT */
+ spinlock_t pktalloc_lock;
+ atomic_t refcount; /* Number of references to this shared structure. */
+};
+typedef struct osl_cmn_info osl_cmn_t;
+
+#if defined(AXI_TIMEOUTS_NIC)
+typedef uint32 (*bpt_cb_fn)(void *ctx, void *addr);
+#endif /* AXI_TIMEOUTS_NIC */
+
+struct osl_info {
+ osl_pubinfo_t pub;
+ uint32 flags; /* If specific cases to be handled in the OSL */
+ uint magic;
+ void *pdev;
+ uint failed;
+ uint bustype;
+ osl_cmn_t *cmn; /* Common OSL-related data shared between two OSHs */
+
+ /* for host drivers, a bus handle is needed when reading from and/or writing to dongle
+ * registers; however, the ai/si utilities only pass the osh handle to R_REG and W_REG.
+ * As a workaround, save the bus handle here.
+ */
+ void *bus_handle;
+#ifdef BCMDBG_CTRACE
+ spinlock_t ctrace_lock;
+ struct list_head ctrace_list;
+ int ctrace_num;
+#endif /* BCMDBG_CTRACE */
+#if defined(AXI_TIMEOUTS_NIC)
+ bpt_cb_fn bpt_cb;
+ void *sih;
+#endif /* AXI_TIMEOUTS_NIC */
+#ifdef USE_DMA_LOCK
+ spinlock_t dma_lock;
+ bool dma_lock_bh;
+#endif /* USE_DMA_LOCK */
+#ifdef DHD_MAP_LOGGING
+ void *dhd_map_log;
+ void *dhd_unmap_log;
+#endif /* DHD_MAP_LOGGING */
+#if defined(CUSTOMER_HW_AMLOGIC) && defined(USE_AML_PCIE_TEE_MEM)
+ struct device *tee_mem_dev;
+#endif /* CUSTOMER_HW_AMLOGIC && USE_AML_PCIE_TEE_MEM */
+};
+
+#endif /* _LINUX_OSL_PRIV_H_ */
diff --git a/bcmdhd.101.10.361.x/linux_pkt.c b/bcmdhd.101.10.361.x/linux_pkt.c
new file mode 100755
index 0000000..f32f8e4
--- /dev/null
+++ b/bcmdhd.101.10.361.x/linux_pkt.c
@@ -0,0 +1,897 @@
+/*
+ * Linux Packet (skb) interface
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#include <typedefs.h>
+#include <bcmendian.h>
+#include <linuxver.h>
+#include <bcmdefs.h>
+
+#include <linux/random.h>
+
+#include <osl.h>
+#include <bcmutils.h>
+#include <pcicfg.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+
+#if defined(BCMASSERT_LOG) && !defined(OEM_ANDROID)
+#include <bcm_assert_log.h>
+#endif
+#include <linux/fs.h>
+#include "linux_osl_priv.h"
+
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+
+bcm_static_buf_t *bcm_static_buf = 0;
+bcm_static_pkt_t *bcm_static_skb = 0;
+
+void* wifi_platform_prealloc(void *adapter, int section, unsigned long size);
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+
+#ifdef BCM_OBJECT_TRACE
+/* don't clear the first 4 byte that is the pkt sn */
+#define OSL_PKTTAG_CLEAR(p) \
+do { \
+ struct sk_buff *s = (struct sk_buff *)(p); \
+ uint tagsz = sizeof(s->cb); \
+ ASSERT(OSL_PKTTAG_SZ <= tagsz); \
+ memset(s->cb + 4, 0, tagsz - 4); \
+} while (0)
+#else
+#define OSL_PKTTAG_CLEAR(p) \
+do { \
+ struct sk_buff *s = (struct sk_buff *)(p); \
+ uint tagsz = sizeof(s->cb); \
+ ASSERT(OSL_PKTTAG_SZ <= tagsz); \
+ memset(s->cb, 0, tagsz); \
+} while (0)
+#endif /* BCM_OBJECT_TRACE */
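+
+/* Note: the pkttag lives in the sk_buff control block (skb->cb, 48
+ * bytes). OSL_PKTTAG_CLEAR() zeroes it before handing packets across
+ * layers -- preserving the first 4 bytes when BCM_OBJECT_TRACE keeps
+ * the packet serial number there -- so stale cb contents never leak.
+ */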
+
+int osl_static_mem_init(osl_t *osh, void *adapter)
+{
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+ if (!bcm_static_buf && adapter) {
+ if (!(bcm_static_buf = (bcm_static_buf_t *)wifi_platform_prealloc(adapter,
+ DHD_PREALLOC_OSL_BUF, STATIC_BUF_SIZE + STATIC_BUF_TOTAL_LEN))) {
+ printf("can not alloc static buf!\n");
+ bcm_static_skb = NULL;
+ ASSERT(osh->magic == OS_HANDLE_MAGIC);
+ return -ENOMEM;
+ } else {
+ printf("succeed to alloc static buf\n");
+ }
+
+ spin_lock_init(&bcm_static_buf->static_lock);
+
+ bcm_static_buf->buf_ptr = (unsigned char *)bcm_static_buf + STATIC_BUF_SIZE;
+ }
+
+#if defined(BCMSDIO) || defined(DHD_USE_STATIC_CTRLBUF)
+ if (!bcm_static_skb && adapter) {
+ int i;
+ void *skb_buff_ptr = 0;
+ bcm_static_skb = (bcm_static_pkt_t *)((char *)bcm_static_buf + 2048);
+ skb_buff_ptr = wifi_platform_prealloc(adapter, DHD_PREALLOC_SKB_BUF, 0);
+ if (!skb_buff_ptr) {
+ printf("cannot alloc static buf!\n");
+ bcm_static_buf = NULL;
+ bcm_static_skb = NULL;
+ ASSERT(osh->magic == OS_HANDLE_MAGIC);
+ return -ENOMEM;
+ }
+
+ bcopy(skb_buff_ptr, bcm_static_skb, sizeof(struct sk_buff *) *
+ (STATIC_PKT_MAX_NUM));
+ for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
+ bcm_static_skb->pkt_use[i] = 0;
+ }
+
+#ifdef DHD_USE_STATIC_CTRLBUF
+ spin_lock_init(&bcm_static_skb->osl_pkt_lock);
+ bcm_static_skb->last_allocated_index = 0;
+#else
+ sema_init(&bcm_static_skb->osl_pkt_sem, 1);
+#endif /* DHD_USE_STATIC_CTRLBUF */
+ }
+#endif /* BCMSDIO || DHD_USE_STATIC_CTRLBUF */
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+
+ return 0;
+}
+
+int osl_static_mem_deinit(osl_t *osh, void *adapter)
+{
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+ if (bcm_static_buf) {
+ bcm_static_buf = 0;
+ }
+#ifdef BCMSDIO
+ if (bcm_static_skb) {
+ bcm_static_skb = 0;
+ }
+#endif /* BCMSDIO */
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+ return 0;
+}
+
+static struct sk_buff *
+BCMFASTPATH(osl_alloc_skb)(osl_t *osh, unsigned int len)
+{
+ struct sk_buff *skb;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
+ gfp_t flags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
+
+#ifdef DHD_USE_ATOMIC_PKTGET
+ flags = GFP_ATOMIC;
+#endif /* DHD_USE_ATOMIC_PKTGET */
+ skb = __dev_alloc_skb(len, flags);
+#else
+ skb = dev_alloc_skb(len);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) */
+
+ return skb;
+}
+
+/* Convert a driver packet to native(OS) packet
+ * In the process, packettag is zeroed out before sending up
+ * IP code depends on skb->cb to be setup correctly with various options
+ * In our case, that means it should be 0
+ */
+struct sk_buff *
+BCMFASTPATH(osl_pkt_tonative)(osl_t *osh, void *pkt)
+{
+ struct sk_buff *nskb;
+#ifdef BCMDBG_CTRACE
+ struct sk_buff *nskb1, *nskb2;
+#endif
+#ifdef BCMDBG_PKT
+ unsigned long flags;
+#endif
+
+ if (osh->pub.pkttag)
+ OSL_PKTTAG_CLEAR(pkt);
+
+ /* Decrement the packet counter */
+ for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next) {
+#ifdef BCMDBG_PKT
+ OSL_PKTLIST_LOCK(&osh->cmn->pktlist_lock, flags);
+ pktlist_remove(&(osh->cmn->pktlist), (void *) nskb);
+ OSL_PKTLIST_UNLOCK(&osh->cmn->pktlist_lock, flags);
+#endif /* BCMDBG_PKT */
+ atomic_sub(PKTISCHAINED(nskb) ? PKTCCNT(nskb) : 1, &osh->cmn->pktalloced);
+
+#ifdef BCMDBG_CTRACE
+ for (nskb1 = nskb; nskb1 != NULL; nskb1 = nskb2) {
+ if (PKTISCHAINED(nskb1)) {
+ nskb2 = PKTCLINK(nskb1);
+ } else {
+ nskb2 = NULL;
+ }
+
+ DEL_CTRACE(osh, nskb1);
+ }
+#endif /* BCMDBG_CTRACE */
+ }
+ return (struct sk_buff *)pkt;
+}
+
+/* Convert a native(OS) packet to driver packet.
+ * In the process, native packet is destroyed, there is no copying
+ * Also, a packettag is zeroed out
+ */
+#ifdef BCMDBG_PKT
+void *
+osl_pkt_frmnative(osl_t *osh, void *pkt, int line, char *file)
+#else /* BCMDBG_PKT pkt logging for debugging */
+#ifdef BCMDBG_CTRACE
+void *
+BCMFASTPATH(osl_pkt_frmnative)(osl_t *osh, void *pkt, int line, char *file)
+#else
+void *
+BCMFASTPATH(osl_pkt_frmnative)(osl_t *osh, void *pkt)
+#endif /* BCMDBG_CTRACE */
+#endif /* BCMDBG_PKT */
+{
+ struct sk_buff *cskb;
+ struct sk_buff *nskb;
+ unsigned long pktalloced = 0;
+#ifdef BCMDBG_PKT
+ unsigned long flags;
+#endif
+
+ if (osh->pub.pkttag)
+ OSL_PKTTAG_CLEAR(pkt);
+
+ /* walk the PKTCLINK() list */
+ for (cskb = (struct sk_buff *)pkt;
+ cskb != NULL;
+ cskb = PKTISCHAINED(cskb) ? PKTCLINK(cskb) : NULL) {
+
+ /* walk the pkt buffer list */
+ for (nskb = cskb; nskb; nskb = nskb->next) {
+
+ /* Increment the packet counter */
+ pktalloced++;
+
+ /* clean the 'prev' pointer:
+ * kernel 3.18 leaves skb->prev pointing at the skb itself
+ * to indicate a non-fragmented skb
+ */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
+ nskb->prev = NULL;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0) */
+
+#ifdef BCMDBG_PKT
+ OSL_PKTLIST_LOCK(&osh->cmn->pktlist_lock, flags);
+ pktlist_add(&(osh->cmn->pktlist), (void *) nskb, line, file);
+ OSL_PKTLIST_UNLOCK(&osh->cmn->pktlist_lock, flags);
+#endif /* BCMDBG_PKT */
+
+#ifdef BCMDBG_CTRACE
+ ADD_CTRACE(osh, nskb, file, line);
+#endif /* BCMDBG_CTRACE */
+ }
+ }
+
+ /* Increment the packet counter */
+ atomic_add(pktalloced, &osh->cmn->pktalloced);
+
+ return (void *)pkt;
+}
+
+/* Return a new packet. zero out pkttag */
+#ifdef BCMDBG_PKT
+void *
+BCMFASTPATH(linux_pktget)(osl_t *osh, uint len, int line, char *file)
+#else /* BCMDBG_PKT */
+#ifdef BCMDBG_CTRACE
+void *
+BCMFASTPATH(linux_pktget)(osl_t *osh, uint len, int line, char *file)
+#else
+#ifdef BCM_OBJECT_TRACE
+void *
+BCMFASTPATH(linux_pktget)(osl_t *osh, uint len, int line, const char *caller)
+#else
+void *
+BCMFASTPATH(linux_pktget)(osl_t *osh, uint len)
+#endif /* BCM_OBJECT_TRACE */
+#endif /* BCMDBG_CTRACE */
+#endif /* BCMDBG_PKT */
+{
+ struct sk_buff *skb;
+#ifdef BCMDBG_PKT
+ unsigned long flags;
+#endif
+ uchar num = 0;
+ if (lmtest != FALSE) {
+ get_random_bytes(&num, sizeof(uchar));
+ if ((num + 1) <= (256 * lmtest / 100))
+ return NULL;
+ }
+
+ if ((skb = osl_alloc_skb(osh, len))) {
+#ifdef BCMDBG
+ skb_put(skb, len);
+#else
+ skb->tail += len;
+ skb->len += len;
+#endif
+ skb->priority = 0;
+
+#ifdef BCMDBG_CTRACE
+ ADD_CTRACE(osh, skb, file, line);
+#endif
+#ifdef BCMDBG_PKT
+ OSL_PKTLIST_LOCK(&osh->cmn->pktlist_lock, flags);
+ pktlist_add(&(osh->cmn->pktlist), (void *) skb, line, file);
+ OSL_PKTLIST_UNLOCK(&osh->cmn->pktlist_lock, flags);
+#endif
+ atomic_inc(&osh->cmn->pktalloced);
+#ifdef BCM_OBJECT_TRACE
+ bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, caller, line);
+#endif /* BCM_OBJECT_TRACE */
+ }
+
+ return ((void*) skb);
+}
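+
+/* Usage sketch (illustrative, default non-debug build): a minimal
+ * get/free pair. The skb comes back with 'len' bytes already committed
+ * (tail and len advanced), not as an empty buffer:
+ *
+ *   void *pkt = linux_pktget(osh, 2048);
+ *   if (pkt != NULL)
+ *           linux_pktfree(osh, pkt, FALSE);
+ */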
+
+/* Free the driver packet. Free the tag if present */
+#ifdef BCM_OBJECT_TRACE
+void
+BCMFASTPATH(linux_pktfree)(osl_t *osh, void *p, bool send, int line, const char *caller)
+#else
+void
+BCMFASTPATH(linux_pktfree)(osl_t *osh, void *p, bool send)
+#endif /* BCM_OBJECT_TRACE */
+{
+ struct sk_buff *skb, *nskb;
+#ifdef BCMDBG_PKT
+ unsigned long flags;
+#endif
+ if (osh == NULL)
+ return;
+
+ skb = (struct sk_buff*) p;
+
+ if (send) {
+ if (osh->pub.tx_fn) {
+ osh->pub.tx_fn(osh->pub.tx_ctx, p, 0);
+ }
+ } else {
+ if (osh->pub.rx_fn) {
+ osh->pub.rx_fn(osh->pub.rx_ctx, p);
+ }
+ }
+
+ PKTDBG_TRACE(osh, (void *) skb, PKTLIST_PKTFREE);
+
+#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_CTRLBUF)
+ if (skb && (skb->mac_len == PREALLOC_USED_MAGIC)) {
+ printf("%s: pkt %p is from static pool\n",
+ __FUNCTION__, p);
+ dump_stack();
+ return;
+ }
+
+ if (skb && (skb->mac_len == PREALLOC_FREE_MAGIC)) {
+ printf("%s: pkt %p is from static pool and not in used\n",
+ __FUNCTION__, p);
+ dump_stack();
+ return;
+ }
+#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_CTRLBUF */
+
+ /* perversion: we use skb->next to chain multi-skb packets */
+ while (skb) {
+ nskb = skb->next;
+ skb->next = NULL;
+
+#ifdef BCMDBG_CTRACE
+ DEL_CTRACE(osh, skb);
+#endif
+#ifdef BCMDBG_PKT
+ OSL_PKTLIST_LOCK(&osh->cmn->pktlist_lock, flags);
+ pktlist_remove(&(osh->cmn->pktlist), (void *) skb);
+ OSL_PKTLIST_UNLOCK(&osh->cmn->pktlist_lock, flags);
+#endif
+
+#ifdef BCM_OBJECT_TRACE
+ bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, caller, line);
+#endif /* BCM_OBJECT_TRACE */
+
+ if (skb->destructor || irqs_disabled()) {
+ /* cannot kfree_skb() on hard IRQ (net/core/skbuff.c) if
+ * destructor exists
+ */
+ dev_kfree_skb_any(skb);
+ } else {
+ /* can free immediately (even in_irq()) if destructor
+ * does not exist
+ */
+ dev_kfree_skb(skb);
+ }
+ atomic_dec(&osh->cmn->pktalloced);
+ skb = nskb;
+ }
+}
+
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+void*
+osl_pktget_static(osl_t *osh, uint len)
+{
+ int i = 0;
+ struct sk_buff *skb;
+#ifdef DHD_USE_STATIC_CTRLBUF
+ unsigned long flags;
+#endif /* DHD_USE_STATIC_CTRLBUF */
+
+ if (!bcm_static_skb)
+ return linux_pktget(osh, len);
+
+ if (len > DHD_SKB_MAX_BUFSIZE) {
+ printf("%s: attempt to allocate huge packet (0x%x)\n", __FUNCTION__, len);
+ return linux_pktget(osh, len);
+ }
+
+#ifdef DHD_USE_STATIC_CTRLBUF
+ OSL_STATIC_PKT_LOCK(&bcm_static_skb->osl_pkt_lock, flags);
+
+ if (len <= DHD_SKB_2PAGE_BUFSIZE) {
+ uint32 index;
+ for (i = 0; i < STATIC_PKT_2PAGE_NUM; i++) {
+ index = bcm_static_skb->last_allocated_index % STATIC_PKT_2PAGE_NUM;
+ bcm_static_skb->last_allocated_index++;
+ if (bcm_static_skb->skb_8k[index] &&
+ bcm_static_skb->pkt_use[index] == 0) {
+ break;
+ }
+ }
+
+ if (i < STATIC_PKT_2PAGE_NUM) {
+ bcm_static_skb->pkt_use[index] = 1;
+ skb = bcm_static_skb->skb_8k[index];
+ skb->data = skb->head;
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+ skb_set_tail_pointer(skb, PKT_HEADROOM_DEFAULT);
+#else
+ skb->tail = skb->data + PKT_HEADROOM_DEFAULT;
+#endif /* NET_SKBUFF_DATA_USES_OFFSET */
+ skb->data += PKT_HEADROOM_DEFAULT;
+ skb->cloned = 0;
+ skb->priority = 0;
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+ skb_set_tail_pointer(skb, len);
+#else
+ skb->tail = skb->data + len;
+#endif /* NET_SKBUFF_DATA_USES_OFFSET */
+ skb->len = len;
+ skb->mac_len = PREALLOC_USED_MAGIC;
+ OSL_STATIC_PKT_UNLOCK(&bcm_static_skb->osl_pkt_lock, flags);
+ return skb;
+ }
+ }
+
+ OSL_STATIC_PKT_UNLOCK(&bcm_static_skb->osl_pkt_lock, flags);
+ printf("%s: all static pkt in use!\n", __FUNCTION__);
+ return NULL;
+#else
+ down(&bcm_static_skb->osl_pkt_sem);
+
+ if (len <= DHD_SKB_1PAGE_BUFSIZE) {
+ for (i = 0; i < STATIC_PKT_1PAGE_NUM; i++) {
+ if (bcm_static_skb->skb_4k[i] &&
+ bcm_static_skb->pkt_use[i] == 0) {
+ break;
+ }
+ }
+
+ if (i != STATIC_PKT_1PAGE_NUM) {
+ bcm_static_skb->pkt_use[i] = 1;
+
+ skb = bcm_static_skb->skb_4k[i];
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+ skb_set_tail_pointer(skb, len);
+#else
+ skb->tail = skb->data + len;
+#endif /* NET_SKBUFF_DATA_USES_OFFSET */
+ skb->len = len;
+
+ up(&bcm_static_skb->osl_pkt_sem);
+ return skb;
+ }
+ }
+
+ if (len <= DHD_SKB_2PAGE_BUFSIZE) {
+ for (i = STATIC_PKT_1PAGE_NUM; i < STATIC_PKT_1_2PAGE_NUM; i++) {
+ if (bcm_static_skb->skb_8k[i - STATIC_PKT_1PAGE_NUM] &&
+ bcm_static_skb->pkt_use[i] == 0) {
+ break;
+ }
+ }
+
+ if ((i >= STATIC_PKT_1PAGE_NUM) && (i < STATIC_PKT_1_2PAGE_NUM)) {
+ bcm_static_skb->pkt_use[i] = 1;
+ skb = bcm_static_skb->skb_8k[i - STATIC_PKT_1PAGE_NUM];
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+ skb_set_tail_pointer(skb, len);
+#else
+ skb->tail = skb->data + len;
+#endif /* NET_SKBUFF_DATA_USES_OFFSET */
+ skb->len = len;
+
+ up(&bcm_static_skb->osl_pkt_sem);
+ return skb;
+ }
+ }
+
+#if defined (ENHANCED_STATIC_BUF)
+ if (bcm_static_skb->skb_16k &&
+ bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM - 1] == 0) {
+ bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM - 1] = 1;
+
+ skb = bcm_static_skb->skb_16k;
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+ skb_set_tail_pointer(skb, len);
+#else
+ skb->tail = skb->data + len;
+#endif /* NET_SKBUFF_DATA_USES_OFFSET */
+ skb->len = len;
+
+ up(&bcm_static_skb->osl_pkt_sem);
+ return skb;
+ }
+#endif /* ENHANCED_STATIC_BUF */
+
+ up(&bcm_static_skb->osl_pkt_sem);
+ printf("%s: all static pkt in use!\n", __FUNCTION__);
+ return linux_pktget(osh, len);
+#endif /* DHD_USE_STATIC_CTRLBUF */
+}
+
+void
+osl_pktfree_static(osl_t *osh, void *p, bool send)
+{
+ int i;
+#ifdef DHD_USE_STATIC_CTRLBUF
+ struct sk_buff *skb = (struct sk_buff *)p;
+ unsigned long flags;
+#endif /* DHD_USE_STATIC_CTRLBUF */
+
+ if (!p) {
+ return;
+ }
+
+ if (!bcm_static_skb) {
+ linux_pktfree(osh, p, send);
+ return;
+ }
+
+#ifdef DHD_USE_STATIC_CTRLBUF
+ OSL_STATIC_PKT_LOCK(&bcm_static_skb->osl_pkt_lock, flags);
+
+ for (i = 0; i < STATIC_PKT_2PAGE_NUM; i++) {
+ if (p == bcm_static_skb->skb_8k[i]) {
+ if (bcm_static_skb->pkt_use[i] == 0) {
+				printf("%s: static pkt idx %d(%p) is double freed\n",
+ __FUNCTION__, i, p);
+ } else {
+ bcm_static_skb->pkt_use[i] = 0;
+ }
+
+ if (skb->mac_len != PREALLOC_USED_MAGIC) {
+				printf("%s: static pkt idx %d(%p) is not in use\n",
+ __FUNCTION__, i, p);
+ }
+
+ skb->mac_len = PREALLOC_FREE_MAGIC;
+ OSL_STATIC_PKT_UNLOCK(&bcm_static_skb->osl_pkt_lock, flags);
+ return;
+ }
+ }
+
+ OSL_STATIC_PKT_UNLOCK(&bcm_static_skb->osl_pkt_lock, flags);
+ printf("%s: packet %p does not exist in the pool\n", __FUNCTION__, p);
+#else
+ down(&bcm_static_skb->osl_pkt_sem);
+ for (i = 0; i < STATIC_PKT_1PAGE_NUM; i++) {
+ if (p == bcm_static_skb->skb_4k[i]) {
+ bcm_static_skb->pkt_use[i] = 0;
+ up(&bcm_static_skb->osl_pkt_sem);
+ return;
+ }
+ }
+
+ for (i = STATIC_PKT_1PAGE_NUM; i < STATIC_PKT_1_2PAGE_NUM; i++) {
+ if (p == bcm_static_skb->skb_8k[i - STATIC_PKT_1PAGE_NUM]) {
+ bcm_static_skb->pkt_use[i] = 0;
+ up(&bcm_static_skb->osl_pkt_sem);
+ return;
+ }
+ }
+#ifdef ENHANCED_STATIC_BUF
+ if (p == bcm_static_skb->skb_16k) {
+ bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM - 1] = 0;
+ up(&bcm_static_skb->osl_pkt_sem);
+ return;
+ }
+#endif
+ up(&bcm_static_skb->osl_pkt_sem);
+#endif /* DHD_USE_STATIC_CTRLBUF */
+ linux_pktfree(osh, p, send);
+}
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
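+
+/* Usage sketch (illustrative only, not part of the driver): a caller that
+ * prefers the preallocated pool but tolerates dynamic fallback might do:
+ *
+ *	skb = osl_pktget_static(osh, DHD_SKB_1PAGE_BUFSIZE);
+ *	if (skb != NULL) {
+ *		... fill and hand off ...
+ *		osl_pktfree_static(osh, skb, FALSE);
+ *	}
+ *
+ * Both calls fall back to linux_pktget()/linux_pktfree() when the static
+ * pool is absent, and osl_pktget_static() also falls back (or, in the
+ * DHD_USE_STATIC_CTRLBUF variant, returns NULL) when the pool is exhausted.
+ */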
+
+/* Clone a packet.
+ * The pkttag contents are NOT cloned.
+ */
+#ifdef BCMDBG_PKT
+void *
+osl_pktdup(osl_t *osh, void *skb, int line, char *file)
+#else /* BCMDBG_PKT */
+#ifdef BCMDBG_CTRACE
+void *
+osl_pktdup(osl_t *osh, void *skb, int line, char *file)
+#else
+#ifdef BCM_OBJECT_TRACE
+void *
+osl_pktdup(osl_t *osh, void *skb, int line, const char *caller)
+#else
+void *
+osl_pktdup(osl_t *osh, void *skb)
+#endif /* BCM_OBJECT_TRACE */
+#endif /* BCMDBG_CTRACE */
+#endif /* BCMDBG_PKT */
+{
+ void * p;
+#ifdef BCMDBG_PKT
+ unsigned long irqflags;
+#endif
+
+ ASSERT(!PKTISCHAINED(skb));
+
+ if ((p = skb_clone((struct sk_buff *)skb, GFP_ATOMIC)) == NULL)
+ return NULL;
+
+ /* skb_clone copies skb->cb.. we don't want that */
+ if (osh->pub.pkttag)
+ OSL_PKTTAG_CLEAR(p);
+
+ /* Increment the packet counter */
+ atomic_inc(&osh->cmn->pktalloced);
+#ifdef BCM_OBJECT_TRACE
+ bcm_object_trace_opr(p, BCM_OBJDBG_ADD_PKT, caller, line);
+#endif /* BCM_OBJECT_TRACE */
+
+#ifdef BCMDBG_CTRACE
+ ADD_CTRACE(osh, (struct sk_buff *)p, file, line);
+#endif
+#ifdef BCMDBG_PKT
+ OSL_PKTLIST_LOCK(&osh->cmn->pktlist_lock, irqflags);
+ pktlist_add(&(osh->cmn->pktlist), (void *) p, line, file);
+ OSL_PKTLIST_UNLOCK(&osh->cmn->pktlist_lock, irqflags);
+#endif
+ return (p);
+}
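+
+/* The prototype ladder above exists because each debug flavor (BCMDBG_PKT,
+ * BCMDBG_CTRACE, BCM_OBJECT_TRACE) threads its own call-site bookkeeping
+ * (file/line or caller name) through the clone path; exactly one of the
+ * signatures is compiled into a given build.
+ */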
+
+#ifdef BCMDBG_CTRACE
+int osl_pkt_is_frmnative(osl_t *osh, struct sk_buff *pkt)
+{
+ unsigned long flags;
+ struct sk_buff *skb;
+ int ck = FALSE;
+
+ OSL_CTRACE_LOCK(&osh->ctrace_lock, flags);
+
+ list_for_each_entry(skb, &osh->ctrace_list, ctrace_list) {
+ if (pkt == skb) {
+ ck = TRUE;
+ break;
+ }
+ }
+
+ OSL_CTRACE_UNLOCK(&osh->ctrace_lock, flags);
+ return ck;
+}
+
+void osl_ctrace_dump(osl_t *osh, struct bcmstrbuf *b)
+{
+ unsigned long flags;
+ struct sk_buff *skb;
+ int idx = 0;
+ int i, j;
+
+ OSL_CTRACE_LOCK(&osh->ctrace_lock, flags);
+
+ if (b != NULL)
+		bcm_bprintf(b, " Total %d skb not freed\n", osh->ctrace_num);
+	else
+		printf(" Total %d skb not freed\n", osh->ctrace_num);
+
+ list_for_each_entry(skb, &osh->ctrace_list, ctrace_list) {
+ if (b != NULL)
+ bcm_bprintf(b, "[%d] skb %p:\n", ++idx, skb);
+ else
+ printk("[%d] skb %p:\n", ++idx, skb);
+
+ for (i = 0; i < skb->ctrace_count; i++) {
+ j = (skb->ctrace_start + i) % CTRACE_NUM;
+ if (b != NULL)
+ bcm_bprintf(b, " [%s(%d)]\n", skb->func[j], skb->line[j]);
+ else
+ printk(" [%s(%d)]\n", skb->func[j], skb->line[j]);
+ }
+ if (b != NULL)
+ bcm_bprintf(b, "\n");
+ else
+ printk("\n");
+ }
+
+ OSL_CTRACE_UNLOCK(&osh->ctrace_lock, flags);
+
+ return;
+}
+#endif /* BCMDBG_CTRACE */
+
+#ifdef BCMDBG_PKT
+#ifdef BCMDBG_PTRACE
+void
+osl_pkttrace(osl_t *osh, void *pkt, uint16 bit)
+{
+ pktlist_trace(&(osh->cmn->pktlist), pkt, bit);
+}
+#endif /* BCMDBG_PTRACE */
+
+char *
+osl_pktlist_dump(osl_t *osh, char *buf)
+{
+ pktlist_dump(&(osh->cmn->pktlist), buf);
+ return buf;
+}
+
+void
+osl_pktlist_add(osl_t *osh, void *p, int line, char *file)
+{
+ unsigned long flags;
+ OSL_PKTLIST_LOCK(&osh->cmn->pktlist_lock, flags);
+ pktlist_add(&(osh->cmn->pktlist), p, line, file);
+ OSL_PKTLIST_UNLOCK(&osh->cmn->pktlist_lock, flags);
+}
+
+void
+osl_pktlist_remove(osl_t *osh, void *p)
+{
+ unsigned long flags;
+ OSL_PKTLIST_LOCK(&osh->cmn->pktlist_lock, flags);
+ pktlist_remove(&(osh->cmn->pktlist), p);
+ OSL_PKTLIST_UNLOCK(&osh->cmn->pktlist_lock, flags);
+}
+#endif /* BCMDBG_PKT */
+
+/*
+ * BINOSL selects the slightly slower function-call-based binary compatible osl.
+ */
+#ifdef BINOSL
+bool
+osl_pktshared(void *skb)
+{
+ return (((struct sk_buff*)skb)->cloned);
+}
+
+uchar*
+osl_pktdata(osl_t *osh, void *skb)
+{
+ return (((struct sk_buff*)skb)->data);
+}
+
+uint
+osl_pktlen(osl_t *osh, void *skb)
+{
+ return (((struct sk_buff*)skb)->len);
+}
+
+uint
+osl_pktheadroom(osl_t *osh, void *skb)
+{
+ return (uint) skb_headroom((struct sk_buff *) skb);
+}
+
+uint
+osl_pkttailroom(osl_t *osh, void *skb)
+{
+ return (uint) skb_tailroom((struct sk_buff *) skb);
+}
+
+void*
+osl_pktnext(osl_t *osh, void *skb)
+{
+ return (((struct sk_buff*)skb)->next);
+}
+
+void
+osl_pktsetnext(void *skb, void *x)
+{
+ ((struct sk_buff*)skb)->next = (struct sk_buff*)x;
+}
+
+void
+osl_pktsetlen(osl_t *osh, void *skb, uint len)
+{
+ __skb_trim((struct sk_buff*)skb, len);
+}
+
+uchar*
+osl_pktpush(osl_t *osh, void *skb, int bytes)
+{
+ return (skb_push((struct sk_buff*)skb, bytes));
+}
+
+uchar*
+osl_pktpull(osl_t *osh, void *skb, int bytes)
+{
+ return (skb_pull((struct sk_buff*)skb, bytes));
+}
+
+void*
+osl_pkttag(void *skb)
+{
+ return ((void*)(((struct sk_buff*)skb)->cb));
+}
+
+void*
+osl_pktlink(void *skb)
+{
+ return (((struct sk_buff*)skb)->prev);
+}
+
+void
+osl_pktsetlink(void *skb, void *x)
+{
+ ((struct sk_buff*)skb)->prev = (struct sk_buff*)x;
+}
+
+uint
+osl_pktprio(void *skb)
+{
+ return (((struct sk_buff*)skb)->priority);
+}
+
+void
+osl_pktsetprio(void *skb, uint x)
+{
+ ((struct sk_buff*)skb)->priority = x;
+}
+#endif /* BINOSL */
+
+uint
+osl_pktalloced(osl_t *osh)
+{
+ if (atomic_read(&osh->cmn->refcount) == 1)
+ return (atomic_read(&osh->cmn->pktalloced));
+ else
+ return 0;
+}
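+
+/* Note: the allocation counter lives in the shared 'cmn' state, so it is
+ * only reported when a single handle references that state (refcount == 1);
+ * with multiple attached handles, 0 is returned rather than a figure that
+ * cannot be attributed to one interface.
+ */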
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) && defined(TSQ_MULTIPLIER)
+#include <linux/kallsyms.h>
+#include <net/sock.h>
+void
+osl_pkt_orphan_partial(struct sk_buff *skb, int tsq)
+{
+ uint32 fraction;
+ static void *p_tcp_wfree = NULL;
+
+ if (tsq <= 0)
+ return;
+
+ if (!skb->destructor || skb->destructor == sock_wfree)
+ return;
+
+ if (unlikely(!p_tcp_wfree)) {
+		/* This is a hack to get the tcp_wfree pointer, since it is not
+		 * exported. There are two possible callback function pointers
+		 * stored in skb->destructor: tcp_wfree and sock_wfree.
+		 * This expansion logic should only apply to TCP traffic, which
+		 * uses tcp_wfree as the skb destructor.
+ */
+ char sym[KSYM_SYMBOL_LEN];
+ sprint_symbol(sym, (unsigned long)skb->destructor);
+ sym[9] = 0;
+ if (!strcmp(sym, "tcp_wfree"))
+ p_tcp_wfree = skb->destructor;
+ else
+ return;
+ }
+
+ if (unlikely(skb->destructor != p_tcp_wfree || !skb->sk))
+ return;
+
+	/* Subtract a portion of the skb truesize from the socket's
+	 * sk_wmem_alloc so that more skbs can be allocated for this
+	 * socket, giving a better cushion to meet the WiFi device's demand.
+ */
+ fraction = skb->truesize * (tsq - 1) / tsq;
+ skb->truesize -= fraction;
+ atomic_sub(fraction, (atomic_t *)&skb->sk->sk_wmem_alloc);
+ skb_orphan(skb);
+}
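+
+/* Worked example of the fraction math above (illustrative numbers): with
+ * tsq = 4 and skb->truesize = 4096, fraction = 4096 * 3 / 4 = 3072, so only
+ * 1024 bytes remain charged to sk_wmem_alloc and roughly 4x more data can be
+ * in flight on this socket before TSQ throttles it.
+ */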
+#endif /* LINUX_VERSION >= 3.6.0 && TSQ_MULTIPLIER */
diff --git a/bcmdhd.101.10.361.x/nciutils.c b/bcmdhd.101.10.361.x/nciutils.c
new file mode 100755
index 0000000..9ae0f3c
--- /dev/null
+++ b/bcmdhd.101.10.361.x/nciutils.c
@@ -0,0 +1,3095 @@
+/*
+ * Misc utility routines for accessing chip-specific features
+ * of the BOOKER NCI (non-coherent interconnect) based Broadcom chips.
+ *
+ * Broadcom Proprietary and Confidential. Copyright (C) 2020,
+ * All Rights Reserved.
+ *
+ * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom;
+ * the contents of this file may not be disclosed to third parties,
+ * copied or duplicated in any form, in whole or in part, without
+ * the prior written permission of Broadcom.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Proprietary:>>
+ */
+
+#include <typedefs.h>
+#include <hndsoc.h>
+#include <sbchipc.h>
+#include <pcicfg.h>
+#include <pcie_core.h>
+#include "siutils_priv.h"
+#include <nci.h>
+#include <bcmdevs.h>
+#include <hndoobr.h>
+
+#define NCI_BAD_REG 0xbbadd000u /* Bad Register Address */
+#define NCI_BAD_INDEX -1 /* Bad Index */
+
+#define OOBR_BASE_MASK 0x00001FFFu /* Mask to get Base address of OOBR */
+#define EROM1_BASE_MASK 0x00000FFFu /* Mask to get Base address of EROM1 */
+
+/* Core Info */
+#define COREINFO_COREID_MASK 0x00000FFFu /* Bit-11 to 0 */
+#define COREINFO_REV_MASK 0x000FF000u /* Core Rev Mask */
+#define COREINFO_REV_SHIFT 12u /* Bit-12 */
+#define COREINFO_MFG_MASK 0x00F00000u /* Core Mfg Mask */
+#define COREINFO_MFG_SHIFT 20u /* Bit-20 */
+#define COREINFO_BPID_MASK 0x07000000u /* 26-24 Gives Backplane ID */
+#define COREINFO_BPID_SHIFT 24u /* Bit:26-24 */
+#define COREINFO_ISBP_MASK 0x08000000u /* Is Backplane or Bridge */
+#define COREINFO_ISBP_SHIFT 27u /* Bit:27 */
+
+/* Interface Config */
+#define IC_IFACECNT_MASK 0x0000F000u /* No of Interface Descriptor Mask */
+#define IC_IFACECNT_SHIFT 12u /* Bit-12 */
+#define IC_IFACEOFFSET_MASK 0x00000FFFu /* OFFSET for 1st Interface Descriptor */
+
+/* DMP Reg Offset */
+#define DMP_DMPCTRL_REG_OFFSET 8u
+
+/* Interface Descriptor Masks */
+#define ID_NODEPTR_MASK 0xFFFFFFF8u /* Master/Slave Network Interface Addr */
+#define ID_NODETYPE_MASK 0x00000007u /* 0:Booker 1:IDM 1-0xf:Reserved */
+#define ID_WORDOFFSET_MASK 0xF0000000u /* WordOffset to next Iface Desc in EROM2 */
+#define ID_WORDOFFSET_SHIFT 28u /* WordOffset bits 31-28 */
+#define ID_CORETYPE_MASK 0x08000000u /* CORE belongs to OOBR(0) or EROM(1) */
+#define ID_CORETYPE_SHIFT 27u /* Bit-27 */
+#define ID_MI_MASK 0x04000000u /* 0: Slave Interface, 1:Master Interface */
+#define ID_MI_SHIFT 26u /* Bit-26 */
+#define ID_NADDR_MASK 0x03000000u /* No of Slave Address Regions */
+#define ID_NADDR_SHIFT 24u /* Bit:25-24 */
+#define ID_BPID_MASK 0x00F00000u /* Give Backplane ID */
+#define ID_BPID_SHIFT 20u /* Bit:20-23 */
+#define ID_COREINFOPTR_MASK 0x00001FFFu /* OOBR or EROM Offset */
+#define ID_ENDMARKER 0xFFFFFFFFu /* End of EROM Part 2 */
+
+/* Slave Port Address Descriptor Masks */
+#define SLAVEPORT_BASE_ADDR_MASK 0xFFFFFF00u /* Bits 31:8 is the base address */
+#define SLAVEPORT_BOUND_ADDR_MASK 0x00000040u /* Addr is not 2^n and with bound addr */
+#define SLAVEPORT_BOUND_ADDR_SHIFT 6u /* Bit-6 */
+#define SLAVEPORT_64BIT_ADDR_MASK 0x00000020u /* 64-bit base and bound fields */
+#define SLAVEPORT_64BIT_ADDR_SHIFT 5u /* Bit-5 */
+#define SLAVEPORT_ADDR_SIZE_MASK 0x0000001Fu /* Address Size mask */
+#define SLAVEPORT_ADDR_TYPE_BOUND 0x1u /* Bound Addr */
+#define SLAVEPORT_ADDR_TYPE_64 0x2u /* 64-Bit Addr */
+#define SLAVEPORT_ADDR_MIN_SHIFT 0x8u
+/* Address space Size of the slave port */
+#define SLAVEPORT_ADDR_SIZE(adesc) (1u << (((adesc) & SLAVEPORT_ADDR_SIZE_MASK) + \
+ SLAVEPORT_ADDR_MIN_SHIFT))
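+
+/* Worked example (illustrative): an address descriptor whose size field is
+ * 0x4 yields SLAVEPORT_ADDR_SIZE(adesc) = 1 << (4 + 8) = 4 KB of slave
+ * address space; a size field of 0 gives the minimum 256-byte region.
+ */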
+
+#define GET_NEXT_EROM_ADDR(addr) ((uint32*)((uintptr)(addr) + 4u))
+
+#define NCI_DEFAULT_CORE_UNIT (0u)
+
+/* Error Codes */
+enum {
+ NCI_OK = 0,
+ NCI_BACKPLANE_ID_MISMATCH = -1,
+ NCI_INVALID_EROM2PTR = -2,
+ NCI_WORDOFFSET_MISMATCH = -3,
+ NCI_NOMEM = -4,
+ NCI_MASTER_INVALID_ADDR = -5
+};
+
+#define GET_OOBR_BASE(erom2base) ((erom2base) & ~OOBR_BASE_MASK)
+#define GET_EROM1_BASE(erom2base) ((erom2base) & ~EROM1_BASE_MASK)
+#define CORE_ID(core_info) ((core_info) & COREINFO_COREID_MASK)
+#define GET_INFACECNT(iface_cfg) (((iface_cfg) & IC_IFACECNT_MASK) >> IC_IFACECNT_SHIFT)
+#define GET_NODEPTR(iface_desc_0) ((iface_desc_0) & ID_NODEPTR_MASK)
+#define GET_NODETYPE(iface_desc_0) ((iface_desc_0) & ID_NODETYPE_MASK)
+#define GET_WORDOFFSET(iface_desc_1) (((iface_desc_1) & ID_WORDOFFSET_MASK) \
+ >> ID_WORDOFFSET_SHIFT)
+#define IS_MASTER(iface_desc_1) (((iface_desc_1) & ID_MI_MASK) >> ID_MI_SHIFT)
+#define GET_CORETYPE(iface_desc_1) (((iface_desc_1) & ID_CORETYPE_MASK) >> ID_CORETYPE_SHIFT)
+#define GET_NUM_ADDR_REG(iface_desc_1) (((iface_desc_1) & ID_NADDR_MASK) >> ID_NADDR_SHIFT)
+#define GET_COREOFFSET(iface_desc_1) ((iface_desc_1) & ID_COREINFOPTR_MASK)
+#define ADDR_SIZE(sz) ((1u << ((sz) + 8u)) - 1u)
+
+#define CORE_REV(core_info) (((core_info) & COREINFO_REV_MASK) >> COREINFO_REV_SHIFT)
+#define CORE_MFG(core_info) (((core_info) & COREINFO_MFG_MASK) >> COREINFO_MFG_SHIFT)
+#define COREINFO_BPID(core_info) (((core_info) & COREINFO_BPID_MASK) >> COREINFO_BPID_SHIFT)
+#define IS_BACKPLANE(core_info) (((core_info) & COREINFO_ISBP_MASK) >> COREINFO_ISBP_SHIFT)
+#define ID_BPID(iface_desc_1) (((iface_desc_1) & ID_BPID_MASK) >> ID_BPID_SHIFT)
+#define IS_BACKPLANE_ID_SAME(core_info, iface_desc_1) \
+ (COREINFO_BPID((core_info)) == ID_BPID((iface_desc_1)))
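+
+/* Field-extraction example (illustrative value): for coreinfo = 0x00523800,
+ * CORE_ID() = 0x800 (chipcommon), CORE_REV() = 0x23 and CORE_MFG() = 0x5.
+ */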
+
+#define NCI_WORD_SIZE (4u)
+#define PCI_ACCESS_SIZE (4u)
+
+#define NCI_ADDR2NUM(addr) ((uintptr)(addr))
+#define NCI_ADD_NUM(addr, size) (NCI_ADDR2NUM(addr) + (size))
+#ifdef DONGLEBUILD
+#define NCI_ADD_ADDR(addr, size) ((uint32*)REG_MAP(NCI_ADD_NUM((addr), (size)), 0u))
+#else /* !DONGLEBUILD */
+#define NCI_ADD_ADDR(addr, size) ((uint32*)(NCI_ADD_NUM((addr), (size))))
+#endif /* DONGLEBUILD */
+#define NCI_INC_ADDR(addr, size) ((addr) = NCI_ADD_ADDR((addr), (size)))
+
+#define NODE_TYPE_BOOKER 0x0u
+#define NODE_TYPE_NIC400 0x1u
+
+#define BP_BOOKER 0x0u
+#define BP_NIC400 0x1u
+#define BP_APB1 0x2u
+#define BP_APB2 0x3u
+#define BP_CCI400 0x4u
+
+#define PCIE_WRITE_SIZE 4u
+
+static const char BACKPLANE_ID_NAME[][11] = {
+ "BOOKER",
+ "NIC400",
+ "APB1",
+ "APB2",
+ "CCI400",
+ "\0"
+};
+
+#define APB_INF(ifd) ((ID_BPID((ifd).iface_desc_1) == BP_APB1) || \
+ (ID_BPID((ifd).iface_desc_1) == BP_APB2))
+#define BOOKER_INF(ifd) (ID_BPID((ifd).iface_desc_1) == BP_BOOKER)
+#define NIC_INF(ifd) (ID_BPID((ifd).iface_desc_1) == BP_NIC400)
+
+/* BOOKER NCI LOG LEVEL */
+#define NCI_LOG_LEVEL_ERROR 0x1u
+#define NCI_LOG_LEVEL_TRACE 0x2u
+#define NCI_LOG_LEVEL_INFO 0x4u
+#define NCI_LOG_LEVEL_PRINT 0x8u
+
+#ifndef NCI_DEFAULT_LOG_LEVEL
+#define NCI_DEFAULT_LOG_LEVEL (NCI_LOG_LEVEL_ERROR)
+#endif /* NCI_DEFAULT_LOG_LEVEL */
+
+uint32 nci_log_level = NCI_DEFAULT_LOG_LEVEL;
+
+#ifdef DONGLEBUILD
+#define NCI_ERROR(args) do { if (nci_log_level & NCI_LOG_LEVEL_ERROR) { printf args; } } while (0u)
+#define NCI_TRACE(args) do { if (nci_log_level & NCI_LOG_LEVEL_TRACE) { printf args; } } while (0u)
+#define NCI_INFO(args) do { if (nci_log_level & NCI_LOG_LEVEL_INFO) { printf args; } } while (0u)
+#define NCI_PRINT(args) do { if (nci_log_level & NCI_LOG_LEVEL_PRINT) { printf args; } } while (0u)
+#else /* !DONGLEBUILD */
+#define NCI_KERN_PRINT(...) printk(KERN_ERR __VA_ARGS__)
+#define NCI_ERROR(args) do { if (nci_log_level & NCI_LOG_LEVEL_ERROR) \
+ { NCI_KERN_PRINT args; } } while (0u)
+#define NCI_TRACE(args) do { if (nci_log_level & NCI_LOG_LEVEL_TRACE) \
+ { NCI_KERN_PRINT args; } } while (0u)
+#define NCI_INFO(args) do { if (nci_log_level & NCI_LOG_LEVEL_INFO) \
+ { NCI_KERN_PRINT args; } } while (0u)
+#define NCI_PRINT(args) do { if (nci_log_level & NCI_LOG_LEVEL_PRINT) \
+ { NCI_KERN_PRINT args; } } while (0u)
+#endif /* DONGLEBUILD */
+
+#define NCI_EROM_WORD_SIZEOF 4u
+#define NCI_REGS_PER_CORE 2u
+
+#define NCI_EROM1_LEN(erom2base) ((erom2base) - GET_EROM1_BASE(erom2base))
+#define NCI_NONOOBR_CORES(erom2base) (NCI_EROM1_LEN(erom2base) / \
+	(NCI_REGS_PER_CORE * NCI_EROM_WORD_SIZEOF))
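+
+/* Worked example (illustrative): if ChipCommon reports erom2base = 0x18013100,
+ * EROM1 starts at 0x18013000, so NCI_EROM1_LEN() = 0x100 bytes; at two 4-byte
+ * EROM words per core this gives NCI_NONOOBR_CORES() = 0x100 / 8 = 32 cores.
+ */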
+
+/* AXI ID to CoreID + unit mappings */
+typedef struct nci_axi_to_coreidx {
+ uint coreid;
+ uint coreunit;
+} nci_axi_to_coreidx_t;
+
+static const nci_axi_to_coreidx_t axi2coreidx_4397[] = {
+ {CC_CORE_ID, 0}, /* 00 Chipcommon */
+ {PCIE2_CORE_ID, 0}, /* 01 PCIe */
+ {D11_CORE_ID, 0}, /* 02 D11 Main */
+ {ARMCR4_CORE_ID, 0}, /* 03 ARM */
+ {BT_CORE_ID, 0}, /* 04 BT AHB */
+ {D11_CORE_ID, 1}, /* 05 D11 Aux */
+ {D11_CORE_ID, 0}, /* 06 D11 Main l1 */
+ {D11_CORE_ID, 1}, /* 07 D11 Aux l1 */
+ {D11_CORE_ID, 0}, /* 08 D11 Main l2 */
+ {D11_CORE_ID, 1}, /* 09 D11 Aux l2 */
+ {NODEV_CORE_ID, 0}, /* 10 M2M DMA */
+ {NODEV_CORE_ID, 0}, /* 11 unused */
+ {NODEV_CORE_ID, 0}, /* 12 unused */
+ {NODEV_CORE_ID, 0}, /* 13 unused */
+ {NODEV_CORE_ID, 0}, /* 14 unused */
+ {NODEV_CORE_ID, 0} /* 15 unused */
+};
+
+typedef struct slave_port {
+ uint32 adesc; /**< Address Descriptor 0 */
+ uint32 addrl; /**< Lower Base */
+ uint32 addrh; /**< Upper Base */
+ uint32 extaddrl; /**< Lower Bound */
+	uint32 extaddrh;	/**< Upper Bound */
+} slave_port_t;
+
+typedef struct interface_desc {
+ slave_port_t *sp; /**< Slave Port Addr 0-3 */
+
+ uint32 iface_desc_0; /**< Interface-0 Descriptor Word0 */
+ /* If Node Type 0-Booker xMNI/xSNI address. If Node Type 1-DMP wrapper Address */
+ uint32 node_ptr; /**< Core's Node pointer */
+
+ uint32 iface_desc_1; /**< Interface Descriptor Word1 */
+ uint8 num_addr_reg; /**< Number of Slave Port Addr (Valid only if master=0) */
+	uint8 coretype;		/**< Core belongs to 0:OOBR 1:EROM1 */
+ uint8 master; /**< 1:Master 0:Slave */
+
+	uint8 node_type;	/**< 0:Booker, 1:IDM Wrapper, 2-0xf: Reserved */
+} interface_desc_t;
+
+typedef struct nci_cores {
+ void *regs;
+	/* 2:0-Node type (0-booker,1-IDM Wrapper) 31:3-Interconnect register space */
+ interface_desc_t *desc; /**< Interface & Address Descriptors */
+ /*
+ * 11:0-CoreID, 19:12-RevID 23:20-MFG 26:24-Backplane ID if
+	 * bit 27 is 1 (Core is a Backplane or Bridge)
+ */
+ uint32 coreinfo; /**< CoreInfo of each core */
+ /*
+	 * 11:0 - Offset of 1st Interface desc in EROM, 15:12 - No.
+	 * of interfaces attached to this core
+ */
+ uint32 iface_cfg; /**< Interface config Reg */
+ uint32 dmp_regs_off; /**< DMP control & DMP status @ 0x48 from coreinfo */
+ uint32 coreid; /**< id of each core */
+ uint8 coreunit; /**< Unit differentiate same coreids */
+ uint8 iface_cnt; /**< no of Interface connected to each core */
+ uint8 PAD[2u];
+} nci_cores_t;
+
+typedef struct nci_info {
+ void *osh; /**< osl os handle */
+ nci_cores_t *cores; /**< Cores Parsed */
+ void *pci_bar_addr; /**< PCI BAR0 Window */
+ uint32 cc_erom2base; /**< Base of EROM2 from ChipCommon */
+ uint32 *erom1base; /**< Base of EROM1 */
+ uint32 *erom2base; /**< Base of EROM2 */
+ uint32 *oobr_base; /**< Base of OOBR */
+ uint16 bustype; /**< SI_BUS, PCI_BUS */
+ uint8 max_cores; /**< # Max cores indicated by Register */
+ uint8 num_cores; /**< # discovered cores */
+ uint8 refcnt; /**< Allocation reference count */
+ uint8 scan_done; /**< Set to TRUE when erom scan is done. */
+ uint8 PAD[2];
+} nci_info_t;
+
+#define NI_IDM_RESET_ENTRY 0x1
+#define NI_IDM_RESET_EXIT 0x0
+
+/* AXI Slave Network Interface registers */
+typedef volatile struct asni_regs {
+ uint32 node_type; /* 0x000 */
+ uint32 node_info; /* 0x004 */
+ uint32 secr_acc; /* 0x008 */
+ uint32 pmusela; /* 0x00c */
+ uint32 pmuselb; /* 0x010 */
+ uint32 PAD[11];
+ uint32 node_feat; /* 0x040 */
+ uint32 bursplt; /* 0x044 */
+ uint32 addr_remap; /* 0x048 */
+ uint32 PAD[13];
+ uint32 sildbg; /* 0x080 */
+ uint32 qosctl; /* 0x084 */
+ uint32 wdatthrs; /* 0x088 */
+ uint32 arqosovr; /* 0x08c */
+ uint32 awqosovr; /* 0x090 */
+ uint32 atqosot; /* 0x094 */
+ uint32 arqosot; /* 0x098 */
+ uint32 awqosot; /* 0x09c */
+ uint32 axqosot; /* 0x0a0 */
+ uint32 qosrdpk; /* 0x0a4 */
+ uint32 qosrdbur; /* 0x0a8 */
+ uint32 qosrdavg; /* 0x0ac */
+ uint32 qoswrpk; /* 0x0b0 */
+ uint32 qoswrbur; /* 0x0b4 */
+ uint32 qoswravg; /* 0x0b8 */
+ uint32 qoscompk; /* 0x0bc */
+ uint32 qoscombur; /* 0x0c0 */
+ uint32 qoscomavg; /* 0x0c4 */
+ uint32 qosrbbqv; /* 0x0c8 */
+ uint32 qoswrbqv; /* 0x0cc */
+ uint32 qoscombqv; /* 0x0d0 */
+ uint32 PAD[11];
+ uint32 idm_device_id; /* 0x100 */
+ uint32 PAD[15];
+ uint32 idm_reset_ctrl; /* 0x140 */
+} asni_regs_t;
+
+/* AXI Master Network Interface registers */
+typedef volatile struct amni_regs {
+ uint32 node_type; /* 0x000 */
+ uint32 node_info; /* 0x004 */
+ uint32 secr_acc; /* 0x008 */
+ uint32 pmusela; /* 0x00c */
+ uint32 pmuselb; /* 0x010 */
+ uint32 PAD[11];
+ uint32 node_feat; /* 0x040 */
+ uint32 PAD[15];
+ uint32 sildbg; /* 0x080 */
+ uint32 qosacc; /* 0x084 */
+ uint32 PAD[26];
+ uint32 interrupt_status; /* 0x0f0 */
+ uint32 interrupt_mask; /* 0x0f4 */
+ uint32 interrupt_status_ns; /* 0x0f8 */
+ uint32 interrupt_mask_ns; /* 0x0FC */
+ uint32 idm_device_id; /* 0x100 */
+ uint32 PAD[15];
+ uint32 idm_reset_ctrl; /* 0x140 */
+} amni_regs_t;
+
+#define NCI_SPINWAIT_TIMEOUT (300u)
+
+/* DMP/io control and DMP/io status */
+typedef struct dmp_regs {
+ uint32 dmpctrl;
+ uint32 dmpstatus;
+} dmp_regs_t;
+
+#ifdef _RTE_
+static nci_info_t *knci_info = NULL;
+#endif /* _RTE_ */
+
+static void nci_save_iface1_reg(interface_desc_t *desc, uint32 iface_desc_1);
+static uint32* nci_save_slaveport_addr(nci_info_t *nci,
+ interface_desc_t *desc, uint32 *erom2ptr);
+static int nci_get_coreunit(nci_cores_t *cores, uint32 numcores, uint cid,
+ uint32 iface_desc_1);
+static nci_cores_t* nci_initial_parse(nci_info_t *nci, uint32 *erom2ptr, uint32 *core_idx);
+static void _nci_setcoreidx_pcie_bus(si_t *sih, volatile void **regs, uint32 curmap,
+ uint32 curwrap);
+static volatile void *_nci_setcoreidx(si_t *sih, uint coreidx);
+static uint32 _nci_get_curwrap(nci_info_t *nci, uint coreidx, uint wrapper_idx);
+static uint32 nci_get_curwrap(nci_info_t *nci, uint coreidx);
+static uint32 _nci_get_curmap(nci_info_t *nci, uint coreidx, uint slave_port_idx, uint base_idx);
+static uint32 nci_get_curmap(nci_info_t *nci, uint coreidx);
+static void _nci_core_reset(const si_t *sih, uint32 bits, uint32 resetbits);
+#if defined (AXI_TIMEOUTS) || defined (AXI_TIMEOUTS_NIC)
+static void nci_reset_APB(const si_info_t *sii, aidmp_t *ai, int *ret,
+ uint32 errlog_status, uint32 errlog_id);
+static void nci_reset_axi_to(const si_info_t *sii, aidmp_t *ai);
+#endif /* (AXI_TIMEOUTS) || (AXI_TIMEOUTS_NIC) */
+static uint32 nci_find_numcores(si_t *sih);
+#ifdef BOOKER_NIC400_INF
+static int32 nci_find_first_wrapper_idx(nci_info_t *nci, uint32 coreidx);
+#endif /* BOOKER_NIC400_INF */
+
+/*
+ * Description : This function searches for a CORE with a matching 'core_id' but a mismatching
+ * 'wordoffset'; if one is found, 'coreunit' is set to that core's unit plus 1.
+ */
+/* TODO: Need to understand this. */
+static int
+BCMATTACHFN(nci_get_coreunit)(nci_cores_t *cores, uint32 numcores,
+ uint core_id, uint32 iface_desc_1)
+{
+ uint32 core_idx;
+ uint32 coreunit = NCI_DEFAULT_CORE_UNIT;
+
+ for (core_idx = 0u; core_idx < numcores; core_idx++) {
+ if ((cores[core_idx].coreid == core_id) &&
+ (GET_COREOFFSET(cores[core_idx].desc->iface_desc_1) !=
+ GET_COREOFFSET(iface_desc_1))) {
+ coreunit = cores[core_idx].coreunit + 1;
+ }
+ }
+
+ return coreunit;
+}
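+
+/* Example (illustrative): if two D11 cores appear in the EROM with different
+ * interface word offsets, the first keeps coreunit 0 and the scan above
+ * assigns the second coreunit 1 (cf. the main/aux split in axi2coreidx_4397).
+ */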
+
+/*
+ * OOBR Region
+ +-------------------------------+
+ + +
+ + OOBR with EROM Data +
+ + +
+ +-------------------------------+
+ + +
+ + EROM1 +
+ + +
+ +-------------------------------+ --> ChipCommon.EROMBASE
+ + +
+ + EROM2 +
+ + +
+ +-------------------------------+
+*/
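+
+/* Base-address derivation (illustrative value): with ChipCommon.EROMBASE =
+ * 0x1801325c, GET_EROM1_BASE() clears the low 12 bits giving 0x18013000, and
+ * GET_OOBR_BASE() clears the low 13 bits giving 0x18012000, matching the
+ * layout sketched above (OOBR below EROM1 below EROM2).
+ */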
+
+/**
+ * Function : nci_init
+ * Description : Allocates memory for 'nci_info_t' and its internal elements.
+ *
+ * @parameter[in]
+ * @regs : This is a ChipCommon Register
+ * @bustype : Bus Connect Type
+ *
+ * Return : On Success the 'nci_info_t' data structure is returned as void*,
+ * where all EROM parsed Cores are saved;
+ * using this, all EROM Cores are Freed.
+ * On Failure 'NULL' is returned after printing ERROR messages
+ */
+void*
+BCMATTACHFN(nci_init)(si_t *sih, chipcregs_t *cc, uint bustype)
+{
+ si_info_t *sii = SI_INFO(sih);
+ nci_cores_t *cores;
+ nci_info_t *nci = NULL;
+ uint8 err_at = 0u;
+
+#ifdef _RTE_
+ if (knci_info) {
+ knci_info->refcnt++;
+ nci = knci_info;
+
+ goto end;
+ }
+#endif /* _RTE_ */
+
+ /* It is used only when NCI_ERROR is used */
+ BCM_REFERENCE(err_at);
+
+ if ((nci = MALLOCZ(sii->osh, sizeof(*nci))) == NULL) {
+ err_at = 1u;
+ goto end;
+ }
+ sii->nci_info = nci;
+
+ nci->osh = sii->osh;
+ nci->refcnt++;
+
+ nci->cc_erom2base = R_REG(nci->osh, &cc->eromptr);
+ nci->bustype = bustype;
+ switch (nci->bustype) {
+ case SI_BUS:
+ nci->erom2base = (uint32*)REG_MAP(nci->cc_erom2base, 0u);
+ nci->oobr_base = (uint32*)REG_MAP(GET_OOBR_BASE(nci->cc_erom2base), 0u);
+ nci->erom1base = (uint32*)REG_MAP(GET_EROM1_BASE(nci->cc_erom2base), 0u);
+
+ break;
+
+ case PCI_BUS:
+ /* Set wrappers address */
+ sii->curwrap = (void *)((uintptr)cc + SI_CORE_SIZE);
+ /* Set access window to Erom Base(For NCI, EROM starts with OOBR) */
+ OSL_PCI_WRITE_CONFIG(nci->osh, PCI_BAR0_WIN, PCI_ACCESS_SIZE,
+ GET_EROM1_BASE(nci->cc_erom2base));
+ nci->erom1base = (uint32*)((uintptr)cc);
+ nci->erom2base = (uint32*)((uintptr)cc + NCI_EROM1_LEN(nci->cc_erom2base));
+
+ break;
+
+ default:
+ err_at = 2u;
+ ASSERT(0u);
+ goto end;
+ }
+
+ nci->max_cores = nci_find_numcores(sih);
+ if (!nci->max_cores) {
+ err_at = 3u;
+ goto end;
+ }
+
+ if ((cores = MALLOCZ(nci->osh, sizeof(*cores) * nci->max_cores)) == NULL) {
+ err_at = 4u;
+ goto end;
+ }
+ nci->cores = cores;
+
+#ifdef _RTE_
+ knci_info = nci;
+#endif /* _RTE_ */
+
+end:
+ if (err_at) {
+ NCI_ERROR(("nci_init: Failed err_at=%#x\n", err_at));
+ nci_uninit(nci);
+ nci = NULL;
+ }
+
+ return nci;
+}
+
+/**
+ * Function : nci_uninit
+ * Description : Frees memory for 'nci_info_t' and its internally allocated elements.
+ *
+ * @parameter[in]
+ * @nci : This is the 'nci_info_t' data structure, where all EROM parsed Cores are saved; using
+ * this, all EROM Cores are Freed.
+ *
+ * Return : void
+ */
+void
+BCMATTACHFN(nci_uninit)(void *ctx)
+{
+ nci_info_t *nci = (nci_info_t *)ctx;
+ uint8 core_idx, desc_idx;
+ interface_desc_t *desc;
+ nci_cores_t *cores;
+ slave_port_t *sp;
+
+ if (nci == NULL) {
+ return;
+ }
+
+ nci->refcnt--;
+
+#ifdef _RTE_
+ if (nci->refcnt != 0) {
+ return;
+ }
+#endif /* _RTE_ */
+
+ cores = nci->cores;
+ if (cores == NULL) {
+ goto end;
+ }
+
+ for (core_idx = 0u; core_idx < nci->num_cores; core_idx++) {
+ desc = cores[core_idx].desc;
+ if (desc == NULL) {
+ break;
+ }
+
+ for (desc_idx = 0u; desc_idx < cores[core_idx].iface_cnt; desc_idx++) {
+ sp = desc[desc_idx].sp;
+ if (sp) {
+ MFREE(nci->osh, sp, (sizeof(*sp) * desc[desc_idx].num_addr_reg));
+ }
+ }
+ MFREE(nci->osh, desc, (sizeof(*desc) * cores[core_idx].iface_cnt));
+ }
+ MFREE(nci->osh, cores, sizeof(*cores) * nci->max_cores);
+
+end:
+
+#ifdef _RTE_
+ knci_info = NULL;
+#endif /* _RTE_ */
+
+ MFREE(nci->osh, nci, sizeof(*nci));
+}
+
+/**
+ * Function : nci_save_iface1_reg
+ * Description : The Interface1 Descriptor is obtained from the register and saved in the
+ * internal data structure 'nci->cores'.
+ *
+ * @parameter[in]
+ * @desc : Descriptor of the Core to be updated with the obtained Interface1 Descriptor.
+ * @iface_desc_1 : Obtained Interface1 Descriptor.
+ *
+ * Return : void
+ */
+static void
+BCMATTACHFN(nci_save_iface1_reg)(interface_desc_t *desc, uint32 iface_desc_1)
+{
+ BCM_REFERENCE(BACKPLANE_ID_NAME);
+
+ desc->coretype = GET_CORETYPE(iface_desc_1);
+ desc->master = IS_MASTER(iface_desc_1);
+
+ desc->iface_desc_1 = iface_desc_1;
+ desc->num_addr_reg = GET_NUM_ADDR_REG(iface_desc_1);
+ if (desc->master) {
+ if (desc->num_addr_reg) {
+ NCI_ERROR(("nci_save_iface1_reg: Master NODEPTR Addresses is not zero "
+ "i.e. %d\n", GET_NUM_ADDR_REG(iface_desc_1)));
+ ASSERT(0u);
+ }
+ } else {
+		/* A SLAVE's 'NumAddressRegion' is one less than the actual count, so increment by 1 */
+ desc->num_addr_reg++;
+ }
+
+ NCI_INFO(("\tnci_save_iface1_reg: %s InterfaceDesc:%#x WordOffset=%#x "
+ "NoAddrReg=%#x %s_Offset=%#x BackplaneID=%s\n",
+ desc->master?"Master":"Slave", desc->iface_desc_1,
+ GET_WORDOFFSET(desc->iface_desc_1),
+ desc->num_addr_reg, desc->coretype?"EROM1":"OOBR",
+ GET_COREOFFSET(desc->iface_desc_1),
+ BACKPLANE_ID_NAME[ID_BPID(desc->iface_desc_1)]));
+}
+
+/**
+ * Function : nci_save_slaveport_addr
+ * Description : All Slave Port Addresses of the Interface Descriptor are saved.
+ *
+ * @parameter[in]
+ * @nci : This is 'nci_info_t' data structure, where all EROM parsed Cores are saved
+ * @desc : Current Interface Descriptor.
+ * @erom2ptr : Pointer to Address Descriptor0.
+ *
+ * Return : On Success, this function returns Erom2 Ptr to Next Interface Descriptor,
+ * On Failure, NULL is returned.
+ */
+static uint32*
+BCMATTACHFN(nci_save_slaveport_addr)(nci_info_t *nci,
+ interface_desc_t *desc, uint32 *erom2ptr)
+{
+ slave_port_t *sp;
+ uint32 adesc;
+ uint32 sz;
+ uint32 addr_idx;
+
+ /* Allocate 'NumAddressRegion' of Slave Port */
+ if ((desc->sp = (slave_port_t *)MALLOCZ(
+ nci->osh, (sizeof(*sp) * desc->num_addr_reg))) == NULL) {
+ NCI_ERROR(("\tnci_save_slaveport_addr: Memory Allocation failed for Slave Port\n"));
+ return NULL;
+ }
+
+ sp = desc->sp;
+ /* Slave Port Addrs Desc */
+ for (addr_idx = 0u; addr_idx < desc->num_addr_reg; addr_idx++) {
+ adesc = R_REG(nci->osh, erom2ptr);
+ NCI_INC_ADDR(erom2ptr, NCI_WORD_SIZE);
+ sp[addr_idx].adesc = adesc;
+
+ sp[addr_idx].addrl = adesc & SLAVEPORT_BASE_ADDR_MASK;
+ if (adesc & SLAVEPORT_64BIT_ADDR_MASK) {
+ sp[addr_idx].addrh = R_REG(nci->osh, erom2ptr);
+ NCI_INC_ADDR(erom2ptr, NCI_WORD_SIZE);
+ sp[addr_idx].extaddrl = R_REG(nci->osh, erom2ptr);
+ NCI_INC_ADDR(erom2ptr, NCI_WORD_SIZE);
+ sp[addr_idx].extaddrh = R_REG(nci->osh, erom2ptr);
+ NCI_INC_ADDR(erom2ptr, NCI_WORD_SIZE);
+ NCI_INFO(("\tnci_save_slaveport_addr: SlavePortAddr[%#x]:0x%08x al=0x%08x "
+ "ah=0x%08x extal=0x%08x extah=0x%08x\n", addr_idx, adesc,
+ sp[addr_idx].addrl, sp[addr_idx].addrh, sp[addr_idx].extaddrl,
+ sp[addr_idx].extaddrh));
+ }
+ else if (adesc & SLAVEPORT_BOUND_ADDR_MASK) {
+ sp[addr_idx].addrh = R_REG(nci->osh, erom2ptr);
+ NCI_INC_ADDR(erom2ptr, NCI_WORD_SIZE);
+ NCI_INFO(("\tnci_save_slaveport_addr: SlavePortAddr[%#x]:0x%08x al=0x%08x "
+ "ah=0x%08x\n", addr_idx, adesc, sp[addr_idx].addrl,
+ sp[addr_idx].addrh));
+ } else {
+ sz = adesc & SLAVEPORT_ADDR_SIZE_MASK;
+ sp[addr_idx].addrh = sp[addr_idx].addrl + ADDR_SIZE(sz);
+ NCI_INFO(("\tnci_save_slaveport_addr: SlavePortAddr[%#x]:0x%08x al=0x%08x "
+ "ah=0x%08x sz=0x%08x\n", addr_idx, adesc, sp[addr_idx].addrl,
+ sp[addr_idx].addrh, sz));
+ }
+ }
+
+ return erom2ptr;
+}
+
+/**
+ * Function : nci_initial_parse
+ * Description : This function does
+ * 1. Obtains OOBR/EROM1 pointer based on CoreType
+ * 2. Determines the right CoreUnit for this 'core'
+ * 3. Saves CoreInfo & Interface Config in the corresponding 'core'
+ *
+ * @parameter[in]
+ * @nci : This is 'nci_info_t' data structure, where all EROM parsed Cores are saved.
+ * @erom2ptr : Pointer to Interface Descriptor0.
+ * @core_idx : New core index needs to be populated in this pointer.
+ *
+ * Return : On Success, this function returns 'core' where CoreInfo & Interface Config are saved.
+ */
+static nci_cores_t*
+BCMATTACHFN(nci_initial_parse)(nci_info_t *nci, uint32 *erom2ptr, uint32 *core_idx)
+{
+ uint32 iface_desc_1;
+ nci_cores_t *core;
+ uint32 dmp_regs_off = 0u;
+ uint32 iface_cfg = 0u;
+ uint32 core_info;
+ uint32 *ptr;
+ uint coreid;
+
+ iface_desc_1 = R_REG(nci->osh, erom2ptr);
+
+ /* Get EROM1/OOBR Pointer based on CoreType */
+ if (!GET_CORETYPE(iface_desc_1)) {
+ if (nci->bustype == PCI_BUS) {
+ OSL_PCI_WRITE_CONFIG(nci->osh, PCI_BAR0_WIN, PCI_ACCESS_SIZE,
+ GET_OOBR_BASE(nci->cc_erom2base));
+ nci->oobr_base = (uint32*)((uintptr)nci->erom1base);
+ }
+
+ ptr = NCI_ADD_ADDR(nci->oobr_base, GET_COREOFFSET(iface_desc_1));
+ } else {
+ ptr = NCI_ADD_ADDR(nci->erom1base, GET_COREOFFSET(iface_desc_1));
+ }
+ dmp_regs_off = GET_COREOFFSET(iface_desc_1) + DMP_DMPCTRL_REG_OFFSET;
+
+ core_info = R_REG(nci->osh, ptr);
+ NCI_INC_ADDR(ptr, NCI_WORD_SIZE);
+ iface_cfg = R_REG(nci->osh, ptr);
+
+ *core_idx = nci->num_cores;
+ core = &nci->cores[*core_idx];
+
+ if (CORE_ID(core_info) < 0xFFu) {
+ coreid = CORE_ID(core_info) | 0x800u;
+ } else {
+ coreid = CORE_ID(core_info);
+ }
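+	/* Short IDs read from the EROM are presumably folded into the
+	 * traditional Broadcom core-ID space (0x8xx, e.g. CC_CORE_ID) so that
+	 * existing coreid-based lookups keep working.
+	 */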
+
+ /* Get coreunit from previous cores i.e. num_cores */
+ core->coreunit = nci_get_coreunit(nci->cores, nci->num_cores,
+ coreid, iface_desc_1);
+
+ core->coreid = coreid;
+
+ /* Increment the num_cores once proper coreunit is known */
+ nci->num_cores++;
+
+ NCI_INFO(("\n\nnci_initial_parse: core_idx:%d %s=%p \n",
+ *core_idx, GET_CORETYPE(iface_desc_1)?"EROM1":"OOBR", ptr));
+
+ /* Core Info Register */
+ core->coreinfo = core_info;
+
+ /* Save DMP register base address. */
+ core->dmp_regs_off = dmp_regs_off;
+
+ NCI_INFO(("\tnci_initial_parse: COREINFO:%#x CId:%#x CUnit=%#x CRev=%#x CMfg=%#x\n",
+ core->coreinfo, core->coreid, core->coreunit, CORE_REV(core->coreinfo),
+ CORE_MFG(core->coreinfo)));
+
+ /* Interface Config Register */
+ core->iface_cfg = iface_cfg;
+ core->iface_cnt = GET_INFACECNT(iface_cfg);
+
+ NCI_INFO(("\tnci_initial_parse: INTERFACE_CFG:%#x IfaceCnt=%#x IfaceOffset=%#x \n",
+ iface_cfg, core->iface_cnt, iface_cfg & IC_IFACEOFFSET_MASK));
+
+ /* For PCI_BUS case set back BAR0 Window to EROM1 Base */
+ if (nci->bustype == PCI_BUS) {
+ OSL_PCI_WRITE_CONFIG(nci->osh, PCI_BAR0_WIN, PCI_ACCESS_SIZE,
+ GET_EROM1_BASE(nci->cc_erom2base));
+ }
+
+ return core;
+}
+
+static uint32
+BCMATTACHFN(nci_find_numcores)(si_t *sih)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ nci_info_t *nci = sii->nci_info;
+ volatile hndoobr_reg_t *oobr_reg = NULL;
+ uint32 orig_bar0_win1 = 0u;
+ uint32 num_oobr_cores = 0u;
+ uint32 num_nonoobr_cores = 0u;
+
+ /* No of Non-OOBR Cores */
+ num_nonoobr_cores = NCI_NONOOBR_CORES(nci->cc_erom2base);
+ if (num_nonoobr_cores <= 0u) {
+ NCI_ERROR(("nci_find_numcores: Invalid Number of non-OOBR cores %d\n",
+ num_nonoobr_cores));
+ goto fail;
+ }
+
+ /* No of OOBR Cores */
+ switch (BUSTYPE(sih->bustype)) {
+ case SI_BUS:
+ oobr_reg = (volatile hndoobr_reg_t*)REG_MAP(GET_OOBR_BASE(nci->cc_erom2base),
+ SI_CORE_SIZE);
+ break;
+
+ case PCI_BUS:
+ /* Save Original Bar0 Win1 */
+ orig_bar0_win1 = OSL_PCI_READ_CONFIG(nci->osh, PCI_BAR0_WIN,
+ PCI_ACCESS_SIZE);
+
+ OSL_PCI_WRITE_CONFIG(nci->osh, PCI_BAR0_WIN, PCI_ACCESS_SIZE,
+ GET_OOBR_BASE(nci->cc_erom2base));
+ oobr_reg = (volatile hndoobr_reg_t*)sii->curmap;
+ break;
+
+ default:
+ NCI_ERROR(("nci_find_numcores: Invalid bustype %d\n", BUSTYPE(sih->bustype)));
+ ASSERT(0);
+ goto fail;
+ }
+
+ num_oobr_cores = R_REG(nci->osh, &oobr_reg->capability) & OOBR_CAP_CORECNT_MASK;
+ if (num_oobr_cores <= 0u) {
+ NCI_ERROR(("nci_find_numcores: Invalid Number of OOBR cores %d\n", num_oobr_cores));
+ goto fail;
+ }
+
+ /* Point back to original base */
+ if (BUSTYPE(sih->bustype) == PCI_BUS) {
+ OSL_PCI_WRITE_CONFIG(nci->osh, PCI_BAR0_WIN, PCI_ACCESS_SIZE, orig_bar0_win1);
+ }
+
+ NCI_PRINT(("nci_find_numcores: Total Cores found %d\n",
+ (num_oobr_cores + num_nonoobr_cores)));
+ /* Total No of Cores */
+ return (num_oobr_cores + num_nonoobr_cores);
+
+fail:
+ return 0u;
+}
+
+/**
+ * Function : nci_scan
+ * Description : Function parses the EROM in the BOOKER NCI Architecture and saves all
+ * information about the Cores in the 'nci_info_t' data structure.
+ *
+ * @parameter[in]
+ * @nci : This is the 'nci_info_t' data structure, where all EROM parsed Cores are saved.
+ *
+ * Return : On Success the number of parsed Cores in the EROM is returned,
+ * On Failure '0' is returned after printing ERROR messages
+ * to the Console (if NCI_LOG_LEVEL is enabled).
+ */
+uint32
+BCMATTACHFN(nci_scan)(si_t *sih)
+{
+ si_info_t *sii = SI_INFO(sih);
+ nci_info_t *nci = (nci_info_t *)sii->nci_info;
+ axi_wrapper_t * axi_wrapper = sii->axi_wrapper;
+ uint32 *cur_iface_desc_1_ptr;
+ nci_cores_t *core;
+ interface_desc_t *desc;
+ uint32 wordoffset = 0u;
+ uint32 iface_desc_0;
+ uint32 iface_desc_1;
+ uint32 *erom2ptr;
+ uint8 iface_idx;
+ uint32 core_idx;
+ int err = 0;
+
+ /* If scan was finished already */
+ if (nci->scan_done) {
+ goto end;
+ }
+
+ erom2ptr = nci->erom2base;
+ sii->axi_num_wrappers = 0;
+
+ while (TRUE) {
+ iface_desc_0 = R_REG(nci->osh, erom2ptr);
+ if (iface_desc_0 == ID_ENDMARKER) {
+ NCI_INFO(("\nnci_scan: Reached end of EROM2 with total cores=%d \n",
+ nci->num_cores));
+ break;
+ }
+
+		/* Save current Iface1 Addr for comparison */
+ cur_iface_desc_1_ptr = GET_NEXT_EROM_ADDR(erom2ptr);
+
+ /* Get CoreInfo, InterfaceCfg, CoreIdx */
+ core = nci_initial_parse(nci, cur_iface_desc_1_ptr, &core_idx);
+
+ core->desc = (interface_desc_t *)MALLOCZ(
+ nci->osh, (sizeof(*(core->desc)) * core->iface_cnt));
+ if (core->desc == NULL) {
+ NCI_ERROR(("nci_scan: Mem Alloc failed for Iface and Addr "
+ "Descriptor\n"));
+ err = NCI_NOMEM;
+ break;
+ }
+
+ for (iface_idx = 0u; iface_idx < core->iface_cnt; iface_idx++) {
+ desc = &core->desc[iface_idx];
+
+ iface_desc_0 = R_REG(nci->osh, erom2ptr);
+ NCI_INC_ADDR(erom2ptr, NCI_WORD_SIZE);
+ iface_desc_1 = R_REG(nci->osh, erom2ptr);
+ NCI_INC_ADDR(erom2ptr, NCI_WORD_SIZE);
+
+ /* Interface Descriptor Register */
+ nci_save_iface1_reg(desc, iface_desc_1);
+ if (desc->master && desc->num_addr_reg) {
+ err = NCI_MASTER_INVALID_ADDR;
+ goto end;
+ }
+
+ wordoffset = GET_WORDOFFSET(iface_desc_1);
+
+ /* NodePointer Register */
+ desc->iface_desc_0 = iface_desc_0;
+ desc->node_ptr = GET_NODEPTR(iface_desc_0);
+ desc->node_type = GET_NODETYPE(iface_desc_0);
+
+ if (axi_wrapper && (sii->axi_num_wrappers < SI_MAX_AXI_WRAPPERS)) {
+ axi_wrapper[sii->axi_num_wrappers].mfg = CORE_MFG(core->coreinfo);
+ axi_wrapper[sii->axi_num_wrappers].cid = CORE_ID(core->coreinfo);
+ axi_wrapper[sii->axi_num_wrappers].rev = CORE_REV(core->coreinfo);
+ axi_wrapper[sii->axi_num_wrappers].wrapper_type = desc->master;
+ axi_wrapper[sii->axi_num_wrappers].wrapper_addr = desc->node_ptr;
+ sii->axi_num_wrappers++;
+ }
+
+ NCI_INFO(("nci_scan: %s NodePointer:%#x Type=%s NODEPTR=%#x \n",
+ desc->master?"Master":"Slave", desc->iface_desc_0,
+ desc->node_type?"NIC-400":"BOOKER", desc->node_ptr));
+
+ /* Slave Port Addresses */
+ if (!desc->master) {
+ erom2ptr = nci_save_slaveport_addr(nci, desc, erom2ptr);
+ if (erom2ptr == NULL) {
+ NCI_ERROR(("nci_scan: Invalid EROM2PTR\n"));
+ err = NCI_INVALID_EROM2PTR;
+ goto end;
+ }
+ }
+
+ /* Current loop ends with next iface_desc_0 */
+ }
+
+ if (wordoffset == 0u) {
+ NCI_INFO(("\nnci_scan: EROM PARSING found END 'wordoffset=%#x' "
+ "with total cores=%d \n", wordoffset, nci->num_cores));
+ break;
+ }
+ }
+ nci->scan_done = TRUE;
+
+end:
+ if (err) {
+ NCI_ERROR(("nci_scan: Failed with Code %d\n", err));
+ nci->num_cores = 0;
+ ASSERT(0u);
+ }
+
+ return nci->num_cores;
+}
+
+/**
+ * Function : nci_dump_erom
+ * Description : Function dumps the EROM information of the cores saved in the
+ * 'nci_info_t' data structure.
+ *
+ * @parameter[in]
+ * @nci : This is 'nci_info_t' data structure, where all EROM parsed Cores are saved.
+ *
+ * Return : void
+ */
+void
+BCMATTACHFN(nci_dump_erom)(void *ctx)
+{
+ nci_info_t *nci = (nci_info_t *)ctx;
+ nci_cores_t *core;
+ interface_desc_t *desc;
+ slave_port_t *sp;
+ uint32 core_idx, addr_idx, iface_idx;
+ uint32 core_info;
+
+ BCM_REFERENCE(core_info);
+
+ NCI_INFO(("\nnci_dump_erom: -- EROM Dump --\n"));
+ for (core_idx = 0u; core_idx < nci->num_cores; core_idx++) {
+ core = &nci->cores[core_idx];
+
+ /* Core Info Register */
+ core_info = core->coreinfo;
+ NCI_INFO(("\nnci_dump_erom: core_idx=%d COREINFO:%#x CId:%#x CUnit:%#x CRev=%#x "
+ "CMfg=%#x\n", core_idx, core_info, CORE_ID(core_info), core->coreunit,
+ CORE_REV(core_info), CORE_MFG(core_info)));
+
+ /* Interface Config Register */
+ NCI_INFO(("nci_dump_erom: IfaceCfg=%#x IfaceCnt=%#x \n",
+ core->iface_cfg, core->iface_cnt));
+
+ for (iface_idx = 0u; iface_idx < core->iface_cnt; iface_idx++) {
+ desc = &core->desc[iface_idx];
+ /* NodePointer Register */
+ NCI_INFO(("nci_dump_erom: %s iface_desc_0 Master=%#x MASTER_WRAP=%#x "
+ "Type=%s \n", desc->master?"Master":"Slave", desc->iface_desc_0,
+ desc->node_ptr,
+ (desc->node_type)?"NIC-400":"BOOKER"));
+
+ /* Interface Descriptor Register */
+ NCI_INFO(("nci_dump_erom: %s InterfaceDesc:%#x WOffset=%#x NoAddrReg=%#x "
+ "%s_Offset=%#x\n", desc->master?"Master":"Slave",
+ desc->iface_desc_1, GET_WORDOFFSET(desc->iface_desc_1),
+ desc->num_addr_reg,
+ desc->coretype?"EROM1":"OOBR", GET_COREOFFSET(desc->iface_desc_1)));
+
+ /* Slave Port Addresses */
+ sp = desc->sp;
+ if (!sp) {
+ continue;
+ }
+ for (addr_idx = 0u; addr_idx < desc->num_addr_reg; addr_idx++) {
+ if (sp[addr_idx].extaddrl) {
+ NCI_INFO(("nci_dump_erom: SlavePortAddr[%#x]: AddrDesc=%#x"
+ " al=%#x ah=%#x extal=%#x extah=%#x\n", addr_idx,
+ sp[addr_idx].adesc, sp[addr_idx].addrl,
+ sp[addr_idx].addrh, sp[addr_idx].extaddrl,
+ sp[addr_idx].extaddrh));
+ } else {
+ NCI_INFO(("nci_dump_erom: SlavePortAddr[%#x]: AddrDesc=%#x"
+ " al=%#x ah=%#x\n", addr_idx, sp[addr_idx].adesc,
+ sp[addr_idx].addrl, sp[addr_idx].addrh));
+ }
+ }
+ }
+ }
+
+ return;
+}
+
+/*
+ * Switch to 'coreidx', issue a single arbitrary 32bit register mask & set operation,
+ * switch back to the original core, and return the new value.
+ */
+uint
+BCMPOSTTRAPFN(nci_corereg)(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
+{
+ uint origidx = 0;
+ volatile uint32 *r = NULL;
+ uint w;
+ bcm_int_bitmask_t intr_val;
+ bool fast = FALSE;
+ si_info_t *sii = SI_INFO(sih);
+ nci_info_t *nci = sii->nci_info;
+ nci_cores_t *cores_info = &nci->cores[coreidx];
+
+ NCI_TRACE(("nci_corereg coreidx %u regoff %u mask %u val %u\n",
+ coreidx, regoff, mask, val));
+ ASSERT(GOODIDX(coreidx, nci->num_cores));
+ ASSERT(regoff < SI_CORE_SIZE);
+ ASSERT((val & ~mask) == 0);
+
+ if (coreidx >= SI_MAXCORES) {
+ return 0;
+ }
+
+ if (BUSTYPE(sih->bustype) == SI_BUS) {
+ /* If internal bus, we can always get at everything */
+ uint32 curmap = nci_get_curmap(nci, coreidx);
+ BCM_REFERENCE(curmap);
+
+ fast = TRUE;
+ /* map if does not exist */
+ if (!cores_info->regs) {
+ cores_info->regs = REG_MAP(curmap, SI_CORE_SIZE);
+ ASSERT(GOODREGS(cores_info->regs));
+ }
+ r = (volatile uint32 *)((volatile uchar *)cores_info->regs + regoff);
+ } else if (BUSTYPE(sih->bustype) == PCI_BUS) {
+ /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
+
+ if ((cores_info->coreid == CC_CORE_ID) && SI_FAST(sii)) {
+ /* Chipc registers are mapped at 12KB */
+
+ fast = TRUE;
+ r = (volatile uint32 *)((volatile char *)sii->curmap +
+ PCI_16KB0_CCREGS_OFFSET + regoff);
+ } else if (sii->pub.buscoreidx == coreidx) {
+			/* pci registers are either in the last 2KB of an 8KB window
+ * or, in pcie and pci rev 13 at 8KB
+ */
+ fast = TRUE;
+ if (SI_FAST(sii)) {
+ r = (volatile uint32 *)((volatile char *)sii->curmap +
+ PCI_16KB0_PCIREGS_OFFSET + regoff);
+ } else {
+ r = (volatile uint32 *)((volatile char *)sii->curmap +
+ ((regoff >= SBCONFIGOFF) ?
+ PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) + regoff);
+ }
+ }
+ }
+
+ if (!fast) {
+ INTR_OFF(sii, &intr_val);
+
+ /* save current core index */
+ origidx = si_coreidx(&sii->pub);
+
+ /* switch core */
+ r = (volatile uint32*)((volatile uchar*)nci_setcoreidx(&sii->pub, coreidx) +
+ regoff);
+ }
+ ASSERT(r != NULL);
+
+ /* mask and set */
+ if (mask || val) {
+ w = (R_REG(sii->osh, r) & ~mask) | val;
+ W_REG(sii->osh, r, w);
+ }
+
+ /* readback */
+ w = R_REG(sii->osh, r);
+
+ if (!fast) {
+ /* restore core index */
+ if (origidx != coreidx) {
+ nci_setcoreidx(&sii->pub, origidx);
+ }
+ INTR_RESTORE(sii, &intr_val);
+ }
+
+ return (w);
+}
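+
+/* Usage sketch (illustrative, assuming chipcommon sits at core index 0):
+ * pass mask = 0 and val = 0 for a pure read, e.g.
+ *
+ *	chipid = nci_corereg(sih, 0, OFFSETOF(chipcregs_t, chipid), 0, 0);
+ *
+ * A non-zero mask/val pair performs a read-modify-write and the value read
+ * back after the write is returned.
+ */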
+
+uint
+nci_corereg_writeonly(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
+{
+ uint origidx = 0;
+ volatile uint32 *r = NULL;
+ uint w = 0;
+ bcm_int_bitmask_t intr_val;
+ bool fast = FALSE;
+ si_info_t *sii = SI_INFO(sih);
+ nci_info_t *nci = sii->nci_info;
+ nci_cores_t *cores_info = &nci->cores[coreidx];
+
+ NCI_TRACE(("nci_corereg_writeonly() coreidx %u regoff %u mask %u val %u\n",
+ coreidx, regoff, mask, val));
+
+ ASSERT(GOODIDX(coreidx, nci->num_cores));
+ ASSERT(regoff < SI_CORE_SIZE);
+ ASSERT((val & ~mask) == 0);
+
+ if (coreidx >= SI_MAXCORES) {
+ return 0;
+ }
+
+ if (BUSTYPE(sih->bustype) == SI_BUS) {
+ /* If internal bus, we can always get at everything */
+ uint32 curmap = nci_get_curmap(nci, coreidx);
+ BCM_REFERENCE(curmap);
+ fast = TRUE;
+ /* map if does not exist */
+ if (!cores_info->regs) {
+ cores_info->regs = REG_MAP(curmap, SI_CORE_SIZE);
+ ASSERT(GOODREGS(cores_info->regs));
+ }
+ r = (volatile uint32 *)((volatile uchar *)cores_info->regs + regoff);
+ } else if (BUSTYPE(sih->bustype) == PCI_BUS) {
+ /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
+
+ if ((cores_info->coreid == CC_CORE_ID) && SI_FAST(sii)) {
+ /* Chipc registers are mapped at 12KB */
+
+ fast = TRUE;
+ r = (volatile uint32 *)((volatile char *)sii->curmap +
+ PCI_16KB0_CCREGS_OFFSET + regoff);
+ } else if (sii->pub.buscoreidx == coreidx) {
+			/* pci registers are either in the last 2KB of an 8KB window
+ * or, in pcie and pci rev 13 at 8KB
+ */
+ fast = TRUE;
+ if (SI_FAST(sii)) {
+ r = (volatile uint32 *)((volatile char *)sii->curmap +
+ PCI_16KB0_PCIREGS_OFFSET + regoff);
+ } else {
+ r = (volatile uint32 *)((volatile char *)sii->curmap +
+ ((regoff >= SBCONFIGOFF) ?
+ PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) + regoff);
+ }
+ }
+ }
+
+ if (!fast) {
+ INTR_OFF(sii, &intr_val);
+
+ /* save current core index */
+ origidx = si_coreidx(&sii->pub);
+
+ /* switch core */
+ r = (volatile uint32*) ((volatile uchar*) nci_setcoreidx(&sii->pub, coreidx) +
+ regoff);
+ }
+ ASSERT(r != NULL);
+
+ /* mask and set */
+ if (mask || val) {
+ w = (R_REG(sii->osh, r) & ~mask) | val;
+ W_REG(sii->osh, r, w);
+ }
+
+ if (!fast) {
+ /* restore core index */
+ if (origidx != coreidx) {
+ nci_setcoreidx(&sii->pub, origidx);
+ }
+
+ INTR_RESTORE(sii, &intr_val);
+ }
+
+ return (w);
+}
+
+/*
+ * If there is no need for fiddling with interrupts or core switches (typically silicon
+ * back plane registers, pci registers and chipcommon registers), this function
+ * translates the register offset on this core to a mapped address. This address can
+ * be used for W_REG/R_REG directly.
+ *
+ * For accessing registers that would need a core switch, this function will return
+ * NULL.
+ */
+volatile uint32 *
+nci_corereg_addr(si_t *sih, uint coreidx, uint regoff)
+{
+ volatile uint32 *r = NULL;
+ bool fast = FALSE;
+ si_info_t *sii = SI_INFO(sih);
+ nci_info_t *nci = sii->nci_info;
+ nci_cores_t *cores_info = &nci->cores[coreidx];
+
+ NCI_TRACE(("nci_corereg_addr() coreidx %u regoff %u\n", coreidx, regoff));
+
+ ASSERT(GOODIDX(coreidx, nci->num_cores));
+ ASSERT(regoff < SI_CORE_SIZE);
+
+ if (coreidx >= SI_MAXCORES) {
+		return NULL;
+ }
+
+ if (BUSTYPE(sih->bustype) == SI_BUS) {
+ uint32 curmap = nci_get_curmap(nci, coreidx);
+ BCM_REFERENCE(curmap);
+
+ /* If internal bus, we can always get at everything */
+ fast = TRUE;
+ /* map if does not exist */
+ if (!cores_info->regs) {
+ cores_info->regs = REG_MAP(curmap, SI_CORE_SIZE);
+ ASSERT(GOODREGS(cores_info->regs));
+ }
+ r = (volatile uint32 *)((volatile uchar *)cores_info->regs + regoff);
+
+ } else if (BUSTYPE(sih->bustype) == PCI_BUS) {
+ /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
+
+ if ((cores_info->coreid == CC_CORE_ID) && SI_FAST(sii)) {
+ /* Chipc registers are mapped at 12KB */
+
+ fast = TRUE;
+ r = (volatile uint32 *)((volatile char *)sii->curmap +
+ PCI_16KB0_CCREGS_OFFSET + regoff);
+ } else if (sii->pub.buscoreidx == coreidx) {
+			/* pci registers are either in the last 2KB of an 8KB window
+ * or, in pcie and pci rev 13 at 8KB
+ */
+ fast = TRUE;
+ if (SI_FAST(sii)) {
+ r = (volatile uint32 *)((volatile char *)sii->curmap +
+ PCI_16KB0_PCIREGS_OFFSET + regoff);
+ } else {
+ r = (volatile uint32 *)((volatile char *)sii->curmap +
+ ((regoff >= SBCONFIGOFF) ?
+ PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) + regoff);
+ }
+ }
+ }
+
+ if (!fast) {
+ ASSERT(sii->curidx == coreidx);
+ r = (volatile uint32*) ((volatile uchar*)sii->curmap + regoff);
+ }
+
+ return (r);
+}
+
+uint
+BCMPOSTTRAPFN(nci_findcoreidx)(const si_t *sih, uint coreid, uint coreunit)
+{
+ si_info_t *sii = SI_INFO(sih);
+ nci_info_t *nci = sii->nci_info;
+ uint core_idx;
+
+ NCI_TRACE(("nci_findcoreidx() coreid %u coreunit %u\n", coreid, coreunit));
+
+ for (core_idx = 0; core_idx < nci->num_cores; core_idx++) {
+ if ((nci->cores[core_idx].coreid == coreid) &&
+ (nci->cores[core_idx].coreunit == coreunit)) {
+ return core_idx;
+ }
+ }
+ return BADIDX;
+}
+
+static uint32
+_nci_get_slave_addr_size(nci_info_t *nci, uint coreidx, uint32 slave_port_idx, uint base_idx)
+{
+ uint32 size;
+ uint32 add_desc;
+
+ NCI_TRACE(("_nci_get_slave_addr_size() coreidx %u slave_port_idx %u base_idx %u\n",
+ coreidx, slave_port_idx, base_idx));
+
+ add_desc = nci->cores[coreidx].desc[slave_port_idx].sp[base_idx].adesc;
+
+ size = add_desc & SLAVEPORT_ADDR_SIZE_MASK;
+ return ADDR_SIZE(size);
+}
+
+static uint32
+BCMPOSTTRAPFN(_nci_get_curmap)(nci_info_t *nci, uint coreidx, uint slave_port_idx, uint base_idx)
+{
+ /* TODO: Is handling of 64 bit addressing required */
+ NCI_TRACE(("_nci_get_curmap coreidx %u slave_port_idx %u base_idx %u\n",
+ coreidx, slave_port_idx, base_idx));
+ return nci->cores[coreidx].desc[slave_port_idx].sp[base_idx].addrl;
+}
+
+/* Get the interface descriptor which is connected to APB and return its address */
+static uint32
+BCMPOSTTRAPFN(nci_get_curmap)(nci_info_t *nci, uint coreidx)
+{
+ nci_cores_t *core_info = &nci->cores[coreidx];
+ uint32 iface_idx;
+
+ NCI_TRACE(("nci_get_curmap coreidx %u\n", coreidx));
+ for (iface_idx = 0; iface_idx < core_info->iface_cnt; iface_idx++) {
+ NCI_TRACE(("nci_get_curmap iface_idx %u BP_ID %u master %u\n",
+ iface_idx, ID_BPID(core_info->desc[iface_idx].iface_desc_1),
+ IS_MASTER(core_info->desc[iface_idx].iface_desc_1)));
+
+ /* If core is a Backplane or Bridge, then its slave port
+ * will give the pointer to access registers.
+ */
+ if (!IS_MASTER(core_info->desc[iface_idx].iface_desc_1) &&
+ (IS_BACKPLANE(core_info->coreinfo) ||
+ APB_INF(core_info->desc[iface_idx]))) {
+ return _nci_get_curmap(nci, coreidx, iface_idx, 0);
+ }
+ }
+
+ /* no valid slave port address is found */
+ return NCI_BAD_REG;
+}
+
+static uint32
+BCMPOSTTRAPFN(_nci_get_curwrap)(nci_info_t *nci, uint coreidx, uint wrapper_idx)
+{
+ return nci->cores[coreidx].desc[wrapper_idx].node_ptr;
+}
+
+static uint32
+BCMPOSTTRAPFN(nci_get_curwrap)(nci_info_t *nci, uint coreidx)
+{
+ nci_cores_t *core_info = &nci->cores[coreidx];
+ uint32 iface_idx;
+ NCI_TRACE(("nci_get_curwrap coreidx %u\n", coreidx));
+ for (iface_idx = 0; iface_idx < core_info->iface_cnt; iface_idx++) {
+ NCI_TRACE(("nci_get_curwrap iface_idx %u BP_ID %u master %u\n",
+ iface_idx, ID_BPID(core_info->desc[iface_idx].iface_desc_1),
+ IS_MASTER(core_info->desc[iface_idx].iface_desc_1)));
+ if ((ID_BPID(core_info->desc[iface_idx].iface_desc_1) == BP_BOOKER) ||
+ (ID_BPID(core_info->desc[iface_idx].iface_desc_1) == BP_NIC400)) {
+ return _nci_get_curwrap(nci, coreidx, iface_idx);
+ }
+ }
+
+ /* no valid master wrapper found */
+ return NCI_BAD_REG;
+}
+
+static void
+_nci_setcoreidx_pcie_bus(si_t *sih, volatile void **regs, uint32 curmap,
+ uint32 curwrap)
+{
+ si_info_t *sii = SI_INFO(sih);
+
+ *regs = sii->curmap;
+ switch (sii->slice) {
+ case 0: /* main/first slice */
+ /* point bar0 window */
+ OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, PCIE_WRITE_SIZE, curmap);
+		/* TODO: why is curwrap zero, i.e., no master wrapper? */
+ if (curwrap != 0) {
+ if (PCIE_GEN2(sii)) {
+ OSL_PCI_WRITE_CONFIG(sii->osh, PCIE2_BAR0_WIN2,
+ PCIE_WRITE_SIZE, curwrap);
+ } else {
+ OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN2,
+ PCIE_WRITE_SIZE, curwrap);
+ }
+ }
+ break;
+ case 1: /* aux/second slice */
+ /* PCIE GEN2 only for other slices */
+ if (!PCIE_GEN2(sii)) {
+ /* other slices not supported */
+ NCI_ERROR(("pci gen not supported for slice 1\n"));
+ ASSERT(0);
+ break;
+ }
+
+ /* 0x4000 - 0x4fff: enum space 0x5000 - 0x5fff: wrapper space */
+
+ *regs = (volatile uint8 *)*regs + PCI_SEC_BAR0_WIN_OFFSET;
+ sii->curwrap = (void *)((uintptr)*regs + SI_CORE_SIZE);
+
+ /* point bar0 window */
+ OSL_PCI_WRITE_CONFIG(sii->osh, PCIE2_BAR0_CORE2_WIN, PCIE_WRITE_SIZE, curmap);
+ OSL_PCI_WRITE_CONFIG(sii->osh, PCIE2_BAR0_CORE2_WIN2, PCIE_WRITE_SIZE, curwrap);
+ break;
+
+ case 2: /* scan/third slice */
+ /* PCIE GEN2 only for other slices */
+ if (!PCIE_GEN2(sii)) {
+ /* other slices not supported */
+			NCI_ERROR(("pci gen not supported for slice 2\n"));
+ ASSERT(0);
+ break;
+ }
+ /* 0x9000 - 0x9fff: enum space 0xa000 - 0xafff: wrapper space */
+ *regs = (volatile uint8 *)*regs + PCI_SEC_BAR0_WIN_OFFSET;
+ sii->curwrap = (void *)((uintptr)*regs + SI_CORE_SIZE);
+
+ /* point bar0 window */
+ nci_corereg(sih, sih->buscoreidx, PCIE_TER_BAR0_WIN, ~0, curmap);
+ nci_corereg(sih, sih->buscoreidx, PCIE_TER_BAR0_WRAPPER, ~0, curwrap);
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+}
+
+static volatile void *
+BCMPOSTTRAPFN(_nci_setcoreidx)(si_t *sih, uint coreidx)
+{
+ si_info_t *sii = SI_INFO(sih);
+ nci_info_t *nci = sii->nci_info;
+ nci_cores_t *cores_info = &nci->cores[coreidx];
+ uint32 curmap, curwrap;
+ volatile void *regs = NULL;
+
+ NCI_TRACE(("_nci_setcoreidx coreidx %u\n", coreidx));
+ if (!GOODIDX(coreidx, nci->num_cores)) {
+ return (NULL);
+ }
+ /*
+ * If the user has provided an interrupt mask enabled function,
+ * then assert interrupts are disabled before switching the core.
+ */
+ ASSERT((sii->intrsenabled_fn == NULL) ||
+ !(*(sii)->intrsenabled_fn)((sii)->intr_arg));
+
+ curmap = nci_get_curmap(nci, coreidx);
+ curwrap = nci_get_curwrap(nci, coreidx);
+
+ switch (BUSTYPE(sih->bustype)) {
+ case SI_BUS:
+ /* map if does not exist */
+ if (!cores_info->regs) {
+ cores_info->regs = REG_MAP(curmap, SI_CORE_SIZE);
+ ASSERT(GOODREGS(cores_info->regs));
+ }
+ sii->curmap = regs = cores_info->regs;
+ sii->curwrap = REG_MAP(curwrap, SI_CORE_SIZE);
+ break;
+
+ case PCI_BUS:
+ _nci_setcoreidx_pcie_bus(sih, &regs, curmap, curwrap);
+ break;
+
+ default:
+		NCI_ERROR(("_nci_setcoreidx Invalid bustype %d\n", BUSTYPE(sih->bustype)));
+ break;
+ }
+ sii->curidx = coreidx;
+ return regs;
+}
+
+volatile void *
+BCMPOSTTRAPFN(nci_setcoreidx)(si_t *sih, uint coreidx)
+{
+ return _nci_setcoreidx(sih, coreidx);
+}
+
+volatile void *
+BCMPOSTTRAPFN(nci_setcore)(si_t *sih, uint coreid, uint coreunit)
+{
+ si_info_t *sii = SI_INFO(sih);
+ nci_info_t *nci = sii->nci_info;
+ uint core_idx;
+
+ NCI_TRACE(("nci_setcore coreidx %u coreunit %u\n", coreid, coreunit));
+ core_idx = nci_findcoreidx(sih, coreid, coreunit);
+
+ if (!GOODIDX(core_idx, nci->num_cores)) {
+ return (NULL);
+ }
+ return nci_setcoreidx(sih, core_idx);
+}
+
+/* Get the value of the register at offset "offset" of currently configured core */
+uint
+BCMPOSTTRAPFN(nci_get_wrap_reg)(const si_t *sih, uint32 offset, uint32 mask, uint32 val)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ uint32 *addr = (uint32 *) ((uchar *)(sii->curwrap) + offset);
+ NCI_TRACE(("nci_wrap_reg offset %u mask %u val %u\n", offset, mask, val));
+
+ if (mask || val) {
+ uint32 w = R_REG(sii->osh, addr);
+ w &= ~mask;
+ w |= val;
+ W_REG(sii->osh, addr, w);
+ }
+ return (R_REG(sii->osh, addr));
+}
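+
+/* Usage sketch (illustrative): with the target core selected via
+ * nci_setcoreidx(), nci_get_wrap_reg(sih, OFFSETOF(amni_regs_t,
+ * idm_reset_ctrl), 0, 0) reads the wrapper's reset-control word; a non-zero
+ * mask/val pair sets bits via read-modify-write before the read-back.
+ */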
+
+uint
+nci_corevendor(const si_t *sih)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ nci_info_t *nci = sii->nci_info;
+
+ NCI_TRACE(("nci_corevendor coreidx %u\n", sii->curidx));
+ return (nci->cores[sii->curidx].coreinfo & COREINFO_MFG_MASK) >> COREINFO_MFG_SHIFT;
+}
+
+uint
+BCMPOSTTRAPFN(nci_corerev)(const si_t *sih)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ nci_info_t *nci = sii->nci_info;
+ uint coreidx = sii->curidx;
+
+ NCI_TRACE(("nci_corerev coreidx %u\n", coreidx));
+
+ return (nci->cores[coreidx].coreinfo & COREINFO_REV_MASK) >> COREINFO_REV_SHIFT;
+}
+
+uint
+nci_corerev_minor(const si_t *sih)
+{
+ return (nci_core_sflags(sih, 0, 0) >> SISF_MINORREV_D11_SHIFT) &
+ SISF_MINORREV_D11_MASK;
+}
+
+uint
+BCMPOSTTRAPFN(nci_coreid)(const si_t *sih, uint coreidx)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ nci_info_t *nci = sii->nci_info;
+
+ NCI_TRACE(("nci_coreid coreidx %u\n", coreidx));
+ return nci->cores[coreidx].coreid;
+}
+
+/** return total coreunit of coreid or zero if not found */
+uint
+BCMPOSTTRAPFN(nci_numcoreunits)(const si_t *sih, uint coreid)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ nci_info_t *nci = sii->nci_info;
+ uint found = 0;
+ uint i;
+
+	NCI_TRACE(("nci_numcoreunits coreid %u\n", coreid));
+
+ for (i = 0; i < nci->num_cores; i++) {
+ if (nci->cores[i].coreid == coreid) {
+ found++;
+ }
+ }
+
+ return found;
+}
+
+/* Return the address of the nth address space in the current core
+ * Arguments:
+ * sih : Pointer to struct si_t
+ * spidx : slave port index
+ * baidx : base address index
+ */
+uint32
+nci_addr_space(const si_t *sih, uint spidx, uint baidx)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ uint cidx;
+
+ NCI_TRACE(("nci_addr_space spidx %u baidx %u\n", spidx, baidx));
+ cidx = sii->curidx;
+ return _nci_get_curmap(sii->nci_info, cidx, spidx, baidx);
+}
+
+/* Return the size of the nth address space in the current core
+* Arguments:
+* sih : Pointer to struct si_t
+* spidx : slave port index
+* baidx : base address index
+*/
+uint32
+nci_addr_space_size(const si_t *sih, uint spidx, uint baidx)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ uint cidx;
+
+ NCI_TRACE(("nci_addr_space_size spidx %u baidx %u\n", spidx, baidx));
+
+ cidx = sii->curidx;
+ return _nci_get_slave_addr_size(sii->nci_info, cidx, spidx, baidx);
+}
+
+/*
+ * Performs soft reset of attached device.
+ * Writes have the following effect:
+ * 0b1 Request attached device to enter reset.
+ * Write is ignored if it occurs before soft reset exit has occurred.
+ *
+ * 0b0 Request attached device to exit reset.
+ * Write is ignored if it occurs before soft reset entry has occurred.
+ *
+ * Software can poll this register to determine whether soft reset entry or exit has occurred,
+ * using the following values:
+ * 0b1 Indicates that the device is in reset.
+ * 0b0 Indicates that the device is not in reset.
+ *
+ * Note
+ * The register value updates to reflect a request for reset entry or reset exit,
+ * but the update can only occur after required internal conditions are met.
+ * Until these conditions are met, a read to the register returns the old value.
+ * For example, outstanding transactions currently being handled must complete before
+ * the register value updates.
+ *
+ * To ensure reset propagation within the device,
+ * it is the responsibility of software to allow enough cycles after
+ * soft reset assertion is reflected in the reset control register
+ * before exiting soft reset by triggering a write of 0b0.
+ * If this responsibility is not met, the behavior is undefined or unpredictable.
+ *
+ * When the register value is 0b1,
+ * the external soft reset pin that connects to the attached AXI master or slave
+ * device is asserted, using the correct polarity of the reset pin.
+ * When the register value is 0b0, the external soft reset
+ * pin that connects to the attached AXI master or slave device is deasserted,
+ * using the correct polarity of the reset pin.
+ */
+static void
+BCMPOSTTRAPFN(_nci_core_reset)(const si_t *sih, uint32 bits, uint32 resetbits)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ nci_info_t *nci = sii->nci_info;
+ amni_regs_t *amni = (amni_regs_t *)(uintptr)sii->curwrap;
+ volatile dmp_regs_t *io;
+ volatile uint32* erom_base = 0u;
+ uint32 orig_bar0_win1 = 0u;
+ volatile uint32 dummy;
+ volatile uint32 reg_read;
+ uint32 dmp_write_value;
+
+ /* Point to OOBR base */
+ switch (BUSTYPE(sih->bustype)) {
+ case SI_BUS:
+ erom_base = (volatile uint32*)REG_MAP(GET_OOBR_BASE(nci->cc_erom2base),
+ SI_CORE_SIZE);
+ break;
+
+ case PCI_BUS:
+ /*
+		 * Save the original BAR0 Win1. In nci, the dmpctrl & dmpstatus
+		 * registers are implemented in the EROM section. REF -
+		 * https://docs.google.com/document/d/1HE7hAmvdoNFSnMI7MKQV1qVrFBZVsgLdNcILNOA2C8c
+		 * This requires an additional BAR0 window mapping to the EROM
+		 * section in chipcommon.
+ */
+ orig_bar0_win1 = OSL_PCI_READ_CONFIG(nci->osh, PCI_BAR0_WIN,
+ PCI_ACCESS_SIZE);
+
+ OSL_PCI_WRITE_CONFIG(nci->osh, PCI_BAR0_WIN, PCI_ACCESS_SIZE,
+ GET_OOBR_BASE(nci->cc_erom2base));
+ erom_base = (volatile uint32*)sii->curmap;
+ break;
+
+ default:
+ NCI_ERROR(("_nci_core_reset Invalid bustype %d\n", BUSTYPE(sih->bustype)));
+ break;
+ }
+
+ /* Point to DMP Control */
+ io = (dmp_regs_t*)(NCI_ADD_ADDR(erom_base, nci->cores[sii->curidx].dmp_regs_off));
+
+ NCI_TRACE(("_nci_core_reset reg 0x%p io %p\n", amni, io));
+
+ /* Put core into reset */
+ W_REG(nci->osh, &amni->idm_reset_ctrl, NI_IDM_RESET_ENTRY);
+
+ /* poll for the reset to happen */
+ while (TRUE) {
+ /* Wait until reset is effective */
+ SPINWAIT(((reg_read = R_REG(nci->osh, &amni->idm_reset_ctrl)) !=
+ NI_IDM_RESET_ENTRY), NCI_SPINWAIT_TIMEOUT);
+
+ if (reg_read == NI_IDM_RESET_ENTRY) {
+ break;
+ }
+ }
+
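+	/* While the core is held in reset, enable its clock with force-gated-clock (FGC) */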
+ dmp_write_value = (bits | resetbits | SICF_FGC | SICF_CLOCK_EN);
+
+ W_REG(nci->osh, &io->dmpctrl, dmp_write_value);
+
+ /* poll for the dmp_reg write to happen */
+ while (TRUE) {
+ /* Wait until reset is effective */
+ SPINWAIT(((reg_read = R_REG(nci->osh, &io->dmpctrl)) !=
+ dmp_write_value), NCI_SPINWAIT_TIMEOUT);
+ if (reg_read == dmp_write_value) {
+ break;
+ }
+ }
+
+ /* take core out of reset */
+ W_REG(nci->osh, &amni->idm_reset_ctrl, 0u);
+
+ /* poll for the core to come out of reset */
+ while (TRUE) {
+		/* Wait until reset exit takes effect */
+ SPINWAIT(((reg_read = R_REG(nci->osh, &amni->idm_reset_ctrl)) !=
+ NI_IDM_RESET_EXIT), NCI_SPINWAIT_TIMEOUT);
+ if (reg_read == NI_IDM_RESET_EXIT) {
+ break;
+ }
+ }
+
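+	/* Out of reset: clear FGC and the reset bits, keeping the core clock enabled */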
+ dmp_write_value = (bits | SICF_CLOCK_EN);
+	W_REG(nci->osh, &io->dmpctrl, dmp_write_value);
+	/* poll for the dmp_reg write to take effect */
+ while (TRUE) {
+ SPINWAIT(((reg_read = R_REG(nci->osh, &io->dmpctrl)) !=
+ dmp_write_value), NCI_SPINWAIT_TIMEOUT);
+ if (reg_read == dmp_write_value) {
+ break;
+ }
+ }
+
+ dummy = R_REG(nci->osh, &io->dmpctrl);
+ BCM_REFERENCE(dummy);
+
+ /* Point back to original base */
+ if (BUSTYPE(sih->bustype) == PCI_BUS) {
+ OSL_PCI_WRITE_CONFIG(nci->osh, PCI_BAR0_WIN, PCI_ACCESS_SIZE, orig_bar0_win1);
+ }
+}
+
+/* reset and re-enable a core
+ */
+void
+BCMPOSTTRAPFN(nci_core_reset)(const si_t *sih, uint32 bits, uint32 resetbits)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ int32 iface_idx = 0u;
+ nci_info_t *nci = sii->nci_info;
+ nci_cores_t *core = &nci->cores[sii->curidx];
+
+ /* If Wrapper is of NIC400, then call AI functionality */
+ for (iface_idx = core->iface_cnt-1; iface_idx >= 0; iface_idx--) {
+ if (!(BOOKER_INF(core->desc[iface_idx]) || NIC_INF(core->desc[iface_idx]))) {
+ continue;
+ }
+#ifdef BOOKER_NIC400_INF
+ if (core->desc[iface_idx].node_type == NODE_TYPE_NIC400) {
+ ai_core_reset_ext(sih, bits, resetbits);
+ } else
+#endif /* BOOKER_NIC400_INF */
+ {
+ _nci_core_reset(sih, bits, resetbits);
+ }
+ }
+}
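+
+/*
+ * Illustrative call sequence (a sketch; D11_CORE_ID stands in for whichever
+ * core the caller actually targets): select the core, reset it with any
+ * core-specific control bits that must be held across the reset, then
+ * confirm it is back up.
+ *
+ *	if (nci_setcore(sih, D11_CORE_ID, 0) != NULL) {
+ *		nci_core_reset(sih, 0, 0);
+ *		ASSERT(nci_iscoreup(sih));
+ *	}
+ */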
+
+#ifdef BOOKER_NIC400_INF
+static int32
+BCMPOSTTRAPFN(nci_find_first_wrapper_idx)(nci_info_t *nci, uint32 coreidx)
+{
+ nci_cores_t *core_info = &nci->cores[coreidx];
+ uint32 iface_idx;
+
+ NCI_TRACE(("nci_find_first_wrapper_idx %u\n", coreidx));
+
+ for (iface_idx = 0; iface_idx < core_info->iface_cnt; iface_idx++) {
+ NCI_INFO(("nci_find_first_wrapper_idx: %u BP_ID %u master %u\n",
+ iface_idx, ID_BPID(core_info->desc[iface_idx].iface_desc_1),
+ IS_MASTER(core_info->desc[iface_idx].iface_desc_1)));
+
+ if ((ID_BPID(core_info->desc[iface_idx].iface_desc_1) == BP_BOOKER) ||
+ (ID_BPID(core_info->desc[iface_idx].iface_desc_1) == BP_NIC400)) {
+ return iface_idx;
+ }
+ }
+
+ /* no valid master wrapper found */
+ return NCI_BAD_INDEX;
+}
+#endif /* BOOKER_NIC400_INF */
+
+void
+nci_core_disable(const si_t *sih, uint32 bits)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ nci_info_t *nci = sii->nci_info;
+ uint32 reg_read;
+ volatile dmp_regs_t *io = NULL;
+ uint32 orig_bar0_win1 = 0u;
+ uint32 dmp_write_value;
+ amni_regs_t *amni = (amni_regs_t *)(uintptr)sii->curwrap;
+ nci_cores_t *core = &nci->cores[sii->curidx];
+ int32 iface_idx;
+
+ NCI_TRACE(("nci_core_disable\n"));
+
+ BCM_REFERENCE(core);
+ BCM_REFERENCE(iface_idx);
+
+#ifdef BOOKER_NIC400_INF
+ iface_idx = nci_find_first_wrapper_idx(nci, sii->curidx);
+
+ if (iface_idx < 0) {
+ NCI_ERROR(("nci_core_disable: First Wrapper is not found\n"));
+ ASSERT(0u);
+ return;
+ }
+
+ /* If Wrapper is of NIC400, then call AI functionality */
+ if (core->desc[iface_idx].master && (core->desc[iface_idx].node_type == NODE_TYPE_NIC400)) {
+ return ai_core_disable(sih, bits);
+ }
+#endif /* BOOKER_NIC400_INF */
+
+ ASSERT(GOODREGS(sii->curwrap));
+ reg_read = R_REG(nci->osh, &amni->idm_reset_ctrl);
+
+ /* if core is already in reset, just return */
+ if (reg_read == NI_IDM_RESET_ENTRY) {
+ return;
+ }
+
+ /* Put core into reset */
+ W_REG(nci->osh, &amni->idm_reset_ctrl, NI_IDM_RESET_ENTRY);
+ while (TRUE) {
+ /* Wait until reset is effected */
+ SPINWAIT(((reg_read = R_REG(nci->osh, &amni->idm_reset_ctrl)) !=
+ NI_IDM_RESET_ENTRY), NCI_SPINWAIT_TIMEOUT);
+ if (reg_read == NI_IDM_RESET_ENTRY) {
+ break;
+ }
+ }
+
+ /* Point to OOBR base */
+ switch (BUSTYPE(sih->bustype)) {
+ case SI_BUS:
+ io = (volatile dmp_regs_t*)
+ REG_MAP(GET_OOBR_BASE(nci->cc_erom2base), SI_CORE_SIZE);
+ break;
+
+ case PCI_BUS:
+ /* Save Original Bar0 Win1 */
+ orig_bar0_win1 =
+ OSL_PCI_READ_CONFIG(nci->osh, PCI_BAR0_WIN, PCI_ACCESS_SIZE);
+
+ OSL_PCI_WRITE_CONFIG(nci->osh, PCI_BAR0_WIN, PCI_ACCESS_SIZE,
+ GET_OOBR_BASE(nci->cc_erom2base));
+ io = (volatile dmp_regs_t*)sii->curmap;
+ break;
+
+ default:
+ NCI_ERROR(("nci_core_disable Invalid bustype %d\n", BUSTYPE(sih->bustype)));
+ break;
+
+ }
+
+ /* Point to DMP Control */
+ io = (dmp_regs_t*)(NCI_ADD_ADDR(io, nci->cores[sii->curidx].dmp_regs_off));
+
+ dmp_write_value = (bits | SICF_FGC | SICF_CLOCK_EN);
+ W_REG(nci->osh, &io->dmpctrl, dmp_write_value);
+
+ /* poll for the dmp_reg write to happen */
+ while (TRUE) {
+ /* Wait until reset is effected */
+ SPINWAIT(((reg_read = R_REG(nci->osh, &io->dmpctrl)) != dmp_write_value),
+ NCI_SPINWAIT_TIMEOUT);
+ if (reg_read == dmp_write_value) {
+ break;
+ }
+ }
+
+ /* Point back to original base */
+ if (BUSTYPE(sih->bustype) == PCI_BUS) {
+ OSL_PCI_WRITE_CONFIG(nci->osh, PCI_BAR0_WIN, PCI_ACCESS_SIZE, orig_bar0_win1);
+ }
+}
+
+bool
+BCMPOSTTRAPFN(nci_iscoreup)(const si_t *sih)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ nci_info_t *nci = sii->nci_info;
+ amni_regs_t *ni = (amni_regs_t *)(uintptr)sii->curwrap;
+ uint32 reset_ctrl;
+
+#ifdef BOOKER_NIC400_INF
+ nci_cores_t *core = &nci->cores[sii->curidx];
+ int32 iface_idx = nci_find_first_wrapper_idx(nci, sii->curidx);
+
+ if (iface_idx < 0) {
+ NCI_ERROR(("nci_iscoreup: First Wrapper is not found\n"));
+ ASSERT(0u);
+ return FALSE;
+ }
+
+ /* If Wrapper is of NIC400, then call AI functionality */
+ if (core->desc[iface_idx].master && (core->desc[iface_idx].node_type == NODE_TYPE_NIC400)) {
+ return ai_iscoreup(sih);
+ }
+#endif /* BOOKER_NIC400_INF */
+
+ NCI_TRACE(("nci_iscoreup\n"));
+ reset_ctrl = R_REG(nci->osh, &ni->idm_reset_ctrl);
+
+ return (reset_ctrl == NI_IDM_RESET_ENTRY) ? FALSE : TRUE;
+}
+
+/* TODO: OOB Router core is not available. Can be removed. */
+uint
+nci_intflag(si_t *sih)
+{
+ return 0;
+}
+
+uint
+nci_flag(si_t *sih)
+{
+ /* TODO: will be implemented if required for NCI */
+ return 0;
+}
+
+uint
+nci_flag_alt(const si_t *sih)
+{
+ /* TODO: will be implemented if required for NCI */
+ return 0;
+}
+
+void
+BCMATTACHFN(nci_setint)(const si_t *sih, int siflag)
+{
+ BCM_REFERENCE(sih);
+ BCM_REFERENCE(siflag);
+
+ /* TODO: Figure out how to set interrupt mask in nci */
+}
+
+/* TODO: OOB Router core is not available. Can this be removed, or is an alternate
+ * implementation needed?
+ */
+uint32
+nci_oobr_baseaddr(const si_t *sih, bool second)
+{
+ return 0;
+}
+
+uint
+nci_coreunit(const si_t *sih)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ nci_info_t *nci = sii->nci_info;
+ nci_cores_t *cores = nci->cores;
+ uint idx;
+ uint coreid;
+ uint coreunit;
+ uint i;
+
+ coreunit = 0;
+
+ idx = sii->curidx;
+
+ ASSERT(GOODREGS(sii->curmap));
+ coreid = nci_coreid(sih, sii->curidx);
+
+ /* count the cores of our type */
+ for (i = 0; i < idx; i++) {
+ if (cores[i].coreid == coreid) {
+ coreunit++;
+ }
+ }
+
+ return (coreunit);
+}
+
+uint
+nci_corelist(const si_t *sih, uint coreid[])
+{
+ const si_info_t *sii = SI_INFO(sih);
+ nci_info_t *nci = sii->nci_info;
+ nci_cores_t *cores = nci->cores;
+ uint32 i;
+
+ for (i = 0; i < sii->numcores; i++) {
+ coreid[i] = cores[i].coreid;
+ }
+
+ return (sii->numcores);
+}
+
+/* Return the number of address spaces in current core */
+int
+BCMATTACHFN(nci_numaddrspaces)(const si_t *sih)
+{
+ /* TODO: Either save it or parse the EROM on demand, currently hardcode 2 */
+ BCM_REFERENCE(sih);
+
+ return 2;
+}
+
+/* Return the address of the nth master wrapper (wrapba, wrapba2 and wrapba3).
+ * wrap_pos must be non-negative; 0 selects the first matching wrapper.
+ */
+uint32
+nci_get_nth_wrapper(const si_t *sih, int32 wrap_pos)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ nci_info_t *nci = sii->nci_info;
+ const nci_cores_t *core_info = &nci->cores[sii->curidx];
+ uint32 iface_idx;
+ uint32 addr = 0;
+
+ ASSERT(wrap_pos >= 0);
+ if (wrap_pos < 0) {
+ return addr;
+ }
+
+	NCI_TRACE(("nci_get_nth_wrapper coreidx %u\n", sii->curidx));
+ for (iface_idx = 0; iface_idx < core_info->iface_cnt; iface_idx++) {
+		NCI_TRACE(("nci_get_nth_wrapper iface_idx %u BP_ID %u master %u\n",
+ iface_idx, ID_BPID(core_info->desc[iface_idx].iface_desc_1),
+ IS_MASTER(core_info->desc[iface_idx].iface_desc_1)));
+ /* hack for core idx 8, coreidx without APB Backplane ID */
+ if (!IS_MASTER(core_info->desc[iface_idx].iface_desc_1)) {
+ continue;
+ }
+		/* TODO: Should the interface be only BOOKER, or is NIC also fine? */
+ if (GET_NODETYPE(core_info->desc[iface_idx].iface_desc_0) != NODE_TYPE_BOOKER) {
+ continue;
+ }
+ /* Iterate till we do not get a wrapper at nth (wrap_pos) position */
+ if (wrap_pos == 0) {
+ break;
+ }
+ wrap_pos--;
+ }
+ if (iface_idx < core_info->iface_cnt) {
+ addr = GET_NODEPTR(core_info->desc[iface_idx].iface_desc_0);
+ }
+ return addr;
+}
+
+/* Get slave port address of the 0th slave (csp2ba) */
+uint32
+nci_get_axi_addr(const si_t *sih, uint32 *size)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ nci_info_t *nci = sii->nci_info;
+ const nci_cores_t *core_info = (const nci_cores_t *)&nci->cores[sii->curidx];
+ uint32 iface_idx;
+ uint32 addr = 0;
+
+	NCI_TRACE(("nci_get_axi_addr coreidx %u\n", sii->curidx));
+ for (iface_idx = 0; iface_idx < core_info->iface_cnt; iface_idx++) {
+		NCI_TRACE(("nci_get_axi_addr iface_idx %u BP_ID %u master %u\n",
+ iface_idx, ID_BPID(core_info->desc[iface_idx].iface_desc_1),
+ IS_MASTER(core_info->desc[iface_idx].iface_desc_1)));
+ if (IS_MASTER(core_info->desc[iface_idx].iface_desc_1)) {
+ continue;
+ }
+ if ((ID_BPID(core_info->desc[iface_idx].iface_desc_1) == BP_BOOKER) ||
+ (ID_BPID(core_info->desc[iface_idx].iface_desc_1) == BP_NIC400)) {
+ break;
+ }
+ }
+ if (iface_idx < core_info->iface_cnt) {
+ /*
+ * TODO: Is there any case where we need to return the slave port address
+ * corresponding to index other than 0.
+ */
+ if (&core_info->desc[iface_idx].sp[0] != NULL) {
+ addr = core_info->desc[iface_idx].sp[0].addrl;
+ if (size) {
+ uint32 adesc = core_info->desc[iface_idx].sp[0].adesc;
+ *size = SLAVEPORT_ADDR_SIZE(adesc);
+ }
+ }
+ }
+ return addr;
+}
+
+/* baidx should be the index of the base address register we are expecting.
+ * Its value varies from 0 to num_addr_reg.
+ */
+/* coresba and coresba2 */
+uint32
+nci_get_core_baaddr(const si_t *sih, uint32 *size, int32 baidx)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ nci_info_t *nci = sii->nci_info;
+ const nci_cores_t *core_info = (const nci_cores_t *)&nci->cores[sii->curidx];
+ uint32 iface_idx;
+ uint32 addr = 0;
+
+	NCI_TRACE(("nci_get_core_baaddr coreidx %u\n", sii->curidx));
+ for (iface_idx = 0; iface_idx < core_info->iface_cnt; iface_idx++) {
+		NCI_TRACE(("nci_get_core_baaddr iface_idx %u BP_ID %u master %u\n",
+ iface_idx, ID_BPID(core_info->desc[iface_idx].iface_desc_1),
+ IS_MASTER(core_info->desc[iface_idx].iface_desc_1)));
+ /* hack for core idx 8, coreidx without APB Backplane ID */
+ if (IS_MASTER(core_info->desc[iface_idx].iface_desc_1)) {
+ continue;
+ }
+ if ((ID_BPID(core_info->desc[iface_idx].iface_desc_1) == BP_APB1) ||
+ (ID_BPID(core_info->desc[iface_idx].iface_desc_1) == BP_APB2)) {
+ break;
+ }
+ }
+ if (iface_idx < core_info->iface_cnt) {
+ /*
+ * TODO: Is there any case where we need to return the slave port address
+ * corresponding to index other than 0.
+ */
+ if ((core_info->desc[iface_idx].num_addr_reg > baidx) &&
+ (&core_info->desc[iface_idx].sp[baidx] != NULL)) {
+ addr = core_info->desc[iface_idx].sp[baidx].addrl;
+ if (size) {
+ uint32 adesc = core_info->desc[iface_idx].sp[0].adesc;
+ *size = SLAVEPORT_ADDR_SIZE(adesc);
+ }
+ }
+ }
+ return addr;
+}
+
+uint32
+nci_addrspace(const si_t *sih, uint spidx, uint baidx)
+{
+ if (spidx == CORE_SLAVE_PORT_0) {
+ if (baidx == CORE_BASE_ADDR_0) {
+ return nci_get_core_baaddr(sih, NULL, CORE_BASE_ADDR_0);
+ } else if (baidx == CORE_BASE_ADDR_1) {
+ return nci_get_core_baaddr(sih, NULL, CORE_BASE_ADDR_1);
+ }
+ } else if (spidx == CORE_SLAVE_PORT_1) {
+ if (baidx == CORE_BASE_ADDR_0) {
+ return nci_get_axi_addr(sih, NULL);
+ }
+ }
+
+ SI_ERROR(("nci_addrspace: Need to parse the erom again to find %d base addr"
+ " in %d slave port\n", baidx, spidx));
+
+ return 0;
+}
+
+uint32
+BCMATTACHFN(nci_addrspacesize)(const si_t *sih, uint spidx, uint baidx)
+{
+ uint32 size = 0;
+
+ if (spidx == CORE_SLAVE_PORT_0) {
+ if (baidx == CORE_BASE_ADDR_0) {
+ nci_get_core_baaddr(sih, &size, CORE_BASE_ADDR_0);
+ goto done;
+ } else if (baidx == CORE_BASE_ADDR_1) {
+ nci_get_core_baaddr(sih, &size, CORE_BASE_ADDR_1);
+ goto done;
+ }
+ } else if (spidx == CORE_SLAVE_PORT_1) {
+ if (baidx == CORE_BASE_ADDR_0) {
+ nci_get_axi_addr(sih, &size);
+ goto done;
+ }
+ }
+
+ SI_ERROR(("nci_addrspacesize: Need to parse the erom again to find %d"
+ " base addr in %d slave port\n", baidx, spidx));
+done:
+ return size;
+}
+
+uint32
+BCMPOSTTRAPFN(nci_core_cflags)(const si_t *sih, uint32 mask, uint32 val)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ nci_info_t *nci = sii->nci_info;
+ nci_cores_t *core = &nci->cores[sii->curidx];
+ uint32 orig_bar0_win1 = 0;
+ int32 iface_idx;
+ uint32 w;
+
+ BCM_REFERENCE(iface_idx);
+
+	if (core->coreid == PMU_CORE_ID) {
+ NCI_ERROR(("nci_core_cflags: Accessing PMU DMP register (ioctrl)\n"));
+ return 0;
+ }
+
+ ASSERT(GOODREGS(sii->curwrap));
+ ASSERT((val & ~mask) == 0);
+
+#ifdef BOOKER_NIC400_INF
+ iface_idx = nci_find_first_wrapper_idx(nci, sii->curidx);
+ if (iface_idx < 0) {
+ NCI_ERROR(("nci_core_cflags: First Wrapper is not found\n"));
+ ASSERT(0u);
+ return 0u;
+ }
+
+ /* If Wrapper is of NIC400, then call AI functionality */
+ if (core->desc[iface_idx].master && (core->desc[iface_idx].node_type == NODE_TYPE_NIC400)) {
+ aidmp_t *ai = sii->curwrap;
+
+ if (mask || val) {
+ w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
+ W_REG(sii->osh, &ai->ioctrl, w);
+ }
+ return R_REG(sii->osh, &ai->ioctrl);
+ } else
+#endif /* BOOKER_NIC400_INF */
+ {
+ volatile dmp_regs_t *io = sii->curwrap;
+ volatile uint32 reg_read;
+
+ /* BOOKER */
+ /* Point to OOBR base */
+ switch (BUSTYPE(sih->bustype)) {
+ case SI_BUS:
+ io = (volatile dmp_regs_t*)
+ REG_MAP(GET_OOBR_BASE(nci->cc_erom2base), SI_CORE_SIZE);
+ break;
+
+ case PCI_BUS:
+ /* Save Original Bar0 Win1 */
+ orig_bar0_win1 =
+ OSL_PCI_READ_CONFIG(nci->osh, PCI_BAR0_WIN, PCI_ACCESS_SIZE);
+
+ OSL_PCI_WRITE_CONFIG(nci->osh, PCI_BAR0_WIN, PCI_ACCESS_SIZE,
+ GET_OOBR_BASE(nci->cc_erom2base));
+ io = (volatile dmp_regs_t*)sii->curmap;
+ break;
+
+ default:
+ NCI_ERROR(("nci_core_cflags Invalid bustype %d\n", BUSTYPE(sih->bustype)));
+ break;
+
+ }
+
+ /* Point to DMP Control */
+ io = (dmp_regs_t*)(NCI_ADD_ADDR(io, nci->cores[sii->curidx].dmp_regs_off));
+
+ if (mask || val) {
+ w = ((R_REG(sii->osh, &io->dmpctrl) & ~mask) | val);
+ W_REG(sii->osh, &io->dmpctrl, w);
+ }
+
+ reg_read = R_REG(sii->osh, &io->dmpctrl);
+
+ /* Point back to original base */
+ if (BUSTYPE(sih->bustype) == PCI_BUS) {
+ OSL_PCI_WRITE_CONFIG(nci->osh, PCI_BAR0_WIN,
+ PCI_ACCESS_SIZE, orig_bar0_win1);
+ }
+
+ return reg_read;
+ }
+}
+
+void
+BCMPOSTTRAPFN(nci_core_cflags_wo)(const si_t *sih, uint32 mask, uint32 val)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ nci_info_t *nci = sii->nci_info;
+ nci_cores_t *core = &nci->cores[sii->curidx];
+ volatile dmp_regs_t *io = sii->curwrap;
+ uint32 orig_bar0_win1 = 0;
+ int32 iface_idx;
+ uint32 w;
+
+ BCM_REFERENCE(iface_idx);
+
+	if (core->coreid == PMU_CORE_ID) {
+		NCI_ERROR(("nci_core_cflags_wo: Accessing PMU DMP register (ioctrl)\n"));
+ return;
+ }
+
+ ASSERT(GOODREGS(sii->curwrap));
+ ASSERT((val & ~mask) == 0);
+
+#ifdef BOOKER_NIC400_INF
+ iface_idx = nci_find_first_wrapper_idx(nci, sii->curidx);
+ if (iface_idx < 0) {
+ NCI_ERROR(("nci_core_cflags_wo: First Wrapper is not found\n"));
+ ASSERT(0u);
+ return;
+ }
+
+ /* If Wrapper is of NIC400, then call AI functionality */
+ if (core->desc[iface_idx].master && (core->desc[iface_idx].node_type == NODE_TYPE_NIC400)) {
+ aidmp_t *ai = sii->curwrap;
+ if (mask || val) {
+ w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
+ W_REG(sii->osh, &ai->ioctrl, w);
+ }
+ } else
+#endif /* BOOKER_NIC400_INF */
+ {
+ /* BOOKER */
+ /* Point to OOBR base */
+ switch (BUSTYPE(sih->bustype)) {
+ case SI_BUS:
+ io = (volatile dmp_regs_t*)
+ REG_MAP(GET_OOBR_BASE(nci->cc_erom2base), SI_CORE_SIZE);
+ break;
+
+ case PCI_BUS:
+ /* Save Original Bar0 Win1 */
+ orig_bar0_win1 =
+ OSL_PCI_READ_CONFIG(nci->osh, PCI_BAR0_WIN, PCI_ACCESS_SIZE);
+
+ OSL_PCI_WRITE_CONFIG(nci->osh, PCI_BAR0_WIN, PCI_ACCESS_SIZE,
+ GET_OOBR_BASE(nci->cc_erom2base));
+ io = (volatile dmp_regs_t*)sii->curmap;
+ break;
+
+ default:
+ NCI_ERROR(("nci_core_cflags_wo Invalid bustype %d\n",
+ BUSTYPE(sih->bustype)));
+ break;
+ }
+
+ /* Point to DMP Control */
+ io = (dmp_regs_t*)(NCI_ADD_ADDR(io, nci->cores[sii->curidx].dmp_regs_off));
+
+ if (mask || val) {
+ w = ((R_REG(sii->osh, &io->dmpctrl) & ~mask) | val);
+ W_REG(sii->osh, &io->dmpctrl, w);
+ }
+
+ /* Point back to original base */
+ if (BUSTYPE(sih->bustype) == PCI_BUS) {
+ OSL_PCI_WRITE_CONFIG(nci->osh, PCI_BAR0_WIN,
+ PCI_ACCESS_SIZE, orig_bar0_win1);
+ }
+ }
+}
+
+uint32
+nci_core_sflags(const si_t *sih, uint32 mask, uint32 val)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ nci_info_t *nci = sii->nci_info;
+ nci_cores_t *core = &nci->cores[sii->curidx];
+ uint32 orig_bar0_win1 = 0;
+ int32 iface_idx;
+ uint32 w;
+
+ BCM_REFERENCE(iface_idx);
+
+	if (core->coreid == PMU_CORE_ID) {
+		NCI_ERROR(("nci_core_sflags: Accessing PMU DMP register (iostatus)\n"));
+ return 0;
+ }
+
+ ASSERT(GOODREGS(sii->curwrap));
+
+ ASSERT((val & ~mask) == 0);
+ ASSERT((mask & ~SISF_CORE_BITS) == 0);
+
+#ifdef BOOKER_NIC400_INF
+ iface_idx = nci_find_first_wrapper_idx(nci, sii->curidx);
+ if (iface_idx < 0) {
+ NCI_ERROR(("nci_core_sflags: First Wrapper is not found\n"));
+ ASSERT(0u);
+ return 0u;
+ }
+
+ /* If Wrapper is of NIC400, then call AI functionality */
+ if (core->desc[iface_idx].master && (core->desc[iface_idx].node_type == NODE_TYPE_NIC400)) {
+ aidmp_t *ai = sii->curwrap;
+ if (mask || val) {
+ w = ((R_REG(sii->osh, &ai->iostatus) & ~mask) | val);
+ W_REG(sii->osh, &ai->iostatus, w);
+ }
+
+ return R_REG(sii->osh, &ai->iostatus);
+ } else
+#endif /* BOOKER_NIC400_INF */
+ {
+ volatile dmp_regs_t *io = sii->curwrap;
+ volatile uint32 reg_read;
+
+ /* BOOKER */
+ /* Point to OOBR base */
+ switch (BUSTYPE(sih->bustype)) {
+ case SI_BUS:
+ io = (volatile dmp_regs_t*)
+ REG_MAP(GET_OOBR_BASE(nci->cc_erom2base), SI_CORE_SIZE);
+ break;
+
+ case PCI_BUS:
+ /* Save Original Bar0 Win1 */
+ orig_bar0_win1 =
+ OSL_PCI_READ_CONFIG(nci->osh, PCI_BAR0_WIN, PCI_ACCESS_SIZE);
+
+ OSL_PCI_WRITE_CONFIG(nci->osh, PCI_BAR0_WIN, PCI_ACCESS_SIZE,
+ GET_OOBR_BASE(nci->cc_erom2base));
+ io = (volatile dmp_regs_t*)sii->curmap;
+ break;
+
+ default:
+ NCI_ERROR(("nci_core_sflags Invalid bustype %d\n", BUSTYPE(sih->bustype)));
+ return 0u;
+ }
+
+ /* Point to DMP Control */
+ io = (dmp_regs_t*)(NCI_ADD_ADDR(io, nci->cores[sii->curidx].dmp_regs_off));
+
+ if (mask || val) {
+ w = ((R_REG(sii->osh, &io->dmpstatus) & ~mask) | val);
+ W_REG(sii->osh, &io->dmpstatus, w);
+ }
+
+ reg_read = R_REG(sii->osh, &io->dmpstatus);
+
+ /* Point back to original base */
+ if (BUSTYPE(sih->bustype) == PCI_BUS) {
+ OSL_PCI_WRITE_CONFIG(nci->osh, PCI_BAR0_WIN,
+ PCI_ACCESS_SIZE, orig_bar0_win1);
+ }
+
+ return reg_read;
+ }
+}
+
+/* TODO: Used only by host */
+int
+nci_backplane_access(si_t *sih, uint addr, uint size, uint *val, bool read)
+{
+ return 0;
+}
+
+int
+nci_backplane_access_64(si_t *sih, uint addr, uint size, uint64 *val, bool read)
+{
+ return 0;
+}
+
+uint
+nci_num_slaveports(const si_t *sih, uint coreidx)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ nci_info_t *nci = sii->nci_info;
+ const nci_cores_t *core_info = (const nci_cores_t *)&nci->cores[coreidx];
+ uint32 iface_idx;
+ uint32 numports = 0;
+
+	NCI_TRACE(("nci_num_slaveports coreidx %u\n", coreidx));
+ for (iface_idx = 0; iface_idx < core_info->iface_cnt; iface_idx++) {
+		NCI_TRACE(("nci_num_slaveports iface_idx %u BP_ID %u master %u\n",
+ iface_idx, ID_BPID(core_info->desc[iface_idx].iface_desc_1),
+ IS_MASTER(core_info->desc[iface_idx].iface_desc_1)));
+ /* hack for core idx 8, coreidx without APB Backplane ID */
+ if (IS_MASTER(core_info->desc[iface_idx].iface_desc_1)) {
+ continue;
+ }
+ if ((ID_BPID(core_info->desc[iface_idx].iface_desc_1) == BP_APB1) ||
+ (ID_BPID(core_info->desc[iface_idx].iface_desc_1) == BP_APB2)) {
+ break;
+ }
+ }
+ if (iface_idx < core_info->iface_cnt) {
+ numports = core_info->desc[iface_idx].num_addr_reg;
+ }
+ return numports;
+}
+
+#if defined(BCMDBG) || defined(BCMDBG_DUMP) || defined(BCMDBG_PHYDUMP)
+void
+nci_dumpregs(const si_t *sih, struct bcmstrbuf *b)
+{
+ const si_info_t *sii = SI_INFO(sih);
+
+	bcm_bprintf(b, "ChipNum:%x, ChipRev:%x, BusType:%x, BoardType:%x, BoardVendor:%x\n\n",
+ sih->chip, sih->chiprev, sih->bustype, sih->boardtype, sih->boardvendor);
+ BCM_REFERENCE(sii);
+ /* TODO: Implement dump regs for nci. */
+}
+#endif /* BCMDBG || BCMDBG_DUMP || BCMDBG_PHYDUMP */
+
+#ifdef BCMDBG
+static void
+_nci_view(osl_t *osh, aidmp_t *ai, uint32 cid, uint32 addr, bool verbose)
+{
+ /* TODO: This is WIP and will be developed once the
+ * implementation is done based on the NCI.
+ */
+}
+
+void
+nci_view(si_t *sih, bool verbose)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ nci_info_t *nci = sii->nci_info;
+ const nci_cores_t *core_info = (const nci_cores_t *)nci->cores;
+ osl_t *osh;
+ /* TODO: We need to do the structure mapping correctly based on the BOOKER/NIC type */
+ aidmp_t *ai;
+ uint32 cid, addr;
+
+ ai = sii->curwrap;
+ osh = sii->osh;
+
+ if ((core_info[sii->curidx].coreid) == PMU_CORE_ID) {
+ SI_ERROR(("Cannot access pmu DMP\n"));
+ return;
+ }
+ cid = core_info[sii->curidx].coreid;
+ addr = nci_get_nth_wrapper(sih, 0u);
+ _nci_view(osh, ai, cid, addr, verbose);
+}
+
+void
+nci_viewall(si_t *sih, bool verbose)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ nci_info_t *nci = sii->nci_info;
+ const nci_cores_t *core_info = (const nci_cores_t *)nci->cores;
+ osl_t *osh;
+ aidmp_t *ai;
+ uint32 cid, addr;
+ uint i;
+
+ osh = sii->osh;
+ for (i = 0; i < sii->numcores; i++) {
+ nci_setcoreidx(sih, i);
+
+ if ((core_info[i].coreid) == PMU_CORE_ID) {
+ SI_ERROR(("Skipping pmu DMP\n"));
+ continue;
+ }
+ ai = sii->curwrap;
+ cid = core_info[i].coreid;
+ addr = nci_get_nth_wrapper(sih, 0u);
+ _nci_view(osh, ai, cid, addr, verbose);
+ }
+}
+#endif /* BCMDBG */
+
+uint32
+nci_clear_backplane_to(si_t *sih)
+{
+ /* TODO: This is WIP and will be developed once the
+ * implementation is done based on the NCI.
+ */
+ return 0;
+}
+
+#if defined (AXI_TIMEOUTS) || defined (AXI_TIMEOUTS_NIC)
+static bool g_disable_backplane_logs = FALSE;
+
+static uint32 last_axi_error = AXI_WRAP_STS_NONE;
+static uint32 last_axi_error_log_status = 0;
+static uint32 last_axi_error_core = 0;
+static uint32 last_axi_error_wrap = 0;
+static uint32 last_axi_errlog_lo = 0;
+static uint32 last_axi_errlog_hi = 0;
+static uint32 last_axi_errlog_id = 0;
+
+/* slave error is ignored, so account for those cases */
+static uint32 si_ignore_errlog_cnt = 0;
+
+static void
+nci_reset_APB(const si_info_t *sii, aidmp_t *ai, int *ret,
+ uint32 errlog_status, uint32 errlog_id)
+{
+ /* only reset APB Bridge on timeout (not slave error, or dec error) */
+ switch (errlog_status & AIELS_ERROR_MASK) {
+ case AIELS_SLAVE_ERR:
+ NCI_PRINT(("AXI slave error\n"));
+ *ret |= AXI_WRAP_STS_SLAVE_ERR;
+ break;
+
+ case AIELS_TIMEOUT:
+ nci_reset_axi_to(sii, ai);
+ *ret |= AXI_WRAP_STS_TIMEOUT;
+ break;
+
+ case AIELS_DECODE:
+ NCI_PRINT(("AXI decode error\n"));
+#ifdef USE_HOSTMEM
+ /* Ignore known cases of CR4 prefetch abort bugs */
+ if ((errlog_id & (BCM_AXI_ID_MASK | BCM_AXI_ACCESS_TYPE_MASK)) !=
+ (BCM43xx_AXI_ACCESS_TYPE_PREFETCH | BCM43xx_CR4_AXI_ID))
+#endif /* USE_HOSTMEM */
+ {
+ *ret |= AXI_WRAP_STS_DECODE_ERR;
+ }
+ break;
+ default:
+ ASSERT(0); /* should be impossible */
+ }
+ if (errlog_status & AIELS_MULTIPLE_ERRORS) {
+ NCI_PRINT(("Multiple AXI Errors\n"));
+ /* Set multiple errors bit only if actual error is not ignored */
+ if (*ret) {
+ *ret |= AXI_WRAP_STS_MULTIPLE_ERRORS;
+ }
+ }
+ return;
+}
+/*
+ * API to clear the backplane timeout per core.
+ * The caller may pass an optional wrapper address. If present, it is used as
+ * the wrapper base address; in that case the caller must also provide the
+ * coreid.
+ * If both coreid and wrapper are zero, the error status of the current
+ * bridge is verified.
+ */
+
+uint32
+nci_clear_backplane_to_per_core(si_t *sih, uint coreid, uint coreunit, void *wrap)
+{
+ int ret = AXI_WRAP_STS_NONE;
+ aidmp_t *ai = NULL;
+ uint32 errlog_status = 0;
+ const si_info_t *sii = SI_INFO(sih);
+ uint32 errlog_lo = 0, errlog_hi = 0, errlog_id = 0, errlog_flags = 0;
+ uint32 current_coreidx = si_coreidx(sih);
+ uint32 target_coreidx = nci_findcoreidx(sih, coreid, coreunit);
+
+#if defined(AXI_TIMEOUTS_NIC)
+ si_axi_error_t * axi_error = sih->err_info ?
+ &sih->err_info->axi_error[sih->err_info->count] : NULL;
+#endif /* AXI_TIMEOUTS_NIC */
+ bool restore_core = FALSE;
+
+ if ((sii->axi_num_wrappers == 0) ||
+#ifdef AXI_TIMEOUTS_NIC
+ (!PCIE(sii)) ||
+#endif /* AXI_TIMEOUTS_NIC */
+ FALSE) {
+ SI_VMSG(("nci_clear_backplane_to_per_core, axi_num_wrappers:%d, Is_PCIE:%d,"
+ " BUS_TYPE:%d, ID:%x\n",
+ sii->axi_num_wrappers, PCIE(sii),
+ BUSTYPE(sii->pub.bustype), sii->pub.buscoretype));
+ return AXI_WRAP_STS_NONE;
+ }
+
+ if (wrap != NULL) {
+ ai = (aidmp_t *)wrap;
+ } else if (coreid && (target_coreidx != current_coreidx)) {
+ if (nci_setcoreidx(sih, target_coreidx) == NULL) {
+ /* Unable to set the core */
+			NCI_PRINT(("Set Core Failed: coreid:%x, unit:%d, target_coreidx:%d\n",
+ coreid, coreunit, target_coreidx));
+ errlog_lo = target_coreidx;
+ ret = AXI_WRAP_STS_SET_CORE_FAIL;
+ goto end;
+ }
+ restore_core = TRUE;
+ ai = (aidmp_t *)si_wrapperregs(sih);
+ } else {
+ /* Read error status of current wrapper */
+ ai = (aidmp_t *)si_wrapperregs(sih);
+
+		/* Update coreid to the current core's ID */
+ coreid = nci_coreid(sih, sii->curidx);
+ }
+
+ /* read error log status */
+ errlog_status = R_REG(sii->osh, &ai->errlogstatus);
+
+ if (errlog_status == ID32_INVALID) {
+ /* Do not try to peek further */
+ NCI_PRINT(("nci_clear_backplane_to_per_core, errlogstatus:%x - "
+ "Slave Wrapper:%x\n", errlog_status, coreid));
+ ret = AXI_WRAP_STS_WRAP_RD_ERR;
+ errlog_lo = (uint32)(uintptr)&ai->errlogstatus;
+ goto end;
+ }
+
+ if ((errlog_status & AIELS_ERROR_MASK) != 0) {
+ uint32 tmp;
+ uint32 count = 0;
+ /* set ErrDone to clear the condition */
+ W_REG(sii->osh, &ai->errlogdone, AIELD_ERRDONE_MASK);
+
+ /* SPINWAIT on errlogstatus timeout status bits */
+ while ((tmp = R_REG(sii->osh, &ai->errlogstatus)) & AIELS_ERROR_MASK) {
+
+ if (tmp == ID32_INVALID) {
+ NCI_PRINT(("nci_clear_backplane_to_per_core: prev errlogstatus:%x,"
+ " errlogstatus:%x\n",
+ errlog_status, tmp));
+ ret = AXI_WRAP_STS_WRAP_RD_ERR;
+
+ errlog_lo = (uint32)(uintptr)&ai->errlogstatus;
+ goto end;
+ }
+ /*
+ * Clear again, to avoid getting stuck in the loop, if a new error
+ * is logged after we cleared the first timeout
+ */
+ W_REG(sii->osh, &ai->errlogdone, AIELD_ERRDONE_MASK);
+
+ count++;
+ OSL_DELAY(10);
+ if ((10 * count) > AI_REG_READ_TIMEOUT) {
+ errlog_status = tmp;
+ break;
+ }
+ }
+
+ errlog_lo = R_REG(sii->osh, &ai->errlogaddrlo);
+ errlog_hi = R_REG(sii->osh, &ai->errlogaddrhi);
+ errlog_id = R_REG(sii->osh, &ai->errlogid);
+ errlog_flags = R_REG(sii->osh, &ai->errlogflags);
+
+ /* we are already in the error path, so OK to check for the slave error */
+ if (nci_ignore_errlog(sii, ai, errlog_lo, errlog_hi, errlog_id, errlog_status)) {
+ si_ignore_errlog_cnt++;
+ goto end;
+ }
+
+ nci_reset_APB(sii, ai, &ret, errlog_status, errlog_id);
+
+ NCI_PRINT(("\tCoreID: %x\n", coreid));
+ NCI_PRINT(("\t errlog: lo 0x%08x, hi 0x%08x, id 0x%08x, flags 0x%08x"
+ ", status 0x%08x\n",
+ errlog_lo, errlog_hi, errlog_id, errlog_flags,
+ errlog_status));
+ }
+
+end:
+ if (ret != AXI_WRAP_STS_NONE) {
+ last_axi_error = ret;
+ last_axi_error_log_status = errlog_status;
+ last_axi_error_core = coreid;
+		last_axi_error_wrap = (uint32)(uintptr)ai;
+ last_axi_errlog_lo = errlog_lo;
+ last_axi_errlog_hi = errlog_hi;
+ last_axi_errlog_id = errlog_id;
+ }
+
+#if defined(AXI_TIMEOUTS_NIC)
+ if (axi_error && (ret != AXI_WRAP_STS_NONE)) {
+ axi_error->error = ret;
+ axi_error->coreid = coreid;
+ axi_error->errlog_lo = errlog_lo;
+ axi_error->errlog_hi = errlog_hi;
+ axi_error->errlog_id = errlog_id;
+ axi_error->errlog_flags = errlog_flags;
+ axi_error->errlog_status = errlog_status;
+ sih->err_info->count++;
+
+ if (sih->err_info->count == SI_MAX_ERRLOG_SIZE) {
+ sih->err_info->count = SI_MAX_ERRLOG_SIZE - 1;
+ NCI_PRINT(("AXI Error log overflow\n"));
+ }
+ }
+#endif /* AXI_TIMEOUTS_NIC */
+
+ if (restore_core) {
+ if (nci_setcoreidx(sih, current_coreidx) == NULL) {
+ /* Unable to set the core */
+ return ID32_INVALID;
+ }
+ }
+ return ret;
+}
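+
+/*
+ * Illustrative usage (a sketch, not taken from this change): after an AXI
+ * error interrupt, poll the current wrapper (coreid == 0, wrap == NULL) and
+ * act on the returned AXI_WRAP_STS_* bits:
+ *
+ *	uint32 sts = nci_clear_backplane_to_per_core(sih, 0, 0, NULL);
+ *	if (sts != AXI_WRAP_STS_NONE)
+ *		NCI_PRINT(("AXI error status 0x%x\n", sts));
+ */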
+
+/* TODO: It needs to be handled based on BOOKER/NCI DMP. */
+/* reset AXI timeout */
+static void
+nci_reset_axi_to(const si_info_t *sii, aidmp_t *ai)
+{
+ /* reset APB Bridge */
+ OR_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
+ /* sync write */
+ (void)R_REG(sii->osh, &ai->resetctrl);
+ /* clear Reset bit */
+ AND_REG(sii->osh, &ai->resetctrl, ~(AIRC_RESET));
+ /* sync write */
+ (void)R_REG(sii->osh, &ai->resetctrl);
+ NCI_PRINT(("AXI timeout\n"));
+ if (R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) {
+ NCI_PRINT(("reset failed on wrapper %p\n", ai));
+ g_disable_backplane_logs = TRUE;
+ }
+}
+
+void
+nci_wrapper_get_last_error(const si_t *sih, uint32 *error_status, uint32 *core, uint32 *lo,
+ uint32 *hi, uint32 *id)
+{
+ *error_status = last_axi_error_log_status;
+ *core = last_axi_error_core;
+ *lo = last_axi_errlog_lo;
+ *hi = last_axi_errlog_hi;
+ *id = last_axi_errlog_id;
+}
+
+uint32
+nci_get_axi_timeout_reg(void)
+{
+ return (GOODREGS(last_axi_errlog_lo) ? last_axi_errlog_lo : 0);
+}
+#endif /* AXI_TIMEOUTS || AXI_TIMEOUTS_NIC */
+
+/* TODO: This function should be able to handle NIC as well as BOOKER */
+bool
+nci_ignore_errlog(const si_info_t *sii, const aidmp_t *ai,
+ uint32 lo_addr, uint32 hi_addr, uint32 err_axi_id, uint32 errsts)
+{
+ uint32 ignore_errsts = AIELS_SLAVE_ERR;
+ uint32 ignore_errsts_2 = 0;
+ uint32 ignore_hi = BT_CC_SPROM_BADREG_HI;
+ uint32 ignore_lo = BT_CC_SPROM_BADREG_LO;
+ uint32 ignore_size = BT_CC_SPROM_BADREG_SIZE;
+ bool address_check = TRUE;
+ uint32 axi_id = 0;
+ uint32 axi_id2 = 0;
+ bool extd_axi_id_mask = FALSE;
+ uint32 axi_id_mask;
+
+ NCI_PRINT(("err check: core %p, error %d, axi id 0x%04x, addr(0x%08x:%08x)\n",
+ ai, errsts, err_axi_id, hi_addr, lo_addr));
+
+ /* ignore the BT slave errors if the errlog is to chipcommon addr 0x190 */
+ switch (CHIPID(sii->pub.chip)) {
+ case BCM4397_CHIP_GRPID: /* TODO: Are these IDs same for 4397 as well? */
+#ifdef BTOVERPCIE
+ axi_id = BCM4378_BT_AXI_ID;
+ /* For BT over PCIE, ignore any slave error from BT. */
+ /* No need to check any address range */
+ address_check = FALSE;
+#endif /* BTOVERPCIE */
+ axi_id2 = BCM4378_ARM_PREFETCH_AXI_ID;
+ extd_axi_id_mask = TRUE;
+ ignore_errsts_2 = AIELS_DECODE;
+ break;
+ default:
+ return FALSE;
+ }
+
+ axi_id_mask = extd_axi_id_mask ? AI_ERRLOGID_AXI_ID_MASK_EXTD : AI_ERRLOGID_AXI_ID_MASK;
+
+ /* AXI ID check */
+ err_axi_id &= axi_id_mask;
+ errsts &= AIELS_ERROR_MASK;
+
+ /* check the ignore error cases. 2 checks */
+ if (!(((err_axi_id == axi_id) && (errsts == ignore_errsts)) ||
+ ((err_axi_id == axi_id2) && (errsts == ignore_errsts_2)))) {
+ /* not the error ignore cases */
+ return FALSE;
+
+ }
+
+ /* check the specific address checks now, if specified */
+ if (address_check) {
+ /* address range check */
+ if ((hi_addr != ignore_hi) ||
+ (lo_addr < ignore_lo) || (lo_addr >= (ignore_lo + ignore_size))) {
+ return FALSE;
+ }
+ }
+
+ NCI_PRINT(("err check: ignored\n"));
+ return TRUE;
+}
+
+/* TODO: Check the CORE to AXI ID mapping for 4397 */
+uint32
+nci_findcoreidx_by_axiid(const si_t *sih, uint32 axiid)
+{
+ uint coreid = 0;
+ uint coreunit = 0;
+ const nci_axi_to_coreidx_t *axi2coreidx = NULL;
+ switch (CHIPID(sih->chip)) {
+ case BCM4397_CHIP_GRPID:
+ axi2coreidx = axi2coreidx_4397;
+ break;
+ default:
+ NCI_PRINT(("Chipid mapping not found\n"));
+ break;
+ }
+
+ if (!axi2coreidx) {
+ return (BADIDX);
+ }
+
+ coreid = axi2coreidx[axiid].coreid;
+ coreunit = axi2coreidx[axiid].coreunit;
+
+ return nci_findcoreidx(sih, coreid, coreunit);
+}
+
+void
+nci_coreaddrspaceX(const si_t *sih, uint asidx, uint32 *addr, uint32 *size)
+{
+ /* Adding just a wrapper. Will implement when required. */
+}
+
+/*
+ * This is not declared as static const, although that is the right thing to
+ * do: if it were declared static const, the compile/link process would place
+ * it in a read-only section.
+ * Currently this array is used to identify the registers which are dumped
+ * during trap processing, and since the .rodata region is usually reused for
+ * the trap buffer, for now it is just static.
+ */
+/* TODO: Should we do another mapping for BOOKER and use the correct one based on
+ * the type of DMP?
+ */
+#ifdef DONGLEBUILD
+static uint32 BCMPOST_TRAP_RODATA(wrapper_offsets_to_dump)[] = {
+ OFFSETOF(aidmp_t, ioctrl),
+ OFFSETOF(aidmp_t, iostatus),
+ OFFSETOF(aidmp_t, resetctrl),
+ OFFSETOF(aidmp_t, resetstatus),
+ OFFSETOF(aidmp_t, resetreadid),
+ OFFSETOF(aidmp_t, resetwriteid),
+ OFFSETOF(aidmp_t, errlogctrl),
+ OFFSETOF(aidmp_t, errlogdone),
+ OFFSETOF(aidmp_t, errlogstatus),
+ OFFSETOF(aidmp_t, errlogaddrlo),
+ OFFSETOF(aidmp_t, errlogaddrhi),
+ OFFSETOF(aidmp_t, errlogid),
+ OFFSETOF(aidmp_t, errloguser),
+ OFFSETOF(aidmp_t, errlogflags),
+ OFFSETOF(aidmp_t, itipoobaout),
+ OFFSETOF(aidmp_t, itipoobbout),
+ OFFSETOF(aidmp_t, itipoobcout),
+ OFFSETOF(aidmp_t, itipoobdout)
+};
+
+static uint32
+BCMRAMFN(nci_get_sizeof_wrapper_offsets_to_dump)(void)
+{
+ return (sizeof(wrapper_offsets_to_dump));
+}
+
+static uint32
+BCMRAMFN(nci_get_wrapper_base_addr)(uint32 **offset)
+{
+ uint32 arr_size = ARRAYSIZE(wrapper_offsets_to_dump);
+
+ *offset = &wrapper_offsets_to_dump[0];
+ return arr_size;
+}
+
+#ifdef UART_TRAP_DBG
+/* TODO: Is br_wrapba populated for 4397 NCI? */
+void
+nci_dump_APB_Bridge_registers(const si_t *sih)
+{
+ aidmp_t *ai;
+ const si_info_t *sii = SI_INFO(sih);
+
+ ai = (aidmp_t *)sii->br_wrapba[0];
+ printf("APB Bridge 0\n");
+ printf("lo 0x%08x, hi 0x%08x, id 0x%08x, flags 0x%08x",
+ R_REG(sii->osh, &ai->errlogaddrlo),
+ R_REG(sii->osh, &ai->errlogaddrhi),
+ R_REG(sii->osh, &ai->errlogid),
+ R_REG(sii->osh, &ai->errlogflags));
+ printf("\n status 0x%08x\n", R_REG(sii->osh, &ai->errlogstatus));
+}
+#endif /* UART_TRAP_DBG */
+
+uint32
+BCMATTACHFN(nci_wrapper_dump_buf_size)(const si_t *sih)
+{
+ uint32 buf_size = 0;
+ uint32 wrapper_count = 0;
+ const si_info_t *sii = SI_INFO(sih);
+
+ wrapper_count = sii->axi_num_wrappers;
+ if (wrapper_count == 0) {
+ return 0;
+ }
+
+	/* cnt indicates how many registers; tag_id 0 says these are
+	 * address/value pairs
+	 */
+ buf_size += 2 * (nci_get_sizeof_wrapper_offsets_to_dump() * wrapper_count);
+
+ return buf_size;
+}
+
+uint32*
+nci_wrapper_dump_binary_one(const si_info_t *sii, uint32 *p32, uint32 wrap_ba)
+{
+ uint i;
+ uint32 *addr;
+ uint32 arr_size;
+ uint32 *offset_base;
+
+ arr_size = nci_get_wrapper_base_addr(&offset_base);
+
+ for (i = 0; i < arr_size; i++) {
+ addr = (uint32 *)(wrap_ba + *(offset_base + i));
+ *p32++ = (uint32)addr;
+ *p32++ = R_REG(sii->osh, addr);
+ }
+ return p32;
+}
+
+uint32
+nci_wrapper_dump_binary(const si_t *sih, uchar *p)
+{
+ uint32 *p32 = (uint32 *)p;
+ uint32 i;
+ const si_info_t *sii = SI_INFO(sih);
+
+ for (i = 0; i < sii->axi_num_wrappers; i++) {
+ p32 = nci_wrapper_dump_binary_one(sii, p32, sii->axi_wrapper[i].wrapper_addr);
+ }
+ return 0;
+}
+
+#if defined(ETD)
+uint32
+nci_wrapper_dump_last_timeout(const si_t *sih, uint32 *error, uint32 *core, uint32 *ba, uchar *p)
+{
+#if defined (AXI_TIMEOUTS) || defined (AXI_TIMEOUTS_NIC)
+ uint32 *p32;
+ uint32 wrap_ba = last_axi_error_wrap;
+ uint i;
+ uint32 *addr;
+
+ const si_info_t *sii = SI_INFO(sih);
+
+ if (last_axi_error != AXI_WRAP_STS_NONE) {
+		if (wrap_ba) {
+			uint32 arr_size;
+			uint32 *offset_base;
+
+			p32 = (uint32 *)p;
+			arr_size = nci_get_wrapper_base_addr(&offset_base);
+ for (i = 0; i < arr_size; i++) {
+ addr = (uint32 *)(wrap_ba + *(offset_base + i));
+ *p32++ = R_REG(sii->osh, addr);
+ }
+ }
+ *error = last_axi_error;
+ *core = last_axi_error_core;
+ *ba = wrap_ba;
+ }
+#else
+ *error = 0;
+ *core = 0;
+ *ba = 0;
+#endif /* AXI_TIMEOUTS || AXI_TIMEOUTS_NIC */
+ return 0;
+}
+#endif /* ETD */
+
+bool
+nci_check_enable_backplane_log(const si_t *sih)
+{
+#if defined (AXI_TIMEOUTS) || defined (AXI_TIMEOUTS_NIC)
+	if (g_disable_backplane_logs) {
+		return FALSE;
+	} else {
+		return TRUE;
+	}
+#else /* (AXI_TIMEOUTS) || defined (AXI_TIMEOUTS_NIC) */
+ return FALSE;
+#endif /* (AXI_TIMEOUTS) || defined (AXI_TIMEOUTS_NIC) */
+}
+#endif /* DONGLEBUILD */
diff --git a/bcmdhd.101.10.361.x/pcie_core.c b/bcmdhd.101.10.361.x/pcie_core.c
new file mode 100755
index 0000000..1dfc3d6
--- /dev/null
+++ b/bcmdhd.101.10.361.x/pcie_core.c
@@ -0,0 +1,227 @@
+/** @file pcie_core.c
+ *
+ * Contains PCIe related functions that are shared between different driver models (e.g. firmware
+ * builds, DHD builds, BMAC builds), in order to avoid code duplication.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#include <typedefs.h>
+#include <bcmutils.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <siutils.h>
+#include <hndsoc.h>
+#include <sbchipc.h>
+#include <pcicfg.h>
+#if defined(DONGLEBUILD)
+#include <pcieregsoffs.h>
+#include <pcicfg.h>
+#endif
+#include "pcie_core.h"
+#include <bcmdevs.h>
+
+/* local prototypes */
+
+/* local variables */
+
+/* function definitions */
+
+#ifdef BCMDRIVER /* this workaround can only be run on the host side since it resets the chip */
+#if !defined(DONGLEBUILD) || defined(BCMSTANDALONE_TEST)
+
+/* To avoid build error for dongle standalone test, define CAN_SLEEP if not defined */
+#ifndef CAN_SLEEP
+#define CAN_SLEEP() (FALSE)
+#endif
+
+#ifndef USEC_PER_MSEC
+#define USEC_PER_MSEC 1000
+#endif
+
+/**
+ * WAR for CRWLPCIEGEN2-163, needed for all the chips at this point.
+ * The PCIe core contains a 'snoop bus', that allows the logic in the PCIe core to read and write
+ * to the PCIe configuration registers. When chip backplane reset hits, e.g. on driver unload, the
+ * pcie snoop out will reset to default values and may get out of sync with pcie config registers.
+ * This is causing failures because the LTR enable bit on the snoop bus gets out of sync. Also on
+ * the snoop bus are the device power state, MSI info, L1subenable which may potentially cause
+ * problems.
+ */
+/* wd_mask/wd_val is only for chipc_corerev >= 65 */
+void pcie_watchdog_reset(osl_t *osh, si_t *sih, uint32 wd_mask, uint32 wd_val)
+{
+ uint32 val, i, lsc;
+ uint16 cfg_offset[] = {PCIECFGREG_STATUS_CMD, PCIECFGREG_PM_CSR,
+ PCIECFGREG_MSI_CAP, PCIECFGREG_MSI_ADDR_L,
+ PCIECFGREG_MSI_ADDR_H, PCIECFGREG_MSI_DATA,
+ PCIECFGREG_LINK_STATUS_CTRL2, PCIECFGREG_RBAR_CTRL,
+ PCIECFGREG_PML1_SUB_CTRL1, PCIECFGREG_REG_BAR2_CONFIG,
+ PCIECFGREG_REG_BAR3_CONFIG};
+ sbpcieregs_t *pcieregs = NULL;
+ uint32 origidx = si_coreidx(sih);
+
+#if defined(BCMQT) || defined(BCMFPGA_HW)
+ /*
+	 * JIRA: SWWLAN-283651, 4397A0 WAR: during insmod, skip the existing
+	 * PCIe WAR 'pcie_watchdog_reset'
+ */
+ if (BCM4397_CHIP(sih->chip)) {
+ return;
+ }
+
+	/* To avoid a hang on FPGA, do not reset the watchdog */
+ if (CCREV(sih->ccrev) < 65) {
+ si_setcoreidx(sih, origidx);
+ return;
+ }
+#endif
+#ifdef BCMFPGA_HW
+ if (CCREV(sih->ccrev) < 67) {
+		/* To avoid a hang on FPGA, do not reset the watchdog */
+ si_setcoreidx(sih, origidx);
+ return;
+ }
+#endif
+
+ /* Switch to PCIE2 core */
+ pcieregs = (sbpcieregs_t *)si_setcore(sih, PCIE2_CORE_ID, 0);
+ BCM_REFERENCE(pcieregs);
+ ASSERT(pcieregs != NULL);
+
+ /* Disable/restore ASPM Control to protect the watchdog reset */
+ W_REG(osh, &pcieregs->configaddr, PCIECFGREG_LINK_STATUS_CTRL);
+ lsc = R_REG(osh, &pcieregs->configdata);
+ val = lsc & (~PCIE_ASPM_ENAB);
+ W_REG(osh, &pcieregs->configdata, val);
+
+ if (CCREV(sih->ccrev) >= 65) {
+ si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, watchdog), wd_mask, wd_val);
+ si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, watchdog), WD_COUNTER_MASK, 4);
+ CAN_SLEEP() ? OSL_SLEEP(2) : OSL_DELAY(2 * USEC_PER_MSEC); /* 2 ms */
+ val = si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, intstatus), 0, 0);
+ si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, intstatus),
+ wd_mask, val & wd_mask);
+ } else {
+ si_corereg_writeonly(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, watchdog), ~0, 4);
+ /* Read a config space to make sure the above write gets flushed on PCIe bus */
+ val = OSL_PCI_READ_CONFIG(osh, PCI_CFG_VID, sizeof(uint32));
+ CAN_SLEEP() ? OSL_SLEEP(100) : OSL_DELAY(100 * USEC_PER_MSEC); /* 100 ms */
+ }
+
+ W_REG(osh, &pcieregs->configaddr, PCIECFGREG_LINK_STATUS_CTRL);
+ W_REG(osh, &pcieregs->configdata, lsc);
+
+ if (sih->buscorerev <= 13) {
+ /* Write configuration registers back to the shadow registers
+ * cause shadow registers are cleared out after watchdog reset.
+ */
+ for (i = 0; i < ARRAYSIZE(cfg_offset); i++) {
+ W_REG(osh, &pcieregs->configaddr, cfg_offset[i]);
+ val = R_REG(osh, &pcieregs->configdata);
+ W_REG(osh, &pcieregs->configdata, val);
+ }
+ }
+ si_setcoreidx(sih, origidx);
+}
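+
+/*
+ * Illustrative usage (a sketch; the wd_mask/wd_val pair below follows the
+ * chipc_corerev >= 65 convention, and the constant names are an assumption
+ * about how the DHD host side drives this):
+ *
+ *	pcie_watchdog_reset(osh, sih, WD_ENABLE_MASK, WD_SSRESET_PCIE_F0_EN);
+ *
+ * On older chipcommon revisions both arguments are ignored and a plain
+ * full-mask watchdog write is issued instead.
+ */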
+
+/* CRWLPCIEGEN2-117 pcie_pipe_Iddq should be controlled
+ * by the L12 state from MAC to save power by putting the
+ * SerDes analog in IDDQ mode
+ */
+void pcie_serdes_iddqdisable(osl_t *osh, si_t *sih, sbpcieregs_t *sbpcieregs)
+{
+ sbpcieregs_t *pcie = NULL;
+ uint crwlpciegen2_117_disable = 0;
+ uint32 origidx = si_coreidx(sih);
+
+ crwlpciegen2_117_disable = PCIE_PipeIddqDisable0 | PCIE_PipeIddqDisable1;
+ /* Switch to PCIE2 core */
+ pcie = (sbpcieregs_t *)si_setcore(sih, PCIE2_CORE_ID, 0);
+ BCM_REFERENCE(pcie);
+ ASSERT(pcie != NULL);
+
+ OR_REG(osh, &sbpcieregs->control,
+ crwlpciegen2_117_disable);
+
+ si_setcoreidx(sih, origidx);
+}
+
+#define PCIE_PMCR_REFUP_MASK 0x3f0001e0
+#define PCIE_PMCR_REFEXT_MASK 0x400000
+#define PCIE_PMCR_REFUP_100US 0x38000080
+#define PCIE_PMCR_REFEXT_100US 0x400000
+
+/* Set PCIE TRefUp time to 100us */
+void pcie_set_trefup_time_100us(si_t *sih)
+{
+ si_corereg(sih, sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, configaddr), ~0, PCI_PMCR_REFUP);
+ si_corereg(sih, sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, configdata), PCIE_PMCR_REFUP_MASK, PCIE_PMCR_REFUP_100US);
+
+ si_corereg(sih, sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, configaddr), ~0, PCI_PMCR_REFUP_EXT);
+ si_corereg(sih, sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, configdata), PCIE_PMCR_REFEXT_MASK, PCIE_PMCR_REFEXT_100US);
+}
+
+uint32
+pcie_cto_to_thresh_default(uint corerev)
+{
+ return REV_GE_69(corerev) ?
+ PCIE_CTO_TO_THRESH_DEFAULT_REV69 : PCIE_CTO_TO_THRESH_DEFAULT;
+}
+
+uint32
+pcie_corereg(osl_t *osh, volatile void *regs, uint32 offset, uint32 mask, uint32 val)
+{
+ volatile uint32 *regsva =
+ (volatile uint32 *)((volatile char *)regs + PCI_16KB0_PCIREGS_OFFSET + offset);
+
+ if (mask || val) {
+ uint32 w = R_REG(osh, regsva);
+ w &= ~mask;
+ w |= val;
+ W_REG(osh, regsva, w);
+ }
+ return (R_REG(osh, regsva));
+}
+#endif /* !defined(DONGLEBUILD) || defined(BCMSTANDALONE_TEST) */
+
+#if defined(DONGLEBUILD)
+void pcie_coherent_accenable(osl_t *osh, si_t *sih)
+{
+ pcieregs_t *pcie = NULL;
+ uint32 val;
+ uint32 origidx = si_coreidx(sih);
+
+ if ((pcie = si_setcore(sih, PCIE2_CORE_ID, 0)) != NULL) {
+ /* PCIe BAR1 coherent access enabled */
+ W_REG(osh, PCIE_configindaddr_ALTBASE(pcie, 0), PCIECFGREG_SPROM_CTRL);
+ val = R_REG(osh, PCIE_configinddata_ALTBASE(pcie, 0));
+ val |= (SPROM_BAR1_COHERENT_ACC_EN | SPROM_BAR2_COHERENT_ACC_EN);
+ W_REG(osh, PCIE_configinddata_ALTBASE(pcie, 0), val);
+ }
+
+ si_setcoreidx(sih, origidx);
+}
+#endif /* DONGLEBUILD */
+#endif /* BCMDRIVER */
diff --git a/bcmdhd.101.10.361.x/pom.h b/bcmdhd.101.10.361.x/pom.h
new file mode 100755
index 0000000..32d056b
--- /dev/null
+++ b/bcmdhd.101.10.361.x/pom.h
@@ -0,0 +1,70 @@
+/*
+ * Broadcom Dongle Host Driver (DHD), common DHD core.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: $
+ */
+
+/*
+ * NOTE:-
+ * This file is an exact copy of trunk/components/opensource/pom/pom.h.
+ * Any changes needed in this file should be reflected in trunk too;
+ * otherwise POM (power on/off manager) and dhd will go out of sync.
+ */
+
+/*
+ * Do not change the order of func ids below.
+ * The order of invoking the power-off and power-on handlers is important
+ * during a power toggle through the API pom_toggle_reg_on.
+ * WLAN FW should be loaded first after REG-ON; otherwise SR does not work.
+ *
+ */
+enum pom_func_id {
+ WLAN_FUNC_ID = 0,
+ BT_FUNC_ID = 1,
+ MAX_COEX_FUNC = 2
+};
+
+enum pom_toggle_reason {
+ BY_WLAN_DUE_TO_WLAN = 0,
+ BY_WLAN_DUE_TO_BT = 1,
+ BY_BT_DUE_TO_BT = 2,
+ BY_BT_DUE_TO_WLAN = 3,
+ BY_USER_PROCESS = 4,
+ BY_UNKNOWN_REASON = 5
+};
+
+/* Common structure to be used to register and de-register from BT/WLAN */
+typedef struct pom_func_handler {
+ unsigned char func_id;
+ void *handler;
+ int (*power_off)(void *handler, unsigned char reason);
+ int (*power_on)(void *handler, unsigned char reason);
+} pom_func_handler_t;
+
+/* Register call back during attach of each function */
+extern int pom_func_register(struct pom_func_handler *func);
+
+/* De-Register call back during detach of each function */
+extern int pom_func_deregister(struct pom_func_handler *func);
+
+/* Toggle Reg ON, called to recover from bad state */
+extern int pom_toggle_reg_on(unsigned char func_id, unsigned char reason);
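+
+/*
+ * Illustrative registration sketch (handler names here are hypothetical, not
+ * part of the POM API). Each function driver registers its handlers once at
+ * attach time and deregisters them at detach:
+ *
+ *	static int wlan_power_off(void *handler, unsigned char reason) { return 0; }
+ *	static int wlan_power_on(void *handler, unsigned char reason) { return 0; }
+ *
+ *	static struct pom_func_handler wlan_pom = {
+ *		.func_id = WLAN_FUNC_ID,
+ *		.handler = NULL,
+ *		.power_off = wlan_power_off,
+ *		.power_on = wlan_power_on,
+ *	};
+ *
+ *	pom_func_register(&wlan_pom);
+ */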
diff --git a/bcmdhd.101.10.361.x/sbutils.c b/bcmdhd.101.10.361.x/sbutils.c
new file mode 100755
index 0000000..1722d88
--- /dev/null
+++ b/bcmdhd.101.10.361.x/sbutils.c
@@ -0,0 +1,1111 @@
+/*
+ * Misc utility routines for accessing chip-specific features
+ * of the SiliconBackplane-based Broadcom chips.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <siutils.h>
+#include <bcmdevs.h>
+#include <hndsoc.h>
+#include <sbchipc.h>
+#if !defined(BCMDONGLEHOST)
+#include <pci_core.h>
+#endif /* !defined(BCMDONGLEHOST) */
+#include <pcicfg.h>
+#include <sbpcmcia.h>
+
+#include "siutils_priv.h"
+
+/* local prototypes */
+static uint _sb_coreidx(const si_info_t *sii, uint32 sba);
+static uint _sb_scan(si_info_t *sii, uint32 sba, volatile void *regs, uint bus, uint32 sbba,
+ uint ncores, uint devid);
+static uint32 _sb_coresba(const si_info_t *sii);
+static volatile void *_sb_setcoreidx(const si_info_t *sii, uint coreidx);
+#define SET_SBREG(sii, r, mask, val) \
+ W_SBREG((sii), (r), ((R_SBREG((sii), (r)) & ~(mask)) | (val)))
+#define REGS2SB(va) (sbconfig_t*) ((volatile int8*)(va) + SBCONFIGOFF)
+
+/* sonicsrev */
+#define SONICS_2_2 (SBIDL_RV_2_2 >> SBIDL_RV_SHIFT)
+#define SONICS_2_3 (SBIDL_RV_2_3 >> SBIDL_RV_SHIFT)
+
+/*
+ * Macros to read/write sbconfig registers.
+ */
+#define R_SBREG(sii, sbr) sb_read_sbreg((sii), (sbr))
+#define W_SBREG(sii, sbr, v) sb_write_sbreg((sii), (sbr), (v))
+#define AND_SBREG(sii, sbr, v) W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) & (v)))
+#define OR_SBREG(sii, sbr, v) W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) | (v)))
+
+static uint32
+sb_read_sbreg(const si_info_t *sii, volatile uint32 *sbr)
+{
+ return R_REG(sii->osh, sbr);
+}
+
+static void
+sb_write_sbreg(const si_info_t *sii, volatile uint32 *sbr, uint32 v)
+{
+ W_REG(sii->osh, sbr, v);
+}
+
+uint
+sb_coreid(const si_t *sih)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ sbconfig_t *sb = REGS2SB(sii->curmap);
+
+ return ((R_SBREG(sii, &sb->sbidhigh) & SBIDH_CC_MASK) >> SBIDH_CC_SHIFT);
+}
+
+uint
+sb_intflag(si_t *sih)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ volatile void *corereg;
+ sbconfig_t *sb;
+ uint origidx, intflag;
+ bcm_int_bitmask_t intr_val;
+
+ INTR_OFF(sii, &intr_val);
+ origidx = si_coreidx(sih);
+ corereg = si_setcore(sih, CC_CORE_ID, 0);
+ ASSERT(corereg != NULL);
+ sb = REGS2SB(corereg);
+ intflag = R_SBREG(sii, &sb->sbflagst);
+ sb_setcoreidx(sih, origidx);
+ INTR_RESTORE(sii, &intr_val);
+
+ return intflag;
+}
+
+uint
+sb_flag(const si_t *sih)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ sbconfig_t *sb = REGS2SB(sii->curmap);
+
+ return R_SBREG(sii, &sb->sbtpsflag) & SBTPS_NUM0_MASK;
+}
+
+void
+sb_setint(const si_t *sih, int siflag)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ sbconfig_t *sb = REGS2SB(sii->curmap);
+ uint32 vec;
+
+ if (siflag == -1)
+ vec = 0;
+ else
+ vec = 1 << siflag;
+ W_SBREG(sii, &sb->sbintvec, vec);
+}
+
+/* return core index of the core with address 'sba' */
+static uint
+BCMATTACHFN(_sb_coreidx)(const si_info_t *sii, uint32 sba)
+{
+ uint i;
+ const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
+
+ for (i = 0; i < sii->numcores; i ++)
+ if (sba == cores_info->coresba[i])
+ return i;
+ return BADIDX;
+}
+
+/* return core address of the current core */
+static uint32
+BCMATTACHFN(_sb_coresba)(const si_info_t *sii)
+{
+ uint32 sbaddr;
+
+ switch (BUSTYPE(sii->pub.bustype)) {
+ case SI_BUS: {
+ sbconfig_t *sb = REGS2SB(sii->curmap);
+ sbaddr = sb_base(R_SBREG(sii, &sb->sbadmatch0));
+ break;
+ }
+
+ case PCI_BUS:
+ sbaddr = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32));
+ break;
+
+#ifdef BCMSDIO
+ case SPI_BUS:
+ case SDIO_BUS:
+ sbaddr = (uint32)(uintptr)sii->curmap;
+ break;
+#endif
+
+ default:
+ sbaddr = BADCOREADDR;
+ break;
+ }
+
+ return sbaddr;
+}
+
+uint
+sb_corevendor(const si_t *sih)
+{
+ const si_info_t *sii;
+ sbconfig_t *sb;
+
+ sii = SI_INFO(sih);
+ sb = REGS2SB(sii->curmap);
+
+ return ((R_SBREG(sii, &sb->sbidhigh) & SBIDH_VC_MASK) >> SBIDH_VC_SHIFT);
+}
+
+uint
+sb_corerev(const si_t *sih)
+{
+ const si_info_t *sii;
+ sbconfig_t *sb;
+ uint sbidh;
+
+ sii = SI_INFO(sih);
+ sb = REGS2SB(sii->curmap);
+ sbidh = R_SBREG(sii, &sb->sbidhigh);
+
+ return (SBCOREREV(sbidh));
+}
+
+/* set core-specific control flags */
+void
+sb_core_cflags_wo(const si_t *sih, uint32 mask, uint32 val)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ sbconfig_t *sb = REGS2SB(sii->curmap);
+ uint32 w;
+
+ ASSERT((val & ~mask) == 0);
+
+ /* mask and set */
+ w = (R_SBREG(sii, &sb->sbtmstatelow) & ~(mask << SBTML_SICF_SHIFT)) |
+ (val << SBTML_SICF_SHIFT);
+ W_SBREG(sii, &sb->sbtmstatelow, w);
+}
+
+/* set/clear core-specific control flags */
+uint32
+sb_core_cflags(const si_t *sih, uint32 mask, uint32 val)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ sbconfig_t *sb = REGS2SB(sii->curmap);
+ uint32 w;
+
+ ASSERT((val & ~mask) == 0);
+
+ /* mask and set */
+ if (mask || val) {
+ w = (R_SBREG(sii, &sb->sbtmstatelow) & ~(mask << SBTML_SICF_SHIFT)) |
+ (val << SBTML_SICF_SHIFT);
+ W_SBREG(sii, &sb->sbtmstatelow, w);
+ }
+
+	/* return the new value;
+	 * for a write operation, the following readback ensures completion of the write.
+ */
+ return (R_SBREG(sii, &sb->sbtmstatelow) >> SBTML_SICF_SHIFT);
+}
+
+/* set/clear core-specific status flags */
+uint32
+sb_core_sflags(const si_t *sih, uint32 mask, uint32 val)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ sbconfig_t *sb = REGS2SB(sii->curmap);
+ uint32 w;
+
+ ASSERT((val & ~mask) == 0);
+ ASSERT((mask & ~SISF_CORE_BITS) == 0);
+
+ /* mask and set */
+ if (mask || val) {
+ w = (R_SBREG(sii, &sb->sbtmstatehigh) & ~(mask << SBTMH_SISF_SHIFT)) |
+ (val << SBTMH_SISF_SHIFT);
+ W_SBREG(sii, &sb->sbtmstatehigh, w);
+ }
+
+ /* return the new value */
+ return (R_SBREG(sii, &sb->sbtmstatehigh) >> SBTMH_SISF_SHIFT);
+}
+
+bool
+sb_iscoreup(const si_t *sih)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ sbconfig_t *sb = REGS2SB(sii->curmap);
+
+ return ((R_SBREG(sii, &sb->sbtmstatelow) &
+ (SBTML_RESET | SBTML_REJ_MASK | (SICF_CLOCK_EN << SBTML_SICF_SHIFT))) ==
+ (SICF_CLOCK_EN << SBTML_SICF_SHIFT));
+}
+
+/*
+ * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
+ * switch back to the original core, and return the new value.
+ *
+ * When using the silicon backplane, no fiddling with interrupts or core switching is needed.
+ *
+ * Also, when using pci/pcie, we can optimize away the core switching for pci registers
+ * and (on newer pci cores) chipcommon registers.
+ */
+uint
+sb_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
+{
+ uint origidx = 0;
+ volatile uint32 *r = NULL;
+ uint w;
+ bcm_int_bitmask_t intr_val;
+ bool fast = FALSE;
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+
+ ASSERT(GOODIDX(coreidx, sii->numcores));
+ ASSERT(regoff < SI_CORE_SIZE);
+ ASSERT((val & ~mask) == 0);
+
+ if (coreidx >= SI_MAXCORES)
+ return 0;
+
+ if (BUSTYPE(sii->pub.bustype) == SI_BUS) {
+ /* If internal bus, we can always get at everything */
+ fast = TRUE;
+ /* map if does not exist */
+ if (!cores_info->regs[coreidx]) {
+ cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
+ SI_CORE_SIZE);
+ ASSERT(GOODREGS(cores_info->regs[coreidx]));
+ }
+ r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] + regoff);
+ } else if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
+ /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
+
+ if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
+ /* Chipc registers are mapped at 12KB */
+
+ fast = TRUE;
+ r = (volatile uint32 *)((volatile char *)sii->curmap +
+ PCI_16KB0_CCREGS_OFFSET + regoff);
+ } else if (sii->pub.buscoreidx == coreidx) {
+			/* pci registers are either in the last 2KB of an 8KB window
+			 * or, in pcie and pci rev 13, at 8KB
+ */
+ fast = TRUE;
+ if (SI_FAST(sii))
+ r = (volatile uint32 *)((volatile char *)sii->curmap +
+ PCI_16KB0_PCIREGS_OFFSET + regoff);
+ else
+ r = (volatile uint32 *)((volatile char *)sii->curmap +
+ ((regoff >= SBCONFIGOFF) ?
+ PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
+ regoff);
+ }
+ }
+
+ if (!fast) {
+ INTR_OFF(sii, &intr_val);
+
+ /* save current core index */
+ origidx = si_coreidx(&sii->pub);
+
+ /* switch core */
+ r = (volatile uint32*) ((volatile uchar*)sb_setcoreidx(&sii->pub, coreidx) +
+ regoff);
+ }
+ ASSERT(r != NULL);
+
+ /* mask and set */
+ if (mask || val) {
+ if (regoff >= SBCONFIGOFF) {
+ w = (R_SBREG(sii, r) & ~mask) | val;
+ W_SBREG(sii, r, w);
+ } else {
+ w = (R_REG(sii->osh, r) & ~mask) | val;
+ W_REG(sii->osh, r, w);
+ }
+ }
+
+ /* readback */
+ if (regoff >= SBCONFIGOFF)
+ w = R_SBREG(sii, r);
+ else {
+ w = R_REG(sii->osh, r);
+ }
+
+ if (!fast) {
+ /* restore core index */
+ if (origidx != coreidx)
+ sb_setcoreidx(&sii->pub, origidx);
+
+ INTR_RESTORE(sii, &intr_val);
+ }
+
+ return (w);
+}
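+
+/*
+ * Illustrative call (a sketch, not part of this driver): passing mask == 0
+ * and val == 0 turns sb_corereg() into a pure read, e.g. fetching
+ * chipcommon's chipid register:
+ *
+ *	uint32 cid = sb_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, chipid), 0, 0);
+ */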
+
+/*
+ * If there is no need for fiddling with interrupts or core switches (typically silicon
+ * backplane registers, pci registers and chipcommon registers), this function
+ * translates the register offset on the given core to a mapped address. This
+ * address can be used with W_REG/R_REG directly.
+ *
+ * For accessing registers that would need a core switch, this function will return
+ * NULL.
+ */
+volatile uint32 *
+sb_corereg_addr(const si_t *sih, uint coreidx, uint regoff)
+{
+ volatile uint32 *r = NULL;
+ bool fast = FALSE;
+ const si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+
+ ASSERT(GOODIDX(coreidx, sii->numcores));
+ ASSERT(regoff < SI_CORE_SIZE);
+
+ if (coreidx >= SI_MAXCORES)
+ return 0;
+
+ if (BUSTYPE(sii->pub.bustype) == SI_BUS) {
+ /* If internal bus, we can always get at everything */
+ fast = TRUE;
+ /* map if does not exist */
+ if (!cores_info->regs[coreidx]) {
+ cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
+ SI_CORE_SIZE);
+ ASSERT(GOODREGS(cores_info->regs[coreidx]));
+ }
+ r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] + regoff);
+ } else if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
+ /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
+
+ if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
+ /* Chipc registers are mapped at 12KB */
+
+ fast = TRUE;
+ r = (volatile uint32 *)((volatile char *)sii->curmap +
+ PCI_16KB0_CCREGS_OFFSET + regoff);
+ } else if (sii->pub.buscoreidx == coreidx) {
+			/* pci registers are either in the last 2KB of an 8KB window
+			 * or, in pcie and pci rev 13, at 8KB
+ */
+ fast = TRUE;
+ if (SI_FAST(sii))
+ r = (volatile uint32 *)((volatile char *)sii->curmap +
+ PCI_16KB0_PCIREGS_OFFSET + regoff);
+ else
+ r = (volatile uint32 *)((volatile char *)sii->curmap +
+ ((regoff >= SBCONFIGOFF) ?
+ PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
+ regoff);
+ }
+ }
+
+ if (!fast)
+ return 0;
+
+ return (r);
+}
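+
+/*
+ * Illustrative fast-path usage (a sketch, not part of this driver; 'osh' is
+ * the caller's osl handle): when a non-NULL address comes back, the core
+ * switch in sb_corereg() can be avoided:
+ *
+ *	volatile uint32 *r = sb_corereg_addr(sih, coreidx, regoff);
+ *	if (r != NULL)
+ *		W_REG(osh, r, val);
+ *	else
+ *		sb_corereg(sih, coreidx, regoff, ~0, val);
+ */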
+
+/* Scan the enumeration space to find all cores starting from the given
+ * bus 'sbba'. Append coreid and other info to the lists in 'si'. 'sba'
+ * is the default core address at chip POR time and 'regs' is the virtual
+ * address that the default core is mapped at. 'ncores' is the number of
+ * cores expected on bus 'sbba'. It returns the total number of cores
+ * starting from bus 'sbba', inclusive.
+ */
+#define SB_MAXBUSES 2
+static uint
+BCMATTACHFN(_sb_scan)(si_info_t *sii, uint32 sba, volatile void *regs, uint bus,
+ uint32 sbba, uint numcores, uint devid)
+{
+ uint next;
+ uint ncc = 0;
+ uint i;
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+
+ /* bail out in case it is too deep to scan at the specified bus level */
+ if (bus >= SB_MAXBUSES) {
+ SI_ERROR(("_sb_scan: bus 0x%08x at level %d is too deep to scan\n", sbba, bus));
+ return 0;
+ }
+ SI_MSG(("_sb_scan: scan bus 0x%08x assume %u cores\n", sbba, numcores));
+
+ /* Scan all cores on the bus starting from core 0.
+ * Core addresses must be contiguous on each bus.
+ */
+ for (i = 0, next = sii->numcores; i < numcores && next < SB_BUS_MAXCORES; i++, next++) {
+ cores_info->coresba[next] = sbba + (i * SI_CORE_SIZE);
+
+ /* keep and reuse the initial register mapping */
+ if ((BUSTYPE(sii->pub.bustype) == SI_BUS) && (cores_info->coresba[next] == sba)) {
+ SI_VMSG(("_sb_scan: reuse mapped regs %p for core %u\n", regs, next));
+ cores_info->regs[next] = regs;
+ }
+
+ /* change core to 'next' and read its coreid */
+ sii->curmap = _sb_setcoreidx(sii, next);
+ sii->curidx = next;
+
+ cores_info->coreid[next] = sb_coreid(&sii->pub);
+
+ /* core specific processing... */
+ /* chipc provides # cores */
+ if (cores_info->coreid[next] == CC_CORE_ID) {
+ chipcregs_t *cc = (chipcregs_t *)sii->curmap;
+
+ /* determine numcores - this is the total # cores in the chip */
+ ASSERT(cc);
+ numcores = (R_REG(sii->osh, &cc->chipid) & CID_CC_MASK) >> CID_CC_SHIFT;
+ SI_VMSG(("_sb_scan: there are %u cores in the chip %s\n", numcores,
+ sii->pub.issim ? "QT" : ""));
+ }
+ /* scan bridged SB(s) and add results to the end of the list */
+ else if (cores_info->coreid[next] == OCP_CORE_ID) {
+ sbconfig_t *sb = REGS2SB(sii->curmap);
+ uint32 nsbba = R_SBREG(sii, &sb->sbadmatch1);
+ uint nsbcc;
+
+ sii->numcores = next + 1;
+
+ if ((nsbba & 0xfff00000) != si_enum_base(devid))
+ continue;
+ nsbba &= 0xfffff000;
+ if (_sb_coreidx(sii, nsbba) != BADIDX)
+ continue;
+
+ nsbcc = (R_SBREG(sii, &sb->sbtmstatehigh) & 0x000f0000) >> 16;
+ nsbcc = _sb_scan(sii, sba, regs, bus + 1, nsbba, nsbcc, devid);
+ if (sbba == si_enum_base(devid))
+ numcores -= nsbcc;
+ ncc += nsbcc;
+ }
+ }
+
+ SI_MSG(("_sb_scan: found %u cores on bus 0x%08x\n", i, sbba));
+
+ sii->numcores = i + ncc;
+ return sii->numcores;
+}
+
+/* scan the sb enumerated space to identify all cores */
+void
+BCMATTACHFN(sb_scan)(si_t *sih, volatile void *regs, uint devid)
+{
+ uint32 origsba;
+ sbconfig_t *sb;
+ si_info_t *sii = SI_INFO(sih);
+ BCM_REFERENCE(devid);
+
+ sb = REGS2SB(sii->curmap);
+
+ sii->pub.socirev = (R_SBREG(sii, &sb->sbidlow) & SBIDL_RV_MASK) >> SBIDL_RV_SHIFT;
+
+	/* Save the current core info and validate it later, once we know
+ * for sure what is good and what is bad.
+ */
+ origsba = _sb_coresba(sii);
+
+ /* scan all SB(s) starting from SI_ENUM_BASE_DEFAULT */
+ sii->numcores = _sb_scan(sii, origsba, regs, 0, si_enum_base(devid), 1, devid);
+}
+
+/*
+ * This function changes logical "focus" to the indicated core;
+ * must be called with interrupts off.
+ * Moreover, callers should keep interrupts off during switching out of and back to d11 core
+ */
+volatile void *
+sb_setcoreidx(si_t *sih, uint coreidx)
+{
+ si_info_t *sii = SI_INFO(sih);
+
+ if (coreidx >= sii->numcores)
+ return (NULL);
+
+ /*
+ * If the user has provided an interrupt mask enabled function,
+ * then assert interrupts are disabled before switching the core.
+ */
+ ASSERT((sii->intrsenabled_fn == NULL) || !(*(sii)->intrsenabled_fn)((sii)->intr_arg));
+
+ sii->curmap = _sb_setcoreidx(sii, coreidx);
+ sii->curidx = coreidx;
+
+ return (sii->curmap);
+}
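+
+/*
+ * Illustrative call pattern (a sketch, not part of this driver): callers
+ * bracket the switch with the interrupt helpers used throughout this file,
+ * as sb_intflag() does above:
+ *
+ *	INTR_OFF(sii, &intr_val);
+ *	origidx = si_coreidx(sih);
+ *	regs = sb_setcoreidx(sih, coreidx);
+ *	...
+ *	sb_setcoreidx(sih, origidx);
+ *	INTR_RESTORE(sii, &intr_val);
+ */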
+
+/* This function changes the logical "focus" to the indicated core.
+ * Return the current core's virtual address.
+ */
+static volatile void *
+_sb_setcoreidx(const si_info_t *sii, uint coreidx)
+{
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+ uint32 sbaddr = cores_info->coresba[coreidx];
+ volatile void *regs;
+
+ switch (BUSTYPE(sii->pub.bustype)) {
+ case SI_BUS:
+ /* map new one */
+ if (!cores_info->regs[coreidx]) {
+ cores_info->regs[coreidx] = REG_MAP(sbaddr, SI_CORE_SIZE);
+ ASSERT(GOODREGS(cores_info->regs[coreidx]));
+ }
+ regs = cores_info->regs[coreidx];
+ break;
+
+ case PCI_BUS:
+ /* point bar0 window */
+ OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, sbaddr);
+ regs = sii->curmap;
+ break;
+
+#ifdef BCMSDIO
+ case SPI_BUS:
+ case SDIO_BUS:
+ /* map new one */
+ if (!cores_info->regs[coreidx]) {
+ cores_info->regs[coreidx] = (void *)(uintptr)sbaddr;
+ ASSERT(GOODREGS(cores_info->regs[coreidx]));
+ }
+ regs = cores_info->regs[coreidx];
+ break;
+#endif /* BCMSDIO */
+
+ default:
+ ASSERT(0);
+ regs = NULL;
+ break;
+ }
+
+ return regs;
+}
+
+/* Return the address of sbadmatch0/1/2/3 register */
+static volatile uint32 *
+sb_admatch(const si_info_t *sii, uint asidx)
+{
+ sbconfig_t *sb;
+ volatile uint32 *addrm;
+
+ sb = REGS2SB(sii->curmap);
+
+ switch (asidx) {
+ case 0:
+ addrm = &sb->sbadmatch0;
+ break;
+
+ case 1:
+ addrm = &sb->sbadmatch1;
+ break;
+
+ case 2:
+ addrm = &sb->sbadmatch2;
+ break;
+
+ case 3:
+ addrm = &sb->sbadmatch3;
+ break;
+
+ default:
+ SI_ERROR(("sb_admatch: Address space index (%d) out of range\n", asidx));
+ return 0;
+ }
+
+ return (addrm);
+}
+
+/* Return the number of address spaces in current core */
+int
+sb_numaddrspaces(const si_t *sih)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ sbconfig_t *sb = REGS2SB(sii->curmap);
+
+ /* + 1 because of enumeration space */
+ return ((R_SBREG(sii, &sb->sbidlow) & SBIDL_AR_MASK) >> SBIDL_AR_SHIFT) + 1;
+}
+
+/* Return the address of the nth address space in the current core */
+uint32
+sb_addrspace(const si_t *sih, uint asidx)
+{
+ const si_info_t *sii = SI_INFO(sih);
+
+ return (sb_base(R_SBREG(sii, sb_admatch(sii, asidx))));
+}
+
+/* Return the size of the nth address space in the current core */
+uint32
+sb_addrspacesize(const si_t *sih, uint asidx)
+{
+ const si_info_t *sii = SI_INFO(sih);
+
+ return (sb_size(R_SBREG(sii, sb_admatch(sii, asidx))));
+}
+
+#if defined(BCMDBG_ERR) || defined(BCMASSERT_SUPPORT) || \
+ defined(BCMDBG_DUMP)
+/* traverse all cores to find and clear source of serror */
+static void
+sb_serr_clear(si_info_t *sii)
+{
+ sbconfig_t *sb;
+ uint origidx;
+ uint i;
+ bcm_int_bitmask_t intr_val;
+ volatile void *corereg = NULL;
+
+ INTR_OFF(sii, &intr_val);
+ origidx = si_coreidx(&sii->pub);
+
+ for (i = 0; i < sii->numcores; i++) {
+ corereg = sb_setcoreidx(&sii->pub, i);
+ if (NULL != corereg) {
+ sb = REGS2SB(corereg);
+ if ((R_SBREG(sii, &sb->sbtmstatehigh)) & SBTMH_SERR) {
+ AND_SBREG(sii, &sb->sbtmstatehigh, ~SBTMH_SERR);
+ SI_ERROR(("sb_serr_clear: SError at core 0x%x\n",
+ sb_coreid(&sii->pub)));
+ }
+ }
+ }
+
+ sb_setcoreidx(&sii->pub, origidx);
+ INTR_RESTORE(sii, &intr_val);
+}
+
+/*
+ * Check whether any inband, outband or timeout errors have occurred and clear them.
+ * Must be called with the chip clock on!
+ */
+bool
+sb_taclear(si_t *sih, bool details)
+{
+ si_info_t *sii = SI_INFO(sih);
+ bool rc = FALSE;
+ uint32 inband = 0, serror = 0, timeout = 0;
+ volatile uint32 imstate;
+
+ BCM_REFERENCE(sii);
+
+ if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
+ volatile uint32 stcmd;
+
+ /* inband error is Target abort for PCI */
+ stcmd = OSL_PCI_READ_CONFIG(sii->osh, PCI_CFG_CMD, sizeof(uint32));
+ inband = stcmd & PCI_STAT_TA;
+ if (inband) {
+#ifdef BCMDBG
+ if (details) {
+ SI_ERROR(("\ninband:\n"));
+ si_viewall(sih, FALSE);
+ }
+#endif
+ OSL_PCI_WRITE_CONFIG(sii->osh, PCI_CFG_CMD, sizeof(uint32), stcmd);
+ }
+
+ /* serror */
+ stcmd = OSL_PCI_READ_CONFIG(sii->osh, PCI_INT_STATUS, sizeof(uint32));
+ serror = stcmd & PCI_SBIM_STATUS_SERR;
+ if (serror) {
+#ifdef BCMDBG
+ if (details) {
+ SI_ERROR(("\nserror:\n"));
+ si_viewall(sih, FALSE);
+ }
+#endif
+ sb_serr_clear(sii);
+ OSL_PCI_WRITE_CONFIG(sii->osh, PCI_INT_STATUS, sizeof(uint32), stcmd);
+ }
+
+ /* timeout */
+ imstate = sb_corereg(sih, sii->pub.buscoreidx,
+ SBCONFIGOFF + OFFSETOF(sbconfig_t, sbimstate), 0, 0);
+ if ((imstate != 0xffffffff) && (imstate & (SBIM_IBE | SBIM_TO))) {
+ sb_corereg(sih, sii->pub.buscoreidx,
+ SBCONFIGOFF + OFFSETOF(sbconfig_t, sbimstate), ~0,
+ (imstate & ~(SBIM_IBE | SBIM_TO)));
+ /* inband = imstate & SBIM_IBE; same as TA above */
+ timeout = imstate & SBIM_TO;
+ if (timeout) {
+#ifdef BCMDBG
+ if (details) {
+ SI_ERROR(("\ntimeout:\n"));
+ si_viewall(sih, FALSE);
+ }
+#endif
+ }
+ }
+
+ if (inband) {
+ /* dump errlog for sonics >= 2.3 */
+			if (sii->pub.socirev != SONICS_2_2) {
+ uint32 imerrlog, imerrloga;
+ imerrlog = sb_corereg(sih, sii->pub.buscoreidx, SBIMERRLOG, 0, 0);
+ if (imerrlog & SBTMEL_EC) {
+ imerrloga = sb_corereg(sih, sii->pub.buscoreidx,
+ SBIMERRLOGA, 0, 0);
+ BCM_REFERENCE(imerrloga);
+ /* clear errlog */
+ sb_corereg(sih, sii->pub.buscoreidx, SBIMERRLOG, ~0, 0);
+ SI_ERROR(("sb_taclear: ImErrLog 0x%x, ImErrLogA 0x%x\n",
+ imerrlog, imerrloga));
+ }
+ }
+ }
+ }
+#ifdef BCMSDIO
+ else if ((BUSTYPE(sii->pub.bustype) == SDIO_BUS) ||
+ (BUSTYPE(sii->pub.bustype) == SPI_BUS)) {
+ sbconfig_t *sb;
+ uint origidx;
+ bcm_int_bitmask_t intr_val;
+ volatile void *corereg = NULL;
+ volatile uint32 tmstate;
+
+ INTR_OFF(sii, &intr_val);
+ origidx = si_coreidx(sih);
+
+ corereg = si_setcore(sih, SDIOD_CORE_ID, 0);
+ if (corereg != NULL) {
+ sb = REGS2SB(corereg);
+
+ imstate = R_SBREG(sii, &sb->sbimstate);
+ if ((imstate != 0xffffffff) && (imstate & (SBIM_IBE | SBIM_TO))) {
+ AND_SBREG(sii, &sb->sbimstate, ~(SBIM_IBE | SBIM_TO));
+ /* inband = imstate & SBIM_IBE; cmd error */
+ timeout = imstate & SBIM_TO;
+ }
+ tmstate = R_SBREG(sii, &sb->sbtmstatehigh);
+ if ((tmstate != 0xffffffff) && (tmstate & SBTMH_INT_STATUS)) {
+ sb_serr_clear(sii);
+ serror = 1;
+ OR_SBREG(sii, &sb->sbtmstatelow, SBTML_INT_ACK);
+ AND_SBREG(sii, &sb->sbtmstatelow, ~SBTML_INT_ACK);
+ }
+ }
+
+ sb_setcoreidx(sih, origidx);
+ INTR_RESTORE(sii, &intr_val);
+ }
+#endif /* BCMSDIO */
+
+ if (inband | timeout | serror) {
+ rc = TRUE;
+ SI_ERROR(("sb_taclear: inband 0x%x, serror 0x%x, timeout 0x%x!\n",
+ inband, serror, timeout));
+ }
+
+ return (rc);
+}
+#endif /* BCMDBG_ERR || BCMASSERT_SUPPORT || BCMDBG_DUMP */
+
+/* do buffered registers update */
+void
+sb_commit(si_t *sih)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ uint origidx;
+ bcm_int_bitmask_t intr_val;
+
+ origidx = sii->curidx;
+ ASSERT(GOODIDX(origidx, sii->numcores));
+
+ INTR_OFF(sii, &intr_val);
+
+ /* switch over to chipcommon core if there is one, else use pci */
+ if (sii->pub.ccrev != NOREV) {
+ chipcregs_t *ccregs = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+ ASSERT(ccregs != NULL);
+
+ /* do the buffer registers update */
+ W_REG(sii->osh, &ccregs->broadcastaddress, SB_COMMIT);
+ W_REG(sii->osh, &ccregs->broadcastdata, 0x0);
+#if !defined(BCMDONGLEHOST)
+ } else if (PCI(sii)) {
+ sbpciregs_t *pciregs = (sbpciregs_t *)si_setcore(sih, PCI_CORE_ID, 0);
+ ASSERT(pciregs != NULL);
+
+ /* do the buffer registers update */
+ W_REG(sii->osh, &pciregs->bcastaddr, SB_COMMIT);
+ W_REG(sii->osh, &pciregs->bcastdata, 0x0);
+#endif /* !defined(BCMDONGLEHOST) */
+ } else
+ ASSERT(0);
+
+ /* restore core index */
+ sb_setcoreidx(sih, origidx);
+ INTR_RESTORE(sii, &intr_val);
+}
+
+void
+sb_core_disable(const si_t *sih, uint32 bits)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ volatile uint32 dummy;
+ sbconfig_t *sb;
+
+ ASSERT(GOODREGS(sii->curmap));
+ sb = REGS2SB(sii->curmap);
+
+ /* if core is already in reset, just return */
+ if (R_SBREG(sii, &sb->sbtmstatelow) & SBTML_RESET)
+ return;
+
+ /* if clocks are not enabled, put into reset and return */
+ if ((R_SBREG(sii, &sb->sbtmstatelow) & (SICF_CLOCK_EN << SBTML_SICF_SHIFT)) == 0)
+ goto disable;
+
+ /* set target reject and spin until busy is clear (preserve core-specific bits) */
+ OR_SBREG(sii, &sb->sbtmstatelow, SBTML_REJ);
+ dummy = R_SBREG(sii, &sb->sbtmstatelow);
+ BCM_REFERENCE(dummy);
+ OSL_DELAY(1);
+ SPINWAIT((R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY), 100000);
+ if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY)
+ SI_ERROR(("sb_core_disable: target state still busy\n"));
+
+ /*
+ * If core is initiator, set the Reject bit and allow Busy to clear.
+ * sonicsrev < 2.3 chips don't have the Reject and Busy bits (nops).
+ * Don't assert - dma engine might be stuck (PR4871).
+ */
+ if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT) {
+ OR_SBREG(sii, &sb->sbimstate, SBIM_RJ);
+ dummy = R_SBREG(sii, &sb->sbimstate);
+ BCM_REFERENCE(dummy);
+ OSL_DELAY(1);
+ SPINWAIT((R_SBREG(sii, &sb->sbimstate) & SBIM_BY), 100000);
+ }
+
+ /* set reset and reject while enabling the clocks */
+ W_SBREG(sii, &sb->sbtmstatelow,
+ (((bits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
+ SBTML_REJ | SBTML_RESET));
+ dummy = R_SBREG(sii, &sb->sbtmstatelow);
+ BCM_REFERENCE(dummy);
+ OSL_DELAY(10);
+
+ /* don't forget to clear the initiator reject bit */
+ if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT)
+ AND_SBREG(sii, &sb->sbimstate, ~SBIM_RJ);
+
+disable:
+ /* leave reset and reject asserted */
+ W_SBREG(sii, &sb->sbtmstatelow, ((bits << SBTML_SICF_SHIFT) | SBTML_REJ | SBTML_RESET));
+ OSL_DELAY(1);
+}
+
+/* reset and re-enable a core
+ * inputs:
+ * bits - core specific bits that are set during and after reset sequence
+ * resetbits - core specific bits that are set only during reset sequence
+ */
+void
+sb_core_reset(const si_t *sih, uint32 bits, uint32 resetbits)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ sbconfig_t *sb;
+ volatile uint32 dummy;
+
+ ASSERT(GOODREGS(sii->curmap));
+ sb = REGS2SB(sii->curmap);
+
+ /*
+ * Must do the disable sequence first to work for arbitrary current core state.
+ */
+ sb_core_disable(sih, (bits | resetbits));
+
+ /*
+ * Now do the initialization sequence.
+ */
+
+ /* set reset while enabling the clock and forcing them on throughout the core */
+ W_SBREG(sii, &sb->sbtmstatelow,
+ (((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
+ SBTML_RESET));
+ dummy = R_SBREG(sii, &sb->sbtmstatelow);
+ BCM_REFERENCE(dummy);
+ OSL_DELAY(1);
+
+ /* PR3158 - clear any serror */
+ if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_SERR) {
+ W_SBREG(sii, &sb->sbtmstatehigh, 0);
+ }
+ if ((dummy = R_SBREG(sii, &sb->sbimstate)) & (SBIM_IBE | SBIM_TO)) {
+ AND_SBREG(sii, &sb->sbimstate, ~(SBIM_IBE | SBIM_TO));
+ }
+
+ /* clear reset and allow it to propagate throughout the core */
+ W_SBREG(sii, &sb->sbtmstatelow,
+ ((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT));
+ dummy = R_SBREG(sii, &sb->sbtmstatelow);
+ BCM_REFERENCE(dummy);
+ OSL_DELAY(1);
+
+ /* leave clock enabled */
+ W_SBREG(sii, &sb->sbtmstatelow, ((bits | SICF_CLOCK_EN) << SBTML_SICF_SHIFT));
+ dummy = R_SBREG(sii, &sb->sbtmstatelow);
+ BCM_REFERENCE(dummy);
+ OSL_DELAY(1);
+}
+
+uint32
+sb_base(uint32 admatch)
+{
+ uint32 base;
+ uint type;
+
+ type = admatch & SBAM_TYPE_MASK;
+ ASSERT(type < 3);
+
+ base = 0;
+
+ if (type == 0) {
+ base = admatch & SBAM_BASE0_MASK;
+ } else if (type == 1) {
+ ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
+ base = admatch & SBAM_BASE1_MASK;
+ } else if (type == 2) {
+ ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
+ base = admatch & SBAM_BASE2_MASK;
+ }
+
+ return (base);
+}
+
+uint32
+sb_size(uint32 admatch)
+{
+ uint32 size;
+ uint type;
+
+ type = admatch & SBAM_TYPE_MASK;
+ ASSERT(type < 3);
+
+ size = 0;
+
+ if (type == 0) {
+ size = 1 << (((admatch & SBAM_ADINT0_MASK) >> SBAM_ADINT0_SHIFT) + 1);
+ } else if (type == 1) {
+ ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
+ size = 1 << (((admatch & SBAM_ADINT1_MASK) >> SBAM_ADINT1_SHIFT) + 1);
+ } else if (type == 2) {
+ ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
+ size = 1 << (((admatch & SBAM_ADINT2_MASK) >> SBAM_ADINT2_SHIFT) + 1);
+ }
+
+ return (size);
+}
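+
+/*
+ * Worked example (a sketch; the exact SBAM_* encodings are assumed from
+ * sbconfig.h): given a type-0 admatch word describing a 4KB core at
+ * backplane address 0x18000000, sb_base() masks off the low control bits to
+ * recover the base and sb_size() decodes the size exponent:
+ *
+ *	sb_base(admatch);	// -> 0x18000000
+ *	sb_size(admatch);	// -> 0x1000 (SI_CORE_SIZE)
+ */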
+
+#if defined(BCMDBG) || defined(BCMDBG_DUMP) || defined(BCMDBG_PHYDUMP)
+/* print interesting sbconfig registers */
+void
+sb_dumpregs(si_t *sih, struct bcmstrbuf *b)
+{
+ sbconfig_t *sb;
+ uint origidx, i;
+ bcm_int_bitmask_t intr_val;
+ const si_info_t *sii = SI_INFO(sih);
+ const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
+
+ origidx = sii->curidx;
+
+ INTR_OFF(sii, &intr_val);
+
+ for (i = 0; i < sii->numcores; i++) {
+ sb = REGS2SB(sb_setcoreidx(sih, i));
+
+ bcm_bprintf(b, "core 0x%x: \n", cores_info->coreid[i]);
+
+ if (sii->pub.socirev > SONICS_2_2)
+ bcm_bprintf(b, "sbimerrlog 0x%x sbimerrloga 0x%x\n",
+ sb_corereg(sih, si_coreidx(&sii->pub), SBIMERRLOG, 0, 0),
+ sb_corereg(sih, si_coreidx(&sii->pub), SBIMERRLOGA, 0, 0));
+
+ bcm_bprintf(b, "sbtmstatelow 0x%x sbtmstatehigh 0x%x sbidhigh 0x%x "
+ "sbimstate 0x%x\n sbimconfiglow 0x%x sbimconfighigh 0x%x\n",
+ R_SBREG(sii, &sb->sbtmstatelow), R_SBREG(sii, &sb->sbtmstatehigh),
+ R_SBREG(sii, &sb->sbidhigh), R_SBREG(sii, &sb->sbimstate),
+ R_SBREG(sii, &sb->sbimconfiglow), R_SBREG(sii, &sb->sbimconfighigh));
+ }
+
+ sb_setcoreidx(sih, origidx);
+ INTR_RESTORE(sii, &intr_val);
+}
+#endif /* BCMDBG || BCMDBG_DUMP || BCMDBG_PHYDUMP */
+
+#if defined(BCMDBG)
+void
+sb_view(si_t *sih, bool verbose)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ sbconfig_t *sb = REGS2SB(sii->curmap);
+
+ SI_ERROR(("\nCore ID: 0x%x\n", sb_coreid(&sii->pub)));
+
+ if (sii->pub.socirev > SONICS_2_2)
+ SI_ERROR(("sbimerrlog 0x%x sbimerrloga 0x%x\n",
+ sb_corereg(sih, si_coreidx(&sii->pub), SBIMERRLOG, 0, 0),
+ sb_corereg(sih, si_coreidx(&sii->pub), SBIMERRLOGA, 0, 0)));
+
+ /* Print important or helpful registers */
+ SI_ERROR(("sbtmerrloga 0x%x sbtmerrlog 0x%x\n",
+ R_SBREG(sii, &sb->sbtmerrloga), R_SBREG(sii, &sb->sbtmerrlog)));
+ SI_ERROR(("sbimstate 0x%x sbtmstatelow 0x%x sbtmstatehigh 0x%x\n",
+ R_SBREG(sii, &sb->sbimstate),
+ R_SBREG(sii, &sb->sbtmstatelow), R_SBREG(sii, &sb->sbtmstatehigh)));
+ SI_ERROR(("sbimconfiglow 0x%x sbtmconfiglow 0x%x\nsbtmconfighigh 0x%x sbidhigh 0x%x\n",
+ R_SBREG(sii, &sb->sbimconfiglow), R_SBREG(sii, &sb->sbtmconfiglow),
+ R_SBREG(sii, &sb->sbtmconfighigh), R_SBREG(sii, &sb->sbidhigh)));
+
+ /* Print more detailed registers that are otherwise not relevant */
+ if (verbose) {
+ SI_ERROR(("sbipsflag 0x%x sbtpsflag 0x%x\n",
+ R_SBREG(sii, &sb->sbipsflag), R_SBREG(sii, &sb->sbtpsflag)));
+ SI_ERROR(("sbadmatch3 0x%x sbadmatch2 0x%x\nsbadmatch1 0x%x sbadmatch0 0x%x\n",
+ R_SBREG(sii, &sb->sbadmatch3), R_SBREG(sii, &sb->sbadmatch2),
+ R_SBREG(sii, &sb->sbadmatch1), R_SBREG(sii, &sb->sbadmatch0)));
+ SI_ERROR(("sbintvec 0x%x sbbwa0 0x%x sbimconfighigh 0x%x\n",
+ R_SBREG(sii, &sb->sbintvec), R_SBREG(sii, &sb->sbbwa0),
+ R_SBREG(sii, &sb->sbimconfighigh)));
+ SI_ERROR(("sbbconfig 0x%x sbbstate 0x%x\n",
+ R_SBREG(sii, &sb->sbbconfig), R_SBREG(sii, &sb->sbbstate)));
+ SI_ERROR(("sbactcnfg 0x%x sbflagst 0x%x sbidlow 0x%x \n\n",
+ R_SBREG(sii, &sb->sbactcnfg), R_SBREG(sii, &sb->sbflagst),
+ R_SBREG(sii, &sb->sbidlow)));
+ }
+}
+#endif /* BCMDBG */
diff --git a/bcmdhd.101.10.361.x/siutils.c b/bcmdhd.101.10.361.x/siutils.c
new file mode 100755
index 0000000..dab5843
--- /dev/null
+++ b/bcmdhd.101.10.361.x/siutils.c
@@ -0,0 +1,10249 @@
+/*
+ * Misc utility routines for accessing chip-specific features
+ * of the SiliconBackplane-based Broadcom chips.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <siutils.h>
+#include <bcmdevs.h>
+#include <hndsoc.h>
+#include <sbchipc.h>
+#include <sbgci.h>
+#ifndef BCMSDIO
+#include <pcie_core.h>
+#endif
+#if !defined(BCMDONGLEHOST)
+#include <pci_core.h>
+#include <nicpci.h>
+#include <bcmnvram.h>
+#include <bcmsrom.h>
+#include <hndtcam.h>
+#endif /* !defined(BCMDONGLEHOST) */
+#ifdef BCMPCIEDEV
+#include <pcieregsoffs.h>
+#include <pciedev.h>
+#endif /* BCMPCIEDEV */
+#include <pcicfg.h>
+#include <sbpcmcia.h>
+#include <sbsysmem.h>
+#include <sbsocram.h>
+#if defined(BCMECICOEX) || !defined(BCMDONGLEHOST)
+#include <bcmotp.h>
+#endif /* BCMECICOEX || !BCMDONGLEHOST */
+#ifdef BCMSDIO
+#include <bcmsdh.h>
+#include <sdio.h>
+#include <sbsdio.h>
+#include <sbhnddma.h>
+#include <sbsdpcmdev.h>
+#include <bcmsdpcm.h>
+#endif /* BCMSDIO */
+#include <hndpmu.h>
+#ifdef BCMSPI
+#include <spid.h>
+#endif /* BCMSPI */
+#if !defined(BCMDONGLEHOST) && !defined(BCM_BOOTLOADER) && defined(SR_ESSENTIALS)
+#include <saverestore.h>
+#endif
+#include <dhd_config.h>
+
+#ifdef BCM_SDRBL
+#include <hndcpu.h>
+#endif /* BCM_SDRBL */
+#ifdef HNDGCI
+#include <hndgci.h>
+#endif /* HNDGCI */
+#ifdef DONGLEBUILD
+#include <hnd_gci.h>
+#endif /* DONGLEBUILD */
+#include <hndlhl.h>
+#include <hndoobr.h>
+#include <lpflags.h>
+#ifdef BCM_SFLASH
+#include <sflash.h>
+#endif
+#ifdef BCM_SH_SFLASH
+#include <sh_sflash.h>
+#endif
+#ifdef BCMGCISHM
+#include <hnd_gcishm.h>
+#endif
+#include "siutils_priv.h"
+#include "sbhndarm.h"
+#include <hndchipc.h>
+#ifdef SOCI_NCI_BUS
+#include <nci.h>
+#endif /* SOCI_NCI_BUS */
+
+#ifdef SECI_UART
+/* Defines the set of GPIOs to be used for SECI UART if not specified in NVRAM */
+/* For further details on each pin's functionality, please refer to the PINMUX table in
+ * the top-level architecture document of the BCMXXXX chip
+ */
+#define DEFAULT_SECI_UART_PINMUX 0x08090a0b
+static bool force_seci_clk = 0;
+#endif /* SECI_UART */
+
+#define XTAL_FREQ_26000KHZ 26000
+#define XTAL_FREQ_59970KHZ 59970
+#define WCI2_UART_RX_BUF_SIZE 64
+
+/**
+ * A set of PMU registers is clocked in the ILP domain, which has an implication on register write
+ * behavior: if such a register is written, it takes multiple ILP clocks for the PMU block to absorb
+ * the write. During that time the 'SlowWritePending' bit in the PMUStatus register is set.
+ */
+#define PMUREGS_ILP_SENSITIVE(regoff) \
+ ((regoff) == OFFSETOF(pmuregs_t, pmutimer) || \
+ (regoff) == OFFSETOF(pmuregs_t, pmuwatchdog) || \
+ (regoff) == OFFSETOF(pmuregs_t, res_req_timer))
+
+#define CHIPCREGS_ILP_SENSITIVE(regoff) \
+ ((regoff) == OFFSETOF(chipcregs_t, pmutimer) || \
+ (regoff) == OFFSETOF(chipcregs_t, pmuwatchdog) || \
+ (regoff) == OFFSETOF(chipcregs_t, res_req_timer))
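+
+/*
+ * Illustrative wait (a sketch, not part of this driver; the pmustatus
+ * SlowWritePending bit name is assumed): a write to one of the ILP-sensitive
+ * registers above is typically followed by a wait for the PMU to absorb it:
+ *
+ *	W_REG(osh, &pmu->pmuwatchdog, ticks);
+ *	SPINWAIT(R_REG(osh, &pmu->pmustatus) & PST_SLOW_WR_PENDING,
+ *		PMU_MAX_TRANSITION_DLY);
+ */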
+
+#define GCI_FEM_CTRL_WAR 0x11111111
+
+#ifndef AXI_TO_VAL
+#define AXI_TO_VAL 19
+#endif /* AXI_TO_VAL */
+
+#ifndef AXI_TO_VAL_25
+/*
+ * Increase BP timeout for fast clock and short PCIe timeouts
+ * New timeout: 2 ** 25 cycles
+ */
+#define AXI_TO_VAL_25 25
+#endif /* AXI_TO_VAL_25 */
+
+#define si_srpwr_domain_mask(rval, mask) \
+ (((rval) >> SRPWR_STATUS_SHIFT) & (mask))
+
+/* local prototypes */
+#if !defined(BCMDONGLEHOST)
+static void si_43012_lp_enable(si_t *sih);
+#endif /* !defined(BCMDONGLEHOST) */
+static int32 BCMATTACHFN(si_alloc_wrapper)(si_info_t *sii);
+static si_info_t *si_doattach(si_info_t *sii, uint devid, osl_t *osh, volatile void *regs,
+ uint bustype, void *sdh, char **vars, uint *varsz);
+static bool si_buscore_prep(si_info_t *sii, uint bustype, uint devid, void *sdh);
+static bool si_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype, uint32 savewin,
+ uint *origidx, volatile const void *regs);
+
+#if !defined(BCMDONGLEHOST)
+static void si_nvram_process(si_info_t *sii, char *pvars);
+
+/* dev path concatenation util */
+static char *si_devpathvar(const si_t *sih, char *var, int len, const char *name);
+static char *si_pcie_devpathvar(const si_t *sih, char *var, int len, const char *name);
+static bool _si_clkctl_cc(si_info_t *sii, uint mode);
+static bool si_ispcie(const si_info_t *sii);
+static uint sysmem_banksize(const si_info_t *sii, sysmemregs_t *r, uint8 idx);
+static uint socram_banksize(const si_info_t *sii, sbsocramregs_t *r, uint8 idx, uint8 mtype);
+static void si_gci_get_chipctrlreg_ringidx_base4(uint32 pin, uint32 *regidx, uint32 *pos);
+static uint8 si_gci_get_chipctrlreg_ringidx_base8(uint32 pin, uint32 *regidx, uint32 *pos);
+static void si_gci_gpio_chipcontrol(si_t *si, uint8 gpoi, uint8 opt);
+static void si_gci_enable_gpioint(si_t *sih, bool enable);
+#if defined(BCMECICOEX) || defined(SECI_UART)
+static chipcregs_t * seci_set_core(si_t *sih, uint32 *origidx, bool *fast);
+#endif
+#endif /* !defined(BCMDONGLEHOST) */
+
+static bool si_pmu_is_ilp_sensitive(uint32 idx, uint regoff);
+
+static void si_oob_war_BT_F1(si_t *sih);
+
+#if defined(DONGLEBUILD)
+#if !defined(NVSRCX)
+static char * BCMATTACHFN(si_getkvars)(void);
+static int BCMATTACHFN(si_getkvarsz)(void);
+#endif
+#endif /* DONGLEBUILD */
+
+#if defined(BCMLTECOEX) && !defined(WLTEST)
+static void si_wci2_rxfifo_intr_handler_process(si_t *sih, uint32 intstatus);
+#endif /* BCMLTECOEX && !WLTEST */
+
+/* global variable to indicate reservation/release of GPIOs */
+static uint32 si_gpioreservation = 0;
+#if !defined(BCMDONGLEHOST)
+/* global variable to indicate GCI reset is done */
+static bool gci_reset_done = FALSE;
+#endif
+/* global flag to prevent shared resources from being initialized multiple times in si_attach() */
+static bool si_onetimeinit = FALSE;
+
+#ifdef SR_DEBUG
+static const uint32 si_power_island_test_array[] = {
+ 0x0000, 0x0001, 0x0010, 0x0011,
+ 0x0100, 0x0101, 0x0110, 0x0111,
+ 0x1000, 0x1001, 0x1010, 0x1011,
+ 0x1100, 0x1101, 0x1110, 0x1111
+};
+#endif /* SR_DEBUG */
+
+/* 4360 pcie2 WAR */
+int do_4360_pcie2_war = 0;
+
+/* global kernel resource */
+static si_info_t ksii;
+static si_cores_info_t ksii_cores_info;
+
+#ifndef BCMDONGLEHOST
+static const char BCMATTACHDATA(rstr_rmin)[] = "rmin";
+static const char BCMATTACHDATA(rstr_rmax)[] = "rmax";
+
+static const char BCMATTACHDATA(rstr_lhl_ps_mode)[] = "lhl_ps_mode";
+static const char BCMATTACHDATA(rstr_ext_wakeup_dis)[] = "ext_wakeup_dis";
+#if defined(BCMSRTOPOFF) && !defined(BCMSRTOPOFF_DISABLED)
+static const char BCMATTACHDATA(rstr_srtopoff_enab)[] = "srtopoff_enab";
+#endif
+#endif /* BCMDONGLEHOST */
+
+static uint32 wd_msticks; /**< watchdog timer ticks normalized to ms */
+
+#ifdef DONGLEBUILD
+/**
+ * As si_kattach goes through full SROM initialisation, the same variables can
+ * be reused for all subsequent calls
+ */
+#if !defined(NVSRCX)
+static char *
+BCMATTACHFN(si_getkvars)(void)
+{
+ if (FWSIGN_ENAB()) {
+ return NULL;
+ }
+ return (ksii.vars);
+}
+
+static int
+BCMATTACHFN(si_getkvarsz)(void)
+{
+ if (FWSIGN_ENAB()) {
+		return 0;
+ }
+ return (ksii.varsz);
+}
+#endif /* !defined(NVSRCX) */
+#endif /* DONGLEBUILD */
+
+/** Returns the backplane address of the chipcommon core for a particular chip */
+uint32
+BCMATTACHFN(si_enum_base)(uint devid)
+{
+ return SI_ENUM_BASE_DEFAULT;
+}
+
+/**
+ * Allocate an si handle. This function may be called multiple times.
+ *
+ * devid - pci device id (used to determine chip#)
+ * osh - opaque OS handle
+ * regs - virtual address of initial core registers
+ * bustype - pci/sb/sdio/etc
+ * vars - pointer to a to-be-created pointer area for "environment" variables. Some callers of
+ * this function set 'vars' to NULL, so this parameter must not be dereferenced unconditionally.
+ * varsz - pointer to int to return the size of the vars
+ */
+si_t *
+BCMATTACHFN(si_attach)(uint devid, osl_t *osh, volatile void *regs,
+ uint bustype, void *sdh, char **vars, uint *varsz)
+{
+ si_info_t *sii;
+
+ /* alloc si_info_t */
+ /* freed after ucode download for firmware builds */
+ if ((sii = MALLOCZ_NOPERSIST(osh, sizeof(si_info_t))) == NULL) {
+ SI_ERROR(("si_attach: malloc failed! malloced %d bytes\n", MALLOCED(osh)));
+ return (NULL);
+ }
+
+#ifdef BCMDVFS
+ if (BCMDVFS_ENAB() && si_dvfs_info_init((si_t *)sii, osh) == NULL) {
+ SI_ERROR(("si_dvfs_info_init failed\n"));
+ return (NULL);
+ }
+#endif /* BCMDVFS */
+
+ if (si_doattach(sii, devid, osh, regs, bustype, sdh, vars, varsz) == NULL) {
+ MFREE(osh, sii, sizeof(si_info_t));
+ return (NULL);
+ }
+ sii->vars = vars ? *vars : NULL;
+ sii->varsz = varsz ? *varsz : 0;
+
+#if defined(BCM_SH_SFLASH) && !defined(BCM_SH_SFLASH_DISABLED)
+ sh_sflash_attach(osh, (si_t *)sii);
+#endif
+ return (si_t *)sii;
+}
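+
+/*
+ * Illustrative attach sequence (a sketch, not part of this driver; 'regsva'
+ * is a placeholder for the mapped register base):
+ *
+ *	char *vars = NULL;
+ *	uint varsz = 0;
+ *	si_t *sih = si_attach(devid, osh, regsva, PCI_BUS, NULL, &vars, &varsz);
+ *	if (sih == NULL)
+ *		return NULL;
+ */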
+
+/** generic kernel variant of si_attach(). Is not called for Linux WLAN NIC builds. */
+si_t *
+BCMATTACHFN(si_kattach)(osl_t *osh)
+{
+ static bool ksii_attached = FALSE;
+ si_cores_info_t *cores_info;
+
+ if (!ksii_attached) {
+ void *regs = NULL;
+ const uint device_id = BCM4710_DEVICE_ID; // pick an arbitrary default device_id
+
+ regs = REG_MAP(si_enum_base(device_id), SI_CORE_SIZE); // map physical to virtual
+ cores_info = (si_cores_info_t *)&ksii_cores_info;
+ ksii.cores_info = cores_info;
+
+ /* Use osh as the deciding factor if the memory management
+ * system has been initialized. Pass non-NULL vars & varsz only
+ * if memory management has been initialized. Otherwise MALLOC()
+ * will fail/crash.
+ */
+#if defined(BCMDONGLEHOST)
+ ASSERT(osh);
+#endif
+ if (si_doattach(&ksii, device_id, osh, regs,
+ SI_BUS, NULL,
+ osh != SI_OSH ? &(ksii.vars) : NULL,
+ osh != SI_OSH ? &(ksii.varsz) : NULL) == NULL) {
+ SI_ERROR(("si_kattach: si_doattach failed\n"));
+ REG_UNMAP(regs);
+ return NULL;
+ }
+ REG_UNMAP(regs);
+
+ /* save ticks normalized to ms for si_watchdog_ms() */
+ if (PMUCTL_ENAB(&ksii.pub)) {
+ /* based on 32KHz ILP clock */
+ wd_msticks = 32;
+ } else {
+#if !defined(BCMDONGLEHOST)
+ if (CCREV(ksii.pub.ccrev) < 18)
+ wd_msticks = si_clock(&ksii.pub) / 1000;
+ else
+ wd_msticks = si_alp_clock(&ksii.pub) / 1000;
+#else
+ wd_msticks = ALP_CLOCK / 1000;
+#endif /* !defined(BCMDONGLEHOST) */
+ }
+
+ ksii_attached = TRUE;
+ SI_MSG(("si_kattach done. ccrev = %d, wd_msticks = %d\n",
+ CCREV(ksii.pub.ccrev), wd_msticks));
+ }
+
+ return &ksii.pub;
+}
+
+static bool
+BCMATTACHFN(si_buscore_prep)(si_info_t *sii, uint bustype, uint devid, void *sdh)
+{
+ BCM_REFERENCE(sdh);
+ BCM_REFERENCE(devid);
+
+#if !defined(BCMDONGLEHOST)
+ /* kludge to enable the clock on the 4306 which lacks a slowclock */
+ if (BUSTYPE(bustype) == PCI_BUS && !si_ispcie(sii))
+ si_clkctl_xtal(&sii->pub, XTAL|PLL, ON);
+#endif /* !defined(BCMDONGLEHOST) */
+
+#if defined(BCMSDIO) && defined(BCMDONGLEHOST) && !defined(BCMSDIOLITE)
+ /* PR 39902, 43618, 44891, 41539 -- avoid backplane accesses that may
+	 * cause SDIO clock requests before a stable ALP clock. Originally this
+	 * was done later (just before srom_var_init() below) to guarantee ALP
+	 * for the CIS read, but due to these PRs it was moved here, before any backplane use.
+ */
+ /* As it precedes any backplane access, can't check chipid; but may
+ * be able to qualify with devid if underlying SDIO allows. But should
+ * be ok for all our SDIO (4318 doesn't support clock and pullup regs,
+	 * but the access attempts don't seem to hurt.) Might eliminate the
+	 * need for ALP for CIS at all if the underlying SDIO uses CMD53...
+ */
+ if (BUSTYPE(bustype) == SDIO_BUS) {
+ int err;
+ uint8 clkset;
+
+ /* Try forcing SDIO core to do ALPAvail request only */
+ clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
+ if (!err) {
+ uint8 clkval;
+
+ /* If register supported, wait for ALPAvail and then force ALP */
+ clkval = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, NULL);
+ if ((clkval & ~SBSDIO_AVBITS) == clkset) {
+ SPINWAIT(((clkval = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, NULL)), !SBSDIO_ALPAV(clkval)),
+ PMU_MAX_TRANSITION_DLY);
+ if (!SBSDIO_ALPAV(clkval)) {
+ SI_ERROR(("timeout on ALPAV wait, clkval 0x%02x\n",
+ clkval));
+ return FALSE;
+ }
+ clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP;
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+ clkset, &err);
+ /* PR 40613: account for possible ALP delay */
+ OSL_DELAY(65);
+ }
+ }
+
+ /* Also, disable the extra SDIO pull-ups */
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
+ }
+
+#ifdef BCMSPI
+ /* Avoid backplane accesses before wake-wlan (i.e. htavail) for spi.
+ * F1 read accesses may return correct data but with data-not-available dstatus bit set.
+ */
+ if (BUSTYPE(bustype) == SPI_BUS) {
+
+ int err;
+ uint32 regdata;
+		/* wake up wlan function: WAKE_UP goes as HT_AVAIL request in hardware */
+ regdata = bcmsdh_cfg_read_word(sdh, SDIO_FUNC_0, SPID_CONFIG, NULL);
+ SI_MSG(("F0 REG0 rd = 0x%x\n", regdata));
+ regdata |= WAKE_UP;
+
+ bcmsdh_cfg_write_word(sdh, SDIO_FUNC_0, SPID_CONFIG, regdata, &err);
+
+ /* It takes time for wakeup to take effect. */
+ OSL_DELAY(100000);
+ }
+#endif /* BCMSPI */
+#endif /* BCMSDIO && BCMDONGLEHOST && !BCMSDIOLITE */
+
+ return TRUE;
+}
+
+/* note: this function is used by dhd */
+uint32
+si_get_pmu_reg_addr(si_t *sih, uint32 offset)
+{
+ si_info_t *sii = SI_INFO(sih);
+ uint32 pmuaddr = INVALID_ADDR;
+ uint origidx = 0;
+
+ SI_MSG(("si_get_pmu_reg_addr: pmu access, offset: %x\n", offset));
+ if (!(sii->pub.cccaps & CC_CAP_PMU)) {
+ goto done;
+ }
+ if (AOB_ENAB(&sii->pub)) {
+ uint pmucoreidx;
+ pmuregs_t *pmu;
+ SI_MSG(("si_get_pmu_reg_addr: AOBENAB: %x\n", offset));
+ origidx = sii->curidx;
+ pmucoreidx = si_findcoreidx(&sii->pub, PMU_CORE_ID, 0);
+ pmu = si_setcoreidx(&sii->pub, pmucoreidx);
+		/* note: this function is used by dhd; on 64-bit builds the cast
+		 * through (uintptr) is needed to avoid a compilation error.
+ */
+ pmuaddr = (uint32)(uintptr)((volatile uint8*)pmu + offset);
+ si_setcoreidx(sih, origidx);
+ } else
+ pmuaddr = SI_ENUM_BASE(sih) + offset;
+
+done:
+ SI_MSG(("%s: addrRET: %x\n", __FUNCTION__, pmuaddr));
+ return pmuaddr;
+}
+
+static bool
+BCMATTACHFN(si_buscore_setup)(si_info_t *sii, chipcregs_t *cc, uint bustype, uint32 savewin,
+ uint *origidx, volatile const void *regs)
+{
+ const si_cores_info_t *cores_info = sii->cores_info;
+ bool pci, pcie, pcie_gen2 = FALSE;
+ uint i;
+ uint pciidx, pcieidx, pcirev, pcierev;
+
+#if defined(AXI_TIMEOUTS_NIC) || defined(AXI_TIMEOUTS)
+ /* first, enable backplane timeouts */
+ si_slave_wrapper_add(&sii->pub);
+#endif
+ sii->curidx = 0;
+
+ cc = si_setcoreidx(&sii->pub, SI_CC_IDX);
+ ASSERT((uintptr)cc);
+
+ /* get chipcommon rev */
+ sii->pub.ccrev = (int)si_corerev(&sii->pub);
+
+ /* get chipcommon chipstatus */
+ if (CCREV(sii->pub.ccrev) >= 11)
+ sii->pub.chipst = R_REG(sii->osh, &cc->chipstatus);
+
+	/* get chipcommon capabilities */
+ sii->pub.cccaps = R_REG(sii->osh, &cc->capabilities);
+ /* get chipcommon extended capabilities */
+
+ if (CCREV(sii->pub.ccrev) >= 35) /* PR77565 */
+ sii->pub.cccaps_ext = R_REG(sii->osh, &cc->capabilities_ext);
+
+ /* get pmu rev and caps */
+ if (sii->pub.cccaps & CC_CAP_PMU) {
+ if (AOB_ENAB(&sii->pub)) {
+ uint pmucoreidx;
+ pmuregs_t *pmu;
+
+ pmucoreidx = si_findcoreidx(&sii->pub, PMU_CORE_ID, 0);
+ if (!GOODIDX(pmucoreidx, sii->numcores)) {
+ SI_ERROR(("si_buscore_setup: si_findcoreidx failed\n"));
+ return FALSE;
+ }
+
+ pmu = si_setcoreidx(&sii->pub, pmucoreidx);
+ sii->pub.pmucaps = R_REG(sii->osh, &pmu->pmucapabilities);
+ si_setcoreidx(&sii->pub, SI_CC_IDX);
+
+ sii->pub.gcirev = si_corereg(&sii->pub, GCI_CORE_IDX(&sii->pub),
+ GCI_OFFSETOF(&sii->pub, gci_corecaps0), 0, 0) & GCI_CAP0_REV_MASK;
+
+ if (GCIREV(sii->pub.gcirev) >= 9) {
+ sii->pub.lhlrev = si_corereg(&sii->pub, GCI_CORE_IDX(&sii->pub),
+ OFFSETOF(gciregs_t, lhl_core_capab_adr), 0, 0) &
+ LHL_CAP_REV_MASK;
+ } else {
+ sii->pub.lhlrev = NOREV;
+ }
+
+ } else
+ sii->pub.pmucaps = R_REG(sii->osh, &cc->pmucapabilities);
+
+ sii->pub.pmurev = sii->pub.pmucaps & PCAP_REV_MASK;
+ }
+
+ SI_MSG(("Chipc: rev %d, caps 0x%x, chipst 0x%x pmurev %d, pmucaps 0x%x\n",
+ CCREV(sii->pub.ccrev), sii->pub.cccaps, sii->pub.chipst, sii->pub.pmurev,
+ sii->pub.pmucaps));
+
+	/* figure out bus/original core idx */
+ /* note for PCI_BUS the buscoretype variable is setup in ai_scan() */
+ if (BUSTYPE(sii->pub.bustype) != PCI_BUS) {
+ sii->pub.buscoretype = NODEV_CORE_ID;
+ }
+ sii->pub.buscorerev = NOREV;
+ sii->pub.buscoreidx = BADIDX;
+
+ pci = pcie = FALSE;
+ pcirev = pcierev = NOREV;
+ pciidx = pcieidx = BADIDX;
+
+ /* This loop can be optimized */
+ for (i = 0; i < sii->numcores; i++) {
+ uint cid, crev;
+
+ si_setcoreidx(&sii->pub, i);
+ cid = si_coreid(&sii->pub);
+ crev = si_corerev(&sii->pub);
+
+ /* Display cores found */
+ if (CHIPTYPE(sii->pub.socitype) != SOCI_NCI) {
+ SI_VMSG(("CORE[%d]: id 0x%x rev %d base 0x%x size:%x regs 0x%p\n",
+ i, cid, crev, cores_info->coresba[i], cores_info->coresba_size[i],
+ OSL_OBFUSCATE_BUF(cores_info->regs[i])));
+ }
+
+ if (BUSTYPE(bustype) == SI_BUS) {
+			/* now look at the chipstatus register to figure out the package */
+			/* this should be a general change to cover all the chips */
+			/* this should also validate builds where the dongle image is built */
+			/* for SDIO but downloaded onto a PCIE dev */
+#ifdef BCMPCIEDEV_ENABLED
+ if (cid == PCIE2_CORE_ID) {
+ pcieidx = i;
+ pcierev = crev;
+ pcie = TRUE;
+ pcie_gen2 = TRUE;
+ }
+#endif
+ /* rest fill it up here */
+
+ } else if (BUSTYPE(bustype) == PCI_BUS) {
+ if (cid == PCI_CORE_ID) {
+ pciidx = i;
+ pcirev = crev;
+ pci = TRUE;
+ } else if ((cid == PCIE_CORE_ID) || (cid == PCIE2_CORE_ID)) {
+ pcieidx = i;
+ pcierev = crev;
+ pcie = TRUE;
+ if (cid == PCIE2_CORE_ID)
+ pcie_gen2 = TRUE;
+ }
+ }
+#ifdef BCMSDIO
+ else if (((BUSTYPE(bustype) == SDIO_BUS) ||
+ (BUSTYPE(bustype) == SPI_BUS)) &&
+ (cid == SDIOD_CORE_ID)) {
+ sii->pub.buscorerev = (int16)crev;
+ sii->pub.buscoretype = (uint16)cid;
+ sii->pub.buscoreidx = (uint16)i;
+ }
+#endif /* BCMSDIO */
+
+ /* find the core idx before entering this func. */
+ if (CHIPTYPE(sii->pub.socitype) == SOCI_NCI) {
+ if (regs == sii->curmap) {
+ *origidx = i;
+ }
+ } else {
+ /* find the core idx before entering this func. */
+ if ((savewin && (savewin == cores_info->coresba[i])) ||
+ (regs == cores_info->regs[i])) {
+ *origidx = i;
+ }
+ }
+ }
+
+#if !defined(BCMDONGLEHOST)
+ if (pci && pcie) {
+ if (si_ispcie(sii))
+ pci = FALSE;
+ else
+ pcie = FALSE;
+ }
+#endif /* !defined(BCMDONGLEHOST) */
+
+#if defined(PCIE_FULL_DONGLE)
+ if (pcie) {
+ if (pcie_gen2)
+ sii->pub.buscoretype = PCIE2_CORE_ID;
+ else
+ sii->pub.buscoretype = PCIE_CORE_ID;
+ sii->pub.buscorerev = (int16)pcierev;
+ sii->pub.buscoreidx = (uint16)pcieidx;
+ }
+ BCM_REFERENCE(pci);
+ BCM_REFERENCE(pcirev);
+ BCM_REFERENCE(pciidx);
+#else
+ if (pci) {
+ sii->pub.buscoretype = PCI_CORE_ID;
+ sii->pub.buscorerev = (int16)pcirev;
+ sii->pub.buscoreidx = (uint16)pciidx;
+ } else if (pcie) {
+ if (pcie_gen2)
+ sii->pub.buscoretype = PCIE2_CORE_ID;
+ else
+ sii->pub.buscoretype = PCIE_CORE_ID;
+ sii->pub.buscorerev = (int16)pcierev;
+ sii->pub.buscoreidx = (uint16)pcieidx;
+ }
+#endif /* defined(PCIE_FULL_DONGLE) */
+
+ SI_VMSG(("Buscore id/type/rev %d/0x%x/%d\n", sii->pub.buscoreidx, sii->pub.buscoretype,
+ sii->pub.buscorerev));
+
+#if !defined(BCMDONGLEHOST)
+ /* fixup necessary chip/core configurations */
+ if (!FWSIGN_ENAB() && BUSTYPE(sii->pub.bustype) == PCI_BUS) {
+ if (SI_FAST(sii)) {
+ if (!sii->pch &&
+ ((sii->pch = (void *)(uintptr)pcicore_init(&sii->pub, sii->osh,
+ (volatile void *)PCIEREGS(sii))) == NULL))
+ return FALSE;
+ }
+ if (si_pci_fixcfg(&sii->pub)) {
+ SI_ERROR(("si_buscore_setup: si_pci_fixcfg failed\n"));
+ return FALSE;
+ }
+ }
+#endif /* !defined(BCMDONGLEHOST) */
+
+#if defined(BCMSDIO) && defined(BCMDONGLEHOST)
+	/* Make sure any on-chip ARM is off (in case strapping is wrong, or downloaded
+	 * code was already running).
+ */
+ if ((BUSTYPE(bustype) == SDIO_BUS) || (BUSTYPE(bustype) == SPI_BUS)) {
+ if (si_setcore(&sii->pub, ARM7S_CORE_ID, 0) ||
+ si_setcore(&sii->pub, ARMCM3_CORE_ID, 0))
+ si_core_disable(&sii->pub, 0);
+ }
+#endif /* BCMSDIO && BCMDONGLEHOST */
+
+ /* return to the original core */
+ si_setcoreidx(&sii->pub, *origidx);
+
+ return TRUE;
+}
+
+#if !defined(BCMDONGLEHOST) /* if not a DHD build */
+
+static const char BCMATTACHDATA(rstr_boardvendor)[] = "boardvendor";
+static const char BCMATTACHDATA(rstr_boardtype)[] = "boardtype";
+#if defined(BCMPCIEDEV_SROM_FORMAT)
+static const char BCMATTACHDATA(rstr_subvid)[] = "subvid";
+#endif /* defined(BCMPCIEDEV_SROM_FORMAT) */
+#ifdef BCMSDIO
+static const char BCMATTACHDATA(rstr_manfid)[] = "manfid";
+#endif
+static const char BCMATTACHDATA(rstr_prodid)[] = "prodid";
+static const char BCMATTACHDATA(rstr_boardrev)[] = "boardrev";
+static const char BCMATTACHDATA(rstr_boardflags)[] = "boardflags";
+static const char BCMATTACHDATA(rstr_boardflags4)[] = "boardflags4";
+static const char BCMATTACHDATA(rstr_xtalfreq)[] = "xtalfreq";
+static const char BCMATTACHDATA(rstr_muxenab)[] = "muxenab";
+static const char BCMATTACHDATA(rstr_gpiopulldown)[] = "gpdn";
+static const char BCMATTACHDATA(rstr_devid)[] = "devid";
+static const char BCMATTACHDATA(rstr_wl0id)[] = "wl0id";
+static const char BCMATTACHDATA(rstr_devpathD)[] = "devpath%d";
+static const char BCMATTACHDATA(rstr_D_S)[] = "%d:%s";
+static const char BCMATTACHDATA(rstr_swdenab)[] = "swdenable";
+static const char BCMATTACHDATA(rstr_spurconfig)[] = "spurconfig";
+static const char BCMATTACHDATA(rstr_lpflags)[] = "lpflags";
+static const char BCMATTACHDATA(rstr_armclk)[] = "armclk";
+static const char BCMATTACHDATA(rstr_rfldo3p3_cap_war)[] = "rfldo3p3_cap_war";
+#if defined(SECI_UART)
+static const char BCMATTACHDATA(rstr_fuart_pup_rx_cts)[] = "fuart_pup_rx_cts";
+#endif /* defined(SECI_UART) */
+
+static uint32
+BCMATTACHFN(si_fixup_vid_overrides)(si_info_t *sii, char *pvars, uint32 conf_vid)
+{
+ BCM_REFERENCE(pvars);
+
+ if ((sii->pub.boardvendor != VENDOR_APPLE)) {
+ return conf_vid;
+ }
+
+ switch (sii->pub.boardtype)
+ {
+ /* Check for the SROM value */
+ case BCM94360X51P2:
+ case BCM94360X29C:
+ case BCM94360X29CP2:
+ case BCM94360X51:
+ case BCM943602X87:
+ case BCM943602X238D:
+ /* Take the PCIe configuration space subsystem ID */
+ sii->pub.boardtype = (conf_vid >> 16) & 0xffff;
+ break;
+
+ default:
+ /* Do nothing */
+ break;
+ }
+
+ return conf_vid;
+}
+
+static void
+BCMATTACHFN(si_nvram_process)(si_info_t *sii, char *pvars)
+{
+ uint w = 0;
+
+ if (FWSIGN_ENAB()) {
+ return;
+ }
+
+ /* get boardtype and boardrev */
+ switch (BUSTYPE(sii->pub.bustype)) {
+ case PCI_BUS:
+ /* do a pci config read to get subsystem id and subvendor id */
+ w = OSL_PCI_READ_CONFIG(sii->osh, PCI_CFG_SVID, sizeof(uint32));
+
+ /* Let nvram variables override subsystem Vend/ID */
+ if ((sii->pub.boardvendor = (uint16)si_getdevpathintvar(&sii->pub,
+ rstr_boardvendor)) == 0) {
+#ifdef BCMHOSTVARS
+ if ((w & 0xffff) == 0)
+ sii->pub.boardvendor = VENDOR_BROADCOM;
+ else
+#endif /* BCMHOSTVARS */
+ sii->pub.boardvendor = w & 0xffff;
+ } else {
+ SI_ERROR(("Overriding boardvendor: 0x%x instead of 0x%x\n",
+ sii->pub.boardvendor, w & 0xffff));
+ }
+
+ if ((sii->pub.boardtype = (uint16)si_getdevpathintvar(&sii->pub, rstr_boardtype))
+ == 0) {
+ if ((sii->pub.boardtype = getintvar(pvars, rstr_boardtype)) == 0)
+ sii->pub.boardtype = (w >> 16) & 0xffff;
+ } else {
+ SI_ERROR(("Overriding boardtype: 0x%x instead of 0x%x\n",
+ sii->pub.boardtype, (w >> 16) & 0xffff));
+ }
+
+ /* Override high priority fixups */
+ if (!FWSIGN_ENAB()) {
+ si_fixup_vid_overrides(sii, pvars, w);
+ }
+ break;
+
+#ifdef BCMSDIO
+ case SDIO_BUS:
+ sii->pub.boardvendor = getintvar(pvars, rstr_manfid);
+ sii->pub.boardtype = getintvar(pvars, rstr_prodid);
+ break;
+
+ case SPI_BUS:
+ sii->pub.boardvendor = VENDOR_BROADCOM;
+ sii->pub.boardtype = QT4710_BOARD;
+ break;
+#endif
+
+ case SI_BUS:
+#ifdef BCMPCIEDEV_SROM_FORMAT
+ if (BCMPCIEDEV_ENAB() && si_is_sprom_available(&sii->pub) && pvars &&
+ getvar(pvars, rstr_subvid)) {
+ sii->pub.boardvendor = getintvar(pvars, rstr_subvid);
+ } else
+#endif
+ sii->pub.boardvendor = VENDOR_BROADCOM;
+ if (pvars == NULL || ((sii->pub.boardtype = getintvar(pvars, rstr_prodid)) == 0))
+ if ((sii->pub.boardtype = getintvar(pvars, rstr_boardtype)) == 0)
+ sii->pub.boardtype = 0xffff;
+
+ if (CHIPTYPE(sii->pub.socitype) == SOCI_UBUS) {
+ /* do a pci config read to get subsystem id and subvendor id */
+ w = OSL_PCI_READ_CONFIG(sii->osh, PCI_CFG_SVID, sizeof(uint32));
+ sii->pub.boardvendor = w & 0xffff;
+ sii->pub.boardtype = (w >> 16) & 0xffff;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ if (sii->pub.boardtype == 0) {
+ SI_ERROR(("si_doattach: unknown board type\n"));
+ ASSERT(sii->pub.boardtype);
+ }
+
+ sii->pub.lpflags = getintvar(pvars, rstr_lpflags);
+ sii->pub.boardrev = getintvar(pvars, rstr_boardrev);
+ sii->pub.boardflags = getintvar(pvars, rstr_boardflags);
+
+#ifdef BCM_SDRBL
+ sii->pub.boardflags2 |= ((!CHIP_HOSTIF_USB(&(sii->pub))) ? ((si_arm_sflags(&(sii->pub))
+ & SISF_SDRENABLE) ? BFL2_SDR_EN:0):
+ (((uint)getintvar(pvars, "boardflags2")) & BFL2_SDR_EN));
+#endif /* BCM_SDRBL */
+ sii->pub.boardflags4 = getintvar(pvars, rstr_boardflags4);
+
+}
+
+#endif /* !defined(BCMDONGLEHOST) */
+
+#if defined(CONFIG_XIP) && defined(BCMTCAM)
+extern uint8 patch_pair;
+#endif /* CONFIG_XIP && BCMTCAM */
+
+#if !defined(BCMDONGLEHOST)
+typedef struct {
+ uint8 uart_tx;
+ uint32 uart_rx;
+} si_mux_uartopt_t;
+
+/* note: each index corresponds to MUXENAB43012_HOSTWAKE_MASK >> shift - 1 */
+static const uint8 BCMATTACHDATA(mux43012_hostwakeopt)[] = {
+ CC_PIN_GPIO_00
+};
+
+static const si_mux_uartopt_t BCMATTACHDATA(mux_uartopt)[] = {
+ {CC_PIN_GPIO_00, CC_PIN_GPIO_01},
+ {CC_PIN_GPIO_05, CC_PIN_GPIO_04},
+ {CC_PIN_GPIO_15, CC_PIN_GPIO_14},
+};
+
+/* note: each index corresponds to MUXENAB_DEF_HOSTWAKE mask >> shift - 1 */
+static const uint8 BCMATTACHDATA(mux_hostwakeopt)[] = {
+ CC_PIN_GPIO_00,
+};
+
+#ifdef SECI_UART
+#define NUM_SECI_UART_GPIOS 4
+static bool fuart_pullup_rx_cts_enab = FALSE;
+static bool fast_uart_init = FALSE;
+static uint32 fast_uart_tx;
+static uint32 fast_uart_functionsel;
+static uint32 fast_uart_pup;
+static uint32 fast_uart_rx;
+static uint32 fast_uart_cts_in;
+#endif /* SECI_UART */
+
+void
+BCMATTACHFN(si_swdenable)(si_t *sih, uint32 swdflag)
+{
+ /* FIXME Need a more generic test for SWD instead of check on specific chipid */
+ switch (CHIPID(sih->chip)) {
+ case BCM4369_CHIP_GRPID:
+ case BCM4362_CHIP_GRPID:
+ if (swdflag) {
+ /* Enable ARM debug clk, which is required for the ARM debug
+ * unit to operate
+ */
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL5, (1 << ARMCR4_DBG_CLK_BIT),
+ (1 << ARMCR4_DBG_CLK_BIT));
+ /* Force HT clock in Chipcommon. The HT clock is required for backplane
+ * access via SWD
+ */
+ si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, clk_ctl_st), CCS_FORCEHT,
+ CCS_FORCEHT);
+ /* Set TAP_SEL so that ARM is the first and the only TAP on the TAP chain.
+ * Must do a chip reset to clear this bit
+ */
+ si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, jtagctrl),
+ JCTRL_TAPSEL_BIT, JCTRL_TAPSEL_BIT);
+ SI_MSG(("si_swdenable: set arm_dbgclk, ForceHTClock and tap_sel bit\n"));
+ }
+ break;
+ default:
+ /* swdenable specified for an unsupported chip */
+ ASSERT(0);
+ break;
+ }
+}
+
+/** We want this available at all times so the mux can be switched for debugging */
+void
+BCMATTACHFN(si_muxenab)(si_t *sih, uint32 w)
+{
+ uint32 chipcontrol, pmu_chipcontrol;
+
+ pmu_chipcontrol = si_pmu_chipcontrol(sih, 1, 0, 0);
+ chipcontrol = si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, chipcontrol),
+ 0, 0);
+
+ switch (CHIPID(sih->chip)) {
+ case BCM4360_CHIP_ID:
+ case BCM43460_CHIP_ID:
+ case BCM4352_CHIP_ID:
+ case BCM43526_CHIP_ID:
+ CASE_BCM43602_CHIP:
+ if (w & MUXENAB_UART)
+ chipcontrol |= CCTRL4360_UART_MODE;
+ break;
+
+ case BCM43012_CHIP_ID:
+ case BCM43013_CHIP_ID:
+ case BCM43014_CHIP_ID:
+ /*
+ * 0x10 : use GPIO0 as host wake up pin
+ * 0x20 ~ 0xf0: Reserved
+ */
+ if (w & MUXENAB43012_HOSTWAKE_MASK) {
+ uint8 hostwake = 0;
+ uint8 hostwake_ix = MUXENAB43012_GETIX(w, HOSTWAKE);
+
+ if (hostwake_ix >
+ sizeof(mux43012_hostwakeopt)/sizeof(mux43012_hostwakeopt[0]) - 1) {
+ SI_ERROR(("si_muxenab: wrong index %d for hostwake\n",
+ hostwake_ix));
+ break;
+ }
+
+ hostwake = mux43012_hostwakeopt[hostwake_ix];
+ si_gci_set_functionsel(sih, hostwake, CC_FNSEL_MISC1);
+ }
+ break;
+
+	case BCM4385_CHIP_GRPID:
+ case BCM4387_CHIP_GRPID:
+ if (w & MUXENAB_DEF_UART_MASK) {
+ uint32 uart_rx = 0, uart_tx = 0;
+ uint8 uartopt_idx = (w & MUXENAB_DEF_UART_MASK) - 1;
+ uint8 uartopt_size = sizeof(mux_uartopt)/sizeof(mux_uartopt[0]);
+
+ if (uartopt_idx < uartopt_size) {
+ uart_rx = mux_uartopt[uartopt_idx].uart_rx;
+ uart_tx = mux_uartopt[uartopt_idx].uart_tx;
+#ifdef BOOTLOADER_CONSOLE_OUTPUT
+ uart_rx = 0;
+ uart_tx = 1;
+#endif
+ if (CHIPREV(sih->chiprev) >= 3) {
+ si_gci_set_functionsel(sih, uart_rx, CC_FNSEL_GPIO1);
+ si_gci_set_functionsel(sih, uart_tx, CC_FNSEL_GPIO1);
+ } else {
+ si_gci_set_functionsel(sih, uart_rx, CC_FNSEL_GPIO0);
+ si_gci_set_functionsel(sih, uart_tx, CC_FNSEL_GPIO0);
+ }
+ } else {
+ SI_MSG(("si_muxenab: Invalid uart OTP setting\n"));
+ }
+ }
+ if (w & MUXENAB_DEF_HOSTWAKE_MASK) {
+ uint8 hostwake = 0;
+ /*
+ * SDIO
+ * 0x10 : use GPIO0 as host wake up pin
+ */
+ uint8 hostwake_ix = MUXENAB_DEF_GETIX(w, HOSTWAKE);
+
+ if (hostwake_ix > (sizeof(mux_hostwakeopt) /
+ sizeof(mux_hostwakeopt[0]) - 1)) {
+ SI_ERROR(("si_muxenab: wrong index %d for hostwake\n",
+ hostwake_ix));
+ break;
+ }
+
+ hostwake = mux_hostwakeopt[hostwake_ix];
+ si_gci_set_functionsel(sih, hostwake, CC_FNSEL_GPIO0);
+ }
+
+ break;
+
+ case BCM4369_CHIP_GRPID:
+ case BCM4362_CHIP_GRPID:
+ /* TBD fill */
+ if (w & MUXENAB_HOST_WAKE) {
+ si_gci_set_functionsel(sih, CC_PIN_GPIO_00, CC_FNSEL_MISC1);
+ }
+ break;
+ case BCM4376_CHIP_GRPID:
+ case BCM4378_CHIP_GRPID:
+ /* TBD fill */
+ break;
+ case BCM4388_CHIP_GRPID:
+ case BCM4389_CHIP_GRPID:
+ case BCM4397_CHIP_GRPID:
+ /* TBD fill */
+ break;
+ default:
+ /* muxenab specified for an unsupported chip */
+ ASSERT(0);
+ break;
+ }
+
+ /* write both updated values to hw */
+ si_pmu_chipcontrol(sih, 1, ~0, pmu_chipcontrol);
+ si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, chipcontrol),
+ ~0, chipcontrol);
+}
+
+/** ltecx GCI reg access */
+uint32
+BCMPOSTTRAPFN(si_gci_direct)(si_t *sih, uint offset, uint32 mask, uint32 val)
+{
+ /* gci direct reg access */
+ return si_corereg(sih, GCI_CORE_IDX(sih), offset, mask, val);
+}
+
+uint32
+si_gci_indirect(si_t *sih, uint regidx, uint offset, uint32 mask, uint32 val)
+{
+ /* gci indirect reg access */
+ si_corereg(sih, GCI_CORE_IDX(sih), GCI_OFFSETOF(sih, gci_indirect_addr), ~0, regidx);
+ return si_corereg(sih, GCI_CORE_IDX(sih), offset, mask, val);
+}
+
+uint32
+si_gci_input(si_t *sih, uint reg)
+{
+ /* gci_input[] */
+ return si_corereg(sih, GCI_CORE_IDX(sih), GCI_OFFSETOF(sih, gci_input[reg]), 0, 0);
+}
+
+uint32
+si_gci_output(si_t *sih, uint reg, uint32 mask, uint32 val)
+{
+ /* gci_output[] */
+ return si_corereg(sih, GCI_CORE_IDX(sih), GCI_OFFSETOF(sih, gci_output[reg]), mask, val);
+}
+
+uint32
+si_gci_int_enable(si_t *sih, bool enable)
+{
+ uint offs;
+
+ /* enable GCI interrupt */
+ offs = OFFSETOF(chipcregs_t, intmask);
+ return (si_corereg(sih, SI_CC_IDX, offs, CI_ECI, (enable ? CI_ECI : 0)));
+}
+
+void
+si_gci_reset(si_t *sih)
+{
+ int i;
+
+ if (gci_reset_done == FALSE) {
+ gci_reset_done = TRUE;
+
+ /* Set ForceRegClk and ForceSeciClk */
+ si_gci_direct(sih, GCI_OFFSETOF(sih, gci_corectrl),
+ ((1 << GCI_CCTL_FREGCLK_OFFSET)
+ |(1 << GCI_CCTL_FSECICLK_OFFSET)),
+ ((1 << GCI_CCTL_FREGCLK_OFFSET)
+ |(1 << GCI_CCTL_FSECICLK_OFFSET)));
+
+ /* Some Delay */
+ for (i = 0; i < 2; i++) {
+ si_gci_direct(sih, GCI_OFFSETOF(sih, gci_corectrl), 0, 0);
+ }
+ /* Reset SECI block */
+ si_gci_direct(sih, GCI_OFFSETOF(sih, gci_corectrl),
+ ((1 << GCI_CCTL_SECIRST_OFFSET)
+ |(1 << GCI_CCTL_RSTSL_OFFSET)
+ |(1 << GCI_CCTL_RSTOCC_OFFSET)),
+ ((1 << GCI_CCTL_SECIRST_OFFSET)
+ |(1 << GCI_CCTL_RSTSL_OFFSET)
+ |(1 << GCI_CCTL_RSTOCC_OFFSET)));
+ /* Some Delay */
+ for (i = 0; i < 10; i++) {
+ si_gci_direct(sih, GCI_OFFSETOF(sih, gci_corectrl), 0, 0);
+ }
+ /* Remove SECI Reset */
+ si_gci_direct(sih, GCI_OFFSETOF(sih, gci_corectrl),
+ ((1 << GCI_CCTL_SECIRST_OFFSET)
+ |(1 << GCI_CCTL_RSTSL_OFFSET)
+ |(1 << GCI_CCTL_RSTOCC_OFFSET)),
+ ((0 << GCI_CCTL_SECIRST_OFFSET)
+ |(0 << GCI_CCTL_RSTSL_OFFSET)
+ |(0 << GCI_CCTL_RSTOCC_OFFSET)));
+
+ /* Some Delay */
+ for (i = 0; i < 2; i++) {
+ si_gci_direct(sih, GCI_OFFSETOF(sih, gci_corectrl), 0, 0);
+ }
+
+ /* Clear ForceRegClk and ForceSeciClk */
+ si_gci_direct(sih, GCI_OFFSETOF(sih, gci_corectrl),
+ ((1 << GCI_CCTL_FREGCLK_OFFSET)
+ |(1 << GCI_CCTL_FSECICLK_OFFSET)),
+ ((0 << GCI_CCTL_FREGCLK_OFFSET)
+ |(0 << GCI_CCTL_FSECICLK_OFFSET)));
+ }
+ /* clear events */
+ for (i = 0; i < 32; i++) {
+ si_gci_direct(sih, GCI_OFFSETOF(sih, gci_event[i]), ALLONES_32, 0x00);
+ }
+}
+
+void
+si_gci_gpio_chipcontrol_ex(si_t *sih, uint8 gci_gpio, uint8 opt)
+{
+ si_gci_gpio_chipcontrol(sih, gci_gpio, opt);
+}
+
+static void
+BCMPOSTTRAPFN(si_gci_gpio_chipcontrol)(si_t *sih, uint8 gci_gpio, uint8 opt)
+{
+ uint32 ring_idx = 0, pos = 0;
+
+ si_gci_get_chipctrlreg_ringidx_base8(gci_gpio, &ring_idx, &pos);
+ SI_MSG(("si_gci_gpio_chipcontrol:rngidx is %d, pos is %d, opt is %d, mask is 0x%04x,"
+ " value is 0x%04x\n",
+ ring_idx, pos, opt, GCIMASK_8B(pos), GCIPOSVAL_8B(opt, pos)));
+
+ si_corereg(sih, GCI_CORE_IDX(sih), GCI_OFFSETOF(sih, gci_indirect_addr), ~0, ring_idx);
+ si_corereg(sih, GCI_CORE_IDX(sih), GCI_OFFSETOF(sih, gci_gpioctl),
+ GCIMASK_8B(pos), GCIPOSVAL_8B(opt, pos));
+}
+
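+/* Read-modify-write helper for the per-GPIO GCI registers (descriptive note,
+ * not from the original change): when mask/value are non-zero, the 4-bit field
+ * belonging to 'gci_gpio' is updated first; the register is then read back and
+ * the bits at the gpio's position are returned.
+ */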
+static uint8
+BCMPOSTTRAPFN(si_gci_gpio_reg)(si_t *sih, uint8 gci_gpio, uint8 mask, uint8 value,
+ uint32 reg_offset)
+{
+ uint32 ring_idx = 0, pos = 0; /**< FunctionSel register idx and bits to use */
+ uint32 val_32;
+
+ si_gci_get_chipctrlreg_ringidx_base4(gci_gpio, &ring_idx, &pos);
+ SI_MSG(("si_gci_gpio_reg:rngidx is %d, pos is %d, val is %d, mask is 0x%04x,"
+ " value is 0x%04x\n",
+ ring_idx, pos, value, GCIMASK_4B(pos), GCIPOSVAL_4B(value, pos)));
+ si_corereg(sih, GCI_CORE_IDX(sih), GCI_OFFSETOF(sih, gci_indirect_addr), ~0, ring_idx);
+
+ if (mask || value) {
+ /* set operation */
+ si_corereg(sih, GCI_CORE_IDX(sih),
+ reg_offset, GCIMASK_4B(pos), GCIPOSVAL_4B(value, pos));
+ }
+ val_32 = si_corereg(sih, GCI_CORE_IDX(sih), reg_offset, 0, 0);
+
+ value = (uint8)((val_32 >> pos) & 0xFF);
+
+ return value;
+}
+
+/**
+ * In order to route a ChipCommon originated GPIO towards a package pin, both CC and GCI cores have
+ * to be written to.
+ * @param[in] sih
+ * @param[in] gpio chip specific package pin number. See Toplevel Arch page, GCI chipcontrol reg
+ * section.
+ * @param[in] mask chip common gpio mask
+ * @param[in] val chip common gpio value
+ */
+void
+BCMPOSTTRAPFN(si_gci_enable_gpio)(si_t *sih, uint8 gpio, uint32 mask, uint32 value)
+{
+ uint32 ring_idx = 0, pos = 0;
+
+ si_gci_get_chipctrlreg_ringidx_base4(gpio, &ring_idx, &pos);
+ SI_MSG(("si_gci_enable_gpio:rngidx is %d, pos is %d, val is %d, mask is 0x%04x,"
+ " value is 0x%04x\n",
+ ring_idx, pos, value, GCIMASK_4B(pos), GCIPOSVAL_4B(value, pos)));
+ si_gci_set_functionsel(sih, gpio, CC_FNSEL_SAMEASPIN);
+ si_corereg(sih, GCI_CORE_IDX(sih), GCI_OFFSETOF(sih, gci_indirect_addr), ~0, ring_idx);
+
+ si_gpiocontrol(sih, mask, 0, GPIO_HI_PRIORITY);
+ si_gpioouten(sih, mask, mask, GPIO_HI_PRIORITY);
+ si_gpioout(sih, mask, value, GPIO_HI_PRIORITY);
+}
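+/* Usage sketch (illustrative only; the pin number is hypothetical and chip
+ * specific): drive package pin 5 high through the CC and GCI cores.
+ *
+ *   si_gci_enable_gpio(sih, 5, 1 << 5, 1 << 5);
+ */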
+
+/*
+ * The above seems to be for gpio output only (forces gpioouten).
+ * This function is to configure GPIO as input, and accepts a mask of bits.
+ * Also: doesn't force the gpiocontrol (chipc) functionality, assumes it
+ * is the default, and rejects the request (BUSY => gpio in use) if it's
+ * already configured for a different function... but it will override the
+ * output enable.
+ */
+int
+si_gpio_enable(si_t *sih, uint32 mask)
+{
+ uint bit;
+ int fnsel = -1; /* Valid fnsel is a small positive number */
+
+ BCM_REFERENCE(bit);
+ BCM_REFERENCE(fnsel);
+
+ /* Bail if any bit is explicitly set for some other function */
+ if (si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, gpiocontrol), 0, 0) & mask) {
+ return BCME_BUSY;
+ }
+
+#if !defined(BCMDONGLEHOST)
+ /* Some chips need to be explicitly set */
+ switch (CHIPID(sih->chip))
+ {
+ case BCM4362_CHIP_GRPID:
+ case BCM4369_CHIP_GRPID:
+ case BCM4376_CHIP_GRPID:
+ case BCM4378_CHIP_GRPID:
+ case BCM4387_CHIP_GRPID:
+ fnsel = CC_FNSEL_SAMEASPIN;
+ default:
+ ;
+ }
+
+ if (fnsel != -1) {
+ for (bit = 0; mask; bit++) {
+ if (mask & (1 << bit)) {
+ si_gci_set_functionsel(sih, bit, (uint8)fnsel);
+ mask ^= (1 << bit);
+ }
+ }
+ }
+#endif /* !BCMDONGLEHOST */
+ si_gpioouten(sih, mask, 0, GPIO_HI_PRIORITY);
+
+ return BCME_OK;
+}
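+/* Usage sketch (illustrative; the bit choice is hypothetical): configure GPIO
+ * bits 2 and 3 as inputs and handle the busy case.
+ *
+ *   if (si_gpio_enable(sih, (1 << 2) | (1 << 3)) == BCME_BUSY) {
+ *           // at least one pin is claimed by another function
+ *   }
+ */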
+
+static const char BCMATTACHDATA(rstr_host_wake_opt)[] = "host_wake_opt";
+uint8
+BCMATTACHFN(si_gci_host_wake_gpio_init)(si_t *sih)
+{
+ uint8 host_wake_gpio = CC_GCI_GPIO_INVALID;
+ uint32 host_wake_opt;
+
+ /* parse the device wake opt from nvram */
+ /* decode what that means for specific chip */
+ if (getvar(NULL, rstr_host_wake_opt) == NULL)
+ return host_wake_gpio;
+
+ host_wake_opt = getintvar(NULL, rstr_host_wake_opt);
+ host_wake_gpio = host_wake_opt & 0xff;
+ si_gci_host_wake_gpio_enable(sih, host_wake_gpio, FALSE);
+
+ return host_wake_gpio;
+}
+
+void
+BCMPOSTTRAPFN(si_gci_host_wake_gpio_enable)(si_t *sih, uint8 gpio, bool state)
+{
+ switch (CHIPID(sih->chip)) {
+ case BCM4369_CHIP_GRPID:
+ case BCM4376_CHIP_GRPID:
+ case BCM4378_CHIP_GRPID:
+ case BCM4385_CHIP_GRPID:
+ case BCM4387_CHIP_GRPID:
+ case BCM4388_CHIP_GRPID:
+ case BCM4389_CHIP_GRPID:
+ case BCM4397_CHIP_GRPID:
+ case BCM4362_CHIP_GRPID:
+ si_gci_enable_gpio(sih, gpio, 1 << gpio,
+ state ? 1 << gpio : 0x00);
+ break;
+ default:
+ SI_ERROR(("host wake not supported for 0x%04x yet\n", CHIPID(sih->chip)));
+ break;
+ }
+}
+
+void
+si_gci_time_sync_gpio_enable(si_t *sih, uint8 gpio, bool state)
+{
+ switch (CHIPID(sih->chip)) {
+ case BCM4369_CHIP_GRPID:
+ case BCM4362_CHIP_GRPID:
+ case BCM4376_CHIP_GRPID:
+ case BCM4378_CHIP_GRPID:
+ case BCM4385_CHIP_GRPID:
+ case BCM4387_CHIP_GRPID:
+ si_gci_enable_gpio(sih, gpio, 1 << gpio,
+ state ? 1 << gpio : 0x00);
+ break;
+ default:
+ SI_ERROR(("Time sync not supported for 0x%04x yet\n", CHIPID(sih->chip)));
+ break;
+ }
+}
+
+#define TIMESYNC_GPIO_NUM 12 /* Hardcoded for now. Will be removed later */
+static const char BCMATTACHDATA(rstr_time_sync_opt)[] = "time_sync_opt";
+uint8
+BCMATTACHFN(si_gci_time_sync_gpio_init)(si_t *sih)
+{
+ uint8 time_sync_gpio = TIMESYNC_GPIO_NUM;
+ uint32 time_sync_opt;
+
+ /* parse the device wake opt from nvram */
+ /* decode what that means for specific chip */
+ if (getvar(NULL, rstr_time_sync_opt) == NULL) {
+ time_sync_opt = TIMESYNC_GPIO_NUM;
+ } else {
+ time_sync_opt = getintvar(NULL, rstr_time_sync_opt);
+ }
+ switch (CHIPID(sih->chip)) {
+ case BCM4369_CHIP_GRPID:
+ case BCM4362_CHIP_GRPID:
+ case BCM4376_CHIP_GRPID:
+ case BCM4378_CHIP_GRPID:
+ case BCM4385_CHIP_GRPID:
+ case BCM4387_CHIP_GRPID:
+ time_sync_gpio = time_sync_opt & 0xff;
+ si_gci_enable_gpio(sih, time_sync_gpio,
+ 1 << time_sync_gpio, 0x00);
+ break;
+ default:
+ SI_ERROR(("time sync not supported for 0x%04x yet\n", CHIPID(sih->chip)));
+ break;
+ }
+
+ return time_sync_gpio;
+}
+
+uint8
+BCMPOSTTRAPFN(si_gci_gpio_wakemask)(si_t *sih, uint8 gpio, uint8 mask, uint8 value)
+{
+ si_corereg(sih, GCI_CORE_IDX(sih), GCI_OFFSETOF(sih, gci_wakemask),
+ GCI_WAKEMASK_GPIOWAKE, GCI_WAKEMASK_GPIOWAKE);
+ return (si_gci_gpio_reg(sih, gpio, mask, value, GCI_OFFSETOF(sih, gci_gpiowakemask)));
+}
+
+uint8
+BCMPOSTTRAPFN(si_gci_gpio_intmask)(si_t *sih, uint8 gpio, uint8 mask, uint8 value)
+{
+ return (si_gci_gpio_reg(sih, gpio, mask, value, GCI_OFFSETOF(sih, gci_gpiointmask)));
+}
+
+uint8
+BCMPOSTTRAPFN(si_gci_gpio_status)(si_t *sih, uint8 gpio, uint8 mask, uint8 value)
+{
+ return (si_gci_gpio_reg(sih, gpio, mask, value, GCI_OFFSETOF(sih, gci_gpiostatus)));
+}
+
+static void
+si_gci_enable_gpioint(si_t *sih, bool enable)
+{
+ if (enable)
+ si_corereg(sih, GCI_CORE_IDX(sih), GCI_OFFSETOF(sih, gci_intmask),
+ GCI_INTSTATUS_GPIOINT, GCI_INTSTATUS_GPIOINT);
+ else
+ si_corereg(sih, GCI_CORE_IDX(sih), GCI_OFFSETOF(sih, gci_intmask),
+ GCI_INTSTATUS_GPIOINT, 0);
+}
+
+/* assumes function select is performed separately */
+void
+BCMINITFN(si_enable_gpio_wake)(si_t *sih, uint8 *wake_mask, uint8 *cur_status, uint8 gci_gpio,
+ uint32 pmu_cc2_mask, uint32 pmu_cc2_value)
+{
+ si_gci_gpio_chipcontrol(sih, gci_gpio,
+ (1 << GCI_GPIO_CHIPCTRL_ENAB_IN_BIT));
+
+ si_gci_gpio_intmask(sih, gci_gpio, *wake_mask, *wake_mask);
+ si_gci_gpio_wakemask(sih, gci_gpio, *wake_mask, *wake_mask);
+
+ /* clear the existing status bits */
+ *cur_status = si_gci_gpio_status(sih, gci_gpio,
+ GCI_GPIO_STS_CLEAR, GCI_GPIO_STS_CLEAR);
+
+ /* top level gci int enable */
+ si_gci_enable_gpioint(sih, TRUE);
+
+ /* enable the pmu chip control bit to enable wake */
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL2, pmu_cc2_mask, pmu_cc2_value);
+}
+
+void
+BCMPOSTTRAPFN(si_gci_config_wake_pin)(si_t *sih, uint8 gpio_n, uint8 wake_events, bool gci_gpio)
+{
+ uint8 chipcontrol = 0;
+ uint32 pmu_chipcontrol2 = 0;
+
+ if (!gci_gpio)
+ chipcontrol = (1 << GCI_GPIO_CHIPCTRL_ENAB_EXT_GPIO_BIT);
+
+ chipcontrol |= (1 << GCI_GPIO_CHIPCTRL_PULLUP_BIT);
+ si_gci_gpio_chipcontrol(sih, gpio_n,
+ (chipcontrol | (1 << GCI_GPIO_CHIPCTRL_ENAB_IN_BIT)));
+
+ /* enable gci gpio int/wake events */
+ si_gci_gpio_intmask(sih, gpio_n, wake_events, wake_events);
+ si_gci_gpio_wakemask(sih, gpio_n, wake_events, wake_events);
+
+ /* clear the existing status bits */
+ si_gci_gpio_status(sih, gpio_n,
+ GCI_GPIO_STS_CLEAR, GCI_GPIO_STS_CLEAR);
+
+ /* Enable gci2wl_wake */
+ pmu_chipcontrol2 = si_pmu_chipcontrol(sih, PMU_CHIPCTL2, 0, 0);
+ pmu_chipcontrol2 |= si_pmu_wake_bit_offset(sih);
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL2, ~0, pmu_chipcontrol2);
+
+ /* enable gci int/wake events */
+ si_corereg(sih, GCI_CORE_IDX(sih), GCI_OFFSETOF(sih, gci_intmask),
+ GCI_INTSTATUS_GPIOINT, GCI_INTSTATUS_GPIOINT);
+ si_corereg(sih, GCI_CORE_IDX(sih), GCI_OFFSETOF(sih, gci_wakemask),
+ GCI_INTSTATUS_GPIOWAKE, GCI_INTSTATUS_GPIOWAKE);
+}
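+/* Usage sketch (illustrative; the gpio number is hypothetical): arm GCI GPIO 1
+ * to wake on either edge. TRUE marks the pin as a native GCI GPIO, so the
+ * EXT_GPIO routing bit is left clear.
+ *
+ *   si_gci_config_wake_pin(sih, 1,
+ *           (1 << GCI_GPIO_STS_POS_EDGE_BIT) | (1 << GCI_GPIO_STS_NEG_EDGE_BIT),
+ *           TRUE);
+ */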
+
+void
+si_gci_free_wake_pin(si_t *sih, uint8 gpio_n)
+{
+ uint8 chipcontrol = 0;
+ uint8 wake_events;
+
+ si_gci_gpio_chipcontrol(sih, gpio_n, chipcontrol);
+
+ /* enable gci gpio int/wake events */
+ wake_events = si_gci_gpio_intmask(sih, gpio_n, 0, 0);
+ si_gci_gpio_intmask(sih, gpio_n, wake_events, 0);
+ wake_events = si_gci_gpio_wakemask(sih, gpio_n, 0, 0);
+ si_gci_gpio_wakemask(sih, gpio_n, wake_events, 0);
+
+ /* clear the existing status bits */
+ si_gci_gpio_status(sih, gpio_n,
+ GCI_GPIO_STS_CLEAR, GCI_GPIO_STS_CLEAR);
+}
+
+#if defined(BCMPCIEDEV)
+static const char BCMINITDATA(rstr_device_wake_opt)[] = "device_wake_opt";
+#else
+static const char BCMINITDATA(rstr_device_wake_opt)[] = "sd_devwake";
+#endif
+#define DEVICE_WAKE_GPIO3 3
+
+uint8
+BCMATTACHFN(si_enable_perst_wake)(si_t *sih, uint8 *perst_wake_mask, uint8 *perst_cur_status)
+{
+ uint8 gci_perst = CC_GCI_GPIO_15;
+ switch (CHIPID(sih->chip)) {
+ default:
+ SI_ERROR(("device wake not supported for 0x%04x yet\n", CHIPID(sih->chip)));
+ break;
+ }
+ return gci_perst;
+}
+
+uint8
+BCMINITFN(si_get_device_wake_opt)(si_t *sih)
+{
+ si_info_t *sii = SI_INFO(sih);
+
+ if (getvar(NULL, rstr_device_wake_opt) == NULL)
+ return CC_GCI_GPIO_INVALID;
+
+ sii->device_wake_opt = (uint8)getintvar(NULL, rstr_device_wake_opt);
+ return sii->device_wake_opt;
+}
+
+uint8
+si_enable_device_wake(si_t *sih, uint8 *wake_mask, uint8 *cur_status)
+{
+ uint8 gci_gpio = CC_GCI_GPIO_INVALID; /* DEVICE_WAKE GCI GPIO */
+ uint32 device_wake_opt;
+ const si_info_t *sii = SI_INFO(sih);
+
+ device_wake_opt = sii->device_wake_opt;
+
+ if (device_wake_opt == CC_GCI_GPIO_INVALID) {
+ /* parse the device wake opt from nvram */
+ /* decode what that means for specific chip */
+ /* apply the right gci config */
+ /* enable the internal interrupts */
+ /* assume: caller already registered handler for that GCI int */
+ if (getvar(NULL, rstr_device_wake_opt) == NULL)
+ return gci_gpio;
+
+ device_wake_opt = getintvar(NULL, rstr_device_wake_opt);
+ }
+ switch (CHIPID(sih->chip)) {
+ case BCM4369_CHIP_GRPID:
+ case BCM4376_CHIP_GRPID:
+ case BCM4378_CHIP_GRPID:
+ case BCM4385_CHIP_GRPID:
+ case BCM4387_CHIP_GRPID:
+ case BCM4362_CHIP_GRPID:
+ /* device_wake op 1:
+ * gpio 1, func sel 4,
+ * gcigpioctrl: input pin, exra gpio
+ * since GCI_GPIO_CHIPCTRL_ENAB_EXT_GPIO_BIT is used, gci gpio is same as GPIO num
+ * GCI GPIO 1,wakemask/intmask: any edge, both positive negative
+ * enable the wake mask, intmask in GCI top level
+ * enable the chip common to get the G/ECI interrupt
+ * enable the PMU ctrl to wake the chip on wakemask set
+ */
+ if (device_wake_opt == 1) {
+ gci_gpio = CC_GCI_GPIO_1;
+ *wake_mask = (1 << GCI_GPIO_STS_VALUE_BIT) |
+ (1 << GCI_GPIO_STS_POS_EDGE_BIT) |
+ (1 << GCI_GPIO_STS_NEG_EDGE_BIT);
+ si_gci_set_functionsel(sih, gci_gpio, CC_FNSEL_GCI0);
+ si_enable_gpio_wake(sih, wake_mask, cur_status, gci_gpio,
+ PMU_CC2_GCI2_WAKE | PMU_CC2_MASK_WL_DEV_WAKE,
+ PMU_CC2_GCI2_WAKE | PMU_CC2_MASK_WL_DEV_WAKE);
+ /* hack: add a pulldown to HOST_WAKE */
+ si_gci_gpio_chipcontrol(sih, 0,
+ (1 << GCI_GPIO_CHIPCTRL_PULLDN_BIT));
+
+ /* Enable wake on GciWake */
+ si_gci_indirect(sih, 0,
+ GCI_OFFSETOF(sih, gci_wakemask),
+ (GCI_INTSTATUS_GPIOWAKE | GCI_INTSTATUS_GPIOINT),
+ (GCI_INTSTATUS_GPIOWAKE | GCI_INTSTATUS_GPIOINT));
+
+ } else {
+ SI_ERROR(("0x%04x: don't know about device_wake_opt %d\n",
+ CHIPID(sih->chip), device_wake_opt));
+ }
+ break;
+ default:
+ SI_ERROR(("device wake not supported for 0x%04x yet\n", CHIPID(sih->chip)));
+ break;
+ }
+ return gci_gpio;
+}
+
+void
+si_gci_gpioint_handler_unregister(si_t *sih, void *gci_i)
+{
+ si_info_t *sii;
+ gci_gpio_item_t *p, *n;
+
+ sii = SI_INFO(sih);
+
+ ASSERT(gci_i != NULL);
+
+ if (!(sih->cccaps_ext & CC_CAP_EXT_GCI_PRESENT)) {
+ SI_ERROR(("si_gci_gpioint_handler_unregister: not GCI capable\n"));
+ return;
+ }
+ ASSERT(sii->gci_gpio_head != NULL);
+
+ if ((void*)sii->gci_gpio_head == gci_i) {
+ sii->gci_gpio_head = sii->gci_gpio_head->next;
+ MFREE(sii->osh, gci_i, sizeof(gci_gpio_item_t));
+ return;
+ } else {
+ p = sii->gci_gpio_head;
+ n = p->next;
+ while (n) {
+ if ((void*)n == gci_i) {
+ p->next = n->next;
+ MFREE(sii->osh, gci_i, sizeof(gci_gpio_item_t));
+ return;
+ }
+ p = n;
+ n = n->next;
+ }
+ }
+}
+
+void*
+si_gci_gpioint_handler_register(si_t *sih, uint8 gci_gpio, uint8 gpio_status,
+ gci_gpio_handler_t cb, void *arg)
+{
+ si_info_t *sii;
+ gci_gpio_item_t *gci_i;
+
+ sii = SI_INFO(sih);
+
+ ASSERT(cb != NULL);
+
+ if (!(sih->cccaps_ext & CC_CAP_EXT_GCI_PRESENT)) {
+ SI_ERROR(("si_gci_gpioint_handler_register: not GCI capable\n"));
+ return NULL;
+ }
+
+ SI_MSG(("si_gci_gpioint_handler_register: gci_gpio is %d\n", gci_gpio));
+ if (gci_gpio >= SI_GPIO_MAX) {
+ SI_ERROR(("isi_gci_gpioint_handler_register: Invalid GCI GPIO NUM %d\n", gci_gpio));
+ return NULL;
+ }
+
+ gci_i = MALLOC(sii->osh, (sizeof(gci_gpio_item_t)));
+
+ ASSERT(gci_i);
+ if (gci_i == NULL) {
+ SI_ERROR(("si_gci_gpioint_handler_register: GCI Item MALLOC failure\n"));
+ return NULL;
+ }
+
+ if (sii->gci_gpio_head)
+ gci_i->next = sii->gci_gpio_head;
+ else
+ gci_i->next = NULL;
+
+ sii->gci_gpio_head = gci_i;
+
+ gci_i->handler = cb;
+ gci_i->arg = arg;
+ gci_i->gci_gpio = gci_gpio;
+ gci_i->status = gpio_status;
+
+ return (void *)(gci_i);
+}
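+/* Usage sketch (illustrative; my_gpio_cb and my_ctx are hypothetical): register
+ * a positive-edge handler and later release it via the returned opaque handle.
+ *
+ *   void *h = si_gci_gpioint_handler_register(sih, gci_gpio,
+ *           1 << GCI_GPIO_STS_POS_EDGE_BIT, my_gpio_cb, my_ctx);
+ *   ...
+ *   if (h != NULL)
+ *           si_gci_gpioint_handler_unregister(sih, h);
+ */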
+
+static void
+si_gci_gpioint_handler_process(si_t *sih)
+{
+ si_info_t *sii;
+ uint32 gpio_status[2], status;
+ gci_gpio_item_t *gci_i;
+
+ sii = SI_INFO(sih);
+
+ /* most probably there are going to be 1 or 2 GPIOs used this way, so do for each GPIO */
+
+ /* go through the GPIO handlers and call them back if their intstatus is set */
+ si_corereg(sih, GCI_CORE_IDX(sih), GCI_OFFSETOF(sih, gci_indirect_addr), ~0, 0);
+ gpio_status[0] = si_corereg(sih, GCI_CORE_IDX(sih),
+ GCI_OFFSETOF(sih, gci_gpiostatus), 0, 0);
+ /* Only clear the status bits that have been read. Other bits (if present) should not
+ * get cleared, so that they can be handled later.
+ */
+ si_corereg(sih, GCI_CORE_IDX(sih), GCI_OFFSETOF(sih, gci_gpiostatus), ~0, gpio_status[0]);
+
+ si_corereg(sih, GCI_CORE_IDX(sih), GCI_OFFSETOF(sih, gci_indirect_addr), ~0, 1);
+ gpio_status[1] = si_corereg(sih, GCI_CORE_IDX(sih),
+ GCI_OFFSETOF(sih, gci_gpiostatus), 0, 0);
+ /* Only clear the status bits that have been read. Other bits (if present) should not
+ * get cleared, so that they can be handled later.
+ */
+ si_corereg(sih, GCI_CORE_IDX(sih), GCI_OFFSETOF(sih, gci_gpiostatus), ~0, gpio_status[1]);
+
+ gci_i = sii->gci_gpio_head;
+
+ SI_MSG(("si_gci_gpioint_handler_process: status 0x%04x, 0x%04x\n",
+ gpio_status[0], gpio_status[1]));
+
+ while (gci_i) {
+ if (gci_i->gci_gpio < 8)
+ status = ((gpio_status[0] >> (gci_i->gci_gpio * 4)) & 0x0F);
+ else
+ status = ((gpio_status[1] >> ((gci_i->gci_gpio - 8) * 4)) & 0x0F);
+ /* should we mask these */
+ /* call back */
+ ASSERT(gci_i->handler);
+ if (gci_i->status & status)
+ gci_i->handler(status, gci_i->arg);
+ gci_i = gci_i->next;
+ }
+}
+
+void
+si_gci_handler_process(si_t *sih)
+{
+ uint32 gci_intstatus;
+
+ /* check the intmask, wakemask in the interrupt routine and call the right ones */
+ /* for now call the gpio interrupt */
+ gci_intstatus = si_corereg(sih, GCI_CORE_IDX(sih), GCI_OFFSETOF(sih, gci_intstat), 0, 0);
+
+ if (gci_intstatus & GCI_INTMASK_GPIOINT) {
+ SI_MSG(("si_gci_handler_process: gci_intstatus is 0x%04x\n", gci_intstatus));
+ si_gci_gpioint_handler_process(sih);
+ }
+ if ((gci_intstatus & ~(GCI_INTMASK_GPIOINT))) {
+#ifdef HNDGCI
+ hndgci_handler_process(gci_intstatus, sih);
+#endif /* HNDGCI */
+ }
+#ifdef WLGCIMBHLR
+ if (gci_intstatus & GCI_INTSTATUS_EVENT) {
+ hnd_gci_mb_handler_process(gci_intstatus, sih);
+ }
+#endif /* WLGCIMBHLR */
+
+#if defined(BCMLTECOEX) && !defined(WLTEST)
+ if (gci_intstatus & GCI_INTMASK_SRFNE) {
+ si_wci2_rxfifo_intr_handler_process(sih, gci_intstatus);
+ }
+#endif /* BCMLTECOEX && !WLTEST */
+
+#ifdef BCMGCISHM
+ if (gci_intstatus & (GCI_INTSTATUS_EVENT | GCI_INTSTATUS_EVENTWAKE)) {
+ hnd_gcishm_handler_process(sih, gci_intstatus);
+ }
+#endif /* BCMGCISHM */
+}
+
+void
+si_gci_seci_init(si_t *sih)
+{
+ si_gci_direct(sih, GCI_OFFSETOF(sih, gci_corectrl), ALLONES_32,
+ (GCI_CCTL_SCS << GCI_CCTL_SCS_OFFSET) |
+ (GCI_MODE_SECI << GCI_CCTL_SMODE_OFFSET) |
+ (1 << GCI_CCTL_SECIEN_OFFSET));
+
+ si_gci_indirect(sih, 0, GCI_OFFSETOF(sih, gci_chipctrl), ALLONES_32, 0x0080000); //0x200
+
+ si_gci_indirect(sih, 1, GCI_OFFSETOF(sih, gci_gpioctl), ALLONES_32, 0x00010280); //0x044
+
+ /* baudrate:4Mbps at 40MHz xtal, escseq:0xdb, high baudrate, enable seci_tx/rx */
+ si_gci_direct(sih, GCI_OFFSETOF(sih, gci_secibauddiv), //0x1e0
+ ALLONES_32, 0xF6);
+ si_gci_direct(sih, GCI_OFFSETOF(sih, gci_baudadj), ALLONES_32, 0xFF); //0x1f8
+ si_gci_direct(sih, GCI_OFFSETOF(sih, gci_secifcr), ALLONES_32, 0x00); //0x1e4
+ si_gci_direct(sih, GCI_OFFSETOF(sih, gci_secimcr), ALLONES_32, 0x08); //0x1ec
+ si_gci_direct(sih, GCI_OFFSETOF(sih, gci_secilcr), ALLONES_32, 0xA8); //0x1e8
+ si_gci_direct(sih, GCI_OFFSETOF(sih, gci_seciuartescval), //0x1d0
+ ALLONES_32, 0xDB);
+
+ /* Atlas/GMAC3 configuration for SECI */
+ si_gci_direct(sih, GCI_OFFSETOF(sih, gci_miscctl), ALLONES_32, 0xFFFF); //0xc54
+
+ /* config GPIO pins 5/6 as SECI_IN/SECI_OUT */
+ si_gci_indirect(sih, 0,
+ GCI_OFFSETOF(sih, gci_seciin_ctrl), ALLONES_32, 0x161); //0x218
+ si_gci_indirect(sih, 0,
+ GCI_OFFSETOF(sih, gci_seciout_ctrl), ALLONES_32, 0x10051); //0x21c
+
+ si_gci_direct(sih, GCI_OFFSETOF(sih, gci_seciout_txen_txbr), ALLONES_32, 0x01); //0x224
+
+ /* WLAN rx offset assignment */
+ /* WLCX: RX offset assignment from WLAN core to WLAN core (faked as BT TX) */
+ si_gci_indirect(sih, 0,
+ GCI_OFFSETOF(sih, gci_secif0rx_offset), ALLONES_32, 0x13121110); //0x1bc
+ si_gci_indirect(sih, 1,
+ GCI_OFFSETOF(sih, gci_secif0rx_offset), ALLONES_32, 0x17161514);
+ si_gci_indirect(sih, 2,
+ GCI_OFFSETOF(sih, gci_secif0rx_offset), ALLONES_32, 0x1b1a1918);
+
+ /* first 12 nibbles configured for format-0 */
+ /* note: we can only select 1st 12 nibbles of each IP for format_0 */
+ si_gci_indirect(sih, 0, GCI_OFFSETOF(sih, gci_seciusef0tx_reg), //0x1b4
+ ALLONES_32, 0xFFF); // first 12 nibbles
+
+ si_gci_indirect(sih, 0, GCI_OFFSETOF(sih, gci_secitx_datatag),
+ ALLONES_32, 0x0F0); // gci_secitx_datatag(nibbles 4 to 7 tagged)
+ si_gci_indirect(sih, 0, GCI_OFFSETOF(sih, gci_secirx_datatag),
+ ALLONES_32, 0x0F0); // gci_secirx_datatag(nibbles 4 to 7 tagged)
+
+ /* TX offset assignment (wlan to bt) */
+ si_gci_indirect(sih, 0,
+ GCI_OFFSETOF(sih, gci_secif0tx_offset), 0xFFFFFFFF, 0x76543210); //0x1b8
+ si_gci_indirect(sih, 1,
+ GCI_OFFSETOF(sih, gci_secif0tx_offset), 0xFFFFFFFF, 0x0000ba98);
+ if (CHIPID(sih->chip) == BCM43602_CHIP_ID) {
+ /* Request BT side to update SECI information */
+ si_gci_direct(sih, OFFSETOF(chipcregs_t, gci_seciauxtx),
+ (SECI_AUX_TX_START | SECI_REFRESH_REQ),
+ (SECI_AUX_TX_START | SECI_REFRESH_REQ));
+ /* WLAN to update SECI information */
+ si_gci_direct(sih, OFFSETOF(chipcregs_t, gci_corectrl),
+ SECI_UPD_SECI, SECI_UPD_SECI);
+ }
+
+ // HW ECI bus directly driven from IP
+ si_gci_direct(sih, GCI_OFFSETOF(sih, gci_control_0), ALLONES_32, 0x00000000);
+ si_gci_direct(sih, GCI_OFFSETOF(sih, gci_control_1), ALLONES_32, 0x00000000);
+}
+
+#if defined(BCMLTECOEX) && !defined(WLTEST)
+int
+si_wci2_rxfifo_handler_register(si_t *sih, wci2_handler_t rx_cb, void *ctx)
+{
+ si_info_t *sii;
+ wci2_rxfifo_info_t *wci2_info;
+
+ sii = SI_INFO(sih);
+
+ ASSERT(rx_cb != NULL);
+
+ if (!(sih->cccaps_ext & CC_CAP_EXT_GCI_PRESENT)) {
+ SI_ERROR(("si_wci2_rxfifo_handler_register: not GCI capable\n"));
+ return BCME_ERROR;
+ }
+
+ if ((wci2_info = (wci2_rxfifo_info_t *)MALLOCZ(sii->osh,
+ sizeof(wci2_rxfifo_info_t))) == NULL) {
+ SI_ERROR(("si_wci2_rxfifo_handler_register: WCI2 RXFIFO INFO MALLOC failure\n"));
+ return BCME_NOMEM;
+ }
+
+ if ((wci2_info->rx_buf = (char *)MALLOCZ(sii->osh, WCI2_UART_RX_BUF_SIZE)) == NULL) {
+ MFREE(sii->osh, wci2_info, sizeof(wci2_rxfifo_info_t));
+
+ SI_ERROR(("si_wci2_rxfifo_handler_register: WCI2 RXFIFO INFO MALLOC failure\n"));
+ return BCME_NOMEM;
+ }
+
+ if ((wci2_info->cbs = (wci2_cbs_t *)MALLOCZ(sii->osh, sizeof(wci2_cbs_t))) == NULL) {
+ MFREE(sii->osh, wci2_info->rx_buf, WCI2_UART_RX_BUF_SIZE);
+ MFREE(sii->osh, wci2_info, sizeof(wci2_rxfifo_info_t));
+
+ SI_ERROR(("si_wci2_rxfifo_handler_register: WCI2 RXFIFO INFO MALLOC failure\n"));
+ return BCME_NOMEM;
+ }
+
+ sii->wci2_info = wci2_info;
+
+ /* init callback */
+ wci2_info->cbs->handler = rx_cb;
+ wci2_info->cbs->context = ctx;
+
+ return BCME_OK;
+}
+
+void
+si_wci2_rxfifo_handler_unregister(si_t *sih)
+{
+ si_info_t *sii;
+ wci2_rxfifo_info_t *wci2_info;
+
+ sii = SI_INFO(sih);
+ ASSERT(sii);
+
+ if (!(sih->cccaps_ext & CC_CAP_EXT_GCI_PRESENT)) {
+ SI_ERROR(("si_wci2_rxfifo_handler_unregister: not GCI capable\n"));
+ return;
+ }
+
+ wci2_info = sii->wci2_info;
+
+ if (wci2_info == NULL) {
+ return;
+ }
+
+ if (wci2_info->rx_buf != NULL) {
+ MFREE(sii->osh, wci2_info->rx_buf, WCI2_UART_RX_BUF_SIZE);
+ }
+
+ if (wci2_info->cbs != NULL) {
+ MFREE(sii->osh, wci2_info->cbs, sizeof(wci2_cbs_t));
+ }
+
+ MFREE(sii->osh, wci2_info, sizeof(wci2_rxfifo_info_t));
+}
+
+/* GCI WCI2 UART RXFIFO interrupt handler */
+static void
+si_wci2_rxfifo_intr_handler_process(si_t *sih, uint32 intstatus)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ uint32 udata;
+ char ubyte;
+ wci2_rxfifo_info_t *wci2_info;
+ bool call_cb = FALSE;
+
+ wci2_info = sii->wci2_info;
+
+ if (wci2_info == NULL) {
+ return;
+ }
+
+ if (intstatus & GCI_INTSTATUS_SRFOF) {
+ SI_ERROR(("*** rx fifo overflow *** \n"));
+ si_gci_direct(sih, GCI_OFFSETOF(sih, gci_intstat),
+ GCI_INTSTATUS_SRFOF, GCI_INTSTATUS_SRFOF);
+ }
+
+ /* Check if RF FIFO has any data */
+ if (intstatus & GCI_INTMASK_SRFNE) {
+
+ /* Read seci uart data */
+ udata = si_gci_direct(sih, GCI_OFFSETOF(sih, gci_seciuartdata), 0, 0);
+
+ while (udata & SECI_UART_DATA_RF_NOT_EMPTY_BIT) {
+
+ ubyte = (char) udata;
+ if (wci2_info) {
+ wci2_info->rx_buf[wci2_info->rx_idx] = ubyte;
+ wci2_info->rx_idx++;
+ call_cb = TRUE;
+ /* if the buffer is full, break
+ * remaining will be processed in next callback
+ */
+ if (wci2_info->rx_idx == WCI2_UART_RX_BUF_SIZE) {
+ break;
+ }
+ }
+
+ udata = si_gci_direct(sih, GCI_OFFSETOF(sih, gci_seciuartdata), 0, 0);
+ }
+
+ /* if callback registered; call it */
+ if (call_cb && wci2_info && wci2_info->cbs) {
+ wci2_info->cbs->handler(wci2_info->cbs->context, wci2_info->rx_buf,
+ wci2_info->rx_idx);
+ bzero(wci2_info->rx_buf, WCI2_UART_RX_BUF_SIZE);
+ wci2_info->rx_idx = 0;
+ }
+ }
+}
+#endif /* BCMLTECOEX && !WLTEST */
+
+#ifdef BCMLTECOEX
+/* Program GCI GpioMask and GCI GpioControl Registers */
+static void
+si_config_gcigpio(si_t *sih, uint32 gci_pos, uint8 gcigpio,
+ uint8 gpioctl_mask, uint8 gpioctl_val)
+{
+ uint32 indirect_idx =
+ GCI_REGIDX(gci_pos) | (gcigpio << GCI_GPIOIDX_OFFSET);
+ si_gci_indirect(sih, indirect_idx, GCI_OFFSETOF(sih, gci_gpiomask),
+ (1 << GCI_BITOFFSET(gci_pos)),
+ (1 << GCI_BITOFFSET(gci_pos)));
+ /* Write GPIO Configuration to GCI Registers */
+ si_gci_indirect(sih, gcigpio/4, GCI_OFFSETOF(sih, gci_gpioctl),
+ (gpioctl_mask << (gcigpio%4)*8), (gpioctl_val << (gcigpio%4)*8));
+}
+
+void
+si_ercx_init(si_t *sih, uint32 ltecx_mux, uint32 ltecx_padnum,
+ uint32 ltecx_fnsel, uint32 ltecx_gcigpio)
+{
+ uint8 fsync_padnum, lterx_padnum, ltetx_padnum, wlprio_padnum;
+ uint8 fsync_fnsel, lterx_fnsel, ltetx_fnsel, wlprio_fnsel;
+ uint8 fsync_gcigpio, lterx_gcigpio, ltetx_gcigpio, wlprio_gcigpio;
+
+ /* reset GCI block */
+ si_gci_reset(sih);
+
+ /* enable ERCX (pure gpio) mode, keep SECI in reset mode only */
+ /* Hopefully, keeping SECI in reset mode will draw less current */
+ si_gci_direct(sih, GCI_OFFSETOF(sih, gci_corectrl),
+ ((GCI_MODE_MASK << GCI_CCTL_SMODE_OFFSET)
+ |(1 << GCI_CCTL_SECIEN_OFFSET)
+ |(1 << GCI_CCTL_RSTSL_OFFSET)
+ |(1 << GCI_CCTL_SECIRST_OFFSET)),
+ ((GCI_MODE_GPIO << GCI_CCTL_SMODE_OFFSET)
+ |(0 << GCI_CCTL_SECIEN_OFFSET)
+ |(1 << GCI_CCTL_RSTSL_OFFSET)
+ |(1 << GCI_CCTL_SECIRST_OFFSET)));
+
+ /* Extract Interface Configuration */
+ fsync_padnum = LTECX_EXTRACT_PADNUM(ltecx_padnum, LTECX_NVRAM_FSYNC_IDX);
+ lterx_padnum = LTECX_EXTRACT_PADNUM(ltecx_padnum, LTECX_NVRAM_LTERX_IDX);
+ ltetx_padnum = LTECX_EXTRACT_PADNUM(ltecx_padnum, LTECX_NVRAM_LTETX_IDX);
+ wlprio_padnum = LTECX_EXTRACT_PADNUM(ltecx_padnum, LTECX_NVRAM_WLPRIO_IDX);
+
+ fsync_fnsel = LTECX_EXTRACT_FNSEL(ltecx_fnsel, LTECX_NVRAM_FSYNC_IDX);
+ lterx_fnsel = LTECX_EXTRACT_FNSEL(ltecx_fnsel, LTECX_NVRAM_LTERX_IDX);
+ ltetx_fnsel = LTECX_EXTRACT_FNSEL(ltecx_fnsel, LTECX_NVRAM_LTETX_IDX);
+ wlprio_fnsel = LTECX_EXTRACT_FNSEL(ltecx_fnsel, LTECX_NVRAM_WLPRIO_IDX);
+
+ fsync_gcigpio = LTECX_EXTRACT_GCIGPIO(ltecx_gcigpio, LTECX_NVRAM_FSYNC_IDX);
+ lterx_gcigpio = LTECX_EXTRACT_GCIGPIO(ltecx_gcigpio, LTECX_NVRAM_LTERX_IDX);
+ ltetx_gcigpio = LTECX_EXTRACT_GCIGPIO(ltecx_gcigpio, LTECX_NVRAM_LTETX_IDX);
+ wlprio_gcigpio = LTECX_EXTRACT_GCIGPIO(ltecx_gcigpio, LTECX_NVRAM_WLPRIO_IDX);
+
+ /* Clear this Function Select for all GPIOs if programmed by default */
+ si_gci_clear_functionsel(sih, fsync_fnsel);
+ si_gci_clear_functionsel(sih, lterx_fnsel);
+ si_gci_clear_functionsel(sih, ltetx_fnsel);
+ si_gci_clear_functionsel(sih, wlprio_fnsel);
+
+ /* Program Function select for selected GPIOs */
+ si_gci_set_functionsel(sih, fsync_padnum, fsync_fnsel);
+ si_gci_set_functionsel(sih, lterx_padnum, lterx_fnsel);
+ si_gci_set_functionsel(sih, ltetx_padnum, ltetx_fnsel);
+ si_gci_set_functionsel(sih, wlprio_padnum, wlprio_fnsel);
+
+ /* NOTE: We are keeping Input PADs in Pull Down Mode to take care of the case
+ * when LTE Modem doesn't drive these lines for any reason.
+ * We should consider alternate ways to identify this situation and dynamically
+ * enable Pull Down PAD only when LTE Modem doesn't drive these lines.
+ */
+
+ /* Configure Frame Sync as input */
+ si_config_gcigpio(sih, GCI_LTE_FRAMESYNC_POS, fsync_gcigpio, 0xFF,
+ ((1 << GCI_GPIOCTL_INEN_OFFSET)|(1 << GCI_GPIOCTL_PDN_OFFSET)));
+
+ /* Configure LTE Rx as input */
+ si_config_gcigpio(sih, GCI_LTE_RX_POS, lterx_gcigpio, 0xFF,
+ ((1 << GCI_GPIOCTL_INEN_OFFSET)|(1 << GCI_GPIOCTL_PDN_OFFSET)));
+
+ /* Configure LTE Tx as input */
+ si_config_gcigpio(sih, GCI_LTE_TX_POS, ltetx_gcigpio, 0xFF,
+ ((1 << GCI_GPIOCTL_INEN_OFFSET)|(1 << GCI_GPIOCTL_PDN_OFFSET)));
+
+ /* Configure WLAN Prio as output. BT Need to configure its ISM Prio separately
+ * NOTE: LTE chip has to enable its internal pull-down whenever WL goes down
+ */
+ si_config_gcigpio(sih, GCI_WLAN_PRIO_POS, wlprio_gcigpio, 0xFF,
+ (1 << GCI_GPIOCTL_OUTEN_OFFSET));
+
+ /* Enable inbandIntMask for FrmSync only, disable LTE_Rx and LTE_Tx
+ * Note: FrameSync, LTE Rx & LTE Tx happen to share the same REGIDX
+ * Hence a single Access is sufficient
+ */
+ si_gci_indirect(sih, GCI_REGIDX(GCI_LTE_FRAMESYNC_POS),
+ GCI_OFFSETOF(sih, gci_inbandeventintmask),
+ ((1 << GCI_BITOFFSET(GCI_LTE_FRAMESYNC_POS))
+ |(1 << GCI_BITOFFSET(GCI_LTE_RX_POS))
+ |(1 << GCI_BITOFFSET(GCI_LTE_TX_POS))),
+ ((1 << GCI_BITOFFSET(GCI_LTE_FRAMESYNC_POS))
+ |(0 << GCI_BITOFFSET(GCI_LTE_RX_POS))
+ |(0 << GCI_BITOFFSET(GCI_LTE_TX_POS))));
+
+ /* Enable Inband interrupt polarity for LTE_FRMSYNC */
+ si_gci_indirect(sih, GCI_REGIDX(GCI_LTE_FRAMESYNC_POS),
+ GCI_OFFSETOF(sih, gci_intpolreg),
+ (1 << GCI_BITOFFSET(GCI_LTE_FRAMESYNC_POS)),
+ (1 << GCI_BITOFFSET(GCI_LTE_FRAMESYNC_POS)));
+}
+
+void
+si_wci2_init(si_t *sih, uint8 baudrate, uint32 ltecx_mux, uint32 ltecx_padnum,
+ uint32 ltecx_fnsel, uint32 ltecx_gcigpio, uint32 xtalfreq)
+{
+ /* BCMLTECOEXGCI_ENAB should be checked before calling si_wci2_init() */
+ uint8 baud = baudrate;
+ uint8 seciin, seciout, fnselin, fnselout, gcigpioin, gcigpioout;
+
+ /* Extract PAD GPIO number (1-byte) from "ltecx_padnum" for each LTECX pin */
+ seciin = LTECX_EXTRACT_PADNUM(ltecx_padnum, LTECX_NVRAM_WCI2IN_IDX);
+ seciout = LTECX_EXTRACT_PADNUM(ltecx_padnum, LTECX_NVRAM_WCI2OUT_IDX);
+ /* Extract FunctionSel (1-nibble) from "ltecx_fnsel" for each LTECX pin */
+ fnselin = LTECX_EXTRACT_FNSEL(ltecx_fnsel, LTECX_NVRAM_WCI2IN_IDX);
+ fnselout = LTECX_EXTRACT_FNSEL(ltecx_fnsel, LTECX_NVRAM_WCI2OUT_IDX);
+ /* Extract GCI-GPIO number (1-nibble) from "ltecx_gcigpio" for each LTECX pin */
+ gcigpioin = LTECX_EXTRACT_GCIGPIO(ltecx_gcigpio, LTECX_NVRAM_WCI2IN_IDX);
+ gcigpioout = LTECX_EXTRACT_GCIGPIO(ltecx_gcigpio, LTECX_NVRAM_WCI2OUT_IDX);
+
+ /* reset GCI block */
+ si_gci_reset(sih);
+
+ /* NOTE: Writing Reserved bits of older GCI Revs is OK */
+ si_gci_direct(sih, GCI_OFFSETOF(sih, gci_corectrl),
+ ((GCI_CCTL_SCS_MASK << GCI_CCTL_SCS_OFFSET)
+ |(GCI_CCTL_LOWTOUT_MASK << GCI_CCTL_SILOWTOUT_OFFSET)
+ |(1 << GCI_CCTL_BRKONSLP_OFFSET)
+ |(1 << GCI_CCTL_US_OFFSET)
+ |(GCI_MODE_MASK << GCI_CCTL_SMODE_OFFSET)
+ |(1 << GCI_CCTL_FSL_OFFSET)
+ |(1 << GCI_CCTL_SECIEN_OFFSET)),
+ ((GCI_CCTL_SCS_DEF << GCI_CCTL_SCS_OFFSET)
+ |(GCI_CCTL_LOWTOUT_30BIT << GCI_CCTL_SILOWTOUT_OFFSET)
+ |(0 << GCI_CCTL_BRKONSLP_OFFSET)
+ |(0 << GCI_CCTL_US_OFFSET)
+ |(GCI_MODE_BTSIG << GCI_CCTL_SMODE_OFFSET)
+ |(0 << GCI_CCTL_FSL_OFFSET)
+ |(1 << GCI_CCTL_SECIEN_OFFSET))); /* 19000024 */
+
+ /* Program Function select for selected GPIOs */
+ si_gci_set_functionsel(sih, seciin, fnselin);
+ si_gci_set_functionsel(sih, seciout, fnselout);
+
+ /* Enable inbandIntMask for FrmSync only; disable LTE_Rx and LTE_Tx
+ * Note: FrameSync, LTE Rx & LTE Tx happen to share the same REGIDX
+ * Hence a single Access is sufficient
+ */
+ si_gci_indirect(sih,
+ GCI_REGIDX(GCI_LTE_FRAMESYNC_POS),
+ GCI_OFFSETOF(sih, gci_inbandeventintmask),
+ ((1 << GCI_BITOFFSET(GCI_LTE_FRAMESYNC_POS))
+ |(1 << GCI_BITOFFSET(GCI_LTE_RX_POS))
+ |(1 << GCI_BITOFFSET(GCI_LTE_TX_POS))),
+ ((1 << GCI_BITOFFSET(GCI_LTE_FRAMESYNC_POS))
+ |(0 << GCI_BITOFFSET(GCI_LTE_RX_POS))
+ |(0 << GCI_BITOFFSET(GCI_LTE_TX_POS))));
+
+ if (GCIREV(sih->gcirev) >= 1) {
+ /* Program inband interrupt polarity as posedge for FrameSync */
+ si_gci_indirect(sih, GCI_REGIDX(GCI_LTE_FRAMESYNC_POS),
+ GCI_OFFSETOF(sih, gci_intpolreg),
+ (1 << GCI_BITOFFSET(GCI_LTE_FRAMESYNC_POS)),
+ (1 << GCI_BITOFFSET(GCI_LTE_FRAMESYNC_POS)));
+ }
+ if (GCIREV(sih->gcirev) >= 4) {
+ /* Program SECI_IN Control Register */
+ si_gci_indirect(sih, GCI_LTECX_SECI_ID,
+ GCI_OFFSETOF(sih, gci_seciin_ctrl), ALLONES_32,
+ ((GCI_MODE_BTSIG << GCI_SECIIN_MODE_OFFSET)
+ |(gcigpioin << GCI_SECIIN_GCIGPIO_OFFSET)
+ |(GCI_LTE_IP_ID << GCI_SECIIN_RXID2IP_OFFSET)));
+
+ /* Program GPIO Control Register for SECI_IN GCI GPIO */
+ si_gci_indirect(sih, gcigpioin/4, GCI_OFFSETOF(sih, gci_gpioctl),
+ (0xFF << (gcigpioin%4)*8),
+ (((1 << GCI_GPIOCTL_INEN_OFFSET)
+ |(1 << GCI_GPIOCTL_PDN_OFFSET)) << (gcigpioin%4)*8));
+
+ /* Program SECI_OUT Control Register */
+ si_gci_indirect(sih, GCI_LTECX_SECI_ID,
+ GCI_OFFSETOF(sih, gci_seciout_ctrl), ALLONES_32,
+ ((GCI_MODE_BTSIG << GCI_SECIOUT_MODE_OFFSET)
+ |(gcigpioout << GCI_SECIOUT_GCIGPIO_OFFSET)
+ |((1 << GCI_LTECX_SECI_ID) << GCI_SECIOUT_SECIINRELATED_OFFSET)));
+
+ /* Program GPIO Control Register for SECI_OUT GCI GPIO */
+ si_gci_indirect(sih, gcigpioout/4, GCI_OFFSETOF(sih, gci_gpioctl),
+ (0xFF << (gcigpioout%4)*8),
+ (((1 << GCI_GPIOCTL_OUTEN_OFFSET)) << (gcigpioout%4)*8));
+
+ /* Program SECI_IN Aux FIFO enable for LTECX SECI_IN Port */
+ if (GCIREV(sih->gcirev) >= 16) {
+ si_gci_indirect(sih, GCI_LTECX_SECI_ID,
+ GCI_OFFSETOF(sih, gci_seciin_auxfifo_en),
+ (((1 << GCI_LTECX_SECI_ID) << GCI_SECIAUX_RXENABLE_OFFSET)
+ |((1 << GCI_LTECX_SECI_ID) << GCI_SECIFIFO_RXENABLE_OFFSET)),
+ (((1 << GCI_LTECX_SECI_ID) << GCI_SECIAUX_RXENABLE_OFFSET)
+ |((1 << GCI_LTECX_SECI_ID) << GCI_SECIFIFO_RXENABLE_OFFSET)));
+ } else {
+ si_gci_direct(sih, GCI_OFFSETOF(sih, gci_seciin_auxfifo_en),
+ (((1 << GCI_LTECX_SECI_ID) << GCI_SECIAUX_RXENABLE_OFFSET)
+ |((1 << GCI_LTECX_SECI_ID) << GCI_SECIFIFO_RXENABLE_OFFSET)),
+ (((1 << GCI_LTECX_SECI_ID) << GCI_SECIAUX_RXENABLE_OFFSET)
+ |((1 << GCI_LTECX_SECI_ID) << GCI_SECIFIFO_RXENABLE_OFFSET)));
+ }
+ /* Program SECI_OUT Tx Enable for LTECX SECI_OUT Port */
+ si_gci_direct(sih, GCI_OFFSETOF(sih, gci_seciout_txen_txbr), ALLONES_32,
+ ((1 << GCI_LTECX_SECI_ID) << GCI_SECITX_ENABLE_OFFSET));
+ }
+ if (GCIREV(sih->gcirev) >= 5) {
+ /* enable WlPrio/TxOn override from D11 */
+ si_gci_direct(sih, GCI_OFFSETOF(sih, gci_miscctl),
+ (1 << GCI_LTECX_TXCONF_EN_OFFSET | 1 << GCI_LTECX_PRISEL_EN_OFFSET),
+ (1 << GCI_LTECX_TXCONF_EN_OFFSET | 1 << GCI_LTECX_PRISEL_EN_OFFSET));
+ } else {
+ si_gci_direct(sih, GCI_OFFSETOF(sih, gci_miscctl),
+ (1 << GCI_LTECX_TXCONF_EN_OFFSET | 1 << GCI_LTECX_PRISEL_EN_OFFSET),
+ 0x0000);
+ }
+ /* baudrate: 1/2/3/4mbps, escseq:0xdb, high baudrate, enable seci_tx/rx */
+ si_gci_direct(sih, GCI_OFFSETOF(sih, gci_secifcr), ALLONES_32, 0x00);
+ if (GCIREV(sih->gcirev) >= 15) {
+ si_gci_indirect(sih, GCI_LTECX_SECI_ID, GCI_OFFSETOF(sih, gci_secilcr),
+ ALLONES_32, 0x00);
+ } else if (GCIREV(sih->gcirev) >= 4) {
+ si_gci_direct(sih, GCI_OFFSETOF(sih, gci_secilcr), ALLONES_32, 0x00);
+ } else {
+ si_gci_direct(sih, GCI_OFFSETOF(sih, gci_secilcr), ALLONES_32, 0x28);
+ }
+ si_gci_direct(sih, GCI_OFFSETOF(sih, gci_seciuartescval), ALLONES_32, 0xDB);
+
+ switch (baud) {
+ case 1:
+ /* baudrate:1mbps */
+ if (GCIREV(sih->gcirev) >= 15) {
+ si_gci_indirect(sih, GCI_LTECX_SECI_ID, GCI_OFFSETOF(sih, gci_secibauddiv),
+ ALLONES_32, 0xFE);
+ } else {
+ si_gci_direct(sih, GCI_OFFSETOF(sih, gci_secibauddiv),
+ ALLONES_32, 0xFE);
+ }
+ if (GCIREV(sih->gcirev) >= 15) {
+ si_gci_indirect(sih, GCI_LTECX_SECI_ID, GCI_OFFSETOF(sih, gci_secimcr),
+ ALLONES_32, 0x80);
+ } else if (GCIREV(sih->gcirev) >= 4) {
+ si_gci_direct(sih, GCI_OFFSETOF(sih, gci_secimcr),
+ ALLONES_32, 0x80);
+ } else {
+ si_gci_direct(sih, GCI_OFFSETOF(sih, gci_secimcr),
+ ALLONES_32, 0x81);
+ }
+ si_gci_direct(sih, GCI_OFFSETOF(sih, gci_baudadj),
+ ALLONES_32, 0x23);
+ break;
+
+ case 2:
+ /* baudrate:2mbps */
+ if (xtalfreq == XTAL_FREQ_26000KHZ) {
+ /* 43430 A0 uses 26 MHz crystal.
+ * Baudrate settings for crystal freq 26 MHz
+ */
+ if (GCIREV(sih->gcirev) >= 15) {
+ si_gci_indirect(sih, GCI_LTECX_SECI_ID,
+ GCI_OFFSETOF(sih, gci_secibauddiv), ALLONES_32, 0xFF);
+ } else {
+ si_gci_direct(sih, GCI_OFFSETOF(sih, gci_secibauddiv),
+ ALLONES_32, 0xFF);
+ }
+ if (GCIREV(sih->gcirev) >= 15) {
+ si_gci_indirect(sih, GCI_LTECX_SECI_ID,
+ GCI_OFFSETOF(sih, gci_secimcr), ALLONES_32, 0x80);
+ } else {
+ si_gci_direct(sih, GCI_OFFSETOF(sih, gci_secimcr),
+ ALLONES_32, 0x80);
+ }
+ si_gci_direct(sih, GCI_OFFSETOF(sih, gci_baudadj),
+ ALLONES_32, 0x0);
+ } else {
+ if (GCIREV(sih->gcirev) >= 15) {
+ si_gci_indirect(sih, GCI_LTECX_SECI_ID,
+ GCI_OFFSETOF(sih, gci_secibauddiv), ALLONES_32, 0xFF);
+ } else {
+ si_gci_direct(sih, GCI_OFFSETOF(sih, gci_secibauddiv),
+ ALLONES_32, 0xFF);
+ }
+ if (GCIREV(sih->gcirev) >= 15) {
+ si_gci_indirect(sih, GCI_LTECX_SECI_ID,
+ GCI_OFFSETOF(sih, gci_secimcr), ALLONES_32, 0x80);
+ } else if (GCIREV(sih->gcirev) >= 4) {
+ si_gci_direct(sih, GCI_OFFSETOF(sih, gci_secimcr),
+ ALLONES_32, 0x80);
+ } else {
+ si_gci_direct(sih, GCI_OFFSETOF(sih, gci_secimcr),
+ ALLONES_32, 0x81);
+ }
+ si_gci_direct(sih, GCI_OFFSETOF(sih, gci_baudadj),
+ ALLONES_32, 0x11);
+ }
+ break;
+
+ case 4:
+ /* baudrate:4mbps */
+ if (GCIREV(sih->gcirev) >= 15) {
+ si_gci_indirect(sih, GCI_LTECX_SECI_ID, GCI_OFFSETOF(sih, gci_secibauddiv),
+ ALLONES_32, 0xF7);
+ } else {
+ si_gci_direct(sih, GCI_OFFSETOF(sih, gci_secibauddiv),
+ ALLONES_32, 0xF7);
+ }
+ if (GCIREV(sih->gcirev) >= 15) {
+ si_gci_indirect(sih, GCI_LTECX_SECI_ID, GCI_OFFSETOF(sih, gci_secimcr),
+ ALLONES_32, 0x8);
+ } else if (GCIREV(sih->gcirev) >= 4) {
+ si_gci_direct(sih, GCI_OFFSETOF(sih, gci_secimcr),
+ ALLONES_32, 0x8);
+ } else {
+ si_gci_direct(sih, GCI_OFFSETOF(sih, gci_secimcr),
+ ALLONES_32, 0x9);
+ }
+ si_gci_direct(sih, GCI_OFFSETOF(sih, gci_baudadj),
+ ALLONES_32, 0x0);
+ break;
+
+ case 25:
+ /* baudrate:2.5mbps */
+ if (xtalfreq == XTAL_FREQ_26000KHZ) {
+ /* 43430 A0 uses 26 MHz crystal.
+ * Baudrate settings for crystal freq 26 MHz
+ */
+ if (GCIREV(sih->gcirev) >= 15) {
+ si_gci_indirect(sih, GCI_LTECX_SECI_ID,
+ GCI_OFFSETOF(sih, gci_secibauddiv), ALLONES_32, 0xF6);
+ } else {
+ si_gci_direct(sih, GCI_OFFSETOF(sih, gci_secibauddiv),
+ ALLONES_32, 0xF6);
+ }
+ } else if (xtalfreq == XTAL_FREQ_59970KHZ) {
+ /* 4387 uses a 60 MHz crystal.
+ * Baudrate settings for crystal freq/2 = 29.985 MHz
+ * set bauddiv to 0xF4 to achieve 2.5M for Xtal/2 @ 29.985 MHz
+ * bauddiv = 256 - integer part of (GCI clk freq / baudrate)
+ * e.g. 29.985 MHz / 2.5 Mbps ~= 12, so bauddiv = 256 - 12 = 244 = 0xF4
+ */
+ if (GCIREV(sih->gcirev) >= 15) {
+ si_gci_indirect(sih, GCI_LTECX_SECI_ID,
+ GCI_OFFSETOF(sih, gci_secibauddiv), ALLONES_32, 0xF4);
+ } else {
+ si_gci_direct(sih, GCI_OFFSETOF(sih, gci_secibauddiv),
+ ALLONES_32, 0xF4);
+ }
+ } else {
+ if (GCIREV(sih->gcirev) >= 15) {
+ si_gci_indirect(sih, GCI_LTECX_SECI_ID,
+ GCI_OFFSETOF(sih, gci_secibauddiv), ALLONES_32, 0xF1);
+ } else {
+ si_gci_direct(sih, GCI_OFFSETOF(sih, gci_secibauddiv),
+ ALLONES_32, 0xF1);
+ }
+ }
+ if (GCIREV(sih->gcirev) >= 15) {
+ si_gci_indirect(sih, GCI_LTECX_SECI_ID, GCI_OFFSETOF(sih, gci_secimcr),
+ ALLONES_32, 0x8);
+ } else if (GCIREV(sih->gcirev) >= 4) {
+ si_gci_direct(sih, GCI_OFFSETOF(sih, gci_secimcr),
+ ALLONES_32, 0x8);
+ } else {
+ si_gci_direct(sih, GCI_OFFSETOF(sih, gci_secimcr),
+ ALLONES_32, 0x9);
+ }
+ si_gci_direct(sih, GCI_OFFSETOF(sih, gci_baudadj),
+ ALLONES_32, 0x0);
+ break;
+
+ case 3:
+ default:
+ /* baudrate:3mbps */
+ if (xtalfreq == XTAL_FREQ_26000KHZ) {
+ /* 43430 A0 uses 26 MHz crystal.
+ * Baudrate settings for crystal freq 26 MHz
+ */
+ if (GCIREV(sih->gcirev) >= 15) {
+ si_gci_indirect(sih, GCI_LTECX_SECI_ID,
+ GCI_OFFSETOF(sih, gci_secibauddiv), ALLONES_32, 0xF7);
+ } else {
+ si_gci_direct(sih, GCI_OFFSETOF(sih, gci_secibauddiv),
+ ALLONES_32, 0xF7);
+ }
+ } else {
+ if (GCIREV(sih->gcirev) >= 15) {
+ si_gci_indirect(sih, GCI_LTECX_SECI_ID,
+ GCI_OFFSETOF(sih, gci_secibauddiv), ALLONES_32, 0xF4);
+ } else {
+ si_gci_direct(sih, GCI_OFFSETOF(sih, gci_secibauddiv),
+ ALLONES_32, 0xF4);
+ }
+ }
+ if (GCIREV(sih->gcirev) >= 15) {
+ si_gci_indirect(sih, GCI_LTECX_SECI_ID, GCI_OFFSETOF(sih, gci_secimcr),
+ ALLONES_32, 0x8);
+ } else if (GCIREV(sih->gcirev) >= 4) {
+ si_gci_direct(sih, GCI_OFFSETOF(sih, gci_secimcr),
+ ALLONES_32, 0x8);
+ } else {
+ si_gci_direct(sih, GCI_OFFSETOF(sih, gci_secimcr),
+ ALLONES_32, 0x9);
+ }
+ si_gci_direct(sih, GCI_OFFSETOF(sih, gci_baudadj),
+ ALLONES_32, 0x0);
+ break;
+ }
+ /* GCI Rev >= 1 */
+ if (GCIREV(sih->gcirev) >= 1) {
+ /* Route Rx-data through AUX register */
+ si_gci_direct(sih, GCI_OFFSETOF(sih, gci_rxfifo_common_ctrl),
+ GCI_RXFIFO_CTRL_AUX_EN, GCI_RXFIFO_CTRL_AUX_EN);
+#if !defined(WLTEST)
+ /* Route RX Type 2 data through RX FIFO */
+ si_gci_direct(sih, GCI_OFFSETOF(sih, gci_rxfifo_common_ctrl),
+ GCI_RXFIFO_CTRL_FIFO_TYPE2_EN, GCI_RXFIFO_CTRL_FIFO_TYPE2_EN);
+ /* Enable Inband interrupt for RX FIFO status */
+ si_gci_direct(sih, GCI_OFFSETOF(sih, gci_intmask),
+ (GCI_INTSTATUS_SRFNE | GCI_INTSTATUS_SRFOF),
+ (GCI_INTSTATUS_SRFNE | GCI_INTSTATUS_SRFOF));
+#endif /* !WLTEST */
+ } else {
+ /* GPIO 3-7 as BT_SIG compliant */
+ /* config GPIO pins 3-7 as input */
+ si_gci_indirect(sih, 0,
+ GCI_OFFSETOF(sih, gci_gpioctl), 0x20000000, 0x20000010);
+ si_gci_indirect(sih, 1,
+ GCI_OFFSETOF(sih, gci_gpioctl), 0x20202020, 0x20202020);
+ /* gpio mapping: frmsync-gpio7, mws_rx-gpio6, mws_tx-gpio5,
+ * pat[0]-gpio4, pat[1]-gpio3
+ */
+ si_gci_indirect(sih, 0x70010,
+ GCI_OFFSETOF(sih, gci_gpiomask), 0x00000001, 0x00000001);
+ si_gci_indirect(sih, 0x60010,
+ GCI_OFFSETOF(sih, gci_gpiomask), 0x00000002, 0x00000002);
+ si_gci_indirect(sih, 0x50010,
+ GCI_OFFSETOF(sih, gci_gpiomask), 0x00000004, 0x00000004);
+ si_gci_indirect(sih, 0x40010,
+ GCI_OFFSETOF(sih, gci_gpiomask), 0x02000000, 0x00000008);
+ si_gci_indirect(sih, 0x30010,
+ GCI_OFFSETOF(sih, gci_gpiomask), 0x04000000, 0x04000010);
+ /* gpio mapping: wlan_rx_prio-gpio5, wlan_tx_on-gpio4 */
+ si_gci_indirect(sih, 0x50000,
+ GCI_OFFSETOF(sih, gci_gpiomask), 0x00000010, 0x00000010);
+ si_gci_indirect(sih, 0x40000,
+ GCI_OFFSETOF(sih, gci_gpiomask), 0x00000020, 0x00000020);
+ /* enable gpio out on gpio4(wlanrxprio), gpio5(wlantxon) */
+ si_gci_direct(sih,
+ GCI_OFFSETOF(sih, gci_control_0), 0x00000030, 0x00000000);
+ }
+}
+#endif /* BCMLTECOEX */
+
+/* This function is used in AIBSS mode by BTCX to enable strobing to BT */
+bool
+si_btcx_wci2_init(si_t *sih)
+{
+ /* reset GCI block */
+ si_gci_reset(sih);
+
+ if (GCIREV(sih->gcirev) >= 1) {
+ si_gci_direct(sih, GCI_OFFSETOF(sih, gci_corectrl),
+ ((GCI_CCTL_SCS_MASK << GCI_CCTL_SCS_OFFSET)
+ |(GCI_CCTL_LOWTOUT_MASK << GCI_CCTL_SILOWTOUT_OFFSET)
+ |(1 << GCI_CCTL_BRKONSLP_OFFSET)
+ |(1 << GCI_CCTL_US_OFFSET)
+ |(GCI_MODE_MASK << GCI_CCTL_SMODE_OFFSET)
+ |(1 << GCI_CCTL_FSL_OFFSET)
+ |(1 << GCI_CCTL_SECIEN_OFFSET)),
+ ((GCI_CCTL_SCS_DEF << GCI_CCTL_SCS_OFFSET)
+ |(GCI_CCTL_LOWTOUT_30BIT << GCI_CCTL_SILOWTOUT_OFFSET)
+ |(0 << GCI_CCTL_BRKONSLP_OFFSET)
+ |(0 << GCI_CCTL_US_OFFSET)
+ |(GCI_MODE_BTSIG << GCI_CCTL_SMODE_OFFSET)
+ |(0 << GCI_CCTL_FSL_OFFSET)
+ |(1 << GCI_CCTL_SECIEN_OFFSET))); /* 19000024 */
+ return TRUE;
+ }
+ return FALSE;
+}
+
+void
+si_gci_uart_init(si_t *sih, osl_t *osh, uint8 seci_mode)
+{
+#ifdef HNDGCI
+ hndgci_init(sih, osh, HND_GCI_PLAIN_UART_MODE,
+ GCI_UART_BR_115200);
+
+ /* specify rx callback */
+ hndgci_uart_config_rx_complete(-1, -1, 0, NULL, NULL);
+#else
+ BCM_REFERENCE(sih);
+ BCM_REFERENCE(osh);
+ BCM_REFERENCE(seci_mode);
+#endif /* HNDGCI */
+}
+
+/**
+ * A given GCI pin needs to be converted to a GCI FunctionSel register offset and the bit position
+ * in this register.
+ * @param[in] input pin number, see respective chip Toplevel Arch page, GCI chipstatus regs
+ * @param[out] regidx chipcontrol reg(ring_index base) and
+ * @param[out] pos bits to shift for pin first regbit
+ *
+ * eg: gpio9 will give regidx: 1 and pos 4
+ */
+static void
+BCMPOSTTRAPFN(si_gci_get_chipctrlreg_ringidx_base4)(uint32 pin, uint32 *regidx, uint32 *pos)
+{
+ *regidx = (pin / 8);
+ *pos = (pin % 8) * 4; // each pin occupies 4 FunctionSel register bits
+
+ SI_MSG(("si_gci_get_chipctrlreg_ringidx_base4:%d:%d:%d\n", pin, *regidx, *pos));
+}
+
+/* input: pin number
+* output: chipcontrol reg(ring_index base) and
+* bits to shift for pin first regbit.
+* eg: gpio9 will give regidx: 2 and pos 8
+*/
+static uint8
+BCMPOSTTRAPFN(si_gci_get_chipctrlreg_ringidx_base8)(uint32 pin, uint32 *regidx, uint32 *pos)
+{
+ *regidx = (pin / 4);
+ *pos = (pin % 4)*8;
+
+ SI_MSG(("si_gci_get_chipctrlreg_ringidx_base8:%d:%d:%d\n", pin, *regidx, *pos));
+
+ return 0;
+}
+
+/** setup a given pin for fnsel function */
+void
+BCMPOSTTRAPFN(si_gci_set_functionsel)(si_t *sih, uint32 pin, uint8 fnsel)
+{
+ uint32 reg = 0, pos = 0;
+
+ SI_MSG(("si_gci_set_functionsel:%d\n", pin));
+
+ si_gci_get_chipctrlreg_ringidx_base4(pin, &reg, &pos);
+ si_gci_chipcontrol(sih, reg, GCIMASK_4B(pos), GCIPOSVAL_4B(fnsel, pos));
+}
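+/* Usage sketch (mirroring calls made elsewhere in this file, e.g. in
+ * si_muxenab): select the MISC1 function on package pin GPIO0.
+ *
+ *   si_gci_set_functionsel(sih, CC_PIN_GPIO_00, CC_FNSEL_MISC1);
+ */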
+
+/* Returns a given pin's fnsel value */
+uint32
+si_gci_get_functionsel(si_t *sih, uint32 pin)
+{
+ uint32 reg = 0, pos = 0, temp;
+
+ SI_MSG(("si_gci_get_functionsel: %d\n", pin));
+
+ si_gci_get_chipctrlreg_ringidx_base4(pin, &reg, &pos);
+ temp = si_gci_chipstatus(sih, reg);
+ return GCIGETNBL(temp, pos);
+}
+
+/* Sets fnsel value to IND for all the GPIO pads that have fnsel set to given argument */
+void
+si_gci_clear_functionsel(si_t *sih, uint8 fnsel)
+{
+ uint32 i;
+ SI_MSG(("si_gci_clear_functionsel: %d\n", fnsel));
+ for (i = 0; i <= CC_PIN_GPIO_LAST; i++) {
+ if (si_gci_get_functionsel(sih, i) == fnsel)
+ si_gci_set_functionsel(sih, i, CC_FNSEL_IND);
+ }
+}
+
+/** write 'val' to the gci chip control register indexed by 'reg' */
+uint32
+BCMPOSTTRAPFN(si_gci_chipcontrol)(si_t *sih, uint reg, uint32 mask, uint32 val)
+{
+ /* because NFLASH and GCI clash at 0xC00 */
+ if ((CCREV(sih->ccrev) == 38) && ((sih->chipst & (1 << 4)) != 0)) {
+ /* CC NFLASH exists; do not manipulate the GCI register */
+ ASSERT(0);
+ return ALLONES_32;
+ }
+
+ si_corereg(sih, GCI_CORE_IDX(sih), GCI_OFFSETOF(sih, gci_indirect_addr), ~0, reg);
+ return si_corereg(sih, GCI_CORE_IDX(sih), GCI_OFFSETOF(sih, gci_chipctrl), mask, val);
+}
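+/* Usage sketch (illustrative; the register index and bit are hypothetical):
+ * set bit 0 of GCI chipcontrol register 1 while leaving the other bits alone.
+ *
+ *   si_gci_chipcontrol(sih, 1, 0x1, 0x1);
+ */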
+
+/* Read the gci chip status register indexed by 'reg' */
+uint32
+BCMPOSTTRAPFN(si_gci_chipstatus)(si_t *sih, uint reg)
+{
+ /* because NFLASH and GCI clash at 0xC00 */
+ if ((CCREV(sih->ccrev) == 38) && ((sih->chipst & (1 << 4)) != 0)) {
+ /* CC NFLASH exists; do not manipulate the GCI register */
+ ASSERT(0);
+ return ALLONES_32;
+ }
+
+ si_corereg(sih, GCI_CORE_IDX(sih), GCI_OFFSETOF(sih, gci_indirect_addr), ~0, reg);
+ /* set mask and value to '0' to use si_corereg for read-only access */
+ return si_corereg(sih, GCI_CORE_IDX(sih), GCI_OFFSETOF(sih, gci_chipsts), 0, 0);
+}
+#endif /* !defined(BCMDONGLEHOST) */
+
+uint16
+BCMINITFN(si_chipid)(const si_t *sih)
+{
+ const si_info_t *sii = SI_INFO(sih);
+
+ return (sii->chipnew) ? sii->chipnew : sih->chip;
+}
+
+/* CHIP_ID's being mapped here should not be used anywhere else in the code */
+static void
+BCMATTACHFN(si_chipid_fixup)(si_t *sih)
+{
+ si_info_t *sii = SI_INFO(sih);
+
+ ASSERT(sii->chipnew == 0);
+ switch (sih->chip) {
+ case BCM4377_CHIP_ID:
+ sii->chipnew = sih->chip; /* save it */
+ sii->pub.chip = BCM4369_CHIP_ID; /* chip class */
+ break;
+ case BCM4375_CHIP_ID:
+ sii->chipnew = sih->chip; /* save it */
+ sii->pub.chip = BCM4375_CHIP_ID; /* chip class */
+ break;
+ case BCM4362_CHIP_ID:
+ sii->chipnew = sih->chip; /* save it */
+ sii->pub.chip = BCM4362_CHIP_ID; /* chip class */
+ break;
+ case BCM4356_CHIP_ID:
+ case BCM4371_CHIP_ID:
+ sii->chipnew = sih->chip; /* save it */
+ sii->pub.chip = BCM4354_CHIP_ID; /* chip class */
+ break;
+ default:
+ break;
+ }
+}
+
+#ifdef AXI_TIMEOUTS_NIC
+uint32
+BCMPOSTTRAPFN(si_clear_backplane_to_fast)(void *sih, void *addr)
+{
+ si_t *_sih = DISCARD_QUAL(sih, si_t);
+
+ if (CHIPTYPE(_sih->socitype) == SOCI_AI) {
+ return ai_clear_backplane_to_fast(_sih, addr);
+ }
+
+ return 0;
+}
+
+const si_axi_error_info_t *
+si_get_axi_errlog_info(const si_t *sih)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_AI) {
+ return (const si_axi_error_info_t *)sih->err_info;
+ }
+
+ return NULL;
+}
+
+void
+si_reset_axi_errlog_info(const si_t *sih)
+{
+ if (sih->err_info) {
+ sih->err_info->count = 0;
+ }
+}
+#endif /* AXI_TIMEOUTS_NIC */
+
+/* TODO: Can we allocate only one instance? */
+static int32
+BCMATTACHFN(si_alloc_wrapper)(si_info_t *sii)
+{
+ if (sii->osh) {
+ sii->axi_wrapper = (axi_wrapper_t *)MALLOCZ(sii->osh,
+ (sizeof(axi_wrapper_t) * SI_MAX_AXI_WRAPPERS));
+
+ if (sii->axi_wrapper == NULL) {
+ return BCME_NOMEM;
+ }
+ } else {
+ sii->axi_wrapper = NULL;
+ return BCME_ERROR;
+ }
+ return BCME_OK;
+}
+
+static void
+BCMATTACHFN(si_free_wrapper)(si_info_t *sii)
+{
+ if (sii->axi_wrapper) {
+
+ MFREE(sii->osh, sii->axi_wrapper, (sizeof(axi_wrapper_t) * SI_MAX_AXI_WRAPPERS));
+ }
+}
+
+static void *
+BCMATTACHFN(si_alloc_coresinfo)(si_info_t *sii, osl_t *osh, chipcregs_t *cc)
+{
+ if (CHIPTYPE(sii->pub.socitype) == SOCI_NCI) {
+ sii->nci_info = nci_init(&sii->pub, (void*)(uintptr)cc, sii->pub.bustype);
+
+ return sii->nci_info;
+
+ } else {
+
+#ifdef _RTE_
+ sii->cores_info = (si_cores_info_t *)&ksii_cores_info;
+#else
+ if (sii->cores_info == NULL) {
+ /* alloc si_cores_info_t */
+ if ((sii->cores_info = (si_cores_info_t *)MALLOCZ(osh,
+ sizeof(si_cores_info_t))) == NULL) {
+ SI_ERROR(("si_attach: malloc failed for cores_info! malloced"
+ " %d bytes\n", MALLOCED(osh)));
+ return (NULL);
+ }
+ } else {
+ ASSERT(sii->cores_info == &ksii_cores_info);
+
+ }
+#endif /* _RTE_ */
+ return sii->cores_info;
+ }
+}
+
+static void
+BCMATTACHFN(si_free_coresinfo)(si_info_t *sii, osl_t *osh)
+
+ if (CHIPTYPE(sii->pub.socitype) == SOCI_NCI) {
+ if (sii->nci_info) {
+ nci_uninit(sii->nci_info);
+ sii->nci_info = NULL;
+ }
+ } else {
+ if (sii->cores_info && (sii->cores_info != &ksii_cores_info)) {
+ MFREE(osh, sii->cores_info, sizeof(si_cores_info_t));
+ }
+ }
+}
+
+/**
+ * Allocate an si handle. This function may be called multiple times. This function is called by
+ * both si_attach() and si_kattach().
+ *
+ * vars - pointer to a to-be created pointer area for "environment" variables. Some callers of this
+ * function set 'vars' to NULL.
+ */
+static si_info_t *
+BCMATTACHFN(si_doattach)(si_info_t *sii, uint devid, osl_t *osh, volatile void *regs,
+ uint bustype, void *sdh, char **vars, uint *varsz)
+{
+ struct si_pub *sih = &sii->pub;
+ uint32 w, savewin;
+ chipcregs_t *cc;
+ char *pvars = NULL;
+ uint origidx;
+#if defined(NVSRCX)
+ char *sromvars;
+#endif
+ uint err_at = 0;
+
+ ASSERT(GOODREGS(regs));
+
+ savewin = 0;
+
+ sih->buscoreidx = BADIDX;
+ sii->device_removed = FALSE;
+
+ sii->curmap = regs;
+ sii->sdh = sdh;
+ sii->osh = osh;
+ sii->second_bar0win = ~0x0;
+ sih->enum_base = si_enum_base(devid);
+
+#if defined(AXI_TIMEOUTS_NIC)
+ sih->err_info = MALLOCZ(osh, sizeof(si_axi_error_info_t));
+ if (sih->err_info == NULL) {
+ SI_ERROR(("si_doattach: %zu bytes MALLOC FAILED",
+ sizeof(si_axi_error_info_t)));
+ }
+#endif /* AXI_TIMEOUTS_NIC */
+
+#if defined(AXI_TIMEOUTS_NIC) && defined(__linux__)
+ osl_set_bpt_cb(osh, (void *)si_clear_backplane_to_fast, (void *)sih);
+#endif /* AXI_TIMEOUTS_NIC && linux */
+
+ /* check to see if we are a si core mimicking a pci core */
+ if ((bustype == PCI_BUS) &&
+ (OSL_PCI_READ_CONFIG(sii->osh, PCI_SPROM_CONTROL, sizeof(uint32)) == 0xffffffff)) {
+ SI_ERROR(("si_doattach: incoming bus is PCI but it's a lie, switching to SI "
+ "devid:0x%x\n", devid));
+ bustype = SI_BUS;
+ }
+
+ /* find Chipcommon address */
+ if (bustype == PCI_BUS) {
+ savewin = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32));
+ /* PR 29857: init to core0 if bar0window is not programmed properly */
+ if (!GOODCOREADDR(savewin, SI_ENUM_BASE(sih)))
+ savewin = SI_ENUM_BASE(sih);
+ OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, SI_ENUM_BASE(sih));
+ if (!regs) {
+ err_at = 1;
+ goto exit;
+ }
+ cc = (chipcregs_t *)regs;
+#ifdef BCMSDIO
+ } else if ((bustype == SDIO_BUS) || (bustype == SPI_BUS)) {
+ cc = (chipcregs_t *)sii->curmap;
+#endif
+ } else {
+ cc = (chipcregs_t *)REG_MAP(SI_ENUM_BASE(sih), SI_CORE_SIZE);
+ }
+
+ sih->bustype = (uint16)bustype;
+#ifdef BCMBUSTYPE
+ if (bustype != BUSTYPE(bustype)) {
+ SI_ERROR(("si_doattach: bus type %d does not match configured bus type %d\n",
+ bustype, BUSTYPE(bustype)));
+ err_at = 2;
+ goto exit;
+ }
+#endif
+
+ /* bus/core/clk setup for register access */
+ if (!si_buscore_prep(sii, bustype, devid, sdh)) {
+ SI_ERROR(("si_doattach: si_core_clk_prep failed %d\n", bustype));
+ err_at = 3;
+ goto exit;
+ }
+
+ /* ChipID recognition.
+ * We assume we can read chipid at offset 0 from the regs arg.
+ * If we add other chiptypes (or if we need to support old sdio hosts w/o chipcommon),
+ * some way of recognizing them needs to be added here.
+ */
+ if (!cc) {
+ err_at = 3;
+ goto exit;
+ }
+ w = R_REG(osh, &cc->chipid);
+#if defined(BCMDONGLEHOST)
+ /* please refer to RB:13157 */
+ if ((w & 0xfffff) == 148277) w -= 65532;
+#endif /* defined(BCMDONGLEHOST) */
+ sih->socitype = (w & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
+ /* Might as well fill in chip id, rev & pkg */
+ sih->chip = w & CID_ID_MASK;
+ sih->chiprev = (w & CID_REV_MASK) >> CID_REV_SHIFT;
+ sih->chippkg = (w & CID_PKG_MASK) >> CID_PKG_SHIFT;
+
+#if defined(BCMSDIO) && (defined(HW_OOB) || defined(FORCE_WOWLAN))
+ dhd_conf_set_hw_oob_intr(sdh, sih);
+#endif
+
+ si_chipid_fixup(sih);
+
+ sih->issim = IS_SIM(sih->chippkg);
+
+ if (MULTIBP_CAP(sih)) {
+ sih->_multibp_enable = TRUE;
+ }
+
+ /* scan for cores */
+ if (CHIPTYPE(sii->pub.socitype) == SOCI_NCI) {
+
+ if (si_alloc_coresinfo(sii, osh, cc) == NULL) {
+ err_at = 4;
+ goto exit;
+ }
+ ASSERT(sii->nci_info);
+
+ if (!FWSIGN_ENAB()) {
+ if ((si_alloc_wrapper(sii)) != BCME_OK) {
+ err_at = 5;
+ goto exit;
+ }
+ }
+
+ if ((sii->numcores = nci_scan(sih)) == 0u) {
+ err_at = 6;
+ goto exit;
+ } else {
+ if (!FWSIGN_ENAB()) {
+ nci_dump_erom(sii->nci_info);
+ }
+ }
+ } else {
+
+ if (si_alloc_coresinfo(sii, osh, cc) == NULL) {
+ err_at = 7;
+ goto exit;
+ }
+
+ if (CHIPTYPE(sii->pub.socitype) == SOCI_SB) {
+ SI_MSG(("Found chip type SB (0x%08x)\n", w));
+ sb_scan(&sii->pub, regs, devid);
+ } else if ((CHIPTYPE(sii->pub.socitype) == SOCI_AI) ||
+ (CHIPTYPE(sii->pub.socitype) == SOCI_NAI) ||
+ (CHIPTYPE(sii->pub.socitype) == SOCI_DVTBUS)) {
+
+ if (CHIPTYPE(sii->pub.socitype) == SOCI_AI)
+ SI_MSG(("Found chip type AI (0x%08x)\n", w));
+ else if (CHIPTYPE(sii->pub.socitype) == SOCI_NAI)
+ SI_MSG(("Found chip type NAI (0x%08x)\n", w));
+ else
+ SI_MSG(("Found chip type DVT (0x%08x)\n", w));
+ /* pass chipc address instead of original core base */
+ if ((si_alloc_wrapper(sii)) != BCME_OK) {
+ err_at = 8;
+ goto exit;
+ }
+ ai_scan(&sii->pub, (void *)(uintptr)cc, devid);
+ /* make sure the wrappers are properly accounted for */
+ if (sii->axi_num_wrappers == 0) {
+ SI_ERROR(("FATAL: Wrapper count 0\n"));
+ err_at = 16;
+ goto exit;
+ }
+ }
+ else if (CHIPTYPE(sii->pub.socitype) == SOCI_UBUS) {
+ SI_MSG(("Found chip type UBUS (0x%08x), chip id = 0x%4x\n", w, sih->chip));
+ /* pass chipc address instead of original core base */
+ ub_scan(&sii->pub, (void *)(uintptr)cc, devid);
+ } else {
+ SI_ERROR(("Found chip of unknown type (0x%08x)\n", w));
+ err_at = 9;
+ goto exit;
+ }
+ }
+ /* no cores found, bail out */
+ if (sii->numcores == 0) {
+ err_at = 10;
+ goto exit;
+ }
+ /* bus/core/clk setup */
+ origidx = SI_CC_IDX;
+ if (!si_buscore_setup(sii, cc, bustype, savewin, &origidx, regs)) {
+ err_at = 11;
+ goto exit;
+ }
+
+ /* JIRA: SWWLAN-98321: SPROM read showing wrong values */
+ /* Set the clkdiv2 divisor bits (2:0) to 0x4 if srom is present */
+ if (bustype == SI_BUS) {
+ uint32 clkdiv2, sromprsnt, capabilities, srom_supported;
+ capabilities = R_REG(osh, &cc->capabilities);
+ srom_supported = capabilities & SROM_SUPPORTED;
+ if (srom_supported) {
+ sromprsnt = R_REG(osh, &cc->sromcontrol);
+ sromprsnt = sromprsnt & SROM_PRSNT_MASK;
+ if (sromprsnt) {
+ /* SROM clock comes from backplane clock/div2. Must be <= 1 MHz */
+ clkdiv2 = (R_REG(osh, &cc->clkdiv2) & ~CLKD2_SROM);
+ clkdiv2 |= CLKD2_SROMDIV_192;
+ W_REG(osh, &cc->clkdiv2, clkdiv2);
+ }
+ }
+ }
+
+ if (bustype == PCI_BUS) {
+#if !defined(BCMDONGLEHOST)
+ /* JIRA:SWWLAN-18243: SPROM access taking too long */
+ /* not required for 43602 */
+ if (((CHIPID(sih->chip) == BCM4360_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM43460_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM4352_CHIP_ID)) &&
+ (CHIPREV(sih->chiprev) <= 2)) {
+ pcie_disable_TL_clk_gating(sii->pch);
+ pcie_set_L1_entry_time(sii->pch, 0x40);
+ }
+#endif /* BCMDONGLEHOST */
+
+ }
+#ifdef BCM_SDRBL
+ /* 4360 rom bootloader in the PCIE case: if SDR is enabled but protection is
+ * not turned on, then we want to hold the ARM in reset.
+ * Bottom line: in the sdrenable case, we allow the ARM to boot only when
+ * protection is turned on.
+ */
+ if (CHIP_HOSTIF_PCIE(&(sii->pub))) {
+ uint32 sflags = si_arm_sflags(&(sii->pub));
+
+ /* If SDR is enabled but protection is not turned on
+ * then we want to force arm to WFI.
+ */
+ if ((sflags & (SISF_SDRENABLE | SISF_TCMPROT)) == SISF_SDRENABLE) {
+ disable_arm_irq();
+ while (1) {
+ hnd_cpu_wait(sih);
+ }
+ }
+ }
+#endif /* BCM_SDRBL */
+#ifdef SI_SPROM_PROBE
+ si_sprom_init(sih);
+#endif /* SI_SPROM_PROBE */
+
+#if !defined(BCMDONGLEHOST)
+ if (!FWSIGN_ENAB()) {
+ /* Init nvram from flash if it exists */
+ if (nvram_init(&(sii->pub)) != BCME_OK) {
+ SI_ERROR(("si_doattach: nvram_init failed \n"));
+ goto exit;
+ }
+ }
+
+ /* Init nvram from sprom/otp if they exist */
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+
+#ifdef DONGLEBUILD
+#if !defined(NVSRCX)
+ /* Init nvram from sprom/otp if they exist and are not already inited */
+ if (!FWSIGN_ENAB() && si_getkvars()) {
+ *vars = si_getkvars();
+ *varsz = si_getkvarsz();
+ }
+ else
+#endif
+#endif /* DONGLEBUILD */
+ {
+#if defined(NVSRCX)
+ sromvars = srom_get_sromvars();
+ if (sromvars == NULL) {
+ if (srom_var_init(&sii->pub, BUSTYPE(bustype), (void *)regs,
+ sii->osh, &sromvars, varsz)) {
+ err_at = 12;
+ goto exit;
+ }
+ }
+#else
+ if (!FWSIGN_ENAB()) {
+ if (srom_var_init(&sii->pub, BUSTYPE(bustype), (void *)regs,
+ sii->osh, vars, varsz)) {
+ err_at = 13;
+ goto exit;
+ }
+ }
+#endif /* NVSRCX */
+ }
+ GCC_DIAGNOSTIC_POP();
+
+ pvars = vars ? *vars : NULL;
+
+ si_nvram_process(sii, pvars);
+
+ /* xtalfreq is required to program the open-loop calibration support changes */
+ sii->xtalfreq = getintvar(NULL, rstr_xtalfreq);
+ /* === NVRAM, clock is ready === */
+#else
+ pvars = NULL;
+ BCM_REFERENCE(pvars);
+#endif /* !BCMDONGLEHOST */
+
+#if !defined(BCMDONGLEHOST)
+#if defined(BCMSRTOPOFF) && !defined(BCMSRTOPOFF_DISABLED)
+ _srtopoff_enab = (bool)getintvar(NULL, rstr_srtopoff_enab);
+#endif
+
+ if (!FWSIGN_ENAB()) {
+ if (HIB_EXT_WAKEUP_CAP(sih)) {
+ sii->lhl_ps_mode = (uint8)getintvar(NULL, rstr_lhl_ps_mode);
+
+ if (getintvar(NULL, rstr_ext_wakeup_dis)) {
+ sii->hib_ext_wakeup_enab = FALSE;
+ } else if (BCMSRTOPOFF_ENAB()) {
+ /* 4387 has a GPIO false-wakeup issue that still needs to be resolved */
+ sii->hib_ext_wakeup_enab = TRUE;
+ } else if (LHL_IS_PSMODE_1(sih)) {
+ sii->hib_ext_wakeup_enab = TRUE;
+ } else {
+ sii->hib_ext_wakeup_enab = FALSE;
+ }
+ }
+
+ sii->rfldo3p3_war = (bool)getintvar(NULL, rstr_rfldo3p3_cap_war);
+ }
+#endif /* !defined(BCMDONGLEHOST) */
+
+ if (!si_onetimeinit) {
+#if !defined(BCMDONGLEHOST)
+ char *val;
+
+ (void) val;
+ if (!FWSIGN_ENAB()) {
+ /* Cache nvram override to min mask */
+ if ((val = getvar(NULL, rstr_rmin)) != NULL) {
+ sii->min_mask_valid = TRUE;
+ sii->nvram_min_mask = (uint32)bcm_strtoul(val, NULL, 0);
+ } else {
+ sii->min_mask_valid = FALSE;
+ }
+ /* Cache nvram override to max mask */
+ if ((val = getvar(NULL, rstr_rmax)) != NULL) {
+ sii->max_mask_valid = TRUE;
+ sii->nvram_max_mask = (uint32)bcm_strtoul(val, NULL, 0);
+ } else {
+ sii->max_mask_valid = FALSE;
+ }
+
+#ifdef DONGLEBUILD
+ /* Handle armclk frequency setting from NVRAM file */
+ if (BCM4369_CHIP(sih->chip) || BCM4362_CHIP(sih->chip) ||
+ BCM4389_CHIP(sih->chip) ||
+ BCM4388_CHIP(sih->chip) || BCM4397_CHIP(sih->chip) || FALSE) {
+ if ((val = getvar(NULL, rstr_armclk)) != NULL) {
+ sii->armpllclkfreq = (uint32)bcm_strtoul(val, NULL, 0);
+ ASSERT(sii->armpllclkfreq > 0);
+ } else {
+ sii->armpllclkfreq = 0;
+ }
+ }
+
+#endif /* DONGLEBUILD */
+ }
+
+#endif /* !BCMDONGLEHOST */
+
+#if defined(CONFIG_XIP) && defined(BCMTCAM)
+ /* patch the ROM if there are any patch pairs from OTP/SPROM */
+ if (patch_pair) {
+
+#if defined(__ARM_ARCH_7R__)
+ hnd_tcam_bootloader_load(si_setcore(sih, ARMCR4_CORE_ID, 0), pvars);
+#elif defined(__ARM_ARCH_7A__)
+ hnd_tcam_bootloader_load(si_setcore(sih, SYSMEM_CORE_ID, 0), pvars);
+#else
+ hnd_tcam_bootloader_load(si_setcore(sih, SOCRAM_CORE_ID, 0), pvars);
+#endif
+ si_setcoreidx(sih, origidx);
+ }
+#endif /* CONFIG_XIP && BCMTCAM */
+
+ if (CCREV(sii->pub.ccrev) >= 20) {
+ uint32 gpiopullup = 0, gpiopulldown = 0;
+ cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+ ASSERT(cc != NULL);
+
+#if !defined(BCMDONGLEHOST) /* if not a DHD build */
+ if (getvar(pvars, rstr_gpiopulldown) != NULL) {
+ uint32 value;
+ value = getintvar(pvars, rstr_gpiopulldown);
+ if (value != 0xFFFFFFFF) { /* non-populated SROM fields are ffff */
+ gpiopulldown |= value;
+ }
+ }
+#endif /* !BCMDONGLEHOST */
+
+ W_REG(osh, &cc->gpiopullup, gpiopullup);
+ W_REG(osh, &cc->gpiopulldown, gpiopulldown);
+ si_setcoreidx(sih, origidx);
+ }
+
+#ifdef DONGLEBUILD
+ /* Ensure gci is initialized before PMU as PLL init needs to acquire gci semaphore */
+ hnd_gci_init(sih);
+#endif /* DONGLEBUILD */
+
+#if defined(BT_WLAN_REG_ON_WAR)
+ /*
+ * 4389B0/C0 - WLAN and BT turn on WAR - synchronize WLAN and BT firmware using GCI
+ * semaphore - THREAD_0_GCI_SEM_3_ID to ensure that simultaneous register accesses
+ * do not occur. The WLAN firmware will acquire the semaphore just to ensure that,
+ * if the BT firmware is already executing the WAR, it waits until that finishes.
+ * In the BT firmware, checking the WL_REG_ON status is sufficient to decide whether
+ * to apply the WAR or not (i.e., whether WLAN is turned ON/OFF).
+ */
+ if ((hnd_gcisem_acquire(GCI_BT_WLAN_REG_ON_WAR_SEM, TRUE,
+ GCI_BT_WLAN_REG_ON_WAR_SEM_TIMEOUT) != BCME_OK)) {
+ err_at = 14;
+ hnd_gcisem_set_err(GCI_BT_WLAN_REG_ON_WAR_SEM);
+ goto exit;
+ }
+
+ /* WLAN/BT turn On WAR - Remove wlsc_btsc_prisel override after semaphore acquire
+ * BT sets the override at power up when WL_REG_ON is low - wlsc_btsc_prisel is in
+ * an undefined state when wlan_reg_on is low
+ */
+ si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_23,
+ (CC_GCI_CHIPCTRL_23_WLSC_BTSC_PRISEL_FORCE_MASK |
+ CC_GCI_CHIPCTRL_23_WLSC_BTSC_PRISEL_VAL_MASK), 0u);
+
+ if ((hnd_gcisem_release(GCI_BT_WLAN_REG_ON_WAR_SEM) != BCME_OK)) {
+ hnd_gcisem_set_err(GCI_BT_WLAN_REG_ON_WAR_SEM);
+ err_at = 15;
+ goto exit;
+ }
+#endif /* BT_WLAN_REG_ON_WAR */
+
+ /* Skip PMU initialization from the Dongle Host.
+ * Firmware will take care of it when it comes up.
+ */
+#if !defined(BCMDONGLEHOST)
+ /* PMU specific initializations */
+ if (PMUCTL_ENAB(sih)) {
+ uint32 xtalfreq;
+ si_pmu_init(sih, sii->osh);
+ si_pmu_chip_init(sih, sii->osh);
+ xtalfreq = getintvar(pvars, rstr_xtalfreq);
+#if defined(WL_FWSIGN)
+ if (FWSIGN_ENAB()) {
+ xtalfreq = XTALFREQ_KHZ;
+ }
+#endif /* WL_FWSIGN */
+
+ /*
+ * workaround for chips that don't support an external LPO, where the ALP clock
+ * cannot be measured accurately:
+ */
+ switch (CHIPID(sih->chip)) {
+ CASE_BCM43602_CHIP:
+ xtalfreq = 40000;
+ break;
+ case BCM4369_CHIP_GRPID:
+ if (xtalfreq == 0)
+ xtalfreq = 37400;
+ break;
+ default:
+ break;
+ }
+
+ /* If xtalfreq var not available, try to measure it */
+ if (xtalfreq == 0)
+ xtalfreq = si_pmu_measure_alpclk(sih, sii->osh);
+
+ sii->xtalfreq = xtalfreq;
+ si_pmu_pll_init(sih, sii->osh, xtalfreq);
+
+ if (!FWSIGN_ENAB()) {
+ /* configure default spurmode */
+ sii->spurmode = getintvar(pvars, rstr_spurconfig) & 0xf;
+
+#if defined(SAVERESTORE)
+ /* Only needs to be done once.
+ * Needs this before si_pmu_res_init() to use sr_isenab()
+ */
+ if (SR_ENAB()) {
+ sr_save_restore_init(sih);
+ }
+#endif
+
+ /* TODO: should move the per core srpwr out of
+ * si_doattach() to a function where it knows
+ * which core it should enable the power domain
+ * request for...
+ */
+ if (SRPWR_CAP(sih) && !SRPWR_ENAB()) {
+ uint32 domain = SRPWR_DMN3_MACMAIN_MASK;
+
+#if defined(WLRSDB) && !defined(WLRSDB_DISABLED)
+ domain |= SRPWR_DMN2_MACAUX_MASK;
+#endif /* WLRSDB && !WLRSDB_DISABLED */
+
+ if (si_scan_core_present(sih)) {
+ domain |= SRPWR_DMN4_MACSCAN_MASK;
+ }
+
+ si_srpwr_request(sih, domain, domain);
+ }
+ }
+
+ si_pmu_res_init(sih, sii->osh);
+ si_pmu_swreg_init(sih, sii->osh);
+#ifdef BCMGCISHM
+ hnd_gcishm_init(sih);
+#endif
+ }
+#endif /* !defined(BCMDONGLEHOST) */
+#ifdef _RTE_
+ si_onetimeinit = TRUE;
+#endif
+ }
+
+#if !defined(BCMDONGLEHOST)
+
+ si_lowpwr_opt(sih);
+
+ if (!FWSIGN_ENAB()) {
+ if (PCIE(sii)) {
+ ASSERT(sii->pch != NULL);
+ pcicore_attach(sii->pch, pvars, SI_DOATTACH);
+ }
+ }
+
+ if ((CHIPID(sih->chip) == BCM43012_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM43013_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM43014_CHIP_ID) ||
+ (CCREV(sih->ccrev) >= 62)) {
+ /* Clear SFlash clock request */
+ CHIPC_REG(sih, clk_ctl_st, CCS_SFLASH_CLKREQ, 0);
+ }
+
+#ifdef SECI_UART
+ /* Enable pull up on fast_uart_rx and fast_uart_cts_in
+ * when fast uart is disabled.
+ */
+ if (getvar(pvars, rstr_fuart_pup_rx_cts) != NULL) {
+ w = getintvar(pvars, rstr_fuart_pup_rx_cts);
+ if (w)
+ fuart_pullup_rx_cts_enab = TRUE;
+ }
+#endif
+
+ /* configure default pinmux enables for the chip */
+ if (getvar(pvars, rstr_muxenab) != NULL) {
+ w = getintvar(pvars, rstr_muxenab);
+ si_muxenab((si_t *)sii, w);
+ }
+
+ /* configure default swd enables for the chip */
+ if (getvar(pvars, rstr_swdenab) != NULL) {
+ w = getintvar(pvars, rstr_swdenab);
+ si_swdenable((si_t *)sii, w);
+ }
+
+ sii->device_wake_opt = CC_GCI_GPIO_INVALID;
+#endif /* !BCMDONGLEHOST */
+ /* clear any previous epidiag-induced target abort */
+ ASSERT(!si_taclear(sih, FALSE));
+
+#if defined(BCMPMU_STATS) && !defined(BCMPMU_STATS_DISABLED)
+ si_pmustatstimer_init(sih);
+#endif /* BCMPMU_STATS */
+
+#ifdef BOOTLOADER_CONSOLE_OUTPUT
+ /* Enable console prints */
+ si_muxenab(sii, 3);
+#endif
+
+ if (((PCIECOREREV(sih->buscorerev) == 66) || (PCIECOREREV(sih->buscorerev) == 68)) &&
+ CST4378_CHIPMODE_BTOP(sih->chipst)) {
+ /*
+ * HW4378-413 :
+ * BT oob connections for pcie function 1 seen at oob_ain[5] instead of oob_ain[1]
+ */
+ si_oob_war_BT_F1(sih);
+ }
+
+ return (sii);
+
+exit:
+#if !defined(BCMDONGLEHOST)
+ if (BUSTYPE(sih->bustype) == PCI_BUS) {
+ if (sii->pch)
+ pcicore_deinit(sii->pch);
+ sii->pch = NULL;
+ }
+#endif /* !defined(BCMDONGLEHOST) */
+
+ if (err_at) {
+ SI_ERROR(("si_doattach Failed. Error at %d\n", err_at));
+ si_free_coresinfo(sii, osh);
+ si_free_wrapper(sii);
+ }
+ return NULL;
+}
+
+/** may be called with core in reset */
+void
+BCMATTACHFN(si_detach)(si_t *sih)
+{
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+ uint idx;
+
+#if !defined(BCMDONGLEHOST)
+ struct si_pub *si_local = NULL;
+ bcopy(&sih, &si_local, sizeof(si_t*));
+#endif /* !BCMDONGLEHOST */
+
+#ifdef BCM_SH_SFLASH
+ if (BCM_SH_SFLASH_ENAB()) {
+ sh_sflash_detach(sii->osh, sih);
+ }
+#endif
+ if (BUSTYPE(sih->bustype) == SI_BUS) {
+ if (CHIPTYPE(sii->pub.socitype) == SOCI_NCI) {
+ if (sii->nci_info) {
+ nci_uninit(sii->nci_info);
+ sii->nci_info = NULL;
+
+ /* TODO: REG_UNMAP */
+ }
+ } else {
+ for (idx = 0; idx < SI_MAXCORES; idx++) {
+ if (cores_info->regs[idx]) {
+ REG_UNMAP(cores_info->regs[idx]);
+ cores_info->regs[idx] = NULL;
+ }
+ }
+ }
+ }
+
+#if !defined(BCMDONGLEHOST)
+ srom_var_deinit(si_local);
+ nvram_exit(si_local); /* free up nvram buffers */
+#endif /* !BCMDONGLEHOST */
+
+#if !defined(BCMDONGLEHOST)
+ if (BUSTYPE(sih->bustype) == PCI_BUS) {
+ if (sii->pch)
+ pcicore_deinit(sii->pch);
+ sii->pch = NULL;
+ }
+#endif /* !defined(BCMDONGLEHOST) */
+
+ si_free_coresinfo(sii, sii->osh);
+
+#if defined(AXI_TIMEOUTS_NIC)
+ if (sih->err_info) {
+ MFREE(sii->osh, sih->err_info, sizeof(si_axi_error_info_t));
+ sii->pub.err_info = NULL;
+ }
+#endif /* AXI_TIMEOUTS_NIC */
+
+ si_free_wrapper(sii);
+
+#ifdef BCMDVFS
+ if (BCMDVFS_ENAB()) {
+ si_dvfs_info_deinit(sih, sii->osh);
+ }
+#endif /* BCMDVFS */
+
+ if (sii != &ksii) {
+ MFREE(sii->osh, sii, sizeof(si_info_t));
+ }
+}
+
+void *
+BCMPOSTTRAPFN(si_osh)(si_t *sih)
+{
+ const si_info_t *sii;
+
+ sii = SI_INFO(sih);
+ return sii->osh;
+}
+
+void
+si_setosh(si_t *sih, osl_t *osh)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+ if (sii->osh != NULL) {
+ SI_ERROR(("osh is already set....\n"));
+ ASSERT(!sii->osh);
+ }
+ sii->osh = osh;
+}
+
+/** register driver interrupt disabling and restoring callback functions */
+void
+BCMATTACHFN(si_register_intr_callback)(si_t *sih, void *intrsoff_fn, void *intrsrestore_fn,
+ void *intrsenabled_fn, void *intr_arg)
+{
+ si_info_t *sii = SI_INFO(sih);
+ sii->intr_arg = intr_arg;
+ sii->intrsoff_fn = (si_intrsoff_t)intrsoff_fn;
+ sii->intrsrestore_fn = (si_intrsrestore_t)intrsrestore_fn;
+ sii->intrsenabled_fn = (si_intrsenabled_t)intrsenabled_fn;
+ /* Save the current core id. When this function is called, the current core
+ * must be the core which provides the driver functions (il, et, wl, etc.).
+ */
+ sii->dev_coreid = si_coreid(sih);
+}
+
+void
+BCMPOSTTRAPFN(si_deregister_intr_callback)(si_t *sih)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+ sii->intrsoff_fn = NULL;
+ sii->intrsrestore_fn = NULL;
+ sii->intrsenabled_fn = NULL;
+}
+
+uint
+BCMPOSTTRAPFN(si_intflag)(si_t *sih)
+{
+ const si_info_t *sii = SI_INFO(sih);
+
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_intflag(sih);
+ else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
+ (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
+ (CHIPTYPE(sih->socitype) == SOCI_NAI))
+ return R_REG(sii->osh, ((uint32 *)(uintptr)
+ (sii->oob_router + OOB_STATUSA)));
+ else if (CHIPTYPE(sih->socitype) == SOCI_NCI)
+ return nci_intflag(sih);
+ else {
+ ASSERT(0);
+ return 0;
+ }
+}
+
+uint
+si_flag(si_t *sih)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_flag(sih);
+ else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
+ (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
+ (CHIPTYPE(sih->socitype) == SOCI_NAI))
+ return ai_flag(sih);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ return ub_flag(sih);
+ else if (CHIPTYPE(sih->socitype) == SOCI_NCI)
+ return nci_flag(sih);
+ else {
+ ASSERT(0);
+ return 0;
+ }
+}
+
+uint
+si_flag_alt(const si_t *sih)
+{
+ if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
+ (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
+ (CHIPTYPE(sih->socitype) == SOCI_NAI))
+ return ai_flag_alt(sih);
+ else if (CHIPTYPE(sih->socitype) == SOCI_NCI)
+ return nci_flag_alt(sih);
+ else {
+ ASSERT(0);
+ return 0;
+ }
+}
+
+void
+BCMATTACHFN(si_setint)(const si_t *sih, int siflag)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ sb_setint(sih, siflag);
+ else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
+ (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
+ (CHIPTYPE(sih->socitype) == SOCI_NAI))
+ ai_setint(sih, siflag);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ ub_setint(sih, siflag);
+ else if (CHIPTYPE(sih->socitype) == SOCI_NCI)
+ nci_setint(sih, siflag);
+ else
+ ASSERT(0);
+}
+
+uint32
+si_oobr_baseaddr(const si_t *sih, bool second)
+{
+ const si_info_t *sii = SI_INFO(sih);
+
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return 0;
+ else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
+ (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
+ (CHIPTYPE(sih->socitype) == SOCI_NAI))
+ return (second ? sii->oob_router1 : sii->oob_router);
+ else if (CHIPTYPE(sih->socitype) == SOCI_NCI)
+ return nci_oobr_baseaddr(sih, second);
+ else {
+ ASSERT(0);
+ return 0;
+ }
+}
+
+uint
+BCMPOSTTRAPFN(si_coreid)(const si_t *sih)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
+ if (CHIPTYPE(sii->pub.socitype) == SOCI_NCI) {
+ return nci_coreid(sih, sii->curidx);
+ } else
+ {
+ return cores_info->coreid[sii->curidx];
+ }
+}
+
+uint
+BCMPOSTTRAPFN(si_coreidx)(const si_t *sih)
+{
+ const si_info_t *sii;
+
+ sii = SI_INFO(sih);
+ return sii->curidx;
+}
+
+uint
+si_get_num_cores(const si_t *sih)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ return sii->numcores;
+}
+
+volatile void *
+si_d11_switch_addrbase(si_t *sih, uint coreunit)
+{
+ return si_setcore(sih, D11_CORE_ID, coreunit);
+}
+
+/** return the core-type instantiation # of the current core */
+uint
+si_coreunit(const si_t *sih)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
+ uint idx;
+ uint coreid;
+ uint coreunit;
+ uint i;
+
+ if (CHIPTYPE(sii->pub.socitype) == SOCI_NCI) {
+ return nci_coreunit(sih);
+ }
+
+ coreunit = 0;
+
+ idx = sii->curidx;
+
+ ASSERT(GOODREGS(sii->curmap));
+ coreid = si_coreid(sih);
+
+ /* count the cores of our type */
+ for (i = 0; i < idx; i++)
+ if (cores_info->coreid[i] == coreid)
+ coreunit++;
+
+ return (coreunit);
+}
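+
+/* A worked illustration of the counting logic above (hypothetical core list,
+ * not from the original source): with cores [CC, D11, PMU, D11], the D11 at
+ * index 3 is the second D11 instance, so si_coreunit() returns 1 when it is
+ * the current core, while the D11 at index 1 returns 0.
+ */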
+
+uint
+BCMATTACHFN(si_corevendor)(const si_t *sih)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_corevendor(sih);
+ else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
+ (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
+ (CHIPTYPE(sih->socitype) == SOCI_NAI))
+ return ai_corevendor(sih);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ return ub_corevendor(sih);
+ else if (CHIPTYPE(sih->socitype) == SOCI_NCI)
+ return nci_corevendor(sih);
+ else {
+ ASSERT(0);
+ return 0;
+ }
+}
+
+bool
+BCMINITFN(si_backplane64)(const si_t *sih)
+{
+ return ((sih->cccaps & CC_CAP_BKPLN64) != 0);
+}
+
+uint
+BCMPOSTTRAPFN(si_corerev)(const si_t *sih)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_corerev(sih);
+ else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
+ (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
+ (CHIPTYPE(sih->socitype) == SOCI_NAI))
+ return ai_corerev(sih);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ return ub_corerev(sih);
+ else if (CHIPTYPE(sih->socitype) == SOCI_NCI)
+ return nci_corerev(sih);
+ else {
+ ASSERT(0);
+ return 0;
+ }
+}
+
+uint
+si_corerev_minor(const si_t *sih)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_AI) {
+ return ai_corerev_minor(sih);
+ }
+ else if (CHIPTYPE(sih->socitype) == SOCI_NCI)
+ return nci_corerev_minor(sih);
+ else {
+ return 0;
+ }
+}
+
+/* return index of coreid or BADIDX if not found */
+uint
+BCMPOSTTRAPFN(si_findcoreidx)(const si_t *sih, uint coreid, uint coreunit)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
+ uint found;
+ uint i;
+
+ if (CHIPTYPE(sih->socitype) == SOCI_NCI) {
+ return nci_findcoreidx(sih, coreid, coreunit);
+ }
+
+ found = 0;
+
+ for (i = 0; i < sii->numcores; i++) {
+ if (cores_info->coreid[i] == coreid) {
+ if (found == coreunit)
+ return (i);
+ found++;
+ }
+ }
+
+ return (BADIDX);
+}
+
+bool
+BCMPOSTTRAPFN(si_hwa_present)(const si_t *sih)
+{
+ if (si_findcoreidx(sih, HWA_CORE_ID, 0) != BADIDX) {
+ return TRUE;
+ }
+ return FALSE;
+}
+
+bool
+BCMPOSTTRAPFN(si_sysmem_present)(const si_t *sih)
+{
+ if (si_findcoreidx(sih, SYSMEM_CORE_ID, 0) != BADIDX) {
+ return TRUE;
+ }
+ return FALSE;
+}
+
+/* return the coreid of the core at index */
+uint
+si_findcoreid(const si_t *sih, uint coreidx)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ const si_cores_info_t *cores_info = sii->cores_info;
+
+ if (coreidx >= sii->numcores) {
+ return NODEV_CORE_ID;
+ }
+ if (CHIPTYPE(sih->socitype) == SOCI_NCI) {
+ return nci_coreid(sih, coreidx);
+ }
+ return cores_info->coreid[coreidx];
+}
+
+/** return the total number of coreunits of 'coreid', or zero if not found */
+uint
+BCMPOSTTRAPFN(si_numcoreunits)(const si_t *sih, uint coreid)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ const si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+ uint found = 0;
+ uint i;
+
+ if (CHIPTYPE(sih->socitype) == SOCI_NCI) {
+ return nci_numcoreunits(sih, coreid);
+ }
+ for (i = 0; i < sii->numcores; i++) {
+ if (cores_info->coreid[i] == coreid) {
+ found++;
+ }
+ }
+
+ return found;
+}
+
+/** return total D11 coreunits */
+uint
+BCMPOSTTRAPRAMFN(si_numd11coreunits)(const si_t *sih)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_NCI) {
+ return nci_numcoreunits(sih, D11_CORE_ID);
+ }
+ return si_numcoreunits(sih, D11_CORE_ID);
+}
+
+/** return list of found cores */
+uint
+si_corelist(const si_t *sih, uint coreid[])
+{
+ const si_info_t *sii = SI_INFO(sih);
+ const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
+
+ if (CHIPTYPE(sih->socitype) == SOCI_NCI) {
+ return nci_corelist(sih, coreid);
+ }
+ (void)memcpy_s(coreid, (sii->numcores * sizeof(uint)), cores_info->coreid,
+ (sii->numcores * sizeof(uint)));
+ return (sii->numcores);
+}
+
+/** return current wrapper mapping */
+void *
+BCMPOSTTRAPFN(si_wrapperregs)(const si_t *sih)
+{
+ const si_info_t *sii = SI_INFO(sih);
+
+ ASSERT(GOODREGS(sii->curwrap));
+
+ return (sii->curwrap);
+}
+
+/** return current register mapping */
+volatile void *
+BCMPOSTTRAPFN(si_coreregs)(const si_t *sih)
+{
+ const si_info_t *sii = SI_INFO(sih);
+
+ ASSERT(GOODREGS(sii->curmap));
+
+ return (sii->curmap);
+}
+
+/**
+ * This function changes logical "focus" to the indicated core;
+ * must be called with interrupts off.
+ * Moreover, callers should keep interrupts off while switching out of and back to the d11 core.
+ */
+volatile void *
+BCMPOSTTRAPFN(si_setcore)(si_t *sih, uint coreid, uint coreunit)
+{
+ si_info_t *sii = SI_INFO(sih);
+ uint idx;
+
+ idx = si_findcoreidx(sih, coreid, coreunit);
+ if (!GOODIDX(idx, sii->numcores)) {
+ return (NULL);
+ }
+
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_setcoreidx(sih, idx);
+ else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
+ (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
+ (CHIPTYPE(sih->socitype) == SOCI_NAI))
+ return ai_setcoreidx(sih, idx);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ return ub_setcoreidx(sih, idx);
+ else if (CHIPTYPE(sih->socitype) == SOCI_NCI)
+ return nci_setcoreidx(sih, idx);
+ else {
+ ASSERT(0);
+ return NULL;
+ }
+}
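+
+/* A minimal usage sketch of si_setcore() (hypothetical helper, compiled out):
+ * save the current index, focus a core, access it, then restore. Interrupts
+ * are assumed to be held off by the caller, per the rules above.
+ */
+#ifdef SI_UTILS_USAGE_SKETCH /* illustration only, never compiled */
+static bool
+example_d11_is_up(si_t *sih)
+{
+ uint origidx = si_coreidx(sih);
+ bool up = FALSE;
+
+ /* focus the first d11 core, if present */
+ if (si_setcore(sih, D11_CORE_ID, 0) != NULL)
+  up = si_iscoreup(sih);
+ /* restore the previously focused core */
+ si_setcoreidx(sih, origidx);
+ return up;
+}
+#endif /* SI_UTILS_USAGE_SKETCH */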
+
+volatile void *
+BCMPOSTTRAPFN(si_setcoreidx)(si_t *sih, uint coreidx)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_setcoreidx(sih, coreidx);
+ else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
+ (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
+ (CHIPTYPE(sih->socitype) == SOCI_NAI))
+ return ai_setcoreidx(sih, coreidx);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ return ub_setcoreidx(sih, coreidx);
+ else if (CHIPTYPE(sih->socitype) == SOCI_NCI)
+ return nci_setcoreidx(sih, coreidx);
+ else {
+ ASSERT(0);
+ return NULL;
+ }
+}
+
+/** Turn off interrupts as required by sb_setcore, before switching cores */
+volatile void *
+BCMPOSTTRAPFN(si_switch_core)(si_t *sih, uint coreid, uint *origidx, bcm_int_bitmask_t *intr_val)
+{
+ volatile void *cc;
+ si_info_t *sii = SI_INFO(sih);
+
+ if (SI_FAST(sii)) {
+ /* Overload the origidx variable to remember the coreid;
+ * this works because the core ids cannot be confused with
+ * core indices.
+ */
+ *origidx = coreid;
+ if (coreid == CC_CORE_ID)
+ return (volatile void *)CCREGS_FAST(sii);
+ else if (coreid == BUSCORETYPE(sih->buscoretype))
+ return (volatile void *)PCIEREGS(sii);
+ }
+ INTR_OFF(sii, intr_val);
+ *origidx = sii->curidx;
+ cc = si_setcore(sih, coreid, 0);
+ ASSERT(cc != NULL);
+
+ return cc;
+}
+
+/* restore coreidx and restore interrupts */
+void
+BCMPOSTTRAPFN(si_restore_core)(si_t *sih, uint coreid, bcm_int_bitmask_t *intr_val)
+{
+ si_info_t *sii = SI_INFO(sih);
+
+ if (SI_FAST(sii) && ((coreid == CC_CORE_ID) || (coreid == BUSCORETYPE(sih->buscoretype))))
+ return;
+
+ si_setcoreidx(sih, coreid);
+ INTR_RESTORE(sii, intr_val);
+}
+
+/* Switch to particular core and get corerev */
+#ifdef USE_NEW_COREREV_API
+uint
+BCMPOSTTRAPFN(si_corerev_ext)(si_t *sih, uint coreid, uint coreunit)
+{
+ uint coreidx;
+ uint corerev;
+
+ coreidx = si_coreidx(sih);
+ (void)si_setcore(sih, coreid, coreunit);
+
+ corerev = si_corerev(sih);
+
+ si_setcoreidx(sih, coreidx);
+ return corerev;
+}
+#else
+uint si_get_corerev(si_t *sih, uint core_id)
+{
+ uint corerev, orig_coreid;
+ bcm_int_bitmask_t intr_val;
+
+ si_switch_core(sih, core_id, &orig_coreid, &intr_val);
+ corerev = si_corerev(sih);
+ si_restore_core(sih, orig_coreid, &intr_val);
+ return corerev;
+}
+#endif /* !USE_NEW_COREREV_API */
+
+int
+BCMATTACHFN(si_numaddrspaces)(const si_t *sih)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_numaddrspaces(sih);
+ else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
+ (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
+ (CHIPTYPE(sih->socitype) == SOCI_NAI))
+ return ai_numaddrspaces(sih);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ return ub_numaddrspaces(sih);
+ else if (CHIPTYPE(sih->socitype) == SOCI_NCI)
+ return nci_numaddrspaces(sih);
+ else {
+ ASSERT(0);
+ return 0;
+ }
+}
+
+/* Return the address of the nth address space in the current core
+ * Arguments:
+ * sih : Pointer to struct si_t
+ * spidx : slave port index
+ * baidx : base address index
+ */
+
+uint32
+si_addrspace(const si_t *sih, uint spidx, uint baidx)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_addrspace(sih, baidx);
+ else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
+ (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
+ (CHIPTYPE(sih->socitype) == SOCI_NAI))
+ return ai_addrspace(sih, spidx, baidx);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ return ub_addrspace(sih, baidx);
+ else if (CHIPTYPE(sih->socitype) == SOCI_NCI)
+ return nci_addrspace(sih, spidx, baidx);
+ else {
+ ASSERT(0);
+ return 0;
+ }
+}
+
+/* Return the size of the nth address space in the current core
+ * Arguments:
+ * sih : Pointer to struct si_t
+ * spidx : slave port index
+ * baidx : base address index
+ */
+uint32
+BCMATTACHFN(si_addrspacesize)(const si_t *sih, uint spidx, uint baidx)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_addrspacesize(sih, baidx);
+ else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
+ (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
+ (CHIPTYPE(sih->socitype) == SOCI_NAI))
+ return ai_addrspacesize(sih, spidx, baidx);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ return ub_addrspacesize(sih, baidx);
+ else if (CHIPTYPE(sih->socitype) == SOCI_NCI)
+ return nci_addrspacesize(sih, spidx, baidx);
+ else {
+ ASSERT(0);
+ return 0;
+ }
+}
+
+void
+si_coreaddrspaceX(const si_t *sih, uint asidx, uint32 *addr, uint32 *size)
+{
+ /* Only supported for SOCI_AI */
+ if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
+ (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
+ (CHIPTYPE(sih->socitype) == SOCI_NAI))
+ ai_coreaddrspaceX(sih, asidx, addr, size);
+ else if (CHIPTYPE(sih->socitype) == SOCI_NCI)
+ nci_coreaddrspaceX(sih, asidx, addr, size);
+ else
+ *size = 0;
+}
+
+uint32
+BCMPOSTTRAPFN(si_core_cflags)(const si_t *sih, uint32 mask, uint32 val)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_core_cflags(sih, mask, val);
+ else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
+ (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
+ (CHIPTYPE(sih->socitype) == SOCI_NAI))
+ return ai_core_cflags(sih, mask, val);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ return ub_core_cflags(sih, mask, val);
+ else if (CHIPTYPE(sih->socitype) == SOCI_NCI)
+ return nci_core_cflags(sih, mask, val);
+ else {
+ ASSERT(0);
+ return 0;
+ }
+}
+
+void
+si_core_cflags_wo(const si_t *sih, uint32 mask, uint32 val)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ sb_core_cflags_wo(sih, mask, val);
+ else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
+ (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
+ (CHIPTYPE(sih->socitype) == SOCI_NAI))
+ ai_core_cflags_wo(sih, mask, val);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ ub_core_cflags_wo(sih, mask, val);
+ else if (CHIPTYPE(sih->socitype) == SOCI_NCI)
+ nci_core_cflags_wo(sih, mask, val);
+ else
+ ASSERT(0);
+}
+
+uint32
+si_core_sflags(const si_t *sih, uint32 mask, uint32 val)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_core_sflags(sih, mask, val);
+ else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
+ (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
+ (CHIPTYPE(sih->socitype) == SOCI_NAI))
+ return ai_core_sflags(sih, mask, val);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ return ub_core_sflags(sih, mask, val);
+ else if (CHIPTYPE(sih->socitype) == SOCI_NCI)
+ return nci_core_sflags(sih, mask, val);
+ else {
+ ASSERT(0);
+ return 0;
+ }
+}
+
+void
+si_commit(si_t *sih)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ sb_commit(sih);
+ else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
+ (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
+ (CHIPTYPE(sih->socitype) == SOCI_NAI))
+ ;
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ ;
+ else if (CHIPTYPE(sih->socitype) == SOCI_NCI)
+ ;
+ else {
+ ASSERT(0);
+ }
+}
+
+bool
+BCMPOSTTRAPFN(si_iscoreup)(const si_t *sih)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_iscoreup(sih);
+ else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
+ (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
+ (CHIPTYPE(sih->socitype) == SOCI_NAI))
+ return ai_iscoreup(sih);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ return ub_iscoreup(sih);
+ else if (CHIPTYPE(sih->socitype) == SOCI_NCI)
+ return nci_iscoreup(sih);
+ else {
+ ASSERT(0);
+ return FALSE;
+ }
+}
+
+/** Caller should make sure it is on the right core, before calling this routine */
+uint
+BCMPOSTTRAPFN(si_wrapperreg)(const si_t *sih, uint32 offset, uint32 mask, uint32 val)
+{
+ /* only for AI back plane chips */
+ if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
+ (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
+ (CHIPTYPE(sih->socitype) == SOCI_NAI))
+ return (ai_wrap_reg(sih, offset, mask, val));
+ else if (CHIPTYPE(sih->socitype) == SOCI_NCI)
+ return (nci_get_wrap_reg(sih, offset, mask, val));
+ return 0;
+}
+
+/* si_backplane_access is used to access a full backplane address from the host for PCIe FD.
+ * It uses the secondary bar-0 window, which lies at an offset of 16K from the primary bar-0.
+ * Provides support for reads/writes of 1/2/4 bytes at a backplane address.
+ * Can be used to read/write
+ * 1. core regs
+ * 2. Wrapper regs
+ * 3. memory
+ * 4. BT area
+ * For accessing any 32-bit backplane address, bits [31:12] select the window ("region")
+ * and bits [11:0] are the register offset ("regoff").
+ * For example, to read 4 bytes from reg 0x200 of the d11 core at enum base 0x18001000:
+ * si_backplane_access(sih, 0x18001200, 4, &val, TRUE)
+ */
+static int si_backplane_addr_sane(uint addr, uint size)
+{
+ int bcmerror = BCME_OK;
+
+ /* For 2 byte access, address has to be 2 byte aligned */
+ if (size == 2) {
+ if (addr & 0x1) {
+ bcmerror = BCME_ERROR;
+ }
+ }
+ /* For 4 byte access, address has to be 4 byte aligned */
+ if (size == 4) {
+ if (addr & 0x3) {
+ bcmerror = BCME_ERROR;
+ }
+ }
+
+ return bcmerror;
+}
+
+void
+si_invalidate_second_bar0win(si_t *sih)
+{
+ si_info_t *sii = SI_INFO(sih);
+ sii->second_bar0win = ~0x0;
+}
+
+int
+si_backplane_access(si_t *sih, uint addr, uint size, uint *val, bool read)
+{
+ volatile uint32 *r = NULL;
+ uint32 region = 0;
+ si_info_t *sii = SI_INFO(sih);
+
+ /* Valid only for pcie bus */
+ if (BUSTYPE(sih->bustype) != PCI_BUS) {
+ SI_ERROR(("Valid only for pcie bus \n"));
+ return BCME_ERROR;
+ }
+
+ /* Split addr into region and address offset */
+ region = (addr & (0xFFFFF << 12));
+ addr = addr & 0xFFF;
+
+ /* check for address and size sanity */
+ if (si_backplane_addr_sane(addr, size) != BCME_OK)
+ return BCME_ERROR;
+
+ /* Update window if required */
+ if (sii->second_bar0win != region) {
+ OSL_PCI_WRITE_CONFIG(sii->osh, PCIE2_BAR0_CORE2_WIN, 4, region);
+ sii->second_bar0win = region;
+ }
+
+ /* Compute the effective address
+ * sii->curmap : bar-0 virtual address
+ * PCI_SECOND_BAR0_OFFSET : secondary bar-0 offset
+ * addr : actual reg offset
+ */
+ r = (volatile uint32 *)((volatile char *)sii->curmap + PCI_SECOND_BAR0_OFFSET + addr);
+
+ SI_VMSG(("si curmap %p region %x regaddr %x effective addr %p READ %d\n",
+ (volatile char*)sii->curmap, region, addr, r, read));
+
+ switch (size) {
+ case sizeof(uint8) :
+ if (read)
+ *val = R_REG(sii->osh, (volatile uint8*)r);
+ else
+ W_REG(sii->osh, (volatile uint8*)r, *val);
+ break;
+ case sizeof(uint16) :
+ if (read)
+ *val = R_REG(sii->osh, (volatile uint16*)r);
+ else
+ W_REG(sii->osh, (volatile uint16*)r, *val);
+ break;
+ case sizeof(uint32) :
+ if (read)
+ *val = R_REG(sii->osh, (volatile uint32*)r);
+ else
+ W_REG(sii->osh, (volatile uint32*)r, *val);
+ break;
+ default :
+ SI_ERROR(("Invalid size %d \n", size));
+ return (BCME_ERROR);
+ break;
+ }
+
+ return (BCME_OK);
+}
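+
+/* A minimal usage sketch of the above (hypothetical backplane address,
+ * compiled out): read 4 bytes through the secondary bar-0 window.
+ */
+#ifdef SI_UTILS_USAGE_SKETCH /* illustration only, never compiled */
+static int
+example_backplane_read32(si_t *sih, uint *val)
+{
+ /* bits [31:12] select the window region, bits [11:0] the offset */
+ return si_backplane_access(sih, 0x18001200u, sizeof(uint32), val, TRUE);
+}
+#endif /* SI_UTILS_USAGE_SKETCH */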
+
+/* precommit failed when this is removed */
+/* BLAZAR_BRANCH_101_10_DHD_002/build/dhd/linux-fc30/brix-brcm */
+/* TBD: Revisit later */
+#ifdef BCMINTERNAL
+int
+si_backplane_access_64(si_t *sih, uint addr, uint size, uint64 *val, bool read)
+{
+#if defined(NDIS) || defined(EFI)
+ SI_ERROR(("NDIS/EFI won't support 64 bit access\n"));
+ return (BCME_ERROR);
+#else
+ volatile uint64 *r = NULL;
+ uint32 region = 0;
+ si_info_t *sii = SI_INFO(sih);
+
+ /* Valid only for pcie bus */
+ if (BUSTYPE(sih->bustype) != PCI_BUS) {
+ SI_ERROR(("Valid only for pcie bus \n"));
+ return BCME_ERROR;
+ }
+
+ /* Split addr into region and address offset */
+ region = (addr & (0xFFFFF << 12));
+ addr = addr & 0xFFF;
+
+ /* check for address and size sanity */
+ if (si_backplane_addr_sane(addr, size) != BCME_OK) {
+ SI_ERROR(("Address is not aligned\n"));
+ return BCME_ERROR;
+ }
+
+ /* Update window if required */
+ if (sii->second_bar0win != region) {
+ OSL_PCI_WRITE_CONFIG(sii->osh, PCIE2_BAR0_CORE2_WIN, 4, region);
+ sii->second_bar0win = region;
+ }
+
+ /* Compute the effective address
+ * sii->curmap : bar-0 virtual address
+ * PCI_SECOND_BAR0_OFFSET : secondary bar-0 offset
+ * addr : actual reg offset
+ */
+ r = (volatile uint64 *)((volatile char *)sii->curmap + PCI_SECOND_BAR0_OFFSET + addr);
+
+ switch (size) {
+ case sizeof(uint64) :
+ if (read) {
+ *val = R_REG(sii->osh, (volatile uint64*)r);
+ } else {
+ W_REG(sii->osh, (volatile uint64*)r, *val);
+ }
+ break;
+ default :
+ SI_ERROR(("Invalid size %d \n", size));
+ return (BCME_ERROR);
+ break;
+ }
+
+ return (BCME_OK);
+#endif /* NDIS || EFI */
+}
+#endif /* BCMINTERNAL */
+
+uint
+BCMPOSTTRAPFN(si_corereg)(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_corereg(sih, coreidx, regoff, mask, val);
+ else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
+ (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
+ (CHIPTYPE(sih->socitype) == SOCI_NAI))
+ return ai_corereg(sih, coreidx, regoff, mask, val);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ return ub_corereg(sih, coreidx, regoff, mask, val);
+ else if (CHIPTYPE(sih->socitype) == SOCI_NCI)
+ return nci_corereg(sih, coreidx, regoff, mask, val);
+ else {
+ ASSERT(0);
+ return 0;
+ }
+}
+
+uint
+BCMPOSTTRAPFN(si_corereg_writeonly)(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_NCI) {
+ return nci_corereg_writeonly(sih, coreidx, regoff, mask, val);
+ } else
+ {
+ return ai_corereg_writeonly(sih, coreidx, regoff, mask, val);
+ }
+}
+
+/** ILP sensitive register access needs special treatment to avoid backplane stalls */
+bool
+BCMPOSTTRAPFN(si_pmu_is_ilp_sensitive)(uint32 idx, uint regoff)
+{
+ if (idx == SI_CC_IDX) {
+ if (CHIPCREGS_ILP_SENSITIVE(regoff))
+ return TRUE;
+ } else if (PMUREGS_ILP_SENSITIVE(regoff)) {
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+/** 'idx' should refer either to the chipcommon core or the PMU core */
+uint
+BCMPOSTTRAPFN(si_pmu_corereg)(si_t *sih, uint32 idx, uint regoff, uint mask, uint val)
+{
+ int pmustatus_offset;
+
+ /* prevent backplane stall on double write to 'ILP domain' registers in the PMU */
+ if (mask != 0 && PMUREV(sih->pmurev) >= 22 &&
+ si_pmu_is_ilp_sensitive(idx, regoff)) {
+ pmustatus_offset = AOB_ENAB(sih) ? OFFSETOF(pmuregs_t, pmustatus) :
+ OFFSETOF(chipcregs_t, pmustatus);
+
+ while (si_corereg(sih, idx, pmustatus_offset, 0, 0) & PST_SLOW_WR_PENDING)
+ {};
+ }
+
+ return si_corereg(sih, idx, regoff, mask, val);
+}
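+
+/* A minimal usage sketch (compiled out): writing the ILP-domain PMU watchdog
+ * through si_pmu_corereg() so the PST_SLOW_WR_PENDING poll above guards
+ * against back-to-back slow writes. The register offset assumes a
+ * hypothetical pre-AOB chip where PMU registers live in chipcommon.
+ */
+#ifdef SI_UTILS_USAGE_SKETCH /* illustration only, never compiled */
+static void
+example_pmu_wd_write(si_t *sih, uint ticks)
+{
+ /* assumption: non-AOB layout, so chipcregs_t carries the PMU watchdog */
+ (void)si_pmu_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, pmuwatchdog),
+  ~0, ticks);
+}
+#endif /* SI_UTILS_USAGE_SKETCH */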
+
+/*
+ * If there is no need for fiddling with interrupts or core switches (typically silicon
+ * back plane registers, pci registers and chipcommon registers), this function
+ * maps the register offset on this core to an address that can
+ * be used with W_REG/R_REG directly.
+ *
+ * For accessing registers that would need a core switch, this function will return
+ * NULL.
+ */
+volatile uint32 *
+BCMPOSTTRAPFN(si_corereg_addr)(si_t *sih, uint coreidx, uint regoff)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_corereg_addr(sih, coreidx, regoff);
+ else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
+ (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
+ (CHIPTYPE(sih->socitype) == SOCI_NAI))
+ return ai_corereg_addr(sih, coreidx, regoff);
+ else if (CHIPTYPE(sih->socitype) == SOCI_NCI)
+ return nci_corereg_addr(sih, coreidx, regoff);
+ else {
+ return NULL;
+ }
+}
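+
+/* A minimal usage sketch (hypothetical register choice, compiled out): cache
+ * the mapped address of a chipcommon register once, then read it directly
+ * with R_REG, avoiding a core switch on every access.
+ */
+#ifdef SI_UTILS_USAGE_SKETCH /* illustration only, never compiled */
+static uint32
+example_poll_clk_ctl_st(si_t *sih)
+{
+ volatile uint32 *addr = si_corereg_addr(sih, SI_CC_IDX,
+  OFFSETOF(chipcregs_t, clk_ctl_st));
+
+ if (addr == NULL)
+  return 0; /* would need a core switch; fall back to si_corereg() */
+ return R_REG(si_osh(sih), addr);
+}
+#endif /* SI_UTILS_USAGE_SKETCH */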
+
+void
+si_core_disable(const si_t *sih, uint32 bits)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ sb_core_disable(sih, bits);
+ else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
+ (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
+ (CHIPTYPE(sih->socitype) == SOCI_NAI))
+ ai_core_disable(sih, bits);
+ else if (CHIPTYPE(sih->socitype) == SOCI_NCI)
+ nci_core_disable(sih, bits);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ ub_core_disable(sih, bits);
+}
+
+void
+BCMPOSTTRAPFN(si_core_reset)(si_t *sih, uint32 bits, uint32 resetbits)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ sb_core_reset(sih, bits, resetbits);
+ else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
+ (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
+ (CHIPTYPE(sih->socitype) == SOCI_NAI))
+ ai_core_reset(sih, bits, resetbits);
+ else if (CHIPTYPE(sih->socitype) == SOCI_NCI)
+ nci_core_reset(sih, bits, resetbits);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ ub_core_reset(sih, bits, resetbits);
+}
+
+/** Run bist on current core. Caller needs to take care of core-specific bist hazards */
+int
+si_corebist(const si_t *sih)
+{
+ uint32 cflags;
+ int result = 0;
+
+ /* Read core control flags */
+ cflags = si_core_cflags(sih, 0, 0);
+
+ /* Set bist & fgc */
+ si_core_cflags(sih, ~0, (SICF_BIST_EN | SICF_FGC));
+
+ /* Wait for bist done */
+ SPINWAIT(((si_core_sflags(sih, 0, 0) & SISF_BIST_DONE) == 0), 100000);
+
+ if (si_core_sflags(sih, 0, 0) & SISF_BIST_ERROR)
+ result = BCME_ERROR;
+
+ /* Reset core control flags */
+ si_core_cflags(sih, 0xffff, cflags);
+
+ return result;
+}
+
+uint
+si_num_slaveports(const si_t *sih, uint coreid)
+{
+ uint idx = si_findcoreidx(sih, coreid, 0);
+ uint num = 0;
+
+ if (idx != BADIDX) {
+ if (CHIPTYPE(sih->socitype) == SOCI_AI) {
+ num = ai_num_slaveports(sih, idx);
+ }
+ else if (CHIPTYPE(sih->socitype) == SOCI_NCI) {
+ num = nci_num_slaveports(sih, idx);
+ }
+ }
+ return num;
+}
+
+/* TODO: Check if NCI has a slave port address */
+uint32
+si_get_slaveport_addr(si_t *sih, uint spidx, uint baidx, uint core_id, uint coreunit)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ uint origidx = sii->curidx;
+ uint32 addr = 0x0;
+
+ if (!((CHIPTYPE(sih->socitype) == SOCI_AI) ||
+ (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
+ (CHIPTYPE(sih->socitype) == SOCI_NAI) ||
+ (CHIPTYPE(sih->socitype) == SOCI_NCI)))
+ goto done;
+
+ si_setcore(sih, core_id, coreunit);
+
+ addr = si_addrspace(sih, spidx, baidx);
+
+ si_setcoreidx(sih, origidx);
+
+done:
+ return addr;
+}
+
+/* TODO: Check if NCI has a d11 slave port address */
+uint32
+si_get_d11_slaveport_addr(si_t *sih, uint spidx, uint baidx, uint coreunit)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ uint origidx = sii->curidx;
+ uint32 addr = 0x0;
+
+ if (!((CHIPTYPE(sih->socitype) == SOCI_AI) ||
+ (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
+ (CHIPTYPE(sih->socitype) == SOCI_NAI) ||
+ (CHIPTYPE(sih->socitype) == SOCI_NCI)))
+ goto done;
+
+ si_setcore(sih, D11_CORE_ID, coreunit);
+
+ addr = si_addrspace(sih, spidx, baidx);
+
+ si_setcoreidx(sih, origidx);
+
+done:
+ return addr;
+}
+
+static uint32
+BCMINITFN(factor6)(uint32 x)
+{
+ switch (x) {
+ case CC_F6_2: return 2;
+ case CC_F6_3: return 3;
+ case CC_F6_4: return 4;
+ case CC_F6_5: return 5;
+ case CC_F6_6: return 6;
+ case CC_F6_7: return 7;
+ default: return 0;
+ }
+}
+
+/*
+ * Divide the clock by the divisor with protection for
+ * a zero divisor.
+ */
+static uint32
+divide_clock(uint32 clock, uint32 div)
+{
+ return div ? clock / div : 0;
+}
+
+/** calculate the speed the SI would run at given a set of clockcontrol values */
+uint32
+BCMINITFN(si_clock_rate)(uint32 pll_type, uint32 n, uint32 m)
+{
+ uint32 n1, n2, clock, m1, m2, m3, mc;
+
+ n1 = n & CN_N1_MASK;
+ n2 = (n & CN_N2_MASK) >> CN_N2_SHIFT;
+
+ if (pll_type == PLL_TYPE6) {
+ if (m & CC_T6_MMASK)
+ return CC_T6_M1;
+ else
+ return CC_T6_M0;
+ } else if ((pll_type == PLL_TYPE1) ||
+ (pll_type == PLL_TYPE3) ||
+ (pll_type == PLL_TYPE4) ||
+ (pll_type == PLL_TYPE7)) {
+ n1 = factor6(n1);
+ n2 += CC_F5_BIAS;
+ } else if (pll_type == PLL_TYPE2) {
+ n1 += CC_T2_BIAS;
+ n2 += CC_T2_BIAS;
+ ASSERT((n1 >= 2) && (n1 <= 7));
+ ASSERT((n2 >= 5) && (n2 <= 23));
+ } else if (pll_type == PLL_TYPE5) {
+ /* 5365 */
+ return (100000000);
+ } else
+ ASSERT(0);
+ /* PLL types 3 and 7 use BASE2 (25 MHz) */
+ if ((pll_type == PLL_TYPE3) ||
+ (pll_type == PLL_TYPE7)) {
+ clock = CC_CLOCK_BASE2 * n1 * n2;
+ } else
+ clock = CC_CLOCK_BASE1 * n1 * n2;
+
+ if (clock == 0)
+ return 0;
+
+ m1 = m & CC_M1_MASK;
+ m2 = (m & CC_M2_MASK) >> CC_M2_SHIFT;
+ m3 = (m & CC_M3_MASK) >> CC_M3_SHIFT;
+ mc = (m & CC_MC_MASK) >> CC_MC_SHIFT;
+
+ if ((pll_type == PLL_TYPE1) ||
+ (pll_type == PLL_TYPE3) ||
+ (pll_type == PLL_TYPE4) ||
+ (pll_type == PLL_TYPE7)) {
+ m1 = factor6(m1);
+ if ((pll_type == PLL_TYPE1) || (pll_type == PLL_TYPE3))
+ m2 += CC_F5_BIAS;
+ else
+ m2 = factor6(m2);
+ m3 = factor6(m3);
+
+ switch (mc) {
+ case CC_MC_BYPASS: return (clock);
+ case CC_MC_M1: return divide_clock(clock, m1);
+ case CC_MC_M1M2: return divide_clock(clock, m1 * m2);
+ case CC_MC_M1M2M3: return divide_clock(clock, m1 * m2 * m3);
+ case CC_MC_M1M3: return divide_clock(clock, m1 * m3);
+ default: return (0);
+ }
+ } else {
+ ASSERT(pll_type == PLL_TYPE2);
+
+ m1 += CC_T2_BIAS;
+ m2 += CC_T2M2_BIAS;
+ m3 += CC_T2_BIAS;
+ ASSERT((m1 >= 2) && (m1 <= 7));
+ ASSERT((m2 >= 3) && (m2 <= 10));
+ ASSERT((m3 >= 2) && (m3 <= 7));
+
+ if ((mc & CC_T2MC_M1BYP) == 0)
+ clock /= m1;
+ if ((mc & CC_T2MC_M2BYP) == 0)
+ clock /= m2;
+ if ((mc & CC_T2MC_M3BYP) == 0)
+ clock /= m3;
+
+ return (clock);
+ }
+}
+
+/**
+ * Some chips can have multiple host interfaces, but only one will be active for a
+ * given chip. Depending on pkgopt and cc_chipst, return the active host interface.
+ */
+uint
+si_chip_hostif(const si_t *sih)
+{
+ uint hosti = 0;
+
+ switch (CHIPID(sih->chip)) {
+ case BCM43012_CHIP_ID:
+ case BCM43013_CHIP_ID:
+ case BCM43014_CHIP_ID:
+ hosti = CHIP_HOSTIF_SDIOMODE;
+ break;
+ CASE_BCM43602_CHIP:
+ hosti = CHIP_HOSTIF_PCIEMODE;
+ break;
+
+ case BCM4360_CHIP_ID:
+ /* chippkg bit-0 == 0 is PCIE only pkgs
+ * chippkg bit-0 == 1 has both PCIE and USB cores enabled
+ */
+ if ((sih->chippkg & 0x1) && (sih->chipst & CST4360_MODE_USB))
+ hosti = CHIP_HOSTIF_USBMODE;
+ else
+ hosti = CHIP_HOSTIF_PCIEMODE;
+
+ break;
+
+ case BCM4369_CHIP_GRPID:
+ if (CST4369_CHIPMODE_SDIOD(sih->chipst))
+ hosti = CHIP_HOSTIF_SDIOMODE;
+ else if (CST4369_CHIPMODE_PCIE(sih->chipst))
+ hosti = CHIP_HOSTIF_PCIEMODE;
+ break;
+ case BCM4376_CHIP_GRPID:
+ case BCM4378_CHIP_GRPID:
+ case BCM4385_CHIP_GRPID:
+ case BCM4387_CHIP_GRPID:
+ case BCM4388_CHIP_GRPID:
+ case BCM4389_CHIP_GRPID:
+ hosti = CHIP_HOSTIF_PCIEMODE;
+ break;
+ case BCM4362_CHIP_GRPID:
+ if (CST4362_CHIPMODE_SDIOD(sih->chipst)) {
+ hosti = CHIP_HOSTIF_SDIOMODE;
+ } else if (CST4362_CHIPMODE_PCIE(sih->chipst)) {
+ hosti = CHIP_HOSTIF_PCIEMODE;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return hosti;
+}
+
+#if !defined(BCMDONGLEHOST)
+uint32
+BCMINITFN(si_clock)(si_t *sih)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ chipcregs_t *cc;
+ uint32 n, m;
+ uint idx;
+ uint32 pll_type, rate;
+ bcm_int_bitmask_t intr_val;
+
+ INTR_OFF(sii, &intr_val);
+ if (PMUCTL_ENAB(sih)) {
+ rate = si_pmu_si_clock(sih, sii->osh);
+ goto exit;
+ }
+
+ idx = sii->curidx;
+ cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+ ASSERT(cc != NULL);
+
+ n = R_REG(sii->osh, &cc->clockcontrol_n);
+ pll_type = sih->cccaps & CC_CAP_PLL_MASK;
+ if (pll_type == PLL_TYPE6)
+ m = R_REG(sii->osh, &cc->clockcontrol_m3);
+ else if (pll_type == PLL_TYPE3)
+ m = R_REG(sii->osh, &cc->clockcontrol_m2);
+ else
+ m = R_REG(sii->osh, &cc->clockcontrol_sb);
+
+ /* calculate rate */
+ rate = si_clock_rate(pll_type, n, m);
+
+ if (pll_type == PLL_TYPE3)
+ rate = rate / 2;
+
+ /* switch back to previous core */
+ si_setcoreidx(sih, idx);
+exit:
+ INTR_RESTORE(sii, &intr_val);
+
+ return rate;
+}
+
+/** returns value in [Hz] units */
+uint32
+BCMINITFN(si_alp_clock)(si_t *sih)
+{
+ if (PMUCTL_ENAB(sih)) {
+ return si_pmu_alp_clock(sih, si_osh(sih));
+ }
+
+ return ALP_CLOCK;
+}
+
+/** returns value in [Hz] units */
+uint32
+BCMINITFN(si_ilp_clock)(si_t *sih)
+{
+ if (PMUCTL_ENAB(sih))
+ return si_pmu_ilp_clock(sih, si_osh(sih));
+
+ return ILP_CLOCK;
+}
+#endif /* !defined(BCMDONGLEHOST) */
+
+/** set chip watchdog reset timer to fire in 'ticks' */
+void
+si_watchdog(si_t *sih, uint ticks)
+{
+ uint nb, maxt;
+ uint pmu_wdt = 1;
+
+ if (PMUCTL_ENAB(sih) && pmu_wdt) {
+ nb = (CCREV(sih->ccrev) < 26) ? 16 : ((CCREV(sih->ccrev) >= 37) ? 32 : 24);
+ /* The mips compiler uses the sllv instruction,
+ * so we specially handle the 32-bit case.
+ */
+ if (nb == 32)
+ maxt = 0xffffffff;
+ else
+ maxt = ((1 << nb) - 1);
+
+ /* PR43821: PMU watchdog timer needs min. of 2 ticks */
+ if (ticks == 1)
+ ticks = 2;
+ else if (ticks > maxt)
+ ticks = maxt;
+#ifndef DONGLEBUILD
+ if ((CHIPID(sih->chip) == BCM43012_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM43013_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM43014_CHIP_ID)) {
+ PMU_REG_NEW(sih, min_res_mask, ~0, DEFAULT_43012_MIN_RES_MASK);
+ PMU_REG_NEW(sih, watchdog_res_mask, ~0, DEFAULT_43012_MIN_RES_MASK);
+ PMU_REG_NEW(sih, pmustatus, PST_WDRESET, PST_WDRESET);
+ PMU_REG_NEW(sih, pmucontrol_ext, PCTL_EXT_FASTLPO_SWENAB, 0);
+ SPINWAIT((PMU_REG(sih, pmustatus, 0, 0) & PST_ILPFASTLPO),
+ PMU_MAX_TRANSITION_DLY);
+ }
+#endif /* DONGLEBUILD */
+ pmu_corereg(sih, SI_CC_IDX, pmuwatchdog, ~0, ticks);
+ } else {
+#if !defined(BCMDONGLEHOST)
+ /* make sure we come up in fast clock mode; or if clearing, clear clock */
+ si_clkctl_cc(sih, ticks ? CLK_FAST : CLK_DYNAMIC);
+#endif /* !defined(BCMDONGLEHOST) */
+ maxt = (1 << 28) - 1;
+ if (ticks > maxt)
+ ticks = maxt;
+
+ if (CCREV(sih->ccrev) >= 65) {
+ si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, watchdog), ~0,
+ (ticks & WD_COUNTER_MASK) | WD_SSRESET_PCIE_F0_EN |
+ WD_SSRESET_PCIE_ALL_FN_EN);
+ } else {
+ si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, watchdog), ~0, ticks);
+ }
+ }
+}
+
+/** trigger watchdog reset after ms milliseconds */
+void
+si_watchdog_ms(si_t *sih, uint32 ms)
+{
+ si_watchdog(sih, wd_msticks * ms);
+}
+
+uint32 si_watchdog_msticks(void)
+{
+ return wd_msticks;
+}
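+
+/* A minimal usage sketch (hypothetical recovery path, compiled out): arm the
+ * chip watchdog before risky work, then disarm it with a zero tick count; a
+ * zero also clears the clock request in the non-PMU path above.
+ */
+#ifdef SI_UTILS_USAGE_SKETCH /* illustration only, never compiled */
+static void
+example_watchdog_guard(si_t *sih)
+{
+ /* force a chip reset unless we disarm within 100 ms */
+ si_watchdog_ms(sih, 100);
+
+ /* ... recovery work goes here ... */
+
+ si_watchdog(sih, 0); /* disarm */
+}
+#endif /* SI_UTILS_USAGE_SKETCH */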
+
+bool
+si_taclear(si_t *sih, bool details)
+{
+#if defined(BCMDBG_ERR) || defined(BCMASSERT_SUPPORT) || \
+ defined(BCMDBG_DUMP)
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_taclear(sih, details);
+ else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
+ (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
+ (CHIPTYPE(sih->socitype) == SOCI_NCI) ||
+ (CHIPTYPE(sih->socitype) == SOCI_NAI))
+ return FALSE;
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ return FALSE;
+ else {
+ ASSERT(0);
+ return FALSE;
+ }
+#else
+ return FALSE;
+#endif /* BCMDBG_ERR || BCMASSERT_SUPPORT || BCMDBG_DUMP */
+}
+
+#if !defined(BCMDONGLEHOST)
+/**
+ * Map sb core id to pci device id.
+ */
+uint16
+BCMATTACHFN(si_d11_devid)(si_t *sih)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ uint16 device;
+
+ (void) sii;
+ if (FWSIGN_ENAB()) {
+ return 0xffff;
+ }
+
+ /* normal case: nvram variable with devpath->devid->wl0id */
+ if ((device = (uint16)si_getdevpathintvar(sih, rstr_devid)) != 0)
+ ;
+ /* Get devid from OTP/SPROM depending on where the SROM is read */
+ else if ((device = (uint16)getintvar(sii->vars, rstr_devid)) != 0)
+ ;
+ /* no longer support wl0id, but keep the code here for backward compatibility. */
+ else if ((device = (uint16)getintvar(sii->vars, rstr_wl0id)) != 0)
+ ;
+ else if (CHIPTYPE(sih->socitype) == SOCI_NCI)
+ ;
+ else {
+ /* ignore it */
+ device = 0xffff;
+ }
+ return device;
+}
+
+int
+BCMATTACHFN(si_corepciid)(si_t *sih, uint func, uint16 *pcivendor, uint16 *pcidevice,
+ uint8 *pciclass, uint8 *pcisubclass, uint8 *pciprogif,
+ uint8 *pciheader)
+{
+ uint16 vendor = 0xffff, device = 0xffff;
+ uint8 class, subclass, progif = 0;
+ uint8 header = PCI_HEADER_NORMAL;
+ uint32 core = si_coreid(sih);
+
+ /* Verify whether the function exists for the core */
+ if (func >= (uint)((core == USB20H_CORE_ID) || (core == NS_USB20_CORE_ID) ? 2 : 1))
+ return BCME_ERROR;
+
+ /* Known vendor translations */
+ switch (si_corevendor(sih)) {
+ case SB_VEND_BCM:
+ case MFGID_BRCM:
+ vendor = VENDOR_BROADCOM;
+ break;
+ default:
+ return BCME_ERROR;
+ }
+
+ /* Determine class based on known core codes */
+ switch (core) {
+ case ENET_CORE_ID:
+ class = PCI_CLASS_NET;
+ subclass = PCI_NET_ETHER;
+ device = BCM47XX_ENET_ID;
+ break;
+ case GIGETH_CORE_ID:
+ class = PCI_CLASS_NET;
+ subclass = PCI_NET_ETHER;
+ device = BCM47XX_GIGETH_ID;
+ break;
+ case GMAC_CORE_ID:
+ class = PCI_CLASS_NET;
+ subclass = PCI_NET_ETHER;
+ device = BCM47XX_GMAC_ID;
+ break;
+ case SDRAM_CORE_ID:
+ case MEMC_CORE_ID:
+ case DMEMC_CORE_ID:
+ case SOCRAM_CORE_ID:
+ class = PCI_CLASS_MEMORY;
+ subclass = PCI_MEMORY_RAM;
+ device = (uint16)core;
+ break;
+ case PCI_CORE_ID:
+ case PCIE_CORE_ID:
+ case PCIE2_CORE_ID:
+ class = PCI_CLASS_BRIDGE;
+ subclass = PCI_BRIDGE_PCI;
+ device = (uint16)core;
+ header = PCI_HEADER_BRIDGE;
+ break;
+ case CODEC_CORE_ID:
+ class = PCI_CLASS_COMM;
+ subclass = PCI_COMM_MODEM;
+ device = BCM47XX_V90_ID;
+ break;
+ case I2S_CORE_ID:
+ class = PCI_CLASS_MMEDIA;
+ subclass = PCI_MMEDIA_AUDIO;
+ device = BCM47XX_AUDIO_ID;
+ break;
+ case USB_CORE_ID:
+ case USB11H_CORE_ID:
+ class = PCI_CLASS_SERIAL;
+ subclass = PCI_SERIAL_USB;
+ progif = 0x10; /* OHCI */
+ device = BCM47XX_USBH_ID;
+ break;
+ case USB20H_CORE_ID:
+ case NS_USB20_CORE_ID:
+ class = PCI_CLASS_SERIAL;
+ subclass = PCI_SERIAL_USB;
+ progif = func == 0 ? 0x10 : 0x20; /* OHCI/EHCI value defined in spec */
+ device = BCM47XX_USB20H_ID;
+ header = PCI_HEADER_MULTI; /* multifunction */
+ break;
+ case IPSEC_CORE_ID:
+ class = PCI_CLASS_CRYPT;
+ subclass = PCI_CRYPT_NETWORK;
+ device = BCM47XX_IPSEC_ID;
+ break;
+ case NS_USB30_CORE_ID:
+ class = PCI_CLASS_SERIAL;
+ subclass = PCI_SERIAL_USB;
+ progif = 0x30; /* XHCI */
+ device = BCM47XX_USB30H_ID;
+ break;
+ case ROBO_CORE_ID:
+ /* Don't use class NETWORK, so wl/et won't attempt to recognize it */
+ class = PCI_CLASS_COMM;
+ subclass = PCI_COMM_OTHER;
+ device = BCM47XX_ROBO_ID;
+ break;
+ case CC_CORE_ID:
+ class = PCI_CLASS_MEMORY;
+ subclass = PCI_MEMORY_FLASH;
+ device = (uint16)core;
+ break;
+ case SATAXOR_CORE_ID:
+ class = PCI_CLASS_XOR;
+ subclass = PCI_XOR_QDMA;
+ device = BCM47XX_SATAXOR_ID;
+ break;
+ case ATA100_CORE_ID:
+ class = PCI_CLASS_DASDI;
+ subclass = PCI_DASDI_IDE;
+ device = BCM47XX_ATA100_ID;
+ break;
+ case USB11D_CORE_ID:
+ class = PCI_CLASS_SERIAL;
+ subclass = PCI_SERIAL_USB;
+ device = BCM47XX_USBD_ID;
+ break;
+ case USB20D_CORE_ID:
+ class = PCI_CLASS_SERIAL;
+ subclass = PCI_SERIAL_USB;
+ device = BCM47XX_USB20D_ID;
+ break;
+ case D11_CORE_ID:
+ class = PCI_CLASS_NET;
+ subclass = PCI_NET_OTHER;
+ device = si_d11_devid(sih);
+ break;
+
+ default:
+ class = subclass = progif = 0xff;
+ device = (uint16)core;
+ break;
+ }
+
+ *pcivendor = vendor;
+ *pcidevice = device;
+ *pciclass = class;
+ *pcisubclass = subclass;
+ *pciprogif = progif;
+ *pciheader = header;
+
+ return 0;
+}
+
+#if defined(BCMDBG) || defined(BCMDBG_DUMP) || defined(BCMDBG_PHYDUMP)
+/** print interesting sbconfig registers */
+void
+si_dumpregs(si_t *sih, struct bcmstrbuf *b)
+{
+ si_info_t *sii = SI_INFO(sih);
+ uint origidx;
+ bcm_int_bitmask_t intr_val;
+
+ origidx = sii->curidx;
+
+ INTR_OFF(sii, &intr_val);
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ sb_dumpregs(sih, b);
+ else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
+ (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
+ (CHIPTYPE(sih->socitype) == SOCI_NAI))
+ ai_dumpregs(sih, b);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ ub_dumpregs(sih, b);
+ else if (CHIPTYPE(sih->socitype) == SOCI_NCI)
+ nci_dumpregs(sih, b);
+ else
+ ASSERT(0);
+
+ si_setcoreidx(sih, origidx);
+ INTR_RESTORE(sii, &intr_val);
+}
+#endif /* BCMDBG || BCMDBG_DUMP || BCMDBG_PHYDUMP */
+#endif /* !defined(BCMDONGLEHOST) */
+
+#ifdef BCMDBG
+void
+si_view(si_t *sih, bool verbose)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ sb_view(sih, verbose);
+ else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
+ (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
+ (CHIPTYPE(sih->socitype) == SOCI_NAI))
+ ai_view(sih, verbose);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ ub_view(sih, verbose);
+ else if (CHIPTYPE(sih->socitype) == SOCI_NCI)
+ nci_view(sih, verbose);
+ else
+ ASSERT(0);
+}
+
+void
+si_viewall(si_t *sih, bool verbose)
+{
+ si_info_t *sii = SI_INFO(sih);
+ uint curidx, i;
+ bcm_int_bitmask_t intr_val;
+
+ curidx = sii->curidx;
+
+ INTR_OFF(sii, &intr_val);
+ if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
+ (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
+ (CHIPTYPE(sih->socitype) == SOCI_NAI))
+ ai_viewall(sih, verbose);
+ else if (CHIPTYPE(sih->socitype) == SOCI_NCI)
+ nci_viewall(sih, verbose);
+ else {
+ SI_ERROR(("si_viewall: num_cores %d\n", sii->numcores));
+ for (i = 0; i < sii->numcores; i++) {
+ si_setcoreidx(sih, i);
+ si_view(sih, verbose);
+ }
+ }
+ si_setcoreidx(sih, curidx);
+ INTR_RESTORE(sii, &intr_val);
+}
+#endif /* BCMDBG */
+
+/** return the slow clock source - LPO, XTAL, or PCI */
+static uint
+si_slowclk_src(si_info_t *sii)
+{
+ chipcregs_t *cc;
+
+ ASSERT(SI_FAST(sii) || si_coreid(&sii->pub) == CC_CORE_ID);
+
+ if (CCREV(sii->pub.ccrev) < 6) {
+ if ((BUSTYPE(sii->pub.bustype) == PCI_BUS) &&
+ (OSL_PCI_READ_CONFIG(sii->osh, PCI_GPIO_OUT, sizeof(uint32)) &
+ PCI_CFG_GPIO_SCS))
+ return (SCC_SS_PCI);
+ else
+ return (SCC_SS_XTAL);
+ } else if (CCREV(sii->pub.ccrev) < 10) {
+ cc = (chipcregs_t *)si_setcoreidx(&sii->pub, sii->curidx);
+ ASSERT(cc);
+ return (R_REG(sii->osh, &cc->slow_clk_ctl) & SCC_SS_MASK);
+ } else /* Insta-clock */
+ return (SCC_SS_XTAL);
+}
+
+/** return the ILP (slowclock) min or max frequency */
+static uint
+si_slowclk_freq(si_info_t *sii, bool max_freq, chipcregs_t *cc)
+{
+ uint32 slowclk;
+ uint div;
+
+ ASSERT(SI_FAST(sii) || si_coreid(&sii->pub) == CC_CORE_ID);
+
+ /* shouldn't be here unless we've established the chip has dynamic clk control */
+ ASSERT(R_REG(sii->osh, &cc->capabilities) & CC_CAP_PWR_CTL);
+
+ slowclk = si_slowclk_src(sii);
+ if (CCREV(sii->pub.ccrev) < 6) {
+ if (slowclk == SCC_SS_PCI)
+ return (max_freq ? (PCIMAXFREQ / 64) : (PCIMINFREQ / 64));
+ else
+ return (max_freq ? (XTALMAXFREQ / 32) : (XTALMINFREQ / 32));
+ } else if (CCREV(sii->pub.ccrev) < 10) {
+ div = 4 *
+ (((R_REG(sii->osh, &cc->slow_clk_ctl) & SCC_CD_MASK) >> SCC_CD_SHIFT) + 1);
+ if (slowclk == SCC_SS_LPO)
+ return (max_freq ? LPOMAXFREQ : LPOMINFREQ);
+ else if (slowclk == SCC_SS_XTAL)
+ return (max_freq ? (XTALMAXFREQ / div) : (XTALMINFREQ / div));
+ else if (slowclk == SCC_SS_PCI)
+ return (max_freq ? (PCIMAXFREQ / div) : (PCIMINFREQ / div));
+ else
+ ASSERT(0);
+ } else {
+ /* Chipc rev 10 is InstaClock */
+ div = R_REG(sii->osh, &cc->system_clk_ctl) >> SYCC_CD_SHIFT;
+ div = 4 * (div + 1);
+ return (max_freq ? XTALMAXFREQ : (XTALMINFREQ / div));
+ }
+ return (0);
+}
+
+static void
+BCMINITFN(si_clkctl_setdelay)(si_info_t *sii, void *chipcregs)
+{
+ chipcregs_t *cc = (chipcregs_t *)chipcregs;
+ uint slowmaxfreq, pll_delay, slowclk;
+ uint pll_on_delay, fref_sel_delay;
+
+ pll_delay = PLL_DELAY;
+
+ /* If the slow clock is not sourced by the xtal then add the xtal_on_delay
+ * since the xtal will also be powered down by dynamic clk control logic.
+ */
+
+ slowclk = si_slowclk_src(sii);
+ if (slowclk != SCC_SS_XTAL)
+ pll_delay += XTAL_ON_DELAY;
+
+	/* Starting with the 4318, the ILP clock is used for the delays */
+ slowmaxfreq = si_slowclk_freq(sii, (CCREV(sii->pub.ccrev) >= 10) ? FALSE : TRUE, cc);
+
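+	/* Both delays convert microseconds to slow-clock ticks, rounding up:
+	 * ticks = (freq_Hz * delay_us + 999999) / 1000000. E.g. (illustrative
+	 * numbers only) a 32768 Hz slow clock and a 150 us delay give 5 ticks.
+	 */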
+ pll_on_delay = ((slowmaxfreq * pll_delay) + 999999) / 1000000;
+ fref_sel_delay = ((slowmaxfreq * FREF_DELAY) + 999999) / 1000000;
+
+ W_REG(sii->osh, &cc->pll_on_delay, pll_on_delay);
+ W_REG(sii->osh, &cc->fref_sel_delay, fref_sel_delay);
+}
+
+/** initialize power control delay registers */
+void
+BCMINITFN(si_clkctl_init)(si_t *sih)
+{
+ si_info_t *sii;
+ uint origidx = 0;
+ chipcregs_t *cc;
+ bool fast;
+
+ if (!CCCTL_ENAB(sih))
+ return;
+
+ sii = SI_INFO(sih);
+ fast = SI_FAST(sii);
+ if (!fast) {
+ origidx = sii->curidx;
+ if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL)
+ return;
+ } else if ((cc = (chipcregs_t *)CCREGS_FAST(sii)) == NULL)
+ return;
+ ASSERT(cc != NULL);
+
+ /* set all Instaclk chip ILP to 1 MHz */
+ if (CCREV(sih->ccrev) >= 10)
+ SET_REG(sii->osh, &cc->system_clk_ctl, SYCC_CD_MASK,
+ (ILP_DIV_1MHZ << SYCC_CD_SHIFT));
+
+ si_clkctl_setdelay(sii, (void *)(uintptr)cc);
+
+ /* PR 110294 */
+ OSL_DELAY(20000);
+
+ if (!fast)
+ si_setcoreidx(sih, origidx);
+}
+
+#if !defined(BCMDONGLEHOST)
+/** return the value suitable for writing to the dot11 core FAST_PWRUP_DELAY register */
+uint16
+BCMINITFN(si_clkctl_fast_pwrup_delay)(si_t *sih)
+{
+ si_info_t *sii = SI_INFO(sih);
+ uint origidx = 0;
+ chipcregs_t *cc;
+ uint slowminfreq;
+ uint16 fpdelay;
+ bcm_int_bitmask_t intr_val;
+ bool fast;
+
+ if (PMUCTL_ENAB(sih)) {
+ INTR_OFF(sii, &intr_val);
+ fpdelay = si_pmu_fast_pwrup_delay(sih, sii->osh);
+ INTR_RESTORE(sii, &intr_val);
+ return fpdelay;
+ }
+
+ if (!CCCTL_ENAB(sih))
+ return 0;
+
+ fast = SI_FAST(sii);
+ fpdelay = 0;
+ if (!fast) {
+ origidx = sii->curidx;
+ INTR_OFF(sii, &intr_val);
+ if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL)
+ goto done;
+ } else if ((cc = (chipcregs_t *)CCREGS_FAST(sii)) == NULL) {
+ goto done;
+ }
+
+ ASSERT(cc != NULL);
+
+ slowminfreq = si_slowclk_freq(sii, FALSE, cc);
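+	/* Convert (pll_on_delay + 2) slow-clock ticks to microseconds, rounding
+	 * up: us = (ticks * 1000000 + slowminfreq - 1) / slowminfreq. The "+ 2"
+	 * presumably adds margin for synchronization with the slow clock.
+	 */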
+ if (slowminfreq > 0)
+ fpdelay = (((R_REG(sii->osh, &cc->pll_on_delay) + 2) * 1000000) +
+ (slowminfreq - 1)) / slowminfreq;
+
+done:
+ if (!fast) {
+ si_setcoreidx(sih, origidx);
+ INTR_RESTORE(sii, &intr_val);
+ }
+ return fpdelay;
+}
+
+/** turn primary xtal and/or pll off/on */
+int
+si_clkctl_xtal(si_t *sih, uint what, bool on)
+{
+ si_info_t *sii;
+ uint32 in, out, outen;
+
+ sii = SI_INFO(sih);
+
+ switch (BUSTYPE(sih->bustype)) {
+
+#ifdef BCMSDIO
+ case SDIO_BUS:
+ return (-1);
+#endif /* BCMSDIO */
+
+ case PCI_BUS:
+ /* pcie core doesn't have any mapping to control the xtal pu */
+ if (PCIE(sii) || PCIE_GEN2(sii))
+ return -1;
+
+ in = OSL_PCI_READ_CONFIG(sii->osh, PCI_GPIO_IN, sizeof(uint32));
+ out = OSL_PCI_READ_CONFIG(sii->osh, PCI_GPIO_OUT, sizeof(uint32));
+ outen = OSL_PCI_READ_CONFIG(sii->osh, PCI_GPIO_OUTEN, sizeof(uint32));
+
+ /*
+ * Avoid glitching the clock if GPRS is already using it.
+ * We can't actually read the state of the PLLPD so we infer it
+ * by the value of XTAL_PU which *is* readable via gpioin.
+ */
+ if (on && (in & PCI_CFG_GPIO_XTAL))
+ return (0);
+
+ if (what & XTAL)
+ outen |= PCI_CFG_GPIO_XTAL;
+ if (what & PLL)
+ outen |= PCI_CFG_GPIO_PLL;
+
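+		/*
+		 * Note: XTAL_PU is active-high, while the PLL line is a
+		 * power-down control; PCI_CFG_GPIO_PLL is set here to keep the
+		 * PLL down and cleared below to power it up.
+		 */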
+ if (on) {
+ /* turn primary xtal on */
+ if (what & XTAL) {
+ out |= PCI_CFG_GPIO_XTAL;
+ if (what & PLL)
+ out |= PCI_CFG_GPIO_PLL;
+ OSL_PCI_WRITE_CONFIG(sii->osh, PCI_GPIO_OUT,
+ sizeof(uint32), out);
+ OSL_PCI_WRITE_CONFIG(sii->osh, PCI_GPIO_OUTEN,
+ sizeof(uint32), outen);
+ OSL_DELAY(XTAL_ON_DELAY);
+ }
+
+ /* turn pll on */
+ if (what & PLL) {
+ out &= ~PCI_CFG_GPIO_PLL;
+ OSL_PCI_WRITE_CONFIG(sii->osh, PCI_GPIO_OUT,
+ sizeof(uint32), out);
+ OSL_DELAY(2000);
+ }
+ } else {
+ if (what & XTAL)
+ out &= ~PCI_CFG_GPIO_XTAL;
+ if (what & PLL)
+ out |= PCI_CFG_GPIO_PLL;
+ OSL_PCI_WRITE_CONFIG(sii->osh, PCI_GPIO_OUT, sizeof(uint32), out);
+ OSL_PCI_WRITE_CONFIG(sii->osh, PCI_GPIO_OUTEN, sizeof(uint32),
+ outen);
+ }
+ return 0;
+
+ default:
+ return (-1);
+ }
+
+ return (0);
+}
+
+/**
+ * clock control policy function through chipcommon
+ *
+ * set dynamic clk control mode (forceslow, forcefast, dynamic)
+ * returns true if we are forcing fast clock
+ * this is a wrapper over the next internal function
+ * to allow flexible policy settings for outside callers
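+ *
+ * E.g. (illustrative): si_clkctl_cc(sih, CLK_FAST) forces the fast (HT)
+ * clock before latency-sensitive register access; CLK_DYNAMIC re-enables
+ * dynamic clock control afterwards.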
+ */
+bool
+si_clkctl_cc(si_t *sih, uint mode)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+
+ /* chipcommon cores prior to rev6 don't support dynamic clock control */
+ if (CCREV(sih->ccrev) < 6)
+ return FALSE;
+
+ return _si_clkctl_cc(sii, mode);
+}
+
+/* clk control mechanism through chipcommon, no policy checking */
+static bool
+_si_clkctl_cc(si_info_t *sii, uint mode)
+{
+ uint origidx = 0;
+ chipcregs_t *cc;
+ uint32 scc;
+ bcm_int_bitmask_t intr_val;
+ bool fast = SI_FAST(sii);
+
+ /* chipcommon cores prior to rev6 don't support dynamic clock control */
+ if (CCREV(sii->pub.ccrev) < 6)
+ return (FALSE);
+
+ /* Chips with ccrev 10 are EOL and they don't have SYCC_HR which we use below */
+ ASSERT(CCREV(sii->pub.ccrev) != 10);
+
+ if (!fast) {
+ INTR_OFF(sii, &intr_val);
+ origidx = sii->curidx;
+ cc = (chipcregs_t *) si_setcore(&sii->pub, CC_CORE_ID, 0);
+ } else if ((cc = (chipcregs_t *) CCREGS_FAST(sii)) == NULL)
+ goto done;
+ ASSERT(cc != NULL);
+
+ if (!CCCTL_ENAB(&sii->pub) && (CCREV(sii->pub.ccrev) < 20))
+ goto done;
+
+ switch (mode) {
+ case CLK_FAST: /* FORCEHT, fast (pll) clock */
+ if (CCREV(sii->pub.ccrev) < 10) {
+ /* don't forget to force xtal back on before we clear SCC_DYN_XTAL.. */
+ si_clkctl_xtal(&sii->pub, XTAL, ON);
+ SET_REG(sii->osh, &cc->slow_clk_ctl, (SCC_XC | SCC_FS | SCC_IP), SCC_IP);
+ } else if (CCREV(sii->pub.ccrev) < 20) {
+ OR_REG(sii->osh, &cc->system_clk_ctl, SYCC_HR);
+ } else {
+ OR_REG(sii->osh, &cc->clk_ctl_st, CCS_FORCEHT);
+ }
+
+ /* wait for the PLL */
+ if (PMUCTL_ENAB(&sii->pub)) {
+ uint32 htavail = CCS_HTAVAIL;
+ SPINWAIT(((R_REG(sii->osh, &cc->clk_ctl_st) & htavail) == 0),
+ PMU_MAX_TRANSITION_DLY);
+ ASSERT(R_REG(sii->osh, &cc->clk_ctl_st) & htavail);
+ } else {
+ OSL_DELAY(PLL_DELAY);
+ }
+ break;
+
+ case CLK_DYNAMIC: /* enable dynamic clock control */
+ if (CCREV(sii->pub.ccrev) < 10) {
+ scc = R_REG(sii->osh, &cc->slow_clk_ctl);
+ scc &= ~(SCC_FS | SCC_IP | SCC_XC);
+ if ((scc & SCC_SS_MASK) != SCC_SS_XTAL)
+ scc |= SCC_XC;
+ W_REG(sii->osh, &cc->slow_clk_ctl, scc);
+
+ /* for dynamic control, we have to release our xtal_pu "force on" */
+ if (scc & SCC_XC)
+ si_clkctl_xtal(&sii->pub, XTAL, OFF);
+ } else if (CCREV(sii->pub.ccrev) < 20) {
+ /* Instaclock */
+ AND_REG(sii->osh, &cc->system_clk_ctl, ~SYCC_HR);
+ } else {
+ AND_REG(sii->osh, &cc->clk_ctl_st, ~CCS_FORCEHT);
+ }
+
+ /* wait for the PLL */
+ if (PMUCTL_ENAB(&sii->pub)) {
+ uint32 htavail = CCS_HTAVAIL;
+ SPINWAIT(((R_REG(sii->osh, &cc->clk_ctl_st) & htavail) != 0),
+ PMU_MAX_TRANSITION_DLY);
+ ASSERT(!(R_REG(sii->osh, &cc->clk_ctl_st) & htavail));
+ } else {
+ OSL_DELAY(PLL_DELAY);
+ }
+
+ break;
+
+ default:
+ ASSERT(0);
+ }
+
+done:
+ if (!fast) {
+ si_setcoreidx(&sii->pub, origidx);
+ INTR_RESTORE(sii, &intr_val);
+ }
+ return (mode == CLK_FAST);
+}
+
+/** Build device path. Support SI, PCI for now. */
+int
+BCMNMIATTACHFN(si_devpath)(const si_t *sih, char *path, int size)
+{
+ int slen;
+
+ ASSERT(path != NULL);
+ ASSERT(size >= SI_DEVPATH_BUFSZ);
+
+ if (!path || size <= 0)
+ return -1;
+
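+	/* Resulting paths look like "sb/3/", "pci/1/4/" or "sd/2/"
+	 * (illustrative indices), always with a trailing '/'.
+	 */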
+ switch (BUSTYPE(sih->bustype)) {
+ case SI_BUS:
+ slen = snprintf(path, (size_t)size, "sb/%u/", si_coreidx(sih));
+ break;
+ case PCI_BUS:
+ ASSERT((SI_INFO(sih))->osh != NULL);
+ slen = snprintf(path, (size_t)size, "pci/%u/%u/",
+ OSL_PCI_BUS((SI_INFO(sih))->osh),
+ OSL_PCI_SLOT((SI_INFO(sih))->osh));
+ break;
+#ifdef BCMSDIO
+ case SDIO_BUS:
+ SI_ERROR(("si_devpath: device 0 assumed\n"));
+ slen = snprintf(path, (size_t)size, "sd/%u/", si_coreidx(sih));
+ break;
+#endif
+ default:
+ slen = -1;
+ ASSERT(0);
+ break;
+ }
+
+ if (slen < 0 || slen >= size) {
+ path[0] = '\0';
+ return -1;
+ }
+
+ return 0;
+}
+
+int
+BCMNMIATTACHFN(si_devpath_pcie)(const si_t *sih, char *path, int size)
+{
+ ASSERT(path != NULL);
+ ASSERT(size >= SI_DEVPATH_BUFSZ);
+
+ if (!path || size <= 0)
+ return -1;
+
+ ASSERT((SI_INFO(sih))->osh != NULL);
+ snprintf(path, (size_t)size, "pcie/%u/%u/",
+ OSL_PCIE_DOMAIN((SI_INFO(sih))->osh),
+ OSL_PCIE_BUS((SI_INFO(sih))->osh));
+
+ return 0;
+}
+
+char *
+BCMATTACHFN(si_coded_devpathvar)(const si_t *sih, char *varname, int var_len, const char *name)
+{
+ char pathname[SI_DEVPATH_BUFSZ + 32];
+ char devpath[SI_DEVPATH_BUFSZ + 32];
+ char devpath_pcie[SI_DEVPATH_BUFSZ + 32];
+ char *p;
+ int idx;
+ int len1;
+ int len2;
+ int len3 = 0;
+
+ if (FWSIGN_ENAB()) {
+ return NULL;
+ }
+ if (BUSTYPE(sih->bustype) == PCI_BUS) {
+ snprintf(devpath_pcie, SI_DEVPATH_BUFSZ, "pcie/%u/%u",
+ OSL_PCIE_DOMAIN((SI_INFO(sih))->osh),
+ OSL_PCIE_BUS((SI_INFO(sih))->osh));
+ len3 = strlen(devpath_pcie);
+ }
+
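+	/* A coded devpath maps the full path to a short alias: assuming the
+	 * usual nvram convention, "devpathN=pci/1/1/" defines alias N and the
+	 * variable is then looked up as "N:name" (rstr_devpathD and rstr_D_S
+	 * are presumed to carry those formats).
+	 */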
+	/* try to get compact devpath if it exists */
+ if (si_devpath(sih, devpath, SI_DEVPATH_BUFSZ) == 0) {
+ /* devpath now is 'zzz/zz/', adjust length to */
+ /* eliminate ending '/' (if present) */
+ len1 = strlen(devpath);
+ if (devpath[len1 - 1] == '/')
+ len1--;
+
+ for (idx = 0; idx < SI_MAXCORES; idx++) {
+ snprintf(pathname, SI_DEVPATH_BUFSZ, rstr_devpathD, idx);
+ if ((p = getvar(NULL, pathname)) == NULL)
+ continue;
+
+ /* eliminate ending '/' (if present) */
+ len2 = strlen(p);
+ if (p[len2 - 1] == '/')
+ len2--;
+
+ /* check that both lengths match and if so compare */
+			/* the strings (minus trailing '/'s, if present) */
+ if ((len1 == len2) && (memcmp(p, devpath, len1) == 0)) {
+ snprintf(varname, var_len, rstr_D_S, idx, name);
+ return varname;
+ }
+
+ /* try the new PCIe devpath format if it exists */
+ if (len3 && (len3 == len2) && (memcmp(p, devpath_pcie, len3) == 0)) {
+ snprintf(varname, var_len, rstr_D_S, idx, name);
+ return varname;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+/** Get a variable, but only if it has a devpath prefix */
+char *
+BCMATTACHFN(si_getdevpathvar)(const si_t *sih, const char *name)
+{
+ char varname[SI_DEVPATH_BUFSZ + 32];
+ char *val;
+
+ si_devpathvar(sih, varname, sizeof(varname), name);
+
+ if ((val = getvar(NULL, varname)) != NULL)
+ return val;
+
+ if (BUSTYPE(sih->bustype) == PCI_BUS) {
+ si_pcie_devpathvar(sih, varname, sizeof(varname), name);
+ if ((val = getvar(NULL, varname)) != NULL)
+ return val;
+ }
+
+	/* try to get compact devpath if it exists */
+ if (si_coded_devpathvar(sih, varname, sizeof(varname), name) == NULL)
+ return NULL;
+
+ return (getvar(NULL, varname));
+}
+
+/** Get a variable, but only if it has a devpath prefix */
+int
+BCMATTACHFN(si_getdevpathintvar)(const si_t *sih, const char *name)
+{
+#if defined(BCMBUSTYPE) && (BCMBUSTYPE == SI_BUS)
+ BCM_REFERENCE(sih);
+ return (getintvar(NULL, name));
+#else
+ char varname[SI_DEVPATH_BUFSZ + 32];
+ int val;
+
+ si_devpathvar(sih, varname, sizeof(varname), name);
+
+ if ((val = getintvar(NULL, varname)) != 0)
+ return val;
+
+ if (BUSTYPE(sih->bustype) == PCI_BUS) {
+ si_pcie_devpathvar(sih, varname, sizeof(varname), name);
+ if ((val = getintvar(NULL, varname)) != 0)
+ return val;
+ }
+
+	/* try to get compact devpath if it exists */
+ if (si_coded_devpathvar(sih, varname, sizeof(varname), name) == NULL)
+ return 0;
+
+ return (getintvar(NULL, varname));
+#endif /* BCMBUSTYPE && BCMBUSTYPE == SI_BUS */
+}
+
+/**
+ * Concatenate the dev path with a varname into the given 'var' buffer
+ * and return the 'var' pointer.
+ * Nothing is done to the arguments if len == 0 or var is NULL; var is still returned.
+ * On overflow, the first char will be set to '\0'.
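+ * E.g. (illustrative): devpath "pci/1/4/" + name "boardrev" yields
+ * "pci/1/4/boardrev".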
+ */
+static char *
+BCMATTACHFN(si_devpathvar)(const si_t *sih, char *var, int len, const char *name)
+{
+ uint path_len;
+
+ if (!var || len <= 0)
+ return var;
+
+ if (si_devpath(sih, var, len) == 0) {
+ path_len = strlen(var);
+
+ if (strlen(name) + 1 > (uint)(len - path_len))
+ var[0] = '\0';
+ else
+ strlcpy(var + path_len, name, len - path_len);
+ }
+
+ return var;
+}
+
+static char *
+BCMATTACHFN(si_pcie_devpathvar)(const si_t *sih, char *var, int len, const char *name)
+{
+ uint path_len;
+
+ if (!var || len <= 0)
+ return var;
+
+ if (si_devpath_pcie(sih, var, len) == 0) {
+ path_len = strlen(var);
+
+ if (strlen(name) + 1 > (uint)(len - path_len))
+ var[0] = '\0';
+ else
+ strlcpy(var + path_len, name, len - path_len);
+ }
+
+ return var;
+}
+
+uint32
+BCMPOSTTRAPFN(si_ccreg)(si_t *sih, uint32 offset, uint32 mask, uint32 val)
+{
+ si_info_t *sii;
+ uint32 reg_val = 0;
+
+ sii = SI_INFO(sih);
+
+ /* abort for invalid offset */
+ if (offset > sizeof(chipcregs_t))
+ return 0;
+
+ reg_val = si_corereg(&sii->pub, SI_CC_IDX, offset, mask, val);
+
+ return reg_val;
+}
+
+void
+sih_write_sraon(si_t *sih, int offset, int len, const uint32* data)
+{
+ chipcregs_t *cc;
+ cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+ W_REG(si_osh(sih), &cc->sr_memrw_addr, offset);
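+	/* Assumption: sr_memrw_data auto-increments sr_memrw_addr after each
+	 * 32-bit write, so only the starting offset is programmed here.
+	 */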
+ while (len > 0) {
+ W_REG(si_osh(sih), &cc->sr_memrw_data, *data);
+ data++;
+ len -= sizeof(uint32);
+ }
+}
+
+#ifdef SR_DEBUG
+void
+si_dump_pmu(si_t *sih, void *arg)
+{
+ uint i;
+ uint32 pmu_chip_ctl_reg;
+ uint32 pmu_chip_reg_reg;
+ uint32 pmu_chip_pll_reg;
+ uint32 pmu_chip_res_reg;
+ pmu_reg_t *pmu_var = (pmu_reg_t*)arg;
+ pmu_var->pmu_control = si_ccreg(sih, PMU_CTL, 0, 0);
+ pmu_var->pmu_capabilities = si_ccreg(sih, PMU_CAP, 0, 0);
+ pmu_var->pmu_status = si_ccreg(sih, PMU_ST, 0, 0);
+ pmu_var->res_state = si_ccreg(sih, PMU_RES_STATE, 0, 0);
+ pmu_var->res_pending = si_ccreg(sih, PMU_RES_PENDING, 0, 0);
+ pmu_var->pmu_timer1 = si_ccreg(sih, PMU_TIMER, 0, 0);
+ pmu_var->min_res_mask = si_ccreg(sih, MINRESMASKREG, 0, 0);
+ pmu_var->max_res_mask = si_ccreg(sih, MAXRESMASKREG, 0, 0);
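+	/* pmu_capabilities bitfields, as decoded below: [31:27] chipcontrol
+	 * count, [26:22] regcontrol count, [21:17] pllcontrol count and
+	 * [12:8] resource count.
+	 */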
+ pmu_chip_ctl_reg = (pmu_var->pmu_capabilities & 0xf8000000);
+ pmu_chip_ctl_reg = pmu_chip_ctl_reg >> 27;
+ for (i = 0; i < pmu_chip_ctl_reg; i++) {
+ pmu_var->pmu_chipcontrol1[i] = si_pmu_chipcontrol(sih, i, 0, 0);
+ }
+ pmu_chip_reg_reg = (pmu_var->pmu_capabilities & 0x07c00000);
+ pmu_chip_reg_reg = pmu_chip_reg_reg >> 22;
+ for (i = 0; i < pmu_chip_reg_reg; i++) {
+ pmu_var->pmu_regcontrol[i] = si_pmu_vreg_control(sih, i, 0, 0);
+ }
+ pmu_chip_pll_reg = (pmu_var->pmu_capabilities & 0x003e0000);
+ pmu_chip_pll_reg = pmu_chip_pll_reg >> 17;
+ for (i = 0; i < pmu_chip_pll_reg; i++) {
+ pmu_var->pmu_pllcontrol[i] = si_pmu_pllcontrol(sih, i, 0, 0);
+ }
+ pmu_chip_res_reg = (pmu_var->pmu_capabilities & 0x00001f00);
+ pmu_chip_res_reg = pmu_chip_res_reg >> 8;
+ for (i = 0; i < pmu_chip_res_reg; i++) {
+ si_corereg(sih, SI_CC_IDX, RSRCTABLEADDR, ~0, i);
+ pmu_var->pmu_rsrc_up_down_timer[i] = si_corereg(sih, SI_CC_IDX,
+ RSRCUPDWNTIME, 0, 0);
+ }
+ pmu_chip_res_reg = (pmu_var->pmu_capabilities & 0x00001f00);
+ pmu_chip_res_reg = pmu_chip_res_reg >> 8;
+ for (i = 0; i < pmu_chip_res_reg; i++) {
+ si_corereg(sih, SI_CC_IDX, RSRCTABLEADDR, ~0, i);
+ pmu_var->rsrc_dep_mask[i] = si_corereg(sih, SI_CC_IDX, PMU_RES_DEP_MASK, 0, 0);
+ }
+}
+
+void
+si_pmu_keep_on(const si_t *sih, int32 int_val)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ chipcregs_t *cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+ uint32 res_dep_mask;
+ uint32 min_res_mask;
+ uint32 max_res_mask;
+
+	/* Corresponding Resource Dependency Mask */
+ W_REG(sii->osh, &cc->res_table_sel, int_val);
+ res_dep_mask = R_REG(sii->osh, &cc->res_dep_mask);
+ /* Local change of minimum resource mask */
+ min_res_mask = res_dep_mask | 1 << int_val;
+ /* Corresponding change of Maximum Resource Mask */
+ max_res_mask = R_REG(sii->osh, &cc->max_res_mask);
+ max_res_mask = max_res_mask | min_res_mask;
+ W_REG(sii->osh, &cc->max_res_mask, max_res_mask);
+ /* Corresponding change of Minimum Resource Mask */
+ W_REG(sii->osh, &cc->min_res_mask, min_res_mask);
+}
+
+uint32
+si_pmu_keep_on_get(const si_t *sih)
+{
+ uint i;
+ const si_info_t *sii = SI_INFO(sih);
+ chipcregs_t *cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+ uint32 res_dep_mask;
+ uint32 min_res_mask;
+
+ /* Read min res mask */
+ min_res_mask = R_REG(sii->osh, &cc->min_res_mask);
+	/* Get corresponding Resource Dependency Mask */
+ for (i = 0; i < PMU_RES; i++) {
+ W_REG(sii->osh, &cc->res_table_sel, i);
+ res_dep_mask = R_REG(sii->osh, &cc->res_dep_mask);
+ res_dep_mask = res_dep_mask | 1 << i;
+ /* Compare with the present min res mask */
+ if (res_dep_mask == min_res_mask) {
+ return i;
+ }
+ }
+ return 0;
+}
+
+uint32
+si_power_island_set(si_t *sih, uint32 int_val)
+{
+ uint32 i = 0x0;
+ uint32 j;
+ uint32 k;
+ int cnt = 0;
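+	/* Validate the request against the allowed combinations, then pack the
+	 * four power-island enables into bits 21:18 of chip control reg 2.
+	 */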
+ for (k = 0; k < ARRAYSIZE(si_power_island_test_array); k++) {
+ if (int_val == si_power_island_test_array[k]) {
+ cnt = cnt + 1;
+ }
+ }
+ if (cnt > 0) {
+ if (int_val & SUBCORE_POWER_ON) {
+ i = i | 0x1;
+ }
+ if (int_val & PHY_POWER_ON) {
+ i = i | 0x2;
+ }
+ if (int_val & VDDM_POWER_ON) {
+ i = i | 0x4;
+ }
+ if (int_val & MEMLPLDO_POWER_ON) {
+ i = i | 0x8;
+ }
+ j = (i << 18) & 0x003c0000;
+ si_pmu_chipcontrol(sih, CHIPCTRLREG2, 0x003c0000, j);
+ } else {
+ return 0;
+ }
+
+ return 1;
+}
+
+uint32
+si_power_island_get(si_t *sih)
+{
+ uint32 sc_on = 0x0;
+ uint32 phy_on = 0x0;
+ uint32 vddm_on = 0x0;
+ uint32 memlpldo_on = 0x0;
+ uint32 res;
+ uint32 reg_val;
+ reg_val = si_pmu_chipcontrol(sih, CHIPCTRLREG2, 0, 0);
+ if (reg_val & SUBCORE_POWER_ON_CHK) {
+ sc_on = SUBCORE_POWER_ON;
+ }
+ if (reg_val & PHY_POWER_ON_CHK) {
+ phy_on = PHY_POWER_ON;
+ }
+ if (reg_val & VDDM_POWER_ON_CHK) {
+ vddm_on = VDDM_POWER_ON;
+ }
+ if (reg_val & MEMLPLDO_POWER_ON_CHK) {
+ memlpldo_on = MEMLPLDO_POWER_ON;
+ }
+ res = (sc_on | phy_on | vddm_on | memlpldo_on);
+ return res;
+}
+#endif /* SR_DEBUG */
+
+uint32
+si_pciereg(const si_t *sih, uint32 offset, uint32 mask, uint32 val, uint type)
+{
+ const si_info_t *sii = SI_INFO(sih);
+
+ if (!PCIE(sii)) {
+ SI_ERROR(("si_pciereg: Not a PCIE device\n"));
+ return 0;
+ }
+
+ return pcicore_pciereg(sii->pch, offset, mask, val, type);
+}
+
+uint32
+si_pcieserdesreg(const si_t *sih, uint32 mdioslave, uint32 offset, uint32 mask, uint32 val)
+{
+ const si_info_t *sii = SI_INFO(sih);
+
+ if (!PCIE(sii)) {
+ SI_ERROR(("si_pcieserdesreg: Not a PCIE device\n"));
+ return 0;
+ }
+
+ return pcicore_pcieserdesreg(sii->pch, mdioslave, offset, mask, val);
+
+}
+
+/** return TRUE if PCIE capability exists in the pci config space */
+static bool
+BCMATTACHFN(si_ispcie)(const si_info_t *sii)
+{
+ uint8 cap_ptr;
+
+ if (BUSTYPE(sii->pub.bustype) != PCI_BUS)
+ return FALSE;
+
+ cap_ptr = pcicore_find_pci_capability(sii->osh, PCI_CAP_PCIECAP_ID, NULL, NULL);
+ if (!cap_ptr)
+ return FALSE;
+
+ return TRUE;
+}
+
+/* Wake-on-wireless-LAN (WOWL) support functions */
+/** Enable PME generation and disable clkreq */
+void
+si_pci_pmeen(const si_t *sih)
+{
+ pcicore_pmeen(SI_INFO(sih)->pch);
+}
+
+/** Return TRUE if PME status is set */
+bool
+si_pci_pmestat(const si_t *sih)
+{
+ return pcicore_pmestat(SI_INFO(sih)->pch);
+}
+
+/** Disable PME generation, clear the PME status bit if set */
+void
+si_pci_pmeclr(const si_t *sih)
+{
+ pcicore_pmeclr(SI_INFO(sih)->pch);
+}
+
+void
+si_pci_pmestatclr(const si_t *sih)
+{
+ pcicore_pmestatclr(SI_INFO(sih)->pch);
+}
+
+#ifdef BCMSDIO
+/** initialize the sdio core */
+void
+si_sdio_init(si_t *sih)
+{
+ const si_info_t *sii = SI_INFO(sih);
+
+ if (BUSCORETYPE(sih->buscoretype) == SDIOD_CORE_ID) {
+ uint idx;
+ sdpcmd_regs_t *sdpregs;
+
+ /* get the current core index */
+ /* could do stuff like tcpflag in pci, but why? */
+ idx = sii->curidx;
+ ASSERT(idx == si_findcoreidx(sih, D11_CORE_ID, 0));
+
+ /* switch to sdio core */
+ /* could use buscoreidx? */
+ sdpregs = (sdpcmd_regs_t *)si_setcore(sih, SDIOD_CORE_ID, 0);
+ ASSERT(sdpregs);
+
+ SI_MSG(("si_sdio_init: For SDIO Corerev %d, enable ints from core %d "
+ "through SD core %d (%p)\n",
+ sih->buscorerev, idx, sii->curidx, OSL_OBFUSCATE_BUF(sdpregs)));
+
+ /* enable backplane error and core interrupts */
+ W_REG(sii->osh, &sdpregs->hostintmask, I_SBINT);
+ W_REG(sii->osh, &sdpregs->sbintmask, (I_SB_SERR | I_SB_RESPERR | (1 << idx)));
+
+ /* switch back to previous core */
+ si_setcoreidx(sih, idx);
+ }
+
+ /* enable interrupts */
+ bcmsdh_intr_enable(sii->sdh);
+
+ /* What else */
+}
+#endif /* BCMSDIO */
+
+/**
+ * Disable pcie_war_ovr for some platforms (sigh!)
+ * This is for boards that have BFL2_PCIEWAR_OVR set
+ * but are in systems that still want the benefits of ASPM
+ * Note that this should be done AFTER si_doattach
+ */
+void
+si_pcie_war_ovr_update(const si_t *sih, uint8 aspm)
+{
+ const si_info_t *sii = SI_INFO(sih);
+
+ if (!PCIE_GEN1(sii))
+ return;
+
+ pcie_war_ovr_aspm_update(sii->pch, aspm);
+}
+
+void
+si_pcie_power_save_enable(const si_t *sih, bool enable)
+{
+ const si_info_t *sii = SI_INFO(sih);
+
+ if (!PCIE_GEN1(sii))
+ return;
+
+ pcie_power_save_enable(sii->pch, enable);
+}
+
+void
+si_pcie_set_maxpayload_size(const si_t *sih, uint16 size)
+{
+ const si_info_t *sii = SI_INFO(sih);
+
+ if (!PCIE(sii))
+ return;
+
+ pcie_set_maxpayload_size(sii->pch, size);
+}
+
+uint16
+si_pcie_get_maxpayload_size(const si_t *sih)
+{
+ const si_info_t *sii = SI_INFO(sih);
+
+ if (!PCIE(sii))
+ return (0);
+
+ return pcie_get_maxpayload_size(sii->pch);
+}
+
+void
+si_pcie_set_request_size(const si_t *sih, uint16 size)
+{
+ const si_info_t *sii = SI_INFO(sih);
+
+ if (!PCIE(sii))
+ return;
+
+ pcie_set_request_size(sii->pch, size);
+}
+
+uint16
+BCMATTACHFN(si_pcie_get_request_size)(const si_t *sih)
+{
+ const si_info_t *sii = SI_INFO(sih);
+
+ if (!PCIE_GEN1(sii))
+ return (0);
+
+ return pcie_get_request_size(sii->pch);
+}
+
+uint16
+si_pcie_get_ssid(const si_t *sih)
+{
+ const si_info_t *sii = SI_INFO(sih);
+
+ if (!PCIE_GEN1(sii))
+ return (0);
+
+ return pcie_get_ssid(sii->pch);
+}
+
+uint32
+si_pcie_get_bar0(const si_t *sih)
+{
+ const si_info_t *sii = SI_INFO(sih);
+
+ if (!PCIE(sii))
+ return (0);
+
+ return pcie_get_bar0(sii->pch);
+}
+
+int
+si_pcie_configspace_cache(const si_t *sih)
+{
+ const si_info_t *sii = SI_INFO(sih);
+
+ if (!PCIE(sii))
+ return BCME_UNSUPPORTED;
+
+ return pcie_configspace_cache(sii->pch);
+}
+
+int
+si_pcie_configspace_restore(const si_t *sih)
+{
+ const si_info_t *sii = SI_INFO(sih);
+
+ if (!PCIE(sii))
+ return BCME_UNSUPPORTED;
+
+ return pcie_configspace_restore(sii->pch);
+}
+
+int
+si_pcie_configspace_get(const si_t *sih, uint8 *buf, uint size)
+{
+ const si_info_t *sii = SI_INFO(sih);
+
+ if (!PCIE(sii) || size > PCI_CONFIG_SPACE_SIZE)
+ return -1;
+
+ return pcie_configspace_get(sii->pch, buf, size);
+}
+
+void
+si_pcie_hw_L1SS_war(const si_t *sih)
+{
+ const si_info_t *sii = SI_INFO(sih);
+
+ /* SWWLAN-41753: WAR intermittent issue with D3Cold and L1.2 exit,
+ * need to update PMU rsrc dependency
+ */
+ if (PCIE_GEN2(sii))
+ pcie_hw_L1SS_war(sii->pch);
+}
+
+void
+BCMINITFN(si_pci_up)(const si_t *sih)
+{
+ const si_info_t *sii;
+
+ /* if not pci bus, we're done */
+ if (BUSTYPE(sih->bustype) != PCI_BUS)
+ return;
+
+ sii = SI_INFO(sih);
+
+ if (PCIE(sii)) {
+ pcicore_up(sii->pch, SI_PCIUP);
+ }
+}
+
+/** Unconfigure and/or apply various WARs when system is going to sleep mode */
+void
+BCMUNINITFN(si_pci_sleep)(const si_t *sih)
+{
+ /* 4360 pcie2 WAR */
+ do_4360_pcie2_war = 0;
+
+ pcicore_sleep(SI_INFO(sih)->pch);
+}
+
+/** Unconfigure and/or apply various WARs when the wireless interface is going down */
+void
+BCMINITFN(si_pci_down)(const si_t *sih)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ BCM_REFERENCE(sii);
+
+ /* if not pci bus, we're done */
+ if (BUSTYPE(sih->bustype) != PCI_BUS)
+ return;
+
+ pcicore_down(sii->pch, SI_PCIDOWN);
+}
+
+/**
+ * Configure the pci core for pci client (NIC) action
+ * coremask is the bitvec of cores by index to be enabled.
+ */
+void
+BCMATTACHFN(si_pci_setup)(si_t *sih, uint coremask)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ sbpciregs_t *pciregs = NULL;
+ uint32 siflag = 0, w;
+ uint idx = 0;
+
+ if (BUSTYPE(sii->pub.bustype) != PCI_BUS)
+ return;
+
+ ASSERT(PCI(sii) || PCIE(sii));
+ ASSERT(sii->pub.buscoreidx != BADIDX);
+
+ if (PCI(sii)) {
+ /* get current core index */
+ idx = sii->curidx;
+
+ /* we interrupt on this backplane flag number */
+ siflag = si_flag(sih);
+
+ /* switch over to pci core */
+ pciregs = (sbpciregs_t *)si_setcoreidx(sih, sii->pub.buscoreidx);
+ }
+
+ /*
+	 * Enable sb->pci interrupts. Assume PCI rev 2.3 support was added in
+	 * pci core rev 6, which changed how core interrupts are enabled (via
+	 * PCIIntMask in PCI config space rather than sbintvec).
+ */
+ if (PCIE(sii) || (PCI(sii) && ((sii->pub.buscorerev) >= 6))) {
+ /* pci config write to set this core bit in PCIIntMask */
+ w = OSL_PCI_READ_CONFIG(sii->osh, PCI_INT_MASK, sizeof(uint32));
+ w |= (coremask << PCI_SBIM_SHIFT);
+#ifdef USER_MODE
+		/* User mode operates with interrupts disabled */
+ w &= !(coremask << PCI_SBIM_SHIFT);
+#endif
+ OSL_PCI_WRITE_CONFIG(sii->osh, PCI_INT_MASK, sizeof(uint32), w);
+ } else {
+ /* set sbintvec bit for our flag number */
+ si_setint(sih, siflag);
+ }
+
+ /*
+ * enable prefetch and bursts for dma big window
+ * enable read multiple for dma big window corerev >= 11
+ * PR 9962/4708: Set initiator timeouts. corerev < 5
+ */
+ if (PCI(sii)) {
+ OR_REG(sii->osh, &pciregs->sbtopci2, (SBTOPCI_PREF | SBTOPCI_BURST));
+ if (sii->pub.buscorerev >= 11) {
+ OR_REG(sii->osh, &pciregs->sbtopci2, SBTOPCI_RC_READMULTI);
+ /* PR50531: On some Laptops, the 4321 CB shows bad
+ * UDP performance on one direction
+ */
+ w = R_REG(sii->osh, &pciregs->clkrun);
+ W_REG(sii->osh, &pciregs->clkrun, (w | PCI_CLKRUN_DSBL));
+ w = R_REG(sii->osh, &pciregs->clkrun);
+ }
+
+ /* switch back to previous core */
+ si_setcoreidx(sih, idx);
+ }
+}
+
+/* In NIC mode, is there a better way to find out which ARM core is present? */
+static uint
+BCMATTACHFN(si_get_armcoreidx)(si_t *sih)
+{
+ uint saveidx = si_coreidx(sih);
+ uint coreidx = BADIDX;
+
+ if (si_setcore(sih, ARMCR4_CORE_ID, 0) != NULL ||
+ si_setcore(sih, ARMCA7_CORE_ID, 0) != NULL) {
+ coreidx = si_coreidx(sih);
+ }
+
+ si_setcoreidx(sih, saveidx);
+
+ return coreidx;
+}
+
+/**
+ * Configure the pcie core for pcie client (NIC) action
+ * coreidx is the index of the core to be enabled.
+ */
+int
+BCMATTACHFN(si_pcie_setup)(si_t *sih, uint coreidx)
+{
+ si_info_t *sii = SI_INFO(sih);
+ int main_intr, alt_intr;
+ uint pciepidx;
+ uint32 w;
+ osl_t *osh = si_osh(sih);
+ uint saveidx = si_coreidx(sih);
+ volatile void *oobrregs;
+ uint armcidx, armpidx;
+ int ret = BCME_OK;
+
+ /* try the new hnd oobr first */
+ if ((oobrregs = si_setcore(sih, HND_OOBR_CORE_ID, 0)) == NULL) {
+ goto exit;
+ }
+
+ ASSERT(BUSTYPE(sih->bustype) == PCI_BUS);
+ ASSERT(BUSTYPE(sih->buscoretype) == PCIE2_CORE_ID);
+
+ /* ==== Enable sb->pci interrupts ==== */
+
+ /* 1) query the pcie interrupt port index and
+ * re-route the main interrupt to pcie (from ARM) if necessary
+ */
+ main_intr = hnd_oobr_get_intr_config(sih, coreidx,
+ HND_CORE_MAIN_INTR, sih->buscoreidx, &pciepidx);
+ if (main_intr < 0) {
+ /* query failure means the main interrupt is not routed
+ * to the pcie core... re-route!
+ */
+ armcidx = si_get_armcoreidx(sih);
+ if (!GOODIDX(armcidx, sii->numcores)) {
+ SI_MSG(("si_pcie_setup: arm core not found\n"));
+ ret = BCME_NOTFOUND;
+ goto exit;
+ }
+
+ /* query main and alt interrupt info */
+ main_intr = hnd_oobr_get_intr_config(sih, coreidx,
+ HND_CORE_MAIN_INTR, armcidx, &armpidx);
+ alt_intr = hnd_oobr_get_intr_config(sih, coreidx,
+ HND_CORE_ALT_INTR, sih->buscoreidx, &pciepidx);
+ if ((ret = main_intr) < 0 || (ret = alt_intr) < 0) {
+ SI_MSG(("si_pcie_setup: coreidx %u main (=%d) or "
+ "alt (=%d) interrupt query failed\n",
+ coreidx, main_intr, alt_intr));
+ goto exit;
+ }
+
+ /* swap main and alt interrupts at pcie input interrupts */
+ hnd_oobr_set_intr_src(sih, sih->buscoreidx, pciepidx, main_intr);
+ /* TODO: route the alternate interrupt to arm */
+ /* hnd_oobr_set_intr_src(sih, armcidx, armppidx, alt_intr); */
+ BCM_REFERENCE(armpidx);
+
+ /* query main interrupt info again.
+ * is it really necessary?
+ * it can't fail as we just set it up...
+ */
+ main_intr = hnd_oobr_get_intr_config(sih, coreidx,
+ HND_CORE_MAIN_INTR, sih->buscoreidx, &pciepidx);
+ ASSERT(main_intr >= 0);
+ }
+ /* hnd_oobr_dump(sih); */
+
+ /* 2) pcie config write to set this core bit in PCIIntMask */
+ w = OSL_PCI_READ_CONFIG(osh, PCI_INT_MASK, sizeof(w));
+ w |= ((1 << pciepidx) << PCI_SBIM_SHIFT);
+ OSL_PCI_WRITE_CONFIG(osh, PCI_INT_MASK, sizeof(w), w);
+
+ /* ==== other setups ==== */
+
+ /* reset the return value */
+ ret = BCME_OK;
+exit:
+ /* return to the original core */
+ si_setcoreidx(sih, saveidx);
+ if (ret != BCME_OK) {
+ return ret;
+ }
+
+ /* fall back to the old way... */
+ if (oobrregs == NULL) {
+ uint coremask = (1 << coreidx);
+ si_pci_setup(sih, coremask);
+ }
+
+ return ret;
+}
+
+uint8
+si_pcieclkreq(const si_t *sih, uint32 mask, uint32 val)
+{
+ const si_info_t *sii = SI_INFO(sih);
+
+ if (!PCIE(sii))
+ return 0;
+
+ return pcie_clkreq(sii->pch, mask, val);
+}
+
+uint32
+si_pcielcreg(const si_t *sih, uint32 mask, uint32 val)
+{
+ const si_info_t *sii = SI_INFO(sih);
+
+ if (!PCIE(sii))
+ return 0;
+
+ return pcie_lcreg(sii->pch, mask, val);
+}
+
+uint8
+si_pcieltrenable(const si_t *sih, uint32 mask, uint32 val)
+{
+ const si_info_t *sii = SI_INFO(sih);
+
+ if (!(PCIE(sii)))
+ return 0;
+
+ return pcie_ltrenable(sii->pch, mask, val);
+}
+
+uint8
+BCMATTACHFN(si_pcieobffenable)(const si_t *sih, uint32 mask, uint32 val)
+{
+ const si_info_t *sii = SI_INFO(sih);
+
+ if (!(PCIE(sii)))
+ return 0;
+
+ return pcie_obffenable(sii->pch, mask, val);
+}
+
+uint32
+si_pcieltr_reg(const si_t *sih, uint32 reg, uint32 mask, uint32 val)
+{
+ const si_info_t *sii = SI_INFO(sih);
+
+ if (!(PCIE(sii)))
+ return 0;
+
+ return pcie_ltr_reg(sii->pch, reg, mask, val);
+}
+
+uint32
+si_pcieltrspacing_reg(const si_t *sih, uint32 mask, uint32 val)
+{
+ const si_info_t *sii = SI_INFO(sih);
+
+ if (!(PCIE(sii)))
+ return 0;
+
+ return pcieltrspacing_reg(sii->pch, mask, val);
+}
+
+uint32
+si_pcieltrhysteresiscnt_reg(const si_t *sih, uint32 mask, uint32 val)
+{
+ const si_info_t *sii = SI_INFO(sih);
+
+ if (!(PCIE(sii)))
+ return 0;
+
+ return pcieltrhysteresiscnt_reg(sii->pch, mask, val);
+}
+
+void
+si_pcie_set_error_injection(const si_t *sih, uint32 mode)
+{
+ const si_info_t *sii = SI_INFO(sih);
+
+ if (!PCIE(sii))
+ return;
+
+ pcie_set_error_injection(sii->pch, mode);
+}
+
+void
+si_pcie_set_L1substate(const si_t *sih, uint32 substate)
+{
+ const si_info_t *sii = SI_INFO(sih);
+
+ if (PCIE_GEN2(sii))
+ pcie_set_L1substate(sii->pch, substate);
+}
+#ifndef BCM_BOOTLOADER
+uint32
+si_pcie_get_L1substate(const si_t *sih)
+{
+ const si_info_t *sii = SI_INFO(sih);
+
+ if (PCIE_GEN2(sii))
+ return pcie_get_L1substate(sii->pch);
+
+ return 0;
+}
+#endif /* BCM_BOOTLOADER */
+/** indirect way to read pcie config regs */
+uint
+si_pcie_readreg(void *sih, uint addrtype, uint offset)
+{
+ return pcie_readreg(sih, (sbpcieregs_t *)PCIEREGS(((si_info_t *)sih)),
+ addrtype, offset);
+}
+
+/* indirect way to write pcie config regs */
+uint
+si_pcie_writereg(void *sih, uint addrtype, uint offset, uint val)
+{
+ return pcie_writereg(sih, (sbpcieregs_t *)PCIEREGS(((si_info_t *)sih)),
+ addrtype, offset, val);
+}
+
+/**
+ * PCI(e) core requires additional software initialization in an SROMless system. In such a system,
+ * the PCIe core will assume POR defaults, which are mostly ok, with the exception of the mapping of
+ * two address subwindows within the BAR0 window.
+ * Note: the current core may be changed upon return.
+ */
+int
+si_pci_fixcfg(si_t *sih)
+{
+#ifndef DONGLEBUILD
+
+ uint origidx, pciidx;
+ sbpciregs_t *pciregs = NULL;
+ sbpcieregs_t *pcieregs = NULL;
+ uint16 val16;
+ volatile uint16 *reg16 = NULL;
+
+ si_info_t *sii = SI_INFO(sih);
+
+ ASSERT(BUSTYPE(sii->pub.bustype) == PCI_BUS);
+
+ /* Fixup PI in SROM shadow area to enable the correct PCI core access */
+ origidx = si_coreidx(&sii->pub);
+
+ /* check 'pi' is correct and fix it if not. */
+ if (BUSCORETYPE(sii->pub.buscoretype) == PCIE2_CORE_ID) {
+ pcieregs = (sbpcieregs_t *)si_setcore(&sii->pub, PCIE2_CORE_ID, 0);
+ ASSERT(pcieregs != NULL);
+ reg16 = &pcieregs->sprom[SRSH_PI_OFFSET];
+ } else if (BUSCORETYPE(sii->pub.buscoretype) == PCIE_CORE_ID) {
+ pcieregs = (sbpcieregs_t *)si_setcore(&sii->pub, PCIE_CORE_ID, 0);
+ ASSERT(pcieregs != NULL);
+ reg16 = &pcieregs->sprom[SRSH_PI_OFFSET];
+ } else if (BUSCORETYPE(sii->pub.buscoretype) == PCI_CORE_ID) {
+ pciregs = (sbpciregs_t *)si_setcore(&sii->pub, PCI_CORE_ID, 0);
+ ASSERT(pciregs != NULL);
+ reg16 = &pciregs->sprom[SRSH_PI_OFFSET];
+ }
+ pciidx = si_coreidx(&sii->pub);
+
+	if (!reg16)
+		return -1;
+
+ val16 = R_REG(sii->osh, reg16);
+ if (((val16 & SRSH_PI_MASK) >> SRSH_PI_SHIFT) != (uint16)pciidx) {
+ /* write bitfield used to translate 3rd and 7th 4K chunk in the Bar0 space. */
+ val16 = (uint16)(pciidx << SRSH_PI_SHIFT) | (val16 & ~SRSH_PI_MASK);
+ W_REG(sii->osh, reg16, val16);
+ }
+
+ /* restore the original index */
+ si_setcoreidx(&sii->pub, origidx);
+
+ pcicore_hwup(sii->pch);
+#endif /* DONGLEBUILD */
+ return 0;
+} /* si_pci_fixcfg */
+
+#if defined(BCMDBG) || defined(BCMDBG_DUMP) || defined(WLTEST)
+int
+si_dump_pcieinfo(const si_t *sih, struct bcmstrbuf *b)
+{
+ const si_info_t *sii = SI_INFO(sih);
+
+ if (!PCIE_GEN1(sii) && !PCIE_GEN2(sii))
+ return BCME_ERROR;
+
+ return pcicore_dump_pcieinfo(sii->pch, b);
+}
+
+void
+si_dump_pmuregs(si_t *sih, struct bcmstrbuf *b)
+{
+ uint i;
+ uint32 pmu_cap;
+ uint32 pmu_chip_reg;
+
+ bcm_bprintf(b, "===pmu(rev %d)===\n", sih->pmurev);
+ if (!(sih->pmurev == 0x11 || (sih->pmurev >= 0x15 && sih->pmurev <= 0x19))) {
+ bcm_bprintf(b, "PMU dump not supported\n");
+ return;
+ }
+ pmu_cap = si_ccreg(sih, PMU_CAP, 0, 0);
+ bcm_bprintf(b, "pmu_control 0x%x\n", si_ccreg(sih, PMU_CTL, 0, 0));
+ bcm_bprintf(b, "pmu_capabilities 0x%x\n", pmu_cap);
+ bcm_bprintf(b, "pmu_status 0x%x\n", si_ccreg(sih, PMU_ST, 0, 0));
+ bcm_bprintf(b, "res_state 0x%x\n", si_ccreg(sih, PMU_RES_STATE, 0, 0));
+ bcm_bprintf(b, "res_pending 0x%x\n", si_ccreg(sih, PMU_RES_PENDING, 0, 0));
+ bcm_bprintf(b, "pmu_timer1 %d\n", si_ccreg(sih, PMU_TIMER, 0, 0));
+ bcm_bprintf(b, "min_res_mask 0x%x\n", si_ccreg(sih, MINRESMASKREG, 0, 0));
+ bcm_bprintf(b, "max_res_mask 0x%x\n", si_ccreg(sih, MAXRESMASKREG, 0, 0));
+
+ pmu_chip_reg = (pmu_cap & 0xf8000000);
+ pmu_chip_reg = pmu_chip_reg >> 27;
+ bcm_bprintf(b, "si_pmu_chipcontrol: ");
+ for (i = 0; i < pmu_chip_reg; i++) {
+ bcm_bprintf(b, "[%d]=0x%x ", i, si_pmu_chipcontrol(sih, i, 0, 0));
+ }
+
+ pmu_chip_reg = (pmu_cap & 0x07c00000);
+ pmu_chip_reg = pmu_chip_reg >> 22;
+ bcm_bprintf(b, "\nsi_pmu_vregcontrol: ");
+ for (i = 0; i < pmu_chip_reg; i++) {
+ bcm_bprintf(b, "[%d]=0x%x ", i, si_pmu_vreg_control(sih, i, 0, 0));
+ }
+ pmu_chip_reg = (pmu_cap & 0x003e0000);
+ pmu_chip_reg = pmu_chip_reg >> 17;
+ bcm_bprintf(b, "\nsi_pmu_pllcontrol: ");
+ for (i = 0; i < pmu_chip_reg; i++) {
+ bcm_bprintf(b, "[%d]=0x%x ", i, si_pmu_pllcontrol(sih, i, 0, 0));
+ }
+ pmu_chip_reg = (pmu_cap & 0x0001e000);
+ pmu_chip_reg = pmu_chip_reg >> 13;
+ bcm_bprintf(b, "\nsi_pmu_res u/d timer: ");
+ for (i = 0; i < pmu_chip_reg; i++) {
+ si_corereg(sih, SI_CC_IDX, RSRCTABLEADDR, ~0, i);
+ bcm_bprintf(b, "[%d]=0x%x ", i, si_corereg(sih, SI_CC_IDX, RSRCUPDWNTIME, 0, 0));
+ }
+ pmu_chip_reg = (pmu_cap & 0x00001f00);
+ pmu_chip_reg = pmu_chip_reg >> 8;
+ bcm_bprintf(b, "\nsi_pmu_res dep_mask: ");
+ for (i = 0; i < pmu_chip_reg; i++) {
+ si_corereg(sih, SI_CC_IDX, RSRCTABLEADDR, ~0, i);
+ bcm_bprintf(b, "[%d]=0x%x ", i, si_corereg(sih, SI_CC_IDX, PMU_RES_DEP_MASK, 0, 0));
+ }
+ bcm_bprintf(b, "\n");
+}
+
+int
+si_dump_pcieregs(const si_t *sih, struct bcmstrbuf *b)
+{
+ const si_info_t *sii = SI_INFO(sih);
+
+ if (!PCIE_GEN1(sii) && !PCIE_GEN2(sii))
+ return BCME_ERROR;
+
+ return pcicore_dump_pcieregs(sii->pch, b);
+}
+
+#endif /* BCMDBG || BCMDBG_DUMP || WLTEST */
+
+#if defined(BCMDBG) || defined(BCMDBG_DUMP) || defined(BCMDBG_PHYDUMP)
+void
+si_dump(const si_t *sih, struct bcmstrbuf *b)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
+ uint i;
+
+ bcm_bprintf(b, "si %p chip 0x%x chiprev 0x%x boardtype 0x%x boardvendor 0x%x bus %d\n",
+ OSL_OBFUSCATE_BUF(sii), sih->chip, sih->chiprev,
+ sih->boardtype, sih->boardvendor, sih->bustype);
+ bcm_bprintf(b, "osh %p curmap %p\n",
+ OSL_OBFUSCATE_BUF(sii->osh), OSL_OBFUSCATE_BUF(sii->curmap));
+
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ bcm_bprintf(b, "sonicsrev %d ", sih->socirev);
+ bcm_bprintf(b, "ccrev %d buscoretype 0x%x buscorerev %d curidx %d\n",
+ CCREV(sih->ccrev), sih->buscoretype, sih->buscorerev, sii->curidx);
+
+#ifdef BCMDBG
+ if ((BUSTYPE(sih->bustype) == PCI_BUS) && (sii->pch))
+ pcicore_dump(sii->pch, b);
+#endif
+
+ bcm_bprintf(b, "cores: ");
+ for (i = 0; i < sii->numcores; i++)
+ bcm_bprintf(b, "0x%x ", cores_info->coreid[i]);
+ bcm_bprintf(b, "\n");
+}
+
+void
+si_ccreg_dump(si_t *sih, struct bcmstrbuf *b)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ uint origidx;
+ uint i;
+ bcm_int_bitmask_t intr_val;
+ chipcregs_t *cc;
+
+	/* only support corerev 23 for now */
+ if (CCREV(sih->ccrev) != 23)
+ return;
+
+ origidx = sii->curidx;
+
+ INTR_OFF(sii, &intr_val);
+
+ cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+ ASSERT(cc);
+
+ bcm_bprintf(b, "\n===cc(rev %d) registers(offset val)===\n", CCREV(sih->ccrev));
+ for (i = 0; i <= 0xc4; i += 4) {
+ if (i == 0x4c) {
+ bcm_bprintf(b, "\n");
+ continue;
+ }
+ bcm_bprintf(b, "0x%x\t0x%x\n", i, *(uint32 *)((uintptr)cc + i));
+ }
+
+ bcm_bprintf(b, "\n");
+
+ for (i = 0x1e0; i <= 0x1e4; i += 4) {
+ bcm_bprintf(b, "0x%x\t0x%x\n", i, *(uint32 *)((uintptr)cc + i));
+ }
+ bcm_bprintf(b, "\n");
+
+ if (sih->cccaps & CC_CAP_PMU) {
+ for (i = 0x600; i <= 0x660; i += 4) {
+ bcm_bprintf(b, "0x%x\t0x%x\n", i, *(uint32 *)((uintptr)cc + i));
+ }
+ }
+ bcm_bprintf(b, "\n");
+
+ si_setcoreidx(sih, origidx);
+ INTR_RESTORE(sii, &intr_val);
+}
+
+/** dump dynamic clock control related registers */
+void
+si_clkctl_dump(si_t *sih, struct bcmstrbuf *b)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ chipcregs_t *cc;
+ uint origidx;
+ bcm_int_bitmask_t intr_val;
+
+ if (!(sih->cccaps & CC_CAP_PWR_CTL))
+ return;
+
+ INTR_OFF(sii, &intr_val);
+ origidx = sii->curidx;
+ if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL)
+ goto done;
+
+ bcm_bprintf(b, "pll_on_delay 0x%x fref_sel_delay 0x%x ",
+ cc->pll_on_delay, cc->fref_sel_delay);
+ if ((CCREV(sih->ccrev) >= 6) && (CCREV(sih->ccrev) < 10))
+ bcm_bprintf(b, "slow_clk_ctl 0x%x ", cc->slow_clk_ctl);
+ if (CCREV(sih->ccrev) >= 10) {
+ bcm_bprintf(b, "system_clk_ctl 0x%x ", cc->system_clk_ctl);
+ bcm_bprintf(b, "clkstatestretch 0x%x ", cc->clkstatestretch);
+ }
+
+ if (BUSTYPE(sih->bustype) == PCI_BUS)
+ bcm_bprintf(b, "gpioout 0x%x gpioouten 0x%x ",
+ OSL_PCI_READ_CONFIG(sii->osh, PCI_GPIO_OUT, sizeof(uint32)),
+ OSL_PCI_READ_CONFIG(sii->osh, PCI_GPIO_OUTEN, sizeof(uint32)));
+
+ if (sih->cccaps & CC_CAP_PMU) {
+		/* dump some PMU registers? */
+ }
+ bcm_bprintf(b, "\n");
+
+ si_setcoreidx(sih, origidx);
+done:
+ INTR_RESTORE(sii, &intr_val);
+}
+
+int
+si_gpiodump(si_t *sih, struct bcmstrbuf *b)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ uint origidx;
+ bcm_int_bitmask_t intr_val;
+ chipcregs_t *cc;
+
+ INTR_OFF(sii, &intr_val);
+
+ origidx = si_coreidx(sih);
+
+ cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+ ASSERT(cc);
+
+ bcm_bprintf(b, "GPIOregs\t");
+
+ bcm_bprintf(b, "gpioin 0x%x ", R_REG(sii->osh, &cc->gpioin));
+ bcm_bprintf(b, "gpioout 0x%x ", R_REG(sii->osh, &cc->gpioout));
+ bcm_bprintf(b, "gpioouten 0x%x ", R_REG(sii->osh, &cc->gpioouten));
+ bcm_bprintf(b, "gpiocontrol 0x%x ", R_REG(sii->osh, &cc->gpiocontrol));
+ bcm_bprintf(b, "gpiointpolarity 0x%x ", R_REG(sii->osh, &cc->gpiointpolarity));
+ bcm_bprintf(b, "gpiointmask 0x%x ", R_REG(sii->osh, &cc->gpiointmask));
+
+ bcm_bprintf(b, "\n");
+
+ /* restore the original index */
+ si_setcoreidx(sih, origidx);
+
+ INTR_RESTORE(sii, &intr_val);
+ return 0;
+
+}
+#endif /* BCMDBG || BCMDBG_DUMP || BCMDBG_PHYDUMP */
+
+#endif /* !defined(BCMDONGLEHOST) */
+
+/** change logical "focus" to the gpio core for optimized access */
+volatile void *
+si_gpiosetcore(si_t *sih)
+{
+ return (si_setcoreidx(sih, SI_CC_IDX));
+}
+
+/**
+ * mask & set gpiocontrol bits.
+ * If a gpiocontrol bit is set to 0, chipcommon controls the corresponding GPIO pin.
+ * If a gpiocontrol bit is set to 1, the GPIO pin is no longer a GPIO and becomes dedicated
+ * to some chip-specific purpose.
+ */
+uint32
+BCMPOSTTRAPFN(si_gpiocontrol)(si_t *sih, uint32 mask, uint32 val, uint8 priority)
+{
+ uint regoff;
+
+ regoff = 0;
+
+ /* gpios could be shared on router platforms
+ * ignore reservation if it's high priority (e.g., test apps)
+ */
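+	/* In effect: priority callers may only touch reserved gpios, while
+	 * non-priority callers may only touch unreserved ones.
+	 */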
+ if ((priority != GPIO_HI_PRIORITY) &&
+ (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+ mask = priority ? (si_gpioreservation & mask) :
+ ((si_gpioreservation | mask) & ~(si_gpioreservation));
+ val &= mask;
+ }
+
+ regoff = OFFSETOF(chipcregs_t, gpiocontrol);
+ return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
+
+/** mask&set gpio output enable bits */
+uint32
+BCMPOSTTRAPFN(si_gpioouten)(si_t *sih, uint32 mask, uint32 val, uint8 priority)
+{
+ uint regoff;
+
+ regoff = 0;
+
+ /* gpios could be shared on router platforms
+ * ignore reservation if it's high priority (e.g., test apps)
+ */
+ if ((priority != GPIO_HI_PRIORITY) &&
+ (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+ mask = priority ? (si_gpioreservation & mask) :
+ ((si_gpioreservation | mask) & ~(si_gpioreservation));
+ val &= mask;
+ }
+
+ regoff = OFFSETOF(chipcregs_t, gpioouten);
+ return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
+
+/** mask&set gpio output bits */
+uint32
+BCMPOSTTRAPFN(si_gpioout)(si_t *sih, uint32 mask, uint32 val, uint8 priority)
+{
+ uint regoff;
+
+ regoff = 0;
+
+ /* gpios could be shared on router platforms
+ * ignore reservation if it's high priority (e.g., test apps)
+ */
+ if ((priority != GPIO_HI_PRIORITY) &&
+ (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+ mask = priority ? (si_gpioreservation & mask) :
+ ((si_gpioreservation | mask) & ~(si_gpioreservation));
+ val &= mask;
+ }
+
+ regoff = OFFSETOF(chipcregs_t, gpioout);
+ return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
+
+/** reserve one gpio */
+uint32
+si_gpioreserve(const si_t *sih, uint32 gpio_bitmask, uint8 priority)
+{
+	/* only cores on SI_BUS share GPIOs and only application users need to
+ * reserve/release GPIO
+ */
+ if ((BUSTYPE(sih->bustype) != SI_BUS) || (!priority)) {
+ ASSERT((BUSTYPE(sih->bustype) == SI_BUS) && (priority));
+ return 0xffffffff;
+ }
+ /* make sure only one bit is set */
+ if ((!gpio_bitmask) || ((gpio_bitmask) & (gpio_bitmask - 1))) {
+ ASSERT((gpio_bitmask) && !((gpio_bitmask) & (gpio_bitmask - 1)));
+ return 0xffffffff;
+ }
+
+ /* already reserved */
+ if (si_gpioreservation & gpio_bitmask)
+ return 0xffffffff;
+ /* set reservation */
+ si_gpioreservation |= gpio_bitmask;
+
+ return si_gpioreservation;
+}
+
+/**
+ * release one gpio.
+ *
+ * Releasing the gpio doesn't change the current value on the GPIO; the last
+ * written value persists until someone overwrites it.
+ */
+uint32
+si_gpiorelease(const si_t *sih, uint32 gpio_bitmask, uint8 priority)
+{
+	/* only cores on SI_BUS share GPIOs and only application users need to
+ * reserve/release GPIO
+ */
+ if ((BUSTYPE(sih->bustype) != SI_BUS) || (!priority)) {
+ ASSERT((BUSTYPE(sih->bustype) == SI_BUS) && (priority));
+ return 0xffffffff;
+ }
+ /* make sure only one bit is set */
+ if ((!gpio_bitmask) || ((gpio_bitmask) & (gpio_bitmask - 1))) {
+ ASSERT((gpio_bitmask) && !((gpio_bitmask) & (gpio_bitmask - 1)));
+ return 0xffffffff;
+ }
+
+ /* already released */
+ if (!(si_gpioreservation & gpio_bitmask))
+ return 0xffffffff;
+
+ /* clear reservation */
+ si_gpioreservation &= ~gpio_bitmask;
+
+ return si_gpioreservation;
+}
+
+/* return the current gpioin register value */
+uint32
+si_gpioin(si_t *sih)
+{
+ uint regoff;
+
+ regoff = OFFSETOF(chipcregs_t, gpioin);
+ return (si_corereg(sih, SI_CC_IDX, regoff, 0, 0));
+}
+
+/* mask&set gpio interrupt polarity bits */
+uint32
+si_gpiointpolarity(si_t *sih, uint32 mask, uint32 val, uint8 priority)
+{
+ uint regoff;
+
+ /* gpios could be shared on router platforms */
+ if ((BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+ mask = priority ? (si_gpioreservation & mask) :
+ ((si_gpioreservation | mask) & ~(si_gpioreservation));
+ val &= mask;
+ }
+
+ regoff = OFFSETOF(chipcregs_t, gpiointpolarity);
+ return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
+
+/* mask&set gpio interrupt mask bits */
+uint32
+si_gpiointmask(si_t *sih, uint32 mask, uint32 val, uint8 priority)
+{
+ uint regoff;
+
+ /* gpios could be shared on router platforms */
+ if ((BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+ mask = priority ? (si_gpioreservation & mask) :
+ ((si_gpioreservation | mask) & ~(si_gpioreservation));
+ val &= mask;
+ }
+
+ regoff = OFFSETOF(chipcregs_t, gpiointmask);
+ return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
+
+uint32
+si_gpioeventintmask(si_t *sih, uint32 mask, uint32 val, uint8 priority)
+{
+ uint regoff;
+ /* gpios could be shared on router platforms */
+ if ((BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+ mask = priority ? (si_gpioreservation & mask) :
+ ((si_gpioreservation | mask) & ~(si_gpioreservation));
+ val &= mask;
+ }
+ regoff = OFFSETOF(chipcregs_t, gpioeventintmask);
+ return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
+
+uint32
+si_gpiopull(si_t *sih, bool updown, uint32 mask, uint32 val)
+{
+ uint offs;
+
+ if (CCREV(sih->ccrev) < 20)
+ return 0xffffffff;
+
+ offs = (updown ? OFFSETOF(chipcregs_t, gpiopulldown) : OFFSETOF(chipcregs_t, gpiopullup));
+ return (si_corereg(sih, SI_CC_IDX, offs, mask, val));
+}
+
+uint32
+si_gpioevent(si_t *sih, uint regtype, uint32 mask, uint32 val)
+{
+ uint offs;
+
+ if (CCREV(sih->ccrev) < 11)
+ return 0xffffffff;
+
+ if (regtype == GPIO_REGEVT)
+ offs = OFFSETOF(chipcregs_t, gpioevent);
+ else if (regtype == GPIO_REGEVT_INTMSK)
+ offs = OFFSETOF(chipcregs_t, gpioeventintmask);
+ else if (regtype == GPIO_REGEVT_INTPOL)
+ offs = OFFSETOF(chipcregs_t, gpioeventintpolarity);
+ else
+ return 0xffffffff;
+
+ return (si_corereg(sih, SI_CC_IDX, offs, mask, val));
+}
+
+uint32
+BCMATTACHFN(si_gpio_int_enable)(si_t *sih, bool enable)
+{
+ uint offs;
+
+ if (CCREV(sih->ccrev) < 11)
+ return 0xffffffff;
+
+ offs = OFFSETOF(chipcregs_t, intmask);
+ return (si_corereg(sih, SI_CC_IDX, offs, CI_GPIO, (enable ? CI_GPIO : 0)));
+}
+
+#if !defined(BCMDONGLEHOST)
+void
+si_gci_shif_config_wake_pin(si_t *sih, uint8 gpio_n, uint8 wake_events,
+ bool gci_gpio)
+{
+ uint8 chipcontrol = 0;
+ uint32 gci_wakset;
+
+ switch (CHIPID(sih->chip)) {
+ case BCM4376_CHIP_GRPID :
+ case BCM4378_CHIP_GRPID :
+ {
+ if (!gci_gpio) {
+ chipcontrol = (1 << GCI_GPIO_CHIPCTRL_ENAB_EXT_GPIO_BIT);
+ }
+ chipcontrol |= (1 << GCI_GPIO_CHIPCTRL_PULLUP_BIT);
+ chipcontrol |= (1 << GCI_GPIO_CHIPCTRL_INVERT_BIT);
+ si_gci_gpio_chipcontrol(sih, gpio_n,
+ (chipcontrol | (1 << GCI_GPIO_CHIPCTRL_ENAB_IN_BIT)));
+
+ /* enable gci gpio int/wake events */
+ si_gci_gpio_intmask(sih, gpio_n, wake_events, wake_events);
+ si_gci_gpio_wakemask(sih, gpio_n, wake_events, wake_events);
+
+ /* clear the existing status bits */
+ si_gci_gpio_status(sih, gpio_n,
+ GCI_GPIO_STS_CLEAR, GCI_GPIO_STS_CLEAR);
+
+ /* Enable gci2wl_wake for 4378 */
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL2,
+ CC2_4378_GCI2WAKE_MASK, CC2_4378_GCI2WAKE_MASK);
+
+ /* enable gci int/wake events */
+ gci_wakset = (GCI_INTSTATUS_GPIOWAKE) | (GCI_INTSTATUS_GPIOINT);
+
+ si_gci_indirect(sih, 0, GCI_OFFSETOF(sih, gci_intmask),
+ gci_wakset, gci_wakset);
+ /* Enable wake on GciWake */
+ si_gci_indirect(sih, 0, GCI_OFFSETOF(sih, gci_wakemask),
+ gci_wakset, gci_wakset);
+ break;
+ }
+ case BCM4385_CHIP_GRPID :
+ case BCM4387_CHIP_GRPID :
+ {
+ if (!gci_gpio) {
+ chipcontrol = (1 << GCI_GPIO_CHIPCTRL_ENAB_EXT_GPIO_BIT);
+ }
+ chipcontrol |= (1 << GCI_GPIO_CHIPCTRL_PULLUP_BIT);
+ chipcontrol |= (1 << GCI_GPIO_CHIPCTRL_INVERT_BIT);
+ si_gci_gpio_chipcontrol(sih, gpio_n,
+ (chipcontrol | (1 << GCI_GPIO_CHIPCTRL_ENAB_IN_BIT)));
+
+ /* enable gci gpio int/wake events */
+ si_gci_gpio_intmask(sih, gpio_n, wake_events, wake_events);
+ si_gci_gpio_wakemask(sih, gpio_n, wake_events, wake_events);
+
+ /* clear the existing status bits */
+ si_gci_gpio_status(sih, gpio_n,
+ GCI_GPIO_STS_CLEAR, GCI_GPIO_STS_CLEAR);
+
+ /* Enable gci2wl_wake for 4387 */
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL2,
+ CC2_4387_GCI2WAKE_MASK, CC2_4387_GCI2WAKE_MASK);
+
+ /* enable gci int/wake events */
+ gci_wakset = (GCI_INTSTATUS_GPIOWAKE) | (GCI_INTSTATUS_GPIOINT);
+
+ si_gci_indirect(sih, 0, GCI_OFFSETOF(sih, gci_intmask),
+ gci_wakset, gci_wakset);
+ /* Enable wake on GciWake */
+ si_gci_indirect(sih, 0, GCI_OFFSETOF(sih, gci_wakemask),
+ gci_wakset, gci_wakset);
+ break;
+ }
+ default:;
+ }
+}
+
+void
+si_shif_int_enable(si_t *sih, uint8 gpio_n, uint8 wake_events, bool enable)
+{
+ if (enable) {
+ si_gci_gpio_intmask(sih, gpio_n, wake_events, wake_events);
+ si_gci_gpio_wakemask(sih, gpio_n, wake_events, wake_events);
+ } else {
+ si_gci_gpio_intmask(sih, gpio_n, wake_events, 0);
+ si_gci_gpio_wakemask(sih, gpio_n, wake_events, 0);
+ }
+}
+#endif /* !defined(BCMDONGLEHOST) */
+
+/** Return the size of the specified SYSMEM bank */
+static uint
+sysmem_banksize(const si_info_t *sii, sysmemregs_t *regs, uint8 idx)
+{
+ uint banksize, bankinfo;
+ uint bankidx = idx;
+
+ W_REG(sii->osh, &regs->bankidx, bankidx);
+ bankinfo = R_REG(sii->osh, &regs->bankinfo);
+ banksize = SYSMEM_BANKINFO_SZBASE * ((bankinfo & SYSMEM_BANKINFO_SZMASK) + 1);
+ return banksize;
+}
+
+/** Return the RAM size of the SYSMEM core */
+uint32
+si_sysmem_size(si_t *sih)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ uint origidx;
+ bcm_int_bitmask_t intr_val;
+
+ sysmemregs_t *regs;
+ bool wasup;
+ uint32 coreinfo;
+ uint memsize = 0;
+ uint8 i;
+ uint nb, nrb;
+
+ /* Block ints and save current core */
+ INTR_OFF(sii, &intr_val);
+ origidx = si_coreidx(sih);
+
+ /* Switch to SYSMEM core */
+ if (!(regs = si_setcore(sih, SYSMEM_CORE_ID, 0)))
+ goto done;
+
+ /* Get info for determining size */
+ if (!(wasup = si_iscoreup(sih)))
+ si_core_reset(sih, 0, 0);
+ coreinfo = R_REG(sii->osh, &regs->coreinfo);
+
+ /* Number of ROM banks, SW need to skip the ROM banks. */
+ if (si_corerev(sih) < 12) {
+ nrb = (coreinfo & SYSMEM_SRCI_ROMNB_MASK) >> SYSMEM_SRCI_ROMNB_SHIFT;
+ nb = (coreinfo & SYSMEM_SRCI_SRNB_MASK) >> SYSMEM_SRCI_SRNB_SHIFT;
+ } else {
+ nrb = (coreinfo & SYSMEM_SRCI_NEW_ROMNB_MASK) >> SYSMEM_SRCI_NEW_ROMNB_SHIFT;
+ nb = (coreinfo & SYSMEM_SRCI_NEW_SRNB_MASK) >> SYSMEM_SRCI_NEW_SRNB_SHIFT;
+ }
+ for (i = 0; i < nb; i++)
+ memsize += sysmem_banksize(sii, regs, i + nrb);
+
+ si_setcoreidx(sih, origidx);
+
+done:
+ INTR_RESTORE(sii, &intr_val);
+
+ return memsize;
+}
+
+/** Return the size of the specified SOCRAM bank */
+static uint
+socram_banksize(const si_info_t *sii, sbsocramregs_t *regs, uint8 idx, uint8 mem_type)
+{
+ uint banksize, bankinfo;
+ uint bankidx = idx | (mem_type << SOCRAM_BANKIDX_MEMTYPE_SHIFT);
+
+ ASSERT(mem_type <= SOCRAM_MEMTYPE_DEVRAM);
+
+ W_REG(sii->osh, &regs->bankidx, bankidx);
+ bankinfo = R_REG(sii->osh, &regs->bankinfo);
+ banksize = SOCRAM_BANKINFO_SZBASE * ((bankinfo & SOCRAM_BANKINFO_SZMASK) + 1);
+ return banksize;
+}
+
+void si_socram_set_bankpda(si_t *sih, uint32 bankidx, uint32 bankpda)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ uint origidx;
+ bcm_int_bitmask_t intr_val;
+ sbsocramregs_t *regs;
+ bool wasup;
+ uint corerev;
+
+ /* Block ints and save current core */
+ INTR_OFF(sii, &intr_val);
+ origidx = si_coreidx(sih);
+
+ /* Switch to SOCRAM core */
+ if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
+ goto done;
+
+ if (!(wasup = si_iscoreup(sih)))
+ si_core_reset(sih, 0, 0);
+
+ corerev = si_corerev(sih);
+ if (corerev >= 16) {
+ W_REG(sii->osh, &regs->bankidx, bankidx);
+ W_REG(sii->osh, &regs->bankpda, bankpda);
+ }
+
+ /* Return to previous state and core */
+ if (!wasup)
+ si_core_disable(sih, 0);
+ si_setcoreidx(sih, origidx);
+
+done:
+ INTR_RESTORE(sii, &intr_val);
+}
+
+/** Return the RAM size of the SOCRAM core */
+uint32
+si_socram_size(si_t *sih)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ uint origidx;
+ bcm_int_bitmask_t intr_val;
+
+ sbsocramregs_t *regs;
+ bool wasup;
+ uint corerev;
+ uint32 coreinfo;
+ uint memsize = 0;
+
+ /* Block ints and save current core */
+ INTR_OFF(sii, &intr_val);
+ origidx = si_coreidx(sih);
+
+ /* Switch to SOCRAM core */
+ if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
+ goto done;
+
+ /* Get info for determining size */
+ if (!(wasup = si_iscoreup(sih)))
+ si_core_reset(sih, 0, 0);
+ corerev = si_corerev(sih);
+ coreinfo = R_REG(sii->osh, &regs->coreinfo);
+
+ /* Calculate size from coreinfo based on rev */
+ if (corerev == 0)
+ memsize = 1 << (16 + (coreinfo & SRCI_MS0_MASK));
+ else if (corerev < 3) {
+ memsize = 1 << (SR_BSZ_BASE + (coreinfo & SRCI_SRBSZ_MASK));
+ memsize *= (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
+ } else if ((corerev <= 7) || (corerev == 12)) {
+ uint nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
+ uint bsz = (coreinfo & SRCI_SRBSZ_MASK);
+ uint lss = (coreinfo & SRCI_LSS_MASK) >> SRCI_LSS_SHIFT;
+ if (lss != 0)
+			nb--;
+ memsize = nb * (1 << (bsz + SR_BSZ_BASE));
+ if (lss != 0)
+ memsize += (1 << ((lss - 1) + SR_BSZ_BASE));
+ } else {
+ uint8 i;
+ uint nb;
+		/* length of SRAM banks increased for corerev 23 and greater */
+ if (corerev >= 23) {
+ nb = (coreinfo & (SRCI_SRNB_MASK | SRCI_SRNB_MASK_EXT)) >> SRCI_SRNB_SHIFT;
+ } else {
+ nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
+ }
+ for (i = 0; i < nb; i++)
+ memsize += socram_banksize(sii, regs, i, SOCRAM_MEMTYPE_RAM);
+ }
+
+ /* Return to previous state and core */
+ if (!wasup)
+ si_core_disable(sih, 0);
+ si_setcoreidx(sih, origidx);
+
+done:
+ INTR_RESTORE(sii, &intr_val);
+
+ return memsize;
+}
+
+/* Return true if bus MPU is present */
+bool
+si_is_bus_mpu_present(si_t *sih)
+{
+ uint origidx, newidx = NODEV_CORE_ID;
+ sysmemregs_t *sysmemregs = NULL;
+ cr4regs_t *cr4regs;
+ const si_info_t *sii = SI_INFO(sih);
+ uint ret = 0;
+ bool wasup;
+
+ origidx = si_coreidx(sih);
+
+ cr4regs = si_setcore(sih, ARMCR4_CORE_ID, 0);
+ if (cr4regs) {
+ /* ARMCR4 */
+ newidx = ARMCR4_CORE_ID;
+ } else {
+ sysmemregs = si_setcore(sih, SYSMEM_CORE_ID, 0);
+ if (sysmemregs) {
+ /* ARMCA7 */
+ newidx = SYSMEM_CORE_ID;
+ }
+ }
+
+ if (newidx != NODEV_CORE_ID) {
+ if (!(wasup = si_iscoreup(sih))) {
+ si_core_reset(sih, 0, 0);
+ }
+ if (newidx == ARMCR4_CORE_ID) {
+ /* ARMCR4 */
+ ret = R_REG(sii->osh, &cr4regs->corecapabilities) & CAP_MPU_MASK;
+ } else {
+ /* ARMCA7 */
+ ret = R_REG(sii->osh, &sysmemregs->mpucapabilities) &
+ ACC_MPU_REGION_CNT_MASK;
+ }
+ if (!wasup) {
+ si_core_disable(sih, 0);
+ }
+ }
+
+ si_setcoreidx(sih, origidx);
+
+ return ret ? TRUE : FALSE;
+}
+
+#if defined(BCMDONGLEHOST)
+
+/** Return the TCM-RAM size of the ARMCR4 core. */
+uint32
+si_tcm_size(si_t *sih)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ uint origidx;
+ bcm_int_bitmask_t intr_val;
+ volatile uint8 *regs;
+ bool wasup;
+ uint32 corecap;
+ uint memsize = 0;
+ uint banku_size = 0;
+ uint32 nab = 0;
+ uint32 nbb = 0;
+ uint32 totb = 0;
+ uint32 bxinfo = 0;
+ uint32 idx = 0;
+ volatile uint32 *arm_cap_reg;
+ volatile uint32 *arm_bidx;
+ volatile uint32 *arm_binfo;
+
+ /* Block ints and save current core */
+ INTR_OFF(sii, &intr_val);
+ origidx = si_coreidx(sih);
+
+ /* Switch to CR4 core */
+ if (!(regs = si_setcore(sih, ARMCR4_CORE_ID, 0)))
+ goto done;
+
+ /* Get info for determining size. If in reset, come out of reset,
+ * but remain in halt
+ */
+ if (!(wasup = si_iscoreup(sih)))
+ si_core_reset(sih, SICF_CPUHALT, SICF_CPUHALT);
+
+ arm_cap_reg = (volatile uint32 *)(regs + SI_CR4_CAP);
+ corecap = R_REG(sii->osh, arm_cap_reg);
+
+ nab = (corecap & ARMCR4_TCBANB_MASK) >> ARMCR4_TCBANB_SHIFT;
+ nbb = (corecap & ARMCR4_TCBBNB_MASK) >> ARMCR4_TCBBNB_SHIFT;
+ totb = nab + nbb;
+
+ arm_bidx = (volatile uint32 *)(regs + SI_CR4_BANKIDX);
+ arm_binfo = (volatile uint32 *)(regs + SI_CR4_BANKINFO);
+ for (idx = 0; idx < totb; idx++) {
+ W_REG(sii->osh, arm_bidx, idx);
+
+ bxinfo = R_REG(sii->osh, arm_binfo);
+ if (bxinfo & ARMCR4_BUNITSZ_MASK) {
+ banku_size = ARMCR4_BSZ_1K;
+ } else {
+ banku_size = ARMCR4_BSZ_8K;
+ }
+ memsize += ((bxinfo & ARMCR4_BSZ_MASK) + 1) * banku_size;
+ }
+
+ /* Return to previous state and core */
+ if (!wasup)
+ si_core_disable(sih, 0);
+ si_setcoreidx(sih, origidx);
+
+done:
+ INTR_RESTORE(sii, &intr_val);
+
+ return memsize;
+}
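+/*
+ * Worked example for the bank loop above (illustrative register value):
+ * a bankinfo word with ARMCR4_BUNITSZ_MASK clear selects 8 KB bank units,
+ * so a size field of 11 contributes (11 + 1) * 8 KB = 96 KB; the loop
+ * accumulates such contributions across all TCM A and TCM B banks.
+ */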
+
+bool
+si_has_flops(si_t *sih)
+{
+ uint origidx, cr4_rev;
+
+ /* Find out CR4 core revision */
+ origidx = si_coreidx(sih);
+ if (si_setcore(sih, ARMCR4_CORE_ID, 0)) {
+ cr4_rev = si_corerev(sih);
+ si_setcoreidx(sih, origidx);
+
+ if (cr4_rev == 1 || cr4_rev >= 3)
+ return TRUE;
+ }
+ return FALSE;
+}
+#endif /* BCMDONGLEHOST */
+
+uint32
+si_socram_srmem_size(si_t *sih)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ uint origidx;
+ bcm_int_bitmask_t intr_val;
+
+ sbsocramregs_t *regs;
+ bool wasup;
+ uint corerev;
+ uint32 coreinfo;
+ uint memsize = 0;
+
+ /* Block ints and save current core */
+ INTR_OFF(sii, &intr_val);
+ origidx = si_coreidx(sih);
+
+ /* Switch to SOCRAM core */
+ if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
+ goto done;
+
+ /* Get info for determining size */
+ if (!(wasup = si_iscoreup(sih)))
+ si_core_reset(sih, 0, 0);
+ corerev = si_corerev(sih);
+ coreinfo = R_REG(sii->osh, &regs->coreinfo);
+
+ /* Calculate size from coreinfo based on rev */
+ if (corerev >= 16) {
+ uint8 i;
+ uint nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
+ for (i = 0; i < nb; i++) {
+ W_REG(sii->osh, &regs->bankidx, i);
+ if (R_REG(sii->osh, &regs->bankinfo) & SOCRAM_BANKINFO_RETNTRAM_MASK)
+ memsize += socram_banksize(sii, regs, i, SOCRAM_MEMTYPE_RAM);
+ }
+ }
+
+ /* Return to previous state and core */
+ if (!wasup)
+ si_core_disable(sih, 0);
+ si_setcoreidx(sih, origidx);
+
+done:
+ INTR_RESTORE(sii, &intr_val);
+
+ return memsize;
+}
+
+#if !defined(BCMDONGLEHOST)
+static bool
+BCMPOSTTRAPFN(si_seci_uart)(const si_t *sih)
+{
+ return (sih->cccaps_ext & CC_CAP_EXT_SECI_PUART_PRESENT);
+}
+
+/** seci clock enable/disable */
+static void
+BCMPOSTTRAPFN(si_seci_clkreq)(si_t *sih, bool enable)
+{
+ uint32 clk_ctl_st;
+ uint32 offset;
+ uint32 val;
+ pmuregs_t *pmu;
+ uint32 origidx = 0;
+ const si_info_t *sii = SI_INFO(sih);
+#ifdef SECI_UART
+ bool fast;
+ chipcregs_t *cc = seci_set_core(sih, &origidx, &fast);
+ ASSERT(cc);
+#endif /* SECI_UART */
+ if (!si_seci(sih) && !si_seci_uart(sih))
+ return;
+ offset = OFFSETOF(chipcregs_t, clk_ctl_st);
+ clk_ctl_st = si_corereg(sih, 0, offset, 0, 0);
+
+ if (enable && !(clk_ctl_st & CLKCTL_STS_SECI_CLK_REQ)) {
+ val = CLKCTL_STS_SECI_CLK_REQ | CLKCTL_STS_HT_AVAIL_REQ;
+#ifdef SECI_UART
+ /* Restore the fast UART function select when enabling */
+ if (fast_uart_init) {
+ si_gci_set_functionsel(sih, fast_uart_tx, fast_uart_functionsel);
+ if (fuart_pullup_rx_cts_enab) {
+ si_gci_set_functionsel(sih, fast_uart_rx, fast_uart_functionsel);
+ si_gci_set_functionsel(sih, fast_uart_cts_in,
+ fast_uart_functionsel);
+ }
+ }
+#endif /* SECI_UART */
+ } else if (!enable && (clk_ctl_st & CLKCTL_STS_SECI_CLK_REQ)) {
+ val = 0;
+#ifdef SECI_UART
+ if (force_seci_clk) {
+ return;
+ }
+#endif /* SECI_UART */
+ } else {
+ return;
+ }
+#ifdef SECI_UART
+ /* park the fast UART as PULL UP when disabling the clocks to avoid sending
+ * breaks to the host
+ */
+ if (!enable && fast_uart_init) {
+ si_gci_set_functionsel(sih, fast_uart_tx, fast_uart_pup);
+ if (fuart_pullup_rx_cts_enab) {
+ W_REG(sii->osh, &cc->SECI_status, SECI_STAT_BI);
+ si_gci_set_functionsel(sih, fast_uart_rx, fast_uart_pup);
+ si_gci_set_functionsel(sih, fast_uart_cts_in, fast_uart_pup);
+ SPINWAIT(!(R_REG(sii->osh, &cc->SECI_status) & SECI_STAT_BI), 1000);
+ }
+ }
+#endif /* SECI_UART */
+
+ /* Setting/clearing bit 4 along with bit 8 of the 0x1e0 block: the core requests
+ * that the PMU set the device state such that the HT clock will be available
+ * on short notice.
+ */
+ si_corereg(sih, SI_CC_IDX, offset,
+ CLKCTL_STS_SECI_CLK_REQ | CLKCTL_STS_HT_AVAIL_REQ, val);
+
+ if (!enable)
+ return;
+#ifndef SECI_UART
+ /* Remember original core before switch to chipc/pmu */
+ origidx = si_coreidx(sih);
+#endif
+
+ if (AOB_ENAB(sih)) {
+ pmu = si_setcore(sih, PMU_CORE_ID, 0);
+ } else {
+ pmu = si_setcoreidx(sih, SI_CC_IDX);
+ }
+ ASSERT(pmu != NULL);
+ (void)si_pmu_wait_for_steady_state(sih, sii->osh, pmu);
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+
+ SPINWAIT(!(si_corereg(sih, 0, offset, 0, 0) & CLKCTL_STS_SECI_CLK_AVAIL),
+ PMU_MAX_TRANSITION_DLY);
+
+ clk_ctl_st = si_corereg(sih, 0, offset, 0, 0);
+ if (enable) {
+ if (!(clk_ctl_st & CLKCTL_STS_SECI_CLK_AVAIL)) {
+ SI_ERROR(("SECI clock is not available\n"));
+ ASSERT(0);
+ return;
+ }
+ }
+}
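+/*
+ * The function above follows the usual clk_ctl_st request/grant handshake:
+ * set the REQ bit, then spinwait for the matching AVAIL bit. A minimal
+ * sketch of the same pattern, using only names already used in this file:
+ *
+ *	si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, clk_ctl_st),
+ *		CLKCTL_STS_SECI_CLK_REQ, CLKCTL_STS_SECI_CLK_REQ);
+ *	SPINWAIT(!(si_corereg(sih, 0, OFFSETOF(chipcregs_t, clk_ctl_st), 0, 0) &
+ *		CLKCTL_STS_SECI_CLK_AVAIL), PMU_MAX_TRANSITION_DLY);
+ */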
+
+#if defined(BCMECICOEX) || defined(SECI_UART)
+static chipcregs_t *
+BCMPOSTTRAPFN(seci_set_core)(si_t *sih, uint32 *origidx, bool *fast)
+{
+ chipcregs_t *cc;
+ const si_info_t *sii = SI_INFO(sih);
+ *fast = SI_FAST(sii);
+
+ if (!*fast) {
+ *origidx = sii->curidx;
+ cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+ } else {
+ *origidx = 0;
+ cc = (chipcregs_t *)CCREGS_FAST(sii);
+ }
+ return cc;
+}
+
+static chipcregs_t *
+BCMPOSTTRAPFN(si_seci_access_preamble)(si_t *sih, const si_info_t *sii, uint32 *origidx, bool *fast)
+{
+ chipcregs_t *cc = seci_set_core(sih, origidx, fast);
+
+ if (cc) {
+ if (((R_REG(sii->osh, &cc->clk_ctl_st) & CCS_SECICLKREQ) != CCS_SECICLKREQ)) {
+ /* enable SECI clock */
+ si_seci_clkreq(sih, TRUE);
+ }
+ }
+ return cc;
+}
+#endif /* BCMECICOEX||SECI_UART */
+#ifdef SECI_UART
+
+uint32
+BCMPOSTTRAPFN(si_seci_access)(si_t *sih, uint32 val, int access)
+{
+ uint32 origidx;
+ bool fast;
+ const si_info_t *sii = SI_INFO(sih);
+ chipcregs_t *cc;
+ bcm_int_bitmask_t intr_val;
+ uint32 offset, retval = 1;
+
+ if (!si_seci_uart(sih))
+ return 0;
+
+ INTR_OFF(sii, &intr_val);
+ if (!(cc = si_seci_access_preamble(sih, sii, &origidx, &fast)))
+ goto exit;
+
+ switch (access) {
+ case SECI_ACCESS_STATUSMASK_SET:
+ offset = OFFSETOF(chipcregs_t, SECI_statusmask);
+ retval = si_corereg(sih, SI_CC_IDX, offset, ALLONES_32, val);
+ break;
+ case SECI_ACCESS_STATUSMASK_GET:
+ offset = OFFSETOF(chipcregs_t, SECI_statusmask);
+ retval = si_corereg(sih, SI_CC_IDX, offset, 0, 0);
+ break;
+ case SECI_ACCESS_INTRS:
+ offset = OFFSETOF(chipcregs_t, SECI_status);
+ retval = si_corereg(sih, SI_CC_IDX, offset,
+ ALLONES_32, ALLONES_32);
+ break;
+ case SECI_ACCESS_UART_CTS:
+ offset = OFFSETOF(chipcregs_t, seci_uart_msr);
+ retval = si_corereg(sih, SI_CC_IDX, offset, 0, 0);
+ retval = retval & SECI_UART_MSR_CTS_STATE;
+ break;
+ case SECI_ACCESS_UART_RTS:
+ offset = OFFSETOF(chipcregs_t, seci_uart_mcr);
+ if (val) {
+ /* clear forced flow control; enable auto rts */
+ retval = si_corereg(sih, SI_CC_IDX, offset,
+ SECI_UART_MCR_PRTS | SECI_UART_MCR_AUTO_RTS,
+ SECI_UART_MCR_AUTO_RTS);
+ } else {
+ /* set forced flow control; clear auto rts */
+ retval = si_corereg(sih, SI_CC_IDX, offset,
+ SECI_UART_MCR_PRTS | SECI_UART_MCR_AUTO_RTS,
+ SECI_UART_MCR_PRTS);
+ }
+ break;
+ case SECI_ACCESS_UART_RXEMPTY:
+ offset = OFFSETOF(chipcregs_t, SECI_status);
+ retval = si_corereg(sih, SI_CC_IDX, offset, 0, 0);
+ retval = (retval & SECI_STAT_SRFE) == SECI_STAT_SRFE;
+ break;
+ case SECI_ACCESS_UART_GETC:
+ /* assumes caller checked for nonempty rx FIFO */
+ offset = OFFSETOF(chipcregs_t, seci_uart_data);
+ retval = si_corereg(sih, SI_CC_IDX, offset, 0, 0) & 0xff;
+ break;
+ case SECI_ACCESS_UART_TXFULL:
+ offset = OFFSETOF(chipcregs_t, SECI_status);
+ retval = si_corereg(sih, SI_CC_IDX, offset, 0, 0);
+ retval = (retval & SECI_STAT_STFF) == SECI_STAT_STFF;
+ break;
+ case SECI_ACCESS_UART_PUTC:
+ /* This register must not do a RMW otherwise it will affect the RX FIFO */
+ W_REG(sii->osh, &cc->seci_uart_data, (uint32)(val & 0xff));
+ retval = 0;
+ break;
+ default:
+ ASSERT(0);
+ }
+
+exit:
+ /* restore previous core */
+ if (!fast)
+ si_setcoreidx(sih, origidx);
+
+ INTR_RESTORE(sii, &intr_val);
+
+ return retval;
+}
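+/*
+ * Usage sketch (hypothetical polling loop): drain the SECI UART RX FIFO
+ * one byte at a time, honoring the nonempty-FIFO precondition noted for
+ * SECI_ACCESS_UART_GETC above; process_byte() is a hypothetical consumer:
+ *
+ *	while (!si_seci_access(sih, 0, SECI_ACCESS_UART_RXEMPTY)) {
+ *		uint32 c = si_seci_access(sih, 0, SECI_ACCESS_UART_GETC);
+ *		process_byte(c);
+ *	}
+ */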
+
+void si_seci_clk_force(si_t *sih, bool val)
+{
+ force_seci_clk = val;
+ if (force_seci_clk) {
+ si_seci_clkreq(sih, TRUE);
+ } else {
+ si_seci_down(sih);
+ }
+}
+
+bool si_seci_clk_force_status(si_t *sih)
+{
+ return force_seci_clk;
+}
+#endif /* SECI_UART */
+
+/** SECI Init routine, pass in seci_mode */
+volatile void *
+BCMINITFN(si_seci_init)(si_t *sih, uint8 seci_mode)
+{
+ uint32 origidx = 0;
+ uint32 offset;
+ const si_info_t *sii;
+ volatile void *ptr;
+ chipcregs_t *cc;
+ bool fast;
+ uint32 seci_conf;
+
+ if (sih->ccrev < 35)
+ return NULL;
+
+#ifdef SECI_UART
+ if (seci_mode == SECI_MODE_UART) {
+ if (!si_seci_uart(sih))
+ return NULL;
+ }
+ else {
+#endif /* SECI_UART */
+ if (!si_seci(sih))
+ return NULL;
+#ifdef SECI_UART
+ }
+#endif /* SECI_UART */
+
+ if (seci_mode > SECI_MODE_MASK)
+ return NULL;
+
+ sii = SI_INFO(sih);
+ fast = SI_FAST(sii);
+ if (!fast) {
+ origidx = sii->curidx;
+ if ((ptr = si_setcore(sih, CC_CORE_ID, 0)) == NULL)
+ return NULL;
+ } else if ((ptr = CCREGS_FAST(sii)) == NULL)
+ return NULL;
+ cc = (chipcregs_t *)ptr;
+ ASSERT(cc);
+
+ /* enable SECI clock */
+ if (seci_mode != SECI_MODE_LEGACY_3WIRE_WLAN)
+ si_seci_clkreq(sih, TRUE);
+
+ /* put the SECI in reset */
+ seci_conf = R_REG(sii->osh, &cc->SECI_config);
+ seci_conf &= ~SECI_ENAB_SECI_ECI;
+ W_REG(sii->osh, &cc->SECI_config, seci_conf);
+ seci_conf = SECI_RESET;
+ W_REG(sii->osh, &cc->SECI_config, seci_conf);
+
+ /* set force-low, and set EN_SECI for all non-legacy modes */
+ seci_conf |= SECI_ENAB_SECIOUT_DIS;
+ if ((seci_mode == SECI_MODE_UART) || (seci_mode == SECI_MODE_SECI) ||
+ (seci_mode == SECI_MODE_HALF_SECI))
+ {
+ seci_conf |= SECI_ENAB_SECI_ECI;
+ }
+ W_REG(sii->osh, &cc->SECI_config, seci_conf);
+
+ if (seci_mode != SECI_MODE_LEGACY_3WIRE_WLAN) {
+ /* take seci out of reset */
+ seci_conf = R_REG(sii->osh, &cc->SECI_config);
+ seci_conf &= ~(SECI_RESET);
+ W_REG(sii->osh, &cc->SECI_config, seci_conf);
+ }
+ /* set UART/SECI baud rate */
+ /* hard-coded at 4MBaud for now */
+ if ((seci_mode == SECI_MODE_UART) || (seci_mode == SECI_MODE_SECI) ||
+ (seci_mode == SECI_MODE_HALF_SECI)) {
+ offset = OFFSETOF(chipcregs_t, seci_uart_bauddiv);
+ si_corereg(sih, SI_CC_IDX, offset, 0xFF, 0xFF); /* 4MBaud */
+ if ((CHIPID(sih->chip) == BCM4360_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM43460_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM43526_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM4352_CHIP_ID)) {
+ /* MAC clk is 160MHz */
+ offset = OFFSETOF(chipcregs_t, seci_uart_bauddiv);
+ si_corereg(sih, SI_CC_IDX, offset, 0xFF, 0xFE);
+ offset = OFFSETOF(chipcregs_t, seci_uart_baudadj);
+ si_corereg(sih, SI_CC_IDX, offset, 0xFF, 0x44);
+ offset = OFFSETOF(chipcregs_t, seci_uart_mcr);
+ si_corereg(sih, SI_CC_IDX, offset,
+ 0xFF, SECI_UART_MCR_BAUD_ADJ_EN); /* 0x81 */
+ }
+#ifdef SECI_UART
+ else if (CCREV(sih->ccrev) >= 62) {
+ /* rx FIFO level at which an interrupt is generated */
+ offset = OFFSETOF(chipcregs_t, eci.ge35.eci_uartfifolevel);
+ si_corereg(sih, SI_CC_IDX, offset, 0xff, 0x01);
+ offset = OFFSETOF(chipcregs_t, seci_uart_mcr);
+ si_corereg(sih, SI_CC_IDX, offset, SECI_UART_MCR_AUTO_RTS,
+ SECI_UART_MCR_AUTO_RTS);
+ }
+#endif /* SECI_UART */
+ else {
+ /* 4336 MAC clk is 80MHz */
+ offset = OFFSETOF(chipcregs_t, seci_uart_baudadj);
+ si_corereg(sih, SI_CC_IDX, offset, 0xFF, 0x22);
+ offset = OFFSETOF(chipcregs_t, seci_uart_mcr);
+ si_corereg(sih, SI_CC_IDX, offset,
+ 0xFF, SECI_UART_MCR_BAUD_ADJ_EN); /* 0x80 */
+ }
+
+ /* LCR/MCR settings */
+ offset = OFFSETOF(chipcregs_t, seci_uart_lcr);
+ si_corereg(sih, SI_CC_IDX, offset, 0xFF,
+ (SECI_UART_LCR_RX_EN | SECI_UART_LCR_TXO_EN)); /* 0x28 */
+ offset = OFFSETOF(chipcregs_t, seci_uart_mcr);
+ si_corereg(sih, SI_CC_IDX, offset,
+ SECI_UART_MCR_TX_EN, SECI_UART_MCR_TX_EN); /* 0x01 */
+
+#ifndef SECI_UART
+ /* Give control of ECI output regs to MAC core */
+ offset = OFFSETOF(chipcregs_t, eci.ge35.eci_controllo);
+ si_corereg(sih, SI_CC_IDX, offset, ALLONES_32, ECI_MACCTRLLO_BITS);
+ offset = OFFSETOF(chipcregs_t, eci.ge35.eci_controlhi);
+ si_corereg(sih, SI_CC_IDX, offset, 0xFFFF, ECI_MACCTRLHI_BITS);
+#endif /* SECI_UART */
+ }
+
+ /* set the seci mode in seci conf register */
+ seci_conf = R_REG(sii->osh, &cc->SECI_config);
+ seci_conf &= ~(SECI_MODE_MASK << SECI_MODE_SHIFT);
+ seci_conf |= (seci_mode << SECI_MODE_SHIFT);
+ W_REG(sii->osh, &cc->SECI_config, seci_conf);
+
+ /* Clear force-low bit */
+ seci_conf = R_REG(sii->osh, &cc->SECI_config);
+ seci_conf &= ~SECI_ENAB_SECIOUT_DIS;
+ W_REG(sii->osh, &cc->SECI_config, seci_conf);
+
+ /* restore previous core */
+ if (!fast)
+ si_setcoreidx(sih, origidx);
+
+ return ptr;
+}
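+/*
+ * Usage sketch (hypothetical init path): bring SECI up in full SECI mode
+ * and report when the chip lacks the capability:
+ *
+ *	if (si_seci_init(sih, SECI_MODE_SECI) == NULL)
+ *		SI_ERROR(("SECI not available on this chip\n"));
+ */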
+
+#ifdef BCMECICOEX
+#define NOTIFY_BT_FM_DISABLE(sih, val) \
+ si_eci_notify_bt((sih), ECI_OUT_FM_DISABLE_MASK(CCREV(sih->ccrev)), \
+ ((val) << ECI_OUT_FM_DISABLE_SHIFT(CCREV(sih->ccrev))), FALSE)
+
+/** Query OTP to see if FM is disabled */
+static int
+BCMINITFN(si_query_FMDisabled_from_OTP)(si_t *sih, uint16 *FMDisabled)
+{
+ int error = BCME_OK;
+ uint bitoff = 0;
+ bool wasup;
+ void *oh;
+ uint32 min_res_mask = 0;
+
+ /* If there is a bit for this chip, check it */
+ if (bitoff) {
+ if (!(wasup = si_is_otp_powered(sih))) {
+ si_otp_power(sih, TRUE, &min_res_mask);
+ }
+
+ if ((oh = otp_init(sih)) != NULL)
+ *FMDisabled = !otp_read_bit(oh, OTP4325_FM_DISABLED_OFFSET);
+ else
+ error = BCME_NOTFOUND;
+
+ if (!wasup) {
+ si_otp_power(sih, FALSE, &min_res_mask);
+ }
+ }
+
+ return error;
+}
+
+bool
+si_eci(const si_t *sih)
+{
+ return (!!(sih->cccaps & CC_CAP_ECI));
+}
+
+bool
+BCMPOSTTRAPFN(si_seci)(const si_t *sih)
+{
+ return (sih->cccaps_ext & CC_CAP_EXT_SECI_PRESENT);
+}
+
+bool
+si_gci(const si_t *sih)
+{
+ return (sih->cccaps_ext & CC_CAP_EXT_GCI_PRESENT);
+}
+
+bool
+si_sraon(const si_t *sih)
+{
+ return (sih->cccaps_ext & CC_CAP_SR_AON_PRESENT);
+}
+
+/** ECI Init routine */
+int
+BCMINITFN(si_eci_init)(si_t *sih)
+{
+ uint32 origidx = 0;
+ const si_info_t *sii;
+ chipcregs_t *cc;
+ bool fast;
+ uint16 FMDisabled = FALSE;
+
+ /* check for ECI capability */
+ if (!(sih->cccaps & CC_CAP_ECI))
+ return BCME_ERROR;
+
+ sii = SI_INFO(sih);
+ fast = SI_FAST(sii);
+ if (!fast) {
+ origidx = sii->curidx;
+ if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL)
+ return BCME_ERROR;
+ } else if ((cc = (chipcregs_t *)CCREGS_FAST(sii)) == NULL)
+ return BCME_ERROR;
+ ASSERT(cc);
+
+ /* disable level based interrupts */
+ if (CCREV(sih->ccrev) < 35) {
+ W_REG(sii->osh, &cc->eci.lt35.eci_intmaskhi, 0x0);
+ W_REG(sii->osh, &cc->eci.lt35.eci_intmaskmi, 0x0);
+ W_REG(sii->osh, &cc->eci.lt35.eci_intmasklo, 0x0);
+ } else {
+ W_REG(sii->osh, &cc->eci.ge35.eci_intmaskhi, 0x0);
+ W_REG(sii->osh, &cc->eci.ge35.eci_intmasklo, 0x0);
+ }
+
+ /* Assign eci_output bits between 'wl' and dot11mac */
+ if (CCREV(sih->ccrev) < 35) {
+ W_REG(sii->osh, &cc->eci.lt35.eci_control, ECI_MACCTRL_BITS);
+ } else {
+ W_REG(sii->osh, &cc->eci.ge35.eci_controllo, ECI_MACCTRLLO_BITS);
+ W_REG(sii->osh, &cc->eci.ge35.eci_controlhi, ECI_MACCTRLHI_BITS);
+ }
+
+ /* enable only edge-based interrupts:
+ * only a toggle on bit 62 triggers an interrupt
+ */
+ if (CCREV(sih->ccrev) < 35) {
+ W_REG(sii->osh, &cc->eci.lt35.eci_eventmaskhi, 0x0);
+ W_REG(sii->osh, &cc->eci.lt35.eci_eventmaskmi, 0x0);
+ W_REG(sii->osh, &cc->eci.lt35.eci_eventmasklo, 0x0);
+ } else {
+ W_REG(sii->osh, &cc->eci.ge35.eci_eventmaskhi, 0x0);
+ W_REG(sii->osh, &cc->eci.ge35.eci_eventmasklo, 0x0);
+ }
+
+ /* restore previous core */
+ if (!fast)
+ si_setcoreidx(sih, origidx);
+
+ /* if FM disabled in OTP, let BT know */
+ if (!si_query_FMDisabled_from_OTP(sih, &FMDisabled)) {
+ if (FMDisabled) {
+ NOTIFY_BT_FM_DISABLE(sih, 1);
+ }
+ }
+
+ return 0;
+}
+
+/** Write values to BT on eci_output. */
+void
+si_eci_notify_bt(si_t *sih, uint32 mask, uint32 val, bool is_interrupt)
+{
+ uint32 offset;
+
+ if ((sih->cccaps & CC_CAP_ECI) ||
+ (si_seci(sih)))
+ {
+ /* ECI or SECI mode */
+ /* Clear interrupt bit by default */
+ if (is_interrupt) {
+ si_corereg(sih, SI_CC_IDX,
+ (CCREV(sih->ccrev) < 35 ?
+ OFFSETOF(chipcregs_t, eci.lt35.eci_output) :
+ OFFSETOF(chipcregs_t, eci.ge35.eci_outputlo)),
+ (1 << 30), 0);
+ }
+
+ if (CCREV(sih->ccrev) >= 35) {
+ if ((mask & 0xFFFF0000) == ECI48_OUT_MASKMAGIC_HIWORD) {
+ offset = OFFSETOF(chipcregs_t, eci.ge35.eci_outputhi);
+ mask = mask & ~0xFFFF0000;
+ } else {
+ offset = OFFSETOF(chipcregs_t, eci.ge35.eci_outputlo);
+ mask = mask | (1<<30);
+ val = val & ~(1 << 30);
+ }
+ } else {
+ offset = OFFSETOF(chipcregs_t, eci.lt35.eci_output);
+ val = val & ~(1 << 30);
+ }
+
+ si_corereg(sih, SI_CC_IDX, offset, mask, val);
+
+ /* Set interrupt bit if needed */
+ if (is_interrupt) {
+ si_corereg(sih, SI_CC_IDX,
+ (CCREV(sih->ccrev) < 35 ?
+ OFFSETOF(chipcregs_t, eci.lt35.eci_output) :
+ OFFSETOF(chipcregs_t, eci.ge35.eci_outputlo)),
+ (1 << 30), (1 << 30));
+ }
+ } else if (sih->cccaps_ext & CC_CAP_EXT_GCI_PRESENT) {
+ /* GCI Mode */
+ if ((mask & 0xFFFF0000) == ECI48_OUT_MASKMAGIC_HIWORD) {
+ mask = mask & ~0xFFFF0000;
+ si_gci_direct(sih, GCI_OFFSETOF(sih, gci_output[1]), mask, val);
+ }
+ }
+}
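+/*
+ * Usage sketch: the NOTIFY_BT_FM_DISABLE macro above is one concrete
+ * caller. A direct, non-interrupt invocation signalling a single bit
+ * (the mask/value pair here is purely illustrative) would be:
+ *
+ *	si_eci_notify_bt(sih, 0x1, 0x1, FALSE);
+ */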
+
+static void
+BCMPOSTTRAPFN(seci_restore_coreidx)(si_t *sih, uint32 origidx, bool fast)
+{
+ if (!fast)
+ si_setcoreidx(sih, origidx);
+ return;
+}
+
+void
+BCMPOSTTRAPFN(si_seci_down)(si_t *sih)
+{
+ uint32 origidx;
+ bool fast;
+ const si_info_t *sii = SI_INFO(sih);
+ const chipcregs_t *cc;
+ uint32 offset;
+
+ if (!si_seci(sih) && !si_seci_uart(sih))
+ return;
+ /* Don't proceed if a request has already been made to bring down the clock */
+ offset = OFFSETOF(chipcregs_t, clk_ctl_st);
+ if (!(si_corereg(sih, 0, offset, 0, 0) & CLKCTL_STS_SECI_CLK_REQ))
+ return;
+ if (!(cc = si_seci_access_preamble(sih, sii, &origidx, &fast)))
+ goto exit;
+
+exit:
+ /* bring down the clock if up */
+ si_seci_clkreq(sih, FALSE);
+
+ /* restore previous core */
+ seci_restore_coreidx(sih, origidx, fast);
+}
+
+void
+si_seci_upd(si_t *sih, bool enable)
+{
+ uint32 origidx = 0;
+ const si_info_t *sii = SI_INFO(sih);
+ chipcregs_t *cc;
+ bool fast;
+ uint32 regval, seci_ctrl;
+ bcm_int_bitmask_t intr_val;
+
+ if (!si_seci(sih))
+ return;
+
+ fast = SI_FAST(sii);
+ INTR_OFF(sii, &intr_val);
+ if (!fast) {
+ origidx = sii->curidx;
+ if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL)
+ goto exit;
+ } else if ((cc = (chipcregs_t *)CCREGS_FAST(sii)) == NULL)
+ goto exit;
+
+ ASSERT(cc);
+
+ /* Select SECI based on enable input */
+ if ((CHIPID(sih->chip) == BCM4352_CHIP_ID) || (CHIPID(sih->chip) == BCM4360_CHIP_ID)) {
+ regval = R_REG(sii->osh, &cc->chipcontrol);
+
+ seci_ctrl = CCTRL4360_SECI_ON_GPIO01;
+
+ if (enable) {
+ regval |= seci_ctrl;
+ } else {
+ regval &= ~seci_ctrl;
+ }
+ W_REG(sii->osh, &cc->chipcontrol, regval);
+
+ if (enable) {
+ /* Send ECI update to BT */
+ regval = R_REG(sii->osh, &cc->SECI_config);
+ regval |= SECI_UPD_SECI;
+ W_REG(sii->osh, &cc->SECI_config, regval);
+ SPINWAIT((R_REG(sii->osh, &cc->SECI_config) & SECI_UPD_SECI), 1000);
+ /* Request ECI update from BT */
+ W_REG(sii->osh, &cc->seci_uart_data, SECI_SLIP_ESC_CHAR);
+ W_REG(sii->osh, &cc->seci_uart_data, SECI_REFRESH_REQ);
+ }
+ }
+
+exit:
+ /* restore previous core */
+ if (!fast)
+ si_setcoreidx(sih, origidx);
+
+ INTR_RESTORE(sii, &intr_val);
+}
+
+void *
+BCMINITFN(si_gci_init)(si_t *sih)
+{
+#ifdef HNDGCI
+ const si_info_t *sii = SI_INFO(sih);
+#endif /* HNDGCI */
+
+ if (sih->cccaps_ext & CC_CAP_EXT_GCI_PRESENT)
+ {
+ si_gci_reset(sih);
+
+ if (sih->boardflags4 & BFL4_BTCOEX_OVER_SECI) {
+ si_gci_seci_init(sih);
+ }
+
+ /* Set GCI Control bits 40 - 47 to be SW Controlled. These bits
+ contain WL channel info and are sent to BT.
+ */
+ si_gci_direct(sih, GCI_OFFSETOF(sih, gci_control_1),
+ GCI_WL_CHN_INFO_MASK, GCI_WL_CHN_INFO_MASK);
+ }
+#ifdef HNDGCI
+ hndgci_init(sih, sii->osh, HND_GCI_PLAIN_UART_MODE,
+ GCI_UART_BR_115200);
+#endif /* HNDGCI */
+
+ return (NULL);
+}
+#endif /* BCMECICOEX */
+#endif /* !(BCMDONGLEHOST) */
+
+/**
+ * For boards that use GPIO(8) as the Bluetooth Coex TX_WLAN pin:
+ * when GPIOControl for pin 8 is with the ChipCommon core and the
+ * UART_TX_1 (bit 5: ChipCommon capabilities) strapping option is set,
+ * GPIO pin 8 is driven by Uart0MCR:2 rather than GPIOOut:8. To drive this pin
+ * low, one has to set Uart0MCR:2 to 1. This is required when BTC is disabled
+ * or the driver goes down. Refer to PR35488.
+ */
+void
+si_btcgpiowar(si_t *sih)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ uint origidx;
+ bcm_int_bitmask_t intr_val;
+ chipcregs_t *cc;
+
+ /* Make sure that there is ChipCommon core present &&
+ * UART_TX is strapped to 1
+ */
+ if (!(sih->cccaps & CC_CAP_UARTGPIO))
+ return;
+
+ /* si_corereg cannot be used as we have to guarantee 8-bit read/writes */
+ INTR_OFF(sii, &intr_val);
+
+ origidx = si_coreidx(sih);
+
+ cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+ ASSERT(cc != NULL);
+
+ W_REG(sii->osh, &cc->uart0mcr, R_REG(sii->osh, &cc->uart0mcr) | 0x04);
+
+ /* restore the original index */
+ si_setcoreidx(sih, origidx);
+
+ INTR_RESTORE(sii, &intr_val);
+}
+
+void
+si_chipcontrl_restore(si_t *sih, uint32 val)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ chipcregs_t *cc;
+ uint origidx = si_coreidx(sih);
+
+ if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) {
+ SI_ERROR(("si_chipcontrl_restore: Failed to find CORE ID!\n"));
+ return;
+ }
+ W_REG(sii->osh, &cc->chipcontrol, val);
+ si_setcoreidx(sih, origidx);
+}
+
+uint32
+si_chipcontrl_read(si_t *sih)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ chipcregs_t *cc;
+ uint origidx = si_coreidx(sih);
+ uint32 val;
+
+ if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) {
+ SI_ERROR(("si_chipcontrl_read: Failed to find CORE ID!\n"));
+ return -1;
+ }
+ val = R_REG(sii->osh, &cc->chipcontrol);
+ si_setcoreidx(sih, origidx);
+ return val;
+}
+
+/** switch muxed pins, on: SROM, off: FEMCTRL. Called for a family of ac chips, not just 4360. */
+void
+si_chipcontrl_srom4360(si_t *sih, bool on)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ chipcregs_t *cc;
+ uint origidx = si_coreidx(sih);
+ uint32 val;
+
+ if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) {
+ SI_ERROR(("si_chipcontrl_srom4360: Failed to find CORE ID!\n"));
+ return;
+ }
+ val = R_REG(sii->osh, &cc->chipcontrol);
+
+ if (on) {
+ val &= ~(CCTRL4360_SECI_MODE |
+ CCTRL4360_BTSWCTRL_MODE |
+ CCTRL4360_EXTRA_FEMCTRL_MODE |
+ CCTRL4360_BT_LGCY_MODE |
+ CCTRL4360_CORE2FEMCTRL4_ON);
+
+ W_REG(sii->osh, &cc->chipcontrol, val);
+ } else {
+ /* huh, nothing here? */
+ }
+
+ si_setcoreidx(sih, origidx);
+}
+
+/**
+ * The SROM clock is derived from the backplane clock. Chips with a fast
+ * backplane clock therefore require a higher-than-POR-default clock divisor ratio for the SROM clock.
+ */
+void
+si_srom_clk_set(si_t *sih)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ chipcregs_t *cc;
+ uint origidx = si_coreidx(sih);
+ uint32 val;
+ uint32 divisor = 1;
+
+ if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) {
+ SI_ERROR(("si_srom_clk_set: Failed to find CORE ID!\n"));
+ return;
+ }
+
+ val = R_REG(sii->osh, &cc->clkdiv2);
+ ASSERT(0);
+
+ W_REG(sii->osh, &cc->clkdiv2, ((val & ~CLKD2_SROM) | divisor));
+ si_setcoreidx(sih, origidx);
+}
+
+void
+si_pmu_avb_clk_set(si_t *sih, osl_t *osh, bool set_flag)
+{
+#if !defined(BCMDONGLEHOST)
+ switch (CHIPID(sih->chip)) {
+ case BCM43460_CHIP_ID:
+ case BCM4360_CHIP_ID:
+ si_pmu_avbtimer_enable(sih, osh, set_flag);
+ break;
+ default:
+ break;
+ }
+#endif
+}
+
+void
+si_btc_enable_chipcontrol(si_t *sih)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ chipcregs_t *cc;
+ uint origidx = si_coreidx(sih);
+
+ if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) {
+ SI_ERROR(("si_btc_enable_chipcontrol: Failed to find CORE ID!\n"));
+ return;
+ }
+
+ /* BT fix */
+ W_REG(sii->osh, &cc->chipcontrol,
+ R_REG(sii->osh, &cc->chipcontrol) | CC_BTCOEX_EN_MASK);
+
+ si_setcoreidx(sih, origidx);
+}
+
+/** cache device removed state */
+void si_set_device_removed(si_t *sih, bool status)
+{
+ si_info_t *sii = SI_INFO(sih);
+
+ sii->device_removed = status;
+}
+
+/** check if the device is removed */
+bool
+si_deviceremoved(const si_t *sih)
+{
+ uint32 w;
+ const si_info_t *sii = SI_INFO(sih);
+
+ if (sii->device_removed) {
+ return TRUE;
+ }
+
+ switch (BUSTYPE(sih->bustype)) {
+ case PCI_BUS:
+ ASSERT(SI_INFO(sih)->osh != NULL);
+ w = OSL_PCI_READ_CONFIG(SI_INFO(sih)->osh, PCI_CFG_VID, sizeof(uint32));
+ if ((w & 0xFFFF) != VENDOR_BROADCOM)
+ return TRUE;
+ break;
+ default:
+ break;
+ }
+ return FALSE;
+}
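+/*
+ * Usage sketch (hypothetical bus-access guard): callers typically bail out
+ * before touching registers once the device has gone away:
+ *
+ *	if (si_deviceremoved(sih))
+ *		return BCME_NODEVICE;
+ */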
+
+bool
+si_is_warmboot(void)
+{
+
+ return FALSE;
+}
+
+bool
+si_is_sprom_available(si_t *sih)
+{
+ if (CCREV(sih->ccrev) >= 31) {
+ const si_info_t *sii;
+ uint origidx;
+ chipcregs_t *cc;
+ uint32 sromctrl;
+
+ if ((sih->cccaps & CC_CAP_SROM) == 0)
+ return FALSE;
+
+ sii = SI_INFO(sih);
+ origidx = sii->curidx;
+ cc = si_setcoreidx(sih, SI_CC_IDX);
+ ASSERT(cc);
+ sromctrl = R_REG(sii->osh, &cc->sromcontrol);
+ si_setcoreidx(sih, origidx);
+ return (sromctrl & SRC_PRESENT);
+ }
+
+ switch (CHIPID(sih->chip)) {
+ case BCM4369_CHIP_GRPID:
+ if (CHIPREV(sih->chiprev) == 0) {
+ /* WAR for 4369a0: HW4369-1729. no sprom, default to otp always. */
+ return 0;
+ } else {
+ return (sih->chipst & CST4369_SPROM_PRESENT) != 0;
+ }
+ break;
+ CASE_BCM43602_CHIP:
+ return (sih->chipst & CST43602_SPROM_PRESENT) != 0;
+ case BCM43012_CHIP_ID:
+ case BCM43013_CHIP_ID:
+ case BCM43014_CHIP_ID:
+ return FALSE;
+ case BCM4362_CHIP_GRPID:
+ return (sih->chipst & CST4362_SPROM_PRESENT) != 0;
+ case BCM4376_CHIP_GRPID:
+ case BCM4378_CHIP_GRPID:
+ return (sih->chipst & CST4378_SPROM_PRESENT) != 0;
+ case BCM4385_CHIP_GRPID:
+ case BCM4387_CHIP_GRPID:
+ return (sih->chipst & CST4387_SPROM_PRESENT) != 0;
+ case BCM4388_CHIP_GRPID:
+ case BCM4389_CHIP_GRPID:
+ case BCM4397_CHIP_GRPID:
+ /* 4389 supports only OTP */
+ return FALSE;
+ default:
+ return TRUE;
+ }
+}
+
+bool
+si_is_sflash_available(const si_t *sih)
+{
+ switch (CHIPID(sih->chip)) {
+ case BCM4387_CHIP_ID:
+ return (sih->chipst & CST4387_SFLASH_PRESENT) != 0;
+ default:
+ return FALSE;
+ }
+}
+
+#if !defined(BCMDONGLEHOST)
+bool
+si_is_otp_disabled(const si_t *sih)
+{
+ switch (CHIPID(sih->chip)) {
+ case BCM4360_CHIP_ID:
+ case BCM43526_CHIP_ID:
+ case BCM43460_CHIP_ID:
+ case BCM4352_CHIP_ID:
+ case BCM43602_CHIP_ID:
+ /* 4360 OTP is always powered and enabled */
+ return FALSE;
+ /* These chips always have their OTP on */
+ case BCM43012_CHIP_ID:
+ case BCM43013_CHIP_ID:
+ case BCM43014_CHIP_ID:
+ case BCM4369_CHIP_GRPID:
+ case BCM4362_CHIP_GRPID:
+ case BCM4376_CHIP_GRPID:
+ case BCM4378_CHIP_GRPID:
+ case BCM4385_CHIP_GRPID:
+ case BCM4387_CHIP_GRPID:
+ case BCM4388_CHIP_GRPID:
+ case BCM4389_CHIP_GRPID:
+ case BCM4397_CHIP_GRPID:
+ default:
+ return FALSE;
+ }
+}
+
+bool
+si_is_otp_powered(si_t *sih)
+{
+ if (PMUCTL_ENAB(sih))
+ return si_pmu_is_otp_powered(sih, si_osh(sih));
+ return TRUE;
+}
+
+void
+si_otp_power(si_t *sih, bool on, uint32* min_res_mask)
+{
+ if (PMUCTL_ENAB(sih))
+ si_pmu_otp_power(sih, si_osh(sih), on, min_res_mask);
+ OSL_DELAY(1000);
+}
+
+/* Return BCME_NOTFOUND if the card doesn't have CIS format nvram */
+int
+si_cis_source(const si_t *sih)
+{
+ /* Most PCI chips use SROM format instead of CIS */
+ if (BUSTYPE(sih->bustype) == PCI_BUS) {
+ return BCME_NOTFOUND;
+ }
+
+ switch (CHIPID(sih->chip)) {
+ case BCM4360_CHIP_ID:
+ case BCM43460_CHIP_ID:
+ case BCM4352_CHIP_ID:
+ case BCM43526_CHIP_ID: {
+ if ((sih->chipst & CST4360_OTP_ENABLED))
+ return CIS_OTP;
+ return CIS_DEFAULT;
+ }
+ CASE_BCM43602_CHIP:
+ if (sih->chipst & CST43602_SPROM_PRESENT) {
+ /* Don't support CIS formatted SROM, use 'real' SROM format instead */
+ return BCME_NOTFOUND;
+ }
+ return CIS_OTP;
+ case BCM43012_CHIP_ID:
+ case BCM43013_CHIP_ID:
+ case BCM43014_CHIP_ID:
+ return CIS_OTP;
+ case BCM4369_CHIP_GRPID:
+ if (CHIPREV(sih->chiprev) == 0) {
+ /* WAR for 4369a0: HW4369-1729 */
+ return CIS_OTP;
+ } else if (sih->chipst & CST4369_SPROM_PRESENT) {
+ return CIS_SROM;
+ }
+ return CIS_OTP;
+ case BCM4362_CHIP_GRPID:
+ return ((sih->chipst & CST4362_SPROM_PRESENT)? CIS_SROM : CIS_OTP);
+ case BCM4376_CHIP_GRPID:
+ case BCM4378_CHIP_GRPID:
+ if (sih->chipst & CST4378_SPROM_PRESENT)
+ return CIS_SROM;
+ return CIS_OTP;
+ case BCM4385_CHIP_GRPID:
+ case BCM4387_CHIP_GRPID:
+ if (sih->chipst & CST4387_SPROM_PRESENT)
+ return CIS_SROM;
+ return CIS_OTP;
+ case BCM4388_CHIP_GRPID:
+ case BCM4389_CHIP_GRPID:
+ case BCM4397_CHIP_GRPID:
+ /* 4389 supports only OTP */
+ return CIS_OTP;
+ default:
+ return CIS_DEFAULT;
+ }
+}
+
+uint16 BCMATTACHFN(si_fabid)(si_t *sih)
+{
+ uint32 data;
+ uint16 fabid = 0;
+
+ switch (CHIPID(sih->chip)) {
+ CASE_BCM43602_CHIP:
+ case BCM43012_CHIP_ID:
+ case BCM43013_CHIP_ID:
+ case BCM43014_CHIP_ID:
+ case BCM4369_CHIP_GRPID:
+ case BCM4362_CHIP_GRPID:
+ case BCM4376_CHIP_GRPID:
+ case BCM4378_CHIP_GRPID:
+ case BCM4385_CHIP_GRPID:
+ case BCM4387_CHIP_GRPID:
+ case BCM4388_CHIP_GRPID:
+ case BCM4389_CHIP_GRPID:
+ case BCM4397_CHIP_GRPID:
+ data = si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, fabid), 0, 0);
+ fabid = data & 0xf;
+ break;
+
+ default:
+ break;
+ }
+
+ return fabid;
+}
+#endif /* !defined(BCMDONGLEHOST) */
+
+uint32 BCMATTACHFN(si_get_sromctl)(si_t *sih)
+{
+ chipcregs_t *cc;
+ uint origidx = si_coreidx(sih);
+ uint32 sromctl;
+ osl_t *osh = si_osh(sih);
+
+ cc = si_setcoreidx(sih, SI_CC_IDX);
+ ASSERT((uintptr)cc);
+
+ sromctl = R_REG(osh, &cc->sromcontrol);
+
+ /* return to the original core */
+ si_setcoreidx(sih, origidx);
+ return sromctl;
+}
+
+int BCMATTACHFN(si_set_sromctl)(si_t *sih, uint32 value)
+{
+ chipcregs_t *cc;
+ uint origidx = si_coreidx(sih);
+ osl_t *osh = si_osh(sih);
+ int ret = BCME_OK;
+
+ cc = si_setcoreidx(sih, SI_CC_IDX);
+ ASSERT((uintptr)cc);
+
+ /* get chipcommon rev */
+ if (si_corerev(sih) >= 32) {
+ /* SpromCtrl is only accessible if CoreCapabilities.SpromSupported and
+ * SpromPresent are both 1.
+ */
+ if ((R_REG(osh, &cc->capabilities) & CC_CAP_SROM) != 0 &&
+ (R_REG(osh, &cc->sromcontrol) & SRC_PRESENT)) {
+ W_REG(osh, &cc->sromcontrol, value);
+ } else {
+ ret = BCME_NODEVICE;
+ }
+ } else {
+ ret = BCME_UNSUPPORTED;
+ }
+
+ /* return to the original core */
+ si_setcoreidx(sih, origidx);
+
+ return ret;
+}
+
+uint
+BCMPOSTTRAPFN(si_core_wrapperreg)(si_t *sih, uint32 coreidx, uint32 offset, uint32 mask, uint32 val)
+{
+ uint origidx;
+ bcm_int_bitmask_t intr_val;
+ uint ret_val;
+ const si_info_t *sii = SI_INFO(sih);
+
+ origidx = si_coreidx(sih);
+
+ INTR_OFF(sii, &intr_val);
+ /* Validate the core idx */
+ si_setcoreidx(sih, coreidx);
+
+ ret_val = si_wrapperreg(sih, offset, mask, val);
+
+ /* return to the original core */
+ si_setcoreidx(sih, origidx);
+ INTR_RESTORE(sii, &intr_val);
+ return ret_val;
+}
+
+#if !defined(BCMDONGLEHOST)
+static void
+si_pmu_sr_upd(si_t *sih)
+{
+#if defined(SAVERESTORE)
+ if (SR_ENAB()) {
+ const si_info_t *sii = SI_INFO(sih);
+
+ /* min_mask is updated after SR code is downloaded to txfifo */
+ if (PMUCTL_ENAB(sih))
+ si_pmu_res_minmax_update(sih, sii->osh);
+ }
+#endif
+}
+
+/**
+ * To make sure that, res mask is minimal to save power and also, to indicate
+ * specifically to host about the SR logic.
+ */
+void
+si_update_masks(si_t *sih)
+{
+ const si_info_t *sii = SI_INFO(sih);
+
+ switch (CHIPID(sih->chip)) {
+ case BCM4369_CHIP_GRPID:
+ CASE_BCM43602_CHIP:
+ case BCM4362_CHIP_GRPID:
+ case BCM4376_CHIP_GRPID:
+ case BCM4378_CHIP_GRPID:
+ case BCM4385_CHIP_GRPID:
+ case BCM4387_CHIP_GRPID:
+ case BCM4388_CHIP_GRPID:
+ case BCM4389_CHIP_GRPID:
+ case BCM4397_CHIP_GRPID:
+ /* Assumes SR engine has been enabled */
+ if (PMUCTL_ENAB(sih))
+ si_pmu_res_minmax_update(sih, sii->osh);
+ break;
+ case BCM43012_CHIP_ID:
+ case BCM43013_CHIP_ID:
+ case BCM43014_CHIP_ID:
+ /* min_mask is updated after SR code is downloaded to txfifo */
+ si_pmu_sr_upd(sih);
+ PMU_REG(sih, mac_res_req_timer, ~0x0, PMU43012_MAC_RES_REQ_TIMER);
+ PMU_REG(sih, mac_res_req_mask, ~0x0, PMU43012_MAC_RES_REQ_MASK);
+ break;
+
+ default:
+ ASSERT(0);
+ break;
+ }
+}
+
+void
+si_force_islanding(si_t *sih, bool enable)
+{
+ switch (CHIPID(sih->chip)) {
+ case BCM43012_CHIP_ID:
+ case BCM43013_CHIP_ID:
+ case BCM43014_CHIP_ID: {
+ if (enable) {
+ /* Turn on the islands */
+ si_pmu_chipcontrol(sih, CHIPCTRLREG2, 0x00000053, 0x0);
+#ifdef USE_MEMLPLDO
+ /* Force vddm pwrsw always on */
+ si_pmu_chipcontrol(sih, CHIPCTRLREG2, 0x000003, 0x000003);
+#endif
+#ifdef BCMQT
+ /* Turn off the islands */
+ si_pmu_chipcontrol(sih, CHIPCTRLREG2, 0x000050, 0x000050);
+#endif
+ } else {
+ /* Turn off the islands */
+ si_pmu_chipcontrol(sih, CHIPCTRLREG2, 0x000050, 0x000050);
+ }
+ }
+ break;
+
+ default:
+ ASSERT(0);
+ break;
+ }
+}
+
+#endif /* !defined(BCMDONGLEHOST) */
+
+/* Clean up the timer from the host when the ARM has been halted
+ * without a chance for the ARM to clean up its resources.
+ * If left uncleaned, an interrupt from a software timer can still
+ * request the HT clock when the ARM is halted.
+ */
+uint32
+si_pmu_res_req_timer_clr(si_t *sih)
+{
+ uint32 mask;
+
+ mask = PRRT_REQ_ACTIVE | PRRT_INTEN | PRRT_HT_REQ;
+ mask <<= 14;
+ /* clear mask bits */
+ pmu_corereg(sih, SI_CC_IDX, res_req_timer, mask, 0);
+ /* readback to ensure write completes */
+ return pmu_corereg(sih, SI_CC_IDX, res_req_timer, 0, 0);
+}
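+/*
+ * Bit-layout note, worked from the shifts above: the PRRT_REQ_ACTIVE,
+ * PRRT_INTEN and PRRT_HT_REQ flags sit at bit 14 and up of res_req_timer,
+ * so the mask handed to pmu_corereg() is the OR of the three flags shifted
+ * left by 14; writing 0 under that mask clears just those control bits.
+ */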
+
+/** turn on/off rfldo */
+void
+si_pmu_rfldo(si_t *sih, bool on)
+{
+#if !defined(BCMDONGLEHOST)
+ switch (CHIPID(sih->chip)) {
+ case BCM4360_CHIP_ID:
+ case BCM4352_CHIP_ID:
+ case BCM43526_CHIP_ID: {
+ CASE_BCM43602_CHIP:
+ si_pmu_vreg_control(sih, PMU_VREG_0, RCTRL4360_RFLDO_PWR_DOWN,
+ on ? 0 : RCTRL4360_RFLDO_PWR_DOWN);
+ break;
+ }
+ default:
+ ASSERT(0);
+ break;
+ }
+#endif
+}
+
+/* The caller of this function should make sure it is on the PCIE core.
+ * Used in pciedev.c.
+ */
+void
+si_pcie_disable_oobselltr(const si_t *sih)
+{
+ ASSERT(si_coreid(sih) == PCIE2_CORE_ID);
+ if (PCIECOREREV(sih->buscorerev) >= 23)
+ si_wrapperreg(sih, AI_OOBSELIND74, ~0, 0);
+ else
+ si_wrapperreg(sih, AI_OOBSELIND30, ~0, 0);
+}
+
+void
+si_pcie_ltr_war(const si_t *sih)
+{
+#if !defined(BCMDONGLEHOST)
+ const si_info_t *sii = SI_INFO(sih);
+
+ if (PCIE_GEN2(sii))
+ pcie_ltr_war(sii->pch, si_pcieltrenable(sih, 0, 0));
+#endif /* !defined(BCMDONGLEHOST) */
+}
+
+void
+si_pcie_hw_LTR_war(const si_t *sih)
+{
+#if !defined(BCMDONGLEHOST)
+ const si_info_t *sii = SI_INFO(sih);
+
+ if (PCIE_GEN2(sii))
+ pcie_hw_LTR_war(sii->pch);
+#endif /* !defined(BCMDONGLEHOST) */
+}
+
+void
+si_pciedev_reg_pm_clk_period(const si_t *sih)
+{
+#if !defined(BCMDONGLEHOST)
+ const si_info_t *sii = SI_INFO(sih);
+
+ if (PCIE_GEN2(sii))
+ pciedev_reg_pm_clk_period(sii->pch);
+#endif /* !defined(BCMDONGLEHOST) */
+}
+
+void
+si_pciedev_crwlpciegen2(const si_t *sih)
+{
+#if !defined(BCMDONGLEHOST)
+ const si_info_t *sii = SI_INFO(sih);
+
+ if (PCIE_GEN2(sii))
+ pciedev_crwlpciegen2(sii->pch);
+#endif /* !defined(BCMDONGLEHOST) */
+}
+
+void
+si_pcie_prep_D3(const si_t *sih, bool enter_D3)
+{
+#if !defined(BCMDONGLEHOST)
+ const si_info_t *sii = SI_INFO(sih);
+
+ if (PCIE_GEN2(sii))
+ pciedev_prep_D3(sii->pch, enter_D3);
+#endif /* !defined(BCMDONGLEHOST) */
+}
+
+#if !defined(BCMDONGLEHOST)
+uint
+BCMPOSTTRAPFN(si_corereg_ifup)(si_t *sih, uint core_id, uint regoff, uint mask, uint val)
+{
+ bool isup;
+ volatile void *regs;
+ uint origidx, ret_val, coreidx;
+
+ /* Remember original core before switch to chipc */
+ origidx = si_coreidx(sih);
+ regs = si_setcore(sih, core_id, 0);
+ BCM_REFERENCE(regs);
+ ASSERT(regs != NULL);
+
+ coreidx = si_coreidx(sih);
+
+ isup = si_iscoreup(sih);
+ if (isup == TRUE) {
+ ret_val = si_corereg(sih, coreidx, regoff, mask, val);
+ } else {
+ ret_val = 0;
+ }
+
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+ return ret_val;
+}
+
+/* 43012 specific low power settings.
+ * See http://confluence.broadcom.com/display/WLAN/BCM43012+Low+Power+Settings.
+ * See 47xxtcl/43012.tcl proc lp_enable.
+ */
+void si_43012_lp_enable(si_t *sih)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ bcm_int_bitmask_t intr_val;
+ uint origidx;
+ int count;
+ gciregs_t *gciregs;
+
+ /* Block ints and save current core */
+ INTR_OFF(sii, &intr_val);
+ origidx = si_coreidx(sih);
+
+ /* Enable radiodig clk gating */
+ si_pmu_chipcontrol(sih, CHIPCTRLREG5, PMUCCTL05_43012_RADIO_DIG_CLK_GATING_EN,
+ PMUCCTL05_43012_RADIO_DIG_CLK_GATING_EN);
+
+ /* Disable SPM clock */
+ si_pmu_chipcontrol(sih, CHIPCTRLREG5, PMUCCTL05_43012_DISABLE_SPM_CLK,
+ PMUCCTL05_43012_DISABLE_SPM_CLK);
+
+ /* Enable access of radiodig registers using async apb interface */
+ si_pmu_chipcontrol(sih, CHIPCTRLREG6, PMUCCTL06_43012_GCI2RDIG_USE_ASYNCAPB,
+ PMUCCTL06_43012_GCI2RDIG_USE_ASYNCAPB);
+
+ /* Remove SFLASH clock request (which is default on for boot-from-flash support) */
+ CHIPC_REG(sih, clk_ctl_st, CCS_SFLASH_CLKREQ | CCS_HQCLKREQ, CCS_HQCLKREQ);
+
+ /* Switch to GCI core */
+ if (!(gciregs = si_setcore(sih, GCI_CORE_ID, 0))) {
+ goto done;
+ }
+
+ /* GCIForceRegClk Off */
+ if (!(sih->lpflags & LPFLAGS_SI_GCI_FORCE_REGCLK_DISABLE)) {
+ si_gci_direct(sih, GET_GCI_OFFSET(sih, gci_corectrl),
+ GCI_CORECTRL_FORCEREGCLK_MASK, 0);
+ }
+
+ /* Disable the sflash pad */
+ if (!(sih->lpflags & LPFLAGS_SI_SFLASH_DISABLE)) {
+ si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_03,
+ CC_GCI_03_LPFLAGS_SFLASH_MASK, CC_GCI_03_LPFLAGS_SFLASH_VAL);
+ }
+
+ /* Input disable all LHL I/O pins */
+ for (count = 0; count < GPIO_CTRL_REG_COUNT; count++) {
+ OR_REG(sii->osh, &gciregs->gpio_ctrl_iocfg_p_adr[count],
+ GPIO_CTRL_REG_DISABLE_INTERRUPT);
+ }
+
+ /* Power down BT LDO3p3 */
+ if (!(sih->lpflags & LPFLAGS_SI_BTLDO3P3_DISABLE)) {
+ si_pmu_chipcontrol(sih, CHIPCTRLREG2, PMUCCTL02_43012_BTLDO3P3_PU_FORCE_OFF,
+ PMUCCTL02_43012_BTLDO3P3_PU_FORCE_OFF);
+ }
+
+done:
+ si_setcoreidx(sih, origidx);
+ INTR_RESTORE(sii, &intr_val);
+}
+
+/** this function is called from the BMAC during (re) initialisation */
+void
+si_lowpwr_opt(si_t *sih)
+{
+ uint mask, val;
+
+ /* 43602 chip (all revision) related changes */
+ if (BCM43602_CHIP(sih->chip)) {
+ uint hosti = si_chip_hostif(sih);
+ uint origidx = si_coreidx(sih);
+ volatile void *regs;
+
+ regs = si_setcore(sih, CC_CORE_ID, 0);
+ BCM_REFERENCE(regs);
+ ASSERT(regs != NULL);
+
+ /* disable usb app clk */
+ /* Can be done any time. If the host interface is not USB, do it; */
+ /* in the case of USB, do not write it */
+ if (hosti != CHIP_HOSTIF_USBMODE && !BCM43602_CHIP(sih->chip)) {
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL5, (1 << USBAPP_CLK_BIT), 0);
+ }
+ /* disable pcie clks */
+ if (hosti != CHIP_HOSTIF_PCIEMODE) {
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL5, (1 << PCIE_CLK_BIT), 0);
+ }
+
+ /* disable armcr4 debug clk */
+ /* Can be done anytime as long as driver is functional. */
+ /* In TCL, dhalt commands need to change to undo this */
+ switch (CHIPID(sih->chip)) {
+ CASE_BCM43602_CHIP:
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL3,
+ PMU43602_CC3_ARMCR4_DBG_CLK, 0);
+ break;
+ case BCM4369_CHIP_GRPID:
+ case BCM4362_CHIP_GRPID:
+ {
+ uint32 tapsel = si_corereg(sih, SI_CC_IDX,
+ OFFSETOF(chipcregs_t, jtagctrl), 0, 0)
+ & JCTRL_TAPSEL_BIT;
+ /* SWD: if tap sel bit set, */
+ /* enable armcr4 debug clock */
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL5,
+ (1 << ARMCR4_DBG_CLK_BIT),
+ tapsel?(1 << ARMCR4_DBG_CLK_BIT):0);
+ }
+ break;
+ default:
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL5,
+ (1 << ARMCR4_DBG_CLK_BIT), 0);
+ break;
+ }
+
+ /* Power down unused BBPLL ch-6 (pcie_tl_clk) and ch-5 (sample-sync-clk); */
+ /* valid in all modes, ch-5 needs to be re-enabled for sample-capture. */
+ /* This needs to be done in the pmu init path, at the beginning; it should not */
+ /* be done by a pcie driver. Enable the sample-sync-clk in the sample capture function */
+ if (BCM43602_CHIP(sih->chip)) {
+ /* configure open loop PLL parameters, open loop is used during S/R */
+ val = (3 << PMU1_PLL0_PC1_M1DIV_SHIFT) | (6 << PMU1_PLL0_PC1_M2DIV_SHIFT) |
+ (6 << PMU1_PLL0_PC1_M3DIV_SHIFT) | (8 << PMU1_PLL0_PC1_M4DIV_SHIFT);
+ si_pmu_pllcontrol(sih, PMU1_PLL0_PLLCTL4, ~0, val);
+ si_pmu_pllupd(sih);
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL2,
+ PMU43602_CC2_PCIE_CLKREQ_L_WAKE_EN | PMU43602_CC2_PMU_WAKE_ALP_AVAIL_EN,
+ PMU43602_CC2_PCIE_CLKREQ_L_WAKE_EN | PMU43602_CC2_PMU_WAKE_ALP_AVAIL_EN);
+ }
+
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+ }
+ if ((CHIPID(sih->chip) == BCM43012_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM43013_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM43014_CHIP_ID)) {
+ /* Enable memory standby based on lpflags */
+ if (sih->lpflags & LPFLAGS_SI_GLOBAL_DISABLE) {
+ SI_MSG(("si_lowpwr_opt: Disable lower power configuration!\n"));
+ goto exit;
+ }
+
+ SI_MSG(("si_lowpwr_opt: Enable lower power configuration!\n"));
+
+ /* Enable mem clk gating */
+ mask = (0x1 << MEM_CLK_GATE_BIT);
+ val = (0x1 << MEM_CLK_GATE_BIT);
+
+ si_corereg_ifup(sih, SDIOD_CORE_ID, SI_PWR_CTL_ST, mask, val);
+ si_corereg_ifup(sih, SOCRAM_CORE_ID, SI_PWR_CTL_ST, mask, val);
+
+ si_43012_lp_enable(sih);
+ }
+exit:
+ return;
+}
+#endif /* !defined(BCMDONGLEHOST) */
+
+#if defined(AXI_TIMEOUTS) || defined(AXI_TIMEOUTS_NIC)
+uint32
+BCMPOSTTRAPFN(si_clear_backplane_to_per_core)(si_t *sih, uint coreid, uint coreunit, void * wrap)
+{
+ if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
+ (CHIPTYPE(sih->socitype) == SOCI_DVTBUS)) {
+ return ai_clear_backplane_to_per_core(sih, coreid, coreunit, wrap);
+ }
+ return AXI_WRAP_STS_NONE;
+}
+#endif /* AXI_TIMEOUTS || AXI_TIMEOUTS_NIC */
+
+uint32
+BCMPOSTTRAPFN(si_clear_backplane_to)(si_t *sih)
+{
+ if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
+ (CHIPTYPE(sih->socitype) == SOCI_DVTBUS)) {
+ return ai_clear_backplane_to(sih);
+ }
+
+ return 0;
+}
+
+void
+BCMATTACHFN(si_update_backplane_timeouts)(const si_t *sih, bool enable, uint32 timeout_exp,
+ uint32 cid)
+{
+#if defined(AXI_TIMEOUTS) || defined(AXI_TIMEOUTS_NIC)
+ /* Enable only for AXI */
+ if (CHIPTYPE(sih->socitype) != SOCI_AI) {
+ return;
+ }
+
+ ai_update_backplane_timeouts(sih, enable, timeout_exp, cid);
+#endif /* AXI_TIMEOUTS || AXI_TIMEOUTS_NIC */
+}
+
+/*
+ * This routine adds the AXI timeouts for
+ * chipcommon, pcie and ARM slave wrappers
+ */
+void
+si_slave_wrapper_add(si_t *sih)
+{
+#if defined(AXI_TIMEOUTS) || defined(AXI_TIMEOUTS_NIC)
+ uint32 axi_to = 0;
+
+ /* Enable only for AXI */
+ if ((CHIPTYPE(sih->socitype) != SOCI_AI) &&
+ (CHIPTYPE(sih->socitype) != SOCI_DVTBUS)) {
+ return;
+ }
+
+ axi_to = AXI_TO_VAL;
+
+ /* All required slave wrappers are added in ai_scan */
+ ai_update_backplane_timeouts(sih, TRUE, axi_to, 0);
+
+#ifdef DISABLE_PCIE2_AXI_TIMEOUT
+ ai_update_backplane_timeouts(sih, FALSE, 0, PCIE_CORE_ID);
+ ai_update_backplane_timeouts(sih, FALSE, 0, PCIE2_CORE_ID);
+#endif
+
+#endif /* AXI_TIMEOUTS || AXI_TIMEOUTS_NIC */
+
+}
+
+#ifndef BCMDONGLEHOST
+/* Read from PCIE space using backplane indirect access. */
+/* Set the mask below for reading 1, 2, or 4 bytes in a single read: */
+/* #define SI_BPIND_1BYTE 0x1 */
+/* #define SI_BPIND_2BYTE 0x3 */
+/* #define SI_BPIND_4BYTE 0xF */
+int
+BCMPOSTTRAPFN(si_bpind_access)(si_t *sih, uint32 addr_high, uint32 addr_low,
+ int32 * data, bool read, uint32 us_timeout)
+{
+
+ uint32 status = 0;
+ uint8 mask = SI_BPIND_4BYTE;
+ int ret_val = BCME_OK;
+
+ /* Program Address low and high fields */
+ si_ccreg(sih, OFFSETOF(chipcregs_t, bp_addrlow), ~0, addr_low);
+ si_ccreg(sih, OFFSETOF(chipcregs_t, bp_addrhigh), ~0, addr_high);
+
+ if (read) {
+ /* Start the read */
+ si_ccreg(sih, OFFSETOF(chipcregs_t, bp_indaccess), ~0,
+ CC_BP_IND_ACCESS_START_MASK | mask);
+ } else {
+ /* Write the data and force the trigger */
+ si_ccreg(sih, OFFSETOF(chipcregs_t, bp_data), ~0, *data);
+ si_ccreg(sih, OFFSETOF(chipcregs_t, bp_indaccess), ~0,
+ CC_BP_IND_ACCESS_START_MASK |
+ CC_BP_IND_ACCESS_RDWR_MASK | mask);
+
+ }
+
+ /* Wait for status to be cleared */
+ SPINWAIT(((status = si_ccreg(sih, OFFSETOF(chipcregs_t, bp_indaccess), 0, 0)) &
+ CC_BP_IND_ACCESS_START_MASK), us_timeout);
+
+ if (status & (CC_BP_IND_ACCESS_START_MASK | CC_BP_IND_ACCESS_ERROR_MASK)) {
+ ret_val = BCME_ERROR;
+ SI_ERROR(("Action Failed for address 0x%08x:0x%08x \t status: 0x%x\n",
+ addr_high, addr_low, status));
+ /* For ATE, Stop execution here, to catch BPind timeout */
+#ifdef ATE_BUILD
+ hnd_die();
+#endif /* ATE_BUILD */
+ } else {
+ /* read data */
+ if (read)
+ *data = si_ccreg(sih, OFFSETOF(chipcregs_t, bp_data), 0, 0);
+ }
+
+ return ret_val;
+}
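+/*
+ * Usage sketch (the 64-bit backplane address is split into high/low words;
+ * the address and timeout shown are purely illustrative):
+ *
+ *	int32 data = 0;
+ *	if (si_bpind_access(sih, 0x0, 0x18000000, &data, TRUE, 1000) == BCME_OK)
+ *		SI_MSG(("bpind read: 0x%08x\n", data));
+ */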
+#endif /* !BCMDONGLEHOST */
+
+void
+si_pll_sr_reinit(si_t *sih)
+{
+#if !defined(BCMDONGLEHOST) && !defined(DONGLEBUILD)
+ osl_t *osh = si_osh(sih);
+ const si_info_t *sii = SI_INFO(sih);
+ uint32 data;
+
+ /* disable PLL open loop operation */
+ switch (CHIPID(sih->chip)) {
+ case BCM43602_CHIP_ID:
+ /* read back the pll openloop state */
+ data = si_pmu_pllcontrol(sih, PMU1_PLL0_PLLCTL8, 0, 0);
+ /* check current pll mode */
+ if ((data & PMU1_PLLCTL8_OPENLOOP_MASK) == 0) {
+ /* no POR; pll and saverestore init are not required */
+ return;
+ }
+ si_pmu_pll_init(sih, osh, sii->xtalfreq);
+ si_pmu_pllcontrol(sih, PMU1_PLL0_PLLCTL8, PMU1_PLLCTL8_OPENLOOP_MASK, 0);
+ si_pmu_pllupd(sih);
+ /* allow PLL to settle after configuring it for closed-loop operation */
+ OSL_DELAY(100);
+ break;
+ default:
+ /* any unsupported chip bail */
+ return;
+ }
+ si_pmu_init(sih, osh);
+ si_pmu_chip_init(sih, osh);
+#if defined(BCMPMU_STATS)
+ if (PMU_STATS_ENAB()) {
+ si_pmustatstimer_init(sih);
+ }
+#endif /* BCMPMU_STATS */
+#if defined(SR_ESSENTIALS)
+ /* The module can be powered down during the D3 state, thus this is
+ * needed before si_pmu_res_init() in order to use sr_isenab().
+ * Full dongle may not need to reinit saverestore.
+ */
+ if (SR_ESSENTIALS_ENAB()) {
+ sr_save_restore_init(sih);
+ }
+#endif /* SR_ESSENTIALS */
+ si_pmu_res_init(sih, sii->osh);
+ si_pmu_swreg_init(sih, osh);
+ si_lowpwr_opt(sih);
+#endif /* !BCMDONGLEHOST && !DONGLEBUILD */
+}
+
+void
+BCMATTACHFN(si_pll_closeloop)(si_t *sih)
+{
+#if !defined(BCMDONGLEHOST) && !defined(DONGLEBUILD) || defined(SAVERESTORE)
+ uint32 data;
+
+ BCM_REFERENCE(data);
+
+ /* disable PLL open loop operation */
+ switch (CHIPID(sih->chip)) {
+#if !defined(BCMDONGLEHOST) && !defined(DONGLEBUILD)
+ /* Don't apply these changes in FULL DONGLE mode since the
+ * behaviour was not verified
+ */
+ case BCM43602_CHIP_ID:
+ /* read back the pll openloop state */
+ data = si_pmu_pllcontrol(sih, PMU1_PLL0_PLLCTL8, 0, 0);
+ /* current mode is openloop (possible POR) */
+ if ((data & PMU1_PLLCTL8_OPENLOOP_MASK) != 0) {
+ si_pmu_pllcontrol(sih, PMU1_PLL0_PLLCTL8,
+ PMU1_PLLCTL8_OPENLOOP_MASK, 0);
+ si_pmu_pllupd(sih);
+ /* allow PLL to settle after configuring it for closed-loop operation */
+ OSL_DELAY(100);
+ }
+ break;
+#endif /* !BCMDONGLEHOST && !DONGLEBUILD */
+ case BCM4369_CHIP_GRPID:
+ case BCM4362_CHIP_GRPID:
+ case BCM4376_CHIP_GRPID:
+ case BCM4378_CHIP_GRPID:
+ case BCM4385_CHIP_GRPID:
+ case BCM4387_CHIP_GRPID:
+ case BCM4388_CHIP_GRPID:
+ case BCM4389_CHIP_GRPID:
+ case BCM4397_CHIP_GRPID:
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL1,
+ PMU_CC1_ENABLE_CLOSED_LOOP_MASK, PMU_CC1_ENABLE_CLOSED_LOOP);
+ break;
+ default:
+ /* any unsupported chip bail */
+ return;
+ }
+#endif /* !BCMDONGLEHOST && !DONGLEBUILD || SAVERESTORE */
+}
+
+#if !defined(BCMDONGLEHOST)
+void
+BCMPOSTTRAPFN(si_introff)(const si_t *sih, bcm_int_bitmask_t *intr_val)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ INTR_OFF(sii, intr_val);
+}
+
+void
+BCMPOSTTRAPFN(si_intrrestore)(const si_t *sih, bcm_int_bitmask_t *intr_val)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ INTR_RESTORE(sii, intr_val);
+}
+
+bool
+si_get_nvram_rfldo3p3_war(const si_t *sih)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ return sii->rfldo3p3_war;
+}
+
+void
+si_nvram_res_masks(const si_t *sih, uint32 *min_mask, uint32 *max_mask)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ /* Apply nvram override to min mask */
+ if (sii->min_mask_valid == TRUE) {
+ SI_MSG(("Applying rmin=%d to min_mask\n", sii->nvram_min_mask));
+ *min_mask = sii->nvram_min_mask;
+ }
+ /* Apply nvram override to max mask */
+ if (sii->max_mask_valid == TRUE) {
+ SI_MSG(("Applying rmax=%d to max_mask\n", sii->nvram_max_mask));
+ *max_mask = sii->nvram_max_mask;
+ }
+}
+
+uint8
+si_getspurmode(const si_t *sih)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ return sii->spurmode;
+}
+
+uint32
+si_xtalfreq(const si_t *sih)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ return sii->xtalfreq;
+}
+
+uint32
+si_get_openloop_dco_code(const si_t *sih)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ return sii->openloop_dco_code;
+}
+
+void
+si_set_openloop_dco_code(si_t *sih, uint32 _openloop_dco_code)
+{
+ si_info_t *sii = SI_INFO(sih);
+ sii->openloop_dco_code = _openloop_dco_code;
+}
+
+uint32
+BCMPOSTTRAPFN(si_get_armpllclkfreq)(const si_t *sih)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ uint32 armpllclkfreq = ARMPLL_FREQ_400MHZ;
+ BCM_REFERENCE(sii);
+
+#ifdef DONGLEBUILD
+ uint32 armpllclk_max;
+
+#if defined(__ARM_ARCH_7R__)
+ armpllclk_max = ARMPLL_FREQ_400MHZ;
+#elif defined(__ARM_ARCH_7A__)
+ armpllclk_max = ARMPLL_FREQ_1000MHZ;
+#else
+#error "Unknown CPU architecture for armpllclkfreq!"
+#endif
+
+ armpllclkfreq = (sii->armpllclkfreq) ? sii->armpllclkfreq : armpllclk_max;
+
+ SI_MSG(("armpllclkfreq = %d\n", armpllclkfreq));
+#endif /* DONGLEBUILD */
+
+ return armpllclkfreq;
+}
+
+uint8
+BCMPOSTTRAPFN(si_get_ccidiv)(const si_t *sih)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ uint8 ccidiv = 0xFF;
+ BCM_REFERENCE(sii);
+
+#ifdef DONGLEBUILD
+ ccidiv = (sii->ccidiv) ? sii->ccidiv : CCIDIV_3_TO_1;
+#endif /* DONGLEBUILD */
+
+ return ccidiv;
+}
+#ifdef DONGLEBUILD
+uint32
+BCMATTACHFN(si_wrapper_dump_buf_size)(const si_t *sih)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_wrapper_dump_buf_size(sih);
+ return 0;
+}
+
+uint32
+BCMPOSTTRAPFN(si_wrapper_dump_binary)(const si_t *sih, uchar *p)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_wrapper_dump_binary(sih, p);
+ return 0;
+}
+
+#if defined(ETD) && !defined(ETD_DISABLED)
+uint32
+BCMPOSTTRAPFN(si_wrapper_dump_last_timeout)(const si_t *sih, uint32 *error, uint32 *core,
+ uint32 *ba, uchar *p)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_wrapper_dump_last_timeout(sih, error, core, ba, p);
+ return 0;
+}
+#endif /* ETD && !ETD_DISABLED */
+#endif /* DONGLEBUILD */
+#endif /* !BCMDONGLEHOST */
+
+uint32
+BCMPOSTTRAPFN(si_findcoreidx_by_axiid)(const si_t *sih, uint32 axiid)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_findcoreidx_by_axiid(sih, axiid);
+ return 0;
+}
+
+void
+BCMPOSTTRAPFN(si_wrapper_get_last_error)(const si_t *sih, uint32 *error_status, uint32 *core,
+ uint32 *lo, uint32 *hi, uint32 *id)
+{
+#if defined(AXI_TIMEOUTS_NIC) || defined(AXI_TIMEOUTS)
+ if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ ai_wrapper_get_last_error(sih, error_status, core, lo, hi, id);
+#endif /* (AXI_TIMEOUTS_NIC) || (AXI_TIMEOUTS) */
+ return;
+}
+
+uint32
+si_get_axi_timeout_reg(const si_t *sih)
+{
+#if defined(AXI_TIMEOUTS_NIC) || defined(AXI_TIMEOUTS)
+ if (CHIPTYPE(sih->socitype) == SOCI_AI) {
+ return ai_get_axi_timeout_reg();
+ }
+#endif /* (AXI_TIMEOUTS_NIC) || (AXI_TIMEOUTS) */
+ return 0;
+}
+
+#if defined(BCMSRPWR) && !defined(BCMSRPWR_DISABLED)
+bool _bcmsrpwr = TRUE;
+#else
+bool _bcmsrpwr = FALSE;
+#endif
+
+#define PWRREQ_OFFSET(sih) OFFSETOF(chipcregs_t, powerctl)
+
+static void
+BCMPOSTTRAPFN(si_corereg_pciefast_write)(const si_t *sih, uint regoff, uint val)
+{
+ volatile uint32 *r = NULL;
+ const si_info_t *sii = SI_INFO(sih);
+
+ ASSERT((BUSTYPE(sih->bustype) == PCI_BUS));
+
+ r = (volatile uint32 *)((volatile char *)sii->curmap +
+ PCI_16KB0_PCIREGS_OFFSET + regoff);
+
+ W_REG(sii->osh, r, val);
+}
+
+static uint
+BCMPOSTTRAPFN(si_corereg_pciefast_read)(const si_t *sih, uint regoff)
+{
+ volatile uint32 *r = NULL;
+ const si_info_t *sii = SI_INFO(sih);
+
+ ASSERT((BUSTYPE(sih->bustype) == PCI_BUS));
+
+ r = (volatile uint32 *)((volatile char *)sii->curmap +
+ PCI_16KB0_PCIREGS_OFFSET + regoff);
+
+ return R_REG(sii->osh, r);
+}
+
+uint32
+BCMPOSTTRAPFN(si_srpwr_request)(const si_t *sih, uint32 mask, uint32 val)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ uint32 r, offset = (BUSTYPE(sih->bustype) == SI_BUS) ?
+ OFFSETOF(chipcregs_t, powerctl) : PWRREQ_OFFSET(sih);
+ uint32 mask2 = mask;
+ uint32 val2 = val;
+ volatile uint32 *fast_srpwr_addr = (volatile uint32 *)((uintptr)SI_ENUM_BASE(sih)
+ + (uintptr)offset);
+
+ if (FWSIGN_ENAB()) {
+ return 0;
+ }
+
+ if (mask || val) {
+ mask <<= SRPWR_REQON_SHIFT;
+ val <<= SRPWR_REQON_SHIFT;
+
+ /* Return if requested power request is already set */
+ if (BUSTYPE(sih->bustype) == SI_BUS) {
+ r = R_REG(sii->osh, fast_srpwr_addr);
+ } else {
+ r = si_corereg_pciefast_read(sih, offset);
+ }
+
+ if ((r & mask) == val) {
+ return r;
+ }
+
+ r = (r & ~mask) | val;
+
+ if (BUSTYPE(sih->bustype) == SI_BUS) {
+ W_REG(sii->osh, fast_srpwr_addr, r);
+ r = R_REG(sii->osh, fast_srpwr_addr);
+ } else {
+ si_corereg_pciefast_write(sih, offset, r);
+ r = si_corereg_pciefast_read(sih, offset);
+ }
+
+ if (val2) {
+ if ((r & (mask2 << SRPWR_STATUS_SHIFT)) ==
+ (val2 << SRPWR_STATUS_SHIFT)) {
+ return r;
+ }
+ si_srpwr_stat_spinwait(sih, mask2, val2);
+ }
+ } else {
+ if (BUSTYPE(sih->bustype) == SI_BUS) {
+ r = R_REG(sii->osh, fast_srpwr_addr);
+ } else {
+ r = si_corereg_pciefast_read(sih, offset);
+ }
+ }
+
+ return r;
+}
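+/*
+ * Usage sketch (hedged; the domain mask is one of the SRPWR_DMN*_MASK
+ * values used elsewhere in this file): requesting the ARM/BP domain on
+ * both sets the REQON bits and spinwaits for the matching power status:
+ *
+ *	(void)si_srpwr_request(sih, SRPWR_DMN1_ARMBPSD_MASK,
+ *		SRPWR_DMN1_ARMBPSD_MASK);
+ */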
+
+#ifdef CORE_PWRUP_WAR
+uint32
+BCMPOSTTRAPFN(si_srpwr_request_on_rev80)(si_t *sih, uint32 mask, uint32 val, uint32 ucode_awake)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ uint32 r, offset = OFFSETOF(chipcregs_t, powerctl); /* Same 0x1e8 per core */
+ uint cidx = (BUSTYPE(sih->bustype) == SI_BUS) ? SI_CC_IDX : sih->buscoreidx;
+ uint32 mask2 = mask;
+ uint32 val2 = val;
+ volatile uint32 *fast_srpwr_addr = (volatile uint32 *)((uintptr)SI_ENUM_BASE(sih)
+ + (uintptr)offset);
+ if (mask || val) {
+ mask <<= SRPWR_REQON_SHIFT;
+ val <<= SRPWR_REQON_SHIFT;
+
+ /* Return if requested power request is already set */
+ if (BUSTYPE(sih->bustype) == SI_BUS) {
+ r = R_REG(sii->osh, fast_srpwr_addr);
+ } else {
+ r = si_corereg(sih, cidx, offset, 0, 0);
+ }
+
+ if ((r & mask) == val) {
+ W_REG(sii->osh, fast_srpwr_addr, r);
+ return r;
+ }
+
+ r = (r & ~mask) | val;
+
+ if (BUSTYPE(sih->bustype) == SI_BUS) {
+ W_REG(sii->osh, fast_srpwr_addr, r);
+ r = R_REG(sii->osh, fast_srpwr_addr);
+ } else {
+ r = si_corereg(sih, cidx, offset, ~0, r);
+ }
+
+ if (val2) {
+
+ /*
+ * When FW has not requested that ucode be awake,
+ * the power status may still indicate ON because an
+ * earlier power-down request from FW or ucode has
+ * not been honored yet. In that case, FW will find
+ * the power status high at this stage, but since it
+ * is in transition (from ON to OFF), it may go down
+ * at any time and lead to an AXI slave error. Hence we
+ * need a fixed delay to cross any such transition state.
+ */
+ if (ucode_awake == 0) {
+ hnd_delay(SRPWR_UP_DOWN_DELAY);
+ }
+
+ if ((r & (mask2 << SRPWR_STATUS_SHIFT)) ==
+ (val2 << SRPWR_STATUS_SHIFT)) {
+ return r;
+ }
+ si_srpwr_stat_spinwait(sih, mask2, val2);
+ }
+ } else {
+ if (BUSTYPE(sih->bustype) == SI_BUS) {
+ r = R_REG(sii->osh, fast_srpwr_addr);
+ } else {
+ r = si_corereg(sih, cidx, offset, 0, 0);
+ }
+ SPINWAIT(((R_REG(sii->osh, fast_srpwr_addr) &
+ (mask2 << SRPWR_REQON_SHIFT)) != 0),
+ PMU_MAX_TRANSITION_DLY);
+ }
+
+ return r;
+}
+#endif /* CORE_PWRUP_WAR */
+
+uint32
+BCMPOSTTRAPFN(si_srpwr_stat_spinwait)(const si_t *sih, uint32 mask, uint32 val)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ uint32 r, offset = (BUSTYPE(sih->bustype) == SI_BUS) ?
+ OFFSETOF(chipcregs_t, powerctl) : PWRREQ_OFFSET(sih);
+ volatile uint32 *fast_srpwr_addr = (volatile uint32 *)((uintptr)SI_ENUM_BASE(sih)
+ + (uintptr)offset);
+
+ if (FWSIGN_ENAB()) {
+ return 0;
+ }
+ ASSERT(mask);
+ ASSERT(val);
+
+ /* spinwait on pwrstatus */
+ mask <<= SRPWR_STATUS_SHIFT;
+ val <<= SRPWR_STATUS_SHIFT;
+
+ if (BUSTYPE(sih->bustype) == SI_BUS) {
+ SPINWAIT(((R_REG(sii->osh, fast_srpwr_addr) & mask) != val),
+ PMU_MAX_TRANSITION_DLY);
+ r = R_REG(sii->osh, fast_srpwr_addr) & mask;
+ ASSERT(r == val);
+ } else {
+ SPINWAIT(((si_corereg_pciefast_read(sih, offset) & mask) != val),
+ PMU_MAX_TRANSITION_DLY);
+ r = si_corereg_pciefast_read(sih, offset) & mask;
+ ASSERT(r == val);
+ }
+
+ r = (r >> SRPWR_STATUS_SHIFT) & SRPWR_DMN_ALL_MASK(sih);
+
+ return r;
+}
+
+uint32
+si_srpwr_stat(si_t *sih)
+{
+ uint32 r, offset = (BUSTYPE(sih->bustype) == SI_BUS) ?
+ OFFSETOF(chipcregs_t, powerctl) : PWRREQ_OFFSET(sih);
+ uint cidx = (BUSTYPE(sih->bustype) == SI_BUS) ? SI_CC_IDX : sih->buscoreidx;
+
+ if (BUSTYPE(sih->bustype) == SI_BUS) {
+ r = si_corereg(sih, cidx, offset, 0, 0);
+ } else {
+ r = si_corereg_pciefast_read(sih, offset);
+ }
+
+ r = (r >> SRPWR_STATUS_SHIFT) & SRPWR_DMN_ALL_MASK(sih);
+
+ return r;
+}
+
+uint32
+si_srpwr_domain(si_t *sih)
+{
+ uint32 r, offset = (BUSTYPE(sih->bustype) == SI_BUS) ?
+ OFFSETOF(chipcregs_t, powerctl) : PWRREQ_OFFSET(sih);
+ uint cidx = (BUSTYPE(sih->bustype) == SI_BUS) ? SI_CC_IDX : sih->buscoreidx;
+
+ if (FWSIGN_ENAB()) {
+ return 0;
+ }
+
+ if (BUSTYPE(sih->bustype) == SI_BUS) {
+ r = si_corereg(sih, cidx, offset, 0, 0);
+ } else {
+ r = si_corereg_pciefast_read(sih, offset);
+ }
+
+ r = (r >> SRPWR_DMN_ID_SHIFT) & SRPWR_DMN_ID_MASK;
+
+ return r;
+}
+
+uint8
+si_srpwr_domain_wl(si_t *sih)
+{
+ return SRPWR_DMN1_ARMBPSD;
+}
+
+bool
+si_srpwr_cap(si_t *sih)
+{
+ if (FWSIGN_ENAB()) {
+ return FALSE;
+ }
+
+ /* If domain ID is non-zero, chip supports power domain control */
+ return si_srpwr_domain(sih) != 0 ? TRUE : FALSE;
+}
+
+uint32
+BCMPOSTTRAPFN(si_srpwr_domain_all_mask)(const si_t *sih)
+{
+ uint32 mask = SRPWR_DMN0_PCIE_MASK |
+ SRPWR_DMN1_ARMBPSD_MASK |
+ SRPWR_DMN2_MACAUX_MASK |
+ SRPWR_DMN3_MACMAIN_MASK;
+
+ if (si_scan_core_present(sih)) {
+ mask |= SRPWR_DMN4_MACSCAN_MASK;
+ }
+
+ return mask;
+}
+
+uint32
+si_srpwr_bt_status(si_t *sih)
+{
+ uint32 r;
+ uint32 offset = (BUSTYPE(sih->bustype) == SI_BUS) ?
+ OFFSETOF(chipcregs_t, powerctl) : PWRREQ_OFFSET(sih);
+ uint32 cidx = (BUSTYPE(sih->bustype) == SI_BUS) ? SI_CC_IDX : sih->buscoreidx;
+
+ if (BUSTYPE(sih->bustype) == SI_BUS) {
+ r = si_corereg(sih, cidx, offset, 0, 0);
+ } else {
+ r = si_corereg_pciefast_read(sih, offset);
+ }
+
+ r = (r >> SRPWR_BT_STATUS_SHIFT) & SRPWR_BT_STATUS_MASK;
+
+ return r;
+}
+/* Utility API to read/write raw registers at an absolute backplane address.
+ * It can be invoked from either FW or the host driver.
+ */
+uint32
+si_raw_reg(const si_t *sih, uint32 reg, uint32 val, uint32 write_req)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ uint32 address_space = reg & ~0xFFF;
+ volatile uint32 * addr = (void*)(uintptr)(reg);
+ uint32 prev_value = 0;
+ uint32 cfg_reg = 0;
+
+ if (sii == NULL) {
+ return 0;
+ }
+
+ /* No need to translate the absolute address on SI bus */
+ if (BUSTYPE(sih->bustype) == SI_BUS) {
+ goto skip_cfg;
+ }
+
+ /* This API supports only the PCI host interface */
+ if (BUSTYPE(sih->bustype) != PCI_BUS) {
+ return ID32_INVALID;
+ }
+
+ if (PCIE_GEN2(sii)) {
+ /* Use the BAR0 secondary window on PCIe Gen2.
+ * Set the secondary BAR0 window to the current register of interest.
+ */
+ addr = (volatile uint32*)(((volatile uint8*)sii->curmap) +
+ PCI_SEC_BAR0_WIN_OFFSET + (reg & 0xfff));
+ cfg_reg = PCIE2_BAR0_CORE2_WIN;
+
+ } else {
+ /* PCIe Gen1 does not have a secondary BAR0 window;
+ * reuse BAR0 WIN2.
+ */
+ addr = (volatile uint32*)(((volatile uint8*)sii->curmap) +
+ PCI_BAR0_WIN2_OFFSET + (reg & 0xfff));
+ cfg_reg = PCI_BAR0_WIN2;
+ }
+
+ prev_value = OSL_PCI_READ_CONFIG(sii->osh, cfg_reg, 4);
+
+ if (prev_value != address_space) {
+ OSL_PCI_WRITE_CONFIG(sii->osh, cfg_reg,
+ sizeof(uint32), address_space);
+ } else {
+ prev_value = 0;
+ }
+
+skip_cfg:
+ if (write_req) {
+ W_REG(sii->osh, addr, val);
+ } else {
+ val = R_REG(sii->osh, addr);
+ }
+
+ if (prev_value) {
+ /* Restore BAR0 WIN2 for PCIE GEN1 devices */
+ OSL_PCI_WRITE_CONFIG(sii->osh,
+ cfg_reg, sizeof(uint32), prev_value);
+ }
+
+ return val;
+}
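+
+/* Usage sketch (illustrative): read-modify-write a register given its
+ * absolute backplane address. The 0x18000000 chipcommon base is only an
+ * assumption for the example; the real address comes from the chip's
+ * backplane map:
+ *
+ *	uint32 v = si_raw_reg(sih, 0x18000000u, 0, 0);		// read
+ *	(void)si_raw_reg(sih, 0x18000000u, v | 0x1u, 1);	// write
+ */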
+
+#ifdef DONGLEBUILD
+/* If logs can be gathered, the host can be notified whether or not to take them */
+bool
+BCMPOSTTRAPFN(si_check_enable_backplane_log)(const si_t *sih)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_AI) {
+ return ai_check_enable_backplane_log(sih);
+ }
+ return TRUE;
+}
+#endif /* DONGLEBUILD */
+
+uint8
+si_lhl_ps_mode(const si_t *sih)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ return sii->lhl_ps_mode;
+}
+
+uint8
+si_hib_ext_wakeup_isenab(const si_t *sih)
+{
+ const si_info_t *sii = SI_INFO(sih);
+ return sii->hib_ext_wakeup_enab;
+}
+
+static void
+BCMATTACHFN(si_oob_war_BT_F1)(si_t *sih)
+{
+ uint origidx = si_coreidx(sih);
+ volatile void *regs;
+
+ if (FWSIGN_ENAB()) {
+ return;
+ }
+ regs = si_setcore(sih, AXI2AHB_BRIDGE_ID, 0);
+ ASSERT(regs);
+ BCM_REFERENCE(regs);
+
+ si_wrapperreg(sih, AI_OOBSELINA30, 0xF00, 0x300);
+
+ si_setcoreidx(sih, origidx);
+}
+
+#ifndef BCMDONGLEHOST
+
+#define RF_SW_CTRL_ELNABYP_ANT_MASK 0x000CC330
+
+/* These are the outputs to the rfem which go out via the CLB */
+#define RF_SW_CTRL_ELNABYP_2G0_MASK 0x00000010
+#define RF_SW_CTRL_ELNABYP_5G0_MASK 0x00000020
+#define RF_SW_CTRL_ELNABYP_2G1_MASK 0x00004000
+#define RF_SW_CTRL_ELNABYP_5G1_MASK 0x00008000
+
+/* Feedback values go into the phy from CLB output
+ * The polarity of the feedback is opposite to the elnabyp signal going out to the rfem
+ */
+#define RF_SW_CTRL_ELNABYP_2G0_MASK_FB 0x00000100
+#define RF_SW_CTRL_ELNABYP_5G0_MASK_FB 0x00000200
+#define RF_SW_CTRL_ELNABYP_2G1_MASK_FB 0x00040000
+#define RF_SW_CTRL_ELNABYP_5G1_MASK_FB 0x00080000
+
+/* The elnabyp override values for each rfem */
+#define ELNABYP_IOVAR_2G0_VALUE_MASK 0x01
+#define ELNABYP_IOVAR_5G0_VALUE_MASK 0x02
+#define ELNABYP_IOVAR_2G1_VALUE_MASK 0x04
+#define ELNABYP_IOVAR_5G1_VALUE_MASK 0x08
+
+/* The elnabyp override enables for each rfem
+ * The values are 'don't care' if the corresponding enables are 0
+ */
+#define ELNABYP_IOVAR_2G0_ENABLE_MASK 0x10
+#define ELNABYP_IOVAR_5G0_ENABLE_MASK 0x20
+#define ELNABYP_IOVAR_2G1_ENABLE_MASK 0x40
+#define ELNABYP_IOVAR_5G1_ENABLE_MASK 0x80
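+
+/* Worked example (illustrative): to force the 2G0 eLNA bypass ON and leave
+ * the other three paths untouched, set both the 2G0 value and enable bits:
+ *
+ *	mode = ELNABYP_IOVAR_2G0_VALUE_MASK | ELNABYP_IOVAR_2G0_ENABLE_MASK;	// 0x11
+ *	(void)si_rffe_set_elnabyp_mode(sih, mode);
+ *
+ * A mode of ELNABYP_IOVAR_2G0_ENABLE_MASK alone (0x10) forces the bypass OFF.
+ */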
+
+#define ANTENNA_0_ENABLE 0x00000044
+#define ANTENNA_1_ENABLE 0x20000000
+#define RFFE_CTRL_START 0x80000000
+#define RFFE_CTRL_READ 0x40000000
+#define RFFE_CTRL_RFEM_SEL 0x08000000
+#define RFFE_MISC_EN_PHYCYCLES 0x00000002
+
+void
+si_rffe_rfem_init(si_t *sih)
+{
+ ASSERT(GCI_OFFSETOF(sih, gci_chipctrl) == OFFSETOF(gciregs_t, gci_chipctrl));
+ /* Enable RFFE clock
+ * GCI Chip Control reg 15 - Bits 29 & 30 (Global 509 & 510)
+ */
+ si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_15, ALLONES_32, 0x60000000);
+ /* SDATA0/1 rf_sw_ctrl pull down
+ * GCI chip control reg 23 - Bits 29 & 30 (Global 765 & 766)
+ */
+ si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_23, 0x3 << 29, 0x3 << 29);
+ /* RFFE Clk Ctrl Reg */
+ si_gci_direct(sih, OFFSETOF(gciregs_t, gci_rffe_clk_ctrl), ALLONES_32, 0x101);
+
+ /* Disable override control of RFFE controller and enable phy control */
+ si_gci_direct(sih, OFFSETOF(gciregs_t, gci_rffe_change_detect_ovr_wlmc), ALLONES_32, 0);
+ si_gci_direct(sih, OFFSETOF(gciregs_t, gci_rffe_change_detect_ovr_wlac), ALLONES_32, 0);
+ si_gci_direct(sih, OFFSETOF(gciregs_t, gci_rffe_change_detect_ovr_wlsc), ALLONES_32, 0);
+ si_gci_direct(sih, OFFSETOF(gciregs_t, gci_rffe_change_detect_ovr_btmc), ALLONES_32, 0);
+ si_gci_direct(sih, OFFSETOF(gciregs_t, gci_rffe_change_detect_ovr_btsc), ALLONES_32, 0);
+
+ /* reg address = 0x16, deviceID of rffe dev1 = 0xE, deviceID of dev0 = 0xC,
+ * last_mux_ctrl = 0, disable_preemption = 0 (1 for 4387b0), tssi_mask = 3, tssi_en = 0,
+ * rffe_disable_line1 = 0, enable rffe_en_phyaccess = 1,
+ * disable BRCM proprietary reg0 wr = 0
+ */
+ if (sih->ccrev == 68) {
+ si_gci_direct(sih, OFFSETOF(gciregs_t, gci_rffe_misc_ctrl), ALLONES_32, 0x0016EC72);
+ } else {
+ si_gci_direct(sih, OFFSETOF(gciregs_t, gci_rffe_misc_ctrl), ALLONES_32, 0x0016EC32);
+ }
+
+ /* Enable Dual RFFE Master: rffe_single_master = 0
+ * Use Master0 SW interface only : rffe_dis_sw_intf_m1 = 1
+ */
+ if (sih->ccrev >= 71) {
+ si_gci_direct(sih, OFFSETOF(gciregs_t, gci_rffe_clk_ctrl),
+ 1u << 20u | 1u << 26u, 1u << 26u);
+ }
+
+ /* Enable antenna access for both cores */
+ si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_03, ALLONES_32, ANTENNA_0_ENABLE);
+ si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_02, ALLONES_32, ANTENNA_1_ENABLE);
+}
+
+void
+si_rffe_set_debug_mode(si_t *sih, bool enable)
+{
+ uint32 misc_ctrl_set = 0;
+ /* Enable/Disable rffe_en_phyaccess bit */
+ if (!enable) {
+ misc_ctrl_set = RFFE_MISC_EN_PHYCYCLES;
+ }
+ si_gci_direct(sih, OFFSETOF(gciregs_t, gci_rffe_misc_ctrl), RFFE_MISC_EN_PHYCYCLES,
+ misc_ctrl_set);
+
+ sih->rffe_debug_mode = enable;
+}
+
+bool
+si_rffe_get_debug_mode(si_t *sih)
+{
+ return sih->rffe_debug_mode;
+}
+
+int8
+si_rffe_get_elnabyp_mode(si_t *sih)
+{
+ return sih->rffe_elnabyp_mode;
+}
+
+int
+si_rffe_set_elnabyp_mode(si_t *sih, uint8 mode)
+{
+ int ret = BCME_OK;
+ uint32 elnabyp_ovr_val = 0;
+ uint32 elnabyp_ovr_en = 0;
+
+ if ((mode & ELNABYP_IOVAR_2G0_VALUE_MASK) && (mode & ELNABYP_IOVAR_2G0_ENABLE_MASK)) {
+ elnabyp_ovr_val |= RF_SW_CTRL_ELNABYP_2G0_MASK;
+ } else if (mode & ELNABYP_IOVAR_2G0_ENABLE_MASK) {
+ elnabyp_ovr_val |= RF_SW_CTRL_ELNABYP_2G0_MASK_FB;
+ }
+ if ((mode & ELNABYP_IOVAR_5G0_VALUE_MASK) && (mode & ELNABYP_IOVAR_5G0_ENABLE_MASK)) {
+ elnabyp_ovr_val |= RF_SW_CTRL_ELNABYP_5G0_MASK;
+ } else if (mode & ELNABYP_IOVAR_5G0_ENABLE_MASK) {
+ elnabyp_ovr_val |= RF_SW_CTRL_ELNABYP_5G0_MASK_FB;
+ }
+ if ((mode & ELNABYP_IOVAR_2G1_VALUE_MASK) && (mode & ELNABYP_IOVAR_2G1_ENABLE_MASK)) {
+ elnabyp_ovr_val |= RF_SW_CTRL_ELNABYP_2G1_MASK;
+ } else if (mode & ELNABYP_IOVAR_2G1_ENABLE_MASK) {
+ elnabyp_ovr_val |= RF_SW_CTRL_ELNABYP_2G1_MASK_FB;
+ }
+ if ((mode & ELNABYP_IOVAR_5G1_VALUE_MASK) && (mode & ELNABYP_IOVAR_5G1_ENABLE_MASK)) {
+ elnabyp_ovr_val |= RF_SW_CTRL_ELNABYP_5G1_MASK;
+ } else if (mode & ELNABYP_IOVAR_5G1_ENABLE_MASK) {
+ elnabyp_ovr_val |= RF_SW_CTRL_ELNABYP_5G1_MASK_FB;
+ }
+
+ if (mode & ELNABYP_IOVAR_2G0_ENABLE_MASK) {
+ elnabyp_ovr_en |= (RF_SW_CTRL_ELNABYP_2G0_MASK | RF_SW_CTRL_ELNABYP_2G0_MASK_FB);
+ }
+ if (mode & ELNABYP_IOVAR_5G0_ENABLE_MASK) {
+ elnabyp_ovr_en |= (RF_SW_CTRL_ELNABYP_5G0_MASK | RF_SW_CTRL_ELNABYP_5G0_MASK_FB);
+ }
+ if (mode & ELNABYP_IOVAR_2G1_ENABLE_MASK) {
+ elnabyp_ovr_en |= (RF_SW_CTRL_ELNABYP_2G1_MASK | RF_SW_CTRL_ELNABYP_2G1_MASK_FB);
+ }
+ if (mode & ELNABYP_IOVAR_5G1_ENABLE_MASK) {
+ elnabyp_ovr_en |= (RF_SW_CTRL_ELNABYP_5G1_MASK | RF_SW_CTRL_ELNABYP_5G1_MASK_FB);
+ }
+
+ si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_14, RF_SW_CTRL_ELNABYP_ANT_MASK,
+ elnabyp_ovr_val);
+ si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_15, RF_SW_CTRL_ELNABYP_ANT_MASK,
+ elnabyp_ovr_en);
+
+ sih->rffe_elnabyp_mode = mode;
+
+ return ret;
+}
+
+int
+BCMPOSTTRAPFN(si_rffe_rfem_read)(si_t *sih, uint8 dev_id, uint8 antenna, uint16 reg_addr,
+ uint32 *val)
+{
+ int ret = BCME_OK;
+ uint32 gci_rffe_ctrl_val, antenna_0_enable, antenna_1_enable;
+ uint32 gci_rffe_ctrl = si_gci_direct(sih, OFFSETOF(gciregs_t, gci_rffe_ctrl), 0, 0);
+ uint32 gci_chipcontrol_03 = si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_03, 0, 0);
+ uint32 gci_chipcontrol_02 = si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_02, 0, 0);
+
+ si_gci_direct(sih, OFFSETOF(gciregs_t, gci_rffe_ctrl), ALLONES_32, 0);
+
+ switch (antenna) {
+ case 1:
+ gci_rffe_ctrl_val = RFFE_CTRL_START | RFFE_CTRL_READ;
+ antenna_0_enable = ANTENNA_0_ENABLE;
+ antenna_1_enable = 0;
+ break;
+ case 2:
+ gci_rffe_ctrl_val = RFFE_CTRL_START | RFFE_CTRL_READ | RFFE_CTRL_RFEM_SEL;
+ antenna_0_enable = 0;
+ antenna_1_enable = ANTENNA_1_ENABLE;
+ break;
+ default:
+ ret = BCME_BADOPTION;
+ }
+
+ if (ret == BCME_OK) {
+ si_gci_direct(sih, OFFSETOF(gciregs_t, gci_rffe_config), ALLONES_32,
+ ((uint16) dev_id) << 8);
+ si_gci_direct(sih, OFFSETOF(gciregs_t, gci_rffe_rfem_addr), ALLONES_32, reg_addr);
+ si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_03, ANTENNA_0_ENABLE, antenna_0_enable);
+ si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_02, ANTENNA_1_ENABLE, antenna_1_enable);
+ /* Initiate read */
+ si_gci_direct(sih, OFFSETOF(gciregs_t, gci_rffe_ctrl),
+ RFFE_CTRL_START | RFFE_CTRL_READ | RFFE_CTRL_RFEM_SEL, gci_rffe_ctrl_val);
+ /* Wait for status */
+ SPINWAIT(si_gci_direct(sih, OFFSETOF(gciregs_t, gci_rffe_ctrl), 0, 0) &
+ RFFE_CTRL_START, 100);
+ if (si_gci_direct(sih, OFFSETOF(gciregs_t, gci_rffe_ctrl), 0, 0) &
+ RFFE_CTRL_START) {
+ OSL_SYS_HALT();
+ ret = BCME_NOTREADY;
+ } else {
+ *val = si_gci_direct(sih, OFFSETOF(gciregs_t, gci_rffe_rfem_data0), 0, 0);
+ /* Clear read and rfem_sel flags */
+ si_gci_direct(sih, OFFSETOF(gciregs_t, gci_rffe_ctrl),
+ RFFE_CTRL_READ | RFFE_CTRL_RFEM_SEL, 0);
+ }
+ }
+
+ /* Restore the values */
+ si_gci_direct(sih, OFFSETOF(gciregs_t, gci_rffe_ctrl), ALLONES_32, gci_rffe_ctrl);
+ si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_03, ALLONES_32, gci_chipcontrol_03);
+ si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_02, ALLONES_32, gci_chipcontrol_02);
+ return ret;
+}
+
+int
+BCMPOSTTRAPFN(si_rffe_rfem_write)(si_t *sih, uint8 dev_id, uint8 antenna, uint16 reg_addr,
+ uint32 data)
+{
+ int ret = BCME_OK;
+ uint32 antenna_0_enable, antenna_1_enable;
+ uint32 gci_rffe_ctrl = si_gci_direct(sih, OFFSETOF(gciregs_t, gci_rffe_ctrl), 0, 0);
+ uint32 gci_chipcontrol_03 = si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_03, 0, 0);
+ uint32 gci_chipcontrol_02 = si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_02, 0, 0);
+ uint8 repeat = (sih->ccrev == 69) ? 2 : 1; /* WAR for 4387c0 */
+
+ si_gci_direct(sih, OFFSETOF(gciregs_t, gci_rffe_ctrl), ALLONES_32, 0);
+
+ switch (antenna) {
+ case 1:
+ antenna_0_enable = ANTENNA_0_ENABLE;
+ antenna_1_enable = 0;
+ break;
+ case 2:
+ antenna_0_enable = 0;
+ antenna_1_enable = ANTENNA_1_ENABLE;
+ break;
+ case 3:
+ antenna_0_enable = ANTENNA_0_ENABLE;
+ antenna_1_enable = ANTENNA_1_ENABLE;
+ break;
+ default:
+ ret = BCME_BADOPTION;
+ }
+
+ if (ret == BCME_OK) {
+ si_gci_direct(sih, OFFSETOF(gciregs_t, gci_rffe_config), ALLONES_32,
+ ((uint16) dev_id) << 8);
+ si_gci_direct(sih, OFFSETOF(gciregs_t, gci_rffe_rfem_addr), ALLONES_32, reg_addr);
+ si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_03, ANTENNA_0_ENABLE, antenna_0_enable);
+ si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_02, ANTENNA_1_ENABLE, antenna_1_enable);
+ si_gci_direct(sih, OFFSETOF(gciregs_t, gci_rffe_rfem_data0), ALLONES_32, data);
+ while (repeat--) {
+ /* Initiate write */
+ si_gci_direct(sih, OFFSETOF(gciregs_t, gci_rffe_ctrl), RFFE_CTRL_START |
+ RFFE_CTRL_READ | RFFE_CTRL_RFEM_SEL, RFFE_CTRL_START);
+ /* Wait for status */
+ SPINWAIT(si_gci_direct(sih, OFFSETOF(gciregs_t, gci_rffe_ctrl), 0, 0) &
+ RFFE_CTRL_START, 100);
+ if (si_gci_direct(sih, OFFSETOF(gciregs_t, gci_rffe_ctrl), 0, 0) &
+ RFFE_CTRL_START) {
+ OSL_SYS_HALT();
+ ret = BCME_NOTREADY;
+ }
+ }
+ }
+
+ /* Restore the values */
+ si_gci_direct(sih, OFFSETOF(gciregs_t, gci_rffe_ctrl), ALLONES_32, gci_rffe_ctrl);
+ si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_03, ALLONES_32, gci_chipcontrol_03);
+ si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_02, ALLONES_32, gci_chipcontrol_02);
+ return ret;
+}
+#endif /* !BCMDONGLEHOST */
+
+#if defined(BCMSDIODEV_ENABLED) && defined(ATE_BUILD)
+bool
+si_chipcap_sdio_ate_only(const si_t *sih)
+{
+ bool ate_build = FALSE;
+ switch (CHIPID(sih->chip)) {
+ case BCM4369_CHIP_GRPID:
+ if (CST4369_CHIPMODE_SDIOD(sih->chipst) &&
+ CST4369_CHIPMODE_PCIE(sih->chipst)) {
+ ate_build = TRUE;
+ }
+ break;
+ case BCM4376_CHIP_GRPID:
+ case BCM4378_CHIP_GRPID:
+ case BCM4385_CHIP_GRPID:
+ case BCM4387_CHIP_GRPID:
+ case BCM4388_CHIP_GRPID:
+ case BCM4389_CHIP_GRPID:
+ case BCM4397_CHIP_GRPID:
+ ate_build = TRUE;
+ break;
+ case BCM4362_CHIP_GRPID:
+ if (CST4362_CHIPMODE_SDIOD(sih->chipst) &&
+ CST4362_CHIPMODE_PCIE(sih->chipst)) {
+ ate_build = TRUE;
+ }
+ break;
+ default:
+ break;
+ }
+ return ate_build;
+}
+#endif /* BCMSDIODEV_ENABLED && ATE_BUILD */
+
+#ifdef UART_TRAP_DBG
+void
+si_dump_APB_Bridge_registers(const si_t *sih)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_AI) {
+ ai_dump_APB_Bridge_registers(sih);
+ }
+}
+#endif /* UART_TRAP_DBG */
+
+void
+si_force_clocks(const si_t *sih, uint clock_state)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_AI) {
+ ai_force_clocks(sih, clock_state);
+ }
+}
+
+/* Indicates to siutils how the PCIe BAR0 is mapped,
+ * so that siutils can arrange BAR0 window management
+ * for the PCI NIC driver.
+ *
+ * Here is the current scheme, all within BAR0:
+ *
+ * id enum wrapper
+ * ==== ========= =========
+ * 0 0000-0FFF 1000-1FFF
+ * 1 4000-4FFF 5000-5FFF
+ * 2 9000-9FFF A000-AFFF
+ * >= 3 not supported
+ */
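+
+/* A minimal sketch (illustrative) of deriving the per-slice window offsets
+ * from the table above; the bases are read off the table, not computed from
+ * a chip-independent formula, and the wrapper space always follows the enum
+ * space by 0x1000:
+ *
+ *	static const uint32 slice_enum_base[] = { 0x0000, 0x4000, 0x9000 };
+ *	enum_off = slice_enum_base[si_get_slice_id(sih)];
+ *	wrap_off = enum_off + 0x1000;
+ */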
+void
+si_set_slice_id(si_t *sih, uint8 slice)
+{
+ si_info_t *sii = SI_INFO(sih);
+
+ sii->slice = slice;
+}
+
+uint8
+si_get_slice_id(const si_t *sih)
+{
+ const si_info_t *sii = SI_INFO(sih);
+
+ return sii->slice;
+}
+
+bool
+BCMPOSTTRAPRAMFN(si_scan_core_present)(const si_t *sih)
+{
+ return (si_numcoreunits(sih, D11_CORE_ID) > 2);
+}
+
+#if !defined(BCMDONGLEHOST)
+bool
+si_btc_bt_status_in_reset(si_t *sih)
+{
+ uint32 chipst = 0;
+ switch (CHIPID(sih->chip)) {
+ case BCM4387_CHIP_GRPID:
+ chipst = si_corereg(sih, SI_CC_IDX,
+ OFFSETOF(chipcregs_t, chipstatus), 0, 0);
+ /* 1 = BT in reset, 0 = BT out of reset */
+ return (chipst & (1 << BT_IN_RESET_BIT_SHIFT)) ? TRUE : FALSE;
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+ return FALSE;
+}
+
+bool
+si_btc_bt_status_in_pds(si_t *sih)
+{
+ return !((si_gci_chipstatus(sih, GCI_CHIPSTATUS_04) >>
+ BT_IN_PDS_BIT_SHIFT) & 0x1);
+}
+
+int
+si_btc_bt_pds_wakeup_force(si_t *sih, bool force)
+{
+ if (force) {
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL0,
+ PMU_CC0_4387_BT_PU_WAKE_MASK, PMU_CC0_4387_BT_PU_WAKE_MASK);
+ SPINWAIT((si_btc_bt_status_in_pds(sih) == TRUE), PMU_MAX_TRANSITION_DLY);
+ if (si_btc_bt_status_in_pds(sih) == TRUE) {
+ SI_ERROR(("si_btc_bt_pds_wakeup_force"
+ " ERROR : BT still in PDS after pds_wakeup_force \n"));
+ return BCME_ERROR;
+ } else {
+ return BCME_OK;
+ }
+ } else {
+ si_pmu_chipcontrol(sih, PMU_CHIPCTL0, PMU_CC0_4387_BT_PU_WAKE_MASK, 0);
+ return BCME_OK;
+ }
+}
+
+#endif /* !defined(BCMDONGLEHOST) */
+
+#ifndef BCMDONGLEHOST
+/* query d11 core type */
+uint
+BCMATTACHFN(si_core_d11_type)(si_t *sih, uint coreunit)
+{
+#ifdef WL_SCAN_CORE_SIM
+ /* use the core unit WL_SCAN_CORE_SIM as the scan core */
+ return (coreunit == WL_SCAN_CORE_SIM) ?
+ D11_CORE_TYPE_SCAN : D11_CORE_TYPE_NORM;
+#else
+ uint coreidx;
+ volatile void *regs;
+ uint coretype;
+
+ coreidx = si_coreidx(sih);
+ regs = si_setcore(sih, D11_CORE_ID, coreunit);
+ ASSERT(regs != NULL);
+ BCM_REFERENCE(regs);
+
+ coretype = (si_core_sflags(sih, 0, 0) & SISF_CORE_BITS_SCAN) != 0 ?
+ D11_CORE_TYPE_SCAN : D11_CORE_TYPE_NORM;
+
+ si_setcoreidx(sih, coreidx);
+ return coretype;
+#endif /* WL_SCAN_CORE_SIM */
+}
+
+/* decide if this core is allowed by the package option or not... */
+bool
+BCMATTACHFN(si_pkgopt_d11_allowed)(si_t *sih, uint coreunit)
+{
+ uint coreidx;
+ volatile void *regs;
+ bool allowed;
+
+ coreidx = si_coreidx(sih);
+ regs = si_setcore(sih, D11_CORE_ID, coreunit);
+ ASSERT(regs != NULL);
+ BCM_REFERENCE(regs);
+
+ allowed = ((si_core_sflags(sih, 0, 0) & SISF_CORE_BITS_SCAN) == 0 ||
+ (si_gci_chipstatus(sih, GCI_CHIPSTATUS_09) & GCI_CST9_SCAN_DIS) == 0);
+
+ si_setcoreidx(sih, coreidx);
+ return allowed;
+}
+
+void
+si_configure_pwrthrottle_gpio(si_t *sih, uint8 pwrthrottle_gpio_in)
+{
+ uint32 board_gpio = 0;
+ if (CHIPID(sih->chip) == BCM4369_CHIP_ID || CHIPID(sih->chip) == BCM4377_CHIP_ID) {
+ si_gci_set_functionsel(sih, pwrthrottle_gpio_in, 1);
+ }
+ board_gpio = 1 << pwrthrottle_gpio_in;
+ si_gpiocontrol(sih, board_gpio, 0, GPIO_DRV_PRIORITY);
+ si_gpioouten(sih, board_gpio, 0, GPIO_DRV_PRIORITY);
+}
+
+void
+si_configure_onbody_gpio(si_t *sih, uint8 onbody_gpio_in)
+{
+ uint32 board_gpio = 0;
+ if (CHIPID(sih->chip) == BCM4369_CHIP_ID || CHIPID(sih->chip) == BCM4377_CHIP_ID) {
+ si_gci_set_functionsel(sih, onbody_gpio_in, 1);
+ }
+ board_gpio = 1 << onbody_gpio_in;
+ si_gpiocontrol(sih, board_gpio, 0, GPIO_DRV_PRIORITY);
+ si_gpioouten(sih, board_gpio, 0, GPIO_DRV_PRIORITY);
+}
+
+#endif /* !BCMDONGLEHOST */
+
+void
+si_jtag_udr_pwrsw_main_toggle(si_t *sih, bool on)
+{
+#ifdef DONGLEBUILD
+ int val = on ? 0 : 1;
+
+ switch (CHIPID(sih->chip)) {
+ case BCM4387_CHIP_GRPID:
+ jtag_setbit_128(sih, 8, 99, val);
+ jtag_setbit_128(sih, 8, 101, val);
+ jtag_setbit_128(sih, 8, 105, val);
+ break;
+ default:
+ SI_ERROR(("si_jtag_udr_pwrsw_main_toggle: add support for this chip!\n"));
+ OSL_SYS_HALT();
+ break;
+ }
+#endif
+}
+
+/* return the backplane address where the sssr dumps are stored per D11 core */
+uint32
+BCMATTACHFN(si_d11_core_sssr_addr)(si_t *sih, uint unit, uint32 *sssr_size)
+{
+ uint32 sssr_dmp_src = 0;
+ *sssr_size = 0;
+ /* ideally these addresses should be derived from the EROM map */
+ switch (CHIPID(sih->chip)) {
+ case BCM4387_CHIP_GRPID:
+ if (unit == 0) {
+ sssr_dmp_src = BCM4387_SSSR_DUMP_AXI_MAIN;
+ *sssr_size = (uint32)BCM4387_SSSR_DUMP_MAIN_SIZE;
+ } else if (unit == 1) {
+ sssr_dmp_src = BCM4387_SSSR_DUMP_AXI_AUX;
+ *sssr_size = (uint32)BCM4387_SSSR_DUMP_AUX_SIZE;
+ } else if (unit == 2) {
+ sssr_dmp_src = BCM4387_SSSR_DUMP_AXI_SCAN;
+ *sssr_size = (uint32)BCM4387_SSSR_DUMP_SCAN_SIZE;
+ }
+ break;
+ default:
+ break;
+ }
+ return (sssr_dmp_src);
+}
+
+#ifdef USE_LHL_TIMER
+/* Get current HIB time API */
+uint32
+si_cur_hib_time(si_t *sih)
+{
+ uint32 hib_time;
+
+ hib_time = LHL_REG(sih, lhl_hibtim_adr, 0, 0);
+
+ /* There is no HW sync on the read path for LPO regs,
+ * so SW should read twice: if the two values match,
+ * use them; otherwise read again and use the latest
+ * value.
+ */
+ if (hib_time != LHL_REG(sih, lhl_hibtim_adr, 0, 0)) {
+ hib_time = LHL_REG(sih, lhl_hibtim_adr, 0, 0);
+ }
+
+ return (hib_time);
+}
+#endif /* USE_LHL_TIMER */
diff --git a/bcmdhd.101.10.361.x/siutils_priv.h b/bcmdhd.101.10.361.x/siutils_priv.h
new file mode 100755
index 0000000..3935039
--- /dev/null
+++ b/bcmdhd.101.10.361.x/siutils_priv.h
@@ -0,0 +1,513 @@
+/*
+ * Include file private to the SOC Interconnect support files.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _siutils_priv_h_
+#define _siutils_priv_h_
+
+#if defined(BCMDBG_ERR) && defined(ERR_USE_LOG_EVENT)
+#define SI_ERROR(args) EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_SI_ERROR, args)
+#elif defined(BCMDBG_ERR) || defined(SI_ERROR_ENFORCE)
+#define SI_ERROR(args) printf args
+#else
+#define SI_ERROR(args)
+#endif /* BCMDBG_ERR */
+
+#if defined(ENABLE_CORECAPTURE)
+
+#if !defined(BCMDBG)
+#define SI_PRINT(args) osl_wificc_logDebug args
+#else
+#define SI_PRINT(args) printf args
+#endif /* !BCMDBG */
+
+#else
+
+#define SI_PRINT(args) printf args
+
+#endif /* ENABLE_CORECAPTURE */
+
+#ifdef BCMDBG
+#define SI_MSG(args) printf args
+#else
+#define SI_MSG(args)
+#endif /* BCMDBG */
+
+#ifdef BCMDBG_SI
+#define SI_VMSG(args) printf args
+#else
+#define SI_VMSG(args)
+#endif
+
+#define IS_SIM(chippkg) ((chippkg == HDLSIM_PKG_ID) || (chippkg == HWSIM_PKG_ID))
+
+typedef void (*si_intrsoff_t)(void *intr_arg, bcm_int_bitmask_t *mask);
+typedef void (*si_intrsrestore_t)(void *intr_arg, bcm_int_bitmask_t *mask);
+typedef bool (*si_intrsenabled_t)(void *intr_arg);
+
+#define SI_GPIO_MAX 16
+
+typedef struct gci_gpio_item {
+ void *arg;
+ uint8 gci_gpio;
+ uint8 status;
+ gci_gpio_handler_t handler;
+ struct gci_gpio_item *next;
+} gci_gpio_item_t;
+
+typedef struct wci2_cbs {
+ void *context;
+ wci2_handler_t handler;
+} wci2_cbs_t;
+
+typedef struct wci2_rxfifo_info {
+ char *rx_buf;
+ int rx_idx;
+ wci2_cbs_t *cbs;
+} wci2_rxfifo_info_t;
+
+#define AI_SLAVE_WRAPPER 0
+#define AI_MASTER_WRAPPER 1
+
+typedef struct axi_wrapper {
+ uint32 mfg;
+ uint32 cid;
+ uint32 rev;
+ uint32 wrapper_type;
+ uint32 wrapper_addr;
+ uint32 wrapper_size;
+ uint32 node_type;
+} axi_wrapper_t;
+
+#ifdef SOCI_NCI_BUS
+#define SI_MAX_AXI_WRAPPERS 65u
+#else
+#define SI_MAX_AXI_WRAPPERS 32u
+#endif /* SOCI_NCI_BUS */
+#define AI_REG_READ_TIMEOUT 300u /* in msec */
+
+/* For some combo chips, the BT side accesses chipcommon->0x190 as a 16-byte
+ * address. The register at 0x19C doesn't exist, so an error is logged at the
+ * slave wrapper. Since this can't be fixed in the boot ROM, WAR it here.
+ */
+#define BT_CC_SPROM_BADREG_LO 0x18000190
+#define BT_CC_SPROM_BADREG_SIZE 4
+#define BT_CC_SPROM_BADREG_HI 0
+
+#define BCM4389_BT_AXI_ID 2
+#define BCM4388_BT_AXI_ID 2
+#define BCM4369_BT_AXI_ID 4
+#define BCM4378_BT_AXI_ID 2
+#define BCM43602_BT_AXI_ID 1
+#define BCM4378_ARM_PREFETCH_AXI_ID 9
+
+#define BCM4378_BT_ADDR_HI 0
+#define BCM4378_BT_ADDR_LO 0x19000000 /* BT address space */
+#define BCM4378_BT_SIZE 0x01000000 /* BT address space size */
+#define BCM4378_UNUSED_AXI_ID 0xffffffff
+#define BCM4378_CC_AXI_ID 0
+#define BCM4378_PCIE_AXI_ID 1
+
+#define BCM4387_BT_ADDR_HI 0
+#define BCM4387_BT_ADDR_LO 0x19000000 /* BT address space */
+#define BCM4387_BT_SIZE 0x01000000 /* BT address space size */
+#define BCM4387_UNUSED_AXI_ID 0xffffffff
+#define BCM4387_CC_AXI_ID 0
+#define BCM4387_PCIE_AXI_ID 1
+
+#define BCM_AXI_ID_MASK 0xFu
+#define BCM_AXI_ACCESS_TYPE_MASK 0xF0u
+
+#define BCM43xx_CR4_AXI_ID 3
+#define BCM43xx_AXI_ACCESS_TYPE_PREFETCH (1 << 4)
+
+typedef struct si_cores_info {
+ volatile void *regs[SI_MAXCORES]; /* other regs va */
+
+ uint coreid[SI_MAXCORES]; /**< id of each core */
+ uint32 coresba[SI_MAXCORES]; /**< backplane address of each core */
+ void *regs2[SI_MAXCORES]; /**< va of each core second register set (usbh20) */
+ uint32 coresba2[SI_MAXCORES]; /**< address of each core second register set (usbh20) */
+ uint32 coresba_size[SI_MAXCORES]; /**< backplane address space size */
+ uint32 coresba2_size[SI_MAXCORES]; /**< second address space size */
+
+ void *wrappers[SI_MAXCORES]; /**< other cores wrapper va */
+ uint32 wrapba[SI_MAXCORES]; /**< address of controlling wrapper */
+
+ void *wrappers2[SI_MAXCORES]; /**< other cores wrapper va */
+ uint32 wrapba2[SI_MAXCORES]; /**< address of controlling wrapper */
+
+ void *wrappers3[SI_MAXCORES]; /**< other cores wrapper va */
+ uint32 wrapba3[SI_MAXCORES]; /**< address of controlling wrapper */
+
+ uint32 cia[SI_MAXCORES]; /**< erom cia entry for each core */
+ uint32 cib[SI_MAXCORES]; /**< erom cia entry for each core */
+
+ uint32 csp2ba[SI_MAXCORES]; /**< Second slave port base addr 0 */
+ uint32 csp2ba_size[SI_MAXCORES]; /**< Second slave port addr space size */
+} si_cores_info_t;
+
+#define RES_PEND_STATS_COUNT 8
+
+typedef struct res_state_info
+{
+ uint32 low;
+ uint32 low_time;
+ uint32 high;
+ uint32 high_time;
+} si_res_state_info_t;
+
+/** misc si info needed by some of the routines */
+typedef struct si_info {
+ struct si_pub pub; /**< back plane public state (must be first field) */
+
+ void *osh; /**< osl os handle */
+ void *sdh; /**< bcmsdh handle */
+
+ uint dev_coreid; /**< the core provides driver functions */
+ void *intr_arg; /**< interrupt callback function arg */
+ si_intrsoff_t intrsoff_fn; /**< turns chip interrupts off */
+ si_intrsrestore_t intrsrestore_fn; /**< restore chip interrupts */
+ si_intrsenabled_t intrsenabled_fn; /**< check if interrupts are enabled */
+
+ void *pch; /**< PCI/E core handle */
+
+ bool memseg; /**< flag to toggle MEM_SEG register */
+
+ char *vars;
+ uint varsz;
+
+ volatile void *curmap; /* current regs va */
+
+ uint curidx; /**< current core index */
+ uint numcores; /**< # discovered cores */
+
+ void *curwrap; /**< current wrapper va */
+
+ uint32 oob_router; /**< oob router registers for axi */
+ uint32 oob_router1; /**< oob router registers for axi */
+
+ si_cores_info_t *cores_info;
+#if !defined(BCMDONGLEHOST)
+ /* Store NVRAM data so that it is available after reclaim. */
+ uint32 nvram_min_mask;
+ bool min_mask_valid;
+ uint32 nvram_max_mask;
+ bool max_mask_valid;
+#endif /* !BCMDONGLEHOST */
+ gci_gpio_item_t *gci_gpio_head; /**< gci gpio interrupts head */
+ uint chipnew; /**< new chip number */
+ uint second_bar0win; /**< Backplane region */
+ uint num_br; /**< # discovered bridges */
+ uint32 br_wrapba[SI_MAXBR]; /**< address of bridge controlling wrapper */
+ uint32 xtalfreq;
+ uint32 openloop_dco_code; /**< OPEN loop calibration dco code */
+ uint8 spurmode;
+ bool device_removed;
+ uint axi_num_wrappers;
+ axi_wrapper_t *axi_wrapper;
+ uint8 device_wake_opt; /* device_wake GPIO number */
+ uint8 lhl_ps_mode;
+ uint8 hib_ext_wakeup_enab;
+ uint32 armpllclkfreq; /**< arm clock rate from nvram */
+ uint32 ccidiv; /**< arm clock : cci clock ratio
+ * (determines sysmem frequency)
+ */
+ wci2_rxfifo_info_t *wci2_info; /* wci2_rxfifo interrupt info */
+ uint8 slice; /* this instance of the si accesses
+ * the first(0)/second(1)/...
+ * d11 core
+ */
+ si_res_state_info_t res_state[RES_PEND_STATS_COUNT];
+ uint32 res_pend_count;
+ bool rfldo3p3_war; /**< singing cap war enable from nvram */
+ void *nci_info;
+} si_info_t;
+
+#define SI_INFO(sih) ((si_info_t *)(uintptr)sih)
+
+#define GOODCOREADDR(x, b) (((x) >= (b)) && ((x) < ((b) + SI_MAXCORES * SI_CORE_SIZE)) && \
+ ISALIGNED((x), SI_CORE_SIZE))
+#define GOODREGS(regs) ((regs) != NULL && ISALIGNED((uintptr)(regs), SI_CORE_SIZE))
+#define BADCOREADDR 0
+#define GOODIDX(idx, maxcores) (((uint)idx) < maxcores)
+#define NOREV (int16)-1 /**< Invalid rev */
+
+#define PCI(si) ((BUSTYPE((si)->pub.bustype) == PCI_BUS) && \
+ ((si)->pub.buscoretype == PCI_CORE_ID))
+
+#define PCIE_GEN1(si) ((BUSTYPE((si)->pub.bustype) == PCI_BUS) && \
+ ((si)->pub.buscoretype == PCIE_CORE_ID))
+
+#define PCIE_GEN2(si) ((BUSTYPE((si)->pub.bustype) == PCI_BUS) && \
+ ((si)->pub.buscoretype == PCIE2_CORE_ID))
+
+#define PCIE(si) (PCIE_GEN1(si) || PCIE_GEN2(si))
+
+/** Newer chips can access PCI/PCIE and CC core without requiring to change PCI BAR0 WIN */
+#define SI_FAST(si) (PCIE(si) || (PCI(si) && ((si)->pub.buscorerev >= 13)))
+
+#define CCREGS_FAST(si) \
+ (((si)->curmap == NULL) ? NULL : \
+ ((volatile char *)((si)->curmap) + PCI_16KB0_CCREGS_OFFSET))
+#define PCIEREGS(si) (((volatile char *)((si)->curmap) + PCI_16KB0_PCIREGS_OFFSET))
+
+/*
+ * Macros to disable/restore function core (D11, ENET, ILINE20, etc.) interrupts
+ * before/after core switching, to avoid invalid register access inside the ISR.
+ * SOCI_NCI_BUS is handled here to avoid abandons in the branches that use this macro.
+ */
+#ifdef SOCI_NCI_BUS
+#define INTR_OFF(si, intr_val) \
+ if ((si)->intrsoff_fn && (si_coreid(&(si)->pub) == (si)->dev_coreid)) { \
+ (*(si)->intrsoff_fn)((si)->intr_arg, intr_val); }
+#define INTR_RESTORE(si, intr_val) \
+ if ((si)->intrsrestore_fn && (si_coreid(&(si)->pub) == (si)->dev_coreid)) { \
+ (*(si)->intrsrestore_fn)((si)->intr_arg, intr_val); }
+#else
+#define INTR_OFF(si, intr_val) \
+ if ((si)->intrsoff_fn && (si)->cores_info->coreid[(si)->curidx] == (si)->dev_coreid) { \
+ (*(si)->intrsoff_fn)((si)->intr_arg, intr_val); }
+#define INTR_RESTORE(si, intr_val) \
+ if ((si)->intrsrestore_fn && (si)->cores_info->coreid[(si)->curidx] == (si)->dev_coreid) { \
+ (*(si)->intrsrestore_fn)((si)->intr_arg, intr_val); }
+#endif /* SOCI_NCI_BUS */
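+
+/* Typical usage sketch (illustrative): bracket a temporary core switch so the
+ * function core's ISR cannot race against the moved register window:
+ *
+ *	bcm_int_bitmask_t intr_val;
+ *
+ *	INTR_OFF(sii, &intr_val);
+ *	// ... si_setcoreidx()/register accesses on another core ...
+ *	INTR_RESTORE(sii, &intr_val);
+ */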
+
+/* dynamic clock control defines */
+#define LPOMINFREQ 25000 /**< low power oscillator min */
+#define LPOMAXFREQ 43000 /**< low power oscillator max */
+#define XTALMINFREQ 19800000 /**< 20 MHz - 1% */
+#define XTALMAXFREQ 20200000 /**< 20 MHz + 1% */
+#define PCIMINFREQ 25000000 /**< 25 MHz */
+#define PCIMAXFREQ 34000000 /**< 33 MHz + fudge */
+
+#define ILP_DIV_5MHZ 0 /**< ILP = 5 MHz */
+#define ILP_DIV_1MHZ 4 /**< ILP = 1 MHz */
+
+/* GPIO Based LED powersave defines */
+#define DEFAULT_GPIO_ONTIME 10 /**< Default: 10% on */
+#define DEFAULT_GPIO_OFFTIME 90 /**< Default: 90% off */
+
+#ifndef DEFAULT_GPIOTIMERVAL
+#define DEFAULT_GPIOTIMERVAL ((DEFAULT_GPIO_ONTIME << GPIO_ONTIME_SHIFT) | DEFAULT_GPIO_OFFTIME)
+#endif
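+
+/* Worked example (assuming GPIO_ONTIME_SHIFT == 16, per sbchipc.h): the
+ * default packs to (10 << 16) | 90 == 0x000A005A, i.e. on-time 10 (%) in the
+ * upper half-word and off-time 90 (%) in the lower half-word.
+ */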
+
+/* Silicon Backplane externs */
+extern void sb_scan(si_t *sih, volatile void *regs, uint devid);
+extern uint sb_coreid(const si_t *sih);
+extern uint sb_intflag(si_t *sih);
+extern uint sb_flag(const si_t *sih);
+extern void sb_setint(const si_t *sih, int siflag);
+extern uint sb_corevendor(const si_t *sih);
+extern uint sb_corerev(const si_t *sih);
+extern uint sb_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val);
+extern volatile uint32 *sb_corereg_addr(const si_t *sih, uint coreidx, uint regoff);
+extern bool sb_iscoreup(const si_t *sih);
+extern volatile void *sb_setcoreidx(si_t *sih, uint coreidx);
+extern uint32 sb_core_cflags(const si_t *sih, uint32 mask, uint32 val);
+extern void sb_core_cflags_wo(const si_t *sih, uint32 mask, uint32 val);
+extern uint32 sb_core_sflags(const si_t *sih, uint32 mask, uint32 val);
+extern void sb_commit(si_t *sih);
+extern uint32 sb_base(uint32 admatch);
+extern uint32 sb_size(uint32 admatch);
+extern void sb_core_reset(const si_t *sih, uint32 bits, uint32 resetbits);
+extern void sb_core_disable(const si_t *sih, uint32 bits);
+extern uint32 sb_addrspace(const si_t *sih, uint asidx);
+extern uint32 sb_addrspacesize(const si_t *sih, uint asidx);
+extern int sb_numaddrspaces(const si_t *sih);
+
+extern bool sb_taclear(si_t *sih, bool details);
+
+#ifdef BCMDBG
+extern void sb_view(si_t *sih, bool verbose);
+extern void sb_viewall(si_t *sih, bool verbose);
+#endif
+#if defined(BCMDBG) || defined(BCMDBG_DUMP)
+extern void sb_dump(si_t *sih, struct bcmstrbuf *b);
+#endif
+#if defined(BCMDBG) || defined(BCMDBG_DUMP)|| defined(BCMDBG_PHYDUMP)
+extern void sb_dumpregs(si_t *sih, struct bcmstrbuf *b);
+#endif /* BCMDBG || BCMDBG_DUMP|| BCMDBG_PHYDUMP */
+
+/* AMBA Interconnect exported externs */
+extern si_t *ai_attach(uint pcidev, osl_t *osh, void *regs, uint bustype,
+ void *sdh, char **vars, uint *varsz);
+extern si_t *ai_kattach(osl_t *osh);
+extern void ai_scan(si_t *sih, void *regs, uint devid);
+
+extern uint ai_flag(si_t *sih);
+extern uint ai_flag_alt(const si_t *sih);
+extern void ai_setint(const si_t *sih, int siflag);
+extern uint ai_corevendor(const si_t *sih);
+extern uint ai_corerev(const si_t *sih);
+extern uint ai_corerev_minor(const si_t *sih);
+extern volatile uint32 *ai_corereg_addr(si_t *sih, uint coreidx, uint regoff);
+extern bool ai_iscoreup(const si_t *sih);
+extern volatile void *ai_setcoreidx(si_t *sih, uint coreidx);
+extern volatile void *ai_setcoreidx_2ndwrap(si_t *sih, uint coreidx);
+extern volatile void *ai_setcoreidx_3rdwrap(si_t *sih, uint coreidx);
+extern uint32 ai_core_cflags(const si_t *sih, uint32 mask, uint32 val);
+extern void ai_core_cflags_wo(const si_t *sih, uint32 mask, uint32 val);
+extern uint32 ai_core_sflags(const si_t *sih, uint32 mask, uint32 val);
+extern uint ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val);
+extern uint ai_corereg_writeonly(si_t *sih, uint coreidx, uint regoff, uint mask, uint val);
+extern void ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits);
+extern void ai_d11rsdb_core_reset(si_t *sih, uint32 bits,
+ uint32 resetbits, void *p, volatile void *s);
+extern void ai_core_disable(const si_t *sih, uint32 bits);
+extern void ai_d11rsdb_core_disable(const si_info_t *sii, uint32 bits,
+ aidmp_t *pmacai, aidmp_t *smacai);
+extern int ai_numaddrspaces(const si_t *sih);
+extern uint32 ai_addrspace(const si_t *sih, uint spidx, uint baidx);
+extern uint32 ai_addrspacesize(const si_t *sih, uint spidx, uint baidx);
+extern void ai_coreaddrspaceX(const si_t *sih, uint asidx, uint32 *addr, uint32 *size);
+extern uint ai_wrap_reg(const si_t *sih, uint32 offset, uint32 mask, uint32 val);
+extern void ai_update_backplane_timeouts(const si_t *sih, bool enable, uint32 timeout, uint32 cid);
+extern uint32 ai_clear_backplane_to(si_t *sih);
+void ai_force_clocks(const si_t *sih, uint clock_state);
+extern uint ai_num_slaveports(const si_t *sih, uint coreidx);
+
+#ifdef AXI_TIMEOUTS_NIC
+uint32 ai_clear_backplane_to_fast(si_t *sih, void * addr);
+#endif /* AXI_TIMEOUTS_NIC */
+
+#ifdef BOOKER_NIC400_INF
+extern void ai_core_reset_ext(const si_t *sih, uint32 bits, uint32 resetbits);
+#endif /* BOOKER_NIC400_INF */
+
+#if defined(AXI_TIMEOUTS) || defined(AXI_TIMEOUTS_NIC)
+extern uint32 ai_clear_backplane_to_per_core(si_t *sih, uint coreid, uint coreunit, void * wrap);
+#endif /* AXI_TIMEOUTS || AXI_TIMEOUTS_NIC */
+
+#ifdef BCMDBG
+extern void ai_view(const si_t *sih, bool verbose);
+extern void ai_viewall(si_t *sih, bool verbose);
+#endif
+#if defined(BCMDBG) || defined(BCMDBG_DUMP)|| defined(BCMDBG_PHYDUMP)
+extern void ai_dumpregs(const si_t *sih, struct bcmstrbuf *b);
+#endif /* BCMDBG || BCMDBG_DUMP|| BCMDBG_PHYDUMP */
+
+extern uint32 ai_wrapper_dump_buf_size(const si_t *sih);
+extern uint32 ai_wrapper_dump_binary(const si_t *sih, uchar *p);
+extern bool ai_check_enable_backplane_log(const si_t *sih);
+extern uint32 ai_wrapper_dump_last_timeout(const si_t *sih, uint32 *error, uint32 *core,
+ uint32 *ba, uchar *p);
+extern uint32 ai_findcoreidx_by_axiid(const si_t *sih, uint32 axiid);
+#if defined(AXI_TIMEOUTS_NIC) || defined(AXI_TIMEOUTS)
+extern void ai_wrapper_get_last_error(const si_t *sih, uint32 *error_status, uint32 *core,
+ uint32 *lo, uint32 *hi, uint32 *id);
+extern uint32 ai_get_axi_timeout_reg(void);
+#endif /* (AXI_TIMEOUTS_NIC) || (AXI_TIMEOUTS) */
+
+#ifdef UART_TRAP_DBG
+void ai_dump_APB_Bridge_registers(const si_t *sih);
+#endif /* UART_TRAP_DBG */
+void ai_force_clocks(const si_t *sih, uint clock_state);
+
+#define ub_scan(a, b, c) do {} while (0)
+#define ub_flag(a) (0)
+#define ub_setint(a, b) do {} while (0)
+#define ub_coreidx(a) (0)
+#define ub_corevendor(a) (0)
+#define ub_corerev(a) (0)
+#define ub_iscoreup(a) (0)
+#define ub_setcoreidx(a, b) (0)
+#define ub_core_cflags(a, b, c) (0)
+#define ub_core_cflags_wo(a, b, c) do {} while (0)
+#define ub_core_sflags(a, b, c) (0)
+#define ub_corereg(a, b, c, d, e) (0)
+#define ub_core_reset(a, b, c) do {} while (0)
+#define ub_core_disable(a, b) do {} while (0)
+#define ub_numaddrspaces(a) (0)
+#define ub_addrspace(a, b) (0)
+#define ub_addrspacesize(a, b) (0)
+#define ub_view(a, b) do {} while (0)
+#define ub_dumpregs(a, b) do {} while (0)
+
+#ifndef SOCI_NCI_BUS
+#define nci_uninit(a) do {} while (0)
+#define nci_scan(a) (0)
+#define nci_dump_erom(a) do {} while (0)
+#define nci_init(a, b, c) (NULL)
+#define nci_setcore(a, b, c) (NULL)
+#define nci_setcoreidx(a, b) (NULL)
+#define nci_findcoreidx(a, b, c) (0)
+#define nci_corereg_addr(a, b, c) (NULL)
+#define nci_corereg_writeonly(a, b, c, d, e) (0)
+#define nci_corereg(a, b, c, d, e) (0)
+#define nci_corerev_minor(a) (0)
+#define nci_corerev(a) (0)
+#define nci_corevendor(a) (0)
+#define nci_get_wrap_reg(a, b, c, d) (0)
+#define nci_core_reset(a, b, c) do {} while (0)
+#define nci_core_disable(a, b) do {} while (0)
+#define nci_iscoreup(a) (FALSE)
+#define nci_coreid(a, b) (0)
+#define nci_numcoreunits(a, b) (0)
+#define nci_addr_space(a, b, c) (0)
+#define nci_addr_space_size(a, b, c) (0)
+#define nci_intflag(a) (0)
+#define nci_flag(a) (0)
+#define nci_flag_alt(a) (0)
+#define nci_setint(a, b) do {} while (0)
+#define nci_oobr_baseaddr(a, b) (0)
+#define nci_coreunit(a) (0)
+#define nci_corelist(a, b) (0)
+#define nci_numaddrspaces(a) (0)
+#define nci_addrspace(a, b, c) (0)
+#define nci_addrspacesize(a, b, c) (0)
+#define nci_coreaddrspaceX(a, b, c, d) do {} while (0)
+#define nci_core_cflags(a, b, c) (0)
+#define nci_core_cflags_wo(a, b, c) do {} while (0)
+#define nci_core_sflags(a, b, c) (0)
+#define nci_wrapperreg(a, b, c, d) (0)
+#define nci_invalidate_second_bar0win(a) do {} while (0)
+#define nci_backplane_access(a, b, c, d, e) (0)
+#define nci_backplane_access_64(a, b, c, d, e) (0)
+#define nci_num_slaveports(a, b) (0)
+#if defined(BCMDBG) || defined(BCMDBG_DUMP) || defined(BCMDBG_PHYDUMP)
+#define nci_dumpregs(a, b) do {} while (0)
+#endif /* BCMDBG || BCMDBG_DUMP || BCMDBG_PHYDUMP */
+#ifdef BCMDBG
+#define nci_view(a, b) do {} while (0)
+#define nci_viewall(a, b) do {} while (0)
+#endif /* BCMDBG */
+#define nci_get_nth_wrapper(a, b) (0)
+#define nci_get_axi_addr(a, b) (0)
+#define nci_wrapper_dump_binary_one(a, b, c) (NULL)
+#define nci_wrapper_dump_binary(a, b) (0)
+#define nci_wrapper_dump_last_timeout(a, b, c, d, e) (0)
+#define nci_check_enable_backplane_log(a) (FALSE)
+#define nci_get_core_baaddr(a, b, c) (0)
+#define nci_clear_backplane_to(a) (0)
+#define nci_clear_backplane_to_per_core(a, b, c, d) (0)
+#define nci_ignore_errlog(a, b, c, d, e, f) (FALSE)
+#define nci_wrapper_get_last_error(a, b, c, d, e, f) do {} while (0)
+#define nci_get_axi_timeout_reg() (0)
+#define nci_findcoreidx_by_axiid(a, b) (0)
+#define nci_wrapper_dump_buf_size(a) (0)
+#endif /* SOCI_NCI_BUS */
+#endif /* _siutils_priv_h_ */
diff --git a/bcmdhd.101.10.361.x/wb_regon_coordinator.c b/bcmdhd.101.10.361.x/wb_regon_coordinator.c
new file mode 100755
index 0000000..bd3aa84
--- /dev/null
+++ b/bcmdhd.101.10.361.x/wb_regon_coordinator.c
@@ -0,0 +1,444 @@
+/*
+ * DHD BT WiFi Coex RegON Coordinator
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ */
+#include <linux/cdev.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/poll.h>
+#include <linux/eventpoll.h>
+#include <linux/version.h>
+
+#define DESCRIPTION "Broadcom WiFi BT RegON Coordinator Driver"
+#define AUTHOR "Broadcom Corporation"
+
+#define DEVICE_NAME "wbrc"
+#define CLASS_NAME "bcm"
+
+#ifndef TRUE
+#define TRUE (1)
+#endif
+
+#ifndef FALSE
+#define FALSE (0)
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0))
+typedef unsigned int __poll_t;
+#endif
+
+/*
+ * 4-byte message used to sync with the BT stack:
+ * Byte 0 - header
+ * Byte 1 - length of the LTV, currently fixed to 2
+ * Byte 2 - type
+ * Byte 3 - command value
+ */
+#define WBRC_MSG_LEN 4u
+
+/* The defines below are mirrored in user space. */
+/* TODO have these as enums and define new structure with members */
+
+/* Byte 0 - Define header for direction of command */
+#define HEADER_DIR_WL2BT 0x01
+#define HEADER_DIR_BT2WL 0x02
+
+/*
+ * Byte 2 - Define Type of Command (Followed LTV format)
+ * wifi/bt, signal/ack types
+ */
+#define TYPE_WIFI_CMD 0x01
+#define TYPE_WIFI_ACK 0x02
+#define TYPE_BT_CMD 0x03
+#define TYPE_BT_ACK 0x04
+
+/* Byte 3 - Define Value field: commands/acks */
+#define CMD_RESET_WIFI 0x40
+#define CMD_RESET_WIFI_WITH_ACK 0x41
+#define CMD_RESET_BT 0x42
+#define CMD_RESET_BT_WITH_ACK 0x43
+#define ACK_RESET_WIFI_COMPLETE 0x80
+#define ACK_RESET_BT_COMPLETE 0x81
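+
+/*
+ * Example (illustrative): the WL->BT "reset BT and ack me" request built by
+ * wbrc_signal_bt_reset() below is, byte for byte:
+ *
+ *	{ HEADER_DIR_WL2BT, 2, TYPE_WIFI_CMD, CMD_RESET_BT_WITH_ACK }
+ *	    == { 0x01, 0x02, 0x01, 0x43 }
+ *
+ * and the BT stack completes the handshake by writing back
+ * { HEADER_DIR_BT2WL, 2, TYPE_BT_ACK, ACK_RESET_BT_COMPLETE }.
+ */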
+
+struct wbrc_pvt_data {
+ int wbrc_bt_dev_major_number; /* BT char dev major number */
+ struct class *wbrc_bt_dev_class; /* BT char dev class */
+ struct device *wbrc_bt_dev_device; /* BT char dev */
+ struct mutex wbrc_mutex; /* mutex to synchronise */
+ bool bt_dev_opened; /* To check if bt dev open is called */
+ wait_queue_head_t bt_reset_waitq; /* waitq to wait till bt reset is done */
+ unsigned int bt_reset_ack; /* condition variable to be checked for bt reset */
+ wait_queue_head_t wlan_reset_waitq; /* waitq to wait till wlan reset is done */
+ unsigned int wlan_reset_ack; /* condition variable to be checked for wlan reset */
+ wait_queue_head_t outmsg_waitq; /* wait queue for poll */
+ char wl2bt_message[WBRC_MSG_LEN]; /* message to communicate with the BT stack */
+ bool read_data_available; /* condition to check if read data is present */
+};
+
+static struct wbrc_pvt_data *g_wbrc_data;
+
+#define WBRC_LOCK(wbrc_data) {if (wbrc_data) mutex_lock(&(wbrc_data)->wbrc_mutex);}
+#define WBRC_UNLOCK(wbrc_data) {if (wbrc_data) mutex_unlock(&(wbrc_data)->wbrc_mutex);}
+
+int wbrc_wl2bt_reset(void);
+int wbrc_bt_reset_ack(struct wbrc_pvt_data *wbrc_data);
+
+int wbrc_bt2wl_reset(void);
+int wbrc_wl_reset_ack(struct wbrc_pvt_data *wbrc_data);
+
+static int wbrc_bt_dev_open(struct inode *, struct file *);
+static int wbrc_bt_dev_release(struct inode *, struct file *);
+static ssize_t wbrc_bt_dev_read(struct file *, char *, size_t, loff_t *);
+static ssize_t wbrc_bt_dev_write(struct file *, const char *, size_t, loff_t *);
+static __poll_t wbrc_bt_dev_poll(struct file *filep, poll_table *wait);
+
+static struct file_operations wbrc_bt_dev_fops = {
+ .open = wbrc_bt_dev_open,
+ .read = wbrc_bt_dev_read,
+ .write = wbrc_bt_dev_write,
+ .release = wbrc_bt_dev_release,
+ .poll = wbrc_bt_dev_poll,
+};
+
+static ssize_t wbrc_bt_dev_read(struct file *filep, char *buffer, size_t len,
+ loff_t *offset)
+{
+ struct wbrc_pvt_data *wbrc_data = filep->private_data;
+ int err_count = 0;
+ int ret = 0;
+
+ WBRC_LOCK(wbrc_data);
+ pr_info("%s\n", __func__);
+ if (wbrc_data->read_data_available == FALSE) {
+ goto exit;
+ }
+ if (len < WBRC_MSG_LEN) {
+ pr_err("%s: invalid length:%d\n", __func__, (int)len);
+ ret = -EFAULT;
+ goto exit;
+ }
+ err_count = copy_to_user(buffer, &wbrc_data->wl2bt_message,
+ sizeof(wbrc_data->wl2bt_message));
+ if (err_count == 0) {
+ pr_info("Sent %d bytes\n",
+ (int)sizeof(wbrc_data->wl2bt_message));
+ /* report the number of bytes actually copied to userspace */
+ ret = sizeof(wbrc_data->wl2bt_message);
+ } else {
+ pr_err("Failed to send %d bytes\n", err_count);
+ ret = -EFAULT;
+ }
+ wbrc_data->read_data_available = FALSE;
+
+exit:
+ WBRC_UNLOCK(wbrc_data);
+ return ret;
+}
+
+static ssize_t wbrc_bt_dev_write(struct file *filep, const char *buffer,
+ size_t len, loff_t *offset)
+{
+ struct wbrc_pvt_data *wbrc_data = filep->private_data;
+ int err_count = 0;
+ int ret = 0;
+ char message[WBRC_MSG_LEN] = {};
+
+ WBRC_LOCK(wbrc_data);
+
+ pr_info("%s Received %zu bytes\n", __func__, len);
+ if (len < WBRC_MSG_LEN) {
+ pr_err("%s: Received malformed packet:%d\n", __func__, (int)len);
+ ret = -EFAULT;
+ goto exit;
+ }
+
+ /* copy only the fixed-size message to avoid overrunning the stack buffer */
+ err_count = copy_from_user(message, buffer, sizeof(message));
+ if (err_count) {
+ pr_err("%s: copy_from_user failed:%d\n", __func__, err_count);
+ ret = -EFAULT;
+ goto exit;
+ }
+
+ if (message[0] != HEADER_DIR_BT2WL) {
+ pr_err("%s: invalid header:%d\n", __func__, message[0]);
+ ret = -EFAULT;
+ goto exit;
+ }
+
+ if (message[2] == TYPE_BT_CMD) {
+ switch (message[3]) {
+ case CMD_RESET_WIFI:
+ pr_info("RCVD CMD_RESET_WIFI\n");
+ break;
+ case CMD_RESET_WIFI_WITH_ACK:
+ pr_info("RCVD CMD_RESET_WIFI_WITH_ACK\n");
+ break;
+ }
+ }
+
+ if (message[2] == TYPE_BT_ACK && message[3] == ACK_RESET_BT_COMPLETE) {
+ pr_info("RCVD ACK_RESET_BT_COMPLETE");
+ wbrc_bt_reset_ack(wbrc_data);
+ }
+
+exit:
+ WBRC_UNLOCK(wbrc_data);
+ return ret;
+}
+
+static __poll_t wbrc_bt_dev_poll(struct file *filep, poll_table *wait)
+{
+ struct wbrc_pvt_data *wbrc_data = filep->private_data;
+ __poll_t mask = 0;
+
+ poll_wait(filep, &wbrc_data->outmsg_waitq, wait);
+
+ if (wbrc_data->read_data_available)
+ mask |= EPOLLIN | EPOLLRDNORM;
+
+ if (!wbrc_data->bt_dev_opened)
+ mask |= EPOLLHUP;
+
+ return mask;
+}
+
+static int wbrc_bt_dev_open(struct inode *inodep, struct file *filep)
+{
+ struct wbrc_pvt_data *wbrc_data = g_wbrc_data;
+ int ret = 0;
+ WBRC_LOCK(wbrc_data);
+ if (wbrc_data->bt_dev_opened) {
+ pr_err("%s already opened\n", __func__);
+ ret = -EFAULT;
+ goto exit;
+ }
+ wbrc_data->bt_dev_opened = TRUE;
+ pr_info("%s Device opened %d time(s)\n", __func__,
+ wbrc_data->bt_dev_opened);
+ filep->private_data = wbrc_data;
+
+exit:
+ WBRC_UNLOCK(wbrc_data);
+ return ret;
+}
+
+static int wbrc_bt_dev_release(struct inode *inodep, struct file *filep)
+{
+ struct wbrc_pvt_data *wbrc_data = filep->private_data;
+ WBRC_LOCK(wbrc_data);
+ pr_info("%s Device closed %d\n", __func__, wbrc_data->bt_dev_opened);
+ wbrc_data->bt_dev_opened = FALSE;
+ WBRC_UNLOCK(wbrc_data);
+ wake_up_interruptible(&wbrc_data->outmsg_waitq);
+ return 0;
+}
+
+void wbrc_signal_bt_reset(struct wbrc_pvt_data *wbrc_data)
+{
+ pr_info("%s\n", __func__);
+
+ /* Below message will be read by userspace using .read */
+ wbrc_data->wl2bt_message[0] = HEADER_DIR_WL2BT; // Minimal Header
+ wbrc_data->wl2bt_message[1] = 2; // Length
+ wbrc_data->wl2bt_message[2] = TYPE_WIFI_CMD; // Type
+ wbrc_data->wl2bt_message[3] = CMD_RESET_BT_WITH_ACK; // Value
+ wbrc_data->read_data_available = TRUE;
+ smp_wmb();
+
+ wake_up_interruptible(&wbrc_data->outmsg_waitq);
+}
+
+int wbrc_init(void)
+{
+ int err = 0;
+ struct wbrc_pvt_data *wbrc_data;
+ pr_info("%s\n", __func__);
+ wbrc_data = kzalloc(sizeof(struct wbrc_pvt_data), GFP_KERNEL);
+ if (wbrc_data == NULL) {
+ return -ENOMEM;
+ }
+ mutex_init(&wbrc_data->wbrc_mutex);
+ init_waitqueue_head(&wbrc_data->bt_reset_waitq);
+ init_waitqueue_head(&wbrc_data->wlan_reset_waitq);
+ init_waitqueue_head(&wbrc_data->outmsg_waitq);
+ g_wbrc_data = wbrc_data;
+
+ wbrc_data->wbrc_bt_dev_major_number = register_chrdev(0, DEVICE_NAME, &wbrc_bt_dev_fops);
+ err = wbrc_data->wbrc_bt_dev_major_number;
+ if (wbrc_data->wbrc_bt_dev_major_number < 0) {
+ pr_alert("wbrc_sequencer failed to register a major number\n");
+ goto err_register;
+ }
+
+ wbrc_data->wbrc_bt_dev_class = class_create(THIS_MODULE, CLASS_NAME);
+ err = PTR_ERR(wbrc_data->wbrc_bt_dev_class);
+ if (IS_ERR(wbrc_data->wbrc_bt_dev_class)) {
+ pr_alert("Failed to register device class\n");
+ goto err_class;
+ }
+
+ wbrc_data->wbrc_bt_dev_device = device_create(
+ wbrc_data->wbrc_bt_dev_class, NULL, MKDEV(wbrc_data->wbrc_bt_dev_major_number, 0),
+ NULL, DEVICE_NAME);
+ err = PTR_ERR(wbrc_data->wbrc_bt_dev_device);
+ if (IS_ERR(wbrc_data->wbrc_bt_dev_device)) {
+ pr_alert("Failed to create the device\n");
+ goto err_device;
+ }
+ pr_info("device class created correctly\n");
+
+ return 0;
+
+err_device:
+ class_destroy(wbrc_data->wbrc_bt_dev_class);
+err_class:
+ unregister_chrdev(wbrc_data->wbrc_bt_dev_major_number, DEVICE_NAME);
+err_register:
+ kfree(wbrc_data);
+ g_wbrc_data = NULL;
+ return err;
+}
+
+void wbrc_exit(void)
+{
+ struct wbrc_pvt_data *wbrc_data = g_wbrc_data;
+ pr_info("%s\n", __func__);
+ wake_up_interruptible(&wbrc_data->outmsg_waitq);
+ device_destroy(wbrc_data->wbrc_bt_dev_class, MKDEV(wbrc_data->wbrc_bt_dev_major_number, 0));
+ class_destroy(wbrc_data->wbrc_bt_dev_class);
+ unregister_chrdev(wbrc_data->wbrc_bt_dev_major_number, DEVICE_NAME);
+ kfree(wbrc_data);
+ g_wbrc_data = NULL;
+}
+
+#ifndef BCMDHD_MODULAR
+/* Required only for Built-in DHD */
+module_init(wbrc_init);
+module_exit(wbrc_exit);
+#endif /* BCMDHD_MODULAR */
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION(DESCRIPTION);
+MODULE_AUTHOR(AUTHOR);
+
+/*
+ * Wait until the condition *var == condition is met.
+ * Returns 0 if the @condition evaluated to false after the timeout elapsed,
+ * non-zero (the remaining time) if the @condition evaluated to true.
+ */
+#define WBRC_RESET_WAIT_TIMEOUT 4000
+int
+wbrc_reset_wait_on_condition(wait_queue_head_t *reset_waitq, uint *var, uint condition)
+{
+ int timeout;
+
+ /* Convert timeout in milliseconds to jiffies */
+ timeout = msecs_to_jiffies(WBRC_RESET_WAIT_TIMEOUT);
+
+ timeout = wait_event_timeout(*reset_waitq, (*var == condition), timeout);
+
+ return timeout;
+}
+
+/* WBRC_LOCK should be held from caller */
+int wbrc_bt_reset_ack(struct wbrc_pvt_data *wbrc_data)
+{
+ pr_info("%s\n", __func__);
+ wbrc_data->bt_reset_ack = TRUE;
+ smp_wmb();
+ wake_up(&wbrc_data->bt_reset_waitq);
+ return 0;
+}
+
+int wbrc_wl2bt_reset(void)
+{
+ int ret = 0;
+ struct wbrc_pvt_data *wbrc_data = g_wbrc_data;
+
+ pr_info("%s\n", __func__);
+
+ WBRC_LOCK(wbrc_data);
+ if (!wbrc_data->bt_dev_opened) {
+ pr_info("%s: no BT\n", __func__);
+ WBRC_UNLOCK(wbrc_data);
+ return ret;
+ }
+
+ wbrc_data->bt_reset_ack = FALSE;
+
+ wbrc_signal_bt_reset(wbrc_data);
+
+ WBRC_UNLOCK(wbrc_data);
+ /* Wait till BT reset is done */
+ wbrc_reset_wait_on_condition(&wbrc_data->bt_reset_waitq,
+ &wbrc_data->bt_reset_ack, TRUE);
+ if (wbrc_data->bt_reset_ack == FALSE) {
+ pr_err("%s: BT reset timeout\n", __func__);
+ ret = -1;
+ }
+ return ret;
+}
+EXPORT_SYMBOL(wbrc_wl2bt_reset);
+
+int wbrc_signal_wlan_reset(struct wbrc_pvt_data *wbrc_data)
+{
+ /* TODO call dhd reset, right now just send ack from here */
+ wbrc_wl_reset_ack(wbrc_data);
+ return 0;
+}
+
+/* WBRC_LOCK should be held from caller, this will be called from DHD */
+int wbrc_wl_reset_ack(struct wbrc_pvt_data *wbrc_data)
+{
+ pr_info("%s\n", __func__);
+ wbrc_data->wlan_reset_ack = TRUE;
+ smp_wmb();
+ wake_up(&wbrc_data->wlan_reset_waitq);
+ return 0;
+}
+EXPORT_SYMBOL(wbrc_wl_reset_ack);
+
+int wbrc_bt2wl_reset(void)
+{
+ int ret = 0;
+ struct wbrc_pvt_data *wbrc_data = g_wbrc_data;
+
+ pr_info("%s\n", __func__);
+
+ WBRC_LOCK(wbrc_data);
+ wbrc_data->wlan_reset_ack = FALSE;
+ wbrc_signal_wlan_reset(wbrc_data);
+ /* Wait till WLAN reset is done */
+ wbrc_reset_wait_on_condition(&wbrc_data->wlan_reset_waitq,
+ &wbrc_data->wlan_reset_ack, TRUE);
+ if (wbrc_data->wlan_reset_ack == FALSE) {
+ pr_err("%s: WLAN reset timeout\n", __func__);
+ ret = -1;
+ }
+ WBRC_UNLOCK(wbrc_data);
+ return ret;
+}
+EXPORT_SYMBOL(wbrc_bt2wl_reset);
diff --git a/bcmdhd.101.10.361.x/wifi_stats.h b/bcmdhd.101.10.361.x/wifi_stats.h
new file mode 100755
index 0000000..a4d2e73
--- /dev/null
+++ b/bcmdhd.101.10.361.x/wifi_stats.h
@@ -0,0 +1,377 @@
+/*
+ * Common stats definitions for clients of dongle
+ * ports
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _wifi_stats_h_
+#define _wifi_stats_h_
+
+// remove the conditional after moving all branches to use the new code
+#ifdef USE_WIFI_STATS_H
+
+#include <ethernet.h>
+#include <802.11.h>
+
+typedef int32 wifi_radio;
+typedef int32 wifi_channel;
+typedef int32 wifi_rssi;
+typedef struct { uint16 version; uint16 length; } ver_len;
+
+typedef enum wifi_channel_width {
+ WIFI_CHAN_WIDTH_20 = 0,
+ WIFI_CHAN_WIDTH_40 = 1,
+ WIFI_CHAN_WIDTH_80 = 2,
+ WIFI_CHAN_WIDTH_160 = 3,
+ WIFI_CHAN_WIDTH_80P80 = 4,
+ WIFI_CHAN_WIDTH_5 = 5,
+ WIFI_CHAN_WIDTH_10 = 6,
+ WIFI_CHAN_WIDTH_INVALID = -1
+} wifi_channel_width_t;
+
+typedef enum {
+ WIFI_DISCONNECTED = 0,
+ WIFI_AUTHENTICATING = 1,
+ WIFI_ASSOCIATING = 2,
+ WIFI_ASSOCIATED = 3,
+ WIFI_EAPOL_STARTED = 4, /* if done by firmware/driver */
+ WIFI_EAPOL_COMPLETED = 5, /* if done by firmware/driver */
+} wifi_connection_state;
+
+typedef enum {
+ WIFI_ROAMING_IDLE = 0,
+ WIFI_ROAMING_ACTIVE = 1
+} wifi_roam_state;
+
+typedef enum {
+ WIFI_INTERFACE_STA = 0,
+ WIFI_INTERFACE_SOFTAP = 1,
+ WIFI_INTERFACE_IBSS = 2,
+ WIFI_INTERFACE_P2P_CLIENT = 3,
+ WIFI_INTERFACE_P2P_GO = 4,
+ WIFI_INTERFACE_NAN = 5,
+ WIFI_INTERFACE_MESH = 6
+} wifi_interface_mode;
+
+#define WIFI_CAPABILITY_QOS 0x00000001 /* set for QOS association */
+#define WIFI_CAPABILITY_PROTECTED 0x00000002 /* set for protected association (802.11
+ * beacon frame control protected bit set)
+ */
+#define WIFI_CAPABILITY_INTERWORKING 0x00000004 /* set if 802.11 Extended Capabilities
+ * element interworking bit is set
+ */
+#define WIFI_CAPABILITY_HS20 0x00000008 /* set for HS20 association */
+#define WIFI_CAPABILITY_SSID_UTF8       0x00000010      /* set if 802.11 Extended Capabilities
+                                                         * element UTF-8 SSID bit is set
+                                                         */
+#define WIFI_CAPABILITY_COUNTRY         0x00000020      /* set if 802.11 Country Element is present */
+#if defined(__linux__)
+#define PACK_ATTRIBUTE __attribute__ ((packed))
+#else
+#define PACK_ATTRIBUTE
+#endif
+typedef struct {
+ wifi_interface_mode mode; /* interface mode */
+ uint8 mac_addr[6]; /* interface mac address (self) */
+ uint8 PAD[2];
+ wifi_connection_state state; /* connection state (valid for STA, CLI only) */
+ wifi_roam_state roaming; /* roaming state */
+ uint32 capabilities; /* WIFI_CAPABILITY_XXX (self) */
+ uint8 ssid[DOT11_MAX_SSID_LEN+1]; /* null terminated SSID */
+ uint8 bssid[ETHER_ADDR_LEN]; /* bssid */
+ uint8 PAD[1];
+ uint8 ap_country_str[3]; /* country string advertised by AP */
+ uint8 country_str[3]; /* country string for this association */
+ uint8 PAD[2];
+} wifi_interface_info;
+
+typedef wifi_interface_info *wifi_interface_handle;
+
+/* channel information */
+typedef struct {
+ wifi_channel_width_t width; /* channel width (20, 40, 80, 80+80, 160) */
+ wifi_channel center_freq; /* primary 20 MHz channel */
+ wifi_channel center_freq0; /* center frequency (MHz) first segment */
+ wifi_channel center_freq1; /* center frequency (MHz) second segment */
+} wifi_channel_info;
+
+/* wifi rate */
+typedef struct {
+ uint32 preamble; /* 0: OFDM, 1:CCK, 2:HT 3:VHT 4..7 reserved */
+ uint32 nss; /* 0:1x1, 1:2x2, 3:3x3, 4:4x4 */
+	uint32 bw;	/* 0:20MHz, 1:40MHz, 2:80MHz, 3:160MHz */
+	uint32 rateMcsIdx;	/* OFDM/CCK rate code as per the IEEE std,
+			 * in units of 0.5 Mbps; for HT/VHT this is
+			 * the MCS index
+			 */
+ uint32 reserved; /* reserved */
+ uint32 bitrate; /* units of 100 Kbps */
+} wifi_rate;
+
+typedef struct {
+ uint32 preamble :3; /* 0: OFDM, 1:CCK, 2:HT 3:VHT 4..7 reserved */
+ uint32 nss :2; /* 0:1x1, 1:2x2, 3:3x3, 4:4x4 */
+	uint32 bw :3;	/* 0:20MHz, 1:40MHz, 2:80MHz, 3:160MHz */
+	uint32 rateMcsIdx :8;	/* OFDM/CCK rate code as per the IEEE std,
+				 * in units of 0.5 Mbps; for HT/VHT this is
+				 * the MCS index
+				 */
+ uint32 reserved :16; /* reserved */
+ uint32 bitrate; /* units of 100 Kbps */
+} wifi_rate_v1;
+
+/* channel statistics */
+typedef struct {
+ wifi_channel_info channel; /* channel */
+ uint32 on_time; /* msecs the radio is awake (32 bits number
+ * accruing over time)
+ */
+ uint32 cca_busy_time; /* msecs the CCA register is busy (32 bits number
+ * accruing over time)
+ */
+} wifi_channel_stat;
+
+/* radio statistics */
+typedef struct {
+ struct {
+ uint16 version;
+ uint16 length;
+ };
+ wifi_radio radio; /* wifi radio (if multiple radio supported) */
+ uint32 on_time; /* msecs the radio is awake (32 bits number
+ * accruing over time)
+ */
+ uint32 tx_time; /* msecs the radio is transmitting (32 bits
+ * number accruing over time)
+ */
+ uint32 rx_time; /* msecs the radio is in active receive (32 bits
+ * number accruing over time)
+ */
+ uint32 on_time_scan; /* msecs the radio is awake due to all scan (32 bits
+ * number accruing over time)
+ */
+ uint32 on_time_nbd; /* msecs the radio is awake due to NAN (32 bits
+ * number accruing over time)
+ */
+	uint32 on_time_gscan;	/* msecs the radio is awake due to G-scan (32 bits
+				 * number accruing over time)
+				 */
+	uint32 on_time_roam_scan;	/* msecs the radio is awake due to roam scan (32 bits
+				 * number accruing over time)
+				 */
+ uint32 on_time_pno_scan; /* msecs the radio is awake due to PNO scan (32 bits
+ * number accruing over time)
+ */
+ uint32 on_time_hs20; /* msecs the radio is awake due to HS2.0 scans and
+ * GAS exchange (32 bits number accruing over time)
+ */
+ uint32 num_channels; /* number of channels */
+ wifi_channel_stat channels[1]; /* channel statistics */
+} wifi_radio_stat;
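The trailing channels[1] member is the pre-C99 one-element-array idiom for a variable-length tail, so a buffer carrying per-channel stats must be sized for the real channel count. A minimal sizing sketch (the helper name is illustrative, not from this header):

	static inline size_t wifi_radio_stat_size(uint32 num_channels)
	{
		/* sizeof() already accounts for one wifi_channel_stat entry */
		return sizeof(wifi_radio_stat) +
			(num_channels ? num_channels - 1 : 0) * sizeof(wifi_channel_stat);
	}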
+
+typedef struct {
+ wifi_radio radio;
+ uint32 on_time;
+ uint32 tx_time;
+ uint32 rx_time;
+ uint32 on_time_scan;
+ uint32 on_time_nbd;
+ uint32 on_time_gscan;
+ uint32 on_time_roam_scan;
+ uint32 on_time_pno_scan;
+ uint32 on_time_hs20;
+ uint32 num_channels;
+} wifi_radio_stat_h;
+
+/* per rate statistics */
+typedef struct {
+ wifi_rate_v1 rate; /* rate information */
+ uint32 tx_mpdu; /* number of successfully transmitted data pkts (ACK rcvd) */
+ uint32 rx_mpdu; /* number of received data pkts */
+ uint32 mpdu_lost; /* number of data packet losses (no ACK) */
+ uint32 retries; /* total number of data pkt retries */
+ uint32 retries_short; /* number of short data pkt retries */
+ uint32 retries_long; /* number of long data pkt retries */
+} wifi_rate_stat_v1;
+
+typedef struct {
+ uint16 version;
+ uint16 length;
+ uint32 tx_mpdu; /* number of successfully transmitted data pkts (ACK rcvd) */
+ uint32 rx_mpdu; /* number of received data pkts */
+ uint32 mpdu_lost; /* number of data packet losses (no ACK) */
+ uint32 retries; /* total number of data pkt retries */
+ uint32 retries_short; /* number of short data pkt retries */
+ uint32 retries_long; /* number of long data pkt retries */
+ wifi_rate rate;
+} wifi_rate_stat;
+
+/* access categories */
+typedef enum {
+ WIFI_AC_VO = 0,
+ WIFI_AC_VI = 1,
+ WIFI_AC_BE = 2,
+ WIFI_AC_BK = 3,
+ WIFI_AC_MAX = 4
+} wifi_traffic_ac;
+
+/* wifi peer type */
+typedef enum
+{
+ WIFI_PEER_STA,
+ WIFI_PEER_AP,
+ WIFI_PEER_P2P_GO,
+ WIFI_PEER_P2P_CLIENT,
+ WIFI_PEER_NAN,
+ WIFI_PEER_TDLS,
+ WIFI_PEER_INVALID
+} wifi_peer_type;
+
+/* per peer statistics */
+typedef struct {
+ wifi_peer_type type; /* peer type (AP, TDLS, GO etc.) */
+ uint8 peer_mac_address[6]; /* mac address */
+ uint32 capabilities; /* peer WIFI_CAPABILITY_XXX */
+ uint32 num_rate; /* number of rates */
+ wifi_rate_stat rate_stats[1]; /* per rate statistics, number of entries = num_rate */
+} wifi_peer_info;
+
+/* per access category statistics */
+typedef struct {
+ wifi_traffic_ac ac; /* access category (VI, VO, BE, BK) */
+ uint32 tx_mpdu; /* number of successfully transmitted unicast data pkts
+ * (ACK rcvd)
+ */
+ uint32 rx_mpdu; /* number of received unicast mpdus */
+	uint32 tx_mcast;	/* number of successfully transmitted multicast
+ * data packets
+ */
+ /* STA case: implies ACK received from AP for the
+ * unicast packet in which mcast pkt was sent
+ */
+ uint32 rx_mcast; /* number of received multicast data packets */
+ uint32 rx_ampdu; /* number of received unicast a-mpdus */
+ uint32 tx_ampdu; /* number of transmitted unicast a-mpdus */
+ uint32 mpdu_lost; /* number of data pkt losses (no ACK) */
+ uint32 retries; /* total number of data pkt retries */
+ uint32 retries_short; /* number of short data pkt retries */
+ uint32 retries_long; /* number of long data pkt retries */
+ uint32 contention_time_min; /* data pkt min contention time (usecs) */
+ uint32 contention_time_max; /* data pkt max contention time (usecs) */
+ uint32 contention_time_avg; /* data pkt avg contention time (usecs) */
+ uint32 contention_num_samples; /* num of data pkts used for contention statistics */
+} wifi_wmm_ac_stat;
+
+/* interface statistics */
+typedef struct {
+ wifi_interface_handle iface; /* wifi interface */
+ wifi_interface_info info; /* current state of the interface */
+ uint32 beacon_rx; /* access point beacon received count from
+ * connected AP
+ */
+	uint64 average_tsf_offset;	/* average beacon offset encountered (beacon_TSF - TBTT).
+					 * This field is used to calculate the typical beacon
+					 * contention time on the channel; it may also be used
+					 * to debug beacon synchronization and related power
+					 * consumption issues
+					 */
+ uint32 leaky_ap_detected; /* indicate that this AP
+ * typically leaks packets beyond
+ * the driver guard time.
+ */
+ uint32 leaky_ap_avg_num_frames_leaked; /* average number of frame leaked by AP after
+ * frame with PM bit set was ACK'ed by AP
+ */
+	uint32 leaky_ap_guard_time;	/* guard time currently in force (when implementing
+					 * IEEE power management based on the frame control
+					 * PM bit): how long the driver waits after receiving
+					 * an ACK for a data frame with the PM bit set before
+					 * shutting down the radio
+					 */
+ uint32 mgmt_rx; /* access point mgmt frames received count from
+ * connected AP (including Beacon)
+ */
+ uint32 mgmt_action_rx; /* action frames received count */
+ uint32 mgmt_action_tx; /* action frames transmit count */
+ wifi_rssi rssi_mgmt; /* access Point Beacon and Management frames RSSI
+ * (averaged)
+ */
+ wifi_rssi rssi_data; /* access Point Data Frames RSSI (averaged) from
+ * connected AP
+ */
+ wifi_rssi rssi_ack; /* access Point ACK RSSI (averaged) from
+ * connected AP
+ */
+ wifi_wmm_ac_stat ac[WIFI_AC_MAX]; /* per ac data packet statistics */
+ uint32 num_peers; /* number of peers */
+ wifi_peer_info peer_info[1]; /* per peer statistics */
+} wifi_iface_stat;
+
+#ifdef CONFIG_COMPAT
+/* interface statistics */
+typedef struct {
+ compat_uptr_t iface; /* wifi interface */
+ wifi_interface_info info; /* current state of the interface */
+ uint32 beacon_rx; /* access point beacon received count from
+ * connected AP
+ */
+	uint64 average_tsf_offset;	/* average beacon offset encountered (beacon_TSF - TBTT).
+					 * This field is used to calculate the typical beacon
+					 * contention time on the channel; it may also be used
+					 * to debug beacon synchronization and related power
+					 * consumption issues
+					 */
+ uint32 leaky_ap_detected; /* indicate that this AP
+ * typically leaks packets beyond
+ * the driver guard time.
+ */
+ uint32 leaky_ap_avg_num_frames_leaked; /* average number of frame leaked by AP after
+ * frame with PM bit set was ACK'ed by AP
+ */
+	uint32 leaky_ap_guard_time;	/* guard time currently in force (when implementing
+					 * IEEE power management based on the frame control
+					 * PM bit): how long the driver waits after receiving
+					 * an ACK for a data frame with the PM bit set before
+					 * shutting down the radio
+					 */
+ uint32 mgmt_rx; /* access point mgmt frames received count from
+ * connected AP (including Beacon)
+ */
+ uint32 mgmt_action_rx; /* action frames received count */
+ uint32 mgmt_action_tx; /* action frames transmit count */
+ wifi_rssi rssi_mgmt; /* access Point Beacon and Management frames RSSI
+ * (averaged)
+ */
+ wifi_rssi rssi_data; /* access Point Data Frames RSSI (averaged) from
+ * connected AP
+ */
+ wifi_rssi rssi_ack; /* access Point ACK RSSI (averaged) from
+ * connected AP
+ */
+ wifi_wmm_ac_stat ac[WIFI_AC_MAX]; /* per ac data packet statistics */
+ uint32 num_peers; /* number of peers */
+ wifi_peer_info peer_info[1]; /* per peer statistics */
+} compat_wifi_iface_stat;
+#endif /* CONFIG_COMPAT */
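compat_wifi_iface_stat exists so a 64-bit kernel can parse the layout produced by 32-bit userspace: the pointer-sized iface handle is replaced by compat_uptr_t, which the kernel side converts back with compat_ptr() from <linux/compat.h>. A hedged sketch (the helper name is illustrative):

	#ifdef CONFIG_COMPAT
	static inline void __user *iface_user_ptr(const compat_wifi_iface_stat *cstat)
	{
		/* compat_ptr() widens the 32-bit user handle to a user pointer */
		return compat_ptr(cstat->iface);
	}
	#endif /* CONFIG_COMPAT */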
+
+#endif /* USE_WIFI_STATS_H */
+
+#endif /* _wifi_stats_h_ */
diff --git a/bcmdhd.101.10.361.x/wl_android.c b/bcmdhd.101.10.361.x/wl_android.c
new file mode 100755
index 0000000..7a60390
--- /dev/null
+++ b/bcmdhd.101.10.361.x/wl_android.c
@@ -0,0 +1,14244 @@
+/*
+ * Linux cfg80211 driver - Android related functions
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <net/netlink.h>
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+#endif
+
+#include <wl_android.h>
+#include <wldev_common.h>
+#include <wlioctl.h>
+#include <wlioctl_utils.h>
+#include <bcmutils.h>
+#include <bcmstdlib_s.h>
+#include <linux_osl.h>
+#include <dhd_dbg.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_config.h>
+#include <bcmip.h>
+#ifdef PNO_SUPPORT
+#include <dhd_pno.h>
+#endif
+#ifdef BCMSDIO
+#include <bcmsdbus.h>
+#endif
+#ifdef WL_CFG80211
+#include <wl_cfg80211.h>
+#include <wl_cfgscan.h>
+#include <wl_cfgvif.h>
+#endif
+#ifdef WL_NAN
+#include <wl_cfgnan.h>
+#endif /* WL_NAN */
+#ifdef DHDTCPACK_SUPPRESS
+#include <dhd_ip.h>
+#endif /* DHDTCPACK_SUPPRESS */
+#include <bcmwifi_rspec.h>
+#include <dhd_linux.h>
+#include <bcmiov.h>
+#ifdef DHD_PKT_LOGGING
+#include <dhd_pktlog.h>
+#endif /* DHD_PKT_LOGGING */
+#ifdef WL_BCNRECV
+#include <wl_cfgvendor.h>
+#include <brcm_nl80211.h>
+#endif /* WL_BCNRECV */
+#ifdef WL_MBO
+#include <mbo.h>
+#endif /* WL_MBO */
+#ifdef RTT_SUPPORT
+#include <dhd_rtt.h>
+#endif /* RTT_SUPPORT */
+#ifdef DHD_EVENT_LOG_FILTER
+#include <dhd_event_log_filter.h>
+#endif /* DHD_EVENT_LOG_FILTER */
+#ifdef WL_ESCAN
+#include <wl_escan.h>
+#endif
+
+#ifdef WL_TWT
+#include <802.11ah.h>
+#endif /* WL_TWT */
+
+#ifdef WL_STATIC_IF
+#define WL_BSSIDX_MAX 16
+#endif /* WL_STATIC_IF */
+
+uint android_msg_level = ANDROID_ERROR_LEVEL | ANDROID_MSG_LEVEL;
+
+#define ANDROID_ERROR_MSG(x, args...) \
+ do { \
+ if (android_msg_level & ANDROID_ERROR_LEVEL) { \
+ printf("ANDROID-ERROR) " x, ## args); \
+ } \
+ } while (0)
+#define ANDROID_TRACE_MSG(x, args...) \
+ do { \
+ if (android_msg_level & ANDROID_TRACE_LEVEL) { \
+ printf("ANDROID-TRACE) " x, ## args); \
+ } \
+ } while (0)
+#define ANDROID_INFO_MSG(x, args...) \
+ do { \
+ if (android_msg_level & ANDROID_INFO_LEVEL) { \
+ printf("ANDROID-INFO) " x, ## args); \
+ } \
+ } while (0)
+#define ANDROID_ERROR(x) ANDROID_ERROR_MSG x
+#define ANDROID_TRACE(x) ANDROID_TRACE_MSG x
+#define ANDROID_INFO(x) ANDROID_INFO_MSG x
+
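The double-macro pattern above predates reliance on variadic macros: the caller passes one parenthesized printf-style argument list, and the outer macro pastes it onto ANDROID_*_MSG. Usage looks like this (band and err are illustrative variables):

	/* Note the double parentheses: the inner pair becomes the
	 * argument list of ANDROID_ERROR_MSG after expansion.
	 */
	ANDROID_ERROR(("set band %d failed, err %d\n", band, err));
	/* expands to: ANDROID_ERROR_MSG("set band %d failed, err %d\n", band, err); */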
+/*
+ * Android private command strings, PLEASE define new private commands here
+ * so they can be updated easily in the future (if needed)
+ */
+
+#define CMD_START "START"
+#define CMD_STOP "STOP"
+#define CMD_SCAN_ACTIVE "SCAN-ACTIVE"
+#define CMD_SCAN_PASSIVE "SCAN-PASSIVE"
+#define CMD_RSSI "RSSI"
+#define CMD_LINKSPEED "LINKSPEED"
+#define CMD_RXFILTER_START "RXFILTER-START"
+#define CMD_RXFILTER_STOP "RXFILTER-STOP"
+#define CMD_RXFILTER_ADD "RXFILTER-ADD"
+#define CMD_RXFILTER_REMOVE "RXFILTER-REMOVE"
+#define CMD_BTCOEXSCAN_START "BTCOEXSCAN-START"
+#define CMD_BTCOEXSCAN_STOP "BTCOEXSCAN-STOP"
+#define CMD_BTCOEXMODE "BTCOEXMODE"
+#define CMD_SETSUSPENDOPT "SETSUSPENDOPT"
+#define CMD_SETSUSPENDMODE "SETSUSPENDMODE"
+#define CMD_SETDTIM_IN_SUSPEND "SET_DTIM_IN_SUSPEND"
+#define CMD_MAXDTIM_IN_SUSPEND "MAX_DTIM_IN_SUSPEND"
+#define CMD_DISDTIM_IN_SUSPEND "DISABLE_DTIM_IN_SUSPEND"
+#define CMD_P2P_DEV_ADDR "P2P_DEV_ADDR"
+#define CMD_SETFWPATH "SETFWPATH"
+#define CMD_SETBAND "SETBAND"
+#define CMD_GETBAND "GETBAND"
+#define CMD_COUNTRY "COUNTRY"
+#define CMD_P2P_SET_NOA "P2P_SET_NOA"
+#if !defined WL_ENABLE_P2P_IF
+#define CMD_P2P_GET_NOA "P2P_GET_NOA"
+#endif /* WL_ENABLE_P2P_IF */
+#define CMD_P2P_SD_OFFLOAD "P2P_SD_"
+#define CMD_P2P_LISTEN_OFFLOAD "P2P_LO_"
+#define CMD_P2P_SET_PS "P2P_SET_PS"
+#define CMD_P2P_ECSA "P2P_ECSA"
+#define CMD_P2P_INC_BW "P2P_INCREASE_BW"
+#define CMD_SET_AP_WPS_P2P_IE "SET_AP_WPS_P2P_IE"
+#define CMD_SETROAMMODE "SETROAMMODE"
+#define CMD_SETIBSSBEACONOUIDATA "SETIBSSBEACONOUIDATA"
+#define CMD_MIRACAST "MIRACAST"
+#ifdef WL_NAN
+#define CMD_NAN "NAN_"
+#endif /* WL_NAN */
+#define CMD_COUNTRY_DELIMITER "/"
+
+#if defined (WL_SUPPORT_AUTO_CHANNEL)
+#define CMD_GET_BEST_CHANNELS "GET_BEST_CHANNELS"
+#endif /* WL_SUPPORT_AUTO_CHANNEL */
+
+#define CMD_80211_MODE "MODE" /* 802.11 mode a/b/g/n/ac */
+#define CMD_CHANSPEC "CHANSPEC"
+#define CMD_DATARATE "DATARATE"
+#define CMD_ASSOC_CLIENTS "ASSOCLIST"
+#define CMD_SET_CSA "SETCSA"
+#ifdef WL_SUPPORT_AUTO_CHANNEL
+#define CMD_SET_HAPD_AUTO_CHANNEL "HAPD_AUTO_CHANNEL"
+#endif /* WL_SUPPORT_AUTO_CHANNEL */
+#ifdef CUSTOMER_HW4_PRIVATE_CMD
+#ifdef WL_WTC
+#define CMD_WTC_CONFIG "SETWTCMODE"
+#endif /* WL_WTC */
+#ifdef SUPPORT_HIDDEN_AP
+/* Hostapd private command */
+#define CMD_SET_HAPD_MAX_NUM_STA "HAPD_MAX_NUM_STA"
+#define CMD_SET_HAPD_SSID "HAPD_SSID"
+#define CMD_SET_HAPD_HIDE_SSID "HAPD_HIDE_SSID"
+#endif /* SUPPORT_HIDDEN_AP */
+#ifdef SUPPORT_SOFTAP_SINGL_DISASSOC
+#define CMD_HAPD_STA_DISASSOC "HAPD_STA_DISASSOC"
+#endif /* SUPPORT_SOFTAP_SINGL_DISASSOC */
+#ifdef SUPPORT_SET_LPC
+#define CMD_HAPD_LPC_ENABLED "HAPD_LPC_ENABLED"
+#endif /* SUPPORT_SET_LPC */
+#ifdef SUPPORT_TRIGGER_HANG_EVENT
+#define CMD_TEST_FORCE_HANG "TEST_FORCE_HANG"
+#endif /* SUPPORT_TRIGGER_HANG_EVENT */
+#ifdef SUPPORT_LTECX
+#define CMD_LTECX_SET "LTECOEX"
+#endif /* SUPPORT_LTECX */
+#ifdef TEST_TX_POWER_CONTROL
+#define CMD_TEST_SET_TX_POWER "TEST_SET_TX_POWER"
+#define CMD_TEST_GET_TX_POWER "TEST_GET_TX_POWER"
+#endif /* TEST_TX_POWER_CONTROL */
+#define CMD_SARLIMIT_TX_CONTROL "SET_TX_POWER_CALLING"
+#ifdef SUPPORT_SET_TID
+#define CMD_SET_TID "SET_TID"
+#define CMD_GET_TID "GET_TID"
+#endif /* SUPPORT_SET_TID */
+#define CMD_ROAM_VSIE_ENAB_SET "SET_ROAMING_REASON_ENABLED"
+#define CMD_ROAM_VSIE_ENAB_GET "GET_ROAMING_REASON_ENABLED"
+#define CMD_BR_VSIE_ENAB_SET "SET_BR_ERR_REASON_ENABLED"
+#define CMD_BR_VSIE_ENAB_GET "GET_BR_ERR_REASON_ENABLED"
+#endif /* CUSTOMER_HW4_PRIVATE_CMD */
+#define CMD_KEEP_ALIVE "KEEPALIVE"
+
+#ifdef PNO_SUPPORT
+#define CMD_PNOSSIDCLR_SET "PNOSSIDCLR"
+#define CMD_PNOSETUP_SET "PNOSETUP "
+#define CMD_PNOENABLE_SET "PNOFORCE"
+#define CMD_PNODEBUG_SET "PNODEBUG"
+#define CMD_WLS_BATCHING "WLS_BATCHING"
+#endif /* PNO_SUPPORT */
+
+#define CMD_HAPD_SET_AX_MODE "HAPD_SET_AX_MODE"
+
+#define CMD_HAPD_MAC_FILTER "HAPD_MAC_FILTER"
+
+#if defined(SUPPORT_RANDOM_MAC_SCAN)
+#define ENABLE_RANDOM_MAC "ENABLE_RANDOM_MAC"
+#define DISABLE_RANDOM_MAC "DISABLE_RANDOM_MAC"
+#endif /* SUPPORT_RANDOM_MAC_SCAN */
+#define CMD_GET_FACTORY_MAC "FACTORY_MAC"
+#ifdef CUSTOMER_HW4_PRIVATE_CMD
+
+#ifdef ROAM_API
+#define CMD_ROAMTRIGGER_SET "SETROAMTRIGGER"
+#define CMD_ROAMTRIGGER_GET "GETROAMTRIGGER"
+#define CMD_ROAMDELTA_SET "SETROAMDELTA"
+#define CMD_ROAMDELTA_GET "GETROAMDELTA"
+#define CMD_ROAMSCANPERIOD_SET "SETROAMSCANPERIOD"
+#define CMD_ROAMSCANPERIOD_GET "GETROAMSCANPERIOD"
+#define CMD_FULLROAMSCANPERIOD_SET "SETFULLROAMSCANPERIOD"
+#define CMD_FULLROAMSCANPERIOD_GET "GETFULLROAMSCANPERIOD"
+#define CMD_COUNTRYREV_SET "SETCOUNTRYREV"
+#define CMD_COUNTRYREV_GET "GETCOUNTRYREV"
+#endif /* ROAM_API */
+
+#if defined(SUPPORT_NAN_RANGING_TEST_BW)
+#define CMD_NAN_RANGING_SET_BW "NAN_RANGING_SET_BW"
+#endif /* SUPPORT_NAN_RANGING_TEST_BW */
+
+#ifdef WES_SUPPORT
+#define CMD_GETSCANCHANNELTIMELEGACY "GETSCANCHANNELTIME_LEGACY"
+#define CMD_SETSCANCHANNELTIMELEGACY "SETSCANCHANNELTIME_LEGACY"
+#define CMD_GETSCANUNASSOCTIMELEGACY "GETSCANUNASSOCTIME_LEGACY"
+#define CMD_SETSCANUNASSOCTIMELEGACY "SETSCANUNASSOCTIME_LEGACY"
+#define CMD_GETSCANPASSIVETIMELEGACY "GETSCANPASSIVETIME_LEGACY"
+#define CMD_SETSCANPASSIVETIMELEGACY "SETSCANPASSIVETIME_LEGACY"
+#define CMD_GETSCANHOMETIMELEGACY "GETSCANHOMETIME_LEGACY"
+#define CMD_SETSCANHOMETIMELEGACY "SETSCANHOMETIME_LEGACY"
+#define CMD_GETSCANHOMEAWAYTIMELEGACY "GETSCANHOMEAWAYTIME_LEGACY"
+#define CMD_SETSCANHOMEAWAYTIMELEGACY "SETSCANHOMEAWAYTIME_LEGACY"
+#define CMD_GETROAMSCANCHLEGACY "GETROAMSCANCHANNELS_LEGACY"
+#define CMD_ADDROAMSCANCHLEGACY "ADDROAMSCANCHANNELS_LEGACY"
+#define CMD_GETROAMSCANFQLEGACY "GETROAMSCANFREQUENCIES_LEGACY"
+#define CMD_ADDROAMSCANFQLEGACY "ADDROAMSCANFREQUENCIES_LEGACY"
+#define CMD_GETROAMTRIGLEGACY "GETROAMTRIGGER_LEGACY"
+#define CMD_SETROAMTRIGLEGACY "SETROAMTRIGGER_LEGACY"
+#define CMD_REASSOCLEGACY "REASSOC_LEGACY"
+
+#define CMD_GETROAMSCANCONTROL "GETROAMSCANCONTROL"
+#define CMD_SETROAMSCANCONTROL "SETROAMSCANCONTROL"
+#define CMD_GETROAMSCANCHANNELS "GETROAMSCANCHANNELS"
+#define CMD_SETROAMSCANCHANNELS "SETROAMSCANCHANNELS"
+#define CMD_ADDROAMSCANCHANNELS "ADDROAMSCANCHANNELS"
+#define CMD_GETROAMSCANFREQS "GETROAMSCANFREQUENCIES"
+#define CMD_SETROAMSCANFREQS "SETROAMSCANFREQUENCIES"
+#define CMD_ADDROAMSCANFREQS "ADDROAMSCANFREQUENCIES"
+#define CMD_GETSCANCHANNELTIME "GETSCANCHANNELTIME"
+#define CMD_SETSCANCHANNELTIME "SETSCANCHANNELTIME"
+#define CMD_GETSCANUNASSOCTIME "GETSCANUNASSOCTIME"
+#define CMD_SETSCANUNASSOCTIME "SETSCANUNASSOCTIME"
+#define CMD_GETSCANPASSIVETIME "GETSCANPASSIVETIME"
+#define CMD_SETSCANPASSIVETIME "SETSCANPASSIVETIME"
+#define CMD_GETSCANHOMETIME "GETSCANHOMETIME"
+#define CMD_SETSCANHOMETIME "SETSCANHOMETIME"
+#define CMD_GETSCANHOMEAWAYTIME "GETSCANHOMEAWAYTIME"
+#define CMD_SETSCANHOMEAWAYTIME "SETSCANHOMEAWAYTIME"
+#define CMD_GETSCANNPROBES "GETSCANNPROBES"
+#define CMD_SETSCANNPROBES "SETSCANNPROBES"
+#define CMD_GETDFSSCANMODE "GETDFSSCANMODE"
+#define CMD_SETDFSSCANMODE "SETDFSSCANMODE"
+#define CMD_SETJOINPREFER "SETJOINPREFER"
+
+#define CMD_SENDACTIONFRAME "SENDACTIONFRAME"
+#define CMD_REASSOC "REASSOC"
+
+#define CMD_GETWESMODE "GETWESMODE"
+#define CMD_SETWESMODE "SETWESMODE"
+#define CMD_GETNCHOMODE "GETNCHOMODE"
+#define CMD_SETNCHOMODE "SETNCHOMODE"
+
+/* Customer requested to Remove OKCMODE command */
+#define CMD_GETOKCMODE "GETOKCMODE"
+#define CMD_SETOKCMODE "SETOKCMODE"
+
+#define CMD_OKC_SET_PMK "SET_PMK"
+#define CMD_OKC_ENABLE "OKC_ENABLE"
+
+typedef struct android_wifi_reassoc_params {
+ unsigned char bssid[18];
+ int channel;
+} android_wifi_reassoc_params_t;
+
+#define ANDROID_WIFI_REASSOC_PARAMS_SIZE sizeof(struct android_wifi_reassoc_params)
+
+#define ANDROID_WIFI_ACTION_FRAME_SIZE 1040
+
+typedef struct android_wifi_af_params {
+ unsigned char bssid[18];
+ int channel;
+ int dwell_time;
+ int len;
+ unsigned char data[ANDROID_WIFI_ACTION_FRAME_SIZE];
+} android_wifi_af_params_t;
+
+#define ANDROID_WIFI_AF_PARAMS_SIZE sizeof(struct android_wifi_af_params)
+#endif /* WES_SUPPORT */
+#ifdef SUPPORT_AMPDU_MPDU_CMD
+#define CMD_AMPDU_MPDU "AMPDU_MPDU"
+#endif /* SUPPORT_AMPDU_MPDU_CMD */
+
+#define CMD_CHANGE_RL "CHANGE_RL"
+#define CMD_RESTORE_RL "RESTORE_RL"
+
+#define CMD_SET_RMC_ENABLE "SETRMCENABLE"
+#define CMD_SET_RMC_TXRATE "SETRMCTXRATE"
+#define CMD_SET_RMC_ACTPERIOD "SETRMCACTIONPERIOD"
+#define CMD_SET_RMC_IDLEPERIOD "SETRMCIDLEPERIOD"
+#define CMD_SET_RMC_LEADER "SETRMCLEADER"
+#define CMD_SET_RMC_EVENT "SETRMCEVENT"
+
+#define CMD_SET_SCSCAN "SETSINGLEANT"
+#define CMD_GET_SCSCAN "GETSINGLEANT"
+#ifdef WLTDLS
+#define CMD_TDLS_RESET "TDLS_RESET"
+#endif /* WLTDLS */
+
+#ifdef CONFIG_SILENT_ROAM
+#define CMD_SROAM_TURN_ON "SROAMTURNON"
+#define CMD_SROAM_SET_INFO "SROAMSETINFO"
+#define CMD_SROAM_GET_INFO "SROAMGETINFO"
+#endif /* CONFIG_SILENT_ROAM */
+
+#ifdef CONFIG_ROAM_RSSI_LIMIT
+#define CMD_ROAM_RSSI_LMT "ROAMRSSILIMIT"
+#endif /* CONFIG_ROAM_RSSI_LIMIT */
+#ifdef CONFIG_ROAM_MIN_DELTA
+#define CMD_ROAM_MIN_DELTA "ROAMMINSCOREDELTA"
+#endif /* CONFIG_ROAM_MIN_DELTA */
+
+#define CMD_SET_DISCONNECT_IES "SET_DISCONNECT_IES"
+
+#ifdef FCC_PWR_LIMIT_2G
+#define CMD_GET_FCC_PWR_LIMIT_2G "GET_FCC_CHANNEL"
+#define CMD_SET_FCC_PWR_LIMIT_2G "SET_FCC_CHANNEL"
+/* CUSTOMER_HW4's value differs from BRCM FW value for enable/disable */
+#define CUSTOMER_HW4_ENABLE 0
+#define CUSTOMER_HW4_DISABLE -1
+#endif /* FCC_PWR_LIMIT_2G */
+#define CUSTOMER_HW4_EN_CONVERT(i) (i += 1)
+#endif /* CUSTOMER_HW4_PRIVATE_CMD */
+
+#ifdef WLFBT
+#define CMD_GET_FTKEY "GET_FTKEY"
+#endif
+
+#ifdef WLAIBSS
+#define CMD_SETIBSSTXFAILEVENT "SETIBSSTXFAILEVENT"
+#define CMD_GET_IBSS_PEER_INFO "GETIBSSPEERINFO"
+#define CMD_GET_IBSS_PEER_INFO_ALL "GETIBSSPEERINFOALL"
+#define CMD_SETIBSSROUTETABLE "SETIBSSROUTETABLE"
+#define CMD_SETIBSSAMPDU "SETIBSSAMPDU"
+#define CMD_SETIBSSANTENNAMODE "SETIBSSANTENNAMODE"
+#endif /* WLAIBSS */
+
+#define CMD_ROAM_OFFLOAD "SETROAMOFFLOAD"
+#define CMD_INTERFACE_CREATE "INTERFACE_CREATE"
+#define CMD_INTERFACE_DELETE "INTERFACE_DELETE"
+#define CMD_GET_LINK_STATUS "GETLINKSTATUS"
+
+#if defined(DHD_ENABLE_BIGDATA_LOGGING)
+#define CMD_GET_BSS_INFO "GETBSSINFO"
+#define CMD_GET_ASSOC_REJECT_INFO "GETASSOCREJECTINFO"
+#endif /* DHD_ENABLE_BIGDATA_LOGGING */
+#define CMD_GET_STA_INFO "GETSTAINFO"
+
+/* related with CMD_GET_LINK_STATUS */
+#define WL_ANDROID_LINK_VHT 0x01
+#define WL_ANDROID_LINK_MIMO 0x02
+#define WL_ANDROID_LINK_AP_VHT_SUPPORT 0x04
+#define WL_ANDROID_LINK_AP_MIMO_SUPPORT 0x08
+
+#ifdef P2PRESP_WFDIE_SRC
+#define CMD_P2P_SET_WFDIE_RESP "P2P_SET_WFDIE_RESP"
+#define CMD_P2P_GET_WFDIE_RESP "P2P_GET_WFDIE_RESP"
+#endif /* P2PRESP_WFDIE_SRC */
+
+#define CMD_DFS_AP_MOVE "DFS_AP_MOVE"
+#define CMD_WBTEXT_ENABLE "WBTEXT_ENABLE"
+#define CMD_WBTEXT_PROFILE_CONFIG "WBTEXT_PROFILE_CONFIG"
+#define CMD_WBTEXT_WEIGHT_CONFIG "WBTEXT_WEIGHT_CONFIG"
+#define CMD_WBTEXT_TABLE_CONFIG "WBTEXT_TABLE_CONFIG"
+#define CMD_WBTEXT_DELTA_CONFIG "WBTEXT_DELTA_CONFIG"
+#define CMD_WBTEXT_BTM_TIMER_THRESHOLD "WBTEXT_BTM_TIMER_THRESHOLD"
+#define CMD_WBTEXT_BTM_DELTA "WBTEXT_BTM_DELTA"
+#define CMD_WBTEXT_ESTM_ENABLE "WBTEXT_ESTM_ENABLE"
+
+#ifdef WBTEXT
+#define CMD_WBTEXT_PROFILE_CONFIG "WBTEXT_PROFILE_CONFIG"
+#define CMD_WBTEXT_WEIGHT_CONFIG "WBTEXT_WEIGHT_CONFIG"
+#define CMD_WBTEXT_TABLE_CONFIG "WBTEXT_TABLE_CONFIG"
+#define CMD_WBTEXT_DELTA_CONFIG "WBTEXT_DELTA_CONFIG"
+#define DEFAULT_WBTEXT_PROFILE_A_V2 "a -70 -75 70 10 -75 -128 0 10"
+#define DEFAULT_WBTEXT_PROFILE_B_V2 "b -60 -75 70 10 -75 -128 0 10"
+#define DEFAULT_WBTEXT_PROFILE_A_V3 "a -70 -75 70 10 -75 -128 0 10"
+#define DEFAULT_WBTEXT_PROFILE_B_V3 "b -60 -75 70 10 -75 -128 0 10"
+#define DEFAULT_WBTEXT_WEIGHT_RSSI_A "RSSI a 65"
+#define DEFAULT_WBTEXT_WEIGHT_RSSI_B "RSSI b 65"
+#define DEFAULT_WBTEXT_WEIGHT_CU_A "CU a 35"
+#define DEFAULT_WBTEXT_WEIGHT_CU_B "CU b 35"
+#define DEFAULT_WBTEXT_WEIGHT_ESTM_DL_A "ESTM_DL a 70"
+#define DEFAULT_WBTEXT_WEIGHT_ESTM_DL_B "ESTM_DL b 70"
+#define DEFAULT_WBTEXT_CU_RSSI_TRIG_A -70
+#define DEFAULT_WBTEXT_CU_RSSI_TRIG_B -60
+#ifdef WBTEXT_SCORE_V2
+#define DEFAULT_WBTEXT_TABLE_RSSI_A "RSSI a 0 55 100 55 60 90 \
+60 70 60 70 80 20 80 90 0 90 128 0"
+#define DEFAULT_WBTEXT_TABLE_RSSI_B "RSSI b 0 55 100 55 60 90 \
+60 70 60 70 80 20 80 90 0 90 128 0"
+#define DEFAULT_WBTEXT_TABLE_CU_A "CU a 0 30 100 30 80 20 \
+80 100 20"
+#define DEFAULT_WBTEXT_TABLE_CU_B "CU b 0 10 100 10 70 20 \
+70 100 20"
+#else
+#define DEFAULT_WBTEXT_TABLE_RSSI_A "RSSI a 0 55 100 55 60 90 \
+60 65 70 65 70 50 70 128 20"
+#define DEFAULT_WBTEXT_TABLE_RSSI_B "RSSI b 0 55 100 55 60 90 \
+60 65 70 65 70 50 70 128 20"
+#define DEFAULT_WBTEXT_TABLE_CU_A "CU a 0 30 100 30 50 90 \
+50 60 70 60 80 50 80 100 20"
+#define DEFAULT_WBTEXT_TABLE_CU_B "CU b 0 10 100 10 25 90 \
+25 40 70 40 70 50 70 100 20"
+#endif /* WBTEXT_SCORE_V2 */
+#endif /* WBTEXT */
+
+#define BUFSZ 8
+#define BUFSZN	(BUFSZ + 1)
+
+#define _S(x) #x
+#define S(x) _S(x)
+
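_S()/S() is the standard two-step stringification idiom: the extra level of indirection forces BUFSZ to be macro-expanded before # stringifies it, so S(BUFSZ) yields "8" rather than "BUFSZ". A typical bounded-scan use (illustrative, assuming a command buffer in scope):

	char buf[BUFSZN];
	/* format string "%8s" is built at compile time: reads at most BUFSZ chars */
	sscanf(command, "%" S(BUFSZ) "s", buf);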
+#define MAXBANDS 2 /**< Maximum #of bands */
+#define BAND_2G_INDEX 1
+#define BAND_5G_INDEX 0
+
+typedef union {
+ wl_roam_prof_band_v1_t v1;
+ wl_roam_prof_band_v2_t v2;
+ wl_roam_prof_band_v3_t v3;
+ wl_roam_prof_band_v4_t v4;
+} wl_roamprof_band_t;
+
+#ifdef WLWFDS
+#define CMD_ADD_WFDS_HASH "ADD_WFDS_HASH"
+#define CMD_DEL_WFDS_HASH "DEL_WFDS_HASH"
+#endif /* WLWFDS */
+
+#ifdef BT_WIFI_HANDOVER
+#define CMD_TBOW_TEARDOWN "TBOW_TEARDOWN"
+#endif /* BT_WIFI_HANDOVER */
+
+#define CMD_MURX_BFE_CAP "MURX_BFE_CAP"
+
+#ifdef SUPPORT_RSSI_SUM_REPORT
+#define CMD_SET_RSSI_LOGGING "SET_RSSI_LOGGING"
+#define CMD_GET_RSSI_LOGGING "GET_RSSI_LOGGING"
+#define CMD_GET_RSSI_PER_ANT "GET_RSSI_PER_ANT"
+#endif /* SUPPORT_RSSI_SUM_REPORT */
+
+#define CMD_GET_SNR "GET_SNR"
+
+#ifdef SUPPORT_AP_HIGHER_BEACONRATE
+#define CMD_SET_AP_BEACONRATE "SET_AP_BEACONRATE"
+#define CMD_GET_AP_BASICRATE "GET_AP_BASICRATE"
+#endif /* SUPPORT_AP_HIGHER_BEACONRATE */
+
+#ifdef SUPPORT_AP_RADIO_PWRSAVE
+#define CMD_SET_AP_RPS "SET_AP_RPS"
+#define CMD_GET_AP_RPS "GET_AP_RPS"
+#define CMD_SET_AP_RPS_PARAMS "SET_AP_RPS_PARAMS"
+#endif /* SUPPORT_AP_RADIO_PWRSAVE */
+
+#ifdef SUPPORT_AP_SUSPEND
+#define CMD_SET_AP_SUSPEND "SET_AP_SUSPEND"
+#endif /* SUPPORT_AP_SUSPEND */
+
+#ifdef SUPPORT_AP_BWCTRL
+#define CMD_SET_AP_BW "SET_AP_BW"
+#define CMD_GET_AP_BW "GET_AP_BW"
+#endif /* SUPPORT_AP_BWCTRL */
+
+/* miracast related definition */
+#define MIRACAST_MODE_OFF 0
+#define MIRACAST_MODE_SOURCE 1
+#define MIRACAST_MODE_SINK 2
+
+#ifdef CONNECTION_STATISTICS
+#define CMD_GET_CONNECTION_STATS "GET_CONNECTION_STATS"
+
+struct connection_stats {
+ u32 txframe;
+ u32 txbyte;
+ u32 txerror;
+ u32 rxframe;
+ u32 rxbyte;
+ u32 txfail;
+ u32 txretry;
+ u32 txretrie;
+ u32 txrts;
+ u32 txnocts;
+ u32 txexptime;
+ u32 txrate;
+ u8 chan_idle;
+};
+#endif /* CONNECTION_STATISTICS */
+
+#ifdef SUPPORT_LQCM
+#define CMD_SET_LQCM_ENABLE "SET_LQCM_ENABLE"
+#define CMD_GET_LQCM_REPORT "GET_LQCM_REPORT"
+#endif
+
+static LIST_HEAD(miracast_resume_list);
+#ifdef WL_CFG80211
+static u8 miracast_cur_mode;
+#endif /* WL_CFG80211 */
+
+#ifdef DHD_LOG_DUMP
+#define CMD_NEW_DEBUG_PRINT_DUMP "DEBUG_DUMP"
+#define SUBCMD_UNWANTED "UNWANTED"
+#define SUBCMD_DISCONNECTED "DISCONNECTED"
+void dhd_log_dump_trigger(dhd_pub_t *dhdp, int subcmd);
+#endif /* DHD_LOG_DUMP */
+
+#ifdef DHD_STATUS_LOGGING
+#define CMD_DUMP_STATUS_LOG "DUMP_STAT_LOG"
+#define CMD_QUERY_STATUS_LOG "QUERY_STAT_LOG"
+#endif /* DHD_STATUS_LOGGING */
+
+#ifdef DHD_HANG_SEND_UP_TEST
+#define CMD_MAKE_HANG "MAKE_HANG"
+#endif /* DHD_HANG_SEND_UP_TEST */
+#ifdef DHD_DEBUG_UART
+extern bool dhd_debug_uart_is_running(struct net_device *dev);
+#endif /* DHD_DEBUG_UART */
+
+#ifdef RTT_GEOFENCE_INTERVAL
+#if defined (RTT_SUPPORT) && defined(WL_NAN)
+#define CMD_GEOFENCE_INTERVAL "GEOFENCE_INT"
+#endif /* RTT_SUPPORT && WL_NAN */
+#endif /* RTT_GEOFENCE_INTERVAL */
+
+struct io_cfg {
+ s8 *iovar;
+ s32 param;
+ u32 ioctl;
+ void *arg;
+ u32 len;
+ struct list_head list;
+};
+
+#if defined(BCMFW_ROAM_ENABLE)
+#define CMD_SET_ROAMPREF "SET_ROAMPREF"
+
+#define MAX_NUM_SUITES 10
+#define WIDTH_AKM_SUITE 8
+#define JOIN_PREF_RSSI_LEN 0x02
+#define JOIN_PREF_RSSI_SIZE 4 /* RSSI pref header size in bytes */
+#define JOIN_PREF_WPA_HDR_SIZE 4 /* WPA pref header size in bytes */
+#define JOIN_PREF_WPA_TUPLE_SIZE 12 /* Tuple size in bytes */
+#define JOIN_PREF_MAX_WPA_TUPLES 16
+#define MAX_BUF_SIZE (JOIN_PREF_RSSI_SIZE + JOIN_PREF_WPA_HDR_SIZE + \
+ (JOIN_PREF_WPA_TUPLE_SIZE * JOIN_PREF_MAX_WPA_TUPLES))
+#endif /* BCMFW_ROAM_ENABLE */
+
+#if defined(CONFIG_TIZEN)
+/*
+ * adding these private commands corresponding to atd-server's implementation
+ * __atd_control_pm_state()
+ */
+#define CMD_POWERSAVEMODE_SET "SETPOWERSAVEMODE"
+#define CMD_POWERSAVEMODE_GET "GETPOWERSAVEMODE"
+#endif /* CONFIG_TIZEN */
+
+#define CMD_DEBUG_VERBOSE "DEBUG_VERBOSE"
+#ifdef WL_NATOE
+
+#define CMD_NATOE "NATOE"
+
+#define NATOE_MAX_PORT_NUM 65535
+
+/* natoe command info structure */
+typedef struct wl_natoe_cmd_info {
+ uint8 *command; /* pointer to the actual command */
+ uint16 tot_len; /* total length of the command */
+ uint16 bytes_written; /* Bytes written for get response */
+} wl_natoe_cmd_info_t;
+
+typedef struct wl_natoe_sub_cmd wl_natoe_sub_cmd_t;
+typedef int (natoe_cmd_handler_t)(struct net_device *dev,
+ const wl_natoe_sub_cmd_t *cmd, char *command, wl_natoe_cmd_info_t *cmd_info);
+
+struct wl_natoe_sub_cmd {
+ char *name;
+ uint8 version; /* cmd version */
+ uint16 id; /* id for the dongle f/w switch/case */
+ uint16 type; /* base type of argument */
+ natoe_cmd_handler_t *handler; /* cmd handler */
+};
+
+#define WL_ANDROID_NATOE_FUNC(suffix) wl_android_natoe_subcmd_ ##suffix
+static int wl_android_process_natoe_cmd(struct net_device *dev,
+ char *command, int total_len);
+static int wl_android_natoe_subcmd_enable(struct net_device *dev,
+ const wl_natoe_sub_cmd_t *cmd, char *command, wl_natoe_cmd_info_t *cmd_info);
+static int wl_android_natoe_subcmd_config_ips(struct net_device *dev,
+ const wl_natoe_sub_cmd_t *cmd, char *command, wl_natoe_cmd_info_t *cmd_info);
+static int wl_android_natoe_subcmd_config_ports(struct net_device *dev,
+ const wl_natoe_sub_cmd_t *cmd, char *command, wl_natoe_cmd_info_t *cmd_info);
+static int wl_android_natoe_subcmd_dbg_stats(struct net_device *dev,
+ const wl_natoe_sub_cmd_t *cmd, char *command, wl_natoe_cmd_info_t *cmd_info);
+static int wl_android_natoe_subcmd_tbl_cnt(struct net_device *dev,
+ const wl_natoe_sub_cmd_t *cmd, char *command, wl_natoe_cmd_info_t *cmd_info);
+
+static const wl_natoe_sub_cmd_t natoe_cmd_list[] = {
+ /* wl natoe enable [0/1] or new: "wl natoe [0/1]" */
+ {"enable", 0x01, WL_NATOE_CMD_ENABLE,
+ IOVT_BUFFER, WL_ANDROID_NATOE_FUNC(enable)
+ },
+ {"config_ips", 0x01, WL_NATOE_CMD_CONFIG_IPS,
+ IOVT_BUFFER, WL_ANDROID_NATOE_FUNC(config_ips)
+ },
+ {"config_ports", 0x01, WL_NATOE_CMD_CONFIG_PORTS,
+ IOVT_BUFFER, WL_ANDROID_NATOE_FUNC(config_ports)
+ },
+ {"stats", 0x01, WL_NATOE_CMD_DBG_STATS,
+ IOVT_BUFFER, WL_ANDROID_NATOE_FUNC(dbg_stats)
+ },
+ {"tbl_cnt", 0x01, WL_NATOE_CMD_TBL_CNT,
+ IOVT_BUFFER, WL_ANDROID_NATOE_FUNC(tbl_cnt)
+ },
+ {NULL, 0, 0, 0, NULL}
+};
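The sentinel-terminated table enables table-driven dispatch of NATOE sub-commands. One plausible dispatch loop, sketched under the assumption that wl_android_process_natoe_cmd() matches on the sub-command name (the real parser may tokenize differently):

	static int natoe_dispatch(struct net_device *dev, char *subcmd,
		char *command, wl_natoe_cmd_info_t *cmd_info)
	{
		const wl_natoe_sub_cmd_t *cmd;

		for (cmd = natoe_cmd_list; cmd->name != NULL; cmd++) {
			if (!strcmp(cmd->name, subcmd))
				return cmd->handler(dev, cmd, command, cmd_info);
		}
		return BCME_UNSUPPORTED;	/* no matching sub-command */
	}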
+
+#endif /* WL_NATOE */
+
+#ifdef SET_PCIE_IRQ_CPU_CORE
+#define CMD_PCIE_IRQ_CORE "PCIE_IRQ_CORE"
+#endif /* SET_PCIE_IRQ_CPU_CORE */
+
+#ifdef WLADPS_PRIVATE_CMD
+#define CMD_SET_ADPS "SET_ADPS"
+#define CMD_GET_ADPS "GET_ADPS"
+#ifdef WLADPS_ENERGY_GAIN
+#define CMD_GET_GAIN_ADPS "GET_GAIN_ADPS"
+#define CMD_RESET_GAIN_ADPS "RESET_GAIN_ADPS"
+#ifndef ADPS_GAIN_2G_PM0_IDLE
+#define ADPS_GAIN_2G_PM0_IDLE 0
+#endif
+#ifndef ADPS_GAIN_5G_PM0_IDLE
+#define ADPS_GAIN_5G_PM0_IDLE 0
+#endif
+#ifndef ADPS_GAIN_2G_TX_PSPOLL
+#define ADPS_GAIN_2G_TX_PSPOLL 0
+#endif
+#ifndef ADPS_GAIN_5G_TX_PSPOLL
+#define ADPS_GAIN_5G_TX_PSPOLL 0
+#endif
+#endif /* WLADPS_ENERGY_GAIN */
+#endif /* WLADPS_PRIVATE_CMD */
+
+#ifdef DHD_PKT_LOGGING
+#define CMD_PKTLOG_FILTER_ENABLE "PKTLOG_FILTER_ENABLE"
+#define CMD_PKTLOG_FILTER_DISABLE "PKTLOG_FILTER_DISABLE"
+#define CMD_PKTLOG_FILTER_PATTERN_ENABLE "PKTLOG_FILTER_PATTERN_ENABLE"
+#define CMD_PKTLOG_FILTER_PATTERN_DISABLE "PKTLOG_FILTER_PATTERN_DISABLE"
+#define CMD_PKTLOG_FILTER_ADD "PKTLOG_FILTER_ADD"
+#define CMD_PKTLOG_FILTER_DEL "PKTLOG_FILTER_DEL"
+#define CMD_PKTLOG_FILTER_INFO "PKTLOG_FILTER_INFO"
+#define CMD_PKTLOG_START "PKTLOG_START"
+#define CMD_PKTLOG_STOP "PKTLOG_STOP"
+#define CMD_PKTLOG_FILTER_EXIST "PKTLOG_FILTER_EXIST"
+#define CMD_PKTLOG_MINMIZE_ENABLE "PKTLOG_MINMIZE_ENABLE"
+#define CMD_PKTLOG_MINMIZE_DISABLE "PKTLOG_MINMIZE_DISABLE"
+#define CMD_PKTLOG_CHANGE_SIZE "PKTLOG_CHANGE_SIZE"
+#define CMD_PKTLOG_DEBUG_DUMP "PKTLOG_DEBUG_DUMP"
+#endif /* DHD_PKT_LOGGING */
+
+#ifdef DHD_EVENT_LOG_FILTER
+#define CMD_EWP_FILTER "EWP_FILTER"
+#endif /* DHD_EVENT_LOG_FILTER */
+
+#ifdef WL_BCNRECV
+#define CMD_BEACON_RECV "BEACON_RECV"
+#endif /* WL_BCNRECV */
+#ifdef WL_CAC_TS
+#define CMD_CAC_TSPEC "CAC_TSPEC"
+#endif /* WL_CAC_TS */
+#ifdef WL_GET_CU
+#define CMD_GET_CHAN_UTIL "GET_CU"
+#endif /* WL_GET_CU */
+
+#ifdef SUPPORT_SOFTAP_ELNA_BYPASS
+#define CMD_SET_SOFTAP_ELNA_BYPASS "SET_SOFTAP_ELNA_BYPASS"
+#define CMD_GET_SOFTAP_ELNA_BYPASS "GET_SOFTAP_ELNA_BYPASS"
+#endif /* SUPPORT_SOFTAP_ELNA_BYPASS */
+
+#ifdef WL_NAN
+#define CMD_GET_NAN_STATUS "GET_NAN_STATUS"
+#endif /* WL_NAN */
+
+#ifdef WL_TWT
+#define CMD_TWT_SETUP "TWT_SETUP"
+#define CMD_TWT_TEARDOWN "TWT_TEARDOWN"
+#define CMD_TWT_INFO "TWT_INFO_FRM"
+#define CMD_TWT_STATUS_QUERY "TWT_STATUS"
+#define CMD_TWT_CAPABILITY "TWT_CAP"
+#endif /* WL_TWT */
+
+/* drv command info structure */
+typedef struct wl_drv_cmd_info {
+ uint8 *command; /* pointer to the actual command */
+ uint16 tot_len; /* total length of the command */
+ uint16 bytes_written; /* Bytes written for get response */
+} wl_drv_cmd_info_t;
+
+typedef struct wl_drv_sub_cmd wl_drv_sub_cmd_t;
+typedef int (drv_cmd_handler_t)(struct net_device *dev,
+ const wl_drv_sub_cmd_t *cmd, char *command, wl_drv_cmd_info_t *cmd_info);
+
+struct wl_drv_sub_cmd {
+ char *name;
+ uint8 version; /* cmd version */
+ uint16 id; /* id for the dongle f/w switch/case */
+ uint16 type; /* base type of argument */
+ drv_cmd_handler_t *handler; /* cmd handler */
+};
+
+#ifdef WL_MBO
+
+#define CMD_MBO "MBO"
+enum {
+ WL_MBO_CMD_NON_CHAN_PREF = 1,
+ WL_MBO_CMD_CELL_DATA_CAP = 2
+};
+#define WL_ANDROID_MBO_FUNC(suffix) wl_android_mbo_subcmd_ ##suffix
+
+static int wl_android_process_mbo_cmd(struct net_device *dev,
+ char *command, int total_len);
+static int wl_android_mbo_subcmd_cell_data_cap(struct net_device *dev,
+ const wl_drv_sub_cmd_t *cmd, char *command, wl_drv_cmd_info_t *cmd_info);
+static int wl_android_mbo_subcmd_non_pref_chan(struct net_device *dev,
+ const wl_drv_sub_cmd_t *cmd, char *command, wl_drv_cmd_info_t *cmd_info);
+
+static const wl_drv_sub_cmd_t mbo_cmd_list[] = {
+ {"non_pref_chan", 0x01, WL_MBO_CMD_NON_CHAN_PREF,
+ IOVT_BUFFER, WL_ANDROID_MBO_FUNC(non_pref_chan)
+ },
+ {"cell_data_cap", 0x01, WL_MBO_CMD_CELL_DATA_CAP,
+ IOVT_BUFFER, WL_ANDROID_MBO_FUNC(cell_data_cap)
+ },
+ {NULL, 0, 0, 0, NULL}
+};
+
+#endif /* WL_MBO */
+
+#ifdef WL_GENL
+static s32 wl_genl_handle_msg(struct sk_buff *skb, struct genl_info *info);
+static int wl_genl_init(void);
+static int wl_genl_deinit(void);
+
+extern struct net init_net;
+/* attribute policy: defines which attribute has which type (e.g. int, char *, etc.);
+ * possible values are defined in net/netlink.h
+ */
+static struct nla_policy wl_genl_policy[BCM_GENL_ATTR_MAX + 1] = {
+ [BCM_GENL_ATTR_STRING] = { .type = NLA_NUL_STRING },
+ [BCM_GENL_ATTR_MSG] = { .type = NLA_BINARY },
+};
+
+#define WL_GENL_VER 1
+/* family definition */
+static struct genl_family wl_genl_family = {
+ .id = GENL_ID_GENERATE, /* Genetlink would generate the ID */
+ .hdrsize = 0,
+ .name = "bcm-genl", /* Netlink I/F for Android */
+ .version = WL_GENL_VER, /* Version Number */
+ .maxattr = BCM_GENL_ATTR_MAX,
+};
+
+/* commands: mapping between the command enumeration and the actual function */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
+struct genl_ops wl_genl_ops[] = {
+ {
+ .cmd = BCM_GENL_CMD_MSG,
+ .flags = 0,
+ .policy = wl_genl_policy,
+ .doit = wl_genl_handle_msg,
+ .dumpit = NULL,
+ },
+};
+#else
+struct genl_ops wl_genl_ops = {
+ .cmd = BCM_GENL_CMD_MSG,
+ .flags = 0,
+ .policy = wl_genl_policy,
+ .doit = wl_genl_handle_msg,
+ .dumpit = NULL,
+
+};
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
+static struct genl_multicast_group wl_genl_mcast[] = {
+ { .name = "bcm-genl-mcast", },
+};
+#else
+static struct genl_multicast_group wl_genl_mcast = {
+ .id = GENL_ID_GENERATE, /* Genetlink would generate the ID */
+ .name = "bcm-genl-mcast",
+};
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0) */
+#endif /* WL_GENL */
+
+#ifdef SUPPORT_LQCM
+#define LQCM_ENAB_MASK 0x000000FF /* LQCM enable flag mask */
+#define LQCM_TX_INDEX_MASK 0x0000FF00 /* LQCM tx index mask */
+#define LQCM_RX_INDEX_MASK 0x00FF0000 /* LQCM rx index mask */
+
+#define LQCM_TX_INDEX_SHIFT 8 /* LQCM tx index shift */
+#define LQCM_RX_INDEX_SHIFT 16 /* LQCM rx index shift */
+#endif /* SUPPORT_LQCM */
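A single u32 LQCM report therefore carries the enable flag in the low byte and the tx/rx indices in the next two bytes; unpacking is mask-then-shift (the helper name is illustrative):

	static void lqcm_unpack(uint32 report, uint32 *en, uint32 *tx_idx, uint32 *rx_idx)
	{
		*en     = report & LQCM_ENAB_MASK;
		*tx_idx = (report & LQCM_TX_INDEX_MASK) >> LQCM_TX_INDEX_SHIFT;
		*rx_idx = (report & LQCM_RX_INDEX_MASK) >> LQCM_RX_INDEX_SHIFT;
	}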
+
+#ifdef DHD_SEND_HANG_PRIVCMD_ERRORS
+#define NUMBER_SEQUENTIAL_PRIVCMD_ERRORS 7
+static int priv_cmd_errors = 0;
+#endif /* DHD_SEND_HANG_PRIVCMD_ERRORS */
+
+#ifdef WL_P2P_6G
+#define CMD_ENABLE_6G_P2P "ENABLE_6G_P2P"
+#endif /* WL_P2P_6G */
+
+/**
+ * Extern function declarations (TODO: move them to dhd_linux.h)
+ */
+int dhd_net_bus_devreset(struct net_device *dev, uint8 flag);
+int dhd_dev_init_ioctl(struct net_device *dev);
+#ifdef WL_CFG80211
+int wl_cfg80211_get_p2p_dev_addr(struct net_device *net, struct ether_addr *p2pdev_addr);
+int wl_cfg80211_set_btcoex_dhcp(struct net_device *dev, dhd_pub_t *dhd, char *command);
+#ifdef WES_SUPPORT
+int wl_cfg80211_set_wes_mode(struct net_device *dev, int mode);
+int wl_cfg80211_get_wes_mode(struct net_device *dev);
+int wl_cfg80211_set_ncho_mode(struct net_device *dev, int mode);
+int wl_cfg80211_get_ncho_mode(struct net_device *dev);
+#endif /* WES_SUPPORT */
+#else
+int wl_cfg80211_get_p2p_dev_addr(struct net_device *net, struct ether_addr *p2pdev_addr)
+{ return 0; }
+int wl_cfg80211_set_p2p_noa(struct net_device *net, char* buf, int len)
+{ return 0; }
+int wl_cfg80211_get_p2p_noa(struct net_device *net, char* buf, int len)
+{ return 0; }
+int wl_cfg80211_set_p2p_ps(struct net_device *net, char* buf, int len)
+{ return 0; }
+int wl_cfg80211_set_p2p_ecsa(struct net_device *net, char* buf, int len)
+{ return 0; }
+int wl_cfg80211_increase_p2p_bw(struct net_device *net, char* buf, int len)
+{ return 0; }
+#endif /* WL_CFG80211 */
+#if defined(WL_WTC) && defined(CUSTOMER_HW4_PRIVATE_CMD)
+static int wl_android_wtc_config(struct net_device *dev, char *command, int total_len);
+#endif /* WL_WTC && CUSTOMER_HW4_PRIVATE_CMD */
+#ifdef WBTEXT
+static int wl_android_wbtext(struct net_device *dev, char *command, int total_len);
+static int wl_cfg80211_wbtext_btm_timer_threshold(struct net_device *dev,
+ char *command, int total_len);
+static int wl_cfg80211_wbtext_btm_delta(struct net_device *dev,
+ char *command, int total_len);
+static int wl_cfg80211_wbtext_estm_enable(struct net_device *dev,
+ char *command, int total_len);
+static int wlc_wbtext_get_roam_prof(struct net_device *ndev, wl_roamprof_band_t *rp,
+ uint8 band, uint8 *roam_prof_ver, uint8 *roam_prof_size);
+static int wl_android_wbtext_enable(struct net_device *dev, int mode);
+#endif /* WBTEXT */
+#ifdef WES_SUPPORT
+/* wl_roam.c */
+extern int get_roamscan_mode(struct net_device *dev, int *mode);
+extern int set_roamscan_mode(struct net_device *dev, int mode);
+extern int get_roamscan_chanspec_list(struct net_device *dev, chanspec_t *chanspecs);
+extern int set_roamscan_chanspec_list(struct net_device *dev, uint n, chanspec_t *chanspecs);
+extern int add_roamscan_chanspec_list(struct net_device *dev, uint n, chanspec_t *chanspecs);
+
+static char* legacy_cmdlist[] =
+{
+ CMD_GETROAMSCANCHLEGACY, CMD_ADDROAMSCANCHLEGACY,
+ CMD_GETROAMSCANFQLEGACY, CMD_ADDROAMSCANFQLEGACY,
+ CMD_GETROAMTRIGLEGACY, CMD_SETROAMTRIGLEGACY,
+ CMD_REASSOCLEGACY,
+ CMD_GETSCANCHANNELTIMELEGACY, CMD_SETSCANCHANNELTIMELEGACY,
+ CMD_GETSCANUNASSOCTIMELEGACY, CMD_SETSCANUNASSOCTIMELEGACY,
+ CMD_GETSCANPASSIVETIMELEGACY, CMD_SETSCANPASSIVETIMELEGACY,
+ CMD_GETSCANHOMETIMELEGACY, CMD_SETSCANHOMETIMELEGACY,
+ CMD_GETSCANHOMEAWAYTIMELEGACY, CMD_SETSCANHOMEAWAYTIMELEGACY,
+ "\0"
+};
+
+static char* ncho_cmdlist[] =
+{
+ CMD_ROAMTRIGGER_GET, CMD_ROAMTRIGGER_SET,
+ CMD_ROAMDELTA_GET, CMD_ROAMDELTA_SET,
+ CMD_ROAMSCANPERIOD_GET, CMD_ROAMSCANPERIOD_SET,
+ CMD_FULLROAMSCANPERIOD_GET, CMD_FULLROAMSCANPERIOD_SET,
+ CMD_COUNTRYREV_GET, CMD_COUNTRYREV_SET,
+ CMD_GETROAMSCANCONTROL, CMD_SETROAMSCANCONTROL,
+ CMD_GETROAMSCANCHANNELS, CMD_SETROAMSCANCHANNELS, CMD_ADDROAMSCANCHANNELS,
+ CMD_GETROAMSCANFREQS, CMD_SETROAMSCANFREQS, CMD_ADDROAMSCANFREQS,
+ CMD_SENDACTIONFRAME,
+ CMD_REASSOC,
+ CMD_GETSCANCHANNELTIME, CMD_SETSCANCHANNELTIME,
+ CMD_GETSCANUNASSOCTIME, CMD_SETSCANUNASSOCTIME,
+ CMD_GETSCANPASSIVETIME, CMD_SETSCANPASSIVETIME,
+ CMD_GETSCANHOMETIME, CMD_SETSCANHOMETIME,
+ CMD_GETSCANHOMEAWAYTIME, CMD_SETSCANHOMEAWAYTIME,
+ CMD_GETSCANNPROBES, CMD_SETSCANNPROBES,
+ CMD_GETDFSSCANMODE, CMD_SETDFSSCANMODE,
+ CMD_SETJOINPREFER,
+ CMD_GETWESMODE, CMD_SETWESMODE,
+ "\0"
+};
+#endif /* WES_SUPPORT */
+#ifdef ROAM_CHANNEL_CACHE
+extern void wl_update_roamscan_cache_by_band(struct net_device *dev, int band);
+#endif /* ROAM_CHANNEL_CACHE */
+
+int wl_android_priority_roam_enable(struct net_device *dev, int mode);
+#ifdef CONFIG_SILENT_ROAM
+int wl_android_sroam_turn_on(struct net_device *dev, int mode);
+#endif /* CONFIG_SILENT_ROAM */
+int wl_android_rcroam_turn_on(struct net_device *dev, int mode);
+
+#ifdef ENABLE_4335BT_WAR
+extern int bcm_bt_lock(int cookie);
+extern void bcm_bt_unlock(int cookie);
+static int lock_cookie_wifi = 'W' | 'i'<<8 | 'F'<<16 | 'i'<<24; /* cookie is "WiFi" */
+#endif /* ENABLE_4335BT_WAR */
+
+extern bool ap_fw_loaded;
+extern char iface_name[IFNAMSIZ];
+#ifdef DHD_PM_CONTROL_FROM_FILE
+extern bool g_pm_control;
+#endif /* DHD_PM_CONTROL_FROM_FILE */
+
+/* private command support for restoring roam/scan parameters */
+#if defined(SUPPORT_RESTORE_SCAN_PARAMS) || defined(WES_SUPPORT)
+#define CMD_RESTORE_SCAN_PARAMS "RESTORE_SCAN_PARAMS"
+
+typedef int (*PRIV_CMD_HANDLER) (struct net_device *dev, char *command);
+typedef int (*PRIV_CMD_HANDLER_WITH_LEN) (struct net_device *dev, char *command, int total_len);
+
+enum {
+ RESTORE_TYPE_UNSPECIFIED = 0,
+ RESTORE_TYPE_PRIV_CMD = 1,
+ RESTORE_TYPE_PRIV_CMD_WITH_LEN = 2
+};
+
+typedef struct android_restore_scan_params {
+ char command[64];
+ int parameter;
+ int cmd_type;
+ union {
+ PRIV_CMD_HANDLER cmd_handler;
+ PRIV_CMD_HANDLER_WITH_LEN cmd_handler_w_len;
+ };
+} android_restore_scan_params_t;
+
+/* function prototypes of private command handler */
+static int wl_android_default_set_scan_params(struct net_device *dev, char *command, int total_len);
+static int wl_android_set_roam_trigger(struct net_device *dev, char* command);
+int wl_android_set_roam_delta(struct net_device *dev, char* command);
+int wl_android_set_roam_scan_period(struct net_device *dev, char* command);
+int wl_android_set_full_roam_scan_period(struct net_device *dev, char* command);
+int wl_android_set_roam_scan_control(struct net_device *dev, char *command);
+int wl_android_set_scan_channel_time(struct net_device *dev, char *command);
+int wl_android_set_scan_home_time(struct net_device *dev, char *command);
+int wl_android_set_scan_home_away_time(struct net_device *dev, char *command);
+int wl_android_set_scan_nprobes(struct net_device *dev, char *command);
+static int wl_android_set_band(struct net_device *dev, char *command);
+int wl_android_set_scan_dfs_channel_mode(struct net_device *dev, char *command);
+int wl_android_set_wes_mode(struct net_device *dev, char *command);
+int wl_android_set_okc_mode(struct net_device *dev, char *command);
+
+/* default values */
+#ifdef ROAM_API
+#define DEFAULT_ROAM_TIRGGER -75
+#define DEFAULT_ROAM_DELTA 10
+#define DEFAULT_ROAMSCANPERIOD 10
+#define DEFAULT_FULLROAMSCANPERIOD_SET 120
+#endif /* ROAM_API */
+#ifdef WES_SUPPORT
+#define DEFAULT_ROAMSCANCONTROL 0
+#define DEFAULT_SCANCHANNELTIME 40
+#ifdef BCM4361_CHIP
+#define DEFAULT_SCANHOMETIME 60
+#else
+#define DEFAULT_SCANHOMETIME 45
+#endif /* BCM4361_CHIP */
+#define DEFAULT_SCANHOMEAWAYTIME 100
+#define DEFAULT_SCANPROBES 2
+#define DEFAULT_DFSSCANMODE 1
+#define DEFAULT_WESMODE 0
+#define DEFAULT_OKCMODE 1
+#endif /* WES_SUPPORT */
+#define DEFAULT_BAND 0
+#ifdef WBTEXT
+#define DEFAULT_WBTEXT_ENABLE 1
+#endif /* WBTEXT */
+
+/* restoring parameter list, please don't change order */
+static android_restore_scan_params_t restore_params[] =
+{
+/* wbtext needs to be disabled while updating roam/scan parameters */
+#ifdef WBTEXT
+ { CMD_WBTEXT_ENABLE, 0, RESTORE_TYPE_PRIV_CMD_WITH_LEN,
+ .cmd_handler_w_len = wl_android_wbtext},
+#endif /* WBTEXT */
+#ifdef ROAM_API
+ { CMD_ROAMTRIGGER_SET, DEFAULT_ROAM_TIRGGER,
+ RESTORE_TYPE_PRIV_CMD, .cmd_handler = wl_android_set_roam_trigger},
+ { CMD_ROAMDELTA_SET, DEFAULT_ROAM_DELTA,
+ RESTORE_TYPE_PRIV_CMD, .cmd_handler = wl_android_set_roam_delta},
+ { CMD_ROAMSCANPERIOD_SET, DEFAULT_ROAMSCANPERIOD,
+ RESTORE_TYPE_PRIV_CMD, .cmd_handler = wl_android_set_roam_scan_period},
+ { CMD_FULLROAMSCANPERIOD_SET, DEFAULT_FULLROAMSCANPERIOD_SET,
+ RESTORE_TYPE_PRIV_CMD, .cmd_handler = wl_android_set_full_roam_scan_period},
+#endif /* ROAM_API */
+#ifdef WES_SUPPORT
+ { CMD_SETROAMSCANCONTROL, DEFAULT_ROAMSCANCONTROL,
+ RESTORE_TYPE_PRIV_CMD, .cmd_handler = wl_android_set_roam_scan_control},
+ { CMD_SETSCANCHANNELTIME, DEFAULT_SCANCHANNELTIME,
+ RESTORE_TYPE_PRIV_CMD, .cmd_handler = wl_android_set_scan_channel_time},
+ { CMD_SETSCANHOMETIME, DEFAULT_SCANHOMETIME,
+ RESTORE_TYPE_PRIV_CMD, .cmd_handler = wl_android_set_scan_home_time},
+ { CMD_GETSCANHOMEAWAYTIME, DEFAULT_SCANHOMEAWAYTIME,
+ RESTORE_TYPE_PRIV_CMD, .cmd_handler = wl_android_set_scan_home_away_time},
+ { CMD_SETSCANNPROBES, DEFAULT_SCANPROBES,
+ RESTORE_TYPE_PRIV_CMD, .cmd_handler = wl_android_set_scan_nprobes},
+ { CMD_SETDFSSCANMODE, DEFAULT_DFSSCANMODE,
+ RESTORE_TYPE_PRIV_CMD, .cmd_handler = wl_android_set_scan_dfs_channel_mode},
+ { CMD_SETWESMODE, DEFAULT_WESMODE,
+ RESTORE_TYPE_PRIV_CMD, .cmd_handler = wl_android_set_wes_mode},
+#endif /* WES_SUPPORT */
+ { CMD_SETBAND, DEFAULT_BAND,
+ RESTORE_TYPE_PRIV_CMD, .cmd_handler = wl_android_set_band},
+#ifdef WBTEXT
+ { CMD_WBTEXT_ENABLE, DEFAULT_WBTEXT_ENABLE,
+ RESTORE_TYPE_PRIV_CMD_WITH_LEN, .cmd_handler_w_len = wl_android_wbtext},
+#endif /* WBTEXT */
+ { "\0", 0, RESTORE_TYPE_UNSPECIFIED, .cmd_handler = NULL}
+};
+#endif /* SUPPORT_RESTORE_SCAN_PARAMS || WES_SUPPORT */
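The table above is sentinel-terminated (empty command string) and tagged with cmd_type so the restore path can pick the right union member. A sketch of the likely consumption loop (the actual RESTORE_SCAN_PARAMS handler may build the command string differently):

	static void restore_all_params(struct net_device *dev, char *buf, int buf_len)
	{
		android_restore_scan_params_t *p;

		for (p = restore_params; p->command[0] != '\0'; p++) {
			/* rebuild "CMD <default>" so the existing parsers can reuse it */
			snprintf(buf, buf_len, "%s %d", p->command, p->parameter);
			if (p->cmd_type == RESTORE_TYPE_PRIV_CMD)
				p->cmd_handler(dev, buf);
			else if (p->cmd_type == RESTORE_TYPE_PRIV_CMD_WITH_LEN)
				p->cmd_handler_w_len(dev, buf, buf_len);
		}
	}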
+
+#ifdef SUPPORT_LATENCY_CRITICAL_DATA
+#define CMD_GET_LATENCY_CRITICAL_DATA "GET_LATENCY_CRT_DATA"
+#define CMD_SET_LATENCY_CRITICAL_DATA "SET_LATENCY_CRT_DATA"
+#endif /* SUPPORT_LATENCY_CRITICAL_DATA */
+
+typedef struct android_priv_cmd_log_cfg_table {
+ char command[64];
+ int enable;
+} android_priv_cmd_log_cfg_table_t;
+
+static android_priv_cmd_log_cfg_table_t loging_params[] = {
+ {CMD_GET_SNR, FALSE},
+#ifdef SUPPORT_LQCM
+ {CMD_GET_LQCM_REPORT, FALSE},
+#endif
+#ifdef WL_GET_CU
+ {CMD_GET_CHAN_UTIL, FALSE},
+#endif
+ {"\0", FALSE}
+};
+
+/**
+ * Local (static) functions and variables
+ */
+
+/* Initialize g_wifi_on to 1 so dhd_bus_start will be called for the first
+ * time (only) in dhd_open; subsequent wifi-on requests will be handled by
+ * wl_android_wifi_on
+ */
+int g_wifi_on = TRUE;
+
+/**
+ * Local (static) function definitions
+ */
+
+static char* wl_android_get_band_str(u16 band)
+{
+ switch (band) {
+#ifdef WL_6G_BAND
+ case WLC_BAND_6G:
+ return "6G";
+#endif /* WL_6G_BAND */
+ case WLC_BAND_5G:
+ return "5G";
+ case WLC_BAND_2G:
+ return "2G";
+ default:
+			ANDROID_ERROR(("Unknown band: %d\n", band));
+ return "Unknown band";
+ }
+}
+
+#ifdef WBTEXT
+static int wl_android_bandstr_to_fwband(char *band, u8 *fw_band)
+{
+ int err = BCME_OK;
+
+ if (!strcasecmp(band, "a")) {
+ *fw_band = WLC_BAND_5G;
+ } else if (!strcasecmp(band, "b")) {
+ *fw_band = WLC_BAND_2G;
+#ifdef WL_6G_BAND
+ } else if (!strcasecmp(band, "6g")) {
+ *fw_band = WLC_BAND_6G;
+#endif /* WL_6G_BAND */
+ } else if (!strcasecmp(band, "all")) {
+ *fw_band = WLC_BAND_ALL;
+ } else {
+ err = BCME_BADBAND;
+ }
+
+ return err;
+}
+#endif /* WBTEXT */
+
+#ifdef WLWFDS
+static int wl_android_set_wfds_hash(
+ struct net_device *dev, char *command, bool enable)
+{
+ int error = 0;
+ wl_p2p_wfds_hash_t *wfds_hash = NULL;
+ char *smbuf = NULL;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+
+ smbuf = (char *)MALLOC(cfg->osh, WLC_IOCTL_MAXLEN);
+ if (smbuf == NULL) {
+		ANDROID_ERROR(("wl_android_set_wfds_hash: failed to allocate memory %d bytes\n",
+ WLC_IOCTL_MAXLEN));
+ return -ENOMEM;
+ }
+
+ if (enable) {
+ wfds_hash = (wl_p2p_wfds_hash_t *)(command + strlen(CMD_ADD_WFDS_HASH) + 1);
+ error = wldev_iovar_setbuf(dev, "p2p_add_wfds_hash", wfds_hash,
+ sizeof(wl_p2p_wfds_hash_t), smbuf, WLC_IOCTL_MAXLEN, NULL);
+ }
+ else {
+ wfds_hash = (wl_p2p_wfds_hash_t *)(command + strlen(CMD_DEL_WFDS_HASH) + 1);
+ error = wldev_iovar_setbuf(dev, "p2p_del_wfds_hash", wfds_hash,
+ sizeof(wl_p2p_wfds_hash_t), smbuf, WLC_IOCTL_MAXLEN, NULL);
+ }
+
+ if (error) {
+ ANDROID_ERROR(("wl_android_set_wfds_hash: failed to %s, error=%d\n", command, error));
+ }
+
+ if (smbuf) {
+ MFREE(cfg->osh, smbuf, WLC_IOCTL_MAXLEN);
+ }
+ return error;
+}
+#endif /* WLWFDS */
+
+static int wl_android_get_link_speed(struct net_device *net, char *command, int total_len)
+{
+ int link_speed;
+ int bytes_written;
+ int error;
+
+ error = wldev_get_link_speed(net, &link_speed);
+ if (error) {
+ ANDROID_ERROR(("Get linkspeed failed \n"));
+ return -1;
+ }
+
+ /* Convert Kbps to Android Mbps */
+ link_speed = link_speed / 1000;
+ bytes_written = snprintf(command, total_len, "LinkSpeed %d", link_speed);
+ ANDROID_INFO(("wl_android_get_link_speed: command result is %s\n", command));
+ return bytes_written;
+}
+
+static int wl_android_get_rssi(struct net_device *net, char *command, int total_len)
+{
+ wlc_ssid_t ssid = {0, {0}};
+ int bytes_written = 0;
+ int error = 0;
+ scb_val_t scbval;
+ char *delim = NULL;
+ struct net_device *target_ndev = net;
+#ifdef WL_VIRTUAL_APSTA
+ char *pos = NULL;
+ struct bcm_cfg80211 *cfg;
+#endif /* WL_VIRTUAL_APSTA */
+
+ delim = strchr(command, ' ');
+	/* In AP mode the rssi command is
+	 * "driver rssi <sta_mac_addr>";
+	 * in STA/GC mode it is
+	 * "driver rssi"
+	 */
+ if (delim) {
+ /* Ap/GO mode
+ * driver rssi <sta_mac_addr>
+ */
+ ANDROID_TRACE(("wl_android_get_rssi: cmd:%s\n", delim));
+ /* skip space from delim after finding char */
+ delim++;
+ if (!(bcm_ether_atoe((delim), &scbval.ea))) {
+ ANDROID_ERROR(("wl_android_get_rssi: address err\n"));
+ return -1;
+ }
+ scbval.val = htod32(0);
+ ANDROID_TRACE(("wl_android_get_rssi: address:"MACDBG, MAC2STRDBG(scbval.ea.octet)));
+#ifdef WL_VIRTUAL_APSTA
+ /* RSDB AP may have another virtual interface
+ * In this case, format of private command is as following,
+ * DRIVER rssi <sta_mac_addr> <AP interface name>
+ */
+
+ /* Current position is start of MAC address string */
+ pos = delim;
+ delim = strchr(pos, ' ');
+ if (delim) {
+ /* skip space from delim after finding char */
+ delim++;
+ if (strnlen(delim, IFNAMSIZ)) {
+ cfg = wl_get_cfg(net);
+ target_ndev = wl_get_ap_netdev(cfg, delim);
+ if (target_ndev == NULL)
+ target_ndev = net;
+ }
+ }
+#endif /* WL_VIRTUAL_APSTA */
+ }
+ else {
+ /* STA/GC mode */
+ bzero(&scbval, sizeof(scb_val_t));
+ }
+
+ error = wldev_get_rssi(target_ndev, &scbval);
+ if (error)
+ return -1;
+#if defined(RSSIOFFSET)
+ scbval.val = wl_update_rssi_offset(net, scbval.val);
+#endif
+
+ error = wldev_get_ssid(target_ndev, &ssid);
+ if (error)
+ return -1;
+ if ((ssid.SSID_len == 0) || (ssid.SSID_len > DOT11_MAX_SSID_LEN)) {
+ ANDROID_ERROR(("wl_android_get_rssi: wldev_get_ssid failed\n"));
+ } else if (total_len <= ssid.SSID_len) {
+ return -ENOMEM;
+ } else {
+ memcpy(command, ssid.SSID, ssid.SSID_len);
+ bytes_written = ssid.SSID_len;
+ }
+ if ((total_len - bytes_written) < (strlen(" rssi -XXX") + 1))
+ return -ENOMEM;
+
+ bytes_written += scnprintf(&command[bytes_written], total_len - bytes_written,
+ " rssi %d", scbval.val);
+ command[bytes_written] = '\0';
+
+ ANDROID_TRACE(("wl_android_get_rssi: command result is %s (%d)\n", command, bytes_written));
+ return bytes_written;
+}
+
+static int wl_android_set_suspendopt(struct net_device *dev, char *command)
+{
+ int suspend_flag;
+ int ret_now;
+ int ret = 0;
+
+ suspend_flag = *(command + strlen(CMD_SETSUSPENDOPT) + 1) - '0';
+
+ if (suspend_flag != 0) {
+ suspend_flag = 1;
+ }
+ ret_now = net_os_set_suspend_disable(dev, suspend_flag);
+
+ if (ret_now != suspend_flag) {
+ if (!(ret = net_os_set_suspend(dev, ret_now, 1))) {
+ ANDROID_INFO(("wl_android_set_suspendopt: Suspend Flag %d -> %d\n",
+ ret_now, suspend_flag));
+ } else {
+ ANDROID_ERROR(("wl_android_set_suspendopt: failed %d\n", ret));
+ }
+ }
+
+ return ret;
+}
+
+static int wl_android_set_suspendmode(struct net_device *dev, char *command)
+{
+ int ret = 0;
+
+#if !defined(CONFIG_HAS_EARLYSUSPEND) || !defined(DHD_USE_EARLYSUSPEND)
+ int suspend_flag;
+
+ suspend_flag = *(command + strlen(CMD_SETSUSPENDMODE) + 1) - '0';
+ if (suspend_flag != 0)
+ suspend_flag = 1;
+
+ if (!(ret = net_os_set_suspend(dev, suspend_flag, 0)))
+ ANDROID_INFO(("wl_android_set_suspendmode: Suspend Mode %d\n", suspend_flag));
+ else
+ ANDROID_ERROR(("wl_android_set_suspendmode: failed %d\n", ret));
+#endif
+
+ return ret;
+}
+
+#ifdef WL_CFG80211
+int wl_android_get_80211_mode(struct net_device *dev, char *command, int total_len)
+{
+ uint8 mode[5];
+ int error = 0;
+ int bytes_written = 0;
+
+ error = wldev_get_mode(dev, mode, sizeof(mode));
+ if (error)
+ return -1;
+
+ ANDROID_INFO(("wl_android_get_80211_mode: mode:%s\n", mode));
+ bytes_written = snprintf(command, total_len, "%s %s", CMD_80211_MODE, mode);
+ ANDROID_INFO(("wl_android_get_80211_mode: command:%s EXIT\n", command));
+ return bytes_written;
+}
+
+extern chanspec_t
+wl_chspec_driver_to_host(chanspec_t chanspec);
+int wl_android_get_chanspec(struct net_device *dev, char *command, int total_len)
+{
+ int error = 0;
+ int bytes_written = 0;
+ int chsp = 0;
+ uint16 band = 0;
+ uint16 bw = 0;
+ uint16 channel = 0;
+ u32 sb = 0;
+ chanspec_t chanspec;
+
+ /* command is
+ * driver chanspec
+ */
+ error = wldev_iovar_getint(dev, "chanspec", &chsp);
+ if (error)
+ return -1;
+
+ chanspec = wl_chspec_driver_to_host(chsp);
+ ANDROID_INFO(("wl_android_get_80211_mode: return value of chanspec:%x\n", chanspec));
+
+ channel = chanspec & WL_CHANSPEC_CHAN_MASK;
+ band = chanspec & WL_CHANSPEC_BAND_MASK;
+ bw = chanspec & WL_CHANSPEC_BW_MASK;
+
+ ANDROID_INFO(("wl_android_get_80211_mode: channel:%d band:%d bandwidth:%d\n",
+ channel, band, bw));
+
+ if (bw == WL_CHANSPEC_BW_160) {
+ bw = WL_CH_BANDWIDTH_160MHZ;
+ } else if (bw == WL_CHANSPEC_BW_80) {
+ bw = WL_CH_BANDWIDTH_80MHZ;
+ } else if (bw == WL_CHANSPEC_BW_40) {
+ bw = WL_CH_BANDWIDTH_40MHZ;
+ } else if (bw == WL_CHANSPEC_BW_20) {
+ bw = WL_CH_BANDWIDTH_20MHZ;
+ } else {
+ bw = WL_CH_BANDWIDTH_20MHZ;
+ }
+
+ if (bw == WL_CH_BANDWIDTH_40MHZ) {
+ if (CHSPEC_SB_UPPER(chanspec)) {
+ channel += CH_10MHZ_APART;
+ } else {
+ channel -= CH_10MHZ_APART;
+ }
+ }
+ else if (bw == WL_CH_BANDWIDTH_80MHZ) {
+ sb = chanspec & WL_CHANSPEC_CTL_SB_MASK;
+ if (sb == WL_CHANSPEC_CTL_SB_LL) {
+ channel -= (CH_10MHZ_APART + CH_20MHZ_APART);
+ } else if (sb == WL_CHANSPEC_CTL_SB_LU) {
+ channel -= CH_10MHZ_APART;
+ } else if (sb == WL_CHANSPEC_CTL_SB_UL) {
+ channel += CH_10MHZ_APART;
+ } else {
+ /* WL_CHANSPEC_CTL_SB_UU */
+ channel += (CH_10MHZ_APART + CH_20MHZ_APART);
+ }
+ } else if (bw == WL_CH_BANDWIDTH_160MHZ) {
+ channel = wf_chspec_primary20_chan(chanspec);
+ }
+ bytes_written = snprintf(command, total_len, "%s channel %d band %s bw %d", CMD_CHANSPEC,
+ channel, wl_android_get_band_str(CHSPEC2WLC_BAND(chanspec)), bw);
+
+ ANDROID_INFO(("wl_android_get_chanspec: command:%s EXIT\n", command));
+ return bytes_written;
+}
+#endif /* WL_CFG80211 */
+
+/* Returns the current datarate; the value reported by the firmware is in 500 kbps units */
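+/* Illustrative: a firmware report of 866 (500 kbps units) maps to
+ * "DATARATE 433" in Mbps, assuming CMD_DATARATE is the string "DATARATE". */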
+int wl_android_get_datarate(struct net_device *dev, char *command, int total_len)
+{
+ int error = 0;
+ int datarate = 0;
+ int bytes_written = 0;
+
+ error = wldev_get_datarate(dev, &datarate);
+ if (error)
+ return -1;
+
+ ANDROID_INFO(("wl_android_get_datarate: datarate:%d\n", datarate));
+
+ bytes_written = snprintf(command, total_len, "%s %d", CMD_DATARATE, (datarate/2));
+ return bytes_written;
+}
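+/* The reply is "<CMD_ASSOC_CLIENTS> listcount: <n> Stations:" followed by
+ * one MAC address per associated station (format illustrative). */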
+int wl_android_get_assoclist(struct net_device *dev, char *command, int total_len)
+{
+ int error = 0;
+ int bytes_written = 0;
+ uint i;
+ int len = 0;
+ char mac_buf[MAX_NUM_OF_ASSOCLIST *
+ sizeof(struct ether_addr) + sizeof(uint)] = {0};
+ struct maclist *assoc_maclist = (struct maclist *)mac_buf;
+
+ ANDROID_TRACE(("wl_android_get_assoclist: ENTER\n"));
+
+ assoc_maclist->count = htod32(MAX_NUM_OF_ASSOCLIST);
+
+ error = wldev_ioctl_get(dev, WLC_GET_ASSOCLIST, assoc_maclist, sizeof(mac_buf));
+ if (error)
+ return -1;
+
+ assoc_maclist->count = dtoh32(assoc_maclist->count);
+ bytes_written = snprintf(command, total_len, "%s listcount: %d Stations:",
+ CMD_ASSOC_CLIENTS, assoc_maclist->count);
+
+ for (i = 0; i < assoc_maclist->count; i++) {
+ len = snprintf(command + bytes_written, total_len - bytes_written, " " MACDBG,
+ MAC2STRDBG(assoc_maclist->ea[i].octet));
+ /* A return value of '(total_len - bytes_written)' or more means that the
+ * output was truncated
+ */
+ if ((len > 0) && (len < (total_len - bytes_written))) {
+ bytes_written += len;
+ } else {
+ ANDROID_ERROR(("wl_android_get_assoclist: Insufficient buffer %d,"
+ " bytes_written %d\n",
+ total_len, bytes_written));
+ bytes_written = -1;
+ break;
+ }
+ }
+ return bytes_written;
+}
+
+#ifdef WL_CFG80211
+extern chanspec_t
+wl_chspec_host_to_driver(chanspec_t chanspec);
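+/* SET_CSA argument order is mode, count, channel spec, e.g. (illustrative)
+ * "DRIVER SET_CSA 0 10 36/80": mode 0 or 1, a beacon count, and a chanspec
+ * string accepted by wf_chspec_aton(). 5 GHz targets are rejected when the
+ * channel is radar-sensitive or passive. */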
+static int wl_android_set_csa(struct net_device *dev, char *command)
+{
+ int error = 0;
+ char smbuf[WLC_IOCTL_SMLEN];
+ wl_chan_switch_t csa_arg;
+ u32 chnsp = 0;
+ int err = 0;
+
+ ANDROID_INFO(("wl_android_set_csa: command:%s\n", command));
+
+ command = (command + strlen(CMD_SET_CSA));
+ /* Order is mode, count, channel */
+ if (!*++command) {
+ ANDROID_ERROR(("wl_android_set_csa:error missing arguments\n"));
+ return -1;
+ }
+ csa_arg.mode = bcm_atoi(command);
+
+ if (csa_arg.mode != 0 && csa_arg.mode != 1) {
+ ANDROID_ERROR(("Invalid mode\n"));
+ return -1;
+ }
+
+ if (!*++command) {
+ ANDROID_ERROR(("wl_android_set_csa: error missing count\n"));
+ return -1;
+ }
+ command++;
+ csa_arg.count = bcm_atoi(command);
+
+ csa_arg.reg = 0;
+ csa_arg.chspec = 0;
+ command += 2;
+ if (!*command) {
+ ANDROID_ERROR(("wl_android_set_csa: error missing channel\n"));
+ return -1;
+ }
+
+ chnsp = wf_chspec_aton(command);
+ if (chnsp == 0) {
+ ANDROID_ERROR(("wl_android_set_csa:chsp is not correct\n"));
+ return -1;
+ }
+ chnsp = wl_chspec_host_to_driver(chnsp);
+ csa_arg.chspec = chnsp;
+
+ if (chnsp & WL_CHANSPEC_BAND_5G) {
+ u32 chanspec = chnsp;
+ err = wldev_iovar_getint(dev, "per_chan_info", &chanspec);
+ if (!err) {
+ if ((chanspec & WL_CHAN_RADAR) || (chanspec & WL_CHAN_PASSIVE)) {
+ ANDROID_ERROR(("Channel is radar sensitive\n"));
+ return -1;
+ }
+ if (chanspec == 0) {
+ ANDROID_ERROR(("Invalid hw channel\n"));
+ return -1;
+ }
+ } else {
+ ANDROID_ERROR(("does not support per_chan_info\n"));
+ return -1;
+ }
+ ANDROID_INFO(("non radar sensitivity\n"));
+ }
+ error = wldev_iovar_setbuf(dev, "csa", &csa_arg, sizeof(csa_arg),
+ smbuf, sizeof(smbuf), NULL);
+ if (error) {
+ ANDROID_ERROR(("wl_android_set_csa:set csa failed:%d\n", error));
+ return -1;
+ }
+ return 0;
+}
+#endif /* WL_CFG80211 */
+
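+/* SETDTIM_IN_SUSPEND sets how many DTIM beacons may be skipped while
+ * suspended; the single-digit value is rejected when it exceeds
+ * MAX_DTIM_ALLOWED_INTERVAL / MAX_DTIM_SKIP_BEACON_INTERVAL, keeping the
+ * effective wake interval bounded. */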
+static int
+wl_android_set_bcn_li_dtim(struct net_device *dev, char *command)
+{
+ int ret = 0;
+ int dtim;
+
+ dtim = *(command + strlen(CMD_SETDTIM_IN_SUSPEND) + 1) - '0';
+
+ if (dtim > (MAX_DTIM_ALLOWED_INTERVAL / MAX_DTIM_SKIP_BEACON_INTERVAL)) {
+ ANDROID_ERROR(("%s: failed, invalid dtim %d\n",
+ __FUNCTION__, dtim));
+ return BCME_ERROR;
+ }
+
+ if (!(ret = net_os_set_suspend_bcn_li_dtim(dev, dtim))) {
+ ANDROID_TRACE(("%s: SET bcn_li_dtim in suspend %d\n",
+ __FUNCTION__, dtim));
+ } else {
+ ANDROID_ERROR(("%s: failed %d\n", __FUNCTION__, ret));
+ }
+
+ return ret;
+}
+
+static int
+wl_android_set_max_dtim(struct net_device *dev, char *command)
+{
+ int ret = 0;
+ int dtim_flag;
+
+ dtim_flag = *(command + strlen(CMD_MAXDTIM_IN_SUSPEND) + 1) - '0';
+
+ if (!(ret = net_os_set_max_dtim_enable(dev, dtim_flag))) {
+ ANDROID_TRACE(("wl_android_set_max_dtim: use Max bcn_li_dtim in suspend %s\n",
+ (dtim_flag ? "Enable" : "Disable")));
+ } else {
+ ANDROID_ERROR(("wl_android_set_max_dtim: failed %d\n", ret));
+ }
+
+ return ret;
+}
+
+#ifdef DISABLE_DTIM_IN_SUSPEND
+static int
+wl_android_set_disable_dtim_in_suspend(struct net_device *dev, char *command)
+{
+ int ret = 0;
+ int dtim_flag;
+
+ dtim_flag = *(command + strlen(CMD_DISDTIM_IN_SUSPEND) + 1) - '0';
+
+ if (!(ret = net_os_set_disable_dtim_in_suspend(dev, dtim_flag))) {
+ ANDROID_TRACE(("wl_android_set_disable_dtim_in_suspend: "
+ "use Disable bcn_li_dtim in suspend %s\n",
+ (dtim_flag ? "Enable" : "Disable")));
+ } else {
+ ANDROID_ERROR(("wl_android_set_disable_dtim_in_suspend: failed %d\n", ret));
+ }
+
+ return ret;
+}
+#endif /* DISABLE_DTIM_IN_SUSPEND */
+
+static int wl_android_get_band(struct net_device *dev, char *command, int total_len)
+{
+ uint band;
+ int bytes_written;
+ int error = BCME_OK;
+
+ error = wldev_iovar_getint(dev, "if_band", &band);
+ if (error == BCME_UNSUPPORTED) {
+ error = wldev_get_band(dev, &band);
+ if (error) {
+ return error;
+ }
+ }
+ bytes_written = snprintf(command, total_len, "Band %d", band);
+ return bytes_written;
+}
+
+#ifdef WL_CFG80211
+static int
+wl_android_set_band(struct net_device *dev, char *command)
+{
+ int error = 0;
+ uint band = *(command + strlen(CMD_SETBAND) + 1) - '0';
+#ifdef WL_HOST_BAND_MGMT
+ int ret = 0;
+ if ((ret = wl_cfg80211_set_band(dev, band)) < 0) {
+ if (ret == BCME_UNSUPPORTED) {
+ /* If roam_band is unsupported, fall back to the original method */
+ ANDROID_ERROR(("WL_HOST_BAND_MGMT defined, "
+ "but roam_band iovar unsupported in the firmware\n"));
+ } else {
+ error = -1;
+ }
+ }
+ if (((ret == 0) && (band == WLC_BAND_AUTO)) || (ret == BCME_UNSUPPORTED)) {
+ /* Apply if roam_band iovar is not supported or band setting is AUTO */
+ error = wldev_set_band(dev, band);
+ }
+#else
+ error = wl_cfg80211_set_if_band(dev, band);
+#endif /* WL_HOST_BAND_MGMT */
+#ifdef ROAM_CHANNEL_CACHE
+ wl_update_roamscan_cache_by_band(dev, band);
+#endif /* ROAM_CHANNEL_CACHE */
+ return error;
+}
+#endif /* WL_CFG80211 */
+
+#ifdef CUSTOMER_HW4_PRIVATE_CMD
+#ifdef ROAM_API
+#ifdef WBTEXT
+static bool wl_android_check_wbtext_support(struct net_device *dev)
+{
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
+ return dhdp->wbtext_support;
+}
+#endif /* WBTEXT */
+
+static bool
+wl_android_check_wbtext_policy(struct net_device *dev)
+{
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
+ if (dhdp->wbtext_policy == WL_BSSTRANS_POLICY_PRODUCT_WBTEXT) {
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+static int
+wl_android_set_roam_trigger(struct net_device *dev, char* command)
+{
+ int roam_trigger[2] = {0, 0};
+ int error;
+
+#ifdef WBTEXT
+ if (wl_android_check_wbtext_policy(dev)) {
+ ANDROID_ERROR(("blocked to set roam trigger. try with setting roam profile\n"));
+ return BCME_ERROR;
+ }
+#endif /* WBTEXT */
+
+ sscanf(command, "%*s %10d", &roam_trigger[0]);
+ if (roam_trigger[0] >= 0) {
+ ANDROID_ERROR(("wrong roam trigger value (%d)\n", roam_trigger[0]));
+ return BCME_ERROR;
+ }
+
+ roam_trigger[1] = WLC_BAND_ALL;
+ error = wldev_ioctl_set(dev, WLC_SET_ROAM_TRIGGER, roam_trigger,
+ sizeof(roam_trigger));
+ if (error != BCME_OK) {
+ ANDROID_ERROR(("failed to set roam trigger (%d)\n", error));
+ return BCME_ERROR;
+ }
+
+ return BCME_OK;
+}
+
+static int
+wl_android_get_roam_trigger(struct net_device *dev, char *command, int total_len)
+{
+ int bytes_written, error;
+ int roam_trigger[2] = {0, 0};
+ uint16 band = 0;
+ int chsp = 0;
+ chanspec_t chanspec;
+#ifdef WBTEXT
+ int i;
+ wl_roamprof_band_t rp;
+ uint8 roam_prof_ver = 0, roam_prof_size = 0;
+#endif /* WBTEXT */
+
+ error = wldev_iovar_getint(dev, "chanspec", &chsp);
+ if (error != BCME_OK) {
+ ANDROID_ERROR(("failed to get chanspec (%d)\n", error));
+ return BCME_ERROR;
+ }
+
+ chanspec = wl_chspec_driver_to_host(chsp);
+ band = CHSPEC2WLC_BAND(chanspec);
+
+ if (wl_android_check_wbtext_policy(dev)) {
+#ifdef WBTEXT
+ memset_s(&rp, sizeof(rp), 0, sizeof(rp));
+ if ((error = wlc_wbtext_get_roam_prof(dev, &rp, band, &roam_prof_ver,
+ &roam_prof_size))) {
+ ANDROID_ERROR(("Getting roam_profile failed with err=%d \n", error));
+ return -EINVAL;
+ }
+ switch (roam_prof_ver) {
+ case WL_ROAM_PROF_VER_1:
+ {
+ for (i = 0; i < WL_MAX_ROAM_PROF_BRACKETS; i++) {
+ if (rp.v2.roam_prof[i].channel_usage == 0) {
+ roam_trigger[0] = rp.v2.roam_prof[i].roam_trigger;
+ break;
+ }
+ }
+ }
+ break;
+ case WL_ROAM_PROF_VER_2:
+ {
+ for (i = 0; i < WL_MAX_ROAM_PROF_BRACKETS; i++) {
+ if (rp.v3.roam_prof[i].channel_usage == 0) {
+ roam_trigger[0] = rp.v3.roam_prof[i].roam_trigger;
+ break;
+ }
+ }
+ }
+ break;
+ case WL_ROAM_PROF_VER_3:
+ {
+ for (i = 0; i < WL_MAX_ROAM_PROF_BRACKETS; i++) {
+ if (rp.v4.roam_prof[i].channel_usage == 0) {
+ roam_trigger[0] = rp.v4.roam_prof[i].roam_trigger;
+ break;
+ }
+ }
+ }
+ break;
+ default:
+ ANDROID_ERROR(("bad version = %d \n", roam_prof_ver));
+ return BCME_VERSION;
+ }
+#endif /* WBTEXT */
+ if (roam_trigger[0] == 0) {
+ ANDROID_ERROR(("roam trigger was not set properly\n"));
+ return BCME_ERROR;
+ }
+ } else {
+ roam_trigger[1] = band;
+ error = wldev_ioctl_get(dev, WLC_GET_ROAM_TRIGGER, roam_trigger,
+ sizeof(roam_trigger));
+ if (error != BCME_OK) {
+ ANDROID_ERROR(("failed to get roam trigger (%d)\n", error));
+ return BCME_ERROR;
+ }
+ }
+
+ bytes_written = snprintf(command, total_len, "%s %d",
+ CMD_ROAMTRIGGER_GET, roam_trigger[0]);
+
+ return bytes_written;
+}
+
+#ifdef WBTEXT
+s32
+wl_cfg80211_wbtext_roam_trigger_config(struct net_device *ndev, int roam_trigger)
+{
+ char *commandp = NULL;
+ s32 ret = BCME_OK;
+ char *data;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ uint8 bandidx = 0;
+
+ commandp = (char *)MALLOCZ(cfg->osh, WLC_IOCTL_SMLEN);
+ if (unlikely(!commandp)) {
+ ANDROID_ERROR(("%s: failed to allocate memory\n", __func__));
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ ANDROID_INFO(("roam trigger %d\n", roam_trigger));
+ if (roam_trigger > 0) {
+ ANDROID_ERROR(("Invalid roam trigger value %d\n", roam_trigger));
+ goto exit;
+ }
+
+ for (bandidx = 0; bandidx < MAXBANDS; bandidx++) {
+ char *band;
+ int tri0, tri1, low0, low1, cu0, cu1, dur0, dur1;
+ int tri0_dflt;
+ if (bandidx == BAND_5G_INDEX) {
+ band = "a";
+ tri0_dflt = DEFAULT_WBTEXT_CU_RSSI_TRIG_A;
+ } else {
+ band = "b";
+ tri0_dflt = DEFAULT_WBTEXT_CU_RSSI_TRIG_B;
+ }
+
+ /* Get ROAM Profile
+ * WBTEXT_PROFILE_CONFIG band
+ */
+ bzero(commandp, WLC_IOCTL_SMLEN);
+ snprintf(commandp, WLC_IOCTL_SMLEN, "%s %s",
+ CMD_WBTEXT_PROFILE_CONFIG, band);
+ data = (commandp + strlen(CMD_WBTEXT_PROFILE_CONFIG) + 1);
+ wl_cfg80211_wbtext_config(ndev, data, commandp, WLC_IOCTL_SMLEN);
+
+ /* Set ROAM Profile
+ * WBTEXT_PROFILE_CONFIG band -70 roam_trigger 70 10 roam_trigger -128 0 10
+ */
+ sscanf(commandp, "RSSI[%d,%d] CU(trigger:%d%%: duration:%ds)"
+ "RSSI[%d,%d] CU(trigger:%d%%: duration:%ds)",
+ &tri0, &low0, &cu0, &dur0, &tri1, &low1, &cu1, &dur1);
+
+ if (tri0_dflt <= roam_trigger) {
+ tri0 = roam_trigger + 1;
+ } else {
+ tri0 = tri0_dflt;
+ }
+
+ memset_s(commandp, WLC_IOCTL_SMLEN, 0, WLC_IOCTL_SMLEN);
+ snprintf(commandp, WLC_IOCTL_SMLEN, "%s %s %d %d %d %d %d %d %d %d",
+ CMD_WBTEXT_PROFILE_CONFIG, band,
+ tri0, roam_trigger, cu0, dur0, roam_trigger, low1, cu1, dur1);
+
+ ret = wl_cfg80211_wbtext_config(ndev, data, commandp, WLC_IOCTL_SMLEN);
+ if (ret != BCME_OK) {
+ ANDROID_ERROR(("Failed to set roam_prof %s error = %d\n", data, ret));
+ goto exit;
+ }
+ }
+
+exit:
+ if (commandp) {
+ MFREE(cfg->osh, commandp, WLC_IOCTL_SMLEN);
+ }
+ return ret;
+}
+#endif /* WBTEXT */
+
+static int
+wl_android_set_roam_trigger_legacy(struct net_device *dev, char* command)
+{
+ int roam_trigger[2] = {0, 0};
+ int error;
+
+ sscanf(command, "%*s %10d", &roam_trigger[0]);
+ if (roam_trigger[0] >= 0) {
+ ANDROID_ERROR(("wrong roam trigger value (%d)\n", roam_trigger[0]));
+ return BCME_ERROR;
+ }
+
+ if (wl_android_check_wbtext_policy(dev)) {
+#ifdef WBTEXT
+ error = wl_cfg80211_wbtext_roam_trigger_config(dev, roam_trigger[0]);
+ if (error != BCME_OK) {
+ ANDROID_ERROR(("failed to set roam prof trigger (%d)\n", error));
+ return BCME_ERROR;
+ }
+#endif /* WBTEXT */
+ } else {
+ roam_trigger[1] = WLC_BAND_ALL;
+ error = wldev_ioctl_set(dev, WLC_SET_ROAM_TRIGGER, roam_trigger,
+ sizeof(roam_trigger));
+ if (error != BCME_OK) {
+ ANDROID_ERROR(("failed to set roam trigger (%d)\n", error));
+ return BCME_ERROR;
+ }
+ }
+
+ return BCME_OK;
+}
+
+int wl_android_set_roam_delta(
+ struct net_device *dev, char* command)
+{
+ int roam_delta[2];
+
+ sscanf(command, "%*s %10d", &roam_delta[0]);
+ roam_delta[1] = WLC_BAND_ALL;
+
+ return wldev_ioctl_set(dev, WLC_SET_ROAM_DELTA, roam_delta,
+ sizeof(roam_delta));
+}
+
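+/* Reads roam delta per band, trying 2.4 GHz first, then 5 GHz, and then
+ * 6 GHz when WL_6G_BAND is defined; the first successful read is reported. */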
+static int wl_android_get_roam_delta(
+ struct net_device *dev, char *command, int total_len)
+{
+ int bytes_written;
+ int roam_delta[2] = {0, 0};
+
+ roam_delta[1] = WLC_BAND_2G;
+ if (wldev_ioctl_get(dev, WLC_GET_ROAM_DELTA, roam_delta,
+ sizeof(roam_delta))) {
+ roam_delta[1] = WLC_BAND_5G;
+#ifdef WL_6G_BAND
+ if (wldev_ioctl_get(dev, WLC_GET_ROAM_DELTA, roam_delta,
+ sizeof(roam_delta))) {
+ roam_delta[1] = WLC_BAND_6G;
+#endif /* WL_6G_BAND */
+ if (wldev_ioctl_get(dev, WLC_GET_ROAM_DELTA, roam_delta,
+ sizeof(roam_delta))) {
+ return -1;
+ }
+#ifdef WL_6G_BAND
+ }
+#endif /* WL_6G_BAND */
+ }
+
+ bytes_written = snprintf(command, total_len, "%s %d",
+ CMD_ROAMDELTA_GET, roam_delta[0]);
+
+ return bytes_written;
+}
+
+int wl_android_set_roam_scan_period(
+ struct net_device *dev, char* command)
+{
+ int roam_scan_period = 0;
+
+ sscanf(command, "%*s %10d", &roam_scan_period);
+ return wldev_ioctl_set(dev, WLC_SET_ROAM_SCAN_PERIOD, &roam_scan_period,
+ sizeof(roam_scan_period));
+}
+
+static int wl_android_get_roam_scan_period(
+ struct net_device *dev, char *command, int total_len)
+{
+ int bytes_written;
+ int roam_scan_period = 0;
+
+ if (wldev_ioctl_get(dev, WLC_GET_ROAM_SCAN_PERIOD, &roam_scan_period,
+ sizeof(roam_scan_period)))
+ return -1;
+
+ bytes_written = snprintf(command, total_len, "%s %d",
+ CMD_ROAMSCANPERIOD_GET, roam_scan_period);
+
+ return bytes_written;
+}
+
+int wl_android_set_full_roam_scan_period(
+ struct net_device *dev, char* command)
+{
+ int error = 0;
+ int full_roam_scan_period = 0;
+ char smbuf[WLC_IOCTL_SMLEN];
+
+ sscanf(command+sizeof("SETFULLROAMSCANPERIOD"), "%d", &full_roam_scan_period);
+ WL_TRACE(("fullroamperiod = %d\n", full_roam_scan_period));
+
+ error = wldev_iovar_setbuf(dev, "fullroamperiod", &full_roam_scan_period,
+ sizeof(full_roam_scan_period), smbuf, sizeof(smbuf), NULL);
+ if (error) {
+ ANDROID_ERROR(("Failed to set full roam scan period, error = %d\n", error));
+ }
+
+ return error;
+}
+
+static int wl_android_get_full_roam_scan_period(
+ struct net_device *dev, char *command, int total_len)
+{
+ int error;
+ int bytes_written;
+ int full_roam_scan_period = 0;
+
+ error = wldev_iovar_getint(dev, "fullroamperiod", &full_roam_scan_period);
+
+ if (error) {
+ ANDROID_ERROR(("%s: get full roam scan period failed code %d\n",
+ __func__, error));
+ return -1;
+ } else {
+ ANDROID_INFO(("%s: get full roam scan period %d\n", __func__, full_roam_scan_period));
+ }
+
+ bytes_written = snprintf(command, total_len, "%s %d",
+ CMD_FULLROAMSCANPERIOD_GET, full_roam_scan_period);
+
+ return bytes_written;
+}
+
+int wl_android_set_country_rev(
+ struct net_device *dev, char* command)
+{
+ int error = 0;
+ wl_country_t cspec = {{0}, 0, {0} };
+ char country_code[WLC_CNTRY_BUF_SZ];
+ char smbuf[WLC_IOCTL_SMLEN];
+ int rev = 0;
+
+ bzero(country_code, sizeof(country_code));
+ sscanf(command+sizeof("SETCOUNTRYREV"), "%3s %10d", country_code, &rev);
+ WL_TRACE(("country_code = %s, rev = %d\n", country_code, rev));
+
+ memcpy(cspec.country_abbrev, country_code, sizeof(country_code));
+ memcpy(cspec.ccode, country_code, sizeof(country_code));
+ cspec.rev = rev;
+
+ error = wldev_iovar_setbuf(dev, "country", (char *)&cspec,
+ sizeof(cspec), smbuf, sizeof(smbuf), NULL);
+
+ if (error) {
+ ANDROID_ERROR(("wl_android_set_country_rev: set country '%s/%d' failed code %d\n",
+ cspec.ccode, cspec.rev, error));
+ } else {
+ dhd_bus_country_set(dev, &cspec, true);
+ ANDROID_INFO(("wl_android_set_country_rev: set country '%s/%d'\n",
+ cspec.ccode, cspec.rev));
+ }
+
+ return error;
+}
+
+static int wl_android_get_country_rev(
+ struct net_device *dev, char *command, int total_len)
+{
+ int error;
+ int bytes_written;
+ char smbuf[WLC_IOCTL_SMLEN];
+ wl_country_t cspec;
+
+ error = wldev_iovar_getbuf(dev, "country", NULL, 0, smbuf,
+ sizeof(smbuf), NULL);
+
+ if (error) {
+ ANDROID_ERROR(("wl_android_get_country_rev: get country failed code %d\n",
+ error));
+ return -1;
+ } else {
+ memcpy(&cspec, smbuf, sizeof(cspec));
+ ANDROID_INFO(("wl_android_get_country_rev: get country '%c%c %d'\n",
+ cspec.ccode[0], cspec.ccode[1], cspec.rev));
+ }
+
+ bytes_written = snprintf(command, total_len, "%s %c%c %d",
+ CMD_COUNTRYREV_GET, cspec.ccode[0], cspec.ccode[1], cspec.rev);
+
+ return bytes_written;
+}
+#endif /* ROAM_API */
+
+#ifdef WES_SUPPORT
+int wl_android_get_roam_scan_control(struct net_device *dev, char *command, int total_len)
+{
+ int error = 0;
+ int bytes_written = 0;
+ int mode = 0;
+
+ error = get_roamscan_mode(dev, &mode);
+ if (error) {
+ ANDROID_ERROR(("wl_android_get_roam_scan_control: Failed to get Scan Control,"
+ " error = %d\n", error));
+ return -1;
+ }
+
+ bytes_written = snprintf(command, total_len, "%s %d", CMD_GETROAMSCANCONTROL, mode);
+
+ return bytes_written;
+}
+
+int wl_android_set_roam_scan_control(struct net_device *dev, char *command)
+{
+ int error = 0;
+ int mode = 0;
+
+ if (sscanf(command, "%*s %d", &mode) != 1) {
+ ANDROID_ERROR(("wl_android_set_roam_scan_control: Failed to get Parameter\n"));
+ return -1;
+ }
+
+ error = set_roamscan_mode(dev, mode);
+ if (error) {
+ ANDROID_ERROR(("wl_android_set_roam_scan_control: Failed to set Scan Control %d,"
+ " error = %d\n",
+ mode, error));
+ return -1;
+ }
+
+ return 0;
+}
+
+int
+wl_android_get_roam_scan_channels(struct net_device *dev, char *command, int total_len, char *cmd)
+{
+ int bytes_written = 0;
+ chanspec_t chanspecs[MAX_ROAM_CHANNEL] = {0};
+ int nchan = 0, i = 0;
+ int buf_avail, len;
+
+ nchan = get_roamscan_chanspec_list(dev, chanspecs);
+ if (nchan < 0) {
+ ANDROID_ERROR(("Failed to Set roamscan channels, n_chan = %d\n", nchan));
+ return BCME_ERROR;
+ }
+
+ bytes_written = snprintf(command, total_len, "%s %d", cmd, nchan);
+
+ buf_avail = total_len - bytes_written;
+ for (i = 0; i < nchan; i++) {
+ /* A return value of 'buf_avail' or more means that the output was truncated */
+ len = snprintf(command + bytes_written, buf_avail, " %d",
+ CHSPEC_CHANNEL(chanspecs[i]));
+ if (len >= buf_avail) {
+ ANDROID_ERROR(("Insufficient memory, %d bytes\n", total_len));
+ ANDROID_ERROR(("Insufficient memory, %d bytes\n", total_len));
+ bytes_written = -1;
+ break;
+ }
+ /* 'buf_avail' decremented by number of bytes written */
+ buf_avail -= len;
+ bytes_written += len;
+ }
+ ANDROID_INFO(("%s\n", command));
+ return bytes_written;
+}
+
+#define CHANNEL_IDX 1
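+/* SETROAMSCANCHANNELS carries a binary payload after the command name:
+ * byte 0 is the channel count, bytes 1..n are channel numbers; each is
+ * converted to a 20 MHz chanspec below. */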
+int
+wl_android_set_roam_scan_channels(struct net_device *dev, char *command)
+{
+ int error = BCME_OK, i;
+ unsigned char *p = (unsigned char *)(command + strlen(CMD_SETROAMSCANCHANNELS) + 1);
+ uint16 nchan = 0, channel = 0;
+ chanspec_t chanspecs[MAX_ROAM_CHANNEL] = {0};
+
+ nchan = p[0];
+ if (nchan > MAX_ROAM_CHANNEL) {
+ ANDROID_ERROR(("Failed to Set roamscan channnels, n_chan = %d\n", nchan));
+ return BCME_BADARG;
+ }
+
+ for (i = 0; i < nchan; i++) {
+ channel = p[i + CHANNEL_IDX];
+ /* Convert chanspec from channel */
+ chanspecs[i] = wf_channel2chspec(channel, WL_CHANSPEC_BW_20);
+ }
+
+ error = set_roamscan_chanspec_list(dev, nchan, chanspecs);
+ if (error) {
+ ANDROID_ERROR(("Failed to Set Scan Channels %d, error = %d\n", p[0], error));
+ return error;
+ }
+
+ return error;
+}
+
+int
+wl_android_add_roam_scan_channels(struct net_device *dev, char *command, uint cmdlen)
+{
+ int i, error = BCME_OK;
+ char *pcmd, *token;
+ uint16 nchan = 0, channel = 0;
+ chanspec_t chanspecs[MAX_ROAM_CHANNEL] = {0};
+
+ pcmd = (command + cmdlen + 1);
+ /* Parse roam channel count */
+ token = bcmstrtok(&pcmd, " ", NULL);
+ if (!token) {
+ ANDROID_ERROR(("Bad argument!\n"));
+ return BCME_BADARG;
+ }
+ nchan = bcm_atoi(token);
+ if (nchan > MAX_ROAM_CHANNEL) {
+ ANDROID_ERROR(("Failed to Add roamscan channnels, n_chan = %d\n", nchan));
+ return BCME_BADARG;
+ }
+
+ for (i = 0; i < nchan; i++) {
+ /* Parse roam channel list */
+ token = bcmstrtok(&pcmd, " ", NULL);
+ if (!token) {
+ ANDROID_ERROR(("Bad argument!\n"));
+ return BCME_BADARG;
+ }
+ channel = bcm_atoi(token);
+ /* Convert chanspec from channel */
+ if (channel > 0) {
+ chanspecs[i] = wf_channel2chspec(channel, WL_CHANSPEC_BW_20);
+ }
+ }
+
+ error = add_roamscan_chanspec_list(dev, nchan, chanspecs);
+ if (error) {
+ ANDROID_ERROR(("Failed to add Scan Channels %s, error = %d\n", pcmd, error));
+ }
+
+ return error;
+}
+
+int
+wl_android_get_roam_scan_freqs(struct net_device *dev, char *command, int total_len, char *cmd)
+{
+ int bytes_written = 0;
+ chanspec_t chanspecs[MAX_ROAM_CHANNEL] = {0};
+ int nchan = 0, i = 0;
+ int buf_avail, len;
+ u32 freq = 0;
+ uint start_factor = 0;
+
+ nchan = get_roamscan_chanspec_list(dev, chanspecs);
+ if (nchan < 0) {
+ ANDROID_ERROR(("Failed to Get roamscan frequencies, n_chan = %d\n", nchan));
+ return BCME_ERROR;
+ }
+
+ bytes_written = snprintf(command, total_len, "%s %d", cmd, nchan);
+
+ buf_avail = total_len - bytes_written;
+ for (i = 0; i < nchan; i++) {
+ start_factor = WF_CHAN_FACTOR_2_4_G;
+ if (CHSPEC_BAND(chanspecs[i]) == WL_CHANSPEC_BAND_5G) {
+ start_factor = WF_CHAN_FACTOR_5_G;
+ } else if (CHSPEC_BAND(chanspecs[i]) == WL_CHANSPEC_BAND_6G) {
+ start_factor = WF_CHAN_FACTOR_6_G;
+ }
+ freq = wf_channel2mhz(CHSPEC_CHANNEL(chanspecs[i]), start_factor);
+ /* A return value of 'buf_avail' or more means that the output was truncated */
+ len = snprintf(command + bytes_written, buf_avail, " %d", freq);
+ if (len >= buf_avail) {
+ ANDROID_ERROR(("Insufficient memory, %d bytes\n", total_len));
+ bytes_written = -1;
+ break;
+ }
+ /* 'buf_avail' decremented by number of bytes written */
+ buf_avail -= len;
+ bytes_written += len;
+ }
+ ANDROID_INFO(("%s\n", command));
+ return bytes_written;
+}
+
+int
+wl_android_set_roam_scan_freqs(struct net_device *dev, char *command)
+{
+ int error = BCME_OK, i;
+ char *pcmd, *token;
+ uint16 nchan = 0, freq = 0;
+ chanspec_t chanspecs[MAX_ROAM_CHANNEL] = {0};
+
+ pcmd = (command + strlen(CMD_SETROAMSCANFREQS) + 1);
+ /* Parse roam channel count */
+ token = bcmstrtok(&pcmd, " ", NULL);
+ if (!token) {
+ ANDROID_ERROR(("Bad argument!\n"));
+ return BCME_BADARG;
+ }
+ nchan = bcm_atoi(token);
+ if (nchan > MAX_ROAM_CHANNEL) {
+ ANDROID_ERROR(("Failed to Set roamscan frequencies, n_chan = %d\n", nchan));
+ return BCME_BADARG;
+ }
+
+ for (i = 0; i < nchan; i++) {
+ /* Parse roam channel list */
+ token = bcmstrtok(&pcmd, " ", NULL);
+ if (!token) {
+ ANDROID_ERROR(("Bad argument!\n"));
+ return BCME_BADARG;
+ }
+ freq = bcm_atoi(token);
+ /* Convert chanspec from frequency */
+ chanspecs[i] = wl_freq_to_chanspec(freq);
+ }
+
+ error = set_roamscan_chanspec_list(dev, nchan, chanspecs);
+ if (error) {
+ ANDROID_ERROR(("Failed to set Scan Channels %d, error = %d\n", nchan, error));
+ return error;
+ }
+
+ return error;
+}
+
+int
+wl_android_add_roam_scan_freqs(struct net_device *dev, char *command, uint cmdlen)
+{
+ int i, error = BCME_OK;
+ char *pcmd, *token;
+ uint16 nchan = 0, freq = 0;
+ chanspec_t chanspecs[MAX_ROAM_CHANNEL] = {0};
+
+ pcmd = (command + cmdlen + 1);
+ /* Parse roam channel count */
+ token = bcmstrtok(&pcmd, " ", NULL);
+ if (!token) {
+ ANDROID_ERROR(("Bad argument!\n"));
+ return BCME_BADARG;
+ }
+ nchan = bcm_atoi(token);
+ if (nchan > MAX_ROAM_CHANNEL) {
+ ANDROID_ERROR(("Failed to Add roamscan frequencies, n_chan = %d\n", nchan));
+ return BCME_BADARG;
+ }
+
+ for (i = 0; i < nchan; i++) {
+ /* Parse roam channel list */
+ token = bcmstrtok(&pcmd, " ", NULL);
+ if (!token) {
+ ANDROID_ERROR(("Bad argument!\n"));
+ return BCME_BADARG;
+ }
+ freq = bcm_atoi(token);
+ /* Convert chanspec from frequency */
+ if (freq > 0) {
+ chanspecs[i] = wl_freq_to_chanspec(freq);
+ }
+ }
+
+ error = add_roamscan_chanspec_list(dev, nchan, chanspecs);
+ if (error) {
+ ANDROID_ERROR(("Failed to add Scan Channels %s, error = %d\n", pcmd, error));
+ }
+
+ return error;
+}
+
+int
+wl_android_get_scan_channel_time(struct net_device *dev, char *command, int total_len)
+{
+ int error = BCME_OK;
+ int bytes_written = 0;
+ int time = 0;
+
+ error = wldev_ioctl_get(dev, WLC_GET_SCAN_CHANNEL_TIME, &time, sizeof(time));
+ if (error) {
+ ANDROID_ERROR(("Failed to get Scan Channel Time, error = %d\n", error));
+ return BCME_ERROR;
+ }
+
+ bytes_written = snprintf(command, total_len, "%s %d", CMD_GETSCANCHANNELTIME, time);
+
+ return bytes_written;
+}
+
+int
+wl_android_set_scan_channel_time(struct net_device *dev, char *command)
+{
+ int error = BCME_OK;
+ int time = 0;
+
+ if (sscanf(command, "%*s %d", &time) != 1) {
+ ANDROID_ERROR(("Failed to get Parameter\n"));
+ return BCME_ERROR;
+ }
+
+ if (time == 0) {
+ /* Set default value when Private param is 0. */
+ time = DHD_SCAN_ASSOC_ACTIVE_TIME;
+ }
+#ifdef CUSTOMER_SCAN_TIMEOUT_SETTING
+ wl_cfg80211_custom_scan_time(dev, WL_CUSTOM_SCAN_CHANNEL_TIME, time);
+ error = wldev_ioctl_set(dev, WLC_SET_SCAN_CHANNEL_TIME, &time, sizeof(time));
+#endif /* CUSTOMER_SCAN_TIMEOUT_SETTING */
+ if (error) {
+ ANDROID_ERROR(("Failed to set Scan Channel Time %d, error = %d\n", time, error));
+ return BCME_ERROR;
+ }
+
+ return error;
+}
+
+int
+wl_android_get_scan_unassoc_time(struct net_device *dev, char *command, int total_len)
+{
+ int error = BCME_OK;
+ int bytes_written = 0;
+ int time = 0;
+
+ error = wldev_ioctl_get(dev, WLC_GET_SCAN_UNASSOC_TIME, &time, sizeof(time));
+ if (error) {
+ ANDROID_ERROR(("Failed to get Scan Unassoc Time, error = %d\n", error));
+ return BCME_ERROR;
+ }
+
+ bytes_written = snprintf(command, total_len, "%s %d", CMD_GETSCANUNASSOCTIME, time);
+
+ return bytes_written;
+}
+
+int
+wl_android_set_scan_unassoc_time(struct net_device *dev, char *command)
+{
+ int error = BCME_OK;
+ int time = 0;
+
+ if (sscanf(command, "%*s %d", &time) != 1) {
+ ANDROID_ERROR(("Failed to get Parameter\n"));
+ return BCME_ERROR;
+ }
+ if (time == 0) {
+ /* Set default value when Private param is 0. */
+ time = DHD_SCAN_UNASSOC_ACTIVE_TIME;
+ }
+#ifdef CUSTOMER_SCAN_TIMEOUT_SETTING
+ wl_cfg80211_custom_scan_time(dev, WL_CUSTOM_SCAN_UNASSOC_TIME, time);
+ error = wldev_ioctl_set(dev, WLC_SET_SCAN_UNASSOC_TIME, &time, sizeof(time));
+#endif /* CUSTOMER_SCAN_TIMEOUT_SETTING */
+ if (error) {
+ ANDROID_ERROR(("Failed to set Scan Unassoc Time %d, error = %d\n", time, error));
+ return BCME_ERROR;
+ }
+
+ return error;
+}
+
+int
+wl_android_get_scan_passive_time(struct net_device *dev, char *command, int total_len)
+{
+ int error = BCME_OK;
+ int bytes_written = 0;
+ int time = 0;
+
+ error = wldev_ioctl_get(dev, WLC_GET_SCAN_PASSIVE_TIME, &time, sizeof(time));
+ if (error) {
+ ANDROID_ERROR(("Failed to get Scan Passive Time, error = %d\n", error));
+ return BCME_ERROR;
+ }
+
+ bytes_written = snprintf(command, total_len, "%s %d", CMD_GETSCANPASSIVETIME, time);
+
+ return bytes_written;
+}
+
+int
+wl_android_set_scan_passive_time(struct net_device *dev, char *command)
+{
+ int error = BCME_OK;
+ int time = 0;
+
+ if (sscanf(command, "%*s %d", &time) != 1) {
+ ANDROID_ERROR(("Failed to get Parameter\n"));
+ return BCME_ERROR;
+ }
+ if (time == 0) {
+ /* Set default value when Private param is 0. */
+ time = DHD_SCAN_PASSIVE_TIME;
+ }
+#ifdef CUSTOMER_SCAN_TIMEOUT_SETTING
+ wl_cfg80211_custom_scan_time(dev, WL_CUSTOM_SCAN_PASSIVE_TIME, time);
+ error = wldev_ioctl_set(dev, WLC_SET_SCAN_PASSIVE_TIME, &time, sizeof(time));
+#endif /* CUSTOMER_SCAN_TIMEOUT_SETTING */
+ if (error) {
+ ANDROID_ERROR(("Failed to set Scan Passive Time %d, error = %d\n", time, error));
+ return BCME_ERROR;
+ }
+
+ return error;
+}
+
+int
+wl_android_get_scan_home_time(struct net_device *dev, char *command, int total_len)
+{
+ int error = BCME_OK;
+ int bytes_written = 0;
+ int time = 0;
+
+ error = wldev_ioctl_get(dev, WLC_GET_SCAN_HOME_TIME, &time, sizeof(time));
+ if (error) {
+ ANDROID_ERROR(("Failed to get Scan Home Time, error = %d\n", error));
+ return BCME_ERROR;
+ }
+
+ bytes_written = snprintf(command, total_len, "%s %d", CMD_GETSCANHOMETIME, time);
+
+ return bytes_written;
+}
+
+int wl_android_set_scan_home_time(struct net_device *dev, char *command)
+{
+ int error = BCME_OK;
+ int time = 0;
+
+ if (sscanf(command, "%*s %d", &time) != 1) {
+ ANDROID_ERROR(("Failed to get Parameter\n"));
+ return BCME_ERROR;
+ }
+ if (time == 0) {
+ /* Set default value when Private param is 0. */
+ time = DHD_SCAN_HOME_TIME;
+ }
+#ifdef CUSTOMER_SCAN_TIMEOUT_SETTING
+ wl_cfg80211_custom_scan_time(dev, WL_CUSTOM_SCAN_HOME_TIME, time);
+ error = wldev_ioctl_set(dev, WLC_SET_SCAN_HOME_TIME, &time, sizeof(time));
+#endif /* CUSTOMER_SCAN_TIMEOUT_SETTING */
+ if (error) {
+ ANDROID_ERROR(("Failed to set Scan Home Time %d, error = %d\n", time, error));
+ return BCME_ERROR;
+ }
+
+ return error;
+}
+
+int
+wl_android_get_scan_home_away_time(struct net_device *dev, char *command, int total_len)
+{
+ int error = BCME_OK;
+ int bytes_written = 0;
+ int time = 0;
+
+ error = wldev_iovar_getint(dev, "scan_home_away_time", &time);
+ if (error) {
+ ANDROID_ERROR(("Failed to get Scan Home Away Time, error = %d\n", error));
+ return BCME_ERROR;
+ }
+
+ bytes_written = snprintf(command, total_len, "%s %d", CMD_GETSCANHOMEAWAYTIME, time);
+
+ return bytes_written;
+}
+
+int
+wl_android_set_scan_home_away_time(struct net_device *dev, char *command)
+{
+ int error = BCME_OK;
+ int time = 0;
+
+ if (sscanf(command, "%*s %d", &time) != 1) {
+ ANDROID_ERROR(("Failed to get Parameter\n"));
+ return BCME_ERROR;
+ }
+ if (time == 0) {
+ /* Set default value when Private param is 0. */
+ time = DHD_SCAN_HOME_AWAY_TIME;
+ }
+#ifdef CUSTOMER_SCAN_TIMEOUT_SETTING
+ wl_cfg80211_custom_scan_time(dev, WL_CUSTOM_SCAN_HOME_AWAY_TIME, time);
+ error = wldev_iovar_setint(dev, "scan_home_away_time", time);
+#endif /* CUSTOMER_SCAN_TIMEOUT_SETTING */
+ if (error) {
+ ANDROID_ERROR(("Failed to set Scan Home Away Time %d, error = %d\n", time, error));
+ return BCME_ERROR;
+ }
+
+ return error;
+}
+
+int
+wl_android_get_scan_nprobes(struct net_device *dev, char *command, int total_len)
+{
+ int error = BCME_OK;
+ int bytes_written = 0;
+ int num = 0;
+
+ error = wldev_ioctl_get(dev, WLC_GET_SCAN_NPROBES, &num, sizeof(num));
+ if (error) {
+ ANDROID_ERROR(("Failed to get Scan NProbes, error = %d\n", error));
+ return BCME_ERROR;
+ }
+
+ bytes_written = snprintf(command, total_len, "%s %d", CMD_GETSCANNPROBES, num);
+
+ return bytes_written;
+}
+
+int
+wl_android_set_scan_nprobes(struct net_device *dev, char *command)
+{
+ int error = BCME_OK;
+ int num = 0;
+
+ if (sscanf(command, "%*s %d", &num) != 1) {
+ ANDROID_ERROR(("Failed to get Parameter\n"));
+ return BCME_ERROR;
+ }
+
+ error = wldev_ioctl_set(dev, WLC_SET_SCAN_NPROBES, &num, sizeof(num));
+ if (error) {
+ ANDROID_ERROR(("Failed to set Scan NProbes %d, error = %d\n", num, error));
+ return BCME_ERROR;
+ }
+
+ return error;
+}
+
+int
+wl_android_get_scan_dfs_channel_mode(struct net_device *dev, char *command, int total_len)
+{
+ int error = BCME_OK;
+ int bytes_written = 0;
+ int mode = 0;
+ int scan_passive_time = 0;
+
+ error = wldev_iovar_getint(dev, "scan_passive_time", &scan_passive_time);
+ if (error) {
+ ANDROID_ERROR(("Failed to get Passive Time, error = %d\n", error));
+ return BCME_ERROR;
+ }
+
+ if (scan_passive_time == 0) {
+ mode = 0;
+ } else {
+ mode = 1;
+ }
+
+ bytes_written = snprintf(command, total_len, "%s %d", CMD_GETDFSSCANMODE, mode);
+
+ return bytes_written;
+}
+
+int
+wl_android_set_scan_dfs_channel_mode(struct net_device *dev, char *command)
+{
+ int error = BCME_OK;
+ int mode = 0;
+ int scan_passive_time = 0;
+
+ if (sscanf(command, "%*s %d", &mode) != 1) {
+ ANDROID_ERROR(("Failed to get Parameter\n"));
+ return BCME_ERROR;
+ }
+
+ if (mode == 1) {
+ scan_passive_time = DHD_SCAN_PASSIVE_TIME;
+ } else if (mode == 0) {
+ scan_passive_time = 0;
+ } else {
+ ANDROID_ERROR(("Failed to set Scan DFS channel mode %d\n", mode));
+ return BCME_ERROR;
+ }
+ error = wldev_iovar_setint(dev, "scan_passive_time", scan_passive_time);
+ if (error) {
+ ANDROID_ERROR(("Failed to set Scan Passive Time %d, error = %d\n",
+ scan_passive_time, error));
+ return BCME_ERROR;
+ }
+
+ return error;
+}
+
+#define JOINPREFFER_BUF_SIZE 12
+
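+/* SETJOINPREFER expects exactly 24 hex characters (12 bytes, MSB first)
+ * encoding the join_pref TLVs. As parsed below, the specific pattern
+ * 01 02 00 00 03 02 00 00 04 02 00 00 re-enables WBTEXT; any other value
+ * disables it before join_pref is applied. */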
+static int
+wl_android_set_join_prefer(struct net_device *dev, char *command)
+{
+ int error = BCME_OK;
+ char smbuf[WLC_IOCTL_SMLEN];
+ uint8 buf[JOINPREFFER_BUF_SIZE];
+ char *pcmd;
+ int total_len_left;
+ int i;
+ char hex[] = "XX";
+#ifdef WBTEXT
+ int turn_on = OFF;
+ char clear[] = { 0x01, 0x02, 0x00, 0x00, 0x03, 0x02, 0x00, 0x00, 0x04, 0x02, 0x00, 0x00 };
+#endif /* WBTEXT */
+
+ pcmd = command + strlen(CMD_SETJOINPREFER) + 1;
+ total_len_left = strlen(pcmd);
+
+ bzero(buf, sizeof(buf));
+
+ if (total_len_left != JOINPREFFER_BUF_SIZE << 1) {
+ ANDROID_ERROR(("wl_android_set_join_prefer: Failed to get Parameter\n"));
+ return BCME_ERROR;
+ }
+
+ /* Store the MSB first, as required by join_pref */
+ for (i = 0; i < JOINPREFFER_BUF_SIZE; i++) {
+ hex[0] = *pcmd++;
+ hex[1] = *pcmd++;
+ buf[i] = (uint8)simple_strtoul(hex, NULL, 16);
+ }
+
+#ifdef WBTEXT
+ /* Set WBTEXT mode */
+ turn_on = memcmp(buf, clear, sizeof(buf)) == 0 ? TRUE : FALSE;
+ error = wl_android_wbtext_enable(dev, turn_on);
+ if (error) {
+ ANDROID_ERROR(("Failed to set WBTEXT(%d) = %s\n",
+ error, (turn_on ? "Enable" : "Disable")));
+ }
+#endif /* WBTEXT */
+
+ prhex("join pref", (uint8 *)buf, JOINPREFFER_BUF_SIZE);
+ error = wldev_iovar_setbuf(dev, "join_pref", buf, JOINPREFFER_BUF_SIZE,
+ smbuf, sizeof(smbuf), NULL);
+ if (error) {
+ ANDROID_ERROR(("Failed to set join_pref, error = %d\n", error));
+ }
+
+ return error;
+}
+
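+/* SENDACTIONFRAME is followed by a binary android_wifi_af_params_t:
+ * destination BSSID, dwell time, channel, and frame body. A channel < 0
+ * means "use the current channel", and an unparsable BSSID string falls
+ * back to the currently associated BSSID. */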
+int wl_android_send_action_frame(struct net_device *dev, char *command, int total_len)
+{
+ int error = -1;
+ android_wifi_af_params_t *params = NULL;
+ wl_action_frame_t *action_frame = NULL;
+ wl_af_params_t *af_params = NULL;
+ char *smbuf = NULL;
+ struct ether_addr tmp_bssid;
+ int tmp_channel = 0;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+
+ if (total_len <
+ (strlen(CMD_SENDACTIONFRAME) + 1 + sizeof(android_wifi_af_params_t))) {
+ ANDROID_ERROR(("wl_android_send_action_frame: Invalid parameters \n"));
+ goto send_action_frame_out;
+ }
+
+ params = (android_wifi_af_params_t *)(command + strlen(CMD_SENDACTIONFRAME) + 1);
+
+ if ((uint16)params->len > ANDROID_WIFI_ACTION_FRAME_SIZE) {
+ ANDROID_ERROR(("wl_android_send_action_frame: Requested action frame len"
+ " was out of range(%d)\n",
+ params->len));
+ goto send_action_frame_out;
+ }
+
+ smbuf = (char *)MALLOC(cfg->osh, WLC_IOCTL_MAXLEN);
+ if (smbuf == NULL) {
+ ANDROID_ERROR(("wl_android_send_action_frame: failed to allocated memory %d bytes\n",
+ WLC_IOCTL_MAXLEN));
+ goto send_action_frame_out;
+ }
+
+ af_params = (wl_af_params_t *)MALLOCZ(cfg->osh, WL_WIFI_AF_PARAMS_SIZE);
+ if (af_params == NULL) {
+ ANDROID_ERROR(("wl_android_send_action_frame: unable to allocate frame\n"));
+ goto send_action_frame_out;
+ }
+
+ bzero(&tmp_bssid, ETHER_ADDR_LEN);
+ if (bcm_ether_atoe((const char *)params->bssid, (struct ether_addr *)&tmp_bssid) == 0) {
+ bzero(&tmp_bssid, ETHER_ADDR_LEN);
+
+ error = wldev_ioctl_get(dev, WLC_GET_BSSID, &tmp_bssid, ETHER_ADDR_LEN);
+ if (error) {
+ bzero(&tmp_bssid, ETHER_ADDR_LEN);
+ ANDROID_ERROR(("wl_android_send_action_frame: failed to get bssid,"
+ " error=%d\n", error));
+ goto send_action_frame_out;
+ }
+ }
+
+ if (params->channel < 0) {
+ struct channel_info ci;
+ bzero(&ci, sizeof(ci));
+ error = wldev_ioctl_get(dev, WLC_GET_CHANNEL, &ci, sizeof(ci));
+ if (error) {
+ ANDROID_ERROR(("wl_android_send_action_frame: failed to get channel,"
+ " error=%d\n", error));
+ goto send_action_frame_out;
+ }
+
+ tmp_channel = ci.hw_channel;
+ }
+ else {
+ tmp_channel = params->channel;
+ }
+
+ af_params->channel = tmp_channel;
+ af_params->dwell_time = params->dwell_time;
+ memcpy(&af_params->BSSID, &tmp_bssid, ETHER_ADDR_LEN);
+ action_frame = &af_params->action_frame;
+
+ action_frame->packetId = 0;
+ memcpy(&action_frame->da, &tmp_bssid, ETHER_ADDR_LEN);
+ action_frame->len = (uint16)params->len;
+ memcpy(action_frame->data, params->data, action_frame->len);
+
+ error = wldev_iovar_setbuf(dev, "actframe", af_params,
+ sizeof(wl_af_params_t), smbuf, WLC_IOCTL_MAXLEN, NULL);
+ if (error) {
+ ANDROID_ERROR(("wl_android_send_action_frame: failed to set action frame,"
+ " error=%d\n", error));
+ }
+
+send_action_frame_out:
+ if (af_params) {
+ MFREE(cfg->osh, af_params, WL_WIFI_AF_PARAMS_SIZE);
+ }
+
+ if (smbuf) {
+ MFREE(cfg->osh, smbuf, WLC_IOCTL_MAXLEN);
+ }
+
+ if (error)
+ return -1;
+ else
+ return 0;
+}
+
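+/* REASSOC takes a BSSID string plus a channel field that doubles as a
+ * frequency: values that are valid 2G/5G channel numbers are converted with
+ * wf_channel2chspec(), anything else is treated as a frequency in MHz via
+ * wl_freq_to_chanspec(). */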
+int
+wl_android_reassoc(struct net_device *dev, char *command, int total_len)
+{
+ int error = BCME_OK;
+ android_wifi_reassoc_params_t *params = NULL;
+ chanspec_t channel;
+ u32 params_size;
+ wl_reassoc_params_t reassoc_params;
+ char pcmd[WL_PRIV_CMD_LEN + 1];
+
+ sscanf(command, "%"S(WL_PRIV_CMD_LEN)"s *", pcmd);
+ if (total_len < (strlen(pcmd) + 1 + sizeof(android_wifi_reassoc_params_t))) {
+ ANDROID_ERROR(("Invalid parameters %s\n", command));
+ return BCME_ERROR;
+ }
+ params = (android_wifi_reassoc_params_t *)(command + strlen(pcmd) + 1);
+
+ bzero(&reassoc_params, WL_REASSOC_PARAMS_FIXED_SIZE);
+
+ if (bcm_ether_atoe((const char *)params->bssid,
+ (struct ether_addr *)&reassoc_params.bssid) == 0) {
+ ANDROID_ERROR(("Invalid bssid \n"));
+ return BCME_BADARG;
+ }
+
+ if (params->channel < 0) {
+ ANDROID_ERROR(("Invalid Channel %d\n", params->channel));
+ return BCME_BADARG;
+ }
+
+ reassoc_params.chanspec_num = 1;
+
+ channel = params->channel;
+ if (CHANNEL_IS_2G(channel) || CHANNEL_IS_5G(channel)) {
+ /* If reassoc Param is BSSID and Channel */
+ reassoc_params.chanspec_list[0] = wf_channel2chspec(channel, WL_CHANSPEC_BW_20);
+ } else {
+ /* If reassoc Param is BSSID and Frequency */
+ reassoc_params.chanspec_list[0] = wl_freq_to_chanspec(channel);
+ }
+ params_size = WL_REASSOC_PARAMS_FIXED_SIZE + sizeof(chanspec_t);
+
+ error = wldev_ioctl_set(dev, WLC_REASSOC, &reassoc_params, params_size);
+ if (error) {
+ ANDROID_ERROR(("failed to reassoc, error=%d\n", error));
+ return error;
+ }
+ return error;
+}
+
+int wl_android_get_wes_mode(struct net_device *dev, char *command, int total_len)
+{
+ int bytes_written = 0;
+ int mode = 0;
+
+ mode = wl_cfg80211_get_wes_mode(dev);
+
+ bytes_written = snprintf(command, total_len, "%s %d", CMD_GETWESMODE, mode);
+
+ return bytes_written;
+}
+
+int wl_android_set_wes_mode(struct net_device *dev, char *command)
+{
+ int error = 0;
+ int mode = 0;
+
+ if (sscanf(command, "%*s %d", &mode) != 1) {
+ ANDROID_ERROR(("wl_android_set_wes_mode: Failed to get Parameter\n"));
+ return -1;
+ }
+
+ error = wl_cfg80211_set_wes_mode(dev, mode);
+ if (error) {
+ ANDROID_ERROR(("wl_android_set_wes_mode: Failed to set WES Mode %d, error = %d\n",
+ mode, error));
+ return -1;
+ }
+
+ return 0;
+}
+
+int
+wl_android_get_ncho_mode(struct net_device *dev, char *command, int total_len)
+{
+ int bytes_written = 0;
+ int mode = 0;
+
+ mode = wl_cfg80211_get_ncho_mode(dev);
+
+ bytes_written = snprintf(command, total_len, "%s %d", CMD_GETNCHOMODE, mode);
+
+ return bytes_written;
+}
+
+int
+wl_android_set_ncho_mode(struct net_device *dev, int mode)
+{
+ char cmd[WLC_IOCTL_SMLEN];
+ int error = BCME_OK;
+
+#ifdef WBTEXT
+ /* Set WBTEXT mode */
+ error = wl_android_wbtext_enable(dev, !mode);
+ if (error) {
+ ANDROID_ERROR(("Failed to set WBTEXT(%d) = %s\n",
+ error, (mode ? "Disable" : "Enable")));
+ }
+#endif /* WBTEXT */
+ /* Set Priority roam mode */
+ error = wl_android_priority_roam_enable(dev, !mode);
+ if (error) {
+ ANDROID_ERROR(("Failed to set Priority Roam(%d) = %s\n",
+ error, (mode ? "Disable" : "Enable")));
+ }
+#ifdef CONFIG_SILENT_ROAM
+ /* Set Silent roam mode */
+ error = wl_android_sroam_turn_on(dev, !mode);
+ if (error) {
+ ANDROID_ERROR(("Failed to set SROAM(%d) = %s\n",
+ error, (mode ? "Disable" : "Enable")));
+ }
+#endif /* CONFIG_SILENT_ROAM */
+ /* Set RCROAM(ROAMEXT) mode */
+ error = wl_android_rcroam_turn_on(dev, !mode);
+ if (error) {
+ ANDROID_ERROR(("Failed to set RCROAM(%d) = %s\n",
+ error, (mode ? "Disable" : "Enable")));
+ }
+
+ if (mode == OFF) {
+ /* restore NCHO set parameters */
+ bzero(cmd, WLC_IOCTL_SMLEN);
+ snprintf(cmd, WLC_IOCTL_SMLEN, "%s", CMD_RESTORE_SCAN_PARAMS);
+ error = wl_android_default_set_scan_params(dev, cmd, WLC_IOCTL_SMLEN);
+ if (error) {
+ ANDROID_ERROR(("Failed to set RESTORE_SCAN_PARAMS(%d)\n", error));
+ }
+
+ wl_cfg80211_set_wes_mode(dev, OFF);
+ set_roamscan_mode(dev, ROAMSCAN_MODE_NORMAL);
+ }
+
+ error = wl_cfg80211_set_ncho_mode(dev, mode);
+ if (error) {
+ ANDROID_ERROR(("Failed to set NCHO Mode %d, error = %d\n", mode, error));
+ }
+
+ return error;
+}
+
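+/* SET_PMK expects exactly 32 bytes of PMK material immediately after
+ * "SET_PMK "; it is pushed to the firmware through the okc_info_pmk iovar
+ * for opportunistic key caching. */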
+static int
+wl_android_set_pmk(struct net_device *dev, char *command, int total_len)
+{
+ uchar pmk[33];
+ int error = 0;
+ char smbuf[WLC_IOCTL_SMLEN];
+ dhd_pub_t *dhdp;
+#ifdef OKC_DEBUG
+ int i = 0;
+#endif
+
+ if (total_len < (strlen("SET_PMK ") + 32)) {
+ ANDROID_ERROR(("wl_android_set_pmk: Invalid argument\n"));
+ return -1;
+ }
+
+ dhdp = wl_cfg80211_get_dhdp(dev);
+ if (!dhdp) {
+ ANDROID_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
+ return -1;
+ }
+
+ bzero(pmk, sizeof(pmk));
+ DHD_STATLOG_CTRL(dhdp, ST(INSTALL_OKC_PMK), dhd_net2idx(dhdp->info, dev), 0);
+ memcpy((char *)pmk, command + strlen("SET_PMK "), 32);
+ error = wldev_iovar_setbuf(dev, "okc_info_pmk", pmk, 32, smbuf, sizeof(smbuf), NULL);
+ if (error) {
+ ANDROID_ERROR(("Failed to set PMK for OKC, error = %d\n", error));
+ }
+#ifdef OKC_DEBUG
+ ANDROID_ERROR(("PMK is "));
+ for (i = 0; i < 32; i++)
+ ANDROID_ERROR(("%02X ", pmk[i]));
+
+ ANDROID_ERROR(("\n"));
+#endif
+ return error;
+}
+
+static int
+wl_android_okc_enable(struct net_device *dev, char *command)
+{
+ int error = 0;
+ char okc_enable = 0;
+
+ okc_enable = command[strlen(CMD_OKC_ENABLE) + 1] - '0';
+ error = wldev_iovar_setint(dev, "okc_enable", okc_enable);
+ if (error) {
+ ANDROID_ERROR(("Failed to %s OKC, error = %d\n",
+ okc_enable ? "enable" : "disable", error));
+ }
+
+ return error;
+}
+
+static int
+wl_android_legacy_check_command(struct net_device *dev, char *command)
+{
+ int cnt = 0;
+
+ while (strlen(legacy_cmdlist[cnt]) > 0) {
+ if (strnicmp(command, legacy_cmdlist[cnt], strlen(legacy_cmdlist[cnt])) == 0) {
+ char cmd[WL_PRIV_CMD_LEN + 1];
+ sscanf(command, "%"S(WL_PRIV_CMD_LEN)"s ", cmd);
+ if (strlen(legacy_cmdlist[cnt]) == strlen(cmd)) {
+ return TRUE;
+ }
+ }
+ cnt++;
+ }
+ return FALSE;
+}
+
+static int
+wl_android_legacy_private_command(struct net_device *net, char *command, int total_len)
+{
+ int bytes_written = 0;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(net);
+
+ if (cfg->ncho_mode == ON) {
+ ANDROID_ERROR(("Enabled NCHO mode\n"));
+ /* In order to avoid Sequential error HANG event. */
+ return BCME_UNSUPPORTED;
+ }
+
+ /* ROAMSCAN CHANNELS Add, Get Command */
+ if (strnicmp(command, CMD_ADDROAMSCANCHLEGACY, strlen(CMD_ADDROAMSCANCHLEGACY)) == 0) {
+ bytes_written = wl_android_add_roam_scan_channels(net, command,
+ strlen(CMD_ADDROAMSCANCHLEGACY));
+ }
+ else if (strnicmp(command, CMD_GETROAMSCANCHLEGACY, strlen(CMD_GETROAMSCANCHLEGACY)) == 0) {
+ bytes_written = wl_android_get_roam_scan_channels(net, command, total_len,
+ CMD_GETROAMSCANCHLEGACY);
+ }
+ /* ROAMSCAN FREQUENCIES Add, Get Command */
+ else if (strnicmp(command, CMD_ADDROAMSCANFQLEGACY, strlen(CMD_ADDROAMSCANFQLEGACY)) == 0) {
+ bytes_written = wl_android_add_roam_scan_freqs(net, command,
+ strlen(CMD_ADDROAMSCANFQLEGACY));
+ }
+ else if (strnicmp(command, CMD_GETROAMSCANFQLEGACY, strlen(CMD_GETROAMSCANFQLEGACY)) == 0) {
+ bytes_written = wl_android_get_roam_scan_freqs(net, command, total_len,
+ CMD_GETROAMSCANFQLEGACY);
+ }
+ else if (strnicmp(command, CMD_GETROAMTRIGLEGACY, strlen(CMD_GETROAMTRIGLEGACY)) == 0) {
+ bytes_written = wl_android_get_roam_trigger(net, command, total_len);
+ }
+ else if (strnicmp(command, CMD_SETROAMTRIGLEGACY, strlen(CMD_SETROAMTRIGLEGACY)) == 0) {
+ bytes_written = wl_android_set_roam_trigger_legacy(net, command);
+ }
+ else if (strnicmp(command, CMD_REASSOCLEGACY, strlen(CMD_REASSOCLEGACY)) == 0) {
+ bytes_written = wl_android_reassoc(net, command, total_len);
+ }
+ else if (strnicmp(command, CMD_GETSCANCHANNELTIMELEGACY,
+ strlen(CMD_GETSCANCHANNELTIMELEGACY)) == 0) {
+ bytes_written = wl_android_get_scan_channel_time(net, command, total_len);
+ }
+ else if (strnicmp(command, CMD_SETSCANCHANNELTIMELEGACY,
+ strlen(CMD_SETSCANCHANNELTIMELEGACY)) == 0) {
+ bytes_written = wl_android_set_scan_channel_time(net, command);
+ }
+ else if (strnicmp(command, CMD_GETSCANUNASSOCTIMELEGACY,
+ strlen(CMD_GETSCANUNASSOCTIMELEGACY)) == 0) {
+ bytes_written = wl_android_get_scan_unassoc_time(net, command, total_len);
+ }
+ else if (strnicmp(command, CMD_SETSCANUNASSOCTIMELEGACY,
+ strlen(CMD_SETSCANUNASSOCTIMELEGACY)) == 0) {
+ bytes_written = wl_android_set_scan_unassoc_time(net, command);
+ }
+ else if (strnicmp(command, CMD_GETSCANPASSIVETIMELEGACY,
+ strlen(CMD_GETSCANPASSIVETIMELEGACY)) == 0) {
+ bytes_written = wl_android_get_scan_passive_time(net, command, total_len);
+ }
+ else if (strnicmp(command, CMD_SETSCANPASSIVETIMELEGACY,
+ strlen(CMD_SETSCANPASSIVETIMELEGACY)) == 0) {
+ bytes_written = wl_android_set_scan_passive_time(net, command);
+ }
+ else if (strnicmp(command, CMD_GETSCANHOMETIMELEGACY,
+ strlen(CMD_GETSCANHOMETIMELEGACY)) == 0) {
+ bytes_written = wl_android_get_scan_home_time(net, command, total_len);
+ }
+ else if (strnicmp(command, CMD_SETSCANHOMETIMELEGACY,
+ strlen(CMD_SETSCANHOMETIMELEGACY)) == 0) {
+ bytes_written = wl_android_set_scan_home_time(net, command);
+ }
+ else if (strnicmp(command, CMD_GETSCANHOMEAWAYTIMELEGACY,
+ strlen(CMD_GETSCANHOMEAWAYTIMELEGACY)) == 0) {
+ bytes_written = wl_android_get_scan_home_away_time(net, command, total_len);
+ }
+ else if (strnicmp(command, CMD_SETSCANHOMEAWAYTIMELEGACY,
+ strlen(CMD_SETSCANHOMEAWAYTIMELEGACY)) == 0) {
+ bytes_written = wl_android_set_scan_home_away_time(net, command);
+ }
+ else {
+ ANDROID_ERROR(("Unknown NCHO PRIVATE command %s - ignored\n", command));
+ bytes_written = scnprintf(command, sizeof("FAIL"), "FAIL");
+ }
+
+ return bytes_written;
+}
+
+static int
+wl_android_ncho_check_command(struct net_device *dev, char *command)
+{
+ int cnt = 0;
+
+ while (strlen(ncho_cmdlist[cnt]) > 0) {
+ if (strnicmp(command, ncho_cmdlist[cnt], strlen(ncho_cmdlist[cnt])) == 0) {
+ char cmd[WL_PRIV_CMD_LEN + 1];
+ sscanf(command, "%"S(WL_PRIV_CMD_LEN)"s ", cmd);
+ if (strlen(ncho_cmdlist[cnt]) == strlen(cmd)) {
+ return TRUE;
+ }
+ }
+ cnt++;
+ }
+ return FALSE;
+}
+
+static int
+wl_android_ncho_private_command(struct net_device *net, char *command, int total_len)
+{
+ int bytes_written = 0;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(net);
+
+ if (cfg->ncho_mode == OFF) {
+ ANDROID_ERROR(("Disable NCHO mode\n"));
+ /* In order to avoid Sequential error HANG event. */
+ return BCME_UNSUPPORTED;
+ }
+
+#ifdef ROAM_API
+ if (strnicmp(command, CMD_ROAMTRIGGER_SET, strlen(CMD_ROAMTRIGGER_SET)) == 0) {
+ bytes_written = wl_android_set_roam_trigger(net, command);
+ } else if (strnicmp(command, CMD_ROAMTRIGGER_GET, strlen(CMD_ROAMTRIGGER_GET)) == 0) {
+ bytes_written = wl_android_get_roam_trigger(net, command, total_len);
+ } else if (strnicmp(command, CMD_ROAMDELTA_SET, strlen(CMD_ROAMDELTA_SET)) == 0) {
+ bytes_written = wl_android_set_roam_delta(net, command);
+ } else if (strnicmp(command, CMD_ROAMDELTA_GET, strlen(CMD_ROAMDELTA_GET)) == 0) {
+ bytes_written = wl_android_get_roam_delta(net, command, total_len);
+ } else if (strnicmp(command, CMD_ROAMSCANPERIOD_SET,
+ strlen(CMD_ROAMSCANPERIOD_SET)) == 0) {
+ bytes_written = wl_android_set_roam_scan_period(net, command);
+ } else if (strnicmp(command, CMD_ROAMSCANPERIOD_GET,
+ strlen(CMD_ROAMSCANPERIOD_GET)) == 0) {
+ bytes_written = wl_android_get_roam_scan_period(net, command, total_len);
+ } else if (strnicmp(command, CMD_FULLROAMSCANPERIOD_SET,
+ strlen(CMD_FULLROAMSCANPERIOD_SET)) == 0) {
+ bytes_written = wl_android_set_full_roam_scan_period(net, command);
+ } else if (strnicmp(command, CMD_FULLROAMSCANPERIOD_GET,
+ strlen(CMD_FULLROAMSCANPERIOD_GET)) == 0) {
+ bytes_written = wl_android_get_full_roam_scan_period(net, command, total_len);
+ } else if (strnicmp(command, CMD_COUNTRYREV_SET, strlen(CMD_COUNTRYREV_SET)) == 0) {
+ bytes_written = wl_android_set_country_rev(net, command);
+#ifdef FCC_PWR_LIMIT_2G
+ if (wldev_iovar_setint(net, "fccpwrlimit2g", FALSE)) {
+ ANDROID_ERROR(("fccpwrlimit2g deactivation is failed\n"));
+ } else {
+ ANDROID_ERROR(("fccpwrlimit2g is deactivated\n"));
+ }
+#endif /* FCC_PWR_LIMIT_2G */
+ } else if (strnicmp(command, CMD_COUNTRYREV_GET, strlen(CMD_COUNTRYREV_GET)) == 0) {
+ bytes_written = wl_android_get_country_rev(net, command, total_len);
+ } else
+#endif /* ROAM_API */
+ if (strnicmp(command, CMD_GETROAMSCANCONTROL, strlen(CMD_GETROAMSCANCONTROL)) == 0) {
+ bytes_written = wl_android_get_roam_scan_control(net, command, total_len);
+ }
+ else if (strnicmp(command, CMD_SETROAMSCANCONTROL, strlen(CMD_SETROAMSCANCONTROL)) == 0) {
+ bytes_written = wl_android_set_roam_scan_control(net, command);
+ }
+ /* ROAMSCAN CHANNELS Add, Get, Set Command */
+ else if (strnicmp(command, CMD_ADDROAMSCANCHANNELS, strlen(CMD_ADDROAMSCANCHANNELS)) == 0) {
+ bytes_written = wl_android_add_roam_scan_channels(net, command,
+ strlen(CMD_ADDROAMSCANCHANNELS));
+ }
+ else if (strnicmp(command, CMD_GETROAMSCANCHANNELS, strlen(CMD_GETROAMSCANCHANNELS)) == 0) {
+ bytes_written = wl_android_get_roam_scan_channels(net, command, total_len,
+ CMD_GETROAMSCANCHANNELS);
+ }
+ else if (strnicmp(command, CMD_SETROAMSCANCHANNELS, strlen(CMD_SETROAMSCANCHANNELS)) == 0) {
+ bytes_written = wl_android_set_roam_scan_channels(net, command);
+ }
+ /* ROAMSCAN FREQUENCIES Add, Get, Set Command */
+ else if (strnicmp(command, CMD_ADDROAMSCANFREQS, strlen(CMD_ADDROAMSCANFREQS)) == 0) {
+ bytes_written = wl_android_add_roam_scan_freqs(net, command,
+ strlen(CMD_ADDROAMSCANFREQS));
+ }
+ else if (strnicmp(command, CMD_GETROAMSCANFREQS, strlen(CMD_GETROAMSCANFREQS)) == 0) {
+ bytes_written = wl_android_get_roam_scan_freqs(net, command, total_len,
+ CMD_GETROAMSCANFREQS);
+ }
+ else if (strnicmp(command, CMD_SETROAMSCANFREQS, strlen(CMD_SETROAMSCANFREQS)) == 0) {
+ bytes_written = wl_android_set_roam_scan_freqs(net, command);
+ }
+ else if (strnicmp(command, CMD_SENDACTIONFRAME, strlen(CMD_SENDACTIONFRAME)) == 0) {
+ bytes_written = wl_android_send_action_frame(net, command, total_len);
+ }
+ else if (strnicmp(command, CMD_REASSOC, strlen(CMD_REASSOC)) == 0) {
+ bytes_written = wl_android_reassoc(net, command, total_len);
+ }
+ else if (strnicmp(command, CMD_GETSCANCHANNELTIME, strlen(CMD_GETSCANCHANNELTIME)) == 0) {
+ bytes_written = wl_android_get_scan_channel_time(net, command, total_len);
+ }
+ else if (strnicmp(command, CMD_SETSCANCHANNELTIME, strlen(CMD_SETSCANCHANNELTIME)) == 0) {
+ bytes_written = wl_android_set_scan_channel_time(net, command);
+ }
+ else if (strnicmp(command, CMD_GETSCANUNASSOCTIME, strlen(CMD_GETSCANUNASSOCTIME)) == 0) {
+ bytes_written = wl_android_get_scan_unassoc_time(net, command, total_len);
+ }
+ else if (strnicmp(command, CMD_SETSCANUNASSOCTIME, strlen(CMD_SETSCANUNASSOCTIME)) == 0) {
+ bytes_written = wl_android_set_scan_unassoc_time(net, command);
+ }
+ else if (strnicmp(command, CMD_GETSCANPASSIVETIME, strlen(CMD_GETSCANPASSIVETIME)) == 0) {
+ bytes_written = wl_android_get_scan_passive_time(net, command, total_len);
+ }
+ else if (strnicmp(command, CMD_SETSCANPASSIVETIME, strlen(CMD_SETSCANPASSIVETIME)) == 0) {
+ bytes_written = wl_android_set_scan_passive_time(net, command);
+ }
+ else if (strnicmp(command, CMD_GETSCANHOMETIME, strlen(CMD_GETSCANHOMETIME)) == 0) {
+ bytes_written = wl_android_get_scan_home_time(net, command, total_len);
+ }
+ else if (strnicmp(command, CMD_SETSCANHOMETIME, strlen(CMD_SETSCANHOMETIME)) == 0) {
+ bytes_written = wl_android_set_scan_home_time(net, command);
+ }
+ else if (strnicmp(command, CMD_GETSCANHOMEAWAYTIME, strlen(CMD_GETSCANHOMEAWAYTIME)) == 0) {
+ bytes_written = wl_android_get_scan_home_away_time(net, command, total_len);
+ }
+ else if (strnicmp(command, CMD_SETSCANHOMEAWAYTIME, strlen(CMD_SETSCANHOMEAWAYTIME)) == 0) {
+ bytes_written = wl_android_set_scan_home_away_time(net, command);
+ }
+ else if (strnicmp(command, CMD_GETSCANNPROBES, strlen(CMD_GETSCANNPROBES)) == 0) {
+ bytes_written = wl_android_get_scan_nprobes(net, command, total_len);
+ }
+ else if (strnicmp(command, CMD_SETSCANNPROBES, strlen(CMD_SETSCANNPROBES)) == 0) {
+ bytes_written = wl_android_set_scan_nprobes(net, command);
+ }
+ else if (strnicmp(command, CMD_GETDFSSCANMODE, strlen(CMD_GETDFSSCANMODE)) == 0) {
+ bytes_written = wl_android_get_scan_dfs_channel_mode(net, command, total_len);
+ }
+ else if (strnicmp(command, CMD_SETDFSSCANMODE, strlen(CMD_SETDFSSCANMODE)) == 0) {
+ bytes_written = wl_android_set_scan_dfs_channel_mode(net, command);
+ }
+ else if (strnicmp(command, CMD_SETJOINPREFER, strlen(CMD_SETJOINPREFER)) == 0) {
+ bytes_written = wl_android_set_join_prefer(net, command);
+ }
+ else if (strnicmp(command, CMD_GETWESMODE, strlen(CMD_GETWESMODE)) == 0) {
+ bytes_written = wl_android_get_wes_mode(net, command, total_len);
+ }
+ else if (strnicmp(command, CMD_SETWESMODE, strlen(CMD_SETWESMODE)) == 0) {
+ bytes_written = wl_android_set_wes_mode(net, command);
+ }
+ else {
+ ANDROID_ERROR(("Unknown NCHO PRIVATE command %s - ignored\n", command));
+ bytes_written = scnprintf(command, sizeof("FAIL"), "FAIL");
+ }
+
+ return bytes_written;
+}
+#endif /* WES_SUPPORT */
+
+#if defined(SUPPORT_RESTORE_SCAN_PARAMS) || defined(WES_SUPPORT)
+static int
+wl_android_default_set_scan_params(struct net_device *dev, char *command, int total_len)
+{
+ int error = 0;
+ uint error_cnt = 0;
+ int cnt = 0;
+ char restore_command[WLC_IOCTL_SMLEN];
+
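+	/* Walk the restore_params table (defined elsewhere in this file) up to
+	 * its terminating sentinel entry, resetting each scan parameter to its
+	 * default by replaying "<command> <value>" through the matching handler.
+	 */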
+ while (strlen(restore_params[cnt].command) > 0 && restore_params[cnt].cmd_handler) {
+ snprintf(restore_command, WLC_IOCTL_SMLEN, "%s %d",
+ restore_params[cnt].command, restore_params[cnt].parameter);
+ if (restore_params[cnt].cmd_type == RESTORE_TYPE_PRIV_CMD) {
+ error = restore_params[cnt].cmd_handler(dev, restore_command);
+ } else if (restore_params[cnt].cmd_type == RESTORE_TYPE_PRIV_CMD_WITH_LEN) {
+ error = restore_params[cnt].cmd_handler_w_len(dev,
+ restore_command, total_len);
+ } else {
+ ANDROID_ERROR(("Unknown restore command handler\n"));
+ error = -1;
+ }
+ if (error) {
+ ANDROID_ERROR(("Failed to restore scan parameters %s, error : %d\n",
+ restore_command, error));
+ error_cnt++;
+ }
+ cnt++;
+ }
+ if (error_cnt > 0) {
+ ANDROID_ERROR(("Got %d error(s) while restoring scan parameters\n",
+ error_cnt));
+ error = -1;
+ }
+ return error;
+}
+#endif /* SUPPORT_RESTORE_SCAN_PARAMS || WES_SUPPORT */
+
+#ifdef WLTDLS
+int wl_android_tdls_reset(struct net_device *dev)
+{
+ int ret = 0;
+ ret = dhd_tdls_enable(dev, false, false, NULL);
+ if (ret < 0) {
+ ANDROID_ERROR(("Disable tdls failed. %d\n", ret));
+ return ret;
+ }
+ ret = dhd_tdls_enable(dev, true, true, NULL);
+ if (ret < 0) {
+ ANDROID_ERROR(("enable tdls failed. %d\n", ret));
+ return ret;
+ }
+ return 0;
+}
+#endif /* WLTDLS */
+
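+/* Toggle the firmware "rcroam" feature: read the current config, verify the
+ * structure version, then write the config back with only the enab flag
+ * updated.
+ */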
+int
+wl_android_rcroam_turn_on(struct net_device *dev, int rcroam_enab)
+{
+ int ret = BCME_OK;
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
+ u8 ioctl_buf[WLC_IOCTL_SMLEN];
+ wlc_rcroam_t *prcroam;
+ wlc_rcroam_info_v1_t *rcroam;
+ uint rcroamlen = sizeof(*rcroam) + RCROAM_HDRLEN;
+
+ ANDROID_INFO(("RCROAM mode %s\n", rcroam_enab ? "enable" : "disable"));
+
+ prcroam = (wlc_rcroam_t *)MALLOCZ(dhdp->osh, rcroamlen);
+ if (!prcroam) {
+ ANDROID_ERROR(("Fail to malloc buffer\n"));
+ return BCME_NOMEM;
+ }
+
+ /* Get RCROAM param */
+ ret = wldev_iovar_getbuf(dev, "rcroam", NULL, 0, prcroam, rcroamlen, NULL);
+ if (ret) {
+ ANDROID_ERROR(("Failed to get RCROAM info(%d)\n", ret));
+ goto done;
+ }
+
+ if (prcroam->ver != WLC_RC_ROAM_CUR_VER) {
+ ret = BCME_VERSION;
+ ANDROID_ERROR(("Ver(%d:%d). mismatch RCROAM info(%d)\n",
+ prcroam->ver, WLC_RC_ROAM_CUR_VER, ret));
+ goto done;
+ }
+
+ /* Set RCROAM param */
+ rcroam = (wlc_rcroam_info_v1_t *)prcroam->data;
+ prcroam->ver = WLC_RC_ROAM_CUR_VER;
+ prcroam->len = sizeof(*rcroam);
+ rcroam->enab = rcroam_enab;
+
+ ret = wldev_iovar_setbuf(dev, "rcroam", prcroam, rcroamlen,
+ ioctl_buf, sizeof(ioctl_buf), NULL);
+ if (ret) {
+ ANDROID_ERROR(("Failed to set RCROAM %s(%d)\n",
+ rcroam_enab ? "Enable" : "Disable", ret));
+ goto done;
+ }
+done:
+ if (prcroam) {
+ MFREE(dhdp->osh, prcroam, rcroamlen);
+ }
+
+ return ret;
+}
+
+#ifdef CONFIG_SILENT_ROAM
+int
+wl_android_sroam_turn_on(struct net_device *dev, int sroam_mode)
+{
+ int ret = BCME_OK;
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
+
+ dhdp->sroam_turn_on = sroam_mode;
+ ANDROID_INFO(("%s Silent mode %s\n", __FUNCTION__,
+ sroam_mode ? "enable" : "disable"));
+
+ if (!sroam_mode) {
+ ret = dhd_sroam_set_mon(dhdp, FALSE);
+ if (ret) {
+ ANDROID_ERROR(("%s Failed to Set sroam %d\n",
+ __FUNCTION__, ret));
+ }
+ }
+
+ return ret;
+}
+
+int
+wl_android_sroam_set_info(struct net_device *dev, char *data,
+ char *command, int total_len)
+{
+ int ret = BCME_OK;
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
+ size_t slen = strlen(data);
+ u8 ioctl_buf[WLC_IOCTL_SMLEN];
+ wlc_sroam_t *psroam;
+ wlc_sroam_info_t *sroam;
+ uint sroamlen = sizeof(*sroam) + SROAM_HDRLEN;
+
+ data[slen] = '\0';
+ psroam = (wlc_sroam_t *)MALLOCZ(dhdp->osh, sroamlen);
+ if (!psroam) {
+ ANDROID_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__));
+ ret = BCME_NOMEM;
+ goto done;
+ }
+
+ psroam->ver = WLC_SILENT_ROAM_CUR_VER;
+ psroam->len = sizeof(*sroam);
+ sroam = (wlc_sroam_info_t *)psroam->data;
+
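+	/* Expected input: up to six space-separated integers, e.g.
+	 * "<min_rssi> <rssi_range> <score_delta> <period> <band> <inact_cnt>"
+	 * (field order taken from the parsing below; fields not supplied stay
+	 * zeroed by MALLOCZ).
+	 */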
+ sroam->sroam_on = FALSE;
+ if (*data && *data != '\0') {
+ sroam->sroam_min_rssi = simple_strtol(data, &data, 10);
+ ANDROID_INFO(("1.Minimum RSSI %d\n", sroam->sroam_min_rssi));
+ data++;
+ }
+ if (*data && *data != '\0') {
+ sroam->sroam_rssi_range = simple_strtol(data, &data, 10);
+ ANDROID_INFO(("2.RSSI Range %d\n", sroam->sroam_rssi_range));
+ data++;
+ }
+ if (*data && *data != '\0') {
+ sroam->sroam_score_delta = simple_strtol(data, &data, 10);
+ ANDROID_INFO(("3.Score Delta %d\n", sroam->sroam_score_delta));
+ data++;
+ }
+ if (*data && *data != '\0') {
+ sroam->sroam_period_time = simple_strtol(data, &data, 10);
+ ANDROID_INFO(("4.Sroam period %d\n", sroam->sroam_period_time));
+ data++;
+ }
+ if (*data && *data != '\0') {
+ sroam->sroam_band = simple_strtol(data, &data, 10);
+ ANDROID_INFO(("5.Sroam Band %d\n", sroam->sroam_band));
+ data++;
+ }
+ if (*data && *data != '\0') {
+ sroam->sroam_inact_cnt = simple_strtol(data, &data, 10);
+ ANDROID_INFO(("6.Inactivity Count %d\n", sroam->sroam_inact_cnt));
+ data++;
+ }
+
+ if (*data != '\0') {
+ ret = BCME_BADARG;
+ goto done;
+ }
+
+ ret = wldev_iovar_setbuf(dev, "sroam", psroam, sroamlen, ioctl_buf,
+ sizeof(ioctl_buf), NULL);
+ if (ret) {
+ ANDROID_ERROR(("Failed to set silent roam info(%d)\n", ret));
+ goto done;
+ }
+done:
+ if (psroam) {
+ MFREE(dhdp->osh, psroam, sroamlen);
+ }
+
+ return ret;
+}
+
+int
+wl_android_sroam_get_info(struct net_device *dev, char *command, int total_len)
+{
+ int ret = BCME_OK;
+ int bytes_written = 0;
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
+ wlc_sroam_t *psroam;
+ wlc_sroam_info_t *sroam;
+ uint sroamlen = sizeof(*sroam) + SROAM_HDRLEN;
+
+ psroam = (wlc_sroam_t *)MALLOCZ(dhdp->osh, sroamlen);
+ if (!psroam) {
+ ANDROID_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__));
+ ret = BCME_NOMEM;
+ goto done;
+ }
+
+ ret = wldev_iovar_getbuf(dev, "sroam", NULL, 0, psroam, sroamlen, NULL);
+ if (ret) {
+ ANDROID_ERROR(("Failed to get silent roam info(%d)\n", ret));
+ goto done;
+ }
+
+ if (psroam->ver != WLC_SILENT_ROAM_CUR_VER) {
+ ret = BCME_VERSION;
+ ANDROID_ERROR(("Ver(%d:%d). mismatch silent roam info(%d)\n",
+ psroam->ver, WLC_SILENT_ROAM_CUR_VER, ret));
+ goto done;
+ }
+
+ sroam = (wlc_sroam_info_t *)psroam->data;
+ bytes_written = snprintf(command, total_len,
+ "%s %d %d %d %d %d %d %d\n",
+ CMD_SROAM_GET_INFO, sroam->sroam_on, sroam->sroam_min_rssi, sroam->sroam_rssi_range,
+ sroam->sroam_score_delta, sroam->sroam_period_time, sroam->sroam_band,
+ sroam->sroam_inact_cnt);
+ ret = bytes_written;
+
+ ANDROID_INFO(("%s", command));
+done:
+ if (psroam) {
+ MFREE(dhdp->osh, psroam, sroamlen);
+ }
+
+ return ret;
+}
+#endif /* CONFIG_SILENT_ROAM */
+
+int
+wl_android_priority_roam_enable(struct net_device *dev, int mode)
+{
+ int error = BCME_OK;
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
+ u8 ioctl_buf[WLC_IOCTL_SMLEN];
+ wl_prio_roam_prof_v1_t *prio_roam;
+ uint buf_len = sizeof(wl_prio_roam_prof_v1_t) + (uint)strlen("priority_roam") + 1;
+
+ prio_roam = (wl_prio_roam_prof_v1_t *)MALLOCZ(dhdp->osh, buf_len);
+ if (!prio_roam) {
+ ANDROID_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__));
+ error = BCME_NOMEM;
+ goto done;
+ }
+
+ error = wldev_iovar_getbuf(dev, "priority_roam", NULL, 0, prio_roam, buf_len, NULL);
+ if (error == BCME_UNSUPPORTED) {
+ ANDROID_ERROR(("Priority Roam Unsupport\n"));
+ error = BCME_OK;
+ goto done;
+ } else if (prio_roam->version != WL_PRIO_ROAM_PROF_V1) {
+ ANDROID_ERROR(("Priority Roam Version mismatch\n"));
+ goto done;
+ } else if (prio_roam->prio_roam_mode == mode) {
+ ANDROID_INFO(("Priority Roam already set(mode:%d)\n", mode));
+ goto done;
+ }
+
+ prio_roam->version = WL_PRIO_ROAM_PROF_V1;
+ prio_roam->length = sizeof(wl_prio_roam_prof_v1_t);
+ prio_roam->prio_roam_mode = mode;
+
+ error = wldev_iovar_setbuf(dev, "priority_roam", prio_roam,
+ sizeof(wl_prio_roam_prof_v1_t), ioctl_buf, sizeof(ioctl_buf), NULL);
+ if (error) {
+ ANDROID_ERROR(("Failed to set Priority Roam %s(%d)\n",
+ mode ? "Enable" : "Disable", error));
+ goto done;
+ }
+done:
+ if (prio_roam) {
+		MFREE(dhdp->osh, prio_roam, buf_len);
+ }
+
+ return error;
+}
+
+#ifdef CONFIG_ROAM_RSSI_LIMIT
+int
+wl_android_roam_rssi_limit(struct net_device *dev, char *command, int total_len)
+{
+ int ret = BCME_OK;
+ int argc, bytes_written = 0;
+ int lmt2g, lmt5g;
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
+
+ argc = sscanf(command, CMD_ROAM_RSSI_LMT " %d %d\n", &lmt2g, &lmt5g);
+
+	if (!argc) {
+		/* Get roam rssi limit */
+		ret = dhd_roam_rssi_limit_get(dhdp, &lmt2g, &lmt5g);
+		if (ret) {
+			ANDROID_ERROR(("Failed to Get roam_rssi_limit (%d)\n", ret));
+			return ret;
+		}
+		bytes_written = snprintf(command, total_len, "%d, %d\n", lmt2g, lmt5g);
+		return bytes_written;
+ } else {
+ /* Set roam rssi limit */
+ ret = dhd_roam_rssi_limit_set(dhdp, lmt2g, lmt5g);
+ if (ret) {
+ ANDROID_ERROR(("Failed to Set roam_rssi_limit (%d)\n", ret));
+ return ret;
+ }
+ }
+
+ return ret;
+}
+#endif /* CONFIG_ROAM_RSSI_LIMIT */
+
+#ifdef CONFIG_ROAM_MIN_DELTA
+int
+wl_android_roam_min_delta(struct net_device *dev, char *command, int total_len)
+{
+ int ret = BCME_OK;
+ int argc, bytes_written = 0;
+ uint32 delta2g = 0, delta5g = 0, delta = 0;
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
+
+ argc = sscanf(command, CMD_ROAM_MIN_DELTA " %d\n", &delta);
+
+ if (!argc) {
+ /* Get Minimum ROAM score delta */
+ ret = dhd_roam_min_delta_get(dhdp, &delta2g, &delta5g);
+ if (ret) {
+ ANDROID_ERROR(("Failed to Get roam_min_delta (%d)\n", ret));
+ return ret;
+ }
+ bytes_written = snprintf(command, total_len, "%d, %d\n", delta2g, delta5g);
+ return bytes_written;
+ } else {
+ /* Set Minimum ROAM score delta
+ * Framework set one parameter # wpa_cli driver ROAMMINSCOREDELTA <value>
+ */
+ ret = dhd_roam_min_delta_set(dhdp, delta, delta);
+ if (ret) {
+ ANDROID_ERROR(("Failed to Set roam_min_delta (%d)\n", ret));
+ return ret;
+ }
+ }
+
+ return ret;
+}
+#endif /* CONFIG_ROAM_MIN_DELTA */
+
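+/* Convert a hex string into raw bytes, two hex digits per output byte; len is
+ * the number of output bytes and conversion stops early at end of string.
+ * Illustrative example: get_int_bytes("dd04", oui, 2) yields {0xdd, 0x04}.
+ */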
+static int
+get_int_bytes(uchar *oui_str, uchar *oui, int len)
+{
+ int idx;
+ uchar val;
+ uchar *src, *dest;
+ char hexstr[3];
+
+ if ((oui_str == NULL) || (oui == NULL) || (len == 0)) {
+ return BCME_BADARG;
+ }
+ src = oui_str;
+ dest = oui;
+
+ for (idx = 0; idx < len; idx++) {
+ if (*src == '\0') {
+ *dest = '\0';
+ break;
+ }
+ hexstr[0] = src[0];
+ hexstr[1] = src[1];
+ hexstr[2] = '\0';
+
+ val = (uchar)bcm_strtoul(hexstr, NULL, 16);
+ if (val == (uchar)-1) {
+ return BCME_ERROR;
+ }
+ *dest++ = val;
+ src += 2;
+ }
+ return BCME_OK;
+}
+
+#define TAG_BYTE 0
+static int
+wl_android_set_disconnect_ies(struct net_device *dev, char *command)
+{
+ int cmd_prefix_len = 0;
+ char ie_len = 0;
+ int hex_ie_len = 0;
+ int total_len = 0;
+ int max_len = 0;
+ int cmd_len = 0;
+ uchar disassoc_ie[VNDR_IE_MAX_LEN] = {0};
+ s32 bssidx = 0;
+ struct bcm_cfg80211 *cfg = NULL;
+ s32 ret = 0;
+ cfg = wl_get_cfg(dev);
+
+ cmd_prefix_len = strlen("SET_DISCONNECT_IES ");
+ cmd_len = strlen(command);
+ /*
+ * <CMD> + <IES in HEX format>
+ * IES in hex format has to be in following format
+ * First byte = Tag, Second Byte = len and rest of
+ * bytes will be value. For ex: SET_DISCONNECT_IES dd0411223344
+ * tag = dd, len =04. Total IEs len = len + 2
+ */
+ ANDROID_INFO(("cmd recv = %s\n", command));
+ max_len = MIN(cmd_len, VNDR_IE_MAX_LEN);
+ /* Validate IEs len */
+ get_int_bytes(&command[cmd_prefix_len + 2], &ie_len, 1);
+ ANDROID_INFO(("ie_len = %d \n", ie_len));
+ if (ie_len <= 0 || ie_len > max_len) {
+ ret = BCME_BADLEN;
+ return ret;
+ }
+
+	/* Hex length: two hex chars per binary byte of the IE value, plus four
+	 * hex chars covering the tag and len bytes
+	 */
+ hex_ie_len = (ie_len * 2) + 4;
+ total_len = cmd_prefix_len + hex_ie_len;
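+	/* Worked example for "SET_DISCONNECT_IES dd0411223344":
+	 * ie_len = 0x04, so hex_ie_len = 4 * 2 + 4 = 12 = strlen("dd0411223344").
+	 */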
+ if (command[total_len] != '\0' || (cmd_len != total_len)) {
+ ANDROID_ERROR(("command recv not matching with len, command = %s"
+ "total_len = %d, cmd_len = %d\n", command, total_len, cmd_len));
+ ret = BCME_BADARG;
+ return ret;
+ }
+
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+ ANDROID_ERROR(("Find index failed\n"));
+ ret = -EINVAL;
+ return ret;
+ }
+
+ /* Tag and len bytes are also part of total len of ies in binary */
+ ie_len = ie_len + 2;
+ /* Convert IEs in binary */
+ get_int_bytes(&command[cmd_prefix_len], disassoc_ie, ie_len);
+ if (disassoc_ie[TAG_BYTE] != 0xdd) {
+ ANDROID_ERROR(("Wrong tag recv, tag = 0x%02x\n", disassoc_ie[TAG_BYTE]));
+ ret = BCME_UNSUPPORTED;
+ return ret;
+ }
+
+ ret = wl_cfg80211_set_mgmt_vndr_ies(cfg,
+ ndev_to_cfgdev(dev), bssidx, VNDR_IE_DISASSOC_FLAG, disassoc_ie, ie_len);
+
+ return ret;
+}
+
+#ifdef FCC_PWR_LIMIT_2G
+int
+wl_android_set_fcc_pwr_limit_2g(struct net_device *dev, char *command)
+{
+ int error = 0;
+ int enable = 0;
+
+ sscanf(command+sizeof("SET_FCC_CHANNEL"), "%d", &enable);
+
+ if ((enable != CUSTOMER_HW4_ENABLE) && (enable != CUSTOMER_HW4_DISABLE)) {
+ ANDROID_ERROR(("wl_android_set_fcc_pwr_limit_2g: Invalid data\n"));
+ return BCME_ERROR;
+ }
+
+ CUSTOMER_HW4_EN_CONVERT(enable);
+
+ ANDROID_ERROR(("wl_android_set_fcc_pwr_limit_2g: fccpwrlimit2g set (%d)\n", enable));
+ error = wldev_iovar_setint(dev, "fccpwrlimit2g", enable);
+ if (error) {
+ ANDROID_ERROR(("wl_android_set_fcc_pwr_limit_2g: fccpwrlimit2g"
+ " set returned (%d)\n", error));
+ return BCME_ERROR;
+ }
+
+ return error;
+}
+
+int
+wl_android_get_fcc_pwr_limit_2g(struct net_device *dev, char *command, int total_len)
+{
+ int error = 0;
+ int enable = 0;
+ int bytes_written = 0;
+
+ error = wldev_iovar_getint(dev, "fccpwrlimit2g", &enable);
+ if (error) {
+ ANDROID_ERROR(("wl_android_get_fcc_pwr_limit_2g: fccpwrlimit2g get"
+ " error (%d)\n", error));
+ return BCME_ERROR;
+ }
+ ANDROID_ERROR(("wl_android_get_fcc_pwr_limit_2g: fccpwrlimit2g get (%d)\n", enable));
+
+ bytes_written = snprintf(command, total_len, "%s %d", CMD_GET_FCC_PWR_LIMIT_2G, enable);
+
+ return bytes_written;
+}
+#endif /* FCC_PWR_LIMIT_2G */
+
+/* Additional format of sta_info
+ * tx_pkts, tx_failures, tx_rate(kbps), rssi(main), rssi(aux), tx_pkts_retried,
+ * tx_pkts_retry_exhausted, rx_lastpkt_rssi(main), rx_lastpkt_rssi(aux),
+ * tx_pkts_total, tx_pkts_retries, tx_pkts_fw_total, tx_pkts_fw_retries,
+ * tx_pkts_fw_retry_exhausted
+ */
+#define STA_INFO_ADD_FMT "%d %d %d %d %d %d %d %d %d %d %d %d %d %d"
+
+#ifdef BIGDATA_SOFTAP
+#define BIGDATA_SOFTAP_FMT MACOUI " %d %s %d %d %d %d %d %d"
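+/* Fields: OUI, channel, bandwidth string, rssi, rate, 802.11 mode, nss, mimo,
+ * reason_code (order matches the snprintf in wl_cfg80211_get_sta_info below).
+ */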
+#endif /* BIGDATA_SOFTAP */
+
+#define STAINFO_BAND_2G 0x0001
+#define STAINFO_BAND_5G 0x0002
+#define STAINFO_BAND_6G 0x0004
+#define STAINFO_BAND_60G 0x0008
+s32
+wl_cfg80211_get_sta_info(struct net_device *dev, char* command, int total_len)
+{
+ int bytes_written = -1, ret = 0;
+ char *pos, *token, *cmdstr;
+ bool is_macaddr = FALSE;
+ sta_info_v4_t *sta = NULL;
+ struct ether_addr mac;
+ char *iovar_buf = NULL;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ struct net_device *apdev = NULL;
+#ifdef BCMDONGLEHOST
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
+#endif /* BCMDONGLEHOST */
+
+#ifdef BIGDATA_SOFTAP
+ void *data = NULL;
+ wl_ap_sta_data_t *sta_data = NULL;
+#endif /* BIGDATA_SOFTAP */
+
+ /* Client information */
+ uint16 cap = 0;
+ uint32 rxrtry = 0, rxmulti = 0;
+ uint32 tx_pkts = 0, tx_failures = 0, tx_rate = 0;
+ uint32 tx_pkts_retried = 0, tx_pkts_retry_exhausted = 0;
+ uint32 tx_pkts_total = 0, tx_pkts_retries = 0;
+ uint32 tx_pkts_fw_total = 0, tx_pkts_fw_retries = 0;
+ uint32 tx_pkts_fw_retry_exhausted = 0;
+ int8 rssi[WL_STA_ANT_MAX] = {0};
+ int8 rx_lastpkt_rssi[WL_STA_ANT_MAX] = {0};
+ wl_if_stats_t *if_stats = NULL;
+ u16 bands = 0;
+ u32 sta_flags = 0;
+ char mac_buf[MAX_NUM_OF_ASSOCLIST *
+ sizeof(struct ether_addr) + sizeof(uint)] = {0};
+ struct maclist *assoc_maclist = (struct maclist *)mac_buf;
+
+ BCM_REFERENCE(if_stats);
+ /* This Command used during only SoftAP mode. */
+ ANDROID_INFO(("%s\n", command));
+
+ /* Check the current op_mode */
+ if (!(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
+ ANDROID_ERROR(("unsupported op mode: %d\n", dhdp->op_mode));
+ return BCME_NOTAP;
+ }
+
+ /*
+ * DRIVER GETSTAINFO [client MAC or ALL] [ifname]
+ */
+ pos = command;
+
+ /* drop command */
+ token = bcmstrtok(&pos, " ", NULL);
+
+ /* Client MAC or ALL */
+ token = bcmstrtok(&pos, " ", NULL);
+ if (!token) {
+ ANDROID_ERROR(("GETSTAINFO subcmd not provided wl_cfg80211_get_sta_info\n"));
+ return -EINVAL;
+ }
+ cmdstr = token;
+
+ bzero(&mac, ETHER_ADDR_LEN);
+ if ((!strncmp(token, "all", 3)) || (!strncmp(token, "ALL", 3))) {
+ is_macaddr = FALSE;
+ } else if ((bcm_ether_atoe(token, &mac))) {
+ is_macaddr = TRUE;
+ } else {
+ ANDROID_ERROR(("Failed to get address\n"));
+ return -EINVAL;
+ }
+
+ /* get the interface name */
+ token = bcmstrtok(&pos, " ", NULL);
+ if (!token) {
+ /* assign requested dev for compatibility */
+ apdev = dev;
+ } else {
+ /* Find a net_device for SoftAP by interface name */
+ apdev = wl_get_ap_netdev(cfg, token);
+ if (!apdev) {
+ ANDROID_ERROR(("cannot find a net_device for SoftAP\n"));
+ return -EINVAL;
+ }
+ }
+
+ iovar_buf = (char *)MALLOCZ(cfg->osh, WLC_IOCTL_MAXLEN);
+ if (!iovar_buf) {
+ ANDROID_ERROR(("Failed to allocated memory %d bytes\n",
+ WLC_IOCTL_MAXLEN));
+ return BCME_NOMEM;
+ }
+
+ if (is_macaddr) {
+ int cnt;
+
+ /* get the sta info */
+ ret = wldev_iovar_getbuf(apdev, "sta_info",
+ (struct ether_addr *)mac.octet, ETHER_ADDR_LEN,
+ iovar_buf, WLC_IOCTL_MAXLEN, NULL);
+ if (ret < 0) {
+ ANDROID_ERROR(("Get sta_info ERR %d\n", ret));
+
+#ifdef BIGDATA_SOFTAP
+ /* Customer wants to send basic client information
+ * to the framework even if DHD cannot get the sta_info.
+ */
+ goto get_bigdata;
+#else
+			goto error;
+#endif /* BIGDATA_SOFTAP */
+ }
+
+ sta = (sta_info_v4_t *)iovar_buf;
+ if (dtoh16(sta->ver) != WL_STA_VER_4) {
+ ANDROID_ERROR(("sta_info struct version mismatch, "
+ "host ver : %d, fw ver : %d\n", WL_STA_VER_4,
+ dtoh16(sta->ver)));
+
+#ifdef BIGDATA_SOFTAP
+ /* Customer wants to send basic client information
+ * to the framework even if DHD cannot get the sta_info.
+ */
+ goto get_bigdata;
+#else
+			goto error;
+#endif /* BIGDATA_SOFTAP */
+ }
+ cap = dtoh16(sta->cap);
+ rxrtry = dtoh32(sta->rx_pkts_retried);
+ rxmulti = dtoh32(sta->rx_mcast_pkts);
+ tx_pkts = dtoh32(sta->tx_pkts);
+ tx_failures = dtoh32(sta->tx_failures);
+ tx_rate = dtoh32(sta->tx_rate);
+ tx_pkts_retried = dtoh32(sta->tx_pkts_retried);
+ tx_pkts_retry_exhausted = dtoh32(sta->tx_pkts_retry_exhausted);
+ tx_pkts_total = dtoh32(sta->tx_pkts_total);
+ tx_pkts_retries = dtoh32(sta->tx_pkts_retries);
+ tx_pkts_fw_total = dtoh32(sta->tx_pkts_fw_total);
+ tx_pkts_fw_retries = dtoh32(sta->tx_pkts_fw_retries);
+ tx_pkts_fw_retry_exhausted = dtoh32(sta->tx_pkts_fw_retry_exhausted);
+ sta_flags = dtoh32(sta->flags);
+ if (sta_flags & WL_STA_IS_2G) {
+ bands |= STAINFO_BAND_2G;
+ }
+ if (sta_flags & WL_STA_IS_5G) {
+ bands |= STAINFO_BAND_5G;
+ }
+ if (sta_flags & WL_STA_IS_6G) {
+ bands |= STAINFO_BAND_6G;
+ }
+ for (cnt = WL_ANT_IDX_1; cnt < WL_RSSI_ANT_MAX; cnt++) {
+ rssi[cnt] = sta->rssi[cnt];
+ rx_lastpkt_rssi[cnt] = sta->rx_lastpkt_rssi[cnt];
+ }
+ } else {
+ int i;
+
+ /* Check if there is an associated STA or not */
+ assoc_maclist->count = htod32(MAX_NUM_OF_ASSOCLIST);
+ ret = wldev_ioctl_get(apdev, WLC_GET_ASSOCLIST, assoc_maclist, sizeof(mac_buf));
+
+ if (ret < 0) {
+ ANDROID_ERROR(("Fail to get assoc list: %d\n", ret));
+ goto error;
+ }
+
+ assoc_maclist->count = dtoh32(assoc_maclist->count);
+ ANDROID_INFO(("Assoc count : %d\n", assoc_maclist->count));
+
+ for (i = 0; i < assoc_maclist->count; i++) {
+ /* get the sta info */
+ ret = wldev_iovar_getbuf(apdev, "sta_info",
+ (struct ether_addr *)assoc_maclist->ea[i].octet, ETHER_ADDR_LEN,
+ iovar_buf, WLC_IOCTL_MAXLEN, NULL);
+
+ if (ret < 0) {
+ ANDROID_ERROR(("sta_info err : %d", ret));
+ continue;
+ }
+ sta = (sta_info_v4_t *)iovar_buf;
+ if (dtoh16(sta->ver) == WL_STA_VER_4) {
+ rxrtry += dtoh32(sta->rx_pkts_retried);
+ rxmulti += dtoh32(sta->rx_mcast_pkts);
+ tx_pkts += dtoh32(sta->tx_pkts);
+ tx_failures += dtoh32(sta->tx_failures);
+ tx_pkts_total += dtoh32(sta->tx_pkts_total);
+ tx_pkts_retries += dtoh32(sta->tx_pkts_retries);
+ tx_pkts_fw_total += dtoh32(sta->tx_pkts_fw_total);
+ tx_pkts_fw_retries += dtoh32(sta->tx_pkts_fw_retries);
+ tx_pkts_fw_retry_exhausted +=
+ dtoh32(sta->tx_pkts_fw_retry_exhausted);
+ }
+ }
+ }
+
+#ifdef BIGDATA_SOFTAP
+get_bigdata:
+
+ if (is_macaddr && wl_get_ap_stadata(cfg, &mac, &data) == BCME_OK) {
+ ANDROID_ERROR(("mac " MACDBG" \n", MAC2STRDBG((char*)&mac)));
+ sta_data = (wl_ap_sta_data_t *)data;
+#ifdef STAINFO_LEGACY
+ bytes_written = snprintf(command, total_len,
+ "%s %s Rx_Retry_Pkts=%d Rx_BcMc_Pkts=%d "
+ "CAP=%04x " BIGDATA_SOFTAP_FMT " " STA_INFO_ADD_FMT
+ "\n", CMD_GET_STA_INFO, cmdstr, rxrtry, rxmulti, cap,
+ MACOUI2STR((char*)&sta_data->mac),
+ sta_data->channel, wf_chspec_to_bw_str(sta_data->chanspec),
+ sta_data->rssi, sta_data->rate, sta_data->mode_80211,
+ sta_data->nss, sta_data->mimo, sta_data->reason_code,
+ tx_pkts, tx_failures, tx_rate,
+ (int32)rssi[WL_ANT_IDX_1], (int32)rssi[WL_ANT_IDX_2],
+ tx_pkts_retried, tx_pkts_retry_exhausted,
+ (int32)rx_lastpkt_rssi[WL_ANT_IDX_1],
+ (int32)rx_lastpkt_rssi[WL_ANT_IDX_2],
+ tx_pkts_total, tx_pkts_retries, tx_pkts_fw_total,
+ tx_pkts_fw_retries, tx_pkts_fw_retry_exhausted);
+#else
+ bytes_written = snprintf(command, total_len,
+ "%s %s Rx_Retry_Pkts=%d Rx_BcMc_Pkts=%d "
+ "CAP=%04x " BIGDATA_SOFTAP_FMT " %d\n",
+ CMD_GET_STA_INFO, cmdstr, rxrtry, rxmulti, cap,
+ MACOUI2STR((char*)&sta_data->mac),
+ sta_data->channel, wf_chspec_to_bw_str(sta_data->chanspec),
+ sta_data->rssi, sta_data->rate, sta_data->mode_80211,
+ sta_data->nss, sta_data->mimo, sta_data->reason_code, bands);
+#endif /* STAINFO_LEGACY */
+ } else
+#endif /* BIGDATA_SOFTAP */
+ {
+ ANDROID_ERROR(("ALL\n"));
+ bytes_written = snprintf(command, total_len,
+ "%s %s Rx_Retry_Pkts=%d Rx_BcMc_Pkts=%d CAP=%04x "
+ STA_INFO_ADD_FMT "\n", CMD_GET_STA_INFO, cmdstr, rxrtry, rxmulti, cap,
+ tx_pkts, tx_failures, tx_rate, (int32)rssi[WL_ANT_IDX_1],
+ (int32)rssi[WL_ANT_IDX_2], tx_pkts_retried,
+ tx_pkts_retry_exhausted, (int32)rx_lastpkt_rssi[WL_ANT_IDX_1],
+ (int32)rx_lastpkt_rssi[WL_ANT_IDX_2], tx_pkts_total,
+ tx_pkts_retries, tx_pkts_fw_total, tx_pkts_fw_retries,
+ tx_pkts_fw_retry_exhausted);
+ }
+ WL_ERR_KERN(("Command: %s", command));
+
+error:
+ if (iovar_buf) {
+ MFREE(cfg->osh, iovar_buf, WLC_IOCTL_MAXLEN);
+ }
+ if (if_stats) {
+ MFREE(cfg->osh, if_stats, sizeof(*if_stats));
+ }
+
+ return bytes_written;
+}
+
+#ifdef WL_WTC
+/*
+ * CMD Format
+ * Enable format for 3 band and 2 band respectively:
+ * DRIVER SETWTCMODE <mode> <scan_type> <rssi_thresh> <ap_rssi_thresh 2G 5G 6G>
+ * DRIVER SETWTCMODE 0 1 -80 -70 -65 -60
+ * DRIVER SETWTCMODE <mode> <scan_type> <rssi_thresh> <ap_rssi_thresh 2G 5G>
+ * DRIVER SETWTCMODE 0 1 -80 -70 -65
+ * Disable format for 3 band and 2 band respectively:
+ * DRIVER SETWTCMODE 1 0 0 0 0 0
+ * DRIVER SETWTCMODE 1 0 0 0 0
+ */
+#define WL_TRIBAND 3
+#define WL_DUALBAND 2
+
+/* For WTC disable, any value >= 1 */
+#define WL_WTC_ENABLE 0
+static int
+wl_android_wtc_config(struct net_device *dev, char *command, int total_len)
+{
+ s32 bw;
+ char *token, *pos;
+ wlc_wtc_args_t *wtc_params;
+ wlc_wtcconfig_info_v1_t *wtc_config;
+ u32 i, wtc_paramslen, maxbands = WL_DUALBAND;
+ u8 buf[WLC_IOCTL_SMLEN] = {0};
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+
+ WL_DBG_MEM(("Enter. cmd:%s\n", command));
+#ifdef WL_6G_BAND
+ if (cfg->band_6g_supported) {
+ maxbands = WL_TRIBAND;
+ }
+#endif /* WL_6G_BAND */
+ wtc_paramslen = sizeof(wlc_wtcconfig_info_v1_t) + WLC_WTC_ROAM_CONFIG_HDRLEN;
+ wtc_params = (wlc_wtc_args_t*)MALLOCZ(cfg->osh, wtc_paramslen);
+ if (!wtc_params) {
+ ANDROID_ERROR(("Error allocating wtc_params\n"));
+ return -ENOMEM;
+ }
+
+ wtc_config = (wlc_wtcconfig_info_v1_t *)wtc_params->data;
+ /* Get wtc config information and check version compatibility */
+ bw = wldev_iovar_getbuf(dev, "wnm_wbtext_wtc_config",
+ (char*)&wtc_params, wtc_paramslen, buf, WLC_IOCTL_SMLEN, 0);
+ if (bw) {
+ ANDROID_ERROR(("Error querying wnm_wbtext_wtc_config: %d\n", bw));
+ goto exit;
+ }
+
+ (void)memcpy_s(wtc_params, wtc_paramslen, buf, wtc_paramslen);
+ if (wtc_params->ver != WLC_WTC_ROAM_VER_1) {
+ ANDROID_ERROR(("Wrong version:%d\n", wtc_params->ver));
+ bw = -EINVAL;
+ goto exit;
+ }
+
+ if (wtc_params->len != sizeof(wlc_wtcconfig_info_v1_t)) {
+ ANDROID_ERROR(("Bad len\n"));
+ bw = -EINVAL;
+ goto exit;
+ }
+
+ if (strlen(command) == strlen(CMD_WTC_CONFIG)) {
+ /* No additional arguments given. GET case */
+ bw += scnprintf(command, (total_len - bw), "%u %u",
+ wtc_config->mode, wtc_config->scantype);
+ bw += scnprintf(command + bw, (total_len - bw), " %d",
+ wtc_config->rssithresh[0]);
+ for (i = 0; i < maxbands; i++) {
+ bw += scnprintf(command + bw, (total_len - bw), " %d",
+ wtc_config->ap_rssithresh[i]);
+ }
+ bw += scnprintf(command + bw, (total_len - bw), "\n");
+ } else {
+ /* SET */
+ pos = command + sizeof(CMD_WTC_CONFIG);
+
+ /* mode */
+ token = strsep((char**)&pos, " ");
+ if (!token) {
+ ANDROID_ERROR(("No mode present\n"));
+ bw = -EINVAL;
+ goto exit;
+ }
+ wtc_config->mode = (u8)bcm_atoi(token);
+
+ /* scantype */
+ token = strsep((char**)&pos, " ");
+ if (!token) {
+ ANDROID_ERROR(("No scantype present\n"));
+ bw = -EINVAL;
+ goto exit;
+ }
+ wtc_config->scantype = (u8)bcm_atoi(token);
+
+ /* rssithreshold */
+ token = strsep((char**)&pos, " ");
+ if (!token) {
+ ANDROID_ERROR(("Invalid arg for rssi threshold\n"));
+ bw = -EINVAL;
+ goto exit;
+ }
+ for (i = 0; i < maxbands; i++) {
+ wtc_config->rssithresh[i] = (s8)bcm_atoi(token);
+ }
+
+ /* AP rssithreshold */
+ for (i = 0; i < maxbands; i++) {
+ token = strsep((char**)&pos, " ");
+ if (!token) {
+ ANDROID_ERROR(("Invalid arg for ap threshold\n"));
+ bw = -EINVAL;
+ goto exit;
+ }
+ wtc_config->ap_rssithresh[i] = (s8)bcm_atoi(token);
+ }
+
+ bw = wldev_iovar_setbuf(dev, "wnm_wbtext_wtc_config",
+ (char*)wtc_params, wtc_paramslen, buf, WLC_IOCTL_SMLEN, NULL);
+ if (bw) {
+ ANDROID_ERROR(("wtc config set failed. ret:%d\n", bw));
+ }
+ }
+
+exit:
+ if (wtc_params) {
+ MFREE(cfg->osh, wtc_params, wtc_paramslen);
+ }
+ return bw;
+}
+#endif /* WL_WTC */
+#endif /* CUSTOMER_HW4_PRIVATE_CMD */
+
+#ifdef WBTEXT
+static int wl_android_wbtext(struct net_device *dev, char *command, int total_len)
+{
+ int error = BCME_OK, argc = 0;
+ int data, bytes_written;
+ int roam_trigger[2];
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
+
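+	/* "WBTEXT_ENABLE <0|1>" sets the policy; with no numeric argument the
+	 * current policy is reported instead (get path below).
+	 */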
+ argc = sscanf(command+sizeof(CMD_WBTEXT_ENABLE), "%d", &data);
+ if (!argc) {
+ error = wldev_iovar_getint(dev, "wnm_bsstrans_resp", &data);
+ if (error) {
+ ANDROID_ERROR(("wl_android_wbtext: Failed to set wbtext error = %d\n",
+ error));
+ return error;
+ }
+ bytes_written = snprintf(command, total_len, "WBTEXT %s\n",
+ (data == WL_BSSTRANS_POLICY_PRODUCT_WBTEXT)?
+ "ENABLED" : "DISABLED");
+ return bytes_written;
+ } else {
+ if (data) {
+ data = WL_BSSTRANS_POLICY_PRODUCT_WBTEXT;
+ }
+
+ if ((error = wldev_iovar_setint(dev, "wnm_bsstrans_resp", data)) != BCME_OK) {
+ ANDROID_ERROR(("wl_android_wbtext: Failed to set wbtext error = %d\n",
+ error));
+ return error;
+ }
+
+ if (data) {
+ /* reset roam_prof when wbtext is on */
+ if ((error = wl_cfg80211_wbtext_set_default(dev)) != BCME_OK) {
+ return error;
+ }
+ } else {
+ /* reset legacy roam trigger when wbtext is off */
+ roam_trigger[0] = DEFAULT_ROAM_TRIGGER_VALUE;
+ roam_trigger[1] = WLC_BAND_ALL;
+ if ((error = wldev_ioctl_set(dev, WLC_SET_ROAM_TRIGGER, roam_trigger,
+ sizeof(roam_trigger))) != BCME_OK) {
+ ANDROID_ERROR(("wl_android_wbtext: Failed to reset roam trigger = %d\n",
+ error));
+ return error;
+ }
+ }
+ dhdp->wbtext_policy = data;
+ }
+ return error;
+}
+
+static int
+wl_android_wbtext_enable(struct net_device *dev, int mode)
+{
+ int error = BCME_OK;
+ char commandp[WLC_IOCTL_SMLEN];
+
+ if (wl_android_check_wbtext_support(dev)) {
+ bzero(commandp, sizeof(commandp));
+ snprintf(commandp, WLC_IOCTL_SMLEN, "WBTEXT_ENABLE %d", mode);
+ error = wl_android_wbtext(dev, commandp, WLC_IOCTL_SMLEN);
+ if (error) {
+ ANDROID_ERROR(("Failed to set WBTEXT = %d\n", error));
+ return error;
+ }
+ }
+
+ return error;
+}
+
+static int wl_cfg80211_wbtext_btm_timer_threshold(struct net_device *dev,
+ char *command, int total_len)
+{
+ int error = BCME_OK, argc = 0;
+ int data, bytes_written;
+
+ argc = sscanf(command, CMD_WBTEXT_BTM_TIMER_THRESHOLD " %d\n", &data);
+ if (!argc) {
+ error = wldev_iovar_getint(dev, "wnm_bsstrans_timer_threshold", &data);
+ if (error) {
+ ANDROID_ERROR(("Failed to get wnm_bsstrans_timer_threshold (%d)\n", error));
+ return error;
+ }
+ bytes_written = snprintf(command, total_len, "%d\n", data);
+ return bytes_written;
+ } else {
+ if ((error = wldev_iovar_setint(dev, "wnm_bsstrans_timer_threshold",
+ data)) != BCME_OK) {
+ ANDROID_ERROR(("Failed to set wnm_bsstrans_timer_threshold (%d)\n", error));
+ return error;
+ }
+ }
+ return error;
+}
+
+static int wl_cfg80211_wbtext_btm_delta(struct net_device *dev,
+ char *command, int total_len)
+{
+ int error = BCME_OK, argc = 0;
+ int data = 0, bytes_written;
+
+ argc = sscanf(command, CMD_WBTEXT_BTM_DELTA " %d\n", &data);
+ if (!argc) {
+ error = wldev_iovar_getint(dev, "wnm_btmdelta", &data);
+ if (error) {
+ ANDROID_ERROR(("Failed to get wnm_btmdelta (%d)\n", error));
+ return error;
+ }
+ bytes_written = snprintf(command, total_len, "%d\n", data);
+ return bytes_written;
+ } else {
+ if ((error = wldev_iovar_setint(dev, "wnm_btmdelta",
+ data)) != BCME_OK) {
+ ANDROID_ERROR(("Failed to set wnm_btmdelta (%d)\n", error));
+ return error;
+ }
+ }
+ return error;
+}
+
+static int wl_cfg80211_wbtext_estm_enable(struct net_device *dev,
+ char *command, int total_len)
+{
+ int error = BCME_OK;
+ int data = 0, bytes_written = 0;
+ int wnmmask = 0;
+ char *pcmd = command;
+
+ bcmstrtok(&pcmd, " ", NULL);
+
+ error = wldev_iovar_getint(dev, "wnm", &wnmmask);
+ if (error) {
+ ANDROID_ERROR(("Failed to get wnm_btmdelta (%d)\n", error));
+ return error;
+ }
+ ANDROID_INFO(("wnmmask %x\n", wnmmask));
+ if (*pcmd == WL_IOCTL_ACTION_GET) {
+ bytes_written = snprintf(command, total_len, "wbtext_estm_enable %d\n",
+ (wnmmask & WL_WNM_ESTM) ? 1:0);
+ return bytes_written;
+ } else {
+ data = bcm_atoi(pcmd);
+ if (data == 0) {
+ wnmmask &= ~WL_WNM_ESTM;
+ } else {
+ wnmmask |= WL_WNM_ESTM;
+ }
+ ANDROID_INFO(("wnmmask %x\n", wnmmask));
+ if ((error = wldev_iovar_setint(dev, "wnm", wnmmask)) != BCME_OK) {
+ ANDROID_ERROR(("Failed to set wnm mask (%d)\n", error));
+ return error;
+ }
+ }
+ return error;
+}
+#endif /* WBTEXT */
+
+#ifdef PNO_SUPPORT
+#define PNO_PARAM_SIZE 50
+#define VALUE_SIZE 50
+#define LIMIT_STR_FMT ("%50s %50s")
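+/* Illustrative command layout, reconstructed from the parser below (keyword
+ * strings are the PNO_BATCHING_* / PNO_PARAM_* macros defined elsewhere):
+ *   WLS_BATCHING SET SCANFREQ=30 BESTN=8 MSCAN=4 CHANNEL=<1,6,11> RTT=0
+ *   WLS_BATCHING GET / WLS_BATCHING STOP
+ */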
+static int
+wls_parse_batching_cmd(struct net_device *dev, char *command, int total_len)
+{
+ int err = BCME_OK;
+ uint i, tokens, len_remain;
+ char *pos, *pos2, *token, *token2, *delim;
+ char param[PNO_PARAM_SIZE+1], value[VALUE_SIZE+1];
+ struct dhd_pno_batch_params batch_params;
+
+ ANDROID_INFO(("wls_parse_batching_cmd: command=%s, len=%d\n", command, total_len));
+ len_remain = total_len;
+ if (len_remain > (strlen(CMD_WLS_BATCHING) + 1)) {
+ pos = command + strlen(CMD_WLS_BATCHING) + 1;
+ len_remain -= strlen(CMD_WLS_BATCHING) + 1;
+ } else {
+ ANDROID_ERROR(("wls_parse_batching_cmd: No arguments, total_len %d\n", total_len));
+ err = BCME_ERROR;
+ goto exit;
+ }
+ bzero(&batch_params, sizeof(struct dhd_pno_batch_params));
+ if (!strncmp(pos, PNO_BATCHING_SET, strlen(PNO_BATCHING_SET))) {
+ if (len_remain > (strlen(PNO_BATCHING_SET) + 1)) {
+ pos += strlen(PNO_BATCHING_SET) + 1;
+ } else {
+ ANDROID_ERROR(("wls_parse_batching_cmd: %s missing arguments, total_len %d\n",
+ PNO_BATCHING_SET, total_len));
+ err = BCME_ERROR;
+ goto exit;
+ }
+ while ((token = strsep(&pos, PNO_PARAMS_DELIMETER)) != NULL) {
+ bzero(param, sizeof(param));
+ bzero(value, sizeof(value));
+ if (token == NULL || !*token)
+ break;
+ if (*token == '\0')
+ continue;
+ delim = strchr(token, PNO_PARAM_VALUE_DELLIMETER);
+ if (delim != NULL)
+ *delim = ' ';
+
+ tokens = sscanf(token, LIMIT_STR_FMT, param, value);
+ if (!strncmp(param, PNO_PARAM_SCANFREQ, strlen(PNO_PARAM_SCANFREQ))) {
+ batch_params.scan_fr = simple_strtol(value, NULL, 0);
+ ANDROID_INFO(("scan_freq : %d\n", batch_params.scan_fr));
+ } else if (!strncmp(param, PNO_PARAM_BESTN, strlen(PNO_PARAM_BESTN))) {
+ batch_params.bestn = simple_strtol(value, NULL, 0);
+ ANDROID_INFO(("bestn : %d\n", batch_params.bestn));
+ } else if (!strncmp(param, PNO_PARAM_MSCAN, strlen(PNO_PARAM_MSCAN))) {
+ batch_params.mscan = simple_strtol(value, NULL, 0);
+ ANDROID_INFO(("mscan : %d\n", batch_params.mscan));
+ } else if (!strncmp(param, PNO_PARAM_CHANNEL, strlen(PNO_PARAM_CHANNEL))) {
+ i = 0;
+ pos2 = value;
+ tokens = sscanf(value, "<%s>", value);
+ if (tokens != 1) {
+ err = BCME_ERROR;
+ ANDROID_ERROR(("wls_parse_batching_cmd: invalid format"
+ " for channel"
+ " <> params\n"));
+ goto exit;
+ }
+ while ((token2 = strsep(&pos2,
+ PNO_PARAM_CHANNEL_DELIMETER)) != NULL) {
+ if (token2 == NULL || !*token2)
+ break;
+ if (*token2 == '\0')
+ continue;
+ if (*token2 == 'A' || *token2 == 'B') {
+ batch_params.band = (*token2 == 'A')?
+ WLC_BAND_5G : WLC_BAND_2G;
+ ANDROID_INFO(("band : %s\n",
+ (*token2 == 'A')? "A" : "B"));
+ } else {
+ if ((batch_params.nchan >= WL_NUMCHANNELS) ||
+ (i >= WL_NUMCHANNELS)) {
+ ANDROID_ERROR(("Too many nchan %d\n",
+ batch_params.nchan));
+ err = BCME_BUFTOOSHORT;
+ goto exit;
+ }
+ batch_params.chan_list[i++] =
+ simple_strtol(token2, NULL, 0);
+ batch_params.nchan++;
+ ANDROID_INFO(("channel :%d\n",
+ batch_params.chan_list[i-1]));
+ }
+ }
+ } else if (!strncmp(param, PNO_PARAM_RTT, strlen(PNO_PARAM_RTT))) {
+ batch_params.rtt = simple_strtol(value, NULL, 0);
+ ANDROID_INFO(("rtt : %d\n", batch_params.rtt));
+ } else {
+ ANDROID_ERROR(("wls_parse_batching_cmd : unknown param: %s\n", param));
+ err = BCME_ERROR;
+ goto exit;
+ }
+ }
+ err = dhd_dev_pno_set_for_batch(dev, &batch_params);
+ if (err < 0) {
+ ANDROID_ERROR(("failed to configure batch scan\n"));
+ } else {
+ bzero(command, total_len);
+ err = snprintf(command, total_len, "%d", err);
+ }
+ } else if (!strncmp(pos, PNO_BATCHING_GET, strlen(PNO_BATCHING_GET))) {
+ err = dhd_dev_pno_get_for_batch(dev, command, total_len);
+ if (err < 0) {
+ ANDROID_ERROR(("failed to getting batching results\n"));
+ } else {
+ err = strlen(command);
+ }
+ } else if (!strncmp(pos, PNO_BATCHING_STOP, strlen(PNO_BATCHING_STOP))) {
+ err = dhd_dev_pno_stop_for_batch(dev);
+ if (err < 0) {
+ ANDROID_ERROR(("failed to stop batching scan\n"));
+ } else {
+ bzero(command, total_len);
+ err = snprintf(command, total_len, "OK");
+ }
+ } else {
+ ANDROID_ERROR(("wls_parse_batching_cmd : unknown command\n"));
+ err = BCME_ERROR;
+ goto exit;
+ }
+exit:
+ return err;
+}
+
+#ifndef WL_SCHED_SCAN
+static int wl_android_set_pno_setup(struct net_device *dev, char *command, int total_len)
+{
+ wlc_ssid_ext_t ssids_local[MAX_PFN_LIST_COUNT];
+ int res = -1;
+ int nssid = 0;
+ cmd_tlv_t *cmd_tlv_temp;
+ char *str_ptr;
+ int tlv_size_left;
+ int pno_time = 0;
+ int pno_repeat = 0;
+ int pno_freq_expo_max = 0;
+
+#ifdef PNO_SET_DEBUG
+ int i;
+ char pno_in_example[] = {
+ 'P', 'N', 'O', 'S', 'E', 'T', 'U', 'P', ' ',
+ 'S', '1', '2', '0',
+ 'S',
+ 0x05,
+ 'd', 'l', 'i', 'n', 'k',
+ 'S',
+ 0x04,
+ 'G', 'O', 'O', 'G',
+ 'T',
+ '0', 'B',
+ 'R',
+ '2',
+ 'M',
+ '2',
+ 0x00
+ };
+#endif /* PNO_SET_DEBUG */
+ ANDROID_INFO(("wl_android_set_pno_setup: command=%s, len=%d\n", command, total_len));
+
+ if (total_len < (strlen(CMD_PNOSETUP_SET) + sizeof(cmd_tlv_t))) {
+ ANDROID_ERROR(("wl_android_set_pno_setup: argument=%d less min size\n", total_len));
+ goto exit_proc;
+ }
+#ifdef PNO_SET_DEBUG
+ memcpy(command, pno_in_example, sizeof(pno_in_example));
+ total_len = sizeof(pno_in_example);
+#endif
+ str_ptr = command + strlen(CMD_PNOSETUP_SET);
+ tlv_size_left = total_len - strlen(CMD_PNOSETUP_SET);
+
+ cmd_tlv_temp = (cmd_tlv_t *)str_ptr;
+ bzero(ssids_local, sizeof(ssids_local));
+
+ if ((cmd_tlv_temp->prefix == PNO_TLV_PREFIX) &&
+ (cmd_tlv_temp->version == PNO_TLV_VERSION) &&
+ (cmd_tlv_temp->subtype == PNO_TLV_SUBTYPE_LEGACY_PNO)) {
+
+ str_ptr += sizeof(cmd_tlv_t);
+ tlv_size_left -= sizeof(cmd_tlv_t);
+
+ if ((nssid = wl_parse_ssid_list_tlv(&str_ptr, ssids_local,
+ MAX_PFN_LIST_COUNT, &tlv_size_left)) <= 0) {
+ ANDROID_ERROR(("SSID is not presented or corrupted ret=%d\n", nssid));
+ goto exit_proc;
+ } else {
+ if ((str_ptr[0] != PNO_TLV_TYPE_TIME) || (tlv_size_left <= 1)) {
+ ANDROID_ERROR(("wl_android_set_pno_setup: scan duration corrupted"
+ " field size %d\n",
+ tlv_size_left));
+ goto exit_proc;
+ }
+ str_ptr++;
+ pno_time = simple_strtoul(str_ptr, &str_ptr, 16);
+ ANDROID_INFO(("wl_android_set_pno_setup: pno_time=%d\n", pno_time));
+
+ if (str_ptr[0] != 0) {
+ if ((str_ptr[0] != PNO_TLV_FREQ_REPEAT)) {
+ ANDROID_ERROR(("wl_android_set_pno_setup: pno repeat:"
+ " corrupted field\n"));
+ goto exit_proc;
+ }
+ str_ptr++;
+ pno_repeat = simple_strtoul(str_ptr, &str_ptr, 16);
+ ANDROID_INFO(("wl_android_set_pno_setup: got pno_repeat=%d\n",
+ pno_repeat));
+ if (str_ptr[0] != PNO_TLV_FREQ_EXPO_MAX) {
+ ANDROID_ERROR(("wl_android_set_pno_setup: FREQ_EXPO_MAX"
+ " corrupted field size\n"));
+ goto exit_proc;
+ }
+ str_ptr++;
+ pno_freq_expo_max = simple_strtoul(str_ptr, &str_ptr, 16);
+ ANDROID_INFO(("wl_android_set_pno_setup: pno_freq_expo_max=%d\n",
+ pno_freq_expo_max));
+ }
+ }
+ } else {
+ ANDROID_ERROR(("wl_android_set_pno_setup: get wrong TLV command\n"));
+ goto exit_proc;
+ }
+
+ res = dhd_dev_pno_set_for_ssid(dev, ssids_local, nssid, pno_time, pno_repeat,
+ pno_freq_expo_max, NULL, 0);
+exit_proc:
+ return res;
+}
+#endif /* !WL_SCHED_SCAN */
+#endif /* PNO_SUPPORT */
+
+static int wl_android_get_p2p_dev_addr(struct net_device *ndev, char *command, int total_len)
+{
+ int ret;
+ struct ether_addr p2pdev_addr;
+
+#define MAC_ADDR_STR_LEN 18
+ if (total_len < MAC_ADDR_STR_LEN) {
+ ANDROID_ERROR(("wl_android_get_p2p_dev_addr: buflen %d is less than p2p dev addr\n",
+ total_len));
+ return -1;
+ }
+
+ ret = wl_cfg80211_get_p2p_dev_addr(ndev, &p2pdev_addr);
+ if (ret) {
+ ANDROID_ERROR(("wl_android_get_p2p_dev_addr: Failed to get p2p dev addr\n"));
+ return -1;
+ }
+ return (snprintf(command, total_len, MACF, ETHERP_TO_MACF(&p2pdev_addr)));
+}
+
+int
+wl_android_set_ap_mac_list(struct net_device *dev, int macmode, struct maclist *maclist)
+{
+ int i, j, match;
+ int ret = 0;
+ char mac_buf[MAX_NUM_OF_ASSOCLIST *
+ sizeof(struct ether_addr) + sizeof(uint)] = {0};
+ struct maclist *assoc_maclist = (struct maclist *)mac_buf;
+
+ /* set filtering mode */
+	if ((ret = wldev_ioctl_set(dev, WLC_SET_MACMODE, &macmode, sizeof(macmode))) != 0) {
+ ANDROID_ERROR(("wl_android_set_ap_mac_list : WLC_SET_MACMODE error=%d\n", ret));
+ return ret;
+ }
+ if (macmode != MACLIST_MODE_DISABLED) {
+ /* set the MAC filter list */
+ if ((ret = wldev_ioctl_set(dev, WLC_SET_MACLIST, maclist,
+ sizeof(int) + sizeof(struct ether_addr) * maclist->count)) != 0) {
+ ANDROID_ERROR(("wl_android_set_ap_mac_list : WLC_SET_MACLIST error=%d\n", ret));
+ return ret;
+ }
+ /* get the current list of associated STAs */
+ assoc_maclist->count = MAX_NUM_OF_ASSOCLIST;
+ if ((ret = wldev_ioctl_get(dev, WLC_GET_ASSOCLIST, assoc_maclist,
+ sizeof(mac_buf))) != 0) {
+ ANDROID_ERROR(("wl_android_set_ap_mac_list: WLC_GET_ASSOCLIST error=%d\n",
+ ret));
+ return ret;
+ }
+ /* do we have any STA associated? */
+ if (assoc_maclist->count) {
+ /* iterate each associated STA */
+ for (i = 0; i < assoc_maclist->count; i++) {
+ match = 0;
+ /* compare with each entry */
+ for (j = 0; j < maclist->count; j++) {
+ ANDROID_INFO(("wl_android_set_ap_mac_list: associated="MACDBG
+ "list = "MACDBG "\n",
+ MAC2STRDBG(assoc_maclist->ea[i].octet),
+ MAC2STRDBG(maclist->ea[j].octet)));
+ if (memcmp(assoc_maclist->ea[i].octet,
+ maclist->ea[j].octet, ETHER_ADDR_LEN) == 0) {
+ match = 1;
+ break;
+ }
+ }
+ /* do conditional deauth */
+ /* "if not in the allow list" or "if in the deny list" */
+ if ((macmode == MACLIST_MODE_ALLOW && !match) ||
+ (macmode == MACLIST_MODE_DENY && match)) {
+ scb_val_t scbval;
+
+ scbval.val = htod32(1);
+ memcpy(&scbval.ea, &assoc_maclist->ea[i],
+ ETHER_ADDR_LEN);
+ if ((ret = wldev_ioctl_set(dev,
+ WLC_SCB_DEAUTHENTICATE_FOR_REASON,
+ &scbval, sizeof(scb_val_t))) != 0)
+ ANDROID_ERROR(("wl_android_set_ap_mac_list:"
+ " WLC_SCB_DEAUTHENTICATE"
+ " error=%d\n",
+ ret));
+ }
+ }
+ }
+ }
+ return ret;
+}
+
+/*
+ * HAPD_MAC_FILTER mac_mode mac_cnt mac_addr1 mac_addr2
+ *
+ */
+static int
+wl_android_set_mac_address_filter(struct net_device *dev, char* str)
+{
+ int i;
+ int ret = 0;
+ int macnum = 0;
+ int macmode = MACLIST_MODE_DISABLED;
+ struct maclist *list;
+ char eabuf[ETHER_ADDR_STR_LEN];
+ const char *token;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+
+ /* string should look like below (macmode/macnum/maclist) */
+ /* 1 2 00:11:22:33:44:55 00:11:22:33:44:ff */
+
+ /* get the MAC filter mode */
+ token = strsep((char**)&str, " ");
+ if (!token) {
+ return -1;
+ }
+ macmode = bcm_atoi(token);
+
+ if (macmode < MACLIST_MODE_DISABLED || macmode > MACLIST_MODE_ALLOW) {
+ ANDROID_ERROR(("wl_android_set_mac_address_filter: invalid macmode %d\n", macmode));
+ return -1;
+ }
+
+ token = strsep((char**)&str, " ");
+ if (!token) {
+ return -1;
+ }
+ macnum = bcm_atoi(token);
+ if (macnum < 0 || macnum > MAX_NUM_MAC_FILT) {
+ ANDROID_ERROR(("wl_android_set_mac_address_filter: invalid number of MAC"
+ " address entries %d\n",
+ macnum));
+ return -1;
+ }
+ /* allocate memory for the MAC list */
+ list = (struct maclist*) MALLOCZ(cfg->osh, sizeof(int) +
+ sizeof(struct ether_addr) * macnum);
+ if (!list) {
+ ANDROID_ERROR(("wl_android_set_mac_address_filter : failed to allocate memory\n"));
+ return -1;
+ }
+ /* prepare the MAC list */
+ list->count = htod32(macnum);
+ bzero((char *)eabuf, ETHER_ADDR_STR_LEN);
+ for (i = 0; i < list->count; i++) {
+ token = strsep((char**)&str, " ");
+ if (token == NULL) {
+ ANDROID_ERROR(("wl_android_set_mac_address_filter : No mac address present\n"));
+ ret = -EINVAL;
+ goto exit;
+ }
+ strlcpy(eabuf, token, sizeof(eabuf));
+ if (!(ret = bcm_ether_atoe(eabuf, &list->ea[i]))) {
+ ANDROID_ERROR(("wl_android_set_mac_address_filter : mac parsing err index=%d,"
+ " addr=%s\n",
+ i, eabuf));
+ list->count = i;
+ break;
+ }
+ ANDROID_INFO(("wl_android_set_mac_address_filter : %d/%d MACADDR=%s",
+ i, list->count, eabuf));
+ }
+ if (i == 0)
+ goto exit;
+
+ /* set the list */
+ if ((ret = wl_android_set_ap_mac_list(dev, macmode, list)) != 0)
+ ANDROID_ERROR(("wl_android_set_mac_address_filter: Setting MAC list failed error=%d\n",
+ ret));
+
+exit:
+ MFREE(cfg->osh, list, sizeof(int) + sizeof(struct ether_addr) * macnum);
+
+ return ret;
+}
+
+static int wl_android_get_factory_mac_addr(struct net_device *ndev, char *command, int total_len)
+{
+ int ret;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+
+ if (total_len < ETHER_ADDR_STR_LEN) {
+ ANDROID_ERROR(("wl_android_get_factory_mac_addr buflen %d"
+ "is less than factory mac addr\n", total_len));
+ return BCME_ERROR;
+ }
+ ret = snprintf(command, total_len, MACDBG,
+ MAC2STRDBG(bcmcfg_to_prmry_ndev(cfg)->perm_addr));
+ return ret;
+}
+
+#if defined(WLAN_ACCEL_BOOT)
+int wl_android_wifi_accel_on(struct net_device *dev, bool force_reg_on)
+{
+ int ret = 0;
+
+ ANDROID_ERROR(("%s: force_reg_on = %d\n", __FUNCTION__, force_reg_on));
+ if (!dev) {
+ ANDROID_ERROR(("%s: dev is null\n", __FUNCTION__));
+ return -EINVAL;
+ }
+
+ if (force_reg_on) {
+ /* First resume the bus if it is in suspended state */
+ ret = dhd_net_bus_resume(dev, 0);
+ if (ret) {
+ ANDROID_ERROR(("%s: dhd_net_bus_resume failed\n", __FUNCTION__));
+ }
+ /* Toggle wl_reg_on */
+ ret = wl_android_wifi_off(dev, TRUE);
+ if (ret) {
+ ANDROID_ERROR(("%s: wl_android_wifi_off failed\n", __FUNCTION__));
+ }
+ ret = wl_android_wifi_on(dev);
+ if (ret) {
+ ANDROID_ERROR(("%s: wl_android_wifi_on failed\n", __FUNCTION__));
+ }
+ } else {
+ ret = dhd_net_bus_resume(dev, 0);
+ }
+
+ return ret;
+}
+
+int wl_android_wifi_accel_off(struct net_device *dev, bool force_reg_on)
+{
+ int ret = 0;
+
+ ANDROID_ERROR(("%s: force_reg_on = %d\n", __FUNCTION__, force_reg_on));
+ if (!dev) {
+ ANDROID_ERROR(("%s: dev is null\n", __FUNCTION__));
+ return -EINVAL;
+ }
+
+ if (force_reg_on) {
+ ANDROID_ERROR(("%s: do nothing as wl_reg_on will be toggled in UP\n",
+ __FUNCTION__));
+ } else {
+ ret = dhd_net_bus_suspend(dev);
+ }
+
+ return ret;
+}
+#endif /* WLAN_ACCEL_BOOT */
+
+#ifdef WBRC
+extern int wbrc_wl2bt_reset(void);
+#endif /* WBRC */
+
+/**
+ * Global function definitions (declared in wl_android.h)
+ */
+
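+/* Power on the WLAN chip, retrying up to POWERUP_MAX_RETRY times; under WBRC
+ * a failed attempt also triggers a BT reset (wbrc_wl2bt_reset) before the
+ * next retry.
+ */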
+int wl_android_wifi_on(struct net_device *dev)
+{
+ int ret = 0;
+ int retry = POWERUP_MAX_RETRY;
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
+
+ BCM_REFERENCE(dhdp);
+ if (!dev) {
+ ANDROID_ERROR(("wl_android_wifi_on: dev is null\n"));
+ return -EINVAL;
+ }
+
+ dhd_net_if_lock(dev);
+ WL_MSG(dev->name, "in g_wifi_on=%d\n", g_wifi_on);
+ if (!g_wifi_on) {
+ do {
+ dhd_net_wifi_platform_set_power(dev, TRUE, WIFI_TURNON_DELAY);
+#ifdef BCMSDIO
+ ret = dhd_net_bus_resume(dev, 0);
+#endif /* BCMSDIO */
+#ifdef BCMPCIE
+ ret = dhd_net_bus_devreset(dev, FALSE);
+#endif /* BCMPCIE */
+#ifdef WBRC
+ if (dhdp->dhd_induce_bh_error == DHD_INDUCE_BH_ON_FAIL_ONCE) {
+ ANDROID_ERROR(("%s: dhd_induce_bh_error = %d\n",
+ __FUNCTION__, dhdp->dhd_induce_bh_error));
+ /* Forcefully set error */
+ ret = BCME_ERROR;
+ /* Clear the induced bh error */
+ dhdp->dhd_induce_bh_error = DHD_INDUCE_ERROR_CLEAR;
+ }
+ if (dhdp->dhd_induce_bh_error == DHD_INDUCE_BH_ON_FAIL_ALWAYS) {
+ ANDROID_ERROR(("%s: dhd_induce_bh_error = %d\n",
+ __FUNCTION__, dhdp->dhd_induce_bh_error));
+ /* Forcefully set error */
+ ret = BCME_ERROR;
+ }
+#endif /* WBRC */
+ if (ret == 0) {
+ break;
+ }
+ ANDROID_ERROR(("failed to power up wifi chip, retry again (%d left) **\n\n",
+ retry));
+#ifdef BCMPCIE
+ dhd_net_bus_devreset(dev, TRUE);
+#endif /* BCMPCIE */
+ dhd_net_wifi_platform_set_power(dev, FALSE, WIFI_TURNOFF_DELAY);
+#ifdef WBRC
+ /* Inform BT reset which will internally wait till BT reset is done */
+ if (wbrc_wl2bt_reset()) {
+ ANDROID_ERROR(("Failed to reset BT, nothing to be done!!!!\n"));
+ }
+#endif /* WBRC */
+ } while (retry-- > 0);
+ if (ret != 0) {
+ ANDROID_ERROR(("failed to power up wifi chip, max retry reached **\n\n"));
+#ifdef BCM_DETECT_TURN_ON_FAILURE
+ BUG_ON(1);
+#endif /* BCM_DETECT_TURN_ON_FAILURE */
+ goto exit;
+ }
+#if defined(BCMSDIO) || defined(BCMDBUS)
+ ret = dhd_net_bus_devreset(dev, FALSE);
+ if (ret)
+ goto err;
+#ifdef BCMSDIO
+ dhd_net_bus_resume(dev, 1);
+#endif /* BCMSDIO */
+#endif /* BCMSDIO || BCMDBUS */
+#if defined(BCMSDIO) || defined(BCMDBUS)
+ if (!ret) {
+ if (dhd_dev_init_ioctl(dev) < 0) {
+ ret = -EFAULT;
+ goto err;
+ }
+ }
+#endif /* BCMSDIO || BCMDBUS */
+ g_wifi_on = TRUE;
+ }
+
+exit:
+ WL_MSG(dev->name, "Success\n");
+ dhd_net_if_unlock(dev);
+ return ret;
+
+#if defined(BCMSDIO) || defined(BCMDBUS)
+err:
+ dhd_net_bus_devreset(dev, TRUE);
+#ifdef BCMSDIO
+ dhd_net_bus_suspend(dev);
+#endif /* BCMSDIO */
+ dhd_net_wifi_platform_set_power(dev, FALSE, WIFI_TURNOFF_DELAY);
+ WL_MSG(dev->name, "Failed\n");
+ dhd_net_if_unlock(dev);
+ return ret;
+#endif /* BCMSDIO || BCMDBUS */
+}
+
+int wl_android_wifi_off(struct net_device *dev, bool on_failure)
+{
+ int ret = 0;
+
+ if (!dev) {
+ ANDROID_ERROR(("%s: dev is null\n", __FUNCTION__));
+ return -EINVAL;
+ }
+
+#if defined(BCMPCIE) && defined(DHD_DEBUG_UART)
+ ret = dhd_debug_uart_is_running(dev);
+ if (ret) {
+ ANDROID_ERROR(("wl_android_wifi_off: - Debug UART App is running\n"));
+ return -EBUSY;
+ }
+#endif /* BCMPCIE && DHD_DEBUG_UART */
+ dhd_net_if_lock(dev);
+ WL_MSG(dev->name, "in g_wifi_on=%d, on_failure=%d\n", g_wifi_on, on_failure);
+ if (g_wifi_on || on_failure) {
+#if defined(BCMSDIO) || defined(BCMPCIE) || defined(BCMDBUS)
+ ret = dhd_net_bus_devreset(dev, TRUE);
+#ifdef BCMSDIO
+ dhd_net_bus_suspend(dev);
+#endif /* BCMSDIO */
+#endif /* BCMSDIO || BCMPCIE || BCMDBUS */
+ dhd_net_wifi_platform_set_power(dev, FALSE, WIFI_TURNOFF_DELAY);
+ g_wifi_on = FALSE;
+ }
+ WL_MSG(dev->name, "out\n");
+ dhd_net_if_unlock(dev);
+
+ return ret;
+}
+
+static int wl_android_set_fwpath(struct net_device *net, char *command, int total_len)
+{
+ if ((strlen(command) - strlen(CMD_SETFWPATH)) > MOD_PARAM_PATHLEN)
+ return -1;
+ return dhd_net_set_fw_path(net, command + strlen(CMD_SETFWPATH) + 1);
+}
+
+#ifdef CONNECTION_STATISTICS
+static int
+wl_chanim_stats(struct net_device *dev, u8 *chan_idle)
+{
+ int err;
+ wl_chanim_stats_t *list;
+ /* Parameter _and_ returned buffer of chanim_stats. */
+ wl_chanim_stats_t param;
+ u8 result[WLC_IOCTL_SMLEN];
+ chanim_stats_t *stats;
+
+ bzero(&param, sizeof(param));
+
+ param.buflen = htod32(sizeof(wl_chanim_stats_t));
+ param.count = htod32(WL_CHANIM_COUNT_ONE);
+
+ if ((err = wldev_iovar_getbuf(dev, "chanim_stats", (char*)&param, sizeof(wl_chanim_stats_t),
+ (char*)result, sizeof(result), 0)) < 0) {
+ ANDROID_ERROR(("Failed to get chanim results %d \n", err));
+ return err;
+ }
+
+ list = (wl_chanim_stats_t*)result;
+
+ list->buflen = dtoh32(list->buflen);
+ list->version = dtoh32(list->version);
+ list->count = dtoh32(list->count);
+
+ if (list->buflen == 0) {
+ list->version = 0;
+ list->count = 0;
+ } else if (list->version != WL_CHANIM_STATS_VERSION) {
+ ANDROID_ERROR(("Sorry, firmware has wl_chanim_stats version %d "
+ "but driver supports only version %d.\n",
+ list->version, WL_CHANIM_STATS_VERSION));
+ list->buflen = 0;
+ list->count = 0;
+ }
+
+ stats = list->stats;
+ stats->glitchcnt = dtoh32(stats->glitchcnt);
+ stats->badplcp = dtoh32(stats->badplcp);
+ stats->chanspec = dtoh16(stats->chanspec);
+ stats->timestamp = dtoh32(stats->timestamp);
+ stats->chan_idle = dtoh32(stats->chan_idle);
+
+ ANDROID_INFO(("chanspec: 0x%4x glitch: %d badplcp: %d idle: %d timestamp: %d\n",
+ stats->chanspec, stats->glitchcnt, stats->badplcp, stats->chan_idle,
+ stats->timestamp));
+
+ *chan_idle = stats->chan_idle;
+
+ return (err);
+}
+
+static int
+wl_android_get_connection_stats(struct net_device *dev, char *command, int total_len)
+{
+ static char iovar_buf[WLC_IOCTL_MAXLEN];
+ const wl_cnt_wlc_t* wlc_cnt = NULL;
+#ifndef DISABLE_IF_COUNTERS
+ wl_if_stats_t* if_stats = NULL;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+#ifdef BCMDONGLEHOST
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
+#endif /* BCMDONGLEHOST */
+#endif /* DISABLE_IF_COUNTERS */
+
+ int link_speed = 0;
+ struct connection_stats *output;
+ unsigned int bufsize = 0;
+ int bytes_written = -1;
+ int ret = 0;
+
+ ANDROID_INFO(("wl_android_get_connection_stats: enter Get Connection Stats\n"));
+
+ if (total_len <= 0) {
+ ANDROID_ERROR(("wl_android_get_connection_stats: invalid buffer size %d\n", total_len));
+ goto error;
+ }
+
+ bufsize = total_len;
+ if (bufsize < sizeof(struct connection_stats)) {
+ ANDROID_ERROR(("wl_android_get_connection_stats: not enough buffer size, provided=%u,"
+ " requires=%zu\n",
+ bufsize,
+ sizeof(struct connection_stats)));
+ goto error;
+ }
+
+ output = (struct connection_stats *)command;
+
+#ifndef DISABLE_IF_COUNTERS
+ if_stats = (wl_if_stats_t *)MALLOCZ(cfg->osh, sizeof(*if_stats));
+ if (if_stats == NULL) {
+ ANDROID_ERROR(("wl_android_get_connection_stats: MALLOCZ failed\n"));
+ goto error;
+ }
+ bzero(if_stats, sizeof(*if_stats));
+
+#ifdef BCMDONGLEHOST
+ if (FW_SUPPORTED(dhdp, ifst)) {
+ ret = wl_cfg80211_ifstats_counters(dev, if_stats);
+ } else
+#endif /* BCMDONGLEHOST */
+ {
+ ret = wldev_iovar_getbuf(dev, "if_counters", NULL, 0,
+ (char *)if_stats, sizeof(*if_stats), NULL);
+ }
+
+ ret = wldev_iovar_getbuf(dev, "if_counters", NULL, 0,
+ (char *)if_stats, sizeof(*if_stats), NULL);
+ if (ret) {
+ ANDROID_ERROR(("wl_android_get_connection_stats: if_counters not supported ret=%d\n",
+ ret));
+
+ /* In case if_stats IOVAR is not supported, get information from counters. */
+#endif /* DISABLE_IF_COUNTERS */
+ ret = wldev_iovar_getbuf(dev, "counters", NULL, 0,
+ iovar_buf, WLC_IOCTL_MAXLEN, NULL);
+ if (unlikely(ret)) {
+ ANDROID_ERROR(("counters error (%d) - size = %zu\n", ret, sizeof(wl_cnt_wlc_t)));
+ goto error;
+ }
+ ret = wl_cntbuf_to_xtlv_format(NULL, iovar_buf, WL_CNTBUF_MAX_SIZE, 0);
+ if (ret != BCME_OK) {
+ ANDROID_ERROR(("wl_android_get_connection_stats:"
+ " wl_cntbuf_to_xtlv_format ERR %d\n",
+ ret));
+ goto error;
+ }
+
+ if (!(wlc_cnt = GET_WLCCNT_FROM_CNTBUF(iovar_buf))) {
+ ANDROID_ERROR(("wl_android_get_connection_stats: wlc_cnt NULL!\n"));
+ goto error;
+ }
+
+ output->txframe = dtoh32(wlc_cnt->txframe);
+ output->txbyte = dtoh32(wlc_cnt->txbyte);
+ output->txerror = dtoh32(wlc_cnt->txerror);
+ output->rxframe = dtoh32(wlc_cnt->rxframe);
+ output->rxbyte = dtoh32(wlc_cnt->rxbyte);
+ output->txfail = dtoh32(wlc_cnt->txfail);
+ output->txretry = dtoh32(wlc_cnt->txretry);
+ output->txretrie = dtoh32(wlc_cnt->txretrie);
+ output->txrts = dtoh32(wlc_cnt->txrts);
+ output->txnocts = dtoh32(wlc_cnt->txnocts);
+ output->txexptime = dtoh32(wlc_cnt->txexptime);
+#ifndef DISABLE_IF_COUNTERS
+ } else {
+ /* Populate from if_stats. */
+ if (dtoh16(if_stats->version) > WL_IF_STATS_T_VERSION) {
+ ANDROID_ERROR(("wl_android_get_connection_stats: incorrect version of"
+ " wl_if_stats_t,"
+ " expected=%u got=%u\n",
+ WL_IF_STATS_T_VERSION, if_stats->version));
+ goto error;
+ }
+
+ output->txframe = (uint32)dtoh64(if_stats->txframe);
+ output->txbyte = (uint32)dtoh64(if_stats->txbyte);
+ output->txerror = (uint32)dtoh64(if_stats->txerror);
+ output->rxframe = (uint32)dtoh64(if_stats->rxframe);
+ output->rxbyte = (uint32)dtoh64(if_stats->rxbyte);
+ output->txfail = (uint32)dtoh64(if_stats->txfail);
+ output->txretry = (uint32)dtoh64(if_stats->txretry);
+ output->txretrie = (uint32)dtoh64(if_stats->txretrie);
+ if (dtoh16(if_stats->length) > OFFSETOF(wl_if_stats_t, txexptime)) {
+ output->txexptime = (uint32)dtoh64(if_stats->txexptime);
+ output->txrts = (uint32)dtoh64(if_stats->txrts);
+ output->txnocts = (uint32)dtoh64(if_stats->txnocts);
+ } else {
+ output->txexptime = 0;
+ output->txrts = 0;
+ output->txnocts = 0;
+ }
+ }
+#endif /* DISABLE_IF_COUNTERS */
+
+ /* link_speed is in kbps */
+ ret = wldev_get_link_speed(dev, &link_speed);
+ if (ret || link_speed < 0) {
+ ANDROID_ERROR(("wl_android_get_connection_stats: wldev_get_link_speed()"
+ " failed, ret=%d, speed=%d\n",
+ ret, link_speed));
+ goto error;
+ }
+
+ output->txrate = link_speed;
+
+ /* Channel idle ratio. */
+ if (wl_chanim_stats(dev, &(output->chan_idle)) < 0) {
+ output->chan_idle = 0;
+	}
+
+ bytes_written = sizeof(struct connection_stats);
+
+error:
+#ifndef DISABLE_IF_COUNTERS
+ if (if_stats) {
+ MFREE(cfg->osh, if_stats, sizeof(*if_stats));
+ }
+#endif /* DISABLE_IF_COUNTERS */
+
+ return bytes_written;
+}
+#endif /* CONNECTION_STATISTICS */
+
+#ifdef WL_NATOE
+static int
+wl_android_process_natoe_cmd(struct net_device *dev, char *command, int total_len)
+{
+ int ret = BCME_ERROR;
+ char *pcmd = command;
+ char *str = NULL;
+ wl_natoe_cmd_info_t cmd_info;
+ const wl_natoe_sub_cmd_t *natoe_cmd = &natoe_cmd_list[0];
+
+ /* skip to cmd name after "natoe" */
+ str = bcmstrtok(&pcmd, " ", NULL);
+
+ /* If natoe subcmd name is not provided, return error */
+ if (*pcmd == '\0') {
+ ANDROID_ERROR(("natoe subcmd not provided wl_android_process_natoe_cmd\n"));
+ ret = -EINVAL;
+ return ret;
+ }
+
+ /* get the natoe command name to str */
+ str = bcmstrtok(&pcmd, " ", NULL);
+
+ while (natoe_cmd->name != NULL) {
+ if (strcmp(natoe_cmd->name, str) == 0) {
+			/* dispatch cmd to appropriate handler */
+ if (natoe_cmd->handler) {
+ cmd_info.command = command;
+ cmd_info.tot_len = total_len;
+ ret = natoe_cmd->handler(dev, natoe_cmd, pcmd, &cmd_info);
+ }
+ return ret;
+ }
+ natoe_cmd++;
+ }
+ return ret;
+}
+
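+/*
+ * XTLV unpack callback shared by the natoe "get" path below: formats each
+ * response record (enable flag, IP config, port range, debug stats, table
+ * count) into the user command buffer and accounts the output through
+ * cmd_info->bytes_written.
+ */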
+static int
+wlu_natoe_set_vars_cbfn(void *ctx, uint8 *data, uint16 type, uint16 len)
+{
+ int res = BCME_OK;
+ wl_natoe_cmd_info_t *cmd_info = (wl_natoe_cmd_info_t *)ctx;
+ uint8 *command = cmd_info->command;
+ uint16 total_len = cmd_info->tot_len;
+ uint16 bytes_written = 0;
+
+ UNUSED_PARAMETER(len);
+
+ switch (type) {
+
+ case WL_NATOE_XTLV_ENABLE:
+ {
+ bytes_written = snprintf(command, total_len, "natoe: %s\n",
+			*data ? "enabled" : "disabled");
+ cmd_info->bytes_written = bytes_written;
+ break;
+ }
+
+ case WL_NATOE_XTLV_CONFIG_IPS:
+ {
+ wl_natoe_config_ips_t *config_ips;
+ uint8 buf[16];
+
+ config_ips = (wl_natoe_config_ips_t *)data;
+ bcm_ip_ntoa((struct ipv4_addr *)&config_ips->sta_ip, buf);
+ bytes_written = snprintf(command, total_len, "sta ip: %s\n", buf);
+ bcm_ip_ntoa((struct ipv4_addr *)&config_ips->sta_netmask, buf);
+		bytes_written += snprintf(command + bytes_written, total_len - bytes_written,
+				"sta netmask: %s\n", buf);
+		bcm_ip_ntoa((struct ipv4_addr *)&config_ips->sta_router_ip, buf);
+		bytes_written += snprintf(command + bytes_written, total_len - bytes_written,
+				"sta router ip: %s\n", buf);
+		bcm_ip_ntoa((struct ipv4_addr *)&config_ips->sta_dnsip, buf);
+		bytes_written += snprintf(command + bytes_written, total_len - bytes_written,
+				"sta dns ip: %s\n", buf);
+		bcm_ip_ntoa((struct ipv4_addr *)&config_ips->ap_ip, buf);
+		bytes_written += snprintf(command + bytes_written, total_len - bytes_written,
+				"ap ip: %s\n", buf);
+		bcm_ip_ntoa((struct ipv4_addr *)&config_ips->ap_netmask, buf);
+		bytes_written += snprintf(command + bytes_written, total_len - bytes_written,
+				"ap netmask: %s\n", buf);
+ cmd_info->bytes_written = bytes_written;
+ break;
+ }
+
+ case WL_NATOE_XTLV_CONFIG_PORTS:
+ {
+ wl_natoe_ports_config_t *ports_config;
+
+ ports_config = (wl_natoe_ports_config_t *)data;
+ bytes_written = snprintf(command, total_len, "starting port num: %d\n",
+ dtoh16(ports_config->start_port_num));
+		bytes_written += snprintf(command + bytes_written, total_len - bytes_written,
+				"number of ports: %d\n", dtoh16(ports_config->no_of_ports));
+ cmd_info->bytes_written = bytes_written;
+ break;
+ }
+
+ case WL_NATOE_XTLV_DBG_STATS:
+ {
+ char *stats_dump = (char *)data;
+
+ bytes_written = snprintf(command, total_len, "%s\n", stats_dump);
+ cmd_info->bytes_written = bytes_written;
+ break;
+ }
+
+ case WL_NATOE_XTLV_TBL_CNT:
+ {
+ bytes_written = snprintf(command, total_len, "natoe max tbl entries: %d\n",
+ dtoh32(*(uint32 *)data));
+ cmd_info->bytes_written = bytes_written;
+ break;
+ }
+
+ default:
+ /* ignore */
+ break;
+ }
+
+ return res;
+}
+
+/*
+ * Common helper for all natoe get commands: issues the "natoe" iovar get
+ * and unpacks the returned XTLV records through wlu_natoe_set_vars_cbfn().
+ */
+static int
+wl_natoe_get_ioctl(struct net_device *dev, wl_natoe_ioc_t *natoe_ioc,
+ uint16 iocsz, uint8 *buf, uint16 buflen, wl_natoe_cmd_info_t *cmd_info)
+{
+ /* for gets we only need to pass ioc header */
+ wl_natoe_ioc_t *iocresp = (wl_natoe_ioc_t *)buf;
+ int res;
+
+ /* send getbuf natoe iovar */
+ res = wldev_iovar_getbuf(dev, "natoe", natoe_ioc, iocsz, buf,
+ buflen, NULL);
+
+	/* check the response buf */
+	if (res == BCME_OK) {
+		/* scan the ioctl tlv buf and invoke the cbfn for processing */
+		res = bcm_unpack_xtlv_buf(cmd_info, iocresp->data, iocresp->len,
+			BCM_XTLV_OPTION_ALIGN32, wlu_natoe_set_vars_cbfn);
+
+		if (res == BCME_OK) {
+			res = cmd_info->bytes_written;
+		}
+	} else {
+		ANDROID_ERROR(("wl_natoe_get_ioctl: get command failed code %d\n", res));
+		res = BCME_ERROR;
+	}
+
+ return res;
+}
+
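+/*
+ * The natoe subcommand handlers below share one convention: when the first
+ * argument character is WL_IOCTL_ACTION_GET the current value is fetched
+ * through wl_natoe_get_ioctl(); otherwise the argument is parsed, packed as
+ * an XTLV record and pushed with the "natoe" set iovar.
+ */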
+static int
+wl_android_natoe_subcmd_enable(struct net_device *dev, const wl_natoe_sub_cmd_t *cmd,
+ char *command, wl_natoe_cmd_info_t *cmd_info)
+{
+ int ret = BCME_OK;
+ wl_natoe_ioc_t *natoe_ioc;
+ char *pcmd = command;
+ uint16 iocsz = sizeof(*natoe_ioc) + WL_NATOE_IOC_BUFSZ;
+ uint16 buflen = WL_NATOE_IOC_BUFSZ;
+ bcm_xtlv_t *pxtlv = NULL;
+ char *ioctl_buf = NULL;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+
+ ioctl_buf = (char *)MALLOCZ(cfg->osh, WLC_IOCTL_MEDLEN);
+ if (!ioctl_buf) {
+ ANDROID_ERROR(("ioctl memory alloc failed\n"));
+ return -ENOMEM;
+ }
+
+	/* alloc mem for ioctl header + tlv data */
+ natoe_ioc = (wl_natoe_ioc_t *)MALLOCZ(cfg->osh, iocsz);
+ if (!natoe_ioc) {
+ ANDROID_ERROR(("ioctl header memory alloc failed\n"));
+ MFREE(cfg->osh, ioctl_buf, WLC_IOCTL_MEDLEN);
+ return -ENOMEM;
+ }
+
+ /* make up natoe cmd ioctl header */
+ natoe_ioc->version = htod16(WL_NATOE_IOCTL_VERSION);
+ natoe_ioc->id = htod16(cmd->id);
+ natoe_ioc->len = htod16(WL_NATOE_IOC_BUFSZ);
+ pxtlv = (bcm_xtlv_t *)natoe_ioc->data;
+
+	if (*pcmd == WL_IOCTL_ACTION_GET) { /* get */
+ iocsz = sizeof(*natoe_ioc) + sizeof(*pxtlv);
+ ret = wl_natoe_get_ioctl(dev, natoe_ioc, iocsz, ioctl_buf,
+ WLC_IOCTL_MEDLEN, cmd_info);
+ if (ret != BCME_OK) {
+			ANDROID_ERROR(("Failed to get iovar in wl_android_natoe_subcmd_enable\n"));
+ ret = -EINVAL;
+ }
+ } else { /* set */
+ uint8 val = bcm_atoi(pcmd);
+
+ /* buflen is max tlv data we can write, it will be decremented as we pack */
+ /* save buflen at start */
+ uint16 buflen_at_start = buflen;
+
+ /* we'll adjust final ioc size at the end */
+ ret = bcm_pack_xtlv_entry((uint8**)&pxtlv, &buflen, WL_NATOE_XTLV_ENABLE,
+ sizeof(uint8), &val, BCM_XTLV_OPTION_ALIGN32);
+
+ if (ret != BCME_OK) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ /* adjust iocsz to the end of last data record */
+ natoe_ioc->len = (buflen_at_start - buflen);
+ iocsz = sizeof(*natoe_ioc) + natoe_ioc->len;
+
+ ret = wldev_iovar_setbuf(dev, "natoe",
+ natoe_ioc, iocsz, ioctl_buf, WLC_IOCTL_MEDLEN, NULL);
+ if (ret != BCME_OK) {
+			ANDROID_ERROR(("Failed to set iovar %d\n", ret));
+ ret = -EINVAL;
+ }
+ }
+
+exit:
+ MFREE(cfg->osh, ioctl_buf, WLC_IOCTL_MEDLEN);
+	/* free with the original allocation size; iocsz was adjusted above */
+	MFREE(cfg->osh, natoe_ioc, sizeof(*natoe_ioc) + WL_NATOE_IOC_BUFSZ);
+
+ return ret;
+}
+
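+/*
+ * "natoe config_ips" set syntax, six space-separated IPv4 dotted-quad
+ * values in this order:
+ * <sta_ip> <sta_netmask> <sta_router_ip> <sta_dns_ip> <ap_ip> <ap_netmask>
+ */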
+static int
+wl_android_natoe_subcmd_config_ips(struct net_device *dev,
+ const wl_natoe_sub_cmd_t *cmd, char *command, wl_natoe_cmd_info_t *cmd_info)
+{
+ int ret = BCME_OK;
+ wl_natoe_config_ips_t config_ips;
+ wl_natoe_ioc_t *natoe_ioc;
+ char *pcmd = command;
+ char *str;
+ uint16 iocsz = sizeof(*natoe_ioc) + WL_NATOE_IOC_BUFSZ;
+ uint16 buflen = WL_NATOE_IOC_BUFSZ;
+ bcm_xtlv_t *pxtlv = NULL;
+ char *ioctl_buf = NULL;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+
+ ioctl_buf = (char *)MALLOCZ(cfg->osh, WLC_IOCTL_MEDLEN);
+ if (!ioctl_buf) {
+ ANDROID_ERROR(("ioctl memory alloc failed\n"));
+ return -ENOMEM;
+ }
+
+	/* alloc mem for ioctl header + tlv data */
+ natoe_ioc = (wl_natoe_ioc_t *)MALLOCZ(cfg->osh, iocsz);
+ if (!natoe_ioc) {
+ ANDROID_ERROR(("ioctl header memory alloc failed\n"));
+ MFREE(cfg->osh, ioctl_buf, WLC_IOCTL_MEDLEN);
+ return -ENOMEM;
+ }
+
+ /* make up natoe cmd ioctl header */
+ natoe_ioc->version = htod16(WL_NATOE_IOCTL_VERSION);
+ natoe_ioc->id = htod16(cmd->id);
+ natoe_ioc->len = htod16(WL_NATOE_IOC_BUFSZ);
+ pxtlv = (bcm_xtlv_t *)natoe_ioc->data;
+
+	if (*pcmd == WL_IOCTL_ACTION_GET) { /* get */
+ iocsz = sizeof(*natoe_ioc) + sizeof(*pxtlv);
+ ret = wl_natoe_get_ioctl(dev, natoe_ioc, iocsz, ioctl_buf,
+ WLC_IOCTL_MEDLEN, cmd_info);
+ if (ret != BCME_OK) {
+			ANDROID_ERROR(("Failed to get iovar in wl_android_natoe_subcmd_config_ips\n"));
+ ret = -EINVAL;
+ }
+ } else { /* set */
+ /* buflen is max tlv data we can write, it will be decremented as we pack */
+ /* save buflen at start */
+ uint16 buflen_at_start = buflen;
+
+ bzero(&config_ips, sizeof(config_ips));
+
+ str = bcmstrtok(&pcmd, " ", NULL);
+ if (!str || !bcm_atoipv4(str, (struct ipv4_addr *)&config_ips.sta_ip)) {
+ ANDROID_ERROR(("Invalid STA IP addr %s\n", str));
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ str = bcmstrtok(&pcmd, " ", NULL);
+ if (!str || !bcm_atoipv4(str, (struct ipv4_addr *)&config_ips.sta_netmask)) {
+ ANDROID_ERROR(("Invalid STA netmask %s\n", str));
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ str = bcmstrtok(&pcmd, " ", NULL);
+ if (!str || !bcm_atoipv4(str, (struct ipv4_addr *)&config_ips.sta_router_ip)) {
+ ANDROID_ERROR(("Invalid STA router IP addr %s\n", str));
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ str = bcmstrtok(&pcmd, " ", NULL);
+ if (!str || !bcm_atoipv4(str, (struct ipv4_addr *)&config_ips.sta_dnsip)) {
+ ANDROID_ERROR(("Invalid STA DNS IP addr %s\n", str));
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ str = bcmstrtok(&pcmd, " ", NULL);
+ if (!str || !bcm_atoipv4(str, (struct ipv4_addr *)&config_ips.ap_ip)) {
+ ANDROID_ERROR(("Invalid AP IP addr %s\n", str));
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ str = bcmstrtok(&pcmd, " ", NULL);
+ if (!str || !bcm_atoipv4(str, (struct ipv4_addr *)&config_ips.ap_netmask)) {
+ ANDROID_ERROR(("Invalid AP netmask %s\n", str));
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ret = bcm_pack_xtlv_entry((uint8**)&pxtlv,
+ &buflen, WL_NATOE_XTLV_CONFIG_IPS, sizeof(config_ips),
+ &config_ips, BCM_XTLV_OPTION_ALIGN32);
+
+ if (ret != BCME_OK) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ /* adjust iocsz to the end of last data record */
+ natoe_ioc->len = (buflen_at_start - buflen);
+ iocsz = sizeof(*natoe_ioc) + natoe_ioc->len;
+
+ ret = wldev_iovar_setbuf(dev, "natoe",
+ natoe_ioc, iocsz, ioctl_buf, WLC_IOCTL_MEDLEN, NULL);
+ if (ret != BCME_OK) {
+			ANDROID_ERROR(("Failed to set iovar %d\n", ret));
+ ret = -EINVAL;
+ }
+ }
+
+exit:
+ MFREE(cfg->osh, ioctl_buf, WLC_IOCTL_MEDLEN);
+ MFREE(cfg->osh, natoe_ioc, sizeof(*natoe_ioc) + WL_NATOE_IOC_BUFSZ);
+
+ return ret;
+}
+
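+/*
+ * "natoe config_ports" set syntax: <start_port> <num_ports>; the request is
+ * rejected when start_port + num_ports exceeds NATOE_MAX_PORT_NUM.
+ */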
+static int
+wl_android_natoe_subcmd_config_ports(struct net_device *dev,
+ const wl_natoe_sub_cmd_t *cmd, char *command, wl_natoe_cmd_info_t *cmd_info)
+{
+ int ret = BCME_OK;
+ wl_natoe_ports_config_t ports_config;
+ wl_natoe_ioc_t *natoe_ioc;
+ char *pcmd = command;
+ char *str;
+ uint16 iocsz = sizeof(*natoe_ioc) + WL_NATOE_IOC_BUFSZ;
+ uint16 buflen = WL_NATOE_IOC_BUFSZ;
+ bcm_xtlv_t *pxtlv = NULL;
+ char *ioctl_buf = NULL;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+
+ ioctl_buf = (char *)MALLOCZ(cfg->osh, WLC_IOCTL_MEDLEN);
+ if (!ioctl_buf) {
+ ANDROID_ERROR(("ioctl memory alloc failed\n"));
+ return -ENOMEM;
+ }
+
+	/* alloc mem for ioctl header + tlv data */
+ natoe_ioc = (wl_natoe_ioc_t *)MALLOCZ(cfg->osh, iocsz);
+ if (!natoe_ioc) {
+ ANDROID_ERROR(("ioctl header memory alloc failed\n"));
+ MFREE(cfg->osh, ioctl_buf, WLC_IOCTL_MEDLEN);
+ return -ENOMEM;
+ }
+
+ /* make up natoe cmd ioctl header */
+ natoe_ioc->version = htod16(WL_NATOE_IOCTL_VERSION);
+ natoe_ioc->id = htod16(cmd->id);
+ natoe_ioc->len = htod16(WL_NATOE_IOC_BUFSZ);
+ pxtlv = (bcm_xtlv_t *)natoe_ioc->data;
+
+	if (*pcmd == WL_IOCTL_ACTION_GET) { /* get */
+ iocsz = sizeof(*natoe_ioc) + sizeof(*pxtlv);
+ ret = wl_natoe_get_ioctl(dev, natoe_ioc, iocsz, ioctl_buf,
+ WLC_IOCTL_MEDLEN, cmd_info);
+ if (ret != BCME_OK) {
+			ANDROID_ERROR(("Failed to get iovar in wl_android_natoe_subcmd_config_ports\n"));
+ ret = -EINVAL;
+ }
+ } else { /* set */
+ /* buflen is max tlv data we can write, it will be decremented as we pack */
+ /* save buflen at start */
+ uint16 buflen_at_start = buflen;
+
+ bzero(&ports_config, sizeof(ports_config));
+
+ str = bcmstrtok(&pcmd, " ", NULL);
+ if (!str) {
+			ANDROID_ERROR(("Missing start port argument\n"));
+ ret = -EINVAL;
+ goto exit;
+ }
+ ports_config.start_port_num = htod16(bcm_atoi(str));
+
+ str = bcmstrtok(&pcmd, " ", NULL);
+ if (!str) {
+			ANDROID_ERROR(("Missing number of ports argument\n"));
+ ret = -EINVAL;
+ goto exit;
+ }
+ ports_config.no_of_ports = htod16(bcm_atoi(str));
+
+ if ((uint32)(ports_config.start_port_num + ports_config.no_of_ports) >
+ NATOE_MAX_PORT_NUM) {
+ ANDROID_ERROR(("Invalid port configuration\n"));
+ ret = -EINVAL;
+ goto exit;
+ }
+ ret = bcm_pack_xtlv_entry((uint8**)&pxtlv,
+ &buflen, WL_NATOE_XTLV_CONFIG_PORTS, sizeof(ports_config),
+ &ports_config, BCM_XTLV_OPTION_ALIGN32);
+
+ if (ret != BCME_OK) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ /* adjust iocsz to the end of last data record */
+ natoe_ioc->len = (buflen_at_start - buflen);
+ iocsz = sizeof(*natoe_ioc) + natoe_ioc->len;
+
+ ret = wldev_iovar_setbuf(dev, "natoe",
+ natoe_ioc, iocsz, ioctl_buf, WLC_IOCTL_MEDLEN, NULL);
+ if (ret != BCME_OK) {
+			ANDROID_ERROR(("Failed to set iovar %d\n", ret));
+ ret = -EINVAL;
+ }
+ }
+
+exit:
+ MFREE(cfg->osh, ioctl_buf, WLC_IOCTL_MEDLEN);
+ MFREE(cfg->osh, natoe_ioc, sizeof(*natoe_ioc) + WL_NATOE_IOC_BUFSZ);
+
+ return ret;
+}
+
+static int
+wl_android_natoe_subcmd_dbg_stats(struct net_device *dev, const wl_natoe_sub_cmd_t *cmd,
+ char *command, wl_natoe_cmd_info_t *cmd_info)
+{
+ int ret = BCME_OK;
+ wl_natoe_ioc_t *natoe_ioc;
+ char *pcmd = command;
+ uint16 iocsz = sizeof(*natoe_ioc) + WL_NATOE_DBG_STATS_BUFSZ;
+ uint16 buflen = WL_NATOE_DBG_STATS_BUFSZ;
+ bcm_xtlv_t *pxtlv = NULL;
+ char *ioctl_buf = NULL;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+
+ ioctl_buf = (char *)MALLOCZ(cfg->osh, WLC_IOCTL_MAXLEN);
+ if (!ioctl_buf) {
+ ANDROID_ERROR(("ioctl memory alloc failed\n"));
+ return -ENOMEM;
+ }
+
+	/* alloc mem for ioctl header + tlv data */
+ natoe_ioc = (wl_natoe_ioc_t *)MALLOCZ(cfg->osh, iocsz);
+ if (!natoe_ioc) {
+ ANDROID_ERROR(("ioctl header memory alloc failed\n"));
+ MFREE(cfg->osh, ioctl_buf, WLC_IOCTL_MAXLEN);
+ return -ENOMEM;
+ }
+
+ /* make up natoe cmd ioctl header */
+ natoe_ioc->version = htod16(WL_NATOE_IOCTL_VERSION);
+ natoe_ioc->id = htod16(cmd->id);
+ natoe_ioc->len = htod16(WL_NATOE_DBG_STATS_BUFSZ);
+ pxtlv = (bcm_xtlv_t *)natoe_ioc->data;
+
+	if (*pcmd == WL_IOCTL_ACTION_GET) { /* get */
+ iocsz = sizeof(*natoe_ioc) + sizeof(*pxtlv);
+ ret = wl_natoe_get_ioctl(dev, natoe_ioc, iocsz, ioctl_buf,
+ WLC_IOCTL_MAXLEN, cmd_info);
+ if (ret != BCME_OK) {
+			ANDROID_ERROR(("Failed to get iovar in wl_android_natoe_subcmd_dbg_stats\n"));
+ ret = -EINVAL;
+ }
+ } else { /* set */
+ uint8 val = bcm_atoi(pcmd);
+
+ /* buflen is max tlv data we can write, it will be decremented as we pack */
+ /* save buflen at start */
+ uint16 buflen_at_start = buflen;
+
+ /* we'll adjust final ioc size at the end */
+ ret = bcm_pack_xtlv_entry((uint8**)&pxtlv, &buflen, WL_NATOE_XTLV_ENABLE,
+ sizeof(uint8), &val, BCM_XTLV_OPTION_ALIGN32);
+
+ if (ret != BCME_OK) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ /* adjust iocsz to the end of last data record */
+ natoe_ioc->len = (buflen_at_start - buflen);
+ iocsz = sizeof(*natoe_ioc) + natoe_ioc->len;
+
+ ret = wldev_iovar_setbuf(dev, "natoe",
+ natoe_ioc, iocsz, ioctl_buf, WLC_IOCTL_MAXLEN, NULL);
+ if (ret != BCME_OK) {
+			ANDROID_ERROR(("Failed to set iovar %d\n", ret));
+ ret = -EINVAL;
+ }
+ }
+
+exit:
+ MFREE(cfg->osh, ioctl_buf, WLC_IOCTL_MAXLEN);
+ MFREE(cfg->osh, natoe_ioc, sizeof(*natoe_ioc) + WL_NATOE_DBG_STATS_BUFSZ);
+
+ return ret;
+}
+
+static int
+wl_android_natoe_subcmd_tbl_cnt(struct net_device *dev, const wl_natoe_sub_cmd_t *cmd,
+ char *command, wl_natoe_cmd_info_t *cmd_info)
+{
+ int ret = BCME_OK;
+ wl_natoe_ioc_t *natoe_ioc;
+ char *pcmd = command;
+ uint16 iocsz = sizeof(*natoe_ioc) + WL_NATOE_IOC_BUFSZ;
+ uint16 buflen = WL_NATOE_IOC_BUFSZ;
+ bcm_xtlv_t *pxtlv = NULL;
+ char *ioctl_buf = NULL;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+
+ ioctl_buf = (char *)MALLOCZ(cfg->osh, WLC_IOCTL_MEDLEN);
+ if (!ioctl_buf) {
+ ANDROID_ERROR(("ioctl memory alloc failed\n"));
+ return -ENOMEM;
+ }
+
+	/* alloc mem for ioctl header + tlv data */
+ natoe_ioc = (wl_natoe_ioc_t *)MALLOCZ(cfg->osh, iocsz);
+ if (!natoe_ioc) {
+ ANDROID_ERROR(("ioctl header memory alloc failed\n"));
+ MFREE(cfg->osh, ioctl_buf, WLC_IOCTL_MEDLEN);
+ return -ENOMEM;
+ }
+
+ /* make up natoe cmd ioctl header */
+ natoe_ioc->version = htod16(WL_NATOE_IOCTL_VERSION);
+ natoe_ioc->id = htod16(cmd->id);
+ natoe_ioc->len = htod16(WL_NATOE_IOC_BUFSZ);
+ pxtlv = (bcm_xtlv_t *)natoe_ioc->data;
+
+	if (*pcmd == WL_IOCTL_ACTION_GET) { /* get */
+ iocsz = sizeof(*natoe_ioc) + sizeof(*pxtlv);
+ ret = wl_natoe_get_ioctl(dev, natoe_ioc, iocsz, ioctl_buf,
+ WLC_IOCTL_MEDLEN, cmd_info);
+ if (ret != BCME_OK) {
+			ANDROID_ERROR(("Failed to get iovar in wl_android_natoe_subcmd_tbl_cnt\n"));
+ ret = -EINVAL;
+ }
+ } else { /* set */
+ uint32 val = bcm_atoi(pcmd);
+
+ /* buflen is max tlv data we can write, it will be decremented as we pack */
+ /* save buflen at start */
+ uint16 buflen_at_start = buflen;
+
+ /* we'll adjust final ioc size at the end */
+ ret = bcm_pack_xtlv_entry((uint8**)&pxtlv, &buflen, WL_NATOE_XTLV_TBL_CNT,
+ sizeof(uint32), &val, BCM_XTLV_OPTION_ALIGN32);
+
+ if (ret != BCME_OK) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ /* adjust iocsz to the end of last data record */
+ natoe_ioc->len = (buflen_at_start - buflen);
+ iocsz = sizeof(*natoe_ioc) + natoe_ioc->len;
+
+ ret = wldev_iovar_setbuf(dev, "natoe",
+ natoe_ioc, iocsz, ioctl_buf, WLC_IOCTL_MEDLEN, NULL);
+ if (ret != BCME_OK) {
+			ANDROID_ERROR(("Failed to set iovar %d\n", ret));
+ ret = -EINVAL;
+ }
+ }
+
+exit:
+ MFREE(cfg->osh, ioctl_buf, WLC_IOCTL_MEDLEN);
+ MFREE(cfg->osh, natoe_ioc, sizeof(*natoe_ioc) + WL_NATOE_IOC_BUFSZ);
+
+ return ret;
+}
+
+#endif /* WL_NATOE */
+
+#ifdef WL_MBO
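+/*
+ * Dispatcher for "mbo <subcmd> ..." private commands: tokenizes past the
+ * "mbo" keyword and hands the remaining arguments to the matching handler
+ * in mbo_cmd_list.
+ */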
+static int
+wl_android_process_mbo_cmd(struct net_device *dev, char *command, int total_len)
+{
+ int ret = BCME_ERROR;
+ char *pcmd = command;
+ char *str = NULL;
+ wl_drv_cmd_info_t cmd_info;
+ const wl_drv_sub_cmd_t *mbo_cmd = &mbo_cmd_list[0];
+
+ /* skip to cmd name after "mbo" */
+ str = bcmstrtok(&pcmd, " ", NULL);
+
+ /* If mbo subcmd name is not provided, return error */
+ if (*pcmd == '\0') {
+		ANDROID_ERROR(("mbo subcmd not provided in %s\n", __FUNCTION__));
+ ret = -EINVAL;
+ return ret;
+ }
+
+	/* get the mbo command name to str */
+	str = bcmstrtok(&pcmd, " ", NULL);
+	if (!str) {
+		return -EINVAL;
+	}
+
+ while (mbo_cmd->name != NULL) {
+ if (strnicmp(mbo_cmd->name, str, strlen(mbo_cmd->name)) == 0) {
+ /* dispatch cmd to appropriate handler */
+ if (mbo_cmd->handler) {
+ cmd_info.command = command;
+ cmd_info.tot_len = total_len;
+ ret = mbo_cmd->handler(dev, mbo_cmd, pcmd, &cmd_info);
+ }
+ return ret;
+ }
+ mbo_cmd++;
+ }
+ return ret;
+}
+
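+/*
+ * Packs the given MBO sub-element type as a WL_MBO_XTLV_SUB_ELEM_TYPE
+ * record and issues the "mbo" WL_MBO_CMD_SEND_NOTIF iovar, which requests
+ * a WNM notification to the associated AP.
+ */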
+static int
+wl_android_send_wnm_notif(struct net_device *dev, bcm_iov_buf_t *iov_buf,
+ uint16 iov_buf_len, uint8 *iov_resp, uint16 iov_resp_len, uint8 sub_elem_type)
+{
+ int ret = BCME_OK;
+ uint8 *pxtlv = NULL;
+ uint16 iovlen = 0;
+ uint16 buflen = 0, buflen_start = 0;
+
+ memset_s(iov_buf, iov_buf_len, 0, iov_buf_len);
+ iov_buf->version = WL_MBO_IOV_VERSION;
+ iov_buf->id = WL_MBO_CMD_SEND_NOTIF;
+ buflen = buflen_start = iov_buf_len - sizeof(bcm_iov_buf_t);
+ pxtlv = (uint8 *)&iov_buf->data[0];
+ ret = bcm_pack_xtlv_entry(&pxtlv, &buflen, WL_MBO_XTLV_SUB_ELEM_TYPE,
+ sizeof(sub_elem_type), &sub_elem_type, BCM_XTLV_OPTION_ALIGN32);
+ if (ret != BCME_OK) {
+ return ret;
+ }
+ iov_buf->len = buflen_start - buflen;
+ iovlen = sizeof(bcm_iov_buf_t) + iov_buf->len;
+ ret = wldev_iovar_setbuf(dev, "mbo",
+ iov_buf, iovlen, iov_resp, WLC_IOCTL_MAXLEN, NULL);
+ if (ret != BCME_OK) {
+		ANDROID_ERROR(("Failed to send WNM notif %d\n", ret));
+ }
+ return ret;
+}
+
+static int
+wl_android_mbo_resp_parse_cbfn(void *ctx, const uint8 *data, uint16 type, uint16 len)
+{
+ wl_drv_cmd_info_t *cmd_info = (wl_drv_cmd_info_t *)ctx;
+ uint8 *command = cmd_info->command;
+ uint16 total_len = cmd_info->tot_len;
+ uint16 bytes_written = 0;
+
+ UNUSED_PARAMETER(len);
+ /* TODO: validate data value */
+ if (data == NULL) {
+ ANDROID_ERROR(("%s: Bad argument !!\n", __FUNCTION__));
+ return -EINVAL;
+ }
+ switch (type) {
+ case WL_MBO_XTLV_CELL_DATA_CAP:
+ {
+ bytes_written = snprintf(command, total_len, "cell_data_cap: %u\n", *data);
+ cmd_info->bytes_written = bytes_written;
+ }
+ break;
+ default:
+ ANDROID_ERROR(("%s: Unknown tlv %u\n", __FUNCTION__, type));
+ }
+ return BCME_OK;
+}
+
+static int
+wl_android_mbo_subcmd_cell_data_cap(struct net_device *dev, const wl_drv_sub_cmd_t *cmd,
+ char *command, wl_drv_cmd_info_t *cmd_info)
+{
+ int ret = BCME_OK;
+ uint8 *pxtlv = NULL;
+ uint16 buflen = 0, buflen_start = 0;
+ uint16 iovlen = 0;
+ char *pcmd = command;
+ bcm_iov_buf_t *iov_buf = NULL;
+ bcm_iov_buf_t *p_resp = NULL;
+ uint8 *iov_resp = NULL;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ uint16 version;
+
+ /* first get the configured value */
+ iov_buf = (bcm_iov_buf_t *)MALLOCZ(cfg->osh, WLC_IOCTL_MEDLEN);
+ if (iov_buf == NULL) {
+ ret = -ENOMEM;
+		ANDROID_ERROR(("iov buf memory alloc failed\n"));
+ goto exit;
+ }
+ iov_resp = (uint8 *)MALLOCZ(cfg->osh, WLC_IOCTL_MAXLEN);
+ if (iov_resp == NULL) {
+ ret = -ENOMEM;
+		ANDROID_ERROR(("iov resp memory alloc failed\n"));
+ goto exit;
+ }
+
+ /* fill header */
+ iov_buf->version = WL_MBO_IOV_VERSION;
+ iov_buf->id = WL_MBO_CMD_CELLULAR_DATA_CAP;
+
+ ret = wldev_iovar_getbuf(dev, "mbo", iov_buf, WLC_IOCTL_MEDLEN, iov_resp,
+ WLC_IOCTL_MAXLEN,
+ NULL);
+ if (ret != BCME_OK) {
+ goto exit;
+ }
+ p_resp = (bcm_iov_buf_t *)iov_resp;
+
+ /* get */
+ if (*pcmd == WL_IOCTL_ACTION_GET) {
+ /* Check for version */
+ version = dtoh16(*(uint16 *)iov_resp);
+		if (version != WL_MBO_IOV_VERSION) {
+			ANDROID_ERROR(("Version mismatch. returned ver %u expected %u\n",
+				version, WL_MBO_IOV_VERSION));
+			ret = -EINVAL;
+			goto exit;
+		}
+ if (p_resp->id == WL_MBO_CMD_CELLULAR_DATA_CAP) {
+ ret = bcm_unpack_xtlv_buf((void *)cmd_info, (uint8 *)p_resp->data,
+ p_resp->len, BCM_XTLV_OPTION_ALIGN32,
+ wl_android_mbo_resp_parse_cbfn);
+ if (ret == BCME_OK) {
+ ret = cmd_info->bytes_written;
+ }
+ } else {
+ ret = -EINVAL;
+ ANDROID_ERROR(("Mismatch: resp id %d req id %d\n", p_resp->id, cmd->id));
+ goto exit;
+ }
+ } else {
+ uint8 cell_cap = bcm_atoi(pcmd);
+ const uint8* old_cell_cap = NULL;
+ uint16 len = 0;
+
+ old_cell_cap = bcm_get_data_from_xtlv_buf((uint8 *)p_resp->data, p_resp->len,
+ WL_MBO_XTLV_CELL_DATA_CAP, &len, BCM_XTLV_OPTION_ALIGN32);
+ if (old_cell_cap && *old_cell_cap == cell_cap) {
+			ANDROID_ERROR(("No change in cellular data capability\n"));
+ /* No change in value */
+ goto exit;
+ }
+
+ buflen = buflen_start = WLC_IOCTL_MEDLEN - sizeof(bcm_iov_buf_t);
+
+ if (cell_cap < MBO_CELL_DATA_CONN_AVAILABLE ||
+ cell_cap > MBO_CELL_DATA_CONN_NOT_CAPABLE) {
+ ANDROID_ERROR(("wrong value %u\n", cell_cap));
+ ret = -EINVAL;
+ goto exit;
+ }
+ pxtlv = (uint8 *)&iov_buf->data[0];
+ ret = bcm_pack_xtlv_entry(&pxtlv, &buflen, WL_MBO_XTLV_CELL_DATA_CAP,
+ sizeof(cell_cap), &cell_cap, BCM_XTLV_OPTION_ALIGN32);
+ if (ret != BCME_OK) {
+ goto exit;
+ }
+ iov_buf->len = buflen_start - buflen;
+ iovlen = sizeof(bcm_iov_buf_t) + iov_buf->len;
+ ret = wldev_iovar_setbuf(dev, "mbo",
+ iov_buf, iovlen, iov_resp, WLC_IOCTL_MAXLEN, NULL);
+ if (ret != BCME_OK) {
+			ANDROID_ERROR(("Failed to set iovar %d\n", ret));
+ ret = -EINVAL;
+ goto exit;
+ }
+ /* Skip for CUSTOMER_HW4 - WNM notification
+ * for cellular data capability is handled by host
+ */
+#if !defined(CUSTOMER_HW4)
+ /* send a WNM notification request to associated AP */
+ if (wl_get_drv_status(cfg, CONNECTED, dev)) {
+ ANDROID_INFO(("Sending WNM Notif\n"));
+ ret = wl_android_send_wnm_notif(dev, iov_buf, WLC_IOCTL_MEDLEN,
+ iov_resp, WLC_IOCTL_MAXLEN, MBO_ATTR_CELL_DATA_CAP);
+ if (ret != BCME_OK) {
+				ANDROID_ERROR(("Failed to send WNM notification %d\n", ret));
+ ret = -EINVAL;
+ }
+ }
+#endif /* CUSTOMER_HW4 */
+ }
+exit:
+ if (iov_buf) {
+ MFREE(cfg->osh, iov_buf, WLC_IOCTL_MEDLEN);
+ }
+ if (iov_resp) {
+ MFREE(cfg->osh, iov_resp, WLC_IOCTL_MAXLEN);
+ }
+ return ret;
+}
+
+static int
+wl_android_mbo_non_pref_chan_parse_cbfn(void *ctx, const uint8 *data, uint16 type, uint16 len)
+{
+ wl_drv_cmd_info_t *cmd_info = (wl_drv_cmd_info_t *)ctx;
+ uint8 *command = cmd_info->command + cmd_info->bytes_written;
+ uint16 total_len = cmd_info->tot_len;
+ uint16 bytes_written = 0;
+
+	ANDROID_INFO(("Total bytes written at beginning %u\n", cmd_info->bytes_written));
+ UNUSED_PARAMETER(len);
+ if (data == NULL) {
+ ANDROID_ERROR(("%s: Bad argument !!\n", __FUNCTION__));
+ return -EINVAL;
+ }
+ switch (type) {
+ case WL_MBO_XTLV_OPCLASS:
+ {
+		bytes_written = snprintf(command, total_len - cmd_info->bytes_written,
+			"%u:", *data);
+ ANDROID_ERROR(("wr %u %u\n", bytes_written, *data));
+ command += bytes_written;
+ cmd_info->bytes_written += bytes_written;
+ }
+ break;
+ case WL_MBO_XTLV_CHAN:
+ {
+		bytes_written = snprintf(command, total_len - cmd_info->bytes_written,
+			"%u:", *data);
+ ANDROID_ERROR(("wr %u\n", bytes_written));
+ command += bytes_written;
+ cmd_info->bytes_written += bytes_written;
+ }
+ break;
+ case WL_MBO_XTLV_PREFERENCE:
+ {
+		bytes_written = snprintf(command, total_len - cmd_info->bytes_written,
+			"%u:", *data);
+ ANDROID_ERROR(("wr %u\n", bytes_written));
+ command += bytes_written;
+ cmd_info->bytes_written += bytes_written;
+ }
+ break;
+ case WL_MBO_XTLV_REASON_CODE:
+ {
+		bytes_written = snprintf(command, total_len - cmd_info->bytes_written,
+			"%u ", *data);
+ ANDROID_ERROR(("wr %u\n", bytes_written));
+ command += bytes_written;
+ cmd_info->bytes_written += bytes_written;
+ }
+ break;
+ default:
+ ANDROID_ERROR(("%s: Unknown tlv %u\n", __FUNCTION__, type));
+ }
+ ANDROID_INFO(("Total bytes written %u\n", cmd_info->bytes_written));
+ return BCME_OK;
+}
+
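+/*
+ * Non-preferred channel report handler. Get returns the configured entries
+ * as "opclass:chan:pref:reason" tuples. Set first deletes the existing
+ * configuration (WL_MBO_CMD_DEL_CHAN_PREF) and, for "set", adds each
+ * space-separated opclass:chan:pref:reason tuple via
+ * WL_MBO_CMD_ADD_CHAN_PREF (up to MBO_MAX_CHAN_PREF_ENTRIES), then sends a
+ * WNM notification if associated.
+ */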
+static int
+wl_android_mbo_subcmd_non_pref_chan(struct net_device *dev,
+ const wl_drv_sub_cmd_t *cmd, char *command,
+ wl_drv_cmd_info_t *cmd_info)
+{
+ int ret = BCME_OK;
+ uint8 *pxtlv = NULL;
+ uint16 buflen = 0, buflen_start = 0;
+ uint16 iovlen = 0;
+ char *pcmd = command;
+ bcm_iov_buf_t *iov_buf = NULL;
+ bcm_iov_buf_t *p_resp = NULL;
+ uint8 *iov_resp = NULL;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ uint16 version;
+
+	ANDROID_INFO(("Enter %s\n", __FUNCTION__));
+ iov_buf = (bcm_iov_buf_t *)MALLOCZ(cfg->osh, WLC_IOCTL_MEDLEN);
+ if (iov_buf == NULL) {
+ ret = -ENOMEM;
+		ANDROID_ERROR(("iov buf memory alloc failed\n"));
+ goto exit;
+ }
+ iov_resp = (uint8 *)MALLOCZ(cfg->osh, WLC_IOCTL_MAXLEN);
+ if (iov_resp == NULL) {
+ ret = -ENOMEM;
+		ANDROID_ERROR(("iov resp memory alloc failed\n"));
+ goto exit;
+ }
+ /* get */
+ if (*pcmd == WL_IOCTL_ACTION_GET) {
+ /* fill header */
+ iov_buf->version = WL_MBO_IOV_VERSION;
+ iov_buf->id = WL_MBO_CMD_LIST_CHAN_PREF;
+
+ ret = wldev_iovar_getbuf(dev, "mbo", iov_buf, WLC_IOCTL_MEDLEN, iov_resp,
+ WLC_IOCTL_MAXLEN, NULL);
+ if (ret != BCME_OK) {
+ goto exit;
+ }
+ p_resp = (bcm_iov_buf_t *)iov_resp;
+ /* Check for version */
+ version = dtoh16(*(uint16 *)iov_resp);
+		if (version != WL_MBO_IOV_VERSION) {
+			ANDROID_ERROR(("Version mismatch. returned ver %u expected %u\n",
+				version, WL_MBO_IOV_VERSION));
+			ret = -EINVAL;
+			goto exit;
+		}
+ if (p_resp->id == WL_MBO_CMD_LIST_CHAN_PREF) {
+ ret = bcm_unpack_xtlv_buf((void *)cmd_info, (uint8 *)p_resp->data,
+ p_resp->len, BCM_XTLV_OPTION_ALIGN32,
+ wl_android_mbo_non_pref_chan_parse_cbfn);
+ if (ret == BCME_OK) {
+ ret = cmd_info->bytes_written;
+ }
+ } else {
+ ret = -EINVAL;
+ ANDROID_ERROR(("Mismatch: resp id %d req id %d\n", p_resp->id, cmd->id));
+ goto exit;
+ }
+ } else {
+ char *str = pcmd;
+ uint opcl = 0, ch = 0, pref = 0, rc = 0;
+
+		str = bcmstrtok(&pcmd, " ", NULL);
+		if (str && ((strnicmp(str, "set", 3) == 0) || (strnicmp(str, "clear", 5) == 0))) {
+ /* delete all configurations */
+ iov_buf->version = WL_MBO_IOV_VERSION;
+ iov_buf->id = WL_MBO_CMD_DEL_CHAN_PREF;
+ iov_buf->len = 0;
+ iovlen = sizeof(bcm_iov_buf_t) + iov_buf->len;
+ ret = wldev_iovar_setbuf(dev, "mbo",
+ iov_buf, iovlen, iov_resp, WLC_IOCTL_MAXLEN, NULL);
+ if (ret != BCME_OK) {
+				ANDROID_ERROR(("Failed to set iovar %d\n", ret));
+ ret = -EINVAL;
+ goto exit;
+ }
+		} else {
+			ANDROID_ERROR(("Unknown command %s\n", str));
+			ret = -EINVAL;
+			goto exit;
+		}
+ /* parse non pref channel list */
+ if (strnicmp(str, "set", 3) == 0) {
+ uint8 cnt = 0;
+ str = bcmstrtok(&pcmd, " ", NULL);
+ while (str != NULL) {
+ ret = sscanf(str, "%u:%u:%u:%u", &opcl, &ch, &pref, &rc);
+ ANDROID_ERROR(("buflen %u op %u, ch %u, pref %u rc %u\n",
+ buflen, opcl, ch, pref, rc));
+				if (ret != 4) {
+					ANDROID_ERROR(("Not all parameters present\n"));
+					ret = -EINVAL;
+					goto exit;
+				}
+ /* TODO: add a validation check here */
+ memset_s(iov_buf, WLC_IOCTL_MEDLEN, 0, WLC_IOCTL_MEDLEN);
+				buflen = buflen_start = WLC_IOCTL_MEDLEN - sizeof(bcm_iov_buf_t);
+ pxtlv = (uint8 *)&iov_buf->data[0];
+ /* opclass */
+ ret = bcm_pack_xtlv_entry(&pxtlv, &buflen, WL_MBO_XTLV_OPCLASS,
+ sizeof(uint8), (uint8 *)&opcl, BCM_XTLV_OPTION_ALIGN32);
+ if (ret != BCME_OK) {
+ goto exit;
+ }
+ /* channel */
+ ret = bcm_pack_xtlv_entry(&pxtlv, &buflen, WL_MBO_XTLV_CHAN,
+ sizeof(uint8), (uint8 *)&ch, BCM_XTLV_OPTION_ALIGN32);
+ if (ret != BCME_OK) {
+ goto exit;
+ }
+ /* preference */
+ ret = bcm_pack_xtlv_entry(&pxtlv, &buflen, WL_MBO_XTLV_PREFERENCE,
+ sizeof(uint8), (uint8 *)&pref, BCM_XTLV_OPTION_ALIGN32);
+ if (ret != BCME_OK) {
+ goto exit;
+ }
+ /* reason */
+ ret = bcm_pack_xtlv_entry(&pxtlv, &buflen, WL_MBO_XTLV_REASON_CODE,
+ sizeof(uint8), (uint8 *)&rc, BCM_XTLV_OPTION_ALIGN32);
+ if (ret != BCME_OK) {
+ goto exit;
+ }
+ ANDROID_ERROR(("len %u\n", (buflen_start - buflen)));
+ /* Now set the new non pref channels */
+ iov_buf->version = WL_MBO_IOV_VERSION;
+ iov_buf->id = WL_MBO_CMD_ADD_CHAN_PREF;
+ iov_buf->len = buflen_start - buflen;
+ iovlen = sizeof(bcm_iov_buf_t) + iov_buf->len;
+ ret = wldev_iovar_setbuf(dev, "mbo",
+ iov_buf, iovlen, iov_resp, WLC_IOCTL_MEDLEN, NULL);
+ if (ret != BCME_OK) {
+					ANDROID_ERROR(("Failed to set iovar %d\n", ret));
+ ret = -EINVAL;
+ goto exit;
+ }
+ cnt++;
+ if (cnt >= MBO_MAX_CHAN_PREF_ENTRIES) {
+ break;
+ }
+ ANDROID_ERROR(("%d cnt %u\n", __LINE__, cnt));
+ str = bcmstrtok(&pcmd, " ", NULL);
+ }
+ }
+ /* send a WNM notification request to associated AP */
+ if (wl_get_drv_status(cfg, CONNECTED, dev)) {
+ ANDROID_INFO(("Sending WNM Notif\n"));
+ ret = wl_android_send_wnm_notif(dev, iov_buf, WLC_IOCTL_MEDLEN,
+ iov_resp, WLC_IOCTL_MAXLEN, MBO_ATTR_NON_PREF_CHAN_REPORT);
+ if (ret != BCME_OK) {
+				ANDROID_ERROR(("Failed to send WNM notification %d\n", ret));
+ ret = -EINVAL;
+ }
+ }
+ }
+exit:
+ if (iov_buf) {
+ MFREE(cfg->osh, iov_buf, WLC_IOCTL_MEDLEN);
+ }
+ if (iov_resp) {
+ MFREE(cfg->osh, iov_resp, WLC_IOCTL_MAXLEN);
+ }
+ return ret;
+}
+#endif /* WL_MBO */
+
+#ifdef CUSTOMER_HW4_PRIVATE_CMD
+#ifdef SUPPORT_AMPDU_MPDU_CMD
+/* CMD_AMPDU_MPDU */
+static int
+wl_android_set_ampdu_mpdu(struct net_device *dev, const char* string_num)
+{
+ int err = 0;
+ int ampdu_mpdu;
+
+ ampdu_mpdu = bcm_atoi(string_num);
+
+ if (ampdu_mpdu > 32) {
+ ANDROID_ERROR(("wl_android_set_ampdu_mpdu : ampdu_mpdu MAX value is 32.\n"));
+ return -1;
+ }
+
+ ANDROID_ERROR(("wl_android_set_ampdu_mpdu : ampdu_mpdu = %d\n", ampdu_mpdu));
+ err = wldev_iovar_setint(dev, "ampdu_mpdu", ampdu_mpdu);
+ if (err < 0) {
+ ANDROID_ERROR(("wl_android_set_ampdu_mpdu : ampdu_mpdu set error. %d\n", err));
+ return -1;
+ }
+
+ return 0;
+}
+#endif /* SUPPORT_AMPDU_MPDU_CMD */
+#endif /* CUSTOMER_HW4_PRIVATE_CMD */
+
+#if defined(CONFIG_WLAN_BEYONDX) || defined(CONFIG_SEC_5GMODEL)
+extern int wl_cfg80211_send_msg_to_ril(void);
+extern void wl_cfg80211_register_dev_ril_bridge_event_notifier(void);
+extern void wl_cfg80211_unregister_dev_ril_bridge_event_notifier(void);
+extern int g_mhs_chan_for_cpcoex;
+#endif /* CONFIG_WLAN_BEYONDX || defined(CONFIG_SEC_5GMODEL) */
+
+#if defined(WL_SUPPORT_AUTO_CHANNEL)
+static s32
+wl_android_set_auto_channel_scan_state(struct net_device *ndev)
+{
+ u32 val = 0;
+ s32 ret = BCME_ERROR;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ /* Set interface up, explicitly. */
+ val = 1;
+
+ ret = wldev_ioctl_set(ndev, WLC_UP, (void *)&val, sizeof(val));
+ if (ret < 0) {
+ ANDROID_ERROR(("set interface up failed, error = %d\n", ret));
+ goto done;
+ }
+
+	/* Stop all scans explicitly until auto channel selection completes. */
+ wl_set_drv_status(cfg, SCANNING, ndev);
+ if (cfg->escan_info.ndev == NULL) {
+ ret = BCME_OK;
+ goto done;
+ }
+
+ wl_cfgscan_cancel_scan(cfg);
+
+done:
+ return ret;
+}
+
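+/*
+ * Builds a chanspec list from the comma-separated frequencies that follow
+ * FREQ_STR in the command string. When a STA association exists
+ * (sta_channel != 0) the list collapses to a single entry: SCC on the STA
+ * channel if the request covers the STA band, otherwise the highest-band
+ * entry from the list.
+ */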
+s32
+wl_android_get_freq_list_chanspecs(struct net_device *ndev, wl_uint32_list_t *list,
+ s32 buflen, const char* cmd_str, int sta_channel, chanspec_band_t sta_acs_band)
+{
+ u32 freq = 0;
+ chanspec_t chanspec = 0;
+ s32 ret = BCME_OK;
+ int i = 0;
+ char *pcmd, *token;
+ int len = buflen;
+
+ pcmd = bcmstrstr(cmd_str, FREQ_STR);
+ pcmd += strlen(FREQ_STR);
+
+ len -= sizeof(list->count);
+
+ while ((token = strsep(&pcmd, ",")) != NULL) {
+ if (*token == '\0')
+ continue;
+
+ if (len < sizeof(list->element[i]))
+ break;
+
+ freq = bcm_atoi(token);
+ /* Convert chanspec from frequency */
+ if ((freq > 0) &&
+ ((chanspec = wl_freq_to_chanspec(freq)) != INVCHANSPEC)) {
+			ANDROID_INFO(("Adding chanspec 0x%x to list at index %d\n", chanspec, i));
+ list->element[i] = chanspec;
+ len -= sizeof(list->element[i]);
+ i++;
+#ifdef WL_5G_SOFTAP_ONLY_ON_DEF_CHAN
+ /* Android includes 2g channels even for 5g band configuration. For
+ * customers using only single channel 5G AP, set the channel and
+ * return without doing ACS
+ */
+ if (CHSPEC_BAND(chanspec) == WL_CHANSPEC_BAND_5G) {
+				ANDROID_INFO(("Pick default channel from 5g\n"));
+ if (!sta_channel) {
+ list->element[0] = chanspec;
+ list->count = 1;
+ return ret;
+ }
+ break;
+ }
+#endif /* WL_5G_SOFTAP_ONLY_ON_DEF_CHAN */
+ }
+ }
+
+ list->count = i;
+ /* valid chanspec present in the list */
+ if (list->count && sta_channel) {
+		/* STA associated case: can't do ACS.
+		 * The frequency list is ordered from lower to higher band,
+		 * so check against the highest band entry.
+		 */
+ chanspec = list->element[i-1];
+ if (CHSPEC_BAND(chanspec) == sta_acs_band) {
+ /* softap request is for same band. Use SCC
+ * Convert sta channel to freq
+ */
+ freq = wl_channel_to_frequency(sta_channel, sta_acs_band);
+ list->element[0] =
+ wl_freq_to_chanspec(freq);
+			ANDROID_INFO(("Softap on same band as STA. "
+				"Use SCC. chanspec:0x%x\n", chanspec));
+ } else {
+ list->element[0] = chanspec;
+ ANDROID_INFO(("RSDB case chanspec:0x%x\n", chanspec));
+ }
+ list->count = 1;
+ return ret;
+ }
+ return ret;
+}
+
+s32
+wl_android_get_band_chanspecs(struct net_device *ndev, void *buf, s32 buflen,
+ chanspec_band_t band, bool acs_req)
+{
+ u32 channel = 0;
+ s32 ret = BCME_ERROR;
+ s32 i = 0;
+ s32 j = 0;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ wl_uint32_list_t *list = NULL;
+ chanspec_t chanspec = 0;
+
+ if (band != 0xff) {
+ chanspec |= (band | WL_CHANSPEC_BW_20 |
+ WL_CHANSPEC_CTL_SB_NONE);
+ chanspec = wl_chspec_host_to_driver(chanspec);
+ }
+
+ ret = wldev_iovar_getbuf_bsscfg(ndev, "chanspecs", (void *)&chanspec,
+ sizeof(chanspec), buf, buflen, 0, &cfg->ioctl_buf_sync);
+ if (ret < 0) {
+ ANDROID_ERROR(("get 'chanspecs' failed, error = %d\n", ret));
+ goto done;
+ }
+
+ list = (wl_uint32_list_t *)buf;
+	/* Skip DFS and invalid P2P channels. */
+ for (i = 0, j = 0; i < dtoh32(list->count); i++) {
+ if (!CHSPEC_IS20(list->element[i])) {
+ continue;
+ }
+ chanspec = (chanspec_t) dtoh32(list->element[i]);
+ channel = chanspec | WL_CHANSPEC_BW_20;
+ channel = wl_chspec_host_to_driver(channel);
+
+ ret = wldev_iovar_getint(ndev, "per_chan_info", &channel);
+ if (ret < 0) {
+ ANDROID_ERROR(("get 'per_chan_info' failed, error = %d\n", ret));
+ goto done;
+ }
+
+ if (CHSPEC_IS5G(chanspec) && (CHANNEL_IS_RADAR(channel) ||
+#ifndef ALLOW_5G_ACS
+ ((acs_req == true) && (CHSPEC_CHANNEL(chanspec) != APCS_DEFAULT_5G_CH)) ||
+#endif /* !ALLOW_5G_ACS */
+ (0))) {
+ continue;
+ } else if (!(CHSPEC_IS2G(chanspec) || CHSPEC_IS5G(chanspec)) &&
+ !(CHSPEC_IS_6G_PSC(chanspec))) {
+ continue;
+		} else {
+ list->element[j] = list->element[i];
+ ANDROID_INFO(("Adding chanspec in list : %x\n", list->element[j]));
+ }
+
+ j++;
+ }
+
+ list->count = j;
+
+done:
+ return ret;
+}
+
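+/*
+ * Starts firmware channel selection with WLC_START_CHANNEL_SEL, then polls
+ * WLC_GET_CHANNEL_SEL every CHAN_SEL_IOCTL_DELAY ms for up to
+ * CHAN_SEL_RETRY_COUNT attempts; the low byte of the result is the chosen
+ * channel.
+ */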
+static s32
+wl_android_get_best_channel(struct net_device *ndev, void *buf, int buflen,
+ int *channel)
+{
+ s32 ret = BCME_ERROR;
+ int chosen = 0;
+ int retry = 0;
+
+ /* Start auto channel selection scan. */
+ ret = wldev_ioctl_set(ndev, WLC_START_CHANNEL_SEL, NULL, 0);
+ if (ret < 0) {
+ ANDROID_ERROR(("can't start auto channel scan, error = %d\n", ret));
+ *channel = 0;
+ goto done;
+ }
+
+ /* Wait for auto channel selection, worst case possible delay is 5250ms. */
+ retry = CHAN_SEL_RETRY_COUNT;
+
+ while (retry--) {
+ OSL_SLEEP(CHAN_SEL_IOCTL_DELAY);
+ chosen = 0;
+ ret = wldev_ioctl_get(ndev, WLC_GET_CHANNEL_SEL, &chosen, sizeof(chosen));
+ if ((ret == 0) && (dtoh32(chosen) != 0)) {
+ *channel = (u16)(chosen & 0x00FF);
+ ANDROID_INFO(("selected channel = %d\n", *channel));
+ break;
+ }
+ ANDROID_INFO(("attempt = %d, ret = %d, chosen = %d\n",
+ (CHAN_SEL_RETRY_COUNT - retry), ret, dtoh32(chosen)));
+ }
+
+ if (retry <= 0) {
+ ANDROID_ERROR(("failure, auto channel selection timed out\n"));
+ *channel = 0;
+ ret = BCME_ERROR;
+ }
+
+done:
+ return ret;
+}
+
+static s32
+wl_android_restore_auto_channel_scan_state(struct net_device *ndev)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ /* Clear scan stop driver status. */
+ wl_clr_drv_status(cfg, SCANNING, ndev);
+
+ return BCME_OK;
+}
+
+s32
+wl_android_get_best_channels(struct net_device *dev, char* cmd, int total_len)
+{
+ int channel = 0;
+ s32 ret = BCME_ERROR;
+ u8 *buf = NULL;
+ char *pos = cmd;
+ struct bcm_cfg80211 *cfg = NULL;
+ struct net_device *ndev = NULL;
+
+ bzero(cmd, total_len);
+ cfg = wl_get_cfg(dev);
+
+ buf = (u8 *)MALLOC(cfg->osh, CHANSPEC_BUF_SIZE);
+ if (buf == NULL) {
+ ANDROID_ERROR(("failed to allocate chanspec buffer\n"));
+ return -ENOMEM;
+ }
+
+ /*
+ * Always use primary interface, irrespective of interface on which
+ * command came.
+ */
+ ndev = bcmcfg_to_prmry_ndev(cfg);
+
+ /*
+ * Make sure that FW and driver are in right state to do auto channel
+ * selection scan.
+ */
+ ret = wl_android_set_auto_channel_scan_state(ndev);
+ if (ret < 0) {
+ ANDROID_ERROR(("can't set auto channel scan state, error = %d\n", ret));
+ goto done;
+ }
+
+ /* Best channel selection in 2.4GHz band. */
+ ret = wl_android_get_band_chanspecs(ndev, (void *)buf, CHANSPEC_BUF_SIZE,
+ WL_CHANSPEC_BAND_2G, false);
+ if (ret < 0) {
+ ANDROID_ERROR(("can't get chanspecs in 2.4GHz, error = %d\n", ret));
+ goto done;
+ }
+
+ ret = wl_android_get_best_channel(ndev, (void *)buf, CHANSPEC_BUF_SIZE,
+ &channel);
+ if (ret < 0) {
+ ANDROID_ERROR(("can't select best channel scan in 2.4GHz, error = %d\n", ret));
+ goto done;
+ }
+
+ if (CHANNEL_IS_2G(channel)) {
+ channel = ieee80211_channel_to_frequency(channel, IEEE80211_BAND_2GHZ);
+ } else {
+ ANDROID_ERROR(("invalid 2.4GHz channel, channel = %d\n", channel));
+ channel = 0;
+ }
+
+ pos += snprintf(pos, total_len, "%04d ", channel);
+
+ /* Best channel selection in 5GHz band. */
+ ret = wl_android_get_band_chanspecs(ndev, (void *)buf, CHANSPEC_BUF_SIZE,
+ WL_CHANSPEC_BAND_5G, false);
+ if (ret < 0) {
+ ANDROID_ERROR(("can't get chanspecs in 5GHz, error = %d\n", ret));
+ goto done;
+ }
+
+ ret = wl_android_get_best_channel(ndev, (void *)buf, CHANSPEC_BUF_SIZE,
+ &channel);
+ if (ret < 0) {
+ ANDROID_ERROR(("can't select best channel scan in 5GHz, error = %d\n", ret));
+ goto done;
+ }
+
+ if (CHANNEL_IS_5G(channel)) {
+ channel = ieee80211_channel_to_frequency(channel, IEEE80211_BAND_5GHZ);
+ } else {
+ ANDROID_ERROR(("invalid 5GHz channel, channel = %d\n", channel));
+ channel = 0;
+ }
+
+ pos += snprintf(pos, total_len - (pos - cmd), "%04d ", channel);
+
+ /* Set overall best channel same as 5GHz best channel. */
+ pos += snprintf(pos, total_len - (pos - cmd), "%04d ", channel);
+
+done:
+	if (buf) {
+ MFREE(cfg->osh, buf, CHANSPEC_BUF_SIZE);
+ }
+
+ /* Restore FW and driver back to normal state. */
+ ret = wl_android_restore_auto_channel_scan_state(ndev);
+ if (ret < 0) {
+ ANDROID_ERROR(("can't restore auto channel scan state, error = %d\n", ret));
+ }
+
+ return (pos - cmd);
+}
+
+int
+wl_android_set_spect(struct net_device *dev, int spect)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ int wlc_down = 1;
+ int wlc_up = 1;
+ int err = BCME_OK;
+
+ if (!wl_get_drv_status_all(cfg, CONNECTED)) {
+ err = wldev_ioctl_set(dev, WLC_DOWN, &wlc_down, sizeof(wlc_down));
+ if (err) {
+ ANDROID_ERROR(("%s: WLC_DOWN failed: code: %d\n", __func__, err));
+ return err;
+ }
+
+ err = wldev_ioctl_set(dev, WLC_SET_SPECT_MANAGMENT, &spect, sizeof(spect));
+ if (err) {
+ ANDROID_ERROR(("%s: error setting spect: code: %d\n", __func__, err));
+ return err;
+ }
+
+ err = wldev_ioctl_set(dev, WLC_UP, &wlc_up, sizeof(wlc_up));
+ if (err) {
+ ANDROID_ERROR(("%s: WLC_UP failed: code: %d\n", __func__, err));
+ return err;
+ }
+ }
+ return err;
+}
+
+static int
+wl_android_get_sta_channel(struct bcm_cfg80211 *cfg)
+{
+ chanspec_t *sta_chanspec = NULL;
+ u32 channel = 0;
+
+ if (wl_get_drv_status(cfg, CONNECTED, bcmcfg_to_prmry_ndev(cfg))) {
+ if ((sta_chanspec = (chanspec_t *)wl_read_prof(cfg,
+ bcmcfg_to_prmry_ndev(cfg), WL_PROF_CHAN))) {
+ channel = wf_chspec_ctlchan(*sta_chanspec);
+ }
+ }
+ return channel;
+}
+
+static int
+wl_cfg80211_get_acs_band(int band)
+{
+ chanspec_band_t acs_band = WLC_ACS_BAND_INVALID;
+ switch (band) {
+ case WLC_BAND_AUTO:
+ ANDROID_INFO(("ACS full channel scan \n"));
+ /* Restricting band to 2G in case of hw_mode any */
+ acs_band = WL_CHANSPEC_BAND_2G;
+ break;
+#ifdef WL_6G_BAND
+ case WLC_BAND_6G:
+ ANDROID_INFO(("ACS 6G band scan \n"));
+ acs_band = WL_CHANSPEC_BAND_6G;
+ break;
+#endif /* WL_6G_BAND */
+ case WLC_BAND_5G:
+ ANDROID_INFO(("ACS 5G band scan \n"));
+ acs_band = WL_CHANSPEC_BAND_5G;
+ break;
+ case WLC_BAND_2G:
+ /*
+		 * If the channel argument is not provided, or argument 20 is provided,
+		 * restrict the channel to 2 GHz, 20 MHz BW, no sideband.
+ */
+ ANDROID_INFO(("ACS 2G band scan \n"));
+ acs_band = WL_CHANSPEC_BAND_2G;
+ break;
+ default:
+ ANDROID_ERROR(("ACS: No band chosen\n"));
+ break;
+ }
+ ANDROID_INFO(("%s: ACS: band = %d, acs_band = 0x%x\n", __FUNCTION__, band, acs_band));
+ return acs_band;
+}
+
+/* SoftAP feature */
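+/*
+ * SoftAP auto channel selection: parses the band (or "freq=" list) from
+ * cmd_str; if a STA association exists the channel is derived from it (SCC
+ * or a per-band default) instead of scanning, otherwise ACS runs and falls
+ * back to APCS_DEFAULT_*_CH on failure. The chosen channel number is
+ * written back into the command buffer.
+ */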
+static int
+wl_android_set_auto_channel(struct net_device *dev, const char* cmd_str,
+ char* command, int total_len)
+{
+ int channel = 0, sta_channel = 0;
+ int chosen = 0;
+ int retry = 0;
+ int ret = 0;
+ int spect = 0;
+ u8 *reqbuf = NULL;
+ uint32 band = WLC_BAND_INVALID, sta_band = WLC_BAND_INVALID;
+ chanspec_band_t acs_band = WLC_ACS_BAND_INVALID;
+ uint32 buf_size;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ bool acs_freq_list_present = false;
+ char *pcmd;
+
+ if (cmd_str) {
+ ANDROID_INFO(("Command: %s len:%d \n", cmd_str, (int)strlen(cmd_str)));
+ pcmd = bcmstrstr(cmd_str, FREQ_STR);
+ if (pcmd) {
+ acs_freq_list_present = true;
+ ANDROID_INFO(("ACS has freq list\n"));
+ } else if (strnicmp(cmd_str, APCS_BAND_AUTO, strlen(APCS_BAND_AUTO)) == 0) {
+ band = WLC_BAND_AUTO;
+#ifdef WL_6G_BAND
+ } else if (strnicmp(cmd_str, APCS_BAND_6G, strlen(APCS_BAND_6G)) == 0) {
+ band = WLC_BAND_6G;
+#endif /* WL_6G_BAND */
+ } else if (strnicmp(cmd_str, APCS_BAND_5G, strlen(APCS_BAND_5G)) == 0) {
+ band = WLC_BAND_5G;
+ } else if (strnicmp(cmd_str, APCS_BAND_2G, strlen(APCS_BAND_2G)) == 0) {
+ band = WLC_BAND_2G;
+ } else {
+ /*
+ * For backward compatibility: Some platforms used to issue argument 20 or 0
+ * to enforce the 2G channel selection
+ */
+ channel = bcm_atoi(cmd_str);
+ if ((channel == APCS_BAND_2G_LEGACY1) ||
+ (channel == APCS_BAND_2G_LEGACY2)) {
+ band = WLC_BAND_2G;
+ } else {
+ ANDROID_ERROR(("Invalid argument\n"));
+ return -EINVAL;
+ }
+ }
+ } else {
+ /* If no argument is provided, default to 2G */
+ ANDROID_ERROR(("No argument given default to 2.4G scan\n"));
+ band = WLC_BAND_2G;
+ }
+ ANDROID_INFO(("HAPD_AUTO_CHANNEL = %d, band=%d \n", channel, band));
+
+#if defined(CONFIG_WLAN_BEYONDX) || defined(CONFIG_SEC_5GMODEL)
+ wl_cfg80211_register_dev_ril_bridge_event_notifier();
+ if (band == WLC_BAND_2G) {
+ wl_cfg80211_send_msg_to_ril();
+
+ if (g_mhs_chan_for_cpcoex) {
+ channel = g_mhs_chan_for_cpcoex;
+ g_mhs_chan_for_cpcoex = 0;
+ goto done2;
+ }
+ }
+ wl_cfg80211_unregister_dev_ril_bridge_event_notifier();
+#endif /* CONFIG_WLAN_BEYONDX || defined(CONFIG_SEC_5GMODEL) */
+
+	/* If STA is connected, return the STA channel; else ACS can be issued:
+	 * set spect to 0 and proceed with ACS.
+	 */
+ sta_channel = wl_android_get_sta_channel(cfg);
+ sta_band = WL_GET_BAND(sta_channel);
+ if (sta_channel && (band != WLC_BAND_INVALID)) {
+ switch (sta_band) {
+ case (WLC_BAND_5G):
+#ifdef WL_6G_BAND
+ case (WLC_BAND_6G):
+#endif /* WL_6G_BAND */
+ {
+ if (band == WLC_BAND_2G || band == WLC_BAND_AUTO) {
+ channel = APCS_DEFAULT_2G_CH;
+ } else if (band == WLC_BAND_5G) {
+ channel = sta_channel;
+ }
+ break;
+ }
+ case (WLC_BAND_2G): {
+ if (band == WLC_BAND_5G) {
+ channel = APCS_DEFAULT_5G_CH;
+ } else if (band == WLC_BAND_2G) {
+ channel = sta_channel;
+ }
+#ifdef WL_6G_BAND
+ else if (band == WLC_BAND_6G) {
+ channel = APCS_DEFAULT_6G_CH;
+ }
+#endif /* WL_6G_BAND */
+ break;
+ }
+ default:
+ /* Intentional fall through to use same sta channel for softap */
+ channel = sta_channel;
+ break;
+ }
+ WL_MSG(dev->name, "band=%d, sta_band=%d, channel=%d\n", band, sta_band, channel);
+ goto done2;
+ }
+
+ /* If AP is started on wlan0 iface,
+ * do not issue any iovar to fw and choose default ACS channel for softap
+ */
+ if (dev == bcmcfg_to_prmry_ndev(cfg)) {
+ if (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP) {
+ ANDROID_INFO(("Softap started on primary iface\n"));
+ goto done;
+ }
+ }
+
+ channel = wl_ext_autochannel(dev, ACS_DRV_BIT, band);
+ if (channel) {
+ acs_band = CHSPEC_BAND(channel);
+ goto done2;
+	} else {
+		goto done;
+	}
+
+ ret = wldev_ioctl_get(dev, WLC_GET_SPECT_MANAGMENT, &spect, sizeof(spect));
+ if (ret) {
+ ANDROID_ERROR(("ACS: error getting the spect, ret=%d\n", ret));
+ goto done;
+ }
+
+ if (spect > 0) {
+ ret = wl_android_set_spect(dev, 0);
+ if (ret < 0) {
+ ANDROID_ERROR(("ACS: error while setting spect, ret=%d\n", ret));
+ goto done;
+ }
+ }
+
+ reqbuf = (u8 *)MALLOCZ(cfg->osh, CHANSPEC_BUF_SIZE);
+ if (reqbuf == NULL) {
+ ANDROID_ERROR(("failed to allocate chanspec buffer\n"));
+ return -ENOMEM;
+ }
+
+ if (acs_freq_list_present) {
+ wl_uint32_list_t *list = NULL;
+		bzero(reqbuf, CHANSPEC_BUF_SIZE);
+ list = (wl_uint32_list_t *)reqbuf;
+
+ ret = wl_android_get_freq_list_chanspecs(dev, list, CHANSPEC_BUF_SIZE,
+ cmd_str, sta_channel, wl_cfg80211_get_acs_band(sta_band));
+ if (ret < 0) {
+ ANDROID_ERROR(("ACS chanspec set failed!\n"));
+ goto done;
+ }
+
+ /* skip ACS for single channel case */
+ if (list->count == 1) {
+ cfg->acs_chspec = (chanspec_t)list->element[0];
+ channel = wf_chspec_ctlchan((chanspec_t)list->element[0]);
+ acs_band = CHSPEC_BAND((chanspec_t)list->element[0]);
+ goto done2;
+ }
+ } else {
+ acs_band = wl_cfg80211_get_acs_band(band);
+ if (acs_band == WLC_ACS_BAND_INVALID) {
+ ANDROID_ERROR(("ACS: No band chosen\n"));
+ goto done2;
+ }
+
+ if ((ret = wl_android_get_band_chanspecs(dev, reqbuf, CHANSPEC_BUF_SIZE,
+ acs_band, true)) < 0) {
+ ANDROID_ERROR(("ACS chanspec retrieval failed! \n"));
+ goto done;
+ }
+ }
+
+ buf_size = CHANSPEC_BUF_SIZE;
+ ret = wldev_ioctl_set(dev, WLC_START_CHANNEL_SEL, (void *)reqbuf,
+ buf_size);
+ if (ret < 0) {
+ ANDROID_ERROR(("can't start auto channel scan, err = %d\n", ret));
+ channel = 0;
+ goto done;
+ }
+
+ /* Wait for auto channel selection, max 3000 ms */
+ if ((band == WLC_BAND_2G) || (band == WLC_BAND_5G) || (band == WLC_BAND_6G)) {
+ OSL_SLEEP(500);
+ } else {
+ /*
+ * Full channel scan at the minimum takes 1.2secs
+ * even with parallel scan. max wait time: 3500ms
+ */
+ OSL_SLEEP(1000);
+ }
+
+ retry = APCS_MAX_RETRY;
+ while (retry--) {
+ ret = wldev_ioctl_get(dev, WLC_GET_CHANNEL_SEL, &chosen,
+ sizeof(chosen));
+ if (ret < 0) {
+ chosen = 0;
+ } else {
+ chosen = dtoh32(chosen);
+ }
+
+ if (chosen) {
+ /* Update chanspec which can be used during softAP bringup with right BW */
+ cfg->acs_chspec = chosen;
+ channel = wf_chspec_ctlchan(chosen);
+ acs_band = CHSPEC_BAND(chosen);
+ break;
+ }
+ ANDROID_INFO(("%d tried, ret = %d, chosen = 0x%x, acs_band = 0x%x\n",
+ (APCS_MAX_RETRY - retry), ret, chosen, acs_band));
+ OSL_SLEEP(250);
+ }
+
+done:
+	if ((retry <= 0) || (ret < 0)) {
+		/* On failure, fall back to a default channel */
+ if (band == WLC_BAND_5G) {
+ channel = APCS_DEFAULT_5G_CH;
+#ifdef WL_6G_BAND
+ } else if (band == WLC_BAND_6G) {
+ channel = APCS_DEFAULT_6G_CH;
+#endif /* WL_6G_BAND */
+ } else {
+ channel = APCS_DEFAULT_2G_CH;
+ }
+ ANDROID_ERROR(("ACS failed. Fall back to default channel (%d) \n", channel));
+ }
+done2:
+ if (spect > 0) {
+		if ((ret = wl_android_set_spect(dev, spect)) < 0) {
+ ANDROID_ERROR(("ACS: error while setting spect\n"));
+ }
+ }
+
+ if (reqbuf) {
+ MFREE(cfg->osh, reqbuf, CHANSPEC_BUF_SIZE);
+ }
+
+ if (channel) {
+ ret = snprintf(command, total_len, "%d", channel);
+ ANDROID_INFO(("command result is %s \n", command));
+ }
+
+ return ret;
+}
+#endif /* WL_SUPPORT_AUTO_CHANNEL */
+
+#ifdef CUSTOMER_HW4_PRIVATE_CMD
+static int
+wl_android_set_roam_vsie_enab(struct net_device *dev, const char *cmd, u32 cmd_len)
+{
+ s32 err = BCME_OK;
+ u32 roam_vsie_enable = 0;
+ u32 cmd_str_len = (u32)strlen(CMD_ROAM_VSIE_ENAB_SET);
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+
+ /* <CMD><SPACE><VAL> */
+ if (!cmd || (cmd_len < (cmd_str_len + 1))) {
+ ANDROID_ERROR(("wrong arg\n"));
+ err = -EINVAL;
+ goto exit;
+ }
+
+ if (dev != bcmcfg_to_prmry_ndev(cfg)) {
+ ANDROID_ERROR(("config not supported on non primary i/f\n"));
+ err = -ENODEV;
+ goto exit;
+ }
+
+ roam_vsie_enable = cmd[(cmd_str_len + 1)] - '0';
+ if (roam_vsie_enable > 1) {
+ roam_vsie_enable = 1;
+ }
+
+ WL_DBG_MEM(("set roam vsie %d\n", roam_vsie_enable));
+ err = wldev_iovar_setint(dev, "roam_vsie", roam_vsie_enable);
+ if (unlikely(err)) {
+ ANDROID_ERROR(("set roam vsie enable failed. ret:%d\n", err));
+ }
+
+exit:
+ return err;
+}
+
+static int
+wl_android_get_roam_vsie_enab(struct net_device *dev, char *cmd, u32 cmd_len)
+{
+ s32 err = BCME_OK;
+ u32 roam_vsie_enable = 0;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ int bytes_written;
+
+ /* <CMD> */
+ if (!cmd) {
+ ANDROID_ERROR(("wrong arg\n"));
+ return -1;
+ }
+
+ if (dev != bcmcfg_to_prmry_ndev(cfg)) {
+ ANDROID_ERROR(("config not supported on non primary i/f\n"));
+ return -1;
+ }
+
+ err = wldev_iovar_getint(dev, "roam_vsie", &roam_vsie_enable);
+ if (unlikely(err)) {
+ ANDROID_ERROR(("get roam vsie enable failed. ret:%d\n", err));
+ return -1;
+ }
+ ANDROID_INFO(("get roam vsie %d\n", roam_vsie_enable));
+
+ bytes_written = snprintf(cmd, cmd_len, "%s %d",
+ CMD_ROAM_VSIE_ENAB_GET, roam_vsie_enable);
+
+ return bytes_written;
+}
+
+static int
+wl_android_set_bcn_rpt_vsie_enab(struct net_device *dev, const char *cmd, u32 cmd_len)
+{
+ s32 err;
+ u32 bcn_vsie_enable = 0;
+ u32 cmd_str_len = (u32)strlen(CMD_BR_VSIE_ENAB_SET);
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+
+ /* <CMD><SPACE><VAL> */
+ if (!cmd || (cmd_len < (cmd_str_len + 1))) {
+ ANDROID_ERROR(("invalid arg\n"));
+ err = -EINVAL;
+ goto exit;
+ }
+
+ if (dev != bcmcfg_to_prmry_ndev(cfg)) {
+ ANDROID_ERROR(("config not supported on non primary i/f\n"));
+ err = -ENODEV;
+ goto exit;
+ }
+
+ bcn_vsie_enable = cmd[cmd_str_len + 1] - '0';
+ if (bcn_vsie_enable > 1) {
+ bcn_vsie_enable = 1;
+ }
+
+ WL_DBG_MEM(("set bcn report vsie %d\n", bcn_vsie_enable));
+ err = wldev_iovar_setint(dev, "bcnrpt_vsie_en", bcn_vsie_enable);
+ if (unlikely(err)) {
+ ANDROID_ERROR(("set bcn vsie failed. ret:%d\n", err));
+ }
+
+exit:
+ return err;
+}
+
+static int
+wl_android_get_bcn_rpt_vsie_enab(struct net_device *dev, char *cmd, u32 cmd_len)
+{
+ s32 err = BCME_OK;
+ u32 bcn_vsie_enable = 0;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ int bytes_written;
+
+ /* <CMD> */
+ if (!cmd) {
+ ANDROID_ERROR(("wrong arg\n"));
+ return -1;
+ }
+
+ if (dev != bcmcfg_to_prmry_ndev(cfg)) {
+ ANDROID_ERROR(("config not supported on non primary i/f\n"));
+ return -1;
+ }
+
+ err = wldev_iovar_getint(dev, "bcnrpt_vsie_en", &bcn_vsie_enable);
+ if (unlikely(err)) {
+ ANDROID_ERROR(("get bcn vsie failed. ret:%d\n", err));
+ return -1;
+ }
+ ANDROID_INFO(("get bcn report vsie %d\n", bcn_vsie_enable));
+
+ bytes_written = snprintf(cmd, cmd_len, "%s %d",
+ CMD_BR_VSIE_ENAB_GET, bcn_vsie_enable);
+
+ return bytes_written;
+}
+
+#ifdef SUPPORT_HIDDEN_AP
+static int
+wl_android_set_max_num_sta(struct net_device *dev, const char* string_num)
+{
+ int err = BCME_ERROR;
+ int max_assoc;
+
+ max_assoc = bcm_atoi(string_num);
+ ANDROID_INFO(("wl_android_set_max_num_sta : HAPD_MAX_NUM_STA = %d\n", max_assoc));
+
+ err = wldev_iovar_setint(dev, "maxassoc", max_assoc);
+ if (err < 0) {
+ ANDROID_ERROR(("failed to set maxassoc, error:%d\n", err));
+ }
+
+ return err;
+}
+
+static int
+wl_android_set_ssid(struct net_device *dev, const char* hapd_ssid)
+{
+ wlc_ssid_t ssid;
+ s32 ret;
+
+ ssid.SSID_len = strlen(hapd_ssid);
+ if (ssid.SSID_len == 0) {
+		ANDROID_ERROR(("wl_android_set_ssid : No SSID\n"));
+ return -1;
+ }
+ if (ssid.SSID_len > DOT11_MAX_SSID_LEN) {
+ ssid.SSID_len = DOT11_MAX_SSID_LEN;
+ ANDROID_ERROR(("wl_android_set_ssid : Too long SSID Length %zu\n", strlen(hapd_ssid)));
+ }
+ bcm_strncpy_s(ssid.SSID, sizeof(ssid.SSID), hapd_ssid, ssid.SSID_len);
+ ANDROID_INFO(("wl_android_set_ssid: HAPD_SSID = %s\n", ssid.SSID));
+ ret = wldev_ioctl_set(dev, WLC_SET_SSID, &ssid, sizeof(wlc_ssid_t));
+ if (ret < 0) {
+ ANDROID_ERROR(("wl_android_set_ssid : WLC_SET_SSID Error:%d\n", ret));
+ }
+	return 1;
+}
+
+static int
+wl_android_set_hide_ssid(struct net_device *dev, const char* string_num)
+{
+ int hide_ssid;
+ int enable = 0;
+ int err = BCME_ERROR;
+
+ hide_ssid = bcm_atoi(string_num);
+ ANDROID_INFO(("wl_android_set_hide_ssid: HAPD_HIDE_SSID = %d\n", hide_ssid));
+ if (hide_ssid) {
+ enable = 1;
+ }
+
+ err = wldev_iovar_setint(dev, "closednet", enable);
+ if (err < 0) {
+ ANDROID_ERROR(("failed to set closednet, error:%d\n", err));
+ }
+
+ return err;
+}
+#endif /* SUPPORT_HIDDEN_AP */
+
+#ifdef SUPPORT_SOFTAP_SINGL_DISASSOC
+static int
+wl_android_sta_diassoc(struct net_device *dev, const char* straddr)
+{
+ scb_val_t scbval;
+ int error = 0;
+
+ ANDROID_INFO(("wl_android_sta_diassoc: deauth STA %s\n", straddr));
+
+ /* Unspecified reason */
+ scbval.val = htod32(1);
+
+ if (bcm_ether_atoe(straddr, &scbval.ea) == 0) {
+ ANDROID_ERROR(("wl_android_sta_diassoc: Invalid station MAC Address!!!\n"));
+ return -1;
+ }
+
+ ANDROID_ERROR(("wl_android_sta_diassoc: deauth STA: "MACDBG " scb_val.val %d\n",
+ MAC2STRDBG(scbval.ea.octet), scbval.val));
+
+ error = wldev_ioctl_set(dev, WLC_SCB_DEAUTHENTICATE_FOR_REASON, &scbval,
+ sizeof(scb_val_t));
+ if (error) {
+		ANDROID_ERROR(("Failed to deauth station, error = %d\n", error));
+ }
+
+ return 1;
+}
+#endif /* SUPPORT_SOFTAP_SINGL_DISASSOC */
+
+#ifdef SUPPORT_SET_LPC
+static int
+wl_android_set_lpc(struct net_device *dev, const char* string_num)
+{
+ int lpc_enabled, ret;
+ s32 val = 1;
+
+ lpc_enabled = bcm_atoi(string_num);
+ ANDROID_INFO(("wl_android_set_lpc: HAPD_LPC_ENABLED = %d\n", lpc_enabled));
+
+ ret = wldev_ioctl_set(dev, WLC_DOWN, &val, sizeof(s32));
+ if (ret < 0)
+ ANDROID_ERROR(("WLC_DOWN error %d\n", ret));
+
+ wldev_iovar_setint(dev, "lpc", lpc_enabled);
+
+ ret = wldev_ioctl_set(dev, WLC_UP, &val, sizeof(s32));
+ if (ret < 0)
+ ANDROID_ERROR(("WLC_UP error %d\n", ret));
+
+ return 1;
+}
+#endif /* SUPPORT_SET_LPC */
+
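+/*
+ * Adjusts the 802.11 retry limits: defaults are SRL 7 / LRL 4, reduced to
+ * SRL 4 / LRL 2 when 'change' is true. The LRL write is skipped when
+ * CUSTOM_LONG_RETRY_LIMIT is defined.
+ */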
+static int
+wl_android_ch_res_rl(struct net_device *dev, bool change)
+{
+ int error = 0;
+ s32 srl = 7;
+ s32 lrl = 4;
+ ANDROID_ERROR(("wl_android_ch_res_rl: enter\n"));
+ if (change) {
+ srl = 4;
+ lrl = 2;
+ }
+
+ BCM_REFERENCE(lrl);
+
+ error = wldev_ioctl_set(dev, WLC_SET_SRL, &srl, sizeof(s32));
+ if (error) {
+ ANDROID_ERROR(("Failed to set SRL, error = %d\n", error));
+ }
+#ifndef CUSTOM_LONG_RETRY_LIMIT
+ error = wldev_ioctl_set(dev, WLC_SET_LRL, &lrl, sizeof(s32));
+ if (error) {
+ ANDROID_ERROR(("Failed to set LRL, error = %d\n", error));
+ }
+#endif /* CUSTOM_LONG_RETRY_LIMIT */
+ return error;
+}
+
+#ifdef SUPPORT_LTECX
+#define DEFAULT_WLANRX_PROT 1
+#define DEFAULT_LTERX_PROT 0
+#define DEFAULT_LTETX_ADV 1200
+
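+/*
+ * LTE coex control: a non-zero hex channel bitmap programs mws_coex_bitmap
+ * together with the default WLAN-RX/LTE-RX protection and LTE-TX advance
+ * values above; a zero bitmap only clears mws_coex_bitmap.
+ */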
+static int
+wl_android_set_ltecx(struct net_device *dev, const char* string_num)
+{
+ uint16 chan_bitmap;
+ int ret;
+
+ chan_bitmap = bcm_strtoul(string_num, NULL, 16);
+
+ ANDROID_INFO(("wl_android_set_ltecx: LTECOEX 0x%x\n", chan_bitmap));
+
+ if (chan_bitmap) {
+ ret = wldev_iovar_setint(dev, "mws_coex_bitmap", chan_bitmap);
+ if (ret < 0) {
+ ANDROID_ERROR(("mws_coex_bitmap error %d\n", ret));
+ }
+
+ ret = wldev_iovar_setint(dev, "mws_wlanrx_prot", DEFAULT_WLANRX_PROT);
+ if (ret < 0) {
+ ANDROID_ERROR(("mws_wlanrx_prot error %d\n", ret));
+ }
+
+ ret = wldev_iovar_setint(dev, "mws_lterx_prot", DEFAULT_LTERX_PROT);
+ if (ret < 0) {
+ ANDROID_ERROR(("mws_lterx_prot error %d\n", ret));
+ }
+
+ ret = wldev_iovar_setint(dev, "mws_ltetx_adv", DEFAULT_LTETX_ADV);
+ if (ret < 0) {
+ ANDROID_ERROR(("mws_ltetx_adv error %d\n", ret));
+ }
+ } else {
+ ret = wldev_iovar_setint(dev, "mws_coex_bitmap", chan_bitmap);
+ if (ret < 0) {
+ if (ret == BCME_UNSUPPORTED) {
+ ANDROID_ERROR(("LTECX_CHAN_BITMAP is UNSUPPORTED\n"));
+ } else {
+ ANDROID_ERROR(("LTECX_CHAN_BITMAP error %d\n", ret));
+ }
+ }
+ }
+ return 1;
+}
+#endif /* SUPPORT_LTECX */
+
+#ifdef WL_RELMCAST
+static int
+wl_android_rmc_enable(struct net_device *net, int rmc_enable)
+{
+ int err;
+
+ err = wldev_iovar_setint(net, "rmc_ackreq", rmc_enable);
+ if (err != BCME_OK) {
+ ANDROID_ERROR(("wl_android_rmc_enable: rmc_ackreq, error = %d\n", err));
+ }
+ return err;
+}
+
+static int
+wl_android_rmc_set_leader(struct net_device *dev, const char* straddr)
+{
+ int error = BCME_OK;
+ char smbuf[WLC_IOCTL_SMLEN];
+ wl_rmc_entry_t rmc_entry;
+ ANDROID_INFO(("wl_android_rmc_set_leader: Set new RMC leader %s\n", straddr));
+
+ bzero(&rmc_entry, sizeof(wl_rmc_entry_t));
+ if (!bcm_ether_atoe(straddr, &rmc_entry.addr)) {
+ if (strlen(straddr) == 1 && bcm_atoi(straddr) == 0) {
+ ANDROID_INFO(("wl_android_rmc_set_leader: Set auto leader selection mode\n"));
+ bzero(&rmc_entry, sizeof(wl_rmc_entry_t));
+ } else {
+ ANDROID_ERROR(("wl_android_rmc_set_leader: No valid mac address provided\n"));
+ return BCME_ERROR;
+ }
+ }
+
+ error = wldev_iovar_setbuf(dev, "rmc_ar", &rmc_entry, sizeof(wl_rmc_entry_t),
+ smbuf, sizeof(smbuf), NULL);
+
+ if (error != BCME_OK) {
+ ANDROID_ERROR(("wl_android_rmc_set_leader: Unable to set RMC leader, error = %d\n",
+ error));
+ }
+
+ return error;
+}
+
+static int wl_android_set_rmc_event(struct net_device *dev, char *command)
+{
+ int err = 0;
+ int pid = 0;
+
+ if (sscanf(command, CMD_SET_RMC_EVENT " %d", &pid) <= 0) {
+		ANDROID_ERROR(("Failed to get parameter from: %s\n", command));
+ return -1;
+ }
+
+ /* set pid, and if the event was happened, let's send a notification through netlink */
+ wl_cfg80211_set_rmc_pid(dev, pid);
+
+ ANDROID_INFO(("RMC pid=%d\n", pid));
+
+ return err;
+}
+#endif /* WL_RELMCAST */
+
+int wl_android_get_singlecore_scan(struct net_device *dev, char *command, int total_len)
+{
+ int error = 0;
+ int bytes_written = 0;
+ int mode = 0;
+
+ error = wldev_iovar_getint(dev, "scan_ps", &mode);
+ if (error) {
+ ANDROID_ERROR(("wl_android_get_singlecore_scan: Failed to get single core scan Mode,"
+ " error = %d\n",
+ error));
+ return -1;
+ }
+
+ bytes_written = snprintf(command, total_len, "%s %d", CMD_GET_SCSCAN, mode);
+
+ return bytes_written;
+}
+
+int wl_android_set_singlecore_scan(struct net_device *dev, char *command)
+{
+ int error = 0;
+ int mode = 0;
+
+ if (sscanf(command, "%*s %d", &mode) != 1) {
+ ANDROID_ERROR(("wl_android_set_singlecore_scan: Failed to get Parameter\n"));
+ return -1;
+ }
+
+ error = wldev_iovar_setint(dev, "scan_ps", mode);
+ if (error) {
+		ANDROID_ERROR(("wl_android_set_singlecore_scan: Failed to set Mode %d, error = %d\n",
+ mode, error));
+ return -1;
+ }
+
+ return error;
+}
+
+#ifdef TEST_TX_POWER_CONTROL
+static int
+wl_android_set_tx_power(struct net_device *dev, const char* string_num)
+{
+ int err = 0;
+ s32 dbm;
+ enum nl80211_tx_power_setting type;
+
+ dbm = bcm_atoi(string_num);
+
+ if (dbm < -1) {
+		ANDROID_ERROR(("wl_android_set_tx_power: invalid dbm value (below -1)\n"));
+ return -EINVAL;
+ }
+
+ if (dbm == -1)
+ type = NL80211_TX_POWER_AUTOMATIC;
+ else
+ type = NL80211_TX_POWER_FIXED;
+
+ err = wl_set_tx_power(dev, type, dbm);
+ if (unlikely(err)) {
+ ANDROID_ERROR(("wl_android_set_tx_power: error (%d)\n", err));
+ return err;
+ }
+
+ return 1;
+}
+
+static int
+wl_android_get_tx_power(struct net_device *dev, char *command, int total_len)
+{
+ int err;
+ int bytes_written;
+ s32 dbm = 0;
+
+ err = wl_get_tx_power(dev, &dbm);
+ if (unlikely(err)) {
+ ANDROID_ERROR(("wl_android_get_tx_power: error (%d)\n", err));
+ return err;
+ }
+
+ bytes_written = snprintf(command, total_len, "%s %d",
+ CMD_TEST_GET_TX_POWER, dbm);
+
+ ANDROID_ERROR(("wl_android_get_tx_power: GET_TX_POWER: dBm=%d\n", dbm));
+
+ return bytes_written;
+}
+#endif /* TEST_TX_POWER_CONTROL */
+
+static int
+wl_android_set_sarlimit_txctrl(struct net_device *dev, const char* string_num)
+{
+ int err = BCME_ERROR;
+ int setval = 0;
+ s32 mode = bcm_atoi(string_num);
+ s32 mode_bit = 0;
+ int enab = 0;
+
+	/* Per Samsung's requirement, the mode is
+	 * set according to the following table.
+ * -1 : HEAD SAR disabled
+ * 0 : HEAD SAR enabled
+ * 1 : GRIP SAR disabled
+ * 2 : GRIP SAR enabled
+ * 3 : NR mmWave SAR disabled
+ * 4 : NR mmWave SAR enabled
+ * 5 : NR Sub6 SAR disabled
+ * 6 : NR Sub6 SAR enabled
+ * 7 : SAR BACKOFF disabled all
+ * The 'SAR BACKOFF disabled all' index should be the end of the mode.
+ */
+ if ((mode < HEAD_SAR_BACKOFF_DISABLE) || (mode > SAR_BACKOFF_DISABLE_ALL)) {
+ ANDROID_ERROR(("%s: Request for Unsupported:%d\n", __FUNCTION__, bcm_atoi(string_num)));
+ err = BCME_RANGE;
+ goto error;
+ }
+
+ mode_bit = mode + 1;
+ enab = mode_bit % 2;
+ mode_bit = mode_bit / 2;
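+	/* The mode encodes a (bit, enable) pair: e.g. mode 0 (HEAD SAR enabled)
+	 * maps to mode_bit 0 / enab 1, and mode 1 (GRIP SAR disabled) maps to
+	 * mode_bit 1 / enab 0.
+	 */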
+
+ err = wldev_iovar_getint(dev, "sar_enable", &setval);
+ if (unlikely(err)) {
+ ANDROID_ERROR(("%s: Failed to get sar_enable - error (%d)\n", __FUNCTION__, err));
+ goto error;
+ }
+
+ if (mode == SAR_BACKOFF_DISABLE_ALL) {
+ ANDROID_ERROR(("%s: SAR limit control all mode disable!\n", __FUNCTION__));
+ setval = 0;
+ } else {
+ ANDROID_ERROR(("%s: SAR limit control mode %d enab %d\n",
+ __FUNCTION__, mode_bit, enab));
+ if (enab) {
+ setval |= (1 << mode_bit);
+ } else {
+ setval &= ~(1 << mode_bit);
+ }
+ }
+
+ err = wldev_iovar_setint(dev, "sar_enable", setval);
+ if (unlikely(err)) {
+ ANDROID_ERROR(("%s: Failed to set sar_enable - error (%d)\n", __FUNCTION__, err));
+ goto error;
+ }
+ err = BCME_OK;
+error:
+ return err;
+}
+
+#ifdef SUPPORT_SET_TID
+static int
+wl_android_set_tid(struct net_device *dev, char* command)
+{
+ int err = BCME_ERROR;
+ char *pos = command;
+ char *token = NULL;
+ uint8 mode = 0;
+ uint32 uid = 0;
+ uint8 prio = 0;
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
+
+ if (!dhdp) {
+ ANDROID_ERROR(("dhd is NULL\n"));
+ return err;
+ }
+
+ ANDROID_INFO(("%s: command[%s]\n", __FUNCTION__, command));
+
+ /* drop command */
+ token = bcmstrtok(&pos, " ", NULL);
+
+ token = bcmstrtok(&pos, " ", NULL);
+ if (!token) {
+ ANDROID_ERROR(("Invalid arguments\n"));
+ return err;
+ }
+
+ mode = bcm_atoi(token);
+
+ if (mode < SET_TID_OFF || mode > SET_TID_BASED_ON_UID) {
+ ANDROID_ERROR(("Invalid arguments, mode %d\n", mode));
+ return err;
+ }
+
+ if (mode) {
+ token = bcmstrtok(&pos, " ", NULL);
+ if (!token) {
+ ANDROID_ERROR(("Invalid arguments for target uid\n"));
+ return err;
+ }
+
+ uid = bcm_atoi(token);
+
+ token = bcmstrtok(&pos, " ", NULL);
+ if (!token) {
+ ANDROID_ERROR(("Invalid arguments for target tid\n"));
+ return err;
+ }
+
+ prio = bcm_atoi(token);
+ if (prio >= 0 && prio <= MAXPRIO) {
+ dhdp->tid_mode = mode;
+ dhdp->target_uid = uid;
+ dhdp->target_tid = prio;
+ } else {
+ ANDROID_ERROR(("Invalid arguments, prio %d\n", prio));
+ return err;
+ }
+ } else {
+ dhdp->tid_mode = SET_TID_OFF;
+ dhdp->target_uid = 0;
+ dhdp->target_tid = 0;
+ }
+
+ ANDROID_INFO(("%s mode [%d], uid [%d], tid [%d]\n", __FUNCTION__,
+ dhdp->tid_mode, dhdp->target_uid, dhdp->target_tid));
+
+ err = BCME_OK;
+ return err;
+}
+
+static int
+wl_android_get_tid(struct net_device *dev, char* command, int total_len)
+{
+ int bytes_written = BCME_ERROR;
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
+
+ if (!dhdp) {
+ ANDROID_ERROR(("dhd is NULL\n"));
+ return bytes_written;
+ }
+
+ bytes_written = snprintf(command, total_len, "mode %d uid %d tid %d",
+ dhdp->tid_mode, dhdp->target_uid, dhdp->target_tid);
+
+ ANDROID_INFO(("%s: command results %s\n", __FUNCTION__, command));
+
+ return bytes_written;
+}
+#endif /* SUPPORT_SET_TID */
+#endif /* CUSTOMER_HW4_PRIVATE_CMD */
+
+int wl_android_set_roam_mode(struct net_device *dev, char *command)
+{
+ int error = 0;
+ int mode = 0;
+
+ if (sscanf(command, "%*s %d", &mode) != 1) {
+ ANDROID_ERROR(("%s: Failed to get Parameter\n", __FUNCTION__));
+ return -1;
+ }
+
+ error = wldev_iovar_setint(dev, "roam_off", mode);
+ if (error) {
+ ANDROID_ERROR(("%s: Failed to set roaming Mode %d, error = %d\n",
+ __FUNCTION__, mode, error));
+ return -1;
+ }
+ else
+		ANDROID_ERROR(("%s: successfully set roaming Mode %d, error = %d\n",
+ __FUNCTION__, mode, error));
+ return 0;
+}
+
+#ifdef WL_CFG80211
+int wl_android_set_ibss_beacon_ouidata(struct net_device *dev, char *command, int total_len)
+{
+ char ie_buf[VNDR_IE_MAX_LEN];
+ char *ioctl_buf = NULL;
+ char hex[] = "XX";
+ char *pcmd = NULL;
+ int ielen = 0, datalen = 0, idx = 0, tot_len = 0;
+ vndr_ie_setbuf_t *vndr_ie = NULL;
+ s32 iecount;
+ uint32 pktflag;
+ s32 err = BCME_OK, bssidx;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+
+	/* Check for a previously added VSIE (Vendor Specific IE).
+	 * If it exists, send an IOVAR to delete it.
+	 */
+ if (wl_cfg80211_ibss_vsie_delete(dev) != BCME_OK) {
+ return -EINVAL;
+ }
+
+ if (total_len < (strlen(CMD_SETIBSSBEACONOUIDATA) + 1)) {
+ ANDROID_ERROR(("error. total_len:%d\n", total_len));
+ return -EINVAL;
+ }
+
+ pcmd = command + strlen(CMD_SETIBSSBEACONOUIDATA) + 1;
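+	/* Payload layout, as parsed below: six hex digits of OUI, one separator
+	 * character, then the vendor IE data as pairs of hex digits.
+	 */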
+ for (idx = 0; idx < DOT11_OUI_LEN; idx++) {
+ if (*pcmd == '\0') {
+ ANDROID_ERROR(("error while parsing OUI.\n"));
+ return -EINVAL;
+ }
+ hex[0] = *pcmd++;
+ hex[1] = *pcmd++;
+ ie_buf[idx] = (uint8)simple_strtoul(hex, NULL, 16);
+ }
+ pcmd++;
+ while ((*pcmd != '\0') && (idx < VNDR_IE_MAX_LEN)) {
+ hex[0] = *pcmd++;
+ hex[1] = *pcmd++;
+ ie_buf[idx++] = (uint8)simple_strtoul(hex, NULL, 16);
+ datalen++;
+ }
+
+ if (datalen <= 0) {
+ ANDROID_ERROR(("error. vndr ie len:%d\n", datalen));
+ return -EINVAL;
+ }
+
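+	/* vndr_ie_setbuf_t already carries one byte of IE data, hence (datalen - 1). */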
+ tot_len = (int)(sizeof(vndr_ie_setbuf_t) + (datalen - 1));
+ vndr_ie = (vndr_ie_setbuf_t *)MALLOCZ(cfg->osh, tot_len);
+ if (!vndr_ie) {
+ ANDROID_ERROR(("IE memory alloc failed\n"));
+ return -ENOMEM;
+ }
+ /* Copy the vndr_ie SET command ("add"/"del") to the buffer */
+ strlcpy(vndr_ie->cmd, "add", sizeof(vndr_ie->cmd));
+
+ /* Set the IE count - the buffer contains only 1 IE */
+ iecount = htod32(1);
+ memcpy((void *)&vndr_ie->vndr_ie_buffer.iecount, &iecount, sizeof(s32));
+
+	/* Set packet flag so that beacons and probe responses will carry this IE */
+ pktflag = htod32(VNDR_IE_BEACON_FLAG | VNDR_IE_PRBRSP_FLAG);
+ memcpy((void *)&vndr_ie->vndr_ie_buffer.vndr_ie_list[0].pktflag, &pktflag,
+ sizeof(u32));
+ /* Set the IE ID */
+ vndr_ie->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.id = (uchar) DOT11_MNG_PROPR_ID;
+
+ memcpy(&vndr_ie->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.oui, &ie_buf,
+ DOT11_OUI_LEN);
+ memcpy(&vndr_ie->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.data,
+ &ie_buf[DOT11_OUI_LEN], datalen);
+
+ ielen = DOT11_OUI_LEN + datalen;
+ vndr_ie->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.len = (uchar) ielen;
+
+ ioctl_buf = (char *)MALLOC(cfg->osh, WLC_IOCTL_MEDLEN);
+ if (!ioctl_buf) {
+ ANDROID_ERROR(("ioctl memory alloc failed\n"));
+ if (vndr_ie) {
+ MFREE(cfg->osh, vndr_ie, tot_len);
+ }
+ return -ENOMEM;
+ }
+ bzero(ioctl_buf, WLC_IOCTL_MEDLEN); /* init the buffer */
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+ ANDROID_ERROR(("Find index failed\n"));
+ err = BCME_ERROR;
+ goto end;
+ }
+ err = wldev_iovar_setbuf_bsscfg(dev, "vndr_ie", vndr_ie, tot_len, ioctl_buf,
+ WLC_IOCTL_MEDLEN, bssidx, &cfg->ioctl_buf_sync);
+end:
+ if (err != BCME_OK) {
+ err = -EINVAL;
+ if (vndr_ie) {
+ MFREE(cfg->osh, vndr_ie, tot_len);
+ }
+ }
+ else {
+ /* do NOT free 'vndr_ie' for the next process */
+ wl_cfg80211_ibss_vsie_set_buffer(dev, vndr_ie, tot_len);
+ }
+
+ if (ioctl_buf) {
+ MFREE(cfg->osh, ioctl_buf, WLC_IOCTL_MEDLEN);
+ }
+
+ return err;
+}
+#endif /* WL_CFG80211 */
+
+#if defined(BCMFW_ROAM_ENABLE)
+static int
+wl_android_set_roampref(struct net_device *dev, char *command, int total_len)
+{
+ int error = 0;
+ char smbuf[WLC_IOCTL_SMLEN];
+ uint8 buf[MAX_BUF_SIZE];
+ uint8 *pref = buf;
+ char *pcmd;
+ int num_ucipher_suites = 0;
+ int num_akm_suites = 0;
+ wpa_suite_t ucipher_suites[MAX_NUM_SUITES];
+ wpa_suite_t akm_suites[MAX_NUM_SUITES];
+ int num_tuples = 0;
+ int total_bytes = 0;
+ int total_len_left;
+ int i, j;
+ char hex[] = "XX";
+
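+	/* Command layout, inferred from the parsing below: an AKM suite count,
+	 * the AKM suites, a unicast cipher suite count, then the cipher suites;
+	 * counts are hex, and each suite is four bytes given as hex digit pairs,
+	 * MSB first.
+	 */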
+ pcmd = command + strlen(CMD_SET_ROAMPREF) + 1;
+	total_len_left = total_len - (strlen(CMD_SET_ROAMPREF) + 1);
+
+ num_akm_suites = simple_strtoul(pcmd, NULL, 16);
+ if (num_akm_suites > MAX_NUM_SUITES) {
+ ANDROID_ERROR(("too many AKM suites = %d\n", num_akm_suites));
+ return -1;
+ }
+
+ /* Increment for number of AKM suites field + space */
+ pcmd += 3;
+ total_len_left -= 3;
+
+ /* check to make sure pcmd does not overrun */
+ if (total_len_left < (num_akm_suites * WIDTH_AKM_SUITE))
+ return -1;
+
+ bzero(buf, sizeof(buf));
+ bzero(akm_suites, sizeof(akm_suites));
+ bzero(ucipher_suites, sizeof(ucipher_suites));
+
+ /* Save the AKM suites passed in the command */
+ for (i = 0; i < num_akm_suites; i++) {
+ /* Store the MSB first, as required by join_pref */
+ for (j = 0; j < 4; j++) {
+ hex[0] = *pcmd++;
+ hex[1] = *pcmd++;
+ buf[j] = (uint8)simple_strtoul(hex, NULL, 16);
+ }
+ memcpy((uint8 *)&akm_suites[i], buf, sizeof(uint32));
+ }
+
+ total_len_left -= (num_akm_suites * WIDTH_AKM_SUITE);
+ num_ucipher_suites = simple_strtoul(pcmd, NULL, 16);
+ /* Increment for number of cipher suites field + space */
+ pcmd += 3;
+ total_len_left -= 3;
+
+ if (total_len_left < (num_ucipher_suites * WIDTH_AKM_SUITE))
+ return -1;
+
+ /* Save the cipher suites passed in the command */
+ for (i = 0; i < num_ucipher_suites; i++) {
+ /* Store the MSB first, as required by join_pref */
+ for (j = 0; j < 4; j++) {
+ hex[0] = *pcmd++;
+ hex[1] = *pcmd++;
+ buf[j] = (uint8)simple_strtoul(hex, NULL, 16);
+ }
+ memcpy((uint8 *)&ucipher_suites[i], buf, sizeof(uint32));
+ }
+
+ /* Join preference for RSSI
+ * Type : 1 byte (0x01)
+ * Length : 1 byte (0x02)
+ * Value : 2 bytes (reserved)
+ */
+ *pref++ = WL_JOIN_PREF_RSSI;
+ *pref++ = JOIN_PREF_RSSI_LEN;
+ *pref++ = 0;
+ *pref++ = 0;
+
+ /* Join preference for WPA
+ * Type : 1 byte (0x02)
+ * Length : 1 byte (not used)
+ * Value : (variable length)
+ * reserved: 1 byte
+ * count : 1 byte (no of tuples)
+ * Tuple1 : 12 bytes
+ * akm[4]
+ * ucipher[4]
+ * mcipher[4]
+ * Tuple2 : 12 bytes
+ * Tuplen : 12 bytes
+ */
+ num_tuples = num_akm_suites * num_ucipher_suites;
+ if (num_tuples != 0) {
+ if (num_tuples <= JOIN_PREF_MAX_WPA_TUPLES) {
+ *pref++ = WL_JOIN_PREF_WPA;
+ *pref++ = 0;
+ *pref++ = 0;
+ *pref++ = (uint8)num_tuples;
+ total_bytes = JOIN_PREF_RSSI_SIZE + JOIN_PREF_WPA_HDR_SIZE +
+ (JOIN_PREF_WPA_TUPLE_SIZE * num_tuples);
+ } else {
+ ANDROID_ERROR(("%s: Too many wpa configs for join_pref \n", __FUNCTION__));
+ return -1;
+ }
+ } else {
+ /* No WPA config, configure only RSSI preference */
+ total_bytes = JOIN_PREF_RSSI_SIZE;
+ }
+
+ /* akm-ucipher-mcipher tuples in the format required for join_pref */
+ for (i = 0; i < num_ucipher_suites; i++) {
+ for (j = 0; j < num_akm_suites; j++) {
+ memcpy(pref, (uint8 *)&akm_suites[j], WPA_SUITE_LEN);
+ pref += WPA_SUITE_LEN;
+ memcpy(pref, (uint8 *)&ucipher_suites[i], WPA_SUITE_LEN);
+ pref += WPA_SUITE_LEN;
+ /* Set to 0 to match any available multicast cipher */
+ bzero(pref, WPA_SUITE_LEN);
+ pref += WPA_SUITE_LEN;
+ }
+ }
+
+ prhex("join pref", (uint8 *)buf, total_bytes);
+ error = wldev_iovar_setbuf(dev, "join_pref", buf, total_bytes, smbuf, sizeof(smbuf), NULL);
+ if (error) {
+ ANDROID_ERROR(("Failed to set join_pref, error = %d\n", error));
+ }
+ return error;
+}
+#endif /* defined(BCMFW_ROAM_ENABLE) */
+
+#ifdef WL_CFG80211
+static int
+wl_android_iolist_add(struct net_device *dev, struct list_head *head, struct io_cfg *config)
+{
+ struct io_cfg *resume_cfg;
+ s32 ret;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+
+ resume_cfg = (struct io_cfg *)MALLOCZ(cfg->osh, sizeof(struct io_cfg));
+ if (!resume_cfg)
+ return -ENOMEM;
+
+ if (config->iovar) {
+ ret = wldev_iovar_getint(dev, config->iovar, &resume_cfg->param);
+ if (ret) {
+ ANDROID_ERROR(("%s: Failed to get current %s value\n",
+ __FUNCTION__, config->iovar));
+ goto error;
+ }
+
+ ret = wldev_iovar_setint(dev, config->iovar, config->param);
+ if (ret) {
+ ANDROID_ERROR(("%s: Failed to set %s to %d\n", __FUNCTION__,
+ config->iovar, config->param));
+ goto error;
+ }
+
+ resume_cfg->iovar = config->iovar;
+ } else {
+ resume_cfg->arg = MALLOCZ(cfg->osh, config->len);
+ if (!resume_cfg->arg) {
+ ret = -ENOMEM;
+ goto error;
+ }
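+		/* In wlioctl numbering, each SET command immediately follows its GET
+		 * counterpart (e.g. WLC_GET_PM + 1 == WLC_SET_PM), so ioctl + 1 below
+		 * issues the matching SET for the saved GET code.
+		 */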
+ ret = wldev_ioctl_get(dev, config->ioctl, resume_cfg->arg, config->len);
+ if (ret) {
+ ANDROID_ERROR(("%s: Failed to get ioctl %d\n", __FUNCTION__,
+ config->ioctl));
+ goto error;
+ }
+ ret = wldev_ioctl_set(dev, config->ioctl + 1, config->arg, config->len);
+ if (ret) {
+ ANDROID_ERROR(("%s: Failed to set %s to %d\n", __FUNCTION__,
+ config->iovar, config->param));
+ goto error;
+ }
+ if (config->ioctl + 1 == WLC_SET_PM)
+ wl_cfg80211_update_power_mode(dev);
+ resume_cfg->ioctl = config->ioctl;
+ resume_cfg->len = config->len;
+ }
+
+ /* assuming only one active user and no list protection */
+ list_add(&resume_cfg->list, head);
+
+ return 0;
+error:
+ MFREE(cfg->osh, resume_cfg->arg, config->len);
+ MFREE(cfg->osh, resume_cfg, sizeof(struct io_cfg));
+ return ret;
+}
+
+static void
+wl_android_iolist_resume(struct net_device *dev, struct list_head *head)
+{
+ struct io_cfg *config;
+ struct list_head *cur, *q;
+ s32 ret = 0;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ list_for_each_safe(cur, q, head) {
+ config = list_entry(cur, struct io_cfg, list);
+ GCC_DIAGNOSTIC_POP();
+ if (config->iovar) {
+ if (!ret)
+ ret = wldev_iovar_setint(dev, config->iovar,
+ config->param);
+ } else {
+ if (!ret)
+ ret = wldev_ioctl_set(dev, config->ioctl + 1,
+ config->arg, config->len);
+ if (config->ioctl + 1 == WLC_SET_PM)
+ wl_cfg80211_update_power_mode(dev);
+ MFREE(cfg->osh, config->arg, config->len);
+ }
+ list_del(cur);
+ MFREE(cfg->osh, config, sizeof(struct io_cfg));
+ }
+}
+
+static int
+wl_android_set_miracast(struct net_device *dev, char *command)
+{
+ int mode, val = 0;
+ int ret = 0;
+ struct io_cfg config;
+
+ if (sscanf(command, "%*s %d", &mode) != 1) {
+ ANDROID_ERROR(("%s: Failed to get Parameter\n", __FUNCTION__));
+ return -1;
+ }
+
+ ANDROID_INFO(("%s: enter miracast mode %d\n", __FUNCTION__, mode));
+
+ if (miracast_cur_mode == mode) {
+ return 0;
+ }
+
+ wl_android_iolist_resume(dev, &miracast_resume_list);
+ miracast_cur_mode = MIRACAST_MODE_OFF;
+
+ bzero((void *)&config, sizeof(config));
+ switch (mode) {
+ case MIRACAST_MODE_SOURCE:
+#ifdef MIRACAST_MCHAN_ALGO
+ /* setting mchan_algo to platform specific value */
+ config.iovar = "mchan_algo";
+
+		/* check the station's beacon interval (BI);
+		 * if BI is over 100ms, don't use mchan_algo
+		 */
+ ret = wldev_ioctl_get(dev, WLC_GET_BCNPRD, &val, sizeof(int));
+ if (!ret && val > 100) {
+ config.param = 0;
+			ANDROID_ERROR(("%s: Connected station's beacon interval is "
+				"%d (>100ms); setting mchan_algo to %d\n",
+ __FUNCTION__, val, config.param));
+ } else {
+ config.param = MIRACAST_MCHAN_ALGO;
+ }
+ ret = wl_android_iolist_add(dev, &miracast_resume_list, &config);
+ if (ret) {
+ goto resume;
+ }
+#endif /* MIRACAST_MCHAN_ALGO */
+
+#ifdef MIRACAST_MCHAN_BW
+ /* setting mchan_bw to platform specific value */
+ config.iovar = "mchan_bw";
+ config.param = MIRACAST_MCHAN_BW;
+ ret = wl_android_iolist_add(dev, &miracast_resume_list, &config);
+ if (ret) {
+ goto resume;
+ }
+#endif /* MIRACAST_MCHAN_BW */
+
+#ifdef MIRACAST_AMPDU_SIZE
+		/* setting ampdu_mpdu to platform specific value */
+ config.iovar = "ampdu_mpdu";
+ config.param = MIRACAST_AMPDU_SIZE;
+ ret = wl_android_iolist_add(dev, &miracast_resume_list, &config);
+ if (ret) {
+ goto resume;
+ }
+#endif /* MIRACAST_AMPDU_SIZE */
+		/* FALLTHROUGH */
+ /* Source mode shares most configurations with sink mode.
+ * Fall through here to avoid code duplication
+ */
+ case MIRACAST_MODE_SINK:
+ /* disable internal roaming */
+ config.iovar = "roam_off";
+ config.param = 1;
+ config.arg = NULL;
+ config.len = 0;
+ ret = wl_android_iolist_add(dev, &miracast_resume_list, &config);
+ if (ret) {
+ goto resume;
+ }
+
+#ifdef CUSTOMER_HW10
+		/* [CSP#812738] Change scan engine parameters to reduce scan time
+		 * and leave more time for mirroring.
+		 */
+ val = 10;
+ config.iovar = NULL;
+ config.ioctl = WLC_GET_SCAN_CHANNEL_TIME;
+ config.arg = &val;
+ config.len = sizeof(int);
+ ret = wl_android_iolist_add(dev, &miracast_resume_list, &config);
+ if (ret)
+ goto resume;
+
+ val = 180;
+ config.iovar = NULL;
+ config.ioctl = WLC_GET_SCAN_HOME_TIME;
+ config.arg = &val;
+ config.len = sizeof(int);
+ ret = wl_android_iolist_add(dev, &miracast_resume_list, &config);
+ if (ret)
+ goto resume;
+
+#if defined(BCM4339_CHIP)
+ config.iovar = "phy_watchdog";
+ config.param = 0;
+ ret = wl_android_iolist_add(dev, &miracast_resume_list, &config);
+ ANDROID_INFO(("%s: do iovar cmd=%s (ret=%d)\n",
+ __FUNCTION__, config.iovar, ret));
+#endif
+#endif /* CUSTOMER_HW10 */
+
+#ifndef CUSTOMER_HW10
+
+		/* turn off PM */
+ ret = wldev_ioctl_get(dev, WLC_GET_PM, &val, sizeof(val));
+ if (ret) {
+ goto resume;
+ }
+
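+		/* Store the GET ioctl (WLC_GET_PM) in the config; iolist add/resume
+		 * derive the corresponding SET as ioctl + 1.
+		 */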
+ if (val != PM_OFF) {
+ val = PM_OFF;
+ config.iovar = NULL;
+ config.ioctl = WLC_GET_PM;
+ config.arg = &val;
+ config.len = sizeof(int);
+ ret = wl_android_iolist_add(dev, &miracast_resume_list, &config);
+ if (ret) {
+ goto resume;
+ }
+ }
+#endif /* CUSTOMER_HW10 */
+ break;
+ case MIRACAST_MODE_OFF:
+ default:
+ break;
+ }
+ miracast_cur_mode = mode;
+
+ return 0;
+
+resume:
+	ANDROID_ERROR(("%s: turning off miracast mode because of error %d\n", __FUNCTION__, ret));
+ wl_android_iolist_resume(dev, &miracast_resume_list);
+ return ret;
+}
+#endif /* WL_CFG80211 */
+
+#ifdef WL_RELMCAST
+#define NETLINK_OXYGEN 30
+#define AIBSS_BEACON_TIMEOUT 10
+
+static struct sock *nl_sk = NULL;
+
+static void wl_netlink_recv(struct sk_buff *skb)
+{
+ ANDROID_ERROR(("netlink_recv called\n"));
+}
+
+static int wl_netlink_init(void)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
+ struct netlink_kernel_cfg cfg = {
+ .input = wl_netlink_recv,
+ };
+#endif
+
+ if (nl_sk != NULL) {
+ ANDROID_ERROR(("nl_sk already exist\n"));
+ return BCME_ERROR;
+ }
+
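+	/* netlink_kernel_create() changed signature over time: before 3.6 it took
+	 * the input handler directly, 3.6 moved the parameters into struct
+	 * netlink_kernel_cfg, and 3.7 dropped the explicit module argument.
+	 */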
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
+ nl_sk = netlink_kernel_create(&init_net, NETLINK_OXYGEN,
+ 0, wl_netlink_recv, NULL, THIS_MODULE);
+#elif (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
+ nl_sk = netlink_kernel_create(&init_net, NETLINK_OXYGEN, THIS_MODULE, &cfg);
+#else
+ nl_sk = netlink_kernel_create(&init_net, NETLINK_OXYGEN, &cfg);
+#endif
+
+ if (nl_sk == NULL) {
+ ANDROID_ERROR(("nl_sk is not ready\n"));
+ return BCME_ERROR;
+ }
+
+ return BCME_OK;
+}
+
+static void wl_netlink_deinit(void)
+{
+ if (nl_sk) {
+ netlink_kernel_release(nl_sk);
+ nl_sk = NULL;
+ }
+}
+
+s32
+wl_netlink_send_msg(int pid, int type, int seq, const void *data, size_t size)
+{
+ struct sk_buff *skb = NULL;
+ struct nlmsghdr *nlh = NULL;
+ int ret = -1;
+
+ if (nl_sk == NULL) {
+ ANDROID_ERROR(("nl_sk was not initialized\n"));
+ goto nlmsg_failure;
+ }
+
+ skb = alloc_skb(NLMSG_SPACE(size), GFP_ATOMIC);
+ if (skb == NULL) {
+ ANDROID_ERROR(("failed to allocate memory\n"));
+ goto nlmsg_failure;
+ }
+
+ nlh = nlmsg_put(skb, 0, 0, 0, size, 0);
+ if (nlh == NULL) {
+ ANDROID_ERROR(("failed to build nlmsg, skb_tailroom:%d, nlmsg_total_size:%d\n",
+ skb_tailroom(skb), nlmsg_total_size(size)));
+ dev_kfree_skb(skb);
+ goto nlmsg_failure;
+ }
+
+ memcpy(nlmsg_data(nlh), data, size);
+ nlh->nlmsg_seq = seq;
+ nlh->nlmsg_type = type;
+
+ /* netlink_unicast() takes ownership of the skb and frees it itself. */
+ ret = netlink_unicast(nl_sk, skb, pid, 0);
+ ANDROID_INFO(("netlink_unicast() pid=%d, ret=%d\n", pid, ret));
+
+nlmsg_failure:
+ return ret;
+}
+#endif /* WL_RELMCAST */
+
+#ifdef WLAIBSS
+static int wl_android_set_ibss_txfail_event(struct net_device *dev, char *command, int total_len)
+{
+ int err = 0;
+ int retry = 0;
+ int pid = 0;
+ aibss_txfail_config_t txfail_config = {0, 0, 0, 0, 0};
+ char smbuf[WLC_IOCTL_SMLEN];
+
+ if (sscanf(command, CMD_SETIBSSTXFAILEVENT " %d %d", &retry, &pid) <= 0) {
+		ANDROID_ERROR(("Failed to get parameter from: %s\n", command));
+ return -1;
+ }
+
+ /* set pid, and if the event was happened, let's send a notification through netlink */
+ wl_cfg80211_set_txfail_pid(dev, pid);
+
+#ifdef WL_RELMCAST
+	/* AIBSS shares the same pid with RMC; it is set only once */
+ wl_cfg80211_set_rmc_pid(dev, pid);
+#endif /* WL_RELMCAST */
+
+ /* If retry value is 0, it disables the functionality for TX Fail. */
+ if (retry > 0) {
+ txfail_config.max_tx_retry = retry;
+ txfail_config.bcn_timeout = 0; /* 0 : disable tx fail from beacon */
+ }
+ txfail_config.version = AIBSS_TXFAIL_CONFIG_VER_0;
+ txfail_config.len = sizeof(txfail_config);
+
+ err = wldev_iovar_setbuf(dev, "aibss_txfail_config", (void *) &txfail_config,
+ sizeof(aibss_txfail_config_t), smbuf, WLC_IOCTL_SMLEN, NULL);
+ ANDROID_INFO(("retry=%d, pid=%d, err=%d\n", retry, pid, err));
+
+ return ((err == 0)?total_len:err);
+}
+
+static int wl_android_get_ibss_peer_info(struct net_device *dev, char *command,
+ int total_len, bool bAll)
+{
+ int error;
+ int bytes_written = 0;
+ void *buf = NULL;
+ bss_peer_list_info_t peer_list_info;
+ bss_peer_info_t *peer_info;
+ int i;
+ bool found = false;
+ struct ether_addr mac_ea;
+ char *str = command;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+
+ ANDROID_INFO(("get ibss peer info(%s)\n", bAll?"true":"false"));
+
+ if (!bAll) {
+ if (bcmstrtok(&str, " ", NULL) == NULL) {
+ ANDROID_ERROR(("invalid command\n"));
+ return -1;
+ }
+
+ if (!str || !bcm_ether_atoe(str, &mac_ea)) {
+ ANDROID_ERROR(("invalid MAC address\n"));
+ return -1;
+ }
+ }
+
+ if ((buf = MALLOC(cfg->osh, WLC_IOCTL_MAXLEN)) == NULL) {
+ ANDROID_ERROR(("kmalloc failed\n"));
+ return -1;
+ }
+
+ error = wldev_iovar_getbuf(dev, "bss_peer_info", NULL, 0, buf, WLC_IOCTL_MAXLEN, NULL);
+ if (unlikely(error)) {
+ ANDROID_ERROR(("could not get ibss peer info (%d)\n", error));
+ MFREE(cfg->osh, buf, WLC_IOCTL_MAXLEN);
+ return -1;
+ }
+
+ memcpy(&peer_list_info, buf, sizeof(peer_list_info));
+ peer_list_info.version = htod16(peer_list_info.version);
+ peer_list_info.bss_peer_info_len = htod16(peer_list_info.bss_peer_info_len);
+ peer_list_info.count = htod32(peer_list_info.count);
+
+ ANDROID_INFO(("ver:%d, len:%d, count:%d\n", peer_list_info.version,
+ peer_list_info.bss_peer_info_len, peer_list_info.count));
+
+ if (peer_list_info.count > 0) {
+ if (bAll)
+ bytes_written += snprintf(&command[bytes_written], total_len, "%u ",
+ peer_list_info.count);
+
+ peer_info = (bss_peer_info_t *) ((char *)buf + BSS_PEER_LIST_INFO_FIXED_LEN);
+
+ for (i = 0; i < peer_list_info.count; i++) {
+
+ ANDROID_INFO(("index:%d rssi:%d, tx:%u, rx:%u\n", i, peer_info->rssi,
+ peer_info->tx_rate, peer_info->rx_rate));
+
+ if (!bAll &&
+ memcmp(&mac_ea, &peer_info->ea, sizeof(struct ether_addr)) == 0) {
+ found = true;
+ }
+
+ if (bAll || found) {
+ bytes_written += snprintf(&command[bytes_written],
+ total_len - bytes_written,
+ MACF" %u %d ", ETHER_TO_MACF(peer_info->ea),
+ peer_info->tx_rate/1000, peer_info->rssi);
+ if (bytes_written >= total_len) {
+					ANDROID_ERROR(("wl_android_get_ibss_peer_info: Insufficient"
+						" buffer space, %d bytes\n",
+						total_len));
+ bytes_written = -1;
+ break;
+ }
+ }
+
+ if (found)
+ break;
+
+ peer_info = (bss_peer_info_t *)((char *)peer_info+sizeof(bss_peer_info_t));
+ }
+ }
+ else {
+		ANDROID_ERROR(("could not get ibss peer info: no items\n"));
+ }
+ ANDROID_INFO(("command(%u):%s\n", total_len, command));
+ ANDROID_INFO(("bytes_written:%d\n", bytes_written));
+
+ MFREE(cfg->osh, buf, WLC_IOCTL_MAXLEN);
+ return bytes_written;
+}
+
+int wl_android_set_ibss_routetable(struct net_device *dev, char *command)
+{
+ char *pcmd = command;
+ char *str = NULL;
+ ibss_route_tbl_t *route_tbl = NULL;
+ char *ioctl_buf = NULL;
+ s32 err = BCME_OK;
+ uint32 route_tbl_len;
+ uint32 entries;
+ char *endptr;
+ uint32 i = 0;
+ struct ipv4_addr dipaddr;
+ struct ether_addr ea;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+
+ route_tbl_len = sizeof(ibss_route_tbl_t) +
+ (MAX_IBSS_ROUTE_TBL_ENTRY - 1) * sizeof(ibss_route_entry_t);
+ route_tbl = (ibss_route_tbl_t *)MALLOCZ(cfg->osh, route_tbl_len);
+ if (!route_tbl) {
+ ANDROID_ERROR(("Route TBL alloc failed\n"));
+ return -ENOMEM;
+ }
+ ioctl_buf = (char *)MALLOCZ(cfg->osh, WLC_IOCTL_MEDLEN);
+ if (!ioctl_buf) {
+ ANDROID_ERROR(("ioctl memory alloc failed\n"));
+ if (route_tbl) {
+ MFREE(cfg->osh, route_tbl, route_tbl_len);
+ }
+ return -ENOMEM;
+ }
+ bzero(ioctl_buf, WLC_IOCTL_MEDLEN);
+
+ /* drop command */
+ str = bcmstrtok(&pcmd, " ", NULL);
+
+ /* get count */
+ str = bcmstrtok(&pcmd, " ", NULL);
+ if (!str) {
+ ANDROID_ERROR(("Invalid number parameter %s\n", str));
+ err = -EINVAL;
+ goto exit;
+ }
+ entries = bcm_strtoul(str, &endptr, 0);
+ if (*endptr != '\0') {
+ ANDROID_ERROR(("Invalid number parameter %s\n", str));
+ err = -EINVAL;
+ goto exit;
+ }
+ if (entries > MAX_IBSS_ROUTE_TBL_ENTRY) {
+ ANDROID_ERROR(("Invalid entries number %u\n", entries));
+ err = -EINVAL;
+ goto exit;
+ }
+
+ ANDROID_INFO(("Routing table count:%u\n", entries));
+ route_tbl->num_entry = entries;
+
+ for (i = 0; i < entries; i++) {
+ str = bcmstrtok(&pcmd, " ", NULL);
+ if (!str || !bcm_atoipv4(str, &dipaddr)) {
+ ANDROID_ERROR(("Invalid ip string %s\n", str));
+ err = -EINVAL;
+ goto exit;
+ }
+
+ str = bcmstrtok(&pcmd, " ", NULL);
+ if (!str || !bcm_ether_atoe(str, &ea)) {
+ ANDROID_ERROR(("Invalid ethernet string %s\n", str));
+ err = -EINVAL;
+ goto exit;
+ }
+ bcopy(&dipaddr, &route_tbl->route_entry[i].ipv4_addr, IPV4_ADDR_LEN);
+ bcopy(&ea, &route_tbl->route_entry[i].nexthop, ETHER_ADDR_LEN);
+ }
+
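+	/* Shrink the iovar payload to the number of entries actually parsed. */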
+ route_tbl_len = sizeof(ibss_route_tbl_t) +
+ ((!entries?0:(entries - 1)) * sizeof(ibss_route_entry_t));
+ err = wldev_iovar_setbuf(dev, "ibss_route_tbl",
+ route_tbl, route_tbl_len, ioctl_buf, WLC_IOCTL_MEDLEN, NULL);
+ if (err != BCME_OK) {
+		ANDROID_ERROR(("Failed to set iovar %d\n", err));
+ err = -EINVAL;
+ }
+
+exit:
+ if (route_tbl) {
+ MFREE(cfg->osh, route_tbl, sizeof(ibss_route_tbl_t) +
+ (MAX_IBSS_ROUTE_TBL_ENTRY - 1) * sizeof(ibss_route_entry_t));
+ }
+ if (ioctl_buf) {
+ MFREE(cfg->osh, ioctl_buf, WLC_IOCTL_MEDLEN);
+ }
+ return err;
+}
+
+int
+wl_android_set_ibss_ampdu(struct net_device *dev, char *command, int total_len)
+{
+ char *pcmd = command;
+ char *str = NULL, *endptr = NULL;
+ struct ampdu_aggr aggr;
+ char smbuf[WLC_IOCTL_SMLEN];
+ int idx;
+ int err = 0;
+ int wme_AC2PRIO[AC_COUNT][2] = {
+ {PRIO_8021D_VO, PRIO_8021D_NC}, /* AC_VO - 3 */
+ {PRIO_8021D_CL, PRIO_8021D_VI}, /* AC_VI - 2 */
+ {PRIO_8021D_BK, PRIO_8021D_NONE}, /* AC_BK - 1 */
+ {PRIO_8021D_BE, PRIO_8021D_EE}}; /* AC_BE - 0 */
+
+ ANDROID_INFO(("set ibss ampdu:%s\n", command));
+
+ bzero(&aggr, sizeof(aggr));
+	/* Configure all priorities */
+ aggr.conf_TID_bmap = NBITMASK(NUMPRIO);
+
+ /* acquire parameters */
+ /* drop command */
+ str = bcmstrtok(&pcmd, " ", NULL);
+
+ for (idx = 0; idx < AC_COUNT; idx++) {
+ bool on;
+ str = bcmstrtok(&pcmd, " ", NULL);
+ if (!str) {
+ ANDROID_ERROR(("Invalid parameter : %s\n", pcmd));
+ return -EINVAL;
+ }
+ on = bcm_strtoul(str, &endptr, 0) ? TRUE : FALSE;
+ if (*endptr != '\0') {
+ ANDROID_ERROR(("Invalid number format %s\n", str));
+ return -EINVAL;
+ }
+ if (on) {
+ setbit(&aggr.enab_TID_bmap, wme_AC2PRIO[idx][0]);
+ setbit(&aggr.enab_TID_bmap, wme_AC2PRIO[idx][1]);
+ }
+ }
+
+ err = wldev_iovar_setbuf(dev, "ampdu_txaggr", (void *)&aggr,
+ sizeof(aggr), smbuf, WLC_IOCTL_SMLEN, NULL);
+
+ return ((err == 0) ? total_len : err);
+}
+
+int wl_android_set_ibss_antenna(struct net_device *dev, char *command, int total_len)
+{
+ char *pcmd = command;
+ char *str = NULL;
+ int txchain, rxchain;
+ int err = 0;
+
+ ANDROID_INFO(("set ibss antenna:%s\n", command));
+
+ /* acquire parameters */
+ /* drop command */
+ str = bcmstrtok(&pcmd, " ", NULL);
+
+ /* TX chain */
+ str = bcmstrtok(&pcmd, " ", NULL);
+ if (!str) {
+ ANDROID_ERROR(("Invalid parameter : %s\n", pcmd));
+ return -EINVAL;
+ }
+ txchain = bcm_atoi(str);
+
+ /* RX chain */
+ str = bcmstrtok(&pcmd, " ", NULL);
+ if (!str) {
+ ANDROID_ERROR(("Invalid parameter : %s\n", pcmd));
+ return -EINVAL;
+ }
+ rxchain = bcm_atoi(str);
+
+ err = wldev_iovar_setint(dev, "txchain", txchain);
+ if (err != 0)
+ return err;
+ err = wldev_iovar_setint(dev, "rxchain", rxchain);
+ return ((err == 0)?total_len:err);
+}
+#endif /* WLAIBSS */
+
+int wl_keep_alive_set(struct net_device *dev, char* extra)
+{
+ wl_mkeep_alive_pkt_t mkeep_alive_pkt;
+ int ret;
+ uint period_msec = 0;
+ char *buf;
+ dhd_pub_t *dhd = dhd_get_pub(dev);
+
+ if (extra == NULL) {
+ ANDROID_ERROR(("%s: extra is NULL\n", __FUNCTION__));
+ return -1;
+ }
+ if (sscanf(extra, "%d", &period_msec) != 1) {
+ ANDROID_ERROR(("%s: sscanf error. check period_msec value\n", __FUNCTION__));
+ return -EINVAL;
+ }
+ ANDROID_ERROR(("%s: period_msec is %d\n", __FUNCTION__, period_msec));
+
+ bzero(&mkeep_alive_pkt, sizeof(wl_mkeep_alive_pkt_t));
+
+ mkeep_alive_pkt.period_msec = period_msec;
+ mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION);
+ mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN);
+
+ /* Setup keep alive zero for null packet generation */
+ mkeep_alive_pkt.keep_alive_id = 0;
+ mkeep_alive_pkt.len_bytes = 0;
+
+ buf = (char *)MALLOC(dhd->osh, WLC_IOCTL_SMLEN);
+ if (!buf) {
+ ANDROID_ERROR(("%s: buffer alloc failed\n", __FUNCTION__));
+ return BCME_NOMEM;
+ }
+ ret = wldev_iovar_setbuf(dev, "mkeep_alive", (char *)&mkeep_alive_pkt,
+ WL_MKEEP_ALIVE_FIXED_LEN, buf, WLC_IOCTL_SMLEN, NULL);
+ if (ret < 0)
+ ANDROID_ERROR(("%s:keep_alive set failed:%d\n", __FUNCTION__, ret));
+ else
+ ANDROID_TRACE(("%s:keep_alive set ok\n", __FUNCTION__));
+ MFREE(dhd->osh, buf, WLC_IOCTL_SMLEN);
+ return ret;
+}
+
+#ifdef P2PRESP_WFDIE_SRC
+static int wl_android_get_wfdie_resp(struct net_device *dev, char *command, int total_len)
+{
+ int error = 0;
+ int bytes_written = 0;
+ int only_resp_wfdsrc = 0;
+
+ error = wldev_iovar_getint(dev, "p2p_only_resp_wfdsrc", &only_resp_wfdsrc);
+ if (error) {
+ ANDROID_ERROR(("%s: Failed to get the mode for only_resp_wfdsrc, error = %d\n",
+ __FUNCTION__, error));
+ return -1;
+ }
+
+ bytes_written = snprintf(command, total_len, "%s %d",
+ CMD_P2P_GET_WFDIE_RESP, only_resp_wfdsrc);
+
+ return bytes_written;
+}
+
+static int wl_android_set_wfdie_resp(struct net_device *dev, int only_resp_wfdsrc)
+{
+ int error = 0;
+
+ error = wldev_iovar_setint(dev, "p2p_only_resp_wfdsrc", only_resp_wfdsrc);
+ if (error) {
+ ANDROID_ERROR(("%s: Failed to set only_resp_wfdsrc %d, error = %d\n",
+ __FUNCTION__, only_resp_wfdsrc, error));
+ return -1;
+ }
+
+ return 0;
+}
+#endif /* P2PRESP_WFDIE_SRC */
+
+#ifdef BT_WIFI_HANDOVER
+static int
+wl_tbow_teardown(struct net_device *dev)
+{
+ int err = BCME_OK;
+ char buf[WLC_IOCTL_SMLEN];
+ tbow_setup_netinfo_t netinfo;
+ bzero(&netinfo, sizeof(netinfo));
+ netinfo.opmode = TBOW_HO_MODE_TEARDOWN;
+
+ err = wldev_iovar_setbuf_bsscfg(dev, "tbow_doho", &netinfo,
+ sizeof(tbow_setup_netinfo_t), buf, WLC_IOCTL_SMLEN, 0, NULL);
+ if (err < 0) {
+ ANDROID_ERROR(("tbow_doho iovar error %d\n", err));
+ return err;
+ }
+ return err;
+}
+#endif /* BT_WIFI_HANDOVER */
+
+static int wl_android_get_link_status(struct net_device *dev, char *command,
+ int total_len)
+{
+ int bytes_written, error, result = 0, single_stream, stf = -1, i, nss = 0, mcs_map;
+ uint32 rspec;
+ uint encode, txexp;
+ wl_bss_info_t *bi;
+ int datalen = sizeof(uint32) + sizeof(wl_bss_info_t);
+ char buf[WLC_IOCTL_SMLEN];
+
+ if (datalen > WLC_IOCTL_SMLEN) {
+ ANDROID_ERROR(("data too big\n"));
+ return -1;
+ }
+
+ bzero(buf, datalen);
+ /* get BSS information */
+ *(u32 *) buf = htod32(datalen);
+ error = wldev_ioctl_get(dev, WLC_GET_BSS_INFO, (void *)buf, datalen);
+ if (unlikely(error)) {
+ ANDROID_ERROR(("Could not get bss info %d\n", error));
+ return -1;
+ }
+
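+	/* The first four bytes of the buffer hold the request length written above;
+	 * the bss_info record follows them.
+	 */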
+ bi = (wl_bss_info_t*) (buf + sizeof(uint32));
+
+ for (i = 0; i < ETHER_ADDR_LEN; i++) {
+ if (bi->BSSID.octet[i] > 0) {
+ break;
+ }
+ }
+
+ if (i == ETHER_ADDR_LEN) {
+ ANDROID_INFO(("No BSSID\n"));
+ return -1;
+ }
+
+ /* check VHT capability at beacon */
+ if (bi->vht_cap) {
+ if (CHSPEC_IS5G(bi->chanspec)) {
+ result |= WL_ANDROID_LINK_AP_VHT_SUPPORT;
+ }
+ }
+
+ /* get a rspec (radio spectrum) rate */
+ error = wldev_iovar_getint(dev, "nrate", &rspec);
+ if (unlikely(error) || rspec == 0) {
+ ANDROID_ERROR(("get link status error (%d)\n", error));
+ return -1;
+ }
+
+ /* referred wl_nrate_print() for the calculation */
+ encode = (rspec & WL_RSPEC_ENCODING_MASK);
+ txexp = (rspec & WL_RSPEC_TXEXP_MASK) >> WL_RSPEC_TXEXP_SHIFT;
+
+ switch (encode) {
+ case WL_RSPEC_ENCODE_HT:
+ /* check Rx MCS Map for HT */
+ for (i = 0; i < MAX_STREAMS_SUPPORTED; i++) {
+ int8 bitmap = 0xFF;
+ if (i == MAX_STREAMS_SUPPORTED-1) {
+ bitmap = 0x7F;
+ }
+ if (bi->basic_mcs[i] & bitmap) {
+ nss++;
+ }
+ }
+ break;
+ case WL_RSPEC_ENCODE_VHT:
+ /* check Rx MCS Map for VHT */
+ for (i = 1; i <= VHT_CAP_MCS_MAP_NSS_MAX; i++) {
+ mcs_map = VHT_MCS_MAP_GET_MCS_PER_SS(i, dtoh16(bi->vht_rxmcsmap));
+ if (mcs_map != VHT_CAP_MCS_MAP_NONE) {
+ nss++;
+ }
+ }
+ break;
+ }
+
+ /* check MIMO capability with nss in beacon */
+ if (nss > 1) {
+ result |= WL_ANDROID_LINK_AP_MIMO_SUPPORT;
+ }
+
+ /* Legacy rates WL_RSPEC_ENCODE_RATE are single stream, and
+ * HT rates for mcs 0-7 are single stream.
+ * In case of VHT NSS comes from rspec.
+ */
+ single_stream = (encode == WL_RSPEC_ENCODE_RATE) ||
+ ((encode == WL_RSPEC_ENCODE_HT) && (rspec & WL_RSPEC_HT_MCS_MASK) < 8) ||
+ ((encode == WL_RSPEC_ENCODE_VHT) &&
+ ((rspec & WL_RSPEC_VHT_NSS_MASK) >> WL_RSPEC_VHT_NSS_SHIFT) == 1);
+
+ if (txexp == 0) {
+ if ((rspec & WL_RSPEC_STBC) && single_stream) {
+ stf = OLD_NRATE_STF_STBC;
+ } else {
+ stf = (single_stream) ? OLD_NRATE_STF_SISO : OLD_NRATE_STF_SDM;
+ }
+ } else if (txexp == 1 && single_stream) {
+ stf = OLD_NRATE_STF_CDD;
+ }
+
+ /* check 11ac (VHT) */
+ if (encode == WL_RSPEC_ENCODE_VHT) {
+ if (CHSPEC_IS5G(bi->chanspec)) {
+ result |= WL_ANDROID_LINK_VHT;
+ }
+ }
+
+ /* check MIMO */
+ if (result & WL_ANDROID_LINK_AP_MIMO_SUPPORT) {
+ switch (stf) {
+ case OLD_NRATE_STF_SISO:
+ break;
+ case OLD_NRATE_STF_CDD:
+ case OLD_NRATE_STF_STBC:
+ result |= WL_ANDROID_LINK_MIMO;
+ break;
+ case OLD_NRATE_STF_SDM:
+ if (!single_stream) {
+ result |= WL_ANDROID_LINK_MIMO;
+ }
+ break;
+ }
+ }
+
+	ANDROID_INFO(("%s:result=%d, stf=%d, single_stream=%d, nss=%d\n",
+		__FUNCTION__, result, stf, single_stream, nss));
+
+ bytes_written = snprintf(command, total_len, "%s %d", CMD_GET_LINK_STATUS, result);
+
+ return bytes_written;
+}
+
+#ifdef P2P_LISTEN_OFFLOADING
+
+s32
+wl_cfg80211_p2plo_deinit(struct bcm_cfg80211 *cfg)
+{
+ s32 bssidx;
+ int ret = 0;
+ int p2plo_pause = 0;
+ dhd_pub_t *dhd = NULL;
+ if (!cfg || !cfg->p2p) {
+ ANDROID_ERROR(("Wl %p or cfg->p2p %p is null\n",
+ cfg, cfg ? cfg->p2p : 0));
+ return 0;
+ }
+
+ dhd = (dhd_pub_t *)(cfg->pub);
+ if (!dhd->up) {
+ ANDROID_ERROR(("bus is already down\n"));
+ return ret;
+ }
+
+ bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+ ret = wldev_iovar_setbuf_bsscfg(bcmcfg_to_prmry_ndev(cfg),
+ "p2po_stop", (void*)&p2plo_pause, sizeof(p2plo_pause),
+ cfg->ioctl_buf, WLC_IOCTL_SMLEN, bssidx, &cfg->ioctl_buf_sync);
+ if (ret < 0) {
+ ANDROID_ERROR(("p2po_stop Failed :%d\n", ret));
+ }
+
+ return ret;
+}
+
+s32
+wl_cfg80211_p2plo_listen_start(struct net_device *dev, u8 *buf, int len)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ s32 bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+ wl_p2plo_listen_t p2plo_listen;
+ int ret = -EAGAIN;
+ int channel = 0;
+ int period = 0;
+ int interval = 0;
+ int count = 0;
+ if (WL_DRV_STATUS_SENDING_AF_FRM_EXT(cfg)) {
+ ANDROID_ERROR(("Sending Action Frames. Try it again.\n"));
+ goto exit;
+ }
+
+ if (wl_get_drv_status_all(cfg, SCANNING)) {
+ ANDROID_ERROR(("Scanning already\n"));
+ goto exit;
+ }
+
+ if (wl_get_drv_status(cfg, SCAN_ABORTING, dev)) {
+ ANDROID_ERROR(("Scanning being aborted\n"));
+ goto exit;
+ }
+
+ if (wl_get_p2p_status(cfg, DISC_IN_PROGRESS)) {
+ ANDROID_ERROR(("p2p listen offloading already running\n"));
+ goto exit;
+ }
+
+ /* Just in case if it is not enabled */
+ if ((ret = wl_cfgp2p_enable_discovery(cfg, dev, NULL, 0)) < 0) {
+		ANDROID_ERROR(("cfgp2p_enable discovery failed\n"));
+ goto exit;
+ }
+
+ bzero(&p2plo_listen, sizeof(wl_p2plo_listen_t));
+
+ if (len) {
+ sscanf(buf, " %10d %10d %10d %10d", &channel, &period, &interval, &count);
+ if ((channel == 0) || (period == 0) ||
+ (interval == 0) || (count == 0)) {
+ ANDROID_ERROR(("Wrong argument %d/%d/%d/%d \n",
+ channel, period, interval, count));
+ ret = -EAGAIN;
+ goto exit;
+ }
+ p2plo_listen.period = period;
+ p2plo_listen.interval = interval;
+ p2plo_listen.count = count;
+
+ ANDROID_ERROR(("channel:%d period:%d, interval:%d count:%d\n",
+ channel, period, interval, count));
+ } else {
+ ANDROID_ERROR(("Argument len is wrong.\n"));
+ ret = -EAGAIN;
+ goto exit;
+ }
+
+ if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_listen_channel", (void*)&channel,
+ sizeof(channel), cfg->ioctl_buf, WLC_IOCTL_SMLEN,
+ bssidx, &cfg->ioctl_buf_sync)) < 0) {
+ ANDROID_ERROR(("p2po_listen_channel Failed :%d\n", ret));
+ goto exit;
+ }
+
+ if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_listen", (void*)&p2plo_listen,
+ sizeof(wl_p2plo_listen_t), cfg->ioctl_buf, WLC_IOCTL_SMLEN,
+ bssidx, &cfg->ioctl_buf_sync)) < 0) {
+ ANDROID_ERROR(("p2po_listen Failed :%d\n", ret));
+ goto exit;
+ }
+
+ wl_set_p2p_status(cfg, DISC_IN_PROGRESS);
+exit:
+ return ret;
+}
+
+s32
+wl_cfg80211_p2plo_listen_stop(struct net_device *dev)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ s32 bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+ int ret = -EAGAIN;
+
+ if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_stop", NULL,
+ 0, cfg->ioctl_buf, WLC_IOCTL_SMLEN,
+ bssidx, &cfg->ioctl_buf_sync)) < 0) {
+ ANDROID_ERROR(("p2po_stop Failed :%d\n", ret));
+ goto exit;
+ }
+
+exit:
+ return ret;
+}
+
+s32
+wl_cfg80211_p2plo_offload(struct net_device *dev, char *cmd, char* buf, int len)
+{
+ int ret = 0;
+
+ ANDROID_ERROR(("Entry cmd:%s arg_len:%d \n", cmd, len));
+
+ if (strncmp(cmd, "P2P_LO_START", strlen("P2P_LO_START")) == 0) {
+ ret = wl_cfg80211_p2plo_listen_start(dev, buf, len);
+ } else if (strncmp(cmd, "P2P_LO_STOP", strlen("P2P_LO_STOP")) == 0) {
+ ret = wl_cfg80211_p2plo_listen_stop(dev);
+ } else {
+ ANDROID_ERROR(("Request for Unsupported CMD:%s \n", buf));
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+void
+wl_cfg80211_cancel_p2plo(struct bcm_cfg80211 *cfg)
+{
+ struct wireless_dev *wdev;
+ if (!cfg) {
+ return;
+ }
+
+ wdev = bcmcfg_to_p2p_wdev(cfg);
+
+ if (wl_get_p2p_status(cfg, DISC_IN_PROGRESS)) {
+		ANDROID_INFO(("P2P_FIND: Discovery offload is already in progress;"
+			" aborting it\n"));
+ wl_clr_p2p_status(cfg, DISC_IN_PROGRESS);
+ if (wdev != NULL) {
+#if defined(WL_CFG80211_P2P_DEV_IF)
+ cfg80211_remain_on_channel_expired(wdev,
+ cfg->last_roc_id,
+ &cfg->remain_on_chan, GFP_KERNEL);
+#else
+ cfg80211_remain_on_channel_expired(wdev,
+ cfg->last_roc_id,
+ &cfg->remain_on_chan,
+ cfg->remain_on_chan_type, GFP_KERNEL);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+ }
+ wl_cfg80211_p2plo_deinit(cfg);
+ }
+}
+#endif /* P2P_LISTEN_OFFLOADING */
+
+#ifdef WL_MURX
+int
+wl_android_murx_bfe_cap(struct net_device *dev, int val)
+{
+ int err = BCME_OK;
+ int iface_count = wl_cfg80211_iface_count(dev);
+ struct ether_addr bssid;
+ wl_reassoc_params_t params;
+
+ if (iface_count > 1) {
+ ANDROID_ERROR(("murx_bfe_cap change is not allowed when "
+ "there are multiple interfaces\n"));
+ return -EINVAL;
+ }
+ /* Now there is only single interface */
+ err = wldev_iovar_setint(dev, "murx_bfe_cap", val);
+ if (unlikely(err)) {
+ ANDROID_ERROR(("Failed to set murx_bfe_cap IOVAR to %d,"
+ "error %d\n", val, err));
+ return err;
+ }
+
+	/* If successful, initiate a reassoc */
+ bzero(&bssid, ETHER_ADDR_LEN);
+ if ((err = wldev_ioctl_get(dev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN)) < 0) {
+ ANDROID_ERROR(("Failed to get bssid, error=%d\n", err));
+ return err;
+ }
+
+ bzero(&params, sizeof(wl_reassoc_params_t));
+ memcpy(&params.bssid, &bssid, ETHER_ADDR_LEN);
+
+ if ((err = wldev_ioctl_set(dev, WLC_REASSOC, &params,
+ sizeof(wl_reassoc_params_t))) < 0) {
+ ANDROID_ERROR(("reassoc failed err:%d \n", err));
+ } else {
+ ANDROID_INFO(("reassoc issued successfully\n"));
+ }
+
+ return err;
+}
+#endif /* WL_MURX */
+
+#ifdef SUPPORT_RSSI_SUM_REPORT
+int
+wl_android_get_rssi_per_ant(struct net_device *dev, char *command, int total_len)
+{
+ wl_rssi_ant_mimo_t rssi_ant_mimo;
+ char *ifname = NULL;
+ char *peer_mac = NULL;
+ char *mimo_cmd = "mimo";
+ char *pos, *token;
+ int err = BCME_OK;
+ int bytes_written = 0;
+ bool mimo_rssi = FALSE;
+
+ bzero(&rssi_ant_mimo, sizeof(wl_rssi_ant_mimo_t));
+ /*
+ * STA I/F: DRIVER GET_RSSI_PER_ANT <ifname> <mimo>
+ * AP/GO I/F: DRIVER GET_RSSI_PER_ANT <ifname> <Peer MAC addr> <mimo>
+ */
+ pos = command;
+
+ /* drop command */
+ token = bcmstrtok(&pos, " ", NULL);
+
+ /* get the interface name */
+ token = bcmstrtok(&pos, " ", NULL);
+ if (!token) {
+ ANDROID_ERROR(("Invalid arguments\n"));
+ return -EINVAL;
+ }
+ ifname = token;
+
+ /* Optional: Check the MIMO RSSI mode or peer MAC address */
+ token = bcmstrtok(&pos, " ", NULL);
+ if (token) {
+ /* Check the MIMO RSSI mode */
+ if (strncmp(token, mimo_cmd, strlen(mimo_cmd)) == 0) {
+ mimo_rssi = TRUE;
+ } else {
+ peer_mac = token;
+ }
+ }
+
+ /* Optional: Check the MIMO RSSI mode - RSSI sum across antennas */
+ token = bcmstrtok(&pos, " ", NULL);
+ if (token && strncmp(token, mimo_cmd, strlen(mimo_cmd)) == 0) {
+ mimo_rssi = TRUE;
+ }
+
+ err = wl_get_rssi_per_ant(dev, ifname, peer_mac, &rssi_ant_mimo);
+ if (unlikely(err)) {
+ ANDROID_ERROR(("Failed to get RSSI info, err=%d\n", err));
+ return err;
+ }
+
+ /* Parse the results */
+ ANDROID_INFO(("ifname %s, version %d, count %d, mimo rssi %d\n",
+ ifname, rssi_ant_mimo.version, rssi_ant_mimo.count, mimo_rssi));
+ if (mimo_rssi) {
+ ANDROID_INFO(("MIMO RSSI: %d\n", rssi_ant_mimo.rssi_sum));
+ bytes_written = snprintf(command, total_len, "%s MIMO %d",
+ CMD_GET_RSSI_PER_ANT, rssi_ant_mimo.rssi_sum);
+ } else {
+ int cnt;
+ bytes_written = snprintf(command, total_len, "%s PER_ANT ", CMD_GET_RSSI_PER_ANT);
+ for (cnt = 0; cnt < rssi_ant_mimo.count; cnt++) {
+ ANDROID_INFO(("RSSI[%d]: %d\n", cnt, rssi_ant_mimo.rssi_ant[cnt]));
+			bytes_written += snprintf(command + bytes_written,
+				total_len - bytes_written, "%d ",
+				rssi_ant_mimo.rssi_ant[cnt]);
+ }
+ }
+
+ return bytes_written;
+}
+
+int
+wl_android_set_rssi_logging(struct net_device *dev, char *command, int total_len)
+{
+ rssilog_set_param_t set_param;
+ char *pos, *token;
+ int err = BCME_OK;
+
+ bzero(&set_param, sizeof(rssilog_set_param_t));
+ /*
+ * DRIVER SET_RSSI_LOGGING <enable/disable> <RSSI Threshold> <Time Threshold>
+ */
+ pos = command;
+
+ /* drop command */
+ token = bcmstrtok(&pos, " ", NULL);
+
+ /* enable/disable */
+ token = bcmstrtok(&pos, " ", NULL);
+ if (!token) {
+ ANDROID_ERROR(("Invalid arguments\n"));
+ return -EINVAL;
+ }
+ set_param.enable = bcm_atoi(token);
+
+ /* RSSI Threshold */
+ token = bcmstrtok(&pos, " ", NULL);
+ if (!token) {
+ ANDROID_ERROR(("Invalid arguments\n"));
+ return -EINVAL;
+ }
+ set_param.rssi_threshold = bcm_atoi(token);
+
+ /* Time Threshold */
+ token = bcmstrtok(&pos, " ", NULL);
+ if (!token) {
+ ANDROID_ERROR(("Invalid arguments\n"));
+ return -EINVAL;
+ }
+ set_param.time_threshold = bcm_atoi(token);
+
+ ANDROID_INFO(("enable %d, RSSI threshold %d, Time threshold %d\n", set_param.enable,
+ set_param.rssi_threshold, set_param.time_threshold));
+
+ err = wl_set_rssi_logging(dev, (void *)&set_param);
+ if (unlikely(err)) {
+ ANDROID_ERROR(("Failed to configure RSSI logging: enable %d, RSSI Threshold %d,"
+ " Time Threshold %d\n", set_param.enable, set_param.rssi_threshold,
+ set_param.time_threshold));
+ }
+
+ return err;
+}
+
+int
+wl_android_get_rssi_logging(struct net_device *dev, char *command, int total_len)
+{
+ rssilog_get_param_t get_param;
+ int err = BCME_OK;
+ int bytes_written = 0;
+
+ err = wl_get_rssi_logging(dev, (void *)&get_param);
+ if (unlikely(err)) {
+ ANDROID_ERROR(("Failed to get RSSI logging info\n"));
+ return BCME_ERROR;
+ }
+
+ ANDROID_INFO(("report_count %d, enable %d, rssi_threshold %d, time_threshold %d\n",
+ get_param.report_count, get_param.enable, get_param.rssi_threshold,
+ get_param.time_threshold));
+
+ /* Parse the parameter */
+ if (!get_param.enable) {
+		ANDROID_INFO(("RSSI LOGGING: Feature is disabled\n"));
+ bytes_written = snprintf(command, total_len,
+ "%s FEATURE DISABLED\n", CMD_GET_RSSI_LOGGING);
+ } else if (get_param.enable &
+ (RSSILOG_FLAG_FEATURE_SW | RSSILOG_FLAG_REPORT_READY)) {
+ if (!get_param.report_count) {
+ ANDROID_INFO(("[PASS] RSSI difference across antennas is within"
+ " threshold limits\n"));
+ bytes_written = snprintf(command, total_len, "%s PASS\n",
+ CMD_GET_RSSI_LOGGING);
+ } else {
+ ANDROID_INFO(("[FAIL] RSSI difference across antennas found "
+ "to be greater than %3d dB\n", get_param.rssi_threshold));
+			ANDROID_INFO(("[FAIL] RSSI difference check has failed for "
+ "%d out of %d times\n", get_param.report_count,
+ get_param.time_threshold));
+ ANDROID_INFO(("[FAIL] RSSI difference is being monitored once "
+ "per second, for a %d secs window\n", get_param.time_threshold));
+ bytes_written = snprintf(command, total_len, "%s FAIL - RSSI Threshold "
+ "%d dBm for %d out of %d times\n", CMD_GET_RSSI_LOGGING,
+ get_param.rssi_threshold, get_param.report_count,
+ get_param.time_threshold);
+ }
+ } else {
+		ANDROID_INFO(("[BUSY] Report is not ready\n"));
+ bytes_written = snprintf(command, total_len, "%s BUSY - NOT READY\n",
+ CMD_GET_RSSI_LOGGING);
+ }
+
+ return bytes_written;
+}
+#endif /* SUPPORT_RSSI_SUM_REPORT */
+
+#ifdef SET_PCIE_IRQ_CPU_CORE
+void
+wl_android_set_irq_cpucore(struct net_device *net, int affinity_cmd)
+{
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(net);
+ if (!dhdp) {
+ ANDROID_ERROR(("dhd is NULL\n"));
+ return;
+ }
+
+ dhd_set_irq_cpucore(dhdp, affinity_cmd);
+}
+#endif /* SET_PCIE_IRQ_CPU_CORE */
+
+#ifdef SUPPORT_LQCM
+static int
+wl_android_lqcm_enable(struct net_device *net, int lqcm_enable)
+{
+ int err = 0;
+
+ err = wldev_iovar_setint(net, "lqcm", lqcm_enable);
+ if (err != BCME_OK) {
+ ANDROID_ERROR(("failed to set lqcm enable %d, error = %d\n", lqcm_enable, err));
+ return -EIO;
+ }
+ return err;
+}
+
+static int
+wl_android_get_lqcm_report(struct net_device *dev, char *command, int total_len)
+{
+ int bytes_written, err = 0;
+ uint32 lqcm_report = 0;
+ uint32 lqcm_enable, tx_lqcm_idx, rx_lqcm_idx;
+
+ err = wldev_iovar_getint(dev, "lqcm", &lqcm_report);
+ if (err != BCME_OK) {
+ ANDROID_ERROR(("failed to get lqcm report, error = %d\n", err));
+ return -EIO;
+ }
+ lqcm_enable = lqcm_report & LQCM_ENAB_MASK;
+ tx_lqcm_idx = (lqcm_report & LQCM_TX_INDEX_MASK) >> LQCM_TX_INDEX_SHIFT;
+ rx_lqcm_idx = (lqcm_report & LQCM_RX_INDEX_MASK) >> LQCM_RX_INDEX_SHIFT;
+
+ ANDROID_INFO(("lqcm report EN:%d, TX:%d, RX:%d\n", lqcm_enable, tx_lqcm_idx, rx_lqcm_idx));
+
+ bytes_written = snprintf(command, total_len, "%s %d",
+ CMD_GET_LQCM_REPORT, lqcm_report);
+
+ return bytes_written;
+}
+#endif /* SUPPORT_LQCM */
+
+int
+wl_android_get_snr(struct net_device *dev, char *command, int total_len)
+{
+ int bytes_written, error = 0;
+ s32 snr = 0;
+
+ error = wldev_iovar_getint(dev, "snr", &snr);
+ if (error) {
+ ANDROID_ERROR(("%s: Failed to get SNR %d, error = %d\n",
+ __FUNCTION__, snr, error));
+ return -EIO;
+ }
+
+ bytes_written = snprintf(command, total_len, "snr %d", snr);
+ ANDROID_INFO(("%s: command result is %s\n", __FUNCTION__, command));
+ return bytes_written;
+}
+
+#ifdef SUPPORT_AP_HIGHER_BEACONRATE
+int
+wl_android_set_ap_beaconrate(struct net_device *dev, char *command)
+{
+ int rate = 0;
+ char *pos, *token;
+ char *ifname = NULL;
+ int err = BCME_OK;
+
+ /*
+ * DRIVER SET_AP_BEACONRATE <rate> <ifname>
+ */
+ pos = command;
+
+ /* drop command */
+ token = bcmstrtok(&pos, " ", NULL);
+
+ /* Rate */
+ token = bcmstrtok(&pos, " ", NULL);
+ if (!token)
+ return -EINVAL;
+ rate = bcm_atoi(token);
+
+ /* get the interface name */
+ token = bcmstrtok(&pos, " ", NULL);
+ if (!token)
+ return -EINVAL;
+ ifname = token;
+
+ ANDROID_INFO(("rate %d, ifacename %s\n", rate, ifname));
+
+ err = wl_set_ap_beacon_rate(dev, rate, ifname);
+ if (unlikely(err)) {
+ ANDROID_ERROR(("Failed to set ap beacon rate to %d, error = %d\n", rate, err));
+ }
+
+ return err;
+}
+
+int wl_android_get_ap_basicrate(struct net_device *dev, char *command, int total_len)
+{
+ char *pos, *token;
+ char *ifname = NULL;
+ int bytes_written = 0;
+ /*
+ * DRIVER GET_AP_BASICRATE <ifname>
+ */
+ pos = command;
+
+ /* drop command */
+ token = bcmstrtok(&pos, " ", NULL);
+
+ /* get the interface name */
+ token = bcmstrtok(&pos, " ", NULL);
+ if (!token)
+ return -EINVAL;
+ ifname = token;
+
+ ANDROID_INFO(("ifacename %s\n", ifname));
+
+ bytes_written = wl_get_ap_basic_rate(dev, command, ifname, total_len);
+ if (bytes_written < 1) {
+ ANDROID_ERROR(("Failed to get ap basic rate, error = %d\n", bytes_written));
+ return -EPROTO;
+ }
+
+ return bytes_written;
+}
+#endif /* SUPPORT_AP_HIGHER_BEACONRATE */
+
+#ifdef SUPPORT_AP_RADIO_PWRSAVE
+int
+wl_android_get_ap_rps(struct net_device *dev, char *command, int total_len)
+{
+ char *pos, *token;
+ char *ifname = NULL;
+ int bytes_written = 0;
+ char name[IFNAMSIZ];
+ /*
+ * DRIVER GET_AP_RPS <ifname>
+ */
+ pos = command;
+
+ /* drop command */
+ token = bcmstrtok(&pos, " ", NULL);
+
+ /* get the interface name */
+ token = bcmstrtok(&pos, " ", NULL);
+ if (!token)
+ return -EINVAL;
+ ifname = token;
+
+ strlcpy(name, ifname, sizeof(name));
+ ANDROID_INFO(("ifacename %s\n", name));
+
+ bytes_written = wl_get_ap_rps(dev, command, name, total_len);
+ if (bytes_written < 1) {
+ ANDROID_ERROR(("Failed to get rps, error = %d\n", bytes_written));
+ return -EPROTO;
+ }
+
+ return bytes_written;
+
+}
+
+int
+wl_android_set_ap_rps(struct net_device *dev, char *command, int total_len)
+{
+ int enable = 0;
+ char *pos, *token;
+ char *ifname = NULL;
+ int err = BCME_OK;
+ char name[IFNAMSIZ];
+
+ /*
+ * DRIVER SET_AP_RPS <0/1> <ifname>
+ */
+ pos = command;
+
+ /* drop command */
+ token = bcmstrtok(&pos, " ", NULL);
+
+ /* Enable */
+ token = bcmstrtok(&pos, " ", NULL);
+ if (!token)
+ return -EINVAL;
+ enable = bcm_atoi(token);
+
+ /* get the interface name */
+ token = bcmstrtok(&pos, " ", NULL);
+ if (!token)
+ return -EINVAL;
+ ifname = token;
+
+ strlcpy(name, ifname, sizeof(name));
+ ANDROID_INFO(("enable %d, ifacename %s\n", enable, name));
+
+ err = wl_set_ap_rps(dev, enable? TRUE: FALSE, name);
+ if (unlikely(err)) {
+ ANDROID_ERROR(("Failed to set rps, enable %d, error = %d\n", enable, err));
+ }
+
+ return err;
+}
+
+int
+wl_android_set_ap_rps_params(struct net_device *dev, char *command, int total_len)
+{
+ ap_rps_info_t rps;
+ char *pos, *token;
+ char *ifname = NULL;
+ int err = BCME_OK;
+ char name[IFNAMSIZ];
+
+ bzero(&rps, sizeof(rps));
+ /*
+ * DRIVER SET_AP_RPS_PARAMS <pps> <level> <quiettime> <assoccheck> <ifname>
+ */
+ pos = command;
+
+ /* drop command */
+ token = bcmstrtok(&pos, " ", NULL);
+
+ /* pps */
+ token = bcmstrtok(&pos, " ", NULL);
+ if (!token)
+ return -EINVAL;
+ rps.pps = bcm_atoi(token);
+
+ /* level */
+ token = bcmstrtok(&pos, " ", NULL);
+ if (!token)
+ return -EINVAL;
+ rps.level = bcm_atoi(token);
+
+ /* quiettime */
+ token = bcmstrtok(&pos, " ", NULL);
+ if (!token)
+ return -EINVAL;
+ rps.quiet_time = bcm_atoi(token);
+
+ /* sta assoc check */
+ token = bcmstrtok(&pos, " ", NULL);
+ if (!token)
+ return -EINVAL;
+ rps.sta_assoc_check = bcm_atoi(token);
+
+ /* get the interface name */
+ token = bcmstrtok(&pos, " ", NULL);
+ if (!token)
+ return -EINVAL;
+ ifname = token;
+ strlcpy(name, ifname, sizeof(name));
+
+ ANDROID_INFO(("pps %d, level %d, quiettime %d, sta_assoc_check %d, "
+ "ifacename %s\n", rps.pps, rps.level, rps.quiet_time,
+ rps.sta_assoc_check, name));
+
+ err = wl_update_ap_rps_params(dev, &rps, name);
+ if (unlikely(err)) {
+ ANDROID_ERROR(("Failed to update rps, pps %d, level %d, quiettime %d, "
+ "sta_assoc_check %d, err = %d\n", rps.pps, rps.level, rps.quiet_time,
+ rps.sta_assoc_check, err));
+ }
+
+ return err;
+}
+#endif /* SUPPORT_AP_RADIO_PWRSAVE */
+
+#if defined(DHD_HANG_SEND_UP_TEST)
+void
+wl_android_make_hang_with_reason(struct net_device *dev, const char *string_num)
+{
+ dhd_make_hang_with_reason(dev, string_num);
+}
+#endif /* DHD_HANG_SEND_UP_TEST */
+
+#ifdef DHD_SEND_HANG_PRIVCMD_ERRORS
+static void
+wl_android_check_priv_cmd_errors(struct net_device *dev)
+{
+ dhd_pub_t *dhdp;
+ int memdump_mode;
+
+ if (!dev) {
+ ANDROID_ERROR(("dev is NULL\n"));
+ return;
+ }
+
+ dhdp = wl_cfg80211_get_dhdp(dev);
+ if (!dhdp) {
+ ANDROID_ERROR(("dhdp is NULL\n"));
+ return;
+ }
+
+#ifdef DHD_FW_COREDUMP
+ memdump_mode = dhdp->memdump_enabled;
+#else
+ /* Default enable if DHD doesn't support SOCRAM dump */
+ memdump_mode = 1;
+#endif /* DHD_FW_COREDUMP */
+
+ if (report_hang_privcmd_err) {
+ priv_cmd_errors++;
+ } else {
+ priv_cmd_errors = 0;
+ }
+
+ /* Trigger HANG event only if memdump mode is enabled
+ * due to customer's request
+ */
+ if (memdump_mode == DUMP_MEMFILE_BUGON &&
+ (priv_cmd_errors > NUMBER_SEQUENTIAL_PRIVCMD_ERRORS)) {
+ ANDROID_ERROR(("Send HANG event due to sequential private cmd errors\n"));
+ priv_cmd_errors = 0;
+#ifdef DHD_FW_COREDUMP
+ /* Take a SOCRAM dump */
+ dhdp->memdump_type = DUMP_TYPE_SEQUENTIAL_PRIVCMD_ERROR;
+ dhd_common_socram_dump(dhdp);
+#endif /* DHD_FW_COREDUMP */
+ /* Send the HANG event to upper layer */
+ dhdp->hang_reason = HANG_REASON_SEQUENTIAL_PRIVCMD_ERROR;
+ dhd_os_check_hang(dhdp, 0, -EREMOTEIO);
+ }
+}
+#endif /* DHD_SEND_HANG_PRIVCMD_ERRORS */
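+
+/*
+ * Illustration (not driver code) of the consecutive-error pattern used
+ * above: the counter grows only while failures are back-to-back and any
+ * success resets it, so the HANG path fires only after more than
+ * NUMBER_SEQUENTIAL_PRIVCMD_ERRORS consecutive private-command errors.
+ */
+#if 0 /* illustration only */
+static int consecutive_errors;
+
+static int report_result(int err, int threshold)
+{
+	if (err)
+		consecutive_errors++;
+	else
+		consecutive_errors = 0;
+	return consecutive_errors > threshold;	/* nonzero => trigger HANG path */
+}
+#endif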
+
+#ifdef DHD_PKT_LOGGING
+static int
+wl_android_pktlog_filter_enable(struct net_device *dev, char *command, int total_len)
+{
+ int bytes_written = 0;
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
+ dhd_pktlog_filter_t *filter;
+ int err = BCME_OK;
+
+ if (!dhdp || !dhdp->pktlog) {
+ DHD_PKT_LOG(("%s(): dhdp=%p pktlog=%p\n",
+ __FUNCTION__, dhdp, (dhdp ? dhdp->pktlog : NULL)));
+ return -EINVAL;
+ }
+
+ filter = dhdp->pktlog->pktlog_filter;
+
+ /* enable all three capture cases; stop at the first failure so
+ * earlier errors are not silently overwritten
+ */
+ err = dhd_pktlog_filter_enable(filter, PKTLOG_TXPKT_CASE, TRUE);
+ if (err == BCME_OK) {
+ err = dhd_pktlog_filter_enable(filter, PKTLOG_TXSTATUS_CASE, TRUE);
+ }
+ if (err == BCME_OK) {
+ err = dhd_pktlog_filter_enable(filter, PKTLOG_RXPKT_CASE, TRUE);
+ }
+
+ if (err == BCME_OK) {
+ bytes_written = snprintf(command, total_len, "OK");
+ ANDROID_ERROR(("%s: pktlog filter enable success\n", __FUNCTION__));
+ } else {
+ ANDROID_ERROR(("%s: pktlog filter enable fail\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ return bytes_written;
+}
+
+static int
+wl_android_pktlog_filter_disable(struct net_device *dev, char *command, int total_len)
+{
+ int bytes_written = 0;
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
+ dhd_pktlog_filter_t *filter;
+ int err = BCME_OK;
+
+ if (!dhdp || !dhdp->pktlog) {
+ DHD_PKT_LOG(("%s(): dhdp=%p pktlog=%p\n",
+ __FUNCTION__, dhdp, (dhdp ? dhdp->pktlog : NULL)));
+ return -EINVAL;
+ }
+
+ filter = dhdp->pktlog->pktlog_filter;
+
+ /* disable all three capture cases; stop at the first failure so
+ * earlier errors are not silently overwritten
+ */
+ err = dhd_pktlog_filter_enable(filter, PKTLOG_TXPKT_CASE, FALSE);
+ if (err == BCME_OK) {
+ err = dhd_pktlog_filter_enable(filter, PKTLOG_TXSTATUS_CASE, FALSE);
+ }
+ if (err == BCME_OK) {
+ err = dhd_pktlog_filter_enable(filter, PKTLOG_RXPKT_CASE, FALSE);
+ }
+
+ if (err == BCME_OK) {
+ bytes_written = snprintf(command, total_len, "OK");
+ ANDROID_ERROR(("%s: pktlog filter disable success\n", __FUNCTION__));
+ } else {
+ ANDROID_ERROR(("%s: pktlog filter disable fail\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ return bytes_written;
+}
+
+static int
+wl_android_pktlog_filter_pattern_enable(struct net_device *dev, char *command, int total_len)
+{
+ int bytes_written = 0;
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
+ dhd_pktlog_filter_t *filter;
+ int err = BCME_OK;
+
+ if (!dhdp || !dhdp->pktlog) {
+ DHD_PKT_LOG(("%s(): dhdp=%p pktlog=%p\n",
+ __FUNCTION__, dhdp, (dhdp ? dhdp->pktlog : NULL)));
+ return -EINVAL;
+ }
+
+ filter = dhdp->pktlog->pktlog_filter;
+
+ if (strlen(CMD_PKTLOG_FILTER_PATTERN_ENABLE) + 1 > total_len) {
+ return BCME_ERROR;
+ }
+
+ err = dhd_pktlog_filter_pattern_enable(filter,
+ command + strlen(CMD_PKTLOG_FILTER_PATTERN_ENABLE) + 1, TRUE);
+
+ if (err == BCME_OK) {
+ bytes_written = snprintf(command, total_len, "OK");
+ ANDROID_ERROR(("%s: pktlog filter pattern enable success\n", __FUNCTION__));
+ } else {
+ ANDROID_ERROR(("%s: pktlog filter pattern enable fail\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ return bytes_written;
+}
+
+static int
+wl_android_pktlog_filter_pattern_disable(struct net_device *dev, char *command, int total_len)
+{
+ int bytes_written = 0;
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
+ dhd_pktlog_filter_t *filter;
+ int err = BCME_OK;
+
+ if (!dhdp || !dhdp->pktlog) {
+ DHD_PKT_LOG(("%s(): dhdp=%p pktlog=%p\n",
+ __FUNCTION__, dhdp, (dhdp ? dhdp->pktlog : NULL)));
+ return -EINVAL;
+ }
+
+ filter = dhdp->pktlog->pktlog_filter;
+
+ if (strlen(CMD_PKTLOG_FILTER_PATTERN_DISABLE) + 1 > total_len) {
+ return BCME_ERROR;
+ }
+
+ err = dhd_pktlog_filter_pattern_enable(filter,
+ command + strlen(CMD_PKTLOG_FILTER_PATTERN_DISABLE) + 1, FALSE);
+
+ if (err == BCME_OK) {
+ bytes_written = snprintf(command, total_len, "OK");
+ ANDROID_ERROR(("%s: pktlog filter pattern disable success\n", __FUNCTION__));
+ } else {
+ ANDROID_ERROR(("%s: pktlog filter pattern disable fail\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ return bytes_written;
+}
+
+static int
+wl_android_pktlog_filter_add(struct net_device *dev, char *command, int total_len)
+{
+ int bytes_written = 0;
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
+ dhd_pktlog_filter_t *filter;
+ int err = BCME_OK;
+
+ if (!dhdp || !dhdp->pktlog) {
+ DHD_PKT_LOG(("%s(): dhdp=%p pktlog=%p\n",
+ __FUNCTION__, dhdp, (dhdp ? dhdp->pktlog : NULL)));
+ return -EINVAL;
+ }
+
+ filter = dhdp->pktlog->pktlog_filter;
+
+ if (strlen(CMD_PKTLOG_FILTER_ADD) + 1 > total_len) {
+ return BCME_ERROR;
+ }
+
+ err = dhd_pktlog_filter_add(filter, command + strlen(CMD_PKTLOG_FILTER_ADD) + 1);
+
+ if (err == BCME_OK) {
+ bytes_written = snprintf(command, total_len, "OK");
+ ANDROID_ERROR(("%s: pktlog filter add success\n", __FUNCTION__));
+ } else {
+ ANDROID_ERROR(("%s: pktlog filter add fail\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ return bytes_written;
+}
+
+static int
+wl_android_pktlog_filter_del(struct net_device *dev, char *command, int total_len)
+{
+ int bytes_written = 0;
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
+ dhd_pktlog_filter_t *filter;
+ int err = BCME_OK;
+
+ if (!dhdp || !dhdp->pktlog) {
+ ANDROID_ERROR(("%s(): dhdp=%p pktlog=%p\n",
+ __FUNCTION__, dhdp, (dhdp ? dhdp->pktlog : NULL)));
+ return -EINVAL;
+ }
+
+ filter = dhdp->pktlog->pktlog_filter;
+
+ if (strlen(CMD_PKTLOG_FILTER_DEL) + 1 > total_len) {
+ DHD_PKT_LOG(("%s(): wrong cmd length %d found\n",
+ __FUNCTION__, (int)strlen(CMD_PKTLOG_FILTER_DEL)));
+ return BCME_ERROR;
+ }
+
+ err = dhd_pktlog_filter_del(filter, command + strlen(CMD_PKTLOG_FILTER_DEL) + 1);
+ if (err == BCME_OK) {
+ bytes_written = snprintf(command, total_len, "OK");
+ ANDROID_ERROR(("%s: pktlog filter del success\n", __FUNCTION__));
+ } else {
+ ANDROID_ERROR(("%s: pktlog filter del fail\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ return bytes_written;
+}
+
+static int
+wl_android_pktlog_filter_info(struct net_device *dev, char *command, int total_len)
+{
+ int bytes_written = 0;
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
+ dhd_pktlog_filter_t *filter;
+ int err = BCME_OK;
+
+ if (!dhdp || !dhdp->pktlog) {
+ DHD_PKT_LOG(("%s(): dhdp=%p pktlog=%p\n",
+ __FUNCTION__, dhdp, (dhdp ? dhdp->pktlog : NULL)));
+ return -EINVAL;
+ }
+
+ filter = dhdp->pktlog->pktlog_filter;
+
+ err = dhd_pktlog_filter_info(filter);
+
+ if (err == BCME_OK) {
+ bytes_written = snprintf(command, total_len, "OK");
+ ANDROID_ERROR(("%s: pktlog filter info success\n", __FUNCTION__));
+ } else {
+ ANDROID_ERROR(("%s: pktlog filter info fail\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ return bytes_written;
+}
+
+static int
+wl_android_pktlog_start(struct net_device *dev, char *command, int total_len)
+{
+ int bytes_written = 0;
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
+
+ if (!dhdp || !dhdp->pktlog) {
+ DHD_PKT_LOG(("%s(): dhdp=%p pktlog=%p\n",
+ __FUNCTION__, dhdp, (dhdp ? dhdp->pktlog : NULL)));
+ return -EINVAL;
+ }
+
+ if (!dhdp->pktlog->pktlog_ring) {
+ DHD_PKT_LOG(("%s(): pktlog_ring=%p\n",
+ __FUNCTION__, dhdp->pktlog->pktlog_ring));
+ return -EINVAL;
+ }
+
+ atomic_set(&dhdp->pktlog->pktlog_ring->start, TRUE);
+
+ bytes_written = snprintf(command, total_len, "OK");
+
+ ANDROID_ERROR(("%s: pktlog start success\n", __FUNCTION__));
+
+ return bytes_written;
+}
+
+static int
+wl_android_pktlog_stop(struct net_device *dev, char *command, int total_len)
+{
+ int bytes_written = 0;
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
+
+ if (!dhdp || !dhdp->pktlog) {
+ DHD_PKT_LOG(("%s(): dhdp=%p pktlog=%p\n",
+ __FUNCTION__, dhdp, (dhdp ? dhdp->pktlog : NULL)));
+ return -EINVAL;
+ }
+
+ if (!dhdp->pktlog->pktlog_ring) {
+ DHD_PKT_LOG(("%s(): _pktlog_ring=%p\n",
+ __FUNCTION__, dhdp->pktlog->pktlog_ring));
+ return -EINVAL;
+ }
+
+ atomic_set(&dhdp->pktlog->pktlog_ring->start, FALSE);
+
+ bytes_written = snprintf(command, total_len, "OK");
+
+ ANDROID_ERROR(("%s: pktlog stop success\n", __FUNCTION__));
+
+ return bytes_written;
+}
+
+static int
+wl_android_pktlog_filter_exist(struct net_device *dev, char *command, int total_len)
+{
+ int bytes_written = 0;
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
+ dhd_pktlog_filter_t *filter;
+ uint32 id = 0;
+ bool exist = FALSE;
+
+ if (!dhdp || !dhdp->pktlog) {
+ DHD_PKT_LOG(("%s(): dhdp=%p pktlog=%p\n",
+ __FUNCTION__, dhdp, (dhdp ? dhdp->pktlog : NULL)));
+ return -EINVAL;
+ }
+
+ filter = dhdp->pktlog->pktlog_filter;
+
+ if (strlen(CMD_PKTLOG_FILTER_EXIST) + 1 > total_len) {
+ return BCME_ERROR;
+ }
+
+ exist = dhd_pktlog_filter_existed(filter, command + strlen(CMD_PKTLOG_FILTER_EXIST) + 1,
+ &id);
+
+ if (exist) {
+ bytes_written = snprintf(command, total_len, "TRUE");
+ ANDROID_ERROR(("%s: pktlog filter pattern id: %d exists\n", __FUNCTION__, id));
+ } else {
+ bytes_written = snprintf(command, total_len, "FALSE");
+ ANDROID_ERROR(("%s: pktlog filter pattern does not exist\n", __FUNCTION__));
+ }
+
+ return bytes_written;
+}
+
+static int
+wl_android_pktlog_minmize_enable(struct net_device *dev, char *command, int total_len)
+{
+ int bytes_written = 0;
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
+
+ if (!dhdp || !dhdp->pktlog) {
+ DHD_PKT_LOG(("%s(): dhdp=%p pktlog=%p\n",
+ __FUNCTION__, dhdp, (dhdp ? dhdp->pktlog : NULL)));
+ return -EINVAL;
+ }
+
+ if (!dhdp->pktlog->pktlog_ring) {
+ DHD_PKT_LOG(("%s(): pktlog_ring=%p\n",
+ __FUNCTION__, dhdp->pktlog->pktlog_ring));
+ return -EINVAL;
+ }
+
+ dhdp->pktlog->pktlog_ring->pktlog_minmize = TRUE;
+
+ bytes_written = snprintf(command, total_len, "OK");
+
+ ANDROID_ERROR(("%s: pktlog pktlog_minmize enable\n", __FUNCTION__));
+
+ return bytes_written;
+}
+
+static int
+wl_android_pktlog_minmize_disable(struct net_device *dev, char *command, int total_len)
+{
+ int bytes_written = 0;
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
+
+ if (!dhdp || !dhdp->pktlog) {
+ DHD_PKT_LOG(("%s(): dhdp=%p pktlog=%p\n",
+ __FUNCTION__, dhdp, (dhdp ? dhdp->pktlog : NULL)));
+ return -EINVAL;
+ }
+
+ if (!dhdp->pktlog->pktlog_ring) {
+ DHD_PKT_LOG(("%s(): pktlog_ring=%p\n",
+ __FUNCTION__, dhdp->pktlog->pktlog_ring));
+ return -EINVAL;
+ }
+
+ dhdp->pktlog->pktlog_ring->pktlog_minmize = FALSE;
+
+ bytes_written = snprintf(command, total_len, "OK");
+
+ ANDROID_ERROR(("%s: pktlog pktlog_minmize disable\n", __FUNCTION__));
+
+ return bytes_written;
+}
+
+static int
+wl_android_pktlog_change_size(struct net_device *dev, char *command, int total_len)
+{
+ int bytes_written = 0;
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
+ int err = BCME_OK;
+ int size;
+
+ if (!dhdp || !dhdp->pktlog) {
+ DHD_PKT_LOG(("%s(): dhdp=%p pktlog=%p\n",
+ __FUNCTION__, dhdp, (dhdp ? dhdp->pktlog : NULL)));
+ return -EINVAL;
+ }
+
+ if (strlen(CMD_PKTLOG_CHANGE_SIZE) + 1 > total_len) {
+ return BCME_ERROR;
+ }
+
+ size = bcm_strtoul(command + strlen(CMD_PKTLOG_CHANGE_SIZE) + 1, NULL, 0);
+
+ dhdp->pktlog->pktlog_ring =
+ dhd_pktlog_ring_change_size(dhdp->pktlog->pktlog_ring, size);
+ if (!dhdp->pktlog->pktlog_ring) {
+ err = BCME_ERROR;
+ }
+
+ if (err == BCME_OK) {
+ bytes_written = snprintf(command, total_len, "OK");
+ ANDROID_ERROR(("%s: pktlog change size success\n", __FUNCTION__));
+ } else {
+ ANDROID_ERROR(("%s: pktlog change size fail\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ return bytes_written;
+}
+
+static int
+wl_android_pktlog_dbg_dump(struct net_device *dev, char *command, int total_len)
+{
+ int bytes_written = 0;
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
+ int err = BCME_OK;
+
+ if (!dhdp || !dhdp->pktlog) {
+ DHD_PKT_LOG(("%s(): dhdp=%p pktlog=%p\n",
+ __FUNCTION__, dhdp, (dhdp ? dhdp->pktlog : NULL)));
+ return -EINVAL;
+ }
+
+ if (strlen(CMD_PKTLOG_DEBUG_DUMP) + 1 > total_len) {
+ return BCME_ERROR;
+ }
+
+ err = dhd_pktlog_debug_dump(dhdp);
+ if (err == BCME_OK) {
+ bytes_written = snprintf(command, total_len, "OK");
+ ANDROID_INFO(("%s: pktlog dbg dump success\n", __FUNCTION__));
+ } else {
+ ANDROID_ERROR(("%s: pktlog dbg dump fail\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ return bytes_written;
+}
+#endif /* DHD_PKT_LOGGING */
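+
+/*
+ * Typical call sequence for the PKTLOG private commands handled above.
+ * The literal command strings are defined elsewhere in this file as
+ * CMD_PKTLOG_* and are assumed here to match their macro names:
+ *
+ *   PKTLOG_FILTER_ENABLE          -> wl_android_pktlog_filter_enable()
+ *   PKTLOG_FILTER_ADD <pattern>   -> wl_android_pktlog_filter_add()
+ *   PKTLOG_START                  -> wl_android_pktlog_start()
+ *   ... capture traffic ...
+ *   PKTLOG_STOP                   -> wl_android_pktlog_stop()
+ *   PKTLOG_DEBUG_DUMP             -> wl_android_pktlog_dbg_dump()
+ */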
+
+#if defined(CONFIG_TIZEN)
+static int wl_android_set_powersave_mode(
+ struct net_device *dev, char* command, int total_len)
+{
+ int pm;
+
+ int err = BCME_OK;
+#ifdef DHD_PM_OVERRIDE
+ extern bool g_pm_override;
+#endif /* DHD_PM_OVERRIDE */
+ if (sscanf(command, "%*s %10d", &pm) != 1) {
+ ANDROID_ERROR(("failed to parse pm mode\n"));
+ return BCME_ERROR;
+ }
+ if (pm < PM_OFF || pm > PM_FAST) {
+ ANDROID_ERROR(("check pm=%d\n", pm));
+ return BCME_ERROR;
+ }
+
+#ifdef DHD_PM_OVERRIDE
+ if (pm > PM_OFF) {
+ g_pm_override = FALSE;
+ }
+#endif /* DHD_PM_OVERRIDE */
+
+ err = wldev_ioctl_set(dev, WLC_SET_PM, &pm, sizeof(pm));
+
+#ifdef DHD_PM_OVERRIDE
+ if (pm == PM_OFF) {
+ g_pm_override = TRUE;
+ }
+
+ ANDROID_ERROR(("%s: PM:%d, pm_override=%d\n", __FUNCTION__, pm, g_pm_override));
+#endif /* DHD_PM_OVERRIDE */
+ return err;
+}
+
+static int wl_android_get_powersave_mode(
+ struct net_device *dev, char *command, int total_len)
+{
+ int err, bytes_written;
+ int pm;
+
+ err = wldev_ioctl_get(dev, WLC_GET_PM, &pm, sizeof(pm));
+ if (err != BCME_OK) {
+ ANDROID_ERROR(("failed to get pm (%d)", err));
+ return err;
+ }
+
+ bytes_written = snprintf(command, total_len, "%s %d",
+ CMD_POWERSAVEMODE_GET, pm);
+
+ return bytes_written;
+}
+#endif /* CONFIG_TIZEN */
+
+#ifdef DHD_EVENT_LOG_FILTER
+uint32 dhd_event_log_filter_serialize(dhd_pub_t *dhdp, char *buf, uint32 tot_len, int type);
+
+#ifdef DHD_EWPR_VER2
+uint32 dhd_event_log_filter_serialize_bit(dhd_pub_t *dhdp, char *buf, uint32 tot_len,
+ int index1, int index2, int index3);
+#endif
+
+static int
+wl_android_ewp_filter(struct net_device *dev, char *command, uint32 tot_len)
+{
+ uint32 bytes_written = 0;
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
+#ifdef DHD_EWPR_VER2
+ int index1 = 0, index2 = 0, index3 = 0;
+ unsigned char *index_str = (unsigned char *)(command +
+ strlen(CMD_EWP_FILTER) + 1);
+#else
+ int type = 0;
+#endif
+
+ if (!dhdp || !command) {
+ ANDROID_ERROR(("%s(): dhdp=%p \n", __FUNCTION__, dhdp));
+ return -EINVAL;
+ }
+
+ if (!FW_SUPPORTED(dhdp, ecounters)) {
+ ANDROID_ERROR(("does not support ecounters!\n"));
+ return BCME_UNSUPPORTED;
+ }
+
+#ifdef DHD_EWPR_VER2
+ if (strlen(command) > strlen(CMD_EWP_FILTER) + 1) {
+ sscanf(index_str, "%10d %10d %10d", &index1, &index2, &index3);
+ ANDROID_TRACE(("%s(): get index request: %d %d %d\n", __FUNCTION__,
+ index1, index2, index3));
+ }
+ bytes_written += dhd_event_log_filter_serialize_bit(dhdp,
+ &command[bytes_written], tot_len - bytes_written, index1, index2, index3);
+#else
+ /* NEED TO GET TYPE if EXIST */
+ type = 0;
+
+ bytes_written += dhd_event_log_filter_serialize(dhdp,
+ &command[bytes_written], tot_len - bytes_written, type);
+#endif
+
+ return (int)bytes_written;
+}
+#endif /* DHD_EVENT_LOG_FILTER */
+
+#ifdef SUPPORT_AP_SUSPEND
+int
+wl_android_set_ap_suspend(struct net_device *dev, char *command, int total_len)
+{
+ int suspend = 0;
+ char *pos, *token;
+ char *ifname = NULL;
+ int err = BCME_OK;
+ char name[IFNAMSIZ];
+
+ /*
+ * DRIVER SET_AP_SUSPEND <0/1> <ifname>
+ */
+ pos = command;
+
+ /* drop command */
+ token = bcmstrtok(&pos, " ", NULL);
+
+ /* Enable */
+ token = bcmstrtok(&pos, " ", NULL);
+ if (!token) {
+ return -EINVAL;
+ }
+ suspend = bcm_atoi(token);
+
+ /* get the interface name */
+ token = bcmstrtok(&pos, " ", NULL);
+ if (!token) {
+ return -EINVAL;
+ }
+ ifname = token;
+
+ strlcpy(name, ifname, sizeof(name));
+ ANDROID_INFO(("suspend %d, ifacename %s\n", suspend, name));
+
+ err = wl_set_ap_suspend(dev, suspend? TRUE: FALSE, name);
+ if (unlikely(err)) {
+ ANDROID_ERROR(("Failed to set suspend, suspend %d, error = %d\n", suspend, err));
+ }
+
+ return err;
+}
+#endif /* SUPPORT_AP_SUSPEND */
+
+#ifdef SUPPORT_AP_BWCTRL
+int
+wl_android_set_ap_bw(struct net_device *dev, char *command, int total_len)
+{
+ int bw = DOT11_OPER_MODE_20MHZ;
+ char *pos, *token;
+ char *ifname = NULL;
+ int err = BCME_OK;
+ char name[IFNAMSIZ];
+
+ /*
+ * DRIVER SET_AP_BW <0/1/2> <ifname>
+ * 0 : 20MHz, 1 : 40MHz, 2 : 80MHz, 3 : 80+80 or 160MHz
+ * This is from operating mode field
+ * in 8.4.1.50 of 802.11ac-2013
+ */
+ pos = command;
+
+ /* drop command */
+ token = bcmstrtok(&pos, " ", NULL);
+
+ /* BW */
+ token = bcmstrtok(&pos, " ", NULL);
+ if (!token) {
+ return -EINVAL;
+ }
+ bw = bcm_atoi(token);
+
+ /* get the interface name */
+ token = bcmstrtok(&pos, " ", NULL);
+ if (!token) {
+ return -EINVAL;
+ }
+ ifname = token;
+
+ strlcpy(name, ifname, sizeof(name));
+ ANDROID_INFO(("bw %d, ifacename %s\n", bw, name));
+
+ err = wl_set_ap_bw(dev, bw, name);
+ if (unlikely(err)) {
+ ANDROID_ERROR(("Failed to set bw, bw %d, error = %d\n", bw, err));
+ }
+
+ return err;
+}
+
+int
+wl_android_get_ap_bw(struct net_device *dev, char *command, int total_len)
+{
+ char *pos, *token;
+ char *ifname = NULL;
+ int bytes_written = 0;
+ char name[IFNAMSIZ];
+
+ /*
+ * DRIVER GET_AP_BW <ifname>
+ * returns 0 : 20MHz, 1 : 40MHz, 2 : 80MHz, 3 : 80+80 or 160MHz
+ * This is from operating mode field
+ * in 8.4.1.50 of 802.11ac-2013
+ */
+ pos = command;
+
+ /* drop command */
+ token = bcmstrtok(&pos, " ", NULL);
+
+ /* get the interface name */
+ token = bcmstrtok(&pos, " ", NULL);
+ if (!token) {
+ return -EINVAL;
+ }
+ ifname = token;
+
+ strlcpy(name, ifname, sizeof(name));
+ ANDROID_INFO(("ifacename %s\n", name));
+
+ bytes_written = wl_get_ap_bw(dev, command, name, total_len);
+ if (bytes_written < 1) {
+ ANDROID_ERROR(("Failed to get bw, error = %d\n", bytes_written));
+ return -EPROTO;
+ }
+
+ return bytes_written;
+}
+#endif /* SUPPORT_AP_BWCTRL */
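+
+/*
+ * Sketch (not driver code): the operating-mode values accepted by
+ * SET_AP_BW / returned by GET_AP_BW map to channel widths per the VHT
+ * operating mode field (802.11ac-2013, 8.4.1.50).
+ */
+#if 0 /* illustration only */
+static const unsigned int oper_mode_to_mhz[] = {
+	20,	/* 0: DOT11_OPER_MODE_20MHZ */
+	40,	/* 1 */
+	80,	/* 2 */
+	160,	/* 3: 80+80 reported here as 160 */
+};
+#endif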
+
+static int
+wl_android_priv_cmd_log_enable_check(char* cmd)
+{
+ int cnt = 0;
+
+ while (strlen(loging_params[cnt].command) > 0) {
+ if (!strnicmp(cmd, loging_params[cnt].command,
+ strlen(loging_params[cnt].command))) {
+ return loging_params[cnt].enable;
+ }
+
+ cnt++;
+ }
+
+ return FALSE;
+}
+
+int wl_android_priv_cmd(struct net_device *net, struct ifreq *ifr)
+{
+#define PRIVATE_COMMAND_MAX_LEN 8192
+#define PRIVATE_COMMAND_DEF_LEN 4096
+ int ret = 0;
+ char *command = NULL;
+ int bytes_written = 0;
+ android_wifi_priv_cmd priv_cmd;
+ int buf_size = 0;
+ dhd_pub_t *dhd = dhd_get_pub(net);
+
+ net_os_wake_lock(net);
+
+ if (!capable(CAP_NET_ADMIN)) {
+ ret = -EPERM;
+ goto exit;
+ }
+
+ if (!ifr->ifr_data) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+#ifdef CONFIG_COMPAT
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0))
+ if (in_compat_syscall())
+#else
+ if (is_compat_task())
+#endif
+ {
+ compat_android_wifi_priv_cmd compat_priv_cmd;
+ if (copy_from_user(&compat_priv_cmd, ifr->ifr_data,
+ sizeof(compat_android_wifi_priv_cmd))) {
+ ret = -EFAULT;
+ goto exit;
+ }
+ priv_cmd.buf = compat_ptr(compat_priv_cmd.buf);
+ priv_cmd.used_len = compat_priv_cmd.used_len;
+ priv_cmd.total_len = compat_priv_cmd.total_len;
+ } else
+#endif /* CONFIG_COMPAT */
+ {
+ if (copy_from_user(&priv_cmd, ifr->ifr_data, sizeof(android_wifi_priv_cmd))) {
+ ret = -EFAULT;
+ goto exit;
+ }
+ }
+ if ((priv_cmd.total_len > PRIVATE_COMMAND_MAX_LEN) || (priv_cmd.total_len < 0)) {
+ ANDROID_ERROR(("%s: buf length invalid:%d\n", __FUNCTION__,
+ priv_cmd.total_len));
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ buf_size = max(priv_cmd.total_len, PRIVATE_COMMAND_DEF_LEN);
+ command = (char *)MALLOC(dhd->osh, (buf_size + 1));
+ if (!command) {
+ ANDROID_ERROR(("%s: failed to allocate memory\n", __FUNCTION__));
+ ret = -ENOMEM;
+ goto exit;
+ }
+ if (copy_from_user(command, priv_cmd.buf, priv_cmd.total_len)) {
+ ret = -EFAULT;
+ goto exit;
+ }
+ command[priv_cmd.total_len] = '\0';
+
+ if (wl_android_priv_cmd_log_enable_check(command)) {
+ ANDROID_ERROR(("%s: Android private cmd \"%s\" on %s\n", __FUNCTION__,
+ command, ifr->ifr_name));
+ } else {
+ ANDROID_INFO(("%s: Android private cmd \"%s\" on %s\n", __FUNCTION__,
+ command, ifr->ifr_name));
+ }
+
+ bytes_written = wl_handle_private_cmd(net, command, priv_cmd.total_len);
+ if (bytes_written >= 0) {
+ if ((bytes_written == 0) && (priv_cmd.total_len > 0)) {
+ command[0] = '\0';
+ }
+ if (bytes_written >= priv_cmd.total_len) {
+ ANDROID_ERROR(("%s: err. bytes_written:%d >= total_len:%d, buf_size:%d\n",
+ __FUNCTION__, bytes_written, priv_cmd.total_len, buf_size));
+
+ ret = BCME_BUFTOOSHORT;
+ goto exit;
+ }
+ bytes_written++;
+ priv_cmd.used_len = bytes_written;
+ if (copy_to_user(priv_cmd.buf, command, bytes_written)) {
+ ANDROID_ERROR(("%s: failed to copy data to user buffer\n", __FUNCTION__));
+ ret = -EFAULT;
+ }
+ }
+ else {
+ /* Propagate the error */
+ ret = bytes_written;
+ }
+
+exit:
+#ifdef DHD_SEND_HANG_PRIVCMD_ERRORS
+ if (ret) {
+ /* Avoid incrementing priv_cmd_errors in case of unsupported feature */
+ if (ret != BCME_UNSUPPORTED) {
+ wl_android_check_priv_cmd_errors(net);
+ }
+ } else {
+ priv_cmd_errors = 0;
+ }
+#endif /* DHD_SEND_HANG_PRIVCMD_ERRORS */
+ net_os_wake_unlock(net);
+ if (command) {
+ /* command is NULL when an early exit is taken before allocation */
+ MFREE(dhd->osh, command, (buf_size + 1));
+ }
+ return ret;
+}
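+
+/*
+ * Userspace sketch (not driver code) of how a private command reaches
+ * wl_android_priv_cmd(). The struct mirrors the driver's
+ * android_wifi_priv_cmd for a matching-width build (the compat path above
+ * handles 32-bit userspace on a 64-bit kernel). The ioctl number
+ * SIOCDEVPRIVATE + 1 and the "wlan0" name are assumptions here; the
+ * caller needs CAP_NET_ADMIN, as enforced by the handler above.
+ */
+#if 0 /* illustration only */
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+#include <linux/if.h>
+#include <linux/sockios.h>
+
+struct android_wifi_priv_cmd {
+	char *buf;
+	int used_len;
+	int total_len;
+};
+
+int main(void)
+{
+	char buf[4096] = "GET_AP_BW wlan0";	/* command in, reply out */
+	struct android_wifi_priv_cmd priv = { buf, (int)strlen(buf) + 1, sizeof(buf) };
+	struct ifreq ifr;
+	int s = socket(AF_INET, SOCK_DGRAM, 0);
+
+	memset(&ifr, 0, sizeof(ifr));
+	strncpy(ifr.ifr_name, "wlan0", IFNAMSIZ - 1);
+	ifr.ifr_data = (void *)&priv;
+	if (ioctl(s, SIOCDEVPRIVATE + 1, &ifr) == 0)
+		printf("reply: %s\n", buf);	/* driver copies the result back */
+	close(s);
+	return 0;
+}
+#endif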
+
+#ifdef WLADPS_PRIVATE_CMD
+static int
+wl_android_set_adps_mode(struct net_device *dev, const char* string_num)
+{
+ int err = 0, adps_mode;
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
+#ifdef DHD_PM_CONTROL_FROM_FILE
+ if (g_pm_control) {
+ return -EPERM;
+ }
+#endif /* DHD_PM_CONTROL_FROM_FILE */
+
+ adps_mode = bcm_atoi(string_num);
+ ANDROID_ERROR(("%s: SET_ADPS %d\n", __FUNCTION__, adps_mode));
+
+ if (!(adps_mode == 0 || adps_mode == 1)) {
+ ANDROID_ERROR(("wl_android_set_adps_mode: Invalid value %d.\n", adps_mode));
+ return -EINVAL;
+ }
+
+ err = dhd_enable_adps(dhdp, adps_mode);
+ if (err != BCME_OK) {
+ ANDROID_ERROR(("failed to set adps mode %d, error = %d\n", adps_mode, err));
+ return -EIO;
+ }
+ return err;
+}
+static int
+wl_android_get_adps_mode(
+ struct net_device *dev, char *command, int total_len)
+{
+ int bytes_written, err = 0;
+ uint len;
+ char buf[WLC_IOCTL_SMLEN];
+
+ bcm_iov_buf_t iov_buf;
+ bcm_iov_buf_t *ptr = NULL;
+ wl_adps_params_v1_t *data = NULL;
+
+ uint8 *pdata = NULL;
+ uint8 band, mode = 0;
+
+ bzero(&iov_buf, sizeof(iov_buf));
+
+ len = OFFSETOF(bcm_iov_buf_t, data) + sizeof(band);
+
+ iov_buf.version = WL_ADPS_IOV_VER;
+ iov_buf.len = sizeof(band);
+ iov_buf.id = WL_ADPS_IOV_MODE;
+
+ pdata = (uint8 *)&iov_buf.data;
+
+ for (band = 1; band <= MAX_BANDS; band++) {
+ pdata[0] = band;
+ err = wldev_iovar_getbuf(dev, "adps", &iov_buf, len,
+ buf, WLC_IOCTL_SMLEN, NULL);
+ if (err != BCME_OK) {
+ ANDROID_ERROR(("wl_android_get_adps_mode fail to get adps band %d(%d).\n",
+ band, err));
+ return -EIO;
+ }
+ ptr = (bcm_iov_buf_t *) buf;
+ data = (wl_adps_params_v1_t *) ptr->data;
+ mode = data->mode;
+ if (mode != OFF) {
+ break;
+ }
+ }
+
+ bytes_written = snprintf(command, total_len, "%s %d",
+ CMD_GET_ADPS, mode);
+ return bytes_written;
+}
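+
+/*
+ * Sketch (not driver code) of the bcm_iov_buf_t request pattern used
+ * above: a fixed header (version/len/id) followed by a variable payload,
+ * where the ioctl length covers only the header plus the payload actually
+ * used. The mirrored struct below is illustrative, not the driver's
+ * definition.
+ */
+#if 0 /* illustration only */
+struct iov_buf_sketch {
+	unsigned short version;	/* e.g. WL_ADPS_IOV_VER */
+	unsigned short len;	/* payload length only */
+	unsigned short id;	/* e.g. WL_ADPS_IOV_MODE */
+	unsigned char data[1];	/* payload; here, one band byte */
+};
+/* request length = offsetof(struct iov_buf_sketch, data) + sizeof(band) */
+#endif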
+
+#ifdef WLADPS_ENERGY_GAIN
+static int
+wl_android_get_gain_adps(
+ struct net_device *dev, char *command, int total_len)
+{
+ int bytes_written;
+
+ int ret = 0;
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
+
+ ret = dhd_event_log_filter_adps_energy_gain(dhdp);
+ if (ret < 0) {
+ return ret;
+ }
+
+ ANDROID_INFO(("%s ADPS Energy Gain: %d uAh\n", __FUNCTION__, ret));
+
+ bytes_written = snprintf(command, total_len, "%s %d uAh",
+ CMD_GET_GAIN_ADPS, ret);
+
+ return bytes_written;
+}
+
+static int
+wl_android_reset_gain_adps(
+ struct net_device *dev, char *command)
+{
+ int ret = BCME_OK;
+
+ bcm_iov_buf_t iov_buf;
+ char buf[WLC_IOCTL_SMLEN] = {0, };
+
+ iov_buf.version = WL_ADPS_IOV_VER;
+ iov_buf.id = WL_ADPS_IOV_RESET_GAIN;
+ iov_buf.len = 0;
+
+ if ((ret = wldev_iovar_setbuf(dev, "adps", &iov_buf, sizeof(iov_buf),
+ buf, sizeof(buf), NULL)) < 0) {
+ ANDROID_ERROR(("%s fail to reset adps gain (%d)\n", __FUNCTION__, ret));
+ }
+
+ return ret;
+}
+#endif /* WLADPS_ENERGY_GAIN */
+#endif /* WLADPS_PRIVATE_CMD */
+
+#ifdef WL_BCNRECV
+#define BCNRECV_ATTR_HDR_LEN 30
+int
+wl_android_bcnrecv_event(struct net_device *ndev, uint attr_type,
+ uint status, uint reason, uint8 *data, uint data_len)
+{
+ s32 err = BCME_OK;
+ struct sk_buff *skb;
+ gfp_t kflags;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+ uint len;
+
+ len = BCNRECV_ATTR_HDR_LEN + data_len;
+
+ kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+ skb = CFG80211_VENDOR_EVENT_ALLOC(wiphy, ndev_to_wdev(ndev), len,
+ BRCM_VENDOR_EVENT_BEACON_RECV, kflags);
+ if (!skb) {
+ ANDROID_ERROR(("skb alloc failed"));
+ return -ENOMEM;
+ }
+ if ((attr_type == BCNRECV_ATTR_BCNINFO) && (data)) {
+ /* send bcn info to upper layer */
+ nla_put(skb, BCNRECV_ATTR_BCNINFO, data_len, data);
+ } else if (attr_type == BCNRECV_ATTR_STATUS) {
+ nla_put_u32(skb, BCNRECV_ATTR_STATUS, status);
+ if (reason) {
+ nla_put_u32(skb, BCNRECV_ATTR_REASON, reason);
+ }
+ } else {
+ ANDROID_ERROR(("UNKNOWN ATTR_TYPE. attr_type:%d\n", attr_type));
+ kfree_skb(skb);
+ return -EINVAL;
+ }
+ cfg80211_vendor_event(skb, kflags);
+ return err;
+}
+
+static int
+_wl_android_bcnrecv_start(struct bcm_cfg80211 *cfg, struct net_device *ndev, bool user_trigger)
+{
+ s32 err = BCME_OK;
+ struct net_device *pdev = bcmcfg_to_prmry_ndev(cfg);
+
+ /* check any scan is in progress before beacon recv scan trigger IOVAR */
+ if (wl_get_drv_status_all(cfg, SCANNING)) {
+ err = BCME_UNSUPPORTED;
+ ANDROID_ERROR(("Scan in progress, Aborting beacon recv start, "
+ "error:%d\n", err));
+ goto exit;
+ }
+
+ if (wl_get_p2p_status(cfg, SCANNING)) {
+ err = BCME_UNSUPPORTED;
+ ANDROID_ERROR(("P2P Scan in progress, Aborting beacon recv start, "
+ "error:%d\n", err));
+ goto exit;
+ }
+
+ if (wl_get_drv_status(cfg, REMAINING_ON_CHANNEL, ndev)) {
+ err = BCME_UNSUPPORTED;
+ ANDROID_ERROR(("P2P remain on channel, Aborting beacon recv start, "
+ "error:%d\n", err));
+ goto exit;
+ }
+
+ /* Beacon recv requires the STA to be in associated state;
+ * otherwise exit from beacon recv scan
+ */
+ if (!wl_get_drv_status(cfg, CONNECTED, ndev)) {
+ err = BCME_UNSUPPORTED;
+ ANDROID_ERROR(("STA is in not associated state error:%d\n", err));
+ goto exit;
+ }
+
+#ifdef WL_NAN
+ /* Check NAN is enabled, if enabled exit else continue */
+ if (wl_cfgnan_is_enabled(cfg)) {
+ err = BCME_UNSUPPORTED;
+ ANDROID_ERROR(("Nan is enabled, NAN+STA+FAKEAP concurrency is not supported\n"));
+ goto exit;
+ }
+#endif /* WL_NAN */
+
+ /* Trigger the sendup_bcn iovar */
+ err = wldev_iovar_setint(pdev, "sendup_bcn", 1);
+ if (unlikely(err)) {
+ ANDROID_ERROR(("sendup_bcn failed to set, error:%d\n", err));
+ } else {
+ cfg->bcnrecv_info.bcnrecv_state = BEACON_RECV_STARTED;
+ ANDROID_INFO(("bcnrecv started. user_trigger:%d ifindex:%d\n",
+ user_trigger, ndev->ifindex));
+ if (user_trigger) {
+ if ((err = wl_android_bcnrecv_event(pdev, BCNRECV_ATTR_STATUS,
+ WL_BCNRECV_STARTED, 0, NULL, 0)) != BCME_OK) {
+ ANDROID_ERROR(("failed to send bcnrecv event, error:%d\n", err));
+ }
+ }
+ }
+exit:
+ /*
+ * A BCNRECV start request can be rejected by the dongle
+ * under various conditions.
+ * The error code is overridden to BCME_UNSUPPORTED
+ * to avoid a hang event from continuous private
+ * command errors.
+ */
+ if (err) {
+ err = BCME_UNSUPPORTED;
+ }
+ return err;
+}
+
+int
+_wl_android_bcnrecv_stop(struct bcm_cfg80211 *cfg, struct net_device *ndev, uint reason)
+{
+ s32 err = BCME_OK;
+ u32 status;
+ struct net_device *pdev = bcmcfg_to_prmry_ndev(cfg);
+
+ /* Stop bcnrx except for fw abort event case */
+ if (reason != WL_BCNRECV_ROAMABORT) {
+ err = wldev_iovar_setint(pdev, "sendup_bcn", 0);
+ if (unlikely(err)) {
+ ANDROID_ERROR(("sendup_bcn failed to set error:%d\n", err));
+ goto exit;
+ }
+ }
+
+ /* Send notification for all cases */
+ if (reason == WL_BCNRECV_SUSPEND) {
+ cfg->bcnrecv_info.bcnrecv_state = BEACON_RECV_SUSPENDED;
+ status = WL_BCNRECV_SUSPENDED;
+ } else {
+ cfg->bcnrecv_info.bcnrecv_state = BEACON_RECV_STOPPED;
+ ANDROID_INFO(("bcnrecv stopped. reason:%d ifindex:%d\n",
+ reason, ndev->ifindex));
+ if (reason == WL_BCNRECV_USER_TRIGGER) {
+ status = WL_BCNRECV_STOPPED;
+ } else {
+ status = WL_BCNRECV_ABORTED;
+ }
+ }
+ if ((err = wl_android_bcnrecv_event(pdev, BCNRECV_ATTR_STATUS, status,
+ reason, NULL, 0)) != BCME_OK) {
+ ANDROID_ERROR(("failed to send bcnrecv event, error:%d\n", err));
+ }
+exit:
+ return err;
+}
+
+static int
+wl_android_bcnrecv_start(struct bcm_cfg80211 *cfg, struct net_device *ndev)
+{
+ s32 err = BCME_OK;
+
+ /* Adding scan_sync mutex to avoid race condition in b/w scan_req and bcn recv */
+ mutex_lock(&cfg->scan_sync);
+ mutex_lock(&cfg->bcn_sync);
+ err = _wl_android_bcnrecv_start(cfg, ndev, true);
+ mutex_unlock(&cfg->bcn_sync);
+ mutex_unlock(&cfg->scan_sync);
+ return err;
+}
+
+int
+wl_android_bcnrecv_stop(struct net_device *ndev, uint reason)
+{
+ s32 err = BCME_OK;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+
+ mutex_lock(&cfg->bcn_sync);
+ if ((cfg->bcnrecv_info.bcnrecv_state == BEACON_RECV_STARTED) ||
+ (cfg->bcnrecv_info.bcnrecv_state == BEACON_RECV_SUSPENDED)) {
+ err = _wl_android_bcnrecv_stop(cfg, ndev, reason);
+ }
+ mutex_unlock(&cfg->bcn_sync);
+ return err;
+}
+
+int
+wl_android_bcnrecv_suspend(struct net_device *ndev)
+{
+ s32 ret = BCME_OK;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+
+ mutex_lock(&cfg->bcn_sync);
+ if (cfg->bcnrecv_info.bcnrecv_state == BEACON_RECV_STARTED) {
+ ANDROID_INFO(("bcnrecv suspend\n"));
+ ret = _wl_android_bcnrecv_stop(cfg, ndev, WL_BCNRECV_SUSPEND);
+ }
+ mutex_unlock(&cfg->bcn_sync);
+ return ret;
+}
+
+int
+wl_android_bcnrecv_resume(struct net_device *ndev)
+{
+ s32 ret = BCME_OK;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+
+ /* Adding scan_sync mutex to avoid race condition in b/w scan_req and bcn recv */
+ mutex_lock(&cfg->scan_sync);
+ mutex_lock(&cfg->bcn_sync);
+ if (cfg->bcnrecv_info.bcnrecv_state == BEACON_RECV_SUSPENDED) {
+ ANDROID_INFO(("bcnrecv resume\n"));
+ ret = _wl_android_bcnrecv_start(cfg, ndev, false);
+ }
+ mutex_unlock(&cfg->bcn_sync);
+ mutex_unlock(&cfg->scan_sync);
+ return ret;
+}
+
+/* Beacon recv functionality code implementation */
+int
+wl_android_bcnrecv_config(struct net_device *ndev, char *cmd_argv, int total_len)
+{
+ struct bcm_cfg80211 *cfg = NULL;
+ uint err = BCME_OK;
+
+ if (!ndev) {
+ ANDROID_ERROR(("ndev is NULL\n"));
+ return -EINVAL;
+ }
+
+ cfg = wl_get_cfg(ndev);
+ if (!cfg) {
+ ANDROID_ERROR(("cfg is NULL\n"));
+ return -EINVAL;
+ }
+
+ /* sync commands from user space */
+ mutex_lock(&cfg->usr_sync);
+ if (strncmp(cmd_argv, "start", strlen("start")) == 0) {
+ ANDROID_INFO(("BCNRECV start\n"));
+ err = wl_android_bcnrecv_start(cfg, ndev);
+ if (err != BCME_OK) {
+ ANDROID_ERROR(("Failed to process the start command, error:%d\n", err));
+ goto exit;
+ }
+ } else if (strncmp(cmd_argv, "stop", strlen("stop")) == 0) {
+ ANDROID_INFO(("BCNRECV stop\n"));
+ err = wl_android_bcnrecv_stop(ndev, WL_BCNRECV_USER_TRIGGER);
+ if (err != BCME_OK) {
+ ANDROID_ERROR(("Failed to stop the bcn recv, error:%d\n", err));
+ goto exit;
+ }
+ } else {
+ err = BCME_ERROR;
+ }
+exit:
+ mutex_unlock(&cfg->usr_sync);
+ return err;
+}
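+
+/*
+ * Usage note (sketch): wl_android_bcnrecv_config() expects cmd_argv to be
+ * "start" or "stop"; the DRIVER command keyword that routes here is
+ * defined elsewhere in this file and is not shown in this hunk.
+ */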
+#endif /* WL_BCNRECV */
+
+#ifdef SUPPORT_LATENCY_CRITICAL_DATA
+static int
+wl_android_set_latency_crt_data(struct net_device *dev, int mode)
+{
+ int ret;
+#ifdef DHD_GRO_ENABLE_HOST_CTRL
+ dhd_pub_t *dhdp = NULL;
+#endif /* DHD_GRO_ENABLE_HOST_CTRL */
+ if (mode >= LATENCY_CRT_DATA_MODE_LAST) {
+ return BCME_BADARG;
+ }
+#ifdef DHD_GRO_ENABLE_HOST_CTRL
+ dhdp = wl_cfg80211_get_dhdp(dev);
+ if (mode != LATENCY_CRT_DATA_MODE_OFF) {
+ ANDROID_ERROR(("Not permitted GRO by framework\n"));
+ dhdp->permitted_gro = FALSE;
+ } else {
+ ANDROID_ERROR(("Permitted GRO by framework\n"));
+ dhdp->permitted_gro = TRUE;
+ }
+#endif /* DHD_GRO_ENABLE_HOST_CTRL */
+ ret = wldev_iovar_setint(dev, "latency_critical_data", mode);
+ if (ret != BCME_OK) {
+ ANDROID_ERROR(("failed to set latency_critical_data mode %d, error = %d\n",
+ mode, ret));
+ return ret;
+ }
+
+ return ret;
+}
+
+static int
+wl_android_get_latency_crt_data(struct net_device *dev, char *command, int total_len)
+{
+ int ret;
+ int mode = LATENCY_CRT_DATA_MODE_OFF;
+ int bytes_written;
+
+ ret = wldev_iovar_getint(dev, "latency_critical_data", &mode);
+ if (ret != BCME_OK) {
+ ANDROID_ERROR(("failed to get latency_critical_data error = %d\n", ret));
+ return ret;
+ }
+
+ bytes_written = snprintf(command, total_len, "%s %d",
+ CMD_GET_LATENCY_CRITICAL_DATA, mode);
+
+ return bytes_written;
+}
+#endif /* SUPPORT_LATENCY_CRITICAL_DATA */
+
+#ifdef WL_CAC_TS
+/* CAC TSPEC functionality code implementation */
+static void
+wl_android_update_tsinfo(uint8 access_category, tspec_arg_t *tspec_arg)
+{
+ uint8 tspec_id;
+ /* Using direction as bidirectional by default */
+ uint8 direction = TSPEC_BI_DIRECTION;
+ /* Using U-APSD as the default power save mode */
+ uint8 user_psb = TSPEC_UAPSD_PSB;
+ uint8 ADDTS_AC2PRIO[4] = {PRIO_8021D_BE, PRIO_8021D_BK, PRIO_8021D_VI, PRIO_8021D_VO};
+
+ /* Map tspec_id from access category */
+ tspec_id = ADDTS_AC2PRIO[access_category];
+
+ /* Update the tsinfo */
+ tspec_arg->tsinfo.octets[0] = (uint8)(TSPEC_EDCA_ACCESS | direction |
+ (tspec_id << TSPEC_TSINFO_TID_SHIFT));
+ tspec_arg->tsinfo.octets[1] = (uint8)((tspec_id << TSPEC_TSINFO_PRIO_SHIFT) |
+ user_psb);
+ tspec_arg->tsinfo.octets[2] = 0x00;
+}
+
+static s32
+wl_android_handle_cac_action(struct bcm_cfg80211 * cfg, struct net_device * ndev, char * argv)
+{
+ tspec_arg_t tspec_arg;
+ s32 err = BCME_ERROR;
+ u8 ts_cmd[12] = "cac_addts";
+ uint8 access_category;
+ s32 bssidx;
+
+ /* Following handling is done only for the primary interface */
+ memset_s(&tspec_arg, sizeof(tspec_arg), 0, sizeof(tspec_arg));
+ if (strncmp(argv, "addts", strlen("addts")) == 0) {
+ tspec_arg.version = TSPEC_ARG_VERSION;
+ tspec_arg.length = sizeof(tspec_arg_t) - (2 * sizeof(uint16));
+ /* Read the params passed */
+ sscanf(argv, "%*s %hhu %hu %hu", &access_category,
+ &tspec_arg.nom_msdu_size, &tspec_arg.surplus_bw);
+ if ((access_category > TSPEC_MAX_ACCESS_CATEGORY) ||
+ ((tspec_arg.surplus_bw < TSPEC_MIN_SURPLUS_BW) ||
+ (tspec_arg.surplus_bw > TSPEC_MAX_SURPLUS_BW)) ||
+ (tspec_arg.nom_msdu_size > TSPEC_MAX_MSDU_SIZE)) {
+ ANDROID_ERROR(("Invalid params access_category %hhu nom_msdu_size %hu"
+ " surplus BW %hu\n", access_category, tspec_arg.nom_msdu_size,
+ tspec_arg.surplus_bw));
+ return BCME_USAGE_ERROR;
+ }
+
+ /* Update tsinfo */
+ wl_android_update_tsinfo(access_category, &tspec_arg);
+ /* Update other tspec parameters */
+ tspec_arg.dialog_token = TSPEC_DEF_DIALOG_TOKEN;
+ tspec_arg.mean_data_rate = TSPEC_DEF_MEAN_DATA_RATE;
+ tspec_arg.min_phy_rate = TSPEC_DEF_MIN_PHY_RATE;
+ } else if (strncmp(argv, "delts", strlen("delts")) == 0) {
+ snprintf(ts_cmd, sizeof(ts_cmd), "cac_delts");
+ tspec_arg.length = sizeof(tspec_arg_t) - (2 * sizeof(uint16));
+ tspec_arg.version = TSPEC_ARG_VERSION;
+ /* Read the params passed */
+ sscanf(argv, "%*s %hhu", &access_category);
+
+ if (access_category > TSPEC_MAX_ACCESS_CATEGORY) {
+ ANDROID_INFO(("Invalide param, access_category %hhu\n", access_category));
+ return BCME_USAGE_ERROR;
+ }
+ /* Update tsinfo */
+ wl_android_update_tsinfo(access_category, &tspec_arg);
+ }
+
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr)) < 0) {
+ ANDROID_ERROR(("Find index failed\n"));
+ err = BCME_ERROR;
+ return err;
+ }
+ err = wldev_iovar_setbuf_bsscfg(ndev, ts_cmd, &tspec_arg, sizeof(tspec_arg),
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+ if (unlikely(err)) {
+ ANDROID_ERROR(("%s error (%d)\n", ts_cmd, err));
+ }
+
+ return err;
+}
+
+static s32
+wl_android_cac_ts_config(struct net_device *ndev, char *cmd_argv, int total_len)
+{
+ struct bcm_cfg80211 *cfg = NULL;
+ s32 err = BCME_OK;
+
+ if (!ndev) {
+ ANDROID_ERROR(("ndev is NULL\n"));
+ return -EINVAL;
+ }
+
+ cfg = wl_get_cfg(ndev);
+ if (!cfg) {
+ ANDROID_ERROR(("cfg is NULL\n"));
+ return -EINVAL;
+ }
+
+ /* Request supported only for primary interface */
+ if (ndev != bcmcfg_to_prmry_ndev(cfg)) {
+ ANDROID_ERROR(("Request on non-primary interface\n"));
+ return -1;
+ }
+
+ /* sync commands from user space */
+ mutex_lock(&cfg->usr_sync);
+ err = wl_android_handle_cac_action(cfg, ndev, cmd_argv);
+ mutex_unlock(&cfg->usr_sync);
+
+ return err;
+}
+#endif /* WL_CAC_TS */
+
+#ifdef WL_GET_CU
+/* Implementation to get channel usage from framework */
+static s32
+wl_android_get_channel_util(struct net_device *ndev, char *command, int total_len)
+{
+ s32 bytes_written, err = 0;
+ wl_bssload_t bssload;
+ u8 smbuf[WLC_IOCTL_SMLEN];
+ u8 chan_use_percentage = 0;
+
+ if ((err = wldev_iovar_getbuf(ndev, "bssload_report", NULL,
+ 0, smbuf, WLC_IOCTL_SMLEN, NULL))) {
+ ANDROID_ERROR(("Getting bssload report failed with err=%d \n", err));
+ return err;
+ }
+
+ (void)memcpy_s(&bssload, sizeof(wl_bssload_t), smbuf, sizeof(wl_bssload_t));
+ /* Convert channel usage to percentage value */
+ chan_use_percentage = (bssload.chan_util * 100) / 255;
+
+ bytes_written = snprintf(command, total_len, "CU %hhu",
+ chan_use_percentage);
+ ANDROID_INFO(("Channel Utilization %u %u\n", bssload.chan_util, chan_use_percentage));
+
+ return bytes_written;
+}
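+
+/*
+ * Sketch: BSS Load channel utilization is reported as a 0..255 fraction;
+ * the handler above scales it to an integer percentage (truncating).
+ */
+#if 0 /* illustration only */
+static unsigned int chan_util_percent(unsigned int chan_util /* 0..255 */)
+{
+	return (chan_util * 100) / 255;	/* e.g. 128 -> 50 */
+}
+#endif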
+#endif /* WL_GET_CU */
+
+#ifdef RTT_GEOFENCE_INTERVAL
+#if defined (RTT_SUPPORT) && defined(WL_NAN)
+static void
+wl_android_set_rtt_geofence_interval(struct net_device *ndev, char *command)
+{
+ int rtt_interval = 0;
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(ndev);
+ char *rtt_intp = command + strlen(CMD_GEOFENCE_INTERVAL) + 1;
+
+ rtt_interval = bcm_atoi(rtt_intp);
+ dhd_rtt_set_geofence_rtt_interval(dhdp, rtt_interval);
+}
+#endif /* RTT_SUPPORT && WL_NAN */
+#endif /* RTT_GEOFENCE_INTERVAL */
+
+#ifdef SUPPORT_SOFTAP_ELNA_BYPASS
+int
+wl_android_set_softap_elna_bypass(struct net_device *dev, char *command, int total_len)
+{
+ char *ifname = NULL;
+ char *pos, *token;
+ int err = BCME_OK;
+ int enable = FALSE;
+
+ /*
+ * STA/AP/GO I/F: DRIVER SET_SOFTAP_ELNA_BYPASS <ifname> <enable/disable>
+ * the enable/disable value follows Samsung-specific conventions:
+ * Enable : 0
+ * Disable : -1
+ */
+ pos = command;
+
+ /* drop command */
+ token = bcmstrtok(&pos, " ", NULL);
+
+ /* get the interface name */
+ token = bcmstrtok(&pos, " ", NULL);
+ if (!token) {
+ ANDROID_ERROR(("%s: Invalid arguments about interface name\n", __FUNCTION__));
+ return -EINVAL;
+ }
+ ifname = token;
+
+ /* get enable/disable flag */
+ token = bcmstrtok(&pos, " ", NULL);
+ if (!token) {
+ ANDROID_ERROR(("%s: Invalid arguments about Enable/Disable\n", __FUNCTION__));
+ return -EINVAL;
+ }
+ enable = bcm_atoi(token);
+
+ CUSTOMER_HW4_EN_CONVERT(enable);
+ err = wl_set_softap_elna_bypass(dev, ifname, enable);
+ if (unlikely(err)) {
+ ANDROID_ERROR(("%s: Failed to set ELNA Bypass of SoftAP mode, err=%d\n",
+ __FUNCTION__, err));
+ return err;
+ }
+
+ return err;
+}
+
+int
+wl_android_get_softap_elna_bypass(struct net_device *dev, char *command, int total_len)
+{
+ char *ifname = NULL;
+ char *pos, *token;
+ int err = BCME_OK;
+ int bytes_written = 0;
+ int softap_elnabypass = 0;
+
+ /*
+ * STA/AP/GO I/F: DRIVER GET_SOFTAP_ELNA_BYPASS <ifname>
+ */
+ pos = command;
+
+ /* drop command */
+ token = bcmstrtok(&pos, " ", NULL);
+
+ /* get the interface name */
+ token = bcmstrtok(&pos, " ", NULL);
+ if (!token) {
+ ANDROID_ERROR(("%s: Invalid arguments about interface name\n", __FUNCTION__));
+ return -EINVAL;
+ }
+ ifname = token;
+
+ err = wl_get_softap_elna_bypass(dev, ifname, &softap_elnabypass);
+ if (unlikely(err)) {
+ ANDROID_ERROR(("%s: Failed to get ELNA Bypass of SoftAP mode, err=%d\n",
+ __FUNCTION__, err));
+ return err;
+ } else {
+ softap_elnabypass--; /* convert to Customer HW4 format */
+ ANDROID_INFO(("%s: eLNA Bypass feature enable status is %d\n",
+ __FUNCTION__, softap_elnabypass));
+ bytes_written = snprintf(command, total_len, "%s %d",
+ CMD_GET_SOFTAP_ELNA_BYPASS, softap_elnabypass);
+ }
+
+ return bytes_written;
+}
+#endif /* SUPPORT_SOFTAP_ELNA_BYPASS */
+
+#ifdef WL_NAN
+int
+wl_android_get_nan_status(struct net_device *dev, char *command, int total_len)
+{
+ int bytes_written = 0;
+ int error = BCME_OK;
+ wl_nan_conf_status_t nstatus;
+
+ error = wl_cfgnan_get_status(dev, &nstatus);
+ if (error) {
+ ANDROID_ERROR(("Failed to get nan status (%d)\n", error));
+ return error;
+ }
+
+ bytes_written = snprintf(command, total_len,
+ "EN:%d Role:%d EM:%d CID:"MACF" NMI:"MACF" SC(2G):%d SC(5G):%d "
+ "MR:"NMRSTR" AMR:"NMRSTR" IMR:"NMRSTR
+ "HC:%d AMBTT:%04x TSF[%04x:%04x]\n",
+ nstatus.enabled,
+ nstatus.role,
+ nstatus.election_mode,
+ ETHERP_TO_MACF(&(nstatus.cid)),
+ ETHERP_TO_MACF(&(nstatus.nmi)),
+ nstatus.social_chans[0],
+ nstatus.social_chans[1],
+ NMR2STR(nstatus.mr),
+ NMR2STR(nstatus.amr),
+ NMR2STR(nstatus.imr),
+ nstatus.hop_count,
+ nstatus.ambtt,
+ nstatus.cluster_tsf_h,
+ nstatus.cluster_tsf_l);
+ return bytes_written;
+}
+#endif /* WL_NAN */
+
+#ifdef SUPPORT_NAN_RANGING_TEST_BW
+enum {
+ NAN_RANGING_5G_BW20 = 1,
+ NAN_RANGING_5G_BW40,
+ NAN_RANGING_5G_BW80
+};
+
+int
+wl_nan_ranging_bw(struct net_device *net, int bw, char *command)
+{
+ int bytes_written, err = BCME_OK;
+ u8 ioctl_buf[WLC_IOCTL_SMLEN];
+ s32 val = 1;
+ struct {
+ u32 band;
+ u32 bw_cap;
+ } param = {0, 0};
+
+ if (bw < NAN_RANGING_5G_BW20 || bw > NAN_RANGING_5G_BW80) {
+ ANDROID_ERROR(("Wrong BW cmd:%d, %s\n", bw, __FUNCTION__));
+ bytes_written = scnprintf(command, sizeof("FAIL"), "FAIL");
+ return bytes_written;
+ }
+
+ switch (bw) {
+ case NAN_RANGING_5G_BW20:
+ ANDROID_ERROR(("NAN_RANGING 5G/BW20\n"));
+ param.band = WLC_BAND_5G;
+ param.bw_cap = 0x1;
+ break;
+ case NAN_RANGING_5G_BW40:
+ ANDROID_ERROR(("NAN_RANGING 5G/BW40\n"));
+ param.band = WLC_BAND_5G;
+ param.bw_cap = 0x3;
+ break;
+ case NAN_RANGING_5G_BW80:
+ ANDROID_ERROR(("NAN_RANGING 5G/BW80\n"));
+ param.band = WLC_BAND_5G;
+ param.bw_cap = 0x7;
+ break;
+ }
+
+ err = wldev_ioctl_set(net, WLC_DOWN, &val, sizeof(s32));
+ if (err) {
+ ANDROID_ERROR(("WLC_DOWN error %d\n", err));
+ bytes_written = scnprintf(command, sizeof("FAIL"), "FAIL");
+ } else {
+ err = wldev_iovar_setbuf(net, "bw_cap", &param, sizeof(param),
+ ioctl_buf, sizeof(ioctl_buf), NULL);
+
+ if (err) {
+ ANDROID_ERROR(("BW set failed\n"));
+ bytes_written = scnprintf(command, sizeof("FAIL"), "FAIL");
+ } else {
+ ANDROID_ERROR(("BW set done\n"));
+ bytes_written = scnprintf(command, sizeof("OK"), "OK");
+ }
+
+ err = wldev_ioctl_set(net, WLC_UP, &val, sizeof(s32));
+ if (err < 0) {
+ ANDROID_ERROR(("WLC_UP error %d\n", err));
+ bytes_written = scnprintf(command, sizeof("FAIL"), "FAIL");
+ }
+ }
+ return bytes_written;
+}
+#endif /* SUPPORT_NAN_RANGING_TEST_BW */
+
+static int
+wl_android_set_softap_ax_mode(struct net_device *dev, const char* cmd_str)
+{
+ int enable = 0;
+ int err = 0;
+ s32 bssidx = 0;
+ struct bcm_cfg80211 *cfg = NULL;
+
+ if (!dev) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ cfg = wl_get_cfg(dev);
+ if (!cfg) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ if (cmd_str) {
+ enable = bcm_atoi(cmd_str);
+ } else {
+ ANDROID_ERROR(("failed due to wrong received parameter\n"));
+ err = -EINVAL;
+ goto exit;
+ }
+
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+ ANDROID_ERROR(("find softap index from wdev failed\n"));
+ err = -EINVAL;
+ goto exit;
+ }
+
+ ANDROID_INFO(("HAPD_SET_AX_MODE = %d\n", enable));
+ err = wl_cfg80211_set_he_mode(dev, cfg, bssidx, WL_HE_FEATURES_HE_AP, (bool)enable);
+ if (err) {
+ ANDROID_ERROR(("failed to set softap ax mode(%d)\n", enable));
+ }
+exit:
+ return err;
+}
+
+#ifdef WL_P2P_6G
+#define WL_HE_FEATURES_P2P_6G 0x0200u
+static int
+wl_android_enable_p2p_6g(struct net_device *dev, int enable)
+{
+ s32 err = 0;
+ s32 bssidx = 0;
+ struct bcm_cfg80211 *cfg = NULL;
+
+ if (!dev) {
+ err = -EINVAL;
+ return err;
+ }
+
+ cfg = wl_get_cfg(dev);
+ if (!cfg) {
+ err = -EINVAL;
+ return err;
+ }
+
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+ ANDROID_ERROR(("find softap index from wdev failed\n"));
+ err = -EINVAL;
+ return err;
+ }
+
+ /* Enable/disable for P2P 6G, both P2P and P2P_6G needs to be handled together */
+ err = wl_cfg80211_set_he_mode(dev, cfg, bssidx, (WL_HE_FEATURES_HE_P2P |
+ WL_HE_FEATURES_P2P_6G), (bool)enable);
+ if (err == BCME_OK) {
+ /* Set P2P 6G support flag */
+ if (enable) {
+ cfg->p2p_6g_enabled = TRUE;
+ } else {
+ cfg->p2p_6g_enabled = FALSE;
+ }
+ }
+
+ return err;
+}
+#endif /* WL_P2P_6G */
+
+#ifdef WL_TWT
+
+static int
+wl_android_twt_setup(struct net_device *ndev, char *command, int total_len)
+{
+ wl_twt_config_t val;
+ s32 bw;
+ char *token, *pos;
+ u8 mybuf[WLC_IOCTL_SMLEN] = {0};
+ u8 resp_buf[WLC_IOCTL_SMLEN] = {0};
+ u64 twt;
+ uint8 *rem = mybuf;
+ uint16 rem_len = sizeof(mybuf);
+ int32 val32;
+
+ WL_DBG_MEM(("Enter. cmd:%s\n", command));
+
+ if (strlen(command) == strlen(CMD_TWT_SETUP)) {
+ ANDROID_ERROR(("Error, twt_setup cmd missing params\n"));
+ bw = -EINVAL;
+ goto exit;
+ }
+
+ bzero(&val, sizeof(val));
+ val.version = WL_TWT_SETUP_VER;
+ val.length = sizeof(val.version) + sizeof(val.length);
+
+ /* Default values, override below */
+ val.desc.wake_time_h = 0xFFFFFFFF;
+ val.desc.wake_time_l = 0xFFFFFFFF;
+ val.desc.wake_int_min = 0xFFFFFFFF;
+ val.desc.wake_int_max = 0xFFFFFFFF;
+ val.desc.wake_dur_min = 0xFFFFFFFF;
+ val.desc.wake_dur_max = 0xFFFFFFFF;
+ val.desc.avg_pkt_num = 0xFFFFFFFF;
+
+ pos = command + sizeof(CMD_TWT_SETUP);
+
+ /* negotiation_type */
+ token = strsep((char**)&pos, " ");
+ if (!token) {
+ ANDROID_ERROR(("Mandatory param negotiation type not present\n"));
+ bw = -EINVAL;
+ goto exit;
+ }
+ val.desc.negotiation_type = htod32((u32)bcm_atoi(token));
+
+ /* Wake Duration */
+ token = strsep((char**)&pos, " ");
+ if (!token) {
+ ANDROID_ERROR(("Mandatory param wake Duration not present\n"));
+ bw = -EINVAL;
+ goto exit;
+ }
+ val.desc.wake_dur = htod32((u32)bcm_atoi(token));
+
+ /* Wake interval */
+ token = strsep((char**)&pos, " ");
+ if (!token) {
+ ANDROID_ERROR(("Mandaory param Wake Interval not present\n"));
+ bw = -EINVAL;
+ goto exit;
+ }
+ val.desc.wake_int = htod32((u32)bcm_atoi(token));
+
+ /* Wake Time parameter */
+ token = strsep((char**)&pos, " ");
+ if (!token) {
+ ANDROID_ERROR(("No Wake Time parameter provided, using default\n"));
+ } else {
+ twt = (u64)bcm_atoi(token);
+ val32 = htod32((u32)(twt >> 32));
+ if ((val32 != -1) && ((int32)(htod32((u32)twt)) != -1)) {
+ val.desc.wake_time_h = htod32((u32)(twt >> 32));
+ val.desc.wake_time_l = htod32((u32)twt);
+ }
+ }
+
+ /* Minimum allowed Wake interval */
+ token = strsep((char**)&pos, " ");
+ if (!token) {
+ ANDROID_ERROR(("No Minimum allowed Wake interval provided, using default\n"));
+ } else {
+ val32 = htod32((u32)bcm_atoi(token));
+ if (val32 != -1) {
+ val.desc.wake_int_min = htod32((u32)bcm_atoi(token));
+ }
+ }
+
+ /* Max Allowed Wake interval */
+ token = strsep((char**)&pos, " ");
+ if (!token) {
+ ANDROID_ERROR(("Maximum allowed Wake interval not provided, using default\n"));
+ } else {
+ val32 = htod32((u32)bcm_atoi(token));
+ if (val32 != -1) {
+ val.desc.wake_int_max = htod32((u32)bcm_atoi(token));
+ }
+ }
+
+ /* Minimum allowed Wake duration */
+ token = strsep((char**)&pos, " ");
+ if (!token) {
+ ANDROID_ERROR(("Maximum allowed Wake duration not provided, using default\n"));
+ } else {
+ val32 = htod32((u32)bcm_atoi(token));
+ if (val32 != -1) {
+ val.desc.wake_dur_min = htod32((u32)bcm_atoi(token));
+ }
+ }
+
+ /* Maximum allowed Wake duration */
+ token = strsep((char**)&pos, " ");
+ if (!token) {
+ ANDROID_ERROR(("Maximum allowed Wake duration not provided, using default\n"));
+ } else {
+ val32 = htod32((u32)bcm_atoi(token));
+ if (val32 != -1) {
+ val.desc.wake_dur_max = htod32((u32)bcm_atoi(token));
+ }
+ }
+
+ /* Average number of packets */
+ token = strsep((char**)&pos, " ");
+ if (!token) {
+ ANDROID_ERROR(("Average number of packets not provided, using default\n"));
+ } else {
+ val32 = htod32((u32)bcm_atoi(token));
+ if (val32 != -1) {
+ val.desc.avg_pkt_num = htod32((u32)bcm_atoi(token));
+ }
+ }
+
+ /* peer address */
+ token = strsep((char**)&pos, " ");
+ if (!token) {
+ ANDROID_ERROR(("Peer address not provided, using default\n"));
+ } else {
+ /* get peer mac */
+ if (!bcm_ether_atoe(token, &val.peer)) {
+ ANDROID_ERROR(("%s : Malformed peer addr\n", __FUNCTION__));
+ bw = BCME_ERROR;
+ goto exit;
+ }
+ }
+
+ bw = bcm_pack_xtlv_entry(&rem, &rem_len, WL_TWT_CMD_CONFIG,
+ sizeof(val), (uint8 *)&val, BCM_XTLV_OPTION_ALIGN32);
+ if (bw != BCME_OK) {
+ goto exit;
+ }
+
+ bw = wldev_iovar_setbuf(ndev, "twt",
+ mybuf, sizeof(mybuf) - rem_len, resp_buf, WLC_IOCTL_SMLEN, NULL);
+ if (bw < 0) {
+ ANDROID_ERROR(("twt config set failed. ret:%d\n", bw));
+ }
+exit:
+ return bw;
+}
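+
+/*
+ * Sketch: the TWT target wake time arrives as a single 64-bit count but
+ * is carried in the iovar as two 32-bit halves (wake_time_h/wake_time_l),
+ * as done above.
+ */
+#if 0 /* illustration only */
+static void split_wake_time(unsigned long long twt,
+	unsigned int *hi, unsigned int *lo)
+{
+	*hi = (unsigned int)(twt >> 32);	/* high 32 bits */
+	*lo = (unsigned int)twt;		/* low 32 bits */
+}
+#endif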
+
+static int
+wl_android_twt_display_cap(wl_twt_cap_t *result, char *command, int total_len)
+{
+ int rem_len = 0, bytes_written = 0;
+
+ rem_len = total_len;
+ bytes_written = scnprintf(command, rem_len, "Device TWT Capabilities:\n");
+ command += bytes_written;
+ rem_len -= bytes_written;
+
+ bytes_written = scnprintf(command, rem_len, "Requester Support %d, \t",
+ !!(result->device_cap & WL_TWT_CAP_FLAGS_REQ_SUPPORT));
+ command += bytes_written;
+ rem_len -= bytes_written;
+
+ bytes_written = scnprintf(command, rem_len, "Responder Support %d, \t",
+ !!(result->device_cap & WL_TWT_CAP_FLAGS_RESP_SUPPORT));
+ command += bytes_written;
+ rem_len -= bytes_written;
+
+ bytes_written = scnprintf(command, rem_len, "Broadcast TWT Support %d, \t",
+ !!(result->device_cap & WL_TWT_CAP_FLAGS_BTWT_SUPPORT));
+ command += bytes_written;
+ rem_len -= bytes_written;
+
+ bytes_written = scnprintf(command, rem_len, "Flexible TWT Support %d, \t",
+ !!(result->device_cap & WL_TWT_CAP_FLAGS_FLEX_SUPPORT));
+ command += bytes_written;
+ rem_len -= bytes_written;
+
+ bytes_written = scnprintf(command, rem_len, "TWT Required by peer %d, \n",
+ !!(result->device_cap & WL_TWT_CAP_FLAGS_TWT_REQUIRED));
+ command += bytes_written;
+ rem_len -= bytes_written;
+
+ /* Peer capabilities */
+ bytes_written = scnprintf(command, rem_len, "\nPeer TWT Capabilities:\n");
+ command += bytes_written;
+ rem_len -= bytes_written;
+
+ bytes_written = scnprintf(command, rem_len, "Requester Support %d, \t",
+ !!(result->peer_cap & WL_TWT_CAP_FLAGS_REQ_SUPPORT));
+ command += bytes_written;
+ rem_len -= bytes_written;
+
+ bytes_written = scnprintf(command, rem_len, "Responder Support %d, \t",
+ !!(result->peer_cap & WL_TWT_CAP_FLAGS_RESP_SUPPORT));
+ command += bytes_written;
+ rem_len -= bytes_written;
+
+ bytes_written = scnprintf(command, rem_len, "Broadcast TWT Support %d, \t",
+ !!(result->peer_cap & WL_TWT_CAP_FLAGS_BTWT_SUPPORT));
+ command += bytes_written;
+ rem_len -= bytes_written;
+
+ bytes_written = scnprintf(command, rem_len, "Flexible TWT Support %d, \t",
+ !!(result->peer_cap & WL_TWT_CAP_FLAGS_FLEX_SUPPORT));
+ command += bytes_written;
+ rem_len -= bytes_written;
+
+ bytes_written = scnprintf(command, rem_len, "TWT Required by peer %d, \n",
+ !!(result->peer_cap & WL_TWT_CAP_FLAGS_TWT_REQUIRED));
+ command += bytes_written;
+ rem_len -= bytes_written;
+
+ bytes_written = scnprintf(command, rem_len, "\t------------"
+ "---------------------------------------------------\n\n");
+ command += bytes_written;
+ rem_len -= bytes_written;
+ ANDROID_INFO(("Device TWT Capabilities:\n"));
+ ANDROID_INFO(("Requester Support %d, \t",
+ !!(result->device_cap & WL_TWT_CAP_FLAGS_REQ_SUPPORT)));
+ ANDROID_INFO(("Responder Support %d, \t",
+ !!(result->device_cap & WL_TWT_CAP_FLAGS_RESP_SUPPORT)));
+ ANDROID_INFO(("Broadcast TWT Support %d, \t",
+ !!(result->device_cap & WL_TWT_CAP_FLAGS_BTWT_SUPPORT)));
+ ANDROID_INFO(("Flexible TWT Support %d, \t",
+ !!(result->device_cap & WL_TWT_CAP_FLAGS_FLEX_SUPPORT)));
+ ANDROID_INFO(("TWT Required by peer %d, \n",
+ !!(result->device_cap & WL_TWT_CAP_FLAGS_TWT_REQUIRED)));
+ /* Peer capabilities */
+ ANDROID_INFO(("\nPeer TWT Capabilities:\n"));
+ ANDROID_INFO(("Requester Support %d, \t",
+ !!(result->peer_cap & WL_TWT_CAP_FLAGS_REQ_SUPPORT)));
+ ANDROID_INFO(("Responder Support %d, \t",
+ !!(result->peer_cap & WL_TWT_CAP_FLAGS_RESP_SUPPORT)));
+ ANDROID_INFO(("Broadcast TWT Support %d, \t",
+ !!(result->peer_cap & WL_TWT_CAP_FLAGS_BTWT_SUPPORT)));
+ ANDROID_INFO(("Flexible TWT Support %d, \t",
+ !!(result->peer_cap & WL_TWT_CAP_FLAGS_FLEX_SUPPORT)));
+ ANDROID_INFO(("TWT Required by peer %d, \n",
+ !!(result->peer_cap & WL_TWT_CAP_FLAGS_TWT_REQUIRED)));
+ ANDROID_INFO(("\t-----------------------------------------------------------------\n\n"));
+
+ if ((total_len - rem_len) > 0) {
+ return (total_len - rem_len);
+ } else {
+ return BCME_ERROR;
+ }
+}
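+
+/*
+ * Sketch of the append pattern used above: scnprintf() returns the number
+ * of bytes actually written (excluding the NUL), so advancing the cursor
+ * and shrinking the remaining length can never overrun the buffer.
+ */
+#if 0 /* illustration only */
+static int append_report(char *buf, int total_len)
+{
+	int rem = total_len, n;
+
+	n = scnprintf(buf, rem, "line one\n");
+	buf += n;
+	rem -= n;
+	n = scnprintf(buf, rem, "line two\n");
+	rem -= n;
+	return total_len - rem;	/* bytes produced */
+}
+#endif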
+
+static int
+wl_android_twt_cap(struct net_device *dev, char *command, int total_len)
+{
+ int ret = BCME_OK;
+ char iovbuf[WLC_IOCTL_SMLEN] = {0, };
+ uint8 *pxtlv = NULL;
+ uint8 *iovresp = NULL;
+ wl_twt_cap_cmd_t cmd_cap;
+ wl_twt_cap_t result;
+
+ uint16 buflen = 0, bufstart = 0;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+
+ bzero(&cmd_cap, sizeof(cmd_cap));
+
+ cmd_cap.version = WL_TWT_CAP_CMD_VERSION_1;
+ cmd_cap.length = sizeof(cmd_cap) - OFFSETOF(wl_twt_cap_cmd_t, peer);
+
+ iovresp = (uint8 *)MALLOCZ(cfg->osh, WLC_IOCTL_MEDLEN);
+ if (iovresp == NULL) {
+ ANDROID_ERROR(("%s: iov resp memory alloc failed\n", __FUNCTION__));
+ ret = BCME_NOMEM;
+ goto exit;
+ }
+
+ buflen = bufstart = WLC_IOCTL_SMLEN;
+ pxtlv = (uint8 *)iovbuf;
+
+ ret = bcm_pack_xtlv_entry(&pxtlv, &buflen, WL_TWT_CMD_CAP,
+ sizeof(cmd_cap), (uint8 *)&cmd_cap, BCM_XTLV_OPTION_ALIGN32);
+ if (ret != BCME_OK) {
+ ANDROID_ERROR(("%s : Error return during pack xtlv :%d\n", __FUNCTION__, ret));
+ goto exit;
+ }
+
+ if ((ret = wldev_iovar_getbuf(dev, "twt", iovbuf, bufstart-buflen,
+ iovresp, WLC_IOCTL_MEDLEN, NULL))) {
+ ANDROID_ERROR(("Getting twt status failed with err=%d \n", ret));
+ goto exit;
+ }
+
+ (void)memcpy_s(&result, sizeof(result), iovresp, sizeof(result));
+
+ if (dtoh16(result.version) == WL_TWT_CAP_CMD_VERSION_1) {
+ ANDROID_ERROR(("capability ver %d\n", dtoh16(result.version)));
+ /* fall through to exit so iovresp is always freed */
+ ret = wl_android_twt_display_cap(&result, command, total_len);
+ } else {
+ ret = BCME_UNSUPPORTED;
+ ANDROID_ERROR(("unsupported TWT capability ver %d\n", dtoh16(result.version)));
+ }
+
+exit:
+ if (iovresp) {
+ MFREE(cfg->osh, iovresp, WLC_IOCTL_MEDLEN);
+ }
+
+ return ret;
+}
+
+static int
+wl_android_twt_status_display_v1(wl_twt_status_v1_t *status, char *command, int total_len)
+{
+ uint i;
+ wl_twt_sdesc_t *desc = NULL;
+ int rem_len = 0, bytes_written = 0;
+
+ rem_len = total_len;
+
+ ANDROID_ERROR(("\nNumber of Individual TWTs: %d\n", status->num_fid));
+ bytes_written = scnprintf(command, rem_len,
+ "\nNumber of Individual TWTs: %d\n", status->num_fid);
+ command += bytes_written;
+ rem_len -= bytes_written;
+ bytes_written = scnprintf(command, rem_len,
+ "Number of Broadcast TWTs: %d\n", status->num_bid);
+ command += bytes_written;
+ rem_len -= bytes_written;
+ bytes_written = scnprintf(command, rem_len,
+ "TWT SPPS Enabled %d \t STA Wake Status %d \t Wake Override %d\n",
+ !!(status->status_flags & WL_TWT_STATUS_FLAG_SPPS_ENAB),
+ !!(status->status_flags & WL_TWT_STATUS_FLAG_WAKE_STATE),
+ !!(status->status_flags & WL_TWT_STATUS_FLAG_WAKE_OVERRIDE));
+ command += bytes_written;
+ rem_len -= bytes_written;
+ ANDROID_INFO(("Number of Broadcast TWTs: %d\n", status->num_bid));
+ ANDROID_INFO(("TWT SPPS Enabled %d \t STA Wake Status %d \t Wake Override %d\n",
+ !!(status->status_flags & WL_TWT_STATUS_FLAG_SPPS_ENAB),
+ !!(status->status_flags & WL_TWT_STATUS_FLAG_WAKE_STATE),
+ !!(status->status_flags & WL_TWT_STATUS_FLAG_WAKE_OVERRIDE)));
+ ANDROID_INFO(("\t---------------- Individual TWT list-------------------\n"));
+ bytes_written = scnprintf(command, rem_len,
+ "\t---------------- Individual TWT list-------------------\n");
+ command += bytes_written;
+ rem_len -= bytes_written;
+
+ for (i = 0; i < WL_TWT_MAX_ITWT; i ++) {
+ if ((status->itwt_status[i].state == WL_TWT_ACTIVE) ||
+ (status->itwt_status[i].state == WL_TWT_SUSPEND)) {
+ desc = &status->itwt_status[i].desc;
+ bytes_written = scnprintf(command, rem_len, "\tFlow ID %d \tState %d\t",
+ desc->flow_id,
+ status->itwt_status[i].state);
+ command += bytes_written;
+ rem_len -= bytes_written;
+
+ bytes_written = scnprintf(command, rem_len,
+ "peer: "MACF"\n",
+ ETHER_TO_MACF(status->itwt_status[i].peer));
+ command += bytes_written;
+ rem_len -= bytes_written;
+
+ bytes_written = scnprintf(command, rem_len,
+ "Unannounced %d\tTriggered %d\tProtection %d\t"
+ "Info Frame Disabled %d\n",
+ !!(desc->flow_flags & WL_TWT_FLOW_FLAG_UNANNOUNCED),
+ !!(desc->flow_flags & WL_TWT_FLOW_FLAG_TRIGGER),
+ !!(desc->flow_flags & WL_TWT_FLOW_FLAG_PROTECT),
+ !!(desc->flow_flags & WL_TWT_FLOW_FLAG_INFO_FRM_DISABLED));
+ command += bytes_written;
+ rem_len -= bytes_written;
+
+ bytes_written = scnprintf(command, rem_len,
+ "target wake time: 0x%08x%08x\t",
+ desc->wake_time_h, desc->wake_time_l);
+ command += bytes_written;
+ rem_len -= bytes_written;
+
+ bytes_written = scnprintf(command, rem_len,
+ "wake duration: %u\t", desc->wake_dur);
+ command += bytes_written;
+ rem_len -= bytes_written;
+
+ bytes_written = scnprintf(command, rem_len,
+ "wake interval: %u\t", desc->wake_int);
+ command += bytes_written;
+ rem_len -= bytes_written;
+
+ bytes_written = scnprintf(command, rem_len,
+ "TWT channel: %u\n", desc->channel);
+ command += bytes_written;
+ rem_len -= bytes_written;
+ ANDROID_INFO(("\tFlow ID %d \tState %d\t",
+ desc->flow_id,
+ status->itwt_status[i].state));
+ ANDROID_INFO(("peer: "MACF"\n", ETHER_TO_MACF(status->itwt_status[i].peer)));
+ ANDROID_INFO(("Unannounced %d\tTriggered %d\tProtection %d\t"
+ "Info Frame Disabled %d\n",
+ !!(desc->flow_flags & WL_TWT_FLOW_FLAG_UNANNOUNCED),
+ !!(desc->flow_flags & WL_TWT_FLOW_FLAG_TRIGGER),
+ !!(desc->flow_flags & WL_TWT_FLOW_FLAG_PROTECT),
+ !!(desc->flow_flags & WL_TWT_FLOW_FLAG_INFO_FRM_DISABLED)));
+ ANDROID_INFO(("target wake time: 0x%08x%08x\t",
+ desc->wake_time_h, desc->wake_time_l));
+ ANDROID_INFO(("wake duration: %u\t", desc->wake_dur));
+ ANDROID_INFO(("wake interval: %u\t", desc->wake_int));
+ ANDROID_INFO(("TWT channel: %u\n", desc->channel));
+ }
+ }
+
+ ANDROID_INFO(("\t---------------- Broadcast TWT list-------------------\n"));
+ bytes_written = scnprintf(command, rem_len,
+ "\t---------------- Broadcast TWT list-------------------\n");
+ command += bytes_written;
+ rem_len -= bytes_written;
+ for (i = 0; i < WL_TWT_MAX_BTWT; i++) {
+ if ((status->btwt_status[i].state == WL_TWT_ACTIVE) ||
+ (status->btwt_status[i].state == WL_TWT_SUSPEND)) {
+ desc = &status->btwt_status[i].desc;
+ bytes_written = scnprintf(command, rem_len,
+ "Broadcast ID %d \tState %d\t",
+ desc->bid, status->btwt_status[i].state);
+ command += bytes_written;
+ rem_len -= bytes_written;
+
+ bytes_written = scnprintf(command, rem_len,
+ "peer: "MACF"\n",
+ ETHER_TO_MACF(status->btwt_status[i].peer));
+ command += bytes_written;
+ rem_len -= bytes_written;
+
+ bytes_written = scnprintf(command, rem_len,
+ "Unannounced %d\tTriggered %d\tProtection %d\t"
+ "Info Frame Disabled %d\t",
+ !!(desc->flow_flags & WL_TWT_FLOW_FLAG_UNANNOUNCED),
+ !!(desc->flow_flags & WL_TWT_FLOW_FLAG_TRIGGER),
+ !!(desc->flow_flags & WL_TWT_FLOW_FLAG_PROTECT),
+ !!(desc->flow_flags & WL_TWT_FLOW_FLAG_INFO_FRM_DISABLED));
+ command += bytes_written;
+ rem_len -= bytes_written;
+
+ bytes_written = scnprintf(command, rem_len,
+ "Frame Recommendation %d\tBTWT Persistence %d\n",
+ desc->frame_recomm, desc->btwt_persistence);
+ command += bytes_written;
+ rem_len -= bytes_written;
+
+ bytes_written = scnprintf(command, rem_len,
+ "target wake time: 0x%08x%08x\t",
+ desc->wake_time_h, desc->wake_time_l);
+ command += bytes_written;
+ rem_len -= bytes_written;
+ bytes_written = scnprintf(command, rem_len,
+ "wake duration: %u\t", desc->wake_dur);
+ command += bytes_written;
+ rem_len -= bytes_written;
+
+ bytes_written = scnprintf(command, rem_len,
+ "wake interval: %u\t", desc->wake_int);
+ command += bytes_written;
+ rem_len -= bytes_written;
+
+ bytes_written = scnprintf(command, rem_len,
+ "TWT channel: %u\n", desc->channel);
+ command += bytes_written;
+ rem_len -= bytes_written;
+ ANDROID_INFO(("Broadcast ID %d \tState %d\t",
+ desc->bid, status->btwt_status[i].state));
+ ANDROID_INFO(("peer: "MACF"\n", ETHER_TO_MACF(status->btwt_status[i].peer)));
+ ANDROID_INFO(("Unannounced %d\tTriggered %d\tProtection %d\t"
+ "Info Frame Disabled %d\t",
+ !!(desc->flow_flags & WL_TWT_FLOW_FLAG_UNANNOUNCED),
+ !!(desc->flow_flags & WL_TWT_FLOW_FLAG_TRIGGER),
+ !!(desc->flow_flags & WL_TWT_FLOW_FLAG_PROTECT),
+ !!(desc->flow_flags & WL_TWT_FLOW_FLAG_INFO_FRM_DISABLED)));
+ ANDROID_INFO(("Frame Recommendation %d\tBTWT Persistence %d\n",
+ desc->frame_recomm, desc->btwt_persistence));
+ ANDROID_INFO(("target wake time: 0x%08x%08x\t",
+ desc->wake_time_h, desc->wake_time_l));
+ ANDROID_INFO(("wake duration: %u\t", desc->wake_dur));
+ ANDROID_INFO(("wake interval: %u\t", desc->wake_int));
+ ANDROID_INFO(("TWT channel: %u\n", desc->channel));
+ }
+ }
+
+ if ((total_len - rem_len) > 0) {
+ return (total_len - rem_len);
+ } else {
+ return BCME_ERROR;
+ }
+}
+
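+/*
+ * Query TWT status from firmware: pack a WL_TWT_CMD_STATUS XTLV into the
+ * "twt" iovar, validate the response version, then format the result via
+ * wl_android_twt_status_display_v1().
+ */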
+static int
+wl_android_twt_status_query(struct net_device *dev, char *command, int total_len)
+{
+ int ret = BCME_OK;
+ char iovbuf[WLC_IOCTL_SMLEN] = {0, };
+ uint8 *pxtlv = NULL;
+ uint8 *iovresp = NULL;
+ wl_twt_status_cmd_v1_t status_cmd;
+ wl_twt_status_v1_t result;
+
+ uint16 buflen = 0, bufstart = 0;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+
+ bzero(&status_cmd, sizeof(status_cmd));
+
+ status_cmd.version = WL_TWT_CMD_STATUS_VERSION_1;
+ status_cmd.length = sizeof(status_cmd) - OFFSETOF(wl_twt_status_cmd_v1_t, peer);
+
+ iovresp = (uint8 *)MALLOCZ(cfg->osh, WLC_IOCTL_MEDLEN);
+ if (iovresp == NULL) {
+ ANDROID_ERROR(("%s: iov resp memory alloc failed\n", __FUNCTION__));
+ ret = BCME_NOMEM;
+ goto exit;
+ }
+
+ buflen = bufstart = WLC_IOCTL_SMLEN;
+ pxtlv = (uint8 *)iovbuf;
+
+ ret = bcm_pack_xtlv_entry(&pxtlv, &buflen, WL_TWT_CMD_STATUS,
+ sizeof(status_cmd), (uint8 *)&status_cmd, BCM_XTLV_OPTION_ALIGN32);
+ if (ret != BCME_OK) {
+ ANDROID_ERROR(("%s : Error return during pack xtlv :%d\n", __FUNCTION__, ret));
+ goto exit;
+ }
+
+ if ((ret = wldev_iovar_getbuf(dev, "twt", iovbuf, bufstart-buflen,
+ iovresp, WLC_IOCTL_MEDLEN, NULL))) {
+ ANDROID_ERROR(("Getting twt status failed with err=%d \n", ret));
+ goto exit;
+ }
+
+ (void)memcpy_s(&result, sizeof(result), iovresp, sizeof(result));
+
+ if (dtoh16(result.version) == WL_TWT_CMD_STATUS_VERSION_1) {
+ ANDROID_INFO(("status query ver %d\n", dtoh16(result.version)));
+ ret = wl_android_twt_status_display_v1(&result, command, total_len);
+ } else {
+ ret = BCME_UNSUPPORTED;
+ ANDROID_ERROR(("Unsupported TWT status version %d\n", dtoh16(result.version)));
+ }
+
+exit:
+ if (iovresp) {
+ MFREE(cfg->osh, iovresp, WLC_IOCTL_MEDLEN);
+ }
+
+ return ret;
+}
+
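+/*
+ * Handle the TWT_INFO private command. Space-separated arguments, in
+ * order: <all-TWT flag> <flow ID> <resume offset> <peer MAC>; only the
+ * first is mandatory, the rest fall back to defaults.
+ */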
+static int
+wl_android_twt_info(struct net_device *ndev, char *command, int total_len)
+{
+ wl_twt_info_t val;
+ s32 bw;
+ char *token, *pos;
+ u8 mybuf[WLC_IOCTL_SMLEN] = {0};
+ u8 res_buf[WLC_IOCTL_SMLEN] = {0};
+ u64 twt;
+ uint8 *rem = mybuf;
+ uint16 rem_len = sizeof(mybuf);
+ int32 val32;
+
+ WL_DBG_MEM(("Enter. cmd:%s\n", command));
+
+ if (strlen(command) == strlen(CMD_TWT_INFO)) {
+ ANDROID_ERROR(("Error, twt teardown cmd missing params\n"));
+ bw = -EINVAL;
+ goto exit;
+ }
+
+ bzero(&val, sizeof(val));
+ val.version = WL_TWT_INFO_VER;
+ val.length = sizeof(val.version) + sizeof(val.length);
+
+ /* Default values, overridden below */
+ val.infodesc.flow_id = 0xFF;
+ val.desc.next_twt_h = 0xFFFFFFFF;
+ val.desc.next_twt_l = 0xFFFFFFFF;
+
+ pos = command + sizeof(CMD_TWT_INFO);
+
+ /* (all TWT) */
+ token = strsep((char**)&pos, " ");
+ if (!token) {
+ ANDROID_ERROR(("Mandatory all TWT type not present\n"));
+ bw = -EINVAL;
+ goto exit;
+ }
+ if (htod32((u32)bcm_atoi(token)) == 1) {
+ val.infodesc.flow_flags |= WL_TWT_INFO_FLAG_ALL_TWT;
+ }
+
+ /* Flow ID */
+ token = strsep((char**)&pos, " ");
+ if (!token) {
+ ANDROID_ERROR(("flow ID not provided, using default\n"));
+ } else {
+ val32 = htod32((u32)bcm_atoi(token));
+ if (val32 != -1) {
+ val.infodesc.flow_id = val32;
+ }
+ }
+
+ /* resume offset */
+ token = strsep((char**)&pos, " ");
+ if (!token) {
+ ANDROID_ERROR(("resume offset not provided, using default\n"));
+ } else {
+ twt = (u64)bcm_atoi(token);
+ val32 = htod32((u32)(twt >> 32));
+ if ((val32 != -1) && ((int32)(htod32((u32)twt)) != -1)) {
+ val.infodesc.next_twt_h = htod32((u32)(twt >> 32));
+ val.infodesc.next_twt_l = htod32((u32)twt);
+ val.infodesc.flow_flags |= WL_TWT_INFO_FLAG_RESUME;
+ }
+ }
+
+ /* peer_address */
+ token = strsep((char**)&pos, " ");
+ if (!token) {
+ ANDROID_ERROR(("Peer Addr not provided, using default\n"));
+ } else {
+ /* get peer mac */
+ if (!bcm_ether_atoe(token, &val.peer)) {
+ ANDROID_ERROR(("%s : Malformed peer addr\n", __FUNCTION__));
+ bw = BCME_ERROR;
+ goto exit;
+ }
+ }
+
+ bw = bcm_pack_xtlv_entry(&rem, &rem_len, WL_TWT_CMD_INFO,
+ sizeof(val), (uint8 *)&val, BCM_XTLV_OPTION_ALIGN32);
+ if (bw != BCME_OK) {
+ goto exit;
+ }
+
+ bw = wldev_iovar_setbuf(ndev, "twt",
+ mybuf, sizeof(mybuf) - rem_len, res_buf, WLC_IOCTL_SMLEN, NULL);
+ if (bw < 0) {
+ ANDROID_ERROR(("twt teardown failed. ret:%d\n", bw));
+ }
+exit:
+ return bw;
+}
+
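+/*
+ * Handle the TWT_TEARDOWN private command. Space-separated arguments, in
+ * order: <negotiation type> <all-TWT flag> <flow ID> <broadcast ID>
+ * <peer MAC>; the first two are mandatory.
+ */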
+static int
+wl_android_twt_teardown(struct net_device *ndev, char *command, int total_len)
+{
+ wl_twt_teardown_t val;
+ s32 bw;
+ char *token, *pos;
+ u8 mybuf[WLC_IOCTL_SMLEN] = {0};
+ u8 res_buf[WLC_IOCTL_SMLEN] = {0};
+ uint8 *rem = mybuf;
+ uint16 rem_len = sizeof(mybuf);
+ int32 val32;
+
+ WL_DBG_MEM(("Enter. cmd:%s\n", command));
+
+ if (strlen(command) == strlen(CMD_TWT_TEARDOWN)) {
+ ANDROID_ERROR(("Error, twt teardown cmd missing params\n"));
+ bw = -EINVAL;
+ goto exit;
+ }
+
+ bzero(&val, sizeof(val));
+ val.version = WL_TWT_TEARDOWN_VER;
+ val.length = sizeof(val.version) + sizeof(val.length);
+
+ /* Default values, overridden below */
+ val.teardesc.flow_id = 0xFF;
+ val.teardesc.bid = 0xFF;
+
+ pos = command + sizeof(CMD_TWT_TEARDOWN);
+
+ /* negotiation_type */
+ token = strsep((char**)&pos, " ");
+ if (!token) {
+ ANDROID_ERROR(("Mandatory param negotiation type not present\n"));
+ bw = -EINVAL;
+ goto exit;
+ }
+ val.teardesc.negotiation_type = htod32((u32)bcm_atoi(token));
+
+ /* (all TWT) */
+ token = strsep((char**)&pos, " ");
+ if (!token) {
+ ANDROID_ERROR(("Mandatory all TWT type not present\n"));
+ bw = -EINVAL;
+ goto exit;
+ }
+ val.teardesc.alltwt = htod32((u32)bcm_atoi(token));
+
+ /* Flow ID */
+ token = strsep((char**)&pos, " ");
+ if (!token) {
+ ANDROID_ERROR(("flow ID not provided, using default\n"));
+ } else {
+ val32 = htod32((u32)bcm_atoi(token));
+ if (val32 != -1) {
+ val.teardesc.flow_id = val32;
+ }
+ }
+
+ /* Broadcast ID */
+ token = strsep((char**)&pos, " ");
+ if (!token) {
+ ANDROID_ERROR(("Broadcast ID not provided, using default\n"));
+ } else {
+ val32 = htod32((u32)bcm_atoi(token));
+ if (val32 != -1) {
+ val.teardesc.bid = val32;
+ }
+ }
+
+ /* peer_address */
+ token = strsep((char**)&pos, " ");
+ if (!token) {
+ ANDROID_ERROR(("Peer Addr not provided, using default\n"));
+ } else {
+ /* get peer mac */
+ if (!bcm_ether_atoe(token, &val.peer)) {
+ ANDROID_ERROR(("%s : Malformed peer addr\n", __FUNCTION__));
+ bw = BCME_ERROR;
+ goto exit;
+ }
+ }
+
+ bw = bcm_pack_xtlv_entry(&rem, &rem_len, WL_TWT_CMD_TEARDOWN,
+ sizeof(val), (uint8 *)&val, BCM_XTLV_OPTION_ALIGN32);
+ if (bw != BCME_OK) {
+ goto exit;
+ }
+
+ bw = wldev_iovar_setbuf(ndev, "twt",
+ mybuf, sizeof(mybuf) - rem_len, res_buf, WLC_IOCTL_SMLEN, NULL);
+ if (bw < 0) {
+ ANDROID_ERROR(("twt teardown failed. ret:%d\n", bw));
+ }
+exit:
+ return bw;
+}
+#endif /* WL_TWT */
+
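+/*
+ * Dispatch an Android "DRIVER" private command to its handler. Commands
+ * are matched by case-insensitive prefix; apart from START and SETFWPATH,
+ * all commands are ignored while the interface is down (!g_wifi_on).
+ */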
+int
+wl_handle_private_cmd(struct net_device *net, char *command, u32 cmd_len)
+{
+ int bytes_written = 0;
+ android_wifi_priv_cmd priv_cmd;
+
+ bzero(&priv_cmd, sizeof(android_wifi_priv_cmd));
+ priv_cmd.total_len = cmd_len;
+
+ if (strnicmp(command, CMD_START, strlen(CMD_START)) == 0) {
+ ANDROID_INFO(("%s, Received regular START command\n", __FUNCTION__));
+#ifdef SUPPORT_DEEP_SLEEP
+ trigger_deep_sleep = 1;
+#else
+#ifdef BT_OVER_SDIO
+ bytes_written = dhd_net_bus_get(net);
+#else
+ bytes_written = wl_android_wifi_on(net);
+#endif /* BT_OVER_SDIO */
+#endif /* SUPPORT_DEEP_SLEEP */
+ }
+ else if (strnicmp(command, CMD_SETFWPATH, strlen(CMD_SETFWPATH)) == 0) {
+ bytes_written = wl_android_set_fwpath(net, command, priv_cmd.total_len);
+ }
+
+ if (!g_wifi_on) {
+ ANDROID_ERROR(("%s: Ignore private cmd \"%s\" - iface is down\n",
+ __FUNCTION__, command));
+ return 0;
+ }
+
+ if (strnicmp(command, CMD_STOP, strlen(CMD_STOP)) == 0) {
+#ifdef SUPPORT_DEEP_SLEEP
+ trigger_deep_sleep = 1;
+#else
+#ifdef BT_OVER_SDIO
+ bytes_written = dhd_net_bus_put(net);
+#else
+ bytes_written = wl_android_wifi_off(net, FALSE);
+#endif /* BT_OVER_SDIO */
+#endif /* SUPPORT_DEEP_SLEEP */
+ }
+#ifdef WL_CFG80211
+ else if (strnicmp(command, CMD_SCAN_ACTIVE, strlen(CMD_SCAN_ACTIVE)) == 0) {
+ wl_cfg80211_set_passive_scan(net, command);
+ }
+ else if (strnicmp(command, CMD_SCAN_PASSIVE, strlen(CMD_SCAN_PASSIVE)) == 0) {
+ wl_cfg80211_set_passive_scan(net, command);
+ }
+#endif /* WL_CFG80211 */
+ else if (strnicmp(command, CMD_RSSI, strlen(CMD_RSSI)) == 0) {
+ bytes_written = wl_android_get_rssi(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_LINKSPEED, strlen(CMD_LINKSPEED)) == 0) {
+ bytes_written = wl_android_get_link_speed(net, command, priv_cmd.total_len);
+ }
+#ifdef PKT_FILTER_SUPPORT
+ else if (strnicmp(command, CMD_RXFILTER_START, strlen(CMD_RXFILTER_START)) == 0) {
+ bytes_written = net_os_enable_packet_filter(net, 1);
+ }
+ else if (strnicmp(command, CMD_RXFILTER_STOP, strlen(CMD_RXFILTER_STOP)) == 0) {
+ bytes_written = net_os_enable_packet_filter(net, 0);
+ }
+ else if (strnicmp(command, CMD_RXFILTER_ADD, strlen(CMD_RXFILTER_ADD)) == 0) {
+ int filter_num = *(command + strlen(CMD_RXFILTER_ADD) + 1) - '0';
+ bytes_written = net_os_rxfilter_add_remove(net, TRUE, filter_num);
+ }
+ else if (strnicmp(command, CMD_RXFILTER_REMOVE, strlen(CMD_RXFILTER_REMOVE)) == 0) {
+ int filter_num = *(command + strlen(CMD_RXFILTER_REMOVE) + 1) - '0';
+ bytes_written = net_os_rxfilter_add_remove(net, FALSE, filter_num);
+ }
+#endif /* PKT_FILTER_SUPPORT */
+ else if (strnicmp(command, CMD_BTCOEXSCAN_START, strlen(CMD_BTCOEXSCAN_START)) == 0) {
+ /* TBD: BTCOEXSCAN-START */
+ }
+ else if (strnicmp(command, CMD_BTCOEXSCAN_STOP, strlen(CMD_BTCOEXSCAN_STOP)) == 0) {
+ /* TBD: BTCOEXSCAN-STOP */
+ }
+ else if (strnicmp(command, CMD_BTCOEXMODE, strlen(CMD_BTCOEXMODE)) == 0) {
+#ifdef WL_CFG80211
+ void *dhdp = wl_cfg80211_get_dhdp(net);
+ bytes_written = wl_cfg80211_set_btcoex_dhcp(net, dhdp, command);
+#else
+#ifdef PKT_FILTER_SUPPORT
+ uint mode = *(command + strlen(CMD_BTCOEXMODE) + 1) - '0';
+
+ if (mode == 1)
+ net_os_enable_packet_filter(net, 0); /* DHCP starts */
+ else
+ net_os_enable_packet_filter(net, 1); /* DHCP ends */
+#endif /* PKT_FILTER_SUPPORT */
+#endif /* WL_CFG80211 */
+ }
+ else if (strnicmp(command, CMD_SETSUSPENDOPT, strlen(CMD_SETSUSPENDOPT)) == 0) {
+ bytes_written = wl_android_set_suspendopt(net, command);
+ }
+ else if (strnicmp(command, CMD_SETSUSPENDMODE, strlen(CMD_SETSUSPENDMODE)) == 0) {
+ bytes_written = wl_android_set_suspendmode(net, command);
+ }
+ else if (strnicmp(command, CMD_SETDTIM_IN_SUSPEND, strlen(CMD_SETDTIM_IN_SUSPEND)) == 0) {
+ bytes_written = wl_android_set_bcn_li_dtim(net, command);
+ }
+ else if (strnicmp(command, CMD_MAXDTIM_IN_SUSPEND, strlen(CMD_MAXDTIM_IN_SUSPEND)) == 0) {
+ bytes_written = wl_android_set_max_dtim(net, command);
+ }
+#ifdef DISABLE_DTIM_IN_SUSPEND
+ else if (strnicmp(command, CMD_DISDTIM_IN_SUSPEND, strlen(CMD_DISDTIM_IN_SUSPEND)) == 0) {
+ bytes_written = wl_android_set_disable_dtim_in_suspend(net, command);
+ }
+#endif /* DISABLE_DTIM_IN_SUSPEND */
+#ifdef WL_CFG80211
+ else if (strnicmp(command, CMD_SETBAND, strlen(CMD_SETBAND)) == 0) {
+ bytes_written = wl_android_set_band(net, command);
+ }
+#endif /* WL_CFG80211 */
+ else if (strnicmp(command, CMD_GETBAND, strlen(CMD_GETBAND)) == 0) {
+ bytes_written = wl_android_get_band(net, command, priv_cmd.total_len);
+ }
+#ifdef WL_CFG80211
+ else if (strnicmp(command, CMD_SET_CSA, strlen(CMD_SET_CSA)) == 0) {
+ bytes_written = wl_android_set_csa(net, command);
+ } else if (strnicmp(command, CMD_80211_MODE, strlen(CMD_80211_MODE)) == 0) {
+ bytes_written = wl_android_get_80211_mode(net, command, priv_cmd.total_len);
+ } else if (strnicmp(command, CMD_CHANSPEC, strlen(CMD_CHANSPEC)) == 0) {
+ bytes_written = wl_android_get_chanspec(net, command, priv_cmd.total_len);
+ }
+#endif /* WL_CFG80211 */
+#ifndef CUSTOMER_SET_COUNTRY
+ /* The CUSTOMER_SET_COUNTRY feature is defined only for the GGSM model */
+ else if (strnicmp(command, CMD_COUNTRY, strlen(CMD_COUNTRY)) == 0) {
+ /*
+ * Usage examples:
+ * DRIVER COUNTRY US
+ * DRIVER COUNTRY US/7
+ * Wrong revinfo should be filtered:
+ * DRIVER COUNTRY US/-1
+ */
+ char *country_code = command + strlen(CMD_COUNTRY) + 1;
+ char *rev_info_delim = country_code + 2; /* 2 bytes of country code */
+ int revinfo = -1;
+#if defined(DHD_BLOB_EXISTENCE_CHECK)
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(net);
+#endif /* DHD_BLOB_EXISTENCE_CHECK */
+ if ((rev_info_delim) &&
+ (strnicmp(rev_info_delim, CMD_COUNTRY_DELIMITER,
+ strlen(CMD_COUNTRY_DELIMITER)) == 0) &&
+ (rev_info_delim + 1)) {
+ revinfo = bcm_atoi(rev_info_delim + 1);
+ } else {
+ revinfo = 0;
+ }
+
+ if (revinfo < 0) {
+ ANDROID_ERROR(("%s:failed due to wrong revinfo %d\n", __FUNCTION__, revinfo));
+ return BCME_BADARG;
+ }
+
+#if defined(DHD_BLOB_EXISTENCE_CHECK)
+ if (dhdp->is_blob) {
+ revinfo = 0;
+ }
+#endif /* DHD_BLOB_EXISTENCE_CHECK */
+
+#ifdef WL_CFG80211
+ bytes_written = wl_cfg80211_set_country_code(net, country_code,
+ true, true, revinfo);
+#ifdef CUSTOMER_HW4_PRIVATE_CMD
+#ifdef FCC_PWR_LIMIT_2G
+ if (wldev_iovar_setint(net, "fccpwrlimit2g", FALSE)) {
+ ANDROID_ERROR(("%s: fccpwrlimit2g deactivation is failed\n", __FUNCTION__));
+ } else {
+ ANDROID_ERROR(("%s: fccpwrlimit2g is deactivated\n", __FUNCTION__));
+ }
+#endif /* FCC_PWR_LIMIT_2G */
+#endif /* CUSTOMER_HW4_PRIVATE_CMD */
+#else
+ bytes_written = wldev_set_country(net, country_code, true, true, revinfo);
+#endif /* WL_CFG80211 */
+ }
+#endif /* CUSTOMER_SET_COUNTRY */
+ else if (strnicmp(command, CMD_DATARATE, strlen(CMD_DATARATE)) == 0) {
+ bytes_written = wl_android_get_datarate(net, command, priv_cmd.total_len);
+ } else if (strnicmp(command, CMD_ASSOC_CLIENTS, strlen(CMD_ASSOC_CLIENTS)) == 0) {
+ bytes_written = wl_android_get_assoclist(net, command, priv_cmd.total_len);
+ }
+
+#ifdef CUSTOMER_HW4_PRIVATE_CMD
+ else if (strnicmp(command, CMD_ROAM_VSIE_ENAB_SET, strlen(CMD_ROAM_VSIE_ENAB_SET)) == 0) {
+ bytes_written = wl_android_set_roam_vsie_enab(net, command, priv_cmd.total_len);
+ } else if (strnicmp(command, CMD_ROAM_VSIE_ENAB_GET, strlen(CMD_ROAM_VSIE_ENAB_GET)) == 0) {
+ bytes_written = wl_android_get_roam_vsie_enab(net, command, priv_cmd.total_len);
+ } else if (strnicmp(command, CMD_BR_VSIE_ENAB_SET, strlen(CMD_BR_VSIE_ENAB_SET)) == 0) {
+ bytes_written = wl_android_set_bcn_rpt_vsie_enab(net, command, priv_cmd.total_len);
+ } else if (strnicmp(command, CMD_BR_VSIE_ENAB_GET, strlen(CMD_BR_VSIE_ENAB_GET)) == 0) {
+ bytes_written = wl_android_get_bcn_rpt_vsie_enab(net, command, priv_cmd.total_len);
+ }
+#ifdef WES_SUPPORT
+ else if (strnicmp(command, CMD_GETNCHOMODE, strlen(CMD_GETNCHOMODE)) == 0) {
+ bytes_written = wl_android_get_ncho_mode(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_SETNCHOMODE, strlen(CMD_SETNCHOMODE)) == 0) {
+ int mode;
+ sscanf(command, "%*s %d", &mode);
+ bytes_written = wl_android_set_ncho_mode(net, mode);
+ }
+ else if (strnicmp(command, CMD_OKC_SET_PMK, strlen(CMD_OKC_SET_PMK)) == 0) {
+ bytes_written = wl_android_set_pmk(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_OKC_ENABLE, strlen(CMD_OKC_ENABLE)) == 0) {
+ bytes_written = wl_android_okc_enable(net, command);
+ }
+ else if (wl_android_legacy_check_command(net, command)) {
+ bytes_written = wl_android_legacy_private_command(net, command, priv_cmd.total_len);
+ }
+ else if (wl_android_ncho_check_command(net, command)) {
+ bytes_written = wl_android_ncho_private_command(net, command, priv_cmd.total_len);
+ }
+#endif /* WES_SUPPORT */
+#if defined(SUPPORT_RESTORE_SCAN_PARAMS) || defined(WES_SUPPORT)
+ else if (strnicmp(command, CMD_RESTORE_SCAN_PARAMS, strlen(CMD_RESTORE_SCAN_PARAMS)) == 0) {
+ bytes_written = wl_android_default_set_scan_params(net, command,
+ priv_cmd.total_len);
+ }
+#endif /* SUPPORT_RESTORE_SCAN_PARAMS || WES_SUPPORT */
+#ifdef WLTDLS
+ else if (strnicmp(command, CMD_TDLS_RESET, strlen(CMD_TDLS_RESET)) == 0) {
+ bytes_written = wl_android_tdls_reset(net);
+ }
+#endif /* WLTDLS */
+#ifdef CONFIG_SILENT_ROAM
+ else if (strnicmp(command, CMD_SROAM_TURN_ON, strlen(CMD_SROAM_TURN_ON)) == 0) {
+ int mode = *(command + strlen(CMD_SROAM_TURN_ON) + 1) - '0';
+ bytes_written = wl_android_sroam_turn_on(net, mode);
+ }
+ else if (strnicmp(command, CMD_SROAM_SET_INFO, strlen(CMD_SROAM_SET_INFO)) == 0) {
+ char *data = (command + strlen(CMD_SROAM_SET_INFO) + 1);
+ bytes_written = wl_android_sroam_set_info(net, data, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_SROAM_GET_INFO, strlen(CMD_SROAM_GET_INFO)) == 0) {
+ bytes_written = wl_android_sroam_get_info(net, command, priv_cmd.total_len);
+ }
+#endif /* CONFIG_SILENT_ROAM */
+#ifdef CONFIG_ROAM_RSSI_LIMIT
+ else if (strnicmp(command, CMD_ROAM_RSSI_LMT, strlen(CMD_ROAM_RSSI_LMT)) == 0) {
+ bytes_written = wl_android_roam_rssi_limit(net, command, priv_cmd.total_len);
+ }
+#endif /* CONFIG_ROAM_RSSI_LIMIT */
+#ifdef CONFIG_ROAM_MIN_DELTA
+ else if (strnicmp(command, CMD_ROAM_MIN_DELTA, strlen(CMD_ROAM_MIN_DELTA)) == 0) {
+ bytes_written = wl_android_roam_min_delta(net, command, priv_cmd.total_len);
+ }
+#endif /* CONFIG_ROAM_MIN_DELTA */
+ else if (strnicmp(command, CMD_SET_DISCONNECT_IES, strlen(CMD_SET_DISCONNECT_IES)) == 0) {
+ bytes_written = wl_android_set_disconnect_ies(net, command);
+ }
+#endif /* CUSTOMER_HW4_PRIVATE_CMD */
+
+#ifdef PNO_SUPPORT
+ else if (strnicmp(command, CMD_PNOSSIDCLR_SET, strlen(CMD_PNOSSIDCLR_SET)) == 0) {
+ bytes_written = dhd_dev_pno_stop_for_ssid(net);
+ }
+#ifndef WL_SCHED_SCAN
+ else if (strnicmp(command, CMD_PNOSETUP_SET, strlen(CMD_PNOSETUP_SET)) == 0) {
+ bytes_written = wl_android_set_pno_setup(net, command, priv_cmd.total_len);
+ }
+#endif /* !WL_SCHED_SCAN */
+ else if (strnicmp(command, CMD_PNOENABLE_SET, strlen(CMD_PNOENABLE_SET)) == 0) {
+ int enable = *(command + strlen(CMD_PNOENABLE_SET) + 1) - '0';
+ bytes_written = (enable)? 0 : dhd_dev_pno_stop_for_ssid(net);
+ }
+ else if (strnicmp(command, CMD_WLS_BATCHING, strlen(CMD_WLS_BATCHING)) == 0) {
+ bytes_written = wls_parse_batching_cmd(net, command, priv_cmd.total_len);
+ }
+#endif /* PNO_SUPPORT */
+ else if (strnicmp(command, CMD_P2P_DEV_ADDR, strlen(CMD_P2P_DEV_ADDR)) == 0) {
+ bytes_written = wl_android_get_p2p_dev_addr(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_P2P_SET_NOA, strlen(CMD_P2P_SET_NOA)) == 0) {
+ int skip = strlen(CMD_P2P_SET_NOA) + 1;
+ bytes_written = wl_cfg80211_set_p2p_noa(net, command + skip,
+ priv_cmd.total_len - skip);
+ }
+#ifdef WL_SDO
+ else if (strnicmp(command, CMD_P2P_SD_OFFLOAD, strlen(CMD_P2P_SD_OFFLOAD)) == 0) {
+ u8 *buf = command;
+ u8 *cmd_id = NULL;
+ int len;
+
+ cmd_id = strsep((char **)&buf, " ");
+ if (!cmd_id) {
+ /* Propagate the error */
+ bytes_written = -EINVAL;
+ } else {
+ /* if buf == NULL, means no arg */
+ if (buf == NULL) {
+ len = 0;
+ } else {
+ len = strlen(buf);
+ }
+ bytes_written = wl_cfg80211_sd_offload(net, cmd_id, buf, len);
+ }
+ }
+#endif /* WL_SDO */
+#ifdef P2P_LISTEN_OFFLOADING
+ else if (strnicmp(command, CMD_P2P_LISTEN_OFFLOAD, strlen(CMD_P2P_LISTEN_OFFLOAD)) == 0) {
+ u8 *sub_command = strchr(command, ' ');
+ bytes_written = wl_cfg80211_p2plo_offload(net, command, sub_command,
+ sub_command ? strlen(sub_command) : 0);
+ }
+#endif /* P2P_LISTEN_OFFLOADING */
+#if !defined(WL_ENABLE_P2P_IF)
+ else if (strnicmp(command, CMD_P2P_GET_NOA, strlen(CMD_P2P_GET_NOA)) == 0) {
+ bytes_written = wl_cfg80211_get_p2p_noa(net, command, priv_cmd.total_len);
+ }
+#endif /* WL_ENABLE_P2P_IF */
+ else if (strnicmp(command, CMD_P2P_SET_PS, strlen(CMD_P2P_SET_PS)) == 0) {
+ int skip = strlen(CMD_P2P_SET_PS) + 1;
+ bytes_written = wl_cfg80211_set_p2p_ps(net, command + skip,
+ priv_cmd.total_len - skip);
+ }
+ else if (strnicmp(command, CMD_P2P_ECSA, strlen(CMD_P2P_ECSA)) == 0) {
+ int skip = strlen(CMD_P2P_ECSA) + 1;
+ bytes_written = wl_cfg80211_set_p2p_ecsa(net, command + skip,
+ priv_cmd.total_len - skip);
+ }
+ /* This command is not for normal VSDB operation but only for specific
+ * P2P operations, e.g. P2P OTA backup.
+ */
+ else if (strnicmp(command, CMD_P2P_INC_BW, strlen(CMD_P2P_INC_BW)) == 0) {
+ int skip = strlen(CMD_P2P_INC_BW) + 1;
+ bytes_written = wl_cfg80211_increase_p2p_bw(net,
+ command + skip, priv_cmd.total_len - skip);
+ }
+#ifdef WL_CFG80211
+ else if (strnicmp(command, CMD_SET_AP_WPS_P2P_IE,
+ strlen(CMD_SET_AP_WPS_P2P_IE)) == 0) {
+ int skip = strlen(CMD_SET_AP_WPS_P2P_IE) + 3;
+ bytes_written = wl_cfg80211_set_wps_p2p_ie(net, command + skip,
+ priv_cmd.total_len - skip, *(command + skip - 2) - '0');
+ }
+#ifdef WLFBT
+ else if (strnicmp(command, CMD_GET_FTKEY, strlen(CMD_GET_FTKEY)) == 0) {
+ bytes_written = wl_cfg80211_get_fbt_key(net, command, priv_cmd.total_len);
+ }
+#endif /* WLFBT */
+#endif /* WL_CFG80211 */
+#if defined (WL_SUPPORT_AUTO_CHANNEL)
+ else if (strnicmp(command, CMD_GET_BEST_CHANNELS,
+ strlen(CMD_GET_BEST_CHANNELS)) == 0) {
+ bytes_written = wl_android_get_best_channels(net, command,
+ priv_cmd.total_len);
+ }
+#endif /* WL_SUPPORT_AUTO_CHANNEL */
+#if defined (WL_SUPPORT_AUTO_CHANNEL)
+ else if (strnicmp(command, CMD_SET_HAPD_AUTO_CHANNEL,
+ strlen(CMD_SET_HAPD_AUTO_CHANNEL)) == 0) {
+ int skip = strlen(CMD_SET_HAPD_AUTO_CHANNEL) + 1;
+ bytes_written = wl_android_set_auto_channel(net, (const char*)command+skip, command,
+ priv_cmd.total_len);
+ }
+#endif /* WL_SUPPORT_AUTO_CHANNEL */
+#ifdef CUSTOMER_HW4_PRIVATE_CMD
+#ifdef SUPPORT_AMPDU_MPDU_CMD
+ /* CMD_AMPDU_MPDU */
+ else if (strnicmp(command, CMD_AMPDU_MPDU, strlen(CMD_AMPDU_MPDU)) == 0) {
+ int skip = strlen(CMD_AMPDU_MPDU) + 1;
+ bytes_written = wl_android_set_ampdu_mpdu(net, (const char*)command+skip);
+ }
+#endif /* SUPPORT_AMPDU_MPDU_CMD */
+#if defined (SUPPORT_HIDDEN_AP)
+ else if (strnicmp(command, CMD_SET_HAPD_MAX_NUM_STA,
+ strlen(CMD_SET_HAPD_MAX_NUM_STA)) == 0) {
+ int skip = strlen(CMD_SET_HAPD_MAX_NUM_STA) + 3;
+ wl_android_set_max_num_sta(net, (const char*)command+skip);
+ }
+ else if (strnicmp(command, CMD_SET_HAPD_SSID,
+ strlen(CMD_SET_HAPD_SSID)) == 0) {
+ int skip = strlen(CMD_SET_HAPD_SSID) + 3;
+ wl_android_set_ssid(net, (const char*)command+skip);
+ }
+ else if (strnicmp(command, CMD_SET_HAPD_HIDE_SSID,
+ strlen(CMD_SET_HAPD_HIDE_SSID)) == 0) {
+ int skip = strlen(CMD_SET_HAPD_HIDE_SSID) + 3;
+ wl_android_set_hide_ssid(net, (const char*)command+skip);
+ }
+#endif /* SUPPORT_HIDDEN_AP */
+#ifdef SUPPORT_SOFTAP_SINGL_DISASSOC
+ else if (strnicmp(command, CMD_HAPD_STA_DISASSOC,
+ strlen(CMD_HAPD_STA_DISASSOC)) == 0) {
+ int skip = strlen(CMD_HAPD_STA_DISASSOC) + 1;
+ wl_android_sta_diassoc(net, (const char*)command+skip);
+ }
+#endif /* SUPPORT_SOFTAP_SINGL_DISASSOC */
+#ifdef SUPPORT_SET_LPC
+ else if (strnicmp(command, CMD_HAPD_LPC_ENABLED,
+ strlen(CMD_HAPD_LPC_ENABLED)) == 0) {
+ int skip = strlen(CMD_HAPD_LPC_ENABLED) + 3;
+ wl_android_set_lpc(net, (const char*)command+skip);
+ }
+#endif /* SUPPORT_SET_LPC */
+#ifdef SUPPORT_TRIGGER_HANG_EVENT
+ else if (strnicmp(command, CMD_TEST_FORCE_HANG,
+ strlen(CMD_TEST_FORCE_HANG)) == 0) {
+ int skip = strlen(CMD_TEST_FORCE_HANG) + 1;
+ net_os_send_hang_message_reason(net, (const char*)command+skip);
+ }
+#endif /* SUPPORT_TRIGGER_HANG_EVENT */
+ else if (strnicmp(command, CMD_CHANGE_RL, strlen(CMD_CHANGE_RL)) == 0)
+ bytes_written = wl_android_ch_res_rl(net, true);
+ else if (strnicmp(command, CMD_RESTORE_RL, strlen(CMD_RESTORE_RL)) == 0)
+ bytes_written = wl_android_ch_res_rl(net, false);
+#ifdef SUPPORT_LTECX
+ else if (strnicmp(command, CMD_LTECX_SET, strlen(CMD_LTECX_SET)) == 0) {
+ int skip = strlen(CMD_LTECX_SET) + 1;
+ bytes_written = wl_android_set_ltecx(net, (const char*)command+skip);
+ }
+#endif /* SUPPORT_LTECX */
+#ifdef WL_RELMCAST
+ else if (strnicmp(command, CMD_SET_RMC_ENABLE, strlen(CMD_SET_RMC_ENABLE)) == 0) {
+ int rmc_enable = *(command + strlen(CMD_SET_RMC_ENABLE) + 1) - '0';
+ bytes_written = wl_android_rmc_enable(net, rmc_enable);
+ }
+ else if (strnicmp(command, CMD_SET_RMC_TXRATE, strlen(CMD_SET_RMC_TXRATE)) == 0) {
+ int rmc_txrate;
+ sscanf(command, "%*s %10d", &rmc_txrate);
+ bytes_written = wldev_iovar_setint(net, "rmc_txrate", rmc_txrate * 2);
+ }
+ else if (strnicmp(command, CMD_SET_RMC_ACTPERIOD, strlen(CMD_SET_RMC_ACTPERIOD)) == 0) {
+ int actperiod;
+ sscanf(command, "%*s %10d", &actperiod);
+ bytes_written = wldev_iovar_setint(net, "rmc_actf_time", actperiod);
+ }
+ else if (strnicmp(command, CMD_SET_RMC_IDLEPERIOD, strlen(CMD_SET_RMC_IDLEPERIOD)) == 0) {
+ int acktimeout;
+ sscanf(command, "%*s %10d", &acktimeout);
+ acktimeout *= 1000;
+ bytes_written = wldev_iovar_setint(net, "rmc_acktmo", acktimeout);
+ }
+ else if (strnicmp(command, CMD_SET_RMC_LEADER, strlen(CMD_SET_RMC_LEADER)) == 0) {
+ int skip = strlen(CMD_SET_RMC_LEADER) + 1;
+ bytes_written = wl_android_rmc_set_leader(net, (const char*)command+skip);
+ }
+ else if (strnicmp(command, CMD_SET_RMC_EVENT,
+ strlen(CMD_SET_RMC_EVENT)) == 0) {
+ bytes_written = wl_android_set_rmc_event(net, command);
+ }
+#endif /* WL_RELMCAST */
+ else if (strnicmp(command, CMD_GET_SCSCAN, strlen(CMD_GET_SCSCAN)) == 0) {
+ bytes_written = wl_android_get_singlecore_scan(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_SET_SCSCAN, strlen(CMD_SET_SCSCAN)) == 0) {
+ bytes_written = wl_android_set_singlecore_scan(net, command);
+ }
+#ifdef TEST_TX_POWER_CONTROL
+ else if (strnicmp(command, CMD_TEST_SET_TX_POWER,
+ strlen(CMD_TEST_SET_TX_POWER)) == 0) {
+ int skip = strlen(CMD_TEST_SET_TX_POWER) + 1;
+ wl_android_set_tx_power(net, (const char*)command+skip);
+ }
+ else if (strnicmp(command, CMD_TEST_GET_TX_POWER,
+ strlen(CMD_TEST_GET_TX_POWER)) == 0) {
+ wl_android_get_tx_power(net, command, priv_cmd.total_len);
+ }
+#endif /* TEST_TX_POWER_CONTROL */
+ else if (strnicmp(command, CMD_SARLIMIT_TX_CONTROL,
+ strlen(CMD_SARLIMIT_TX_CONTROL)) == 0) {
+ int skip = strlen(CMD_SARLIMIT_TX_CONTROL) + 1;
+ bytes_written = wl_android_set_sarlimit_txctrl(net, (const char*)command+skip);
+ }
+#ifdef SUPPORT_SET_TID
+ else if (strnicmp(command, CMD_SET_TID, strlen(CMD_SET_TID)) == 0) {
+ bytes_written = wl_android_set_tid(net, command);
+ }
+ else if (strnicmp(command, CMD_GET_TID, strlen(CMD_GET_TID)) == 0) {
+ bytes_written = wl_android_get_tid(net, command, priv_cmd.total_len);
+ }
+#endif /* SUPPORT_SET_TID */
+#ifdef WL_WTC
+ else if (strnicmp(command, CMD_WTC_CONFIG, strlen(CMD_WTC_CONFIG)) == 0) {
+ bytes_written = wl_android_wtc_config(net, command, priv_cmd.total_len);
+ }
+#endif /* WL_WTC */
+#endif /* CUSTOMER_HW4_PRIVATE_CMD */
+ else if (strnicmp(command, CMD_HAPD_MAC_FILTER, strlen(CMD_HAPD_MAC_FILTER)) == 0) {
+ int skip = strlen(CMD_HAPD_MAC_FILTER) + 1;
+ wl_android_set_mac_address_filter(net, command+skip);
+ }
+ else if (strnicmp(command, CMD_SETROAMMODE, strlen(CMD_SETROAMMODE)) == 0)
+ bytes_written = wl_android_set_roam_mode(net, command);
+#if defined(BCMFW_ROAM_ENABLE)
+ else if (strnicmp(command, CMD_SET_ROAMPREF, strlen(CMD_SET_ROAMPREF)) == 0) {
+ bytes_written = wl_android_set_roampref(net, command, priv_cmd.total_len);
+ }
+#endif /* BCMFW_ROAM_ENABLE */
+#ifdef WL_CFG80211
+ else if (strnicmp(command, CMD_MIRACAST, strlen(CMD_MIRACAST)) == 0)
+ bytes_written = wl_android_set_miracast(net, command);
+ else if (strnicmp(command, CMD_SETIBSSBEACONOUIDATA, strlen(CMD_SETIBSSBEACONOUIDATA)) == 0)
+ bytes_written = wl_android_set_ibss_beacon_ouidata(net,
+ command, priv_cmd.total_len);
+#endif /* WL_CFG80211 */
+#ifdef WLAIBSS
+ else if (strnicmp(command, CMD_SETIBSSTXFAILEVENT,
+ strlen(CMD_SETIBSSTXFAILEVENT)) == 0)
+ bytes_written = wl_android_set_ibss_txfail_event(net, command, priv_cmd.total_len);
+ else if (strnicmp(command, CMD_GET_IBSS_PEER_INFO_ALL,
+ strlen(CMD_GET_IBSS_PEER_INFO_ALL)) == 0)
+ bytes_written = wl_android_get_ibss_peer_info(net, command, priv_cmd.total_len,
+ TRUE);
+ else if (strnicmp(command, CMD_GET_IBSS_PEER_INFO,
+ strlen(CMD_GET_IBSS_PEER_INFO)) == 0)
+ bytes_written = wl_android_get_ibss_peer_info(net, command, priv_cmd.total_len,
+ FALSE);
+ else if (strnicmp(command, CMD_SETIBSSROUTETABLE,
+ strlen(CMD_SETIBSSROUTETABLE)) == 0)
+ bytes_written = wl_android_set_ibss_routetable(net, command);
+ else if (strnicmp(command, CMD_SETIBSSAMPDU, strlen(CMD_SETIBSSAMPDU)) == 0)
+ bytes_written = wl_android_set_ibss_ampdu(net, command, priv_cmd.total_len);
+ else if (strnicmp(command, CMD_SETIBSSANTENNAMODE, strlen(CMD_SETIBSSANTENNAMODE)) == 0)
+ bytes_written = wl_android_set_ibss_antenna(net, command, priv_cmd.total_len);
+#endif /* WLAIBSS */
+ else if (strnicmp(command, CMD_KEEP_ALIVE, strlen(CMD_KEEP_ALIVE)) == 0) {
+ int skip = strlen(CMD_KEEP_ALIVE) + 1;
+ bytes_written = wl_keep_alive_set(net, command + skip);
+ }
+#ifdef WL_CFG80211
+ else if (strnicmp(command, CMD_ROAM_OFFLOAD, strlen(CMD_ROAM_OFFLOAD)) == 0) {
+ int enable = *(command + strlen(CMD_ROAM_OFFLOAD) + 1) - '0';
+ bytes_written = wl_cfg80211_enable_roam_offload(net, enable);
+ }
+ else if (strnicmp(command, CMD_INTERFACE_CREATE, strlen(CMD_INTERFACE_CREATE)) == 0) {
+ char *name = (command + strlen(CMD_INTERFACE_CREATE) +1);
+ ANDROID_INFO(("Creating %s interface\n", name));
+ if (wl_cfg80211_add_if(wl_get_cfg(net), net, WL_IF_TYPE_STA,
+ name, NULL) == NULL) {
+ bytes_written = -ENODEV;
+ } else {
+ /* Return success */
+ bytes_written = 0;
+ }
+ }
+ else if (strnicmp(command, CMD_INTERFACE_DELETE, strlen(CMD_INTERFACE_DELETE)) == 0) {
+ char *name = (command + strlen(CMD_INTERFACE_DELETE) +1);
+ ANDROID_INFO(("Deleteing %s interface\n", name));
+ bytes_written = wl_cfg80211_del_if(wl_get_cfg(net), net, NULL, name);
+ }
+#endif /* WL_CFG80211 */
+ else if (strnicmp(command, CMD_GET_LINK_STATUS, strlen(CMD_GET_LINK_STATUS)) == 0) {
+ bytes_written = wl_android_get_link_status(net, command, priv_cmd.total_len);
+ }
+#ifdef P2PRESP_WFDIE_SRC
+ else if (strnicmp(command, CMD_P2P_SET_WFDIE_RESP,
+ strlen(CMD_P2P_SET_WFDIE_RESP)) == 0) {
+ int mode = *(command + strlen(CMD_P2P_SET_WFDIE_RESP) + 1) - '0';
+ bytes_written = wl_android_set_wfdie_resp(net, mode);
+ } else if (strnicmp(command, CMD_P2P_GET_WFDIE_RESP,
+ strlen(CMD_P2P_GET_WFDIE_RESP)) == 0) {
+ bytes_written = wl_android_get_wfdie_resp(net, command, priv_cmd.total_len);
+ }
+#endif /* P2PRESP_WFDIE_SRC */
+#ifdef WL_CFG80211
+ else if (strnicmp(command, CMD_DFS_AP_MOVE, strlen(CMD_DFS_AP_MOVE)) == 0) {
+ char *data = (command + strlen(CMD_DFS_AP_MOVE) +1);
+ bytes_written = wl_cfg80211_dfs_ap_move(net, data, command, priv_cmd.total_len);
+ }
+#endif /* WL_CFG80211 */
+#ifdef WBTEXT
+ else if (strnicmp(command, CMD_WBTEXT_ENABLE, strlen(CMD_WBTEXT_ENABLE)) == 0) {
+ bytes_written = wl_android_wbtext(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_WBTEXT_PROFILE_CONFIG,
+ strlen(CMD_WBTEXT_PROFILE_CONFIG)) == 0) {
+ char *data = (command + strlen(CMD_WBTEXT_PROFILE_CONFIG) + 1);
+ bytes_written = wl_cfg80211_wbtext_config(net, data, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_WBTEXT_WEIGHT_CONFIG,
+ strlen(CMD_WBTEXT_WEIGHT_CONFIG)) == 0) {
+ char *data = (command + strlen(CMD_WBTEXT_WEIGHT_CONFIG) + 1);
+ bytes_written = wl_cfg80211_wbtext_weight_config(net, data,
+ command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_WBTEXT_TABLE_CONFIG,
+ strlen(CMD_WBTEXT_TABLE_CONFIG)) == 0) {
+ char *data = (command + strlen(CMD_WBTEXT_TABLE_CONFIG) + 1);
+ bytes_written = wl_cfg80211_wbtext_table_config(net, data,
+ command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_WBTEXT_DELTA_CONFIG,
+ strlen(CMD_WBTEXT_DELTA_CONFIG)) == 0) {
+ char *data = (command + strlen(CMD_WBTEXT_DELTA_CONFIG) + 1);
+ bytes_written = wl_cfg80211_wbtext_delta_config(net, data,
+ command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_WBTEXT_BTM_TIMER_THRESHOLD,
+ strlen(CMD_WBTEXT_BTM_TIMER_THRESHOLD)) == 0) {
+ bytes_written = wl_cfg80211_wbtext_btm_timer_threshold(net, command,
+ priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_WBTEXT_BTM_DELTA,
+ strlen(CMD_WBTEXT_BTM_DELTA)) == 0) {
+ bytes_written = wl_cfg80211_wbtext_btm_delta(net, command,
+ priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_WBTEXT_ESTM_ENABLE,
+ strlen(CMD_WBTEXT_ESTM_ENABLE)) == 0) {
+ bytes_written = wl_cfg80211_wbtext_estm_enable(net, command,
+ priv_cmd.total_len);
+ }
+#endif /* WBTEXT */
+#ifdef WLWFDS
+ else if (strnicmp(command, CMD_ADD_WFDS_HASH, strlen(CMD_ADD_WFDS_HASH)) == 0) {
+ bytes_written = wl_android_set_wfds_hash(net, command, 1);
+ }
+ else if (strnicmp(command, CMD_DEL_WFDS_HASH, strlen(CMD_DEL_WFDS_HASH)) == 0) {
+ bytes_written = wl_android_set_wfds_hash(net, command, 0);
+ }
+#endif /* WLWFDS */
+#ifdef BT_WIFI_HANDOVER
+ else if (strnicmp(command, CMD_TBOW_TEARDOWN, strlen(CMD_TBOW_TEARDOWN)) == 0) {
+ bytes_written = wl_tbow_teardown(net);
+ }
+#endif /* BT_WIFI_HANDOVER */
+#ifdef CUSTOMER_HW4_PRIVATE_CMD
+#ifdef FCC_PWR_LIMIT_2G
+ else if (strnicmp(command, CMD_GET_FCC_PWR_LIMIT_2G,
+ strlen(CMD_GET_FCC_PWR_LIMIT_2G)) == 0) {
+ bytes_written = wl_android_get_fcc_pwr_limit_2g(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_SET_FCC_PWR_LIMIT_2G,
+ strlen(CMD_SET_FCC_PWR_LIMIT_2G)) == 0) {
+ bytes_written = wl_android_set_fcc_pwr_limit_2g(net, command);
+ }
+#endif /* FCC_PWR_LIMIT_2G */
+ else if (strnicmp(command, CMD_GET_STA_INFO, strlen(CMD_GET_STA_INFO)) == 0) {
+ bytes_written = wl_cfg80211_get_sta_info(net, command, priv_cmd.total_len);
+ }
+#endif /* CUSTOMER_HW4_PRIVATE_CMD */
+ else if (strnicmp(command, CMD_MURX_BFE_CAP,
+ strlen(CMD_MURX_BFE_CAP)) == 0) {
+#ifdef WL_MURX
+ uint val = *(command + strlen(CMD_MURX_BFE_CAP) + 1) - '0';
+ bytes_written = wl_android_murx_bfe_cap(net, val);
+#else
+ return BCME_UNSUPPORTED;
+#endif /* WL_MURX */
+ }
+#ifdef SUPPORT_AP_HIGHER_BEACONRATE
+ else if (strnicmp(command, CMD_GET_AP_BASICRATE, strlen(CMD_GET_AP_BASICRATE)) == 0) {
+ bytes_written = wl_android_get_ap_basicrate(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_SET_AP_BEACONRATE, strlen(CMD_SET_AP_BEACONRATE)) == 0) {
+ bytes_written = wl_android_set_ap_beaconrate(net, command);
+ }
+#endif /* SUPPORT_AP_HIGHER_BEACONRATE */
+#ifdef SUPPORT_AP_RADIO_PWRSAVE
+ else if (strnicmp(command, CMD_SET_AP_RPS_PARAMS, strlen(CMD_SET_AP_RPS_PARAMS)) == 0) {
+ bytes_written = wl_android_set_ap_rps_params(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_SET_AP_RPS, strlen(CMD_SET_AP_RPS)) == 0) {
+ bytes_written = wl_android_set_ap_rps(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_GET_AP_RPS, strlen(CMD_GET_AP_RPS)) == 0) {
+ bytes_written = wl_android_get_ap_rps(net, command, priv_cmd.total_len);
+ }
+#endif /* SUPPORT_AP_RADIO_PWRSAVE */
+#ifdef SUPPORT_AP_SUSPEND
+ else if (strnicmp(command, CMD_SET_AP_SUSPEND, strlen(CMD_SET_AP_SUSPEND)) == 0) {
+ bytes_written = wl_android_set_ap_suspend(net, command, priv_cmd.total_len);
+ }
+#endif /* SUPPORT_AP_SUSPEND */
+#ifdef SUPPORT_AP_BWCTRL
+ else if (strnicmp(command, CMD_SET_AP_BW, strlen(CMD_SET_AP_BW)) == 0) {
+ bytes_written = wl_android_set_ap_bw(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_GET_AP_BW, strlen(CMD_GET_AP_BW)) == 0) {
+ bytes_written = wl_android_get_ap_bw(net, command, priv_cmd.total_len);
+ }
+#endif /* SUPPORT_AP_BWCTRL */
+#ifdef SUPPORT_RSSI_SUM_REPORT
+ else if (strnicmp(command, CMD_SET_RSSI_LOGGING, strlen(CMD_SET_RSSI_LOGGING)) == 0) {
+ bytes_written = wl_android_set_rssi_logging(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_GET_RSSI_LOGGING, strlen(CMD_GET_RSSI_LOGGING)) == 0) {
+ bytes_written = wl_android_get_rssi_logging(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_GET_RSSI_PER_ANT, strlen(CMD_GET_RSSI_PER_ANT)) == 0) {
+ bytes_written = wl_android_get_rssi_per_ant(net, command, priv_cmd.total_len);
+ }
+#endif /* SUPPORT_RSSI_SUM_REPORT */
+#if defined(DHD_ENABLE_BIGDATA_LOGGING)
+ else if (strnicmp(command, CMD_GET_BSS_INFO, strlen(CMD_GET_BSS_INFO)) == 0) {
+ bytes_written = wl_cfg80211_get_bss_info(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_GET_ASSOC_REJECT_INFO, strlen(CMD_GET_ASSOC_REJECT_INFO))
+ == 0) {
+ bytes_written = wl_cfg80211_get_connect_failed_status(net, command,
+ priv_cmd.total_len);
+ }
+#endif /* DHD_ENABLE_BIGDATA_LOGGING */
+#if defined(SUPPORT_RANDOM_MAC_SCAN)
+ else if (strnicmp(command, ENABLE_RANDOM_MAC, strlen(ENABLE_RANDOM_MAC)) == 0) {
+ bytes_written = wl_cfg80211_set_random_mac(net, TRUE);
+ } else if (strnicmp(command, DISABLE_RANDOM_MAC, strlen(DISABLE_RANDOM_MAC)) == 0) {
+ bytes_written = wl_cfg80211_set_random_mac(net, FALSE);
+ }
+#endif /* SUPPORT_RANDOM_MAC_SCAN */
+#ifdef WL_NATOE
+ else if (strnicmp(command, CMD_NATOE, strlen(CMD_NATOE)) == 0) {
+ bytes_written = wl_android_process_natoe_cmd(net, command,
+ priv_cmd.total_len);
+ }
+#endif /* WL_NATOE */
+#ifdef CONNECTION_STATISTICS
+ else if (strnicmp(command, CMD_GET_CONNECTION_STATS,
+ strlen(CMD_GET_CONNECTION_STATS)) == 0) {
+ bytes_written = wl_android_get_connection_stats(net, command,
+ priv_cmd.total_len);
+ }
+#endif
+#ifdef DHD_LOG_DUMP
+ else if (strnicmp(command, CMD_NEW_DEBUG_PRINT_DUMP,
+ strlen(CMD_NEW_DEBUG_PRINT_DUMP)) == 0) {
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(net);
+ /* check whether it has more command */
+ if (strnicmp(command + strlen(CMD_NEW_DEBUG_PRINT_DUMP), " ", 1) == 0) {
+ /* compare unwanted/disconnected command */
+ if (strnicmp(command + strlen(CMD_NEW_DEBUG_PRINT_DUMP) + 1,
+ SUBCMD_UNWANTED, strlen(SUBCMD_UNWANTED)) == 0) {
+ dhd_log_dump_trigger(dhdp, CMD_UNWANTED);
+ } else if (strnicmp(command + strlen(CMD_NEW_DEBUG_PRINT_DUMP) + 1,
+ SUBCMD_DISCONNECTED, strlen(SUBCMD_DISCONNECTED)) == 0) {
+ dhd_log_dump_trigger(dhdp, CMD_DISCONNECTED);
+ } else {
+ dhd_log_dump_trigger(dhdp, CMD_DEFAULT);
+ }
+ } else {
+ dhd_log_dump_trigger(dhdp, CMD_DEFAULT);
+ }
+ }
+#endif /* DHD_LOG_DUMP */
+#ifdef DHD_STATUS_LOGGING
+ else if (strnicmp(command, CMD_DUMP_STATUS_LOG, strlen(CMD_DUMP_STATUS_LOG)) == 0) {
+ dhd_statlog_dump_scr(wl_cfg80211_get_dhdp(net));
+ }
+ else if (strnicmp(command, CMD_QUERY_STATUS_LOG, strlen(CMD_QUERY_STATUS_LOG)) == 0) {
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(net);
+ bytes_written = dhd_statlog_query(dhdp, command, priv_cmd.total_len);
+ }
+#endif /* DHD_STATUS_LOGGING */
+#if defined(CONFIG_TIZEN)
+ else if (strnicmp(command, CMD_POWERSAVEMODE_SET,
+ strlen(CMD_POWERSAVEMODE_SET)) == 0) {
+ bytes_written = wl_android_set_powersave_mode(net, command,
+ priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_POWERSAVEMODE_GET,
+ strlen(CMD_POWERSAVEMODE_GET)) == 0) {
+ bytes_written = wl_android_get_powersave_mode(net, command,
+ priv_cmd.total_len);
+ }
+#endif /* CONFIG_TIZEN */
+#ifdef SET_PCIE_IRQ_CPU_CORE
+ else if (strnicmp(command, CMD_PCIE_IRQ_CORE, strlen(CMD_PCIE_IRQ_CORE)) == 0) {
+ int affinity_cmd = *(command + strlen(CMD_PCIE_IRQ_CORE) + 1) - '0';
+ wl_android_set_irq_cpucore(net, affinity_cmd);
+ }
+#endif /* SET_PCIE_IRQ_CPU_CORE */
+#if defined(DHD_HANG_SEND_UP_TEST)
+ else if (strnicmp(command, CMD_MAKE_HANG, strlen(CMD_MAKE_HANG)) == 0) {
+ int skip = strlen(CMD_MAKE_HANG) + 1;
+ wl_android_make_hang_with_reason(net, (const char*)command+skip);
+ }
+#endif /* DHD_HANG_SEND_UP_TEST */
+#ifdef SUPPORT_LQCM
+ else if (strnicmp(command, CMD_SET_LQCM_ENABLE, strlen(CMD_SET_LQCM_ENABLE)) == 0) {
+ int lqcm_enable = *(command + strlen(CMD_SET_LQCM_ENABLE) + 1) - '0';
+ bytes_written = wl_android_lqcm_enable(net, lqcm_enable);
+ }
+ else if (strnicmp(command, CMD_GET_LQCM_REPORT,
+ strlen(CMD_GET_LQCM_REPORT)) == 0) {
+ bytes_written = wl_android_get_lqcm_report(net, command,
+ priv_cmd.total_len);
+ }
+#endif
+ else if (strnicmp(command, CMD_GET_SNR, strlen(CMD_GET_SNR)) == 0) {
+ bytes_written = wl_android_get_snr(net, command, priv_cmd.total_len);
+ }
+#ifdef WLADPS_PRIVATE_CMD
+ else if (strnicmp(command, CMD_SET_ADPS, strlen(CMD_SET_ADPS)) == 0) {
+ int skip = strlen(CMD_SET_ADPS) + 1;
+ bytes_written = wl_android_set_adps_mode(net, (const char*)command+skip);
+ }
+ else if (strnicmp(command, CMD_GET_ADPS, strlen(CMD_GET_ADPS)) == 0) {
+ bytes_written = wl_android_get_adps_mode(net, command, priv_cmd.total_len);
+ }
+#ifdef WLADPS_ENERGY_GAIN
+ else if (strnicmp(command, CMD_GET_GAIN_ADPS, strlen(CMD_GET_GAIN_ADPS)) == 0) {
+ bytes_written = wl_android_get_gain_adps(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_RESET_GAIN_ADPS, strlen(CMD_RESET_GAIN_ADPS)) == 0) {
+ bytes_written = wl_android_reset_gain_adps(net, command);
+ }
+#endif /* WLADPS_ENERGY_GAIN */
+#endif /* WLADPS_PRIVATE_CMD */
+#ifdef DHD_PKT_LOGGING
+ else if (strnicmp(command, CMD_PKTLOG_FILTER_ENABLE,
+ strlen(CMD_PKTLOG_FILTER_ENABLE)) == 0) {
+ bytes_written = wl_android_pktlog_filter_enable(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_PKTLOG_FILTER_DISABLE,
+ strlen(CMD_PKTLOG_FILTER_DISABLE)) == 0) {
+ bytes_written = wl_android_pktlog_filter_disable(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_PKTLOG_FILTER_PATTERN_ENABLE,
+ strlen(CMD_PKTLOG_FILTER_PATTERN_ENABLE)) == 0) {
+ bytes_written =
+ wl_android_pktlog_filter_pattern_enable(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_PKTLOG_FILTER_PATTERN_DISABLE,
+ strlen(CMD_PKTLOG_FILTER_PATTERN_DISABLE)) == 0) {
+ bytes_written =
+ wl_android_pktlog_filter_pattern_disable(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_PKTLOG_FILTER_ADD, strlen(CMD_PKTLOG_FILTER_ADD)) == 0) {
+ bytes_written = wl_android_pktlog_filter_add(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_PKTLOG_FILTER_DEL, strlen(CMD_PKTLOG_FILTER_DEL)) == 0) {
+ bytes_written = wl_android_pktlog_filter_del(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_PKTLOG_FILTER_INFO, strlen(CMD_PKTLOG_FILTER_INFO)) == 0) {
+ bytes_written = wl_android_pktlog_filter_info(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_PKTLOG_START, strlen(CMD_PKTLOG_START)) == 0) {
+ bytes_written = wl_android_pktlog_start(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_PKTLOG_STOP, strlen(CMD_PKTLOG_STOP)) == 0) {
+ bytes_written = wl_android_pktlog_stop(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_PKTLOG_FILTER_EXIST, strlen(CMD_PKTLOG_FILTER_EXIST)) == 0) {
+ bytes_written = wl_android_pktlog_filter_exist(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_PKTLOG_MINMIZE_ENABLE,
+ strlen(CMD_PKTLOG_MINMIZE_ENABLE)) == 0) {
+ bytes_written = wl_android_pktlog_minmize_enable(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_PKTLOG_MINMIZE_DISABLE,
+ strlen(CMD_PKTLOG_MINMIZE_DISABLE)) == 0) {
+ bytes_written = wl_android_pktlog_minmize_disable(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_PKTLOG_CHANGE_SIZE,
+ strlen(CMD_PKTLOG_CHANGE_SIZE)) == 0) {
+ bytes_written = wl_android_pktlog_change_size(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_PKTLOG_DEBUG_DUMP, strlen(CMD_PKTLOG_DEBUG_DUMP)) == 0) {
+ bytes_written = wl_android_pktlog_dbg_dump(net, command, priv_cmd.total_len);
+ }
+#endif /* DHD_PKT_LOGGING */
+#ifdef WL_CFG80211
+ else if (strnicmp(command, CMD_DEBUG_VERBOSE, strlen(CMD_DEBUG_VERBOSE)) == 0) {
+ int verbose_level = *(command + strlen(CMD_DEBUG_VERBOSE) + 1) - '0';
+ bytes_written = wl_cfg80211_set_dbg_verbose(net, verbose_level);
+ }
+#endif /* WL_CFG80211 */
+#ifdef DHD_EVENT_LOG_FILTER
+ else if (strnicmp(command, CMD_EWP_FILTER,
+ strlen(CMD_EWP_FILTER)) == 0) {
+ bytes_written = wl_android_ewp_filter(net, command, priv_cmd.total_len);
+ }
+#endif /* DHD_EVENT_LOG_FILTER */
+#ifdef WL_BCNRECV
+ else if (strnicmp(command, CMD_BEACON_RECV,
+ strlen(CMD_BEACON_RECV)) == 0) {
+ char *data = (command + strlen(CMD_BEACON_RECV) + 1);
+ bytes_written = wl_android_bcnrecv_config(net,
+ data, priv_cmd.total_len);
+ }
+#endif /* WL_BCNRECV */
+#ifdef WL_MBO
+ else if (strnicmp(command, CMD_MBO, strlen(CMD_MBO)) == 0) {
+ bytes_written = wl_android_process_mbo_cmd(net, command,
+ priv_cmd.total_len);
+ }
+#endif /* WL_MBO */
+#ifdef WL_CAC_TS
+ else if (strnicmp(command, CMD_CAC_TSPEC,
+ strlen(CMD_CAC_TSPEC)) == 0) {
+ char *data = (command + strlen(CMD_CAC_TSPEC) + 1);
+ bytes_written = wl_android_cac_ts_config(net,
+ data, priv_cmd.total_len);
+ }
+#endif /* WL_CAC_TS */
+#ifdef WL_GET_CU
+ else if (strnicmp(command, CMD_GET_CHAN_UTIL,
+ strlen(CMD_GET_CHAN_UTIL)) == 0) {
+ bytes_written = wl_android_get_channel_util(net,
+ command, priv_cmd.total_len);
+ }
+#endif /* WL_GET_CU */
+#ifdef RTT_GEOFENCE_INTERVAL
+#if defined (RTT_SUPPORT) && defined(WL_NAN)
+ else if (strnicmp(command, CMD_GEOFENCE_INTERVAL,
+ strlen(CMD_GEOFENCE_INTERVAL)) == 0) {
+ (void)wl_android_set_rtt_geofence_interval(net, command);
+ }
+#endif /* RTT_SUPPORT && WL_NAN */
+#endif /* RTT_GEOFENCE_INTERVAL */
+#ifdef SUPPORT_SOFTAP_ELNA_BYPASS
+ else if (strnicmp(command, CMD_SET_SOFTAP_ELNA_BYPASS,
+ strlen(CMD_SET_SOFTAP_ELNA_BYPASS)) == 0) {
+ bytes_written =
+ wl_android_set_softap_elna_bypass(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_GET_SOFTAP_ELNA_BYPASS,
+ strlen(CMD_GET_SOFTAP_ELNA_BYPASS)) == 0) {
+ bytes_written =
+ wl_android_get_softap_elna_bypass(net, command, priv_cmd.total_len);
+ }
+#endif /* SUPPORT_SOFTAP_ELNA_BYPASS */
+#ifdef WL_NAN
+ else if (strnicmp(command, CMD_GET_NAN_STATUS,
+ strlen(CMD_GET_NAN_STATUS)) == 0) {
+ bytes_written =
+ wl_android_get_nan_status(net, command, priv_cmd.total_len);
+ }
+#endif /* WL_NAN */
+#if defined(SUPPORT_NAN_RANGING_TEST_BW)
+ else if (strnicmp(command, CMD_NAN_RANGING_SET_BW, strlen(CMD_NAN_RANGING_SET_BW)) == 0) {
+ int bw_cmd = *(command + strlen(CMD_NAN_RANGING_SET_BW) + 1) - '0';
+ bytes_written = wl_nan_ranging_bw(net, bw_cmd, command);
+ }
+#endif /* SUPPORT_NAN_RANGING_TEST_BW */
+ else if (strnicmp(command, CMD_GET_FACTORY_MAC, strlen(CMD_GET_FACTORY_MAC)) == 0) {
+ bytes_written = wl_android_get_factory_mac_addr(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_HAPD_SET_AX_MODE, strlen(CMD_HAPD_SET_AX_MODE)) == 0) {
+ int skip = strlen(CMD_HAPD_SET_AX_MODE) + 1;
+ bytes_written = wl_android_set_softap_ax_mode(net, command + skip);
+ }
+#ifdef SUPPORT_LATENCY_CRITICAL_DATA
+ else if (strnicmp(command, CMD_SET_LATENCY_CRITICAL_DATA,
+ strlen(CMD_SET_LATENCY_CRITICAL_DATA)) == 0) {
+ int enable = *(command + strlen(CMD_SET_LATENCY_CRITICAL_DATA) + 1) - '0';
+ bytes_written = wl_android_set_latency_crt_data(net, enable);
+ }
+ else if (strnicmp(command, CMD_GET_LATENCY_CRITICAL_DATA,
+ strlen(CMD_GET_LATENCY_CRITICAL_DATA)) == 0) {
+ bytes_written = wl_android_get_latency_crt_data(net, command, priv_cmd.total_len);
+ }
+#endif /* SUPPORT_LATENCY_CRITICAL_DATA */
+#ifdef WL_TWT
+ else if (strnicmp(command, CMD_TWT_SETUP, strlen(CMD_TWT_SETUP)) == 0) {
+ bytes_written = wl_android_twt_setup(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_TWT_TEARDOWN, strlen(CMD_TWT_TEARDOWN)) == 0) {
+ bytes_written = wl_android_twt_teardown(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_TWT_INFO, strlen(CMD_TWT_INFO)) == 0) {
+ bytes_written = wl_android_twt_info(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_TWT_STATUS_QUERY, strlen(CMD_TWT_STATUS_QUERY)) == 0) {
+ bytes_written = wl_android_twt_status_query(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_TWT_CAPABILITY, strlen(CMD_TWT_CAPABILITY)) == 0) {
+ bytes_written = wl_android_twt_cap(net, command, priv_cmd.total_len);
+ }
+#endif /* WL_TWT */
+#ifdef WL_P2P_6G
+ else if (strnicmp(command, CMD_ENABLE_6G_P2P, strlen(CMD_ENABLE_6G_P2P)) == 0) {
+ int enable = *(command + strlen(CMD_ENABLE_6G_P2P) + 1) - '0';
+ bytes_written = wl_android_enable_p2p_6g(net, enable);
+ }
+#endif /* WL_P2P_6G */
+ else if (wl_android_ext_priv_cmd(net, command, priv_cmd.total_len, &bytes_written) == 0) {
+ /* Handled by the extended private command table; bytes_written already set */
+ }
+ else {
+ ANDROID_ERROR(("Unknown PRIVATE command %s - ignored\n", command));
+ bytes_written = scnprintf(command, sizeof("FAIL"), "FAIL");
+ }
+
+ return bytes_written;
+}
+
+/*
+ * ENABLE_INSMOD_NO_FW_LOAD      X  O  O  O
+ * ENABLE_INSMOD_NO_POWER_OFF    X  X  O  O
+ * NO_POWER_OFF_AFTER_OPEN       X  X  X  O
+ * after insmod                  H  L  H  H
+ * wlan0 down                    H  L  L  H
+ * fw trap trigger wlan0 down    H  L  L  L
+ */
+
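+/*
+ * Module init: select the firmware download policy, set the default
+ * interface name prefix and register the optional netlink interfaces.
+ */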
+int wl_android_init(void)
+{
+ int ret = 0;
+
+#ifdef ENABLE_INSMOD_NO_POWER_OFF
+ dhd_download_fw_on_driverload = TRUE;
+#elif defined(ENABLE_INSMOD_NO_FW_LOAD) || defined(BUS_POWER_RESTORE)
+ dhd_download_fw_on_driverload = FALSE;
+#endif /* ENABLE_INSMOD_NO_FW_LOAD */
+ if (!iface_name[0]) {
+ bzero(iface_name, IFNAMSIZ);
+ bcm_strncpy_s(iface_name, IFNAMSIZ, "wlan", IFNAMSIZ);
+ }
+
+#ifdef CUSTOMER_HW4_DEBUG
+ /* No Kernel Panic from ASSERT() on customer platform. */
+ g_assert_type = 1;
+#endif /* CUSTOMER_HW4_DEBUG */
+
+#ifdef WL_GENL
+ wl_genl_init();
+#endif
+#ifdef WL_RELMCAST
+ wl_netlink_init();
+#endif /* WL_RELMCAST */
+
+ return ret;
+}
+
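+/*
+ * Module exit: unregister the netlink interfaces and free any entries
+ * left on the miracast resume list.
+ */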
+int wl_android_exit(void)
+{
+ int ret = 0;
+ struct io_cfg *cur, *q;
+
+#ifdef WL_GENL
+ wl_genl_deinit();
+#endif /* WL_GENL */
+#ifdef WL_RELMCAST
+ wl_netlink_deinit();
+#endif /* WL_RELMCAST */
+
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ list_for_each_entry_safe(cur, q, &miracast_resume_list, list) {
+ GCC_DIAGNOSTIC_POP();
+ list_del(&cur->list);
+ kfree(cur);
+ }
+
+ return ret;
+}
+
+void wl_android_post_init(void)
+{
+
+#ifdef ENABLE_4335BT_WAR
+ bcm_bt_unlock(lock_cookie_wifi);
+ ANDROID_ERROR(("%s: btlock released\n", __FUNCTION__));
+#endif /* ENABLE_4335BT_WAR */
+
+ if (!dhd_download_fw_on_driverload) {
+ g_wifi_on = FALSE;
+ }
+}
+
+#ifdef WL_GENL
+/* Generic Netlink Initialization */
+static int wl_genl_init(void)
+{
+ int ret;
+
+ ANDROID_INFO(("GEN Netlink Init\n\n"));
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0))
+ /* register new family */
+ ret = genl_register_family(&wl_genl_family);
+ if (ret != 0)
+ goto failure;
+
+ /* register functions (commands) of the new family */
+ ret = genl_register_ops(&wl_genl_family, &wl_genl_ops);
+ if (ret != 0) {
+ ANDROID_ERROR(("register ops failed: %i\n", ret));
+ genl_unregister_family(&wl_genl_family);
+ goto failure;
+ }
+
+ ret = genl_register_mc_group(&wl_genl_family, &wl_genl_mcast);
+#else
+ ret = genl_register_family_with_ops_groups(&wl_genl_family, wl_genl_ops, wl_genl_mcast);
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0) */
+ if (ret != 0) {
+ ANDROID_ERROR(("register mc_group failed: %i\n", ret));
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0))
+ genl_unregister_ops(&wl_genl_family, &wl_genl_ops);
+#endif
+ genl_unregister_family(&wl_genl_family);
+ goto failure;
+ }
+
+ return 0;
+
+failure:
+ ANDROID_ERROR(("Registering Netlink failed!!\n"));
+ return -1;
+}
+
+/* Generic netlink deinit */
+static int wl_genl_deinit(void)
+{
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0))
+ if (genl_unregister_ops(&wl_genl_family, &wl_genl_ops) < 0)
+ ANDROID_ERROR(("Unregister wl_genl_ops failed\n"));
+#endif
+ if (genl_unregister_family(&wl_genl_family) < 0)
+ ANDROID_ERROR(("Unregister wl_genl_family failed\n"));
+
+ return 0;
+}
+
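+/* Map a WLC firmware event code to its BCM supplicant event counterpart */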
+s32 wl_event_to_bcm_event(u16 event_type)
+{
+ /* When you add any new event, please mention the
+ * version of BCM supplicant supporting it
+ */
+ u16 event = -1;
+
+ switch (event_type) {
+ case WLC_E_SERVICE_FOUND:
+ event = BCM_E_SVC_FOUND;
+ break;
+ case WLC_E_P2PO_ADD_DEVICE:
+ event = BCM_E_DEV_FOUND;
+ break;
+ case WLC_E_P2PO_DEL_DEVICE:
+ event = BCM_E_DEV_LOST;
+ break;
+ /* The above events are supported from BCM supplicant ver 47 onwards */
+#ifdef BT_WIFI_HANDOVER
+ case WLC_E_BT_WIFI_HANDOVER_REQ:
+ event = BCM_E_DEV_BT_WIFI_HO_REQ;
+ break;
+#endif /* BT_WIFI_HANDOVER */
+
+ default:
+ ANDROID_ERROR(("Event not supported\n"));
+ }
+
+ return event;
+}
+
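+/*
+ * Send an event to userspace over generic netlink. event_type 0 sends
+ * 'buf' as a null-terminated BCM_GENL_ATTR_STRING attribute; any other
+ * value packs a bcm_event_hdr_t, the optional sub-header and the payload
+ * into a single BCM_GENL_ATTR_MSG attribute.
+ */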
+s32
+wl_genl_send_msg(
+ struct net_device *ndev,
+ u32 event_type,
+ const u8 *buf,
+ u16 len,
+ u8 *subhdr,
+ u16 subhdr_len)
+{
+ int ret = 0;
+ struct sk_buff *skb;
+ void *msg;
+ u32 attr_type = 0;
+ bcm_event_hdr_t *hdr = NULL;
+ int mcast = 1; /* By default sent as multicast type */
+ int pid = 0;
+ u8 *ptr = NULL, *p = NULL;
+ u32 tot_len = sizeof(bcm_event_hdr_t) + subhdr_len + len;
+ u16 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+
+ ANDROID_INFO(("Enter \n"));
+
+ /* Decide between STRING event and Data event */
+ if (event_type == 0)
+ attr_type = BCM_GENL_ATTR_STRING;
+ else
+ attr_type = BCM_GENL_ATTR_MSG;
+
+ skb = genlmsg_new(NLMSG_GOODSIZE, kflags);
+ if (skb == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ msg = genlmsg_put(skb, 0, 0, &wl_genl_family, 0, BCM_GENL_CMD_MSG);
+ if (msg == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (attr_type == BCM_GENL_ATTR_STRING) {
+		/* Add a BCM_GENL_ATTR_STRING attribute. Since it is specified as a string,
+		 * make sure it is null terminated
+ */
+ if (subhdr || subhdr_len) {
+ ANDROID_ERROR(("No sub hdr support for the ATTR STRING type \n"));
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = nla_put_string(skb, BCM_GENL_ATTR_STRING, buf);
+ if (ret != 0) {
+ ANDROID_ERROR(("nla_put_string failed\n"));
+ goto out;
+ }
+ } else {
+ /* ATTR_MSG */
+
+ /* Create a single buffer for all */
+ p = ptr = (u8 *)MALLOCZ(cfg->osh, tot_len);
+ if (!ptr) {
+ ret = -ENOMEM;
+ ANDROID_ERROR(("ENOMEM!!\n"));
+ goto out;
+ }
+
+ /* Include the bcm event header */
+ hdr = (bcm_event_hdr_t *)ptr;
+ hdr->event_type = wl_event_to_bcm_event(event_type);
+ hdr->len = len + subhdr_len;
+ ptr += sizeof(bcm_event_hdr_t);
+
+ /* Copy subhdr (if any) */
+ if (subhdr && subhdr_len) {
+ memcpy(ptr, subhdr, subhdr_len);
+ ptr += subhdr_len;
+ }
+
+ /* Copy the data */
+ if (buf && len) {
+ memcpy(ptr, buf, len);
+ }
+
+ ret = nla_put(skb, BCM_GENL_ATTR_MSG, tot_len, p);
+ if (ret != 0) {
+ ANDROID_ERROR(("nla_put_string failed\n"));
+ goto out;
+ }
+ }
+
+ if (mcast) {
+ int err = 0;
+ /* finalize the message */
+ genlmsg_end(skb, msg);
+ /* NETLINK_CB(skb).dst_group = 1; */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)
+ if ((err = genlmsg_multicast(skb, 0, wl_genl_mcast.id, GFP_ATOMIC)) < 0)
+#else
+ if ((err = genlmsg_multicast(&wl_genl_family, skb, 0, 0, GFP_ATOMIC)) < 0)
+#endif
+ ANDROID_ERROR(("genlmsg_multicast for attr(%d) failed. Error:%d \n",
+ attr_type, err));
+ else
+ ANDROID_INFO(("Multicast msg sent successfully. attr_type:%d len:%d \n",
+ attr_type, tot_len));
+ } else {
+ NETLINK_CB(skb).dst_group = 0; /* Not in multicast group */
+
+ /* finalize the message */
+ genlmsg_end(skb, msg);
+
+ /* send the message back */
+ if (genlmsg_unicast(&init_net, skb, pid) < 0)
+ ANDROID_ERROR(("genlmsg_unicast failed\n"));
+ }
+
+out:
+ if (p) {
+ MFREE(cfg->osh, p, tot_len);
+ }
+ if (ret)
+ nlmsg_free(skb);
+
+ return ret;
+}
+
+static s32
+wl_genl_handle_msg(
+ struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct nlattr *na;
+ u8 *data = NULL;
+
+ ANDROID_INFO(("Enter \n"));
+
+ if (info == NULL) {
+ return -EINVAL;
+ }
+
+ na = info->attrs[BCM_GENL_ATTR_MSG];
+ if (!na) {
+ ANDROID_ERROR(("nlattribute NULL\n"));
+ return -EINVAL;
+ }
+
+ data = (char *)nla_data(na);
+ if (!data) {
+ ANDROID_ERROR(("Invalid data\n"));
+ return -EINVAL;
+ } else {
+ /* Handle the data */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)) || \
+ defined(WL_COMPAT_WIRELESS)
+ ANDROID_INFO(("%s: Data received from pid (%d) \n", __func__,
+ info->snd_pid));
+#else
+ ANDROID_INFO(("%s: Data received from pid (%d) \n", __func__,
+ info->snd_portid));
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)) || WL_COMPAT_WIRELESS */
+ }
+
+ return 0;
+}
+#endif /* WL_GENL */
+
+int wl_fatal_error(void *wl, int rc)
+{
+ return FALSE;
+}
+
+void
+wl_android_set_wifi_on_flag(bool enable)
+{
+ ANDROID_ERROR(("%s: %d\n", __FUNCTION__, enable));
+ g_wifi_on = enable;
+}
+
+#ifdef WL_STATIC_IF
+#include <dhd_linux_priv.h>
+struct net_device *
+wl_cfg80211_register_static_if(struct bcm_cfg80211 *cfg, u16 iftype, char *ifname,
+ int static_ifidx)
+{
+#if defined(CUSTOM_MULTI_MAC) || defined(WL_EXT_IAPSTA)
+	dhd_pub_t *dhd;
+#endif
+ struct net_device *ndev;
+ struct wireless_dev *wdev = NULL;
+ int ifidx = WL_STATIC_IFIDX; /* Register ndev with a reserved ifidx */
+ u8 mac_addr[ETH_ALEN];
+ struct net_device *primary_ndev;
+#ifdef DHD_USE_RANDMAC
+ struct ether_addr ea_addr;
+#endif /* DHD_USE_RANDMAC */
+#ifdef CUSTOM_MULTI_MAC
+ char hw_ether[62];
+#endif
+
+ ANDROID_INFO(("[STATIC_IF] Enter (%s) iftype:%d\n", ifname, iftype));
+
+	if (!cfg) {
+		ANDROID_ERROR(("cfg null\n"));
+		return NULL;
+	}
+#if defined(CUSTOM_MULTI_MAC) || defined(WL_EXT_IAPSTA)
+	/* dereference cfg only after the NULL check above */
+	dhd = cfg->pub;
+#endif
+	primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+
+ ifidx += static_ifidx;
+#ifdef DHD_USE_RANDMAC
+ wl_cfg80211_generate_mac_addr(&ea_addr);
+ (void)memcpy_s(mac_addr, ETH_ALEN, ea_addr.octet, ETH_ALEN);
+#else
+#if defined(CUSTOM_MULTI_MAC)
+ if (!wifi_platform_get_mac_addr(dhd->info->adapter, hw_ether, static_ifidx+1)) {
+ (void)memcpy_s(mac_addr, ETH_ALEN, hw_ether, ETH_ALEN);
+ } else
+#endif
+ {
+ /* Use primary mac with locally admin bit set */
+ (void)memcpy_s(mac_addr, ETH_ALEN, primary_ndev->dev_addr, ETH_ALEN);
+ mac_addr[0] |= 0x02;
+#ifdef WL_EXT_IAPSTA
+ wl_ext_iapsta_get_vif_macaddr(dhd, static_ifidx+1, mac_addr);
+#endif
+ }
+#endif /* DHD_USE_RANDMAC */
+
+ ndev = wl_cfg80211_allocate_if(cfg, ifidx, ifname, mac_addr,
+ WL_BSSIDX_MAX, NULL);
+ if (unlikely(!ndev)) {
+ ANDROID_ERROR(("Failed to allocate static_if\n"));
+ goto fail;
+ }
+ wdev = (struct wireless_dev *)MALLOCZ(cfg->osh, sizeof(*wdev));
+ if (unlikely(!wdev)) {
+ ANDROID_ERROR(("Failed to allocate wdev for static_if\n"));
+ goto fail;
+ }
+
+ wdev->wiphy = cfg->wdev->wiphy;
+ wdev->iftype = iftype;
+
+ ndev->ieee80211_ptr = wdev;
+ SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
+ wdev->netdev = ndev;
+
+ if (wl_cfg80211_register_if(cfg, ifidx,
+ ndev, TRUE) != BCME_OK) {
+ ANDROID_ERROR(("ndev registration failed!\n"));
+ goto fail;
+ }
+
+ cfg->static_ndev[static_ifidx] = ndev;
+ cfg->static_ndev_state[static_ifidx] = NDEV_STATE_OS_IF_CREATED;
+ wl_cfg80211_update_iflist_info(cfg, ndev, ifidx, NULL, WL_BSSIDX_MAX,
+ ifname, NDEV_STATE_OS_IF_CREATED);
+ ANDROID_INFO(("Static I/F (%s) Registered\n", ndev->name));
+ return ndev;
+
+fail:
+ wl_cfg80211_remove_if(cfg, ifidx, ndev, false);
+ return NULL;
+}
+
+void
+wl_cfg80211_unregister_static_if(struct bcm_cfg80211 *cfg)
+{
+ int i;
+
+ ANDROID_INFO(("[STATIC_IF] Enter\n"));
+ if (!cfg) {
+ ANDROID_ERROR(("invalid input\n"));
+ return;
+ }
+
+ /* wdev free will happen from notifier context */
+ /* free_netdev(cfg->static_ndev);
+ */
+	for (i = 0; i < DHD_MAX_STATIC_IFS; i++) {
+ if (cfg->static_ndev[i])
+ unregister_netdev(cfg->static_ndev[i]);
+ }
+}
+
+s32
+wl_cfg80211_static_if_open(struct net_device *net)
+{
+ struct wireless_dev *wdev = NULL;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(net);
+ struct net_device *primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+ u16 iftype = net->ieee80211_ptr ? net->ieee80211_ptr->iftype : 0;
+ u16 wl_iftype, wl_mode;
+#ifdef CUSTOM_MULTI_MAC
+ dhd_pub_t *dhd = dhd_get_pub(net);
+ char hw_ether[62];
+#endif
+ int static_ifidx;
+
+ ANDROID_INFO(("[STATIC_IF] dev_open ndev %p and wdev %p\n", net, net->ieee80211_ptr));
+ static_ifidx = wl_cfg80211_static_ifidx(cfg, net);
+ ASSERT(static_ifidx >= 0);
+
+ if (cfg80211_to_wl_iftype(iftype, &wl_iftype, &wl_mode) < 0) {
+ return BCME_ERROR;
+ }
+ if (cfg->static_ndev_state[static_ifidx] != NDEV_STATE_FW_IF_CREATED) {
+#ifdef CUSTOM_MULTI_MAC
+ if (!wifi_platform_get_mac_addr(dhd->info->adapter, hw_ether, static_ifidx+1))
+ memcpy(net->dev_addr, hw_ether, ETHER_ADDR_LEN);
+#endif
+ wdev = wl_cfg80211_add_if(cfg, primary_ndev, wl_iftype, net->name, net->dev_addr);
+ if (!wdev) {
+ ANDROID_ERROR(("[STATIC_IF] wdev is NULL, can't proceed\n"));
+ return BCME_ERROR;
+ }
+ } else {
+ ANDROID_INFO(("Fw IF for static netdev already created\n"));
+ }
+
+ return BCME_OK;
+}
+
+s32
+wl_cfg80211_static_if_close(struct net_device *net)
+{
+ int ret = BCME_OK;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(net);
+ struct net_device *primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+ int static_ifidx;
+
+ static_ifidx = wl_cfg80211_static_ifidx(cfg, net);
+
+ if (cfg->static_ndev_state[static_ifidx] == NDEV_STATE_FW_IF_CREATED) {
+ if (mutex_is_locked(&cfg->if_sync) == TRUE) {
+ ret = _wl_cfg80211_del_if(cfg, primary_ndev, net->ieee80211_ptr, net->name);
+ } else {
+ ret = wl_cfg80211_del_if(cfg, primary_ndev, net->ieee80211_ptr, net->name);
+ }
+
+ if (unlikely(ret)) {
+ ANDROID_ERROR(("Del iface failed for static_if %d\n", ret));
+ }
+ }
+
+ return ret;
+}
+
+struct net_device *
+wl_cfg80211_post_static_ifcreate(struct bcm_cfg80211 *cfg,
+ wl_if_event_info *event, u8 *addr, s32 iface_type, int static_ifidx)
+{
+ struct net_device *new_ndev = NULL;
+ struct wireless_dev *wdev = NULL;
+
+ ANDROID_INFO(("Updating static iface after Fw IF create \n"));
+ new_ndev = cfg->static_ndev[static_ifidx];
+
+ if (new_ndev) {
+ wdev = new_ndev->ieee80211_ptr;
+ ASSERT(wdev);
+ wdev->iftype = iface_type;
+ (void)memcpy_s(new_ndev->dev_addr, ETH_ALEN, addr, ETH_ALEN);
+ }
+
+ cfg->static_ndev_state[static_ifidx] = NDEV_STATE_FW_IF_CREATED;
+ wl_cfg80211_update_iflist_info(cfg, new_ndev, event->ifidx, addr, event->bssidx,
+ event->name, NDEV_STATE_FW_IF_CREATED);
+ return new_ndev;
+}
+
+s32
+wl_cfg80211_post_static_ifdel(struct bcm_cfg80211 *cfg, struct net_device *ndev)
+{
+ int static_ifidx;
+ int ifidx = WL_STATIC_IFIDX;
+
+ static_ifidx = wl_cfg80211_static_ifidx(cfg, ndev);
+ ifidx += static_ifidx;
+ cfg->static_ndev_state[static_ifidx] = NDEV_STATE_FW_IF_DELETED;
+ wl_cfg80211_update_iflist_info(cfg, ndev, ifidx, NULL,
+ WL_BSSIDX_MAX, NULL, NDEV_STATE_FW_IF_DELETED);
+ wl_cfg80211_clear_per_bss_ies(cfg, ndev->ieee80211_ptr);
+ wl_dealloc_netinfo_by_wdev(cfg, ndev->ieee80211_ptr);
+ return BCME_OK;
+}
+#endif /* WL_STATIC_IF */
+
+#ifdef WBTEXT
+static int
+wlc_wbtext_get_roam_prof(struct net_device *ndev, wl_roamprof_band_t *rp,
+ uint8 band, uint8 *roam_prof_ver, uint8 *roam_prof_size)
+{
+ int err = BCME_OK;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ u8 *ioctl_buf = NULL;
+
+ ioctl_buf = (u8 *)MALLOCZ(cfg->osh, WLC_IOCTL_MEDLEN);
+ if (unlikely(!ioctl_buf)) {
+ ANDROID_ERROR(("%s: failed to allocate memory\n", __func__));
+ err = -ENOMEM;
+ goto exit;
+ }
+ rp->v1.band = band;
+ rp->v1.len = 0;
+ /* Getting roam profile from fw */
+ if ((err = wldev_iovar_getbuf(ndev, "roam_prof", rp, sizeof(*rp),
+ ioctl_buf, WLC_IOCTL_MEDLEN, NULL))) {
+ ANDROID_ERROR(("Getting roam_profile failed with err=%d \n", err));
+ goto exit;
+ }
+ memcpy_s(rp, sizeof(*rp), ioctl_buf, sizeof(*rp));
+ /* roam_prof version get */
+ if (rp->v1.ver > WL_ROAM_PROF_VER_3) {
+ ANDROID_ERROR(("bad version (=%d) in return data\n", rp->v1.ver));
+ err = BCME_VERSION;
+ goto exit;
+ }
+ switch (rp->v1.ver) {
+ case WL_ROAM_PROF_VER_0:
+ {
+ *roam_prof_size = sizeof(wl_roam_prof_v1_t);
+ *roam_prof_ver = WL_ROAM_PROF_VER_0;
+ }
+ break;
+ case WL_ROAM_PROF_VER_1:
+ {
+ *roam_prof_size = sizeof(wl_roam_prof_v2_t);
+ *roam_prof_ver = WL_ROAM_PROF_VER_1;
+ }
+ break;
+ case WL_ROAM_PROF_VER_2:
+ {
+ *roam_prof_size = sizeof(wl_roam_prof_v3_t);
+ *roam_prof_ver = WL_ROAM_PROF_VER_2;
+ }
+ break;
+ case WL_ROAM_PROF_VER_3:
+ {
+ *roam_prof_size = sizeof(wl_roam_prof_v4_t);
+ *roam_prof_ver = WL_ROAM_PROF_VER_3;
+ }
+ break;
+ default:
+ ANDROID_ERROR(("bad version = %d \n", rp->v1.ver));
+ err = BCME_VERSION;
+ goto exit;
+ }
+ ANDROID_INFO(("roam prof ver %u size %u\n", *roam_prof_ver, *roam_prof_size));
+ if ((rp->v1.len % *roam_prof_size) != 0) {
+ ANDROID_ERROR(("bad length (=%d) in return data\n", rp->v1.len));
+ err = BCME_BADLEN;
+ }
+exit:
+ if (ioctl_buf) {
+ MFREE(cfg->osh, ioctl_buf, WLC_IOCTL_MEDLEN);
+ }
+ return err;
+}
+
+s32
+wl_cfg80211_wbtext_set_default(struct net_device *ndev)
+{
+ char *commandp = NULL;
+ s32 ret = BCME_OK;
+ char *data;
+ u8 *ioctl_buf = NULL;
+ wl_roamprof_band_t rp;
+ uint8 bandidx = 0;
+ int wnmmask = 0;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+
+ ANDROID_INFO(("set wbtext to default\n"));
+
+ commandp = (char *)MALLOCZ(cfg->osh, WLC_IOCTL_SMLEN);
+ if (unlikely(!commandp)) {
+ ANDROID_ERROR(("%s: failed to allocate memory\n", __func__));
+ ret = -ENOMEM;
+ goto exit;
+ }
+ ioctl_buf = (char *)MALLOCZ(cfg->osh, WLC_IOCTL_SMLEN);
+ if (unlikely(!ioctl_buf)) {
+ ANDROID_ERROR(("%s: failed to allocate memory\n", __func__));
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ rp.v1.band = WLC_BAND_2G;
+ rp.v1.len = 0;
+ /* Getting roam profile from fw */
+ if ((ret = wldev_iovar_getbuf(ndev, "roam_prof", &rp, sizeof(rp),
+ ioctl_buf, WLC_IOCTL_SMLEN, NULL))) {
+ ANDROID_ERROR(("Getting roam_profile failed with err=%d \n", ret));
+ goto exit;
+ }
+ memcpy_s(&rp, sizeof(rp), ioctl_buf, sizeof(rp));
+ for (bandidx = 0; bandidx < MAXBANDS; bandidx++) {
+ switch (rp.v1.ver) {
+ case WL_ROAM_PROF_VER_1:
+ {
+ memset_s(commandp, WLC_IOCTL_SMLEN, 0, WLC_IOCTL_SMLEN);
+ if (bandidx == BAND_5G_INDEX) {
+ snprintf(commandp, WLC_IOCTL_SMLEN, "%s %s",
+ CMD_WBTEXT_PROFILE_CONFIG,
+ DEFAULT_WBTEXT_PROFILE_A_V2);
+ data = (commandp + strlen(CMD_WBTEXT_PROFILE_CONFIG) + 1);
+ } else {
+ snprintf(commandp, WLC_IOCTL_SMLEN, "%s %s",
+ CMD_WBTEXT_PROFILE_CONFIG,
+ DEFAULT_WBTEXT_PROFILE_B_V2);
+ data = (commandp + strlen(CMD_WBTEXT_PROFILE_CONFIG) + 1);
+ }
+ }
+ break;
+ case WL_ROAM_PROF_VER_2:
+ case WL_ROAM_PROF_VER_3:
+ {
+ memset_s(commandp, WLC_IOCTL_SMLEN, 0, WLC_IOCTL_SMLEN);
+ if (bandidx == BAND_5G_INDEX) {
+ snprintf(commandp, WLC_IOCTL_SMLEN, "%s %s",
+ CMD_WBTEXT_PROFILE_CONFIG,
+ DEFAULT_WBTEXT_PROFILE_A_V3);
+ data = (commandp + strlen(CMD_WBTEXT_PROFILE_CONFIG) + 1);
+ } else {
+ snprintf(commandp, WLC_IOCTL_SMLEN, "%s %s",
+ CMD_WBTEXT_PROFILE_CONFIG,
+ DEFAULT_WBTEXT_PROFILE_B_V3);
+ data = (commandp + strlen(CMD_WBTEXT_PROFILE_CONFIG) + 1);
+ }
+ }
+ break;
+ default:
+ ANDROID_ERROR(("No Support for roam prof ver = %d \n", rp.v1.ver));
+ ret = -EINVAL;
+ goto exit;
+ }
+ /* set roam profile */
+ ret = wl_cfg80211_wbtext_config(ndev, data, commandp, WLC_IOCTL_SMLEN);
+ if (ret != BCME_OK) {
+ ANDROID_ERROR(("%s: Failed to set roam_prof %s error = %d\n",
+ __FUNCTION__, data, ret));
+ goto exit;
+ }
+ }
+
+	/* WBTEXT code kept for backward compatibility. Newer firmware sets the
+	 * default values during fw init
+	 */
+ /* set RSSI weight */
+ memset_s(commandp, WLC_IOCTL_SMLEN, 0, WLC_IOCTL_SMLEN);
+ snprintf(commandp, WLC_IOCTL_SMLEN, "%s %s",
+ CMD_WBTEXT_WEIGHT_CONFIG, DEFAULT_WBTEXT_WEIGHT_RSSI_A);
+ data = (commandp + strlen(CMD_WBTEXT_WEIGHT_CONFIG) + 1);
+ ret = wl_cfg80211_wbtext_weight_config(ndev, data, commandp, WLC_IOCTL_SMLEN);
+ if (ret != BCME_OK) {
+ ANDROID_ERROR(("%s: Failed to set weight config %s error = %d\n",
+ __FUNCTION__, data, ret));
+ goto exit;
+ }
+
+ memset_s(commandp, WLC_IOCTL_SMLEN, 0, WLC_IOCTL_SMLEN);
+ snprintf(commandp, WLC_IOCTL_SMLEN, "%s %s",
+ CMD_WBTEXT_WEIGHT_CONFIG, DEFAULT_WBTEXT_WEIGHT_RSSI_B);
+ data = (commandp + strlen(CMD_WBTEXT_WEIGHT_CONFIG) + 1);
+ ret = wl_cfg80211_wbtext_weight_config(ndev, data, commandp, WLC_IOCTL_SMLEN);
+ if (ret != BCME_OK) {
+ ANDROID_ERROR(("%s: Failed to set weight config %s error = %d\n",
+ __FUNCTION__, data, ret));
+ goto exit;
+ }
+
+ /* set CU weight */
+ memset_s(commandp, WLC_IOCTL_SMLEN, 0, WLC_IOCTL_SMLEN);
+ snprintf(commandp, WLC_IOCTL_SMLEN, "%s %s",
+ CMD_WBTEXT_WEIGHT_CONFIG, DEFAULT_WBTEXT_WEIGHT_CU_A);
+ data = (commandp + strlen(CMD_WBTEXT_WEIGHT_CONFIG) + 1);
+ ret = wl_cfg80211_wbtext_weight_config(ndev, data, commandp, WLC_IOCTL_SMLEN);
+ if (ret != BCME_OK) {
+ ANDROID_ERROR(("%s: Failed to set weight config %s error = %d\n",
+ __FUNCTION__, data, ret));
+ goto exit;
+ }
+
+ memset_s(commandp, WLC_IOCTL_SMLEN, 0, WLC_IOCTL_SMLEN);
+ snprintf(commandp, WLC_IOCTL_SMLEN, "%s %s",
+ CMD_WBTEXT_WEIGHT_CONFIG, DEFAULT_WBTEXT_WEIGHT_CU_B);
+ data = (commandp + strlen(CMD_WBTEXT_WEIGHT_CONFIG) + 1);
+ ret = wl_cfg80211_wbtext_weight_config(ndev, data, commandp, WLC_IOCTL_SMLEN);
+ if (ret != BCME_OK) {
+ ANDROID_ERROR(("%s: Failed to set weight config %s error = %d\n",
+ __FUNCTION__, data, ret));
+ goto exit;
+ }
+
+ ret = wldev_iovar_getint(ndev, "wnm", &wnmmask);
+ if (ret != BCME_OK) {
+ ANDROID_ERROR(("%s: Failed to get wnmmask error = %d\n", __func__, ret));
+ goto exit;
+ }
+ /* set ESTM DL weight. */
+ if (wnmmask & WL_WNM_ESTM) {
+ ANDROID_ERROR(("Setting ESTM wt\n"));
+ memset_s(commandp, WLC_IOCTL_SMLEN, 0, WLC_IOCTL_SMLEN);
+ snprintf(commandp, WLC_IOCTL_SMLEN, "%s %s",
+ CMD_WBTEXT_WEIGHT_CONFIG, DEFAULT_WBTEXT_WEIGHT_ESTM_DL_A);
+ data = (commandp + strlen(CMD_WBTEXT_WEIGHT_CONFIG) + 1);
+ ret = wl_cfg80211_wbtext_weight_config(ndev, data, commandp, WLC_IOCTL_SMLEN);
+ if (ret != BCME_OK) {
+ ANDROID_ERROR(("%s: Failed to set weight config %s error = %d\n",
+ __FUNCTION__, data, ret));
+ goto exit;
+ }
+
+ memset_s(commandp, WLC_IOCTL_SMLEN, 0, WLC_IOCTL_SMLEN);
+ snprintf(commandp, WLC_IOCTL_SMLEN, "%s %s",
+ CMD_WBTEXT_WEIGHT_CONFIG, DEFAULT_WBTEXT_WEIGHT_ESTM_DL_B);
+ data = (commandp + strlen(CMD_WBTEXT_WEIGHT_CONFIG) + 1);
+ ret = wl_cfg80211_wbtext_weight_config(ndev, data, commandp, WLC_IOCTL_SMLEN);
+ if (ret != BCME_OK) {
+ ANDROID_ERROR(("%s: Failed to set weight config %s error = %d\n",
+ __FUNCTION__, data, ret));
+ goto exit;
+ }
+ }
+
+ /* set RSSI table */
+ memset_s(commandp, WLC_IOCTL_SMLEN, 0, WLC_IOCTL_SMLEN);
+ snprintf(commandp, WLC_IOCTL_SMLEN, "%s %s",
+ CMD_WBTEXT_TABLE_CONFIG, DEFAULT_WBTEXT_TABLE_RSSI_A);
+ data = (commandp + strlen(CMD_WBTEXT_TABLE_CONFIG) + 1);
+ ret = wl_cfg80211_wbtext_table_config(ndev, data, commandp, WLC_IOCTL_SMLEN);
+ if (ret != BCME_OK) {
+ ANDROID_ERROR(("%s: Failed to set RSSI table %s error = %d\n",
+ __FUNCTION__, data, ret));
+ goto exit;
+ }
+
+ memset_s(commandp, WLC_IOCTL_SMLEN, 0, WLC_IOCTL_SMLEN);
+ snprintf(commandp, WLC_IOCTL_SMLEN, "%s %s",
+ CMD_WBTEXT_TABLE_CONFIG, DEFAULT_WBTEXT_TABLE_RSSI_B);
+ data = (commandp + strlen(CMD_WBTEXT_TABLE_CONFIG) + 1);
+ ret = wl_cfg80211_wbtext_table_config(ndev, data, commandp, WLC_IOCTL_SMLEN);
+ if (ret != BCME_OK) {
+ ANDROID_ERROR(("%s: Failed to set RSSI table %s error = %d\n",
+ __FUNCTION__, data, ret));
+ goto exit;
+ }
+
+ /* set CU table */
+ memset_s(commandp, WLC_IOCTL_SMLEN, 0, WLC_IOCTL_SMLEN);
+ snprintf(commandp, WLC_IOCTL_SMLEN, "%s %s",
+ CMD_WBTEXT_TABLE_CONFIG, DEFAULT_WBTEXT_TABLE_CU_A);
+ data = (commandp + strlen(CMD_WBTEXT_TABLE_CONFIG) + 1);
+ ret = wl_cfg80211_wbtext_table_config(ndev, data, commandp, WLC_IOCTL_SMLEN);
+ if (ret != BCME_OK) {
+ ANDROID_ERROR(("%s: Failed to set CU table %s error = %d\n",
+ __FUNCTION__, data, ret));
+ goto exit;
+ }
+
+ memset_s(commandp, WLC_IOCTL_SMLEN, 0, WLC_IOCTL_SMLEN);
+ snprintf(commandp, WLC_IOCTL_SMLEN, "%s %s",
+ CMD_WBTEXT_TABLE_CONFIG, DEFAULT_WBTEXT_TABLE_CU_B);
+ data = (commandp + strlen(CMD_WBTEXT_TABLE_CONFIG) + 1);
+ ret = wl_cfg80211_wbtext_table_config(ndev, data, commandp, WLC_IOCTL_SMLEN);
+ if (ret != BCME_OK) {
+ ANDROID_ERROR(("%s: Failed to set CU table %s error = %d\n",
+ __FUNCTION__, data, ret));
+ goto exit;
+ }
+
+exit:
+ if (commandp) {
+ MFREE(cfg->osh, commandp, WLC_IOCTL_SMLEN);
+ }
+ if (ioctl_buf) {
+ MFREE(cfg->osh, ioctl_buf, WLC_IOCTL_SMLEN);
+ }
+ return ret;
+}
+
+s32
+wl_cfg80211_wbtext_config(struct net_device *ndev, char *data, char *command, int total_len)
+{
+ uint i = 0;
+ long int rssi_lower, roam_trigger;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ wl_roamprof_band_t *rp = NULL;
+ int err = -EINVAL, bytes_written = 0;
+ size_t len = strlen(data);
+ int rp_len = 0;
+ u8 *ioctl_buf = NULL;
+ uint8 roam_prof_size = 0, roam_prof_ver = 0, fs_per = 0, prof_cnt = 0;
+
+ data[len] = '\0';
+ ioctl_buf = (u8 *)MALLOCZ(cfg->osh, WLC_IOCTL_MEDLEN);
+ if (unlikely(!ioctl_buf)) {
+ ANDROID_ERROR(("%s: failed to allocate memory\n", __func__));
+ err = -ENOMEM;
+ goto exit;
+ }
+ rp = (wl_roamprof_band_t *)MALLOCZ(cfg->osh, sizeof(*rp));
+ if (unlikely(!rp)) {
+ ANDROID_ERROR(("%s: failed to allocate memory\n", __func__));
+ err = -ENOMEM;
+ goto exit;
+ }
+ if (*data && (!strncmp(data, "b", 1))) {
+ rp->v1.band = WLC_BAND_2G;
+ } else if (*data && (!strncmp(data, "a", 1))) {
+ rp->v1.band = WLC_BAND_5G;
+ } else {
+ err = snprintf(command, total_len, "Missing band\n");
+ goto exit;
+ }
+ data++;
+ rp->v1.len = 0;
+ /* Getting roam profile from fw */
+ if ((err = wldev_iovar_getbuf(ndev, "roam_prof", rp, sizeof(*rp),
+ ioctl_buf, WLC_IOCTL_MEDLEN, NULL))) {
+ ANDROID_ERROR(("Getting roam_profile failed with err=%d \n", err));
+ goto exit;
+ }
+ memcpy_s(rp, sizeof(*rp), ioctl_buf, sizeof(*rp));
+ /* roam_prof version get */
+ if (rp->v1.ver > WL_ROAM_PROF_VER_3) {
+ ANDROID_ERROR(("bad version (=%d) in return data\n", rp->v1.ver));
+ err = -EINVAL;
+ goto exit;
+ }
+ switch (rp->v1.ver) {
+ case WL_ROAM_PROF_VER_0:
+ {
+ roam_prof_size = sizeof(wl_roam_prof_v1_t);
+ roam_prof_ver = WL_ROAM_PROF_VER_0;
+ }
+ break;
+ case WL_ROAM_PROF_VER_1:
+ {
+ roam_prof_size = sizeof(wl_roam_prof_v2_t);
+ roam_prof_ver = WL_ROAM_PROF_VER_1;
+ }
+ break;
+ case WL_ROAM_PROF_VER_2:
+ {
+ roam_prof_size = sizeof(wl_roam_prof_v3_t);
+ roam_prof_ver = WL_ROAM_PROF_VER_2;
+ }
+ break;
+ case WL_ROAM_PROF_VER_3:
+ {
+ roam_prof_size = sizeof(wl_roam_prof_v4_t);
+ roam_prof_ver = WL_ROAM_PROF_VER_3;
+ }
+ break;
+ default:
+ ANDROID_ERROR(("bad version = %d \n", rp->v1.ver));
+ goto exit;
+ }
+ ANDROID_INFO(("roam prof ver %u size %u\n", roam_prof_ver, roam_prof_size));
+ if ((rp->v1.len % roam_prof_size) != 0) {
+ ANDROID_ERROR(("bad length (=%d) in return data\n", rp->v1.len));
+ err = -EINVAL;
+ goto exit;
+ }
+ for (i = 0; i < WL_MAX_ROAM_PROF_BRACKETS; i++) {
+		/* Count the valid roam profile entries from fw and stop
+		 * when either of the conditions below is hit: the remaining
+		 * buffer length is less than the roam profile size, or
+		 * there is no valid entry (fullscan_period == 0).
+		 */
+ if (((i * roam_prof_size) > rp->v1.len)) {
+ break;
+ }
+ if (roam_prof_ver == WL_ROAM_PROF_VER_0) {
+ fs_per = rp->v1.roam_prof[i].fullscan_period;
+ } else if (roam_prof_ver == WL_ROAM_PROF_VER_1) {
+ fs_per = rp->v2.roam_prof[i].fullscan_period;
+ } else if (roam_prof_ver == WL_ROAM_PROF_VER_2) {
+ fs_per = rp->v3.roam_prof[i].fullscan_period;
+ } else if (roam_prof_ver == WL_ROAM_PROF_VER_3) {
+ fs_per = rp->v4.roam_prof[i].fullscan_period;
+ }
+ if (fs_per == 0) {
+ break;
+ }
+ prof_cnt++;
+ }
+
+ if (!*data) {
+ for (i = 0; (i < prof_cnt) && (i < WL_MAX_ROAM_PROF_BRACKETS); i++) {
+ if (roam_prof_ver == WL_ROAM_PROF_VER_1) {
+ bytes_written += scnprintf(command+bytes_written,
+ total_len - bytes_written,
+ "RSSI[%d,%d] CU(trigger:%d%%: duration:%ds)",
+ rp->v2.roam_prof[i].roam_trigger,
+ rp->v2.roam_prof[i].rssi_lower,
+ rp->v2.roam_prof[i].channel_usage,
+ rp->v2.roam_prof[i].cu_avg_calc_dur);
+ } else if (roam_prof_ver == WL_ROAM_PROF_VER_2) {
+ bytes_written += scnprintf(command+bytes_written,
+ total_len - bytes_written,
+ "RSSI[%d,%d] CU(trigger:%d%%: duration:%ds)",
+ rp->v3.roam_prof[i].roam_trigger,
+ rp->v3.roam_prof[i].rssi_lower,
+ rp->v3.roam_prof[i].channel_usage,
+ rp->v3.roam_prof[i].cu_avg_calc_dur);
+ } else if (roam_prof_ver == WL_ROAM_PROF_VER_3) {
+ bytes_written += snprintf(command+bytes_written,
+ total_len - bytes_written,
+ "RSSI[%d,%d] CU(trigger:%d%%: duration:%ds)",
+ rp->v4.roam_prof[i].roam_trigger,
+ rp->v4.roam_prof[i].rssi_lower,
+ rp->v4.roam_prof[i].channel_usage,
+ rp->v4.roam_prof[i].cu_avg_calc_dur);
+ }
+ }
+ bytes_written += scnprintf(command+bytes_written, total_len - bytes_written, "\n");
+ err = bytes_written;
+ goto exit;
+ } else {
+ /* Do not set roam_prof from upper layer if fw doesn't have 2 rows */
+ if (prof_cnt != 2) {
+ ANDROID_ERROR(("FW must have 2 rows to fill roam_prof\n"));
+ err = -EINVAL;
+ goto exit;
+ }
+ /* setting roam profile to fw */
+ data++;
+ for (i = 0; i < WL_MAX_ROAM_PROF_BRACKETS; i++) {
+ roam_trigger = simple_strtol(data, &data, 10);
+ if (roam_trigger >= 0) {
+ ANDROID_ERROR(("roam trigger[%d] value must be negative\n", i));
+ err = -EINVAL;
+ goto exit;
+ }
+ data++;
+ rssi_lower = simple_strtol(data, &data, 10);
+ if (rssi_lower >= 0) {
+ ANDROID_ERROR(("rssi lower[%d] value must be negative\n", i));
+ err = -EINVAL;
+ goto exit;
+ }
+ if (roam_prof_ver == WL_ROAM_PROF_VER_1) {
+ rp->v2.roam_prof[i].roam_trigger = roam_trigger;
+ rp->v2.roam_prof[i].rssi_lower = rssi_lower;
+ data++;
+ rp->v2.roam_prof[i].channel_usage = simple_strtol(data, &data, 10);
+ data++;
+ rp->v2.roam_prof[i].cu_avg_calc_dur =
+ simple_strtol(data, &data, 10);
+ }
+ if (roam_prof_ver == WL_ROAM_PROF_VER_2) {
+ rp->v3.roam_prof[i].roam_trigger = roam_trigger;
+ rp->v3.roam_prof[i].rssi_lower = rssi_lower;
+ data++;
+ rp->v3.roam_prof[i].channel_usage = simple_strtol(data, &data, 10);
+ data++;
+ rp->v3.roam_prof[i].cu_avg_calc_dur =
+ simple_strtol(data, &data, 10);
+ }
+ if (roam_prof_ver == WL_ROAM_PROF_VER_3) {
+ rp->v4.roam_prof[i].roam_trigger = roam_trigger;
+ rp->v4.roam_prof[i].rssi_lower = rssi_lower;
+ data++;
+ rp->v4.roam_prof[i].channel_usage = simple_strtol(data, &data, 10);
+ data++;
+ rp->v4.roam_prof[i].cu_avg_calc_dur =
+ simple_strtol(data, &data, 10);
+ }
+
+ rp_len += roam_prof_size;
+
+ if (*data == '\0') {
+ break;
+ }
+ data++;
+ }
+ if (i != 1) {
+ ANDROID_ERROR(("Only two roam_prof rows supported.\n"));
+ err = -EINVAL;
+ goto exit;
+ }
+ rp->v1.len = rp_len;
+ if ((err = wldev_iovar_setbuf(ndev, "roam_prof", rp,
+ sizeof(*rp), cfg->ioctl_buf, WLC_IOCTL_MEDLEN,
+ &cfg->ioctl_buf_sync)) < 0) {
+ ANDROID_ERROR(("seting roam_profile failed with err %d\n", err));
+ }
+ }
+exit:
+ if (rp) {
+ MFREE(cfg->osh, rp, sizeof(*rp));
+ }
+ if (ioctl_buf) {
+ MFREE(cfg->osh, ioctl_buf, WLC_IOCTL_MEDLEN);
+ }
+ return err;
+}
+
+int wl_cfg80211_wbtext_weight_config(struct net_device *ndev, char *data,
+ char *command, int total_len)
+{
+ int bytes_written = 0, err = -EINVAL, argc = 0;
+ char rssi[BUFSZN], band[BUFSZN], weight[BUFSZN];
+ char *endptr = NULL;
+ wnm_bss_select_weight_cfg_t *bwcfg;
+ u8 ioctl_buf[WLC_IOCTL_SMLEN];
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+
+ bwcfg = (wnm_bss_select_weight_cfg_t *)MALLOCZ(cfg->osh, sizeof(*bwcfg));
+ if (unlikely(!bwcfg)) {
+ ANDROID_ERROR(("%s: failed to allocate memory\n", __func__));
+ err = -ENOMEM;
+ goto exit;
+ }
+ bwcfg->version = WNM_BSSLOAD_MONITOR_VERSION;
+ bwcfg->type = 0;
+ bwcfg->weight = 0;
+
+ argc = sscanf(data, "%"S(BUFSZ)"s %"S(BUFSZ)"s %"S(BUFSZ)"s", rssi, band, weight);
+
+ if (!strcasecmp(rssi, "rssi"))
+ bwcfg->type = WNM_BSS_SELECT_TYPE_RSSI;
+ else if (!strcasecmp(rssi, "cu"))
+ bwcfg->type = WNM_BSS_SELECT_TYPE_CU;
+ else if (!strcasecmp(rssi, "estm_dl"))
+ bwcfg->type = WNM_BSS_SELECT_TYPE_ESTM_DL;
+ else {
+ /* Usage DRIVER WBTEXT_WEIGHT_CONFIG <rssi/cu/estm_dl> <band> <weight> */
+ ANDROID_ERROR(("%s: Command usage error\n", __func__));
+ goto exit;
+ }
+
+ if (BCME_BADBAND == wl_android_bandstr_to_fwband(band, &bwcfg->band)) {
+ ANDROID_ERROR(("%s: Command usage error\n", __func__));
+ goto exit;
+ }
+
+ if (argc == 2) {
+ /* If there is no data after band, getting wnm_bss_select_weight from fw */
+ if (bwcfg->band == WLC_BAND_ALL) {
+ ANDROID_ERROR(("band option \"all\" is for set only, not get\n"));
+ goto exit;
+ }
+ if ((err = wldev_iovar_getbuf(ndev, "wnm_bss_select_weight", bwcfg,
+ sizeof(*bwcfg),
+ ioctl_buf, sizeof(ioctl_buf), NULL))) {
+ ANDROID_ERROR(("Getting wnm_bss_select_weight failed with err=%d \n", err));
+ goto exit;
+ }
+ memcpy(bwcfg, ioctl_buf, sizeof(*bwcfg));
+ bytes_written = snprintf(command, total_len, "%s %s weight = %d\n",
+ (bwcfg->type == WNM_BSS_SELECT_TYPE_RSSI) ? "RSSI" :
+ (bwcfg->type == WNM_BSS_SELECT_TYPE_CU) ? "CU": "ESTM_DL",
+ wl_android_get_band_str(bwcfg->band), bwcfg->weight);
+ err = bytes_written;
+ goto exit;
+ } else {
+		/* if weight is not an integer, return a command usage error */
+ bwcfg->weight = simple_strtol(weight, &endptr, 0);
+ if (*endptr != '\0') {
+ ANDROID_ERROR(("%s: Command usage error", __func__));
+ goto exit;
+ }
+ /* setting weight for iovar wnm_bss_select_weight to fw */
+ if ((err = wldev_iovar_setbuf(ndev, "wnm_bss_select_weight", bwcfg,
+ sizeof(*bwcfg),
+ ioctl_buf, sizeof(ioctl_buf), NULL))) {
+ ANDROID_ERROR(("setting wnm_bss_select_weight failed with err=%d\n", err));
+ }
+ }
+exit:
+ if (bwcfg) {
+ MFREE(cfg->osh, bwcfg, sizeof(*bwcfg));
+ }
+ return err;
+}
+
+/* WBTEXT_TUPLE_MIN_LEN_CHECK: strlen(low) + " " + strlen(high) + " " + strlen(factor) */
+#define WBTEXT_TUPLE_MIN_LEN_CHECK 5
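+/* e.g. the shortest valid tuple "0 1 2" is exactly 5 characters:
+ * three single-digit values separated by two spaces
+ */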
+
+int wl_cfg80211_wbtext_table_config(struct net_device *ndev, char *data,
+ char *command, int total_len)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ int bytes_written = 0, err = -EINVAL;
+ char rssi[BUFSZN], band[BUFSZN];
+ int btcfg_len = 0, i = 0, parsed_len = 0;
+ wnm_bss_select_factor_cfg_t *btcfg;
+ size_t slen = strlen(data);
+ char *start_addr = NULL;
+ u8 ioctl_buf[WLC_IOCTL_SMLEN];
+
+ data[slen] = '\0';
+ btcfg = (wnm_bss_select_factor_cfg_t *)MALLOCZ(cfg->osh,
+ (sizeof(*btcfg) + sizeof(*btcfg) * WL_FACTOR_TABLE_MAX_LIMIT));
+ if (unlikely(!btcfg)) {
+ ANDROID_ERROR(("%s: failed to allocate memory\n", __func__));
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ btcfg->version = WNM_BSS_SELECT_FACTOR_VERSION;
+ btcfg->band = WLC_BAND_AUTO;
+ btcfg->type = 0;
+ btcfg->count = 0;
+
+ sscanf(data, "%"S(BUFSZ)"s %"S(BUFSZ)"s", rssi, band);
+
+ if (!strcasecmp(rssi, "rssi")) {
+ btcfg->type = WNM_BSS_SELECT_TYPE_RSSI;
+	} else if (!strcasecmp(rssi, "cu")) {
+		btcfg->type = WNM_BSS_SELECT_TYPE_CU;
+	} else {
+ ANDROID_ERROR(("%s: Command usage error\n", __func__));
+ goto exit;
+ }
+
+ if (BCME_BADBAND == wl_android_bandstr_to_fwband(band, &btcfg->band)) {
+ ANDROID_ERROR(("%s: Command usage, Wrong band\n", __func__));
+ goto exit;
+ }
+
+ if ((slen - 1) == (strlen(rssi) + strlen(band))) {
+ /* Getting factor table using iovar 'wnm_bss_select_table' from fw */
+ if ((err = wldev_iovar_getbuf(ndev, "wnm_bss_select_table", btcfg,
+ sizeof(*btcfg),
+ ioctl_buf, sizeof(ioctl_buf), NULL))) {
+ ANDROID_ERROR(("Getting wnm_bss_select_table failed with err=%d \n", err));
+ goto exit;
+ }
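+		/* copy the fixed structure first to learn the entry count, then
+		 * copy again to pick up all the table entries
+		 */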
+ memcpy(btcfg, ioctl_buf, sizeof(*btcfg));
+ memcpy(btcfg, ioctl_buf, (btcfg->count+1) * sizeof(*btcfg));
+
+ bytes_written += snprintf(command + bytes_written, total_len - bytes_written,
+ "No of entries in table: %d\n", btcfg->count);
+ bytes_written += snprintf(command + bytes_written, total_len - bytes_written,
+ "%s factor table\n",
+ (btcfg->type == WNM_BSS_SELECT_TYPE_RSSI) ? "RSSI" : "CU");
+ bytes_written += snprintf(command + bytes_written, total_len - bytes_written,
+ "low\thigh\tfactor\n");
+		for (i = 0; i < btcfg->count; i++) {
+ bytes_written += snprintf(command + bytes_written,
+ total_len - bytes_written, "%d\t%d\t%d\n", btcfg->params[i].low,
+ btcfg->params[i].high, btcfg->params[i].factor);
+ }
+ err = bytes_written;
+ goto exit;
+ } else {
+ uint16 len = (sizeof(wnm_bss_select_factor_params_t) * WL_FACTOR_TABLE_MAX_LIMIT);
+ memset_s(btcfg->params, len, 0, len);
+ data += (strlen(rssi) + strlen(band) + 2);
+ start_addr = data;
+ slen = slen - (strlen(rssi) + strlen(band) + 2);
+ for (i = 0; i < WL_FACTOR_TABLE_MAX_LIMIT; i++) {
+ if (parsed_len + WBTEXT_TUPLE_MIN_LEN_CHECK <= slen) {
+ btcfg->params[i].low = simple_strtol(data, &data, 10);
+ data++;
+ btcfg->params[i].high = simple_strtol(data, &data, 10);
+ data++;
+ btcfg->params[i].factor = simple_strtol(data, &data, 10);
+ btcfg->count++;
+ if (*data == '\0') {
+ break;
+ }
+ data++;
+ parsed_len = data - start_addr;
+ } else {
+ ANDROID_ERROR(("%s:Command usage:less no of args\n", __func__));
+ goto exit;
+ }
+ }
+ btcfg_len = sizeof(*btcfg) + ((btcfg->count) * sizeof(*btcfg));
+ if ((err = wldev_iovar_setbuf(ndev, "wnm_bss_select_table", btcfg, btcfg_len,
+ cfg->ioctl_buf, WLC_IOCTL_MEDLEN, &cfg->ioctl_buf_sync)) < 0) {
+ ANDROID_ERROR(("seting wnm_bss_select_table failed with err %d\n", err));
+ goto exit;
+ }
+ }
+exit:
+ if (btcfg) {
+ MFREE(cfg->osh, btcfg,
+ (sizeof(*btcfg) + sizeof(*btcfg) * WL_FACTOR_TABLE_MAX_LIMIT));
+ }
+ return err;
+}
+
+s32
+wl_cfg80211_wbtext_delta_config(struct net_device *ndev, char *data, char *command, int total_len)
+{
+ uint i = 0;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ int err = -EINVAL, bytes_written = 0, argc = 0, val, len = 0;
+ char delta[BUFSZN], band[BUFSZN], *endptr = NULL;
+ wl_roamprof_band_t *rp = NULL;
+ uint8 band_val = 0, roam_prof_size = 0, roam_prof_ver = 0;
+
+ rp = (wl_roamprof_band_t *)MALLOCZ(cfg->osh, sizeof(*rp));
+ if (unlikely(!rp)) {
+ ANDROID_ERROR(("%s: failed to allocate memory\n", __func__));
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ argc = sscanf(data, "%"S(BUFSZ)"s %"S(BUFSZ)"s", band, delta);
+ if (BCME_BADBAND == wl_android_bandstr_to_fwband(band, &band_val)) {
+ ANDROID_ERROR(("%s: Missing band\n", __func__));
+ goto exit;
+ }
+ if ((err = wlc_wbtext_get_roam_prof(ndev, rp, band_val, &roam_prof_ver,
+ &roam_prof_size))) {
+ ANDROID_ERROR(("Getting roam_profile failed with err=%d \n", err));
+ err = -EINVAL;
+ goto exit;
+ }
+ if (argc == 2) {
+		/* if delta is not an integer, return a command usage error */
+ val = simple_strtol(delta, &endptr, 0);
+ if (*endptr != '\0') {
+ ANDROID_ERROR(("%s: Command usage error", __func__));
+ goto exit;
+ }
+ for (i = 0; i < WL_MAX_ROAM_PROF_BRACKETS; i++) {
+ /*
+			 * Check the roam profile data from fw and exit if either
+			 * condition below is hit: the remaining buffer length is
+			 * less than the roam profile size, or there is no valid entry.
+ */
+ if (len >= rp->v1.len) {
+ break;
+ }
+ if (roam_prof_ver == WL_ROAM_PROF_VER_1) {
+ if (rp->v2.roam_prof[i].fullscan_period == 0) {
+ break;
+ }
+ if (rp->v2.roam_prof[i].channel_usage != 0) {
+ rp->v2.roam_prof[i].roam_delta = val;
+ }
+ } else if (roam_prof_ver == WL_ROAM_PROF_VER_2) {
+ if (rp->v3.roam_prof[i].fullscan_period == 0) {
+ break;
+ }
+ if (rp->v3.roam_prof[i].channel_usage != 0) {
+ rp->v3.roam_prof[i].roam_delta = val;
+ }
+ } else if (roam_prof_ver == WL_ROAM_PROF_VER_3) {
+ if (rp->v4.roam_prof[i].fullscan_period == 0) {
+ break;
+ }
+ if (rp->v4.roam_prof[i].channel_usage != 0) {
+ rp->v4.roam_prof[i].roam_delta = val;
+ }
+ }
+ len += roam_prof_size;
+ }
+	} else {
+ if (rp->v2.roam_prof[0].channel_usage != 0) {
+ bytes_written = snprintf(command, total_len,
+ "%s Delta %d\n", wl_android_get_band_str(rp->v1.band),
+ rp->v2.roam_prof[0].roam_delta);
+ }
+ err = bytes_written;
+ goto exit;
+ }
+ rp->v1.len = len;
+ if ((err = wldev_iovar_setbuf(ndev, "roam_prof", rp,
+ sizeof(*rp), cfg->ioctl_buf, WLC_IOCTL_MEDLEN,
+ &cfg->ioctl_buf_sync)) < 0) {
+ ANDROID_ERROR(("seting roam_profile failed with err %d\n", err));
+ }
+exit:
+ if (rp) {
+ MFREE(cfg->osh, rp, sizeof(*rp));
+ }
+ return err;
+}
+#endif /* WBTEXT */
diff --git a/bcmdhd.101.10.361.x/wl_android.h b/bcmdhd.101.10.361.x/wl_android.h
new file mode 100755
index 0000000..b08a18b
--- /dev/null
+++ b/bcmdhd.101.10.361.x/wl_android.h
@@ -0,0 +1,252 @@
+/*
+ * Linux cfg80211 driver - Android related functions
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _wl_android_
+#define _wl_android_
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <wldev_common.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#ifdef WL_EXT_IAPSTA
+#ifdef WL_ESCAN
+#include <wl_escan.h>
+#endif /* WL_ESCAN */
+#include <wl_iapsta.h>
+#endif /* WL_EXT_IAPSTA */
+#if defined(WL_EXT_IAPSTA) || defined(USE_IW) || defined(WL_ESCAN) || \
+ (defined(WL_EXT_GENL) && defined(SENDPROB))
+#ifndef WL_EVENT
+#define WL_EVENT
+#endif
+#include <wl_event.h>
+#endif
+#include <wl_android_ext.h>
+
+/* If any feature uses the Generic Netlink Interface, put it here to enable WL_GENL
+ * automatically
+ */
+#if defined(WL_SDO) || defined(BT_WIFI_HANDOVER)
+#define WL_GENL
+#endif
+
+#ifdef WL_GENL
+#include <net/genetlink.h>
+#endif
+
+typedef struct _android_wifi_priv_cmd {
+ char *buf;
+ int used_len;
+ int total_len;
+} android_wifi_priv_cmd;
+
+#ifdef CONFIG_COMPAT
+typedef struct _compat_android_wifi_priv_cmd {
+ compat_caddr_t buf;
+ int used_len;
+ int total_len;
+} compat_android_wifi_priv_cmd;
+#endif /* CONFIG_COMPAT */
+
+/**
+ * Android platform dependent functions; feel free to add Android specific functions
+ * here (save the macros in dhd). Please do NOT declare functions here that are NOT
+ * exposed to dhd or cfg; define them as static in wl_android.c
+ */
+
+/* message levels */
+#define ANDROID_ERROR_LEVEL (1 << 0)
+#define ANDROID_TRACE_LEVEL (1 << 1)
+#define ANDROID_INFO_LEVEL (1 << 2)
+#define ANDROID_SCAN_LEVEL (1 << 3)
+#define ANDROID_DBG_LEVEL (1 << 4)
+#define ANDROID_TPUT_LEVEL (1 << 8)
+#define ANDROID_MSG_LEVEL (1 << 0)
+
+#define WL_MSG(name, arg1, args...) \
+ do { \
+ if (android_msg_level & ANDROID_MSG_LEVEL) { \
+ printf("[%s] %s : " arg1, name, __func__, ## args); \
+ } \
+ } while (0)
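+/* e.g. WL_MSG(dev->name, "channel %d\n", chan); prints only while
+ * ANDROID_MSG_LEVEL is set in android_msg_level
+ */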
+
+#define WL_MSG_PRINT_RATE_LIMIT_PERIOD 1000000000u /* 1s in units of ns */
+#define WL_MSG_RLMT(name, cmp, size, arg1, args...) \
+do { \
+ if (android_msg_level & ANDROID_MSG_LEVEL) { \
+ static uint64 __err_ts = 0; \
+ static uint32 __err_cnt = 0; \
+ uint64 __cur_ts = 0; \
+ static uint8 static_tmp[size]; \
+ __cur_ts = osl_localtime_ns(); \
+ if (__err_ts == 0 || (__cur_ts > __err_ts && \
+ (__cur_ts - __err_ts > WL_MSG_PRINT_RATE_LIMIT_PERIOD)) || \
+ memcmp(&static_tmp, cmp, size)) { \
+ __err_ts = __cur_ts; \
+ memcpy(static_tmp, cmp, size); \
+ printf("[%s] %s : [%u times] " arg1, \
+ name, __func__, __err_cnt, ## args); \
+ __err_cnt = 0; \
+ } else { \
+ ++__err_cnt; \
+ } \
+ } \
+} while (0)
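+/* Rate-limited variant of WL_MSG: prints at most once per
+ * WL_MSG_PRINT_RATE_LIMIT_PERIOD unless the "cmp" buffer changes, and
+ * reports how many prints were suppressed in between. Illustrative
+ * (hypothetical) call:
+ * WL_MSG_RLMT(dev->name, &status, sizeof(status), "status %d\n", status);
+ */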
+
+/**
+ * wl_android_init will be called from module init function (dhd_module_init now), similarly
+ * wl_android_exit will be called from module exit function (dhd_module_cleanup now)
+ */
+int wl_android_init(void);
+int wl_android_exit(void);
+void wl_android_post_init(void);
+void wl_android_set_wifi_on_flag(bool enable);
+#if defined(WLAN_ACCEL_BOOT)
+int wl_android_wifi_accel_on(struct net_device *dev, bool force_reg_on);
+int wl_android_wifi_accel_off(struct net_device *dev, bool force_reg_on);
+#endif /* WLAN_ACCEL_BOOT */
+int wl_android_wifi_on(struct net_device *dev);
+int wl_android_wifi_off(struct net_device *dev, bool on_failure);
+int wl_android_priv_cmd(struct net_device *net, struct ifreq *ifr);
+int wl_handle_private_cmd(struct net_device *net, char *command, u32 cmd_len);
+#ifdef WL_CFG80211
+int wl_android_set_spect(struct net_device *dev, int spect);
+s32 wl_android_get_band_chanspecs(struct net_device *ndev, void *buf, s32 buflen,
+ chanspec_band_t band, bool acs_req);
+#endif
+
+#ifdef WL_GENL
+typedef struct bcm_event_hdr {
+ u16 event_type;
+ u16 len;
+} bcm_event_hdr_t;
+
+/* attributes (variables): the index in this enum is used as a reference for the type;
+ * the userspace application has to indicate the corresponding type.
+ * The policy is used for security considerations.
+ */
+enum {
+ BCM_GENL_ATTR_UNSPEC,
+ BCM_GENL_ATTR_STRING,
+ BCM_GENL_ATTR_MSG,
+ __BCM_GENL_ATTR_MAX
+};
+#define BCM_GENL_ATTR_MAX (__BCM_GENL_ATTR_MAX - 1)
+
+/* commands: enumeration of all commands (functions),
+ * used by the userspace application to identify the command to be executed
+ */
+enum {
+ BCM_GENL_CMD_UNSPEC,
+ BCM_GENL_CMD_MSG,
+ __BCM_GENL_CMD_MAX
+};
+#define BCM_GENL_CMD_MAX (__BCM_GENL_CMD_MAX - 1)
+
+/* Enum values used by the BCM supplicant to identify the events */
+enum {
+ BCM_E_UNSPEC,
+ BCM_E_SVC_FOUND,
+ BCM_E_DEV_FOUND,
+ BCM_E_DEV_LOST,
+#ifdef BT_WIFI_HANDOVER
+ BCM_E_DEV_BT_WIFI_HO_REQ,
+#endif
+ BCM_E_MAX
+};
+
+s32 wl_genl_send_msg(struct net_device *ndev, u32 event_type,
+ const u8 *string, u16 len, u8 *hdr, u16 hdrlen);
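+/* event_type 0 sends "string" as a null-terminated BCM_GENL_ATTR_STRING
+ * (hdr/hdrlen must be NULL/0); any other event_type wraps the payload in a
+ * bcm_event_hdr_t and sends it as BCM_GENL_ATTR_MSG. Illustrative call:
+ * wl_genl_send_msg(ndev, 0, (const u8 *)"STATUS", 7, NULL, 0);
+ */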
+#endif /* WL_GENL */
+s32 wl_netlink_send_msg(int pid, int type, int seq, const void *data, size_t size);
+
+/* hostap mac mode */
+#define MACLIST_MODE_DISABLED 0
+#define MACLIST_MODE_DENY 1
+#define MACLIST_MODE_ALLOW 2
+
+/* max number of assoc list */
+#define MAX_NUM_OF_ASSOCLIST 64
+
+/* Bandwidth */
+#define WL_CH_BANDWIDTH_20MHZ 20
+#define WL_CH_BANDWIDTH_40MHZ 40
+#define WL_CH_BANDWIDTH_80MHZ 80
+#define WL_CH_BANDWIDTH_160MHZ 160
+
+/* max number of mac filter list
+ * restrict max number to 10 as maximum cmd string size is 255
+ */
+#define MAX_NUM_MAC_FILT 10
+#define WL_GET_BAND(ch) (((uint)(ch) <= CH_MAX_2G_CHANNEL) ? \
+ WLC_BAND_2G : WLC_BAND_5G)
+
+/* SoftAP auto channel feature */
+#define APCS_BAND_2G_LEGACY1 20
+#define APCS_BAND_2G_LEGACY2 0
+#define APCS_BAND_AUTO "band=auto"
+#define APCS_BAND_2G "band=2g"
+#define APCS_BAND_5G "band=5g"
+#define APCS_BAND_6G "band=6g"
+#define FREQ_STR "freq="
+#define APCS_MAX_2G_CHANNELS 11
+#define APCS_MAX_RETRY 10
+#define APCS_DEFAULT_2G_CH 1
+#define APCS_DEFAULT_5G_CH 149
+#define APCS_DEFAULT_6G_CH 5
+
+int wl_android_set_ap_mac_list(struct net_device *dev, int macmode, struct maclist *maclist);
+#ifdef WL_BCNRECV
+extern int wl_android_bcnrecv_config(struct net_device *ndev, char *data,
+ int total_len);
+extern int wl_android_bcnrecv_stop(struct net_device *ndev, uint reason);
+extern int wl_android_bcnrecv_resume(struct net_device *ndev);
+extern int wl_android_bcnrecv_suspend(struct net_device *ndev);
+extern int wl_android_bcnrecv_event(struct net_device *ndev,
+ uint attr_type, uint status, uint reason, uint8 *data, uint data_len);
+#endif /* WL_BCNRECV */
+#ifdef WL_CAC_TS
+#define TSPEC_UPLINK_DIRECTION (0 << 5) /* uplink direction traffic stream */
+#define TSPEC_DOWNLINK_DIRECTION (1 << 5) /* downlink direction traffic stream */
+#define TSPEC_BI_DIRECTION (3 << 5) /* bi direction traffic stream */
+#define TSPEC_EDCA_ACCESS (1 << 7) /* EDCA access policy */
+#define TSPEC_UAPSD_PSB (1 << 2) /* U-APSD power saving behavior */
+#define TSPEC_TSINFO_TID_SHIFT 1 /* TID Shift */
+#define TSPEC_TSINFO_PRIO_SHIFT 3 /* PRIO Shift */
+#define TSPEC_MAX_ACCESS_CATEGORY 3
+#define TSPEC_MAX_USER_PRIO 7
+#define TSPEC_MAX_DIALOG_TOKEN 255
+#define TSPEC_MAX_SURPLUS_BW 12410
+#define TSPEC_MIN_SURPLUS_BW 11210
+#define TSPEC_MAX_MSDU_SIZE 1520
+#define TSPEC_DEF_MEAN_DATA_RATE 120000
+#define TSPEC_DEF_MIN_PHY_RATE 6000000
+#define TSPEC_DEF_DIALOG_TOKEN 7
+#endif /* WL_CAC_TS */
+
+#ifdef WL_SUPPORT_AUTO_CHANNEL
+#define WLC_ACS_BAND_INVALID 0xffffu
+#endif /* WL_SUPPORT_AUTO_CHANNEL */
+#define WL_PRIV_CMD_LEN 64
+#endif /* _wl_android_ */
diff --git a/bcmdhd.101.10.361.x/wl_android_ext.c b/bcmdhd.101.10.361.x/wl_android_ext.c
new file mode 100755
index 0000000..7899365
--- /dev/null
+++ b/bcmdhd.101.10.361.x/wl_android_ext.c
@@ -0,0 +1,4043 @@
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <net/netlink.h>
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <ethernet.h>
+
+#include <wl_android.h>
+#include <linux/if_arp.h>
+#include <asm/uaccess.h>
+#include <linux/wireless.h>
+#if defined(WL_WIRELESS_EXT)
+#include <wl_iw.h>
+#endif /* WL_WIRELESS_EXT */
+#include <wldev_common.h>
+#include <wlioctl.h>
+#include <bcmutils.h>
+#include <linux_osl.h>
+#include <dhd_dbg.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_config.h>
+#ifdef WL_CFG80211
+#include <wl_cfg80211.h>
+#endif /* WL_CFG80211 */
+#ifdef WL_ESCAN
+#include <wl_escan.h>
+#endif /* WL_ESCAN */
+
+#define AEXT_ERROR(name, arg1, args...) \
+ do { \
+ if (android_msg_level & ANDROID_ERROR_LEVEL) { \
+ printf("[%s] AEXT-ERROR) %s : " arg1, name, __func__, ## args); \
+ } \
+ } while (0)
+#define AEXT_TRACE(name, arg1, args...) \
+ do { \
+ if (android_msg_level & ANDROID_TRACE_LEVEL) { \
+ printf("[%s] AEXT-TRACE) %s : " arg1, name, __func__, ## args); \
+ } \
+ } while (0)
+#define AEXT_INFO(name, arg1, args...) \
+ do { \
+ if (android_msg_level & ANDROID_INFO_LEVEL) { \
+ printf("[%s] AEXT-INFO) %s : " arg1, name, __func__, ## args); \
+ } \
+ } while (0)
+#define AEXT_DBG(name, arg1, args...) \
+ do { \
+ if (android_msg_level & ANDROID_DBG_LEVEL) { \
+ printf("[%s] AEXT-DBG) %s : " arg1, name, __func__, ## args); \
+ } \
+ } while (0)
+
+#ifndef WL_CFG80211
+#define htod32(i) i
+#define htod16(i) i
+#define dtoh32(i) i
+#define dtoh16(i) i
+#define htodchanspec(i) i
+#define dtohchanspec(i) i
+#define IEEE80211_BAND_2GHZ 0
+#define IEEE80211_BAND_5GHZ 1
+#define WL_SCAN_JOIN_PROBE_INTERVAL_MS 20
+#define WL_SCAN_JOIN_ACTIVE_DWELL_TIME_MS 320
+#define WL_SCAN_JOIN_PASSIVE_DWELL_TIME_MS 400
+#endif /* WL_CFG80211 */
+
+#ifndef IW_CUSTOM_MAX
+#define IW_CUSTOM_MAX 256 /* size of extra buffer used for translation of events */
+#endif /* IW_CUSTOM_MAX */
+
+#define CMD_CHANNEL "CHANNEL"
+#define CMD_CHANNELS "CHANNELS"
+#define CMD_ROAM_TRIGGER "ROAM_TRIGGER"
+#define CMD_PM "PM"
+#define CMD_MONITOR "MONITOR"
+#define CMD_SET_SUSPEND_BCN_LI_DTIM "SET_SUSPEND_BCN_LI_DTIM"
+#define CMD_WLMSGLEVEL "WLMSGLEVEL"
+#ifdef WL_EXT_IAPSTA
+#define CMD_IAPSTA_INIT "IAPSTA_INIT"
+#define CMD_IAPSTA_CONFIG "IAPSTA_CONFIG"
+#define CMD_IAPSTA_ENABLE "IAPSTA_ENABLE"
+#define CMD_IAPSTA_DISABLE "IAPSTA_DISABLE"
+#define CMD_ISAM_INIT "ISAM_INIT"
+#define CMD_ISAM_CONFIG "ISAM_CONFIG"
+#define CMD_ISAM_ENABLE "ISAM_ENABLE"
+#define CMD_ISAM_DISABLE "ISAM_DISABLE"
+#define CMD_ISAM_STATUS "ISAM_STATUS"
+#define CMD_ISAM_PEER_PATH "ISAM_PEER_PATH"
+#define CMD_ISAM_PARAM "ISAM_PARAM"
+#endif /* WL_EXT_IAPSTA */
+#define CMD_AUTOCHANNEL "AUTOCHANNEL"
+#define CMD_WL "WL"
+#define CMD_CONF "CONF"
+
+#if defined(PKT_STATICS) && defined(BCMSDIO)
+#define CMD_DUMP_PKT_STATICS "DUMP_PKT_STATICS"
+#define CMD_CLEAR_PKT_STATICS "CLEAR_PKT_STATICS"
+extern void dhd_bus_dump_txpktstatics(dhd_pub_t *dhdp);
+extern void dhd_bus_clear_txpktstatics(dhd_pub_t *dhdp);
+#endif /* PKT_STATICS && BCMSDIO */
+
+#ifdef IDHCP
+typedef struct dhcpc_parameter {
+ uint32 ip_addr;
+ uint32 ip_serv;
+ uint32 lease_time;
+} dhcpc_para_t;
+#endif /* IDHCP */
+
+#ifdef WL_EXT_WOWL
+#define WL_WOWL_TCPFIN (1 << 26)
+typedef struct wl_wowl_pattern2 {
+ char cmd[4];
+ wl_wowl_pattern_t wowl_pattern;
+} wl_wowl_pattern2_t;
+#endif /* WL_EXT_WOWL */
+
+#ifdef WL_EXT_TCPKA
+typedef struct tcpka_conn {
+ uint32 sess_id;
+	struct ether_addr dst_mac;	/* Destination MAC */
+	struct ipv4_addr src_ip;	/* Source IP */
+	struct ipv4_addr dst_ip;	/* Destination IP */
+	uint16 ipid;	/* IP Identification */
+ uint16 srcport; /* Source Port Address */
+ uint16 dstport; /* Destination Port Address */
+ uint32 seq; /* TCP Sequence Number */
+ uint32 ack; /* TCP Ack Number */
+ uint16 tcpwin; /* TCP window */
+ uint32 tsval; /* Timestamp Value */
+ uint32 tsecr; /* Timestamp Echo Reply */
+ uint32 len; /* last packet payload len */
+ uint32 ka_payload_len; /* keep alive payload length */
+ uint8 ka_payload[1]; /* keep alive payload */
+} tcpka_conn_t;
+
+typedef struct tcpka_conn_sess {
+ uint32 sess_id; /* session id */
+ uint32 flag; /* enable/disable flag */
+ wl_mtcpkeep_alive_timers_pkt_t tcpka_timers;
+} tcpka_conn_sess_t;
+
+typedef struct tcpka_conn_info {
+ uint32 ipid;
+ uint32 seq;
+ uint32 ack;
+} tcpka_conn_sess_info_t;
+#endif /* WL_EXT_TCPKA */
+
+typedef struct auth_name_map_t {
+ uint auth;
+ uint wpa_auth;
+ char *auth_name;
+} auth_name_map_t;
+
+const auth_name_map_t auth_name_map[] = {
+ {WL_AUTH_OPEN_SYSTEM, WPA_AUTH_DISABLED, "open"},
+ {WL_AUTH_SHARED_KEY, WPA_AUTH_DISABLED, "shared"},
+ {WL_AUTH_OPEN_SYSTEM, WPA_AUTH_PSK, "wpa/psk"},
+ {WL_AUTH_OPEN_SYSTEM, WPA2_AUTH_PSK, "wpa2/psk"},
+ {WL_AUTH_OPEN_SYSTEM, WPA2_AUTH_PSK_SHA256|WPA2_AUTH_PSK, "wpa2/psk/sha256"},
+ {WL_AUTH_OPEN_SYSTEM, WPA2_AUTH_FT|WPA2_AUTH_PSK, "wpa2/psk/ft"},
+ {WL_AUTH_OPEN_SYSTEM, WPA2_AUTH_UNSPECIFIED, "wpa2/eap"},
+ {WL_AUTH_OPEN_SYSTEM, WPA2_AUTH_FT|WPA2_AUTH_UNSPECIFIED, "wpa2/eap/ft"},
+ {WL_AUTH_OPEN_SYSTEM, WPA3_AUTH_SAE_PSK, "wpa3/psk"},
+ {WL_AUTH_SAE_KEY, WPA3_AUTH_SAE_PSK, "wpa3sae/psk"},
+ {WL_AUTH_OPEN_SYSTEM, WPA3_AUTH_SAE_PSK|WPA2_AUTH_PSK, "wpa3/psk"},
+ {WL_AUTH_SAE_KEY, WPA3_AUTH_SAE_PSK|WPA2_AUTH_PSK, "wpa3sae/psk"},
+ {WL_AUTH_OPEN_SYSTEM, 0x20, "wpa3/psk"},
+ {WL_AUTH_SAE_KEY, 0x20, "wpa3sae/psk"},
+ {WL_AUTH_OPEN_SYSTEM, WPA3_AUTH_SAE_PSK|WPA2_AUTH_PSK_SHA256|WPA2_AUTH_PSK, "wpa3/psk/sha256"},
+ {WL_AUTH_SAE_KEY, WPA3_AUTH_SAE_PSK|WPA2_AUTH_PSK_SHA256|WPA2_AUTH_PSK, "wpa3sae/psk/sha256"},
+ {WL_AUTH_OPEN_SYSTEM, 0x20|WPA2_AUTH_PSK_SHA256|WPA2_AUTH_PSK, "wpa3/psk/sha256"},
+ {WL_AUTH_SAE_KEY, 0x20|WPA2_AUTH_PSK_SHA256|WPA2_AUTH_PSK, "wpa3sae/psk/sha256"},
+ {WL_AUTH_OPEN_SYSTEM, WPA3_AUTH_OWE, "wpa3/owe"},
+};
+
+typedef struct wsec_name_map_t {
+ uint wsec;
+ char *wsec_name;
+} wsec_name_map_t;
+
+const wsec_name_map_t wsec_name_map[] = {
+ {WSEC_NONE, "none"},
+ {WEP_ENABLED, "wep"},
+ {TKIP_ENABLED, "tkip"},
+ {AES_ENABLED, "aes"},
+ {TKIP_ENABLED|AES_ENABLED, "tkip/aes"},
+};
+
+static int wl_ext_wl_iovar(struct net_device *dev, char *command, int total_len);
+
+int
+wl_ext_ioctl(struct net_device *dev, u32 cmd, void *arg, u32 len, u32 set)
+{
+ int ret;
+
+ ret = wldev_ioctl(dev, cmd, arg, len, set);
+ if (ret)
+ AEXT_ERROR(dev->name, "cmd=%d, ret=%d\n", cmd, ret);
+ return ret;
+}
+
+int
+wl_ext_iovar_getint(struct net_device *dev, s8 *iovar, s32 *val)
+{
+ int ret;
+
+ ret = wldev_iovar_getint(dev, iovar, val);
+ if (ret)
+ AEXT_ERROR(dev->name, "iovar=%s, ret=%d\n", iovar, ret);
+
+ return ret;
+}
+
+int
+wl_ext_iovar_setint(struct net_device *dev, s8 *iovar, s32 val)
+{
+ int ret;
+
+ ret = wldev_iovar_setint(dev, iovar, val);
+ if (ret)
+ AEXT_ERROR(dev->name, "iovar=%s, ret=%d\n", iovar, ret);
+
+ return ret;
+}
+
+int
+wl_ext_iovar_getbuf(struct net_device *dev, s8 *iovar_name,
+ void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync)
+{
+ int ret;
+
+ ret = wldev_iovar_getbuf(dev, iovar_name, param, paramlen, buf, buflen, buf_sync);
+ if (ret != 0)
+ AEXT_ERROR(dev->name, "iovar=%s, ret=%d\n", iovar_name, ret);
+
+ return ret;
+}
+
+int
+wl_ext_iovar_setbuf(struct net_device *dev, s8 *iovar_name,
+ void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync)
+{
+ int ret;
+
+ ret = wldev_iovar_setbuf(dev, iovar_name, param, paramlen, buf, buflen, buf_sync);
+ if (ret != 0)
+ AEXT_ERROR(dev->name, "iovar=%s, ret=%d\n", iovar_name, ret);
+
+ return ret;
+}
+
+int
+wl_ext_iovar_setbuf_bsscfg(struct net_device *dev, s8 *iovar_name,
+ void *param, s32 paramlen, void *buf, s32 buflen, s32 bsscfg_idx,
+ struct mutex* buf_sync)
+{
+ int ret;
+
+ ret = wldev_iovar_setbuf_bsscfg(dev, iovar_name, param, paramlen,
+ buf, buflen, bsscfg_idx, buf_sync);
+ if (ret < 0)
+ AEXT_ERROR(dev->name, "iovar=%s, ret=%d\n", iovar_name, ret);
+
+ return ret;
+}
+
+static chanspec_t
+wl_ext_chspec_to_legacy(chanspec_t chspec)
+{
+ chanspec_t lchspec;
+
+ if (wf_chspec_malformed(chspec)) {
+ AEXT_ERROR("wlan", "input chanspec (0x%04X) malformed\n", chspec);
+ return INVCHANSPEC;
+ }
+
+ /* get the channel number */
+ lchspec = CHSPEC_CHANNEL(chspec);
+
+ /* convert the band */
+ if (CHSPEC_IS2G(chspec)) {
+ lchspec |= WL_LCHANSPEC_BAND_2G;
+ } else {
+ lchspec |= WL_LCHANSPEC_BAND_5G;
+ }
+
+ /* convert the bw and sideband */
+ if (CHSPEC_IS20(chspec)) {
+ lchspec |= WL_LCHANSPEC_BW_20;
+ lchspec |= WL_LCHANSPEC_CTL_SB_NONE;
+ } else if (CHSPEC_IS40(chspec)) {
+ lchspec |= WL_LCHANSPEC_BW_40;
+ if (CHSPEC_CTL_SB(chspec) == WL_CHANSPEC_CTL_SB_L) {
+ lchspec |= WL_LCHANSPEC_CTL_SB_LOWER;
+ } else {
+ lchspec |= WL_LCHANSPEC_CTL_SB_UPPER;
+ }
+ } else {
+ /* cannot express the bandwidth */
+ char chanbuf[CHANSPEC_STR_LEN];
+ AEXT_ERROR("wlan", "unable to convert chanspec %s (0x%04X) "
+ "to pre-11ac format\n",
+ wf_chspec_ntoa(chspec, chanbuf), chspec);
+ return INVCHANSPEC;
+ }
+
+ return lchspec;
+}
+
+chanspec_t
+wl_ext_chspec_host_to_driver(int ioctl_ver, chanspec_t chanspec)
+{
+ if (ioctl_ver == 1) {
+ chanspec = wl_ext_chspec_to_legacy(chanspec);
+ if (chanspec == INVCHANSPEC) {
+ return chanspec;
+ }
+ }
+ chanspec = htodchanspec(chanspec);
+
+ return chanspec;
+}
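+/* ioctl version 1 firmware expects the pre-11ac (legacy) chanspec layout,
+ * hence the conversion above before the value is handed to the firmware.
+ */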
+
+static void
+wl_ext_ch_to_chanspec(int ioctl_ver, int ch,
+ struct wl_join_params *join_params, size_t *join_params_size)
+{
+ chanspec_t chanspec = 0;
+
+ if (ch != 0) {
+ join_params->params.chanspec_num = 1;
+ join_params->params.chanspec_list[0] = ch;
+
+ if (join_params->params.chanspec_list[0] <= CH_MAX_2G_CHANNEL)
+ chanspec |= WL_CHANSPEC_BAND_2G;
+ else
+ chanspec |= WL_CHANSPEC_BAND_5G;
+
+ chanspec |= WL_CHANSPEC_BW_20;
+ chanspec |= WL_CHANSPEC_CTL_SB_NONE;
+
+ *join_params_size += WL_ASSOC_PARAMS_FIXED_SIZE +
+ join_params->params.chanspec_num * sizeof(chanspec_t);
+
+ join_params->params.chanspec_list[0] &= WL_CHANSPEC_CHAN_MASK;
+ join_params->params.chanspec_list[0] |= chanspec;
+ join_params->params.chanspec_list[0] =
+ wl_ext_chspec_host_to_driver(ioctl_ver,
+ join_params->params.chanspec_list[0]);
+
+ join_params->params.chanspec_num =
+ htod32(join_params->params.chanspec_num);
+ }
+}
+
+#if defined(WL_EXT_IAPSTA) || defined(WL_CFG80211) || defined(WL_ESCAN)
+static chanspec_t
+wl_ext_chspec_from_legacy(chanspec_t legacy_chspec)
+{
+ chanspec_t chspec;
+
+ /* get the channel number */
+ chspec = LCHSPEC_CHANNEL(legacy_chspec);
+
+ /* convert the band */
+ if (LCHSPEC_IS2G(legacy_chspec)) {
+ chspec |= WL_CHANSPEC_BAND_2G;
+ } else {
+ chspec |= WL_CHANSPEC_BAND_5G;
+ }
+
+ /* convert the bw and sideband */
+ if (LCHSPEC_IS20(legacy_chspec)) {
+ chspec |= WL_CHANSPEC_BW_20;
+ } else {
+ chspec |= WL_CHANSPEC_BW_40;
+ if (LCHSPEC_CTL_SB(legacy_chspec) == WL_LCHANSPEC_CTL_SB_LOWER) {
+ chspec |= WL_CHANSPEC_CTL_SB_L;
+ } else {
+ chspec |= WL_CHANSPEC_CTL_SB_U;
+ }
+ }
+
+ if (wf_chspec_malformed(chspec)) {
+ AEXT_ERROR("wlan", "output chanspec (0x%04X) malformed\n", chspec);
+ return INVCHANSPEC;
+ }
+
+ return chspec;
+}
+
+chanspec_t
+wl_ext_chspec_driver_to_host(int ioctl_ver, chanspec_t chanspec)
+{
+ chanspec = dtohchanspec(chanspec);
+ if (ioctl_ver == 1) {
+ chanspec = wl_ext_chspec_from_legacy(chanspec);
+ }
+
+ return chanspec;
+}
+#endif /* WL_EXT_IAPSTA || WL_CFG80211 || WL_ESCAN */
+
+bool
+wl_ext_check_scan(struct net_device *dev, dhd_pub_t *dhdp)
+{
+#ifdef WL_CFG80211
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+#endif /* WL_CFG80211 */
+#ifdef WL_ESCAN
+ struct wl_escan_info *escan = dhdp->escan;
+#endif /* WL_ESCAN */
+
+#ifdef WL_CFG80211
+ if (wl_get_drv_status_all(cfg, SCANNING)) {
+ AEXT_ERROR(dev->name, "cfg80211 scanning...\n");
+ return TRUE;
+ }
+#endif /* WL_CFG80211 */
+
+#ifdef WL_ESCAN
+ if (escan->escan_state == ESCAN_STATE_SCANING) {
+ AEXT_ERROR(dev->name, "escan scanning...\n");
+ return TRUE;
+ }
+#endif /* WL_ESCAN */
+
+ return FALSE;
+}
+
+#if defined(WL_CFG80211) || defined(WL_ESCAN)
+void
+wl_ext_user_sync(struct dhd_pub *dhd, int ifidx, bool lock)
+{
+ struct net_device *dev = dhd_idx2net(dhd, ifidx);
+#ifdef WL_CFG80211
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+#endif /* WL_CFG80211 */
+#ifdef WL_ESCAN
+ struct wl_escan_info *escan = dhd->escan;
+#endif /* WL_ESCAN */
+
+ AEXT_INFO(dev->name, "lock=%d\n", lock);
+
+ if (lock) {
+#if defined(WL_CFG80211)
+ mutex_lock(&cfg->usr_sync);
+#endif
+#if defined(WL_ESCAN)
+ mutex_lock(&escan->usr_sync);
+#endif
+ } else {
+#if defined(WL_CFG80211)
+ mutex_unlock(&cfg->usr_sync);
+#endif
+#if defined(WL_ESCAN)
+ mutex_unlock(&escan->usr_sync);
+#endif
+ }
+}
+#endif /* WL_CFG80211 && WL_ESCAN */
+
+static bool
+wl_ext_event_complete(struct dhd_pub *dhd, int ifidx)
+{
+ struct net_device *dev = dhd_idx2net(dhd, ifidx);
+#ifdef WL_CFG80211
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+#endif /* WL_CFG80211 */
+#ifdef WL_ESCAN
+ struct wl_escan_info *escan = dhd->escan;
+#endif /* WL_ESCAN */
+ bool complete = TRUE;
+
+#ifdef WL_CFG80211
+ if (wl_get_drv_status_all(cfg, SCANNING)) {
+ AEXT_INFO(dev->name, "SCANNING\n");
+ complete = FALSE;
+ }
+ if (wl_get_drv_status_all(cfg, CONNECTING)) {
+ AEXT_INFO(dev->name, "CFG80211 CONNECTING\n");
+ complete = FALSE;
+ }
+ if (wl_get_drv_status_all(cfg, DISCONNECTING)) {
+ AEXT_INFO(dev->name, "DISCONNECTING\n");
+ complete = FALSE;
+ }
+#endif /* WL_CFG80211 */
+#ifdef WL_ESCAN
+ if (escan->escan_state == ESCAN_STATE_SCANING) {
+ AEXT_INFO(dev->name, "ESCAN_STATE_SCANING\n");
+ complete = FALSE;
+ }
+#endif /* WL_ESCAN */
+#ifdef WL_EXT_IAPSTA
+ if (wl_ext_sta_connecting(dev)) {
+ AEXT_INFO(dev->name, "CONNECTING\n");
+ complete = FALSE;
+ }
+#endif /* WL_EXT_IAPSTA */
+
+ return complete;
+}
+
+void
+wl_ext_wait_event_complete(struct dhd_pub *dhd, int ifidx)
+{
+ struct net_device *net;
+ s32 timeout = -1;
+
+ timeout = wait_event_interruptible_timeout(dhd->conf->event_complete,
+ wl_ext_event_complete(dhd, ifidx), msecs_to_jiffies(10000));
+ if (timeout <= 0 || !wl_ext_event_complete(dhd, ifidx)) {
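+		/* the extra call makes wl_ext_event_complete() log which
+		 * state is still pending
+		 */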
+ wl_ext_event_complete(dhd, ifidx);
+ net = dhd_idx2net(dhd, ifidx);
+ AEXT_ERROR(net->name, "timeout\n");
+ }
+}
+
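+/* Query WLC_GET_VERSION and accept either WLC_IOCTL_VERSION or the legacy
+ * value 1; the result selects the chanspec conversion path.
+ */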
+int
+wl_ext_get_ioctl_ver(struct net_device *dev, int *ioctl_ver)
+{
+ int ret = 0;
+ s32 val = 0;
+
+ val = 1;
+ ret = wl_ext_ioctl(dev, WLC_GET_VERSION, &val, sizeof(val), 0);
+ if (ret) {
+ return ret;
+ }
+ val = dtoh32(val);
+ if (val != WLC_IOCTL_VERSION && val != 1) {
+ AEXT_ERROR(dev->name, "Version mismatch, please upgrade. Got %d, expected %d or 1\n",
+ val, WLC_IOCTL_VERSION);
+ return BCME_VERSION;
+ }
+ *ioctl_ver = val;
+
+ return ret;
+}
+
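+/* Workaround: on the chips listed below, swap the AP and STA manual
+ * bsscfg-create sub-commands, as their firmware interprets the two values
+ * the other way around.
+ */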
+void
+wl_ext_bss_iovar_war(struct net_device *ndev, s32 *val)
+{
+ dhd_pub_t *dhd = dhd_get_pub(ndev);
+ uint chip;
+ bool need_war = false;
+
+ chip = dhd_conf_get_chip(dhd);
+
+ if (chip == BCM43362_CHIP_ID || chip == BCM4330_CHIP_ID ||
+ chip == BCM4354_CHIP_ID || chip == BCM4356_CHIP_ID ||
+ chip == BCM4371_CHIP_ID ||
+ chip == BCM43430_CHIP_ID ||
+ chip == BCM4345_CHIP_ID || chip == BCM43454_CHIP_ID ||
+ chip == BCM4359_CHIP_ID ||
+ chip == BCM43143_CHIP_ID || chip == BCM43242_CHIP_ID ||
+ chip == BCM43569_CHIP_ID) {
+ need_war = true;
+ }
+
+ if (need_war) {
+		/* A few firmware branches mishandle the bss iovar and cannot be
+		 * fixed since they are already in production.
+		 */
+ if (*val == WLC_AP_IOV_OP_MANUAL_AP_BSSCFG_CREATE) {
+ *val = WLC_AP_IOV_OP_MANUAL_STA_BSSCFG_CREATE;
+ } else if (*val == WLC_AP_IOV_OP_MANUAL_STA_BSSCFG_CREATE) {
+ *val = WLC_AP_IOV_OP_MANUAL_AP_BSSCFG_CREATE;
+ } else {
+ /* Ignore for other bss enums */
+ return;
+ }
+ AEXT_TRACE(ndev->name, "wl bss %d\n", *val);
+ }
+}
+
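+/* Build a chanspec for the requested channel at the widest bandwidth the
+ * firmware reports via "bw_cap" (or the legacy "mimo_bw_cap"), then apply
+ * it, stepping the bandwidth down 80->40->20 until the firmware accepts it.
+ */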
+int
+wl_ext_set_chanspec(struct net_device *dev, int ioctl_ver,
+ uint16 channel, chanspec_t *ret_chspec)
+{
+ s32 _chan = channel;
+ chanspec_t chspec = 0;
+ chanspec_t fw_chspec = 0;
+ u32 bw = WL_CHANSPEC_BW_20;
+ s32 err = BCME_OK;
+ s32 bw_cap = 0;
+ s8 iovar_buf[WLC_IOCTL_SMLEN];
+ struct {
+ u32 band;
+ u32 bw_cap;
+ } param = {0, 0};
+ uint band;
+
+ if (_chan <= CH_MAX_2G_CHANNEL)
+ band = IEEE80211_BAND_2GHZ;
+ else
+ band = IEEE80211_BAND_5GHZ;
+
+ if (band == IEEE80211_BAND_5GHZ) {
+ param.band = WLC_BAND_5G;
+ err = wl_ext_iovar_getbuf(dev, "bw_cap", &param, sizeof(param),
+ iovar_buf, WLC_IOCTL_SMLEN, NULL);
+ if (err) {
+ if (err != BCME_UNSUPPORTED) {
+ AEXT_ERROR(dev->name, "bw_cap failed, %d\n", err);
+ return err;
+ } else {
+ err = wl_ext_iovar_getint(dev, "mimo_bw_cap", &bw_cap);
+ if (bw_cap != WLC_N_BW_20ALL)
+ bw = WL_CHANSPEC_BW_40;
+ }
+ } else {
+ if (WL_BW_CAP_80MHZ(iovar_buf[0]))
+ bw = WL_CHANSPEC_BW_80;
+ else if (WL_BW_CAP_40MHZ(iovar_buf[0]))
+ bw = WL_CHANSPEC_BW_40;
+ else
+ bw = WL_CHANSPEC_BW_20;
+ }
+ }
+ else if (band == IEEE80211_BAND_2GHZ)
+ bw = WL_CHANSPEC_BW_20;
+
+set_channel:
+ chspec = wf_channel2chspec(_chan, bw);
+ if (wf_chspec_valid(chspec)) {
+ fw_chspec = wl_ext_chspec_host_to_driver(ioctl_ver, chspec);
+ if (fw_chspec != INVCHANSPEC) {
+ if ((err = wl_ext_iovar_setint(dev, "chanspec", fw_chspec)) == BCME_BADCHAN) {
+ if (bw == WL_CHANSPEC_BW_80)
+ goto change_bw;
+ err = wl_ext_ioctl(dev, WLC_SET_CHANNEL, &_chan, sizeof(_chan), 1);
+ WL_MSG(dev->name, "channel %d\n", _chan);
+ } else if (err) {
+ AEXT_ERROR(dev->name, "failed to set chanspec error %d\n", err);
+ } else
+ WL_MSG(dev->name, "channel %d, 0x%x\n", channel, chspec);
+ } else {
+ AEXT_ERROR(dev->name, "failed to convert host chanspec to fw chanspec\n");
+ err = BCME_ERROR;
+ }
+ } else {
+change_bw:
+ if (bw == WL_CHANSPEC_BW_80)
+ bw = WL_CHANSPEC_BW_40;
+ else if (bw == WL_CHANSPEC_BW_40)
+ bw = WL_CHANSPEC_BW_20;
+ else
+ bw = 0;
+ if (bw)
+ goto set_channel;
+ AEXT_ERROR(dev->name, "Invalid chanspec 0x%x\n", chspec);
+ err = BCME_ERROR;
+ }
+ *ret_chspec = fw_chspec;
+
+ return err;
+}
+
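+/* "channel <n>" sets the channel via wl_ext_set_chanspec(); with no
+ * argument it reports the current hw channel.
+ */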
+static int
+wl_ext_channel(struct net_device *dev, char* command, int total_len)
+{
+ int ret;
+ int channel=0;
+ channel_info_t ci;
+ int bytes_written = 0;
+ chanspec_t fw_chspec;
+ int ioctl_ver = 0;
+
+ AEXT_TRACE(dev->name, "cmd %s", command);
+
+ sscanf(command, "%*s %d", &channel);
+
+ if (channel > 0) {
+ wl_ext_get_ioctl_ver(dev, &ioctl_ver);
+ ret = wl_ext_set_chanspec(dev, ioctl_ver, channel, &fw_chspec);
+ } else {
+ if (!(ret = wl_ext_ioctl(dev, WLC_GET_CHANNEL, &ci,
+ sizeof(channel_info_t), FALSE))) {
+ AEXT_TRACE(dev->name, "hw_channel %d\n", ci.hw_channel);
+ AEXT_TRACE(dev->name, "target_channel %d\n", ci.target_channel);
+ AEXT_TRACE(dev->name, "scan_channel %d\n", ci.scan_channel);
+			bytes_written = snprintf(command, total_len,
+				"channel %d", ci.hw_channel);
+ AEXT_TRACE(dev->name, "command result is %s\n", command);
+ ret = bytes_written;
+ }
+ }
+
+ return ret;
+}
+
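+/* "channels" dumps the firmware's valid channel list. Note that the
+ * snprintf chains in these dump helpers pass total_len rather than the
+ * remaining space, so the output is assumed to stay well below total_len.
+ */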
+static int
+wl_ext_channels(struct net_device *dev, char* command, int total_len)
+{
+ int ret, i;
+ int bytes_written = -1;
+ u8 valid_chan_list[sizeof(u32)*(WL_NUMCHANNELS + 1)];
+ wl_uint32_list_t *list;
+
+ AEXT_TRACE(dev->name, "cmd %s", command);
+
+ memset(valid_chan_list, 0, sizeof(valid_chan_list));
+ list = (wl_uint32_list_t *)(void *) valid_chan_list;
+ list->count = htod32(WL_NUMCHANNELS);
+ ret = wl_ext_ioctl(dev, WLC_GET_VALID_CHANNELS, valid_chan_list,
+ sizeof(valid_chan_list), 0);
+ if (ret<0) {
+ AEXT_ERROR(dev->name, "get channels failed with %d\n", ret);
+ } else {
+ bytes_written = snprintf(command, total_len, "channels");
+ for (i = 0; i < dtoh32(list->count); i++) {
+ bytes_written += snprintf(command+bytes_written, total_len, " %d",
+ dtoh32(list->element[i]));
+ }
+ AEXT_TRACE(dev->name, "command result is %s\n", command);
+ ret = bytes_written;
+ }
+
+ return ret;
+}
+
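+/* "roam_trigger <n>" sets the roam trigger (dBm) for all bands; with no
+ * argument it reports the current 2G and 5G values.
+ */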
+static int
+wl_ext_roam_trigger(struct net_device *dev, char* command, int total_len)
+{
+ int ret = 0;
+ int roam_trigger[2] = {0, 0};
+ int trigger[2]= {0, 0};
+ int bytes_written=-1;
+
+ sscanf(command, "%*s %10d", &roam_trigger[0]);
+
+ if (roam_trigger[0]) {
+ roam_trigger[1] = WLC_BAND_ALL;
+ ret = wl_ext_ioctl(dev, WLC_SET_ROAM_TRIGGER, roam_trigger,
+ sizeof(roam_trigger), 1);
+ } else {
+ roam_trigger[1] = WLC_BAND_2G;
+ ret = wl_ext_ioctl(dev, WLC_GET_ROAM_TRIGGER, roam_trigger,
+ sizeof(roam_trigger), 0);
+ if (!ret)
+ trigger[0] = roam_trigger[0];
+
+ roam_trigger[1] = WLC_BAND_5G;
+		ret = wl_ext_ioctl(dev, WLC_GET_ROAM_TRIGGER, roam_trigger,
+ sizeof(roam_trigger), 0);
+ if (!ret)
+ trigger[1] = roam_trigger[0];
+
+ AEXT_TRACE(dev->name, "roam_trigger %d %d\n", trigger[0], trigger[1]);
+ bytes_written = snprintf(command, total_len, "%d %d", trigger[0], trigger[1]);
+ ret = bytes_written;
+ }
+
+ return ret;
+}
+
+static int
+wl_ext_pm(struct net_device *dev, char *command, int total_len)
+{
+ int pm=-1, ret = -1;
+ char *pm_local;
+ int bytes_written=-1;
+
+ AEXT_TRACE(dev->name, "cmd %s", command);
+
+ sscanf(command, "%*s %d", &pm);
+
+ if (pm >= 0) {
+ ret = wl_ext_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm), 1);
+ } else {
+ ret = wl_ext_ioctl(dev, WLC_GET_PM, &pm, sizeof(pm), 0);
+ if (!ret) {
+ AEXT_TRACE(dev->name, "PM = %d", pm);
+ if (pm == PM_OFF)
+ pm_local = "PM_OFF";
+ else if(pm == PM_MAX)
+ pm_local = "PM_MAX";
+ else if(pm == PM_FAST)
+ pm_local = "PM_FAST";
+ else {
+ pm = 0;
+ pm_local = "Invalid";
+ }
+ bytes_written = snprintf(command, total_len, "PM %s", pm_local);
+ AEXT_TRACE(dev->name, "command result is %s\n", command);
+ ret = bytes_written;
+ }
+ }
+
+ return ret;
+}
+
+static int
+wl_ext_monitor(struct net_device *dev, char *command, int total_len)
+{
+ int val = -1, ret = -1;
+ int bytes_written=-1;
+
+ sscanf(command, "%*s %d", &val);
+
+ if (val >=0) {
+ ret = wl_ext_ioctl(dev, WLC_SET_MONITOR, &val, sizeof(val), 1);
+ } else {
+ ret = wl_ext_ioctl(dev, WLC_GET_MONITOR, &val, sizeof(val), 0);
+ if (!ret) {
+ AEXT_TRACE(dev->name, "monitor = %d\n", val);
+ bytes_written = snprintf(command, total_len, "monitor %d", val);
+ AEXT_TRACE(dev->name, "command result is %s\n", command);
+ ret = bytes_written;
+ }
+ }
+
+ return ret;
+}
+
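+/* Associate using the firmware "join" iovar, with the assoc scan tuned to
+ * the target channel; chips or firmware without "join" support fall back
+ * to WLC_SET_SSID.
+ */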
+s32
+wl_ext_connect(struct net_device *dev, struct wl_conn_info *conn_info)
+{
+ struct dhd_pub *dhd = dhd_get_pub(dev);
+ wl_extjoin_params_t *ext_join_params = NULL;
+ struct wl_join_params join_params;
+ size_t join_params_size;
+ s32 err = 0;
+ u32 chan_cnt = 0;
+ s8 *iovar_buf = NULL;
+ int ioctl_ver = 0;
+ char sec[64];
+
+ wl_ext_get_ioctl_ver(dev, &ioctl_ver);
+
+ if (dhd->conf->chip == BCM43362_CHIP_ID)
+ goto set_ssid;
+
+ if (conn_info->channel) {
+ chan_cnt = 1;
+ }
+
+ iovar_buf = kzalloc(WLC_IOCTL_MAXLEN, GFP_KERNEL);
+ if (iovar_buf == NULL) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+	/*
+	 * Join with a specific BSSID and the cached SSID;
+	 * if the SSID is zero, join based on the BSSID alone.
+	 */
+ join_params_size = WL_EXTJOIN_PARAMS_FIXED_SIZE +
+ chan_cnt * sizeof(chanspec_t);
+ ext_join_params = (wl_extjoin_params_t*)kzalloc(join_params_size, GFP_KERNEL);
+ if (ext_join_params == NULL) {
+ err = -ENOMEM;
+ goto exit;
+ }
+ ext_join_params->ssid.SSID_len = min((uint32)sizeof(ext_join_params->ssid.SSID),
+ conn_info->ssid.SSID_len);
+ memcpy(&ext_join_params->ssid.SSID, conn_info->ssid.SSID, ext_join_params->ssid.SSID_len);
+ ext_join_params->ssid.SSID_len = htod32(ext_join_params->ssid.SSID_len);
+	/* increase dwell time to receive the probe response or detect the beacon
+	 * of the target AP in a noisy environment, only during the connect command
+	 */
+ ext_join_params->scan.active_time = chan_cnt ? WL_SCAN_JOIN_ACTIVE_DWELL_TIME_MS : -1;
+ ext_join_params->scan.passive_time = chan_cnt ? WL_SCAN_JOIN_PASSIVE_DWELL_TIME_MS : -1;
+ /* Set up join scan parameters */
+ ext_join_params->scan.scan_type = -1;
+ ext_join_params->scan.nprobes = chan_cnt ?
+ (ext_join_params->scan.active_time/WL_SCAN_JOIN_PROBE_INTERVAL_MS) : -1;
+ ext_join_params->scan.home_time = -1;
+
+ if (memcmp(&ether_null, &conn_info->bssid, ETHER_ADDR_LEN))
+ memcpy(&ext_join_params->assoc.bssid, &conn_info->bssid, ETH_ALEN);
+ else
+ memcpy(&ext_join_params->assoc.bssid, &ether_bcast, ETH_ALEN);
+ ext_join_params->assoc.chanspec_num = chan_cnt;
+ if (chan_cnt) {
+ u16 band, bw, ctl_sb;
+ chanspec_t chspec;
+ band = (conn_info->channel <= CH_MAX_2G_CHANNEL) ? WL_CHANSPEC_BAND_2G
+ : WL_CHANSPEC_BAND_5G;
+ bw = WL_CHANSPEC_BW_20;
+ ctl_sb = WL_CHANSPEC_CTL_SB_NONE;
+ chspec = (conn_info->channel | band | bw | ctl_sb);
+ ext_join_params->assoc.chanspec_list[0] &= WL_CHANSPEC_CHAN_MASK;
+ ext_join_params->assoc.chanspec_list[0] |= chspec;
+ ext_join_params->assoc.chanspec_list[0] =
+ wl_ext_chspec_host_to_driver(ioctl_ver,
+ ext_join_params->assoc.chanspec_list[0]);
+ }
+ ext_join_params->assoc.chanspec_num = htod32(ext_join_params->assoc.chanspec_num);
+
+ wl_ext_get_sec(dev, 0, sec, sizeof(sec), TRUE);
+ WL_MSG(dev->name,
+ "Connecting with %pM channel (%d) ssid \"%s\", len (%d), sec=%s\n\n",
+ &ext_join_params->assoc.bssid, conn_info->channel,
+ ext_join_params->ssid.SSID, ext_join_params->ssid.SSID_len, sec);
+ err = wl_ext_iovar_setbuf_bsscfg(dev, "join", ext_join_params,
+ join_params_size, iovar_buf, WLC_IOCTL_MAXLEN, conn_info->bssidx, NULL);
+
+ if (err) {
+ if (err == BCME_UNSUPPORTED) {
+ AEXT_TRACE(dev->name, "join iovar is not supported\n");
+ goto set_ssid;
+ } else {
+ AEXT_ERROR(dev->name, "error (%d)\n", err);
+ goto exit;
+ }
+ } else
+ goto exit;
+
+set_ssid:
+ memset(&join_params, 0, sizeof(join_params));
+ join_params_size = sizeof(join_params.ssid);
+
+ join_params.ssid.SSID_len = min((uint32)sizeof(join_params.ssid.SSID),
+ conn_info->ssid.SSID_len);
+ memcpy(&join_params.ssid.SSID, conn_info->ssid.SSID, join_params.ssid.SSID_len);
+ join_params.ssid.SSID_len = htod32(join_params.ssid.SSID_len);
+ if (memcmp(&ether_null, &conn_info->bssid, ETHER_ADDR_LEN))
+ memcpy(&join_params.params.bssid, &conn_info->bssid, ETH_ALEN);
+ else
+ memcpy(&join_params.params.bssid, &ether_bcast, ETH_ALEN);
+
+ wl_ext_ch_to_chanspec(ioctl_ver, conn_info->channel, &join_params, &join_params_size);
+ AEXT_TRACE(dev->name, "join_param_size %zu\n", join_params_size);
+
+ if (join_params.ssid.SSID_len < IEEE80211_MAX_SSID_LEN) {
+ AEXT_INFO(dev->name, "ssid \"%s\", len (%d)\n", join_params.ssid.SSID,
+ join_params.ssid.SSID_len);
+ }
+ wl_ext_get_sec(dev, 0, sec, sizeof(sec), TRUE);
+ WL_MSG(dev->name,
+ "Connecting with %pM channel (%d) ssid \"%s\", len (%d), sec=%s\n\n",
+ &join_params.params.bssid, conn_info->channel,
+ join_params.ssid.SSID, join_params.ssid.SSID_len, sec);
+ err = wl_ext_ioctl(dev, WLC_SET_SSID, &join_params, join_params_size, 1);
+
+exit:
+#ifdef WL_EXT_IAPSTA
+ if (!err)
+ wl_ext_add_remove_pm_enable_work(dev, TRUE);
+#endif /* WL_EXT_IAPSTA */
+ if (iovar_buf)
+ kfree(iovar_buf);
+ if (ext_join_params)
+ kfree(ext_join_params);
+ return err;
+}
+
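+/* Format the current auth/wpa_auth/mfp/wsec settings as a compact string
+ * (e.g. "<auth>/mfpc/<wsec>") for logging and private-command output.
+ */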
+void
+wl_ext_get_sec(struct net_device *dev, int ifmode, char *sec, int total_len, bool dump)
+{
+ int auth=0, wpa_auth=0, wsec=0, mfp=0, i;
+ int bytes_written=0;
+ bool match = FALSE;
+
+ memset(sec, 0, total_len);
+ wl_ext_iovar_getint(dev, "auth", &auth);
+ wl_ext_iovar_getint(dev, "wpa_auth", &wpa_auth);
+ wl_ext_iovar_getint(dev, "wsec", &wsec);
+ wldev_iovar_getint(dev, "mfp", &mfp);
+
+#ifdef WL_EXT_IAPSTA
+ if (ifmode == IMESH_MODE) {
+ if (auth == WL_AUTH_OPEN_SYSTEM && wpa_auth == WPA_AUTH_DISABLED) {
+ bytes_written += snprintf(sec+bytes_written, total_len, "open");
+ } else if (auth == WL_AUTH_OPEN_SYSTEM && wpa_auth == WPA2_AUTH_PSK) {
+ bytes_written += snprintf(sec+bytes_written, total_len, "sae");
+ } else {
+ bytes_written += snprintf(sec+bytes_written, total_len, "%d/0x%x",
+ auth, wpa_auth);
+ }
+ } else
+#endif /* WL_EXT_IAPSTA */
+ {
+ match = FALSE;
+ for (i=0; i<sizeof(auth_name_map)/sizeof(auth_name_map[0]); i++) {
+ const auth_name_map_t* row = &auth_name_map[i];
+ if (row->auth == auth && row->wpa_auth == wpa_auth) {
+ bytes_written += snprintf(sec+bytes_written, total_len, "%s",
+ row->auth_name);
+ match = TRUE;
+ break;
+ }
+ }
+ if (!match) {
+ bytes_written += snprintf(sec+bytes_written, total_len, "%d/0x%x",
+ auth, wpa_auth);
+ }
+ }
+
+ if (mfp == WL_MFP_NONE) {
+ bytes_written += snprintf(sec+bytes_written, total_len, "/mfpn");
+ } else if (mfp == WL_MFP_CAPABLE) {
+ bytes_written += snprintf(sec+bytes_written, total_len, "/mfpc");
+ } else if (mfp == WL_MFP_REQUIRED) {
+ bytes_written += snprintf(sec+bytes_written, total_len, "/mfpr");
+ } else {
+ bytes_written += snprintf(sec+bytes_written, total_len, "/%d", mfp);
+ }
+
+#ifdef WL_EXT_IAPSTA
+ if (ifmode == IMESH_MODE) {
+ if (wsec == WSEC_NONE) {
+ bytes_written += snprintf(sec+bytes_written, total_len, "/none");
+ } else {
+ bytes_written += snprintf(sec+bytes_written, total_len, "/aes");
+ }
+ } else
+#endif /* WL_EXT_IAPSTA */
+ {
+ match = FALSE;
+ for (i=0; i<sizeof(wsec_name_map)/sizeof(wsec_name_map[0]); i++) {
+ const wsec_name_map_t* row = &wsec_name_map[i];
+ if (row->wsec == (wsec&0x7)) {
+ bytes_written += snprintf(sec+bytes_written, total_len, "/%s",
+ row->wsec_name);
+ match = TRUE;
+ break;
+ }
+ }
+ if (!match) {
+ bytes_written += snprintf(sec+bytes_written, total_len, "/0x%x", wsec);
+ }
+ }
+ if (dump) {
+ AEXT_INFO(dev->name, "auth/wpa_auth/mfp/wsec = %d/0x%x/%d/0x%x\n",
+ auth, wpa_auth, mfp, wsec);
+ }
+}
+
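+/* Channels 52-144 are the DFS range of the 5G band. */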
+bool
+wl_ext_dfs_chan(uint16 chan)
+{
+ if (chan >= 52 && chan <= 144)
+ return TRUE;
+ return FALSE;
+}
+
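+/* Pick the first usable 2G and 5G channels (optionally skipping DFS) from
+ * the firmware's valid channel list; the results are returned through
+ * chan_2g/chan_5g, the return value itself is always 0.
+ */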
+uint16
+wl_ext_get_default_chan(struct net_device *dev,
+ uint16 *chan_2g, uint16 *chan_5g, bool nodfs)
+{
+ struct dhd_pub *dhd = dhd_get_pub(dev);
+ uint16 chan_tmp = 0, chan = 0;
+ wl_uint32_list_t *list;
+ u8 valid_chan_list[sizeof(u32)*(WL_NUMCHANNELS + 1)];
+ s32 ret = BCME_OK;
+ int i;
+
+ *chan_2g = 0;
+ *chan_5g = 0;
+ memset(valid_chan_list, 0, sizeof(valid_chan_list));
+ list = (wl_uint32_list_t *)(void *) valid_chan_list;
+ list->count = htod32(WL_NUMCHANNELS);
+ ret = wl_ext_ioctl(dev, WLC_GET_VALID_CHANNELS, valid_chan_list,
+ sizeof(valid_chan_list), 0);
+ if (ret == 0) {
+ for (i=0; i<dtoh32(list->count); i++) {
+ chan_tmp = dtoh32(list->element[i]);
+ if (!dhd_conf_match_channel(dhd, chan_tmp))
+ continue;
+ if (chan_tmp <= 13 && !*chan_2g) {
+ *chan_2g = chan_tmp;
+ } else if (chan_tmp >= 36 && chan_tmp <= 161 && !*chan_5g) {
+ if (wl_ext_dfs_chan(chan_tmp) && nodfs)
+ continue;
+ else
+ *chan_5g = chan_tmp;
+ }
+ }
+ }
+
+ return chan;
+}
+
+int
+wl_ext_set_scan_time(struct net_device *dev, int scan_time,
+ uint32 scan_get, uint32 scan_set)
+{
+ int ret, cur_scan_time;
+
+ ret = wl_ext_ioctl(dev, scan_get, &cur_scan_time, sizeof(cur_scan_time), 0);
+ if (ret)
+ return 0;
+
+ if (scan_time != cur_scan_time)
+ wl_ext_ioctl(dev, scan_set, &scan_time, sizeof(scan_time), 1);
+
+ return cur_scan_time;
+}
+
+static int
+wl_ext_wlmsglevel(struct net_device *dev, char *command, int total_len)
+{
+ int val = -1, ret = 0;
+ int bytes_written = 0;
+
+ sscanf(command, "%*s %x", &val);
+
+ if (val >=0) {
+ if (val & DHD_ANDROID_VAL) {
+ android_msg_level = (uint)(val & 0xFFFF);
+ WL_MSG(dev->name, "android_msg_level=0x%x\n", android_msg_level);
+ }
+#if defined(WL_WIRELESS_EXT)
+ else if (val & DHD_IW_VAL) {
+ iw_msg_level = (uint)(val & 0xFFFF);
+ WL_MSG(dev->name, "iw_msg_level=0x%x\n", iw_msg_level);
+ }
+#endif
+#ifdef WL_CFG80211
+ else if (val & DHD_CFG_VAL) {
+ wl_cfg80211_enable_trace((u32)(val & 0xFFFF));
+ }
+#endif
+ else if (val & DHD_CONFIG_VAL) {
+ config_msg_level = (uint)(val & 0xFFFF);
+ WL_MSG(dev->name, "config_msg_level=0x%x\n", config_msg_level);
+ }
+ else if (val & DHD_DUMP_VAL) {
+ dump_msg_level = (uint)(val & 0xFFFF);
+ WL_MSG(dev->name, "dump_msg_level=0x%x\n", dump_msg_level);
+ }
+ }
+ else {
+ bytes_written += snprintf(command+bytes_written, total_len,
+ "android_msg_level=0x%x", android_msg_level);
+#if defined(WL_WIRELESS_EXT)
+ bytes_written += snprintf(command+bytes_written, total_len,
+ "\niw_msg_level=0x%x", iw_msg_level);
+#endif
+#ifdef WL_CFG80211
+ bytes_written += snprintf(command+bytes_written, total_len,
+ "\nwl_dbg_level=0x%x", wl_dbg_level);
+#endif
+ bytes_written += snprintf(command+bytes_written, total_len,
+ "\nconfig_msg_level=0x%x", config_msg_level);
+ bytes_written += snprintf(command+bytes_written, total_len,
+ "\ndump_msg_level=0x%x", dump_msg_level);
+ AEXT_INFO(dev->name, "%s\n", command);
+ ret = bytes_written;
+ }
+
+ return ret;
+}
+
+#ifdef WL_CFG80211
+bool
+wl_legacy_chip_check(struct net_device *net)
+{
+ struct dhd_pub *dhd = dhd_get_pub(net);
+ uint chip;
+
+ chip = dhd_conf_get_chip(dhd);
+
+ if (chip == BCM43362_CHIP_ID || chip == BCM4330_CHIP_ID ||
+ chip == BCM4334_CHIP_ID || chip == BCM43340_CHIP_ID ||
+ chip == BCM43341_CHIP_ID || chip == BCM4324_CHIP_ID ||
+ chip == BCM4335_CHIP_ID || chip == BCM4339_CHIP_ID ||
+ chip == BCM4354_CHIP_ID || chip == BCM4356_CHIP_ID ||
+ chip == BCM4371_CHIP_ID ||
+ chip == BCM43430_CHIP_ID ||
+ chip == BCM4345_CHIP_ID || chip == BCM43454_CHIP_ID ||
+ chip == BCM4359_CHIP_ID ||
+ chip == BCM43143_CHIP_ID || chip == BCM43242_CHIP_ID ||
+ chip == BCM43569_CHIP_ID) {
+ return true;
+ }
+
+ return false;
+}
+
+bool
+wl_new_chip_check(struct net_device *net)
+{
+ struct dhd_pub *dhd = dhd_get_pub(net);
+ uint chip;
+
+ chip = dhd_conf_get_chip(dhd);
+
+ if (chip == BCM4359_CHIP_ID || chip == BCM43012_CHIP_ID ||
+ chip == BCM43751_CHIP_ID || chip == BCM43752_CHIP_ID) {
+ return true;
+ }
+
+ return false;
+}
+
+bool
+wl_extsae_chip(struct dhd_pub *dhd)
+{
+ uint chip;
+
+ chip = dhd_conf_get_chip(dhd);
+
+ if (chip == BCM43362_CHIP_ID || chip == BCM4330_CHIP_ID ||
+ chip == BCM4334_CHIP_ID || chip == BCM43340_CHIP_ID ||
+ chip == BCM43341_CHIP_ID || chip == BCM4324_CHIP_ID ||
+ chip == BCM4335_CHIP_ID || chip == BCM4339_CHIP_ID ||
+ chip == BCM4354_CHIP_ID || chip == BCM4356_CHIP_ID ||
+ chip == BCM43143_CHIP_ID || chip == BCM43242_CHIP_ID ||
+ chip == BCM43569_CHIP_ID) {
+ return false;
+ }
+
+ return true;
+}
+#endif
+
+#ifdef WLEASYMESH
+#define CMD_EASYMESH "EASYMESH"
+//Set map 4 and dwds 1 on wlan0 interface
+#define EASYMESH_SLAVE "slave"
+#define EASYMESH_MASTER "master"
+
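+/* "EASYMESH slave|master": take the interface down, program the "map" and
+ * "dwds" iovars (4/1 for slave, 0/0 for master), then bring it back up.
+ */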
+static int
+wl_ext_easymesh(struct net_device *dev, char* command, int total_len)
+{
+ int ret = 0, wlc_down = 1, wlc_up = 1, map = 4, dwds = 1;
+
+ AEXT_TRACE(dev->name, "command=%s, len=%d\n", command, total_len);
+	/* slave: map 4, dwds 1; master: map 0, dwds 0 */
+	if (strncmp(command, EASYMESH_SLAVE, strlen(EASYMESH_SLAVE)) == 0) {
+		map = 4;
+		dwds = 1;
+	} else if (strncmp(command, EASYMESH_MASTER, strlen(EASYMESH_MASTER)) == 0) {
+		map = 0;
+		dwds = 0;
+	} else {
+		goto exit;
+	}
+
+	WL_MSG(dev->name, "try to set map %d, dwds %d\n", map, dwds);
+	ret = wl_ext_ioctl(dev, WLC_DOWN, &wlc_down, sizeof(wlc_down), 1);
+	if (ret)
+		goto exit;
+	wl_ext_iovar_setint(dev, "map", map);
+	wl_ext_iovar_setint(dev, "dwds", dwds);
+	ret = wl_ext_ioctl(dev, WLC_UP, &wlc_up, sizeof(wlc_up), 1);
+	if (ret)
+		goto exit;
+
+exit:
+ return ret;
+}
+#endif /* WLEASYMESH */
+
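+/* Add or delete ("add"/"del") a vendor IE built from a hex string of
+ * OUI+data (e.g. 0x00904c01020304) for the frame types given in pktflag.
+ */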
+int
+wl_ext_add_del_ie(struct net_device *dev, uint pktflag, char *ie_data, const char* add_del_cmd)
+{
+ vndr_ie_setbuf_t *vndr_ie = NULL;
+ char iovar_buf[WLC_IOCTL_SMLEN]="\0";
+ int ie_data_len = 0, tot_len = 0, iecount;
+ int err = -1;
+
+ if (!strlen(ie_data)) {
+ AEXT_ERROR(dev->name, "wrong ie %s\n", ie_data);
+ goto exit;
+ }
+
+ tot_len = (int)(sizeof(vndr_ie_setbuf_t) + ((strlen(ie_data)-2)/2));
+ vndr_ie = (vndr_ie_setbuf_t *) kzalloc(tot_len, GFP_KERNEL);
+ if (!vndr_ie) {
+ AEXT_ERROR(dev->name, "IE memory alloc failed\n");
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ /* Copy the vndr_ie SET command ("add"/"del") to the buffer */
+ strncpy(vndr_ie->cmd, add_del_cmd, VNDR_IE_CMD_LEN - 1);
+ vndr_ie->cmd[VNDR_IE_CMD_LEN - 1] = '\0';
+
+ /* Set the IE count - the buffer contains only 1 IE */
+ iecount = htod32(1);
+ memcpy((void *)&vndr_ie->vndr_ie_buffer.iecount, &iecount, sizeof(s32));
+
+ /* Set packet flag to indicate that BEACON's will contain this IE */
+ pktflag = htod32(pktflag);
+ memcpy((void *)&vndr_ie->vndr_ie_buffer.vndr_ie_list[0].pktflag, &pktflag,
+ sizeof(u32));
+
+ /* Set the IE ID */
+ vndr_ie->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.id = (uchar)DOT11_MNG_VS_ID;
+
+ /* Set the IE LEN */
+ vndr_ie->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.len = (strlen(ie_data)-2)/2;
+
+ /* Set the IE OUI and DATA */
+ ie_data_len = wl_pattern_atoh(ie_data,
+ (char *)vndr_ie->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.oui);
+ if (ie_data_len <= 0) {
+ AEXT_ERROR(dev->name, "wrong ie_data_len %d\n", (int)strlen(ie_data)-2);
+ goto exit;
+ }
+
+ err = wl_ext_iovar_setbuf(dev, "vndr_ie", vndr_ie, tot_len, iovar_buf,
+ sizeof(iovar_buf), NULL);
+
+exit:
+ if (vndr_ie) {
+ kfree(vndr_ie);
+ }
+ return err;
+}
+
+#ifdef IDHCP
+/*
+terence 20190409:
+dhd_priv wl dhcpc_dump
+dhd_priv wl dhcpc_param <client ip> <server ip> <lease time>
+*/
+static int
+wl_ext_dhcpc_dump(struct net_device *dev, char *data, char *command,
+ int total_len)
+{
+ int ret = 0;
+ int bytes_written = 0;
+ uint32 ip_addr;
+ char buf[20]="";
+
+ if (!data) {
+ ret = wl_ext_iovar_getint(dev, "dhcpc_ip_addr", &ip_addr);
+ if (!ret) {
+ bcm_ip_ntoa((struct ipv4_addr *)&ip_addr, buf);
+ bytes_written += snprintf(command+bytes_written, total_len,
+ "ipaddr %s ", buf);
+ }
+
+ ret = wl_ext_iovar_getint(dev, "dhcpc_ip_mask", &ip_addr);
+ if (!ret) {
+ bcm_ip_ntoa((struct ipv4_addr *)&ip_addr, buf);
+ bytes_written += snprintf(command+bytes_written, total_len,
+ "mask %s ", buf);
+ }
+
+ ret = wl_ext_iovar_getint(dev, "dhcpc_ip_gateway", &ip_addr);
+ if (!ret) {
+ bcm_ip_ntoa((struct ipv4_addr *)&ip_addr, buf);
+ bytes_written += snprintf(command+bytes_written, total_len,
+ "gw %s ", buf);
+ }
+
+ ret = wl_ext_iovar_getint(dev, "dhcpc_ip_dnsserv", &ip_addr);
+ if (!ret) {
+ bcm_ip_ntoa((struct ipv4_addr *)&ip_addr, buf);
+ bytes_written += snprintf(command+bytes_written, total_len,
+ "dnsserv %s ", buf);
+ }
+
+ if (!bytes_written)
+ bytes_written = -1;
+
+ AEXT_TRACE(dev->name, "command result is %s\n", command);
+ }
+
+ return bytes_written;
+}
+
+int
+wl_ext_dhcpc_param(struct net_device *dev, char *data, char *command,
+ int total_len)
+{
+ int ret = -1, bytes_written = 0;
+ char ip_addr_str[20]="", ip_serv_str[20]="";
+ struct dhcpc_parameter dhcpc_param;
+ uint32 ip_addr, ip_serv, lease_time;
+ char iovar_buf[WLC_IOCTL_SMLEN]="\0";
+
+ if (data) {
+ AEXT_TRACE(dev->name, "cmd %s", command);
+		sscanf(data, "%19s %19s %u", ip_addr_str, ip_serv_str, &lease_time);
+ AEXT_TRACE(dev->name, "ip_addr = %s, ip_serv = %s, lease_time = %d",
+ ip_addr_str, ip_serv_str, lease_time);
+
+ memset(&dhcpc_param, 0, sizeof(struct dhcpc_parameter));
+ if (!bcm_atoipv4(ip_addr_str, (struct ipv4_addr *)&ip_addr)) {
+ AEXT_ERROR(dev->name, "wrong ip_addr_str %s\n", ip_addr_str);
+ ret = -1;
+ goto exit;
+ }
+ dhcpc_param.ip_addr = ip_addr;
+
+		if (!bcm_atoipv4(ip_serv_str, (struct ipv4_addr *)&ip_serv)) {
+			AEXT_ERROR(dev->name, "wrong ip_serv_str %s\n", ip_serv_str);
+ ret = -1;
+ goto exit;
+ }
+ dhcpc_param.ip_serv = ip_serv;
+ dhcpc_param.lease_time = lease_time;
+ ret = wl_ext_iovar_setbuf(dev, "dhcpc_param", &dhcpc_param,
+ sizeof(struct dhcpc_parameter), iovar_buf, sizeof(iovar_buf), NULL);
+ } else {
+ ret = wl_ext_iovar_getbuf(dev, "dhcpc_param", &dhcpc_param,
+ sizeof(struct dhcpc_parameter), iovar_buf, WLC_IOCTL_SMLEN, NULL);
+ if (!ret) {
+ bcm_ip_ntoa((struct ipv4_addr *)&dhcpc_param.ip_addr, ip_addr_str);
+ bytes_written += snprintf(command + bytes_written, total_len,
+ "ip_addr %s\n", ip_addr_str);
+ bcm_ip_ntoa((struct ipv4_addr *)&dhcpc_param.ip_serv, ip_serv_str);
+ bytes_written += snprintf(command + bytes_written, total_len,
+ "ip_serv %s\n", ip_serv_str);
+ bytes_written += snprintf(command + bytes_written, total_len,
+ "lease_time %d\n", dhcpc_param.lease_time);
+ AEXT_TRACE(dev->name, "command result is %s\n", command);
+ ret = bytes_written;
+ }
+ }
+
+ exit:
+ return ret;
+}
+#endif /* IDHCP */
+
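+/* "mkeep_alive <id> <period> <hex packet>" installs a keep-alive packet via
+ * dhd_conf_mkeep_alive(); with a negative period it dumps the entry for <id>.
+ */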
+int
+wl_ext_mkeep_alive(struct net_device *dev, char *data, char *command,
+ int total_len)
+{
+ struct dhd_pub *dhd = dhd_get_pub(dev);
+ wl_mkeep_alive_pkt_t *mkeep_alive_pktp;
+ int ret = -1, i, ifidx, id, period=-1;
+ char *packet = NULL, *buf = NULL;
+ int bytes_written = 0;
+
+ if (data) {
+ buf = kmalloc(total_len, GFP_KERNEL);
+ if (buf == NULL) {
+			AEXT_ERROR(dev->name, "Failed to allocate buffer of %d bytes\n", total_len);
+ goto exit;
+ }
+ packet = kmalloc(WLC_IOCTL_SMLEN, GFP_KERNEL);
+ if (packet == NULL) {
+ AEXT_ERROR(dev->name, "Failed to allocate buffer of %d bytes\n", WLC_IOCTL_SMLEN);
+ goto exit;
+ }
+ AEXT_TRACE(dev->name, "cmd %s", command);
+		sscanf(data, "%d %d %255s", &id, &period, packet);
+ AEXT_TRACE(dev->name, "id=%d, period=%d, packet=%s", id, period, packet);
+ if (period >= 0) {
+ ifidx = dhd_net2idx(dhd->info, dev);
+ ret = dhd_conf_mkeep_alive(dhd, ifidx, id, period, packet, FALSE);
+ } else {
+ if (id < 0)
+ id = 0;
+ ret = wl_ext_iovar_getbuf(dev, "mkeep_alive", &id, sizeof(id), buf,
+ total_len, NULL);
+ if (!ret) {
+ mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) buf;
+ bytes_written += snprintf(command+bytes_written, total_len,
+ "Id :%d\n"
+ "Period (msec) :%d\n"
+ "Length :%d\n"
+ "Packet :0x",
+ mkeep_alive_pktp->keep_alive_id,
+ dtoh32(mkeep_alive_pktp->period_msec),
+ dtoh16(mkeep_alive_pktp->len_bytes));
+ for (i=0; i<mkeep_alive_pktp->len_bytes; i++) {
+ bytes_written += snprintf(command+bytes_written, total_len,
+ "%02x", mkeep_alive_pktp->data[i]);
+ }
+ AEXT_TRACE(dev->name, "command result is %s\n", command);
+ ret = bytes_written;
+ }
+ }
+ }
+
+exit:
+ if (buf)
+ kfree(buf);
+ if (packet)
+ kfree(packet);
+ return ret;
+}
+
+#ifdef WL_EXT_TCPKA
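+/* TCP keep-alive offload: program one session (MAC, IPs, ports, TCP state
+ * and payload) into the firmware with the "tcpka_conn_add" iovar.
+ */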
+static int
+wl_ext_tcpka_conn_add(struct net_device *dev, char *data, char *command,
+ int total_len)
+{
+ int ret = 0;
+ s8 iovar_buf[WLC_IOCTL_SMLEN];
+ tcpka_conn_t *tcpka = NULL;
+ uint32 sess_id = 0, ipid = 0, srcport = 0, dstport = 0, seq = 0, ack = 0,
+ tcpwin = 0, tsval = 0, tsecr = 0, len = 0, ka_payload_len = 0;
+ char dst_mac[ETHER_ADDR_STR_LEN], src_ip[IPV4_ADDR_STR_LEN],
+ dst_ip[IPV4_ADDR_STR_LEN], ka_payload[32];
+
+ if (data) {
+ memset(dst_mac, 0, sizeof(dst_mac));
+ memset(src_ip, 0, sizeof(src_ip));
+ memset(dst_ip, 0, sizeof(dst_ip));
+ memset(ka_payload, 0, sizeof(ka_payload));
+		sscanf(data, "%d %17s %15s %15s %d %d %d %u %u %d %u %u %u %31s",
+ &sess_id, dst_mac, src_ip, dst_ip, &ipid, &srcport, &dstport, &seq,
+ &ack, &tcpwin, &tsval, &tsecr, &len, ka_payload);
+
+ ka_payload_len = strlen(ka_payload) / 2;
+ tcpka = kmalloc(sizeof(struct tcpka_conn) + ka_payload_len, GFP_KERNEL);
+ if (tcpka == NULL) {
+			AEXT_ERROR(dev->name, "Failed to allocate buffer of %d bytes\n",
+				(int)(sizeof(struct tcpka_conn) + ka_payload_len));
+ ret = -1;
+ goto exit;
+ }
+ memset(tcpka, 0, sizeof(struct tcpka_conn) + ka_payload_len);
+
+ tcpka->sess_id = sess_id;
+ if (!(ret = bcm_ether_atoe(dst_mac, &tcpka->dst_mac))) {
+ AEXT_ERROR(dev->name, "mac parsing err addr=%s\n", dst_mac);
+ ret = -1;
+ goto exit;
+ }
+ if (!bcm_atoipv4(src_ip, &tcpka->src_ip)) {
+ AEXT_ERROR(dev->name, "src_ip parsing err ip=%s\n", src_ip);
+ ret = -1;
+ goto exit;
+ }
+ if (!bcm_atoipv4(dst_ip, &tcpka->dst_ip)) {
+ AEXT_ERROR(dev->name, "dst_ip parsing err ip=%s\n", dst_ip);
+ ret = -1;
+ goto exit;
+ }
+ tcpka->ipid = ipid;
+ tcpka->srcport = srcport;
+ tcpka->dstport = dstport;
+ tcpka->seq = seq;
+ tcpka->ack = ack;
+ tcpka->tcpwin = tcpwin;
+ tcpka->tsval = tsval;
+ tcpka->tsecr = tsecr;
+ tcpka->len = len;
+ ka_payload_len = wl_pattern_atoh(ka_payload, (char *)tcpka->ka_payload);
+ if (ka_payload_len == -1) {
+ AEXT_ERROR(dev->name,"rejecting ka_payload=%s\n", ka_payload);
+ ret = -1;
+ goto exit;
+ }
+ tcpka->ka_payload_len = ka_payload_len;
+
+ AEXT_INFO(dev->name,
+			"tcpka_conn_add %d %pM %pI4 %pI4 %d %d %d %u %u %d %u %u %u %u \"%s\"\n",
+ tcpka->sess_id, &tcpka->dst_mac, &tcpka->src_ip, &tcpka->dst_ip,
+ tcpka->ipid, tcpka->srcport, tcpka->dstport, tcpka->seq,
+ tcpka->ack, tcpka->tcpwin, tcpka->tsval, tcpka->tsecr,
+ tcpka->len, tcpka->ka_payload_len, tcpka->ka_payload);
+
+ ret = wl_ext_iovar_setbuf(dev, "tcpka_conn_add", (char *)tcpka,
+ (sizeof(tcpka_conn_t) + tcpka->ka_payload_len - 1),
+ iovar_buf, sizeof(iovar_buf), NULL);
+ }
+
+exit:
+ if (tcpka)
+ kfree(tcpka);
+ return ret;
+}
+
+static int
+wl_ext_tcpka_conn_enable(struct net_device *dev, char *data, char *command,
+ int total_len)
+{
+ s8 iovar_buf[WLC_IOCTL_SMLEN];
+ tcpka_conn_sess_t tcpka_conn;
+ int ret = 0;
+ uint32 sess_id = 0, flag, interval = 0, retry_interval = 0, retry_count = 0;
+
+ if (data) {
+ sscanf(data, "%d %d %d %d %d",
+ &sess_id, &flag, &interval, &retry_interval, &retry_count);
+ tcpka_conn.sess_id = sess_id;
+ tcpka_conn.flag = flag;
+ if (tcpka_conn.flag) {
+ tcpka_conn.tcpka_timers.interval = interval;
+ tcpka_conn.tcpka_timers.retry_interval = retry_interval;
+ tcpka_conn.tcpka_timers.retry_count = retry_count;
+ } else {
+ tcpka_conn.tcpka_timers.interval = 0;
+ tcpka_conn.tcpka_timers.retry_interval = 0;
+ tcpka_conn.tcpka_timers.retry_count = 0;
+ }
+
+ AEXT_INFO(dev->name, "tcpka_conn_enable %d %d %d %d %d\n",
+ tcpka_conn.sess_id, tcpka_conn.flag,
+ tcpka_conn.tcpka_timers.interval,
+ tcpka_conn.tcpka_timers.retry_interval,
+ tcpka_conn.tcpka_timers.retry_count);
+
+ ret = wl_ext_iovar_setbuf(dev, "tcpka_conn_enable", (char *)&tcpka_conn,
+ sizeof(tcpka_conn_sess_t), iovar_buf, sizeof(iovar_buf), NULL);
+ }
+
+ return ret;
+}
+
+static int
+wl_ext_tcpka_conn_info(struct net_device *dev, char *data, char *command,
+ int total_len)
+{
+ s8 iovar_buf[WLC_IOCTL_SMLEN];
+ tcpka_conn_sess_info_t *info = NULL;
+ uint32 sess_id = 0;
+ int ret = 0, bytes_written = 0;
+
+ if (data) {
+ sscanf(data, "%d", &sess_id);
+ AEXT_INFO(dev->name, "tcpka_conn_sess_info %d\n", sess_id);
+ ret = wl_ext_iovar_getbuf(dev, "tcpka_conn_sess_info", (char *)&sess_id,
+ sizeof(uint32), iovar_buf, sizeof(iovar_buf), NULL);
+ if (!ret) {
+ info = (tcpka_conn_sess_info_t *) iovar_buf;
+ bytes_written += snprintf(command+bytes_written, total_len,
+ "id :%d\n"
+ "ipid :%d\n"
+ "seq :%u\n"
+ "ack :%u",
+ sess_id, info->ipid, info->seq, info->ack);
+ AEXT_INFO(dev->name, "%s\n", command);
+ ret = bytes_written;
+ }
+ }
+
+ return ret;
+}
+#endif /* WL_EXT_TCPKA */
+
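+/* "rsdb_mode [n]" sets or queries real simultaneous dual-band mode through
+ * the wl_config_t "rsdb_mode" iovar.
+ */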
+static int
+wl_ext_rsdb_mode(struct net_device *dev, char *data, char *command,
+ int total_len)
+{
+ s8 iovar_buf[WLC_IOCTL_SMLEN];
+ wl_config_t rsdb_mode_cfg = {1, 0}, *rsdb_p;
+ int ret = 0;
+
+ if (data) {
+ rsdb_mode_cfg.config = (int)simple_strtol(data, NULL, 0);
+ ret = wl_ext_iovar_setbuf(dev, "rsdb_mode", (char *)&rsdb_mode_cfg,
+ sizeof(rsdb_mode_cfg), iovar_buf, WLC_IOCTL_SMLEN, NULL);
+ AEXT_INFO(dev->name, "rsdb_mode %d\n", rsdb_mode_cfg.config);
+ } else {
+ ret = wl_ext_iovar_getbuf(dev, "rsdb_mode", NULL, 0,
+ iovar_buf, WLC_IOCTL_SMLEN, NULL);
+ if (!ret) {
+ rsdb_p = (wl_config_t *) iovar_buf;
+ ret = snprintf(command, total_len, "%d", rsdb_p->config);
+ AEXT_TRACE(dev->name, "command result is %s\n", command);
+ }
+ }
+
+ return ret;
+}
+
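+/* "recal" issues a small passive scan on 2G channels 1-2, presumably to
+ * trigger a recalibration in the firmware.
+ */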
+static int
+wl_ext_recal(struct net_device *dev, char *data, char *command,
+ int total_len)
+{
+ int ret = 0, i, nchan, nssid = 0;
+ int params_size = WL_SCAN_PARAMS_FIXED_SIZE + WL_NUMCHANNELS * sizeof(uint16);
+ wl_scan_params_t *params = NULL;
+ int ioctl_ver;
+ char *p;
+
+ AEXT_TRACE(dev->name, "Enter\n");
+
+ if (data) {
+ params_size += WL_SCAN_PARAMS_SSID_MAX * sizeof(wlc_ssid_t);
+ params = (wl_scan_params_t *) kzalloc(params_size, GFP_KERNEL);
+ if (params == NULL) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ wl_ext_get_ioctl_ver(dev, &ioctl_ver);
+
+ memcpy(&params->bssid, &ether_bcast, ETHER_ADDR_LEN);
+ params->bss_type = DOT11_BSSTYPE_ANY;
+ params->scan_type = 0;
+ params->nprobes = -1;
+ params->active_time = -1;
+ params->passive_time = -1;
+ params->home_time = -1;
+ params->channel_num = 0;
+
+ params->scan_type |= WL_SCANFLAGS_PASSIVE;
+ nchan = 2;
+ params->channel_list[0] = wf_channel2chspec(1, WL_CHANSPEC_BW_20);
+ params->channel_list[1] = wf_channel2chspec(2, WL_CHANSPEC_BW_20);
+
+ params->nprobes = htod32(params->nprobes);
+ params->active_time = htod32(params->active_time);
+ params->passive_time = htod32(params->passive_time);
+ params->home_time = htod32(params->home_time);
+
+		for (i = 0; i < nchan; i++) {
+			params->channel_list[i] = wl_ext_chspec_host_to_driver(ioctl_ver,
+				params->channel_list[i]);
+		}
+
+ p = (char*)params->channel_list + nchan * sizeof(uint16);
+
+ params->channel_num = htod32((nssid << WL_SCAN_PARAMS_NSSID_SHIFT) |
+ (nchan & WL_SCAN_PARAMS_COUNT_MASK));
+ params_size = p - (char*)params + nssid * sizeof(wlc_ssid_t);
+
+ AEXT_INFO(dev->name, "recal\n");
+ ret = wl_ext_ioctl(dev, WLC_SCAN, params, params_size, 1);
+ }
+
+exit:
+ if (params)
+ kfree(params);
+ return ret;
+}
+
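+/* Set or clear one bit in the firmware "event_msgs" mask. */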
+static s32
+wl_ext_add_remove_eventmsg(struct net_device *ndev, u16 event, bool add)
+{
+ s8 iovbuf[WL_EVENTING_MASK_LEN + 12];
+ s8 eventmask[WL_EVENTING_MASK_LEN];
+ s32 err = 0;
+
+ if (!ndev)
+ return -ENODEV;
+
+ /* Setup event_msgs */
+ err = wldev_iovar_getbuf(ndev, "event_msgs", NULL, 0, iovbuf, sizeof(iovbuf), NULL);
+ if (unlikely(err)) {
+ AEXT_ERROR(ndev->name, "Get event_msgs error (%d)\n", err);
+ goto eventmsg_out;
+ }
+ memcpy(eventmask, iovbuf, WL_EVENTING_MASK_LEN);
+ if (add) {
+ setbit(eventmask, event);
+ } else {
+ clrbit(eventmask, event);
+ }
+ err = wldev_iovar_setbuf(ndev, "event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf,
+ sizeof(iovbuf), NULL);
+ if (unlikely(err)) {
+ AEXT_ERROR(ndev->name, "Set event_msgs error (%d)\n", err);
+ goto eventmsg_out;
+ }
+
+eventmsg_out:
+ return err;
+}
+
+static int
+wl_ext_event_msg(struct net_device *dev, char *data,
+ char *command, int total_len)
+{
+ s8 iovbuf[WL_EVENTING_MASK_LEN + 12];
+ s8 eventmask[WL_EVENTING_MASK_LEN];
+ int i, bytes_written = 0, add = -1;
+ uint event;
+ char *vbuf;
+	bool skipzeros = TRUE;
+
+ /* dhd_priv wl event_msg [offset] [1/0, 1 for add, 0 for remove] */
+ /* dhd_priv wl event_msg 40 1 */
+ if (data) {
+ AEXT_TRACE(dev->name, "data = %s\n", data);
+		sscanf(data, "%u %d", &event, &add);
+ /* Setup event_msgs */
+ bytes_written = wldev_iovar_getbuf(dev, "event_msgs", NULL, 0, iovbuf,
+ sizeof(iovbuf), NULL);
+ if (unlikely(bytes_written)) {
+ AEXT_ERROR(dev->name, "Get event_msgs error (%d)\n", bytes_written);
+ goto eventmsg_out;
+ }
+ memcpy(eventmask, iovbuf, WL_EVENTING_MASK_LEN);
+ if (add == -1) {
+ if (isset(eventmask, event))
+ bytes_written += snprintf(command+bytes_written, total_len, "1");
+ else
+ bytes_written += snprintf(command+bytes_written, total_len, "0");
+ AEXT_INFO(dev->name, "%s\n", command);
+ goto eventmsg_out;
+ }
+ bytes_written = wl_ext_add_remove_eventmsg(dev, event, add);
+ }
+ else {
+ /* Setup event_msgs */
+ bytes_written = wldev_iovar_getbuf(dev, "event_msgs", NULL, 0, iovbuf,
+ sizeof(iovbuf), NULL);
+ if (bytes_written) {
+ AEXT_ERROR(dev->name, "Get event_msgs error (%d)\n", bytes_written);
+ goto eventmsg_out;
+ }
+ vbuf = (char *)iovbuf;
+ bytes_written += snprintf(command+bytes_written, total_len, "0x");
+ for (i = (sizeof(eventmask) - 1); i >= 0; i--) {
+ if (vbuf[i] || (i == 0))
+ skipzeros = FALSE;
+ if (skipzeros)
+ continue;
+ bytes_written += snprintf(command+bytes_written, total_len,
+ "%02x", vbuf[i] & 0xff);
+ }
+ AEXT_INFO(dev->name, "%s\n", command);
+ }
+
+eventmsg_out:
+ return bytes_written;
+}
+
+#ifdef PKT_FILTER_SUPPORT
+extern void dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg);
+extern void dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id);
+extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode);
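+/* Runtime-added packet filters are tracked in dhd->conf->pkt_filter_add so
+ * they can be deleted or toggled later by id.
+ */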
+static int
+wl_ext_pkt_filter_add(struct net_device *dev, char *data, char *command,
+ int total_len)
+{
+ struct dhd_pub *dhd = dhd_get_pub(dev);
+ int i, filter_id, new_id = 0, cnt;
+ conf_pkt_filter_add_t *filter_add = &dhd->conf->pkt_filter_add;
+ char **pktfilter = dhd->pktfilter;
+ int err = 0;
+
+ if (data) {
+ AEXT_TRACE(dev->name, "data = %s\n", data);
+
+ new_id = simple_strtol(data, NULL, 10);
+ if (new_id <= 0) {
+ AEXT_ERROR(dev->name, "wrong id %d\n", new_id);
+ return -1;
+ }
+
+ cnt = dhd->pktfilter_count;
+ for (i=0; i<cnt; i++) {
+ if (!pktfilter[i])
+ continue;
+ filter_id = simple_strtol(pktfilter[i], NULL, 10);
+ if (new_id == filter_id) {
+ AEXT_ERROR(dev->name, "filter id %d already in list\n", filter_id);
+ return -1;
+ }
+ }
+
+ cnt = filter_add->count;
+ if (cnt >= DHD_CONF_FILTER_MAX) {
+ AEXT_ERROR(dev->name, "not enough filter\n");
+ return -1;
+ }
+ for (i=0; i<cnt; i++) {
+ filter_id = simple_strtol(filter_add->filter[i], NULL, 10);
+ if (new_id == filter_id) {
+ AEXT_ERROR(dev->name, "filter id %d already in list\n", filter_id);
+ return -1;
+ }
+ }
+
+		strncpy(&filter_add->filter[cnt][0], data, PKT_FILTER_LEN - 1);
+		filter_add->filter[cnt][PKT_FILTER_LEN - 1] = '\0';
+ dhd->pktfilter[dhd->pktfilter_count] = filter_add->filter[cnt];
+ filter_add->count++;
+ dhd->pktfilter_count++;
+
+ dhd_pktfilter_offload_set(dhd, data);
+ AEXT_INFO(dev->name, "filter id %d added\n", new_id);
+ }
+
+ return err;
+}
+
+static int
+wl_ext_pkt_filter_delete(struct net_device *dev, char *data, char *command,
+ int total_len)
+{
+ struct dhd_pub *dhd = dhd_get_pub(dev);
+ int i, j, filter_id, cnt;
+ char **pktfilter = dhd->pktfilter;
+ conf_pkt_filter_add_t *filter_add = &dhd->conf->pkt_filter_add;
+ bool in_filter = FALSE;
+ int id, err = 0;
+
+ if (data) {
+ AEXT_TRACE(dev->name, "data = %s\n", data);
+ id = (int)simple_strtol(data, NULL, 0);
+
+ cnt = filter_add->count;
+ for (i=0; i<cnt; i++) {
+ filter_id = simple_strtol(filter_add->filter[i], NULL, 10);
+ if (id == filter_id) {
+ in_filter = TRUE;
+ memset(filter_add->filter[i], 0, PKT_FILTER_LEN);
+ for (j=i; j<(cnt-1); j++) {
+ strcpy(filter_add->filter[j], filter_add->filter[j+1]);
+ memset(filter_add->filter[j+1], 0, PKT_FILTER_LEN);
+ }
+ cnt--;
+ filter_add->count--;
+ dhd->pktfilter_count--;
+ }
+ }
+
+ cnt = dhd->pktfilter_count;
+ for (i=0; i<cnt; i++) {
+ if (!pktfilter[i])
+ continue;
+ filter_id = simple_strtol(pktfilter[i], NULL, 10);
+ if (id == filter_id) {
+ in_filter = TRUE;
+ memset(pktfilter[i], 0, strlen(pktfilter[i]));
+ }
+ }
+
+ if (in_filter) {
+ dhd_pktfilter_offload_delete(dhd, id);
+ AEXT_INFO(dev->name, "filter id %d deleted\n", id);
+ } else {
+ AEXT_ERROR(dev->name, "filter id %d not in list\n", id);
+ err = -1;
+ }
+ }
+
+ return err;
+}
+
+static int
+wl_ext_pkt_filter_enable(struct net_device *dev, char *data, char *command,
+ int total_len)
+{
+ struct dhd_pub *dhd = dhd_get_pub(dev);
+ int err = 0, id, enable;
+ int i, filter_id, cnt;
+ char **pktfilter = dhd->pktfilter;
+ bool in_filter = FALSE;
+
+ /* dhd_priv wl pkt_filter_enable [id] [1/0] */
+ /* dhd_priv wl pkt_filter_enable 141 1 */
+ if (data) {
+ sscanf(data, "%d %d", &id, &enable);
+
+ cnt = dhd->pktfilter_count;
+ for (i=0; i<cnt; i++) {
+ if (!pktfilter[i])
+ continue;
+ filter_id = simple_strtol(pktfilter[i], NULL, 10);
+ if (id == filter_id) {
+ in_filter = TRUE;
+ break;
+ }
+ }
+
+ if (in_filter) {
+ dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i],
+ enable, dhd_master_mode);
+ AEXT_INFO(dev->name, "filter id %d %s\n", id, enable?"enabled":"disabled");
+ } else {
+ AEXT_ERROR(dev->name, "filter id %d not in list\n", id);
+ err = -1;
+ }
+ }
+
+ return err;
+}
+#endif /* PKT_FILTER_SUPPORT */
+
+#ifdef SENDPROB
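+/* Send a probe request to a peer with a temporary vendor IE attached: the
+ * IE is added, the frame is sent through "sendprb", then the IE is removed.
+ */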
+static int
+wl_ext_send_probreq(struct net_device *dev, char *data, char *command,
+ int total_len)
+{
+ int err = 0;
+ char addr_str[16], addr[6];
+ char iovar_buf[WLC_IOCTL_SMLEN]="\0";
+ char ie_data[WLC_IOCTL_SMLEN] = "\0";
+ wl_probe_params_t params;
+
+ /* dhd_priv wl send_probreq [dest. addr] [OUI+VAL] */
+ /* dhd_priv wl send_probreq 0x00904c010203 0x00904c01020304050607 */
+ if (data) {
+ AEXT_TRACE(dev->name, "data = %s\n", data);
+		sscanf(data, "%15s %255s", addr_str, ie_data);
+ AEXT_TRACE(dev->name, "addr=%s, ie=%s\n", addr_str, ie_data);
+
+ if (strlen(addr_str) != 14) {
+ AEXT_ERROR(dev->name, "wrong addr %s\n", addr_str);
+ goto exit;
+ }
+ wl_pattern_atoh(addr_str, (char *) addr);
+ memset(&params, 0, sizeof(params));
+ memcpy(&params.bssid, addr, ETHER_ADDR_LEN);
+ memcpy(&params.mac, addr, ETHER_ADDR_LEN);
+
+ err = wl_ext_add_del_ie(dev, VNDR_IE_PRBREQ_FLAG, ie_data, "add");
+ if (err)
+ goto exit;
+ err = wl_ext_iovar_setbuf(dev, "sendprb", (char *)&params, sizeof(params),
+ iovar_buf, sizeof(iovar_buf), NULL);
+ OSL_SLEEP(100);
+ wl_ext_add_del_ie(dev, VNDR_IE_PRBREQ_FLAG, ie_data, "del");
+ }
+
+exit:
+ return err;
+}
+
+static int
+wl_ext_send_probresp(struct net_device *dev, char *data, char *command,
+ int total_len)
+{
+ int err = 0;
+ char addr_str[16], addr[6];
+ char iovar_buf[WLC_IOCTL_SMLEN]="\0";
+ char ie_data[WLC_IOCTL_SMLEN] = "\0";
+
+ /* dhd_priv wl send_probresp [dest. addr] [OUI+VAL] */
+ /* dhd_priv wl send_probresp 0x00904c010203 0x00904c01020304050607 */
+ if (data) {
+ AEXT_TRACE(dev->name, "data = %s\n", data);
+		sscanf(data, "%15s %255s", addr_str, ie_data);
+ AEXT_TRACE(dev->name, "addr=%s, ie=%s\n", addr_str, ie_data);
+
+ if (strlen(addr_str) != 14) {
+ AEXT_ERROR(dev->name, "wrong addr %s\n", addr_str);
+ goto exit;
+ }
+ wl_pattern_atoh(addr_str, (char *) addr);
+
+ err = wl_ext_add_del_ie(dev, VNDR_IE_PRBRSP_FLAG, ie_data, "add");
+ if (err)
+ goto exit;
+ err = wl_ext_iovar_setbuf(dev, "send_probresp", addr, sizeof(addr),
+ iovar_buf, sizeof(iovar_buf), NULL);
+ OSL_SLEEP(100);
+ wl_ext_add_del_ie(dev, VNDR_IE_PRBRSP_FLAG, ie_data, "del");
+ }
+
+exit:
+ return err;
+}
+
+static int
+wl_ext_recv_probreq(struct net_device *dev, char *data, char *command,
+ int total_len)
+{
+ int err = 0, enable = 0;
+ char cmd[32];
+ struct dhd_pub *dhd = dhd_get_pub(dev);
+
+ /* enable:
+ 1. dhd_priv wl 86 0
+ 2. dhd_priv wl event_msg 44 1
+ disable:
+ 1. dhd_priv wl 86 2;
+ 2. dhd_priv wl event_msg 44 0
+ */
+ if (data) {
+ AEXT_TRACE(dev->name, "data = %s\n", data);
+ sscanf(data, "%d", &enable);
+ if (enable) {
+ strcpy(cmd, "wl 86 0");
+ err = wl_ext_wl_iovar(dev, cmd, total_len);
+ if (err)
+ goto exit;
+ strcpy(cmd, "wl event_msg 44 1");
+ err = wl_ext_wl_iovar(dev, cmd, total_len);
+ if (err)
+ goto exit;
+ dhd->recv_probereq = TRUE;
+ } else {
+ if (dhd->conf->pm) {
+ strcpy(cmd, "wl 86 2");
+ wl_ext_wl_iovar(dev, cmd, total_len);
+ }
+ strcpy(cmd, "wl event_msg 44 0");
+ wl_ext_wl_iovar(dev, cmd, total_len);
+ dhd->recv_probereq = FALSE;
+ }
+ }
+
+exit:
+ return err;
+}
+
+static int
+wl_ext_recv_probresp(struct net_device *dev, char *data, char *command,
+ int total_len)
+{
+ int err = 0, enable = 0;
+ char cmd[64];
+
+ /* enable:
+ 1. dhd_priv wl pkt_filter_add 150 0 0 0 0xFF 0x50
+ 2. dhd_priv wl pkt_filter_enable 150 1
+ 3. dhd_priv wl mpc 0
+ 4. dhd_priv wl 108 1
+ disable:
+ 1. dhd_priv wl 108 0
+ 2. dhd_priv wl mpc 1
+ 3. dhd_priv wl pkt_filter_disable 150 0
+	4. dhd_priv wl pkt_filter_delete 150
+ */
+ if (data) {
+ AEXT_TRACE(dev->name, "data = %s\n", data);
+ sscanf(data, "%d", &enable);
+ if (enable) {
+ strcpy(cmd, "wl pkt_filter_add 150 0 0 0 0xFF 0x50");
+ err = wl_ext_wl_iovar(dev, cmd, total_len);
+ if (err)
+ goto exit;
+ strcpy(cmd, "wl pkt_filter_enable 150 1");
+ err = wl_ext_wl_iovar(dev, cmd, total_len);
+ if (err)
+ goto exit;
+ strcpy(cmd, "wl mpc 0");
+ err = wl_ext_wl_iovar(dev, cmd, total_len);
+ if (err)
+ goto exit;
+ strcpy(cmd, "wl 108 1");
+			err = wl_ext_wl_iovar(dev, cmd, total_len);
+ } else {
+ strcpy(cmd, "wl 108 0");
+ wl_ext_wl_iovar(dev, cmd, total_len);
+ strcpy(cmd, "wl mpc 1");
+ wl_ext_wl_iovar(dev, cmd, total_len);
+ strcpy(cmd, "wl pkt_filter_enable 150 0");
+ wl_ext_wl_iovar(dev, cmd, total_len);
+ strcpy(cmd, "wl pkt_filter_delete 150");
+ wl_ext_wl_iovar(dev, cmd, total_len);
+ }
+ }
+
+exit:
+ return err;
+}
+#endif /* SENDPROB */
+
+#if defined(USE_IW)
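+/* "gtk_key_info <kck+kek+replay_ctr hex>" programs GTK rekey offload,
+ * trying the "bcol_gtk_rekey_ptk" iovar first and falling back to
+ * "gtk_key_info".
+ */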
+static int
+wl_ext_gtk_key_info(struct net_device *dev, char *data, char *command, int total_len)
+{
+ struct dhd_pub *dhd = dhd_get_pub(dev);
+ int err = 0;
+ char iovar_buf[WLC_IOCTL_SMLEN]="\0";
+ gtk_keyinfo_t keyinfo;
+ bcol_gtk_para_t bcol_keyinfo;
+
+ /* wl gtk_key_info [kck kek replay_ctr] */
+ /* wl gtk_key_info 001122..FF001122..FF00000000000001 */
+ if (data) {
+ if (!dhd->conf->rekey_offload) {
+ AEXT_INFO(dev->name, "rekey_offload disabled\n");
+ return BCME_UNSUPPORTED;
+ }
+
+ memset(&bcol_keyinfo, 0, sizeof(bcol_keyinfo));
+ bcol_keyinfo.enable = 1;
+ bcol_keyinfo.ptk_len = 64;
+ memcpy(&bcol_keyinfo.ptk, data, RSN_KCK_LENGTH+RSN_KEK_LENGTH);
+ err = wl_ext_iovar_setbuf(dev, "bcol_gtk_rekey_ptk", &bcol_keyinfo,
+ sizeof(bcol_keyinfo), iovar_buf, sizeof(iovar_buf), NULL);
+ if (!err) {
+ goto exit;
+ }
+
+		memset(&keyinfo, 0, sizeof(keyinfo));
+		memcpy(&keyinfo, data, RSN_KCK_LENGTH+RSN_KEK_LENGTH+RSN_REPLAY_LEN);
+		err = wl_ext_iovar_setbuf(dev, "gtk_key_info", &keyinfo, sizeof(keyinfo),
+			iovar_buf, sizeof(iovar_buf), NULL);
+		if (err) {
+			AEXT_ERROR(dev->name, "failed to set gtk_key_info\n");
+			return err;
+		}
+		/* dump the key material only after keyinfo has been populated */
+		if (android_msg_level & ANDROID_INFO_LEVEL) {
+			prhex("kck", (uchar *)keyinfo.KCK, RSN_KCK_LENGTH);
+			prhex("kek", (uchar *)keyinfo.KEK, RSN_KEK_LENGTH);
+			prhex("replay_ctr", (uchar *)keyinfo.ReplayCounter, RSN_REPLAY_LEN);
+		}
+	}
+
+exit:
+	return err;
+}
+#endif /* USE_IW */
+
+#ifdef WL_EXT_WOWL
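+/* "wowl_pattern add|clr <offset> <mask> <pattern>": the user-supplied byte
+ * mask is compressed to a bitmask (one bit per mask byte) before the
+ * wl_wowl_pattern2_t is handed to the firmware; with no argument the
+ * installed patterns are dumped.
+ */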
+static int
+wl_ext_wowl_pattern(struct net_device *dev, char *data, char *command,
+ int total_len)
+{
+ s8 iovar_buf[WLC_IOCTL_SMLEN];
+ uint buf_len = 0;
+ int offset;
+ char mask[128]="\0", pattern[128]="\0", add[4]="\0",
+ mask_tmp[128], *pmask_tmp;
+ uint32 masksize, patternsize, pad_len = 0;
+ wl_wowl_pattern2_t *wowl_pattern2 = NULL;
+ wl_wowl_pattern_t *wowl_pattern = NULL;
+ char *mask_and_pattern;
+ wl_wowl_pattern_list_t *list;
+ uint8 *ptr;
+ int ret = 0, i, j, v;
+
+ if (data) {
+		sscanf(data, "%3s %d %127s %127s", add, &offset, mask_tmp, pattern);
+ if (strcmp(add, "add") != 0 && strcmp(add, "clr") != 0) {
+ AEXT_ERROR(dev->name, "first arg should be add or clr\n");
+ goto exit;
+ }
+ if (!strcmp(add, "clr")) {
+ AEXT_INFO(dev->name, "wowl_pattern clr\n");
+ ret = wl_ext_iovar_setbuf(dev, "wowl_pattern", add,
+ sizeof(add), iovar_buf, sizeof(iovar_buf), NULL);
+ goto exit;
+ }
+		masksize = strlen(mask_tmp) - 2;
+		AEXT_TRACE(dev->name, "0 mask_tmp=%s, masksize=%d\n", mask_tmp, masksize);
+
+		// pad the mask out to a whole number of 16 hex-digit (8-byte) groups
+ if (masksize % 16)
+ pad_len = (16 - masksize % 16);
+ for (i=0; i<pad_len; i++)
+ strcat(mask_tmp, "0");
+ masksize += pad_len;
+ AEXT_TRACE(dev->name, "1 mask_tmp=%s, masksize=%d\n", mask_tmp, masksize);
+
+ // translate 0x00 to 0, others to 1
+ j = 0;
+ pmask_tmp = &mask_tmp[2];
+ for (i=0; i<masksize/2; i++) {
+			if (strncmp(&pmask_tmp[i*2], "00", 2))
+ pmask_tmp[j] = '1';
+ else
+ pmask_tmp[j] = '0';
+ j++;
+ }
+ pmask_tmp[j] = '\0';
+ masksize = masksize / 2;
+ AEXT_TRACE(dev->name, "2 mask_tmp=%s, masksize=%d\n", mask_tmp, masksize);
+
+		// reverse the bit order within each group of 8 mask bits
+ pmask_tmp = &mask_tmp[2];
+ for (i=0; i<masksize/8; i++) {
+ char c;
+ for (j=0; j<4; j++) {
+ c = pmask_tmp[i*8+j];
+ pmask_tmp[i*8+j] = pmask_tmp[(i+1)*8-j-1];
+ pmask_tmp[(i+1)*8-j-1] = c;
+ }
+ }
+ AEXT_TRACE(dev->name, "3 mask_tmp=%s, masksize=%d\n", mask_tmp, masksize);
+
+		// pack the bit string back into hex, 4 bits per digit
+ j = 0; v = 0;
+ pmask_tmp = &mask_tmp[2];
+ strcpy(mask, "0x");
+ for (i=0; i<masksize; i++) {
+ v = (v<<1) | (pmask_tmp[i]=='1');
+ if (((i+1)%4) == 0) {
+ if (v < 10)
+ mask[j+2] = v + '0';
+ else
+ mask[j+2] = (v-10) + 'a';
+ j++;
+ v = 0;
+ }
+ }
+ mask[j+2] = '\0';
+ masksize = j/2;
+ AEXT_TRACE(dev->name, "4 mask=%s, masksize=%d\n", mask, masksize);
+
+ patternsize = (strlen(pattern)-2)/2;
+ buf_len = sizeof(wl_wowl_pattern2_t) + patternsize + masksize;
+ wowl_pattern2 = kmalloc(buf_len, GFP_KERNEL);
+ if (wowl_pattern2 == NULL) {
+ AEXT_ERROR(dev->name, "Failed to allocate buffer of %d bytes\n", buf_len);
+ goto exit;
+ }
+ memset(wowl_pattern2, 0, sizeof(wl_wowl_pattern2_t));
+
+ strncpy(wowl_pattern2->cmd, add, sizeof(add));
+ wowl_pattern2->wowl_pattern.type = 0;
+ wowl_pattern2->wowl_pattern.offset = offset;
+ mask_and_pattern = (char*)wowl_pattern2 + sizeof(wl_wowl_pattern2_t);
+
+ wowl_pattern2->wowl_pattern.masksize = masksize;
+ ret = wl_pattern_atoh(mask, mask_and_pattern);
+ if (ret == -1) {
+ AEXT_ERROR(dev->name, "rejecting mask=%s\n", mask);
+ goto exit;
+ }
+
+ mask_and_pattern += wowl_pattern2->wowl_pattern.masksize;
+ wowl_pattern2->wowl_pattern.patternoffset = sizeof(wl_wowl_pattern_t) +
+ wowl_pattern2->wowl_pattern.masksize;
+
+ wowl_pattern2->wowl_pattern.patternsize = patternsize;
+ ret = wl_pattern_atoh(pattern, mask_and_pattern);
+ if (ret == -1) {
+ AEXT_ERROR(dev->name, "rejecting pattern=%s\n", pattern);
+ goto exit;
+ }
+
+ AEXT_INFO(dev->name, "%s %d %s %s\n", add, offset, mask, pattern);
+
+ ret = wl_ext_iovar_setbuf(dev, "wowl_pattern", (char *)wowl_pattern2,
+ buf_len, iovar_buf, sizeof(iovar_buf), NULL);
+ }
+ else {
+ ret = wl_ext_iovar_getbuf(dev, "wowl_pattern", NULL, 0,
+ iovar_buf, sizeof(iovar_buf), NULL);
+ if (!ret) {
+ list = (wl_wowl_pattern_list_t *)iovar_buf;
+ ret = snprintf(command, total_len, "#of patterns :%d\n", list->count);
+ ptr = (uint8 *)list->pattern;
+ for (i=0; i<list->count; i++) {
+ uint8 *pattern;
+ wowl_pattern = (wl_wowl_pattern_t *)ptr;
+ ret += snprintf(command+ret, total_len,
+ "Pattern %d:\n"
+ "ID :0x%x\n"
+ "Offset :%d\n"
+ "Masksize :%d\n"
+ "Mask :0x",
+ i+1, (uint32)wowl_pattern->id, wowl_pattern->offset,
+ wowl_pattern->masksize);
+ pattern = ((uint8 *)wowl_pattern + sizeof(wl_wowl_pattern_t));
+ for (j = 0; j < wowl_pattern->masksize; j++) {
+ ret += snprintf(command+ret, total_len, "%02x", pattern[j]);
+ }
+ ret += snprintf(command+ret, total_len, "\n");
+ ret += snprintf(command+ret, total_len,
+ "PatternSize:%d\n"
+ "Pattern :0x",
+ wowl_pattern->patternsize);
+
+ pattern = ((uint8*)wowl_pattern + wowl_pattern->patternoffset);
+ for (j=0; j<wowl_pattern->patternsize; j++)
+ ret += snprintf(command+ret, total_len, "%02x", pattern[j]);
+ ret += snprintf(command+ret, total_len, "\n");
+ ptr += (wowl_pattern->masksize + wowl_pattern->patternsize +
+ sizeof(wl_wowl_pattern_t));
+ }
+
+ AEXT_INFO(dev->name, "%s\n", command);
+ }
+ }
+
+exit:
+ if (wowl_pattern2)
+ kfree(wowl_pattern2);
+ return ret;
+}
+
+static int
+wl_ext_wowl_wakeind(struct net_device *dev, char *data, char *command,
+ int total_len)
+{
+ s8 iovar_buf[WLC_IOCTL_SMLEN];
+ wl_wowl_wakeind_t *wake = NULL;
+ int ret = -1;
+ char clr[6]="\0";
+
+ if (data) {
+		sscanf(data, "%5s", clr);
+ if (!strcmp(clr, "clear")) {
+ AEXT_INFO(dev->name, "wowl_wakeind clear\n");
+ ret = wl_ext_iovar_setbuf(dev, "wowl_wakeind", clr, sizeof(clr),
+ iovar_buf, sizeof(iovar_buf), NULL);
+ } else {
+ AEXT_ERROR(dev->name, "first arg should be clear\n");
+ }
+ } else {
+ ret = wl_ext_iovar_getbuf(dev, "wowl_wakeind", NULL, 0,
+ iovar_buf, sizeof(iovar_buf), NULL);
+ if (!ret) {
+ wake = (wl_wowl_wakeind_t *) iovar_buf;
+ ret = snprintf(command, total_len, "wakeind=0x%x", wake->ucode_wakeind);
+ if (wake->ucode_wakeind & WL_WOWL_MAGIC)
+ ret += snprintf(command+ret, total_len, " (MAGIC packet)");
+ if (wake->ucode_wakeind & WL_WOWL_NET)
+ ret += snprintf(command+ret, total_len, " (Netpattern)");
+ if (wake->ucode_wakeind & WL_WOWL_DIS)
+ ret += snprintf(command+ret, total_len, " (Disassoc/Deauth)");
+ if (wake->ucode_wakeind & WL_WOWL_BCN)
+ ret += snprintf(command+ret, total_len, " (Loss of beacon)");
+ if (wake->ucode_wakeind & WL_WOWL_TCPKEEP_TIME)
+ ret += snprintf(command+ret, total_len, " (TCPKA timeout)");
+ if (wake->ucode_wakeind & WL_WOWL_TCPKEEP_DATA)
+ ret += snprintf(command+ret, total_len, " (TCPKA data)");
+ if (wake->ucode_wakeind & WL_WOWL_TCPFIN)
+ ret += snprintf(command+ret, total_len, " (TCP FIN)");
+ AEXT_INFO(dev->name, "%s\n", command);
+ }
+ }
+
+ return ret;
+}
+#endif /* WL_EXT_WOWL */
+
+#ifdef WL_GPIO_NOTIFY
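+/* "bcol_gpio_noti" sets or queries a GPIO notification payload by index. */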
+typedef struct notify_payload {
+ int index;
+ int len;
+ char payload[128];
+} notify_payload_t;
+
+static int
+wl_ext_gpio_notify(struct net_device *dev, char *data, char *command,
+ int total_len)
+{
+ s8 iovar_buf[WLC_IOCTL_SMLEN];
+ notify_payload_t notify, *pnotify = NULL;
+ int i, ret = 0, bytes_written = 0;
+ char frame_str[WLC_IOCTL_SMLEN+3];
+
+ if (data) {
+ memset(&notify, 0, sizeof(notify));
+ memset(frame_str, 0, sizeof(frame_str));
+ sscanf(data, "%d %s", &notify.index, frame_str);
+
+ if (notify.index < 0)
+ notify.index = 0;
+
+ if (strlen(frame_str)) {
+ notify.len = wl_pattern_atoh(frame_str, notify.payload);
+ if (notify.len == -1) {
+ AEXT_ERROR(dev->name, "rejecting pattern=%s\n", frame_str);
+ goto exit;
+ }
+ AEXT_INFO(dev->name, "index=%d, len=%d\n", notify.index, notify.len);
+ if (android_msg_level & ANDROID_INFO_LEVEL)
+ prhex("payload", (uchar *)notify.payload, notify.len);
+ ret = wl_ext_iovar_setbuf(dev, "bcol_gpio_noti", (char *)&notify,
+ sizeof(notify), iovar_buf, WLC_IOCTL_SMLEN, NULL);
+ } else {
+ AEXT_INFO(dev->name, "index=%d\n", notify.index);
+ ret = wl_ext_iovar_getbuf(dev, "bcol_gpio_noti", &notify.index,
+ sizeof(notify.index), iovar_buf, sizeof(iovar_buf), NULL);
+ if (!ret) {
+ pnotify = (notify_payload_t *)iovar_buf;
+ bytes_written += snprintf(command+bytes_written, total_len,
+ "Id :%d\n"
+ "Packet :0x",
+ pnotify->index);
+ for (i=0; i<pnotify->len; i++) {
+ bytes_written += snprintf(command+bytes_written, total_len,
+ "%02x", pnotify->payload[i]);
+ }
+ AEXT_TRACE(dev->name, "command result is\n%s\n", command);
+ ret = bytes_written;
+ }
+ }
+ }
+
+exit:
+ return ret;
+}
+#endif /* WL_GPIO_NOTIFY */
+
+#ifdef CSI_SUPPORT
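+/* Channel State Information capture: "csi <mac> <period>" starts periodic
+ * measurement against a peer, period 0 deletes it, a negative period dumps
+ * the entry, and with no argument all configured sessions are listed.
+ */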
+typedef struct csi_config {
+ /* Peer device mac address. */
+ struct ether_addr addr;
+ /* BW to be used in the measurements. This needs to be supported both by the */
+ /* device itself and the peer. */
+ uint32 bw;
+ /* Time interval between measurements (units: 1 ms). */
+ uint32 period;
+ /* CSI method */
+ uint32 method;
+} csi_config_t;
+
+typedef struct csi_list {
+ uint32 cnt;
+ csi_config_t configs[1];
+} csi_list_t;
+
+static int
+wl_ether_atoe(const char *a, struct ether_addr *n)
+{
+ char *c = NULL;
+ int i = 0;
+
+ memset(n, 0, ETHER_ADDR_LEN);
+ for (;;) {
+ n->octet[i++] = (uint8)strtoul(a, &c, 16);
+ if (!*c++ || i == ETHER_ADDR_LEN)
+ break;
+ a = c;
+ }
+ return (i == ETHER_ADDR_LEN);
+}
+
+static int
+wl_ext_csi(struct net_device *dev, char *data, char *command, int total_len)
+{
+ csi_config_t csi, *csip;
+ csi_list_t *csi_list;
+ int ret = -1, period=-1, i;
+ char mac[32], *buf = NULL;
+ struct ether_addr ea;
+ int bytes_written = 0;
+
+ buf = kmalloc(WLC_IOCTL_SMLEN, GFP_KERNEL);
+ if (buf == NULL) {
+ AEXT_ERROR(dev->name, "Failed to allocate buffer of %d bytes\n", WLC_IOCTL_SMLEN);
+ goto exit;
+ }
+ memset(buf, 0, WLC_IOCTL_SMLEN);
+
+ if (data) {
+		sscanf(data, "%31s %d", mac, &period);
+ ret = wl_ether_atoe(mac, &ea);
+ if (!ret) {
+ AEXT_ERROR(dev->name, "rejecting mac=%s, ret=%d\n", mac, ret);
+ goto exit;
+ }
+ AEXT_TRACE(dev->name, "mac=%pM, period=%d", &ea, period);
+ if (period > 0) {
+ memset(&csi, 0, sizeof(csi_config_t));
+ bcopy(&ea, &csi.addr, ETHER_ADDR_LEN);
+ csi.period = period;
+ ret = wl_ext_iovar_setbuf(dev, "csi", (char *)&csi, sizeof(csi),
+ buf, WLC_IOCTL_SMLEN, NULL);
+ } else if (period == 0) {
+ memset(&csi, 0, sizeof(csi_config_t));
+ bcopy(&ea, &csi.addr, ETHER_ADDR_LEN);
+ ret = wl_ext_iovar_setbuf(dev, "csi_del", (char *)&csi, sizeof(csi),
+ buf, WLC_IOCTL_SMLEN, NULL);
+ } else {
+ ret = wl_ext_iovar_getbuf(dev, "csi", &ea, ETHER_ADDR_LEN, buf,
+ WLC_IOCTL_SMLEN, NULL);
+ if (!ret) {
+ csip = (csi_config_t *) buf;
+ /* Dump all lists */
+ bytes_written += snprintf(command+bytes_written, total_len,
+ "Mac :%pM\n"
+ "Period :%d\n"
+ "BW :%d\n"
+ "Method :%d\n",
+ &csip->addr, csip->period, csip->bw, csip->method);
+ AEXT_TRACE(dev->name, "command result is %s\n", command);
+ ret = bytes_written;
+ }
+ }
+ }
+ else {
+ ret = wl_ext_iovar_getbuf(dev, "csi_list", NULL, 0, buf, WLC_IOCTL_SMLEN, NULL);
+ if (!ret) {
+ csi_list = (csi_list_t *)buf;
+ bytes_written += snprintf(command+bytes_written, total_len,
+ "Total number :%d\n", csi_list->cnt);
+ for (i=0; i<csi_list->cnt; i++) {
+ csip = &csi_list->configs[i];
+				bytes_written += snprintf(command+bytes_written, total_len-bytes_written,
+ "Idx :%d\n"
+ "Mac :%pM\n"
+ "Period :%d\n"
+ "BW :%d\n"
+ "Method :%d\n\n",
+ i+1, &csip->addr, csip->period, csip->bw, csip->method);
+ }
+ AEXT_TRACE(dev->name, "command result is %s\n", command);
+ ret = bytes_written;
+ }
+ }
+
+exit:
+ if (buf)
+ kfree(buf);
+ return ret;
+}
+#endif /* CSI_SUPPORT */
+
+static int
+wl_ext_get_country(struct net_device *dev, char *data, char *command,
+ int total_len)
+{
+ struct dhd_pub *dhd = dhd_get_pub(dev);
+ wl_country_t cspec = {{0}, 0, {0}};
+ int bytes_written = 0, ret = 0;
+
+ if (data) {
+ char *country_code = data;
+ char *rev_info_delim = country_code + 2; /* 2 bytes of country code */
+ int revinfo = 0;
+ if ((rev_info_delim) &&
+ (strnicmp(rev_info_delim, "/", strlen("/")) == 0) && (rev_info_delim + 1)) {
+ revinfo = bcm_atoi(rev_info_delim + 1);
+ }
+#ifdef WL_CFG80211
+ bytes_written = wl_cfg80211_set_country_code(dev, country_code,
+ true, true, revinfo);
+#else
+ bytes_written = wldev_set_country(dev, country_code, true, true, revinfo);
+#endif /* WL_CFG80211 */
+ } else {
+ ret = dhd_conf_get_country(dhd, &cspec);
+ if (!ret) {
+ bytes_written += snprintf(command+bytes_written, total_len,
+ "%s/%d", cspec.ccode, cspec.rev);
+ }
+ if (!bytes_written)
+ bytes_written = -1;
+ AEXT_TRACE(dev->name, "command result is %s\n", command);
+ }
+
+ return bytes_written;
+}
+
+typedef int (wl_ext_tpl_parse_t)(struct net_device *dev, char *data, char *command,
+ int total_len);
+
+typedef struct wl_ext_iovar_tpl_t {
+ int get;
+ int set;
+ char *name;
+ wl_ext_tpl_parse_t *parse;
+} wl_ext_iovar_tpl_t;
+
+const wl_ext_iovar_tpl_t wl_ext_iovar_tpl_list[] = {
+ {WLC_GET_VAR, WLC_SET_VAR, "event_msg", wl_ext_event_msg},
+#if defined(USE_IW)
+ {WLC_GET_VAR, WLC_SET_VAR, "gtk_key_info", wl_ext_gtk_key_info},
+#endif /* USE_IW */
+ {WLC_GET_VAR, WLC_SET_VAR, "recal", wl_ext_recal},
+ {WLC_GET_VAR, WLC_SET_VAR, "rsdb_mode", wl_ext_rsdb_mode},
+ {WLC_GET_VAR, WLC_SET_VAR, "mkeep_alive", wl_ext_mkeep_alive},
+#ifdef PKT_FILTER_SUPPORT
+ {WLC_GET_VAR, WLC_SET_VAR, "pkt_filter_add", wl_ext_pkt_filter_add},
+ {WLC_GET_VAR, WLC_SET_VAR, "pkt_filter_delete", wl_ext_pkt_filter_delete},
+ {WLC_GET_VAR, WLC_SET_VAR, "pkt_filter_enable", wl_ext_pkt_filter_enable},
+#endif /* PKT_FILTER_SUPPORT */
+#if defined(WL_EXT_IAPSTA) && defined(WLMESH)
+ {WLC_GET_VAR, WLC_SET_VAR, "mesh_peer_status", wl_ext_mesh_peer_status},
+#endif /* WL_EXT_IAPSTA && WLMESH */
+#ifdef SENDPROB
+ {WLC_GET_VAR, WLC_SET_VAR, "send_probreq", wl_ext_send_probreq},
+ {WLC_GET_VAR, WLC_SET_VAR, "send_probresp", wl_ext_send_probresp},
+ {WLC_GET_VAR, WLC_SET_VAR, "recv_probreq", wl_ext_recv_probreq},
+ {WLC_GET_VAR, WLC_SET_VAR, "recv_probresp", wl_ext_recv_probresp},
+#endif /* SENDPROB */
+#ifdef WL_EXT_TCPKA
+ {WLC_GET_VAR, WLC_SET_VAR, "tcpka_conn_add", wl_ext_tcpka_conn_add},
+ {WLC_GET_VAR, WLC_SET_VAR, "tcpka_conn_enable", wl_ext_tcpka_conn_enable},
+ {WLC_GET_VAR, WLC_SET_VAR, "tcpka_conn_sess_info", wl_ext_tcpka_conn_info},
+#endif /* WL_EXT_TCPKA */
+#ifdef WL_EXT_WOWL
+ {WLC_GET_VAR, WLC_SET_VAR, "wowl_pattern", wl_ext_wowl_pattern},
+ {WLC_GET_VAR, WLC_SET_VAR, "wowl_wakeind", wl_ext_wowl_wakeind},
+#endif /* WL_EXT_WOWL */
+#ifdef IDHCP
+ {WLC_GET_VAR, WLC_SET_VAR, "dhcpc_dump", wl_ext_dhcpc_dump},
+ {WLC_GET_VAR, WLC_SET_VAR, "dhcpc_param", wl_ext_dhcpc_param},
+#endif /* IDHCP */
+#ifdef WL_GPIO_NOTIFY
+ {WLC_GET_VAR, WLC_SET_VAR, "bcol_gpio_noti", wl_ext_gpio_notify},
+#endif /* WL_GPIO_NOTIFY */
+#ifdef CSI_SUPPORT
+ {WLC_GET_VAR, WLC_SET_VAR, "csi", wl_ext_csi},
+#endif /* CSI_SUPPORT */
+ {WLC_GET_VAR, WLC_SET_VAR, "country", wl_ext_get_country},
+};
+
+/*
+Ex: dhd_priv wl [cmd] [val]
+ dhd_priv wl 85
+ dhd_priv wl 86 1
+ dhd_priv wl mpc
+ dhd_priv wl mpc 1
+*/
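+/*
+ * A leading number is treated as a raw WLC_ ioctl code (e.g. 85/86 above);
+ * anything else is treated as a named iovar, dispatched through
+ * wl_ext_iovar_tpl_list when a matching template exists, otherwise via a
+ * plain integer get/set.
+ */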
+static int
+wl_ext_wl_iovar(struct net_device *dev, char *command, int total_len)
+{
+ int cmd, val, ret = -1, i;
+ char name[32], *pch, *pick_tmp, *data;
+ int bytes_written=-1;
+ const wl_ext_iovar_tpl_t *tpl = wl_ext_iovar_tpl_list;
+ int tpl_count = ARRAY_SIZE(wl_ext_iovar_tpl_list);
+ char *pEnd;
+
+ AEXT_TRACE(dev->name, "cmd %s\n", command);
+ pick_tmp = command;
+
+ pch = bcmstrtok(&pick_tmp, " ", 0); // pick wl
+ if (!pch || strncmp(pch, "wl", 2))
+ goto exit;
+
+ pch = bcmstrtok(&pick_tmp, " ", 0); // pick cmd
+ if (!pch)
+ goto exit;
+
+	memset(name, 0, sizeof(name));
+	cmd = bcm_strtoul(pch, &pEnd, 0);
+	if (cmd == 0 || strlen(pEnd)) {
+		strncpy(name, pch, sizeof(name) - 1);
+	}
+	data = bcmstrtok(&pick_tmp, "", 0); // pick data
+	if (data && (cmd == 0 || strlen(pEnd))) {
+		cmd = WLC_SET_VAR;
+	} else if (cmd == 0 || strlen(pEnd)) {
+		cmd = WLC_GET_VAR;
+	}
+
+ /* look for a matching code in the table */
+ for (i = 0; i < tpl_count; i++, tpl++) {
+ if ((tpl->get == cmd || tpl->set == cmd) && !strcmp(tpl->name, name))
+ break;
+ }
+ if (i < tpl_count && tpl->parse) {
+ ret = tpl->parse(dev, data, command, total_len);
+ } else {
+ if (cmd == WLC_SET_VAR) {
+ val = (int)simple_strtol(data, NULL, 0);
+ AEXT_INFO(dev->name, "set %s %d\n", name, val);
+ ret = wl_ext_iovar_setint(dev, name, val);
+ } else if (cmd == WLC_GET_VAR) {
+ AEXT_INFO(dev->name, "get %s\n", name);
+ ret = wl_ext_iovar_getint(dev, name, &val);
+ if (!ret) {
+ bytes_written = snprintf(command, total_len, "%d", val);
+ AEXT_INFO(dev->name, "command result is %s\n", command);
+ ret = bytes_written;
+ }
+ } else if (data) {
+ val = (int)simple_strtol(data, NULL, 0);
+ AEXT_INFO(dev->name, "set %d %d\n", cmd, val);
+ ret = wl_ext_ioctl(dev, cmd, &val, sizeof(val), TRUE);
+ } else {
+ AEXT_INFO(dev->name, "get %d\n", cmd);
+ ret = wl_ext_ioctl(dev, cmd, &val, sizeof(val), FALSE);
+ if (!ret) {
+ bytes_written = snprintf(command, total_len, "%d", val);
+ AEXT_INFO(dev->name, "command result is %s\n", command);
+ ret = bytes_written;
+ }
+ }
+ }
+
+exit:
+ return ret;
+}
+
+int
+wl_ext_conf_iovar(struct net_device *dev, char *command, int total_len)
+{
+ int ret = 0;
+ char name[32], *pch, *pick_tmp, *data;
+ int bytes_written=-1;
+ struct dhd_pub *dhd = dhd_get_pub(dev);
+
+ AEXT_TRACE(dev->name, "cmd %s\n", command);
+ pick_tmp = command;
+
+ pch = bcmstrtok(&pick_tmp, " ", 0); // pick conf
+ if (!pch || strncmp(pch, "conf", 4))
+ goto exit;
+
+ pch = bcmstrtok(&pick_tmp, " ", 0); // pick cmd
+ if (!pch)
+ goto exit;
+
+	strncpy(name, pch, sizeof(name) - 1);
+	name[sizeof(name) - 1] = '\0';
+
+ data = bcmstrtok(&pick_tmp, "", 0); // pick data
+
+ if (!strcmp(name, "pm")) {
+ if (data) {
+ dhd->conf->pm = simple_strtol(data, NULL, 0);
+ ret = 0;
+ } else {
+ bytes_written = snprintf(command, total_len, "%d", dhd->conf->pm);
+ ret = bytes_written;
+ }
+ } else {
+ AEXT_ERROR(dev->name, "no config parameter found\n");
+ }
+
+exit:
+ return ret;
+}
+
+int
+wl_android_ext_priv_cmd(struct net_device *net, char *command,
+ int total_len, int *bytes_written)
+{
+ int ret = 0;
+
+ if (strnicmp(command, CMD_CHANNELS, strlen(CMD_CHANNELS)) == 0) {
+ *bytes_written = wl_ext_channels(net, command, total_len);
+ }
+ else if (strnicmp(command, CMD_CHANNEL, strlen(CMD_CHANNEL)) == 0) {
+ *bytes_written = wl_ext_channel(net, command, total_len);
+ }
+ else if (strnicmp(command, CMD_ROAM_TRIGGER, strlen(CMD_ROAM_TRIGGER)) == 0) {
+ *bytes_written = wl_ext_roam_trigger(net, command, total_len);
+ }
+ else if (strnicmp(command, CMD_PM, strlen(CMD_PM)) == 0) {
+ *bytes_written = wl_ext_pm(net, command, total_len);
+ }
+ else if (strnicmp(command, CMD_MONITOR, strlen(CMD_MONITOR)) == 0) {
+ *bytes_written = wl_ext_monitor(net, command, total_len);
+ }
+ else if (strnicmp(command, CMD_SET_SUSPEND_BCN_LI_DTIM, strlen(CMD_SET_SUSPEND_BCN_LI_DTIM)) == 0) {
+ int bcn_li_dtim;
+ bcn_li_dtim = (int)simple_strtol((command + strlen(CMD_SET_SUSPEND_BCN_LI_DTIM) + 1), NULL, 10);
+ *bytes_written = net_os_set_suspend_bcn_li_dtim(net, bcn_li_dtim);
+ }
+#ifdef WL_EXT_IAPSTA
+ else if (strnicmp(command, CMD_IAPSTA_INIT, strlen(CMD_IAPSTA_INIT)) == 0 ||
+ strnicmp(command, CMD_ISAM_INIT, strlen(CMD_ISAM_INIT)) == 0) {
+ *bytes_written = wl_ext_isam_init(net, command, total_len);
+ }
+ else if (strnicmp(command, CMD_IAPSTA_CONFIG, strlen(CMD_IAPSTA_CONFIG)) == 0 ||
+ strnicmp(command, CMD_ISAM_CONFIG, strlen(CMD_ISAM_CONFIG)) == 0) {
+ *bytes_written = wl_ext_iapsta_config(net, command, total_len);
+ }
+ else if (strnicmp(command, CMD_IAPSTA_ENABLE, strlen(CMD_IAPSTA_ENABLE)) == 0 ||
+ strnicmp(command, CMD_ISAM_ENABLE, strlen(CMD_ISAM_ENABLE)) == 0) {
+ *bytes_written = wl_ext_iapsta_enable(net, command, total_len);
+ }
+ else if (strnicmp(command, CMD_IAPSTA_DISABLE, strlen(CMD_IAPSTA_DISABLE)) == 0 ||
+ strnicmp(command, CMD_ISAM_DISABLE, strlen(CMD_ISAM_DISABLE)) == 0) {
+ *bytes_written = wl_ext_iapsta_disable(net, command, total_len);
+ }
+ else if (strnicmp(command, CMD_ISAM_STATUS, strlen(CMD_ISAM_STATUS)) == 0) {
+ *bytes_written = wl_ext_isam_status(net, command, total_len);
+ }
+ else if (strnicmp(command, CMD_ISAM_PARAM, strlen(CMD_ISAM_PARAM)) == 0) {
+ *bytes_written = wl_ext_isam_param(net, command, total_len);
+ }
+#if defined(WLMESH) && defined(WL_ESCAN)
+ else if (strnicmp(command, CMD_ISAM_PEER_PATH, strlen(CMD_ISAM_PEER_PATH)) == 0) {
+ *bytes_written = wl_ext_isam_peer_path(net, command, total_len);
+ }
+#endif /* WLMESH && WL_ESCAN */
+#endif /* WL_EXT_IAPSTA */
+#ifdef WL_CFG80211
+ else if (strnicmp(command, CMD_AUTOCHANNEL, strlen(CMD_AUTOCHANNEL)) == 0) {
+ *bytes_written = wl_cfg80211_autochannel(net, command, total_len);
+ }
+#endif /* WL_CFG80211 */
+#if defined(WL_WIRELESS_EXT) && defined(WL_ESCAN)
+ else if (strnicmp(command, CMD_AUTOCHANNEL, strlen(CMD_AUTOCHANNEL)) == 0) {
+ *bytes_written = wl_iw_autochannel(net, command, total_len);
+ }
+#endif /* WL_WIRELESS_EXT && WL_ESCAN */
+ else if (strnicmp(command, CMD_WLMSGLEVEL, strlen(CMD_WLMSGLEVEL)) == 0) {
+ *bytes_written = wl_ext_wlmsglevel(net, command, total_len);
+ }
+#ifdef WLEASYMESH
+ else if (strnicmp(command, CMD_EASYMESH, strlen(CMD_EASYMESH)) == 0) {
+ int skip = strlen(CMD_EASYMESH) + 1;
+ *bytes_written = wl_ext_easymesh(net, command+skip, total_len);
+ }
+#endif /* WLEASYMESH */
+#if defined(PKT_STATICS) && defined(BCMSDIO)
+ else if (strnicmp(command, CMD_DUMP_PKT_STATICS, strlen(CMD_DUMP_PKT_STATICS)) == 0) {
+ struct dhd_pub *dhd = dhd_get_pub(net);
+ dhd_bus_dump_txpktstatics(dhd);
+ }
+ else if (strnicmp(command, CMD_CLEAR_PKT_STATICS, strlen(CMD_CLEAR_PKT_STATICS)) == 0) {
+ struct dhd_pub *dhd = dhd_get_pub(net);
+ dhd_bus_clear_txpktstatics(dhd);
+ }
+#endif /* PKT_STATICS && BCMSDIO */
+ else if (strnicmp(command, CMD_WL, strlen(CMD_WL)) == 0) {
+ *bytes_written = wl_ext_wl_iovar(net, command, total_len);
+ }
+ else if (strnicmp(command, CMD_CONF, strlen(CMD_CONF)) == 0) {
+ *bytes_written = wl_ext_conf_iovar(net, command, total_len);
+ }
+ else
+ ret = -1;
+
+ return ret;
+}
+
+#if defined(WL_CFG80211) || defined(WL_ESCAN)
+int
+wl_ext_get_distance(struct net_device *net, u32 band)
+{
+ u32 bw = WL_CHANSPEC_BW_20;
+ s32 bw_cap = 0, distance = 0;
+ struct {
+ u32 band;
+ u32 bw_cap;
+ } param = {0, 0};
+ char buf[WLC_IOCTL_SMLEN]="\0";
+ s32 err = BCME_OK;
+
+ param.band = band;
+ err = wl_ext_iovar_getbuf(net, "bw_cap", &param, sizeof(param), buf,
+ sizeof(buf), NULL);
+ if (err) {
+ if (err != BCME_UNSUPPORTED) {
+ AEXT_ERROR(net->name, "bw_cap failed, %d\n", err);
+ return err;
+ } else {
+ err = wl_ext_iovar_getint(net, "mimo_bw_cap", &bw_cap);
+ if (bw_cap != WLC_N_BW_20ALL)
+ bw = WL_CHANSPEC_BW_40;
+ }
+ } else {
+ if (WL_BW_CAP_80MHZ(buf[0]))
+ bw = WL_CHANSPEC_BW_80;
+ else if (WL_BW_CAP_40MHZ(buf[0]))
+ bw = WL_CHANSPEC_BW_40;
+ else
+ bw = WL_CHANSPEC_BW_20;
+ }
+
+ if (bw == WL_CHANSPEC_BW_20)
+ distance = 2;
+ else if (bw == WL_CHANSPEC_BW_40)
+ distance = 4;
+ else if (bw == WL_CHANSPEC_BW_80)
+ distance = 8;
+ else
+ distance = 16;
+ AEXT_INFO(net->name, "bw=0x%x, distance=%d\n", bw, distance);
+
+ return distance;
+}
+
+int
+wl_ext_get_best_channel(struct net_device *net,
+#if defined(BSSCACHE)
+ wl_bss_cache_ctrl_t *bss_cache_ctrl,
+#else
+ wl_scan_results_t *bss_list,
+#endif /* BSSCACHE */
+ int ioctl_ver, int *best_2g_ch, int *best_5g_ch
+)
+{
+ struct wl_bss_info *bi = NULL; /* must be initialized */
+ s32 i, j;
+#if defined(BSSCACHE)
+ wl_bss_cache_t *node;
+#endif /* BSSCACHE */
+ int b_band[CH_MAX_2G_CHANNEL]={0}, a_band1[4]={0}, a_band4[5]={0};
+ s32 cen_ch, distance, distance_2g, distance_5g, ch, min_ap=999;
+ u8 valid_chan_list[sizeof(u32)*(WL_NUMCHANNELS + 1)];
+ wl_uint32_list_t *list;
+ int ret;
+ chanspec_t chanspec;
+ struct dhd_pub *dhd = dhd_get_pub(net);
+
+ memset(b_band, -1, sizeof(b_band));
+ memset(a_band1, -1, sizeof(a_band1));
+ memset(a_band4, -1, sizeof(a_band4));
+
+ memset(valid_chan_list, 0, sizeof(valid_chan_list));
+ list = (wl_uint32_list_t *)(void *) valid_chan_list;
+ list->count = htod32(WL_NUMCHANNELS);
+ ret = wl_ext_ioctl(net, WLC_GET_VALID_CHANNELS, &valid_chan_list,
+ sizeof(valid_chan_list), 0);
+ if (ret<0) {
+ AEXT_ERROR(net->name, "get channels failed with %d\n", ret);
+ return 0;
+ } else {
+ for (i = 0; i < dtoh32(list->count); i++) {
+ ch = dtoh32(list->element[i]);
+ if (!dhd_conf_match_channel(dhd, ch))
+ continue;
+ if (ch < CH_MAX_2G_CHANNEL)
+ b_band[ch-1] = 0;
+ else if (ch <= 48)
+ a_band1[(ch-36)/4] = 0;
+ else if (ch >= 149 && ch <= 161)
+ a_band4[(ch-149)/4] = 0;
+ }
+ }
+
+ distance_2g = wl_ext_get_distance(net, WLC_BAND_2G);
+ distance_5g = wl_ext_get_distance(net, WLC_BAND_5G);
+
+#if defined(BSSCACHE)
+ node = bss_cache_ctrl->m_cache_head;
+ for (i=0; node && i<256; i++)
+#else
+ for (i=0; i < bss_list->count; i++)
+#endif /* BSSCACHE */
+ {
+#if defined(BSSCACHE)
+ bi = node->results.bss_info;
+#else
+ bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : bss_list->bss_info;
+#endif /* BSSCACHE */
+ chanspec = wl_ext_chspec_driver_to_host(ioctl_ver, bi->chanspec);
+ cen_ch = CHSPEC_CHANNEL(bi->chanspec);
+ distance = 0;
+ if (CHSPEC_IS20(chanspec))
+ distance += 2;
+ else if (CHSPEC_IS40(chanspec))
+ distance += 4;
+ else if (CHSPEC_IS80(chanspec))
+ distance += 8;
+ else
+ distance += 16;
+
+ if (CHSPEC_IS2G(chanspec)) {
+ distance += distance_2g;
+ for (j=0; j<ARRAYSIZE(b_band); j++) {
+ if (b_band[j] >= 0 && abs(cen_ch-(1+j)) <= distance)
+ b_band[j] += 1;
+ }
+ } else {
+ distance += distance_5g;
+ if (cen_ch <= 48) {
+ for (j=0; j<ARRAYSIZE(a_band1); j++) {
+ if (a_band1[j] >= 0 && abs(cen_ch-(36+j*4)) <= distance)
+ a_band1[j] += 1;
+ }
+ } else if (cen_ch >= 149) {
+ for (j=0; j<ARRAYSIZE(a_band4); j++) {
+ if (a_band4[j] >= 0 && abs(cen_ch-(149+j*4)) <= distance)
+ a_band4[j] += 1;
+ }
+ }
+ }
+#if defined(BSSCACHE)
+ node = node->next;
+#endif /* BSSCACHE */
+ }
+
+	*best_2g_ch = 0;
+	min_ap = 999;
+	for (i=0; i<CH_MAX_2G_CHANNEL; i++) {
+		if (b_band[i] < min_ap && b_band[i] >= 0) {
+			min_ap = b_band[i];
+			*best_2g_ch = i+1;
+		}
+	}
+	*best_5g_ch = 0;
+	min_ap = 999;
+	for (i=0; i<ARRAYSIZE(a_band1); i++) {
+		if (a_band1[i] < min_ap && a_band1[i] >= 0) {
+			min_ap = a_band1[i];
+			*best_5g_ch = i*4 + 36;
+		}
+	}
+	for (i=0; i<ARRAYSIZE(a_band4); i++) {
+		if (a_band4[i] < min_ap && a_band4[i] >= 0) {
+			min_ap = a_band4[i];
+			*best_5g_ch = i*4 + 149;
+		}
+	}
+
+ if (android_msg_level & ANDROID_INFO_LEVEL) {
+ struct bcmstrbuf strbuf;
+ char *tmp_buf = NULL;
+ tmp_buf = kmalloc(WLC_IOCTL_SMLEN, GFP_KERNEL);
+ if (tmp_buf == NULL) {
+ AEXT_ERROR(net->name, "Failed to allocate buffer of %d bytes\n", WLC_IOCTL_SMLEN);
+ goto exit;
+ }
+ bcm_binit(&strbuf, tmp_buf, WLC_IOCTL_SMLEN);
+ for (j=0; j<ARRAYSIZE(b_band); j++)
+ bcm_bprintf(&strbuf, "%d/%d, ", b_band[j], 1+j);
+ bcm_bprintf(&strbuf, "\n");
+ for (j=0; j<ARRAYSIZE(a_band1); j++)
+ bcm_bprintf(&strbuf, "%d/%d, ", a_band1[j], 36+j*4);
+ bcm_bprintf(&strbuf, "\n");
+ for (j=0; j<ARRAYSIZE(a_band4); j++)
+ bcm_bprintf(&strbuf, "%d/%d, ", a_band4[j], 149+j*4);
+ bcm_bprintf(&strbuf, "\n");
+ bcm_bprintf(&strbuf, "best_2g_ch=%d, best_5g_ch=%d\n",
+ *best_2g_ch, *best_5g_ch);
+ AEXT_INFO(net->name, "\n%s", strbuf.origbuf);
+ if (tmp_buf) {
+ kfree(tmp_buf);
+ }
+ }
+
+exit:
+ return 0;
+}
+#endif /* WL_CFG80211 || WL_ESCAN */
+
+#ifdef WL_CFG80211
+#define APCS_MAX_RETRY 10
+static int
+wl_ext_fw_apcs(struct net_device *dev, uint32 band)
+{
+ int channel = 0, chosen = 0, retry = 0, ret = 0, spect = 0;
+ u8 *reqbuf = NULL;
+ uint32 buf_size;
+
+ ret = wldev_ioctl_get(dev, WLC_GET_SPECT_MANAGMENT, &spect, sizeof(spect));
+ if (ret) {
+ AEXT_ERROR(dev->name, "ACS: error getting the spect, ret=%d\n", ret);
+ goto done;
+ }
+
+ if (spect > 0) {
+ ret = wl_android_set_spect(dev, 0);
+ if (ret < 0) {
+ AEXT_ERROR(dev->name, "ACS: error while setting spect, ret=%d\n", ret);
+ goto done;
+ }
+ }
+
+ reqbuf = kmalloc(CHANSPEC_BUF_SIZE, GFP_KERNEL);
+ if (reqbuf == NULL) {
+ AEXT_ERROR(dev->name, "failed to allocate chanspec buffer\n");
+ goto done;
+ }
+ memset(reqbuf, 0, CHANSPEC_BUF_SIZE);
+
+ if (band == WLC_BAND_AUTO) {
+ AEXT_INFO(dev->name, "ACS full channel scan \n");
+ reqbuf[0] = htod32(0);
+ } else if (band == WLC_BAND_5G) {
+ AEXT_INFO(dev->name, "ACS 5G band scan \n");
+ ret = wl_android_get_band_chanspecs(dev, (void *)reqbuf, CHANSPEC_BUF_SIZE,
+ WL_CHANSPEC_BAND_5G, false);
+ if (ret < 0) {
+			AEXT_ERROR(dev->name, "ACS 5g chanspec retrieval failed!\n");
+ goto done;
+ }
+ } else if (band == WLC_BAND_2G) {
+ /*
+ * If channel argument is not provided/ argument 20 is provided,
+ * Restrict channel to 2GHz, 20MHz BW, No SB
+ */
+ AEXT_INFO(dev->name, "ACS 2G band scan \n");
+ ret = wl_android_get_band_chanspecs(dev, (void *)reqbuf, CHANSPEC_BUF_SIZE,
+ WL_CHANSPEC_BAND_2G, false);
+ if (ret < 0) {
+			AEXT_ERROR(dev->name, "ACS 2g chanspec retrieval failed!\n");
+ goto done;
+ }
+ } else {
+ AEXT_ERROR(dev->name, "ACS: No band chosen\n");
+ goto done;
+ }
+
+ buf_size = (band == WLC_BAND_AUTO) ? sizeof(int) : CHANSPEC_BUF_SIZE;
+ ret = wldev_ioctl_set(dev, WLC_START_CHANNEL_SEL, (void *)reqbuf,
+ buf_size);
+ if (ret < 0) {
+ AEXT_ERROR(dev->name, "can't start auto channel scan, err = %d\n", ret);
+ channel = 0;
+ goto done;
+ }
+
+ /* Wait for auto channel selection, max 3000 ms */
+ if ((band == WLC_BAND_2G) || (band == WLC_BAND_5G)) {
+ OSL_SLEEP(500);
+ } else {
+ /*
+ * Full channel scan at the minimum takes 1.2secs
+ * even with parallel scan. max wait time: 3500ms
+ */
+ OSL_SLEEP(1000);
+ }
+
+ retry = APCS_MAX_RETRY;
+ while (retry--) {
+ ret = wldev_ioctl_get(dev, WLC_GET_CHANNEL_SEL, &chosen,
+ sizeof(chosen));
+ if (ret < 0) {
+ chosen = 0;
+ } else {
+ chosen = dtoh32(chosen);
+ }
+
+ if (chosen) {
+ int chosen_band;
+ int apcs_band;
+#ifdef D11AC_IOTYPES
+ if (wl_cfg80211_get_ioctl_version() == 1) {
+ channel = LCHSPEC_CHANNEL((chanspec_t)chosen);
+ } else {
+ channel = CHSPEC_CHANNEL((chanspec_t)chosen);
+ }
+#else
+ channel = CHSPEC_CHANNEL((chanspec_t)chosen);
+#endif /* D11AC_IOTYPES */
+ apcs_band = (band == WLC_BAND_AUTO) ? WLC_BAND_2G : band;
+ chosen_band = (channel <= CH_MAX_2G_CHANNEL) ? WLC_BAND_2G : WLC_BAND_5G;
+ if (apcs_band == chosen_band) {
+ WL_MSG(dev->name, "selected channel = %d\n", channel);
+ break;
+ }
+ }
+ AEXT_INFO(dev->name, "%d tried, ret = %d, chosen = 0x%x\n",
+ (APCS_MAX_RETRY - retry), ret, chosen);
+ OSL_SLEEP(250);
+ }
+
+done:
+ if (spect > 0) {
+		if ((ret = wl_android_set_spect(dev, spect)) < 0) {
+			AEXT_ERROR(dev->name, "ACS: error while setting spect, ret=%d\n", ret);
+		}
+ }
+
+ if (reqbuf) {
+ kfree(reqbuf);
+ }
+
+ return channel;
+}
+#endif /* WL_CFG80211 */
+
+#ifdef WL_ESCAN
+int
+wl_ext_drv_scan(struct net_device *dev, uint32 band, bool fast_scan)
+{
+ int ret = -1, i, cnt = 0;
+ int retry = 0, retry_max, retry_interval = 250, up = 1;
+ wl_scan_info_t scan_info;
+
+ retry_max = WL_ESCAN_TIMER_INTERVAL_MS/retry_interval;
+ ret = wldev_ioctl_get(dev, WLC_GET_UP, &up, sizeof(s32));
+ if (ret < 0 || up == 0) {
+ ret = wldev_ioctl_set(dev, WLC_UP, &up, sizeof(s32));
+ }
+ memset(&scan_info, 0, sizeof(wl_scan_info_t));
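+	/* Build the channel list: 2G channels 1-13 plus the non-DFS 5G
+	 * channels 36-48 and 149-161.
+	 */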
+ if (band == WLC_BAND_2G || band == WLC_BAND_AUTO) {
+ for (i=0; i<13; i++)
+ scan_info.channels.channel[i] = i + 1;
+ cnt += 13;
+ }
+ if (band == WLC_BAND_5G || band == WLC_BAND_AUTO) {
+ for (i=0; i<4; i++)
+ scan_info.channels.channel[i+cnt] = 36 + i*4;
+ cnt += 4;
+ for (i=0; i<4; i++)
+ scan_info.channels.channel[i+cnt] = 149 + i*4;
+ cnt += 4;
+ }
+ scan_info.channels.count = cnt;
+ if (fast_scan)
+ scan_info.scan_time = 40;
+ scan_info.bcast_ssid = TRUE;
+ retry = retry_max;
+ while (retry--) {
+ ret = wl_escan_set_scan(dev, &scan_info);
+ if (!ret)
+ break;
+ OSL_SLEEP(retry_interval);
+ }
+	if (ret) {
+		AEXT_ERROR(dev->name, "scan failed after %d retries\n", retry_max);
+		ret = -1;
+	}
+
+ return ret;
+}
+
+int
+wl_ext_drv_apcs(struct net_device *dev, uint32 band)
+{
+ int ret = 0, channel = 0;
+ struct dhd_pub *dhd = dhd_get_pub(dev);
+ struct wl_escan_info *escan = NULL;
+ int retry = 0, retry_max, retry_interval = 250;
+
+ escan = dhd->escan;
+ WL_MSG(dev->name, "ACS_SCAN\n");
+ escan->autochannel = 1;
+ ret = wl_ext_drv_scan(dev, band, TRUE);
+ if (ret < 0)
+ goto done;
+ retry_max = WL_ESCAN_TIMER_INTERVAL_MS/retry_interval;
+ retry = retry_max;
+ while (retry--) {
+ if (escan->escan_state == ESCAN_STATE_IDLE) {
+ if (band == WLC_BAND_5G)
+ channel = escan->best_5g_ch;
+ else
+ channel = escan->best_2g_ch;
+ WL_MSG(dev->name, "selected channel = %d\n", channel);
+ goto done;
+ }
+ AEXT_INFO(dev->name, "escan_state=%d, %d tried, ret = %d\n",
+ escan->escan_state, (retry_max - retry), ret);
+ OSL_SLEEP(retry_interval);
+ }
+
+done:
+ escan->autochannel = 0;
+
+ return channel;
+}
+#endif /* WL_ESCAN */
+
+int
+wl_ext_autochannel(struct net_device *dev, uint acs, uint32 band)
+{
+ int channel = 0;
+ uint16 chan_2g, chan_5g;
+
+ AEXT_INFO(dev->name, "acs=0x%x, band=%d \n", acs, band);
+
+#ifdef WL_CFG80211
+ if (acs & ACS_FW_BIT) {
+ int ret = 0;
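+		/* Probe WLC_GET_CHANNEL_SEL first; run firmware APCS only when
+		 * the ioctl is supported.
+		 */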
+ ret = wldev_ioctl_get(dev, WLC_GET_CHANNEL_SEL, &channel, sizeof(channel));
+ channel = 0;
+ if (ret != BCME_UNSUPPORTED)
+ channel = wl_ext_fw_apcs(dev, band);
+ if (channel)
+ return channel;
+ }
+#endif
+
+#ifdef WL_ESCAN
+ if (acs & ACS_DRV_BIT)
+ channel = wl_ext_drv_apcs(dev, band);
+#endif /* WL_ESCAN */
+
+ if (channel == 0) {
+ wl_ext_get_default_chan(dev, &chan_2g, &chan_5g, TRUE);
+ if (band == WLC_BAND_5G) {
+ channel = chan_5g;
+ } else {
+ channel = chan_2g;
+ }
+ AEXT_ERROR(dev->name, "ACS failed. Fall back to default channel (%d) \n", channel);
+ }
+
+ return channel;
+}
+
+#if defined(RSSIAVG)
+void
+wl_free_rssi_cache(wl_rssi_cache_ctrl_t *rssi_cache_ctrl)
+{
+ wl_rssi_cache_t *node, *cur, **rssi_head;
+ int i=0;
+
+ rssi_head = &rssi_cache_ctrl->m_cache_head;
+ node = *rssi_head;
+
+ for (;node;) {
+ AEXT_INFO("wlan", "Free %d with BSSID %pM\n", i, &node->BSSID);
+ cur = node;
+ node = cur->next;
+ kfree(cur);
+ i++;
+ }
+ *rssi_head = NULL;
+}
+
+void
+wl_delete_dirty_rssi_cache(wl_rssi_cache_ctrl_t *rssi_cache_ctrl)
+{
+ wl_rssi_cache_t *node, *prev, **rssi_head;
+ int i = -1, tmp = 0;
+ struct osl_timespec now;
+
+ osl_do_gettimeofday(&now);
+
+ rssi_head = &rssi_cache_ctrl->m_cache_head;
+ node = *rssi_head;
+ prev = node;
+ for (;node;) {
+ i++;
+ if (now.tv_sec > node->tv.tv_sec) {
+ if (node == *rssi_head) {
+ tmp = 1;
+ *rssi_head = node->next;
+ } else {
+ tmp = 0;
+ prev->next = node->next;
+ }
+ AEXT_INFO("wlan", "Del %d with BSSID %pM\n", i, &node->BSSID);
+ kfree(node);
+ if (tmp == 1) {
+ node = *rssi_head;
+ prev = node;
+ } else {
+ node = prev->next;
+ }
+ continue;
+ }
+ prev = node;
+ node = node->next;
+ }
+}
+
+void
+wl_delete_disconnected_rssi_cache(wl_rssi_cache_ctrl_t *rssi_cache_ctrl,
+ u8 *bssid)
+{
+ wl_rssi_cache_t *node, *prev, **rssi_head;
+ int i = -1, tmp = 0;
+
+ rssi_head = &rssi_cache_ctrl->m_cache_head;
+ node = *rssi_head;
+ prev = node;
+ for (;node;) {
+ i++;
+ if (!memcmp(&node->BSSID, bssid, ETHER_ADDR_LEN)) {
+ if (node == *rssi_head) {
+ tmp = 1;
+ *rssi_head = node->next;
+ } else {
+ tmp = 0;
+ prev->next = node->next;
+ }
+ AEXT_INFO("wlan", "Del %d with BSSID %pM\n", i, &node->BSSID);
+ kfree(node);
+ if (tmp == 1) {
+ node = *rssi_head;
+ prev = node;
+ } else {
+ node = prev->next;
+ }
+ continue;
+ }
+ prev = node;
+ node = node->next;
+ }
+}
+
+void
+wl_reset_rssi_cache(wl_rssi_cache_ctrl_t *rssi_cache_ctrl)
+{
+ wl_rssi_cache_t *node, **rssi_head;
+
+ rssi_head = &rssi_cache_ctrl->m_cache_head;
+
+ /* reset dirty */
+ node = *rssi_head;
+ for (;node;) {
+ node->dirty += 1;
+ node = node->next;
+ }
+}
+
+int
+wl_update_connected_rssi_cache(struct net_device *net,
+ wl_rssi_cache_ctrl_t *rssi_cache_ctrl, int *rssi_avg)
+{
+ wl_rssi_cache_t *node, *prev, *leaf, **rssi_head;
+ int j, k=0;
+ int rssi, error=0;
+ struct ether_addr bssid;
+ struct osl_timespec now, timeout;
+ scb_val_t scbval;
+
+ if (!g_wifi_on)
+ return 0;
+
+ error = wldev_ioctl(net, WLC_GET_BSSID, &bssid, sizeof(bssid), 0);
+ if (error == BCME_NOTASSOCIATED) {
+ AEXT_INFO("wlan", "Not Associated! res:%d\n", error);
+ return 0;
+ }
+ if (error) {
+ AEXT_ERROR(net->name, "Could not get bssid (%d)\n", error);
+ }
+ error = wldev_get_rssi(net, &scbval);
+ if (error) {
+ AEXT_ERROR(net->name, "Could not get rssi (%d)\n", error);
+ return error;
+ }
+ rssi = dtoh32(scbval.val);
+
+ osl_do_gettimeofday(&now);
+ timeout.tv_sec = now.tv_sec + RSSICACHE_TIMEOUT;
+ if (timeout.tv_sec < now.tv_sec) {
+		/*
+		 * Integer overflow - the timeout is long enough to be
+		 * treated as infinite, i.e., it will never expire.
+		 */
+ AEXT_TRACE(net->name,
+ "Too long timeout (secs=%d) to ever happen - now=%lu, timeout=%lu\n",
+ RSSICACHE_TIMEOUT, now.tv_sec, timeout.tv_sec);
+ }
+
+ /* update RSSI */
+ rssi_head = &rssi_cache_ctrl->m_cache_head;
+ node = *rssi_head;
+ prev = NULL;
+ for (;node;) {
+ if (!memcmp(&node->BSSID, &bssid, ETHER_ADDR_LEN)) {
+ AEXT_INFO("wlan", "Update %d with BSSID %pM, RSSI=%d\n", k, &bssid, rssi);
+ for (j=0; j<RSSIAVG_LEN-1; j++)
+ node->RSSI[j] = node->RSSI[j+1];
+ node->RSSI[j] = rssi;
+ node->dirty = 0;
+ node->tv = timeout;
+ goto exit;
+ }
+ prev = node;
+ node = node->next;
+ k++;
+ }
+
+ leaf = kmalloc(sizeof(wl_rssi_cache_t), GFP_KERNEL);
+ if (!leaf) {
+ AEXT_ERROR(net->name, "Memory alloc failure %d\n", (int)sizeof(wl_rssi_cache_t));
+ return 0;
+ }
+ AEXT_INFO(net->name, "Add %d with cached BSSID %pM, RSSI=%3d in the leaf\n",
+ k, &bssid, rssi);
+
+ leaf->next = NULL;
+ leaf->dirty = 0;
+ leaf->tv = timeout;
+ memcpy(&leaf->BSSID, &bssid, ETHER_ADDR_LEN);
+ for (j=0; j<RSSIAVG_LEN; j++)
+ leaf->RSSI[j] = rssi;
+
+ if (!prev)
+ *rssi_head = leaf;
+ else
+ prev->next = leaf;
+
+exit:
+ *rssi_avg = (int)wl_get_avg_rssi(rssi_cache_ctrl, &bssid);
+
+ return error;
+}
+
+void
+wl_update_rssi_cache(wl_rssi_cache_ctrl_t *rssi_cache_ctrl,
+ wl_scan_results_t *ss_list)
+{
+ wl_rssi_cache_t *node, *prev, *leaf, **rssi_head;
+ wl_bss_info_t *bi = NULL;
+ int i, j, k;
+ struct osl_timespec now, timeout;
+
+ if (!ss_list->count)
+ return;
+
+ osl_do_gettimeofday(&now);
+ timeout.tv_sec = now.tv_sec + RSSICACHE_TIMEOUT;
+ if (timeout.tv_sec < now.tv_sec) {
+		/*
+		 * Integer overflow - the timeout is long enough to be
+		 * treated as infinite, i.e., it will never expire.
+		 */
+ AEXT_TRACE("wlan",
+ "Too long timeout (secs=%d) to ever happen - now=%lu, timeout=%lu\n",
+ RSSICACHE_TIMEOUT, now.tv_sec, timeout.tv_sec);
+ }
+
+ rssi_head = &rssi_cache_ctrl->m_cache_head;
+
+ /* update RSSI */
+ for (i = 0; i < ss_list->count; i++) {
+ node = *rssi_head;
+ prev = NULL;
+ k = 0;
+ bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : ss_list->bss_info;
+ for (;node;) {
+ if (!memcmp(&node->BSSID, &bi->BSSID, ETHER_ADDR_LEN)) {
+ AEXT_INFO("wlan", "Update %d with BSSID %pM, RSSI=%3d, SSID \"%s\"\n",
+ k, &bi->BSSID, dtoh16(bi->RSSI), bi->SSID);
+ for (j=0; j<RSSIAVG_LEN-1; j++)
+ node->RSSI[j] = node->RSSI[j+1];
+ node->RSSI[j] = dtoh16(bi->RSSI);
+ node->dirty = 0;
+ node->tv = timeout;
+ break;
+ }
+ prev = node;
+ node = node->next;
+ k++;
+ }
+
+ if (node)
+ continue;
+
+ leaf = kmalloc(sizeof(wl_rssi_cache_t), GFP_KERNEL);
+ if (!leaf) {
+ AEXT_ERROR("wlan", "Memory alloc failure %d\n",
+ (int)sizeof(wl_rssi_cache_t));
+ return;
+ }
+ AEXT_INFO("wlan", "Add %d with cached BSSID %pM, RSSI=%3d, SSID \"%s\" in the leaf\n",
+ k, &bi->BSSID, dtoh16(bi->RSSI), bi->SSID);
+
+ leaf->next = NULL;
+ leaf->dirty = 0;
+ leaf->tv = timeout;
+ memcpy(&leaf->BSSID, &bi->BSSID, ETHER_ADDR_LEN);
+ for (j=0; j<RSSIAVG_LEN; j++)
+ leaf->RSSI[j] = dtoh16(bi->RSSI);
+
+ if (!prev)
+ *rssi_head = leaf;
+ else
+ prev->next = leaf;
+ }
+}
+
+int16
+wl_get_avg_rssi(wl_rssi_cache_ctrl_t *rssi_cache_ctrl, void *addr)
+{
+ wl_rssi_cache_t *node, **rssi_head;
+ int j, rssi_sum, rssi=RSSI_MINVAL;
+
+ rssi_head = &rssi_cache_ctrl->m_cache_head;
+
+ node = *rssi_head;
+ for (;node;) {
+ if (!memcmp(&node->BSSID, addr, ETHER_ADDR_LEN)) {
+ rssi_sum = 0;
+ rssi = 0;
+ for (j=0; j<RSSIAVG_LEN; j++)
+ rssi_sum += node->RSSI[RSSIAVG_LEN-j-1];
+ rssi = rssi_sum / j;
+ break;
+ }
+ node = node->next;
+ }
+ rssi = MIN(rssi, RSSI_MAXVAL);
+ if (rssi == RSSI_MINVAL) {
+		AEXT_ERROR("wlan", "BSSID %pM is not in the RSSI cache\n", addr);
+ }
+ return (int16)rssi;
+}
+#endif /* RSSIAVG */
+
+#if defined(RSSIOFFSET)
+int
+wl_update_rssi_offset(struct net_device *net, int rssi)
+{
+#if defined(RSSIOFFSET_NEW)
+ int j;
+#endif /* RSSIOFFSET_NEW */
+
+ if (!g_wifi_on)
+ return rssi;
+
+#if defined(RSSIOFFSET_NEW)
+ for (j=0; j<RSSI_OFFSET; j++) {
+ if (rssi - (RSSI_OFFSET_MINVAL+RSSI_OFFSET_INTVAL*(j+1)) < 0)
+ break;
+ }
+ rssi += j;
+#else
+ rssi += RSSI_OFFSET;
+#endif /* RSSIOFFSET_NEW */
+ return MIN(rssi, RSSI_MAXVAL);
+}
+#endif /* RSSIOFFSET */
+
+#if defined(BSSCACHE)
+void
+wl_free_bss_cache(wl_bss_cache_ctrl_t *bss_cache_ctrl)
+{
+ wl_bss_cache_t *node, *cur, **bss_head;
+ int i=0;
+
+ AEXT_TRACE("wlan", "called\n");
+
+ bss_head = &bss_cache_ctrl->m_cache_head;
+ node = *bss_head;
+
+ for (;node;) {
+ AEXT_TRACE("wlan", "Free %d with BSSID %pM\n",
+ i, &node->results.bss_info->BSSID);
+ cur = node;
+ node = cur->next;
+ kfree(cur);
+ i++;
+ }
+ *bss_head = NULL;
+}
+
+void
+wl_delete_dirty_bss_cache(wl_bss_cache_ctrl_t *bss_cache_ctrl)
+{
+ wl_bss_cache_t *node, *prev, **bss_head;
+ int i = -1, tmp = 0;
+ struct osl_timespec now;
+
+ osl_do_gettimeofday(&now);
+
+ bss_head = &bss_cache_ctrl->m_cache_head;
+ node = *bss_head;
+ prev = node;
+ for (;node;) {
+ i++;
+ if (now.tv_sec > node->tv.tv_sec || node->dirty > BSSCACHE_DIRTY) {
+ if (node == *bss_head) {
+ tmp = 1;
+ *bss_head = node->next;
+ } else {
+ tmp = 0;
+ prev->next = node->next;
+ }
+ AEXT_TRACE("wlan", "Del %d with BSSID %pM, RSSI=%3d, SSID \"%s\"\n",
+ i, &node->results.bss_info->BSSID,
+ dtoh16(node->results.bss_info->RSSI), node->results.bss_info->SSID);
+ kfree(node);
+ if (tmp == 1) {
+ node = *bss_head;
+ prev = node;
+ } else {
+ node = prev->next;
+ }
+ continue;
+ }
+ prev = node;
+ node = node->next;
+ }
+}
+
+void
+wl_delete_disconnected_bss_cache(wl_bss_cache_ctrl_t *bss_cache_ctrl,
+ u8 *bssid)
+{
+ wl_bss_cache_t *node, *prev, **bss_head;
+ int i = -1, tmp = 0;
+
+ bss_head = &bss_cache_ctrl->m_cache_head;
+ node = *bss_head;
+ prev = node;
+ for (;node;) {
+ i++;
+ if (!memcmp(&node->results.bss_info->BSSID, bssid, ETHER_ADDR_LEN)) {
+ if (node == *bss_head) {
+ tmp = 1;
+ *bss_head = node->next;
+ } else {
+ tmp = 0;
+ prev->next = node->next;
+ }
+ AEXT_TRACE("wlan", "Del %d with BSSID %pM, RSSI=%3d, SSID \"%s\"\n",
+ i, &node->results.bss_info->BSSID,
+ dtoh16(node->results.bss_info->RSSI), node->results.bss_info->SSID);
+ kfree(node);
+ if (tmp == 1) {
+ node = *bss_head;
+ prev = node;
+ } else {
+ node = prev->next;
+ }
+ continue;
+ }
+ prev = node;
+ node = node->next;
+ }
+}
+
+int
+wl_bss_cache_size(wl_bss_cache_ctrl_t *bss_cache_ctrl)
+{
+ wl_bss_cache_t *node, **bss_head;
+ int bss_num = 0;
+
+ bss_head = &bss_cache_ctrl->m_cache_head;
+
+ node = *bss_head;
+ for (;node;) {
+ if (node->dirty > 1) {
+ bss_num++;
+ }
+ node = node->next;
+ }
+ return bss_num;
+}
+
+void
+wl_reset_bss_cache(wl_bss_cache_ctrl_t *bss_cache_ctrl)
+{
+ wl_bss_cache_t *node, **bss_head;
+
+ bss_head = &bss_cache_ctrl->m_cache_head;
+
+ /* reset dirty */
+ node = *bss_head;
+ for (;node;) {
+ node->dirty += 1;
+ node = node->next;
+ }
+}
+
+static void
+wl_bss_cache_dump(
+#if defined(RSSIAVG)
+ wl_rssi_cache_ctrl_t *rssi_cache_ctrl,
+#endif /* RSSIAVG */
+ wl_bss_cache_t *node)
+{
+ int k = 0;
+ int16 rssi;
+
+ for (;node;) {
+#if defined(RSSIAVG)
+ rssi = wl_get_avg_rssi(rssi_cache_ctrl, &node->results.bss_info->BSSID);
+#else
+ rssi = dtoh16(node->results.bss_info->RSSI);
+#endif /* RSSIAVG */
+ k++;
+ AEXT_TRACE("wlan", "dump %d with cached BSSID %pM, RSSI=%3d, SSID \"%s\"\n",
+ k, &node->results.bss_info->BSSID, rssi, node->results.bss_info->SSID);
+ node = node->next;
+ }
+}
+
+#if defined(SORT_BSS_CHANNEL)
+static wl_bss_cache_t *
+wl_bss_cache_sort_channel(wl_bss_cache_t **bss_head, wl_bss_cache_t *leaf)
+{
+ wl_bss_cache_t *node, *prev;
+ uint16 channel, channel_node;
+
+ node = *bss_head;
+ channel = wf_chspec_ctlchan(leaf->results.bss_info->chanspec);
+ for (;node;) {
+ channel_node = wf_chspec_ctlchan(node->results.bss_info->chanspec);
+ if (channel_node > channel) {
+ leaf->next = node;
+ if (node == *bss_head)
+ *bss_head = leaf;
+ else
+ prev->next = leaf;
+ break;
+ }
+ prev = node;
+ node = node->next;
+ }
+ if (node == NULL)
+ prev->next = leaf;
+
+ return *bss_head;
+}
+#endif /* SORT_BSS_CHANNEL */
+
+#if defined(SORT_BSS_RSSI)
+static wl_bss_cache_t *
+wl_bss_cache_sort_rssi(wl_bss_cache_t **bss_head, wl_bss_cache_t *leaf
+#if defined(RSSIAVG)
+, wl_rssi_cache_ctrl_t *rssi_cache_ctrl
+#endif /* RSSIAVG */
+)
+{
+ wl_bss_cache_t *node, *prev;
+ int16 rssi, rssi_node;
+
+ node = *bss_head;
+#if defined(RSSIAVG)
+ rssi = wl_get_avg_rssi(rssi_cache_ctrl, &leaf->results.bss_info->BSSID);
+#else
+ rssi = dtoh16(leaf->results.bss_info->RSSI);
+#endif /* RSSIAVG */
+ for (;node;) {
+#if defined(RSSIAVG)
+ rssi_node = wl_get_avg_rssi(rssi_cache_ctrl,
+ &node->results.bss_info->BSSID);
+#else
+ rssi_node = dtoh16(node->results.bss_info->RSSI);
+#endif /* RSSIAVG */
+ if (rssi > rssi_node) {
+ leaf->next = node;
+ if (node == *bss_head)
+ *bss_head = leaf;
+ else
+ prev->next = leaf;
+ break;
+ }
+ prev = node;
+ node = node->next;
+ }
+ if (node == NULL)
+ prev->next = leaf;
+
+ return *bss_head;
+}
+#endif /* SORT_BSS_RSSI */
+
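+/* Merge a scan result list into the BSS cache: an entry with a matching
+ * BSSID is refreshed in place; once the cache holds BSSCACHE_MAXCNT entries,
+ * a new BSS replaces the weakest-RSSI entry only if its own RSSI is higher,
+ * and insertions are ordered per SORT_BSS_CHANNEL/SORT_BSS_RSSI.
+ */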
+void
+wl_update_bss_cache(wl_bss_cache_ctrl_t *bss_cache_ctrl,
+#if defined(RSSIAVG)
+ wl_rssi_cache_ctrl_t *rssi_cache_ctrl,
+#endif /* RSSIAVG */
+ wl_scan_results_t *ss_list)
+{
+ wl_bss_cache_t *node, *node_target = NULL, *prev, *leaf, **bss_head;
+ wl_bss_cache_t *node_rssi_prev = NULL, *node_rssi = NULL;
+ wl_bss_info_t *bi = NULL;
+ int i, k=0, bss_num = 0;
+ struct osl_timespec now, timeout;
+ int16 rssi_min;
+ bool rssi_replace = FALSE;
+
+ if (!ss_list->count)
+ return;
+
+ osl_do_gettimeofday(&now);
+ timeout.tv_sec = now.tv_sec + BSSCACHE_TIMEOUT;
+ if (timeout.tv_sec < now.tv_sec) {
+		/*
+		 * Integer overflow - the timeout is long enough to be
+		 * treated as infinite, i.e., it will never expire.
+		 */
+ AEXT_TRACE("wlan",
+ "Too long timeout (secs=%d) to ever happen - now=%lu, timeout=%lu\n",
+ BSSCACHE_TIMEOUT, now.tv_sec, timeout.tv_sec);
+ }
+
+ bss_head = &bss_cache_ctrl->m_cache_head;
+
+ // get the num of bss cache
+ node = *bss_head;
+ for (;node;) {
+ node = node->next;
+ bss_num++;
+ }
+
+ for (i=0; i < ss_list->count; i++) {
+ node = *bss_head;
+ prev = NULL;
+ node_target = NULL;
+ node_rssi_prev = NULL;
+ bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : ss_list->bss_info;
+
+ // find the bss with same BSSID
+ for (;node;) {
+ if (!memcmp(&node->results.bss_info->BSSID, &bi->BSSID, ETHER_ADDR_LEN)) {
+ if (node == *bss_head)
+ *bss_head = node->next;
+ else {
+ prev->next = node->next;
+ }
+ break;
+ }
+ prev = node;
+ node = node->next;
+ }
+ if (node)
+ node_target = node;
+
+ // find the bss with lowest RSSI
+ if (!node_target && bss_num >= BSSCACHE_MAXCNT) {
+ node = *bss_head;
+ prev = NULL;
+ rssi_min = dtoh16(bi->RSSI);
+ for (;node;) {
+ if (dtoh16(node->results.bss_info->RSSI) < rssi_min) {
+ node_rssi = node;
+ node_rssi_prev = prev;
+ rssi_min = dtoh16(node->results.bss_info->RSSI);
+ }
+ prev = node;
+ node = node->next;
+ }
+ if (dtoh16(bi->RSSI) > rssi_min) {
+ rssi_replace = TRUE;
+ node_target = node_rssi;
+ if (node_rssi == *bss_head)
+ *bss_head = node_rssi->next;
+ else if (node_rssi) {
+ node_rssi_prev->next = node_rssi->next;
+ }
+ }
+ }
+
+ k++;
+ if (bss_num < BSSCACHE_MAXCNT) {
+ bss_num++;
+ AEXT_TRACE("wlan",
+ "Add %d with cached BSSID %pM, RSSI=%3d, SSID \"%s\"\n",
+ k, &bi->BSSID, dtoh16(bi->RSSI), bi->SSID);
+ } else if (node_target) {
+ if (rssi_replace) {
+ AEXT_TRACE("wlan",
+ "Replace %d with cached BSSID %pM(%3d) => %pM(%3d), "\
+ "SSID \"%s\" => \"%s\"\n",
+ k, &node_target->results.bss_info->BSSID,
+ dtoh16(node_target->results.bss_info->RSSI),
+ &bi->BSSID, dtoh16(bi->RSSI),
+ node_target->results.bss_info->SSID, bi->SSID);
+ } else {
+ AEXT_TRACE("wlan",
+ "Update %d with cached BSSID %pM, RSSI=%3d, SSID \"%s\"\n",
+ k, &bi->BSSID, dtoh16(bi->RSSI), bi->SSID);
+ }
+ kfree(node_target);
+ node_target = NULL;
+ } else {
+ AEXT_TRACE("wlan", "Skip %d BSSID %pM, RSSI=%3d, SSID \"%s\"\n",
+ k, &bi->BSSID, dtoh16(bi->RSSI), bi->SSID);
+ continue;
+ }
+
+ leaf = kmalloc(dtoh32(bi->length) + sizeof(wl_bss_cache_t), GFP_KERNEL);
+ if (!leaf) {
+ AEXT_ERROR("wlan", "Memory alloc failure %d\n",
+ dtoh32(bi->length) + (int)sizeof(wl_bss_cache_t));
+ return;
+ }
+
+ memcpy(leaf->results.bss_info, bi, dtoh32(bi->length));
+ leaf->next = NULL;
+ leaf->dirty = 0;
+ leaf->tv = timeout;
+ leaf->results.count = 1;
+ leaf->results.version = ss_list->version;
+
+ if (*bss_head == NULL)
+ *bss_head = leaf;
+ else {
+#if defined(SORT_BSS_CHANNEL)
+ *bss_head = wl_bss_cache_sort_channel(bss_head, leaf);
+#elif defined(SORT_BSS_RSSI)
+ *bss_head = wl_bss_cache_sort_rssi(bss_head, leaf
+#if defined(RSSIAVG)
+ , rssi_cache_ctrl
+#endif /* RSSIAVG */
+ );
+#else
+ leaf->next = *bss_head;
+ *bss_head = leaf;
+#endif /* SORT_BSS_CHANNEL */
+ }
+ }
+ wl_bss_cache_dump(
+#if defined(RSSIAVG)
+ rssi_cache_ctrl,
+#endif /* RSSIAVG */
+ *bss_head);
+}
+
+void
+wl_release_bss_cache_ctrl(wl_bss_cache_ctrl_t *bss_cache_ctrl)
+{
+ AEXT_TRACE("wlan", "Enter\n");
+ wl_free_bss_cache(bss_cache_ctrl);
+}
+#endif /* BSSCACHE */
diff --git a/bcmdhd.101.10.361.x/wl_android_ext.h b/bcmdhd.101.10.361.x/wl_android_ext.h
new file mode 100755
index 0000000..1ebc33a
--- /dev/null
+++ b/bcmdhd.101.10.361.x/wl_android_ext.h
@@ -0,0 +1,175 @@
+
+#ifndef _wl_android_ext_
+#define _wl_android_ext_
+typedef struct bcol_gtk_para {
+ int enable;
+ int ptk_len;
+ char ptk[64];
+ char replay[8];
+} bcol_gtk_para_t;
+#define ACS_FW_BIT (1<<0)
+#define ACS_DRV_BIT (1<<1)
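+/* 'acs' bitmask for wl_ext_autochannel(): ACS_FW_BIT selects firmware-based
+ * channel selection (WLC_START_CHANNEL_SEL), ACS_DRV_BIT the driver's
+ * escan-based selection; if both are set, firmware is tried first.
+ */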
+int wl_ext_autochannel(struct net_device *dev, uint acs, uint32 band);
+int wl_android_ext_priv_cmd(struct net_device *net, char *command, int total_len,
+ int *bytes_written);
+void wl_ext_get_sec(struct net_device *dev, int ifmode, char *sec, int total_len, bool dump);
+bool wl_ext_check_scan(struct net_device *dev, dhd_pub_t *dhdp);
+int wl_ext_set_scan_time(struct net_device *dev, int scan_time,
+ uint32 scan_get, uint32 scan_set);
+void wl_ext_wait_event_complete(struct dhd_pub *dhd, int ifidx);
+int wl_ext_add_del_ie(struct net_device *dev, uint pktflag, char *ie_data, const char* add_del_cmd);
+#ifdef WL_ESCAN
+int wl_ext_drv_scan(struct net_device *dev, uint32 band, bool fast_scan);
+#endif
+#ifdef WL_EXT_GENL
+int wl_ext_genl_init(struct net_device *net);
+void wl_ext_genl_deinit(struct net_device *net);
+#endif
+#ifdef WL_EXT_IAPSTA
+#ifndef strtoul
+#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
+#endif
+int wl_ext_ioctl(struct net_device *dev, u32 cmd, void *arg, u32 len, u32 set);
+int wl_ext_iovar_getint(struct net_device *dev, s8 *iovar, s32 *val);
+int wl_ext_iovar_setint(struct net_device *dev, s8 *iovar, s32 val);
+int wl_ext_iovar_getbuf(struct net_device *dev, s8 *iovar_name,
+ void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync);
+int wl_ext_iovar_setbuf(struct net_device *dev, s8 *iovar_name,
+ void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync);
+int wl_ext_iovar_setbuf_bsscfg(struct net_device *dev, s8 *iovar_name,
+ void *param, s32 paramlen, void *buf, s32 buflen, s32 bsscfg_idx,
+ struct mutex* buf_sync);
+chanspec_t wl_ext_chspec_driver_to_host(int ioctl_ver, chanspec_t chanspec);
+chanspec_t wl_ext_chspec_host_to_driver(int ioctl_ver, chanspec_t chanspec);
+bool wl_ext_dfs_chan(uint16 chan);
+uint16 wl_ext_get_default_chan(struct net_device *dev,
+ uint16 *chan_2g, uint16 *chan_5g, bool nodfs);
+int wl_ext_set_chanspec(struct net_device *dev, int ioctl_ver,
+ uint16 channel, chanspec_t *ret_chspec);
+int wl_ext_get_ioctl_ver(struct net_device *dev, int *ioctl_ver);
+#endif
+#if defined(WL_CFG80211) || defined(WL_ESCAN)
+void wl_ext_user_sync(struct dhd_pub *dhd, int ifidx, bool lock);
+#endif
+#if defined(WL_CFG80211)
+bool wl_legacy_chip_check(struct net_device *net);
+bool wl_new_chip_check(struct net_device *net);
+bool wl_extsae_chip(struct dhd_pub *dhd);
+#endif
+#if defined(WL_EXT_IAPSTA) || defined(WL_CFG80211)
+void wl_ext_bss_iovar_war(struct net_device *dev, s32 *val);
+#endif /* WL_EXT_IAPSTA || WL_CFG80211 */
+
+typedef struct wl_conn_info {
+ uint8 bssidx;
+ wlc_ssid_t ssid;
+ struct ether_addr bssid;
+ uint16 channel;
+} wl_conn_info_t;
+#if defined(WL_EXT_IAPSTA) || defined(USE_IW)
+s32 wl_ext_connect(struct net_device *dev, wl_conn_info_t *conn_info);
+#endif /* WL_EXT_IAPSTA || USE_IW */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0))
+#define strnicmp(str1, str2, len) strncasecmp((str1), (str2), (len))
+#endif
+
+/* terence:
+ * BSSCACHE: Cache bss list
+ * RSSIAVG: Average RSSI of BSS list
+ * RSSIOFFSET: RSSI offset
+ * SORT_BSS_CHANNEL/SORT_BSS_RSSI: Sort cached BSS list by channel or RSSI
+ */
+//#define BSSCACHE
+//#define RSSIAVG
+//#define RSSIOFFSET
+//#define RSSIOFFSET_NEW
+
+#define RSSI_MAXVAL -2
+#define RSSI_MINVAL -200
+
+#if defined(ESCAN_RESULT_PATCH)
+#define REPEATED_SCAN_RESULT_CNT 2
+#else
+#define REPEATED_SCAN_RESULT_CNT 1
+#endif
+
+#if defined(RSSIAVG) || defined(RSSIOFFSET)
+extern int g_wifi_on;
+#endif
+
+#if defined(RSSIAVG)
+#define RSSIAVG_LEN (4*REPEATED_SCAN_RESULT_CNT)
+#define RSSICACHE_TIMEOUT 15
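+/* e.g. with ESCAN_RESULT_PATCH (REPEATED_SCAN_RESULT_CNT == 2) the average
+ * spans 4*2 = 8 samples; a cache entry expires RSSICACHE_TIMEOUT (15)
+ * seconds after its last update.
+ */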
+
+typedef struct wl_rssi_cache {
+ struct wl_rssi_cache *next;
+ int dirty;
+ struct osl_timespec tv;
+ struct ether_addr BSSID;
+ int16 RSSI[RSSIAVG_LEN];
+} wl_rssi_cache_t;
+
+typedef struct wl_rssi_cache_ctrl {
+ wl_rssi_cache_t *m_cache_head;
+} wl_rssi_cache_ctrl_t;
+
+void wl_free_rssi_cache(wl_rssi_cache_ctrl_t *rssi_cache_ctrl);
+void wl_delete_dirty_rssi_cache(wl_rssi_cache_ctrl_t *rssi_cache_ctrl);
+void wl_delete_disconnected_rssi_cache(wl_rssi_cache_ctrl_t *rssi_cache_ctrl, u8 *bssid);
+void wl_reset_rssi_cache(wl_rssi_cache_ctrl_t *rssi_cache_ctrl);
+void wl_update_rssi_cache(wl_rssi_cache_ctrl_t *rssi_cache_ctrl, wl_scan_results_t *ss_list);
+int wl_update_connected_rssi_cache(struct net_device *net, wl_rssi_cache_ctrl_t *rssi_cache_ctrl, int *rssi_avg);
+int16 wl_get_avg_rssi(wl_rssi_cache_ctrl_t *rssi_cache_ctrl, void *addr);
+#endif
+
+#if defined(RSSIOFFSET)
+#define RSSI_OFFSET 5
+#if defined(RSSIOFFSET_NEW)
+#define RSSI_OFFSET_MAXVAL -80
+#define RSSI_OFFSET_MINVAL -94
+#define RSSI_OFFSET_INTVAL ((RSSI_OFFSET_MAXVAL-RSSI_OFFSET_MINVAL)/RSSI_OFFSET)
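+/* ((-80) - (-94)) / 5 = 2, i.e. the offset is applied in 2 dB steps across
+ * the [-94, -80] dBm range (integer division).
+ */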
+#endif
+#define BCM4330_CHIP_ID 0x4330
+#define BCM4330B2_CHIP_REV 4
+int wl_update_rssi_offset(struct net_device *net, int rssi);
+#endif
+
+#if defined(BSSCACHE)
+#define BSSCACHE_TIMEOUT 30
+#define BSSCACHE_MAXCNT 20
+#define BSSCACHE_DIRTY 4
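+/* wl_reset_bss_cache() bumps 'dirty' once per scan round; entries whose
+ * dirty count exceeds BSSCACHE_DIRTY, or whose BSSCACHE_TIMEOUT (seconds)
+ * has expired, are pruned by wl_delete_dirty_bss_cache().
+ */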
+#define SORT_BSS_CHANNEL
+//#define SORT_BSS_RSSI
+
+typedef struct wl_bss_cache {
+ struct wl_bss_cache *next;
+ int dirty;
+ struct osl_timespec tv;
+ wl_scan_results_t results;
+} wl_bss_cache_t;
+
+typedef struct wl_bss_cache_ctrl {
+ wl_bss_cache_t *m_cache_head;
+} wl_bss_cache_ctrl_t;
+
+void wl_free_bss_cache(wl_bss_cache_ctrl_t *bss_cache_ctrl);
+void wl_delete_dirty_bss_cache(wl_bss_cache_ctrl_t *bss_cache_ctrl);
+void wl_delete_disconnected_bss_cache(wl_bss_cache_ctrl_t *bss_cache_ctrl, u8 *bssid);
+int wl_bss_cache_size(wl_bss_cache_ctrl_t *bss_cache_ctrl);
+void wl_reset_bss_cache(wl_bss_cache_ctrl_t *bss_cache_ctrl);
+void wl_update_bss_cache(wl_bss_cache_ctrl_t *bss_cache_ctrl,
+#if defined(RSSIAVG)
+ wl_rssi_cache_ctrl_t *rssi_cache_ctrl,
+#endif
+ wl_scan_results_t *ss_list);
+void wl_release_bss_cache_ctrl(wl_bss_cache_ctrl_t *bss_cache_ctrl);
+#endif
+int wl_ext_get_best_channel(struct net_device *net,
+#if defined(BSSCACHE)
+ wl_bss_cache_ctrl_t *bss_cache_ctrl,
+#else
+ wl_scan_results_t *bss_list,
+#endif
+ int ioctl_ver, int *best_2g_ch, int *best_5g_ch
+);
+#endif
diff --git a/bcmdhd.101.10.361.x/wl_bam.c b/bcmdhd.101.10.361.x/wl_bam.c
new file mode 100755
index 0000000..ec9e91c
--- /dev/null
+++ b/bcmdhd.101.10.361.x/wl_bam.c
@@ -0,0 +1,643 @@
+/*
+ * Bad AP Manager for ADPS
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+#include <linuxver.h>
+#include <bcmiov.h>
+#include <linux/list_sort.h>
+#include <wl_cfg80211.h>
+#include <wlioctl.h>
+#include <wldev_common.h>
+#include <wl_bam.h>
+
+static int
+wl_bad_ap_mngr_add_entry(wl_bad_ap_mngr_t *bad_ap_mngr, wl_bad_ap_info_t *bad_ap_info)
+{
+ unsigned long flags;
+ wl_bad_ap_info_entry_t *entry;
+
+ entry = MALLOCZ(bad_ap_mngr->osh, sizeof(*entry));
+ if (entry == NULL) {
+ WL_ERR(("%s: allocation for list failed\n", __FUNCTION__));
+ return BCME_NOMEM;
+ }
+
+ memcpy(&entry->bad_ap, bad_ap_info, sizeof(entry->bad_ap));
+ INIT_LIST_HEAD(&entry->list);
+ spin_lock_irqsave(&bad_ap_mngr->lock, flags);
+ list_add_tail(&entry->list, &bad_ap_mngr->list);
+ spin_unlock_irqrestore(&bad_ap_mngr->lock, flags);
+
+ bad_ap_mngr->num++;
+
+ return BCME_OK;
+}
+
+#if !defined(DHD_ADPS_BAM_EXPORT)
+#define WL_BAD_AP_INFO_FILE_PATH PLATFORM_PATH".bad_ap_list.info"
+#define WL_BAD_AP_MAX_BUF_SIZE 1024u
+
+/* Bad AP information data format
+ *
+ * Status and Reason: come from event
+ * Connection count: incremented on each connection to the bad AP
+ *
+ * BSSID,year-month-day hour:min:sec,Status,Reason,Connection count
+ * ex) XX:XX:XX:XX:XX:XX,1970-01-01 00:00:00,1,2,1
+ *
+ */
+#define WL_BAD_AP_INFO_FMT \
+ "%02x:%02x:%02x:%02x:%02x:%02x,%04ld-%02d-%02d %02d:%02d:%02d,%u,%u,%u\n"
+#define WL_BAD_AP_INFO_FMT_ITEM_CNT 15u
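+/* 15 items: 6 BSSID octets + 6 date/time fields + status, reason and
+ * connection count.
+ */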
+
+static inline void
+wl_bad_ap_mngr_tm2ts(struct timespec *ts, const struct tm tm)
+{
+ ts->tv_sec = mktime(tm.tm_year, tm.tm_mon, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);
+ ts->tv_nsec = 0;
+}
+
+/* Ignore compiler warnings due to -Werror=cast-qual */
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+static int
+wl_bad_ap_mngr_timecmp(void *priv, struct list_head *a, struct list_head *b)
+{
+ int ret;
+
+ struct timespec ts1;
+ struct timespec ts2;
+
+ wl_bad_ap_info_entry_t *e1 = CONTAINEROF(a, wl_bad_ap_info_entry_t, list);
+ wl_bad_ap_info_entry_t *e2 = CONTAINEROF(b, wl_bad_ap_info_entry_t, list);
+
+ wl_bad_ap_mngr_tm2ts(&ts1, e1->bad_ap.tm);
+ wl_bad_ap_mngr_tm2ts(&ts2, e2->bad_ap.tm);
+
+ ret = timespec_compare((const struct timespec *)&ts1, (const struct timespec *)&ts2);
+
+ return ret;
+}
+
+static void
+wl_bad_ap_mngr_update(struct bcm_cfg80211 *cfg, wl_bad_ap_info_t *bad_ap_info)
+{
+ wl_bad_ap_info_entry_t *entry;
+ unsigned long flags;
+
+ if (list_empty(&cfg->bad_ap_mngr.list)) {
+ return;
+ }
+
+ WL_CFG_BAM_LOCK(&cfg->bad_ap_mngr.lock, flags);
+ /* sort by timestamp */
+ list_sort(NULL, &cfg->bad_ap_mngr.list, wl_bad_ap_mngr_timecmp);
+
+ /* update entry with the latest bad ap information */
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+ entry = list_first_entry(&cfg->bad_ap_mngr.list, wl_bad_ap_info_entry_t, list);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+ if (entry != NULL) {
+ memcpy(&entry->bad_ap, bad_ap_info, sizeof(entry->bad_ap));
+ }
+ WL_CFG_BAM_UNLOCK(&cfg->bad_ap_mngr.lock, flags);
+}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+
+static inline int
+wl_bad_ap_mngr_fread_bad_ap_info(char *buf, int buf_len, wl_bad_ap_info_t *bad_ap)
+{
+ return snprintf(buf, buf_len, WL_BAD_AP_INFO_FMT,
+ bad_ap->bssid.octet[0], bad_ap->bssid.octet[1],
+ bad_ap->bssid.octet[2], bad_ap->bssid.octet[3],
+ bad_ap->bssid.octet[4], bad_ap->bssid.octet[5],
+ bad_ap->tm.tm_year + 1900, bad_ap->tm.tm_mon + 1, bad_ap->tm.tm_mday,
+ bad_ap->tm.tm_hour, bad_ap->tm.tm_min, bad_ap->tm.tm_sec,
+ bad_ap->status, bad_ap->reason, bad_ap->connect_count);
+}
+
+static int
+wl_bad_ap_mngr_fparse(struct bcm_cfg80211 *cfg, struct file *fp)
+{
+ int len;
+ int pos = 0;
+ char tmp[128];
+ int ret = BCME_ERROR;
+
+ wl_bad_ap_info_t bad_ap;
+ char *buf = NULL;
+
+ buf = MALLOCZ(cfg->osh, WL_BAD_AP_MAX_BUF_SIZE);
+ if (buf == NULL) {
+ WL_ERR(("%s: allocation for buf failed\n", __FUNCTION__));
+ return BCME_NOMEM;
+ }
+
+ ret = vfs_read(fp, buf, WL_BAD_AP_MAX_BUF_SIZE, &fp->f_pos);
+ if (ret < 0) {
+ WL_ERR(("%s: file read failed (%d)\n", __FUNCTION__, ret));
+ goto fail;
+ }
+
+ len = ret;
+ do {
+ ret = sscanf(&buf[pos], WL_BAD_AP_INFO_FMT,
+ (uint32 *)&bad_ap.bssid.octet[0], (uint32 *)&bad_ap.bssid.octet[1],
+ (uint32 *)&bad_ap.bssid.octet[2], (uint32 *)&bad_ap.bssid.octet[3],
+ (uint32 *)&bad_ap.bssid.octet[4], (uint32 *)&bad_ap.bssid.octet[5],
+ (long int *)&bad_ap.tm.tm_year, (uint32 *)&bad_ap.tm.tm_mon,
+ (uint32 *)&bad_ap.tm.tm_mday, (uint32 *)&bad_ap.tm.tm_hour,
+ (uint32 *)&bad_ap.tm.tm_min, (uint32 *)&bad_ap.tm.tm_sec,
+ (uint32 *)&bad_ap.status, (uint32 *)&bad_ap.reason,
+ (uint32 *)&bad_ap.connect_count);
+ if (ret != WL_BAD_AP_INFO_FMT_ITEM_CNT) {
+ WL_ERR(("%s: file parse failed(expected: %d actual: %d)\n",
+ __FUNCTION__, WL_BAD_AP_INFO_FMT_ITEM_CNT, ret));
+ ret = BCME_ERROR;
+ goto fail;
+ }
+
+ /* convert struct tm format */
+ bad_ap.tm.tm_year -= 1900;
+ bad_ap.tm.tm_mon -= 1;
+
+ ret = wl_bad_ap_mngr_add(&cfg->bad_ap_mngr, &bad_ap);
+ if (ret < 0) {
+ WL_ERR(("%s: bad ap add failed\n", __FUNCTION__));
+ goto fail;
+ }
+
+ ret = wl_bad_ap_mngr_fread_bad_ap_info(tmp, sizeof(tmp), &bad_ap);
+ if (ret < 0) {
+ WL_ERR(("%s: wl_bad_ap_mngr_fread_bad_ap_info failed (%d)\n",
+ __FUNCTION__, ret));
+ goto fail;
+ }
+
+ if (cfg->bad_ap_mngr.num >= WL_BAD_AP_MAX_ENTRY_NUM) {
+ break;
+ }
+
+ len -= ret;
+ pos += ret;
+ } while (len > 0);
+
+ ret = BCME_OK;
+
+fail:
+ if (buf) {
+ MFREE(cfg->osh, buf, WL_BAD_AP_MAX_BUF_SIZE);
+ }
+
+ return ret;
+}
+
+static int
+wl_bad_ap_mngr_fread(struct bcm_cfg80211 *cfg, const char *fname)
+{
+ int ret = BCME_ERROR;
+
+ mm_segment_t fs;
+ struct file *fp = NULL;
+
+ if (fname == NULL) {
+ WL_ERR(("%s: fname is NULL\n", __FUNCTION__));
+ return ret;
+ }
+ mutex_lock(&cfg->bad_ap_mngr.fs_lock);
+
+ fs = get_fs();
+ set_fs(KERNEL_DS);
+
+	fp = filp_open(fname, O_RDONLY, 0);
+	if (IS_ERR(fp)) {
+		ret = PTR_ERR(fp);
+		WL_ERR(("%s: file open failed(%d)\n", __FUNCTION__, ret));
+		fp = NULL;
+		goto fail;
+	}
+
+ if ((ret = wl_bad_ap_mngr_fparse(cfg, fp)) < 0) {
+ goto fail;
+ }
+fail:
+ if (fp) {
+ filp_close(fp, NULL);
+ }
+ set_fs(fs);
+
+ mutex_unlock(&cfg->bad_ap_mngr.fs_lock);
+
+ return ret;
+}
+
+static int
+wl_bad_ap_mngr_fwrite(struct bcm_cfg80211 *cfg, const char *fname)
+{
+ int ret = BCME_ERROR;
+
+ mm_segment_t fs;
+ struct file *fp = NULL;
+
+ int len = 0;
+ char tmp[WL_BAD_AP_MAX_BUF_SIZE];
+ wl_bad_ap_info_t *bad_ap;
+ wl_bad_ap_info_entry_t *entry;
+
+ if (list_empty(&cfg->bad_ap_mngr.list)) {
+ return BCME_ERROR;
+ }
+
+ if (fname == NULL) {
+ return BCME_NOTFOUND;
+ }
+
+ mutex_lock(&cfg->bad_ap_mngr.fs_lock);
+
+ fs = get_fs();
+ set_fs(KERNEL_DS);
+
+ fp = filp_open(fname, O_CREAT | O_RDWR | O_TRUNC, 0666);
+ if (IS_ERR(fp)) {
+ ret = PTR_ERR(fp);
+ WL_ERR(("%s: file open failed(%d)\n", __FUNCTION__, ret));
+ fp = NULL;
+ goto fail;
+ }
+
+ memset(tmp, 0, sizeof(tmp));
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+ list_for_each_entry(entry, &cfg->bad_ap_mngr.list, list) {
+ bad_ap = &entry->bad_ap;
+ ret = wl_bad_ap_mngr_fread_bad_ap_info(&tmp[len], sizeof(tmp) - len, bad_ap);
+ if (ret < 0) {
+ WL_ERR(("%s: snprintf failed(%d)\n", __FUNCTION__, ret));
+ goto fail;
+ }
+
+ len += ret;
+ }
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+
+ ret = vfs_write(fp, tmp, len, &fp->f_pos);
+ if (ret < 0) {
+ WL_ERR(("%s: file write failed(%d)\n", __FUNCTION__, ret));
+ goto fail;
+ }
+ /* Sync file from filesystem to physical media */
+ ret = vfs_fsync(fp, 0);
+ if (ret < 0) {
+ WL_ERR(("%s: sync file failed(%d)\n", __FUNCTION__, ret));
+ goto fail;
+ }
+ ret = BCME_OK;
+fail:
+ if (fp) {
+ filp_close(fp, NULL);
+ }
+ set_fs(fs);
+ mutex_unlock(&cfg->bad_ap_mngr.fs_lock);
+
+ return ret;
+}
+#else
+extern wl_bad_ap_mngr_t *g_bad_ap_mngr;
+#endif /* DHD_ADPS_BAM_EXPORT */
+
+wl_bad_ap_info_entry_t*
+wl_bad_ap_mngr_find(wl_bad_ap_mngr_t *bad_ap_mngr, const struct ether_addr *bssid)
+{
+ wl_bad_ap_info_entry_t *entry;
+ unsigned long flags;
+
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+ spin_lock_irqsave(&bad_ap_mngr->lock, flags);
+ list_for_each_entry(entry, &bad_ap_mngr->list, list) {
+ if (!memcmp(&entry->bad_ap.bssid.octet, bssid->octet, ETHER_ADDR_LEN)) {
+ spin_unlock_irqrestore(&bad_ap_mngr->lock, flags);
+ return entry;
+ }
+ }
+ spin_unlock_irqrestore(&bad_ap_mngr->lock, flags);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+ return NULL;
+}
+
+int
+wl_bad_ap_mngr_add(wl_bad_ap_mngr_t *bad_ap_mngr, wl_bad_ap_info_t *bad_ap_info)
+{
+ int ret;
+ wl_bad_ap_info_entry_t *entry;
+ unsigned long flags;
+
+ BCM_REFERENCE(entry);
+ BCM_REFERENCE(flags);
+
+#if !defined(DHD_ADPS_BAM_EXPORT)
+ ret = wl_bad_ap_mngr_add_entry(bad_ap_mngr, bad_ap_info);
+#else
+ if (bad_ap_mngr->num == WL_BAD_AP_MAX_ENTRY_NUM) {
+ /* Remove the oldest entry if entry list is full */
+ spin_lock_irqsave(&bad_ap_mngr->lock, flags);
+ list_del(bad_ap_mngr->list.next);
+ bad_ap_mngr->num--;
+ spin_unlock_irqrestore(&bad_ap_mngr->lock, flags);
+ }
+
+	/* delete any duplicated entry and re-add it at the tail to keep the order */
+ entry = wl_bad_ap_mngr_find(bad_ap_mngr, &bad_ap_info->bssid);
+ if (entry != NULL) {
+ spin_lock_irqsave(&bad_ap_mngr->lock, flags);
+ list_del(&entry->list);
+ bad_ap_mngr->num--;
+ spin_unlock_irqrestore(&bad_ap_mngr->lock, flags);
+ }
+
+ ret = wl_bad_ap_mngr_add_entry(bad_ap_mngr, bad_ap_info);
+ if (ret < 0) {
+ WL_ERR(("%s - fail to add bad ap data(%d)\n", __FUNCTION__, ret));
+ return ret;
+ }
+#endif /* DHD_ADPS_BAM_EXPORT */
+ return ret;
+}
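+/*
+ * In the DHD_ADPS_BAM_EXPORT build the list behaves as a small LRU: when it
+ * is full the oldest entry at the head is evicted, and a duplicate BSSID is
+ * unlinked and re-added so the freshest entry always sits at the tail.
+ */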
+
+void
+wl_bad_ap_mngr_deinit(struct bcm_cfg80211 *cfg)
+{
+ wl_bad_ap_info_entry_t *entry;
+ unsigned long flags;
+
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+ WL_CFG_BAM_LOCK(&cfg->bad_ap_mngr.lock, flags);
+ while (!list_empty(&cfg->bad_ap_mngr.list)) {
+ entry = list_entry(cfg->bad_ap_mngr.list.next, wl_bad_ap_info_entry_t, list);
+ if (entry) {
+ list_del(&entry->list);
+ MFREE(cfg->osh, entry, sizeof(*entry));
+ }
+ }
+ WL_CFG_BAM_UNLOCK(&cfg->bad_ap_mngr.lock, flags);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+#if !defined(DHD_ADPS_BAM_EXPORT)
+ mutex_destroy(&cfg->bad_ap_mngr.fs_lock);
+#endif /* !DHD_ADPS_BAM_EXPORT */
+}
+
+void
+wl_bad_ap_mngr_init(struct bcm_cfg80211 *cfg)
+{
+ cfg->bad_ap_mngr.osh = cfg->osh;
+ cfg->bad_ap_mngr.num = 0;
+
+ spin_lock_init(&cfg->bad_ap_mngr.lock);
+ INIT_LIST_HEAD(&cfg->bad_ap_mngr.list);
+
+#if !defined(DHD_ADPS_BAM_EXPORT)
+ mutex_init(&cfg->bad_ap_mngr.fs_lock);
+#else
+ g_bad_ap_mngr = &cfg->bad_ap_mngr;
+#endif /* !DHD_ADPS_BAM_EXPORT */
+}
+
+static int
+wl_event_adps_bad_ap_mngr(struct bcm_cfg80211 *cfg, void *data)
+{
+ int ret = BCME_OK;
+
+ wl_event_adps_t *event_data = (wl_event_adps_t *)data;
+ wl_event_adps_bad_ap_t *bad_ap_data;
+
+ wl_bad_ap_info_entry_t *entry;
+ wl_bad_ap_info_t temp;
+#if !defined(DHD_ADPS_BAM_EXPORT)
+ struct timespec ts;
+#endif /* !DHD_ADPS_BAM_EXPORT */
+
+ if (event_data->version != WL_EVENT_ADPS_VER_1) {
+ return BCME_VERSION;
+ }
+
+ if (event_data->length != (OFFSETOF(wl_event_adps_t, data) + sizeof(*bad_ap_data))) {
+ return BCME_ERROR;
+ }
+
+ BCM_REFERENCE(ret);
+ BCM_REFERENCE(entry);
+ bad_ap_data = (wl_event_adps_bad_ap_t *)event_data->data;
+
+#if !defined(DHD_ADPS_BAM_EXPORT)
+ /* Update Bad AP list */
+ if (list_empty(&cfg->bad_ap_mngr.list)) {
+ wl_bad_ap_mngr_fread(cfg, WL_BAD_AP_INFO_FILE_PATH);
+ }
+
+ getnstimeofday(&ts);
+ entry = wl_bad_ap_mngr_find(&cfg->bad_ap_mngr, &bad_ap_data->ea);
+ if (entry != NULL) {
+ time_to_tm((ts.tv_sec - (sys_tz.tz_minuteswest * 60)), 0, &entry->bad_ap.tm);
+ entry->bad_ap.status = bad_ap_data->status;
+ entry->bad_ap.reason = bad_ap_data->reason;
+ entry->bad_ap.connect_count++;
+ }
+ else {
+ time_to_tm((ts.tv_sec - (sys_tz.tz_minuteswest * 60)), 0, &temp.tm);
+ temp.status = bad_ap_data->status;
+ temp.reason = bad_ap_data->reason;
+ temp.connect_count = 1;
+ memcpy(temp.bssid.octet, &bad_ap_data->ea.octet, ETHER_ADDR_LEN);
+
+ if (cfg->bad_ap_mngr.num < WL_BAD_AP_MAX_ENTRY_NUM) {
+ wl_bad_ap_mngr_add(&cfg->bad_ap_mngr, &temp);
+ }
+ else {
+ wl_bad_ap_mngr_update(cfg, &temp);
+ }
+ }
+
+ wl_bad_ap_mngr_fwrite(cfg, WL_BAD_AP_INFO_FILE_PATH);
+#else
+ memcpy(temp.bssid.octet, &bad_ap_data->ea.octet, ETHER_ADDR_LEN);
+ ret = wl_bad_ap_mngr_add(&cfg->bad_ap_mngr, &temp);
+#endif /* !DHD_ADPS_BAM_EXPORT */
+
+ return ret;
+}
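+/*
+ * Event flow for WLC_E_ADPS "bad AP" reports: lazily load the persisted list
+ * from WL_BAD_AP_INFO_FILE_PATH on the first event, update or insert the
+ * entry for the reporting BSSID, then write the list back. Note that in the
+ * DHD_ADPS_BAM_EXPORT variant only the BSSID of 'temp' is filled in before
+ * the add; the remaining fields are left uninitialized.
+ */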
+
+static int
+wl_adps_get_mode(struct net_device *ndev, uint8 band)
+{
+ int len;
+ int ret;
+
+ uint8 *pdata;
+ char buf[WLC_IOCTL_SMLEN];
+
+ bcm_iov_buf_t iov_buf;
+ bcm_iov_buf_t *resp;
+ wl_adps_params_v1_t *data = NULL;
+
+ memset(&iov_buf, 0, sizeof(iov_buf));
+ len = OFFSETOF(bcm_iov_buf_t, data) + sizeof(band);
+
+ iov_buf.version = WL_ADPS_IOV_VER;
+ iov_buf.len = sizeof(band);
+ iov_buf.id = WL_ADPS_IOV_MODE;
+ pdata = (uint8 *)iov_buf.data;
+ *pdata = band;
+
+ ret = wldev_iovar_getbuf(ndev, "adps", &iov_buf, len, buf, WLC_IOCTL_SMLEN, NULL);
+ if (ret < 0) {
+ return ret;
+ }
+ resp = (bcm_iov_buf_t *)buf;
+ data = (wl_adps_params_v1_t *)resp->data;
+
+ return data->mode;
+}
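+/*
+ * Illustrative "adps" GET request built above (assuming the usual
+ * bcm_iov_buf_t packing of version/len/id followed by the payload):
+ *
+ *   iov_buf.version = WL_ADPS_IOV_VER;
+ *   iov_buf.id      = WL_ADPS_IOV_MODE;
+ *   iov_buf.len     = sizeof(uint8);     // one band byte
+ *   iov_buf.data[0] = band;
+ *
+ * The response is another bcm_iov_buf_t whose payload is a
+ * wl_adps_params_v1_t, from which only 'mode' is returned.
+ */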
+
+/*
+ * Return value:
+ * Disabled: 0
+ * Enabled: bitmap of WLC_BAND_2G or WLC_BAND_5G when ADPS is enabled at each BAND
+ *
+ */
+int
+wl_adps_enabled(struct bcm_cfg80211 *cfg, struct net_device *ndev)
+{
+ uint8 i;
+ int mode;
+ int ret = 0;
+
+ for (i = 1; i <= MAX_BANDS; i++) {
+ mode = wl_adps_get_mode(ndev, i);
+ if (mode > 0) {
+ ret |= (1 << i);
+ }
+ }
+
+ return ret;
+}
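+/*
+ * Example: the bit recorded for band index i is (1 << i), so with ADPS
+ * active on bands 1 and 2 the function returns (1 << 1) | (1 << 2) == 0x6.
+ */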
+
+int
+wl_adps_set_suspend(struct bcm_cfg80211 *cfg, struct net_device *ndev, uint8 suspend)
+{
+ int ret = BCME_OK;
+
+ int buf_len;
+ bcm_iov_buf_t *iov_buf = NULL;
+ wl_adps_suspend_v1_t *data = NULL;
+
+ buf_len = OFFSETOF(bcm_iov_buf_t, data) + sizeof(*data);
+ iov_buf = MALLOCZ(cfg->osh, buf_len);
+ if (iov_buf == NULL) {
+ WL_ERR(("%s - failed to alloc %d bytes for iov_buf\n",
+ __FUNCTION__, buf_len));
+ ret = BCME_NOMEM;
+ goto exit;
+ }
+
+ iov_buf->version = WL_ADPS_IOV_VER;
+ iov_buf->len = sizeof(*data);
+ iov_buf->id = WL_ADPS_IOV_SUSPEND;
+
+ data = (wl_adps_suspend_v1_t *)iov_buf->data;
+ data->version = ADPS_SUB_IOV_VERSION_1;
+ data->length = sizeof(*data);
+ data->suspend = suspend;
+
+ ret = wldev_iovar_setbuf(ndev, "adps", (char *)iov_buf, buf_len,
+ cfg->ioctl_buf, WLC_IOCTL_SMLEN, NULL);
+ if (ret < 0) {
+ if (ret == BCME_UNSUPPORTED) {
+ WL_ERR(("%s - adps suspend is not supported\n", __FUNCTION__));
+ ret = BCME_OK;
+ }
+ else {
+ WL_ERR(("%s - fail to set adps suspend %d (%d)\n",
+ __FUNCTION__, suspend, ret));
+ }
+ goto exit;
+ }
+ WL_INFORM_MEM(("[%s] Detect BAD AP and Suspend ADPS\n", ndev->name));
+exit:
+ if (iov_buf) {
+ MFREE(cfg->osh, iov_buf, buf_len);
+ }
+ return ret;
+}
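+/*
+ * BCME_UNSUPPORTED from the firmware is deliberately mapped to BCME_OK above,
+ * so callers running on firmware without the ADPS suspend sub-command do not
+ * treat the missing feature as a failure.
+ */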
+
+bool
+wl_adps_bad_ap_check(struct bcm_cfg80211 *cfg, const struct ether_addr *bssid)
+{
+#if !defined(DHD_ADPS_BAM_EXPORT)
+ /* Update Bad AP list */
+ if (list_empty(&cfg->bad_ap_mngr.list)) {
+ wl_bad_ap_mngr_fread(cfg, WL_BAD_AP_INFO_FILE_PATH);
+ }
+#endif /* DHD_ADPS_BAM_EXPORT */
+
+ if (wl_bad_ap_mngr_find(&cfg->bad_ap_mngr, bssid) != NULL)
+ return TRUE;
+
+ return FALSE;
+}
+
+s32
+wl_adps_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+ int ret = BCME_OK;
+ wl_event_adps_t *event_data = (wl_event_adps_t *)data;
+
+ switch (event_data->type) {
+ case WL_E_TYPE_ADPS_BAD_AP:
+ ret = wl_event_adps_bad_ap_mngr(cfg, data);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
diff --git a/bcmdhd.101.10.361.x/wl_bigdata.c b/bcmdhd.101.10.361.x/wl_bigdata.c
new file mode 100755
index 0000000..fc77983
--- /dev/null
+++ b/bcmdhd.101.10.361.x/wl_bigdata.c
@@ -0,0 +1,575 @@
+/*
+ * Bigdata logging and report. Non-EWP and Hang event.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+#include <typedefs.h>
+#include <osl.h>
+#include <dngl_stats.h>
+#include <bcmutils.h>
+#include <dhd.h>
+#include <dhd_dbg.h>
+#include <wl_cfg80211.h>
+#include <wldev_common.h>
+#include <bcmendian.h>
+#include <wlioctl.h>
+#include <dhd_linux_wq.h>
+#include <wl_bigdata.h>
+
+#define WL_AP_BIGDATA_LOG(args) WL_DBG(args)
+
+#define WLC_E_IS_ASSOC(e, r) \
+ (((e == WLC_E_ASSOC_IND) || (e == WLC_E_REASSOC_IND)) && r == DOT11_SC_SUCCESS)
+#define WLC_E_IS_DEAUTH(e) \
+ (e == WLC_E_DISASSOC_IND || e == WLC_E_DEAUTH_IND || e == WLC_E_DEAUTH)
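+
+/*
+ * Event classification helpers: an association is a successful
+ * (RE)ASSOC_IND (status DOT11_SC_SUCCESS); a deauth is any of DISASSOC_IND,
+ * DEAUTH_IND or DEAUTH. Both are evaluated on the host-endian event
+ * type/reason values extracted in wl_gather_ap_stadata().
+ */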
+
+static void dump_ap_stadata(wl_ap_sta_data_t *ap_sta_data);
+static inline void copy_ap_stadata(wl_ap_sta_data_t *dest, wl_ap_sta_data_t *src);
+static void wg_rate_dot11mode(uint32 *rate, uint8 *channel, uint32 *mode_80211);
+static void wg_ht_mimo_ant(uint32 *nss, wl_rateset_args_t *rateset);
+static void wg_vht_mimo_ant(uint32 *nss, wl_rateset_args_t *rateset);
+#if defined(WL11AX)
+static void wg_he_mimo_ant(uint32 *nss, uint16 *mcsset);
+#endif /* WL11AX */
+static int wg_parse_ap_stadata(struct net_device *dev, struct ether_addr *sta_mac,
+ wl_ap_sta_data_t *ap_sta_data);
+
+static void
+dump_ap_stadata(wl_ap_sta_data_t *ap_sta_data)
+{
+ int i;
+
+ if (!ap_sta_data) {
+ WL_AP_BIGDATA_LOG(("ap_sta_data is NULL\n"));
+ return;
+ }
+
+ for (i = 0; i < MAX_STA_INFO_AP_CNT; i++) {
+ if (!ap_sta_data[i].is_empty) {
+ WL_AP_BIGDATA_LOG(("idx %d "MACDBG" dis %d empty %d\n",
+ i, MAC2STRDBG((char *)&ap_sta_data[i].mac),
+ ap_sta_data[i].disconnected, ap_sta_data[i].is_empty));
+ WL_AP_BIGDATA_LOG(("mode %d nss %d chanspec %d rssi %d "
+ "rate %d reason_code %d\n\n",
+ ap_sta_data[i].mode_80211,
+ ap_sta_data[i].nss, ap_sta_data[i].chanspec,
+ ap_sta_data[i].rssi, ap_sta_data[i].rate,
+ ap_sta_data[i].reason_code));
+ }
+ }
+}
+
+static inline void
+copy_ap_stadata(wl_ap_sta_data_t *dest, wl_ap_sta_data_t *src)
+{
+ memcpy(dest, src, sizeof(wl_ap_sta_data_t));
+ dest->is_empty = FALSE;
+ dest->disconnected = FALSE;
+ dest->reason_code = 0;
+}
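+/*
+ * Copying a parsed snapshot into a slot also resets the bookkeeping flags,
+ * marking the slot as occupied (is_empty = FALSE) and currently connected
+ * (disconnected = FALSE, reason_code = 0).
+ */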
+
+static void
+get_copy_ptr_stadata(struct ether_addr *sta_mac, wl_ap_sta_data_t *sta_data,
+ uint32 *sta_list_cnt, void **data)
+{
+ int i;
+ int discon_idx = -1;
+ int empty_idx = -1;
+
+ if (!sta_mac || !sta_data || !sta_list_cnt || !data) {
+ WL_ERR(("sta_mac=%p sta_data=%p sta_list_cnt=%p data=%p\n",
+ sta_mac, sta_data, sta_list_cnt, data));
+ return;
+ }
+
+ /* Find an already existing STA */
+ for (i = 0; i < MAX_STA_INFO_AP_CNT; i++) {
+ if (!memcmp((char*)sta_mac, (char*)&sta_data[i].mac, ETHER_ADDR_LEN)) {
+ WL_AP_BIGDATA_LOG(("found existed "
+ "STA idx %d "MACDBG"\n",
+ i, MAC2STRDBG((char *)sta_mac)));
+ *data = (wl_ap_sta_data_t *)&sta_data[i];
+ return;
+ }
+
+ if (sta_data[i].disconnected && (discon_idx == -1)) {
+ discon_idx = i;
+ }
+
+ if (sta_data[i].is_empty && (empty_idx == -1)) {
+ empty_idx = i;
+ }
+ }
+
+ /* Buf is max */
+ if (*sta_list_cnt >= MAX_STA_INFO_AP_CNT) {
+ if (discon_idx != -1) {
+ WL_AP_BIGDATA_LOG(("delete disconnected "
+ "idx %d "MACDBG"\n",
+ discon_idx, MAC2STRDBG((char *)sta_mac)));
+ *data = (wl_ap_sta_data_t *)&sta_data[discon_idx];
+ return;
+ }
+ }
+
+ /* Buf is not max */
+ if (empty_idx != -1) {
+ (*sta_list_cnt)++;
+ WL_AP_BIGDATA_LOG(("empty idx %d \n", empty_idx));
+ *data = (wl_ap_sta_data_t *)&sta_data[empty_idx];
+ return;
+ }
+}
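+/*
+ * Slot selection priority: (1) a slot whose MAC already matches, (2) when
+ * the table is full, the first disconnected slot, (3) otherwise the first
+ * empty slot (which also bumps *sta_list_cnt). If none of these apply,
+ * *data is left untouched and the caller skips the update.
+ */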
+
+static void
+wg_rate_dot11mode(uint32 *rate, uint8 *channel, uint32 *mode_80211)
+{
+ if (*rate <= DOT11_11B_MAX_RATE) {
+ /* 11b maximum rate is 11Mbps. 11b mode */
+ *mode_80211 = BIGDATA_DOT11_11B_MODE;
+ } else {
+ /* It's not HT Capable case. */
+ if (*channel > DOT11_2GHZ_MAX_CH_NUM) {
+ *mode_80211 = BIGDATA_DOT11_11A_MODE; /* 11a mode */
+ } else {
+ *mode_80211 = BIGDATA_DOT11_11G_MODE; /* 11g mode */
+ }
+ }
+}
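+/*
+ * Legacy-mode inference example: a peak rate of 11 Mbps or less implies
+ * 802.11b; above that, a channel beyond the 2.4 GHz range (presumably > 14)
+ * implies 802.11a, and a 2.4 GHz channel implies 802.11g.
+ */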
+
+static void
+wg_ht_mimo_ant(uint32 *nss, wl_rateset_args_t *rateset)
+{
+ int i;
+
+ *nss = 0;
+ for (i = 0; i < MAX_STREAMS_SUPPORTED; i++) {
+ int8 bitmap = DOT11_HT_MCS_RATE_MASK;
+ if (i == MAX_STREAMS_SUPPORTED-1) {
+ bitmap = DOT11_RATE_MASK;
+ }
+ if (rateset->mcs[i] & bitmap) {
+ (*nss)++;
+ }
+ }
+}
+
+static void
+wg_vht_mimo_ant(uint32 *nss, wl_rateset_args_t *rateset)
+{
+ int i;
+ uint32 mcs_code;
+
+ *nss = 0;
+
+ for (i = 1; i <= VHT_CAP_MCS_MAP_NSS_MAX; i++) {
+ mcs_code = VHT_MCS_MAP_TO_MCS_CODE(rateset->vht_mcs[i - 1]);
+ if (mcs_code != VHT_CAP_MCS_MAP_NONE) {
+ (*nss)++;
+ }
+ }
+}
+
+#if defined(WL11AX)
+static void
+wg_he_mimo_ant(uint32 *nss, uint16 *mcsset)
+{
+ int i;
+
+ *nss = 0;
+
+ for (i = 0; i <= HE_MCS_MAP_NSS_MAX; i++) {
+ if (mcsset[i]) {
+ (*nss)++;
+ }
+ }
+}
+#endif /* WL11AX */
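+
+/*
+ * All three wg_*_mimo_ant() helpers count spatial streams the same way:
+ * one stream per HT MCS bitmap byte, VHT MCS map entry, or HE MCS set word
+ * that advertises at least one supported rate.
+ */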
+
+static int
+wg_parse_ap_stadata(struct net_device *dev, struct ether_addr *sta_mac,
+ wl_ap_sta_data_t *ap_sta_data)
+{
+ sta_info_v4_t *sta_v4 = NULL;
+ sta_info_v5_t *sta_v5 = NULL;
+ wl_rateset_args_t *rateset_adv;
+ int ret = BCME_OK;
+ char* ioctl_buf = NULL;
+#if defined(WL11AX)
+ struct wl_rateset_args_v2 *rateset_adv_v2;
+#endif
+
+ ioctl_buf = (char*)kmalloc(WLC_IOCTL_MEDLEN, GFP_KERNEL);
+ if (ioctl_buf == NULL) {
+ WL_ERR(("failed to allocated ioctl_buf \n"));
+ return BCME_ERROR;
+ }
+
+ ret = wldev_iovar_getbuf(dev, "sta_info", (struct ether_addr *)sta_mac,
+ ETHER_ADDR_LEN, ioctl_buf, WLC_IOCTL_MEDLEN, NULL);
+
+ if (ret < 0) {
+ WL_ERR(("sta_info err value :%d\n", ret));
+ ret = BCME_ERROR;
+ goto done;
+ }
+
+ sta_v4 = (sta_info_v4_t *)ioctl_buf;
+ ap_sta_data->mac = *sta_mac;
+ ap_sta_data->rssi = 0;
+ ap_sta_data->mimo = 0;
+
+ rateset_adv = &sta_v4->rateset_adv;
+ ap_sta_data->chanspec = sta_v4->chanspec;
+
+ WL_AP_BIGDATA_LOG(("sta_info ver %d\n", sta_v4->ver));
+
+ if (sta_v4->ver == WL_STA_VER_5) {
+ sta_v5 = (sta_info_v5_t *)ioctl_buf;
+ ap_sta_data->chanspec = sta_v5->chanspec;
+ rateset_adv = &sta_v5->rateset_adv;
+ }
+
+ ap_sta_data->channel = wf_chspec_ctlchan(ap_sta_data->chanspec);
+ ap_sta_data->rate =
+ (sta_v4->rateset.rates[sta_v4->rateset.count - 1] & DOT11_RATE_MASK) / 2;
+
+ if (sta_v4->vht_flags) {
+ ap_sta_data->mode_80211 = BIGDATA_DOT11_11AC_MODE;
+ wg_vht_mimo_ant(&ap_sta_data->nss, rateset_adv);
+ } else if (sta_v4->ht_capabilities) {
+ ap_sta_data->mode_80211 = BIGDATA_DOT11_11N_MODE;
+ wg_ht_mimo_ant(&ap_sta_data->nss, rateset_adv);
+ } else {
+ wg_rate_dot11mode(&ap_sta_data->rate, &ap_sta_data->channel,
+ &ap_sta_data->mode_80211);
+ }
+
+#if defined(WL11AX)
+ ret = wldev_iovar_getbuf(dev, "rateset", NULL, 0, ioctl_buf,
+ sizeof(wl_rateset_args_v2_t), NULL);
+ if (ret < 0) {
+ WL_ERR(("get rateset failed = %d\n", ret));
+ } else {
+ rateset_adv_v2 = (wl_rateset_args_v2_t *)ioctl_buf;
+ WL_AP_BIGDATA_LOG(("rateset ver %d\n", rateset_adv_v2->version));
+
+ if (rateset_adv_v2->version == RATESET_ARGS_V2) {
+ rateset_adv_v2 = (wl_rateset_args_v2_t *)&sta_v4->rateset_adv;
+ if (sta_v4->ver == WL_STA_VER_5) {
+ rateset_adv_v2 = (wl_rateset_args_v2_t *)&sta_v5->rateset_adv;
+ }
+
+ if (rateset_adv_v2->he_mcs[0]) {
+ WL_AP_BIGDATA_LOG(("there is he mcs rate\n"));
+ ap_sta_data->mode_80211 = BIGDATA_DOT11_11AX_MODE;
+ wg_he_mimo_ant(&ap_sta_data->nss, &rateset_adv_v2->he_mcs[0]);
+ }
+ }
+ }
+#endif /* WL11AX */
+
+ if (ap_sta_data->nss) {
+ ap_sta_data->nss = ap_sta_data->nss - 1;
+ }
+
+done:
+ if (ioctl_buf) {
+ kfree(ioctl_buf);
+ }
+
+ return ret;
+}
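+/*
+ * Note: the final decrement makes 'nss' zero-based in the report (one
+ * spatial stream is encoded as 0), which presumably matches the format the
+ * bigdata consumer expects.
+ */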
+
+void
+wl_gather_ap_stadata(void *handle, void *event_info, u8 event)
+{
+ u32 event_type = 0;
+ u32 reason = 0;
+ u32 status = 0;
+ struct ether_addr sta_mac;
+ dhd_pub_t *dhdp;
+
+ struct net_device *dev = NULL;
+ struct bcm_cfg80211 *cfg = NULL;
+ wl_event_msg_t *e;
+
+ wl_ap_sta_data_t *sta_data;
+ wl_ap_sta_data_t temp_sta_data;
+ void *data = NULL;
+ int i;
+ int ret;
+
+ ap_sta_wq_data_t *wq_event_data = event_info;
+ if (!wq_event_data) {
+ WL_ERR(("wq_event_data is NULL\n"));
+ return;
+ }
+
+ cfg = (struct bcm_cfg80211 *)wq_event_data->bcm_cfg;
+ if (!cfg || !cfg->ap_sta_info) {
+ WL_ERR(("cfg=%p ap_sta_info=%p\n", cfg, (cfg ? cfg->ap_sta_info : NULL)));
+ if (wq_event_data) {
+ kfree(wq_event_data);
+ }
+ return;
+ }
+
+ mutex_lock(&cfg->ap_sta_info->wq_data_sync);
+
+ dhdp = (dhd_pub_t *)cfg->pub;
+ e = &wq_event_data->e;
+ dev = (struct net_device *)wq_event_data->ndev;
+
+ if (!e || !dev) {
+ WL_ERR(("e=%p dev=%p\n", e, dev));
+ goto done;
+ }
+
+ if (!wl_get_drv_status(cfg, AP_CREATED, dev)) {
+ WL_ERR(("skip to gather data becasue interface is not available\n"));
+ goto done;
+ }
+
+ sta_data = cfg->ap_sta_info->ap_sta_data;
+
+ event_type = ntoh32(e->event_type);
+ reason = ntoh32(e->reason);
+ status = ntoh32(e->status);
+ sta_mac = e->addr;
+
+ if (!sta_data) {
+ WL_ERR(("sta_data is NULL\n"));
+ goto done;
+ }
+
+ WL_AP_BIGDATA_LOG((""MACDBG" event %d status %d reason %d\n",
+ MAC2STRDBG((char*)&sta_mac), event_type, status, reason));
+
+ if (WLC_E_IS_ASSOC(event_type, reason)) {
+ ret = wg_parse_ap_stadata(dev, &sta_mac, &temp_sta_data);
+ if (ret < 0) {
+ WL_AP_BIGDATA_LOG(("sta_info err value :%d\n", ret));
+ goto done;
+ }
+
+ if (cfg->ap_sta_info->sta_list_cnt == 0) {
+ copy_ap_stadata(&sta_data[0], &temp_sta_data);
+ cfg->ap_sta_info->sta_list_cnt++;
+ dump_ap_stadata(sta_data);
+ } else {
+ get_copy_ptr_stadata(&sta_mac, sta_data,
+ &cfg->ap_sta_info->sta_list_cnt, &data);
+ if (data != NULL) {
+ copy_ap_stadata((wl_ap_sta_data_t *)data, &temp_sta_data);
+ dump_ap_stadata(sta_data);
+ }
+ }
+ }
+
+ if (WLC_E_IS_DEAUTH(event_type)) {
+ /* Find the already existing STA */
+ for (i = 0; i < MAX_STA_INFO_AP_CNT; i++) {
+ if (!sta_data[i].is_empty &&
+ !memcmp((char*)&sta_mac, (char*)&sta_data[i].mac, ETHER_ADDR_LEN)) {
+ WL_AP_BIGDATA_LOG(("found disconnected "
+ "STA idx %d "MACDBG"\n",
+ i, MAC2STRDBG((char *)&sta_mac)));
+ sta_data[i].is_empty = FALSE;
+ sta_data[i].disconnected = TRUE;
+ sta_data[i].reason_code = reason;
+ dump_ap_stadata(sta_data);
+ goto done;
+ }
+ }
+ }
+
+done:
+ if (wq_event_data) {
+ ASSERT(dhdp->osh);
+ MFREE(dhdp->osh, wq_event_data, sizeof(ap_sta_wq_data_t));
+ }
+ mutex_unlock(&cfg->ap_sta_info->wq_data_sync);
+}
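+/*
+ * wl_gather_ap_stadata() runs as deferred work: the event was queued from
+ * the event handler together with its ap_sta_wq_data_t envelope, which is
+ * always freed on the 'done' path, and wq_data_sync serializes concurrent
+ * work items against each other.
+ */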
+
+int
+wl_attach_ap_stainfo(void *bcm_cfg)
+{
+ gfp_t kflags;
+ uint32 alloc_len;
+ wl_ap_sta_info_t *sta_info;
+ wl_ap_sta_data_t *sta_data = NULL;
+ int i;
+ struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)bcm_cfg;
+
+ if (!cfg) {
+ WL_ERR(("cfg is NULL\n"));
+ return -EINVAL;
+ }
+
+ kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+
+ alloc_len = sizeof(wl_ap_sta_info_t);
+ sta_info = (wl_ap_sta_info_t *)kzalloc(alloc_len, kflags);
+
+ if (unlikely(!sta_info)) {
+ WL_ERR(("could not allocate memory for - "
+ "wl_ap_sta_info_t\n"));
+ goto fail;
+ }
+ cfg->ap_sta_info = sta_info;
+
+ alloc_len = sizeof(wl_ap_sta_data_t) * MAX_STA_INFO_AP_CNT;
+ sta_data = (wl_ap_sta_data_t *)kzalloc(alloc_len, kflags);
+
+ if (unlikely(!sta_data)) {
+ WL_ERR(("could not allocate memory for - "
+ "wl_ap_sta_data_t\n"));
+ goto fail;
+ }
+
+ cfg->ap_sta_info->sta_list_cnt = 0;
+
+ for (i = 0; i < MAX_STA_INFO_AP_CNT; i++) {
+ sta_data[i].is_empty = TRUE;
+ memset(&sta_data[i].mac, 0, ETHER_ADDR_LEN);
+ }
+
+ cfg->ap_sta_info->ap_sta_data = sta_data;
+
+ mutex_init(&cfg->ap_sta_info->wq_data_sync);
+
+ WL_ERR(("attach success\n"));
+
+ return BCME_OK;
+
+fail:
+ if (sta_data) {
+ kfree(sta_data);
+ cfg->ap_sta_info->ap_sta_data = NULL;
+ }
+
+ if (sta_info) {
+ kfree(sta_info);
+ cfg->ap_sta_info = NULL;
+ }
+
+ return BCME_ERROR;
+}
+
+int
+wl_detach_ap_stainfo(void *bcm_cfg)
+{
+ struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)bcm_cfg;
+
+ if (!cfg || !cfg->ap_sta_info) {
+ WL_ERR(("cfg=%p ap_sta_info=%p\n",
+ cfg, (cfg ? cfg->ap_sta_info : NULL)));
+ return -EINVAL;
+ }
+
+ if (cfg->ap_sta_info->ap_sta_data) {
+ kfree(cfg->ap_sta_info->ap_sta_data);
+ cfg->ap_sta_info->ap_sta_data = NULL;
+ }
+
+ mutex_destroy(&cfg->ap_sta_info->wq_data_sync);
+
+ kfree(cfg->ap_sta_info);
+ cfg->ap_sta_info = NULL;
+
+ WL_ERR(("detach success\n"));
+
+ return BCME_OK;
+}
+
+int
+wl_ap_stainfo_init(void *bcm_cfg)
+{
+ int i;
+ wl_ap_sta_data_t *sta_data;
+ struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)bcm_cfg;
+
+ if (!cfg || !cfg->ap_sta_info) {
+ WL_ERR(("cfg=%p ap_sta_info=%p\n",
+ cfg, (cfg ? cfg->ap_sta_info : NULL)));
+ return -EINVAL;
+ }
+
+ sta_data = cfg->ap_sta_info->ap_sta_data;
+ cfg->ap_sta_info->sta_list_cnt = 0;
+
+ if (!sta_data) {
+ WL_ERR(("ap_sta_data is NULL\n"));
+ return -EINVAL;
+ }
+
+ for (i = 0; i < MAX_STA_INFO_AP_CNT; i++) {
+ sta_data[i].is_empty = TRUE;
+ memset(&sta_data[i].mac, 0, ETHER_ADDR_LEN);
+ }
+
+ return BCME_OK;
+}
+
+int
+wl_get_ap_stadata(void *bcm_cfg, struct ether_addr *sta_mac, void **data)
+{
+ int i;
+ wl_ap_sta_data_t *sta_data;
+ struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)bcm_cfg;
+
+ if (!cfg || !cfg->ap_sta_info) {
+ WL_ERR(("cfg=%p ap_sta_info=%p\n",
+ cfg, (cfg ? cfg->ap_sta_info : NULL)));
+ return -EINVAL;
+ }
+
+ if (!sta_mac || !data) {
+ WL_ERR(("sta_mac=%p data=%p\n", sta_mac, data));
+ return -EINVAL;
+ }
+
+ sta_data = cfg->ap_sta_info->ap_sta_data;
+
+ if (!sta_data) {
+ WL_ERR(("ap_sta_data is NULL\n"));
+ return -EINVAL;
+ }
+
+ /* Find an already existing STA */
+ for (i = 0; i < MAX_STA_INFO_AP_CNT; i++) {
+ if (!sta_data[i].is_empty) {
+ WL_AP_BIGDATA_LOG(("%d " MACDBG " " MACDBG "\n", i,
+ MAC2STRDBG((char *)sta_mac),
+ MAC2STRDBG((char*)&sta_data[i].mac)));
+
+ if (!memcmp(sta_mac, (char*)&sta_data[i].mac, ETHER_ADDR_LEN)) {
+ WL_AP_BIGDATA_LOG(("Found STA idx %d " MACDBG "\n",
+ i, MAC2STRDBG((char *)sta_mac)));
+
+ *data = (wl_ap_sta_data_t*)&sta_data[i];
+ return BCME_OK;
+ }
+ }
+ }
+
+ return BCME_ERROR;
+}
diff --git a/bcmdhd.101.10.361.x/wl_cfg80211.c b/bcmdhd.101.10.361.x/wl_cfg80211.c
new file mode 100755
index 0000000..d8c2150
--- /dev/null
+++ b/bcmdhd.101.10.361.x/wl_cfg80211.c
@@ -0,0 +1,22880 @@
+/*
+ * Linux cfg80211 driver
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+/* */
+#include <typedefs.h>
+#include <linuxver.h>
+#include <linux/kernel.h>
+
+#include <bcmutils.h>
+#include <bcmstdlib_s.h>
+#include <bcmwifi_channels.h>
+#include <bcmendian.h>
+#include <ethernet.h>
+#ifdef WL_WPS_SYNC
+#include <eapol.h>
+#endif /* WL_WPS_SYNC */
+#include <802.11.h>
+#include <bcmiov.h>
+#include <linux/if_arp.h>
+#include <asm/uaccess.h>
+
+#include <ethernet.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include <linux/sched.h>
+#include <linux/etherdevice.h>
+#include <linux/wireless.h>
+#include <linux/ieee80211.h>
+#include <linux/wait.h>
+#if defined(CONFIG_TIZEN)
+#include <linux/net_stat_tizen.h>
+#endif /* CONFIG_TIZEN */
+#include <net/cfg80211.h>
+#include <net/rtnetlink.h>
+
+#include <wlioctl.h>
+#include <bcmevent.h>
+#include <wldev_common.h>
+#include <wl_cfg80211.h>
+#include <wl_cfgp2p.h>
+#include <wl_cfgscan.h>
+#include <wl_cfgvif.h>
+#include <bcmdevs.h>
+#include <bcmdevs_legacy.h>
+#ifdef WL_FILS
+#include <fils.h>
+#include <frag.h>
+#endif /* WL_FILS */
+
+#ifdef OEM_ANDROID
+#include <wl_android.h>
+#endif
+
+#if defined(BCMDONGLEHOST)
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_linux.h>
+#include <dhd_linux_pktdump.h>
+#include <dhd_debug.h>
+#include <dhdioctl.h>
+#include <wlioctl.h>
+#include <dhd_cfg80211.h>
+#include <dhd_bus.h>
+#ifdef PNO_SUPPORT
+#include <dhd_pno.h>
+#endif /* PNO_SUPPORT */
+#include <wl_cfgvendor.h>
+#endif /* defined(BCMDONGLEHOST) */
+
+#ifdef CONFIG_SLEEP_MONITOR
+#include <linux/power/sleep_monitor.h>
+#endif
+
+#if !defined(WL_VENDOR_EXT_SUPPORT)
+#undef GSCAN_SUPPORT
+#endif
+#include <dhd_config.h>
+
+#ifdef WL_NAN
+#include <wl_cfgnan.h>
+#endif /* WL_NAN */
+
+#ifdef PROP_TXSTATUS
+#include <dhd_wlfc.h>
+#endif
+
+#ifdef BCMPCIE
+#include <dhd_flowring.h>
+#endif
+#ifdef RTT_SUPPORT
+#include <dhd_rtt.h>
+#endif /* RTT_SUPPORT */
+
+#if defined(BIGDATA_SOFTAP) || defined(DHD_ENABLE_BIGDATA_LOGGING)
+#include <wl_bigdata.h>
+#endif /* BIGDATA_SOFTAP || DHD_ENABLE_BIGDATA_LOGGING */
+
+#ifdef DHD_EVENT_LOG_FILTER
+#include <dhd_event_log_filter.h>
+#endif /* DHD_EVENT_LOG_FILTER */
+#define BRCM_SAE_VENDOR_EVENT_BUF_LEN 500
+
+#ifdef DNGL_AXI_ERROR_LOGGING
+#include <bcmtlv.h>
+#endif /* DNGL_AXI_ERROR_LOGGING */
+
+#if defined(CONFIG_WLAN_BEYONDX) || defined(CONFIG_SEC_5GMODEL)
+#include <linux/dev_ril_bridge.h>
+#include <linux/notifier.h>
+#endif /* CONFIG_WLAN_BEYONDX || defined(CONFIG_SEC_5GMODEL) */
+
+#if (defined(WL_FW_OCE_AP_SELECT) || defined(BCMFW_ROAM_ENABLE)) && \
+ ((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)) || defined(WL_COMPAT_WIRELESS))
+uint fw_ap_select = true;
+#else
+uint fw_ap_select = false;
+#endif /* (WL_FW_OCE_AP_SELECT || BCMFW_ROAM_ENABLE) && (kernel >= 3.2 || WL_COMPAT_WIRELESS) */
+module_param(fw_ap_select, uint, 0660);
+
+#if defined(WL_REASSOC)
+uint wl_reassoc_support = true;
+#else
+uint wl_reassoc_support = false;
+#endif /* WL_REASSOC */
+module_param(wl_reassoc_support, uint, 0660);
+
+static struct device *cfg80211_parent_dev = NULL;
+static struct bcm_cfg80211 *g_bcmcfg = NULL;
+u32 wl_dbg_level = WL_DBG_ERR;
+
+#define MAX_WAIT_TIME 1500
+#ifdef WLAIBSS_MCHAN
+#define IBSS_IF_NAME "ibss%d"
+#endif /* WLAIBSS_MCHAN */
+
+#ifdef VSDB
+/* Sleep time to keep the STA's connection (or connection attempt) alive during continuous AF tx or peer discovery */
+#define DEFAULT_SLEEP_TIME_VSDB 120
+#define OFF_CHAN_TIME_THRESHOLD_MS 200
+#define AF_RETRY_DELAY_TIME 40
+
+/* If the STA is connected or connecting, sleep for a while before retrying AF tx or peer discovery */
+#define WL_AF_TX_KEEP_PRI_CONNECTION_VSDB(cfg) \
+ do { \
+ if (wl_get_drv_status(cfg, CONNECTED, bcmcfg_to_prmry_ndev(cfg)) || \
+ wl_get_drv_status(cfg, CONNECTING, bcmcfg_to_prmry_ndev(cfg))) { \
+ OSL_SLEEP(DEFAULT_SLEEP_TIME_VSDB); \
+ } \
+ } while (0)
+#else /* VSDB */
+/* if not VSDB, do nothing */
+#define WL_AF_TX_KEEP_PRI_CONNECTION_VSDB(cfg)
+#endif /* VSDB */
+
+#if !defined(BCMDONGLEHOST)
+#ifdef ntoh32
+#undef ntoh32
+#endif
+#ifdef ntoh16
+#undef ntoh16
+#endif
+#ifdef htod32
+#undef htod32
+#endif
+#ifdef htod16
+#undef htod16
+#endif
+#define ntoh32(i) (i)
+#define ntoh16(i) (i)
+#define htod32(i) (i)
+#define htod16(i) (i)
+#define DNGL_FUNC(func, parameters)
+#else
+#define DNGL_FUNC(func, parameters) func parameters
+#define COEX_DHCP
+
+#endif /* defined(BCMDONGLEHOST) */
+
+#define WLAN_EID_SSID 0
+#define CH_MIN_5G_CHANNEL 34
+#ifdef WLAIBSS
+enum abiss_event_type {
+ AIBSS_EVENT_TXFAIL
+};
+#endif
+
+#ifdef WL_RELMCAST
+enum rmc_event_type {
+ RMC_EVENT_NONE,
+ RMC_EVENT_LEADER_CHECK_FAIL
+};
+#endif /* WL_RELMCAST */
+
+/* This is to override the regulatory domains defined in the cfg80211 module (reg.c).
+ * By default, the world regulatory domain defined in reg.c puts the flags
+ * NL80211_RRF_PASSIVE_SCAN and NL80211_RRF_NO_IBSS on 5 GHz channels (36..48 and 149..165).
+ * With these flags set, wpa_supplicant doesn't start P2P operations on 5 GHz channels.
+ * All changes to the world regulatory domain are to be done here.
+ *
+ * This definition requires disabling the missing-field-initializers warning,
+ * as the ieee80211_regdomain definition differs between plain Linux and Android.
+ */
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && \
+(__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic push")
+_Pragma("GCC diagnostic ignored \"-Wmissing-field-initializers\"")
+#endif
+static const struct ieee80211_regdomain brcm_regdom = {
+#ifdef WL_6G_BAND
+ .n_reg_rules = 8,
+#else
+ .n_reg_rules = 4,
+#endif
+ .alpha2 = "99",
+ .reg_rules = {
+ /* IEEE 802.11b/g, channels 1..11 */
+ REG_RULE(2412-10, 2472+10, 40, 6, 20, 0),
+ /* If any */
+ /* IEEE 802.11 channel 14 - Only JP enables
+ * this and for 802.11b only
+ */
+ REG_RULE(2484-10, 2484+10, 20, 6, 20, 0),
+ /* IEEE 802.11a, channel 36..64 */
+ REG_RULE(5150-10, 5350+10, 40, 6, 20, 0),
+ /* IEEE 802.11a, channel 100..165 */
+ REG_RULE(5470-10, 5850+10, 40, 6, 20, 0),
+#ifdef WL_6G_BAND
+ REG_RULE(6025-80, 6985+80, 160, 6, 20, 0),
+ REG_RULE(5935-10, 7115+10, 20, 6, 20, 0),
+ REG_RULE(5965-20, 7085+20, 40, 6, 20, 0),
+ REG_RULE(5985-40, 7025+40, 80, 6, 20, 0),
+#endif /* WL_6G_BAND */
+ }
+};
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && \
+(__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic pop")
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) && \
+ (defined(WL_IFACE_COMB_NUM_CHANNELS) || \
+ defined(WL_CFG80211_P2P_DEV_IF))
+static const struct ieee80211_iface_limit common_if_limits[] = {
+ {
+ /*
+ * Driver can support up to 2 AP's
+ */
+ .max = 2,
+ .types = BIT(NL80211_IFTYPE_AP),
+ },
+ {
+ /*
+ * During P2P-GO removal, P2P-GO is first changed to STA and later only
+ * removed. So setting maximum possible number of STA interfaces according
+ * to kernel version.
+ *
+ * less than linux-3.8 - max:3 (wlan0 + p2p0 + group removal of p2p-p2p0-x)
+ * linux-3.8 and above - max:4
+ * sta + NAN NMI + NAN DPI open + NAN DPI sec (since there is no iface type
+ * for NAN defined, registering it as STA type)
+ */
+#ifdef WL_ENABLE_P2P_IF
+ .max = 5,
+#else
+ .max = 4,
+#endif /* WL_ENABLE_P2P_IF */
+ .types = BIT(NL80211_IFTYPE_STATION),
+ },
+ {
+ .max = 2,
+ .types = BIT(NL80211_IFTYPE_P2P_GO) | BIT(NL80211_IFTYPE_P2P_CLIENT),
+ },
+#if defined(WL_CFG80211_P2P_DEV_IF)
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
+ },
+#endif /* WL_CFG80211_P2P_DEV_IF */
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_ADHOC),
+ },
+};
+
+#define NUM_DIFF_CHANNELS 2
+
+static const struct ieee80211_iface_combination
+common_iface_combinations[] = {
+ {
+ .num_different_channels = NUM_DIFF_CHANNELS,
+ /*
+ * At Max 5 network interfaces can be registered concurrently
+ */
+ .max_interfaces = IFACE_MAX_CNT,
+ .limits = common_if_limits,
+ .n_limits = ARRAY_SIZE(common_if_limits),
+ },
+};
+#endif /* LINUX_VER >= 3.0 && (WL_IFACE_COMB_NUM_CHANNELS || WL_CFG80211_P2P_DEV_IF) */
+
+static const char *wl_if_state_strs[WL_IF_STATE_MAX + 1] = {
+ "WL_IF_CREATE_REQ",
+ "WL_IF_CREATE_DONE",
+ "WL_IF_DELETE_REQ",
+ "WL_IF_DELETE_DONE",
+ "WL_IF_CHANGE_REQ",
+ "WL_IF_CHANGE_DONE",
+ "WL_IF_STATE_MAX"
+};
+
+#ifdef WBTEXT
+typedef struct wl_wbtext_bssid {
+ struct ether_addr ea;
+ struct list_head list;
+} wl_wbtext_bssid_t;
+
+static void wl_cfg80211_wbtext_reset_conf(struct bcm_cfg80211 *cfg, struct net_device *ndev);
+static void wl_cfg80211_wbtext_update_rcc(struct bcm_cfg80211 *cfg, struct net_device *dev);
+static bool wl_cfg80211_wbtext_check_bssid_list(struct bcm_cfg80211 *cfg, struct ether_addr *ea);
+static bool wl_cfg80211_wbtext_add_bssid_list(struct bcm_cfg80211 *cfg, struct ether_addr *ea);
+static void wl_cfg80211_wbtext_clear_bssid_list(struct bcm_cfg80211 *cfg);
+static bool wl_cfg80211_wbtext_send_nbr_req(struct bcm_cfg80211 *cfg, struct net_device *dev,
+ struct wl_profile *profile);
+static bool wl_cfg80211_wbtext_send_btm_query(struct bcm_cfg80211 *cfg, struct net_device *dev,
+ struct wl_profile *profile);
+static void wl_cfg80211_wbtext_set_wnm_maxidle(struct bcm_cfg80211 *cfg, struct net_device *dev);
+static int wl_cfg80211_recv_nbr_resp(struct net_device *dev, uint8 *body, uint body_len);
+#endif /* WBTEXT */
+
+#ifdef RTT_SUPPORT
+static s32 wl_cfg80211_rtt_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data);
+#endif /* RTT_SUPPORT */
+#ifdef WL_CHAN_UTIL
+static s32 wl_cfg80211_bssload_report_event_handler(struct bcm_cfg80211 *cfg,
+ bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data);
+static s32 wl_cfg80211_start_bssload_report(struct net_device *ndev);
+#endif /* WL_CHAN_UTIL */
+
+/* SoftAP related parameters */
+#define DEFAULT_2G_SOFTAP_CHANNEL 1
+#define DEFAULT_2G_SOFTAP_CHANSPEC 0x1006
+#define DEFAULT_5G_SOFTAP_CHANNEL 149
+#define WL_MAX_NUM_CSA_COUNTERS 255
+
+#define MAX_VNDR_OUI_STR_LEN 256u
+#define VNDR_OUI_STR_LEN 10u
+#define DOT11_DISCONNECT_RC 2u
+static const uchar *exclude_vndr_oui_list[] = {
+ "\x00\x50\xf2", /* Microsoft */
+ "\x00\x00\xf0", /* Samsung Elec */
+ WFA_OUI, /* WFA */
+ NULL
+};
+
+typedef struct wl_vndr_oui_entry {
+ uchar oui[DOT11_OUI_LEN];
+ struct list_head list;
+} wl_vndr_oui_entry_t;
+
+#ifdef WL_ANALYTICS
+static const uchar disco_bcnloss_vsie[] = {
+ 0xdd, /* Vendor specific */
+ 0x09, /* Length */
+ 0x00, 0x00, 0xF0, /* OUI */
+ 0x22, /* VENDOR_ENTERPRISE_STA_OUI_TYPE */
+ 0x03, /* Sub type for additional rc */
+ 0x01, /* Version */
+ 0x02, /* Length */
+ 0x07, 0x00 /* Reason code for BCN loss */
+};
+#endif /* WL_ANALYTICS */
+
+static int wl_vndr_ies_get_vendor_oui(struct bcm_cfg80211 *cfg,
+ struct net_device *ndev, char *vndr_oui, u32 vndr_oui_len);
+static void wl_vndr_ies_clear_vendor_oui_list(struct bcm_cfg80211 *cfg);
+#ifdef WL_ANALYTICS
+static bool wl_vndr_ies_find_vendor_oui(struct bcm_cfg80211 *cfg,
+ struct net_device *ndev, const char *vndr_oui);
+#endif
+static s32 wl_cfg80211_parse_vndr_ies(const u8 *parse, u32 len,
+ struct parsed_vndr_ies *vndr_ies);
+static bool wl_cfg80211_filter_vndr_ext_id(const vndr_ie_t *vndrie);
+#if defined(WL_FW_OCE_AP_SELECT)
+static bool
+wl_cfgoce_has_ie(const u8 *ie, const u8 **tlvs, u32 *tlvs_len, const u8 *oui, u32 oui_len, u8 type);
+
+/* Check whether the given IE looks like WFA OCE IE. */
+#define wl_cfgoce_is_oce_ie(ie, tlvs, len) wl_cfgoce_has_ie(ie, tlvs, len, \
+ (const uint8 *)WFA_OUI, WFA_OUI_LEN, WFA_OUI_TYPE_MBO_OCE)
+
+/* Is any of the tlvs the expected entry? If
+ * not update the tlvs buffer pointer/length.
+ */
+static bool
+wl_cfgoce_has_ie(const u8 *ie, const u8 **tlvs, u32 *tlvs_len, const u8 *oui, u32 oui_len, u8 type)
+{
+ /* If the contents match the OUI and the type */
+ if (ie[TLV_LEN_OFF] >= oui_len + 1 &&
+ !bcmp(&ie[TLV_BODY_OFF], oui, oui_len) &&
+ type == ie[TLV_BODY_OFF + oui_len]) {
+ return TRUE;
+ }
+
+ return FALSE;
+}
+#endif /* WL_FW_OCE_AP_SELECT */
+
+/*
+ * cfg80211_ops api/callback list
+ */
+static s32 wl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed);
+#ifdef WLAIBSS_MCHAN
+static bcm_struct_cfgdev* bcm_cfg80211_add_ibss_if(struct wiphy *wiphy, char *name);
+static s32 bcm_cfg80211_del_ibss_if(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev);
+#endif /* WLAIBSS_MCHAN */
+static s32 wl_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_ibss_params *params);
+static s32 wl_cfg80211_leave_ibss(struct wiphy *wiphy,
+ struct net_device *dev);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
+static s32 wl_cfg80211_get_station(struct wiphy *wiphy,
+ struct net_device *dev, const u8 *mac,
+ struct station_info *sinfo);
+#else
+static s32 wl_cfg80211_get_station(struct wiphy *wiphy,
+ struct net_device *dev, u8 *mac,
+ struct station_info *sinfo);
+#endif
+static s32 wl_cfg80211_set_power_mgmt(struct wiphy *wiphy,
+ struct net_device *dev, bool enabled,
+ s32 timeout);
+static int wl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_connect_params *sme);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+static int wl_cfg80211_update_connect_params(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_connect_params *sme, u32 changed);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0) */
+static s32 wl_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
+ u16 reason_code);
+#if defined(WL_CFG80211_P2P_DEV_IF)
+static s32
+wl_cfg80211_set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
+ enum nl80211_tx_power_setting type, s32 mbm);
+#else
+static s32
+wl_cfg80211_set_tx_power(struct wiphy *wiphy,
+ enum nl80211_tx_power_setting type, s32 dbm);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+#if defined(WL_CFG80211_P2P_DEV_IF)
+static s32 wl_cfg80211_get_tx_power(struct wiphy *wiphy,
+ struct wireless_dev *wdev, s32 *dbm);
+#else
+static s32 wl_cfg80211_get_tx_power(struct wiphy *wiphy, s32 *dbm);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+static s32 wl_cfg80211_config_default_key(struct wiphy *wiphy,
+ struct net_device *dev,
+ u8 key_idx, bool unicast, bool multicast);
+static s32 wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev,
+ u8 key_idx, bool pairwise, const u8 *mac_addr,
+ struct key_params *params);
+static s32 wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev,
+ u8 key_idx, bool pairwise, const u8 *mac_addr);
+static s32 wl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev,
+ u8 key_idx, bool pairwise, const u8 *mac_addr,
+ void *cookie, void (*callback) (void *cookie,
+ struct key_params *params));
+static s32 wl_cfg80211_config_default_mgmt_key(struct wiphy *wiphy,
+ struct net_device *dev, u8 key_idx);
+#if defined(WL_SUPPORT_BACKPORTED_KPATCHES) || (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
+static s32 wl_cfg80211_mgmt_tx_cancel_wait(struct wiphy *wiphy,
+ bcm_struct_cfgdev *cfgdev, u64 cookie);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0))
+static s32 wl_cfg80211_del_station(
+ struct wiphy *wiphy, struct net_device *ndev,
+ struct station_del_parameters *params);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
+static s32 wl_cfg80211_del_station(struct wiphy *wiphy,
+ struct net_device *ndev, const u8* mac_addr);
+#else
+static s32 wl_cfg80211_del_station(struct wiphy *wiphy,
+ struct net_device *ndev, u8* mac_addr);
+#endif
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
+static s32 wl_cfg80211_change_station(struct wiphy *wiphy,
+ struct net_device *dev, const u8 *mac, struct station_parameters *params);
+#else
+static s32 wl_cfg80211_change_station(struct wiphy *wiphy,
+ struct net_device *dev, u8 *mac, struct station_parameters *params);
+#endif
+#endif /* WL_SUPPORT_BACKPORTED_KPATCHES || KERNEL_VER >= KERNEL_VERSION(3, 2, 0)) */
+static s32 wl_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_pmksa *pmksa);
+static s32 wl_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_pmksa *pmksa);
+static s32 wl_cfg80211_flush_pmksa(struct wiphy *wiphy,
+ struct net_device *dev);
+static s32 wl_cfg80211_update_pmksa(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_pmksa *pmksa, bool set);
+static void wl_cfg80211_spmk_pmkdb_change_pmk_type(struct bcm_cfg80211 *cfg,
+ pmkid_list_v3_t *pmk_list);
+static void wl_cfg80211_spmk_pmkdb_del_spmk(struct bcm_cfg80211 *cfg,
+ struct cfg80211_pmksa *pmksa);
+
+struct wireless_dev *
+wl_cfg80211_create_iface(struct wiphy *wiphy, wl_iftype_t
+ iface_type, u8 *mac_addr, const char *name);
+s32
+wl_cfg80211_del_iface(struct wiphy *wiphy, struct wireless_dev *wdev);
+
+s32 wl_cfg80211_interface_ops(struct bcm_cfg80211 *cfg,
+ struct net_device *ndev, s32 bsscfg_idx,
+ wl_iftype_t iftype, s32 del, u8 *addr);
+s32 wl_cfg80211_add_del_bss(struct bcm_cfg80211 *cfg,
+ struct net_device *ndev, s32 bsscfg_idx,
+ wl_iftype_t brcm_iftype, s32 del, u8 *addr);
+#ifdef GTK_OFFLOAD_SUPPORT
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0))
+static s32 wl_cfg80211_set_rekey_data(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_gtk_rekey_data *data);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0) */
+#endif /* GTK_OFFLOAD_SUPPORT */
+chanspec_t wl_chspec_driver_to_host(chanspec_t chanspec);
+chanspec_t wl_chspec_host_to_driver(chanspec_t chanspec);
+static void wl_cfg80211_wait_for_disconnection(struct bcm_cfg80211 *cfg, struct net_device *dev);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
+#ifdef WLFBT
+static int wl_cfg80211_update_ft_ies(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_update_ft_ies_params *ftie);
+#endif /* WLFBT */
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0))
+static int wl_cfg80211_set_pmk(struct wiphy *wiphy, struct net_device *dev,
+ const struct cfg80211_pmk_conf *conf);
+static int wl_cfg80211_del_pmk(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *aa);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0) */
+
+/*
+ * event & event Q handlers for cfg80211 interfaces
+ */
+static s32 wl_create_event_handler(struct bcm_cfg80211 *cfg);
+static void wl_destroy_event_handler(struct bcm_cfg80211 *cfg);
+static void wl_event_handler(struct work_struct *work_data);
+static void wl_init_eq(struct bcm_cfg80211 *cfg);
+static void wl_flush_eq(struct bcm_cfg80211 *cfg);
+static unsigned long wl_lock_eq(struct bcm_cfg80211 *cfg);
+static void wl_unlock_eq(struct bcm_cfg80211 *cfg, unsigned long flags);
+static void wl_init_eq_lock(struct bcm_cfg80211 *cfg);
+static void wl_init_event_handler(struct bcm_cfg80211 *cfg);
+static struct wl_event_q *wl_deq_event(struct bcm_cfg80211 *cfg);
+static s32 wl_enq_event(struct bcm_cfg80211 *cfg, struct net_device *ndev, u32 type,
+ const wl_event_msg_t *msg, void *data);
+static void wl_put_event(struct bcm_cfg80211 *cfg, struct wl_event_q *e);
+static s32 wl_notify_connect_status(struct bcm_cfg80211 *cfg,
+ bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data);
+static s32 wl_notify_roaming_status(struct bcm_cfg80211 *cfg,
+ bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data);
+static s32 wl_bss_connect_done(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data, bool completed);
+#ifdef DHD_LOSSLESS_ROAMING
+static s32 wl_bss_roaming_done(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data);
+#endif /* DHD_LOSSLESS_ROAMING */
+static s32 wl_notify_mic_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data);
+#ifdef BT_WIFI_HANDOVER
+static s32 wl_notify_bt_wifi_handover_req(struct bcm_cfg80211 *cfg,
+ bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data);
+#endif /* BT_WIFI_HANDOVER */
+#ifdef GSCAN_SUPPORT
+static s32 wl_handle_roam_exp_event(struct bcm_cfg80211 *wl, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data);
+#endif /* GSCAN_SUPPORT */
+#ifdef RSSI_MONITOR_SUPPORT
+static s32 wl_handle_rssi_monitor_event(struct bcm_cfg80211 *wl, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data);
+#endif /* RSSI_MONITOR_SUPPORT */
+static s32 wl_notifier_change_state(struct bcm_cfg80211 *cfg, struct net_info *_net_info,
+ enum wl_status state, bool set);
+#ifdef CUSTOM_EVENT_PM_WAKE
+static s32 wl_check_pmstatus(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data);
+#endif /* CUSTOM_EVENT_PM_WAKE */
+#if defined(DHD_LOSSLESS_ROAMING) || defined (DBG_PKT_MON)
+static s32 wl_notify_roam_prep_status(struct bcm_cfg80211 *cfg,
+ bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data);
+#endif /* DHD_LOSSLESS_ROAMING || DBG_PKT_MON */
+#ifdef DHD_LOSSLESS_ROAMING
+static void wl_del_roam_timeout(struct bcm_cfg80211 *cfg);
+#endif /* DHD_LOSSLESS_ROAMING */
+#ifdef WL_SDO
+static s32 wl_svc_resp_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data);
+static s32 wl_notify_device_discovery(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data);
+#endif
+
+#ifdef WL_MBO
+static s32
+wl_mbo_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data);
+#endif /* WL_MBO */
+
+#ifdef WL_TWT
+static s32
+wl_notify_twt_event(struct bcm_cfg80211 *cfg,
+ bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data);
+#endif /* WL_TWT */
+
+#ifdef WL_CLIENT_SAE
+static bool wl_is_pmkid_available(struct net_device *dev, const u8 *bssid);
+static s32 wl_notify_start_auth(struct bcm_cfg80211 *cfg,
+ bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data);
+static s32 wl_cfg80211_external_auth(struct wiphy *wiphy,
+ struct net_device *dev, struct cfg80211_external_auth_params *ext_auth);
+static s32
+wl_cfg80211_mgmt_auth_tx(struct net_device *dev, bcm_struct_cfgdev *cfgdev,
+ struct bcm_cfg80211 *cfg, const u8 *buf, size_t len, s32 bssidx, u64 *cookie);
+#endif /* WL_CLIENT_SAE */
+
+/*
+ * register/deregister parent device
+ */
+static void wl_cfg80211_clear_parent_dev(void);
+/*
+ * ioctl utilities
+ */
+
+/*
+ * cfg80211 set_wiphy_params utilities
+ */
+static s32 wl_set_frag(struct net_device *dev, u32 frag_threshold);
+static s32 wl_set_rts(struct net_device *dev, u32 frag_threshold);
+static s32 wl_set_retry(struct net_device *dev, u32 retry, bool l);
+
+/*
+ * cfg profile utilities
+ */
+static void wl_init_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev);
+
+/*
+ * cfg80211 connect utilities
+ */
+static s32 wl_set_wpa_version(struct net_device *dev,
+ struct cfg80211_connect_params *sme);
+static s32 wl_set_auth_type(struct net_device *dev,
+ struct cfg80211_connect_params *sme);
+static s32 wl_set_set_cipher(struct net_device *dev,
+ struct cfg80211_connect_params *sme);
+static s32 wl_set_key_mgmt(struct net_device *dev,
+ struct cfg80211_connect_params *sme);
+static s32 wl_set_set_sharedkey(struct net_device *dev,
+ struct cfg80211_connect_params *sme);
+#ifdef WL_FILS
+static s32 wl_set_fils_params(struct net_device *dev,
+ struct cfg80211_connect_params *sme);
+#endif
+
+#ifdef BCMWAPI_WPI
+static s32 wl_set_set_wapi_ie(struct net_device *dev,
+ struct cfg80211_connect_params *sme);
+#endif
+
+#ifdef WL_GCMP
+static s32 wl_set_wsec_info_algos(struct net_device *dev, uint32 algos, uint32 mask);
+#endif /* WL_GCMP */
+static s32 wl_get_assoc_ies(struct bcm_cfg80211 *cfg, struct net_device *ndev);
+void wl_cfg80211_clear_security(struct bcm_cfg80211 *cfg);
+
+/*
+ * information element utilities
+ */
+static __used s32 wl_add_ie(struct bcm_cfg80211 *cfg, u8 t, u8 l, u8 *v);
+
+#ifdef MFP
+static int wl_cfg80211_get_rsn_capa(const bcm_tlv_t *wpa2ie, const u8** rsn_cap);
+#endif
+
+static s32 wl_setup_wiphy(struct wireless_dev *wdev, struct device *dev, dhd_pub_t *data);
+static void wl_free_wdev(struct bcm_cfg80211 *cfg);
+#ifdef CONFIG_CFG80211_INTERNAL_REGDB
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 11))
+static int
+#else
+static void
+#endif /* kernel version < 3.10.11 */
+wl_cfg80211_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request);
+#endif /* CONFIG_CFG80211_INTERNAL_REGDB */
+
+static s32 wl_update_bss_info(struct bcm_cfg80211 *cfg, struct net_device *ndev, bool update_ssid);
+static void wl_cfg80211_work_handler(struct work_struct *work);
+static s32 wl_add_keyext(struct wiphy *wiphy, struct net_device *dev,
+ u8 key_idx, const u8 *mac_addr,
+ struct key_params *params);
+/*
+ * key endianness swap utilities
+ */
+static void swap_key_from_BE(struct wl_wsec_key *key);
+static void swap_key_to_BE(struct wl_wsec_key *key);
+
+/*
+ * bcm_cfg80211 memory init/deinit utilities
+ */
+static s32 wl_init_priv_mem(struct bcm_cfg80211 *cfg);
+static void wl_deinit_priv_mem(struct bcm_cfg80211 *cfg);
+
+static void wl_delay(u32 ms);
+
+/*
+ * ibss mode utilities
+ */
+static __used bool wl_is_ibssstarter(struct bcm_cfg80211 *cfg);
+
+/*
+ * link up/down , default configuration utilities
+ */
+static s32 __wl_cfg80211_up(struct bcm_cfg80211 *cfg);
+static s32 __wl_cfg80211_down(struct bcm_cfg80211 *cfg);
+
+static void wl_link_up(struct bcm_cfg80211 *cfg);
+static s32 wl_handle_link_down(struct bcm_cfg80211 *cfg, wl_assoc_status_t *as);
+static s32 wl_post_linkup_ops(struct bcm_cfg80211 *cfg, wl_assoc_status_t *as);
+static void wl_link_down(struct bcm_cfg80211 *cfg);
+static s32 wl_config_infra(struct bcm_cfg80211 *cfg, struct net_device *ndev, u16 iftype);
+static void wl_init_conf(struct wl_conf *conf);
+int wl_cfg80211_get_ioctl_version(void);
+
+/*
+ * find most significant bit set
+ */
+static __used u32 wl_find_msb(u16 bit16);
+
+/*
+ * rfkill support
+ */
+static int wl_setup_rfkill(struct bcm_cfg80211 *cfg, bool setup);
+static int wl_rfkill_set(void *data, bool blocked);
+
+/*
+ * Some external functions, TODO: move them to dhd_linux.h
+ */
+int dhd_add_monitor(const char *name, struct net_device **new_ndev);
+int dhd_del_monitor(struct net_device *ndev);
+int dhd_monitor_init(void *dhd_pub);
+int dhd_monitor_uninit(void);
+netdev_tx_t dhd_start_xmit(struct sk_buff *skb, struct net_device *net);
+
+#ifdef ROAM_CHANNEL_CACHE
+int init_roam_cache(struct bcm_cfg80211 *cfg, int ioctl_ver);
+#endif /* ROAM_CHANNEL_CACHE */
+
+#ifdef P2P_LISTEN_OFFLOADING
+s32 wl_cfg80211_p2plo_deinit(struct bcm_cfg80211 *cfg);
+#endif /* P2P_LISTEN_OFFLOADING */
+
+#ifdef CUSTOMER_HW4_DEBUG
+extern bool wl_scan_timeout_dbg_enabled;
+#endif /* CUSTOMER_HW4_DEBUG */
+#ifdef PKT_FILTER_SUPPORT
+extern uint dhd_pkt_filter_enable;
+extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode);
+#endif /* PKT_FILTER_SUPPORT */
+
+static int wl_cfg80211_delayed_roam(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ const struct ether_addr *bssid);
+static s32 __wl_update_wiphybands(struct bcm_cfg80211 *cfg, bool notify);
+
+#ifdef WL_SDO
+s32 wl_cfg80211_sdo_init(struct bcm_cfg80211 *cfg);
+s32 wl_cfg80211_sdo_deinit(struct bcm_cfg80211 *cfg);
+#define MAX_SDO_PROTO 5
+wl_sdo_proto_t wl_sdo_protos [] = {
+ { "all", SVC_RPOTYPE_ALL },
+ { "upnp", SVC_RPOTYPE_UPNP },
+ { "bonjour", SVC_RPOTYPE_BONJOUR },
+ { "wsd", SVC_RPOTYPE_WSD },
+ { "vendor", SVC_RPOTYPE_VENDOR },
+};
+#endif
+
+#ifdef WL_WPS_SYNC
+static void wl_init_wps_reauth_sm(struct bcm_cfg80211 *cfg);
+static void wl_deinit_wps_reauth_sm(struct bcm_cfg80211 *cfg);
+static void wl_wps_reauth_timeout(unsigned long data);
+static s32 wl_get_free_wps_inst(struct bcm_cfg80211 *cfg);
+static s32 wl_get_wps_inst_match(struct bcm_cfg80211 *cfg, struct net_device *ndev);
+static s32 wl_wps_session_add(struct net_device *ndev, u16 mode, u8 *peer_mac);
+static void wl_wps_session_del(struct net_device *ndev);
+s32 wl_wps_session_update(struct net_device *ndev, u16 state, const u8 *peer_mac);
+static void wl_wps_handle_ifdel(struct net_device *ndev);
+#endif /* WL_WPS_SYNC */
+
+#if defined(WL_FW_OCE_AP_SELECT)
+static bool wl_cfg80211_is_oce_ap(struct wiphy *wiphy, const u8 *bssid_hint);
+#endif /* WL_FW_OCE_AP_SELECT */
+
+#ifdef WL_BCNRECV
+static s32 wl_bcnrecv_aborted_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data);
+#endif /* WL_BCNRECV */
+
+#ifdef WL_CAC_TS
+static s32 wl_cfg80211_cac_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data);
+#endif /* WL_CAC_TS */
+
+#if defined(WL_MBO) || defined(WL_OCE)
+static s32 wl_bssid_prune_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data);
+#endif /* WL_MBO || WL_OCE */
+static void wl_cfg80211_handle_set_ssid_complete(struct bcm_cfg80211 *cfg, wl_assoc_status_t *as,
+ const wl_event_msg_t *event, wl_assoc_state_t assoc_state);
+
+#if !defined(BCMDONGLEHOST)
+/* Wake lock are used in Android only, which is dongle based as of now */
+#define DHD_OS_WAKE_LOCK(pub)
+#define DHD_OS_WAKE_UNLOCK(pub)
+#define DHD_EVENT_WAKE_LOCK(pub)
+#define DHD_EVENT_WAKE_UNLOCK(pub)
+#define DHD_OS_WAKE_LOCK_TIMEOUT(pub)
+#endif /* defined(BCMDONGLEHOST) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) || \
+ (defined(CONFIG_ARCH_MSM) && defined(CFG80211_DISCONNECTED_V2))
+#define CFG80211_GET_BSS(wiphy, channel, bssid, ssid, ssid_len) \
+ cfg80211_get_bss(wiphy, channel, bssid, ssid, ssid_len, \
+ IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
+#else
+#define CFG80211_GET_BSS(wiphy, channel, bssid, ssid, ssid_len) \
+ cfg80211_get_bss(wiphy, channel, bssid, ssid, ssid_len, \
+ WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) || \
+ defined(CFG80211_CONNECT_TIMEOUT_REASON_CODE) || \
+ defined(WL_FILS) || defined(CONFIG_CFG80211_FILS_BKPORT)
+#define CFG80211_CONNECT_RESULT(dev, bssid, bss, req_ie, req_ie_len, resp_ie, \
+ resp_ie_len, status, gfp) \
+ cfg80211_connect_bss(dev, bssid, bss, req_ie, req_ie_len, resp_ie, \
+ resp_ie_len, status, gfp, NL80211_TIMEOUT_UNSPECIFIED);
+#else
+#define CFG80211_CONNECT_RESULT(dev, bssid, bss, req_ie, req_ie_len, resp_ie, \
+ resp_ie_len, status, gfp) \
+ cfg80211_connect_bss(dev, bssid, bss, req_ie, req_ie_len, resp_ie, \
+ resp_ie_len, status, gfp);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) || \
+ * (CFG80211_CONNECT_TIMEOUT_REASON_CODE) ||
+ * WL_FILS || CONFIG_CFG80211_FILS_BKPORT
+ */
+#elif defined(CFG80211_CONNECT_TIMEOUT_REASON_CODE)
+/* There are customer kernels with backported changes for
+ * connect timeout. CFG80211_CONNECT_TIMEOUT_REASON_CODE define
+ * is available for kernels < 4.7 in such cases.
+ */
+#define CFG80211_CONNECT_RESULT(dev, bssid, bss, req_ie, req_ie_len, resp_ie, \
+ resp_ie_len, status, gfp) \
+ cfg80211_connect_bss(dev, bssid, bss, req_ie, req_ie_len, resp_ie, \
+ resp_ie_len, status, gfp, NL80211_TIMEOUT_UNSPECIFIED);
+#else
+/* Kernels < 4.7 doesn't support cfg80211_connect_bss */
+#define CFG80211_CONNECT_RESULT(dev, bssid, bss, req_ie, req_ie_len, resp_ie, \
+ resp_ie_len, status, gfp) \
+ cfg80211_connect_result(dev, bssid, req_ie, req_ie_len, resp_ie, \
+ resp_ie_len, status, gfp);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0) */
+
+#define IS_WPA_AKM(akm) ((akm) == RSN_AKM_NONE || \
+ (akm) == RSN_AKM_UNSPECIFIED || \
+ (akm) == RSN_AKM_PSK)
+
+#define WL_EIDX_INVALID 0xffff
+#define WL_SET_EIDX_IN_PROGRESS(cfg, id, type) \
+ { cfg->eidx.in_progress = id; \
+ cfg->eidx.event_type = type; }
+#define WL_CLR_EIDX_STATES(cfg) \
+ cfg->eidx.in_progress = WL_EIDX_INVALID;
+extern int dhd_wait_pend8021x(struct net_device *dev);
+#ifdef PROP_TXSTATUS_VSDB
+extern int disable_proptx;
+#endif /* PROP_TXSTATUS_VSDB */
+
+/* WAR: disable pm_bcnrx, scan_ps for the BCM4354 WISOL module.
+* The WISOL module has an ANT_1 Rx sensitivity issue.
+*/
+#if defined(FORCE_DISABLE_SINGLECORE_SCAN)
+extern void dhd_force_disable_singlcore_scan(dhd_pub_t *dhd);
+#endif /* FORCE_DISABLE_SINGLECORE_SCAN */
+
+#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)) && (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 7, 0)))
+struct chan_info {
+ int freq;
+ int chan_type;
+};
+#endif
+
+#define RATE_TO_BASE100KBPS(rate) (((rate) * 10) / 2)
+#define RATETAB_ENT(_rateid, _flags) \
+ { \
+ .bitrate = RATE_TO_BASE100KBPS(_rateid), \
+ .hw_value = (_rateid), \
+ .flags = (_flags), \
+ }
+
+static struct ieee80211_rate __wl_rates[] = {
+ RATETAB_ENT(DOT11_RATE_1M, 0),
+ RATETAB_ENT(DOT11_RATE_2M, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATETAB_ENT(DOT11_RATE_5M5, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATETAB_ENT(DOT11_RATE_11M, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATETAB_ENT(DOT11_RATE_6M, 0),
+ RATETAB_ENT(DOT11_RATE_9M, 0),
+ RATETAB_ENT(DOT11_RATE_12M, 0),
+ RATETAB_ENT(DOT11_RATE_18M, 0),
+ RATETAB_ENT(DOT11_RATE_24M, 0),
+ RATETAB_ENT(DOT11_RATE_36M, 0),
+ RATETAB_ENT(DOT11_RATE_48M, 0),
+ RATETAB_ENT(DOT11_RATE_54M, 0)
+};
+
+#define wl_a_rates (__wl_rates + 4)
+#define wl_a_rates_size 8
+#define wl_g_rates (__wl_rates + 0)
+#define wl_g_rates_size 12
+
+static struct ieee80211_channel __wl_2ghz_channels[] = {
+ CHAN2G(1, 2412, 0),
+ CHAN2G(2, 2417, 0),
+ CHAN2G(3, 2422, 0),
+ CHAN2G(4, 2427, 0),
+ CHAN2G(5, 2432, 0),
+ CHAN2G(6, 2437, 0),
+ CHAN2G(7, 2442, 0),
+ CHAN2G(8, 2447, 0),
+ CHAN2G(9, 2452, 0),
+ CHAN2G(10, 2457, 0),
+ CHAN2G(11, 2462, 0),
+ CHAN2G(12, 2467, 0),
+ CHAN2G(13, 2472, 0),
+ CHAN2G(14, 2484, 0)
+};
+
+static struct ieee80211_channel __wl_5ghz_a_channels[] = {
+ CHAN5G(34, 0), CHAN5G(36, 0),
+ CHAN5G(38, 0), CHAN5G(40, 0),
+ CHAN5G(42, 0), CHAN5G(44, 0),
+ CHAN5G(46, 0), CHAN5G(48, 0),
+ CHAN5G(52, 0), CHAN5G(56, 0),
+ CHAN5G(60, 0), CHAN5G(64, 0),
+ CHAN5G(100, 0), CHAN5G(104, 0),
+ CHAN5G(108, 0), CHAN5G(112, 0),
+ CHAN5G(116, 0), CHAN5G(120, 0),
+ CHAN5G(124, 0), CHAN5G(128, 0),
+ CHAN5G(132, 0), CHAN5G(136, 0),
+ CHAN5G(140, 0), CHAN5G(144, 0),
+ CHAN5G(149, 0), CHAN5G(153, 0),
+ CHAN5G(157, 0), CHAN5G(161, 0),
+ CHAN5G(165, 0),
+
+#if defined(WL_6G_BAND) && !defined(CFG80211_6G_SUPPORT)
+ /* 6GHz frequency starting 5935 */
+ CHAN6G_CHAN2(0), CHAN6G(1, 0),
+ CHAN6G(5, 0), CHAN6G(9, 0),
+ CHAN6G(13, 0), CHAN6G(17, 0),
+ CHAN6G(21, 0), CHAN6G(25, 0),
+ CHAN6G(29, 0), CHAN6G(33, 0),
+ CHAN6G(37, 0), CHAN6G(41, 0),
+ CHAN6G(45, 0), CHAN6G(49, 0),
+ CHAN6G(53, 0), CHAN6G(57, 0),
+ CHAN6G(61, 0), CHAN6G(65, 0),
+ CHAN6G(69, 0), CHAN6G(73, 0),
+ CHAN6G(77, 0), CHAN6G(81, 0),
+ CHAN6G(85, 0), CHAN6G(89, 0),
+ CHAN6G(93, 0), CHAN6G(97, 0),
+ CHAN6G(101, 0), CHAN6G(105, 0),
+ CHAN6G(109, 0), CHAN6G(113, 0),
+ CHAN6G(117, 0), CHAN6G(121, 0),
+ CHAN6G(125, 0), CHAN6G(129, 0),
+ CHAN6G(133, 0), CHAN6G(137, 0),
+ CHAN6G(141, 0), CHAN6G(145, 0),
+ CHAN6G(149, 0), CHAN6G(153, 0),
+ CHAN6G(157, 0), CHAN6G(161, 0),
+ CHAN6G(165, 0), CHAN6G(169, 0),
+ CHAN6G(173, 0), CHAN6G(177, 0),
+ CHAN6G(181, 0), CHAN6G(185, 0),
+ CHAN6G(189, 0), CHAN6G(193, 0),
+ CHAN6G(197, 0), CHAN6G(201, 0),
+ CHAN6G(205, 0), CHAN6G(209, 0),
+ CHAN6G(213, 0), CHAN6G(217, 0),
+ CHAN6G(221, 0), CHAN6G(225, 0),
+ CHAN6G(229, 0), CHAN6G(233, 0),
+
+ CHAN6G(3, 0), CHAN6G(11, 0),
+ CHAN6G(19, 0), CHAN6G(27, 0),
+ CHAN6G(35, 0), CHAN6G(43, 0),
+ CHAN6G(51, 0), CHAN6G(59, 0),
+ CHAN6G(67, 0), CHAN6G(75, 0),
+ CHAN6G(83, 0), CHAN6G(91, 0),
+ CHAN6G(99, 0), CHAN6G(107, 0),
+ CHAN6G(115, 0), CHAN6G(123, 0),
+ CHAN6G(131, 0), CHAN6G(139, 0),
+ CHAN6G(147, 0), CHAN6G(155, 0),
+ CHAN6G(163, 0), CHAN6G(171, 0),
+ CHAN6G(179, 0), CHAN6G(187, 0),
+ CHAN6G(195, 0), CHAN6G(203, 0),
+ CHAN6G(211, 0), CHAN6G(219, 0), CHAN6G(227, 0),
+
+ CHAN6G(7, 0), CHAN6G(23, 0),
+ CHAN6G(39, 0), CHAN6G(55, 0),
+ CHAN6G(71, 0), CHAN6G(87, 0),
+ CHAN6G(103, 0), CHAN6G(119, 0),
+ CHAN6G(135, 0), CHAN6G(151, 0),
+ CHAN6G(167, 0), CHAN6G(183, 0),
+ CHAN6G(199, 0), CHAN6G(215, 0),
+
+ CHAN6G(15, 0), CHAN6G(47, 0),
+ CHAN6G(79, 0), CHAN6G(111, 0),
+ CHAN6G(143, 0), CHAN6G(175, 0), CHAN6G(207, 0),
+#endif /* WL_6G_BAND && !CFG80211_6G_SUPPORT */
+};
+
+#ifdef CFG80211_6G_SUPPORT
+static struct ieee80211_channel __wl_6ghz_channels[] = {
+ CHAN6G_CHAN2(0), CHAN6G(1, 0),
+ CHAN6G(5, 0), CHAN6G(9, 0),
+ CHAN6G(13, 0), CHAN6G(17, 0),
+ CHAN6G(21, 0), CHAN6G(25, 0),
+ CHAN6G(29, 0), CHAN6G(33, 0),
+ CHAN6G(37, 0), CHAN6G(41, 0),
+ CHAN6G(45, 0), CHAN6G(49, 0),
+ CHAN6G(53, 0), CHAN6G(57, 0),
+ CHAN6G(61, 0), CHAN6G(65, 0),
+ CHAN6G(69, 0), CHAN6G(73, 0),
+ CHAN6G(77, 0), CHAN6G(81, 0),
+ CHAN6G(85, 0), CHAN6G(89, 0),
+ CHAN6G(93, 0), CHAN6G(97, 0),
+ CHAN6G(101, 0), CHAN6G(105, 0),
+ CHAN6G(109, 0), CHAN6G(113, 0),
+ CHAN6G(117, 0), CHAN6G(121, 0),
+ CHAN6G(125, 0), CHAN6G(129, 0),
+ CHAN6G(133, 0), CHAN6G(137, 0),
+ CHAN6G(141, 0), CHAN6G(145, 0),
+ CHAN6G(149, 0), CHAN6G(153, 0),
+ CHAN6G(157, 0), CHAN6G(161, 0),
+ CHAN6G(165, 0), CHAN6G(169, 0),
+ CHAN6G(173, 0), CHAN6G(177, 0),
+ CHAN6G(181, 0), CHAN6G(185, 0),
+ CHAN6G(189, 0), CHAN6G(193, 0),
+ CHAN6G(197, 0), CHAN6G(201, 0),
+ CHAN6G(205, 0), CHAN6G(209, 0),
+ CHAN6G(213, 0), CHAN6G(217, 0),
+ CHAN6G(221, 0), CHAN6G(225, 0),
+ CHAN6G(229, 0), CHAN6G(233, 0),
+};
+#endif /* CFG80211_6G_SUPPORT */
+
+static struct ieee80211_supported_band __wl_band_2ghz = {
+ .band = IEEE80211_BAND_2GHZ,
+ .channels = __wl_2ghz_channels,
+ .n_channels = ARRAY_SIZE(__wl_2ghz_channels),
+ .bitrates = wl_g_rates,
+ .n_bitrates = wl_g_rates_size
+};
+
+static struct ieee80211_supported_band __wl_band_5ghz_a = {
+ .band = IEEE80211_BAND_5GHZ,
+ .channels = __wl_5ghz_a_channels,
+ .n_channels = ARRAY_SIZE(__wl_5ghz_a_channels),
+ .bitrates = wl_a_rates,
+ .n_bitrates = wl_a_rates_size
+};
+
+#ifdef CFG80211_6G_SUPPORT
+static struct ieee80211_supported_band __wl_band_6ghz = {
+ .band = IEEE80211_BAND_6GHZ,
+ .channels = __wl_6ghz_channels,
+ .n_channels = ARRAY_SIZE(__wl_6ghz_channels),
+ .bitrates = wl_a_rates,
+ .n_bitrates = wl_a_rates_size
+};
+#endif /* CFG80211_6G_SUPPORT */
+
+static const u32 __wl_cipher_suites[] = {
+ WLAN_CIPHER_SUITE_WEP40,
+ WLAN_CIPHER_SUITE_WEP104,
+ WLAN_CIPHER_SUITE_TKIP,
+ WLAN_CIPHER_SUITE_CCMP,
+#ifdef MFP
+ /*
+ * Advertising AES_CMAC cipher suite to userspace would imply that we
+ * are supporting MFP. So advertise only when MFP support is enabled.
+ */
+ WLAN_CIPHER_SUITE_AES_CMAC,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0))
+ WLAN_CIPHER_SUITE_BIP_GMAC_256,
+ WLAN_CIPHER_SUITE_BIP_GMAC_128,
+ WLAN_CIPHER_SUITE_BIP_CMAC_256,
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0) */
+#endif /* MFP */
+
+#ifdef BCMWAPI_WPI
+ WLAN_CIPHER_SUITE_SMS4,
+#endif
+
+#if defined(WLAN_CIPHER_SUITE_PMK)
+ WLAN_CIPHER_SUITE_PMK,
+#endif /* WLAN_CIPHER_SUITE_PMK */
+#ifdef WL_GCMP
+ WLAN_CIPHER_SUITE_GCMP,
+ WLAN_CIPHER_SUITE_GCMP_256,
+ WLAN_CIPHER_SUITE_BIP_GMAC_128,
+ WLAN_CIPHER_SUITE_BIP_GMAC_256,
+#endif /* WL_GCMP */
+};
+
+#ifdef WL_SUPPORT_ACS
+/*
+ * The firmware code required for this feature to work is currently under
+ * the BCMINTERNAL flag. If this feature is to be enabled in the future, the
+ * required firmware code needs to be brought out of the BCMINTERNAL flag.
+ */
+struct wl_dump_survey {
+ u32 obss;
+ u32 ibss;
+ u32 no_ctg;
+ u32 no_pckt;
+ u32 tx;
+ u32 idle;
+};
+#endif /* WL_SUPPORT_ACS */
+
+#ifdef WL_CFG80211_GON_COLLISION
+#define BLOCK_GON_REQ_MAX_NUM 5
+#endif /* WL_CFG80211_GON_COLLISION */
+
+#if defined(USE_DYNAMIC_MAXPKT_RXGLOM)
+static int maxrxpktglom = 0;
+#endif
+
+/* IOCtl version read from targeted driver */
+int ioctl_version;
+
+typedef struct rsn_cipher_algo_entry {
+ u32 cipher_suite;
+ u32 wsec_algo;
+ u32 wsec_key_algo;
+} rsn_cipher_algo_entry_t;
+
+static const rsn_cipher_algo_entry_t rsn_cipher_algo_lookup_tbl[] = {
+ {WLAN_CIPHER_SUITE_WEP40, WEP_ENABLED, CRYPTO_ALGO_WEP1},
+ {WLAN_CIPHER_SUITE_WEP104, WEP_ENABLED, CRYPTO_ALGO_WEP128},
+ {WLAN_CIPHER_SUITE_TKIP, TKIP_ENABLED, CRYPTO_ALGO_TKIP},
+ {WLAN_CIPHER_SUITE_CCMP, AES_ENABLED, CRYPTO_ALGO_AES_CCM},
+ {WLAN_CIPHER_SUITE_AES_CMAC, AES_ENABLED, CRYPTO_ALGO_BIP},
+
+#ifdef BCMWAPI_WPI
+ {WLAN_CIPHER_SUITE_SMS4, SMS4_ENABLED, CRYPTO_ALGO_SMS4},
+#endif /* BCMWAPI_WPI */
+
+#ifdef WL_GCMP
+ {WLAN_CIPHER_SUITE_GCMP, AES_ENABLED, CRYPTO_ALGO_AES_GCM},
+ {WLAN_CIPHER_SUITE_GCMP_256, AES_ENABLED, CRYPTO_ALGO_AES_GCM256},
+ {WLAN_CIPHER_SUITE_BIP_GMAC_128, AES_ENABLED, CRYPTO_ALGO_BIP_GMAC},
+ {WLAN_CIPHER_SUITE_BIP_GMAC_256, AES_ENABLED, CRYPTO_ALGO_BIP_GMAC256},
+#endif /* WL_GCMP */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0))
+ {WLAN_CIPHER_SUITE_BIP_CMAC_256, AES_ENABLED, CRYPTO_ALGO_BIP_CMAC256},
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0) */
+};
+
+typedef struct rsn_akm_wpa_auth_entry {
+ u32 akm_suite;
+ u32 wpa_auth;
+} rsn_akm_wpa_auth_entry_t;
+
+static const rsn_akm_wpa_auth_entry_t rsn_akm_wpa_auth_lookup_tbl[] = {
+#ifdef WL_OWE
+ {WLAN_AKM_SUITE_OWE, WPA3_AUTH_OWE},
+#endif /* WL_OWE */
+ {WLAN_AKM_SUITE_8021X, WPA2_AUTH_UNSPECIFIED},
+ {WL_AKM_SUITE_SHA256_1X, WPA2_AUTH_1X_SHA256},
+ {WL_AKM_SUITE_SHA256_PSK, WPA2_AUTH_PSK_SHA256},
+ {WLAN_AKM_SUITE_PSK, WPA2_AUTH_PSK},
+ {WLAN_AKM_SUITE_FT_8021X, WPA2_AUTH_UNSPECIFIED | WPA2_AUTH_FT},
+ {WLAN_AKM_SUITE_FT_PSK, WPA2_AUTH_PSK | WPA2_AUTH_FT},
+ {WLAN_AKM_SUITE_FILS_SHA256, WPA2_AUTH_FILS_SHA256},
+ {WLAN_AKM_SUITE_FILS_SHA384, WPA2_AUTH_FILS_SHA384},
+ {WLAN_AKM_SUITE_8021X_SUITE_B, WPA3_AUTH_1X_SUITE_B_SHA256},
+ {WLAN_AKM_SUITE_8021X_SUITE_B_192, WPA3_AUTH_1X_SUITE_B_SHA384},
+
+#ifdef BCMWAPI_WPI
+ {WLAN_AKM_SUITE_WAPI_CERT, WAPI_AUTH_UNSPECIFIED},
+ {WLAN_AKM_SUITE_WAPI_PSK, WAPI_AUTH_PSK},
+#endif /* BCMWAPI_WPI */
+
+#if defined(WL_SAE) || defined(WL_CLIENT_SAE)
+ {WLAN_AKM_SUITE_SAE, WPA3_AUTH_SAE_PSK},
+#endif /* WL_SAE || WL_CLIENT_SAE */
+#ifdef WL_SAE_FT
+ {WLAN_AKM_SUITE_FT_OVER_SAE, WPA3_AUTH_SAE_PSK | WPA2_AUTH_FT},
+#endif /* WL_SAE_FT */
+ {WLAN_AKM_SUITE_DPP, WPA3_AUTH_DPP_AKM},
+ {WLAN_AKM_SUITE_FT_8021X_SHA384, WPA3_AUTH_1X_SUITE_B_SHA384 | WPA2_AUTH_FT}
+};
+
+#define BUFSZ 8
+#define BUFSZN (BUFSZ + 1)
+
+#define _S(x) #x
+#define S(x) _S(x)
+
+#define SOFT_AP_IF_NAME "swlan0"
+
+/* watchdog timer for disconnecting when fw is not associated for FW_ASSOC_WATCHDOG_TIME ms */
+uint32 fw_assoc_watchdog_ms = 0;
+bool fw_assoc_watchdog_started = 0;
+#define FW_ASSOC_WATCHDOG_TIME	(10 * 1000)	/* msec */
+
+int wl_channel_to_frequency(u32 chan, chanspec_band_t band)
+{
+ if (chan == 0) {
+ return 0; /* not supported */
+ }
+ switch (band) {
+ case WL_CHANSPEC_BAND_2G:
+ if (chan == 14)
+ return 2484;
+ else if (chan < 14)
+ return 2407 + chan * 5;
+ break;
+ case WL_CHANSPEC_BAND_5G:
+ if (chan >= 182 && chan <= 196)
+ return 4000 + chan * 5;
+ else
+ return 5000 + chan * 5;
+ break;
+#ifdef WL_6G_BAND
+ case WL_CHANSPEC_BAND_6G:
+ if (chan == 2) {
+ /* Specific handling for channel 2 in 6G */
+ return 5935;
+ }
+ return 5950 + chan * 5;
+ break;
+#endif /* WL_6G_BAND */
+ default:
+ WL_ERR(("Invalid Frequency Band\n"));
+ }
+ return 0; /* not supported */
+}
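+
+/* Worked examples of the mapping above:
+ *   2G channel 6  -> 2407 + 6*5  = 2437 MHz
+ *   2G channel 14 -> 2484 MHz (special case)
+ *   5G channel 36 -> 5000 + 36*5 = 5180 MHz
+ *   6G channel 1  -> 5950 + 1*5  = 5955 MHz
+ *   6G channel 2  -> 5935 MHz (special case)
+ */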
+
+static void wl_add_remove_pm_enable_work(struct bcm_cfg80211 *cfg,
+ enum wl_pm_workq_act_type type)
+{
+ u16 wq_duration = 0;
+
+#if defined(BCMDONGLEHOST) && defined(OEM_ANDROID)
+ dhd_pub_t *dhd = NULL;
+#endif /* BCMDONGLEHOST && OEM_ANDROID */
+
+ if (cfg == NULL)
+ return;
+
+#if defined(BCMDONGLEHOST) && defined(OEM_ANDROID)
+ dhd = (dhd_pub_t *)(cfg->pub);
+#endif /* BCMDONGLEHOST && OEM_ANDROID */
+
+ mutex_lock(&cfg->pm_sync);
+ /*
+	 * Make the cancel and schedule paths mutually exclusive,
+	 * so that no new work can get scheduled while a cancel
+	 * is in progress.
+ */
+ if (delayed_work_pending(&cfg->pm_enable_work)) {
+ cancel_delayed_work(&cfg->pm_enable_work);
+
+#if defined(BCMDONGLEHOST) && defined(OEM_ANDROID)
+ DHD_PM_WAKE_UNLOCK(cfg->pub);
+#endif /* BCMDONGLEHOST && OEM_ANDROID */
+
+ }
+
+ if (type == WL_PM_WORKQ_SHORT) {
+ wq_duration = WL_PM_ENABLE_TIMEOUT;
+ } else if (type == WL_PM_WORKQ_LONG) {
+ wq_duration = (WL_PM_ENABLE_TIMEOUT*2);
+ }
+
+	/* Schedule the work item only if the driver is up */
+ if (wq_duration) {
+#if defined(BCMDONGLEHOST) && defined(OEM_ANDROID)
+ if (dhd->up)
+#endif /* defined(BCMDONGLEHOST) && defined(OEM_ANDROID) */
+ {
+ if (schedule_delayed_work(&cfg->pm_enable_work,
+ msecs_to_jiffies((const unsigned int)wq_duration))) {
+
+#if defined(BCMDONGLEHOST) && defined(OEM_ANDROID)
+ DHD_PM_WAKE_LOCK_TIMEOUT(cfg->pub, wq_duration);
+#endif /* BCMDONGLEHOST && OEM_ANDROID */
+ } else {
+ WL_ERR(("Can't schedule pm work handler\n"));
+ }
+ }
+ }
+ mutex_unlock(&cfg->pm_sync);
+}
+
+/* Return a new chanspec given a legacy chanspec
+ * Returns INVCHANSPEC on error
+ */
+chanspec_t
+wl_chspec_from_legacy(chanspec_t legacy_chspec)
+{
+ chanspec_t chspec;
+
+ /* get the channel number */
+ chspec = LCHSPEC_CHANNEL(legacy_chspec);
+
+ /* convert the band */
+ if (LCHSPEC_IS2G(legacy_chspec)) {
+ chspec |= WL_CHANSPEC_BAND_2G;
+ } else {
+ chspec |= WL_CHANSPEC_BAND_5G;
+ }
+
+ /* convert the bw and sideband */
+ if (LCHSPEC_IS20(legacy_chspec)) {
+ chspec |= WL_CHANSPEC_BW_20;
+ } else {
+ chspec |= WL_CHANSPEC_BW_40;
+ if (LCHSPEC_CTL_SB(legacy_chspec) == WL_LCHANSPEC_CTL_SB_LOWER) {
+ chspec |= WL_CHANSPEC_CTL_SB_L;
+ } else {
+ chspec |= WL_CHANSPEC_CTL_SB_U;
+ }
+ }
+
+ if (wf_chspec_malformed(chspec)) {
+ WL_ERR(("wl_chspec_from_legacy: output chanspec (0x%04X) malformed\n",
+ chspec));
+ return INVCHANSPEC;
+ }
+
+ return chspec;
+}
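+
+/* Example of the conversion above: a legacy 20 MHz 2.4 GHz channel 6
+ * chanspec becomes (6 | WL_CHANSPEC_BAND_2G | WL_CHANSPEC_BW_20); for a
+ * legacy 40 MHz chanspec the lower/upper control sideband additionally
+ * maps to WL_CHANSPEC_CTL_SB_L / WL_CHANSPEC_CTL_SB_U.
+ */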
+
+/* Return a legacy chanspec given a new chanspec
+ * Returns INVCHANSPEC on error
+ */
+static chanspec_t
+wl_chspec_to_legacy(chanspec_t chspec)
+{
+ chanspec_t lchspec;
+
+ if (wf_chspec_malformed(chspec)) {
+ WL_ERR(("wl_chspec_to_legacy: input chanspec (0x%04X) malformed\n",
+ chspec));
+ return INVCHANSPEC;
+ }
+
+ /* get the channel number */
+ lchspec = CHSPEC_CHANNEL(chspec);
+
+ /* convert the band */
+ if (CHSPEC_IS2G(chspec)) {
+ lchspec |= WL_LCHANSPEC_BAND_2G;
+ } else {
+ lchspec |= WL_LCHANSPEC_BAND_5G;
+ }
+
+ /* convert the bw and sideband */
+ if (CHSPEC_IS20(chspec)) {
+ lchspec |= WL_LCHANSPEC_BW_20;
+ lchspec |= WL_LCHANSPEC_CTL_SB_NONE;
+ } else if (CHSPEC_IS40(chspec)) {
+ lchspec |= WL_LCHANSPEC_BW_40;
+ if (CHSPEC_CTL_SB(chspec) == WL_CHANSPEC_CTL_SB_L) {
+ lchspec |= WL_LCHANSPEC_CTL_SB_LOWER;
+ } else {
+ lchspec |= WL_LCHANSPEC_CTL_SB_UPPER;
+ }
+ } else {
+ /* cannot express the bandwidth */
+ char chanbuf[CHANSPEC_STR_LEN];
+ WL_ERR((
+ "wl_chspec_to_legacy: unable to convert chanspec %s (0x%04X) "
+ "to pre-11ac format\n",
+ wf_chspec_ntoa(chspec, chanbuf), chspec));
+ return INVCHANSPEC;
+ }
+
+ return lchspec;
+}
+
+bool wl_cfg80211_is_hal_started(struct bcm_cfg80211 *cfg)
+{
+ return cfg->hal_started;
+}
+
+/* given a chanspec value, do the endian and chanspec version conversion to
+ * a chanspec_t value
+ * Returns INVCHANSPEC on error
+ */
+chanspec_t
+wl_chspec_host_to_driver(chanspec_t chanspec)
+{
+ if (ioctl_version == 1) {
+ chanspec = wl_chspec_to_legacy(chanspec);
+ if (chanspec == INVCHANSPEC) {
+ return chanspec;
+ }
+ }
+ chanspec = htodchanspec(chanspec);
+
+ return chanspec;
+}
+
+/* given a channel value, do the endian and chanspec version conversion to
+ * a chanspec_t value
+ * Returns INVCHANSPEC on error
+ */
+chanspec_t
+wl_ch_host_to_driver(u16 channel)
+{
+ chanspec_t chanspec;
+ chanspec_band_t band;
+
+ band = WL_CHANNEL_BAND(channel);
+
+ chanspec = wf_create_20MHz_chspec(channel, band);
+ if (chanspec == INVCHANSPEC) {
+ return chanspec;
+ }
+
+ return wl_chspec_host_to_driver(chanspec);
+}
+
+/* given a chanspec value from the driver, do the endian and chanspec version conversion to
+ * a chanspec_t value
+ * Returns INVCHANSPEC on error
+ */
+chanspec_t
+wl_chspec_driver_to_host(chanspec_t chanspec)
+{
+ chanspec = dtohchanspec(chanspec);
+ if (ioctl_version == 1) {
+ chanspec = wl_chspec_from_legacy(chanspec);
+ }
+
+ return chanspec;
+}
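+
+/* Note that wl_chspec_driver_to_host() is the inverse of
+ * wl_chspec_host_to_driver(): both apply the legacy chanspec conversion
+ * only for ioctl_version 1 firmware, and otherwise just the endian swap.
+ */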
+
+/*
+ * convert ASCII string to MAC address (colon-delimited format)
+ * eg: 00:11:22:33:44:55
+ */
+int
+wl_cfg80211_ether_atoe(const char *a, struct ether_addr *n)
+{
+ char *c = NULL;
+ int count = 0;
+
+ bzero(n, ETHER_ADDR_LEN);
+ for (;;) {
+ n->octet[count++] = (uint8)simple_strtoul(a, &c, 16);
+ if (!*c++ || count == ETHER_ADDR_LEN)
+ break;
+ a = c;
+ }
+ return (count == ETHER_ADDR_LEN);
+}
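+
+/* Usage sketch for the parser above:
+ *   struct ether_addr ea;
+ *   if (wl_cfg80211_ether_atoe("00:11:22:33:44:55", &ea))
+ *           ...all six octets were parsed...
+ * The return value is nonzero only when exactly ETHER_ADDR_LEN (6)
+ * octets were consumed.
+ */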
+
+/* There isn't a lot of sense in it, but you can transmit anything you like */
+static const struct ieee80211_txrx_stypes
+wl_cfg80211_default_mgmt_stypes[NUM_NL80211_IFTYPES] = {
+#ifdef WLMESH_CFG80211
+ [NL80211_IFTYPE_MESH_POINT] = {
+ .tx = 0xffff,
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_AUTH >> 4)
+ },
+#endif /* WLMESH_CFG80211 */
+ [NL80211_IFTYPE_ADHOC] = {
+ .tx = 0xffff,
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4)
+ },
+ [NL80211_IFTYPE_STATION] = {
+ .tx = 0xffff,
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+#ifdef WL_CLIENT_SAE
+ | BIT(IEEE80211_STYPE_AUTH >> 4)
+#endif /* WL_CLIENT_SAE */
+ },
+ [NL80211_IFTYPE_AP] = {
+ .tx = 0xffff,
+ .rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
+ BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
+ BIT(IEEE80211_STYPE_DISASSOC >> 4) |
+ BIT(IEEE80211_STYPE_AUTH >> 4) |
+ BIT(IEEE80211_STYPE_DEAUTH >> 4) |
+ BIT(IEEE80211_STYPE_ACTION >> 4)
+ },
+ [NL80211_IFTYPE_AP_VLAN] = {
+ /* copy AP */
+ .tx = 0xffff,
+ .rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
+ BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
+ BIT(IEEE80211_STYPE_DISASSOC >> 4) |
+ BIT(IEEE80211_STYPE_AUTH >> 4) |
+ BIT(IEEE80211_STYPE_DEAUTH >> 4) |
+ BIT(IEEE80211_STYPE_ACTION >> 4)
+ },
+ [NL80211_IFTYPE_P2P_CLIENT] = {
+ .tx = 0xffff,
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+ },
+ [NL80211_IFTYPE_P2P_GO] = {
+ .tx = 0xffff,
+ .rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
+ BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
+ BIT(IEEE80211_STYPE_DISASSOC >> 4) |
+ BIT(IEEE80211_STYPE_AUTH >> 4) |
+ BIT(IEEE80211_STYPE_DEAUTH >> 4) |
+ BIT(IEEE80211_STYPE_ACTION >> 4)
+ },
+#if defined(WL_CFG80211_P2P_DEV_IF)
+ [NL80211_IFTYPE_P2P_DEVICE] = {
+ .tx = 0xffff,
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+ },
+#endif /* WL_CFG80211_P2P_DEV_IF */
+};
+
+static void swap_key_from_BE(struct wl_wsec_key *key)
+{
+ key->index = htod32(key->index);
+ key->len = htod32(key->len);
+ key->algo = htod32(key->algo);
+ key->flags = htod32(key->flags);
+ key->rxiv.hi = htod32(key->rxiv.hi);
+ key->rxiv.lo = htod16(key->rxiv.lo);
+ key->iv_initialized = htod32(key->iv_initialized);
+}
+
+static void swap_key_to_BE(struct wl_wsec_key *key)
+{
+ key->index = dtoh32(key->index);
+ key->len = dtoh32(key->len);
+ key->algo = dtoh32(key->algo);
+ key->flags = dtoh32(key->flags);
+ key->rxiv.hi = dtoh32(key->rxiv.hi);
+ key->rxiv.lo = dtoh16(key->rxiv.lo);
+ key->iv_initialized = dtoh32(key->iv_initialized);
+}
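+
+/* Despite the _BE names, the two helpers above use the htod/dtoh macros,
+ * i.e. they convert between host and dongle byte order around the wsec_key
+ * ioctl; on little-endian hosts they are expected to be no-ops.
+ */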
+
+#if defined(WL_FW_OCE_AP_SELECT)
+static bool wl_cfg80211_is_oce_ap(struct wiphy *wiphy, const u8 *bssid_hint)
+{
+ const u8 *parse = NULL;
+ bcm_tlv_t *ie;
+ const struct cfg80211_bss_ies *ies;
+ u32 len;
+ struct cfg80211_bss *bss;
+
+ bss = CFG80211_GET_BSS(wiphy, NULL, bssid_hint, 0, 0);
+ if (!bss) {
+ WL_ERR(("Unable to find AP in the cache"));
+ return false;
+ }
+
+	/* The bss reference taken via CFG80211_GET_BSS is released with
+	 * CFG80211_PUT_BSS on every return path below.
+	 */
+	if (rcu_access_pointer(bss->ies)) {
+		ies = rcu_access_pointer(bss->ies);
+		parse = ies->data;
+		len = ies->len;
+	} else {
+		WL_ERR(("ies is NULL"));
+		CFG80211_PUT_BSS(wiphy, bss);
+		return false;
+	}
+
+	while ((ie = bcm_parse_tlvs(parse, len, DOT11_MNG_VS_ID))) {
+		if (wl_cfgoce_is_oce_ie((const uint8*)ie, (u8 const **)&parse, &len) == TRUE) {
+			CFG80211_PUT_BSS(wiphy, bss);
+			return true;
+		} else {
+			ie = bcm_next_tlv((const bcm_tlv_t*) ie, &len);
+			if (!ie) {
+				CFG80211_PUT_BSS(wiphy, bss);
+				return false;
+			}
+			parse = (uint8 *)ie;
+			WL_DBG(("NON OCE IE. next ie ptr:%p", parse));
+		}
+	}
+	WL_DBG(("OCE IE NOT found"));
+	CFG80211_PUT_BSS(wiphy, bss);
+	return false;
+}
+#endif /* WL_FW_OCE_AP_SELECT */
+
+/* Dump the contents of the encoded wps ie buffer and get pbc value */
+static void
+wl_validate_wps_ie(const char *wps_ie, s32 wps_ie_len, bool *pbc)
+{
+ #define WPS_IE_FIXED_LEN 6
+ s16 len;
+ const u8 *subel = NULL;
+ u16 subelt_id;
+ u16 subelt_len;
+ u16 val;
+ u8 *valptr = (uint8*) &val;
+ if (wps_ie == NULL || wps_ie_len < WPS_IE_FIXED_LEN) {
+ WL_ERR(("invalid argument : NULL\n"));
+ return;
+ }
+ len = (s16)wps_ie[TLV_LEN_OFF];
+
+ if (len > wps_ie_len) {
+ WL_ERR(("invalid length len %d, wps ie len %d\n", len, wps_ie_len));
+ return;
+ }
+ WL_DBG(("wps_ie len=%d\n", len));
+ len -= 4; /* for the WPS IE's OUI, oui_type fields */
+ subel = wps_ie + WPS_IE_FIXED_LEN;
+ while (len >= 4) { /* must have attr id, attr len fields */
+ valptr[0] = *subel++;
+ valptr[1] = *subel++;
+ subelt_id = HTON16(val);
+
+ valptr[0] = *subel++;
+ valptr[1] = *subel++;
+ subelt_len = HTON16(val);
+
+ len -= 4; /* for the attr id, attr len fields */
+ len -= (s16)subelt_len; /* for the remaining fields in this attribute */
+ if (len < 0) {
+ break;
+ }
+ WL_DBG((" subel=%p, subelt_id=0x%x subelt_len=%u\n",
+ subel, subelt_id, subelt_len));
+
+ if (subelt_id == WPS_ID_VERSION) {
+ WL_DBG((" attr WPS_ID_VERSION: %u\n", *subel));
+ } else if (subelt_id == WPS_ID_REQ_TYPE) {
+ WL_DBG((" attr WPS_ID_REQ_TYPE: %u\n", *subel));
+ } else if (subelt_id == WPS_ID_CONFIG_METHODS) {
+ valptr[0] = *subel;
+ valptr[1] = *(subel + 1);
+ WL_DBG((" attr WPS_ID_CONFIG_METHODS: %x\n", HTON16(val)));
+ } else if (subelt_id == WPS_ID_DEVICE_NAME) {
+ char devname[33];
+ int namelen = MIN(subelt_len, (sizeof(devname) - 1));
+
+ if (namelen) {
+ memcpy(devname, subel, namelen);
+ devname[namelen] = '\0';
+ /* Printing len as rx'ed in the IE */
+ WL_DBG((" attr WPS_ID_DEVICE_NAME: %s (len %u)\n",
+ devname, subelt_len));
+ }
+ } else if (subelt_id == WPS_ID_DEVICE_PWD_ID) {
+ valptr[0] = *subel;
+ valptr[1] = *(subel + 1);
+ WL_DBG((" attr WPS_ID_DEVICE_PWD_ID: %u\n", HTON16(val)));
+ *pbc = (HTON16(val) == DEV_PW_PUSHBUTTON) ? true : false;
+ } else if (subelt_id == WPS_ID_PRIM_DEV_TYPE) {
+ valptr[0] = *subel;
+ valptr[1] = *(subel + 1);
+ WL_DBG((" attr WPS_ID_PRIM_DEV_TYPE: cat=%u \n", HTON16(val)));
+ valptr[0] = *(subel + 6);
+ valptr[1] = *(subel + 7);
+ WL_DBG((" attr WPS_ID_PRIM_DEV_TYPE: subcat=%u\n", HTON16(val)));
+ } else if (subelt_id == WPS_ID_REQ_DEV_TYPE) {
+ valptr[0] = *subel;
+ valptr[1] = *(subel + 1);
+ WL_DBG((" attr WPS_ID_REQ_DEV_TYPE: cat=%u\n", HTON16(val)));
+ valptr[0] = *(subel + 6);
+ valptr[1] = *(subel + 7);
+ WL_DBG((" attr WPS_ID_REQ_DEV_TYPE: subcat=%u\n", HTON16(val)));
+ } else if (subelt_id == WPS_ID_SELECTED_REGISTRAR_CONFIG_METHODS) {
+ valptr[0] = *subel;
+ valptr[1] = *(subel + 1);
+ WL_DBG((" attr WPS_ID_SELECTED_REGISTRAR_CONFIG_METHODS"
+ ": cat=%u\n", HTON16(val)));
+ } else {
+ WL_DBG((" unknown attr 0x%x\n", subelt_id));
+ }
+
+ subel += subelt_len;
+ }
+}
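+
+/* The WPS attributes parsed above are TLVs with a big-endian 2-byte id and
+ * a 2-byte length. For instance, a Device Password ID attribute of
+ *   10 12 00 02 00 04
+ * carries value 0x0004 (DEV_PW_PUSHBUTTON), which makes the parser set
+ * *pbc to true.
+ */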
+
+s32 wl_set_tx_power(struct net_device *dev,
+ enum nl80211_tx_power_setting type, s32 dbm)
+{
+ s32 err = 0;
+ s32 disable = 0;
+ s32 txpwrqdbm;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+
+ /* Make sure radio is off or on as far as software is concerned */
+ disable = WL_RADIO_SW_DISABLE << 16;
+ disable = htod32(disable);
+ err = wldev_ioctl_set(dev, WLC_SET_RADIO, &disable, sizeof(disable));
+ if (unlikely(err)) {
+ WL_ERR(("WLC_SET_RADIO error (%d)\n", err));
+ return err;
+ }
+
+ if (dbm > 0xffff)
+ dbm = 0xffff;
+ txpwrqdbm = dbm * 4;
+#ifdef SUPPORT_WL_TXPOWER
+ if (type == NL80211_TX_POWER_AUTOMATIC)
+ txpwrqdbm = 127;
+ else
+ txpwrqdbm |= WL_TXPWR_OVERRIDE;
+#endif /* SUPPORT_WL_TXPOWER */
+ err = wldev_iovar_setbuf_bsscfg(dev, "qtxpower", (void *)&txpwrqdbm,
+ sizeof(txpwrqdbm), cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0,
+ &cfg->ioctl_buf_sync);
+ if (unlikely(err))
+ WL_ERR(("qtxpower error (%d)\n", err));
+ else
+ WL_ERR(("dBm=%d, txpwrqdbm=0x%x\n", dbm, txpwrqdbm));
+
+ return err;
+}
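+
+/* The "qtxpower" iovar above takes quarter-dBm units: dbm = 16 encodes as
+ * txpwrqdbm = 64 (0x40). With SUPPORT_WL_TXPOWER defined,
+ * NL80211_TX_POWER_AUTOMATIC maps to the magic value 127, and any other
+ * setting has the WL_TXPWR_OVERRIDE bit set to force the fixed power.
+ */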
+
+s32 wl_get_tx_power(struct net_device *dev, s32 *dbm)
+{
+ s32 err = 0;
+ s32 txpwrdbm;
+ char ioctl_buf[WLC_IOCTL_SMLEN];
+
+ err = wldev_iovar_getbuf_bsscfg(dev, "qtxpower",
+ NULL, 0, ioctl_buf, WLC_IOCTL_SMLEN, 0, NULL);
+ if (unlikely(err)) {
+ WL_ERR(("error (%d)\n", err));
+ return err;
+ }
+
+ memcpy(&txpwrdbm, ioctl_buf, sizeof(txpwrdbm));
+ txpwrdbm = dtoh32(txpwrdbm);
+ *dbm = (txpwrdbm & ~WL_TXPWR_OVERRIDE) / 4;
+
+ WL_DBG(("dBm=%d, txpwrdbm=0x%x\n", *dbm, txpwrdbm));
+
+ return err;
+}
+
+chanspec_t wl_cfg80211_get_shared_freq(struct wiphy *wiphy)
+{
+ chanspec_t chspec;
+ int cur_band, err = 0;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct net_device *dev = bcmcfg_to_prmry_ndev(cfg);
+ struct ether_addr bssid;
+ wl_bss_info_t *bss = NULL;
+ u16 channel = WL_P2P_TEMP_CHAN;
+ char *buf;
+
+ bzero(&bssid, sizeof(bssid));
+ if ((err = wldev_ioctl_get(dev, WLC_GET_BSSID, &bssid, sizeof(bssid)))) {
+		/* STA interface is not associated. So start the new interface on a temp
+		 * channel. The proper channel will be applied later by the framework
+		 * via set_channel (cfg80211 API).
+		 */
+		WL_DBG(("Not associated. Return a temp channel.\n"));
+ cur_band = 0;
+ err = wldev_ioctl_get(dev, WLC_GET_BAND, &cur_band, sizeof(int));
+ if (unlikely(err)) {
+ WL_ERR(("Get band failed\n"));
+ } else if (cur_band == WLC_BAND_5G || cur_band == WLC_BAND_6G) {
+ channel = WL_P2P_TEMP_CHAN_5G;
+ }
+ return wl_ch_host_to_driver(channel);
+ }
+
+ buf = (char *)MALLOCZ(cfg->osh, WL_EXTRA_BUF_MAX);
+ if (!buf) {
+ WL_ERR(("buf alloc failed. use temp channel\n"));
+ return wl_ch_host_to_driver(channel);
+ }
+
+ *(u32 *)buf = htod32(WL_EXTRA_BUF_MAX);
+ if ((err = wldev_ioctl_get(dev, WLC_GET_BSS_INFO, buf,
+ WL_EXTRA_BUF_MAX))) {
+ WL_ERR(("Failed to get associated bss info, use temp channel \n"));
+ chspec = wl_ch_host_to_driver(channel);
+ }
+ else {
+ bss = (wl_bss_info_t *) (buf + 4);
+ chspec = bss->chanspec;
+#ifdef WL_6G_BAND
+ /* Avoid p2p bring up in 6G based on bssinfo */
+ if (CHSPEC_IS6G(chspec)) {
+ channel = WL_P2P_TEMP_CHAN_5G;
+ chspec = wl_ch_host_to_driver(channel);
+ }
+#endif /* WL_6G_BAND */
+ WL_DBG(("Valid BSS Found. chanspec:%d \n", chspec));
+ }
+
+ MFREE(cfg->osh, buf, WL_EXTRA_BUF_MAX);
+ return chspec;
+}
+
+void
+wl_wlfc_enable(struct bcm_cfg80211 *cfg, bool enable)
+{
+#ifdef PROP_TXSTATUS_VSDB
+#if defined(BCMSDIO) || defined(BCMDBUS)
+ bool wlfc_enabled = FALSE;
+	s32 err, up = 1;
+ dhd_pub_t *dhd;
+ struct net_device *primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+
+ dhd = (dhd_pub_t *)(cfg->pub);
+ if (!dhd) {
+ return;
+ }
+
+ if (enable) {
+ if (!cfg->wlfc_on && !disable_proptx) {
+ dhd_wlfc_get_enable(dhd, &wlfc_enabled);
+ if (!wlfc_enabled && dhd->op_mode != DHD_FLAG_HOSTAP_MODE &&
+ dhd->op_mode != DHD_FLAG_IBSS_MODE) {
+ dhd_wlfc_init(dhd);
+ err = wldev_ioctl_set(primary_ndev, WLC_UP, &up, sizeof(s32));
+ if (err < 0)
+ WL_ERR(("WLC_UP return err:%d\n", err));
+ }
+ cfg->wlfc_on = true;
+ WL_DBG(("wlfc_on:%d \n", cfg->wlfc_on));
+ }
+	} else if (dhd->conf->disable_proptx != 0) {
+ dhd_wlfc_deinit(dhd);
+ cfg->wlfc_on = false;
+ }
+#endif /* BCMSDIO || BCMDBUS */
+#endif /* PROP_TXSTATUS_VSDB */
+}
+
+struct wireless_dev *
+wl_cfg80211_p2p_if_add(struct bcm_cfg80211 *cfg,
+ wl_iftype_t wl_iftype,
+ char const *name, u8 *mac_addr, s32 *ret_err)
+{
+ u16 chspec;
+ s16 cfg_type;
+ long timeout;
+ s32 err;
+ u16 p2p_iftype;
+ int dhd_mode;
+ struct net_device *new_ndev = NULL;
+ struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+ struct ether_addr *p2p_addr;
+
+ *ret_err = BCME_OK;
+ if (!cfg->p2p) {
+ WL_ERR(("p2p not initialized\n"));
+ return NULL;
+ }
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+ if (wl_iftype == WL_IF_TYPE_P2P_DISC) {
+ /* Handle Dedicated P2P discovery Interface */
+ return wl_cfgp2p_add_p2p_disc_if(cfg);
+ }
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+ if (wl_iftype == WL_IF_TYPE_P2P_GO) {
+ p2p_iftype = WL_P2P_IF_GO;
+ } else {
+ p2p_iftype = WL_P2P_IF_CLIENT;
+ }
+
+ /* Dual p2p doesn't support multiple P2PGO interfaces,
+ * p2p_go_count is the counter for GO creation
+ * requests.
+ */
+ if ((cfg->p2p->p2p_go_count > 0) && (wl_iftype == WL_IF_TYPE_P2P_GO)) {
+ WL_ERR(("FW does not support multiple GO\n"));
+ *ret_err = -ENOTSUPP;
+ return NULL;
+ }
+ if (!cfg->p2p->on) {
+ p2p_on(cfg) = true;
+ wl_cfgp2p_set_firm_p2p(cfg);
+ wl_cfgp2p_init_discovery(cfg);
+ }
+
+ strlcpy(cfg->p2p->vir_ifname, name, sizeof(cfg->p2p->vir_ifname));
+	/* In the concurrency case, STA may already be associated on a particular channel,
+	 * so retrieve the current channel of the primary interface and then start the
+	 * virtual interface on that channel.
+ */
+ chspec = wl_cfg80211_get_shared_freq(wiphy);
+
+ /* For P2P mode, use P2P-specific driver features to create the
+ * bss: "cfg p2p_ifadd"
+ */
+ wl_set_p2p_status(cfg, IF_ADDING);
+ bzero(&cfg->if_event_info, sizeof(cfg->if_event_info));
+ cfg_type = wl_cfgp2p_get_conn_idx(cfg);
+ if (cfg_type < BCME_OK) {
+ wl_clr_p2p_status(cfg, IF_ADDING);
+ WL_ERR(("Failed to get connection idx for p2p interface"
+ ", error code = %d\n", cfg_type));
+ return NULL;
+ }
+
+ p2p_addr = wl_to_p2p_bss_macaddr(cfg, cfg_type);
+ memcpy(p2p_addr->octet, mac_addr, ETH_ALEN);
+
+ err = wl_cfgp2p_ifadd(cfg, p2p_addr,
+ htod32(p2p_iftype), chspec);
+ if (unlikely(err)) {
+ wl_clr_p2p_status(cfg, IF_ADDING);
+ WL_ERR((" virtual iface add failed (%d) \n", err));
+ return NULL;
+ }
+
+ /* Wait for WLC_E_IF event with IF_ADD opcode */
+ timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
+ ((wl_get_p2p_status(cfg, IF_ADDING) == false) &&
+ (cfg->if_event_info.valid)),
+ msecs_to_jiffies(MAX_WAIT_TIME));
+ if (timeout > 0 && !wl_get_p2p_status(cfg, IF_ADDING) && cfg->if_event_info.valid) {
+ wl_if_event_info *event = &cfg->if_event_info;
+ new_ndev = wl_cfg80211_post_ifcreate(bcmcfg_to_prmry_ndev(cfg), event,
+ event->mac, cfg->p2p->vir_ifname, false);
+ if (unlikely(!new_ndev)) {
+ goto fail;
+ }
+
+ if (wl_iftype == WL_IF_TYPE_P2P_GO) {
+ cfg->p2p->p2p_go_count++;
+ }
+ /* Fill p2p specific data */
+ wl_to_p2p_bss_ndev(cfg, cfg_type) = new_ndev;
+ wl_to_p2p_bss_bssidx(cfg, cfg_type) = event->bssidx;
+
+ WL_ERR((" virtual interface(%s) is "
+ "created net attach done\n", cfg->p2p->vir_ifname));
+#if defined(BCMDONGLEHOST)
+ dhd_mode = (wl_iftype == WL_IF_TYPE_P2P_GC) ?
+ DHD_FLAG_P2P_GC_MODE : DHD_FLAG_P2P_GO_MODE;
+ DNGL_FUNC(dhd_cfg80211_set_p2p_info, (cfg, dhd_mode));
+#endif /* defined(BCMDONGLEHOST) */
+ /* reinitialize completion to clear previous count */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0))
+ INIT_COMPLETION(cfg->iface_disable);
+#else
+ init_completion(&cfg->iface_disable);
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0) */
+
+ return new_ndev->ieee80211_ptr;
+ }
+
+fail:
+ return NULL;
+}
+
+void
+wl_cfg80211_iface_state_ops(struct wireless_dev *wdev,
+ wl_interface_state_t state,
+ wl_iftype_t wl_iftype, u16 wl_mode)
+{
+ struct net_device *ndev;
+ struct bcm_cfg80211 *cfg;
+ dhd_pub_t *dhd;
+ s32 bssidx;
+
+ WL_DBG(("state:%s wl_iftype:%d mode:%d\n",
+ wl_if_state_strs[state], wl_iftype, wl_mode));
+ if (!wdev) {
+ WL_ERR(("wdev null\n"));
+ return;
+ }
+
+ if ((wl_iftype == WL_IF_TYPE_P2P_DISC) || (wl_iftype == WL_IF_TYPE_NAN_NMI)) {
+		/* P2P discovery and NAN NMI are netless devices that use a
+		 * hidden bsscfg interface in fw. Don't apply the iface ops
+		 * state changes for these interfaces.
+		 */
+ return;
+ }
+
+ cfg = wiphy_priv(wdev->wiphy);
+ ndev = wdev->netdev;
+ dhd = (dhd_pub_t *)(cfg->pub);
+
+ bssidx = wl_get_bssidx_by_wdev(cfg, wdev);
+ if (!ndev || (bssidx < 0)) {
+ WL_ERR(("ndev null. skip iface state ops\n"));
+ return;
+ }
+
+ switch (state) {
+ case WL_IF_CREATE_REQ:
+#ifdef WL_BCNRECV
+ /* check fakeapscan in progress then abort */
+ wl_android_bcnrecv_stop(ndev, WL_BCNRECV_CONCURRENCY);
+#endif /* WL_BCNRECV */
+ wl_cfgscan_cancel_scan(cfg);
+ wl_wlfc_enable(cfg, true);
+#ifdef WLTDLS
+ /* disable TDLS if number of connected interfaces is >= 1 */
+ wl_cfg80211_tdls_config(cfg, TDLS_STATE_IF_CREATE, false);
+#endif /* WLTDLS */
+ break;
+ case WL_IF_DELETE_REQ:
+#ifdef WL_WPS_SYNC
+ wl_wps_handle_ifdel(ndev);
+#endif /* WL_WPS_SYNC */
+ if (wl_get_drv_status(cfg, SCANNING, ndev)) {
+ /* Send completion for any pending scans */
+ wl_cfgscan_cancel_scan(cfg);
+ }
+
+#ifdef CUSTOM_SET_CPUCORE
+ dhd->chan_isvht80 &= ~DHD_FLAG_P2P_MODE;
+ if (!(dhd->chan_isvht80)) {
+ dhd_set_cpucore(dhd, FALSE);
+ }
+#endif /* CUSTOM_SET_CPUCORE */
+ wl_add_remove_pm_enable_work(cfg, WL_PM_WORKQ_DEL);
+ break;
+ case WL_IF_CREATE_DONE:
+ if (wl_mode == WL_MODE_BSS) {
+ /* Common code for sta type interfaces - STA, GC */
+			/* Enable firmware key buffering before sending 4-way M4 */
+ wldev_iovar_setint(ndev, "buf_key_b4_m4", 1);
+ }
+ if (wl_iftype == WL_IF_TYPE_P2P_GC) {
+ /* Disable firmware roaming for P2P interface */
+ wldev_iovar_setint(ndev, "roam_off", 1);
+ wldev_iovar_setint(ndev, "bcn_timeout", dhd->conf->bcn_timeout);
+ {
+ int assoc_retry = 3;
+#if defined(CUSTOM_ASSOC_RETRY_MAX)
+ assoc_retry = CUSTOM_ASSOC_RETRY_MAX;
+#endif /* CUSTOM_ASSOC_RETRY_MAX */
+ /* set retry_max to CUSTOM_ASSOC_RETRY_MAX(3) */
+ wldev_iovar_setint(ndev, "assoc_retry_max", assoc_retry);
+ }
+ }
+ if (wl_mode == WL_MODE_AP) {
+ /* Common code for AP/GO */
+#if defined(SUPPORT_AP_POWERSAVE) && defined(BCMDONGLEHOST)
+ dhd_set_ap_powersave(dhd, 0, TRUE);
+#endif /* SUPPORT_AP_POWERSAVE && BCMDONGLEHOST */
+ }
+ break;
+ case WL_IF_DELETE_DONE:
+#ifdef WLTDLS
+		/* Re-enable TDLS if the number of connected interfaces is <= 1 */
+ wl_cfg80211_tdls_config(cfg, TDLS_STATE_IF_DELETE, false);
+#endif /* WLTDLS */
+ wl_wlfc_enable(cfg, false);
+ break;
+ case WL_IF_CHANGE_REQ:
+ /* Flush existing IEs from firmware on role change */
+ wl_cfg80211_clear_per_bss_ies(cfg, wdev);
+ break;
+ case WL_IF_CHANGE_DONE:
+ if (wl_mode == WL_MODE_BSS) {
+#ifdef SUPPORT_AP_POWERSAVE
+ dhd_set_ap_powersave(dhd, 0, FALSE);
+#endif /* SUPPORT_AP_POWERSAVE */
+ /* Enable buffering of PTK key till EAPOL 4/4 is sent out */
+ wldev_iovar_setint(ndev, "buf_key_b4_m4", 1);
+ }
+ break;
+
+ default:
+ WL_ERR(("Unsupported state: %d\n", state));
+ return;
+ }
+}
+
+static s32
+wl_cfg80211_p2p_if_del(struct wiphy *wiphy, struct wireless_dev *wdev)
+{
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ s16 bssidx;
+ s16 err;
+ s32 cfg_type;
+ struct net_device *ndev;
+ long timeout;
+ struct ether_addr p2p_dev_addr = {{0}};
+
+ if (unlikely(!wl_get_drv_status(cfg, READY, bcmcfg_to_prmry_ndev(cfg)))) {
+ WL_INFORM_MEM(("device is not ready\n"));
+ return BCME_NOTFOUND;
+ }
+
+#ifdef WL_CFG80211_P2P_DEV_IF
+ if (wdev->iftype == NL80211_IFTYPE_P2P_DEVICE) {
+ /* Handle dedicated P2P discovery interface. */
+ return wl_cfgp2p_del_p2p_disc_if(wdev, cfg);
+ }
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+ /* Handle P2P Group Interface */
+ bssidx = wl_get_bssidx_by_wdev(cfg, wdev);
+ if (bssidx <= 0) {
+ WL_ERR(("bssidx not found\n"));
+ return BCME_NOTFOUND;
+ }
+ if (wl_cfgp2p_find_type(cfg, bssidx, &cfg_type) != BCME_OK) {
+ /* Couldn't find matching iftype */
+ WL_MEM(("non P2P interface\n"));
+ return BCME_NOTFOUND;
+ }
+
+ ndev = wdev->netdev;
+ (void)memcpy_s(p2p_dev_addr.octet, ETHER_ADDR_LEN,
+ ndev->dev_addr, ETHER_ADDR_LEN);
+
+ wl_clr_p2p_status(cfg, GO_NEG_PHASE);
+ wl_clr_p2p_status(cfg, IF_ADDING);
+
+ /* for GO */
+ if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_AP) {
+ wl_add_remove_eventmsg(ndev, WLC_E_PROBREQ_MSG, false);
+ cfg->p2p->p2p_go_count--;
+ /* disable interface before bsscfg free */
+ err = wl_cfgp2p_ifdisable(cfg, &p2p_dev_addr);
+		/* if fw doesn't support "ifdis",
+		 * do not wait for link down of ap mode
+		 */
+ if (err == 0) {
+ WL_ERR(("Wait for Link Down event for GO !!!\n"));
+ wait_for_completion_timeout(&cfg->iface_disable,
+ msecs_to_jiffies(500));
+ } else if (err != BCME_UNSUPPORTED) {
+ msleep(300);
+ }
+ } else {
+ /* GC case */
+ if (wl_get_drv_status(cfg, DISCONNECTING, ndev)) {
+ WL_ERR(("Wait for Link Down event for GC !\n"));
+ wait_for_completion_timeout
+ (&cfg->iface_disable, msecs_to_jiffies(500));
+ }
+
+ /* Force P2P disconnect in iface down context */
+ if (wl_get_drv_status(cfg, CONNECTED, ndev)) {
+ WL_INFORM_MEM(("force send disconnect event\n"));
+ CFG80211_DISCONNECTED(ndev, 0, NULL, 0, false, GFP_KERNEL);
+ wl_clr_drv_status(cfg, AUTHORIZED, ndev);
+ }
+ }
+
+ bzero(&cfg->if_event_info, sizeof(cfg->if_event_info));
+ wl_set_p2p_status(cfg, IF_DELETING);
+ DNGL_FUNC(dhd_cfg80211_clean_p2p_info, (cfg));
+
+ err = wl_cfgp2p_ifdel(cfg, &p2p_dev_addr);
+ if (unlikely(err)) {
+ WL_ERR(("P2P IFDEL operation failed, error code = %d\n", err));
+ err = BCME_ERROR;
+ goto fail;
+ } else {
+ /* Wait for WLC_E_IF event */
+ timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
+ ((wl_get_p2p_status(cfg, IF_DELETING) == false) &&
+ (cfg->if_event_info.valid)),
+ msecs_to_jiffies(MAX_WAIT_TIME));
+ if (timeout > 0 && !wl_get_p2p_status(cfg, IF_DELETING) &&
+ cfg->if_event_info.valid) {
+ WL_ERR(("P2P IFDEL operation done\n"));
+ err = BCME_OK;
+ } else {
+ WL_ERR(("IFDEL didn't complete properly\n"));
+ err = -EINVAL;
+ }
+ }
+
+fail:
+ /* Even in failure case, attempt to remove the host data structure.
+ * Firmware would be cleaned up via WiFi reset done by the
+ * user space from hang event context (for android only).
+ */
+ bzero(cfg->p2p->vir_ifname, IFNAMSIZ);
+ wl_to_p2p_bss_bssidx(cfg, cfg_type) = -1;
+ wl_to_p2p_bss_ndev(cfg, cfg_type) = NULL;
+ wl_clr_drv_status(cfg, CONNECTED, wl_to_p2p_bss_ndev(cfg, cfg_type));
+
+ /* Clear our saved WPS and P2P IEs for the discovery BSS */
+ wl_cfg80211_clear_p2p_disc_ies(cfg);
+#ifdef BCMDONGLEHOST
+ dhd_net_if_lock(ndev);
+#endif /* BCMDONGLEHOST */
+ if (cfg->if_event_info.ifidx) {
+ /* Remove interface except for primary ifidx */
+ wl_cfg80211_remove_if(cfg, cfg->if_event_info.ifidx, ndev, FALSE);
+ }
+#ifdef BCMDONGLEHOST
+ dhd_net_if_unlock(ndev);
+#endif /* BCMDONGLEHOST */
+ return err;
+}
+
+static struct wireless_dev *
+wl_cfg80211_add_monitor_if(struct wiphy *wiphy, const char *name)
+{
+#if defined(WL_ENABLE_P2P_IF) || defined(WL_CFG80211_P2P_DEV_IF)
+ WL_ERR(("wl_cfg80211_add_monitor_if: No more support monitor interface\n"));
+ return ERR_PTR(-EOPNOTSUPP);
+#else
+ struct wireless_dev *wdev;
+ struct net_device* ndev = NULL;
+
+ dhd_add_monitor(name, &ndev);
+
+ wdev = kzalloc(sizeof(*wdev), GFP_KERNEL);
+ if (!wdev) {
+ WL_ERR(("wireless_dev alloc failed! \n"));
+ goto fail;
+ }
+
+ wdev->wiphy = wiphy;
+ wdev->iftype = NL80211_IFTYPE_MONITOR;
+ ndev->ieee80211_ptr = wdev;
+ SET_NETDEV_DEV(ndev, wiphy_dev(wiphy));
+
+ WL_DBG(("wl_cfg80211_add_monitor_if net device returned: 0x%p\n", ndev));
+ return ndev->ieee80211_ptr;
+fail:
+ return ERR_PTR(-EOPNOTSUPP);
+#endif /* WL_ENABLE_P2P_IF || WL_CFG80211_P2P_DEV_IF */
+}
+
+static struct wireless_dev *
+wl_cfg80211_add_ibss(struct wiphy *wiphy, u16 wl_iftype, char const *name)
+{
+#ifdef WLAIBSS_MCHAN
+ /* AIBSS */
+ return bcm_cfg80211_add_ibss_if(wiphy, (char *)name);
+#else
+ /* Normal IBSS */
+ WL_ERR(("IBSS not supported on Virtual iface\n"));
+ return NULL;
+#endif
+}
+
+#ifdef DNGL_AXI_ERROR_LOGGING
+static s32
+_wl_cfg80211_check_axi_error(struct bcm_cfg80211 *cfg)
+{
+ s32 ret = BCME_OK;
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+ hnd_ext_trap_hdr_t *hdr;
+ int axi_host_error_size;
+ uint8 *new_dst;
+ uint32 *ext_data = dhd->extended_trap_data;
+ struct file *fp = NULL;
+ char *filename = DHD_COMMON_DUMP_PATH
+ DHD_DUMP_AXI_ERROR_FILENAME
+ DHD_DUMP_HAL_FILENAME_SUFFIX;
+
+ WL_ERR(("%s: starts to read %s. Axi error \n", __FUNCTION__, filename));
+
+ fp = filp_open(filename, O_RDONLY, 0);
+
+ if (IS_ERR(fp) || (fp == NULL)) {
+ WL_ERR(("%s: Couldn't read the file, err %ld,File [%s] No previous axi error \n",
+ __FUNCTION__, PTR_ERR(fp), filename));
+ return ret;
+ }
+
+ kernel_read_compat(fp, fp->f_pos, (char *)dhd->axi_err_dump, sizeof(dhd_axi_error_dump_t));
+ filp_close(fp, NULL);
+
+ /* Delete axi error info file */
+ if (dhd_file_delete(filename) < 0) {
+ WL_ERR(("%s(): Failed to delete file: %s\n", __FUNCTION__, filename));
+ return ret;
+ }
+ WL_ERR(("%s(): Success to delete file: %s\n", __FUNCTION__, filename));
+
+ if (dhd->axi_err_dump->etd_axi_error_v1.signature != HND_EXT_TRAP_AXIERROR_SIGNATURE) {
+ WL_ERR(("%s: Invalid AXI signature: 0x%x\n",
+ __FUNCTION__, dhd->axi_err_dump->etd_axi_error_v1.signature));
+ }
+
+ /* First word is original trap_data */
+ ext_data++;
+
+ /* Followed by the extended trap data header */
+ hdr = (hnd_ext_trap_hdr_t *)ext_data;
+ new_dst = hdr->data;
+
+ axi_host_error_size = sizeof(dhd->axi_err_dump->axid)
+ + sizeof(dhd->axi_err_dump->fault_address);
+
+ /* TAG_TRAP_AXI_HOST_INFO tlv : host's axid, fault address */
+ new_dst = bcm_write_tlv(TAG_TRAP_AXI_HOST_INFO,
+ (const void *)dhd->axi_err_dump,
+ axi_host_error_size, new_dst);
+
+ /* TAG_TRAP_AXI_ERROR tlv */
+ new_dst = bcm_write_tlv(TAG_TRAP_AXI_ERROR,
+ (const void *)&dhd->axi_err_dump->etd_axi_error_v1,
+ sizeof(dhd->axi_err_dump->etd_axi_error_v1), new_dst);
+ hdr->len = new_dst - hdr->data;
+
+ dhd->dongle_trap_occured = TRUE;
+#ifdef WL_CFGVENDOR_SEND_HANG_EVENT
+ copy_hang_info_trap(dhd);
+#endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
+ memset(dhd->axi_err_dump, 0, sizeof(dhd_axi_error_dump_t));
+
+ dhd->hang_reason = HANG_REASON_DONGLE_TRAP;
+ net_os_send_hang_message(bcmcfg_to_prmry_ndev(cfg));
+ ret = BCME_ERROR;
+ return ret;
+}
+#endif /* DNGL_AXI_ERROR_LOGGING */
+
+/* All Android/Linux private/vendor interface calls should make
+ * use of the API below for interface creation.
+ */
+struct wireless_dev *
+wl_cfg80211_add_if(struct bcm_cfg80211 *cfg,
+ struct net_device *primary_ndev,
+ wl_iftype_t wl_iftype, const char *name, u8 *mac)
+{
+ u8 mac_addr[ETH_ALEN];
+ s32 err = -ENODEV;
+ struct wireless_dev *wdev = NULL;
+ struct wiphy *wiphy;
+ s32 wl_mode;
+#if defined(BCMDONGLEHOST)
+ dhd_pub_t *dhd;
+#endif /* BCMDONGLEHOST */
+ wl_iftype_t macaddr_iftype = wl_iftype;
+
+ WL_INFORM_MEM(("if name: %s, wl_iftype:%d \n",
+ name ? name : "NULL", wl_iftype));
+ if (!cfg || !primary_ndev || !name) {
+ WL_ERR(("cfg/ndev/name ptr null\n"));
+ return NULL;
+ }
+ if (wl_cfg80211_get_wdev_from_ifname(cfg, name)) {
+ WL_ERR(("Interface name %s exists!\n", name));
+ return NULL;
+ }
+
+ wiphy = bcmcfg_to_wiphy(cfg);
+#if defined(BCMDONGLEHOST)
+ dhd = (dhd_pub_t *)(cfg->pub);
+ if (!dhd) {
+ return NULL;
+ }
+#endif /* BCMDONGLEHOST */
+
+ if (dhd->op_mode == DHD_FLAG_HOSTAP_MODE) {
+ WL_ERR(("Please check op_mode %d, name %s\n", dhd->op_mode, name));
+ return NULL;
+ }
+
+ if ((wl_mode = wl_iftype_to_mode(wl_iftype)) < 0) {
+ return NULL;
+ }
+ mutex_lock(&cfg->if_sync);
+#ifdef WL_NAN
+ if (wl_iftype == WL_IF_TYPE_NAN) {
+ /*
+ * Bypass the role conflict check for NDI and handle it
+ * from dp req and dp resp context
+ * because in aware comms, ndi gets created soon after nan enable.
+ */
+ } else
+#endif /* WL_NAN */
+#ifdef WL_IFACE_MGMT
+ /* Allow wdev interface creation for p2p discovery to avoid failures
+ * in user supplicant initialization. The role conflict rules will be
+ * applied from discovery context if userspace tries to use discovery.
+ */
+ if ((wl_iftype != WL_IF_TYPE_P2P_DISC) &&
+ (err = wl_cfg80211_handle_if_role_conflict(cfg, wl_iftype)) < 0) {
+ mutex_unlock(&cfg->if_sync);
+ return NULL;
+ }
+#endif /* WL_IFACE_MGMT */
+#ifdef DNGL_AXI_ERROR_LOGGING
+ /* Check the previous smmu fault error */
+ if ((err = _wl_cfg80211_check_axi_error(cfg)) < 0) {
+ mutex_unlock(&cfg->if_sync);
+ return NULL;
+ }
+#endif /* DNGL_AXI_ERROR_LOGGING */
+	/* Protect the interface op context */
+ /* Do pre-create ops */
+ wl_cfg80211_iface_state_ops(primary_ndev->ieee80211_ptr, WL_IF_CREATE_REQ,
+ wl_iftype, wl_mode);
+
+ if (strnicmp(name, SOFT_AP_IF_NAME, strlen(SOFT_AP_IF_NAME)) == 0) {
+ macaddr_iftype = WL_IF_TYPE_AP;
+ }
+
+ if (mac) {
+ /* If mac address is provided, use that */
+ memcpy(mac_addr, mac, ETH_ALEN);
+ } else if ((wl_get_vif_macaddr(cfg, macaddr_iftype, mac_addr) != BCME_OK)) {
+ /* Fetch the mac address to be used for virtual interface */
+ err = -EINVAL;
+ goto fail;
+ }
+
+ switch (wl_iftype) {
+ case WL_IF_TYPE_IBSS:
+ wdev = wl_cfg80211_add_ibss(wiphy, wl_iftype, name);
+ break;
+ case WL_IF_TYPE_MONITOR:
+ wdev = wl_cfg80211_add_monitor_if(wiphy, name);
+ break;
+ case WL_IF_TYPE_STA:
+ case WL_IF_TYPE_AP:
+ case WL_IF_TYPE_NAN:
+ if (cfg->iface_cnt >= (IFACE_MAX_CNT - 1)) {
+ WL_ERR(("iface_cnt exceeds max cnt. created iface_cnt: %d\n",
+ cfg->iface_cnt));
+ err = -ENOTSUPP;
+ goto fail;
+ }
+ wdev = wl_cfg80211_create_iface(cfg->wdev->wiphy,
+ wl_iftype, mac_addr, name);
+ break;
+ case WL_IF_TYPE_P2P_DISC:
+ case WL_IF_TYPE_P2P_GO:
+ /* Intentional fall through */
+ case WL_IF_TYPE_P2P_GC:
+ if (cfg->p2p_supported) {
+ wdev = wl_cfg80211_p2p_if_add(cfg, wl_iftype,
+ name, mac_addr, &err);
+ break;
+ }
+ /* Intentionally fall through for unsupported interface
+ * handling when firmware doesn't support p2p
+ */
+ default:
+ WL_ERR(("Unsupported interface type\n"));
+ err = -ENOTSUPP;
+ goto fail;
+ }
+
+ if (!wdev) {
+ WL_ERR(("vif create failed. err:%d\n", err));
+ if (err != -ENOTSUPP) {
+ err = -ENODEV;
+ }
+ goto fail;
+ }
+
+	/* Incremented here; ensure it is decremented back on failure paths */
+ cfg->vif_count++;
+
+ wl_cfg80211_iface_state_ops(wdev,
+ WL_IF_CREATE_DONE, wl_iftype, wl_mode);
+
+ WL_INFORM_MEM(("Vif created. dev->ifindex:%d"
+ " cfg_iftype:%d, vif_count:%d\n",
+ (wdev->netdev ? wdev->netdev->ifindex : 0xff),
+ wdev->iftype, cfg->vif_count));
+ mutex_unlock(&cfg->if_sync);
+ return wdev;
+
+fail:
+ wl_cfg80211_iface_state_ops(primary_ndev->ieee80211_ptr,
+ WL_IF_DELETE_REQ, wl_iftype, wl_mode);
+
+ if (err != -ENOTSUPP) {
+		/* For non-supported interfaces (-ENOTSUPP), the error is returned
+		 * as-is and the recovery steps below are skipped.
+ */
+#ifdef WL_CFGVENDOR_SEND_HANG_EVENT
+ wl_copy_hang_info_if_falure(primary_ndev, HANG_REASON_IFACE_DEL_FAILURE, err);
+#endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
+ SUPP_LOG(("IF_ADD fail. err:%d\n", err));
+ wl_flush_fw_log_buffer(primary_ndev, FW_LOGSET_MASK_ALL);
+#if defined(BCMDONGLEHOST)
+ if (dhd_query_bus_erros(dhd)) {
+ goto exit;
+ }
+ dhd->iface_op_failed = TRUE;
+#endif /* BCMDONGLEHOST */
+#if defined(DHD_DEBUG) && defined(BCMPCIE) && defined(DHD_FW_COREDUMP)
+ if (dhd->memdump_enabled) {
+ dhd->memdump_type = DUMP_TYPE_IFACE_OP_FAILURE;
+ dhd_bus_mem_dump(dhd);
+ }
+#endif /* DHD_DEBUG && BCMPCIE && DHD_FW_COREDUMP */
+
+#if defined(BCMDONGLEHOST) && defined(OEM_ANDROID)
+ /* If reached here, something wrong with DHD or firmware.
+ * There could be a chance that firmware is in bad state.
+ * Request the upper layer to do a Wi-Fi reset.
+ */
+ dhd->hang_reason = HANG_REASON_IFACE_ADD_FAILURE;
+ net_os_send_hang_message(bcmcfg_to_prmry_ndev(cfg));
+#endif /* BCMDONGLEHOST && OEM_ANDROID */
+
+ }
+exit:
+ mutex_unlock(&cfg->if_sync);
+ return NULL;
+}
+
+static s32
+wl_cfg80211_del_ibss(struct wiphy *wiphy, struct wireless_dev *wdev)
+{
+ WL_INFORM_MEM(("del ibss wdev_ptr:%p\n", wdev));
+#ifdef WLAIBSS_MCHAN
+ /* AIBSS */
+ return bcm_cfg80211_del_ibss_if(wiphy, wdev);
+#else
+ /* Normal IBSS */
+ return wl_cfg80211_del_iface(wiphy, wdev);
+#endif
+}
+
+s32
+wl_cfg80211_del_if(struct bcm_cfg80211 *cfg, struct net_device *primary_ndev,
+ struct wireless_dev *wdev, char *ifname)
+{
+ int ret = BCME_OK;
+ mutex_lock(&cfg->if_sync);
+ ret = _wl_cfg80211_del_if(cfg, primary_ndev, wdev, ifname);
+ mutex_unlock(&cfg->if_sync);
+ return ret;
+}
+
+s32
+_wl_cfg80211_del_if(struct bcm_cfg80211 *cfg, struct net_device *primary_ndev,
+ struct wireless_dev *wdev, char *ifname)
+{
+ int ret = BCME_OK;
+ s32 bssidx;
+ struct wiphy *wiphy;
+ u16 wl_mode;
+ u16 wl_iftype;
+ struct net_info *netinfo;
+#if defined(BCMDONGLEHOST)
+ dhd_pub_t *dhd;
+ BCM_REFERENCE(dhd);
+#endif /* BCMDONGLEHOST */
+
+ if (!cfg) {
+ return -EINVAL;
+ }
+
+#if defined(BCMDONGLEHOST)
+ dhd = (dhd_pub_t *)(cfg->pub);
+#endif /* BCMDONGLEHOST */
+
+ if (!wdev && ifname) {
+ /* If only ifname is provided, fetch corresponding wdev ptr from our
+ * internal data structure
+ */
+ wdev = wl_cfg80211_get_wdev_from_ifname(cfg, ifname);
+ }
+
+ /* Check whether we have a valid wdev ptr */
+ if (unlikely(!wdev)) {
+ WL_ERR(("wdev not found. '%s' does not exists\n", ifname));
+ return -ENODEV;
+ }
+
+ WL_INFORM_MEM(("del vif. wdev cfg_iftype:%d\n", wdev->iftype));
+
+ wiphy = wdev->wiphy;
+#ifdef WL_CFG80211_P2P_DEV_IF
+ if (wdev->iftype == NL80211_IFTYPE_P2P_DEVICE) {
+ /* p2p discovery would be de-initialized in stop p2p
+ * device context/from other virtual i/f creation context
+ * so netinfo list may not have any node corresponding to
+ * discovery I/F. Handle it before bssidx check.
+ */
+ ret = wl_cfg80211_p2p_if_del(wiphy, wdev);
+ if (unlikely(ret)) {
+ goto exit;
+ } else {
+ /* success case. return from here */
+ if (cfg->vif_count) {
+ cfg->vif_count--;
+ }
+ return BCME_OK;
+ }
+ }
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+ if ((netinfo = wl_get_netinfo_by_wdev(cfg, wdev)) == NULL) {
+ WL_ERR(("Find netinfo from wdev %p failed\n", wdev));
+ ret = -ENODEV;
+ goto exit;
+ }
+
+ if (!wdev->netdev) {
+ WL_ERR(("ndev null! \n"));
+ } else {
+ /* Disable tx before del */
+ netif_tx_disable(wdev->netdev);
+ }
+
+ wl_iftype = netinfo->iftype;
+ wl_mode = wl_iftype_to_mode(wl_iftype);
+ bssidx = netinfo->bssidx;
+ WL_DBG_MEM(("[IFDEL] cfg_iftype:%d wl_iftype:%d mode:%d bssidx:%d\n",
+ wdev->iftype, wl_iftype, wl_mode, bssidx));
+
+ /* Do pre-interface del ops */
+ wl_cfg80211_iface_state_ops(wdev, WL_IF_DELETE_REQ, wl_iftype, wl_mode);
+
+#ifdef PCIE_FULL_DONGLE
+	/* clean up sta info & flowrings corresponding to the iface */
+ dhd_net_del_flowrings_sta(dhd, wdev->netdev);
+#endif /* PCIE_FULL_DONGLE */
+
+ switch (wl_iftype) {
+ case WL_IF_TYPE_P2P_GO:
+ case WL_IF_TYPE_P2P_GC:
+ case WL_IF_TYPE_AP:
+ case WL_IF_TYPE_STA:
+ case WL_IF_TYPE_NAN:
+ ret = wl_cfg80211_del_iface(wiphy, wdev);
+ break;
+ case WL_IF_TYPE_IBSS:
+ ret = wl_cfg80211_del_ibss(wiphy, wdev);
+ break;
+
+ default:
+ WL_ERR(("Unsupported interface type\n"));
+ ret = BCME_ERROR;
+ }
+
+exit:
+ if (ret == BCME_OK) {
+ /* Successful case */
+ if (cfg->vif_count) {
+ cfg->vif_count--;
+ }
+ wl_cfg80211_iface_state_ops(primary_ndev->ieee80211_ptr,
+ WL_IF_DELETE_DONE, wl_iftype, wl_mode);
+#ifdef WL_NAN
+ if (!((cfg->nancfg->mac_rand) && (wl_iftype == WL_IF_TYPE_NAN)))
+#endif /* WL_NAN */
+ {
+ wl_release_vif_macaddr(cfg, wdev->netdev->dev_addr, wl_iftype);
+ }
+ WL_INFORM_MEM(("vif deleted. vif_count:%d\n", cfg->vif_count));
+ } else {
+ if (!wdev->netdev) {
+ WL_ERR(("ndev null! \n"));
+ } else {
+			/* IF del failed. Revert tx queue status */
+ netif_tx_start_all_queues(wdev->netdev);
+ }
+
+ /* Skip generating log files and sending HANG event
+ * if driver state is not READY
+ */
+ if (wl_get_drv_status(cfg, READY, bcmcfg_to_prmry_ndev(cfg))) {
+#ifdef WL_CFGVENDOR_SEND_HANG_EVENT
+ wl_copy_hang_info_if_falure(primary_ndev,
+ HANG_REASON_IFACE_DEL_FAILURE, ret);
+#endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
+ SUPP_LOG(("IF_DEL fail. err:%d\n", ret));
+ wl_flush_fw_log_buffer(primary_ndev, FW_LOGSET_MASK_ALL);
+#if defined(BCMDONGLEHOST)
+			/* If the dongle is down due to a previous hang or other
+			 * conditions, sending one more hang notification is not needed.
+			 */
+ if (dhd_query_bus_erros(dhd) || (ret == BCME_DONGLE_DOWN)) {
+ goto end;
+ }
+ dhd->iface_op_failed = TRUE;
+#if defined(DHD_FW_COREDUMP)
+ if (dhd->memdump_enabled && (ret != -EBADTYPE)) {
+ dhd->memdump_type = DUMP_TYPE_IFACE_OP_FAILURE;
+ dhd_bus_mem_dump(dhd);
+ }
+#endif /* DHD_FW_COREDUMP */
+#endif /* BCMDONGLEHOST */
+
+#if defined(BCMDONGLEHOST) && defined(OEM_ANDROID)
+ WL_ERR(("Notify hang event to upper layer \n"));
+ dhd->hang_reason = HANG_REASON_IFACE_DEL_FAILURE;
+ net_os_send_hang_message(bcmcfg_to_prmry_ndev(cfg));
+#endif /* BCMDONGLEHOST && OEM_ANDROID */
+
+ }
+ }
+end:
+ return ret;
+}
+
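+/* The notify handlers below form the producer side of a handshake with the
+ * interface create/delete paths above: the ioctl path arms IF_ADDING /
+ * IF_DELETING (or bss_pending_op), issues the iovar and sleeps on
+ * netif_change_event; the WLC_E_IF event path fills if_event_info,
+ * publishes it by setting ->valid after the OSL_SMP_WMB() barrier, and
+ * wakes the waiter.
+ */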
+s32
+wl_cfg80211_notify_ifadd(struct net_device *dev,
+ int ifidx, char *name, uint8 *mac, uint8 bssidx, uint8 role)
+{
+ bool ifadd_expected = FALSE;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ bool bss_pending_op = TRUE;
+
+	/* P2P may send WLC_E_IF_ADD and/or WLC_E_IF_CHANGE during IF updating ("p2p_ifupd"),
+	 * so redirect the IF_ADD event to ifchange as it is not a real "new" interface
+ */
+ if (wl_get_p2p_status(cfg, IF_CHANGING))
+ return wl_cfg80211_notify_ifchange(dev, ifidx, name, mac, bssidx);
+
+ /* Okay, we are expecting IF_ADD (as IF_ADDING is true) */
+ if (wl_get_p2p_status(cfg, IF_ADDING)) {
+ ifadd_expected = TRUE;
+ wl_clr_p2p_status(cfg, IF_ADDING);
+ } else if (cfg->bss_pending_op) {
+ ifadd_expected = TRUE;
+ bss_pending_op = FALSE;
+ }
+
+ if (ifadd_expected) {
+ wl_if_event_info *if_event_info = &cfg->if_event_info;
+
+ if_event_info->ifidx = ifidx;
+ if_event_info->bssidx = bssidx;
+ if_event_info->role = role;
+ strlcpy(if_event_info->name, name, sizeof(if_event_info->name));
+ if_event_info->name[IFNAMSIZ - 1] = '\0';
+ if (mac)
+ memcpy(if_event_info->mac, mac, ETHER_ADDR_LEN);
+
+		/* Update bss pending operation status */
+ if (!bss_pending_op) {
+ cfg->bss_pending_op = FALSE;
+ }
+ WL_INFORM_MEM(("IF_ADD ifidx:%d bssidx:%d role:%d\n",
+ ifidx, bssidx, role));
+ OSL_SMP_WMB();
+ if_event_info->valid = TRUE;
+ wake_up_interruptible(&cfg->netif_change_event);
+ return BCME_OK;
+ }
+
+ return BCME_ERROR;
+}
+
+s32
+wl_cfg80211_notify_ifdel(struct net_device *dev, int ifidx, char *name, uint8 *mac, uint8 bssidx)
+{
+ bool ifdel_expected = FALSE;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ wl_if_event_info *if_event_info = &cfg->if_event_info;
+ bool bss_pending_op = TRUE;
+
+ if (wl_get_p2p_status(cfg, IF_DELETING)) {
+ ifdel_expected = TRUE;
+ wl_clr_p2p_status(cfg, IF_DELETING);
+ } else if (cfg->bss_pending_op) {
+ ifdel_expected = TRUE;
+ bss_pending_op = FALSE;
+ }
+
+ if (ifdel_expected) {
+ if_event_info->ifidx = ifidx;
+ if_event_info->bssidx = bssidx;
+		/* Update bss pending operation status */
+ if (!bss_pending_op) {
+ cfg->bss_pending_op = FALSE;
+ }
+ WL_INFORM_MEM(("IF_DEL ifidx:%d bssidx:%d\n", ifidx, bssidx));
+ OSL_SMP_WMB();
+ if_event_info->valid = TRUE;
+ wake_up_interruptible(&cfg->netif_change_event);
+ return BCME_OK;
+ }
+
+ return BCME_ERROR;
+}
+
+s32
+wl_cfg80211_notify_ifchange(struct net_device * dev, int ifidx, char *name, uint8 *mac,
+ uint8 bssidx)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+
+ if (wl_get_p2p_status(cfg, IF_CHANGING)) {
+ wl_set_p2p_status(cfg, IF_CHANGED);
+ OSL_SMP_WMB();
+ wake_up_interruptible(&cfg->netif_change_event);
+ return BCME_OK;
+ }
+
+ return BCME_ERROR;
+}
+
+static s32 wl_set_rts(struct net_device *dev, u32 rts_threshold)
+{
+ s32 err = 0;
+
+ err = wldev_iovar_setint(dev, "rtsthresh", rts_threshold);
+ if (unlikely(err)) {
+ WL_ERR(("Error (%d)\n", err));
+ return err;
+ }
+ return err;
+}
+
+static s32 wl_set_frag(struct net_device *dev, u32 frag_threshold)
+{
+ s32 err = 0;
+
+ err = wldev_iovar_setint_bsscfg(dev, "fragthresh", frag_threshold, 0);
+ if (unlikely(err)) {
+ WL_ERR(("Error (%d)\n", err));
+ return err;
+ }
+ return err;
+}
+
+static s32 wl_set_retry(struct net_device *dev, u32 retry, bool l)
+{
+ s32 err = 0;
+ u32 cmd = (l ? WLC_SET_LRL : WLC_SET_SRL);
+
+#ifdef CUSTOM_LONG_RETRY_LIMIT
+ if ((cmd == WLC_SET_LRL) &&
+ (retry != CUSTOM_LONG_RETRY_LIMIT)) {
+ WL_DBG(("CUSTOM_LONG_RETRY_LIMIT is used.Ignore configuration"));
+ return err;
+ }
+#endif /* CUSTOM_LONG_RETRY_LIMIT */
+
+ retry = htod32(retry);
+ err = wldev_ioctl_set(dev, cmd, &retry, sizeof(retry));
+ if (unlikely(err)) {
+ WL_ERR(("cmd (%d) , error (%d)\n", cmd, err));
+ return err;
+ }
+ return err;
+}
+
+static s32 wl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
+{
+ struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)wiphy_priv(wiphy);
+ struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+ s32 err = 0;
+
+ RETURN_EIO_IF_NOT_UP(cfg);
+ WL_DBG(("Enter\n"));
+ if (changed & WIPHY_PARAM_RTS_THRESHOLD &&
+ (cfg->conf->rts_threshold != wiphy->rts_threshold)) {
+ cfg->conf->rts_threshold = wiphy->rts_threshold;
+ err = wl_set_rts(ndev, cfg->conf->rts_threshold);
+ if (err != BCME_OK)
+ return err;
+ }
+ if (changed & WIPHY_PARAM_FRAG_THRESHOLD &&
+ (cfg->conf->frag_threshold != wiphy->frag_threshold)) {
+ cfg->conf->frag_threshold = wiphy->frag_threshold;
+ err = wl_set_frag(ndev, cfg->conf->frag_threshold);
+ if (err != BCME_OK)
+ return err;
+ }
+ if (changed & WIPHY_PARAM_RETRY_LONG &&
+ (cfg->conf->retry_long != wiphy->retry_long)) {
+ cfg->conf->retry_long = wiphy->retry_long;
+ err = wl_set_retry(ndev, cfg->conf->retry_long, true);
+ if (err != BCME_OK)
+ return err;
+ }
+ if (changed & WIPHY_PARAM_RETRY_SHORT &&
+ (cfg->conf->retry_short != wiphy->retry_short)) {
+ cfg->conf->retry_short = wiphy->retry_short;
+ err = wl_set_retry(ndev, cfg->conf->retry_short, false);
+ if (err != BCME_OK) {
+ return err;
+ }
+ }
+
+ return err;
+}
+
+void
+wl_cfg80211_ibss_vsie_set_buffer(struct net_device *dev, vndr_ie_setbuf_t *ibss_vsie,
+ int ibss_vsie_len)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+
+ if (cfg != NULL && ibss_vsie != NULL) {
+ if (cfg->ibss_vsie != NULL) {
+ MFREE(cfg->osh, cfg->ibss_vsie, cfg->ibss_vsie_len);
+ }
+ cfg->ibss_vsie = ibss_vsie;
+ cfg->ibss_vsie_len = ibss_vsie_len;
+ }
+}
+
+static void
+wl_cfg80211_ibss_vsie_free(struct bcm_cfg80211 *cfg)
+{
+	/* free & initialize VSIE (Vendor Specific IE) */
+ if (cfg->ibss_vsie != NULL) {
+ MFREE(cfg->osh, cfg->ibss_vsie, cfg->ibss_vsie_len);
+ cfg->ibss_vsie_len = 0;
+ }
+}
+
+s32
+wl_cfg80211_ibss_vsie_delete(struct net_device *dev)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ char *ioctl_buf = NULL;
+ s32 ret = BCME_OK, bssidx;
+
+ if (cfg != NULL && cfg->ibss_vsie != NULL) {
+ ioctl_buf = (char *)MALLOC(cfg->osh, WLC_IOCTL_MEDLEN);
+ if (!ioctl_buf) {
+ WL_ERR(("ioctl memory alloc failed\n"));
+ return -ENOMEM;
+ }
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+ WL_ERR(("Find index failed\n"));
+ ret = BCME_ERROR;
+ goto end;
+ }
+ /* change the command from "add" to "del" */
+ strlcpy(cfg->ibss_vsie->cmd, "del", sizeof(cfg->ibss_vsie->cmd));
+
+ ret = wldev_iovar_setbuf_bsscfg(dev, "vndr_ie",
+ cfg->ibss_vsie, cfg->ibss_vsie_len,
+ ioctl_buf, WLC_IOCTL_MEDLEN, bssidx, NULL);
+ WL_ERR(("ret=%d\n", ret));
+
+ if (ret == BCME_OK) {
+ /* Free & initialize VSIE */
+ MFREE(cfg->osh, cfg->ibss_vsie, cfg->ibss_vsie_len);
+ cfg->ibss_vsie_len = 0;
+ }
+end:
+ if (ioctl_buf) {
+ MFREE(cfg->osh, ioctl_buf, WLC_IOCTL_MEDLEN);
+ }
+ }
+
+ return ret;
+}
+
+#ifdef WLAIBSS_MCHAN
+static bcm_struct_cfgdev*
+bcm_cfg80211_add_ibss_if(struct wiphy *wiphy, char *name)
+{
+ int err = 0;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct wireless_dev* wdev = NULL;
+ struct net_device *new_ndev = NULL;
+ struct net_device *primary_ndev = NULL;
+ long timeout;
+ wl_aibss_if_t aibss_if;
+ wl_if_event_info *event = NULL;
+
+ if (cfg->ibss_cfgdev != NULL) {
+ WL_ERR(("IBSS interface %s already exists\n", name));
+ return NULL;
+ }
+
+ WL_ERR(("Try to create IBSS interface %s\n", name));
+ primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+ /* generate a new MAC address for the IBSS interface */
+ get_primary_mac(cfg, &cfg->ibss_if_addr);
+ cfg->ibss_if_addr.octet[4] ^= 0x40;
+ bzero(&aibss_if, sizeof(aibss_if));
+ memcpy(&aibss_if.addr, &cfg->ibss_if_addr, sizeof(aibss_if.addr));
+ aibss_if.chspec = 0;
+ aibss_if.len = sizeof(aibss_if);
+
+ cfg->bss_pending_op = TRUE;
+ bzero(&cfg->if_event_info, sizeof(cfg->if_event_info));
+ err = wldev_iovar_setbuf(primary_ndev, "aibss_ifadd", &aibss_if,
+ sizeof(aibss_if), cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+ if (err) {
+ WL_ERR(("IOVAR aibss_ifadd failed with error %d\n", err));
+ goto fail;
+ }
+ timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
+ !cfg->bss_pending_op, msecs_to_jiffies(MAX_WAIT_TIME));
+ if (timeout <= 0 || cfg->bss_pending_op)
+ goto fail;
+
+ event = &cfg->if_event_info;
+	/* By calling wl_cfg80211_allocate_if (eventually dhd_allocate_if) we hand control
+	 * of this net_device interface over to dhd_linux, so the interface is managed by
+	 * dhd_linux and will be freed by dhd_detach unless it gets unregistered before that.
+	 * The wireless_dev instance new_ndev->ieee80211_ptr associated with this net_device
+	 * will be freed by wl_dealloc_netinfo.
+	 */
+ new_ndev = wl_cfg80211_allocate_if(cfg, event->ifidx, event->name,
+ event->mac, event->bssidx, event->name);
+ if (new_ndev == NULL)
+ goto fail;
+ wdev = (struct wireless_dev *)MALLOCZ(cfg->osh, sizeof(*wdev));
+ if (wdev == NULL)
+ goto fail;
+ wdev->wiphy = wiphy;
+ wdev->iftype = NL80211_IFTYPE_ADHOC;
+ wdev->netdev = new_ndev;
+ new_ndev->ieee80211_ptr = wdev;
+ SET_NETDEV_DEV(new_ndev, wiphy_dev(wdev->wiphy));
+
+ /* rtnl lock must have been acquired, if this is not the case, wl_cfg80211_register_if
+ * needs to be modified to take one parameter (bool need_rtnl_lock)
+ */
+ ASSERT_RTNL();
+ if (wl_cfg80211_register_if(cfg, event->ifidx, new_ndev, FALSE) != BCME_OK)
+ goto fail;
+
+ wl_alloc_netinfo(cfg, new_ndev, wdev, WL_IF_TYPE_IBSS,
+ PM_ENABLE, event->bssidx, event->ifidx);
+ cfg->ibss_cfgdev = ndev_to_cfgdev(new_ndev);
+ WL_ERR(("IBSS interface %s created\n", new_ndev->name));
+ return cfg->ibss_cfgdev;
+
+fail:
+ WL_ERR(("failed to create IBSS interface %s \n", name));
+ cfg->bss_pending_op = FALSE;
+ if (new_ndev)
+ wl_cfg80211_remove_if(cfg, event->ifidx, new_ndev, FALSE);
+ if (wdev) {
+ MFREE(cfg->osh, wdev, sizeof(*wdev));
+ }
+ return NULL;
+}
+
+static s32
+bcm_cfg80211_del_ibss_if(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev)
+{
+ int err = 0;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct net_device *ndev = NULL;
+ struct net_device *primary_ndev = NULL;
+ long timeout;
+
+ if (!cfgdev || cfg->ibss_cfgdev != cfgdev || ETHER_ISNULLADDR(&cfg->ibss_if_addr.octet))
+ return -EINVAL;
+ ndev = (struct net_device *)cfgdev_to_ndev(cfg->ibss_cfgdev);
+ primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+
+ cfg->bss_pending_op = TRUE;
+ bzero(&cfg->if_event_info, sizeof(cfg->if_event_info));
+ err = wldev_iovar_setbuf(primary_ndev, "aibss_ifdel", &cfg->ibss_if_addr,
+ sizeof(cfg->ibss_if_addr), cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+ if (err) {
+ WL_ERR(("IOVAR aibss_ifdel failed with error %d\n", err));
+ goto fail;
+ }
+ timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
+ !cfg->bss_pending_op, msecs_to_jiffies(MAX_WAIT_TIME));
+ if (timeout <= 0 || cfg->bss_pending_op) {
+ WL_ERR(("timeout in waiting IF_DEL event\n"));
+ goto fail;
+ }
+
+ wl_cfg80211_remove_if(cfg, cfg->if_event_info.ifidx, ndev, FALSE);
+ cfg->ibss_cfgdev = NULL;
+ return 0;
+
+fail:
+ cfg->bss_pending_op = FALSE;
+ return -1;
+}
+#endif /* WLAIBSS_MCHAN */
+
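+/*
+ * Map a host-side wl_iftype_t to the firmware WL_INTERFACE_TYPE_* value
+ * used by the "interface_create" iovar. Returns -EINVAL for types the
+ * firmware interface API does not support.
+ */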
+s32
+wl_cfg80211_to_fw_iftype(wl_iftype_t iftype)
+{
+ s32 ret = BCME_ERROR;
+
+ switch (iftype) {
+ case WL_IF_TYPE_AP:
+ ret = WL_INTERFACE_TYPE_AP;
+ break;
+ case WL_IF_TYPE_STA:
+ ret = WL_INTERFACE_TYPE_STA;
+ break;
+ case WL_IF_TYPE_NAN_NMI:
+ case WL_IF_TYPE_NAN:
+ ret = WL_INTERFACE_TYPE_NAN;
+ break;
+ case WL_IF_TYPE_P2P_DISC:
+ ret = WL_INTERFACE_TYPE_P2P_DISC;
+ break;
+ case WL_IF_TYPE_P2P_GO:
+ ret = WL_INTERFACE_TYPE_P2P_GO;
+ break;
+ case WL_IF_TYPE_P2P_GC:
+ ret = WL_INTERFACE_TYPE_P2P_GC;
+ break;
+
+#ifdef WLAWDL
+ case WL_IF_TYPE_AWDL:
+ ret = WL_INTERFACE_TYPE_AWDL;
+ break;
+#endif /* WLAWDL */
+
+ default:
+ WL_ERR(("Unsupported type:%d \n", iftype));
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
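+/*
+ * Create or remove a firmware interface via the versioned
+ * "interface_create"/"interface_remove" iovars. For create, the iovar is
+ * first probed to discover the supported structure version (v0 for legacy
+ * chips, otherwise v3/v2/v1) and the request is issued in that format.
+ * Returns the allocated bsscfg index on success, or a negative error.
+ */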
+s32
+wl_cfg80211_interface_ops(struct bcm_cfg80211 *cfg,
+ struct net_device *ndev, s32 bsscfg_idx,
+ wl_iftype_t cfg_iftype, s32 del, u8 *addr)
+{
+ s32 ret;
+ struct wl_interface_create_v2 iface;
+ wl_interface_create_v3_t iface_v3;
+ wl_interface_create_v0_t iface_v0;
+ struct wl_interface_info_v1 *info;
+ wl_interface_info_v2_t *info_v2;
+ wl_interface_info_v0_t *info_v0;
+ uint32 ifflags = 0;
+ bool use_iface_info_v2 = false;
+ u8 ioctl_buf[WLC_IOCTL_SMLEN];
+ s32 iftype;
+#ifdef WLEASYMESH
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+#endif /* WLEASYMESH */
+
+ if (del) {
+ ret = wldev_iovar_setbuf(ndev, "interface_remove",
+ NULL, 0, ioctl_buf, sizeof(ioctl_buf), NULL);
+ if (unlikely(ret))
+ WL_ERR(("Interface remove failed!! ret %d\n", ret));
+ return ret;
+ }
+
+ /* Interface create */
+ bzero(&iface, sizeof(iface));
+	/*
+	 * The flags field is still used along with iftype in order to keep older
+	 * FW versions working with the latest app changes.
+	 */
+
+ iftype = wl_cfg80211_to_fw_iftype(cfg_iftype);
+ if (iftype < 0) {
+ return -ENOTSUPP;
+ }
+
+ if (addr) {
+ ifflags |= WL_INTERFACE_MAC_USE;
+ if (wl_legacy_chip_check(ndev)) {
+ iface.flags = ifflags;
+ memcpy(&iface.mac_addr.octet, addr, ETH_ALEN);
+ }
+ }
+#ifdef WLEASYMESH
+ if (dhd->conf->fw_type == FW_TYPE_EZMESH && iftype == WL_INTERFACE_TYPE_AP) {
+ // this can be removed for 4359
+ ifflags |= WL_INTERFACE_TYPE_AP;
+ }
+#endif /* WLEASYMESH */
+
+ /* Pass ver = 0 for fetching the interface_create iovar version */
+ if (wl_legacy_chip_check(ndev)) {
+ bzero(&iface_v0, sizeof(iface_v0));
+ iface_v0.ver = WL_INTERFACE_CREATE_VER_0;
+ iface_v0.flags = iftype | ifflags;
+ if (addr) {
+ memcpy(&iface_v0.mac_addr.octet, addr, ETH_ALEN);
+ }
+ ret = wldev_iovar_getbuf(ndev, "interface_create",
+ &iface_v0, sizeof(struct wl_interface_create),
+ ioctl_buf, sizeof(ioctl_buf), NULL);
+ if (ret == 0) {
+ info_v0 = (wl_interface_info_v0_t *)ioctl_buf;
+ ret = info_v0->bsscfgidx;
+ goto exit;
+ }
+ } else {
+ ret = wldev_iovar_getbuf(ndev, "interface_create",
+ &iface, sizeof(struct wl_interface_create_v2),
+ ioctl_buf, sizeof(ioctl_buf), NULL);
+ }
+ if (ret == BCME_UNSUPPORTED) {
+ WL_ERR(("interface_create iovar not supported\n"));
+ return ret;
+ } else if ((ret == 0) && *((uint32 *)ioctl_buf) == WL_INTERFACE_CREATE_VER_3) {
+ WL_DBG(("interface_create version 3. flags:0x%x \n", ifflags));
+ use_iface_info_v2 = true;
+ bzero(&iface_v3, sizeof(wl_interface_create_v3_t));
+ iface_v3.ver = WL_INTERFACE_CREATE_VER_3;
+ iface_v3.iftype = iftype;
+ iface_v3.flags = ifflags;
+ if (addr) {
+ memcpy(&iface_v3.mac_addr.octet, addr, ETH_ALEN);
+ }
+ ret = wldev_iovar_getbuf(ndev, "interface_create",
+ &iface_v3, sizeof(wl_interface_create_v3_t),
+ ioctl_buf, sizeof(ioctl_buf), NULL);
+ } else if ((ret == 0) &&
+ ((*((uint32 *)ioctl_buf) == WL_INTERFACE_CREATE_VER_2) ||
+ (*((uint32 *)ioctl_buf) == WL_INTERFACE_CREATE_VER_1))) {
+		/* Legacy firmware may service the iovar and return the data
+		 * directly. Do not retry if the returned version is not one we
+		 * expect; on any other error, attempt iovar version 2. */
+ WL_DBG(("interface_create version 2. get_ver:%d ifflags:0x%x\n", ret, ifflags));
+ iface.ver = WL_INTERFACE_CREATE_VER_2;
+ iface.iftype = iftype;
+ iface.flags = ifflags;
+ if (addr) {
+ memcpy(&iface.mac_addr.octet, addr, ETH_ALEN);
+ }
+ ret = wldev_iovar_getbuf(ndev, "interface_create",
+ &iface, sizeof(struct wl_interface_create_v2),
+ ioctl_buf, sizeof(ioctl_buf), NULL);
+ } else {
+		/* interface_create version 0: the chipnum needs to be added to
+		 * bcmdevs_legacy.h and a new chip check added to wl_legacy_chip_check() */
+ WL_DBG(("interface_create version 0. get_ver:%d ifflags:0x%x\n", ret, ifflags));
+ }
+
+ if (unlikely(ret)) {
+ WL_ERR(("Interface create failed!! ret %d\n", ret));
+ return ret;
+ }
+
+ /* success case */
+ if (use_iface_info_v2 == true) {
+ info_v2 = (wl_interface_info_v2_t *)ioctl_buf;
+ ret = info_v2->bsscfgidx;
+ } else {
+ /* Use v1 struct */
+ info = (struct wl_interface_info_v1 *)ioctl_buf;
+ ret = info->bsscfgidx;
+ }
+
+exit:
+#ifdef WLEASYMESH
+	// Give fw more time to process interface_create
+ if (dhd->conf->fw_type == FW_TYPE_EZMESH) {
+ wl_delay(500);
+ }
+#endif /* WLEASYMESH */
+ WL_DBG(("wl interface create success!! bssidx:%d \n", ret));
+ return ret;
+}
+
+#ifdef CUSTOMER_HW6
+#define BCM4355_REV_C1 0x0c
+bool
+wl_customer6_legacy_chip_check(struct bcm_cfg80211 *cfg,
+ struct net_device *ndev)
+{
+ u32 chipnum;
+ wlc_rev_info_t revinfo;
+ int ret;
+
+ /* Get the device rev info */
+ bzero(&revinfo, sizeof(revinfo));
+ ret = wldev_ioctl_get(ndev, WLC_GET_REVINFO, &revinfo, sizeof(revinfo));
+ if (ret < 0) {
+ WL_ERR(("wl_customer6_legacy_chip_check: GET revinfo FAILED. ret:%d\n", ret));
+ ASSERT(0);
+ return false;
+ }
+
+ chipnum = revinfo.chipnum;
+ WL_DBG(("wl_customer6_legacy_chip_check: GET_REVINFO device 0x%x, vendor 0x%x,"
+ " chipnum 0x%x\n",
+ dtoh32(revinfo.deviceid), dtoh32(revinfo.vendorid), dtoh32(chipnum)));
+ if (
+#ifdef BCM4350_CHIP_ID
+ (chipnum == BCM4350_CHIP_ID) ||
+#endif /* BCM4350_CHIP_ID */
+#ifdef BCM4355_CHIP_ID
+ ((chipnum == BCM4355_CHIP_ID) && (revinfo.chiprev < BCM4355_REV_C1)) ||
+#endif /* BCM4355_CHIP_ID */
+#ifdef BCM4345_CHIP_ID
+ (chipnum == BCM4345_CHIP_ID) ||
+#endif /* BCM4345_CHIP_ID */
+ false) {
+ /* WAR required */
+ WL_DBG(("%s: Customer6 legacy chip identified\n", __FUNCTION__));
+ return true;
+ }
+
+ return false;
+}
+#endif /* CUSTOMER_HW6 */
+
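+/*
+ * Fallback bsscfg create/delete path using the "bss" iovar, for firmware
+ * that does not support "interface_create" (i.e. returns BCME_UNSUPPORTED).
+ */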
+s32
+wl_cfg80211_add_del_bss(struct bcm_cfg80211 *cfg,
+ struct net_device *ndev, s32 bsscfg_idx,
+ wl_iftype_t brcm_iftype, s32 del, u8 *addr)
+{
+ s32 ret = BCME_OK;
+ s32 val = 0;
+
+ struct {
+ s32 cfg;
+ s32 val;
+ struct ether_addr ea;
+ } bss_setbuf;
+
+ WL_DBG(("wl_iftype:%d del:%d \n", brcm_iftype, del));
+
+ bzero(&bss_setbuf, sizeof(bss_setbuf));
+
+ /* AP=2, STA=3, up=1, down=0, val=-1 */
+ if (del) {
+ val = WLC_AP_IOV_OP_DELETE;
+ } else if (brcm_iftype == WL_IF_TYPE_AP) {
+ /* Add/role change to AP Interface */
+ WL_DBG(("Adding AP Interface \n"));
+ val = WLC_AP_IOV_OP_MANUAL_AP_BSSCFG_CREATE;
+ } else if (brcm_iftype == WL_IF_TYPE_STA) {
+ /* Add/role change to STA Interface */
+ WL_DBG(("Adding STA Interface \n"));
+ val = WLC_AP_IOV_OP_MANUAL_STA_BSSCFG_CREATE;
+ } else {
+ WL_ERR((" add_del_bss NOT supported for IFACE type:0x%x", brcm_iftype));
+ return -EINVAL;
+ }
+
+ if (!del) {
+ wl_ext_bss_iovar_war(ndev, &val);
+ }
+
+ bss_setbuf.cfg = htod32(bsscfg_idx);
+ bss_setbuf.val = htod32(val);
+
+ if (addr) {
+ memcpy(&bss_setbuf.ea.octet, addr, ETH_ALEN);
+ }
+
+ WL_MSG(ndev->name, "wl bss %d bssidx:%d\n", val, bsscfg_idx);
+ ret = wldev_iovar_setbuf(ndev, "bss", &bss_setbuf, sizeof(bss_setbuf),
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+ if (ret != 0)
+ WL_ERR(("'bss %d' failed with %d\n", val, ret));
+
+ return ret;
+}
+
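+/* Bring the given bsscfg up or down; equivalent to "wl bss -C <idx> up|down" */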
+s32
+wl_cfg80211_bss_up(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bsscfg_idx, s32 bss_up)
+{
+ s32 ret = BCME_OK;
+ s32 val = bss_up ? 1 : 0;
+
+ struct {
+ s32 cfg;
+ s32 val;
+ } bss_setbuf;
+
+ bss_setbuf.cfg = htod32(bsscfg_idx);
+ bss_setbuf.val = htod32(val);
+
+ WL_INFORM_MEM(("wl bss -C %d %s\n", bsscfg_idx, bss_up ? "up" : "down"));
+ ret = wldev_iovar_setbuf(ndev, "bss", &bss_setbuf, sizeof(bss_setbuf),
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+
+ if (ret != 0) {
+ WL_ERR(("'bss %d' failed with %d\n", bss_up, ret));
+ }
+
+ return ret;
+}
+
+bool
+wl_cfg80211_bss_isup(struct net_device *ndev, int bsscfg_idx)
+{
+ s32 result, val;
+ bool isup = false;
+ s8 getbuf[64];
+
+ /* Check if the BSS is up */
+ *(int*)getbuf = -1;
+ result = wldev_iovar_getbuf_bsscfg(ndev, "bss", &bsscfg_idx,
+ sizeof(bsscfg_idx), getbuf, sizeof(getbuf), 0, NULL);
+ if (result != 0) {
+ WL_ERR(("'cfg bss -C %d' failed: %d\n", bsscfg_idx, result));
+ WL_ERR(("NOTE: this ioctl error is normal "
+ "when the BSS has not been created yet.\n"));
+ } else {
+ val = *(int*)getbuf;
+ val = dtoh32(val);
+ WL_DBG(("wl bss -C %d = %d\n", bsscfg_idx, val));
+ isup = (val ? TRUE : FALSE);
+ }
+ return isup;
+}
+
+s32
+wl_iftype_to_mode(wl_iftype_t iftype)
+{
+ s32 mode = BCME_ERROR;
+
+ switch (iftype) {
+ case WL_IF_TYPE_STA:
+ case WL_IF_TYPE_P2P_GC:
+ case WL_IF_TYPE_P2P_DISC:
+ mode = WL_MODE_BSS;
+ break;
+ case WL_IF_TYPE_AP:
+ case WL_IF_TYPE_P2P_GO:
+ mode = WL_MODE_AP;
+ break;
+ case WL_IF_TYPE_NAN:
+ mode = WL_MODE_NAN;
+ break;
+
+#ifdef WLAWDL
+ case WL_IF_TYPE_AWDL:
+ mode = WL_MODE_AWDL;
+ break;
+#endif /* WLAWDL */
+
+ case WL_IF_TYPE_AIBSS:
+ /* Intentional fall through */
+ case WL_IF_TYPE_IBSS:
+ mode = WL_MODE_IBSS;
+ break;
+#ifdef WLMESH_CFG80211
+ case WL_IF_TYPE_MESH:
+ mode = WL_MODE_MESH;
+ break;
+#endif /* WLMESH_CFG80211 */
+ default:
+ WL_ERR(("Unsupported type:%d\n", iftype));
+ break;
+ }
+ return mode;
+}
+
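+/*
+ * Translate an nl80211 interface type into the driver's wl_iftype role and
+ * wl mode pair. Monitor mode is rejected as unsupported.
+ */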
+s32
+cfg80211_to_wl_iftype(uint16 type, uint16 *role, uint16 *mode)
+{
+ switch (type) {
+ case NL80211_IFTYPE_STATION:
+ *role = WL_IF_TYPE_STA;
+ *mode = WL_MODE_BSS;
+ break;
+ case NL80211_IFTYPE_AP:
+ *role = WL_IF_TYPE_AP;
+ *mode = WL_MODE_AP;
+ break;
+#ifdef WL_CFG80211_P2P_DEV_IF
+ case NL80211_IFTYPE_P2P_DEVICE:
+ *role = WL_IF_TYPE_P2P_DISC;
+ *mode = WL_MODE_BSS;
+ break;
+#endif /* WL_CFG80211_P2P_DEV_IF */
+ case NL80211_IFTYPE_P2P_GO:
+ *role = WL_IF_TYPE_P2P_GO;
+ *mode = WL_MODE_AP;
+ break;
+ case NL80211_IFTYPE_P2P_CLIENT:
+ *role = WL_IF_TYPE_P2P_GC;
+ *mode = WL_MODE_BSS;
+ break;
+ case NL80211_IFTYPE_MONITOR:
+ WL_ERR(("Unsupported mode \n"));
+ return BCME_UNSUPPORTED;
+ case NL80211_IFTYPE_ADHOC:
+ *role = WL_IF_TYPE_IBSS;
+ *mode = WL_MODE_IBSS;
+ break;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0))
+ case NL80211_IFTYPE_NAN:
+ *role = WL_IF_TYPE_NAN;
+ *mode = WL_MODE_NAN;
+ break;
+#endif
+#ifdef WLMESH_CFG80211
+ case NL80211_IFTYPE_MESH_POINT:
+ *role = WLC_E_IF_ROLE_AP;
+ *mode = WL_MODE_MESH;
+ break;
+#endif /* WLMESH_CFG80211 */
+ default:
+ WL_ERR(("Unknown interface type:0x%x\n", type));
+ return BCME_ERROR;
+ }
+ return BCME_OK;
+}
+
+static s32
+wl_role_to_cfg80211_type(uint16 role, uint16 *wl_iftype, uint16 *mode)
+{
+ switch (role) {
+
+#ifdef WLAWDL
+ case WLC_E_IF_ROLE_AWDL:
+		/* Since there is no corresponding iftype in the cfg80211 stack,
+		 * map this iftype to station.
+		 */
+ *wl_iftype = WL_IF_TYPE_AWDL;
+ *mode = WL_MODE_AWDL;
+ return NL80211_IFTYPE_STATION;
+#endif /* WLAWDL */
+
+ case WLC_E_IF_ROLE_STA:
+ *wl_iftype = WL_IF_TYPE_STA;
+ *mode = WL_MODE_BSS;
+ return NL80211_IFTYPE_STATION;
+ case WLC_E_IF_ROLE_AP:
+ *wl_iftype = WL_IF_TYPE_AP;
+ *mode = WL_MODE_AP;
+ return NL80211_IFTYPE_AP;
+ case WLC_E_IF_ROLE_P2P_GO:
+ *wl_iftype = WL_IF_TYPE_P2P_GO;
+ *mode = WL_MODE_AP;
+ return NL80211_IFTYPE_P2P_GO;
+ case WLC_E_IF_ROLE_P2P_CLIENT:
+ *wl_iftype = WL_IF_TYPE_P2P_GC;
+ *mode = WL_MODE_BSS;
+ return NL80211_IFTYPE_P2P_CLIENT;
+ case WLC_E_IF_ROLE_IBSS:
+ *wl_iftype = WL_IF_TYPE_IBSS;
+ *mode = WL_MODE_IBSS;
+ return NL80211_IFTYPE_ADHOC;
+ case WLC_E_IF_ROLE_NAN:
+ *wl_iftype = WL_IF_TYPE_NAN;
+ *mode = WL_MODE_NAN;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)) && defined(WL_CFG80211_NAN)
+ /* NL80211_IFTYPE_NAN should only be used with CFG80211 NAN MGMT
+ * For Vendor HAL based NAN implementation, continue advertising
+ * as a STA interface
+ */
+ return NL80211_IFTYPE_NAN;
+#else
+ return NL80211_IFTYPE_STATION;
+#endif /* ((LINUX_VER >= KERNEL_VERSION(4, 9, 0))) && WL_CFG80211_NAN */
+#ifdef WLDWDS
+ case WLC_E_IF_ROLE_WDS:
+ *wl_iftype = WL_IF_TYPE_AP;
+ *mode = WL_MODE_AP;
+ return NL80211_IFTYPE_AP;
+#endif
+#ifdef WLMESH_CFG80211
+ case WLC_E_IF_ROLE_MESH:
+ *wl_iftype = WL_IF_TYPE_MESH;
+ *mode = WL_MODE_MESH;
+ return NL80211_IFTYPE_MESH_POINT;
+#endif /* WLMESH_CFG80211 */
+
+ default:
+		WL_ERR(("Unknown interface role:0x%x\n", role));
+ return BCME_ERROR;
+ }
+}
+
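+/*
+ * Host-side follow-up for a firmware IF_ADD event: allocate and register
+ * the net_device/wireless_dev for the new virtual interface, picking a
+ * name and MAC address if none were supplied, and apply the initial
+ * mode/infra configuration.
+ */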
+struct net_device *
+wl_cfg80211_post_ifcreate(struct net_device *ndev,
+ wl_if_event_info *event, u8 *addr,
+ const char *name, bool rtnl_lock_reqd)
+{
+ struct bcm_cfg80211 *cfg;
+ struct net_device *primary_ndev;
+ struct net_device *new_ndev = NULL;
+ struct wireless_dev *wdev = NULL;
+ s32 iface_type;
+ s32 ret = BCME_OK;
+ u16 mode;
+ u8 mac_addr[ETH_ALEN];
+ u16 wl_iftype;
+#ifdef WL_STATIC_IF
+ int static_ifidx;
+#endif
+
+ if (!ndev || !event) {
+ WL_ERR(("Wrong arg\n"));
+ return NULL;
+ }
+
+ cfg = wl_get_cfg(ndev);
+ if (!cfg) {
+ WL_ERR(("cfg null\n"));
+ return NULL;
+ }
+
+ WL_DBG(("Enter. role:%d ifidx:%d bssidx:%d\n",
+ event->role, event->ifidx, event->bssidx));
+ if (!event->ifidx || !event->bssidx) {
+ /* Fw returned primary idx (0) for virtual interface */
+ WL_ERR(("Wrong index. ifidx:%d bssidx:%d \n",
+ event->ifidx, event->bssidx));
+ return NULL;
+ }
+
+#if defined(WLMESH_CFG80211) && defined(WL_EXT_IAPSTA)
+ if (wl_ext_iapsta_mesh_creating(ndev)) {
+ event->role = WLC_E_IF_ROLE_MESH;
+ WL_MSG(ndev->name, "change role to WLC_E_IF_ROLE_MESH\n");
+ }
+#endif /* WLMESH_CFG80211 && WL_EXT_IAPSTA */
+
+ iface_type = wl_role_to_cfg80211_type(event->role, &wl_iftype, &mode);
+ if (iface_type < 0) {
+ /* Unknown iface type */
+ WL_ERR(("Wrong iface type \n"));
+ return NULL;
+ }
+
+ WL_DBG(("mac_ptr:%p name:%s role:%d nl80211_iftype:%d " MACDBG "\n",
+ addr, name, event->role, iface_type, MAC2STRDBG(event->mac)));
+ if (!name) {
+ /* If iface name is not provided, use dongle ifname */
+ name = event->name;
+ }
+
+ if (!addr) {
+ /* If mac address is not set, use primary mac with locally administered
+ * bit set.
+ */
+ primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+ memcpy(mac_addr, primary_ndev->dev_addr, ETH_ALEN);
+#ifndef CUSTOMER_HW6
+ /* For customer6 builds, use primary mac address for virtual interface */
+ mac_addr[0] |= 0x02;
+#endif /* CUSTOMER_HW6 */
+ addr = mac_addr;
+ }
+
+ if (iface_type == NL80211_IFTYPE_P2P_CLIENT) {
+ struct ether_addr *p2p_addr;
+ s16 cfg_type = wl_cfgp2p_get_conn_idx(cfg);
+ if (cfg_type < BCME_OK) {
+ WL_ERR(("Failed to get connection idx for p2p interface"
+ ", error code = %d", cfg_type));
+ goto fail;
+ }
+ p2p_addr = wl_to_p2p_bss_macaddr(cfg, cfg_type);
+
+ /* check if pre-registered mac matches the mac from dongle via WLC_E_LINK */
+ if (memcmp(p2p_addr->octet, addr, ETH_ALEN)) {
+			WL_INFORM_MEM(("p2p pre-registered mac:" MACDBG
+ " , mac from dongle:" MACDBG "\n",
+ MAC2STRDBG(p2p_addr->octet), MAC2STRDBG(addr)));
+
+ primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+
+ wl_cfg80211_handle_hang_event(primary_ndev,
+ HANG_REASON_IFACE_ADD_FAILURE, DUMP_TYPE_IFACE_OP_FAILURE);
+ goto fail;
+ }
+ }
+
+#ifdef WL_STATIC_IF
+ static_ifidx = wl_cfg80211_static_if_name(cfg, name);
+ if (static_ifidx >= 0) {
+ new_ndev = wl_cfg80211_post_static_ifcreate(cfg, event, addr, iface_type,
+ static_ifidx);
+ if (!new_ndev) {
+ WL_ERR(("failed to get I/F pointer\n"));
+ return NULL;
+ }
+ wdev = new_ndev->ieee80211_ptr;
+ } else
+#endif /* WL_STATIC_IF */
+ {
+ new_ndev = wl_cfg80211_allocate_if(cfg, event->ifidx,
+ name, addr, event->bssidx, event->name);
+ if (!new_ndev) {
+ WL_ERR(("I/F allocation failed! \n"));
+ return NULL;
+ } else {
+ WL_DBG(("I/F allocation succeeded! ifidx:0x%x bssidx:0x%x \n",
+ event->ifidx, event->bssidx));
+ }
+
+ wdev = (struct wireless_dev *)MALLOCZ(cfg->osh, sizeof(*wdev));
+ if (!wdev) {
+ WL_ERR(("wireless_dev alloc failed! \n"));
+ wl_cfg80211_remove_if(cfg, event->ifidx, new_ndev, rtnl_lock_reqd);
+ return NULL;
+ }
+
+ wdev->wiphy = bcmcfg_to_wiphy(cfg);
+ wdev->iftype = iface_type;
+
+ new_ndev->ieee80211_ptr = wdev;
+#ifdef WLDWDS
+ /* set wds0.x to 4addr interface here */
+ if (event->role == WLC_E_IF_ROLE_WDS) {
+ WL_MSG(ndev->name, "set vwdev 4addr to %s\n", event->name);
+ wdev->use_4addr = true;
+ }
+#endif /* WLDWDS */
+ SET_NETDEV_DEV(new_ndev, wiphy_dev(wdev->wiphy));
+
+ memcpy(new_ndev->dev_addr, addr, ETH_ALEN);
+#ifdef WL_EXT_IAPSTA
+ wl_ext_iapsta_ifadding(new_ndev, event->ifidx);
+#endif /* WL_EXT_IAPSTA */
+ if (wl_cfg80211_register_if(cfg, event->ifidx, new_ndev, rtnl_lock_reqd)
+ != BCME_OK) {
+ WL_ERR(("IFACE register failed \n"));
+ /* Post interface registration, wdev would be freed from the netdev
+ * destructor path. For other cases, handle it here.
+ */
+ MFREE(cfg->osh, wdev, sizeof(*wdev));
+ wl_cfg80211_remove_if(cfg, event->ifidx, new_ndev, rtnl_lock_reqd);
+ return NULL;
+ }
+ }
+
+ /* Initialize with the station mode params */
+ ret = wl_alloc_netinfo(cfg, new_ndev, wdev, wl_iftype,
+ PM_ENABLE, event->bssidx, event->ifidx);
+ if (unlikely(ret)) {
+ WL_ERR(("wl_alloc_netinfo Error (%d)\n", ret));
+ goto fail;
+ }
+
+ /* Apply the mode & infra setting based on iftype */
+ if ((ret = wl_config_infra(cfg, new_ndev, wl_iftype)) < 0) {
+ WL_ERR(("config ifmode failure (%d)\n", ret));
+ goto fail;
+ }
+
+ if (mode == WL_MODE_AP) {
+ wl_set_drv_status(cfg, AP_CREATING, new_ndev);
+ }
+#ifdef WL_EXT_IAPSTA
+ wl_ext_iapsta_update_iftype(new_ndev, wl_iftype);
+#endif
+
+ WL_INFORM_MEM(("Network Interface (%s) registered with host."
+ " cfg_iftype:%d wl_role:%d " MACDBG "\n",
+ new_ndev->name, iface_type, event->role, MAC2STRDBG(new_ndev->dev_addr)));
+
+#ifdef SUPPORT_SET_CAC
+ wl_cfg80211_set_cac(cfg, 0);
+#endif /* SUPPORT_SET_CAC */
+
+ return new_ndev;
+
+fail:
+#ifdef WL_STATIC_IF
+ /* remove static if from iflist */
+ static_ifidx = wl_cfg80211_static_if_name(cfg, name);
+ if (static_ifidx >= 0) {
+ cfg->static_ndev_state[static_ifidx] = NDEV_STATE_FW_IF_FAILED;
+ wl_cfg80211_update_iflist_info(cfg, new_ndev, WL_STATIC_IFIDX+static_ifidx, addr,
+ event->bssidx, event->name, NDEV_STATE_FW_IF_FAILED);
+ }
+#endif /* WL_STATIC_IF */
+ if (new_ndev) {
+ /* wdev would be freed from netdev destructor call back */
+ wl_cfg80211_remove_if(cfg, event->ifidx, new_ndev, rtnl_lock_reqd);
+ }
+
+ return NULL;
+}
+
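+/*
+ * Delete all non-primary interfaces of the given type (P2P GO/GC, NAN NDI,
+ * or virtual/static AP). Firmware-side clean-up is left to the reset path,
+ * as described in the note below.
+ */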
+s32
+wl_cfg80211_delete_iface(struct bcm_cfg80211 *cfg,
+ wl_iftype_t sec_data_if_type)
+{
+ struct net_info *iter, *next;
+ struct net_device *primary_ndev;
+ s32 ret = BCME_OK;
+ uint8 i = 0;
+
+ BCM_REFERENCE(i);
+ BCM_REFERENCE(ret);
+
+	/* Note: This function cleans up only the network interface and host
+	 * data structures. The firmware interface clean-up happens during
+	 * chip reset (ifconfig wlan0 down for built-in drivers, rmmod
+	 * context for the module case).
+	 */
+ primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+ WL_DBG(("Enter, deleting iftype %s\n",
+ wl_iftype_to_str(sec_data_if_type)));
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ for_each_ndev(cfg, iter, next) {
+ GCC_DIAGNOSTIC_POP();
+ if (iter->ndev && (iter->ndev != primary_ndev)) {
+ if (iter->iftype != sec_data_if_type) {
+ continue;
+ }
+ switch (sec_data_if_type) {
+ case WL_IF_TYPE_P2P_GO:
+ case WL_IF_TYPE_P2P_GC: {
+ ret = _wl_cfg80211_del_if(cfg,
+ iter->ndev, NULL, iter->ndev->name);
+ break;
+ }
+#ifdef WL_NAN
+ case WL_IF_TYPE_NAN: {
+ if (wl_cfgnan_is_enabled(cfg) == false) {
+ WL_INFORM_MEM(("Nan is not active,"
+ " ignore NDI delete\n"));
+ } else {
+ ret = wl_cfgnan_delete_ndp(cfg, iter->ndev);
+ }
+ break;
+ }
+#endif /* WL_NAN */
+ case WL_IF_TYPE_AP: {
+ /* Cleanup AP */
+#ifdef WL_STATIC_IF
+ /* handle static ap */
+ if (wl_cfg80211_static_if(cfg, iter->ndev)) {
+ dev_close(iter->ndev);
+ } else
+#endif /* WL_STATIC_IF */
+ {
+ /* handle virtual created AP */
+ ret = _wl_cfg80211_del_if(cfg, iter->ndev,
+ NULL, iter->ndev->name);
+ }
+ break;
+ }
+ default: {
+ WL_ERR(("Unsupported interface type\n"));
+ ret = -ENOTSUPP;
+ goto fail;
+ }
+ }
+ }
+ }
+fail:
+ return ret;
+}
+
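+/*
+ * Host-side follow-up for a firmware IF_DEL event: look up the netinfo for
+ * the departing interface and unregister/free the corresponding host
+ * net_device (or mark a static interface as firmware-deleted).
+ */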
+s32
+wl_cfg80211_post_ifdel(struct net_device *ndev, bool rtnl_lock_reqd, s32 ifidx)
+{
+ s32 ret = BCME_OK;
+ struct bcm_cfg80211 *cfg;
+ struct net_info *netinfo = NULL;
+
+ if (!ndev || !ndev->ieee80211_ptr) {
+		/* No wireless_dev associated with this interface */
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ cfg = wl_get_cfg(ndev);
+ if (!cfg) {
+ WL_ERR(("cfg null\n"));
+ ret = BCME_ERROR;
+ goto exit;
+ }
+
+ if (ifidx <= 0) {
+ WL_ERR(("Invalid IF idx for iface:%s\n", ndev->name));
+#if defined(BCMDONGLEHOST)
+ ifidx = dhd_net2idx(((struct dhd_pub *)(cfg->pub))->info, ndev);
+ BCM_REFERENCE(ifidx);
+#endif
+ if (ifidx <= 0) {
+ ASSERT(0);
+ ret = BCME_ERROR;
+ goto exit;
+ }
+ }
+
+ if ((netinfo = wl_get_netinfo_by_wdev(cfg, ndev_to_wdev(ndev))) == NULL) {
+ WL_ERR(("Find netinfo from wdev %p failed\n", ndev_to_wdev(ndev)));
+ ret = -ENODEV;
+ goto exit;
+ }
+
+#ifdef WL_STATIC_IF
+ if (wl_cfg80211_static_if(cfg, ndev)) {
+ ret = wl_cfg80211_post_static_ifdel(cfg, ndev);
+ } else
+#endif /* WL_STATIC_IF */
+ {
+ WL_INFORM_MEM(("[%s] cfg80211_remove_if ifidx:%d, vif_count:%d\n",
+ ndev->name, ifidx, cfg->vif_count));
+ wl_cfg80211_remove_if(cfg, ifidx, ndev, rtnl_lock_reqd);
+ cfg->bss_pending_op = FALSE;
+ }
+
+#ifdef SUPPORT_SET_CAC
+ wl_cfg80211_set_cac(cfg, 1);
+#endif /* SUPPORT_SET_CAC */
+exit:
+ return ret;
+}
+
+int
+wl_cfg80211_deinit_p2p_discovery(struct bcm_cfg80211 *cfg)
+{
+ s32 ret = BCME_OK;
+ bcm_struct_cfgdev *cfgdev;
+
+ if (cfg->p2p) {
+ /* De-initialize the p2p discovery interface, if operational */
+ WL_ERR(("Disabling P2P Discovery Interface \n"));
+#ifdef WL_CFG80211_P2P_DEV_IF
+ cfgdev = bcmcfg_to_p2p_wdev(cfg);
+#else
+ cfgdev = cfg->p2p_net;
+#endif
+ if (cfgdev) {
+ ret = wl_cfg80211_scan_stop(cfg, cfgdev);
+ if (unlikely(ret < 0)) {
+ CFGP2P_ERR(("P2P scan stop failed, ret=%d\n", ret));
+ }
+ }
+
+ wl_cfgp2p_disable_discovery(cfg);
+ wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) = 0;
+ p2p_on(cfg) = false;
+ }
+ return ret;
+}
+
+/* Create a Generic Network Interface and initialize it depending up on
+ * the interface type
+ */
+struct wireless_dev *
+wl_cfg80211_create_iface(struct wiphy *wiphy,
+ wl_iftype_t wl_iftype,
+ u8 *mac_addr, const char *name)
+{
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct net_device *new_ndev = NULL;
+ struct net_device *primary_ndev = NULL;
+ s32 ret = BCME_OK;
+ s32 bsscfg_idx = 0;
+ long timeout;
+ wl_if_event_info *event = NULL;
+ u8 addr[ETH_ALEN];
+ struct net_info *iter, *next;
+
+ WL_DBG(("Enter\n"));
+ if (!name) {
+ WL_ERR(("Interface name not provided\n"));
+ return NULL;
+ }
+ else {
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ for_each_ndev(cfg, iter, next) {
+ GCC_DIAGNOSTIC_POP();
+ if (iter->ndev) {
+ if (strncmp(iter->ndev->name, name, strlen(name)) == 0) {
+					WL_ERR(("Interface name %s already exists!\n", iter->ndev->name));
+ return NULL;
+ }
+ }
+ }
+ }
+ primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+ if (likely(!mac_addr)) {
+ /* Use primary MAC with the locally administered bit for the
+ * Secondary STA I/F
+ */
+ memcpy(addr, primary_ndev->dev_addr, ETH_ALEN);
+ addr[0] |= 0x02;
+ } else {
+ /* Use the application provided mac address (if any) */
+ memcpy(addr, mac_addr, ETH_ALEN);
+ }
+
+ cfg->bss_pending_op = TRUE;
+ bzero(&cfg->if_event_info, sizeof(cfg->if_event_info));
+
+ /*
+	 * Initialize the firmware I/F.
+ */
+
+#ifdef CUSTOMER_HW6
+ if (wl_customer6_legacy_chip_check(cfg, primary_ndev)) {
+ /* Use bss iovar instead of interface_create iovar */
+ ret = BCME_UNSUPPORTED;
+ } else
+#endif /* CUSTOMER_HW6 */
+
+ {
+ ret = wl_cfg80211_interface_ops(cfg, primary_ndev, bsscfg_idx,
+ wl_iftype, 0, addr);
+ }
+ if (ret == BCME_UNSUPPORTED) {
+ /* Use bssidx 1 by default */
+ bsscfg_idx = 1;
+ if ((ret = wl_cfg80211_add_del_bss(cfg, primary_ndev,
+ bsscfg_idx, wl_iftype, 0, addr)) < 0) {
+ goto exit;
+ }
+ } else if (ret < 0) {
+ WL_ERR(("Interface create failed!! ret:%d \n", ret));
+ goto exit;
+ } else {
+ /* Success */
+ bsscfg_idx = ret;
+ }
+
+ WL_DBG(("Interface created!! bssidx:%d \n", bsscfg_idx));
+ /*
+ * Wait till the firmware send a confirmation event back.
+ */
+ WL_DBG(("Wait for the FW I/F Event\n"));
+ timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
+ !cfg->bss_pending_op, msecs_to_jiffies(MAX_WAIT_TIME));
+ if (timeout <= 0 || cfg->bss_pending_op) {
+		WL_ERR(("ADD_IF event did not arrive. Returning. timeout:%lu bss_pending_op:%d\n",
+ timeout, cfg->bss_pending_op));
+ if (timeout == -ERESTARTSYS) {
+ WL_ERR(("waitqueue was interrupted by a signal, returns -ERESTARTSYS\n"));
+ }
+ goto exit;
+ }
+
+ event = &cfg->if_event_info;
+ /*
+	 * Since the FW operation succeeded, we can go ahead with
+	 * the host interface creation.
+ */
+ new_ndev = wl_cfg80211_post_ifcreate(primary_ndev,
+ event, addr, name, false);
+
+ if (new_ndev) {
+ /* Iface post ops successful. Return ndev/wdev ptr */
+ return new_ndev->ieee80211_ptr;
+ }
+
+exit:
+ cfg->bss_pending_op = FALSE;
+ return NULL;
+}
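+
+/*
+ * Usage sketch (hypothetical caller; the interface name is illustrative):
+ *
+ *	struct wireless_dev *wdev;
+ *
+ *	wdev = wl_cfg80211_create_iface(wiphy, WL_IF_TYPE_AP, NULL, "wlan1");
+ *	if (!wdev)
+ *		return -ENODEV;
+ */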
+
+s32
+wl_cfg80211_del_iface(struct wiphy *wiphy, struct wireless_dev *wdev)
+{
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct net_device *ndev = NULL;
+ s32 ret = BCME_OK;
+ s32 bsscfg_idx = 1;
+ long timeout;
+ u16 wl_iftype;
+ u16 wl_mode;
+
+ WL_DBG(("Enter\n"));
+
+ /* If any scan is going on, abort it */
+ if (wl_get_drv_status_all(cfg, SCANNING)) {
+ WL_DBG(("Scan in progress. Aborting the scan!\n"));
+ wl_cfgscan_cancel_scan(cfg);
+ }
+
+ bsscfg_idx = wl_get_bssidx_by_wdev(cfg, wdev);
+ if (bsscfg_idx <= 0) {
+ /* validate bsscfgidx */
+ WL_ERR(("Wrong bssidx! \n"));
+ return -EINVAL;
+ }
+
+ /* Handle p2p iface */
+ if ((ret = wl_cfg80211_p2p_if_del(wiphy, wdev)) != BCME_NOTFOUND) {
+ WL_DBG(("P2P iface del handled \n"));
+#ifdef SUPPORT_SET_CAC
+ wl_cfg80211_set_cac(cfg, 1);
+#endif /* SUPPORT_SET_CAC */
+ return ret;
+ }
+
+ ndev = wdev->netdev;
+ if (unlikely(!ndev)) {
+ WL_ERR(("ndev null! \n"));
+ return -EINVAL;
+ }
+
+ memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info));
+
+ if (cfg80211_to_wl_iftype(ndev->ieee80211_ptr->iftype,
+ &wl_iftype, &wl_mode) < 0) {
+ return -EINVAL;
+ }
+
+	WL_DBG(("del interface. bssidx:%d cfg_iftype:%d wl_iftype:%d\n",
+ bsscfg_idx, ndev->ieee80211_ptr->iftype, wl_iftype));
+ /* Delete the firmware interface. "interface_remove" command
+ * should go on the interface to be deleted
+ */
+ if (wl_cfg80211_get_bus_state(cfg)) {
+ WL_ERR(("Bus state is down: %d\n", __LINE__));
+ ret = BCME_DONGLE_DOWN;
+ goto exit;
+ }
+
+ cfg->bss_pending_op = true;
+ ret = wl_cfg80211_interface_ops(cfg, ndev, bsscfg_idx,
+ wl_iftype, 1, NULL);
+ if (ret == BCME_UNSUPPORTED) {
+ if ((ret = wl_cfg80211_add_del_bss(cfg, ndev,
+ bsscfg_idx, wl_iftype, true, NULL)) < 0) {
+ WL_ERR(("DEL bss failed ret:%d \n", ret));
+ goto exit;
+ }
+ } else if ((ret == BCME_NOTAP) || (ret == BCME_NOTSTA)) {
+		/* The de-init sequence involving the role downgrade has not
+		 * happened. Do nothing and return an error; the del command
+		 * should be retried.
+		 */
+ WL_ERR(("ifdel role mismatch:%d\n", ret));
+ ret = -EBADTYPE;
+ goto exit;
+ } else if (ret < 0) {
+ WL_ERR(("Interface DEL failed ret:%d \n", ret));
+ goto exit;
+ }
+
+ timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
+ !cfg->bss_pending_op, msecs_to_jiffies(MAX_WAIT_TIME));
+ if (timeout <= 0 || cfg->bss_pending_op) {
+ WL_ERR(("timeout in waiting IF_DEL event\n"));
+ /* The interface unregister will happen from wifi reset context */
+ ret = -ETIMEDOUT;
+ /* fall through */
+ }
+
+exit:
+ if (ret < 0) {
+ WL_ERR(("iface del failed:%d\n", ret));
+#ifdef WL_STATIC_IF
+ if (wl_cfg80211_static_if(cfg, ndev)) {
+ /*
+ * For static interface, clean up the host data,
+ * irrespective of fw status. For dynamic
+ * interfaces it gets cleaned from dhd_stop context
+ */
+ wl_cfg80211_post_static_ifdel(cfg, ndev);
+ }
+#endif /* WL_STATIC_IF */
+ } else {
+ ret = wl_cfg80211_post_ifdel(ndev, false, cfg->if_event_info.ifidx);
+ if (unlikely(ret)) {
+ WL_ERR(("post_ifdel failed\n"));
+ }
+ }
+
+ cfg->bss_pending_op = false;
+ return ret;
+}
+
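+/*
+ * cfg80211 join_ibss handler: scan for an existing IBSS with the requested
+ * SSID (coalescing where allowed), otherwise become the IBSS starter, then
+ * issue WLC_SET_SSID with the chosen BSSID/chanspec and open security.
+ */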
+static s32
+wl_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_ibss_params *params)
+{
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct cfg80211_bss *bss;
+ struct ieee80211_channel *chan;
+ struct wl_join_params join_params;
+ int scan_suppress;
+ struct cfg80211_ssid ssid;
+ s32 scan_retry = 0;
+ s32 err = 0;
+ size_t join_params_size;
+ chanspec_t chanspec = 0;
+ char sec[64];
+
+ WL_TRACE(("In\n"));
+ RETURN_EIO_IF_NOT_UP(cfg);
+ WL_INFORM_MEM(("IBSS JOIN\n"));
+ if (!params->ssid || params->ssid_len <= 0 ||
+ params->ssid_len > DOT11_MAX_SSID_LEN) {
+ WL_ERR(("Invalid parameter\n"));
+ return -EINVAL;
+ }
+#if defined(WL_CFG80211_P2P_DEV_IF)
+ chan = params->chandef.chan;
+#else
+ chan = params->channel;
+#endif /* WL_CFG80211_P2P_DEV_IF */
+ if (chan) {
+ u16 center_freq = chan->center_freq;
+#ifdef WL_EXT_IAPSTA
+ enum nl80211_band band;
+ s32 _chan;
+ _chan = ieee80211_frequency_to_channel(center_freq);
+ wl_ext_iapsta_update_iftype(dev, WL_IF_TYPE_IBSS);
+ _chan = wl_ext_iapsta_update_channel(dev, _chan);
+ if (CHANNEL_IS_5G(_chan))
+ band = NL80211_BAND_5GHZ;
+ else
+ band = NL80211_BAND_2GHZ;
+ center_freq = ieee80211_channel_to_frequency(_chan, band);
+#endif /* WL_EXT_IAPSTA */
+ cfg->channel = wl_freq_to_chanspec(center_freq);
+ }
+ if (wl_get_drv_status(cfg, CONNECTED, dev)) {
+ struct wlc_ssid *lssid = (struct wlc_ssid *)wl_read_prof(cfg, dev, WL_PROF_SSID);
+ u8 *bssid = (u8 *)wl_read_prof(cfg, dev, WL_PROF_BSSID);
+ u32 *channel = (u32 *)wl_read_prof(cfg, dev, WL_PROF_CHAN);
+ if (!params->bssid || ((memcmp(params->bssid, bssid, ETHER_ADDR_LEN) == 0) &&
+ (memcmp(params->ssid, lssid->SSID, lssid->SSID_len) == 0) &&
+ (*channel == cfg->channel))) {
+			WL_ERR(("Connection already exists to " MACDBG "\n",
+ MAC2STRDBG((u8 *)wl_read_prof(cfg, dev, WL_PROF_BSSID))));
+ return -EISCONN;
+ }
+		WL_ERR(("Ignoring previous connection to %s (" MACDBG ")\n",
+ lssid->SSID, MAC2STRDBG(bssid)));
+ }
+
+ /* remove the VSIE */
+ wl_cfg80211_ibss_vsie_delete(dev);
+
+ bss = cfg80211_get_ibss(wiphy, NULL, params->ssid, params->ssid_len);
+ if (!bss) {
+ if (IBSS_INITIAL_SCAN_ALLOWED == TRUE) {
+ int retry = 4;
+ memcpy(ssid.ssid, params->ssid, params->ssid_len);
+ ssid.ssid_len = params->ssid_len;
+ do {
+ if (unlikely
+ (__wl_cfg80211_scan(wiphy, dev, NULL, &ssid) ==
+ -EBUSY)) {
+ wl_delay(150);
+ } else {
+ break;
+ }
+ } while (++scan_retry < WL_SCAN_RETRY_MAX);
+
+			/* rtnl lock code was removed here; there is no apparent
+			 * reason why the rtnl lock would need to be released.
+			 */
+
+			/* wait up to 4 seconds until the scan is done.... */
+// schedule_timeout_interruptible(msecs_to_jiffies(4000));
+ while (retry--) {
+ if (!wl_get_drv_status_all(cfg, SCANNING)) {
+ break;
+ }
+ wl_delay(150);
+ }
+
+ bss = cfg80211_get_ibss(wiphy, NULL,
+ params->ssid, params->ssid_len);
+ }
+ }
+ if (bss && ((IBSS_COALESCE_ALLOWED == TRUE) ||
+ ((IBSS_COALESCE_ALLOWED == FALSE) && params->bssid &&
+ !memcmp(bss->bssid, params->bssid, ETHER_ADDR_LEN)))) {
+ cfg->ibss_starter = false;
+ WL_DBG(("Found IBSS\n"));
+ } else {
+ cfg->ibss_starter = true;
+ }
+
+ if (bss) {
+ CFG80211_PUT_BSS(wiphy, bss);
+ }
+
+ if (chan) {
+ u32 bw_cap = 0;
+ err = wl_get_bandwidth_cap(dev, CHSPEC_BAND(cfg->channel), &bw_cap);
+ if (unlikely(err)) {
+ WL_ERR(("Failed to get bandwidth capability (%d)\n", err));
+ return err;
+ }
+ chanspec = wf_create_chspec_from_primary(wf_chspec_primary20_chan(cfg->channel),
+ bw_cap, CHSPEC_BAND(cfg->channel));
+ }
+
+ /*
+ * Join with specific BSSID and cached SSID
+ * If SSID is zero join based on BSSID only
+ */
+ bzero(&join_params, sizeof(join_params));
+ memcpy((void *)join_params.ssid.SSID, (const void *)params->ssid,
+ params->ssid_len);
+ join_params.ssid.SSID_len = htod32(params->ssid_len);
+ if (params->bssid) {
+ memcpy(&join_params.params.bssid, params->bssid, ETHER_ADDR_LEN);
+ err = wldev_ioctl_set(dev, WLC_SET_DESIRED_BSSID, &join_params.params.bssid,
+ ETHER_ADDR_LEN);
+ if (unlikely(err)) {
+ WL_ERR(("Error (%d)\n", err));
+ return err;
+ }
+ } else
+ bzero(&join_params.params.bssid, ETHER_ADDR_LEN);
+
+ if (IBSS_INITIAL_SCAN_ALLOWED == FALSE) {
+ scan_suppress = TRUE;
+ /* Set the SCAN SUPPRESS Flag in the firmware to skip join scan */
+ err = wldev_ioctl_set(dev, WLC_SET_SCANSUPPRESS,
+ &scan_suppress, sizeof(int));
+ if (unlikely(err)) {
+ WL_ERR(("Scan Suppress Setting Failed (%d)\n", err));
+ return err;
+ }
+ }
+
+ join_params.params.chanspec_list[0] = chanspec;
+ join_params.params.chanspec_num = 1;
+ wldev_iovar_setint(dev, "chanspec", chanspec);
+ join_params_size = sizeof(join_params);
+
+	/* Disable Authentication; IBSS will add the key if required */
+ wldev_iovar_setint(dev, "wpa_auth", WPA_AUTH_DISABLED);
+ wldev_iovar_setint(dev, "wsec", 0);
+
+ wl_ext_get_sec(dev, 0, sec, sizeof(sec), TRUE);
+ WL_MSG(dev->name, "Join IBSS %pM ssid \"%s\", len (%d), channel=%d, sec=%s\n",
+ &join_params.params.bssid, join_params.ssid.SSID, join_params.ssid.SSID_len,
+ wf_chspec_ctlchan(chanspec), sec);
+ err = wldev_ioctl_set(dev, WLC_SET_SSID, &join_params,
+ join_params_size);
+ if (unlikely(err)) {
+ WL_ERR(("IBSS set_ssid Error (%d)\n", err));
+ return err;
+ }
+
+ if (IBSS_INITIAL_SCAN_ALLOWED == FALSE) {
+ scan_suppress = FALSE;
+ /* Reset the SCAN SUPPRESS Flag */
+ err = wldev_ioctl_set(dev, WLC_SET_SCANSUPPRESS,
+ &scan_suppress, sizeof(int));
+ if (unlikely(err)) {
+ WL_ERR(("Reset Scan Suppress Flag Failed (%d)\n", err));
+ return err;
+ }
+ }
+ wl_update_prof(cfg, dev, NULL, &join_params.ssid, WL_PROF_SSID);
+ wl_update_prof(cfg, dev, NULL, &cfg->channel, WL_PROF_CHAN);
+#ifdef WLAIBSS
+ cfg->aibss_txfail_seq = 0; /* initialize the sequence */
+#endif /* WLAIBSS */
+#ifdef WL_RELMCAST
+ cfg->rmc_event_seq = 0; /* initialize rmcfail sequence */
+#endif /* WL_RELMCAST */
+ return err;
+}
+
+static s32 wl_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
+{
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ s32 err = 0;
+ scb_val_t scbval;
+ u8 *curbssid;
+
+ RETURN_EIO_IF_NOT_UP(cfg);
+ wl_link_down(cfg);
+
+ WL_MSG(dev->name, "Leave IBSS\n");
+ curbssid = wl_read_prof(cfg, dev, WL_PROF_BSSID);
+ wl_set_drv_status(cfg, DISCONNECTING, dev);
+ scbval.val = 0;
+ memcpy(&scbval.ea, curbssid, ETHER_ADDR_LEN);
+ err = wldev_ioctl_set(dev, WLC_DISASSOC, &scbval,
+ sizeof(scb_val_t));
+ if (unlikely(err)) {
+ wl_clr_drv_status(cfg, DISCONNECTING, dev);
+ WL_ERR(("error(%d)\n", err));
+ return err;
+ }
+
+ /* remove the VSIE */
+ wl_cfg80211_ibss_vsie_delete(dev);
+
+ return err;
+}
+
+#ifdef MFP
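+/*
+ * Locate the RSN Capabilities field inside an RSN (WPA2) IE by walking its
+ * fixed layout: version, group (mcast) cipher suite, pairwise (ucast)
+ * suite count + list, AKM suite count + list, then the 2-byte RSN caps.
+ */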
+static
+int wl_cfg80211_get_rsn_capa(const bcm_tlv_t *wpa2ie,
+ const u8** rsn_cap)
+{
+ u16 suite_count;
+ const wpa_suite_mcast_t *mcast;
+ const wpa_suite_ucast_t *ucast;
+ int len;
+ const wpa_suite_auth_key_mgmt_t *mgmt;
+
+ if (!wpa2ie)
+ return BCME_BADARG;
+
+ len = wpa2ie->len;
+
+ /* check for Multicast cipher suite */
+ if ((len -= (WPA_SUITE_LEN + WPA2_VERSION_LEN)) <= 0) {
+ return BCME_NOTFOUND;
+ }
+
+ mcast = (const wpa_suite_mcast_t *)&wpa2ie->data[WPA2_VERSION_LEN];
+
+ /* Check for the unicast suite(s) */
+ if (len < WPA_IE_SUITE_COUNT_LEN) {
+ return BCME_NOTFOUND;
+ }
+
+ ucast = (const wpa_suite_ucast_t *)&mcast[1];
+ suite_count = ltoh16_ua(&ucast->count);
+ if ((suite_count > NL80211_MAX_NR_CIPHER_SUITES) ||
+ (len -= (WPA_IE_SUITE_COUNT_LEN +
+ (WPA_SUITE_LEN * suite_count))) <= 0)
+ return BCME_BADLEN;
+
+ /* Check for AUTH key management suite(s) */
+ if (len < WPA_IE_SUITE_COUNT_LEN) {
+ return BCME_NOTFOUND;
+ }
+
+ mgmt = (const wpa_suite_auth_key_mgmt_t *)&ucast->list[suite_count];
+ suite_count = ltoh16_ua(&mgmt->count);
+
+ if ((suite_count <= NL80211_MAX_NR_CIPHER_SUITES) &&
+ (len -= (WPA_IE_SUITE_COUNT_LEN +
+ (WPA_SUITE_LEN * suite_count))) >= RSN_CAP_LEN) {
+ rsn_cap[0] = (const u8 *)&mgmt->list[suite_count];
+ } else {
+ return BCME_BADLEN;
+ }
+
+ return BCME_OK;
+}
+#endif /* MFP */
+
+static s32
+wl_set_wpa_version(struct net_device *dev, struct cfg80211_connect_params *sme)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ struct wl_security *sec;
+ s32 val = 0;
+ s32 err = 0;
+ s32 bssidx;
+
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+ WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
+ return BCME_ERROR;
+ }
+
+ if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_1)
+ val = WPA_AUTH_PSK |
+ WPA_AUTH_UNSPECIFIED;
+ else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_2)
+ val = WPA2_AUTH_PSK|
+ WPA2_AUTH_UNSPECIFIED;
+ else
+ val = WPA_AUTH_DISABLED;
+
+ if (is_wps_conn(sme))
+ val = WPA_AUTH_DISABLED;
+
+ WL_DBG_MEM(("[%s] wl wpa_auth 0x%0x\n", dev->name, val));
+ err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", val, bssidx);
+ if (unlikely(err)) {
+ WL_ERR(("set wpa_auth failed (%d)\n", err));
+ return err;
+ }
+ sec = wl_read_prof(cfg, dev, WL_PROF_SEC);
+ sec->wpa_versions = sme->crypto.wpa_versions;
+ return err;
+}
+
+#ifdef BCMWAPI_WPI
+static s32
+wl_set_set_wapi_ie(struct net_device *dev, struct cfg80211_connect_params *sme)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ s32 err = 0;
+ s32 bssidx;
+
+ WL_DBG((" wl_set_set_wapi_ie\n"));
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+ WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
+ return BCME_ERROR;
+ }
+
+ err = wldev_iovar_setbuf_bsscfg(dev, "wapiie", (const void *)sme->ie, sme->ie_len,
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+ if (unlikely(err)) {
+ WL_ERR(("set_wapi_ie Error (%d)\n", err));
+ return err;
+ }
+	WL_DBG_MEM(("wapi_ie set successfully (%s)\n", dev->name));
+ return err;
+}
+#endif /* BCMWAPI_WPI */
+
+static s32
+wl_set_auth_type(struct net_device *dev, struct cfg80211_connect_params *sme)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ struct wl_security *sec;
+ s32 val = 0;
+ s32 err = 0;
+ s32 bssidx;
+
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+ WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
+ return BCME_ERROR;
+ }
+
+ switch (sme->auth_type) {
+ case NL80211_AUTHTYPE_OPEN_SYSTEM:
+ val = WL_AUTH_OPEN_SYSTEM;
+ WL_DBG(("open system\n"));
+ break;
+ case NL80211_AUTHTYPE_SHARED_KEY:
+ val = WL_AUTH_SHARED_KEY;
+ WL_DBG(("shared key\n"));
+ break;
+ case NL80211_AUTHTYPE_AUTOMATIC:
+ val = WL_AUTH_OPEN_SHARED;
+ WL_DBG(("automatic\n"));
+ break;
+#ifdef WL_FILS
+ case NL80211_AUTHTYPE_FILS_SK:
+ WL_DBG(("fils shared key\n"));
+ val = WL_AUTH_FILS_SHARED;
+ break;
+ case NL80211_AUTHTYPE_FILS_SK_PFS:
+ val = WL_AUTH_FILS_SHARED_PFS;
+ WL_DBG(("fils shared key with pfs\n"));
+ break;
+ case NL80211_AUTHTYPE_FILS_PK:
+ WL_DBG(("fils public key\n"));
+ val = WL_AUTH_FILS_PUBLIC;
+ break;
+#endif /* WL_FILS */
+#if defined(WL_SAE) || defined(WL_CLIENT_SAE)
+ case NL80211_AUTHTYPE_SAE:
+#ifdef WL_CLIENT_SAE
+ if (!wl_is_pmkid_available(dev, sme->bssid))
+ val = DOT11_SAE;
+ else
+#endif /* WL_CLIENT_SAE */
+ {
+			/* FW will choose the right auth type
+			 * dynamically based on PMKID availability
+			 */
+ val = WL_AUTH_OPEN_SHARED;
+ }
+
+ WL_DBG(("sae auth type\n"));
+ break;
+#endif /* WL_SAE || WL_CLIENT_SAE */
+ default:
+ val = 2;
+ WL_ERR(("invalid auth type (%d)\n", sme->auth_type));
+ break;
+ }
+
+ WL_DBG_MEM(("[%s] wl auth 0x%0x \n", dev->name, val));
+ err = wldev_iovar_setint_bsscfg(dev, "auth", val, bssidx);
+ if (unlikely(err)) {
+ WL_ERR(("set auth failed (%d)\n", err));
+ return err;
+ }
+ sec = wl_read_prof(cfg, dev, WL_PROF_SEC);
+ sec->auth_type = sme->auth_type;
+ sec->fw_auth = val;
+ return err;
+}
+
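+/*
+ * Table-driven lookups translating cfg80211 cipher/AKM suite IDs into the
+ * firmware's wsec algo, key algo and wpa_auth values; each returns its
+ * "disabled" value (WSEC_NONE/CRYPTO_ALGO_OFF/WPA_AUTH_DISABLED) when the
+ * suite is not found in the table.
+ */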
+static u32
+wl_rsn_cipher_wsec_algo_lookup(uint32 cipher)
+{
+ uint i;
+
+ for (i = 0; i < ARRAYSIZE(rsn_cipher_algo_lookup_tbl); i++) {
+ if (cipher == rsn_cipher_algo_lookup_tbl[i].cipher_suite) {
+ return rsn_cipher_algo_lookup_tbl[i].wsec_algo;
+ }
+ }
+ return WSEC_NONE;
+}
+
+static u32
+wl_rsn_cipher_wsec_key_algo_lookup(uint32 cipher)
+{
+ uint i;
+
+ for (i = 0; i < ARRAYSIZE(rsn_cipher_algo_lookup_tbl); i++) {
+ if (cipher == rsn_cipher_algo_lookup_tbl[i].cipher_suite) {
+ return rsn_cipher_algo_lookup_tbl[i].wsec_key_algo;
+ }
+ }
+ return CRYPTO_ALGO_OFF;
+}
+
+static u32
+wl_rsn_akm_wpa_auth_lookup(uint32 akm)
+{
+ uint i;
+
+ for (i = 0; i < ARRAYSIZE(rsn_akm_wpa_auth_lookup_tbl); i++) {
+ if (akm == rsn_akm_wpa_auth_lookup_tbl[i].akm_suite) {
+ return rsn_akm_wpa_auth_lookup_tbl[i].wpa_auth;
+ }
+ }
+ return WPA_AUTH_DISABLED;
+}
+
+static s32
+wl_set_set_cipher(struct net_device *dev, struct cfg80211_connect_params *sme)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ struct wl_security *sec;
+ s32 pval = 0;
+ s32 gval = 0;
+ s32 err = 0;
+ s32 wsec_val = 0;
+
+#ifdef BCMWAPI_WPI
+ s32 wapi_val = 0;
+ s32 val = 0;
+#endif
+
+ s32 bssidx;
+#ifdef WL_GCMP
+ uint32 algos = 0, mask = 0;
+#endif /* WL_GCMP */
+
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+ WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
+ return BCME_ERROR;
+ }
+
+ if (sme->crypto.n_ciphers_pairwise) {
+ pval = wl_rsn_cipher_wsec_algo_lookup(sme->crypto.ciphers_pairwise[0]);
+ if (pval == WSEC_NONE) {
+ WL_ERR(("Invalid cipher (0x%x)\n",
+ sme->crypto.ciphers_pairwise[0]));
+ return BCME_BADARG;
+ }
+ switch (sme->crypto.ciphers_pairwise[0]) {
+
+#ifdef BCMWAPI_WPI
+ case WLAN_CIPHER_SUITE_SMS4:
+#ifndef CUSTOMER_HW6
+ if (!IS_WAPI_VER(sme->crypto.wpa_versions)) {
+ WL_ERR(("Invalid WAPI version %d\n", sme->crypto.wpa_versions));
+ return BCME_BADARG;
+ }
+#endif /* !CUSTOMER_HW6 */
+ val = pval;
+ err = wl_set_set_wapi_ie(dev, sme);
+ if (unlikely(err)) {
+ WL_DBG(("Set wapi ie failed \n"));
+ return err;
+ } else {
+				WL_DBG(("Set wapi ie succeeded\n"));
+ }
+ wapi_val = WAPI_AUTH_PSK | WAPI_AUTH_UNSPECIFIED;
+ WL_DBG_MEM(("[WAPI] wl wpa_auth to 0x%0x (%s)\n", val, dev->name));
+ err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", wapi_val, bssidx);
+ if (unlikely(err)) {
+ WL_ERR(("set wpa_auth failed (%d)\n", err));
+ return err;
+ }
+ break;
+#endif /* BCMWAPI_WPI */
+
+#ifdef WL_GCMP
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ algos = KEY_ALGO_MASK(wl_rsn_cipher_wsec_key_algo_lookup(
+ sme->crypto.ciphers_pairwise[0]));
+ mask = algos | KEY_ALGO_MASK(CRYPTO_ALGO_AES_CCM);
+ break;
+#endif /* WL_GCMP */
+ default: /* No post processing required */
+ break;
+ }
+ }
+#if defined(BCMSUP_4WAY_HANDSHAKE)
+ /* Ensure in-dongle supplicant is turned on when FBT wants to do the 4-way
+ * handshake.
+ * Note that the FW feature flag only exists on kernels that support the
+ * FT-EAP AKM suite.
+ */
+ if (cfg->wdev->wiphy->features & NL80211_FEATURE_FW_4WAY_HANDSHAKE) {
+ err = wldev_iovar_setint_bsscfg(dev, "sup_wpa", 1, bssidx);
+ if (err) {
+ WL_ERR(("FBT: Error setting sup_wpa (%d)\n", err));
+ return err;
+ } else {
+ WL_INFORM_MEM(("idsup enabled.\n"));
+ }
+ }
+#endif /* BCMSUP_4WAY_HANDSHAKE */
+ if (sme->crypto.cipher_group) {
+ gval = wl_rsn_cipher_wsec_algo_lookup(sme->crypto.cipher_group);
+ if (gval == WSEC_NONE) {
+ WL_ERR(("invalid cipher group (0x%x)\n", sme->crypto.cipher_group));
+ return BCME_BADARG;
+ }
+ switch (sme->crypto.cipher_group) {
+
+#ifdef BCMWAPI_WPI
+ case WLAN_CIPHER_SUITE_SMS4:
+ val = gval;
+ break;
+#endif
+
+#ifdef WL_GCMP
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ algos = KEY_ALGO_MASK(
+ wl_rsn_cipher_wsec_key_algo_lookup(sme->crypto.cipher_group));
+ mask = algos | KEY_ALGO_MASK(CRYPTO_ALGO_AES_CCM);
+ break;
+#endif /* WL_GCMP */
+ default: /* No post processing required */
+ break;
+ }
+ }
+
+ WL_DBG(("pval (%d) gval (%d)\n", pval, gval));
+#ifdef WL_GCMP
+ WL_DBG(("algos:%x, mask:%x\n", algos, mask));
+#endif /* WL_GCMP */
+
+ if (is_wps_conn(sme)) {
+ if (sme->privacy) {
+ wsec_val = 4;
+ } else {
+ /* WPS-2.0 allows no security */
+ wsec_val = 0;
+ }
+ } else {
+
+#ifdef BCMWAPI_WPI
+ if (sme->crypto.cipher_group == WLAN_CIPHER_SUITE_SMS4) {
+			WL_DBG((" Not a WPS connection; WAPI set to SMS4_ENABLED\n"));
+ wsec_val = val;
+ } else
+#endif
+
+ {
+			WL_DBG((" Not a WPS connection; set pval | gval as WSEC\n"));
+ wsec_val = pval | gval;
+ }
+ }
+
+ WL_DBG_MEM(("[%s] wl wsec 0x%x\n", dev->name, wsec_val));
+ err = wldev_iovar_setint_bsscfg(dev, "wsec", wsec_val, bssidx);
+ if (unlikely(err)) {
+ WL_ERR(("error (%d)\n", err));
+ return err;
+ }
+#ifdef WL_GCMP
+ if (wl_set_wsec_info_algos(dev, algos, mask)) {
+ WL_ERR(("set wsec_info error (%d)\n", err));
+ }
+#endif /* WL_GCMP */
+ sec = wl_read_prof(cfg, dev, WL_PROF_SEC);
+ sec->cipher_pairwise = sme->crypto.ciphers_pairwise[0];
+ sec->cipher_group = sme->crypto.cipher_group;
+ sec->fw_wsec = wsec_val;
+ return err;
+}
+#ifdef WL_GCMP
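+/*
+ * Program GCMP key algorithms: pack an {algos, mask} pair as a
+ * WL_WSEC_INFO_BSS_ALGOS xtlv inside a wl_wsec_info_t and push it via the
+ * "wsec_info" iovar on the interface's bsscfg.
+ */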
+static s32
+wl_set_wsec_info_algos(struct net_device *dev, uint32 algos, uint32 mask)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ s32 bssidx;
+ s32 err = 0;
+ wl_wsec_info_t *wsec_info;
+ bcm_xtlv_t *wsec_info_tlv;
+ uint16 tlv_data_len;
+ uint32 tlv_data[2];
+ uint32 param_len;
+ uint8 * buf;
+
+ WL_DBG(("enter.\n"));
+ if (!cfg) {
+ return BCME_ERROR;
+ }
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+ WL_ERR(("Find index from wdev(%p) failed\n", dev->ieee80211_ptr));
+ return BCME_ERROR;
+ }
+
+ buf = MALLOCZ(cfg->osh, sizeof(wl_wsec_info_t) + sizeof(tlv_data));
+ if (!buf) {
+ WL_ERR(("No memory"));
+ return BCME_NOMEM;
+ }
+ wsec_info = (wl_wsec_info_t *)buf;
+ wsec_info->version = WL_WSEC_INFO_VERSION;
+ wsec_info_tlv = (bcm_xtlv_t *)(buf + OFFSETOF(wl_wsec_info_t, tlvs));
+
+ wsec_info->num_tlvs++;
+ tlv_data_len = sizeof(tlv_data);
+ tlv_data[0] = algos;
+ tlv_data[1] = mask;
+
+ bcm_xtlv_pack_xtlv(wsec_info_tlv, WL_WSEC_INFO_BSS_ALGOS, tlv_data_len,
+ (const uint8 *)tlv_data, 0);
+ param_len = OFFSETOF(wl_wsec_info_t, tlvs) + WL_WSEC_INFO_TLV_HDR_LEN + tlv_data_len;
+
+ err = wldev_iovar_setbuf_bsscfg(dev, "wsec_info", wsec_info, param_len,
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+
+ MFREE(cfg->osh, buf, sizeof(wl_wsec_info_t) + sizeof(tlv_data));
+ return err;
+}
+#endif /* WL_GCMP */
+
+#ifdef WL_SAE
+s32
+wl_cfg80211_set_wsec_info(struct net_device *dev, uint32 *data,
+ uint16 data_len, int tag)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ s32 bssidx;
+ s32 err = 0;
+ wl_wsec_info_t *wsec_info;
+ bcm_xtlv_t *bcm_info_tlv;
+ uint32 param_len;
+ uint8 *buf = NULL;
+
+ if (!cfg) {
+ return BCME_ERROR;
+ }
+
+ if (data_len > WLC_IOCTL_SMLEN) {
+ err = BCME_BADLEN;
+ goto exit;
+ }
+
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+ WL_ERR(("Find index from wdev(%p) failed\n", dev->ieee80211_ptr));
+ err = BCME_ERROR;
+ goto exit;
+ }
+
+ buf = MALLOCZ(cfg->osh, sizeof(wl_wsec_info_t) + data_len);
+ if (!buf) {
+ WL_ERR(("No memory"));
+ err = BCME_NOMEM;
+ goto exit;
+ }
+
+ wsec_info = (wl_wsec_info_t *)buf;
+ bzero(wsec_info, sizeof(wl_wsec_info_t) + data_len);
+ wsec_info->version = WL_WSEC_INFO_VERSION;
+ bcm_info_tlv = (bcm_xtlv_t *)(buf + OFFSETOF(wl_wsec_info_t, tlvs));
+
+ wsec_info->num_tlvs++;
+
+ bcm_xtlv_pack_xtlv(bcm_info_tlv, tag, data_len, (const u8*)data, 0);
+ param_len = OFFSETOF(wl_wsec_info_t, tlvs) + WL_WSEC_INFO_TLV_HDR_LEN + data_len;
+
+ err = wldev_iovar_setbuf_bsscfg(dev, "wsec_info", wsec_info, param_len,
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+ if (unlikely(err) && (err != BCME_UNSUPPORTED)) {
+ WL_ERR(("set wsec_info error (%d)\n", err));
+ }
+
+exit:
+ if (buf)
+ MFREE(cfg->osh, buf, sizeof(wl_wsec_info_t) + data_len);
+ return err;
+}
+#endif /* SAE */
+
+#ifdef MFP
+static s32
+wl_cfg80211_set_mfp(struct bcm_cfg80211 *cfg,
+ struct net_device *dev,
+ struct cfg80211_connect_params *sme)
+{
+ s32 mfp = WL_MFP_NONE;
+ s32 current_mfp = WL_MFP_NONE;
+ const bcm_tlv_t *wpa2_ie;
+ const u8* rsn_cap = NULL;
+ bool fw_support = false;
+ int err, count = 0;
+ const u8 *eptr = NULL, *ptr = NULL;
+ const u8* group_mgmt_cs = NULL;
+ const wpa_pmkid_list_t* pmkid = NULL;
+ struct wl_security *sec = wl_read_prof(cfg, dev, WL_PROF_SEC);
+
+ if (!sme) {
+ /* No connection params from userspace, Do nothing. */
+ return 0;
+ }
+
+	/* Check fw support and retrieve the current mfp val */
+ err = wldev_iovar_getint(dev, "mfp", &current_mfp);
+ if (!err) {
+ fw_support = true;
+ }
+
+	/* Parse the wpa2ie to decode the MFP capability */
+ if (((wpa2_ie = bcm_parse_tlvs((const u8 *)sme->ie, sme->ie_len,
+ DOT11_MNG_RSN_ID)) != NULL) &&
+ (wl_cfg80211_get_rsn_capa(wpa2_ie, &rsn_cap) == 0) && rsn_cap) {
+ WL_DBG(("rsn_cap 0x%x%x\n", rsn_cap[0], rsn_cap[1]));
+ /* Check for MFP cap in the RSN capability field */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0))
+ if (sme->mfp)
+#endif
+ {
+ if (rsn_cap[0] & RSN_CAP_MFPR) {
+ mfp = WL_MFP_REQUIRED;
+ } else if (rsn_cap[0] & RSN_CAP_MFPC) {
+ mfp = WL_MFP_CAPABLE;
+ }
+ }
+		/*
+		 * eptr --> address of the end/last byte of wpa2_ie
+		 * ptr  --> tracks the current byte address of interest
+		 */
+ eptr = (const u8*)wpa2_ie + (wpa2_ie->len + TLV_HDR_LEN);
+		/* point ptr at the next byte after rsn_cap */
+ ptr = (const u8*)rsn_cap + RSN_CAP_LEN;
+ if (mfp && (eptr - ptr) >= WPA2_PMKID_COUNT_LEN) {
+			/* pmkid now points to the first byte of the pmkid list in wpa2_ie */
+ pmkid = (const wpa_pmkid_list_t*)ptr;
+ count = pmkid->count.low | (pmkid->count.high << 8);
+			/* ptr now points past the last byte of the pmkid list */
+ ptr = (const u8*)pmkid + (count * WPA2_PMKID_LEN
+ + WPA2_PMKID_COUNT_LEN);
+ if ((eptr - ptr) >= WPA_SUITE_LEN) {
+				/* group_mgmt_cs now points to the first byte of the BIP suite */
+ group_mgmt_cs = ptr;
+ }
+ }
+ }
+
+ WL_DBG(("mfp:%d wpa2_ie ptr:%p mfp fw_support:%d\n",
+ mfp, wpa2_ie, fw_support));
+
+ if (fw_support == false) {
+ if (mfp == WL_MFP_REQUIRED) {
+			/* mfp capability was set in the wpa ie, but
+			 * FW indicated an error for mfp. Propagate the error up.
+			 */
+			WL_ERR(("mfp capability found in wpaie but fw doesn't "
+				"seem to support MFP\n"));
+ err = -EINVAL;
+ goto exit;
+ } else {
+ /* Firmware doesn't support mfp. But since connection request
+ * is for non-mfp case, don't bother.
+ */
+ err = BCME_OK;
+ goto exit;
+ }
+ } else if (mfp != current_mfp) {
+		/* Some FW branches report error (-5) during MFP set if the BSS
+ * is up (roam case). Typically in roaming cases, the MFP
+ * configuration doesn't change. So in roam/reassoc cases, there is
+ * no need to update the fw state. If we still hit corner cases
+ * throwing (-5) error, we need to pull in RB:59117.
+ */
+ err = wldev_iovar_setint(dev, "mfp", mfp);
+ if (unlikely(err)) {
+ WL_ERR(("mfp (%d) set failed ret:%d \n", mfp, err));
+ goto exit;
+ }
+ WL_DBG_MEM(("[%s] wl mfp 0x%x\n", dev->name, mfp));
+ }
+
+ if (sec) {
+ sec->fw_mfp = mfp;
+ }
+
+ if (group_mgmt_cs && bcmp((const uint8 *)WPA2_OUI,
+ group_mgmt_cs, (WPA_SUITE_LEN - 1)) == 0) {
+ WL_DBG(("BIP is found\n"));
+ err = wldev_iovar_setbuf(dev, "bip",
+ group_mgmt_cs, WPA_SUITE_LEN, cfg->ioctl_buf,
+ WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync);
+			/*
+			 * Don't return failure for unsupported cases
+			 * of the bip iovar, for backward compatibility
+			 */
+ if (err != BCME_UNSUPPORTED && err < 0) {
+ WL_ERR(("bip set error (%d)\n", err));
+
+#ifdef CUSTOMER_HW6
+ if (wl_customer6_legacy_chip_check(cfg,
+ bcmcfg_to_prmry_ndev(cfg))) {
+					/* Ignore bip error: Some older firmwares don't
+					 * support the bip iovar / return BCME_NOTUP when
+					 * trying to set bip from connect context. These
+					 * firmwares include bip in the RSNIE by default,
+					 * so it's okay to ignore the error.
+					 */
+ err = BCME_OK;
+ goto exit;
+ } else
+#endif /* CUSTOMER_HW6 */
+
+ {
+ goto exit;
+ }
+ } else {
+ WL_INFORM_MEM(("[%s] wl bip %02X:%02X:%02X\n",
+ dev->name, group_mgmt_cs[0], group_mgmt_cs[1],
+ group_mgmt_cs[2]));
+ }
+ }
+exit:
+ if (err) {
+ wl_flush_fw_log_buffer(bcmcfg_to_prmry_ndev(cfg),
+ FW_LOGSET_MASK_ALL);
+ }
+
+ return 0;
+}
+#endif /* MFP */
+
+#ifdef WL_FILS
+bool
+wl_is_fils_supported(struct net_device *ndev)
+{
+ s32 err;
+ u8 ioctl_buf[WLC_IOCTL_SMLEN] = {0};
+ bcm_iov_buf_t *iov_buf = (bcm_iov_buf_t *)ioctl_buf;
+
+ iov_buf->version = WL_FILS_IOV_VERSION;
+ err = wldev_iovar_getbuf(ndev, "fils", (uint8*)iov_buf, sizeof(bcm_iov_buf_t),
+ iov_buf, WLC_IOCTL_SMLEN, NULL);
+ if (err == BCME_UNSUPPORTED) {
+ WL_DBG(("FILS NOT supported\n"));
+ return false;
+ }
+
+ WL_INFORM(("FILS supported\n"));
+ return true;
+}
+
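+/*
+ * Push FILS ERP connect parameters (username, realm, rRK, next sequence
+ * number) to the firmware as 32-bit-aligned xtlvs inside a "fils" iov
+ * buffer; a no-op for non-FILS auth types.
+ */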
+#define WL_NUM_OF_TLV_IN_SET_FILS_PARAMS 4u
+static s32
+wl_set_fils_params(struct net_device *dev, struct cfg80211_connect_params *sme)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ bcm_iov_buf_t *iov_buf = NULL;
+ bcm_xtlvbuf_t tbuf;
+ s32 err = BCME_OK;
+ uint32 buf_size;
+
+ if ((sme->auth_type != NL80211_AUTHTYPE_FILS_SK) &&
+ (sme->auth_type != NL80211_AUTHTYPE_FILS_SK_PFS) &&
+ (sme->auth_type != NL80211_AUTHTYPE_FILS_PK)) {
+ return BCME_OK;
+ }
+ if (sme->fils_erp_rrk_len > WL_MAX_FILS_KEY_LEN) {
+		WL_ERR(("%s: FILS rRK exceeds allowed size\n", __FUNCTION__));
+ err = BCME_BADARG;
+ goto exit;
+ }
+ /* Check incoming buffer length */
+ buf_size = sme->fils_erp_username_len + sme->fils_erp_realm_len + sme->fils_erp_rrk_len +
+ sizeof(sme->fils_erp_next_seq_num) +
+ WL_NUM_OF_TLV_IN_SET_FILS_PARAMS * BCM_XTLV_HDR_SIZE_EX(BCM_XTLV_OPTION_ALIGN32) +
+ sizeof(bcm_iov_buf_t) - 1u;
+
+ if (buf_size > WLC_IOCTL_SMLEN) {
+ WL_ERR(("%s: FILS connect params arguments exceed allowed size\n", __FUNCTION__));
+ err = BCME_BADARG;
+ goto exit;
+ }
+ iov_buf = MALLOCZ(cfg->osh, WLC_IOCTL_SMLEN);
+ if (!iov_buf) {
+ WL_ERR(("%s: iov_buf alloc failed! %d bytes\n", __FUNCTION__, WLC_IOCTL_SMLEN));
+ err = BCME_NOMEM;
+ goto exit;
+ }
+ iov_buf->version = WL_FILS_IOV_VERSION;
+ iov_buf->id = WL_FILS_CMD_ADD_CONNECT_PARAMS;
+ /* check if this should be len w/o headers */
+ err = bcm_xtlv_buf_init(&tbuf, (uint8*)&iov_buf->data[0],
+ WLC_IOCTL_SMLEN - sizeof(bcm_iov_buf_t) + sizeof(uint16),
+ BCM_XTLV_OPTION_ALIGN32);
+ if (err != BCME_OK) {
+ WL_ERR(("%s: xtlv_context initialization failed\n", __FUNCTION__));
+ goto exit;
+ }
+ if (sme->fils_erp_username_len && sme->fils_erp_username != NULL) {
+ err = bcm_xtlv_put_data(&tbuf, WL_FILS_XTLV_ERP_USERNAME,
+ sme->fils_erp_username, sme->fils_erp_username_len);
+ if (err != BCME_OK) {
+ WL_ERR(("%s: write xtlv failed\n", __FUNCTION__));
+ goto exit;
+ }
+ }
+ if (sme->fils_erp_realm_len && sme->fils_erp_realm != NULL) {
+ err = bcm_xtlv_put_data(&tbuf, WL_FILS_XTLV_ERP_REALM,
+ sme->fils_erp_realm, sme->fils_erp_realm_len);
+ if (err != BCME_OK) {
+ WL_ERR(("%s: write xtlv failed\n", __FUNCTION__));
+ goto exit;
+ }
+ }
+ if (sme->fils_erp_rrk_len && sme->fils_erp_rrk != NULL) {
+ err = bcm_xtlv_put_data(&tbuf, WL_FILS_XTLV_ERP_RRK,
+ sme->fils_erp_rrk, sme->fils_erp_rrk_len);
+ if (err != BCME_OK) {
+ WL_ERR(("%s: write xtlv failed\n", __FUNCTION__));
+ goto exit;
+ }
+ }
+ err = bcm_xtlv_put_data(&tbuf, WL_FILS_XTLV_ERP_NEXT_SEQ_NUM,
+ (u8 *)&sme->fils_erp_next_seq_num, sizeof(sme->fils_erp_next_seq_num));
+ if (err != BCME_OK) {
+ WL_ERR(("%s: write xtlv failed\n", __FUNCTION__));
+ goto exit;
+ }
+ iov_buf->len = bcm_xtlv_buf_len(&tbuf);
+ err = wldev_iovar_setbuf(dev, "fils", iov_buf, iov_buf->len + sizeof(bcm_iov_buf_t) -
+ sizeof(uint16), cfg->ioctl_buf, WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync);
+ if (unlikely(err)) {
+ WL_ERR(("set fils params ioctl error (%d)\n", err));
+ goto exit;
+ }
+
+exit:
+ if (err != BCME_OK) {
+ WL_ERR(("set FILS params error %d\n", err));
+ }
+ else {
+		WL_DBG_MEM(("FILS parameters successfully applied\n"));
+ }
+ if (iov_buf) {
+ MFREE(cfg->osh, iov_buf, WLC_IOCTL_SMLEN);
+ }
+ return err;
+}
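+
+/*
+ * Editor's note: an illustrative, standalone sketch of the 32-bit-aligned
+ * TLV packing that wl_set_fils_params() sizes its buffer for (one aligned
+ * XTLV header per field plus the iov header). All names and the 4-byte
+ * header layout below are local to the example, not the real bcm_xtlv API.
+ */
+#if 0 /* example only, excluded from the build */
+#include <stdint.h>
+#include <string.h>
+#include <stdio.h>
+
+#define EX_XTLV_HDR	4u			/* 2-byte id + 2-byte len */
+#define EX_ALIGN4(x)	(((x) + 3u) & ~3u)	/* BCM_XTLV_OPTION_ALIGN32 */
+
+/* Append one TLV and return the new (aligned) write offset. */
+static size_t ex_put_tlv(uint8_t *buf, size_t off, uint16_t id,
+	const void *data, uint16_t len)
+{
+	memcpy(buf + off, &id, sizeof(id));
+	memcpy(buf + off + 2, &len, sizeof(len));
+	memcpy(buf + off + EX_XTLV_HDR, data, len);
+	return off + EX_ALIGN4(EX_XTLV_HDR + len);
+}
+
+int main(void)
+{
+	uint8_t buf[256];
+	size_t off = 0;
+	const char user[] = "user@realm";
+	uint16_t seq = 1;
+
+	off = ex_put_tlv(buf, off, 1 /* username */, user, sizeof(user) - 1);
+	off = ex_put_tlv(buf, off, 4 /* seq num */, &seq, sizeof(seq));
+	printf("packed %zu bytes\n", off);
+	return 0;
+}
+#endif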
+
+#if !defined(WL_FILS_ROAM_OFFLD) && defined(WL_FILS)
+static s32
+wl_get_bcn_timeout(struct net_device *dev, u32 *bcn_timeout)
+{
+ s32 err = 0;
+
+ err = wldev_iovar_getint(dev, "bcn_timeout", bcn_timeout);
+ if (unlikely(err)) {
+ WL_ERR(("could not get bcn_timeout (%d)\n", err));
+ }
+ return err;
+}
+
+#define WL_ROAM_ENABLE 0
+#define WL_ROAM_DISABLE 1
+/* Beacon timeout (in units of beacon loss) when FILS roaming offload is not supported by fw */
+#define WL_BCN_TIMEOUT 3
+
+static s32
+wl_fils_toggle_roaming(struct net_device *dev, u32 auth_type)
+{
+ s32 err = 0;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+
+ if (WPA2_AUTH_IS_FILS(auth_type) && !cfg->fils_info.fils_roam_disabled) {
+ err = wl_get_bcn_timeout(dev, &cfg->fils_info.fils_bcn_timeout_cache);
+ if (unlikely(err)) {
+ return err;
+ }
+ wl_dongle_roam(dev, WL_ROAM_DISABLE, WL_BCN_TIMEOUT);
+ cfg->fils_info.fils_roam_disabled = true;
+ WL_DBG_MEM(("fw roam disabled for FILS akm\n"));
+ } else if (cfg->fils_info.fils_roam_disabled) {
+ /* Enable roaming back for other auth types */
+ wl_dongle_roam(dev, WL_ROAM_ENABLE, cfg->fils_info.fils_bcn_timeout_cache);
+ cfg->fils_info.fils_roam_disabled = false;
+ WL_DBG_MEM(("fw roam enabled\n"));
+ }
+ return err;
+}
+#endif /* !WL_FILS_ROAM_OFFLD && WL_FILS */
+#endif /* WL_FILS */
+
+static s32
+wl_set_key_mgmt(struct net_device *dev, struct cfg80211_connect_params *sme)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ struct wl_security *sec;
+ s32 val = 0;
+ s32 err = 0;
+ s32 bssidx;
+
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+ WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
+ return BCME_ERROR;
+ }
+
+ if (sme->crypto.n_akm_suites) {
+ err = wldev_iovar_getint(dev, "wpa_auth", &val);
+ if (unlikely(err)) {
+ WL_ERR(("could not get wpa_auth (%d)\n", err));
+ return err;
+ }
+ if (val & (WPA_AUTH_PSK |
+ WPA_AUTH_UNSPECIFIED)) {
+ switch (sme->crypto.akm_suites[0]) {
+ case WLAN_AKM_SUITE_8021X:
+ val = WPA_AUTH_UNSPECIFIED;
+ break;
+ case WLAN_AKM_SUITE_PSK:
+ val = WPA_AUTH_PSK;
+ break;
+ default:
+ WL_ERR(("invalid akm suite (0x%x)\n",
+ sme->crypto.akm_suites[0]));
+ return -EINVAL;
+ }
+ } else if (val & (WPA2_AUTH_PSK |
+ WPA2_AUTH_UNSPECIFIED)) {
+ switch (sme->crypto.akm_suites[0]) {
+#ifdef MFP
+
+#ifdef CUSTOMER_HW6
+ case WL_AKM_SUITE_SHA256_1X:
+ if (wl_customer6_legacy_chip_check(cfg, dev)) {
+ val = WPA2_AUTH_UNSPECIFIED;
+ } else {
+ val = WPA2_AUTH_1X_SHA256;
+ }
+ break;
+ case WL_AKM_SUITE_SHA256_PSK:
+ if (wl_customer6_legacy_chip_check(cfg, dev)) {
+ val = WPA2_AUTH_PSK;
+ } else {
+ val = WPA2_AUTH_PSK_SHA256;
+ }
+ break;
+#endif /* CUSTOMER_HW6 */
+
+#ifndef CUSTOMER_HW6
+ case WL_AKM_SUITE_SHA256_1X:
+ val = WPA2_AUTH_1X_SHA256;
+ break;
+ case WL_AKM_SUITE_SHA256_PSK:
+ val = WPA2_AUTH_PSK_SHA256;
+ break;
+#endif /* !CUSTOMER_HW6 */
+#endif /* MFP */
+ case WLAN_AKM_SUITE_8021X:
+ case WLAN_AKM_SUITE_PSK:
+#if defined(WLFBT) && defined(WLAN_AKM_SUITE_FT_8021X)
+ case WLAN_AKM_SUITE_FT_8021X:
+#endif
+#if defined(WLFBT) && defined(WLAN_AKM_SUITE_FT_PSK)
+ case WLAN_AKM_SUITE_FT_PSK:
+#endif
+ case WLAN_AKM_SUITE_FILS_SHA256:
+ case WLAN_AKM_SUITE_FILS_SHA384:
+ case WLAN_AKM_SUITE_8021X_SUITE_B:
+ case WLAN_AKM_SUITE_8021X_SUITE_B_192:
+#ifdef WL_OWE
+ case WLAN_AKM_SUITE_OWE:
+#endif /* WL_OWE */
+#if defined(WL_SAE) || defined(WL_CLIENT_SAE)
+ case WLAN_AKM_SUITE_SAE:
+#endif /* WL_SAE || WL_CLIENT_SAE */
+#ifdef WL_SAE_FT
+ case WLAN_AKM_SUITE_FT_OVER_SAE:
+#endif /* WL_SAE_FT */
+ case WLAN_AKM_SUITE_DPP:
+ case WLAN_AKM_SUITE_FT_8021X_SHA384:
+ val = wl_rsn_akm_wpa_auth_lookup(sme->crypto.akm_suites[0]);
+ break;
+ case WLAN_AKM_SUITE_FT_FILS_SHA256:
+ val = WPA2_AUTH_FILS_SHA256 | WPA2_AUTH_FT;
+ break;
+ case WLAN_AKM_SUITE_FT_FILS_SHA384:
+ val = WPA2_AUTH_FILS_SHA384 | WPA2_AUTH_FT;
+ break;
+ default:
+ WL_ERR(("invalid akm suite (0x%x)\n",
+ sme->crypto.akm_suites[0]));
+ return -EINVAL;
+ }
+ }
+
+#ifdef BCMWAPI_WPI
+ else if (val & (WAPI_AUTH_PSK | WAPI_AUTH_UNSPECIFIED)) {
+ switch (sme->crypto.akm_suites[0]) {
+ case WLAN_AKM_SUITE_WAPI_CERT:
+ val = WAPI_AUTH_UNSPECIFIED;
+ break;
+ case WLAN_AKM_SUITE_WAPI_PSK:
+ val = WAPI_AUTH_PSK;
+ break;
+ default:
+ WL_ERR(("invalid akm suite (0x%x)\n",
+ sme->crypto.akm_suites[0]));
+ return -EINVAL;
+ }
+ }
+#endif
+
+#ifdef WL_FILS
+#if !defined(WL_FILS_ROAM_OFFLD)
+ err = wl_fils_toggle_roaming(dev, val);
+ if (unlikely(err)) {
+ return err;
+ }
+#endif /* !WL_FILS_ROAM_OFFLD */
+#endif /* WL_FILS */
+
+#ifdef MFP
+ if ((err = wl_cfg80211_set_mfp(cfg, dev, sme)) < 0) {
+ WL_ERR(("MFP set failed err:%d\n", err));
+ return -EINVAL;
+ }
+#endif /* MFP */
+
+ WL_DBG_MEM(("[%s] wl wpa_auth to 0x%x\n", dev->name, val));
+ err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", val, bssidx);
+ if (unlikely(err)) {
+ WL_ERR(("could not set wpa_auth (0x%x)\n", err));
+ return err;
+ }
+ }
+ sec = wl_read_prof(cfg, dev, WL_PROF_SEC);
+ sec->wpa_auth = sme->crypto.akm_suites[0];
+ sec->fw_wpa_auth = val;
+
+ return err;
+}
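+
+/*
+ * Editor's sketch: wl_rsn_akm_wpa_auth_lookup() above replaces part of the
+ * switch with a table walk. A standalone shape of that idea; the table
+ * type and names here are made up for illustration, not the driver's.
+ */
+#if 0 /* example only */
+#include <stdint.h>
+#include <stddef.h>
+
+struct ex_akm_map { uint32_t akm_suite; uint32_t wpa_auth; };
+
+static uint32_t ex_akm_lookup(const struct ex_akm_map *tbl, size_t n,
+	uint32_t suite, uint32_t dflt)
+{
+	size_t i;
+	for (i = 0; i < n; i++) {
+		if (tbl[i].akm_suite == suite)
+			return tbl[i].wpa_auth;
+	}
+	return dflt; /* unknown suite: caller decides how to fail */
+}
+#endif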
+
+static s32
+wl_set_set_sharedkey(struct net_device *dev,
+ struct cfg80211_connect_params *sme)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ struct wl_security *sec;
+ struct wl_wsec_key key;
+ s32 val;
+ s32 err = 0;
+ s32 bssidx;
+
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+ WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
+ return BCME_ERROR;
+ }
+
+ WL_DBG(("key len (%d)\n", sme->key_len));
+ if (sme->key_len) {
+ sec = wl_read_prof(cfg, dev, WL_PROF_SEC);
+ WL_DBG(("wpa_versions 0x%x cipher_pairwise 0x%x\n",
+ sec->wpa_versions, sec->cipher_pairwise));
+ if (!(sec->wpa_versions & (NL80211_WPA_VERSION_1 |
+ NL80211_WPA_VERSION_2)) &&
+
+#ifdef BCMWAPI_WPI
+ !is_wapi(sec->cipher_pairwise) &&
+#endif
+
+ (sec->cipher_pairwise & (WLAN_CIPHER_SUITE_WEP40 |
+ WLAN_CIPHER_SUITE_WEP104)))
+ {
+ bzero(&key, sizeof(key));
+ key.len = (u32) sme->key_len;
+ key.index = (u32) sme->key_idx;
+ if (unlikely(key.len > sizeof(key.data))) {
+				WL_ERR(("Key length (%u) too long\n", key.len));
+ return -EINVAL;
+ }
+ memcpy(key.data, sme->key, key.len);
+ key.flags = WL_PRIMARY_KEY;
+ if ((sec->cipher_pairwise == WLAN_CIPHER_SUITE_WEP40) ||
+ (sec->cipher_pairwise == WLAN_CIPHER_SUITE_WEP104)) {
+ key.algo = wl_rsn_cipher_wsec_key_algo_lookup(sec->cipher_pairwise);
+ } else {
+ WL_ERR(("Invalid algorithm (%d)\n",
+ sme->crypto.ciphers_pairwise[0]));
+ return -EINVAL;
+ }
+ /* Set the new key/index */
+ WL_DBG(("key length (%d) key index (%d) algo (%d)\n",
+ key.len, key.index, key.algo));
+ WL_DBG(("key \"%s\"\n", key.data));
+ swap_key_from_BE(&key);
+ err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key),
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+ if (unlikely(err)) {
+ WL_ERR(("WLC_SET_KEY error (%d)\n", err));
+ return err;
+ }
+ WL_INFORM_MEM(("key applied to fw\n"));
+ if (sec->auth_type == NL80211_AUTHTYPE_SHARED_KEY) {
+ WL_DBG(("set auth_type to shared key\n"));
+ val = WL_AUTH_SHARED_KEY; /* shared key */
+ err = wldev_iovar_setint_bsscfg(dev, "auth", val, bssidx);
+ if (unlikely(err)) {
+ WL_ERR(("set auth failed (%d)\n", err));
+ return err;
+ }
+ }
+ }
+ }
+ return err;
+}
+
+#if defined(CUSTOM_SET_CPUCORE) || defined(CONFIG_TCPACK_FASTTX)
+static bool wl_get_chan_isvht80(struct net_device *net, dhd_pub_t *dhd)
+{
+ u32 chanspec = 0;
+ bool isvht80 = 0;
+
+ if (wldev_iovar_getint(net, "chanspec", (s32 *)&chanspec) == BCME_OK)
+ chanspec = wl_chspec_driver_to_host(chanspec);
+
+ isvht80 = chanspec & WL_CHANSPEC_BW_80;
+ WL_DBG(("wl_get_chan_isvht80: chanspec(%x:%d)\n", chanspec, isvht80));
+
+ return isvht80;
+}
+#endif /* CUSTOM_SET_CPUCORE || CONFIG_TCPACK_FASTTX */
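+
+/*
+ * Editor's sketch: a chanspec packs channel, band and bandwidth into one
+ * 16-bit word, and the check above tests the bandwidth field. Standalone
+ * illustration; the field masks below are assumed values for the example,
+ * not the authoritative chanspec layout.
+ */
+#if 0 /* example only */
+#include <stdint.h>
+#include <stdbool.h>
+
+#define EX_BW_MASK	0x3800u	/* assumed bandwidth field */
+#define EX_BW_80	0x2000u	/* assumed 80 MHz encoding */
+
+static bool ex_is_80mhz(uint16_t chanspec)
+{
+	/* compare the whole field, not a single bit */
+	return (chanspec & EX_BW_MASK) == EX_BW_80;
+}
+#endif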
+
+int wl_cfg80211_cleanup_mismatch_status(struct net_device *dev, struct bcm_cfg80211 *cfg,
+ bool disassociate)
+{
+ scb_val_t scbval;
+ int err = TRUE;
+ int wait_cnt;
+
+ if (disassociate) {
+#ifdef BCMDONGLEHOST
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+ BCM_REFERENCE(dhdp);
+ DHD_STATLOG_CTRL(dhdp, ST(DISASSOC_INT_START),
+ dhd_net2idx(dhdp->info, dev), DOT11_RC_DISASSOC_LEAVING);
+#endif /* BCMDONGLEHOST */
+ WL_ERR(("Disassociate previous connection!\n"));
+ wl_set_drv_status(cfg, DISCONNECTING, dev);
+ scbval.val = DOT11_RC_DISASSOC_LEAVING;
+ scbval.val = htod32(scbval.val);
+
+ err = wldev_ioctl_set(dev, WLC_DISASSOC, &scbval,
+ sizeof(scb_val_t));
+ if (unlikely(err)) {
+ wl_clr_drv_status(cfg, DISCONNECTING, dev);
+ WL_ERR(("error (%d)\n", err));
+ return err;
+ }
+ wait_cnt = 500/10;
+ } else {
+ wait_cnt = 200/10;
+ WL_ERR(("Waiting for previous DISCONNECTING status!\n"));
+ if (wl_get_drv_status(cfg, DISCONNECTING, dev)) {
+ wl_clr_drv_status(cfg, DISCONNECTING, dev);
+ }
+ }
+
+ while (wl_get_drv_status(cfg, DISCONNECTING, dev) && wait_cnt) {
+ WL_DBG(("Waiting for disconnection terminated, wait_cnt: %d\n",
+ wait_cnt));
+ wait_cnt--;
+ OSL_SLEEP(10);
+ }
+
+ if (wait_cnt == 0) {
+		WL_ERR(("DISCONNECTING clean up failed!\n"));
+ /* Clear DISCONNECTING driver status as we have made sufficient attempts
+ * for driver clean up.
+ */
+ wl_clr_drv_status(cfg, DISCONNECTING, dev);
+ wl_clr_drv_status(cfg, CONNECTED, dev);
+ return BCME_NOTREADY;
+ }
+ return BCME_OK;
+}
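+
+/*
+ * Editor's sketch: the loop above is a poll-with-timeout, budgeted as
+ * total_ms / poll_ms iterations (500/10 for the disassoc path, 200/10
+ * otherwise). A generic standalone shape of that pattern:
+ */
+#if 0 /* example only */
+#include <stdbool.h>
+
+/* Returns true if cond(ctx) cleared within budget_ms, polling every poll_ms. */
+static bool ex_wait_until_clear(bool (*cond)(void *), void *ctx,
+	int budget_ms, int poll_ms)
+{
+	int tries = budget_ms / poll_ms;
+	while (cond(ctx) && tries) {
+		tries--;
+		/* the driver sleeps here via OSL_SLEEP(poll_ms) */
+	}
+	return !cond(ctx);
+}
+#endif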
+
+#ifdef WL_FILS
+static int
+wl_fils_add_hlp_container(struct bcm_cfg80211 *cfg, struct net_device *dev,
+ const uint8* ie_buf, uint16 ie_len)
+{
+ const bcm_tlv_ext_t *hlp_ie;
+
+ if ((hlp_ie = (const bcm_tlv_ext_t*)bcm_parse_tlvs_dot11((const uint8 *)ie_buf, ie_len,
+ FILS_HLP_CONTAINER_EXT_ID, TRUE))) {
+ u16 hlp_len = hlp_ie->len;
+ u16 left_len = (ie_len - ((const uint8*)hlp_ie - ie_buf));
+ bcm_iov_buf_t *iov_buf = 0;
+ uint8* pxtlv;
+ int err;
+ size_t iov_buf_len;
+ bcm_tlv_dot11_frag_tot_len(ie_buf, ie_len, FILS_HLP_CONTAINER_EXT_ID,
+ TRUE, (uint*)&hlp_len);
+
+ hlp_len += BCM_TLV_EXT_HDR_SIZE;
+
+ if ((hlp_len > DOT11_MAX_MPDU_BODY_LEN) || (hlp_len > left_len)) {
+ WL_ERR(("bad HLP length %d\n", hlp_len));
+ return EFAULT;
+ }
+ iov_buf_len = sizeof(bcm_iov_buf_t) + sizeof(bcm_xtlv_t) - 1 + hlp_len;
+ iov_buf = MALLOCZ(cfg->osh, iov_buf_len);
+ if (iov_buf == NULL) {
+			WL_ERR(("failed to allocate iov_buf\n"));
+ return ENOMEM;
+ }
+
+ prhex("HLP, HLP", (const uchar *)hlp_ie, hlp_len);
+
+ pxtlv = (uint8 *)&iov_buf->data[0];
+ ((bcm_xtlv_t*)pxtlv)->id = WL_FILS_XTLV_HLP_IE;
+ ((bcm_xtlv_t*)pxtlv)->len = hlp_len;
+
+ memcpy(((bcm_xtlv_t*)pxtlv)->data, hlp_ie, ((bcm_xtlv_t*)pxtlv)->len);
+
+ iov_buf->version = WL_FILS_IOV_VERSION;
+ iov_buf->id = WL_FILS_CMD_ADD_HLP_IE;
+ iov_buf->len = ((sizeof(bcm_xtlv_t)-1) + ((bcm_xtlv_t*)pxtlv)->len);
+
+ err = wldev_iovar_setbuf(dev, "fils", iov_buf,
+ sizeof(bcm_iov_buf_t) + iov_buf->len,
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+ if (unlikely(err)) {
+ WL_ERR(("fils wldev_iovar_setbuf error (%d)\n", err));
+ }
+ else {
+ WL_DBG_MEM(("FILS HLP Packet succesfully updated\n"));
+ }
+ MFREE(cfg->osh, iov_buf, iov_buf_len);
+ }
+ return BCME_OK;
+}
+#endif /* WL_FILS */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+#define UPDATE_ASSOC_IES BIT(0)
+#ifndef UPDATE_FILS_ERP_INFO
+#define UPDATE_FILS_ERP_INFO BIT(1)
+#define UPDATE_AUTH_TYPE BIT(2)
+#endif
+
+static int
+wl_cfg80211_update_connect_params(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_connect_params *sme, u32 changed)
+{
+ s32 err = BCME_OK;
+#if defined(WL_FILS)
+ if (changed & UPDATE_FILS_ERP_INFO) {
+ err = wl_set_fils_params(dev, sme);
+ if (unlikely(err)) {
+ WL_ERR(("Invalid FILS params\n"));
+ goto exit;
+ }
+ if (!(changed & UPDATE_AUTH_TYPE)) {
+			WL_DBG(("Warning: FILS ERP params are set, "
+				"but authentication type is not\n"));
+ }
+ }
+ if (changed & UPDATE_AUTH_TYPE) {
+ err = wl_set_auth_type(dev, sme);
+ if (unlikely(err)) {
+ WL_ERR(("Invalid auth type\n"));
+ goto exit;
+ }
+ }
+#endif /* WL_FILS */
+ if (changed & UPDATE_ASSOC_IES) {
+ WL_DBG(("update assoc ies\n"));
+ err = wl_cfg80211_set_mgmt_vndr_ies(wl_get_cfg(dev), ndev_to_cfgdev(dev),
+ wl_get_bssidx_by_wdev(wl_get_cfg(dev), dev->ieee80211_ptr),
+ VNDR_IE_ASSOCREQ_FLAG, sme->ie, sme->ie_len);
+ if (err) {
+ WL_ERR(("error updating vndr ies\n"));
+ goto exit;
+ }
+ }
+exit:
+ return err;
+}
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0) */
+
+#if defined(ROAM_ENABLE) && defined(ROAM_AP_ENV_DETECTION)
+static s32
+wl_config_roam_env_detection(struct bcm_cfg80211 *cfg, struct net_device *dev)
+{
+#if defined(BCMDONGLEHOST)
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+#endif /* BCMDONGLEHOST */
+ s32 roam_trigger[2] = {0, 0};
+ s32 err = BCME_OK;
+
+ if (dhdp->roam_env_detection && (dev == bcmcfg_to_prmry_ndev(cfg))) {
+ bool is_roamtrig_reset = TRUE;
+ bool is_roam_env_ok = (wldev_iovar_setint(dev, "roam_env_detection",
+ AP_ENV_DETECT_NOT_USED) == BCME_OK);
+#ifdef SKIP_ROAM_TRIGGER_RESET
+ roam_trigger[1] = WLC_BAND_2G;
+ is_roamtrig_reset =
+ (wldev_ioctl_get(dev, WLC_GET_ROAM_TRIGGER, roam_trigger,
+ sizeof(roam_trigger)) == BCME_OK) &&
+ (roam_trigger[0] == WL_AUTO_ROAM_TRIGGER-10);
+#endif /* SKIP_ROAM_TRIGGER_RESET */
+ if (is_roamtrig_reset && is_roam_env_ok) {
+ roam_trigger[0] = WL_AUTO_ROAM_TRIGGER;
+ roam_trigger[1] = WLC_BAND_ALL;
+ err = wldev_ioctl_set(dev, WLC_SET_ROAM_TRIGGER, roam_trigger,
+ sizeof(roam_trigger));
+ if (unlikely(err)) {
+ WL_ERR((" failed to restore roam_trigger for auto env"
+ " detection. err:%d\n", err));
+ }
+ }
+ }
+ return err;
+}
+#endif /* ROAM_ENABLE && ROAM_AP_ENV_DETECTION */
+
+s32
+wl_do_preassoc_ops(struct bcm_cfg80211 *cfg,
+ struct net_device *dev, struct cfg80211_connect_params *sme)
+{
+#if defined(BCMDONGLEHOST)
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+#endif /* BCMDONGLEHOST */
+
+#ifdef BCMDONGLEHOST
+ BCM_REFERENCE(dhdp);
+ DHD_STATLOG_CTRL(dhdp, ST(ASSOC_START), dhd_net2idx(dhdp->info, dev), 0);
+#endif /* BCMDONGLEHOST */
+
+#ifdef DHDTCPSYNC_FLOOD_BLK
+ dhd_reset_tcpsync_info_by_dev(dev);
+#endif /* DHDTCPSYNC_FLOOD_BLK */
+
+ if (wl_get_drv_status(cfg, SCANNING, dev)) {
+ wl_cfgscan_cancel_scan(cfg);
+ }
+
+#ifdef WL_SCHED_SCAN
+	/* Locks are taken in wl_cfg80211_sched_scan_stop().
+	 * A scan start occurring during connect is unlikely.
+	 */
+ if (cfg->sched_scan_req) {
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0))
+ wl_cfg80211_sched_scan_stop(wdev->wiphy, bcmcfg_to_prmry_ndev(cfg),
+ cfg->sched_scan_req->reqid);
+#else
+ wl_cfg80211_sched_scan_stop(wdev->wiphy, bcmcfg_to_prmry_ndev(cfg));
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) */
+ }
+#endif /* WL_SCHED_SCAN */
+#ifdef WL_CFG80211_GON_COLLISION
+ /* init block gon req count */
+ cfg->block_gon_req_tx_count = 0;
+ cfg->block_gon_req_rx_count = 0;
+#endif /* WL_CFG80211_GON_COLLISION */
+
+#if defined(USE_DYNAMIC_MAXPKT_RXGLOM)
+ maxrxpktglom = 0;
+#endif
+
+#if defined(ROAM_ENABLE) && defined(ROAM_AP_ENV_DETECTION)
+ if (wl_config_roam_env_detection(cfg, dev) != BCME_OK) {
+ return BCME_ERROR;
+ }
+#endif /* ROAM_ENABLE && ROAM_AP_ENV_DETECTION */
+
+#ifdef WLTDLS
+ /* disable TDLS if number of connected interfaces is >= 1 */
+ wl_cfg80211_tdls_config(cfg, TDLS_STATE_CONNECT, false);
+#endif /* WLTDLS */
+
+#ifdef SUPPORT_AP_BWCTRL
+ if (dhdp->op_mode & DHD_FLAG_HOSTAP_MODE) {
+ wl_restore_ap_bw(cfg);
+ }
+#endif /* SUPPORT_AP_BWCTRL */
+#if defined(ROAMEXP_SUPPORT)
+	/* Clear the blacklist bssid and whitelist ssid lists before issuing
+	 * the join. This is a temporary fix, since firmware roaming is
+	 * currently not disabled by the Android framework before an SSID
+	 * join from the framework.
+	 */
+ /* Flush blacklist bssid content */
+ dhd_dev_set_blacklist_bssid(dev, NULL, 0, true);
+ /* Flush whitelist ssid content */
+ dhd_dev_set_whitelist_ssid(dev, NULL, 0, true);
+#endif /* ROAMEXP_SUPPORT */
+
+ WL_DBG(("SME IE : len=%zu\n", sme->ie_len));
+ if (sme->ie != NULL && sme->ie_len > 0 && (wl_dbg_level & WL_DBG_DBG)) {
+ prhex(NULL, sme->ie, sme->ie_len);
+ }
+ /* Connection attempted via linux-wireless */
+ wl_set_drv_status(cfg, CFG80211_CONNECT, dev);
+ return BCME_OK;
+}
+
+static s32
+wl_config_assoc_security(struct bcm_cfg80211 *cfg,
+ struct net_device *dev, struct cfg80211_connect_params *sme)
+{
+ s32 err = BCME_OK;
+
+ err = wl_set_wpa_version(dev, sme);
+ if (unlikely(err)) {
+ WL_ERR(("Invalid wpa_version\n"));
+ goto exit;
+ }
+
+ err = wl_set_auth_type(dev, sme);
+ if (unlikely(err)) {
+ WL_ERR(("Invalid auth type\n"));
+ goto exit;
+ }
+
+#ifdef WL_FILS
+ if (sme->ie && sme->ie_len) {
+ err = wl_fils_add_hlp_container(cfg, dev, sme->ie, sme->ie_len);
+ if (unlikely(err)) {
+ WL_ERR(("FILS sending HLP failed\n"));
+ goto exit;
+ }
+ }
+#endif /* WL_FILS */
+
+ err = wl_set_set_cipher(dev, sme);
+ if (unlikely(err)) {
+		WL_ERR(("Invalid cipher\n"));
+ goto exit;
+ }
+
+ err = wl_set_key_mgmt(dev, sme);
+ if (unlikely(err)) {
+ WL_ERR(("Invalid key mgmt\n"));
+ goto exit;
+ }
+
+ err = wl_set_set_sharedkey(dev, sme);
+ if (unlikely(err)) {
+ WL_ERR(("Invalid shared key\n"));
+ goto exit;
+ }
+
+#ifdef WL_FILS
+ err = wl_set_fils_params(dev, sme);
+ if (unlikely(err)) {
+ WL_ERR(("Invalid FILS params\n"));
+ goto exit;
+ }
+#endif /* WL_FILS */
+
+exit:
+ return err;
+}
+
+static s32
+wl_config_assoc_ies(struct bcm_cfg80211 *cfg, struct net_device *dev,
+ struct cfg80211_connect_params *sme, wlcfg_assoc_info_t *info)
+{
+ const wpa_ie_fixed_t *wpa_ie;
+ const bcm_tlv_t *wpa2_ie;
+ const u8* wpaie = 0;
+ u32 wpaie_len;
+ s32 err;
+ s32 bssidx = info->bssidx;
+
+ /* configure all vendor and extended vendor IEs */
+ wl_cfg80211_set_mgmt_vndr_ies(cfg, ndev_to_cfgdev(dev), bssidx,
+ VNDR_IE_ASSOCREQ_FLAG, sme->ie, sme->ie_len);
+
+ /* Find the RSNXE_IE and plumb */
+ if ((err = wl_cfg80211_config_rsnxe_ie(cfg, dev,
+ (const u8*)sme->ie, sme->ie_len)) < 0) {
+ WL_ERR(("Failed to configure rsnxe ie: %d\n", err));
+ return err;
+ }
+
+ /* find the RSN_IE */
+ if ((wpa2_ie = bcm_parse_tlvs((const u8 *)sme->ie, sme->ie_len,
+ DOT11_MNG_RSN_ID)) != NULL) {
+ WL_DBG((" RSN IE is found\n"));
+ }
+
+ /* find the WPA_IE */
+ if ((wpa_ie = wl_cfgp2p_find_wpaie(sme->ie,
+ sme->ie_len)) != NULL) {
+ WL_DBG((" WPA IE is found\n"));
+ }
+
+ if (wpa_ie != NULL || wpa2_ie != NULL) {
+ wpaie = (wpa_ie != NULL) ? (const u8 *)wpa_ie : (const u8 *)wpa2_ie;
+ wpaie_len = (wpa_ie != NULL) ? wpa_ie->length : wpa2_ie->len;
+ wpaie_len += WPA_RSN_IE_TAG_FIXED_LEN;
+ } else {
+ wpaie = NULL;
+ wpaie_len = 0;
+ }
+
+ err = wldev_iovar_setbuf(dev, "wpaie", wpaie, wpaie_len,
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+ if (unlikely(err)) {
+ WL_ERR(("wpaie set error (%d)\n", err));
+ }
+
+ return err;
+}
+
+s32
+wl_cfg80211_config_rsnxe_ie(struct bcm_cfg80211 *cfg, struct net_device *dev,
+ const u8 *parse, u32 len)
+{
+ bcm_tlv_t *ie = NULL;
+ s32 err = 0;
+ u8 ie_len = 0;
+ char smbuf[WLC_IOCTL_SMLEN];
+
+ while ((ie = bcm_parse_tlvs(parse, len, DOT11_MNG_RSNXE_ID))) {
+ WL_DBG(("Found RSNXE ie\n"));
+ break;
+ }
+
+ ie_len = (ie != NULL) ? (ie->len + BCM_TLV_HDR_SIZE): 0;
+
+ err = wldev_iovar_setbuf(dev, "rsnxe", ie, ie_len,
+ smbuf, sizeof(smbuf), NULL);
+ if (!err) {
+ WL_DBG(("Configured RSNXE IE\n"));
+ } else if (err == BCME_UNSUPPORTED) {
+ WL_DBG(("FW does not support rsnxe iovar\n"));
+ err = BCME_OK;
+ } else {
+ WL_ERR(("rsnxe set error (%d)\n", err));
+ }
+ return err;
+}
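+
+/*
+ * Editor's sketch: bcm_parse_tlvs() above is a linear walk over
+ * id/len/value triplets. A minimal standalone version of that walk for
+ * 802.11-style IEs (1-byte id, 1-byte len), for illustration only:
+ */
+#if 0 /* example only */
+#include <stdint.h>
+#include <stddef.h>
+
+/* Return a pointer to the first IE with the given id, or NULL. */
+static const uint8_t *ex_find_ie(const uint8_t *buf, size_t len, uint8_t id)
+{
+	while (len >= 2) {
+		uint8_t ie_len = buf[1];
+		if (2u + ie_len > len)
+			return NULL;	/* truncated IE: stop */
+		if (buf[0] == id)
+			return buf;	/* header plus ie_len bytes of body */
+		buf += 2u + ie_len;
+		len -= 2u + ie_len;
+	}
+	return NULL;
+}
+#endif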
+
+static s32
+wl_fillup_assoc_params_v1(struct bcm_cfg80211 *cfg, struct net_device *dev,
+ void *params, u32 buf_len, wlcfg_assoc_info_t *info)
+{
+ chanspec_t *chanspecs = info->chanspecs;
+ u32 chan_cnt = info->chan_cnt;
+ u32 join_scan_active_time = 0;
+ wl_extjoin_params_v1_t *ext_join_params = (wl_extjoin_params_v1_t *)params;
+
+ if (buf_len < (sizeof(ext_join_params->ssid.SSID) +
+ (sizeof(chanspec_t) * chan_cnt))) {
+ WL_ERR(("buf too short\n"));
+ return -EINVAL;
+ }
+
+ if (info->bssid_hint) {
+ /* Set bssid hint flag */
+ WL_DBG_MEM(("ASSOC_HINT_BSSID_PRESENT. channels:%d\n", chan_cnt));
+ ext_join_params->assoc.flags |= ASSOC_HINT_BSSID_PRESENT;
+ }
+
+ /* ssid length check is already done above */
+ if (memcpy_s(ext_join_params->ssid.SSID, sizeof(ext_join_params->ssid.SSID),
+ info->ssid, info->ssid_len) != BCME_OK) {
+ WL_ERR(("ssid cpy failed info_len:%d\n", info->ssid_len));
+ return -EINVAL;
+ }
+
+ ext_join_params->ssid.SSID_len = info->ssid_len;
+ wl_update_prof(cfg, dev, NULL, &ext_join_params->ssid, WL_PROF_SSID);
+ if (ext_join_params->ssid.SSID_len < IEEE80211_MAX_SSID_LEN) {
+ WL_DBG(("ssid \"%s\", len (%d)\n", ext_join_params->ssid.SSID,
+ ext_join_params->ssid.SSID_len));
+ }
+ ext_join_params->ssid.SSID_len = htod32(info->ssid_len);
+
+ /* Use increased dwell for targeted join case to take care of noisy env */
+ join_scan_active_time = info->targeted_join ? WL_SCAN_JOIN_ACTIVE_DWELL_TIME_MS :
+ WL_BCAST_SCAN_JOIN_ACTIVE_DWELL_TIME_MS;
+ ext_join_params->scan.active_time = chan_cnt ? join_scan_active_time : -1;
+ ext_join_params->scan.passive_time = chan_cnt ? WL_SCAN_JOIN_PASSIVE_DWELL_TIME_MS : -1;
+ /* Set up join scan parameters */
+ ext_join_params->scan.scan_type = -1;
+	/* WAR to sync with the presence period of a VSDB GO:
+	 * send probe requests more frequently. Probe requests stop
+	 * once a probe response is received from the target AP/GO.
+	 */
+ ext_join_params->scan.nprobes = chan_cnt ?
+ (ext_join_params->scan.active_time/WL_SCAN_JOIN_PROBE_INTERVAL_MS) : -1;
+ ext_join_params->scan.home_time = -1;
+
+ (void)memcpy_s(&ext_join_params->assoc.bssid, ETH_ALEN, info->bssid, ETH_ALEN);
+
+ ext_join_params->assoc.chanspec_num = chan_cnt;
+ /* source and target lens are same */
+ (void)memcpy_s(ext_join_params->assoc.chanspec_list, (sizeof(chanspec_t) * chan_cnt),
+ chanspecs, sizeof(chanspec_t) * chan_cnt);
+
+ ext_join_params->assoc.chanspec_num = htod32(chan_cnt);
+ return BCME_OK;
+}
+
+static s32
+wl_fillup_assoc_params_v0(struct bcm_cfg80211 *cfg, struct net_device *dev,
+ void *params, u32 buf_len, wlcfg_assoc_info_t *info)
+{
+ chanspec_t *chanspecs = info->chanspecs;
+ u32 chan_cnt = info->chan_cnt;
+ u32 join_scan_active_time = 0;
+ wl_extjoin_params_t *ext_join_params = (wl_extjoin_params_t *)params;
+
+ if (buf_len < (sizeof(ext_join_params->ssid.SSID) +
+ (sizeof(chanspec_t) * chan_cnt))) {
+ WL_ERR(("buf too short\n"));
+ return -EINVAL;
+ }
+
+ /* ssid length check is already done above */
+ if (memcpy_s(ext_join_params->ssid.SSID, sizeof(ext_join_params->ssid.SSID),
+ info->ssid, info->ssid_len) != BCME_OK) {
+ WL_ERR(("ssid cpy failed info_len:%d\n", info->ssid_len));
+ return -EINVAL;
+ }
+
+ ext_join_params->ssid.SSID_len = info->ssid_len;
+ wl_update_prof(cfg, dev, NULL, &ext_join_params->ssid, WL_PROF_SSID);
+ if (ext_join_params->ssid.SSID_len < IEEE80211_MAX_SSID_LEN) {
+ WL_DBG(("ssid \"%s\", len (%d)\n", ext_join_params->ssid.SSID,
+ ext_join_params->ssid.SSID_len));
+ }
+ ext_join_params->ssid.SSID_len = htod32(info->ssid_len);
+
+ /* Use increased dwell for targeted join case to take care of noisy env */
+ join_scan_active_time = info->targeted_join ? WL_SCAN_JOIN_ACTIVE_DWELL_TIME_MS :
+ WL_BCAST_SCAN_JOIN_ACTIVE_DWELL_TIME_MS;
+ ext_join_params->scan.active_time = chan_cnt ? join_scan_active_time : -1;
+ ext_join_params->scan.passive_time = chan_cnt ? WL_SCAN_JOIN_PASSIVE_DWELL_TIME_MS : -1;
+ /* Set up join scan parameters */
+ ext_join_params->scan.scan_type = -1;
+	/* WAR to sync with the presence period of a VSDB GO:
+	 * send probe requests more frequently. Probe requests stop
+	 * once a probe response is received from the target AP/GO.
+	 */
+ ext_join_params->scan.nprobes = chan_cnt ?
+ (ext_join_params->scan.active_time/WL_SCAN_JOIN_PROBE_INTERVAL_MS) : -1;
+ ext_join_params->scan.home_time = -1;
+
+ (void)memcpy_s(&ext_join_params->assoc.bssid, ETH_ALEN, info->bssid, ETH_ALEN);
+
+ ext_join_params->assoc.chanspec_num = chan_cnt;
+ /* source and target lens are same */
+ (void)memcpy_s(ext_join_params->assoc.chanspec_list, (sizeof(chanspec_t) * chan_cnt),
+ chanspecs, sizeof(chanspec_t) * chan_cnt);
+
+ ext_join_params->assoc.chanspec_num = htod32(chan_cnt);
+ return BCME_OK;
+}
+
+static s32
+wl_config_assoc_params(struct bcm_cfg80211 *cfg, struct net_device *dev,
+ void *params, u32 buf_len, wlcfg_assoc_info_t *info)
+{
+ s32 ret;
+
+ if (!cfg->join_iovar_ver) {
+ ret = wl_fillup_assoc_params_v0(cfg, dev, params, buf_len, info);
+ } else {
+ ret = wl_fillup_assoc_params_v1(cfg, dev, params, buf_len, info);
+ }
+ return ret;
+}
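+
+/*
+ * Editor's note: in this file the v0 and v1 fill-up helpers differ only in
+ * the bssid-hint flag handling; both end in a variable-length chanspec
+ * list. A standalone sketch of sizing such a versioned struct (names and
+ * parameters are illustrative):
+ */
+#if 0 /* example only */
+#include <stddef.h>
+
+static size_t ex_join_params_size(int ver, size_t fixed_v0, size_t fixed_v1,
+	size_t chanspec_sz, unsigned int chan_cnt)
+{
+	size_t fixed = (ver == 0) ? fixed_v0 : fixed_v1;
+	return fixed + (size_t)chan_cnt * chanspec_sz; /* header + list */
+}
+#endif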
+
+static s32
+wl_handle_assoc_hints(struct bcm_cfg80211 *cfg, struct net_device *dev,
+ struct cfg80211_connect_params *sme, wlcfg_assoc_info_t *info)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0))
+ bool skip_hints = false;
+#endif /* KERNEL >= 3.15 */
+ chanspec_t chspec;
+
+ if (!sme || !info) {
+ WL_ERR(("wrong args\n"));
+ return -EINVAL;
+ }
+
+ if (unlikely(!sme->ssid) || (sme->ssid_len > DOT11_MAX_SSID_LEN)) {
+ WL_ERR(("Invalid ssid %p. len:%zu\n", sme->ssid, sme->ssid_len));
+ return -EINVAL;
+ }
+
+ /* Copy SSID detail */
+ info->ssid_len = sme->ssid_len;
+ if (memcpy_s(info->ssid, sizeof(info->ssid),
+ sme->ssid, info->ssid_len) != BCME_OK) {
+ WL_ERR(("ssid cpy failed\n"));
+ return -EINVAL;
+ }
+
+ /* Handle incoming BSSID and Channel info */
+ if (sme->bssid && !ETHER_ISBCAST(sme->bssid)) {
+ /* Use user space requested BSSID and channel */
+ info->targeted_join = true;
+ (void)memcpy_s(info->bssid, ETH_ALEN, sme->bssid, ETH_ALEN);
+ if (sme->channel && ((chspec =
+ wl_freq_to_chanspec(sme->channel->center_freq)) != INVCHANSPEC)) {
+ info->chan_cnt = 1;
+ info->chanspecs[0] = chspec;
+ /* Skip p2p connection on 6G */
+#ifdef WL_P2P_6G
+ if (!(cfg->p2p_6g_enabled)) {
+#endif /* WL_P2P_6G */
+ if (IS_P2P_GC(dev->ieee80211_ptr) && (CHSPEC_IS6G(chspec))) {
+ WL_ERR(("P2P connection not allowed on 6G\n"));
+ return -ENOTSUPP;
+ }
+#ifdef WL_P2P_6G
+ }
+#endif /* WL_P2P_6G */
+ }
+ }
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0))
+ else {
+#ifdef WL_SKIP_CONNECT_HINTS
+ skip_hints = true;
+ WL_DBG(("force skip connect hints\n"));
+#else /* WL_SKIP_CONNECT_HINTS */
+ /* override bssid_hint if overridden via module param */
+ skip_hints = fw_ap_select;
+#if defined(WL_FW_OCE_AP_SELECT)
+ /* If fw select needs to be specifically done for OCE */
+ skip_hints = fw_ap_select &&
+ wl_cfg80211_is_oce_ap(wiphy, sme->bssid_hint);
+#endif /* WL_FW_OCE_AP_SELECT */
+ WL_DBG(("fw_ap_select:%d skip_hints:%d\n", fw_ap_select, skip_hints));
+#endif /* WL_SKIP_CONNECT_HINTS */
+
+		/* Use bssid_hint if hints are allowed and if it's a unicast addr */
+ if (!skip_hints && sme->bssid_hint && !ETHER_ISBCAST(sme->bssid_hint)) {
+ WL_INFORM_MEM(("bssid_hint "MACDBG" \n", MAC2STRDBG(sme->bssid_hint)));
+ info->targeted_join = true;
+ if (cfg->join_iovar_ver) {
+ /* Firmware supports bssid_hint feature */
+ info->bssid_hint = true;
+ }
+ (void)memcpy_s(info->bssid, ETH_ALEN, sme->bssid_hint, ETH_ALEN);
+#ifndef WL_FORCE_RCC_LIST
+ /* Use channel hint only for target bssid join case. In other
+ * cases, use RCC or full scan to find better APs.
+ */
+ if (sme->channel_hint && ((chspec = wl_freq_to_chanspec(
+ sme->channel_hint->center_freq)) != INVCHANSPEC)) {
+ info->chan_cnt = 1;
+ info->chanspecs[0] = chspec;
+ WL_INFORM_MEM(("channel_hint: chspec(%x)\n", chspec));
+ }
+#endif /* !WL_FORCE_RCC_LIST */
+ }
+ }
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) */
+
+ if (info->targeted_join != true) {
+ /* For non targeted join, use bcast address */
+ (void)memcpy_s(&info->bssid, ETH_ALEN, &ether_bcast, ETH_ALEN);
+ }
+ WL_DBG(("targeted_join:%d chan_cnt:%d\n",
+ info->targeted_join, info->chan_cnt));
+ return 0;
+}
+
+static s32
+wl_sync_fw_assoc_states(struct bcm_cfg80211 *cfg,
+ struct net_device *dev, wlcfg_assoc_info_t *info)
+{
+ s32 err = BCME_OK;
+ u8 bssid[ETH_ALEN];
+
+ if (wl_get_drv_status(cfg, CONNECTED, dev) && wl_reassoc_support) {
+ /* ROAM case */
+ info->reassoc = true;
+ } else {
+ /* store the bssid for the connect req */
+ wl_update_prof(cfg, dev, NULL, info->bssid, WL_PROF_LATEST_BSSID);
+
+ /* following scenarios are possible
+ * In case of wrong request/abnormal status,
+ * trigger DISASSOC to clean up status.
+ * 1. DHD prev status is CONNECTING
+ * => 1.1 Wrong request
+ * 2. DHD previous status is CONNECTED
+ * - FW connected
+ * => Wrong request
+ * - FW not connected
+ * => Abnormal status
+ * 3. DHD previous status is DISCONNECTING
+ * => Waiting for disconnecting
+ * 4. DHD previous status is not connected
+ * - FW not connected
+ * => Normal status
+ * - FW connected
+ * => Abnormal status
+ */
+ if (wl_get_drv_status(cfg, CONNECTING, dev) ||
+ wl_get_drv_status(cfg, CONNECTED, dev)) {
+ /* set nested connect bit to identify the context */
+ wl_set_drv_status(cfg, NESTED_CONNECT, dev);
+ /* DHD prev status is CONNECTING/CONNECTED */
+ wl_cfg80211_cleanup_mismatch_status(dev, cfg, TRUE);
+ } else if (wl_get_drv_status(cfg, DISCONNECTING, dev)) {
+ /* DHD prev status is DISCONNECTING */
+ wl_cfg80211_cleanup_mismatch_status(dev, cfg, false);
+ } else if (!wl_get_drv_status(cfg, CONNECTED, dev)) {
+ /* DHD previous status is not connected and FW connected */
+ if (wldev_ioctl_get(dev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN) == 0) {
+ /* set nested connect bit to identify the context */
+ wl_set_drv_status(cfg, NESTED_CONNECT, dev);
+ wl_cfg80211_cleanup_mismatch_status(dev, cfg, true);
+ }
+ }
+#ifdef WL_EXT_IAPSTA
+ wl_ext_in4way_sync(dev, STA_WAIT_DISCONNECTED, WL_EXT_STATUS_DISCONNECTING, NULL);
+#endif
+ }
+
+ /* Clear BSSID if disconnecting state is not in progress */
+ bzero(&bssid, sizeof(bssid));
+ if (!info->reassoc && !wl_get_drv_status(cfg, DISCONNECTING, dev)) {
+ wl_update_prof(cfg, dev, NULL, (void *)&bssid, WL_PROF_BSSID);
+ }
+
+ LOG_TS(cfg, conn_start);
+ CLR_TS(cfg, authorize_start);
+ /* clear nested connect bit on proceeding for connection */
+ wl_clr_drv_status(cfg, NESTED_CONNECT, dev);
+
+ if (!info->reassoc) {
+ /* 'connect' request received */
+ wl_set_drv_status(cfg, CONNECTING, dev);
+ }
+
+ return err;
+}
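+
+/*
+ * Editor's sketch: the CONNECTING/CONNECTED/DISCONNECTING/NESTED_CONNECT
+ * checks above are per-interface status bits. A standalone shape of that
+ * bitmask bookkeeping; the driver's real helpers operate atomically on a
+ * per-ndev structure, which this example does not attempt to model.
+ */
+#if 0 /* example only */
+#include <stdint.h>
+#include <stdbool.h>
+
+enum {
+	EX_ST_CONNECTING	= 1u << 0,
+	EX_ST_CONNECTED		= 1u << 1,
+	EX_ST_DISCONNECTING	= 1u << 2,
+	EX_ST_NESTED_CONNECT	= 1u << 3
+};
+
+static void ex_set_st(uint32_t *st, uint32_t f) { *st |= f; }
+static void ex_clr_st(uint32_t *st, uint32_t f) { *st &= ~f; }
+static bool ex_get_st(uint32_t st, uint32_t f)  { return (st & f) != 0; }
+#endif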
+
+#if defined(DBG_PKT_MON) && defined(BCMDONGLEHOST)
+void
+wl_pkt_mon_start(struct bcm_cfg80211 *cfg, struct net_device *dev)
+{
+ if (dev == bcmcfg_to_prmry_ndev(cfg)) {
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+ DHD_DBG_PKT_MON_START(dhdp);
+ }
+}
+#endif /* DBG_PKT_MON && BCMDONGLEHOST */
+
+static s32
+wl_ext_get_rssi(struct bcm_cfg80211 *cfg, u8 *bssid)
+{
+ wl_scan_results_t *bss_list;
+ wl_bss_info_t *bi = NULL;
+ s32 i, rssi = 0;
+
+ mutex_lock(&cfg->scan_sync);
+ bss_list = cfg->bss_list;
+ bi = next_bss(bss_list, bi);
+ for_each_bss(bss_list, bi, i) {
+ if (!memcmp(&bi->BSSID, bssid, ETHER_ADDR_LEN))
+ rssi = dtoh32(bi->RSSI);
+ }
+ mutex_unlock(&cfg->scan_sync);
+
+ return rssi;
+}
+
+void
+wl_conn_debug_info(struct bcm_cfg80211 *cfg, struct net_device *dev, wlcfg_assoc_info_t *info)
+{
+ struct wl_security *sec = wl_read_prof(cfg, dev, WL_PROF_SEC);
+ char sec_info[64];
+ u32 chanspec = info->chanspecs[0];
+ u32 chan_cnt = info->chan_cnt;
+ s32 cur_rssi = 0, target_rssi = 0;
+ scb_val_t scb_val;
+ s32 err = BCME_OK;
+
+ if (!sec) {
+ WL_ERR(("no sec?\n"));
+ return;
+ }
+
+ target_rssi = wl_ext_get_rssi(cfg, info->bssid);
+ wl_ext_get_sec(dev, 0, sec_info, sizeof(sec_info), TRUE);
+ if (info->reassoc) {
+ memset(&scb_val, 0, sizeof(scb_val_t));
+ err = wldev_ioctl_get(dev, WLC_GET_RSSI, &scb_val, sizeof(scb_val_t));
+ if (!err)
+ cur_rssi = dtoh32(scb_val.val);
+ WL_MSG(dev->name, "Reconnecting with " MACDBG " ssid \"%s\", len (%d), "
+ "channel=%d(chan_cnt=%d), sec=%s, rssi=%d => %d\n",
+ MAC2STRDBG((u8*)(&info->bssid)), info->ssid, info->ssid_len,
+ wf_chspec_ctlchan(chanspec), chan_cnt, sec_info, cur_rssi, target_rssi);
+ } else {
+ WL_MSG(dev->name, "Connecting with " MACDBG " ssid \"%s\", len (%d), "
+ "channel=%d(chan_cnt=%d), sec=%s, rssi=%d\n",
+ MAC2STRDBG((u8*)(&info->bssid)), info->ssid, info->ssid_len,
+ wf_chspec_ctlchan(chanspec), chan_cnt, sec_info, target_rssi);
+ }
+ if (wl_dbg_level & WL_DBG_DBG) {
+ WL_MSG(dev->name, "akm:0x%x auth:0x%x wpaver:0x%x pwise:0x%x gwise:0x%x\n",
+ sec->wpa_auth, sec->auth_type, sec->wpa_versions,
+ sec->cipher_pairwise, sec->cipher_group);
+ WL_MSG(dev->name, "wpa_auth:0x%x auth:0x%x wsec:0x%x mfp:0x%x\n",
+ sec->fw_wpa_auth, sec->fw_auth, sec->fw_wsec, sec->fw_mfp);
+ /* print channels for assoc */
+ prhex("chanspecs", (const u8 *)info->chanspecs,
+ (info->chan_cnt * sizeof(chanspec_t)));
+ }
+ SUPP_LOG(("[%s] Connecting with " MACDBG " ssid \"%s\",chan_cnt:%d\n",
+ dev->name, MAC2STRDBG((u8*)(&info->bssid)),
+ info->ssid, info->chan_cnt));
+}
+
+s32
+wl_handle_join(struct bcm_cfg80211 *cfg,
+ struct net_device *dev, wlcfg_assoc_info_t *assoc_info)
+{
+ s32 err = 0;
+ size_t join_params_size;
+ void *join_params = NULL;
+
+ if (!cfg->join_iovar_ver) {
+ join_params_size = WL_EXTJOIN_PARAMS_FIXED_SIZE +
+ assoc_info->chan_cnt * sizeof(chanspec_t);
+ } else if (cfg->join_iovar_ver == WL_EXTJOIN_VERSION_V1) {
+ /* Use version join struct */
+ join_params_size = WL_EXTJOIN_PARAMS_FIXED_SIZE_V1 +
+ assoc_info->chan_cnt * sizeof(chanspec_t);
+ } else {
+ WL_ERR(("Unsupported join iovar version\n"));
+ return -EINVAL;
+ }
+
+ join_params = MALLOCZ(cfg->osh, join_params_size);
+ if (join_params == NULL) {
+ err = -ENOMEM;
+ WL_ERR(("Mem alloc for join_params failed\n"));
+ goto fail;
+ }
+
+ /* Fill up the join params */
+ err = wl_config_assoc_params(cfg, dev, join_params, join_params_size,
+ assoc_info);
+ if (unlikely(err)) {
+		WL_ERR(("config assoc params failed\n"));
+ goto fail;
+ }
+
+#ifdef WL_EXT_IAPSTA
+ wl_ext_iapsta_update_channel(dev, wf_chspec_ctlchan(assoc_info->chanspecs[0]));
+#endif
+ /* print relevant info for debug purpose */
+ wl_conn_debug_info(cfg, dev, assoc_info);
+ err = wldev_iovar_setbuf_bsscfg(dev, "join", join_params, join_params_size,
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, assoc_info->bssidx, &cfg->ioctl_buf_sync);
+ if (err) {
+ WL_ERR(("join iovar failed. err:%d\n", err));
+ }
+
+fail:
+ if (join_params) {
+ MFREE(cfg->osh, join_params, join_params_size);
+ }
+ return err;
+}
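+
+/*
+ * Editor's sketch: join_params is a fixed header followed by chan_cnt
+ * chanspecs, so the allocation above is sized as fixed + n * elem. A
+ * standalone illustration with a C99 flexible array member (types are
+ * invented for the example):
+ */
+#if 0 /* example only */
+#include <stdint.h>
+#include <stdlib.h>
+
+struct ex_join {
+	uint32_t ssid_len;
+	uint8_t  ssid[32];
+	uint32_t chanspec_num;
+	uint16_t chanspec_list[];	/* flexible tail, chanspec_num entries */
+};
+
+static struct ex_join *ex_join_alloc(uint32_t chan_cnt)
+{
+	/* zeroed allocation, like MALLOCZ in the driver */
+	return calloc(1, sizeof(struct ex_join) +
+		chan_cnt * sizeof(uint16_t));
+}
+#endif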
+
+s32
+wl_handle_reassoc(struct bcm_cfg80211 *cfg, struct net_device *dev,
+ wlcfg_assoc_info_t *info)
+{
+ wl_reassoc_params_t *reassoc_params;
+ s32 err = BCME_OK;
+ size_t reassoc_params_size;
+ u32 chan_cnt = info->chan_cnt;
+ chanspec_t *chanspecs = info->chanspecs;
+ u32 chanspec = info->chanspecs[0];
+
+ if (!cfg->join_iovar_ver) {
+ reassoc_params_size = WL_ASSOC_PARAMS_FIXED_SIZE +
+ chan_cnt * sizeof(chanspec_t);
+ } else if (cfg->join_iovar_ver == WL_EXTJOIN_VERSION_V1) {
+ reassoc_params_size = WL_ASSOC_PARAMS_FIXED_SIZE_V1 +
+ chan_cnt * sizeof(chanspec_t);
+ } else {
+ WL_ERR(("Unsupported join iovar version\n"));
+ err = -EINVAL;
+ goto fail;
+ }
+
+ reassoc_params = MALLOCZ(cfg->osh, reassoc_params_size);
+ if (reassoc_params == NULL) {
+ err = -ENOMEM;
+ WL_ERR(("Mem alloc for reassoc_params failed\n"));
+ goto fail;
+ }
+
+ (void)memcpy_s(&reassoc_params->bssid.octet, ETH_ALEN, info->bssid, ETH_ALEN);
+ (void)memcpy_s(reassoc_params->chanspec_list, (sizeof(chanspec_t) * chan_cnt),
+ chanspecs, sizeof(chanspec_t) * chan_cnt);
+ reassoc_params->chanspec_num = htod32(chan_cnt);
+
+#ifdef WL_EXT_IAPSTA
+ wl_ext_iapsta_update_channel(dev, wf_chspec_ctlchan(chanspec));
+#endif
+ /* print relevant info for debug purpose */
+ wl_conn_debug_info(cfg, dev, info);
+ err = wldev_ioctl_set(dev, WLC_REASSOC, reassoc_params, sizeof(wl_reassoc_params_t));
+ if (unlikely(err)) {
+ WL_ERR(("reassoc failed, error=%d\n", err));
+ goto fail;
+ }
+
+fail:
+ if (reassoc_params) {
+ MFREE(cfg->osh, reassoc_params, reassoc_params_size);
+ }
+ return err;
+}
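+
+/*
+ * Editor's note, summarizing the flow below: wl_cfg80211_connect() runs
+ * preassoc ops, parses bssid/channel hints, syncs fw assoc state, then
+ * either reassociates (roam within the same ESS) or configures security,
+ * resolves assoc channels, plumbs the IEs and issues the join.
+ */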
+
+static s32
+wl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_connect_params *sme)
+{
+ s32 err = BCME_OK;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ wlcfg_assoc_info_t assoc_info;
+
+ WL_DBG(("Enter len=%zu\n", sme->ie_len));
+ RETURN_EIO_IF_NOT_UP(cfg);
+
+	/* synchronize the connect states */
+ mutex_lock(&cfg->connect_sync);
+
+ bzero(&assoc_info, sizeof(wlcfg_assoc_info_t));
+ if ((assoc_info.bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+ WL_ERR(("Find wlan index from wdev(%p) failed\n", dev->ieee80211_ptr));
+ goto fail;
+ }
+
+ err = wl_do_preassoc_ops(cfg, dev, sme);
+ if (unlikely(err)) {
+		WL_ERR(("preassoc ops failed\n"));
+ goto fail;
+ }
+
+ err = wl_handle_assoc_hints(cfg, dev, sme, &assoc_info);
+ if (unlikely(err)) {
+ WL_ERR(("assoc hint processing failed\n"));
+ goto fail;
+ }
+
+ if (wl_sync_fw_assoc_states(cfg, dev, &assoc_info) != BCME_OK) {
+ /* attempt best effort */
+ WL_ERR(("fw assoc sync failed\n"));
+ }
+
+ if (assoc_info.reassoc) {
+ /* Handle roam to same ESS */
+ if ((err = wl_handle_reassoc(cfg, dev, &assoc_info)) != BCME_OK) {
+ goto fail;
+ }
+ } else {
+ err = wl_config_assoc_security(cfg, dev, sme);
+ if (unlikely(err)) {
+ WL_ERR(("config assoc security failed\n"));
+ goto fail;
+ }
+
+ err = wl_get_assoc_channels(cfg, dev, &assoc_info);
+ if (unlikely(err)) {
+ WL_ERR(("get assoc channels failed\n"));
+ goto fail;
+ }
+
+ err = wl_config_assoc_ies(cfg, dev, sme, &assoc_info);
+ if (unlikely(err)) {
+ WL_ERR(("config assoc ies failed\n"));
+ goto fail;
+ }
+
+ if ((err = wl_handle_join(cfg, dev, &assoc_info)) != BCME_OK) {
+ goto fail;
+ }
+ }
+	/* Store the minimum idx expected */
+ cfg->eidx.min_connect_idx = cfg->eidx.enqd;
+
+fail:
+ if (unlikely(err)) {
+ WL_ERR(("connect error (%d)\n", err));
+ wl_clr_drv_status(cfg, CONNECTING, dev);
+ CLR_TS(cfg, conn_start);
+ /* Flush fw logs */
+ wl_flush_fw_log_buffer(dev, FW_LOGSET_MASK_ALL);
+#ifdef WLTDLS
+ /* If connect fails, check whether we can enable back TDLS */
+ wl_cfg80211_tdls_config(cfg, TDLS_STATE_DISCONNECT, false);
+#endif /* WLTDLS */
+ } else {
+#ifdef DBG_PKT_MON
+		/* start packet log in advance to ensure that EAPOL msgs aren't missed */
+ wl_pkt_mon_start(cfg, dev);
+#endif /* DBG_PKT_MON */
+ }
+#ifdef WL_EXT_IAPSTA
+ if (!err)
+ wl_ext_in4way_sync(dev, STA_NO_BTC_IN4WAY|STA_REASSOC_RETRY,
+ WL_EXT_STATUS_CONNECTING, &assoc_info);
+#endif
+
+ mutex_unlock(&cfg->connect_sync);
+ return err;
+}
+
+static void wl_cfg80211_wait_for_disconnection(struct bcm_cfg80211 *cfg, struct net_device *dev)
+{
+ uint8 wait_cnt;
+ u32 status = 0;
+
+ wait_cnt = WAIT_FOR_DISCONNECT_MAX;
+ while ((status = wl_get_drv_status(cfg, DISCONNECTING, dev)) && wait_cnt) {
+ WL_DBG(("Waiting for disconnection, wait_cnt: %d\n", wait_cnt));
+ wait_cnt--;
+ OSL_SLEEP(50);
+ }
+
+ WL_INFORM_MEM(("Wait for disconnection done. status:%d wait_cnt:%d\n", status, wait_cnt));
+ if (!wait_cnt && wl_get_drv_status(cfg, DISCONNECTING, dev)) {
+ /* No response from firmware. Indicate connect result
+ * to clear cfg80211 state machine
+ */
+ if (wl_get_drv_status(cfg, CONNECTING, dev)) {
+ WL_INFORM_MEM(("force send connect result\n"));
+ CFG80211_CONNECT_RESULT(dev, NULL, NULL, NULL, 0, NULL, 0,
+ WLAN_STATUS_UNSPECIFIED_FAILURE,
+ GFP_KERNEL);
+ } else {
+ WL_INFORM_MEM(("force send disconnect event\n"));
+ CFG80211_DISCONNECTED(dev, WLAN_REASON_DEAUTH_LEAVING,
+ NULL, 0, false, GFP_KERNEL);
+ }
+ CLR_TS(cfg, conn_start);
+ CLR_TS(cfg, authorize_start);
+ wl_clr_drv_status(cfg, DISCONNECTING, dev);
+ }
+ return;
+}
+
+static s32
+wl_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
+ u16 reason_code)
+{
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ scb_val_t scbval;
+ bool act = false;
+ s32 err = 0;
+ u8 *curbssid = NULL;
+ u8 null_bssid[ETHER_ADDR_LEN];
+ s32 bssidx = 0;
+ bool connected;
+ bool conn_in_progress;
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+#ifdef BCMDONGLEHOST
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+#endif /* BCMDONGLEHOST */
+
+ RETURN_EIO_IF_NOT_UP(cfg);
+
+#ifdef BCMDONGLEHOST
+ BCM_REFERENCE(dhdp);
+ DHD_STATLOG_CTRL(dhdp, ST(DISASSOC_START),
+ dhd_net2idx(dhdp->info, dev), reason_code);
+#ifdef DHD_4WAYM4_FAIL_DISCONNECT
+ dhd_cleanup_m4_state_work(dhdp, dhd_net2idx(dhdp->info, dev));
+#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
+#endif /* BCMDONGLEHOST */
+
+ connected = wl_get_drv_status(cfg, CONNECTED, dev);
+ conn_in_progress = wl_get_drv_status(cfg, CONNECTING, dev);
+ curbssid = wl_read_prof(cfg, dev, WL_PROF_BSSID);
+ act = *(bool *) wl_read_prof(cfg, dev, WL_PROF_ACT);
+ WL_INFORM_MEM(("disconnect in connect state [%d:%d:%d]. reason:%d\n",
+ connected, conn_in_progress, act, reason_code));
+ if (connected || conn_in_progress) {
+ if (curbssid) {
+ WL_DBG_MEM(("curbssid:" MACDBG "\n", MAC2STRDBG(curbssid)));
+ }
+ act = true;
+ }
+
+ if (!curbssid) {
+ WL_ERR(("Disconnecting while CONNECTING status %d\n", (int)sizeof(null_bssid)));
+ bzero(null_bssid, sizeof(null_bssid));
+ curbssid = null_bssid;
+ }
+ WL_MSG(dev->name, "Reason %d, act %d, bssid %pM\n", reason_code, act, curbssid);
+
+ if (act) {
+#ifdef DBG_PKT_MON
+ /* Stop packet monitor */
+ if (dev == bcmcfg_to_prmry_ndev(cfg)) {
+ DHD_DBG_PKT_MON_STOP(dhdp);
+ }
+#endif /* DBG_PKT_MON */
+ /*
+ * Cancel ongoing scan to sync up with sme state machine of cfg80211.
+ */
+		/* Let the scan be aborted by F/W */
+ if (cfg->scan_request) {
+ WL_TRACE_HW4(("Aborting the scan! \n"));
+ wl_cfgscan_cancel_scan(cfg);
+ }
+ if (conn_in_progress || connected || wdev->ssid_len) {
+ scbval.val = reason_code;
+ memcpy(&scbval.ea, curbssid, ETHER_ADDR_LEN);
+ scbval.val = htod32(scbval.val);
+ WL_INFORM_MEM(("[%s] wl disassoc\n", dev->name));
+ /* Set DISCONNECTING state. We are clearing this state
+ in all exit paths
+ */
+ wl_set_drv_status(cfg, DISCONNECTING, dev);
+ err = wldev_ioctl_set(dev, WLC_DISASSOC, &scbval,
+ sizeof(scb_val_t));
+ if (unlikely(err)) {
+ wl_clr_drv_status(cfg, DISCONNECTING, dev);
+ WL_ERR(("error (%d)\n", err));
+ goto exit;
+ }
+#ifdef WL_EXT_IAPSTA
+ wl_ext_in4way_sync(dev, STA_NO_BTC_IN4WAY|STA_WAIT_DISCONNECTED,
+ WL_EXT_STATUS_DISCONNECTING, NULL);
+#endif
+ }
+#ifdef WL_WPS_SYNC
+		/* If we are in WPS reauth state, we would be dropping
+		 * the link down events. Ensure that an event is sent
+		 * up for the disconnect req.
+		 */
+ if (wl_wps_session_update(dev,
+ WPS_STATE_DISCONNECT, curbssid) == BCME_OK) {
+ WL_INFORM_MEM(("[WPS] Disconnect done.\n"));
+ wl_clr_drv_status(cfg, DISCONNECTING, dev);
+ if (connected) {
+ /* Avoid further wl disassoc iovars */
+ wl_clr_drv_status(cfg, CONNECTED, dev);
+ }
+ goto exit;
+
+ }
+#endif /* WL_WPS_SYNC */
+ wl_cfg80211_wait_for_disconnection(cfg, dev);
+ } else {
+		/* Not in connected or connection-in-progress states. Still
+		 * receiving a disassoc indicates a state mismatch with the
+		 * upper layer. Check the state and issue a disconnect
+		 * indication if required.
+		 */
+
+ if (wdev->current_bss || wdev->ssid_len) {
+ WL_INFORM_MEM(("report disconnect event\n"));
+ CFG80211_DISCONNECTED(dev, 0, NULL, 0, false, GFP_KERNEL);
+ }
+ }
+
+#ifdef CUSTOM_SET_CPUCORE
+ /* set default cpucore */
+ if (dev == bcmcfg_to_prmry_ndev(cfg)) {
+ dhdp->chan_isvht80 &= ~DHD_FLAG_STA_MODE;
+ if (!(dhdp->chan_isvht80))
+ dhd_set_cpucore(dhdp, FALSE);
+ }
+#endif /* CUSTOM_SET_CPUCORE */
+
+ cfg->rssi = 0; /* reset backup of rssi */
+
+exit:
+ CLR_TS(cfg, conn_start);
+ CLR_TS(cfg, authorize_start);
+
+	/* Clear IEs for disassoc */
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) >= 0) {
+ WL_INFORM_MEM(("Clearing disconnect IEs \n"));
+ err = wl_cfg80211_set_mgmt_vndr_ies(cfg,
+ ndev_to_cfgdev(dev), bssidx, VNDR_IE_DISASSOC_FLAG, NULL, 0);
+ } else {
+ WL_ERR(("Find index failed\n"));
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static s32
+#if defined(WL_CFG80211_P2P_DEV_IF)
+wl_cfg80211_set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
+ enum nl80211_tx_power_setting type, s32 mbm)
+#else
+wl_cfg80211_set_tx_power(struct wiphy *wiphy,
+ enum nl80211_tx_power_setting type, s32 dbm)
+#endif /* WL_CFG80211_P2P_DEV_IF */
+{
+
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+ s32 err = 0;
+#if defined(WL_CFG80211_P2P_DEV_IF)
+ s32 dbm = MBM_TO_DBM(mbm);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)) || \
+ defined(WL_COMPAT_WIRELESS) || defined(WL_SUPPORT_BACKPORTED_KPATCHES)
+ dbm = MBM_TO_DBM(dbm);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+ RETURN_EIO_IF_NOT_UP(cfg);
+ switch (type) {
+ case NL80211_TX_POWER_AUTOMATIC:
+ break;
+ case NL80211_TX_POWER_LIMITED:
+ if (dbm < 0) {
+			WL_ERR(("TX_POWER_LIMITED - dbm is negative\n"));
+ return -EINVAL;
+ }
+ break;
+ case NL80211_TX_POWER_FIXED:
+ if (dbm < 0) {
+			WL_ERR(("TX_POWER_FIXED - dbm is negative\n"));
+ return -EINVAL;
+ }
+ break;
+ }
+
+ err = wl_set_tx_power(ndev, type, dbm);
+ if (unlikely(err)) {
+ WL_ERR(("error (%d)\n", err));
+ return err;
+ }
+
+ cfg->conf->tx_power = dbm;
+
+ return err;
+}
+
+static s32
+#if defined(WL_CFG80211_P2P_DEV_IF)
+wl_cfg80211_get_tx_power(struct wiphy *wiphy,
+ struct wireless_dev *wdev, s32 *dbm)
+#else
+wl_cfg80211_get_tx_power(struct wiphy *wiphy, s32 *dbm)
+#endif /* WL_CFG80211_P2P_DEV_IF */
+{
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+ s32 err = 0;
+
+ RETURN_EIO_IF_NOT_UP(cfg);
+ err = wl_get_tx_power(ndev, dbm);
+ if (unlikely(err))
+ WL_ERR(("error (%d)\n", err));
+
+ return err;
+}
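+
+/*
+ * Editor's note: nl80211 passes tx power in mBm (hundredths of a dBm), so
+ * MBM_TO_DBM above is a divide-by-100. Standalone illustration:
+ */
+#if 0 /* example only */
+static int ex_mbm_to_dbm(int mbm)
+{
+	return mbm / 100; /* e.g. 2000 mBm -> 20 dBm */
+}
+#endif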
+
+static s32
+wl_cfg80211_config_default_key(struct wiphy *wiphy, struct net_device *dev,
+ u8 key_idx, bool unicast, bool multicast)
+{
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ u32 index;
+ s32 wsec;
+ s32 err = 0;
+ s32 bssidx;
+
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+ WL_ERR(("Find p2p index from dev(%p) failed\n", dev->ieee80211_ptr));
+ return BCME_ERROR;
+ }
+
+ WL_DBG(("key index (%d)\n", key_idx));
+ RETURN_EIO_IF_NOT_UP(cfg);
+ err = wldev_iovar_getint_bsscfg(dev, "wsec", &wsec, bssidx);
+ if (unlikely(err)) {
+ WL_ERR(("WLC_GET_WSEC error (%d)\n", err));
+ return err;
+ }
+ /* Fix IOT issue with Apple Airport */
+ if (wsec == WEP_ENABLED) {
+ /* Just select a new current key */
+ index = (u32) key_idx;
+ index = htod32(index);
+ err = wldev_ioctl_set(dev, WLC_SET_KEY_PRIMARY, &index,
+ sizeof(index));
+ if (unlikely(err)) {
+ WL_ERR(("error (%d)\n", err));
+ }
+ }
+ return err;
+}
+
+static s32
+wl_add_keyext(struct wiphy *wiphy, struct net_device *dev,
+ u8 key_idx, const u8 *mac_addr, struct key_params *params)
+{
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct wl_wsec_key key;
+ s32 err = 0;
+ s32 bssidx;
+ s32 mode = wl_get_mode_by_netdev(cfg, dev);
+
+ WL_MSG(dev->name, "key index (%d)\n", key_idx);
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+ WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
+ return BCME_ERROR;
+ }
+ bzero(&key, sizeof(key));
+ key.index = (u32) key_idx;
+
+ if (!ETHER_ISMULTI(mac_addr))
+ memcpy((char *)&key.ea, (const void *)mac_addr, ETHER_ADDR_LEN);
+ key.len = (u32) params->key_len;
+
+ /* check for key index change */
+ if (key.len == 0) {
+ /* key delete */
+ swap_key_from_BE(&key);
+ err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key),
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+ if (unlikely(err)) {
+ WL_ERR(("key delete error (%d)\n", err));
+ return err;
+ }
+ } else {
+ if (key.len > sizeof(key.data)) {
+ WL_ERR(("Invalid key length (%d)\n", key.len));
+ return -EINVAL;
+ }
+ WL_DBG(("Setting the key index %d\n", key.index));
+ memcpy(key.data, params->key, key.len);
+
+ if ((mode == WL_MODE_BSS) &&
+ (params->cipher == WLAN_CIPHER_SUITE_TKIP)) {
+ u8 keybuf[8];
+ memcpy(keybuf, &key.data[24], sizeof(keybuf));
+ memcpy(&key.data[24], &key.data[16], sizeof(keybuf));
+ memcpy(&key.data[16], keybuf, sizeof(keybuf));
+ }
+
+ /* if IW_ENCODE_EXT_RX_SEQ_VALID set */
+ if (params->seq && params->seq_len == 6) {
+ /* rx iv */
+ const u8 *ivptr;
+ ivptr = (const u8 *) params->seq;
+ key.rxiv.hi = (ivptr[5] << 24) | (ivptr[4] << 16) |
+ (ivptr[3] << 8) | ivptr[2];
+ key.rxiv.lo = (ivptr[1] << 8) | ivptr[0];
+ key.iv_initialized = true;
+ }
+ key.algo = wl_rsn_cipher_wsec_key_algo_lookup(params->cipher);
+		if (key.algo == CRYPTO_ALGO_OFF) { /* not found */
+ WL_ERR(("Invalid cipher (0x%x)\n", params->cipher));
+ return -EINVAL;
+ }
+ swap_key_from_BE(&key);
+#if defined(BCMDONGLEHOST) && !defined(CUSTOMER_HW4)
+ /* need to guarantee EAPOL 4/4 send out before set key */
+ dhd_wait_pend8021x(dev);
+#endif /* BCMDONGLEHOST && !CUSTOMER_HW4 */
+ err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key),
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+ if (unlikely(err)) {
+ WL_ERR(("WLC_SET_KEY error (%d)\n", err));
+ return err;
+ }
+ WL_INFORM_MEM(("[%s] wsec key set\n", dev->name));
+ }
+ return err;
+}
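+
+/*
+ * Editor's sketch: the TKIP handling above swaps the two 8-byte MIC keys
+ * (bytes 16-23 and 24-31 of the 32-byte key), since the supplicant and
+ * the firmware disagree on TX/RX MIC key order. Standalone version of
+ * that swap:
+ */
+#if 0 /* example only */
+#include <stdint.h>
+#include <string.h>
+
+static void ex_tkip_swap_mic(uint8_t key[32])
+{
+	uint8_t tmp[8];
+	memcpy(tmp, &key[16], 8);	/* save the first MIC half */
+	memcpy(&key[16], &key[24], 8);	/* move the second half up */
+	memcpy(&key[24], tmp, 8);	/* place the saved half last */
+}
+#endif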
+
+int
+wl_cfg80211_enable_roam_offload(struct net_device *dev, int enable)
+{
+ int err;
+ wl_eventmsg_buf_t ev_buf;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+
+ if (dev != bcmcfg_to_prmry_ndev(cfg)) {
+ /* roam offload is only for the primary device */
+ return -1;
+ }
+
+ WL_INFORM_MEM(("[%s] wl roam_offload %d\n", dev->name, enable));
+ err = wldev_iovar_setint(dev, "roam_offload", enable);
+ if (err)
+ return err;
+
+ bzero(&ev_buf, sizeof(wl_eventmsg_buf_t));
+ wl_cfg80211_add_to_eventbuffer(&ev_buf, WLC_E_PSK_SUP, !enable);
+ wl_cfg80211_add_to_eventbuffer(&ev_buf, WLC_E_ASSOC_REQ_IE, !enable);
+ wl_cfg80211_add_to_eventbuffer(&ev_buf, WLC_E_ASSOC_RESP_IE, !enable);
+ wl_cfg80211_add_to_eventbuffer(&ev_buf, WLC_E_REASSOC, !enable);
+ wl_cfg80211_add_to_eventbuffer(&ev_buf, WLC_E_JOIN, !enable);
+ wl_cfg80211_add_to_eventbuffer(&ev_buf, WLC_E_ROAM, !enable);
+ err = wl_cfg80211_apply_eventbuffer(dev, cfg, &ev_buf);
+ if (!err) {
+ cfg->roam_offload = enable;
+ }
+ return err;
+}
+
+struct wireless_dev *
+wl_cfg80211_get_wdev_from_ifname(struct bcm_cfg80211 *cfg, const char *name)
+{
+ struct net_info *iter, *next;
+
+ if (name == NULL) {
+ WL_ERR(("Iface name is not provided\n"));
+ return NULL;
+ }
+
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ for_each_ndev(cfg, iter, next) {
+ GCC_DIAGNOSTIC_POP();
+ if (iter->ndev) {
+ if (strcmp(iter->ndev->name, name) == 0) {
+ return iter->ndev->ieee80211_ptr;
+ }
+ }
+ }
+
+ WL_DBG(("Iface %s not found\n", name));
+ return NULL;
+}
+
+#if defined(PKT_FILTER_SUPPORT) && defined(APSTA_BLOCK_ARP_DURING_DHCP)
+void
+wl_cfg80211_block_arp(struct net_device *dev, int enable)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+
+ WL_INFORM_MEM(("[%s] Enter. enable:%d\n", dev->name, enable));
+ if (!dhd_pkt_filter_enable) {
+ WL_DBG(("Packet filter isn't enabled\n"));
+ return;
+ }
+
+	/* Block/unblock ARP frames only if the STA is connected to
+	 * the upstream AP, in case of STA+SoftAP concurrent mode
+	 */
+ if (!wl_get_drv_status(cfg, CONNECTED, dev)) {
+ WL_DBG(("STA not connected to upstream AP\n"));
+ return;
+ }
+
+ if (enable) {
+ WL_DBG(("Enable ARP Filter\n"));
+ /* Add ARP filter */
+ dhd_packet_filter_add_remove(dhdp, TRUE, DHD_BROADCAST_ARP_FILTER_NUM);
+
+ /* Enable ARP packet filter - blacklist */
+ dhd_pktfilter_offload_enable(dhdp, dhdp->pktfilter[DHD_BROADCAST_ARP_FILTER_NUM],
+ TRUE, FALSE);
+ } else {
+ WL_DBG(("Disable ARP Filter\n"));
+ /* Disable ARP packet filter */
+ dhd_pktfilter_offload_enable(dhdp, dhdp->pktfilter[DHD_BROADCAST_ARP_FILTER_NUM],
+ FALSE, TRUE);
+
+ /* Delete ARP filter */
+ dhd_packet_filter_add_remove(dhdp, FALSE, DHD_BROADCAST_ARP_FILTER_NUM);
+ }
+}
+#endif /* PKT_FILTER_SUPPORT && APSTA_BLOCK_ARP_DURING_DHCP */
+
+static s32
+wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev,
+ u8 key_idx, bool pairwise, const u8 *mac_addr,
+ struct key_params *params)
+{
+ struct wl_wsec_key key;
+ s32 val = 0;
+ s32 wsec = 0;
+ s32 err = 0;
+ u8 keybuf[8];
+ s32 bssidx = 0;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ s32 mode = wl_get_mode_by_netdev(cfg, dev);
+#ifdef WL_GCMP
+ uint32 algos = 0, mask = 0;
+#endif /* WL_GCMP */
+#if defined(WLAN_CIPHER_SUITE_PMK)
+ wsec_pmk_t pmk;
+ struct wl_security *sec;
+#endif /* defined(WLAN_CIPHER_SUITE_PMK) */
+#ifdef BCMDONGLEHOST
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+#endif /* BCMDONGLEHOST */
+
+#if defined(BCMDONGLEHOST)
+ if (dhd_query_bus_erros(dhdp)) {
+		/* If we are hit with a bus error, return success so that
+		 * we don't get called repeatedly till we recover.
+		 */
+ return 0;
+ }
+#endif /* BCMDONGLEHOST */
+
+ WL_INFORM_MEM(("key index (%d) (0x%x)\n", key_idx, params->cipher));
+ RETURN_EIO_IF_NOT_UP(cfg);
+
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+ WL_ERR(("Find p2p index from dev(%p) failed\n", dev->ieee80211_ptr));
+ return BCME_ERROR;
+ }
+
+ if (mac_addr &&
+ ((params->cipher != WLAN_CIPHER_SUITE_WEP40) &&
+ (params->cipher != WLAN_CIPHER_SUITE_WEP104))) {
+ wl_add_keyext(wiphy, dev, key_idx, mac_addr, params);
+ goto exit;
+ }
+
+#ifdef BCMDONGLEHOST
+ BCM_REFERENCE(dhdp);
+ DHD_STATLOG_CTRL(dhdp, ST(INSTALL_KEY), dhd_net2idx(dhdp->info, dev), 0);
+#endif /* BCMDONGLEHOST */
+
+ bzero(&key, sizeof(key));
+ /* Clear any buffered wep key */
+ bzero(&cfg->wep_key, sizeof(struct wl_wsec_key));
+
+ key.len = (u32) params->key_len;
+ key.index = (u32) key_idx;
+
+ if (unlikely(key.len > sizeof(key.data))) {
+		WL_ERR(("Key length (%u) too long\n", key.len));
+ return -EINVAL;
+ }
+ memcpy(key.data, params->key, key.len);
+
+ key.flags = WL_PRIMARY_KEY;
+
+ key.algo = wl_rsn_cipher_wsec_key_algo_lookup(params->cipher);
+ val = wl_rsn_cipher_wsec_algo_lookup(params->cipher);
+ if (val == WSEC_NONE) {
+ WL_ERR(("Invalid cipher (0x%x), key.len = %d\n", params->cipher, key.len));
+#if defined(WLAN_CIPHER_SUITE_PMK)
+		/* WLAN_CIPHER_SUITE_PMK is not an NL80211 standard cipher but a
+		 * BRCM proprietary cipher suite, so it doesn't have a proper algo
+		 * type. Just for now, bypass this check for backward
+		 * compatibility.
+		 * TODO: deprecate this proprietary way and replace it with the
+		 * nl80211 set_pmk API.
+		 */
+ if (params->cipher != WLAN_CIPHER_SUITE_PMK)
+#endif /* defined(WLAN_CIPHER_SUITE_PMK) */
+ return -EINVAL;
+ }
+ switch (params->cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ if (params->key_len != TKIP_KEY_SIZE) {
+ WL_ERR(("wrong TKIP Key length:%d", params->key_len));
+ return -EINVAL;
+ }
+ /* wpa_supplicant switches the third and fourth quarters of the TKIP key */
+ if (mode == WL_MODE_BSS) {
+ bcopy(&key.data[24], keybuf, sizeof(keybuf));
+ bcopy(&key.data[16], &key.data[24], sizeof(keybuf));
+ bcopy(keybuf, &key.data[16], sizeof(keybuf));
+ }
+ WL_DBG(("WLAN_CIPHER_SUITE_TKIP\n"));
+ break;
+#if defined(WLAN_CIPHER_SUITE_PMK)
+ case WLAN_CIPHER_SUITE_PMK:
+ sec = wl_read_prof(cfg, dev, WL_PROF_SEC);
+
+ WL_MEM(("set_pmk: wpa_auth:%x akm:%x\n", sec->wpa_auth, params->cipher));
+ /* Avoid pmk set for SAE and OWE for external supplicant case. */
+ if (IS_AKM_SAE(sec->wpa_auth) || IS_AKM_OWE(sec->wpa_auth)) {
+ WL_INFORM_MEM(("skip pmk set for akm:%x\n", sec->wpa_auth));
+ break;
+ }
+
+ if (params->key_len > sizeof(pmk.key)) {
+			WL_ERR(("Wrong PMK key length:%d", params->key_len));
+ return -EINVAL;
+ }
+ bzero(&pmk, sizeof(pmk));
+ bcopy(params->key, &pmk.key, params->key_len);
+ pmk.key_len = params->key_len;
+ pmk.flags = 0; /* 0:PMK, WSEC_PASSPHRASE:PSK, WSEC_SAE_PASSPHRASE:SAE_PSK */
+
+ if ((sec->wpa_auth == WLAN_AKM_SUITE_8021X) ||
+ (sec->wpa_auth == WL_AKM_SUITE_SHA256_1X)) {
+ err = wldev_iovar_setbuf_bsscfg(dev, "okc_info_pmk", pmk.key, pmk.key_len,
+ cfg->ioctl_buf, WLC_IOCTL_SMLEN, bssidx, &cfg->ioctl_buf_sync);
+ if (err) {
+ /* could fail in case that 'okc' is not supported */
+ WL_INFORM_MEM(("okc_info_pmk failed, err=%d (ignore)\n", err));
+ }
+ }
+
+ err = wldev_ioctl_set(dev, WLC_SET_WSEC_PMK, &pmk, sizeof(pmk));
+ if (err) {
+ WL_ERR(("pmk failed, err=%d (ignore)\n", err));
+ return err;
+ } else {
+ WL_DBG(("pmk set. flags:0x%x\n", pmk.flags));
+ }
+ /* Clear key length to delete key */
+ key.len = 0;
+ break;
+#endif /* WLAN_CIPHER_SUITE_PMK */
+#ifdef WL_GCMP
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ algos = KEY_ALGO_MASK(key.algo);
+ mask = algos | KEY_ALGO_MASK(CRYPTO_ALGO_AES_CCM);
+ break;
+#endif /* WL_GCMP */
+ default: /* No post processing required */
+ WL_DBG(("no post processing required (0x%x)\n", params->cipher));
+ break;
+ }
+
+ /* Set the new key/index */
+ if ((mode == WL_MODE_IBSS) && (val & (TKIP_ENABLED | AES_ENABLED))) {
+ WL_ERR(("IBSS KEY setted\n"));
+ wldev_iovar_setint(dev, "wpa_auth", WPA_AUTH_NONE);
+ }
+ swap_key_from_BE(&key);
+ if ((params->cipher == WLAN_CIPHER_SUITE_WEP40) ||
+ (params->cipher == WLAN_CIPHER_SUITE_WEP104)) {
+		/*
+		 * For the AP role, since we do a wl down before bringing up the AP,
+		 * the plumbed keys are lost. So once the AP is brought up we need to
+		 * plumb the keys again; buffer them here for future use. This is more
+		 * of a WAR: if firmware later gains the capability to do the interface
+		 * upgrade without a "wl down" and "wl apsta 0", then this will not be
+		 * required.
+		 */
+ WL_DBG(("Buffering WEP Keys \n"));
+ memcpy(&cfg->wep_key, &key, sizeof(struct wl_wsec_key));
+ }
+ err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key), cfg->ioctl_buf,
+ WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+ if (unlikely(err)) {
+ WL_ERR(("WLC_SET_KEY error (%d)\n", err));
+ return err;
+ }
+
+exit:
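+	/* "wsec" is a per-bsscfg cipher bitmap (WEP_ENABLED, TKIP_ENABLED,
+	 * AES_ENABLED, ...); read-modify-write it so ciphers already enabled on
+	 * this bsscfg are preserved while OR-ing in the new one.
+	 */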
+ err = wldev_iovar_getint_bsscfg(dev, "wsec", &wsec, bssidx);
+ if (unlikely(err)) {
+ WL_ERR(("get wsec error (%d)\n", err));
+ return err;
+ }
+
+ wsec |= val;
+ err = wldev_iovar_setint_bsscfg(dev, "wsec", wsec, bssidx);
+ if (unlikely(err)) {
+ WL_ERR(("set wsec error (%d)\n", err));
+ return err;
+ }
+#ifdef WL_GCMP
+ wl_set_wsec_info_algos(dev, algos, mask);
+#endif /* WL_GCMP */
+#ifdef WL_EXT_IAPSTA
+ wl_ext_in4way_sync(dev, STA_NO_BTC_IN4WAY, WL_EXT_STATUS_ADD_KEY, NULL);
+#endif
+ return err;
+}
+
+static s32
+wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev,
+ u8 key_idx, bool pairwise, const u8 *mac_addr)
+{
+ struct wl_wsec_key key;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ s32 err = 0;
+ s32 bssidx;
+#ifdef BCMDONGLEHOST
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+#endif /* BCMDONGLEHOST */
+
+#if defined (BCMDONGLEHOST)
+ if (dhd_query_bus_erros(dhdp)) {
+	/* If we are hit with a bus error, return success so that we
+	 * don't repeatedly call del station until we recover.
+	 */
+ return 0;
+ }
+#endif /* BCMDONGLEHOST */
+
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+ WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
+ return BCME_ERROR;
+ }
+ WL_DBG(("Enter\n"));
+
+#ifndef MFP
+ if ((key_idx >= DOT11_MAX_DEFAULT_KEYS) && (key_idx < DOT11_MAX_DEFAULT_KEYS+2))
+ return -EINVAL;
+#endif
+
+ RETURN_EIO_IF_NOT_UP(cfg);
+#ifdef BCMDONGLEHOST
+ BCM_REFERENCE(dhdp);
+ DHD_STATLOG_CTRL(dhdp, ST(DELETE_KEY), dhd_net2idx(dhdp->info, dev), 0);
+#endif /* BCMDONGLEHOST */
+ bzero(&key, sizeof(key));
+
+ key.flags = WL_PRIMARY_KEY;
+ key.algo = CRYPTO_ALGO_OFF;
+ key.index = (u32) key_idx;
+
+ WL_DBG(("key index (%d)\n", key_idx));
+ /* Set the new key/index */
+ swap_key_from_BE(&key);
+ err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key), cfg->ioctl_buf,
+ WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+ if (unlikely(err)) {
+ if (err == -EINVAL) {
+ if (key.index >= DOT11_MAX_DEFAULT_KEYS) {
+ /* we ignore this key index in this case */
+ WL_DBG(("invalid key index (%d)\n", key_idx));
+ }
+ } else {
+ WL_ERR(("WLC_SET_KEY error (%d)\n", err));
+ }
+ return err;
+ }
+ return err;
+}
+
+/* NOTE: this function cannot work as-is and is never called */
+static s32
+wl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev,
+ u8 key_idx, bool pairwise, const u8 *mac_addr, void *cookie,
+ void (*callback) (void *cookie, struct key_params * params))
+{
+ struct key_params params;
+ struct wl_wsec_key key;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct wl_security *sec;
+ s32 wsec;
+ s32 err = 0;
+ s32 bssidx;
+
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+ WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
+ return BCME_ERROR;
+ }
+ WL_DBG(("key index (%d)\n", key_idx));
+ RETURN_EIO_IF_NOT_UP(cfg);
+ bzero(&key, sizeof(key));
+ key.index = key_idx;
+ swap_key_to_BE(&key);
+ bzero(&params, sizeof(params));
+ params.key_len = (u8) min_t(u8, DOT11_MAX_KEY_SIZE, key.len);
+ params.key = key.data;
+
+ err = wldev_iovar_getint_bsscfg(dev, "wsec", &wsec, bssidx);
+ if (unlikely(err)) {
+ WL_ERR(("WLC_GET_WSEC error (%d)\n", err));
+ return err;
+ }
+ switch (WSEC_ENABLED(wsec)) {
+ case WEP_ENABLED:
+ sec = wl_read_prof(cfg, dev, WL_PROF_SEC);
+ if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP40) {
+ params.cipher = WLAN_CIPHER_SUITE_WEP40;
+ WL_DBG(("WLAN_CIPHER_SUITE_WEP40\n"));
+ } else if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP104) {
+ params.cipher = WLAN_CIPHER_SUITE_WEP104;
+ WL_DBG(("WLAN_CIPHER_SUITE_WEP104\n"));
+ }
+ break;
+ case TKIP_ENABLED:
+ params.cipher = WLAN_CIPHER_SUITE_TKIP;
+ WL_DBG(("WLAN_CIPHER_SUITE_TKIP\n"));
+ break;
+ case AES_ENABLED:
+ params.cipher = WLAN_CIPHER_SUITE_AES_CMAC;
+ WL_DBG(("WLAN_CIPHER_SUITE_AES_CMAC\n"));
+ break;
+
+#ifdef BCMWAPI_WPI
+ case SMS4_ENABLED:
+ params.cipher = WLAN_CIPHER_SUITE_SMS4;
+ WL_DBG(("WLAN_CIPHER_SUITE_SMS4\n"));
+ break;
+#endif
+
+#if defined(SUPPORT_SOFTAP_WPAWPA2_MIXED)
+ /* to connect to mixed mode AP */
+ case (AES_ENABLED | TKIP_ENABLED): /* TKIP CCMP */
+ params.cipher = WLAN_CIPHER_SUITE_AES_CMAC;
+ WL_DBG(("WLAN_CIPHER_SUITE_TKIP\n"));
+ break;
+#endif
+ default:
+ WL_ERR(("Invalid algo (0x%x)\n", wsec));
+ return -EINVAL;
+ }
+
+ callback(cookie, &params);
+ return err;
+}
+
+static s32
+wl_cfg80211_config_default_mgmt_key(struct wiphy *wiphy,
+ struct net_device *dev, u8 key_idx)
+{
+#ifdef MFP
+	/* Firmware seems to use a hard-coded index for the Group Mgmt Key.
+	 * TODO: check whether anything else needs to be done here.
+	 */
+ return 0;
+#else
+ WL_INFORM_MEM(("Not supported\n"));
+ return -EOPNOTSUPP;
+#endif /* MFP */
+}
+
+static bool
+wl_check_assoc_state(struct bcm_cfg80211 *cfg, struct net_device *dev)
+{
+ wl_assoc_info_t asinfo;
+ uint32 state = 0;
+ int err;
+
+ err = wldev_iovar_getbuf_bsscfg(dev, "assoc_info",
+ NULL, 0, cfg->ioctl_buf, WLC_IOCTL_MEDLEN, 0, &cfg->ioctl_buf_sync);
+ if (unlikely(err)) {
+ WL_ERR(("failed to get assoc_info : err=%d\n", err));
+ return FALSE;
+ } else {
+ memcpy(&asinfo, cfg->ioctl_buf, sizeof(wl_assoc_info_t));
+ state = dtoh32(asinfo.state);
+ WL_DBG(("assoc state=%d\n", state));
+ }
+
+	return (state > 0) ? TRUE : FALSE;
+}
+
+static s32
+wl_cfg80211_get_rssi(struct net_device *dev, struct bcm_cfg80211 *cfg, s32 *rssi)
+{
+ s32 err = BCME_OK;
+ scb_val_t scb_val;
+#ifdef SUPPORT_RSSI_SUM_REPORT
+ wl_rssi_ant_mimo_t rssi_ant_mimo;
+#endif /* SUPPORT_RSSI_SUM_REPORT */
+
+ if (dev == NULL || cfg == NULL) {
+ return BCME_ERROR;
+ }
+
+ /* initialize rssi */
+ *rssi = 0;
+
+#ifdef SUPPORT_RSSI_SUM_REPORT
+ /* Query RSSI sum across antennas */
+ bzero(&rssi_ant_mimo, sizeof(rssi_ant_mimo));
+ err = wl_get_rssi_per_ant(dev, dev->name, NULL, &rssi_ant_mimo);
+ if (err) {
+ WL_ERR(("Could not get rssi sum (%d)\n", err));
+		/* Set rssi to zero and do not return an error, because the
+		 * phy_rssi_ant iovar can return BCME_UNSUPPORTED when the BSSID
+		 * is null during roaming.
+		 */
+ err = BCME_OK;
+ } else {
+ cfg->rssi_sum_report = TRUE;
+ if ((*rssi = rssi_ant_mimo.rssi_sum) >= 0) {
+ *rssi = 0;
+ }
+ }
+#endif /* SUPPORT_RSSI_SUM_REPORT */
+
+ /* if SUPPORT_RSSI_SUM_REPORT works once, do not use legacy method anymore */
+ if (cfg->rssi_sum_report == FALSE) {
+ bzero(&scb_val, sizeof(scb_val));
+ scb_val.val = 0;
+ err = wldev_ioctl_get(dev, WLC_GET_RSSI, &scb_val,
+ sizeof(scb_val_t));
+ if (err) {
+ WL_ERR(("Could not get rssi (%d)\n", err));
+ return err;
+ }
+#if defined(RSSIOFFSET)
+ *rssi = wl_update_rssi_offset(dev, dtoh32(scb_val.val));
+#else
+ *rssi = dtoh32(scb_val.val);
+#endif
+ }
+
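+	/* A non-negative RSSI is not a plausible dBm reading. If we are still
+	 * associated (possibly mid-roam), reuse the previously cached value;
+	 * otherwise report 0.
+	 */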
+ if (*rssi >= 0) {
+ /* check assoc status including roaming */
+ DHD_OS_WAKE_LOCK((dhd_pub_t *)(cfg->pub));
+ if (wl_get_drv_status(cfg, CONNECTED, dev) && wl_check_assoc_state(cfg, dev)) {
+ *rssi = cfg->rssi; /* use previous RSSI */
+ WL_DBG(("use previous RSSI %d dBm\n", cfg->rssi));
+ } else {
+ *rssi = 0;
+ }
+ DHD_OS_WAKE_UNLOCK((dhd_pub_t *)(cfg->pub));
+ } else {
+ /* backup the current rssi */
+ cfg->rssi = *rssi;
+ }
+
+ return err;
+}
+
+static int
+wl_cfg80211_ifstats_counters_cb(void *ctx, const uint8 *data, uint16 type, uint16 len)
+{
+ switch (type) {
+ case WL_IFSTATS_XTLV_IF_INDEX:
+ WL_DBG(("Stats received on interface index: %d\n", *data));
+ break;
+ case WL_IFSTATS_XTLV_GENERIC: {
+ if (len > sizeof(wl_if_stats_t)) {
+ WL_INFORM(("type 0x%x: cntbuf length too long! %d > %d\n",
+ type, len, (int)sizeof(wl_if_stats_t)));
+ }
+ memcpy(ctx, data, sizeof(wl_if_stats_t));
+ break;
+ }
+ default:
+ WL_DBG(("Unsupported counter type 0x%x\n", type));
+ break;
+ }
+
+ return BCME_OK;
+}
+
+/* Parameters to if_counters iovar need to be converted to XTLV format
+ * before sending to FW. The length of the top level XTLV container
+ * containing parameters should not exceed 228 bytes
+ */
+#define IF_COUNTERS_PARAM_CONTAINER_LEN_MAX 228
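+
+/* Request layout built below (a sketch; names as used in this file):
+ *
+ *   wl_stats_report_t
+ *     .version = WL_STATS_REPORT_REQUEST_VERSION_V2
+ *     .length  = offsetof(wl_stats_report_t, data) + container length
+ *     .data    = XTLV(WL_IFSTATS_XTLV_IF) {
+ *                    XTLV(WL_IFSTATS_XTLV_GENERIC)  <- empty; requests stats
+ *                }
+ *
+ * The response mirrors this: a WL_IFSTATS_XTLV_IF container whose inner
+ * XTLVs are unpacked by wl_cfg80211_ifstats_counters_cb().
+ */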
+
+int
+wl_cfg80211_ifstats_counters(struct net_device *dev, wl_if_stats_t *if_stats)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+ uint8 *pbuf = NULL;
+ bcm_xtlvbuf_t xtlvbuf, local_xtlvbuf;
+ bcm_xtlv_t *xtlv;
+ uint16 expected_resp_len;
+ wl_stats_report_t *request = NULL, *response = NULL;
+ int bsscfg_idx;
+ int ret = BCME_OK;
+
+ pbuf = (uint8 *)MALLOCZ(dhdp->osh, WLC_IOCTL_MEDLEN);
+ if (!pbuf) {
+ WL_ERR(("Failed to allocate local pbuf\n"));
+ return BCME_NOMEM;
+ }
+
+ /* top level container length cannot exceed 228 bytes.
+ * This is because the output buffer is 1535 bytes long.
+ * Allow 1300 bytes for reporting stats coming in XTLV format
+ */
+ request = (wl_stats_report_t *)
+ MALLOCZ(dhdp->osh, IF_COUNTERS_PARAM_CONTAINER_LEN_MAX);
+ if (!request) {
+ WL_ERR(("Failed to allocate wl_stats_report_t with length (%d)\n",
+ IF_COUNTERS_PARAM_CONTAINER_LEN_MAX));
+ ret = BCME_NOMEM;
+ goto fail;
+ }
+
+ request->version = WL_STATS_REPORT_REQUEST_VERSION_V2;
+
+ /* Top level container... we will create it ourselves */
+ /* Leave space for report version, length, and top level XTLV
+ * WL_IFSTATS_XTLV_IF.
+ */
+ ret = bcm_xtlv_buf_init(&local_xtlvbuf,
+ (uint8*)(request->data) + BCM_XTLV_HDR_SIZE,
+ IF_COUNTERS_PARAM_CONTAINER_LEN_MAX -
+ offsetof(wl_stats_report_t, data) - BCM_XTLV_HDR_SIZE,
+ BCM_XTLV_OPTION_ALIGN32);
+
+ if (ret) {
+ goto fail;
+ }
+
+	/* Populate requests using the local_xtlvbuf context. The xtlvbuf
+	 * is used to fill the container holding the XTLVs populated using
+	 * local_xtlvbuf.
+	 */
+ ret = bcm_xtlv_buf_init(&xtlvbuf,
+ (uint8*)(request->data),
+ IF_COUNTERS_PARAM_CONTAINER_LEN_MAX -
+ offsetof(wl_stats_report_t, data),
+ BCM_XTLV_OPTION_ALIGN32);
+
+ if (ret) {
+ goto fail;
+ }
+
+ /* Request generic stats */
+ ret = bcm_xtlv_put_data(&local_xtlvbuf,
+ WL_IFSTATS_XTLV_GENERIC, NULL, 0);
+ if (ret) {
+ goto fail;
+ }
+
+ /* Complete the outer container with type and length
+ * only.
+ */
+ ret = bcm_xtlv_put_data(&xtlvbuf,
+ WL_IFSTATS_XTLV_IF,
+ NULL, bcm_xtlv_buf_len(&local_xtlvbuf));
+
+ if (ret) {
+ goto fail;
+ }
+
+ request->length = bcm_xtlv_buf_len(&xtlvbuf) +
+ offsetof(wl_stats_report_t, data);
+ bsscfg_idx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr);
+
+	/* send the command over to the device and get the output */
+ ret = wldev_iovar_getbuf_bsscfg(dev, "if_counters", (void *)request,
+ request->length, pbuf, WLC_IOCTL_MEDLEN, bsscfg_idx,
+ &cfg->ioctl_buf_sync);
+ if (ret < 0) {
+ WL_ERR(("if_counters not supported ret=%d\n", ret));
+ goto fail;
+ }
+
+	/* Reuse the ioctl output buffer (pbuf) as the response */
+ response = (wl_stats_report_t *)pbuf;
+
+ /* version check */
+ if (response->version != WL_STATS_REPORT_REQUEST_VERSION_V2) {
+ ret = BCME_VERSION;
+ goto fail;
+ }
+
+ xtlv = (bcm_xtlv_t *)(response->data);
+
+ expected_resp_len =
+ (BCM_XTLV_LEN(xtlv) + OFFSETOF(wl_stats_report_t, data));
+
+ /* Check if the received length is as expected */
+ if ((response->length > WLC_IOCTL_MEDLEN) ||
+ (response->length < expected_resp_len)) {
+ ret = BCME_ERROR;
+ WL_ERR(("Illegal response length received. Got: %d"
+ " Expected: %d. Expected len must be <= %u\n",
+ response->length, expected_resp_len, WLC_IOCTL_MEDLEN));
+ goto fail;
+ }
+
+ /* check the type. The return data will be in
+ * WL_IFSTATS_XTLV_IF container. So check if that container is
+ * present
+ */
+ if (BCM_XTLV_ID(xtlv) != WL_IFSTATS_XTLV_IF) {
+ ret = BCME_ERROR;
+ WL_ERR(("unexpected type received: %d Expected: %d\n",
+ BCM_XTLV_ID(xtlv), WL_IFSTATS_XTLV_IF));
+ goto fail;
+ }
+
+ /* Process XTLVs within WL_IFSTATS_XTLV_IF container */
+ ret = bcm_unpack_xtlv_buf(if_stats,
+ (uint8*)response->data + BCM_XTLV_HDR_SIZE,
+ BCM_XTLV_LEN(xtlv), /* total length of all TLVs in container */
+ BCM_XTLV_OPTION_ALIGN32, wl_cfg80211_ifstats_counters_cb);
+ if (ret) {
+ WL_ERR(("Error unpacking XTLVs in wl_ifstats_counters: %d\n", ret));
+ }
+
+fail:
+ if (pbuf) {
+ MFREE(dhdp->osh, pbuf, WLC_IOCTL_MEDLEN);
+ }
+
+ if (request) {
+ MFREE(dhdp->osh, request, IF_COUNTERS_PARAM_CONTAINER_LEN_MAX);
+ }
+ return ret;
+}
+#undef IF_COUNTERS_PARAM_CONTAINER_LEN_MAX
+
+static s32
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
+wl_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *mac, struct station_info *sinfo)
+#else
+wl_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev,
+ u8 *mac, struct station_info *sinfo)
+#endif
+{
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ s32 rssi = 0;
+#if defined(SUPPORT_RSSI_SUM_REPORT) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
+ wl_rssi_ant_mimo_t rssi_ant_mimo;
+ int cnt, chains;
+#endif /* SUPPORT_RSSI_SUM_REPORT && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) */
+ s32 rate = 0;
+ s32 err = 0;
+ u16 wl_iftype = 0;
+ u16 wl_mode = 0;
+ get_pktcnt_t pktcnt;
+ wl_if_stats_t *if_stats = NULL;
+ sta_info_v4_t *sta = NULL;
+#ifdef WL_RATE_INFO
+ wl_rate_info_t *rates = NULL;
+#endif /* WL_RATE_INFO */
+ u8 *curmacp = NULL;
+ s8 eabuf[ETHER_ADDR_STR_LEN];
+#if defined(BCMDONGLEHOST)
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+ bool fw_assoc_state = FALSE;
+ u32 dhd_assoc_state = 0;
+#endif
+ void *buf;
+
+ RETURN_EIO_IF_NOT_UP(cfg);
+
+ if (cfg80211_to_wl_iftype(dev->ieee80211_ptr->iftype, &wl_iftype, &wl_mode) < 0) {
+ return -EINVAL;
+ }
+
+	buf = MALLOC(cfg->osh, WLC_IOCTL_MEDLEN);
+	if (buf == NULL) {
+		WL_ERR(("wl_cfg80211_get_station: MALLOC failed\n"));
+		err = -ENOMEM;
+		goto error;
+	}
+
+ switch (wl_iftype) {
+ case WL_IF_TYPE_STA:
+ case WL_IF_TYPE_IBSS:
+ if (cfg->roam_offload) {
+ struct ether_addr bssid;
+ bzero(&bssid, sizeof(bssid));
+ err = wldev_ioctl_get(dev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN);
+ if (err) {
+ WL_ERR(("Failed to get current BSSID\n"));
+ } else {
+ if (memcmp(mac, &bssid.octet, ETHER_ADDR_LEN) != 0) {
+ /* roaming is detected */
+ err = wl_cfg80211_delayed_roam(cfg, dev, &bssid);
+ if (err)
+ WL_ERR(("Failed to handle the delayed"
+ " roam, err=%d", err));
+ mac = (u8 *)bssid.octet;
+ }
+ }
+ }
+#if defined(BCMDONGLEHOST)
+ dhd_assoc_state = wl_get_drv_status(cfg, CONNECTED, dev);
+ DHD_OS_WAKE_LOCK(dhd);
+ fw_assoc_state = dhd_is_associated(dhd, 0, &err);
+ if (dhd_assoc_state && !fw_assoc_state) {
+ /* check roam (join) status */
+ if (wl_check_assoc_state(cfg, dev)) {
+ fw_assoc_state = TRUE;
+ WL_DBG(("roam status\n"));
+ }
+ }
+ DHD_OS_WAKE_UNLOCK(dhd);
+#endif
+#if defined(BCMDONGLEHOST)
+ if (!dhd_assoc_state || !fw_assoc_state)
+#else
+ if (!wl_get_drv_status(cfg, CONNECTED, dev))
+#endif /* defined(BCMDONGLEHOST) */
+ {
+ WL_ERR(("NOT assoc\n"));
+ if (err == -ENODATA)
+ goto error;
+#if defined(BCMDONGLEHOST)
+ if (!dhd_assoc_state) {
+ WL_TRACE_HW4(("drv state is not connected \n"));
+ }
+ if (!fw_assoc_state) {
+ WL_TRACE_HW4(("fw state is not associated \n"));
+ }
+ /* Disconnect due to fw is not associated for
+ * FW_ASSOC_WATCHDOG_TIME ms.
+ * 'err == 0' of dhd_is_associated() and '!fw_assoc_state'
+ * means that BSSID is null.
+ */
+ if (dhd_assoc_state && !fw_assoc_state && !err) {
+ if (!fw_assoc_watchdog_started) {
+ fw_assoc_watchdog_ms = OSL_SYSUPTIME();
+ fw_assoc_watchdog_started = TRUE;
+ WL_TRACE_HW4(("fw_assoc_watchdog_started \n"));
+ } else if (OSL_SYSUPTIME() - fw_assoc_watchdog_ms >
+ FW_ASSOC_WATCHDOG_TIME) {
+ fw_assoc_watchdog_started = FALSE;
+ err = -ENODEV;
+ WL_TRACE_HW4(("fw is not associated for %d ms \n",
+ (OSL_SYSUPTIME() - fw_assoc_watchdog_ms)));
+ goto get_station_err;
+ }
+ }
+#endif /* defined(BCMDONGLEHOST) */
+ err = -ENODEV;
+ goto error;
+ }
+#if defined(BCMDONGLEHOST)
+ if (dhd_is_associated(dhd, 0, NULL)) {
+ fw_assoc_watchdog_started = FALSE;
+ }
+#endif /* defined(BCMDONGLEHOST) */
+ curmacp = wl_read_prof(cfg, dev, WL_PROF_BSSID);
+ if (memcmp(mac, curmacp, ETHER_ADDR_LEN)) {
+ WL_ERR(("Wrong Mac address: "MACDBG" != "MACDBG"\n",
+ MAC2STRDBG(mac), MAC2STRDBG(curmacp)));
+ }
+ sta = (sta_info_v4_t *)buf;
+ bzero(sta, WLC_IOCTL_MEDLEN);
+ err = wldev_iovar_getbuf(dev, "sta_info", (const void*)curmacp,
+ ETHER_ADDR_LEN, buf, WLC_IOCTL_MEDLEN, NULL);
+ if (err < 0) {
+ WL_ERR(("GET STA INFO failed, %d\n", err));
+ goto error;
+ }
+		if (sta->ver != WL_STA_VER_4 && sta->ver != WL_STA_VER_5) {
+			WL_ERR(("GET STA INFO version mismatch, %d\n", sta->ver));
+			err = BCME_VERSION;
+			goto error;
+		}
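+		/* sta->rx_rate is reported by firmware in kbps; NL80211 legacy
+		 * bitrates are in units of 100 kbps, hence the /100 below (and
+		 * /1000 for Mbps in the debug log).
+		 */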
+ sta->rx_rate = dtoh32(sta->rx_rate);
+ if (sta->rx_rate != 0) {
+ sinfo->filled |= STA_INFO_BIT(INFO_RX_BITRATE);
+ sinfo->rxrate.legacy = sta->rx_rate / 100;
+ WL_DBG(("RX Rate %d Mbps\n", (sta->rx_rate / 1000)));
+ }
+		/* fall through to gather additional information */
+ case WL_IF_TYPE_P2P_GC:
+ case WL_IF_TYPE_P2P_DISC:
+ if ((err = wl_cfg80211_get_rssi(dev, cfg, &rssi)) != BCME_OK) {
+ goto get_station_err;
+ }
+#if defined(RSSIAVG)
+ err = wl_update_connected_rssi_cache(dev, &cfg->g_connected_rssi_cache_ctrl, &rssi);
+ if (err) {
+ WL_ERR(("Could not get rssi (%d)\n", err));
+ goto get_station_err;
+ }
+ wl_delete_dirty_rssi_cache(&cfg->g_connected_rssi_cache_ctrl);
+ wl_reset_rssi_cache(&cfg->g_connected_rssi_cache_ctrl);
+#if defined(RSSIOFFSET)
+ rssi = wl_update_rssi_offset(dev, rssi);
+#endif
+#endif
+#if !defined(RSSIAVG) && !defined(RSSIOFFSET)
+ // terence 20150419: limit the max. rssi to -2 or the bss will be filtered out in android OS
+ rssi = MIN(rssi, RSSI_MAXVAL);
+#endif
+ sinfo->filled |= STA_INFO_BIT(INFO_SIGNAL);
+ sinfo->signal = rssi;
+ WL_DBG(("RSSI %d dBm\n", rssi));
+#if defined(SUPPORT_RSSI_SUM_REPORT) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
+ /* Query RSSI sum across antennas */
+ (void)memset_s(&rssi_ant_mimo,
+ sizeof(rssi_ant_mimo), 0, sizeof(rssi_ant_mimo));
+ err = wl_get_rssi_per_ant(dev, dev->name, NULL, &rssi_ant_mimo);
+ if (err) {
+ WL_ERR(("Could not get rssi sum (%d)\n", err));
+ } else {
+ chains = 0;
+ for (cnt = 0; cnt < rssi_ant_mimo.count; cnt++) {
+ sinfo->chain_signal[cnt] = rssi_ant_mimo.rssi_ant[cnt];
+ chains |= (1 << cnt);
+ WL_DBG(("RSSI[%d]: %d dBm\n",
+ cnt, rssi_ant_mimo.rssi_ant[cnt]));
+ }
+ sinfo->chains = chains;
+ sinfo->filled |= STA_INFO_BIT(INFO_CHAIN_SIGNAL);
+ }
+#endif /* SUPPORT_RSSI_SUM_REPORT && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) */
+		/* fall through to gather additional information */
+ case WL_IF_TYPE_P2P_GO:
+#ifdef WL_RATE_INFO
+ /* Get the current tx/rx rate */
+ err = wldev_iovar_getbuf(dev, "rate_info", NULL, 0,
+ buf, WLC_IOCTL_SMLEN, NULL);
+#else
+ /* Get the current tx rate */
+ err = wldev_ioctl_get(dev, WLC_GET_RATE, &rate, sizeof(rate));
+#endif /* WL_RATE_INFO */
+ if (err) {
+ WL_ERR(("Could not get rate (%d)\n", err));
+ goto error;
+ } else {
+#if defined(USE_DYNAMIC_MAXPKT_RXGLOM)
+ int rxpktglom;
+#endif
+#ifdef WL_RATE_INFO
+ rates = (wl_rate_info_t *)buf;
+ rates->version = dtoh32(rates->version);
+ rates->length = dtoh32(rates->length);
+ if (rates->version != WL_RATE_INFO_VERSION) {
+ WL_ERR(("RATE_INFO version mismatch\n"));
+ err = BCME_VERSION;
+ goto error;
+ }
+ if (rates->length != (uint16)sizeof(wl_rate_info_t)) {
+ WL_ERR(("RATE_INFO length mismatch\n"));
+ err = BCME_BADLEN;
+ goto error;
+ }
+ /* Report the current tx rate */
+ rate = dtoh32(rates->mode_tx_rate);
+#else
+ /* Report the current tx rate */
+ rate = dtoh32(rate);
+#endif /* WL_RATE_INFO */
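+			/* "rate" is in 500 kbps units (hence the /2 for Mbps in the
+			 * logs); NL80211 legacy bitrates are in units of 100 kbps,
+			 * hence the *5 below.
+			 */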
+ sinfo->filled |= STA_INFO_BIT(INFO_TX_BITRATE);
+ sinfo->txrate.legacy = rate * 5;
+ WL_DBG(("Tx rate %d Mbps\n", (rate / 2)));
+#if defined(USE_DYNAMIC_MAXPKT_RXGLOM)
+ rxpktglom = ((rate/2) > 150) ? 20 : 10;
+
+ if (maxrxpktglom != rxpktglom) {
+ maxrxpktglom = rxpktglom;
+ WL_DBG(("Rate %d Mbps, update bus:"
+ "maxtxpktglom=%d\n", (rate/2), maxrxpktglom));
+ err = wldev_iovar_setbuf(dev, "bus:maxtxpktglom",
+ (char*)&maxrxpktglom, 4, cfg->ioctl_buf,
+ WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+ if (err < 0) {
+ WL_ERR(("set bus:maxtxpktglom failed, %d\n", err));
+ }
+ }
+#endif
+#ifdef WL_RATE_INFO
+ /* Report the current rx rate */
+ rate = dtoh32(rates->mode_rx_rate);
+ sinfo->filled |= STA_INFO_BIT(INFO_RX_BITRATE);
+ sinfo->rxrate.legacy = rate * 5;
+ WL_DBG(("Rx rate %d Mbps\n", (rate / 2)));
+#endif /* WL_RATE_INFO */
+ }
+ if_stats = (wl_if_stats_t *)buf;
+ bzero(if_stats, WLC_IOCTL_MEDLEN);
+#ifdef BCMDONGLEHOST
+ if (FW_SUPPORTED(dhd, ifst)) {
+ err = wl_cfg80211_ifstats_counters(dev, if_stats);
+ } else
+#endif /* BCMDONGLEHOST */
+ {
+ err = wldev_iovar_getbuf(dev, "if_counters", NULL, 0,
+ (char *)if_stats, WLC_IOCTL_MEDLEN, NULL);
+ }
+
+ if (err) {
+// WL_ERR(("if_counters not supported ret=%d\n", err));
+ bzero(&pktcnt, sizeof(pktcnt));
+ err = wldev_ioctl_get(dev, WLC_GET_PKTCNTS, &pktcnt,
+ sizeof(pktcnt));
+ if (!err) {
+ sinfo->rx_packets = pktcnt.rx_good_pkt;
+ sinfo->rx_dropped_misc = pktcnt.rx_bad_pkt;
+ sinfo->tx_packets = pktcnt.tx_good_pkt;
+ sinfo->tx_failed = pktcnt.tx_bad_pkt;
+ }
+ } else {
+ sinfo->rx_packets = (uint32)dtoh64(if_stats->rxframe);
+ /* In this case, if_stats->rxerror is invalid.
+ * So, force to assign '0'.
+ */
+ sinfo->rx_dropped_misc = 0;
+ sinfo->tx_packets = (uint32)dtoh64(if_stats->txfrmsnt);
+ sinfo->tx_failed = (uint32)dtoh64(if_stats->txnobuf) +
+ (uint32)dtoh64(if_stats->txrunt) +
+ (uint32)dtoh64(if_stats->txfail);
+ sinfo->rx_bytes = dtoh64(if_stats->rxbyte);
+ sinfo->tx_bytes = dtoh64(if_stats->txbyte);
+ sinfo->tx_retries = (uint32)dtoh64(if_stats->txretry);
+ sinfo->filled |= (STA_INFO_BIT(INFO_RX_BYTES) |
+ STA_INFO_BIT(INFO_TX_BYTES) |
+ STA_INFO_BIT(INFO_TX_RETRIES));
+ }
+
+ sinfo->filled |= (STA_INFO_BIT(INFO_RX_PACKETS) |
+ STA_INFO_BIT(INFO_RX_DROP_MISC) |
+ STA_INFO_BIT(INFO_TX_PACKETS) |
+ STA_INFO_BIT(INFO_TX_FAILED));
+get_station_err:
+#if 0
+ if (err && (err != -ENODATA)) {
+ /* Disconnect due to zero BSSID or error to get RSSI */
+ scb_val_t scbval;
+#ifdef BCMDONGLEHOST
+ DHD_STATLOG_CTRL(dhd, ST(DISASSOC_INT_START),
+ dhd_net2idx(dhd->info, dev), DOT11_RC_DISASSOC_LEAVING);
+#endif /* BCMDONGLEHOST */
+ scbval.val = htod32(DOT11_RC_DISASSOC_LEAVING);
+ err = wldev_ioctl_set(dev, WLC_DISASSOC, &scbval,
+ sizeof(scb_val_t));
+ if (unlikely(err)) {
+ WL_ERR(("disassoc error (%d)\n", err));
+ }
+
+ WL_ERR(("force cfg80211_disconnected: %d\n", err));
+ wl_clr_drv_status(cfg, CONNECTED, dev);
+#ifdef BCMDONGLEHOST
+ DHD_STATLOG_CTRL(dhd, ST(DISASSOC_DONE),
+ dhd_net2idx(dhd->info, dev), DOT11_RC_DISASSOC_LEAVING);
+#endif /* BCMDONGLEHOST */
+ CFG80211_DISCONNECTED(dev, 0, NULL, 0, false, GFP_KERNEL);
+ wl_link_down(cfg);
+ }
+#endif
+ break;
+ case WL_IF_TYPE_AP:
+ sta = (sta_info_v4_t *)buf;
+ bzero(sta, WLC_IOCTL_MEDLEN);
+ err = wldev_iovar_getbuf(dev, "sta_info", (const void*)mac,
+ ETHER_ADDR_LEN, buf, WLC_IOCTL_MEDLEN, NULL);
+ if (err < 0) {
+ WL_ERR(("GET STA INFO failed, %d\n", err));
+ goto error;
+ }
+		if (sta->ver != WL_STA_VER_4 && sta->ver != WL_STA_VER_5) {
+			WL_ERR(("GET STA INFO version mismatch, %d\n", sta->ver));
+			err = BCME_VERSION;
+			goto error;
+		}
+ sta->len = dtoh16(sta->len);
+ sta->cap = dtoh16(sta->cap);
+ sta->flags = dtoh32(sta->flags);
+ sta->idle = dtoh32(sta->idle);
+ sta->in = dtoh32(sta->in);
+ sinfo->filled |= STA_INFO_BIT(INFO_INACTIVE_TIME);
+ sinfo->inactive_time = sta->idle * 1000;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) || \
+ defined(WL_COMPAT_WIRELESS)
+ if (sta->flags & WL_STA_ASSOC) {
+ sinfo->filled |= STA_INFO_BIT(INFO_CONNECTED_TIME);
+ sinfo->connected_time = sta->in;
+ }
+#endif
+ WL_INFORM_MEM(("STA %s, flags 0x%x, idle time %ds, connected time %ds\n",
+ bcm_ether_ntoa((const struct ether_addr *)mac, eabuf),
+ sta->flags, sta->idle, sta->in));
+ break;
+	default:
+ WL_ERR(("Invalid device mode %d\n", wl_get_mode_by_netdev(cfg, dev)));
+ }
+error:
+ if (buf) {
+ MFREE(cfg->osh, buf, WLC_IOCTL_MEDLEN);
+ }
+
+ return err;
+}
+
+static int
+wl_cfg80211_dump_station(struct wiphy *wiphy, struct net_device *ndev,
+ int idx, u8 *mac, struct station_info *sinfo)
+{
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct maclist *assoc_maclist = (struct maclist *)&(cfg->assoclist);
+ int err;
+
+ WL_DBG(("%s: enter, idx=%d\n", __FUNCTION__, idx));
+
+ if (idx == 0) {
+ assoc_maclist->count = MAX_NUM_OF_ASSOCIATED_DEV;
+ err = wldev_ioctl_get(ndev, WLC_GET_ASSOCLIST,
+ assoc_maclist, sizeof(cfg->assoclist));
+ if (err < 0) {
+ WL_ERR(("WLC_GET_ASSOCLIST error %d\n", err));
+ cfg->assoclist.count = 0;
+ return -EOPNOTSUPP;
+ }
+ }
+
+ if (idx < le32_to_cpu(cfg->assoclist.count)) {
+ (void)memcpy_s(mac, ETH_ALEN, cfg->assoclist.mac[idx], ETH_ALEN);
+ return wl_cfg80211_get_station(wiphy, ndev, mac, sinfo);
+ }
+
+ return -ENOENT;
+}
+
+static s32
+wl_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
+ bool enabled, s32 timeout)
+{
+ s32 pm;
+ s32 err = 0;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct net_info *_net_info = wl_get_netinfo_by_netdev(cfg, dev);
+ s32 mode;
+#ifdef RTT_SUPPORT
+ rtt_status_info_t *rtt_status;
+#endif /* RTT_SUPPORT */
+ dhd_pub_t *dhd = cfg->pub;
+ RETURN_EIO_IF_NOT_UP(cfg);
+
+ WL_DBG(("Enter\n"));
+ mode = wl_get_mode_by_netdev(cfg, dev);
+ if (cfg->p2p_net == dev || _net_info == NULL ||
+ !wl_get_drv_status(cfg, CONNECTED, dev) ||
+ ((mode != WL_MODE_BSS) &&
+ (mode != WL_MODE_IBSS))) {
+ return err;
+ }
+
+ /* Enlarge pm_enable_work */
+ wl_add_remove_pm_enable_work(cfg, WL_PM_WORKQ_LONG);
+
+ pm = enabled ? PM_FAST : PM_OFF;
+ if (_net_info->pm_block) {
+ WL_ERR(("%s:Do not enable the power save for pm_block %d\n",
+ dev->name, _net_info->pm_block));
+ pm = PM_OFF;
+ }
+ if (enabled && dhd_conf_get_pm(dhd) >= 0)
+ pm = dhd_conf_get_pm(dhd);
+ pm = htod32(pm);
+ WL_DBG(("%s:power save %s\n", dev->name, (pm ? "enabled" : "disabled")));
+#ifdef RTT_SUPPORT
+ rtt_status = GET_RTTSTATE(dhd);
+ if (rtt_status->status != RTT_ENABLED) {
+#endif /* RTT_SUPPORT */
+ err = wldev_ioctl_set(dev, WLC_SET_PM, &pm, sizeof(pm));
+ if (unlikely(err)) {
+ if (err == -ENODEV)
+ WL_DBG(("net_device is not ready yet\n"));
+ else
+ WL_ERR(("error (%d)\n", err));
+ return err;
+ }
+#ifdef RTT_SUPPORT
+ }
+#endif /* RTT_SUPPORT */
+ wl_cfg80211_update_power_mode(dev);
+ return err;
+}
+
+/* Force-update cfg80211 to keep the power save mode in sync. BUT this is NOT
+ * a good solution since there is no protection while changing wdev->ps. The
+ * best way of changing the power saving mode is through
+ * NL80211_CMD_SET_POWER_SAVE.
+ */
+void wl_cfg80211_update_power_mode(struct net_device *dev)
+{
+ int err, pm = -1;
+
+ err = wldev_ioctl_get(dev, WLC_GET_PM, &pm, sizeof(pm));
+ if (err)
+ WL_ERR(("error (%d)\n", err));
+ else if (pm != -1 && dev->ieee80211_ptr)
+ dev->ieee80211_ptr->ps = (pm == PM_OFF) ? false : true;
+}
+
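+/* Returns the 1-based position of the most significant set bit of bit16
+ * (e.g. 0x0001 -> 1, 0x0003 -> 2, 0x8000 -> 16), or 0 when bit16 is 0.
+ */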
+static __used u32 wl_find_msb(u16 bit16)
+{
+ u32 ret = 0;
+
+ if (bit16 & 0xff00) {
+ ret += 8;
+ bit16 >>= 8;
+ }
+
+ if (bit16 & 0xf0) {
+ ret += 4;
+ bit16 >>= 4;
+ }
+
+ if (bit16 & 0xc) {
+ ret += 2;
+ bit16 >>= 2;
+ }
+
+ if (bit16 & 2)
+ ret += bit16 & 2;
+ else if (bit16)
+ ret += bit16;
+
+ return ret;
+}
+
+#ifndef OEM_ANDROID
+/*
+ * API invoked from driver .resume context
+ */
+s32
+wl_cfg80211_resume(struct bcm_cfg80211 *cfg)
+{
+ struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+ s32 err = BCME_OK;
+ int pkt_filter_id = WL_WOWLAN_PKT_FILTER_ID_FIRST;
+
+ if (unlikely(!wl_get_drv_status(cfg, READY, ndev))) {
+ WL_INFORM_MEM(("device is not ready\n"));
+ return err;
+ }
+
+ while (pkt_filter_id <= WL_WOWLAN_PKT_FILTER_ID_LAST) {
+ /* delete wowlan pkt filter if any */
+ err = wldev_iovar_setbuf(ndev, "pkt_filter_delete", &pkt_filter_id,
+ sizeof(pkt_filter_id), cfg->ioctl_buf, WLC_IOCTL_SMLEN,
+ &cfg->ioctl_buf_sync);
+ /* pkt_filter_delete would return BCME_BADARG when pkt filter id
+ * does not exist in filter list of firmware, ignore it.
+ */
+ if (BCME_BADARG == err)
+ err = BCME_OK;
+
+ if (BCME_OK != err) {
+ WL_ERR(("pkt_filter_delete failed, id=%d, err=%d\n",
+ pkt_filter_id, err));
+ }
+ pkt_filter_id++;
+ }
+
+ return err;
+}
+#endif /* !OEM_ANDROID */
+
+#if !defined(OEM_ANDROID)
+#ifdef CONFIG_PM
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
+static s32 wl_wowlan_config(struct wiphy *wiphy, struct cfg80211_wowlan *wow)
+{
+ s32 err = BCME_OK;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+
+ u32 i = 0, j = 0;
+ u32 buf_len = 0, pattern_size = 0;
+ wl_pkt_filter_t *pkt_filterp = NULL;
+ wl_pkt_filter_enable_t pkt_filter_enable;
+ u8 mask_bytes_len = 0, mask_byte_idx = 0, mask_bit_idx = 0;
+ const u32 max_buf_size = WL_PKT_FILTER_FIXED_LEN +
+ WL_PKT_FILTER_PATTERN_FIXED_LEN + (2 * WL_WOWLAN_MAX_PATTERN_LEN);
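+	/* The firmware filter carries the byte-wide mask and the pattern
+	 * back-to-back in mask_and_pattern[], so the worst case needs room for
+	 * two full patterns on top of the fixed headers.
+	 */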
+
+ WL_DBG(("Enter\n"));
+
+ if (wow == NULL) {
+ WL_DBG(("wow config is null\n"));
+ return err;
+ }
+
+ /* configure wowlan pattern filters */
+ if (0 < wow->n_patterns) {
+ pkt_filterp = (wl_pkt_filter_t *)MALLOCZ(cfg->osh, max_buf_size);
+ if (pkt_filterp == NULL) {
+ WL_ERR(("Error allocating buffer for pkt filters\n"));
+ return -ENOMEM;
+ }
+
+ WL_DBG(("Pattern count=%d\n", wow->n_patterns));
+ while (i < wow->n_patterns) {
+
+ /* reset buffers */
+ buf_len = 0;
+ bzero(pkt_filterp, max_buf_size);
+
+ /* copy filter id */
+ store32_ua(&pkt_filterp->id, (WL_WOWLAN_PKT_FILTER_ID_FIRST + i));
+
+ /* copy filter type */
+ store32_ua(&pkt_filterp->type, WL_PKT_FILTER_TYPE_PATTERN_MATCH);
+
+ /* copy size */
+ pattern_size = htod32(wow->patterns[i].pattern_len);
+ store32_ua(&pkt_filterp->u.pattern.size_bytes, pattern_size);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0))
+ /* copy offset */
+ store32_ua(&pkt_filterp->u.pattern.offset, wow->patterns[i].pkt_offset);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) */
+
+ /* convert mask from bit to byte format */
+ j = 0;
+ mask_bit_idx = 0;
+ mask_byte_idx = 0;
+ mask_bytes_len = DIV_ROUND_UP(pattern_size, 8);
+ while ((mask_byte_idx < mask_bytes_len) &&
+ (mask_bit_idx < pattern_size)) {
+
+ if (isbitset(wow->patterns[i].mask[mask_byte_idx], mask_bit_idx++))
+ pkt_filterp->u.pattern.mask_and_pattern[j] = 0xFF;
+ j++;
+ if (mask_bit_idx >= 8) {
+ /* move to next mask byte */
+ mask_bit_idx = 0;
+ mask_byte_idx++;
+ }
+ }
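+			/* Example of the conversion above (a sketch): for a 3-byte
+			 * pattern with cfg80211 bitmask 0b00000101, bytes 0 and 2
+			 * must match, so the byte-wide firmware mask becomes
+			 * FF 00 FF.
+			 */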
+
+ /* copy pattern to be matched */
+ memcpy(&pkt_filterp->u.pattern.mask_and_pattern[pattern_size],
+ wow->patterns[i].pattern, pattern_size);
+
+ /* calculate filter buffer len */
+ buf_len += WL_PKT_FILTER_FIXED_LEN;
+ buf_len += (WL_PKT_FILTER_PATTERN_FIXED_LEN + (2 * pattern_size));
+
+ /* add pkt filter */
+ err = wldev_iovar_setbuf(ndev, "pkt_filter_add", pkt_filterp, buf_len,
+ cfg->ioctl_buf, WLC_IOCTL_MEDLEN, &cfg->ioctl_buf_sync);
+ if (BCME_OK != err) {
+ WL_ERR(("pkt_filter_add failed, id=%d, err=%d\n",
+ pkt_filterp->id, err));
+ goto exit;
+ }
+
+ /* enable pkt filter id */
+ pkt_filter_enable.id = pkt_filterp->id;
+ pkt_filter_enable.enable = TRUE;
+ err = wldev_iovar_setbuf(ndev, "pkt_filter_enable", &pkt_filter_enable,
+ sizeof(pkt_filter_enable),
+ cfg->ioctl_buf, WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync);
+ if (BCME_OK != err) {
+ WL_ERR(("pkt_filter_enable failed, id=%d, err=%d\n",
+ pkt_filterp->id, err));
+ goto exit;
+ }
+ i++; /* move to next pattern */
+ }
+ } else
+ WL_DBG(("wowlan filters not found\n"));
+
+exit:
+ if (pkt_filterp) {
+ MFREE(cfg->osh, pkt_filterp, max_buf_size);
+ }
+
+ return err;
+}
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) */
+#endif /* CONFIG_PM */
+#endif /* !OEM_ANDROID */
+
+#ifndef OEM_ANDROID
+/*
+ * API invoked from driver .suspend context
+ */
+s32
+wl_cfg80211_suspend(struct bcm_cfg80211 *cfg)
+{
+ s32 err = BCME_OK;
+#ifdef CONFIG_PM
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
+ struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) */
+#endif /* CONFIG_PM */
+#ifdef DHD_CLEAR_ON_SUSPEND
+ struct net_info *iter, *next;
+ struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+ unsigned long flags;
+
+ if (unlikely(!wl_get_drv_status(cfg, READY, ndev))) {
+ WL_INFORM_MEM(("device is not ready : status (%d)\n",
+ (int)cfg->status));
+ return err;
+ }
+ for_each_ndev(cfg, iter, next) {
+ /* p2p discovery iface doesn't have a ndev associated with it (for kernel > 3.8) */
+ if (iter->ndev)
+ wl_set_drv_status(cfg, SCAN_ABORTING, iter->ndev);
+ }
+ WL_CFG_DRV_LOCK(&cfg->cfgdrv_lock, flags);
+ if (cfg->scan_request) {
+ cfg80211_scan_done(cfg->scan_request, true);
+ cfg->scan_request = NULL;
+ }
+ for_each_ndev(cfg, iter, next) {
+ if (iter->ndev) {
+ wl_clr_drv_status(cfg, SCANNING, iter->ndev);
+ wl_clr_drv_status(cfg, SCAN_ABORTING, iter->ndev);
+ }
+ }
+ WL_CFG_DRV_UNLOCK(&cfg->cfgdrv_lock, flags);
+ for_each_ndev(cfg, iter, next) {
+ if (iter->ndev) {
+ if (wl_get_drv_status(cfg, CONNECTING, iter->ndev)) {
+ wl_bss_connect_done(cfg, iter->ndev, NULL, NULL, false);
+ }
+ }
+ }
+#endif /* DHD_CLEAR_ON_SUSPEND */
+#ifdef CONFIG_PM
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
+ err = wl_wowlan_config(wiphy, wiphy->wowlan_config);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) */
+#endif /* CONFIG_PM */
+
+ return err;
+}
+#endif /* !OEM_ANDROID */
+
+static s32
+wl_update_pmklist(struct net_device *dev, struct wl_pmk_list *pmk_list,
+ s32 err)
+{
+ int i, j;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ struct net_device *primary_dev = bcmcfg_to_prmry_ndev(cfg);
+ int npmkids = cfg->pmk_list->pmkids.count;
+
+ ASSERT(cfg->pmk_list->pmkids.length >= (sizeof(u16)*2));
+ if (!pmk_list) {
+ WL_ERR(("pmk_list is NULL\n"));
+ return -EINVAL;
+ }
+	/* The PMK list is supported only on the STA interface, i.e. the primary
+	 * interface. Refer to wlc_bsscfg.c:wlc_bsscfg_sta_init().
+	 */
+	if (primary_dev != dev) {
+		WL_INFORM_MEM(("Flushing the PMK list is not supported on virtual"
+			" interfaces; only the primary interface is supported\n"));
+ return err;
+ }
+
+ WL_DBG(("No of elements %d\n", npmkids));
+ for (i = 0; i < npmkids; i++) {
+ WL_DBG(("PMKID[%d]: %pM =\n", i,
+ &pmk_list->pmkids.pmkid[i].bssid));
+ for (j = 0; j < WPA2_PMKID_LEN; j++) {
+ WL_DBG(("%02x\n", pmk_list->pmkids.pmkid[i].pmkid[j]));
+ }
+ }
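+	/* Pick the pmkid_info format the firmware understands:
+	 * v3 when wlc_ver_major >= MIN_PMKID_LIST_V3_FW_MAJOR (list sent as-is),
+	 * v2 when wlc_ver_major == MIN_PMKID_LIST_V2_FW_MAJOR (converted below),
+	 * v1 otherwise (legacy BSSID + PMKID pairs only).
+	 */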
+ if (cfg->wlc_ver.wlc_ver_major >= MIN_PMKID_LIST_V3_FW_MAJOR) {
+ pmk_list->pmkids.version = PMKID_LIST_VER_3;
+ err = wldev_iovar_setbuf(dev, "pmkid_info", (char *)pmk_list,
+ sizeof(*pmk_list), cfg->ioctl_buf,
+ WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+ }
+ else if (cfg->wlc_ver.wlc_ver_major == MIN_PMKID_LIST_V2_FW_MAJOR) {
+ u32 v2_list_size = (u32)(sizeof(pmkid_list_v2_t) + npmkids*sizeof(pmkid_v2_t));
+ pmkid_list_v2_t *pmkid_v2_list = (pmkid_list_v2_t *)MALLOCZ(cfg->osh, v2_list_size);
+ pmkid_list_v3_t *spmk_list = &cfg->spmk_info_list->pmkids;
+
+ if (pmkid_v2_list == NULL) {
+ WL_ERR(("failed to allocate pmkid list\n"));
+ return BCME_NOMEM;
+ }
+
+ pmkid_v2_list->version = PMKID_LIST_VER_2;
+ /* Account for version, length and pmkid_v2_t fields */
+ pmkid_v2_list->length = (npmkids * sizeof(pmkid_v2_t)) + (2 * sizeof(u16));
+
+ for (i = 0; i < npmkids; i++) {
+ /* memcpy_s return checks not needed as buffers are of same size */
+ (void)memcpy_s(&pmkid_v2_list->pmkid[i].BSSID,
+ ETHER_ADDR_LEN, &pmk_list->pmkids.pmkid[i].bssid,
+ ETHER_ADDR_LEN);
+
+ /* copy pmkid if available */
+ if (pmk_list->pmkids.pmkid[i].pmkid_len) {
+ (void)memcpy_s(pmkid_v2_list->pmkid[i].PMKID,
+ WPA2_PMKID_LEN,
+ pmk_list->pmkids.pmkid[i].pmkid,
+ pmk_list->pmkids.pmkid[i].pmkid_len);
+ }
+
+ if (pmk_list->pmkids.pmkid[i].pmk_len) {
+ (void)memcpy_s(pmkid_v2_list->pmkid[i].pmk,
+ pmk_list->pmkids.pmkid[i].pmk_len,
+ pmk_list->pmkids.pmkid[i].pmk,
+ pmk_list->pmkids.pmkid[i].pmk_len);
+ pmkid_v2_list->pmkid[i].pmk_len = pmk_list->pmkids.pmkid[i].pmk_len;
+ }
+
+ if (pmk_list->pmkids.pmkid[i].ssid_len) {
+ (void)memcpy_s(pmkid_v2_list->pmkid[i].ssid.ssid,
+ pmk_list->pmkids.pmkid[i].ssid_len,
+ pmk_list->pmkids.pmkid[i].ssid,
+ pmk_list->pmkids.pmkid[i].ssid_len);
+ pmkid_v2_list->pmkid[i].ssid.ssid_len
+ = pmk_list->pmkids.pmkid[i].ssid_len;
+ }
+
+ (void)memcpy_s(pmkid_v2_list->pmkid[i].fils_cache_id,
+ FILS_CACHE_ID_LEN, &pmk_list->pmkids.pmkid[i].fils_cache_id,
+ FILS_CACHE_ID_LEN);
+ for (j = 0; j < spmk_list->count; j++) {
+ if (memcmp(&pmkid_v2_list->pmkid[i].BSSID,
+ &spmk_list->pmkid[j].bssid, ETHER_ADDR_LEN)) {
+ continue; /* different MAC */
+ }
+ WL_DBG(("SPMK replace idx:%d bssid: "MACF " to SSID: %d\n", i,
+ ETHER_TO_MACF(pmkid_v2_list->pmkid[i].BSSID),
+ spmk_list->pmkid[j].ssid_len));
+ bzero(&pmkid_v2_list->pmkid[i].BSSID, ETHER_ADDR_LEN);
+ pmkid_v2_list->pmkid[i].ssid.ssid_len =
+ spmk_list->pmkid[j].ssid_len;
+ (void)memcpy_s(pmkid_v2_list->pmkid[i].ssid.ssid,
+ spmk_list->pmkid[j].ssid_len,
+ spmk_list->pmkid[j].ssid,
+ spmk_list->pmkid[j].ssid_len);
+ }
+ pmkid_v2_list->pmkid[i].length = PMKID_ELEM_V2_LENGTH;
+ }
+
+ err = wldev_iovar_setbuf(dev, "pmkid_info", (char *)pmkid_v2_list,
+ v2_list_size, cfg->ioctl_buf,
+ WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+ if (unlikely(err)) {
+ WL_ERR(("pmkid_info failed (%d)\n", err));
+ }
+
+ MFREE(cfg->osh, pmkid_v2_list, v2_list_size);
+ }
+ else {
+ u32 v1_list_size = (u32)(sizeof(pmkid_list_v1_t) + npmkids*sizeof(pmkid_v1_t));
+ pmkid_list_v1_t *pmkid_v1_list = (pmkid_list_v1_t *)MALLOCZ(cfg->osh, v1_list_size);
+ if (pmkid_v1_list == NULL) {
+ WL_ERR(("failed to allocate pmkid list\n"));
+ return BCME_NOMEM;
+ }
+ for (i = 0; i < npmkids; i++) {
+ /* memcpy_s return checks not needed as buffers are of same size */
+ (void)memcpy_s(&pmkid_v1_list->pmkid[i].BSSID,
+ ETHER_ADDR_LEN, &pmk_list->pmkids.pmkid[i].bssid,
+ ETHER_ADDR_LEN);
+ (void)memcpy_s(pmkid_v1_list->pmkid[i].PMKID,
+ WPA2_PMKID_LEN, pmk_list->pmkids.pmkid[i].pmkid,
+ WPA2_PMKID_LEN);
+ pmkid_v1_list->npmkid++;
+ }
+ err = wldev_iovar_setbuf(dev, "pmkid_info", (char *)pmkid_v1_list,
+ v1_list_size, cfg->ioctl_buf,
+ WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+ if (unlikely(err)) {
+ WL_ERR(("pmkid_info failed (%d)\n", err));
+ }
+
+ MFREE(cfg->osh, pmkid_v1_list, v1_list_size);
+ }
+ return err;
+}
+
+/* TODO: remove the temporary cfg->pmk_list and call wl_cfg80211_update_pmksa
+ * for single-entry operations.
+ */
+static s32
+wl_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_pmksa *pmksa)
+{
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ s32 err = 0;
+ int i;
+ int npmkids = cfg->pmk_list->pmkids.count;
+#ifdef BCMDONGLEHOST
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+#endif /* BCMDONGLEHOST */
+
+ if (cfg->wlc_ver.wlc_ver_major >= PMKDB_WLC_VER) {
+ err = wl_cfg80211_update_pmksa(wiphy, dev, pmksa, TRUE);
+ if (err != BCME_OK) {
+ WL_ERR(("wl_cfg80211_set_pmksa err:%d\n", err));
+ }
+ return err;
+ }
+
+ RETURN_EIO_IF_NOT_UP(cfg);
+#ifdef BCMDONGLEHOST
+ BCM_REFERENCE(dhdp);
+ DHD_STATLOG_CTRL(dhdp, ST(INSTALL_PMKSA), dhd_net2idx(dhdp->info, dev), 0);
+#endif /* BCMDONGLEHOST */
+
+ for (i = 0; i < npmkids; i++) {
+ if (pmksa->bssid != NULL) {
+ if (!memcmp(pmksa->bssid, &cfg->pmk_list->pmkids.pmkid[i].bssid,
+ ETHER_ADDR_LEN))
+ break;
+ }
+#ifdef WL_FILS
+ else if (pmksa->ssid != NULL) {
+ if (!memcmp(pmksa->ssid, &cfg->pmk_list->pmkids.pmkid[i].ssid,
+ pmksa->ssid_len))
+ break;
+ }
+#endif /* WL_FILS */
+ }
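+	/* At this point i indexes the matching entry, or equals npmkids (the
+	 * first free slot) when no match was found; only a full list is
+	 * rejected.
+	 */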
+ if (i < WL_NUM_PMKIDS_MAX) {
+ if (pmksa->bssid != NULL) {
+ memcpy(&cfg->pmk_list->pmkids.pmkid[i].bssid, pmksa->bssid,
+ ETHER_ADDR_LEN);
+ }
+#ifdef WL_FILS
+ else if (pmksa->ssid != NULL) {
+ cfg->pmk_list->pmkids.pmkid[i].ssid_len = pmksa->ssid_len;
+ memcpy(&cfg->pmk_list->pmkids.pmkid[i].ssid, pmksa->ssid,
+ pmksa->ssid_len);
+ memcpy(&cfg->pmk_list->pmkids.pmkid[i].fils_cache_id, pmksa->cache_id,
+ FILS_CACHE_ID_LEN);
+ }
+#endif /* WL_FILS */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) || \
+ defined(WL_FILS))
+ if (pmksa->pmk_len) {
+ if (memcpy_s(&cfg->pmk_list->pmkids.pmkid[i].pmk, PMK_LEN_MAX, pmksa->pmk,
+ pmksa->pmk_len)) {
+ WL_ERR(("invalid pmk len = %zu", pmksa->pmk_len));
+ } else {
+ cfg->pmk_list->pmkids.pmkid[i].pmk_len = pmksa->pmk_len;
+ }
+ }
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) || defined(WL_FILS) */
+ /* return check not required as buffer lengths are same */
+ (void)memcpy_s(cfg->pmk_list->pmkids.pmkid[i].pmkid, WPA2_PMKID_LEN, pmksa->pmkid,
+ WPA2_PMKID_LEN);
+ cfg->pmk_list->pmkids.pmkid[i].pmkid_len = WPA2_PMKID_LEN;
+
+		/* Set the lifetime so the entry does not expire in firmware by
+		 * default. Currently wpa_supplicant controls the PMKID lifetime on
+		 * its end (e.g. setting 12 hours); when an entry expires,
+		 * wpa_supplicant should call set_pmksa/del_pmksa to update the
+		 * corresponding entry.
+		 */
+ cfg->pmk_list->pmkids.pmkid[i].time_left = KEY_PERM_PMK;
+ if (i == npmkids) {
+ cfg->pmk_list->pmkids.length += sizeof(pmkid_v3_t);
+ cfg->pmk_list->pmkids.count++;
+ }
+ } else {
+ err = -EINVAL;
+ }
+
+#if (WL_DBG_LEVEL > 0)
+ if (pmksa->bssid != NULL) {
+ WL_DBG(("set_pmksa,IW_PMKSA_ADD - PMKID: %pM =\n",
+ &cfg->pmk_list->pmkids.pmkid[npmkids - 1].bssid));
+ }
+ for (i = 0; i < WPA2_PMKID_LEN; i++) {
+ WL_DBG(("%02x\n",
+ cfg->pmk_list->pmkids.pmkid[npmkids - 1].
+ pmkid[i]));
+ }
+#endif /* (WL_DBG_LEVEL > 0) */
+
+ err = wl_update_pmklist(dev, cfg->pmk_list, err);
+
+ return err;
+}
+
+/* Send the pmkid_info IOVAR to manipulate the PMKID (PMKSA) list in firmware.
+ * @pmksa: single PMKSA entry given by the host. If NULL, the whole list is
+ *         manipulated, e.g. flushing all PMKIDs in firmware.
+ * @set: TRUE means an add-PMKSA operation; FALSE means delete.
+ * Return: internal BCME_XXX errors are logged and mapped by callers to a
+ *         generic Linux error code such as -EINVAL.
+ */
+static s32
+wl_cfg80211_update_pmksa(struct wiphy *wiphy, struct net_device *dev,
+	struct cfg80211_pmksa *pmksa, bool set)
+{
+
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ s32 err = 0;
+ pmkid_list_v3_t *pmk_list;
+ uint32 alloc_len;
+
+ RETURN_EIO_IF_NOT_UP(cfg);
+
+ if (cfg->wlc_ver.wlc_ver_major < MIN_PMKID_LIST_V3_FW_MAJOR) {
+ WL_DBG(("wlc_ver_major not supported:%d\n", cfg->wlc_ver.wlc_ver_major));
+ return BCME_VERSION;
+ }
+
+ alloc_len = (uint32)(OFFSETOF(pmkid_list_v3_t, pmkid) + ((pmksa) ? sizeof(pmkid_v3_t) : 0));
+ pmk_list = (pmkid_list_v3_t *)MALLOCZ(cfg->osh, alloc_len);
+
+ if (pmk_list == NULL) {
+ return BCME_NOMEM;
+ }
+
+ pmk_list->version = PMKID_LIST_VER_3;
+ pmk_list->length = alloc_len;
+ pmk_list->count = (pmksa) ? 1 : 0; // 1 means single entry operation, 0 means whole list.
+ pmk_list->flag = (set) ? PMKDB_SET_IOVAR : PMKDB_CLEAR_IOVAR;
+
+ if (pmksa) {
+		/* Control the set/del action via the lifetime parameter:
+		 * set == TRUE  -> set the PMKID with a permanent lifetime.
+		 * set == FALSE -> delete the PMKID by giving it a lifetime of zero.
+		 */
+ pmk_list->pmkid->time_left = (set) ? KEY_PERM_PMK : 0;
+ if (pmksa->bssid) {
+ eacopy(pmksa->bssid, &pmk_list->pmkid->bssid);
+ }
+ if (pmksa->pmkid) {
+ err = memcpy_s(&pmk_list->pmkid->pmkid, sizeof(pmk_list->pmkid->pmkid),
+ pmksa->pmkid, WPA2_PMKID_LEN);
+ if (err) {
+ goto exit;
+ }
+ pmk_list->pmkid->pmkid_len = WPA2_PMKID_LEN;
+ }
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0))
+ if (pmksa->pmk) {
+ err = memcpy_s(&pmk_list->pmkid->pmk, sizeof(pmk_list->pmkid->pmk),
+ pmksa->pmk, pmksa->pmk_len);
+ if (err) {
+ goto exit;
+ }
+ pmk_list->pmkid->pmk_len = pmksa->pmk_len;
+ }
+ if (pmksa->ssid) {
+ err = memcpy_s(&pmk_list->pmkid->ssid, sizeof(pmk_list->pmkid->ssid),
+ pmksa->ssid, pmksa->ssid_len);
+ if (err) {
+ goto exit;
+ }
+ pmk_list->pmkid->ssid_len = pmksa->ssid_len;
+ }
+ if (pmksa->cache_id) {
+			/* The supplicant passes the data as received over the air
+			 * (network order); convert it before use.
+			 */
+ pmk_list->pmkid->fils_cache_id = ntoh16(*(const uint16 *)pmksa->cache_id);
+ }
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
+ wl_cfg80211_spmk_pmkdb_change_pmk_type(cfg, pmk_list);
+ }
+
+ if (wl_dbg_level & WL_DBG_DBG) {
+ prhex("pmklist_data", (char *)pmk_list, alloc_len);
+ }
+
+ err = wldev_iovar_setbuf(dev, "pmkdb", (char *)pmk_list,
+ alloc_len, cfg->ioctl_buf,
+ WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+ if (unlikely(err)) {
+ WL_ERR(("pmkdb set failed. err:%d\n", err));
+ }
+exit:
+ if (pmk_list) {
+ MFREE(cfg->osh, pmk_list, alloc_len);
+ }
+ return err;
+}
+
+/* TODO: remove the temporary cfg->pmk_list and call wl_cfg80211_update_pmksa
+ * for single-entry operations.
+ */
+static s32
+wl_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_pmksa *pmksa)
+{
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ s32 err = 0;
+ int i;
+ int npmkids = cfg->pmk_list->pmkids.count;
+ RETURN_EIO_IF_NOT_UP(cfg);
+
+ if (!pmksa) {
+ WL_ERR(("pmksa is not initialized\n"));
+ return BCME_ERROR;
+ }
+ if (cfg->wlc_ver.wlc_ver_major >= PMKDB_WLC_VER) {
+ err = wl_cfg80211_update_pmksa(wiphy, dev, pmksa, FALSE);
+ if (err != BCME_OK) {
+ WL_ERR(("wl_cfg80211_del_pmksa err:%d\n", err));
+ }
+ wl_cfg80211_spmk_pmkdb_del_spmk(cfg, pmksa);
+ return err;
+ }
+
+ if (!npmkids) {
+		/* npmkids == 0, nothing to delete */
+ WL_DBG(("npmkids=0. Skip del\n"));
+ return BCME_OK;
+ }
+
+#if (WL_DBG_LEVEL > 0)
+ if (pmksa->bssid) {
+ WL_DBG(("del_pmksa,IW_PMKSA_REMOVE - PMKID: %pM =\n",
+ pmksa->bssid));
+ }
+#ifdef WL_FILS
+ else if (pmksa->ssid) {
+ WL_DBG(("FILS: del_pmksa for ssid: "));
+ for (i = 0; i < pmksa->ssid_len; i++) {
+ WL_DBG(("%c", pmksa->ssid[i]));
+ }
+ WL_DBG(("\n"));
+ }
+#endif /* WL_FILS */
+ if (pmksa->pmkid) {
+ for (i = 0; i < WPA2_PMKID_LEN; i++) {
+ WL_DBG(("%02x\n", pmksa->pmkid[i]));
+ }
+ }
+#endif /* (WL_DBG_LEVEL > 0) */
+
+ for (i = 0; i < npmkids; i++) {
+ if (pmksa->bssid) {
+ if (!memcmp
+ (pmksa->bssid, &cfg->pmk_list->pmkids.pmkid[i].bssid,
+ ETHER_ADDR_LEN)) {
+ break;
+ }
+ }
+#ifdef WL_FILS
+ else if (pmksa->ssid) {
+ if (!memcmp
+ (pmksa->ssid, &cfg->pmk_list->pmkids.pmkid[i].ssid,
+ pmksa->ssid_len)) {
+ break;
+ }
+ }
+#endif /* WL_FILS */
+ }
+ if ((npmkids > 0) && (i < npmkids)) {
+ bzero(&cfg->pmk_list->pmkids.pmkid[i], sizeof(pmkid_v3_t));
+ for (; i < (npmkids - 1); i++) {
+ (void)memcpy_s(&cfg->pmk_list->pmkids.pmkid[i],
+ sizeof(pmkid_v3_t),
+ &cfg->pmk_list->pmkids.pmkid[i + 1],
+ sizeof(pmkid_v3_t));
+ }
+ npmkids--;
+ cfg->pmk_list->pmkids.length -= sizeof(pmkid_v3_t);
+ cfg->pmk_list->pmkids.count--;
+
+ } else {
+ err = -EINVAL;
+ }
+
+	/* The current wl_update_pmklist() doesn't delete the corresponding PMKID
+	 * entry inside firmware, so we need to issue the delete action explicitly
+	 * through this function.
+	 */
+	err = wl_cfg80211_update_pmksa(wiphy, dev, pmksa, FALSE);
+	/* Intentionally fall through even on error: this works on firmware at or
+	 * above MIN_PMKID_LIST_V3_FW_MAJOR; otherwise just ignore the failure.
+	 */
+
+ err = wl_update_pmklist(dev, cfg->pmk_list, err);
+
+	return err;
+}
+
+/* TODO: remove the temporary cfg->pmk_list and call wl_cfg80211_update_pmksa
+ * for single-entry operations.
+ */
+static s32
+wl_cfg80211_flush_pmksa(struct wiphy *wiphy, struct net_device *dev)
+{
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ s32 err = 0;
+ RETURN_EIO_IF_NOT_UP(cfg);
+ if (cfg->wlc_ver.wlc_ver_major >= PMKDB_WLC_VER) {
+ /* NULL pmksa means delete whole PMKSA list */
+ err = wl_cfg80211_update_pmksa(wiphy, dev, NULL, FALSE);
+ if (err != BCME_OK) {
+ WL_ERR(("wl_cfg80211_flush_pmksa err:%d\n", err));
+ }
+ return err;
+ }
+ bzero(cfg->pmk_list, sizeof(*cfg->pmk_list));
+ cfg->pmk_list->pmkids.length = OFFSETOF(pmkid_list_v3_t, pmkid);
+ cfg->pmk_list->pmkids.count = 0;
+ cfg->pmk_list->pmkids.version = PMKID_LIST_VER_3;
+ err = wl_update_pmklist(dev, cfg->pmk_list, err);
+ return err;
+}
+
+static void
+wl_cfg80211_afx_handler(struct work_struct *work)
+{
+ struct afx_hdl *afx_instance;
+ struct bcm_cfg80211 *cfg;
+ s32 ret = BCME_OK;
+
+ BCM_SET_CONTAINER_OF(afx_instance, work, struct afx_hdl, work);
+ if (afx_instance) {
+ cfg = wl_get_cfg(afx_instance->dev);
+ if (cfg != NULL && cfg->afx_hdl->is_active) {
+ if (cfg->afx_hdl->is_listen && cfg->afx_hdl->my_listen_chan) {
+ ret = wl_cfgp2p_discover_listen(cfg, cfg->afx_hdl->my_listen_chan,
+ (100 * (1 + (RANDOM32() % 3)))); /* 100ms ~ 300ms */
+ } else {
+ ret = wl_cfgp2p_act_frm_search(cfg, cfg->afx_hdl->dev,
+ cfg->afx_hdl->bssidx, cfg->afx_hdl->peer_listen_chan,
+ NULL);
+ }
+ if (unlikely(ret != BCME_OK)) {
+ WL_ERR(("ERROR occurred! returned value is (%d)\n", ret));
+ if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL))
+ complete(&cfg->act_frm_scan);
+ }
+ }
+ }
+}
+
+static s32
+wl_cfg80211_af_searching_channel(struct bcm_cfg80211 *cfg, struct net_device *dev)
+{
+ u32 max_retry = WL_CHANNEL_SYNC_RETRY;
+ bool is_p2p_gas = false;
+
+ if (dev == NULL)
+ return -1;
+
+ WL_DBG((" enter ) \n"));
+
+ wl_set_drv_status(cfg, FINDING_COMMON_CHANNEL, dev);
+ cfg->afx_hdl->is_active = TRUE;
+
+ if (cfg->afx_hdl->pending_tx_act_frm) {
+ wl_action_frame_t *action_frame;
+ action_frame = &(cfg->afx_hdl->pending_tx_act_frm->action_frame);
+ if (wl_cfgp2p_is_p2p_gas_action(action_frame->data, action_frame->len))
+ is_p2p_gas = true;
+ }
+
+ /* Loop to wait until we find a peer's channel or the
+ * pending action frame tx is cancelled.
+ */
+ while ((cfg->afx_hdl->retry < max_retry) &&
+ (cfg->afx_hdl->peer_chan == WL_INVALID)) {
+ cfg->afx_hdl->is_listen = FALSE;
+ wl_set_drv_status(cfg, SCANNING, dev);
+ WL_DBG(("Scheduling the action frame for sending.. retry %d\n",
+ cfg->afx_hdl->retry));
+ /* search peer on peer's listen channel */
+ schedule_work(&cfg->afx_hdl->work);
+ wait_for_completion_timeout(&cfg->act_frm_scan,
+ msecs_to_jiffies(WL_AF_SEARCH_TIME_MAX));
+
+ if ((cfg->afx_hdl->peer_chan != WL_INVALID) ||
+ !(wl_get_drv_status(cfg, FINDING_COMMON_CHANNEL, dev)))
+ break;
+
+ if (is_p2p_gas)
+ break;
+
+ if (cfg->afx_hdl->my_listen_chan) {
+ WL_DBG(("Scheduling Listen peer in my listen channel = %d\n",
+ cfg->afx_hdl->my_listen_chan));
+ /* listen on my listen channel */
+ cfg->afx_hdl->is_listen = TRUE;
+ schedule_work(&cfg->afx_hdl->work);
+ wait_for_completion_timeout(&cfg->act_frm_scan,
+ msecs_to_jiffies(WL_AF_SEARCH_TIME_MAX));
+ }
+ if ((cfg->afx_hdl->peer_chan != WL_INVALID) ||
+ !(wl_get_drv_status(cfg, FINDING_COMMON_CHANNEL, dev)))
+ break;
+
+ cfg->afx_hdl->retry++;
+
+ WL_AF_TX_KEEP_PRI_CONNECTION_VSDB(cfg);
+ }
+
+ cfg->afx_hdl->is_active = FALSE;
+
+ wl_clr_drv_status(cfg, SCANNING, dev);
+ wl_clr_drv_status(cfg, FINDING_COMMON_CHANNEL, dev);
+
+ return (cfg->afx_hdl->peer_chan);
+}
+
+struct p2p_config_af_params {
+ s32 max_tx_retry; /* max tx retry count if tx no ack */
+#ifdef WL_CFG80211_GON_COLLISION
+ /* drop tx go nego request if go nego collision occurs */
+ bool drop_tx_req;
+#endif
+#ifdef WL_CFG80211_SYNC_GON
+	/* WAR: the dongle sometimes does not honor the dwell time of 'actframe'.
+	 * If extra_listen is set, keep the dwell time to catch the AF response
+	 * frame.
+	 */
+ bool extra_listen;
+#endif
+ bool search_channel; /* 1: search peer's channel to send af */
+};
+
+static s32
+wl_cfg80211_config_p2p_pub_af_tx(struct wiphy *wiphy,
+ wl_action_frame_t *action_frame, wl_af_params_t *af_params,
+ struct p2p_config_af_params *config_af_params)
+{
+ s32 err = BCME_OK;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ wifi_p2p_pub_act_frame_t *act_frm =
+ (wifi_p2p_pub_act_frame_t *) (action_frame->data);
+
+ /* initialize default value */
+#ifdef WL_CFG80211_GON_COLLISION
+ config_af_params->drop_tx_req = false;
+#endif
+#ifdef WL_CFG80211_SYNC_GON
+ config_af_params->extra_listen = true;
+#endif
+ config_af_params->search_channel = false;
+ config_af_params->max_tx_retry = WL_AF_TX_MAX_RETRY;
+ cfg->next_af_subtype = WL_PUB_AF_STYPE_INVALID;
+
+ switch (act_frm->subtype) {
+ case P2P_PAF_GON_REQ: {
+		/* Disable HE if the peer does not support it, before starting GO negotiation */
+ WL_DBG(("P2P: GO_NEG_PHASE status set \n"));
+ wl_set_p2p_status(cfg, GO_NEG_PHASE);
+
+ config_af_params->search_channel = true;
+ cfg->next_af_subtype = act_frm->subtype + 1;
+
+ /* increase dwell time to wait for RESP frame */
+ af_params->dwell_time = WL_MED_DWELL_TIME;
+
+#ifdef WL_CFG80211_GON_COLLISION
+ config_af_params->drop_tx_req = true;
+#endif /* WL_CFG80211_GON_COLLISION */
+ break;
+ }
+ case P2P_PAF_GON_RSP: {
+ cfg->next_af_subtype = act_frm->subtype + 1;
+		/* Increase dwell time to wait for the CONF frame.
+		 * WAR: 100ms is added because the kernel spends more time in some
+		 * cases; the kernel should be fixed.
+		 */
+ af_params->dwell_time = WL_MED_DWELL_TIME + 100;
+ break;
+ }
+ case P2P_PAF_GON_CONF: {
+ /* If we reached till GO Neg confirmation reset the filter */
+ WL_DBG(("P2P: GO_NEG_PHASE status cleared \n"));
+ wl_clr_p2p_status(cfg, GO_NEG_PHASE);
+
+ /* minimize dwell time */
+ af_params->dwell_time = WL_MIN_DWELL_TIME;
+
+#ifdef WL_CFG80211_GON_COLLISION
+ /* if go nego formation done, clear it */
+ cfg->block_gon_req_tx_count = 0;
+ cfg->block_gon_req_rx_count = 0;
+#endif /* WL_CFG80211_GON_COLLISION */
+#ifdef WL_CFG80211_SYNC_GON
+ config_af_params->extra_listen = false;
+#endif /* WL_CFG80211_SYNC_GON */
+ break;
+ }
+ case P2P_PAF_INVITE_REQ: {
+ config_af_params->search_channel = true;
+ cfg->next_af_subtype = act_frm->subtype + 1;
+
+ /* increase dwell time */
+ af_params->dwell_time = WL_MED_DWELL_TIME;
+ break;
+ }
+ case P2P_PAF_INVITE_RSP:
+ /* minimize dwell time */
+ af_params->dwell_time = WL_MIN_DWELL_TIME;
+#ifdef WL_CFG80211_SYNC_GON
+ config_af_params->extra_listen = false;
+#endif /* WL_CFG80211_SYNC_GON */
+ break;
+ case P2P_PAF_DEVDIS_REQ: {
+ if (IS_ACTPUB_WITHOUT_GROUP_ID(&act_frm->elts[0],
+ action_frame->len)) {
+ config_af_params->search_channel = true;
+ }
+
+ cfg->next_af_subtype = act_frm->subtype + 1;
+ /* maximize dwell time to wait for RESP frame */
+ af_params->dwell_time = WL_LONG_DWELL_TIME;
+ break;
+ }
+ case P2P_PAF_DEVDIS_RSP:
+ /* minimize dwell time */
+ af_params->dwell_time = WL_MIN_DWELL_TIME;
+#ifdef WL_CFG80211_SYNC_GON
+ config_af_params->extra_listen = false;
+#endif /* WL_CFG80211_SYNC_GON */
+ break;
+ case P2P_PAF_PROVDIS_REQ: {
+ if (IS_ACTPUB_WITHOUT_GROUP_ID(&act_frm->elts[0],
+ action_frame->len)) {
+ config_af_params->search_channel = true;
+ }
+
+ cfg->next_af_subtype = act_frm->subtype + 1;
+ /* increase dwell time to wait for RESP frame */
+ af_params->dwell_time = WL_MED_DWELL_TIME;
+ break;
+ }
+ case P2P_PAF_PROVDIS_RSP: {
+		/* wpa_supplicant sends a GO nego req right after prov disc */
+ cfg->next_af_subtype = P2P_PAF_GON_REQ;
+ af_params->dwell_time = WL_MED_DWELL_TIME;
+#ifdef WL_CFG80211_SYNC_GON
+ config_af_params->extra_listen = false;
+#endif /* WL_CFG80211_SYNC_GON */
+ break;
+ }
+ default:
+ WL_DBG(("Unknown p2p pub act frame subtype: %d\n",
+ act_frm->subtype));
+ err = BCME_BADARG;
+ }
+ return err;
+}
+
+#if defined(WL11U) && defined(WL_HOST_AF_DFS_CHECK)
+static bool
+wl_cfg80211_check_DFS_channel(struct bcm_cfg80211 *cfg, wl_af_params_t *af_params,
+ void *frame, u16 frame_len)
+{
+ wl_scan_results *bss_list;
+ wl_bss_info_t *bi = NULL;
+ bool result = false;
+ s32 i;
+ chanspec_t chanspec;
+
+ /* If DFS channel is 52~148, check to block it or not */
+ if (af_params &&
+ (af_params->channel >= 52 && af_params->channel <= 148)) {
+ if (!wl_cfgp2p_is_p2p_action(frame, frame_len)) {
+ bss_list = cfg->bss_list;
+ bi = next_bss(bss_list, bi);
+ for_each_bss(bss_list, bi, i) {
+ chanspec = wl_chspec_driver_to_host(bi->chanspec);
+ if (CHSPEC_IS5G(chanspec) &&
+ ((bi->ctl_ch ? bi->ctl_ch : CHSPEC_CHANNEL(chanspec))
+ == af_params->channel)) {
+ result = true; /* do not block the action frame */
+ break;
+ }
+ }
+ }
+ }
+ else {
+ result = true;
+ }
+
+ WL_DBG(("result=%s", result?"true":"false"));
+ return result;
+}
+#endif /* WL11U && WL_HOST_AF_DFS_CHECK */
+
+static bool
+wl_cfg80211_check_dwell_overflow(int32 requested_dwell, ulong dwell_jiffies)
+{
+ if ((requested_dwell & CUSTOM_RETRY_MASK) &&
+ (jiffies_to_msecs(jiffies - dwell_jiffies) >
+ (requested_dwell & ~CUSTOM_RETRY_MASK))) {
+ WL_ERR(("Action frame TX retry time over dwell time!\n"));
+ return true;
+ }
+ return false;
+}
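+
+/*
+ * Illustrative sketch (editor's example, not driver code): how a caller can
+ * pack a custom retry count and dwell time into the single value unpacked
+ * above and in the GAS query path below. This assumes CUSTOM_RETRY_MASK is
+ * the top byte (0xFF000000u); the driver's own definition is authoritative.
+ */
+#ifdef WL_DWELL_PACK_EXAMPLE /* hypothetical guard; never defined */
+static u32 wl_pack_custom_dwell(u8 retry, u32 dwell_ms)
+{
+ /* high byte carries the retry count, low 24 bits the dwell in ms */
+ return ((u32)retry << 24) | (dwell_ms & ~0xFF000000u);
+}
+/* wl_pack_custom_dwell(3, 200) == 0x030000C8, which unpacks to
+ * max_tx_retry == 3 and dwell_time == 200 ms.
+ */
+#endif /* WL_DWELL_PACK_EXAMPLE */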
+
+static bool
+wl_cfg80211_send_action_frame(struct wiphy *wiphy, struct net_device *dev,
+ bcm_struct_cfgdev *cfgdev, wl_af_params_t *af_params,
+ wl_action_frame_t *action_frame, u16 action_frame_len, s32 bssidx)
+{
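+ /*
+ * Overall flow: derive per-frame TX parameters (dwell time, retry
+ * count, expected response subtype), abort any scan/listen that could
+ * steal the channel, optionally search for the peer's listen channel,
+ * transmit with retries, and optionally listen again for the peer's
+ * response.
+ */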
+#ifdef WL11U
+ struct net_device *ndev = NULL;
+#endif /* WL11U */
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ bool ack = false;
+ u8 category, action;
+ s32 tx_retry;
+ struct p2p_config_af_params config_af_params;
+ struct net_info *netinfo;
+#ifdef VSDB
+ ulong off_chan_started_jiffies = 0;
+#endif
+ ulong dwell_jiffies = 0;
+ bool dwell_overflow = false;
+#ifdef BCMDONGLEHOST
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+#endif /* BCMDONGLEHOST */
+
+ int32 requested_dwell = af_params->dwell_time;
+
+ /* Set the default dwell time: the time to stay off-channel waiting
+ * for a response action frame after transmitting a GO Negotiation
+ * action frame.
+ */
+ af_params->dwell_time = WL_DEFAULT_DWELL_TIME;
+
+#ifdef WL11U
+#if defined(WL_CFG80211_P2P_DEV_IF)
+ ndev = dev;
+#else
+ ndev = ndev_to_cfgdev(cfgdev);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+#endif /* WL11U */
+
+ category = action_frame->data[DOT11_ACTION_CAT_OFF];
+ action = action_frame->data[DOT11_ACTION_ACT_OFF];
+
+ /* initialize variables */
+ tx_retry = 0;
+ cfg->next_af_subtype = WL_PUB_AF_STYPE_INVALID;
+ config_af_params.max_tx_retry = WL_AF_TX_MAX_RETRY;
+ config_af_params.search_channel = false;
+#ifdef WL_CFG80211_GON_COLLISION
+ config_af_params.drop_tx_req = false;
+#endif
+#ifdef WL_CFG80211_SYNC_GON
+ config_af_params.extra_listen = false;
+#endif
+
+ /* config parameters */
+ /* Public Action Frame Process - DOT11_ACTION_CAT_PUBLIC */
+ if (category == DOT11_ACTION_CAT_PUBLIC) {
+ if (wl_cfg80211_is_dpp_frame((void *)action_frame->data, action_frame->len)) {
+ wl_dpp_pa_frame_t *pa = (wl_dpp_pa_frame_t *)action_frame->data;
+ config_af_params.max_tx_retry = WL_AF_TX_MAX_RETRY;
+ af_params->dwell_time = WL_MED_DWELL_TIME;
+ cfg->need_wait_afrx = true;
+ /* once matching frame is found in rx, abort dwell (upper layer
+ * doesn't do that).
+ */
+ if (pa->ftype == DPP_AUTH_REQ) {
+ cfg->next_af_subtype = DPP_AUTH_RESP;
+ } else if (pa->ftype == DPP_AUTH_RESP) {
+ cfg->next_af_subtype = DPP_AUTH_CONF;
+ } else {
+ cfg->next_af_subtype = WL_PUB_AF_STYPE_INVALID;
+ cfg->need_wait_afrx = false;
+ }
+ } else if (wl_cfg80211_is_dpp_gas_action(
+ (void *)action_frame->data, action_frame->len)) {
+ config_af_params.max_tx_retry = WL_AF_TX_MAX_RETRY;
+ af_params->dwell_time = WL_MED_DWELL_TIME;
+ cfg->need_wait_afrx = true;
+ config_af_params.search_channel = false;
+
+ if (requested_dwell == 0) {
+ /* Use minimal dwell to take care of Ack */
+ af_params->dwell_time = WL_MIN_DWELL_TIME;
+ }
+ } else if ((action == P2P_PUB_AF_ACTION) &&
+ (action_frame_len >= sizeof(wifi_p2p_pub_act_frame_t))) {
+ /* p2p public action frame process */
+ if (BCME_OK != wl_cfg80211_config_p2p_pub_af_tx(wiphy,
+ action_frame, af_params, &config_af_params)) {
+ /* just send unknown subtype frame with default parameters. */
+ WL_DBG(("Unknown subtype.\n"));
+ }
+
+#ifdef WL_CFG80211_GON_COLLISION
+ if (config_af_params.drop_tx_req) {
+ if (cfg->block_gon_req_tx_count) {
+ /* drop gon req tx action frame */
+ WL_DBG(("Drop gon req tx action frame: count %d\n",
+ cfg->block_gon_req_tx_count));
+ goto exit;
+ }
+ }
+#endif /* WL_CFG80211_GON_COLLISION */
+ } else if (action_frame_len >= sizeof(wifi_p2psd_gas_pub_act_frame_t)) {
+ /* service discovery process */
+ if (action == P2PSD_ACTION_ID_GAS_IREQ ||
+ action == P2PSD_ACTION_ID_GAS_CREQ) {
+ /* configure service discovery query frame */
+ config_af_params.search_channel = true;
+
+ /* save next AF subtype to cancel the remaining dwell time */
+ cfg->next_af_subtype = action + 1;
+
+ af_params->dwell_time = WL_MED_DWELL_TIME;
+ if (requested_dwell & CUSTOM_RETRY_MASK) {
+ config_af_params.max_tx_retry =
+ (requested_dwell & CUSTOM_RETRY_MASK) >> 24;
+ af_params->dwell_time =
+ (requested_dwell & ~CUSTOM_RETRY_MASK);
+ WL_DBG(("Custom retry(%d) and dwell time(%d) is set.\n",
+ config_af_params.max_tx_retry,
+ af_params->dwell_time));
+ }
+ } else if (action == P2PSD_ACTION_ID_GAS_IRESP ||
+ action == P2PSD_ACTION_ID_GAS_CRESP) {
+ /* configure service discovery response frame */
+ af_params->dwell_time = WL_MIN_DWELL_TIME;
+ } else {
+ WL_DBG(("Unknown action type: %d\n", action));
+ }
+ } else {
+ WL_DBG(("Unknown Frame: category 0x%x, action 0x%x, length %d\n",
+ category, action, action_frame_len));
+ }
+ } else if (category == P2P_AF_CATEGORY) {
+ /* Do not configure anything; the frame will be sent with the default configuration */
+ } else {
+ WL_DBG(("Unknown Frame: category 0x%x, action 0x%x\n",
+ category, action));
+#ifdef BCMDONGLEHOST
+ if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
+ wl_clr_drv_status(cfg, SENDING_ACT_FRM, dev);
+ return false;
+ }
+#endif /* BCMDONGLEHOST */
+ }
+
+ netinfo = wl_get_netinfo_by_wdev(cfg, cfgdev_to_wdev(cfgdev));
+ /* validate channel and p2p ies */
+ if (config_af_params.search_channel &&
+ IS_P2P_SOCIAL(CHSPEC_CHANNEL(af_params->channel)) &&
+ netinfo && netinfo->bss.ies.probe_req_ie_len) {
+ config_af_params.search_channel = true;
+ } else {
+ config_af_params.search_channel = false;
+ }
+#ifdef WL11U
+ if (ndev == bcmcfg_to_prmry_ndev(cfg))
+ config_af_params.search_channel = false;
+#endif /* WL11U */
+
+#ifdef VSDB
+ /* if connecting on primary iface, sleep for a while before sending af tx for VSDB */
+ if (wl_get_drv_status(cfg, CONNECTING, bcmcfg_to_prmry_ndev(cfg))) {
+ OSL_SLEEP(50);
+ }
+#endif
+
+ /* if scan is ongoing, abort current scan. */
+ if (wl_get_drv_status_all(cfg, SCANNING)) {
+ wl_cfgscan_cancel_scan(cfg);
+ }
+
+ /* Abort P2P listen */
+ if (discover_cfgdev(cfgdev, cfg)) {
+ if (cfg->p2p_supported && cfg->p2p) {
+ wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0,
+ wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE));
+ }
+ }
+
+#if defined(WL11U) && defined(WL_HOST_AF_DFS_CHECK)
+ /* handling DFS channel exceptions */
+ if (!wl_cfg80211_check_DFS_channel(cfg, af_params, action_frame->data, action_frame->len)) {
+ return false; /* the action frame was blocked */
+ }
+#endif /* WL11U && WL_HOST_AF_DFS_CHECK */
+
+ /* set status and destination address before sending af */
+ if (cfg->next_af_subtype != WL_PUB_AF_STYPE_INVALID) {
+ /* set this status to cancel the remaining dwell time in the RX process */
+ wl_set_drv_status(cfg, WAITING_NEXT_ACT_FRM, dev);
+ }
+ wl_set_drv_status(cfg, SENDING_ACT_FRM, dev);
+ memcpy(cfg->afx_hdl->tx_dst_addr.octet,
+ af_params->action_frame.da.octet,
+ sizeof(cfg->afx_hdl->tx_dst_addr.octet));
+
+ /* save af_params for rx process */
+ cfg->afx_hdl->pending_tx_act_frm = af_params;
+
+ if (wl_cfgp2p_is_p2p_gas_action(action_frame->data, action_frame->len)) {
+ WL_DBG(("Set GAS action frame config.\n"));
+ config_af_params.search_channel = false;
+ config_af_params.max_tx_retry = 1;
+ }
+
+ /* search peer's channel */
+ if (config_af_params.search_channel) {
+ /* initialize afx_hdl */
+ if ((cfg->afx_hdl->bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+ WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
+ goto exit;
+ }
+ cfg->afx_hdl->dev = dev;
+ cfg->afx_hdl->retry = 0;
+ cfg->afx_hdl->peer_chan = WL_INVALID;
+
+ if (wl_cfg80211_af_searching_channel(cfg, dev) == WL_INVALID) {
+ WL_ERR(("couldn't find peer's channel.\n"));
+ wl_cfgp2p_print_actframe(true, action_frame->data, action_frame->len,
+ af_params->channel);
+ /* Even if we couldn't find the peer's channel, try to send the frame
+ * out. The P2P cert 5.1.14 testbed device (Realtek) doesn't seem to
+ * respond to probe requests (ideally it should be in listen state and
+ * respond). However, if we send a GO Neg Req, the peer does send a
+ * GO Neg Resp. So instead of giving up here, proceed and attempt to
+ * send the action frame anyway.
+ */
+ }
+
+ wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev);
+ /*
+ * Abort the scan even for VSDB scenarios. The scan does get aborted
+ * in firmware, but only after the piggyback algorithm has run. To
+ * take care of the current piggyback algorithm, abort the scan here.
+ */
+ wl_cfgscan_cancel_scan(cfg);
+ /* Suspend P2P discovery's search-listen to prevent it from
+ * starting a scan or changing the channel.
+ */
+ if ((wl_cfgp2p_discover_enable_search(cfg, false)) < 0) {
+ WL_ERR(("Can not disable discovery mode\n"));
+ goto exit;
+ }
+
+ /* update channel */
+ if (cfg->afx_hdl->peer_chan != WL_INVALID) {
+ af_params->channel = cfg->afx_hdl->peer_chan;
+ WL_ERR(("Attempt tx on peer listen channel:%d\n",
+ cfg->afx_hdl->peer_chan));
+ } else {
+ WL_ERR(("Attempt tx with the channel provided by userspace."
+ "Channel: %d\n", CHSPEC_CHANNEL(af_params->channel)));
+ }
+ }
+
+#ifdef VSDB
+ off_chan_started_jiffies = jiffies;
+#endif /* VSDB */
+
+ wl_cfgp2p_print_actframe(true, action_frame->data, action_frame->len, af_params->channel);
+
+ wl_cfgp2p_need_wait_actfrmae(cfg, action_frame->data, action_frame->len, true);
+
+ dwell_jiffies = jiffies;
+ /* Now send a tx action frame */
+ ack = wl_cfgp2p_tx_action_frame(cfg, dev, af_params, bssidx) ? false : true;
+ dwell_overflow = wl_cfg80211_check_dwell_overflow(requested_dwell, dwell_jiffies);
+
+ /* If TX failed, retry up to config_af_params.max_tx_retry times */
+ while ((ack == false) && (tx_retry++ < config_af_params.max_tx_retry) &&
+ !dwell_overflow) {
+#ifdef VSDB
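+ /* For VSDB, periodically service the primary connection once we
+ * have been off-channel longer than the threshold; otherwise just
+ * back off briefly between retries.
+ */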
+ if (af_params->channel) {
+ if (jiffies_to_msecs(jiffies - off_chan_started_jiffies) >
+ OFF_CHAN_TIME_THRESHOLD_MS) {
+ WL_AF_TX_KEEP_PRI_CONNECTION_VSDB(cfg);
+ off_chan_started_jiffies = jiffies;
+ } else
+ OSL_SLEEP(AF_RETRY_DELAY_TIME);
+ }
+#endif /* VSDB */
+ ack = wl_cfgp2p_tx_action_frame(cfg, dev, af_params, bssidx) ?
+ false : true;
+ dwell_overflow = wl_cfg80211_check_dwell_overflow(requested_dwell, dwell_jiffies);
+ }
+
+ if (ack == false) {
+ WL_ERR(("Failed to send Action Frame(retry %d)\n", tx_retry));
+ }
+ WL_DBG(("Complete to send action frame\n"));
+exit:
+ /* Clear SENDING_ACT_FRM after all sending af is done */
+ wl_clr_drv_status(cfg, SENDING_ACT_FRM, dev);
+
+#ifdef WL_CFG80211_SYNC_GON
+ /* WAR: sometimes the dongle does not honor the dwell time of 'actframe'.
+ * If we couldn't get the next action response frame and the dongle does
+ * not keep the dwell time, go to listen state again to catch the next
+ * action response frame.
+ */
+ if (ack && config_af_params.extra_listen &&
+#ifdef WL_CFG80211_GON_COLLISION
+ !cfg->block_gon_req_tx_count &&
+#endif /* WL_CFG80211_GON_COLLISION */
+ wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM) &&
+ cfg->af_sent_channel == cfg->afx_hdl->my_listen_chan) {
+ s32 extra_listen_time;
+
+ extra_listen_time = af_params->dwell_time -
+ jiffies_to_msecs(jiffies - cfg->af_tx_sent_jiffies);
+
+ if (extra_listen_time > 50) {
+ wl_set_drv_status(cfg, WAITING_NEXT_ACT_FRM_LISTEN, dev);
+ WL_DBG(("Wait more time! actual af time:%d, "
+ "calculated extra listen:%d\n",
+ af_params->dwell_time, extra_listen_time));
+ if (wl_cfgp2p_discover_listen(cfg, cfg->af_sent_channel,
+ extra_listen_time + 100) == BCME_OK) {
+ wait_for_completion_timeout(&cfg->wait_next_af,
+ msecs_to_jiffies(extra_listen_time + 100 + 300));
+ }
+ wl_clr_drv_status(cfg, WAITING_NEXT_ACT_FRM_LISTEN, dev);
+ }
+ }
+#endif /* WL_CFG80211_SYNC_GON */
+ wl_clr_drv_status(cfg, WAITING_NEXT_ACT_FRM, dev);
+
+ cfg->afx_hdl->pending_tx_act_frm = NULL;
+
+ if (ack) {
+ WL_DBG(("-- Action Frame Tx succeeded, listen chan: %d\n",
+ cfg->afx_hdl->my_listen_chan));
+ } else {
+ WL_ERR(("-- Action Frame Tx failed, listen chan: %d\n",
+ cfg->afx_hdl->my_listen_chan));
+ }
+
+#ifdef WL_CFG80211_GON_COLLISION
+ if (cfg->block_gon_req_tx_count) {
+ cfg->block_gon_req_tx_count--;
+ /* If ack is true, the supplicant will wait longer (100ms),
+ * so report success here to buy more time.
+ */
+ ack = true;
+ }
+#endif /* WL_CFG80211_GON_COLLISION */
+ return ack;
+}
+
+#define MAX_NUM_OF_ASSOCIATED_DEV 64
+static s32
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+wl_cfg80211_mgmt_tx(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev,
+ struct cfg80211_mgmt_tx_params *params, u64 *cookie)
+#else
+wl_cfg80211_mgmt_tx(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev,
+ struct ieee80211_channel *channel, bool offchan,
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 7, 0))
+ enum nl80211_channel_type channel_type,
+ bool channel_type_valid,
+#endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(3, 7, 0) */
+ unsigned int wait, const u8* buf, size_t len,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)) || \
+ defined(WL_COMPAT_WIRELESS)
+ bool no_cck,
+#endif
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)) || \
+ defined(WL_COMPAT_WIRELESS)
+ bool dont_wait_for_ack,
+#endif
+ u64 *cookie)
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) */
+{
+ wl_action_frame_t *action_frame;
+ wl_af_params_t *af_params;
+ scb_val_t scb_val;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+ struct ieee80211_channel *channel = params->chan;
+ const u8 *buf = params->buf;
+ size_t len = params->len;
+#endif
+ const struct ieee80211_mgmt *mgmt;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct net_device *dev = NULL;
+ s32 err = BCME_OK;
+ s32 bssidx = 0;
+ u32 id;
+ bool ack = false;
+ s8 eabuf[ETHER_ADDR_STR_LEN];
+
+ WL_DBG(("Enter \n"));
+
+ if (len > ACTION_FRAME_SIZE) {
+ WL_ERR(("bad length:%zu\n", len));
+ return BCME_BADLEN;
+ }
+
+#ifdef DHD_IFDEBUG
+ PRINT_WDEV_INFO(cfgdev);
+#endif /* DHD_IFDEBUG */
+
+ dev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+ if (!dev) {
+ WL_ERR(("dev is NULL\n"));
+ return -EINVAL;
+ }
+
+ /* set bsscfg idx for iovar (wlan0: P2PAPI_BSSCFG_PRIMARY, p2p: P2PAPI_BSSCFG_DEVICE) */
+ if (discover_cfgdev(cfgdev, cfg)) {
+ if (!cfg->p2p_supported || !cfg->p2p) {
+ WL_ERR(("P2P doesn't setup completed yet\n"));
+ return -EINVAL;
+ }
+ bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+ }
+ else {
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, cfgdev_to_wdev(cfgdev))) < 0) {
+ WL_ERR(("Failed to find bssidx\n"));
+ return BCME_ERROR;
+ }
+ }
+
+ WL_DBG(("TX target bssidx=%d\n", bssidx));
+
+ if (p2p_is_on(cfg)) {
+ /* Suspend P2P discovery search-listen to prevent it from changing the
+ * channel.
+ */
+ if ((err = wl_cfgp2p_discover_enable_search(cfg, false)) < 0) {
+ WL_ERR(("Can not disable discovery mode\n"));
+ return -EFAULT;
+ }
+ }
+ *cookie = 0;
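+ /* Allocate a non-zero cookie for the TX status report; the
+ * counter skips 0 on wrap.
+ */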
+ id = cfg->send_action_id++;
+ if (id == 0)
+ id = cfg->send_action_id++;
+ *cookie = id;
+ mgmt = (const struct ieee80211_mgmt *)buf;
+ if (ieee80211_is_mgmt(mgmt->frame_control)) {
+ if (ieee80211_is_probe_resp(mgmt->frame_control)) {
+ s32 ie_offset = DOT11_MGMT_HDR_LEN + DOT11_BCN_PRB_FIXED_LEN;
+ s32 ie_len = len - ie_offset;
+ if ((dev == bcmcfg_to_prmry_ndev(cfg)) && cfg->p2p) {
+ bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+ }
+ wl_cfg80211_set_mgmt_vndr_ies(cfg, cfgdev, bssidx,
+ VNDR_IE_PRBRSP_FLAG, (const u8 *)(buf + ie_offset), ie_len);
+ cfg80211_mgmt_tx_status(cfgdev, *cookie, buf, len, true, GFP_KERNEL);
+#if defined(P2P_IE_MISSING_FIX)
+ if (!cfg->p2p_prb_noti) {
+ cfg->p2p_prb_noti = true;
+ WL_DBG(("wl_cfg80211_mgmt_tx: TX 802_1X Probe"
+ " Response first time.\n"));
+ }
+#endif
+ goto exit;
+ } else if (ieee80211_is_disassoc(mgmt->frame_control) ||
+ ieee80211_is_deauth(mgmt->frame_control)) {
+ char mac_buf[MAX_NUM_OF_ASSOCIATED_DEV *
+ sizeof(struct ether_addr) + sizeof(uint)] = {0};
+ int num_associated = 0;
+ struct maclist *assoc_maclist = (struct maclist *)mac_buf;
+ if (!bcmp((const uint8 *)BSSID_BROADCAST,
+ (const struct ether_addr *)mgmt->da, ETHER_ADDR_LEN)) {
+ assoc_maclist->count = MAX_NUM_OF_ASSOCIATED_DEV;
+ err = wldev_ioctl_get(dev, WLC_GET_ASSOCLIST,
+ assoc_maclist, sizeof(mac_buf));
+ if (err < 0)
+ WL_ERR(("WLC_GET_ASSOCLIST error %d\n", err));
+ else
+ num_associated = assoc_maclist->count;
+ }
+ memcpy(scb_val.ea.octet, mgmt->da, ETH_ALEN);
+ scb_val.val = mgmt->u.disassoc.reason_code;
+ err = wldev_ioctl_set(dev, WLC_SCB_DEAUTHENTICATE_FOR_REASON, &scb_val,
+ sizeof(scb_val_t));
+ if (err < 0)
+ WL_ERR(("WLC_SCB_DEAUTHENTICATE_FOR_REASON error %d\n", err));
+ WL_ERR(("Disconnect STA : " MACDBG " scb_val.val %d\n",
+ MAC2STRDBG(bcm_ether_ntoa((const struct ether_addr *)mgmt->da,
+ eabuf)), scb_val.val));
+
+ /* WAR: Wait for the deauth event to arrive. The supplicant
+ * deletes the iface immediately afterwards, and we would have
+ * trouble sending the deauth frame if we deleted the BSS in
+ * firmware first. We do not need this extra delay during a
+ * P2P connection, though.
+ *
+ * The supplicant calls this function with the broadcast address
+ * after deleting all GC stations individually, so the 400 ms
+ * delay runs only once, when the GO disconnects all GCs.
+ */
+ if (num_associated > 0 && ETHER_ISBCAST(mgmt->da))
+ wl_delay(400);
+
+ cfg80211_mgmt_tx_status(cfgdev, *cookie, buf, len, true, GFP_KERNEL);
+ goto exit;
+
+ } else if (ieee80211_is_action(mgmt->frame_control)) {
+ /* Abort the dwell time of any previous off-channel
+ * action frame that may be still in effect. Sending
+ * off-channel action frames relies on the driver's
+ * scan engine. If a previous off-channel action frame
+ * tx is still in progress (including the dwell time),
+ * then this new action frame will not be sent out.
+ */
+/* Do not abort the scan for VSDB; the scan will be aborted in firmware if
+ * necessary, and the previous off-channel action frame must have ended
+ * before a new AF TX.
+ */
+#ifndef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+ wl_cfgscan_cancel_scan(cfg);
+#endif /* not WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+ }
+#ifdef WL_CLIENT_SAE
+ else if (ieee80211_is_auth(mgmt->frame_control)) {
+ err = wl_cfg80211_mgmt_auth_tx(dev, cfgdev, cfg, buf, len,
+ bssidx, cookie);
+ goto exit;
+ }
+#endif /* WL_CLIENT_SAE */
+
+ } else {
+ WL_ERR(("Driver only allows MGMT packet type\n"));
+ goto exit;
+ }
+
+ af_params = (wl_af_params_t *)MALLOCZ(cfg->osh, WL_WIFI_AF_PARAMS_SIZE);
+
+ if (af_params == NULL)
+ {
+ WL_ERR(("unable to allocate frame\n"));
+ return -ENOMEM;
+ }
+
+ action_frame = &af_params->action_frame;
+
+ /* Add the packet Id */
+ action_frame->packetId = *cookie;
+ WL_DBG(("action frame %d\n", action_frame->packetId));
+ /* Add BSSID */
+ memcpy(&action_frame->da, &mgmt->da[0], ETHER_ADDR_LEN);
+ memcpy(&af_params->BSSID, &mgmt->bssid[0], ETHER_ADDR_LEN);
+
+ /* Add the length, excluding the 802.11 header */
+ action_frame->len = len - DOT11_MGMT_HDR_LEN;
+ WL_DBG(("action_frame->len: %d\n", action_frame->len));
+
+ if (channel) {
+ /* Add the channel */
+ af_params->channel =
+ wl_freq_to_chanspec(channel->center_freq);
+ } else {
+ af_params->channel = 0;
+ }
+
+ /* Save listen_chan for searching common channel */
+ cfg->afx_hdl->peer_listen_chan = af_params->channel;
+ WL_DBG(("channel from upper layer %d\n", cfg->afx_hdl->peer_listen_chan));
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+ af_params->dwell_time = params->wait;
+#else
+ af_params->dwell_time = wait;
+#endif
+
+ memcpy(action_frame->data, &buf[DOT11_MGMT_HDR_LEN], action_frame->len);
+
+ ack = wl_cfg80211_send_action_frame(wiphy, dev, cfgdev, af_params,
+ action_frame, action_frame->len, bssidx);
+ cfg80211_mgmt_tx_status(cfgdev, *cookie, buf, len, ack, GFP_KERNEL);
+ WL_DBG(("txstatus notified for cookie:%llu. ack:%d\n", *cookie, ack));
+
+ MFREE(cfg->osh, af_params, WL_WIFI_AF_PARAMS_SIZE);
+exit:
+ return err;
+}
+
+static void
+wl_cfg80211_mgmt_frame_register(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0))
+ u16 frame, bool reg)
+#else
+ struct mgmt_frame_regs *upd)
+#endif
+{
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0))
+ WL_DBG(("frame_type: %x, reg: %d\n", frame, reg));
+
+ if (frame != (IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_REQ))
+ return;
+#endif
+
+ return;
+}
+
+static s32
+wl_cfg80211_change_bss(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct bss_parameters *params)
+{
+ s32 err = 0;
+ s32 ap_isolate = 0;
+#ifdef PCIE_FULL_DONGLE
+ s32 ifidx = DHD_BAD_IF;
+#endif
+#if defined(SUPPORT_HOSTAPD_BGN_MODE)
+ s32 gmode = -1, nmode = -1;
+ s32 gmode_prev = -1, nmode_prev = -1;
+#endif /* SUPPORT_HOSTAPD_BGN_MODE */
+#if defined(PCIE_FULL_DONGLE) || defined(SUPPORT_HOSTAPD_BGN_MODE)
+ dhd_pub_t *dhd;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ dhd = (dhd_pub_t *)(cfg->pub);
+#if defined(WL_ENABLE_P2P_IF)
+ if (cfg->p2p_net == dev)
+ dev = bcmcfg_to_prmry_ndev(cfg);
+#endif
+#endif /* PCIE_FULL_DONGLE || SUPPORT_HOSTAPD_BGN_MODE */
+
+ if (params->use_cts_prot >= 0) {
+ }
+
+ if (params->use_short_preamble >= 0) {
+ }
+
+ if (params->use_short_slot_time >= 0) {
+ }
+
+ if (params->basic_rates) {
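+ /* basic_rates entries are in 0.5 Mbps units, so 22 -> 11 Mbps
+ * (B-only) and 108 -> 54 Mbps (G-only).
+ */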
+#if defined(SUPPORT_HOSTAPD_BGN_MODE)
+ switch ((int)(params->basic_rates[params->basic_rates_len -1])) {
+ case 22: /* B only , rate 11 */
+ gmode = 0;
+ nmode = 0;
+ break;
+ case 108: /* G only , rate 54 */
+ gmode = 2;
+ nmode = 0;
+ break;
+ default:
+ gmode = -1;
+ nmode = -1;
+ break;
+ }
+#endif /* SUPPORT_HOSTAPD_BGN_MODE */
+ }
+
+ if (params->ap_isolate >= 0) {
+ ap_isolate = params->ap_isolate;
+#ifdef PCIE_FULL_DONGLE
+ ifidx = dhd_net2idx(dhd->info, dev);
+
+ if (ifidx != DHD_BAD_IF) {
+ err = dhd_set_ap_isolate(dhd, ifidx, ap_isolate);
+ } else {
+ WL_ERR(("Failed to set ap_isolate\n"));
+ }
+#else
+ err = wldev_iovar_setint(dev, "ap_isolate", ap_isolate);
+ if (unlikely(err))
+ {
+ WL_ERR(("set ap_isolate Error (%d)\n", err));
+ }
+#endif /* PCIE_FULL_DONGLE */
+ }
+
+ if (params->ht_opmode >= 0) {
+#if defined(SUPPORT_HOSTAPD_BGN_MODE)
+ nmode = 1;
+ gmode = 1;
+ } else {
+ nmode = 0;
+#endif /* SUPPORT_HOSTAPD_BGN_MODE */
+ }
+
+#if defined(SUPPORT_HOSTAPD_BGN_MODE)
+ err = wldev_iovar_getint(dev, "nmode", &nmode_prev);
+ if (unlikely(err)) {
+ WL_ERR(("error reading nmode (%d)\n", err));
+ }
+ if (nmode == nmode_prev) {
+ nmode = -1;
+ }
+ err = wldev_ioctl_get(dev, WLC_GET_GMODE, &gmode_prev, sizeof(gmode_prev));
+ if (unlikely(err)) {
+ WL_ERR(("error reading gmode (%d)\n", err));
+ }
+ if (gmode == gmode_prev) {
+ gmode = -1;
+ }
+
+ if (((dhd->op_mode & DHD_FLAG_HOSTAP_MODE) == DHD_FLAG_HOSTAP_MODE) &&
+ ((gmode > -1) || (nmode > -1))) {
+ s32 val = 0;
+
+ err = wldev_ioctl_set(dev, WLC_DOWN, &val, sizeof(s32));
+ if (unlikely(err))
+ WL_ERR(("WLC_DOWN command failed:[%d]\n", err));
+
+ if (nmode > -1) {
+ err = wldev_iovar_setint(dev, "nmode", nmode);
+ if (unlikely(err))
+ WL_ERR(("nmode command failed:mode[%d]:err[%d]\n", nmode, err));
+ }
+
+ if (gmode > -1) {
+ err = wldev_ioctl_set(dev, WLC_SET_GMODE, &gmode, sizeof(s32));
+ if (unlikely(err))
+ WL_ERR(("WLC_SET_GMODE command failed:mode[%d]:err[%d]\n",
+ gmode, err));
+ }
+
+ val = 0;
+ err = wldev_ioctl_set(dev, WLC_UP, &val, sizeof(s32));
+ if (unlikely(err))
+ WL_ERR(("WLC_UP command failed:err[%d]\n", err));
+
+ }
+#endif /* SUPPORT_HOSTAPD_BGN_MODE */
+
+ return err;
+}
+
+#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+struct net_device *
+wl_cfg80211_get_remain_on_channel_ndev(struct bcm_cfg80211 *cfg)
+{
+ struct net_info *_net_info, *next;
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) {
+ GCC_DIAGNOSTIC_POP();
+ if (_net_info->ndev &&
+ test_bit(WL_STATUS_REMAINING_ON_CHANNEL, &_net_info->sme_state))
+ return _net_info->ndev;
+ }
+
+ return NULL;
+}
+#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+
+bool
+wl_cfg80211_macaddr_sync_reqd(struct net_device *dev)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+
+ WL_DBG(("enter \n"));
+ if (!wdev) {
+ WL_ERR(("no wdev present\n"));
+ return false;
+ }
+
+ BCM_REFERENCE(cfg);
+
+#if defined(WL_STATIC_IF)
+ /* In the SoftAP case too, a role upgrade from STA to AP
+ * happens in some cases; such cases still report the
+ * iftype as STATION.
+ */
+ if (wl_cfg80211_static_if(cfg, dev)) {
+ WL_INFORM_MEM(("STATIC interface\n"));
+ return true;
+ }
+#endif /* WL_STATIC_IF */
+
+ switch (wdev->iftype) {
+#ifdef WL_P2P_RAND
+ case NL80211_IFTYPE_P2P_GO:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ WL_INFORM_MEM(("P2P GO/GC interface\n"));
+ return true;
+#endif /* WL_P2P_RAND */
+#if defined(WL_STA_ASSOC_RAND)
+ case NL80211_IFTYPE_STATION:
+ WL_INFORM_MEM(("STA interface\n"));
+ return true;
+#endif /* WL_STA_ASSOC_RAND */
+#ifdef WL_SOFTAP_RAND
+ case NL80211_IFTYPE_AP:
+ WL_INFORM_MEM(("SOFTAP interface\n"));
+ return true;
+#endif /* WL_SOFTAP_RAND */
+ default:
+ WL_ERR(("no macthing if type\n"));
+ }
+ return false;
+}
+
+#if defined(WL_SUPPORT_BACKPORTED_KPATCHES) || (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
+static s32
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0))
+wl_cfg80211_del_station(
+ struct wiphy *wiphy, struct net_device *ndev,
+ struct station_del_parameters *params)
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
+wl_cfg80211_del_station(
+ struct wiphy *wiphy,
+ struct net_device *ndev,
+ const u8* mac_addr)
+#else
+wl_cfg80211_del_station(
+ struct wiphy *wiphy,
+ struct net_device *ndev,
+ u8* mac_addr)
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) */
+{
+ struct net_device *dev;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ scb_val_t scb_val;
+ int err;
+ char mac_buf[MAX_NUM_OF_ASSOCIATED_DEV *
+ sizeof(struct ether_addr) + sizeof(uint)] = {0};
+ struct maclist *assoc_maclist = (struct maclist *)mac_buf;
+ int num_associated = 0;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0))
+ const u8 *mac_addr = params->mac;
+#ifdef CUSTOM_BLOCK_DEAUTH_AT_EAP_FAILURE
+ u16 rc = params->reason_code;
+#endif /* CUSTOM_BLOCK_DEAUTH_AT_EAP_FAILURE */
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) */
+
+ WL_DBG(("Entry\n"));
+ if (mac_addr == NULL) {
+ WL_DBG(("mac_addr is NULL ignore it\n"));
+ return 0;
+ }
+
+ dev = ndev_to_wlc_ndev(ndev, cfg);
+
+ if (p2p_is_on(cfg)) {
+ /* Suspend P2P discovery search-listen to prevent it from changing the
+ * channel.
+ */
+ if ((wl_cfgp2p_discover_enable_search(cfg, false)) < 0) {
+ WL_ERR(("Can not disable discovery mode\n"));
+ return -EFAULT;
+ }
+ }
+#ifdef WL_EXT_IAPSTA
+ err = wl_ext_in4way_sync(ndev, AP_WAIT_STA_RECONNECT,
+ WL_EXT_STATUS_DELETE_STA, (void *)mac_addr);
+ if (err) {
+ return 0;
+ }
+#endif
+
+ assoc_maclist->count = MAX_NUM_OF_ASSOCIATED_DEV;
+ err = wldev_ioctl_get(ndev, WLC_GET_ASSOCLIST,
+ assoc_maclist, sizeof(mac_buf));
+ if (err < 0)
+ WL_ERR(("WLC_GET_ASSOCLIST error %d\n", err));
+ else
+ num_associated = assoc_maclist->count;
+
+ memcpy(scb_val.ea.octet, mac_addr, ETHER_ADDR_LEN);
+#ifdef CUSTOM_BLOCK_DEAUTH_AT_EAP_FAILURE
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0))
+ if (rc == DOT11_RC_8021X_AUTH_FAIL) {
+ WL_ERR(("deauth will be sent at F/W\n"));
+ scb_val.val = DOT11_RC_8021X_AUTH_FAIL;
+ } else {
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) */
+#endif /* CUSTOM_BLOCK_DEAUTH_AT_EAP_FAILURE */
+
+#ifdef WL_WPS_SYNC
+ if (wl_wps_session_update(ndev,
+ WPS_STATE_DISCONNECT_CLIENT, mac_addr) == BCME_UNSUPPORTED) {
+ /* Ignore disconnect command from upper layer */
+ WL_INFORM_MEM(("[WPS] Ignore client disconnect.\n"));
+ } else
+#endif /* WL_WPS_SYNC */
+ {
+ scb_val.val = DOT11_RC_DEAUTH_LEAVING;
+ WL_MSG(dev->name, "Disconnect STA : %pM scb_val.val %d\n",
+ mac_addr, scb_val.val);
+#if defined(BCMDONGLEHOST)
+ /* need to guarantee EAP-Failure send out before deauth */
+ dhd_wait_pend8021x(dev);
+#endif
+ err = wldev_ioctl_set(dev, WLC_SCB_DEAUTHENTICATE_FOR_REASON, &scb_val,
+ sizeof(scb_val_t));
+ if (err < 0) {
+ WL_ERR(("WLC_SCB_DEAUTHENTICATE_FOR_REASON err %d\n", err));
+ }
+ }
+#ifdef CUSTOM_BLOCK_DEAUTH_AT_EAP_FAILURE
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0))
+ }
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) */
+#endif /* CUSTOM_BLOCK_DEAUTH_AT_EAP_FAILURE */
+
+ /* WAR: Wait for the deauth event to arrive. The supplicant deletes
+ * the iface immediately afterwards, and we would have trouble sending
+ * the deauth frame if we deleted the BSS in firmware first. We do not
+ * need this extra delay during a P2P connection, though.
+ *
+ * The supplicant calls this function with the broadcast address after
+ * calling wl_cfg80211_del_station() for each GC station's address, so
+ * the 400 ms delay runs only once, when the GO disconnects all GCs.
+ */
+ if (num_associated > 0 && ETHER_ISBCAST(mac_addr))
+ wl_delay(400);
+
+ return 0;
+}
+
+/* Implementation for post SCB authorize */
+static void
+wl_cfg80211_post_scb_auth(struct bcm_cfg80211 *cfg, struct net_device *dev)
+{
+#ifdef WBTEXT
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+#endif /* WBTEXT */
+
+ LOG_TS(cfg, authorize_cmplt);
+ CLR_TS(cfg, authorize_start);
+ wl_set_drv_status(cfg, AUTHORIZED, dev);
+#ifdef DHD_LOSSLESS_ROAMING
+ wl_del_roam_timeout(cfg);
+#endif
+#ifdef WBTEXT
+ /* send nbr request or BTM query to update RCC
+ * after 4-way handshake is completed
+ */
+ if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_STATION &&
+ dhdp->wbtext_support) {
+ wl_cfg80211_wbtext_update_rcc(cfg, dev);
+ }
+#endif /* WBTEXT */
+}
+
+/* Currently only the authorize/de-authorize flag is supported;
+ * this needs to be extended in the future.
+ */
+static s32
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
+wl_cfg80211_change_station(
+ struct wiphy *wiphy,
+ struct net_device *dev,
+ const u8 *mac,
+ struct station_parameters *params)
+#else
+wl_cfg80211_change_station(
+ struct wiphy *wiphy,
+ struct net_device *dev,
+ u8 *mac,
+ struct station_parameters *params)
+#endif
+{
+ int err = BCME_OK;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+#ifdef BCMSUP_4WAY_HANDSHAKE
+ struct wl_security *sec;
+#endif /* BCMSUP_4WAY_HANDSHAKE */
+ struct net_device *ndev = ndev_to_wlc_ndev(dev, cfg);
+
+ WL_DBG(("SCB_AUTHORIZE mac_addr:"MACDBG" sta_flags_mask:0x%x "
+ "sta_flags_set:0x%x iface:%s \n", MAC2STRDBG(mac),
+ params->sta_flags_mask, params->sta_flags_set, ndev->name));
+
+ if ((wl_get_mode_by_netdev(cfg, dev) == WL_MODE_BSS) &&
+ !(wl_get_drv_status(cfg, CONNECTED, dev))) {
+ /* Return error indicating not in connected state */
+ WL_ERR(("Ignore SCB_AUTHORIZE/DEAUTHORIZE in non connected state\n"));
+ return -ENOTSUPP;
+ }
+
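+ /* Flag handling below:
+ * AUTHORIZED in mask, bit set -> WLC_SCB_AUTHORIZE
+ * AUTHORIZED in mask, bit clear -> WLC_SCB_DEAUTHORIZE
+ * AUTHORIZED not in mask -> -ENOTSUPP
+ */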
+ /* Processing only authorize/de-authorize flag for now */
+ if (!(params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED))) {
+ WL_ERR(("WLC_SCB_AUTHORIZE sta_flags_mask not set \n"));
+ return -ENOTSUPP;
+ }
+
+ if (!(params->sta_flags_set & BIT(NL80211_STA_FLAG_AUTHORIZED))) {
+ err = wldev_ioctl_set(ndev, WLC_SCB_DEAUTHORIZE, mac, ETH_ALEN);
+ if (unlikely(err)) {
+ WL_ERR(("WLC_SCB_DEAUTHORIZE error (%d)\n", err));
+ } else {
+ WL_INFORM_MEM(("[%s] WLC_SCB_DEAUTHORIZE " MACDBG "\n",
+ ndev->name, MAC2STRDBG(mac)));
+ }
+ wl_clr_drv_status(cfg, AUTHORIZED, dev);
+ CLR_TS(cfg, authorize_start);
+ CLR_TS(cfg, conn_start);
+ return err;
+ }
+ /* When the 4-way handshake is offloaded to FW and key_mgmt is 802.1x,
+ * SCB authorization is offloaded to FW as well. In those cases we skip
+ * the explicit WLC_SCB_AUTHORIZE ioctl here; the post-SCB-authorize
+ * actions are performed from the WLC_E_PSK_SUP event handler instead.
+ */
+#ifdef BCMSUP_4WAY_HANDSHAKE
+ sec = wl_read_prof(cfg, ndev, WL_PROF_SEC);
+ if (
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0))
+ (wiphy_ext_feature_isset(wiphy, NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_1X)) &&
+#else
+ (cfg->wdev->wiphy->features & NL80211_FEATURE_FW_4WAY_HANDSHAKE) &&
+#endif
+ ((sec->wpa_auth == WLAN_AKM_SUITE_8021X) ||
+ (sec->wpa_auth == WL_AKM_SUITE_SHA256_1X))) {
+ return BCME_OK;
+ }
+#endif /* BCMSUP_4WAY_HANDSHAKE */
+ err = wldev_ioctl_set(ndev, WLC_SCB_AUTHORIZE, mac, ETH_ALEN);
+ if (unlikely(err)) {
+ WL_ERR(("WLC_SCB_AUTHORIZE error (%d)\n", err));
+ } else {
+ WL_INFORM_MEM(("[%s] WLC_SCB_AUTHORIZE " MACDBG "\n",
+ ndev->name, MAC2STRDBG(mac)));
+#ifdef WL_WPS_SYNC
+ wl_wps_session_update(ndev, WPS_STATE_AUTHORIZE, mac);
+#endif /* WL_WPS_SYNC */
+ }
+
+ /* Post SCB authorize actions */
+ wl_cfg80211_post_scb_auth(cfg, ndev);
+
+ return err;
+}
+#endif /* WL_SUPPORT_BACKPORTED_KPATCHES || KERNEL_VER >= KERNEL_VERSION(3, 2, 0)) */
+
+#ifdef WL_SUPPORT_ACS
+/*
+ * The dump_obss IOVAR currently returns its output as a string, so we
+ * have to parse the output buffer in an unoptimized way. If the IOVAR
+ * output becomes binary in the future, this method can be optimized.
+ */
+static int wl_parse_dump_obss(char *buf, struct wl_dump_survey *survey)
+{
+ int i;
+ char *token;
+ char delim[] = " \n";
+
+ token = strsep(&buf, delim);
+ while (token != NULL) {
+ if (!strcmp(token, "OBSS")) {
+ for (i = 0; i < OBSS_TOKEN_IDX; i++)
+ token = strsep(&buf, delim);
+ survey->obss = simple_strtoul(token, NULL, 10);
+ }
+
+ if (!strcmp(token, "IBSS")) {
+ for (i = 0; i < IBSS_TOKEN_IDX; i++)
+ token = strsep(&buf, delim);
+ survey->ibss = simple_strtoul(token, NULL, 10);
+ }
+
+ if (!strcmp(token, "TXDur")) {
+ for (i = 0; i < TX_TOKEN_IDX; i++)
+ token = strsep(&buf, delim);
+ survey->tx = simple_strtoul(token, NULL, 10);
+ }
+
+ if (!strcmp(token, "Category")) {
+ for (i = 0; i < CTG_TOKEN_IDX; i++)
+ token = strsep(&buf, delim);
+ survey->no_ctg = simple_strtoul(token, NULL, 10);
+ }
+
+ if (!strcmp(token, "Packet")) {
+ for (i = 0; i < PKT_TOKEN_IDX; i++)
+ token = strsep(&buf, delim);
+ survey->no_pckt = simple_strtoul(token, NULL, 10);
+ }
+
+ if (!strcmp(token, "Opp(time):")) {
+ for (i = 0; i < IDLE_TOKEN_IDX; i++)
+ token = strsep(&buf, delim);
+ survey->idle = simple_strtoul(token, NULL, 10);
+ }
+
+ token = strsep(&buf, delim);
+ }
+
+ return 0;
+}
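+
+/*
+ * Illustrative sketch (editor's example, not driver code) of the token walk
+ * used above: find a label, skip a fixed number of space/newline separated
+ * tokens, then parse the value that follows. The helper and skip count are
+ * hypothetical; the real column offsets are the *_TOKEN_IDX constants.
+ */
+#ifdef WL_PARSE_OBSS_EXAMPLE /* hypothetical guard; never defined */
+static unsigned long wl_parse_after_label(char *buf, const char *label, int skip)
+{
+ char *token;
+ char delim[] = " \n";
+
+ while ((token = strsep(&buf, delim)) != NULL) {
+ if (strcmp(token, label))
+ continue;
+ while (skip-- > 0 && token != NULL)
+ token = strsep(&buf, delim);
+ return token ? simple_strtoul(token, NULL, 10) : 0;
+ }
+ return 0;
+}
+#endif /* WL_PARSE_OBSS_EXAMPLE */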
+
+static int wl_dump_obss(struct net_device *ndev, cca_msrmnt_query req,
+ struct wl_dump_survey *survey)
+{
+ cca_stats_n_flags *results;
+ char *buf;
+ int retry, err;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+
+ buf = (char *)MALLOCZ(cfg->osh, sizeof(char) * WLC_IOCTL_MAXLEN);
+ if (unlikely(!buf)) {
+ WL_ERR(("%s: buf alloc failed\n", __func__));
+ return -ENOMEM;
+ }
+
+ retry = IOCTL_RETRY_COUNT;
+ while (retry--) {
+ err = wldev_iovar_getbuf(ndev, "dump_obss", &req, sizeof(req),
+ buf, WLC_IOCTL_MAXLEN, NULL);
+ if (err >= 0) {
+ break;
+ }
+ WL_DBG(("attempt = %d, err = %d, \n",
+ (IOCTL_RETRY_COUNT - retry), err));
+ }
+
+ if (retry <= 0) {
+ WL_ERR(("failure, dump_obss IOVAR failed\n"));
+ err = -EINVAL;
+ goto exit;
+ }
+
+ results = (cca_stats_n_flags *)(buf);
+ wl_parse_dump_obss(results->buf, survey);
+ MFREE(cfg->osh, buf, sizeof(char) * WLC_IOCTL_MAXLEN);
+
+ return 0;
+exit:
+ MFREE(cfg->osh, buf, sizeof(char) * WLC_IOCTL_MAXLEN);
+ return err;
+}
+
+static int wl_cfg80211_dump_survey(struct wiphy *wiphy, struct net_device *ndev,
+ int idx, struct survey_info *info)
+{
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct wl_dump_survey *survey;
+ struct ieee80211_supported_band *band;
+ struct ieee80211_channel *chan;
+ cca_msrmnt_query req;
+ int val, err, noise, retry;
+
+#ifdef BCMDONGLEHOST
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+ if (!(dhd->op_mode & DHD_FLAG_HOSTAP_MODE)) {
+ return -ENOENT;
+ }
+#endif
+ band = wiphy->bands[IEEE80211_BAND_2GHZ];
+ if (band && idx >= band->n_channels) {
+ idx -= band->n_channels;
+ band = NULL;
+ }
+
+ if (!band || idx >= band->n_channels) {
+ /* Move to 5G band */
+ band = wiphy->bands[IEEE80211_BAND_5GHZ];
+ if (idx >= band->n_channels) {
+ return -ENOENT;
+ }
+ }
+
+ chan = &band->channels[idx];
+ /* Setting current channel to the requested channel */
+ if ((err = wl_cfg80211_set_channel(wiphy, ndev, chan,
+ NL80211_CHAN_HT20)) < 0) {
+ /*
+ * FIXME:
+ *
+ * Setting the channel should normally not fail, because we are
+ * traversing the valid channel list. If it does fail, we currently
+ * report the stats of the previous channel.
+ */
+ WL_ERR(("Set channel failed \n"));
+ }
+
+ if (!idx) {
+ /* Set interface up, explicitly. */
+ val = 1;
+ err = wldev_ioctl_set(ndev, WLC_UP, (void *)&val, sizeof(val));
+ if (err < 0) {
+ WL_ERR(("set interface up failed, error = %d\n", err));
+ }
+ }
+
+ /* Get noise value */
+ retry = IOCTL_RETRY_COUNT;
+ while (retry--) {
+ noise = 0;
+ err = wldev_ioctl_get(ndev, WLC_GET_PHY_NOISE, &noise,
+ sizeof(noise));
+ if (err >= 0) {
+ break;
+ }
+ WL_DBG(("attempt = %d, err = %d, \n",
+ (IOCTL_RETRY_COUNT - retry), err));
+ }
+
+ if (retry <= 0) {
+ WL_ERR(("Get Phy Noise failed, error = %d\n", err));
+ noise = CHAN_NOISE_DUMMY;
+ }
+
+ survey = (struct wl_dump_survey *)MALLOCZ(cfg->osh,
+ sizeof(struct wl_dump_survey));
+ if (unlikely(!survey)) {
+ WL_ERR(("%s: alloc failed\n", __func__));
+ return -ENOMEM;
+ }
+
+ /* Start Measurement for obss stats on current channel */
+ req.msrmnt_query = 0;
+ req.time_req = ACS_MSRMNT_DELAY;
+ if ((err = wl_dump_obss(ndev, req, survey)) < 0) {
+ goto exit;
+ }
+
+ /*
+ * Wait for the measurement to complete, adding a 10 ms buffer to
+ * absorb any delay in IOVAR completion.
+ */
+ msleep(ACS_MSRMNT_DELAY + 10);
+
+ /* Issue IOVAR to collect measurement results */
+ req.msrmnt_query = 1;
+ if ((err = wl_dump_obss(ndev, req, survey)) < 0) {
+ goto exit;
+ }
+
+ info->channel = chan;
+ info->noise = noise;
+ info->channel_time = ACS_MSRMNT_DELAY;
+ info->channel_time_busy = ACS_MSRMNT_DELAY - survey->idle;
+ info->channel_time_rx = survey->obss + survey->ibss + survey->no_ctg +
+ survey->no_pckt;
+ info->channel_time_tx = survey->tx;
+ info->filled = SURVEY_INFO_NOISE_DBM |SURVEY_INFO_CHANNEL_TIME |
+ SURVEY_INFO_CHANNEL_TIME_BUSY | SURVEY_INFO_CHANNEL_TIME_RX |
+ SURVEY_INFO_CHANNEL_TIME_TX;
+ MFREE(cfg->osh, survey, sizeof(struct wl_dump_survey));
+
+ return 0;
+exit:
+ MFREE(cfg->osh, survey, sizeof(struct wl_dump_survey));
+ return err;
+}
+#endif /* WL_SUPPORT_ACS */
+
+static struct cfg80211_ops wl_cfg80211_ops = {
+ .add_virtual_intf = wl_cfg80211_add_virtual_iface,
+ .del_virtual_intf = wl_cfg80211_del_virtual_iface,
+ .change_virtual_intf = wl_cfg80211_change_virtual_iface,
+#if defined(WL_CFG80211_P2P_DEV_IF)
+ .start_p2p_device = wl_cfgp2p_start_p2p_device,
+ .stop_p2p_device = wl_cfgp2p_stop_p2p_device,
+#endif /* WL_CFG80211_P2P_DEV_IF */
+ .scan = wl_cfg80211_scan,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+ .abort_scan = wl_cfg80211_abort_scan,
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */
+ .set_wiphy_params = wl_cfg80211_set_wiphy_params,
+ .join_ibss = wl_cfg80211_join_ibss,
+ .leave_ibss = wl_cfg80211_leave_ibss,
+ .get_station = wl_cfg80211_get_station,
+ .dump_station = wl_cfg80211_dump_station,
+ .set_tx_power = wl_cfg80211_set_tx_power,
+ .get_tx_power = wl_cfg80211_get_tx_power,
+ .add_key = wl_cfg80211_add_key,
+ .del_key = wl_cfg80211_del_key,
+ .get_key = wl_cfg80211_get_key,
+ .set_default_key = wl_cfg80211_config_default_key,
+ .set_default_mgmt_key = wl_cfg80211_config_default_mgmt_key,
+ .set_power_mgmt = wl_cfg80211_set_power_mgmt,
+ .connect = wl_cfg80211_connect,
+ .disconnect = wl_cfg80211_disconnect,
+ .set_pmksa = wl_cfg80211_set_pmksa,
+ .del_pmksa = wl_cfg80211_del_pmksa,
+ .flush_pmksa = wl_cfg80211_flush_pmksa,
+ .remain_on_channel = wl_cfgscan_remain_on_channel,
+ .cancel_remain_on_channel = wl_cfgscan_cancel_remain_on_channel,
+ .mgmt_tx = wl_cfg80211_mgmt_tx,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0))
+ .mgmt_frame_register = wl_cfg80211_mgmt_frame_register,
+#else
+ .update_mgmt_frame_registrations = wl_cfg80211_mgmt_frame_register,
+#endif
+ .change_bss = wl_cfg80211_change_bss,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)) || \
+ defined(WL_COMPAT_WIRELESS)
+ .set_channel = wl_cfg80211_set_channel,
+#endif /* (LINUX_VERSION < VERSION(3, 6, 0)) || WL_COMPAT_WIRELESS */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0)) && \
+ !defined(WL_COMPAT_WIRELESS)
+ .set_beacon = wl_cfg80211_add_set_beacon,
+ .add_beacon = wl_cfg80211_add_set_beacon,
+ .del_beacon = wl_cfg80211_del_beacon,
+#else
+ .change_beacon = wl_cfg80211_change_beacon,
+ .start_ap = wl_cfg80211_start_ap,
+ .stop_ap = wl_cfg80211_stop_ap,
+#endif /* LINUX_VERSION < KERNEL_VERSION(3,4,0) && !WL_COMPAT_WIRELESS */
+#ifdef WL_SCHED_SCAN
+ .sched_scan_start = wl_cfg80211_sched_scan_start,
+ .sched_scan_stop = wl_cfg80211_sched_scan_stop,
+#endif /* WL_SCHED_SCAN */
+#if defined(WL_SUPPORT_BACKPORTED_KPATCHES) || (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
+ .del_station = wl_cfg80211_del_station,
+ .change_station = wl_cfg80211_change_station,
+ .mgmt_tx_cancel_wait = wl_cfg80211_mgmt_tx_cancel_wait,
+#endif /* WL_SUPPORT_BACKPORTED_KPATCHES || KERNEL_VERSION >= (3,2,0) */
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0)) || \
+ defined(WL_COMPAT_WIRELESS)
+ .tdls_mgmt = wl_cfg80211_tdls_mgmt,
+ .tdls_oper = wl_cfg80211_tdls_oper,
+#endif /* LINUX_VERSION > VERSION(3, 2, 0) || WL_COMPAT_WIRELESS */
+#ifdef WL_SUPPORT_ACS
+ .dump_survey = wl_cfg80211_dump_survey,
+#endif /* WL_SUPPORT_ACS */
+#ifdef WL_CFG80211_ACL
+ .set_mac_acl = wl_cfg80211_set_mac_acl,
+#endif /* WL_CFG80211_ACL */
+#ifdef GTK_OFFLOAD_SUPPORT
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0))
+ .set_rekey_data = wl_cfg80211_set_rekey_data,
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0) */
+#endif /* GTK_OFFLOAD_SUPPORT */
+#ifdef WL_CLIENT_SAE
+ .external_auth = wl_cfg80211_external_auth,
+#endif /* WL_CLIENT_SAE */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+ /* This should be enabled from kernel version which supports this */
+ .update_connect_params = wl_cfg80211_update_connect_params,
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0) */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0))
+ .set_pmk = wl_cfg80211_set_pmk,
+ .del_pmk = wl_cfg80211_del_pmk,
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0) */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0))
+ .channel_switch = wl_cfg80211_channel_switch,
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0) */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
+#ifdef WLFBT
+ .update_ft_ies = wl_cfg80211_update_ft_ies,
+#endif /* WLFBT */
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0) */
+};
+
+s32 wl_mode_to_nl80211_iftype(s32 mode)
+{
+ switch (mode) {
+ case WL_MODE_BSS:
+ return NL80211_IFTYPE_STATION;
+ case WL_MODE_IBSS:
+ return NL80211_IFTYPE_ADHOC;
+ case WL_MODE_AP:
+ return NL80211_IFTYPE_AP;
+#ifdef WLMESH_CFG80211
+ case WL_MODE_MESH:
+ return NL80211_IFTYPE_MESH_POINT;
+#endif /* WLMESH_CFG80211 */
+ default:
+ return NL80211_IFTYPE_UNSPECIFIED;
+ }
+}
+
+static bool
+wl_is_ccode_change_required(struct net_device *net,
+ char *country_code, int revinfo)
+{
+ s32 ret = BCME_OK;
+ wl_country_t cspec = {{0}, 0, {0}};
+ wl_country_t cur_cspec = {{0}, 0, {0}};
+
+ ret = wldev_iovar_getbuf(net, "country", NULL, 0, &cur_cspec,
+ sizeof(cur_cspec), NULL);
+ if (ret < 0) {
+ WL_ERR(("get country code failed = %d\n", ret));
+ return true;
+ }
+ /* If translation table is available, update cspec */
+ cspec.rev = revinfo;
+ strlcpy(cspec.country_abbrev, country_code, WL_CCODE_LEN + 1);
+ strlcpy(cspec.ccode, country_code, WL_CCODE_LEN + 1);
+ dhd_get_customized_country_code(net, cspec.country_abbrev, &cspec);
+ if ((cur_cspec.rev == cspec.rev) &&
+ (strncmp(cur_cspec.ccode, cspec.ccode, WL_CCODE_LEN) == 0) &&
+ (strncmp(cur_cspec.country_abbrev, cspec.country_abbrev, WL_CCODE_LEN) == 0)) {
+ WL_INFORM_MEM(("country code = %s/%d is already configured\n",
+ country_code, revinfo));
+ return false;
+ }
+ return true;
+}
+
+void
+wl_cfg80211_cleanup_connection(struct net_device *net, bool user_enforced)
+{
+ s32 ret = BCME_OK;
+ struct wireless_dev *wdev = ndev_to_wdev(net);
+ struct wiphy *wiphy = wdev->wiphy;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct net_info *iter, *next;
+ scb_val_t scbval;
+
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ for_each_ndev(cfg, iter, next) {
+ GCC_DIAGNOSTIC_POP();
+ if (iter->ndev) {
+ if (wl_get_drv_status(cfg, AP_CREATED, iter->ndev)) {
+ memset(scbval.ea.octet, 0xff, ETHER_ADDR_LEN);
+ scbval.val = DOT11_RC_DEAUTH_LEAVING;
+ if ((ret = wldev_ioctl_set(iter->ndev,
+ WLC_SCB_DEAUTHENTICATE_FOR_REASON,
+ &scbval, sizeof(scb_val_t))) != 0) {
+ WL_ERR(("Failed to disconnect STAs %d\n", ret));
+ }
+
+ } else if (wl_get_drv_status(cfg, CONNECTED, iter->ndev)) {
+ if ((iter->ndev == net) && !user_enforced)
+ continue;
+ wl_cfg80211_disassoc(iter->ndev, WLAN_REASON_DEAUTH_LEAVING);
+ } else {
+ WL_INFORM(("Disconnected state. Interface clean "
+ "up skipped for ifname:%s\n", iter->ndev->name));
+ }
+ }
+ }
+
+ wl_cfgscan_cancel_scan(cfg);
+
+ /* Clean up NAN connection */
+#ifdef WL_NAN
+ if (wl_cfgnan_is_enabled(cfg)) {
+ mutex_lock(&cfg->if_sync);
+ ret = wl_cfgnan_check_nan_disable_pending(cfg, true, true);
+ mutex_unlock(&cfg->if_sync);
+ if (ret != BCME_OK) {
+ WL_ERR(("failed to disable nan, error[%d]\n", ret));
+ }
+ }
+#endif /* WL_NAN */
+}
+
+s32
+wl_cfg80211_set_country_code(struct net_device *net, char *country_code,
+ bool notify, bool user_enforced, int revinfo)
+{
+ s32 ret = BCME_OK;
+ struct wireless_dev *wdev = ndev_to_wdev(net);
+ struct wiphy *wiphy = wdev->wiphy;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ BCM_REFERENCE(cfg);
+
+ if (revinfo < 0) {
+ WL_ERR(("country revinfo wrong : %d\n", revinfo));
+ ret = BCME_BADARG;
+ goto exit;
+ }
+
+ if ((wl_is_ccode_change_required(net, country_code, revinfo) == false) &&
+ !dhd_force_country_change(net)) {
+ goto exit;
+ }
+
+ wl_cfg80211_cleanup_connection(net, user_enforced);
+
+ ret = wldev_set_country(net, country_code,
+ notify, revinfo);
+ if (ret < 0) {
+ WL_ERR(("set country Failed :%d\n", ret));
+ goto exit;
+ }
+
+ /* Send up the hint so that upper-layer apps can refresh the
+ * channel list.
+ */
+ if (!IS_REGDOM_SELF_MANAGED(wiphy)) {
+ regulatory_hint(wiphy, country_code);
+ }
+
+exit:
+ return ret;
+}
+
+#ifdef CONFIG_CFG80211_INTERNAL_REGDB
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0))
+#define WL_CFG80211_REG_NOTIFIER() static int wl_cfg80211_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
+#else
+#define WL_CFG80211_REG_NOTIFIER() static void wl_cfg80211_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
+#endif /* kernel version < 3.9.0 */
+#endif
+
+#ifdef CONFIG_CFG80211_INTERNAL_REGDB
+WL_CFG80211_REG_NOTIFIER()
+{
+ struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)wiphy_priv(wiphy);
+ int ret = 0;
+ int revinfo = -1;
+
+ if (!request || !cfg) {
+ WL_ERR(("Invalid arg\n"));
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 11))
+ return -EINVAL;
+#else
+ return;
+#endif /* kernel version < 3.10.11 */
+ }
+
+ WL_DBG(("ccode: %c%c Initiator: %d\n",
+ request->alpha2[0], request->alpha2[1], request->initiator));
+
+ /* We only handle requests initiated by the user or a country IE */
+ if ((request->initiator != NL80211_REGDOM_SET_BY_USER) &&
+ (request->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE)) {
+ WL_ERR(("reg_notifier for initiator:%d not supported : set default\n",
+ request->initiator));
+ /* For a country not supported by the regdb, let the driver
+ * set up the platform default locale.
+ */
+ }
+
+ WL_ERR(("Set country code %c%c from %s\n",
+ request->alpha2[0], request->alpha2[1],
+ ((request->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) ? "11d AP" : "User")));
+ ret = wl_cfg80211_set_country_code(bcmcfg_to_prmry_ndev(cfg), request->alpha2, false,
+ (request->initiator == NL80211_REGDOM_SET_BY_USER ? true : false),
+ revinfo);
+ if (ret < 0) {
+ WL_ERR(("Set country failed ret:%d\n", ret));
+ }
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 11))
+ return ret;
+#else
+ return;
+#endif /* kernel version < 3.10.11 */
+}
+#endif /* CONFIG_CFG80211_INTERNAL_REGDB */
+
+#ifdef CONFIG_PM
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
+static const struct wiphy_wowlan_support brcm_wowlan_support = {
+ .flags = WIPHY_WOWLAN_ANY,
+ .n_patterns = WL_WOWLAN_MAX_PATTERNS,
+ .pattern_min_len = WL_WOWLAN_MIN_PATTERN_LEN,
+ .pattern_max_len = WL_WOWLAN_MAX_PATTERN_LEN,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0))
+ .max_pkt_offset = WL_WOWLAN_MAX_PATTERN_LEN,
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) */
+};
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) */
+#endif /* CONFIG_PM */
+
+int wl_features_set(u8 *array, uint8 len, u32 ftidx)
+{
+ u8* ft_byte;
+
+ if ((ftidx / 8u) >= len)
+ return BCME_BADARG;
+
+ ft_byte = &array[ftidx / 8u];
+ *ft_byte |= BIT(ftidx % 8u);
+ return BCME_OK;
+}
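+
+/*
+ * Illustrative sketch (editor's example, not driver code): wl_features_set()
+ * marks bit (ftidx % 8) of byte (ftidx / 8). The guard and values here are
+ * hypothetical.
+ */
+#ifdef WL_FEATURES_SET_EXAMPLE /* hypothetical guard; never defined */
+static void wl_features_set_demo(void)
+{
+ u8 caps[8] = {0};
+
+ /* feature index 10 lands in byte 1, bit 2 */
+ (void)wl_features_set(caps, sizeof(caps), 10u);
+ /* caps[1] == 0x04 at this point */
+}
+#endif /* WL_FEATURES_SET_EXAMPLE */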
+
+static
+void wl_config_custom_regulatory(struct wiphy *wiphy)
+{
+
+#if defined(WL_SELF_MANAGED_REGDOM) && \
+ (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0))
+ /* Use self managed regulatory domain */
+ wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED |
+ REGULATORY_IGNORE_STALE_KICKOFF;
+ wiphy->regd = &brcm_regdom;
+ WL_DBG(("Self managed regdom\n"));
+ return;
+#else /* WL_SELF_MANAGED_REGDOM && KERNEL >= 4.0 */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+ wiphy->regulatory_flags |=
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0))
+ REGULATORY_IGNORE_STALE_KICKOFF |
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0) */
+ REGULATORY_CUSTOM_REG;
+#else /* KERNEL VER >= 3.14 */
+ wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) */
+ wiphy_apply_custom_regulatory(wiphy, &brcm_regdom);
+ WL_DBG(("apply custom regulatory\n"));
+#endif /* WL_SELF_MANAGED_REGDOM && KERNEL >= 4.0 */
+}
+
+static s32 wl_setup_wiphy(struct wireless_dev *wdev, struct device *sdiofunc_dev, dhd_pub_t *context)
+{
+ s32 err = 0;
+#ifdef CONFIG_PM
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
+ struct cfg80211_wowlan *brcm_wowlan_config = NULL;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) */
+#endif /* CONFIG_PM */
+
+//#if defined(BCMDONGLEHOST) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0) || defined(WL_COMPAT_WIRELESS))
+ dhd_pub_t *dhd = (dhd_pub_t *)context;
+ BCM_REFERENCE(dhd);
+
+ if (!dhd) {
+ WL_ERR(("DHD is NULL!!"));
+ err = -ENODEV;
+ return err;
+ }
+//#endif /* defined(BCMDONGLEHOST) && KERNEL >= 3, 4, 0 || defined(WL_COMPAT_WIRELESS)) */
+
+ wdev->wiphy =
+ wiphy_new(&wl_cfg80211_ops, sizeof(struct bcm_cfg80211));
+ if (unlikely(!wdev->wiphy)) {
+ WL_ERR(("Couldn not allocate wiphy device\n"));
+ err = -ENOMEM;
+ return err;
+ }
+ set_wiphy_dev(wdev->wiphy, sdiofunc_dev);
+ wdev->wiphy->max_scan_ie_len = WL_SCAN_IE_LEN_MAX;
+ /* Report how many SSIDs Driver can support per Scan request */
+ wdev->wiphy->max_scan_ssids = WL_SCAN_PARAMS_SSID_MAX;
+ wdev->wiphy->max_num_pmkids = WL_NUM_PMKIDS_MAX;
+#ifdef WL_SCHED_SCAN
+ wdev->wiphy->max_sched_scan_ssids = MAX_PFN_LIST_COUNT;
+ wdev->wiphy->max_match_sets = MAX_PFN_LIST_COUNT;
+ wdev->wiphy->max_sched_scan_ie_len = WL_SCAN_IE_LEN_MAX;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+ wdev->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
+#else
+ wdev->wiphy->max_sched_scan_reqs = 1;
+#endif /* LINUX_VER < 4.12 */
+#endif /* WL_SCHED_SCAN */
+#ifdef WLMESH_CFG80211
+ wdev->wiphy->flags |= WIPHY_FLAG_MESH_AUTH;
+#endif /* WLMESH_CFG80211 */
+ wdev->wiphy->interface_modes =
+ BIT(NL80211_IFTYPE_STATION)
+ | BIT(NL80211_IFTYPE_ADHOC)
+#if !defined(WL_ENABLE_P2P_IF) && !defined(WL_CFG80211_P2P_DEV_IF)
+ /*
+ * Monitor mode support here creates an issue when registering the
+ * action frame for a P2P GO, leading to errors receiving action
+ * frames on the GO interface. The code is kept because other
+ * modules keep their monitor mode code as-is, even though we do
+ * not support this mode.
+ */
+ | BIT(NL80211_IFTYPE_MONITOR)
+#endif /* !WL_ENABLE_P2P_IF && !WL_CFG80211_P2P_DEV_IF */
+#if defined(WL_IFACE_COMB_NUM_CHANNELS) || \
+ defined(WL_CFG80211_P2P_DEV_IF)
+ | BIT(NL80211_IFTYPE_P2P_CLIENT)
+ | BIT(NL80211_IFTYPE_P2P_GO)
+#endif /* WL_IFACE_COMB_NUM_CHANNELS || WL_CFG80211_P2P_DEV_IF */
+#if defined(WL_CFG80211_P2P_DEV_IF)
+ | BIT(NL80211_IFTYPE_P2P_DEVICE)
+#endif /* WL_CFG80211_P2P_DEV_IF */
+#ifdef WLMESH_CFG80211
+ | BIT(NL80211_IFTYPE_MESH_POINT)
+#endif /* WLMESH_CFG80211 */
+ | BIT(NL80211_IFTYPE_AP);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) && \
+ (defined(WL_IFACE_COMB_NUM_CHANNELS) || \
+ defined(WL_CFG80211_P2P_DEV_IF))
+ WL_DBG(("Setting interface combinations for common mode\n"));
+ wdev->wiphy->iface_combinations = common_iface_combinations;
+ wdev->wiphy->n_iface_combinations =
+ ARRAY_SIZE(common_iface_combinations);
+#endif /* LINUX_VER >= 3.0 && (WL_IFACE_COMB_NUM_CHANNELS || WL_CFG80211_P2P_DEV_IF) */
+
+ wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz;
+
+ wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+ wdev->wiphy->cipher_suites = __wl_cipher_suites;
+ wdev->wiphy->n_cipher_suites = ARRAY_SIZE(__wl_cipher_suites);
+ wdev->wiphy->max_remain_on_channel_duration = 5000;
+ wdev->wiphy->mgmt_stypes = wl_cfg80211_default_mgmt_stypes;
+#ifndef WL_POWERSAVE_DISABLED
+ wdev->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
+#else
+ wdev->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+#endif /* !WL_POWERSAVE_DISABLED */
+ wdev->wiphy->flags |= WIPHY_FLAG_NETNS_OK |
+ WIPHY_FLAG_4ADDR_AP |
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 39)) && \
+ !defined(WL_COMPAT_WIRELESS)
+ WIPHY_FLAG_SUPPORTS_SEPARATE_DEFAULT_KEYS |
+#endif
+ WIPHY_FLAG_4ADDR_STATION;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0))
+ /*
+ * If the FW ROAM flag is advertised, the upper layer doesn't provide
+ * the bssid & freq in the connect command. However, kernels >= 3.15
+ * provide bssid_hint & freq_hint, which can be used by the firmware.
+ * The fw_ap_select variable determines whether the FW selects the AP
+ * or user space selects the target AP within the given ESS.
+ */
+ wdev->wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM;
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)) || \
+ defined(WL_COMPAT_WIRELESS)
+ /* this flag should be added to support wpa_supplicant 1.0+ */
+ wdev->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
+ WIPHY_FLAG_OFFCHAN_TX;
+#endif
+#if defined(WL_SUPPORT_BACKPORTED_KPATCHES) || (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
+ /* From kernel 3.4 onwards the AP_SME flag can be advertised,
+ * removing the need for the supplicant patch.
+ */
+ wdev->wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME;
+
+#ifdef WL_CFG80211_ACL
+ /* Configure ACL capabilities. */
+ wdev->wiphy->max_acl_mac_addrs = MAX_NUM_MAC_FILT;
+#endif
+
+#if defined(BCMDONGLEHOST) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0) || \
+ defined(WL_COMPAT_WIRELESS))
+ /* The supplicant distinguishes between SoftAP mode and other
+ * modes (e.g. P2P, WPS, HS2.0) when it builds the probe
+ * response frame, from Supplicant MR1 and kernel 3.4.0 or
+ * later. To add a vendor-specific IE into the probe response
+ * frame in SoftAP mode, the AP_PROBE_RESP_OFFLOAD flag is set
+ * in wiphy->flags.
+ */
+ if (dhd_get_fw_mode(dhd->info) == DHD_FLAG_HOSTAP_MODE) {
+ wdev->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
+ wdev->wiphy->probe_resp_offload = 0;
+ }
+#endif /* defined(BCMDONGLEHOST) && KERNEL >= 3, 4, 0 || defined(WL_COMPAT_WIRELESS) */
+#endif /* WL_SUPPORT_BACKPORTED_KPATCHES || (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) */
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0)) || \
+ defined(WL_COMPAT_WIRELESS)
+ wdev->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
+#endif
+
+#if defined(CONFIG_PM) && defined(WL_CFG80211_P2P_DEV_IF)
+ /*
+ * From the linux-3.10 kernel onwards, a wowlan packet filter is
+ * mandated to avoid disconnection from the connected network
+ * before suspend. So a dummy wowlan filter is configured for
+ * kernels linux-3.8 and above.
+ */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
+ wdev->wiphy->wowlan = &brcm_wowlan_support;
+ /* If this is not provided, the cfg stack will disconnect
+ * during suspend.
+ * Note: wiphy->wowlan_config is freed by the cfg80211 layer,
+ * so use kmalloc instead of MALLOC(osh) to avoid a false alarm.
+ */
+ brcm_wowlan_config = kmalloc(sizeof(struct cfg80211_wowlan), GFP_KERNEL);
+ if (brcm_wowlan_config) {
+ brcm_wowlan_config->disconnect = true;
+ brcm_wowlan_config->gtk_rekey_failure = true;
+ brcm_wowlan_config->eap_identity_req = true;
+ brcm_wowlan_config->four_way_handshake = true;
+ brcm_wowlan_config->patterns = NULL;
+ brcm_wowlan_config->n_patterns = 0;
+ brcm_wowlan_config->tcp = NULL;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0))
+ brcm_wowlan_config->nd_config = NULL;
+#endif
+ } else {
+		WL_ERR(("Cannot allocate memory for brcm_wowlan_config,"
+			" so wiphy->wowlan_config is set to NULL\n"));
+ }
+ wdev->wiphy->wowlan_config = brcm_wowlan_config;
+#else
+ wdev->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY;
+ wdev->wiphy->wowlan.n_patterns = WL_WOWLAN_MAX_PATTERNS;
+ wdev->wiphy->wowlan.pattern_min_len = WL_WOWLAN_MIN_PATTERN_LEN;
+ wdev->wiphy->wowlan.pattern_max_len = WL_WOWLAN_MAX_PATTERN_LEN;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0))
+ wdev->wiphy->wowlan.max_pkt_offset = WL_WOWLAN_MAX_PATTERN_LEN;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) */
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) */
+#endif /* CONFIG_PM && WL_CFG80211_P2P_DEV_IF */
+
+	WL_DBG(("Registering custom regulatory\n"));
+ wl_config_custom_regulatory(wdev->wiphy);
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 14, 0)) || defined(WL_VENDOR_EXT_SUPPORT)
+ WL_INFORM_MEM(("Registering Vendor80211\n"));
+ err = wl_cfgvendor_attach(wdev->wiphy, dhd);
+ if (unlikely(err < 0)) {
+		WL_ERR(("Could not attach vendor commands (%d)\n", err));
+ }
+#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 14, 0)) || defined(WL_VENDOR_EXT_SUPPORT) */
+#ifdef WL_FILS
+ wiphy_ext_feature_set(wdev->wiphy, NL80211_EXT_FEATURE_FILS_SK_OFFLOAD);
+#endif /* WL_FILS */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0))
+ wdev->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+ wdev->wiphy->max_num_csa_counters = WL_MAX_NUM_CSA_COUNTERS;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0) */
+
+ /* Now we can register wiphy with cfg80211 module */
+	err = wiphy_register(wdev->wiphy);
+	if (unlikely(err < 0)) {
+		WL_ERR(("Could not register wiphy device (%d)\n", err));
+		wiphy_free(wdev->wiphy);
+		/* wiphy has been freed; bail out before anything below
+		 * dereferences wdev->wiphy again.
+		 */
+		return err;
+	}
+
+#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) && \
+ (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 3, 0))) && defined(WL_IFACE_COMB_NUM_CHANNELS)
+ /* Workaround for a cfg80211 bug */
+ wdev->wiphy->flags &= ~WIPHY_FLAG_ENFORCE_COMBINATIONS;
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) && defined(SUPPORT_RANDOM_MAC_SCAN)
+ wdev->wiphy->features |= (NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
+ NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR);
+ wdev->wiphy->max_sched_scan_plans = 1; /* multiple plans not supported */
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) && defined(SUPPORT_RANDOM_MAC_SCAN) */
+
+#if defined(WL_SAE) || defined(WL_CLIENT_SAE)
+ if (wl_extsae_chip(dhd))
+ wdev->wiphy->features |= NL80211_FEATURE_SAE;
+#endif /* WL_SAE || WL_CLIENT_SAE */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0)) && defined(BCMSUP_4WAY_HANDSHAKE)
+ if (FW_SUPPORTED(dhd, idsup)) {
+ wiphy_ext_feature_set(wdev->wiphy, NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_PSK);
+ wiphy_ext_feature_set(wdev->wiphy, NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_1X);
+ }
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0) && defined(BCMSUP_4WAY_HANDSHAKE) */
+#ifdef WL_SCAN_TYPE
+	/* Advertise scan type capability. These scan types are mapped to the
+	 * default scan on non-supported chipsets.
+	 */
+ wiphy_ext_feature_set(wdev->wiphy, NL80211_EXT_FEATURE_LOW_SPAN_SCAN);
+ wiphy_ext_feature_set(wdev->wiphy, NL80211_EXT_FEATURE_LOW_POWER_SCAN);
+ wiphy_ext_feature_set(wdev->wiphy, NL80211_EXT_FEATURE_HIGH_ACCURACY_SCAN);
+ wdev->wiphy->features |= NL80211_FEATURE_LOW_PRIORITY_SCAN;
+#endif /* WL_SCAN_TYPE */
+
+ return err;
+}
+
+static void wl_free_wdev(struct bcm_cfg80211 *cfg)
+{
+ struct wireless_dev *wdev = cfg->wdev;
+ struct wiphy *wiphy = NULL;
+ if (!wdev) {
+ WL_ERR(("wdev is invalid\n"));
+ return;
+ }
+ if (wdev->wiphy) {
+ wiphy = wdev->wiphy;
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 14, 0)) || defined(WL_VENDOR_EXT_SUPPORT)
+ wl_cfgvendor_detach(wdev->wiphy);
+#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 14, 0)) || defined(WL_VENDOR_EXT_SUPPORT) */
+#if defined(CONFIG_PM) && defined(WL_CFG80211_P2P_DEV_IF)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
+		/* Reset wowlan & wowlan_config before unregister to avoid a kernel panic */
+ WL_DBG(("clear wowlan\n"));
+ wdev->wiphy->wowlan = NULL;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) */
+#endif /* CONFIG_PM && WL_CFG80211_P2P_DEV_IF */
+#if defined(WL_SELF_MANAGED_REGDOM) && \
+ (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0))
+		/* Set the regd ptr to NULL to avoid it being referenced/freed by the
+		 * regulatory unregister path
+		 */
+ wiphy->regd = NULL;
+#endif /* WL_SELF_MANAGED_REGDOM && KERNEL >= 4.0 */
+ wiphy_unregister(wdev->wiphy);
+ wdev->wiphy->dev.parent = NULL;
+ wdev->wiphy = NULL;
+ }
+
+ wl_delete_all_netinfo(cfg);
+ if (wiphy) {
+ if (wdev->netdev)
+ wdev->netdev->ieee80211_ptr = NULL;
+ wdev->netdev = NULL;
+ MFREE(cfg->osh, wdev, sizeof(*wdev));
+ cfg->wdev = NULL;
+ wiphy_free(wiphy);
+ }
+
+	/* PLEASE do NOT call any function after wiphy_free: the driver's private
+	 * structure "cfg", which is the private part of the wiphy, has been
+	 * freed by wiphy_free!
+	 */
+}
+
+static s32
+wl_post_linkup_ops(struct bcm_cfg80211 *cfg, wl_assoc_status_t *as)
+{
+ s32 ret = BCME_OK;
+ int vndr_oui_num = 0;
+ struct net_device *ndev = as->ndev;
+ char vndr_oui[MAX_VNDR_OUI_STR_LEN] = {0, };
+#ifdef BCMDONGLEHOST
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+#endif /* BCMDONGLEHOST */
+
+#ifdef WL_WPS_SYNC
+ wl_wps_session_update(ndev, WPS_STATE_LINKUP, as->addr);
+#endif /* WL_WPS_SYNC */
+
+ if (IS_PRIMARY_NDEV(cfg, ndev)) {
+ vndr_oui_num = wl_vndr_ies_get_vendor_oui(cfg,
+ ndev, vndr_oui, ARRAY_SIZE(vndr_oui));
+ if (vndr_oui_num > 0) {
+ WL_INFORM_MEM(("[%s] vendor oui: %s\n",
+ ndev->name, vndr_oui));
+ }
+ }
+
+#ifdef ESCAN_CHANNEL_CACHE
+ /* Update RCC list. FW clears RCC from join iovar context */
+ update_roam_cache(cfg, ioctl_version);
+#endif /* ESCAN_CHANNEL_CACHE */
+
+#ifdef BCMDONGLEHOST
+ if (as->event_type == WLC_E_LINK) {
+ /* Arm pkt logging timer */
+ dhd_dump_mod_pkt_timer(dhdp, PKT_CNT_RSN_CONNECT);
+ }
+#endif /* BCMDONGLEHOST */
+#ifdef WBTEXT
+ if ((ndev->ieee80211_ptr->iftype == NL80211_IFTYPE_STATION) &&
+ dhdp->wbtext_support && (as->event_type == WLC_E_SET_SSID)) {
+ /* set wnm_keepalives_max_idle after association */
+ wl_cfg80211_wbtext_set_wnm_maxidle(cfg, ndev);
+ }
+#endif /* WBTEXT */
+
+#ifdef DHD_EVENT_LOG_FILTER
+ dhd_event_log_filter_notify_connect_done(dhdp,
+ as->addr, false);
+#endif /* DHD_EVENT_LOG_FILTER */
+
+#ifdef CUSTOM_SET_OCLOFF
+ if (dhdp->ocl_off) {
+ int err = 0;
+ int ocl_enable = 0;
+ err = wldev_iovar_setint(ndev, "ocl_enable", ocl_enable);
+ if (err != 0) {
+ WL_ERR(("[WIFI_SEC] Set ocl_enable %d"
+ " failed %d\n",
+ ocl_enable, err));
+ } else {
+ WL_ERR(("[WIFI_SEC] Set ocl_enable %d"
+ " succeeded %d\n",
+ ocl_enable, err));
+ }
+ }
+#endif /* CUSTOM_SET_OCLOFF */
+#ifdef CUSTOM_SET_ANTNPM
+ if (dhdp->mimo_ant_set) {
+ int err = 0;
+
+ WL_ERR(("[WIFI_SEC] mimo_ant_set = %d\n", dhdp->mimo_ant_set));
+ err = wldev_iovar_setint(ndev, "txchain", dhdp->mimo_ant_set);
+ if (err != 0) {
+ WL_ERR(("[WIFI_SEC] Fail set txchain. err:%d\n", err));
+ }
+ err = wldev_iovar_setint(ndev, "rxchain", dhdp->mimo_ant_set);
+ if (err != 0) {
+ WL_ERR(("[WIFI_SEC] Fail set rxchain. err:%d\n", err));
+ }
+ }
+#endif /* CUSTOM_SET_ANTNPM */
+
+#if defined(ROAM_ENABLE) && defined(ROAM_AP_ENV_DETECTION)
+ if (dhdp->roam_env_detection) {
+ wldev_iovar_setint(ndev, "roam_env_detection",
+ AP_ENV_INDETERMINATE);
+ }
+#endif /* ROAM_ENABLE && ROAM_AP_ENV_DETECTION */
+
+ if (ndev != bcmcfg_to_prmry_ndev(cfg)) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)
+ init_completion(&cfg->iface_disable);
+#else
+ /* reinitialize completion to clear previous count */
+ INIT_COMPLETION(cfg->iface_disable);
+#endif
+ }
+
+#ifdef CUSTOM_SET_CPUCORE
+ if (wl_get_chan_isvht80(ndev, dhdp)) {
+ if (ndev == bcmcfg_to_prmry_ndev(cfg))
+ dhdp->chan_isvht80 |= DHD_FLAG_STA_MODE; /* STA mode */
+ else if (is_p2p_group_iface(ndev->ieee80211_ptr))
+ dhdp->chan_isvht80 |= DHD_FLAG_P2P_MODE; /* p2p mode */
+ dhd_set_cpucore(dhdp, TRUE);
+ }
+#endif /* CUSTOM_SET_CPUCORE */
+
+#ifdef CUSTOM_LONG_RETRY_LIMIT
+ if (wl_set_retry(ndev, CUSTOM_LONG_RETRY_LIMIT, 1) < 0) {
+ WL_ERR(("CUSTOM_LONG_RETRY_LIMIT set fail!\n"));
+ }
+#endif /* CUSTOM_LONG_RETRY_LIMIT */
+
+#if defined(CONFIG_TIZEN)
+ net_stat_tizen_update_wifi(ndev, WIFISTAT_CONNECTION);
+#endif /* CONFIG_TIZEN */
+
+#ifdef WL_BAM
+ {
+ struct ether_addr eth = {{0}};
+ (void)memcpy_s(&eth.octet, ETH_ALEN, as->addr, ETH_ALEN);
+ if (wl_adps_bad_ap_check(cfg, &eth)) {
+ if (wl_adps_enabled(cfg, ndev)) {
+ wl_adps_set_suspend(cfg, ndev, ADPS_SUSPEND);
+ }
+ }
+ }
+#endif /* WL_BAM */
+
+#ifdef CONFIG_TCPACK_FASTTX
+ if (wl_get_chan_isvht80(ndev, dhdp))
+ wldev_iovar_setint(ndev, "tcpack_fast_tx", 0);
+ else
+ wldev_iovar_setint(ndev, "tcpack_fast_tx", 1);
+#endif /* CONFIG_TCPACK_FASTTX */
+ return ret;
+}
+
+#ifdef WL_SAE
+static s32
+wl_cfg80211_event_sae_key(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ wl_sae_key_info_t *sae_key)
+{
+ struct sk_buff *skb;
+ gfp_t kflags;
+ struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+ int err = BCME_OK;
+ struct cfg80211_pmksa pmksa;
+
+ kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+#if (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || \
+ LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
+ skb = cfg80211_vendor_event_alloc(wiphy, ndev_to_wdev(ndev), BRCM_SAE_VENDOR_EVENT_BUF_LEN,
+ BRCM_VENDOR_EVENT_SAE_KEY, kflags);
+#else
+ skb = cfg80211_vendor_event_alloc(wiphy, BRCM_SAE_VENDOR_EVENT_BUF_LEN,
+ BRCM_VENDOR_EVENT_SAE_KEY, kflags);
+#endif /* (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || */
+ /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) */
+ if (!skb) {
+		WL_ERR(("skb alloc failed\n"));
+ err = BCME_NOMEM;
+ goto done;
+ }
+
+	WL_INFORM_MEM(("Received SAE key event for "MACDBG" key length %x %x",
+ MAC2STRDBG(sae_key->peer_mac), sae_key->pmk_len, sae_key->pmkid_len));
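+	/* nla_put() return codes are not checked here; the skb is sized via
+	 * BRCM_SAE_VENDOR_EVENT_BUF_LEN, which is assumed to be large enough
+	 * for all three attributes.
+	 */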
+ nla_put(skb, BRCM_SAE_KEY_ATTR_PEER_MAC, ETHER_ADDR_LEN, sae_key->peer_mac);
+ nla_put(skb, BRCM_SAE_KEY_ATTR_PMK, sae_key->pmk_len, sae_key->pmk);
+ nla_put(skb, BRCM_SAE_KEY_ATTR_PMKID, sae_key->pmkid_len, sae_key->pmkid);
+ cfg80211_vendor_event(skb, kflags);
+	/* wpa_supplicant will manage the PMK and PMKID from here on.
+	 * Delete the PMK cache in firmware if wlc_ver equals
+	 * MIN_PMKID_LIST_V3_FW_MAJOR; otherwise ignore.
+	 * MIN_PMKID_LIST_V3_FW_MAJOR has two IOVARs (pmklist_info and PMKDB).
+	 */
+ if (cfg->wlc_ver.wlc_ver_major == MIN_PMKID_LIST_V3_FW_MAJOR) {
+ WL_INFORM_MEM(("Deleting the SAE PMK cache Info from firmware \n"));
+ memset_s(&pmksa, sizeof(pmksa), 0, sizeof(pmksa));
+ pmksa.bssid = sae_key->peer_mac;
+ pmksa.pmkid = sae_key->pmkid;
+ err = wl_cfg80211_update_pmksa(wiphy, ndev, &pmksa, FALSE);
+ if (err != BCME_OK) {
+ WL_ERR(("Failed to delete the SAE PMK cache Info from firmware %d\n", err));
+ }
+ }
+done:
+ return err;
+}
+
+static s32
+wl_bss_handle_sae_auth_v1(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ const wl_event_msg_t *event, void *data)
+{
+ int err = BCME_OK;
+ wl_auth_event_t *auth_data;
+ wl_sae_key_info_t sae_key;
+ uint16 tlv_buf_len;
+ auth_data = (wl_auth_event_t *)data;
+
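+	/* Assumes the event body is at least WL_AUTH_EVENT_FIXED_LEN_V1 bytes;
+	 * a shorter body would make tlv_buf_len underflow.
+	 */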
+ tlv_buf_len = auth_data->length - WL_AUTH_EVENT_FIXED_LEN_V1;
+
+ /* check if PMK info present */
+ sae_key.pmk = bcm_get_data_from_xtlv_buf(auth_data->xtlvs, tlv_buf_len,
+ WL_AUTH_PMK_TLV_ID, &(sae_key.pmk_len), BCM_XTLV_OPTION_ALIGN32);
+ if (!sae_key.pmk || !sae_key.pmk_len) {
+		WL_ERR(("Mandatory PMK info not present\n"));
+ err = BCME_NOTFOUND;
+ goto done;
+ }
+ /* check if PMKID info present */
+ sae_key.pmkid = bcm_get_data_from_xtlv_buf(auth_data->xtlvs, tlv_buf_len,
+ WL_AUTH_PMKID_TLV_ID, &(sae_key.pmkid_len), BCM_XTLV_OPTION_ALIGN32);
+ if (!sae_key.pmkid || !sae_key.pmkid_len) {
+ WL_ERR(("Mandatory PMKID info not present\n"));
+ err = BCME_NOTFOUND;
+ goto done;
+ }
+ memcpy_s(sae_key.peer_mac, ETHER_ADDR_LEN, event->addr.octet, ETHER_ADDR_LEN);
+ err = wl_cfg80211_event_sae_key(cfg, ndev, &sae_key);
+ if (err) {
+		WL_ERR(("Failed to send SAE key info event\n"));
+ }
+done:
+ return err;
+}
+
+static s32
+wl_bss_handle_sae_auth_v2(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ const wl_event_msg_t *event, void *data)
+{
+ int err = BCME_OK;
+ wl_auth_event_t *auth_data;
+ wl_sae_key_info_t sae_key;
+ uint16 tlv_buf_len;
+ uint8 ssid[DOT11_MAX_SSID_LEN];
+ const uint8 *tmp_buf;
+ uint16 ssid_len;
+ uint16 type_len;
+	uint32 type = 0;
+ pmkid_v3_t *t_pmkid = NULL;
+
+ auth_data = (wl_auth_event_t *)data;
+
+ tlv_buf_len = auth_data->length - WL_AUTH_EVENT_FIXED_LEN_V2;
+
+ /* check if PMK info present */
+ sae_key.pmk = bcm_get_data_from_xtlv_buf(auth_data->xtlvs, tlv_buf_len,
+ WL_AUTH_PMK_TLV_ID, &(sae_key.pmk_len), BCM_XTLV_OPTION_ALIGN32);
+ if (!sae_key.pmk || !sae_key.pmk_len) {
+		WL_ERR(("Mandatory PMK info not present\n"));
+ err = BCME_NOTFOUND;
+ goto done;
+ }
+ /* check if PMKID info present */
+ sae_key.pmkid = bcm_get_data_from_xtlv_buf(auth_data->xtlvs, tlv_buf_len,
+ WL_AUTH_PMKID_TLV_ID, &(sae_key.pmkid_len), BCM_XTLV_OPTION_ALIGN32);
+ if (!sae_key.pmkid || !sae_key.pmkid_len) {
+ WL_ERR(("Mandatory PMKID info not present\n"));
+ err = BCME_NOTFOUND;
+ goto done;
+ }
+ (void)memcpy_s(sae_key.peer_mac, ETHER_ADDR_LEN, event->addr.octet, ETHER_ADDR_LEN);
+
+ tmp_buf = bcm_get_data_from_xtlv_buf(auth_data->xtlvs, tlv_buf_len,
+ WL_AUTH_PMKID_TYPE_TLV_ID, &type_len, BCM_XTLV_OPTION_ALIGN32);
+
+	if (tmp_buf != NULL) {
+		/* guard against a missing PMKID-type TLV */
+		memcpy(&type, tmp_buf, MIN(type_len, sizeof(type)));
+	}
+ if (type == WL_AUTH_PMKID_TYPE_SSID) {
+ int idx;
+ int idx2;
+ pmkid_list_v3_t *spmk_list = &cfg->spmk_info_list->pmkids;
+
+ tmp_buf = bcm_get_data_from_xtlv_buf(auth_data->xtlvs, tlv_buf_len,
+ WL_AUTH_SSID_TLV_ID, &ssid_len, BCM_XTLV_OPTION_ALIGN32);
+ if (tmp_buf == NULL) {
+ return BCME_ERROR;
+ }
+ bzero(ssid, sizeof(ssid));
+ (void)memcpy_s(ssid, sizeof(ssid), tmp_buf, MIN(sizeof(ssid), ssid_len));
+ for (idx = 0; idx < spmk_list->count; idx++) {
+ t_pmkid = &spmk_list->pmkid[idx];
+ if (ssid_len == t_pmkid->ssid_len &&
+ !memcmp(ssid, t_pmkid->ssid, MIN(sizeof(ssid), ssid_len))) {
+ break;
+ }
+ }
+ if (idx >= spmk_list->count) {
+ if (spmk_list->count == MAXPMKID) {
+ /* remove oldest PMK info */
+ for (idx2 = 0; idx2 < spmk_list->count - 1; idx2++) {
+ (void)memcpy_s(&spmk_list->pmkid[idx2], sizeof(pmkid_v3_t),
+ &spmk_list->pmkid[idx2 + 1], sizeof(pmkid_v3_t));
+ }
+ t_pmkid = &spmk_list->pmkid[spmk_list->count - 1];
+ } else {
+ t_pmkid = &spmk_list->pmkid[spmk_list->count++];
+ }
+ }
+ if (!t_pmkid) {
+ WL_ERR(("SPMK TPMKID is null\n"));
+ return BCME_NOTFOUND;
+ }
+ bzero(t_pmkid, sizeof(pmkid_v3_t));
+		memcpy(&t_pmkid->bssid, event->addr.octet, ETHER_ADDR_LEN);
+ t_pmkid->ssid_len = ssid_len;
+ err = memcpy_s(t_pmkid->ssid, sizeof(t_pmkid->ssid), ssid, ssid_len);
+ if (err != BCME_OK) {
+ goto done;
+ }
+		/* Copied but not otherwise used */
+ t_pmkid->pmkid_len = sae_key.pmkid_len;
+ memcpy(t_pmkid->pmkid, sae_key.pmkid, sae_key.pmkid_len);
+ t_pmkid->pmk_len = sae_key.pmk_len;
+ memcpy(t_pmkid->pmk, sae_key.pmk, sae_key.pmk_len);
+ }
+
+ err = wl_cfg80211_event_sae_key(cfg, ndev, &sae_key);
+ if (err) {
+		WL_ERR(("Failed to send SAE key info event\n"));
+ }
+done:
+ return err;
+}
+
+s32
+wl_bss_handle_sae_auth(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ const wl_event_msg_t *event, void *data)
+{
+ int err = BCME_OK;
+ uint status = ntoh32(event->status);
+ wl_auth_event_t *auth_data;
+
+ if (status == WLC_E_STATUS_SUCCESS) {
+ auth_data = (wl_auth_event_t *)data;
+ if (auth_data->version == WL_AUTH_EVENT_DATA_V1) {
+ err = wl_bss_handle_sae_auth_v1(cfg, ndev, event, data);
+ } else if (auth_data->version == WL_AUTH_EVENT_DATA_V2) {
+ err = wl_bss_handle_sae_auth_v2(cfg, ndev, event, data);
+ } else {
+ WL_ERR(("unknown auth event data version %x\n",
+ auth_data->version));
+ err = BCME_VERSION;
+ }
+ }
+ WL_INFORM_MEM(("SAE AUTH status:%d ret: %d\n", status, err));
+ return err;
+}
+#endif /* WL_SAE */
+
+#if defined(DHD_ENABLE_BIGDATA_LOGGING)
+enum {
+ BIGDATA_ASSOC_REJECT_NO_ACK = 1,
+ BIGDATA_ASSOC_REJECT_FAIL = 2,
+ BIGDATA_ASSOC_REJECT_UNSOLICITED = 3,
+ BIGDATA_ASSOC_REJECT_TIMEOUT = 4,
+ BIGDATA_ASSOC_REJECT_ABORT = 5,
+	BIGDATA_ASSOC_REJECT_NO_NETWORKS = 6,
+ BIGDATA_ASSOC_REJECT_MAX = 50
+};
+
+int wl_get_connect_failed_status(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e)
+{
+ u32 status = ntoh32(e->status);
+
+ cfg->assoc_reject_status = 0;
+
+ if (status != WLC_E_STATUS_SUCCESS) {
+ WL_INFORM(("auth assoc status event=%d e->status %d e->reason %d \n",
+ ntoh32(cfg->event_auth_assoc.event_type),
+ (int)ntoh32(cfg->event_auth_assoc.status),
+ (int)ntoh32(cfg->event_auth_assoc.reason)));
+
+ /* Populate status based on cached auth/assoc status value */
+ switch ((int)ntoh32(cfg->event_auth_assoc.status)) {
+ case WLC_E_STATUS_NO_ACK:
+ cfg->assoc_reject_status = BIGDATA_ASSOC_REJECT_NO_ACK;
+ break;
+ case WLC_E_STATUS_FAIL:
+ cfg->assoc_reject_status = BIGDATA_ASSOC_REJECT_FAIL;
+ break;
+ case WLC_E_STATUS_UNSOLICITED:
+ cfg->assoc_reject_status = BIGDATA_ASSOC_REJECT_UNSOLICITED;
+ break;
+ case WLC_E_STATUS_TIMEOUT:
+ cfg->assoc_reject_status = BIGDATA_ASSOC_REJECT_TIMEOUT;
+ break;
+ case WLC_E_STATUS_ABORT:
+ cfg->assoc_reject_status = BIGDATA_ASSOC_REJECT_ABORT;
+ break;
+ case WLC_E_STATUS_SUCCESS:
+ if (status == WLC_E_STATUS_NO_NETWORKS) {
+ cfg->assoc_reject_status =
+					BIGDATA_ASSOC_REJECT_NO_NETWORKS;
+ break;
+ }
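+			/* fall through */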
+ default:
+ cfg->assoc_reject_status = BIGDATA_ASSOC_REJECT_MAX;
+ break;
+ }
+ if (cfg->assoc_reject_status) {
+ if (ntoh32(cfg->event_auth_assoc.event_type) == WLC_E_ASSOC) {
+ cfg->assoc_reject_status += BIGDATA_ASSOC_REJECT_MAX;
+ }
+ }
+ }
+
+ WL_INFORM_MEM(("assoc_reject_status %d \n", cfg->assoc_reject_status));
+
+ return 0;
+}
+
+s32 wl_cfg80211_get_connect_failed_status(struct net_device *dev, char* cmd, int total_len)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ int bytes_written = 0;
+
+ if (cfg == NULL) {
+ return -1;
+ }
+ bytes_written = snprintf(cmd, total_len, "assoc_reject.status %d",
+ cfg->assoc_reject_status);
+ WL_ERR(("cmd: %s \n", cmd));
+ return bytes_written;
+}
+#endif /* DHD_ENABLE_BIGDATA_LOGGING */
+
+static s32
+wl_notify_connect_status_ibss(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data)
+{
+ s32 err = 0;
+ u32 event = ntoh32(e->event_type);
+ u16 flags = ntoh16(e->flags);
+ u32 status = ntoh32(e->status);
+ bool active;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)
+ struct ieee80211_channel *channel = NULL;
+ struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+ chanspec_t chanspec;
+ u32 freq;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) */
+
+ if (event == WLC_E_JOIN) {
+ WL_INFORM_MEM(("[%s] joined in IBSS network\n", ndev->name));
+ }
+ if (event == WLC_E_START) {
+ WL_INFORM_MEM(("[%s] started IBSS network\n", ndev->name));
+ }
+ if (event == WLC_E_JOIN || event == WLC_E_START ||
+ (event == WLC_E_LINK && (flags == WLC_EVENT_MSG_LINK))) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)
+ err = wldev_iovar_getint(ndev, "chanspec", (s32 *)&chanspec);
+ if (unlikely(err)) {
+ WL_ERR(("Could not get chanspec %d\n", err));
+ return err;
+ }
+ chanspec = wl_chspec_driver_to_host(chanspec);
+ freq = wl_channel_to_frequency(wf_chspec_ctlchan(chanspec), CHSPEC_BAND(chanspec));
+ channel = ieee80211_get_channel(wiphy, freq);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) */
+ if (wl_get_drv_status(cfg, CONNECTED, ndev)) {
+ /* ROAM or Redundant */
+ u8 *cur_bssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
+ if (memcmp(cur_bssid, &e->addr, ETHER_ADDR_LEN) == 0) {
+ WL_DBG(("IBSS connected event from same BSSID("
+ MACDBG "), ignore it\n", MAC2STRDBG(cur_bssid)));
+ return err;
+ }
+ WL_MSG(ndev->name, "IBSS BSSID is changed from " MACDBG " to " MACDBG "\n",
+ MAC2STRDBG(cur_bssid), MAC2STRDBG((const u8 *)&e->addr));
+ wl_get_assoc_ies(cfg, ndev);
+ wl_update_prof(cfg, ndev, NULL, (const void *)&e->addr, WL_PROF_BSSID);
+ wl_update_bss_info(cfg, ndev, false);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)
+ cfg80211_ibss_joined(ndev, (const s8 *)&e->addr, channel, GFP_KERNEL);
+#else
+ cfg80211_ibss_joined(ndev, (const s8 *)&e->addr, GFP_KERNEL);
+#endif
+ }
+ else {
+ /* New connection */
+ WL_MSG(ndev->name, "IBSS connected to " MACDBG "\n",
+ MAC2STRDBG((const u8 *)&e->addr));
+ wl_link_up(cfg);
+ wl_get_assoc_ies(cfg, ndev);
+ wl_update_prof(cfg, ndev, NULL, (const void *)&e->addr, WL_PROF_BSSID);
+ wl_update_bss_info(cfg, ndev, false);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)
+ cfg80211_ibss_joined(ndev, (const s8 *)&e->addr, channel, GFP_KERNEL);
+#else
+ cfg80211_ibss_joined(ndev, (const s8 *)&e->addr, GFP_KERNEL);
+#endif
+ wl_set_drv_status(cfg, CONNECTED, ndev);
+ active = true;
+ wl_update_prof(cfg, ndev, NULL, (const void *)&active, WL_PROF_ACT);
+ }
+ } else if ((event == WLC_E_LINK && !(flags & WLC_EVENT_MSG_LINK)) ||
+ event == WLC_E_DEAUTH_IND || event == WLC_E_DISASSOC_IND) {
+ wl_clr_drv_status(cfg, CONNECTED, ndev);
+ wl_link_down(cfg);
+ wl_init_prof(cfg, ndev);
+ }
+ else if (event == WLC_E_SET_SSID && status == WLC_E_STATUS_NO_NETWORKS) {
+ WL_INFORM_MEM(("no action - join fail (IBSS mode)\n"));
+ }
+ else {
+ WL_DBG(("no action (IBSS mode)\n"));
+	}
+ return err;
+}
+
+#if defined(DHD_ENABLE_BIGDATA_LOGGING)
+#define WiFiALL_OUI "\x50\x6F\x9A" /* Wi-Fi Alliance OUI */
+#define WiFiALL_OUI_LEN 3
+#define WiFiALL_OUI_TYPE 16
+
+/* 11kv feature flag for big data */
+#define WL_BIGDATA_11KV_QBSSLOAD 0x00000001
+#define WL_BIGDATA_11KV_PROXYARP 0x00000002
+#define WL_BIGDATA_11KV_TFS 0x00000004
+#define WL_BIGDATA_11KV_SLEEP 0x00000008
+#define WL_BIGDATA_11KV_TIMBC 0x00000010
+#define WL_BIGDATA_11KV_BSSTRANS 0x00000020
+#define WL_BIGDATA_11KV_DMS 0x00000040
+#define WL_BIGDATA_11KV_LINK_MEA 0x00000080
+#define WL_BIGDATA_11KV_NBRREP 0x00000100
+#define WL_BIGDATA_11KV_BCNPASSIVE 0x00000200
+#define WL_BIGDATA_11KV_BCNACTIVE 0x00000400
+#define WL_BIGDATA_11KV_BCNTABLE 0x00000800
+#define WL_BIGDATA_11KV_BSSAAD 0x00001000
+#define WL_BIGDATA_11KV_MAX 0x00002000
+
+#define WL_BIGDATA_SUPPORT_11K 0x00000001
+#define WL_BIGDATA_SUPPORT_11V 0x00000002
+
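+/* Each entry maps a capability bit (valid only when the IE is at least
+ * octet_len bytes) to its WL_BIGDATA_11KV_* flag; consumed by
+ * wl_get_11kv_info() below.
+ */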
+typedef struct {
+ uint8 bitmap;
+ uint8 octet_len;
+ uint32 flag;
+} bigdata_11kv_t;
+
+bigdata_11kv_t bigdata_11k_info[] = {
+ {DOT11_RRM_CAP_LINK, DOT11_RRM_CAP_LEN, WL_BIGDATA_11KV_LINK_MEA},
+ {DOT11_RRM_CAP_NEIGHBOR_REPORT, DOT11_RRM_CAP_LEN, WL_BIGDATA_11KV_NBRREP},
+ {DOT11_RRM_CAP_BCN_PASSIVE, DOT11_RRM_CAP_LEN, WL_BIGDATA_11KV_BCNPASSIVE},
+ {DOT11_RRM_CAP_BCN_ACTIVE, DOT11_RRM_CAP_LEN, WL_BIGDATA_11KV_BCNACTIVE},
+ {DOT11_RRM_CAP_BCN_TABLE, DOT11_RRM_CAP_LEN, WL_BIGDATA_11KV_BCNTABLE},
+ {DOT11_RRM_CAP_BSSAAD, DOT11_RRM_CAP_LEN, WL_BIGDATA_11KV_BSSAAD},
+};
+
+bigdata_11kv_t bigdata_11v_info[] = {
+ {DOT11_EXT_CAP_PROXY_ARP, DOT11_EXTCAP_LEN_PROXY_ARP, WL_BIGDATA_11KV_PROXYARP},
+ {DOT11_EXT_CAP_TFS, DOT11_EXTCAP_LEN_TFS, WL_BIGDATA_11KV_TFS},
+ {DOT11_EXT_CAP_WNM_SLEEP, DOT11_EXTCAP_LEN_WNM_SLEEP, WL_BIGDATA_11KV_SLEEP},
+ {DOT11_EXT_CAP_TIMBC, DOT11_EXTCAP_LEN_TIMBC, WL_BIGDATA_11KV_TIMBC},
+ {DOT11_EXT_CAP_BSSTRANS_MGMT, DOT11_EXTCAP_LEN_BSSTRANS, WL_BIGDATA_11KV_BSSTRANS},
+ {DOT11_EXT_CAP_DMS, DOT11_EXTCAP_LEN_DMS, WL_BIGDATA_11KV_DMS}
+};
+
+static void
+wl_get_11kv_info(u8 *ie, u32 ie_len, uint8 *support_11kv, uint32 *flag_11kv)
+{
+ bcm_tlv_t *ie_11kv = NULL;
+ uint32 flag_11k = 0, flag_11v = 0;
+ int i;
+
+ /* parsing QBSS load ie */
+ if ((bcm_parse_tlvs(ie, (u32)ie_len,
+ DOT11_MNG_QBSS_LOAD_ID)) != NULL) {
+ flag_11k |= WL_BIGDATA_11KV_QBSSLOAD;
+ }
+
+ /* parsing RM IE for 11k */
+ if ((ie_11kv = bcm_parse_tlvs(ie, (u32)ie_len,
+ DOT11_MNG_RRM_CAP_ID)) != NULL) {
+ for (i = 0; i < ARRAYSIZE(bigdata_11k_info); i++) {
+ if ((ie_11kv->len >= bigdata_11k_info[i].octet_len) &&
+ isset(ie_11kv->data, bigdata_11k_info[i].bitmap)) {
+ flag_11k |= bigdata_11k_info[i].flag;
+ }
+ }
+ }
+
+ /* parsing extended cap. IE for 11v */
+ if ((ie_11kv = bcm_parse_tlvs(ie, (u32)ie_len,
+ DOT11_MNG_EXT_CAP_ID)) != NULL) {
+ for (i = 0; i < ARRAYSIZE(bigdata_11v_info); i++) {
+ if ((ie_11kv->len >= bigdata_11v_info[i].octet_len) &&
+ isset(ie_11kv->data, bigdata_11v_info[i].bitmap)) {
+ flag_11v |= bigdata_11v_info[i].flag;
+ }
+ }
+ }
+
+ if (flag_11k > 0) {
+ *support_11kv |= WL_BIGDATA_SUPPORT_11K;
+ }
+
+ if (flag_11v > 0) {
+ *support_11kv |= WL_BIGDATA_SUPPORT_11V;
+ }
+
+ *flag_11kv = flag_11k | flag_11v;
+}
+
+int wl_get_bss_info(struct bcm_cfg80211 *cfg, struct net_device *dev, const u8 *mac)
+{
+ s32 err = 0;
+ wl_bss_info_v109_1_t *bi;
+ uint8 eabuf[ETHER_ADDR_LEN];
+ u32 rate, channel, freq, supported_rate, nss = 0, mcs_map, mode_80211 = 0;
+ char rate_str[4];
+ u8 *ie = NULL;
+ u32 ie_len;
+ struct wiphy *wiphy;
+ struct cfg80211_bss *bss;
+ bcm_tlv_t *interworking_ie = NULL;
+ bcm_tlv_t *tlv_ie = NULL;
+ bcm_tlv_t *vht_ie = NULL;
+ vndr_ie_t *vndrie;
+ int16 ie_11u_rel_num = -1, ie_mu_mimo_cap = -1;
+ u32 i, remained_len, count = 0;
+ char roam_count_str[4], akm_str[4];
+ s32 val = 0;
+ uint8 support_11kv = 0;
+ uint32 flag_11kv = 0; /* bit flags of 11kv big data */
+ int cfg_bss_info_len = 0;
+
+ /* get BSS information */
+
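+	/* Default string: one 'x' placeholder per field of the bss_info
+	 * record built at the end of this function.
+	 */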
+ strlcpy(cfg->bss_info, "x x x x x x x x x x x x x x x x x", sizeof(cfg->bss_info));
+
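+	/* Per the WLC_GET_BSS_INFO usage here, the first 32-bit word of the
+	 * buffer carries the buffer length; the wl_bss_info record is read
+	 * back at offset 4 below.
+	 */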
+ *(u32 *) cfg->extra_buf = htod32(WL_EXTRA_BUF_MAX);
+
+ err = wldev_ioctl_get(dev, WLC_GET_BSS_INFO, cfg->extra_buf, WL_EXTRA_BUF_MAX);
+ if (unlikely(err)) {
+ WL_ERR(("Could not get bss info %d\n", err));
+ cfg->roam_count = 0;
+ return -1;
+ }
+
+ if (!mac) {
+ WL_ERR(("mac is null \n"));
+ cfg->roam_count = 0;
+ return -1;
+ }
+
+ memcpy(eabuf, mac, ETHER_ADDR_LEN);
+
+ bi = (wl_bss_info_v109_1_t *)(cfg->extra_buf + 4);
+ channel = wf_chspec_ctlchan(bi->chanspec);
+ freq = wl_channel_to_frequency(channel, CHSPEC_BAND(bi->chanspec));
+ rate = 0;
+ err = wldev_ioctl_get(dev, WLC_GET_RATE, &rate, sizeof(rate));
+ if (err) {
+ WL_ERR(("Could not get rate (%d)\n", err));
+ snprintf(rate_str, sizeof(rate_str), "x"); /* Unknown */
+
+ } else {
+ rate = dtoh32(rate);
+ snprintf(rate_str, sizeof(rate_str), "%d", (rate/2));
+ }
+
+ /* supported maximum rate */
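+	/* rateset entries are in 500 kbps units with the basic-rate flag in
+	 * bit 7, hence the 0x7f mask and the divide-by-2 to get Mbps.
+	 */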
+ supported_rate = (bi->rateset.rates[bi->rateset.count - 1] & 0x7f) / 2;
+
+ if (supported_rate < 12) {
+ mode_80211 = BIGDATA_DOT11_11B_MODE; /* 11b maximum rate is 11Mbps. 11b mode */
+ } else {
+ /* It's not HT Capable case. */
+ if (channel > 14) {
+ mode_80211 = BIGDATA_DOT11_11A_MODE; /* 11a mode */
+ } else {
+ mode_80211 = BIGDATA_DOT11_11G_MODE; /* 11g mode */
+ }
+ }
+
+ if (bi->n_cap) {
+ /* check Rx MCS Map for HT */
+ nss = 0;
+ mode_80211 = BIGDATA_DOT11_11N_MODE;
+ for (i = 0; i < MAX_STREAMS_SUPPORTED; i++) {
+ int8 bitmap = DOT11_HT_MCS_RATE_MASK;
+ if (i == MAX_STREAMS_SUPPORTED-1) {
+ bitmap = DOT11_RATE_MASK;
+ }
+ if (bi->basic_mcs[i] & bitmap) {
+ nss++;
+ }
+ }
+ }
+
+ if (bi->vht_cap) {
+ nss = 0;
+ mode_80211 = BIGDATA_DOT11_11AC_MODE;
+ for (i = 1; i <= VHT_CAP_MCS_MAP_NSS_MAX; i++) {
+ mcs_map = VHT_MCS_MAP_GET_MCS_PER_SS(i, dtoh16(bi->vht_rxmcsmap));
+ if (mcs_map != VHT_CAP_MCS_MAP_NONE) {
+ nss++;
+ }
+ }
+ }
+
+#if defined(WL11AX)
+ if (bi->he_cap) {
+ nss = 0;
+ mode_80211 = BIGDATA_DOT11_11AX_MODE;
+ for (i = 1; i <= HE_MCS_MAP_NSS_MAX; i++) {
+ mcs_map = HE_MCS_NSS_GET_MCS(i, dtoh32(bi->he_rxmcsmap));
+ if (mcs_map != HE_MCS_CODE_NONE) {
+ nss++;
+ }
+ }
+ }
+#endif /* WL11AX */
+
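+	/* The stream count is reported zero-based in the bss_info string
+	 * (an assumption about the big data format), hence the decrement.
+	 */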
+ if (nss) {
+ nss = nss - 1;
+ }
+
+ wiphy = bcmcfg_to_wiphy(cfg);
+ bss = CFG80211_GET_BSS(wiphy, NULL, eabuf, bi->SSID, bi->SSID_len);
+ if (!bss) {
+ WL_ERR(("Could not find the AP\n"));
+ } else {
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+#if defined(WL_CFG80211_P2P_DEV_IF)
+ ie = (u8 *)bss->ies->data;
+ ie_len = bss->ies->len;
+#else
+ ie = bss->information_elements;
+ ie_len = bss->len_information_elements;
+#endif /* WL_CFG80211_P2P_DEV_IF */
+ GCC_DIAGNOSTIC_POP();
+ }
+
+ if (ie) {
+ ie_mu_mimo_cap = 0;
+ ie_11u_rel_num = 0;
+
+ if (bi->vht_cap) {
+ if ((vht_ie = bcm_parse_tlvs(ie, ie_len,
+ DOT11_MNG_VHT_CAP_ID)) != NULL) {
+ if (vht_ie->len >= VHT_CAP_IE_LEN) {
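+					/* VHT Capabilities Info bit 19
+					 * (byte 2, bit 3): MU beamformer
+					 * capable.
+					 */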
+ ie_mu_mimo_cap = (vht_ie->data[2] & 0x08) >> 3;
+ }
+ }
+ }
+
+ if ((interworking_ie = bcm_parse_tlvs(ie, ie_len,
+ DOT11_MNG_INTERWORKING_ID)) != NULL) {
+ if ((tlv_ie = bcm_parse_tlvs(ie, ie_len, DOT11_MNG_VS_ID)) != NULL) {
+ remained_len = ie_len;
+
+ while (tlv_ie) {
+ if (count > MAX_VNDR_IE_NUMBER)
+ break;
+
+ if (tlv_ie->id == DOT11_MNG_VS_ID) {
+ vndrie = (vndr_ie_t *) tlv_ie;
+
+ if (vndrie->len < (VNDR_IE_MIN_LEN + 1)) {
+ WL_ERR(("wl_get_bss_info: invalid vndr ie."
+ "length is too small %d\n",
+ vndrie->len));
+ break;
+ }
+
+ if (!bcmp(vndrie->oui,
+ (u8*)WiFiALL_OUI, WiFiALL_OUI_LEN) &&
+ (vndrie->data[0] == WiFiALL_OUI_TYPE))
+ {
+							WL_ERR(("Found Wi-FiAll OUI.\n"));
+ ie_11u_rel_num = vndrie->data[1];
+ ie_11u_rel_num = (ie_11u_rel_num & 0xf0)>>4;
+ ie_11u_rel_num += 1;
+
+ break;
+ }
+ }
+ count++;
+ tlv_ie = bcm_next_tlv(tlv_ie, &remained_len);
+ }
+ }
+ }
+
+ /* get 11kv information from ie of current bss */
+ wl_get_11kv_info(ie, ie_len, &support_11kv, &flag_11kv);
+ }
+
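+	/* Replace spaces so the SSID remains a single token in the
+	 * space-delimited bss_info string.
+	 */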
+ for (i = 0; i < bi->SSID_len; i++) {
+ if (bi->SSID[i] == ' ') {
+ bi->SSID[i] = '_';
+ }
+ }
+
+ /* 0 : None, 1 : OKC, 2 : FT, 3 : CCKM */
+ err = wldev_iovar_getint(dev, "wpa_auth", &val);
+ if (unlikely(err)) {
+ WL_ERR(("could not get wpa_auth (%d)\n", err));
+ snprintf(akm_str, sizeof(akm_str), "x"); /* Unknown */
+ } else {
+ WL_ERR(("wpa_auth val %d \n", val));
+ if (val & WPA2_AUTH_FT) {
+ snprintf(akm_str, sizeof(akm_str), "2");
+ } else if (val & (WPA_AUTH_UNSPECIFIED | WPA2_AUTH_UNSPECIFIED)) {
+ snprintf(akm_str, sizeof(akm_str), "1");
+ } else {
+ snprintf(akm_str, sizeof(akm_str), "0");
+ }
+ }
+
+ if (cfg->roam_offload) {
+ snprintf(roam_count_str, sizeof(roam_count_str), "x"); /* Unknown */
+ } else {
+ snprintf(roam_count_str, sizeof(roam_count_str), "%d", cfg->roam_count);
+ }
+ cfg->roam_count = 0;
+
+ WL_ERR(("BSSID:" MACDBG " SSID %s \n", MAC2STRDBG(eabuf), "*****"));
+ WL_ERR(("freq:%d, BW:%s, RSSI:%d dBm, Rate:%d Mbps, 11mode:%d, stream:%d,"
+ "MU-MIMO:%d, Passpoint:%d, SNR:%d, Noise:%d, \n"
+ "akm:%s, roam:%s, 11kv:%d/%d \n",
+ freq, wf_chspec_to_bw_str(bi->chanspec),
+ dtoh32(bi->RSSI), (rate / 2), mode_80211, nss,
+ ie_mu_mimo_cap, ie_11u_rel_num, bi->SNR, bi->phy_noise,
+ akm_str, roam_count_str, support_11kv, flag_11kv));
+
+ if (ie) {
+ snprintf(cfg->bss_info, GET_BSS_INFO_LEN,
+ MACOUI" %d %s %d %s %d %d %d %d %d %d %s %s %d %d",
+ MACOUI2STR(eabuf), freq, wf_chspec_to_bw_str(bi->chanspec),
+ dtoh32(bi->RSSI), rate_str, mode_80211, nss, ie_mu_mimo_cap,
+ ie_11u_rel_num, bi->SNR, bi->phy_noise, akm_str, roam_count_str,
+ support_11kv, flag_11kv);
+ } else {
+		/* ie_mu_mimo_cap and ie_11u_rel_num are unknown. */
+ snprintf(cfg->bss_info, GET_BSS_INFO_LEN,
+ MACOUI" %d %s %d %s %d %d x x %d %d %s %s x x",
+ MACOUI2STR(eabuf), freq, wf_chspec_to_bw_str(bi->chanspec),
+ dtoh32(bi->RSSI), rate_str, mode_80211, nss, bi->SNR,
+ bi->phy_noise, akm_str, roam_count_str);
+ }
+
+ cfg_bss_info_len = strlen(cfg->bss_info);
+ if (GET_BSS_INFO_LEN > cfg_bss_info_len) {
+ uint16 full_cnt = 0, partial_cnt = 0;
+ bool cnt_valid = FALSE;
+
+#if defined(DHD_PUB_ROAM_EVT)
+ wl_roam_stats_v1_t *roam_elem =
+ (wl_roam_stats_v1_t *)dhd_get_roam_evt((dhd_pub_t *)cfg->pub);
+
+ if (roam_elem && roam_elem->version == WL_ROAM_STATS_VER_1) {
+ wl_roam_stats_v1_t *roam_elem_v1;
+ roam_elem_v1 = (wl_roam_stats_v1_t *)(uintptr_t)roam_elem;
+
+ cnt_valid = TRUE;
+ full_cnt = roam_elem_v1->full_roam_scan_cnt;
+ partial_cnt = roam_elem_v1->partial_roam_scan_cnt;
+ }
+#endif /* DHD_PUB_ROAM_EVT */
+ if (cnt_valid) {
+ WL_ERR(("GET_BSS: full roam scan count:%d partial roam scan count:%d\n",
+ full_cnt, partial_cnt));
+ snprintf(&cfg->bss_info[cfg_bss_info_len],
+ GET_BSS_INFO_LEN - cfg_bss_info_len, " %d %d",
+ full_cnt, partial_cnt);
+ } else {
+ WL_ERR(("GET_BSS: roam scan count invalid\n"));
+ snprintf(&cfg->bss_info[cfg_bss_info_len],
+ GET_BSS_INFO_LEN - cfg_bss_info_len, " x x");
+ }
+ } else {
+		WL_ERR(("Buffer too short to save roam info\n"));
+ }
+
+ CFG80211_PUT_BSS(wiphy, bss);
+
+ return 0;
+}
+
+s32 wl_cfg80211_get_bss_info(struct net_device *dev, char* cmd, int total_len)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+
+ if (cfg == NULL) {
+ return -1;
+ }
+
+ if (total_len < GET_BSS_INFO_LEN) {
+		WL_ERR(("wl_cfg80211_get_bss_info: buffer insufficient %d\n", total_len));
+ return -1;
+ }
+
+ bzero(cmd, total_len);
+ memcpy(cmd, cfg->bss_info, GET_BSS_INFO_LEN);
+
+ WL_ERR_KERN(("cmd: %s \n", cmd));
+
+ return GET_BSS_INFO_LEN;
+}
+#endif /* DHD_ENABLE_BIGDATA_LOGGING */
+
+void wl_cfg80211_disassoc(struct net_device *ndev, uint32 reason)
+{
+ scb_val_t scbval;
+ s32 err;
+#ifdef BCMDONGLEHOST
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+
+ BCM_REFERENCE(cfg);
+ BCM_REFERENCE(dhdp);
+ DHD_STATLOG_CTRL(dhdp, ST(DISASSOC_INT_START),
+ dhd_net2idx(dhdp->info, ndev), WLAN_REASON_DEAUTH_LEAVING);
+#endif /* BCMDONGLEHOST */
+
+ memset_s(&scbval, sizeof(scb_val_t), 0x0, sizeof(scb_val_t));
+ scbval.val = htod32(reason);
+ err = wldev_ioctl_set(ndev, WLC_DISASSOC, &scbval, sizeof(scb_val_t));
+ if (err < 0) {
+ WL_ERR(("WLC_DISASSOC error %d\n", err));
+ } else {
+ WL_INFORM_MEM(("wl disassoc. reason:%d\n", reason));
+ }
+}
+void wl_cfg80211_del_all_sta(struct net_device *ndev, uint32 reason)
+{
+ struct net_device *dev;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ scb_val_t scb_val;
+ int err;
+ char mac_buf[MAX_NUM_OF_ASSOCIATED_DEV *
+ sizeof(struct ether_addr) + sizeof(uint)] = {0};
+ struct maclist *assoc_maclist = (struct maclist *)mac_buf;
+ int num_associated = 0;
+
+ dev = ndev_to_wlc_ndev(ndev, cfg);
+
+ if (p2p_is_on(cfg)) {
+ /* Suspend P2P discovery search-listen to prevent it from changing the
+ * channel.
+ */
+ if ((wl_cfgp2p_discover_enable_search(cfg, false)) < 0) {
+ WL_ERR(("Can not disable discovery mode\n"));
+ return;
+ }
+ }
+
+ assoc_maclist->count = MAX_NUM_OF_ASSOCIATED_DEV;
+ err = wldev_ioctl_get(ndev, WLC_GET_ASSOCLIST,
+ assoc_maclist, sizeof(mac_buf));
+ if (err < 0)
+ WL_ERR(("WLC_GET_ASSOCLIST error %d\n", err));
+ else
+ num_associated = assoc_maclist->count;
+
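+	/* Broadcast MAC address: deauthenticate every associated STA with
+	 * the given reason code.
+	 */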
+ memset(scb_val.ea.octet, 0xff, ETHER_ADDR_LEN);
+	scb_val.val = htod32(reason);
+ err = wldev_ioctl_set(dev, WLC_SCB_DEAUTHENTICATE_FOR_REASON, &scb_val,
+ sizeof(scb_val_t));
+ if (err < 0) {
+ WL_ERR(("WLC_SCB_DEAUTHENTICATE_FOR_REASON err %d\n", err));
+ }
+
+	/* WAR: Wait for the deauth event to arrive. The supplicant deletes the
+	 * iface immediately, and we would have a problem sending the deauth
+	 * frame if we deleted the bss in firmware first. No additional delay
+	 * is needed for this WAR during a P2P connection.
+	 *
+	 * The supplicant calls this function with BCAST after calling
+	 * wl_cfg80211_del_station() for each GC station address, so the
+	 * 400 ms delay runs only once when the GO disconnects all GCs.
+	 */
+ if (num_associated > 0)
+ wl_delay(400);
+
+ return;
+}
+/* API to handle a deauth from the AP.
+ * For now we delete the PMKID cache in DHD/FW
+ * in case the current connection is using SAE authentication.
+ */
+static s32
+wl_cfg80211_handle_deauth_ind(struct bcm_cfg80211 *cfg, wl_assoc_status_t *as)
+{
+ int err = BCME_OK;
+#ifdef WL_SAE
+ struct net_device *ndev = as->ndev;
+ const wl_event_msg_t *e = as->event_msg;
+ uint8 bssid[ETHER_ADDR_LEN];
+ struct cfg80211_pmksa pmksa;
+ s32 val = 0;
+ struct wlc_ssid *curssid;
+ pmkid_list_v3_t *spmk_list = &cfg->spmk_info_list->pmkids;
+ pmkid_v3_t *t_pmkid = NULL;
+ int idx;
+ bool bFound = FALSE;
+#endif /* WL_SAE */
+
+ if (as->reason > WLC_E_DEAUTH_MAX_REASON) {
+		/* Some APs send deauth with an invalid reason. If the reason is
+		 * in the 0x8XXX range the framework triggers recovery, since it
+		 * checks the reason against HANG_REASON_MASK (0x8000).
+		 */
+		WL_ERR(("Event %d original reason is %d, "
+			"changed to WLC_E_DEAUTH_MAX_REASON\n", as->event_type, as->reason));
+ as->reason = WLC_E_DEAUTH_MAX_REASON;
+ }
+#ifdef WL_SAE
+ err = wldev_iovar_getint(ndev, "wpa_auth", &val);
+ if (unlikely(err)) {
+ WL_ERR(("could not get wpa_auth (%d)\n", err));
+ goto done;
+ }
+ if (val == WPA3_AUTH_SAE_PSK) {
+ (void)memcpy_s(bssid, ETHER_ADDR_LEN,
+ (const uint8*)&e->addr, ETHER_ADDR_LEN);
+ memset_s(&pmksa, sizeof(pmksa), 0, sizeof(pmksa));
+ pmksa.bssid = bssid;
+ WL_INFORM_MEM(("Deleting the PMKSA for SAE AP "MACDBG,
+ MAC2STRDBG(e->addr.octet)));
+ wl_cfg80211_del_pmksa(cfg->wdev->wiphy, ndev, &pmksa);
+ curssid = wl_read_prof(cfg, ndev, WL_PROF_SSID);
+ for (idx = 0; idx < spmk_list->count; idx++) {
+ t_pmkid = &spmk_list->pmkid[idx];
+ if (curssid->SSID_len == t_pmkid->ssid_len &&
+ !memcmp(curssid->SSID, t_pmkid->ssid, curssid->SSID_len)) {
+ bFound = TRUE;
+ break;
+ }
+ }
+ if (!bFound) {
+ goto done;
+ }
+ for (; idx < spmk_list->count - 1; idx++) {
+ memcpy_s(&spmk_list->pmkid[idx], sizeof(pmkid_v3_t),
+ &spmk_list->pmkid[idx + 1], sizeof(pmkid_v3_t));
+ }
+ spmk_list->count--;
+ }
+done:
+#endif /* WL_SAE */
+ return err;
+}
+
+static void
+wl_cache_assoc_resp_ies(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data)
+{
+ struct wl_connect_info *conn_info = wl_to_conn(cfg);
+ u32 datalen = ntoh32(e->datalen);
+ u32 event_type = ntoh32(e->event_type);
+
+ if (data && datalen <= sizeof(conn_info->resp_ie)) {
+ conn_info->resp_ie_len = datalen;
+ WL_DBG((" assoc resp IES len = %d\n", conn_info->resp_ie_len));
+ bzero(conn_info->resp_ie, sizeof(conn_info->resp_ie));
+ (void)memcpy_s(conn_info->resp_ie, sizeof(conn_info->resp_ie),
+ data, datalen);
+
+		WL_INFORM_MEM(("[%s] cached assoc resp ies, "
+			"event %d reason=%d ie_len=%d from " MACDBG "\n",
+ ndev->name, event_type, ntoh32(e->reason), datalen,
+ MAC2STRDBG((const u8*)(&e->addr))));
+ }
+}
+
+char *
+wl_get_link_action_str(u16 link_action)
+{
+ switch (link_action) {
+ case WL_LINK_NONE:
+ return "LINK_NONE";
+ case WL_LINK_ASSOC_FAIL:
+ return "ASSOC_FAIL";
+ case WL_LINK_ASSOC_DONE:
+ return "ASSOC_DONE";
+ case WL_LINK_DOWN:
+ return "LINK_DOWN";
+ case WL_LINK_ROAM_DONE:
+ return "ROAM_DONE";
+ case WL_LINK_FORCE_DEAUTH:
+ return "SEND_DEAUTH";
+ default:
+			return "UNKNOWN_STATE";
+ }
+}
+
+char *
+wl_get_assoc_state_str(u16 assoc_state)
+{
+ switch (assoc_state) {
+ case WL_STATE_ASSOC_IDLE:
+ return "ASSOC_IDLE";
+ case WL_STATE_ASSOCIATING:
+ return "ASSOCIATING";
+ case WL_STATE_ASSOCIATED:
+ return "ASSOCIATED";
+ default:
+			return "UNKNOWN_STATE";
+ }
+}
+
+static u32
+wl_set_link_action(wl_assoc_state_t assoc_state, bool link_up)
+{
+ wl_link_action_t action = WL_LINK_NONE;
+
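+	/* Map (assoc_state, link_up) to an action:
+	 *   ASSOCIATING + up -> ASSOC_DONE,  ASSOCIATING + down -> ASSOC_FAIL
+	 *   ASSOCIATED  + up -> ROAM_DONE,   ASSOCIATED  + down -> LINK_DOWN
+	 *   ASSOC_IDLE  + up -> FORCE_DEAUTH (host/fw state mismatch)
+	 */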
+ switch (assoc_state) {
+ case WL_STATE_ASSOCIATING:
+ if (link_up) {
+ action = WL_LINK_ASSOC_DONE;
+ } else {
+ action = WL_LINK_ASSOC_FAIL;
+ }
+ break;
+ case WL_STATE_ASSOCIATED:
+ if (link_up) {
+ action = WL_LINK_ROAM_DONE;
+ } else {
+ action = WL_LINK_DOWN;
+ }
+ break;
+ case WL_STATE_ASSOC_IDLE:
+ if (link_up) {
+			/* Link up while the cfg80211 state is not
+			 * ASSOCIATING/ASSOCIATED; sync up the fw
+			 * by disconnecting.
+			 */
+ WL_ERR(("Unexpected link up\n"));
+ action = WL_LINK_FORCE_DEAUTH;
+ }
+ break;
+ default:
+ WL_ERR(("unknown state:%d\n", assoc_state));
+ action = WL_LINK_NONE;
+ }
+
+ return action;
+}
+
+static void
+wl_cfg8021_unlink_bss(struct bcm_cfg80211 *cfg, struct net_device *ndev, u8 *bssid)
+{
+ struct cfg80211_bss *bss;
+ wlc_ssid_t *ssid;
+ struct wireless_dev *wdev = ndev->ieee80211_ptr;
+
+ ssid = (struct wlc_ssid *)wl_read_prof(cfg, ndev, WL_PROF_SSID);
+ if (ssid && bssid) {
+ bss = CFG80211_GET_BSS(wdev->wiphy, NULL, bssid, ssid->SSID, ssid->SSID_len);
+ if (bss) {
+ cfg80211_unlink_bss(wdev->wiphy, bss);
+ CFG80211_PUT_BSS(wdev->wiphy, bss);
+ WL_INFORM_MEM(("bss unlinked"));
+ }
+ }
+}
+
+static s32
+wl_post_linkdown_ops(struct bcm_cfg80211 *cfg,
+ wl_assoc_status_t *as, struct net_device *ndev)
+{
+ s32 ret = BCME_OK;
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+
+ /* Common Code for connect failure & link down */
+ BCM_REFERENCE(dhdp);
+
+ WL_INFORM_MEM(("link down. connection state bit status: [%u:%u:%u:%u]\n",
+ wl_get_drv_status(cfg, CONNECTING, ndev),
+ wl_get_drv_status(cfg, CONNECTED, ndev),
+ wl_get_drv_status(cfg, DISCONNECTING, ndev),
+ wl_get_drv_status(cfg, NESTED_CONNECT, ndev)));
+
+ /* clear timestamps on disconnect */
+ CLR_TS(cfg, conn_start);
+ CLR_TS(cfg, conn_cmplt);
+ CLR_TS(cfg, authorize_start);
+ CLR_TS(cfg, authorize_cmplt);
+
+ wl_link_down(cfg);
+ wl_clr_drv_status(cfg, AUTHORIZED, ndev);
+ wl_clr_drv_status(cfg, CONNECTED, ndev);
+ wl_clr_drv_status(cfg, DISCONNECTING, ndev);
+
+#ifdef DBG_PKT_MON
+ if (ndev == bcmcfg_to_prmry_ndev(cfg)) {
+ /* Stop packet monitor */
+ DHD_DBG_PKT_MON_STOP(dhdp);
+ }
+#endif /* DBG_PKT_MON */
+
+ /* Flush preserve logs */
+ wl_flush_fw_log_buffer(bcmcfg_to_prmry_ndev(cfg),
+ FW_LOGSET_MASK_ALL);
+
+#ifdef WL_GET_RCC
+ wl_android_get_roam_scan_chanlist(cfg);
+#endif /* WL_GET_RCC */
+
+#ifdef WES_SUPPORT
+ if (cfg->ncho_mode) {
+ /* Turn off NCHO mode */
+ wl_android_set_ncho_mode(ndev, FALSE);
+ }
+#endif /* WES_SUPPORT */
+
+#ifdef WLTDLS
+ wl_cfg80211_tdls_config(cfg, TDLS_STATE_DISCONNECT, false);
+#endif /* WLTDLS */
+
+ /* clear RSSI monitor, framework will set new cfg */
+#ifdef RSSI_MONITOR_SUPPORT
+ dhd_dev_set_rssi_monitor_cfg(bcmcfg_to_prmry_ndev(cfg),
+ FALSE, 0, 0);
+#endif /* RSSI_MONITOR_SUPPORT */
+
+#ifdef WBTEXT
+ wl_cfg80211_wbtext_reset_conf(cfg, as->ndev);
+#endif /* WBTEXT */
+
+#ifdef P2PLISTEN_AP_SAMECHN
+ if (as->ndev == bcmcfg_to_prmry_ndev(cfg)) {
+ wl_cfg80211_set_p2p_resp_ap_chn(as->ndev, 0);
+ cfg->p2p_resp_apchn_status = false;
+ WL_DBG(("p2p_resp_apchn_status Turn OFF \n"));
+ }
+#endif /* P2PLISTEN_AP_SAMECHN */
+
+#ifdef WL_NAN
+ if (wl_cfgnan_is_enabled(cfg)) {
+ wl_cfgnan_get_stats(cfg);
+ }
+#endif /* WL_NAN */
+
+ return ret;
+}
+
+static s32
+wl_handle_assoc_fail(struct bcm_cfg80211 *cfg, wl_assoc_status_t *as, bool completed)
+{
+ s32 ret = BCME_OK;
+ struct net_device *ndev = as->ndev;
+ u8 *connect_req_bssid = wl_read_prof(cfg, ndev, WL_PROF_LATEST_BSSID);
+#ifdef BCMDONGLEHOST
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+#endif /* BCMDONGLEHOST */
+
+#ifdef BCMDONGLEHOST
+ BCM_REFERENCE(dhdp);
+#endif /* BCMDONGLEHOST */
+ WL_MSG(ndev->name, "assoc fail Reason: %s from %pM\n",
+ bcmevent_get_name(as->event_type), as->addr);
+
+ if (connect_req_bssid && !ETHER_ISNULLADDR(as->addr) &&
+ memcmp(&as->addr, connect_req_bssid, ETH_ALEN) != 0) {
+ WL_ERR(("Event:%d Wrong bssid:" MACDBG "\n", as->event_type, MAC2STRDBG(as->addr)));
+ return BCME_OK;
+ }
+#ifdef WL_EXT_IAPSTA
+ {
+ wl_event_msg_t emsg;
+ memcpy(&emsg, as->event_msg, sizeof(wl_event_msg_t));
+ ret = wl_ext_in4way_sync(ndev, STA_REASSOC_RETRY,
+ WL_EXT_STATUS_RECONNECT, &emsg);
+ if (ret)
+ return 0;
+ }
+ wl_ext_iapsta_enable_master_if(ndev, FALSE);
+#endif
+
+ /* A connect request in Connected/Connecting will have the
+ * NESTED_CONNECT state set.
+ */
+ if (wl_get_drv_status(cfg, NESTED_CONNECT, ndev) &&
+ wl_get_drv_status(cfg, DISCONNECTING, ndev)) {
+ wl_clr_drv_status(cfg, DISCONNECTING, ndev);
+ wl_clr_drv_status(cfg, NESTED_CONNECT, ndev);
+ WL_INFORM_MEM(("Disconnect from nested connect context\n"));
+#if defined(BSSCACHE)
+ wl_delete_disconnected_bss_cache(&cfg->g_bss_cache_ctrl,
+ (u8*)(&as->event_msg->addr));
+#endif
+#ifdef WL_EXT_IAPSTA
+ wl_ext_in4way_sync(ndev, STA_NO_BTC_IN4WAY, WL_EXT_STATUS_DISCONNECTED, NULL);
+#endif
+ return BCME_OK;
+ }
+
+#ifdef WL_WPS_SYNC
+ if (wl_wps_session_update(ndev,
+ WPS_STATE_CONNECT_FAIL, as->addr) == BCME_UNSUPPORTED) {
+ /* Skip the event handling */
+ return BCME_OK;
+ }
+#endif /* WL_WPS_SYNC */
+
+#ifdef BCMDONGLEHOST
+ DHD_STATLOG_CTRL(dhdp, ST(DISASSOC_INT_START),
+ dhd_net2idx(dhdp->info, ndev), 0);
+#endif /* BCMDONGLEHOST */
+
+#if defined(CONFIG_TIZEN)
+ net_stat_tizen_update_wifi(ndev, WIFISTAT_CONNECTION_FAIL);
+#endif /* CONFIG_TIZEN */
+
+ /* if link down, bsscfg is disabled */
+ if (ndev != bcmcfg_to_prmry_ndev(cfg)) {
+ complete(&cfg->iface_disable);
+ }
+
+ /* Report connect result to upper layer */
+ ret = wl_bss_connect_done(cfg, ndev, as->event_msg, as->data, false);
+ if (unlikely(ret)) {
+ WL_ERR(("connect result reporting failed.\n"));
+ }
+
+ /* Issue WLC_DISASSOC to prevent FW roam attempts. Do not issue
+ * WLC_DISASSOC again if the linkdown is generated due to local
+ * disassoc, to avoid connect-disconnect loop.
+ */
+ if (!((as->event_type == WLC_E_LINK) && (as->reason == WLC_E_LINK_DISASSOC))) {
+ wl_cfg80211_disassoc(ndev, WLAN_REASON_DEAUTH_LEAVING);
+ }
+
+ /* Common handler for assoc fail/link down */
+ wl_post_linkdown_ops(cfg, as, as->ndev);
+
+ return ret;
+}
+
+s32
+wl_get_connected_bssid(struct bcm_cfg80211 *cfg, struct net_device *ndev, u8 *mac_addr)
+{
+ u8 bssid_dongle[ETH_ALEN] = {0};
+ u8 *curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
+
+ if (!mac_addr) {
+ return -EINVAL;
+ }
+
+ /* roam offload does not sync BSSID always, get it from dongle */
+ if (cfg->roam_offload) {
+ if (wldev_ioctl_get(ndev, WLC_GET_BSSID, bssid_dongle,
+ sizeof(bssid_dongle)) == BCME_OK) {
+ /* if not roam case, it would return null bssid */
+ if (!ETHER_ISNULLADDR(bssid_dongle)) {
+ curbssid = (u8 *)&bssid_dongle;
+ }
+ }
+ }
+
+ if (curbssid) {
+ (void)memcpy_s(mac_addr, ETH_ALEN, curbssid, ETH_ALEN);
+ }
+ return BCME_OK;
+}
+
+#ifdef WBTEXT
+static void
+wl_cfg80211_wbtext_reset_conf(struct bcm_cfg80211 *cfg, struct net_device *ndev)
+{
+#ifdef BCMDONGLEHOST
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+ s32 err;
+
+ /* when STA was disconnected, clear join pref and set wbtext */
+ if (ndev->ieee80211_ptr->iftype == NL80211_IFTYPE_STATION &&
+ dhdp->wbtext_policy
+ == WL_BSSTRANS_POLICY_PRODUCT_WBTEXT) {
+ char smbuf[WLC_IOCTL_SMLEN];
+ if ((err = wldev_iovar_setbuf(ndev, "join_pref",
+ NULL, 0, smbuf, sizeof(smbuf), NULL)) == BCME_OK) {
+ if ((err = wldev_iovar_setint(ndev, "wnm_bsstrans_resp",
+ dhdp->wbtext_policy)) == BCME_OK) {
+ wl_cfg80211_wbtext_set_default(ndev);
+ } else {
+ WL_ERR(("Failed to set wbtext = %d\n", err));
+ }
+ } else {
+ WL_ERR(("Failed to clear join pref = %d\n", err));
+ }
+ wl_cfg80211_wbtext_clear_bssid_list(cfg);
+ } else {
+ WL_ERR(("wbtext not applicable\n"));
+ }
+#endif /* BCMDONGLEHOST */
+}
+#endif /* WBTEXT */
+
+static s32
+wl_handle_link_down(struct bcm_cfg80211 *cfg, wl_assoc_status_t *as)
+{
+ s32 ret = BCME_OK;
+#if defined(BCMDONGLEHOST)
+ dhd_pub_t *dhdp = (dhd_pub_t *)cfg->pub;
+#endif /* BCMDONGLEHOST */
+ struct net_device *ndev = as->ndev;
+ u32 datalen = as->data_len;
+ u32 event = as->event_type;
+ u8 *data = as->data;
+ u8 *ie_ptr = NULL;
+ u16 ie_len = 0;
+ bool loc_gen = 0;
+ u16 reason = as->reason;
+
+#ifdef BCMDONGLEHOST
+ BCM_REFERENCE(dhdp);
+#endif /* BCMDONGLEHOST */
+ WL_MSG(ndev->name, "Link down: %s(%d), reason %d from %pM\n",
+ bcmevent_get_name(as->event_type), event, reason, as->addr);
+ if ((BCME_OK != wl_get_connected_bssid(cfg, ndev, as->curbssid))) {
+ WL_ERR(("bssid not found\n"));
+ return -1;
+ }
+#ifdef WL_EXT_IAPSTA
+ {
+ wl_event_msg_t emsg;
+ memcpy(&emsg, as->event_msg, sizeof(wl_event_msg_t));
+ ret = wl_ext_in4way_sync(ndev, STA_REASSOC_RETRY,
+ WL_EXT_STATUS_RECONNECT, &emsg);
+ if (ret)
+ return 0;
+ }
+#endif
+
+ if (memcmp(as->curbssid, as->addr, ETHER_ADDR_LEN) != 0) {
+		WL_ERR(("BSSID of event is not the connected BSSID "
+			"(ignore it) cur: " MACDBG
+ " event: " MACDBG"\n",
+ MAC2STRDBG(as->curbssid),
+ MAC2STRDBG((const u8*)(&as->addr))));
+ return 0;
+ }
+
+ /* A connect request in Connected/Connecting will have the
+ * NESTED_CONNECT state set.
+ */
+ if (wl_get_drv_status(cfg, NESTED_CONNECT, ndev) &&
+ wl_get_drv_status(cfg, DISCONNECTING, ndev)) {
+ wl_clr_drv_status(cfg, DISCONNECTING, ndev);
+ wl_clr_drv_status(cfg, NESTED_CONNECT, ndev);
+ WL_INFORM_MEM(("Disconnect from nested connect context\n"));
+#if defined(BSSCACHE)
+ wl_delete_disconnected_bss_cache(&cfg->g_bss_cache_ctrl,
+ (u8*)(&as->event_msg->addr));
+#endif
+#ifdef WL_EXT_IAPSTA
+ wl_ext_in4way_sync(ndev, STA_NO_BTC_IN4WAY, WL_EXT_STATUS_DISCONNECTED, NULL);
+ wl_ext_iapsta_restart_master(ndev);
+#endif
+ return 0;
+ }
+
+#ifdef WL_WPS_SYNC
+ if (wl_wps_session_update(ndev,
+ WPS_STATE_LINKDOWN, as->addr) == BCME_UNSUPPORTED) {
+ /* Skip event handling */
+ return 0;
+ }
+#endif /* WL_WPS_SYNC */
+
+ if (!wl_get_drv_status(cfg, DISCONNECTING, ndev)) {
+#ifdef BCMDONGLEHOST
+ DHD_STATLOG_CTRL(dhdp, ST(DISASSOC_INT_START),
+ dhd_net2idx(dhdp->info, ndev),
+ WLAN_REASON_DEAUTH_LEAVING);
+#endif /* BCMDONGLEHOST */
+ wl_cfg80211_disassoc(ndev, WLAN_REASON_DEAUTH_LEAVING);
+ }
+
+#ifdef BCMDONGLEHOST
+ DHD_STATLOG_CTRL(dhdp, ST(DISASSOC_DONE),
+ dhd_net2idx(dhdp->info, ndev), as->reason);
+#endif /* BCMDONGLEHOST */
+
+#if defined(CONFIG_TIZEN)
+ net_stat_tizen_update_wifi(ndev, WIFISTAT_CONNECTION_FAIL);
+#endif /* CONFIG_TIZEN */
+
+#ifdef DHD_LOSSLESS_ROAMING
+ wl_del_roam_timeout(cfg);
+#endif /* DHD_LOSSLESS_ROAMING */
+	/*
+	 * FW sends the body and body length as part of the deauth
+	 * and disassoc events (WLC_E_DISASSOC_IND, WLC_E_DEAUTH_IND).
+	 * The VIEs sit after the reason code in the body; the reason
+	 * code is 2 bytes long.
+	 */
+	if (event == WLC_E_DISASSOC_IND || event == WLC_E_DEAUTH_IND) {
+		if ((datalen > DOT11_DISCONNECT_RC) &&
+			datalen < (VNDR_IE_MAX_LEN + DOT11_DISCONNECT_RC) &&
+			data) {
+			ie_ptr = (uchar*)data + DOT11_DISCONNECT_RC;
+			ie_len = datalen - DOT11_DISCONNECT_RC;
+		}
+		/* log after extraction so the length reflects the parsed IEs */
+		WL_DBG(("recv disconnect ies ie_len = %d\n", ie_len));
+	}
+#ifdef WL_ANALYTICS
+ else if ((event == WLC_E_LINK) &&
+ (reason == WLC_E_LINK_BCN_LOSS)) {
+ if (ndev == bcmcfg_to_prmry_ndev(cfg)) {
+ if (wl_vndr_ies_find_vendor_oui(cfg, ndev,
+ CISCO_AIRONET_OUI)) {
+ WL_INFORM_MEM(("Analytics Beacon loss\n"));
+ ie_ptr = (uchar*)disco_bcnloss_vsie;
+ ie_len = sizeof(disco_bcnloss_vsie);
+ }
+ }
+ }
+#endif /* WL_ANALYTICS */
+
+#ifdef BCMDONGLEHOST
+#if defined(DHDTCPSYNC_FLOOD_BLK) && defined(CUSTOMER_TCPSYNC_FLOOD_DIS_RC)
+ {
+ u32 ifidx = ntoh32(as->event_msg->ifidx);
+ struct dhd_if *ifp = dhd_get_ifp(dhdp, ifidx);
+ if (ifp && ifp->disconnect_tsync_flood) {
+ reason = CUSTOMER_TCPSYNC_FLOOD_DIS_RC;
+ }
+ }
+#endif /* DHDTCPSYNC_FLOOD_BLK && CUSTOMER_TCPSYNC_FLOOD_DIS_RC */
+#endif /* BCMDONGLEHOST */
+
+#ifdef DHD_ENABLE_BIGDATA_LOGGING
+ wl_get_bss_info(cfg, ndev, as->addr);
+#endif /* DHD_ENABLE_BIGDATA_LOGGING */
+
+ /* unlink from bss list - to force fresh add from next scan/connect */
+ wl_cfg8021_unlink_bss(cfg, ndev, as->addr);
+
+ /* clear profile before reporting link down */
+ wl_init_prof(cfg, ndev);
+#if defined(BSSCACHE)
+ wl_delete_disconnected_bss_cache(&cfg->g_bss_cache_ctrl,
+ (u8*)(&as->event_msg->addr));
+#endif
+#ifdef WL_EXT_IAPSTA
+ {
+ wl_event_msg_t emsg;
+ memcpy(&emsg, as->event_msg, sizeof(wl_event_msg_t));
+ wl_ext_in4way_sync(ndev, STA_NO_BTC_IN4WAY, WL_EXT_STATUS_DISCONNECTED, &emsg);
+ wl_ext_iapsta_restart_master(ndev);
+ }
+#endif
+
+ CFG80211_DISCONNECTED(ndev, reason, ie_ptr, ie_len,
+ loc_gen, GFP_KERNEL);
+	WL_MSG(ndev->name, "Disconnect event sent to upper layer. "
+		"event:%d e->reason=%d reason=%d ie_len=%d "
+ "from " MACDBG "\n",
+ event, ntoh32(as->reason), reason, ie_len,
+ MAC2STRDBG((const u8*)(&as->addr)));
+
+ /* clear connected state */
+ wl_clr_drv_status(cfg, CONNECTED, ndev);
+
+ /* Common handler for assoc fail/link down */
+ wl_post_linkdown_ops(cfg, as, as->ndev);
+
+ return ret;
+}
+
+static s32
+wl_handle_assoc_done(struct bcm_cfg80211 *cfg, wl_assoc_status_t *as)
+{
+ s32 ret = BCME_OK;
+ bool act = true;
+ struct net_device *ndev = as->ndev;
+
+ wl_update_prof(cfg, ndev, as->event_msg, &act, WL_PROF_ACT);
+
+ if (wl_get_drv_status(cfg, CONNECTED, ndev)) {
+ u8 *curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
+ u8 *conn_req_bssid = wl_read_prof(cfg, ndev, WL_PROF_LATEST_BSSID);
+ if (memcmp(curbssid, conn_req_bssid, ETHER_ADDR_LEN) == 0) {
+ /* connected bssid and outstanding connect req bssid are same */
+ WL_INFORM_MEM((" Connected event of connected device "
+ "e=%d s=%d, ignore it\n",
+ as->event_type, as->status));
+ return ret;
+ }
+ }
+
+ /* Report connect result to cfg80211 layer */
+ ret = wl_bss_connect_done(cfg, ndev, as->event_msg, as->data, true);
+ if (unlikely(ret)) {
+ WL_ERR(("Connect report failed!\n"));
+ /* Sync with fw */
+ wl_cfg80211_disassoc(ndev, WLAN_REASON_DEAUTH_LEAVING);
+ ret = BCME_ERROR;
+ goto exit;
+ }
+
+ WL_DBG(("joined in BSS network \"%s\"\n",
+ ((struct wlc_ssid *)wl_read_prof(cfg, ndev, WL_PROF_SSID))->SSID));
+ wl_update_prof(cfg, ndev, NULL, (const void *)&as->addr, WL_PROF_BSSID);
+
+ wl_link_up(cfg);
+
+ /* Handle feature specific handling on linkup event */
+ ret = wl_post_linkup_ops(cfg, as);
+
+exit:
+ return ret;
+}
+
+static s32
+wl_handle_roam_done(struct bcm_cfg80211 *cfg, wl_assoc_status_t *as)
+{
+ s32 ret = BCME_OK;
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+
+ if (cfg->roam_offload) {
+ /* roam offload enabled, avoid roam events to wake up host */
+ WL_ERR(("roamoffload enabled. Ignore event\n"));
+ return ret;
+ }
+
+#ifdef DHD_EVENT_LOG_FILTER
+ dhd_event_log_filter_notify_connect_done(dhdp,
+ as->addr, true);
+#endif /* DHD_EVENT_LOG_FILTER */
+
+#ifdef DHD_LOSSLESS_ROAMING
+ {
+ struct wl_security *sec = wl_read_prof(cfg,
+ as->ndev, WL_PROF_SEC);
+ if (!IS_AKM_SUITE_FT(sec)) {
+ wl_bss_roaming_done(cfg, as->ndev, as->event_msg, as->data);
+ }
+ }
+#endif /* DHD_LOSSLESS_ROAMING */
+
+ /* Arm pkt logging timer */
+ dhd_dump_mod_pkt_timer(dhdp, PKT_CNT_RSN_ROAM);
+
+ return ret;
+}
+
+static s32
+wl_handle_sta_link_action(struct bcm_cfg80211 *cfg, wl_assoc_status_t *as)
+{
+ s32 ret = BCME_OK;
+
+ WL_INFORM_MEM(("assoc_state:%s link action:%s\n",
+ wl_get_assoc_state_str(as->assoc_state),
+ wl_get_link_action_str(as->link_action)));
+
+ switch (as->link_action) {
+ case WL_LINK_ASSOC_DONE:
+ ret = wl_handle_assoc_done(cfg, as);
+ break;
+ case WL_LINK_ASSOC_FAIL:
+ ret = wl_handle_assoc_fail(cfg, as, FALSE);
+ break;
+ case WL_LINK_DOWN:
+ ret = wl_handle_link_down(cfg, as);
+ break;
+ case WL_LINK_ROAM_DONE:
+ ret = wl_handle_roam_done(cfg, as);
+ break;
+ case WL_LINK_FORCE_DEAUTH:
+ wl_cfg80211_disassoc(as->ndev, WLAN_REASON_DEAUTH_LEAVING);
+ break;
+ default:
+ WL_ERR(("Unsupported link state:%d\n", as->link_action));
+ ret = -ENOTSUPP;
+ }
+
+ if (unlikely(ret)) {
+ WL_ERR(("link_action:%d handling failed\n", as->link_action));
+ }
+
+ return ret;
+}
+
+static s32
+wl_handle_assoc_events(struct bcm_cfg80211 *cfg,
+ struct wireless_dev *wdev, const wl_event_msg_t *e,
+ void *data, wl_assoc_state_t assoc_state)
+{
+ s32 err = BCME_OK;
+ wl_assoc_status_t as;
+
+ if (!wdev || !e) {
+ WL_ERR(("wrong input\n"));
+ return -EINVAL;
+ }
+
+ bzero(&as, sizeof(wl_assoc_status_t));
+ as.event_type = ntoh32(e->event_type);
+ as.status = ntoh32(e->status);
+ as.reason = ntoh32(e->reason);
+ as.flags = ntoh16(e->flags);
+ as.ndev = wdev->netdev;
+ as.data = data;
+ as.data_len = ntoh32(e->datalen);
+ as.event_msg = e;
+	(void)memcpy_s(as.addr, ETH_ALEN, e->addr.octet, ETH_ALEN);
+	/* record the caller-derived assoc state so handlers and logs see it */
+	as.assoc_state = assoc_state;
+
+ WL_INFORM_MEM(("[%s] Mode BSS. assoc_state:%d event:%d "
+ "status:%d reason:%d e_idx:%d " MACDBG "\n",
+ as.ndev->name, as.assoc_state, as.event_type, as.status, as.reason,
+ cfg->eidx.in_progress, MAC2STRDBG((const u8*)(&e->addr))));
+
+ /* Handle FW events */
+ switch (as.event_type) {
+ case WLC_E_AUTH:
+ if (ntoh32(e->auth_type) == DOT11_SAE) {
+#ifdef WL_SAE
+ wl_bss_handle_sae_auth(cfg, as.ndev, e, data);
+#endif /* WL_SAE */
+
+#ifdef WL_CLIENT_SAE
+ wl_handle_auth_event(cfg, as.ndev, e, data);
+#endif /* WL_CLIENT_SAE */
+ }
+
+ /* Intentional fall through */
+ case WLC_E_ASSOC:
+ wl_get_auth_assoc_status(cfg, as.ndev, e, data);
+ break;
+ case WLC_E_ASSOC_RESP_IE:
+ if (as.status != WLC_E_STATUS_SUCCESS) {
+ wl_cache_assoc_resp_ies(cfg, as.ndev, e, data);
+ }
+ break;
+ case WLC_E_SET_SSID:
+ wl_cfg80211_handle_set_ssid_complete(cfg, &as, e, assoc_state);
+ break;
+ case WLC_E_DEAUTH_IND:
+ case WLC_E_DISASSOC_IND:
+ wl_cfg80211_handle_deauth_ind(cfg, &as);
+ /* intentional fall through */
+ case WLC_E_DISASSOC:
+ case WLC_E_DEAUTH:
+ as.link_action = wl_set_link_action(assoc_state, false);
+ break;
+ case WLC_E_LINK:
+ if (as.flags & WLC_EVENT_MSG_LINK) {
+ as.link_action = wl_set_link_action(assoc_state, true);
+ } else {
+ as.link_action = wl_set_link_action(assoc_state, false);
+ }
+ break;
+ default:
+ WL_DBG(("Ignore event:%d\n", as.event_type));
+ as.link_action = 0;
+ }
+
+ if (as.link_action) {
+ /* Handle change in link state (if any) */
+ err = wl_handle_sta_link_action(cfg, &as);
+ }
+
+ return err;
+}
+
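+/* The signed difference keeps the comparison correct across u32 index
+ * wraparound, as long as the two indices are within 2^31 of each other.
+ */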
+#define IS_OBSOLETE_EVENT(cur_idx, marker_idx) ((s32)(cur_idx - marker_idx) < 0)
+static s32
+wl_notify_connect_status_sta(struct bcm_cfg80211 *cfg,
+ struct wireless_dev *wdev, const wl_event_msg_t *e, void *data)
+{
+ u32 event_type;
+ wl_assoc_state_t assoc_state;
+ struct net_device *ndev;
+ s32 ret = BCME_OK;
+ wl_event_idx_t *idx = &cfg->eidx;
+
+ if (!wdev || !e) {
+ WL_ERR(("wrong input\n"));
+ return -EINVAL;
+ }
+
+ event_type = ntoh32(e->event_type);
+ if (IS_OBSOLETE_EVENT(idx->in_progress, idx->min_connect_idx)) {
+ /* If this event is enqd before the connect req, discard */
+ WL_ERR(("discard obsolete event:%d. cur_idx:%d min_idx:%d\n",
+ event_type, idx->in_progress, idx->min_connect_idx));
+ return -EINVAL;
+ }
+
+ ndev = wdev->netdev;
+ if (!wl_get_drv_status(cfg, CFG80211_CONNECT, ndev)) {
+ /* Join attempt via non-cfg80211 interface.
+ * Don't send events to cfg80211 layer
+ */
+ WL_INFORM_MEM(("Event received in non-cfg80211"
+ " connect state. Ignore\n"));
+ goto exit;
+ }
+
+ if (wl_get_drv_status(cfg, CONNECTING, ndev)) {
+ assoc_state = WL_STATE_ASSOCIATING;
+ } else if (wl_get_drv_status(cfg, CONNECTED, ndev)) {
+ assoc_state = WL_STATE_ASSOCIATED;
+ } else {
+ WL_ERR(("Unexpected event:%d in assoc idle state\n", event_type));
+ assoc_state = WL_STATE_ASSOC_IDLE;
+ }
+
+ ret = wl_handle_assoc_events(cfg, wdev, e, data, assoc_state);
+exit:
+ return ret;
+}
+
+static s32
+wl_notify_connect_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+ struct net_device *ndev = NULL;
+ s32 err = 0;
+ u32 mode;
+
+#if defined(BCMDONGLEHOST) && defined(OEM_ANDROID)
+ DHD_DISABLE_RUNTIME_PM((dhd_pub_t *)cfg->pub);
+#endif /* BCMDONGLEHOST && OEM_ANDROID */
+
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+ mode = wl_get_mode_by_netdev(cfg, ndev);
+
+ /* Push link events to upper layer log */
+ SUPP_LOG(("[%s] Mode:%d event:%d status:0x%x reason:%d\n",
+ ndev->name, mode, ntoh32(e->event_type),
+ ntoh32(e->status), ntoh32(e->reason)));
+
+ if (mode == WL_MODE_AP) {
+ /* AP/P2P GO cases */
+ err = wl_notify_connect_status_ap(cfg, ndev, e, data);
+ } else if (mode == WL_MODE_IBSS) {
+ err = wl_notify_connect_status_ibss(cfg, ndev, e, data);
+ } else if (mode == WL_MODE_BSS) {
+ /* STA/GC cases */
+ err = wl_notify_connect_status_sta(cfg, ndev->ieee80211_ptr, e, data);
+ } else {
+ WL_ERR(("Unexpected event:%d for mode:%d\n", e->event_type, mode));
+ }
+
+#if defined(BCMDONGLEHOST) && defined(OEM_ANDROID)
+ DHD_ENABLE_RUNTIME_PM((dhd_pub_t *)cfg->pub);
+#endif /* BCMDONGLEHOST && OEM_ANDROID */
+ return err;
+}
+
+#ifdef WL_RELMCAST
+void wl_cfg80211_set_rmc_pid(struct net_device *dev, int pid)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ if (pid > 0)
+ cfg->rmc_event_pid = pid;
+ WL_DBG(("set pid for rmc event : pid=%d\n", pid));
+}
+#endif /* WL_RELMCAST */
+
+#ifdef WLAIBSS
+void wl_cfg80211_set_txfail_pid(struct net_device *dev, int pid)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ if (pid > 0)
+ cfg->aibss_txfail_pid = pid;
+ WL_DBG(("set pid for aibss fail event : pid=%d\n", pid));
+}
+
+static s32
+wl_notify_aibss_txfail(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+ u32 evt = ntoh32(e->event_type);
+ int ret = -1;
+#ifdef PCIE_FULL_DONGLE
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+ u32 reason = ntoh32(e->reason);
+#endif
+ if (cfg->aibss_txfail_pid != 0) {
+#ifdef PCIE_FULL_DONGLE
+ if (reason == AIBSS_PEER_FREE) {
+ uint8 ifindex;
+ wl_event_msg_t event;
+
+ bzero(&event, sizeof(wl_event_msg_t));
+ memcpy(&event, e, sizeof(wl_event_msg_t));
+
+ ifindex = (uint8)dhd_ifname2idx(dhd->info, event.ifname);
+ WL_INFORM_MEM(("Peer freed. Flow rings delete for peer.\n"));
+ dhd_flow_rings_delete_for_peer(dhd, ifindex,
+ (void *)&event.addr.octet[0]);
+ return 0;
+ }
+#endif
+ ret = wl_netlink_send_msg(cfg->aibss_txfail_pid, AIBSS_EVENT_TXFAIL,
+ cfg->aibss_txfail_seq++, &e->addr, ETHER_ADDR_LEN);
+ }
+
+ WL_DBG(("txfail : evt=%d, pid=%d, ret=%d, mac=" MACF "\n",
+ evt, cfg->aibss_txfail_pid, ret, CONST_ETHERP_TO_MACF(&e->addr)));
+ return ret;
+}
+#endif /* WLAIBSS */
+#ifdef WL_RELMCAST
+static s32
+wl_notify_rmc_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+ u32 evt = ntoh32(e->event_type);
+ u32 reason = ntoh32(e->reason);
+ int ret = -1;
+
+ switch (reason) {
+ case WLC_E_REASON_RMC_AR_LOST:
+ case WLC_E_REASON_RMC_AR_NO_ACK:
+ if (cfg->rmc_event_pid != 0) {
+ ret = wl_netlink_send_msg(cfg->rmc_event_pid,
+ RMC_EVENT_LEADER_CHECK_FAIL,
+ cfg->rmc_event_seq++, NULL, 0);
+ }
+ break;
+ default:
+ break;
+ }
+ WL_DBG(("rmcevent : evt=%d, pid=%d, ret=%d\n", evt, cfg->rmc_event_pid, ret));
+ return ret;
+}
+#endif /* WL_RELMCAST */
+
+#ifdef GSCAN_SUPPORT
+static s32
+wl_handle_roam_exp_event(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+ struct net_device *ndev = NULL;
+ u32 datalen = be32_to_cpu(e->datalen);
+
+ if (datalen) {
+ wl_roam_exp_event_t *evt_data = (wl_roam_exp_event_t *)data;
+ if (evt_data->version == ROAM_EXP_EVENT_VERSION) {
+ wlc_ssid_t *ssid = &evt_data->cur_ssid;
+ struct wireless_dev *wdev;
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+ if (ndev) {
+ wdev = ndev->ieee80211_ptr;
+ wdev->ssid_len = min(ssid->SSID_len, (uint32)DOT11_MAX_SSID_LEN);
+ memcpy(wdev->ssid, ssid->SSID, wdev->ssid_len);
+ WL_ERR(("SSID is %s\n", ssid->SSID));
+ wl_update_prof(cfg, ndev, NULL, ssid, WL_PROF_SSID);
+ } else {
+ WL_ERR(("NULL ndev!\n"));
+ }
+ } else {
+ WL_ERR(("Version mismatch %d, expected %d", evt_data->version,
+ ROAM_EXP_EVENT_VERSION));
+ }
+ }
+ return BCME_OK;
+}
+#endif /* GSCAN_SUPPORT */
+
+#ifdef RSSI_MONITOR_SUPPORT
+static s32 wl_handle_rssi_monitor_event(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+
+#if defined(WL_VENDOR_EXT_SUPPORT) || defined(CONFIG_BCMDHD_VENDOR_EXT)
+ u32 datalen = be32_to_cpu(e->datalen);
+ struct net_device *ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+ struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+
+ if (datalen) {
+ wl_rssi_monitor_evt_t *evt_data = (wl_rssi_monitor_evt_t *)data;
+ if (evt_data->version == RSSI_MONITOR_VERSION) {
+ dhd_rssi_monitor_evt_t monitor_data;
+ monitor_data.version = DHD_RSSI_MONITOR_EVT_VERSION;
+ monitor_data.cur_rssi = evt_data->cur_rssi;
+ memcpy(&monitor_data.BSSID, &e->addr, ETHER_ADDR_LEN);
+ wl_cfgvendor_send_async_event(wiphy, ndev,
+ GOOGLE_RSSI_MONITOR_EVENT,
+ &monitor_data, sizeof(monitor_data));
+ } else {
+ WL_ERR(("Version mismatch %d, expected %d", evt_data->version,
+ RSSI_MONITOR_VERSION));
+ }
+ }
+#endif /* WL_VENDOR_EXT_SUPPORT || CONFIG_BCMDHD_VENDOR_EXT */
+ return BCME_OK;
+}
+#endif /* RSSI_MONITOR_SUPPORT */
+
+static s32
+wl_notify_roaming_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+ bool act;
+ struct net_device *ndev = NULL;
+ s32 err = 0;
+ u32 event = be32_to_cpu(e->event_type);
+ u32 status = be32_to_cpu(e->status);
+#ifdef DHD_LOSSLESS_ROAMING
+ struct wl_security *sec;
+#endif
+#ifdef BCMDONGLEHOST
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+#endif /* BCMDONGLEHOST */
+ WL_DBG(("Enter \n"));
+
+#ifdef BCMDONGLEHOST
+ BCM_REFERENCE(dhdp);
+#endif /* BCMDONGLEHOST */
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+ if ((!cfg->disable_roam_event) && (event == WLC_E_BSSID)) {
+
+#ifdef OEM_ANDROID
+ wl_add_remove_eventmsg(ndev, WLC_E_ROAM, false);
+#endif /* OEM_ANDROID */
+
+ cfg->disable_roam_event = TRUE;
+ }
+
+ if ((cfg->disable_roam_event) && (event == WLC_E_ROAM))
+ return err;
+
+ if ((event == WLC_E_ROAM || event == WLC_E_BSSID) && status == WLC_E_STATUS_SUCCESS) {
+ if (wl_get_drv_status(cfg, CONNECTED, ndev)) {
+#ifdef DHD_LOSSLESS_ROAMING
+ sec = wl_read_prof(cfg, ndev, WL_PROF_SEC);
+ /* In order to reduce roaming delay, wl_bss_roaming_done is
+ * early called with WLC_E_LINK event. It is called from
+ * here only if WLC_E_LINK event is blocked for specific
+ * security type.
+ */
+ if (IS_AKM_SUITE_FT(sec)) {
+ wl_bss_roaming_done(cfg, ndev, e, data);
+#ifdef BCMDONGLEHOST
+ /* Arm pkt logging timer */
+ dhd_dump_mod_pkt_timer(dhdp, PKT_CNT_RSN_ROAM);
+#endif /* BCMDONGLEHOST */
+ }
+ /* Roam timer is deleted mostly from wl_cfg80211_change_station
+ * after roaming is finished successfully. We need to delete
+ * the timer from here only for some security types that aren't
+ * using wl_cfg80211_change_station to authorize SCB
+ */
+ if (IS_AKM_SUITE_FT(sec) || IS_AKM_SUITE_CCKM(sec)) {
+ wl_del_roam_timeout(cfg);
+ }
+#else
+#if !defined(DHD_NONFT_ROAMING)
+ wl_bss_roaming_done(cfg, ndev, e, data);
+#endif /* !DHD_NONFT_ROAMING */
+#endif /* DHD_LOSSLESS_ROAMING */
+#ifdef WBTEXT
+ if (dhdp->wbtext_support) {
+ /* set wnm_keepalives_max_idle after association */
+ wl_cfg80211_wbtext_set_wnm_maxidle(cfg, ndev);
+
+ /* Mostly nbr request of BTM query will be handled
+ * from wl_cfg80211_change_station
+ * after key negotiation is finished.
+ * This part is only for some specific security
+ * types (FT, CCKM) that don't call
+ * wl_cfg80211_change_station after roaming
+ */
+ if (IS_AKM_SUITE_FT(sec) || IS_AKM_SUITE_CCKM(sec)) {
+ /* send nbr request or BTM query to update RCC
+ * after roaming completed
+ */
+ wl_cfg80211_wbtext_update_rcc(cfg, ndev);
+ }
+ }
+#endif /* WBTEXT */
+ } else {
+ wl_bss_connect_done(cfg, ndev, e, data, true);
+ }
+ act = true;
+ wl_update_prof(cfg, ndev, e, &act, WL_PROF_ACT);
+ wl_update_prof(cfg, ndev, NULL, (const void *)&e->addr, WL_PROF_BSSID);
+
+ if (ndev == bcmcfg_to_prmry_ndev(cfg)) {
+ wl_vndr_ies_get_vendor_oui(cfg, ndev, NULL, 0);
+ }
+ }
+#ifdef DHD_LOSSLESS_ROAMING
+ else if ((event == WLC_E_ROAM || event == WLC_E_BSSID) && status != WLC_E_STATUS_SUCCESS) {
+ wl_del_roam_timeout(cfg);
+ }
+#endif
+ return err;
+}
+
+#ifdef CUSTOM_EVENT_PM_WAKE
+uint32 last_dpm_upd_time = 0; /* ms */
+#define DPM_UPD_LMT_TIME ((CUSTOM_EVENT_PM_WAKE + (5)) * (1000) * (4)) /* ms */
+#define DPM_UPD_LMT_RSSI -85 /* dBm */
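+/*
+ * Example (assuming CUSTOM_EVENT_PM_WAKE is 30 seconds): the limit works
+ * out to (30 + 5) * 1000 * 4 = 140000 ms, i.e. two DPM events arriving
+ * within ~140 s of each other, with the LCD off and RSSI at or below
+ * -85 dBm, trigger the forced disassoc below.
+ */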
+
+static s32
+wl_check_pmstatus(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+ s32 err = BCME_OK;
+ struct net_device *ndev = NULL;
+ u8 *pbuf = NULL;
+ uint32 cur_dpm_upd_time = 0;
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+ s32 rssi;
+#ifdef SUPPORT_RSSI_SUM_REPORT
+ wl_rssi_ant_mimo_t rssi_ant_mimo;
+#endif /* SUPPORT_RSSI_SUM_REPORT */
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+ pbuf = (u8 *)MALLOCZ(cfg->osh, WLC_IOCTL_MEDLEN);
+ if (pbuf == NULL) {
+ WL_ERR(("failed to allocate local pbuf\n"));
+ return -ENOMEM;
+ }
+
+ err = wldev_iovar_getbuf_bsscfg(ndev, "dump",
+ "pm", strlen("pm"), pbuf, WLC_IOCTL_MEDLEN,
+ 0, &cfg->ioctl_buf_sync);
+
+ if (err) {
+ WL_ERR(("dump ioctl err = %d", err));
+ } else {
+ WL_ERR(("PM status : %s\n", pbuf));
+ }
+
+ if (pbuf) {
+ MFREE(cfg->osh, pbuf, WLC_IOCTL_MEDLEN);
+ }
+
+ if (dhd->early_suspended) {
+ /* LCD off */
+#ifdef SUPPORT_RSSI_SUM_REPORT
+ /* Query RSSI sum across antennas */
+ memset(&rssi_ant_mimo, 0, sizeof(rssi_ant_mimo));
+ err = wl_get_rssi_per_ant(ndev, ndev->name, NULL, &rssi_ant_mimo);
+ if (err) {
+ WL_ERR(("Could not get rssi sum (%d)\n", err));
+ }
+ rssi = rssi_ant_mimo.rssi_sum;
+ if (rssi == 0)
+#endif /* SUPPORT_RSSI_SUM_REPORT */
+ {
+ scb_val_t scb_val;
+ memset(&scb_val, 0, sizeof(scb_val_t));
+ scb_val.val = 0;
+ err = wldev_ioctl_get(ndev, WLC_GET_RSSI, &scb_val, sizeof(scb_val_t));
+ if (err) {
+ WL_ERR(("Could not get rssi (%d)\n", err));
+ }
+ rssi = wl_rssi_offset(dtoh32(scb_val.val));
+ }
+ WL_ERR(("RSSI %d dBm\n", rssi));
+ if (rssi > DPM_UPD_LMT_RSSI) {
+ return err;
+ }
+ } else {
+ /* LCD on */
+ return err;
+ }
+
+ if (last_dpm_upd_time == 0) {
+ last_dpm_upd_time = OSL_SYSUPTIME();
+ } else {
+ cur_dpm_upd_time = OSL_SYSUPTIME();
+ if (cur_dpm_upd_time - last_dpm_upd_time < DPM_UPD_LMT_TIME) {
+ scb_val_t scbval;
+ DHD_STATLOG_CTRL(dhd, ST(DISASSOC_INT_START),
+ dhd_net2idx(dhd->info, ndev), 0);
+ bzero(&scbval, sizeof(scb_val_t));
+
+ err = wldev_ioctl_set(ndev, WLC_DISASSOC,
+ &scbval, sizeof(scb_val_t));
+ if (err < 0) {
+ WL_ERR(("Disassoc error %d\n", err));
+ return err;
+ }
+ WL_ERR(("Force Disassoc due to updated DPM event.\n"));
+
+ last_dpm_upd_time = 0;
+ } else {
+ last_dpm_upd_time = cur_dpm_upd_time;
+ }
+ }
+
+ return err;
+}
+#endif /* CUSTOM_EVENT_PM_WAKE */
+
+#ifdef QOS_MAP_SET
+/* get user priority table */
+uint8 *
+wl_get_up_table(dhd_pub_t * dhdp, int idx)
+{
+ struct net_device *ndev;
+ struct bcm_cfg80211 *cfg;
+
+ ndev = dhd_idx2net(dhdp, idx);
+ if (ndev) {
+ cfg = wl_get_cfg(ndev);
+ if (cfg)
+ return (uint8 *)(cfg->up_table);
+ }
+
+ return NULL;
+}
+#endif /* QOS_MAP_SET */
+
+#if defined(DHD_LOSSLESS_ROAMING) || defined (DBG_PKT_MON)
+/*
+ * start packet logging in advance to make sure that EAPOL
+ * messages are not missed during roaming
+ */
+static s32
+wl_notify_roam_prep_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+ struct wl_security *sec;
+ struct net_device *ndev;
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+ u32 status = ntoh32(e->status);
+ u32 reason = ntoh32(e->reason);
+
+ BCM_REFERENCE(sec);
+
+ if (status == WLC_E_STATUS_SUCCESS && reason != WLC_E_REASON_INITIAL_ASSOC) {
+ WL_ERR(("Attempting roam with reason code : %d\n", reason));
+ }
+
+#ifdef CONFIG_SILENT_ROAM
+ if (dhdp->in_suspend && reason == WLC_E_REASON_SILENT_ROAM) {
+ dhdp->sroamed = TRUE;
+ }
+#endif /* CONFIG_SILENT_ROAM */
+
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+#ifdef DBG_PKT_MON
+ if (ndev == bcmcfg_to_prmry_ndev(cfg)) {
+ DHD_DBG_PKT_MON_STOP(dhdp);
+ DHD_DBG_PKT_MON_START(dhdp);
+ }
+#endif /* DBG_PKT_MON */
+#ifdef DHD_LOSSLESS_ROAMING
+ sec = wl_read_prof(cfg, ndev, WL_PROF_SEC);
+ /* Disable Lossless Roaming for specific AKM suites.
+ * Any other AKM suite can be added below if Lossless Roaming
+ * delays the transition time and causes a certification failure.
+ */
+ if (IS_AKM_SUITE_FT(sec) || IS_AKM_OWE(sec->wpa_auth)) {
+ return BCME_OK;
+ }
+
+ dhdp->dequeue_prec_map = 1 << dhdp->flow_prio_map[PRIO_8021D_NC];
+ /* Restore flow control */
+ dhd_txflowcontrol(dhdp, ALL_INTERFACES, OFF);
+
+ mod_timer(&cfg->roam_timeout, jiffies + msecs_to_jiffies(WL_ROAM_TIMEOUT_MS));
+#endif /* DHD_LOSSLESS_ROAMING */
+
+ return BCME_OK;
+}
+#endif /* DHD_LOSSLESS_ROAMING || DBG_PKT_MON */
+
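+/*
+ * Report the start of a roam scan to the upper layer as an async vendor
+ * event (GOOGLE_ROAM_EVENT_START) so the framework can track roaming.
+ */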
+static s32
+wl_notify_roam_start_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT)
+ struct net_device *ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+ struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+ int event_type;
+
+ event_type = WIFI_EVENT_ROAM_SCAN_STARTED;
+ wl_cfgvendor_send_async_event(wiphy, ndev, GOOGLE_ROAM_EVENT_START,
+ &event_type, sizeof(int));
+#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || (WL_VENDOR_EXT_SUPPORT) */
+
+ return BCME_OK;
+}
+
+static s32 wl_get_assoc_ies(struct bcm_cfg80211 *cfg, struct net_device *ndev)
+{
+ wl_assoc_info_t assoc_info;
+ struct wl_connect_info *conn_info = wl_to_conn(cfg);
+ s32 err = 0;
+#ifdef QOS_MAP_SET
+ bcm_tlv_t * qos_map_ie = NULL;
+#endif /* QOS_MAP_SET */
+
+ WL_DBG(("Enter \n"));
+
+ bzero(&assoc_info, sizeof(wl_assoc_info_t));
+ err = wldev_iovar_getbuf(ndev, "assoc_info", NULL, 0, cfg->extra_buf,
+ WL_ASSOC_INFO_MAX, NULL);
+ if (unlikely(err)) {
+ WL_ERR(("could not get assoc info (%d)\n", err));
+ return err;
+ }
+ memcpy(&assoc_info, cfg->extra_buf, sizeof(wl_assoc_info_t));
+ assoc_info.req_len = htod32(assoc_info.req_len);
+ assoc_info.resp_len = htod32(assoc_info.resp_len);
+ assoc_info.flags = htod32(assoc_info.flags);
+ if (conn_info->req_ie_len) {
+ conn_info->req_ie_len = 0;
+ bzero(conn_info->req_ie, sizeof(conn_info->req_ie));
+ }
+ if (conn_info->resp_ie_len) {
+ conn_info->resp_ie_len = 0;
+ bzero(conn_info->resp_ie, sizeof(conn_info->resp_ie));
+ }
+
+ if (assoc_info.req_len) {
+ err = wldev_iovar_getbuf(ndev, "assoc_req_ies", NULL, 0, cfg->extra_buf,
+ assoc_info.req_len, NULL);
+ if (unlikely(err)) {
+ WL_ERR(("could not get assoc req (%d)\n", err));
+ return err;
+ }
+ if (assoc_info.req_len < sizeof(struct dot11_assoc_req)) {
+ WL_ERR(("req_len %d lessthan %d \n", assoc_info.req_len,
+ (int)sizeof(struct dot11_assoc_req)));
+ return BCME_BADLEN;
+ }
+ conn_info->req_ie_len = (uint32)(assoc_info.req_len
+ - sizeof(struct dot11_assoc_req));
+ if (assoc_info.flags & WLC_ASSOC_REQ_IS_REASSOC) {
+ conn_info->req_ie_len -= ETHER_ADDR_LEN;
+ }
+ if (conn_info->req_ie_len <= MAX_REQ_LINE)
+ memcpy(conn_info->req_ie, cfg->extra_buf, conn_info->req_ie_len);
+ else {
+ WL_ERR(("IE size %d above max %d size \n",
+ conn_info->req_ie_len, MAX_REQ_LINE));
+ return err;
+ }
+ } else {
+ conn_info->req_ie_len = 0;
+ }
+
+ if (assoc_info.resp_len) {
+ err = wldev_iovar_getbuf(ndev, "assoc_resp_ies", NULL, 0, cfg->extra_buf,
+ assoc_info.resp_len, NULL);
+ if (unlikely(err)) {
+ WL_ERR(("could not get assoc resp (%d)\n", err));
+ return err;
+ }
+ if (assoc_info.resp_len < sizeof(struct dot11_assoc_resp)) {
+ WL_ERR(("resp_len %d is lessthan %d \n", assoc_info.resp_len,
+ (int)sizeof(struct dot11_assoc_resp)));
+ return BCME_BADLEN;
+ }
+ conn_info->resp_ie_len = assoc_info.resp_len -
+ (uint32)sizeof(struct dot11_assoc_resp);
+ if (conn_info->resp_ie_len <= MAX_REQ_LINE) {
+ memcpy(conn_info->resp_ie, cfg->extra_buf, conn_info->resp_ie_len);
+ } else {
+ WL_ERR(("IE size %d above max %d size \n",
+ conn_info->resp_ie_len, MAX_REQ_LINE));
+ return err;
+ }
+
+#ifdef QOS_MAP_SET
+ /* find qos map set ie */
+ if ((qos_map_ie = bcm_parse_tlvs(conn_info->resp_ie, conn_info->resp_ie_len,
+ DOT11_MNG_QOS_MAP_ID)) != NULL) {
+ WL_DBG((" QoS map set IE found in assoc response\n"));
+ if (!cfg->up_table) {
+ cfg->up_table = (uint8 *)MALLOC(cfg->osh, UP_TABLE_MAX);
+ }
+ wl_set_up_table(cfg->up_table, qos_map_ie);
+ } else {
+ MFREE(cfg->osh, cfg->up_table, UP_TABLE_MAX);
+ }
+#endif /* QOS_MAP_SET */
+ } else {
+ conn_info->resp_ie_len = 0;
+ }
+ WL_DBG(("req len (%d) resp len (%d)\n", conn_info->req_ie_len,
+ conn_info->resp_ie_len));
+
+ return err;
+}
+
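+/*
+ * Refresh the cached profile (chanspec, beacon interval, DTIM period) from
+ * a WLC_GET_BSS_INFO query and, when no matching entry exists in the
+ * cfg80211 bss cache, populate it from the fw bss info.
+ */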
+static s32 wl_update_bss_info(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ bool update_ssid)
+{
+ struct cfg80211_bss *bss;
+ wl_bss_info_t *bi;
+ struct wlc_ssid *ssid;
+ const struct bcm_tlv *tim;
+ s32 beacon_interval;
+ s32 dtim_period;
+ size_t ie_len;
+ const u8 *ie;
+ u8 *curbssid;
+ s32 err = 0;
+ struct wiphy *wiphy;
+ char *buf;
+ u32 freq;
+ chanspec_t chspec = INVCHANSPEC;
+
+ wiphy = bcmcfg_to_wiphy(cfg);
+
+ ssid = (struct wlc_ssid *)wl_read_prof(cfg, ndev, WL_PROF_SSID);
+ curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
+ bss = CFG80211_GET_BSS(wiphy, NULL, curbssid,
+ ssid->SSID, ssid->SSID_len);
+ buf = (char *)MALLOCZ(cfg->osh, WL_EXTRA_BUF_MAX);
+ if (!buf) {
+ WL_ERR(("buffer alloc failed.\n"));
+ return BCME_NOMEM;
+ }
+ mutex_lock(&cfg->usr_sync);
+ *(u32 *)buf = htod32(WL_EXTRA_BUF_MAX);
+ err = wldev_ioctl_get(ndev, WLC_GET_BSS_INFO, buf, WL_EXTRA_BUF_MAX);
+ if (unlikely(err)) {
+ WL_ERR(("Could not get bss info %d\n", err));
+ goto update_bss_info_out;
+ }
+ bi = (wl_bss_info_t *)(buf + 4);
+ chspec = wl_chspec_driver_to_host(bi->chanspec);
+ wl_update_prof(cfg, ndev, NULL, &chspec, WL_PROF_CHAN);
+
+ if (!bss) {
+ if (memcmp(bi->BSSID.octet, curbssid, ETHER_ADDR_LEN)) {
+ WL_ERR(("Bssid doesn't match\n"));
+ err = -EIO;
+ goto update_bss_info_out;
+ }
+ err = wl_inform_single_bss(cfg, bi, update_ssid);
+ if (unlikely(err)) {
+ WL_ERR(("Could not update the AP detail in cache\n"));
+ goto update_bss_info_out;
+ }
+
+ WL_INFORM_MEM(("Updated the AP %pM detail in cache\n", curbssid));
+ ie = ((u8 *)bi) + bi->ie_offset;
+ ie_len = bi->ie_length;
+ beacon_interval = cpu_to_le16(bi->beacon_period);
+ } else {
+ u16 channel;
+ WL_INFORM_MEM(("Found AP in the cache - BSSID %pM\n", bss->bssid));
+ channel = wf_chspec_ctlchan(wl_chspec_driver_to_host(bi->chanspec));
+ freq = wl_channel_to_frequency(channel, CHSPEC_BAND(bi->chanspec));
+ bss->channel = ieee80211_get_channel(wiphy, freq);
+#if defined(WL_CFG80211_P2P_DEV_IF)
+ ie = (const u8 *)bss->ies->data;
+ ie_len = bss->ies->len;
+#else
+ ie = bss->information_elements;
+ ie_len = bss->len_information_elements;
+#endif /* WL_CFG80211_P2P_DEV_IF */
+ beacon_interval = bss->beacon_interval;
+
+ CFG80211_PUT_BSS(wiphy, bss);
+ }
+
+ tim = bcm_parse_tlvs(ie, ie_len, WLAN_EID_TIM);
+ if (tim) {
+ dtim_period = tim->data[1];
+ } else {
+ /*
+ * An active scan was done, so we could not get the DTIM
+ * information out of the probe response.
+ * Query the DTIM information explicitly instead.
+ */
+ dtim_period = 0;
+ err = wldev_ioctl_get(ndev, WLC_GET_DTIMPRD,
+ &dtim_period, sizeof(dtim_period));
+ if (unlikely(err)) {
+ WL_ERR(("WLC_GET_DTIMPRD error (%d)\n", err));
+ goto update_bss_info_out;
+ }
+ }
+
+ wl_update_prof(cfg, ndev, NULL, &beacon_interval, WL_PROF_BEACONINT);
+ wl_update_prof(cfg, ndev, NULL, &dtim_period, WL_PROF_DTIMPERIOD);
+
+update_bss_info_out:
+ if (unlikely(err)) {
+ WL_ERR(("Failed with error %d\n", err));
+ }
+
+ MFREE(cfg->osh, buf, WL_EXTRA_BUF_MAX);
+ mutex_unlock(&cfg->usr_sync);
+ return err;
+}
+
+#ifdef DHD_LOSSLESS_ROAMING
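+/*
+ * Complete a roam: skip the indication if neither channel nor BSSID has
+ * changed, refetch the assoc req/resp IEs, update the cached bss info and
+ * report the new AP to cfg80211 via cfg80211_roamed().
+ */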
+static s32
+wl_bss_roaming_done(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data)
+{
+ struct wl_connect_info *conn_info = wl_to_conn(cfg);
+ s32 err = 0;
+ u8 *curbssid;
+ chanspec_t *chanspec;
+ scb_val_t scbval;
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) || \
+ defined(WL_COMPAT_WIRELESS)
+ struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+ struct ieee80211_channel *notify_channel = NULL;
+ u32 freq;
+ u32 cur_channel, cur_chanspec, orig_channel;
+#endif /* LINUX_VERSION > 2.6.39 || WL_COMPAT_WIRELESS */
+#if (defined(CONFIG_ARCH_MSM) && defined(CFG80211_ROAMED_API_UNIFIED)) || \
+ (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) || \
+ defined(WL_FILS_ROAM_OFFLD) || defined(CFG80211_ROAM_API_GE_4_12)
+ struct cfg80211_roam_info roam_info;
+#endif /* (CONFIG_ARCH_MSM && CFG80211_ROAMED_API_UNIFIED) || LINUX_VERSION >= 4.12.0 */
+#if defined(WL_FILS_ROAM_OFFLD)
+ struct wl_fils_info *fils_info = wl_to_fils_info(cfg);
+ struct wl_security *sec = wl_read_prof(cfg, ndev, WL_PROF_SEC);
+#endif
+#ifdef BCMDONGLEHOST
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+#endif /* BCMDONGLEHOST */
+#ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
+ dhd_if_t *ifp = NULL;
+#endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
+ s32 rssi;
+#ifdef WLFBT
+ uint32 data_len = 0;
+ if (data)
+ data_len = ntoh32(e->datalen);
+#endif /* WLFBT */
+
+ BCM_REFERENCE(dhdp);
+ curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
+ chanspec = (chanspec_t *)wl_read_prof(cfg, ndev, WL_PROF_CHAN);
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) || \
+ defined(WL_COMPAT_WIRELESS)
+ /* Skip calling cfg80211_roamed if the channel and the
+ * current/new bssid are both unchanged.
+ * Also clear the roam_timeout timer.
+ * Only used on BCM4359 devices.
+ */
+ err = wldev_iovar_getint(ndev, "chanspec", (s32 *)&cur_chanspec);
+ if (unlikely(err)) {
+ return err;
+ }
+ cur_channel = wf_chspec_ctlchan(wl_chspec_driver_to_host(cur_chanspec));
+ orig_channel = wf_chspec_ctlchan(wl_chspec_driver_to_host(*chanspec));
+ if (dhdp->conf->chip != BCM43569_CHIP_ID) {
+ if ((orig_channel == cur_channel) &&
+ ((memcmp(curbssid, &e->addr, ETHER_ADDR_LEN) == 0) ||
+ (memcmp(&cfg->last_roamed_addr, &e->addr, ETHER_ADDR_LEN) == 0))) {
+ WL_DBG(("BSS already present, Skipping roamed event to upper layer\n"));
+ goto fail;
+ }
+ }
+#endif /* LINUX_VERSION > 2.6.39 || WL_COMPAT_WIRELESS */
+
+ if ((err = wl_get_assoc_ies(cfg, ndev)) != BCME_OK) {
+#ifdef BCMDONGLEHOST
+ DHD_STATLOG_CTRL(dhdp, ST(DISASSOC_INT_START),
+ dhd_net2idx(dhdp->info, ndev), WLAN_REASON_DEAUTH_LEAVING);
+#endif /* BCMDONGLEHOST */
+ WL_ERR(("Fetching Assoc IEs failed, Skipping roamed event to"
+ " upper layer\n"));
+ /* To make sure of the disconnect and fw sync, explicitly send a
+ * disassoc to work around the BSSID 00:00:00:00:00:00 issue.
+ */
+ bzero(&scbval, sizeof(scb_val_t));
+ scbval.val = WLAN_REASON_DEAUTH_LEAVING;
+ memcpy(&scbval.ea, curbssid, ETHER_ADDR_LEN);
+ scbval.val = htod32(scbval.val);
+ if (wldev_ioctl_set(ndev, WLC_DISASSOC, &scbval,
+ sizeof(scb_val_t)) < 0) {
+ WL_ERR(("WLC_DISASSOC error\n"));
+ }
+ goto fail;
+ }
+
+ memset(&scbval, 0, sizeof(scb_val_t));
+ err = wldev_ioctl_get(ndev, WLC_GET_RSSI, &scbval, sizeof(scb_val_t));
+ if (err) {
+ WL_ERR(("Could not get rssi (%d)\n", err));
+ }
+ rssi = dtoh32(scbval.val);
+
+ WL_MSG(ndev->name, "%pM(ch:%3d/%sMHz) => %pM(ch:%3d/%sMHz, rssi: %3d)\n",
+ curbssid, orig_channel,
+ CHSPEC_IS20(*chanspec)?"20":
+ CHSPEC_IS40(*chanspec)?"40":
+ CHSPEC_IS80(*chanspec)?"80":
+ CHSPEC_IS160(*chanspec)?"160":"??",
+ (const u8*)(&e->addr), cur_channel,
+ CHSPEC_IS20(cur_chanspec)?"20":
+ CHSPEC_IS40(cur_chanspec)?"40":
+ CHSPEC_IS80(cur_chanspec)?"80":
+ CHSPEC_IS160(cur_chanspec)?"160":"??",
+ rssi);
+
+ wl_update_prof(cfg, ndev, NULL, (const void *)(e->addr.octet), WL_PROF_BSSID);
+ curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
+ if ((err = wl_update_bss_info(cfg, ndev, true)) != BCME_OK) {
+ WL_ERR(("failed to update bss info, err=%d\n", err));
+ goto fail;
+ }
+ if (cfg->wlc_ver.wlc_ver_major < PMKDB_WLC_VER) {
+ wl_update_pmklist(ndev, cfg->pmk_list, err);
+ }
+
+ chanspec = (chanspec_t *)wl_read_prof(cfg, ndev, WL_PROF_CHAN);
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) || \
+ defined(WL_COMPAT_WIRELESS)
+ /* channel info for cfg80211_roamed introduced in 2.6.39-rc1 */
+ freq = wl_channel_to_frequency(wf_chspec_ctlchan(*chanspec), CHSPEC_BAND(*chanspec));
+ notify_channel = ieee80211_get_channel(wiphy, freq);
+#endif /* LINUX_VERSION > 2.6.39 || WL_COMPAT_WIRELESS */
+#ifdef WLFBT
+ /* Back up the given FBT key for a later supplicant request.
+ * DHD does not check whether FBT is enabled for the current BSS,
+ * because the supplicant decides whether to use the key.
+ */
+ if (data && (data_len == FBT_KEYLEN)) {
+ memcpy(cfg->fbt_key, data, FBT_KEYLEN);
+ }
+#endif /* WLFBT */
+#ifdef CUSTOM_LONG_RETRY_LIMIT
+ if (wl_set_retry(ndev, CUSTOM_LONG_RETRY_LIMIT, 1) < 0) {
+ WL_ERR(("CUSTOM_LONG_RETRY_LIMIT set fail!\n"));
+ }
+#endif /* CUSTOM_LONG_RETRY_LIMIT */
+ DHD_STATLOG_CTRL(dhdp, ST(REASSOC_INFORM),
+ dhd_net2idx(dhdp->info, ndev), 0);
+#ifdef WL_EXT_IAPSTA
+ wl_ext_in4way_sync(ndev, 0, WL_EXT_STATUS_CONNECTED, NULL);
+#endif
+
+#if (defined(CONFIG_ARCH_MSM) && defined(CFG80211_ROAMED_API_UNIFIED)) || \
+ (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) || \
+ defined(WL_FILS_ROAM_OFFLD) || defined(CFG80211_ROAM_API_GE_4_12)
+ memset(&roam_info, 0, sizeof(struct cfg80211_roam_info));
+ roam_info.channel = notify_channel;
+ roam_info.bssid = curbssid;
+ roam_info.req_ie = conn_info->req_ie;
+ roam_info.req_ie_len = conn_info->req_ie_len;
+ roam_info.resp_ie = conn_info->resp_ie;
+ roam_info.resp_ie_len = conn_info->resp_ie_len;
+#if defined(WL_FILS_ROAM_OFFLD)
+ if ((sec->auth_type == NL80211_AUTHTYPE_FILS_SK_PFS) ||
+ (sec->auth_type == NL80211_AUTHTYPE_FILS_SK)) {
+ roam_info.fils.kek = fils_info->fils_kek;
+ roam_info.fils.kek_len = fils_info->fils_kek_len;
+ roam_info.fils.update_erp_next_seq_num = true;
+ roam_info.fils.erp_next_seq_num = fils_info->fils_erp_next_seq_num;
+ roam_info.fils.pmk = fils_info->fils_pmk;
+ roam_info.fils.pmk_len = fils_info->fils_kek_len;
+ roam_info.fils.pmkid = fils_info->fils_pmkid;
+ }
+#endif
+ cfg80211_roamed(ndev, &roam_info, GFP_KERNEL);
+#else
+ cfg80211_roamed(ndev,
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) || \
+ defined(WL_COMPAT_WIRELESS)
+ notify_channel,
+#endif
+ curbssid,
+ conn_info->req_ie, conn_info->req_ie_len,
+ conn_info->resp_ie, conn_info->resp_ie_len, GFP_KERNEL);
+#endif /* (CONFIG_ARCH_MSM && CFG80211_ROAMED_API_UNIFIED) || LINUX_VERSION >= 4.12.0 */
+
+ memcpy(&cfg->last_roamed_addr, &e->addr, ETHER_ADDR_LEN);
+ wl_set_drv_status(cfg, CONNECTED, ndev);
+
+#if defined(DHD_ENABLE_BIGDATA_LOGGING)
+ cfg->roam_count++;
+#endif /* DHD_ENABLE_BIGDATA_LOGGING */
+#ifdef WL_BAM
+ if (wl_adps_bad_ap_check(cfg, &e->addr)) {
+ if (wl_adps_enabled(cfg, ndev)) {
+ wl_adps_set_suspend(cfg, ndev, ADPS_SUSPEND);
+ }
+ }
+#endif /* WL_BAM */
+
+#ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
+ ifp = dhd_get_ifp(dhdp, e->ifidx);
+ if (ifp) {
+ ifp->post_roam_evt = TRUE;
+ }
+#endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
+
+ return err;
+
+fail:
+#ifdef DHD_LOSSLESS_ROAMING
+ wl_del_roam_timeout(cfg);
+#endif /* DHD_LOSSLESS_ROAMING */
+ return err;
+}
+#endif /* DHD_LOSSLESS_ROAMING */
+
+static bool
+wl_cfg80211_verify_bss(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ struct cfg80211_bss **bss)
+{
+ struct wiphy *wiphy;
+ struct wlc_ssid *ssid;
+ uint8 *curbssid;
+ bool ret = false;
+
+ wiphy = bcmcfg_to_wiphy(cfg);
+ ssid = (struct wlc_ssid *)wl_read_prof(cfg, ndev, WL_PROF_SSID);
+ curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
+ if (!ssid || !curbssid) {
+ WL_ERR(("No SSID/bssid found in the saved profile \n"));
+ return false;
+ }
+
+ *bss = CFG80211_GET_BSS(wiphy, NULL, curbssid,
+ ssid->SSID, ssid->SSID_len);
+ if (*bss) {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0))
+ /* Release the reference count after use. For kernel versions >= 4.7,
+ * cfg80211_put_bss is called in the cfg80211_connect_bss context.
+ */
+ CFG80211_PUT_BSS(wiphy, *bss);
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0) */
+ ret = true;
+ } else {
+ WL_ERR(("No bss entry for bssid:"MACDBG" ssid_len:%d\n",
+ MAC2STRDBG(curbssid), ssid->SSID_len));
+ }
+
+ return ret;
+}
+
+#ifdef WL_FILS
+static s32
+wl_get_fils_connect_params(struct bcm_cfg80211 *cfg, struct net_device *ndev)
+{
+ const bcm_xtlv_t* pxtlv_out;
+ struct wl_fils_info *fils_info = wl_to_fils_info(cfg);
+ int err = BCME_OK;
+ bcm_iov_buf_t *iov_buf_in = NULL;
+ bcm_iov_buf_t iov_buf_out = {0};
+ u16 len;
+ u16 type;
+ const u8 *data;
+ iov_buf_in = MALLOCZ(cfg->osh, WLC_IOCTL_SMLEN);
+ if (!iov_buf_in) {
+ WL_ERR(("buf memory alloc failed\n"));
+ err = BCME_NOMEM;
+ goto exit;
+ }
+ iov_buf_out.version = WL_FILS_IOV_VERSION;
+ iov_buf_out.id = WL_FILS_CMD_GET_CONNECT_PARAMS;
+ err = wldev_iovar_getbuf(ndev, "fils", (uint8*)&iov_buf_out, sizeof(bcm_iov_buf_t),
+ iov_buf_in, WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync);
+ if (unlikely(err)) {
+ WL_ERR(("Get FILS Params Error (%d)\n", err));
+ goto exit;
+ }
+ pxtlv_out = (bcm_xtlv_t*)((bcm_iov_buf_t*)iov_buf_in)->data;
+ len = iov_buf_in->len;
+ do {
+ if (!bcm_valid_xtlv(pxtlv_out, iov_buf_in->len, BCM_XTLV_OPTION_ALIGN32)) {
+ WL_ERR(("%s: XTLV is not valid\n", __func__));
+ err = BCME_BADARG;
+ goto exit;
+ }
+ bcm_xtlv_unpack_xtlv(pxtlv_out, &type, &len, &data, BCM_XTLV_OPTION_ALIGN32);
+ switch (type) {
+ case WL_FILS_XTLV_ERP_NEXT_SEQ_NUM:
+ fils_info->fils_erp_next_seq_num = *(const u16 *)data;
+ break;
+ case WL_FILS_XTLV_KEK:
+ if (memcpy_s(fils_info->fils_kek,
+ WL_MAX_FILS_KEY_LEN, data, len) < 0) {
+ err = BCME_BADARG;
+ goto exit;
+ }
+ fils_info->fils_kek_len = len;
+ break;
+ case WL_FILS_XTLV_PMK:
+ if (memcpy_s(fils_info->fils_pmk,
+ WL_MAX_FILS_KEY_LEN, data, len) < 0) {
+ err = BCME_BADARG;
+ goto exit;
+ }
+ fils_info->fils_pmk_len = len;
+ break;
+ case WL_FILS_XTLV_PMKID:
+ if (memcpy_s(fils_info->fils_pmkid,
+ WL_MAX_FILS_KEY_LEN, data, len) < 0) {
+ err = BCME_BADARG;
+ goto exit;
+ }
+ break;
+ default:
+ WL_ERR(("%s: wrong XTLV code\n", __func__));
+ break;
+
+ }
+ } while ((pxtlv_out = bcm_next_xtlv(pxtlv_out, (int *)&iov_buf_in->len,
+ BCM_XTLV_OPTION_ALIGN32)) && iov_buf_in->len);
+exit:
+ if (iov_buf_in) {
+ MFREE(cfg->osh, iov_buf_in, WLC_IOCTL_SMLEN);
+ }
+ return err;
+}
+#endif /* WL_FILS */
+
+#ifdef WL_FILS
+static s32
+wl_fillup_resp_params(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ u8 *curbssid, void *params, u32 status)
+{
+ struct wl_connect_info *conn_info = wl_to_conn(cfg);
+ struct cfg80211_connect_resp_params *resp_params;
+ struct wl_fils_info *fils_info = NULL;
+ struct wlc_ssid *ssid = NULL;
+ struct wiphy *wiphy = NULL;
+ s32 ret = BCME_OK;
+
+ fils_info = wl_to_fils_info(cfg);
+ ssid = (struct wlc_ssid *)wl_read_prof(cfg, ndev, WL_PROF_SSID);
+ wiphy = bcmcfg_to_wiphy(cfg);
+ if (!ssid) {
+ WL_ERR(("ssid profile not found\n"));
+ return BCME_ERROR;
+ }
+
+ resp_params = (struct cfg80211_connect_resp_params *)params;
+ resp_params->status = status;
+ resp_params->bssid = curbssid;
+ resp_params->bss = CFG80211_GET_BSS(wiphy, NULL, curbssid,
+ ssid->SSID, ssid->SSID_len);
+ if (!resp_params->bss) {
+ WL_ERR(("null bss\n"));
+ return BCME_ERROR;
+ }
+
+ resp_params->req_ie = conn_info->req_ie;
+ resp_params->req_ie_len = conn_info->req_ie_len;
+ resp_params->resp_ie = conn_info->resp_ie;
+ resp_params->resp_ie_len = conn_info->resp_ie_len;
+#ifdef WL_FILS_ROAM_OFFLD
+ /* Kernel >= 4.17 has introduced FILS data struct inside resp params */
+ resp_params->fils.kek = fils_info->fils_kek;
+ resp_params->fils.kek_len = fils_info->fils_kek_len;
+ resp_params->fils.update_erp_next_seq_num = true;
+ resp_params->fils.erp_next_seq_num = fils_info->fils_erp_next_seq_num;
+ resp_params->fils.pmk = fils_info->fils_pmk;
+ resp_params->fils.pmk_len = fils_info->fils_kek_len;
+ resp_params->fils.pmkid = fils_info->fils_pmkid;
+#else
+ resp_params->fils_kek = fils_info->fils_kek;
+ resp_params->fils_kek_len = fils_info->fils_kek_len;
+ resp_params->update_erp_next_seq_num = true;
+ resp_params->fils_erp_next_seq_num = fils_info->fils_erp_next_seq_num;
+ resp_params->pmk = fils_info->fils_pmk;
+ resp_params->pmk_len = fils_info->fils_kek_len;
+ resp_params->pmkid = fils_info->fils_pmkid;
+#endif /* WL_FILS_ROAM_OFFLD */
+
+ return ret;
+}
+#endif /* WL_FILS */
+
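+/*
+ * Report the final connect result to cfg80211. On success the cached bss
+ * entry and the ssid_len are sanity-checked first; a failed check
+ * downgrades the report to a connect failure so the stack does not end up
+ * with a stale current_bss.
+ */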
+static s32
+wl_bss_connect_done(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data, bool completed)
+{
+ struct wl_connect_info *conn_info = wl_to_conn(cfg);
+ struct wl_security *sec = wl_read_prof(cfg, ndev, WL_PROF_SEC);
+ s32 err = 0;
+ u8 *conn_req_bssid = wl_read_prof(cfg, ndev, WL_PROF_LATEST_BSSID);
+ u32 status;
+#ifdef WL_FILS
+ struct cfg80211_connect_resp_params resp_params = {0};
+#endif /* WL_FILS */
+
+ u8 *curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
+ u32 event_type = ntoh32(e->event_type);
+ struct cfg80211_bss *bss = NULL;
+#ifdef BCMDONGLEHOST
+ dhd_pub_t *dhdp;
+ dhdp = (dhd_pub_t *)(cfg->pub);
+ BCM_REFERENCE(dhdp);
+#endif /* BCMDONGLEHOST */
+
+ WL_DBG((" enter\n"));
+ if (!sec) {
+ WL_ERR(("sec is NULL\n"));
+ err = -ENODEV;
+ goto exit;
+ }
+
+ if (!wl_get_drv_status(cfg, CONNECTING, ndev)) {
+ WL_INFORM_MEM(("[%s] Ignore event:%d. drv status"
+ " connecting:%x. connected:%d\n",
+ ndev->name, event_type, wl_get_drv_status(cfg, CONNECTING, ndev),
+ wl_get_drv_status(cfg, CONNECTED, ndev)));
+ err = BCME_OK;
+ goto exit;
+ }
+
+ if (!conn_req_bssid) {
+ WL_ERR(("conn_req bssid is null\n"));
+ err = -EINVAL;
+ goto exit;
+ }
+
+ if (ETHER_ISNULLADDR(curbssid) &&
+ !ETHER_ISNULLADDR(conn_req_bssid)) {
+ WL_DBG(("copy bssid\n"));
+ memcpy(curbssid, conn_req_bssid, ETHER_ADDR_LEN);
+ }
+
+ wl_cfgscan_cancel_scan(cfg);
+ bzero(&cfg->last_roamed_addr, ETHER_ADDR_LEN);
+
+ if (completed) {
+ wl_get_assoc_ies(cfg, ndev);
+ wl_update_prof(cfg, ndev, NULL, (const void *)(e->addr.octet),
+ WL_PROF_BSSID);
+ curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
+ /*
+ * The CFG layer relies on cached IEs (from probe/beacon) to fetch the
+ * matching bss. For cases where no match is available, the cache
+ * needs to be updated based on the bss info from fw.
+ */
+ wl_update_bss_info(cfg, ndev, true);
+ if (cfg->wlc_ver.wlc_ver_major < PMKDB_WLC_VER) {
+ wl_update_pmklist(ndev, cfg->pmk_list, err);
+ }
+ wl_set_drv_status(cfg, CONNECTED, ndev);
+
+ if (wl_cfg80211_verify_bss(cfg, ndev, &bss) != true) {
+ /* If bss entry is not available in the cfg80211 bss cache
+ * the wireless stack will complain and won't populate
+ * wdev->current_bss ptr
+ */
+ WL_ERR(("BSS entry not found. Indicate assoc event failure\n"));
+ completed = false;
+ sec->auth_assoc_res_status = WLAN_STATUS_UNSPECIFIED_FAILURE;
+ }
+ if (!ndev->ieee80211_ptr->ssid_len) {
+ /* In certain cases, the delayed cfg80211 work from
+ * disconnect context will induce race conditions in
+ * which the ssid_len will be cleared, but dhd is in
+ * connecting state. Return connect failure to avoid
+ * getting locked in connected state.
+ */
+ WL_ERR(("ssid_len=0. Indicate assoc event failure\n"));
+ completed = false;
+ sec->auth_assoc_res_status = WLAN_STATUS_UNSPECIFIED_FAILURE;
+ wl_clr_drv_status(cfg, CONNECTED, ndev);
+ }
+ }
+
+ /* Clear status after updating CONNECTED status */
+ wl_clr_drv_status(cfg, CONNECTING, ndev);
+
+ /* update status field */
+ if (completed) {
+ status = WLAN_STATUS_SUCCESS;
+ } else if (sec->auth_assoc_res_status) {
+ status = sec->auth_assoc_res_status;
+ } else {
+ status = WLAN_STATUS_UNSPECIFIED_FAILURE;
+ }
+
+ if (completed) {
+ WL_MSG(ndev->name, "Report connect result - connection succeeded\n");
+#ifdef WL_EXT_IAPSTA
+ wl_ext_in4way_sync(ndev, 0, WL_EXT_STATUS_CONNECTED, NULL);
+ wl_ext_iapsta_enable_master_if(ndev, TRUE);
+#endif
+ } else {
+ WL_MSG(ndev->name, "Report connect result - connection failed\n");
+#ifdef WL_EXT_IAPSTA
+ wl_ext_in4way_sync(ndev, STA_NO_BTC_IN4WAY, WL_EXT_STATUS_DISCONNECTED, NULL);
+#endif
+ }
+
+#ifdef WL_FILS
+ if ((sec->auth_type == NL80211_AUTHTYPE_FILS_SK_PFS) ||
+ (sec->auth_type == NL80211_AUTHTYPE_FILS_SK)) {
+ if ((err = wl_get_fils_connect_params(cfg, ndev)) != BCME_OK) {
+ WL_ERR(("FILS params fetch failed.\n"));
+ goto exit;
+ }
+
+ if (wl_fillup_resp_params(cfg, ndev, curbssid, &resp_params, status) != BCME_OK) {
+ WL_ERR(("connect resp_params failure\n"));
+ err = BCME_ERROR;
+ goto exit;
+ }
+ cfg80211_connect_done(ndev, &resp_params, GFP_KERNEL);
+ }
+ else
+#endif /* WL_FILS */
+ {
+ CFG80211_CONNECT_RESULT(ndev, curbssid, bss,
+ conn_info->req_ie, conn_info->req_ie_len,
+ conn_info->resp_ie, conn_info->resp_ie_len,
+ status, GFP_KERNEL);
+ }
+
+ if (completed) {
+ LOG_TS(cfg, conn_cmplt);
+ LOG_TS(cfg, authorize_start);
+ WL_INFORM_MEM(("[%s] Report connect result - "
+ "connection succeeded\n", ndev->name));
+
+#ifdef BCMWAPI_WPI
+ if (sec->cipher_group == WLAN_CIPHER_SUITE_SMS4) {
+ /* In the WAPI case, there is no separate authorize call
+ * from the upper layer, so set the state from connect done.
+ */
+ wl_set_drv_status(cfg, AUTHORIZED, ndev);
+ CLR_TS(cfg, authorize_start);
+ LOG_TS(cfg, authorize_cmplt);
+ }
+#endif /* BCMWAPI_WPI */
+ }
+
+exit:
+ CLR_TS(cfg, conn_start);
+ return err;
+}
+
+static s32
+wl_notify_mic_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+ struct net_device *ndev = NULL;
+ u16 flags = ntoh16(e->flags);
+ enum nl80211_key_type key_type;
+
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+ WL_INFORM_MEM(("[%s] mic fail event - " MACDBG " \n",
+ ndev->name, MAC2STRDBG(e->addr.octet)));
+ mutex_lock(&cfg->usr_sync);
+ if (flags & WLC_EVENT_MSG_GROUP)
+ key_type = NL80211_KEYTYPE_GROUP;
+ else
+ key_type = NL80211_KEYTYPE_PAIRWISE;
+
+ wl_flush_fw_log_buffer(ndev, FW_LOGSET_MASK_ALL);
+ cfg80211_michael_mic_failure(ndev, (const u8 *)&e->addr, key_type, -1,
+ NULL, GFP_KERNEL);
+ mutex_unlock(&cfg->usr_sync);
+
+ return 0;
+}
+
+#ifdef BT_WIFI_HANDOVER
+static s32
+wl_notify_bt_wifi_handover_req(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+ struct net_device *ndev = NULL;
+ u32 event = ntoh32(e->event_type);
+ u32 datalen = ntoh32(e->datalen);
+ s32 err;
+
+ WL_ERR(("wl_notify_bt_wifi_handover_req: event_type : %d, datalen : %d\n", event, datalen));
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+ err = wl_genl_send_msg(ndev, event, data, (u16)datalen, 0, 0);
+
+ return err;
+}
+#endif /* BT_WIFI_HANDOVER */
+
+#ifdef WL_CFG80211_GON_COLLISION
+static void
+wl_gon_req_collision(struct bcm_cfg80211 *cfg, wl_action_frame_t *tx_act_frm,
+ wifi_p2p_pub_act_frame_t *rx_act_frm, struct net_device *ndev,
+ struct ether_addr sa, struct ether_addr da)
+{
+ if (cfg->afx_hdl->pending_tx_act_frm == NULL)
+ return;
+
+ if (tx_act_frm &&
+ wl_cfgp2p_is_pub_action(tx_act_frm->data, tx_act_frm->len)) {
+ wifi_p2p_pub_act_frame_t *pact_frm;
+
+ pact_frm = (wifi_p2p_pub_act_frame_t *)tx_act_frm->data;
+
+ if (!(pact_frm->subtype == P2P_PAF_GON_REQ &&
+ rx_act_frm->subtype == P2P_PAF_GON_REQ)) {
+ return;
+ }
+ }
+
+ WL_ERR((" GO NEGO Request COLLISION !!! \n"));
+
+ /* If the sa (peer) addr is less than the da (my) addr,
+ * my device will process the peer's GON request and block sending
+ * its own GON req.
+ *
+ * Otherwise (sa addr > da addr),
+ * my device will process its own GON request and drop the peer's GON req.
+ */
+ if (memcmp(sa.octet, da.octet, ETHER_ADDR_LEN) < 0) {
+ /* block to send tx gon request */
+ cfg->block_gon_req_tx_count = BLOCK_GON_REQ_MAX_NUM;
+ WL_ERR((" block to send gon req tx !!!\n"));
+
+ /* if we are finding a common channel for sending af,
+ * do not scan more to block to send current gon req
+ */
+ if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
+ wl_clr_drv_status(cfg, FINDING_COMMON_CHANNEL, ndev);
+ complete(&cfg->act_frm_scan);
+ }
+ } else {
+ /* drop gon request of peer to process gon request by my device. */
+ WL_ERR((" drop to receive gon req rx !!! \n"));
+ cfg->block_gon_req_rx_count = BLOCK_GON_REQ_MAX_NUM;
+ }
+
+ return;
+}
+#endif /* WL_CFG80211_GON_COLLISION */
+
+void
+wl_stop_wait_next_action_frame(struct bcm_cfg80211 *cfg, struct net_device *ndev, u8 bsscfgidx)
+{
+ s32 err = 0;
+
+ if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
+ del_timer_sync(&cfg->p2p->listen_timer);
+ if (cfg->afx_hdl != NULL) {
+ if (cfg->afx_hdl->dev != NULL) {
+ wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev);
+ wl_clr_drv_status(cfg, FINDING_COMMON_CHANNEL, cfg->afx_hdl->dev);
+ }
+ cfg->afx_hdl->peer_chan = WL_INVALID;
+ }
+ complete(&cfg->act_frm_scan);
+ WL_DBG(("*** Wake UP ** Working afx searching is cleared\n"));
+ } else if (wl_get_drv_status_all(cfg, SENDING_ACT_FRM)) {
+ if (!(wl_get_p2p_status(cfg, ACTION_TX_COMPLETED) ||
+ wl_get_p2p_status(cfg, ACTION_TX_NOACK)))
+ wl_set_p2p_status(cfg, ACTION_TX_COMPLETED);
+
+ WL_DBG(("*** Wake UP ** abort actframe iovar on bsscfxidx %d\n", bsscfgidx));
+ /* Scan engine is not used for sending action frames in the latest driver
+ * branches. actframe_abort is used in the latest driver branches
+ * instead of scan abort.
+ * If actframe_abort iovar succeeds, don't execute scan abort.
+ * If actframe_abort fails with unsupported error,
+ * execute scan abort (for backward compatibility).
+ */
+ if (cfg->af_sent_channel) {
+ err = wldev_iovar_setint_bsscfg(ndev, "actframe_abort", 1, bsscfgidx);
+ if (err < 0) {
+ if (err == BCME_UNSUPPORTED) {
+ mutex_lock(&cfg->scan_sync);
+ wl_cfgscan_scan_abort(cfg);
+ mutex_unlock(&cfg->scan_sync);
+ } else {
+ WL_ERR(("actframe_abort failed. ret:%d\n", err));
+ }
+ }
+ }
+ }
+#ifdef WL_CFG80211_SYNC_GON
+ else if (wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM_LISTEN)) {
+ WL_DBG(("*** Wake UP ** abort listen for next af frame\n"));
+ /* So abort scan to cancel listen */
+ wl_cfgscan_cancel_scan(cfg);
+ }
+#endif /* WL_CFG80211_SYNC_GON */
+}
+
+#if defined(WES_SUPPORT)
+int wl_cfg80211_set_wes_mode(struct net_device *dev, int mode)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ cfg->wes_mode = mode;
+ return 0;
+}
+
+int wl_cfg80211_get_wes_mode(struct net_device *dev)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ return cfg->wes_mode;
+}
+
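+/*
+ * A WES vendor-specific action frame is identified by its first four
+ * bytes: category 0x7f (vendor specific) followed by the OUI 00:00:f0.
+ */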
+bool wl_cfg80211_is_wes(void *frame, u32 frame_len)
+{
+ unsigned char *data;
+
+ if (frame == NULL) {
+ WL_ERR(("Invalid frame \n"));
+ return false;
+ }
+
+ if (frame_len < 4) {
+ WL_ERR(("Invalid frame length [%d] \n", frame_len));
+ return false;
+ }
+
+ data = frame;
+
+ if (memcmp(data, "\x7f\x00\x00\xf0", 4) == 0) {
+ WL_DBG(("Receive WES VS Action Frame \n"));
+ return true;
+ }
+
+ return false;
+}
+
+int
+wl_cfg80211_set_ncho_mode(struct net_device *dev, int mode)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ cfg->ncho_mode = mode;
+ return BCME_OK;
+}
+
+int
+wl_cfg80211_get_ncho_mode(struct net_device *dev)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ return cfg->ncho_mode;
+}
+#endif /* WES_SUPPORT */
+
+int wl_cfg80211_get_ioctl_version(void)
+{
+ return ioctl_version;
+}
+
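+/*
+ * Central RX path for management frames reported by fw: rebuilds the
+ * 802.11 header for action frames, lets the P2P/GAS/TDLS/DPP/QoS helpers
+ * inspect the payload and finally forwards the frame via cfg80211_rx_mgmt().
+ */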
+static s32
+wl_notify_rx_mgmt_frame(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+ struct ether_addr da;
+ struct ether_addr bssid;
+ bool isfree = false;
+ s32 err = 0;
+ s32 freq;
+ struct net_device *ndev = NULL;
+ wifi_p2p_pub_act_frame_t *act_frm = NULL;
+ wifi_p2p_action_frame_t *p2p_act_frm = NULL;
+ wifi_p2psd_gas_pub_act_frame_t *sd_act_frm = NULL;
+ wl_event_rx_frame_data_t *rxframe;
+ u32 event;
+ u8 *mgmt_frame;
+ u8 bsscfgidx;
+ u32 mgmt_frame_len;
+ chanspec_t chspec;
+#if defined(BCMDONGLEHOST) && defined(TDLS_MSG_ONLY_WFD) && defined(WLTDLS)
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+#endif /* BCMDONGLEHOST && TDLS_MSG_ONLY_WFD && WLTDLS */
+ if (ntoh32(e->datalen) < sizeof(wl_event_rx_frame_data_t)) {
+ WL_ERR(("wrong datalen:%d\n", ntoh32(e->datalen)));
+ return -EINVAL;
+ }
+ mgmt_frame_len = ntoh32(e->datalen) - (uint32)sizeof(wl_event_rx_frame_data_t);
+ event = ntoh32(e->event_type);
+ bsscfgidx = e->bsscfgidx;
+ rxframe = (wl_event_rx_frame_data_t *)data;
+ if (!rxframe) {
+ WL_ERR(("rxframe: NULL\n"));
+ return -EINVAL;
+ }
+ chspec = ntoh16(rxframe->channel);
+ bzero(&bssid, ETHER_ADDR_LEN);
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+ if ((ndev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP) &&
+ (event == WLC_E_PROBREQ_MSG)) {
+ /* The probe req event comes up on the wlan0 interface even though
+ * the frame has been received on the correct (AP) interface
+ * in firmware. Find the right interface to pass it up.
+ * Required for WPS-AP certification 4.2.13.
+ * TODO: Need a better fix. The current fix doesn't take
+ * care of dual AP/GO scenarios.
+ */
+ struct net_info *iter, *next;
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ for_each_ndev(cfg, iter, next) {
+ GCC_DIAGNOSTIC_POP();
+ if (iter->ndev && iter->wdev &&
+ iter->wdev->iftype == NL80211_IFTYPE_AP) {
+ ndev = iter->ndev;
+ cfgdev = ndev_to_cfgdev(ndev);
+ break;
+ }
+ }
+ }
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39) && !defined(WL_COMPAT_WIRELESS)
+ freq = ieee80211_channel_to_frequency(CHSPEC_CHANNEL(chspec));
+#else
+ freq = wl_channel_to_frequency(wf_chspec_ctlchan(chspec), CHSPEC_BAND(chspec));
+#endif
+ if (event == WLC_E_ACTION_FRAME_RX) {
+ u8 ioctl_buf[WLC_IOCTL_SMLEN];
+ if ((err = wldev_iovar_getbuf_bsscfg(ndev, "cur_etheraddr",
+ NULL, 0, ioctl_buf, sizeof(ioctl_buf), bsscfgidx,
+ NULL)) != BCME_OK) {
+ WL_ERR(("WLC_GET_CUR_ETHERADDR failed, error %d\n", err));
+ goto exit;
+ }
+
+ err = wldev_ioctl_get(ndev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN);
+ if (err < 0)
+ WL_ERR(("WLC_GET_BSSID error %d\n", err));
+ memcpy(da.octet, ioctl_buf, ETHER_ADDR_LEN);
+ err = wl_frame_get_mgmt(cfg, FC_ACTION, &da, &e->addr, &bssid,
+ &mgmt_frame, &mgmt_frame_len,
+ (u8 *)((wl_event_rx_frame_data_t *)rxframe + 1));
+ if (err < 0) {
+ WL_ERR(("Error in receiving action frame len %d channel %d freq %d\n",
+ mgmt_frame_len, CHSPEC_CHANNEL(chspec), freq));
+ goto exit;
+ }
+ isfree = true;
+
+ wl_cfgp2p_print_actframe(false, &mgmt_frame[DOT11_MGMT_HDR_LEN],
+ mgmt_frame_len - DOT11_MGMT_HDR_LEN, CHSPEC_CHANNEL(chspec));
+
+ if (wl_cfgp2p_is_pub_action(&mgmt_frame[DOT11_MGMT_HDR_LEN],
+ mgmt_frame_len - DOT11_MGMT_HDR_LEN)) {
+ act_frm = (wifi_p2p_pub_act_frame_t *)
+ (&mgmt_frame[DOT11_MGMT_HDR_LEN]);
+ } else if (wl_cfgp2p_is_p2p_action(&mgmt_frame[DOT11_MGMT_HDR_LEN],
+ mgmt_frame_len - DOT11_MGMT_HDR_LEN)) {
+ p2p_act_frm = (wifi_p2p_action_frame_t *)
+ (&mgmt_frame[DOT11_MGMT_HDR_LEN]);
+ (void) p2p_act_frm;
+ } else if (wl_cfg80211_is_dpp_gas_action(&mgmt_frame[DOT11_MGMT_HDR_LEN],
+ mgmt_frame_len - DOT11_MGMT_HDR_LEN)) {
+ wl_clr_drv_status(cfg, WAITING_NEXT_ACT_FRM, ndev);
+
+ /* Stop waiting for next AF. */
+ wl_stop_wait_next_action_frame(cfg, ndev, bsscfgidx);
+ } else if (wl_cfgp2p_is_gas_action(&mgmt_frame[DOT11_MGMT_HDR_LEN],
+ mgmt_frame_len - DOT11_MGMT_HDR_LEN)) {
+#ifdef WL_SDO
+ if (wl_get_p2p_status(cfg, DISC_IN_PROGRESS)) {
+ WL_ERR(("SD offload is in progress. Don't report the"
+ "frame via rx_mgmt path\n"));
+ goto exit;
+ }
+#endif
+ sd_act_frm = (wifi_p2psd_gas_pub_act_frame_t *)
+ (&mgmt_frame[DOT11_MGMT_HDR_LEN]);
+ if (sd_act_frm && wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM)) {
+ if (cfg->next_af_subtype == sd_act_frm->action) {
+ WL_DBG(("We got a right next frame of SD!(%d)\n",
+ sd_act_frm->action));
+ wl_clr_drv_status(cfg, WAITING_NEXT_ACT_FRM, ndev);
+
+ /* Stop waiting for next AF. */
+ wl_stop_wait_next_action_frame(cfg, ndev, bsscfgidx);
+ }
+ }
+ (void) sd_act_frm;
+#ifdef WLTDLS
+ } else if ((mgmt_frame[DOT11_MGMT_HDR_LEN] == TDLS_AF_CATEGORY) ||
+ (wl_cfg80211_is_tdls_tunneled_frame(
+ &mgmt_frame[DOT11_MGMT_HDR_LEN],
+ mgmt_frame_len - DOT11_MGMT_HDR_LEN))) {
+ if (mgmt_frame[DOT11_MGMT_HDR_LEN] == TDLS_AF_CATEGORY) {
+ WL_ERR((" TDLS Action Frame Received type = %d \n",
+ mgmt_frame[DOT11_MGMT_HDR_LEN + 1]));
+ }
+#ifdef TDLS_MSG_ONLY_WFD
+#ifdef BCMDONGLEHOST
+ if (!dhdp->tdls_mode) {
+ WL_DBG((" TDLS Frame filtered \n"));
+ goto exit;
+ }
+#endif /* BCMDONGLEHOST */
+#else
+ if (mgmt_frame[DOT11_MGMT_HDR_LEN + 1] == TDLS_ACTION_SETUP_RESP) {
+ cfg->tdls_mgmt_frame = mgmt_frame;
+ cfg->tdls_mgmt_frame_len = mgmt_frame_len;
+ cfg->tdls_mgmt_freq = freq;
+ return 0;
+ }
+#endif /* TDLS_MSG_ONLY_WFD */
+#endif /* WLTDLS */
+#ifdef QOS_MAP_SET
+ } else if (mgmt_frame[DOT11_MGMT_HDR_LEN] == DOT11_ACTION_CAT_QOS) {
+ /* update QoS map set table */
+ bcm_tlv_t * qos_map_ie = NULL;
+ uint8 offset = DOT11_MGMT_HDR_LEN + DOT11_ACTION_FRMHDR_LEN;
+ if ((qos_map_ie = bcm_parse_tlvs(&mgmt_frame[offset],
+ mgmt_frame_len - offset, DOT11_MNG_QOS_MAP_ID)) != NULL) {
+ WL_DBG((" QoS map set IE found in QoS action frame\n"));
+ if (!cfg->up_table) {
+ cfg->up_table = (uint8 *)MALLOC(cfg->osh, UP_TABLE_MAX);
+ }
+ wl_set_up_table(cfg->up_table, qos_map_ie);
+ } else {
+ MFREE(cfg->osh, cfg->up_table, UP_TABLE_MAX);
+ }
+#endif /* QOS_MAP_SET */
+#ifdef WBTEXT
+ } else if (mgmt_frame[DOT11_MGMT_HDR_LEN] == DOT11_ACTION_CAT_RRM) {
+ /* radio measurement category */
+ switch (mgmt_frame[DOT11_MGMT_HDR_LEN+1]) {
+ case DOT11_RM_ACTION_NR_REP:
+ if (wl_cfg80211_recv_nbr_resp(ndev,
+ &mgmt_frame[DOT11_MGMT_HDR_LEN],
+ mgmt_frame_len - DOT11_MGMT_HDR_LEN)
+ == BCME_OK) {
+ WL_DBG(("RCC updated by nbr response\n"));
+ }
+ break;
+ default:
+ break;
+ }
+#endif /* WBTEXT */
+ } else {
+ /* WAR: there is currently no way to identify the DA of an action frame.
+ * The firmware code would have to be modified to include the DA and SA
+ * of the action frame as event data.
+ */
+ /*
+ * If we got a normal action frame and ndev is p2p0,
+ * we have to change ndev from p2p0 to wlan0.
+ */
+#if defined(WES_SUPPORT)
+ if (wl_cfg80211_is_wes(&mgmt_frame[DOT11_MGMT_HDR_LEN],
+ mgmt_frame_len - DOT11_MGMT_HDR_LEN) && cfg->wes_mode == 0) {
+ /* Ignore WES VS Action frame */
+ goto exit;
+ }
+#endif /* WES_SUPPORT */
+
+ /* We need to check proper action frame is received */
+ if (cfg->next_af_subtype != WL_PUB_AF_STYPE_INVALID) {
+ u8 action = 0;
+ if (wl_get_public_action(&mgmt_frame[DOT11_MGMT_HDR_LEN],
+ mgmt_frame_len - DOT11_MGMT_HDR_LEN, &action) != BCME_OK) {
+ WL_DBG(("Recived action is not public action frame\n"));
+ } else if (cfg->next_af_subtype == action) {
+ WL_DBG(("Recived action is the waiting action(%d)\n",
+ action));
+ wl_clr_drv_status(cfg, WAITING_NEXT_ACT_FRM, ndev);
+
+ /* Stop waiting for next AF. */
+ wl_stop_wait_next_action_frame(cfg, ndev, bsscfgidx);
+ }
+ }
+ }
+
+ if (act_frm) {
+#ifdef WL_CFG80211_GON_COLLISION
+ if (act_frm->subtype == P2P_PAF_GON_REQ) {
+ wl_gon_req_collision(cfg,
+ &cfg->afx_hdl->pending_tx_act_frm->action_frame,
+ act_frm, ndev, e->addr, da);
+
+ if (cfg->block_gon_req_rx_count) {
+ WL_ERR(("drop frame GON Req Rx : count (%d)\n",
+ cfg->block_gon_req_rx_count));
+ cfg->block_gon_req_rx_count--;
+ goto exit;
+ }
+ } else if (act_frm->subtype == P2P_PAF_GON_CONF) {
+ /* if go formation done, clear it */
+ cfg->block_gon_req_tx_count = 0;
+ cfg->block_gon_req_rx_count = 0;
+ }
+#endif /* WL_CFG80211_GON_COLLISION */
+
+ if (wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM)) {
+ if (cfg->next_af_subtype == act_frm->subtype) {
+ WL_DBG(("We got a right next frame!(%d)\n",
+ act_frm->subtype));
+ wl_clr_drv_status(cfg, WAITING_NEXT_ACT_FRM, ndev);
+
+ if (cfg->next_af_subtype == P2P_PAF_GON_CONF) {
+ OSL_SLEEP(20);
+ }
+
+ /* Stop waiting for next AF. */
+ wl_stop_wait_next_action_frame(cfg, ndev, bsscfgidx);
+ } else if ((cfg->next_af_subtype == P2P_PAF_GON_RSP) &&
+ (act_frm->subtype == P2P_PAF_GON_REQ)) {
+ /* If current received frame is GO NEG REQ and next
+ * expected frame is GO NEG RESP, do not send it up.
+ */
+ WL_ERR(("GO Neg req received while waiting for RESP."
+ "Discard incoming frame\n"));
+ goto exit;
+ }
+ }
+ }
+
+ if (wl_cfg80211_is_dpp_frame(
+ (void *)&mgmt_frame[DOT11_MGMT_HDR_LEN],
+ mgmt_frame_len - DOT11_MGMT_HDR_LEN)) {
+ wl_dpp_pa_frame_t *pa =
+ (wl_dpp_pa_frame_t *)&mgmt_frame[DOT11_MGMT_HDR_LEN];
+ if (cfg->next_af_subtype == pa->ftype) {
+ WL_DBG(("matching dpp frm (%d) found. abort dwell\n", pa->ftype));
+ wl_clr_drv_status(cfg, WAITING_NEXT_ACT_FRM, ndev);
+
+ /* Stop waiting for next AF. */
+ wl_stop_wait_next_action_frame(cfg, ndev, bsscfgidx);
+ }
+ }
+ if (act_frm && (act_frm->subtype == P2P_PAF_GON_CONF)) {
+ WL_DBG(("P2P: GO_NEG_PHASE status cleared \n"));
+ wl_clr_p2p_status(cfg, GO_NEG_PHASE);
+ }
+ } else if (event == WLC_E_PROBREQ_MSG) {
+
+ /* Handle probe request frames:
+ * WPS-AP certification 4.2.13
+ */
+ struct parsed_ies prbreq_ies;
+ u32 prbreq_ie_len = 0;
+ bool pbc = 0;
+
+ WL_DBG((" Event WLC_E_PROBREQ_MSG received\n"));
+ mgmt_frame = (u8 *)(data);
+ mgmt_frame_len = ntoh32(e->datalen);
+ if (mgmt_frame_len < DOT11_MGMT_HDR_LEN) {
+ WL_ERR(("wrong datalen:%d\n", mgmt_frame_len));
+ return -EINVAL;
+ }
+ prbreq_ie_len = mgmt_frame_len - DOT11_MGMT_HDR_LEN;
+
+ /* Parse prob_req IEs */
+ if (wl_cfg80211_parse_ies(&mgmt_frame[DOT11_MGMT_HDR_LEN],
+ prbreq_ie_len, &prbreq_ies) < 0) {
+ WL_ERR(("Prob req get IEs failed\n"));
+ return 0;
+ }
+
+ if (prbreq_ies.wps_ie != NULL) {
+ wl_validate_wps_ie(
+ (const char *)prbreq_ies.wps_ie, prbreq_ies.wps_ie_len, &pbc);
+ WL_DBG((" wps_ie exist pbc = %d\n", pbc));
+ /* if pbc method, send prob_req mgmt frame to upper layer */
+ if (!pbc)
+ return 0;
+ } else
+ return 0;
+ } else {
+ mgmt_frame = (u8 *)((wl_event_rx_frame_data_t *)rxframe + 1);
+
+ /* wpa_supplicant uses the probe request event to restart another GON req,
+ * but that causes GON req repetition.
+ * So if the src addr of the probe req matches my target device,
+ * do not send the probe request event up while an action frame is being sent.
+ */
+ if (event == WLC_E_P2P_PROBREQ_MSG) {
+ WL_DBG((" Event %s\n", (event == WLC_E_P2P_PROBREQ_MSG) ?
+ "WLC_E_P2P_PROBREQ_MSG":"WLC_E_PROBREQ_MSG"));
+
+#ifdef WL_CFG80211_USE_PRB_REQ_FOR_AF_TX
+ if (WL_DRV_STATUS_SENDING_AF_FRM_EXT(cfg) &&
+ !memcmp(cfg->afx_hdl->tx_dst_addr.octet, e->addr.octet,
+ ETHER_ADDR_LEN)) {
+ if (cfg->afx_hdl->pending_tx_act_frm &&
+ wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
+ chanspec_t channel = hton16(rxframe->channel);
+ WL_DBG(("PROBE REQUEST : Peer found, channel : %d\n",
+ channel));
+ cfg->afx_hdl->peer_chan = channel;
+ complete(&cfg->act_frm_scan);
+ }
+ }
+#endif /* WL_CFG80211_USE_PRB_REQ_FOR_AF_TX */
+
+ /* Filter any P2P probe reqs arriving during the
+ * GO-NEG Phase
+ */
+ if (cfg->p2p &&
+#if defined(P2P_IE_MISSING_FIX)
+ cfg->p2p_prb_noti &&
+#endif
+ wl_get_p2p_status(cfg, GO_NEG_PHASE)) {
+ WL_DBG(("Filtering P2P probe_req while "
+ "being in GO-Neg state\n"));
+ return 0;
+ }
+ }
+ }
+
+ if (discover_cfgdev(cfgdev, cfg))
+ WL_DBG(("Rx Managment frame For P2P Discovery Interface \n"));
+ else
+ WL_DBG(("Rx Managment frame For Iface (%s) \n", ndev->name));
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
+ cfg80211_rx_mgmt(cfgdev, freq, 0, mgmt_frame, mgmt_frame_len, 0);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0))
+ cfg80211_rx_mgmt(cfgdev, freq, 0, mgmt_frame, mgmt_frame_len, 0, GFP_ATOMIC);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || \
+ defined(WL_COMPAT_WIRELESS)
+ cfg80211_rx_mgmt(cfgdev, freq, 0, mgmt_frame, mgmt_frame_len, GFP_ATOMIC);
+#else
+ cfg80211_rx_mgmt(cfgdev, freq, mgmt_frame, mgmt_frame_len, GFP_ATOMIC);
+#endif /* LINUX_VERSION >= VERSION(3, 18, 0) */
+
+ WL_DBG(("mgmt_frame_len (%d) , e->datalen (%d), channel (%d), freq (%d)\n",
+ mgmt_frame_len, ntoh32(e->datalen), CHSPEC_CHANNEL(chspec), freq));
+exit:
+ if (isfree) {
+ MFREE(cfg->osh, mgmt_frame, mgmt_frame_len);
+ }
+ return err;
+}
+
+#ifdef CUSTOMER_HW6
+static s32
+wl_cfg80211_ccode_evt_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *event, void *data)
+{
+ s32 err = 0;
+ /* Indicate to upper layer for regdom change */
+ WL_INFORM_MEM(("Received country code change event\n"));
+ err = wl_update_wiphybands(cfg, true);
+
+ return err;
+}
+#endif /* CUSTOMER_HW6 */
+
+static void wl_init_conf(struct wl_conf *conf)
+{
+ WL_DBG(("Enter \n"));
+ conf->frag_threshold = (u32)-1;
+ conf->rts_threshold = (u32)-1;
+ conf->retry_short = (u32)-1;
+ conf->retry_long = (u32)-1;
+ conf->tx_power = -1;
+}
+
+static void wl_init_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev)
+{
+ unsigned long flags;
+ struct wl_profile *profile = wl_get_profile_by_netdev(cfg, ndev);
+
+ if (!profile) {
+ WL_ERR(("profile null\n"));
+ return;
+ }
+
+ WL_CFG_DRV_LOCK(&cfg->cfgdrv_lock, flags);
+ bzero(profile, sizeof(struct wl_profile));
+ WL_CFG_DRV_UNLOCK(&cfg->cfgdrv_lock, flags);
+}
+
+static void wl_init_event_handler(struct bcm_cfg80211 *cfg)
+{
+ bzero(cfg->evt_handler, sizeof(cfg->evt_handler));
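+	/* evt_handler[] is a dispatch table indexed by firmware event type;
+	 * wl_event_handler() routes each dequeued event through it.
+	 */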
+
+ cfg->evt_handler[WLC_E_SCAN_COMPLETE] = wl_notify_scan_status;
+ cfg->evt_handler[WLC_E_AUTH] = wl_notify_connect_status;
+ cfg->evt_handler[WLC_E_ASSOC] = wl_notify_connect_status;
+ cfg->evt_handler[WLC_E_LINK] = wl_notify_connect_status;
+ cfg->evt_handler[WLC_E_DEAUTH_IND] = wl_notify_connect_status;
+ cfg->evt_handler[WLC_E_DEAUTH] = wl_notify_connect_status;
+ cfg->evt_handler[WLC_E_DISASSOC_IND] = wl_notify_connect_status;
+ cfg->evt_handler[WLC_E_ASSOC_IND] = wl_notify_connect_status;
+ cfg->evt_handler[WLC_E_REASSOC_IND] = wl_notify_connect_status;
+ cfg->evt_handler[WLC_E_ROAM] = wl_notify_roaming_status;
+ cfg->evt_handler[WLC_E_MIC_ERROR] = wl_notify_mic_status;
+ cfg->evt_handler[WLC_E_SET_SSID] = wl_notify_connect_status;
+ cfg->evt_handler[WLC_E_ACTION_FRAME_RX] = wl_notify_rx_mgmt_frame;
+ cfg->evt_handler[WLC_E_PROBREQ_MSG] = wl_notify_rx_mgmt_frame;
+ cfg->evt_handler[WLC_E_P2P_PROBREQ_MSG] = wl_notify_rx_mgmt_frame;
+ cfg->evt_handler[WLC_E_P2P_DISC_LISTEN_COMPLETE] = wl_cfgp2p_listen_complete;
+ cfg->evt_handler[WLC_E_ACTION_FRAME_COMPLETE] = wl_cfgp2p_action_tx_complete;
+ cfg->evt_handler[WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE] = wl_cfgp2p_action_tx_complete;
+ cfg->evt_handler[WLC_E_JOIN] = wl_notify_connect_status;
+ cfg->evt_handler[WLC_E_START] = wl_notify_connect_status;
+ cfg->evt_handler[WLC_E_AUTH_IND] = wl_notify_connect_status;
+ cfg->evt_handler[WLC_E_ASSOC_RESP_IE] = wl_notify_connect_status;
+#ifdef PNO_SUPPORT
+ cfg->evt_handler[WLC_E_PFN_NET_FOUND] = wl_notify_pfn_status;
+#endif /* PNO_SUPPORT */
+#ifdef GSCAN_SUPPORT
+ cfg->evt_handler[WLC_E_PFN_BEST_BATCHING] = wl_notify_gscan_event;
+ cfg->evt_handler[WLC_E_PFN_SCAN_COMPLETE] = wl_notify_gscan_event;
+ cfg->evt_handler[WLC_E_PFN_GSCAN_FULL_RESULT] = wl_notify_gscan_event;
+ cfg->evt_handler[WLC_E_PFN_BSSID_NET_FOUND] = wl_notify_gscan_event;
+ cfg->evt_handler[WLC_E_PFN_BSSID_NET_LOST] = wl_notify_gscan_event;
+ cfg->evt_handler[WLC_E_PFN_SSID_EXT] = wl_notify_gscan_event;
+ cfg->evt_handler[WLC_E_GAS_FRAGMENT_RX] = wl_notify_gscan_event;
+ cfg->evt_handler[WLC_E_ROAM_EXP_EVENT] = wl_handle_roam_exp_event;
+#endif /* GSCAN_SUPPORT */
+#ifdef RSSI_MONITOR_SUPPORT
+ cfg->evt_handler[WLC_E_RSSI_LQM] = wl_handle_rssi_monitor_event;
+#endif /* RSSI_MONITOR_SUPPORT */
+#ifdef WL_SDO
+ cfg->evt_handler[WLC_E_SERVICE_FOUND] = wl_svc_resp_handler;
+ cfg->evt_handler[WLC_E_P2PO_ADD_DEVICE] = wl_notify_device_discovery;
+ cfg->evt_handler[WLC_E_P2PO_DEL_DEVICE] = wl_notify_device_discovery;
+#endif
+#ifdef WLTDLS
+ cfg->evt_handler[WLC_E_TDLS_PEER_EVENT] = wl_tdls_event_handler;
+#endif /* WLTDLS */
+ cfg->evt_handler[WLC_E_BSSID] = wl_notify_roaming_status;
+#ifdef WLAIBSS
+ cfg->evt_handler[WLC_E_AIBSS_TXFAIL] = wl_notify_aibss_txfail;
+#endif /* WLAIBSS */
+#ifdef WL_RELMCAST
+ cfg->evt_handler[WLC_E_RMC_EVENT] = wl_notify_rmc_status;
+#endif /* WL_RELMCAST */
+#ifdef BT_WIFI_HANDOVER
+ cfg->evt_handler[WLC_E_BT_WIFI_HANDOVER_REQ] = wl_notify_bt_wifi_handover_req;
+#endif
+#ifdef WL_NAN
+ cfg->evt_handler[WLC_E_NAN_CRITICAL] = wl_cfgnan_notify_nan_status;
+ cfg->evt_handler[WLC_E_NAN_NON_CRITICAL] = wl_cfgnan_notify_nan_status;
+#endif /* WL_NAN */
+ cfg->evt_handler[WLC_E_CSA_COMPLETE_IND] = wl_csa_complete_ind;
+ cfg->evt_handler[WLC_E_AP_STARTED] = wl_ap_start_ind;
+#ifdef CUSTOM_EVENT_PM_WAKE
+ cfg->evt_handler[WLC_E_EXCESS_PM_WAKE_EVENT] = wl_check_pmstatus;
+#endif /* CUSTOM_EVENT_PM_WAKE */
+#if defined(DHD_LOSSLESS_ROAMING) || defined (DBG_PKT_MON)
+ cfg->evt_handler[WLC_E_ROAM_PREP] = wl_notify_roam_prep_status;
+#endif /* DHD_LOSSLESS_ROAMING || DBG_PKT_MON */
+ cfg->evt_handler[WLC_E_ROAM_START] = wl_notify_roam_start_status;
+#ifdef WL_BAM
+ cfg->evt_handler[WLC_E_ADPS] = wl_adps_event_handler;
+#endif /* WL_BAM */
+ cfg->evt_handler[WLC_E_PSK_SUP] = wl_cfg80211_sup_event_handler;
+#ifdef CUSTOMER_HW6
+ cfg->evt_handler[WLC_E_COUNTRY_CODE_CHANGED] = wl_cfg80211_ccode_evt_handler;
+#endif /* CUSTOMER_HW6 */
+#ifdef WL_BCNRECV
+ cfg->evt_handler[WLC_E_BCNRECV_ABORTED] = wl_bcnrecv_aborted_event_handler;
+#endif /* WL_BCNRECV */
+#ifdef WL_MBO
+ cfg->evt_handler[WLC_E_MBO] = wl_mbo_event_handler;
+#endif /* WL_MBO */
+#ifdef WL_CAC_TS
+ cfg->evt_handler[WLC_E_ADDTS_IND] = wl_cfg80211_cac_event_handler;
+ cfg->evt_handler[WLC_E_DELTS_IND] = wl_cfg80211_cac_event_handler;
+#endif /* WL_CAC_TS */
+#if defined(WL_MBO) || defined(WL_OCE)
+ cfg->evt_handler[WLC_E_PRUNE] = wl_bssid_prune_event_handler;
+#endif /* WL_MBO || WL_OCE */
+#ifdef RTT_SUPPORT
+ cfg->evt_handler[WLC_E_PROXD] = wl_cfg80211_rtt_event_handler;
+#endif
+#ifdef WL_CHAN_UTIL
+ cfg->evt_handler[WLC_E_BSS_LOAD] = wl_cfg80211_bssload_report_event_handler;
+#endif /* WL_CHAN_UTIL */
+#ifdef WL_TWT
+ cfg->evt_handler[WLC_E_TWT_SETUP] = wl_notify_twt_event;
+ cfg->evt_handler[WLC_E_TWT_TEARDOWN] = wl_notify_twt_event;
+ cfg->evt_handler[WLC_E_TWT_INFO_FRM] = wl_notify_twt_event;
+#endif /* WL_TWT */
+#ifdef WL_CLIENT_SAE
+ cfg->evt_handler[WLC_E_JOIN_START] = wl_notify_start_auth;
+#endif /* WL_CLIENT_SAE */
+}
+
+#if defined(STATIC_WL_PRIV_STRUCT)
+static int
+wl_init_escan_result_buf(struct bcm_cfg80211 *cfg)
+{
+#ifdef DUAL_ESCAN_RESULT_BUFFER
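+	/* Two escan buffers are kept so a new scan can fill one while the
+	 * previous results are still being consumed (a ping-pong scheme);
+	 * escan_type[] tracks the state of each buffer.
+	 */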
+ cfg->escan_info.escan_buf[0] = DHD_OS_PREALLOC(cfg->pub,
+ DHD_PREALLOC_WIPHY_ESCAN0, ESCAN_BUF_SIZE);
+ if (cfg->escan_info.escan_buf[0] == NULL) {
+ WL_ERR(("Failed to alloc ESCAN_BUF0\n"));
+ return -ENOMEM;
+ }
+
+ cfg->escan_info.escan_buf[1] = DHD_OS_PREALLOC(cfg->pub,
+ DHD_PREALLOC_WIPHY_ESCAN1, ESCAN_BUF_SIZE);
+ if (cfg->escan_info.escan_buf[1] == NULL) {
+ WL_ERR(("Failed to alloc ESCAN_BUF1\n"));
+ return -ENOMEM;
+ }
+
+ bzero(cfg->escan_info.escan_buf[0], ESCAN_BUF_SIZE);
+ bzero(cfg->escan_info.escan_buf[1], ESCAN_BUF_SIZE);
+ cfg->escan_info.escan_type[0] = 0;
+ cfg->escan_info.escan_type[1] = 0;
+#else
+ cfg->escan_info.escan_buf = DHD_OS_PREALLOC(cfg->pub,
+ DHD_PREALLOC_WIPHY_ESCAN0, ESCAN_BUF_SIZE);
+ if (cfg->escan_info.escan_buf == NULL) {
+ WL_ERR(("Failed to alloc ESCAN_BUF\n"));
+ return -ENOMEM;
+ }
+ bzero(cfg->escan_info.escan_buf, ESCAN_BUF_SIZE);
+#endif /* DUAL_ESCAN_RESULT_BUFFER */
+
+ return 0;
+}
+
+static void
+wl_deinit_escan_result_buf(struct bcm_cfg80211 *cfg)
+{
+#ifdef DUAL_ESCAN_RESULT_BUFFER
+ if (cfg->escan_info.escan_buf[0] != NULL) {
+ cfg->escan_info.escan_buf[0] = NULL;
+ cfg->escan_info.escan_type[0] = 0;
+ }
+
+ if (cfg->escan_info.escan_buf[1] != NULL) {
+ cfg->escan_info.escan_buf[1] = NULL;
+ cfg->escan_info.escan_type[1] = 0;
+ }
+#else
+ if (cfg->escan_info.escan_buf != NULL) {
+ cfg->escan_info.escan_buf = NULL;
+ }
+#endif /* DUAL_ESCAN_RESULT_BUFFER */
+}
+#endif /* STATIC_WL_PRIV_STRUCT */
+
+static s32 wl_init_priv_mem(struct bcm_cfg80211 *cfg)
+{
+ WL_DBG(("Enter \n"));
+
+ cfg->scan_results = (wl_scan_results_t *)MALLOCZ(cfg->osh,
+ WL_SCAN_BUF_MAX);
+ if (unlikely(!cfg->scan_results)) {
+ WL_ERR(("Scan results alloc failed\n"));
+ goto init_priv_mem_out;
+ }
+ cfg->conf = (struct wl_conf *)MALLOCZ(cfg->osh, sizeof(*cfg->conf));
+ if (unlikely(!cfg->conf)) {
+ WL_ERR(("wl_conf alloc failed\n"));
+ goto init_priv_mem_out;
+ }
+ cfg->scan_req_int = (void *)MALLOCZ(cfg->osh,
+ sizeof(*cfg->scan_req_int));
+ if (unlikely(!cfg->scan_req_int)) {
+ WL_ERR(("Scan req alloc failed\n"));
+ goto init_priv_mem_out;
+ }
+ cfg->ioctl_buf = (u8 *)MALLOCZ(cfg->osh, WLC_IOCTL_MAXLEN);
+ if (unlikely(!cfg->ioctl_buf)) {
+ WL_ERR(("Ioctl buf alloc failed\n"));
+ goto init_priv_mem_out;
+ }
+ cfg->escan_ioctl_buf = (void *)MALLOCZ(cfg->osh, WLC_IOCTL_MAXLEN);
+ if (unlikely(!cfg->escan_ioctl_buf)) {
+ WL_ERR(("Ioctl buf alloc failed\n"));
+ goto init_priv_mem_out;
+ }
+ cfg->extra_buf = (void *)MALLOCZ(cfg->osh, WL_EXTRA_BUF_MAX);
+ if (unlikely(!cfg->extra_buf)) {
+ WL_ERR(("Extra buf alloc failed\n"));
+ goto init_priv_mem_out;
+ }
+ cfg->pmk_list = (void *)MALLOCZ(cfg->osh, sizeof(*cfg->pmk_list));
+ if (unlikely(!cfg->pmk_list)) {
+ WL_ERR(("pmk list alloc failed\n"));
+ goto init_priv_mem_out;
+ }
+#if defined(STATIC_WL_PRIV_STRUCT)
+ cfg->conn_info = (void *)MALLOCZ(cfg->osh, sizeof(*cfg->conn_info));
+ if (unlikely(!cfg->conn_info)) {
+ WL_ERR(("cfg->conn_info alloc failed\n"));
+ goto init_priv_mem_out;
+ }
+ cfg->ie = (void *)MALLOC(cfg->osh, sizeof(*cfg->ie));
+ if (unlikely(!cfg->ie)) {
+ WL_ERR(("cfg->ie alloc failed\n"));
+ goto init_priv_mem_out;
+ }
+ if (unlikely(wl_init_escan_result_buf(cfg))) {
+		WL_ERR(("Failed to init escan result buf\n"));
+ goto init_priv_mem_out;
+ }
+#endif /* STATIC_WL_PRIV_STRUCT */
+ cfg->afx_hdl = (void *)MALLOCZ(cfg->osh, sizeof(*cfg->afx_hdl));
+ if (unlikely(!cfg->afx_hdl)) {
+ WL_ERR(("afx hdl alloc failed\n"));
+ goto init_priv_mem_out;
+ } else {
+ init_completion(&cfg->act_frm_scan);
+ init_completion(&cfg->wait_next_af);
+
+ INIT_WORK(&cfg->afx_hdl->work, wl_cfg80211_afx_handler);
+ }
+#ifdef WLTDLS
+ if (cfg->tdls_mgmt_frame) {
+ MFREE(cfg->osh, cfg->tdls_mgmt_frame, cfg->tdls_mgmt_frame_len);
+ cfg->tdls_mgmt_frame = NULL;
+ cfg->tdls_mgmt_frame_len = 0;
+ }
+#endif /* WLTDLS */
+ cfg->spmk_info_list = (void *)MALLOCZ(cfg->osh, sizeof(*cfg->spmk_info_list));
+ if (unlikely(!cfg->spmk_info_list)) {
+		WL_ERR(("Single PMK info list allocation failure\n"));
+ goto init_priv_mem_out;
+ }
+
+ return 0;
+
+init_priv_mem_out:
+ wl_deinit_priv_mem(cfg);
+
+ return -ENOMEM;
+}
+
+static void wl_deinit_priv_mem(struct bcm_cfg80211 *cfg)
+{
+ MFREE(cfg->osh, cfg->scan_results, WL_SCAN_BUF_MAX);
+ MFREE(cfg->osh, cfg->conf, sizeof(*cfg->conf));
+ MFREE(cfg->osh, cfg->scan_req_int, sizeof(*cfg->scan_req_int));
+ MFREE(cfg->osh, cfg->ioctl_buf, WLC_IOCTL_MAXLEN);
+ MFREE(cfg->osh, cfg->escan_ioctl_buf, WLC_IOCTL_MAXLEN);
+ MFREE(cfg->osh, cfg->extra_buf, WL_EXTRA_BUF_MAX);
+ MFREE(cfg->osh, cfg->pmk_list, sizeof(*cfg->pmk_list));
+#if defined(STATIC_WL_PRIV_STRUCT)
+ MFREE(cfg->osh, cfg->conn_info, sizeof(*cfg->conn_info));
+ MFREE(cfg->osh, cfg->ie, sizeof(*cfg->ie));
+ wl_deinit_escan_result_buf(cfg);
+#endif /* STATIC_WL_PRIV_STRUCT */
+ if (cfg->afx_hdl) {
+#if defined(BCMDONGLEHOST)
+ cancel_work_sync(&cfg->afx_hdl->work);
+#endif
+ MFREE(cfg->osh, cfg->afx_hdl, sizeof(*cfg->afx_hdl));
+ }
+	MFREE(cfg->osh, cfg->spmk_info_list, sizeof(*cfg->spmk_info_list));
+}
+
+static s32 wl_create_event_handler(struct bcm_cfg80211 *cfg)
+{
+ int ret = 0;
+ WL_DBG(("Enter \n"));
+
+	/* Creating a separate workqueue requires a GPL-licensed module, but
+	 * some drivers are not GPL-licensed, so the separate queue is created
+	 * for Android only.
+	 */
+
+#ifdef OEM_ANDROID
+ /* Allocate workqueue for event */
+ if (!cfg->event_workq) {
+ cfg->event_workq = alloc_workqueue("dhd_eventd",
+ WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_UNBOUND, 1);
+ }
+
+ if (!cfg->event_workq) {
+ WL_ERR(("event_workq alloc_workqueue failed\n"));
+ ret = -ENOMEM;
+ } else {
+ INIT_WORK(&cfg->event_work, wl_event_handler);
+ }
+#endif /* OEM_ANDROID */
+
+#ifndef OEM_ANDROID
+ INIT_WORK(&cfg->event_work, wl_event_handler);
+ cfg->event_workq_init = true;
+#endif /* OEM_ANDROID */
+ return ret;
+}
+
+static void wl_destroy_event_handler(struct bcm_cfg80211 *cfg)
+{
+
+#ifdef OEM_ANDROID
+ if (cfg && cfg->event_workq) {
+ cancel_work_sync(&cfg->event_work);
+ destroy_workqueue(cfg->event_workq);
+ cfg->event_workq = NULL;
+ }
+#endif /* OEM_ANDROID */
+
+#ifndef OEM_ANDROID
+ if (cfg && cfg->event_workq_init) {
+#ifdef BCMDONGLEHOST
+ cancel_work_sync(&cfg->event_work);
+#endif /* BCMDONGLEHOST */
+ cfg->event_workq_init = false;
+ }
+#endif /* OEM_ANDROID */
+}
+
+void wl_terminate_event_handler(struct net_device *dev)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+
+ if (cfg) {
+ wl_destroy_event_handler(cfg);
+ wl_flush_eq(cfg);
+ }
+}
+
+#ifdef DHD_LOSSLESS_ROAMING
+static void wl_del_roam_timeout(struct bcm_cfg80211 *cfg)
+{
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+
+ /* restore prec_map to ALLPRIO */
+ dhdp->dequeue_prec_map = ALLPRIO;
+ del_timer_sync(&cfg->roam_timeout);
+}
+
+static void wl_roam_timeout(unsigned long data)
+{
+ struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)data;
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+
+ WL_ERR(("roam timer expired\n"));
+
+ /* restore prec_map to ALLPRIO */
+ dhdp->dequeue_prec_map = ALLPRIO;
+}
+
+#endif /* DHD_LOSSLESS_ROAMING */
+
+#if defined(CONFIG_WLAN_BEYONDX) || defined(CONFIG_SEC_5GMODEL)
+#define CP_CHAN_INFO_RAT_MODE_LTE 3
+#define CP_CHAN_INFO_RAT_MODE_NR5G 7
+int g_mhs_chan_for_cpcoex = 0;
+
+struct __packed cam_cp_noti_info {
+ u8 rat;
+ u32 band;
+ u32 channel;
+};
+
+int
+wl_cfg80211_send_msg_to_ril(void)
+{
+ int id, buf = 1;
+
+ id = IPC_SYSTEM_CP_CHANNEL_INFO;
+ dev_ril_bridge_send_msg(id, sizeof(int), &buf);
+ WL_ERR(("[BeyondX] send message to ril.\n"));
+
+ OSL_SLEEP(500);
+ return 0;
+}
+
+int
+wl_cfg80211_ril_bridge_notifier_call(struct notifier_block *nb,
+ unsigned long size, void *buf)
+{
+ struct dev_ril_bridge_msg *msg;
+ struct cam_cp_noti_info *cp_noti_info;
+ static int mhs_channel_for_4g, mhs_channel_for_5g;
+ static int recv_msg_4g, recv_msg_5g;
+
+ WL_ERR(("[BeyondX] receive message from ril.\n"));
+ msg = (struct dev_ril_bridge_msg *)buf;
+
+ if (msg->dev_id == IPC_SYSTEM_CP_CHANNEL_INFO &&
+ msg->data_len <= sizeof(struct cam_cp_noti_info)) {
+ u8 rat;
+ u32 band;
+ u32 channel;
+
+ cp_noti_info = (struct cam_cp_noti_info *)msg->data;
+ rat = cp_noti_info->rat;
+ band = cp_noti_info->band;
+ channel = cp_noti_info->channel;
+
+ /* LTE/5G Band/Freq information => Mobile Hotspot channel mapping.
+ * LTE/B40: 38650~39649 => Ch.11
+ * LTE/B41: 39650~41589 => Ch.1
+ * 5G/N41: 499200~537999 => Ch.1
+ */
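+			/* Worked example (illustrative values only): an LTE report
+			 * with channel (EARFCN) 40000 falls in the B41 range above,
+			 * so mhs_channel_for_4g becomes 1.
+			 */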
+ if (rat == CP_CHAN_INFO_RAT_MODE_LTE) {
+ recv_msg_4g = 1;
+ if (channel >= 38650 && channel <= 39649) {
+ mhs_channel_for_4g = 11;
+ } else if (channel >= 39650 && channel <= 41589) {
+ mhs_channel_for_4g = 1;
+ }
+ }
+ if (rat == CP_CHAN_INFO_RAT_MODE_NR5G) {
+ recv_msg_5g = 1;
+ if (channel >= 499200 && channel <= 537999) {
+ mhs_channel_for_5g = 1;
+ }
+ }
+
+ WL_DBG(("[BeyondX] rat: %u, band: %u, channel: %u, mhs_channel_for_4g: %u, "
+ "mhs_channel_for_5g: %u\n", rat, band, channel,
+ mhs_channel_for_4g, mhs_channel_for_5g));
+
+ if (recv_msg_4g && recv_msg_5g) {
+ if (mhs_channel_for_4g && mhs_channel_for_5g) {
+ /* if 4G/B40 + 5G/N41, select channel 6 for MHS */
+ if (mhs_channel_for_4g == 11 && mhs_channel_for_5g == 1) {
+ g_mhs_chan_for_cpcoex = 6;
+ /* if 4G(except for B40) + 5G/N41, select channel 1 for MHS */
+ } else {
+ g_mhs_chan_for_cpcoex = 1;
+ }
+ } else {
+ g_mhs_chan_for_cpcoex = mhs_channel_for_4g ? mhs_channel_for_4g :
+ mhs_channel_for_5g ? mhs_channel_for_5g : 0;
+ }
+ mhs_channel_for_4g = mhs_channel_for_5g = 0;
+ recv_msg_4g = recv_msg_5g = 0;
+ }
+ }
+
+ return 0;
+}
+
+static struct notifier_block wl_cfg80211_ril_bridge_notifier = {
+ .notifier_call = wl_cfg80211_ril_bridge_notifier_call,
+};
+
+static bool wl_cfg80211_ril_bridge_notifier_registered = FALSE;
+#endif /* CONFIG_WLAN_BEYONDX || defined(CONFIG_SEC_5GMODEL) */
+
+static s32
+wl_cfg80211_netdev_notifier_call(struct notifier_block * nb,
+ unsigned long state, void *ptr)
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0))
+ struct net_device *dev = ptr;
+#else
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+#endif /* LINUX_VERSION < VERSION(3, 11, 0) */
+ struct wireless_dev *wdev = NULL;
+ struct bcm_cfg80211 *cfg = NULL;
+
+ WL_DBG(("Enter state:%lu ndev%p \n", state, dev));
+ if (!dev) {
+ WL_ERR(("dev null\n"));
+ return NOTIFY_DONE;
+ }
+
+ wdev = ndev_to_wdev(dev);
+ if (!wdev) {
+ WL_ERR(("wdev null. Do nothing\n"));
+ return NOTIFY_DONE;
+ }
+
+ cfg = (struct bcm_cfg80211 *)wiphy_priv(wdev->wiphy);
+ if (!cfg || (cfg != wl_cfg80211_get_bcmcfg())) {
+ /* If cfg80211 priv is null or doesn't match return */
+ WL_ERR(("wrong cfg ptr (%p)\n", cfg));
+ return NOTIFY_DONE;
+ }
+
+ if (dev == bcmcfg_to_prmry_ndev(cfg)) {
+ /* Nothing to be done for primary I/F */
+ return NOTIFY_DONE;
+ }
+
+ switch (state) {
+ case NETDEV_DOWN:
+ {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0))
+ int max_wait_timeout = 2;
+ int max_wait_count = 100;
+ int refcnt = 0;
+ unsigned long limit = jiffies + max_wait_timeout * HZ;
+ while (work_pending(&wdev->cleanup_work)) {
+ if (refcnt%5 == 0) {
+ WL_ERR(("[NETDEV_DOWN] wait for "
+ "complete of cleanup_work"
+ " (%d th)\n", refcnt));
+ }
+ if (!time_before(jiffies, limit)) {
+ WL_ERR(("[NETDEV_DOWN] cleanup_work"
+ " of CFG80211 is not"
+ " completed in %d sec\n",
+ max_wait_timeout));
+ break;
+ }
+ if (refcnt >= max_wait_count) {
+ WL_ERR(("[NETDEV_DOWN] cleanup_work"
+ " of CFG80211 is not"
+ " completed in %d loop\n",
+ max_wait_count));
+ break;
+ }
+ set_current_state(TASK_INTERRUPTIBLE);
+ (void)schedule_timeout(100);
+ set_current_state(TASK_RUNNING);
+ refcnt++;
+ }
+#endif /* LINUX_VERSION < VERSION(3, 11, 0) */
+ break;
+ }
+ case NETDEV_UNREGISTER:
+ wl_cfg80211_clear_per_bss_ies(cfg, wdev);
+ /* after calling list_del_rcu(&wdev->list) */
+ wl_dealloc_netinfo_by_wdev(cfg, wdev);
+ break;
+ case NETDEV_GOING_DOWN:
+			/*
+			 * At NETDEV_DOWN state, the wdev_cleanup_work work will be
+			 * scheduled. Up front, that work checks whether a scan is
+			 * still in progress; if it is, it triggers a WARN_ON and
+			 * forces the scan to complete. Cancel any pending scan here
+			 * to avoid that.
+			 */
+ if (wl_get_drv_status(cfg, SCANNING, dev))
+ wl_cfgscan_cancel_scan(cfg);
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block wl_cfg80211_netdev_notifier = {
+ .notifier_call = wl_cfg80211_netdev_notifier_call,
+};
+
+/*
+ * Make sure we won't register the same notifier twice; otherwise a loop is
+ * likely to be created in the kernel notifier linked list (with 'next'
+ * pointing to itself).
+ */
+static bool wl_cfg80211_netdev_notifier_registered = FALSE;
+
+void wl_cfg80211_concurrent_roam(struct bcm_cfg80211 *cfg, int enable)
+{
+ u32 connected_cnt = wl_get_drv_status_all(cfg, CONNECTED);
+ bool p2p_connected = wl_cfgp2p_vif_created(cfg);
+#ifdef WL_NAN
+ bool nan_connected = wl_cfgnan_is_dp_active(bcmcfg_to_prmry_ndev(cfg));
+#endif /* WL_NAN */
+ struct net_info *iter, *next;
+
+ if (!(cfg->roam_flags & WL_ROAM_OFF_ON_CONCURRENT))
+ return;
+
+ WL_DBG(("roam off:%d p2p_connected:%d connected_cnt:%d \n",
+ enable, p2p_connected, connected_cnt));
+ /* Disable FW roam when we have a concurrent P2P connection */
+ if (enable &&
+ ((p2p_connected && connected_cnt > 1) ||
+#ifdef WL_NAN
+ nan_connected ||
+#endif /* WL_NAN */
+ FALSE)) {
+
+ /* Mark it as to be reverted */
+ cfg->roam_flags |= WL_ROAM_REVERT_STATUS;
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ for_each_ndev(cfg, iter, next) {
+ GCC_DIAGNOSTIC_POP();
+ if (iter->ndev && iter->wdev &&
+ iter->wdev->iftype == NL80211_IFTYPE_STATION) {
+ if (wldev_iovar_setint(iter->ndev, "roam_off", TRUE)
+ == BCME_OK) {
+ iter->roam_off = TRUE;
+ }
+ else {
+					WL_ERR(("failed to set roam_off\n"));
+ }
+ }
+ }
+ }
+ else if (!enable && (cfg->roam_flags & WL_ROAM_REVERT_STATUS)) {
+ cfg->roam_flags &= ~WL_ROAM_REVERT_STATUS;
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ for_each_ndev(cfg, iter, next) {
+ GCC_DIAGNOSTIC_POP();
+ if (iter->ndev && iter->wdev &&
+ iter->wdev->iftype == NL80211_IFTYPE_STATION) {
+ if (iter->roam_off != WL_INVALID) {
+ if (wldev_iovar_setint(iter->ndev, "roam_off", FALSE)
+ == BCME_OK) {
+ iter->roam_off = FALSE;
+ }
+ else {
+						WL_ERR(("failed to clear roam_off\n"));
+ }
+ }
+ }
+ }
+ }
+
+ return;
+}
+
+static void wl_cfg80211_determine_vsdb_mode(struct bcm_cfg80211 *cfg)
+{
+ struct net_info *iter, *next;
+#ifdef WLEASYMESH
+ struct net_device *primary_dev;
+ dhd_pub_t *dhd = cfg->pub;
+#endif /* WLEASYMESH */
+ u32 ctl_chan = 0;
+ u32 chanspec = 0;
+ u32 pre_ctl_chan = 0;
+ u32 band = 0;
+ u32 pre_band = 0;
+ u32 connected_cnt = wl_get_drv_status_all(cfg, CONNECTED);
+ cfg->vsdb_mode = false;
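+	/* VSDB (virtual simultaneous dual band): two connected interfaces on
+	 * different control channels of the same band force time-sliced channel
+	 * switching; the loop below detects exactly that condition.
+	 */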
+
+ if (connected_cnt <= 1) {
+ return;
+ }
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ for_each_ndev(cfg, iter, next) {
+ GCC_DIAGNOSTIC_POP();
+ /* p2p discovery iface ndev could be null */
+ if (iter->ndev) {
+ chanspec = 0;
+ ctl_chan = 0;
+ if (wl_get_drv_status(cfg, CONNECTED, iter->ndev)) {
+ if (wldev_iovar_getint(iter->ndev, "chanspec",
+ (s32 *)&chanspec) == BCME_OK) {
+ chanspec = wl_chspec_driver_to_host(chanspec);
+ ctl_chan = wf_chspec_ctlchan(chanspec);
+ band = CHSPEC_BAND(chanspec);
+ wl_update_prof(cfg, iter->ndev, NULL,
+ &chanspec, WL_PROF_CHAN);
+ }
+ if (!cfg->vsdb_mode) {
+ if (!pre_ctl_chan && ctl_chan) {
+ pre_ctl_chan = ctl_chan;
+ pre_band = band;
+ } else if (pre_ctl_chan && (pre_ctl_chan != ctl_chan) &&
+ (band == pre_band)) {
+ cfg->vsdb_mode = true;
+ }
+ }
+ }
+ }
+ }
+#ifdef WLEASYMESH
+	if (dhd->conf->fw_type == FW_TYPE_EZMESH && cfg->vsdb_mode) {
+		primary_dev = bcmcfg_to_prmry_ndev(cfg);
+		WL_MSG("wlan", "check primary chanspec\n");
+		if (wldev_iovar_getint(primary_dev, "chanspec", (s32 *)&chanspec) == BCME_OK) {
+			//chanspec = wl_chspec_driver_to_host(chanspec);
+			WL_MSG("wlan", "set primary chanspec to 0x%x\n", chanspec);
+			wldev_iovar_setint(primary_dev, "chanspec", chanspec);
+		}
+		cfg->vsdb_mode = false;
+	}
+#endif /* WLEASYMESH */
+ WL_MSG("wlan", "%s concurrency is enabled\n", cfg->vsdb_mode ? "Multi Channel" : "Same Channel");
+ return;
+}
+
+int
+wl_cfg80211_determine_p2p_rsdb_scc_mode(struct bcm_cfg80211 *cfg)
+{
+ struct net_info *iter, *next;
+ u32 chanspec = 0;
+ u32 pre_chanspec = 0;
+ u32 band = 0;
+ u32 pre_band = INVCHANSPEC;
+ bool is_rsdb_supported = FALSE;
+ bool rsdb_or_scc_mode = FALSE;
+
+#ifdef BCMDONGLEHOST
+ is_rsdb_supported = DHD_OPMODE_SUPPORTED(cfg->pub, DHD_FLAG_RSDB_MODE);
+#endif /* BCMDONGLEHOST */
+
+ if (!is_rsdb_supported) {
+ return 0;
+ }
+
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ for_each_ndev(cfg, iter, next) {
+ GCC_DIAGNOSTIC_POP();
+ /* p2p discovery iface ndev could be null */
+ if (iter->ndev) {
+ chanspec = 0;
+ band = 0;
+ if (wl_get_drv_status(cfg, CONNECTED, iter->ndev)) {
+ if (wldev_iovar_getint(iter->ndev, "chanspec",
+ (s32 *)&chanspec) == BCME_OK) {
+ chanspec = wl_chspec_driver_to_host(chanspec);
+ band = CHSPEC_BAND(chanspec);
+ }
+
+ if (pre_band == INVCHANSPEC && chanspec) {
+ pre_band = band;
+ pre_chanspec = chanspec;
+ } else {
+ if ((pre_band == band) && (pre_chanspec != chanspec)) {
+ /* VSDB case */
+ rsdb_or_scc_mode = FALSE;
+ } else {
+ /* RSDB/SCC case */
+ rsdb_or_scc_mode = TRUE;
+ }
+ }
+ }
+ }
+ }
+ WL_DBG(("RSDB or SCC mode is %s\n", rsdb_or_scc_mode ? "enabled" : "disabled"));
+
+ return rsdb_or_scc_mode;
+}
+
+static s32 wl_notifier_change_state(struct bcm_cfg80211 *cfg, struct net_info *_net_info,
+ enum wl_status state, bool set)
+{
+ s32 pm = PM_FAST;
+ s32 err = BCME_OK;
+ u32 mode;
+ chanspec_t chspec = 0;
+ struct net_device *primary_dev = bcmcfg_to_prmry_ndev(cfg);
+#ifdef BCMDONGLEHOST
+ dhd_pub_t *dhd = cfg->pub;
+#endif /* BCMDONGLEHOST */
+#ifdef RTT_SUPPORT
+ rtt_status_info_t *rtt_status;
+#endif /* RTT_SUPPORT */
+#ifdef DISABLE_FRAMEBURST_VSDB
+ bool rsdb_scc_flag = FALSE;
+#endif /* DISABLE_FRAMEBURST_VSDB */
+#ifdef BCMDONGLEHOST
+ if (dhd->busstate == DHD_BUS_DOWN) {
+ WL_ERR(("busstate is DHD_BUS_DOWN!\n"));
+ return 0;
+ }
+#endif /* BCMDONGLEHOST */
+ WL_DBG(("Enter state %d set %d _net_info->pm_restore %d iface %s\n",
+ state, set, _net_info->pm_restore, _net_info->ndev->name));
+
+ if (state != WL_STATUS_CONNECTED)
+ return 0;
+ mode = wl_get_mode_by_netdev(cfg, _net_info->ndev);
+ if (set) {
+ wl_cfg80211_concurrent_roam(cfg, 1);
+ wl_cfg80211_determine_vsdb_mode(cfg);
+ if (mode == WL_MODE_AP) {
+ if (wl_add_remove_eventmsg(primary_dev, WLC_E_P2P_PROBREQ_MSG, false))
+				WL_ERR((" failed to unset WLC_E_P2P_PROBREQ_MSG\n"));
+ }
+ pm = PM_OFF;
+ if ((err = wldev_ioctl_set(_net_info->ndev, WLC_SET_PM, &pm,
+ sizeof(pm))) != 0) {
+ if (err == -ENODEV)
+ WL_DBG(("%s:netdev not ready\n",
+ _net_info->ndev->name));
+ else
+ WL_ERR(("%s:error (%d)\n",
+ _net_info->ndev->name, err));
+
+ wl_cfg80211_update_power_mode(_net_info->ndev);
+ }
+ wl_add_remove_pm_enable_work(cfg, WL_PM_WORKQ_SHORT);
+#if defined(WLTDLS)
+ if (wl_cfg80211_is_concurrent_mode(primary_dev)) {
+ err = wldev_iovar_setint(primary_dev, "tdls_enable", 0);
+ }
+#endif /* defined(WLTDLS) */
+
+#ifdef BCMDONGLEHOST
+#ifdef DISABLE_FRAMEBURST_VSDB
+ if (!DHD_OPMODE_SUPPORTED(cfg->pub, DHD_FLAG_HOSTAP_MODE) &&
+ wl_cfg80211_is_concurrent_mode(primary_dev)) {
+ rsdb_scc_flag = wl_cfg80211_determine_p2p_rsdb_scc_mode(cfg);
+ wl_cfg80211_set_frameburst(cfg, rsdb_scc_flag);
+ }
+#endif /* DISABLE_FRAMEBURST_VSDB */
+#ifdef DISABLE_WL_FRAMEBURST_SOFTAP
+ if (DHD_OPMODE_STA_SOFTAP_CONCURR(dhd) &&
+ wl_get_drv_status(cfg, CONNECTED, bcmcfg_to_prmry_ndev(cfg))) {
+ /* Enable frameburst for
+ * STA/SoftAP concurrent mode
+ */
+ wl_cfg80211_set_frameburst(cfg, TRUE);
+ }
+#endif /* DISABLE_WL_FRAMEBURST_SOFTAP */
+#endif /* BCMDONGLEHOST */
+ } else { /* clear */
+ chspec = INVCHANSPEC;
+ /* clear chan information when the net device is disconnected */
+ wl_update_prof(cfg, _net_info->ndev, NULL, &chspec, WL_PROF_CHAN);
+ wl_cfg80211_determine_vsdb_mode(cfg);
+ if (primary_dev == _net_info->ndev) {
+ pm = PM_FAST;
+#ifdef RTT_SUPPORT
+ rtt_status = GET_RTTSTATE(dhd);
+ if (rtt_status->status != RTT_ENABLED) {
+#endif /* RTT_SUPPORT */
+ if (dhd_conf_get_pm(dhd) >= 0)
+ pm = dhd_conf_get_pm(dhd);
+ if ((err = wldev_ioctl_set(_net_info->ndev, WLC_SET_PM, &pm,
+ sizeof(pm))) != 0) {
+ if (err == -ENODEV)
+ WL_DBG(("%s:netdev not ready\n",
+ _net_info->ndev->name));
+ else
+ WL_ERR(("%s:error (%d)\n",
+ _net_info->ndev->name, err));
+
+ wl_cfg80211_update_power_mode(_net_info->ndev);
+ }
+#ifdef RTT_SUPPORT
+ }
+#endif /* RTT_SUPPORT */
+ }
+ wl_cfg80211_concurrent_roam(cfg, 0);
+#if defined(WLTDLS)
+ if (!wl_cfg80211_is_concurrent_mode(primary_dev)) {
+ err = wldev_iovar_setint(primary_dev, "tdls_enable", 1);
+ }
+#endif /* defined(WLTDLS) */
+
+#ifdef BCMDONGLEHOST
+#if defined(DISABLE_FRAMEBURST_VSDB)
+ if (!DHD_OPMODE_SUPPORTED(cfg->pub, DHD_FLAG_HOSTAP_MODE)) {
+ wl_cfg80211_set_frameburst(cfg, TRUE);
+ }
+#endif /* DISABLE_FRAMEBURST_VSDB */
+#ifdef DISABLE_WL_FRAMEBURST_SOFTAP
+ if (DHD_OPMODE_STA_SOFTAP_CONCURR(dhd) &&
+ CHSPEC_IS2G(cfg->ap_oper_channel)) {
+ /* Disable frameburst for stand-alone 2GHz SoftAP */
+ wl_cfg80211_set_frameburst(cfg, FALSE);
+ }
+#endif /* DISABLE_WL_FRAMEBURST_SOFTAP */
+#endif /* BCMDONGLEHOST */
+ }
+ return err;
+}
+
+#ifdef DHD_LOSSLESS_ROAMING
+static s32 wl_init_roam_timeout(struct bcm_cfg80211 *cfg)
+{
+ int err = 0;
+
+ /* Init roam timer */
+ init_timer_compat(&cfg->roam_timeout, wl_roam_timeout, cfg);
+
+ return err;
+}
+#endif /* DHD_LOSSLESS_ROAMING */
+
+#ifdef CONFIG_SLEEP_MONITOR
+extern long long temp_raw;
+
+int wlan_get_sleep_monitor64_cb(void *priv, long long *raw_val,
+ int check_level, int caller_type)
+{
+ struct bcm_cfg80211 *cfg = priv;
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+ int state = DEVICE_UNKNOWN;
+
+ if (!dhdp->up)
+ state = DEVICE_POWER_OFF;
+ else {
+ state = DEVICE_ON_ACTIVE1;
+ if (wl_get_drv_status_all(cfg, CONNECTED))
+ state = DEVICE_ON_ACTIVE2;
+
+ if (caller_type == SLEEP_MONITOR_CALL_SUSPEND) {
+ *raw_val = temp_raw;
+ temp_raw = 0;
+ }
+ }
+
+ return state;
+}
+
+static struct sleep_monitor_ops wlan_sleep_monitor_ops = {
+ .read64_cb_func = wlan_get_sleep_monitor64_cb,
+};
+#endif /* CONFIG_SLEEP_MONITOR */
+
+static s32 wl_init_priv(struct bcm_cfg80211 *cfg)
+{
+ struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+ struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+ s32 err = 0;
+
+ cfg->scan_request = NULL;
+ cfg->pwr_save = !!(wiphy->flags & WIPHY_FLAG_PS_ON_BY_DEFAULT);
+#ifdef DISABLE_BUILTIN_ROAM
+ cfg->roam_on = false;
+#else
+ cfg->roam_on = true;
+#endif /* DISABLE_BUILTIN_ROAM */
+ cfg->active_scan = true;
+ cfg->rf_blocked = false;
+ cfg->vsdb_mode = false;
+#if defined(BCMSDIO) || defined(BCMDBUS)
+ cfg->wlfc_on = false;
+#endif /* BCMSDIO || BCMDBUS */
+ cfg->roam_flags |= WL_ROAM_OFF_ON_CONCURRENT;
+ cfg->disable_roam_event = false;
+ /* register interested state */
+ set_bit(WL_STATUS_CONNECTED, &cfg->interrested_state);
+ spin_lock_init(&cfg->cfgdrv_lock);
+ mutex_init(&cfg->ioctl_buf_sync);
+ init_waitqueue_head(&cfg->netif_change_event);
+ init_completion(&cfg->send_af_done);
+ init_completion(&cfg->iface_disable);
+ mutex_init(&cfg->usr_sync);
+ mutex_init(&cfg->event_sync);
+ mutex_init(&cfg->if_sync);
+ mutex_init(&cfg->scan_sync);
+ mutex_init(&cfg->connect_sync);
+ mutex_init(&cfg->pm_sync);
+#ifdef WLTDLS
+ mutex_init(&cfg->tdls_sync);
+#endif /* WLTDLS */
+#ifdef WL_BCNRECV
+ mutex_init(&cfg->bcn_sync);
+#endif /* WL_BCNRECV */
+#ifdef WL_WPS_SYNC
+ wl_init_wps_reauth_sm(cfg);
+#endif /* WL_WPS_SYNC */
+ wl_init_eq(cfg);
+ err = wl_init_priv_mem(cfg);
+ if (err)
+ return err;
+ if (wl_create_event_handler(cfg))
+ return -ENOMEM;
+ wl_init_event_handler(cfg);
+ err = wl_init_scan(cfg);
+ if (err)
+ return err;
+#ifdef DHD_LOSSLESS_ROAMING
+ err = wl_init_roam_timeout(cfg);
+ if (err) {
+ return err;
+ }
+#endif /* DHD_LOSSLESS_ROAMING */
+ wl_init_conf(cfg->conf);
+ wl_init_prof(cfg, ndev);
+ wl_link_down(cfg);
+ DNGL_FUNC(dhd_cfg80211_init, (cfg));
+ cfg->pmk_list->pmkids.length = OFFSETOF(pmkid_list_v3_t, pmkid);
+ cfg->pmk_list->pmkids.count = 0;
+ cfg->pmk_list->pmkids.version = PMKID_LIST_VER_3;
+
+#ifdef CONFIG_SLEEP_MONITOR
+ sleep_monitor_register_ops(cfg, &wlan_sleep_monitor_ops,
+ SLEEP_MONITOR_WIFI);
+#endif /* CONFIG_SLEEP_MONITOR */
+ return err;
+}
+
+static void wl_deinit_priv(struct bcm_cfg80211 *cfg)
+{
+ DNGL_FUNC(dhd_cfg80211_deinit, (cfg));
+ wl_destroy_event_handler(cfg);
+ wl_flush_eq(cfg);
+ wl_link_down(cfg);
+ del_timer_sync(&cfg->scan_timeout);
+#ifdef DHD_LOSSLESS_ROAMING
+ del_timer_sync(&cfg->roam_timeout);
+#endif
+ wl_deinit_priv_mem(cfg);
+ if (wl_cfg80211_netdev_notifier_registered) {
+ wl_cfg80211_netdev_notifier_registered = FALSE;
+ unregister_netdevice_notifier(&wl_cfg80211_netdev_notifier);
+ }
+
+#ifdef CONFIG_SLEEP_MONITOR
+ sleep_monitor_unregister_ops(SLEEP_MONITOR_WIFI);
+#endif /* CONFIG_SLEEP_MONITOR */
+}
+
+#if defined(WL_ENABLE_P2P_IF) || defined (WL_NEWCFG_PRIVCMD_SUPPORT)
+static s32 wl_cfg80211_attach_p2p(struct bcm_cfg80211 *cfg)
+{
+ WL_TRACE(("Enter \n"));
+
+ if (wl_cfgp2p_register_ndev(cfg) < 0) {
+ WL_ERR(("P2P attach failed. \n"));
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static s32 wl_cfg80211_detach_p2p(struct bcm_cfg80211 *cfg)
+{
+#ifndef WL_NEWCFG_PRIVCMD_SUPPORT
+ struct wireless_dev *wdev;
+#endif /* WL_NEWCFG_PRIVCMD_SUPPORT */
+
+ WL_DBG(("Enter \n"));
+ if (!cfg) {
+ WL_ERR(("Invalid Ptr\n"));
+ return -EINVAL;
+ }
+#ifndef WL_NEWCFG_PRIVCMD_SUPPORT
+ else {
+ wdev = cfg->p2p_wdev;
+ if (!wdev) {
+ WL_ERR(("Invalid Ptr\n"));
+ return -EINVAL;
+ }
+ }
+#endif /* WL_NEWCFG_PRIVCMD_SUPPORT */
+
+ wl_cfgp2p_unregister_ndev(cfg);
+
+ cfg->p2p_wdev = NULL;
+ cfg->p2p_net = NULL;
+#ifndef WL_NEWCFG_PRIVCMD_SUPPORT
+ WL_DBG(("Freeing 0x%p \n", wdev));
+ MFREE(cfg->osh, wdev, sizeof(*wdev));
+#endif /* WL_NEWCFG_PRIVCMD_SUPPORT */
+
+ return 0;
+}
+#endif /* WL_ENABLE_P2P_IF || WL_NEWCFG_PRIVCMD_SUPPORT */
+
+#if defined(BCMDONGLEHOST)
+static s32 wl_cfg80211_attach_post(struct net_device *ndev)
+{
+ struct bcm_cfg80211 * cfg;
+ s32 err = 0;
+ s32 ret = 0;
+ WL_TRACE(("In\n"));
+ if (unlikely(!ndev)) {
+		WL_ERR(("ndev is invalid\n"));
+ return -ENODEV;
+ }
+ cfg = wl_get_cfg(ndev);
+ if (unlikely(!cfg)) {
+		WL_ERR(("cfg is invalid\n"));
+ return -EINVAL;
+ }
+ if (!wl_get_drv_status(cfg, READY, ndev)) {
+ if (cfg->wdev) {
+ ret = wl_cfgp2p_supported(cfg, ndev);
+ if (ret > 0) {
+#if !defined(WL_ENABLE_P2P_IF)
+ cfg->wdev->wiphy->interface_modes |=
+ (BIT(NL80211_IFTYPE_P2P_CLIENT)|
+ BIT(NL80211_IFTYPE_P2P_GO));
+#endif /* !WL_ENABLE_P2P_IF */
+ if ((err = wl_cfgp2p_init_priv(cfg)) != 0)
+ goto fail;
+
+#if defined(WL_ENABLE_P2P_IF)
+ if (cfg->p2p_net) {
+ /* Update MAC addr for p2p0 interface here. */
+ memcpy(cfg->p2p_net->dev_addr, ndev->dev_addr, ETH_ALEN);
+ cfg->p2p_net->dev_addr[0] |= 0x02;
+ WL_MSG(cfg->p2p_net->name, "p2p_dev_addr="MACDBG "\n",
+ MAC2STRDBG(cfg->p2p_net->dev_addr));
+ } else {
+ WL_ERR(("p2p_net not yet populated."
+ " Couldn't update the MAC Address for p2p0 \n"));
+ return -ENODEV;
+ }
+#endif /* WL_ENABLE_P2P_IF */
+ cfg->p2p_supported = true;
+ } else if (ret == 0) {
+ if ((err = wl_cfgp2p_init_priv(cfg)) != 0)
+ goto fail;
+ } else {
+ /* SDIO bus timeout */
+ err = -ENODEV;
+ goto fail;
+ }
+ }
+ }
+ wl_set_drv_status(cfg, READY, ndev);
+fail:
+ return err;
+}
+#endif /* BCMDONGLEHOST */
+
+struct bcm_cfg80211 *wl_get_cfg(struct net_device *ndev)
+{
+ struct wireless_dev *wdev = ndev->ieee80211_ptr;
+
+ if (!wdev || !wdev->wiphy)
+ return NULL;
+
+ return wiphy_priv(wdev->wiphy);
+}
+
+s32
+wl_cfg80211_net_attach(struct net_device *primary_ndev)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(primary_ndev);
+#ifdef WL_STATIC_IF
+ enum nl80211_iftype ntype;
+ int i;
+#endif
+
+ if (!cfg) {
+ WL_ERR(("cfg null\n"));
+ return BCME_ERROR;
+ }
+#ifdef WL_STATIC_IF
+ /* Register dummy n/w iface. FW init will happen only from dev_open */
+#ifdef WLEASYMESH
+ ntype = NL80211_IFTYPE_AP;
+#else
+ ntype = NL80211_IFTYPE_STATION;
+#endif
+	for (i = 0; i < DHD_MAX_STATIC_IFS; i++) {
+ if (wl_cfg80211_register_static_if(cfg, ntype,
+ WL_STATIC_IFNAME_PREFIX, i) == NULL) {
+ WL_ERR(("static i/f registration failed!\n"));
+ wl_cfg80211_unregister_static_if(cfg);
+ return BCME_ERROR;
+ }
+ }
+#endif /* WL_STATIC_IF */
+ return BCME_OK;
+}
+
+s32 wl_cfg80211_attach(struct net_device *ndev, void *context)
+{
+ struct wireless_dev *wdev;
+ struct bcm_cfg80211 *cfg;
+ s32 err = 0;
+ struct device *dev;
+ u16 bssidx = 0;
+ u16 ifidx = 0;
+ dhd_pub_t *dhd = (struct dhd_pub *)(context);
+
+ WL_TRACE(("In\n"));
+ if (!ndev) {
+		WL_ERR(("ndev is invalid\n"));
+ return -ENODEV;
+ }
+ WL_DBG(("func %p\n", wl_cfg80211_get_parent_dev()));
+#if !defined(BCMDONGLEHOST)
+ wl_cfg80211_set_parent_dev(context);
+#endif
+ dev = wl_cfg80211_get_parent_dev();
+
+ wdev = (struct wireless_dev *)MALLOCZ(dhd->osh, sizeof(*wdev));
+ if (unlikely(!wdev)) {
+ WL_ERR(("Could not allocate wireless device\n"));
+ return -ENOMEM;
+ }
+ err = wl_setup_wiphy(wdev, dev, context);
+ if (unlikely(err)) {
+ MFREE(dhd->osh, wdev, sizeof(*wdev));
+ return -ENOMEM;
+ }
+#ifdef WLMESH_CFG80211
+ wdev->iftype = wl_mode_to_nl80211_iftype(WL_MODE_MESH);
+#else
+ wdev->iftype = wl_mode_to_nl80211_iftype(WL_MODE_BSS);
+#endif
+ cfg = wiphy_priv(wdev->wiphy);
+ cfg->wdev = wdev;
+ cfg->pub = context;
+ cfg->osh = dhd->osh;
+ INIT_LIST_HEAD(&cfg->net_list);
+#ifdef WBTEXT
+ INIT_LIST_HEAD(&cfg->wbtext_bssid_list);
+#endif /* WBTEXT */
+ INIT_LIST_HEAD(&cfg->vndr_oui_list);
+ spin_lock_init(&cfg->vndr_oui_sync);
+ spin_lock_init(&cfg->net_list_sync);
+ ndev->ieee80211_ptr = wdev;
+ SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
+ wdev->netdev = ndev;
+ cfg->state_notifier = wl_notifier_change_state;
+ err = wl_alloc_netinfo(cfg, ndev, wdev, WL_IF_TYPE_STA, PM_ENABLE, bssidx, ifidx);
+ if (err) {
+ WL_ERR(("Failed to alloc net_info (%d)\n", err));
+ goto cfg80211_attach_out;
+ }
+ err = wl_init_priv(cfg);
+ if (err) {
+		WL_ERR(("Failed to init priv (%d)\n", err));
+ goto cfg80211_attach_out;
+ }
+
+ err = wl_setup_rfkill(cfg, TRUE);
+ if (err) {
+ WL_ERR(("Failed to setup rfkill %d\n", err));
+ goto cfg80211_attach_out;
+ }
+
+ if (!wl_cfg80211_netdev_notifier_registered) {
+ wl_cfg80211_netdev_notifier_registered = TRUE;
+ err = register_netdevice_notifier(&wl_cfg80211_netdev_notifier);
+ if (err) {
+ wl_cfg80211_netdev_notifier_registered = FALSE;
+			WL_ERR(("Failed to register notifier %d\n", err));
+ goto cfg80211_attach_out;
+ }
+ }
+
+#if defined(OEM_ANDROID) && defined(COEX_DHCP)
+ cfg->btcoex_info = wl_cfg80211_btcoex_init(cfg->wdev->netdev);
+ if (!cfg->btcoex_info)
+ goto cfg80211_attach_out;
+#endif /* defined(OEM_ANDROID) && defined(COEX_DHCP) */
+
+#if defined(SUPPORT_RANDOM_MAC_SCAN)
+ cfg->random_mac_enabled = FALSE;
+#endif /* SUPPORT_RANDOM_MAC_SCAN */
+
+#ifdef CONFIG_CFG80211_INTERNAL_REGDB
+ wdev->wiphy->reg_notifier = wl_cfg80211_reg_notifier;
+#endif /* CONFIG_CFG80211_INTERNAL_REGDB */
+
+#if defined(WL_ENABLE_P2P_IF) || defined (WL_NEWCFG_PRIVCMD_SUPPORT)
+ err = wl_cfg80211_attach_p2p(cfg);
+ if (err)
+ goto cfg80211_attach_out;
+#endif /* WL_ENABLE_P2P_IF || WL_NEWCFG_PRIVCMD_SUPPORT */
+
+#if defined(OEM_ANDROID) && defined(DHCP_SCAN_SUPPRESS)
+ /* wlan scan_supp timer and work thread info */
+ init_timer_compat(&cfg->scan_supp_timer, wl_cfg80211_scan_supp_timerfunc, cfg);
+ INIT_WORK(&cfg->wlan_work, wl_cfg80211_work_handler);
+#endif /* DHCP_SCAN_SUPPRESS */
+
+ INIT_DELAYED_WORK(&cfg->pm_enable_work, wl_cfg80211_work_handler);
+ INIT_DELAYED_WORK(&cfg->loc.work, wl_cfgscan_listen_complete_work);
+ INIT_DELAYED_WORK(&cfg->ap_work, wl_cfg80211_ap_timeout_work);
+ mutex_init(&cfg->pm_sync);
+#ifdef WL_NAN
+ err = wl_cfgnan_attach(cfg);
+ if (err) {
+ WL_ERR(("Failed to attach nan module %d\n", err));
+ goto cfg80211_attach_out;
+ }
+#endif /* WL_NAN */
+ cfg->rssi_sum_report = FALSE;
+#ifdef WL_BAM
+ wl_bad_ap_mngr_init(cfg);
+#endif /* WL_BAM */
+
+#ifdef BIGDATA_SOFTAP
+ wl_attach_ap_stainfo(cfg);
+#endif /* BIGDATA_SOFTAP */
+
+ return err;
+
+cfg80211_attach_out:
+ wl_cfg80211_detach(cfg);
+ return err;
+}
+
+void wl_cfg80211_detach(struct bcm_cfg80211 *cfg)
+{
+ WL_DBG(("Enter\n"));
+ if (!cfg) {
+ return;
+ }
+/* Clean up the pm_enable work item. Remove this once deinit is properly
+ * cleaned up and wl_cfg80211_down is called while removing the module.
+ */
+ wl_add_remove_pm_enable_work(cfg, WL_PM_WORKQ_DEL);
+
+#if defined(OEM_ANDROID) && defined(COEX_DHCP)
+ wl_cfg80211_btcoex_deinit();
+ cfg->btcoex_info = NULL;
+#endif /* defined(OEM_ANDROID) && defined(COEX_DHCP) */
+
+ wl_setup_rfkill(cfg, FALSE);
+
+#ifdef WL_WPS_SYNC
+ wl_deinit_wps_reauth_sm(cfg);
+#endif /* WL_WPS_SYNC */
+
+ del_timer_sync(&cfg->scan_timeout);
+#ifdef DHD_LOSSLESS_ROAMING
+ del_timer_sync(&cfg->roam_timeout);
+#endif /* DHD_LOSSLESS_ROAMING */
+
+#ifdef WL_STATIC_IF
+ wl_cfg80211_unregister_static_if(cfg);
+#endif /* WL_STATIC_IF */
+#if defined(WL_ENABLE_P2P_IF) || defined (WL_NEWCFG_PRIVCMD_SUPPORT)
+ wl_cfg80211_detach_p2p(cfg);
+#endif /* WL_ENABLE_P2P_IF || WL_NEWCFG_PRIVCMD_SUPPORT */
+#ifdef WL_BAM
+ wl_bad_ap_mngr_deinit(cfg);
+#endif /* WL_BAM */
+
+#ifdef BIGDATA_SOFTAP
+ wl_detach_ap_stainfo(cfg);
+#endif /* BIGDATA_SOFTAP */
+
+#ifdef WL_NAN
+ wl_cfgnan_detach(cfg);
+#endif /* WL_NAN */
+ wl_cfg80211_ibss_vsie_free(cfg);
+ wl_dealloc_netinfo_by_wdev(cfg, cfg->wdev);
+ wl_cfg80211_set_bcmcfg(NULL);
+ wl_deinit_priv(cfg);
+ wl_cfg80211_clear_parent_dev();
+#if defined(RSSIAVG)
+ wl_free_rssi_cache(&cfg->g_rssi_cache_ctrl);
+ wl_free_rssi_cache(&cfg->g_connected_rssi_cache_ctrl);
+#endif
+#if defined(BSSCACHE)
+ wl_release_bss_cache_ctrl(&cfg->g_bss_cache_ctrl);
+#endif
+ wl_free_wdev(cfg);
+	/* PLEASE do NOT call any function after wl_free_wdev: the driver's
+	 * private structure "cfg", which is the private part of wiphy, has
+	 * been freed in wl_free_wdev!
+	 */
+ WL_DBG(("Exit\n"));
+}
+
+#if defined(CONFIG_WLAN_BEYONDX) || defined(CONFIG_SEC_5GMODEL)
+void wl_cfg80211_register_dev_ril_bridge_event_notifier(void)
+{
+ WL_DBG(("Enter\n"));
+ if (!wl_cfg80211_ril_bridge_notifier_registered) {
+ s32 err = 0;
+ wl_cfg80211_ril_bridge_notifier_registered = TRUE;
+ err = register_dev_ril_bridge_event_notifier(&wl_cfg80211_ril_bridge_notifier);
+ if (err) {
+ wl_cfg80211_ril_bridge_notifier_registered = FALSE;
+ WL_ERR(("Failed to register ril_notifier! %d\n", err));
+ }
+ }
+}
+
+void wl_cfg80211_unregister_dev_ril_bridge_event_notifier(void)
+{
+ WL_DBG(("Enter\n"));
+ if (wl_cfg80211_ril_bridge_notifier_registered) {
+ wl_cfg80211_ril_bridge_notifier_registered = FALSE;
+ unregister_dev_ril_bridge_event_notifier(&wl_cfg80211_ril_bridge_notifier);
+ }
+}
+#endif /* CONFIG_WLAN_BEYONDX || defined(CONFIG_SEC_5GMODEL) */
+
+static void wl_print_event_data(struct bcm_cfg80211 *cfg,
+ uint32 event_type, const wl_event_msg_t *e)
+{
+ s32 status = ntoh32(e->status);
+ s32 reason = ntoh32(e->reason);
+ s32 ifidx = ntoh32(e->ifidx);
+ s32 bssidx = ntoh32(e->bsscfgidx);
+
+ switch (event_type) {
+ case WLC_E_ESCAN_RESULT:
+ if ((status == WLC_E_STATUS_SUCCESS) ||
+ (status == WLC_E_STATUS_ABORT)) {
+ WL_INFORM_MEM(("event_type (%d), ifidx: %d"
+ " bssidx: %d scan_type:%d\n",
+ event_type, ifidx, bssidx, status));
+ }
+ break;
+ case WLC_E_LINK:
+ case WLC_E_DISASSOC:
+ case WLC_E_DISASSOC_IND:
+ case WLC_E_DEAUTH:
+ case WLC_E_DEAUTH_IND:
+ WL_INFORM_MEM(("event_type (%d), ifidx: %d bssidx: %d"
+ " status:%d reason:%d\n",
+ event_type, ifidx, bssidx, status, reason));
+ break;
+
+ default:
+ /* Print only when DBG verbose is enabled */
+ WL_DBG(("event_type (%d), ifidx: %d bssidx: %d status:%d reason: %d\n",
+ event_type, ifidx, bssidx, status, reason));
+ }
+}
+
+static void wl_event_handler(struct work_struct *work_data)
+{
+ struct bcm_cfg80211 *cfg = NULL;
+ struct wl_event_q *e;
+ struct wireless_dev *wdev = NULL;
+
+ WL_DBG(("Enter \n"));
+ BCM_SET_CONTAINER_OF(cfg, work_data, struct bcm_cfg80211, event_work);
+ LOG_TS(cfg, wl_evt_hdlr_entry);
+ DHD_EVENT_WAKE_LOCK(cfg->pub);
+ while ((e = wl_deq_event(cfg))) {
+ s32 status = ntoh32(e->emsg.status);
+ u32 event_type = ntoh32(e->emsg.event_type);
+ bool scan_cmplt_evt = (event_type == WLC_E_ESCAN_RESULT) &&
+ ((status == WLC_E_STATUS_SUCCESS) || (status == WLC_E_STATUS_ABORT));
+
+ LOG_TS(cfg, wl_evt_deq);
+ if (scan_cmplt_evt) {
+ LOG_TS(cfg, scan_deq);
+ }
+ /* Print only critical events to avoid too many prints */
+ wl_print_event_data(cfg, e->etype, &e->emsg);
+
+ if (e->emsg.ifidx > WL_MAX_IFS) {
+ WL_ERR((" Event ifidx not in range. val:%d \n", e->emsg.ifidx));
+ goto fail;
+ }
+
+		/* Make sure iface operations don't create race conditions */
+ mutex_lock(&cfg->if_sync);
+ if (!(wdev = wl_get_wdev_by_fw_idx(cfg,
+ e->emsg.bsscfgidx, e->emsg.ifidx))) {
+			/* WLC_E_IF events are handled by wl_host_event */
+ if (e->etype != WLC_E_IF)
+ WL_ERR(("No wdev corresponding to bssidx: 0x%x found!"
+ " Ignoring event.\n", e->emsg.bsscfgidx));
+ } else if (e->etype < WLC_E_LAST && cfg->evt_handler[e->etype]) {
+#if defined(BCMDONGLEHOST)
+ dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
+ if (dhd->busstate == DHD_BUS_DOWN) {
+ WL_ERR((": BUS is DOWN.\n"));
+ } else
+#endif /* defined(BCMDONGLEHOST) */
+ {
+ WL_DBG(("event_type %d event_sub %d\n",
+ ntoh32(e->emsg.event_type),
+ ntoh32(e->emsg.reason)));
+ WL_SET_EIDX_IN_PROGRESS(cfg, e->id, e->etype);
+ cfg->evt_handler[e->etype](cfg, wdev_to_cfgdev(wdev),
+ &e->emsg, e->edata);
+ WL_CLR_EIDX_STATES(cfg);
+ if (scan_cmplt_evt) {
+ LOG_TS(cfg, scan_hdlr_cmplt);
+ }
+ }
+ } else {
+ WL_DBG(("Unknown Event (%d): ignoring\n", e->etype));
+ }
+ mutex_unlock(&cfg->if_sync);
+fail:
+ wl_put_event(cfg, e);
+ if (scan_cmplt_evt) {
+ LOG_TS(cfg, scan_cmplt);
+ }
+ LOG_TS(cfg, wl_evt_hdlr_exit);
+ }
+ DHD_EVENT_WAKE_UNLOCK(cfg->pub);
+}
+
+/*
+* Generic API to handle critical events that don't need cfg enqueuing or
+* sleepable API calls.
+*/
+s32
+wl_cfg80211_handle_critical_events(struct bcm_cfg80211 *cfg,
+ struct wireless_dev *wdev, const wl_event_msg_t * e)
+{
+ s32 ret = BCME_ERROR;
+ u32 event_type = ntoh32(e->event_type);
+
+ if (event_type >= WLC_E_LAST) {
+ return BCME_ERROR;
+ }
+
+ switch (event_type) {
+ case WLC_E_NAN_CRITICAL: {
+#ifdef WL_NAN
+ if (ntoh32(e->reason) == WL_NAN_EVENT_STOP) {
+ WL_DBG(("Received WL_NAN_EVENT_STOP\n"));
+ }
+#endif /* WL_NAN */
+ break;
+ }
+ default:
+ ret = BCME_ERROR;
+ }
+ return ret;
+}
+
+void
+wl_cfg80211_event(struct net_device *ndev, const wl_event_msg_t * e, void *data)
+{
+ s32 status = ntoh32(e->status);
+ u32 event_type = ntoh32(e->event_type);
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ struct net_info *netinfo;
+
+ WL_DBG(("event_type (%d): reason (%d): %s\n", event_type, ntoh32(e->reason),
+ bcmevent_get_name(event_type)));
+ if ((cfg == NULL) || (cfg->p2p_supported && cfg->p2p == NULL)) {
+ WL_ERR(("Stale event ignored\n"));
+ return;
+ }
+
+#ifdef OEM_ANDROID
+ if (cfg->event_workq == NULL) {
+ WL_ERR(("Event handler is not created\n"));
+ return;
+ }
+#endif /* OEM_ANDROID */
+
+#ifndef OEM_ANDROID
+ if (!cfg->event_workq_init) {
+ WL_ERR(("Event handler is not created\n"));
+ return;
+ }
+#endif /* OEM_ANDROID */
+
+ if (event_type == WLC_E_IF) {
+ /* Don't process WLC_E_IF events in wl_cfg80211 layer */
+ return;
+ }
+
+ netinfo = wl_get_netinfo_by_fw_idx(cfg, e->bsscfgidx, e->ifidx);
+ if (!netinfo) {
+		/* Since the netinfo entry is not present, the netdev entry was
+		 * not created via the cfg80211 interface, so the event is not of
+		 * interest to the cfg80211 layer.
+		 */
+ WL_TRACE(("ignore event %d, not interested\n", event_type));
+ return;
+ }
+
+ /* Handle wl_cfg80211_critical_events */
+ if (wl_cfg80211_handle_critical_events(cfg, netinfo->wdev, e) == BCME_OK) {
+ return;
+ }
+
+ if (event_type == WLC_E_PFN_NET_FOUND) {
+ WL_DBG((" PNOEVENT: PNO_NET_FOUND\n"));
+ }
+ else if (event_type == WLC_E_PFN_NET_LOST) {
+ WL_DBG((" PNOEVENT: PNO_NET_LOST\n"));
+ }
+
+ if (likely(!wl_enq_event(cfg, ndev, event_type, e, data))) {
+
+#ifdef OEM_ANDROID
+ queue_work(cfg->event_workq, &cfg->event_work);
+#endif /* OEM_ANDROID */
+
+#ifndef OEM_ANDROID
+ schedule_work(&cfg->event_work);
+#endif /* OEM_ANDROID */
+ }
+ /* Mark timeout value for thread sched */
+ if ((event_type == WLC_E_ESCAN_RESULT) &&
+ ((status == WLC_E_STATUS_SUCCESS) ||
+ (status == WLC_E_STATUS_ABORT))) {
+ LOG_TS(cfg, scan_enq);
+		WL_INFORM_MEM(("Enqueuing escan completion (%d). WQ state:0x%x \n",
+ status, work_busy(&cfg->event_work)));
+ }
+}
+
+static void wl_init_eq(struct bcm_cfg80211 *cfg)
+{
+ wl_init_eq_lock(cfg);
+ INIT_LIST_HEAD(&cfg->eq_list);
+}
+
+static void wl_flush_eq(struct bcm_cfg80211 *cfg)
+{
+ struct wl_event_q *e;
+ unsigned long flags;
+
+ flags = wl_lock_eq(cfg);
+ while (!list_empty_careful(&cfg->eq_list)) {
+ BCM_SET_LIST_FIRST_ENTRY(e, &cfg->eq_list, struct wl_event_q, eq_list);
+ list_del(&e->eq_list);
+ MFREE(cfg->osh, e, e->datalen + sizeof(struct wl_event_q));
+ }
+ wl_unlock_eq(cfg, flags);
+}
+
+/*
+* retrieve first queued event from head
+*/
+
+static struct wl_event_q *wl_deq_event(struct bcm_cfg80211 *cfg)
+{
+ struct wl_event_q *e = NULL;
+ unsigned long flags;
+
+ flags = wl_lock_eq(cfg);
+ if (likely(!list_empty(&cfg->eq_list))) {
+ BCM_SET_LIST_FIRST_ENTRY(e, &cfg->eq_list, struct wl_event_q, eq_list);
+ list_del(&e->eq_list);
+ }
+ wl_unlock_eq(cfg, flags);
+
+ return e;
+}
+
+/*
+ * push event to tail of the queue
+ */
+
+static s32
+wl_enq_event(struct bcm_cfg80211 *cfg, struct net_device *ndev, u32 event,
+ const wl_event_msg_t *msg, void *data)
+{
+ struct wl_event_q *e;
+ s32 err = 0;
+ uint32 evtq_size;
+ uint32 data_len;
+ unsigned long flags;
+
+ data_len = 0;
+ if (data)
+ data_len = ntoh32(msg->datalen);
+ evtq_size = (uint32)(sizeof(struct wl_event_q) + data_len);
+ e = (struct wl_event_q *)MALLOCZ(cfg->osh, evtq_size);
+ if (unlikely(!e)) {
+ WL_ERR(("event alloc failed\n"));
+ return -ENOMEM;
+ }
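+	/* The event payload lives in the flexible tail (e->edata) of the same
+	 * allocation, so a single MFREE of sizeof(struct wl_event_q) + datalen
+	 * later releases header and payload together (see wl_put_event()).
+	 */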
+ e->etype = event;
+ memcpy(&e->emsg, msg, sizeof(wl_event_msg_t));
+ if (data)
+ memcpy(e->edata, data, data_len);
+ e->datalen = data_len;
+ e->id = cfg->eidx.enqd++;
+ flags = wl_lock_eq(cfg);
+ list_add_tail(&e->eq_list, &cfg->eq_list);
+ wl_unlock_eq(cfg, flags);
+
+ return err;
+}
+
+static void wl_put_event(struct bcm_cfg80211 *cfg, struct wl_event_q *e)
+{
+ MFREE(cfg->osh, e, e->datalen + sizeof(struct wl_event_q));
+}
+
+static s32 wl_config_infra(struct bcm_cfg80211 *cfg, struct net_device *ndev, u16 iftype)
+{
+ s32 infra = 0;
+ s32 err = 0;
+ bool skip_infra = false;
+
+ switch (iftype) {
+ case WL_IF_TYPE_IBSS:
+ case WL_IF_TYPE_AIBSS:
+ infra = 0;
+ break;
+ case WL_IF_TYPE_AP:
+ case WL_IF_TYPE_STA:
+ case WL_IF_TYPE_P2P_GO:
+ case WL_IF_TYPE_P2P_GC:
+ /* Intentional fall through */
+ infra = 1;
+ break;
+#ifdef WLMESH_CFG80211
+ case NL80211_IFTYPE_MESH_POINT:
+ infra = WL_BSSTYPE_MESH;
+ break;
+#endif /* WLMESH_CFG80211 */
+ case WL_IF_TYPE_MONITOR:
+
+#ifdef WLAWDL
+ case WL_IF_TYPE_AWDL:
+#endif /* WLAWDL */
+
+ case WL_IF_TYPE_NAN:
+		/* Intentional fall through */
+ default:
+ skip_infra = true;
+ WL_ERR(("Skipping infra setting for type:%d\n", iftype));
+ break;
+ }
+
+	/* TODO: The infra iovar is stored in the default bss first and then
+	 * applied to the next upcoming bss, so if some other concurrent bss
+	 * comes up in parallel it can cause problems. Ideally this iovar
+	 * should be applied directly on the target bsscfg.
+	 */
+ if (!skip_infra) {
+ infra = htod32(infra);
+ err = wldev_ioctl_set(ndev, WLC_SET_INFRA, &infra, sizeof(infra));
+ if (unlikely(err)) {
+ WL_ERR(("WLC_SET_INFRA error (%d)\n", err));
+ return err;
+ }
+ }
+ return 0;
+}
+
+void wl_cfg80211_add_to_eventbuffer(struct wl_eventmsg_buf *ev, u16 event, bool set)
+{
+ if (!ev || (event > WLC_E_LAST))
+ return;
+
+ if (ev->num < MAX_EVENT_BUF_NUM) {
+ ev->event[ev->num].type = event;
+ ev->event[ev->num].set = set;
+ ev->num++;
+ } else {
+		WL_ERR(("eventbuffer doesn't support > %u events. Update"
+ " the define MAX_EVENT_BUF_NUM \n", MAX_EVENT_BUF_NUM));
+ ASSERT(0);
+ }
+}
+
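+/* Typical usage (sketch): batch several event enable/disable requests in a
+ * wl_eventmsg_buf_t via wl_cfg80211_add_to_eventbuffer(), then commit them to
+ * firmware in a single "event_msgs_ext" read-modify-write via
+ * wl_cfg80211_apply_eventbuffer() below.
+ */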
+s32 wl_cfg80211_apply_eventbuffer(
+ struct net_device *ndev,
+ struct bcm_cfg80211 *cfg,
+ wl_eventmsg_buf_t *ev)
+{
+ int i, ret = BCME_OK;
+ s8 event_buf[WL_EVENTING_MASK_EXT_LEN + EVENTMSGS_EXT_STRUCT_SIZE] = {0};
+ /* Room for "event_msgs_ext" + '\0' + bitvec */
+ char iovbuf[WL_EVENTING_MASK_EXT_LEN + EVENTMSGS_EXT_STRUCT_SIZE + 16];
+ eventmsgs_ext_t *eventmask_msg;
+ s32 msglen = WL_EVENTING_MASK_EXT_LEN + EVENTMSGS_EXT_STRUCT_SIZE;
+
+ if (!ev || (!ev->num)) {
+ return -EINVAL;
+ }
+
+ mutex_lock(&cfg->event_sync);
+
+ eventmask_msg = (eventmsgs_ext_t *)event_buf;
+ eventmask_msg->ver = EVENTMSGS_VER;
+ eventmask_msg->command = EVENTMSGS_NONE;
+ eventmask_msg->len = WL_EVENTING_MASK_EXT_LEN;
+ eventmask_msg->maxgetsize = WL_EVENTING_MASK_EXT_LEN;
+
+ /* Read event_msgs mask */
+ ret = wldev_iovar_getbuf(ndev, "event_msgs_ext",
+ eventmask_msg, EVENTMSGS_EXT_STRUCT_SIZE,
+ iovbuf,
+ sizeof(iovbuf),
+ NULL);
+
+ if (unlikely(ret)) {
+ WL_ERR(("Get event_msgs error (%d)\n", ret));
+ goto exit;
+ }
+
+ bcopy(iovbuf, eventmask_msg, msglen);
+
+ /* apply the set bits */
+ for (i = 0; i < ev->num; i++) {
+ if (ev->event[i].set)
+ setbit(eventmask_msg->mask, ev->event[i].type);
+ else
+ clrbit(eventmask_msg->mask, ev->event[i].type);
+ }
+
+ /* Write updated Event mask */
+ eventmask_msg->ver = EVENTMSGS_VER;
+ eventmask_msg->command = EVENTMSGS_SET_MASK;
+ eventmask_msg->len = WL_EVENTING_MASK_EXT_LEN;
+
+ /* Write updated Event mask */
+ ret = wldev_iovar_setbuf(ndev, "event_msgs_ext", eventmask_msg,
+ WL_EVENTING_MASK_EXT_LEN + EVENTMSGS_EXT_STRUCT_SIZE,
+ iovbuf, sizeof(iovbuf), NULL);
+
+ if (unlikely(ret)) {
+ WL_ERR(("Set event_msgs error (%d)\n", ret));
+ }
+
+exit:
+ mutex_unlock(&cfg->event_sync);
+ return ret;
+}
+
+s32 wl_add_remove_eventmsg(struct net_device *ndev, u16 event, bool add)
+{
+ s32 err = 0;
+ s8 event_buf[WL_EVENTING_MASK_EXT_LEN + EVENTMSGS_EXT_STRUCT_SIZE] = {0};
+ eventmsgs_ext_t *eventmask_msg = NULL;
+ struct bcm_cfg80211 *cfg;
+ /* Room for "event_msgs_ext" + '\0' + bitvec */
+ char iovbuf[WL_EVENTING_MASK_EXT_LEN + EVENTMSGS_EXT_STRUCT_SIZE + 16];
+ s32 msglen = WL_EVENTING_MASK_EXT_LEN + EVENTMSGS_EXT_STRUCT_SIZE;
+
+ if (!ndev)
+ return -ENODEV;
+
+ cfg = wl_get_cfg(ndev);
+ if (!cfg)
+ return -ENODEV;
+
+ mutex_lock(&cfg->event_sync);
+
+ eventmask_msg = (eventmsgs_ext_t *)event_buf;
+ eventmask_msg->ver = EVENTMSGS_VER;
+ eventmask_msg->command = EVENTMSGS_NONE;
+ eventmask_msg->len = WL_EVENTING_MASK_EXT_LEN;
+ eventmask_msg->maxgetsize = WL_EVENTING_MASK_EXT_LEN;
+
+ /* Read event_msgs mask */
+ err = wldev_iovar_getbuf(ndev, "event_msgs_ext",
+ eventmask_msg, EVENTMSGS_EXT_STRUCT_SIZE,
+ iovbuf,
+ sizeof(iovbuf),
+ NULL);
+
+ if (unlikely(err)) {
+ WL_ERR(("Get event_msgs error (%d)\n", err));
+ goto eventmsg_out;
+ }
+
+ bcopy(iovbuf, eventmask_msg, msglen);
+
+ if (add) {
+ setbit(eventmask_msg->mask, event);
+ } else {
+ clrbit(eventmask_msg->mask, event);
+ }
+
+ /* Write updated Event mask */
+ eventmask_msg->ver = EVENTMSGS_VER;
+ eventmask_msg->command = EVENTMSGS_SET_MASK;
+ eventmask_msg->len = WL_EVENTING_MASK_EXT_LEN;
+
+ err = wldev_iovar_setbuf(ndev, "event_msgs_ext", eventmask_msg,
+ WL_EVENTING_MASK_EXT_LEN + EVENTMSGS_EXT_STRUCT_SIZE,
+ iovbuf, sizeof(iovbuf), NULL);
+
+ if (unlikely(err)) {
+ WL_ERR(("Set event_msgs error (%d)\n", err));
+ goto eventmsg_out;
+ }
+
+eventmsg_out:
+ mutex_unlock(&cfg->event_sync);
+ return err;
+}
+
+void
+wl_cfg80211_generate_mac_addr(struct ether_addr *ea_addr)
+{
+ RANDOM_BYTES(ea_addr->octet, ETHER_ADDR_LEN);
+ /* restore mcast and local admin bits to 0 and 1 */
+ ETHER_SET_UNICAST(ea_addr->octet);
+ ETHER_SET_LOCALADDR(ea_addr->octet);
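+	/* The result is a unicast, locally administered address: bit 0 of the
+	 * first octet cleared, bit 1 set (e.g. 02:xx:xx:xx:xx:xx).
+	 */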
+ WL_ERR(("%s:generated new MAC="MACDBG" \n",
+ __FUNCTION__, MAC2STRDBG(ea_addr->octet)));
+ return;
+}
+
+static s32 wl_update_chan_param(struct net_device *dev, u32 cur_chan,
+ struct ieee80211_channel *band_chan, bool *dfs_radar_disabled, bool legacy_chan_info)
+{
+ s32 err = BCME_OK;
+ u32 channel = cur_chan;
+
+ if (!(*dfs_radar_disabled)) {
+ if (legacy_chan_info) {
+ channel |= WL_CHANSPEC_BW_20;
+ channel = wl_chspec_host_to_driver(channel);
+ err = wldev_iovar_getint(dev, "per_chan_info", &channel);
+ }
+ if (!err) {
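+			/* Map firmware channel attributes to cfg80211 flags; the
+			 * flag names changed in kernel 3.14 (NO_IBSS and
+			 * PASSIVE_SCAN were folded into IEEE80211_CHAN_NO_IR).
+			 */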
+ if (channel & WL_CHAN_RADAR) {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
+ band_chan->flags |= (IEEE80211_CHAN_RADAR |
+ IEEE80211_CHAN_NO_IBSS);
+#else
+ band_chan->flags |= IEEE80211_CHAN_RADAR;
+#endif
+ }
+ if (channel & WL_CHAN_PASSIVE) {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
+ band_chan->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
+#else
+ band_chan->flags |= IEEE80211_CHAN_NO_IR;
+#endif
+ }
+ } else if (err == BCME_UNSUPPORTED) {
+ *dfs_radar_disabled = TRUE;
+ WL_ERR(("does not support per_chan_info\n"));
+ }
+ }
+
+ return err;
+}
+
+static int wl_construct_reginfo(struct bcm_cfg80211 *cfg, s32 bw_cap)
+{
+ struct net_device *dev = bcmcfg_to_prmry_ndev(cfg);
+ struct ieee80211_channel *band_chan_arr = NULL;
+ void *list;
+ u32 i, j, index, channel, array_size = 0;
+ chanspec_t chspec = 0;
+ s32 err = BCME_OK;
+ bool ht40_allowed;
+ bool dfs_radar_disabled = FALSE;
+ bool legacy_chan_info = FALSE;
+ u16 list_count;
+
+#define LOCAL_BUF_LEN 4096
+ list = MALLOCZ(cfg->osh, LOCAL_BUF_LEN);
+ if (list == NULL) {
+ WL_ERR(("failed to allocate local buf\n"));
+ return -ENOMEM;
+ }
+
+ err = wldev_iovar_getbuf_bsscfg(dev, "chan_info_list", NULL,
+ 0, list, LOCAL_BUF_LEN, 0, &cfg->ioctl_buf_sync);
+ if (err == BCME_UNSUPPORTED) {
+ WL_INFORM(("get chan_info_list, UNSUPPORTED\n"));
+ err = wldev_iovar_getbuf_bsscfg(dev, "chanspecs", NULL,
+ 0, list, LOCAL_BUF_LEN, 0, &cfg->ioctl_buf_sync);
+ if (err != BCME_OK) {
+ WL_ERR(("get chanspecs err(%d)\n", err));
+ MFREE(cfg->osh, list, LOCAL_BUF_LEN);
+ return err;
+ }
+ /* Update indicating legacy chan info usage */
+ legacy_chan_info = TRUE;
+ } else if (err != BCME_OK) {
+ WL_ERR(("get chan_info_list err(%d)\n", err));
+ MFREE(cfg->osh, list, LOCAL_BUF_LEN);
+ return err;
+ }
+
+ WL_CHANNEL_ARRAY_INIT(__wl_2ghz_channels);
+ WL_CHANNEL_ARRAY_INIT(__wl_5ghz_a_channels);
+#ifdef CFG80211_6G_SUPPORT
+ WL_CHANNEL_ARRAY_INIT(__wl_6ghz_channels);
+#endif /* CFG80211_6G_SUPPORT */
+
+ list_count = legacy_chan_info ? ((wl_uint32_list_t *)list)->count :
+ ((wl_chanspec_list_v1_t *)list)->count;
+ for (i = 0; i < dtoh32(list_count); i++) {
+ index = 0;
+ ht40_allowed = false;
+ if (legacy_chan_info) {
+ chspec = (chanspec_t)dtoh32(((wl_uint32_list_t *)list)->element[i]);
+ } else {
+ chspec = (chanspec_t)dtoh32
+ (((wl_chanspec_list_v1_t *)list)->chspecs[i].chanspec);
+ }
+ chspec = wl_chspec_driver_to_host(chspec);
+ channel = wf_chspec_ctlchan(chspec);
+
+ if (!CHSPEC_IS40(chspec) &&
+ !CHSPEC_IS20(chspec)) {
+ WL_DBG(("HT80/160/80p80 center channel : %d\n", channel));
+ continue;
+ }
+ if (CHSPEC_IS2G(chspec) && (channel >= CH_MIN_2G_CHANNEL) &&
+ (channel <= CH_MAX_2G_CHANNEL)) {
+ band_chan_arr = __wl_2ghz_channels;
+ array_size = ARRAYSIZE(__wl_2ghz_channels);
+ ht40_allowed = (bw_cap == WLC_N_BW_40ALL)? true : false;
+ }
+#ifdef CFG80211_6G_SUPPORT
+ else if (CHSPEC_IS6G(chspec) && (channel >= CH_MIN_6G_CHANNEL) &&
+ (channel <= CH_MAX_6G_CHANNEL)) {
+ band_chan_arr = __wl_6ghz_channels;
+ array_size = ARRAYSIZE(__wl_6ghz_channels);
+ ht40_allowed = (bw_cap == WLC_N_BW_20ALL)? false : true;
+ }
+#endif /* CFG80211_6G_SUPPORT */
+ else if (
+#ifdef WL_6G_BAND
+ /* Currently, due to lack of kernel support, both 6GHz and 5GHz
+ * channels are published under the 5GHz band
+ */
+ (CHSPEC_IS6G(chspec) && (channel >= CH_MIN_6G_CHANNEL) &&
+ (channel <= CH_MAX_6G_CHANNEL)) ||
+#endif /* WL_6G_BAND */
+ (CHSPEC_IS5G(chspec) && channel >= CH_MIN_5G_CHANNEL)) {
+ band_chan_arr = __wl_5ghz_a_channels;
+ array_size = ARRAYSIZE(__wl_5ghz_a_channels);
+ ht40_allowed = (bw_cap == WLC_N_BW_20ALL)? false : true;
+ } else {
+ WL_ERR(("Invalid channel spec. 0x%x.\n", chspec));
+ continue;
+ }
+ if (!ht40_allowed && CHSPEC_IS40(chspec))
+ continue;
+ for (j = 0; j < array_size; j++) {
+ if (band_chan_arr[j].hw_value == chspec) {
+ break;
+ }
+ }
+ index = j;
+ if (!dhd_conf_match_channel(cfg->pub, channel))
+ continue;
+ if (index < array_size) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39) && !defined(WL_COMPAT_WIRELESS)
+ band_chan_arr[index].center_freq =
+ ieee80211_channel_to_frequency(channel);
+#else
+ band_chan_arr[index].center_freq =
+ wl_channel_to_frequency(channel, CHSPEC_BAND(chspec));
+#endif
+ band_chan_arr[index].hw_value = chspec;
+ band_chan_arr[index].beacon_found = false;
+ band_chan_arr[index].flags &= ~IEEE80211_CHAN_DISABLED;
+
+ if (CHSPEC_IS40(chspec) && ht40_allowed) {
+ /* assuming the order is HT20, HT40 Upper,
+ * HT40 lower from chanspecs
+ */
+ u32 ht40_flag = band_chan_arr[index].flags & IEEE80211_CHAN_NO_HT40;
+ if (CHSPEC_SB_UPPER(chspec)) {
+ if (ht40_flag == IEEE80211_CHAN_NO_HT40)
+ band_chan_arr[index].flags &=
+ ~IEEE80211_CHAN_NO_HT40;
+ band_chan_arr[index].flags |= IEEE80211_CHAN_NO_HT40PLUS;
+ } else {
+ /* It should be one of
+ * IEEE80211_CHAN_NO_HT40 or IEEE80211_CHAN_NO_HT40PLUS
+ */
+ band_chan_arr[index].flags &= ~IEEE80211_CHAN_NO_HT40;
+ if (ht40_flag == IEEE80211_CHAN_NO_HT40)
+ band_chan_arr[index].flags |=
+ IEEE80211_CHAN_NO_HT40MINUS;
+ }
+ } else {
+ band_chan_arr[index].flags = IEEE80211_CHAN_NO_HT40;
+ if (!legacy_chan_info) {
+ channel = dtoh32
+ (((wl_chanspec_list_v1_t *)list)->chspecs[i].chaninfo);
+ } else {
+ channel |= CHSPEC_BAND(chspec);
+ }
+ /* Update channel for radar/passive support */
+ err = wl_update_chan_param(dev, channel,
+ &band_chan_arr[index], &dfs_radar_disabled, legacy_chan_info);
+ }
+ }
+
+ }
+
+ __wl_band_2ghz.n_channels = ARRAYSIZE(__wl_2ghz_channels);
+ __wl_band_5ghz_a.n_channels = ARRAYSIZE(__wl_5ghz_a_channels);
+#ifdef CFG80211_6G_SUPPORT
+ __wl_band_6ghz.n_channels = ARRAYSIZE(__wl_6ghz_channels);
+#endif /* CFG80211_6G_SUPPORT */
+
+ MFREE(cfg->osh, list, LOCAL_BUF_LEN);
+#undef LOCAL_BUF_LEN
+ return err;
+}
+
+#ifdef WL_6G_BAND
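+/* Scan the firmware band list (bandlist[0] holds the band count) and
+ * latch 6GHz support into the cfg structure.
+ */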
+static void wl_is_6g_supported(struct bcm_cfg80211 *cfg, u32 *bandlist, u8 nbands)
+{
+ u32 i = 0;
+
+ if (nbands > WL_MAX_BAND_SUPPORT) {
+ return;
+ }
+ /* Check for 6GHz band support */
+ for (i = 1; i <= nbands; i++) {
+ if (bandlist[i] == WLC_BAND_6G) {
+ cfg->band_6g_supported = true;
+ }
+ }
+}
+#endif /* WL_6G_BAND */
+
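+/* Query band/HT/VHT capabilities from the firmware and populate the
+ * corresponding wiphy band structures. Called with usr_sync held
+ * (see wl_update_wiphybands and __wl_cfg80211_up below).
+ */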
+static s32 __wl_update_wiphybands(struct bcm_cfg80211 *cfg, bool notify)
+{
+ struct wiphy *wiphy;
+ struct net_device *dev = bcmcfg_to_prmry_ndev(cfg);
+ u32 bandlist[WL_MAX_BAND_SUPPORT+1];
+ u32 nband = 0;
+ u32 i = 0;
+ s32 err = 0;
+ s32 index = 0;
+ s32 nmode = 0;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) || defined(CUSTOMER_HW5)
+ u32 j = 0;
+ s32 vhtmode = 0;
+ s32 txstreams = 0;
+ s32 rxstreams = 0;
+ s32 ldpc_cap = 0;
+ s32 stbc_rx = 0;
+ s32 stbc_tx = 0;
+ s32 txbf_bfe_cap = 0;
+ s32 txbf_bfr_cap = 0;
+#endif /* KERNEL >= 3.6 || CUSTOMER_HW5 */
+ s32 bw_cap = 0;
+ s32 cur_band = -1;
+ struct ieee80211_supported_band *bands[IEEE80211_NUM_BANDS] = {NULL, };
+
+ bzero(bandlist, sizeof(bandlist));
+ err = wldev_ioctl_get(dev, WLC_GET_BANDLIST, bandlist,
+ sizeof(bandlist));
+ if (unlikely(err)) {
+ WL_ERR(("error read bandlist (%d)\n", err));
+ return err;
+ }
+ err = wldev_ioctl_get(dev, WLC_GET_BAND, &cur_band,
+ sizeof(s32));
+ if (unlikely(err)) {
+ WL_ERR(("error (%d)\n", err));
+ return err;
+ }
+
+ err = wldev_iovar_getint(dev, "nmode", &nmode);
+ if (unlikely(err)) {
+ WL_ERR(("error reading nmode (%d)\n", err));
+ }
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) || defined(CUSTOMER_HW5)
+ err = wldev_iovar_getint(dev, "vhtmode", &vhtmode);
+ if (unlikely(err)) {
+ WL_ERR(("error reading vhtmode (%d)\n", err));
+ }
+
+ if (vhtmode) {
+ err = wldev_iovar_getint(dev, "txstreams", &txstreams);
+ if (unlikely(err)) {
+ WL_ERR(("error reading txstreams (%d)\n", err));
+ }
+
+ err = wldev_iovar_getint(dev, "rxstreams", &rxstreams);
+ if (unlikely(err)) {
+ WL_ERR(("error reading rxstreams (%d)\n", err));
+ }
+
+ err = wldev_iovar_getint(dev, "ldpc_cap", &ldpc_cap);
+ if (unlikely(err)) {
+ WL_ERR(("error reading ldpc_cap (%d)\n", err));
+ }
+
+ err = wldev_iovar_getint(dev, "stbc_rx", &stbc_rx);
+ if (unlikely(err)) {
+ WL_ERR(("error reading stbc_rx (%d)\n", err));
+ }
+
+ err = wldev_iovar_getint(dev, "stbc_tx", &stbc_tx);
+ if (unlikely(err)) {
+ WL_ERR(("error reading stbc_tx (%d)\n", err));
+ }
+
+ err = wldev_iovar_getint(dev, "txbf_bfe_cap", &txbf_bfe_cap);
+ if (unlikely(err)) {
+ WL_ERR(("error reading txbf_bfe_cap (%d)\n", err));
+ }
+
+ err = wldev_iovar_getint(dev, "txbf_bfr_cap", &txbf_bfr_cap);
+ if (unlikely(err)) {
+ WL_ERR(("error reading txbf_bfr_cap (%d)\n", err));
+ }
+ }
+#endif /* KERNEL >= 3.6 || CUSTOMER_HW5 */
+
+ /* For nmode and vhtmode check bw cap */
+ if (nmode ||
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) || defined(CUSTOMER_HW5)
+ vhtmode ||
+#endif /* KERNEL >= 3.6 || CUSTOMER_HW5 */
+ 0) {
+ err = wldev_iovar_getint(dev, "mimo_bw_cap", &bw_cap);
+ if (unlikely(err)) {
+ WL_ERR(("error get mimo_bw_cap (%d)\n", err));
+ }
+ }
+
+#ifdef WL_6G_BAND
+ wl_is_6g_supported(cfg, bandlist, bandlist[0]);
+#endif /* WL_6G_BAND */
+
+ err = wl_construct_reginfo(cfg, bw_cap);
+ if (err) {
+ WL_ERR(("wl_construct_reginfo() fails err=%d\n", err));
+ if (err != BCME_UNSUPPORTED)
+ return err;
+ }
+
+ wiphy = bcmcfg_to_wiphy(cfg);
+ nband = bandlist[0];
+
+ for (i = 1; i <= nband && i < ARRAYSIZE(bandlist); i++) {
+ index = -1;
+
+ if (bandlist[i] == WLC_BAND_2G && __wl_band_2ghz.n_channels > 0) {
+ bands[IEEE80211_BAND_2GHZ] =
+ &__wl_band_2ghz;
+ index = IEEE80211_BAND_2GHZ;
+ if (bw_cap == WLC_N_BW_40ALL)
+ bands[index]->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
+ } else {
+ if (bandlist[i] == WLC_BAND_6G) {
+#ifdef CFG80211_6G_SUPPORT
+ if (__wl_band_6ghz.n_channels > 0) {
+ bands[IEEE80211_BAND_6GHZ] = &__wl_band_6ghz;
+ index = IEEE80211_BAND_6GHZ;
+ } else {
+ WL_ERR(("6GHz channels not listed\n"));
+ continue;
+ }
+#else /* CFG80211_6G_SUPPORT */
+ /* Both 6G/5G channels will be under 5G band list */
+ if (__wl_band_5ghz_a.n_channels > 0) {
+ bands[IEEE80211_BAND_5GHZ] = &__wl_band_5ghz_a;
+ index = IEEE80211_BAND_5GHZ;
+ } else {
+ WL_ERR(("5GHz channels not listed\n"));
+ continue;
+ }
+#endif /* CFG80211_6G_SUPPORT */
+ } else if ((bandlist[i] == WLC_BAND_5G) &&
+ (__wl_band_5ghz_a.n_channels > 0)) {
+ bands[IEEE80211_BAND_5GHZ] = &__wl_band_5ghz_a;
+ index = IEEE80211_BAND_5GHZ;
+ } else {
+ WL_ERR(("Invalid band\n"));
+ continue;
+ }
+
+ if (nmode && (bw_cap == WLC_N_BW_40ALL || bw_cap == WLC_N_BW_20IN2G_40IN5G))
+ bands[index]->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) || defined(CUSTOMER_HW5)
+ /* VHT capabilities. */
+ if (vhtmode) {
+ /* Supported */
+ bands[index]->vht_cap.vht_supported = TRUE;
+
+ for (j = 1; j <= VHT_CAP_MCS_MAP_NSS_MAX; j++) {
+ /* TX stream rates. */
+ if (j <= txstreams) {
+ VHT_MCS_MAP_SET_MCS_PER_SS(j, VHT_CAP_MCS_MAP_0_9,
+ bands[index]->vht_cap.vht_mcs.tx_mcs_map);
+ } else {
+ VHT_MCS_MAP_SET_MCS_PER_SS(j, VHT_CAP_MCS_MAP_NONE,
+ bands[index]->vht_cap.vht_mcs.tx_mcs_map);
+ }
+
+ /* RX stream rates. */
+ if (j <= rxstreams) {
+ VHT_MCS_MAP_SET_MCS_PER_SS(j, VHT_CAP_MCS_MAP_0_9,
+ bands[index]->vht_cap.vht_mcs.rx_mcs_map);
+ } else {
+ VHT_MCS_MAP_SET_MCS_PER_SS(j, VHT_CAP_MCS_MAP_NONE,
+ bands[index]->vht_cap.vht_mcs.rx_mcs_map);
+ }
+ }
+
+ /* Capabilities */
+ /* 80 MHz is mandatory */
+ bands[index]->vht_cap.cap |=
+ IEEE80211_VHT_CAP_SHORT_GI_80;
+
+ if (WL_BW_CAP_160MHZ(bw_cap)) {
+ bands[index]->vht_cap.cap |=
+ IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ;
+ bands[index]->vht_cap.cap |=
+ IEEE80211_VHT_CAP_SHORT_GI_160;
+ }
+
+ bands[index]->vht_cap.cap |=
+ IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
+
+ if (ldpc_cap)
+ bands[index]->vht_cap.cap |=
+ IEEE80211_VHT_CAP_RXLDPC;
+
+ if (stbc_tx)
+ bands[index]->vht_cap.cap |=
+ IEEE80211_VHT_CAP_TXSTBC;
+
+ if (stbc_rx)
+ bands[index]->vht_cap.cap |=
+ (stbc_rx << VHT_CAP_INFO_RX_STBC_SHIFT);
+
+ if (txbf_bfe_cap)
+ bands[index]->vht_cap.cap |=
+ IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE;
+
+ if (txbf_bfr_cap) {
+ bands[index]->vht_cap.cap |=
+ IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE;
+ }
+
+ if (txbf_bfe_cap || txbf_bfr_cap) {
+ bands[index]->vht_cap.cap |=
+ (2 << VHT_CAP_INFO_NUM_BMFMR_ANT_SHIFT);
+ bands[index]->vht_cap.cap |=
+ ((txstreams - 1) <<
+ VHT_CAP_INFO_NUM_SOUNDING_DIM_SHIFT);
+ bands[index]->vht_cap.cap |=
+ IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_MRQ_MFB;
+ }
+
+ /* AMPDU length limit, support max 1MB (2 ^ (13 + 7)) */
+ bands[index]->vht_cap.cap |=
+ (7 << VHT_CAP_INFO_AMPDU_MAXLEN_EXP_SHIFT);
+ WL_DBG(("__wl_update_wiphybands band[%d] vht_enab=%d vht_cap=%08x "
+ "vht_rx_mcs_map=%04x vht_tx_mcs_map=%04x\n",
+ index,
+ bands[index]->vht_cap.vht_supported,
+ bands[index]->vht_cap.cap,
+ bands[index]->vht_cap.vht_mcs.rx_mcs_map,
+ bands[index]->vht_cap.vht_mcs.tx_mcs_map));
+ }
+#endif /* KERNEL >= 3.6 || CUSTOMER_HW5 */
+ }
+
+ if ((index >= 0) && nmode) {
+ bands[index]->ht_cap.cap |=
+ (IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_DSSSCCK40);
+ bands[index]->ht_cap.ht_supported = TRUE;
+ bands[index]->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+ bands[index]->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16;
+ /* An HT shall support all EQM rates for one spatial stream */
+ bands[index]->ht_cap.mcs.rx_mask[0] = 0xff;
+ }
+
+ }
+
+ wiphy->bands[IEEE80211_BAND_2GHZ] = bands[IEEE80211_BAND_2GHZ];
+ wiphy->bands[IEEE80211_BAND_5GHZ] = bands[IEEE80211_BAND_5GHZ];
+#ifdef CFG80211_6G_SUPPORT
+ wiphy->bands[IEEE80211_BAND_6GHZ] = bands[IEEE80211_BAND_6GHZ];
+#endif /* CFG80211_6G_SUPPORT */
+
+ /* check if any band is populated; otherwise make 2GHz the default */
+ if (wiphy->bands[IEEE80211_BAND_2GHZ] == NULL &&
+#ifdef CFG80211_6G_SUPPORT
+ wiphy->bands[IEEE80211_BAND_6GHZ] == NULL &&
+#endif /* CFG80211_6G_SUPPORT */
+ wiphy->bands[IEEE80211_BAND_5GHZ] == NULL) {
+ /* Setup 2Ghz band as default */
+ wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz;
+ }
+
+ if (notify) {
+ if (!IS_REGDOM_SELF_MANAGED(wiphy)) {
+ WL_UPDATE_CUSTOM_REGULATORY(wiphy);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 12, 0))
+ rtnl_unlock();
+#endif
+ wiphy_apply_custom_regulatory(wiphy, &brcm_regdom);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 12, 0))
+ rtnl_lock();
+#endif
+ }
+ }
+
+ return 0;
+}
+
+s32 wl_update_wiphybands(struct bcm_cfg80211 *cfg, bool notify)
+{
+ s32 err;
+
+ mutex_lock(&cfg->usr_sync);
+ err = __wl_update_wiphybands(cfg, notify);
+ mutex_unlock(&cfg->usr_sync);
+
+ return err;
+}
+
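+/* Core bring-up path: starts the event handler when firmware is not
+ * downloaded on driverload, forces the primary interface back to STA
+ * mode, initializes scan state, publishes the wiphy bands and applies
+ * the per-feature defaults below.
+ */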
+static s32 __wl_cfg80211_up(struct bcm_cfg80211 *cfg)
+{
+ s32 err = 0;
+ s32 ret = 0;
+
+ struct net_info *netinfo = NULL;
+ struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+ struct wireless_dev *wdev = ndev->ieee80211_ptr;
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+#ifdef WLTDLS
+ u32 tdls;
+#endif /* WLTDLS */
+ u16 wl_iftype = 0;
+ u16 wl_mode = 0;
+ u8 ioctl_buf[WLC_IOCTL_SMLEN];
+
+ WL_DBG(("In\n"));
+
+#if defined(__linux__)
+ if (!dhd_download_fw_on_driverload) {
+#endif
+ err = wl_create_event_handler(cfg);
+ if (err) {
+ WL_ERR(("wl_create_event_handler failed\n"));
+ return err;
+ }
+ wl_init_event_handler(cfg);
+#if defined(__linux__)
+ }
+#endif
+ /* Reserve 0x8000 toggle bit for P2P GO/GC */
+ cfg->vif_macaddr_mask = 0x8000;
+
+#if defined(BCMDONGLEHOST)
+ err = dhd_config_dongle(cfg);
+ if (unlikely(err))
+ return err;
+#endif /* defined(BCMDONGLEHOST) */
+
+#ifdef SHOW_LOGTRACE
+ /* Start the event logging */
+ wl_add_remove_eventmsg(ndev, WLC_E_TRACE, TRUE);
+#endif /* SHOW_LOGTRACE */
+
+ (void)memcpy_s(wdev->wiphy->perm_addr, ETHER_ADDR_LEN,
+ bcmcfg_to_prmry_ndev(cfg)->perm_addr, ETHER_ADDR_LEN);
+ /* Always bring up the interface in STA mode.
+ * If a previous SoftAP bringup/cleanup was not done properly,
+ * the iftype can remain stuck in AP mode, so force the type
+ * to STA on the next wlan0 up.
+ */
+ netinfo = wl_get_netinfo_by_wdev(cfg, wdev);
+ if (!netinfo) {
+ WL_ERR(("there is no netinfo\n"));
+ return -ENODEV;
+ }
+
+ if (ndev->ieee80211_ptr->iftype == NL80211_IFTYPE_AP) {
+ /* AP on primary interface case: Supplicant will
+ * set mode first and then do dev_open. so in this
+ * case, the type will already be set.
+ */
+ netinfo->iftype = WL_IF_TYPE_AP;
+ } else {
+ ndev->ieee80211_ptr->iftype = NL80211_IFTYPE_STATION;
+ netinfo->iftype = WL_IF_TYPE_STA;
+ }
+
+ if (cfg80211_to_wl_iftype(wdev->iftype, &wl_iftype, &wl_mode) < 0) {
+ return -EINVAL;
+ }
+ if (!dhd->fw_preinit) {
+ err = wl_config_infra(cfg, ndev, wl_iftype);
+ if (unlikely(err && err != -EINPROGRESS)) {
+ WL_ERR(("wl_config_infra failed\n"));
+ if (err == -1) {
+ WL_ERR(("return error %d\n", err));
+ return err;
+ }
+ }
+ }
+
+ err = wl_init_scan(cfg);
+ if (err) {
+ WL_ERR(("wl_init_scan failed\n"));
+ return err;
+ }
+ err = __wl_update_wiphybands(cfg, true);
+ if (unlikely(err)) {
+ WL_ERR(("wl_update_wiphybands failed\n"));
+ if (err == -1) {
+ WL_ERR(("return error %d\n", err));
+ return err;
+ }
+ }
+
+ /* Update cfg with the wlc version already queried as part of DHD initialization */
+ cfg->wlc_ver.wlc_ver_major = dhd->wlc_ver_major;
+ cfg->wlc_ver.wlc_ver_minor = dhd->wlc_ver_minor;
+
+ if ((ret = wldev_iovar_getbuf(ndev, "scan_ver", NULL, 0,
+ ioctl_buf, sizeof(ioctl_buf), NULL)) == BCME_OK) {
+ WL_INFORM_MEM(("scan_params v2\n"));
+ /* use scan_params ver2 */
+ cfg->scan_params_v2 = true;
+ } else {
+ if (ret == BCME_UNSUPPORTED) {
+ WL_INFORM(("scan_ver, UNSUPPORTED\n"));
+ ret = BCME_OK;
+ } else {
+ WL_INFORM(("get scan_ver err(%d)\n", ret));
+ }
+ }
+
+ if (((cfg->wlc_ver.wlc_ver_major == MIN_JOINEXT_V1_BR1_FW_MAJOR) &&
+ (cfg->wlc_ver.wlc_ver_minor == MIN_JOINEXT_V1_BR1_FW_MINOR)) ||
+ ((cfg->wlc_ver.wlc_ver_major == MIN_JOINEXT_V1_BR2_FW_MAJOR) &&
+ (cfg->wlc_ver.wlc_ver_minor >= MIN_JOINEXT_V1_BR2_FW_MINOR)) ||
+ (cfg->wlc_ver.wlc_ver_major >= MIN_JOINEXT_V1_FW_MAJOR)) {
+ cfg->join_iovar_ver = WL_EXTJOIN_VERSION_V1;
+ WL_INFORM_MEM(("join_ver:%d\n", cfg->join_iovar_ver));
+ }
+
+#ifdef DHD_LOSSLESS_ROAMING
+ del_timer_sync(&cfg->roam_timeout);
+#endif /* DHD_LOSSLESS_ROAMING */
+
+ err = dhd_monitor_init(cfg->pub);
+
+#ifdef WL_HOST_BAND_MGMT
+ /* By default the curr_band is initialized to BAND_AUTO */
+ if ((ret = wl_cfg80211_set_band(ndev, WLC_BAND_AUTO)) < 0) {
+ if (ret == BCME_UNSUPPORTED) {
+ /* Don't fail the initialization; let's just
+ * fall back to the original method
+ */
+ WL_ERR(("WL_HOST_BAND_MGMT defined, "
+ "but roam_band iovar not supported \n"));
+ } else {
+ WL_ERR(("roam_band failed. ret=%d", ret));
+ err = -1;
+ }
+ }
+#endif /* WL_HOST_BAND_MGMT */
+ /* Reset WES mode to 0 */
+ cfg->wes_mode = OFF;
+ cfg->ncho_mode = OFF;
+ cfg->ncho_band = WLC_BAND_AUTO;
+#ifdef WBTEXT
+ /* when wifi up, set roam_prof to default value */
+ if (dhd->wbtext_support) {
+ if (dhd->op_mode & DHD_FLAG_STA_MODE) {
+ if (!dhd->fw_preinit) {
+ wl_cfg80211_wbtext_set_default(ndev);
+ }
+ wl_cfg80211_wbtext_clear_bssid_list(cfg);
+ }
+ }
+#endif /* WBTEXT */
+#ifdef WLTDLS
+ if (wldev_iovar_getint(ndev, "tdls_enable", &tdls) == 0) {
+ WL_DBG(("TDLS supported in fw\n"));
+ cfg->tdls_supported = true;
+ }
+#endif /* WLTDLS */
+#ifdef WL_IFACE_MGMT
+#ifdef CUSTOM_IF_MGMT_POLICY
+ cfg->iface_data.policy = CUSTOM_IF_MGMT_POLICY;
+#else
+ cfg->iface_data.policy = WL_IF_POLICY_DEFAULT;
+#endif /* CUSTOM_IF_MGMT_POLICY */
+#endif /* WL_IFACE_MGMT */
+#ifdef WL_NAN
+#ifdef WL_NANP2P
+ if (FW_SUPPORTED(dhd, nanp2p)) {
+ /* Enable NANP2P concurrent support */
+ cfg->conc_disc = WL_NANP2P_CONC_SUPPORT;
+ WL_INFORM_MEM(("nan + p2p conc discovery is supported\n"));
+ cfg->nan_p2p_supported = true;
+ }
+#endif /* WL_NANP2P */
+#endif /* WL_NAN */
+
+#ifdef WL_SAR_TX_POWER
+ cfg->wifi_tx_power_mode = WIFI_POWER_SCENARIO_INVALID;
+#endif /* WL_SAR_TX_POWER */
+
+#if defined(OEM_ANDROID) && defined(DHCP_SCAN_SUPPRESS)
+ /* wlan scan_supp timer and work thread info */
+ init_timer_compat(&cfg->scan_supp_timer, wl_cfg80211_scan_supp_timerfunc, cfg);
+ INIT_WORK(&cfg->wlan_work, wl_cfg80211_work_handler);
+#endif /* DHCP_SCAN_SUPPRESS */
+
+ INIT_DELAYED_WORK(&cfg->pm_enable_work, wl_cfg80211_work_handler);
+ wl_set_drv_status(cfg, READY, ndev);
+
+ return err;
+}
+
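+/* Core teardown path: cancels pending scan/listen work, notifies
+ * cfg80211 of any connections left behind and clears the driver
+ * status bits on every interface before the stack goes down.
+ */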
+static s32 __wl_cfg80211_down(struct bcm_cfg80211 *cfg)
+{
+ s32 err = 0;
+ struct net_info *iter, *next;
+ struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+#if defined(WL_CFG80211) && \
+ (defined(WL_ENABLE_P2P_IF) || defined(WL_NEWCFG_PRIVCMD_SUPPORT)) && \
+ !defined(PLATFORM_SLP)
+ struct net_device *p2p_net = cfg->p2p_net;
+#endif /* WL_CFG80211 && (WL_ENABLE_P2P_IF || WL_NEWCFG_PRIVCMD_SUPPORT) && !PLATFORM_SLP */
+#ifdef BCMDONGLEHOST
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+#endif /* BCMDONGLEHOST */
+ WL_INFORM_MEM(("cfg80211 down\n"));
+
+ /* Check if cfg80211 interface is already down */
+ if (!wl_get_drv_status(cfg, READY, ndev)) {
+ WL_DBG(("cfg80211 interface is already down\n"));
+ return err; /* it is not even ready */
+ }
+
+#ifdef SHOW_LOGTRACE
+ /* Stop the event logging */
+ wl_add_remove_eventmsg(ndev, WLC_E_TRACE, FALSE);
+#endif /* SHOW_LOGTRACE */
+
+ /* clear vendor OUI list */
+ wl_vndr_ies_clear_vendor_oui_list(cfg);
+
+ /* clear timestamps */
+ CLR_TS(cfg, scan_start);
+ CLR_TS(cfg, scan_cmplt);
+ CLR_TS(cfg, conn_start);
+ CLR_TS(cfg, conn_cmplt);
+ CLR_TS(cfg, authorize_start);
+ CLR_TS(cfg, authorize_cmplt);
+
+ /* Delete pm_enable_work */
+ wl_add_remove_pm_enable_work(cfg, WL_PM_WORKQ_DEL);
+
+ if (cfg->loc.in_progress) {
+ /* Listen in progress */
+ if (delayed_work_pending(&cfg->loc.work)) {
+ cancel_delayed_work_sync(&cfg->loc.work);
+ }
+ wl_cfgscan_notify_listen_complete(cfg);
+ }
+
+ if (delayed_work_pending(&cfg->ap_work)) {
+ cancel_delayed_work_sync(&cfg->ap_work);
+ }
+
+ if (cfg->p2p_supported) {
+ wl_clr_p2p_status(cfg, GO_NEG_PHASE);
+#ifdef PROP_TXSTATUS_VSDB
+#if defined(BCMSDIO) || defined(BCMDBUS)
+ if (wl_cfgp2p_vif_created(cfg)) {
+ bool enabled = false;
+ dhd_wlfc_get_enable(dhd, &enabled);
+ /* Turn WLFC off while unloading the dhd driver,
+ * except when operating in IBSS or SoftAP mode
+ */
+ if (enabled && cfg->wlfc_on && dhd->op_mode != DHD_FLAG_HOSTAP_MODE &&
+ dhd->op_mode != DHD_FLAG_IBSS_MODE) {
+ dhd_wlfc_deinit(dhd);
+ cfg->wlfc_on = false;
+ }
+ }
+#endif /* BCMSDIO || BCMDBUS */
+#endif /* PROP_TXSTATUS_VSDB */
+ }
+
+#if defined(OEM_ANDROID) && defined(DHCP_SCAN_SUPPRESS)
+ /* Force clear of scan_suppress */
+ if (cfg->scan_suppressed)
+ wl_cfg80211_scan_suppress(ndev, 0);
+ del_timer_sync(&cfg->scan_supp_timer);
+ cancel_work_sync(&cfg->wlan_work);
+#endif /* DHCP_SCAN_SUPPRESS */
+
+#ifdef WL_SAR_TX_POWER
+ cfg->wifi_tx_power_mode = WIFI_POWER_SCENARIO_INVALID;
+#endif /* WL_SAR_TX_POWER */
+ if (!dhd_download_fw_on_driverload) {
+ /* For built-in drivers/other drivers that do reset on
+ * "ifconfig <primary_iface> down", cleanup any left
+ * over interfaces
+ */
+ wl_cfg80211_cleanup_virtual_ifaces(cfg, false);
+ }
+ /* Clear used mac addr mask */
+ cfg->vif_macaddr_mask = 0;
+
+#ifdef BCMDONGLEHOST
+ if (dhd->up)
+#endif /* BCMDONGLEHOST */
+ {
+ /* If primary BSS is operational (for e.g SoftAP), bring it down */
+ if (wl_cfg80211_bss_isup(ndev, 0)) {
+ if (wl_cfg80211_bss_up(cfg, ndev, 0, 0) < 0)
+ WL_ERR(("BSS down failed \n"));
+ }
+
+ /* clear all the security setting on primary Interface */
+ wl_cfg80211_clear_security(cfg);
+ }
+
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ for_each_ndev(cfg, iter, next) {
+ GCC_DIAGNOSTIC_POP();
+ if (iter->ndev) /* p2p discovery iface is null */
+ wl_set_drv_status(cfg, SCAN_ABORTING, iter->ndev);
+ }
+#ifdef WL_SDO
+ wl_cfg80211_sdo_deinit(cfg);
+#endif /* WL_SDO */
+
+#ifdef P2P_LISTEN_OFFLOADING
+ wl_cfg80211_p2plo_deinit(cfg);
+#endif /* P2P_LISTEN_OFFLOADING */
+
+ /* cancel and notify scan complete, if scan request is pending */
+ wl_cfgscan_cancel_scan(cfg);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ for_each_ndev(cfg, iter, next) {
+ GCC_DIAGNOSTIC_POP();
+ /* p2p discovery iface ndev ptr could be null */
+ if (iter->ndev == NULL)
+ continue;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+ WL_INFORM_MEM(("wl_cfg80211_down. connection state bit status: [%u:%u:%u:%u]\n",
+ wl_get_drv_status(cfg, CONNECTING, ndev),
+ wl_get_drv_status(cfg, CONNECTED, ndev),
+ wl_get_drv_status(cfg, DISCONNECTING, ndev),
+ wl_get_drv_status(cfg, NESTED_CONNECT, ndev)));
+
+ if (wl_get_drv_status(cfg, CONNECTED, iter->ndev)) {
+ CFG80211_DISCONNECTED(iter->ndev, 0, NULL, 0, false, GFP_KERNEL);
+ wl_clr_drv_status(cfg, AUTHORIZED, iter->ndev);
+ }
+
+ if ((iter->ndev->ieee80211_ptr->iftype == NL80211_IFTYPE_STATION) &&
+ wl_get_drv_status(cfg, CONNECTING, iter->ndev)) {
+
+ u8 *latest_bssid = wl_read_prof(cfg, ndev, WL_PROF_LATEST_BSSID);
+ struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+ struct wireless_dev *wdev = ndev->ieee80211_ptr;
+ struct cfg80211_bss *bss = CFG80211_GET_BSS(wiphy, NULL, latest_bssid,
+ wdev->ssid, wdev->ssid_len);
+
+ BCM_REFERENCE(bss);
+
+ CFG80211_CONNECT_RESULT(ndev,
+ latest_bssid, bss, NULL, 0, NULL, 0,
+ WLAN_STATUS_UNSPECIFIED_FAILURE,
+ GFP_KERNEL);
+ }
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) */
+ wl_clr_drv_status(cfg, READY, iter->ndev);
+ wl_clr_drv_status(cfg, SCANNING, iter->ndev);
+ wl_clr_drv_status(cfg, SCAN_ABORTING, iter->ndev);
+ wl_clr_drv_status(cfg, CONNECTING, iter->ndev);
+ wl_clr_drv_status(cfg, CONNECTED, iter->ndev);
+ wl_clr_drv_status(cfg, DISCONNECTING, iter->ndev);
+ wl_clr_drv_status(cfg, AP_CREATED, iter->ndev);
+ wl_clr_drv_status(cfg, AP_CREATING, iter->ndev);
+ wl_clr_drv_status(cfg, NESTED_CONNECT, iter->ndev);
+ wl_clr_drv_status(cfg, CFG80211_CONNECT, iter->ndev);
+ }
+ bcmcfg_to_prmry_ndev(cfg)->ieee80211_ptr->iftype =
+ NL80211_IFTYPE_STATION;
+#if defined(WL_CFG80211) && \
+ (defined(WL_ENABLE_P2P_IF) || defined(WL_NEWCFG_PRIVCMD_SUPPORT)) && \
+ !defined(PLATFORM_SLP)
+#ifdef SUPPORT_DEEP_SLEEP
+ if (!trigger_deep_sleep)
+#endif /* SUPPORT_DEEP_SLEEP */
+ if (p2p_net)
+ dev_close(p2p_net);
+#endif /* WL_CFG80211 && (WL_ENABLE_P2P_IF || WL_NEWCFG_PRIVCMD_SUPPORT)&& !PLATFORM_SLP */
+
+ /* Avoid deadlock from wl_cfg80211_down */
+#if defined(BCMDONGLEHOST) && defined(__linux__)
+ if (!dhd_download_fw_on_driverload) {
+#endif
+ mutex_unlock(&cfg->usr_sync);
+ wl_destroy_event_handler(cfg);
+ mutex_lock(&cfg->usr_sync);
+#if defined(BCMDONGLEHOST) && defined(__linux__)
+ }
+#endif
+
+ wl_flush_eq(cfg);
+ wl_link_down(cfg);
+ if (cfg->p2p_supported) {
+ if (timer_pending(&cfg->p2p->listen_timer))
+ del_timer_sync(&cfg->p2p->listen_timer);
+ wl_cfgp2p_down(cfg);
+ }
+
+ del_timer_sync(&cfg->scan_timeout);
+
+ wl_cfg80211_clear_mgmt_vndr_ies(cfg);
+
+#if defined(BCMDONGLEHOST) && defined(OEM_ANDROID)
+ DHD_OS_SCAN_WAKE_UNLOCK((dhd_pub_t *)(cfg->pub));
+#endif
+
+ dhd_monitor_uninit();
+#ifdef WLAIBSS_MCHAN
+ bcm_cfg80211_del_ibss_if(cfg->wdev->wiphy, cfg->ibss_cfgdev);
+#endif /* WLAIBSS_MCHAN */
+
+#ifdef WL11U
+ /* Clear interworking element. */
+ if (cfg->wl11u) {
+ cfg->wl11u = FALSE;
+ }
+#endif /* WL11U */
+
+#ifdef CUSTOMER_HW4_DEBUG
+ if (wl_scan_timeout_dbg_enabled) {
+ wl_scan_timeout_dbg_clear();
+ }
+#endif /* CUSTOMER_HW4_DEBUG */
+
+ cfg->disable_roam_event = false;
+ cfg->scan_params_v2 = false;
+
+ DNGL_FUNC(dhd_cfg80211_down, (cfg));
+
+#ifdef DHD_IFDEBUG
+ /* Printout all netinfo entries */
+ wl_probe_wdev_all(cfg);
+#endif /* DHD_IFDEBUG */
+
+ return err;
+}
+
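+/* Entry point when the primary interface is brought up: validates the
+ * ioctl interface version, then performs the bring-up under usr_sync
+ * via __wl_cfg80211_up().
+ */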
+s32 wl_cfg80211_up(struct net_device *net)
+{
+ struct bcm_cfg80211 *cfg;
+ s32 err = 0;
+ int val = 1;
+#ifdef BCMDONGLEHOST
+ dhd_pub_t *dhd;
+#endif /* BCMDONGLEHOST */
+#ifdef DISABLE_PM_BCNRX
+ s32 interr = 0;
+ uint param = 0;
+ s8 iovbuf[WLC_IOCTL_SMLEN];
+#endif /* DISABLE_PM_BCNRX */
+#ifdef WL_USE_RANDOMIZED_SCAN
+ uint8 random_addr[ETHER_ADDR_LEN] = {0x02, 0x00, 0x00, 0x00, 0x00, 0x00};
+#endif /* WL_USE_RANDOMIZED_SCAN */
+ WL_DBG(("In\n"));
+ cfg = wl_get_cfg(net);
+
+ if ((err = wldev_ioctl_get(bcmcfg_to_prmry_ndev(cfg), WLC_GET_VERSION, &val,
+ sizeof(int))) < 0) {
+ WL_ERR(("WLC_GET_VERSION failed, err=%d\n", err));
+ return err;
+ }
+ val = dtoh32(val);
+ if (val != WLC_IOCTL_VERSION && val != 1) {
+ WL_ERR(("Version mismatch, please upgrade. Got %d, expected %d or 1\n",
+ val, WLC_IOCTL_VERSION));
+ return BCME_VERSION;
+ }
+ ioctl_version = val;
+ WL_TRACE(("WLC_GET_VERSION=%d\n", ioctl_version));
+#ifdef WL_EXT_IAPSTA
+ wl_ext_in4way_sync(net, STA_NO_BTC_IN4WAY, WL_EXT_STATUS_DISCONNECTED, NULL);
+#endif
+
+ mutex_lock(&cfg->usr_sync);
+#if defined(BCMDONGLEHOST)
+ dhd = (dhd_pub_t *)(cfg->pub);
+ if (!(dhd->op_mode & DHD_FLAG_HOSTAP_MODE)) {
+ err = wl_cfg80211_attach_post(bcmcfg_to_prmry_ndev(cfg));
+ if (unlikely(err)) {
+ mutex_unlock(&cfg->usr_sync);
+ return err;
+ }
+ }
+#ifdef WLMESH_CFG80211
+ cfg->wdev->wiphy->features |= NL80211_FEATURE_USERSPACE_MPM;
+#endif /* WLMESH_CFG80211 */
+#if defined(BCMSUP_4WAY_HANDSHAKE)
+ if (dhd->fw_4way_handshake) {
+ /* This is a hacky method to indicate fw 4WHS support and
+ * is used only for older kernels (< 3.14). For newer
+ * kernels, the vendor extension path is used to advertise
+ * FW-based 4-way handshake feature support.
+ */
+ cfg->wdev->wiphy->features |= NL80211_FEATURE_FW_4WAY_HANDSHAKE;
+ }
+#endif /* BCMSUP_4WAY_HANDSHAKE */
+#endif /* defined(BCMDONGLEHOST) */
+ err = __wl_cfg80211_up(cfg);
+ if (unlikely(err))
+ WL_ERR(("__wl_cfg80211_up failed\n"));
+
+#ifdef ROAM_CHANNEL_CACHE
+ if (init_roam_cache(cfg, ioctl_version) == 0) {
+ /* Enable support for Roam cache */
+ cfg->rcc_enabled = true;
+ WL_ERR(("Roam channel cache enabled\n"));
+ } else {
+ WL_ERR(("Failed to enable RCC.\n"));
+ }
+#endif /* ROAM_CHANNEL_CACHE */
+#ifdef WL_USE_RANDOMIZED_SCAN
+ /* Call scanmac only for valid configuration */
+ if (wl_cfg80211_scan_mac_enable(net)) {
+ WL_ERR(("%s : randmac enable failed\n", __FUNCTION__));
+ } else {
+ /* scanmac enabled. apply configuration */
+ if (wl_cfg80211_scan_mac_config(net, random_addr, NULL)) {
+ WL_ERR(("%s : failed to set randmac config for scan\n", __FUNCTION__));
+ /* if config fails, disable scan mac */
+ wl_cfg80211_scan_mac_disable(net);
+ }
+ }
+#endif /* WL_USE_RANDOMIZED_SCAN */
+/* WAR: disable pm_bcnrx and scan_ps for the BCM4354 WISOL module.
+ * The WISOL module has an ANT_1 Rx sensitivity issue.
+ */
+#if defined(FORCE_DISABLE_SINGLECORE_SCAN)
+ dhd_force_disable_singlcore_scan(dhd);
+#endif /* FORCE_DISABLE_SINGLECORE_SCAN */
+
+ /* IOVAR configurations with 'up' condition */
+#ifdef DISABLE_PM_BCNRX
+ interr = wldev_iovar_setbuf(net, "pm_bcnrx", (char *)&param, sizeof(param), iovbuf,
+ sizeof(iovbuf), &cfg->ioctl_buf_sync);
+
+ if (unlikely(interr)) {
+ WL_ERR(("Set pm_bcnrx returned (%d)\n", interr));
+ }
+#endif /* DISABLE_PM_BCNRX */
+#ifdef WL_CHAN_UTIL
+ interr = wl_cfg80211_start_bssload_report(net);
+ if (unlikely(interr)) {
+ WL_ERR(("%s: Failed to start bssload_report eventing, err=%d\n",
+ __FUNCTION__, interr));
+ }
+#endif /* WL_CHAN_UTIL */
+
+ mutex_unlock(&cfg->usr_sync);
+
+#ifdef WLAIBSS_MCHAN
+ bcm_cfg80211_add_ibss_if(cfg->wdev->wiphy, IBSS_IF_NAME);
+#endif /* WLAIBSS_MCHAN */
+ cfg->spmk_info_list->pmkids.count = 0;
+ return err;
+}
+
+/* Private Event to Supplicant with indication that chip hangs */
+int wl_cfg80211_hang(struct net_device *dev, u16 reason)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+#ifdef BCMDONGLEHOST
+ dhd_pub_t *dhd;
+#if defined(SOFTAP_SEND_HANGEVT)
+ /* specific mac address used for hang event */
+ uint8 hang_mac[ETHER_ADDR_LEN] = {0x11, 0x11, 0x11, 0x11, 0x11, 0x11};
+#endif /* SOFTAP_SEND_HANGEVT */
+#endif /* BCMDONGLEHOST */
+ if (!cfg) {
+ return BCME_ERROR;
+ }
+
+ RETURN_EIO_IF_NOT_UP(cfg);
+
+#ifdef BCMDONGLEHOST
+ dhd = (dhd_pub_t *)(cfg->pub);
+#if defined(DHD_HANG_SEND_UP_TEST)
+ if (dhd->req_hang_type) {
+ WL_ERR(("wl_cfg80211_hang, Clear HANG test request 0x%x\n",
+ dhd->req_hang_type));
+ dhd->req_hang_type = 0;
+ }
+#endif /* DHD_HANG_SEND_UP_TEST */
+ if ((dhd->hang_reason <= HANG_REASON_MASK) || (dhd->hang_reason >= HANG_REASON_MAX)) {
+ WL_ERR(("wl_cfg80211_hang, Invalid hang reason 0x%x\n",
+ dhd->hang_reason));
+ dhd->hang_reason = HANG_REASON_UNKNOWN;
+ }
+#ifdef DHD_USE_EXTENDED_HANG_REASON
+ /* The proper dhd->hang_reason handling codes should be implemented
+ * in the WPA Supplicant/Hostapd or Android framework.
+ * If not, HANG event may not be sent to Android framework and
+ * driver cannot be reloaded.
+ * Please do not enable DHD_USE_EXTENDED_HANG_REASON if your Android platform
+ * cannot handle the dhd->hang_reason value.
+ */
+ if (dhd->hang_reason != 0) {
+ reason = dhd->hang_reason;
+ }
+#endif /* DHD_USE_EXTENDED_HANG_REASON */
+ WL_ERR(("In : chip crash eventing, reason=0x%x\n", (uint32)(dhd->hang_reason)));
+#else
+ WL_ERR(("In : chip crash eventing\n"));
+#endif /* BCMDONGLEHOST */
+
+ wl_add_remove_pm_enable_work(cfg, WL_PM_WORKQ_DEL);
+#ifdef BCMDONGLEHOST
+#ifdef SOFTAP_SEND_HANGEVT
+ if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
+ cfg80211_del_sta(dev, hang_mac, GFP_ATOMIC);
+ } else
+#endif /* SOFTAP_SEND_HANGEVT */
+#endif /* BCMDONGLEHOST */
+ {
+ if (dhd->up == TRUE) {
+#ifdef WL_CFGVENDOR_SEND_HANG_EVENT
+#ifdef CUSTOMER_HW4
+ wl_cfgvendor_send_hang_event(dev, reason,
+ dhd->hang_info, dhd->hang_info_cnt);
+#else
+ wl_cfgvendor_simple_hang_event(dev, reason);
+#endif
+#else
+ CFG80211_DISCONNECTED(dev, reason, NULL, 0, false, GFP_KERNEL);
+#endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
+ }
+ }
+#if defined(RSSIAVG)
+ wl_free_rssi_cache(&cfg->g_rssi_cache_ctrl);
+#endif
+#if defined(BSSCACHE)
+ wl_free_bss_cache(&cfg->g_bss_cache_ctrl);
+#endif
+ if (cfg != NULL) {
+ /* Do we need to call wl_cfg80211_down here ? */
+ wl_link_down(cfg);
+ }
+ return 0;
+}
+
+s32 wl_cfg80211_down(struct net_device *dev)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+#ifdef RTT_SUPPORT
+ dhd_pub_t *dhdp;
+#endif /* RTT_SUPPORT */
+
+ s32 err = BCME_ERROR;
+
+ WL_DBG(("In\n"));
+
+ if (cfg && (cfg == wl_cfg80211_get_bcmcfg())) {
+#ifdef WL_NAN
+ mutex_lock(&cfg->if_sync);
+ wl_cfgnan_check_nan_disable_pending(cfg, true, false);
+ mutex_unlock(&cfg->if_sync);
+#endif /* WL_NAN */
+
+#ifdef RTT_SUPPORT
+ dhdp = (dhd_pub_t *)(cfg->pub);
+ if (dhdp->rtt_state) {
+ dhd_rtt_deinit(dhdp);
+ }
+#endif /* RTT_SUPPORT */
+ mutex_lock(&cfg->usr_sync);
+#if defined(RSSIAVG)
+ wl_free_rssi_cache(&cfg->g_rssi_cache_ctrl);
+#endif
+#if defined(BSSCACHE)
+ wl_free_bss_cache(&cfg->g_bss_cache_ctrl);
+#endif
+ err = __wl_cfg80211_down(cfg);
+ mutex_unlock(&cfg->usr_sync);
+ }
+
+ return err;
+}
+
+void
+wl_cfg80211_sta_ifdown(struct net_device *dev)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+
+ WL_DBG(("In\n"));
+
+ if (cfg) {
+ /* cancel scan if anything pending */
+ wl_cfgscan_cancel_scan(cfg);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+ if ((dev->ieee80211_ptr->iftype == NL80211_IFTYPE_STATION) &&
+ wl_get_drv_status(cfg, CONNECTED, dev)) {
+ CFG80211_DISCONNECTED(dev, 0, NULL, 0, false, GFP_KERNEL);
+ }
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) */
+ }
+}
+
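+/* Return a pointer to the requested item inside the per-netdev
+ * profile. The lookup itself is done under cfgdrv_lock, but the
+ * returned pointer is only as stable as the profile it points into.
+ */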
+void *wl_read_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 item)
+{
+ unsigned long flags;
+ void *rptr = NULL;
+ struct wl_profile *profile = wl_get_profile_by_netdev(cfg, ndev);
+
+ if (!profile)
+ return NULL;
+ WL_CFG_DRV_LOCK(&cfg->cfgdrv_lock, flags);
+ switch (item) {
+ case WL_PROF_SEC:
+ rptr = &profile->sec;
+ break;
+ case WL_PROF_ACT:
+ rptr = &profile->active;
+ break;
+ case WL_PROF_BSSID:
+ rptr = profile->bssid;
+ break;
+ case WL_PROF_SSID:
+ rptr = &profile->ssid;
+ break;
+ case WL_PROF_CHAN:
+ rptr = &profile->channel;
+ break;
+ case WL_PROF_LATEST_BSSID:
+ rptr = profile->latest_bssid;
+ break;
+ }
+ WL_CFG_DRV_UNLOCK(&cfg->cfgdrv_lock, flags);
+ if (!rptr)
+ WL_ERR(("invalid item (%d)\n", item));
+ return rptr;
+}
+
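+/* Store a connection attribute (SSID, BSSID, security, channel, ...)
+ * into the per-netdev profile under cfgdrv_lock.
+ */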
+s32
+wl_update_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ const wl_event_msg_t *e, const void *data, s32 item)
+{
+ s32 err = 0;
+ const struct wlc_ssid *ssid;
+ unsigned long flags;
+ struct wl_profile *profile = wl_get_profile_by_netdev(cfg, ndev);
+
+ if (!profile)
+ return WL_INVALID;
+ WL_CFG_DRV_LOCK(&cfg->cfgdrv_lock, flags);
+ switch (item) {
+ case WL_PROF_SSID:
+ ssid = (const wlc_ssid_t *) data;
+ bzero(profile->ssid.SSID,
+ sizeof(profile->ssid.SSID));
+ profile->ssid.SSID_len = MIN(ssid->SSID_len, DOT11_MAX_SSID_LEN);
+ memcpy(profile->ssid.SSID, ssid->SSID, profile->ssid.SSID_len);
+ break;
+ case WL_PROF_BSSID:
+ if (data)
+ memcpy(profile->bssid, data, ETHER_ADDR_LEN);
+ else
+ bzero(profile->bssid, ETHER_ADDR_LEN);
+ break;
+ case WL_PROF_SEC:
+ memcpy(&profile->sec, data, sizeof(profile->sec));
+ break;
+ case WL_PROF_ACT:
+ profile->active = *(const bool *)data;
+ break;
+ case WL_PROF_BEACONINT:
+ profile->beacon_interval = *(const u16 *)data;
+ break;
+ case WL_PROF_DTIMPERIOD:
+ profile->dtim_period = *(const u8 *)data;
+ break;
+ case WL_PROF_CHAN:
+ profile->channel = *(const chanspec_t *)data;
+ break;
+ case WL_PROF_LATEST_BSSID:
+ if (data) {
+ memcpy_s(profile->latest_bssid, sizeof(profile->latest_bssid),
+ data, ETHER_ADDR_LEN);
+ } else {
+ memset_s(profile->latest_bssid, sizeof(profile->latest_bssid),
+ 0, ETHER_ADDR_LEN);
+ }
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+ WL_CFG_DRV_UNLOCK(&cfg->cfgdrv_lock, flags);
+
+ if (err == -EOPNOTSUPP)
+ WL_ERR(("unsupported item (%d)\n", item));
+
+ return err;
+}
+
+void wl_cfg80211_dbg_level(u32 level)
+{
+ /*
+ * prohibit to change debug level
+ * by insmod parameter.
+ * eventually debug level will be configured
+ * in compile time by using CONFIG_XXX
+ */
+ /* wl_dbg_level = level; */
+}
+
+static __used bool wl_is_ibssstarter(struct bcm_cfg80211 *cfg)
+{
+ return cfg->ibss_starter;
+}
+
+static __used s32 wl_add_ie(struct bcm_cfg80211 *cfg, u8 t, u8 l, u8 *v)
+{
+ struct wl_ie *ie = wl_to_ie(cfg);
+ s32 err = 0;
+
+ if (unlikely(ie->offset + l + 2 > WL_TLV_INFO_MAX)) {
+ WL_ERR(("ei crosses buffer boundary\n"));
+ return -ENOSPC;
+ }
+ ie->buf[ie->offset] = t;
+ ie->buf[ie->offset + 1] = l;
+ memcpy(&ie->buf[ie->offset + 2], v, l);
+ ie->offset += l + 2;
+
+ return err;
+}
+
+static void wl_link_up(struct bcm_cfg80211 *cfg)
+{
+ cfg->link_up = true;
+}
+
+static void wl_link_down(struct bcm_cfg80211 *cfg)
+{
+ struct wl_connect_info *conn_info = wl_to_conn(cfg);
+
+ WL_DBG(("In\n"));
+ cfg->link_up = false;
+ if (conn_info) {
+ conn_info->req_ie_len = 0;
+ conn_info->resp_ie_len = 0;
+ }
+}
+
+static unsigned long wl_lock_eq(struct bcm_cfg80211 *cfg)
+{
+ unsigned long flags;
+
+ WL_CFG_EQ_LOCK(&cfg->eq_lock, flags);
+ return flags;
+}
+
+static void wl_unlock_eq(struct bcm_cfg80211 *cfg, unsigned long flags)
+{
+ WL_CFG_EQ_UNLOCK(&cfg->eq_lock, flags);
+}
+
+static void wl_init_eq_lock(struct bcm_cfg80211 *cfg)
+{
+ spin_lock_init(&cfg->eq_lock);
+}
+
+static void wl_delay(u32 ms)
+{
+ if (in_atomic() || (ms < jiffies_to_msecs(1))) {
+ OSL_DELAY(ms*1000);
+ } else {
+ OSL_SLEEP(ms);
+ }
+}
+
+s32 wl_cfg80211_get_p2p_dev_addr(struct net_device *net, struct ether_addr *p2pdev_addr)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(net);
+ struct ether_addr primary_mac;
+ if (!cfg->p2p)
+ return -1;
+ if (!p2p_is_on(cfg)) {
+ get_primary_mac(cfg, &primary_mac);
+ memcpy(p2pdev_addr->octet, primary_mac.octet, ETHER_ADDR_LEN);
+ } else {
+ memcpy(p2pdev_addr->octet, wl_to_p2p_bss_macaddr(cfg, P2PAPI_BSSCFG_DEVICE).octet,
+ ETHER_ADDR_LEN);
+ }
+
+ return 0;
+}
+s32 wl_cfg80211_set_p2p_noa(struct net_device *net, char* buf, int len)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(net);
+
+ return wl_cfgp2p_set_p2p_noa(cfg, net, buf, len);
+}
+
+s32 wl_cfg80211_get_p2p_noa(struct net_device *net, char* buf, int len)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(net);
+
+ return wl_cfgp2p_get_p2p_noa(cfg, net, buf, len);
+}
+
+s32 wl_cfg80211_set_p2p_ps(struct net_device *net, char* buf, int len)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(net);
+
+ return wl_cfgp2p_set_p2p_ps(cfg, net, buf, len);
+}
+
+s32 wl_cfg80211_set_p2p_ecsa(struct net_device *net, char* buf, int len)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(net);
+
+ return wl_cfgp2p_set_p2p_ecsa(cfg, net, buf, len);
+}
+
+s32 wl_cfg80211_increase_p2p_bw(struct net_device *net, char* buf, int len)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(net);
+
+ return wl_cfgp2p_increase_p2p_bw(cfg, net, buf, len);
+}
+
+#ifdef P2PLISTEN_AP_SAMECHN
+s32 wl_cfg80211_set_p2p_resp_ap_chn(struct net_device *net, s32 enable)
+{
+ s32 ret = wldev_iovar_setint(net, "p2p_resp_ap_chn", enable);
+
+ if ((ret == 0) && enable) {
+ /* disable PM for p2p responding on infra AP channel */
+ s32 pm = PM_OFF;
+
+ ret = wldev_ioctl_set(net, WLC_SET_PM, &pm, sizeof(pm));
+ }
+
+ return ret;
+}
+#endif /* P2PLISTEN_AP_SAMECHN */
+
+#ifdef WL_SDO
+#define MAX_QR_LEN NLMSG_GOODSIZE
+
+typedef struct wl_cfg80211_dev_info {
+ u16 band;
+ u16 freq;
+ s16 rssi;
+ u16 ie_len;
+ u8 bssid[ETH_ALEN];
+} wl_cfg80211_dev_info_t;
+
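+/* Handle WLC_E_P2PO_ADD_DEVICE/DEL_DEVICE offload events and relay the
+ * discovered peer (plus its IEs) to userspace over generic netlink.
+ */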
+static s32
+wl_notify_device_discovery(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+ int err = 0;
+ u32 event = ntoh32(e->event_type);
+ wl_cfg80211_dev_info_t info;
+ wl_bss_info_t *bi = NULL;
+ struct net_device *ndev = NULL;
+ u8 *buf = NULL;
+ u32 buflen = 0;
+ u16 channel = 0;
+ wl_escan_result_t *escan_result;
+ chanspec_t chspec = INVCHANSPEC;
+
+ WL_SD(("Enter. type:%d \n", event));
+
+ if ((event != WLC_E_P2PO_ADD_DEVICE) && (event != WLC_E_P2PO_DEL_DEVICE)) {
+ WL_ERR(("Unknown Event\n"));
+ return -EINVAL;
+ }
+
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+ mutex_lock(&cfg->usr_sync);
+ if (event == WLC_E_P2PO_DEL_DEVICE) {
+ WL_SD(("DEV_LOST MAC:"MACDBG" \n", MAC2STRDBG(e->addr.octet)));
+ err = wl_genl_send_msg(ndev, event, (const u8 *)e->addr.octet, ETH_ALEN, 0, 0);
+ } else {
+
+ escan_result = (wl_escan_result_t *) data;
+
+ if (dtoh16(escan_result->bss_count) != 1) {
+ WL_ERR(("Invalid bss_count %d: ignoring\n", escan_result->bss_count));
+ err = -EINVAL;
+ goto exit;
+ }
+
+ bi = escan_result->bss_info;
+ buflen = dtoh32(bi->length);
+ if (unlikely(buflen > WL_BSS_INFO_MAX)) {
+ WL_DBG(("Beacon is larger than buffer. Discarding\n"));
+ err = -EINVAL;
+ goto exit;
+ }
+
+ /* Update sub-header */
+ bzero(&info, sizeof(wl_cfg80211_dev_info_t));
+ chspec = wl_chspec_driver_to_host(bi->chanspec);
+ channel = wf_chspec_ctlchan(chspec);
+ info.freq = wl_channel_to_frequency(channel, CHSPEC_BAND(chspec));
+ info.rssi = wl_rssi_offset(dtoh16(bi->RSSI));
+ memcpy(info.bssid, &bi->BSSID, ETH_ALEN);
+ info.ie_len = buflen;
+
+ WL_SD(("DEV_FOUND band:%x Freq:%d rssi:%x "MACDBG" \n",
+ info.band, info.freq, info.rssi, MAC2STRDBG(info.bssid)));
+
+ buf = ((u8 *) bi) + bi->ie_offset;
+ err = wl_genl_send_msg(ndev, event, buf,
+ buflen, (u8 *)&info, sizeof(wl_cfg80211_dev_info_t));
+ }
+exit:
+ mutex_unlock(&cfg->usr_sync);
+ return err;
+}
+
+s32
+wl_cfg80211_sdo_init(struct bcm_cfg80211 *cfg)
+{
+ if (cfg->sdo) {
+ WL_SD(("SDO already initialized\n"));
+ return 0;
+ }
+
+ cfg->sdo = (sd_offload_t *)MALLOCZ(cfg->osh, sizeof(sd_offload_t));
+ if (!cfg->sdo) {
+ WL_ERR(("MALLOCZ failed for SDO \n"));
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+s32
+wl_cfg80211_sdo_deinit(struct bcm_cfg80211 *cfg)
+{
+ s32 bssidx;
+ int ret = 0;
+ int sdo_pause = 0;
+ if (!cfg || !cfg->p2p) {
+ WL_ERR(("Wl %p or cfg->p2p %p is null\n",
+ cfg, cfg ? cfg->p2p : 0));
+ return 0;
+ }
+
+ bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+ if (!cfg->sdo) {
+ WL_DBG(("SDO Not Initialized. Do nothing. \n"));
+ return 0;
+ }
+ if (cfg->sdo->dd_state &&
+ (ret = wldev_iovar_setbuf_bsscfg(bcmcfg_to_prmry_ndev(cfg),
+ "p2po_stop", (void*)&sdo_pause, sizeof(sdo_pause),
+ cfg->ioctl_buf, WLC_IOCTL_SMLEN, bssidx, &cfg->ioctl_buf_sync)) < 0) {
+ WL_ERR(("p2po_stop Failed :%d\n", ret));
+ }
+ MFREE(cfg->osh, cfg->sdo, sizeof(sd_offload_t));
+
+ WL_SD(("SDO Deinit Done \n"));
+
+ return 0;
+}
+
+s32
+wl_cfg80211_resume_sdo(struct net_device *dev, struct bcm_cfg80211 *cfg)
+{
+ wl_sd_listen_t sd_listen;
+ int ret = 0;
+ s32 bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+
+ WL_DBG(("Enter\n"));
+
+ if (!cfg->sdo) {
+ return -EINVAL;
+ }
+
+ if (dev == NULL)
+ dev = bcmcfg_to_prmry_ndev(cfg);
+
+ /* Turn the ESCAN events back off for the offload */
+ wl_add_remove_eventmsg(dev, WLC_E_ESCAN_RESULT, false);
+
+ /* Resume according to the saved state */
+ if (cfg->sdo->dd_state == WL_DD_STATE_SEARCH) {
+ if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_find", NULL, 0,
+ cfg->ioctl_buf, WLC_IOCTL_SMLEN, bssidx, &cfg->ioctl_buf_sync)) < 0) {
+ WL_ERR(("p2po_find Failed :%d\n", ret));
+ }
+ } else if (cfg->sdo->dd_state == WL_DD_STATE_LISTEN) {
+ /* Need to save the listen params in the set context
+ * so that those values can be restored in the resume context
+ */
+ sd_listen.interval = cfg->sdo->sd_listen.interval;
+ sd_listen.period = cfg->sdo->sd_listen.period;
+
+ if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_listen", (void*)&sd_listen,
+ sizeof(wl_sd_listen_t), cfg->ioctl_buf, WLC_IOCTL_SMLEN,
+ bssidx, &cfg->ioctl_buf_sync)) < 0) {
+ WL_ERR(("p2po_listen Failed :%d\n", ret));
+ }
+
+ }
+
+ /* p2po_stop clears the eventmask for GAS. Set it back */
+ wl_add_remove_eventmsg(dev, WLC_E_SERVICE_FOUND, true);
+ wl_add_remove_eventmsg(dev, WLC_E_GAS_FRAGMENT_RX, true);
+ wl_add_remove_eventmsg(dev, WLC_E_GAS_COMPLETE, true);
+
+ WL_SD(("SDO Resumed \n"));
+
+ return ret;
+}
+
+s32 wl_cfg80211_pause_sdo(struct net_device *dev, struct bcm_cfg80211 *cfg)
+{
+
+ int ret = 0;
+ s32 bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+ int sdo_pause = 1;
+
+ WL_DBG(("Enter \n"));
+
+ if (!cfg->sdo) {
+ WL_ERR(("SDO not initialized \n"));
+ return -EINVAL;
+ }
+
+ if (dev == NULL)
+ dev = bcmcfg_to_prmry_ndev(cfg);
+
+ if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_stop",
+ (void*)&sdo_pause, sizeof(sdo_pause),
+ cfg->ioctl_buf, WLC_IOCTL_SMLEN, bssidx, &cfg->ioctl_buf_sync)) < 0) {
+ WL_ERR(("p2po_stop Failed :%d\n", ret));
+ }
+
+ /* Enable back the ESCAN events for the SCAN */
+ wl_add_remove_eventmsg(dev, WLC_E_ESCAN_RESULT, true);
+
+ WL_SD(("SDO Paused \n"));
+
+ return ret;
+}
+
+static s32
+wl_svc_resp_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+ u32 event = ntoh32(e->event_type);
+ struct net_device *ndev = NULL;
+ const u8 *dst_mac = (const u8 *)e->addr.octet;
+ int ret = 0;
+ wl_event_sd_t *gas = NULL;
+ int status = ntoh32(e->status);
+ sdo_event_t sdo_hdr;
+ u32 data_len = ntoh32(e->datalen);
+ u8 *data_ptr = NULL;
+ u32 tot_len = 0;
+
+ WL_SD(("Enter event_type:%d status:%d\n", event, status));
+
+ if (!cfg->sdo) {
+ WL_ERR(("SDO Not initialized \n"));
+ return -EINVAL;
+ }
+
+ if (!(cfg->sdo->sd_state & WL_SD_SEARCH_SVC)) {
+ /* We are not searching for any service. Drop
+ * any bogus Event
+ */
+ WL_ERR(("Bogus SDO Event. Do nothing.. \n"));
+ return -1;
+ }
+
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+ mutex_lock(&cfg->usr_sync);
+ if (event == WLC_E_SERVICE_FOUND) {
+
+ if ((status != WLC_E_STATUS_SUCCESS) && (status != WLC_E_STATUS_PARTIAL)) {
+ WL_ERR(("WLC_E_SERVICE_FOUND: unknown status \n"));
+ goto exit;
+ }
+
+ gas = (wl_event_sd_t *)data;
+ if (!gas) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ bzero(&sdo_hdr, sizeof(sdo_event_t));
+ sdo_hdr.freq = wl_channel_to_frequency(gas->channel, WL_CHANSPEC_BAND_2G);
+ sdo_hdr.count = gas->count;
+ memcpy(sdo_hdr.addr, dst_mac, ETH_ALEN);
+ data_ptr = (char *)gas->tlv;
+ tot_len = data_len - (sizeof(wl_event_sd_t) - sizeof(wl_sd_tlv_t));
+
+ WL_SD(("WLC_E_SERVICE_FOUND "MACDBG" data_len:%d tlv_count:%d \n",
+ MAC2STRDBG(dst_mac), data_len, sdo_hdr.count));
+
+ if (tot_len > NLMSG_DEFAULT_SIZE) {
+ WL_ERR(("size(%u) > %lu not supported \n", tot_len, NLMSG_DEFAULT_SIZE));
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ if (wl_genl_send_msg(ndev, event, data_ptr,
+ tot_len, (u8 *)&sdo_hdr, sizeof(sdo_event_t)) < 0)
+ WL_ERR(("Couldn't send up the NETLINK Event \n"));
+ else
+ WL_SD(("GAS event sent up \n"));
+ } else {
+ WL_ERR(("Unsupported Event: %d \n", event));
+ }
+
+exit:
+ mutex_unlock(&cfg->usr_sync);
+ return ret;
+}
+
+s32 wl_cfg80211_DsdOffloadParseProto(char* proto_str, u8* proto)
+{
+ s32 len = -1;
+ int i = 0;
+
+ for (i = 0; i < MAX_SDO_PROTO; i++) {
+ if (strncmp(proto_str, wl_sdo_protos[i].str, strlen(wl_sdo_protos[i].str)) == 0) {
+ WL_SD(("Matching proto (%d) found \n", wl_sdo_protos[i].val));
+ *proto = wl_sdo_protos[i].val;
+ len = strlen(wl_sdo_protos[i].str);
+ break;
+ }
+ }
+ return len;
+}
+
+/*
+ * register to search for a UPnP service
+ * ./DRIVER P2P_SD_REQ upnp 0x10urn:schemas-upnporg:device:InternetGatewayDevice:1
+ *
+ * Enable discovery
+ * ./cfg p2po_find
+*/
+#define UPNP_QUERY_VER_OFFSET 3
+s32 wl_sd_handle_sd_req(
+ struct net_device *dev,
+ u8 * buf,
+ int len)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ s32 bssidx = 0;
+ wl_sd_qr_t *sdreq;
+ u8 proto = 0;
+ s32 ret = 0;
+ u32 tot_len = len + sizeof(wl_sd_qr_t);
+ u16 version = 0;
+
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+ WL_ERR(("find_idx failed\n"));
+ return -EINVAL;
+ }
+ /* Check for the minimum expected arg length */
+ if (!buf || (len < strlen("all"))) {
+ WL_ERR(("Wrong Arg\n"));
+ return -EINVAL;
+ }
+
+ if (tot_len > WLC_IOCTL_MAXLEN) {
+ WL_ERR(("Length > %d not supported \n", WLC_IOCTL_MAXLEN));
+ return -EINVAL;
+ }
+
+ sdreq = (wl_sd_qr_t *)MALLOCZ(cfg->osh, tot_len);
+ if (!sdreq) {
+ WL_ERR(("MALLOCZ failed\n"));
+ return -ENOMEM;
+ }
+
+ WL_SD(("%s Len: %d\n", buf, len));
+ if ((ret = wl_cfg80211_DsdOffloadParseProto(buf, &proto)) < 0) {
+ WL_ERR(("Unknown proto \n"));
+ goto exit;
+ }
+
+ sdreq->protocol = proto;
+ buf += ret;
+ buf++; /* skip the space */
+ sdreq->transaction_id = simple_strtoul(buf, NULL, 16);
+ WL_SD(("transaction_id:%d\n", sdreq->transaction_id));
+ buf += sizeof(sdreq->transaction_id);
+
+ if (*buf == '\0') {
+ WL_SD(("No Query present. Proto:%d \n", proto));
+ sdreq->query_len = 0;
+ } else {
+ buf++; /* skip the space */
+ /* UPNP version needs to be put as a binary value */
+ if (sdreq->protocol == SVC_RPOTYPE_UPNP) {
+ /* Extract UPNP version */
+ version = simple_strtoul(buf, NULL, 16);
+ buf = buf + UPNP_QUERY_VER_OFFSET;
+ buf[0] = version;
+ WL_SD(("Upnp version: 0x%x \n", version));
+ }
+
+ len = strlen(buf);
+ WL_SD(("Len after stripping proto: %d Query: %s\n", len, buf));
+ /* copy the query part */
+ memcpy(sdreq->qrbuf, buf, len);
+ sdreq->query_len = len;
+ }
+
+ /* Enable discovery */
+ if ((ret = wl_cfgp2p_enable_discovery(cfg, dev, NULL, 0)) < 0) {
+ WL_ERR(("cfgp2p_enable discovery failed"));
+ goto exit;
+ }
+
+ if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_sd_req_resp", (void*)sdreq,
+ tot_len, cfg->ioctl_buf, WLC_IOCTL_MAXLEN,
+ bssidx, &cfg->ioctl_buf_sync)) < 0) {
+ WL_ERR(("Find SVC Failed \n"));
+ goto exit;
+ }
+
+ cfg->sdo->sd_state |= WL_SD_SEARCH_SVC;
+
+exit:
+ MFREE(cfg->osh, sdreq, tot_len);
+ return ret;
+}
+
+s32 wl_sd_handle_sd_cancel_req(
+ struct net_device *dev,
+ u8 *buf)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ s32 bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+
+ if (wldev_iovar_setbuf_bsscfg(dev, "p2po_sd_cancel", NULL,
+ 0, cfg->ioctl_buf, WLC_IOCTL_SMLEN,
+ bssidx, &cfg->ioctl_buf_sync) < 0) {
+ WL_ERR(("Cancel SD Failed \n"));
+ return -EINVAL;
+ }
+
+ cfg->sdo->sd_state &= ~WL_SD_SEARCH_SVC;
+
+ return 0;
+}
+
+/*
+ * register a UPnP service to be discovered
+ * ./cfg P2P_SD_SVC_ADD upnp
+ *   0x10urn:schemas-upnporg:device:InternetGatewayDevice:1
+ *   0x10uuid:6859dede-8574-59ab-9332-123456789012::urn:schemas-upnporg:device:InternetGatewayDevice:1
+ */
+s32 wl_sd_handle_sd_add_svc(
+ struct net_device *dev,
+ u8 * buf,
+ int len)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ s32 bssidx = 0;
+ wl_sd_qr_t *sdreq;
+ u8 proto = 0;
+ u16 version = 0;
+ s32 ret = 0;
+ u8 *resp = NULL;
+ u8 *query = NULL;
+ u32 tot_len = len + sizeof(wl_sd_qr_t);
+
+ if (!buf || !len)
+ return -EINVAL;
+
+ WL_SD(("%s Len: %d\n", buf, len));
+ if (tot_len > WLC_IOCTL_MAXLEN) {
+ WL_ERR(("Query-Resp length > %d not supported \n", WLC_IOCTL_MAXLEN));
+ return -ENOMEM;
+ }
+
+ sdreq = (wl_sd_qr_t *)MALLOCZ(cfg->osh, tot_len);
+ if (!sdreq) {
+ WL_ERR(("malloc failed\n"));
+ return -ENOMEM;
+ }
+
+ if ((ret = wl_cfg80211_DsdOffloadParseProto(buf, &proto)) < 0) {
+ WL_ERR(("Unknown Proto \n"));
+ goto exit;
+ }
+
+ sdreq->protocol = proto;
+ buf += ret;
+
+ if (*buf == '\0') {
+ WL_ERR(("No Query Resp pair present \n"));
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ buf++; /* Skip the space */
+ len = strlen(buf);
+ query = strsep((char **)&buf, " ");
+ if (!query || !buf) {
+ WL_ERR(("No Query RESP Present\n"));
+ ret = -EINVAL;
+ goto exit;
+ }
+ resp = buf;
+
+ if (sdreq->protocol == SVC_RPOTYPE_UPNP) {
+ /* Extract UPNP version */
+ version = simple_strtoul(query, NULL, 16);
+ query = query + UPNP_QUERY_VER_OFFSET;
+ resp = resp + UPNP_QUERY_VER_OFFSET;
+ query[0] = version;
+ resp[0] = version;
+ WL_SD(("Upnp version: 0x%x \n", version));
+ }
+
+ sdreq->query_len = strlen(query);
+ sdreq->response_len = strlen(buf);
+ WL_SD(("query:%s len:%u \n", query, sdreq->query_len));
+ WL_SD(("resp:%s len:%u \n", buf, sdreq->response_len));
+
+ memcpy(sdreq->qrbuf, query, sdreq->query_len);
+ memcpy((sdreq->qrbuf + sdreq->query_len), resp, sdreq->response_len);
+
+ /* Enable discovery */
+ if ((ret = wl_cfgp2p_enable_discovery(cfg, dev, NULL, 0)) < 0) {
+ WL_ERR(("cfgp2p_enable discovery failed"));
+ goto exit;
+ }
+
+ if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_addsvc", (void*)sdreq,
+ tot_len, cfg->ioctl_buf, WLC_IOCTL_MAXLEN,
+ bssidx, &cfg->ioctl_buf_sync)) < 0) {
+ WL_ERR(("FW Failed in doing p2po_addsvc. RET:%d \n", ret));
+ goto exit;
+ }
+
+ cfg->sdo->sd_state |= WL_SD_ADV_SVC;
+
+exit:
+ MFREE(cfg->osh, sdreq, tot_len);
+ return ret;
+}
+
+s32 wl_sd_handle_sd_del_svc(
+ struct net_device *dev,
+ u8 * buf,
+ int len)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ s32 bssidx = 0;
+ wl_sd_qr_t *sdreq;
+ u8 proto = 0;
+ s32 ret = 0;
+ u32 tot_len = len + sizeof(wl_sd_qr_t);
+ u16 version = 0;
+
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+ WL_ERR(("find_idx failed\n"));
+ return -EINVAL;
+ }
+
+ sdreq = (wl_sd_qr_t *)MALLOCZ(cfg->osh, tot_len);
+ if (!sdreq) {
+ WL_ERR(("malloc failed\n"));
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ /* Check for the minimum expected arg length */
+ if (buf && len >= strlen("all")) {
+ WL_DBG(("%s Len: %d\n", buf, len));
+ if ((ret = wl_cfg80211_DsdOffloadParseProto(buf, &proto)) < 0) {
+ WL_ERR(("Unknown Proto \n"));
+ goto exit;
+ }
+ sdreq->protocol = proto;
+ buf += ret;
+
+ if (*buf == ' ') {
+ /* Query present */
+ buf++; /* Skip the space */
+ /* UPNP version needs to be put as a binary value */
+ if (sdreq->protocol == SVC_RPOTYPE_UPNP) {
+ /* Extract UPNP version */
+ version = simple_strtoul(buf, NULL, 16);
+ buf = buf + UPNP_QUERY_VER_OFFSET;
+ buf[0] = version;
+ WL_SD(("Upnp version: 0x%x \n", version));
+ }
+ memcpy(sdreq->qrbuf, buf, strlen(buf));
+ sdreq->query_len = strlen(buf);
+ WL_SD(("Query to be deleted:%s len:%d\n", buf, sdreq->query_len));
+ }
+ } else {
+ /* ALL */
+ proto = 0;
+ }
+
+ sdreq->protocol = proto;
+ WL_SD(("Proto: %d \n", proto));
+
+ if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_delsvc", (void*)sdreq,
+ tot_len, cfg->ioctl_buf, WLC_IOCTL_MAXLEN,
+ bssidx, &cfg->ioctl_buf_sync)) < 0) {
+ WL_ERR(("FW Failed in doing sd_delsvc. ret=%d \n", ret));
+ goto exit;
+ }
+
+ cfg->sdo->sd_state &= ~WL_SD_ADV_SVC;
+
+exit:
+ if (sdreq) {
+ MFREE(cfg->osh, sdreq, tot_len);
+ }
+
+ return ret;
+}
+
+s32 wl_sd_handle_sd_stop_discovery(
+ struct net_device *dev,
+ u8 * buf,
+ int len)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ s32 bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+ int ret = 0;
+ int sdo_pause = 0;
+
+ if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_stop", (void*)&sdo_pause,
+ sizeof(sdo_pause), cfg->ioctl_buf, WLC_IOCTL_SMLEN,
+ bssidx, &cfg->ioctl_buf_sync)) < 0) {
+ WL_ERR(("p2po_stop Failed :%d\n", ret));
+ return -1;
+ }
+
+ /* clear the states */
+ cfg->sdo->dd_state = WL_DD_STATE_IDLE;
+ wl_clr_p2p_status(cfg, DISC_IN_PROGRESS);
+
+ bzero(&cfg->sdo->sd_listen, sizeof(wl_sd_listen_t));
+
+ /* Remove ESCAN from waking up the host if ofind/olisten is enabled */
+ wl_add_remove_eventmsg(dev, WLC_E_ESCAN_RESULT, true);
+
+ return ret;
+}
+
+s32 wl_sd_handle_sd_find(
+ struct net_device *dev,
+ u8 * buf,
+ int len)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ s32 bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+ int ret = 0;
+ s32 disc_bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+ vndr_ie_setbuf_t *ie_setbuf;
+ vndr_ie_t *vndrie;
+ vndr_ie_buf_t *vndriebuf;
+ int tot_len = 0;
+ uint channel = 0;
+
+ u8 p2pie_buf[] = {
+ 0x09, 0x02, 0x02, 0x00, 0x27, 0x0c, 0x06, 0x05, 0x00,
+ 0x55, 0x53, 0x04, 0x51, 0x0b, 0x11, 0x05, 0x00, 0x55,
+ 0x53, 0x04, 0x51, 0x0b
+ };
+
+ /* Enable discovery */
+ if ((ret = wl_cfgp2p_enable_discovery(cfg, dev, NULL, 0)) < 0) {
+ WL_ERR(("cfgp2p_enable discovery failed"));
+ return -1;
+ }
+
+ if (buf && strncmp(buf, "chan=", strlen("chan=")) == 0) {
+ buf += strlen("chan=");
+ channel = simple_strtol(buf, NULL, 10);
+ WL_SD(("listen_chan to be set:%d\n", channel));
+ if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_listen_channel", (void*)&channel,
+ sizeof(channel), cfg->ioctl_buf, WLC_IOCTL_SMLEN,
+ bssidx, &cfg->ioctl_buf_sync)) < 0) {
+ WL_ERR(("p2po_listen_channel Failed :%d\n", ret));
+ return -1;
+ }
+ }
+
+ tot_len = sizeof(vndr_ie_setbuf_t) + sizeof(p2pie_buf);
+ ie_setbuf = (vndr_ie_setbuf_t *)MALLOCZ(cfg->osh, tot_len);
+ if (!ie_setbuf) {
+ WL_ERR(("IE memory alloc failed\n"));
+ return -ENOMEM;
+ }
+
+ /* Apply the p2p_ie for p2po_find */
+ strlcpy(ie_setbuf->cmd, "add", sizeof(ie_setbuf->cmd));
+
+ vndriebuf = &ie_setbuf->vndr_ie_buffer;
+ vndriebuf->iecount = htod32(1);
+ vndriebuf->vndr_ie_list[0].pktflag = htod32(16);
+
+ vndrie = &vndriebuf->vndr_ie_list[0].vndr_ie_data;
+
+ vndrie->id = (uchar) DOT11_MNG_PROPR_ID;
+ vndrie->len = sizeof(p2pie_buf);
+ memcpy(vndrie->oui, WFA_OUI, WFA_OUI_LEN);
+ memcpy(vndrie->data, p2pie_buf, sizeof(p2pie_buf));
+
+ /* Remove ESCAN from waking up the host if SDO is enabled */
+ wl_add_remove_eventmsg(dev, WLC_E_ESCAN_RESULT, false);
+
+ if (wldev_iovar_setbuf_bsscfg(dev, "ie", (void*)ie_setbuf,
+ tot_len, cfg->ioctl_buf, WLC_IOCTL_SMLEN,
+ disc_bssidx, &cfg->ioctl_buf_sync) < 0) {
+ WL_ERR(("p2p add_ie failed \n"));
+ ret = -EINVAL;
+ goto exit;
+ } else
+ WL_SD(("p2p add_ie applied successfully len:%d \n", tot_len));
+
+ if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_find", NULL, 0,
+ cfg->ioctl_buf, WLC_IOCTL_SMLEN, bssidx, &cfg->ioctl_buf_sync)) < 0) {
+ WL_ERR(("p2po_find Failed :%d\n", ret));
+ ret = -1;
+ goto exit;
+ }
+
+ /* set the states */
+ cfg->sdo->dd_state = WL_DD_STATE_SEARCH;
+ wl_set_p2p_status(cfg, DISC_IN_PROGRESS);
+
+exit:
+ if (ie_setbuf) {
+ MFREE(cfg->osh, ie_setbuf, tot_len);
+ }
+
+ /* In case of failure, re-enable the ESCAN event */
+ if (ret)
+ wl_add_remove_eventmsg(dev, WLC_E_ESCAN_RESULT, true);
+
+ return ret;
+}
+
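+/* Handle "P2P_SD_LISTEN [period interval] [chan=N]": program the
+ * extended-listen parameters and listen channel for the discovery
+ * offload, then move the discovery state machine to LISTEN.
+ */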
+s32 wl_sd_handle_sd_listen(
+ struct net_device *dev,
+ u8 *buf,
+ int len)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ s32 bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+ wl_sd_listen_t sd_listen;
+ int ret = 0;
+ u8 * ptr = NULL;
+ uint channel = 0;
+
+ /* Just in case if it is not enabled */
+ if ((ret = wl_cfgp2p_enable_discovery(cfg, dev, NULL, 0)) < 0) {
+ WL_ERR(("cfgp2p_enable discovery failed"));
+ return -1;
+ }
+ bzero(&sd_listen, sizeof(wl_sd_listen_t));
+ if (len) {
+ ptr = strsep((char **)&buf, " ");
+ if (ptr == NULL) {
+ /* period and interval supplied in the wrong format */
+ WL_ERR(("Arguments in wrong format \n"));
+ return -EINVAL;
+ }
+ else if (strncmp(ptr, "chan=", strlen("chan=")) == 0) {
+ sd_listen.interval = 65535;
+ sd_listen.period = 65535;
+ ptr += strlen("chan=");
+ channel = simple_strtol(ptr, NULL, 10);
+ }
+ else {
+ sd_listen.period = simple_strtol(ptr, NULL, 10);
+ ptr = strsep((char **)&buf, " ");
+ if (ptr == NULL) {
+ WL_ERR(("Arguments in wrong format \n"));
+ return -EINVAL;
+ }
+ sd_listen.interval = simple_strtol(ptr, NULL, 10);
+ if (buf && strncmp(buf, "chan=", strlen("chan=")) == 0) {
+ buf += strlen("chan=");
+ channel = simple_strtol(buf, NULL, 10);
+ }
+ }
+ WL_SD(("listen_period:%d, listen_interval:%d and listen_channel:%d\n",
+ sd_listen.period, sd_listen.interval, channel));
+ }
+ if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_listen_channel", (void*)&channel,
+ sizeof(channel), cfg->ioctl_buf, WLC_IOCTL_SMLEN,
+ bssidx, &cfg->ioctl_buf_sync)) < 0) {
+ WL_ERR(("p2po_listen_channel Failed :%d\n", ret));
+ return -1;
+ }
+
+ WL_SD(("p2po_listen period:%d interval:%d \n",
+ sd_listen.period, sd_listen.interval));
+ if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_listen", (void*)&sd_listen,
+ sizeof(wl_sd_listen_t), cfg->ioctl_buf, WLC_IOCTL_SMLEN,
+ bssidx, &cfg->ioctl_buf_sync)) < 0) {
+ WL_ERR(("p2po_listen Failed :%d\n", ret));
+ return -1;
+ }
+
+ /* Remove ESCAN from waking up the host if ofind/olisten is enabled */
+ wl_add_remove_eventmsg(dev, WLC_E_ESCAN_RESULT, false);
+
+ /* Store the extended listen values for use in sdo_resume */
+ cfg->sdo->sd_listen.interval = sd_listen.interval;
+ cfg->sdo->sd_listen.period = sd_listen.period;
+
+ /* set the states */
+ cfg->sdo->dd_state = WL_DD_STATE_LISTEN;
+ wl_set_p2p_status(cfg, DISC_IN_PROGRESS);
+
+ return 0;
+}
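+
+/*
+ * Argument formats accepted by the parser above (values illustrative):
+ *   "chan=6"           - continuous listen (period/interval forced to 65535)
+ *   "500 5000"         - period 500, interval 5000, default channel
+ *   "500 5000 chan=11" - same, with an explicit listen channel
+ */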
+
+s32 wl_cfg80211_sd_offload(struct net_device *dev, char *cmd, char* buf, int len)
+{
+ int ret = 0;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+
+ WL_SD(("Entry cmd:%s arg_len:%d \n", cmd, len));
+
+ if (!cfg->sdo) {
+ WL_SD(("Initializing SDO \n"));
+ if ((ret = wl_cfg80211_sdo_init(cfg)) < 0)
+ goto exit;
+ }
+
+ if (strncmp(cmd, "P2P_SD_REQ", strlen("P2P_SD_REQ")) == 0) {
+ ret = wl_sd_handle_sd_req(dev, buf, len);
+ } else if (strncmp(cmd, "P2P_SD_CANCEL_REQ", strlen("P2P_SD_CANCEL_REQ")) == 0) {
+ ret = wl_sd_handle_sd_cancel_req(dev, buf);
+ } else if (strncmp(cmd, "P2P_SD_SVC_ADD", strlen("P2P_SD_SVC_ADD")) == 0) {
+ ret = wl_sd_handle_sd_add_svc(dev, buf, len);
+ } else if (strncmp(cmd, "P2P_SD_SVC_DEL", strlen("P2P_SD_SVC_DEL")) == 0) {
+ ret = wl_sd_handle_sd_del_svc(dev, buf, len);
+ } else if (strncmp(cmd, "P2P_SD_FIND", strlen("P2P_SD_FIND")) == 0) {
+ ret = wl_sd_handle_sd_find(dev, buf, len);
+ } else if (strncmp(cmd, "P2P_SD_LISTEN", strlen("P2P_SD_LISTEN")) == 0) {
+ ret = wl_sd_handle_sd_listen(dev, buf, len);
+ } else if (strncmp(cmd, "P2P_SD_STOP", strlen("P2P_STOP")) == 0) {
+ ret = wl_sd_handle_sd_stop_discovery(dev, buf, len);
+ } else {
+ WL_ERR(("Request for Unsupported CMD:%s \n", buf));
+ ret = -EINVAL;
+ }
+
+exit:
+ return ret;
+}
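+
+/*
+ * Dispatch sketch (illustrative): callers are expected to hand over the
+ * command keyword and its argument string separately, e.g.:
+ *
+ *   wl_cfg80211_sd_offload(ndev, "P2P_SD_FIND", "chan=11", strlen("chan=11"));
+ *   wl_cfg80211_sd_offload(ndev, "P2P_SD_STOP", NULL, 0);
+ */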
+#endif /* WL_SDO */
+
+s32 wl_cfg80211_set_wps_p2p_ie(struct net_device *ndev, char *buf, int len,
+ enum wl_management_type type)
+{
+ struct bcm_cfg80211 *cfg;
+ s32 ret = 0;
+ s32 bssidx = 0;
+ s32 pktflag = 0;
+ struct wireless_dev *wdev = ndev->ieee80211_ptr;
+
+ cfg = wl_get_cfg(ndev);
+ if (wl_get_drv_status(cfg, AP_CREATING, ndev)) {
+ /* Vendor IEs should be set in the firmware
+ * after the SoftAP interface is brought up
+ */
+ WL_DBG(("Skipping set IE since AP is not up \n"));
+ goto exit;
+ } else if (ndev == bcmcfg_to_prmry_ndev(cfg)) {
+ /* Either stand alone AP case or P2P discovery */
+ if (wl_get_drv_status(cfg, AP_CREATED, ndev)) {
+ /* Stand alone AP case on primary interface */
+ WL_DBG(("Apply IEs for Primary AP Interface \n"));
+ bssidx = 0;
+ } else {
+ if (!cfg->p2p) {
+ /* If p2p not initialized, return failure */
+ WL_ERR(("P2P not initialized \n"));
+ goto exit;
+ }
+ /* P2P Discovery case (p2p listen) */
+ if (!cfg->p2p->on) {
+ /* Turn on Discovery interface */
+ p2p_on(cfg) = true;
+ ret = wl_cfgp2p_enable_discovery(cfg, ndev, NULL, 0);
+ if (unlikely(ret)) {
+ WL_ERR(("Enable discovery failed \n"));
+ goto exit;
+ }
+ }
+ ndev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_PRIMARY);
+ if (!cfg->p2p_wdev) {
+ WL_ERR(("p2p_wdev not present\n"));
+ goto exit;
+ }
+ wdev = cfg->p2p_wdev;
+ WL_DBG(("Apply IEs for P2P Discovery Iface wdev:%p\n", wdev));
+ bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+ }
+ } else {
+ /* Virtual AP/ P2P Group Interface */
+ WL_DBG(("Apply IEs for iface:%s\n", ndev->name));
+ bssidx = wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr);
+ }
+
+ if (wdev != NULL) {
+ switch (type) {
+ case WL_BEACON:
+ pktflag = VNDR_IE_BEACON_FLAG;
+ break;
+ case WL_PROBE_RESP:
+ pktflag = VNDR_IE_PRBRSP_FLAG;
+ break;
+ case WL_ASSOC_RESP:
+ pktflag = VNDR_IE_ASSOCRSP_FLAG;
+ break;
+ }
+ if (pktflag) {
+ ret = wl_cfg80211_set_mgmt_vndr_ies(cfg,
+ wdev_to_cfgdev(wdev), bssidx, pktflag, buf, len);
+ }
+ }
+exit:
+ return ret;
+}
+
+static const struct rfkill_ops wl_rfkill_ops = {
+ .set_block = wl_rfkill_set
+};
+
+static int wl_rfkill_set(void *data, bool blocked)
+{
+ struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)data;
+
+ WL_DBG(("Enter \n"));
+ WL_DBG(("RF %s\n", blocked ? "blocked" : "unblocked"));
+
+ if (!cfg)
+ return -EINVAL;
+
+ cfg->rf_blocked = blocked;
+
+ return 0;
+}
+
+static int wl_setup_rfkill(struct bcm_cfg80211 *cfg, bool setup)
+{
+ s32 err = 0;
+
+ WL_DBG(("Enter \n"));
+ if (!cfg)
+ return -EINVAL;
+ if (setup) {
+ cfg->rfkill = rfkill_alloc("brcmfmac-wifi",
+ wl_cfg80211_get_parent_dev(),
+ RFKILL_TYPE_WLAN, &wl_rfkill_ops, (void *)cfg);
+
+ if (!cfg->rfkill) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ err = rfkill_register(cfg->rfkill);
+
+ if (err)
+ rfkill_destroy(cfg->rfkill);
+ } else {
+ if (!cfg->rfkill) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ rfkill_unregister(cfg->rfkill);
+ rfkill_destroy(cfg->rfkill);
+ }
+
+err_out:
+ return err;
+}
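+
+/*
+ * Usage sketch (illustrative): attach/detach paths are expected to pair the
+ * two calls, registering once the parent device is set and tearing down on
+ * exit:
+ *
+ *   if (wl_setup_rfkill(cfg, TRUE))
+ *       WL_ERR(("rfkill setup failed\n"));
+ *   ...
+ *   (void)wl_setup_rfkill(cfg, FALSE);
+ */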
+
+struct bcm_cfg80211 *wl_cfg80211_get_bcmcfg(void)
+{
+ return g_bcmcfg;
+}
+
+void wl_cfg80211_set_bcmcfg(struct bcm_cfg80211 *cfg)
+{
+ g_bcmcfg = cfg;
+}
+
+struct device *wl_cfg80211_get_parent_dev(void)
+{
+ return cfg80211_parent_dev;
+}
+
+void wl_cfg80211_set_parent_dev(void *dev)
+{
+ cfg80211_parent_dev = dev;
+}
+
+static void wl_cfg80211_clear_parent_dev(void)
+{
+ cfg80211_parent_dev = NULL;
+}
+
+void get_primary_mac(struct bcm_cfg80211 *cfg, struct ether_addr *mac)
+{
+ u8 ioctl_buf[WLC_IOCTL_SMLEN];
+
+ if (wldev_iovar_getbuf_bsscfg(bcmcfg_to_prmry_ndev(cfg),
+ "cur_etheraddr", NULL, 0, ioctl_buf, sizeof(ioctl_buf),
+ 0, NULL) == BCME_OK) {
+ memcpy(mac->octet, ioctl_buf, ETHER_ADDR_LEN);
+ } else {
+ bzero(mac->octet, ETHER_ADDR_LEN);
+ }
+}
+
+int wl_cfg80211_do_driver_init(struct net_device *net)
+{
+ struct bcm_cfg80211 *cfg = *(struct bcm_cfg80211 **)netdev_priv(net);
+
+ if (!cfg || !cfg->wdev)
+ return -EINVAL;
+
+#if defined(BCMDONGLEHOST)
+ if (dhd_do_driver_init(cfg->wdev->netdev) < 0)
+ return -1;
+#endif /* BCMDONGLEHOST */
+
+ return 0;
+}
+
+void wl_cfg80211_enable_trace(u32 level)
+{
+ wl_dbg_level = level;
+ WL_MSG("wlan", "wl_dbg_level = 0x%x\n", wl_dbg_level);
+}
+
+#if defined(WL_SUPPORT_BACKPORTED_KPATCHES) || (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
+static s32
+wl_cfg80211_mgmt_tx_cancel_wait(struct wiphy *wiphy,
+ bcm_struct_cfgdev *cfgdev, u64 cookie)
+{
+ /* CFG80211 checks for tx_cancel_wait callback when ATTR_DURATION
+ * is passed with CMD_FRAME. This callback is supposed to cancel
+ * the OFFCHANNEL Wait. Since we are already taking care of that
+ * with the tx_mgmt logic, do nothing here.
+ */
+
+ return 0;
+}
+#endif /* WL_SUPPORT_BACKPORTED_KPATCHES || KERNEL >= 3.2.0 */
+
+#ifdef WL_HOST_BAND_MGMT
+s32
+wl_cfg80211_set_band(struct net_device *ndev, int band)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ int ret = 0;
+ char ioctl_buf[50];
+
+ if ((band < WLC_BAND_AUTO) || (band > WLC_BAND_2G)) {
+ WL_ERR(("Invalid band\n"));
+ return -EINVAL;
+ }
+
+ if ((ret = wldev_iovar_setbuf(ndev, "roam_band", &band,
+ sizeof(int), ioctl_buf, sizeof(ioctl_buf), NULL)) < 0) {
+ WL_ERR(("seting roam_band failed code=%d\n", ret));
+ return ret;
+ }
+
+ WL_DBG(("Setting band to %d\n", band));
+ cfg->curr_band = band;
+
+ return 0;
+}
+#endif /* WL_HOST_BAND_MGMT */
+
+s32
+wl_cfg80211_set_if_band(struct net_device *ndev, int band)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ int ret = BCME_OK, wait_cnt;
+ char ioctl_buf[32];
+
+ if ((band < WLC_BAND_AUTO) || (band > WLC_BAND_2G)) {
+ WL_ERR(("Invalid band\n"));
+ return -EINVAL;
+ }
+
+ if (cfg->ncho_band == band) {
+ WL_ERR(("Same to Current band %d\n", cfg->ncho_band));
+ return ret;
+ }
+
+ if (wl_get_drv_status(cfg, CONNECTED, ndev)) {
+#ifdef BCMDONGLEHOST
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+ BCM_REFERENCE(dhdp);
+ DHD_STATLOG_CTRL(dhdp, ST(DISASSOC_INT_START),
+ dhd_net2idx(dhdp->info, ndev), 0);
+#endif /* BCMDONGLEHOST */
+ ret = wldev_ioctl_set(ndev, WLC_DISASSOC, NULL, 0);
+ if (ret < 0) {
+ WL_ERR(("WLC_DISASSOC error %d\n", ret));
+ /* continue to set 'if_band' */
+ }
+ else {
+ /* This is to ensure that 'if_band' iovar is issued only after
+ * disconnection is completed
+ */
+ wait_cnt = WAIT_FOR_DISCONNECT_MAX;
+ while (wl_get_drv_status(cfg, CONNECTED, ndev) && wait_cnt) {
+ WL_DBG(("Wait until disconnected. wait_cnt: %d\n", wait_cnt));
+ wait_cnt--;
+ OSL_SLEEP(50);
+ }
+ }
+ }
+ if ((ret = wldev_iovar_setbuf(ndev, "if_band", &band,
+ sizeof(int), ioctl_buf, sizeof(ioctl_buf), NULL)) < 0) {
+ WL_ERR(("seting if_band failed ret=%d\n", ret));
+ /* issue 'WLC_SET_BAND' if if_band is not supported */
+ if (ret == BCME_UNSUPPORTED) {
+ ret = wldev_set_band(ndev, band);
+ if (ret < 0) {
+ WL_ERR(("seting band failed ret=%d\n", ret));
+ }
+ }
+ }
+
+ if (ret == BCME_OK) {
+ cfg->ncho_band = band;
+ }
+ return ret;
+}
+
+bool wl_cfg80211_is_concurrent_mode(struct net_device *dev)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ if ((cfg) && (wl_get_drv_status_all(cfg, CONNECTED) > 1)) {
+ return true;
+ } else {
+ return false;
+ }
+}
+
+/*
+ * This is to support existing btcoex implementation
+ * btcoex clean up may help removing this function
+ */
+void* wl_cfg80211_get_dhdp(struct net_device *dev)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+
+ return cfg ? cfg->pub : NULL;
+}
+
+bool wl_cfg80211_is_p2p_active(struct net_device *dev)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ return (cfg && cfg->p2p);
+}
+
+bool wl_cfg80211_is_roam_offload(struct net_device * dev)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ return (cfg && cfg->roam_offload);
+}
+
+bool wl_cfg80211_is_event_from_connected_bssid(struct net_device * dev, const wl_event_msg_t *e,
+ int ifidx)
+{
+#ifdef BCMDONGLEHOST
+ u8 *curbssid = NULL;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+
+ if (!cfg) {
+ /* When the interface is created using the wl
+ * utility, ndev->ieee80211_ptr will be NULL.
+ */
+ return false;
+ }
+ curbssid = wl_read_prof(cfg, dev, WL_PROF_BSSID);
+
+ if (curbssid && memcmp(curbssid, &e->addr, ETHER_ADDR_LEN) == 0) {
+ return true;
+ }
+#endif /* BCMDONGLEHOST */
+ return false;
+}
+
+static void wl_cfg80211_work_handler(struct work_struct * work)
+{
+ struct bcm_cfg80211 *cfg = NULL;
+ struct net_info *iter, *next;
+ s32 err = BCME_OK;
+ s32 pm = PM_FAST;
+ dhd_pub_t *dhd;
+ BCM_SET_CONTAINER_OF(cfg, work, struct bcm_cfg80211, pm_enable_work.work);
+ WL_DBG(("Enter \n"));
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ for_each_ndev(cfg, iter, next) {
+ GCC_DIAGNOSTIC_POP();
+ /* p2p discovery iface ndev could be null */
+ if (iter->ndev) {
+ if (!wl_get_drv_status(cfg, CONNECTED, iter->ndev) ||
+ (wl_get_mode_by_netdev(cfg, iter->ndev) != WL_MODE_BSS &&
+ wl_get_mode_by_netdev(cfg, iter->ndev) != WL_MODE_IBSS))
+ continue;
+ if (iter->ndev) {
+ dhd = (dhd_pub_t *)(cfg->pub);
+ if (dhd_conf_get_pm(dhd) >= 0)
+ pm = dhd_conf_get_pm(dhd);
+ if ((err = wldev_ioctl_set(iter->ndev, WLC_SET_PM,
+ &pm, sizeof(pm))) != 0) {
+ if (err == -ENODEV)
+ WL_DBG(("%s:netdev not ready\n",
+ iter->ndev->name));
+ else
+ WL_ERR(("%s:error (%d)\n",
+ iter->ndev->name, err));
+ } else
+ wl_cfg80211_update_power_mode(iter->ndev);
+ }
+ }
+ }
+
+#if defined(BCMDONGLEHOST) && defined(OEM_ANDROID)
+ DHD_PM_WAKE_UNLOCK(cfg->pub);
+#endif /* BCMDONGLEHOST && OEM_ANDROID */
+
+#if defined(OEM_ANDROID) && defined(DHCP_SCAN_SUPPRESS)
+ if (cfg->scan_suppressed) {
+ /* There is a pending scan_suppress. Clear it */
+ WL_ERR(("Clean up from timer after %d msec\n", WL_SCAN_SUPPRESS_TIMEOUT));
+ wl_cfg80211_scan_suppress(bcmcfg_to_prmry_ndev(cfg), 0);
+ }
+#endif /* DHCP_SCAN_SUPPRESS */
+}
+
+u8
+wl_get_action_category(void *frame, u32 frame_len)
+{
+ u8 category;
+ u8 *ptr = (u8 *)frame;
+ if (frame == NULL)
+ return DOT11_ACTION_CAT_ERR_MASK;
+ if (frame_len < DOT11_ACTION_HDR_LEN)
+ return DOT11_ACTION_CAT_ERR_MASK;
+ category = ptr[DOT11_ACTION_CAT_OFF];
+ WL_DBG(("Action Category: %d\n", category));
+ return category;
+}
+
+int
+wl_get_public_action(void *frame, u32 frame_len, u8 *ret_action)
+{
+ u8 *ptr = (u8 *)frame;
+ if (frame == NULL || ret_action == NULL)
+ return BCME_ERROR;
+ if (frame_len < DOT11_ACTION_HDR_LEN)
+ return BCME_ERROR;
+ if (DOT11_ACTION_CAT_PUBLIC != wl_get_action_category(frame, frame_len))
+ return BCME_ERROR;
+ *ret_action = ptr[DOT11_ACTION_ACT_OFF];
+ WL_DBG(("Public Action : %d\n", *ret_action));
+ return BCME_OK;
+}
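+
+/*
+ * Layout sketch for the two helpers above (illustrative bytes: 0x04 is the
+ * public action category, 0x09 the vendor-specific public action subtype
+ * used by P2P):
+ *
+ *   u8 body[] = { 0x04, 0x09 };
+ *   u8 act;
+ *   if (wl_get_public_action(body, sizeof(body), &act) == BCME_OK)
+ *       WL_DBG(("public action 0x%02x\n", act));
+ */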
+
+#ifdef WLFBT
+int
+wl_cfg80211_get_fbt_key(struct net_device *dev, uint8 *key, int total_len)
+{
+ struct bcm_cfg80211 * cfg = wl_get_cfg(dev);
+ int bytes_written = -1;
+
+ if (total_len < FBT_KEYLEN) {
+ WL_ERR(("wl_cfg80211_get_fbt_key: Insufficient buffer \n"));
+ goto end;
+ }
+ if (cfg) {
+ memcpy(key, cfg->fbt_key, FBT_KEYLEN);
+ bytes_written = FBT_KEYLEN;
+ } else {
+ bzero(key, FBT_KEYLEN);
+ WL_ERR(("wl_cfg80211_get_fbt_key: Failed to copy KCK and KEK \n"));
+ }
+ prhex("KCK, KEK", (uchar *)key, FBT_KEYLEN);
+end:
+ return bytes_written;
+}
+#endif /* WLFBT */
+
+static int
+wl_cfg80211_delayed_roam(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ const struct ether_addr *bssid)
+{
+ s32 err;
+ wl_event_msg_t e;
+
+ bzero(&e, sizeof(e));
+ e.event_type = cpu_to_be32(WLC_E_ROAM);
+ memcpy(&e.addr, bssid, ETHER_ADDR_LEN);
+ /* trigger the roam event handler */
+ err = wl_notify_roaming_status(cfg, ndev_to_cfgdev(ndev), &e, NULL);
+
+ return err;
+}
+
+static bool
+wl_cfg80211_filter_vndr_ext_id(const vndr_ie_t *vndrie)
+{
+ if (vndrie->oui[0] == FILS_EXTID_MNG_HLP_CONTAINER_ID) {
+ /* Skip adding the FILS HLP IE; it's already done using
+ * "WL_FILS_CMD_ADD_HLP_IE" subcmd.
+ */
+ WL_DBG(("%s:SKIP ADDING FILS HLP EXTN ID\n", __func__));
+ return true;
+ }
+ return false;
+}
+
+static s32
+wl_cfg80211_parse_vndr_ies(const u8 *parse, u32 len,
+ struct parsed_vndr_ies *vndr_ies)
+{
+ s32 err = BCME_OK;
+ const vndr_ie_t *vndrie;
+ const bcm_tlv_t *ie;
+ struct parsed_vndr_ie_info *parsed_info;
+ u32 count = 0;
+ u32 remained_len;
+
+ remained_len = len;
+ bzero(vndr_ies, sizeof(*vndr_ies));
+
+ ie = (const bcm_tlv_t *) parse;
+ if (!bcm_valid_tlv(ie, remained_len))
+ ie = NULL;
+ while (ie) {
+ if (count >= MAX_VNDR_IE_NUMBER)
+ break;
+ if (ie->id == DOT11_MNG_VS_ID || (ie->id == DOT11_MNG_ID_EXT_ID)) {
+ vndrie = (const vndr_ie_t *) ie;
+ if (ie->id == DOT11_MNG_ID_EXT_ID) {
+ /* len should be at least the size of the ID extension field */
+ if (vndrie->len < MIN_VENDOR_EXTN_IE_LEN) {
+ WL_ERR(("%s: invalid vndr extn ie."
+ " length %d\n",
+ __FUNCTION__, vndrie->len));
+ goto end;
+ }
+ if (wl_cfg80211_filter_vndr_ext_id(vndrie)) {
+ goto end;
+ }
+ } else {
+ /* len should be at least the OUI length
+ * plus one data byte
+ */
+ if (vndrie->len < (VNDR_IE_MIN_LEN + 1)) {
+ WL_ERR(("wl_cfg80211_parse_vndr_ies:"
+ " invalid vndr ie. length is too small %d\n",
+ vndrie->len));
+ goto end;
+ }
+
+ /* if wpa or wme ie, do not add ie */
+ if (!bcmp(vndrie->oui, (u8*)WPA_OUI, WPA_OUI_LEN) &&
+ ((vndrie->data[0] == WPA_OUI_TYPE) ||
+ (vndrie->data[0] == WME_OUI_TYPE))) {
+ CFGP2P_DBG(("SKIP WPA/WME oui \n"));
+ goto end;
+ }
+#if defined(WL_MBO) || defined(WL_OCE)
+ if ((!memcmp(vndrie->oui, (u8 *)WFA_OUI, WFA_OUI_LEN)) &&
+ (vndrie->data[0] == WFA_OUI_TYPE_MBO_OCE)) {
+ WL_DBG(("SKIP ID : %d Len: %d OUI:"MACOUIDBG
+ " TYPE:%0x\n", vndrie->id, vndrie->len,
+ MACOUI2STRDBG(vndrie->oui), vndrie->data[0]));
+ goto end;
+ }
+#endif /* WL_MBO || WL_OCE */
+ }
+
+ parsed_info = &vndr_ies->ie_info[count++];
+
+ /* save vndr ie information */
+ parsed_info->ie_ptr = (const char *)vndrie;
+ parsed_info->ie_len = (vndrie->len + TLV_HDR_LEN);
+ memcpy(&parsed_info->vndrie, vndrie, sizeof(vndr_ie_t));
+ vndr_ies->count = count;
+ if (ie->id == DOT11_MNG_ID_EXT_ID) {
+ WL_DBG(("** Vendor Extension ie id: 0x%02x, len:%d\n",
+ ie->id, vndrie->len));
+ } else {
+ WL_DBG(("** OUI "MACOUIDBG", type 0x%02x len:%d\n",
+ MACOUI2STRDBG(parsed_info->vndrie.oui),
+ parsed_info->vndrie.data[0], vndrie->len));
+ }
+ }
+end:
+ ie = bcm_next_tlv(ie, &remained_len);
+ }
+ return err;
+}
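+
+/*
+ * Input sketch for the parser above (illustrative bytes): one vendor IE,
+ * id 0xdd, len 4, OUI 00:90:4c plus a single data byte, which satisfies the
+ * VNDR_IE_MIN_LEN + 1 check:
+ *
+ *   const u8 ies[] = { 0xdd, 0x04, 0x00, 0x90, 0x4c, 0x01 };
+ *   struct parsed_vndr_ies out;
+ *   if (wl_cfg80211_parse_vndr_ies(ies, sizeof(ies), &out) == BCME_OK)
+ *       WL_DBG(("parsed %d vendor IE(s)\n", out.count));
+ */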
+
+static bool
+wl_vndr_ies_exclude_vndr_oui(struct parsed_vndr_ie_info *vndr_info)
+{
+ int i = 0;
+
+ while (exclude_vndr_oui_list[i]) {
+ if (!memcmp(vndr_info->vndrie.oui,
+ exclude_vndr_oui_list[i],
+ DOT11_OUI_LEN)) {
+ return TRUE;
+ }
+ i++;
+ }
+
+ return FALSE;
+}
+
+static bool
+wl_vndr_ies_check_duplicate_vndr_oui(struct bcm_cfg80211 *cfg,
+ struct parsed_vndr_ie_info *vndr_info)
+{
+ wl_vndr_oui_entry_t *oui_entry = NULL;
+ unsigned long flags;
+
+ WL_CFG_VNDR_OUI_SYNC_LOCK(&cfg->vndr_oui_sync, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ list_for_each_entry(oui_entry, &cfg->vndr_oui_list, list) {
+ GCC_DIAGNOSTIC_POP();
+ if (!memcmp(oui_entry->oui, vndr_info->vndrie.oui, DOT11_OUI_LEN)) {
+ WL_CFG_VNDR_OUI_SYNC_UNLOCK(&cfg->vndr_oui_sync, flags);
+ return TRUE;
+ }
+ }
+ WL_CFG_VNDR_OUI_SYNC_UNLOCK(&cfg->vndr_oui_sync, flags);
+ return FALSE;
+}
+
+static bool
+wl_vndr_ies_add_vendor_oui_list(struct bcm_cfg80211 *cfg,
+ struct parsed_vndr_ie_info *vndr_info)
+{
+ wl_vndr_oui_entry_t *oui_entry = NULL;
+ unsigned long flags;
+
+ oui_entry = kmalloc(sizeof(*oui_entry), GFP_KERNEL);
+ if (oui_entry == NULL) {
+ WL_ERR(("alloc failed\n"));
+ return FALSE;
+ }
+
+ memcpy(oui_entry->oui, vndr_info->vndrie.oui, DOT11_OUI_LEN);
+
+ INIT_LIST_HEAD(&oui_entry->list);
+ WL_CFG_VNDR_OUI_SYNC_LOCK(&cfg->vndr_oui_sync, flags);
+ list_add_tail(&oui_entry->list, &cfg->vndr_oui_list);
+ WL_CFG_VNDR_OUI_SYNC_UNLOCK(&cfg->vndr_oui_sync, flags);
+
+ return TRUE;
+}
+
+static void
+wl_vndr_ies_clear_vendor_oui_list(struct bcm_cfg80211 *cfg)
+{
+ wl_vndr_oui_entry_t *oui_entry = NULL;
+ unsigned long flags;
+
+ WL_CFG_VNDR_OUI_SYNC_LOCK(&cfg->vndr_oui_sync, flags);
+ while (!list_empty(&cfg->vndr_oui_list)) {
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ oui_entry = list_entry(cfg->vndr_oui_list.next, wl_vndr_oui_entry_t, list);
+ GCC_DIAGNOSTIC_POP();
+ if (oui_entry) {
+ list_del(&oui_entry->list);
+ kfree(oui_entry);
+ }
+ }
+ WL_CFG_VNDR_OUI_SYNC_UNLOCK(&cfg->vndr_oui_sync, flags);
+}
+
+static int
+wl_vndr_ies_get_vendor_oui(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ char *vndr_oui, u32 vndr_oui_len)
+{
+ int i;
+ int vndr_oui_num = 0;
+
+ struct wl_connect_info *conn_info = wl_to_conn(cfg);
+ wl_vndr_oui_entry_t *oui_entry = NULL;
+ struct parsed_vndr_ie_info *vndr_info;
+ struct parsed_vndr_ies vndr_ies;
+
+ char *pos = vndr_oui;
+ u32 remained_buf_len = vndr_oui_len;
+ unsigned long flags;
+
+ if (!conn_info->resp_ie_len) {
+ return BCME_ERROR;
+ }
+
+ wl_vndr_ies_clear_vendor_oui_list(cfg);
+
+ if ((wl_cfg80211_parse_vndr_ies((u8 *)conn_info->resp_ie,
+ conn_info->resp_ie_len, &vndr_ies)) == BCME_OK) {
+ for (i = 0; i < vndr_ies.count; i++) {
+ vndr_info = &vndr_ies.ie_info[i];
+ if (wl_vndr_ies_exclude_vndr_oui(vndr_info)) {
+ continue;
+ }
+
+ if (wl_vndr_ies_check_duplicate_vndr_oui(cfg, vndr_info)) {
+ continue;
+ }
+
+ wl_vndr_ies_add_vendor_oui_list(cfg, vndr_info);
+ vndr_oui_num++;
+ }
+ }
+
+ if (vndr_oui) {
+ WL_CFG_VNDR_OUI_SYNC_LOCK(&cfg->vndr_oui_sync, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ list_for_each_entry(oui_entry, &cfg->vndr_oui_list, list) {
+ GCC_DIAGNOSTIC_POP();
+ if (remained_buf_len < VNDR_OUI_STR_LEN) {
+ WL_CFG_VNDR_OUI_SYNC_UNLOCK(&cfg->vndr_oui_sync, flags);
+ return BCME_ERROR;
+ }
+ pos += snprintf(pos, VNDR_OUI_STR_LEN, "%02X-%02X-%02X ",
+ oui_entry->oui[0], oui_entry->oui[1], oui_entry->oui[2]);
+ remained_buf_len -= VNDR_OUI_STR_LEN;
+ }
+ WL_CFG_VNDR_OUI_SYNC_UNLOCK(&cfg->vndr_oui_sync, flags);
+ }
+
+ return vndr_oui_num;
+}
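+
+/*
+ * Usage sketch (illustrative; the 512-byte buffer is an assumption):
+ *
+ *   char ouis[512] = {0};
+ *   int n = wl_vndr_ies_get_vendor_oui(cfg, ndev, ouis, sizeof(ouis));
+ *   if (n > 0)
+ *       WL_DBG(("%d vendor OUI(s): %s\n", n, ouis));
+ *
+ * Each entry consumes VNDR_OUI_STR_LEN bytes formatted as "xx-xx-xx ".
+ */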
+
+#ifdef WL_ANALYTICS
+static bool
+wl_vndr_ies_find_vendor_oui(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ const char *vndr_oui)
+{
+ int i;
+ int vndr_oui_num = 0;
+
+ struct wl_connect_info *conn_info = wl_to_conn(cfg);
+ wl_vndr_oui_entry_t *oui_entry = NULL;
+ struct parsed_vndr_ie_info *vndr_info;
+ struct parsed_vndr_ies vndr_ies;
+
+ unsigned long flags;
+ bool found = FALSE;
+
+ if (!conn_info->resp_ie_len) {
+ return FALSE;
+ }
+
+ wl_vndr_ies_clear_vendor_oui_list(cfg);
+
+ if ((wl_cfg80211_parse_vndr_ies((u8 *)conn_info->resp_ie,
+ conn_info->resp_ie_len, &vndr_ies)) == BCME_OK) {
+ for (i = 0; i < vndr_ies.count; i++) {
+ vndr_info = &vndr_ies.ie_info[i];
+ if (wl_vndr_ies_exclude_vndr_oui(vndr_info)) {
+ continue;
+ }
+
+ if (wl_vndr_ies_check_duplicate_vndr_oui(cfg, vndr_info)) {
+ continue;
+ }
+
+ wl_vndr_ies_add_vendor_oui_list(cfg, vndr_info);
+ vndr_oui_num++;
+ }
+ }
+
+ if (vndr_oui && vndr_oui_num) {
+ WL_CFG_VNDR_OUI_SYNC_LOCK(&cfg->vndr_oui_sync, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ list_for_each_entry(oui_entry, &cfg->vndr_oui_list, list) {
+ GCC_DIAGNOSTIC_POP();
+ if (!memcmp(vndr_oui, oui_entry->oui, DOT11_OUI_LEN)) {
+ found = TRUE;
+ break;
+ }
+ }
+ WL_CFG_VNDR_OUI_SYNC_UNLOCK(&cfg->vndr_oui_sync, flags);
+ }
+
+ return found;
+}
+#endif /* WL_ANALYTICS */
+
+void
+wl_cfg80211_clear_p2p_disc_ies(struct bcm_cfg80211 *cfg)
+{
+#ifdef WL_CFG80211_P2P_DEV_IF
+ if (cfg->p2p_wdev) {
+ /* clear IEs for dedicated p2p interface */
+ WL_DBG_MEM(("Clear IEs for P2P Discovery Iface\n"));
+ wl_cfg80211_clear_per_bss_ies(cfg, cfg->p2p_wdev);
+ }
+#else
+ /* Legacy P2P used to store it in primary dev cache */
+ s32 index;
+ struct net_device *ndev;
+ s32 bssidx;
+ s32 ret;
+ s32 vndrie_flag[] = {VNDR_IE_BEACON_FLAG, VNDR_IE_PRBRSP_FLAG,
+ VNDR_IE_ASSOCRSP_FLAG, VNDR_IE_PRBREQ_FLAG, VNDR_IE_ASSOCREQ_FLAG};
+
+ WL_DBG(("Clear IEs for P2P Discovery Iface \n"));
+ /* certain vendors use a p2p0 interface in addition
+ * to the dedicated P2P interface supported by the
+ * Linux kernel.
+ */
+ ndev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_PRIMARY);
+ bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+ if (bssidx == WL_INVALID) {
+ WL_DBG(("No discovery I/F available. Do nothing.\n"));
+ return;
+ }
+
+ for (index = 0; index < ARRAYSIZE(vndrie_flag); index++) {
+ if ((ret = wl_cfg80211_set_mgmt_vndr_ies(cfg, ndev_to_cfgdev(ndev),
+ bssidx, vndrie_flag[index], NULL, 0)) < 0) {
+ if (ret != BCME_NOTFOUND) {
+ WL_ERR(("vndr_ies clear failed (%d). Ignoring.. \n", ret));
+ }
+ }
+ }
+#endif /* WL_CFG80211_P2P_DEV_IF */
+}
+
+s32
+wl_cfg80211_clear_per_bss_ies(struct bcm_cfg80211 *cfg, struct wireless_dev *wdev)
+{
+ s32 index;
+ s32 ret;
+ struct net_info *netinfo;
+ s32 vndrie_flag[] = {VNDR_IE_BEACON_FLAG, VNDR_IE_PRBRSP_FLAG,
+ VNDR_IE_ASSOCRSP_FLAG, VNDR_IE_PRBREQ_FLAG, VNDR_IE_ASSOCREQ_FLAG};
+
+ netinfo = wl_get_netinfo_by_wdev(cfg, wdev);
+ if (!netinfo || !netinfo->wdev) {
+ WL_ERR(("netinfo or netinfo->wdev is NULL\n"));
+ return -1;
+ }
+
+ WL_DBG(("clear management vendor IEs for bssidx:%d \n", netinfo->bssidx));
+ /* Clear the IEs set in the firmware so that host is in sync with firmware */
+ for (index = 0; index < ARRAYSIZE(vndrie_flag); index++) {
+ if ((ret = wl_cfg80211_set_mgmt_vndr_ies(cfg, wdev_to_cfgdev(netinfo->wdev),
+ netinfo->bssidx, vndrie_flag[index], NULL, 0)) < 0)
+ if (ret != BCME_NOTFOUND) {
+ WL_ERR(("vndr_ies clear failed. Ignoring.. \n"));
+ }
+ }
+
+ return 0;
+}
+
+s32
+wl_cfg80211_clear_mgmt_vndr_ies(struct bcm_cfg80211 *cfg)
+{
+ struct net_info *iter, *next;
+
+ WL_DBG(("clear management vendor IEs \n"));
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ for_each_ndev(cfg, iter, next) {
+ GCC_DIAGNOSTIC_POP();
+ wl_cfg80211_clear_per_bss_ies(cfg, iter->wdev);
+ }
+ return 0;
+}
+
+static void
+wl_print_fw_ie_data(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bssidx)
+{
+ vndr_ie_buf_t *ies;
+ s32 ret;
+
+ ret = wldev_iovar_getbuf_bsscfg(ndev, "vndr_ie", NULL,
+ 0, cfg->ioctl_buf, WLC_IOCTL_MAXLEN,
+ bssidx, &cfg->ioctl_buf_sync);
+ if (ret == BCME_OK) {
+ ies = (vndr_ie_buf_t *)cfg->ioctl_buf;
+ WL_INFORM_MEM(("FW IE count:%d ", ies->iecount));
+#ifdef GET_FW_IE_DATA
+ if (wl_dbg_level & WL_DBG_DBG) {
+ int i = 0;
+ /* If debug enabled, print each IE */
+ for (i = 0; i < ies->iecount; i++) {
+ vndr_ie_info_t *info = &ies->vndr_ie_list[i];
+ WL_DBG_MEM(("pktflag:0x%x\n", info->pktflag));
+ prhex("IE:", (u8 *)&info->vndr_ie_data,
+ info->vndr_ie_data.len + TLV_HDR_LEN);
+ }
+ }
+#endif /* GET_FW_IE_DATA */
+ } else {
+ WL_ERR(("IE retrieval failed! ret:%d\n", ret));
+ }
+}
+
+#define WL_VNDR_IE_MAXLEN 2048
+static s8 g_mgmt_ie_buf[WL_VNDR_IE_MAXLEN];
+int
+wl_cfg80211_set_mgmt_vndr_ies(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ s32 bssidx, s32 pktflag, const u8 *vndr_ie, u32 vndr_ie_len)
+{
+ struct net_device *ndev = NULL;
+ s32 ret = BCME_OK;
+ u8 *curr_ie_buf = NULL;
+ u8 *mgmt_ie_buf = NULL;
+ u32 mgmt_ie_buf_len = 0;
+ u32 *mgmt_ie_len = 0;
+ u32 del_add_ie_buf_len = 0;
+ u32 total_ie_buf_len = 0;
+ u32 parsed_ie_buf_len = 0;
+ struct parsed_vndr_ies old_vndr_ies;
+ struct parsed_vndr_ies new_vndr_ies;
+ s32 i;
+ u8 *ptr;
+ s32 remained_buf_len;
+ wl_bss_vndr_ies_t *ies = NULL;
+ struct net_info *netinfo;
+ struct wireless_dev *wdev;
+
+ if (!cfgdev) {
+ WL_ERR(("cfgdev is NULL\n"));
+ return -EINVAL;
+ }
+
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+ wdev = cfgdev_to_wdev(cfgdev);
+
+ if (bssidx > WL_MAX_IFS) {
+ WL_ERR(("bssidx > supported concurrent Ifaces \n"));
+ return -EINVAL;
+ }
+
+ netinfo = wl_get_netinfo_by_wdev(cfg, wdev);
+ if (!netinfo) {
+ WL_ERR(("net_info ptr is NULL \n"));
+ return -EINVAL;
+ }
+
+ /* Clear the global buffer */
+ bzero(g_mgmt_ie_buf, sizeof(g_mgmt_ie_buf));
+ curr_ie_buf = g_mgmt_ie_buf;
+ ies = &netinfo->bss.ies;
+
+ WL_DBG_MEM(("Enter. pktflag:0x%x bssidx:%x vnd_ie_len:%d\n",
+ pktflag, bssidx, vndr_ie_len));
+
+ switch (pktflag) {
+ case VNDR_IE_PRBRSP_FLAG :
+ mgmt_ie_buf = ies->probe_res_ie;
+ mgmt_ie_len = &ies->probe_res_ie_len;
+ mgmt_ie_buf_len = sizeof(ies->probe_res_ie);
+ break;
+ case VNDR_IE_ASSOCRSP_FLAG :
+ mgmt_ie_buf = ies->assoc_res_ie;
+ mgmt_ie_len = &ies->assoc_res_ie_len;
+ mgmt_ie_buf_len = sizeof(ies->assoc_res_ie);
+ break;
+ case VNDR_IE_BEACON_FLAG :
+ mgmt_ie_buf = ies->beacon_ie;
+ mgmt_ie_len = &ies->beacon_ie_len;
+ mgmt_ie_buf_len = sizeof(ies->beacon_ie);
+ break;
+ case VNDR_IE_PRBREQ_FLAG :
+ mgmt_ie_buf = ies->probe_req_ie;
+ mgmt_ie_len = &ies->probe_req_ie_len;
+ mgmt_ie_buf_len = sizeof(ies->probe_req_ie);
+ break;
+ case VNDR_IE_ASSOCREQ_FLAG :
+ mgmt_ie_buf = ies->assoc_req_ie;
+ mgmt_ie_len = &ies->assoc_req_ie_len;
+ mgmt_ie_buf_len = sizeof(ies->assoc_req_ie);
+ break;
+ case VNDR_IE_DISASSOC_FLAG :
+ mgmt_ie_buf = ies->disassoc_ie;
+ mgmt_ie_len = &ies->disassoc_ie_len;
+ mgmt_ie_buf_len = sizeof(ies->disassoc_ie);
+ break;
+ default:
+ mgmt_ie_buf = NULL;
+ mgmt_ie_len = NULL;
+ WL_ERR(("not suitable packet type (%d)\n", pktflag));
+ return BCME_ERROR;
+ }
+
+ if (vndr_ie_len > mgmt_ie_buf_len) {
+ WL_ERR(("extra IE size too big\n"));
+ ret = -ENOMEM;
+ } else {
+ /* parse and save new vndr_ie in curr_ie_buff before comparing it */
+ if (vndr_ie && vndr_ie_len && curr_ie_buf) {
+ ptr = curr_ie_buf;
+
+ WL_DBG(("Incoming IEs len:%d\n", vndr_ie_len));
+ if ((ret = wl_cfg80211_parse_vndr_ies((const u8 *)vndr_ie,
+ vndr_ie_len, &new_vndr_ies)) < 0) {
+ WL_ERR(("parse vndr ie failed \n"));
+ goto exit;
+ }
+
+ for (i = 0; i < new_vndr_ies.count; i++) {
+ struct parsed_vndr_ie_info *vndrie_info =
+ &new_vndr_ies.ie_info[i];
+
+ if ((parsed_ie_buf_len + vndrie_info->ie_len) > WL_VNDR_IE_MAXLEN) {
+ WL_ERR(("IE size is too big (%d > %d)\n",
+ parsed_ie_buf_len, WL_VNDR_IE_MAXLEN));
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ memcpy(ptr + parsed_ie_buf_len, vndrie_info->ie_ptr,
+ vndrie_info->ie_len);
+ parsed_ie_buf_len += vndrie_info->ie_len;
+ }
+ }
+
+ if (mgmt_ie_buf != NULL) {
+ if (parsed_ie_buf_len && (parsed_ie_buf_len == *mgmt_ie_len) &&
+ (memcmp(mgmt_ie_buf, curr_ie_buf, parsed_ie_buf_len) == 0)) {
+ WL_DBG_MEM(("No change in cached IEs for pkt:%d\n", pktflag));
+ goto exit;
+ }
+
+ /* parse old vndr_ie */
+ WL_DBG(("Cached IEs len:%d\n", *mgmt_ie_len));
+ if ((ret = wl_cfg80211_parse_vndr_ies(mgmt_ie_buf, *mgmt_ie_len,
+ &old_vndr_ies)) < 0) {
+ WL_ERR(("parse vndr ie failed \n"));
+ goto exit;
+ }
+ /* make a command to delete old ie */
+ for (i = 0; i < old_vndr_ies.count; i++) {
+ struct parsed_vndr_ie_info *vndrie_info =
+ &old_vndr_ies.ie_info[i];
+
+ if (vndrie_info->vndrie.id == DOT11_MNG_ID_EXT_ID) {
+ WL_DBG_MEM(("DEL VENDOR EXTN ID :%d TYPE:%d Len:%d\n",
+ vndrie_info->vndrie.id, vndrie_info->vndrie.oui[0],
+ vndrie_info->vndrie.len));
+ } else {
+ WL_DBG_MEM(("DEL ID :%d Len:%d OUI:"MACOUIDBG" TYPE:%d\n",
+ vndrie_info->vndrie.id, vndrie_info->vndrie.len,
+ MACOUI2STRDBG(vndrie_info->vndrie.oui),
+ vndrie_info->vndrie.data[0]));
+ }
+
+ del_add_ie_buf_len = wl_cfgp2p_vndr_ie(cfg, curr_ie_buf,
+ pktflag, vndrie_info->vndrie.oui,
+ vndrie_info->vndrie.id,
+ vndrie_info->ie_ptr + VNDR_IE_FIXED_LEN,
+ vndrie_info->ie_len - VNDR_IE_FIXED_LEN,
+ "del");
+
+ curr_ie_buf += del_add_ie_buf_len;
+ total_ie_buf_len += del_add_ie_buf_len;
+ }
+ }
+
+ *mgmt_ie_len = 0;
+ /* Add if there is any extra IE */
+ if (mgmt_ie_buf && parsed_ie_buf_len) {
+ ptr = mgmt_ie_buf;
+
+ remained_buf_len = mgmt_ie_buf_len;
+ /* make a command to add new ie */
+ for (i = 0; i < new_vndr_ies.count; i++) {
+ struct parsed_vndr_ie_info *vndrie_info =
+ &new_vndr_ies.ie_info[i];
+ if (vndrie_info->vndrie.id == DOT11_MNG_ID_EXT_ID) {
+ WL_DBG_MEM(("ADD VENDOR EXTN ID :%d TYPE:%d Len:%d\n",
+ vndrie_info->vndrie.id, vndrie_info->vndrie.oui[0],
+ vndrie_info->vndrie.len));
+ } else {
+ WL_DBG_MEM(("ADD ID :%d Len:%d OUI:"MACOUIDBG" TYPE:%d\n",
+ vndrie_info->vndrie.id, vndrie_info->vndrie.len,
+ MACOUI2STRDBG(vndrie_info->vndrie.oui),
+ vndrie_info->vndrie.data[0]));
+ }
+
+ del_add_ie_buf_len = wl_cfgp2p_vndr_ie(cfg, curr_ie_buf,
+ pktflag, vndrie_info->vndrie.oui,
+ vndrie_info->vndrie.id,
+ vndrie_info->ie_ptr + VNDR_IE_FIXED_LEN,
+ vndrie_info->ie_len - VNDR_IE_FIXED_LEN,
+ "add");
+
+ /* verify remained buf size before copy data */
+ if (remained_buf_len >= vndrie_info->ie_len) {
+ remained_buf_len -= vndrie_info->ie_len;
+ } else {
+ WL_ERR(("no space in mgmt_ie_buf: pktflag = %d, "
+ "found vndr ies # = %d(cur %d), remained len %d, "
+ "cur mgmt_ie_len %d, new ie len = %d\n",
+ pktflag, new_vndr_ies.count, i, remained_buf_len,
+ *mgmt_ie_len, vndrie_info->ie_len));
+ break;
+ }
+
+ /* save the parsed IE in cfg struct */
+ memcpy(ptr + (*mgmt_ie_len), vndrie_info->ie_ptr,
+ vndrie_info->ie_len);
+ *mgmt_ie_len += vndrie_info->ie_len;
+ curr_ie_buf += del_add_ie_buf_len;
+ total_ie_buf_len += del_add_ie_buf_len;
+ }
+ }
+
+ if (total_ie_buf_len && cfg->ioctl_buf != NULL) {
+ ret = wldev_iovar_setbuf_bsscfg(ndev, "vndr_ie", g_mgmt_ie_buf,
+ total_ie_buf_len, cfg->ioctl_buf, WLC_IOCTL_MAXLEN,
+ bssidx, &cfg->ioctl_buf_sync);
+ if (ret) {
+ WL_ERR(("vndr_ie set error :%d\n", ret));
+ if (ret == BCME_NOTFOUND) {
+ /* retrieve and print IE data for debug */
+ wl_print_fw_ie_data(cfg, ndev, bssidx);
+ }
+ }
+ }
+ }
+exit:
+ return ret;
+}
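+
+/*
+ * Usage sketch (illustrative): passing a NULL/zero-length IE blob clears the
+ * cached IEs for that pktflag, emitting only "del" commands, which is how
+ * wl_cfg80211_clear_per_bss_ies() above uses this function:
+ *
+ *   wl_cfg80211_set_mgmt_vndr_ies(cfg, ndev_to_cfgdev(ndev), bssidx,
+ *       VNDR_IE_BEACON_FLAG, NULL, 0);
+ */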
+
+void wl_cfg80211_clear_security(struct bcm_cfg80211 *cfg)
+{
+ struct net_device *dev = bcmcfg_to_prmry_ndev(cfg);
+ int err;
+
+ /* Clear the security settings on the primary Interface */
+ err = wldev_iovar_setint(dev, "wsec", 0);
+ if (unlikely(err)) {
+ WL_ERR(("wsec clear failed \n"));
+ }
+ err = wldev_iovar_setint(dev, "auth", 0);
+ if (unlikely(err)) {
+ WL_ERR(("auth clear failed \n"));
+ }
+ err = wldev_iovar_setint(dev, "wpa_auth", WPA_AUTH_DISABLED);
+ if (unlikely(err)) {
+ WL_ERR(("wpa_auth clear failed \n"));
+ }
+}
+
+#ifdef WL_CFG80211_P2P_DEV_IF
+void wl_cfg80211_del_p2p_wdev(struct net_device *dev)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ struct wireless_dev *wdev = NULL;
+
+ WL_DBG(("Enter \n"));
+ if (!cfg) {
+ WL_ERR(("Invalid Ptr\n"));
+ return;
+ } else {
+ wdev = cfg->p2p_wdev;
+ }
+
+ if (wdev) {
+ wl_cfgp2p_del_p2p_disc_if(wdev, cfg);
+ }
+}
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+#ifdef GTK_OFFLOAD_SUPPORT
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0))
+static s32
+wl_cfg80211_set_rekey_data(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_gtk_rekey_data *data)
+{
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ s32 err = 0;
+ gtk_keyinfo_t keyinfo;
+ bcol_gtk_para_t bcol_keyinfo;
+ dhd_pub_t *dhd = cfg->pub;
+
+ WL_DBG(("Enter\n"));
+ if (data == NULL || cfg->p2p_net == dev) {
+ WL_ERR(("data is NULL or wrong net device\n"));
+ return -EINVAL;
+ }
+
+ if (!dhd->conf->rekey_offload) {
+ WL_TRACE(("rekey_offload disabled\n"));
+ return BCME_UNSUPPORTED;
+ }
+
+ memset(&bcol_keyinfo, 0, sizeof(bcol_keyinfo));
+ bcol_keyinfo.enable = 1;
+ bcol_keyinfo.ptk_len = 64;
+ memcpy(&bcol_keyinfo.ptk[0], data->kck, RSN_KCK_LENGTH);
+ memcpy(&bcol_keyinfo.ptk[RSN_KCK_LENGTH], data->kek, RSN_KEK_LENGTH);
+ err = wldev_iovar_setbuf(dev, "bcol_gtk_rekey_ptk", &bcol_keyinfo,
+ sizeof(bcol_keyinfo), cfg->ioctl_buf, WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync);
+ if (!err) {
+ goto exit;
+ }
+
+ bcopy(data->kck, keyinfo.KCK, RSN_KCK_LENGTH);
+ bcopy(data->kek, keyinfo.KEK, RSN_KEK_LENGTH);
+ bcopy(data->replay_ctr, keyinfo.ReplayCounter, RSN_REPLAY_LEN);
+ if ((err = wldev_iovar_setbuf(dev, "gtk_key_info", &keyinfo, sizeof(keyinfo),
+ cfg->ioctl_buf, WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync)) < 0) {
+ return err;
+ }
+
+exit:
+ prhex("kck", (const u8 *) (data->kck), RSN_KCK_LENGTH);
+ prhex("kek", (const u8 *) (data->kek), RSN_KEK_LENGTH);
+ prhex("replay_ctr", (const u8 *) (data->replay_ctr), RSN_REPLAY_LEN);
+ WL_DBG(("Exit\n"));
+ return err;
+}
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0) */
+#endif /* GTK_OFFLOAD_SUPPORT */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0))
+static int wl_cfg80211_set_pmk(struct wiphy *wiphy, struct net_device *dev,
+ const struct cfg80211_pmk_conf *conf)
+{
+ int ret = 0;
+ wsec_pmk_t pmk;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct wl_security *sec;
+ s32 bssidx;
+
+ pmk.key_len = conf->pmk_len;
+ if (pmk.key_len > sizeof(pmk.key)) {
+ ret = -EINVAL;
+ return ret;
+ }
+ pmk.flags = 0;
+ ret = memcpy_s(&pmk.key, sizeof(pmk.key), conf->pmk, conf->pmk_len);
+ if (ret) {
+ ret = -EINVAL;
+ return ret;
+ }
+
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+ WL_ERR(("Find index failed\n"));
+ ret = -EINVAL;
+ return ret;
+ }
+
+ sec = wl_read_prof(cfg, dev, WL_PROF_SEC);
+ if ((sec->wpa_auth == WLAN_AKM_SUITE_8021X) ||
+ (sec->wpa_auth == WL_AKM_SUITE_SHA256_1X)) {
+ ret = wldev_iovar_setbuf_bsscfg(dev, "okc_info_pmk", pmk.key, pmk.key_len,
+ cfg->ioctl_buf, WLC_IOCTL_SMLEN, bssidx, &cfg->ioctl_buf_sync);
+ if (ret) {
+ /* could fail in case that 'okc' is not supported */
+ WL_INFORM_MEM(("okc_info_pmk failed, err=%d (ignore)\n", ret));
+ }
+ }
+
+ ret = wldev_ioctl_set(dev, WLC_SET_WSEC_PMK, &pmk, sizeof(pmk));
+ if (ret) {
+ WL_ERR(("wl_cfg80211_set_pmk error:%d", ret));
+ ret = -EINVAL;
+ return ret;
+ } else {
+ WL_DBG(("pmk added for mac:"MACDBG"\n", MAC2STRDBG(conf->aa)));
+ }
+ return 0;
+}
+
+static int wl_cfg80211_del_pmk(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *aa)
+{
+ int err = BCME_OK;
+ struct cfg80211_pmksa pmksa;
+
+ /* build up cfg80211_pmksa structure to use existing wl_cfg80211_update_pmksa API */
+ bzero(&pmksa, sizeof(pmksa));
+ pmksa.bssid = aa;
+
+ err = wl_cfg80211_update_pmksa(wiphy, dev, &pmksa, FALSE);
+ if (unlikely(err)) {
+ WL_ERR(("wl_cfg80211_update_pmksa err:%d\n", err));
+ err = -EINVAL;
+ } else {
+ WL_DBG(("pmk deleted for bssid:"MACDBG"\n", MAC2STRDBG(aa)));
+ }
+
+ return err;
+}
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0) */
+
+u64
+wl_cfg80211_get_new_roc_id(struct bcm_cfg80211 *cfg)
+{
+ u64 id = 0;
+ id = ++cfg->last_roc_id;
+#ifdef P2P_LISTEN_OFFLOADING
+ if (id == P2PO_COOKIE) {
+ id = ++cfg->last_roc_id;
+ }
+#endif /* P2P_LISTEN_OFFLOADING */
+ if (id == 0)
+ id = ++cfg->last_roc_id;
+ return id;
+}
+
+struct net_device*
+wl_get_netdev_by_name(struct bcm_cfg80211 *cfg, char *ifname)
+{
+ struct net_info *iter, *next;
+ struct net_device *ndev = NULL;
+
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ for_each_ndev(cfg, iter, next) {
+ GCC_DIAGNOSTIC_POP();
+ if (iter->ndev) {
+ if (strncmp(iter->ndev->name, ifname, IFNAMSIZ) == 0) {
+ ndev = iter->ndev;
+ break;
+ }
+ }
+ }
+
+ return ndev;
+}
+
+#ifdef WBTEXT
+static bool wl_cfg80211_wbtext_check_bssid_list(struct bcm_cfg80211 *cfg, struct ether_addr *ea)
+{
+ wl_wbtext_bssid_t *bssid = NULL;
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ /* check duplicate */
+ list_for_each_entry(bssid, &cfg->wbtext_bssid_list, list) {
+ GCC_DIAGNOSTIC_POP();
+ if (!memcmp(bssid->ea.octet, ea, ETHER_ADDR_LEN)) {
+ return FALSE;
+ }
+ }
+
+ return TRUE;
+}
+
+static bool wl_cfg80211_wbtext_add_bssid_list(struct bcm_cfg80211 *cfg, struct ether_addr *ea)
+{
+ wl_wbtext_bssid_t *bssid = NULL;
+ char eabuf[ETHER_ADDR_STR_LEN];
+
+ bssid = (wl_wbtext_bssid_t *)MALLOC(cfg->osh, sizeof(wl_wbtext_bssid_t));
+ if (bssid == NULL) {
+ WL_ERR(("alloc failed\n"));
+ return FALSE;
+ }
+
+ memcpy(bssid->ea.octet, ea, ETHER_ADDR_LEN);
+
+ INIT_LIST_HEAD(&bssid->list);
+ list_add_tail(&bssid->list, &cfg->wbtext_bssid_list);
+
+ WL_DBG(("add wbtext bssid : %s\n", bcm_ether_ntoa(ea, eabuf)));
+
+ return TRUE;
+}
+
+static void wl_cfg80211_wbtext_clear_bssid_list(struct bcm_cfg80211 *cfg)
+{
+ wl_wbtext_bssid_t *bssid = NULL;
+ char eabuf[ETHER_ADDR_STR_LEN];
+
+ while (!list_empty(&cfg->wbtext_bssid_list)) {
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ bssid = list_entry(cfg->wbtext_bssid_list.next, wl_wbtext_bssid_t, list);
+ GCC_DIAGNOSTIC_POP();
+ if (bssid) {
+ WL_DBG(("clear wbtext bssid : %s\n", bcm_ether_ntoa(&bssid->ea, eabuf)));
+ list_del(&bssid->list);
+ MFREE(cfg->osh, bssid, sizeof(wl_wbtext_bssid_t));
+ }
+ }
+}
+
+static void wl_cfg80211_wbtext_update_rcc(struct bcm_cfg80211 *cfg, struct net_device *dev)
+{
+ struct wl_connect_info *conn_info = wl_to_conn(cfg);
+ bcm_tlv_t * cap_ie = NULL;
+ bool req_sent = FALSE;
+ struct wl_profile *profile;
+
+ WL_DBG(("Enter\n"));
+
+ profile = wl_get_profile_by_netdev(cfg, dev);
+ if (!profile) {
+ WL_ERR(("no profile exists\n"));
+ return;
+ }
+
+ if (wl_cfg80211_wbtext_check_bssid_list(cfg,
+ (struct ether_addr *)&profile->bssid) == FALSE) {
+ WL_DBG(("already updated\n"));
+ return;
+ }
+
+ /* first, check NBR bit in RRM IE */
+ if ((cap_ie = bcm_parse_tlvs(conn_info->resp_ie, conn_info->resp_ie_len,
+ DOT11_MNG_RRM_CAP_ID)) != NULL) {
+ if (isset(cap_ie->data, DOT11_RRM_CAP_NEIGHBOR_REPORT)) {
+ WL_DBG(("sending neighbor report\n"));
+ req_sent = wl_cfg80211_wbtext_send_nbr_req(cfg, dev, profile);
+ }
+ }
+
+ /* if RRM nbr was not supported, check BTM bit in extend cap. IE */
+ if (!req_sent) {
+ if ((cap_ie = bcm_parse_tlvs(conn_info->resp_ie, conn_info->resp_ie_len,
+ DOT11_MNG_EXT_CAP_ID)) != NULL) {
+ if (cap_ie->len >= DOT11_EXTCAP_LEN_BSSTRANS &&
+ isset(cap_ie->data, DOT11_EXT_CAP_BSSTRANS_MGMT)) {
+ WL_DBG(("sending btm query\n"));
+ wl_cfg80211_wbtext_send_btm_query(cfg, dev, profile);
+ }
+ }
+ }
+}
+
+static bool wl_cfg80211_wbtext_send_nbr_req(struct bcm_cfg80211 *cfg, struct net_device *dev,
+ struct wl_profile *profile)
+{
+ int error = -1;
+ char *smbuf = NULL;
+ struct wl_connect_info *conn_info = wl_to_conn(cfg);
+ bcm_tlv_t * rrm_cap_ie = NULL;
+ wlc_ssid_t *ssid = NULL;
+ bool ret = FALSE;
+
+ WL_DBG(("Enter\n"));
+
+ /* check RRM nbr bit in extend cap. IE of assoc response */
+ if ((rrm_cap_ie = bcm_parse_tlvs(conn_info->resp_ie, conn_info->resp_ie_len,
+ DOT11_MNG_RRM_CAP_ID)) != NULL) {
+ if (!isset(rrm_cap_ie->data, DOT11_RRM_CAP_NEIGHBOR_REPORT)) {
+ WL_DBG(("AP doesn't support neighbor report\n"));
+ return FALSE;
+ }
+ }
+
+ smbuf = (char *)MALLOCZ(cfg->osh, WLC_IOCTL_MAXLEN);
+ if (smbuf == NULL) {
+ WL_ERR(("failed to allocated memory\n"));
+ goto nbr_req_out;
+ }
+
+ ssid = (wlc_ssid_t *)MALLOCZ(cfg->osh, sizeof(wlc_ssid_t));
+ if (ssid == NULL) {
+ WL_ERR(("failed to allocated memory\n"));
+ goto nbr_req_out;
+ }
+
+ ssid->SSID_len = MIN(profile->ssid.SSID_len, DOT11_MAX_SSID_LEN);
+ memcpy(ssid->SSID, profile->ssid.SSID, ssid->SSID_len);
+
+ error = wldev_iovar_setbuf(dev, "rrm_nbr_req", ssid,
+ sizeof(wlc_ssid_t), smbuf, WLC_IOCTL_MAXLEN, NULL);
+ if (error == BCME_OK) {
+ ret = wl_cfg80211_wbtext_add_bssid_list(cfg,
+ (struct ether_addr *)&profile->bssid);
+ } else {
+ WL_ERR(("failed to send neighbor report request, error=%d\n", error));
+ }
+
+nbr_req_out:
+ if (ssid) {
+ MFREE(cfg->osh, ssid, sizeof(wlc_ssid_t));
+ }
+
+ if (smbuf) {
+ MFREE(cfg->osh, smbuf, WLC_IOCTL_MAXLEN);
+ }
+ return ret;
+}
+
+static bool wl_cfg80211_wbtext_send_btm_query(struct bcm_cfg80211 *cfg, struct net_device *dev,
+ struct wl_profile *profile)
+{
+ int error = -1;
+ bool ret = FALSE;
+ wl_bsstrans_query_t btq;
+
+ WL_DBG(("Enter\n"));
+
+ bzero(&btq, sizeof(wl_bsstrans_query_t));
+
+ btq.version = WL_BSSTRANS_QUERY_VERSION_1;
+ error = wldev_iovar_setbuf(dev, "wnm_bsstrans_query", &btq,
+ sizeof(btq), cfg->ioctl_buf, WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync);
+ if (error == BCME_OK) {
+ ret = wl_cfg80211_wbtext_add_bssid_list(cfg,
+ (struct ether_addr *)&profile->bssid);
+ } else {
+ WL_ERR(("wl_cfg80211_wbtext_send_btm_query: failed to set BTM query,"
+ " error=%d\n", error));
+ }
+ return ret;
+}
+
+static void wl_cfg80211_wbtext_set_wnm_maxidle(struct bcm_cfg80211 *cfg, struct net_device *dev)
+{
+ keepalives_max_idle_t keepalive = {0, 0, 0, 0};
+ s32 bssidx, error;
+ int wnm_maxidle = 0;
+ struct wl_connect_info *conn_info = wl_to_conn(cfg);
+
+ /* Does the AP support WNM max idle? */
+ if (bcm_parse_tlvs(conn_info->resp_ie, conn_info->resp_ie_len,
+ DOT11_MNG_BSS_MAX_IDLE_PERIOD_ID) != NULL) {
+ error = wldev_iovar_getint(dev, "wnm_maxidle", &wnm_maxidle);
+ if (error < 0) {
+ WL_ERR(("failed to get wnm max idle period : %d\n", error));
+ }
+ }
+
+ WL_DBG(("wnm max idle period : %d\n", wnm_maxidle));
+
+ /* if wnm maxidle has valid period, set it as keep alive */
+ if (wnm_maxidle > 0) {
+ keepalive.keepalive_count = 1;
+ }
+
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) >= 0) {
+ error = wldev_iovar_setbuf_bsscfg(dev, "wnm_keepalives_max_idle", &keepalive,
+ sizeof(keepalives_max_idle_t), cfg->ioctl_buf, WLC_IOCTL_SMLEN,
+ bssidx, &cfg->ioctl_buf_sync);
+ if (error < 0) {
+ if (error == BCME_BADARG) {
+ WL_ERR(("set wnm_keepalive with invalid arguments\n"));
+ } else {
+ WL_ERR(("set wnm_keepalives_max_idle failed : %d\n", error));
+ }
+ }
+ }
+}
+
+static int
+wl_cfg80211_recv_nbr_resp(struct net_device *dev, uint8 *body, uint body_len)
+{
+ dot11_rm_action_t *rm_rep;
+ bcm_tlv_t *tlvs;
+ uint tlv_len;
+ int i, error;
+ dot11_neighbor_rep_ie_t *nbr_rep_ie;
+ chanspec_t ch;
+ wl_roam_channel_list_t channel_list;
+ char iobuf[WLC_IOCTL_SMLEN];
+
+ if (body_len < DOT11_RM_ACTION_LEN) {
+ WL_ERR(("Received Neighbor Report frame with incorrect length %d\n",
+ body_len));
+ return BCME_ERROR;
+ }
+
+ rm_rep = (dot11_rm_action_t *)body;
+ WL_DBG(("received neighbor report (token = %d)\n", rm_rep->token));
+
+ tlvs = (bcm_tlv_t *)&rm_rep->data[0];
+
+ tlv_len = body_len - DOT11_RM_ACTION_LEN;
+
+ while (tlvs && tlvs->id == DOT11_MNG_NEIGHBOR_REP_ID) {
+ nbr_rep_ie = (dot11_neighbor_rep_ie_t *)tlvs;
+
+ if (nbr_rep_ie->len < DOT11_NEIGHBOR_REP_IE_FIXED_LEN) {
+ WL_ERR(("malformed Neighbor Report element with length %d\n",
+ nbr_rep_ie->len));
+ tlvs = bcm_next_tlv(tlvs, &tlv_len);
+ continue;
+ }
+
+ ch = CH20MHZ_CHSPEC(nbr_rep_ie->channel);
+ WL_DBG(("ch:%d, bssid:"MACDBG"\n",
+ ch, MAC2STRDBG(nbr_rep_ie->bssid.octet)));
+
+ /* get RCC list */
+ error = wldev_iovar_getbuf(dev, "roamscan_channels", 0, 0,
+ (void *)&channel_list, sizeof(channel_list), NULL);
+ if (error) {
+ WL_ERR(("Failed to get roamscan channels, error = %d\n", error));
+ return BCME_ERROR;
+ }
+
+ /* update RCC */
+ if (channel_list.n < MAX_ROAM_CHANNEL) {
+ for (i = 0; i < channel_list.n; i++) {
+ if (channel_list.channels[i] == ch) {
+ break;
+ }
+ }
+ if (i == channel_list.n) {
+ channel_list.channels[channel_list.n] = ch;
+ channel_list.n++;
+ }
+ }
+
+ /* set RCC list */
+ error = wldev_iovar_setbuf(dev, "roamscan_channels", &channel_list,
+ sizeof(channel_list), iobuf, sizeof(iobuf), NULL);
+ if (error) {
+ WL_DBG(("Failed to set roamscan channels, error = %d\n", error));
+ }
+
+ tlvs = bcm_next_tlv(tlvs, &tlv_len);
+ }
+
+ return BCME_OK;
+}
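+
+/*
+ * Dispatch sketch (illustrative): this handler expects the action-frame body
+ * of a radio measurement report, e.g. from the rx-mgmt path:
+ *
+ *   if (body_len >= DOT11_RM_ACTION_LEN && body[0] == DOT11_ACTION_CAT_RRM)
+ *       (void)wl_cfg80211_recv_nbr_resp(ndev, body, body_len);
+ */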
+#endif /* WBTEXT */
+#ifdef SUPPORT_SET_CAC
+void
+wl_cfg80211_set_cac(struct bcm_cfg80211 *cfg, int enable)
+{
+ int ret = 0;
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+
+ WL_DBG(("cac enable %d\n", enable));
+ if (!dhd) {
+ WL_ERR(("dhd is NULL\n"));
+ return;
+ }
+ if ((ret = dhd_wl_ioctl_set_intiovar(dhd, "cac", enable,
+ WLC_SET_VAR, TRUE, 0)) < 0) {
+ WL_ERR(("Failed set CAC, ret=%d\n", ret));
+ } else {
+ WL_DBG(("CAC set successfully\n"));
+ }
+ return;
+}
+#endif /* SUPPORT_SET_CAC */
+
+#ifdef SUPPORT_RSSI_SUM_REPORT
+int
+wl_get_rssi_per_ant(struct net_device *dev, char *ifname, char *peer_mac, void *param)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ wl_rssi_ant_mimo_t *get_param = (wl_rssi_ant_mimo_t *)param;
+ rssi_ant_param_t *set_param = NULL;
+ struct net_device *ifdev = NULL;
+ char iobuf[WLC_IOCTL_SMLEN];
+ int err = BCME_OK;
+ int iftype = 0;
+
+ bzero(iobuf, WLC_IOCTL_SMLEN);
+
+ /* Check the interface type */
+ ifdev = wl_get_netdev_by_name(cfg, ifname);
+ if (ifdev == NULL) {
+ WL_ERR(("Could not find net_device for ifname:%s\n", ifname));
+ err = BCME_BADARG;
+ goto fail;
+ }
+
+ iftype = ifdev->ieee80211_ptr->iftype;
+ if (iftype == NL80211_IFTYPE_AP || iftype == NL80211_IFTYPE_P2P_GO) {
+ if (peer_mac) {
+ set_param = (rssi_ant_param_t *)MALLOCZ(cfg->osh, sizeof(rssi_ant_param_t));
+ if (set_param == NULL) {
+ WL_ERR(("rssi_ant_param alloc failed\n"));
+ err = BCME_NOMEM;
+ goto fail;
+ }
+ err = wl_cfg80211_ether_atoe(peer_mac, &set_param->ea);
+ if (!err) {
+ WL_ERR(("Invalid Peer MAC format\n"));
+ err = BCME_BADARG;
+ goto fail;
+ }
+ } else {
+ WL_ERR(("Peer MAC is not provided for iftype %d\n", iftype));
+ err = BCME_BADARG;
+ goto fail;
+ }
+ }
+
+ err = wldev_iovar_getbuf(ifdev, "phy_rssi_ant", peer_mac ?
+ (void *)&(set_param->ea) : NULL, peer_mac ? ETHER_ADDR_LEN : 0,
+ (void *)iobuf, sizeof(iobuf), NULL);
+ if (unlikely(err)) {
+ WL_ERR(("Failed to get rssi info, err=%d\n", err));
+ } else {
+ memcpy(get_param, iobuf, sizeof(wl_rssi_ant_mimo_t));
+ if (get_param->count == 0) {
+ WL_ERR(("Not supported on this chip\n"));
+ err = BCME_UNSUPPORTED;
+ }
+ }
+
+fail:
+ if (set_param) {
+ MFREE(cfg->osh, set_param, sizeof(rssi_ant_param_t));
+ }
+
+ return err;
+}
+
+int
+wl_get_rssi_logging(struct net_device *dev, void *param)
+{
+ rssilog_get_param_t *get_param = (rssilog_get_param_t *)param;
+ char iobuf[WLC_IOCTL_SMLEN];
+ int err = BCME_OK;
+
+ bzero(iobuf, WLC_IOCTL_SMLEN);
+ bzero(get_param, sizeof(*get_param));
+ err = wldev_iovar_getbuf(dev, "rssilog", NULL, 0, (void *)iobuf,
+ sizeof(iobuf), NULL);
+ if (err) {
+ WL_ERR(("Failed to get rssi logging info, err=%d\n", err));
+ } else {
+ memcpy(get_param, iobuf, sizeof(*get_param));
+ }
+
+ return err;
+}
+
+int
+wl_set_rssi_logging(struct net_device *dev, void *param)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ rssilog_set_param_t *set_param = (rssilog_set_param_t *)param;
+ int err;
+
+ err = wldev_iovar_setbuf(dev, "rssilog", set_param,
+ sizeof(*set_param), cfg->ioctl_buf, WLC_IOCTL_SMLEN,
+ &cfg->ioctl_buf_sync);
+ if (err) {
+ WL_ERR(("Failed to set rssi logging param, err=%d\n", err));
+ }
+
+ return err;
+}
+#endif /* SUPPORT_RSSI_SUM_REPORT */
+/* Function to flush the FW preserve buffer content.
+* The buffer content is sent to the host in the form of events.
+*/
+void
+wl_flush_fw_log_buffer(struct net_device *dev, uint32 logset_mask)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+ int i;
+ int err = 0;
+ u8 buf[WLC_IOCTL_SMLEN] = {0};
+ wl_el_set_params_t set_param;
+
+ /* Set the size of data to retrieve */
+ memset(&set_param, 0, sizeof(set_param));
+ set_param.size = WLC_IOCTL_SMLEN;
+
+ for (i = 0; i < dhd->event_log_max_sets; i++)
+ {
+ if ((0x01u << i) & logset_mask) {
+ set_param.set = i;
+ err = wldev_iovar_setbuf(dev, "event_log_get", &set_param,
+ sizeof(struct wl_el_set_params_s), buf, WLC_IOCTL_SMLEN,
+ NULL);
+ if (err) {
+ WL_DBG(("Failed to get fw preserve logs, err=%d\n", err));
+ }
+ }
+ }
+}
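+
+/*
+ * Mask sketch (illustrative): logset_mask is a bitmap of event-log set
+ * indices, so 0x5 flushes sets 0 and 2 only:
+ *
+ *   wl_flush_fw_log_buffer(ndev, 0x5);
+ */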
+#ifdef USE_WFA_CERT_CONF
+extern int g_frameburst;
+#endif /* USE_WFA_CERT_CONF */
+
+int
+wl_cfg80211_set_frameburst(struct bcm_cfg80211 *cfg, bool enable)
+{
+ int ret = BCME_OK;
+ int val = enable ? 1 : 0;
+
+#ifdef USE_WFA_CERT_CONF
+ if (!g_frameburst) {
+ WL_DBG(("Skip setting frameburst\n"));
+ return 0;
+ }
+#endif /* USE_WFA_CERT_CONF */
+
+ WL_DBG(("Set frameburst %d\n", val));
+ ret = wldev_ioctl_set(bcmcfg_to_prmry_ndev(cfg), WLC_SET_FAKEFRAG, &val, sizeof(val));
+ if (ret < 0) {
+ WL_ERR(("Failed set frameburst, ret=%d\n", ret));
+ } else {
+ WL_INFORM_MEM(("frameburst is %s\n", enable ? "enabled" : "disabled"));
+ }
+
+ return ret;
+}
+
+s32
+wl_cfg80211_set_dbg_verbose(struct net_device *ndev, u32 level)
+{
+ /* configure verbose level for debugging */
+ if (level) {
+ /* Enable increased verbose */
+ wl_dbg_level |= WL_DBG_DBG;
+ } else {
+ /* Disable */
+ wl_dbg_level &= ~WL_DBG_DBG;
+ }
+ WL_INFORM(("debug verbose set to %d\n", level));
+
+ return BCME_OK;
+}
+
+const u8 *
+wl_find_attribute(const u8 *buf, u16 len, u16 element_id)
+{
+ const u8 *attrib;
+ u16 attrib_id;
+ u16 attrib_len;
+
+ if (!buf) {
+ WL_ERR(("buf null\n"));
+ return NULL;
+ }
+
+ attrib = buf;
+ while (len >= 4) {
+ /* attribute id */
+ attrib_id = *attrib++ << 8;
+ attrib_id |= *attrib++;
+ len -= 2;
+
+ /* 2-byte big-endian length */
+ attrib_len = *attrib++ << 8;
+ attrib_len |= *attrib++;
+
+ len -= 2;
+ if (attrib_id == element_id) {
+ /* This will point to start of subelement attrib after
+ * attribute id & len
+ */
+ return attrib;
+ }
+ if (len >= attrib_len) {
+ len -= attrib_len; /* for the remaining subelt fields */
+ WL_DBG(("Attribue:%4x attrib_len:%d rem_len:%d\n",
+ attrib_id, attrib_len, len));
+
+ /* Go to next subelement */
+ attrib += attrib_len;
+ } else {
+ WL_ERR(("Incorrect Attribue:%4x attrib_len:%d\n",
+ attrib_id, attrib_len));
+ return NULL;
+ }
+ }
+ return NULL;
+}
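+
+/*
+ * Attribute walk sketch (illustrative bytes): WSC attributes carry 16-bit
+ * big-endian id/len pairs. Searching for the Device Password ID (0x1012):
+ *
+ *   const u8 wsc[] = {
+ *       0x10, 0x4a, 0x00, 0x01, 0x10,       // Version = 0x10
+ *       0x10, 0x12, 0x00, 0x02, 0x00, 0x04  // Device Password ID = PBC
+ *   };
+ *   const u8 *p = wl_find_attribute(wsc, sizeof(wsc), 0x1012);
+ *   if (p)
+ *       WL_DBG(("dev pwd id 0x%04x\n", (p[0] << 8) | p[1]));
+ */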
+
+uint8 wl_cfg80211_get_bus_state(struct bcm_cfg80211 *cfg)
+{
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+ WL_INFORM(("dhd->hang_was_sent = %d and busstate = %d\n",
+ dhd->hang_was_sent, dhd->busstate));
+ return ((dhd->busstate == DHD_BUS_DOWN) || dhd->hang_was_sent);
+}
+
+#ifdef WL_WPS_SYNC
+static void wl_wps_reauth_timeout(unsigned long data)
+{
+ struct net_device *ndev = (struct net_device *)data;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ s32 inst;
+ unsigned long flags;
+
+ WL_CFG_WPS_SYNC_LOCK(&cfg->wps_sync, flags);
+ inst = wl_get_wps_inst_match(cfg, ndev);
+ if (inst >= 0) {
+ WL_ERR(("[%s][WPS] Reauth Timeout Inst:%d! state:%d\n",
+ ndev->name, inst, cfg->wps_session[inst].state));
+ if (cfg->wps_session[inst].state == WPS_STATE_REAUTH_WAIT) {
+ /* Session should get deleted on success (link up) or
+ * in the deauth case. In case link reassoc failed,
+ * clear the state here.
+ */
+ WL_ERR(("[%s][WPS] Reauth Timeout Inst:%d!\n",
+ ndev->name, inst));
+ cfg->wps_session[inst].state = WPS_STATE_IDLE;
+ cfg->wps_session[inst].in_use = false;
+ }
+ }
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
+}
+
+static void wl_init_wps_reauth_sm(struct bcm_cfg80211 *cfg)
+{
+ /* Only two instances are supported for now: one for
+ * the infra STA and the other for the infra STA/GC.
+ */
+ int i = 0;
+ struct net_device *pdev = bcmcfg_to_prmry_ndev(cfg);
+
+ spin_lock_init(&cfg->wps_sync);
+ for (i = 0; i < WPS_MAX_SESSIONS; i++) {
+ /* Init the WPS reauth timer */
+ init_timer_compat(&cfg->wps_session[i].timer, wl_wps_reauth_timeout, pdev);
+ cfg->wps_session[i].in_use = false;
+ cfg->wps_session[i].state = WPS_STATE_IDLE;
+ }
+}
+
+static void wl_deinit_wps_reauth_sm(struct bcm_cfg80211 *cfg)
+{
+ int i = 0;
+
+ for (i = 0; i < WPS_MAX_SESSIONS; i++) {
+ cfg->wps_session[i].in_use = false;
+ cfg->wps_session[i].state = WPS_STATE_IDLE;
+ del_timer_sync(&cfg->wps_session[i].timer);
+ }
+}
+
+static s32
+wl_get_free_wps_inst(struct bcm_cfg80211 *cfg)
+{
+ int i;
+
+ for (i = 0; i < WPS_MAX_SESSIONS; i++) {
+ if (!cfg->wps_session[i].in_use) {
+ return i;
+ }
+ }
+ return BCME_ERROR;
+}
+
+static s32
+wl_get_wps_inst_match(struct bcm_cfg80211 *cfg, struct net_device *ndev)
+{
+ int i;
+
+ for (i = 0; i < WPS_MAX_SESSIONS; i++) {
+ if ((cfg->wps_session[i].in_use) &&
+ (ndev == cfg->wps_session[i].ndev)) {
+ return i;
+ }
+ }
+
+ return BCME_ERROR;
+}
+
+static s32
+wl_wps_session_add(struct net_device *ndev, u16 mode, u8 *mac_addr)
+{
+ s32 inst;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ unsigned long flags;
+
+ WL_CFG_WPS_SYNC_LOCK(&cfg->wps_sync, flags);
+ /* Fetch and initialize a wps instance */
+ inst = wl_get_free_wps_inst(cfg);
+ if (inst == BCME_ERROR) {
+ WL_ERR(("[WPS] No free insance\n"));
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
+ return BCME_ERROR;
+ }
+ cfg->wps_session[inst].in_use = true;
+ cfg->wps_session[inst].state = WPS_STATE_STARTED;
+ cfg->wps_session[inst].ndev = ndev;
+ cfg->wps_session[inst].mode = mode;
+ /* return check not required since both buffer lens are same */
+ (void)memcpy_s(cfg->wps_session[inst].peer_mac, ETH_ALEN, mac_addr, ETH_ALEN);
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
+
+ WL_INFORM_MEM(("[%s][WPS] session created. Peer: " MACDBG "\n",
+ ndev->name, MAC2STRDBG(mac_addr)));
+ return BCME_OK;
+}
+
+static void
+wl_wps_session_del(struct net_device *ndev)
+{
+ s32 inst;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ unsigned long flags;
+ u16 cur_state;
+
+ WL_CFG_WPS_SYNC_LOCK(&cfg->wps_sync, flags);
+
+ /* Get current instance for the given ndev */
+ inst = wl_get_wps_inst_match(cfg, ndev);
+ if (inst == BCME_ERROR) {
+ WL_DBG(("[WPS] instance match NOT found\n"));
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
+ return;
+ }
+
+ cur_state = cfg->wps_session[inst].state;
+ if (cur_state != WPS_STATE_DONE) {
+ WL_DBG(("[WPS] wrong state:%d\n", cur_state));
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
+ return;
+ }
+
+ /* Mark this as unused */
+ cfg->wps_session[inst].in_use = false;
+ cfg->wps_session[inst].state = WPS_STATE_IDLE;
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
+
+ /* Ensure this API is called from sleepable context. */
+ del_timer_sync(&cfg->wps_session[inst].timer);
+
+ WL_INFORM_MEM(("[%s][WPS] session deleted\n", ndev->name));
+}
+
+static void
+wl_wps_handle_ifdel(struct net_device *ndev)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ unsigned long flags;
+ u16 cur_state;
+ s32 inst;
+
+ WL_CFG_WPS_SYNC_LOCK(&cfg->wps_sync, flags);
+ inst = wl_get_wps_inst_match(cfg, ndev);
+ if (inst == BCME_ERROR) {
+ WL_DBG(("[WPS] instance match NOT found\n"));
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
+ return;
+ }
+ cur_state = cfg->wps_session[inst].state;
+ cfg->wps_session[inst].state = WPS_STATE_DONE;
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
+
+ WL_INFORM_MEM(("[%s][WPS] state:%x\n", ndev->name, cur_state));
+ if (cur_state > WPS_STATE_IDLE) {
+ wl_wps_session_del(ndev);
+ }
+}
+
+static s32
+wl_wps_handle_sta_linkdown(struct net_device *ndev, u16 inst)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ unsigned long flags;
+ u16 cur_state;
+ bool wps_done = false;
+
+ WL_CFG_WPS_SYNC_LOCK(&cfg->wps_sync, flags);
+ cur_state = cfg->wps_session[inst].state;
+ if (cur_state == WPS_STATE_REAUTH_WAIT) {
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
+ wl_clr_drv_status(cfg, CONNECTED, ndev);
+ wl_clr_drv_status(cfg, DISCONNECTING, ndev);
+ WL_INFORM_MEM(("[%s][WPS] REAUTH link down\n", ndev->name));
+ /* Drop the link down event while we are waiting for reauth */
+ return BCME_UNSUPPORTED;
+ } else if (cur_state == WPS_STATE_STARTED) {
+ /* Link down before reaching EAP-FAIL. End WPS session */
+ cfg->wps_session[inst].state = WPS_STATE_DONE;
+ wps_done = true;
+ WL_INFORM_MEM(("[%s][WPS] link down after wps start\n", ndev->name));
+ } else {
+ WL_DBG(("[%s][WPS] link down in state:%d\n",
+ ndev->name, cur_state));
+ }
+
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
+
+ if (wps_done) {
+ wl_wps_session_del(ndev);
+ }
+ return BCME_OK;
+}
+
+static s32
+wl_wps_handle_peersta_linkdown(struct net_device *ndev, u16 inst, const u8 *peer_mac)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ unsigned long flags;
+ u16 cur_state;
+ s32 ret = BCME_OK;
+ bool wps_done = false;
+
+ WL_CFG_WPS_SYNC_LOCK(&cfg->wps_sync, flags);
+ cur_state = cfg->wps_session[inst].state;
+
+ if (!peer_mac) {
+ WL_ERR(("Invalid arg\n"));
+ ret = BCME_ERROR;
+ goto exit;
+ }
+
+ /* AP/GO can have multiple clients, so validate the peer_mac addr
+ * and ensure states are updated only for the right peer.
+ */
+ if (memcmp(cfg->wps_session[inst].peer_mac, peer_mac, ETH_ALEN)) {
+ /* Mac addr not matching. Ignore. */
+ WL_DBG(("[%s][WPS] No active WPS session"
+ "for the peer:" MACDBG "\n", ndev->name, MAC2STRDBG(peer_mac)));
+ ret = BCME_OK;
+ goto exit;
+ }
+ if (cur_state == WPS_STATE_REAUTH_WAIT) {
+ WL_INFORM_MEM(("[%s][WPS] REAUTH link down."
+ " Peer: " MACDBG "\n",
+ ndev->name, MAC2STRDBG(peer_mac)));
+#ifdef NOT_YET
+ /* Link down during the REAUTH state is expected. However,
+ * if this is sent up, the hostapd state machine issues a
+ * deauth and that may pre-empt the WPS reauth state
+ * at the GC.
+ */
+ WL_INFORM_MEM(("[%s][WPS] REAUTH link down. Ignore."
+ " for client:" MACDBG "\n",
+ ndev->name, MAC2STRDBG(peer_mac)));
+ ret = BCME_UNSUPPORTED;
+#endif
+ } else if (cur_state == WPS_STATE_STARTED) {
+ /* Link down before reaching REAUTH_WAIT state. WPS
+ * session ended.
+ */
+ cfg->wps_session[inst].state = WPS_STATE_DONE;
+ WL_INFORM_MEM(("[%s][WPS] link down after wps start"
+ " client:" MACDBG "\n",
+ ndev->name, MAC2STRDBG(peer_mac)));
+ wps_done = true;
+ /* Lock is dropped at exit below; the session delete runs after the unlock */
+ ret = BCME_OK;
+ } else {
+ WL_ERR(("[%s][WPS] Unsupported state:%d",
+ ndev->name, cur_state));
+ ret = BCME_ERROR;
+ }
+exit:
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
+ if (wps_done) {
+ wl_wps_session_del(ndev);
+ }
+ return ret;
+}
+
+static s32
+wl_wps_handle_sta_linkup(struct net_device *ndev, u16 inst)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ unsigned long flags;
+ u16 cur_state;
+ s32 ret = BCME_OK;
+ bool wps_done = false;
+
+ WL_CFG_WPS_SYNC_LOCK(&cfg->wps_sync, flags);
+ cur_state = cfg->wps_session[inst].state;
+ if (cur_state == WPS_STATE_REAUTH_WAIT) {
+ /* WPS session succeeded. del session. */
+ cfg->wps_session[inst].state = WPS_STATE_DONE;
+ wps_done = true;
+ WL_INFORM_MEM(("[%s][WPS] WPS_REAUTH link up (WPS DONE)\n", ndev->name));
+ ret = BCME_OK;
+ } else {
+ WL_ERR(("[%s][WPS] unexpected link up in state:%d \n",
+ ndev->name, cur_state));
+ ret = BCME_ERROR;
+ }
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
+ if (wps_done) {
+ wl_wps_session_del(ndev);
+ }
+ return ret;
+}
+
+static s32
+wl_wps_handle_peersta_linkup(struct net_device *ndev, u16 inst, const u8 *peer_mac)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ unsigned long flags;
+ u16 cur_state;
+ s32 ret = BCME_OK;
+
+ WL_CFG_WPS_SYNC_LOCK(&cfg->wps_sync, flags);
+ cur_state = cfg->wps_session[inst].state;
+
+ /* For AP case, check whether call came for right peer */
+ if (!peer_mac ||
+ memcmp(cfg->wps_session[inst].peer_mac, peer_mac, ETH_ALEN)) {
+ WL_ERR(("[WPS] macaddr mismatch\n"));
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
+ /* Mac addr not matching. Ignore. */
+ return BCME_ERROR;
+ }
+
+ if (cur_state == WPS_STATE_REAUTH_WAIT) {
+ WL_INFORM_MEM(("[%s][WPS] REAUTH link up\n", ndev->name));
+ ret = BCME_OK;
+ } else {
+ WL_INFORM_MEM(("[%s][WPS] unexpected link up in state:%d \n",
+ ndev->name, cur_state));
+ ret = BCME_ERROR;
+ }
+
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
+
+ return ret;
+}
+
+static s32
+wl_wps_handle_authorize(struct net_device *ndev, u16 inst, const u8 *peer_mac)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ unsigned long flags;
+ u16 cur_state;
+ bool wps_done = false;
+ s32 ret = BCME_OK;
+
+ WL_CFG_WPS_SYNC_LOCK(&cfg->wps_sync, flags);
+ cur_state = cfg->wps_session[inst].state;
+
+ /* For AP case, check whether call came for right peer */
+ if (!peer_mac ||
+ memcmp(cfg->wps_session[inst].peer_mac, peer_mac, ETH_ALEN)) {
+ WL_ERR(("[WPS] macaddr mismatch\n"));
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
+ /* Mac addr not matching. Ignore. */
+ return BCME_ERROR;
+ }
+
+ if (cur_state == WPS_STATE_REAUTH_WAIT) {
+ /* WPS session succeeded. del session. */
+ cfg->wps_session[inst].state = WPS_STATE_DONE;
+ wps_done = true;
+ WL_INFORM_MEM(("[%s][WPS] Authorize done (WPS DONE)\n", ndev->name));
+ ret = BCME_OK;
+ } else {
+ WL_INFORM_MEM(("[%s][WPS] unexpected Authorize in state:%d \n",
+ ndev->name, cur_state));
+ ret = BCME_ERROR;
+ }
+
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
+ if (wps_done) {
+ wl_wps_session_del(ndev);
+ }
+ return ret;
+}
+
+static s32
+wl_wps_handle_reauth(struct net_device *ndev, u16 inst, const u8 *peer_mac)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ unsigned long flags;
+ u16 cur_state;
+ u16 mode;
+ s32 ret = BCME_OK;
+
+ WL_CFG_WPS_SYNC_LOCK(&cfg->wps_sync, flags);
+ cur_state = cfg->wps_session[inst].state;
+ mode = cfg->wps_session[inst].mode;
+
+ if (((mode == WL_MODE_BSS) && (cur_state == WPS_STATE_STARTED)) ||
+ ((mode == WL_MODE_AP) && (cur_state == WPS_STATE_M8_SENT))) {
+ /* Move to reauth wait */
+ cfg->wps_session[inst].state = WPS_STATE_REAUTH_WAIT;
+ /* Use ndev to find the wps instance which fired the timer */
+ timer_set_private(&cfg->wps_session[inst].timer, ndev);
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
+ mod_timer(&cfg->wps_session[inst].timer,
+ jiffies + msecs_to_jiffies(WL_WPS_REAUTH_TIMEOUT));
+ WL_INFORM_MEM(("[%s][WPS] STATE_REAUTH_WAIT mode:%d Peer: " MACDBG "\n",
+ ndev->name, mode, MAC2STRDBG(peer_mac)));
+ return BCME_OK;
+ } else {
+ /* 802.1x cases */
+ WL_DBG(("[%s][WPS] EAP-FAIL\n", ndev->name));
+ }
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
+ return ret;
+}
+
+static s32
+wl_wps_handle_disconnect(struct net_device *ndev, u16 inst, const u8 *peer_mac)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ unsigned long flags;
+ u16 cur_state;
+ s32 ret = BCME_OK;
+
+ WL_CFG_WPS_SYNC_LOCK(&cfg->wps_sync, flags);
+ cur_state = cfg->wps_session[inst].state;
+ /* If Disconnect command comes from user space for STA/GC,
+ * respond with event without waiting for event from fw as
+ * it would be dropped by the WPS_SYNC code.
+ */
+ if (cur_state == WPS_STATE_REAUTH_WAIT) {
+ if (ETHER_ISBCAST(peer_mac)) {
+ WL_INFORM_MEM(("[WPS] Bcast peer. Do nothing.\n"));
+ } else {
+ /* Notify link down */
+ CFG80211_DISCONNECTED(ndev,
+ WLAN_REASON_DEAUTH_LEAVING, NULL, 0,
+ true, GFP_ATOMIC);
+ WL_INFORM_MEM(("[WPS] Disconnect event notified\n"));
+ }
+ } else {
+ WL_DBG(("[%s][WPS] Not valid state to report disconnected:%d",
+ ndev->name, cur_state));
+ ret = BCME_UNSUPPORTED;
+ }
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
+ return ret;
+}
+
+static s32
+wl_wps_handle_disconnect_client(struct net_device *ndev, u16 inst, const u8 *peer_mac)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ unsigned long flags;
+ u16 cur_state;
+ s32 ret = BCME_OK;
+ bool wps_done = false;
+
+ WL_CFG_WPS_SYNC_LOCK(&cfg->wps_sync, flags);
+ cur_state = cfg->wps_session[inst].state;
+ /* For GO/AP, ignore disconnect client during reauth state */
+ if (cur_state == WPS_STATE_REAUTH_WAIT) {
+ if (ETHER_ISBCAST(peer_mac)) {
+ /* If there is broadcast deauth, then mark wps session as ended */
+ cfg->wps_session[inst].state = WPS_STATE_DONE;
+ wps_done = true;
+ WL_INFORM_MEM(("[%s][WPS] BCAST deauth. WPS stopped.\n", ndev->name));
+ ret = BCME_OK;
+ goto exit;
+ } else if (!(memcmp(cfg->wps_session[inst].peer_mac,
+ peer_mac, ETH_ALEN))) {
+ WL_ERR(("[%s][WPS] Drop disconnect client\n", ndev->name));
+ ret = BCME_UNSUPPORTED;
+ }
+ }
+
+exit:
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
+ if (wps_done) {
+ wl_wps_session_del(ndev);
+ }
+ return ret;
+}
+
+static s32
+wl_wps_handle_connect_fail(struct net_device *ndev, u16 inst)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ unsigned long flags;
+ u16 cur_state;
+ bool wps_done = false;
+
+ WL_CFG_WPS_SYNC_LOCK(&cfg->wps_sync, flags);
+ cur_state = cfg->wps_session[inst].state;
+ if (cur_state == WPS_STATE_REAUTH_WAIT) {
+ cfg->wps_session[inst].state = WPS_STATE_DONE;
+ wps_done = true;
+ WL_INFORM_MEM(("[%s][WPS] Connect fail. WPS stopped.\n",
+ ndev->name));
+ } else {
+ WL_ERR(("[%s][WPS] Connect fail. state:%d\n",
+ ndev->name, cur_state));
+ }
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
+ if (wps_done) {
+ wl_wps_session_del(ndev);
+ }
+ return BCME_OK;
+}
+
+static s32
+wl_wps_handle_m8_sent(struct net_device *ndev, u16 inst, const u8 *peer_mac)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ unsigned long flags;
+ u16 cur_state;
+ s32 ret = BCME_OK;
+
+ WL_CFG_WPS_SYNC_LOCK(&cfg->wps_sync, flags);
+ cur_state = cfg->wps_session[inst].state;
+
+ if (cur_state == WPS_STATE_STARTED) {
+ /* Move to M8 sent state */
+ cfg->wps_session[inst].state = WPS_STATE_M8_SENT;
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
+ return BCME_OK;
+ } else {
+ /* 802.1x cases */
+ WL_DBG(("[%s][WPS] Not valid state to send M8\n", ndev->name));
+ }
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
+ return ret;
+}
+
+s32
+wl_wps_session_update(struct net_device *ndev, u16 state, const u8 *peer_mac)
+{
+ s32 inst;
+ u16 mode;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ s32 ret = BCME_ERROR;
+ unsigned long flags;
+
+ WL_CFG_WPS_SYNC_LOCK(&cfg->wps_sync, flags);
+ /* Get current instance for the given ndev */
+ inst = wl_get_wps_inst_match(cfg, ndev);
+ if (inst == BCME_ERROR) {
+ /* No active WPS session. Do Nothing. */
+ WL_DBG(("[%s][WPS] No matching instance.\n", ndev->name));
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
+ return BCME_NOTFOUND;
+ }
+ mode = cfg->wps_session[inst].mode;
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
+
+ WL_DBG(("[%s][WPS] state:%d mode:%d Peer: " MACDBG "\n",
+ ndev->name, state, mode, MAC2STRDBG(peer_mac)));
+
+ switch (state) {
+ case WPS_STATE_M8_RECVD:
+ {
+ /* Occasionally, due to a race between the ctrl and data
+ * paths, a deauth ind is received before EAP-FAIL. So on
+ * receiving M8 on the GC, move to REAUTH WAIT and ignore
+ * any deauth ind until the reauth timeout expires.
+ * Kick off a timer to monitor the reauth status.
+ */
+ if (mode == WL_MODE_BSS) {
+ ret = wl_wps_handle_reauth(ndev, inst, peer_mac);
+ } else {
+ /* Nothing to be done for AP/GO mode */
+ ret = BCME_OK;
+ }
+ break;
+ }
+ case WPS_STATE_M8_SENT:
+ {
+ /* Maintain the M8-sent state to verify that
+ * the EAP-FAIL sent is valid
+ */
+ if (mode == WL_MODE_AP) {
+ ret = wl_wps_handle_m8_sent(ndev, inst, peer_mac);
+ } else {
+ /* Nothing to be done for STA/GC mode */
+ ret = BCME_OK;
+ }
+ break;
+ }
+ case WPS_STATE_EAP_FAIL:
+ {
+ /* Move to REAUTH WAIT following EAP-FAIL TX on GO/AP.
+ * Kickoff a timer to monitor reauth status
+ */
+ if (mode == WL_MODE_AP) {
+ ret = wl_wps_handle_reauth(ndev, inst, peer_mac);
+ } else {
+ /* Nothing to be done for STA/GC mode */
+ ret = BCME_OK;
+ }
+ break;
+ }
+ case WPS_STATE_LINKDOWN:
+ {
+ if (mode == WL_MODE_BSS) {
+ ret = wl_wps_handle_sta_linkdown(ndev, inst);
+ } else if (mode == WL_MODE_AP) {
+ /* Take action only for matching peer mac */
+ if (!memcmp(cfg->wps_session[inst].peer_mac, peer_mac, ETH_ALEN)) {
+ ret = wl_wps_handle_peersta_linkdown(ndev, inst, peer_mac);
+ }
+ }
+ break;
+ }
+ case WPS_STATE_LINKUP:
+ {
+ if (mode == WL_MODE_BSS) {
+ ret = wl_wps_handle_sta_linkup(ndev, inst);
+ } else if (mode == WL_MODE_AP) {
+ /* Take action only for matching peer mac */
+ if (!memcmp(cfg->wps_session[inst].peer_mac, peer_mac, ETH_ALEN)) {
+ ret = wl_wps_handle_peersta_linkup(ndev, inst, peer_mac);
+ }
+ }
+ break;
+ }
+ case WPS_STATE_DISCONNECT_CLIENT:
+ {
+ /* Disconnect STA/GC command from user space */
+ if (mode == WL_MODE_AP) {
+ ret = wl_wps_handle_disconnect_client(ndev, inst, peer_mac);
+ } else {
+ WL_ERR(("[WPS] Unsupported mode %d\n", mode));
+ }
+ break;
+ }
+ case WPS_STATE_DISCONNECT:
+ {
+ /* Disconnect command on STA/GC interface */
+ if (mode == WL_MODE_BSS) {
+ ret = wl_wps_handle_disconnect(ndev, inst, peer_mac);
+ }
+ break;
+ }
+ case WPS_STATE_CONNECT_FAIL:
+ {
+ if (mode == WL_MODE_BSS) {
+ ret = wl_wps_handle_connect_fail(ndev, inst);
+ } else {
+ WL_ERR(("[WPS] Unsupported mode %d\n", mode));
+ }
+ break;
+ }
+ case WPS_STATE_AUTHORIZE:
+ {
+ if (mode == WL_MODE_AP) {
+ /* Take action only for matching peer mac */
+ if (!memcmp(cfg->wps_session[inst].peer_mac, peer_mac, ETH_ALEN)) {
+ ret = wl_wps_handle_authorize(ndev, inst, peer_mac);
+ } else {
+ WL_INFORM_MEM(("[WPS] Authorize Request for wrong peer\n"));
+ }
+ }
+ break;
+ }
+
+ default:
+ WL_ERR(("[WPS] Unsupported state:%d mode:%d\n", state, mode));
+ ret = BCME_ERROR;
+ }
+
+ return ret;
+}
+
+#define EAP_EXP_ATTRIB_DATA_OFFSET 14
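+/* Note on the offset above (a sketch, assuming the standard EAP-WSC layout):
+ * EAP header code(1) + id(1) + length(2) + type(1) = 5 bytes, expanded
+ * header vendor-id(3) + vendor-type(4) = 7 bytes, WSC opcode(1) + flags(1)
+ * = 2 bytes; 5 + 7 + 2 = 14 bytes to the attribute data.
+ */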
+void
+wl_handle_wps_states(struct net_device *ndev, u8 *pkt, u16 len, bool direction)
+{
+ eapol_header_t *eapol_hdr;
+ bool tx_packet = direction;
+ u16 eapol_type;
+ u16 mode;
+ u8 *peer_mac;
+
+ if (!ndev || !pkt) {
+ WL_ERR(("[WPS] Invalid arg\n"));
+ return;
+ }
+
+ if (len < (ETHER_HDR_LEN + EAPOL_HDR_LEN)) {
+ WL_ERR(("[WPS] Invalid len\n"));
+ return;
+ }
+
+ eapol_hdr = (eapol_header_t *)pkt;
+ eapol_type = eapol_hdr->type;
+
+ peer_mac = tx_packet ? eapol_hdr->eth.ether_dhost :
+ eapol_hdr->eth.ether_shost;
+ /*
+ * The implementation assumes only one WPS session would be active
+ * per interface at a time. Even for hostap, the wps_pin session
+ * is limited to one enrollee/client at a time. A session is marked
+ * started on WSC_START and is cleared in the following contexts:
+ * a) Deauth/link down before reaching the EAP-FAIL state. (fail case)
+ * b) Link up following EAP-FAIL. (success case)
+ * c) Link up timeout after EAP-FAIL. (fail case)
+ */
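+ /* Rough state flow, as implemented by the handlers above (sketch):
+ * STA/GC: IDLE -> STARTED (WSC_START) -> REAUTH_WAIT (on M8 rx)
+ * AP/GO: IDLE -> STARTED (WSC_START) -> M8_SENT -> REAUTH_WAIT (EAP-FAIL tx)
+ * REAUTH_WAIT -> DONE on link up/authorize (success), link down,
+ * connect fail, or reauth timer expiry (timer handler not shown here);
+ * wl_wps_session_del() then frees the slot once the state is DONE.
+ */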
+
+ if (eapol_type == EAP_PACKET) {
+ wl_eap_header_t *eap;
+
+ if (len > sizeof(*eap)) {
+ eap = (wl_eap_header_t *)(pkt + ETHER_HDR_LEN + EAPOL_HDR_LEN);
+ if (eap->type == EAP_EXPANDED_TYPE) {
+ wl_eap_exp_t *exp = (wl_eap_exp_t *)eap->data;
+ if (eap->length > EAP_EXP_HDR_MIN_LENGTH) {
+ /* opcode is at fixed offset */
+ u8 opcode = exp->opcode;
+ u16 eap_len = ntoh16(eap->length);
+
+ WL_DBG(("[%s][WPS] EAP EXPANDED packet. opcode:%x len:%d\n",
+ ndev->name, opcode, eap_len));
+ if (opcode == EAP_WSC_MSG) {
+ const u8 *msg;
+ const u8* parse_buf = exp->data;
+ /* Check if recvd pkt is fragmented */
+ if ((!tx_packet) &&
+ (exp->flags &
+ EAP_EXP_FLAGS_FRAGMENTED_DATA)) {
+ if ((eap_len - EAP_EXP_ATTRIB_DATA_OFFSET)
+ > 2) {
+ parse_buf +=
+ EAP_EXP_FRAGMENT_LEN_OFFSET;
+ eap_len -=
+ EAP_EXP_FRAGMENT_LEN_OFFSET;
+ WL_DBG(("Rcvd EAP"
+ " fragmented pkt\n"));
+ } else {
+ /* If the recvd pkt is fragmented
+ * and does not carry the
+ * length field, drop the packet.
+ */
+ return;
+ }
+ }
+
+ msg = wl_find_attribute(parse_buf,
+ (eap_len - EAP_EXP_ATTRIB_DATA_OFFSET),
+ EAP_ATTRIB_MSGTYPE);
+ if (unlikely(!msg)) {
+ WL_ERR(("[WPS] ATTRIB MSG not found!\n"));
+ } else if ((*msg == EAP_WSC_MSG_M8) &&
+ !tx_packet) {
+ /* In certain cases M2 can also carry
+ * credential. So add check for
+ * cred in M8/M2 and start reauth timer.
+ */
+ WL_INFORM_MEM(("[%s][WPS] M8\n",
+ ndev->name));
+ wl_wps_session_update(ndev,
+ WPS_STATE_M8_RECVD, peer_mac);
+ } else if ((*msg == EAP_WSC_MSG_M8) &&
+ tx_packet) {
+ WL_INFORM_MEM(("[%s][WPS] M8 Sent\n",
+ ndev->name));
+ wl_wps_session_update(ndev,
+ WPS_STATE_M8_SENT, peer_mac);
+ } else {
+ WL_DBG(("[%s][WPS] EAP WSC MSG: 0x%X\n",
+ ndev->name, *msg));
+ }
+ } else if (opcode == EAP_WSC_START) {
+ /* WSC session started. WSC_START - Tx from GO/AP.
+ * Session will be deleted on successful link up or
+ * on failure (deauth context)
+ */
+ mode = tx_packet ? WL_MODE_AP : WL_MODE_BSS;
+ wl_wps_session_add(ndev, mode, peer_mac);
+ WL_INFORM_MEM(("[%s][WPS] WSC_START Mode:%d\n",
+ ndev->name, mode));
+ } else if (opcode == EAP_WSC_DONE) {
+ /* WSC session done. TX on STA/GC. RX on GO/AP
+ * On devices where config file save fails, it may
+ * return WPS_NAK with config_error:0. But the
+ * connection would still proceed. Hence don't let
+ * state machine depend on WSC DONE.
+ */
+ WL_INFORM_MEM(("[%s][WPS] WSC_DONE\n", ndev->name));
+ }
+ }
+ }
+
+ if (eap->code == EAP_CODE_FAILURE) {
+ /* EAP_FAIL */
+ WL_INFORM_MEM(("[%s][WPS] EAP_FAIL\n", ndev->name));
+ wl_wps_session_update(ndev,
+ WPS_STATE_EAP_FAIL, peer_mac);
+ }
+ }
+ }
+}
+#endif /* WL_WPS_SYNC */
+
+s32
+wl_cfg80211_sup_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *event, void *data)
+{
+ int err = BCME_OK;
+ u32 status = ntoh32(event->status);
+ struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+ u32 reason = ntoh32(event->reason);
+
+ if (!wl_get_drv_status(cfg, CFG80211_CONNECT, ndev)) {
+ /* Join attempt via non-cfg80211 interface.
+ * Don't send resultant events to cfg80211
+ * layer
+ */
+ WL_INFORM_MEM(("Event received in non-cfg80211"
+ " connect state. Ignore\n"));
+ return BCME_OK;
+ }
+
+ if ((status == WLC_SUP_KEYED || status == WLC_SUP_KEYXCHANGE_WAIT_G1) &&
+ reason == WLC_E_SUP_OTHER) {
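+ /* Notification path by kernel version (sketch): >= 4.15 uses the
+ * native cfg80211_port_authorized(); otherwise (> 3.14.0 or with
+ * WL_VENDOR_EXT_SUPPORT) a BRCM vendor event is sent instead.
+ */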
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0))
+ /* NL80211_CMD_PORT_AUTHORIZED is supported on kernels >= 4.15 */
+ cfg80211_port_authorized(ndev, (u8 *)wl_read_prof(cfg, ndev, WL_PROF_BSSID),
+ GFP_KERNEL);
+ WL_INFORM_MEM(("4way HS finished. port authorized event sent\n"));
+#elif ((LINUX_VERSION_CODE > KERNEL_VERSION(3, 14, 0)) || defined(WL_VENDOR_EXT_SUPPORT))
+ err = wl_cfgvendor_send_async_event(bcmcfg_to_wiphy(cfg), ndev,
+ BRCM_VENDOR_EVENT_PORT_AUTHORIZED, NULL, 0);
+ WL_INFORM_MEM(("4way HS finished. port authorized event sent\n"));
+#else
+ /* not supported on kernels <= 3.14.0 */
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) */
+ /* Post SCB authorize actions */
+ wl_cfg80211_post_scb_auth(cfg, ndev);
+ } else if (status < WLC_SUP_KEYXCHANGE_WAIT_G1 && (reason != WLC_E_SUP_OTHER &&
+ reason != WLC_E_SUP_PTK_UPDATE)) {
+ /* if any failure seen while 4way HS, should send NL80211_CMD_DISCONNECT */
+ WL_ERR(("4way HS error. status:%d, reason:%d\n", status, reason));
+ CFG80211_DISCONNECTED(ndev, 0, NULL, 0, false, GFP_KERNEL);
+ }
+
+ return err;
+}
+
+#ifdef WL_BCNRECV
+static s32
+wl_bcnrecv_aborted_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+ s32 status = ntoh32(e->status);
+ struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+ /* Abort the fake AP scan when a roam is in progress */
+ if (status == WLC_E_STATUS_RXBCN_ABORT) {
+ wl_android_bcnrecv_stop(ndev, WL_BCNRECV_ROAMABORT);
+ } else {
+ WL_ERR(("UNKNOWN STATUS. status:%d\n", status));
+ }
+ return BCME_OK;
+}
+#endif /* WL_BCNRECV */
+
+#ifdef WL_MBO
+static s32
+wl_mbo_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+ s32 err = 0;
+ wl_event_mbo_t *mbo_evt = (wl_event_mbo_t *)data;
+ wl_event_mbo_cell_nw_switch_t *cell_sw_evt = NULL;
+ wl_btm_event_type_data_t *evt_data = NULL;
+
+ WL_INFORM(("MBO: Evt %u\n", mbo_evt->type));
+
+ if (mbo_evt->type == WL_MBO_E_CELLULAR_NW_SWITCH) {
+ cell_sw_evt = (wl_event_mbo_cell_nw_switch_t *)mbo_evt->data;
+ BCM_REFERENCE(cell_sw_evt);
+ SUPP_EVENT(("CTRL-EVENT-CELLULAR-SWITCH", "reason %d cur_assoc_time_left %u "
+ "reassoc_delay %u\n", cell_sw_evt->reason,
+ cell_sw_evt->assoc_time_remain, cell_sw_evt->reassoc_delay));
+ } else if (mbo_evt->type == WL_MBO_E_BTM_RCVD) {
+ evt_data = (wl_btm_event_type_data_t *)mbo_evt->data;
+ if (evt_data->version != WL_BTM_EVENT_DATA_VER_1) {
+ WL_ERR(("version mismatch. rcvd %u expected %u\n",
+ evt_data->version, WL_BTM_EVENT_DATA_VER_1));
+ return -1;
+ }
+ SUPP_EVENT(("CTRL-EVENT-BRCM-BTM-REQ-RCVD", "reason=%u\n",
+ evt_data->transition_reason));
+ } else {
+ WL_INFORM(("UNKNOWN EVENT. type:%u\n", mbo_evt->type));
+ }
+ return err;
+}
+#endif /* WL_MBO */
+
+#ifdef WL_CAC_TS
+static s32
+wl_cfg80211_cac_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+ u32 event = ntoh32(e->event_type);
+ s32 status = ntoh32(e->status);
+ s32 reason = ntoh32(e->reason);
+
+ BCM_REFERENCE(reason);
+
+ if (event == WLC_E_ADDTS_IND) {
+ /* The supp log format of adding ts_delay in success case needs to be maintained */
+ if (status == WLC_E_STATUS_SUCCESS) {
+ uint *ts_delay = (uint *)data;
+ BCM_REFERENCE(ts_delay);
+ SUPP_EVENT(("CTRL-EVENT-CAC-ADDTS", "status=%d reason=%d ts_delay=%u\n",
+ status, reason, *ts_delay));
+ } else {
+ SUPP_EVENT(("CTRL-EVENT-CAC-ADDTS", "status=%d reason=%d\n",
+ status, reason));
+ }
+ } else if (event == WLC_E_DELTS_IND) {
+ SUPP_EVENT(("CTRL-EVENT-CAC-DELTS", "status=%d reason=%d\n", status, reason));
+ }
+
+ return BCME_OK;
+}
+#endif /* WL_CAC_TS */
+
+#if defined(WL_MBO) || defined(WL_OCE)
+static s32
+wl_bssid_prune_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+ s32 err = 0;
+ uint reason = 0;
+ wl_bssid_pruned_evt_info_t *evt_info = (wl_bssid_pruned_evt_info_t *)data;
+
+ if (evt_info->version == WL_BSSID_PRUNE_EVT_VER_1) {
+ if (evt_info->reason == WLC_E_PRUNE_ASSOC_RETRY_DELAY) {
+ /* MBO assoc retry delay */
+ reason = WIFI_PRUNE_ASSOC_RETRY_DELAY;
+ SUPP_EVENT(("CTRL-EVENT-BRCM-BSSID-PRUNED", "ssid=%s bssid=" MACF
+ " reason=%u timeout_val=%u(ms)\n", evt_info->SSID,
+ ETHER_TO_MACF(evt_info->BSSID), reason, evt_info->time_remaining));
+ } else if (evt_info->reason == WLC_E_PRUNE_RSSI_ASSOC_REJ) {
+ /* OCE RSSI-based assoc rejection */
+ reason = WIFI_PRUNE_RSSI_ASSOC_REJ;
+ SUPP_EVENT(("CTRL-EVENT-BRCM-BSSID-PRUNED", "ssid=%s bssid=" MACF
+ " reason=%u timeout_val=%u(ms) rssi_threshold=%d(dBm)\n",
+ evt_info->SSID, ETHER_TO_MACF(evt_info->BSSID),
+ reason, evt_info->time_remaining, evt_info->rssi_threshold));
+ } else {
+ /* Reasons other than assoc retry delay/RSSI assoc rejection
+ * are invalid in the current handler
+ */
+ BCM_REFERENCE(reason);
+ WL_INFORM(("INVALID. reason:%u\n", evt_info->reason));
+ }
+ } else {
+ WL_INFORM(("version mismatch. rcvd %u expected %u\n", evt_info->version,
+ WL_BSSID_PRUNE_EVT_VER_1));
+ }
+ return err;
+}
+#endif /* WL_MBO || WL_OCE */
+#ifdef RTT_SUPPORT
+static s32
+wl_cfg80211_rtt_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+ wl_event_msg_t event;
+
+ (void)memcpy_s(&event, sizeof(wl_event_msg_t),
+ e, sizeof(wl_event_msg_t));
+ return dhd_rtt_event_handler(dhdp, &event, data);
+}
+#endif /* RTT_SUPPORT */
+
+void
+wl_print_verinfo(struct bcm_cfg80211 *cfg)
+{
+ char *ver_ptr;
+ uint32 alloc_len = MOD_PARAM_INFOLEN;
+
+ if (!cfg) {
+ WL_ERR(("cfg is NULL\n"));
+ return;
+ }
+
+ ver_ptr = (char *)MALLOCZ(cfg->osh, alloc_len);
+ if (!ver_ptr) {
+ WL_ERR(("Failed to alloc ver_ptr\n"));
+ return;
+ }
+
+ if (!dhd_os_get_version(bcmcfg_to_prmry_ndev(cfg),
+ TRUE, &ver_ptr, alloc_len)) {
+ WL_ERR(("DHD Version: %s\n", ver_ptr));
+ }
+
+ if (!dhd_os_get_version(bcmcfg_to_prmry_ndev(cfg),
+ FALSE, &ver_ptr, alloc_len)) {
+ WL_ERR(("F/W Version: %s\n", ver_ptr));
+ }
+
+ MFREE(cfg->osh, ver_ptr, alloc_len);
+}
+
+/* Get the concurrency mode */
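+/* Per the checks below: two connected interfaces on the same band and the
+ * same channel => SCC; same band, different channels => VSDB; different
+ * bands => RSDB. Fewer than two connections => no concurrency mode.
+ */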
+int wl_cfg80211_get_concurrency_mode(struct bcm_cfg80211 *cfg)
+{
+ struct net_info *iter, *next;
+ uint cmode = CONCURRENCY_MODE_NONE;
+ u32 connected_cnt = 0;
+ u32 pre_channel = 0, channel = 0;
+ u32 pre_band = 0;
+ u32 chanspec = 0;
+ u32 band = 0;
+
+ connected_cnt = wl_get_drv_status_all(cfg, CONNECTED);
+ if (connected_cnt <= 1) {
+ return cmode;
+ }
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ for_each_ndev(cfg, iter, next) {
+ if (iter->ndev) {
+ if (wl_get_drv_status(cfg, CONNECTED, iter->ndev)) {
+ if (wldev_iovar_getint(iter->ndev, "chanspec",
+ (s32 *)&chanspec) == BCME_OK) {
+ channel = wf_chspec_ctlchan(
+ wl_chspec_driver_to_host(chanspec));
+ band = (channel <= CH_MAX_2G_CHANNEL) ?
+ IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+ }
+ if ((!pre_channel && channel)) {
+ pre_band = band;
+ pre_channel = channel;
+ } else if (pre_channel) {
+ if ((pre_band == band) && (pre_channel == channel)) {
+ cmode = CONCURRENCY_SCC_MODE;
+ goto exit;
+ } else if ((pre_band == band) && (pre_channel != channel)) {
+ cmode = CONCURRENCY_VSDB_MODE;
+ goto exit;
+ } else if (pre_band != band) {
+ cmode = CONCURRENCY_RSDB_MODE;
+ goto exit;
+ }
+ }
+ }
+ }
+ }
+ GCC_DIAGNOSTIC_POP();
+exit:
+ return cmode;
+}
+#ifdef WL_CHAN_UTIL
+static s32
+wl_cfg80211_bssload_report_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+ s32 err = BCME_OK;
+ struct sk_buff *skb = NULL;
+ s32 status = ntoh32(e->status);
+ u8 chan_use_percentage = 0;
+#if (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || \
+ LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
+ struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+#endif /* (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || */
+ /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+ struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+ uint len;
+ gfp_t kflags;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+ len = CU_ATTR_HDR_LEN + sizeof(u8);
+ kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) */
+
+#if (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || \
+ LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
+ skb = cfg80211_vendor_event_alloc(wiphy, ndev_to_wdev(ndev), len,
+ BRCM_VENDOR_EVENT_CU, kflags);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+ skb = cfg80211_vendor_event_alloc(wiphy, len, BRCM_VENDOR_EVENT_CU, kflags);
+#else
+ /* No support exist */
+#endif /* (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || */
+ /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) */
+ if (!skb) {
+ WL_ERR(("skb alloc failed"));
+ return -ENOMEM;
+ }
+
+ if ((status == WLC_E_STATUS_SUCCESS) && data) {
+ wl_bssload_t *bssload_report = (wl_bssload_t *)data;
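+ /* Per the 802.11 BSS Load element, chan_util is scaled to
+ * 0-255; e.g. a report of 128 maps to (128 * 100) / 255 = 50%.
+ */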
+ chan_use_percentage = (bssload_report->chan_util * 100) / 255;
+ WL_DBG(("ChannelUtilization=%hhu\n", chan_use_percentage));
+ err = nla_put_u8(skb, CU_ATTR_PERCENTAGE, chan_use_percentage);
+ if (err < 0) {
+ WL_ERR(("Failed to put CU_ATTR_PERCENTAGE, err:%d\n", err));
+ }
+ }
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+ cfg80211_vendor_event(skb, kflags);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) */
+
+ return err;
+}
+
+#define WL_CHAN_UTIL_DEFAULT_INTERVAL 3000
+#define WL_CHAN_UTIL_THRESH_MIN 15
+#define WL_CHAN_UTIL_THRESH_INTERVAL 10
+#ifndef CUSTOM_CU_INTERVAL
+#define CUSTOM_CU_INTERVAL WL_CHAN_UTIL_DEFAULT_INTERVAL
+#endif /* CUSTOM_CU_INTERVAL */
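+/* Worked example for the threshold math below (assuming MAX_BSSLOAD_LEVELS
+ * is 8): level i = ((15 + 10 * i) * 255) / 100, i.e. 15%..85% in 10% steps
+ * mapped onto the 0-255 utilization scale: 38, 63, 89, 114, 140, 165, 191, 216.
+ */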
+
+static s32
+wl_cfg80211_start_bssload_report(struct net_device *ndev)
+{
+ s32 err = BCME_OK;
+ wl_bssload_cfg_t blcfg;
+ u8 i;
+ struct bcm_cfg80211 *cfg;
+
+ if (!ndev) {
+ return -ENODEV;
+ }
+
+ cfg = wl_get_cfg(ndev);
+ if (!cfg) {
+ return -ENODEV;
+ }
+
+ /* Typecast to void as the buffer size is the same as the memset size */
+ (void)memset_s(&blcfg, sizeof(wl_bssload_cfg_t), 0, sizeof(wl_bssload_cfg_t));
+ /* Set the default report interval to 3 sec and 8 threshold levels between 15 and 85% */
+ blcfg.rate_limit_msec = CUSTOM_CU_INTERVAL;
+ blcfg.num_util_levels = MAX_BSSLOAD_LEVELS;
+ for (i = 0; i < MAX_BSSLOAD_LEVELS; i++) {
+ blcfg.util_levels[i] = (((WL_CHAN_UTIL_THRESH_MIN +
+ (i * WL_CHAN_UTIL_THRESH_INTERVAL)) * 255) / 100);
+ }
+
+ err = wldev_iovar_setbuf(ndev, "bssload_report_event", &blcfg,
+ sizeof(wl_bssload_cfg_t), cfg->ioctl_buf, WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync);
+ if (unlikely(err)) {
+ WL_ERR(("Set event_msgs error (%d)\n", err));
+ }
+
+ return err;
+}
+#endif /* WL_CHAN_UTIL */
+
+s32
+wl_cfg80211_config_suspend_events(struct net_device *ndev, bool enable)
+{
+ s32 err = 0;
+ struct bcm_cfg80211 *cfg;
+ s8 event_buf[WL_EVENTING_MASK_EXT_LEN + EVENTMSGS_EXT_STRUCT_SIZE] = {0};
+ eventmsgs_ext_t *eventmask_msg = NULL;
+ /* Room for "event_msgs_ext" + '\0' + bitvec */
+ char iovbuf[WL_EVENTING_MASK_EXT_LEN + EVENTMSGS_EXT_STRUCT_SIZE + 16];
+ s32 msglen = WL_EVENTING_MASK_EXT_LEN + EVENTMSGS_EXT_STRUCT_SIZE;
+
+ if (!ndev) {
+ return -ENODEV;
+ }
+
+ cfg = wl_get_cfg(ndev);
+ if (!cfg) {
+ return -ENODEV;
+ }
+
+ mutex_lock(&cfg->event_sync);
+
+ eventmask_msg = (eventmsgs_ext_t *)event_buf;
+ eventmask_msg->ver = EVENTMSGS_VER;
+ eventmask_msg->command = EVENTMSGS_NONE;
+ eventmask_msg->len = WL_EVENTING_MASK_EXT_LEN;
+ eventmask_msg->maxgetsize = WL_EVENTING_MASK_EXT_LEN;
+
+ /* Read event_msgs mask */
+ err = wldev_iovar_getbuf(ndev, "event_msgs_ext",
+ eventmask_msg, EVENTMSGS_EXT_STRUCT_SIZE,
+ iovbuf,
+ sizeof(iovbuf),
+ NULL);
+
+ if (unlikely(err)) {
+ WL_ERR(("Get event_msgs error (%d)\n", err));
+ goto eventmsg_out;
+ }
+
+ bcopy(iovbuf, eventmask_msg, msglen);
+
+ /* Add set/clear of event mask under feature specific flags */
+ if (enable) {
+ WL_DBG(("%s: Enabling events on resume\n", __FUNCTION__));
+#ifdef WL_CHAN_UTIL
+ setbit(eventmask_msg->mask, WLC_E_BSS_LOAD);
+#endif /* WL_CHAN_UTIL */
+ } else {
+ WL_DBG(("%s: Disabling events before suspend\n", __FUNCTION__));
+#ifdef WL_CHAN_UTIL
+ clrbit(eventmask_msg->mask, WLC_E_BSS_LOAD);
+#endif /* WL_CHAN_UTIL */
+ }
+
+ /* Write updated Event mask */
+ eventmask_msg->ver = EVENTMSGS_VER;
+ eventmask_msg->command = EVENTMSGS_SET_MASK;
+ eventmask_msg->len = WL_EVENTING_MASK_EXT_LEN;
+
+ err = wldev_iovar_setbuf(ndev, "event_msgs_ext", eventmask_msg,
+ WL_EVENTING_MASK_EXT_LEN + EVENTMSGS_EXT_STRUCT_SIZE,
+ iovbuf, sizeof(iovbuf), NULL);
+
+ if (unlikely(err)) {
+ WL_ERR(("Set event_msgs error (%d)\n", err));
+ goto eventmsg_out;
+ }
+
+eventmsg_out:
+ mutex_unlock(&cfg->event_sync);
+ return err;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
+#ifdef WLFBT
+static int
+wl_cfg80211_update_ft_ies(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_update_ft_ies_params *ftie)
+{
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+
+ if (!FW_SUPPORTED(dhdp, fbtoverds) && !FW_SUPPORTED(dhdp, fbt_adpt)) {
+ WL_INFORM(("FW does not support FT roaming\n"));
+ return BCME_UNSUPPORTED;
+ }
+ return BCME_OK;
+}
+#endif /* WLFBT */
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0) */
+
+#ifdef WL_WIPSEVT
+int
+wl_cfg80211_wips_event_ext(wl_wips_event_info_t *wips_event)
+{
+ s32 err = BCME_OK;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+ struct sk_buff *skb;
+ gfp_t kflags;
+ struct bcm_cfg80211 *cfg;
+ struct net_device *ndev;
+ struct wiphy *wiphy;
+
+ cfg = wl_cfg80211_get_bcmcfg();
+ if (!cfg || !cfg->wdev) {
+ WL_ERR(("WIPS evt invalid arg\n"));
+ return err;
+ }
+
+ ndev = bcmcfg_to_prmry_ndev(cfg);
+ wiphy = bcmcfg_to_wiphy(cfg);
+
+ kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+ skb = CFG80211_VENDOR_EVENT_ALLOC(wiphy, ndev_to_wdev(ndev),
+ BRCM_VENDOR_WIPS_EVENT_BUF_LEN, BRCM_VENDOR_EVENT_WIPS, kflags);
+
+ if (!skb) {
+ WL_ERR(("skb alloc failed"));
+ return BCME_NOMEM;
+ }
+
+ err = nla_put_u16(skb, WIPS_ATTR_DEAUTH_CNT, wips_event->misdeauth);
+ if (unlikely(err)) {
+ WL_ERR(("nla_put_u16 WIPS_ATTR_DEAUTH_CNT failed\n"));
+ goto fail;
+ }
+ err = nla_put(skb, WIPS_ATTR_DEAUTH_BSSID, ETHER_ADDR_LEN, &wips_event->bssid);
+ if (unlikely(err)) {
+ WL_ERR(("nla_put WIPS_ATTR_DEAUTH_BSSID failed\n"));
+ goto fail;
+ }
+ err = nla_put_s16(skb, WIPS_ATTR_CURRENT_RSSI, wips_event->current_RSSI);
+ if (unlikely(err)) {
+ WL_ERR(("nla_put_u16 WIPS_ATTR_CURRENT_RSSI failed\n"));
+ goto fail;
+ }
+ err = nla_put_s16(skb, WIPS_ATTR_DEAUTH_RSSI, wips_event->deauth_RSSI);
+ if (unlikely(err)) {
+ WL_ERR(("nla_put_u16 WIPS_ATTR_DEAUTH_RSSI failed\n"));
+ goto fail;
+ }
+ cfg80211_vendor_event(skb, kflags);
+
+ return err;
+
+fail:
+ if (skb) {
+ nlmsg_free(skb);
+ }
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) */
+ return err;
+}
+
+int
+wl_cfg80211_wips_event(uint16 misdeauth, char* bssid)
+{
+ s32 err = BCME_OK;
+ wl_wips_event_info_t wips_event;
+
+ wips_event.misdeauth = misdeauth;
+ memcpy(&wips_event.bssid, bssid, ETHER_ADDR_LEN);
+ wips_event.current_RSSI = 0;
+ wips_event.deauth_RSSI = 0;
+
+ err = wl_cfg80211_wips_event_ext(&wips_event);
+ return err;
+}
+#endif /* WL_WIPSEVT */
+
+#ifdef PCIE_INB_DW
+#define WL_DS(x)
+/*
+ * This API checks whether it's okay to enter deep sleep (DS).
+ * If some transaction is in progress, return true
+ * to skip DS.
+ */
+#ifndef USECS_PER_MSEC
+#define USECS_PER_MSEC 1000UL
+#endif /* USECS_PER_MSEC */
+bool wl_cfg80211_check_in_progress(struct net_device *dev)
+{
+ struct bcm_cfg80211 *cfg;
+ struct net_device *pri_dev;
+ u8 reason = WL_STATE_IDLE;
+ u64 timeout;
+ u64 start_time = 0;
+
+ cfg = wl_get_cfg(dev);
+ pri_dev = bcmcfg_to_prmry_ndev(cfg);
+
+ /* check states like scan in progress, four-way handshake, etc.,
+ * before entering deep sleep.
+ */
+ if (wl_get_drv_status_all(cfg, SCANNING)) {
+ WL_DS(("scan in progress\n"));
+ reason = WL_STATE_SCANNING;
+ start_time = GET_TS(cfg, scan_start);
+ } else if (wl_get_drv_status_all(cfg, CONNECTING)) {
+ WL_DS(("connect in progress\n"));
+ reason = WL_STATE_CONNECTING;
+ start_time = GET_TS(cfg, conn_start);
+ } else if ((IS_STA_IFACE(ndev_to_wdev(dev))) &&
+ wl_get_drv_status(cfg, CONNECTED, pri_dev) &&
+ !wl_get_drv_status(cfg, AUTHORIZED, pri_dev)) {
+ WL_DS(("connect-authorization in progress\n"));
+ reason = WL_STATE_AUTHORIZING;
+ start_time = GET_TS(cfg, authorize_start);
+ }
+
+ if (reason) {
+ u64 curtime = OSL_LOCALTIME_NS();
+ if (unlikely(!start_time)) {
+ WL_ERR_RLMT(("state got cleared for reason:%d\n", reason));
+ return false;
+ }
+ /* check whether we are stuck in a state
+ * for too long.
+ */
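+ /* start_time/curtime come from OSL_LOCALTIME_NS() (ns), so the
+ * usec threshold is presumably scaled by USECS_PER_MSEC (1000)
+ * to compare in ns.
+ */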
+ timeout = (start_time + (WL_DS_SKIP_THRESHOLD_USECS * USECS_PER_MSEC));
+ if (time_after64(curtime, timeout)) {
+ /* state hasn't changed for WL_DS_SKIP_THRESHOLD_USECS */
+ WL_ERR(("DS skip threshold hit. reason:%d start_time:"
+ SEC_USEC_FMT" cur_time:"SEC_USEC_FMT"\n",
+ reason, GET_SEC_USEC(start_time), GET_SEC_USEC(curtime)));
+ ASSERT((0));
+ }
+ /* return true to skip suspend */
+ return true;
+ }
+
+ return false;
+}
+#endif /* PCIE_INB_DW */
+
+bool wl_cfg80211_is_dpp_frame(void *frame, u32 frame_len)
+{
+ /* check for DPP public action frames */
+ wl_dpp_pa_frame_t *pact_frm;
+
+ if (frame == NULL) {
+ return false;
+ }
+ pact_frm = (wl_dpp_pa_frame_t *)frame;
+ if (frame_len < sizeof(wl_dpp_pa_frame_t) - 1) {
+ return false;
+ }
+
+ if ((pact_frm->category == WL_PUB_AF_CATEGORY) &&
+ (pact_frm->action == WL_PUB_AF_ACTION) &&
+ (pact_frm->oui_type == WL_PUB_AF_WFA_STYPE_DPP) &&
+ (memcmp(pact_frm->oui, WFA_OUI, sizeof(pact_frm->oui)) == 0)) {
+ return true;
+ }
+
+ return false;
+}
+
+const char *
+get_dpp_pa_ftype(enum wl_dpp_ftype ftype)
+{
+ switch (ftype) {
+ case DPP_AUTH_REQ:
+ return "DPP_AUTH_REQ";
+ case DPP_AUTH_RESP:
+ return "DPP_AUTH_RESP";
+ case DPP_AUTH_CONF:
+ return "DPP_AUTH_CONF";
+ default:
+ return "Unkown DPP frame";
+ }
+}
+
+#define GAS_RESP_LEN 2
+#define DOUBLE_TLV_BODY_OFF 4
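+/* Assumed GAS response layout walked below: Advertisement Protocol IE
+ * (TLV hdr + body), a 2-byte GAS response length, then an ANQP element
+ * [id(2, LE)][len(2, LE)][OUI(3)][subtype(1)]...; DOUBLE_TLV_BODY_OFF (4)
+ * skips the id/len pair to reach the OUI.
+ */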
+bool wl_cfg80211_find_gas_subtype(u8 subtype, u16 adv_id, u8* data, u32 len)
+{
+ const bcm_tlv_t *ie = (bcm_tlv_t *)data;
+ const u8 *frame = NULL;
+ u16 id, flen;
+
+ /* Skip the first ANQP element, if the frame has one */
+ ie = bcm_parse_tlvs(ie, len, DOT11_MNG_ADVERTISEMENT_ID);
+
+ if (ie == NULL)
+ return false;
+
+ frame = (const uint8 *)ie + ie->len + TLV_HDR_LEN + GAS_RESP_LEN;
+ id = ((u16) (((frame)[1] << 8) | (frame)[0]));
+ flen = ((u16) (((frame)[3] << 8) | (frame)[2]));
+
+ /* If the contents match the OUI and the type */
+ if ((flen >= WFA_OUI_LEN + 1) &&
+ (id == adv_id) &&
+ !bcmp(&frame[DOUBLE_TLV_BODY_OFF], (const uint8*)WFA_OUI, WFA_OUI_LEN) &&
+ subtype == frame[DOUBLE_TLV_BODY_OFF+WFA_OUI_LEN]) {
+ return true;
+ }
+
+ return false;
+}
+
+bool wl_cfg80211_is_dpp_gas_action(void *frame, u32 frame_len)
+{
+ wl_dpp_gas_af_t *act_frm = (wl_dpp_gas_af_t *)frame;
+ u32 len;
+ const bcm_tlv_t *ie = NULL;
+
+ if ((frame_len < (sizeof(wl_dpp_gas_af_t) - 1)) ||
+ act_frm->category != WL_PUB_AF_CATEGORY) {
+ return false;
+ }
+
+ len = frame_len - (u32)(sizeof(wl_dpp_gas_af_t) - 1);
+ if (act_frm->action == WL_PUB_AF_GAS_IREQ) {
+ ie = (bcm_tlv_t *)act_frm->query_data;
+ /* We are interested only in MNG ADV ID. Skip any other id. */
+ ie = bcm_parse_tlvs(ie, len, DOT11_MNG_ADVERTISEMENT_ID);
+ } else if (act_frm->action == WL_PUB_AF_GAS_IRESP) {
+ ie = (bcm_tlv_t *)&act_frm->query_data[WL_GAS_RESP_OFFSET];
+ /* We are interested only in MNG ADV ID. Skip any other id. */
+ ie = bcm_parse_tlvs(ie, len, DOT11_MNG_ADVERTISEMENT_ID);
+ } else {
+ return false;
+ }
+
+ if (ie && (ie->len >= WL_GAS_MIN_LEN) &&
+ (memcmp(&ie->data[WL_GAS_WFA_OFFSET], WFA_OUI, 3) == 0) &&
+ (ie->data[WL_GAS_STYPE_OFFSET] == WL_GAS_WFA_STYPE_DPP)) {
+ WL_DBG(("DPP GAS FRAME. type:%d\n", act_frm->action));
+ return true;
+ }
+
+ /* Non DPP GAS frame */
+ return false;
+}
+
+#ifdef KEEP_ALIVE
+#define KA_TEMP_BUF_SIZE 512
+#define KA_FRAME_SIZE 300
+int
+wl_cfg80211_start_mkeep_alive(struct bcm_cfg80211 *cfg, uint8 mkeep_alive_id,
+ uint16 ether_type, uint8 *ip_pkt, uint16 ip_pkt_len,
+ uint8* src_mac, uint8* dst_mac, uint32 period_msec)
+{
+ const int ETHERTYPE_LEN = 2;
+ char *pbuf = NULL;
+ const char *str;
+ wl_mkeep_alive_pkt_t mkeep_alive_pkt;
+ wl_mkeep_alive_pkt_t *mkeep_alive_pktp = NULL;
+ uint16 buf_len = 0;
+ u8 str_len = 0;
+ int res = BCME_ERROR;
+ uint16 len_bytes = 0;
+ int i = 0;
+ uint16 pmac_frame_len = KA_FRAME_SIZE;
+ uint16 pbuf_len = KA_TEMP_BUF_SIZE;
+
+ /* ether frame buffer must hold both the max IP pkt (256 bytes) and the ether header */
+ char *pmac_frame = NULL;
+ char *pmac_frame_begin = NULL;
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+ struct net_device *primary_ndev = NULL;
+ primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+
+ /*
+ * The mkeep_alive packet is for STA interface only; if the bss is configured as AP,
+ * dongle shall reject a mkeep_alive request.
+ */
+ if (!(dhd->op_mode & DHD_FLAG_STA_MODE))
+ return res;
+
+ WL_TRACE(("%s execution\n", __FUNCTION__));
+
+ if ((pbuf = MALLOCZ(cfg->osh, KA_TEMP_BUF_SIZE)) == NULL) {
+ WL_ERR(("failed to allocate buf with size %d\n", KA_TEMP_BUF_SIZE));
+ res = BCME_NOMEM;
+ return res;
+ }
+
+ if ((pmac_frame = MALLOCZ(cfg->osh, pmac_frame_len)) == NULL) {
+ WL_ERR(("failed to allocate mac_frame with size %d\n", pmac_frame_len));
+ res = BCME_NOMEM;
+ goto exit;
+ }
+ pmac_frame_begin = pmac_frame;
+
+ /*
+ * Get current mkeep-alive status.
+ */
+ res = wldev_iovar_getbuf(primary_ndev, "mkeep_alive", &mkeep_alive_id,
+ sizeof(mkeep_alive_id), pbuf, KA_TEMP_BUF_SIZE, &cfg->ioctl_buf_sync);
+ if (res < 0) {
+ WL_ERR(("%s: Get mkeep_alive failed (error=%d)\n", __FUNCTION__, res));
+ goto exit;
+ } else {
+ /* Check available ID whether it is occupied */
+ mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) pbuf;
+ if (dtoh32(mkeep_alive_pktp->period_msec) != 0) {
+ WL_ERR(("%s: Get mkeep_alive failed, ID %u is in use.\n",
+ __FUNCTION__, mkeep_alive_id));
+
+ /* Current occupied ID info */
+ WL_ERR(("%s: mkeep_alive\n", __FUNCTION__));
+ WL_ERR((" Id : %d\n"
+ " Period: %d msec\n"
+ " Length: %d\n"
+ " Packet: 0x",
+ mkeep_alive_pktp->keep_alive_id,
+ dtoh32(mkeep_alive_pktp->period_msec),
+ dtoh16(mkeep_alive_pktp->len_bytes)));
+
+ for (i = 0; i < mkeep_alive_pktp->len_bytes; i++) {
+ WL_ERR(("%02x", mkeep_alive_pktp->data[i]));
+ }
+ WL_ERR(("\n"));
+
+ res = BCME_NOTFOUND;
+ goto exit;
+ }
+ }
+
+ /* Request the specified ID */
+ bzero(&mkeep_alive_pkt, sizeof(wl_mkeep_alive_pkt_t));
+ bzero(pbuf, KA_TEMP_BUF_SIZE);
+ str = "mkeep_alive";
+ str_len = strlen(str);
+ strlcpy(pbuf, str, KA_TEMP_BUF_SIZE);
+ buf_len = str_len + 1;
+ pbuf_len -= buf_len;
+
+ mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) (pbuf + buf_len);
+ mkeep_alive_pkt.period_msec = htod32(period_msec);
+ mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION);
+ mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN);
+ len_bytes = (ETHER_ADDR_LEN*2) + ETHERTYPE_LEN + ip_pkt_len;
+ mkeep_alive_pkt.len_bytes = htod16(len_bytes);
+
+ /* ID assigned */
+ mkeep_alive_pkt.keep_alive_id = mkeep_alive_id;
+
+ buf_len += WL_MKEEP_ALIVE_FIXED_LEN;
+
+ /*
+ * Build up Ethernet Frame
+ */
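+ /* Resulting frame layout (len_bytes = 6 + 6 + 2 + ip_pkt_len):
+ * [dst mac(6)][src mac(6)][ether type(2)][ip pkt(ip_pkt_len)]
+ */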
+
+ /* Mapping dest mac addr */
+ res = memcpy_s(pmac_frame, pmac_frame_len, dst_mac, ETHER_ADDR_LEN);
+ if (res) {
+ goto exit;
+ }
+ pmac_frame += ETHER_ADDR_LEN;
+ pmac_frame_len -= ETHER_ADDR_LEN;
+
+ /* Mapping src mac addr */
+ res = memcpy_s(pmac_frame, pmac_frame_len, src_mac, ETHER_ADDR_LEN);
+ if (res) {
+ goto exit;
+ }
+ pmac_frame += ETHER_ADDR_LEN;
+ pmac_frame_len -= ETHER_ADDR_LEN;
+
+ /* Mapping Ethernet type */
+ ether_type = hton16(ether_type);
+ res = memcpy_s(pmac_frame, pmac_frame_len, &ether_type, ETHERTYPE_LEN);
+ if (res) {
+ goto exit;
+ }
+ pmac_frame += ETHERTYPE_LEN;
+ pmac_frame_len -= ETHERTYPE_LEN;
+
+ /* Mapping IP pkt */
+ res = memcpy_s(pmac_frame, pmac_frame_len, ip_pkt, ip_pkt_len);
+ if (res) {
+ goto exit;
+ }
+ pmac_frame += ip_pkt_len;
+ pmac_frame_len -= ip_pkt_len;
+
+ /*
+ * Keep-alive attributes are set in local variable (mkeep_alive_pkt), and
+ * then memcpy'ed into buffer (mkeep_alive_pktp) since there is no
+ * guarantee that the buffer is properly aligned.
+ */
+ res = memcpy_s((char *)mkeep_alive_pktp, pbuf_len,
+ &mkeep_alive_pkt, WL_MKEEP_ALIVE_FIXED_LEN);
+ if (res) {
+ goto exit;
+ }
+ pbuf_len -= WL_MKEEP_ALIVE_FIXED_LEN;
+
+ /*
+ * Length of the ether frame
+ * = dst mac + src mac + ether type + ip pkt len
+ */
+ res = memcpy_s(mkeep_alive_pktp->data, pbuf_len,
+ pmac_frame_begin, len_bytes);
+ if (res) {
+ goto exit;
+ }
+ buf_len += len_bytes;
+
+ res = wldev_ioctl_set(primary_ndev, WLC_SET_VAR, pbuf, buf_len);
+exit:
+ if (pmac_frame_begin) {
+ MFREE(cfg->osh, pmac_frame_begin, KA_FRAME_SIZE);
+ }
+ if (pbuf) {
+ MFREE(cfg->osh, pbuf, KA_TEMP_BUF_SIZE);
+ }
+ return res;
+}
+
+int
+wl_cfg80211_stop_mkeep_alive(struct bcm_cfg80211 *cfg, uint8 mkeep_alive_id)
+{
+ char *pbuf = NULL;
+ wl_mkeep_alive_pkt_t mkeep_alive_pkt;
+ wl_mkeep_alive_pkt_t *mkeep_alive_pktp = NULL;
+ int res = BCME_ERROR;
+ int i = 0;
+ struct net_device *primary_ndev = NULL;
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+ primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+
+ /*
+ * The mkeep_alive packet is for STA interface only; if the bss is configured as AP,
+ * dongle shall reject a mkeep_alive request.
+ */
+ if (!(dhd->op_mode & DHD_FLAG_STA_MODE))
+ return res;
+
+ WL_TRACE(("%s execution\n", __FUNCTION__));
+
+ /*
+ * Get current mkeep-alive status. Skip ID 0 which is being used for NULL pkt.
+ */
+ if ((pbuf = MALLOCZ(cfg->osh, KA_TEMP_BUF_SIZE)) == NULL) {
+ WL_ERR(("failed to allocate buf with size %d\n", KA_TEMP_BUF_SIZE));
+ res = BCME_NOMEM;
+ return res;
+ }
+
+ res = wldev_iovar_getbuf(primary_ndev, "mkeep_alive", &mkeep_alive_id,
+ sizeof(mkeep_alive_id), pbuf, KA_TEMP_BUF_SIZE, &cfg->ioctl_buf_sync);
+ if (res < 0) {
+ WL_ERR(("%s: Get mkeep_alive failed (error=%d)\n", __FUNCTION__, res));
+ goto exit;
+ } else {
+ /* Check occupied ID */
+ mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) pbuf;
+ WL_DBG(("%s: mkeep_alive\n", __FUNCTION__));
+ WL_DBG((" Id : %d\n"
+ " Period: %d msec\n"
+ " Length: %d\n"
+ " Packet: 0x",
+ mkeep_alive_pktp->keep_alive_id,
+ dtoh32(mkeep_alive_pktp->period_msec),
+ dtoh16(mkeep_alive_pktp->len_bytes)));
+
+ for (i = 0; i < mkeep_alive_pktp->len_bytes; i++) {
+ WL_DBG(("%02x", mkeep_alive_pktp->data[i]));
+ }
+ WL_DBG(("\n"));
+ }
+
+ /* Make it stop if available */
+ if (dtoh32(mkeep_alive_pktp->period_msec) != 0) {
+ WL_INFORM_MEM(("stop mkeep_alive on ID %d\n", mkeep_alive_id));
+ bzero(&mkeep_alive_pkt, sizeof(wl_mkeep_alive_pkt_t));
+
+ mkeep_alive_pkt.period_msec = 0;
+ mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION);
+ mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN);
+ mkeep_alive_pkt.keep_alive_id = mkeep_alive_id;
+
+ res = wldev_iovar_setbuf(primary_ndev, "mkeep_alive",
+ (char *)&mkeep_alive_pkt, WL_MKEEP_ALIVE_FIXED_LEN,
+ pbuf, KA_TEMP_BUF_SIZE, &cfg->ioctl_buf_sync);
+ } else {
+ WL_ERR(("%s: ID %u does not exist.\n", __FUNCTION__, mkeep_alive_id));
+ res = BCME_NOTFOUND;
+ }
+exit:
+ if (pbuf) {
+ MFREE(cfg->osh, pbuf, KA_TEMP_BUF_SIZE);
+ }
+ return res;
+}
+#endif /* KEEP_ALIVE */
+
+s32
+wl_cfg80211_handle_macaddr_change(struct net_device *dev, u8 *macaddr)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ uint8 wait_cnt = WAIT_FOR_DISCONNECT_MAX;
+ u32 status = TRUE;
+
+ if (IS_STA_IFACE(dev->ieee80211_ptr) &&
+ wl_get_drv_status(cfg, CONNECTED, dev)) {
+ /* Mac address change in connected state. The current
+ * connection will become invalid. Issue a disconnect
+ * to the current AP to let it know about the link down
+ */
+ WL_INFORM_MEM(("macaddr change in connected state. Force disassoc.\n"));
+ wl_cfg80211_disassoc(dev, WLAN_REASON_DEAUTH_LEAVING);
+
+ while ((status = wl_get_drv_status(cfg, CONNECTED, dev)) && wait_cnt) {
+ WL_DBG(("Waiting for disconnection, wait_cnt: %d\n", wait_cnt));
+ wait_cnt--;
+ OSL_SLEEP(50);
+ }
+ }
+ return BCME_OK;
+}
+
+int
+wl_cfg80211_handle_hang_event(struct net_device *ndev, uint16 hang_reason, uint32 memdump_type)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+
+ WL_INFORM_MEM(("hang reason = %d, memdump_type =%d\n",
+ hang_reason, memdump_type));
+
+ /* check if pre-registered mac matches the mac from dongle via WLC_E_LINK */
+ if (wl_get_drv_status(cfg, READY, ndev)) {
+#ifdef WL_CFGVENDOR_SEND_HANG_EVENT
+ wl_copy_hang_info_if_falure(ndev,
+ hang_reason, BCME_NOTFOUND);
+#endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
+ SUPP_LOG(("Err. hang reason:%d, dump_type:%d\n", hang_reason, memdump_type));
+ wl_flush_fw_log_buffer(ndev, FW_LOGSET_MASK_ALL);
+ /* If the dongle is down due to a previous hang or other conditions,
+ * sending one more hang notification is not needed.
+ */
+
+ if (dhd_query_bus_erros(dhd)) {
+ return BCME_ERROR;
+ }
+ dhd->iface_op_failed = TRUE;
+#if defined(DHD_FW_COREDUMP)
+ if (dhd->memdump_enabled) {
+ dhd->memdump_type = memdump_type;
+ dhd_bus_mem_dump(dhd);
+ }
+#endif /* DHD_FW_COREDUMP */
+#if defined(BCMDONGLEHOST) && defined(OEM_ANDROID)
+ WL_ERR(("Notify hang event to upper layer \n"));
+ dhd->hang_reason = hang_reason;
+ net_os_send_hang_message(ndev);
+#endif /* BCMDONGLEHOST && OEM_ANDROID */
+ }
+
+ return BCME_OK;
+}
+
+static void
+wl_cfg80211_spmk_pmkdb_change_pmk_type(struct bcm_cfg80211 *cfg, pmkid_list_v3_t *pmk_list)
+{
+ int i;
+ pmkid_list_v3_t *spmk_list = NULL;
+
+ if (!cfg || !cfg->spmk_info_list || !cfg->spmk_info_list->pmkids.count) {
+ return;
+ }
+
+ spmk_list = &cfg->spmk_info_list->pmkids;
+ for (i = 0; i < spmk_list->count; i++) {
+ if (memcmp(&pmk_list->pmkid->bssid,
+ &spmk_list->pmkid[i].bssid, ETHER_ADDR_LEN)) {
+ continue; /* different MAC */
+ }
+ WL_INFORM_MEM(("SPMK replace idx:%d bssid: "MACF " to SSID: %d\n", i,
+ ETHER_TO_MACF(pmk_list->pmkid->bssid), spmk_list->pmkid[i].ssid_len));
+ bzero(&pmk_list->pmkid->bssid, ETHER_ADDR_LEN);
+ pmk_list->pmkid->ssid_len = spmk_list->pmkid[i].ssid_len;
+ (void)memcpy_s(pmk_list->pmkid->ssid, spmk_list->pmkid[i].ssid_len,
+ spmk_list->pmkid[i].ssid, spmk_list->pmkid[i].ssid_len);
+ }
+}
+
+static void
+wl_cfg80211_spmk_pmkdb_del_spmk(struct bcm_cfg80211 *cfg, struct cfg80211_pmksa *pmksa)
+{
+ pmkid_list_v3_t *spmk_list = NULL;
+ bool bFound = FALSE;
+ int i;
+
+ if (!cfg || !cfg->spmk_info_list || !cfg->spmk_info_list->pmkids.count) {
+ return;
+ }
+
+ spmk_list = &cfg->spmk_info_list->pmkids;
+ for (i = 0; i < spmk_list->count; i++) {
+ if (eacmp(&pmksa->bssid, &spmk_list->pmkid[i].bssid)) {
+ continue; /* different MAC */
+ }
+ bFound = TRUE;
+ break;
+ }
+ WL_INFORM_MEM(("wl_cfg80211_del_pmksa "MACDBG "found:%d(idx:%d)",
+ MAC2STRDBG(spmk_list->pmkid[i].bssid.octet), bFound, i));
+ if (!bFound) {
+ return;
+ }
+
+ for (; i < spmk_list->count - 1; i++) {
+ memcpy_s(&spmk_list->pmkid[i], sizeof(pmkid_v3_t),
+ &spmk_list->pmkid[i + 1], sizeof(pmkid_v3_t));
+ }
+ spmk_list->count--;
+}
+
+static void
+wl_cfg80211_handle_set_ssid_complete(struct bcm_cfg80211 *cfg, wl_assoc_status_t *as,
+ const wl_event_msg_t *event, wl_assoc_state_t assoc_state)
+{
+ if (as->status != WLC_E_STATUS_SUCCESS) {
+#ifdef CUSTOMER_HW6
+ /* Some older chips send SET_SSID with a fail status and
+ * still proceed with the join. Exempt them for the sake of
+ * compatibility
+ */
+ return;
+#endif /* CUSTOMER_HW6 */
+#ifdef DHD_ENABLE_BIGDATA_LOGGING
+ wl_get_connect_failed_status(cfg, event);
+#endif /* DHD_ENABLE_BIGDATA_LOGGING */
+
+#ifdef SET_SSID_FAIL_CUSTOM_RC
+ if (as->status == WLC_E_STATUS_TIMEOUT) {
+ WL_INFORM_MEM(("overriding reason code %d to %d\n",
+ as->reason, SET_SSID_FAIL_CUSTOM_RC));
+ as->reason = SET_SSID_FAIL_CUSTOM_RC;
+ }
+#endif /* SET_SSID_FAIL_CUSTOM_RC */
+
+ /* Report connect failure */
+ as->link_action = wl_set_link_action(assoc_state, false);
+ }
+#ifdef WL_NAN
+ else if ((as->status == WLC_E_STATUS_SUCCESS) &&
+ wl_cfgnan_is_enabled(cfg) &&
+ wl_get_drv_status(cfg, CONNECTED, as->ndev)) {
+ u8 *curbssid = wl_read_prof(cfg, as->ndev, WL_PROF_BSSID);
+ u8 *conn_req_bssid =
+ wl_read_prof(cfg, as->ndev, WL_PROF_LATEST_BSSID);
+
+ if (memcmp(curbssid, conn_req_bssid, ETHER_ADDR_LEN) == 0) {
+ wl_cfgnan_get_stats(cfg);
+ }
+ }
+#endif /* WL_NAN */
+
+ return;
+}
+
+#ifdef WL_TWT
+static s32
+wl_notify_twt_event(struct bcm_cfg80211 *cfg,
+ bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data)
+{
+ uint32 type;
+ type = ntoh32(e->event_type);
+ WL_DBG(("TWT event type %d\n", type));
+ return BCME_OK;
+}
+#endif /* WL_TWT */
+
+#define CHECK_AND_INCR_LEN(ret, len, maxlen) \
+ do { \
+ if ((ret < 0) || ((ret + len) > maxlen)) \
+ return len; \
+ len += ret; \
+ } while (0)
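+/* The macro appends one snprintf() chunk: on error, or if the chunk would
+ * overflow buf, it bails out returning the length written so far; otherwise
+ * it advances len.
+ */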
+u32
+wl_cfg80211_debug_data_dump(struct net_device *dev, u8 *buf, u32 buf_len)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ u32 len = 0;
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+ s32 ret = 0;
+
+ BCM_REFERENCE(dhdp);
+ if ((!wl_get_drv_status(cfg, READY, bcmcfg_to_prmry_ndev(cfg)))) {
+ WL_INFORM(("driver not up.\n"));
+ return 0;
+ }
+
+ ret = snprintf(buf, buf_len, "\n[BCMLINUX]\nlock info:\n");
+ CHECK_AND_INCR_LEN(ret, len, buf_len);
+
+ ret = snprintf(buf+len, buf_len-len,
+ "rtnl:%d\n"
+ "scan_sync:%d\n"
+ "usr_sync:%d\n"
+ "if_sync:%d\n"
+ "event_sync:%d\n"
+ "ioctl_buf:%d\n",
+ rtnl_is_locked(), mutex_is_locked(&cfg->scan_sync),
+ mutex_is_locked(&cfg->usr_sync), mutex_is_locked(&cfg->if_sync),
+ mutex_is_locked(&cfg->event_sync), mutex_is_locked(&cfg->ioctl_buf_sync));
+ CHECK_AND_INCR_LEN(ret, len, buf_len);
+
+#ifdef WL_NAN
+ ret = snprintf(buf+len, buf_len-len, "nan:%d\n", mutex_is_locked(&cfg->nancfg->nan_sync));
+ CHECK_AND_INCR_LEN(ret, len, buf_len);
+#endif /* WL_NAN */
+#ifdef RTT_SUPPORT
+ {
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhdp);
+ ret = snprintf(buf+len, buf_len-len, "rtt:%d\n",
+ mutex_is_locked(&rtt_status->rtt_mutex));
+ CHECK_AND_INCR_LEN(ret, len, buf_len);
+ ret = snprintf(buf+len, buf_len-len, "geofence:%d\n",
+ mutex_is_locked(&(rtt_status)->geofence_mutex));
+ CHECK_AND_INCR_LEN(ret, len, buf_len);
+ }
+#endif /* RTT_SUPPORT */
+#ifdef WL_BCNRECV
+ ret = snprintf(buf+len, buf_len-len, "bcn_sync:%d\n", mutex_is_locked(&cfg->bcn_sync));
+ CHECK_AND_INCR_LEN(ret, len, buf_len);
+#endif /* WL_BCNRECV */
+#ifdef WLTDLS
+ ret = snprintf(buf+len, buf_len-len, "tdls_sync:%d\n", mutex_is_locked(&cfg->tdls_sync));
+ CHECK_AND_INCR_LEN(ret, len, buf_len);
+#endif /* WLTDLS */
+ ret = snprintf(buf+len, buf_len-len, "cfgdrv:%d\n", spin_is_locked(&cfg->cfgdrv_lock));
+ CHECK_AND_INCR_LEN(ret, len, buf_len);
+
+ ret = snprintf(buf+len, buf_len-len, "vndr_oui:%d\n", spin_is_locked(&cfg->vndr_oui_sync));
+ CHECK_AND_INCR_LEN(ret, len, buf_len);
+
+ ret = snprintf(buf+len, buf_len-len, "net_list:%d\n", spin_is_locked(&cfg->net_list_sync));
+ CHECK_AND_INCR_LEN(ret, len, buf_len);
+
+ ret = snprintf(buf+len, buf_len-len, "eq_lock:%d\n", spin_is_locked(&cfg->eq_lock));
+ CHECK_AND_INCR_LEN(ret, len, buf_len);
+
+#ifdef WL_WPS_SYNC
+ ret = snprintf(buf+len, buf_len-len, "wps:%d\n", spin_is_locked(&cfg->wps_sync));
+ CHECK_AND_INCR_LEN(ret, len, buf_len);
+#endif /* WL_WPS_SYNC */
+ ret = snprintf(buf+len, buf_len-len, "eidx.in_progress:0x%x eidx.event:0x%x",
+ cfg->eidx.in_progress, cfg->eidx.event_type);
+ CHECK_AND_INCR_LEN(ret, len, buf_len);
+ return len;
+}
+
+#ifdef WL_CLIENT_SAE
+static bool
+wl_is_pmkid_available(struct net_device *dev, const u8 *bssid)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ int i;
+
+ /* check whether the bssid is null */
+ if (!bssid) return FALSE;
+
+ for (i = 0; i < cfg->pmk_list->pmkids.count; i++) {
+ if (!memcmp(bssid, &cfg->pmk_list->pmkids.pmkid[i].bssid,
+ ETHER_ADDR_LEN)) {
+ return TRUE;
+ }
+ }
+ return FALSE;
+}
+
+#define WL_AUTH_START_EVT_V0 0
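+/* Rough SAE external-auth flow (sketch): the fw posts an auth-start event;
+ * wl_notify_start_auth() forwards NL80211_EXTERNAL_AUTH_START to the
+ * supplicant and pauses assoc via "assoc_mgr_cmd"; SAE auth frames flow
+ * through wl_handle_auth_event() (rx) and wl_cfg80211_mgmt_auth_tx() (tx);
+ * wl_cfg80211_external_auth() then clears the pause (PAUSE_ON_EVT with
+ * EVENT_NONE) so association can proceed.
+ */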
+static s32
+wl_notify_start_auth(struct bcm_cfg80211 *cfg,
+ bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data)
+{
+ struct cfg80211_external_auth_params ext_auth_param;
+ struct net_device *ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+ u32 datalen = be32_to_cpu(e->datalen);
+#ifdef WL_AUTH_START_EVT_V0
+ wl_ext_auth_evt_t *evt_v0_data = (wl_ext_auth_evt_t *)data;
+#else
+ wl_auth_start_evt_t *evt_v1_data = (wl_auth_start_evt_t *)data;
+#endif
+ wl_assoc_mgr_cmd_t cmd;
+ struct wireless_dev *wdev = ndev->ieee80211_ptr;
+ int err, retry = 3;
+
+ WL_DBG(("Enter\n"));
+
+ if (!datalen || !data) {
+ WL_ERR(("Invalid data for auth start event\n"));
+ return BCME_ERROR;
+ }
+
+ ext_auth_param.action = NL80211_EXTERNAL_AUTH_START;
+ ext_auth_param.key_mgmt_suite = ntoh32(WLAN_AKM_SUITE_SAE_SHA256);
+#ifdef WL_AUTH_START_EVT_V0
+ (void)memcpy_s(&ext_auth_param.bssid, ETHER_ADDR_LEN, &evt_v0_data->bssid, ETHER_ADDR_LEN);
+ ext_auth_param.ssid.ssid_len = MIN(evt_v0_data->ssid.SSID_len, DOT11_MAX_SSID_LEN);
+ if (ext_auth_param.ssid.ssid_len) {
+ (void)memcpy_s(&ext_auth_param.ssid.ssid, ext_auth_param.ssid.ssid_len,
+ evt_v0_data->ssid.SSID, ext_auth_param.ssid.ssid_len);
+ }
+ WL_MSG(ndev->name, "BSSID "MACDBG"\n", MAC2STRDBG(&evt_v0_data->bssid));
+#else
+ (void)memcpy_s(&ext_auth_param.bssid, ETHER_ADDR_LEN, &evt_v1_data->bssid, ETHER_ADDR_LEN);
+ ext_auth_param.ssid.ssid_len = MIN(evt_v1_data->ssid.SSID_len, DOT11_MAX_SSID_LEN);
+ if (ext_auth_param.ssid.ssid_len) {
+ (void)memcpy_s(&ext_auth_param.ssid.ssid, ext_auth_param.ssid.ssid_len,
+ evt_v1_data->ssid.SSID, ext_auth_param.ssid.ssid_len);
+ }
+ WL_MSG(ndev->name, "BSSID "MACDBG", version=%d\n",
+ MAC2STRDBG(&evt_v1_data->bssid), evt_v1_data->version);
+#endif
+
+ /* Wait for conn_owner_nlportid to be assigned in nl80211_connect */
+ for (retry = 3; retry > 0; retry--) {
+ if (wdev->conn_owner_nlportid) {
+ break;
+ }
+ wl_delay(10);
+ }
+
+ err = cfg80211_external_auth_request(ndev, &ext_auth_param, GFP_KERNEL);
+ if (err) {
+ WL_ERR(("Send external auth request failed, ret %d\n", err));
+ err = wldev_ioctl_set(ndev, WLC_DISASSOC, NULL, 0);
+ if (err < 0) {
+ WL_ERR(("WLC_DISASSOC error %d\n", err));
+ }
+ return BCME_ERROR;
+ }
+
+ cmd.version = WL_ASSOC_MGR_CURRENT_VERSION;
+ cmd.length = sizeof(cmd);
+ cmd.cmd = WL_ASSOC_MGR_CMD_PAUSE_ON_EVT;
+ cmd.params = WL_ASSOC_MGR_PARAMS_PAUSE_EVENT_AUTH_RESP;
+ err = wldev_iovar_setbuf(ndev, "assoc_mgr_cmd", (void *)&cmd, sizeof(cmd), cfg->ioctl_buf,
+ WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync);
+ if (unlikely(err)) {
+ WL_ERR(("Failed to pause assoc(%d)\n", err));
+ }
+
+ return BCME_OK;
+}
+
+s32
+wl_handle_auth_event(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data)
+{
+ bcm_struct_cfgdev *cfgdev = ndev_to_cfgdev(ndev);
+ u8 bsscfgidx = e->bsscfgidx;
+ u8 *mgmt_frame = NULL;
+ u8 *body = NULL;
+ u32 body_len = 0;
+ s32 chan;
+ chanspec_t chanspec;
+ s32 freq;
+ struct ether_addr da;
+ struct ether_addr bssid;
+ u32 len = ntoh32(e->datalen);
+ u32 status = ntoh32(e->status);
+ int err = BCME_OK;
+
+ if (wl_get_mode_by_netdev(cfg, ndev) == WL_INVALID) {
+ return WL_INVALID;
+ }
+
+ if (!len) {
+ WL_ERR(("WLC_E_AUTH has no payload. status %d reason %d\n",
+ status, ntoh32(e->reason)));
+#ifdef WL_EXT_IAPSTA
+ if (status != WLC_E_STATUS_SUCCESS)
+ wl_ext_in4way_sync(ndev, STA_NO_BTC_IN4WAY, WL_EXT_STATUS_DISCONNECTED, NULL);
+#endif
+ return WL_INVALID;
+ }
+
+ body = (u8 *)MALLOCZ(cfg->osh, len);
+ if (body == NULL) {
+ WL_ERR(("Failed to allocate body\n"));
+ return WL_INVALID;
+ }
+ (void)memcpy_s(body, len, data, len);
+
+ err = wldev_iovar_getbuf_bsscfg(ndev, "cur_etheraddr",
+ NULL, 0, cfg->ioctl_buf, WLC_IOCTL_SMLEN, bsscfgidx, &cfg->ioctl_buf_sync);
+ if (unlikely(err)) {
+ MFREE(cfg->osh, body, len);
+ WL_ERR(("Could not get cur_etheraddr %d\n", err));
+ return err;
+ }
+ (void)memcpy_s(da.octet, ETHER_ADDR_LEN, cfg->ioctl_buf, ETHER_ADDR_LEN);
+
+ bzero(&bssid, sizeof(bssid));
+ err = wldev_ioctl_get(ndev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN);
+ /* Use e->addr as the BSSID in the STA case, before association completes */
+ if (err == BCME_NOTASSOCIATED) {
+ (void)memcpy_s(&bssid, ETHER_ADDR_LEN, &e->addr, ETHER_ADDR_LEN);
+ err = BCME_OK;
+ }
+ if (unlikely(err)) {
+ MFREE(cfg->osh, body, len);
+ WL_ERR(("Could not get bssid %d\n", err));
+ return err;
+ }
+
+ err = wldev_iovar_getint(ndev, "chanspec", &chan);
+ if (unlikely(err)) {
+ MFREE(cfg->osh, body, len);
+ WL_ERR(("Could not get chanspec %d\n", err));
+ return err;
+ }
+
+ chanspec = wl_chspec_driver_to_host(chan);
+ freq = wl_channel_to_frequency(wf_chspec_ctlchan(chanspec), CHSPEC_BAND(chanspec));
+
+ body_len = len;
+ err = wl_frame_get_mgmt(cfg, FC_AUTH, &da, &e->addr, &bssid,
+ &mgmt_frame, &len, body);
+ if (!err) {
+#ifdef WL_EXT_IAPSTA
+ wl_ext_update_extsae_4way(ndev, (struct ieee80211_mgmt *)mgmt_frame, FALSE);
+#endif
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
+ cfg80211_rx_mgmt(cfgdev, freq, 0, mgmt_frame, len, 0);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0))
+ cfg80211_rx_mgmt(cfgdev, freq, 0, mgmt_frame, len, 0, GFP_ATOMIC);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
+ cfg80211_rx_mgmt(cfgdev, freq, 0, mgmt_frame, len, GFP_ATOMIC);
+#else
+ cfg80211_rx_mgmt(ndev, freq, mgmt_frame, len, GFP_ATOMIC);
+#endif
+ MFREE(cfg->osh, mgmt_frame, len);
+ }
+
+ if (body) {
+ MFREE(cfg->osh, body, body_len);
+ }
+
+ return BCME_OK;
+}
+
+/** Called by the cfg80211 framework */
+static s32
+wl_cfg80211_external_auth(struct wiphy *wiphy,
+ struct net_device *ndev, struct cfg80211_external_auth_params *ext_auth_param)
+{
+ int err = 0;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ wl_assoc_mgr_cmd_t cmd;
+
+ WL_DBG(("Enter\n"));
+
+ if (!ext_auth_param ||
+ ETHER_ISNULLADDR(ext_auth_param->bssid)) {
+ WL_ERR(("Invalid param\n"));
+ return -EINVAL;
+ }
+
+ cmd.version = WL_ASSOC_MGR_CURRENT_VERSION;
+ cmd.length = sizeof(cmd);
+ cmd.cmd = WL_ASSOC_MGR_CMD_PAUSE_ON_EVT;
+ cmd.params = WL_ASSOC_MGR_PARAMS_EVENT_NONE;
+ err = wldev_iovar_setbuf(ndev, "assoc_mgr_cmd", (void *)&cmd, sizeof(cmd),
+ cfg->ioctl_buf, WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync);
+ if (unlikely(err)) {
+ WL_ERR(("Failed to pause assoc(%d)\n", err));
+ }
+
+ return err;
+}
+
+static s32
+wl_cfg80211_mgmt_auth_tx(struct net_device *dev, bcm_struct_cfgdev *cfgdev,
+ struct bcm_cfg80211 *cfg, const u8 *buf, size_t len, s32 bssidx, u64 *cookie)
+{
+ int err = 0;
+ wl_assoc_mgr_cmd_t *cmd;
+ char *ambuf = NULL;
+ int param_len;
+ bool ack = true;
+
+ param_len = sizeof(wl_assoc_mgr_cmd_t) + len;
+ ambuf = MALLOCZ(cfg->osh, param_len);
+ if (ambuf == NULL) {
+ WL_ERR(("unable to allocate frame\n"));
+ return -ENOMEM;
+ }
+
+ cmd = (wl_assoc_mgr_cmd_t*)ambuf;
+ cmd->version = WL_ASSOC_MGR_CURRENT_VERSION;
+ cmd->length = len;
+ cmd->cmd = WL_ASSOC_MGR_CMD_SEND_AUTH;
+ err = memcpy_s(&cmd->params, len, buf, len);
+ if (err) {
+ WL_ERR(("Failed to copy cmd params(%d)\n", err));
+ ack = false;
+ } else {
+ err = wldev_iovar_setbuf(dev, "assoc_mgr_cmd", ambuf, param_len,
+ cfg->ioctl_buf, WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync);
+ if (unlikely(err)) {
+ WL_ERR(("Failed to send auth(%d)\n", err));
+ ack = false;
+ }
+#ifdef WL_EXT_IAPSTA
+ else {
+ const struct ieee80211_mgmt *mgmt = (const struct ieee80211_mgmt *)buf;
+ wl_ext_update_extsae_4way(dev, mgmt, TRUE);
+ }
+#endif
+ }
+
+ MFREE(cfg->osh, ambuf, param_len);
+
+ cfg80211_mgmt_tx_status(cfgdev, *cookie, buf, len, ack, GFP_KERNEL);
+ return BCME_OK;
+}
+#endif /* WL_CLIENT_SAE */
+
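+/*
+ * Private-command helper; observed behavior: parses the integer following
+ * the command keyword into cfg->autochannel. A value of 0 clears the cached
+ * best 2G/5G channels; a value of 2 writes "2g=<ch> 5g=<ch>" back into the
+ * command buffer and returns the number of bytes written.
+ */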
+s32
+wl_cfg80211_autochannel(struct net_device *dev, char* command, int total_len)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ int ret = 0;
+ int bytes_written = -1;
+
+ sscanf(command, "%*s %d", &cfg->autochannel);
+
+ if (cfg->autochannel == 0) {
+ cfg->best_2g_ch = 0;
+ cfg->best_5g_ch = 0;
+ } else if (cfg->autochannel == 2) {
+ bytes_written = snprintf(command, total_len, "2g=%d 5g=%d",
+ cfg->best_2g_ch, cfg->best_5g_ch);
+ WL_TRACE(("command result is %s\n", command));
+ ret = bytes_written;
+ }
+
+ return ret;
+}
+
+#ifdef WL_STATIC_IF
+bool
+wl_cfg80211_static_if(struct bcm_cfg80211 *cfg, struct net_device *ndev)
+{
+ int i;
+
+ if (!cfg)
+ return FALSE;
+
+ for (i=0; i<DHD_MAX_STATIC_IFS; i++) {
+ if (cfg->static_ndev[i] == ndev)
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+int
+wl_cfg80211_static_ifidx(struct bcm_cfg80211 *cfg, struct net_device *ndev)
+{
+ int i;
+
+ if (!cfg)
+ return -1;
+
+ for (i=0; i<DHD_MAX_STATIC_IFS; i++) {
+ if (cfg->static_ndev[i] == ndev)
+ return i;
+ }
+
+ return -1;
+}
+
+struct net_device *
+wl_cfg80211_static_if_active(struct bcm_cfg80211 *cfg)
+{
+ int i;
+
+ if (!cfg)
+ return NULL;
+
+ for (i=0; i<DHD_MAX_STATIC_IFS; i++) {
+ if (cfg->static_ndev[i] && (cfg->static_ndev_state[i] & NDEV_STATE_FW_IF_CREATED))
+ return cfg->static_ndev[i];
+ }
+
+ return NULL;
+}
+
+int
+wl_cfg80211_static_if_name(struct bcm_cfg80211 *cfg, const char *name)
+{
+ int i;
+
+ if (!cfg)
+ return -1;
+
+ for (i=0; i<DHD_MAX_STATIC_IFS; i++) {
+ if (cfg->static_ndev[i] && (!strncmp(cfg->static_ndev[i]->name, name, strlen(name))))
+ return i;
+ }
+
+ return -1;
+}
+
+void
+wl_cfg80211_static_if_dev_close(struct net_device *dev)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ int i;
+
+ if (!cfg)
+ return;
+
+ for (i=0; i<DHD_MAX_STATIC_IFS; i++) {
+ if (cfg->static_ndev[i] && (cfg->static_ndev[i]->flags & IFF_UP))
+ dev_close(cfg->static_ndev[i]);
+ }
+
+ return;
+}
+#endif /* WL_STATIC_IF */
diff --git a/bcmdhd.101.10.361.x/wl_cfg80211.h b/bcmdhd.101.10.361.x/wl_cfg80211.h
new file mode 100755
index 0000000..52faf2d
--- /dev/null
+++ b/bcmdhd.101.10.361.x/wl_cfg80211.h
@@ -0,0 +1,3087 @@
+/*
+ * Linux cfg80211 driver
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+/**
+ * Older Linux versions support the 'iw' interface, more recent ones the 'cfg80211' interface.
+ */
+
+#ifndef _wl_cfg80211_h_
+#define _wl_cfg80211_h_
+
+#include <linux/wireless.h>
+#include <typedefs.h>
+#include <ethernet.h>
+#include <wlioctl.h>
+#include <linux/wireless.h>
+#include <net/cfg80211.h>
+#include <linux/rfkill.h>
+#include <osl.h>
+#if defined(BCMDONGLEHOST)
+#include <dngl_stats.h>
+#include <dhd.h>
+#endif /* BCMDONGLEHOST */
+
+#define WL_CFG_DRV_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define WL_CFG_DRV_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
+
+#define WL_CFG_WPS_SYNC_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define WL_CFG_WPS_SYNC_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
+
+#define WL_CFG_NET_LIST_SYNC_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define WL_CFG_NET_LIST_SYNC_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
+
+#define WL_CFG_EQ_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define WL_CFG_EQ_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
+
+#define WL_CFG_BAM_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define WL_CFG_BAM_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
+
+#define WL_CFG_VNDR_OUI_SYNC_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define WL_CFG_VNDR_OUI_SYNC_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
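+
+/*
+ * The *_LOCK/*_UNLOCK pairs above are thin wrappers over osl_spin_lock()/
+ * osl_spin_unlock(): each lock saves the returned IRQ flags into 'flags'
+ * and the matching unlock restores them, analogous to
+ * spin_lock_irqsave()/spin_unlock_irqrestore().
+ */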
+
+#include <wl_cfgp2p.h>
+#include <wl_android.h>
+#ifdef WL_NAN
+#include <wl_cfgnan.h>
+#endif /* WL_NAN */
+#ifdef WL_BAM
+#include <wl_bam.h>
+#endif /* WL_BAM */
+
+#ifdef BIGDATA_SOFTAP
+#include <wl_bigdata.h>
+#endif /* BIGDATA_SOFTAP */
+#include <dhd_dbg.h>
+
+struct wl_conf;
+struct wl_iface;
+struct bcm_cfg80211;
+struct wl_security;
+struct wl_ibss;
+
+/* Enable by default */
+#define WL_WTC
+
+/*
+ * Common feature. If this becomes customer specific,
+ * move it to customer specific makefile when required
+ */
+#define WL_5G_SOFTAP_ONLY_ON_DEF_CHAN
+
+#if !defined(WL_CLIENT_SAE) && (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0))
+#define WL_CLIENT_SAE
+#endif
+#if defined(WL_SAE) && (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
+#error "Can not support WL_SAE befor kernel 3.14"
+#endif
+#if defined(WL_CLIENT_SAE) && (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0))
+#error "Can not support WL_CLIENT_SAE before kernel 3.10"
+#endif
+#if defined(WL_CLIENT_SAE) && defined(WL_SAE)
+#error "WL_SAE is for dongle-offload and WL_CLIENT_SAE is for wpa_supplicant. Please choose one."
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) && !defined(WL_SCAN_TYPE))
+#define WL_SCAN_TYPE
+#endif /* WL_SCAN_TYPE */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) && !defined(WL_FILS)
+#define WL_FILS
+#endif /* WL_FILS */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0)) && !defined(WL_FILS_ROAM_OFFLD)
+#define WL_FILS_ROAM_OFFLD
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0))
+/* Use driver managed regd */
+#define WL_SELF_MANAGED_REGDOM
+#endif /* KERNEL >= 4.0 */
+
+#define CH_TO_CHSPC(band, _channel) \
+ ((_channel | band) | WL_CHANSPEC_BW_20 | WL_CHANSPEC_CTL_SB_NONE)
+#define CHAN2G(_channel, _freq, _flags) { \
+ .band = IEEE80211_BAND_2GHZ, \
+ .center_freq = (_freq), \
+ .hw_value = CH_TO_CHSPC(WL_CHANSPEC_BAND_2G, _channel), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
+
+#define CHAN5G(_channel, _flags) { \
+ .band = IEEE80211_BAND_5GHZ, \
+ .center_freq = 5000 + (5 * (_channel)), \
+ .hw_value = CH_TO_CHSPC(WL_CHANSPEC_BAND_5G, _channel), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
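+
+/*
+ * Example (derived from the macros above): CHAN5G(36, 0) yields
+ * center_freq = 5000 + 5 * 36 = 5180 MHz, with hw_value set to the
+ * corresponding 20 MHz chanspec in the 5 GHz band.
+ */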
+
+#ifdef CFG80211_6G_SUPPORT
+#define CHAN6G(_channel, _flags) { \
+ .band = IEEE80211_BAND_6GHZ, \
+ .center_freq = 5950 + (5 * (_channel)), \
+ .hw_value = CH_TO_CHSPC(WL_CHANSPEC_BAND_6G, _channel), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
+
+#define CHAN6G_CHAN2(_flags) { \
+ .band = IEEE80211_BAND_6GHZ, \
+ .center_freq = 5935, \
+ .hw_value = 0x5002, \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
+#else
+#define CHAN6G(_channel, _flags) { \
+ .band = IEEE80211_BAND_5GHZ, \
+ .center_freq = 5950 + (5 * (_channel)), \
+ .hw_value = CH_TO_CHSPC(WL_CHANSPEC_BAND_6G, _channel), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
+
+#define CHAN6G_CHAN2(_flags) { \
+ .band = IEEE80211_BAND_5GHZ, \
+ .center_freq = 5935, \
+ .hw_value = 0x5002, \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
+#endif /* CFG80211_6G_SUPPORT */
+
+#ifdef WL_SAE
+#define IS_AKM_SAE(akm) (akm == WLAN_AKM_SUITE_SAE)
+#else
+#define IS_AKM_SAE(akm) FALSE
+#endif
+#ifdef WL_OWE
+#define IS_AKM_OWE(akm) (akm == WLAN_AKM_SUITE_OWE)
+#else
+#define IS_AKM_OWE(akm) FALSE
+#endif
+
+#if defined(IL_BIGENDIAN)
+#include <bcmendian.h>
+#define htod32(i) (bcmswap32(i))
+#define htod16(i) (bcmswap16(i))
+#define dtoh64(i) (bcmswap64(i))
+#define dtoh32(i) (bcmswap32(i))
+#define dtoh16(i) (bcmswap16(i))
+#define htodchanspec(i) htod16(i)
+#define dtohchanspec(i) dtoh16(i)
+#else
+#define htod32(i) (i)
+#define htod16(i) (i)
+#define dtoh64(i) (i)
+#define dtoh32(i) (i)
+#define dtoh16(i) (i)
+#define htodchanspec(i) (i)
+#define dtohchanspec(i) (i)
+#endif /* IL_BIGENDIAN */
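+
+/*
+ * The htodNN()/dtohNN() helpers above convert between host and dongle byte
+ * order: byte swaps on big-endian hosts (IL_BIGENDIAN), no-ops otherwise.
+ */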
+
+#define WL_DBG_NONE 0
+#define WL_DBG_P2P_ACTION (1 << 5)
+#define WL_DBG_TRACE (1 << 4)
+#define WL_DBG_SCAN (1 << 3)
+#define WL_DBG_DBG (1 << 2)
+#define WL_DBG_INFO (1 << 1)
+#define WL_DBG_ERR (1 << 0)
+
+#ifndef WAIT_FOR_DISCONNECT_MAX
+#define WAIT_FOR_DISCONNECT_MAX 10
+#endif /* WAIT_FOR_DISCONNECT_MAX */
+#define WAIT_FOR_DISCONNECT_STATE_SYNC 10
+
+#if defined(CONFIG_6GHZ_BKPORT) || (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0))
+/* Native 6GHz band support is available. For backported
+ * kernels, kernel/customer makefiles should explicitly
+ * define CONFIG_6GHZ_BKPORT
+ */
+#if defined(WL_6G_BAND)
+#define CFG80211_6G_SUPPORT
+#endif
+#endif /* CONFIG_6GHZ_BKPORT || LINUX_VER >= 5.4 */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0))
+/* Newer kernels use defines from nl80211.h */
+#define IEEE80211_BAND_2GHZ NL80211_BAND_2GHZ
+#define IEEE80211_BAND_5GHZ NL80211_BAND_5GHZ
+#define IEEE80211_BAND_60GHZ NL80211_BAND_60GHZ
+#ifdef CFG80211_6G_SUPPORT
+#define IEEE80211_BAND_6GHZ NL80211_BAND_6GHZ
+#endif /* CFG80211_6G_SUPPORT */
+#define IEEE80211_NUM_BANDS NUM_NL80211_BANDS
+#endif /* LINUX_VER >= 4.7 */
+
+/* Max BAND support */
+#define WL_MAX_BAND_SUPPORT 3
+
+#ifdef DHD_LOG_DUMP
+extern void dhd_log_dump_write(int type, char *binary_data,
+ int binary_len, const char *fmt, ...);
+extern char *dhd_log_dump_get_timestamp(void);
+extern char *dhd_dbg_get_system_timestamp(void);
+#ifndef _DHD_LOG_DUMP_DEFINITIONS_
+#define DHD_LOG_DUMP_WRITE(fmt, ...) \
+ dhd_log_dump_write(DLD_BUF_TYPE_GENERAL, NULL, 0, fmt, ##__VA_ARGS__)
+#define DHD_LOG_DUMP_WRITE_EX(fmt, ...) \
+ dhd_log_dump_write(DLD_BUF_TYPE_SPECIAL, NULL, 0, fmt, ##__VA_ARGS__)
+#define DHD_LOG_DUMP_WRITE_PRSRV(fmt, ...) \
+ dhd_log_dump_write(DLD_BUF_TYPE_PRESERVE, NULL, 0, fmt, ##__VA_ARGS__)
+#endif /* !_DHD_LOG_DUMP_DEFINITIONS_ */
+
+#ifndef DHD_LOG_DUMP_RING_DEFINITIONS
+#define DHD_PREFIX_TS "[%s]: ", dhd_log_dump_get_timestamp()
+#define DHD_PREFIX_TS_FN "[%s] %s: ", dhd_log_dump_get_timestamp(), __func__
+
+#define DHD_LOG_DUMP_WRITE_TS DHD_LOG_DUMP_WRITE(DHD_PREFIX_TS)
+#define DHD_LOG_DUMP_WRITE_TS_FN DHD_LOG_DUMP_WRITE(DHD_PREFIX_TS_FN)
+
+#define DHD_LOG_DUMP_WRITE_EX_TS DHD_LOG_DUMP_WRITE_EX(DHD_PREFIX_TS)
+#define DHD_LOG_DUMP_WRITE_EX_TS_FN DHD_LOG_DUMP_WRITE_EX(DHD_PREFIX_TS_FN)
+
+#define DHD_LOG_DUMP_WRITE_PRSRV_TS DHD_LOG_DUMP_WRITE_PRSRV(DHD_PREFIX_TS)
+#define DHD_LOG_DUMP_WRITE_PRSRV_TS_FN DHD_LOG_DUMP_WRITE_PRSRV(DHD_PREFIX_TS_FN)
+
+#define DHD_LOG_DUMP_WRITE_ROAM_TS DHD_LOG_DUMP_WRITE(DHD_PREFIX_TS)
+#define DHD_LOG_DUMP_WRITE_ROAM_TS_FN DHD_LOG_DUMP_WRITE(DHD_PREFIX_TS_FN)
+#endif /* DHD_LOG_DUMP_RING_DEFINITIONS */
+#endif /* DHD_LOG_DUMP */
+
+/* Data Element Definitions */
+#define WPS_ID_CONFIG_METHODS 0x1008
+#define WPS_ID_REQ_TYPE 0x103A
+#define WPS_ID_DEVICE_NAME 0x1011
+#define WPS_ID_VERSION 0x104A
+#define WPS_ID_DEVICE_PWD_ID 0x1012
+#define WPS_ID_REQ_DEV_TYPE 0x106A
+#define WPS_ID_SELECTED_REGISTRAR_CONFIG_METHODS 0x1053
+#define WPS_ID_PRIM_DEV_TYPE 0x1054
+
+/* Device Password ID */
+#define DEV_PW_DEFAULT 0x0000
+#define DEV_PW_USER_SPECIFIED 0x0001
+#define DEV_PW_MACHINE_SPECIFIED 0x0002
+#define DEV_PW_REKEY 0x0003
+#define DEV_PW_PUSHBUTTON 0x0004
+#define DEV_PW_REGISTRAR_SPECIFIED 0x0005
+
+/* Config Methods */
+#define WPS_CONFIG_USBA 0x0001
+#define WPS_CONFIG_ETHERNET 0x0002
+#define WPS_CONFIG_LABEL 0x0004
+#define WPS_CONFIG_DISPLAY 0x0008
+#define WPS_CONFIG_EXT_NFC_TOKEN 0x0010
+#define WPS_CONFIG_INT_NFC_TOKEN 0x0020
+#define WPS_CONFIG_NFC_INTERFACE 0x0040
+#define WPS_CONFIG_PUSHBUTTON 0x0080
+#define WPS_CONFIG_KEYPAD 0x0100
+#define WPS_CONFIG_VIRT_PUSHBUTTON 0x0280
+#define WPS_CONFIG_PHY_PUSHBUTTON 0x0480
+#define WPS_CONFIG_VIRT_DISPLAY 0x2008
+#define WPS_CONFIG_PHY_DISPLAY 0x4008
+
+#define PM_BLOCK 1
+#define PM_ENABLE 0
+
+#ifdef SUPPORT_AP_RADIO_PWRSAVE
+#define RADIO_PWRSAVE_PPS 10
+#define RADIO_PWRSAVE_QUIET_TIME 10
+#define RADIO_PWRSAVE_LEVEL 3
+#define RADIO_PWRSAVE_STAS_ASSOC_CHECK 0
+
+#define RADIO_PWRSAVE_LEVEL_MIN 1
+#define RADIO_PWRSAVE_LEVEL_MAX 9
+#define RADIO_PWRSAVE_PPS_MIN 1
+#define RADIO_PWRSAVE_QUIETTIME_MIN 1
+#define RADIO_PWRSAVE_ASSOCCHECK_MIN 0
+#define RADIO_PWRSAVE_ASSOCCHECK_MAX 1
+
+#define RADIO_PWRSAVE_MAJOR_VER 1
+#define RADIO_PWRSAVE_MINOR_VER 1
+#define RADIO_PWRSAVE_MAJOR_VER_SHIFT 8
+#define RADIO_PWRSAVE_VERSION \
+ ((RADIO_PWRSAVE_MAJOR_VER << RADIO_PWRSAVE_MAJOR_VER_SHIFT)| RADIO_PWRSAVE_MINOR_VER)
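+/* e.g. major 1, minor 1 -> (1 << 8) | 1 == 0x0101 */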
+#endif /* SUPPORT_AP_RADIO_PWRSAVE */
+
+#ifdef BCMWAPI_WPI
+#ifdef OEM_ANDROID
+#undef NL80211_WAPI_VERSION_1
+#define NL80211_WAPI_VERSION_1 0
+
+#undef WLAN_AKM_SUITE_WAPI_PSK
+#define WLAN_AKM_SUITE_WAPI_PSK 0x000FACFE /* WAPI */
+
+#undef WLAN_AKM_SUITE_WAPI_CERT
+#define WLAN_AKM_SUITE_WAPI_CERT 0x000FACFF /* WAPI */
+
+#define IS_WAPI_VER(version) (version == NL80211_WAPI_VERSION_1)
+#else
+#undef WLAN_AKM_SUITE_WAPI_PSK
+#define WLAN_AKM_SUITE_WAPI_PSK 0x000FAC04
+
+#undef WLAN_AKM_SUITE_WAPI_CERT
+#define WLAN_AKM_SUITE_WAPI_CERT 0x000FAC12
+
+#undef NL80211_WAPI_VERSION_1
+#define NL80211_WAPI_VERSION_1 1 << 2
+#define IS_WAPI_VER(version) (version & NL80211_WAPI_VERSION_1)
+#endif /* OEM_ANDROID */
+#endif /* BCMWAPI_WPI */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0))
+#define IS_REGDOM_SELF_MANAGED(wiphy) \
+ (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED)
+#else
+#define IS_REGDOM_SELF_MANAGED(wiphy) (false)
+#endif /* KERNEL >= 4.0 */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0)) && \
+ defined(WL_SELF_MANAGED_REGDOM)
+#define WL_UPDATE_CUSTOM_REGULATORY(wiphy) \
+ wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED;
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+#define WL_UPDATE_CUSTOM_REGULATORY(wiphy) \
+ wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG;
+#else /* kernel > 4.0 && WL_SELF_MANAGED_REGDOM */
+/* Kernels < 3.14 */
+#define WL_UPDATE_CUSTOM_REGULATORY(wiphy) \
+ wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) */
+
+/* GCMP crypto supported above kernel v4.0 */
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(4, 0, 0)) && defined(WL_GCMP_SUPPORT)
+/* Check for minimal kernel version before enabling WL_GCMP */
+#define WL_GCMP
+#endif /* (LINUX_VERSION > KERNEL_VERSION(4, 0, 0) && WL_GCMP_SUPPORT */
+
+#ifndef IBSS_COALESCE_ALLOWED
+#define IBSS_COALESCE_ALLOWED IBSS_COALESCE_DEFAULT
+#endif
+
+#ifndef IBSS_INITIAL_SCAN_ALLOWED
+#define IBSS_INITIAL_SCAN_ALLOWED IBSS_INITIAL_SCAN_ALLOWED_DEFAULT
+#endif
+
+#define CUSTOM_RETRY_MASK 0xff000000 /* Mask for retry counter of custom dwell time */
+
+/* On some MSM platforms, the Linux kernel and cfg80211 code
+ * versions are not in sync. MSM defines CFG80211_DISCONNECTED_V2
+ * as the flag when such mismatched kernel/cfg versions are used.
+ */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)) || \
+ (defined(CONFIG_ARCH_MSM) && defined(CFG80211_DISCONNECTED_V2))
+#define CFG80211_DISCONNECTED(dev, reason, ie, len, loc_gen, gfp) \
+ cfg80211_disconnected(dev, reason, ie, len, loc_gen, gfp);
+#elif (LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0))
+#define CFG80211_DISCONNECTED(dev, reason, ie, len, loc_gen, gfp) \
+ BCM_REFERENCE(loc_gen); \
+ cfg80211_disconnected(dev, reason, ie, len, gfp);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)) */
+
+/* 0 disables all debug messages; the mask below (0xFF) enables all levels */
+#define WL_DBG_LEVEL 0xFF
+
+#if defined(CUSTOMER_DBG_SYSTEM_TIME) && defined(DHD_DEBUGABILITY_LOG_DUMP_RING)
+#define WL_DBG_PRINT_SYSTEM_TIME \
+ pr_cont("[%s]", dhd_dbg_get_system_timestamp())
+#else
+#define WL_DBG_PRINT_SYSTEM_TIME
+#endif /* defined(CUSTOMER_DBG_SYSTEM_TIME) && defined(DHD_DEBUGABILITY_LOG_DUMP_RING) */
+
+#if defined(CUSTOMER_DBG_PREFIX_ENABLE)
+#define USER_PREFIX_CFG80211 "[cfg80211][wlan] "
+#define CFG80211_INFO_TEXT USER_PREFIX_CFG80211
+#define CFG80211_ERROR_TEXT USER_PREFIX_CFG80211
+#define CFG80211_SCAN_TEXT USER_PREFIX_CFG80211
+#define CFG80211_TRACE_TEXT USER_PREFIX_CFG80211
+#define CFG80211_DEBUG_TEXT USER_PREFIX_CFG80211
+#else
+#define CFG80211_INFO_TEXT "CFG80211-INFO) "
+/* Samsung wants INFO2 printed instead of ERROR because,
+ * in most cases, the ERROR message is not a real error,
+ * although a tester may regard it as one.
+ */
+#ifdef CUSTOMER_HW4_DEBUG
+#define CFG80211_ERROR_TEXT "CFG80211-INFO2) "
+#else
+#define CFG80211_ERROR_TEXT "CFG80211-ERROR) "
+#endif /* CUSTOMER_HW4_DEBUG */
+#define CFG80211_SCAN_TEXT "CFG80211-SCAN) "
+#define CFG80211_TRACE_TEXT "CFG80211-TRACE) "
+#define CFG80211_DEBUG_TEXT "CFG80211-DEBUG) "
+#endif /* defined(CUSTOMER_DBG_PREFIX_ENABLE) */
+
+#ifdef DHD_DEBUG
+#ifdef DHD_LOG_DUMP
+#define WL_ERR_MSG(x, args...) \
+do { \
+ if (wl_dbg_level & WL_DBG_ERR) { \
+ printf(CFG80211_ERROR_TEXT "%s : " x, __func__, ## args); \
+ DHD_LOG_DUMP_WRITE_TS_FN; \
+ DHD_LOG_DUMP_WRITE(x, ## args); \
+ } \
+} while (0)
+#define WL_ERR(x) WL_ERR_MSG x
+#define WL_ERR_KERN_MSG(x, args...) \
+do { \
+ if (wl_dbg_level & WL_DBG_ERR) { \
+ printf(CFG80211_ERROR_TEXT "%s : " x, __func__, ## args); \
+ } \
+} while (0)
+#define WL_ERR_KERN(x) WL_ERR_KERN_MSG x
+#define WL_ERR_MEM_MSG(x, args...) \
+do { \
+ if (wl_dbg_level & WL_DBG_ERR) { \
+ DHD_LOG_DUMP_WRITE_TS_FN; \
+ DHD_LOG_DUMP_WRITE(x, ## args); \
+ } \
+} while (0)
+/* Prints to debug ring by default. If dbg level is enabled, prints on to
+ * console as well
+ */
+#define WL_DBG_MEM_MSG(x, args...) \
+do { \
+ if (wl_dbg_level & WL_DBG_DBG) { \
+ printf(CFG80211_INFO_TEXT "%s : " x, __func__, ## args); \
+ } \
+ DHD_LOG_DUMP_WRITE_TS_FN; \
+ DHD_LOG_DUMP_WRITE(x, ## args); \
+} while (0)
+#define WL_DBG_MEM(x) WL_DBG_MEM_MSG x
+#define WL_ERR_MEM(x) WL_ERR_MEM_MSG x
+#define WL_INFORM_MEM_MSG(x, args...) \
+do { \
+ if (wl_dbg_level & WL_DBG_INFO) { \
+ printf(CFG80211_INFO_TEXT "%s : " x, __func__, ## args); \
+ DHD_LOG_DUMP_WRITE_TS_FN; \
+ DHD_LOG_DUMP_WRITE(x, ## args); \
+ } \
+} while (0)
+#define WL_INFORM_MEM(x) WL_INFORM_MEM_MSG x
+#define WL_ERR_EX_MSG(x, args...) \
+do { \
+ if (wl_dbg_level & WL_DBG_ERR) { \
+ printf(CFG80211_ERROR_TEXT "%s : " x, __func__, ## args); \
+ DHD_LOG_DUMP_WRITE_EX_TS_FN; \
+ DHD_LOG_DUMP_WRITE_EX(x, ## args); \
+ } \
+} while (0)
+#define WL_ERR_EX(x) WL_ERR_EX_MSG x
+#define WL_MEM(args) \
+do { \
+ DHD_LOG_DUMP_WRITE_TS_FN; \
+ DHD_LOG_DUMP_WRITE args; \
+} while (0)
+#else
+#define WL_ERR_MSG(x, args...) \
+do { \
+ if (wl_dbg_level & WL_DBG_ERR) { \
+ printf(CFG80211_ERROR_TEXT "%s : " x, __func__, ## args); \
+ } \
+} while (0)
+#define WL_ERR(x) WL_ERR_MSG x
+#define WL_ERR_KERN(args) WL_ERR(args)
+#define WL_ERR_MEM(args) WL_ERR(args)
+#define WL_INFORM_MEM(args) WL_INFORM(args)
+#define WL_DBG_MEM(args) WL_DBG(args)
+#define WL_ERR_EX(args) WL_ERR(args)
+#define WL_MEM(args) WL_DBG(args)
+#endif /* DHD_LOG_DUMP */
+#else /* defined(DHD_DEBUG) */
+#define WL_ERR_MSG(x, args...) \
+do { \
+ if ((wl_dbg_level & WL_DBG_ERR) && net_ratelimit()) { \
+ printf(CFG80211_ERROR_TEXT "%s : " x, __func__, ## args); \
+ } \
+} while (0)
+#define WL_ERR(x) WL_ERR_MSG x
+#define WL_ERR_KERN(args) WL_ERR(args)
+#define WL_ERR_MEM(args) WL_ERR(args)
+#define WL_INFORM_MEM(args) WL_INFORM(args)
+#define WL_DBG_MEM(args) WL_DBG(args)
+#define WL_ERR_EX(args) WL_ERR(args)
+#define WL_MEM(args) WL_DBG(args)
+#endif /* defined(DHD_DEBUG) */
+
+#if defined(__linux__) && !defined(DHD_EFI)
+#define WL_PRINT_RATE_LIMIT_PERIOD 4000000000u /* 4s in units of ns */
+#endif
+#if defined(__linux__) && !defined(DHD_EFI)
+#define WL_ERR_RLMT(args) \
+do { \
+ if (wl_dbg_level & WL_DBG_ERR) { \
+ static uint64 __err_ts = 0; \
+ static uint32 __err_cnt = 0; \
+ uint64 __cur_ts = 0; \
+ __cur_ts = local_clock(); \
+ if (__err_ts == 0 || (__cur_ts > __err_ts && \
+ (__cur_ts - __err_ts > WL_PRINT_RATE_LIMIT_PERIOD))) { \
+ __err_ts = __cur_ts; \
+ WL_ERR(args); \
+ WL_ERR(("[Repeats %u times]\n", __err_cnt)); \
+ __err_cnt = 0; \
+ } else { \
+ ++__err_cnt; \
+ } \
+ } \
+} while (0)
+#else /* defined(__linux__) && !defined(DHD_EFI) */
+#define WL_ERR_RLMT(args) WL_ERR(args)
+#endif /* defined(__linux__) && !defined(DHD_EFI) */
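+
+/*
+ * WL_ERR_RLMT() prints at most once per WL_PRINT_RATE_LIMIT_PERIOD (4 s);
+ * identical prints in between are counted and reported as "[Repeats N times]".
+ */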
+
+#ifdef WL_INFORM
+#undef WL_INFORM
+#endif
+
+#define WL_INFORM_MSG(x, args...) \
+do { \
+ if (wl_dbg_level & WL_DBG_INFO) { \
+ printf(CFG80211_INFO_TEXT "%s : " x, __func__, ## args); \
+ } \
+} while (0)
+#define WL_INFORM(x) WL_INFORM_MSG x
+
+#ifdef WL_SCAN
+#undef WL_SCAN
+#endif
+#define WL_SCAN_MSG(x, args...) \
+do { \
+ if (wl_dbg_level & WL_DBG_SCAN) { \
+ printf(CFG80211_SCAN_TEXT "%s : " x, __func__, ## args); \
+ } \
+} while (0)
+#define WL_SCAN(x) WL_SCAN_MSG x
+#ifdef WL_TRACE
+#undef WL_TRACE
+#endif
+#define WL_TRACE_MSG(x, args...) \
+do { \
+ if (wl_dbg_level & WL_DBG_TRACE) { \
+ printf(CFG80211_TRACE_TEXT "%s : " x, __func__, ## args); \
+ } \
+} while (0)
+#define WL_TRACE(x) WL_TRACE_MSG x
+#ifdef WL_TRACE_HW4
+#undef WL_TRACE_HW4
+#endif
+#ifdef CUSTOMER_HW4_DEBUG
+#define WL_TRACE_HW4_MSG(x, args...) \
+do { \
+ if (wl_dbg_level & WL_DBG_ERR) { \
+ printf(CFG80211_TRACE_TEXT "%s : " x, __func__, ## args); \
+ } \
+} while (0)
+#define WL_TRACE_HW4(x) WL_TRACE_HW4_MSG x
+#else
+#define WL_TRACE_HW4 WL_TRACE
+#endif /* CUSTOMER_HW4_DEBUG */
+#if (WL_DBG_LEVEL > 0)
+#define WL_DBG_MSG(x, args...) \
+do { \
+ if (wl_dbg_level & WL_DBG_DBG) { \
+ printf(CFG80211_DEBUG_TEXT "%s : " x, __func__, ## args); \
+ } \
+} while (0)
+#define WL_DBG(x) WL_DBG_MSG x
+#else /* !(WL_DBG_LEVEL > 0) */
+#define WL_DBG(args)
+#endif /* (WL_DBG_LEVEL > 0) */
+#define WL_PNO(x)
+#define WL_SD(x)
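+
+/*
+ * Usage note: these logging macros take a double-parenthesized argument
+ * list, e.g. WL_ERR(("bad status %d\n", status)); the outer macro strips
+ * one level of parentheses and forwards the list to the printf-style body.
+ */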
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0))
+#define ieee80211_band nl80211_band
+#define IEEE80211_BAND_2GHZ NL80211_BAND_2GHZ
+#define IEEE80211_BAND_5GHZ NL80211_BAND_5GHZ
+#define IEEE80211_NUM_BANDS NUM_NL80211_BANDS
+#endif
+
+#define WL_SCAN_RETRY_MAX 3
+#define WL_NUM_PMKIDS_MAX MAXPMKID
+#define WL_SCAN_BUF_MAX (1024 * 8)
+#define WL_TLV_INFO_MAX 1500
+#define WL_SCAN_IE_LEN_MAX 2048
+#define WL_BSS_INFO_MAX 2048
+#define WL_ASSOC_INFO_MAX 512
+/* the length of pmkid_info iovar is 1416
+ * It exceed the original 1024 limitation
+ * so change WL_EXTRA_LEN_MAX to 2048
+ */
+#define WL_IOCTL_LEN_MAX 2048
+#define WL_EXTRA_BUF_MAX 2048
+#define WL_SCAN_ERSULTS_LAST (WL_SCAN_RESULTS_NO_MEM+1)
+#define WL_AP_MAX 256
+#define WL_FILE_NAME_MAX 256
+#define WL_DEFAULT_DWELL_TIME 200
+#define WL_MED_DWELL_TIME 400
+#define WL_MIN_DWELL_TIME 100
+#define WL_LONG_DWELL_TIME 1000
+#define IFACE_MAX_CNT 5
+#define WL_SCAN_CONNECT_DWELL_TIME_MS 200
+#define WL_SCAN_JOIN_PROBE_INTERVAL_MS 20
+#define WL_SCAN_JOIN_ACTIVE_DWELL_TIME_MS 320
+#define WL_BCAST_SCAN_JOIN_ACTIVE_DWELL_TIME_MS 80
+#define WL_SCAN_JOIN_PASSIVE_DWELL_TIME_MS 400
+#define WL_AF_TX_MAX_RETRY 5
+#define WL_AF_TX_MIN_RETRY 3
+
+#define WL_AF_SEARCH_TIME_MAX 450
+#define WL_AF_TX_EXTRA_TIME_MAX 200
+
+#define WL_SCAN_TIMER_INTERVAL_MS 10000 /* Scan timeout */
+#ifdef WL_NAN
+#define WL_SCAN_TIMER_INTERVAL_MS_NAN 15000 /* Scan timeout */
+#endif /* WL_NAN */
+#ifdef WL_6G_BAND
+/* additional scan timeout for 6GHz, 6000msec */
+#define WL_SCAN_TIMER_INTERVAL_MS_6G 6000
+#endif /* WL_6G_BAND */
+#define CHSPEC_IS_6G_PSC(chspec) (CHSPEC_IS6G(chspec) && ((CHSPEC_CHANNEL(chspec) % 16) == 5))
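+/* PSC (Preferred Scanning Channels) in 6 GHz: channels 5, 21, 37, 53, ...
+ * i.e. every 16th channel starting at 5, per the modulo test above.
+ */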
+#define WL_CHANNEL_SYNC_RETRY 5
+#define WL_INVALID -1
+
+#ifdef DHD_LOSSLESS_ROAMING
+#define WL_ROAM_TIMEOUT_MS 1000 /* Roam timeout */
+#endif
+/* Bring down SCB Timeout to 20secs from 60secs default */
+#ifndef WL_SCB_TIMEOUT
+#define WL_SCB_TIMEOUT 20
+#endif
+
+#if defined(ROAM_ENABLE) || defined(ROAM_CHANNEL_CACHE)
+#define ESCAN_CHANNEL_CACHE
+#endif
+
+#ifndef WL_SCB_ACTIVITY_TIME
+#define WL_SCB_ACTIVITY_TIME 5
+#endif
+
+#ifndef WL_SCB_MAX_PROBE
+#define WL_SCB_MAX_PROBE 3
+#endif
+
+#ifndef WL_PSPRETEND_RETRY_LIMIT
+#define WL_PSPRETEND_RETRY_LIMIT 1
+#endif
+
+#ifndef WL_MIN_PSPRETEND_THRESHOLD
+#define WL_MIN_PSPRETEND_THRESHOLD 2
+#endif
+
+/* Cipher suites */
+#ifndef WLAN_CIPHER_SUITE_PMK
+#define WLAN_CIPHER_SUITE_PMK 0x00904C00
+#endif /* WLAN_CIPHER_SUITE_PMK */
+
+#ifndef WLAN_AKM_SUITE_FT_8021X
+#define WLAN_AKM_SUITE_FT_8021X 0x000FAC03
+#endif /* WLAN_AKM_SUITE_FT_8021X */
+
+#ifndef WLAN_AKM_SUITE_FT_PSK
+#define WLAN_AKM_SUITE_FT_PSK 0x000FAC04
+#endif /* WLAN_AKM_SUITE_FT_PSK */
+
+#ifndef WLAN_AKM_SUITE_8021X_SUITE_B
+#define WLAN_AKM_SUITE_8021X_SUITE_B 0x000FAC0B
+#define WLAN_AKM_SUITE_8021X_SUITE_B_192 0x000FAC0C
+#endif /* WLAN_AKM_SUITE_8021X_SUITE_B */
+
+/* TODO: even in upstream Linux (v5.0), FT-1X-SHA384 is not defined or supported yet;
+ * revisit later to sync the correct name.
+ */
+#ifndef WLAN_AKM_SUITE_FT_8021X_SHA384
+#define WLAN_AKM_SUITE_FT_8021X_SHA384 0x000FAC0D
+#endif /* WLAN_AKM_SUITE_FT_8021X_SHA384 */
+
+#define WL_AKM_SUITE_SHA256_1X 0x000FAC05
+#define WL_AKM_SUITE_SHA256_PSK 0x000FAC06
+
+#define WLAN_AKM_SUITE_SAE_SHA256 0x000FAC08
+
+#ifndef WLAN_AKM_SUITE_FILS_SHA256
+#define WLAN_AKM_SUITE_FILS_SHA256 0x000FAC0E
+#define WLAN_AKM_SUITE_FILS_SHA384 0x000FAC0F
+#define WLAN_AKM_SUITE_FT_FILS_SHA256 0x000FAC10
+#define WLAN_AKM_SUITE_FT_FILS_SHA384 0x000FAC11
+#endif /* WLAN_AKM_SUITE_FILS_SHA256 */
+
+#define MIN_VENDOR_EXTN_IE_LEN 2
+#ifdef WL_OWE
+#ifndef WLAN_AKM_SUITE_OWE
+#define WLAN_AKM_SUITE_OWE 0X000FAC12
+#endif /* WPA_KEY_MGMT_OWE */
+#endif /* WL_OWE */
+#define WLAN_AKM_SUITE_DPP 0X506F9A02
+
+/*
+ * BRCM local.
+ * Use a high bit that is unlikely to clash with upstream Linux for a while,
+ * until we can submit these changes to the community.
+ */
+#define NL80211_FEATURE_FW_4WAY_HANDSHAKE (1<<31)
+
+/* SCAN_SUPPRESS timer values in ms */
+#define WL_SCAN_SUPPRESS_TIMEOUT 31000 /* default Framework DHCP timeout is 30 sec */
+#define WL_SCAN_SUPPRESS_RETRY 3000
+
+#define WL_PM_ENABLE_TIMEOUT 10000
+
+/* cfg80211 wowlan definitions */
+#define WL_WOWLAN_MAX_PATTERNS 8
+#define WL_WOWLAN_MIN_PATTERN_LEN 1
+#define WL_WOWLAN_MAX_PATTERN_LEN 255
+#define WL_WOWLAN_PKT_FILTER_ID_FIRST 201
+#define WL_WOWLAN_PKT_FILTER_ID_LAST (WL_WOWLAN_PKT_FILTER_ID_FIRST + \
+ WL_WOWLAN_MAX_PATTERNS - 1)
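+/* With WL_WOWLAN_MAX_PATTERNS == 8, the filter IDs span 201..208. */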
+#ifdef WLAIBSS
+#define IBSS_COALESCE_DEFAULT 0
+#define IBSS_INITIAL_SCAN_ALLOWED_DEFAULT 0
+#else /* WLAIBSS */
+#define IBSS_COALESCE_DEFAULT 1
+#define IBSS_INITIAL_SCAN_ALLOWED_DEFAULT 1
+#endif /* WLAIBSS */
+
+#ifdef WLTDLS
+#define TDLS_TUNNELED_PRB_REQ "\x7f\x50\x6f\x9a\04"
+#define TDLS_TUNNELED_PRB_RESP "\x7f\x50\x6f\x9a\05"
+#define TDLS_MAX_IFACE_FOR_ENABLE 1
+#endif /* WLTDLS */
+
+#ifdef WLAIBSS
+/* Custom AIBSS beacon parameters */
+#define AIBSS_INITIAL_MIN_BCN_DUR 500
+#define AIBSS_MIN_BCN_DUR 5000
+#define AIBSS_BCN_FLOOD_DUR 5000
+#define AIBSS_PEER_FREE 3
+#endif /* WLAIBSS */
+
+#ifndef FILS_INDICATION_IE_TAG_FIXED_LEN
+#define FILS_INDICATION_IE_TAG_FIXED_LEN 2
+#endif
+
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) &&\
+(__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6))
+#define BCM_SET_LIST_FIRST_ENTRY(entry, ptr, type, member) \
+GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); \
+(entry) = list_first_entry((ptr), type, member); \
+GCC_DIAGNOSTIC_POP(); \
+
+#define BCM_SET_CONTAINER_OF(entry, ptr, type, member) \
+GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); \
+entry = container_of((ptr), type, member); \
+GCC_DIAGNOSTIC_POP(); \
+
+#else
+#define BCM_SET_LIST_FIRST_ENTRY(entry, ptr, type, member) \
+(entry) = list_first_entry((ptr), type, member); \
+
+#define BCM_SET_CONTAINER_OF(entry, ptr, type, member) \
+entry = container_of((ptr), type, member); \
+
+#endif /* STRICT_GCC_WARNINGS */
+
+/* DPP Public Action Frame types */
+enum wl_dpp_ftype {
+ DPP_AUTH_REQ = 0,
+ DPP_AUTH_RESP = 1,
+ DPP_AUTH_CONF = 2,
+ DPP_PEER_DISC_REQ = 5,
+ DPP_PEER_DISC_RESP = 6,
+ DPP_PKEX_EX_REQ = 7,
+ DPP_PKEX_EX_RESP = 8,
+ DPP_PKEX_COMMIT_REVEAL_REQ = 9,
+ DPP_PKEX_COMMIT_REVEAL_RESP = 10,
+ DPP_CONFIGURATION_RESULT = 11
+};
+
+/* DPP Public Action Frame */
+struct wl_dpp_pub_act_frame {
+ uint8 category; /* PUB_AF_CATEGORY */
+ uint8 action; /* PUB_AF_ACTION */
+ uint8 oui[3]; /* OUI */
+ uint8 oui_type; /* OUI type */
+ uint8 crypto_suite; /* OUI subtype */
+ uint8 ftype; /* nonzero, identifies req/rsp transaction */
+ uint8 elts[1]; /* Variable length information elements. */
+} __attribute__ ((packed));
+typedef struct wl_dpp_pub_act_frame wl_dpp_pa_frame_t;
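+
+/*
+ * Per the defines below, a DPP public action frame carries category 0x04
+ * (WL_PUB_AF_CATEGORY), action 0x09 (vendor specific), the WFA OUI
+ * (50:6F:9A) and subtype 0x1A (WL_PUB_AF_WFA_STYPE_DPP); 'ftype' then
+ * selects one of the wl_dpp_ftype transaction types above.
+ */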
+
+#define WL_PUB_AF_CATEGORY 0x04
+#define WL_PUB_AF_ACTION 0x09 /* Vendor specific */
+#define WL_PUB_AF_WFA_STYPE_DPP 0x1A /* WFA Subtype DPP */
+#define WL_PUB_AF_STYPE_INVALID 255
+#define WL_GAS_MIN_LEN 8
+#define WL_GAS_WFA_OFFSET 3
+#define WL_GAS_RESP_OFFSET 4
+#define WL_GAS_STYPE_OFFSET 6
+#define WL_GAS_WFA_STYPE_DPP 0x1A
+#define WL_GAS_DPP_ADV_ID 0x7ddd
+
+/* Action value for GAS Initial Request AF */
+#define WL_PUB_AF_GAS_IREQ 0x0a
+/* Action value for GAS Initial Response AF */
+#define WL_PUB_AF_GAS_IRESP 0x0b
+/* Action value for GAS Comeback Request AF */
+#define WL_PUB_AF_GAS_CREQ 0x0c
+/* Action value for GAS Comeback Response AF */
+#define WL_PUB_AF_GAS_CRESP 0x0d
+/* Advertisement Protocol IE ID */
+#define WL_PUB_AF_GAS_AD_EID 0x6c
+
+typedef wifi_p2psd_gas_pub_act_frame_t wl_dpp_gas_af_t;
+
+/* driver status */
+enum wl_status {
+ WL_STATUS_READY = 0,
+ WL_STATUS_SCANNING,
+ WL_STATUS_SCAN_ABORTING,
+ WL_STATUS_CONNECTING,
+ WL_STATUS_CONNECTED,
+ WL_STATUS_DISCONNECTING,
+ WL_STATUS_AP_CREATING,
+ WL_STATUS_AP_CREATED,
+ /* the whole action-frame send procedure:
+ * a) 'finding a common channel' for the public action request frame, and
+ * b) 'sending the AF via the actframe iovar'
+ */
+ WL_STATUS_SENDING_ACT_FRM,
+ /* find the peer and move to a common channel before sending the public action req frame */
+ WL_STATUS_FINDING_COMMON_CHANNEL,
+ /* waiting for the next AF to sync timing with the supplicant;
+ * includes SENDING_ACT_FRM and WAITING_NEXT_ACT_FRM_LISTEN
+ */
+ WL_STATUS_WAITING_NEXT_ACT_FRM,
+#ifdef WL_CFG80211_SYNC_GON
+ /* go to listen state to wait for next af after SENDING_ACT_FRM */
+ WL_STATUS_WAITING_NEXT_ACT_FRM_LISTEN,
+#endif /* WL_CFG80211_SYNC_GON */
+ /* set when the upper layer requests listen and listen mode is entered
+ * successfully. If set, another scan request may abort the current
+ * listen state.
+ */
+ WL_STATUS_REMAINING_ON_CHANNEL,
+#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+ /* fake listen state used to preserve the current scan state. Set when
+ * the upper layer requests listen while a scan is running; an expiry
+ * timer then runs without entering an actual listen state. If set,
+ * another scan request does not need to abort the scan.
+ */
+ WL_STATUS_FAKE_REMAINING_ON_CHANNEL,
+#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+ WL_STATUS_NESTED_CONNECT,
+ WL_STATUS_CFG80211_CONNECT,
+ WL_STATUS_AUTHORIZED
+};
+
+typedef enum wl_iftype {
+ WL_IF_TYPE_STA = 0,
+ WL_IF_TYPE_AP = 1,
+#ifdef WLMESH_CFG80211
+ WL_IF_TYPE_MESH = 2,
+#endif /* WLMESH_CFG80211 */
+
+#ifdef WLAWDL
+ WL_IF_TYPE_AWDL = 2,
+#endif /* WLAWDL */
+
+ WL_IF_TYPE_NAN_NMI = 3,
+ WL_IF_TYPE_NAN = 4,
+ WL_IF_TYPE_P2P_GO = 5,
+ WL_IF_TYPE_P2P_GC = 6,
+ WL_IF_TYPE_P2P_DISC = 7,
+ WL_IF_TYPE_IBSS = 8,
+ WL_IF_TYPE_MONITOR = 9,
+ WL_IF_TYPE_AIBSS = 10,
+ WL_IF_TYPE_MAX
+} wl_iftype_t;
+
+typedef enum wl_interface_state {
+ WL_IF_CREATE_REQ,
+ WL_IF_CREATE_DONE,
+ WL_IF_DELETE_REQ,
+ WL_IF_DELETE_DONE,
+ WL_IF_CHANGE_REQ,
+ WL_IF_CHANGE_DONE,
+ WL_IF_STATE_MAX, /* Retain as last one */
+} wl_interface_state_t;
+
+/* wi-fi mode */
+enum wl_mode {
+ WL_MODE_BSS = 0,
+ WL_MODE_IBSS = 1,
+ WL_MODE_AP = 2,
+
+#ifdef WLAWDL
+ WL_MODE_AWDL = 3,
+#endif /* WLAWDL */
+
+ WL_MODE_NAN = 4,
+#ifdef WLMESH_CFG80211
+ WL_MODE_MESH = 5,
+#endif /* WLMESH_CFG80211 */
+ WL_MODE_MAX
+};
+
+/* driver profile list */
+enum wl_prof_list {
+ WL_PROF_MODE,
+ WL_PROF_SSID,
+ WL_PROF_SEC,
+ WL_PROF_IBSS,
+ WL_PROF_BAND,
+ WL_PROF_CHAN,
+ WL_PROF_BSSID,
+ WL_PROF_ACT,
+ WL_PROF_BEACONINT,
+ WL_PROF_DTIMPERIOD,
+ WL_PROF_LATEST_BSSID
+};
+
+/* dongle escan state */
+enum wl_escan_state {
+ WL_ESCAN_STATE_IDLE,
+ WL_ESCAN_STATE_SCANING
+};
+/* fw downloading status */
+enum wl_fw_status {
+ WL_FW_LOADING_DONE,
+ WL_NVRAM_LOADING_DONE
+};
+
+enum wl_management_type {
+ WL_BEACON = 0x1,
+ WL_PROBE_RESP = 0x2,
+ WL_ASSOC_RESP = 0x4
+};
+
+enum wl_pm_workq_act_type {
+ WL_PM_WORKQ_SHORT,
+ WL_PM_WORKQ_LONG,
+ WL_PM_WORKQ_DEL
+};
+
+enum wl_tdls_config {
+ TDLS_STATE_AP_CREATE,
+ TDLS_STATE_AP_DELETE,
+ TDLS_STATE_CONNECT,
+ TDLS_STATE_DISCONNECT,
+ TDLS_STATE_SETUP,
+ TDLS_STATE_TEARDOWN,
+ TDLS_STATE_IF_CREATE,
+ TDLS_STATE_IF_DELETE,
+ TDLS_STATE_NMI_CREATE
+};
+
+typedef enum wl_assoc_state {
+ WL_STATE_ASSOC_IDLE,
+ WL_STATE_ASSOCIATING,
+ WL_STATE_ASSOCIATED
+} wl_assoc_state_t;
+
+typedef enum wl_link_action {
+ WL_LINK_NONE,
+ WL_LINK_ASSOC_FAIL,
+ WL_LINK_ASSOC_DONE,
+ WL_LINK_DOWN,
+ WL_LINK_ROAM_DONE,
+ WL_LINK_FORCE_DEAUTH
+} wl_link_action_t;
+
+typedef struct wl_assoc_status {
+ u16 flags;
+ u16 assoc_state;
+ u32 event_type;
+ u32 status;
+ u32 reason;
+ wl_link_action_t link_action;
+ u8 curbssid[ETH_ALEN];
+ u8 addr[ETH_ALEN];
+ u16 data_len;
+ void *data;
+ struct net_device *ndev;
+ const wl_event_msg_t *event_msg;
+} wl_assoc_status_t;
+
+/* beacon / probe_response */
+struct beacon_proberesp {
+ __le64 timestamp;
+ __le16 beacon_int;
+ __le16 capab_info;
+ u8 variable[0];
+} __attribute__ ((packed));
+
+/* driver configuration */
+struct wl_conf {
+ u32 frag_threshold;
+ u32 rts_threshold;
+ u32 retry_short;
+ u32 retry_long;
+ s32 tx_power;
+ struct ieee80211_channel channel;
+};
+
+typedef s32(*EVENT_HANDLER) (struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data);
+
+/* bss inform structure for cfg80211 interface */
+struct wl_cfg80211_bss_info {
+ u16 band;
+ u16 channel;
+ s16 rssi;
+ u16 frame_len;
+ u8 frame_buf[1];
+};
+
+/* basic structure of scan request */
+struct wl_scan_req {
+ struct wlc_ssid ssid;
+};
+
+/* basic structure of information element */
+struct wl_ie {
+ u16 offset;
+ u8 buf[WL_TLV_INFO_MAX];
+};
+
+/* event queue for cfg80211 main event */
+struct wl_event_q {
+ struct list_head eq_list;
+ u32 etype;
+ u32 id; /* counter to track events */
+ wl_event_msg_t emsg;
+ u32 datalen;
+ s8 edata[1];
+};
+
+/* security information with currently associated ap */
+struct wl_security {
+ u32 wpa_versions;
+ u32 auth_type;
+ u32 cipher_pairwise;
+ u32 cipher_group;
+ u32 wpa_auth;
+ u32 auth_assoc_res_status;
+ u32 fw_wpa_auth;
+ u32 fw_auth;
+ u32 fw_wsec;
+ u32 fw_mfp;
+};
+
+/* ibss information for currently joined ibss network */
+struct wl_ibss {
+ u8 beacon_interval; /* in milliseconds */
+ u8 atim; /* in milliseconds */
+ s8 join_only;
+ u8 band;
+ u8 channel;
+};
+
+typedef struct wl_bss_vndr_ies {
+ u8 probe_req_ie[VNDR_IES_BUF_LEN];
+ u8 probe_res_ie[VNDR_IES_MAX_BUF_LEN];
+ u8 assoc_req_ie[VNDR_IES_BUF_LEN];
+ u8 assoc_res_ie[VNDR_IES_BUF_LEN];
+ u8 beacon_ie[VNDR_IES_MAX_BUF_LEN];
+ u8 disassoc_ie[VNDR_IES_BUF_LEN];
+ u32 probe_req_ie_len;
+ u32 probe_res_ie_len;
+ u32 assoc_req_ie_len;
+ u32 assoc_res_ie_len;
+ u32 beacon_ie_len;
+ u32 disassoc_ie_len;
+} wl_bss_vndr_ies_t;
+
+typedef struct wl_cfgbss {
+ u8 *wpa_ie;
+ u8 *rsn_ie;
+ u8 *wps_ie;
+ u8 *fils_ind_ie;
+ bool security_mode;
+ struct wl_bss_vndr_ies ies; /* Common for STA, P2P GC, GO, AP, P2P Disc Interface */
+} wl_cfgbss_t;
+
+/* cfg driver profile */
+struct wl_profile {
+ u32 mode;
+ s32 band;
+ u32 channel;
+ struct wlc_ssid ssid;
+ struct wl_security sec;
+ struct wl_ibss ibss;
+ u8 bssid[ETHER_ADDR_LEN];
+ u16 beacon_interval;
+ u8 dtim_period;
+ bool active;
+ u8 latest_bssid[ETHER_ADDR_LEN];
+};
+
+struct wl_wps_ie {
+ uint8 id; /* IE ID: 0xDD */
+ uint8 len; /* IE length */
+ uint8 OUI[3]; /* WiFi WPS specific OUI */
+ uint8 oui_type; /* Vendor specific OUI Type */
+ uint8 attrib[1]; /* variable length attributes */
+} __attribute__ ((packed));
+typedef struct wl_wps_ie wl_wps_ie_t;
+
+struct wl_eap_msg {
+ uint16 attrib;
+ uint16 len;
+ uint8 type;
+} __attribute__ ((packed));
+typedef struct wl_eap_msg wl_eap_msg_t;
+
+struct wl_eap_exp {
+ uint8 OUI[3];
+ uint32 oui_type;
+ uint8 opcode;
+ u8 flags;
+ u8 data[1];
+} __attribute__ ((packed));
+typedef struct wl_eap_exp wl_eap_exp_t;
+
+struct net_info {
+ struct net_device *ndev;
+ struct wireless_dev *wdev;
+ struct wl_profile profile;
+ wl_iftype_t iftype;
+ s32 roam_off;
+ unsigned long sme_state;
+ bool pm_restore;
+ bool pm_block;
+ s32 pm;
+ s32 bssidx;
+ wl_cfgbss_t bss;
+ u8 ifidx;
+ struct list_head list; /* list of all net_info structure */
+};
+
+#ifdef WL_BCNRECV
+/* PERIODIC Beacon receive for detecting FakeAPs */
+typedef struct wl_bcnrecv_result {
+ uint8 SSID[DOT11_MAX_SSID_LEN]; /**< SSID String */
+ struct ether_addr BSSID; /**< Network BSSID */
+ uint8 channel; /**< Channel */
+ uint16 beacon_interval;
+ uint32 timestamp[2]; /**< Beacon Timestamp */
+ uint64 system_time;
+} wl_bcnrecv_result_t;
+
+typedef struct wl_bcnrecv_info {
+ uint bcnrecv_state; /* to track the fakeap state */
+} wl_bcnrecv_info_t;
+
+typedef enum wl_bcnrecv_state {
+ BEACON_RECV_IDLE = 0,
+ BEACON_RECV_STARTED,
+ BEACON_RECV_STOPPED,
+ BEACON_RECV_SUSPENDED
+} wl_bcnrecv_state_t;
+
+typedef enum wl_bcnrecv_reason {
+ WL_BCNRECV_INVALID = 0,
+ WL_BCNRECV_USER_TRIGGER,
+ WL_BCNRECV_SUSPEND,
+ WL_BCNRECV_SCANBUSY,
+ WL_BCNRECV_CONCURRENCY,
+ WL_BCNRECV_LISTENBUSY,
+ WL_BCNRECV_ROAMABORT,
+ WL_BCNRECV_HANG
+} wl_bcnrecv_reason_t;
+
+typedef enum wl_bcnrecv_status {
+ WL_BCNRECV_STARTED = 0,
+ WL_BCNRECV_STOPPED,
+ WL_BCNRECV_ABORTED,
+ WL_BCNRECV_SUSPENDED,
+ WL_BCNRECV_MAX
+} wl_bcnrecv_status_t;
+
+typedef enum wl_bcnrecv_attr_type {
+ BCNRECV_ATTR_STATUS = 1,
+ BCNRECV_ATTR_REASON,
+ BCNRECV_ATTR_BCNINFO
+} wl_bcnrecv_attr_type_t;
+#endif /* WL_BCNRECV */
+#ifdef WL_CHAN_UTIL
+#define CU_ATTR_PERCENTAGE 1
+#define CU_ATTR_HDR_LEN 30
+#endif /* WL_CHAN_UTIL */
+
+/* association inform */
+#define MAX_REQ_LINE 1024u
+struct wl_connect_info {
+ u8 req_ie[MAX_REQ_LINE];
+ u32 req_ie_len;
+ u8 resp_ie[MAX_REQ_LINE];
+ u32 resp_ie_len;
+};
+#define WL_MAX_FILS_KEY_LEN 64
+
+struct wl_fils_info {
+ u8 fils_kek[WL_MAX_FILS_KEY_LEN];
+ u32 fils_kek_len;
+ u8 fils_pmk[WL_MAX_FILS_KEY_LEN];
+ u32 fils_pmk_len;
+ u8 fils_pmkid[WL_MAX_FILS_KEY_LEN];
+ u16 fils_erp_next_seq_num;
+ bool fils_roam_disabled;
+ u32 fils_bcn_timeout_cache;
+};
+
+/* firmware /nvram downloading controller */
+struct wl_fw_ctrl {
+ const struct firmware *fw_entry;
+ unsigned long status;
+ u32 ptr;
+ s8 fw_name[WL_FILE_NAME_MAX];
+ s8 nvram_name[WL_FILE_NAME_MAX];
+};
+
+/* assoc ie length */
+struct wl_assoc_ielen {
+ u32 req_len;
+ u32 resp_len;
+};
+
+#define WL_EXTJOIN_VERSION_V1 1
+/* MIN branch version supporting join iovar versioning */
+#define MIN_JOINEXT_V1_FW_MAJOR 17u
+/* Branch/es supporting join iovar versioning prior to
+ * MIN_JOINEXT_V1_FW_MAJOR
+ */
+#define MIN_JOINEXT_V1_BR2_FW_MAJOR 16u
+#define MIN_JOINEXT_V1_BR2_FW_MINOR 1u
+
+#define MIN_JOINEXT_V1_BR1_FW_MAJOR 14u
+#define MIN_JOINEXT_V1_BR1_FW_MINOR 2u
+
+#define PMKDB_WLC_VER 14
+#define MIN_PMKID_LIST_V3_FW_MAJOR 13
+#define MIN_PMKID_LIST_V3_FW_MINOR 0
+
+#define MIN_PMKID_LIST_V2_FW_MAJOR 12
+#define MIN_PMKID_LIST_V2_FW_MINOR 0
+
+/* wpa2 pmk list */
+struct wl_pmk_list {
+ pmkid_list_v3_t pmkids;
+ pmkid_v3_t foo[MAXPMKID];
+};
+
+#define KEY_PERM_PMK 0xFFFFFFFF
+
+#ifdef DHD_MAX_IFS
+#define WL_MAX_IFS DHD_MAX_IFS
+#else
+#define WL_MAX_IFS 16
+#endif
+
+#define MAC_RAND_BYTES 3
+#define ESCAN_BUF_SIZE (64 * 1024)
+
+struct escan_info {
+ u32 escan_state;
+#ifdef STATIC_WL_PRIV_STRUCT
+#ifndef CONFIG_DHD_USE_STATIC_BUF
+#error STATIC_WL_PRIV_STRUCT should be used with CONFIG_DHD_USE_STATIC_BUF
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+#ifdef DUAL_ESCAN_RESULT_BUFFER
+ u8 *escan_buf[2];
+#else
+ u8 *escan_buf;
+#endif /* DUAL_ESCAN_RESULT_BUFFER */
+#else
+#ifdef DUAL_ESCAN_RESULT_BUFFER
+ u8 escan_buf[2][ESCAN_BUF_SIZE];
+#else
+ u8 escan_buf[ESCAN_BUF_SIZE];
+#endif /* DUAL_ESCAN_RESULT_BUFFER */
+#endif /* STATIC_WL_PRIV_STRUCT */
+#ifdef DUAL_ESCAN_RESULT_BUFFER
+ u8 cur_sync_id;
+ u8 escan_type[2];
+#endif /* DUAL_ESCAN_RESULT_BUFFER */
+ struct wiphy *wiphy;
+ struct net_device *ndev;
+#ifdef DHD_SEND_HANG_ESCAN_SYNCID_MISMATCH
+ bool prev_escan_aborted;
+#endif /* DHD_SEND_HANG_ESCAN_SYNCID_MISMATCH */
+};
+
+#ifdef ESCAN_BUF_OVERFLOW_MGMT
+#define BUF_OVERFLOW_MGMT_COUNT 3
+typedef struct {
+ int RSSI;
+ int length;
+ struct ether_addr BSSID;
+} removal_element_t;
+#endif /* ESCAN_BUF_OVERFLOW_MGMT */
+
+struct afx_hdl {
+ wl_af_params_t *pending_tx_act_frm;
+ struct ether_addr tx_dst_addr;
+ struct net_device *dev;
+ struct work_struct work;
+ s32 bssidx;
+ u32 retry;
+ s32 peer_chan;
+ s32 peer_listen_chan; /* search channel: configured by upper layer */
+ s32 my_listen_chan; /* listen channel: extracted from prb req or gon req */
+ bool is_listen;
+ bool ack_recv;
+ bool is_active;
+};
+
+struct parsed_ies {
+ const wpa_ie_fixed_t *wps_ie;
+ u32 wps_ie_len;
+ const wpa_ie_fixed_t *wpa_ie;
+ u32 wpa_ie_len;
+ const bcm_tlv_t *wpa2_ie;
+ u32 wpa2_ie_len;
+ const bcm_tlv_t *fils_ind_ie;
+ u32 fils_ind_ie_len;
+};
+
+#ifdef WL_SDO
+/* Service discovery */
+typedef struct {
+ uint8 transaction_id; /* Transaction ID */
+ uint8 protocol; /* Service protocol type */
+ uint16 query_len; /* Length of query */
+ uint16 response_len; /* Length of response */
+ uint8 qrbuf[1];
+} wl_sd_qr_t;
+
+typedef struct {
+ uint16 period; /* extended listen period */
+ uint16 interval; /* extended listen interval */
+} wl_sd_listen_t;
+
+#define WL_SD_STATE_IDLE 0x0000
+#define WL_SD_SEARCH_SVC 0x0001
+#define WL_SD_ADV_SVC 0x0002
+
+enum wl_dd_state {
+ WL_DD_STATE_IDLE,
+ WL_DD_STATE_SEARCH,
+ WL_DD_STATE_LISTEN
+};
+
+#define MAX_SDO_PROTO_STR_LEN 20
+typedef struct wl_sdo_proto {
+ char str[MAX_SDO_PROTO_STR_LEN];
+ u32 val;
+} wl_sdo_proto_t;
+
+typedef struct sd_offload {
+ u32 sd_state;
+ enum wl_dd_state dd_state;
+ wl_sd_listen_t sd_listen;
+} sd_offload_t;
+
+typedef struct sdo_event {
+ u8 addr[ETH_ALEN];
+ uint16 freq; /* channel Freq */
+ uint8 count; /* Tlv count */
+ uint16 update_ind;
+} sdo_event_t;
+#endif /* WL_SDO */
+
+#ifdef P2P_LISTEN_OFFLOADING
+typedef struct {
+ uint16 period; /* listen offload period */
+ uint16 interval; /* listen offload interval */
+ uint16 count; /* listen offload count */
+ uint16 pad; /* pad for 32bit align */
+} wl_p2plo_listen_t;
+#endif /* P2P_LISTEN_OFFLOADING */
+
+#ifdef WL11U
+/* Max length of Interworking element */
+#define IW_IES_MAX_BUF_LEN 8
+#endif
+#ifdef WLFBT
+#define FBT_KEYLEN 32
+#endif
+#define MAX_EVENT_BUF_NUM 16
+typedef struct wl_eventmsg_buf {
+ u16 num;
+ struct {
+ u16 type;
+ bool set;
+ } event [MAX_EVENT_BUF_NUM];
+} wl_eventmsg_buf_t;
+
+typedef struct wl_if_event_info {
+ bool valid;
+ int ifidx;
+ int bssidx;
+ uint8 mac[ETHER_ADDR_LEN];
+ char name[IFNAMSIZ+1];
+ uint8 role;
+} wl_if_event_info;
+
+#ifdef SUPPORT_AP_RADIO_PWRSAVE
+typedef struct ap_rps_info {
+ bool enable;
+ int sta_assoc_check;
+ int pps;
+ int quiet_time;
+ int level;
+} ap_rps_info_t;
+#endif /* SUPPORT_AP_RADIO_PWRSAVE */
+
+#ifdef SUPPORT_RSSI_SUM_REPORT
+#define RSSILOG_FLAG_FEATURE_SW 0x1
+#define RSSILOG_FLAG_REPORT_READY 0x2
+typedef struct rssilog_set_param {
+ uint8 enable;
+ uint8 rssi_threshold;
+ uint8 time_threshold;
+ uint8 pad;
+} rssilog_set_param_t;
+
+typedef struct rssilog_get_param {
+ uint8 report_count;
+ uint8 enable;
+ uint8 rssi_threshold;
+ uint8 time_threshold;
+} rssilog_get_param_t;
+
+typedef struct rssi_ant_param {
+ struct ether_addr ea;
+ chanspec_t chanspec;
+} rssi_ant_param_t;
+
+typedef struct wl_rssi_ant_mimo {
+ uint32 version;
+ uint32 count;
+ int8 rssi_ant[WL_RSSI_ANT_MAX];
+ int8 rssi_sum;
+ int8 PAD[3];
+} wl_rssi_ant_mimo_t;
+#endif /* SUPPORT_RSSI_SUM_REPORT */
+
+/* MBO-OCE prune event reason codes */
+#if defined(WL_MBO) || defined(WL_OCE)
+typedef enum wl_prune_evt_reason {
+ WIFI_PRUNE_UNSPECIFIED = 0, /* Unspecified event reason code */
+ WIFI_PRUNE_ASSOC_RETRY_DELAY = 1, /* MBO assoc retry delay */
+ WIFI_PRUNE_RSSI_ASSOC_REJ = 2 /* OCE RSSI-based assoc rejection */
+} wl_prune_evt_reason_t;
+#endif /* WL_MBO || WL_OCE */
+
+#if defined(DHD_ENABLE_BIGDATA_LOGGING)
+#define GET_BSS_INFO_LEN 90
+#endif /* DHD_ENABLE_BIGDATA_LOGGING */
+
+#ifdef WL_MBO
+typedef struct wl_event_mbo wl_event_mbo_t;
+typedef struct wl_event_mbo_cell_nw_switch wl_event_mbo_cell_nw_switch_t;
+typedef struct wl_btm_event_type_data wl_btm_event_type_data_t;
+#endif /* WL_MBO */
+
+#if defined(WL_MBO) || defined(WL_OCE)
+typedef struct wl_bssid_prune_evt_info wl_bssid_pruned_evt_info_t;
+#endif /* WL_MBO || WL_OCE */
+
+#define WL_CCODE_LEN 2
+
+#ifdef WL_NAN
+#ifdef WL_NANP2P
+#define WL_CFG_P2P_DISC_BIT 0x1u
+#define WL_CFG_NAN_DISC_BIT 0x2u
+#define WL_NANP2P_CONC_SUPPORT (WL_CFG_P2P_DISC_BIT | WL_CFG_NAN_DISC_BIT)
+#endif /* WL_NANP2P */
+#endif /* WL_NAN */
+
+#ifdef WL_IFACE_MGMT
+#define WL_IFACE_NOT_PRESENT -1
+
+typedef enum iface_conc_policy {
+ WL_IF_POLICY_DEFAULT = 0,
+ WL_IF_POLICY_FCFS = 1,
+ WL_IF_POLICY_LP = 2,
+ WL_IF_POLICY_ROLE_PRIORITY = 3,
+ WL_IF_POLICY_CUSTOM = 4,
+ WL_IF_POLICY_INVALID
+} iface_conc_policy_t;
+
+typedef struct iface_mgmt_data {
+ uint8 policy;
+ uint8 priority[WL_IF_TYPE_MAX];
+} iface_mgmt_data_t;
+#endif /* WL_IFACE_MGMT */
+
+#ifdef WL_WPS_SYNC
+#define EAP_PACKET 0
+#define EAP_EXPANDED_TYPE 254
+#define EAP_EXP_OPCODE_OFFSET 7
+#define EAP_EXP_FRAGMENT_LEN_OFFSET 2
+#define EAP_EXP_FLAGS_FRAGMENTED_DATA 2
+#define EAP_EXP_FLAGS_MORE_DATA 1
+#define EAPOL_EAP_HDR_LEN 5
+#define EAP_EXP_HDR_MIN_LENGTH (EAPOL_EAP_HDR_LEN + EAP_EXP_OPCODE_OFFSET)
+#define EAP_ATTRIB_MSGTYPE 0x1022
+#define EAP_WSC_UPNP 0
+#define EAP_WSC_START 1
+#define EAP_WSC_ACK 2
+#define EAP_WSC_NACK 3
+#define EAP_WSC_MSG 4
+#define EAP_WSC_DONE 5
+#define EAP_WSC_MSG_M8 12
+#define EAP_CODE_FAILURE 4
+#define WL_WPS_REAUTH_TIMEOUT 10000
+
+struct wl_eap_header {
+ unsigned char code; /* EAP code */
+ unsigned char id; /* Current request ID */
+ unsigned short length; /* Length including header */
+ unsigned char type; /* EAP type (optional) */
+ unsigned char data[1]; /* Type data (optional) */
+} __attribute__ ((packed));
+typedef struct wl_eap_header wl_eap_header_t;
+
+typedef enum wl_wps_state {
+ WPS_STATE_IDLE = 0,
+ WPS_STATE_STARTED,
+ WPS_STATE_M8_SENT,
+ WPS_STATE_M8_RECVD,
+ WPS_STATE_EAP_FAIL,
+ WPS_STATE_REAUTH_WAIT,
+ WPS_STATE_LINKUP,
+ WPS_STATE_LINKDOWN,
+ WPS_STATE_DISCONNECT,
+ WPS_STATE_DISCONNECT_CLIENT,
+ WPS_STATE_CONNECT_FAIL,
+ WPS_STATE_AUTHORIZE,
+ WPS_STATE_DONE,
+ WPS_STATE_INVALID
+} wl_wps_state_t;
+
+#define WPS_MAX_SESSIONS 2
+typedef struct wl_wps_session {
+ bool in_use;
+ timer_list_compat_t timer;
+ struct net_device *ndev;
+ wl_wps_state_t state;
+ u16 mode;
+ u8 peer_mac[ETHER_ADDR_LEN];
+} wl_wps_session_t;
+#endif /* WL_WPS_SYNC */
+
+#ifndef WL_STATIC_IFNAME_PREFIX
+#define WL_STATIC_IFNAME_PREFIX "wlan%d"
+#endif /* WL_STATIC_IFNAME_PREFIX */
+
+typedef struct buf_data {
+ u32 ver; /* version of struct */
+ u32 len; /* Total len */
+ /* size of each buffer in case of split buffers (0 - single buffer). */
+ u32 buf_threshold;
+ const void *data_buf[1]; /* array of user space buffer pointers. */
+} buf_data_t;
+
+typedef struct wl_loc_info {
+ bool in_progress; /* for tracking listen in progress */
+ struct delayed_work work; /* for taking care of listen timeout */
+ struct wireless_dev *wdev; /* interface on which listen is requested */
+} wl_loc_info_t;
+
+typedef enum wl_sar_modes {
+ HEAD_SAR_BACKOFF_DISABLE = -1,
+ HEAD_SAR_BACKOFF_ENABLE = 0,
+ GRIP_SAR_BACKOFF_DISABLE,
+ GRIP_SAR_BACKOFF_ENABLE,
+ NR_mmWave_SAR_BACKOFF_DISABLE,
+ NR_mmWave_SAR_BACKOFF_ENABLE,
+ NR_Sub6_SAR_BACKOFF_DISABLE,
+ NR_Sub6_SAR_BACKOFF_ENABLE,
+ SAR_BACKOFF_DISABLE_ALL
+} wl_sar_modes_t;
+
+/* Pre selected Power scenarios to be applied from BDF file */
+typedef enum {
+ WIFI_POWER_SCENARIO_INVALID = -2,
+ WIFI_POWER_SCENARIO_DEFAULT = -1,
+ WIFI_POWER_SCENARIO_VOICE_CALL = 0,
+ WIFI_POWER_SCENARIO_ON_HEAD_CELL_OFF = 1,
+ WIFI_POWER_SCENARIO_ON_HEAD_CELL_ON = 2,
+ WIFI_POWER_SCENARIO_ON_BODY_CELL_OFF = 3,
+ WIFI_POWER_SCENARIO_ON_BODY_CELL_ON = 4,
+ WIFI_POWER_SCENARIO_ON_BODY_BT = 5
+} wifi_power_scenario;
+
+/* Log timestamp */
+#define LOG_TS(cfg, ts) cfg->tsinfo.ts = OSL_LOCALTIME_NS();
+#define CLR_TS(cfg, ts) cfg->tsinfo.ts = 0;
+#define GET_TS(cfg, ts) cfg->tsinfo.ts;
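+/* Note: the macros above expand with a trailing ';', so they are
+ * statement macros; GET_TS() in particular cannot be used inside a
+ * larger expression.
+ */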
+typedef struct wl_ctx_tsinfo {
+ uint64 scan_start;
+ uint64 scan_enq; /* scan event enqueue time */
+ uint64 scan_deq;
+ uint64 scan_hdlr_cmplt;
+ uint64 scan_cmplt; /* scan event handler completion */
+ uint64 conn_start;
+ uint64 conn_cmplt;
+ uint64 wl_evt_deq;
+ uint64 authorize_start;
+ uint64 authorize_cmplt;
+ uint64 wl_evt_hdlr_entry;
+ uint64 wl_evt_hdlr_exit;
+} wl_ctx_tsinfo_t;
+
+typedef struct wlcfg_assoc_info {
+ bool targeted_join; /* Unicast bssid. Host selected bssid for join */
+ bool reassoc;
+ bool bssid_hint;
+ u8 bssid[ETH_ALEN];
+ u16 ssid_len;
+ u8 ssid[DOT11_MAX_SSID_LEN];
+ s32 bssidx;
+ u32 chan_cnt;
+ chanspec_t chanspecs[MAX_ROAM_CHANNEL];
+} wlcfg_assoc_info_t;
+
+#define MAX_NUM_OF_ASSOCIATED_DEV 64
+struct bcm_assoclist {
+ u32 count;
+ u8 mac[MAX_NUM_OF_ASSOCIATED_DEV][ETH_ALEN];
+};
+
+typedef struct wl_event_idx {
+ u32 enqd;
+ u32 in_progress;
+ u32 event_type;
+ u32 min_connect_idx;
+} wl_event_idx_t;
+
+/* private data of cfg80211 interface */
+struct bcm_cfg80211 {
+ struct wireless_dev *wdev; /* representing the cfg80211 device */
+
+ struct wireless_dev *p2p_wdev; /* representing the cfg80211 device for P2P */
+ struct net_device *p2p_net; /* reference to p2p0 interface */
+
+ struct wl_conf *conf;
+ struct cfg80211_scan_request *scan_request; /* scan request object */
+ EVENT_HANDLER evt_handler[WLC_E_LAST];
+ struct list_head eq_list; /* used for event queue */
+ struct list_head net_list; /* used for struct net_info */
+ spinlock_t net_list_sync; /* to protect scan status (and others if needed) */
+ spinlock_t eq_lock; /* for event queue synchronization */
+ spinlock_t cfgdrv_lock; /* to protect scan status (and others if needed) */
+ struct completion act_frm_scan;
+ struct completion iface_disable;
+ struct completion wait_next_af;
+ struct mutex usr_sync; /* mainly for up/down synchronization */
+ struct mutex if_sync; /* mainly for iface op synchronization */
+ struct mutex scan_sync; /* scan sync from different scan contexts */
+ wl_scan_results_t *bss_list;
+ wl_scan_results_t *scan_results;
+
+ /* scan request object for internal purpose */
+ struct wl_scan_req *scan_req_int;
+ /* information element object for internal purpose */
+#if defined(STATIC_WL_PRIV_STRUCT)
+ struct wl_ie *ie;
+#else
+ struct wl_ie ie;
+#endif
+
+ /* association information container */
+#if defined(STATIC_WL_PRIV_STRUCT)
+ struct wl_connect_info *conn_info;
+#else
+ struct wl_connect_info conn_info;
+#endif
+ struct wl_pmk_list *pmk_list; /* wpa2 pmk list */
+ tsk_ctl_t event_tsk; /* task of main event handler thread */
+ dhd_pub_t *pub;
+ u32 iface_cnt;
+ u32 channel; /* current channel */
+ u32 af_sent_channel; /* channel on which the action frame was sent */
+ /* next AF subtype to cancel the remaining dwell time in the rx process */
+ u8 next_af_subtype;
+#ifdef WL_CFG80211_SYNC_GON
+ ulong af_tx_sent_jiffies;
+#endif /* WL_CFG80211_SYNC_GON */
+ struct escan_info escan_info; /* escan information */
+ bool active_scan; /* current scan mode */
+ bool ibss_starter; /* indicates this sta is ibss starter */
+ bool link_up; /* link/connection up flag */
+
+ /* indicates whether the chip supports power save mode */
+ bool pwr_save;
+ bool roam_on; /* on/off switch for self-roaming */
+ bool scan_tried; /* indicates if first scan attempted */
+#if defined(BCMSDIO) || defined(BCMDBUS)
+ bool wlfc_on;
+#endif
+ bool vsdb_mode;
+#define WL_ROAM_OFF_ON_CONCURRENT 0x0001
+#define WL_ROAM_REVERT_STATUS 0x0002
+ u32 roam_flags;
+ u8 *ioctl_buf; /* ioctl buffer */
+ struct mutex ioctl_buf_sync;
+ u8 *escan_ioctl_buf;
+ u8 *extra_buf; /* mainly to grab assoc information */
+ struct dentry *debugfsdir;
+ struct rfkill *rfkill;
+ bool rf_blocked;
+ struct ieee80211_channel remain_on_chan;
+ enum nl80211_channel_type remain_on_chan_type;
+ u64 send_action_id;
+ u64 last_roc_id;
+ wait_queue_head_t netif_change_event;
+ wl_if_event_info if_event_info;
+ struct completion send_af_done;
+ struct afx_hdl *afx_hdl;
+ struct p2p_info *p2p;
+ bool p2p_supported;
+ void *btcoex_info;
+	timer_list_compat_t scan_timeout;	/* Timer to catch scan event timeout */
+#ifdef WL_CFG80211_GON_COLLISION
+ u8 block_gon_req_tx_count;
+ u8 block_gon_req_rx_count;
+#endif /* WL_CFG80211_GON_COLLISION */
+#if defined(P2P_IE_MISSING_FIX)
+ bool p2p_prb_noti;
+#endif
+ s32(*state_notifier) (struct bcm_cfg80211 *cfg,
+ struct net_info *_net_info, enum wl_status state, bool set);
+ unsigned long interrested_state;
+ wlc_ssid_t hostapd_ssid;
+#ifdef WL_SDO
+ sd_offload_t *sdo;
+#endif
+#ifdef WL11U
+ bool wl11u;
+#endif /* WL11U */
+ bool sched_scan_running; /* scheduled scan req status */
+ struct cfg80211_sched_scan_request *sched_scan_req; /* scheduled scan req */
+#ifdef WL_HOST_BAND_MGMT
+ u8 curr_band;
+#endif /* WL_HOST_BAND_MGMT */
+ bool scan_suppressed;
+
+#ifdef OEM_ANDROID
+ timer_list_compat_t scan_supp_timer;
+ struct work_struct wlan_work;
+#endif /* OEM_ANDROID */
+
+	struct mutex event_sync;	/* mainly for up/down synchronization */
+ bool disable_roam_event;
+ struct delayed_work pm_enable_work;
+
+#ifdef OEM_ANDROID
+ struct workqueue_struct *event_workq; /* workqueue for event */
+#endif /* OEM_ANDROID */
+
+#ifndef OEM_ANDROID
+ bool event_workq_init;
+#endif /* !OEM_ANDROID */
+ struct work_struct event_work; /* work item for event */
+ struct mutex pm_sync; /* mainly for pm work synchronization */
+
+ vndr_ie_setbuf_t *ibss_vsie; /* keep the VSIE for IBSS */
+ int ibss_vsie_len;
+#ifdef WLAIBSS
+ u32 aibss_txfail_pid;
+ u32 aibss_txfail_seq;
+#endif /* WLAIBSS */
+#ifdef WL_RELMCAST
+ u32 rmc_event_pid;
+ u32 rmc_event_seq;
+#endif /* WL_RELMCAST */
+#ifdef WLAIBSS_MCHAN
+ struct ether_addr ibss_if_addr;
+ bcm_struct_cfgdev *ibss_cfgdev; /* For AIBSS */
+#endif /* WLAIBSS_MCHAN */
+	bool bss_pending_op;		/* indicates whether there is a pending IF operation */
+#ifdef WLFBT
+ uint8 fbt_key[FBT_KEYLEN];
+#endif
+ int roam_offload;
+#ifdef WL_NAN
+ wl_nancfg_t *nancfg;
+#ifdef WL_NANP2P
+ uint8 conc_disc;
+ bool nan_p2p_supported;
+#endif /* WL_NANP2P */
+#endif /* WL_NAN */
+#ifdef WL_IFACE_MGMT
+ iface_mgmt_data_t iface_data;
+#endif /* WL_IFACE_MGMT */
+#ifdef P2PLISTEN_AP_SAMECHN
+ bool p2p_resp_apchn_status;
+#endif /* P2PLISTEN_AP_SAMECHN */
+ struct wl_wsec_key wep_key;
+#ifdef WLTDLS
+ u8 *tdls_mgmt_frame;
+ u32 tdls_mgmt_frame_len;
+ s32 tdls_mgmt_freq;
+#endif /* WLTDLS */
+ bool need_wait_afrx;
+#ifdef QOS_MAP_SET
+ uint8 *up_table; /* user priority table, size is UP_TABLE_MAX */
+#endif /* QOS_MAP_SET */
+ struct ether_addr last_roamed_addr;
+ bool rcc_enabled; /* flag for Roam channel cache feature */
+#if defined(DHD_ENABLE_BIGDATA_LOGGING)
+ char bss_info[GET_BSS_INFO_LEN];
+ wl_event_msg_t event_auth_assoc;
+ u32 assoc_reject_status;
+ u32 roam_count;
+#endif /* DHD_ENABLE_BIGDATA_LOGGING */
+ u16 ap_oper_channel;
+#if defined(SUPPORT_RANDOM_MAC_SCAN)
+ bool random_mac_enabled;
+#endif /* SUPPORT_RANDOM_MAC_SCAN */
+#ifdef DHD_LOSSLESS_ROAMING
+	timer_list_compat_t roam_timeout;	/* Timer to catch roam timeout */
+#endif
+#ifndef DUAL_ESCAN_RESULT_BUFFER
+ uint16 escan_sync_id_cntr;
+#endif
+#ifdef WLTDLS
+ uint8 tdls_supported;
+ struct mutex tdls_sync; /* protect tdls config operations */
+#endif /* WLTDLS */
+#ifdef MFP
+ const uint8 *bip_pos;
+ int mfp_mode;
+#endif /* MFP */
+#ifdef WES_SUPPORT
+#ifdef CUSTOMER_SCAN_TIMEOUT_SETTING
+ int custom_scan_channel_time;
+ int custom_scan_unassoc_time;
+ int custom_scan_passive_time;
+ int custom_scan_home_time;
+ int custom_scan_home_away_time;
+#endif /* CUSTOMER_SCAN_TIMEOUT_SETTING */
+#endif /* WES_SUPPORT */
+ uint8 vif_count; /* Virtual Interface count */
+#ifdef WBTEXT
+ struct list_head wbtext_bssid_list;
+#endif /* WBTEXT */
+#ifdef SUPPORT_AP_RADIO_PWRSAVE
+ ap_rps_info_t ap_rps_info;
+#endif /* SUPPORT_AP_RADIO_PWRSAVE */
+ u16 vif_macaddr_mask;
+ osl_t *osh;
+ struct list_head vndr_oui_list;
+ spinlock_t vndr_oui_sync; /* to protect vndr_oui_list */
+ bool rssi_sum_report;
+ int rssi; /* previous RSSI (backup) of get_station */
+#ifdef WL_WPS_SYNC
+ wl_wps_session_t wps_session[WPS_MAX_SESSIONS];
+ spinlock_t wps_sync; /* to protect wps states (and others if needed) */
+#endif /* WL_WPS_SYNC */
+ struct wl_fils_info fils_info;
+#ifdef WL_BAM
+ wl_bad_ap_mngr_t bad_ap_mngr;
+#endif /* WL_BAM */
+
+#ifdef BIGDATA_SOFTAP
+ struct wl_ap_sta_info *ap_sta_info;
+#endif /* BIGDATA_SOFTAP */
+
+ uint8 scanmac_enabled;
+ bool scanmac_config;
+#ifdef WL_BCNRECV
+ /* structure used for fake ap detection info */
+ struct mutex bcn_sync; /* mainly for bcn resume/suspend synchronization */
+ wl_bcnrecv_info_t bcnrecv_info;
+#endif /* WL_BCNRECV */
+ struct net_device *static_ndev[DHD_MAX_STATIC_IFS];
+ uint8 static_ndev_state[DHD_MAX_STATIC_IFS];
+ bool hal_started;
+ wl_wlc_version_t wlc_ver;
+ bool scan_params_v2;
+#ifdef SUPPORT_AP_BWCTRL
+ u32 bw_cap_5g;
+#endif /* SUPPORT_AP_BWCTRL */
+#ifdef WL_6G_BAND
+ bool band_6g_supported;
+#endif /* WL_6G_BAND */
+ wl_loc_info_t loc; /* listen on channel state info */
+ int roamscan_mode;
+ int wes_mode;
+ int ncho_mode;
+ int ncho_band;
+#ifdef WL_SAR_TX_POWER
+ wifi_power_scenario wifi_tx_power_mode;
+#endif /* WL_SAR_TX_POWER */
+	struct mutex connect_sync;	/* For assoc/reassoc state sync */
+ wl_ctx_tsinfo_t tsinfo;
+ struct wl_pmk_list *spmk_info_list; /* single pmk info list */
+ struct bcm_assoclist assoclist;
+ chanspec_t acs_chspec; /* Selected chanspec in case of ACS */
+ u32 join_iovar_ver;
+ struct delayed_work ap_work; /* AP linkup timeout handler */
+ wl_event_idx_t eidx; /* event state tracker */
+#ifdef WL_P2P_6G
+ bool p2p_6g_enabled; /* P2P 6G support enabled */
+#endif /* WL_P2P_6G */
+ u32 halpid;
+#if defined(RSSIAVG)
+ wl_rssi_cache_ctrl_t g_rssi_cache_ctrl;
+ wl_rssi_cache_ctrl_t g_connected_rssi_cache_ctrl;
+#endif
+#if defined(BSSCACHE)
+ wl_bss_cache_ctrl_t g_bss_cache_ctrl;
+#endif
+ int autochannel;
+ int best_2g_ch;
+ int best_5g_ch;
+};
+
+/* Max auth timeout allowed in case of EAP is 70 sec; an additional 5 sec
+ * covers inter-layer overheads, giving 75 seconds expressed in microseconds.
+ */
+#define WL_DS_SKIP_THRESHOLD_USECS (75000L * 1000L)
+
+enum wl_state_type {
+ WL_STATE_IDLE,
+ WL_STATE_SCANNING,
+ WL_STATE_CONNECTING,
+ WL_STATE_LISTEN,
+	WL_STATE_AUTHORIZING /* Associated to authorized */
+};
+
+#define WL_STATIC_IFIDX (DHD_MAX_IFS)
+enum static_ndev_states {
+ NDEV_STATE_NONE,
+ NDEV_STATE_OS_IF_CREATED,
+ NDEV_STATE_FW_IF_CREATED,
+ NDEV_STATE_FW_IF_FAILED,
+ NDEV_STATE_FW_IF_DELETED
+};
+#ifdef WL_STATIC_IF
+bool wl_cfg80211_static_if(struct bcm_cfg80211 *cfg, struct net_device *ndev);
+int wl_cfg80211_static_ifidx(struct bcm_cfg80211 *cfg, struct net_device *ndev);
+struct net_device *wl_cfg80211_static_if_active(struct bcm_cfg80211 *cfg);
+int wl_cfg80211_static_if_name(struct bcm_cfg80211 *cfg, const char *name);
+void wl_cfg80211_static_if_dev_close(struct net_device *dev);
+#endif /* WL_STATIC_IF */
+
+#ifdef WL_SAE
+typedef struct wl_sae_key_info {
+ uint8 peer_mac[ETHER_ADDR_LEN];
+ uint16 pmk_len;
+ uint16 pmkid_len;
+ const uint8 *pmk;
+ const uint8 *pmkid;
+} wl_sae_key_info_t;
+#endif /* WL_SAE */
+
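+/* Concurrency modes: SCC = same-channel concurrency, VSDB = virtual
+ * simultaneous dual-band (time-sliced across channels), RSDB = real
+ * simultaneous dual-band (independent radio chains).
+ */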
+typedef enum wl_concurrency_mode {
+ CONCURRENCY_MODE_NONE = 0,
+ CONCURRENCY_SCC_MODE,
+ CONCURRENCY_VSDB_MODE,
+ CONCURRENCY_RSDB_MODE
+} wl_concurrency_mode_t;
+
+typedef struct wl_wips_event_info {
+ uint32 timestamp;
+ struct ether_addr bssid;
+ uint16 misdeauth;
+ int16 current_RSSI;
+ int16 deauth_RSSI;
+} wl_wips_event_info_t;
+
+s32 wl_iftype_to_mode(wl_iftype_t iftype);
+
+#define BCM_LIST_FOR_EACH_ENTRY_SAFE(pos, next, head, member) \
+ list_for_each_entry_safe((pos), (next), (head), member)
+extern int ioctl_version;
+
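+/* Walk the variable-length wl_bss_info_t records of a scan result list:
+ * a NULL 'bss' yields the first record; otherwise the pointer advances
+ * by the current record's self-reported length.
+ */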
+static inline wl_bss_info_t
+*next_bss(wl_scan_results_t *list, wl_bss_info_t *bss)
+{
+	return bss ?
+		(wl_bss_info_t *)((uintptr) bss + dtoh32(bss->length)) : list->bss_info;
+}
+
+static inline void
+wl_probe_wdev_all(struct bcm_cfg80211 *cfg)
+{
+ struct net_info *_net_info, *next;
+ unsigned long int flags;
+ int idx = 0;
+ WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next,
+ &cfg->net_list, list) {
+ GCC_DIAGNOSTIC_POP();
+ WL_INFORM_MEM(("wl_probe_wdev_all: net_list[%d] bssidx: %d\n",
+ idx++, _net_info->bssidx));
+ }
+ WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
+ return;
+}
+
+static inline struct net_info *
+wl_get_netinfo_by_fw_idx(struct bcm_cfg80211 *cfg, s32 bssidx, u8 ifidx)
+{
+ struct net_info *_net_info, *next, *info = NULL;
+ unsigned long int flags;
+
+ WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+ GCC_DIAGNOSTIC_POP();
+ if ((bssidx >= 0) && (_net_info->bssidx == bssidx) &&
+ (_net_info->ifidx == ifidx)) {
+ info = _net_info;
+ break;
+ }
+ }
+ WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
+ return info;
+}
+
+static inline void
+wl_dealloc_netinfo_by_wdev(struct bcm_cfg80211 *cfg, struct wireless_dev *wdev)
+{
+ struct net_info *_net_info, *next;
+ unsigned long int flags;
+
+#ifdef DHD_IFDEBUG
+ WL_INFORM_MEM(("dealloc_netinfo enter wdev=%p \n", OSL_OBFUSCATE_BUF(wdev)));
+#endif
+ WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+ GCC_DIAGNOSTIC_POP();
+ if (wdev && (_net_info->wdev == wdev)) {
+ wl_cfgbss_t *bss = &_net_info->bss;
+
+ if (bss->wpa_ie) {
+ MFREE(cfg->osh, bss->wpa_ie, bss->wpa_ie[1]
+ + WPA_RSN_IE_TAG_FIXED_LEN);
+ bss->wpa_ie = NULL;
+ }
+
+ if (bss->rsn_ie) {
+ MFREE(cfg->osh, bss->rsn_ie,
+ bss->rsn_ie[1] + WPA_RSN_IE_TAG_FIXED_LEN);
+ bss->rsn_ie = NULL;
+ }
+
+ if (bss->wps_ie) {
+ MFREE(cfg->osh, bss->wps_ie, bss->wps_ie[1] + 2);
+ bss->wps_ie = NULL;
+ }
+ list_del(&_net_info->list);
+ cfg->iface_cnt--;
+ MFREE(cfg->osh, _net_info, sizeof(struct net_info));
+ }
+ }
+ WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
+#ifdef DHD_IFDEBUG
+ WL_INFORM_MEM(("dealloc_netinfo exit iface_cnt=%d \n", cfg->iface_cnt));
+#endif
+}
+
+static inline s32
+wl_alloc_netinfo(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ struct wireless_dev * wdev, wl_iftype_t iftype, bool pm_block, u8 bssidx, u8 ifidx)
+{
+ struct net_info *_net_info;
+ s32 err = 0;
+ unsigned long int flags;
+#ifdef DHD_IFDEBUG
+ WL_INFORM_MEM(("alloc_netinfo enter bssidx=%d wdev=%p\n",
+ bssidx, OSL_OBFUSCATE_BUF(wdev)));
+#endif
+ /* Check whether there is any duplicate entry for the
+ * same bssidx && ifidx.
+ */
+ if ((_net_info = wl_get_netinfo_by_fw_idx(cfg, bssidx, ifidx))) {
+		/* A duplicate entry for the same bssidx/ifidx is already
+		 * present, which shouldn't be the case. Attempt recovery.
+		 */
+ WL_ERR(("Duplicate entry for bssidx=%d ifidx=%d present."
+ " Can't add new entry\n", bssidx, ifidx));
+ wl_probe_wdev_all(cfg);
+#ifdef DHD_DEBUG
+ ASSERT(0);
+#endif /* DHD_DEBUG */
+ return -EINVAL;
+ }
+ if (cfg->iface_cnt == IFACE_MAX_CNT)
+ return -ENOMEM;
+ _net_info = (struct net_info *)MALLOCZ(cfg->osh, sizeof(struct net_info));
+ if (!_net_info)
+ err = -ENOMEM;
+ else {
+ _net_info->iftype = iftype;
+ _net_info->ndev = ndev;
+ _net_info->wdev = wdev;
+ _net_info->pm_restore = 0;
+ _net_info->pm = 0;
+ _net_info->pm_block = pm_block;
+ _net_info->roam_off = WL_INVALID;
+ _net_info->bssidx = bssidx;
+ _net_info->ifidx = ifidx;
+ WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
+ cfg->iface_cnt++;
+ list_add(&_net_info->list, &cfg->net_list);
+ WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
+ }
+#ifdef DHD_IFDEBUG
+ WL_DBG(("alloc_netinfo exit iface_cnt=%d \n", cfg->iface_cnt));
+#endif
+ return err;
+}
+
+static inline void
+wl_delete_all_netinfo(struct bcm_cfg80211 *cfg)
+{
+ struct net_info *_net_info, *next;
+ unsigned long int flags;
+
+ WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+ wl_cfgbss_t *bss = &_net_info->bss;
+ GCC_DIAGNOSTIC_POP();
+
+ if (bss->wpa_ie) {
+ MFREE(cfg->osh, bss->wpa_ie, bss->wpa_ie[1]
+ + WPA_RSN_IE_TAG_FIXED_LEN);
+ bss->wpa_ie = NULL;
+ }
+
+ if (bss->rsn_ie) {
+ MFREE(cfg->osh, bss->rsn_ie, bss->rsn_ie[1]
+ + WPA_RSN_IE_TAG_FIXED_LEN);
+ bss->rsn_ie = NULL;
+ }
+
+ if (bss->wps_ie) {
+ MFREE(cfg->osh, bss->wps_ie, bss->wps_ie[1] + 2);
+ bss->wps_ie = NULL;
+ }
+
+ if (bss->fils_ind_ie) {
+ MFREE(cfg->osh, bss->fils_ind_ie, bss->fils_ind_ie[1]
+ + FILS_INDICATION_IE_TAG_FIXED_LEN);
+ bss->fils_ind_ie = NULL;
+ }
+ list_del(&_net_info->list);
+ if (_net_info->wdev) {
+ MFREE(cfg->osh, _net_info->wdev, sizeof(struct wireless_dev));
+ }
+ MFREE(cfg->osh, _net_info, sizeof(struct net_info));
+ }
+ cfg->iface_cnt = 0;
+ WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
+}
+static inline u32
+wl_get_status_all(struct bcm_cfg80211 *cfg, s32 status)
+{
+ struct net_info *_net_info, *next;
+ u32 cnt = 0;
+ unsigned long int flags;
+
+ WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+ GCC_DIAGNOSTIC_POP();
+ if (_net_info->ndev &&
+ test_bit(status, &_net_info->sme_state))
+ cnt++;
+ }
+ WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
+ return cnt;
+}
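+/* 'op' encoding shared by wl_set_status_all()/wl_set_status_by_netdev()
+ * (see the wl_set/clr/chg_drv_status macros): 1 = set_bit, 2 = clear_bit,
+ * 4 = change_bit on the per-interface sme_state word.
+ */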
+static inline void
+wl_set_status_all(struct bcm_cfg80211 *cfg, s32 status, u32 op)
+{
+ struct net_info *_net_info, *next;
+ unsigned long int flags;
+
+ WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+ GCC_DIAGNOSTIC_POP();
+ switch (op) {
+ case 1:
+ break; /* set all status is not allowed */
+ case 2:
+ /*
+ * Release the spinlock before calling notifier. Else there
+ * will be nested calls
+ */
+ WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
+ clear_bit(status, &_net_info->sme_state);
+ if (cfg->state_notifier &&
+ test_bit(status, &(cfg->interrested_state)))
+ cfg->state_notifier(cfg, _net_info, status, false);
+ return;
+ case 4:
+ break; /* change all status is not allowed */
+ default:
+ break; /* unknown operation */
+ }
+ }
+ WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
+}
+static inline void
+wl_set_status_by_netdev(struct bcm_cfg80211 *cfg, s32 status,
+ struct net_device *ndev, u32 op)
+{
+ struct net_info *_net_info, *next;
+ unsigned long int flags;
+
+ if (status >= BITS_PER_LONG) {
+		/* max value for shift operation is
+		 * (BITS_PER_LONG - 1) for unsigned long.
+		 * if status crosses BITS_PER_LONG, the variable
+		 * sme_state should be correspondingly updated.
+		 */
+ ASSERT(0);
+ return;
+ }
+
+ WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+ if (ndev && (_net_info->ndev == ndev)) {
+ GCC_DIAGNOSTIC_POP();
+ switch (op) {
+ case 1:
+ /*
+ * Release the spinlock before calling notifier. Else there
+ * will be nested calls
+ */
+ WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
+ set_bit(status, &_net_info->sme_state);
+ if (cfg->state_notifier &&
+ test_bit(status, &(cfg->interrested_state)))
+ cfg->state_notifier(cfg, _net_info, status, true);
+ return;
+ case 2:
+ /*
+ * Release the spinlock before calling notifier. Else there
+ * will be nested calls
+ */
+ WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
+ clear_bit(status, &_net_info->sme_state);
+ if (cfg->state_notifier &&
+ test_bit(status, &(cfg->interrested_state)))
+ cfg->state_notifier(cfg, _net_info, status, false);
+ return;
+ case 4:
+ change_bit(status, &_net_info->sme_state);
+ break;
+ }
+ }
+
+ }
+ WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
+
+}
+
+static inline wl_cfgbss_t *
+wl_get_cfgbss_by_wdev(struct bcm_cfg80211 *cfg,
+ struct wireless_dev *wdev)
+{
+ struct net_info *_net_info, *next;
+ wl_cfgbss_t *bss = NULL;
+ unsigned long int flags;
+
+ WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+ GCC_DIAGNOSTIC_POP();
+ if (wdev && (_net_info->wdev == wdev)) {
+ bss = &_net_info->bss;
+ break;
+ }
+ }
+
+ WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
+ return bss;
+}
+
+static inline u32
+wl_get_status_by_netdev(struct bcm_cfg80211 *cfg, s32 status,
+ struct net_device *ndev)
+{
+ struct net_info *_net_info, *next;
+ u32 stat = 0;
+ unsigned long int flags;
+
+ WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+ GCC_DIAGNOSTIC_POP();
+ if (ndev && (_net_info->ndev == ndev)) {
+ stat = test_bit(status, &_net_info->sme_state);
+ break;
+ }
+ }
+ WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
+ return stat;
+}
+
+static inline s32
+wl_get_mode_by_netdev(struct bcm_cfg80211 *cfg, struct net_device *ndev)
+{
+ struct net_info *_net_info, *next;
+ s32 mode = -1;
+ unsigned long int flags;
+
+ WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+ GCC_DIAGNOSTIC_POP();
+ if (_net_info->ndev && (_net_info->ndev == ndev)) {
+ mode = wl_iftype_to_mode(_net_info->iftype);
+ break;
+ }
+ }
+ WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
+ return mode;
+}
+
+static inline s32
+wl_get_bssidx_by_wdev(struct bcm_cfg80211 *cfg, struct wireless_dev *wdev)
+{
+ struct net_info *_net_info, *next;
+ s32 bssidx = -1;
+ unsigned long int flags;
+
+ WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+ GCC_DIAGNOSTIC_POP();
+ if (_net_info->wdev && (_net_info->wdev == wdev)) {
+ bssidx = _net_info->bssidx;
+ break;
+ }
+ }
+ WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
+ return bssidx;
+}
+
+static inline struct wireless_dev *
+wl_get_wdev_by_fw_idx(struct bcm_cfg80211 *cfg, s32 bssidx, s32 ifidx)
+{
+ struct net_info *_net_info, *next;
+ struct wireless_dev *wdev = NULL;
+ unsigned long int flags;
+
+ if (bssidx < 0)
+ return NULL;
+ WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+ GCC_DIAGNOSTIC_POP();
+ if ((_net_info->bssidx == bssidx) && (_net_info->ifidx == ifidx)) {
+ wdev = _net_info->wdev;
+ break;
+ }
+ }
+ WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
+ return wdev;
+}
+
+static inline struct wl_profile *
+wl_get_profile_by_netdev(struct bcm_cfg80211 *cfg, struct net_device *ndev)
+{
+ struct net_info *_net_info, *next;
+ struct wl_profile *prof = NULL;
+ unsigned long int flags;
+
+ WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+ GCC_DIAGNOSTIC_POP();
+ if (ndev && (_net_info->ndev == ndev)) {
+ prof = &_net_info->profile;
+ break;
+ }
+ }
+ WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
+ return prof;
+}
+static inline struct net_info *
+wl_get_netinfo_by_netdev(struct bcm_cfg80211 *cfg, struct net_device *ndev)
+{
+ struct net_info *_net_info, *next, *info = NULL;
+ unsigned long int flags;
+
+ WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+ GCC_DIAGNOSTIC_POP();
+ if (ndev && (_net_info->ndev == ndev)) {
+ info = _net_info;
+ break;
+ }
+ }
+ WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
+ return info;
+}
+
+static inline struct net_info *
+wl_get_netinfo_by_wdev(struct bcm_cfg80211 *cfg, struct wireless_dev *wdev)
+{
+ struct net_info *_net_info, *next, *info = NULL;
+ unsigned long int flags;
+
+ WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+ GCC_DIAGNOSTIC_POP();
+ if (wdev && (_net_info->wdev == wdev)) {
+ info = _net_info;
+ break;
+ }
+ }
+ WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
+ return info;
+}
+
+static inline char *
+wl_iftype_to_str(int wl_iftype)
+{
+ switch (wl_iftype) {
+ case (WL_IF_TYPE_STA):
+ return "WL_IF_TYPE_STA";
+ case (WL_IF_TYPE_AP):
+ return "WL_IF_TYPE_AP";
+
+#ifdef WLAWDL
+ case (WL_IF_TYPE_AWDL):
+ return "WL_IF_TYPE_AWDL";
+#endif /* WLAWDL */
+
+ case (WL_IF_TYPE_NAN_NMI):
+ return "WL_IF_TYPE_NAN_NMI";
+ case (WL_IF_TYPE_NAN):
+ return "WL_IF_TYPE_NAN";
+ case (WL_IF_TYPE_P2P_GO):
+ return "WL_IF_TYPE_P2P_GO";
+ case (WL_IF_TYPE_P2P_GC):
+ return "WL_IF_TYPE_P2P_GC";
+ case (WL_IF_TYPE_P2P_DISC):
+ return "WL_IF_TYPE_P2P_DISC";
+ case (WL_IF_TYPE_IBSS):
+ return "WL_IF_TYPE_IBSS";
+ case (WL_IF_TYPE_MONITOR):
+ return "WL_IF_TYPE_MONITOR";
+ case (WL_IF_TYPE_AIBSS):
+ return "WL_IF_TYPE_AIBSS";
+ default:
+ return "WL_IF_TYPE_UNKNOWN";
+ }
+}
+
+#define is_discovery_iface(iface) (((iface == WL_IF_TYPE_P2P_DISC) || \
+ (iface == WL_IF_TYPE_NAN_NMI)) ? 1 : 0)
+#define IS_P2P_GC(wdev) \
+ ((wdev->iftype == NL80211_IFTYPE_P2P_CLIENT) ? 1 : 0)
+#define IS_P2P_GO(wdev) \
+ ((wdev->iftype == NL80211_IFTYPE_P2P_GO) ? 1 : 0)
+#define is_p2p_group_iface(wdev) (((wdev->iftype == NL80211_IFTYPE_P2P_GO) || \
+ (wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)) ? 1 : 0)
+#define bcmcfg_to_wiphy(cfg) (cfg->wdev->wiphy)
+#define bcmcfg_to_prmry_ndev(cfg) (cfg->wdev->netdev)
+#define bcmcfg_to_prmry_wdev(cfg) (cfg->wdev)
+#define bcmcfg_to_p2p_wdev(cfg) (cfg->p2p_wdev)
+#define ndev_to_wl(n) (wdev_to_wl(n->ieee80211_ptr))
+#define ndev_to_wdev(ndev) (ndev->ieee80211_ptr)
+#define wdev_to_ndev(wdev) (wdev->netdev)
+
+#ifdef WL_BLOCK_P2P_SCAN_ON_STA
+#define IS_P2P_IFACE(wdev) (wdev && \
+ ((wdev->iftype == NL80211_IFTYPE_P2P_DEVICE) || \
+ (wdev->iftype == NL80211_IFTYPE_P2P_GO) || \
+ (wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)))
+#endif /* WL_BLOCK_P2P_SCAN_ON_STA */
+
+#define IS_PRIMARY_NDEV(cfg, ndev) (ndev == bcmcfg_to_prmry_ndev(cfg))
+#define IS_STA_IFACE(wdev) (wdev && \
+ (wdev->iftype == NL80211_IFTYPE_STATION))
+
+#define IS_AP_IFACE(wdev) (wdev && \
+ (wdev->iftype == NL80211_IFTYPE_AP))
+
+#if defined(WL_ENABLE_P2P_IF)
+#define ndev_to_wlc_ndev(ndev, cfg) ((ndev == cfg->p2p_net) ? \
+ bcmcfg_to_prmry_ndev(cfg) : ndev)
+#else
+#define ndev_to_wlc_ndev(ndev, cfg) (ndev)
+#endif /* WL_ENABLE_P2P_IF */
+
+#define wdev_to_wlc_ndev(wdev, cfg) \
+ (wdev_to_ndev(wdev) ? \
+ wdev_to_ndev(wdev) : bcmcfg_to_prmry_ndev(cfg))
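+/* bcm_struct_cfgdev resolves to a wireless_dev when WL_CFG80211_P2P_DEV_IF
+ * is defined and to a net_device otherwise; the macro ladders below paper
+ * over that difference.
+ */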
+#if defined(WL_CFG80211_P2P_DEV_IF)
+#define cfgdev_to_wlc_ndev(cfgdev, cfg) wdev_to_wlc_ndev(cfgdev, cfg)
+#define bcmcfg_to_prmry_cfgdev(cfgdev, cfg) bcmcfg_to_prmry_wdev(cfg)
+#elif defined(WL_ENABLE_P2P_IF)
+#define cfgdev_to_wlc_ndev(cfgdev, cfg) ndev_to_wlc_ndev(cfgdev, cfg)
+#define bcmcfg_to_prmry_cfgdev(cfgdev, cfg) bcmcfg_to_prmry_ndev(cfg)
+#else
+#define cfgdev_to_wlc_ndev(cfgdev, cfg) (cfgdev)
+#define bcmcfg_to_prmry_cfgdev(cfgdev, cfg) (cfgdev)
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+#define cfgdev_to_wdev(cfgdev) (cfgdev)
+#define ndev_to_cfgdev(ndev) ndev_to_wdev(ndev)
+#define cfgdev_to_ndev(cfgdev) (cfgdev ? (cfgdev->netdev) : NULL)
+#define wdev_to_cfgdev(cfgdev) (cfgdev)
+#define discover_cfgdev(cfgdev, cfg) (cfgdev->iftype == NL80211_IFTYPE_P2P_DEVICE)
+#else
+#define cfgdev_to_wdev(cfgdev) (cfgdev->ieee80211_ptr)
+#define wdev_to_cfgdev(cfgdev)	((cfgdev) ? (cfgdev)->netdev : NULL)
+#define ndev_to_cfgdev(ndev) (ndev)
+#define cfgdev_to_ndev(cfgdev) (cfgdev)
+#define discover_cfgdev(cfgdev, cfg) (cfgdev == cfg->p2p_net)
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+#define scan_req_match(cfg) (((cfg) && (cfg->scan_request) && \
+ (cfg->scan_request->wdev == cfg->p2p_wdev)) ? true : false)
+#elif defined(WL_ENABLE_P2P_IF)
+#define scan_req_match(cfg) (((cfg) && (cfg->scan_request) && \
+ (cfg->scan_request->dev == cfg->p2p_net)) ? true : false)
+#else
+#define scan_req_match(cfg) (((cfg) && p2p_is_on(cfg) && p2p_scan(cfg)) ? \
+ true : false)
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+#define PRINT_WDEV_INFO(cfgdev) \
+ { \
+ struct wireless_dev *wdev = cfgdev_to_wdev(cfgdev); \
+ struct net_device *netdev = wdev ? wdev->netdev : NULL; \
+ WL_DBG(("wdev_ptr:%p ndev_ptr:%p ifname:%s iftype:%d\n", OSL_OBFUSCATE_BUF(wdev), \
+ OSL_OBFUSCATE_BUF(netdev), \
+ netdev ? netdev->name : "NULL (non-ndev device)", \
+ wdev ? wdev->iftype : 0xff)); \
+ }
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
+#define scan_req_iftype(req) (req->dev->ieee80211_ptr->iftype)
+#else
+#define scan_req_iftype(req) (req->wdev->iftype)
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0) */
+
+#define wl_to_sr(w) (w->scan_req_int)
+#if defined(STATIC_WL_PRIV_STRUCT)
+#define wl_to_ie(w) (w->ie)
+#define wl_to_conn(w) (w->conn_info)
+#else
+#define wl_to_ie(w) (&w->ie)
+#define wl_to_conn(w) (&w->conn_info)
+#endif
+#define wl_to_fils_info(w) (&w->fils_info)
+#define wiphy_from_scan(w) (w->escan_info.wiphy)
+#define wl_get_drv_status_all(cfg, stat) \
+ (wl_get_status_all(cfg, WL_STATUS_ ## stat))
+#define wl_get_drv_status(cfg, stat, ndev) \
+ (wl_get_status_by_netdev(cfg, WL_STATUS_ ## stat, ndev))
+#define wl_set_drv_status(cfg, stat, ndev) \
+ (wl_set_status_by_netdev(cfg, WL_STATUS_ ## stat, ndev, 1))
+#define wl_clr_drv_status(cfg, stat, ndev) \
+ (wl_set_status_by_netdev(cfg, WL_STATUS_ ## stat, ndev, 2))
+#define wl_clr_drv_status_all(cfg, stat) \
+ (wl_set_status_all(cfg, WL_STATUS_ ## stat, 2))
+#define wl_chg_drv_status(cfg, stat, ndev) \
+ (wl_set_status_by_netdev(cfg, WL_STATUS_ ## stat, ndev, 4))
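+/* Example: wl_set_drv_status(cfg, CONNECTING, ndev) token-pastes to
+ * wl_set_status_by_netdev(cfg, WL_STATUS_CONNECTING, ndev, 1).
+ */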
+
+#define for_each_bss(list, bss, __i) \
+ for (__i = 0; __i < list->count && __i < WL_AP_MAX; __i++, bss = next_bss(list, bss))
+
+#define for_each_ndev(cfg, iter, next) \
+ list_for_each_entry_safe(iter, next, &cfg->net_list, list)
+
+/* In case of WPS from wpa_supplicant, the pairwise suite and group suite are 0.
+ * In addition, wpa_version is WPA_VERSION_1
+ */
+#define is_wps_conn(_sme) \
+ ((wl_cfgp2p_find_wpsie(_sme->ie, _sme->ie_len) != NULL) && \
+ (!_sme->crypto.n_ciphers_pairwise) && \
+ (!_sme->crypto.cipher_group))
+
+#ifdef WLFBT
+#if defined(WLAN_AKM_SUITE_FT_8021X) && defined(WLAN_AKM_SUITE_FT_PSK)
+#define IS_AKM_SUITE_FT(sec) (sec->wpa_auth == WLAN_AKM_SUITE_FT_8021X || \
+ sec->wpa_auth == WLAN_AKM_SUITE_FT_PSK)
+#elif defined(WLAN_AKM_SUITE_FT_8021X)
+#define IS_AKM_SUITE_FT(sec) (sec->wpa_auth == WLAN_AKM_SUITE_FT_8021X)
+#elif defined(WLAN_AKM_SUITE_FT_PSK)
+#define IS_AKM_SUITE_FT(sec) (sec->wpa_auth == WLAN_AKM_SUITE_FT_PSK)
+#else
+#define IS_AKM_SUITE_FT(sec) ({BCM_REFERENCE(sec); FALSE;})
+#endif /* WLAN_AKM_SUITE_FT_8021X && WLAN_AKM_SUITE_FT_PSK */
+#else
+#define IS_AKM_SUITE_FT(sec) ({BCM_REFERENCE(sec); FALSE;})
+#endif /* WLFBT */
+
+#define IS_AKM_SUITE_CCKM(sec) ({BCM_REFERENCE(sec); FALSE;})
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0))
+#define STA_INFO_BIT(info) (1ul << NL80211_STA_ ## info)
+#ifdef strnicmp
+#undef strnicmp
+#endif /* strnicmp */
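+/* strnicmp was removed from newer kernels; map it to strncasecmp */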
+#define strnicmp(str1, str2, len) strncasecmp((str1), (str2), (len))
+#else
+#define STA_INFO_BIT(info) (STATION_ ## info)
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0)) */
+
+extern s32 wl_cfg80211_attach(struct net_device *ndev, void *context);
+extern void wl_cfg80211_detach(struct bcm_cfg80211 *cfg);
+
+extern void wl_cfg80211_event(struct net_device *ndev, const wl_event_msg_t *e,
+ void *data);
+extern s32 wl_cfg80211_handle_critical_events(struct bcm_cfg80211 *cfg,
+ struct wireless_dev *wdev, const wl_event_msg_t * e);
+
+void wl_cfg80211_set_parent_dev(void *dev);
+struct device *wl_cfg80211_get_parent_dev(void);
+struct bcm_cfg80211 *wl_cfg80211_get_bcmcfg(void);
+void wl_cfg80211_set_bcmcfg(struct bcm_cfg80211 *cfg);
+
+/* clear IEs */
+extern s32 wl_cfg80211_clear_mgmt_vndr_ies(struct bcm_cfg80211 *cfg);
+extern s32 wl_cfg80211_clear_per_bss_ies(struct bcm_cfg80211 *cfg, struct wireless_dev *wdev);
+extern void wl_cfg80211_clear_p2p_disc_ies(struct bcm_cfg80211 *cfg);
+#ifdef WL_STATIC_IF
+extern int32 wl_cfg80211_update_iflist_info(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ int ifidx, uint8 *addr, int bssidx, char *name, int if_state);
+#endif /* WL_STATIC_IF */
+extern s32 wl_cfg80211_up(struct net_device *net);
+extern s32 wl_cfg80211_down(struct net_device *net);
+extern void wl_cfg80211_sta_ifdown(struct net_device *net);
+extern s32 wl_cfg80211_notify_ifadd(struct net_device * dev, int ifidx, char *name, uint8 *mac,
+ uint8 bssidx, uint8 role);
+extern s32 wl_cfg80211_notify_ifdel(struct net_device * dev, int ifidx, char *name, uint8 *mac,
+ uint8 bssidx);
+extern s32 wl_cfg80211_notify_ifchange(struct net_device * dev, int ifidx, char *name, uint8 *mac,
+ uint8 bssidx);
+extern struct net_device* wl_cfg80211_allocate_if(struct bcm_cfg80211 *cfg, int ifidx,
+ const char *name, uint8 *mac, uint8 bssidx, const char *dngl_name);
+extern int wl_cfg80211_register_if(struct bcm_cfg80211 *cfg,
+ int ifidx, struct net_device* ndev, bool rtnl_lock_reqd);
+extern int wl_cfg80211_remove_if(struct bcm_cfg80211 *cfg,
+ int ifidx, struct net_device* ndev, bool rtnl_lock_reqd);
+extern void wl_cfg80211_cleanup_if(struct net_device *dev);
+extern bool wl_cfg80211_is_concurrent_mode(struct net_device * dev);
+extern void wl_cfg80211_disassoc(struct net_device *ndev, uint32 reason);
+extern void wl_cfg80211_del_all_sta(struct net_device *ndev, uint32 reason);
+extern void* wl_cfg80211_get_dhdp(struct net_device * dev);
+extern bool wl_cfg80211_is_p2p_active(struct net_device * dev);
+extern bool wl_cfg80211_is_roam_offload(struct net_device * dev);
+extern bool wl_cfg80211_is_event_from_connected_bssid(struct net_device * dev,
+ const wl_event_msg_t *e, int ifidx);
+extern void wl_cfg80211_dbg_level(u32 level);
+extern s32 wl_cfg80211_get_p2p_dev_addr(struct net_device *net, struct ether_addr *p2pdev_addr);
+extern s32 wl_cfg80211_set_p2p_noa(struct net_device *net, char* buf, int len);
+extern s32 wl_cfg80211_get_p2p_noa(struct net_device *net, char* buf, int len);
+extern s32 wl_cfg80211_set_wps_p2p_ie(struct net_device *net, char *buf, int len,
+ enum wl_management_type type);
+extern s32 wl_cfg80211_set_p2p_ps(struct net_device *net, char* buf, int len);
+extern s32 wl_cfg80211_set_p2p_ecsa(struct net_device *net, char* buf, int len);
+extern s32 wl_cfg80211_increase_p2p_bw(struct net_device *net, char* buf, int len);
+#ifdef P2PLISTEN_AP_SAMECHN
+extern s32 wl_cfg80211_set_p2p_resp_ap_chn(struct net_device *net, s32 enable);
+#endif /* P2PLISTEN_AP_SAMECHN */
+
+/* btcoex functions */
+void* wl_cfg80211_btcoex_init(struct net_device *ndev);
+void wl_cfg80211_btcoex_deinit(void);
+
+extern chanspec_t wl_chspec_from_legacy(chanspec_t legacy_chspec);
+extern chanspec_t wl_chspec_driver_to_host(chanspec_t chanspec);
+
+#ifdef WL_SDO
+extern s32 wl_cfg80211_sdo_init(struct bcm_cfg80211 *cfg);
+extern s32 wl_cfg80211_sdo_deinit(struct bcm_cfg80211 *cfg);
+extern s32 wl_cfg80211_sd_offload(struct net_device *net, char *cmd, char* buf, int len);
+extern s32 wl_cfg80211_pause_sdo(struct net_device *dev, struct bcm_cfg80211 *cfg);
+extern s32 wl_cfg80211_resume_sdo(struct net_device *dev, struct bcm_cfg80211 *cfg);
+#endif /* WL_SDO */
+#ifdef WL_SUPPORT_AUTO_CHANNEL
+#define CHANSPEC_BUF_SIZE 2048
+#define CHANINFO_LIST_BUF_SIZE (1024 * 4)
+#define CHAN_SEL_IOCTL_DELAY 300
+#define CHAN_SEL_RETRY_COUNT 15
+#define CHANNEL_IS_RADAR(channel) (((channel & WL_CHAN_RADAR) || \
+ (channel & WL_CHAN_PASSIVE)) ? true : false)
+#define CHANNEL_IS_2G(channel) (((channel >= 1) && (channel <= 14)) ? \
+ true : false)
+#define CHANNEL_IS_5G(channel) (((channel >= 36) && (channel <= 165)) ? \
+ true : false)
+extern s32 wl_cfg80211_get_best_channels(struct net_device *dev, char* command,
+ int total_len);
+#endif /* WL_SUPPORT_AUTO_CHANNEL */
+extern int wl_cfg80211_ether_atoe(const char *a, struct ether_addr *n);
+extern int wl_cfg80211_hang(struct net_device *dev, u16 reason);
+extern bool wl_cfg80211_macaddr_sync_reqd(struct net_device *dev);
+void wl_cfg80211_generate_mac_addr(struct ether_addr *ea_addr);
+extern s32 wl_mode_to_nl80211_iftype(s32 mode);
+int wl_cfg80211_do_driver_init(struct net_device *net);
+void wl_cfg80211_enable_trace(u32 level);
+extern s32 wl_update_wiphybands(struct bcm_cfg80211 *cfg, bool notify);
+extern s32 wl_cfg80211_if_is_group_owner(void);
+extern chanspec_t wl_chspec_host_to_driver(chanspec_t chanspec);
+extern chanspec_t wl_ch_host_to_driver(u16 channel);
+extern s32 wl_set_tx_power(struct net_device *dev,
+ enum nl80211_tx_power_setting type, s32 dbm);
+extern s32 wl_get_tx_power(struct net_device *dev, s32 *dbm);
+extern s32 wl_add_remove_eventmsg(struct net_device *ndev, u16 event, bool add);
+extern void wl_stop_wait_next_action_frame(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ u8 bsscfgidx);
+#ifdef WL_HOST_BAND_MGMT
+extern s32 wl_cfg80211_set_band(struct net_device *ndev, int band);
+#endif /* WL_HOST_BAND_MGMT */
+
+#if defined(OEM_ANDROID) && defined(DHCP_SCAN_SUPPRESS)
+extern int wl_cfg80211_scan_suppress(struct net_device *dev, int suppress);
+#endif /* OEM_ANDROID && DHCP_SCAN_SUPPRESS */
+
+extern void wl_cfg80211_add_to_eventbuffer(wl_eventmsg_buf_t *ev, u16 event, bool set);
+extern s32 wl_cfg80211_apply_eventbuffer(struct net_device *ndev,
+ struct bcm_cfg80211 *cfg, wl_eventmsg_buf_t *ev);
+extern void get_primary_mac(struct bcm_cfg80211 *cfg, struct ether_addr *mac);
+extern void wl_cfg80211_update_power_mode(struct net_device *dev);
+extern void wl_terminate_event_handler(struct net_device *dev);
+#if defined(DHD_ENABLE_BIGDATA_LOGGING)
+extern s32 wl_cfg80211_get_bss_info(struct net_device *dev, char* cmd, int total_len);
+extern s32 wl_cfg80211_get_connect_failed_status(struct net_device *dev, char* cmd, int total_len);
+#endif /* DHD_ENABLE_BIGDATA_LOGGING */
+extern struct bcm_cfg80211 *wl_get_cfg(struct net_device *ndev);
+extern s32 wl_cfg80211_set_if_band(struct net_device *ndev, int band);
+extern s32 wl_cfg80211_set_country_code(struct net_device *dev, char *country_code,
+ bool notify, bool user_enforced, int revinfo);
+extern bool wl_cfg80211_is_hal_started(struct bcm_cfg80211 *cfg);
+#ifdef WL_WIPSEVT
+extern int wl_cfg80211_wips_event(uint16 misdeauth, char* bssid);
+extern int wl_cfg80211_wips_event_ext(wl_wips_event_info_t *wips_event);
+#endif /* WL_WIPSEVT */
+
+#define SCAN_BUF_CNT 2
+#define SCAN_BUF_NEXT 1
+#define WL_SCANTYPE_LEGACY 0x1
+#define WL_SCANTYPE_P2P 0x2
+extern void wl_cfg80211_ibss_vsie_set_buffer(struct net_device *dev, vndr_ie_setbuf_t *ibss_vsie,
+ int ibss_vsie_len);
+extern s32 wl_cfg80211_ibss_vsie_delete(struct net_device *dev);
+#ifdef WLAIBSS
+extern void wl_cfg80211_set_txfail_pid(struct net_device *dev, int pid);
+#endif /* WLAIBSS */
+#ifdef WL_RELMCAST
+extern void wl_cfg80211_set_rmc_pid(struct net_device *dev, int pid);
+#endif /* WL_RELMCAST */
+extern int wl_cfg80211_set_mgmt_vndr_ies(struct bcm_cfg80211 *cfg,
+ bcm_struct_cfgdev *cfgdev, s32 bssidx, s32 pktflag,
+ const u8 *vndr_ie, u32 vndr_ie_len);
+
+#ifdef WLFBT
+extern int wl_cfg80211_get_fbt_key(struct net_device *dev, uint8 *key, int total_len);
+#endif
+
+/* Action frame specific functions */
+extern u8 wl_get_action_category(void *frame, u32 frame_len);
+extern int wl_get_public_action(void *frame, u32 frame_len, u8 *ret_action);
+
+#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+struct net_device *wl_cfg80211_get_remain_on_channel_ndev(struct bcm_cfg80211 *cfg);
+#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+
+#ifdef WL_SUPPORT_ACS
+#define ACS_MSRMNT_DELAY 1000 /* dump_obss delay in ms */
+#define IOCTL_RETRY_COUNT 5
+#define CHAN_NOISE_DUMMY -80
+#define OBSS_TOKEN_IDX 15
+#define IBSS_TOKEN_IDX 15
+#define TX_TOKEN_IDX 14
+#define CTG_TOKEN_IDX 13
+#define PKT_TOKEN_IDX 15
+#define IDLE_TOKEN_IDX 12
+#endif /* WL_SUPPORT_ACS */
+
+#ifdef BCMWAPI_WPI
+#define is_wapi(cipher)	(((cipher) == WLAN_CIPHER_SUITE_SMS4) ? 1 : 0)
+#endif /* BCMWAPI_WPI */
+
+extern int wl_cfg80211_get_ioctl_version(void);
+extern int wl_cfg80211_enable_roam_offload(struct net_device *dev, int enable);
+#ifdef WBTEXT
+extern s32 wl_cfg80211_wbtext_set_default(struct net_device *ndev);
+extern s32 wl_cfg80211_wbtext_config(struct net_device *ndev, char *data,
+ char *command, int total_len);
+extern int wl_cfg80211_wbtext_weight_config(struct net_device *ndev, char *data,
+ char *command, int total_len);
+extern int wl_cfg80211_wbtext_table_config(struct net_device *ndev, char *data,
+ char *command, int total_len);
+extern s32 wl_cfg80211_wbtext_delta_config(struct net_device *ndev, char *data,
+ char *command, int total_len);
+#endif /* WBTEXT */
+extern s32 wl_cfg80211_get_band_chanspecs(struct net_device *ndev,
+ void *buf, s32 buflen, chanspec_band_t band, bool acs_req);
+
+extern s32 wl_cfg80211_bss_up(struct bcm_cfg80211 *cfg,
+ struct net_device *ndev, s32 bsscfg_idx, s32 up);
+extern bool wl_cfg80211_bss_isup(struct net_device *ndev, int bsscfg_idx);
+
+struct net_device *wl_cfg80211_post_ifcreate(struct net_device *ndev,
+ wl_if_event_info *event, u8 *addr, const char *name, bool rtnl_lock_reqd);
+extern s32 wl_cfg80211_post_ifdel(struct net_device *ndev, bool rtnl_lock_reqd, s32 ifidx);
+#if defined(PKT_FILTER_SUPPORT) && defined(APSTA_BLOCK_ARP_DURING_DHCP)
+extern void wl_cfg80211_block_arp(struct net_device *dev, int enable);
+#endif /* PKT_FILTER_SUPPORT && APSTA_BLOCK_ARP_DURING_DHCP */
+
+#ifdef WLTDLS
+extern s32 wl_cfg80211_tdls_config(struct bcm_cfg80211 *cfg,
+ enum wl_tdls_config state, bool tdls_mode);
+extern s32 wl_tdls_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data);
+#endif /* WLTDLS */
+
+#ifdef WL_NAN
+extern int wl_cfgvendor_send_nan_event(struct wiphy * wiphy,
+ struct net_device *dev, int event_id,
+ nan_event_data_t *nan_event_data);
+#ifdef RTT_SUPPORT
+extern s32 wl_cfgvendor_send_as_rtt_legacy_event(struct wiphy *wiphy,
+ struct net_device *dev, wl_nan_ev_rng_rpt_ind_t *range_res,
+ uint32 status);
+#endif /* RTT_SUPPORT */
+#ifdef WL_NANP2P
+extern int wl_cfg80211_set_iface_conc_disc(struct net_device *ndev,
+ uint8 arg_val);
+extern uint8 wl_cfg80211_get_iface_conc_disc(struct net_device *ndev);
+#endif /* WL_NANP2P */
+#endif /* WL_NAN */
+
+#ifdef WL_CFG80211_P2P_DEV_IF
+extern void wl_cfg80211_del_p2p_wdev(struct net_device *dev);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+#ifdef WL_CFG80211_SYNC_GON
+#define WL_DRV_STATUS_SENDING_AF_FRM_EXT(cfg) \
+ (wl_get_drv_status_all(cfg, SENDING_ACT_FRM) || \
+ wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM_LISTEN))
+#else
+#define WL_DRV_STATUS_SENDING_AF_FRM_EXT(cfg) wl_get_drv_status_all(cfg, SENDING_ACT_FRM)
+#endif /* WL_CFG80211_SYNC_GON */
+
+#ifdef P2P_LISTEN_OFFLOADING
+extern s32 wl_cfg80211_p2plo_deinit(struct bcm_cfg80211 *cfg);
+#endif /* P2P_LISTEN_OFFLOADING */
+
+/* Function to flush the FW log buffer content */
+extern void wl_flush_fw_log_buffer(struct net_device *dev, uint32 logset_mask);
+
+#define RETURN_EIO_IF_NOT_UP(wlpriv) \
+do { \
+ struct net_device *checkSysUpNDev = bcmcfg_to_prmry_ndev(wlpriv); \
+ if (unlikely(!wl_get_drv_status(wlpriv, READY, checkSysUpNDev))) { \
+ WL_INFORM(("device is not ready\n")); \
+ return -EIO; \
+ } \
+} while (0)
+
+#ifdef QOS_MAP_SET
+extern uint8 *wl_get_up_table(dhd_pub_t * dhdp, int idx);
+#endif /* QOS_MAP_SET */
+
+#define P2PO_COOKIE 65535
+u64 wl_cfg80211_get_new_roc_id(struct bcm_cfg80211 *cfg);
+
+#define ROAMSCAN_MODE_NORMAL 0
+#define ROAMSCAN_MODE_WES 1
+
+#ifdef SUPPORT_RSSI_SUM_REPORT
+int wl_get_rssi_logging(struct net_device *dev, void *param);
+int wl_set_rssi_logging(struct net_device *dev, void *param);
+int wl_get_rssi_per_ant(struct net_device *dev, char *ifname, char *peer_mac, void *param);
+#endif /* SUPPORT_RSSI_SUM_REPORT */
+struct wireless_dev * wl_cfg80211_add_if(struct bcm_cfg80211 *cfg, struct net_device *primary_ndev,
+ wl_iftype_t wl_iftype, const char *name, u8 *mac);
+extern s32 wl_cfg80211_del_if(struct bcm_cfg80211 *cfg, struct net_device *primary_ndev,
+ struct wireless_dev *wdev, char *name);
+s32 _wl_cfg80211_del_if(struct bcm_cfg80211 *cfg, struct net_device *primary_ndev,
+ struct wireless_dev *wdev, char *ifname);
+s32 wl_cfg80211_delete_iface(struct bcm_cfg80211 *cfg, wl_iftype_t sec_data_if_type);
+
+#ifdef WL_STATIC_IF
+extern struct net_device *wl_cfg80211_register_static_if(struct bcm_cfg80211 *cfg,
+ u16 iftype, char *ifname, int static_ifidx);
+extern void wl_cfg80211_unregister_static_if(struct bcm_cfg80211 * cfg);
+extern s32 wl_cfg80211_static_if_open(struct net_device *net);
+extern s32 wl_cfg80211_static_if_close(struct net_device *net);
+extern struct net_device * wl_cfg80211_post_static_ifcreate(struct bcm_cfg80211 *cfg,
+ wl_if_event_info *event, u8 *addr, s32 iface_type, int static_ifidx);
+extern s32 wl_cfg80211_post_static_ifdel(struct bcm_cfg80211 *cfg, struct net_device *ndev);
+#endif /* WL_STATIC_IF */
+extern struct wireless_dev *wl_cfg80211_get_wdev_from_ifname(struct bcm_cfg80211 *cfg,
+ const char *name);
+struct net_device* wl_get_netdev_by_name(struct bcm_cfg80211 *cfg, char *ifname);
+extern int wl_cfg80211_ifstats_counters(struct net_device *dev, wl_if_stats_t *if_stats);
+extern s32 wl_cfg80211_set_dbg_verbose(struct net_device *ndev, u32 level);
+extern int wl_cfg80211_deinit_p2p_discovery(struct bcm_cfg80211 * cfg);
+extern int wl_cfg80211_set_frameburst(struct bcm_cfg80211 *cfg, bool enable);
+extern int wl_cfg80211_determine_p2p_rsdb_scc_mode(struct bcm_cfg80211 *cfg);
+extern uint8 wl_cfg80211_get_bus_state(struct bcm_cfg80211 *cfg);
+#ifdef WL_WPS_SYNC
+void wl_handle_wps_states(struct net_device *ndev, u8 *dump_data, u16 len, bool direction);
+#endif /* WL_WPS_SYNC */
+extern int wl_features_set(u8 *array, uint8 len, u32 ftidx);
+extern void *wl_read_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 item);
+extern s32 wl_cfg80211_sup_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *event, void *data);
+#ifdef CUSTOMER_HW4_DEBUG
+extern void wl_scan_timeout_dbg_clear(void);
+#endif /* CUSTOMER_HW4_DEBUG */
+extern s32 cfg80211_to_wl_iftype(uint16 type, uint16 *role, uint16 *mode);
+extern s32 wl_cfg80211_net_attach(struct net_device *primary_ndev);
+extern void wl_print_verinfo(struct bcm_cfg80211 *cfg);
+extern const u8 *wl_find_attribute(const u8 *buf, u16 len, u16 element_id);
+extern int wl_cfg80211_get_concurrency_mode(struct bcm_cfg80211 *cfg);
+extern s32 wl_cfg80211_config_suspend_events(struct net_device *ndev, bool enable);
+#ifdef PCIE_INB_DW
+bool wl_cfg80211_check_in_progress(struct net_device *dev);
+#endif
+#ifdef WES_SUPPORT
+extern int wl_android_set_ncho_mode(struct net_device *dev, int mode);
+#endif /* WES_SUPPORT */
+#ifdef KEEP_ALIVE
+extern int wl_cfg80211_start_mkeep_alive(struct bcm_cfg80211 *cfg, uint8 mkeep_alive_id,
+ uint16 ether_type, uint8 *ip_pkt, uint16 ip_pkt_len, uint8* src_mac_addr,
+ uint8* dst_mac_addr, uint32 period_msec);
+extern int wl_cfg80211_stop_mkeep_alive(struct bcm_cfg80211 *cfg, uint8 mkeep_alive_id);
+#endif /* KEEP_ALIVE */
+
+extern s32 wl_cfg80211_handle_macaddr_change(struct net_device *dev, u8 *macaddr);
+extern int wl_cfg80211_handle_hang_event(struct net_device *ndev,
+ uint16 hang_reason, uint32 memdump_type);
+#ifndef OEM_ANDROID
+extern s32 wl_cfg80211_resume(struct bcm_cfg80211 *cfg);
+extern s32 wl_cfg80211_suspend(struct bcm_cfg80211 *cfg);
+#endif /* !OEM_ANDROID */
+bool wl_cfg80211_is_dpp_frame(void *frame, u32 frame_len);
+const char *get_dpp_pa_ftype(enum wl_dpp_ftype ftype);
+bool wl_cfg80211_is_dpp_gas_action(void *frame, u32 frame_len);
+extern bool wl_cfg80211_find_gas_subtype(u8 subtype, u16 adv_id, u8* data, u32 len);
+#ifdef ESCAN_CHANNEL_CACHE
+extern void update_roam_cache(struct bcm_cfg80211 *cfg, int ioctl_ver);
+#endif /* ESCAN_CHANNEL_CACHE */
+
+#ifdef WL_NAN
+extern int wl_cfgnan_get_stats(struct bcm_cfg80211 *cfg);
+#endif /* WL_NAN */
+
+#ifdef WL_SAE
+extern s32 wl_cfg80211_set_wsec_info(struct net_device *dev, uint32 *data,
+ uint16 data_len, int tag);
+#endif /* WL_SAE */
+#define WL_CHANNEL_ARRAY_INIT(band_chan_arr) \
+do { \
+ u32 arr_size, k; \
+ arr_size = ARRAYSIZE(band_chan_arr); \
+ for (k = 0; k < arr_size; k++) { \
+ band_chan_arr[k].flags = IEEE80211_CHAN_DISABLED; \
+ } \
+} while (0)
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0))
+#define CFG80211_PUT_BSS(wiphy, bss) cfg80211_put_bss(wiphy, bss);
+#else
+#define CFG80211_PUT_BSS(wiphy, bss) cfg80211_put_bss(bss);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)) */
+
+#ifdef RSSI_OFFSET
+static inline s32 wl_rssi_offset(s32 rssi)
+{
+ rssi += RSSI_OFFSET;
+ if (rssi > 0)
+ rssi = 0;
+ return rssi;
+}
+#else
+#define wl_rssi_offset(x) x
+#endif
+extern int wl_channel_to_frequency(u32 chan, chanspec_band_t band);
+extern int wl_cfg80211_config_rsnxe_ie(struct bcm_cfg80211 *cfg, struct net_device *dev,
+ const u8 *parse, u32 len);
+extern bool dhd_force_country_change(struct net_device *dev);
+extern u32 wl_dbg_level;
+extern u32 wl_cfg80211_debug_data_dump(struct net_device *dev, u8 *buf, u32 buf_len);
+extern void wl_cfg80211_concurrent_roam(struct bcm_cfg80211 *cfg, int enable);
+
+extern void wl_cfg80211_iface_state_ops(struct wireless_dev *wdev, wl_interface_state_t state,
+ wl_iftype_t wl_iftype, u16 wl_mode);
+extern chanspec_t wl_cfg80211_get_shared_freq(struct wiphy *wiphy);
+#ifdef SUPPORT_SET_CAC
+extern void wl_cfg80211_set_cac(struct bcm_cfg80211 *cfg, int enable);
+#endif /* SUPPORT_SET_CAC */
+extern s32 wl_cfg80211_add_del_bss(struct bcm_cfg80211 *cfg,
+ struct net_device *ndev, s32 bsscfg_idx,
+ wl_iftype_t brcm_iftype, s32 del, u8 *addr);
+extern s32 wl_bss_handle_sae_auth(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ const wl_event_msg_t *event, void *data);
+#ifdef WL_WPS_SYNC
+extern s32 wl_wps_session_update(struct net_device *ndev, u16 state, const u8 *peer_mac);
+#endif /* WL_WPS_SYNC */
+extern s32 wl_update_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ const wl_event_msg_t *e, const void *data, s32 item);
+#ifdef WL_CLIENT_SAE
+extern s32 wl_handle_auth_event(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data);
+#endif /* WL_CLIENT_SAE */
+#ifdef CUSTOMER_HW6
+extern bool wl_customer6_legacy_chip_check(struct bcm_cfg80211 *cfg,
+ struct net_device *ndev);
+#endif /* CUSTOMER_HW6 */
+void wl_wlfc_enable(struct bcm_cfg80211 *cfg, bool enable);
+s32 wl_handle_join(struct bcm_cfg80211 *cfg, struct net_device *dev,
+ wlcfg_assoc_info_t *assoc_info);
+s32 wl_handle_reassoc(struct bcm_cfg80211 *cfg, struct net_device *dev,
+ wlcfg_assoc_info_t *info);
+s32 wl_cfg80211_autochannel(struct net_device *dev, char* command, int total_len);
+#endif /* _wl_cfg80211_h_ */
diff --git a/bcmdhd.101.10.361.x/wl_cfg_btcoex.c b/bcmdhd.101.10.361.x/wl_cfg_btcoex.c
new file mode 100755
index 0000000..934c20e
--- /dev/null
+++ b/bcmdhd.101.10.361.x/wl_cfg_btcoex.c
@@ -0,0 +1,601 @@
+/*
+ * Linux cfg80211 driver - Dongle Host Driver (DHD) related
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#include <net/rtnetlink.h>
+
+#include <bcmutils.h>
+#include <wldev_common.h>
+#include <wl_cfg80211.h>
+#include <dhd_cfg80211.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhdioctl.h>
+#include <wlioctl.h>
+
+#ifdef PKT_FILTER_SUPPORT
+extern uint dhd_pkt_filter_enable;
+extern uint dhd_master_mode;
+extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode);
+#endif
+
+struct btcoex_info {
+ timer_list_compat_t timer;
+ u32 timer_ms;
+ u32 timer_on;
+	u32 ts_dhcp_start;	/* ms timestamp recorded at DHCP start (time stats) */
+	u32 ts_dhcp_ok;		/* ms timestamp recorded at DHCP ok (time stats) */
+	bool dhcp_done;	/* flag, indicates that the host finished
+					 * dhcp before t1/t2 expiration
+					 */
+ s32 bt_state;
+ struct work_struct work;
+ struct net_device *dev;
+};
+
+#if defined(OEM_ANDROID)
+static struct btcoex_info *btcoex_info_loc = NULL;
+
+/* TODO: clean up the BT-Coex code, it still has some legacy ioctl/iovar functions */
+
+/* use New SCO/eSCO smart YG suppression */
+#define BT_DHCP_eSCO_FIX
+/* this flag boosts wifi pkt priority to max; caution: not fair to SCO */
+#define BT_DHCP_USE_FLAGS
+/* T1: start of SCO/eSCO priority suppression (ms) */
+#define BT_DHCP_OPPR_WIN_TIME	2500
+/* T2: timeout (ms) after which SCO/eSCO suppression is turned off */
+#define BT_DHCP_FLAG_FORCE_TIME 5500
+
+#define BTCOEXMODE "BTCOEXMODE"
+#define POWERMODE "POWERMODE"
+
+enum wl_cfg80211_btcoex_status {
+ BT_DHCP_IDLE,
+ BT_DHCP_START,
+ BT_DHCP_OPPR_WIN,
+ BT_DHCP_FLAG_FORCE_TIMEOUT
+};
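+/* DHCP coex state machine, driven by wl_cfg80211_bt_handler():
+ * IDLE -> START -> OPPR_WIN (T1 = BT_DHCP_OPPR_WIN_TIME ms grace period for
+ * DHCP to complete) -> FLAG_FORCE_TIMEOUT (BT priority suppressed for up to
+ * T2 = BT_DHCP_FLAG_FORCE_TIME ms) -> IDLE.
+ */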
+
+/*
+ * get a named driver variable's uint register value and return an error indication
+ * calling example: dev_wlc_intvar_get_reg(dev, "btc_params", 66, &reg_value)
+ */
+static int
+dev_wlc_intvar_get_reg(struct net_device *dev, char *name,
+ uint reg, int *retval)
+{
+ union {
+ char buf[WLC_IOCTL_SMLEN];
+ int val;
+ } var;
+ int error;
+
+ bzero(&var, sizeof(var));
+ error = bcm_mkiovar(name, (char *)(&reg), sizeof(reg), (char *)(&var), sizeof(var.buf));
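+	/* bcm_mkiovar() returns the composed iovar length, or 0 if the
+	 * destination buffer was too small
+	 */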
+ if (error == 0) {
+ return BCME_BUFTOOSHORT;
+ }
+ error = wldev_ioctl_get(dev, WLC_GET_VAR, (char *)(&var), sizeof(var.buf));
+
+ *retval = dtoh32(var.val);
+ return (error);
+}
+
+static int
+dev_wlc_bufvar_set(struct net_device *dev, char *name, char *buf, int len)
+{
+ char ioctlbuf_local[WLC_IOCTL_SMLEN];
+ int ret;
+
+ ret = bcm_mkiovar(name, buf, len, ioctlbuf_local, sizeof(ioctlbuf_local));
+ if (ret == 0)
+ return BCME_BUFTOOSHORT;
+ return (wldev_ioctl_set(dev, WLC_SET_VAR, ioctlbuf_local, ret));
+}
+
+/*
+set a named driver variable's uint register value and return an error indication
+calling example: dev_wlc_intvar_set_reg(dev, "btc_params", 66, value)
+*/
+static int
+dev_wlc_intvar_set_reg(struct net_device *dev, char *name, char *addr, char * val)
+{
+ char reg_addr[8];
+
+ bzero(reg_addr, sizeof(reg_addr));
+ memcpy((char *)&reg_addr[0], (char *)addr, 4);
+ memcpy((char *)&reg_addr[4], (char *)val, 4);
+
+ return (dev_wlc_bufvar_set(dev, name, (char *)&reg_addr[0], sizeof(reg_addr)));
+}
+
+/* Andrey: bt pkt period independent sco/esco session detection algo. */
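+/* Sample btc_params register 27 up to 12 times, 5 ms apart; a sample with
+ * (param27 & 0x6) == 2 counts as an SCO/eSCO grant, and more than two such
+ * samples mark the session as active.
+ */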
+static bool btcoex_is_sco_active(struct net_device *dev)
+{
+ int ioc_res = 0;
+ bool res = FALSE;
+ int sco_id_cnt = 0;
+ int param27;
+ int i;
+
+ for (i = 0; i < 12; i++) {
+
+ ioc_res = dev_wlc_intvar_get_reg(dev, "btc_params", 27, &param27);
+
+ WL_TRACE(("sample[%d], btc params: 27:%x\n", i, param27));
+
+ if (ioc_res < 0) {
+ WL_ERR(("ioc read btc params error\n"));
+ break;
+ }
+
+ if ((param27 & 0x6) == 2) { /* count both sco & esco */
+ sco_id_cnt++;
+ }
+
+ if (sco_id_cnt > 2) {
+ WL_TRACE(("sco/esco detected, pkt id_cnt:%d samples:%d\n",
+ sco_id_cnt, i));
+ res = TRUE;
+ break;
+ }
+
+ OSL_SLEEP(5);
+ }
+
+ return res;
+}
+
+#if defined(BT_DHCP_eSCO_FIX)
+/* Enhanced BT COEX settings for eSCO compatibility during DHCP window */
+static int set_btc_esco_params(struct net_device *dev, bool trump_sco)
+{
+ static bool saved_status = FALSE;
+
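+	/* Each btc_params buffer below is 8 bytes: a 4-byte register index
+	 * followed by a 4-byte value, matching the layout that
+	 * dev_wlc_intvar_set_reg() composes.
+	 */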
+ char buf_reg50va_dhcp_on[8] =
+ { 50, 00, 00, 00, 0x22, 0x80, 0x00, 0x00 };
+ char buf_reg51va_dhcp_on[8] =
+ { 51, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 };
+ char buf_reg64va_dhcp_on[8] =
+ { 64, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 };
+ char buf_reg65va_dhcp_on[8] =
+ { 65, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 };
+ char buf_reg71va_dhcp_on[8] =
+ { 71, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 };
+ uint32 regaddr;
+ static uint32 saved_reg50;
+ static uint32 saved_reg51;
+ static uint32 saved_reg64;
+ static uint32 saved_reg65;
+ static uint32 saved_reg71;
+
+ if (trump_sco) {
+		/* this should reduce eSCO aggressive retransmit
+ * w/o breaking it
+ */
+
+ /* 1st save current */
+ WL_TRACE(("Do new SCO/eSCO coex algo {save &"
+ "override}\n"));
+ if ((!dev_wlc_intvar_get_reg(dev, "btc_params", 50, &saved_reg50)) &&
+ (!dev_wlc_intvar_get_reg(dev, "btc_params", 51, &saved_reg51)) &&
+ (!dev_wlc_intvar_get_reg(dev, "btc_params", 64, &saved_reg64)) &&
+ (!dev_wlc_intvar_get_reg(dev, "btc_params", 65, &saved_reg65)) &&
+ (!dev_wlc_intvar_get_reg(dev, "btc_params", 71, &saved_reg71))) {
+ saved_status = TRUE;
+ WL_TRACE(("saved bt_params[50,51,64,65,71]:"
+ "0x%x 0x%x 0x%x 0x%x 0x%x\n",
+ saved_reg50, saved_reg51,
+ saved_reg64, saved_reg65, saved_reg71));
+ } else {
+ WL_ERR((":%s: save btc_params failed\n",
+ __FUNCTION__));
+ saved_status = FALSE;
+ return -1;
+ }
+
+ /* pacify the eSco */
+ WL_TRACE(("override with [50,51,64,65,71]:"
+ "0x%x 0x%x 0x%x 0x%x 0x%x\n",
+ *(u32 *)(buf_reg50va_dhcp_on+4),
+ *(u32 *)(buf_reg51va_dhcp_on+4),
+ *(u32 *)(buf_reg64va_dhcp_on+4),
+ *(u32 *)(buf_reg65va_dhcp_on+4),
+ *(u32 *)(buf_reg71va_dhcp_on+4)));
+
+ dev_wlc_bufvar_set(dev, "btc_params",
+ (char *)&buf_reg50va_dhcp_on[0], 8);
+ dev_wlc_bufvar_set(dev, "btc_params",
+ (char *)&buf_reg51va_dhcp_on[0], 8);
+ dev_wlc_bufvar_set(dev, "btc_params",
+ (char *)&buf_reg64va_dhcp_on[0], 8);
+ dev_wlc_bufvar_set(dev, "btc_params",
+ (char *)&buf_reg65va_dhcp_on[0], 8);
+ dev_wlc_bufvar_set(dev, "btc_params",
+ (char *)&buf_reg71va_dhcp_on[0], 8);
+
+ saved_status = TRUE;
+ } else if (saved_status) {
+ /* restore previously saved bt params */
+ WL_TRACE(("Do new SCO/eSCO coex algo {save &"
+ "override}\n"));
+
+ regaddr = 50;
+ dev_wlc_intvar_set_reg(dev, "btc_params",
+ (char *)&regaddr, (char *)&saved_reg50);
+ regaddr = 51;
+ dev_wlc_intvar_set_reg(dev, "btc_params",
+ (char *)&regaddr, (char *)&saved_reg51);
+ regaddr = 64;
+ dev_wlc_intvar_set_reg(dev, "btc_params",
+ (char *)&regaddr, (char *)&saved_reg64);
+ regaddr = 65;
+ dev_wlc_intvar_set_reg(dev, "btc_params",
+ (char *)&regaddr, (char *)&saved_reg65);
+ regaddr = 71;
+ dev_wlc_intvar_set_reg(dev, "btc_params",
+ (char *)&regaddr, (char *)&saved_reg71);
+
+ WL_TRACE(("restore bt_params[50,51,64,65,71]:"
+ "0x%x 0x%x 0x%x 0x%x 0x%x\n",
+ saved_reg50, saved_reg51, saved_reg64,
+ saved_reg65, saved_reg71));
+
+ saved_status = FALSE;
+ } else {
+ WL_ERR((":%s att to restore not saved BTCOEX params\n",
+ __FUNCTION__));
+ return -1;
+ }
+ return 0;
+}
+#endif /* BT_DHCP_eSCO_FIX */
+
+static void
+wl_cfg80211_bt_setflag(struct net_device *dev, bool set)
+{
+#if defined(BT_DHCP_USE_FLAGS)
+ char buf_flag7_dhcp_on[8] = { 7, 00, 00, 00, 0x1, 0x0, 0x00, 0x00 };
+ char buf_flag7_default[8] = { 7, 00, 00, 00, 0x0, 0x00, 0x00, 0x00};
+#endif
+
+#if defined(BT_DHCP_eSCO_FIX)
+	/* ANDREY: new Yury's eSCO pacifier */
+	/* set = 1: save & turn on; set = 0: turn off & restore prev settings */
+ set_btc_esco_params(dev, set);
+#endif
+
+#if defined(BT_DHCP_USE_FLAGS)
+	/* Andrey: old Wi-Fi priority boost via flags */
+ WL_TRACE(("WI-FI priority boost via bt flags, set:%d\n", set));
+ if (set == TRUE)
+ /* Forcing bt_flag7 */
+ dev_wlc_bufvar_set(dev, "btc_flags",
+ (char *)&buf_flag7_dhcp_on[0],
+ sizeof(buf_flag7_dhcp_on));
+ else
+ /* Restoring default bt flag7 */
+ dev_wlc_bufvar_set(dev, "btc_flags",
+ (char *)&buf_flag7_default[0],
+ sizeof(buf_flag7_default));
+#endif
+}
+
+static void wl_cfg80211_bt_timerfunc(ulong data)
+{
+ struct btcoex_info *bt_local = (struct btcoex_info *)data;
+ WL_TRACE(("Enter\n"));
+ bt_local->timer_on = 0;
+ schedule_work(&bt_local->work);
+}
+
+static void wl_cfg80211_bt_handler(struct work_struct *work)
+{
+ struct btcoex_info *btcx_inf;
+
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ btcx_inf = container_of(work, struct btcoex_info, work);
+ GCC_DIAGNOSTIC_POP();
+
+ if (btcx_inf->timer_on) {
+ btcx_inf->timer_on = 0;
+ del_timer_sync(&btcx_inf->timer);
+ }
+
+ switch (btcx_inf->bt_state) {
+ case BT_DHCP_START:
+ /* DHCP started
+ * provide OPPORTUNITY window to get DHCP address
+ */
+ WL_TRACE(("bt_dhcp stm: started \n"));
+
+ btcx_inf->bt_state = BT_DHCP_OPPR_WIN;
+ mod_timer(&btcx_inf->timer,
+ jiffies + msecs_to_jiffies(BT_DHCP_OPPR_WIN_TIME));
+ btcx_inf->timer_on = 1;
+ break;
+
+ case BT_DHCP_OPPR_WIN:
+ if (btcx_inf->dhcp_done) {
+ WL_TRACE(("DHCP Done before T1 expiration\n"));
+ goto btc_coex_idle;
+ }
+
+ /* DHCP is not over yet, start lowering BT priority
+ * enforce btc_params + flags if necessary
+ */
+ WL_TRACE(("DHCP T1:%d expired\n", BT_DHCP_OPPR_WIN_TIME));
+ if (btcx_inf->dev)
+ wl_cfg80211_bt_setflag(btcx_inf->dev, TRUE);
+ btcx_inf->bt_state = BT_DHCP_FLAG_FORCE_TIMEOUT;
+ mod_timer(&btcx_inf->timer,
+ jiffies + msecs_to_jiffies(BT_DHCP_FLAG_FORCE_TIME));
+ btcx_inf->timer_on = 1;
+ break;
+
+ case BT_DHCP_FLAG_FORCE_TIMEOUT:
+ if (btcx_inf->dhcp_done) {
+ WL_TRACE(("DHCP Done before T2 expiration\n"));
+ } else {
+ /* No DHCP during T1+T2, restore BT priority */
+ WL_TRACE(("DHCP wait interval T2:%d msec expired\n",
+ BT_DHCP_FLAG_FORCE_TIME));
+ }
+
+ /* Restoring default bt priority */
+ if (btcx_inf->dev)
+ wl_cfg80211_bt_setflag(btcx_inf->dev, FALSE);
+btc_coex_idle:
+ btcx_inf->bt_state = BT_DHCP_IDLE;
+ btcx_inf->timer_on = 0;
+ break;
+
+ default:
+ WL_ERR(("error g_status=%d !!!\n", btcx_inf->bt_state));
+ if (btcx_inf->dev)
+ wl_cfg80211_bt_setflag(btcx_inf->dev, FALSE);
+ btcx_inf->bt_state = BT_DHCP_IDLE;
+ btcx_inf->timer_on = 0;
+ break;
+ }
+
+ /* presumably balances a wake lock taken when this work was scheduled */
+ net_os_wake_unlock(btcx_inf->dev);
+}
+
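+/*
+ * Allocate and initialize the btcoex context (idle state machine, 10 ms
+ * default timer, work item) and publish it through the btcoex_info_loc
+ * singleton used by the deinit and DHCP handlers below.
+ */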
+void* wl_cfg80211_btcoex_init(struct net_device *ndev)
+{
+ struct btcoex_info *btco_inf = NULL;
+
+ btco_inf = kmalloc(sizeof(struct btcoex_info), GFP_KERNEL);
+ if (!btco_inf)
+ return NULL;
+
+ btco_inf->bt_state = BT_DHCP_IDLE;
+ btco_inf->ts_dhcp_start = 0;
+ btco_inf->ts_dhcp_ok = 0;
+ /* Set up timer for BT */
+ btco_inf->timer_ms = 10;
+ init_timer_compat(&btco_inf->timer, wl_cfg80211_bt_timerfunc, btco_inf);
+
+ btco_inf->dev = ndev;
+
+ INIT_WORK(&btco_inf->work, wl_cfg80211_bt_handler);
+
+ btcoex_info_loc = btco_inf;
+ return btco_inf;
+}
+
+void wl_cfg80211_btcoex_deinit(void)
+{
+ if (!btcoex_info_loc)
+ return;
+
+ if (btcoex_info_loc->timer_on) {
+ btcoex_info_loc->timer_on = 0;
+ del_timer_sync(&btcoex_info_loc->timer);
+ }
+
+ cancel_work_sync(&btcoex_info_loc->work);
+
+ kfree(btcoex_info_loc);
+}
+
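+/*
+ * Entry point for the framework's BTCOEXMODE/POWERMODE private command.
+ * The character after the command name selects the action: '1' marks the
+ * start of a DHCP session (suppress scans, relax packet filters, save and
+ * override btc_params, arm the coex timer for SCO/eSCO links); '2' on
+ * Android ('0' otherwise) marks DHCP completion and restores everything.
+ */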
+int wl_cfg80211_set_btcoex_dhcp(struct net_device *dev, dhd_pub_t *dhd, char *command)
+{
+
+#ifndef OEM_ANDROID
+ static int pm = PM_FAST;
+ int pm_local = PM_OFF;
+#endif /* OEM_ANDROID */
+ struct btcoex_info *btco_inf = btcoex_info_loc;
+ char powermode_val = 0;
+ uint8 cmd_len = 0;
+ char buf_reg66va_dhcp_on[8] = { 66, 00, 00, 00, 0x10, 0x27, 0x00, 0x00 };
+ char buf_reg41va_dhcp_on[8] = { 41, 00, 00, 00, 0x33, 0x00, 0x00, 0x00 };
+ char buf_reg68va_dhcp_on[8] = { 68, 00, 00, 00, 0x90, 0x01, 0x00, 0x00 };
+
+ uint32 regaddr;
+ static uint32 saved_reg66;
+ static uint32 saved_reg41;
+ static uint32 saved_reg68;
+ static bool saved_status = FALSE;
+
+ char buf_flag7_default[8] = { 7, 00, 00, 00, 0x0, 0x00, 0x00, 0x00};
+
+ /* Figure out powermode 1 or 0 command; sizeof() counts the
+ * terminating NUL, so command[cmd_len] lands just past the ':'
+ */
+#ifdef OEM_ANDROID
+ cmd_len = sizeof(BTCOEXMODE);
+#else
+ cmd_len = sizeof(POWERMODE);
+#endif
+ powermode_val = command[cmd_len];
+
+ WL_INFORM_MEM(("BTCOEX MODE: %c\n", powermode_val));
+ if (powermode_val == '1') {
+ WL_TRACE_HW4(("DHCP session starts\n"));
+
+#if defined(OEM_ANDROID) && defined(DHCP_SCAN_SUPPRESS)
+ /* Suppress scan during the DHCP */
+ wl_cfg80211_scan_suppress(dev, 1);
+#endif /* OEM_ANDROID && DHCP_SCAN_SUPPRESS */
+
+#ifdef PKT_FILTER_SUPPORT
+ dhd->dhcp_in_progress = 1;
+
+#if defined(APSTA_BLOCK_ARP_DURING_DHCP)
+ if (DHD_OPMODE_STA_SOFTAP_CONCURR(dhd)) {
+ /* Block ARP frames while DHCP of STA interface is in
+ * progress in case of STA/SoftAP concurrent mode
+ */
+ wl_cfg80211_block_arp(dev, TRUE);
+ } else
+#endif /* APSTA_BLOCK_ARP_DURING_DHCP */
+ if (dhd->early_suspended) {
+ WL_TRACE_HW4(("DHCP in progressing , disable packet filter!!!\n"));
+ dhd_enable_packet_filter(0, dhd);
+ }
+#endif /* PKT_FILTER_SUPPORT */
+
+ /* Retrieve and save original register values */
+ if ((saved_status == FALSE) &&
+#ifndef OEM_ANDROID
+ (!dev_wlc_ioctl(dev, WLC_GET_PM, &pm, sizeof(pm))) &&
+#endif
+ (!dev_wlc_intvar_get_reg(dev, "btc_params", 66, &saved_reg66)) &&
+ (!dev_wlc_intvar_get_reg(dev, "btc_params", 41, &saved_reg41)) &&
+ (!dev_wlc_intvar_get_reg(dev, "btc_params", 68, &saved_reg68))) {
+ saved_status = TRUE;
+ WL_TRACE(("Saved 0x%x 0x%x 0x%x\n",
+ saved_reg66, saved_reg41, saved_reg68));
+
+ /* Disable PM mode during DHCP session */
+#ifndef OEM_ANDROID
+ dev_wlc_ioctl(dev, WLC_SET_PM, &pm_local, sizeof(pm_local));
+#endif
+
+ /* Start BT timer only for SCO connection */
+ if (btcoex_is_sco_active(dev)) {
+ /* btc_params 66 */
+ dev_wlc_bufvar_set(dev, "btc_params",
+ (char *)&buf_reg66va_dhcp_on[0],
+ sizeof(buf_reg66va_dhcp_on));
+ /* btc_params 41 0x33 */
+ dev_wlc_bufvar_set(dev, "btc_params",
+ (char *)&buf_reg41va_dhcp_on[0],
+ sizeof(buf_reg41va_dhcp_on));
+ /* btc_params 68 0x190 */
+ dev_wlc_bufvar_set(dev, "btc_params",
+ (char *)&buf_reg68va_dhcp_on[0],
+ sizeof(buf_reg68va_dhcp_on));
+ saved_status = TRUE;
+
+ btco_inf->bt_state = BT_DHCP_START;
+ btco_inf->timer_on = 1;
+ mod_timer(&btco_inf->timer,
+ timer_expires(&btco_inf->timer));
+ WL_TRACE(("enable BT DHCP Timer\n"));
+ }
+ }
+ else if (saved_status == TRUE) {
+ WL_ERR(("was called w/o DHCP OFF. Continue\n"));
+ }
+ }
+#ifdef OEM_ANDROID
+ else if (powermode_val == '2')
+#else
+ else if (powermode_val == '0')
+#endif
+ {
+#if defined(OEM_ANDROID) && defined(DHCP_SCAN_SUPPRESS)
+ /* Since DHCP is complete, enable the scan back */
+ wl_cfg80211_scan_suppress(dev, 0);
+#endif /* OEM_ANDROID */
+
+#ifdef PKT_FILTER_SUPPORT
+ dhd->dhcp_in_progress = 0;
+ WL_TRACE_HW4(("DHCP is complete \n"));
+
+#if defined(APSTA_BLOCK_ARP_DURING_DHCP)
+ if (DHD_OPMODE_STA_SOFTAP_CONCURR(dhd)) {
+ /* Unblock ARP frames */
+ wl_cfg80211_block_arp(dev, FALSE);
+ } else
+#endif /* APSTA_BLOCK_ARP_DURING_DHCP */
+ if (dhd->early_suspended) {
+ /* Enable packet filtering */
+ WL_TRACE_HW4(("DHCP is complete , enable packet filter!!!\n"));
+ dhd_enable_packet_filter(1, dhd);
+ }
+#endif /* PKT_FILTER_SUPPORT */
+
+ /* Restoring PM mode */
+#ifndef OEM_ANDROID
+ dev_wlc_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm));
+#endif
+
+ /* Stop any bt timer because DHCP session is done */
+ WL_TRACE(("disable BT DHCP Timer\n"));
+ if (btco_inf->timer_on) {
+ btco_inf->timer_on = 0;
+ del_timer_sync(&btco_inf->timer);
+
+ if (btco_inf->bt_state != BT_DHCP_IDLE) {
+ /* ANDREY: case when framework signals DHCP end before STM timeout */
+ /* need to restore original btc flags & extra btc params */
+ WL_TRACE(("bt->bt_state:%d\n", btco_inf->bt_state));
+ /* wake up btcoex work to restore bt flags + params */
+ schedule_work(&btco_inf->work);
+ }
+ }
+
+ /* Restore btc_flags parameter anyway */
+ if (saved_status == TRUE)
+ dev_wlc_bufvar_set(dev, "btc_flags",
+ (char *)&buf_flag7_default[0], sizeof(buf_flag7_default));
+
+ /* Restore original values */
+ if (saved_status == TRUE) {
+ regaddr = 66;
+ dev_wlc_intvar_set_reg(dev, "btc_params",
+ (char *)&regaddr, (char *)&saved_reg66);
+ regaddr = 41;
+ dev_wlc_intvar_set_reg(dev, "btc_params",
+ (char *)&regaddr, (char *)&saved_reg41);
+ regaddr = 68;
+ dev_wlc_intvar_set_reg(dev, "btc_params",
+ (char *)&regaddr, (char *)&saved_reg68);
+
+ WL_TRACE(("restore regs {66,41,68} <- 0x%x 0x%x 0x%x\n",
+ saved_reg66, saved_reg41, saved_reg68));
+ }
+ saved_status = FALSE;
+
+ }
+ else {
+ WL_ERR(("Unknown yet power setting, ignored\n"));
+ }
+ return 0;
+}
+#endif /* defined(OEM_ANDROID) */
diff --git a/bcmdhd.101.10.361.x/wl_cfgnan.c b/bcmdhd.101.10.361.x/wl_cfgnan.c
new file mode 100755
index 0000000..878495e
--- /dev/null
+++ b/bcmdhd.101.10.361.x/wl_cfgnan.c
@@ -0,0 +1,9473 @@
+/*
+ * Neighbor Awareness Networking
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifdef WL_NAN
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <bcmwifi_channels.h>
+#include <nan.h>
+#include <bcmiov.h>
+#include <net/rtnetlink.h>
+
+#include <wl_cfg80211.h>
+#include <wl_cfgscan.h>
+#include <wl_android.h>
+#include <wl_cfgnan.h>
+
+#if defined(BCMDONGLEHOST)
+#include <dngl_stats.h>
+#include <dhd.h>
+#endif /* BCMDONGLEHOST */
+#include <wl_cfgvendor.h>
+#include <bcmbloom.h>
+#include <wl_cfgp2p.h>
+#include <wl_cfgvif.h>
+#ifdef RTT_SUPPORT
+#include <dhd_rtt.h>
+#endif /* RTT_SUPPORT */
+#include <bcmstdlib_s.h>
+
+#define NAN_RANGE_REQ_EVNT 1
+#define NAN_RAND_MAC_RETRIES 10
+#define NAN_SCAN_DWELL_TIME_DELTA_MS 10
+
+#ifdef WL_NAN_DISC_CACHE
+/* Disc Cache Parameters update Flags */
+#define NAN_DISC_CACHE_PARAM_SDE_CONTROL 0x0001
+static int wl_cfgnan_cache_disc_result(struct bcm_cfg80211 *cfg, void * data,
+ u16 *disc_cache_update_flags);
+static int wl_cfgnan_remove_disc_result(struct bcm_cfg80211 * cfg, uint8 local_subid);
+static nan_disc_result_cache * wl_cfgnan_get_disc_result(struct bcm_cfg80211 *cfg,
+ uint8 remote_pubid, struct ether_addr *peer);
+#endif /* WL_NAN_DISC_CACHE */
+
+static int wl_cfgnan_set_if_addr(struct bcm_cfg80211 *cfg);
+static int wl_cfgnan_get_capability(struct net_device *ndev,
+ struct bcm_cfg80211 *cfg, nan_hal_capabilities_t *capabilities);
+static void wl_cfgnan_clear_nan_event_data(struct bcm_cfg80211 *cfg,
+ nan_event_data_t *nan_event_data);
+void wl_cfgnan_data_remove_peer(struct bcm_cfg80211 *cfg,
+ struct ether_addr *peer_addr);
+static void wl_cfgnan_send_stop_event(struct bcm_cfg80211 *cfg);
+static void wl_cfgnan_disable_cleanup(struct bcm_cfg80211 *cfg);
+static s32 wl_cfgnan_get_ndi_idx(struct bcm_cfg80211 *cfg);
+static int wl_cfgnan_init(struct bcm_cfg80211 *cfg);
+static int wl_cfgnan_deinit(struct bcm_cfg80211 *cfg, uint8 busstate);
+static void wl_cfgnan_update_dp_info(struct bcm_cfg80211 *cfg, bool add,
+ nan_data_path_id ndp_id);
+static void wl_cfgnan_data_set_peer_dp_state(struct bcm_cfg80211 *cfg,
+ struct ether_addr *peer_addr, nan_peer_dp_state_t state);
+static nan_ndp_peer_t* wl_cfgnan_data_get_peer(struct bcm_cfg80211 *cfg,
+ struct ether_addr *peer_addr);
+static int wl_cfgnan_disable(struct bcm_cfg80211 *cfg);
+static s32 wl_cfgnan_del_ndi_data(struct bcm_cfg80211 *cfg, char *name);
+static s32 wl_cfgnan_add_ndi_data(struct bcm_cfg80211 *cfg, s32 idx, char *name);
+
+#ifdef RTT_SUPPORT
+static int wl_cfgnan_clear_disc_cache(struct bcm_cfg80211 *cfg, wl_nan_instance_id_t sub_id);
+static int32 wl_cfgnan_notify_disc_with_ranging(struct bcm_cfg80211 *cfg,
+ nan_ranging_inst_t *rng_inst, nan_event_data_t *nan_event_data, uint32 distance);
+static void wl_cfgnan_disc_result_on_geofence_cancel(struct bcm_cfg80211 *cfg,
+ nan_ranging_inst_t *rng_inst);
+static void wl_cfgnan_terminate_ranging_session(struct bcm_cfg80211 *cfg,
+ nan_ranging_inst_t *ranging_inst);
+static s32 wl_cfgnan_clear_peer_ranging(struct bcm_cfg80211 * cfg,
+ nan_ranging_inst_t *rng_inst, int reason);
+static s32 wl_cfgnan_handle_dp_ranging_concurrency(struct bcm_cfg80211 *cfg,
+ struct ether_addr *peer, int reason);
+static void wl_cfgnan_terminate_all_obsolete_ranging_sessions(struct bcm_cfg80211 *cfg);
+static bool wl_ranging_geofence_session_with_peer(struct bcm_cfg80211 *cfg,
+ struct ether_addr *peer_addr);
+static void wl_cfgnan_reset_remove_ranging_instance(struct bcm_cfg80211 *cfg,
+ nan_ranging_inst_t *ranging_inst);
+static void wl_cfgnan_remove_ranging_instance(struct bcm_cfg80211 *cfg,
+ nan_ranging_inst_t *ranging_inst);
+#endif /* RTT_SUPPORT */
+
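+/*
+ * The C2S() macro used in the to-string helpers below is assumed to expand
+ * to "case x: id2str = #x;", mapping each enum value to its own name.
+ */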
+static const char *
+nan_role_to_str(u8 role)
+{
+ const char *id2str;
+
+ switch (role) {
+ C2S(WL_NAN_ROLE_AUTO);
+ break;
+ C2S(WL_NAN_ROLE_NON_MASTER_NON_SYNC);
+ break;
+ C2S(WL_NAN_ROLE_NON_MASTER_SYNC);
+ break;
+ C2S(WL_NAN_ROLE_MASTER);
+ break;
+ C2S(WL_NAN_ROLE_ANCHOR_MASTER);
+ break;
+ default:
+ id2str = "WL_NAN_ROLE_UNKNOWN";
+ }
+
+ return id2str;
+}
+
+const char *
+nan_event_to_str(u16 cmd)
+{
+ const char *id2str;
+
+ switch (cmd) {
+ C2S(WL_NAN_EVENT_START);
+ break;
+ C2S(WL_NAN_EVENT_JOIN);
+ break;
+ C2S(WL_NAN_EVENT_ROLE);
+ break;
+ C2S(WL_NAN_EVENT_SCAN_COMPLETE);
+ break;
+ C2S(WL_NAN_EVENT_DISCOVERY_RESULT);
+ break;
+ C2S(WL_NAN_EVENT_REPLIED);
+ break;
+ C2S(WL_NAN_EVENT_TERMINATED);
+ break;
+ C2S(WL_NAN_EVENT_RECEIVE);
+ break;
+ C2S(WL_NAN_EVENT_STATUS_CHG);
+ break;
+ C2S(WL_NAN_EVENT_MERGE);
+ break;
+ C2S(WL_NAN_EVENT_STOP);
+ break;
+ C2S(WL_NAN_EVENT_P2P);
+ break;
+ C2S(WL_NAN_EVENT_WINDOW_BEGIN_P2P);
+ break;
+ C2S(WL_NAN_EVENT_WINDOW_BEGIN_MESH);
+ break;
+ C2S(WL_NAN_EVENT_WINDOW_BEGIN_IBSS);
+ break;
+ C2S(WL_NAN_EVENT_WINDOW_BEGIN_RANGING);
+ break;
+ C2S(WL_NAN_EVENT_POST_DISC);
+ break;
+ C2S(WL_NAN_EVENT_DATA_IF_ADD);
+ break;
+ C2S(WL_NAN_EVENT_DATA_PEER_ADD);
+ break;
+ C2S(WL_NAN_EVENT_PEER_DATAPATH_IND);
+ break;
+ C2S(WL_NAN_EVENT_DATAPATH_ESTB);
+ break;
+ C2S(WL_NAN_EVENT_SDF_RX);
+ break;
+ C2S(WL_NAN_EVENT_DATAPATH_END);
+ break;
+ C2S(WL_NAN_EVENT_BCN_RX);
+ break;
+ C2S(WL_NAN_EVENT_PEER_DATAPATH_RESP);
+ break;
+ C2S(WL_NAN_EVENT_PEER_DATAPATH_CONF);
+ break;
+ C2S(WL_NAN_EVENT_RNG_REQ_IND);
+ break;
+ C2S(WL_NAN_EVENT_RNG_RPT_IND);
+ break;
+ C2S(WL_NAN_EVENT_RNG_TERM_IND);
+ break;
+ C2S(WL_NAN_EVENT_PEER_DATAPATH_SEC_INST);
+ break;
+ C2S(WL_NAN_EVENT_TXS);
+ break;
+ C2S(WL_NAN_EVENT_DW_START);
+ break;
+ C2S(WL_NAN_EVENT_DW_END);
+ break;
+ C2S(WL_NAN_EVENT_CHAN_BOUNDARY);
+ break;
+ C2S(WL_NAN_EVENT_MR_CHANGED);
+ break;
+ C2S(WL_NAN_EVENT_RNG_RESP_IND);
+ break;
+ C2S(WL_NAN_EVENT_PEER_SCHED_UPD_NOTIF);
+ break;
+ C2S(WL_NAN_EVENT_PEER_SCHED_REQ);
+ break;
+ C2S(WL_NAN_EVENT_PEER_SCHED_RESP);
+ break;
+ C2S(WL_NAN_EVENT_PEER_SCHED_CONF);
+ break;
+ C2S(WL_NAN_EVENT_SENT_DATAPATH_END);
+ break;
+ C2S(WL_NAN_EVENT_SLOT_START);
+ break;
+ C2S(WL_NAN_EVENT_SLOT_END);
+ break;
+ C2S(WL_NAN_EVENT_HOST_ASSIST_REQ);
+ break;
+ C2S(WL_NAN_EVENT_RX_MGMT_FRM);
+ break;
+ C2S(WL_NAN_EVENT_DISC_CACHE_TIMEOUT);
+ break;
+ C2S(WL_NAN_EVENT_OOB_AF_TXS);
+ break;
+ C2S(WL_NAN_EVENT_OOB_AF_RX);
+ break;
+ C2S(WL_NAN_EVENT_INVALID);
+ break;
+
+ default:
+ id2str = "WL_NAN_EVENT_UNKNOWN";
+ }
+
+ return id2str;
+}
+
+static const char *
+nan_frm_type_to_str(u16 frm_type)
+{
+ const char *id2str;
+
+ switch (frm_type) {
+ C2S(WL_NAN_FRM_TYPE_PUBLISH);
+ break;
+ C2S(WL_NAN_FRM_TYPE_SUBSCRIBE);
+ break;
+ C2S(WL_NAN_FRM_TYPE_FOLLOWUP);
+ break;
+
+ C2S(WL_NAN_FRM_TYPE_DP_REQ);
+ break;
+ C2S(WL_NAN_FRM_TYPE_DP_RESP);
+ break;
+ C2S(WL_NAN_FRM_TYPE_DP_CONF);
+ break;
+ C2S(WL_NAN_FRM_TYPE_DP_INSTALL);
+ break;
+ C2S(WL_NAN_FRM_TYPE_DP_END);
+ break;
+
+ C2S(WL_NAN_FRM_TYPE_SCHED_REQ);
+ break;
+ C2S(WL_NAN_FRM_TYPE_SCHED_RESP);
+ break;
+ C2S(WL_NAN_FRM_TYPE_SCHED_CONF);
+ break;
+ C2S(WL_NAN_FRM_TYPE_SCHED_UPD);
+ break;
+
+ C2S(WL_NAN_FRM_TYPE_RNG_REQ);
+ break;
+ C2S(WL_NAN_FRM_TYPE_RNG_RESP);
+ break;
+ C2S(WL_NAN_FRM_TYPE_RNG_TERM);
+ break;
+ C2S(WL_NAN_FRM_TYPE_RNG_REPORT);
+ break;
+
+ default:
+ id2str = "WL_NAN_FRM_TYPE_UNKNOWN";
+ }
+
+ return id2str;
+}
+
+static const char *
+nan_event_cause_to_str(u8 cause)
+{
+ const char *id2str;
+
+ switch (cause) {
+ C2S(WL_NAN_DP_TERM_WITH_INACTIVITY);
+ break;
+ C2S(WL_NAN_DP_TERM_WITH_FSM_DESTROY);
+ break;
+ C2S(WL_NAN_DP_TERM_WITH_PEER_DP_END);
+ break;
+ C2S(WL_NAN_DP_TERM_WITH_STALE_NDP);
+ break;
+ C2S(WL_NAN_DP_TERM_WITH_DISABLE);
+ break;
+ C2S(WL_NAN_DP_TERM_WITH_NDI_DEL);
+ break;
+ C2S(WL_NAN_DP_TERM_WITH_PEER_HB_FAIL);
+ break;
+ C2S(WL_NAN_DP_TERM_WITH_HOST_IOVAR);
+ break;
+ C2S(WL_NAN_DP_TERM_WITH_ESTB_FAIL);
+ break;
+ C2S(WL_NAN_DP_TERM_WITH_SCHED_REJECT);
+ break;
+
+ default:
+ id2str = "WL_NAN_EVENT_CAUSE_UNKNOWN";
+ }
+
+ return id2str;
+}
+
+static int wl_cfgnan_execute_ioctl(struct net_device *ndev,
+ struct bcm_cfg80211 *cfg, bcm_iov_batch_buf_t *nan_buf,
+ uint16 nan_buf_size, uint32 *status, uint8 *resp_buf,
+ uint16 resp_buf_len);
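+/*
+ * Service instance IDs are allocated from the svc_inst_id_mask bitmap:
+ * the first clear bit at or above inst_id_start is claimed, and bit i
+ * maps to instance ID i + 1 (IDs are 1-based, so 0 stays invalid).
+ */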
+int
+wl_cfgnan_generate_inst_id(struct bcm_cfg80211 *cfg, uint8 *p_inst_id)
+{
+ s32 ret = BCME_OK;
+ uint8 i = 0;
+ wl_nancfg_t *nancfg = cfg->nancfg;
+
+ if (p_inst_id == NULL) {
+ WL_ERR(("Invalid arguments\n"));
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (nancfg->inst_id_start == NAN_ID_MAX) {
+ WL_ERR(("Consumed all IDs, resetting the counter\n"));
+ nancfg->inst_id_start = 0;
+ }
+
+ for (i = nancfg->inst_id_start; i < NAN_ID_MAX; i++) {
+ if (isclr(nancfg->svc_inst_id_mask, i)) {
+ setbit(nancfg->svc_inst_id_mask, i);
+ *p_inst_id = i + 1;
+ nancfg->inst_id_start = *p_inst_id;
+ WL_DBG(("Instance ID=%d\n", *p_inst_id));
+ goto exit;
+ }
+ }
+ WL_ERR(("Allocated maximum IDs\n"));
+ ret = BCME_NORESOURCE;
+exit:
+ return ret;
+}
+
+int
+wl_cfgnan_remove_inst_id(struct bcm_cfg80211 *cfg, uint8 inst_id)
+{
+ s32 ret = BCME_OK;
+ WL_DBG(("%s: Removing svc instance id %d\n", __FUNCTION__, inst_id));
+ clrbit(cfg->nancfg->svc_inst_id_mask, inst_id-1);
+ return ret;
+}
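+/*
+ * Parse a Service Descriptor Extension Attribute (SDEA): pick up the SDE
+ * control flags for the matching publish instance and, when present, copy
+ * the SDEA service-specific info into tlv_data, validating every advance
+ * of the read pointer against the remaining event buffer length.
+ */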
+s32 wl_cfgnan_parse_sdea_data(osl_t *osh, const uint8 *p_attr,
+ uint16 len, nan_event_data_t *tlv_data)
+{
+ const wifi_nan_svc_desc_ext_attr_t *nan_svc_desc_ext_attr = NULL;
+ uint8 offset;
+ s32 ret = BCME_OK;
+
+ /* service descriptor ext attributes */
+ nan_svc_desc_ext_attr = (const wifi_nan_svc_desc_ext_attr_t *)p_attr;
+
+ /* attribute ID */
+ WL_TRACE(("> attr id: 0x%02x\n", nan_svc_desc_ext_attr->id));
+
+ /* attribute length */
+ WL_TRACE(("> attr len: 0x%x\n", nan_svc_desc_ext_attr->len));
+ if (nan_svc_desc_ext_attr->instance_id == tlv_data->pub_id) {
+ tlv_data->sde_control_flag = nan_svc_desc_ext_attr->control;
+ }
+ offset = sizeof(*nan_svc_desc_ext_attr);
+ if (offset > len) {
+ WL_ERR(("Invalid event buffer len\n"));
+ ret = BCME_BUFTOOSHORT;
+ goto fail;
+ }
+ p_attr += offset;
+ len -= offset;
+
+ if (tlv_data->sde_control_flag & NAN_SC_RANGE_LIMITED) {
+ WL_TRACE(("> svc_control: range limited present\n"));
+ }
+ if (tlv_data->sde_control_flag & NAN_SDE_CF_SVC_UPD_IND_PRESENT) {
+ WL_TRACE(("> svc_control: sdea svc specific info present\n"));
+ tlv_data->sde_svc_info.dlen = (p_attr[1] | (p_attr[2] << 8));
+ WL_TRACE(("> sdea svc info len: 0x%02x\n", tlv_data->sde_svc_info.dlen));
+ if (!tlv_data->sde_svc_info.dlen ||
+ tlv_data->sde_svc_info.dlen > NAN_MAX_SERVICE_SPECIFIC_INFO_LEN) {
+ /* must be able to handle a null msg, which is not an error */
+ tlv_data->sde_svc_info.dlen = 0;
+ WL_ERR(("sde data length is invalid\n"));
+ ret = BCME_BADLEN;
+ goto fail;
+ }
+
+ if (tlv_data->sde_svc_info.dlen > 0) {
+ tlv_data->sde_svc_info.data = MALLOCZ(osh, tlv_data->sde_svc_info.dlen);
+ if (!tlv_data->sde_svc_info.data) {
+ WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
+ tlv_data->sde_svc_info.dlen = 0;
+ ret = BCME_NOMEM;
+ goto fail;
+ }
+ /* advance read pointer, accounting for the size of the Service Update Indicator */
+ offset = sizeof(tlv_data->sde_svc_info.dlen) - 1;
+ if (offset > len) {
+ WL_ERR(("Invalid event buffer len\n"));
+ ret = BCME_BUFTOOSHORT;
+ goto fail;
+ }
+ p_attr += offset;
+ len -= offset;
+ ret = memcpy_s(tlv_data->sde_svc_info.data, tlv_data->sde_svc_info.dlen,
+ p_attr, tlv_data->sde_svc_info.dlen);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy sde_svc_info\n"));
+ goto fail;
+ }
+ } else {
+ /* must be able to handle a null msg, which is not an error */
+ tlv_data->sde_svc_info.dlen = 0;
+ WL_DBG(("%s: sdea svc info length is zero, null info data\n",
+ __FUNCTION__));
+ }
+ }
+ return ret;
+fail:
+ if (tlv_data->sde_svc_info.data) {
+ MFREE(osh, tlv_data->sde_svc_info.data,
+ tlv_data->sde_svc_info.dlen);
+ tlv_data->sde_svc_info.data = NULL;
+ }
+
+ WL_DBG(("Parse SDEA event data, status = %d\n", ret));
+ return ret;
+}
+
+/*
+ * This attribute contains some mandatory fields and some optional fields
+ * depending on the content of the service discovery request.
+ */
+s32
+wl_cfgnan_parse_sda_data(osl_t *osh, const uint8 *p_attr,
+ uint16 len, nan_event_data_t *tlv_data)
+{
+ uint8 svc_control = 0, offset = 0;
+ s32 ret = BCME_OK;
+ const wifi_nan_svc_descriptor_attr_t *nan_svc_desc_attr = NULL;
+
+ /* service descriptor attributes */
+ nan_svc_desc_attr = (const wifi_nan_svc_descriptor_attr_t *)p_attr;
+ /* attribute ID */
+ WL_TRACE(("> attr id: 0x%02x\n", nan_svc_desc_attr->id));
+
+ /* attribute length */
+ WL_TRACE(("> attr len: 0x%x\n", nan_svc_desc_attr->len));
+
+ /* service ID */
+ ret = memcpy_s(tlv_data->svc_name, sizeof(tlv_data->svc_name),
+ nan_svc_desc_attr->svc_hash, NAN_SVC_HASH_LEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy svc_hash_name:\n"));
+ return ret;
+ }
+ WL_TRACE(("> svc_hash_name: " MACDBG "\n", MAC2STRDBG(tlv_data->svc_name)));
+
+ /* local instance ID */
+ tlv_data->local_inst_id = nan_svc_desc_attr->instance_id;
+ WL_TRACE(("> local instance id: 0x%02x\n", tlv_data->local_inst_id));
+
+ /* requestor instance ID */
+ tlv_data->requestor_id = nan_svc_desc_attr->requestor_id;
+ WL_TRACE(("> requestor id: 0x%02x\n", tlv_data->requestor_id));
+
+ /* service control */
+ svc_control = nan_svc_desc_attr->svc_control;
+ if ((svc_control & NAN_SVC_CONTROL_TYPE_MASK) == NAN_SC_PUBLISH) {
+ WL_TRACE(("> Service control type: NAN_SC_PUBLISH\n"));
+ } else if ((svc_control & NAN_SVC_CONTROL_TYPE_MASK) == NAN_SC_SUBSCRIBE) {
+ WL_TRACE(("> Service control type: NAN_SC_SUBSCRIBE\n"));
+ } else if ((svc_control & NAN_SVC_CONTROL_TYPE_MASK) == NAN_SC_FOLLOWUP) {
+ WL_TRACE(("> Service control type: NAN_SC_FOLLOWUP\n"));
+ }
+ offset = sizeof(*nan_svc_desc_attr);
+ if (offset > len) {
+ WL_ERR(("Invalid event buffer len\n"));
+ ret = BCME_BUFTOOSHORT;
+ goto fail;
+ }
+ p_attr += offset;
+ len -= offset;
+
+ /*
+ * optional fields:
+ * must be in order following by service descriptor attribute format
+ */
+
+ /* binding bitmap */
+ if (svc_control & NAN_SC_BINDING_BITMAP_PRESENT) {
+ uint16 bitmap = 0;
+ WL_TRACE(("> svc_control: binding bitmap present\n"));
+
+ /* Copy binding bitmap */
+ ret = memcpy_s(&bitmap, sizeof(bitmap),
+ p_attr, NAN_BINDING_BITMAP_LEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy bit map\n"));
+ return ret;
+ }
+ WL_TRACE(("> sc binding bitmap: 0x%04x\n", bitmap));
+
+ if (NAN_BINDING_BITMAP_LEN > len) {
+ WL_ERR(("Invalid event buffer len\n"));
+ ret = BCME_BUFTOOSHORT;
+ goto fail;
+ }
+ p_attr += NAN_BINDING_BITMAP_LEN;
+ len -= NAN_BINDING_BITMAP_LEN;
+ }
+
+ /* matching filter */
+ if (svc_control & NAN_SC_MATCHING_FILTER_PRESENT) {
+ WL_TRACE(("> svc_control: matching filter present\n"));
+
+ tlv_data->tx_match_filter.dlen = *p_attr++;
+ WL_TRACE(("> matching filter len: 0x%02x\n",
+ tlv_data->tx_match_filter.dlen));
+
+ if (!tlv_data->tx_match_filter.dlen ||
+ tlv_data->tx_match_filter.dlen > MAX_MATCH_FILTER_LEN) {
+ tlv_data->tx_match_filter.dlen = 0;
+ WL_ERR(("tx match filter length is invalid\n"));
+ ret = -EINVAL;
+ goto fail;
+ }
+ tlv_data->tx_match_filter.data =
+ MALLOCZ(osh, tlv_data->tx_match_filter.dlen);
+ if (!tlv_data->tx_match_filter.data) {
+ WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
+ tlv_data->tx_match_filter.dlen = 0;
+ ret = -ENOMEM;
+ goto fail;
+ }
+ ret = memcpy_s(tlv_data->tx_match_filter.data, tlv_data->tx_match_filter.dlen,
+ p_attr, tlv_data->tx_match_filter.dlen);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy tx match filter data\n"));
+ goto fail;
+ }
+ /* advance read pointer */
+ offset = tlv_data->tx_match_filter.dlen;
+ if (offset > len) {
+ WL_ERR(("Invalid event buffer\n"));
+ ret = BCME_BUFTOOSHORT;
+ goto fail;
+ }
+ p_attr += offset;
+ len -= offset;
+ }
+
+ /* service response filter */
+ if (svc_control & NAN_SC_SR_FILTER_PRESENT) {
+ WL_TRACE(("> svc_control: service response filter present\n"));
+
+ tlv_data->rx_match_filter.dlen = *p_attr++;
+ WL_TRACE(("> sr match filter len: 0x%02x\n",
+ tlv_data->rx_match_filter.dlen));
+
+ if (!tlv_data->rx_match_filter.dlen ||
+ tlv_data->rx_match_filter.dlen > MAX_MATCH_FILTER_LEN) {
+ tlv_data->rx_match_filter.dlen = 0;
+ WL_ERR(("%s: sr matching filter length is invalid\n",
+ __FUNCTION__));
+ ret = BCME_BADLEN;
+ goto fail;
+ }
+ tlv_data->rx_match_filter.data =
+ MALLOCZ(osh, tlv_data->rx_match_filter.dlen);
+ if (!tlv_data->rx_match_filter.data) {
+ WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
+ tlv_data->rx_match_filter.dlen = 0;
+ ret = BCME_NOMEM;
+ goto fail;
+ }
+
+ ret = memcpy_s(tlv_data->rx_match_filter.data, tlv_data->rx_match_filter.dlen,
+ p_attr, tlv_data->rx_match_filter.dlen);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy rx match filter data\n"));
+ goto fail;
+ }
+
+ /* advance read pointer */
+ offset = tlv_data->rx_match_filter.dlen;
+ if (offset > len) {
+ WL_ERR(("Invalid event buffer len\n"));
+ ret = BCME_BUFTOOSHORT;
+ goto fail;
+ }
+ p_attr += offset;
+ len -= offset;
+ }
+
+ /* service specific info */
+ if (svc_control & NAN_SC_SVC_INFO_PRESENT) {
+ WL_TRACE(("> svc_control: svc specific info present\n"));
+
+ tlv_data->svc_info.dlen = *p_attr++;
+ WL_TRACE(("> svc info len: 0x%02x\n", tlv_data->svc_info.dlen));
+
+ if (!tlv_data->svc_info.dlen ||
+ tlv_data->svc_info.dlen > NAN_MAX_SERVICE_SPECIFIC_INFO_LEN) {
+ /* must be able to handle a null msg, which is not an error */
+ tlv_data->svc_info.dlen = 0;
+ WL_ERR(("svc info length is invalid\n"));
+ ret = BCME_BADLEN;
+ goto fail;
+ }
+
+ if (tlv_data->svc_info.dlen > 0) {
+ tlv_data->svc_info.data =
+ MALLOCZ(osh, tlv_data->svc_info.dlen);
+ if (!tlv_data->svc_info.data) {
+ WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
+ tlv_data->svc_info.dlen = 0;
+ ret = BCME_NOMEM;
+ goto fail;
+ }
+ ret = memcpy_s(tlv_data->svc_info.data, tlv_data->svc_info.dlen,
+ p_attr, tlv_data->svc_info.dlen);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy svc info\n"));
+ goto fail;
+ }
+
+ /* advance read pointer */
+ offset = tlv_data->svc_info.dlen;
+ if (offset > len) {
+ WL_ERR(("Invalid event buffer len\n"));
+ ret = BCME_BUFTOOSHORT;
+ goto fail;
+ }
+ p_attr += offset;
+ len -= offset;
+ } else {
+ /* must be able to handle a null msg, which is not an error */
+ tlv_data->svc_info.dlen = 0;
+ WL_TRACE(("%s: svc info length is zero, null info data\n",
+ __FUNCTION__));
+ }
+ }
+
+ /*
+ * discovery range limited:
+ * If set to 1, the pub/sub msg is limited in range to close proximity.
+ * If set to 0, the pub/sub msg is not limited in range.
+ * Valid only when the message is either a publish or a subscribe.
+ */
+ if (svc_control & NAN_SC_RANGE_LIMITED) {
+ if (((svc_control & NAN_SVC_CONTROL_TYPE_MASK) == NAN_SC_PUBLISH) ||
+ ((svc_control & NAN_SVC_CONTROL_TYPE_MASK) == NAN_SC_SUBSCRIBE)) {
+ WL_TRACE(("> svc_control: range limited present\n"));
+ } else {
+ WL_TRACE(("range limited is only valid on pub or sub\n"));
+ }
+
+ /* TODO: send up */
+
+ /* advance read pointer */
+ p_attr++;
+ }
+ return ret;
+fail:
+ if (tlv_data->tx_match_filter.data) {
+ MFREE(osh, tlv_data->tx_match_filter.data,
+ tlv_data->tx_match_filter.dlen);
+ tlv_data->tx_match_filter.data = NULL;
+ }
+ if (tlv_data->rx_match_filter.data) {
+ MFREE(osh, tlv_data->rx_match_filter.data,
+ tlv_data->rx_match_filter.dlen);
+ tlv_data->rx_match_filter.data = NULL;
+ }
+ if (tlv_data->svc_info.data) {
+ MFREE(osh, tlv_data->svc_info.data,
+ tlv_data->svc_info.dlen);
+ tlv_data->svc_info.data = NULL;
+ }
+
+ WL_DBG(("Parse SDA event data, status = %d\n", ret));
+ return ret;
+}
+
+static s32
+wl_cfgnan_parse_sd_attr_data(osl_t *osh, uint16 len, const uint8 *data,
+ nan_event_data_t *tlv_data, uint16 type) {
+ const uint8 *p_attr = data;
+ uint16 offset = 0;
+ s32 ret = BCME_OK;
+ const wl_nan_event_disc_result_t *ev_disc = NULL;
+ const wl_nan_event_replied_t *ev_replied = NULL;
+ const wl_nan_ev_receive_t *ev_fup = NULL;
+
+ /*
+ * Mapping wifi_nan_svc_descriptor_attr_t, and svc controls are optional.
+ */
+ if (type == WL_NAN_XTLV_SD_DISC_RESULTS) {
+ u8 iter;
+ ev_disc = (const wl_nan_event_disc_result_t *)p_attr;
+
+ WL_DBG((">> WL_NAN_XTLV_RESULTS: Discovery result\n"));
+
+ tlv_data->pub_id = (wl_nan_instance_id_t)ev_disc->pub_id;
+ tlv_data->sub_id = (wl_nan_instance_id_t)ev_disc->sub_id;
+ tlv_data->publish_rssi = ev_disc->publish_rssi;
+ ret = memcpy_s(&tlv_data->remote_nmi, ETHER_ADDR_LEN,
+ &ev_disc->pub_mac, ETHER_ADDR_LEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy remote nmi\n"));
+ goto fail;
+ }
+
+ WL_TRACE(("publish id: %d\n", ev_disc->pub_id));
+ WL_TRACE(("subscribe d: %d\n", ev_disc->sub_id));
+ WL_TRACE(("publish mac addr: " MACDBG "\n",
+ MAC2STRDBG(ev_disc->pub_mac.octet)));
+ WL_TRACE(("publish rssi: %d\n", (int8)ev_disc->publish_rssi));
+ WL_TRACE(("attribute no: %d\n", ev_disc->attr_num));
+ WL_TRACE(("attribute len: %d\n", (uint16)ev_disc->attr_list_len));
+
+ /* advance to the service descriptor */
+ offset = OFFSETOF(wl_nan_event_disc_result_t, attr_list[0]);
+ if (offset > len) {
+ WL_ERR(("Invalid event buffer len\n"));
+ ret = BCME_BUFTOOSHORT;
+ goto fail;
+ }
+ p_attr += offset;
+ len -= offset;
+
+ iter = ev_disc->attr_num;
+ while (iter) {
+ if ((uint8)*p_attr == NAN_ATTR_SVC_DESCRIPTOR) {
+ WL_TRACE(("> attr id: 0x%02x\n", (uint8)*p_attr));
+ ret = wl_cfgnan_parse_sda_data(osh, p_attr, len, tlv_data);
+ if (unlikely(ret)) {
+ WL_ERR(("wl_cfgnan_parse_sda_data failed,"
+ "error = %d \n", ret));
+ goto fail;
+ }
+ }
+
+ if ((uint8)*p_attr == NAN_ATTR_SVC_DESC_EXTENSION) {
+ WL_TRACE(("> attr id: 0x%02x\n", (uint8)*p_attr));
+ ret = wl_cfgnan_parse_sdea_data(osh, p_attr, len, tlv_data);
+ if (unlikely(ret)) {
+ WL_ERR(("wl_cfgnan_parse_sdea_data failed,"
+ "error = %d \n", ret));
+ goto fail;
+ }
+ }
+ offset = (sizeof(*p_attr) +
+ sizeof(ev_disc->attr_list_len) +
+ (p_attr[1] | (p_attr[2] << 8)));
+ if (offset > len) {
+ WL_ERR(("Invalid event buffer len\n"));
+ ret = BCME_BUFTOOSHORT;
+ goto fail;
+ }
+ p_attr += offset;
+ len -= offset;
+ iter--;
+ }
+ } else if (type == WL_NAN_XTLV_SD_FUP_RECEIVED) {
+ uint8 iter;
+ ev_fup = (const wl_nan_ev_receive_t *)p_attr;
+
+ WL_TRACE((">> WL_NAN_XTLV_SD_FUP_RECEIVED: Transmit follow-up\n"));
+
+ tlv_data->local_inst_id = (wl_nan_instance_id_t)ev_fup->local_id;
+ tlv_data->requestor_id = (wl_nan_instance_id_t)ev_fup->remote_id;
+ tlv_data->fup_rssi = ev_fup->fup_rssi;
+ ret = memcpy_s(&tlv_data->remote_nmi, ETHER_ADDR_LEN,
+ &ev_fup->remote_addr, ETHER_ADDR_LEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy remote nmi\n"));
+ goto fail;
+ }
+
+ WL_TRACE(("local id: %d\n", ev_fup->local_id));
+ WL_TRACE(("remote id: %d\n", ev_fup->remote_id));
+ WL_TRACE(("peer mac addr: " MACDBG "\n",
+ MAC2STRDBG(ev_fup->remote_addr.octet)));
+ WL_TRACE(("peer rssi: %d\n", (int8)ev_fup->fup_rssi));
+ WL_TRACE(("attribute no: %d\n", ev_fup->attr_num));
+ WL_TRACE(("attribute len: %d\n", ev_fup->attr_list_len));
+
+ /* advance to the service descriptor which is attr_list[0] */
+ offset = OFFSETOF(wl_nan_ev_receive_t, attr_list[0]);
+ if (offset > len) {
+ WL_ERR(("Invalid event buffer len\n"));
+ ret = BCME_BUFTOOSHORT;
+ goto fail;
+ }
+ p_attr += offset;
+ len -= offset;
+
+ iter = ev_fup->attr_num;
+ while (iter) {
+ if ((uint8)*p_attr == NAN_ATTR_SVC_DESCRIPTOR) {
+ WL_TRACE(("> attr id: 0x%02x\n", (uint8)*p_attr));
+ ret = wl_cfgnan_parse_sda_data(osh, p_attr, len, tlv_data);
+ if (unlikely(ret)) {
+ WL_ERR(("wl_cfgnan_parse_sda_data failed,"
+ "error = %d \n", ret));
+ goto fail;
+ }
+ }
+
+ if ((uint8)*p_attr == NAN_ATTR_SVC_DESC_EXTENSION) {
+ WL_TRACE(("> attr id: 0x%02x\n", (uint8)*p_attr));
+ ret = wl_cfgnan_parse_sdea_data(osh, p_attr, len, tlv_data);
+ if (unlikely(ret)) {
+ WL_ERR(("wl_cfgnan_parse_sdea_data failed,"
+ "error = %d \n", ret));
+ goto fail;
+ }
+ }
+ offset = (sizeof(*p_attr) +
+ sizeof(ev_fup->attr_list_len) +
+ (p_attr[1] | (p_attr[2] << 8)));
+ if (offset > len) {
+ WL_ERR(("Invalid event buffer len\n"));
+ ret = BCME_BUFTOOSHORT;
+ goto fail;
+ }
+ p_attr += offset;
+ len -= offset;
+ iter--;
+ }
+ } else if (type == WL_NAN_XTLV_SD_SDF_RX) {
+ /*
+ * SDF followed by nan2_pub_act_frame_t and wifi_nan_svc_descriptor_attr_t,
+ * and svc controls are optional.
+ */
+ const nan2_pub_act_frame_t *nan_pub_af =
+ (const nan2_pub_act_frame_t *)p_attr;
+
+ WL_TRACE((">> WL_NAN_XTLV_SD_SDF_RX\n"));
+
+ /* nan2_pub_act_frame_t */
+ WL_TRACE(("pub category: 0x%02x\n", nan_pub_af->category_id));
+ WL_TRACE(("pub action: 0x%02x\n", nan_pub_af->action_field));
+ WL_TRACE(("nan oui: %2x-%2x-%2x\n",
+ nan_pub_af->oui[0], nan_pub_af->oui[1], nan_pub_af->oui[2]));
+ WL_TRACE(("oui type: 0x%02x\n", nan_pub_af->oui_type));
+ WL_TRACE(("oui subtype: 0x%02x\n", nan_pub_af->oui_sub_type));
+
+ offset = sizeof(*nan_pub_af);
+ if (offset > len) {
+ WL_ERR(("Invalid event buffer len\n"));
+ ret = BCME_BUFTOOSHORT;
+ goto fail;
+ }
+ p_attr += offset;
+ len -= offset;
+ } else if (type == WL_NAN_XTLV_SD_REPLIED) {
+ ev_replied = (const wl_nan_event_replied_t *)p_attr;
+
+ WL_TRACE((">> WL_NAN_XTLV_SD_REPLIED: Replied Event\n"));
+
+ tlv_data->pub_id = (wl_nan_instance_id_t)ev_replied->pub_id;
+ tlv_data->sub_id = (wl_nan_instance_id_t)ev_replied->sub_id;
+ tlv_data->sub_rssi = ev_replied->sub_rssi;
+ ret = memcpy_s(&tlv_data->remote_nmi, ETHER_ADDR_LEN,
+ &ev_replied->sub_mac, ETHER_ADDR_LEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy remote nmi\n"));
+ goto fail;
+ }
+
+ WL_TRACE(("publish id: %d\n", ev_replied->pub_id));
+ WL_TRACE(("subscribe d: %d\n", ev_replied->sub_id));
+ WL_TRACE(("Subscriber mac addr: " MACDBG "\n",
+ MAC2STRDBG(ev_replied->sub_mac.octet)));
+ WL_TRACE(("subscribe rssi: %d\n", (int8)ev_replied->sub_rssi));
+ WL_TRACE(("attribute no: %d\n", ev_replied->attr_num));
+ WL_TRACE(("attribute len: %d\n", (uint16)ev_replied->attr_list_len));
+
+ /* advance to the service descriptor which is attr_list[0] */
+ offset = OFFSETOF(wl_nan_event_replied_t, attr_list[0]);
+ if (offset > len) {
+ WL_ERR(("Invalid event buffer len\n"));
+ ret = BCME_BUFTOOSHORT;
+ goto fail;
+ }
+ p_attr += offset;
+ len -= offset;
+ ret = wl_cfgnan_parse_sda_data(osh, p_attr, len, tlv_data);
+ if (unlikely(ret)) {
+ WL_ERR(("wl_cfgnan_parse_sdea_data failed,"
+ "error = %d \n", ret));
+ }
+ }
+
+fail:
+ return ret;
+}
+
+/* Based on each case of tlv type id, fill into tlv data */
+static int
+wl_cfgnan_set_vars_cbfn(void *ctx, const uint8 *data, uint16 type, uint16 len)
+{
+ nan_parse_event_ctx_t *ctx_tlv_data = ((nan_parse_event_ctx_t *)(ctx));
+ nan_event_data_t *tlv_data = ((nan_event_data_t *)(ctx_tlv_data->nan_evt_data));
+ int ret = BCME_OK;
+
+ NAN_DBG_ENTER();
+ if (!data || !len) {
+ WL_ERR(("data length is invalid\n"));
+ ret = BCME_ERROR;
+ goto fail;
+ }
+
+ switch (type) {
+ /*
+ * Need to parse service descript attributes including service control,
+ * when Follow up or Discovery result come
+ */
+ case WL_NAN_XTLV_SD_FUP_RECEIVED:
+ case WL_NAN_XTLV_SD_DISC_RESULTS: {
+ ret = wl_cfgnan_parse_sd_attr_data(ctx_tlv_data->cfg->osh,
+ len, data, tlv_data, type);
+ break;
+ }
+ case WL_NAN_XTLV_SD_NDPE_TLV_LIST:
+ /* Intentional fall-through: the NDPE TLV list and SVC INFO are sent
+ * to upper layers in the same container
+ */
+ case WL_NAN_XTLV_SD_SVC_INFO: {
+ tlv_data->svc_info.data =
+ MALLOCZ(ctx_tlv_data->cfg->osh, len);
+ if (!tlv_data->svc_info.data) {
+ WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
+ tlv_data->svc_info.dlen = 0;
+ ret = BCME_NOMEM;
+ goto fail;
+ }
+ tlv_data->svc_info.dlen = len;
+ ret = memcpy_s(tlv_data->svc_info.data, tlv_data->svc_info.dlen,
+ data, tlv_data->svc_info.dlen);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy svc info data\n"));
+ goto fail;
+ }
+ break;
+ }
+ case WL_NAN_XTLV_SD_NAN_AF:
+ case WL_NAN_XTLV_DAM_NA_ATTR:
+ /* No action - intentionally added to avoid prints when these events are received */
+ break;
+ default:
+ WL_ERR(("Not available for tlv type = 0x%x\n", type));
+ ret = BCME_ERROR;
+ break;
+ }
+fail:
+ NAN_DBG_EXIT();
+ return ret;
+}
+
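+/*
+ * Compute the space one batch sub-command occupies: the sub-command
+ * header up to its data field plus the payload rounded up to a 4-byte
+ * boundary, failing with BCME_NOMEM if that exceeds the remaining buffer.
+ */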
+int
+wl_cfg_nan_check_cmd_len(uint16 nan_iov_len, uint16 data_size,
+ uint16 *subcmd_len)
+{
+ s32 ret = BCME_OK;
+
+ if (subcmd_len != NULL) {
+ *subcmd_len = OFFSETOF(bcm_iov_batch_subcmd_t, data) +
+ ALIGN_SIZE(data_size, 4);
+ if (*subcmd_len > nan_iov_len) {
+ WL_ERR(("%s: Buf short, requested:%d, available:%d\n",
+ __FUNCTION__, *subcmd_len, nan_iov_len));
+ ret = BCME_NOMEM;
+ }
+ } else {
+ WL_ERR(("Invalid subcmd_len\n"));
+ ret = BCME_ERROR;
+ }
+ return ret;
+}
+
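+/*
+ * Configure the extended NAN event mask with a read-modify-write: unless
+ * all events are being disabled, the current mask is first read back from
+ * the firmware, the bits the host cares about are set or cleared on top
+ * of it, and the merged mask is written back in a second batch ioctl.
+ */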
+int
+wl_cfgnan_config_eventmask(struct net_device *ndev, struct bcm_cfg80211 *cfg,
+ uint8 event_ind_flag, bool disable_events)
+{
+ bcm_iov_batch_buf_t *nan_buf = NULL;
+ s32 ret = BCME_OK;
+ uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
+ uint16 subcmd_len;
+ uint32 status;
+ bcm_iov_batch_subcmd_t *sub_cmd = NULL;
+ bcm_iov_batch_subcmd_t *sub_cmd_resp = NULL;
+ uint8 event_mask[WL_NAN_EVMASK_EXTN_LEN];
+ wl_nan_evmask_extn_t *evmask;
+ uint16 evmask_cmd_len;
+ uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
+
+ NAN_DBG_ENTER();
+
+ /* same src and dest len here */
+ bzero(event_mask, sizeof(event_mask));
+ evmask_cmd_len = OFFSETOF(wl_nan_evmask_extn_t, evmask) +
+ sizeof(event_mask);
+ ret = wl_add_remove_eventmsg(ndev, WLC_E_NAN, true);
+ if (unlikely(ret)) {
+ WL_ERR((" nan event enable failed, error = %d \n", ret));
+ goto fail;
+ }
+
+ nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
+ if (!nan_buf) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ ret = BCME_NOMEM;
+ goto fail;
+ }
+
+ nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
+ nan_buf->count = 0;
+ nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
+ sub_cmd = (bcm_iov_batch_subcmd_t*)(uint8 *)(&nan_buf->cmds[0]);
+
+ ret = wl_cfg_nan_check_cmd_len(nan_buf_size,
+ evmask_cmd_len, &subcmd_len);
+ if (unlikely(ret)) {
+ WL_ERR(("nan_sub_cmd check failed\n"));
+ goto fail;
+ }
+
+ sub_cmd->id = htod16(WL_NAN_CMD_CFG_EVENT_MASK);
+ sub_cmd->len = sizeof(sub_cmd->u.options) + evmask_cmd_len;
+ sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
+ evmask = (wl_nan_evmask_extn_t *)sub_cmd->data;
+ evmask->ver = WL_NAN_EVMASK_EXTN_VER;
+ evmask->len = WL_NAN_EVMASK_EXTN_LEN;
+ nan_buf_size -= subcmd_len;
+ nan_buf->count = 1;
+
+ if (disable_events) {
+ WL_DBG(("Disabling all nan events..except stop event\n"));
+ setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_STOP));
+ } else {
+ /*
+ * Android framework event mask configuration.
+ */
+ nan_buf->is_set = false;
+ memset(resp_buf, 0, sizeof(resp_buf));
+ ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
+ (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
+ if (unlikely(ret) || unlikely(status)) {
+ WL_ERR(("get nan event mask failed ret %d status %d \n",
+ ret, status));
+ goto fail;
+ }
+ sub_cmd_resp = &((bcm_iov_batch_buf_t *)(resp_buf))->cmds[0];
+ evmask = (wl_nan_evmask_extn_t *)sub_cmd_resp->data;
+
+ /* check the response buff */
+ /* same src and dest len here */
+ (void)memcpy_s(&event_mask, WL_NAN_EVMASK_EXTN_LEN,
+ (uint8*)&evmask->evmask, WL_NAN_EVMASK_EXTN_LEN);
+
+ if (event_ind_flag) {
+ /* FIXME:BIT0 - Disable disc mac addr change event indication */
+ if (CHECK_BIT(event_ind_flag, WL_NAN_EVENT_DIC_MAC_ADDR_BIT)) {
+ WL_DBG(("Need to add disc mac addr change event\n"));
+ }
+ /* BIT2 - Disable nan cluster join indication (OTA). */
+ if (CHECK_BIT(event_ind_flag, WL_NAN_EVENT_JOIN_EVENT)) {
+ clrbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_MERGE));
+ }
+ }
+
+ setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_DISCOVERY_RESULT));
+ setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_RECEIVE));
+ setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_TERMINATED));
+ setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_STOP));
+ setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_TXS));
+ setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_PEER_DATAPATH_IND));
+ setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_DATAPATH_ESTB));
+ setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_DATAPATH_END));
+ setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_RNG_REQ_IND));
+ setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_RNG_TERM_IND));
+ setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_DISC_CACHE_TIMEOUT));
+ /* Disable below events by default */
+ clrbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_PEER_SCHED_UPD_NOTIF));
+ clrbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_RNG_RPT_IND));
+ clrbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_DW_END));
+ }
+
+ nan_buf->is_set = true;
+ evmask = (wl_nan_evmask_extn_t *)sub_cmd->data;
+ /* same src and dest len here */
+ (void)memcpy_s((uint8*)&evmask->evmask, sizeof(event_mask),
+ &event_mask, sizeof(event_mask));
+
+ nan_buf_size = (NAN_IOCTL_BUF_SIZE - nan_buf_size);
+ ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
+ (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
+ if (unlikely(ret) || unlikely(status)) {
+ WL_ERR(("set nan event mask failed ret %d status %d \n", ret, status));
+ goto fail;
+ }
+ WL_DBG(("set nan event mask successfull\n"));
+
+fail:
+ if (nan_buf) {
+ MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
+ }
+ NAN_DBG_EXIT();
+ return ret;
+}
+
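+/*
+ * Program a NAN availability schedule (local/NDC/ranging) when the
+ * firmware lacks auto-DAM support: a single wl_avail_entry_t is built
+ * with a committed slot bitmap (from cmd_data->bmap or the default
+ * bitmap string), a period, a bit duration and a channel, and is written
+ * only if a query shows no entry of that type exists yet (BCME_NOTFOUND).
+ */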
+static int
+wl_cfgnan_set_nan_avail(struct net_device *ndev,
+ struct bcm_cfg80211 *cfg, nan_avail_cmd_data *cmd_data, uint8 avail_type)
+{
+ bcm_iov_batch_buf_t *nan_buf = NULL;
+ s32 ret = BCME_OK;
+ uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
+ uint16 subcmd_len;
+ bcm_iov_batch_subcmd_t *sub_cmd = NULL;
+ wl_nan_iov_t *nan_iov_data = NULL;
+ wl_avail_t *avail = NULL;
+ wl_avail_entry_t *entry; /* used for filling entry structure */
+ uint8 *p; /* tracking pointer */
+ uint8 i;
+ u32 status;
+ int c;
+ char ndc_id[ETHER_ADDR_LEN] = { 0x50, 0x6f, 0x9a, 0x01, 0x0, 0x0 };
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(ndev);
+ char *a = WL_AVAIL_BIT_MAP;
+ uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
+
+ NAN_DBG_ENTER();
+
+ /* Do not disturb avail if dam is supported */
+ if (FW_SUPPORTED(dhdp, autodam)) {
+ WL_DBG(("DAM is supported, avail modification not allowed\n"));
+ return ret;
+ }
+
+ if (avail_type < WL_AVAIL_LOCAL || avail_type > WL_AVAIL_TYPE_MAX) {
+ WL_ERR(("Invalid availability type\n"));
+ ret = BCME_USAGE_ERROR;
+ goto fail;
+ }
+
+ nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
+ if (!nan_buf) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ ret = BCME_NOMEM;
+ goto fail;
+ }
+
+ nan_iov_data = MALLOCZ(cfg->osh, sizeof(*nan_iov_data));
+ if (!nan_iov_data) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ ret = BCME_NOMEM;
+ goto fail;
+ }
+
+ nan_iov_data->nan_iov_len = NAN_IOCTL_BUF_SIZE;
+ nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
+ nan_buf->count = 0;
+ nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
+ nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
+
+ sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
+ ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
+ sizeof(*avail), &subcmd_len);
+ if (unlikely(ret)) {
+ WL_ERR(("nan_sub_cmd check failed\n"));
+ goto fail;
+ }
+ avail = (wl_avail_t *)sub_cmd->data;
+
+ /* populate wl_avail_type */
+ avail->flags = avail_type;
+ if (avail_type == WL_AVAIL_RANGING) {
+ ret = memcpy_s(&avail->addr, ETHER_ADDR_LEN,
+ &cmd_data->peer_nmi, ETHER_ADDR_LEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy peer nmi\n"));
+ goto fail;
+ }
+ }
+
+ sub_cmd->len = sizeof(sub_cmd->u.options) + subcmd_len;
+ sub_cmd->id = htod16(WL_NAN_CMD_CFG_AVAIL);
+ sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
+
+ nan_buf->is_set = false;
+ nan_buf->count++;
+ nan_iov_data->nan_iov_len -= subcmd_len;
+ nan_buf_size = (NAN_IOCTL_BUF_SIZE - nan_iov_data->nan_iov_len);
+
+ WL_TRACE(("Read wl nan avail status\n"));
+
+ bzero(resp_buf, sizeof(resp_buf));
+ ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
+ (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
+ if (unlikely(ret)) {
+ WL_ERR(("\n Get nan avail failed ret %d, status %d \n", ret, status));
+ goto fail;
+ }
+
+ if (status == BCME_NOTFOUND) {
+ nan_buf->count = 0;
+ nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
+ nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
+
+ sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
+
+ avail = (wl_avail_t *)sub_cmd->data;
+ p = avail->entry;
+
+ /* populate wl_avail fields */
+ avail->length = OFFSETOF(wl_avail_t, entry);
+ avail->flags = avail_type;
+ avail->num_entries = 0;
+ avail->id = 0;
+ entry = (wl_avail_entry_t*)p;
+ entry->flags = WL_AVAIL_ENTRY_COM;
+
+ /* set default values for optional parameters */
+ entry->start_offset = 0;
+ entry->u.band = 0;
+
+ if (cmd_data->avail_period) {
+ entry->period = cmd_data->avail_period;
+ } else {
+ entry->period = WL_AVAIL_PERIOD_1024;
+ }
+
+ if (cmd_data->duration != NAN_BAND_INVALID) {
+ entry->flags |= (3 << WL_AVAIL_ENTRY_USAGE_SHIFT) |
+ (cmd_data->duration << WL_AVAIL_ENTRY_BIT_DUR_SHIFT);
+ } else {
+ entry->flags |= (3 << WL_AVAIL_ENTRY_USAGE_SHIFT) |
+ (WL_AVAIL_BIT_DUR_16 << WL_AVAIL_ENTRY_BIT_DUR_SHIFT);
+ }
+ entry->bitmap_len = 0;
+
+ if (avail_type == WL_AVAIL_LOCAL) {
+ entry->flags |= 1 << WL_AVAIL_ENTRY_CHAN_SHIFT;
+ /* Check for 5g support, based on that choose 5g channel */
+ if (cfg->nancfg->support_5g) {
+ entry->u.channel_info =
+ htod32(wf_channel2chspec(WL_AVAIL_CHANNEL_5G,
+ WL_AVAIL_BANDWIDTH_5G));
+ } else {
+ entry->u.channel_info =
+ htod32(wf_channel2chspec(WL_AVAIL_CHANNEL_2G,
+ WL_AVAIL_BANDWIDTH_2G));
+ }
+ entry->flags = htod16(entry->flags);
+ }
+
+ if (cfg->nancfg->support_5g) {
+ a = WL_5G_AVAIL_BIT_MAP;
+ }
+
+ /* point to bitmap value for processing */
+ if (cmd_data->bmap) {
+ for (c = (WL_NAN_EVENT_CLEAR_BIT-1); c >= 0; c--) {
+ i = cmd_data->bmap >> c;
+ if (i & 1) {
+ setbit(entry->bitmap, (WL_NAN_EVENT_CLEAR_BIT-c-1));
+ }
+ }
+ } else {
+ for (i = 0; i < strlen(WL_AVAIL_BIT_MAP); i++) {
+ if (*a == '1') {
+ setbit(entry->bitmap, i);
+ }
+ a++;
+ }
+ }
+
+ /* account for partially filled most significant byte */
+ entry->bitmap_len = ((WL_NAN_EVENT_CLEAR_BIT) + NBBY - 1) / NBBY;
+ if (avail_type == WL_AVAIL_NDC) {
+ ret = memcpy_s(&avail->addr, ETHER_ADDR_LEN,
+ ndc_id, ETHER_ADDR_LEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy ndc id\n"));
+ goto fail;
+ }
+ } else if (avail_type == WL_AVAIL_RANGING) {
+ ret = memcpy_s(&avail->addr, ETHER_ADDR_LEN,
+ &cmd_data->peer_nmi, ETHER_ADDR_LEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy peer nmi\n"));
+ goto fail;
+ }
+ }
+
+ /* update wl_avail and populate wl_avail_entry */
+ entry->length = OFFSETOF(wl_avail_entry_t, bitmap) + entry->bitmap_len;
+ avail->num_entries++;
+ avail->length += entry->length;
+ /* advance pointer for next entry */
+ p += entry->length;
+
+ /* convert to dongle endianness */
+ entry->length = htod16(entry->length);
+ entry->start_offset = htod16(entry->start_offset);
+ entry->u.channel_info = htod32(entry->u.channel_info);
+ entry->flags = htod16(entry->flags);
+ /* update avail_len only if
+ * there are avail entries
+ */
+ if (avail->num_entries) {
+ nan_iov_data->nan_iov_len -= avail->length;
+ avail->length = htod16(avail->length);
+ avail->flags = htod16(avail->flags);
+ }
+
+ sub_cmd->id = htod16(WL_NAN_CMD_CFG_AVAIL);
+ sub_cmd->len = sizeof(sub_cmd->u.options) + avail->length;
+ sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
+
+ nan_buf->is_set = true;
+ nan_buf->count++;
+
+ /* Reduce the iov_len size by subcmd_len */
+ nan_iov_data->nan_iov_len -= subcmd_len;
+ nan_buf_size = (NAN_IOCTL_BUF_SIZE - nan_iov_data->nan_iov_len);
+
+ ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
+ (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
+ if (unlikely(ret) || unlikely(status)) {
+ WL_ERR(("\n set nan avail failed ret %d status %d \n", ret, status));
+ ret = status;
+ goto fail;
+ }
+ } else if (status == BCME_OK) {
+ WL_DBG(("Avail type [%d] found to be configured\n", avail_type));
+ } else {
+ WL_ERR(("set nan avail failed ret %d status %d \n", ret, status));
+ }
+
+fail:
+ if (nan_buf) {
+ MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
+ }
+ if (nan_iov_data) {
+ MFREE(cfg->osh, nan_iov_data, sizeof(*nan_iov_data));
+ }
+
+ NAN_DBG_EXIT();
+ return ret;
+}
+
+/* API to configure nan ctrl and nan ctrl2 commands */
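+/*
+ * Both cfg ctrl words are updated read-modify-write: the current value is
+ * fetched with a get, flag1 (and flag2 for ctrl2) is ORed in or cleared
+ * according to 'set', and the merged value is written back.
+ */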
+static int
+wl_cfgnan_config_control_flag(struct net_device *ndev, struct bcm_cfg80211 *cfg,
+ uint32 flag1, uint32 flag2, uint16 cmd_id, uint32 *status, bool set)
+{
+ bcm_iov_batch_buf_t *nan_buf = NULL;
+ s32 ret = BCME_OK;
+ uint16 nan_iov_start, nan_iov_end;
+ uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
+ uint16 subcmd_len;
+ bcm_iov_batch_subcmd_t *sub_cmd = NULL;
+ bcm_iov_batch_subcmd_t *sub_cmd_resp = NULL;
+ wl_nan_iov_t *nan_iov_data = NULL;
+ uint32 *cfg_ctrl;
+ uint16 cfg_ctrl_size;
+ uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
+
+ NAN_DBG_ENTER();
+ nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
+ if (!nan_buf) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ ret = BCME_NOMEM;
+ goto fail;
+ }
+
+ nan_iov_data = MALLOCZ(cfg->osh, sizeof(*nan_iov_data));
+ if (!nan_iov_data) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ ret = BCME_NOMEM;
+ goto fail;
+ }
+
+ if (cmd_id == WL_NAN_CMD_CFG_NAN_CONFIG) {
+ cfg_ctrl_size = sizeof(wl_nan_cfg_ctrl_t);
+ } else if (cmd_id == WL_NAN_CMD_CFG_NAN_CONFIG2) {
+ cfg_ctrl_size = sizeof(wl_nan_cfg_ctrl2_t);
+ } else {
+ ret = BCME_BADARG;
+ goto fail;
+ }
+
+ nan_iov_data->nan_iov_len = nan_iov_start = NAN_IOCTL_BUF_SIZE;
+ nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
+ nan_buf->count = 0;
+ nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
+ nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
+ sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
+
+ ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
+ cfg_ctrl_size, &subcmd_len);
+ if (unlikely(ret)) {
+ WL_ERR(("nan_sub_cmd check failed\n"));
+ goto fail;
+ }
+
+ sub_cmd->id = htod16(cmd_id);
+ sub_cmd->len = sizeof(sub_cmd->u.options) + cfg_ctrl_size;
+ sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
+
+ nan_buf->is_set = false;
+ nan_buf->count++;
+
+ /* Reduce the iov_len size by subcmd_len */
+ nan_iov_data->nan_iov_len -= subcmd_len;
+ nan_iov_end = nan_iov_data->nan_iov_len;
+ nan_buf_size = (nan_iov_start - nan_iov_end);
+
+ bzero(resp_buf, sizeof(resp_buf));
+ ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, status,
+ (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
+ if (unlikely(ret) || unlikely(*status)) {
+ WL_ERR(("get nan cfg ctrl failed ret %d status %d \n", ret, *status));
+ goto fail;
+ }
+ sub_cmd_resp = &((bcm_iov_batch_buf_t *)(resp_buf))->cmds[0];
+
+ /* check the response buff */
+ if (cmd_id == WL_NAN_CMD_CFG_NAN_CONFIG) {
+ wl_nan_cfg_ctrl_t *cfg_ctrl1;
+ cfg_ctrl1 = ((wl_nan_cfg_ctrl_t *)&sub_cmd_resp->data[0]);
+ if (set) {
+ *cfg_ctrl1 |= flag1;
+ } else {
+ *cfg_ctrl1 &= ~flag1;
+ }
+ cfg_ctrl = cfg_ctrl1;
+ WL_INFORM_MEM(("%s: Modifying nan ctrl flag %x val %d\n",
+ __FUNCTION__, flag1, set));
+ } else {
+ wl_nan_cfg_ctrl2_t *cfg_ctrl2;
+ cfg_ctrl2 = ((wl_nan_cfg_ctrl2_t *)&sub_cmd_resp->data[0]);
+ if (set) {
+ cfg_ctrl2->flags1 |= flag1;
+ cfg_ctrl2->flags2 |= flag2;
+ } else {
+ cfg_ctrl2->flags1 &= ~flag1;
+ cfg_ctrl2->flags2 &= ~flag2;
+ }
+ cfg_ctrl = (uint32 *)cfg_ctrl2;
+ WL_INFORM_MEM(("%s: Modifying nan ctrl2 flag1 %x flag2 %x val %d\n",
+ __FUNCTION__, flag1, flag2, set));
+ }
+ ret = memcpy_s(sub_cmd->data, cfg_ctrl_size, cfg_ctrl, cfg_ctrl_size);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy cfg ctrl\n"));
+ goto fail;
+ }
+
+ nan_buf->is_set = true;
+ ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, status,
+ (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
+ if (unlikely(ret) || unlikely(*status)) {
+ WL_ERR(("set nan cfg ctrl failed ret %d status %d \n", ret, *status));
+ goto fail;
+ }
+ WL_DBG(("set nan cfg ctrl successfull\n"));
+fail:
+ if (nan_buf) {
+ MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
+ }
+ if (nan_iov_data) {
+ MFREE(cfg->osh, nan_iov_data, sizeof(*nan_iov_data));
+ }
+
+ NAN_DBG_EXIT();
+ return ret;
+}
+
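+/*
+ * bcm_unpack_xtlv_buf() callback: the first 4 bytes of every sub-command
+ * response carry its status. Counting down b_resp->count lets the parser
+ * stop with BCME_IOV_LAST_CMD once all expected responses are consumed.
+ */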
+static int
+wl_cfgnan_get_iovars_status(void *ctx, const uint8 *data, uint16 type, uint16 len)
+{
+ bcm_iov_batch_buf_t *b_resp = (bcm_iov_batch_buf_t *)ctx;
+ uint32 status;
+ /* if all tlvs are parsed, we should not be here */
+ if (b_resp->count == 0) {
+ return BCME_BADLEN;
+ }
+
+ /* each response must carry at least the 4-byte status word */
+ if (len < sizeof(status)) {
+ return BCME_BUFTOOSHORT;
+ }
+
+ /* first 4 bytes contain the status */
+ if (memcpy_s(&status, sizeof(status),
+ data, sizeof(uint32)) != BCME_OK) {
+ WL_ERR(("Failed to copy status\n"));
+ goto exit;
+ }
+
+ status = dtoh32(status);
+
+ /* If status is non zero */
+ if (status != BCME_OK) {
+ printf("cmd type %d failed, status: %04x\n", type, status);
+ goto exit;
+ }
+
+ if (b_resp->count > 0) {
+ b_resp->count--;
+ }
+
+ if (!b_resp->count) {
+ status = BCME_IOV_LAST_CMD;
+ }
+exit:
+ return status;
+}
+
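+/*
+ * Issue a batched "nan" iovar. For a set, the response in resp_buf is
+ * prefixed with the iovar name ("nan" plus its NUL), so the reply is
+ * parsed from resp_buf + strlen("nan") + 1; for a get it starts at the
+ * buffer head. Per-sub-command status is extracted by the callback above.
+ */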
+static int
+wl_cfgnan_execute_ioctl(struct net_device *ndev, struct bcm_cfg80211 *cfg,
+ bcm_iov_batch_buf_t *nan_buf, uint16 nan_buf_size, uint32 *status,
+ uint8 *resp_buf, uint16 resp_buf_size)
+{
+ int ret = BCME_OK;
+ uint16 tlvs_len;
+ int res = BCME_OK;
+ bcm_iov_batch_buf_t *p_resp = NULL;
+ char *iov = "nan";
+ int max_resp_len = WLC_IOCTL_MAXLEN;
+
+ WL_DBG(("Enter:\n"));
+ if (nan_buf->is_set) {
+ ret = wldev_iovar_setbuf(ndev, "nan", nan_buf, nan_buf_size,
+ resp_buf, resp_buf_size, NULL);
+ p_resp = (bcm_iov_batch_buf_t *)(resp_buf + strlen(iov) + 1);
+ } else {
+ ret = wldev_iovar_getbuf(ndev, "nan", nan_buf, nan_buf_size,
+ resp_buf, resp_buf_size, NULL);
+ p_resp = (bcm_iov_batch_buf_t *)(resp_buf);
+ }
+ if (unlikely(ret)) {
+ WL_ERR((" nan execute ioctl failed, error = %d \n", ret));
+ goto fail;
+ }
+
+ p_resp->is_set = nan_buf->is_set;
+ tlvs_len = max_resp_len - OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
+
+ /* Extract the tlvs and print their resp in cb fn */
+ res = bcm_unpack_xtlv_buf((void *)p_resp, (const uint8 *)&p_resp->cmds[0],
+ tlvs_len, BCM_IOV_CMD_OPT_ALIGN32, wl_cfgnan_get_iovars_status);
+
+ if (res == BCME_IOV_LAST_CMD) {
+ res = BCME_OK;
+ }
+fail:
+ *status = res;
+ WL_DBG((" nan ioctl ret %d status %d \n", ret, *status));
+ return ret;
+
+}
+
+static int
+wl_cfgnan_if_addr_handler(void *p_buf, uint16 *nan_buf_size,
+ struct ether_addr *if_addr)
+{
+ /* nan enable */
+ s32 ret = BCME_OK;
+ uint16 subcmd_len;
+
+ NAN_DBG_ENTER();
+
+ if (p_buf != NULL) {
+ bcm_iov_batch_subcmd_t *sub_cmd = (bcm_iov_batch_subcmd_t*)(p_buf);
+
+ ret = wl_cfg_nan_check_cmd_len(*nan_buf_size,
+ sizeof(*if_addr), &subcmd_len);
+ if (unlikely(ret)) {
+ WL_ERR(("nan_sub_cmd check failed\n"));
+ goto fail;
+ }
+
+ /* Fill the sub_command block */
+ sub_cmd->id = htod16(WL_NAN_CMD_CFG_IF_ADDR);
+ sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(*if_addr);
+ sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
+ ret = memcpy_s(sub_cmd->data, sizeof(*if_addr),
+ (uint8 *)if_addr, sizeof(*if_addr));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy if addr\n"));
+ goto fail;
+ }
+
+ *nan_buf_size -= subcmd_len;
+ } else {
+ WL_ERR(("nan_iov_buf is NULL\n"));
+ ret = BCME_ERROR;
+ goto fail;
+ }
+
+fail:
+ NAN_DBG_EXIT();
+ return ret;
+}
+
+static int
+wl_cfgnan_get_ver(struct net_device *ndev, struct bcm_cfg80211 *cfg)
+{
+ bcm_iov_batch_buf_t *nan_buf = NULL;
+ s32 ret = BCME_OK;
+ uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
+ wl_nan_ver_t *nan_ver = NULL;
+ uint16 subcmd_len;
+ uint32 status;
+ bcm_iov_batch_subcmd_t *sub_cmd = NULL;
+ bcm_iov_batch_subcmd_t *sub_cmd_resp = NULL;
+ uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
+
+ NAN_DBG_ENTER();
+ nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
+ if (!nan_buf) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ ret = BCME_NOMEM;
+ goto fail;
+ }
+
+ nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
+ nan_buf->count = 0;
+ nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
+ sub_cmd = (bcm_iov_batch_subcmd_t*)(uint8 *)(&nan_buf->cmds[0]);
+
+ ret = wl_cfg_nan_check_cmd_len(nan_buf_size,
+ sizeof(*nan_ver), &subcmd_len);
+ if (unlikely(ret)) {
+ WL_ERR(("nan_sub_cmd check failed\n"));
+ goto fail;
+ }
+
+ nan_ver = (wl_nan_ver_t *)sub_cmd->data;
+ sub_cmd->id = htod16(WL_NAN_CMD_GLB_NAN_VER);
+ sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(*nan_ver);
+ sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
+ nan_buf_size -= subcmd_len;
+ nan_buf->count = 1;
+
+ nan_buf->is_set = false;
+ bzero(resp_buf, sizeof(resp_buf));
+ nan_buf_size = NAN_IOCTL_BUF_SIZE - nan_buf_size;
+
+ ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
+ (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
+ if (unlikely(ret) || unlikely(status)) {
+ WL_ERR(("get nan ver failed ret %d status %d \n",
+ ret, status));
+ goto fail;
+ }
+
+ sub_cmd_resp = &((bcm_iov_batch_buf_t *)(resp_buf))->cmds[0];
+ nan_ver = ((wl_nan_ver_t *)&sub_cmd_resp->data[0]);
+ if (!nan_ver) {
+ ret = BCME_NOTFOUND;
+ WL_ERR(("nan_ver not found: err = %d\n", ret));
+ goto fail;
+ }
+ cfg->nancfg->version = *nan_ver;
+ WL_INFORM_MEM(("Nan Version is %d\n", cfg->nancfg->version));
+
+fail:
+ if (nan_buf) {
+ MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
+ }
+ NAN_DBG_EXIT();
+ return ret;
+
+}
+
+static int
+wl_cfgnan_set_if_addr(struct bcm_cfg80211 *cfg)
+{
+ s32 ret = BCME_OK;
+ uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
+ uint32 status;
+ uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
+ struct ether_addr if_addr;
+ uint8 buf[NAN_IOCTL_BUF_SIZE];
+ bcm_iov_batch_buf_t *nan_buf = (bcm_iov_batch_buf_t*)buf;
+ bool rand_mac = cfg->nancfg->mac_rand;
+
+ nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
+ nan_buf->count = 0;
+ nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
+ if (rand_mac) {
+ RANDOM_BYTES(if_addr.octet, 6);
+ /* restore mcast and local admin bits to 0 and 1 */
+ ETHER_SET_UNICAST(if_addr.octet);
+ ETHER_SET_LOCALADDR(if_addr.octet);
+ } else {
+ /* Use primary MAC with the locally administered bit for the
+ * NAN NMI I/F
+ */
+ if (wl_get_vif_macaddr(cfg, WL_IF_TYPE_NAN_NMI,
+ if_addr.octet) != BCME_OK) {
+ ret = -EINVAL;
+ WL_ERR(("Failed to get mac addr for NMI\n"));
+ goto fail;
+ }
+ }
+ WL_INFORM_MEM(("%s: NMI " MACDBG "\n",
+ __FUNCTION__, MAC2STRDBG(if_addr.octet)));
+ ret = wl_cfgnan_if_addr_handler(&nan_buf->cmds[0],
+ &nan_buf_size, &if_addr);
+ if (unlikely(ret)) {
+ WL_ERR(("Nan if addr handler sub_cmd set failed\n"));
+ goto fail;
+ }
+ nan_buf->count++;
+ nan_buf->is_set = true;
+ nan_buf_size = NAN_IOCTL_BUF_SIZE - nan_buf_size;
+ bzero(resp_buf, sizeof(resp_buf));
+ ret = wl_cfgnan_execute_ioctl(bcmcfg_to_prmry_ndev(cfg), cfg,
+ nan_buf, nan_buf_size, &status,
+ (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
+ if (unlikely(ret) || unlikely(status)) {
+ WL_ERR(("nan if addr handler failed ret %d status %d\n",
+ ret, status));
+ goto fail;
+ }
+ ret = memcpy_s(cfg->nancfg->nan_nmi_mac, ETH_ALEN,
+ if_addr.octet, ETH_ALEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy nmi addr\n"));
+ goto fail;
+ }
+ return ret;
+fail:
+ if (!rand_mac) {
+ wl_release_vif_macaddr(cfg, if_addr.octet, WL_IF_TYPE_NAN_NMI);
+ }
+
+ return ret;
+}
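+
+/*
+ * Sketch of the address bit handling used in wl_cfgnan_set_if_addr() above,
+ * assuming the usual ETHER_SET_UNICAST/ETHER_SET_LOCALADDR semantics:
+ *
+ *   if_addr.octet[0] &= ~0x01;   clear the multicast bit (unicast)
+ *   if_addr.octet[0] |=  0x02;   set the locally administered bit
+ *
+ * A randomized NMI therefore always has 2, 6, A or E as the second nibble
+ * of its first octet, keeping it out of the globally assigned OUI space.
+ */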
+
+static int
+wl_cfgnan_init_handler(void *p_buf, uint16 *nan_buf_size, bool val)
+{
+ /* nan enable */
+ s32 ret = BCME_OK;
+ uint16 subcmd_len;
+
+ NAN_DBG_ENTER();
+
+ if (p_buf != NULL) {
+ bcm_iov_batch_subcmd_t *sub_cmd = (bcm_iov_batch_subcmd_t*)(p_buf);
+
+ ret = wl_cfg_nan_check_cmd_len(*nan_buf_size,
+ sizeof(val), &subcmd_len);
+ if (unlikely(ret)) {
+ WL_ERR(("nan_sub_cmd check failed\n"));
+ goto fail;
+ }
+
+ /* Fill the sub_command block */
+ sub_cmd->id = htod16(WL_NAN_CMD_CFG_NAN_INIT);
+ sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(uint8);
+ sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
+ ret = memcpy_s(sub_cmd->data, sizeof(uint8),
+ (uint8*)&val, sizeof(uint8));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy init value\n"));
+ goto fail;
+ }
+
+ *nan_buf_size -= subcmd_len;
+ } else {
+ WL_ERR(("nan_iov_buf is NULL\n"));
+ ret = BCME_ERROR;
+ goto fail;
+ }
+
+fail:
+ NAN_DBG_EXIT();
+ return ret;
+}
+
+static int
+wl_cfgnan_enable_handler(wl_nan_iov_t *nan_iov_data, bool val)
+{
+ /* nan enable */
+ s32 ret = BCME_OK;
+ bcm_iov_batch_subcmd_t *sub_cmd = NULL;
+ uint16 subcmd_len;
+
+ NAN_DBG_ENTER();
+
+ sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
+
+ ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
+ sizeof(val), &subcmd_len);
+ if (unlikely(ret)) {
+ WL_ERR(("nan_sub_cmd check failed\n"));
+ return ret;
+ }
+
+ /* Fill the sub_command block */
+ sub_cmd->id = htod16(WL_NAN_CMD_CFG_NAN_ENAB);
+ sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(uint8);
+ sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
+ ret = memcpy_s(sub_cmd->data, sizeof(uint8),
+ (uint8*)&val, sizeof(uint8));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy enab value\n"));
+ return ret;
+ }
+
+ nan_iov_data->nan_iov_len -= subcmd_len;
+ nan_iov_data->nan_iov_buf += subcmd_len;
+ NAN_DBG_EXIT();
+ return ret;
+}
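+
+/*
+ * The per-setting handlers below all share the sub-command framing pattern
+ * used in wl_cfgnan_enable_handler() above. Illustrative sketch only;
+ * "payload_t" is a placeholder for the per-command payload type:
+ *
+ *   sub_cmd = (bcm_iov_batch_subcmd_t *)nan_iov_data->nan_iov_buf;
+ *   ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
+ *           sizeof(payload_t), &subcmd_len);    <- aligned total length
+ *   sub_cmd->id  = htod16(WL_NAN_CMD_xxx);
+ *   sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(payload_t);
+ *   sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
+ *   ... fill sub_cmd->data ...
+ *   nan_iov_data->nan_iov_len -= subcmd_len;    <- shrink remaining space
+ *   nan_iov_data->nan_iov_buf += subcmd_len;    <- advance the write cursor
+ */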
+
+static int
+wl_cfgnan_warmup_time_handler(nan_config_cmd_data_t *cmd_data,
+ wl_nan_iov_t *nan_iov_data)
+{
+ /* wl nan warm_up_time */
+ s32 ret = BCME_OK;
+ bcm_iov_batch_subcmd_t *sub_cmd = NULL;
+ wl_nan_warmup_time_ticks_t *wup_ticks = NULL;
+ uint16 subcmd_len;
+ NAN_DBG_ENTER();
+
+ sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
+ wup_ticks = (wl_nan_warmup_time_ticks_t *)sub_cmd->data;
+
+ ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
+ sizeof(*wup_ticks), &subcmd_len);
+ if (unlikely(ret)) {
+ WL_ERR(("nan_sub_cmd check failed\n"));
+ return ret;
+ }
+ /* Fill the sub_command block */
+ sub_cmd->id = htod16(WL_NAN_CMD_CFG_WARMUP_TIME);
+ sub_cmd->len = sizeof(sub_cmd->u.options) +
+ sizeof(*wup_ticks);
+ sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
+ *wup_ticks = cmd_data->warmup_time;
+
+ nan_iov_data->nan_iov_len -= subcmd_len;
+ nan_iov_data->nan_iov_buf += subcmd_len;
+
+ NAN_DBG_EXIT();
+ return ret;
+}
+
+static int
+wl_cfgnan_set_election_metric(nan_config_cmd_data_t *cmd_data,
+ wl_nan_iov_t *nan_iov_data, uint32 nan_attr_mask)
+{
+ s32 ret = BCME_OK;
+ bcm_iov_batch_subcmd_t *sub_cmd = NULL;
+ wl_nan_election_metric_config_t *metrics = NULL;
+ uint16 subcmd_len;
+ NAN_DBG_ENTER();
+
+ sub_cmd =
+ (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
+ ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
+ sizeof(*metrics), &subcmd_len);
+ if (unlikely(ret)) {
+ WL_ERR(("nan_sub_cmd check failed\n"));
+ goto fail;
+ }
+
+ metrics = (wl_nan_election_metric_config_t *)sub_cmd->data;
+
+ if (nan_attr_mask & NAN_ATTR_RAND_FACTOR_CONFIG) {
+ metrics->random_factor = (uint8)cmd_data->metrics.random_factor;
+ }
+
+ if ((!cmd_data->metrics.master_pref) ||
+ (cmd_data->metrics.master_pref > NAN_MAXIMUM_MASTER_PREFERENCE)) {
+ WL_TRACE(("Master Pref is 0 or greater than 254, hence sending random value\n"));
+		/* Master pref for mobile devices can be from 1 - 127 as per Spec Appendix C */
+ metrics->master_pref = (RANDOM32()%(NAN_MAXIMUM_MASTER_PREFERENCE/2)) + 1;
+ } else {
+ metrics->master_pref = (uint8)cmd_data->metrics.master_pref;
+ }
+ sub_cmd->id = htod16(WL_NAN_CMD_ELECTION_METRICS_CONFIG);
+ sub_cmd->len = sizeof(sub_cmd->u.options) +
+ sizeof(*metrics);
+ sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
+
+ nan_iov_data->nan_iov_len -= subcmd_len;
+ nan_iov_data->nan_iov_buf += subcmd_len;
+
+fail:
+ NAN_DBG_EXIT();
+ return ret;
+}
+
+static int
+wl_cfgnan_set_rssi_proximity(nan_config_cmd_data_t *cmd_data,
+ wl_nan_iov_t *nan_iov_data, uint32 nan_attr_mask)
+{
+ s32 ret = BCME_OK;
+ bcm_iov_batch_subcmd_t *sub_cmd = NULL;
+ wl_nan_rssi_notif_thld_t *rssi_notif_thld = NULL;
+ uint16 subcmd_len;
+
+ NAN_DBG_ENTER();
+ sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
+
+ rssi_notif_thld = (wl_nan_rssi_notif_thld_t *)sub_cmd->data;
+
+ ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
+ sizeof(*rssi_notif_thld), &subcmd_len);
+ if (unlikely(ret)) {
+ WL_ERR(("nan_sub_cmd check failed\n"));
+ return ret;
+ }
+ if (nan_attr_mask & NAN_ATTR_RSSI_PROXIMITY_2G_CONFIG) {
+ rssi_notif_thld->bcn_rssi_2g =
+ cmd_data->rssi_attr.rssi_proximity_2dot4g_val;
+ } else {
+		/* Keep the RSSI threshold at the -70 dBm default */
+ rssi_notif_thld->bcn_rssi_2g = NAN_DEF_RSSI_NOTIF_THRESH;
+ }
+
+ if (nan_attr_mask & NAN_ATTR_RSSI_PROXIMITY_5G_CONFIG) {
+ rssi_notif_thld->bcn_rssi_5g =
+ cmd_data->rssi_attr.rssi_proximity_5g_val;
+ } else {
+		/* Keep the RSSI threshold at the -70 dBm default */
+ rssi_notif_thld->bcn_rssi_5g = NAN_DEF_RSSI_NOTIF_THRESH;
+ }
+
+ sub_cmd->id = htod16(WL_NAN_CMD_SYNC_BCN_RSSI_NOTIF_THRESHOLD);
+ sub_cmd->len = htod16(sizeof(sub_cmd->u.options) + sizeof(*rssi_notif_thld));
+ sub_cmd->u.options = htod32(BCM_XTLV_OPTION_ALIGN32);
+
+ nan_iov_data->nan_iov_len -= subcmd_len;
+ nan_iov_data->nan_iov_buf += subcmd_len;
+
+ NAN_DBG_EXIT();
+ return ret;
+}
+
+static int
+wl_cfgnan_set_rssi_mid_or_close(nan_config_cmd_data_t *cmd_data,
+ wl_nan_iov_t *nan_iov_data, uint32 nan_attr_mask)
+{
+ s32 ret = BCME_OK;
+ bcm_iov_batch_subcmd_t *sub_cmd = NULL;
+ wl_nan_rssi_thld_t *rssi_thld = NULL;
+ uint16 subcmd_len;
+
+ NAN_DBG_ENTER();
+ sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
+ rssi_thld = (wl_nan_rssi_thld_t *)sub_cmd->data;
+
+ ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
+ sizeof(*rssi_thld), &subcmd_len);
+ if (unlikely(ret)) {
+ WL_ERR(("nan_sub_cmd check failed\n"));
+ return ret;
+ }
+
+	/*
+	 * Default RSSI mid value: -75 dBm for both 2G and 5G.
+	 * Default RSSI close value: -60 dBm for both 2G and 5G.
+	 */
+ if (nan_attr_mask & NAN_ATTR_RSSI_MIDDLE_2G_CONFIG) {
+ rssi_thld->rssi_mid_2g =
+ cmd_data->rssi_attr.rssi_middle_2dot4g_val;
+ } else {
+ rssi_thld->rssi_mid_2g = NAN_DEF_RSSI_MID;
+ }
+
+ if (nan_attr_mask & NAN_ATTR_RSSI_MIDDLE_5G_CONFIG) {
+ rssi_thld->rssi_mid_5g =
+ cmd_data->rssi_attr.rssi_middle_5g_val;
+ } else {
+ rssi_thld->rssi_mid_5g = NAN_DEF_RSSI_MID;
+ }
+
+ if (nan_attr_mask & NAN_ATTR_RSSI_CLOSE_CONFIG) {
+ rssi_thld->rssi_close_2g =
+ cmd_data->rssi_attr.rssi_close_2dot4g_val;
+ } else {
+ rssi_thld->rssi_close_2g = NAN_DEF_RSSI_CLOSE;
+ }
+
+ if (nan_attr_mask & NAN_ATTR_RSSI_CLOSE_5G_CONFIG) {
+ rssi_thld->rssi_close_5g =
+ cmd_data->rssi_attr.rssi_close_5g_val;
+ } else {
+ rssi_thld->rssi_close_5g = NAN_DEF_RSSI_CLOSE;
+ }
+
+ sub_cmd->id = htod16(WL_NAN_CMD_ELECTION_RSSI_THRESHOLD);
+ sub_cmd->len = htod16(sizeof(sub_cmd->u.options) + sizeof(*rssi_thld));
+ sub_cmd->u.options = htod32(BCM_XTLV_OPTION_ALIGN32);
+
+ nan_iov_data->nan_iov_len -= subcmd_len;
+ nan_iov_data->nan_iov_buf += subcmd_len;
+
+ NAN_DBG_EXIT();
+ return ret;
+}
+
+static int
+check_for_valid_5gchan(struct net_device *ndev, uint8 chan)
+{
+ s32 ret = BCME_OK;
+ uint bitmap;
+ u8 ioctl_buf[WLC_IOCTL_SMLEN];
+ uint32 chanspec_arg;
+ NAN_DBG_ENTER();
+
+ chanspec_arg = CH20MHZ_CHSPEC(chan);
+ chanspec_arg = wl_chspec_host_to_driver(chanspec_arg);
+ bzero(ioctl_buf, WLC_IOCTL_SMLEN);
+ ret = wldev_iovar_getbuf(ndev, "per_chan_info",
+ (void *)&chanspec_arg, sizeof(chanspec_arg),
+ ioctl_buf, WLC_IOCTL_SMLEN, NULL);
+ if (ret != BCME_OK) {
+		WL_ERR(("Failed to get chan info for channel %d, error %d\n", chan, ret));
+ goto exit;
+ }
+
+ bitmap = dtoh32(*(uint *)ioctl_buf);
+ if (!(bitmap & WL_CHAN_VALID_HW)) {
+ WL_ERR(("Invalid channel\n"));
+ ret = BCME_BADCHAN;
+ goto exit;
+ }
+
+ if (!(bitmap & WL_CHAN_VALID_SW)) {
+ WL_ERR(("Not supported in current locale\n"));
+ ret = BCME_BADCHAN;
+ goto exit;
+ }
+exit:
+ NAN_DBG_EXIT();
+ return ret;
+}
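+
+/*
+ * per_chan_info returns a capability bitmap for the queried chanspec:
+ * WL_CHAN_VALID_HW means the radio can operate on the channel at all and
+ * WL_CHAN_VALID_SW means the current locale/regulatory setting permits it,
+ * so both bits must be set for a usable 5G social channel.
+ */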
+
+static int
+wl_cfgnan_set_nan_soc_chans(struct net_device *ndev, nan_config_cmd_data_t *cmd_data,
+ wl_nan_iov_t *nan_iov_data, uint32 nan_attr_mask)
+{
+ s32 ret = BCME_OK;
+ bcm_iov_batch_subcmd_t *sub_cmd = NULL;
+ wl_nan_social_channels_t *soc_chans = NULL;
+ uint16 subcmd_len;
+
+ NAN_DBG_ENTER();
+
+ sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
+ soc_chans =
+ (wl_nan_social_channels_t *)sub_cmd->data;
+
+ ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
+ sizeof(*soc_chans), &subcmd_len);
+ if (unlikely(ret)) {
+ WL_ERR(("nan_sub_cmd check failed\n"));
+ return ret;
+ }
+
+ sub_cmd->id = htod16(WL_NAN_CMD_SYNC_SOCIAL_CHAN);
+ sub_cmd->len = sizeof(sub_cmd->u.options) +
+ sizeof(*soc_chans);
+ sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
+ if (nan_attr_mask & NAN_ATTR_2G_CHAN_CONFIG) {
+ soc_chans->soc_chan_2g = cmd_data->chanspec[1];
+ } else {
+ soc_chans->soc_chan_2g = NAN_DEF_SOCIAL_CHAN_2G;
+ }
+
+ if (cmd_data->support_5g) {
+ if (nan_attr_mask & NAN_ATTR_5G_CHAN_CONFIG) {
+ soc_chans->soc_chan_5g = cmd_data->chanspec[2];
+ } else {
+ soc_chans->soc_chan_5g = NAN_DEF_SOCIAL_CHAN_5G;
+ }
+ ret = check_for_valid_5gchan(ndev, soc_chans->soc_chan_5g);
+ if (ret != BCME_OK) {
+ ret = check_for_valid_5gchan(ndev, NAN_DEF_SEC_SOCIAL_CHAN_5G);
+ if (ret == BCME_OK) {
+ soc_chans->soc_chan_5g = NAN_DEF_SEC_SOCIAL_CHAN_5G;
+ } else {
+ soc_chans->soc_chan_5g = 0;
+ ret = BCME_OK;
+				WL_ERR(("Current locale doesn't support 5G op, "
+					"continuing with 2G only operation\n"));
+ }
+ }
+ } else {
+ WL_DBG(("5G support is disabled\n"));
+ }
+ nan_iov_data->nan_iov_len -= subcmd_len;
+ nan_iov_data->nan_iov_buf += subcmd_len;
+
+ NAN_DBG_EXIT();
+ return ret;
+}
+
+static int
+wl_cfgnan_set_nan_scan_params(struct net_device *ndev, struct bcm_cfg80211 *cfg,
+ nan_config_cmd_data_t *cmd_data, uint8 band_index, uint32 nan_attr_mask)
+{
+ bcm_iov_batch_buf_t *nan_buf = NULL;
+ s32 ret = BCME_OK;
+ uint16 nan_iov_start, nan_iov_end;
+ uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
+ uint16 subcmd_len;
+ bcm_iov_batch_subcmd_t *sub_cmd = NULL;
+ wl_nan_iov_t *nan_iov_data = NULL;
+ uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
+ wl_nan_scan_params_t *scan_params = NULL;
+ uint32 status;
+
+ NAN_DBG_ENTER();
+
+ nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
+ if (!nan_buf) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ ret = BCME_NOMEM;
+ goto fail;
+ }
+
+ nan_iov_data = MALLOCZ(cfg->osh, sizeof(*nan_iov_data));
+ if (!nan_iov_data) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ ret = BCME_NOMEM;
+ goto fail;
+ }
+
+ nan_iov_data->nan_iov_len = nan_iov_start = NAN_IOCTL_BUF_SIZE;
+ nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
+ nan_buf->count = 0;
+ nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
+ nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
+ sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
+
+ ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
+ sizeof(*scan_params), &subcmd_len);
+ if (unlikely(ret)) {
+ WL_ERR(("nan_sub_cmd check failed\n"));
+ goto fail;
+ }
+ scan_params = (wl_nan_scan_params_t *)sub_cmd->data;
+
+ sub_cmd->id = htod16(WL_NAN_CMD_CFG_SCAN_PARAMS);
+ sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(*scan_params);
+ sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
+
+ if (!band_index) {
+ /* Fw default: Dwell time for 2G is 210 */
+ if ((nan_attr_mask & NAN_ATTR_2G_DWELL_TIME_CONFIG) &&
+ cmd_data->dwell_time[0]) {
+ scan_params->dwell_time = cmd_data->dwell_time[0] +
+ NAN_SCAN_DWELL_TIME_DELTA_MS;
+ }
+ /* Fw default: Scan period for 2G is 10 */
+ if (nan_attr_mask & NAN_ATTR_2G_SCAN_PERIOD_CONFIG) {
+ scan_params->scan_period = cmd_data->scan_period[0];
+ }
+ } else {
+ if ((nan_attr_mask & NAN_ATTR_5G_DWELL_TIME_CONFIG) &&
+ cmd_data->dwell_time[1]) {
+ scan_params->dwell_time = cmd_data->dwell_time[1] +
+ NAN_SCAN_DWELL_TIME_DELTA_MS;
+ }
+ if (nan_attr_mask & NAN_ATTR_5G_SCAN_PERIOD_CONFIG) {
+ scan_params->scan_period = cmd_data->scan_period[1];
+ }
+ }
+ scan_params->band_index = band_index;
+ nan_buf->is_set = true;
+ nan_buf->count++;
+
+ /* Reduce the iov_len size by subcmd_len */
+ nan_iov_data->nan_iov_len -= subcmd_len;
+ nan_iov_end = nan_iov_data->nan_iov_len;
+ nan_buf_size = (nan_iov_start - nan_iov_end);
+
+ bzero(resp_buf, sizeof(resp_buf));
+ ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
+ (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
+ if (unlikely(ret) || unlikely(status)) {
+ WL_ERR(("set nan scan params failed ret %d status %d \n", ret, status));
+ goto fail;
+ }
+	WL_DBG(("set nan scan params successful\n"));
+fail:
+ if (nan_buf) {
+ MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
+ }
+ if (nan_iov_data) {
+ MFREE(cfg->osh, nan_iov_data, sizeof(*nan_iov_data));
+ }
+
+ NAN_DBG_EXIT();
+ return ret;
+}
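+
+/*
+ * Size bookkeeping used above (and by the other one-shot setters): the
+ * sub-command handlers only shrink nan_iov_len as commands are appended,
+ * so the number of bytes actually occupied in nan_buf is the difference
+ * (nan_iov_start - nan_iov_end), which is what gets passed to
+ * wl_cfgnan_execute_ioctl() as nan_buf_size.
+ */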
+
+static uint16
+wl_cfgnan_gen_rand_cluster_id(uint16 low_val, uint16 high_val)
+{
+ uint16 random_id;
+ ulong random_seed;
+
+	/* If the range is invalid (low_val >= high_val), fall back to high_val */
+	if (low_val >= high_val) {
+ random_id = high_val;
+ } else {
+ RANDOM_BYTES(&random_seed, sizeof(random_seed));
+ random_id = (uint16)((random_seed % ((high_val + 1) -
+ low_val)) + low_val);
+ }
+ return random_id;
+}
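+
+/*
+ * The mapping above draws random_id from the inclusive range
+ * [low_val, high_val] (uniform up to modulo bias):
+ *
+ *   random_id = (random_seed % (high_val - low_val + 1)) + low_val
+ *
+ * e.g. low_val = 0x0000, high_val = 0xFFFF permits any 16-bit value,
+ * while low_val == high_val pins the result to that exact value.
+ */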
+
+static int
+wl_cfgnan_set_cluster_id(nan_config_cmd_data_t *cmd_data,
+ wl_nan_iov_t *nan_iov_data)
+{
+ s32 ret = BCME_OK;
+ bcm_iov_batch_subcmd_t *sub_cmd = NULL;
+ uint16 subcmd_len;
+
+ NAN_DBG_ENTER();
+
+ sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
+
+ ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
+ (sizeof(cmd_data->clus_id) - sizeof(uint8)), &subcmd_len);
+ if (unlikely(ret)) {
+ WL_ERR(("nan_sub_cmd check failed\n"));
+ return ret;
+ }
+
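+	/* NAN cluster IDs live in the Wi-Fi Alliance range
+	 * 50:6F:9A:01:00:00 - 50:6F:9A:01:FF:FF; only the last two octets
+	 * are device-selectable, drawn from the configured
+	 * [cluster_low, cluster_high] window below.
+	 */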
+ cmd_data->clus_id.octet[0] = 0x50;
+ cmd_data->clus_id.octet[1] = 0x6F;
+ cmd_data->clus_id.octet[2] = 0x9A;
+ cmd_data->clus_id.octet[3] = 0x01;
+ hton16_ua_store(wl_cfgnan_gen_rand_cluster_id(cmd_data->cluster_low,
+ cmd_data->cluster_high), &cmd_data->clus_id.octet[4]);
+
+ WL_TRACE(("cluster_id = " MACDBG "\n", MAC2STRDBG(cmd_data->clus_id.octet)));
+
+ sub_cmd->id = htod16(WL_NAN_CMD_CFG_CID);
+ sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(cmd_data->clus_id);
+ sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
+ ret = memcpy_s(sub_cmd->data, sizeof(cmd_data->clus_id),
+ (uint8 *)&cmd_data->clus_id,
+ sizeof(cmd_data->clus_id));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy clus id\n"));
+ return ret;
+ }
+
+ nan_iov_data->nan_iov_len -= subcmd_len;
+ nan_iov_data->nan_iov_buf += subcmd_len;
+
+ NAN_DBG_EXIT();
+ return ret;
+}
+
+static int
+wl_cfgnan_set_hop_count_limit(nan_config_cmd_data_t *cmd_data,
+ wl_nan_iov_t *nan_iov_data)
+{
+ s32 ret = BCME_OK;
+ bcm_iov_batch_subcmd_t *sub_cmd = NULL;
+ wl_nan_hop_count_t *hop_limit = NULL;
+ uint16 subcmd_len;
+
+ NAN_DBG_ENTER();
+
+ sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
+ hop_limit = (wl_nan_hop_count_t *)sub_cmd->data;
+
+ ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
+ sizeof(*hop_limit), &subcmd_len);
+ if (unlikely(ret)) {
+ WL_ERR(("nan_sub_cmd check failed\n"));
+ return ret;
+ }
+
+ *hop_limit = cmd_data->hop_count_limit;
+ sub_cmd->id = htod16(WL_NAN_CMD_CFG_HOP_LIMIT);
+ sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(*hop_limit);
+ sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
+
+ nan_iov_data->nan_iov_len -= subcmd_len;
+ nan_iov_data->nan_iov_buf += subcmd_len;
+
+ NAN_DBG_EXIT();
+ return ret;
+}
+
+static int
+wl_cfgnan_set_sid_beacon_val(nan_config_cmd_data_t *cmd_data,
+ wl_nan_iov_t *nan_iov_data, uint32 nan_attr_mask)
+{
+ s32 ret = BCME_OK;
+ bcm_iov_batch_subcmd_t *sub_cmd = NULL;
+ wl_nan_sid_beacon_control_t *sid_beacon = NULL;
+ uint16 subcmd_len;
+
+ NAN_DBG_ENTER();
+
+ sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
+
+ ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
+ sizeof(*sid_beacon), &subcmd_len);
+ if (unlikely(ret)) {
+ WL_ERR(("nan_sub_cmd check failed\n"));
+ return ret;
+ }
+
+ sid_beacon = (wl_nan_sid_beacon_control_t *)sub_cmd->data;
+ sid_beacon->sid_enable = cmd_data->sid_beacon.sid_enable;
+ /* Need to have separate flag for sub beacons
+ * sid_beacon->sub_sid_enable = cmd_data->sid_beacon.sub_sid_enable;
+ */
+ if (nan_attr_mask & NAN_ATTR_SID_BEACON_CONFIG) {
+ /* Limit for number of publish SIDs to be included in Beacons */
+ sid_beacon->sid_count = cmd_data->sid_beacon.sid_count;
+ }
+ if (nan_attr_mask & NAN_ATTR_SUB_SID_BEACON_CONFIG) {
+ /* Limit for number of subscribe SIDs to be included in Beacons */
+ sid_beacon->sub_sid_count = cmd_data->sid_beacon.sub_sid_count;
+ }
+ sub_cmd->id = htod16(WL_NAN_CMD_CFG_SID_BEACON);
+ sub_cmd->len = sizeof(sub_cmd->u.options) +
+ sizeof(*sid_beacon);
+ sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
+
+ nan_iov_data->nan_iov_len -= subcmd_len;
+ nan_iov_data->nan_iov_buf += subcmd_len;
+ NAN_DBG_EXIT();
+ return ret;
+}
+
+static int
+wl_cfgnan_set_nan_oui(nan_config_cmd_data_t *cmd_data,
+ wl_nan_iov_t *nan_iov_data)
+{
+ s32 ret = BCME_OK;
+ bcm_iov_batch_subcmd_t *sub_cmd = NULL;
+ uint16 subcmd_len;
+
+ NAN_DBG_ENTER();
+
+ sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
+
+ ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
+ sizeof(cmd_data->nan_oui), &subcmd_len);
+ if (unlikely(ret)) {
+ WL_ERR(("nan_sub_cmd check failed\n"));
+ return ret;
+ }
+
+ sub_cmd->id = htod16(WL_NAN_CMD_CFG_OUI);
+ sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(cmd_data->nan_oui);
+ sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
+ ret = memcpy_s(sub_cmd->data, sizeof(cmd_data->nan_oui),
+ (uint32 *)&cmd_data->nan_oui,
+ sizeof(cmd_data->nan_oui));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy nan oui\n"));
+ return ret;
+ }
+
+ nan_iov_data->nan_iov_len -= subcmd_len;
+ nan_iov_data->nan_iov_buf += subcmd_len;
+ NAN_DBG_EXIT();
+ return ret;
+}
+
+static int
+wl_cfgnan_set_awake_dws(struct net_device *ndev, nan_config_cmd_data_t *cmd_data,
+ wl_nan_iov_t *nan_iov_data, struct bcm_cfg80211 *cfg, uint32 nan_attr_mask)
+{
+ s32 ret = BCME_OK;
+ bcm_iov_batch_subcmd_t *sub_cmd = NULL;
+ wl_nan_awake_dws_t *awake_dws = NULL;
+ uint16 subcmd_len;
+ NAN_DBG_ENTER();
+
+ sub_cmd =
+ (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
+ ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
+ sizeof(*awake_dws), &subcmd_len);
+ if (unlikely(ret)) {
+ WL_ERR(("nan_sub_cmd check failed\n"));
+ return ret;
+ }
+
+ awake_dws = (wl_nan_awake_dws_t *)sub_cmd->data;
+
+ if (nan_attr_mask & NAN_ATTR_2G_DW_CONFIG) {
+ awake_dws->dw_interval_2g = cmd_data->awake_dws.dw_interval_2g;
+ if (!awake_dws->dw_interval_2g) {
+ /* Set 2G awake dw value to fw default value 1 */
+ awake_dws->dw_interval_2g = NAN_SYNC_DEF_AWAKE_DW;
+ }
+ } else {
+ /* Set 2G awake dw value to fw default value 1 */
+ awake_dws->dw_interval_2g = NAN_SYNC_DEF_AWAKE_DW;
+ }
+
+ if (cfg->nancfg->support_5g) {
+ if (nan_attr_mask & NAN_ATTR_5G_DW_CONFIG) {
+ awake_dws->dw_interval_5g = cmd_data->awake_dws.dw_interval_5g;
+ /* config sync/discovery beacons on 5G band */
+ ret = wl_cfgnan_config_control_flag(ndev, cfg,
+ WL_NAN_CTRL_DISC_BEACON_TX_5G |
+ WL_NAN_CTRL_SYNC_BEACON_TX_5G,
+ 0, WL_NAN_CMD_CFG_NAN_CONFIG,
+ &(cmd_data->status),
+ awake_dws->dw_interval_5g);
+ if (unlikely(ret) || unlikely(cmd_data->status)) {
+ WL_ERR((" nan control set config handler, ret = %d"
+ " status = %d \n", ret, cmd_data->status));
+ goto fail;
+ }
+ } else {
+ /* Set 5G awake dw value to fw default value 1 */
+ awake_dws->dw_interval_5g = NAN_SYNC_DEF_AWAKE_DW;
+ }
+ }
+
+ sub_cmd->id = htod16(WL_NAN_CMD_SYNC_AWAKE_DWS);
+ sub_cmd->len = sizeof(sub_cmd->u.options) +
+ sizeof(*awake_dws);
+ sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
+
+ nan_iov_data->nan_iov_len -= subcmd_len;
+ nan_iov_data->nan_iov_buf += subcmd_len;
+
+fail:
+ NAN_DBG_EXIT();
+ return ret;
+}
+
+int
+wl_cfgnan_set_enable_merge(struct net_device *ndev,
+ struct bcm_cfg80211 *cfg, uint8 enable, uint32 *status)
+{
+ bcm_iov_batch_buf_t *nan_buf = NULL;
+ s32 ret = BCME_OK;
+ uint16 nan_iov_start, nan_iov_end;
+ uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
+ uint16 subcmd_len;
+ bcm_iov_batch_subcmd_t *sub_cmd = NULL;
+ wl_nan_iov_t *nan_iov_data = NULL;
+ uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
+ wl_nan_merge_enable_t merge_enable;
+ uint8 size_of_iov;
+
+ NAN_DBG_ENTER();
+
+ nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
+ if (!nan_buf) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ ret = BCME_NOMEM;
+ goto fail;
+ }
+
+ nan_iov_data = MALLOCZ(cfg->osh, sizeof(*nan_iov_data));
+ if (!nan_iov_data) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ ret = BCME_NOMEM;
+ goto fail;
+ }
+
+ merge_enable = (wl_nan_merge_enable_t)enable;
+ size_of_iov = sizeof(wl_nan_merge_enable_t);
+
+ nan_iov_data->nan_iov_len = nan_iov_start = NAN_IOCTL_BUF_SIZE;
+ nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
+ nan_buf->count = 0;
+ nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
+ nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
+ sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
+
+ ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
+ size_of_iov, &subcmd_len);
+ if (unlikely(ret)) {
+ WL_ERR(("nan_sub_cmd check failed\n"));
+ goto fail;
+ }
+
+ sub_cmd->id = htod16(WL_NAN_CMD_ELECTION_MERGE);
+ sub_cmd->len = sizeof(sub_cmd->u.options) + size_of_iov;
+ sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
+
+ /* Reduce the iov_len size by subcmd_len */
+ nan_iov_data->nan_iov_len -= subcmd_len;
+ nan_iov_end = nan_iov_data->nan_iov_len;
+ nan_buf_size = (nan_iov_start - nan_iov_end);
+
+ (void)memcpy_s(sub_cmd->data, nan_iov_data->nan_iov_len,
+ &merge_enable, size_of_iov);
+
+ nan_buf->is_set = true;
+ nan_buf->count++;
+ bzero(resp_buf, sizeof(resp_buf));
+ ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, status,
+ (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
+ if (unlikely(ret) || unlikely(*status)) {
+ WL_ERR(("Merge enable %d failed ret %d status %d \n", merge_enable, ret, *status));
+ goto fail;
+ }
+fail:
+ if (nan_buf) {
+ MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
+ }
+ if (nan_iov_data) {
+ MFREE(cfg->osh, nan_iov_data, sizeof(*nan_iov_data));
+ }
+ NAN_DBG_EXIT();
+ return ret;
+}
+
+static int
+wl_cfgnan_set_disc_beacon_interval_handler(struct net_device *ndev, struct bcm_cfg80211 *cfg,
+ wl_nan_disc_bcn_interval_t disc_beacon_interval)
+{
+ bcm_iov_batch_buf_t *nan_buf = NULL;
+ s32 ret = BCME_OK;
+ uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
+ wl_nan_iov_t *nan_iov_data = NULL;
+ uint32 status;
+ uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
+ bcm_iov_batch_subcmd_t *sub_cmd = NULL;
+ uint16 subcmd_len;
+ uint8 size_of_iov;
+
+ NAN_DBG_ENTER();
+ NAN_MUTEX_LOCK();
+
+ nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
+ if (!nan_buf) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ ret = BCME_NOMEM;
+ goto fail;
+ }
+
+ nan_iov_data = MALLOCZ(cfg->osh, sizeof(*nan_iov_data));
+ if (!nan_iov_data) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ ret = BCME_NOMEM;
+ goto fail;
+ }
+
+ size_of_iov = sizeof(wl_nan_disc_bcn_interval_t);
+ nan_iov_data->nan_iov_len = NAN_IOCTL_BUF_SIZE;
+ nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
+ nan_buf->count = 0;
+ nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
+ nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
+
+ sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
+ ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
+ size_of_iov, &subcmd_len);
+ if (unlikely(ret)) {
+ WL_ERR(("nan_sub_cmd check failed\n"));
+ goto fail;
+ }
+
+	/* Choose the default discovery beacon interval if the value is zero */
+ if (!disc_beacon_interval) {
+ disc_beacon_interval = cfg->nancfg->support_5g ? NAN_DISC_BCN_INTERVAL_5G_DEF:
+ NAN_DISC_BCN_INTERVAL_2G_DEF;
+ }
+
+ /* Fill the sub_command block */
+ sub_cmd->id = htod16(WL_NAN_CMD_CFG_BCN_INTERVAL);
+ sub_cmd->len = sizeof(sub_cmd->u.options) + size_of_iov;
+ sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
+ ret = memcpy_s(sub_cmd->data, nan_iov_data->nan_iov_len,
+ &disc_beacon_interval, size_of_iov);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy disc_beacon_interval\n"));
+ goto fail;
+ }
+
+ nan_iov_data->nan_iov_len -= subcmd_len;
+ nan_iov_data->nan_iov_buf += subcmd_len;
+
+ nan_buf->count++;
+ nan_buf->is_set = true;
+ nan_buf_size -= nan_iov_data->nan_iov_len;
+ bzero(resp_buf, sizeof(resp_buf));
+ ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
+ (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
+ if (unlikely(ret) || unlikely(status)) {
+ WL_ERR(("Failed to set disc beacon interval, ret = %d status = %d\n",
+ ret, status));
+ goto fail;
+ }
+
+fail:
+ if (nan_buf) {
+ MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
+ }
+ if (nan_iov_data) {
+ MFREE(cfg->osh, nan_iov_data, sizeof(*nan_iov_data));
+ }
+
+ NAN_MUTEX_UNLOCK();
+ NAN_DBG_EXIT();
+ return ret;
+}
+
+void
+wl_cfgnan_immediate_nan_disable_pending(struct bcm_cfg80211 *cfg)
+{
+ if (delayed_work_pending(&cfg->nancfg->nan_disable)) {
+ WL_DBG(("Do immediate nan_disable work\n"));
+ DHD_NAN_WAKE_UNLOCK(cfg->pub);
+ if (cancel_delayed_work(&cfg->nancfg->nan_disable)) {
+ schedule_delayed_work(&cfg->nancfg->nan_disable, 0);
+ }
+ }
+}
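+
+/*
+ * The cancel-then-reschedule idiom above relies on cancel_delayed_work()
+ * returning true only when the work item was still pending: in that case
+ * it is safe to re-queue it with a zero delay so the deferred disable runs
+ * immediately, while a work item that already started executing is left
+ * alone.
+ */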
+
+int
+wl_cfgnan_check_nan_disable_pending(struct bcm_cfg80211 *cfg,
+ bool force_disable, bool is_sync_reqd)
+{
+ int ret = BCME_OK;
+ struct net_device *ndev = NULL;
+
+ if (delayed_work_pending(&cfg->nancfg->nan_disable)) {
+ WL_DBG(("Cancel nan_disable work\n"));
+ /*
+	 * NAN gets disabled from dhd_stop (dev_close) and other framework contexts.
+	 * cancel_work_sync can't be used from the dhd_stop context for
+	 * wl_cfgnan_delayed_disable since both contexts use
+	 * rtnl_lock, resulting in deadlock. If dhd_stop gets invoked,
+	 * rely on the dhd_stop context to do the NAN cleanup work and
+	 * just return from the delayed WQ based on a state check.
+ */
+
+ DHD_NAN_WAKE_UNLOCK(cfg->pub);
+
+ if (is_sync_reqd == true) {
+ cancel_delayed_work_sync(&cfg->nancfg->nan_disable);
+ } else {
+ cancel_delayed_work(&cfg->nancfg->nan_disable);
+ }
+ force_disable = true;
+ }
+ if ((force_disable == true) && (cfg->nancfg->nan_enable == true)) {
+ ret = wl_cfgnan_disable(cfg);
+ if (ret != BCME_OK) {
+ WL_ERR(("failed to disable nan, error[%d]\n", ret));
+ }
+ /* Intentional fall through to cleanup framework */
+ if (cfg->nancfg->notify_user == true) {
+ ndev = bcmcfg_to_prmry_ndev(cfg);
+ wl_cfgvendor_nan_send_async_disable_resp(ndev->ieee80211_ptr);
+ }
+ }
+ return ret;
+}
+
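+/*
+ * NAN start sequence implemented below: init -> query the NAN IOV version
+ * -> program the NMI address -> batch the config sub-commands (warm-up
+ * time, election metrics, social channels, cluster ID, RSSI thresholds,
+ * hop limit, SID beacon, OUI, awake DWs) together with the enable
+ * sub-command into a single "nan" iovar set -> wait for WL_NAN_EVENT_START
+ * -> apply the ctrl/ctrl2 config flags, allocate NDP peer and NDI
+ * bookkeeping, and optionally set the discovery beacon interval.
+ */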
+int
+wl_cfgnan_start_handler(struct net_device *ndev, struct bcm_cfg80211 *cfg,
+ nan_config_cmd_data_t *cmd_data, uint32 nan_attr_mask)
+{
+ s32 ret = BCME_OK;
+ uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
+ bcm_iov_batch_buf_t *nan_buf = NULL;
+ wl_nan_iov_t *nan_iov_data = NULL;
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(ndev);
+ uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
+ int i;
+ s32 timeout = 0;
+ nan_hal_capabilities_t capabilities;
+ uint32 cfg_ctrl1_flags = 0;
+ uint32 cfg_ctrl2_flags1 = 0;
+ wl_nancfg_t *nancfg = cfg->nancfg;
+
+ NAN_DBG_ENTER();
+
+ if (!dhdp->up) {
+ WL_ERR(("bus is already down, hence blocking nan start\n"));
+ return BCME_ERROR;
+ }
+
+ /* Protect discovery creation. Ensure proper mutex precedence.
+ * If if_sync & nan_mutex comes together in same context, nan_mutex
+ * should follow if_sync.
+ */
+ mutex_lock(&cfg->if_sync);
+ NAN_MUTEX_LOCK();
+
+#ifdef WL_IFACE_MGMT
+ if ((ret = wl_cfg80211_handle_if_role_conflict(cfg, WL_IF_TYPE_NAN_NMI)) != BCME_OK) {
+		WL_ERR(("Conflicting iface is present, can't support nan\n"));
+ NAN_MUTEX_UNLOCK();
+ mutex_unlock(&cfg->if_sync);
+ goto fail;
+ }
+#endif /* WL_IFACE_MGMT */
+
+ /* disable TDLS on NAN init */
+ wl_cfg80211_tdls_config(cfg, TDLS_STATE_NMI_CREATE, false);
+
+ WL_INFORM_MEM(("Initializing NAN\n"));
+ ret = wl_cfgnan_init(cfg);
+ if (ret != BCME_OK) {
+ WL_ERR(("failed to initialize NAN[%d]\n", ret));
+ NAN_MUTEX_UNLOCK();
+ mutex_unlock(&cfg->if_sync);
+ goto fail;
+ }
+
+ ret = wl_cfgnan_get_ver(ndev, cfg);
+ if (ret != BCME_OK) {
+		WL_ERR(("failed to get Nan IOV version[%d]\n", ret));
+ NAN_MUTEX_UNLOCK();
+ mutex_unlock(&cfg->if_sync);
+ goto fail;
+ }
+
+ /* set nmi addr */
+ ret = wl_cfgnan_set_if_addr(cfg);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to set nmi address \n"));
+ NAN_MUTEX_UNLOCK();
+ mutex_unlock(&cfg->if_sync);
+ goto fail;
+ }
+ nancfg->nan_event_recvd = false;
+ NAN_MUTEX_UNLOCK();
+ mutex_unlock(&cfg->if_sync);
+
+ nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
+ if (!nan_buf) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ ret = BCME_NOMEM;
+ goto fail;
+ }
+
+ nan_iov_data = MALLOCZ(cfg->osh, sizeof(*nan_iov_data));
+ if (!nan_iov_data) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ ret = BCME_NOMEM;
+ goto fail;
+ }
+
+ nan_iov_data->nan_iov_len = NAN_IOCTL_BUF_SIZE;
+ nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
+ nan_buf->count = 0;
+ nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
+ nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
+
+ if (nan_attr_mask & NAN_ATTR_SYNC_DISC_2G_BEACON_CONFIG) {
+ /* config sync/discovery beacons on 2G band */
+ /* 2g is mandatory */
+		if (!cmd_data->beacon_2g_val) {
+			WL_ERR(("Invalid NAN config...2G is mandatory\n"));
+			ret = BCME_BADARG;
+			goto fail;
+		}
+ cfg_ctrl1_flags |= (WL_NAN_CTRL_DISC_BEACON_TX_2G | WL_NAN_CTRL_SYNC_BEACON_TX_2G);
+ }
+ if (nan_attr_mask & NAN_ATTR_SYNC_DISC_5G_BEACON_CONFIG) {
+ /* config sync/discovery beacons on 5G band */
+ cfg_ctrl1_flags |= (WL_NAN_CTRL_DISC_BEACON_TX_5G | WL_NAN_CTRL_SYNC_BEACON_TX_5G);
+ }
+
+ if (cmd_data->warmup_time) {
+ ret = wl_cfgnan_warmup_time_handler(cmd_data, nan_iov_data);
+ if (unlikely(ret)) {
+ WL_ERR(("warm up time handler sub_cmd set failed\n"));
+ goto fail;
+ }
+ nan_buf->count++;
+ }
+ /* setting master preference and random factor */
+ ret = wl_cfgnan_set_election_metric(cmd_data, nan_iov_data, nan_attr_mask);
+ if (unlikely(ret)) {
+ WL_ERR(("election_metric sub_cmd set failed\n"));
+ goto fail;
+ } else {
+ nan_buf->count++;
+ }
+
+ /* setting nan social channels */
+ ret = wl_cfgnan_set_nan_soc_chans(ndev, cmd_data, nan_iov_data, nan_attr_mask);
+ if (unlikely(ret)) {
+ WL_ERR(("nan social channels set failed\n"));
+ goto fail;
+ } else {
+		/* Store the 5g capability, which is required for avail chan config. */
+ nancfg->support_5g = cmd_data->support_5g;
+ nan_buf->count++;
+ }
+
+ if ((cmd_data->support_2g) && ((cmd_data->dwell_time[0]) ||
+ (cmd_data->scan_period[0]))) {
+ /* setting scan params */
+ ret = wl_cfgnan_set_nan_scan_params(ndev, cfg, cmd_data, 0, nan_attr_mask);
+ if (unlikely(ret)) {
+ WL_ERR(("scan params set failed for 2g\n"));
+ goto fail;
+ }
+ }
+
+ if ((cmd_data->support_5g) && ((cmd_data->dwell_time[1]) ||
+ (cmd_data->scan_period[1]))) {
+ /* setting scan params */
+ ret = wl_cfgnan_set_nan_scan_params(ndev, cfg, cmd_data,
+ cmd_data->support_5g, nan_attr_mask);
+ if (unlikely(ret)) {
+ WL_ERR(("scan params set failed for 5g\n"));
+ goto fail;
+ }
+ }
+
+	/*
+	 * A cluster_low value matching cluster_high indicates a request
+	 * to join the cluster with that value.
+	 * If the requested cluster is not found, the
+	 * device will start its own cluster.
+	 */
+	/* For debug purposes, force merging to the configured cluster ID */
+ if (cmd_data->cluster_low == cmd_data->cluster_high) {
+ /* device will merge to configured CID only */
+ cfg_ctrl1_flags |= (WL_NAN_CTRL_MERGE_CONF_CID_ONLY);
+ }
+ /* setting cluster ID */
+ ret = wl_cfgnan_set_cluster_id(cmd_data, nan_iov_data);
+ if (unlikely(ret)) {
+ WL_ERR(("cluster_id sub_cmd set failed\n"));
+ goto fail;
+ }
+ nan_buf->count++;
+
+	/* setting rssi proximity values for 2.4GHz and 5GHz */
+ ret = wl_cfgnan_set_rssi_proximity(cmd_data, nan_iov_data, nan_attr_mask);
+ if (unlikely(ret)) {
+ WL_ERR(("2.4GHz/5GHz rssi proximity threshold set failed\n"));
+ goto fail;
+ } else {
+ nan_buf->count++;
+ }
+
+ /* setting rssi middle/close values for 2.4GHz and 5GHz */
+ ret = wl_cfgnan_set_rssi_mid_or_close(cmd_data, nan_iov_data, nan_attr_mask);
+ if (unlikely(ret)) {
+ WL_ERR(("2.4GHz/5GHz rssi middle and close set failed\n"));
+ goto fail;
+ } else {
+ nan_buf->count++;
+ }
+
+ /* setting hop count limit or threshold */
+ if (nan_attr_mask & NAN_ATTR_HOP_COUNT_LIMIT_CONFIG) {
+ ret = wl_cfgnan_set_hop_count_limit(cmd_data, nan_iov_data);
+ if (unlikely(ret)) {
+ WL_ERR(("hop_count_limit sub_cmd set failed\n"));
+ goto fail;
+ }
+ nan_buf->count++;
+ }
+
+ /* setting sid beacon val */
+ if ((nan_attr_mask & NAN_ATTR_SID_BEACON_CONFIG) ||
+ (nan_attr_mask & NAN_ATTR_SUB_SID_BEACON_CONFIG)) {
+ ret = wl_cfgnan_set_sid_beacon_val(cmd_data, nan_iov_data, nan_attr_mask);
+ if (unlikely(ret)) {
+ WL_ERR(("sid_beacon sub_cmd set failed\n"));
+ goto fail;
+ }
+ nan_buf->count++;
+ }
+
+ /* setting nan oui */
+ if (nan_attr_mask & NAN_ATTR_OUI_CONFIG) {
+ ret = wl_cfgnan_set_nan_oui(cmd_data, nan_iov_data);
+ if (unlikely(ret)) {
+ WL_ERR(("nan_oui sub_cmd set failed\n"));
+ goto fail;
+ }
+ nan_buf->count++;
+ }
+
+ /* setting nan awake dws */
+ ret = wl_cfgnan_set_awake_dws(ndev, cmd_data,
+ nan_iov_data, cfg, nan_attr_mask);
+ if (unlikely(ret)) {
+ WL_ERR(("nan awake dws set failed\n"));
+ goto fail;
+ } else {
+ nan_buf->count++;
+ }
+
+ /* enable events */
+ ret = wl_cfgnan_config_eventmask(ndev, cfg, cmd_data->disc_ind_cfg, false);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to config disc ind flag in event_mask, ret = %d\n", ret));
+ goto fail;
+ }
+
+ /* setting nan enable sub_cmd */
+ ret = wl_cfgnan_enable_handler(nan_iov_data, true);
+ if (unlikely(ret)) {
+ WL_ERR(("enable handler sub_cmd set failed\n"));
+ goto fail;
+ }
+ nan_buf->count++;
+ nan_buf->is_set = true;
+
+ nan_buf_size -= nan_iov_data->nan_iov_len;
+ memset(resp_buf, 0, sizeof(resp_buf));
+	/* Reset condition variable */
+ ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size,
+ &(cmd_data->status), (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
+ if (unlikely(ret) || unlikely(cmd_data->status)) {
+ WL_ERR((" nan start handler, enable failed, ret = %d status = %d \n",
+ ret, cmd_data->status));
+ goto fail;
+ }
+
+ timeout = wait_event_timeout(nancfg->nan_event_wait,
+ nancfg->nan_event_recvd, msecs_to_jiffies(NAN_START_STOP_TIMEOUT));
+ if (!timeout) {
+		WL_ERR(("Timed out while waiting for WL_NAN_EVENT_START event\n"));
+ ret = BCME_ERROR;
+ goto fail;
+ }
+
+	/* Default flags: set NAN proprietary rates and auto datapath confirm.
+	 * If auto datapath confirm is set, DPCONF will be sent by the FW.
+	 */
+ cfg_ctrl1_flags |= (WL_NAN_CTRL_AUTO_DPCONF | WL_NAN_CTRL_PROP_RATE);
+
+ /* set CFG CTRL flags */
+ ret = wl_cfgnan_config_control_flag(ndev, cfg, cfg_ctrl1_flags,
+ 0, WL_NAN_CMD_CFG_NAN_CONFIG,
+ &(cmd_data->status), true);
+ if (unlikely(ret) || unlikely(cmd_data->status)) {
+ WL_ERR((" nan ctrl1 config flags setting failed, ret = %d status = %d \n",
+ ret, cmd_data->status));
+ goto fail;
+ }
+
+ /* malloc for ndp peer list */
+ if ((ret = wl_cfgnan_get_capablities_handler(ndev, cfg, &capabilities))
+ == BCME_OK) {
+ nancfg->max_ndp_count = capabilities.max_ndp_sessions;
+ nancfg->max_ndi_supported = capabilities.max_ndi_interfaces;
+ nancfg->nan_ndp_peer_info = MALLOCZ(cfg->osh,
+ nancfg->max_ndp_count * sizeof(nan_ndp_peer_t));
+ if (!nancfg->nan_ndp_peer_info) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ ret = BCME_NOMEM;
+ goto fail;
+ }
+
+ if (!nancfg->ndi) {
+ nancfg->ndi = MALLOCZ(cfg->osh,
+ nancfg->max_ndi_supported * sizeof(*nancfg->ndi));
+ if (!nancfg->ndi) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ ret = BCME_NOMEM;
+ goto fail;
+ }
+ }
+ } else {
+ WL_ERR(("wl_cfgnan_get_capablities_handler failed, ret = %d\n", ret));
+ goto fail;
+ }
+
+ BCM_REFERENCE(i);
+#ifdef NAN_IFACE_CREATE_ON_UP
+ for (i = 0; i < nancfg->max_ndi_supported; i++) {
+ /* Create NDI using the information provided by user space */
+ if (nancfg->ndi[i].in_use && !nancfg->ndi[i].created) {
+ ret = wl_cfgnan_data_path_iface_create_delete_handler(ndev, cfg,
+ nancfg->ndi[i].ifname,
+ NAN_WIFI_SUBCMD_DATA_PATH_IFACE_CREATE, dhdp->up);
+ if (ret) {
+ WL_ERR(("failed to create ndp interface [%d]\n", ret));
+ goto fail;
+ }
+ nancfg->ndi[i].created = true;
+ }
+ }
+#endif /* NAN_IFACE_CREATE_ON_UP */
+
+ /* Check if NDPE is capable and use_ndpe_attr is set by framework */
+ /* TODO: For now enabling NDPE by default as framework is not setting use_ndpe_attr
+ * When (cmd_data->use_ndpe_attr) is set by framework, Add additional check for
+ * (cmd_data->use_ndpe_attr) as below
+ * if (capabilities.ndpe_attr_supported && cmd_data->use_ndpe_attr)
+ */
+	if (capabilities.ndpe_attr_supported) {
+ cfg_ctrl2_flags1 |= WL_NAN_CTRL2_FLAG1_NDPE_CAP;
+ nancfg->ndpe_enabled = true;
+ } else {
+ /* reset NDPE capability in FW */
+ ret = wl_cfgnan_config_control_flag(ndev, cfg, WL_NAN_CTRL2_FLAG1_NDPE_CAP,
+ 0, WL_NAN_CMD_CFG_NAN_CONFIG2,
+ &(cmd_data->status), false);
+ if (unlikely(ret) || unlikely(cmd_data->status)) {
+ WL_ERR((" nan ctrl2 config flags resetting failed, ret = %d status = %d \n",
+ ret, cmd_data->status));
+ goto fail;
+ }
+ nancfg->ndpe_enabled = false;
+ }
+
+ /* set CFG CTRL2 flags1 and flags2 */
+ ret = wl_cfgnan_config_control_flag(ndev, cfg, cfg_ctrl2_flags1,
+ 0, WL_NAN_CMD_CFG_NAN_CONFIG2,
+ &(cmd_data->status), true);
+ if (unlikely(ret) || unlikely(cmd_data->status)) {
+ WL_ERR((" nan ctrl2 config flags setting failed, ret = %d status = %d \n",
+ ret, cmd_data->status));
+ goto fail;
+ }
+
+#ifdef RTT_SUPPORT
+ /* Initialize geofence cfg */
+ dhd_rtt_initialize_geofence_cfg(cfg->pub);
+#endif /* RTT_SUPPORT */
+
+ if (cmd_data->dw_early_termination > 0) {
+ WL_ERR(("dw early termination is not supported, ignoring for now\n"));
+ }
+
+ if (nan_attr_mask & NAN_ATTR_DISC_BEACON_INTERVAL) {
+ ret = wl_cfgnan_set_disc_beacon_interval_handler(ndev, cfg,
+ cmd_data->disc_bcn_interval);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to set beacon interval\n"));
+ goto fail;
+ }
+ }
+
+ nancfg->nan_enable = true;
+	WL_INFORM_MEM(("[NAN] Enable successful\n"));
+
+fail:
+ /* Enable back TDLS if connected interface is <= 1 */
+ wl_cfg80211_tdls_config(cfg, TDLS_STATE_IF_DELETE, false);
+
+	/* reset condition variable */
+ nancfg->nan_event_recvd = false;
+ if (unlikely(ret) || unlikely(cmd_data->status)) {
+ nancfg->nan_enable = false;
+ mutex_lock(&cfg->if_sync);
+ ret = wl_cfg80211_delete_iface(cfg, WL_IF_TYPE_NAN);
+ if (ret != BCME_OK) {
+ WL_ERR(("failed to delete NDI[%d]\n", ret));
+ }
+ mutex_unlock(&cfg->if_sync);
+ if (nancfg->nan_ndp_peer_info) {
+ MFREE(cfg->osh, nancfg->nan_ndp_peer_info,
+ nancfg->max_ndp_count * sizeof(nan_ndp_peer_t));
+ nancfg->nan_ndp_peer_info = NULL;
+ }
+ if (nancfg->ndi) {
+ MFREE(cfg->osh, nancfg->ndi,
+ nancfg->max_ndi_supported * sizeof(*nancfg->ndi));
+ nancfg->ndi = NULL;
+ }
+ }
+ if (nan_buf) {
+ MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
+ }
+ if (nan_iov_data) {
+ MFREE(cfg->osh, nan_iov_data, sizeof(*nan_iov_data));
+ }
+
+ NAN_DBG_EXIT();
+ return ret;
+}
+
+static int
+wl_cfgnan_disable(struct bcm_cfg80211 *cfg)
+{
+ s32 ret = BCME_OK;
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+
+ NAN_DBG_ENTER();
+ if ((cfg->nancfg->nan_init_state == TRUE) &&
+ (cfg->nancfg->nan_enable == TRUE)) {
+ struct net_device *ndev;
+ ndev = bcmcfg_to_prmry_ndev(cfg);
+
+ /* We have to remove NDIs so that P2P/Softap can work */
+ ret = wl_cfg80211_delete_iface(cfg, WL_IF_TYPE_NAN);
+ if (ret != BCME_OK) {
+ WL_ERR(("failed to delete NDI[%d]\n", ret));
+ }
+
+ ret = wl_cfgnan_stop_handler(ndev, cfg);
+ if (ret == -ENODEV) {
+ WL_ERR(("Bus is down, no need to proceed\n"));
+ } else if (ret != BCME_OK) {
+ WL_ERR(("failed to stop nan, error[%d]\n", ret));
+ }
+ ret = wl_cfgnan_deinit(cfg, dhdp->up);
+ if (ret != BCME_OK) {
+ WL_ERR(("failed to de-initialize NAN[%d]\n", ret));
+ if (!dhd_query_bus_erros(dhdp)) {
+ ASSERT(0);
+ }
+ }
+ wl_cfgnan_disable_cleanup(cfg);
+ }
+ NAN_DBG_EXIT();
+ return ret;
+}
+
+static void
+wl_cfgnan_send_stop_event(struct bcm_cfg80211 *cfg)
+{
+ s32 ret = BCME_OK;
+ nan_event_data_t *nan_event_data = NULL;
+
+ NAN_DBG_ENTER();
+
+ nan_event_data = MALLOCZ(cfg->osh, sizeof(nan_event_data_t));
+ if (!nan_event_data) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ ret = BCME_NOMEM;
+ goto exit;
+ }
+ bzero(nan_event_data, sizeof(nan_event_data_t));
+
+ nan_event_data->status = NAN_STATUS_SUCCESS;
+ ret = memcpy_s(nan_event_data->nan_reason, NAN_ERROR_STR_LEN,
+ "NAN_STATUS_SUCCESS", strlen("NAN_STATUS_SUCCESS"));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy nan reason string, ret = %d\n", ret));
+ goto exit;
+ }
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT)
+ ret = wl_cfgvendor_send_nan_event(cfg->wdev->wiphy, bcmcfg_to_prmry_ndev(cfg),
+ GOOGLE_NAN_EVENT_DISABLED, nan_event_data);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to send event to nan hal, (%d)\n",
+ GOOGLE_NAN_EVENT_DISABLED));
+ }
+#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT) */
+exit:
+ if (nan_event_data) {
+ MFREE(cfg->osh, nan_event_data, sizeof(nan_event_data_t));
+ }
+ NAN_DBG_EXIT();
+ return;
+}
+
+static void
+wl_cfgnan_disable_cleanup(struct bcm_cfg80211 *cfg)
+{
+ int i = 0;
+ wl_nancfg_t *nancfg = cfg->nancfg;
+#ifdef RTT_SUPPORT
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhdp);
+ rtt_target_info_t *target_info = NULL;
+
+ /* Delete the geofence rtt target list */
+ dhd_rtt_delete_geofence_target_list(dhdp);
+ /* Cancel pending retry timer if any */
+ if (delayed_work_pending(&rtt_status->rtt_retry_timer)) {
+ cancel_delayed_work_sync(&rtt_status->rtt_retry_timer);
+ }
+ /* Remove if any pending proxd timeout for nan-rtt */
+ target_info = &rtt_status->rtt_config.target_info[rtt_status->cur_idx];
+ if (target_info && target_info->peer == RTT_PEER_NAN) {
+ /* Cancel pending proxd timeout work if any */
+ if (delayed_work_pending(&rtt_status->proxd_timeout)) {
+ cancel_delayed_work_sync(&rtt_status->proxd_timeout);
+ }
+ }
+ /* Delete if any directed nan rtt session */
+ dhd_rtt_delete_nan_session(dhdp);
+#endif /* RTT_SUPPORT */
+ /* Clear the NDP ID array and dp count */
+ for (i = 0; i < NAN_MAX_NDP_PEER; i++) {
+ nancfg->ndp_id[i] = 0;
+ }
+ nancfg->nan_dp_count = 0;
+ if (nancfg->nan_ndp_peer_info) {
+ MFREE(cfg->osh, nancfg->nan_ndp_peer_info,
+ nancfg->max_ndp_count * sizeof(nan_ndp_peer_t));
+ nancfg->nan_ndp_peer_info = NULL;
+ }
+ if (nancfg->ndi) {
+ MFREE(cfg->osh, nancfg->ndi,
+ nancfg->max_ndi_supported * sizeof(*nancfg->ndi));
+ nancfg->ndi = NULL;
+ }
+ wl_cfg80211_concurrent_roam(cfg, false);
+ return;
+}
+
+/*
+ * Deferred NAN disable work, scheduled with a NAN_DISABLE_CMD_DELAY
+ * delay so that any active NAN datapaths can be torn down first.
+ */
+void
+wl_cfgnan_delayed_disable(struct work_struct *work)
+{
+ struct bcm_cfg80211 *cfg = NULL;
+ struct net_device *ndev = NULL;
+ wl_nancfg_t *nancfg = NULL;
+
+ BCM_SET_CONTAINER_OF(nancfg, work, wl_nancfg_t, nan_disable.work);
+
+ cfg = nancfg->cfg;
+
+ rtnl_lock();
+ if (nancfg->nan_enable == true) {
+ wl_cfgnan_disable(cfg);
+ ndev = bcmcfg_to_prmry_ndev(cfg);
+ wl_cfgvendor_nan_send_async_disable_resp(ndev->ieee80211_ptr);
+ } else {
+ WL_INFORM_MEM(("nan is in disabled state\n"));
+ }
+ rtnl_unlock();
+
+ DHD_NAN_WAKE_UNLOCK(cfg->pub);
+
+ return;
+}
+
+int
+wl_cfgnan_stop_handler(struct net_device *ndev,
+ struct bcm_cfg80211 *cfg)
+{
+ bcm_iov_batch_buf_t *nan_buf = NULL;
+ s32 ret = BCME_OK;
+ uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
+ wl_nan_iov_t *nan_iov_data = NULL;
+ uint32 status;
+ uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+ wl_nancfg_t *nancfg = cfg->nancfg;
+
+ NAN_DBG_ENTER();
+ NAN_MUTEX_LOCK();
+
+ if (!nancfg->nan_enable) {
+ WL_INFORM(("Nan is not enabled\n"));
+ ret = BCME_OK;
+ goto fail;
+ }
+
+ if (dhdp->up != DHD_BUS_DOWN) {
+ /*
+		 * The framework does cleanup (iface remove) on the disable command,
+		 * so avoid sending the event to prevent repeated iface delete calls.
+ */
+ WL_INFORM_MEM(("[NAN] Disabling Nan events\n"));
+ wl_cfgnan_config_eventmask(ndev, cfg, 0, true);
+
+ nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
+ if (!nan_buf) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ ret = BCME_NOMEM;
+ goto fail;
+ }
+
+ nan_iov_data = MALLOCZ(cfg->osh, sizeof(*nan_iov_data));
+ if (!nan_iov_data) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ ret = BCME_NOMEM;
+ goto fail;
+ }
+
+ nan_iov_data->nan_iov_len = NAN_IOCTL_BUF_SIZE;
+ nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
+ nan_buf->count = 0;
+ nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
+ nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
+
+ ret = wl_cfgnan_enable_handler(nan_iov_data, false);
+ if (unlikely(ret)) {
+ WL_ERR(("nan disable handler failed\n"));
+ goto fail;
+ }
+ nan_buf->count++;
+ nan_buf->is_set = true;
+ nan_buf_size -= nan_iov_data->nan_iov_len;
+ bzero(resp_buf, sizeof(resp_buf));
+ ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
+ (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
+ if (unlikely(ret) || unlikely(status)) {
+ WL_ERR(("nan disable failed ret = %d status = %d\n", ret, status));
+ goto fail;
+ }
+ /* Enable back TDLS if connected interface is <= 1 */
+ wl_cfg80211_tdls_config(cfg, TDLS_STATE_IF_DELETE, false);
+ }
+
+ if (!nancfg->notify_user) {
+ wl_cfgnan_send_stop_event(cfg);
+ }
+fail:
+ /* Resetting instance ID mask */
+ nancfg->inst_id_start = 0;
+ memset(nancfg->svc_inst_id_mask, 0, sizeof(nancfg->svc_inst_id_mask));
+ memset(nancfg->svc_info, 0, NAN_MAX_SVC_INST * sizeof(nan_svc_info_t));
+ nancfg->nan_enable = false;
+ WL_INFORM_MEM(("[NAN] Disable done\n"));
+
+ if (nan_buf) {
+ MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
+ }
+ if (nan_iov_data) {
+ MFREE(cfg->osh, nan_iov_data, sizeof(*nan_iov_data));
+ }
+
+ NAN_MUTEX_UNLOCK();
+ NAN_DBG_EXIT();
+ return ret;
+}
+
+int
+wl_cfgnan_config_handler(struct net_device *ndev, struct bcm_cfg80211 *cfg,
+ nan_config_cmd_data_t *cmd_data, uint32 nan_attr_mask)
+{
+ bcm_iov_batch_buf_t *nan_buf = NULL;
+ s32 ret = BCME_OK;
+ uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
+ wl_nan_iov_t *nan_iov_data = NULL;
+ uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
+
+ NAN_DBG_ENTER();
+
+	/* NAN needs to be enabled before configuring/updating params */
+ if (!cfg->nancfg->nan_enable) {
+ WL_INFORM(("nan is not enabled\n"));
+ ret = BCME_NOTENABLED;
+ goto fail;
+ }
+
+ nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
+ if (!nan_buf) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ ret = BCME_NOMEM;
+ goto fail;
+ }
+
+ nan_iov_data = MALLOCZ(cfg->osh, sizeof(*nan_iov_data));
+ if (!nan_iov_data) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ ret = BCME_NOMEM;
+ goto fail;
+ }
+
+ nan_iov_data->nan_iov_len = NAN_IOCTL_BUF_SIZE;
+ nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
+ nan_buf->count = 0;
+ nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
+ nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
+
+ /* setting sid beacon val */
+ if ((nan_attr_mask & NAN_ATTR_SID_BEACON_CONFIG) ||
+ (nan_attr_mask & NAN_ATTR_SUB_SID_BEACON_CONFIG)) {
+ ret = wl_cfgnan_set_sid_beacon_val(cmd_data, nan_iov_data, nan_attr_mask);
+ if (unlikely(ret)) {
+ WL_ERR(("sid_beacon sub_cmd set failed\n"));
+ goto fail;
+ }
+ nan_buf->count++;
+ }
+
+ /* setting master preference and random factor */
+ if (cmd_data->metrics.random_factor ||
+ cmd_data->metrics.master_pref) {
+ ret = wl_cfgnan_set_election_metric(cmd_data, nan_iov_data,
+ nan_attr_mask);
+ if (unlikely(ret)) {
+ WL_ERR(("election_metric sub_cmd set failed\n"));
+ goto fail;
+ } else {
+ nan_buf->count++;
+ }
+ }
+
+ /* setting hop count limit or threshold */
+ if (nan_attr_mask & NAN_ATTR_HOP_COUNT_LIMIT_CONFIG) {
+ ret = wl_cfgnan_set_hop_count_limit(cmd_data, nan_iov_data);
+ if (unlikely(ret)) {
+ WL_ERR(("hop_count_limit sub_cmd set failed\n"));
+ goto fail;
+ }
+ nan_buf->count++;
+ }
+
+	/* setting rssi proximity values for 2.4GHz and 5GHz */
+ ret = wl_cfgnan_set_rssi_proximity(cmd_data, nan_iov_data,
+ nan_attr_mask);
+ if (unlikely(ret)) {
+ WL_ERR(("2.4GHz/5GHz rssi proximity threshold set failed\n"));
+ goto fail;
+ } else {
+ nan_buf->count++;
+ }
+
+ /* setting nan awake dws */
+ ret = wl_cfgnan_set_awake_dws(ndev, cmd_data, nan_iov_data,
+ cfg, nan_attr_mask);
+ if (unlikely(ret)) {
+ WL_ERR(("nan awake dws set failed\n"));
+ goto fail;
+ } else {
+ nan_buf->count++;
+ }
+
+ /* TODO: Add below code once use_ndpe_attr is being updated by framework
+ * If NDPE is enabled (cfg.nancfg.ndpe_enabled) and use_ndpe_attr is reset
+ * by framework, then disable NDPE using nan ctrl2 configuration setting.
+ * Else if NDPE is disabled and use_ndpe_attr is set by framework enable NDPE in FW
+ */
+
+ if (cmd_data->disc_ind_cfg) {
+ /* Disable events */
+ WL_TRACE(("Disable events based on flag\n"));
+ ret = wl_cfgnan_config_eventmask(ndev, cfg,
+ cmd_data->disc_ind_cfg, false);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to config disc ind flag in event_mask, ret = %d\n",
+ ret));
+ goto fail;
+ }
+ }
+
+ if ((cfg->nancfg->support_5g) && ((cmd_data->dwell_time[1]) ||
+ (cmd_data->scan_period[1]))) {
+ /* setting scan params */
+ ret = wl_cfgnan_set_nan_scan_params(ndev, cfg,
+ cmd_data, cfg->nancfg->support_5g, nan_attr_mask);
+ if (unlikely(ret)) {
+ WL_ERR(("scan params set failed for 5g\n"));
+ goto fail;
+ }
+ }
+ if ((cmd_data->dwell_time[0]) ||
+ (cmd_data->scan_period[0])) {
+ ret = wl_cfgnan_set_nan_scan_params(ndev, cfg, cmd_data, 0, nan_attr_mask);
+ if (unlikely(ret)) {
+ WL_ERR(("scan params set failed for 2g\n"));
+ goto fail;
+ }
+ }
+ nan_buf->is_set = true;
+ nan_buf_size -= nan_iov_data->nan_iov_len;
+
+ if (nan_buf->count) {
+ bzero(resp_buf, sizeof(resp_buf));
+ ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size,
+ &(cmd_data->status),
+ (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
+ if (unlikely(ret) || unlikely(cmd_data->status)) {
+ WL_ERR((" nan config handler failed ret = %d status = %d\n",
+ ret, cmd_data->status));
+ goto fail;
+ }
+ } else {
+ WL_DBG(("No commands to send\n"));
+ }
+
+ if ((!cmd_data->bmap) || (cmd_data->avail_params.duration == NAN_BAND_INVALID) ||
+ (!cmd_data->chanspec[0])) {
+ WL_TRACE(("mandatory arguments are not present to set avail\n"));
+ ret = BCME_OK;
+ } else {
+ cmd_data->avail_params.chanspec[0] = cmd_data->chanspec[0];
+ cmd_data->avail_params.bmap = cmd_data->bmap;
+ /* 1=local, 2=peer, 3=ndc, 4=immutable, 5=response, 6=counter */
+ ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg),
+ cfg, &cmd_data->avail_params, WL_AVAIL_LOCAL);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to set avail value with type local\n"));
+ goto fail;
+ }
+
+ ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg),
+ cfg, &cmd_data->avail_params, WL_AVAIL_NDC);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to set avail value with type ndc\n"));
+ goto fail;
+ }
+ }
+
+ if (cmd_data->nmi_rand_intvl > 0) {
+#ifdef WL_NAN_ENABLE_MERGE
+		/* Cluster merge enable/disable is set using the nmi random interval config param.
+		 * If the MSB (31st bit) is set, a cluster merge enable/disable config is present;
+		 * the 30th bit carries the cluster merge enable/disable value to set in firmware.
+		 */
+ if (cmd_data->nmi_rand_intvl & NAN_NMI_RAND_PVT_CMD_VENDOR) {
+ uint8 merge_enable;
+ uint8 lwt_mode_enable;
+ int status = BCME_OK;
+
+ merge_enable = !!(cmd_data->nmi_rand_intvl &
+ NAN_NMI_RAND_CLUSTER_MERGE_ENAB);
+ ret = wl_cfgnan_set_enable_merge(bcmcfg_to_prmry_ndev(cfg), cfg,
+ merge_enable, &status);
+ if (unlikely(ret) || unlikely(status)) {
+ WL_ERR(("Enable merge: failed to set config request [%d]\n", ret));
+ /* As there is no cmd_reply, check if error is in status or ret */
+ if (status) {
+ ret = status;
+ }
+ goto fail;
+ }
+
+ lwt_mode_enable = !!(cmd_data->nmi_rand_intvl &
+ NAN_NMI_RAND_AUTODAM_LWT_MODE_ENAB);
+
+ /* set CFG CTRL2 flags1 and flags2 */
+ ret = wl_cfgnan_config_control_flag(ndev, cfg,
+ WL_NAN_CTRL2_FLAG1_AUTODAM_LWT_MODE,
+ 0, WL_NAN_CMD_CFG_NAN_CONFIG2,
+ &status, lwt_mode_enable);
+ if (unlikely(ret) || unlikely(status)) {
+ WL_ERR(("Enable dam lwt mode: "
+ "failed to set config request [%d]\n", ret));
+ /* As there is no cmd_reply, check if error is in status or ret */
+ if (status) {
+ ret = status;
+ }
+ goto fail;
+ }
+
+ /* reset pvt merge enable bits */
+ cmd_data->nmi_rand_intvl &= ~(NAN_NMI_RAND_PVT_CMD_VENDOR |
+ NAN_NMI_RAND_CLUSTER_MERGE_ENAB |
+ NAN_NMI_RAND_AUTODAM_LWT_MODE_ENAB);
+ }
+#endif /* WL_NAN_ENABLE_MERGE */
+
+ if (cmd_data->nmi_rand_intvl) {
+			/* Run-time NMI randomization is not supported as of now;
+			 * a random MAC is used only during nan enable/iface-create.
+			 */
+ WL_ERR(("run time nmi rand not supported, ignoring for now\n"));
+ }
+ }
+
+ if (cmd_data->dw_early_termination > 0) {
+ WL_ERR(("dw early termination is not supported, ignoring for now\n"));
+ }
+
+ if (nan_attr_mask & NAN_ATTR_DISC_BEACON_INTERVAL) {
+ ret = wl_cfgnan_set_disc_beacon_interval_handler(ndev, cfg,
+ cmd_data->disc_bcn_interval);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to set beacon interval\n"));
+ goto fail;
+ }
+ }
+
+fail:
+ if (nan_buf) {
+ MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
+ }
+ if (nan_iov_data) {
+ MFREE(cfg->osh, nan_iov_data, sizeof(*nan_iov_data));
+ }
+
+ NAN_DBG_EXIT();
+ return ret;
+}
+
+int
+wl_cfgnan_support_handler(struct net_device *ndev,
+ struct bcm_cfg80211 *cfg, nan_config_cmd_data_t *cmd_data)
+{
+ /* TODO: */
+ return BCME_OK;
+}
+
+int
+wl_cfgnan_status_handler(struct net_device *ndev,
+ struct bcm_cfg80211 *cfg, nan_config_cmd_data_t *cmd_data)
+{
+ /* TODO: */
+ return BCME_OK;
+}
+
+#ifdef WL_NAN_DISC_CACHE
+static
+nan_svc_info_t *
+wl_cfgnan_get_svc_inst(struct bcm_cfg80211 *cfg,
+ wl_nan_instance_id svc_inst_id, uint8 ndp_id)
+{
+ uint8 i, j;
+ wl_nancfg_t *nancfg = cfg->nancfg;
+ if (ndp_id) {
+ for (i = 0; i < NAN_MAX_SVC_INST; i++) {
+ for (j = 0; j < NAN_MAX_SVC_INST; j++) {
+ if (nancfg->svc_info[i].ndp_id[j] == ndp_id) {
+ return &nancfg->svc_info[i];
+ }
+ }
+ }
+ } else if (svc_inst_id) {
+ for (i = 0; i < NAN_MAX_SVC_INST; i++) {
+ if (nancfg->svc_info[i].svc_id == svc_inst_id) {
+ return &nancfg->svc_info[i];
+ }
+ }
+
+ }
+ return NULL;
+}
+
+static int
+wl_cfgnan_svc_inst_add_ndp(struct bcm_cfg80211 *cfg,
+ wl_nan_instance_id svc_inst_id, uint8 ndp_id)
+{
+ int ret = BCME_OK, i;
+ nan_svc_info_t *svc_info;
+
+ svc_info = wl_cfgnan_get_svc_inst(cfg, svc_inst_id, 0);
+ if (svc_info) {
+ for (i = 0; i < NAN_MAX_SVC_INST; i++) {
+ if (!svc_info->ndp_id[i]) {
+ WL_TRACE(("Found empty field\n"));
+ break;
+ }
+ }
+ if (i == NAN_MAX_SVC_INST) {
+			WL_ERR(("%s:cannot accommodate ndp id\n", __FUNCTION__));
+ ret = BCME_NORESOURCE;
+ goto done;
+ }
+ svc_info->ndp_id[i] = ndp_id;
+ }
+
+done:
+ return ret;
+}
+
+static int
+wl_cfgnan_svc_inst_del_ndp(struct bcm_cfg80211 *cfg,
+ wl_nan_instance_id svc_inst_id, uint8 ndp_id)
+{
+ int ret = BCME_OK, i;
+ nan_svc_info_t *svc_info;
+
+ svc_info = wl_cfgnan_get_svc_inst(cfg, svc_inst_id, 0);
+
+ if (svc_info) {
+ for (i = 0; i < NAN_MAX_SVC_INST; i++) {
+ if (svc_info->ndp_id[i] == ndp_id) {
+ svc_info->ndp_id[i] = 0;
+ break;
+ }
+ }
+ if (i == NAN_MAX_SVC_INST) {
+ WL_ERR(("couldn't find entry for ndp id = %d\n", ndp_id));
+ ret = BCME_NOTFOUND;
+ }
+ }
+ return ret;
+}
+
+nan_ranging_inst_t *
+wl_cfgnan_check_for_ranging(struct bcm_cfg80211 *cfg, struct ether_addr *peer)
+{
+ uint8 i;
+ if (peer) {
+ for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
+ if (!memcmp(peer, &cfg->nancfg->nan_ranging_info[i].peer_addr,
+ ETHER_ADDR_LEN)) {
+ return &(cfg->nancfg->nan_ranging_info[i]);
+ }
+ }
+ }
+ return NULL;
+}
+
+nan_ranging_inst_t *
+wl_cfgnan_get_rng_inst_by_id(struct bcm_cfg80211 *cfg, uint8 rng_id)
+{
+ uint8 i;
+ if (rng_id) {
+ for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
+ if (cfg->nancfg->nan_ranging_info[i].range_id == rng_id)
+ {
+ return &(cfg->nancfg->nan_ranging_info[i]);
+ }
+ }
+ }
+ WL_ERR(("Couldn't find the ranging instance for rng_id %d\n", rng_id));
+ return NULL;
+}
+
+/*
+ * Find the ranging instance for the given peer;
+ * if none exists, create one with the given range role
+ */
+nan_ranging_inst_t *
+wl_cfgnan_get_ranging_inst(struct bcm_cfg80211 *cfg, struct ether_addr *peer,
+ nan_range_role_t range_role)
+{
+ nan_ranging_inst_t *ranging_inst = NULL;
+ uint8 i;
+
+ if (!peer) {
+ WL_ERR(("Peer address is NULL"));
+ goto done;
+ }
+
+ ranging_inst = wl_cfgnan_check_for_ranging(cfg, peer);
+ if (ranging_inst) {
+ goto done;
+ }
+ WL_TRACE(("Creating Ranging instance \n"));
+
+ for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
+ if (cfg->nancfg->nan_ranging_info[i].in_use == FALSE) {
+ break;
+ }
+ }
+
+ if (i == NAN_MAX_RANGING_INST) {
+ WL_ERR(("No buffer available for the ranging instance"));
+ goto done;
+ }
+ ranging_inst = &cfg->nancfg->nan_ranging_info[i];
+ memcpy(&ranging_inst->peer_addr, peer, ETHER_ADDR_LEN);
+ ranging_inst->range_status = NAN_RANGING_REQUIRED;
+ ranging_inst->prev_distance_mm = INVALID_DISTANCE;
+ ranging_inst->range_role = range_role;
+ ranging_inst->in_use = TRUE;
+
+done:
+ return ranging_inst;
+}
+#endif /* WL_NAN_DISC_CACHE */
+
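+/*
+ * Illustrative sketch (not part of this patch): the find-or-create pattern that
+ * wl_cfgnan_get_ranging_inst() applies to the fixed-size ranging-instance pool.
+ * Types and sizes here are simplified stand-ins for the driver structures.
+ */
+#if 0 /* standalone example, compile separately */
+#include <stdint.h>
+#include <string.h>
+
+#define EX_MAX_INST 8
+#define EX_ADDR_LEN 6
+
+struct ex_rng_inst {
+	uint8_t peer[EX_ADDR_LEN];
+	int in_use;
+};
+
+static struct ex_rng_inst ex_pool[EX_MAX_INST];
+
+static struct ex_rng_inst *ex_get_inst(const uint8_t *peer)
+{
+	int i;
+
+	/* 1. return an existing instance for this peer */
+	for (i = 0; i < EX_MAX_INST; i++) {
+		if (ex_pool[i].in_use &&
+		    !memcmp(ex_pool[i].peer, peer, EX_ADDR_LEN)) {
+			return &ex_pool[i];
+		}
+	}
+	/* 2. otherwise claim the first free slot */
+	for (i = 0; i < EX_MAX_INST; i++) {
+		if (!ex_pool[i].in_use) {
+			memcpy(ex_pool[i].peer, peer, EX_ADDR_LEN);
+			ex_pool[i].in_use = 1;
+			return &ex_pool[i];
+		}
+	}
+	return NULL; /* pool exhausted, mirrors the NAN_MAX_RANGING_INST check */
+}
+#endif
+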
+static int
+process_resp_buf(void *iov_resp,
+ uint8 *instance_id, uint16 sub_cmd_id)
+{
+ int res = BCME_OK;
+ NAN_DBG_ENTER();
+
+ if (sub_cmd_id == WL_NAN_CMD_DATA_DATAREQ) {
+ wl_nan_dp_req_ret_t *dpreq_ret = NULL;
+ dpreq_ret = (wl_nan_dp_req_ret_t *)(iov_resp);
+ *instance_id = dpreq_ret->ndp_id;
+ WL_TRACE(("%s: Initiator NDI: " MACDBG "\n",
+ __FUNCTION__, MAC2STRDBG(dpreq_ret->indi.octet)));
+ } else if (sub_cmd_id == WL_NAN_CMD_RANGE_REQUEST) {
+ wl_nan_range_id *range_id = NULL;
+ range_id = (wl_nan_range_id *)(iov_resp);
+ *instance_id = *range_id;
+ WL_TRACE(("Range id: %d\n", *range_id));
+ }
+ WL_DBG(("instance_id: %d\n", *instance_id));
+ NAN_DBG_EXIT();
+ return res;
+}
+
+int
+wl_cfgnan_cancel_ranging(struct net_device *ndev,
+ struct bcm_cfg80211 *cfg, uint8 *range_id, uint8 flags, uint32 *status)
+{
+ bcm_iov_batch_buf_t *nan_buf = NULL;
+ s32 ret = BCME_OK;
+ uint16 nan_iov_start, nan_iov_end;
+ uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
+ uint16 subcmd_len;
+ bcm_iov_batch_subcmd_t *sub_cmd = NULL;
+ wl_nan_iov_t *nan_iov_data = NULL;
+ uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
+ wl_nan_range_cancel_ext_t rng_cncl;
+ uint8 size_of_iov;
+
+ NAN_DBG_ENTER();
+
+ if (*range_id == 0) {
+ WL_ERR(("Invalid Range ID\n"));
+ ret = BCME_BADARG;
+ goto fail;
+ }
+
+ if (cfg->nancfg->version >= NAN_RANGE_EXT_CANCEL_SUPPORT_VER) {
+ size_of_iov = sizeof(rng_cncl);
+ } else {
+ size_of_iov = sizeof(*range_id);
+ }
+
+ bzero(&rng_cncl, sizeof(rng_cncl));
+ rng_cncl.range_id = *range_id;
+ rng_cncl.flags = flags;
+
+ nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
+ if (!nan_buf) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ ret = BCME_NOMEM;
+ goto fail;
+ }
+
+ nan_iov_data = MALLOCZ(cfg->osh, sizeof(*nan_iov_data));
+ if (!nan_iov_data) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ ret = BCME_NOMEM;
+ goto fail;
+ }
+
+ nan_iov_data->nan_iov_len = nan_iov_start = NAN_IOCTL_BUF_SIZE;
+ nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
+ nan_buf->count = 0;
+ nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
+ nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
+ sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
+
+ ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
+ size_of_iov, &subcmd_len);
+ if (unlikely(ret)) {
+ WL_ERR(("nan_sub_cmd check failed\n"));
+ goto fail;
+ }
+
+ sub_cmd->id = htod16(WL_NAN_CMD_RANGE_CANCEL);
+ sub_cmd->len = sizeof(sub_cmd->u.options) + size_of_iov;
+ sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
+
+ /* Reduce the iov_len size by subcmd_len */
+ nan_iov_data->nan_iov_len -= subcmd_len;
+ nan_iov_end = nan_iov_data->nan_iov_len;
+ nan_buf_size = (nan_iov_start - nan_iov_end);
+
+ if (size_of_iov >= sizeof(rng_cncl)) {
+ (void)memcpy_s(sub_cmd->data, nan_iov_data->nan_iov_len,
+ &rng_cncl, size_of_iov);
+ } else {
+ (void)memcpy_s(sub_cmd->data, nan_iov_data->nan_iov_len,
+ range_id, size_of_iov);
+ }
+
+ nan_buf->is_set = true;
+ nan_buf->count++;
+ bzero(resp_buf, sizeof(resp_buf));
+ ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, status,
+ (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
+ if (unlikely(ret) || unlikely(*status)) {
+ WL_ERR(("Range ID %d cancel failed ret %d status %d \n", *range_id, ret, *status));
+ goto fail;
+ }
+	WL_MEM(("Range cancel with Range ID [%d] successful\n", *range_id));
+
+ /* Resetting range id */
+ *range_id = 0;
+fail:
+ if (nan_buf) {
+ MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
+ }
+ if (nan_iov_data) {
+ MFREE(cfg->osh, nan_iov_data, sizeof(*nan_iov_data));
+ }
+ NAN_DBG_EXIT();
+ return ret;
+}
+
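+/*
+ * Illustrative sketch (not part of this patch): the batched iovar layout that
+ * wl_cfgnan_cancel_ranging() and most handlers in this file build - a batch
+ * header (version/count/is_set) followed by subcommands, each carrying an id,
+ * a length and a 32-bit aligned payload. The struct layouts below are
+ * simplified assumptions, not the real bcm_iov_batch_* definitions.
+ */
+#if 0 /* standalone example, compile separately */
+#include <stdint.h>
+#include <string.h>
+
+struct ex_batch_hdr {
+	uint16_t version;
+	uint8_t count;
+	uint8_t is_set;
+};
+
+struct ex_subcmd_hdr {
+	uint16_t id;
+	uint16_t len;     /* options + payload */
+	uint32_t options; /* e.g. an alignment flag */
+};
+
+/* Pack one subcommand into buf; returns bytes written or -1 on overflow. */
+static int ex_pack_subcmd(uint8_t *buf, size_t avail, uint16_t id,
+	const void *payload, uint16_t plen)
+{
+	struct ex_subcmd_hdr hdr;
+	size_t need = sizeof(hdr) + plen;
+
+	if (avail < need) {
+		return -1; /* mirrors the wl_cfg_nan_check_cmd_len() bounds check */
+	}
+	hdr.id = id;
+	hdr.len = (uint16_t)(sizeof(hdr.options) + plen);
+	hdr.options = 0;
+	memcpy(buf, &hdr, sizeof(hdr));
+	memcpy(buf + sizeof(hdr), payload, plen);
+	return (int)need;
+}
+#endif
+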
+#ifdef WL_NAN_DISC_CACHE
+static void
+wl_cfgnan_clear_svc_cache(struct bcm_cfg80211 *cfg,
+ wl_nan_instance_id svc_id)
+{
+ nan_svc_info_t *svc;
+ svc = wl_cfgnan_get_svc_inst(cfg, svc_id, 0);
+ if (svc) {
+ WL_DBG(("clearing cached svc info for svc id %d\n", svc_id));
+ memset(svc, 0, sizeof(*svc));
+ }
+}
+
+static int
+wl_cfgnan_cache_svc_info(struct bcm_cfg80211 *cfg,
+ nan_discover_cmd_data_t *cmd_data, uint16 cmd_id, bool update)
+{
+ int ret = BCME_OK;
+ int i;
+ nan_svc_info_t *svc_info;
+ uint8 svc_id = (cmd_id == WL_NAN_CMD_SD_SUBSCRIBE) ? cmd_data->sub_id :
+ cmd_data->pub_id;
+ wl_nancfg_t *nancfg = cfg->nancfg;
+
+ for (i = 0; i < NAN_MAX_SVC_INST; i++) {
+ if (update) {
+ if (nancfg->svc_info[i].svc_id == svc_id) {
+ svc_info = &nancfg->svc_info[i];
+ break;
+ } else {
+ continue;
+ }
+ }
+ if (!nancfg->svc_info[i].svc_id) {
+ svc_info = &nancfg->svc_info[i];
+ break;
+ }
+ }
+ if (i == NAN_MAX_SVC_INST) {
+		WL_ERR(("%s:cannot accommodate ranging session\n", __FUNCTION__));
+ ret = BCME_NORESOURCE;
+ goto fail;
+ }
+ if (cmd_data->sde_control_flag & NAN_SDE_CF_RANGING_REQUIRED) {
+ WL_TRACE(("%s: updating ranging info, enabling\n", __FUNCTION__));
+ svc_info->status = 1;
+ svc_info->ranging_interval = cmd_data->ranging_intvl_msec;
+ svc_info->ranging_ind = cmd_data->ranging_indication;
+ svc_info->ingress_limit = cmd_data->ingress_limit;
+ svc_info->egress_limit = cmd_data->egress_limit;
+ svc_info->ranging_required = 1;
+ } else {
+ WL_TRACE(("%s: updating ranging info, disabling\n", __FUNCTION__));
+ svc_info->status = 0;
+ svc_info->ranging_interval = 0;
+ svc_info->ranging_ind = 0;
+ svc_info->ingress_limit = 0;
+ svc_info->egress_limit = 0;
+ svc_info->ranging_required = 0;
+ }
+
+ /* Reset Range status flags on svc creation/update */
+ svc_info->svc_range_status = 0;
+ svc_info->flags = cmd_data->flags;
+
+ if (cmd_id == WL_NAN_CMD_SD_SUBSCRIBE) {
+ svc_info->svc_id = cmd_data->sub_id;
+ if ((cmd_data->flags & WL_NAN_SUB_ACTIVE) &&
+ (cmd_data->tx_match.dlen)) {
+ ret = memcpy_s(svc_info->tx_match_filter, sizeof(svc_info->tx_match_filter),
+ cmd_data->tx_match.data, cmd_data->tx_match.dlen);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy tx match filter data\n"));
+ goto fail;
+ }
+ svc_info->tx_match_filter_len = cmd_data->tx_match.dlen;
+ }
+ } else {
+ svc_info->svc_id = cmd_data->pub_id;
+ }
+ ret = memcpy_s(svc_info->svc_hash, sizeof(svc_info->svc_hash),
+ cmd_data->svc_hash.data, WL_NAN_SVC_HASH_LEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy svc hash\n"));
+ }
+fail:
+ return ret;
+
+}
+
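+/*
+ * Illustrative sketch (not part of this patch): the memcpy_s pattern used
+ * throughout this file - the destination capacity is passed explicitly so a
+ * too-long source fails instead of overflowing. A minimal stand-in, assuming
+ * the driver's memcpy_s returns a BCME_OK-style code rather than C11 errno_t.
+ */
+#if 0 /* standalone example, compile separately */
+#include <string.h>
+
+static int ex_memcpy_s(void *dst, size_t dst_cap, const void *src, size_t n)
+{
+	if (!dst || !src || n > dst_cap) {
+		return -1; /* caller logs and bails out, cf. "Failed to copy ..." */
+	}
+	memcpy(dst, src, n);
+	return 0;
+}
+#endif
+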
+#ifdef RTT_SUPPORT
+/*
+ * Reset the instance for an Initiator.
+ * For a Responder, remove the instance if there is
+ * no pending geofence target; otherwise reset it
+ */
+static void
+wl_cfgnan_reset_remove_ranging_instance(struct bcm_cfg80211 *cfg,
+ nan_ranging_inst_t *ranging_inst)
+{
+ dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
+ int8 index;
+ rtt_geofence_target_info_t* geofence_target;
+
+ ASSERT(ranging_inst);
+ if (!ranging_inst) {
+ return;
+ }
+
+ if ((ranging_inst->range_role == NAN_RANGING_ROLE_RESPONDER) ||
+ (ranging_inst->range_type == RTT_TYPE_NAN_DIRECTED)) {
+ /* Remove ranging instance for responder */
+ geofence_target = dhd_rtt_get_geofence_target(dhd,
+ &ranging_inst->peer_addr, &index);
+ if (!geofence_target) {
+ /* Remove rng inst if no pend target */
+ WL_INFORM_MEM(("Removing Ranging Instance "
+ "peer: " MACDBG "\n",
+ MAC2STRDBG(&ranging_inst->peer_addr)));
+ bzero(ranging_inst, sizeof(*ranging_inst));
+ } else {
+ ranging_inst->range_status = NAN_RANGING_REQUIRED;
+ /* resolve range role concurrency */
+ WL_INFORM_MEM(("Resolving Role Concurrency constraint, peer : "
+ MACDBG "\n", MAC2STRDBG(&ranging_inst->peer_addr)));
+ ranging_inst->role_concurrency_status = FALSE;
+ }
+ } else {
+ /* For geofence Initiator */
+ ranging_inst->range_status = NAN_RANGING_REQUIRED;
+ }
+}
+
+/*
+ * Forcefully remove the ranging instance,
+ * along with any corresponding geofence target
+ */
+static void
+wl_cfgnan_remove_ranging_instance(struct bcm_cfg80211 *cfg,
+ nan_ranging_inst_t *ranging_inst)
+{
+ dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
+ int8 index;
+ rtt_geofence_target_info_t* geofence_target;
+
+ ASSERT(ranging_inst);
+ if (!ranging_inst) {
+ return;
+ }
+
+ geofence_target = dhd_rtt_get_geofence_target(dhd,
+ &ranging_inst->peer_addr, &index);
+ if (geofence_target) {
+ dhd_rtt_remove_geofence_target(dhd,
+ &geofence_target->peer_addr);
+ }
+ WL_INFORM_MEM(("Removing Ranging Instance " MACDBG "\n",
+ MAC2STRDBG(&(ranging_inst->peer_addr))));
+ bzero(ranging_inst, sizeof(nan_ranging_inst_t));
+
+ return;
+}
+
+static bool
+wl_cfgnan_clear_svc_from_ranging_inst(struct bcm_cfg80211 *cfg,
+ nan_ranging_inst_t *ranging_inst, nan_svc_info_t *svc)
+{
+ int i = 0;
+ bool cleared = FALSE;
+
+ if (svc && ranging_inst->in_use) {
+ for (i = 0; i < MAX_SUBSCRIBES; i++) {
+ if (svc == ranging_inst->svc_idx[i]) {
+ ranging_inst->num_svc_ctx--;
+ ranging_inst->svc_idx[i] = NULL;
+ cleared = TRUE;
+				/*
+				 * The list is kept duplicate-free,
+				 * so we can stop at the first match
+				 */
+ break;
+ }
+ }
+ }
+ return cleared;
+}
+
+static int
+wl_cfgnan_clear_svc_from_all_ranging_inst(struct bcm_cfg80211 *cfg, uint8 svc_id)
+{
+ nan_ranging_inst_t *ranging_inst;
+ int i = 0;
+ int ret = BCME_OK;
+
+ nan_svc_info_t *svc = wl_cfgnan_get_svc_inst(cfg, svc_id, 0);
+ if (!svc) {
+ WL_ERR(("\n svc not found \n"));
+ ret = BCME_NOTFOUND;
+ goto done;
+ }
+ for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
+ ranging_inst = &(cfg->nancfg->nan_ranging_info[i]);
+ wl_cfgnan_clear_svc_from_ranging_inst(cfg, ranging_inst, svc);
+ }
+
+done:
+ return ret;
+}
+
+static int
+wl_cfgnan_ranging_clear_publish(struct bcm_cfg80211 *cfg,
+ struct ether_addr *peer, uint8 svc_id)
+{
+ nan_ranging_inst_t *ranging_inst = NULL;
+ nan_svc_info_t *svc = NULL;
+ bool cleared = FALSE;
+ int ret = BCME_OK;
+
+ ranging_inst = wl_cfgnan_check_for_ranging(cfg, peer);
+ if (!ranging_inst || !ranging_inst->in_use) {
+ goto done;
+ }
+
+ WL_INFORM_MEM(("Check clear Ranging for pub update, sub id = %d,"
+ " range_id = %d, peer addr = " MACDBG " \n", svc_id,
+ ranging_inst->range_id, MAC2STRDBG(peer)));
+ svc = wl_cfgnan_get_svc_inst(cfg, svc_id, 0);
+ if (!svc) {
+ WL_ERR(("\n svc not found, svc_id = %d\n", svc_id));
+ ret = BCME_NOTFOUND;
+ goto done;
+ }
+
+ cleared = wl_cfgnan_clear_svc_from_ranging_inst(cfg, ranging_inst, svc);
+ if (!cleared) {
+		/* An update is needed only if this svc was actually cleared */
+ ret = BCME_NOTFOUND;
+ goto done;
+ }
+
+ wl_cfgnan_terminate_ranging_session(cfg, ranging_inst);
+ wl_cfgnan_reset_geofence_ranging(cfg, NULL,
+ RTT_SCHED_RNG_TERM_PUB_RNG_CLEAR, TRUE);
+
+done:
+ return ret;
+}
+
+/* API to terminate/clear all directed nan-rtt sessions.
+* Can be called from framework RTT stop context
+*/
+int
+wl_cfgnan_terminate_directed_rtt_sessions(struct net_device *ndev,
+ struct bcm_cfg80211 *cfg)
+{
+ nan_ranging_inst_t *ranging_inst;
+ int i, ret = BCME_OK;
+ uint32 status;
+
+ for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
+ ranging_inst = &cfg->nancfg->nan_ranging_info[i];
+ if (ranging_inst->range_id && ranging_inst->range_type == RTT_TYPE_NAN_DIRECTED) {
+ if (NAN_RANGING_IS_IN_PROG(ranging_inst->range_status)) {
+ ret = wl_cfgnan_cancel_ranging(ndev, cfg, &ranging_inst->range_id,
+ NAN_RNG_TERM_FLAG_IMMEDIATE, &status);
+ if (unlikely(ret) || unlikely(status)) {
+ WL_ERR(("nan range cancel failed ret = %d status = %d\n",
+ ret, status));
+ }
+ }
+ wl_cfgnan_reset_geofence_ranging(cfg, ranging_inst,
+ RTT_SHCED_HOST_DIRECTED_TERM, FALSE);
+ }
+ }
+ return ret;
+}
+
+/*
+ * Suspend the ongoing geofence ranging session with the given peer.
+ * The request is dropped (nothing is done) if:
+ *  - the peer is NULL,
+ *  - no geofence ranging is in progress, or
+ *  - ranging is in progress but not with the given peer
+ */
+int
+wl_cfgnan_suspend_geofence_rng_session(struct net_device *ndev,
+ struct ether_addr *peer, int suspend_reason, u8 cancel_flags)
+{
+ int ret = BCME_OK;
+ uint32 status;
+ nan_ranging_inst_t *ranging_inst = NULL;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ int suspend_req_dropped_at = 0;
+ dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
+
+ UNUSED_PARAMETER(suspend_req_dropped_at);
+
+ ASSERT(peer);
+ if (!peer) {
+ WL_DBG(("Incoming Peer is NULL, suspend req dropped\n"));
+ suspend_req_dropped_at = 1;
+ goto exit;
+ }
+
+ if (!wl_ranging_geofence_session_with_peer(cfg, peer)) {
+ WL_DBG(("Geofence Ranging not in progress with given peer,"
+ " suspend req dropped\n"));
+ suspend_req_dropped_at = 2;
+ goto exit;
+ }
+
+ ranging_inst = wl_cfgnan_check_for_ranging(cfg, peer);
+ if (ranging_inst) {
+ cancel_flags |= NAN_RNG_TERM_FLAG_IMMEDIATE;
+ ret = wl_cfgnan_cancel_ranging(ndev, cfg,
+ &ranging_inst->range_id, cancel_flags, &status);
+ if (unlikely(ret) || unlikely(status)) {
+			WL_ERR(("Geofence Range suspend failed, err = %d, status = %d, "
+				"suspend_reason = %d, peer: " MACDBG " \n",
+				ret, status, suspend_reason, MAC2STRDBG(peer)));
+ }
+
+ ranging_inst->range_status = NAN_RANGING_REQUIRED;
+ dhd_rtt_update_geofence_sessions_cnt(dhd, FALSE,
+ &ranging_inst->peer_addr);
+
+ if (ranging_inst->range_role == NAN_RANGING_ROLE_RESPONDER &&
+ ranging_inst->role_concurrency_status) {
+ /* resolve range role concurrency */
+ WL_INFORM_MEM(("Resolving Role Concurrency constraint, peer : "
+ MACDBG "\n", MAC2STRDBG(&ranging_inst->peer_addr)));
+ ranging_inst->role_concurrency_status = FALSE;
+ }
+
+ WL_INFORM_MEM(("Geofence Range suspended, "
+ " suspend_reason = %d, peer: " MACDBG " \n",
+ suspend_reason, MAC2STRDBG(peer)));
+ }
+
+exit:
+ /* Post pending discovery results */
+ if (ranging_inst &&
+ ((suspend_reason == RTT_GEO_SUSPN_HOST_NDP_TRIGGER) ||
+ (suspend_reason == RTT_GEO_SUSPN_PEER_NDP_TRIGGER))) {
+ wl_cfgnan_disc_result_on_geofence_cancel(cfg, ranging_inst);
+ }
+
+ if (suspend_req_dropped_at) {
+ if (ranging_inst) {
+ WL_INFORM_MEM(("Ranging Suspend Req with peer: " MACDBG
+ ", dropped at = %d\n", MAC2STRDBG(&ranging_inst->peer_addr),
+ suspend_req_dropped_at));
+ } else {
+ WL_INFORM_MEM(("Ranging Suspend Req dropped at = %d\n",
+ suspend_req_dropped_at));
+ }
+ }
+ return ret;
+}
+
+/*
+ * suspends all geofence ranging sessions
+ * including initiators and responders
+ */
+void
+wl_cfgnan_suspend_all_geofence_rng_sessions(struct net_device *ndev,
+ int suspend_reason, u8 cancel_flags)
+{
+
+ uint8 i = 0;
+ int ret = BCME_OK;
+ uint32 status;
+ nan_ranging_inst_t *ranging_inst = NULL;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
+
+ WL_INFORM_MEM(("Suspending all geofence sessions: "
+ "suspend_reason = %d\n", suspend_reason));
+
+ cancel_flags |= NAN_RNG_TERM_FLAG_IMMEDIATE;
+ for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
+ ranging_inst = &cfg->nancfg->nan_ranging_info[i];
+ /* Cancel Ranging if in progress for rang_inst */
+ if (ranging_inst->in_use &&
+ NAN_RANGING_IS_IN_PROG(ranging_inst->range_status)) {
+ ret = wl_cfgnan_cancel_ranging(bcmcfg_to_prmry_ndev(cfg),
+ cfg, &ranging_inst->range_id,
+ NAN_RNG_TERM_FLAG_IMMEDIATE, &status);
+ if (unlikely(ret) || unlikely(status)) {
+ WL_ERR(("wl_cfgnan_suspend_all_geofence_rng_sessions: "
+ "nan range cancel failed ret = %d status = %d\n",
+ ret, status));
+ } else {
+ dhd_rtt_update_geofence_sessions_cnt(dhd, FALSE,
+ &ranging_inst->peer_addr);
+ wl_cfgnan_reset_remove_ranging_instance(cfg, ranging_inst);
+ }
+ }
+ }
+
+ return;
+
+}
+
+/*
+ * Terminate given ranging instance
+ * if no pending ranging sub service
+ */
+static void
+wl_cfgnan_terminate_ranging_session(struct bcm_cfg80211 *cfg,
+ nan_ranging_inst_t *ranging_inst)
+{
+ int ret = BCME_OK;
+ uint32 status;
+ dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
+
+ if (ranging_inst->num_svc_ctx != 0) {
+ /*
+ * Make sure to remove all svc_insts for range_inst
+ * in order to cancel ranging and remove target in caller
+ */
+ return;
+ }
+
+ /* Cancel Ranging if in progress for rang_inst */
+ if (NAN_RANGING_IS_IN_PROG(ranging_inst->range_status)) {
+ ret = wl_cfgnan_cancel_ranging(bcmcfg_to_prmry_ndev(cfg),
+ cfg, &ranging_inst->range_id,
+ NAN_RNG_TERM_FLAG_IMMEDIATE, &status);
+ if (unlikely(ret) || unlikely(status)) {
+ WL_ERR(("%s:nan range cancel failed ret = %d status = %d\n",
+ __FUNCTION__, ret, status));
+ } else {
+ WL_DBG(("Range cancelled \n"));
+ dhd_rtt_update_geofence_sessions_cnt(dhd, FALSE,
+ &ranging_inst->peer_addr);
+ }
+ }
+
+ /* Remove ranging instance and clean any corresponding target */
+ wl_cfgnan_remove_ranging_instance(cfg, ranging_inst);
+}
+
+/*
+ * Terminate all ranging sessions
+ * with no pending ranging sub service
+ */
+static void
+wl_cfgnan_terminate_all_obsolete_ranging_sessions(
+ struct bcm_cfg80211 *cfg)
+{
+ /* cancel all related ranging instances */
+ uint8 i = 0;
+ nan_ranging_inst_t *ranging_inst = NULL;
+
+ for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
+ ranging_inst = &cfg->nancfg->nan_ranging_info[i];
+ if (ranging_inst->in_use) {
+ wl_cfgnan_terminate_ranging_session(cfg, ranging_inst);
+ }
+ }
+
+ return;
+}
+
+/*
+ * Store svc_ctx for processing during RNG_RPT
+ * Return BCME_OK only when svc is added
+ */
+static int
+wl_cfgnan_update_ranging_svc_inst(nan_ranging_inst_t *ranging_inst,
+ nan_svc_info_t *svc)
+{
+ int ret = BCME_OK;
+ int i = 0;
+
+ for (i = 0; i < MAX_SUBSCRIBES; i++) {
+ if (ranging_inst->svc_idx[i] == svc) {
+ WL_DBG(("SVC Ctx for ranging already present, "
+ " Duplication not supported: sub_id: %d\n", svc->svc_id));
+ ret = BCME_UNSUPPORTED;
+ goto done;
+ }
+ }
+ for (i = 0; i < MAX_SUBSCRIBES; i++) {
+ if (ranging_inst->svc_idx[i]) {
+ continue;
+ } else {
+ WL_DBG(("Adding SVC Ctx for ranging..svc_id %d\n", svc->svc_id));
+ ranging_inst->svc_idx[i] = svc;
+ ranging_inst->num_svc_ctx++;
+ ret = BCME_OK;
+ goto done;
+ }
+ }
+ if (i == MAX_SUBSCRIBES) {
+ WL_ERR(("wl_cfgnan_update_ranging_svc_inst: "
+ "No resource to hold Ref SVC ctx..svc_id %d\n", svc->svc_id));
+ ret = BCME_NORESOURCE;
+ goto done;
+ }
+done:
+ return ret;
+}
+
+bool
+wl_ranging_geofence_session_with_peer(struct bcm_cfg80211 *cfg,
+ struct ether_addr *peer_addr)
+{
+ bool ret = FALSE;
+ nan_ranging_inst_t *rng_inst = NULL;
+
+ rng_inst = wl_cfgnan_check_for_ranging(cfg,
+ peer_addr);
+ if (rng_inst &&
+ (NAN_RANGING_IS_IN_PROG(rng_inst->range_status))) {
+ ret = TRUE;
+ }
+
+ return ret;
+}
+
+int
+wl_cfgnan_trigger_geofencing_ranging(struct net_device *dev,
+ struct ether_addr *peer_addr)
+{
+ int ret = BCME_OK;
+ int err_at = 0;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
+ nan_ranging_inst_t *ranging_inst;
+ ranging_inst = wl_cfgnan_check_for_ranging(cfg, peer_addr);
+
+ if (!ranging_inst) {
+ WL_INFORM_MEM(("Ranging Entry for peer:" MACDBG ", not found\n",
+ MAC2STRDBG(peer_addr)));
+ ASSERT(0);
+ /* Ranging inst should have been added before adding target */
+ dhd_rtt_remove_geofence_target(dhd, peer_addr);
+ ret = BCME_ERROR;
+ err_at = 1;
+ goto exit;
+ }
+
+ if (!NAN_RANGING_IS_IN_PROG(ranging_inst->range_status)) {
+ WL_DBG(("Trigger range request with first svc in svc list of range inst\n"));
+ ret = wl_cfgnan_trigger_ranging(bcmcfg_to_prmry_ndev(cfg),
+ cfg, ranging_inst, ranging_inst->svc_idx[0],
+ NAN_RANGE_REQ_CMD, TRUE);
+ if (ret != BCME_OK) {
+			/* BCME_BUSY indicates a ranging session already exists with the peer */
+ if (ret == BCME_BUSY) {
+ /* TODO: Attempt again over a timer */
+ err_at = 2;
+ } else {
+ /* Remove target and clean ranging inst */
+ wl_cfgnan_remove_ranging_instance(cfg, ranging_inst);
+ err_at = 3;
+ goto exit;
+ }
+ } else {
+ ranging_inst->range_type = RTT_TYPE_NAN_GEOFENCE;
+ ranging_inst->range_role = NAN_RANGING_ROLE_INITIATOR;
+ }
+ } else if (ranging_inst->range_role != NAN_RANGING_ROLE_RESPONDER) {
+		/* already in progress but not as responder - this should not happen */
+ ASSERT(!NAN_RANGING_IS_IN_PROG(ranging_inst->range_status));
+ ret = BCME_ERROR;
+ err_at = 4;
+ goto exit;
+ } else {
+ /* Already in progress as responder, bail out */
+ goto exit;
+ }
+
+exit:
+ if (ret) {
+ WL_ERR(("wl_cfgnan_trigger_geofencing_ranging: Failed to "
+ "trigger ranging, peer: " MACDBG " ret"
+ " = (%d), err_at = %d\n", MAC2STRDBG(peer_addr),
+ ret, err_at));
+ }
+ return ret;
+}
+
+static int
+wl_cfgnan_check_disc_result_for_ranging(struct bcm_cfg80211 *cfg,
+ nan_event_data_t* nan_event_data, bool *send_disc_result)
+{
+ nan_svc_info_t *svc;
+ int ret = BCME_OK;
+ rtt_geofence_target_info_t geofence_target;
+ dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
+ uint8 index, rtt_invalid_reason = RTT_STATE_VALID;
+ bool add_target;
+
+ *send_disc_result = TRUE;
+ svc = wl_cfgnan_get_svc_inst(cfg, nan_event_data->sub_id, 0);
+
+ if (svc && svc->ranging_required) {
+ nan_ranging_inst_t *ranging_inst;
+ ranging_inst = wl_cfgnan_get_ranging_inst(cfg,
+ &nan_event_data->remote_nmi,
+ NAN_RANGING_ROLE_INITIATOR);
+ if (!ranging_inst) {
+ ret = BCME_NORESOURCE;
+ goto exit;
+ }
+ ASSERT(ranging_inst->range_role != NAN_RANGING_ROLE_INVALID);
+
+ /* For responder role, range state should be in progress only */
+ ASSERT((ranging_inst->range_role == NAN_RANGING_ROLE_INITIATOR) ||
+ NAN_RANGING_IS_IN_PROG(ranging_inst->range_status));
+
+		/*
+		 * On receiving a disc result with ranging required, add the target if
+		 * the ranging role is responder (range state is always in progress
+		 * for responders), or the role is initiator and ranging is not
+		 * already in progress
+		 */
+ add_target = ((ranging_inst->range_role == NAN_RANGING_ROLE_RESPONDER) ||
+ ((ranging_inst->range_role == NAN_RANGING_ROLE_INITIATOR) &&
+ (!NAN_RANGING_IS_IN_PROG(ranging_inst->range_status))));
+ if (add_target) {
+ WL_DBG(("Add Range request to geofence target list\n"));
+ memcpy(&geofence_target.peer_addr, &nan_event_data->remote_nmi,
+ ETHER_ADDR_LEN);
+ /* check if target is already added */
+ if (!dhd_rtt_get_geofence_target(dhd, &nan_event_data->remote_nmi, &index))
+ {
+ ret = dhd_rtt_add_geofence_target(dhd, &geofence_target);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to add geofence Tgt, ret = (%d)\n", ret));
+ bzero(ranging_inst, sizeof(*ranging_inst));
+ goto exit;
+ } else {
+ WL_INFORM_MEM(("Geofence Tgt Added:" MACDBG " sub_id:%d\n",
+ MAC2STRDBG(&geofence_target.peer_addr),
+ svc->svc_id));
+ }
+ }
+ if (wl_cfgnan_update_ranging_svc_inst(ranging_inst, svc)
+ != BCME_OK) {
+ goto exit;
+ }
+ if (ranging_inst->range_role == NAN_RANGING_ROLE_RESPONDER) {
+ /* Adding RTT target while responder, leads to role concurrency */
+ WL_INFORM_MEM(("Entering Role Concurrency constraint, peer : "
+ MACDBG "\n", MAC2STRDBG(&ranging_inst->peer_addr)));
+ ranging_inst->role_concurrency_status = TRUE;
+ } else {
+ /* Trigger/Reset geofence RTT */
+ wl_cfgnan_reset_geofence_ranging(cfg, ranging_inst,
+ RTT_SCHED_SUB_MATCH, TRUE);
+ }
+ } else {
+ /* Target already added, check & add svc_inst ref to rang_inst */
+ wl_cfgnan_update_ranging_svc_inst(ranging_inst, svc);
+ }
+		/* Disc event will be given on receiving the range_rpt event */
+		WL_TRACE(("Disc event will be given when Range RPT event is recvd"));
+ } else {
+ ret = BCME_UNSUPPORTED;
+ }
+
+exit:
+ if (ret == BCME_OK) {
+ /* Check if we have to send disc result immediately or not */
+ rtt_invalid_reason = dhd_rtt_invalid_states
+ (bcmcfg_to_prmry_ndev(cfg), &nan_event_data->remote_nmi);
+		/*
+		 * If instant RTT is not possible (RTT postponed),
+		 * send the discovery result immediately, e.g. when
+		 * the rtt state is invalid because an ndp is
+		 * connected/connecting, or role concurrency is
+		 * active with the peer.
+		 * Otherwise, the result is posted on the ranging
+		 * report event after RTT completes
+		 */
+ if ((rtt_invalid_reason == RTT_STATE_VALID) &&
+ (!wl_cfgnan_check_role_concurrency(cfg,
+ &nan_event_data->remote_nmi))) {
+ /* Avoid sending disc result instantly */
+ *send_disc_result = FALSE;
+ }
+ }
+
+ return ret;
+}
+
+bool
+wl_cfgnan_ranging_allowed(struct bcm_cfg80211 *cfg)
+{
+ int i = 0;
+ uint8 rng_progress_count = 0;
+ nan_ranging_inst_t *ranging_inst = NULL;
+
+ for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
+ ranging_inst = &cfg->nancfg->nan_ranging_info[i];
+ if (NAN_RANGING_IS_IN_PROG(ranging_inst->range_status)) {
+ rng_progress_count++;
+ }
+ }
+
+ if (rng_progress_count >= NAN_MAX_RANGING_SSN_ALLOWED) {
+ return FALSE;
+ }
+ return TRUE;
+}
+
+uint8
+wl_cfgnan_cancel_rng_responders(struct net_device *ndev)
+{
+ int i = 0;
+ uint8 num_resp_cancelled = 0;
+ int status, ret;
+ nan_ranging_inst_t *ranging_inst = NULL;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+
+ for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
+ ranging_inst = &cfg->nancfg->nan_ranging_info[i];
+ if (NAN_RANGING_IS_IN_PROG(ranging_inst->range_status) &&
+ (ranging_inst->range_role == NAN_RANGING_ROLE_RESPONDER)) {
+ num_resp_cancelled++;
+ ret = wl_cfgnan_cancel_ranging(bcmcfg_to_prmry_ndev(cfg), cfg,
+ &ranging_inst->range_id, NAN_RNG_TERM_FLAG_IMMEDIATE, &status);
+ if (unlikely(ret) || unlikely(status)) {
+ WL_ERR(("wl_cfgnan_cancel_rng_responders: Failed to cancel"
+ " existing ranging, ret = (%d)\n", ret));
+ }
+ WL_INFORM_MEM(("Removing Ranging Instance " MACDBG "\n",
+ MAC2STRDBG(&(ranging_inst->peer_addr))));
+ bzero(ranging_inst, sizeof(*ranging_inst));
+ }
+ }
+ return num_resp_cancelled;
+}
+
+/* ranging request event handler */
+static int
+wl_cfgnan_handle_ranging_ind(struct bcm_cfg80211 *cfg,
+ wl_nan_ev_rng_req_ind_t *rng_ind)
+{
+ int ret = BCME_OK;
+ nan_ranging_inst_t *ranging_inst = NULL;
+ uint8 cancel_flags = 0;
+ bool accept = TRUE;
+ nan_ranging_inst_t tmp_rng_inst;
+ struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+ struct ether_addr * peer_addr = &(rng_ind->peer_m_addr);
+ uint8 rtt_invalid_state;
+ dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+ int err_at = 0;
+
+ WL_DBG(("Trigger range response\n"));
+
+	/* Take the mutex first so every exit through 'done' unlocks a held lock */
+	mutex_lock(&rtt_status->rtt_mutex);
+
+	/* Check if ranging is allowed */
+	rtt_invalid_state = dhd_rtt_invalid_states(ndev, peer_addr);
+	if (rtt_invalid_state != RTT_STATE_VALID) {
+		WL_INFORM_MEM(("Cannot allow ranging due to reason %d \n", rtt_invalid_state));
+		ret = BCME_NORESOURCE;
+		err_at = 1;
+		goto done;
+	}
+
+	/* rtt_status was already dereferenced to take the mutex; no NULL check needed */
+	if (!RTT_IS_STOPPED(rtt_status)) {
+		WL_INFORM_MEM(("Directed RTT in progress..reject RNG_REQ\n"));
+ ret = BCME_NORESOURCE;
+ err_at = 2;
+ goto done;
+ }
+
+ /* Check if ranging set up in progress */
+ if (dhd_rtt_is_geofence_setup_inprog(dhd)) {
+ WL_INFORM_MEM(("Ranging set up already in progress, "
+ "RNG IND event dropped\n"));
+ err_at = 3;
+ ret = BCME_NOTREADY;
+ goto done;
+ }
+
+	/* Check if we already have a ranging session with this peer.
+	 * If so, the policy is:
+	 * - if we are already a geofence initiator or responder w.r.t. the peer,
+	 *   silently tear down the current session and accept the REQ;
+	 * - if we are in the direct rtt initiator role, reject it.
+	 */
+ ranging_inst = wl_cfgnan_check_for_ranging(cfg, peer_addr);
+ if (ranging_inst) {
+ if (NAN_RANGING_IS_IN_PROG(ranging_inst->range_status)) {
+ if (ranging_inst->range_type == RTT_TYPE_NAN_GEOFENCE ||
+ ranging_inst->range_role == NAN_RANGING_ROLE_RESPONDER) {
+ WL_INFORM_MEM(("Already responder/geofence for the Peer, cancel "
+ "current ssn and accept new one,"
+ " range_type = %d, role = %d\n",
+ ranging_inst->range_type, ranging_inst->range_role));
+ cancel_flags = NAN_RNG_TERM_FLAG_IMMEDIATE |
+ NAN_RNG_TERM_FLAG_SILENT_TEARDOWN;
+ wl_cfgnan_suspend_geofence_rng_session(ndev,
+ &(rng_ind->peer_m_addr),
+ RTT_GEO_SUSPN_PEER_RTT_TRIGGER, cancel_flags);
+ } else {
+ WL_ERR(("Reject the RNG_REQ_IND in direct rtt initiator role\n"));
+ err_at = 4;
+ ret = BCME_BUSY;
+ goto done;
+ }
+ } else {
+ /* Check if new Ranging session is allowed */
+ if (dhd_rtt_geofence_sessions_maxed_out(dhd)) {
+ WL_ERR(("Cannot allow more ranging sessions\n"));
+ err_at = 5;
+ ret = BCME_NORESOURCE;
+ goto done;
+ }
+ }
+ /* reset ranging instance for responder role */
+ ranging_inst->range_status = NAN_RANGING_REQUIRED;
+ ranging_inst->range_role = NAN_RANGING_ROLE_RESPONDER;
+ ranging_inst->range_type = 0;
+ } else {
+ /* Check if new Ranging session is allowed */
+ if (dhd_rtt_geofence_sessions_maxed_out(dhd)) {
+ WL_ERR(("Cannot allow more ranging sessions\n"));
+ err_at = 6;
+ ret = BCME_NORESOURCE;
+ goto done;
+ }
+
+ ranging_inst = wl_cfgnan_get_ranging_inst(cfg, &rng_ind->peer_m_addr,
+ NAN_RANGING_ROLE_RESPONDER);
+ ASSERT(ranging_inst);
+ if (!ranging_inst) {
+ WL_ERR(("Failed to create ranging instance \n"));
+ err_at = 7;
+ ret = BCME_NORESOURCE;
+ goto done;
+ }
+ }
+
+done:
+ if (ret != BCME_OK) {
+ /* reject the REQ using temp ranging instance */
+ bzero(&tmp_rng_inst, sizeof(tmp_rng_inst));
+ ranging_inst = &tmp_rng_inst;
+ (void)memcpy_s(&tmp_rng_inst.peer_addr, ETHER_ADDR_LEN,
+ &rng_ind->peer_m_addr, ETHER_ADDR_LEN);
+ accept = FALSE;
+ }
+
+ ranging_inst->range_id = rng_ind->rng_id;
+
+ WL_INFORM_MEM(("Trigger Ranging at Responder, ret = %d, err_at = %d, "
+ "accept = %d, rng_id = %d\n", ret, err_at,
+ accept, rng_ind->rng_id));
+ ret = wl_cfgnan_trigger_ranging(ndev, cfg, ranging_inst,
+ NULL, NAN_RANGE_REQ_EVNT, accept);
+ if (unlikely(ret) || !accept) {
+ WL_ERR(("Failed to trigger ranging while handling range request, "
+ " ret = %d, rng_id = %d, accept %d\n", ret,
+ rng_ind->rng_id, accept));
+ wl_cfgnan_reset_remove_ranging_instance(cfg, ranging_inst);
+ } else {
+ dhd_rtt_set_geofence_setup_status(dhd, TRUE,
+ &ranging_inst->peer_addr);
+ }
+ mutex_unlock(&rtt_status->rtt_mutex);
+ return ret;
+}
+
+/* ranging request and response iovar handler */
+int
+wl_cfgnan_trigger_ranging(struct net_device *ndev, struct bcm_cfg80211 *cfg,
+ void *ranging_ctxt, nan_svc_info_t *svc,
+ uint8 range_cmd, bool accept_req)
+{
+ s32 ret = BCME_OK;
+ bcm_iov_batch_buf_t *nan_buf = NULL;
+ wl_nan_range_req_t *range_req = NULL;
+ wl_nan_range_resp_t *range_resp = NULL;
+ bcm_iov_batch_subcmd_t *sub_cmd = NULL;
+ uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
+ uint32 status;
+ uint8 resp_buf[NAN_IOCTL_BUF_SIZE_MED];
+ nan_ranging_inst_t *ranging_inst = (nan_ranging_inst_t *)ranging_ctxt;
+ nan_avail_cmd_data cmd_data;
+
+ NAN_DBG_ENTER();
+
+ bzero(&cmd_data, sizeof(cmd_data));
+ ret = memcpy_s(&cmd_data.peer_nmi, ETHER_ADDR_LEN,
+ &ranging_inst->peer_addr, ETHER_ADDR_LEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy ranging peer addr\n"));
+ goto fail;
+ }
+
+ cmd_data.avail_period = NAN_RANGING_PERIOD;
+ ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg),
+ cfg, &cmd_data, WL_AVAIL_LOCAL);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to set avail value with type [WL_AVAIL_LOCAL]\n"));
+ goto fail;
+ }
+
+ ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg),
+ cfg, &cmd_data, WL_AVAIL_RANGING);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to set avail value with type [WL_AVAIL_RANGING]\n"));
+ goto fail;
+ }
+
+ nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
+ if (!nan_buf) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ ret = BCME_NOMEM;
+ goto fail;
+ }
+
+ nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
+ nan_buf->count = 0;
+ nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
+
+ sub_cmd = (bcm_iov_batch_subcmd_t*)(&nan_buf->cmds[0]);
+ sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
+ if (range_cmd == NAN_RANGE_REQ_CMD) {
+ sub_cmd->id = htod16(WL_NAN_CMD_RANGE_REQUEST);
+ sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(wl_nan_range_req_t);
+ range_req = (wl_nan_range_req_t *)(sub_cmd->data);
+ /* ranging config */
+ range_req->peer = ranging_inst->peer_addr;
+ if (svc) {
+ range_req->interval = svc->ranging_interval;
+ /* Limits are in cm from host */
+ range_req->ingress = svc->ingress_limit;
+ range_req->egress = svc->egress_limit;
+ }
+ range_req->indication = NAN_RANGING_INDICATE_CONTINUOUS_MASK;
+ } else {
+ /* range response config */
+ sub_cmd->id = htod16(WL_NAN_CMD_RANGE_RESPONSE);
+ sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(wl_nan_range_resp_t);
+ range_resp = (wl_nan_range_resp_t *)(sub_cmd->data);
+ range_resp->range_id = ranging_inst->range_id;
+ range_resp->indication = NAN_RANGING_INDICATE_CONTINUOUS_MASK;
+ if (accept_req) {
+ range_resp->status = NAN_RNG_REQ_ACCEPTED_BY_HOST;
+ } else {
+ range_resp->status = NAN_RNG_REQ_REJECTED_BY_HOST;
+ }
+ nan_buf->is_set = true;
+ }
+
+ nan_buf_size -= (sub_cmd->len +
+ OFFSETOF(bcm_iov_batch_subcmd_t, u.options));
+ nan_buf->count++;
+
+ bzero(resp_buf, sizeof(resp_buf));
+ ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size,
+ &status,
+ (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
+ if (unlikely(ret) || unlikely(status)) {
+ WL_ERR(("nan ranging failed ret = %d status = %d\n",
+ ret, status));
+ ret = (ret == BCME_OK) ? status : ret;
+ goto fail;
+ }
+ WL_TRACE(("nan ranging trigger successful\n"));
+ if (range_cmd == NAN_RANGE_REQ_CMD) {
+ WL_INFORM_MEM(("Ranging Req Triggered"
+ " peer: " MACDBG ", ind : %d, ingress : %d, egress : %d\n",
+ MAC2STRDBG(&ranging_inst->peer_addr), range_req->indication,
+ range_req->ingress, range_req->egress));
+ } else {
+ WL_INFORM_MEM(("Ranging Resp Triggered"
+ " peer: " MACDBG ", ind : %d, ingress : %d, egress : %d\n",
+ MAC2STRDBG(&ranging_inst->peer_addr), range_resp->indication,
+ range_resp->ingress, range_resp->egress));
+ }
+
+ /* check the response buff for request */
+ if (range_cmd == NAN_RANGE_REQ_CMD) {
+ ret = process_resp_buf(resp_buf + WL_NAN_OBUF_DATA_OFFSET,
+ &ranging_inst->range_id, WL_NAN_CMD_RANGE_REQUEST);
+ WL_INFORM_MEM(("ranging instance returned %d\n", ranging_inst->range_id));
+ }
+
+ /* Move Ranging instance to set up in progress state */
+ ranging_inst->range_status = NAN_RANGING_SETUP_IN_PROGRESS;
+
+fail:
+ if (nan_buf) {
+ MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
+ }
+
+ NAN_DBG_EXIT();
+ return ret;
+}
+
+bool
+wl_cfgnan_ranging_is_in_prog_for_peer(struct bcm_cfg80211 *cfg, struct ether_addr *peer_addr)
+{
+ nan_ranging_inst_t *rng_inst = NULL;
+
+ rng_inst = wl_cfgnan_check_for_ranging(cfg, peer_addr);
+
+ return (rng_inst && NAN_RANGING_IS_IN_PROG(rng_inst->range_status));
+}
+
+#endif /* RTT_SUPPORT */
+#endif /* WL_NAN_DISC_CACHE */
+
+static void *wl_nan_bloom_alloc(void *ctx, uint size)
+{
+ uint8 *buf;
+ BCM_REFERENCE(ctx);
+
+ buf = kmalloc(size, GFP_KERNEL);
+	buf = kmalloc(size, GFP_KERNEL);
+	if (!buf) {
+		/* kmalloc already returned NULL; nothing more to reset */
+		WL_ERR(("%s: memory allocation failed\n", __func__));
+	}
+}
+
+static void wl_nan_bloom_free(void *ctx, void *buf, uint size)
+{
+ BCM_REFERENCE(ctx);
+ BCM_REFERENCE(size);
+ if (buf) {
+ kfree(buf);
+ }
+}
+
+static uint wl_nan_hash(void *ctx, uint index, const uint8 *input, uint input_len)
+{
+ uint8* filter_idx = (uint8*)ctx;
+ uint8 i = (*filter_idx * WL_NAN_HASHES_PER_BLOOM) + (uint8)index;
+ uint b = 0;
+
+ /* Steps 1 and 2 as explained in Section 6.2 */
+ /* Concatenate index to input and run CRC32 by calling hndcrc32 twice */
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ b = hndcrc32(&i, sizeof(uint8), CRC32_INIT_VALUE);
+ b = hndcrc32((uint8*)input, input_len, b);
+ GCC_DIAGNOSTIC_POP();
+ /* Obtain the last 2 bytes of the CRC32 output */
+ b &= NAN_BLOOM_CRC32_MASK;
+
+ /* Step 3 is completed by bcmbloom functions */
+ return b;
+}
+
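+/*
+ * Illustrative sketch (not part of this patch): what wl_nan_hash() computes -
+ * CRC32 over (index byte || input), keeping only the low 2 bytes. A plain
+ * bitwise reflected CRC-32 stands in for hndcrc32(); whether hndcrc32 matches
+ * this exact polynomial/finalization is an assumption for illustration.
+ */
+#if 0 /* standalone example, compile separately */
+#include <stdint.h>
+#include <stddef.h>
+
+static uint32_t ex_crc32(const uint8_t *p, size_t len, uint32_t crc)
+{
+	size_t i;
+	int b;
+
+	for (i = 0; i < len; i++) {
+		crc ^= p[i];
+		for (b = 0; b < 8; b++) {
+			crc = (crc >> 1) ^ (0xEDB88320u & (uint32_t)(-(int32_t)(crc & 1)));
+		}
+	}
+	return crc;
+}
+
+static uint32_t ex_nan_hash(uint8_t idx, const uint8_t *input, size_t len)
+{
+	uint32_t b = ex_crc32(&idx, 1, 0xFFFFFFFFu); /* chain: index byte first */
+	b = ex_crc32(input, len, b);                 /* then the member data */
+	return b & 0xFFFFu;                          /* keep the last 2 bytes */
+}
+#endif
+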
+static int wl_nan_bloom_create(bcm_bloom_filter_t **bp, uint *idx, uint size)
+{
+ uint i;
+ int err;
+
+ err = bcm_bloom_create(wl_nan_bloom_alloc, wl_nan_bloom_free,
+ idx, WL_NAN_HASHES_PER_BLOOM, size, bp);
+ if (err != BCME_OK) {
+ goto exit;
+ }
+
+ /* Populate bloom filter with hash functions */
+ for (i = 0; i < WL_NAN_HASHES_PER_BLOOM; i++) {
+ err = bcm_bloom_add_hash(*bp, wl_nan_hash, &i);
+ if (err) {
+ WL_ERR(("bcm_bloom_add_hash failed\n"));
+ goto exit;
+ }
+ }
+exit:
+ return err;
+}
+
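+/*
+ * Illustrative sketch (not part of this patch): the generic bloom-filter flow
+ * the bcm_bloom_* calls above implement - k hash functions each select a bit
+ * in a fixed-size filter; add sets the bits, check tests them. This is a
+ * plain-C analogue, not the bcmbloom API.
+ */
+#if 0 /* standalone example, compile separately */
+#include <stdint.h>
+#include <stddef.h>
+
+#define EX_BLOOM_BYTES 256 /* cf. NAN_BLOOM_LENGTH_DEFAULT (assumed) */
+#define EX_HASHES      4   /* cf. WL_NAN_HASHES_PER_BLOOM (assumed) */
+
+/* any per-index hash, e.g. ex_nan_hash() from the sketch above */
+typedef uint32_t (*ex_hash_fn)(uint8_t idx, const uint8_t *in, size_t len);
+
+static void ex_bloom_add(uint8_t *filter, ex_hash_fn h,
+	const uint8_t *member, size_t len)
+{
+	uint8_t i;
+	for (i = 0; i < EX_HASHES; i++) {
+		uint32_t bit = h(i, member, len) % (EX_BLOOM_BYTES * 8);
+		filter[bit / 8] |= (uint8_t)(1u << (bit % 8));
+	}
+}
+
+static int ex_bloom_maybe_has(const uint8_t *filter, ex_hash_fn h,
+	const uint8_t *member, size_t len)
+{
+	uint8_t i;
+	for (i = 0; i < EX_HASHES; i++) {
+		uint32_t bit = h(i, member, len) % (EX_BLOOM_BYTES * 8);
+		if (!(filter[bit / 8] & (1u << (bit % 8)))) {
+			return 0; /* definitely not present */
+		}
+	}
+	return 1; /* possibly present (false positives allowed) */
+}
+#endif
+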
+static int
+wl_cfgnan_sd_params_handler(struct net_device *ndev,
+ nan_discover_cmd_data_t *cmd_data, uint16 cmd_id,
+ void *p_buf, uint16 *nan_buf_size)
+{
+ s32 ret = BCME_OK;
+ uint8 *pxtlv, *srf = NULL, *srf_mac = NULL, *srftmp = NULL;
+ uint16 buflen_avail;
+ bcm_iov_batch_subcmd_t *sub_cmd = (bcm_iov_batch_subcmd_t*)(p_buf);
+ wl_nan_sd_params_t *sd_params = (wl_nan_sd_params_t *)sub_cmd->data;
+ uint16 srf_size = 0;
+ uint bloom_size, a;
+ bcm_bloom_filter_t *bp = NULL;
+ /* Bloom filter index default, indicates it has not been set */
+ uint bloom_idx = 0xFFFFFFFF;
+ uint16 bloom_len = NAN_BLOOM_LENGTH_DEFAULT;
+ /* srf_ctrl_size = bloom_len + src_control field */
+ uint16 srf_ctrl_size = bloom_len + 1;
+
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(ndev);
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ BCM_REFERENCE(cfg);
+
+ NAN_DBG_ENTER();
+
+ if (cmd_data->period) {
+ sd_params->awake_dw = cmd_data->period;
+ }
+ sd_params->period = 1;
+
+ if (cmd_data->ttl) {
+ sd_params->ttl = cmd_data->ttl;
+ } else {
+ sd_params->ttl = WL_NAN_TTL_UNTIL_CANCEL;
+ }
+
+ sd_params->flags = 0;
+ sd_params->flags = cmd_data->flags;
+
+ /* Nan Service Based event suppression Flags */
+ if (cmd_data->recv_ind_flag) {
+		/* BIT0 - If set, host won't receive the "terminated" event */
+ if (CHECK_BIT(cmd_data->recv_ind_flag, WL_NAN_EVENT_SUPPRESS_TERMINATE_BIT)) {
+ sd_params->flags |= WL_NAN_SVC_CTRL_SUPPRESS_EVT_TERMINATED;
+ }
+
+		/* BIT1 - If set, host won't receive the match expiry event */
+		/* TODO: Expiry not yet supported */
+ if (CHECK_BIT(cmd_data->recv_ind_flag, WL_NAN_EVENT_SUPPRESS_MATCH_EXP_BIT)) {
+ WL_DBG(("Need to add match expiry event\n"));
+ }
+		/* BIT2 - If set, host won't receive the "receive" event */
+ if (CHECK_BIT(cmd_data->recv_ind_flag, WL_NAN_EVENT_SUPPRESS_RECEIVE_BIT)) {
+ sd_params->flags |= WL_NAN_SVC_CTRL_SUPPRESS_EVT_RECEIVE;
+ }
+		/* BIT3 - If set, host won't receive the "replied" event */
+ if (CHECK_BIT(cmd_data->recv_ind_flag, WL_NAN_EVENT_SUPPRESS_REPLIED_BIT)) {
+ sd_params->flags |= WL_NAN_SVC_CTRL_SUPPRESS_EVT_REPLIED;
+ }
+ }
+ if (cmd_id == WL_NAN_CMD_SD_PUBLISH) {
+ sd_params->instance_id = cmd_data->pub_id;
+ if (cmd_data->service_responder_policy) {
+ /* Do not disturb avail if dam is supported */
+ if (FW_SUPPORTED(dhdp, autodam)) {
+ /* Nan Accept policy: Per service basis policy
+ * Based on this policy(ALL/NONE), responder side
+ * will send ACCEPT/REJECT
+ * If set, auto datapath responder will be sent by FW
+ */
+ sd_params->flags |= WL_NAN_SVC_CTRL_AUTO_DPRESP;
+ } else {
+				WL_ERR(("svc specific auto dp resp is not"
+					" supported in non-auto dam fw\n"));
+ }
+ }
+ } else if (cmd_id == WL_NAN_CMD_SD_SUBSCRIBE) {
+ sd_params->instance_id = cmd_data->sub_id;
+ } else {
+ ret = BCME_USAGE_ERROR;
+ WL_ERR(("wrong command id = %d \n", cmd_id));
+ goto fail;
+ }
+
+ if ((cmd_data->svc_hash.dlen == WL_NAN_SVC_HASH_LEN) &&
+ (cmd_data->svc_hash.data)) {
+ ret = memcpy_s((uint8*)sd_params->svc_hash,
+ sizeof(sd_params->svc_hash),
+ cmd_data->svc_hash.data,
+ cmd_data->svc_hash.dlen);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy svc hash\n"));
+ goto fail;
+ }
+#ifdef WL_NAN_DEBUG
+ prhex("hashed svc name", cmd_data->svc_hash.data,
+ cmd_data->svc_hash.dlen);
+#endif /* WL_NAN_DEBUG */
+ } else {
+ ret = BCME_ERROR;
+ WL_ERR(("invalid svc hash data or length = %d\n",
+ cmd_data->svc_hash.dlen));
+ goto fail;
+ }
+
+ /* check if ranging support is present in firmware */
+ if ((cmd_data->sde_control_flag & NAN_SDE_CF_RANGING_REQUIRED) &&
+ !FW_SUPPORTED(dhdp, nanrange)) {
+		WL_ERR(("Service requires ranging but fw doesn't support it\n"));
+ ret = BCME_UNSUPPORTED;
+ goto fail;
+ }
+
+ /* Optional parameters: fill the sub_command block with service descriptor attr */
+ sub_cmd->id = htod16(cmd_id);
+ sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
+ sub_cmd->len = sizeof(sub_cmd->u.options) +
+ OFFSETOF(wl_nan_sd_params_t, optional[0]);
+ pxtlv = (uint8*)&sd_params->optional[0];
+
+ *nan_buf_size -= sub_cmd->len;
+ buflen_avail = *nan_buf_size;
+
+ if (cmd_data->svc_info.data && cmd_data->svc_info.dlen) {
+ WL_TRACE(("optional svc_info present, pack it\n"));
+ ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
+ WL_NAN_XTLV_SD_SVC_INFO,
+ cmd_data->svc_info.dlen,
+ cmd_data->svc_info.data, BCM_XTLV_OPTION_ALIGN32);
+ if (unlikely(ret)) {
+ WL_ERR(("%s: fail to pack WL_NAN_XTLV_SD_SVC_INFO\n", __FUNCTION__));
+ goto fail;
+ }
+ }
+
+ if (cmd_data->sde_svc_info.data && cmd_data->sde_svc_info.dlen) {
+ WL_TRACE(("optional sdea svc_info present, pack it, %d\n",
+ cmd_data->sde_svc_info.dlen));
+ ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
+ WL_NAN_XTLV_SD_SDE_SVC_INFO,
+ cmd_data->sde_svc_info.dlen,
+ cmd_data->sde_svc_info.data, BCM_XTLV_OPTION_ALIGN32);
+ if (unlikely(ret)) {
+ WL_ERR(("%s: fail to pack sdea svc info\n", __FUNCTION__));
+ goto fail;
+ }
+ }
+
+ if (cmd_data->tx_match.dlen) {
+		WL_TRACE(("optional tx match filter present (len=%d)\n",
+			cmd_data->tx_match.dlen));
+ ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
+ WL_NAN_XTLV_CFG_MATCH_TX, cmd_data->tx_match.dlen,
+ cmd_data->tx_match.data, BCM_XTLV_OPTION_ALIGN32);
+ if (unlikely(ret)) {
+ WL_ERR(("%s: failed on xtlv_pack for tx match filter\n", __FUNCTION__));
+ goto fail;
+ }
+ }
+
+ if (cmd_data->life_count) {
+ WL_TRACE(("optional life count is present, pack it\n"));
+ ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size, WL_NAN_XTLV_CFG_SVC_LIFE_COUNT,
+ sizeof(cmd_data->life_count), &cmd_data->life_count,
+ BCM_XTLV_OPTION_ALIGN32);
+ if (unlikely(ret)) {
+			WL_ERR(("%s: failed to pack WL_NAN_XTLV_CFG_SVC_LIFE_COUNT\n", __FUNCTION__));
+ goto fail;
+ }
+ }
+
+ if (cmd_data->use_srf) {
+ uint8 srf_control = 0;
+ /* set include bit */
+ if (cmd_data->srf_include == true) {
+ srf_control |= 0x2;
+ }
+
+ if (!ETHER_ISNULLADDR(&cmd_data->mac_list.list) &&
+ (cmd_data->mac_list.num_mac_addr
+ < NAN_SRF_MAX_MAC)) {
+ if (cmd_data->srf_type == SRF_TYPE_SEQ_MAC_ADDR) {
+ /* mac list */
+ srf_size = (cmd_data->mac_list.num_mac_addr
+ * ETHER_ADDR_LEN) + NAN_SRF_CTRL_FIELD_LEN;
+ WL_TRACE(("srf size = %d\n", srf_size));
+
+ srf_mac = MALLOCZ(cfg->osh, srf_size);
+ if (srf_mac == NULL) {
+ WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
+ ret = -ENOMEM;
+ goto fail;
+ }
+ ret = memcpy_s(srf_mac, NAN_SRF_CTRL_FIELD_LEN,
+ &srf_control, NAN_SRF_CTRL_FIELD_LEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy srf control\n"));
+ goto fail;
+ }
+ ret = memcpy_s(srf_mac+1, (srf_size - NAN_SRF_CTRL_FIELD_LEN),
+ cmd_data->mac_list.list,
+ (srf_size - NAN_SRF_CTRL_FIELD_LEN));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy srf control mac list\n"));
+ goto fail;
+ }
+ ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
+ WL_NAN_XTLV_CFG_SR_FILTER, srf_size, srf_mac,
+ BCM_XTLV_OPTION_ALIGN32);
+ if (unlikely(ret)) {
+						WL_ERR(("%s: failed to pack WL_NAN_XTLV_CFG_SR_FILTER\n",
+							__FUNCTION__));
+ goto fail;
+ }
+ } else if (cmd_data->srf_type == SRF_TYPE_BLOOM_FILTER) {
+ /* Create bloom filter */
+ srf = MALLOCZ(cfg->osh, srf_ctrl_size);
+ if (srf == NULL) {
+ WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
+ ret = -ENOMEM;
+ goto fail;
+ }
+ /* Bloom filter */
+ srf_control |= 0x1;
+ /* Instance id must be from 1 to 255, 0 is Reserved */
+ if (sd_params->instance_id == NAN_ID_RESERVED) {
+ WL_ERR(("Invalid instance id: %d\n",
+ sd_params->instance_id));
+ ret = BCME_BADARG;
+ goto fail;
+ }
+ if (bloom_idx == 0xFFFFFFFF) {
+ bloom_idx = sd_params->instance_id % 4;
+				} else {
+					WL_ERR(("Invalid bloom_idx\n"));
+					ret = BCME_BADARG;
+					goto fail;
+				}
+ srf_control |= bloom_idx << 2;
+
+ ret = wl_nan_bloom_create(&bp, &bloom_idx, bloom_len);
+ if (unlikely(ret)) {
+ WL_ERR(("%s: Bloom create failed\n", __FUNCTION__));
+ goto fail;
+ }
+
+ srftmp = cmd_data->mac_list.list;
+ for (a = 0;
+ a < cmd_data->mac_list.num_mac_addr; a++) {
+ ret = bcm_bloom_add_member(bp, srftmp, ETHER_ADDR_LEN);
+ if (unlikely(ret)) {
+ WL_ERR(("%s: Cannot add to bloom filter\n",
+ __FUNCTION__));
+ goto fail;
+ }
+ srftmp += ETHER_ADDR_LEN;
+ }
+
+ ret = memcpy_s(srf, NAN_SRF_CTRL_FIELD_LEN,
+ &srf_control, NAN_SRF_CTRL_FIELD_LEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy srf control\n"));
+ goto fail;
+ }
+ ret = bcm_bloom_get_filter_data(bp, bloom_len,
+ (srf + NAN_SRF_CTRL_FIELD_LEN),
+ &bloom_size);
+ if (unlikely(ret)) {
+ WL_ERR(("%s: Cannot get filter data\n", __FUNCTION__));
+ goto fail;
+ }
+ ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
+ WL_NAN_XTLV_CFG_SR_FILTER, srf_ctrl_size,
+ srf, BCM_XTLV_OPTION_ALIGN32);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to pack SR FILTER data, ret = %d\n", ret));
+ goto fail;
+ }
+				} else {
+					WL_ERR(("Invalid SRF Type = %d !!!\n",
+						cmd_data->srf_type));
+					ret = BCME_BADARG;
+					goto fail;
+				}
+			} else {
+				WL_ERR(("Invalid MAC Addr/Too many mac addr = %d !!!\n",
+					cmd_data->mac_list.num_mac_addr));
+				ret = BCME_BADARG;
+				goto fail;
+			}
+ }
+
+ if (cmd_data->rx_match.dlen) {
+ WL_TRACE(("optional rx match filter is present, pack it\n"));
+ ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
+ WL_NAN_XTLV_CFG_MATCH_RX, cmd_data->rx_match.dlen,
+ cmd_data->rx_match.data, BCM_XTLV_OPTION_ALIGN32);
+ if (unlikely(ret)) {
+ WL_ERR(("%s: failed on xtlv_pack for rx match filter\n", __func__));
+ goto fail;
+ }
+ }
+
+ /* Security elements */
+ if (cmd_data->csid) {
+ WL_TRACE(("Cipher suite type is present, pack it\n"));
+ ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
+ WL_NAN_XTLV_CFG_SEC_CSID, sizeof(nan_sec_csid_e),
+ (uint8*)&cmd_data->csid, BCM_XTLV_OPTION_ALIGN32);
+ if (unlikely(ret)) {
+ WL_ERR(("%s: fail to pack on csid\n", __FUNCTION__));
+ goto fail;
+ }
+ }
+
+ if (cmd_data->ndp_cfg.security_cfg) {
+ if ((cmd_data->key_type == NAN_SECURITY_KEY_INPUT_PMK) ||
+ (cmd_data->key_type == NAN_SECURITY_KEY_INPUT_PASSPHRASE)) {
+ if (cmd_data->key.data && cmd_data->key.dlen) {
+ WL_TRACE(("optional pmk present, pack it\n"));
+ ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
+ WL_NAN_XTLV_CFG_SEC_PMK, cmd_data->key.dlen,
+ cmd_data->key.data, BCM_XTLV_OPTION_ALIGN32);
+ if (unlikely(ret)) {
+ WL_ERR(("%s: fail to pack WL_NAN_XTLV_CFG_SEC_PMK\n",
+ __FUNCTION__));
+ goto fail;
+ }
+ }
+ } else {
+ WL_ERR(("Invalid security key type\n"));
+ ret = BCME_BADARG;
+ goto fail;
+ }
+ }
+
+ if (cmd_data->scid.data && cmd_data->scid.dlen) {
+ WL_TRACE(("optional scid present, pack it\n"));
+ ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size, WL_NAN_XTLV_CFG_SEC_SCID,
+ cmd_data->scid.dlen, cmd_data->scid.data, BCM_XTLV_OPTION_ALIGN32);
+ if (unlikely(ret)) {
+ WL_ERR(("%s: fail to pack WL_NAN_XTLV_CFG_SEC_SCID\n", __FUNCTION__));
+ goto fail;
+ }
+ }
+
+ if (cmd_data->sde_control_config) {
+ ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
+ WL_NAN_XTLV_SD_SDE_CONTROL,
+ sizeof(uint16), (uint8*)&cmd_data->sde_control_flag,
+ BCM_XTLV_OPTION_ALIGN32);
+ if (ret != BCME_OK) {
+ WL_ERR(("%s: fail to pack WL_NAN_XTLV_SD_SDE_CONTROL\n", __FUNCTION__));
+ goto fail;
+ }
+ }
+
+ sub_cmd->len += (buflen_avail - *nan_buf_size);
+
+fail:
+ if (srf) {
+ MFREE(cfg->osh, srf, srf_ctrl_size);
+ }
+
+ if (srf_mac) {
+ MFREE(cfg->osh, srf_mac, srf_size);
+ }
+ NAN_DBG_EXIT();
+ return ret;
+}
+
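+/*
+ * Illustrative sketch (not part of this patch): composing the one-byte SRF
+ * control field exactly as wl_cfgnan_sd_params_handler() does - bit 0 selects
+ * bloom filter vs. MAC list, bit 1 is the include flag, bits 2-3 carry the
+ * bloom index (instance_id % 4).
+ */
+#if 0 /* standalone example, compile separately */
+#include <stdint.h>
+
+static uint8_t ex_srf_control(int use_bloom, int include, uint8_t instance_id)
+{
+	uint8_t ctrl = 0;
+
+	if (include) {
+		ctrl |= 0x2;                            /* include bit */
+	}
+	if (use_bloom) {
+		ctrl |= 0x1;                            /* bloom filter type */
+		ctrl |= (uint8_t)((instance_id % 4) << 2); /* bloom index */
+	}
+	return ctrl;
+}
+#endif
+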
+static int
+wl_cfgnan_aligned_data_size_of_opt_disc_params(uint16 *data_size, nan_discover_cmd_data_t *cmd_data)
+{
+ s32 ret = BCME_OK;
+ if (cmd_data->svc_info.dlen)
+ *data_size += ALIGN_SIZE(cmd_data->svc_info.dlen + NAN_XTLV_ID_LEN_SIZE, 4);
+ if (cmd_data->sde_svc_info.dlen)
+ *data_size += ALIGN_SIZE(cmd_data->sde_svc_info.dlen + NAN_XTLV_ID_LEN_SIZE, 4);
+ if (cmd_data->tx_match.dlen)
+ *data_size += ALIGN_SIZE(cmd_data->tx_match.dlen + NAN_XTLV_ID_LEN_SIZE, 4);
+ if (cmd_data->rx_match.dlen)
+ *data_size += ALIGN_SIZE(cmd_data->rx_match.dlen + NAN_XTLV_ID_LEN_SIZE, 4);
+ if (cmd_data->use_srf) {
+ if (cmd_data->srf_type == SRF_TYPE_SEQ_MAC_ADDR) {
+ *data_size += (cmd_data->mac_list.num_mac_addr * ETHER_ADDR_LEN)
+ + NAN_SRF_CTRL_FIELD_LEN;
+ } else { /* Bloom filter type */
+ *data_size += NAN_BLOOM_LENGTH_DEFAULT + 1;
+ }
+ *data_size += ALIGN_SIZE(*data_size + NAN_XTLV_ID_LEN_SIZE, 4);
+ }
+ if (cmd_data->csid)
+ *data_size += ALIGN_SIZE(sizeof(nan_sec_csid_e) + NAN_XTLV_ID_LEN_SIZE, 4);
+ if (cmd_data->key.dlen)
+ *data_size += ALIGN_SIZE(cmd_data->key.dlen + NAN_XTLV_ID_LEN_SIZE, 4);
+ if (cmd_data->scid.dlen)
+ *data_size += ALIGN_SIZE(cmd_data->scid.dlen + NAN_XTLV_ID_LEN_SIZE, 4);
+ if (cmd_data->sde_control_config)
+ *data_size += ALIGN_SIZE(sizeof(uint16) + NAN_XTLV_ID_LEN_SIZE, 4);
+ if (cmd_data->life_count)
+ *data_size += ALIGN_SIZE(sizeof(cmd_data->life_count) + NAN_XTLV_ID_LEN_SIZE, 4);
+ return ret;
+}
+
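+/*
+ * Illustrative sketch (not part of this patch): the sizing arithmetic above.
+ * Each optional XTLV contributes its payload plus an id/len header, rounded up
+ * to a 4-byte boundary; ALIGN_SIZE(x, 4) is assumed to be the usual
+ * (x + 3) & ~3 round-up, and the 4-byte header size stands in for
+ * NAN_XTLV_ID_LEN_SIZE (2-byte id + 2-byte len, assumed).
+ */
+#if 0 /* standalone example, compile separately */
+#include <stdint.h>
+#include <stdio.h>
+
+#define EX_ALIGN_SIZE(x, a) (((x) + (a) - 1) & ~((a) - 1))
+#define EX_XTLV_HDR_LEN 4
+
+int main(void)
+{
+	uint16_t data_size = 0;
+	uint16_t svc_info_len = 13; /* arbitrary odd-sized payload */
+
+	data_size += EX_ALIGN_SIZE(svc_info_len + EX_XTLV_HDR_LEN, 4);
+	printf("13-byte TLV consumes %u bytes\n", (unsigned)data_size); /* 20 */
+	return 0;
+}
+#endif
+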
+static int
+wl_cfgnan_aligned_data_size_of_opt_dp_params(struct bcm_cfg80211 *cfg, uint16 *data_size,
+ nan_datapath_cmd_data_t *cmd_data)
+{
+ s32 ret = BCME_OK;
+ if (cmd_data->svc_info.dlen) {
+ *data_size += ALIGN_SIZE(cmd_data->svc_info.dlen + NAN_XTLV_ID_LEN_SIZE, 4);
+		/* When NDPE is enabled, add this extra data_size to provide backward
+		 * compatibility with non-NDPE devices. The NDP-specific info is duplicated
+		 * and sent to FW in both the SD SVCINFO and NDPE TLV lists, as the host
+		 * doesn't know the peer's NDPE capability
+		 */
+ if (cfg->nancfg->ndpe_enabled) {
+ *data_size += ALIGN_SIZE(cmd_data->svc_info.dlen + NAN_XTLV_ID_LEN_SIZE, 4);
+ }
+ }
+ if (cmd_data->key.dlen)
+ *data_size += ALIGN_SIZE(cmd_data->key.dlen + NAN_XTLV_ID_LEN_SIZE, 4);
+ if (cmd_data->csid)
+ *data_size += ALIGN_SIZE(sizeof(nan_sec_csid_e) + NAN_XTLV_ID_LEN_SIZE, 4);
+
+ *data_size += ALIGN_SIZE(WL_NAN_SVC_HASH_LEN + NAN_XTLV_ID_LEN_SIZE, 4);
+ return ret;
+}
+int
+wl_cfgnan_svc_get_handler(struct net_device *ndev,
+ struct bcm_cfg80211 *cfg, uint16 cmd_id, nan_discover_cmd_data_t *cmd_data)
+{
+ bcm_iov_batch_subcmd_t *sub_cmd = NULL;
+ uint32 instance_id;
+ s32 ret = BCME_OK;
+ bcm_iov_batch_buf_t *nan_buf = NULL;
+
+ uint8 *resp_buf = NULL;
+ uint16 data_size = WL_NAN_OBUF_DATA_OFFSET + sizeof(instance_id);
+
+ NAN_DBG_ENTER();
+
+ nan_buf = MALLOCZ(cfg->osh, data_size);
+ if (!nan_buf) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ ret = BCME_NOMEM;
+ goto fail;
+ }
+
+ resp_buf = MALLOCZ(cfg->osh, NAN_IOCTL_BUF_SIZE_LARGE);
+ if (!resp_buf) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ ret = BCME_NOMEM;
+ goto fail;
+ }
+ nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
+ nan_buf->count = 1;
+ /* check if service is present */
+ nan_buf->is_set = false;
+ sub_cmd = (bcm_iov_batch_subcmd_t*)(&nan_buf->cmds[0]);
+ if (cmd_id == WL_NAN_CMD_SD_PUBLISH) {
+ instance_id = cmd_data->pub_id;
+ } else if (cmd_id == WL_NAN_CMD_SD_SUBSCRIBE) {
+ instance_id = cmd_data->sub_id;
+ } else {
+ ret = BCME_USAGE_ERROR;
+ WL_ERR(("wrong command id = %u\n", cmd_id));
+ goto fail;
+ }
+ /* Fill the sub_command block */
+ sub_cmd->id = htod16(cmd_id);
+ sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(instance_id);
+ sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
+
+ ret = memcpy_s(sub_cmd->data, (data_size - WL_NAN_OBUF_DATA_OFFSET),
+ &instance_id, sizeof(instance_id));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy instance id, ret = %d\n", ret));
+ goto fail;
+ }
+
+ ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, data_size,
+ &(cmd_data->status), resp_buf, NAN_IOCTL_BUF_SIZE_LARGE);
+
+ if (unlikely(ret) || unlikely(cmd_data->status)) {
+ WL_ERR(("nan svc check failed ret = %d status = %d\n", ret, cmd_data->status));
+ goto fail;
+ } else {
+ WL_DBG(("nan svc check successful..proceed to update\n"));
+ }
+
+fail:
+ if (nan_buf) {
+ MFREE(cfg->osh, nan_buf, data_size);
+ }
+
+ if (resp_buf) {
+ MFREE(cfg->osh, resp_buf, NAN_IOCTL_BUF_SIZE_LARGE);
+ }
+ NAN_DBG_EXIT();
+ return ret;
+}
+
+int
+wl_cfgnan_svc_handler(struct net_device *ndev,
+ struct bcm_cfg80211 *cfg, uint16 cmd_id, nan_discover_cmd_data_t *cmd_data)
+{
+ s32 ret = BCME_OK;
+ bcm_iov_batch_buf_t *nan_buf = NULL;
+ uint16 nan_buf_size;
+ uint8 *resp_buf = NULL;
+ /* Considering fixed params */
+ uint16 data_size = WL_NAN_OBUF_DATA_OFFSET +
+ OFFSETOF(wl_nan_sd_params_t, optional[0]);
+
+ if (cmd_data->svc_update) {
+ ret = wl_cfgnan_svc_get_handler(ndev, cfg, cmd_id, cmd_data);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to update svc handler, ret = %d\n", ret));
+ goto fail;
+ } else {
+ /* Ignoring any other svc get error */
+ if (cmd_data->status == WL_NAN_E_BAD_INSTANCE) {
+ WL_ERR(("Bad instance status, failed to update svc handler\n"));
+ goto fail;
+ }
+ }
+ }
+
+ ret = wl_cfgnan_aligned_data_size_of_opt_disc_params(&data_size, cmd_data);
+ if (unlikely(ret)) {
+		WL_ERR(("Failed to get aligned size of optional params\n"));
+ goto fail;
+ }
+ nan_buf_size = data_size;
+ NAN_DBG_ENTER();
+
+ nan_buf = MALLOCZ(cfg->osh, data_size);
+ if (!nan_buf) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ ret = BCME_NOMEM;
+ goto fail;
+ }
+
+ resp_buf = MALLOCZ(cfg->osh, data_size + NAN_IOVAR_NAME_SIZE);
+ if (!resp_buf) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ ret = BCME_NOMEM;
+ goto fail;
+ }
+ nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
+ nan_buf->count = 0;
+ nan_buf->is_set = true;
+
+ ret = wl_cfgnan_sd_params_handler(ndev, cmd_data, cmd_id,
+ &nan_buf->cmds[0], &nan_buf_size);
+ if (unlikely(ret)) {
+ WL_ERR((" Service discovery params handler failed, ret = %d\n", ret));
+ goto fail;
+ }
+
+ nan_buf->count++;
+ ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, data_size,
+ &(cmd_data->status), resp_buf, data_size + NAN_IOVAR_NAME_SIZE);
+ if (cmd_data->svc_update && (cmd_data->status == BCME_DATA_NOTFOUND)) {
+ /* return OK if update tlv data is not present
+ * which means nothing to update
+ */
+ cmd_data->status = BCME_OK;
+ }
+ if (unlikely(ret) || unlikely(cmd_data->status)) {
+ WL_ERR(("nan svc failed ret = %d status = %d\n", ret, cmd_data->status));
+ goto fail;
+ } else {
+ WL_DBG(("nan svc successful\n"));
+#ifdef WL_NAN_DISC_CACHE
+ ret = wl_cfgnan_cache_svc_info(cfg, cmd_data, cmd_id, cmd_data->svc_update);
+ if (ret < 0) {
+ WL_ERR(("%s: fail to cache svc info, ret=%d\n",
+ __FUNCTION__, ret));
+ goto fail;
+ }
+#endif /* WL_NAN_DISC_CACHE */
+ }
+
+fail:
+ if (nan_buf) {
+ MFREE(cfg->osh, nan_buf, data_size);
+ }
+
+ if (resp_buf) {
+ MFREE(cfg->osh, resp_buf, data_size + NAN_IOVAR_NAME_SIZE);
+ }
+ NAN_DBG_EXIT();
+ return ret;
+}
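+
+ /*
+ * Sketch of the batched iovar layout that the handlers in this file build
+ * (illustrative; it follows the code above, not a wire-format spec):
+ *
+ *	bcm_iov_batch_buf_t
+ *	  version    WL_NAN_IOV_BATCH_VERSION (LE16)
+ *	  count      number of sub-commands packed
+ *	  is_set     true for SET, false for GET, as used in this file
+ *	  cmds[0] -> bcm_iov_batch_subcmd_t
+ *	               id         e.g. WL_NAN_CMD_SD_PUBLISH (LE16)
+ *	               len        sizeof(u.options) + payload length
+ *	               u.options  BCM_XTLV_OPTION_ALIGN32 (LE32)
+ *	               data[]     fixed params, then optional XTLVs
+ *
+ * The running nan_buf_size is reduced by
+ * sub_cmd->len + OFFSETOF(bcm_iov_batch_subcmd_t, u.options)
+ * so it always reflects the space left past the last packed record.
+ */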
+
+int
+wl_cfgnan_publish_handler(struct net_device *ndev,
+ struct bcm_cfg80211 *cfg, nan_discover_cmd_data_t *cmd_data)
+{
+ int ret = BCME_OK;
+
+ NAN_DBG_ENTER();
+ NAN_MUTEX_LOCK();
+ /*
+ * proceed only if mandatory arguments are present - subscriber id,
+ * service hash
+ */
+ if ((!cmd_data->pub_id) || (!cmd_data->svc_hash.data) ||
+ (!cmd_data->svc_hash.dlen)) {
+ WL_ERR(("mandatory arguments are not present\n"));
+ ret = BCME_BADARG;
+ goto fail;
+ }
+
+ ret = wl_cfgnan_svc_handler(ndev, cfg, WL_NAN_CMD_SD_PUBLISH, cmd_data);
+ if (ret < 0) {
+ WL_ERR(("%s: fail to handle pub, ret=%d\n", __FUNCTION__, ret));
+ goto fail;
+ }
+ WL_INFORM_MEM(("[NAN] Service published for instance id:%d is_update %d\n",
+ cmd_data->pub_id, cmd_data->svc_update));
+
+fail:
+ NAN_MUTEX_UNLOCK();
+ NAN_DBG_EXIT();
+ return ret;
+}
+
+int
+wl_cfgnan_subscribe_handler(struct net_device *ndev,
+ struct bcm_cfg80211 *cfg, nan_discover_cmd_data_t *cmd_data)
+{
+ int ret = BCME_OK;
+#ifdef WL_NAN_DISC_CACHE
+ nan_svc_info_t *svc_info;
+#ifdef RTT_SUPPORT
+ uint8 upd_ranging_required;
+#endif /* RTT_SUPPORT */
+#endif /* WL_NAN_DISC_CACHE */
+
+#ifdef RTT_SUPPORT
+#ifdef RTT_GEOFENCE_CONT
+ dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+#endif /* RTT_GEOFENCE_CONT */
+#endif /* RTT_SUPPORT */
+
+ NAN_DBG_ENTER();
+ NAN_MUTEX_LOCK();
+
+ /*
+ * proceed only if mandatory arguments are present - subscriber id,
+ * service hash
+ */
+ if ((!cmd_data->sub_id) || (!cmd_data->svc_hash.data) ||
+ (!cmd_data->svc_hash.dlen)) {
+ WL_ERR(("mandatory arguments are not present\n"));
+ ret = BCME_BADARG;
+ goto fail;
+ }
+
+ /* Check for ranging sessions if any */
+ if (cmd_data->svc_update) {
+#ifdef WL_NAN_DISC_CACHE
+ svc_info = wl_cfgnan_get_svc_inst(cfg, cmd_data->sub_id, 0);
+ if (svc_info) {
+#ifdef RTT_SUPPORT
+ wl_cfgnan_clear_svc_from_all_ranging_inst(cfg, cmd_data->sub_id);
+ /* terminate ranging sessions for this svc, avoid clearing svc cache */
+ wl_cfgnan_terminate_all_obsolete_ranging_sessions(cfg);
+ /* Attempt RTT for current geofence target */
+ wl_cfgnan_reset_geofence_ranging(cfg, NULL,
+ RTT_SCHED_RNG_TERM_SUB_SVC_UPD, TRUE);
+ WL_DBG(("Ranging sessions handled for svc update\n"));
+ upd_ranging_required = !!(cmd_data->sde_control_flag &
+ NAN_SDE_CF_RANGING_REQUIRED);
+ if ((svc_info->ranging_required ^ upd_ranging_required) ||
+ (svc_info->ingress_limit != cmd_data->ingress_limit) ||
+ (svc_info->egress_limit != cmd_data->egress_limit)) {
+ /* Clear cache info in Firmware */
+ ret = wl_cfgnan_clear_disc_cache(cfg, cmd_data->sub_id);
+ if (ret != BCME_OK) {
+ WL_ERR(("couldn't send clear cache to FW \n"));
+ goto fail;
+ }
+ /* Invalidate local cache info */
+ wl_cfgnan_remove_disc_result(cfg, cmd_data->sub_id);
+ }
+#endif /* RTT_SUPPORT */
+ }
+#endif /* WL_NAN_DISC_CACHE */
+ }
+
+#ifdef RTT_SUPPORT
+#ifdef RTT_GEOFENCE_CONT
+ /* Override ranging Indication */
+ if (rtt_status->geofence_cfg.geofence_cont) {
+ if (cmd_data->ranging_indication !=
+ NAN_RANGE_INDICATION_NONE) {
+ cmd_data->ranging_indication = NAN_RANGE_INDICATION_CONT;
+ }
+ }
+#endif /* RTT_GEOFENCE_CONT */
+#endif /* RTT_SUPPORT */
+ ret = wl_cfgnan_svc_handler(ndev, cfg, WL_NAN_CMD_SD_SUBSCRIBE, cmd_data);
+ if (ret < 0) {
+ WL_ERR(("%s: fail to handle svc, ret=%d\n", __FUNCTION__, ret));
+ goto fail;
+ }
+ WL_INFORM_MEM(("[NAN] Service subscribed for instance id:%d is_update %d\n",
+ cmd_data->sub_id, cmd_data->svc_update));
+
+fail:
+ NAN_MUTEX_UNLOCK();
+ NAN_DBG_EXIT();
+ return ret;
+}
+
+static int
+wl_cfgnan_cancel_handler(nan_discover_cmd_data_t *cmd_data,
+ uint16 cmd_id, void *p_buf, uint16 *nan_buf_size)
+{
+ s32 ret = BCME_OK;
+
+ NAN_DBG_ENTER();
+
+ if (p_buf != NULL) {
+ bcm_iov_batch_subcmd_t *sub_cmd = (bcm_iov_batch_subcmd_t*)(p_buf);
+ wl_nan_instance_id_t instance_id;
+
+ if (cmd_id == WL_NAN_CMD_SD_CANCEL_PUBLISH) {
+ instance_id = cmd_data->pub_id;
+ } else if (cmd_id == WL_NAN_CMD_SD_CANCEL_SUBSCRIBE) {
+ instance_id = cmd_data->sub_id;
+ } else {
+ ret = BCME_USAGE_ERROR;
+ WL_ERR(("wrong command id = %u\n", cmd_id));
+ goto fail;
+ }
+
+ /* Fill the sub_command block */
+ sub_cmd->id = htod16(cmd_id);
+ sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(instance_id);
+ sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
+ ret = memcpy_s(sub_cmd->data, *nan_buf_size,
+ &instance_id, sizeof(instance_id));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy instance id, ret = %d\n", ret));
+ goto fail;
+ }
+ /* adjust iov data len to the end of last data record */
+ *nan_buf_size -= (sub_cmd->len +
+ OFFSETOF(bcm_iov_batch_subcmd_t, u.options));
+ WL_INFORM_MEM(("[NAN] Service with instance id:%d cancelled\n", instance_id));
+ } else {
+ WL_ERR(("nan_iov_buf is NULL\n"));
+ ret = BCME_ERROR;
+ goto fail;
+ }
+
+fail:
+ NAN_DBG_EXIT();
+ return ret;
+}
+
+int
+wl_cfgnan_cancel_pub_handler(struct net_device *ndev,
+ struct bcm_cfg80211 *cfg, nan_discover_cmd_data_t *cmd_data)
+{
+ bcm_iov_batch_buf_t *nan_buf = NULL;
+ s32 ret = BCME_OK;
+ uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
+ uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
+
+ NAN_DBG_ENTER();
+ NAN_MUTEX_LOCK();
+
+ nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
+ if (!nan_buf) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ ret = BCME_NOMEM;
+ goto fail;
+ }
+
+ nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
+ nan_buf->count = 0;
+ nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
+
+ /* proceed only if mandatory argument is present - publisher id */
+ if (!cmd_data->pub_id) {
+ WL_ERR(("mandatory argument is not present\n"));
+ ret = BCME_BADARG;
+ goto fail;
+ }
+
+#ifdef WL_NAN_DISC_CACHE
+ wl_cfgnan_clear_svc_cache(cfg, cmd_data->pub_id);
+#endif /* WL_NAN_DISC_CACHE */
+ ret = wl_cfgnan_cancel_handler(cmd_data, WL_NAN_CMD_SD_CANCEL_PUBLISH,
+ &nan_buf->cmds[0], &nan_buf_size);
+ if (unlikely(ret)) {
+ WL_ERR(("cancel publish failed\n"));
+ goto fail;
+ }
+ nan_buf->is_set = true;
+ nan_buf->count++;
+
+ bzero(resp_buf, sizeof(resp_buf));
+ ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size,
+ &(cmd_data->status),
+ (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
+ if (unlikely(ret) || unlikely(cmd_data->status)) {
+ WL_ERR(("nan cancel publish failed ret = %d status = %d\n",
+ ret, cmd_data->status));
+ goto fail;
+ }
+ WL_DBG(("nan cancel publish successfull\n"));
+ wl_cfgnan_remove_inst_id(cfg, cmd_data->pub_id);
+fail:
+ if (nan_buf) {
+ MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
+ }
+
+ NAN_MUTEX_UNLOCK();
+ NAN_DBG_EXIT();
+ return ret;
+}
+
+int
+wl_cfgnan_cancel_sub_handler(struct net_device *ndev,
+ struct bcm_cfg80211 *cfg, nan_discover_cmd_data_t *cmd_data)
+{
+ bcm_iov_batch_buf_t *nan_buf = NULL;
+ s32 ret = BCME_OK;
+ uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
+ uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
+
+ NAN_DBG_ENTER();
+ NAN_MUTEX_LOCK();
+
+ nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
+ if (!nan_buf) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ ret = BCME_NOMEM;
+ goto fail;
+ }
+
+ nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
+ nan_buf->count = 0;
+ nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
+
+ /* proceed only if mandatory argument is present - subscriber id */
+ if (!cmd_data->sub_id) {
+ WL_ERR(("mandatory argument is not present\n"));
+ ret = BCME_BADARG;
+ goto fail;
+ }
+
+#ifdef WL_NAN_DISC_CACHE
+#ifdef RTT_SUPPORT
+ /* terminate ranging sessions for this svc */
+ wl_cfgnan_clear_svc_from_all_ranging_inst(cfg, cmd_data->sub_id);
+ wl_cfgnan_terminate_all_obsolete_ranging_sessions(cfg);
+ wl_cfgnan_reset_geofence_ranging(cfg, NULL,
+ RTT_SCHED_RNG_TERM_SUB_SVC_CANCEL, TRUE);
+#endif /* RTT_SUPPORT */
+ /* clear svc cache for the service */
+ wl_cfgnan_clear_svc_cache(cfg, cmd_data->sub_id);
+ wl_cfgnan_remove_disc_result(cfg, cmd_data->sub_id);
+#endif /* WL_NAN_DISC_CACHE */
+
+ ret = wl_cfgnan_cancel_handler(cmd_data, WL_NAN_CMD_SD_CANCEL_SUBSCRIBE,
+ &nan_buf->cmds[0], &nan_buf_size);
+ if (unlikely(ret)) {
+ WL_ERR(("cancel subscribe failed\n"));
+ goto fail;
+ }
+ nan_buf->is_set = true;
+ nan_buf->count++;
+
+ bzero(resp_buf, sizeof(resp_buf));
+ ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size,
+ &(cmd_data->status),
+ (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
+ if (unlikely(ret) || unlikely(cmd_data->status)) {
+ WL_ERR(("nan cancel subscribe failed ret = %d status = %d\n",
+ ret, cmd_data->status));
+ goto fail;
+ }
+ WL_DBG(("subscribe cancel successfull\n"));
+ wl_cfgnan_remove_inst_id(cfg, cmd_data->sub_id);
+fail:
+ if (nan_buf) {
+ MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
+ }
+
+ NAN_MUTEX_UNLOCK();
+ NAN_DBG_EXIT();
+ return ret;
+}
+
+int
+wl_cfgnan_transmit_handler(struct net_device *ndev,
+ struct bcm_cfg80211 *cfg, nan_discover_cmd_data_t *cmd_data)
+{
+ s32 ret = BCME_OK;
+ bcm_iov_batch_buf_t *nan_buf = NULL;
+ wl_nan_sd_transmit_t *sd_xmit = NULL;
+ bcm_iov_batch_subcmd_t *sub_cmd = NULL;
+ bool is_lcl_id = FALSE;
+ bool is_dest_id = FALSE;
+ bool is_dest_mac = FALSE;
+ uint16 buflen_avail;
+ uint8 *pxtlv;
+ uint16 nan_buf_size;
+ uint8 *resp_buf = NULL;
+ /* Considering fixed params */
+ uint16 data_size = WL_NAN_OBUF_DATA_OFFSET +
+ OFFSETOF(wl_nan_sd_transmit_t, opt_tlv);
+ data_size = ALIGN_SIZE(data_size, 4);
+ ret = wl_cfgnan_aligned_data_size_of_opt_disc_params(&data_size, cmd_data);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to get alligned size of optional params\n"));
+ goto fail;
+ }
+ NAN_DBG_ENTER();
+ NAN_MUTEX_LOCK();
+ nan_buf_size = data_size;
+ nan_buf = MALLOCZ(cfg->osh, data_size);
+ if (!nan_buf) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ ret = BCME_NOMEM;
+ goto fail;
+ }
+
+ resp_buf = MALLOCZ(cfg->osh, data_size + NAN_IOVAR_NAME_SIZE);
+ if (!resp_buf) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ ret = BCME_NOMEM;
+ goto fail;
+ }
+
+ /* nan transmit */
+ nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
+ nan_buf->count = 0;
+ nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
+ /*
+ * proceed only if mandatory arguments are present - subscriber id,
+ * publisher id, mac address
+ */
+ if ((!cmd_data->local_id) || (!cmd_data->remote_id) ||
+ ETHER_ISNULLADDR(&cmd_data->mac_addr.octet)) {
+ WL_ERR(("mandatory arguments are not present\n"));
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ sub_cmd = (bcm_iov_batch_subcmd_t*)(&nan_buf->cmds[0]);
+ sd_xmit = (wl_nan_sd_transmit_t *)(sub_cmd->data);
+
+ /* local instance id must be from 1 to 255, 0 is reserved */
+ if (cmd_data->local_id == NAN_ID_RESERVED) {
+ WL_ERR(("Invalid local instance id: %d\n", cmd_data->local_id));
+ ret = BCME_BADARG;
+ goto fail;
+ }
+ sd_xmit->local_service_id = cmd_data->local_id;
+ is_lcl_id = TRUE;
+
+ /* remote instance id must be from 1 to 255, 0 is reserved */
+ if (cmd_data->remote_id == NAN_ID_RESERVED) {
+ WL_ERR(("Invalid remote instance id: %d\n", cmd_data->remote_id));
+ ret = BCME_BADARG;
+ goto fail;
+ }
+
+ sd_xmit->requestor_service_id = cmd_data->remote_id;
+ is_dest_id = TRUE;
+
+ if (!ETHER_ISNULLADDR(&cmd_data->mac_addr.octet)) {
+ ret = memcpy_s(&sd_xmit->destination_addr, ETHER_ADDR_LEN,
+ &cmd_data->mac_addr, ETHER_ADDR_LEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy dest mac address\n"));
+ goto fail;
+ }
+ } else {
+ WL_ERR(("Invalid ether addr provided\n"));
+ ret = BCME_BADARG;
+ goto fail;
+ }
+ is_dest_mac = TRUE;
+
+ if (cmd_data->priority) {
+ sd_xmit->priority = cmd_data->priority;
+ }
+ sd_xmit->token = cmd_data->token;
+
+ if (cmd_data->recv_ind_flag) {
+ /* BIT0 - if set, host won't receive the "txs" event */
+ if (CHECK_BIT(cmd_data->recv_ind_flag,
+ WL_NAN_EVENT_SUPPRESS_FOLLOWUP_RECEIVE_BIT)) {
+ sd_xmit->flags = WL_NAN_FUP_SUPR_EVT_TXS;
+ }
+ }
+ /* Optional parameters: fill the sub_command block with service descriptor attr */
+ sub_cmd->id = htod16(WL_NAN_CMD_SD_TRANSMIT);
+ sub_cmd->len = sizeof(sub_cmd->u.options) +
+ OFFSETOF(wl_nan_sd_transmit_t, opt_tlv);
+ sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
+ pxtlv = (uint8 *)&sd_xmit->opt_tlv;
+
+ nan_buf_size -= (sub_cmd->len +
+ OFFSETOF(bcm_iov_batch_subcmd_t, u.options));
+
+ buflen_avail = nan_buf_size;
+
+ if (cmd_data->svc_info.data && cmd_data->svc_info.dlen) {
+ bcm_xtlv_t *pxtlv_svc_info = (bcm_xtlv_t *)pxtlv;
+ ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
+ WL_NAN_XTLV_SD_SVC_INFO, cmd_data->svc_info.dlen,
+ cmd_data->svc_info.data, BCM_XTLV_OPTION_ALIGN32);
+ if (unlikely(ret)) {
+ WL_ERR(("%s: fail to pack on bcm_pack_xtlv_entry, ret=%d\n",
+ __FUNCTION__, ret));
+ goto fail;
+ }
+
+ /* 0xFF is max length for svc_info */
+ if (pxtlv_svc_info->len > 0xFF) {
+ WL_ERR(("Invalid service info length %d\n",
+ (pxtlv_svc_info->len)));
+ ret = BCME_USAGE_ERROR;
+ goto fail;
+ }
+ sd_xmit->opt_len = (uint8)(pxtlv_svc_info->len);
+ }
+ if (cmd_data->sde_svc_info.data && cmd_data->sde_svc_info.dlen) {
+ WL_TRACE(("optional sdea svc_info present, pack it\n"));
+ ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
+ WL_NAN_XTLV_SD_SDE_SVC_INFO, cmd_data->sde_svc_info.dlen,
+ cmd_data->sde_svc_info.data, BCM_XTLV_OPTION_ALIGN32);
+ if (unlikely(ret)) {
+ WL_ERR(("%s: fail to pack sdea svc info\n", __FUNCTION__));
+ goto fail;
+ }
+ }
+
+ /* Check if all mandatory params are provided */
+ if (is_lcl_id && is_dest_id && is_dest_mac) {
+ nan_buf->count++;
+ sub_cmd->len += (buflen_avail - nan_buf_size);
+ } else {
+ WL_ERR(("Missing parameters\n"));
+ ret = BCME_USAGE_ERROR;
+ }
+ nan_buf->is_set = TRUE;
+ ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, data_size,
+ &(cmd_data->status), resp_buf, data_size + NAN_IOVAR_NAME_SIZE);
+ if (unlikely(ret) || unlikely(cmd_data->status)) {
+ WL_ERR(("nan transmit failed for token %d ret = %d status = %d\n",
+ sd_xmit->token, ret, cmd_data->status));
+ goto fail;
+ }
+ WL_INFORM_MEM(("nan transmit successful for token %d\n", sd_xmit->token));
+fail:
+ if (nan_buf) {
+ MFREE(cfg->osh, nan_buf, data_size);
+ }
+ if (resp_buf) {
+ MFREE(cfg->osh, resp_buf, data_size + NAN_IOVAR_NAME_SIZE);
+ }
+ NAN_MUTEX_UNLOCK();
+ NAN_DBG_EXIT();
+ return ret;
+}
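+
+ /*
+ * A condensed sketch (illustrative; svc_len/svc_data stand in for a
+ * caller-supplied buffer) of the optional-TLV packing pattern used above:
+ *
+ *	uint8 *pxtlv = (uint8 *)&sd_xmit->opt_tlv;
+ *	uint16 buflen_avail = nan_buf_size;
+ *	ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
+ *		WL_NAN_XTLV_SD_SVC_INFO, svc_len, svc_data,
+ *		BCM_XTLV_OPTION_ALIGN32);
+ *	// on success pxtlv advances past the entry and nan_buf_size shrinks,
+ *	// so sub_cmd->len += (buflen_avail - nan_buf_size) accounts for
+ *	// every byte that was packed
+ */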
+
+static int
+wl_cfgnan_get_capability(struct net_device *ndev,
+ struct bcm_cfg80211 *cfg, nan_hal_capabilities_t *capabilities)
+{
+ bcm_iov_batch_buf_t *nan_buf = NULL;
+ s32 ret = BCME_OK;
+ uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
+ wl_nan_fw_cap_t *fw_cap = NULL;
+ uint16 subcmd_len;
+ uint32 status;
+ bcm_iov_batch_subcmd_t *sub_cmd = NULL;
+ bcm_iov_batch_subcmd_t *sub_cmd_resp = NULL;
+ uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
+ const bcm_xtlv_t *xtlv;
+ uint16 type = 0;
+ int len = 0;
+
+ NAN_DBG_ENTER();
+ nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
+ if (!nan_buf) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ ret = BCME_NOMEM;
+ goto fail;
+ }
+
+ nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
+ nan_buf->count = 0;
+ nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
+ sub_cmd = (bcm_iov_batch_subcmd_t*)(uint8 *)(&nan_buf->cmds[0]);
+
+ ret = wl_cfg_nan_check_cmd_len(nan_buf_size,
+ sizeof(*fw_cap), &subcmd_len);
+ if (unlikely(ret)) {
+ WL_ERR(("nan_sub_cmd check failed\n"));
+ goto fail;
+ }
+
+ fw_cap = (wl_nan_fw_cap_t *)sub_cmd->data;
+ sub_cmd->id = htod16(WL_NAN_CMD_GEN_FW_CAP);
+ sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(*fw_cap);
+ sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
+ nan_buf_size -= subcmd_len;
+ nan_buf->count = 1;
+
+ nan_buf->is_set = false;
+ memset(resp_buf, 0, sizeof(resp_buf));
+ ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
+ (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
+ if (unlikely(ret) || unlikely(status)) {
+ WL_ERR(("get nan fw cap failed ret %d status %d \n",
+ ret, status));
+ goto fail;
+ }
+
+ sub_cmd_resp = &((bcm_iov_batch_buf_t *)(resp_buf))->cmds[0];
+
+ /* check the response buff */
+ xtlv = ((const bcm_xtlv_t *)&sub_cmd_resp->data[0]);
+ if (!xtlv) {
+ ret = BCME_NOTFOUND;
+ WL_ERR(("xtlv not found: err = %d\n", ret));
+ goto fail;
+ }
+ bcm_xtlv_unpack_xtlv(xtlv, &type, (uint16*)&len, NULL, BCM_XTLV_OPTION_ALIGN32);
+ do {
+ switch (type) {
+ case WL_NAN_XTLV_GEN_FW_CAP:
+ if (len > sizeof(wl_nan_fw_cap_t)) {
+ ret = BCME_BADARG;
+ goto fail;
+ }
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ fw_cap = (wl_nan_fw_cap_t*)xtlv->data;
+ GCC_DIAGNOSTIC_POP();
+ break;
+ default:
+ WL_ERR(("Unknown xtlv: id %u\n", type));
+ ret = BCME_ERROR;
+ break;
+ }
+ if (ret != BCME_OK) {
+ goto fail;
+ }
+ } while ((xtlv = bcm_next_xtlv(xtlv, &len, BCM_XTLV_OPTION_ALIGN32)));
+
+ memset(capabilities, 0, sizeof(nan_hal_capabilities_t));
+ capabilities->max_publishes = fw_cap->max_svc_publishes;
+ capabilities->max_subscribes = fw_cap->max_svc_subscribes;
+ capabilities->max_ndi_interfaces = fw_cap->max_lcl_ndi_interfaces;
+ capabilities->max_ndp_sessions = fw_cap->max_ndp_sessions;
+ capabilities->max_concurrent_nan_clusters = fw_cap->max_concurrent_nan_clusters;
+ capabilities->max_service_name_len = fw_cap->max_service_name_len;
+ capabilities->max_match_filter_len = fw_cap->max_match_filter_len;
+ capabilities->max_total_match_filter_len = fw_cap->max_total_match_filter_len;
+ capabilities->max_service_specific_info_len = fw_cap->max_service_specific_info_len;
+ capabilities->max_app_info_len = fw_cap->max_app_info_len;
+ capabilities->max_sdea_service_specific_info_len = fw_cap->max_sdea_svc_specific_info_len;
+ capabilities->max_queued_transmit_followup_msgs = fw_cap->max_queued_tx_followup_msgs;
+ capabilities->max_subscribe_address = fw_cap->max_subscribe_address;
+ capabilities->is_ndp_security_supported = fw_cap->is_ndp_security_supported;
+ capabilities->ndp_supported_bands = fw_cap->ndp_supported_bands;
+ capabilities->cipher_suites_supported = fw_cap->cipher_suites_supported_mask;
+ if (fw_cap->flags1 & WL_NAN_FW_CAP_FLAG1_NDPE) {
+ capabilities->ndpe_attr_supported = true;
+ }
+
+fail:
+ if (nan_buf) {
+ MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
+ }
+ NAN_DBG_EXIT();
+ return ret;
+}
+
+int
+wl_cfgnan_get_capablities_handler(struct net_device *ndev,
+ struct bcm_cfg80211 *cfg, nan_hal_capabilities_t *capabilities)
+{
+ s32 ret = BCME_OK;
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(ndev);
+
+ NAN_DBG_ENTER();
+
+ /* Do not query fw about nan if feature is not supported */
+ if (!FW_SUPPORTED(dhdp, nan)) {
+ WL_DBG(("NAN is not supported\n"));
+ return ret;
+ }
+
+ if (cfg->nancfg->nan_init_state) {
+ ret = wl_cfgnan_get_capability(ndev, cfg, capabilities);
+ if (ret != BCME_OK) {
+ WL_ERR(("NAN init state: %d, failed to get capability from FW[%d]\n",
+ cfg->nancfg->nan_init_state, ret));
+ goto exit;
+ }
+ } else {
+ /* Initialize NAN before sending iovar */
+ WL_ERR(("Initializing NAN\n"));
+ ret = wl_cfgnan_init(cfg);
+ if (ret != BCME_OK) {
+ WL_ERR(("failed to initialize NAN[%d]\n", ret));
+ goto fail;
+ }
+
+ ret = wl_cfgnan_get_capability(ndev, cfg, capabilities);
+ if (ret != BCME_OK) {
+ WL_ERR(("NAN init state: %d, failed to get capability from FW[%d]\n",
+ cfg->nancfg->nan_init_state, ret));
+ goto exit;
+ }
+ WL_ERR(("De-Initializing NAN\n"));
+ ret = wl_cfgnan_deinit(cfg, dhdp->up);
+ if (ret != BCME_OK) {
+ WL_ERR(("failed to de-initialize NAN[%d]\n", ret));
+ goto fail;
+ }
+ }
+fail:
+ NAN_DBG_EXIT();
+ return ret;
+exit:
+ /* Keeping backward compatibility */
+ capabilities->max_concurrent_nan_clusters = MAX_CONCURRENT_NAN_CLUSTERS;
+ capabilities->max_publishes = MAX_PUBLISHES;
+ capabilities->max_subscribes = MAX_SUBSCRIBES;
+ capabilities->max_service_name_len = MAX_SVC_NAME_LEN;
+ capabilities->max_match_filter_len = MAX_MATCH_FILTER_LEN;
+ capabilities->max_total_match_filter_len = MAX_TOTAL_MATCH_FILTER_LEN;
+ capabilities->max_service_specific_info_len = NAN_MAX_SERVICE_SPECIFIC_INFO_LEN;
+ capabilities->max_ndi_interfaces = NAN_MAX_NDI;
+ capabilities->max_ndp_sessions = MAX_NDP_SESSIONS;
+ capabilities->max_app_info_len = MAX_APP_INFO_LEN;
+ capabilities->max_queued_transmit_followup_msgs = MAX_QUEUED_TX_FOLLOUP_MSGS;
+ capabilities->max_sdea_service_specific_info_len = MAX_SDEA_SVC_INFO_LEN;
+ capabilities->max_subscribe_address = MAX_SUBSCRIBE_ADDRESS;
+ capabilities->cipher_suites_supported = WL_NAN_CIPHER_SUITE_SHARED_KEY_128_MASK;
+ capabilities->max_scid_len = MAX_SCID_LEN;
+ capabilities->is_ndp_security_supported = true;
+ capabilities->ndp_supported_bands = NDP_SUPPORTED_BANDS;
+ capabilities->ndpe_attr_supported = false;
+ ret = BCME_OK;
+ NAN_DBG_EXIT();
+ return ret;
+}
+
+bool wl_cfgnan_is_enabled(struct bcm_cfg80211 *cfg)
+{
+ wl_nancfg_t *nancfg = cfg->nancfg;
+ if (nancfg) {
+ if (nancfg->nan_init_state && nancfg->nan_enable) {
+ return TRUE;
+ }
+ }
+
+ return FALSE;
+}
+
+static int
+wl_cfgnan_init(struct bcm_cfg80211 *cfg)
+{
+ s32 ret = BCME_OK;
+ uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
+ uint32 status;
+ uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
+ uint8 buf[NAN_IOCTL_BUF_SIZE];
+ bcm_iov_batch_buf_t *nan_buf = (bcm_iov_batch_buf_t*)buf;
+
+ NAN_DBG_ENTER();
+ if (cfg->nancfg->nan_init_state) {
+ WL_ERR(("nan initialized/nmi exists\n"));
+ return BCME_OK;
+ }
+ nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
+ nan_buf->count = 0;
+ nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
+ ret = wl_cfgnan_init_handler(&nan_buf->cmds[0], &nan_buf_size, true);
+ if (unlikely(ret)) {
+ WL_ERR(("init handler sub_cmd set failed\n"));
+ goto fail;
+ }
+ nan_buf->count++;
+ nan_buf->is_set = true;
+
+ bzero(resp_buf, sizeof(resp_buf));
+ ret = wl_cfgnan_execute_ioctl(bcmcfg_to_prmry_ndev(cfg), cfg,
+ nan_buf, nan_buf_size, &status,
+ (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
+ if (unlikely(ret) || unlikely(status)) {
+ WL_ERR(("nan init handler failed ret %d status %d\n",
+ ret, status));
+ goto fail;
+ }
+
+#ifdef WL_NAN_DISC_CACHE
+ /* malloc for disc result */
+ cfg->nancfg->nan_disc_cache = MALLOCZ(cfg->osh,
+ NAN_MAX_CACHE_DISC_RESULT * sizeof(nan_disc_result_cache));
+ if (!cfg->nancfg->nan_disc_cache) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ ret = BCME_NOMEM;
+ goto fail;
+ }
+#endif /* WL_NAN_DISC_CACHE */
+ cfg->nancfg->nan_init_state = true;
+ NAN_DBG_EXIT();
+ return ret;
+fail:
+ NAN_DBG_EXIT();
+ return ret;
+}
+
+static void
+wl_cfgnan_deinit_cleanup(struct bcm_cfg80211 *cfg)
+{
+ uint8 i = 0;
+ wl_nancfg_t *nancfg = cfg->nancfg;
+
+ nancfg->nan_dp_count = 0;
+ nancfg->nan_init_state = false;
+#ifdef WL_NAN_DISC_CACHE
+ if (nancfg->nan_disc_cache) {
+ for (i = 0; i < NAN_MAX_CACHE_DISC_RESULT; i++) {
+ if (nancfg->nan_disc_cache[i].tx_match_filter.data) {
+ MFREE(cfg->osh, nancfg->nan_disc_cache[i].tx_match_filter.data,
+ nancfg->nan_disc_cache[i].tx_match_filter.dlen);
+ }
+ if (nancfg->nan_disc_cache[i].svc_info.data) {
+ MFREE(cfg->osh, nancfg->nan_disc_cache[i].svc_info.data,
+ nancfg->nan_disc_cache[i].svc_info.dlen);
+ }
+ }
+ MFREE(cfg->osh, nancfg->nan_disc_cache,
+ NAN_MAX_CACHE_DISC_RESULT * sizeof(nan_disc_result_cache));
+ nancfg->nan_disc_cache = NULL;
+ }
+ nancfg->nan_disc_count = 0;
+ bzero(nancfg->svc_info, NAN_MAX_SVC_INST * sizeof(nan_svc_info_t));
+ bzero(nancfg->nan_ranging_info, NAN_MAX_RANGING_INST * sizeof(nan_ranging_inst_t));
+#endif /* WL_NAN_DISC_CACHE */
+ return;
+}
+
+static int
+wl_cfgnan_deinit(struct bcm_cfg80211 *cfg, uint8 busstate)
+{
+ s32 ret = BCME_OK;
+ uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
+ uint32 status;
+ uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
+ uint8 buf[NAN_IOCTL_BUF_SIZE];
+ bcm_iov_batch_buf_t *nan_buf = (bcm_iov_batch_buf_t*)buf;
+ wl_nancfg_t *nancfg = cfg->nancfg;
+
+ NAN_DBG_ENTER();
+ NAN_MUTEX_LOCK();
+
+ if (!nancfg->nan_init_state) {
+ WL_ERR(("nan is not initialized/nmi doesnt exists\n"));
+ ret = BCME_OK;
+ goto fail;
+ }
+
+ if (busstate != DHD_BUS_DOWN) {
+ nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
+ nan_buf->count = 0;
+ nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
+
+ WL_DBG(("nan deinit\n"));
+ ret = wl_cfgnan_init_handler(&nan_buf->cmds[0], &nan_buf_size, false);
+ if (unlikely(ret)) {
+ WL_ERR(("deinit handler sub_cmd set failed\n"));
+ } else {
+ nan_buf->count++;
+ nan_buf->is_set = true;
+ bzero(resp_buf, sizeof(resp_buf));
+ ret = wl_cfgnan_execute_ioctl(cfg->wdev->netdev, cfg,
+ nan_buf, nan_buf_size, &status,
+ (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
+ if (unlikely(ret) || unlikely(status)) {
+ WL_ERR(("nan init handler failed ret %d status %d\n",
+ ret, status));
+ }
+ }
+ }
+ wl_cfgnan_deinit_cleanup(cfg);
+
+fail:
+ if (!nancfg->mac_rand && !ETHER_ISNULLADDR(nancfg->nan_nmi_mac)) {
+ wl_release_vif_macaddr(cfg, nancfg->nan_nmi_mac, WL_IF_TYPE_NAN_NMI);
+ }
+ NAN_MUTEX_UNLOCK();
+ NAN_DBG_EXIT();
+ return ret;
+}
+
+static int
+wl_cfgnan_get_ndi_macaddr(struct bcm_cfg80211 *cfg, u8* mac_addr)
+{
+ int i = 0;
+ int ret = BCME_OK;
+ bool rand_mac = cfg->nancfg->mac_rand;
+ BCM_REFERENCE(i);
+
+ if (rand_mac) {
+ /* ensure nmi != ndi */
+ do {
+ RANDOM_BYTES(mac_addr, ETHER_ADDR_LEN);
+ /* clear the multicast bit and set the locally administered bit */
+ ETHER_SET_UNICAST(mac_addr);
+ ETHER_SET_LOCALADDR(mac_addr);
+ i++;
+ if (i == NAN_RAND_MAC_RETRIES) {
+ break;
+ }
+ } while (eacmp(cfg->nancfg->nan_nmi_mac, mac_addr) == 0);
+
+ if (i == NAN_RAND_MAC_RETRIES) {
+ if (eacmp(cfg->nancfg->nan_nmi_mac, mac_addr) == 0) {
+ WL_ERR(("\nCouldn't generate rand NDI which != NMI\n"));
+ ret = BCME_NORESOURCE;
+ goto fail;
+ }
+ }
+ } else {
+ if (wl_get_vif_macaddr(cfg, WL_IF_TYPE_NAN,
+ mac_addr) != BCME_OK) {
+ ret = -EINVAL;
+ WL_ERR(("Failed to get mac addr for NDI\n"));
+ goto fail;
+ }
+ }
+
+fail:
+ return ret;
+}
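+
+ /*
+ * What the ETHER_SET_UNICAST/ETHER_SET_LOCALADDR fixup above amounts to,
+ * assuming the usual IEEE 802 bit layout of the first address octet:
+ *
+ *	mac_addr[0] &= 0xfe;	// clear the I/G bit -> unicast
+ *	mac_addr[0] |= 0x02;	// set the U/L bit -> locally administered
+ *
+ * This keeps a randomly generated NDI out of the multicast address space
+ * while marking it as not globally assigned.
+ */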
+
+int
+wl_cfgnan_data_path_iface_create_delete_handler(struct net_device *ndev,
+ struct bcm_cfg80211 *cfg, char *ifname, uint16 type, uint8 busstate)
+{
+ u8 mac_addr[ETH_ALEN];
+ s32 ret = BCME_OK;
+ s32 idx;
+ struct wireless_dev *wdev;
+ NAN_DBG_ENTER();
+
+ if (busstate != DHD_BUS_DOWN) {
+ ASSERT(cfg->nancfg->ndi);
+ if (type == NAN_WIFI_SUBCMD_DATA_PATH_IFACE_CREATE) {
+ if ((idx = wl_cfgnan_get_ndi_idx(cfg)) < 0) {
+ WL_ERR(("No free idx for NAN NDI\n"));
+ ret = BCME_NORESOURCE;
+ goto fail;
+ }
+
+ ret = wl_cfgnan_get_ndi_macaddr(cfg, mac_addr);
+ if (ret != BCME_OK) {
+ WL_ERR(("Couldn't get mac addr for NDI ret %d\n", ret));
+ goto fail;
+ }
+ wdev = wl_cfg80211_add_if(cfg, ndev, WL_IF_TYPE_NAN,
+ ifname, mac_addr);
+ if (!wdev) {
+ ret = -ENODEV;
+ WL_ERR(("Failed to create NDI iface = %s, wdev is NULL\n", ifname));
+ goto fail;
+ }
+ /* Store the iface name to pub data so that it can be used
+ * during NAN enable
+ */
+ wl_cfgnan_add_ndi_data(cfg, idx, ifname);
+ cfg->nancfg->ndi[idx].created = true;
+ /* Store nan ndev */
+ cfg->nancfg->ndi[idx].nan_ndev = wdev_to_ndev(wdev);
+
+ } else if (type == NAN_WIFI_SUBCMD_DATA_PATH_IFACE_DELETE) {
+ ret = wl_cfg80211_del_if(cfg, ndev, NULL, ifname);
+ if (ret == BCME_OK) {
+ if (wl_cfgnan_del_ndi_data(cfg, ifname) < 0) {
+ WL_ERR(("Failed to find matching data for ndi:%s\n",
+ ifname));
+ }
+ } else if (ret == -ENODEV) {
+ WL_INFORM(("Already deleted: %s\n", ifname));
+ ret = BCME_OK;
+ } else if (ret != BCME_OK) {
+ WL_ERR(("failed to delete NDI[%d]\n", ret));
+ }
+ }
+ } else {
+ ret = -ENODEV;
+ WL_ERR(("Bus is already down, no dev found to remove, ret = %d\n", ret));
+ }
+fail:
+ NAN_DBG_EXIT();
+ return ret;
+}
+
+/*
+ * Return data peer from peer list
+ * for peer_addr
+ * NULL if not found
+ */
+static nan_ndp_peer_t *
+wl_cfgnan_data_get_peer(struct bcm_cfg80211 *cfg,
+ struct ether_addr *peer_addr)
+{
+ uint8 i;
+ nan_ndp_peer_t* peer = cfg->nancfg->nan_ndp_peer_info;
+
+ if (!peer) {
+ WL_ERR(("wl_cfgnan_data_get_peer: nan_ndp_peer_info is NULL\n"));
+ goto exit;
+ }
+ for (i = 0; i < cfg->nancfg->max_ndp_count; i++) {
+ if (peer[i].peer_dp_state != NAN_PEER_DP_NOT_CONNECTED &&
+ (!memcmp(peer_addr, &peer[i].peer_addr, ETHER_ADDR_LEN))) {
+ return &peer[i];
+ }
+ }
+
+exit:
+ return NULL;
+}
+
+/*
+ * Returns True if
+ * datapath exists for nan cfg
+ * for given peer
+ */
+bool
+wl_cfgnan_data_dp_exists_with_peer(struct bcm_cfg80211 *cfg,
+ struct ether_addr *peer_addr)
+{
+ bool ret = FALSE;
+ nan_ndp_peer_t* peer = NULL;
+
+ if ((cfg->nancfg->nan_init_state == FALSE) ||
+ (cfg->nancfg->nan_enable == FALSE)) {
+ goto exit;
+ }
+
+ /* check for peer exist */
+ peer = wl_cfgnan_data_get_peer(cfg, peer_addr);
+ if (peer) {
+ ret = TRUE;
+ }
+
+exit:
+ return ret;
+}
+
+/*
+ * As of now API only available
+ * for setting state to CONNECTED
+ * if applicable
+ */
+static void
+wl_cfgnan_data_set_peer_dp_state(struct bcm_cfg80211 *cfg,
+ struct ether_addr *peer_addr, nan_peer_dp_state_t state)
+{
+ nan_ndp_peer_t* peer = NULL;
+ /* check for peer exist */
+ peer = wl_cfgnan_data_get_peer(cfg, peer_addr);
+ if (!peer) {
+ goto end;
+ }
+ peer->peer_dp_state = state;
+end:
+ return;
+}
+
+/* Adds peer to nan data peer list */
+void
+wl_cfgnan_data_add_peer(struct bcm_cfg80211 *cfg,
+ struct ether_addr *peer_addr)
+{
+ uint8 i;
+ nan_ndp_peer_t* peer = NULL;
+ /* check for peer exist */
+ peer = wl_cfgnan_data_get_peer(cfg, peer_addr);
+ if (peer) {
+ peer->dp_count++;
+ goto end;
+ }
+ peer = cfg->nancfg->nan_ndp_peer_info;
+ for (i = 0; i < cfg->nancfg->max_ndp_count; i++) {
+ if (peer[i].peer_dp_state == NAN_PEER_DP_NOT_CONNECTED) {
+ break;
+ }
+ }
+ if (i == cfg->nancfg->max_ndp_count) {
+ WL_DBG(("DP Peer list full, dropping add peer req\n"));
+ goto end;
+ }
+ /* Add peer to list */
+ memcpy(&peer[i].peer_addr, peer_addr, ETHER_ADDR_LEN);
+ peer[i].dp_count = 1;
+ peer[i].peer_dp_state = NAN_PEER_DP_CONNECTING;
+
+end:
+ return;
+}
+
+/* Removes nan data peer from peer list */
+void
+wl_cfgnan_data_remove_peer(struct bcm_cfg80211 *cfg,
+ struct ether_addr *peer_addr)
+{
+ nan_ndp_peer_t* peer = NULL;
+ /* check for peer exist */
+ peer = wl_cfgnan_data_get_peer(cfg, peer_addr);
+ if (!peer) {
+ WL_DBG(("DP Peer not present in list, "
+ "Droopping remove peer req\n"));
+ goto end;
+ }
+ peer->dp_count--;
+ if (peer->dp_count == 0) {
+ /* No more NDPs, delete entry */
+ memset(peer, 0, sizeof(nan_ndp_peer_t));
+ } else {
+ /* Set peer dp state to connected if any ndp still exists */
+ peer->peer_dp_state = NAN_PEER_DP_CONNECTED;
+ }
+end:
+ return;
+}
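+
+ /*
+ * Illustrative lifecycle of the dp_count reference counting implemented by
+ * the peer helpers above (states as used in this file):
+ *
+ *	add_peer(A)     dp_count = 1, state = NAN_PEER_DP_CONNECTING
+ *	DP confirm      state = NAN_PEER_DP_CONNECTED (set_peer_dp_state)
+ *	add_peer(A)     dp_count = 2 (second NDP to the same peer)
+ *	remove_peer(A)  dp_count = 1, state stays NAN_PEER_DP_CONNECTED
+ *	remove_peer(A)  dp_count = 0, entry zeroed and slot reusable
+ */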
+
+int
+wl_cfgnan_data_path_request_handler(struct net_device *ndev,
+ struct bcm_cfg80211 *cfg, nan_datapath_cmd_data_t *cmd_data,
+ uint8 *ndp_instance_id)
+{
+ s32 ret = BCME_OK;
+ bcm_iov_batch_buf_t *nan_buf = NULL;
+ wl_nan_dp_req_t *datareq = NULL;
+ bcm_iov_batch_subcmd_t *sub_cmd = NULL;
+ uint16 buflen_avail;
+ uint8 *pxtlv;
+ struct wireless_dev *wdev;
+ uint16 nan_buf_size;
+ uint8 *resp_buf = NULL;
+ /* Considering fixed params */
+ uint16 data_size = WL_NAN_OBUF_DATA_OFFSET +
+ OFFSETOF(wl_nan_dp_req_t, tlv_params);
+ data_size = ALIGN_SIZE(data_size, 4);
+
+ ret = wl_cfgnan_aligned_data_size_of_opt_dp_params(cfg, &data_size, cmd_data);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to get alligned size of optional params\n"));
+ goto fail;
+ }
+
+ nan_buf_size = data_size;
+ NAN_DBG_ENTER();
+
+ mutex_lock(&cfg->if_sync);
+ NAN_MUTEX_LOCK();
+#ifdef WL_IFACE_MGMT
+ if ((ret = wl_cfg80211_handle_if_role_conflict(cfg, WL_IF_TYPE_NAN)) < 0) {
+ WL_ERR(("Conflicting iface found to be active\n"));
+ ret = BCME_UNSUPPORTED;
+ goto fail;
+ }
+#endif /* WL_IFACE_MGMT */
+
+#ifdef RTT_SUPPORT
+ /* cancel any ongoing RTT session with the peer,
+ * as we do not support DP and RNG to the same peer
+ */
+ wl_cfgnan_handle_dp_ranging_concurrency(cfg, &cmd_data->mac_addr,
+ RTT_GEO_SUSPN_HOST_NDP_TRIGGER);
+#endif /* RTT_SUPPORT */
+
+ nan_buf = MALLOCZ(cfg->osh, data_size);
+ if (!nan_buf) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ ret = BCME_NOMEM;
+ goto fail;
+ }
+
+ resp_buf = MALLOCZ(cfg->osh, data_size + NAN_IOVAR_NAME_SIZE);
+ if (!resp_buf) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ ret = BCME_NOMEM;
+ goto fail;
+ }
+
+ ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg),
+ cfg, &cmd_data->avail_params, WL_AVAIL_LOCAL);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to set avail value with type local\n"));
+ goto fail;
+ }
+
+ ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg),
+ cfg, &cmd_data->avail_params, WL_AVAIL_NDC);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to set avail value with type ndc\n"));
+ goto fail;
+ }
+
+ nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
+ nan_buf->count = 0;
+ nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
+
+ sub_cmd = (bcm_iov_batch_subcmd_t*)(&nan_buf->cmds[0]);
+ datareq = (wl_nan_dp_req_t *)(sub_cmd->data);
+
+ /* setting default data path type to unicast */
+ datareq->type = WL_NAN_DP_TYPE_UNICAST;
+
+ if (cmd_data->pub_id) {
+ datareq->pub_id = cmd_data->pub_id;
+ }
+
+ if (!ETHER_ISNULLADDR(&cmd_data->mac_addr.octet)) {
+ ret = memcpy_s(&datareq->peer_mac, ETHER_ADDR_LEN,
+ &cmd_data->mac_addr, ETHER_ADDR_LEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy ether addr provided\n"));
+ goto fail;
+ }
+ } else {
+ WL_ERR(("Invalid ether addr provided\n"));
+ ret = BCME_BADARG;
+ goto fail;
+ }
+
+ /* Retrieve mac from given iface name */
+ wdev = wl_cfg80211_get_wdev_from_ifname(cfg,
+ (char *)cmd_data->ndp_iface);
+ if (!wdev || ETHER_ISNULLADDR(wdev->netdev->dev_addr)) {
+ ret = -EINVAL;
+ WL_ERR(("Failed to retrieve wdev/dev addr for ndp_iface = %s\n",
+ (char *)cmd_data->ndp_iface));
+ goto fail;
+ }
+
+ if (!ETHER_ISNULLADDR(wdev->netdev->dev_addr)) {
+ ret = memcpy_s(&datareq->ndi, ETHER_ADDR_LEN,
+ wdev->netdev->dev_addr, ETHER_ADDR_LEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy ether addr provided\n"));
+ goto fail;
+ }
+ WL_TRACE(("%s: Retrieved ndi mac " MACDBG "\n",
+ __FUNCTION__, MAC2STRDBG(datareq->ndi.octet)));
+ } else {
+ WL_ERR(("Invalid NDI addr retrieved\n"));
+ ret = BCME_BADARG;
+ goto fail;
+ }
+
+ datareq->ndl_qos.min_slots = NAN_NDL_QOS_MIN_SLOT_NO_PREF;
+ datareq->ndl_qos.max_latency = NAN_NDL_QOS_MAX_LAT_NO_PREF;
+
+ /* Fill the sub_command block */
+ sub_cmd->id = htod16(WL_NAN_CMD_DATA_DATAREQ);
+ sub_cmd->len = sizeof(sub_cmd->u.options) +
+ OFFSETOF(wl_nan_dp_req_t, tlv_params);
+ sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
+ pxtlv = (uint8 *)&datareq->tlv_params;
+
+ nan_buf_size -= (sub_cmd->len +
+ OFFSETOF(bcm_iov_batch_subcmd_t, u.options));
+ buflen_avail = nan_buf_size;
+
+ if (cmd_data->svc_info.data && cmd_data->svc_info.dlen) {
+ ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
+ WL_NAN_XTLV_SD_SVC_INFO, cmd_data->svc_info.dlen,
+ cmd_data->svc_info.data,
+ BCM_XTLV_OPTION_ALIGN32);
+ if (ret != BCME_OK) {
+ WL_ERR(("unable to process svc_spec_info: %d\n", ret));
+ goto fail;
+ }
+ /* If NDPE is enabled, duplicate svc_info and send it in the NDPE TLV list
+ * as well as in SD SVC INFO: FW treats the two as different entities,
+ * whereas the framework delivers both in the same variable
+ * (cmd_data->svc_info). FW decides which one to use based on the
+ * peer's capability (NDPE capable or not).
+ */
+ if (cfg->nancfg->ndpe_enabled) {
+ ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
+ WL_NAN_XTLV_SD_NDPE_TLV_LIST, cmd_data->svc_info.dlen,
+ cmd_data->svc_info.data,
+ BCM_XTLV_OPTION_ALIGN32);
+ if (ret != BCME_OK) {
+ WL_ERR(("unable to process NDPE TLV list: %d\n", ret));
+ goto fail;
+ }
+ }
+ datareq->flags |= WL_NAN_DP_FLAG_SVC_INFO;
+ }
+
+ /* Security elements */
+
+ if (cmd_data->csid) {
+ WL_TRACE(("Cipher suite type is present, pack it\n"));
+ ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
+ WL_NAN_XTLV_CFG_SEC_CSID, sizeof(nan_sec_csid_e),
+ (uint8*)&cmd_data->csid, BCM_XTLV_OPTION_ALIGN32);
+ if (unlikely(ret)) {
+ WL_ERR(("%s: fail to pack on csid\n", __FUNCTION__));
+ goto fail;
+ }
+ }
+
+ if (cmd_data->ndp_cfg.security_cfg) {
+ if ((cmd_data->key_type == NAN_SECURITY_KEY_INPUT_PMK) ||
+ (cmd_data->key_type == NAN_SECURITY_KEY_INPUT_PASSPHRASE)) {
+ if (cmd_data->key.data && cmd_data->key.dlen) {
+ WL_TRACE(("optional pmk present, pack it\n"));
+ ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
+ WL_NAN_XTLV_CFG_SEC_PMK, cmd_data->key.dlen,
+ cmd_data->key.data, BCM_XTLV_OPTION_ALIGN32);
+ if (unlikely(ret)) {
+ WL_ERR(("%s: fail to pack on WL_NAN_XTLV_CFG_SEC_PMK\n",
+ __FUNCTION__));
+ goto fail;
+ }
+ }
+ } else {
+ WL_ERR(("Invalid security key type\n"));
+ ret = BCME_BADARG;
+ goto fail;
+ }
+
+ if ((cmd_data->svc_hash.dlen == WL_NAN_SVC_HASH_LEN) &&
+ (cmd_data->svc_hash.data)) {
+ WL_TRACE(("svc hash present, pack it\n"));
+ ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
+ WL_NAN_XTLV_CFG_SVC_HASH, WL_NAN_SVC_HASH_LEN,
+ cmd_data->svc_hash.data, BCM_XTLV_OPTION_ALIGN32);
+ if (ret != BCME_OK) {
+ WL_ERR(("%s: fail to pack WL_NAN_XTLV_CFG_SVC_HASH\n",
+ __FUNCTION__));
+ goto fail;
+ }
+ } else {
+#ifdef WL_NAN_DISC_CACHE
+ /* check in cache */
+ nan_disc_result_cache *cache;
+ cache = wl_cfgnan_get_disc_result(cfg,
+ datareq->pub_id, &datareq->peer_mac);
+ if (!cache) {
+ ret = BCME_ERROR;
+ WL_ERR(("invalid svc hash data or length = %d\n",
+ cmd_data->svc_hash.dlen));
+ goto fail;
+ }
+ WL_TRACE(("svc hash present, pack it\n"));
+ ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
+ WL_NAN_XTLV_CFG_SVC_HASH, WL_NAN_SVC_HASH_LEN,
+ cache->svc_hash, BCM_XTLV_OPTION_ALIGN32);
+ if (ret != BCME_OK) {
+ WL_ERR(("%s: fail to pack WL_NAN_XTLV_CFG_SVC_HASH\n",
+ __FUNCTION__));
+ goto fail;
+ }
+#else
+ ret = BCME_ERROR;
+ WL_ERR(("invalid svc hash data or length = %d\n",
+ cmd_data->svc_hash.dlen));
+ goto fail;
+#endif /* WL_NAN_DISC_CACHE */
+ }
+ /* If the Data req is for secure data connection */
+ datareq->flags |= WL_NAN_DP_FLAG_SECURITY;
+ }
+
+ sub_cmd->len += (buflen_avail - nan_buf_size);
+ nan_buf->is_set = false;
+ nan_buf->count++;
+
+ ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, data_size,
+ &(cmd_data->status), resp_buf, data_size + NAN_IOVAR_NAME_SIZE);
+ if (unlikely(ret) || unlikely(cmd_data->status)) {
+ WL_ERR(("nan data path request handler failed, ret = %d,"
+ " status %d, peer: " MACDBG "\n",
+ ret, cmd_data->status, MAC2STRDBG(&(cmd_data->mac_addr))));
+ goto fail;
+ }
+
+ /* check the response buff */
+ if (ret == BCME_OK) {
+ ret = process_resp_buf(resp_buf + WL_NAN_OBUF_DATA_OFFSET,
+ ndp_instance_id, WL_NAN_CMD_DATA_DATAREQ);
+ cmd_data->ndp_instance_id = *ndp_instance_id;
+ }
+ WL_INFORM_MEM(("[NAN] DP request successfull (ndp_id:%d), peer: " MACDBG " \n",
+ cmd_data->ndp_instance_id, MAC2STRDBG(&cmd_data->mac_addr)));
+ /* Add peer to data ndp peer list */
+ wl_cfgnan_data_add_peer(cfg, &datareq->peer_mac);
+
+fail:
+ if (nan_buf) {
+ MFREE(cfg->osh, nan_buf, data_size);
+ }
+
+ if (resp_buf) {
+ MFREE(cfg->osh, resp_buf, data_size + NAN_IOVAR_NAME_SIZE);
+ }
+ NAN_MUTEX_UNLOCK();
+ mutex_unlock(&cfg->if_sync);
+ NAN_DBG_EXIT();
+ return ret;
+}
+
+int
+wl_cfgnan_data_path_response_handler(struct net_device *ndev,
+ struct bcm_cfg80211 *cfg, nan_datapath_cmd_data_t *cmd_data)
+{
+ s32 ret = BCME_OK;
+ bcm_iov_batch_buf_t *nan_buf = NULL;
+ wl_nan_dp_resp_t *dataresp = NULL;
+ bcm_iov_batch_subcmd_t *sub_cmd = NULL;
+ uint16 buflen_avail;
+ uint8 *pxtlv;
+ struct wireless_dev *wdev;
+ uint16 nan_buf_size;
+ uint8 *resp_buf = NULL;
+
+ /* Considering fixed params */
+ uint16 data_size = WL_NAN_OBUF_DATA_OFFSET +
+ OFFSETOF(wl_nan_dp_resp_t, tlv_params);
+ data_size = ALIGN_SIZE(data_size, 4);
+ ret = wl_cfgnan_aligned_data_size_of_opt_dp_params(cfg, &data_size, cmd_data);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to get alligned size of optional params\n"));
+ goto fail;
+ }
+ nan_buf_size = data_size;
+
+ NAN_DBG_ENTER();
+
+ mutex_lock(&cfg->if_sync);
+ NAN_MUTEX_LOCK();
+#ifdef WL_IFACE_MGMT
+ if ((ret = wl_cfg80211_handle_if_role_conflict(cfg, WL_IF_TYPE_NAN)) < 0) {
+ WL_ERR(("Conflicting iface found to be active\n"));
+ ret = BCME_UNSUPPORTED;
+ goto fail;
+ }
+#endif /* WL_IFACE_MGMT */
+
+ nan_buf = MALLOCZ(cfg->osh, data_size);
+ if (!nan_buf) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ ret = BCME_NOMEM;
+ goto fail;
+ }
+
+ resp_buf = MALLOCZ(cfg->osh, data_size + NAN_IOVAR_NAME_SIZE);
+ if (!resp_buf) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ ret = BCME_NOMEM;
+ goto fail;
+ }
+
+ ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg),
+ cfg, &cmd_data->avail_params, WL_AVAIL_LOCAL);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to set avail value with type local\n"));
+ goto fail;
+ }
+
+ ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg),
+ cfg, &cmd_data->avail_params, WL_AVAIL_NDC);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to set avail value with type ndc\n"));
+ goto fail;
+ }
+
+ nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
+ nan_buf->count = 0;
+ nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
+
+ sub_cmd = (bcm_iov_batch_subcmd_t*)(&nan_buf->cmds[0]);
+ dataresp = (wl_nan_dp_resp_t *)(sub_cmd->data);
+
+ /* Setting default data path type to unicast */
+ dataresp->type = WL_NAN_DP_TYPE_UNICAST;
+ /* Framework and fw use inverted accept/reject response codes,
+ * so flip rsp_code to match the fw convention
+ */
+ dataresp->status = cmd_data->rsp_code ^= 1;
+ dataresp->reason_code = 0;
+
+ /* ndp instance id must be from 1 to 255, 0 is reserved */
+ if (cmd_data->ndp_instance_id < NAN_ID_MIN ||
+ cmd_data->ndp_instance_id > NAN_ID_MAX) {
+ WL_ERR(("Invalid ndp instance id: %d\n", cmd_data->ndp_instance_id));
+ ret = BCME_BADARG;
+ goto fail;
+ }
+ dataresp->ndp_id = cmd_data->ndp_instance_id;
+
+ /* Retrieved initiator ndi from NanDataPathRequestInd */
+ if (!ETHER_ISNULLADDR(&cfg->nancfg->initiator_ndi.octet)) {
+ ret = memcpy_s(&dataresp->mac_addr, ETHER_ADDR_LEN,
+ &cfg->nancfg->initiator_ndi, ETHER_ADDR_LEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy initiator ndi\n"));
+ goto fail;
+ }
+ } else {
+ WL_ERR(("Invalid ether addr retrieved\n"));
+ ret = BCME_BADARG;
+ goto fail;
+ }
+
+ /* Interface is not mandatory, when it is a reject from framework */
+ if (dataresp->status != WL_NAN_DP_STATUS_REJECTED) {
+#ifdef RTT_SUPPORT
+ /* cancel any ongoing RTT session with the peer,
+ * as we do not support DP and RNG to the same peer
+ */
+ wl_cfgnan_handle_dp_ranging_concurrency(cfg, &cmd_data->mac_addr,
+ RTT_GEO_SUSPN_HOST_NDP_TRIGGER);
+#endif /* RTT_SUPPORT */
+ /* Retrieve mac from given iface name */
+ wdev = wl_cfg80211_get_wdev_from_ifname(cfg,
+ (char *)cmd_data->ndp_iface);
+ if (!wdev || ETHER_ISNULLADDR(wdev->netdev->dev_addr)) {
+ ret = -EINVAL;
+ WL_ERR(("Failed to retrieve wdev/dev addr for ndp_iface = %s\n",
+ (char *)cmd_data->ndp_iface));
+ goto fail;
+ }
+
+ if (!ETHER_ISNULLADDR(wdev->netdev->dev_addr)) {
+ ret = memcpy_s(&dataresp->ndi, ETHER_ADDR_LEN,
+ wdev->netdev->dev_addr, ETHER_ADDR_LEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy responder ndi\n"));
+ goto fail;
+ }
+ WL_TRACE(("%s: Retrieved ndi mac " MACDBG "\n",
+ __FUNCTION__, MAC2STRDBG(dataresp->ndi.octet)));
+ } else {
+ WL_ERR(("Invalid NDI addr retrieved\n"));
+ ret = BCME_BADARG;
+ goto fail;
+ }
+ }
+
+ dataresp->ndl_qos.min_slots = NAN_NDL_QOS_MIN_SLOT_NO_PREF;
+ dataresp->ndl_qos.max_latency = NAN_NDL_QOS_MAX_LAT_NO_PREF;
+
+ /* Fill the sub_command block */
+ sub_cmd->id = htod16(WL_NAN_CMD_DATA_DATARESP);
+ sub_cmd->len = sizeof(sub_cmd->u.options) +
+ OFFSETOF(wl_nan_dp_resp_t, tlv_params);
+ sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
+ pxtlv = (uint8 *)&dataresp->tlv_params;
+
+ nan_buf_size -= (sub_cmd->len +
+ OFFSETOF(bcm_iov_batch_subcmd_t, u.options));
+ buflen_avail = nan_buf_size;
+
+ if (cmd_data->svc_info.data && cmd_data->svc_info.dlen) {
+ ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
+ WL_NAN_XTLV_SD_SVC_INFO, cmd_data->svc_info.dlen,
+ cmd_data->svc_info.data,
+ BCM_XTLV_OPTION_ALIGN32);
+ if (ret != BCME_OK) {
+ WL_ERR(("unable to process svc_spec_info: %d\n", ret));
+ goto fail;
+ }
+ /* If NDPE is enabled, duplicate svc_info and send it in the NDPE TLV list
+ * as well as in SD SVC INFO: FW treats the two as different entities,
+ * whereas the framework delivers both in the same variable
+ * (cmd_data->svc_info). FW decides which one to use based on the
+ * peer's capability (NDPE capable or not).
+ */
+ if (cfg->nancfg->ndpe_enabled) {
+ ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
+ WL_NAN_XTLV_SD_NDPE_TLV_LIST, cmd_data->svc_info.dlen,
+ cmd_data->svc_info.data,
+ BCM_XTLV_OPTION_ALIGN32);
+ if (ret != BCME_OK) {
+ WL_ERR(("unable to process NDPE TLV list: %d\n", ret));
+ goto fail;
+ }
+ }
+ dataresp->flags |= WL_NAN_DP_FLAG_SVC_INFO;
+ }
+
+ /* Security elements */
+ if (cmd_data->csid) {
+ WL_TRACE(("Cipher suite type is present, pack it\n"));
+ ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
+ WL_NAN_XTLV_CFG_SEC_CSID, sizeof(nan_sec_csid_e),
+ (uint8*)&cmd_data->csid, BCM_XTLV_OPTION_ALIGN32);
+ if (unlikely(ret)) {
+ WL_ERR(("%s: fail to pack csid\n", __FUNCTION__));
+ goto fail;
+ }
+ }
+
+ if (cmd_data->ndp_cfg.security_cfg) {
+ if ((cmd_data->key_type == NAN_SECURITY_KEY_INPUT_PMK) ||
+ (cmd_data->key_type == NAN_SECURITY_KEY_INPUT_PASSPHRASE)) {
+ if (cmd_data->key.data && cmd_data->key.dlen) {
+ WL_TRACE(("optional pmk present, pack it\n"));
+ ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
+ WL_NAN_XTLV_CFG_SEC_PMK, cmd_data->key.dlen,
+ cmd_data->key.data, BCM_XTLV_OPTION_ALIGN32);
+ if (unlikely(ret)) {
+ WL_ERR(("%s: fail to pack WL_NAN_XTLV_CFG_SEC_PMK\n",
+ __FUNCTION__));
+ goto fail;
+ }
+ }
+ } else {
+ WL_ERR(("Invalid security key type\n"));
+ ret = BCME_BADARG;
+ goto fail;
+ }
+
+ if ((cmd_data->svc_hash.dlen == WL_NAN_SVC_HASH_LEN) &&
+ (cmd_data->svc_hash.data)) {
+ WL_TRACE(("svc hash present, pack it\n"));
+ ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
+ WL_NAN_XTLV_CFG_SVC_HASH, WL_NAN_SVC_HASH_LEN,
+ cmd_data->svc_hash.data,
+ BCM_XTLV_OPTION_ALIGN32);
+ if (ret != BCME_OK) {
+ WL_ERR(("%s: fail to pack WL_NAN_XTLV_CFG_SVC_HASH\n",
+ __FUNCTION__));
+ goto fail;
+ }
+ }
+ /* If the Data resp is for secure data connection */
+ dataresp->flags |= WL_NAN_DP_FLAG_SECURITY;
+ }
+
+ sub_cmd->len += (buflen_avail - nan_buf_size);
+
+ nan_buf->is_set = false;
+ nan_buf->count++;
+ ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, data_size,
+ &(cmd_data->status), resp_buf, data_size + NAN_IOVAR_NAME_SIZE);
+ if (unlikely(ret) || unlikely(cmd_data->status)) {
+ WL_ERR(("nan data path response handler failed, error = %d, status %d\n",
+ ret, cmd_data->status));
+ goto fail;
+ }
+
+ WL_INFORM_MEM(("[NAN] DP response successfull (ndp_id:%d)\n", dataresp->ndp_id));
+
+fail:
+ if (nan_buf) {
+ MFREE(cfg->osh, nan_buf, data_size);
+ }
+
+ if (resp_buf) {
+ MFREE(cfg->osh, resp_buf, data_size + NAN_IOVAR_NAME_SIZE);
+ }
+ NAN_MUTEX_UNLOCK();
+ mutex_unlock(&cfg->if_sync);
+
+ NAN_DBG_EXIT();
+ return ret;
+}
+
+int wl_cfgnan_data_path_end_handler(struct net_device *ndev,
+ struct bcm_cfg80211 *cfg, nan_data_path_id ndp_instance_id,
+ int *status)
+{
+ bcm_iov_batch_buf_t *nan_buf = NULL;
+ wl_nan_dp_end_t *dataend = NULL;
+ bcm_iov_batch_subcmd_t *sub_cmd = NULL;
+ s32 ret = BCME_OK;
+ uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
+ uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
+
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(ndev);
+
+ NAN_DBG_ENTER();
+ NAN_MUTEX_LOCK();
+
+ if (!dhdp->up) {
+ WL_ERR(("bus is already down, hence blocking nan dp end\n"));
+ ret = BCME_OK;
+ goto fail;
+ }
+
+ if (!cfg->nancfg->nan_enable) {
+ WL_ERR(("nan is not enabled, nan dp end blocked\n"));
+ ret = BCME_OK;
+ goto fail;
+ }
+
+ /* ndp instance id must be from 1 to 255, 0 is reserved */
+ if (ndp_instance_id < NAN_ID_MIN ||
+ ndp_instance_id > NAN_ID_MAX) {
+ WL_ERR(("Invalid ndp instance id: %d\n", ndp_instance_id));
+ ret = BCME_BADARG;
+ goto fail;
+ }
+
+ nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
+ if (!nan_buf) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ ret = BCME_NOMEM;
+ goto fail;
+ }
+
+ nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
+ nan_buf->count = 0;
+ nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
+
+ sub_cmd = (bcm_iov_batch_subcmd_t*)(&nan_buf->cmds[0]);
+ dataend = (wl_nan_dp_end_t *)(sub_cmd->data);
+
+ /* Fill sub_cmd block */
+ sub_cmd->id = htod16(WL_NAN_CMD_DATA_DATAEND);
+ sub_cmd->len = sizeof(sub_cmd->u.options) +
+ sizeof(*dataend);
+ sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
+
+ dataend->lndp_id = ndp_instance_id;
+
+ /*
+ * Currently fw requires ndp_id and a reason code to end the data path,
+ * but wifi_nan.h provides ndp_instances_count and ndp_id.
+ * Keep reason = accept always.
+ */
+
+ dataend->status = 1;
+
+ nan_buf->is_set = true;
+ nan_buf->count++;
+
+ nan_buf_size -= (sub_cmd->len +
+ OFFSETOF(bcm_iov_batch_subcmd_t, u.options));
+ bzero(resp_buf, sizeof(resp_buf));
+ ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size,
+ status, (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
+ if (unlikely(ret) || unlikely(*status)) {
+ WL_ERR(("nan data path end handler failed, error = %d status %d\n",
+ ret, *status));
+ goto fail;
+ }
+ WL_INFORM_MEM(("[NAN] DP end successfull (ndp_id:%d)\n",
+ dataend->lndp_id));
+fail:
+ if (nan_buf) {
+ MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
+ }
+
+ NAN_MUTEX_UNLOCK();
+ NAN_DBG_EXIT();
+ return ret;
+}
+
+#ifdef WL_NAN_DISC_CACHE
+int wl_cfgnan_sec_info_handler(struct bcm_cfg80211 *cfg,
+ nan_datapath_sec_info_cmd_data_t *cmd_data, nan_hal_resp_t *nan_req_resp)
+{
+ s32 ret = BCME_NOTFOUND;
+ /* check in cache */
+ nan_disc_result_cache *disc_cache = NULL;
+ nan_svc_info_t *svc_info = NULL;
+
+ NAN_DBG_ENTER();
+ NAN_MUTEX_LOCK();
+
+ if (!cfg->nancfg->nan_init_state) {
+ WL_ERR(("nan is not initialized/nmi doesnt exists\n"));
+ ret = BCME_NOTENABLED;
+ goto fail;
+ }
+
+ /* datapath request context */
+ if (cmd_data->pub_id && !ETHER_ISNULLADDR(&cmd_data->mac_addr)) {
+ disc_cache = wl_cfgnan_get_disc_result(cfg,
+ cmd_data->pub_id, &cmd_data->mac_addr);
+ WL_DBG(("datapath request: PUB ID: = %d\n",
+ cmd_data->pub_id));
+ if (disc_cache) {
+ (void)memcpy_s(nan_req_resp->svc_hash, WL_NAN_SVC_HASH_LEN,
+ disc_cache->svc_hash, WL_NAN_SVC_HASH_LEN);
+ ret = BCME_OK;
+ } else {
+ WL_ERR(("disc_cache is NULL\n"));
+ goto fail;
+ }
+ }
+
+ /* datapath response context */
+ if (cmd_data->ndp_instance_id) {
+ WL_DBG(("datapath response: NDP ID: = %d\n",
+ cmd_data->ndp_instance_id));
+ svc_info = wl_cfgnan_get_svc_inst(cfg, 0, cmd_data->ndp_instance_id);
+ /* Note: svc_info will not be present in OOB cases.
+ * In that case send the NMI alone and let HAL decide
+ * whether svc_hash is mandatory.
+ */
+ if (svc_info) {
+ WL_DBG(("svc hash present, pack it\n"));
+ (void)memcpy_s(nan_req_resp->svc_hash, WL_NAN_SVC_HASH_LEN,
+ svc_info->svc_hash, WL_NAN_SVC_HASH_LEN);
+ } else {
+ WL_INFORM_MEM(("svc_info not present..assuming OOB DP\n"));
+ }
+ /* Always send NMI */
+ (void)memcpy_s(nan_req_resp->pub_nmi, ETHER_ADDR_LEN,
+ cfg->nancfg->nan_nmi_mac, ETHER_ADDR_LEN);
+ ret = BCME_OK;
+ }
+fail:
+ NAN_MUTEX_UNLOCK();
+ NAN_DBG_EXIT();
+ return ret;
+}
+#endif /* WL_NAN_DISC_CACHE */
+
+#ifdef RTT_SUPPORT
+static s32 wl_nan_cache_to_event_data(nan_disc_result_cache *cache,
+ nan_event_data_t *nan_event_data, osl_t *osh)
+{
+ s32 ret = BCME_OK;
+ NAN_DBG_ENTER();
+
+ nan_event_data->pub_id = cache->pub_id;
+ nan_event_data->sub_id = cache->sub_id;
+ nan_event_data->publish_rssi = cache->publish_rssi;
+ nan_event_data->peer_cipher_suite = cache->peer_cipher_suite;
+ ret = memcpy_s(&nan_event_data->remote_nmi, ETHER_ADDR_LEN,
+ &cache->peer, ETHER_ADDR_LEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy cached peer nan nmi\n"));
+ goto fail;
+ }
+
+ if (cache->svc_info.dlen && cache->svc_info.data) {
+ nan_event_data->svc_info.dlen = cache->svc_info.dlen;
+ nan_event_data->svc_info.data =
+ MALLOCZ(osh, nan_event_data->svc_info.dlen);
+ if (!nan_event_data->svc_info.data) {
+ WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
+ nan_event_data->svc_info.dlen = 0;
+ ret = -ENOMEM;
+ goto fail;
+ }
+ ret = memcpy_s(nan_event_data->svc_info.data, nan_event_data->svc_info.dlen,
+ cache->svc_info.data, cache->svc_info.dlen);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy cached svc info data\n"));
+ goto fail;
+ }
+ }
+ if (cache->tx_match_filter.dlen && cache->tx_match_filter.data) {
+ nan_event_data->tx_match_filter.dlen = cache->tx_match_filter.dlen;
+ nan_event_data->tx_match_filter.data =
+ MALLOCZ(osh, nan_event_data->tx_match_filter.dlen);
+ if (!nan_event_data->tx_match_filter.data) {
+ WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
+ nan_event_data->tx_match_filter.dlen = 0;
+ ret = -ENOMEM;
+ goto fail;
+ }
+ ret = memcpy_s(nan_event_data->tx_match_filter.data,
+ nan_event_data->tx_match_filter.dlen,
+ cache->tx_match_filter.data, cache->tx_match_filter.dlen);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy cached tx match filter data\n"));
+ goto fail;
+ }
+ }
+fail:
+ NAN_DBG_EXIT();
+ return ret;
+}
+
+/*
+ * API to cancel the ranging for given instance
+ * For geofence initiator, suspend ranging.
+ * for directed RTT initiator , report fail result, cancel ranging
+ * and clear ranging instance
+ * For responder, cancel ranging and clear ranging instance
+ */
+static s32
+wl_cfgnan_clear_peer_ranging(struct bcm_cfg80211 *cfg,
+ nan_ranging_inst_t *rng_inst, int reason)
+{
+ uint32 status = 0;
+ int err = BCME_OK;
+ struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+
+ if (rng_inst->range_type == RTT_TYPE_NAN_GEOFENCE &&
+ rng_inst->range_role == NAN_RANGING_ROLE_INITIATOR) {
+ err = wl_cfgnan_suspend_geofence_rng_session(ndev,
+ &rng_inst->peer_addr, reason, 0);
+ } else {
+ if (rng_inst->range_type == RTT_TYPE_NAN_DIRECTED) {
+ dhd_rtt_handle_nan_rtt_session_end(dhdp,
+ &rng_inst->peer_addr);
+ }
+ /* responder */
+ err = wl_cfgnan_cancel_ranging(ndev, cfg,
+ &rng_inst->range_id,
+ NAN_RNG_TERM_FLAG_IMMEDIATE, &status);
+ wl_cfgnan_reset_remove_ranging_instance(cfg, rng_inst);
+ }
+
+ if (err) {
+ WL_ERR(("Failed to stop ranging with peer, err : %d\n", err));
+ }
+
+ return err;
+}
+
+/*
+ * Handle NDP-Ranging Concurrency,
+ * for incoming DP Reuest
+ * Cancel Ranging with same peer
+ * Cancel Ranging for set up in prog
+ * for all other peers
+ */
+static s32
+wl_cfgnan_handle_dp_ranging_concurrency(struct bcm_cfg80211 *cfg,
+ struct ether_addr *peer, int reason)
+{
+ uint8 i = 0;
+ nan_ranging_inst_t *cur_rng_inst = NULL;
+ nan_ranging_inst_t *rng_inst = NULL;
+ int err = BCME_OK;
+
+ /*
+ * FixMe:
+ * DP-Ranging concurrency will need more handling than has been
+ * addressed so far: poll the max ranging sessions, update them,
+ * and take the relevant actions accordingly.
+ */
+
+ cur_rng_inst = wl_cfgnan_check_for_ranging(cfg, peer);
+
+ for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
+ rng_inst = &cfg->nancfg->nan_ranging_info[i];
+ if (rng_inst->in_use) {
+ if ((cur_rng_inst && cur_rng_inst == rng_inst) &&
+ NAN_RANGING_IS_IN_PROG(rng_inst->range_status)) {
+ err = wl_cfgnan_clear_peer_ranging(cfg, rng_inst,
+ RTT_GEO_SUSPN_HOST_NDP_TRIGGER);
+ }
+ }
+ }
+
+ if (err) {
+ WL_ERR(("Failed to handle dp ranging concurrency, err : %d\n", err));
+ }
+
+ return err;
+}
+
+bool
+wl_cfgnan_check_role_concurrency(struct bcm_cfg80211 *cfg,
+ struct ether_addr *peer_addr)
+{
+ nan_ranging_inst_t *rng_inst = NULL;
+ bool role_conc_status = FALSE;
+
+ rng_inst = wl_cfgnan_check_for_ranging(cfg, peer_addr);
+ if (rng_inst) {
+ role_conc_status = rng_inst->role_concurrency_status;
+ }
+
+ return role_conc_status;
+}
+#endif /* RTT_SUPPORT */
+
+static s32
+wl_nan_dp_cmn_event_data(struct bcm_cfg80211 *cfg, void *event_data,
+ uint16 data_len, uint16 *tlvs_offset,
+ uint16 *nan_opts_len, uint32 event_num,
+ int *hal_event_id, nan_event_data_t *nan_event_data)
+{
+ s32 ret = BCME_OK;
+ uint8 i;
+ wl_nan_ev_datapath_cmn_t *ev_dp;
+ nan_svc_info_t *svc_info;
+ bcm_xtlv_t *xtlv = (bcm_xtlv_t *)event_data;
+#ifdef RTT_SUPPORT
+ nan_ranging_inst_t *rng_inst = NULL;
+#endif /* RTT_SUPPORT */
+
+ if (xtlv->id == WL_NAN_XTLV_DATA_DP_INFO) {
+ ev_dp = (wl_nan_ev_datapath_cmn_t *)xtlv->data;
+ NAN_DBG_ENTER();
+
+ BCM_REFERENCE(svc_info);
+ BCM_REFERENCE(i);
+ /* Mapping to common struct between DHD and HAL */
+ WL_TRACE(("Event type: %d\n", ev_dp->type));
+ nan_event_data->type = ev_dp->type;
+ WL_TRACE(("pub_id: %d\n", ev_dp->pub_id));
+ nan_event_data->pub_id = ev_dp->pub_id;
+ WL_TRACE(("security: %d\n", ev_dp->security));
+ nan_event_data->security = ev_dp->security;
+
+ /* Store initiator_ndi, required for data_path_response_request */
+ ret = memcpy_s(&cfg->nancfg->initiator_ndi, ETHER_ADDR_LEN,
+ &ev_dp->initiator_ndi, ETHER_ADDR_LEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy event's initiator addr\n"));
+ goto fail;
+ }
+ if (ev_dp->type == NAN_DP_SESSION_UNICAST) {
+ WL_INFORM_MEM(("NDP ID: %d\n", ev_dp->ndp_id));
+ nan_event_data->ndp_id = ev_dp->ndp_id;
+ WL_TRACE(("INITIATOR_NDI: " MACDBG "\n",
+ MAC2STRDBG(ev_dp->initiator_ndi.octet)));
+ WL_TRACE(("RESPONDOR_NDI: " MACDBG "\n",
+ MAC2STRDBG(ev_dp->responder_ndi.octet)));
+ WL_TRACE(("PEER NMI: " MACDBG "\n",
+ MAC2STRDBG(ev_dp->peer_nmi.octet)));
+ ret = memcpy_s(&nan_event_data->remote_nmi, ETHER_ADDR_LEN,
+ &ev_dp->peer_nmi, ETHER_ADDR_LEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy event's peer nmi\n"));
+ goto fail;
+ }
+ } else {
+ /* type is multicast */
+ WL_INFORM_MEM(("NDP ID: %d\n", ev_dp->mc_id));
+ nan_event_data->ndp_id = ev_dp->mc_id;
+ WL_TRACE(("PEER NMI: " MACDBG "\n",
+ MAC2STRDBG(ev_dp->peer_nmi.octet)));
+ ret = memcpy_s(&nan_event_data->remote_nmi, ETHER_ADDR_LEN,
+ &ev_dp->peer_nmi,
+ ETHER_ADDR_LEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy event's peer nmi\n"));
+ goto fail;
+ }
+ }
+ *tlvs_offset = OFFSETOF(wl_nan_ev_datapath_cmn_t, opt_tlvs) +
+ OFFSETOF(bcm_xtlv_t, data);
+ *nan_opts_len = data_len - *tlvs_offset;
+ if (event_num == WL_NAN_EVENT_PEER_DATAPATH_IND) {
+ *hal_event_id = GOOGLE_NAN_EVENT_DATA_REQUEST;
+#ifdef WL_NAN_DISC_CACHE
+ ret = wl_cfgnan_svc_inst_add_ndp(cfg, nan_event_data->pub_id,
+ nan_event_data->ndp_id);
+ if (ret != BCME_OK) {
+ goto fail;
+ }
+#endif /* WL_NAN_DISC_CACHE */
+ /* Add peer to data ndp peer list */
+ wl_cfgnan_data_add_peer(cfg, &ev_dp->peer_nmi);
+#ifdef RTT_SUPPORT
+ /* cancel any ongoing RTT session with the peer,
+ * as we do not support DP and RNG to the same peer
+ */
+ wl_cfgnan_handle_dp_ranging_concurrency(cfg, &ev_dp->peer_nmi,
+ RTT_GEO_SUSPN_PEER_NDP_TRIGGER);
+#endif /* RTT_SUPPORT */
+ } else if (event_num == WL_NAN_EVENT_DATAPATH_ESTB) {
+ *hal_event_id = GOOGLE_NAN_EVENT_DATA_CONFIRMATION;
+ if (ev_dp->role == NAN_DP_ROLE_INITIATOR) {
+ ret = memcpy_s(&nan_event_data->responder_ndi, ETHER_ADDR_LEN,
+ &ev_dp->responder_ndi,
+ ETHER_ADDR_LEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy event's responder ndi\n"));
+ goto fail;
+ }
+ WL_TRACE(("REMOTE_NDI: " MACDBG "\n",
+ MAC2STRDBG(ev_dp->responder_ndi.octet)));
+ WL_TRACE(("Initiator status %d\n", nan_event_data->status));
+ } else {
+ ret = memcpy_s(&nan_event_data->responder_ndi, ETHER_ADDR_LEN,
+ &ev_dp->initiator_ndi,
+ ETHER_ADDR_LEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy event's responder ndi\n"));
+ goto fail;
+ }
+ WL_TRACE(("REMOTE_NDI: " MACDBG "\n",
+ MAC2STRDBG(ev_dp->initiator_ndi.octet)));
+ }
+ if (ev_dp->status == NAN_NDP_STATUS_ACCEPT) {
+ nan_event_data->status = NAN_DP_REQUEST_ACCEPT;
+ wl_cfgnan_data_set_peer_dp_state(cfg, &ev_dp->peer_nmi,
+ NAN_PEER_DP_CONNECTED);
+ wl_cfgnan_update_dp_info(cfg, true, nan_event_data->ndp_id);
+ wl_cfgnan_get_stats(cfg);
+ } else if (ev_dp->status == NAN_NDP_STATUS_REJECT) {
+ nan_event_data->status = NAN_DP_REQUEST_REJECT;
+#ifdef WL_NAN_DISC_CACHE
+ if (ev_dp->role != NAN_DP_ROLE_INITIATOR) {
+ /* Only at Responder side,
+ * If dp is ended,
+ * clear the resp ndp id from the svc info cache
+ */
+ ret = wl_cfgnan_svc_inst_del_ndp(cfg,
+ nan_event_data->pub_id,
+ nan_event_data->ndp_id);
+ if (ret != BCME_OK) {
+ goto fail;
+ }
+ }
+#endif /* WL_NAN_DISC_CACHE */
+ /* Remove peer from data ndp peer list */
+ wl_cfgnan_data_remove_peer(cfg, &ev_dp->peer_nmi);
+#ifdef RTT_SUPPORT
+ rng_inst = wl_cfgnan_check_for_ranging(cfg, &ev_dp->peer_nmi);
+ if (rng_inst) {
+ /* Trigger/Reset geofence RTT */
+ wl_cfgnan_reset_geofence_ranging(cfg,
+ rng_inst, RTT_SCHED_DP_REJECTED, TRUE);
+ }
+#endif /* RTT_SUPPORT */
+ } else {
+ WL_ERR(("%s:Status code = %x not expected\n",
+ __FUNCTION__, ev_dp->status));
+ ret = BCME_ERROR;
+ goto fail;
+ }
+ WL_TRACE(("Responder status %d\n", nan_event_data->status));
+ } else if (event_num == WL_NAN_EVENT_DATAPATH_END) {
+ /* Mapping to common struct between DHD and HAL */
+ *hal_event_id = GOOGLE_NAN_EVENT_DATA_END;
+#ifdef WL_NAN_DISC_CACHE
+ if (ev_dp->role != NAN_DP_ROLE_INITIATOR) {
+ /* Only at Responder side,
+ * If dp is ended,
+ * clear the resp ndp id from the svc info cache
+ */
+ ret = wl_cfgnan_svc_inst_del_ndp(cfg,
+ nan_event_data->pub_id,
+ nan_event_data->ndp_id);
+ if (ret != BCME_OK) {
+ goto fail;
+ }
+ }
+#endif /* WL_NAN_DISC_CACHE */
+ /* Remove peer from data ndp peer list */
+ wl_cfgnan_data_remove_peer(cfg, &ev_dp->peer_nmi);
+ wl_cfgnan_update_dp_info(cfg, false, nan_event_data->ndp_id);
+ WL_INFORM_MEM(("DP_END for REMOTE_NMI: " MACDBG " with %s\n",
+ MAC2STRDBG(&ev_dp->peer_nmi),
+ nan_event_cause_to_str(ev_dp->event_cause)));
+#ifdef RTT_SUPPORT
+ rng_inst = wl_cfgnan_check_for_ranging(cfg, &ev_dp->peer_nmi);
+ if (rng_inst) {
+ /* Trigger/Reset geofence RTT */
+ WL_INFORM_MEM(("sched geofence rtt from DP_END ctx: " MACDBG "\n",
+ MAC2STRDBG(&rng_inst->peer_addr)));
+ wl_cfgnan_reset_geofence_ranging(cfg, rng_inst,
+ RTT_SCHED_DP_END, TRUE);
+ }
+#endif /* RTT_SUPPORT */
+ }
+ } else {
+ /* Fall through; other xtlv IDs are not handled as of now */
+ WL_DBG(("%s:ID = 0x%02x not supported\n", __FUNCTION__, xtlv->id));
+ }
+fail:
+ NAN_DBG_EXIT();
+ return ret;
+}
+
+#ifdef RTT_SUPPORT
+static int
+wl_cfgnan_event_disc_result(struct bcm_cfg80211 *cfg,
+ nan_event_data_t *nan_event_data)
+{
+ int ret = BCME_OK;
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT)
+ ret = wl_cfgvendor_send_nan_event(cfg->wdev->wiphy, bcmcfg_to_prmry_ndev(cfg),
+ GOOGLE_NAN_EVENT_SUBSCRIBE_MATCH, nan_event_data);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to send event to nan hal\n"));
+ }
+#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT) */
+ return ret;
+}
+
+#define IN_GEOFENCE(ingress, egress, distance) (((distance) <= (ingress)) && \
+ ((distance) >= (egress)))
+#define IS_INGRESS_VAL(ingress, distance) ((distance) < (ingress))
+#define IS_EGRESS_VAL(egress, distance) ((distance) > (egress))
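+/*
+ * Limits and distances here share the unit of range_res->dist_mm
+ * (millimetres). E.g. with ingress_limit = 10000 and egress_limit = 2000,
+ * a distance of 7000 is IN_GEOFENCE, 1500 satisfies IS_INGRESS_VAL and
+ * 12000 satisfies IS_EGRESS_VAL.
+ */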
+
+static bool
+wl_cfgnan_check_ranging_cond(nan_svc_info_t *svc_info, uint32 distance,
+ uint8 *ranging_ind, uint32 prev_distance)
+{
+ uint8 svc_ind = svc_info->ranging_ind;
+ bool notify = FALSE;
+ bool range_rep_ev_once =
+ !!(svc_info->svc_range_status & SVC_RANGE_REP_EVENT_ONCE);
+ uint32 ingress_limit = svc_info->ingress_limit;
+ uint32 egress_limit = svc_info->egress_limit;
+
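+ /*
+ * Indication modes, as implemented below: continuous always notifies;
+ * ingress+egress notifies on entering the geofence band; ingress-only
+ * and egress-only notify on crossing the respective limit. Once
+ * SVC_RANGE_REP_EVENT_ONCE is set, a repeat report from within the
+ * same region is suppressed.
+ */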
+ if (svc_ind & NAN_RANGE_INDICATION_CONT) {
+ *ranging_ind = NAN_RANGE_INDICATION_CONT;
+ notify = TRUE;
+ WL_ERR(("\n%s :Svc has continuous Ind %d\n",
+ __FUNCTION__, __LINE__));
+ goto done;
+ }
+
+ if (svc_ind == (NAN_RANGE_INDICATION_INGRESS |
+ NAN_RANGE_INDICATION_EGRESS)) {
+ if (IN_GEOFENCE(ingress_limit, egress_limit, distance)) {
+ /* if not already in geofence */
+ if ((range_rep_ev_once == FALSE) ||
+ (!IN_GEOFENCE(ingress_limit, egress_limit,
+ prev_distance))) {
+ notify = TRUE;
+ if (distance > prev_distance) {
+ *ranging_ind = NAN_RANGE_INDICATION_EGRESS;
+ } else {
+ *ranging_ind = NAN_RANGE_INDICATION_INGRESS;
+ }
+ WL_ERR(("\n%s :Svc has geofence Ind %d res_ind %d\n",
+ __FUNCTION__, __LINE__, *ranging_ind));
+ }
+ }
+ goto done;
+ }
+
+ if (svc_ind == NAN_RANGE_INDICATION_INGRESS) {
+ if (IS_INGRESS_VAL(ingress_limit, distance)) {
+ if ((range_rep_ev_once == FALSE) ||
+ (prev_distance == INVALID_DISTANCE) ||
+ !IS_INGRESS_VAL(ingress_limit, prev_distance)) {
+ notify = TRUE;
+ *ranging_ind = NAN_RANGE_INDICATION_INGRESS;
+ WL_ERR(("\n%s :Svc has ingress Ind %d\n",
+ __FUNCTION__, __LINE__));
+ }
+ }
+ goto done;
+ }
+
+ if (svc_ind == NAN_RANGE_INDICATION_EGRESS) {
+ if (IS_EGRESS_VAL(egress_limit, distance)) {
+ if ((range_rep_ev_once == FALSE) ||
+ (prev_distance == INVALID_DISTANCE) ||
+ !IS_EGRESS_VAL(egress_limit, prev_distance)) {
+ notify = TRUE;
+ *ranging_ind = NAN_RANGE_INDICATION_EGRESS;
+ WL_ERR(("\n%s :Svc has egress Ind %d\n",
+ __FUNCTION__, __LINE__));
+ }
+ }
+ goto done;
+ }
+done:
+ WL_INFORM_MEM(("SVC ranging Ind %d distance %d prev_distance %d, "
+ "range_rep_ev_once %d ingress_limit %d egress_limit %d notify %d\n",
+ svc_ind, distance, prev_distance, range_rep_ev_once,
+ ingress_limit, egress_limit, notify));
+ svc_info->svc_range_status |= SVC_RANGE_REP_EVENT_ONCE;
+ return notify;
+}
+
+static int32
+wl_cfgnan_notify_disc_with_ranging(struct bcm_cfg80211 *cfg,
+ nan_ranging_inst_t *rng_inst, nan_event_data_t *nan_event_data, uint32 distance)
+{
+ nan_svc_info_t *svc_info;
+ bool notify_svc = TRUE;
+ nan_disc_result_cache *disc_res = cfg->nancfg->nan_disc_cache;
+ uint8 ranging_ind = 0;
+ int ret = BCME_OK;
+ int i = 0, j = 0;
+ uint8 result_present = nan_event_data->ranging_result_present;
+
+ for (i = 0; i < MAX_SUBSCRIBES; i++) {
+ svc_info = rng_inst->svc_idx[i];
+ if (svc_info && svc_info->ranging_required) {
+ /* If the ranging result is present, notify the disc
+ * result only if it satisfies the conditions.
+ * If the ranging result is not present, notify the disc
+ * result without ranging info.
+ */
+ if (result_present) {
+ notify_svc = wl_cfgnan_check_ranging_cond(svc_info, distance,
+ &ranging_ind, rng_inst->prev_distance_mm);
+ nan_event_data->ranging_ind = ranging_ind;
+ }
+ WL_DBG(("Ranging notify for svc_id %d, notify %d and ind %d"
+ " distance_mm %d result_present %d\n", svc_info->svc_id, notify_svc,
+ ranging_ind, distance, result_present));
+ } else {
+ continue;
+ }
+ if (notify_svc) {
+ for (j = 0; j < NAN_MAX_CACHE_DISC_RESULT; j++) {
+ if (!memcmp(&disc_res[j].peer,
+ &(rng_inst->peer_addr), ETHER_ADDR_LEN) &&
+ (svc_info->svc_id == disc_res[j].sub_id)) {
+ ret = wl_nan_cache_to_event_data(&disc_res[j],
+ nan_event_data, cfg->osh);
+ ret = wl_cfgnan_event_disc_result(cfg, nan_event_data);
+ /* If it is not match-once, clear it as the FW
+ * will indicate again.
+ */
+ if (!(svc_info->flags & WL_NAN_MATCH_ONCE)) {
+ wl_cfgnan_remove_disc_result(cfg, svc_info->svc_id);
+ }
+ }
+ }
+ }
+ }
+ WL_DBG(("notify_disc_with_ranging done ret %d\n", ret));
+ return ret;
+}
+
+static int32
+wl_cfgnan_handle_directed_rtt_report(struct bcm_cfg80211 *cfg,
+ nan_ranging_inst_t *rng_inst)
+{
+ int ret = BCME_OK;
+ uint32 status;
+ dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
+
+ ret = wl_cfgnan_cancel_ranging(bcmcfg_to_prmry_ndev(cfg), cfg,
+ &rng_inst->range_id, NAN_RNG_TERM_FLAG_IMMEDIATE, &status);
+ if (unlikely(ret) || unlikely(status)) {
+ WL_ERR(("nan range cancel failed ret = %d status = %d\n", ret, status));
+ }
+ dhd_rtt_handle_nan_rtt_session_end(dhd, &rng_inst->peer_addr);
+ dhd_rtt_nan_update_directed_sessions_cnt(dhd, FALSE);
+
+ wl_cfgnan_reset_remove_ranging_instance(cfg, rng_inst);
+
+ WL_DBG(("Ongoing ranging session is cancelled \n"));
+ return ret;
+}
+
+static void
+wl_cfgnan_disc_result_on_geofence_cancel(struct bcm_cfg80211 *cfg,
+ nan_ranging_inst_t *rng_inst)
+{
+ nan_event_data_t *nan_event_data = NULL;
+
+ nan_event_data = MALLOCZ(cfg->osh, sizeof(*nan_event_data));
+ if (!nan_event_data) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ goto exit;
+ }
+
+ wl_cfgnan_notify_disc_with_ranging(cfg, rng_inst, nan_event_data, 0);
+
+exit:
+ wl_cfgnan_clear_nan_event_data(cfg, nan_event_data);
+
+ return;
+}
+
+void
+wl_cfgnan_process_range_report(struct bcm_cfg80211 *cfg,
+ wl_nan_ev_rng_rpt_ind_t *range_res, int status)
+{
+ nan_ranging_inst_t *rng_inst = NULL;
+ nan_event_data_t nan_event_data;
+ dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
+
+ UNUSED_PARAMETER(nan_event_data);
+ rng_inst = wl_cfgnan_check_for_ranging(cfg, &range_res->peer_m_addr);
+ if (!rng_inst) {
+ WL_ERR(("No ranging instance but received RNG RPT event; check\n"));
+ goto exit;
+ }
+
+ if (rng_inst->range_status != NAN_RANGING_SESSION_IN_PROGRESS) {
+ WL_ERR(("SSN not in prog but received RNG RPT event; ignore\n"));
+ goto exit;
+ }
+
+#ifdef NAN_RTT_DBG
+ DUMP_NAN_RTT_INST(rng_inst);
+ DUMP_NAN_RTT_RPT(range_res);
+#endif
+ range_res->rng_id = rng_inst->range_id;
+ bzero(&nan_event_data, sizeof(nan_event_data));
+
+ if (status == BCME_OK) {
+ nan_event_data.ranging_result_present = 1;
+ nan_event_data.range_measurement_cm = range_res->dist_mm;
+ nan_event_data.ranging_ind = range_res->indication;
+ }
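+ /* On a failed measurement, ranging_result_present stays 0, so the
+ * geofence notify path below reports the discovery result without
+ * range info
+ */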
+
+ (void)memcpy_s(&nan_event_data.remote_nmi, ETHER_ADDR_LEN,
+ &range_res->peer_m_addr, ETHER_ADDR_LEN);
+
+ if (rng_inst->range_type == RTT_TYPE_NAN_GEOFENCE) {
+ /* check in cache and event match to host */
+ wl_cfgnan_notify_disc_with_ranging(cfg, rng_inst, &nan_event_data,
+ range_res->dist_mm);
+ rng_inst->prev_distance_mm = range_res->dist_mm;
+ /* Reset geof retry count on valid measurement */
+ rng_inst->geof_retry_count = 0;
+ /*
+ * Suspend and trigger other targets,
+ * if running sessions maxed out and more
+ * pending targets waiting for trigger
+ */
+ if (dhd_rtt_geofence_sessions_maxed_out(dhd) &&
+ (dhd_rtt_get_geofence_target_cnt(dhd) >=
+ dhd_rtt_get_geofence_max_sessions(dhd))) {
+ /*
+ * Update the target idx first, before suspending current target
+ * or else current target will become eligible again
+ * and will get scheduled again on reset ranging
+ */
+ wl_cfgnan_update_geofence_target_idx(cfg);
+ wl_cfgnan_suspend_geofence_rng_session(bcmcfg_to_prmry_ndev(cfg),
+ &rng_inst->peer_addr, RTT_GEO_SUSPN_RANGE_RES_REPORTED, 0);
+ }
+ wl_cfgnan_reset_geofence_ranging(cfg,
+ rng_inst, RTT_SCHED_RNG_RPT_GEOFENCE, TRUE);
+
+ } else if (rng_inst->range_type == RTT_TYPE_NAN_DIRECTED) {
+ wl_cfgnan_handle_directed_rtt_report(cfg, rng_inst);
+ }
+ rng_inst->ftm_ssn_retry_count = 0;
+
+exit:
+ return;
+}
+#endif /* RTT_SUPPORT */
+
+static void
+wl_nan_print_status(wl_nan_conf_status_t *nstatus)
+{
+ WL_INFORM_MEM(("> NMI: " MACDBG " Cluster_ID: " MACDBG "\n",
+ MAC2STRDBG(nstatus->nmi.octet),
+ MAC2STRDBG(nstatus->cid.octet)));
+
+ WL_INFORM_MEM(("> NAN Device Role %s\n", nan_role_to_str(nstatus->role)));
+ WL_INFORM_MEM(("> Social channels: %d, %d\n",
+ nstatus->social_chans[0], nstatus->social_chans[1]));
+
+ WL_INFORM_MEM(("> Master_rank: " NMRSTR " AMR : " NMRSTR " Hop Count : %d, AMBTT : %d\n",
+ NMR2STR(nstatus->mr),
+ NMR2STR(nstatus->amr),
+ nstatus->hop_count,
+ nstatus->ambtt));
+
+ WL_INFORM_MEM(("> Cluster TSF_H: %x , Cluster TSF_L: %x\n",
+ nstatus->cluster_tsf_h, nstatus->cluster_tsf_l));
+}
+
+static void
+wl_cfgnan_clear_nan_event_data(struct bcm_cfg80211 *cfg,
+ nan_event_data_t *nan_event_data)
+{
+ if (nan_event_data) {
+ if (nan_event_data->tx_match_filter.data) {
+ MFREE(cfg->osh, nan_event_data->tx_match_filter.data,
+ nan_event_data->tx_match_filter.dlen);
+ nan_event_data->tx_match_filter.data = NULL;
+ }
+ if (nan_event_data->rx_match_filter.data) {
+ MFREE(cfg->osh, nan_event_data->rx_match_filter.data,
+ nan_event_data->rx_match_filter.dlen);
+ nan_event_data->rx_match_filter.data = NULL;
+ }
+ if (nan_event_data->svc_info.data) {
+ MFREE(cfg->osh, nan_event_data->svc_info.data,
+ nan_event_data->svc_info.dlen);
+ nan_event_data->svc_info.data = NULL;
+ }
+ if (nan_event_data->sde_svc_info.data) {
+ MFREE(cfg->osh, nan_event_data->sde_svc_info.data,
+ nan_event_data->sde_svc_info.dlen);
+ nan_event_data->sde_svc_info.data = NULL;
+ }
+ MFREE(cfg->osh, nan_event_data, sizeof(*nan_event_data));
+ }
+
+}
+
+#ifdef RTT_SUPPORT
+bool
+wl_cfgnan_update_geofence_target_idx(struct bcm_cfg80211 *cfg)
+{
+ int8 i = 0, target_cnt = 0;
+ int8 cur_idx = DHD_RTT_INVALID_TARGET_INDEX;
+ rtt_geofence_target_info_t *geofence_target_info = NULL;
+ bool found = false;
+ nan_ranging_inst_t *rng_inst = NULL;
+ dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+
+ target_cnt = dhd_rtt_get_geofence_target_cnt(dhd);
+ ASSERT(target_cnt);
+ if (target_cnt == 0) {
+ WL_DBG(("No geofence targets to schedule\n"));
+ dhd_rtt_set_geofence_cur_target_idx(dhd,
+ DHD_RTT_INVALID_TARGET_INDEX);
+ goto exit;
+ }
+
+ /* cur idx is validated too, in the following API */
+ cur_idx = dhd_rtt_get_geofence_cur_target_idx(dhd);
+ if (cur_idx == DHD_RTT_INVALID_TARGET_INDEX) {
+ WL_DBG(("invalid current target index, start looking from first\n"));
+ cur_idx = 0;
+ }
+
+ geofence_target_info = rtt_status->geofence_cfg.geofence_target_info;
+
+ /* Loop through to find eligible target idx */
+ i = cur_idx;
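+ /* Circular scan: start at cur_idx, wrap at target_cnt and stop when
+ * back at cur_idx; a target is eligible if valid and its ranging
+ * instance is neither in progress nor role-concurrent
+ */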
+ do {
+ if (geofence_target_info[i].valid == TRUE) {
+ rng_inst = wl_cfgnan_check_for_ranging(cfg,
+ &geofence_target_info[i].peer_addr);
+ if (rng_inst &&
+ (!NAN_RANGING_IS_IN_PROG(rng_inst->range_status)) &&
+ (!wl_cfgnan_check_role_concurrency(cfg,
+ &rng_inst->peer_addr))) {
+ found = TRUE;
+ break;
+ }
+ }
+ i++;
+ if (i == target_cnt) {
+ i = 0;
+ }
+ } while (i != cur_idx);
+
+ if (found) {
+ dhd_rtt_set_geofence_cur_target_idx(dhd, i);
+ WL_DBG(("Updated cur index, cur_idx = %d, target_cnt = %d\n",
+ i, target_cnt));
+ } else {
+ dhd_rtt_set_geofence_cur_target_idx(dhd,
+ DHD_RTT_INVALID_TARGET_INDEX);
+ WL_DBG(("Invalidated cur_idx, as either no target is present, or all "
+ "targets are already running, target_cnt = %d\n", target_cnt));
+
+ }
+
+exit:
+ return found;
+}
+
+/*
+ * Triggers the rtt work thread,
+ * after setting the next eligible target index,
+ * provided a set-up is not already in progress
+ * and sessions are not maxed out
+ */
+void
+wl_cfgnan_reset_geofence_ranging(struct bcm_cfg80211 *cfg,
+ nan_ranging_inst_t * rng_inst, int sched_reason,
+ bool need_rtt_mutex)
+{
+ dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
+ u8 rtt_invalid_reason = RTT_STATE_VALID;
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+ int8 target_cnt = 0;
+ int reset_req_drop = 0;
+
+ if (need_rtt_mutex == TRUE) {
+ mutex_lock(&rtt_status->rtt_mutex);
+ }
+
+ WL_INFORM_MEM(("wl_cfgnan_reset_geofence_ranging: "
+ "sched_reason = %d, cur_idx = %d, target_cnt = %d\n",
+ sched_reason, rtt_status->geofence_cfg.cur_target_idx,
+ rtt_status->geofence_cfg.geofence_target_cnt));
+
+ if (rtt_status->rtt_sched == TRUE) {
+ reset_req_drop = 1;
+ goto exit;
+ }
+
+ target_cnt = dhd_rtt_get_geofence_target_cnt(dhd);
+ if (target_cnt == 0) {
+ WL_DBG(("No geofence targets to schedule\n"));
+ /*
+ * FIXME:
+ * No geofence targets left;
+ * remove all valid ranging instances
+ */
+ if (rng_inst) {
+ WL_INFORM_MEM(("Removing Ranging Instance " MACDBG "\n",
+ MAC2STRDBG(&(rng_inst->peer_addr))));
+ bzero(rng_inst, sizeof(*rng_inst));
+ }
+ /* Cancel pending retry timer if any */
+ if (delayed_work_pending(&rtt_status->rtt_retry_timer)) {
+ cancel_delayed_work(&rtt_status->rtt_retry_timer);
+ }
+
+ /* invalidate current index as there are no targets */
+ dhd_rtt_set_geofence_cur_target_idx(dhd,
+ DHD_RTT_INVALID_TARGET_INDEX);
+ reset_req_drop = 2;
+ goto exit;
+ }
+
+ if (dhd_rtt_is_geofence_setup_inprog(dhd)) {
+ /* Will be called again for schedule once lock is removed */
+ reset_req_drop = 3;
+ goto exit;
+ }
+
+ /* Avoid scheduling if
+ * geofence is already running,
+ * or a directed RTT is in progress,
+ * or the RTT state is invalid,
+ * e.g. an NDP with the peer
+ */
+ if ((!RTT_IS_STOPPED(rtt_status)) ||
+ (rtt_invalid_reason != RTT_STATE_VALID)) {
+ /* Not in valid RTT state, avoid schedule */
+ reset_req_drop = 4;
+ goto exit;
+ }
+
+ if (dhd_rtt_geofence_sessions_maxed_out(dhd)) {
+ reset_req_drop = 5;
+ goto exit;
+ }
+
+ if (!wl_cfgnan_update_geofence_target_idx(cfg)) {
+ reset_req_drop = 6;
+ goto exit;
+ }
+
+ /*
+ * FixMe: the retry-geofence-target-over-a-timer logic
+ * is to be brought back later,
+ * in accordance with the new multipeer implementation
+ */
+
+ /* schedule RTT */
+ dhd_rtt_schedule_rtt_work_thread(dhd, sched_reason);
+
+exit:
+ if (reset_req_drop) {
+ WL_INFORM_MEM(("reset geofence req dropped, reason = %d\n",
+ reset_req_drop));
+ }
+ if (need_rtt_mutex == TRUE) {
+ mutex_unlock(&rtt_status->rtt_mutex);
+ }
+ return;
+}
+
+void
+wl_cfgnan_reset_geofence_ranging_for_cur_target(dhd_pub_t *dhd, int sched_reason)
+{
+ struct net_device *dev = dhd_linux_get_primary_netdev(dhd);
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ rtt_geofence_target_info_t *geofence_target = NULL;
+ nan_ranging_inst_t *ranging_inst = NULL;
+
+ geofence_target = dhd_rtt_get_geofence_current_target(dhd);
+ if (!geofence_target) {
+ WL_DBG(("reset ranging request dropped: geofence target null\n"));
+ goto exit;
+ }
+
+ ranging_inst = wl_cfgnan_check_for_ranging(cfg,
+ &geofence_target->peer_addr);
+ if (!ranging_inst) {
+ WL_DBG(("reset ranging request dropped: ranging instance null\n"));
+ goto exit;
+ }
+
+ if (NAN_RANGING_IS_IN_PROG(ranging_inst->range_status) &&
+ (ranging_inst->range_type == RTT_TYPE_NAN_GEOFENCE)) {
+ WL_DBG(("Ranging is already in progress for Current target "
+ MACDBG " \n", MAC2STRDBG(&ranging_inst->peer_addr)));
+ goto exit;
+ }
+
+ wl_cfgnan_reset_geofence_ranging(cfg, ranging_inst, sched_reason, TRUE);
+
+exit:
+ return;
+}
+
+static bool
+wl_cfgnan_geofence_retry_check(nan_ranging_inst_t *rng_inst, uint8 reason_code)
+{
+ bool geof_retry = FALSE;
+
+ switch (reason_code) {
+ case NAN_RNG_TERM_IDLE_TIMEOUT:
+ /* Fallthrough: keep adding more reason codes if needed */
+ case NAN_RNG_TERM_RNG_RESP_TIMEOUT:
+ case NAN_RNG_TERM_RNG_RESP_REJ:
+ case NAN_RNG_TERM_RNG_TXS_FAIL:
+ if (rng_inst->geof_retry_count <
+ NAN_RNG_GEOFENCE_MAX_RETRY_CNT) {
+ rng_inst->geof_retry_count++;
+ geof_retry = TRUE;
+ }
+ break;
+ default:
+ /* FALSE for any other case */
+ break;
+ }
+
+ return geof_retry;
+}
+#endif /* RTT_SUPPORT */
+
+s32
+wl_cfgnan_notify_nan_status(struct bcm_cfg80211 *cfg,
+ bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *event, void *event_data)
+{
+ uint16 data_len;
+ uint32 event_num;
+ s32 event_type;
+ int hal_event_id = 0;
+ nan_event_data_t *nan_event_data = NULL;
+ nan_parse_event_ctx_t nan_event_ctx;
+ uint16 tlvs_offset = 0;
+ uint16 nan_opts_len = 0;
+ uint8 *tlv_buf;
+ s32 ret = BCME_OK;
+ bcm_xtlv_opts_t xtlv_opt = BCM_IOV_CMD_OPT_ALIGN32;
+ uint32 status;
+ nan_svc_info_t *svc;
+#ifdef RTT_SUPPORT
+ dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+ UNUSED_PARAMETER(dhd);
+ UNUSED_PARAMETER(rtt_status);
+ if (rtt_status == NULL) {
+ return -EINVAL;
+ }
+#endif /* RTT_SUPPORT */
+
+ UNUSED_PARAMETER(wl_nan_print_status);
+ UNUSED_PARAMETER(status);
+ NAN_DBG_ENTER();
+
+ if (!event || !event_data) {
+ WL_ERR(("event data is NULL\n"));
+ return -EINVAL;
+ }
+
+ event_type = ntoh32(event->event_type);
+ event_num = ntoh32(event->reason);
+ data_len = ntoh32(event->datalen);
+
+#ifdef RTT_SUPPORT
+ if (event_num == WL_NAN_EVENT_RNG_REQ_IND)
+ {
+ /* Flush any RTT work to avoid any
+ * inconsistencies & ensure RNG REQ
+ * is handled in a stable RTT state.
+ * Note new RTT work can be enqueued from
+ * a. host command context - synchronized over rtt_mutex & state
+ * b. event context - event processing is synchronized/serialised
+ */
+ flush_work(&rtt_status->work);
+ }
+#endif /* RTT_SUPPORT */
+
+ NAN_MUTEX_LOCK();
+
+ if (NAN_INVALID_EVENT(event_num)) {
+ WL_ERR(("unsupported event, num: %d, event type: %d\n", event_num, event_type));
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ WL_DBG((">> Nan Event Received: %s (num=%d, len=%d)\n",
+ nan_event_to_str(event_num), event_num, data_len));
+
+#ifdef WL_NAN_DEBUG
+ prhex("nan_event_data:", event_data, data_len);
+#endif /* WL_NAN_DEBUG */
+
+ if (!cfg->nancfg->nan_init_state) {
+ WL_ERR(("nan is not in initialized state, dropping nan related events\n"));
+ ret = BCME_OK;
+ goto exit;
+ }
+
+ nan_event_data = MALLOCZ(cfg->osh, sizeof(*nan_event_data));
+ if (!nan_event_data) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ goto exit;
+ }
+
+ nan_event_ctx.cfg = cfg;
+ nan_event_ctx.nan_evt_data = nan_event_data;
+ /*
+ * send as preformatted hex string
+ * EVENT_NAN <event_type> <tlv_hex_string>
+ */
+ switch (event_num) {
+ case WL_NAN_EVENT_START:
+ case WL_NAN_EVENT_MERGE:
+ case WL_NAN_EVENT_ROLE: {
+ /* get nan status info as-is */
+ bcm_xtlv_t *xtlv = (bcm_xtlv_t *)event_data;
+ wl_nan_conf_status_t *nstatus = (wl_nan_conf_status_t *)xtlv->data;
+ WL_INFORM_MEM((">> Nan Mac Event Received: %s (num=%d, len=%d)\n",
+ nan_event_to_str(event_num), event_num, data_len));
+ WL_INFORM_MEM(("Nan Device Role %s\n", nan_role_to_str(nstatus->role)));
+ /* Mapping to common struct between DHD and HAL */
+ nan_event_data->enabled = nstatus->enabled;
+ ret = memcpy_s(&nan_event_data->local_nmi, ETHER_ADDR_LEN,
+ &nstatus->nmi, ETHER_ADDR_LEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy nmi\n"));
+ goto exit;
+ }
+ ret = memcpy_s(&nan_event_data->clus_id, ETHER_ADDR_LEN,
+ &nstatus->cid, ETHER_ADDR_LEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy cluster id\n"));
+ goto exit;
+ }
+ nan_event_data->nan_de_evt_type = event_num;
+ if (event_num == WL_NAN_EVENT_ROLE) {
+ wl_nan_print_status(nstatus);
+ }
+
+ if (event_num == WL_NAN_EVENT_START) {
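+ /* Write barriers: make the nan_event_recvd update visible
+ * to the waiter on nan_event_wait before it is woken
+ */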
+ OSL_SMP_WMB();
+ cfg->nancfg->nan_event_recvd = true;
+ OSL_SMP_WMB();
+ wake_up(&cfg->nancfg->nan_event_wait);
+ }
+ hal_event_id = GOOGLE_NAN_EVENT_DE_EVENT;
+ break;
+ }
+ case WL_NAN_EVENT_TERMINATED: {
+ bcm_xtlv_t *xtlv = (bcm_xtlv_t *)event_data;
+ wl_nan_ev_terminated_t *pev = (wl_nan_ev_terminated_t *)xtlv->data;
+
+ /* Mapping to common struct between DHD and HAL */
+ WL_TRACE(("Instance ID: %d\n", pev->instance_id));
+ nan_event_data->local_inst_id = pev->instance_id;
+ WL_TRACE(("Service Type: %d\n", pev->svctype));
+
+#ifdef WL_NAN_DISC_CACHE
+ wl_cfgnan_clear_svc_cache(cfg, pev->instance_id);
+ /* If we have to retain disc_res even after sub_cancel,
+ * do not call the API below; but we need to decide on the
+ * criteria for expiring it.
+ */
+ if (pev->svctype == NAN_SC_SUBSCRIBE) {
+ wl_cfgnan_remove_disc_result(cfg, pev->instance_id);
+ }
+#endif /* WL_NAN_DISC_CACHE */
+ /* Mapping reason code of FW to status code of framework */
+ if (pev->reason == NAN_TERM_REASON_TIMEOUT ||
+ pev->reason == NAN_TERM_REASON_USER_REQ ||
+ pev->reason == NAN_TERM_REASON_COUNT_REACHED) {
+ nan_event_data->status = NAN_STATUS_SUCCESS;
+ ret = memcpy_s(nan_event_data->nan_reason,
+ sizeof(nan_event_data->nan_reason),
+ "NAN_STATUS_SUCCESS",
+ strlen("NAN_STATUS_SUCCESS"));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy nan_reason\n"));
+ goto exit;
+ }
+ } else {
+ nan_event_data->status = NAN_STATUS_INTERNAL_FAILURE;
+ ret = memcpy_s(nan_event_data->nan_reason,
+ sizeof(nan_event_data->nan_reason),
+ "NAN_STATUS_INTERNAL_FAILURE",
+ strlen("NAN_STATUS_INTERNAL_FAILURE"));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy nan_reason\n"));
+ goto exit;
+ }
+ }
+
+ if (pev->svctype == NAN_SC_SUBSCRIBE) {
+ hal_event_id = GOOGLE_NAN_EVENT_SUBSCRIBE_TERMINATED;
+ } else {
+ hal_event_id = GOOGLE_NAN_EVENT_PUBLISH_TERMINATED;
+ }
+#ifdef WL_NAN_DISC_CACHE
+#ifdef RTT_SUPPORT
+ if (pev->reason != NAN_TERM_REASON_USER_REQ) {
+ wl_cfgnan_clear_svc_from_all_ranging_inst(cfg, pev->instance_id);
+ /* terminate ranging sessions */
+ wl_cfgnan_terminate_all_obsolete_ranging_sessions(cfg);
+ }
+#endif /* RTT_SUPPORT */
+#endif /* WL_NAN_DISC_CACHE */
+ break;
+ }
+
+ case WL_NAN_EVENT_RECEIVE: {
+ nan_opts_len = data_len;
+ hal_event_id = GOOGLE_NAN_EVENT_FOLLOWUP;
+ xtlv_opt = BCM_IOV_CMD_OPT_ALIGN_NONE;
+ break;
+ }
+
+ case WL_NAN_EVENT_TXS: {
+ bcm_xtlv_t *xtlv = (bcm_xtlv_t *)event_data;
+ wl_nan_event_txs_t *txs = (wl_nan_event_txs_t *)xtlv->data;
+ wl_nan_event_sd_txs_t *txs_sd = NULL;
+ if (txs->status == WL_NAN_TXS_SUCCESS) {
+ WL_INFORM_MEM(("TXS success for type %s(%d) token %d\n",
+ nan_frm_type_to_str(txs->type), txs->type, txs->host_seq));
+ nan_event_data->status = NAN_STATUS_SUCCESS;
+ ret = memcpy_s(nan_event_data->nan_reason,
+ sizeof(nan_event_data->nan_reason),
+ "NAN_STATUS_SUCCESS",
+ strlen("NAN_STATUS_SUCCESS"));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy nan_reason\n"));
+ goto exit;
+ }
+ } else {
+ /* TODO: populate status based on reason codes.
+ * For now report it as no ACK, so that the app/framework can retry.
+ */
+ WL_INFORM_MEM(("TXS failed for type %s(%d) status %d token %d\n",
+ nan_frm_type_to_str(txs->type), txs->type, txs->status,
+ txs->host_seq));
+ nan_event_data->status = NAN_STATUS_NO_OTA_ACK;
+ ret = memcpy_s(nan_event_data->nan_reason,
+ sizeof(nan_event_data->nan_reason),
+ "NAN_STATUS_NO_OTA_ACK",
+ strlen("NAN_STATUS_NO_OTA_ACK"));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy nan_reason\n"));
+ goto exit;
+ }
+ }
+ nan_event_data->reason = txs->reason_code;
+ nan_event_data->token = txs->host_seq;
+ if (txs->type == WL_NAN_FRM_TYPE_FOLLOWUP) {
+ hal_event_id = GOOGLE_NAN_EVENT_TRANSMIT_FOLLOWUP_IND;
+ xtlv = (bcm_xtlv_t *)(txs->opt_tlvs);
+ if (txs->opt_tlvs_len && xtlv->id == WL_NAN_XTLV_SD_TXS) {
+ txs_sd = (wl_nan_event_sd_txs_t*)xtlv->data;
+ nan_event_data->local_inst_id = txs_sd->inst_id;
+ } else {
+ WL_ERR(("Invalid params in TX status for transmit followup"));
+ ret = -EINVAL;
+ goto exit;
+ }
+#ifdef RTT_SUPPORT
+ } else if (txs->type == WL_NAN_FRM_TYPE_RNG_RESP) {
+ xtlv = (bcm_xtlv_t *)(txs->opt_tlvs);
+ if (txs->opt_tlvs_len && xtlv->id == WL_NAN_XTLV_RNG_TXS) {
+ wl_nan_range_txs_t* txs_rng_resp = (wl_nan_range_txs_t*)xtlv->data;
+ nan_ranging_inst_t *rng_inst =
+ wl_cfgnan_get_rng_inst_by_id(cfg, txs_rng_resp->range_id);
+ if (rng_inst &&
+ NAN_RANGING_SETUP_IS_IN_PROG(rng_inst->range_status)) {
+ /* Unset ranging set up in progress */
+ dhd_rtt_update_geofence_sessions_cnt(dhd, FALSE,
+ &rng_inst->peer_addr);
+ if (txs->status == WL_NAN_TXS_SUCCESS) {
+ /* range set up is over, move range in progress */
+ rng_inst->range_status =
+ NAN_RANGING_SESSION_IN_PROGRESS;
+ /* Increment geofence session count */
+ dhd_rtt_update_geofence_sessions_cnt(dhd,
+ TRUE, NULL);
+ WL_DBG(("Txs for range resp, rng_id = %d\n",
+ rng_inst->range_id));
+ } else {
+ wl_cfgnan_reset_remove_ranging_instance(cfg,
+ rng_inst);
+ }
+ }
+ } else {
+ WL_ERR(("Invalid params in TX status for range response"));
+ ret = -EINVAL;
+ goto exit;
+ }
+#endif /* RTT_SUPPORT */
+ } else { /* TODO: add for other frame types if required */
+ ret = -EINVAL;
+ goto exit;
+ }
+ break;
+ }
+
+ case WL_NAN_EVENT_DISCOVERY_RESULT: {
+ nan_opts_len = data_len;
+ hal_event_id = GOOGLE_NAN_EVENT_SUBSCRIBE_MATCH;
+ xtlv_opt = BCM_IOV_CMD_OPT_ALIGN_NONE;
+ break;
+ }
+#ifdef WL_NAN_DISC_CACHE
+ case WL_NAN_EVENT_DISC_CACHE_TIMEOUT: {
+ bcm_xtlv_t *xtlv = (bcm_xtlv_t *)event_data;
+ wl_nan_ev_disc_cache_timeout_t *cache_data =
+ (wl_nan_ev_disc_cache_timeout_t *)xtlv->data;
+ wl_nan_disc_expired_cache_entry_t *cache_entry = NULL;
+ uint16 xtlv_len = xtlv->len;
+ uint8 entry_idx = 0;
+
+ if (xtlv->id == WL_NAN_XTLV_SD_DISC_CACHE_TIMEOUT) {
+ xtlv_len = xtlv_len -
+ OFFSETOF(wl_nan_ev_disc_cache_timeout_t, cache_exp_list);
+ while ((entry_idx < cache_data->count) &&
+ (xtlv_len >= sizeof(*cache_entry))) {
+ cache_entry = &cache_data->cache_exp_list[entry_idx];
+ /* Handle ranging cases for cache timeout */
+ WL_INFORM_MEM(("WL_NAN_EVENT_DISC_CACHE_TIMEOUT peer: " MACDBG
+ " l_id:%d r_id:%d\n", MAC2STRDBG(&cache_entry->r_nmi_addr),
+ cache_entry->l_sub_id, cache_entry->r_pub_id));
+#ifdef RTT_SUPPORT
+ wl_cfgnan_ranging_clear_publish(cfg, &cache_entry->r_nmi_addr,
+ cache_entry->l_sub_id);
+#endif /* RTT_SUPPORT */
+ /* Invalidate local cache info */
+ wl_cfgnan_remove_disc_result(cfg, cache_entry->l_sub_id);
+ xtlv_len = xtlv_len - sizeof(*cache_entry);
+ entry_idx++;
+ }
+ }
+ break;
+ }
+#ifdef RTT_SUPPORT
+ case WL_NAN_EVENT_RNG_REQ_IND: {
+ wl_nan_ev_rng_req_ind_t *rng_ind;
+ bcm_xtlv_t *xtlv = (bcm_xtlv_t *)event_data;
+
+ nan_opts_len = data_len;
+ rng_ind = (wl_nan_ev_rng_req_ind_t *)xtlv->data;
+ xtlv_opt = BCM_IOV_CMD_OPT_ALIGN_NONE;
+ WL_INFORM_MEM(("Received WL_NAN_EVENT_RNG_REQ_IND range_id %d"
+ " peer:" MACDBG "\n", rng_ind->rng_id,
+ MAC2STRDBG(&rng_ind->peer_m_addr)));
+ ret = wl_cfgnan_handle_ranging_ind(cfg, rng_ind);
+ /* no need to event to HAL */
+ goto exit;
+ }
+
+ case WL_NAN_EVENT_RNG_TERM_IND: {
+ bcm_xtlv_t *xtlv = (bcm_xtlv_t *)event_data;
+ nan_ranging_inst_t *rng_inst;
+ wl_nan_ev_rng_term_ind_t *range_term = (wl_nan_ev_rng_term_ind_t *)xtlv->data;
+ int rng_sched_reason = 0;
+ int8 index = -1;
+ rtt_geofence_target_info_t* geofence_target;
+ BCM_REFERENCE(dhd);
+ WL_INFORM_MEM(("Received WL_NAN_EVENT_RNG_TERM_IND peer: " MACDBG ", "
+ " Range ID:%d Reason Code:%d\n", MAC2STRDBG(&range_term->peer_m_addr),
+ range_term->rng_id, range_term->reason_code));
+ rng_inst = wl_cfgnan_get_rng_inst_by_id(cfg, range_term->rng_id);
+ if (rng_inst) {
+ if (!NAN_RANGING_IS_IN_PROG(rng_inst->range_status)) {
+ WL_DBG(("Late or unsynchronized nan term indicator event\n"));
+ break;
+ }
+ rng_sched_reason = RTT_SCHED_RNG_TERM;
+ if (rng_inst->range_role == NAN_RANGING_ROLE_RESPONDER) {
+ dhd_rtt_update_geofence_sessions_cnt(dhd, FALSE,
+ &rng_inst->peer_addr);
+ wl_cfgnan_reset_remove_ranging_instance(cfg, rng_inst);
+ } else {
+ if (rng_inst->range_type == RTT_TYPE_NAN_DIRECTED) {
+ dhd_rtt_handle_nan_rtt_session_end(dhd,
+ &rng_inst->peer_addr);
+ if (dhd_rtt_nan_is_directed_setup_in_prog_with_peer(dhd,
+ &rng_inst->peer_addr)) {
+ dhd_rtt_nan_update_directed_setup_inprog(dhd,
+ NULL, FALSE);
+ } else {
+ dhd_rtt_nan_update_directed_sessions_cnt(dhd,
+ FALSE);
+ }
+ } else if (rng_inst->range_type == RTT_TYPE_NAN_GEOFENCE) {
+ rng_inst->range_status = NAN_RANGING_REQUIRED;
+ dhd_rtt_update_geofence_sessions_cnt(dhd, FALSE,
+ &rng_inst->peer_addr);
+ if (!wl_cfgnan_geofence_retry_check(rng_inst,
+ range_term->reason_code)) {
+ /* Report on ranging failure */
+ wl_cfgnan_disc_result_on_geofence_cancel(cfg,
+ rng_inst);
+ WL_TRACE(("Reset the state on terminate\n"));
+ geofence_target = dhd_rtt_get_geofence_target(dhd,
+ &rng_inst->peer_addr, &index);
+ if (geofence_target) {
+ dhd_rtt_remove_geofence_target(dhd,
+ &geofence_target->peer_addr);
+ }
+ }
+ }
+ }
+ /* Reset Ranging Instance and trigger ranging if applicable */
+ wl_cfgnan_reset_geofence_ranging(cfg, rng_inst, rng_sched_reason, TRUE);
+ } else {
+ /*
+ * This can happen in some scenarios,
+ * like receiving a term after a failed txs for a range resp,
+ * where the ranging instance is already cleared
+ */
+ WL_DBG(("Term Indication received for a peer without rng inst\n"));
+ }
+ break;
+ }
+
+ case WL_NAN_EVENT_RNG_RESP_IND: {
+ bcm_xtlv_t *xtlv = (bcm_xtlv_t *)event_data;
+ nan_ranging_inst_t *rng_inst;
+ wl_nan_ev_rng_resp_t *range_resp = (wl_nan_ev_rng_resp_t *)xtlv->data;
+
+ WL_INFORM_MEM(("Received WL_NAN_EVENT_RNG_RESP_IND peer: " MACDBG ", "
+ " Range ID:%d Ranging Status:%d\n", MAC2STRDBG(&range_resp->peer_m_addr),
+ range_resp->rng_id, range_resp->status));
+ rng_inst = wl_cfgnan_get_rng_inst_by_id(cfg, range_resp->rng_id);
+ if (!rng_inst) {
+ WL_DBG(("Late or unsynchronized resp indicator event\n"));
+ break;
+ }
+ //ASSERT(NAN_RANGING_SETUP_IS_IN_PROG(rng_inst->range_status));
+ if (!NAN_RANGING_SETUP_IS_IN_PROG(rng_inst->range_status)) {
+ WL_INFORM_MEM(("Resp Indicator received for not in prog range inst\n"));
+ break;
+ }
+ /* range set up is over now, move to range in progress */
+ rng_inst->range_status = NAN_RANGING_SESSION_IN_PROGRESS;
+ if (rng_inst->range_type == RTT_TYPE_NAN_DIRECTED) {
+ /* FixMe: Ideally, all of the below, like updating the
+ * session cnt, should be applicable to nan rtt and not
+ * specific to geofence. To be fixed in the next RB.
+ */
+ dhd_rtt_nan_update_directed_setup_inprog(dhd, NULL, FALSE);
+ /*
+ * Increase session count here,
+ * failure status is followed by Term Ind
+ * and handled accordingly
+ */
+ dhd_rtt_nan_update_directed_sessions_cnt(dhd, TRUE);
+ /*
+ * If pending targets to be triggered,
+ * and max sessions, not running already,
+ * schedule next target for RTT
+ */
+ if ((!dhd_rtt_nan_all_directed_sessions_triggered(dhd)) &&
+ dhd_rtt_nan_directed_sessions_allowed(dhd)) {
+ /* Find and set next directed target */
+ dhd_rtt_set_next_target_idx(dhd,
+ (dhd_rtt_get_cur_target_idx(dhd) + 1));
+ /* schedule RTT */
+ dhd_rtt_schedule_rtt_work_thread(dhd,
+ RTT_SCHED_RNG_RESP_IND);
+ }
+ break;
+ }
+ /*
+ ASSERT(dhd_rtt_is_geofence_setup_inprog_with_peer(dhd,
+ &rng_inst->peer_addr));
+ */
+ if (!dhd_rtt_is_geofence_setup_inprog_with_peer(dhd,
+ &rng_inst->peer_addr)) {
+ WL_INFORM_MEM(("Resp Indicator received for not in prog range peer\n"));
+ break;
+ }
+ /* Unset geof ranging setup status */
+ dhd_rtt_update_geofence_sessions_cnt(dhd, FALSE, &rng_inst->peer_addr);
+ /* Increase geofence session count */
+ dhd_rtt_update_geofence_sessions_cnt(dhd, TRUE, NULL);
+ wl_cfgnan_reset_geofence_ranging(cfg,
+ rng_inst, RTT_SCHED_RNG_RESP_IND, TRUE);
+ break;
+ }
+#endif /* RTT_SUPPORT */
+#endif /* WL_NAN_DISC_CACHE */
+ /*
+ * Data path event data is received in a common event struct;
+ * all of these events are handled as part of one case, hence
+ * the fall through is intentional
+ */
+ case WL_NAN_EVENT_PEER_DATAPATH_IND:
+ case WL_NAN_EVENT_DATAPATH_ESTB:
+ case WL_NAN_EVENT_DATAPATH_END: {
+ ret = wl_nan_dp_cmn_event_data(cfg, event_data, data_len,
+ &tlvs_offset, &nan_opts_len,
+ event_num, &hal_event_id, nan_event_data);
+ /* Avoiding optional param parsing for DP END Event */
+ if (event_num == WL_NAN_EVENT_DATAPATH_END) {
+ nan_opts_len = 0;
+ xtlv_opt = BCM_IOV_CMD_OPT_ALIGN_NONE;
+ }
+ if (unlikely(ret)) {
+ WL_ERR(("nan dp common event data parse failed\n"));
+ goto exit;
+ }
+ break;
+ }
+ case WL_NAN_EVENT_PEER_DATAPATH_RESP:
+ {
+ /* No action - intentionally added to avoid prints when this event is received */
+ break;
+ }
+ default:
+ WL_ERR_RLMT(("WARNING: unimplemented NAN APP EVENT = %d\n", event_num));
+ ret = BCME_ERROR;
+ goto exit;
+ }
+
+ if (nan_opts_len) {
+ tlv_buf = (uint8 *)event_data + tlvs_offset;
+ /* Extract event data tlvs and pass their resp to cb fn */
+ ret = bcm_unpack_xtlv_buf((void *)&nan_event_ctx, (const uint8*)tlv_buf,
+ nan_opts_len, xtlv_opt, wl_cfgnan_set_vars_cbfn);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to unpack tlv data, ret=%d\n", ret));
+ }
+ }
+
+#ifdef WL_NAN_DISC_CACHE
+ if (hal_event_id == GOOGLE_NAN_EVENT_SUBSCRIBE_MATCH) {
+#ifdef RTT_SUPPORT
+ bool send_disc_result;
+#endif /* RTT_SUPPORT */
+ u16 update_flags = 0;
+
+ WL_TRACE(("Cache disc res\n"));
+ ret = wl_cfgnan_cache_disc_result(cfg, nan_event_data, &update_flags);
+ if (ret) {
+ WL_ERR(("Failed to cache disc result ret %d\n", ret));
+ }
+#ifdef RTT_SUPPORT
+ if (nan_event_data->sde_control_flag & NAN_SDE_CF_RANGING_REQUIRED) {
+ ret = wl_cfgnan_check_disc_result_for_ranging(cfg,
+ nan_event_data, &send_disc_result);
+ if ((ret == BCME_OK) && (send_disc_result == FALSE)) {
+ /* Avoid sending disc result instantly and exit */
+ goto exit;
+ } else {
+ /* TODO: should we terminate service if ranging fails ? */
+ WL_INFORM_MEM(("Ranging failed or not required, " MACDBG
+ " sub_id:%d , pub_id:%d, ret = %d, send_disc_result = %d\n",
+ MAC2STRDBG(&nan_event_data->remote_nmi),
+ nan_event_data->sub_id, nan_event_data->pub_id,
+ ret, send_disc_result));
+ }
+ } else {
+ nan_svc_info_t *svc_info = wl_cfgnan_get_svc_inst(cfg,
+ nan_event_data->sub_id, 0);
+ if (svc_info && svc_info->ranging_required &&
+ (update_flags & NAN_DISC_CACHE_PARAM_SDE_CONTROL)) {
+ wl_cfgnan_ranging_clear_publish(cfg,
+ &nan_event_data->remote_nmi, nan_event_data->sub_id);
+ }
+ }
+#endif /* RTT_SUPPORT */
+
+ /*
+ * If tx match filter is present as part of active subscribe, keep same filter
+ * values in discovery results also.
+ */
+ if (nan_event_data->sub_id == nan_event_data->requestor_id) {
+ svc = wl_cfgnan_get_svc_inst(cfg, nan_event_data->sub_id, 0);
+ if (svc && svc->tx_match_filter_len) {
+ nan_event_data->tx_match_filter.dlen = svc->tx_match_filter_len;
+ nan_event_data->tx_match_filter.data =
+ MALLOCZ(cfg->osh, svc->tx_match_filter_len);
+ if (!nan_event_data->tx_match_filter.data) {
+ WL_ERR(("%s: tx_match_filter_data alloc failed\n",
+ __FUNCTION__));
+ nan_event_data->tx_match_filter.dlen = 0;
+ ret = -ENOMEM;
+ goto exit;
+ }
+ ret = memcpy_s(nan_event_data->tx_match_filter.data,
+ nan_event_data->tx_match_filter.dlen,
+ svc->tx_match_filter, svc->tx_match_filter_len);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy tx match filter data\n"));
+ goto exit;
+ }
+ }
+ }
+ }
+#endif /* WL_NAN_DISC_CACHE */
+
+ WL_TRACE(("Send up %s (%d) data to HAL, hal_event_id=%d\n",
+ nan_event_to_str(event_num), event_num, hal_event_id));
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT)
+ ret = wl_cfgvendor_send_nan_event(cfg->wdev->wiphy, bcmcfg_to_prmry_ndev(cfg),
+ hal_event_id, nan_event_data);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to send event to nan hal, %s (%d)\n",
+ nan_event_to_str(event_num), event_num));
+ }
+#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT) */
+
+exit:
+ wl_cfgnan_clear_nan_event_data(cfg, nan_event_data);
+
+ NAN_MUTEX_UNLOCK();
+ NAN_DBG_EXIT();
+ return ret;
+}
+
+#ifdef WL_NAN_DISC_CACHE
+static int
+wl_cfgnan_cache_disc_result(struct bcm_cfg80211 *cfg, void * data,
+ u16 *disc_cache_update_flags)
+{
+ nan_event_data_t* disc = (nan_event_data_t*)data;
+ int i, add_index = 0;
+ int ret = BCME_OK;
+ wl_nancfg_t *nancfg = cfg->nancfg;
+ nan_disc_result_cache *disc_res = nancfg->nan_disc_cache;
+ *disc_cache_update_flags = 0;
+
+ if (!nancfg->nan_enable) {
+ WL_DBG(("nan not enabled"));
+ return BCME_NOTENABLED;
+ }
+ if (nancfg->nan_disc_count == NAN_MAX_CACHE_DISC_RESULT) {
+ WL_DBG(("cache full"));
+ ret = BCME_NORESOURCE;
+ goto done;
+ }
+
+ for (i = 0; i < NAN_MAX_CACHE_DISC_RESULT; i++) {
+ if (!disc_res[i].valid) {
+ add_index = i;
+ continue;
+ }
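+ /* Keep scanning past free slots: an existing entry for the same
+ * peer/service must be updated in place instead of duplicated
+ */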
+ if (!memcmp(&disc_res[i].peer, &disc->remote_nmi, ETHER_ADDR_LEN) &&
+ !memcmp(disc_res[i].svc_hash, disc->svc_name, WL_NAN_SVC_HASH_LEN)) {
+ WL_DBG(("cache entry already present, i = %d", i));
+ /* Update needed parameters here */
+ if (disc_res[i].sde_control_flag != disc->sde_control_flag) {
+ disc_res[i].sde_control_flag = disc->sde_control_flag;
+ *disc_cache_update_flags |= NAN_DISC_CACHE_PARAM_SDE_CONTROL;
+ }
+ ret = BCME_OK; /* entry already present */
+ goto done;
+ }
+ }
+ WL_DBG(("adding cache entry: add_index = %d\n", add_index));
+ disc_res[add_index].valid = 1;
+ disc_res[add_index].pub_id = disc->pub_id;
+ disc_res[add_index].sub_id = disc->sub_id;
+ disc_res[add_index].publish_rssi = disc->publish_rssi;
+ disc_res[add_index].peer_cipher_suite = disc->peer_cipher_suite;
+ disc_res[add_index].sde_control_flag = disc->sde_control_flag;
+ ret = memcpy_s(&disc_res[add_index].peer, ETHER_ADDR_LEN,
+ &disc->remote_nmi, ETHER_ADDR_LEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy remote nmi\n"));
+ goto done;
+ }
+ ret = memcpy_s(disc_res[add_index].svc_hash, WL_NAN_SVC_HASH_LEN,
+ disc->svc_name, WL_NAN_SVC_HASH_LEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy svc hash\n"));
+ goto done;
+ }
+
+ if (disc->svc_info.dlen && disc->svc_info.data) {
+ disc_res[add_index].svc_info.dlen = disc->svc_info.dlen;
+ disc_res[add_index].svc_info.data =
+ MALLOCZ(cfg->osh, disc_res[add_index].svc_info.dlen);
+ if (!disc_res[add_index].svc_info.data) {
+ WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
+ disc_res[add_index].svc_info.dlen = 0;
+ ret = BCME_NOMEM;
+ goto done;
+ }
+ ret = memcpy_s(disc_res[add_index].svc_info.data, disc_res[add_index].svc_info.dlen,
+ disc->svc_info.data, disc->svc_info.dlen);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy svc info\n"));
+ goto done;
+ }
+ }
+ if (disc->tx_match_filter.dlen && disc->tx_match_filter.data) {
+ disc_res[add_index].tx_match_filter.dlen = disc->tx_match_filter.dlen;
+ disc_res[add_index].tx_match_filter.data =
+ MALLOCZ(cfg->osh, disc_res[add_index].tx_match_filter.dlen);
+ if (!disc_res[add_index].tx_match_filter.data) {
+ WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
+ disc_res[add_index].tx_match_filter.dlen = 0;
+ ret = BCME_NOMEM;
+ goto done;
+ }
+ ret = memcpy_s(disc_res[add_index].tx_match_filter.data,
+ disc_res[add_index].tx_match_filter.dlen,
+ disc->tx_match_filter.data, disc->tx_match_filter.dlen);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy tx match filter\n"));
+ goto done;
+ }
+ }
+ nancfg->nan_disc_count++;
+ WL_DBG(("cfg->nan_disc_count = %d\n", nancfg->nan_disc_count));
+
+done:
+ return ret;
+}
+
+#ifdef RTT_SUPPORT
+/* Sending command to FW for clearing discovery cache info in FW */
+static int
+wl_cfgnan_clear_disc_cache(struct bcm_cfg80211 *cfg, wl_nan_instance_id_t sub_id)
+{
+ s32 ret = BCME_OK;
+ uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
+ uint32 status;
+ uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
+ uint8 buf[NAN_IOCTL_BUF_SIZE];
+ bcm_iov_batch_buf_t *nan_buf;
+ bcm_iov_batch_subcmd_t *sub_cmd;
+ uint16 subcmd_len;
+
+ bzero(buf, sizeof(buf));
+ nan_buf = (bcm_iov_batch_buf_t*)buf;
+
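+ /* Batch iovar layout: a header (version, count, is_set) followed by
+ * one bcm_iov_batch_subcmd_t per command, each carrying id, len,
+ * options and inline data (here just the subscribe id)
+ */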
+ nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
+ nan_buf->count = 0;
+ nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
+
+ sub_cmd = (bcm_iov_batch_subcmd_t *)(&nan_buf->cmds[0]);
+ ret = wl_cfg_nan_check_cmd_len(nan_buf_size,
+ sizeof(sub_id), &subcmd_len);
+ if (unlikely(ret)) {
+ WL_ERR(("nan_sub_cmd check failed\n"));
+ goto fail;
+ }
+
+ /* Fill the sub_command block */
+ sub_cmd->id = htod16(WL_NAN_CMD_SD_DISC_CACHE_CLEAR);
+ sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(sub_id);
+ sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
+ /* Data size len vs buffer len check is already done above.
+ * So, short buffer error is impossible.
+ */
+ (void)memcpy_s(sub_cmd->data, (nan_buf_size - OFFSETOF(bcm_iov_batch_subcmd_t, data)),
+ &sub_id, sizeof(sub_id));
+ /* adjust iov data len to the end of last data record */
+ nan_buf_size -= (subcmd_len);
+
+ nan_buf->count++;
+ nan_buf->is_set = true;
+ nan_buf_size = NAN_IOCTL_BUF_SIZE - nan_buf_size;
+ /* Same src and dest len here */
+ bzero(resp_buf, sizeof(resp_buf));
+ ret = wl_cfgnan_execute_ioctl(bcmcfg_to_prmry_ndev(cfg), cfg,
+ nan_buf, nan_buf_size, &status,
+ (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
+ if (unlikely(ret) || unlikely(status)) {
+ WL_ERR(("Disc cache clear handler failed ret %d status %d\n",
+ ret, status));
+ goto fail;
+ }
+
+fail:
+ return ret;
+}
+#endif /* RTT_SUPPORT */
+
+static int wl_cfgnan_remove_disc_result(struct bcm_cfg80211 *cfg,
+ uint8 local_subid)
+{
+ int i;
+ int ret = BCME_NOTFOUND;
+ nan_disc_result_cache *disc_res = cfg->nancfg->nan_disc_cache;
+ if (!cfg->nancfg->nan_enable) {
+ WL_DBG(("nan not enabled\n"));
+ ret = BCME_NOTENABLED;
+ goto done;
+ }
+ for (i = 0; i < NAN_MAX_CACHE_DISC_RESULT; i++) {
+ if ((disc_res[i].valid) && (disc_res[i].sub_id == local_subid)) {
+ WL_TRACE(("make cache entry invalid\n"));
+ if (disc_res[i].tx_match_filter.data) {
+ MFREE(cfg->osh, disc_res[i].tx_match_filter.data,
+ disc_res[i].tx_match_filter.dlen);
+ }
+ if (disc_res[i].svc_info.data) {
+ MFREE(cfg->osh, disc_res[i].svc_info.data,
+ disc_res[i].svc_info.dlen);
+ }
+ bzero(&disc_res[i], sizeof(disc_res[i]));
+ cfg->nancfg->nan_disc_count--;
+ ret = BCME_OK;
+ }
+ }
+ if (ret == BCME_NOTFOUND) {
+ WL_DBG(("couldn't find entry\n"));
+ }
+done:
+ return ret;
+}
+
+static nan_disc_result_cache *
+wl_cfgnan_get_disc_result(struct bcm_cfg80211 *cfg, uint8 remote_pubid,
+ struct ether_addr *peer)
+{
+ int i;
+ nan_disc_result_cache *disc_res = cfg->nancfg->nan_disc_cache;
+ if (remote_pubid) {
+ for (i = 0; i < NAN_MAX_CACHE_DISC_RESULT; i++) {
+ if ((disc_res[i].pub_id == remote_pubid) &&
+ !memcmp(&disc_res[i].peer, peer, ETHER_ADDR_LEN)) {
+ WL_DBG(("Found entry: i = %d\n", i));
+ return &disc_res[i];
+ }
+ }
+ } else {
+ for (i = 0; i < NAN_MAX_CACHE_DISC_RESULT; i++) {
+ if (!memcmp(&disc_res[i].peer, peer, ETHER_ADDR_LEN)) {
+ WL_DBG(("Found entry: %d\n", i));
+ return &disc_res[i];
+ }
+ }
+ }
+ return NULL;
+}
+#endif /* WL_NAN_DISC_CACHE */
+
+static void
+wl_cfgnan_update_dp_info(struct bcm_cfg80211 *cfg, bool add,
+ nan_data_path_id ndp_id)
+{
+ uint8 i;
+ bool match_found = false;
+ wl_nancfg_t *nancfg = cfg->nancfg;
+ /* As of now, we don't see a need to know which ndp is active,
+ * so we just keep track of ndps via a count. If we need to know
+ * the status of each ndp based on its ndp id, we need to change
+ * this implementation to use a bit mask.
+ */
+
+ if (add) {
+ /* On first NAN DP establishment, disable ARP. */
+ for (i = 0; i < NAN_MAX_NDP_PEER; i++) {
+ if (!nancfg->ndp_id[i]) {
+ WL_TRACE(("Found empty field\n"));
+ break;
+ }
+ }
+
+ if (i == NAN_MAX_NDP_PEER) {
+ WL_ERR(("%s:cannot accommodate ndp id\n", __FUNCTION__));
+ return;
+ }
+ if (ndp_id) {
+ nancfg->nan_dp_count++;
+ nancfg->ndp_id[i] = ndp_id;
+ WL_DBG(("%s:Added ndp id = [%d] at i = %d\n",
+ __FUNCTION__, nancfg->ndp_id[i], i));
+ wl_cfg80211_concurrent_roam(cfg, true);
+ }
+ } else {
+ ASSERT(nancfg->nan_dp_count);
+ if (ndp_id) {
+ for (i = 0; i < NAN_MAX_NDP_PEER; i++) {
+ if (nancfg->ndp_id[i] == ndp_id) {
+ nancfg->ndp_id[i] = 0;
+ WL_DBG(("%s:Removed ndp id = [%d] from i = %d\n",
+ __FUNCTION__, ndp_id, i));
+ match_found = true;
+ if (nancfg->nan_dp_count) {
+ nancfg->nan_dp_count--;
+ }
+ break;
+ } else {
+ WL_DBG(("couldn't find entry for ndp id = %d\n",
+ ndp_id));
+ }
+ }
+ if (match_found == false) {
+ WL_ERR(("Received unsaved NDP Id = %d !!\n", ndp_id));
+ } else {
+ if (nancfg->nan_dp_count == 0) {
+ wl_cfg80211_concurrent_roam(cfg, false);
+ wl_cfgnan_immediate_nan_disable_pending(cfg);
+ }
+ }
+
+ }
+ }
+ WL_INFORM_MEM(("NAN_DP_COUNT: %d\n", nancfg->nan_dp_count));
+}
+
+bool
+wl_cfgnan_is_dp_active(struct net_device *ndev)
+{
+ struct bcm_cfg80211 *cfg;
+ bool nan_dp;
+
+ if (!ndev || !ndev->ieee80211_ptr) {
+ WL_ERR(("ndev/wdev null\n"));
+ return false;
+ }
+
+ cfg = wiphy_priv(ndev->ieee80211_ptr->wiphy);
+ nan_dp = cfg->nancfg->nan_dp_count ? true : false;
+
+ WL_DBG(("NAN DP status:%d\n", nan_dp));
+ return nan_dp;
+}
+
+static s32
+wl_cfgnan_get_ndi_idx(struct bcm_cfg80211 *cfg)
+{
+ int i;
+ for (i = 0; i < cfg->nancfg->max_ndi_supported; i++) {
+ if (!cfg->nancfg->ndi[i].in_use) {
+ /* Free interface, use it */
+ return i;
+ }
+ }
+ /* Don't have a free interface */
+ return WL_INVALID;
+}
+
+static s32
+wl_cfgnan_add_ndi_data(struct bcm_cfg80211 *cfg, s32 idx, char *name)
+{
+ u16 len;
+ wl_nancfg_t *nancfg = cfg->nancfg;
+ if (!name || (idx < 0) || (idx >= cfg->nancfg->max_ndi_supported)) {
+ return -EINVAL;
+ }
+
+ /* Ensure ifname string size <= IFNAMSIZ including null termination */
+ len = MIN(strlen(name), (IFNAMSIZ - 1));
+ strncpy(nancfg->ndi[idx].ifname, name, len);
+ nancfg->ndi[idx].ifname[len] = '\0';
+ nancfg->ndi[idx].in_use = true;
+ nancfg->ndi[idx].created = false;
+
+ return WL_INVALID;
+}
+
+static s32
+wl_cfgnan_del_ndi_data(struct bcm_cfg80211 *cfg, char *name)
+{
+ u16 len;
+ int i;
+ wl_nancfg_t *nancfg = cfg->nancfg;
+
+ if (!name) {
+ return -EINVAL;
+ }
+
+ len = MIN(strlen(name), IFNAMSIZ);
+ for (i = 0; i < cfg->nancfg->max_ndi_supported; i++) {
+ if (strncmp(nancfg->ndi[i].ifname, name, len) == 0) {
+ bzero(&nancfg->ndi[i].ifname, IFNAMSIZ);
+ nancfg->ndi[i].in_use = false;
+ nancfg->ndi[i].created = false;
+ nancfg->ndi[i].nan_ndev = NULL;
+ return i;
+ }
+ }
+ return -EINVAL;
+}
+
+s32
+wl_cfgnan_delete_ndp(struct bcm_cfg80211 *cfg,
+ struct net_device *nan_ndev)
+{
+ s32 ret = BCME_OK;
+ uint8 i = 0;
+ wl_nancfg_t *nancfg = cfg->nancfg;
+
+ for (i = 0; i < cfg->nancfg->max_ndi_supported; i++) {
+ if (nancfg->ndi[i].in_use && nancfg->ndi[i].created &&
+ (nancfg->ndi[i].nan_ndev == nan_ndev)) {
+ WL_INFORM_MEM(("iface name: %s, cfg->nancfg->ndi[i].nan_ndev = %p"
+ " and nan_ndev = %p\n",
+ (char*)nancfg->ndi[i].ifname,
+ nancfg->ndi[i].nan_ndev, nan_ndev));
+ ret = _wl_cfg80211_del_if(cfg, nan_ndev, NULL,
+ (char*)nancfg->ndi[i].ifname);
+ if (ret) {
+ WL_ERR(("failed to del ndi [%d]\n", ret));
+ }
+ /*
+ * Intentional fall through to clear the host data structs
+ * Unconditionally delete the ndi data and states
+ */
+ if (wl_cfgnan_del_ndi_data(cfg,
+ (char*)nancfg->ndi[i].ifname) < 0) {
+ WL_ERR(("Failed to find matching data for ndi:%s\n",
+ (char*)nancfg->ndi[i].ifname));
+ }
+ }
+ }
+ return ret;
+}
+
+int
+wl_cfgnan_get_status(struct net_device *ndev, wl_nan_conf_status_t *nan_status)
+{
+ bcm_iov_batch_buf_t *nan_buf = NULL;
+ uint16 subcmd_len;
+ bcm_iov_batch_subcmd_t *sub_cmd = NULL;
+ bcm_iov_batch_subcmd_t *sub_cmd_resp = NULL;
+ uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
+ wl_nan_conf_status_t *nstatus = NULL;
+ uint32 status;
+ s32 ret = BCME_OK;
+ uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ NAN_DBG_ENTER();
+
+ nan_buf = MALLOCZ(cfg->osh, NAN_IOCTL_BUF_SIZE);
+ if (!nan_buf) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ ret = BCME_NOMEM;
+ goto fail;
+ }
+
+ nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
+ nan_buf->count = 0;
+ nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
+ sub_cmd = (bcm_iov_batch_subcmd_t*)(uint8 *)(&nan_buf->cmds[0]);
+
+ ret = wl_cfg_nan_check_cmd_len(nan_buf_size,
+ sizeof(*nstatus), &subcmd_len);
+ if (unlikely(ret)) {
+ WL_ERR(("nan_sub_cmd check failed\n"));
+ goto fail;
+ }
+
+ nstatus = (wl_nan_conf_status_t *)sub_cmd->data;
+ sub_cmd->id = htod16(WL_NAN_CMD_CFG_STATUS);
+ sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(*nstatus);
+ sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
+ nan_buf_size -= subcmd_len;
+ nan_buf->count = 1;
+ nan_buf->is_set = false;
+
+ bzero(resp_buf, sizeof(resp_buf));
+ ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
+ (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
+ if (unlikely(ret) || unlikely(status)) {
+ WL_ERR(("get nan status failed ret %d status %d \n",
+ ret, status));
+ goto fail;
+ }
+ sub_cmd_resp = &((bcm_iov_batch_buf_t *)(resp_buf))->cmds[0];
+ /* WL_NAN_CMD_CFG_STATUS return value doesn't use xtlv package */
+ nstatus = ((wl_nan_conf_status_t *)&sub_cmd_resp->data[0]);
+ ret = memcpy_s(nan_status, sizeof(wl_nan_conf_status_t),
+ nstatus, sizeof(wl_nan_conf_status_t));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy nan status\n"));
+ goto fail;
+ }
+
+fail:
+ if (nan_buf) {
+ MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
+ }
+ NAN_DBG_EXIT();
+ return ret;
+}
+
+s32
+wl_nan_print_avail_stats(const uint8 *data)
+{
+ int idx;
+ s32 ret = BCME_OK;
+ int s_chan = 0;
+ char pbuf[NAN_IOCTL_BUF_SIZE_MED];
+ const wl_nan_stats_sched_t *sched = (const wl_nan_stats_sched_t *)data;
+#define SLOT_PRINT_SIZE 4
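+/* worst case 4 chars printed per slot below: "%03d|" */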
+
+ char *buf = pbuf;
+ int remained_len = 0, bytes_written = 0;
+ bzero(pbuf, sizeof(pbuf));
+
+ if ((sched->num_slot * SLOT_PRINT_SIZE) > (sizeof(pbuf)-1)) {
+ WL_ERR(("overflowed slot number %d detected\n",
+ sched->num_slot));
+ ret = BCME_BUFTOOSHORT;
+ goto exit;
+ }
+
+ remained_len = NAN_IOCTL_BUF_SIZE_MED;
+ bytes_written = snprintf(buf, remained_len, "Map ID:%u, %u/%u, Slot#:%u ",
+ sched->map_id, sched->period, sched->slot_dur, sched->num_slot);
+
+ for (idx = 0; idx < sched->num_slot; idx++) {
+ const wl_nan_stats_sched_slot_t *slot;
+ slot = &sched->slot[idx];
+ s_chan = 0;
+
+ if (!wf_chspec_malformed(slot->chanspec)) {
+ s_chan = wf_chspec_ctlchan(slot->chanspec);
+ }
+
+ buf += bytes_written;
+ remained_len -= bytes_written;
+ bytes_written = snprintf(buf, remained_len, "%03d|", s_chan);
+
+ }
+ WL_INFORM_MEM(("%s\n", pbuf));
+exit:
+ return ret;
+}
+
+static int
+wl_nan_print_stats_tlvs(void *ctx, const uint8 *data, uint16 type, uint16 len)
+{
+ int err = BCME_OK;
+
+ switch (type) {
+ /* Avail stats xtlvs */
+ case WL_NAN_XTLV_GEN_AVAIL_STATS_SCHED:
+ err = wl_nan_print_avail_stats(data);
+ break;
+ default:
+ err = BCME_BADARG;
+ WL_ERR(("Unknown xtlv type received: %x\n", type));
+ break;
+ }
+
+ return err;
+}
+
+int
+wl_cfgnan_get_stats(struct bcm_cfg80211 *cfg)
+{
+ bcm_iov_batch_buf_t *nan_buf = NULL;
+ uint16 subcmd_len;
+ bcm_iov_batch_subcmd_t *sub_cmd = NULL;
+ bcm_iov_batch_subcmd_t *sub_cmd_resp = NULL;
+ uint8 *resp_buf = NULL;
+ wl_nan_cmn_get_stat_t *get_stat = NULL;
+ wl_nan_cmn_stat_t *stats = NULL;
+ uint32 status;
+ s32 ret = BCME_OK;
+ uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
+ NAN_DBG_ENTER();
+
+ nan_buf = MALLOCZ(cfg->osh, NAN_IOCTL_BUF_SIZE);
+ resp_buf = MALLOCZ(cfg->osh, NAN_IOCTL_BUF_SIZE_LARGE);
+ if (!nan_buf || !resp_buf) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ ret = BCME_NOMEM;
+ goto fail;
+ }
+
+ nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
+ nan_buf->count = 0;
+ nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
+ sub_cmd = (bcm_iov_batch_subcmd_t*)(uint8 *)(&nan_buf->cmds[0]);
+
+ ret = wl_cfg_nan_check_cmd_len(nan_buf_size,
+ sizeof(*get_stat), &subcmd_len);
+ if (unlikely(ret)) {
+ WL_ERR(("nan_sub_cmd check failed\n"));
+ goto fail;
+ }
+
+ get_stat = (wl_nan_cmn_get_stat_t *)sub_cmd->data;
+ /* get only local availability stats */
+ get_stat->modules_btmap = (1 << NAN_AVAIL);
+ get_stat->operation = WLA_NAN_STATS_GET;
+
+ sub_cmd->id = htod16(WL_NAN_CMD_GEN_STATS);
+ sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(*get_stat);
+ sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
+ nan_buf_size -= subcmd_len;
+ nan_buf->count = 1;
+ nan_buf->is_set = false;
+
+ ret = wl_cfgnan_execute_ioctl(bcmcfg_to_prmry_ndev(cfg),
+ cfg, nan_buf, nan_buf_size, &status,
+ (void*)resp_buf, NAN_IOCTL_BUF_SIZE_LARGE);
+ if (unlikely(ret) || unlikely(status)) {
+ WL_ERR(("get nan stats failed ret %d status %d \n",
+ ret, status));
+ goto fail;
+ }
+
+ sub_cmd_resp = &((bcm_iov_batch_buf_t *)(resp_buf))->cmds[0];
+
+ stats = (wl_nan_cmn_stat_t *)&sub_cmd_resp->data[0];
+
+ if (stats->n_stats) {
+ WL_INFORM_MEM((" == Aware Local Avail Schedule ==\n"));
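+ /* Walk the returned xtlv list; the 8 bytes subtracted from totlen
+ * are assumed to cover the fixed stat fields preceding stats_tlvs
+ */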
+ ret = bcm_unpack_xtlv_buf((void *)&stats->n_stats,
+ (const uint8 *)&stats->stats_tlvs,
+ stats->totlen - 8, BCM_IOV_CMD_OPT_ALIGN32,
+ wl_nan_print_stats_tlvs);
+ }
+fail:
+ if (nan_buf) {
+ MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
+ }
+ if (resp_buf) {
+ MFREE(cfg->osh, resp_buf, NAN_IOCTL_BUF_SIZE_LARGE);
+ }
+
+ NAN_DBG_EXIT();
+ return ret;
+}
+
+int
+wl_cfgnan_attach(struct bcm_cfg80211 *cfg)
+{
+ int err = BCME_OK;
+ wl_nancfg_t *nancfg = NULL;
+
+ if (cfg) {
+ cfg->nancfg = (wl_nancfg_t *)MALLOCZ(cfg->osh, sizeof(wl_nancfg_t));
+ if (cfg->nancfg == NULL) {
+ err = BCME_NOMEM;
+ goto done;
+ }
+ cfg->nancfg->cfg = cfg;
+ } else {
+ err = BCME_BADARG;
+ goto done;
+ }
+
+ nancfg = cfg->nancfg;
+ mutex_init(&nancfg->nan_sync);
+ init_waitqueue_head(&nancfg->nan_event_wait);
+ INIT_DELAYED_WORK(&nancfg->nan_disable, wl_cfgnan_delayed_disable);
+ nancfg->nan_dp_state = NAN_DP_STATE_DISABLED;
+ init_waitqueue_head(&nancfg->ndp_if_change_event);
+
+done:
+ return err;
+
+}
+
+void
+wl_cfgnan_detach(struct bcm_cfg80211 *cfg)
+{
+ if (cfg && cfg->nancfg) {
+ if (delayed_work_pending(&cfg->nancfg->nan_disable)) {
+ WL_DBG(("Cancel nan_disable work\n"));
+ DHD_NAN_WAKE_UNLOCK(cfg->pub);
+ cancel_delayed_work_sync(&cfg->nancfg->nan_disable);
+ }
+ MFREE(cfg->osh, cfg->nancfg, sizeof(wl_nancfg_t));
+ cfg->nancfg = NULL;
+ }
+
+}
+#endif /* WL_NAN */
diff --git a/bcmdhd.101.10.361.x/wl_cfgnan.h b/bcmdhd.101.10.361.x/wl_cfgnan.h
new file mode 100755
index 0000000..be6a717
--- /dev/null
+++ b/bcmdhd.101.10.361.x/wl_cfgnan.h
@@ -0,0 +1,959 @@
+/*
+ * Neighbor Awareness Networking
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _wl_cfgnan_h_
+#define _wl_cfgnan_h_
+
+/* NAN struct versioning between DHD and HAL.
+ * Define a new version if any of the shared structs change.
+ */
+#define NAN_HAL_VERSION_1 0x2
+
+#define NAN_EVENT_BUFFER_SIZE_LARGE 1024u
+
+#define NAN_RANGE_EXT_CANCEL_SUPPORT_VER 2
+#define WL_NAN_IOV_BATCH_VERSION 0x8000
+#define WL_NAN_AVAIL_REPEAT_INTVL 0x0200
+#define WL_NAN_AVAIL_START_INTVL 160
+#define WL_NAN_AVAIL_DURATION_INTVL 336
+#define NAN_IOCTL_BUF_SIZE 256u
+#define NAN_IOCTL_BUF_SIZE_MED 512u
+#define NAN_IOCTL_BUF_SIZE_LARGE 1024u
+#define NAN_EVENT_NAME_MAX_LEN 40u
+#define NAN_RTT_IOVAR_BUF_SIZE 1024u
+#define WL_NAN_EVENT_CLEAR_BIT 32
+#define NAN_EVENT_MASK_ALL 0x7fffffff
+#define NAN_MAX_AWAKE_DW_INTERVAL 5
+#define NAN_MAXIMUM_ID_NUMBER 255
+#define NAN_MAXIMUM_MASTER_PREFERENCE 254
+#define NAN_ID_RESERVED 0
+#define NAN_ID_MIN 1
+#define NAN_ID_MAX 255
+#define NAN_DEF_SOCIAL_CHAN_2G 6
+#define NAN_DEF_SOCIAL_CHAN_5G 149
+#define NAN_DEF_SEC_SOCIAL_CHAN_5G 44
+#define NAN_MAX_SOCIAL_CHANNELS 3
+/* Keeping RSSI threshold value to be -70dBm */
+#define NAN_DEF_RSSI_NOTIF_THRESH -70
+/* Keeping default RSSI mid value to be -75dBm */
+#define NAN_DEF_RSSI_MID -75
+/* Keeping default RSSI close value to be -60dBm */
+#define NAN_DEF_RSSI_CLOSE -60
+#define WL_AVAIL_BIT_MAP "1111111111111111111111111111111100000000000000000000000000000000"
+#define WL_5G_AVAIL_BIT_MAP "0000000011111111111111111111111111111111000000000000000000000000"
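+/* each character above appears to encode one of 64 availability slots:
+ * the 2G map commits the first 32 slots, the 5G map slots 8-39
+ */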
+#define WL_AVAIL_CHANNEL_2G 6
+#define WL_AVAIL_BANDWIDTH_2G WL_CHANSPEC_BW_20
+#define WL_AVAIL_CHANNEL_5G 149
+#define WL_AVAIL_BANDWIDTH_5G WL_CHANSPEC_BW_80
+#define NAN_RANGING_PERIOD WL_AVAIL_PERIOD_1024
+#define NAN_SYNC_DEF_AWAKE_DW 1
+#define NAN_RNG_TERM_FLAG_NONE 0
+
+#define NAN_BLOOM_LENGTH_DEFAULT 240u
+#define NAN_SRF_MAX_MAC (NAN_BLOOM_LENGTH_DEFAULT / ETHER_ADDR_LEN)
+#define NAN_SRF_CTRL_FIELD_LEN 1u
+
+#define MAX_IF_ADD_WAIT_TIME 1000
+#define NAN_DP_ROLE_INITIATOR 0x0001
+#define NAN_DP_ROLE_RESPONDER 0x0002
+
+#define WL_NAN_OBUF_DATA_OFFSET (OFFSETOF(bcm_iov_batch_buf_t, cmds[0]) + \
+ OFFSETOF(bcm_iov_batch_subcmd_t, data[0]))
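+/* i.e. the byte offset of the first subcommand's payload within a
+ * batched iovar buffer (batch header plus subcommand header)
+ */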
+#define NAN_INVALID_ROLE(role) (role > WL_NAN_ROLE_ANCHOR_MASTER)
+#define NAN_INVALID_CHANSPEC(chanspec) ((chanspec == INVCHANSPEC) || \
+ (chanspec == 0))
+#define NAN_INVALID_EVENT(num) ((num < WL_NAN_EVENT_START) || \
+ (num >= WL_NAN_EVENT_INVALID))
+#define NAN_INVALID_PROXD_EVENT(num) (num != WLC_E_PROXD_NAN_EVENT)
+#define NAN_EVENT_BIT(event) (1U << (event - WL_NAN_EVENT_START))
+#define NAN_EVENT_MAP(event) ((event) - WL_NAN_EVENT_START)
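+/* e.g. NAN_EVENT_BIT(WL_NAN_EVENT_START) == 0x1, while NAN_EVENT_MAP()
+ * yields a zero-based index for tables keyed by event number
+ */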
+#define NAME_TO_STR(name) #name
+#define NAN_ID_CTRL_SIZE ((NAN_MAXIMUM_ID_NUMBER/8) + 1)
+
+#define tolower(c) bcm_tolower(c)
+
+#define NMR2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5], (a)[6], (a)[7]
+#define NMRSTR "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x"
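+/* usage sketch: WL_DBG(("NMR: " NMRSTR "\n", NMR2STR(nmr))) for an
+ * 8-byte NMR array
+ */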
+
+#define NAN_DBG_ENTER() {WL_DBG(("Enter\n"));}
+#define NAN_DBG_EXIT() {WL_DBG(("Exit\n"));}
+
+/* Service Control Type length */
+#define NAN_SVC_CONTROL_TYPE_MASK ((1 << NAN_SVC_CONTROL_TYPE_LEN) - 1)
+
+#ifndef strtoul
+#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
+#endif
+
+#define NAN_MAC_ADDR_LEN 6u
+#define NAN_DP_MAX_APP_INFO_LEN 512u
+
+#define NAN_SDE_CF_DP_REQUIRED (1 << 2)
+#define NAN_SDE_CF_DP_TYPE (1 << 3)
+#define NAN_SDE_CF_MULTICAST_TYPE (1 << 4)
+#define NAN_SDE_CF_SECURITY_REQUIRED (1 << 6)
+#define NAN_SDE_CF_RANGING_REQUIRED (1 << 7)
+#define NAN_SDE_CF_RANGE_PRESENT (1 << 8)
+
+#define CHECK_BIT(m, n) ((((m) >> (n)) & 1) ? 1 : 0)
+#define WL_NAN_EVENT_DIC_MAC_ADDR_BIT 0
+#define WL_NAN_EVENT_START_EVENT 1
+#define WL_NAN_EVENT_JOIN_EVENT 2
+
+/* Disabling svc-specific events (set as part of sub & pub calls) based on the bits below */
+#define WL_NAN_EVENT_SUPPRESS_TERMINATE_BIT 0
+#define WL_NAN_EVENT_SUPPRESS_MATCH_EXP_BIT 1
+#define WL_NAN_EVENT_SUPPRESS_RECEIVE_BIT 2
+#define WL_NAN_EVENT_SUPPRESS_REPLIED_BIT 3
+
+/* Disabling transmit followup events based on the bit below */
+#define WL_NAN_EVENT_SUPPRESS_FOLLOWUP_RECEIVE_BIT 0
+
+#define C2S(x) case x: id2str = #x
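+/* used inside a switch to map an id to its name, e.g.
+ * C2S(WL_NAN_EVENT_START); break;  (assumes a local 'id2str' variable)
+ */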
+#define NAN_MAX_PMK_LEN 32u
+#define NAN_ERROR_STR_LEN 255u
+
+/* NAN related Capabilities */
+#define MAX_CONCURRENT_NAN_CLUSTERS 1u
+#define MAX_PUBLISHES 8u
+#define MAX_SUBSCRIBES 8u
+#define MAX_SVC_NAME_LEN 255u
+#define MAX_MATCH_FILTER_LEN 255u
+#define MAX_TOTAL_MATCH_FILTER_LEN 510u
+#define NAN_MAX_SERVICE_SPECIFIC_INFO_LEN 255u
+#define NAN_MAX_NDI 3u
+#define MAX_NDP_SESSIONS 5u
+#define MAX_APP_INFO_LEN 255u
+#define MAX_QUEUED_TX_FOLLOUP_MSGS 10u
+#define MAX_SDEA_SVC_INFO_LEN 255u
+#define MAX_SUBSCRIBE_ADDRESS 10u
+#define CIPHER_SUITE_SUPPORTED 1u
+#define MAX_SCID_LEN 0u
+#define IS_NDP_SECURITY_SUPPORTED true
+#define NDP_SUPPORTED_BANDS 2u
+#define NAN_MAX_RANGING_INST 8u
+#define NAN_MAX_RANGING_SSN_ALLOWED 1u
+#define NAN_MAX_SVC_INST (MAX_PUBLISHES + MAX_SUBSCRIBES)
+#define NAN_SVC_INST_SIZE 32u
+#define NAN_START_STOP_TIMEOUT 5000u
+#define NAN_MAX_NDP_PEER 8u
+#define NAN_DISABLE_CMD_DELAY 530u
+#define NAN_WAKELOCK_TIMEOUT (NAN_DISABLE_CMD_DELAY + 100u)
+
+#define NAN_NMI_RAND_PVT_CMD_VENDOR		(1u << 31)
+#define NAN_NMI_RAND_CLUSTER_MERGE_ENAB (1 << 30)
+#define NAN_NMI_RAND_AUTODAM_LWT_MODE_ENAB (1 << 29)
+
+#ifdef WL_NAN_DEBUG
+#define NAN_MUTEX_LOCK() {WL_DBG(("Mutex Lock: Enter: %s\n", __FUNCTION__)); \
+ mutex_lock(&cfg->nancfg->nan_sync);}
+#define NAN_MUTEX_UNLOCK() {mutex_unlock(&cfg->nancfg->nan_sync); \
+ WL_DBG(("Mutex Unlock: Exit: %s\n", __FUNCTION__));}
+#else
+#define NAN_MUTEX_LOCK() {mutex_lock(&cfg->nancfg->nan_sync);}
+#define NAN_MUTEX_UNLOCK() {mutex_unlock(&cfg->nancfg->nan_sync);}
+#endif /* WL_NAN_DEBUG */
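+/* note: both lock variants above expect a local 'struct bcm_cfg80211 *cfg'
+ * to be in scope at the call site
+ */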
+#define NAN_ATTR_SUPPORT_2G_CONFIG (1<<0)
+#define NAN_ATTR_SYNC_DISC_2G_BEACON_CONFIG (1<<1)
+#define NAN_ATTR_SDF_2G_SUPPORT_CONFIG (1<<2)
+#define NAN_ATTR_SUPPORT_5G_CONFIG (1<<3)
+#define NAN_ATTR_SYNC_DISC_5G_BEACON_CONFIG (1<<4)
+#define NAN_ATTR_SDF_5G_SUPPORT_CONFIG (1<<5)
+#define NAN_ATTR_2G_DW_CONFIG (1<<6)
+#define NAN_ATTR_5G_DW_CONFIG (1<<7)
+#define NAN_ATTR_2G_CHAN_CONFIG (1<<8)
+#define NAN_ATTR_5G_CHAN_CONFIG (1<<9)
+#define NAN_ATTR_2G_DWELL_TIME_CONFIG (1<<10)
+#define NAN_ATTR_5G_DWELL_TIME_CONFIG (1<<11)
+#define NAN_ATTR_2G_SCAN_PERIOD_CONFIG (1<<12)
+#define NAN_ATTR_5G_SCAN_PERIOD_CONFIG (1<<13)
+#define NAN_ATTR_RSSI_CLOSE_CONFIG (1<<14)
+#define NAN_ATTR_RSSI_MIDDLE_2G_CONFIG (1<<15)
+#define NAN_ATTR_RSSI_PROXIMITY_2G_CONFIG (1<<16)
+#define NAN_ATTR_RSSI_CLOSE_5G_CONFIG (1<<17)
+#define NAN_ATTR_RSSI_MIDDLE_5G_CONFIG (1<<18)
+#define NAN_ATTR_RSSI_PROXIMITY_5G_CONFIG (1<<19)
+#define NAN_ATTR_RSSI_WINDOW_SIZE_CONFIG (1<<20)
+#define NAN_ATTR_HOP_COUNT_LIMIT_CONFIG (1<<21)
+#define NAN_ATTR_SID_BEACON_CONFIG (1<<22)
+#define NAN_ATTR_HOP_COUNT_FORCE_CONFIG (1<<23)
+#define NAN_ATTR_RAND_FACTOR_CONFIG (1<<24)
+#define NAN_ATTR_CLUSTER_VAL_CONFIG (1<<25)
+#define NAN_ATTR_IF_ADDR_CONFIG (1<<26)
+#define NAN_ATTR_OUI_CONFIG (1<<27)
+#define NAN_ATTR_SUB_SID_BEACON_CONFIG (1<<28)
+#define NAN_ATTR_DISC_BEACON_INTERVAL (1<<29)
+#define NAN_IOVAR_NAME_SIZE 4u
+#define NAN_XTLV_ID_LEN_SIZE OFFSETOF(bcm_xtlv_t, data)
+#define NAN_RANGING_INDICATE_CONTINUOUS_MASK 0x01
+#define NAN_RANGE_REQ_CMD 0
+#define NAN_RNG_REQ_ACCEPTED_BY_HOST 1
+#define NAN_RNG_REQ_REJECTED_BY_HOST 0
+
+#define NAN_RNG_REQ_ACCEPTED_BY_PEER 0
+#define NAN_RNG_REQ_REJECTED_BY_PEER 1
+
+#define NAN_RNG_GEOFENCE_MAX_RETRY_CNT 3u
+
+/*
+ * Discovery Beacon Interval config.
+ * Default is 128 msec when only 2G DWs are in use and
+ * 176 msec when both 2G and 5G DWs are in use.
+ */
+#define NAN_DISC_BCN_INTERVAL_2G_DEF 128u
+#define NAN_DISC_BCN_INTERVAL_5G_DEF 176u
+
+typedef uint32 nan_data_path_id;
+
+typedef enum nan_range_status {
+ NAN_RANGING_INVALID = 0,
+ NAN_RANGING_REQUIRED = 1,
+ NAN_RANGING_SETUP_IN_PROGRESS = 2,
+ NAN_RANGING_SESSION_IN_PROGRESS = 3
+} nan_range_status_t;
+
+typedef enum nan_range_role {
+ NAN_RANGING_ROLE_INVALID = 0,
+ NAN_RANGING_ROLE_INITIATOR = 1,
+ NAN_RANGING_ROLE_RESPONDER = 2
+} nan_range_role_t;
+
+typedef struct nan_svc_inst {
+ uint8 inst_id; /* publisher/subscriber id */
+ uint8 inst_type; /* publisher/subscriber */
+} nan_svc_inst_t;
+
+/* Range Status Flag bits for svc info */
+#define SVC_RANGE_REP_EVENT_ONCE 0x01
+
+#define NAN_RANGING_SETUP_IS_IN_PROG(status) \
+ ((status) == NAN_RANGING_SETUP_IN_PROGRESS)
+
+#define NAN_RANGING_IS_IN_PROG(status) \
+ (((status) == NAN_RANGING_SETUP_IN_PROGRESS) || \
+ ((status) == NAN_RANGING_SESSION_IN_PROGRESS))
+
+typedef struct nan_svc_info {
+ bool valid;
+ nan_data_path_id ndp_id[NAN_MAX_SVC_INST];
+ uint8 svc_hash[WL_NAN_SVC_HASH_LEN]; /* service hash */
+ uint8 svc_id;
+ uint8 ranging_required;
+ uint8 ranging_ind;
+ uint8 status;
+ uint32 ranging_interval;
+ uint32 ingress_limit;
+ uint32 egress_limit;
+ uint32 flags;
+ uint8 tx_match_filter[MAX_MATCH_FILTER_LEN]; /* TX match filter */
+ uint8 tx_match_filter_len;
+ uint8 svc_range_status; /* For managing any svc range status flags */
+} nan_svc_info_t;
+
+/* NAN Peer DP state */
+typedef enum {
+ NAN_PEER_DP_NOT_CONNECTED = 0,
+ NAN_PEER_DP_CONNECTING = 1,
+ NAN_PEER_DP_CONNECTED = 2
+} nan_peer_dp_state_t;
+
+typedef struct nan_ndp_peer {
+ uint8 peer_dp_state;
+ uint8 dp_count;
+ struct ether_addr peer_addr;
+} nan_ndp_peer_t;
+
+#define INVALID_DISTANCE 0xFFFFFFFF
+#define NAN_RTT_FTM_SSN_RETRIES 2
+
+typedef struct nan_ranging_inst {
+ uint8 range_id;
+ nan_range_status_t range_status;
+ struct ether_addr peer_addr;
+ int range_type;
+ uint8 num_svc_ctx;
+ nan_svc_info_t *svc_idx[MAX_SUBSCRIBES];
+ uint32 prev_distance_mm;
+ nan_range_role_t range_role;
+ bool in_use;
+ uint8 geof_retry_count;
+ uint8 ftm_ssn_retry_count;
+ bool role_concurrency_status;
+} nan_ranging_inst_t;
+
+#define DUMP_NAN_RTT_INST(inst)	{ printf("svc instance ID %d, ", (inst)->svc_inst_id); \
+	printf("Range ID %d, ", (inst)->range_id); \
+	printf("range_status %d, ", (inst)->range_status); \
+	printf("Range Type %d, ", (inst)->range_type); \
+	printf("Peer MAC "MACDBG"\n", MAC2STRDBG((inst)->peer_addr.octet)); \
+	}
+
+#define DUMP_NAN_RTT_RPT(rpt)	{ printf("Range ID %d, ", (rpt)->rng_id); \
+	printf("Distance in MM %d, ", (rpt)->dist_mm); \
+	printf("range_indication %d, ", (rpt)->indication); \
+	printf("Peer MAC "MACDBG"\n", MAC2STRDBG((rpt)->peer_m_addr.octet)); \
+	}
+/*
+ * Data request Initiator/Responder
+ * app/service related info
+ */
+typedef struct nan_data_path_app_info {
+ uint16 ndp_app_info_len;
+ uint8 ndp_app_info[NAN_DP_MAX_APP_INFO_LEN];
+} nan_data_path_app_info_t;
+
+/* QoS configuration */
+typedef enum {
+ NAN_DP_CONFIG_NO_QOS = 0,
+ NAN_DP_CONFIG_QOS
+} nan_data_path_qos_cfg_t;
+
+/* Data request Responder's response */
+typedef enum {
+ NAN_DP_REQUEST_ACCEPT = 0,
+ NAN_DP_REQUEST_REJECT
+} nan_data_path_response_code_t;
+
+/* NAN DP security Configuration */
+typedef enum {
+ NAN_DP_CONFIG_NO_SECURITY = 0,
+ NAN_DP_CONFIG_SECURITY
+} nan_data_path_security_cfg_status_t;
+
+/* NAN Security Key Input Type */
+typedef enum {
+ NAN_SECURITY_KEY_INPUT_PMK = 1,
+ NAN_SECURITY_KEY_INPUT_PASSPHRASE
+} nan_security_key_input_type;
+
+/* Configuration params of Data request Initiator/Responder */
+typedef struct nan_data_path_cfg {
+ /* Status Indicating Security/No Security */
+ nan_data_path_security_cfg_status_t security_cfg;
+ nan_data_path_qos_cfg_t qos_cfg;
+} nan_data_path_cfg_t;
+
+enum nan_dp_states {
+ NAN_DP_STATE_DISABLED = 0,
+ NAN_DP_STATE_ENABLED = 1
+};
+
+enum {
+ SRF_TYPE_BLOOM_FILTER = 0,
+ SRF_TYPE_SEQ_MAC_ADDR = 1
+};
+
+/* NAN Match indication type */
+typedef enum {
+ NAN_MATCH_ALG_MATCH_ONCE = 0,
+ NAN_MATCH_ALG_MATCH_CONTINUOUS = 1,
+ NAN_MATCH_ALG_MATCH_NEVER = 2
+} nan_match_alg;
+
+typedef struct nan_str_data {
+ uint32 dlen;
+ uint8 *data;
+} nan_str_data_t;
+
+typedef struct nan_mac_list {
+ uint32 num_mac_addr;
+ uint8 *list;
+} nan_mac_list_t;
+
+typedef struct wl_nan_sid_beacon_tune {
+ uint8 sid_enable; /* flag for sending service id in beacon */
+ uint8 sid_count; /* Limit for number of SIDs to be included in Beacons */
+ uint8 sub_sid_enable; /* flag for sending subscribe service id in beacon */
+	uint8 sub_sid_count;	/* Limit for number of Sub SIDs to be included in Beacons */
+} wl_nan_sid_beacon_ctrl_t;
+
+typedef struct nan_avail_cmd_data {
+ chanspec_t chanspec[NAN_MAX_SOCIAL_CHANNELS]; /* channel */
+ uint32 bmap; /* bitmap */
+ uint8 duration;
+ uint8 avail_period;
+ /* peer mac address reqd for ranging avail type */
+ struct ether_addr peer_nmi;
+ bool no_config_avail;
+} nan_avail_cmd_data;
+
+typedef struct nan_discover_cmd_data {
+ nan_str_data_t svc_info; /* service information */
+ nan_str_data_t sde_svc_info; /* extended service information */
+ nan_str_data_t svc_hash; /* service hash */
+ nan_str_data_t rx_match; /* matching filter rx */
+ nan_str_data_t tx_match; /* matching filter tx */
+ nan_str_data_t key; /* Security key information */
+ nan_str_data_t scid; /* security context information */
+ nan_data_path_cfg_t ndp_cfg;
+ struct ether_addr mac_addr; /* mac address */
+ nan_mac_list_t mac_list; /* mac list */
+ wl_nan_instance_id_t pub_id; /* publisher id */
+ wl_nan_instance_id_t sub_id; /* subscriber id */
+ wl_nan_instance_id_t local_id; /* Local id */
+ wl_nan_instance_id_t remote_id; /* Remote id */
+ uint32 status;
+ uint32 ttl; /* time to live */
+ uint32 period; /* publish period */
+ uint32 flags; /* Flag bits */
+ bool sde_control_config; /* whether sde_control present */
+ uint16 sde_control_flag;
+ uint16 token; /* transmit fup token id */
+ uint8 csid; /* cipher suite type */
+ nan_security_key_input_type key_type; /* cipher suite type */
+ uint8 priority; /* Priority of Transmit */
+ uint8 life_count; /* life count of the instance */
+ uint8 srf_type; /* SRF type */
+ uint8 srf_include; /* SRF include */
+ uint8 use_srf; /* use SRF */
+ uint8 recv_ind_flag; /* Receive Indication Flag */
+ uint8 disc_ind_cfg; /* Discovery Ind cfg */
+ uint8 ranging_indication;
+ uint32 ranging_intvl_msec; /* ranging interval in msec */
+ uint32 ingress_limit;
+ uint32 egress_limit;
+ bool response;
+ uint8 service_responder_policy;
+ bool svc_update;
+} nan_discover_cmd_data_t;
+
+typedef struct nan_datapath_cmd_data {
+ nan_avail_cmd_data avail_params; /* Avail config params */
+ nan_str_data_t svc_hash; /* service hash */
+ nan_str_data_t svc_info; /* service information */
+ nan_str_data_t key; /* security key information */
+ nan_data_path_response_code_t rsp_code;
+ nan_data_path_id ndp_instance_id;
+ nan_data_path_cfg_t ndp_cfg;
+ wl_nan_instance_id_t pub_id; /* publisher id */
+ nan_security_key_input_type key_type; /* cipher suite type */
+ struct ether_addr if_addr; /* if addr */
+ struct ether_addr mac_addr; /* mac address */
+ chanspec_t chanspec[NAN_MAX_SOCIAL_CHANNELS]; /* channel */
+ uint32 status;
+ uint32 bmap; /* bitmap */
+ uint16 service_instance_id;
+ uint16 sde_control_flag;
+ uint8 csid; /* cipher suite type */
+ uint8 peer_disc_mac_addr[ETHER_ADDR_LEN];
+ uint8 peer_ndi_mac_addr[ETHER_ADDR_LEN];
+ uint8 num_ndp_instances;
+ uint8 duration;
+ char ndp_iface[IFNAMSIZ+1];
+} nan_datapath_cmd_data_t;
+
+typedef struct nan_rssi_cmd_data {
+ int8 rssi_middle_2dot4g_val;
+ int8 rssi_close_2dot4g_val;
+ int8 rssi_proximity_2dot4g_val;
+ int8 rssi_proximity_5g_val;
+ int8 rssi_middle_5g_val;
+ int8 rssi_close_5g_val;
+ uint16 rssi_window_size; /* Window size over which rssi calculated */
+} nan_rssi_cmd_data_t;
+
+typedef struct election_metrics {
+ uint8 random_factor; /* Configured random factor */
+ uint8 master_pref; /* configured master preference */
+} election_metrics_t;
+
+typedef struct nan_awake_dws {
+ uint8 dw_interval_2g; /* 2G DW interval */
+ uint8 dw_interval_5g; /* 5G DW interval */
+} nan_awake_dws_t;
+
+typedef struct nan_config_cmd_data {
+ nan_rssi_cmd_data_t rssi_attr; /* RSSI related data */
+ election_metrics_t metrics;
+ nan_awake_dws_t awake_dws; /* Awake DWs */
+ nan_avail_cmd_data avail_params; /* Avail config params */
+ nan_str_data_t p2p_info; /* p2p information */
+ nan_str_data_t scid; /* security context information */
+ struct ether_addr clus_id; /* cluster id */
+ struct ether_addr mac_addr; /* mac address */
+ wl_nan_sid_beacon_ctrl_t sid_beacon; /* sending service id in beacon */
+ chanspec_t chanspec[NAN_MAX_SOCIAL_CHANNELS]; /* channel */
+ uint32 status;
+ uint32 bmap; /* bitmap */
+ uint32 nan_oui; /* configured nan oui */
+ uint32 warmup_time; /* Warm up time */
+ uint8 duration;
+ uint8 hop_count_limit; /* hop count limit */
+ uint8 support_5g; /* To decide dual band support */
+ uint8 support_2g; /* To decide dual band support */
+ uint8 beacon_2g_val;
+ uint8 beacon_5g_val;
+ uint8 sdf_2g_val;
+ uint8 sdf_5g_val;
+ uint8 dwell_time[NAN_MAX_SOCIAL_CHANNELS];
+ uint8 scan_period[NAN_MAX_SOCIAL_CHANNELS];
+ uint8 config_cluster_val;
+ uint8 disc_ind_cfg; /* Discovery Ind cfg */
+ uint8 csid; /* cipher suite type */
+ uint32 nmi_rand_intvl; /* nmi randomization interval */
+ uint32 use_ndpe_attr;
+ uint8 enable_merge;
+ uint16 cluster_low;
+ uint16 cluster_high;
+ wl_nan_disc_bcn_interval_t disc_bcn_interval;
+ uint32 dw_early_termination;
+} nan_config_cmd_data_t;
+
+typedef struct nan_event_hdr {
+ uint32 flags; /* future use */
+ uint16 event_subtype;
+} nan_event_hdr_t;
+
+typedef struct nan_event_data {
+ uint8 svc_name[WL_NAN_SVC_HASH_LEN]; /* service name */
+ uint8 enabled; /* NAN Enabled */
+ uint8 nan_de_evt_type; /* DE event type */
+ uint8 status; /* status */
+ uint8 ndp_id; /* data path instance id */
+ uint8 security; /* data path security */
+ uint8 type;
+ uint8 attr_num;
+ uint8 reason; /* reason */
+ wl_nan_instance_id_t pub_id; /* publisher id */
+ wl_nan_instance_id_t sub_id; /* subscriber id */
+ wl_nan_instance_id_t local_inst_id; /* local instance id */
+ wl_nan_instance_id_t requestor_id; /* Requestor instance id */
+ int publish_rssi; /* discovery rssi value */
+ int sub_rssi; /* Sub rssi value */
+ int fup_rssi; /* followup rssi */
+ uint16 attr_list_len; /* sizeof attributes attached to payload */
+ nan_str_data_t svc_info; /* service info */
+ nan_str_data_t vend_info; /* vendor info */
+ nan_str_data_t sde_svc_info; /* extended service information */
+ nan_str_data_t tx_match_filter; /* tx match filter */
+ nan_str_data_t rx_match_filter; /* rx match filter */
+ struct ether_addr local_nmi; /* local nmi */
+ struct ether_addr clus_id; /* cluster id */
+ struct ether_addr remote_nmi; /* remote nmi */
+ struct ether_addr initiator_ndi; /* initiator_ndi */
+ struct ether_addr responder_ndi; /* responder_ndi */
+ uint16 token; /* transmit fup token id */
+ uint8 peer_cipher_suite; /* peer cipher suite type */
+ nan_str_data_t scid; /* security context information */
+ char nan_reason[NAN_ERROR_STR_LEN]; /* Describe the NAN reason type */
+ uint16 sde_control_flag;
+ uint8 ranging_result_present;
+ uint32 range_measurement_cm;
+ uint32 ranging_ind;
+ uint8 rng_id;
+} nan_event_data_t;
+
+/*
+ * Various NAN Protocol Response codes
+ */
+typedef enum {
+ /* NAN Protocol Response Codes */
+ NAN_STATUS_SUCCESS = 0,
+ /* NAN Discovery Engine/Host driver failures */
+ NAN_STATUS_INTERNAL_FAILURE = 1,
+ /* NAN OTA failures */
+ NAN_STATUS_PROTOCOL_FAILURE = 2,
+ /* if the publish/subscribe id is invalid */
+ NAN_STATUS_INVALID_PUBLISH_SUBSCRIBE_ID = 3,
+ /* If we run out of resources allocated */
+ NAN_STATUS_NO_RESOURCE_AVAILABLE = 4,
+ /* if invalid params are passed */
+ NAN_STATUS_INVALID_PARAM = 5,
+ /* if the requestor instance id is invalid */
+ NAN_STATUS_INVALID_REQUESTOR_INSTANCE_ID = 6,
+ /* if the ndp id is invalid */
+ NAN_STATUS_INVALID_NDP_ID = 7,
+ /* if NAN is enabled when wifi is turned off */
+ NAN_STATUS_NAN_NOT_ALLOWED = 8,
+ /* if over the air ack is not received */
+ NAN_STATUS_NO_OTA_ACK = 9,
+	/* If NAN is already enabled and we try to re-enable it */
+ NAN_STATUS_ALREADY_ENABLED = 10,
+ /* If followup message internal queue is full */
+ NAN_STATUS_FOLLOWUP_QUEUE_FULL = 11,
+ /* Unsupported concurrency session enabled, NAN disabled notified */
+ NAN_STATUS_UNSUPPORTED_CONCURRENCY_NAN_DISABLED = 12
+} nan_status_type_t;
+
+typedef struct {
+ nan_status_type_t status;
+ char nan_reason[NAN_ERROR_STR_LEN]; /* Describe the NAN reason type */
+} nan_hal_status_t;
+
+typedef struct nan_parse_event_ctx {
+ struct bcm_cfg80211 *cfg;
+ nan_event_data_t *nan_evt_data;
+} nan_parse_event_ctx_t;
+
+/* Capabilities info supported by FW */
+typedef struct nan_hal_capabilities {
+ uint32 max_concurrent_nan_clusters;
+ uint32 max_publishes;
+ uint32 max_subscribes;
+ uint32 max_service_name_len;
+ uint32 max_match_filter_len;
+ uint32 max_total_match_filter_len;
+ uint32 max_service_specific_info_len;
+ uint32 max_vsa_data_len;
+ uint32 max_mesh_data_len;
+ uint32 max_ndi_interfaces;
+ uint32 max_ndp_sessions;
+ uint32 max_app_info_len;
+ uint32 max_queued_transmit_followup_msgs;
+ uint32 ndp_supported_bands;
+ uint32 cipher_suites_supported;
+ uint32 max_scid_len;
+ bool is_ndp_security_supported;
+ uint32 max_sdea_service_specific_info_len;
+ uint32 max_subscribe_address;
+ uint32 ndpe_attr_supported;
+} nan_hal_capabilities_t;
+
+typedef struct _nan_hal_resp {
+ uint16 instance_id;
+ uint16 subcmd;
+ int32 status;
+ int32 value;
+ /* Identifier for the instance of the NDP */
+ uint16 ndp_instance_id;
+ /* Publisher NMI */
+ uint8 pub_nmi[NAN_MAC_ADDR_LEN];
+ /* SVC_HASH */
+ uint8 svc_hash[WL_NAN_SVC_HASH_LEN];
+ char nan_reason[NAN_ERROR_STR_LEN]; /* Describe the NAN reason type */
+ char pad[3];
+ nan_hal_capabilities_t capabilities;
+} nan_hal_resp_t;
+
+typedef struct wl_nan_iov {
+ uint16 nan_iov_len;
+ uint8 *nan_iov_buf;
+} wl_nan_iov_t;
+
+#ifdef WL_NAN_DISC_CACHE
+
+#define NAN_MAX_CACHE_DISC_RESULT 16
+typedef struct {
+ bool valid;
+ wl_nan_instance_id_t pub_id;
+ wl_nan_instance_id_t sub_id;
+ uint8 svc_hash[WL_NAN_SVC_HASH_LEN];
+ struct ether_addr peer;
+ int8 publish_rssi;
+ uint8 peer_cipher_suite;
+ uint8 security;
+ nan_str_data_t svc_info; /* service info */
+ nan_str_data_t vend_info; /* vendor info */
+ nan_str_data_t sde_svc_info; /* extended service information */
+ nan_str_data_t tx_match_filter; /* tx match filter */
+ uint16 sde_control_flag;
+} nan_disc_result_cache;
+
+typedef struct nan_datapath_sec_info {
+ nan_data_path_id ndp_instance_id;
+ wl_nan_instance_id_t pub_id; /* publisher id */
+ struct ether_addr mac_addr; /* mac address */
+} nan_datapath_sec_info_cmd_data_t;
+#endif /* WL_NAN_DISC_CACHE */
+
+typedef enum {
+ NAN_RANGING_AUTO_RESPONSE_ENABLE = 0,
+ NAN_RANGING_AUTO_RESPONSE_DISABLE
+} NanRangingAutoResponseCfg;
+
+typedef struct wl_ndi_data
+{
+ u8 ifname[IFNAMSIZ];
+ u8 in_use;
+ u8 created;
+ struct net_device *nan_ndev;
+} wl_ndi_data_t;
+
+typedef struct wl_nancfg
+{
+ struct bcm_cfg80211 *cfg;
+ bool nan_enable;
+ nan_svc_inst_t nan_inst_ctrl[NAN_ID_CTRL_SIZE];
+ struct ether_addr initiator_ndi;
+ uint8 nan_dp_state;
+ bool nan_init_state; /* nan initialization state */
+ wait_queue_head_t ndp_if_change_event;
+ uint8 support_5g;
+ u8 nan_nmi_mac[ETH_ALEN];
+ u8 nan_dp_count;
+ struct delayed_work nan_disable;
+ int nan_disc_count;
+ nan_disc_result_cache *nan_disc_cache;
+ nan_svc_info_t svc_info[NAN_MAX_SVC_INST];
+ nan_ranging_inst_t nan_ranging_info[NAN_MAX_RANGING_INST];
+ wl_nan_ver_t version;
+ struct mutex nan_sync;
+ uint8 svc_inst_id_mask[NAN_SVC_INST_SIZE];
+ uint8 inst_id_start;
+ /* wait queue and condition variable for nan event */
+ bool nan_event_recvd;
+ wait_queue_head_t nan_event_wait;
+ bool notify_user;
+ bool mac_rand;
+ uint8 max_ndp_count; /* Max no. of NDPs */
+ nan_ndp_peer_t *nan_ndp_peer_info;
+ nan_data_path_id ndp_id[NAN_MAX_NDP_PEER];
+ uint8 ndpe_enabled;
+ uint8 max_ndi_supported;
+ wl_ndi_data_t *ndi;
+ bool ranging_enable;
+} wl_nancfg_t;
+
+bool wl_cfgnan_is_enabled(struct bcm_cfg80211 *cfg);
+int wl_cfgnan_check_nan_disable_pending(struct bcm_cfg80211 *cfg,
+	bool force_disable, bool is_sync_reqd);
+int wl_cfgnan_start_handler(struct net_device *ndev,
+ struct bcm_cfg80211 *cfg, nan_config_cmd_data_t *cmd_data, uint32 nan_attr_mask);
+int wl_cfgnan_stop_handler(struct net_device *ndev, struct bcm_cfg80211 *cfg);
+void wl_cfgnan_delayed_disable(struct work_struct *work);
+int wl_cfgnan_config_handler(struct net_device *ndev,
+ struct bcm_cfg80211 *cfg, nan_config_cmd_data_t *cmd_data, uint32 nan_attr_mask);
+int wl_cfgnan_support_handler(struct net_device *ndev,
+ struct bcm_cfg80211 *cfg, nan_config_cmd_data_t *cmd_data);
+int wl_cfgnan_status_handler(struct net_device *ndev,
+ struct bcm_cfg80211 *cfg, nan_config_cmd_data_t *cmd_data);
+int wl_cfgnan_publish_handler(struct net_device *ndev,
+ struct bcm_cfg80211 *cfg, nan_discover_cmd_data_t *cmd_data);
+int wl_cfgnan_subscribe_handler(struct net_device *ndev,
+ struct bcm_cfg80211 *cfg, nan_discover_cmd_data_t *cmd_data);
+int wl_cfgnan_cancel_pub_handler(struct net_device *ndev,
+ struct bcm_cfg80211 *cfg, nan_discover_cmd_data_t *cmd_data);
+int wl_cfgnan_cancel_sub_handler(struct net_device *ndev,
+ struct bcm_cfg80211 *cfg, nan_discover_cmd_data_t *cmd_data);
+int wl_cfgnan_transmit_handler(struct net_device *ndev,
+ struct bcm_cfg80211 *cfg, nan_discover_cmd_data_t *cmd_data);
+s32 wl_cfgnan_notify_nan_status(struct bcm_cfg80211 *cfg,
+ bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data);
+int wl_cfgnan_generate_inst_id(struct bcm_cfg80211 *cfg, uint8 *p_inst_id);
+int wl_cfgnan_remove_inst_id(struct bcm_cfg80211 *cfg, uint8 inst_id);
+int wl_cfgnan_get_capablities_handler(struct net_device *ndev,
+ struct bcm_cfg80211 *cfg, nan_hal_capabilities_t *capabilities);
+int wl_cfgnan_data_path_iface_create_delete_handler(struct net_device *ndev,
+ struct bcm_cfg80211 *cfg, char *ifname, uint16 type, uint8 busstate);
+int wl_cfgnan_data_path_request_handler(struct net_device *ndev,
+ struct bcm_cfg80211 *cfg, nan_datapath_cmd_data_t *cmd_data,
+ uint8 *ndp_instance_id);
+int wl_cfgnan_data_path_response_handler(struct net_device *ndev,
+ struct bcm_cfg80211 *cfg, nan_datapath_cmd_data_t *cmd_data);
+int wl_cfgnan_data_path_end_handler(struct net_device *ndev,
+ struct bcm_cfg80211 *cfg, nan_data_path_id ndp_instance_id,
+ int *status);
+const char * nan_event_to_str(u16 cmd);
+
+#ifdef WL_NAN_DISC_CACHE
+int wl_cfgnan_sec_info_handler(struct bcm_cfg80211 *cfg,
+ nan_datapath_sec_info_cmd_data_t *cmd_data, nan_hal_resp_t *nan_req_resp);
+/* ranging request and response iovar handler */
+#endif /* WL_NAN_DISC_CACHE */
+bool wl_cfgnan_is_dp_active(struct net_device *ndev);
+bool wl_cfgnan_data_dp_exists_with_peer(struct bcm_cfg80211 *cfg,
+ struct ether_addr *peer_addr);
+s32 wl_cfgnan_delete_ndp(struct bcm_cfg80211 *cfg, struct net_device *nan_ndev);
+int wl_cfgnan_set_enable_merge(struct net_device *ndev,
+ struct bcm_cfg80211 *cfg, uint8 enable, uint32 *status);
+int wl_cfgnan_attach(struct bcm_cfg80211 *cfg);
+void wl_cfgnan_detach(struct bcm_cfg80211 *cfg);
+int wl_cfgnan_get_status(struct net_device *ndev, wl_nan_conf_status_t *nan_status);
+
+#ifdef RTT_SUPPORT
+int wl_cfgnan_trigger_ranging(struct net_device *ndev,
+ struct bcm_cfg80211 *cfg, void *event_data, nan_svc_info_t *svc,
+ uint8 range_req, bool accept_req);
+nan_ranging_inst_t *wl_cfgnan_get_ranging_inst(struct bcm_cfg80211 *cfg,
+ struct ether_addr *peer, nan_range_role_t range_role);
+nan_ranging_inst_t* wl_cfgnan_check_for_ranging(struct bcm_cfg80211 *cfg,
+ struct ether_addr *peer);
+int wl_cfgnan_trigger_geofencing_ranging(struct net_device *dev,
+ struct ether_addr *peer_addr);
+int wl_cfgnan_suspend_geofence_rng_session(struct net_device *ndev,
+ struct ether_addr *peer, int suspend_reason, u8 cancel_flags);
+void wl_cfgnan_suspend_all_geofence_rng_sessions(struct net_device *ndev,
+ int suspend_reason, u8 cancel_flags);
+int wl_cfgnan_terminate_directed_rtt_sessions(struct net_device *ndev, struct bcm_cfg80211 *cfg);
+void wl_cfgnan_reset_geofence_ranging(struct bcm_cfg80211 *cfg,
+ nan_ranging_inst_t * rng_inst, int sched_reason, bool need_rtt_mutex);
+void wl_cfgnan_reset_geofence_ranging_for_cur_target(dhd_pub_t *dhd, int sched_reason);
+void wl_cfgnan_process_range_report(struct bcm_cfg80211 *cfg,
+ wl_nan_ev_rng_rpt_ind_t *range_res, int rtt_status);
+int wl_cfgnan_cancel_ranging(struct net_device *ndev,
+ struct bcm_cfg80211 *cfg, uint8 *range_id, uint8 flags, uint32 *status);
+bool wl_cfgnan_ranging_allowed(struct bcm_cfg80211 *cfg);
+uint8 wl_cfgnan_cancel_rng_responders(struct net_device *ndev);
+bool wl_cfgnan_check_role_concurrency(struct bcm_cfg80211 *cfg,
+ struct ether_addr *peer_addr);
+bool wl_cfgnan_update_geofence_target_idx(struct bcm_cfg80211 *cfg);
+bool wl_cfgnan_ranging_is_in_prog_for_peer(struct bcm_cfg80211 *cfg,
+ struct ether_addr *peer_addr);
+#endif /* RTT_SUPPORT */
+
+typedef enum {
+ NAN_ATTRIBUTE_HEADER = 100,
+ NAN_ATTRIBUTE_HANDLE = 101,
+ NAN_ATTRIBUTE_TRANSAC_ID = 102,
+
+ /* NAN Enable request attributes */
+ NAN_ATTRIBUTE_2G_SUPPORT = 103,
+ NAN_ATTRIBUTE_5G_SUPPORT = 104,
+ NAN_ATTRIBUTE_CLUSTER_LOW = 105,
+ NAN_ATTRIBUTE_CLUSTER_HIGH = 106,
+ NAN_ATTRIBUTE_SID_BEACON = 107,
+ NAN_ATTRIBUTE_SYNC_DISC_2G_BEACON = 108,
+ NAN_ATTRIBUTE_SYNC_DISC_5G_BEACON = 109,
+ NAN_ATTRIBUTE_SDF_2G_SUPPORT = 110,
+ NAN_ATTRIBUTE_SDF_5G_SUPPORT = 111,
+ NAN_ATTRIBUTE_RSSI_CLOSE = 112,
+ NAN_ATTRIBUTE_RSSI_MIDDLE = 113,
+ NAN_ATTRIBUTE_RSSI_PROXIMITY = 114,
+ NAN_ATTRIBUTE_HOP_COUNT_LIMIT = 115,
+ NAN_ATTRIBUTE_RANDOM_TIME = 116,
+ NAN_ATTRIBUTE_MASTER_PREF = 117,
+ NAN_ATTRIBUTE_PERIODIC_SCAN_INTERVAL = 118,
+
+ /* Nan Publish/Subscribe request attributes */
+ NAN_ATTRIBUTE_PUBLISH_ID = 119,
+ NAN_ATTRIBUTE_TTL = 120,
+ NAN_ATTRIBUTE_PERIOD = 121,
+ NAN_ATTRIBUTE_REPLIED_EVENT_FLAG = 122,
+ NAN_ATTRIBUTE_PUBLISH_TYPE = 123,
+ NAN_ATTRIBUTE_TX_TYPE = 124,
+ NAN_ATTRIBUTE_PUBLISH_COUNT = 125,
+ NAN_ATTRIBUTE_SERVICE_NAME_LEN = 126,
+ NAN_ATTRIBUTE_SERVICE_NAME = 127,
+ NAN_ATTRIBUTE_SERVICE_SPECIFIC_INFO_LEN = 128,
+ NAN_ATTRIBUTE_SERVICE_SPECIFIC_INFO = 129,
+ NAN_ATTRIBUTE_RX_MATCH_FILTER_LEN = 130,
+ NAN_ATTRIBUTE_RX_MATCH_FILTER = 131,
+ NAN_ATTRIBUTE_TX_MATCH_FILTER_LEN = 132,
+ NAN_ATTRIBUTE_TX_MATCH_FILTER = 133,
+ NAN_ATTRIBUTE_SUBSCRIBE_ID = 134,
+ NAN_ATTRIBUTE_SUBSCRIBE_TYPE = 135,
+ NAN_ATTRIBUTE_SERVICERESPONSEFILTER = 136,
+ NAN_ATTRIBUTE_SERVICERESPONSEINCLUDE = 137,
+ NAN_ATTRIBUTE_USESERVICERESPONSEFILTER = 138,
+ NAN_ATTRIBUTE_SSIREQUIREDFORMATCHINDICATION = 139,
+ NAN_ATTRIBUTE_SUBSCRIBE_MATCH = 140,
+ NAN_ATTRIBUTE_SUBSCRIBE_COUNT = 141,
+ NAN_ATTRIBUTE_MAC_ADDR = 142,
+ NAN_ATTRIBUTE_MAC_ADDR_LIST = 143,
+ NAN_ATTRIBUTE_MAC_ADDR_LIST_NUM_ENTRIES = 144,
+ NAN_ATTRIBUTE_PUBLISH_MATCH = 145,
+
+ /* Nan Event attributes */
+ NAN_ATTRIBUTE_ENABLE_STATUS = 146,
+ NAN_ATTRIBUTE_JOIN_STATUS = 147,
+ NAN_ATTRIBUTE_ROLE = 148,
+ NAN_ATTRIBUTE_MASTER_RANK = 149,
+ NAN_ATTRIBUTE_ANCHOR_MASTER_RANK = 150,
+ NAN_ATTRIBUTE_CNT_PEND_TXFRM = 151,
+ NAN_ATTRIBUTE_CNT_BCN_TX = 152,
+ NAN_ATTRIBUTE_CNT_BCN_RX = 153,
+ NAN_ATTRIBUTE_CNT_SVC_DISC_TX = 154,
+ NAN_ATTRIBUTE_CNT_SVC_DISC_RX = 155,
+ NAN_ATTRIBUTE_AMBTT = 156,
+ NAN_ATTRIBUTE_CLUSTER_ID = 157,
+ NAN_ATTRIBUTE_INST_ID = 158,
+ NAN_ATTRIBUTE_OUI = 159,
+ NAN_ATTRIBUTE_STATUS = 160,
+ NAN_ATTRIBUTE_DE_EVENT_TYPE = 161,
+ NAN_ATTRIBUTE_MERGE = 162,
+ NAN_ATTRIBUTE_IFACE = 163,
+ NAN_ATTRIBUTE_CHANNEL = 164,
+ NAN_ATTRIBUTE_PEER_ID = 165,
+ NAN_ATTRIBUTE_NDP_ID = 167,
+ NAN_ATTRIBUTE_SECURITY = 168,
+ NAN_ATTRIBUTE_QOS = 169,
+ NAN_ATTRIBUTE_RSP_CODE = 170,
+ NAN_ATTRIBUTE_INST_COUNT = 171,
+ NAN_ATTRIBUTE_PEER_DISC_MAC_ADDR = 172,
+ NAN_ATTRIBUTE_PEER_NDI_MAC_ADDR = 173,
+ NAN_ATTRIBUTE_IF_ADDR = 174,
+ NAN_ATTRIBUTE_WARMUP_TIME = 175,
+ NAN_ATTRIBUTE_RECV_IND_CFG = 176,
+ NAN_ATTRIBUTE_RSSI_CLOSE_5G = 177,
+ NAN_ATTRIBUTE_RSSI_MIDDLE_5G = 178,
+ NAN_ATTRIBUTE_RSSI_PROXIMITY_5G = 179,
+ NAN_ATTRIBUTE_CONNMAP = 180,
+ NAN_ATTRIBUTE_24G_CHANNEL = 181,
+ NAN_ATTRIBUTE_5G_CHANNEL = 182,
+ NAN_ATTRIBUTE_DWELL_TIME = 183,
+ NAN_ATTRIBUTE_SCAN_PERIOD = 184,
+ NAN_ATTRIBUTE_RSSI_WINDOW_SIZE = 185,
+ NAN_ATTRIBUTE_CONF_CLUSTER_VAL = 186,
+ NAN_ATTRIBUTE_AVAIL_BIT_MAP = 187,
+ NAN_ATTRIBUTE_ENTRY_CONTROL = 188,
+ NAN_ATTRIBUTE_CIPHER_SUITE_TYPE = 189,
+ NAN_ATTRIBUTE_KEY_TYPE = 190,
+ NAN_ATTRIBUTE_KEY_LEN = 191,
+ NAN_ATTRIBUTE_SCID = 192,
+ NAN_ATTRIBUTE_SCID_LEN = 193,
+ NAN_ATTRIBUTE_SDE_CONTROL_CONFIG_DP = 194,
+ NAN_ATTRIBUTE_SDE_CONTROL_SECURITY = 195,
+ NAN_ATTRIBUTE_SDE_CONTROL_DP_TYPE = 196,
+ NAN_ATTRIBUTE_SDE_CONTROL_RANGE_SUPPORT = 197,
+ NAN_ATTRIBUTE_NO_CONFIG_AVAIL = 198,
+ NAN_ATTRIBUTE_2G_AWAKE_DW = 199,
+ NAN_ATTRIBUTE_5G_AWAKE_DW = 200,
+ NAN_ATTRIBUTE_RANGING_INTERVAL = 201,
+ NAN_ATTRIBUTE_RANGING_INDICATION = 202,
+ NAN_ATTRIBUTE_RANGING_INGRESS_LIMIT = 203,
+ NAN_ATTRIBUTE_RANGING_EGRESS_LIMIT = 204,
+ NAN_ATTRIBUTE_RANGING_AUTO_ACCEPT = 205,
+ NAN_ATTRIBUTE_RANGING_RESULT = 206,
+ NAN_ATTRIBUTE_DISC_IND_CFG = 207,
+ NAN_ATTRIBUTE_RSSI_THRESHOLD_FLAG = 208,
+ NAN_ATTRIBUTE_KEY_DATA = 209,
+ NAN_ATTRIBUTE_SDEA_SERVICE_SPECIFIC_INFO_LEN = 210,
+ NAN_ATTRIBUTE_SDEA_SERVICE_SPECIFIC_INFO = 211,
+ NAN_ATTRIBUTE_REASON = 212,
+ NAN_ATTRIBUTE_DWELL_TIME_5G = 215,
+ NAN_ATTRIBUTE_SCAN_PERIOD_5G = 216,
+ NAN_ATTRIBUTE_SVC_RESPONDER_POLICY = 217,
+ NAN_ATTRIBUTE_EVENT_MASK = 218,
+ NAN_ATTRIBUTE_SUB_SID_BEACON = 219,
+ NAN_ATTRIBUTE_RANDOMIZATION_INTERVAL = 220,
+ NAN_ATTRIBUTE_CMD_RESP_DATA = 221,
+ NAN_ATTRIBUTE_CMD_USE_NDPE = 222,
+ NAN_ATTRIBUTE_ENABLE_MERGE = 223,
+ NAN_ATTRIBUTE_DISCOVERY_BEACON_INTERVAL = 224,
+ NAN_ATTRIBUTE_NSS = 225,
+ NAN_ATTRIBUTE_ENABLE_RANGING = 226,
+ NAN_ATTRIBUTE_DW_EARLY_TERM = 227
+} NAN_ATTRIBUTE;
+
+enum geofence_suspend_reason {
+ RTT_GEO_SUSPN_HOST_DIR_RTT_TRIG = 0,
+ RTT_GEO_SUSPN_PEER_RTT_TRIGGER = 1,
+ RTT_GEO_SUSPN_HOST_NDP_TRIGGER = 2,
+ RTT_GEO_SUSPN_PEER_NDP_TRIGGER = 3,
+ RTT_GEO_SUSPN_RANGE_RES_REPORTED = 4
+};
+#endif /* _wl_cfgnan_h_ */
diff --git a/bcmdhd.101.10.361.x/wl_cfgp2p.c b/bcmdhd.101.10.361.x/wl_cfgp2p.c
new file mode 100755
index 0000000..01a04f0
--- /dev/null
+++ b/bcmdhd.101.10.361.x/wl_cfgp2p.c
@@ -0,0 +1,2811 @@
+/*
+ * Linux cfgp2p driver
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ *
+ */
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/if_arp.h>
+#include <asm/uaccess.h>
+
+#include <bcmutils.h>
+#include <bcmstdlib_s.h>
+#include <bcmendian.h>
+#include <ethernet.h>
+#include <802.11.h>
+#include <net/rtnetlink.h>
+
+#include <wl_cfg80211.h>
+#include <wl_cfgp2p.h>
+#include <wl_cfgscan.h>
+#include <wl_cfgvif.h>
+#include <wldev_common.h>
+
+#ifdef OEM_ANDROID
+#include <wl_android.h>
+#endif
+
+#if defined(BCMDONGLEHOST)
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_linux.h>
+#include <dhdioctl.h>
+#include <wlioctl.h>
+#include <dhd_cfg80211.h>
+#include <dhd_bus.h>
+#endif /* defined(BCMDONGLEHOST) */
+#include <dhd_config.h>
+
+static s8 scanparambuf[WLC_IOCTL_MEDLEN];
+static bool wl_cfgp2p_has_ie(const bcm_tlv_t *ie, const u8 **tlvs, u32 *tlvs_len,
+ const u8 *oui, u32 oui_len, u8 type);
+
+static s32 wl_cfgp2p_cancel_listen(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ struct wireless_dev *wdev, bool notify);
+
+#if defined(WL_ENABLE_P2P_IF)
+static netdev_tx_t wl_cfgp2p_start_xmit(struct sk_buff *skb, struct net_device *ndev);
+static int wl_cfgp2p_do_ioctl(struct net_device *net, struct ifreq *ifr, int cmd);
+static int wl_cfgp2p_if_open(struct net_device *net);
+static int wl_cfgp2p_if_stop(struct net_device *net);
+
+static const struct net_device_ops wl_cfgp2p_if_ops = {
+ .ndo_open = wl_cfgp2p_if_open,
+ .ndo_stop = wl_cfgp2p_if_stop,
+ .ndo_do_ioctl = wl_cfgp2p_do_ioctl,
+ .ndo_start_xmit = wl_cfgp2p_start_xmit,
+};
+#endif /* WL_ENABLE_P2P_IF */
+
+#if defined(WL_NEWCFG_PRIVCMD_SUPPORT)
+static netdev_tx_t wl_cfgp2p_start_xmit(struct sk_buff *skb, struct net_device *ndev);
+static int wl_cfgp2p_do_ioctl(struct net_device *net, struct ifreq *ifr, int cmd);
+
+static int wl_cfgp2p_if_dummy(struct net_device *net)
+{
+ return 0;
+}
+
+static const struct net_device_ops wl_cfgp2p_if_ops = {
+ .ndo_open = wl_cfgp2p_if_dummy,
+ .ndo_stop = wl_cfgp2p_if_dummy,
+ .ndo_do_ioctl = wl_cfgp2p_do_ioctl,
+ .ndo_start_xmit = wl_cfgp2p_start_xmit,
+};
+#endif /* WL_NEWCFG_PRIVCMD_SUPPORT */
+
+bool wl_cfgp2p_is_pub_action(void *frame, u32 frame_len)
+{
+ wifi_p2p_pub_act_frame_t *pact_frm;
+
+ if (frame == NULL)
+ return false;
+ pact_frm = (wifi_p2p_pub_act_frame_t *)frame;
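+	/* the -1 below presumably allows for the trailing variable-length
+	 * element array in the frame struct being empty
+	 */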
+ if (frame_len < sizeof(wifi_p2p_pub_act_frame_t) -1)
+ return false;
+
+ if (pact_frm->category == P2P_PUB_AF_CATEGORY &&
+ pact_frm->action == P2P_PUB_AF_ACTION &&
+ pact_frm->oui_type == P2P_VER &&
+ memcmp(pact_frm->oui, P2P_OUI, sizeof(pact_frm->oui)) == 0) {
+ return true;
+ }
+
+ return false;
+}
+
+bool wl_cfgp2p_is_p2p_action(void *frame, u32 frame_len)
+{
+ wifi_p2p_action_frame_t *act_frm;
+
+ if (frame == NULL)
+ return false;
+ act_frm = (wifi_p2p_action_frame_t *)frame;
+ if (frame_len < sizeof(wifi_p2p_action_frame_t) -1)
+ return false;
+
+ if (act_frm->category == P2P_AF_CATEGORY &&
+ act_frm->type == P2P_VER &&
+ memcmp(act_frm->OUI, P2P_OUI, DOT11_OUI_LEN) == 0) {
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * Currently action frames are just passed to the P2P interface
+ * regardless of the real destination, but GAS action frames can
+ * also be used for Hotspot 2.0, so we need to distinguish whether
+ * a frame is for P2P or HS2.0.
+ */
+#define GAS_RESP_OFFSET 4
+#define GAS_CRESP_OFFSET 5
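+/* offsets into query_data: an initial response presumably carries a
+ * 2-byte status code plus a 2-byte comeback delay (4), and a comeback
+ * response an extra fragment id byte (5), before the data being matched
+ */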
+bool wl_cfgp2p_is_gas_action(void *frame, u32 frame_len)
+{
+ wifi_p2psd_gas_pub_act_frame_t *sd_act_frm;
+
+ if (frame == NULL)
+ return false;
+
+ sd_act_frm = (wifi_p2psd_gas_pub_act_frame_t *)frame;
+ if (frame_len < (sizeof(wifi_p2psd_gas_pub_act_frame_t) - 1))
+ return false;
+ if (sd_act_frm->category != P2PSD_ACTION_CATEGORY)
+ return false;
+
+ if (wl_cfg80211_is_dpp_gas_action(frame, frame_len)) {
+ return true;
+ }
+
+#ifdef WL11U
+	/* Hotspot 2.0 STA mode can receive only responses.
+	 * SoftAP mode cannot run a Hotspot 2.0 compliant AP because
+	 * Hotspot 2.0 supports only Enterprise mode.
+	 */
+ if (sd_act_frm->action == P2PSD_ACTION_ID_GAS_IRESP) {
+ return wl_cfg80211_find_gas_subtype(P2PSD_GAS_OUI_SUBTYPE, P2PSD_GAS_NQP_INFOID,
+ (u8 *)sd_act_frm->query_data + GAS_RESP_OFFSET,
+ frame_len);
+
+ } else if (sd_act_frm->action == P2PSD_ACTION_ID_GAS_CRESP) {
+ return wl_cfg80211_find_gas_subtype(P2PSD_GAS_OUI_SUBTYPE, P2PSD_GAS_NQP_INFOID,
+ (u8 *)sd_act_frm->query_data + GAS_CRESP_OFFSET,
+ frame_len);
+ } else if (sd_act_frm->action == P2PSD_ACTION_ID_GAS_IREQ ||
+ sd_act_frm->action == P2PSD_ACTION_ID_GAS_CREQ) {
+ return true;
+ } else {
+ return false;
+ }
+#else
+ if (sd_act_frm->action == P2PSD_ACTION_ID_GAS_IREQ ||
+ sd_act_frm->action == P2PSD_ACTION_ID_GAS_IRESP ||
+ sd_act_frm->action == P2PSD_ACTION_ID_GAS_CREQ ||
+ sd_act_frm->action == P2PSD_ACTION_ID_GAS_CRESP)
+ return true;
+ else
+ return false;
+#endif /* WL11U */
+}
+
+bool wl_cfgp2p_is_p2p_gas_action(void *frame, u32 frame_len)
+{
+ wifi_p2psd_gas_pub_act_frame_t *sd_act_frm;
+
+ if (frame == NULL)
+ return false;
+
+ sd_act_frm = (wifi_p2psd_gas_pub_act_frame_t *)frame;
+ if (frame_len < (sizeof(wifi_p2psd_gas_pub_act_frame_t) - 1))
+ return false;
+ if (sd_act_frm->category != P2PSD_ACTION_CATEGORY)
+ return false;
+
+ if (sd_act_frm->action == P2PSD_ACTION_ID_GAS_IREQ)
+ return wl_cfg80211_find_gas_subtype(P2PSD_GAS_OUI_SUBTYPE, P2PSD_GAS_NQP_INFOID,
+ (u8 *)sd_act_frm->query_data,
+ frame_len);
+ else
+ return false;
+}
+
+void wl_cfgp2p_print_actframe(bool tx, void *frame, u32 frame_len, u32 channel)
+{
+ wifi_p2p_pub_act_frame_t *pact_frm;
+ wifi_p2p_action_frame_t *act_frm;
+ wifi_p2psd_gas_pub_act_frame_t *sd_act_frm;
+ if (!frame || frame_len <= 2)
+ return;
+
+ channel = CHSPEC_CHANNEL(channel);
+ if (wl_cfgp2p_is_pub_action(frame, frame_len)) {
+ pact_frm = (wifi_p2p_pub_act_frame_t *)frame;
+ switch (pact_frm->subtype) {
+ case P2P_PAF_GON_REQ:
+ CFGP2P_ACTION(("%s P2P Group Owner Negotiation Req Frame,"
+ " channel=%d\n", (tx)? "TX": "RX", channel));
+ break;
+ case P2P_PAF_GON_RSP:
+ CFGP2P_ACTION(("%s P2P Group Owner Negotiation Rsp Frame,"
+ " channel=%d\n", (tx)? "TX": "RX", channel));
+ break;
+ case P2P_PAF_GON_CONF:
+ CFGP2P_ACTION(("%s P2P Group Owner Negotiation Confirm Frame,"
+ " channel=%d\n", (tx)? "TX": "RX", channel));
+ break;
+ case P2P_PAF_INVITE_REQ:
+ CFGP2P_ACTION(("%s P2P Invitation Request Frame,"
+ " channel=%d\n", (tx)? "TX": "RX", channel));
+ break;
+ case P2P_PAF_INVITE_RSP:
+ CFGP2P_ACTION(("%s P2P Invitation Response Frame,"
+ " channel=%d\n", (tx)? "TX": "RX", channel));
+ break;
+ case P2P_PAF_DEVDIS_REQ:
+ CFGP2P_ACTION(("%s P2P Device Discoverability Request Frame,"
+ " channel=%d\n", (tx)? "TX": "RX", channel));
+ break;
+ case P2P_PAF_DEVDIS_RSP:
+ CFGP2P_ACTION(("%s P2P Device Discoverability Response Frame,"
+ " channel=%d\n", (tx)? "TX": "RX", channel));
+ break;
+ case P2P_PAF_PROVDIS_REQ:
+ CFGP2P_ACTION(("%s P2P Provision Discovery Request Frame,"
+ " channel=%d\n", (tx)? "TX": "RX", channel));
+ break;
+ case P2P_PAF_PROVDIS_RSP:
+ CFGP2P_ACTION(("%s P2P Provision Discovery Response Frame,"
+ " channel=%d\n", (tx)? "TX": "RX", channel));
+ break;
+ default:
+ CFGP2P_ACTION(("%s Unknown Public Action Frame,"
+ " channel=%d\n", (tx)? "TX": "RX", channel));
+
+ }
+ } else if (wl_cfgp2p_is_p2p_action(frame, frame_len)) {
+ act_frm = (wifi_p2p_action_frame_t *)frame;
+ switch (act_frm->subtype) {
+ case P2P_AF_NOTICE_OF_ABSENCE:
+ CFGP2P_ACTION(("%s P2P Notice of Absence Frame,"
+ " channel=%d\n", (tx)? "TX": "RX", channel));
+ break;
+ case P2P_AF_PRESENCE_REQ:
+ CFGP2P_ACTION(("%s P2P Presence Request Frame,"
+ " channel=%d\n", (tx)? "TX": "RX", channel));
+ break;
+ case P2P_AF_PRESENCE_RSP:
+ CFGP2P_ACTION(("%s P2P Presence Response Frame,"
+ " channel=%d\n", (tx)? "TX": "RX", channel));
+ break;
+ case P2P_AF_GO_DISC_REQ:
+ CFGP2P_ACTION(("%s P2P Discoverability Request Frame,"
+ " channel=%d\n", (tx)? "TX": "RX", channel));
+ break;
+ default:
+ CFGP2P_ACTION(("%s Unknown P2P Action Frame,"
+ " channel=%d\n", (tx)? "TX": "RX", channel));
+ }
+
+ } else if (wl_cfg80211_is_dpp_frame(frame, frame_len)) {
+ wl_dpp_pa_frame_t *pa = (wl_dpp_pa_frame_t *)frame;
+ CFGP2P_ACTION(("%s %s, channel=%d\n",
+ (tx) ? "TX" : "RX", get_dpp_pa_ftype(pa->ftype), channel));
+ } else if (wl_cfgp2p_is_gas_action(frame, frame_len)) {
+ sd_act_frm = (wifi_p2psd_gas_pub_act_frame_t *)frame;
+ switch (sd_act_frm->action) {
+ case P2PSD_ACTION_ID_GAS_IREQ:
+ CFGP2P_ACTION(("%s GAS Initial Request,"
+ " channel=%d\n", (tx)? "TX" : "RX", channel));
+ break;
+ case P2PSD_ACTION_ID_GAS_IRESP:
+ CFGP2P_ACTION(("%s GAS Initial Response,"
+ " channel=%d\n", (tx)? "TX" : "RX", channel));
+ break;
+ case P2PSD_ACTION_ID_GAS_CREQ:
+ CFGP2P_ACTION(("%s GAS Comback Request,"
+ " channel=%d\n", (tx)? "TX" : "RX", channel));
+ break;
+ case P2PSD_ACTION_ID_GAS_CRESP:
+ CFGP2P_ACTION(("%s GAS Comback Response,"
+ " channel=%d\n", (tx)? "TX" : "RX", channel));
+ break;
+ default:
+ CFGP2P_ACTION(("%s Unknown GAS Frame,"
+ " channel=%d\n", (tx)? "TX" : "RX", channel));
+ }
+ }
+}
+
+/*
+ * Initialize variables related to P2P
+ *
+ */
+s32
+wl_cfgp2p_init_priv(struct bcm_cfg80211 *cfg)
+{
+ struct ether_addr primary_mac;
+ cfg->p2p = MALLOCZ(cfg->osh, sizeof(struct p2p_info));
+ if (cfg->p2p == NULL) {
+ CFGP2P_ERR(("struct p2p_info allocation failed\n"));
+ return -ENOMEM;
+ }
+
+ get_primary_mac(cfg, &primary_mac);
+ wl_cfgp2p_generate_bss_mac(cfg, &primary_mac);
+
+ wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_PRIMARY) = bcmcfg_to_prmry_ndev(cfg);
+ wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_PRIMARY) = 0;
+ wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE) = NULL;
+ wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) = 0;
+ wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION1) = NULL;
+ wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION1) = -1;
+ wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION2) = NULL;
+ wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION2) = -1;
+ return BCME_OK;
+
+}
+/*
+ * Deinitialize variables related to P2P
+ *
+ */
+void
+wl_cfgp2p_deinit_priv(struct bcm_cfg80211 *cfg)
+{
+ CFGP2P_INFO(("In\n"));
+ if (cfg->p2p) {
+ MFREE(cfg->osh, cfg->p2p, sizeof(struct p2p_info));
+ cfg->p2p = NULL;
+ }
+ cfg->p2p_supported = 0;
+}
+/*
+ * Set P2P functions into firmware
+ */
+s32
+wl_cfgp2p_set_firm_p2p(struct bcm_cfg80211 *cfg)
+{
+ struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+ s32 ret = BCME_OK;
+ s32 val = 0;
+ struct ether_addr *p2p_dev_addr = wl_to_p2p_bss_macaddr(cfg, P2PAPI_BSSCFG_DEVICE);
+
+ if (ETHER_ISNULLADDR(p2p_dev_addr)) {
+ CFGP2P_ERR(("NULL p2p_dev_addr\n"));
+ return BCME_BADADDR;
+ }
+
+ /* Do we have to check whether APSTA is enabled or not ? */
+ ret = wldev_iovar_getint(ndev, "apsta", &val);
+ if (ret < 0) {
+ CFGP2P_ERR(("get apsta error %d\n", ret));
+ return ret;
+ }
+ if (val == 0) {
+ val = 1;
+ ret = wldev_ioctl_set(ndev, WLC_DOWN, &val, sizeof(s32));
+ if (ret < 0) {
+ CFGP2P_ERR(("WLC_DOWN error %d\n", ret));
+ return ret;
+ }
+
+ ret = wldev_iovar_setint(ndev, "apsta", val);
+ if (ret < 0) {
+ /* return error and fail the initialization */
+ CFGP2P_ERR(("wl apsta %d set error. ret: %d\n", val, ret));
+ return ret;
+ }
+
+ ret = wldev_ioctl_set(ndev, WLC_UP, &val, sizeof(s32));
+ if (ret < 0) {
+ CFGP2P_ERR(("WLC_UP error %d\n", ret));
+ return ret;
+ }
+ }
+
+	/* In case of COB type, the firmware has a default MAC address.
+	 * After initializing the firmware, we have to set the current MAC
+	 * address in the firmware as the P2P device address.
+	 */
+ ret = wldev_iovar_setbuf_bsscfg(ndev, "p2p_da_override", p2p_dev_addr,
+ sizeof(*p2p_dev_addr), cfg->ioctl_buf, WLC_IOCTL_MAXLEN, 0, &cfg->ioctl_buf_sync);
+ if (ret && ret != BCME_UNSUPPORTED) {
+ CFGP2P_ERR(("failed to update device address ret %d\n", ret));
+ }
+ return ret;
+}
+
+int wl_cfg_multip2p_operational(struct bcm_cfg80211 *cfg)
+{
+ if (!cfg->p2p) {
+ CFGP2P_DBG(("p2p not enabled! \n"));
+ return false;
+ }
+
+ if ((wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION1) != -1) &&
+ (wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION2) != -1))
+ return true;
+ else
+ return false;
+}
+
+/* Create a new P2P BSS.
+ * Parameters:
+ * @mac : MAC address of the BSS to create
+ * @if_type : interface type: WL_P2P_IF_GO or WL_P2P_IF_CLIENT
+ * @chspec : chspec to use if creating a GO BSS.
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_ifadd(struct bcm_cfg80211 *cfg, struct ether_addr *mac, u8 if_type,
+ chanspec_t chspec)
+{
+ wl_p2p_if_t ifreq;
+ s32 err;
+ struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+
+ ifreq.type = if_type;
+ ifreq.chspec = chspec;
+ memcpy(ifreq.addr.octet, mac->octet, sizeof(ifreq.addr.octet));
+
+ CFGP2P_ERR(("---cfg p2p_ifadd "MACDBG" %s %u\n",
+ MAC2STRDBG(ifreq.addr.octet),
+ (if_type == WL_P2P_IF_GO) ? "go" : "client",
+ (chspec & WL_CHANSPEC_CHAN_MASK) >> WL_CHANSPEC_CHAN_SHIFT));
+
+ err = wldev_iovar_setbuf(ndev, "p2p_ifadd", &ifreq, sizeof(ifreq),
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+ if (unlikely(err < 0)) {
+ CFGP2P_ERR(("'cfg p2p_ifadd' error %d\n", err));
+ return err;
+ }
+
+ return err;
+}
+
+/* Disable a P2P BSS.
+ * Parameters:
+ * @mac : MAC address of the BSS to disable
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_ifdisable(struct bcm_cfg80211 *cfg, struct ether_addr *mac)
+{
+ s32 ret;
+ struct net_device *netdev = bcmcfg_to_prmry_ndev(cfg);
+
+ CFGP2P_INFO(("------ cfg p2p_ifdis "MACDBG" dev->ifindex:%d \n",
+ MAC2STRDBG(mac->octet), netdev->ifindex));
+ ret = wldev_iovar_setbuf(netdev, "p2p_ifdis", mac, sizeof(*mac),
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+ if (unlikely(ret < 0)) {
+ CFGP2P_ERR(("'cfg p2p_ifdis' error %d\n", ret));
+ }
+ return ret;
+}
+
+/* Delete a P2P BSS.
+ * Parameters:
+ * @mac : MAC address of the BSS to delete
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_ifdel(struct bcm_cfg80211 *cfg, struct ether_addr *mac)
+{
+ s32 ret;
+ struct net_device *netdev = bcmcfg_to_prmry_ndev(cfg);
+
+ CFGP2P_ERR(("------ cfg p2p_ifdel "MACDBG" dev->ifindex:%d\n",
+ MAC2STRDBG(mac->octet), netdev->ifindex));
+ ret = wldev_iovar_setbuf(netdev, "p2p_ifdel", mac, sizeof(*mac),
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+ if (unlikely(ret < 0)) {
+ CFGP2P_ERR(("'cfg p2p_ifdel' error %d\n", ret));
+ }
+
+ return ret;
+}
+
+/* Change a P2P Role.
+ * Parameters:
+ * @mac : MAC address of the BSS to change a role
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_ifchange(struct bcm_cfg80211 *cfg, struct ether_addr *mac, u8 if_type,
+ chanspec_t chspec, s32 conn_idx)
+{
+ wl_p2p_if_t ifreq;
+ s32 err;
+
+ struct net_device *netdev = wl_to_p2p_bss_ndev(cfg, conn_idx);
+
+ ifreq.type = if_type;
+ ifreq.chspec = chspec;
+ memcpy(ifreq.addr.octet, mac->octet, sizeof(ifreq.addr.octet));
+
+ CFGP2P_INFO(("---cfg p2p_ifchange "MACDBG" %s %u"
+ " chanspec 0x%04x\n", MAC2STRDBG(ifreq.addr.octet),
+ (if_type == WL_P2P_IF_GO) ? "go" : "client",
+ (chspec & WL_CHANSPEC_CHAN_MASK) >> WL_CHANSPEC_CHAN_SHIFT,
+ ifreq.chspec));
+
+ err = wldev_iovar_setbuf(netdev, "p2p_ifupd", &ifreq, sizeof(ifreq),
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+ if (unlikely(err < 0)) {
+ CFGP2P_ERR(("'cfg p2p_ifupd' error %d\n", err));
+ } else if (if_type == WL_P2P_IF_GO) {
+ cfg->p2p->p2p_go_count++;
+ }
+ return err;
+}
+
+/* Get the index of a created P2P BSS.
+ * Parameters:
+ * @mac : MAC address of the created BSS
+ * @index : output: index of created BSS
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_ifidx(struct bcm_cfg80211 *cfg, struct ether_addr *mac, s32 *index)
+{
+ s32 ret;
+ u8 getbuf[64];
+ struct net_device *dev = bcmcfg_to_prmry_ndev(cfg);
+
+ CFGP2P_INFO(("---cfg p2p_if "MACDBG"\n", MAC2STRDBG(mac->octet)));
+
+ ret = wldev_iovar_getbuf_bsscfg(dev, "p2p_if", mac, sizeof(*mac), getbuf,
+ sizeof(getbuf), wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_PRIMARY), NULL);
+
+ if (ret == 0) {
+ memcpy(index, getbuf, sizeof(s32));
+ CFGP2P_DBG(("---cfg p2p_if ==> %d\n", *index));
+ }
+
+ return ret;
+}
+
+static s32
+wl_cfgp2p_set_discovery(struct bcm_cfg80211 *cfg, s32 on)
+{
+ s32 ret = BCME_OK;
+ struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+ CFGP2P_DBG(("enter\n"));
+
+ ret = wldev_iovar_setint(ndev, "p2p_disc", on);
+
+ if (unlikely(ret < 0)) {
+ CFGP2P_ERR(("p2p_disc %d error %d\n", on, ret));
+ }
+
+ return ret;
+}
+
+/* Set the WL driver's P2P mode.
+ * Parameters :
+ * @mode : one of WL_P2P_DISC_ST_{SCAN,LISTEN,SEARCH}.
+ * @channel : the channel to listen on
+ * @listen_ms : the time (in milliseconds) to wait
+ * @bssidx : bss index for BSSCFG
+ * Returns 0 if success
+ */
+
+s32
+wl_cfgp2p_set_p2p_mode(struct bcm_cfg80211 *cfg, u8 mode, u32 channel, u16 listen_ms, int bssidx)
+{
+ wl_p2p_disc_st_t discovery_mode;
+ s32 ret;
+ struct net_device *dev;
+ CFGP2P_DBG(("enter\n"));
+
+ if (unlikely(bssidx == WL_INVALID)) {
+ CFGP2P_ERR((" %d index out of range\n", bssidx));
+ return -1;
+ }
+
+ dev = wl_cfgp2p_find_ndev(cfg, bssidx);
+ if (unlikely(dev == NULL)) {
+ CFGP2P_ERR(("bssidx %d is not assigned\n", bssidx));
+ return BCME_NOTFOUND;
+ }
+
+#ifdef P2PLISTEN_AP_SAMECHN
+ CFGP2P_DBG(("p2p0 listen channel %d AP connection chan %d \n",
+ channel, cfg->channel));
+ if ((mode == WL_P2P_DISC_ST_LISTEN) && (cfg->channel == channel)) {
+ struct net_device *primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+
+ if (cfg->p2p_resp_apchn_status) {
+ CFGP2P_DBG(("p2p_resp_apchn_status already ON \n"));
+ return BCME_OK;
+ }
+
+ if (wl_get_drv_status(cfg, CONNECTED, primary_ndev)) {
+ ret = wl_cfg80211_set_p2p_resp_ap_chn(primary_ndev, 1);
+ cfg->p2p_resp_apchn_status = true;
+ CFGP2P_DBG(("p2p_resp_apchn_status ON \n"));
+ return ret;
+ }
+ }
+#endif /* P2PLISTEN_AP_SAMECHN */
+
+ /* Put the WL driver into P2P Listen Mode to respond to P2P probe reqs */
+ discovery_mode.state = mode;
+ discovery_mode.chspec = wl_ch_host_to_driver(channel);
+ discovery_mode.dwell = listen_ms;
+ ret = wldev_iovar_setbuf_bsscfg(dev, "p2p_state", &discovery_mode,
+ sizeof(discovery_mode), cfg->ioctl_buf, WLC_IOCTL_MAXLEN,
+ bssidx, &cfg->ioctl_buf_sync);
+
+ return ret;
+}
+
+/* Get the index of the P2P Discovery BSS */
+static s32
+wl_cfgp2p_get_disc_idx(struct bcm_cfg80211 *cfg, s32 *index)
+{
+ s32 ret;
+ struct net_device *dev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_PRIMARY);
+
+ ret = wldev_iovar_getint(dev, "p2p_dev", index);
+ CFGP2P_INFO(("p2p_dev bsscfg_idx=%d ret=%d\n", *index, ret));
+
+ if (unlikely(ret < 0)) {
+ CFGP2P_ERR(("'p2p_dev' error %d\n", ret));
+ return ret;
+ }
+ return ret;
+}
+
+int wl_cfgp2p_get_conn_idx(struct bcm_cfg80211 *cfg)
+{
+ int i;
+ s32 connected_cnt;
+#if defined(BCMDONGLEHOST)
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+ if (!dhd)
+ return (-ENODEV);
+#endif /* BCMDONGLEHOST */
+ for (i = P2PAPI_BSSCFG_CONNECTION1; i < P2PAPI_BSSCFG_MAX; i++) {
+ if (wl_to_p2p_bss_bssidx(cfg, i) == -1) {
+ if (i == P2PAPI_BSSCFG_CONNECTION2) {
+#if defined(BCMDONGLEHOST)
+ if (!(dhd->op_mode & DHD_FLAG_MP2P_MODE)) {
+ CFGP2P_ERR(("Multi p2p not supported"));
+ return BCME_ERROR;
+ }
+#endif /* BCMDONGLEHOST */
+ if ((connected_cnt = wl_get_drv_status_all(cfg, CONNECTED)) > 1) {
+				CFGP2P_ERR(("Failed to create second p2p interface;"
+					" already one connection exists\n"));
+ return BCME_ERROR;
+ }
+ }
+ return i;
+ }
+ }
+ return BCME_ERROR;
+}
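+
+/* Behaviour note (added for clarity, not part of the original change): the
+ * loop above hands out the first free connection slot. The second slot
+ * (P2PAPI_BSSCFG_CONNECTION2) is only granted when the firmware runs in
+ * multi-p2p mode (DHD_FLAG_MP2P_MODE) and no other connection exists yet;
+ * otherwise BCME_ERROR is returned.
+ */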
+
+s32
+wl_cfgp2p_init_discovery(struct bcm_cfg80211 *cfg)
+{
+ s32 bssidx = 0;
+ s32 ret = BCME_OK;
+ struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+
+ BCM_REFERENCE(ndev);
+ CFGP2P_DBG(("enter\n"));
+
+ if (wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) > 0) {
+ CFGP2P_ERR(("do nothing, already initialized\n"));
+ goto exit;
+ }
+
+ ret = wl_cfgp2p_set_discovery(cfg, 1);
+ if (ret < 0) {
+ CFGP2P_ERR(("set discover error\n"));
+ goto exit;
+ }
+ /* Get the bsscfg index of the P2P Discovery BSS just enabled */
+ ret = wl_cfgp2p_get_disc_idx(cfg, &bssidx);
+ if (ret < 0) {
+ goto exit;
+ }
+
+ /* For the CFG80211 case, check whether the p2p_discovery interface has an allocated p2p_wdev */
+ if (!cfg->p2p_wdev) {
+ CFGP2P_ERR(("p2p_wdev is NULL.\n"));
+ ret = -ENODEV;
+ goto exit;
+ }
+
+ /* Once p2p also starts using the interface_create iovar, the ifidx may change.
+ * At that point, the ifidx returned in WLC_E_IF should be used for populating
+ * the netinfo.
+ */
+ ret = wl_alloc_netinfo(cfg, NULL, cfg->p2p_wdev, WL_IF_TYPE_STA, 0, bssidx, 0);
+ if (unlikely(ret)) {
+ goto exit;
+ }
+ wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE) =
+ wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_PRIMARY);
+ wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) = bssidx;
+
+ /* Set the initial discovery state to SCAN */
+ ret = wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0,
+ wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE));
+
+ if (unlikely(ret != 0)) {
+ CFGP2P_ERR(("unable to set WL_P2P_DISC_ST_SCAN\n"));
+ wl_cfgp2p_set_discovery(cfg, 0);
+ wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) = 0;
+ wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE) = NULL;
+ ret = 0;
+ goto exit;
+ }
+
+ /* Clear our saved WPS and P2P IEs for the discovery BSS */
+ wl_cfg80211_clear_p2p_disc_ies(cfg);
+exit:
+ if (ret) {
+ wl_flush_fw_log_buffer(ndev, FW_LOGSET_MASK_ALL);
+ }
+ return ret;
+}
+
+/* Deinitialize P2P Discovery
+ * Parameters :
+ * @cfg : wl_private data
+ * Returns 0 on success
+ */
+static s32
+wl_cfgp2p_deinit_discovery(struct bcm_cfg80211 *cfg)
+{
+ s32 ret = BCME_OK;
+ s32 bssidx;
+
+ CFGP2P_DBG(("enter\n"));
+ bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+ if (bssidx <= 0) {
+ CFGP2P_ERR(("do nothing, not initialized\n"));
+ return -1;
+ }
+
+ /* Clear our saved WPS and P2P IEs for the discovery BSS */
+ wl_cfg80211_clear_p2p_disc_ies(cfg);
+
+ /* Set the discovery state to SCAN */
+ wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0,
+ bssidx);
+ /* Disable P2P discovery in the WL driver (deletes the discovery BSSCFG) */
+ ret = wl_cfgp2p_set_discovery(cfg, 0);
+
+ /* Remove the p2p disc entry in the netinfo */
+ wl_dealloc_netinfo_by_wdev(cfg, cfg->p2p_wdev);
+
+ wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) = WL_INVALID;
+ wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE) = NULL;
+
+ return ret;
+}
+/* Enable P2P Discovery
+ * Parameters:
+ * @cfg : wl_private data
+ * @ie : probe request ie (WPS IE + P2P IE)
+ * @ie_len : probe request ie length
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_enable_discovery(struct bcm_cfg80211 *cfg, struct net_device *dev,
+ const u8 *ie, u32 ie_len)
+{
+ s32 ret = BCME_OK;
+ s32 bssidx;
+ bcm_struct_cfgdev *cfgdev;
+
+ CFGP2P_DBG(("enter\n"));
+ mutex_lock(&cfg->if_sync);
+#ifdef WL_IFACE_MGMT
+ if ((ret = wl_cfg80211_handle_if_role_conflict(cfg, WL_IF_TYPE_P2P_DISC)) != BCME_OK) {
+ WL_ERR(("secondary iface is active, p2p enable discovery is not supported\n"));
+ goto exit;
+ }
+#endif /* WL_IFACE_MGMT */
+
+ if (wl_get_p2p_status(cfg, DISCOVERY_ON)) {
+ CFGP2P_DBG((" DISCOVERY is already initialized, we have nothing to do\n"));
+ goto set_ie;
+ }
+
+ ret = wl_cfgp2p_init_discovery(cfg);
+ if (unlikely(ret < 0)) {
+ CFGP2P_ERR((" init discovery error %d\n", ret));
+ goto exit;
+ }
+
+ wl_set_p2p_status(cfg, DISCOVERY_ON);
+ /* Set wsec to any non-zero value in the discovery bsscfg to ensure our
+ * P2P probe responses have the privacy bit set in the 802.11 WPA IE.
+ * Some peer devices may not initiate WPS with us if this bit is not set.
+ */
+ ret = wldev_iovar_setint_bsscfg(wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE),
+ "wsec", AES_ENABLED, wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE));
+ if (unlikely(ret < 0)) {
+ CFGP2P_ERR((" wsec error %d\n", ret));
+ goto exit;
+ }
+set_ie:
+
+ if (ie_len) {
+ if (bcmcfg_to_prmry_ndev(cfg) == dev) {
+ bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+ } else if ((bssidx = wl_get_bssidx_by_wdev(cfg, cfg->p2p_wdev)) < 0) {
+ WL_ERR(("Find p2p index from wdev(%p) failed\n", cfg->p2p_wdev));
+ ret = BCME_ERROR;
+ goto exit;
+ }
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+ /* For 3.8+ kernels, pass p2p discovery wdev */
+ cfgdev = cfg->p2p_wdev;
+#else
+ /* Prior to 3.8 kernel, there is no netless p2p, so pass p2p0 ndev */
+ cfgdev = ndev_to_cfgdev(dev);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+ ret = wl_cfg80211_set_mgmt_vndr_ies(cfg, cfgdev,
+ bssidx, VNDR_IE_PRBREQ_FLAG, ie, ie_len);
+ if (unlikely(ret < 0)) {
+ CFGP2P_ERR(("set probreq ie occurs error %d\n", ret));
+ goto exit;
+ }
+ }
+exit:
+ if (ret) {
+ /* Disable discovery I/f on any failure */
+ if (wl_cfgp2p_disable_discovery(cfg) != BCME_OK) {
+ /* Discard error (if any) to avoid override
+ * of p2p enable error.
+ */
+ CFGP2P_ERR(("p2p disable disc failed\n"));
+ }
+ wl_flush_fw_log_buffer(dev, FW_LOGSET_MASK_ALL);
+ }
+ mutex_unlock(&cfg->if_sync);
+ return ret;
+}
+
+/* Disable P2P Discovery
+ * Parameters:
+ * @cfg : wl_private_data
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_disable_discovery(struct bcm_cfg80211 *cfg)
+{
+ s32 ret = BCME_OK;
+ s32 bssidx;
+
+ CFGP2P_DBG((" enter\n"));
+ wl_clr_p2p_status(cfg, DISCOVERY_ON);
+
+ if (!cfg->p2p) { // terence 20130113: Fix for p2p NULL pointer
+ ret = BCME_ERROR;
+ CFGP2P_ERR(("wl->p2p is NULL\n"));
+ goto exit;
+ }
+
+#ifdef DHD_IFDEBUG
+ WL_ERR(("%s: bssidx: %d\n",
+ __FUNCTION__, (cfg)->p2p->bss[P2PAPI_BSSCFG_DEVICE].bssidx));
+#endif
+ bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+ if (bssidx <= 0) {
+ CFGP2P_ERR((" do nothing, not initialized\n"));
+ return 0;
+ }
+
+ ret = wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0, bssidx);
+ if (unlikely(ret < 0)) {
+ CFGP2P_ERR(("unable to set WL_P2P_DISC_ST_SCAN\n"));
+ }
+ /* Do a scan abort to stop the driver's scan engine in case it is still
+ * waiting out an action frame tx dwell time.
+ */
+#ifdef NOT_YET
+ if (wl_get_p2p_status(cfg, SCANNING)) {
+ p2pwlu_scan_abort(hdl, FALSE);
+ }
+#endif
+ wl_clr_p2p_status(cfg, DISCOVERY_ON);
+ ret = wl_cfgp2p_deinit_discovery(cfg);
+
+exit:
+ return ret;
+}
+
+/* Scan parameters */
+#define P2PAPI_SCAN_NPROBES 1
+#define P2PAPI_SCAN_DWELL_TIME_MS 80
+#define P2PAPI_SCAN_SOCIAL_DWELL_TIME_MS 40
+#define P2PAPI_SCAN_HOME_TIME_MS 60
+#define P2PAPI_SCAN_NPROBS_TIME_MS 30
+#define P2PAPI_SCAN_AF_SEARCH_DWELL_TIME_MS 100
+s32
+wl_cfgp2p_escan(struct bcm_cfg80211 *cfg, struct net_device *dev, u16 active_scan,
+ u32 num_chans, u16 *channels,
+ s32 search_state, u16 action, u32 bssidx, struct ether_addr *tx_dst_addr,
+ p2p_scan_purpose_t p2p_scan_purpose)
+{
+ s32 ret = BCME_OK;
+ s32 memsize;
+ s32 eparams_size;
+ u32 i;
+ s8 *memblk;
+ wl_p2p_scan_t *p2p_params;
+ wl_escan_params_t *eparams;
+ wl_escan_params_v2_t *eparams_v2;
+ wlc_ssid_t ssid;
+ u32 sync_id = 0;
+ s32 nprobes = 0;
+ s32 active_time = 0;
+ const struct ether_addr *mac_addr = NULL;
+ u32 scan_type = 0;
+ struct net_device *pri_dev = NULL;
+
+ pri_dev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_PRIMARY);
+ /* Allocate scan params which need space for 3 channels and 0 ssids */
+ if (cfg->scan_params_v2) {
+ eparams_size = (WL_SCAN_PARAMS_V2_FIXED_SIZE +
+ OFFSETOF(wl_escan_params_v2_t, params)) +
+ num_chans * sizeof(eparams->params.channel_list[0]);
+ } else {
+ eparams_size = (WL_SCAN_PARAMS_FIXED_SIZE +
+ OFFSETOF(wl_escan_params_t, params)) +
+ num_chans * sizeof(eparams->params.channel_list[0]);
+ }
+
+ memsize = sizeof(wl_p2p_scan_t) + eparams_size;
+ memblk = scanparambuf;
+ if (memsize > sizeof(scanparambuf)) {
+ CFGP2P_ERR((" scanpar buf too small (%u > %zu)\n",
+ memsize, sizeof(scanparambuf)));
+ return -1;
+ }
+ bzero(memblk, memsize);
+ bzero(cfg->ioctl_buf, WLC_IOCTL_MAXLEN);
+ if (search_state == WL_P2P_DISC_ST_SEARCH) {
+ /*
+ * If we are in SEARCH state, we don't need to set the SSID explicitly,
+ * because the dongle uses the P2P WILDCARD SSID internally by default.
+ */
+ wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SEARCH, 0, 0, bssidx);
+ /* use null ssid */
+ ssid.SSID_len = 0;
+ bzero(&ssid.SSID, sizeof(ssid.SSID));
+ } else if (search_state == WL_P2P_DISC_ST_SCAN) {
+ /* SCAN state: 802.11 scan.
+ * The WFD supplicant issues p2p_find as either type=progressive or
+ * type=full. For type=progressive we have to set the SSID to the
+ * P2P WILDCARD SSID; otherwise the dongle would just do a broadcast
+ * scan.
+ */
+ wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0, bssidx);
+ /* use wild card ssid */
+ ssid.SSID_len = WL_P2P_WILDCARD_SSID_LEN;
+ bzero(&ssid.SSID, sizeof(ssid.SSID));
+ memcpy(&ssid.SSID, WL_P2P_WILDCARD_SSID, WL_P2P_WILDCARD_SSID_LEN);
+ } else {
+ CFGP2P_ERR((" invalid search state %d\n", search_state));
+ return -1;
+ }
+
+ /* Fill in the P2P scan structure at the start of the iovar param block */
+ p2p_params = (wl_p2p_scan_t*) memblk;
+ p2p_params->type = 'E';
+
+ if (!active_scan) {
+ scan_type = WL_SCANFLAGS_PASSIVE;
+ }
+
+ if (tx_dst_addr == NULL) {
+ mac_addr = &ether_bcast;
+ } else {
+ mac_addr = tx_dst_addr;
+ }
+
+ switch (p2p_scan_purpose) {
+ case P2P_SCAN_SOCIAL_CHANNEL:
+ active_time = P2PAPI_SCAN_SOCIAL_DWELL_TIME_MS;
+ break;
+ case P2P_SCAN_AFX_PEER_NORMAL:
+ case P2P_SCAN_AFX_PEER_REDUCED:
+ active_time = P2PAPI_SCAN_AF_SEARCH_DWELL_TIME_MS;
+ break;
+ case P2P_SCAN_CONNECT_TRY:
+ active_time = WL_SCAN_CONNECT_DWELL_TIME_MS;
+ break;
+ default:
+ active_time = wl_get_drv_status_all(cfg, CONNECTED) ?
+ -1 : P2PAPI_SCAN_DWELL_TIME_MS;
+ break;
+ }
+
+ if (p2p_scan_purpose == P2P_SCAN_CONNECT_TRY) {
+ nprobes = active_time /
+ WL_SCAN_JOIN_PROBE_INTERVAL_MS;
+ } else {
+ nprobes = active_time /
+ P2PAPI_SCAN_NPROBS_TIME_MS;
+ }
+
+ if (nprobes <= 0) {
+ nprobes = 1;
+ }
+
+ wl_escan_set_sync_id(sync_id, cfg);
+ /* Fill in the Scan structure that follows the P2P scan structure */
+ if (cfg->scan_params_v2) {
+ eparams_v2 = (wl_escan_params_v2_t*) (p2p_params + 1);
+ eparams_v2->version = htod16(ESCAN_REQ_VERSION_V2);
+ eparams_v2->action = htod16(action);
+ eparams_v2->params.version = htod16(WL_SCAN_PARAMS_VERSION_V2);
+ eparams_v2->params.length = htod16(sizeof(wl_scan_params_v2_t));
+ eparams_v2->params.bss_type = DOT11_BSSTYPE_ANY;
+ eparams_v2->params.scan_type = htod32(scan_type);
+ (void)memcpy_s(&eparams_v2->params.bssid, ETHER_ADDR_LEN, mac_addr, ETHER_ADDR_LEN);
+ eparams_v2->params.home_time = htod32(P2PAPI_SCAN_HOME_TIME_MS);
+ eparams_v2->params.active_time = htod32(active_time);
+ eparams_v2->params.nprobes = htod32(nprobes);
+ eparams_v2->params.passive_time = htod32(-1);
+ eparams_v2->sync_id = sync_id;
+ for (i = 0; i < num_chans; i++) {
+ eparams_v2->params.channel_list[i] =
+ wl_chspec_host_to_driver(channels[i]);
+ }
+ eparams_v2->params.channel_num = htod32((0 << WL_SCAN_PARAMS_NSSID_SHIFT) |
+ (num_chans & WL_SCAN_PARAMS_COUNT_MASK));
+ if (ssid.SSID_len)
+ (void)memcpy_s(&eparams_v2->params.ssid,
+ sizeof(wlc_ssid_t), &ssid, sizeof(wlc_ssid_t));
+ sync_id = eparams_v2->sync_id;
+ } else {
+ eparams = (wl_escan_params_t*) (p2p_params + 1);
+ eparams->version = htod32(ESCAN_REQ_VERSION);
+ eparams->action = htod16(action);
+ eparams->params.bss_type = DOT11_BSSTYPE_ANY;
+ eparams->params.scan_type = htod32(scan_type);
+ (void)memcpy_s(&eparams->params.bssid, ETHER_ADDR_LEN, mac_addr, ETHER_ADDR_LEN);
+ eparams->params.home_time = htod32(P2PAPI_SCAN_HOME_TIME_MS);
+ eparams->params.active_time = htod32(active_time);
+ eparams->params.nprobes = htod32(nprobes);
+ eparams->params.passive_time = htod32(-1);
+ eparams->sync_id = sync_id;
+ for (i = 0; i < num_chans; i++) {
+ eparams->params.channel_list[i] =
+ wl_chspec_host_to_driver(channels[i]);
+ }
+ eparams->params.channel_num = htod32((0 << WL_SCAN_PARAMS_NSSID_SHIFT) |
+ (num_chans & WL_SCAN_PARAMS_COUNT_MASK));
+ if (ssid.SSID_len)
+ (void)memcpy_s(&eparams->params.ssid,
+ sizeof(wlc_ssid_t), &ssid, sizeof(wlc_ssid_t));
+ sync_id = eparams->sync_id;
+ }
+
+ wl_escan_set_type(cfg, WL_SCANTYPE_P2P);
+
+ CFGP2P_DBG(("nprobes:%d active_time:%d\n", nprobes, active_time));
+ CFGP2P_DBG(("SCAN CHANNELS : "));
+ CFGP2P_DBG(("%d", channels[0]));
+ for (i = 1; i < num_chans; i++) {
+ CFGP2P_DBG((",%d", channels[i]));
+ }
+ CFGP2P_DBG(("\n"));
+
+ WL_MSG(dev->name, "P2P_SEARCH sync ID: %d, bssidx: %d\n", sync_id, bssidx);
+ ret = wldev_iovar_setbuf_bsscfg(pri_dev, "p2p_scan",
+ memblk, memsize, cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+ if (ret == BCME_OK) {
+ wl_set_p2p_status(cfg, SCANNING);
+ }
+ return ret;
+}
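+
+/* Layout sketch (added for clarity, not part of the original change): the
+ * "p2p_scan" iovar payload built above is a wl_p2p_scan_t header immediately
+ * followed by the escan parameter block, all carved out of scanparambuf:
+ *
+ *   memblk
+ *   +---------------+------------------------------------------------+
+ *   | wl_p2p_scan_t | wl_escan_params_t / wl_escan_params_v2_t       |
+ *   | (type = 'E')  | (+ num_chans * channel_list[] entries)         |
+ *   +---------------+------------------------------------------------+
+ *   |<-------------------------- memsize -------------------------->|
+ */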
+
+/* search function to reach at common channel to send action frame
+ * Parameters:
+ * @cfg : wl_private data
+ * @ndev : net device for bssidx
+ * @bssidx : bssidx for BSS
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_act_frm_search(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ s32 bssidx, s32 channel, struct ether_addr *tx_dst_addr)
+{
+ s32 ret = 0;
+ u32 chan_cnt = 0;
+ u16 *default_chan_list = NULL;
+ p2p_scan_purpose_t p2p_scan_purpose = P2P_SCAN_AFX_PEER_NORMAL;
+ if (!p2p_is_on(cfg) || ndev == NULL || bssidx == WL_INVALID)
+ return -EINVAL;
+ WL_TRACE_HW4((" Enter\n"));
+ if (bssidx == wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_PRIMARY))
+ bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+ if (channel)
+ chan_cnt = AF_PEER_SEARCH_CNT;
+ else
+ chan_cnt = SOCIAL_CHAN_CNT;
+
+ if (cfg->afx_hdl->pending_tx_act_frm && cfg->afx_hdl->is_active) {
+ wl_action_frame_t *action_frame;
+ action_frame = &(cfg->afx_hdl->pending_tx_act_frm->action_frame);
+ if (wl_cfgp2p_is_p2p_gas_action(action_frame->data, action_frame->len)) {
+ chan_cnt = 1;
+ p2p_scan_purpose = P2P_SCAN_AFX_PEER_REDUCED;
+ }
+ }
+
+ default_chan_list = (u16 *)MALLOCZ(cfg->osh, chan_cnt * sizeof(*default_chan_list));
+ if (default_chan_list == NULL) {
+ CFGP2P_ERR(("channel list allocation failed \n"));
+ ret = -ENOMEM;
+ goto exit;
+ }
+ if (channel) {
+ u32 i;
+ /* insert same channel to the chan_list */
+ for (i = 0; i < chan_cnt; i++) {
+ default_chan_list[i] = channel;
+ }
+ } else {
+ default_chan_list[0] = wf_create_chspec_from_primary(SOCIAL_CHAN_1,
+ WL_CHANSPEC_BW_20, WL_CHANSPEC_BAND_2G);
+ default_chan_list[1] = wf_create_chspec_from_primary(SOCIAL_CHAN_2,
+ WL_CHANSPEC_BW_20, WL_CHANSPEC_BAND_2G);
+ default_chan_list[2] = wf_create_chspec_from_primary(SOCIAL_CHAN_3,
+ WL_CHANSPEC_BW_20, WL_CHANSPEC_BAND_2G);
+ }
+ ret = wl_cfgp2p_escan(cfg, ndev, true, chan_cnt,
+ default_chan_list, WL_P2P_DISC_ST_SEARCH,
+ WL_SCAN_ACTION_START, bssidx, NULL, p2p_scan_purpose);
+ MFREE(cfg->osh, default_chan_list, chan_cnt * sizeof(*default_chan_list));
+exit:
+ return ret;
+}
+
+/* Check whether pointed-to IE looks like WPA. */
+#define wl_cfgp2p_is_wpa_ie(ie, tlvs, len) wl_cfgp2p_has_ie(ie, tlvs, len, \
+ (const uint8 *)WPS_OUI, WPS_OUI_LEN, WPA_OUI_TYPE)
+/* Check whether pointed-to IE looks like WPS. */
+#define wl_cfgp2p_is_wps_ie(ie, tlvs, len) wl_cfgp2p_has_ie(ie, tlvs, len, \
+ (const uint8 *)WPS_OUI, WPS_OUI_LEN, WPS_OUI_TYPE)
+/* Check whether the given IE looks like WFA P2P IE. */
+#define wl_cfgp2p_is_p2p_ie(ie, tlvs, len) wl_cfgp2p_has_ie(ie, tlvs, len, \
+ (const uint8 *)WFA_OUI, WFA_OUI_LEN, WFA_OUI_TYPE_P2P)
+/* Check whether the given IE looks like WFA WFDisplay IE. */
+#ifndef WFA_OUI_TYPE_WFD
+#define WFA_OUI_TYPE_WFD 0x0a /* WiFi Display OUI TYPE */
+#endif
+#define wl_cfgp2p_is_wfd_ie(ie, tlvs, len) wl_cfgp2p_has_ie(ie, tlvs, len, \
+ (const uint8 *)WFA_OUI, WFA_OUI_LEN, WFA_OUI_TYPE_WFD)
+
+/* Is the given TLV the expected entry? If not,
+ * advance the tlvs buffer pointer/length past it.
+ */
+static bool
+wl_cfgp2p_has_ie(const bcm_tlv_t *ie, const u8 **tlvs, u32 *tlvs_len,
+ const u8 *oui, u32 oui_len, u8 type)
+{
+ /* If the contents match the OUI and the type */
+ if (ie->len >= oui_len + 1 &&
+ !bcmp(ie->data, oui, oui_len) &&
+ type == ie->data[oui_len]) {
+ return TRUE;
+ }
+
+ /* point to the next ie */
+ if (tlvs != NULL) {
+ bcm_tlv_buffer_advance_past(ie, tlvs, tlvs_len);
+ }
+
+ return FALSE;
+}
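+
+/* Reference sketch (added for clarity): the vendor-specific IEs matched by
+ * the wl_cfgp2p_is_*_ie() macros above all share the 802.11 layout below,
+ * so a match is simply "OUI plus one type byte at the start of the IE data":
+ *
+ *   +----+-----+-----------+------+----------------+
+ *   | id | len | OUI (3 B) | type | vendor payload |
+ *   +----+-----+-----------+------+----------------+
+ *          len covers OUI + type + payload
+ */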
+
+const wpa_ie_fixed_t *
+wl_cfgp2p_find_wpaie(const u8 *parse, u32 len)
+{
+ const bcm_tlv_t *ie;
+
+ while ((ie = bcm_parse_tlvs(parse, len, DOT11_MNG_VS_ID))) {
+ if (wl_cfgp2p_is_wpa_ie(ie, &parse, &len)) {
+ return (const wpa_ie_fixed_t *)ie;
+ }
+ }
+ return NULL;
+}
+
+const wpa_ie_fixed_t *
+wl_cfgp2p_find_wpsie(const u8 *parse, u32 len)
+{
+ const bcm_tlv_t *ie;
+
+ while ((ie = bcm_parse_tlvs(parse, len, DOT11_MNG_VS_ID))) {
+ if (wl_cfgp2p_is_wps_ie(ie, &parse, &len)) {
+ return (const wpa_ie_fixed_t *)ie;
+ }
+ }
+ return NULL;
+}
+
+wifi_p2p_ie_t *
+wl_cfgp2p_find_p2pie(const u8 *parse, u32 len)
+{
+ bcm_tlv_t *ie;
+
+ while ((ie = bcm_parse_tlvs(parse, len, DOT11_MNG_VS_ID))) {
+ if (wl_cfgp2p_is_p2p_ie(ie, &parse, &len)) {
+ return (wifi_p2p_ie_t *)ie;
+ }
+ }
+ return NULL;
+}
+
+const wifi_wfd_ie_t *
+wl_cfgp2p_find_wfdie(const u8 *parse, u32 len)
+{
+ const bcm_tlv_t *ie;
+
+ while ((ie = bcm_parse_tlvs(parse, len, DOT11_MNG_VS_ID))) {
+ if (wl_cfgp2p_is_wfd_ie(ie, &parse, &len)) {
+ return (const wifi_wfd_ie_t *)ie;
+ }
+ }
+ return NULL;
+}
+
+u32
+wl_cfgp2p_vndr_ie(struct bcm_cfg80211 *cfg, u8 *iebuf, s32 pktflag,
+ s8 *oui, s32 ie_id, const s8 *data, s32 datalen, const s8* add_del_cmd)
+{
+ vndr_ie_setbuf_t hdr; /* aligned temporary vndr_ie buffer header */
+ s32 iecount;
+ u32 data_offset;
+
+ /* Validate the pktflag parameter */
+ if ((pktflag & ~(VNDR_IE_BEACON_FLAG | VNDR_IE_PRBRSP_FLAG |
+ VNDR_IE_ASSOCRSP_FLAG | VNDR_IE_AUTHRSP_FLAG |
+ VNDR_IE_PRBREQ_FLAG | VNDR_IE_ASSOCREQ_FLAG |
+ VNDR_IE_DISASSOC_FLAG))) {
+ CFGP2P_ERR(("p2pwl_vndr_ie: Invalid packet flag 0x%x\n", pktflag));
+ return -1;
+ }
+
+ /* Copy the vndr_ie SET command ("add"/"del") to the buffer */
+ strlcpy(hdr.cmd, add_del_cmd, sizeof(hdr.cmd));
+
+ /* Set the IE count - the buffer contains only 1 IE */
+ iecount = htod32(1);
+ memcpy((void *)&hdr.vndr_ie_buffer.iecount, &iecount, sizeof(s32));
+
+ /* For vendor ID DOT11_MNG_ID_EXT_ID, need to set pkt flag to VNDR_IE_CUSTOM_FLAG */
+ if (ie_id == DOT11_MNG_ID_EXT_ID) {
+ pktflag = pktflag | VNDR_IE_CUSTOM_FLAG;
+ }
+
+ /* Copy packet flags that indicate which packets will contain this IE */
+ pktflag = htod32(pktflag);
+ memcpy((void *)&hdr.vndr_ie_buffer.vndr_ie_list[0].pktflag, &pktflag,
+ sizeof(u32));
+
+ /* Add the IE ID to the buffer */
+ hdr.vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.id = ie_id;
+
+ /* Add the IE length to the buffer */
+ hdr.vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.len =
+ (uint8) VNDR_IE_MIN_LEN + datalen;
+
+ /* Add the IE OUI to the buffer */
+ hdr.vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.oui[0] = oui[0];
+ hdr.vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.oui[1] = oui[1];
+ hdr.vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.oui[2] = oui[2];
+
+ /* Copy the aligned temporary vndr_ie buffer header to the IE buffer */
+ memcpy(iebuf, &hdr, sizeof(hdr) - 1);
+
+ /* Copy the IE data to the IE buffer */
+ data_offset =
+ (u8*)&hdr.vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.data[0] -
+ (u8*)&hdr;
+ memcpy(iebuf + data_offset, data, datalen);
+ return data_offset + datalen;
+}
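+
+/* Usage sketch (illustrative, not part of the original change): adding a
+ * P2P IE to probe requests would fill iebuf with an "add" command for the
+ * WFA OUI. The p2p_ie/p2p_ie_len names are hypothetical placeholders.
+ *
+ *   u32 used = wl_cfgp2p_vndr_ie(cfg, iebuf, VNDR_IE_PRBREQ_FLAG,
+ *       (s8 *)WFA_OUI, DOT11_MNG_VS_ID, (const s8 *)p2p_ie,
+ *       p2p_ie_len, "add");
+ */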
+
+struct net_device *
+wl_cfgp2p_find_ndev(struct bcm_cfg80211 *cfg, s32 bssidx)
+{
+ u32 i;
+ struct net_device *ndev = NULL;
+ if (bssidx < 0) {
+ CFGP2P_ERR((" bsscfg idx is invalid\n"));
+ goto exit;
+ }
+
+ for (i = 0; i < P2PAPI_BSSCFG_MAX; i++) {
+ if (bssidx == wl_to_p2p_bss_bssidx(cfg, i)) {
+ ndev = wl_to_p2p_bss_ndev(cfg, i);
+ break;
+ }
+ }
+
+exit:
+ return ndev;
+}
+/*
+ * Search the driver array idx based on the bssidx argument.
+ * Note that this idx is applicable only for the primary and P2P
+ * interfaces; virtual AP/STA interfaces are not covered here.
+ * Parameters:
+ * @cfg : wl_private data
+ * @bssidx : bssidx which indicates the firmware's bsscfg->idx.
+ * @type : output arg to store the array idx of p2p->bss.
+ * Returns BCME_OK on success, BCME_BADARG on error.
+ */
+
+s32
+wl_cfgp2p_find_type(struct bcm_cfg80211 *cfg, s32 bssidx, s32 *type)
+{
+ u32 i;
+ if (bssidx < 0 || type == NULL) {
+ CFGP2P_ERR((" argument is invalid\n"));
+ goto exit;
+ }
+ if (!cfg->p2p) {
+ CFGP2P_ERR(("p2p if does not exist\n"));
+ goto exit;
+ }
+ for (i = 0; i < P2PAPI_BSSCFG_MAX; i++) {
+ if (bssidx == wl_to_p2p_bss_bssidx(cfg, i)) {
+ *type = i;
+ return BCME_OK;
+ }
+ }
+
+exit:
+ return BCME_BADARG;
+}
+
+/*
+ * Callback function for WLC_E_P2P_DISC_LISTEN_COMPLETE
+ */
+s32
+wl_cfgp2p_listen_complete(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+ s32 ret = BCME_OK;
+ struct net_device *ndev = NULL;
+
+ if (!cfg || !cfg->p2p || !cfgdev)
+ return BCME_ERROR;
+
+ CFGP2P_DBG((" Enter\n"));
+#ifdef DHD_IFDEBUG
+ PRINT_WDEV_INFO(cfgdev);
+#endif /* DHD_IFDEBUG */
+
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+#ifdef P2P_LISTEN_OFFLOADING
+ if (wl_get_p2p_status(cfg, DISC_IN_PROGRESS)) {
+ wl_clr_p2p_status(cfg, DISC_IN_PROGRESS);
+ CFGP2P_ERR(("DISC_IN_PROGRESS cleared\n"));
+ if (ndev && (ndev->ieee80211_ptr != NULL)) {
+#if defined(WL_CFG80211_P2P_DEV_IF)
+ if (cfgdev && ((struct wireless_dev *)cfgdev)->wiphy) {
+ cfg80211_remain_on_channel_expired(cfgdev, cfg->last_roc_id,
+ &cfg->remain_on_chan, GFP_KERNEL);
+ } else {
+ CFGP2P_ERR(("Invalid cfgdev. Dropping the"
+ "remain_on_channel_expired event.\n"));
+ }
+#else
+ cfg80211_remain_on_channel_expired(cfgdev, cfg->last_roc_id,
+ &cfg->remain_on_chan, cfg->remain_on_chan_type, GFP_KERNEL);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+ }
+ }
+#endif /* P2P_LISTEN_OFFLOADING */
+
+ if (wl_get_p2p_status(cfg, LISTEN_EXPIRED) == 0) {
+ wl_set_p2p_status(cfg, LISTEN_EXPIRED);
+ if (timer_pending(&cfg->p2p->listen_timer)) {
+ del_timer_sync(&cfg->p2p->listen_timer);
+ }
+
+ if (cfg->afx_hdl->is_listen == TRUE &&
+ wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
+ WL_DBG(("Listen DONE for action frame\n"));
+ complete(&cfg->act_frm_scan);
+ }
+#ifdef WL_CFG80211_SYNC_GON
+ else if (wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM_LISTEN)) {
+ wl_clr_drv_status(cfg, WAITING_NEXT_ACT_FRM_LISTEN, ndev);
+ WL_DBG(("Listen DONE and wake up wait_next_af !!(%d)\n",
+ jiffies_to_msecs(jiffies - cfg->af_tx_sent_jiffies)));
+
+ if (wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM))
+ wl_clr_drv_status(cfg, WAITING_NEXT_ACT_FRM, ndev);
+
+ complete(&cfg->wait_next_af);
+ }
+#endif /* WL_CFG80211_SYNC_GON */
+
+#ifndef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+ if (wl_get_drv_status_all(cfg, REMAINING_ON_CHANNEL))
+#else
+ if (wl_get_drv_status_all(cfg, REMAINING_ON_CHANNEL) ||
+ wl_get_drv_status_all(cfg, FAKE_REMAINING_ON_CHANNEL))
+#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+ {
+ WL_DBG(("Listen DONE for remain on channel expired\n"));
+ wl_clr_drv_status(cfg, REMAINING_ON_CHANNEL, ndev);
+#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+ wl_clr_drv_status(cfg, FAKE_REMAINING_ON_CHANNEL, ndev);
+#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+ if (ndev && (ndev->ieee80211_ptr != NULL)) {
+#if defined(WL_CFG80211_P2P_DEV_IF)
+ if (cfgdev && ((struct wireless_dev *)cfgdev)->wiphy &&
+ bcmcfg_to_p2p_wdev(cfg)) {
+ /* JIRA:SWWLAN-81873: cfgdev may be invalid here.
+ * To prevent a kernel panic, explicitly check
+ * cfgdev->wiphy before reporting the expiry.
+ */
+ cfg80211_remain_on_channel_expired(bcmcfg_to_p2p_wdev(cfg),
+ cfg->last_roc_id, &cfg->remain_on_chan, GFP_KERNEL);
+ } else
+ CFGP2P_ERR(("Invalid cfgdev. Dropping the"
+ "remain_on_channel_expired event.\n"));
+#else
+ if (cfgdev && ((struct wireless_dev *)cfgdev)->wiphy)
+ cfg80211_remain_on_channel_expired(cfgdev,
+ cfg->last_roc_id, &cfg->remain_on_chan,
+ cfg->remain_on_chan_type, GFP_KERNEL);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+ }
+ }
+ if (wl_add_remove_eventmsg(bcmcfg_to_prmry_ndev(cfg),
+ WLC_E_P2P_PROBREQ_MSG, false) != BCME_OK) {
+ CFGP2P_ERR((" failed to unset WLC_E_P2P_PROPREQ_MSG\n"));
+ }
+ } else
+ wl_clr_p2p_status(cfg, LISTEN_EXPIRED);
+
+ return ret;
+}
+
+/*
+ * Timer expire callback function for LISTEN
+ * We can't report cfg80211_remain_on_channel_expired from Timer ISR context,
+ * so let's do it from thread context.
+ */
+void
+wl_cfgp2p_listen_expired(unsigned long data)
+{
+ wl_event_msg_t msg;
+ struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *) data;
+ struct net_device *ndev;
+ CFGP2P_DBG((" Enter\n"));
+
+ if (!cfg) {
+ CFGP2P_ERR((" No cfg\n"));
+ return;
+ }
+ bzero(&msg, sizeof(wl_event_msg_t));
+ msg.event_type = hton32(WLC_E_P2P_DISC_LISTEN_COMPLETE);
+ msg.bsscfgidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+#if defined(WL_ENABLE_P2P_IF)
+ ndev = cfg->p2p_net ? cfg->p2p_net :
+ wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE);
+#else
+ ndev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE);
+#endif /* WL_ENABLE_P2P_IF */
+ if (!ndev) {
+ CFGP2P_ERR((" No ndev\n"));
+ return;
+ }
+ wl_cfg80211_event(ndev, &msg, NULL);
+}
+/*
+ * Routine for cancelling the P2P LISTEN
+ */
+static s32
+wl_cfgp2p_cancel_listen(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ struct wireless_dev *wdev, bool notify)
+{
+ WL_DBG(("Enter \n"));
+ /* Irrespective of whether timer is running or not, reset
+ * the LISTEN state.
+ */
+#ifdef NOT_YET
+/* WAR: temporary workaround until the root cause of the kernel panic is resolved */
+ wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0,
+ wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE));
+#endif /* NOT_YET */
+ if (timer_pending(&cfg->p2p->listen_timer)) {
+ del_timer_sync(&cfg->p2p->listen_timer);
+ if (notify) {
+#if defined(WL_CFG80211_P2P_DEV_IF)
+ if (bcmcfg_to_p2p_wdev(cfg))
+ cfg80211_remain_on_channel_expired(wdev, cfg->last_roc_id,
+ &cfg->remain_on_chan, GFP_KERNEL);
+#else
+ if (ndev && ndev->ieee80211_ptr)
+ cfg80211_remain_on_channel_expired(ndev, cfg->last_roc_id,
+ &cfg->remain_on_chan, cfg->remain_on_chan_type, GFP_KERNEL);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+ }
+ }
+ return 0;
+}
+/*
+ * Do a P2P Listen on the given channel for the given duration.
+ * A listen consists of sitting idle and responding to P2P probe requests
+ * with a P2P probe response.
+ *
+ * This fn assumes dongle p2p device discovery is already enabled.
+ * Parameters :
+ * @cfg : wl_private data
+ * @channel : channel to listen
+ * @duration_ms : the time (milli seconds) to wait
+ */
+s32
+wl_cfgp2p_discover_listen(struct bcm_cfg80211 *cfg, s32 channel, u32 duration_ms)
+{
+#define EXTRA_DELAY_TIME 100
+ s32 ret = BCME_OK;
+ timer_list_compat_t *_timer;
+ s32 extra_delay;
+ struct net_device *netdev = bcmcfg_to_prmry_ndev(cfg);
+
+ CFGP2P_DBG((" Enter Listen Channel : %d, Duration : %d\n", channel, duration_ms));
+ if (unlikely(wl_get_p2p_status(cfg, DISCOVERY_ON) == 0)) {
+ CFGP2P_ERR((" Discovery is not set, so we have noting to do\n"));
+ ret = BCME_NOTREADY;
+ goto exit;
+ }
+ if (timer_pending(&cfg->p2p->listen_timer)) {
+ CFGP2P_DBG(("previous LISTEN is not completed yet\n"));
+ goto exit;
+ }
+#ifndef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+ else
+ wl_clr_p2p_status(cfg, LISTEN_EXPIRED);
+#endif /* not WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+ if (wl_add_remove_eventmsg(netdev, WLC_E_P2P_PROBREQ_MSG, true) != BCME_OK) {
+ CFGP2P_ERR((" failed to set WLC_E_P2P_PROPREQ_MSG\n"));
+ }
+
+ ret = wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_LISTEN, channel, (u16) duration_ms,
+ wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE));
+ _timer = &cfg->p2p->listen_timer;
+
+ /* We will wait to receive WLC_E_P2P_DISC_LISTEN_COMPLETE from the dongle;
+ * otherwise we wait up to duration_ms + 100 ms + duration_ms / 10.
+ */
+ if (ret == BCME_OK) {
+ extra_delay = EXTRA_DELAY_TIME + (duration_ms / 10);
+ } else {
+ /* if setting listen failed, there is no need to wait out the whole duration. */
+ duration_ms = 100 + duration_ms / 20;
+ extra_delay = 0;
+ }
+
+ INIT_TIMER(_timer, wl_cfgp2p_listen_expired, duration_ms, extra_delay);
+#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+ wl_clr_p2p_status(cfg, LISTEN_EXPIRED);
+#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+
+#undef EXTRA_DELAY_TIME
+exit:
+ return ret;
+}
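+
+/* Worked example (added for clarity): for a 500 ms LISTEN that was set
+ * successfully, the host-side timer fires after
+ *   duration_ms + extra_delay = 500 + (100 + 500 / 10) = 650 ms,
+ * giving the dongle time to deliver WLC_E_P2P_DISC_LISTEN_COMPLETE first.
+ * If setting LISTEN failed, the wait shrinks to 100 + 500 / 20 = 125 ms
+ * with no extra delay.
+ */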
+
+s32
+wl_cfgp2p_discover_enable_search(struct bcm_cfg80211 *cfg, u8 enable)
+{
+ s32 ret = BCME_OK;
+ CFGP2P_DBG((" Enter\n"));
+ if (!wl_get_p2p_status(cfg, DISCOVERY_ON)) {
+
+ CFGP2P_DBG((" do nothing, discovery is off\n"));
+ return ret;
+ }
+ if (wl_get_p2p_status(cfg, SEARCH_ENABLED) == enable) {
+ CFGP2P_DBG(("already : %d\n", enable));
+ return ret;
+ }
+
+ wl_chg_p2p_status(cfg, SEARCH_ENABLED);
+ /* When disabling Search, reset the WL driver's p2p discovery state to
+ * WL_P2P_DISC_ST_SCAN.
+ */
+ if (!enable) {
+ wl_clr_p2p_status(cfg, SCANNING);
+ ret = wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0,
+ wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE));
+ }
+
+ return ret;
+}
+
+/*
+ * Callback function for WLC_E_ACTION_FRAME_COMPLETE, WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE
+ */
+s32
+wl_cfgp2p_action_tx_complete(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+ s32 ret = BCME_OK;
+ u32 event_type = ntoh32(e->event_type);
+ u32 status = ntoh32(e->status);
+ struct net_device *ndev = NULL;
+ u8 bsscfgidx = e->bsscfgidx;
+
+ CFGP2P_DBG((" Enter\n"));
+
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+ if (wl_get_drv_status_all(cfg, SENDING_ACT_FRM)) {
+ if (event_type == WLC_E_ACTION_FRAME_COMPLETE) {
+
+ CFGP2P_DBG((" WLC_E_ACTION_FRAME_COMPLETE is received : %d\n", status));
+ if (status == WLC_E_STATUS_SUCCESS) {
+ wl_set_p2p_status(cfg, ACTION_TX_COMPLETED);
+ CFGP2P_ACTION(("TX AF: ACK. wait_rx:%d\n", cfg->need_wait_afrx));
+ if (!cfg->need_wait_afrx && cfg->af_sent_channel) {
+ CFGP2P_DBG(("no need to wait next AF.\n"));
+ wl_stop_wait_next_action_frame(cfg, ndev, bsscfgidx);
+ }
+ }
+ else if (!wl_get_p2p_status(cfg, ACTION_TX_COMPLETED)) {
+ wl_set_p2p_status(cfg, ACTION_TX_NOACK);
+ if (status == WLC_E_STATUS_SUPPRESS) {
+ CFGP2P_ACTION(("TX actfrm : SUPPRES\n"));
+ } else {
+ CFGP2P_ACTION(("TX actfrm : NO ACK\n"));
+ }
+ /* if there is no ack, we don't need to wait for
+ * WLC_E_ACTION_FRAME_OFFCHAN_COMPLETE event for ucast
+ */
+ if (cfg->afx_hdl && !ETHER_ISBCAST(&cfg->afx_hdl->tx_dst_addr)) {
+ wl_stop_wait_next_action_frame(cfg, ndev, bsscfgidx);
+ }
+ }
+ } else {
+ CFGP2P_ACTION((" WLC_E_ACTION_FRAME_OFFCHAN_COMPLETE is received,"
+ "status : %d\n", status));
+
+ if (wl_get_drv_status_all(cfg, SENDING_ACT_FRM))
+ complete(&cfg->send_af_done);
+ }
+ }
+ return ret;
+}
+/* Send an action frame immediately without doing channel synchronization.
+ *
+ * This function does not wait for a completion event before returning.
+ * The WLC_E_ACTION_FRAME_COMPLETE event will be received when the action
+ * frame is transmitted.
+ * The WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE event will be received when an
+ * 802.11 ack has been received for the sent action frame.
+ */
+s32
+wl_cfgp2p_tx_action_frame(struct bcm_cfg80211 *cfg, struct net_device *dev,
+ wl_af_params_t *af_params, s32 bssidx)
+{
+ s32 ret = BCME_OK;
+ s32 evt_ret = BCME_OK;
+ s32 timeout = 0;
+ wl_eventmsg_buf_t buf;
+
+ CFGP2P_DBG(("\n"));
+ CFGP2P_ACTION(("channel : %u , dwell time : %u wait_afrx:%d\n",
+ CHSPEC_CHANNEL(af_params->channel), af_params->dwell_time, cfg->need_wait_afrx));
+
+ wl_clr_p2p_status(cfg, ACTION_TX_COMPLETED);
+ wl_clr_p2p_status(cfg, ACTION_TX_NOACK);
+
+ bzero(&buf, sizeof(wl_eventmsg_buf_t));
+ wl_cfg80211_add_to_eventbuffer(&buf, WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE, true);
+ wl_cfg80211_add_to_eventbuffer(&buf, WLC_E_ACTION_FRAME_COMPLETE, true);
+ if ((evt_ret = wl_cfg80211_apply_eventbuffer(bcmcfg_to_prmry_ndev(cfg), cfg, &buf)) < 0)
+ return evt_ret;
+
+ cfg->af_sent_channel = af_params->channel;
+ /* For older FW versions actframe does not support chanspec format */
+ if (cfg->wlc_ver.wlc_ver_major < FW_MAJOR_VER_ACTFRAME_CHSPEC) {
+ af_params->channel = CHSPEC_CHANNEL(af_params->channel);
+ }
+#ifdef WL_CFG80211_SYNC_GON
+ cfg->af_tx_sent_jiffies = jiffies;
+#endif /* WL_CFG80211_SYNC_GON */
+
+ ret = wldev_iovar_setbuf_bsscfg(dev, "actframe", af_params, sizeof(*af_params),
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+
+ if (ret < 0) {
+ CFGP2P_ACTION(("TX actfrm : ERROR %d\n", ret));
+ goto exit;
+ }
+
+ timeout = wait_for_completion_timeout(&cfg->send_af_done,
+ msecs_to_jiffies(af_params->dwell_time + WL_AF_TX_EXTRA_TIME_MAX));
+
+ if (timeout >= 0 && wl_get_p2p_status(cfg, ACTION_TX_COMPLETED)) {
+ CFGP2P_DBG(("tx action frame operation is completed\n"));
+ ret = BCME_OK;
+ } else if (ETHER_ISBCAST(&cfg->afx_hdl->tx_dst_addr)) {
+ CFGP2P_DBG(("bcast tx action frame operation is completed\n"));
+ ret = BCME_OK;
+ } else {
+ ret = BCME_ERROR;
+ CFGP2P_DBG(("tx action frame operation is failed\n"));
+ }
+ /* clear status bit for action tx */
+ wl_clr_p2p_status(cfg, ACTION_TX_COMPLETED);
+ wl_clr_p2p_status(cfg, ACTION_TX_NOACK);
+
+exit:
+ CFGP2P_DBG((" via act frame iovar : status = %d\n", ret));
+
+ bzero(&buf, sizeof(wl_eventmsg_buf_t));
+ wl_cfg80211_add_to_eventbuffer(&buf, WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE, false);
+ wl_cfg80211_add_to_eventbuffer(&buf, WLC_E_ACTION_FRAME_COMPLETE, false);
+ if ((evt_ret = wl_cfg80211_apply_eventbuffer(bcmcfg_to_prmry_ndev(cfg), cfg, &buf)) < 0) {
+ WL_ERR(("TX frame events revert back failed \n"));
+ return evt_ret;
+ }
+
+ return ret;
+}
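+
+/* Timing note with a worked example (added for clarity): the completion
+ * wait above is bounded by the requested dwell time plus a fixed allowance,
+ * i.e. msecs_to_jiffies(af_params->dwell_time + WL_AF_TX_EXTRA_TIME_MAX).
+ * For a 200 ms dwell and an assumed WL_AF_TX_EXTRA_TIME_MAX of 100 ms, the
+ * host stops waiting for WLC_E_ACTION_FRAME_COMPLETE after roughly 300 ms.
+ */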
+
+/* Generate our P2P Device Address and P2P Interface Address from our primary
+ * MAC address.
+ */
+void
+wl_cfgp2p_generate_bss_mac(struct bcm_cfg80211 *cfg, struct ether_addr *primary_addr)
+{
+ struct ether_addr *mac_addr = wl_to_p2p_bss_macaddr(cfg, P2PAPI_BSSCFG_DEVICE);
+ struct ether_addr *int_addr;
+#ifdef P2P_AP_CONCURRENT
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+#endif
+
+ if (ETHER_IS_LOCALADDR(primary_addr)) {
+ /* STA is using a locally administered MAC. Use a randomized MAC
+ * for p2p disc to avoid collision with the STA MAC address.
+ */
+ wl_cfg80211_generate_mac_addr(mac_addr);
+ } else {
+ (void)memcpy_s(mac_addr, ETH_ALEN, bcmcfg_to_prmry_ndev(cfg)->perm_addr, ETH_ALEN);
+ mac_addr->octet[0] |= 0x02;
+#ifdef P2P_AP_CONCURRENT
+ if (dhd->conf->war & P2P_AP_MAC_CONFLICT)
+ wl_ext_iapsta_get_vif_macaddr(dhd, 2, (u8 *)mac_addr);
+#endif
+ WL_DBG(("P2P Discovery address:"MACDBG "\n", MAC2STRDBG(mac_addr->octet)));
+ }
+
+ int_addr = wl_to_p2p_bss_macaddr(cfg, P2PAPI_BSSCFG_CONNECTION1);
+ memcpy(int_addr, mac_addr, sizeof(struct ether_addr));
+ int_addr->octet[4] ^= 0x80;
+ WL_DBG(("Primary P2P Interface address:"MACDBG "\n", MAC2STRDBG(int_addr->octet)));
+
+ int_addr = wl_to_p2p_bss_macaddr(cfg, P2PAPI_BSSCFG_CONNECTION2);
+ memcpy(int_addr, mac_addr, sizeof(struct ether_addr));
+ int_addr->octet[4] ^= 0x90;
+}
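+
+/* Worked example (added for clarity): on the non-locally-administered path
+ * above, a primary MAC of 00:90:4c:11:22:33 yields
+ *   P2P Device   : 02:90:4c:11:22:33  (octet[0] |= 0x02, locally admin. bit)
+ *   Connection 1 : 02:90:4c:11:a2:33  (octet[4] ^= 0x80)
+ *   Connection 2 : 02:90:4c:11:b2:33  (octet[4] ^= 0x90)
+ */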
+
+/* P2P IF Address change to Virtual Interface MAC Address */
+void
+wl_cfg80211_change_ifaddr(u8* buf, struct ether_addr *p2p_int_addr, u8 element_id)
+{
+ wifi_p2p_ie_t *ie = (wifi_p2p_ie_t*) buf;
+ u16 len = ie->len;
+ u8 *subel;
+ u8 subelt_id;
+ u16 subelt_len;
+ CFGP2P_DBG((" Enter\n"));
+
+ /* Point subel to the P2P IE's subelt field.
+ * Subtract the preceding fields (id, len, OUI, oui_type) from the length.
+ */
+ subel = ie->subelts;
+ len -= 4; /* exclude OUI + OUI_TYPE */
+
+ while (len >= 3) {
+ /* attribute id */
+ subelt_id = *subel;
+ subel += 1;
+ len -= 1;
+
+ /* 2-byte little endian */
+ subelt_len = *subel++;
+ subelt_len |= *subel++ << 8;
+
+ len -= 2;
+ len -= subelt_len; /* for the remaining subelt fields */
+
+ if (subelt_id == element_id) {
+ if (subelt_id == P2P_SEID_INTINTADDR) {
+ memcpy(subel, p2p_int_addr->octet, ETHER_ADDR_LEN);
+ CFGP2P_INFO(("Intended P2P Interface Address ATTR FOUND\n"));
+ } else if (subelt_id == P2P_SEID_DEV_ID) {
+ memcpy(subel, p2p_int_addr->octet, ETHER_ADDR_LEN);
+ CFGP2P_INFO(("Device ID ATTR FOUND\n"));
+ } else if (subelt_id == P2P_SEID_DEV_INFO) {
+ memcpy(subel, p2p_int_addr->octet, ETHER_ADDR_LEN);
+ CFGP2P_INFO(("Device INFO ATTR FOUND\n"));
+ } else if (subelt_id == P2P_SEID_GROUP_ID) {
+ memcpy(subel, p2p_int_addr->octet, ETHER_ADDR_LEN);
+ CFGP2P_INFO(("GROUP ID ATTR FOUND\n"));
+ }
+ return;
+ } else {
+ CFGP2P_DBG(("OTHER id : %d\n", subelt_id));
+ }
+ subel += subelt_len;
+ }
+}
+
+s32
+wl_cfgp2p_supported(struct bcm_cfg80211 *cfg, struct net_device *ndev)
+{
+ s32 ret = BCME_OK;
+ s32 p2p_supported = 0;
+ ret = wldev_iovar_getint(ndev, "p2p",
+ &p2p_supported);
+ if (ret < 0) {
+ if (ret == BCME_UNSUPPORTED) {
+ CFGP2P_INFO(("p2p is unsupported\n"));
+ return 0;
+ } else {
+ CFGP2P_ERR(("cfg p2p error %d\n", ret));
+ return ret;
+ }
+ }
+ if (cfg->pub->conf->fw_type == FW_TYPE_MESH)
+ p2p_supported = 0;
+ if (p2p_supported == 1) {
+ CFGP2P_INFO(("p2p is supported\n"));
+ } else {
+ CFGP2P_INFO(("p2p is unsupported\n"));
+ p2p_supported = 0;
+ }
+ return p2p_supported;
+}
+
+/* Cleanup P2P resources */
+s32
+wl_cfgp2p_down(struct bcm_cfg80211 *cfg)
+{
+ struct net_device *ndev = NULL;
+ struct wireless_dev *wdev = NULL;
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+ ndev = bcmcfg_to_prmry_ndev(cfg);
+ wdev = bcmcfg_to_p2p_wdev(cfg);
+#elif defined(WL_ENABLE_P2P_IF)
+ ndev = cfg->p2p_net ? cfg->p2p_net : bcmcfg_to_prmry_ndev(cfg);
+ wdev = ndev_to_wdev(ndev);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+ wl_cfgp2p_cancel_listen(cfg, ndev, wdev, TRUE);
+ wl_cfgp2p_disable_discovery(cfg);
+
+#if defined(WL_CFG80211_P2P_DEV_IF) && !defined(KEEP_WIFION_OPTION)
+/*
+ * In CUSTOMER_HW4 implementation "ifconfig wlan0 down" can get
+ * called during phone suspend and the customer requires the p2p
+ * discovery interface to be left untouched so that the user
+ * space can resume without any problem.
+ */
+ if (cfg->p2p_wdev) {
+ /* If p2p wdev is left out, clean it up */
+ WL_ERR(("Clean up the p2p discovery IF\n"));
+ wl_cfgp2p_del_p2p_disc_if(cfg->p2p_wdev, cfg);
+ }
+#endif /* WL_CFG80211_P2P_DEV_IF && !KEEP_WIFION_OPTION */
+
+ wl_cfgp2p_deinit_priv(cfg);
+ return 0;
+}
+
+int wl_cfgp2p_vif_created(struct bcm_cfg80211 *cfg)
+{
+ if (cfg->p2p && ((wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION1) != -1) ||
+ (wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION2) != -1)))
+ return true;
+ else
+ return false;
+}
+
+s32
+wl_cfgp2p_set_p2p_noa(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int len)
+{
+ s32 ret = -1;
+ int count, start, duration;
+ wl_p2p_sched_t dongle_noa;
+ s32 bssidx, type;
+ int iovar_len = sizeof(dongle_noa);
+ CFGP2P_DBG((" Enter\n"));
+
+ bzero(&dongle_noa, sizeof(dongle_noa));
+
+ if (wl_cfgp2p_vif_created(cfg)) {
+ cfg->p2p->noa.desc[0].start = 0;
+
+ sscanf(buf, "%10d %10d %10d", &count, &start, &duration);
+ CFGP2P_DBG(("set_p2p_noa count %d start %d duration %d\n",
+ count, start, duration));
+ if (count != -1)
+ cfg->p2p->noa.desc[0].count = count;
+
+ /* supplicant gives interval as start */
+ if (start != -1)
+ cfg->p2p->noa.desc[0].interval = start;
+
+ if (duration != -1)
+ cfg->p2p->noa.desc[0].duration = duration;
+
+ if (cfg->p2p->noa.desc[0].count != 255 && cfg->p2p->noa.desc[0].count != 0) {
+ cfg->p2p->noa.desc[0].start = 200;
+ dongle_noa.type = WL_P2P_SCHED_TYPE_REQ_ABS;
+ dongle_noa.action = WL_P2P_SCHED_ACTION_GOOFF;
+ dongle_noa.option = WL_P2P_SCHED_OPTION_TSFOFS;
+ }
+ else if (cfg->p2p->noa.desc[0].count == 0) {
+ cfg->p2p->noa.desc[0].start = 0;
+ dongle_noa.type = WL_P2P_SCHED_TYPE_ABS;
+ dongle_noa.option = WL_P2P_SCHED_OPTION_NORMAL;
+ dongle_noa.action = WL_P2P_SCHED_ACTION_RESET;
+ }
+ else {
+ /* Continuous NoA interval. */
+ dongle_noa.action = WL_P2P_SCHED_ACTION_DOZE;
+ dongle_noa.type = WL_P2P_SCHED_TYPE_ABS;
+ /* If the NoA interval is equal to the beacon interval, use
+ * the percentage based NoA API to work-around driver issues
+ * (PR #88043). Otherwise, use the absolute duration/interval API.
+ */
+ if ((cfg->p2p->noa.desc[0].interval == 102) ||
+ (cfg->p2p->noa.desc[0].interval == 100)) {
+ cfg->p2p->noa.desc[0].start = 100 -
+ cfg->p2p->noa.desc[0].duration;
+ dongle_noa.option = WL_P2P_SCHED_OPTION_BCNPCT;
+ }
+ else {
+ dongle_noa.option = WL_P2P_SCHED_OPTION_NORMAL;
+ }
+ }
+ /* Put the noa descriptor in dongle format for dongle */
+ dongle_noa.desc[0].count = htod32(cfg->p2p->noa.desc[0].count);
+ if (dongle_noa.option == WL_P2P_SCHED_OPTION_BCNPCT) {
+ dongle_noa.desc[0].start = htod32(cfg->p2p->noa.desc[0].start);
+ dongle_noa.desc[0].duration = htod32(cfg->p2p->noa.desc[0].duration);
+ }
+ else {
+ dongle_noa.desc[0].start = htod32(cfg->p2p->noa.desc[0].start*1000);
+ dongle_noa.desc[0].duration = htod32(cfg->p2p->noa.desc[0].duration*1000);
+ }
+ dongle_noa.desc[0].interval = htod32(cfg->p2p->noa.desc[0].interval*1000);
+ bssidx = wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr);
+ if (wl_cfgp2p_find_type(cfg, bssidx, &type) != BCME_OK)
+ return BCME_ERROR;
+
+ if (dongle_noa.action == WL_P2P_SCHED_ACTION_RESET) {
+ iovar_len -= sizeof(wl_p2p_sched_desc_t);
+ }
+
+ ret = wldev_iovar_setbuf(wl_to_p2p_bss_ndev(cfg, type),
+ "p2p_noa", &dongle_noa, iovar_len, cfg->ioctl_buf,
+ WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+
+ if (ret < 0) {
+ CFGP2P_ERR(("fw set p2p_noa failed %d\n", ret));
+ }
+ }
+ else {
+ CFGP2P_ERR(("ERROR: set_noa in non-p2p mode\n"));
+ }
+ return ret;
+}
+s32
+wl_cfgp2p_get_p2p_noa(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int buf_len)
+{
+ wifi_p2p_noa_desc_t *noa_desc;
+ int len = 0, i;
+ char _buf[200];
+
+ CFGP2P_DBG((" Enter\n"));
+ buf[0] = '\0';
+ if (wl_cfgp2p_vif_created(cfg)) {
+ if (cfg->p2p->noa.desc[0].count || cfg->p2p->ops.ops) {
+ _buf[0] = 1; /* noa index */
+ _buf[1] = (cfg->p2p->ops.ops ? 0x80: 0) |
+ (cfg->p2p->ops.ctw & 0x7f); /* ops + ctw */
+ len += 2;
+ if (cfg->p2p->noa.desc[0].count) {
+ noa_desc = (wifi_p2p_noa_desc_t*)&_buf[len];
+ noa_desc->cnt_type = cfg->p2p->noa.desc[0].count;
+ noa_desc->duration = cfg->p2p->noa.desc[0].duration;
+ noa_desc->interval = cfg->p2p->noa.desc[0].interval;
+ noa_desc->start = cfg->p2p->noa.desc[0].start;
+ len += sizeof(wifi_p2p_noa_desc_t);
+ }
+ if (buf_len <= len * 2) {
+ CFGP2P_ERR(("ERROR: buf_len %d in not enough for"
+ "returning noa in string format\n", buf_len));
+ return -1;
+ }
+ /* We have to convert the buffer data into ASCII strings */
+ for (i = 0; i < len; i++) {
+ snprintf(buf, 3, "%02x", _buf[i]);
+ buf += 2;
+ }
+ *buf = '\0'; /* buf was advanced inside the loop; terminate in place */
+ }
+ }
+ else {
+ CFGP2P_ERR(("ERROR: get_noa in non-p2p mode\n"));
+ return -1;
+ }
+ return len * 2;
+}
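+
+/* Output format example (added for clarity): the NoA state is returned as
+ * an ASCII hex string, two characters per byte. With noa index 1, ops off
+ * and ctw 10, buf starts with "01" "0a", followed by the hex encoding of
+ * the wifi_p2p_noa_desc_t bytes when a descriptor is present; the return
+ * value is len * 2, the string length written.
+ */
+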
+s32
+wl_cfgp2p_set_p2p_ps(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int len)
+{
+ int ps, ctw;
+ int ret = -1;
+ s32 legacy_ps;
+ s32 conn_idx;
+ s32 bssidx;
+ struct net_device *dev;
+
+ CFGP2P_DBG((" Enter\n"));
+ if (wl_cfgp2p_vif_created(cfg)) {
+ sscanf(buf, "%10d %10d %10d", &legacy_ps, &ps, &ctw);
+ CFGP2P_DBG((" Enter legacy_ps %d ps %d ctw %d\n", legacy_ps, ps, ctw));
+
+ bssidx = wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr);
+ if (wl_cfgp2p_find_type(cfg, bssidx, &conn_idx) != BCME_OK)
+ return BCME_ERROR;
+ dev = wl_to_p2p_bss_ndev(cfg, conn_idx);
+ if (ctw != -1) {
+ cfg->p2p->ops.ctw = ctw;
+ ret = 0;
+ }
+ if (ps != -1) {
+ cfg->p2p->ops.ops = ps;
+ ret = wldev_iovar_setbuf(dev,
+ "p2p_ops", &cfg->p2p->ops, sizeof(cfg->p2p->ops),
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+ if (ret < 0) {
+ CFGP2P_ERR(("fw set p2p_ops failed %d\n", ret));
+ }
+ }
+
+ if ((legacy_ps != -1) && ((legacy_ps == PM_MAX) || (legacy_ps == PM_OFF))) {
+ ret = wldev_ioctl_set(dev,
+ WLC_SET_PM, &legacy_ps, sizeof(legacy_ps));
+ if (unlikely(ret))
+ CFGP2P_ERR(("error (%d)\n", ret));
+ wl_cfg80211_update_power_mode(dev);
+ }
+ else
+ CFGP2P_ERR(("ilegal setting\n"));
+ }
+ else {
+ CFGP2P_ERR(("ERROR: set_p2p_ps in non-p2p mode\n"));
+ ret = -1;
+ }
+ return ret;
+}
+
+s32
+wl_cfgp2p_set_p2p_ecsa(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int len)
+{
+ int ch, bw;
+ s32 conn_idx;
+ s32 bssidx;
+ struct net_device *dev;
+ char smbuf[WLC_IOCTL_SMLEN];
+ wl_chan_switch_t csa_arg;
+ u32 chnsp = 0;
+ int err = 0;
+
+ CFGP2P_DBG((" Enter\n"));
+ if (wl_cfgp2p_vif_created(cfg)) {
+ sscanf(buf, "%10d %10d", &ch, &bw);
+ CFGP2P_DBG(("Enter ch %d bw %d\n", ch, bw));
+
+ bssidx = wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr);
+ if (wl_cfgp2p_find_type(cfg, bssidx, &conn_idx) != BCME_OK) {
+ return BCME_ERROR;
+ }
+ dev = wl_to_p2p_bss_ndev(cfg, conn_idx);
+ if (ch <= 0 || bw <= 0) {
+ CFGP2P_ERR(("Negative value not permitted!\n"));
+ return BCME_ERROR;
+ }
+
+ memset_s(&csa_arg, sizeof(csa_arg), 0, sizeof(csa_arg));
+ csa_arg.mode = DOT11_CSA_MODE_ADVISORY;
+ csa_arg.count = P2P_ECSA_CNT;
+ csa_arg.reg = 0;
+
+ snprintf(buf, len, "%d/%d", ch, bw);
+ chnsp = wf_chspec_aton(buf);
+ if (chnsp == 0) {
+ CFGP2P_ERR(("%s:chsp is not correct\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+ chnsp = wl_chspec_host_to_driver(chnsp);
+ csa_arg.chspec = chnsp;
+
+ err = wldev_iovar_setbuf(dev, "csa", &csa_arg, sizeof(csa_arg),
+ smbuf, sizeof(smbuf), NULL);
+ if (err) {
+ CFGP2P_ERR(("%s:set p2p_ecsa failed:%d\n", __FUNCTION__, err));
+ return BCME_ERROR;
+ }
+ } else {
+ CFGP2P_ERR(("ERROR: set_p2p_ecsa in non-p2p mode\n"));
+ return BCME_ERROR;
+ }
+ return BCME_OK;
+}
+
+s32
+wl_cfgp2p_increase_p2p_bw(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int len)
+{
+ int algo;
+ int bw;
+ int ret = BCME_OK;
+
+ sscanf(buf, "%3d", &bw);
+ if (bw == 0) {
+ algo = 0;
+ ret = wldev_iovar_setbuf(ndev, "mchan_algo", &algo, sizeof(algo), cfg->ioctl_buf,
+ WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+ if (ret < 0) {
+ CFGP2P_ERR(("fw set mchan_algo failed %d\n", ret));
+ return BCME_ERROR;
+ }
+ } else {
+ algo = 1;
+ ret = wldev_iovar_setbuf(ndev, "mchan_algo", &algo, sizeof(algo), cfg->ioctl_buf,
+ WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+ if (ret < 0) {
+ CFGP2P_ERR(("fw set mchan_algo failed %d\n", ret));
+ return BCME_ERROR;
+ }
+ ret = wldev_iovar_setbuf(ndev, "mchan_bw", &bw, sizeof(algo), cfg->ioctl_buf,
+ WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+ if (ret < 0) {
+ CFGP2P_ERR(("fw set mchan_bw failed %d\n", ret));
+ return BCME_ERROR;
+ }
+ }
+ return BCME_OK;
+}
+
+const u8 *
+wl_cfgp2p_retreive_p2pattrib(const void *buf, u8 element_id)
+{
+ const wifi_p2p_ie_t *ie = NULL;
+ u16 len = 0;
+ const u8 *subel;
+ u8 subelt_id;
+ u16 subelt_len;
+
+ if (!buf) {
+ WL_ERR(("P2P IE not present"));
+ return 0;
+ }
+
+ ie = (const wifi_p2p_ie_t*) buf;
+ len = ie->len;
+
+ /* Point subel to the P2P IE's subelt field.
+ * Subtract the preceding fields (id, len, OUI, oui_type) from the length.
+ */
+ subel = ie->subelts;
+ len -= 4; /* exclude OUI + OUI_TYPE */
+
+ while (len >= 3) {
+ /* attribute id */
+ subelt_id = *subel;
+ subel += 1;
+ len -= 1;
+
+ /* 2-byte little endian */
+ subelt_len = *subel++;
+ subelt_len |= *subel++ << 8;
+
+ len -= 2;
+ len -= subelt_len; /* for the remaining subelt fields */
+
+ if (subelt_id == element_id) {
+ /* This will point to start of subelement attrib after
+ * attribute id & len
+ */
+ return subel;
+ }
+
+ /* Go to next subelement */
+ subel += subelt_len;
+ }
+
+ /* Not Found */
+ return NULL;
+}
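+
+/* Attribute layout sketch (added for clarity): the P2P IE subelements
+ * walked above follow the Wi-Fi Direct attribute format, which is why the
+ * length is read as two little-endian bytes:
+ *
+ *   +----------+------------------+-----------------+
+ *   | id (1 B) | length (2 B, LE) | body (length B) |
+ *   +----------+------------------+-----------------+
+ *
+ * The pointer returned above references the body, i.e. the bytes following
+ * id + length.
+ */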
+
+#define P2P_GROUP_CAPAB_GO_BIT 0x01
+
+const u8*
+wl_cfgp2p_find_attrib_in_all_p2p_Ies(const u8 *parse, u32 len, u32 attrib)
+{
+ bcm_tlv_t *ie;
+ const u8* pAttrib;
+ uint ie_len;
+
+ CFGP2P_DBG(("Starting parsing parse %p attrib %d remaining len %d ", parse, attrib, len));
+ ie_len = len;
+ while ((ie = bcm_parse_tlvs(parse, ie_len, DOT11_MNG_VS_ID))) {
+ if (wl_cfgp2p_is_p2p_ie(ie, &parse, &ie_len) == TRUE) {
+ /* Have the P2p ie. Now check for attribute */
+ if ((pAttrib = wl_cfgp2p_retreive_p2pattrib(ie, attrib)) != NULL) {
+ CFGP2P_DBG(("P2P attribute %d was found at parse %p",
+ attrib, parse));
+ return pAttrib;
+ }
+ else {
+ /* move to next IE */
+ bcm_tlv_buffer_advance_past(ie, &parse, &ie_len);
+
+ CFGP2P_INFO(("P2P Attribute %d not found Moving parse"
+ " to %p len to %d", attrib, parse, ie_len));
+ }
+ }
+ else {
+ /* It was not a P2P IE; parse is advanced automatically to the next TLV */
+ CFGP2P_INFO(("It was NOT a P2P IE. parse %p len %d", parse, ie_len));
+ }
+ }
+ CFGP2P_ERR(("P2P attribute %d was NOT found", attrib));
+ return NULL;
+}
+
+const u8 *
+wl_cfgp2p_retreive_p2p_dev_addr(wl_bss_info_t *bi, u32 bi_length)
+{
+ const u8 *capability = NULL;
+ bool p2p_go = 0;
+ const u8 *ptr = NULL;
+
+ if (bi->length != bi->ie_offset + bi->ie_length) {
+ return NULL;
+ }
+
+ if ((capability = wl_cfgp2p_find_attrib_in_all_p2p_Ies(((u8 *) bi) + bi->ie_offset,
+ bi->ie_length, P2P_SEID_P2P_INFO)) == NULL) {
+ WL_ERR(("P2P Capability attribute not found"));
+ return NULL;
+ }
+
+ /* Check Group capability for Group Owner bit */
+ p2p_go = capability[1] & P2P_GROUP_CAPAB_GO_BIT;
+ if (!p2p_go) {
+ return bi->BSSID.octet;
+ }
+
+ /* In probe responses, DEVICE INFO attribute will be present */
+ if (!(ptr = wl_cfgp2p_find_attrib_in_all_p2p_Ies(((u8 *) bi) + bi->ie_offset,
+ bi->ie_length, P2P_SEID_DEV_INFO))) {
+ /* If DEVICE_INFO is not found, this might be a beacon frame.
+ * check for DEVICE_ID in the beacon frame.
+ */
+ ptr = wl_cfgp2p_find_attrib_in_all_p2p_Ies(((u8 *) bi) + bi->ie_offset,
+ bi->ie_length, P2P_SEID_DEV_ID);
+ }
+
+ if (!ptr)
+ WL_ERR((" Both DEVICE_ID & DEVICE_INFO attribute not present in P2P IE "));
+
+ return ptr;
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
+static void
+wl_cfgp2p_ethtool_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
+{
+ /* To prevent a kernel panic, fill in dummy values.
+ * Some kernels call drvinfo even if ethtool is not registered.
+ */
+ snprintf(info->driver, sizeof(info->driver), "p2p");
+ snprintf(info->version, sizeof(info->version), "%lu", (unsigned long)(0));
+}
+
+struct ethtool_ops cfgp2p_ethtool_ops = {
+ .get_drvinfo = wl_cfgp2p_ethtool_get_drvinfo
+};
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
+
+#if defined(WL_ENABLE_P2P_IF) || defined (WL_NEWCFG_PRIVCMD_SUPPORT)
+s32
+wl_cfgp2p_register_ndev(struct bcm_cfg80211 *cfg)
+{
+ int ret = 0;
+ struct net_device* net = NULL;
+#ifndef WL_NEWCFG_PRIVCMD_SUPPORT
+ struct wireless_dev *wdev = NULL;
+#endif /* WL_NEWCFG_PRIVCMD_SUPPORT */
+ uint8 temp_addr[ETHER_ADDR_LEN] = { 0x00, 0x90, 0x4c, 0x33, 0x22, 0x11 };
+
+ if (cfg->p2p_net) {
+ CFGP2P_ERR(("p2p_net defined already.\n"));
+ return -EINVAL;
+ }
+
+ /* Allocate etherdev, including space for private structure */
+ if (!(net = alloc_etherdev(sizeof(struct bcm_cfg80211 *)))) {
+ CFGP2P_ERR(("%s: OOM - alloc_etherdev\n", __FUNCTION__));
+ return -ENODEV;
+ }
+
+#ifndef WL_NEWCFG_PRIVCMD_SUPPORT
+ wdev = (struct wireless_dev *)MALLOCZ(cfg->osh, sizeof(*wdev));
+ if (unlikely(!wdev)) {
+ WL_ERR(("Could not allocate wireless device\n"));
+ free_netdev(net);
+ return -ENOMEM;
+ }
+#endif /* WL_NEWCFG_PRIVCMD_SUPPORT */
+
+ strlcpy(net->name, "p2p%d", sizeof(net->name));
+
+ /* Copy the reference to bcm_cfg80211 */
+ memcpy((void *)netdev_priv(net), &cfg, sizeof(struct bcm_cfg80211 *));
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
+ ASSERT(!net->open);
+ net->do_ioctl = wl_cfgp2p_do_ioctl;
+ net->hard_start_xmit = wl_cfgp2p_start_xmit;
+ net->open = wl_cfgp2p_if_open;
+ net->stop = wl_cfgp2p_if_stop;
+#else
+ ASSERT(!net->netdev_ops);
+ net->netdev_ops = &wl_cfgp2p_if_ops;
+#endif
+
+ /* Register with a dummy MAC addr */
+ memcpy(net->dev_addr, temp_addr, ETHER_ADDR_LEN);
+
+#ifndef WL_NEWCFG_PRIVCMD_SUPPORT
+ wdev->wiphy = cfg->wdev->wiphy;
+
+ wdev->iftype = wl_mode_to_nl80211_iftype(WL_MODE_BSS);
+
+ net->ieee80211_ptr = wdev;
+#else
+ net->ieee80211_ptr = NULL;
+#endif /* WL_NEWCFG_PRIVCMD_SUPPORT */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
+ net->ethtool_ops = &cfgp2p_ethtool_ops;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
+
+#ifndef WL_NEWCFG_PRIVCMD_SUPPORT
+ SET_NETDEV_DEV(net, wiphy_dev(wdev->wiphy));
+
+ /* Associate p2p0 network interface with new wdev */
+ wdev->netdev = net;
+#endif /* WL_NEWCFG_PRIVCMD_SUPPORT */
+
+ ret = register_netdev(net);
+ if (ret) {
+ CFGP2P_ERR((" register_netdevice failed (%d)\n", ret));
+ free_netdev(net);
+#ifndef WL_NEWCFG_PRIVCMD_SUPPORT
+ MFREE(cfg->osh, wdev, sizeof(*wdev));
+#endif /* WL_NEWCFG_PRIVCMD_SUPPORT */
+ return -ENODEV;
+ }
+
+ /* store p2p net ptr for further reference. Note that iflist won't have this
+ * entry as the corresponding firmware interface is a "hidden" interface.
+ */
+#ifndef WL_NEWCFG_PRIVCMD_SUPPORT
+ cfg->p2p_wdev = wdev;
+#else
+ cfg->p2p_wdev = NULL;
+#endif /* WL_NEWCFG_PRIVCMD_SUPPORT */
+ cfg->p2p_net = net;
+
+ WL_MSG(net->name, "P2P Interface Registered\n");
+
+ return ret;
+}
+
+s32
+wl_cfgp2p_unregister_ndev(struct bcm_cfg80211 *cfg)
+{
+ if (!cfg || !cfg->p2p_net) {
+ CFGP2P_ERR(("Invalid Ptr\n"));
+ return -EINVAL;
+ }
+
+ unregister_netdev(cfg->p2p_net);
+ free_netdev(cfg->p2p_net);
+
+ return 0;
+}
+static netdev_tx_t wl_cfgp2p_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ if (skb)
+ {
+ CFGP2P_DBG(("(%s) is not used for data operations.Droping the packet.\n",
+ ndev->name));
+ dev_kfree_skb_any(skb);
+ }
+
+ return 0;
+}
+
+static int wl_cfgp2p_do_ioctl(struct net_device *net, struct ifreq *ifr, int cmd)
+{
+ int ret = 0;
+ struct bcm_cfg80211 *cfg = *(struct bcm_cfg80211 **)netdev_priv(net);
+ struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+
+ /* There is no ifidx corresponding to p2p0 in our firmware, so we should
+ * not handle any IOCTL cmds on p2p0 other than ANDROID PRIVATE CMDs.
+ * For Android PRIV CMD handling, map it to the primary I/F.
+ */
+ if (cmd == SIOCDEVPRIVATE+1) {
+
+#if defined(OEM_ANDROID)
+ ret = wl_android_priv_cmd(ndev, ifr);
+#endif /* defined(OEM_ANDROID) */
+
+#if !defined(OEM_ANDROID)
+ (void)ndev;
+#endif
+
+ } else {
+ CFGP2P_ERR(("%s: IOCTL req 0x%x on p2p0 I/F. Ignoring. \n",
+ __FUNCTION__, cmd));
+ return -1;
+ }
+
+ return ret;
+}
+#endif /* WL_ENABLE_P2P_IF || WL_NEWCFG_PRIVCMD_SUPPORT */
+
+#if defined(WL_ENABLE_P2P_IF)
+static int wl_cfgp2p_if_open(struct net_device *net)
+{
+ struct wireless_dev *wdev = net->ieee80211_ptr;
+
+ if (!wdev || !wl_cfg80211_is_p2p_active(net))
+ return -EINVAL;
+ WL_TRACE(("Enter\n"));
+#if !defined(WL_IFACE_COMB_NUM_CHANNELS)
+ /* If the F/W download (ifconfig wlan0 up) hasn't been done by now,
+ * do it here. This makes sure that in concurrent mode the supplicant
+ * does not depend on a particular order of interface initialization,
+ * i.e. you may run wpa_supp -iwlan0 -N -ip2p0 or wpa_supp -ip2p0 -N
+ * -iwlan0.
+ */
+ wdev->wiphy->interface_modes |= (BIT(NL80211_IFTYPE_P2P_CLIENT)
+ | BIT(NL80211_IFTYPE_P2P_GO));
+#endif /* !WL_IFACE_COMB_NUM_CHANNELS */
+ wl_cfg80211_do_driver_init(net);
+
+ return 0;
+}
+
+static int wl_cfgp2p_if_stop(struct net_device *net)
+{
+ struct wireless_dev *wdev = net->ieee80211_ptr;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(net);
+
+ if (!wdev)
+ return -EINVAL;
+
+ wl_cfg80211_scan_stop(cfg, net);
+
+#if !defined(WL_IFACE_COMB_NUM_CHANNELS)
+ wdev->wiphy->interface_modes = (wdev->wiphy->interface_modes)
+ & (~(BIT(NL80211_IFTYPE_P2P_CLIENT)|
+ BIT(NL80211_IFTYPE_P2P_GO)));
+#endif /* !WL_IFACE_COMB_NUM_CHANNELS */
+ return 0;
+}
+
+bool wl_cfgp2p_is_ifops(const struct net_device_ops *if_ops)
+{
+ return (if_ops == &wl_cfgp2p_if_ops);
+}
+#endif /* WL_ENABLE_P2P_IF */
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+struct wireless_dev *
+wl_cfgp2p_add_p2p_disc_if(struct bcm_cfg80211 *cfg)
+{
+ struct wireless_dev *wdev = NULL;
+
+ if (!cfg || !cfg->p2p_supported)
+ return ERR_PTR(-EINVAL);
+
+ WL_TRACE(("Enter\n"));
+
+ if (cfg->p2p_wdev) {
+#ifndef EXPLICIT_DISCIF_CLEANUP
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+#endif /* EXPLICIT_DISCIF_CLEANUP */
+ /*
+ * This is not expected. This can happen due to
+ * supplicant crash/unclean de-initialization which
+ * didn't free the p2p discovery interface. Indicate
+ * driver hang to user space so that the framework
+ * can re-init the Wi-Fi.
+ */
+ CFGP2P_ERR(("p2p_wdev defined already.\n"));
+ wl_probe_wdev_all(cfg);
+#ifdef EXPLICIT_DISCIF_CLEANUP
+ /*
+ * CUSTOMER_HW4 design doesn't delete the p2p discovery
+ * interface on ifconfig wlan0 down context which comes
+ * without a preceding NL80211_CMD_DEL_INTERFACE for p2p
+ * discovery. But during supplicant crash the DEL_IFACE
+ * command will not happen and will cause a left over iface
+ * even after ifconfig wlan0 down. So delete the iface
+ * first and then indicate the HANG event
+ */
+ wl_cfgp2p_del_p2p_disc_if(cfg->p2p_wdev, cfg);
+#else
+ dhd->hang_reason = HANG_REASON_IFACE_DEL_FAILURE;
+
+#ifdef OEM_ANDROID
+#if defined(BCMPCIE) && defined(DHD_FW_COREDUMP)
+ if (dhd->memdump_enabled) {
+ /* Load the dongle side dump to host
+ * memory and then BUG_ON()
+ */
+ dhd->memdump_type = DUMP_TYPE_IFACE_OP_FAILURE;
+ dhd_bus_mem_dump(dhd);
+ }
+#endif /* BCMPCIE && DHD_FW_COREDUMP */
+ net_os_send_hang_message(bcmcfg_to_prmry_ndev(cfg));
+#endif /* OEM_ANDROID */
+
+ return ERR_PTR(-ENODEV);
+#endif /* EXPLICIT_DISCIF_CLEANUP */
+ }
+
+ wdev = (struct wireless_dev *)MALLOCZ(cfg->osh, sizeof(*wdev));
+ if (unlikely(!wdev)) {
+ WL_ERR(("Could not allocate wireless device\n"));
+ return ERR_PTR(-ENOMEM);
+ }
+
+ wdev->wiphy = cfg->wdev->wiphy;
+ wdev->iftype = NL80211_IFTYPE_P2P_DEVICE;
+ memcpy(wdev->address, wl_to_p2p_bss_macaddr(cfg, P2PAPI_BSSCFG_DEVICE), ETHER_ADDR_LEN);
+
+#if defined(WL_NEWCFG_PRIVCMD_SUPPORT)
+ if (cfg->p2p_net)
+ memcpy(cfg->p2p_net->dev_addr, wl_to_p2p_bss_macaddr(cfg, P2PAPI_BSSCFG_DEVICE),
+ ETHER_ADDR_LEN);
+#endif /* WL_NEWCFG_PRIVCMD_SUPPORT */
+
+ /* store p2p wdev ptr for further reference. */
+ cfg->p2p_wdev = wdev;
+
+ printf("P2P interface registered\n");
+ return wdev;
+}
+
+int
+wl_cfgp2p_start_p2p_device(struct wiphy *wiphy, struct wireless_dev *wdev)
+{
+ int ret = 0;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+
+ if (!cfg)
+ return -EINVAL;
+
+ RETURN_EIO_IF_NOT_UP(cfg);
+
+ WL_TRACE(("Enter\n"));
+
+#ifdef WL_IFACE_MGMT
+ if (wl_cfg80211_get_sec_iface(cfg) != WL_IFACE_NOT_PRESENT) {
+ /* Delay fw initialization till actual discovery. */
+ CFGP2P_ERR(("SEC IFACE present. Initialize p2p from discovery context\n"));
+ return BCME_OK;
+ }
+#endif /* WL_IFACE_MGMT */
+
+ ret = wl_cfgp2p_set_firm_p2p(cfg);
+ if (unlikely(ret < 0)) {
+ CFGP2P_ERR(("Set P2P in firmware failed, ret=%d\n", ret));
+ goto exit;
+ }
+
+ ret = wl_cfgp2p_enable_discovery(cfg, bcmcfg_to_prmry_ndev(cfg), NULL, 0);
+ if (unlikely(ret < 0)) {
+ CFGP2P_ERR(("P2P enable discovery failed, ret=%d\n", ret));
+ goto exit;
+ }
+
+ p2p_on(cfg) = true;
+#if defined(P2P_IE_MISSING_FIX)
+ cfg->p2p_prb_noti = false;
+#endif
+
+ CFGP2P_DBG(("P2P interface started\n"));
+
+exit:
+ return ret;
+}
+
+void
+wl_cfgp2p_stop_p2p_device(struct wiphy *wiphy, struct wireless_dev *wdev)
+{
+ int ret = 0;
+ struct net_device *ndev = NULL;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+
+ if (!cfg)
+ return;
+
+ CFGP2P_DBG(("Enter\n"));
+
+ /* Check if cfg80211 interface is already down */
+ ndev = bcmcfg_to_prmry_ndev(cfg);
+ if (!wl_get_drv_status(cfg, READY, ndev)) {
+ WL_DBG(("cfg80211 interface is already down\n"));
+ return; /* it is even not ready */
+ }
+
+ ret = wl_cfg80211_scan_stop(cfg, wdev);
+ if (unlikely(ret < 0)) {
+ CFGP2P_ERR(("P2P scan stop failed, ret=%d\n", ret));
+ }
+
+ if (!p2p_is_on(cfg)) {
+ return;
+ }
+
+#ifdef P2P_LISTEN_OFFLOADING
+ wl_cfg80211_p2plo_deinit(cfg);
+#endif /* P2P_LISTEN_OFFLOADING */
+
+ /* Cancel any on-going listen */
+ wl_cfgp2p_cancel_listen(cfg, bcmcfg_to_prmry_ndev(cfg), wdev, TRUE);
+
+ ret = wl_cfgp2p_disable_discovery(cfg);
+ if (unlikely(ret < 0)) {
+ CFGP2P_ERR(("P2P disable discovery failed, ret=%d\n", ret));
+ }
+
+ p2p_on(cfg) = false;
+
+ CFGP2P_DBG(("Exit. P2P interface stopped\n"));
+
+ return;
+}
+
+int
+wl_cfgp2p_del_p2p_disc_if(struct wireless_dev *wdev, struct bcm_cfg80211 *cfg)
+{
+ bool rollback_lock = false;
+
+ if (!wdev || !cfg) {
+ WL_ERR(("wdev or cfg is NULL\n"));
+ return -EINVAL;
+ }
+
+ WL_INFORM(("Enter\n"));
+
+ if (!cfg->p2p_wdev) {
+ WL_ERR(("Already deleted p2p_wdev\n"));
+ return -EINVAL;
+ }
+
+ /* Ensure discovery i/f is deinitialized */
+ if (wl_cfgp2p_disable_discovery(cfg) != BCME_OK) {
+ /* Discard errors in the deinit path; FW state
+ * recovery will happen in the wl down/reset
+ * context.
+ */
+ CFGP2P_ERR(("p2p disable disc failed\n"));
+ }
+
+ if (!rtnl_is_locked()) {
+ rtnl_lock();
+ rollback_lock = true;
+ }
+
+ cfg80211_unregister_wdev(wdev);
+
+ if (rollback_lock)
+ rtnl_unlock();
+
+ synchronize_rcu();
+
+ MFREE(cfg->osh, wdev, sizeof(*wdev));
+
+ cfg->p2p_wdev = NULL;
+
+ CFGP2P_ERR(("P2P interface unregistered\n"));
+
+ return 0;
+}
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+void
+wl_cfgp2p_need_wait_actfrmae(struct bcm_cfg80211 *cfg, void *frame, u32 frame_len, bool tx)
+{
+ wifi_p2p_pub_act_frame_t *pact_frm;
+ int status = 0;
+
+ if (!frame || (frame_len < (sizeof(*pact_frm) + WL_P2P_AF_STATUS_OFFSET - 1))) {
+ return;
+ }
+
+ if (wl_cfgp2p_is_pub_action(frame, frame_len)) {
+ pact_frm = (wifi_p2p_pub_act_frame_t *)frame;
+ if (pact_frm->subtype == P2P_PAF_GON_RSP && tx) {
+ CFGP2P_ACTION(("Check TX P2P Group Owner Negotiation Rsp Frame status\n"));
+ status = pact_frm->elts[WL_P2P_AF_STATUS_OFFSET];
+ if (status) {
+ cfg->need_wait_afrx = false;
+ return;
+ }
+ }
+ }
+
+ cfg->need_wait_afrx = true;
+ return;
+}
+
+int
+wl_cfgp2p_is_p2p_specific_scan(struct cfg80211_scan_request *request)
+{
+ if (request && (request->n_ssids == 1) &&
+ (request->n_channels == 1) &&
+ IS_P2P_SSID(request->ssids[0].ssid, WL_P2P_WILDCARD_SSID_LEN) &&
+ (request->ssids[0].ssid_len > WL_P2P_WILDCARD_SSID_LEN)) {
+ return true;
+ }
+ return false;
+}
diff --git a/bcmdhd.101.10.361.x/wl_cfgp2p.h b/bcmdhd.101.10.361.x/wl_cfgp2p.h
new file mode 100755
index 0000000..58a161a
--- /dev/null
+++ b/bcmdhd.101.10.361.x/wl_cfgp2p.h
@@ -0,0 +1,488 @@
+/*
+ * Linux cfgp2p driver
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+#ifndef _wl_cfgp2p_h_
+#define _wl_cfgp2p_h_
+#include <802.11.h>
+#include <p2p.h>
+
+struct bcm_cfg80211;
+extern u32 wl_dbg_level;
+
+typedef struct wifi_p2p_ie wifi_wfd_ie_t;
+/* Enumeration of the usages of the BSSCFGs used by the P2P Library. Do not
+ * confuse this with a bsscfg index. This value is an index into the
+ * saved_ie[] array of structures which in turn contains a bsscfg index field.
+ */
+typedef enum {
+ P2PAPI_BSSCFG_PRIMARY, /**< maps to driver's primary bsscfg */
+ P2PAPI_BSSCFG_DEVICE, /**< maps to driver's P2P device discovery bsscfg */
+ P2PAPI_BSSCFG_CONNECTION1, /**< maps to driver's P2P connection bsscfg */
+ P2PAPI_BSSCFG_CONNECTION2,
+ P2PAPI_BSSCFG_MAX
+} p2p_bsscfg_type_t;
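+/* Illustrative note: these values index cfg->p2p->bss[]; e.g.
+ * wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) (see the accessor
+ * macros later in this header) yields the firmware bsscfg index used
+ * for P2P device discovery.
+ */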
+
+typedef enum {
+ P2P_SCAN_PURPOSE_MIN,
+ P2P_SCAN_SOCIAL_CHANNEL, /**< scan for social channel */
+ P2P_SCAN_AFX_PEER_NORMAL, /**< scan for action frame search */
+ P2P_SCAN_AFX_PEER_REDUCED, /**< scan for action frame search with short time */
+ P2P_SCAN_DURING_CONNECTED, /**< scan during connected status */
+ P2P_SCAN_CONNECT_TRY, /**< scan for connecting */
+ P2P_SCAN_NORMAL, /**< scan during not-connected status */
+ P2P_SCAN_PURPOSE_MAX
+} p2p_scan_purpose_t;
+
+/** vendor ies max buffer length for probe response or beacon */
+#define VNDR_IES_MAX_BUF_LEN 1400
+/** normal vendor ies buffer length */
+#define VNDR_IES_BUF_LEN 512
+
+struct p2p_bss {
+ s32 bssidx;
+ struct net_device *dev;
+ void *private_data;
+ struct ether_addr mac_addr;
+};
+
+struct p2p_info {
+ bool on; /**< p2p on/off switch */
+ bool scan;
+ int16 search_state;
+ s8 vir_ifname[IFNAMSIZ];
+ unsigned long status;
+ struct p2p_bss bss[P2PAPI_BSSCFG_MAX];
+ timer_list_compat_t listen_timer;
+ wl_p2p_sched_t noa;
+ wl_p2p_ops_t ops;
+ wlc_ssid_t ssid;
+ s8 p2p_go_count;
+};
+
+#define MAX_VNDR_IE_NUMBER 10
+
+struct parsed_vndr_ie_info {
+ const char *ie_ptr;
+ u32 ie_len; /**< total length including id & length field */
+ vndr_ie_t vndrie;
+};
+
+struct parsed_vndr_ies {
+ u32 count;
+ struct parsed_vndr_ie_info ie_info[MAX_VNDR_IE_NUMBER];
+};
+
+/* dongle status */
+enum wl_cfgp2p_status {
+ WLP2P_STATUS_DISCOVERY_ON = 0,
+ WLP2P_STATUS_SEARCH_ENABLED,
+ WLP2P_STATUS_IF_ADDING,
+ WLP2P_STATUS_IF_DELETING,
+ WLP2P_STATUS_IF_CHANGING,
+ WLP2P_STATUS_IF_CHANGED,
+ WLP2P_STATUS_LISTEN_EXPIRED,
+ WLP2P_STATUS_ACTION_TX_COMPLETED,
+ WLP2P_STATUS_ACTION_TX_NOACK,
+ WLP2P_STATUS_SCANNING,
+ WLP2P_STATUS_GO_NEG_PHASE,
+ WLP2P_STATUS_DISC_IN_PROGRESS
+};
+
+#define wl_to_p2p_bss_ndev(cfg, type) ((cfg)->p2p->bss[type].dev)
+#define wl_to_p2p_bss_bssidx(cfg, type) ((cfg)->p2p->bss[type].bssidx)
+#define wl_to_p2p_bss_macaddr(cfg, type) &((cfg)->p2p->bss[type].mac_addr)
+#define wl_to_p2p_bss_saved_ie(cfg, type) ((cfg)->p2p->bss[type].saved_ie)
+#define wl_to_p2p_bss_private(cfg, type) ((cfg)->p2p->bss[type].private_data)
+#define wl_to_p2p_bss(cfg, type) ((cfg)->p2p->bss[type])
+#define wl_get_p2p_status(cfg, stat) ((!(cfg)->p2p_supported) ? 0 : \
+ test_bit(WLP2P_STATUS_ ## stat, &(cfg)->p2p->status))
+#define wl_set_p2p_status(cfg, stat) ((!(cfg)->p2p_supported) ? 0 : \
+ set_bit(WLP2P_STATUS_ ## stat, &(cfg)->p2p->status))
+#define wl_clr_p2p_status(cfg, stat) ((!(cfg)->p2p_supported) ? 0 : \
+ clear_bit(WLP2P_STATUS_ ## stat, &(cfg)->p2p->status))
+#define wl_chg_p2p_status(cfg, stat) ((!(cfg)->p2p_supported) ? 0 : \
+ change_bit(WLP2P_STATUS_ ## stat, &(cfg)->p2p->status))
+#define p2p_on(cfg) ((cfg)->p2p->on)
+#define p2p_scan(cfg) ((cfg)->p2p->scan)
+#define p2p_is_on(cfg) ((cfg)->p2p && (cfg)->p2p->on)
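+/* Illustrative usage: the status macros token-paste the 'stat' argument
+ * onto WLP2P_STATUS_, so callers pass the bare suffix, e.g.
+ *
+ *   wl_set_p2p_status(cfg, SCANNING);        sets WLP2P_STATUS_SCANNING
+ *   if (wl_get_p2p_status(cfg, SCANNING))    tests the same bit
+ *           wl_clr_p2p_status(cfg, SCANNING);
+ */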
+
+/* dword align allocation */
+#define WLC_IOCTL_MAXLEN 8192
+
+#if defined(CUSTOMER_DBG_PREFIX_ENABLE)
+#define USER_PREFIX_CFGP2P "[cfgp2p][wlan] "
+#define CFGP2P_ERROR_TEXT USER_PREFIX_CFGP2P
+#define CFGP2P_INFO_TEXT USER_PREFIX_CFGP2P
+#define CFGP2P_ACTION_TEXT USER_PREFIX_CFGP2P
+#define CFGP2P_DEBUG_TEXT USER_PREFIX_CFGP2P
+#else
+/* Samsung wants to print INFO2 instead of ERROR
+ * because in most cases the ERROR message is not a real error,
+ * yet a tester may regard it as one.
+ */
+#ifdef CUSTOMER_HW4_DEBUG
+#define CFGP2P_ERROR_TEXT "CFGP2P-INFO2) "
+#else
+#define CFGP2P_ERROR_TEXT "CFGP2P-ERROR) "
+#endif /* CUSTOMER_HW4_DEBUG */
+#define CFGP2P_INFO_TEXT "CFGP2P-INFO) "
+#define CFGP2P_ACTION_TEXT "CFGP2P-ACTION) "
+#define CFGP2P_DEBUG_TEXT "CFGP2P-DEBUG) "
+#endif /* defined(CUSTOMER_DBG_PREFIX_ENABLE) */
+
+#ifdef DHD_LOG_DUMP
+#define CFGP2P_ERR_MSG(x, args...) \
+ do { \
+ if (wl_dbg_level & WL_DBG_ERR) { \
+ printf(CFGP2P_ERROR_TEXT "%s : " x, __func__, ## args); \
+ DHD_LOG_DUMP_WRITE_TS_FN; \
+ DHD_LOG_DUMP_WRITE(x, ## args); \
+ } \
+ } while (0)
+#define CFGP2P_ERR(x) CFGP2P_ERR_MSG x
+#define CFGP2P_INFO_MSG(x, args...) \
+ do { \
+ if (wl_dbg_level & WL_DBG_INFO) { \
+ printf(CFGP2P_INFO_TEXT "%s : " x, __func__, ## args); \
+ DHD_LOG_DUMP_WRITE_TS_FN; \
+ DHD_LOG_DUMP_WRITE(x, ## args); \
+ } \
+ } while (0)
+#define CFGP2P_INFO(x) CFGP2P_INFO_MSG x
+#define CFGP2P_ACTION_MSG(x, args...) \
+ do { \
+ if (wl_dbg_level & WL_DBG_P2P_ACTION) { \
+ printf(CFGP2P_ACTION_TEXT "%s : " x, __func__, ## args); \
+ DHD_LOG_DUMP_WRITE_TS_FN; \
+ DHD_LOG_DUMP_WRITE(x, ## args); \
+ } \
+ } while (0)
+#define CFGP2P_ACTION(x) CFGP2P_ACTION_MSG x
+#else
+#define CFGP2P_ERR_MSG(x, args...) \
+ do { \
+ if (wl_dbg_level & WL_DBG_ERR) { \
+ printf(CFGP2P_ERROR_TEXT "%s : " x, __func__, ## args); \
+ } \
+ } while (0)
+#define CFGP2P_ERR(x) CFGP2P_ERR_MSG x
+#define CFGP2P_INFO_MSG(x, args...) \
+ do { \
+ if (wl_dbg_level & WL_DBG_INFO) { \
+ printf(CFGP2P_INFO_TEXT "%s : " x, __func__, ## args); \
+ } \
+ } while (0)
+#define CFGP2P_INFO(x) CFGP2P_INFO_MSG x
+#define CFGP2P_ACTION_MSG(x, args...) \
+ do { \
+ if (wl_dbg_level & WL_DBG_P2P_ACTION) { \
+ printf(CFGP2P_ACTION_TEXT "%s : " x, __func__, ## args); \
+ } \
+ } while (0)
+#define CFGP2P_ACTION(x) CFGP2P_ACTION_MSG x
+#endif /* DHD_LOG_DUMP */
+
+#define CFGP2P_DBG_MSG(x, args...) \
+ do { \
+ if (wl_dbg_level & WL_DBG_DBG) { \
+ printf(CFGP2P_DEBUG_TEXT "%s : " x, __func__, ## args); \
+ } \
+ } while (0)
+#define CFGP2P_DBG(x) CFGP2P_DBG_MSG x
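+/* Note: CFGP2P_ERR/INFO/ACTION/DBG expand to their *_MSG variants applied to
+ * a parenthesized argument list, hence the double parentheses at call sites:
+ *
+ *   CFGP2P_ERR(("enable discovery failed, ret=%d\n", ret));
+ */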
+
+#define INIT_TIMER(timer, func, duration, extra_delay) \
+ do { \
+ init_timer_compat(timer, func, cfg); \
+ timer_expires(timer) = jiffies + msecs_to_jiffies(duration + extra_delay); \
+ add_timer(timer); \
+ } while (0);
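+/* Illustrative usage (duration_ms is a hypothetical local): INIT_TIMER arms
+ * a one-shot compat timer, e.g.
+ *
+ *   INIT_TIMER(&cfg->p2p->listen_timer, wl_cfgp2p_listen_expired, duration_ms, 0);
+ *
+ * Note the macro body references the caller's local 'cfg', so it can only be
+ * used where a 'struct bcm_cfg80211 *cfg' is in scope.
+ */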
+
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 0, 8))
+#ifdef WL_SUPPORT_BACKPORTED_KPATCHES
+#undef WL_SUPPORT_BACKPORTED_KPATCHES
+#endif
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0))
+#ifdef WL_CFG80211_STA_EVENT
+#undef WL_CFG80211_STA_EVENT
+#endif
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)) && \
+ !defined(WL_CFG80211_P2P_DEV_IF)
+#define WL_CFG80211_P2P_DEV_IF
+
+#ifdef WL_ENABLE_P2P_IF
+#undef WL_ENABLE_P2P_IF
+#endif
+
+#ifdef WL_SUPPORT_BACKPORTED_KPATCHES
+#undef WL_SUPPORT_BACKPORTED_KPATCHES
+#endif
+#else
+#ifdef WLP2P
+#ifndef WL_ENABLE_P2P_IF
+/* Enable the P2P network interface if P2P support is enabled */
+#define WL_ENABLE_P2P_IF
+#endif /* WL_ENABLE_P2P_IF */
+#endif /* WLP2P */
+#endif /* (LINUX_VERSION >= VERSION(3, 8, 0)) */
+
+#ifndef WL_CFG80211_P2P_DEV_IF
+#ifdef WL_NEWCFG_PRIVCMD_SUPPORT
+#undef WL_NEWCFG_PRIVCMD_SUPPORT
+#endif
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+#if defined(WL_ENABLE_P2P_IF) && (defined(WL_CFG80211_P2P_DEV_IF) || \
+ (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)))
+#error Disable 'WL_ENABLE_P2P_IF', if 'WL_CFG80211_P2P_DEV_IF' is enabled \
+ or kernel version is 3.8.0 or above
+#endif /* WL_ENABLE_P2P_IF && (WL_CFG80211_P2P_DEV_IF || (LINUX_VERSION >= VERSION(3, 8, 0))) */
+
+#if !defined(WLP2P) && \
+ (defined(WL_ENABLE_P2P_IF) || defined(WL_CFG80211_P2P_DEV_IF))
+#error WLP2P not defined
+#endif /* !WLP2P && (WL_ENABLE_P2P_IF || WL_CFG80211_P2P_DEV_IF) */
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+#define bcm_struct_cfgdev struct wireless_dev
+#else
+#define bcm_struct_cfgdev struct net_device
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+/* If we take 10 or 30 as the count value, the operation
+ * may fail due to full scans and noisy environments,
+ * so we choose 50 as the optimum value for P2P ECSA.
+ */
+#define P2P_ECSA_CNT 50
+
+extern void
+wl_cfgp2p_listen_expired(unsigned long data);
+extern bool
+wl_cfgp2p_is_pub_action(void *frame, u32 frame_len);
+extern bool
+wl_cfgp2p_is_p2p_action(void *frame, u32 frame_len);
+extern bool
+wl_cfgp2p_is_gas_action(void *frame, u32 frame_len);
+extern bool
+wl_cfgp2p_is_p2p_gas_action(void *frame, u32 frame_len);
+extern void
+wl_cfgp2p_print_actframe(bool tx, void *frame, u32 frame_len, u32 channel);
+extern s32
+wl_cfgp2p_init_priv(struct bcm_cfg80211 *cfg);
+extern void
+wl_cfgp2p_deinit_priv(struct bcm_cfg80211 *cfg);
+extern s32
+wl_cfgp2p_set_firm_p2p(struct bcm_cfg80211 *cfg);
+extern s32
+wl_cfgp2p_set_p2p_mode(struct bcm_cfg80211 *cfg, u8 mode,
+ u32 channel, u16 listen_ms, int bssidx);
+extern s32
+wl_cfgp2p_ifadd(struct bcm_cfg80211 *cfg, struct ether_addr *mac, u8 if_type,
+ chanspec_t chspec);
+extern s32
+wl_cfgp2p_ifdisable(struct bcm_cfg80211 *cfg, struct ether_addr *mac);
+extern s32
+wl_cfgp2p_ifdel(struct bcm_cfg80211 *cfg, struct ether_addr *mac);
+extern s32
+wl_cfgp2p_ifchange(struct bcm_cfg80211 *cfg, struct ether_addr *mac, u8 if_type,
+ chanspec_t chspec, s32 conn_idx);
+
+extern s32
+wl_cfgp2p_ifidx(struct bcm_cfg80211 *cfg, struct ether_addr *mac, s32 *index);
+
+extern s32
+wl_cfgp2p_init_discovery(struct bcm_cfg80211 *cfg);
+extern s32
+wl_cfgp2p_enable_discovery(struct bcm_cfg80211 *cfg, struct net_device *dev, const u8 *ie,
+ u32 ie_len);
+extern s32
+wl_cfgp2p_disable_discovery(struct bcm_cfg80211 *cfg);
+extern s32
+wl_cfgp2p_escan(struct bcm_cfg80211 *cfg, struct net_device *dev, u16 active, u32 num_chans,
+ u16 *channels,
+ s32 search_state, u16 action, u32 bssidx, struct ether_addr *tx_dst_addr,
+ p2p_scan_purpose_t p2p_scan_purpose);
+
+extern s32
+wl_cfgp2p_act_frm_search(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ s32 bssidx, s32 channel, struct ether_addr *tx_dst_addr);
+
+extern const wpa_ie_fixed_t *
+wl_cfgp2p_find_wpaie(const u8 *parse, u32 len);
+
+extern const wpa_ie_fixed_t *
+wl_cfgp2p_find_wpsie(const u8 *parse, u32 len);
+
+extern wifi_p2p_ie_t *
+wl_cfgp2p_find_p2pie(const u8 *parse, u32 len);
+
+extern const wifi_wfd_ie_t *
+wl_cfgp2p_find_wfdie(const u8 *parse, u32 len);
+extern s32
+wl_cfgp2p_set_management_ie(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bssidx,
+ s32 pktflag, const u8 *vndr_ie, u32 vndr_ie_len);
+extern s32
+wl_cfgp2p_clear_management_ie(struct bcm_cfg80211 *cfg, s32 bssidx);
+
+extern struct net_device *
+wl_cfgp2p_find_ndev(struct bcm_cfg80211 *cfg, s32 bssidx);
+extern s32
+wl_cfgp2p_find_type(struct bcm_cfg80211 *cfg, s32 bssidx, s32 *type);
+
+extern s32
+wl_cfgp2p_listen_complete(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data);
+extern s32
+wl_cfgp2p_discover_listen(struct bcm_cfg80211 *cfg, s32 channel, u32 duration_ms);
+
+extern s32
+wl_cfgp2p_discover_enable_search(struct bcm_cfg80211 *cfg, u8 enable);
+
+extern s32
+wl_cfgp2p_action_tx_complete(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data);
+
+extern s32
+wl_cfgp2p_tx_action_frame(struct bcm_cfg80211 *cfg, struct net_device *dev,
+ wl_af_params_t *af_params, s32 bssidx);
+
+extern void
+wl_cfgp2p_generate_bss_mac(struct bcm_cfg80211 *cfg, struct ether_addr *primary_addr);
+
+extern void
+wl_cfg80211_change_ifaddr(u8* buf, struct ether_addr *p2p_int_addr, u8 element_id);
+
+extern s32
+wl_cfgp2p_supported(struct bcm_cfg80211 *cfg, struct net_device *ndev);
+
+extern s32
+wl_cfgp2p_down(struct bcm_cfg80211 *cfg);
+
+extern s32
+wl_cfgp2p_set_p2p_noa(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int len);
+
+extern s32
+wl_cfgp2p_get_p2p_noa(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int len);
+
+extern s32
+wl_cfgp2p_set_p2p_ps(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int len);
+
+extern s32
+wl_cfgp2p_set_p2p_ecsa(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int len);
+
+extern s32
+wl_cfgp2p_increase_p2p_bw(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int len);
+
+extern const u8 *
+wl_cfgp2p_retreive_p2pattrib(const void *buf, u8 element_id);
+
+extern const u8*
+wl_cfgp2p_find_attrib_in_all_p2p_Ies(const u8 *parse, u32 len, u32 attrib);
+
+extern const u8 *
+wl_cfgp2p_retreive_p2p_dev_addr(wl_bss_info_t *bi, u32 bi_length);
+
+extern s32
+wl_cfgp2p_register_ndev(struct bcm_cfg80211 *cfg);
+
+extern s32
+wl_cfgp2p_unregister_ndev(struct bcm_cfg80211 *cfg);
+
+extern bool
+wl_cfgp2p_is_ifops(const struct net_device_ops *if_ops);
+
+extern u32
+wl_cfgp2p_vndr_ie(struct bcm_cfg80211 *cfg, u8 *iebuf, s32 pktflag,
+ s8 *oui, s32 ie_id, const s8 *data, s32 datalen, const s8* add_del_cmd);
+
+extern int wl_cfgp2p_get_conn_idx(struct bcm_cfg80211 *cfg);
+
+extern
+int wl_cfg_multip2p_operational(struct bcm_cfg80211 *cfg);
+
+extern
+int wl_cfgp2p_vif_created(struct bcm_cfg80211 *cfg);
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+extern struct wireless_dev *
+wl_cfgp2p_add_p2p_disc_if(struct bcm_cfg80211 *cfg);
+
+extern int
+wl_cfgp2p_start_p2p_device(struct wiphy *wiphy, struct wireless_dev *wdev);
+
+extern void
+wl_cfgp2p_stop_p2p_device(struct wiphy *wiphy, struct wireless_dev *wdev);
+
+extern int
+wl_cfgp2p_del_p2p_disc_if(struct wireless_dev *wdev, struct bcm_cfg80211 *cfg);
+
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+extern void
+wl_cfgp2p_need_wait_actfrmae(struct bcm_cfg80211 *cfg, void *frame, u32 frame_len, bool tx);
+
+extern int
+wl_cfgp2p_is_p2p_specific_scan(struct cfg80211_scan_request *request);
+
+/* WiFi Direct */
+#define SOCIAL_CHAN_1 1
+#define SOCIAL_CHAN_2 6
+#define SOCIAL_CHAN_3 11
+#define IS_P2P_SOCIAL_CHANNEL(channel) ((channel == SOCIAL_CHAN_1) || \
+ (channel == SOCIAL_CHAN_2) || \
+ (channel == SOCIAL_CHAN_3))
+#define SOCIAL_CHAN_CNT 3
+#define AF_PEER_SEARCH_CNT 2
+#define WL_P2P_WILDCARD_SSID "DIRECT-"
+#define WL_P2P_WILDCARD_SSID_LEN 7
+#define WL_P2P_INTERFACE_PREFIX "p2p"
+#define WL_P2P_TEMP_CHAN 11
+#define WL_P2P_TEMP_CHAN_5G 36
+#define WL_P2P_AF_STATUS_OFFSET 9
+
+/* If the provision discovery is for JOIN operations,
+ * or the device discoverablity frame is destined to GO
+ * then we need not do an internal scan to find GO.
+ */
+#define IS_ACTPUB_WITHOUT_GROUP_ID(p2p_ie, len) \
+ (wl_cfgp2p_retreive_p2pattrib(p2p_ie, P2P_SEID_GROUP_ID) == NULL)
+
+#define IS_GAS_REQ(frame, len) (wl_cfgp2p_is_gas_action(frame, len) && \
+ ((frame->action == P2PSD_ACTION_ID_GAS_IREQ) || \
+ (frame->action == P2PSD_ACTION_ID_GAS_CREQ)))
+
+#define IS_P2P_PUB_ACT_RSP_SUBTYPE(subtype) ((subtype == P2P_PAF_GON_RSP) || \
+ ((subtype == P2P_PAF_GON_CONF) || \
+ (subtype == P2P_PAF_INVITE_RSP) || \
+ (subtype == P2P_PAF_PROVDIS_RSP)))
+#define IS_P2P_SOCIAL(ch) ((ch == SOCIAL_CHAN_1) || (ch == SOCIAL_CHAN_2) || (ch == SOCIAL_CHAN_3))
+#define IS_P2P_SSID(ssid, len) (!memcmp(ssid, WL_P2P_WILDCARD_SSID, WL_P2P_WILDCARD_SSID_LEN) && \
+ (len >= WL_P2P_WILDCARD_SSID_LEN))
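+/* Illustrative example: an SSID of "DIRECT-ab-MyPhone" satisfies IS_P2P_SSID()
+ * because its first WL_P2P_WILDCARD_SSID_LEN (7) bytes match "DIRECT-".
+ */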
+
+/* Min FW ver required to support chanspec
+ * instead of channel in actframe iovar.
+ */
+#define FW_MAJOR_VER_ACTFRAME_CHSPEC 14
+#endif /* _wl_cfgp2p_h_ */
diff --git a/bcmdhd.101.10.361.x/wl_cfgscan.c b/bcmdhd.101.10.361.x/wl_cfgscan.c
new file mode 100755
index 0000000..9287940
--- /dev/null
+++ b/bcmdhd.101.10.361.x/wl_cfgscan.c
@@ -0,0 +1,5637 @@
+/*
+ * Linux cfg80211 driver scan related code
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+/* */
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+#include <linux/kernel.h>
+
+#include <bcmutils.h>
+#include <bcmstdlib_s.h>
+#include <bcmwifi_channels.h>
+#include <bcmendian.h>
+#include <ethernet.h>
+#include <802.11.h>
+#include <bcmiov.h>
+#include <linux/if_arp.h>
+#include <asm/uaccess.h>
+
+#include <ethernet.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include <linux/sched.h>
+#include <linux/etherdevice.h>
+#include <linux/wireless.h>
+#include <linux/ieee80211.h>
+#include <linux/wait.h>
+#if defined(CONFIG_TIZEN)
+#include <linux/net_stat_tizen.h>
+#endif /* CONFIG_TIZEN */
+#include <net/cfg80211.h>
+#include <net/rtnetlink.h>
+
+#include <wlioctl.h>
+#include <bcmevent.h>
+#include <wldev_common.h>
+#include <wl_cfg80211.h>
+#include <wl_cfgscan.h>
+#include <wl_cfgp2p.h>
+#include <wl_cfgvif.h>
+#include <bcmdevs.h>
+
+#ifdef OEM_ANDROID
+#include <wl_android.h>
+#endif
+
+#if defined(BCMDONGLEHOST)
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_linux.h>
+#include <dhd_debug.h>
+#include <dhdioctl.h>
+#include <wlioctl.h>
+#include <dhd_cfg80211.h>
+#include <dhd_bus.h>
+#include <wl_cfgvendor.h>
+#endif /* defined(BCMDONGLEHOST) */
+#ifdef BCMPCIE
+#include <dhd_flowring.h>
+#endif
+#ifdef PNO_SUPPORT
+#include <dhd_pno.h>
+#endif /* PNO_SUPPORT */
+#ifdef RTT_SUPPORT
+#include "dhd_rtt.h"
+#endif /* RTT_SUPPORT */
+#include <dhd_config.h>
+
+#define ACTIVE_SCAN 1
+#define PASSIVE_SCAN 0
+
+#define MIN_P2P_IE_LEN 8 /* p2p_ie->OUI(3) + p2p_ie->oui_type(1) +
+ * Attribute ID(1) + Length(2) + 1 (minimum data length: 1)
+ */
+#define MAX_P2P_IE_LEN 251 /* Up To 251 */
+
+#define WPS_ATTR_REQ_TYPE 0x103a
+#define WPS_REQ_TYPE_ENROLLEE 0x01
+#define SCAN_WAKE_LOCK_MARGIN_MS 500
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+#define CFG80211_READY_ON_CHANNEL(cfgdev, cookie, channel, channel_type, duration, flags) \
+ cfg80211_ready_on_channel(cfgdev, cookie, channel, duration, GFP_KERNEL);
+#else
+#define CFG80211_READY_ON_CHANNEL(cfgdev, cookie, channel, channel_type, duration, flags) \
+ cfg80211_ready_on_channel(cfgdev, cookie, channel, channel_type, duration, GFP_KERNEL);
+#endif /* WL_CFG80211_P2P_DEV_IF */
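+/* Summary note (hedged): on kernels providing WL_CFG80211_P2P_DEV_IF
+ * (>= 3.8), cfg80211_ready_on_channel() no longer takes a channel_type
+ * argument, so the wrapper drops it; older kernels still pass it through.
+ */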
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
+#define CFG80211_SCHED_SCAN_STOPPED(wiphy, schedscan_req) \
+ cfg80211_sched_scan_stopped(wiphy, schedscan_req->reqid);
+#else
+#define CFG80211_SCHED_SCAN_STOPPED(wiphy, schedscan_req) \
+ cfg80211_sched_scan_stopped(wiphy);
+#endif /* KERNEL >= 4.11.0 */
+
+#ifdef DHD_GET_VALID_CHANNELS
+#define IS_DFS(chaninfo) ((chaninfo & WL_CHAN_RADAR) || \
+ (chaninfo & WL_CHAN_PASSIVE))
+#endif /* DHD_GET_VALID_CHANNELS */
+
+#if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME)
+#define FIRST_SCAN_ACTIVE_DWELL_TIME_MS 40
+bool g_first_broadcast_scan = TRUE;
+#endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */
+#ifdef CUSTOMER_HW4_DEBUG
+bool wl_scan_timeout_dbg_enabled = 0;
+#endif /* CUSTOMER_HW4_DEBUG */
+#ifdef P2P_LISTEN_OFFLOADING
+void wl_cfg80211_cancel_p2plo(struct bcm_cfg80211 *cfg);
+#endif /* P2P_LISTEN_OFFLOADING */
+static void _wl_notify_scan_done(struct bcm_cfg80211 *cfg, bool aborted);
+static s32 wl_notify_escan_complete(struct bcm_cfg80211 *cfg,
+ struct net_device *ndev, bool aborted);
+void wl_cfgscan_scan_abort(struct bcm_cfg80211 *cfg);
+static void _wl_cfgscan_cancel_scan(struct bcm_cfg80211 *cfg);
+
+#ifdef ESCAN_CHANNEL_CACHE
+void reset_roam_cache(struct bcm_cfg80211 *cfg);
+void add_roam_cache(struct bcm_cfg80211 *cfg, wl_bss_info_t *bi);
+int get_roam_channel_list(struct bcm_cfg80211 *cfg, chanspec_t target_chan, chanspec_t *channels,
+ int n_channels, const wlc_ssid_t *ssid, int ioctl_ver);
+void set_roam_band(int band);
+#endif /* ESCAN_CHANNEL_CACHE */
+
+#ifdef ROAM_CHANNEL_CACHE
+void print_roam_cache(struct bcm_cfg80211 *cfg);
+#endif /* ROAM_CHANNEL_CACHE */
+
+extern int passive_channel_skip;
+
+#ifdef DUAL_ESCAN_RESULT_BUFFER
+static wl_scan_results_t *
+wl_escan_get_buf(struct bcm_cfg80211 *cfg, bool aborted)
+{
+ u8 index;
+ if (aborted) {
+ if (cfg->escan_info.escan_type[0] == cfg->escan_info.escan_type[1]) {
+ index = (cfg->escan_info.cur_sync_id + 1)%SCAN_BUF_CNT;
+ } else {
+ index = (cfg->escan_info.cur_sync_id)%SCAN_BUF_CNT;
+ }
+ } else {
+ index = (cfg->escan_info.cur_sync_id)%SCAN_BUF_CNT;
+ }
+
+ return (wl_scan_results_t *)cfg->escan_info.escan_buf[index];
+}
+static int
+wl_escan_check_sync_id(struct bcm_cfg80211 *cfg, s32 status, u16 result_id, u16 wl_id)
+{
+ if (result_id != wl_id) {
+ WL_ERR(("ESCAN sync id mismatch :status :%d "
+ "cur_sync_id:%d coming sync_id:%d\n",
+ status, wl_id, result_id));
+#ifdef DHD_SEND_HANG_ESCAN_SYNCID_MISMATCH
+ if (cfg->escan_info.prev_escan_aborted == FALSE) {
+ wl_cfg80211_handle_hang_event(bcmcfg_to_prmry_ndev(cfg),
+ HANG_REASON_ESCAN_SYNCID_MISMATCH, DUMP_TYPE_ESCAN_SYNCID_MISMATCH);
+ }
+#endif /* DHD_SEND_HANG_ESCAN_SYNCID_MISMATCH */
+ return -1;
+ } else {
+ return 0;
+ }
+}
+#define wl_escan_increment_sync_id(a, b) ((a)->escan_info.cur_sync_id += b)
+#define wl_escan_init_sync_id(a) ((a)->escan_info.cur_sync_id = 0)
+#else
+#define wl_escan_get_buf(a, b) ((wl_scan_results_t *) (a)->escan_info.escan_buf)
+#define wl_escan_check_sync_id(a, b, c, d) 0
+#define wl_escan_increment_sync_id(a, b)
+#define wl_escan_init_sync_id(a)
+#endif /* DUAL_ESCAN_RESULT_BUFFER */
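+/* Summary (illustrative): with DUAL_ESCAN_RESULT_BUFFER the driver ping-pongs
+ * between SCAN_BUF_CNT escan buffers indexed by cur_sync_id. On an abort with
+ * matching scan types, wl_escan_get_buf() returns the other buffer - the last
+ * complete scan - so stale partial results are not reported upward.
+ */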
+
+/*
+ * information element utilities
+ */
+static void wl_rst_ie(struct bcm_cfg80211 *cfg)
+{
+ struct wl_ie *ie = wl_to_ie(cfg);
+
+ ie->offset = 0;
+ bzero(ie->buf, sizeof(ie->buf));
+}
+
+static void wl_update_hidden_ap_ie(wl_bss_info_t *bi, const u8 *ie_stream, u32 *ie_size,
+ bool update_ssid)
+{
+ u8 *ssidie;
+ int32 ssid_len = MIN(bi->SSID_len, DOT11_MAX_SSID_LEN);
+ int32 remaining_ie_buf_len, available_buffer_len, unused_buf_len;
+ /* cfg80211_find_ie defined in the kernel returns const u8 */
+
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ ssidie = (u8 *)cfg80211_find_ie(WLAN_EID_SSID, ie_stream, *ie_size);
+ GCC_DIAGNOSTIC_POP();
+
+ /* Error out if
+ * 1. no SSID IE is found, or
+ * 2. the new SSID length is greater than what was allocated for the
+ * existing SSID (as we do not want to overwrite the rest of the IEs), or
+ * 3. the buffer input is erroneous and the SSID length doesn't match
+ * the space allocated to it.
+ */
+ if (!ssidie) {
+ return;
+ }
+ available_buffer_len = ((int)(*ie_size)) - (ssidie + 2 - ie_stream);
+ remaining_ie_buf_len = available_buffer_len - (int)ssidie[1];
+ unused_buf_len = WL_EXTRA_BUF_MAX - (4 + bi->length + *ie_size);
+ if (ssidie[1] > available_buffer_len) {
+ WL_ERR_MEM(("wl_update_hidden_ap_ie: skip wl_update_hidden_ap_ie : overflow\n"));
+ return;
+ }
+
+ /* ssidie[1] can differ from bi->SSID_len only while roaming;
+ * during scanning the two values are the same.
+ */
+
+ if (ssidie[1] != ssid_len) {
+ if (ssidie[1]) {
+ WL_ERR_RLMT(("wl_update_hidden_ap_ie: Wrong SSID len: %d != %d\n",
+ ssidie[1], bi->SSID_len));
+ }
+ /* ssidie[1] is 1 in beacon on CISCO hidden networks. */
+ /*
+ * The bss info in firmware gets updated from beacons and probe responses.
+ * In the case of a hidden network, the bss_info updated from the beacon
+ * will not carry the SSID, which can result in cfg80211_get_bss not
+ * finding a match, so include the SSID element.
+ */
+ if ((update_ssid && (ssid_len > ssidie[1])) && (unused_buf_len > ssid_len)) {
+ WL_INFORM_MEM(("Changing the SSID Info.\n"));
+ memmove(ssidie + ssid_len + 2,
+ (ssidie + 2) + ssidie[1],
+ remaining_ie_buf_len);
+ memcpy(ssidie + 2, bi->SSID, ssid_len);
+ *ie_size = *ie_size + ssid_len - ssidie[1];
+ ssidie[1] = ssid_len;
+ } else if (ssid_len < ssidie[1]) {
+ WL_ERR_MEM(("wl_update_hidden_ap_ie: Invalid SSID len: %d < %d\n",
+ bi->SSID_len, ssidie[1]));
+ }
+ return;
+ }
+ if (*(ssidie + 2) == '\0')
+ memcpy(ssidie + 2, bi->SSID, ssid_len);
+ return;
+}
+
+static s32 wl_mrg_ie(struct bcm_cfg80211 *cfg, u8 *ie_stream, u16 ie_size)
+{
+ struct wl_ie *ie = wl_to_ie(cfg);
+ s32 err = 0;
+
+ if (unlikely(ie->offset + ie_size > WL_TLV_INFO_MAX)) {
+ WL_ERR(("ei_stream crosses buffer boundary\n"));
+ return -ENOSPC;
+ }
+ memcpy(&ie->buf[ie->offset], ie_stream, ie_size);
+ ie->offset += ie_size;
+
+ return err;
+}
+
+static s32 wl_cp_ie(struct bcm_cfg80211 *cfg, u8 *dst, u16 dst_size)
+{
+ struct wl_ie *ie = wl_to_ie(cfg);
+ s32 err = 0;
+
+ if (unlikely(ie->offset > dst_size)) {
+ WL_ERR(("dst_size is not enough\n"));
+ return -ENOSPC;
+ }
+ memcpy(dst, &ie->buf[0], ie->offset);
+
+ return err;
+}
+
+static u32 wl_get_ielen(struct bcm_cfg80211 *cfg)
+{
+ struct wl_ie *ie = wl_to_ie(cfg);
+
+ return ie->offset;
+}
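+/* Illustrative sequence for rebuilding a frame's IE blob with the helpers
+ * above (as done in wl_inform_single_bss() below):
+ *
+ *   wl_rst_ie(cfg);                        clear the accumulator
+ *   wl_mrg_ie(cfg, ie_stream, ie_size);    append the scanned IEs
+ *   wl_cp_ie(cfg, dst, dst_size);          copy out wl_get_ielen(cfg) bytes
+ */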
+
+s32 wl_inform_single_bss(struct bcm_cfg80211 *cfg, wl_bss_info_t *bi, bool update_ssid)
+{
+ struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+ struct ieee80211_mgmt *mgmt;
+ struct ieee80211_channel *channel;
+ struct wl_cfg80211_bss_info *notif_bss_info;
+ struct wl_scan_req *sr = wl_to_sr(cfg);
+ struct beacon_proberesp *beacon_proberesp;
+ struct cfg80211_bss *cbss = NULL;
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+ log_conn_event_t *event_data = NULL;
+ tlv_log *tlv_data = NULL;
+ u32 alloc_len;
+ u32 payload_len;
+ s32 mgmt_type;
+ s32 signal;
+ u32 freq;
+ s32 err = 0;
+ gfp_t aflags;
+ u8 tmp_buf[IEEE80211_MAX_SSID_LEN + 1];
+ chanspec_t chanspec;
+
+ if (unlikely(dtoh32(bi->length) > WL_BSS_INFO_MAX)) {
+ WL_DBG(("Beacon is larger than buffer. Discarding\n"));
+ return err;
+ }
+
+ if (bi->SSID_len > IEEE80211_MAX_SSID_LEN) {
+ WL_ERR(("wrong SSID len:%d\n", bi->SSID_len));
+ return -EINVAL;
+ }
+
+ aflags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL;
+ notif_bss_info = (struct wl_cfg80211_bss_info *)MALLOCZ(cfg->osh,
+ sizeof(*notif_bss_info) + sizeof(*mgmt) - sizeof(u8) + WL_BSS_INFO_MAX);
+ if (unlikely(!notif_bss_info)) {
+ WL_ERR(("notif_bss_info alloc failed\n"));
+ return -ENOMEM;
+ }
+ /* Check for all currently supported bands */
+ if (!(
+#ifdef WL_6G_BAND
+ CHSPEC_IS6G(bi->chanspec) ||
+#endif /* WL_6G_BAND */
+ CHSPEC_IS5G(bi->chanspec) || CHSPEC_IS2G(bi->chanspec))) {
+ WL_ERR(("No valid band"));
+ MFREE(cfg->osh, notif_bss_info, sizeof(*notif_bss_info)
+ + sizeof(*mgmt) - sizeof(u8) + WL_BSS_INFO_MAX);
+ return -EINVAL;
+ }
+
+ mgmt = (struct ieee80211_mgmt *)notif_bss_info->frame_buf;
+ chanspec = wl_chspec_driver_to_host(bi->chanspec);
+ notif_bss_info->channel = wf_chspec_ctlchan(chanspec);
+ notif_bss_info->band = CHSPEC_BAND(bi->chanspec);
+ notif_bss_info->rssi = dtoh16(bi->RSSI);
+#if defined(RSSIAVG)
+ notif_bss_info->rssi = wl_get_avg_rssi(&cfg->g_rssi_cache_ctrl, &bi->BSSID);
+ if (notif_bss_info->rssi == RSSI_MINVAL)
+ notif_bss_info->rssi = MIN(dtoh16(bi->RSSI), RSSI_MAXVAL);
+#endif
+#if defined(RSSIOFFSET)
+ notif_bss_info->rssi = wl_update_rssi_offset(bcmcfg_to_prmry_ndev(cfg), notif_bss_info->rssi);
+#endif
+#if !defined(RSSIAVG) && !defined(RSSIOFFSET)
+ // terence 20150419: limit the max. rssi to -2 or the bss will be filtered out in android OS
+ notif_bss_info->rssi = MIN(notif_bss_info->rssi, RSSI_MAXVAL);
+#endif
+ memcpy(mgmt->bssid, &bi->BSSID, ETHER_ADDR_LEN);
+ mgmt_type = cfg->active_scan ?
+ IEEE80211_STYPE_PROBE_RESP : IEEE80211_STYPE_BEACON;
+ if (!memcmp(bi->SSID, sr->ssid.SSID, bi->SSID_len)) {
+ mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | mgmt_type);
+ }
+ beacon_proberesp = cfg->active_scan ?
+ (struct beacon_proberesp *)&mgmt->u.probe_resp :
+ (struct beacon_proberesp *)&mgmt->u.beacon;
+ beacon_proberesp->timestamp = 0;
+ beacon_proberesp->beacon_int = cpu_to_le16(bi->beacon_period);
+ beacon_proberesp->capab_info = cpu_to_le16(bi->capability);
+ wl_rst_ie(cfg);
+ wl_update_hidden_ap_ie(bi, ((u8 *) bi) + bi->ie_offset, &bi->ie_length, update_ssid);
+ wl_mrg_ie(cfg, ((u8 *) bi) + bi->ie_offset, bi->ie_length);
+ wl_cp_ie(cfg, beacon_proberesp->variable, WL_BSS_INFO_MAX -
+ offsetof(struct wl_cfg80211_bss_info, frame_buf));
+ notif_bss_info->frame_len = offsetof(struct ieee80211_mgmt,
+ u.beacon.variable) + wl_get_ielen(cfg);
+ freq = wl_channel_to_frequency(notif_bss_info->channel, notif_bss_info->band);
+ if (freq == 0) {
+ WL_ERR(("Invalid channel, failed to change channel to freq\n"));
+ MFREE(cfg->osh, notif_bss_info, sizeof(*notif_bss_info)
+ + sizeof(*mgmt) - sizeof(u8) + WL_BSS_INFO_MAX);
+ return -EINVAL;
+ }
+ channel = ieee80211_get_channel(wiphy, freq);
+ memcpy(tmp_buf, bi->SSID, bi->SSID_len);
+ tmp_buf[bi->SSID_len] = '\0';
+ WL_SCAN(("BSSID %pM, channel %3d(%3d %3sMHz), rssi %3d, capa 0x%-4x, mgmt_type %d, "
+ "frame_len %3d, SSID \"%s\"\n",
+ &bi->BSSID, notif_bss_info->channel, CHSPEC_CHANNEL(chanspec),
+ CHSPEC_IS20(chanspec)?"20":
+ CHSPEC_IS40(chanspec)?"40":
+ CHSPEC_IS80(chanspec)?"80":
+ CHSPEC_IS160(chanspec)?"160":"??",
+ notif_bss_info->rssi, mgmt->u.beacon.capab_info, mgmt_type,
+ notif_bss_info->frame_len, tmp_buf));
+ if (unlikely(!channel)) {
+ WL_ERR(("ieee80211_get_channel error\n"));
+ MFREE(cfg->osh, notif_bss_info, sizeof(*notif_bss_info)
+ + sizeof(*mgmt) - sizeof(u8) + WL_BSS_INFO_MAX);
+ return -EINVAL;
+ }
+
+ signal = notif_bss_info->rssi * 100;
+ if (!mgmt->u.probe_resp.timestamp) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
+ struct osl_timespec ts;
+ osl_get_monotonic_boottime(&ts);
+ mgmt->u.probe_resp.timestamp = ((u64)ts.tv_sec*1000000)
+ + ts.tv_nsec / 1000;
+#else
+ struct osl_timespec tv;
+ osl_do_gettimeofday(&tv);
+ mgmt->u.probe_resp.timestamp = ((u64)tv.tv_sec*1000000)
+ + tv.tv_usec;
+#endif
+ }
+
+ cbss = cfg80211_inform_bss_frame(wiphy, channel, mgmt,
+ le16_to_cpu(notif_bss_info->frame_len), signal, aflags);
+ if (unlikely(!cbss)) {
+ WL_ERR(("cfg80211_inform_bss_frame error bssid " MACDBG " channel %d \n",
+ MAC2STRDBG((u8*)(&bi->BSSID)), notif_bss_info->channel));
+ err = -EINVAL;
+ goto out_err;
+ }
+
+ CFG80211_PUT_BSS(wiphy, cbss);
+
+ if (DBG_RING_ACTIVE(dhdp, DHD_EVENT_RING_ID) &&
+ (cfg->sched_scan_req && !cfg->scan_request)) {
+ alloc_len = sizeof(log_conn_event_t) + (3 * sizeof(tlv_log)) +
+ IEEE80211_MAX_SSID_LEN + sizeof(uint16) +
+ sizeof(int16);
+ event_data = (log_conn_event_t *)MALLOCZ(dhdp->osh, alloc_len);
+ if (!event_data) {
+ WL_ERR(("%s: failed to allocate the log_conn_event_t with "
+ "length(%d)\n", __func__, alloc_len));
+ goto out_err;
+ }
+
+ payload_len = sizeof(log_conn_event_t);
+ event_data->event = WIFI_EVENT_DRIVER_PNO_SCAN_RESULT_FOUND;
+ tlv_data = event_data->tlvs;
+
+ /* ssid */
+ tlv_data->tag = WIFI_TAG_SSID;
+ tlv_data->len = bi->SSID_len;
+ memcpy(tlv_data->value, bi->SSID, bi->SSID_len);
+ payload_len += TLV_LOG_SIZE(tlv_data);
+ tlv_data = TLV_LOG_NEXT(tlv_data);
+
+ /* channel */
+ tlv_data->tag = WIFI_TAG_CHANNEL;
+ tlv_data->len = sizeof(uint16);
+ memcpy(tlv_data->value, &notif_bss_info->channel, sizeof(uint16));
+ payload_len += TLV_LOG_SIZE(tlv_data);
+ tlv_data = TLV_LOG_NEXT(tlv_data);
+
+ /* rssi */
+ tlv_data->tag = WIFI_TAG_RSSI;
+ tlv_data->len = sizeof(int16);
+ memcpy(tlv_data->value, &notif_bss_info->rssi, sizeof(int16));
+ payload_len += TLV_LOG_SIZE(tlv_data);
+ tlv_data = TLV_LOG_NEXT(tlv_data);
+
+ dhd_os_push_push_ring_data(dhdp, DHD_EVENT_RING_ID,
+ event_data, payload_len);
+ MFREE(dhdp->osh, event_data, alloc_len);
+ }
+
+out_err:
+ MFREE(cfg->osh, notif_bss_info, sizeof(*notif_bss_info)
+ + sizeof(*mgmt) - sizeof(u8) + WL_BSS_INFO_MAX);
+ return err;
+}
+
+struct wireless_dev * wl_get_scan_wdev(struct bcm_cfg80211 *cfg);
+struct net_device *
+wl_get_scan_ndev(struct bcm_cfg80211 *cfg)
+{
+ struct wireless_dev *wdev = NULL;
+ struct net_device *ndev = NULL;
+
+ wdev = wl_get_scan_wdev(cfg);
+ if (!wdev) {
+ WL_ERR(("No wdev present\n"));
+ return NULL;
+ }
+
+ ndev = wdev_to_wlc_ndev(wdev, cfg);
+ if (!ndev) {
+ WL_ERR(("No ndev present\n"));
+ }
+
+ return ndev;
+}
+
+#if defined(BSSCACHE) || defined(RSSIAVG)
+void wl_cfg80211_update_bss_cache(struct bcm_cfg80211 *cfg)
+{
+#if defined(RSSIAVG)
+ struct net_device *ndev = wl_get_scan_ndev(cfg);
+ int rssi;
+#endif
+ wl_scan_results_t *bss_list = cfg->bss_list;
+
+ /* Free cache in p2p scanning*/
+ if (p2p_is_on(cfg) && p2p_scan(cfg)) {
+#if defined(RSSIAVG)
+ wl_free_rssi_cache(&cfg->g_rssi_cache_ctrl);
+#endif
+#if defined(BSSCACHE)
+ wl_free_bss_cache(&cfg->g_bss_cache_ctrl);
+#endif
+ }
+
+ /* Update cache */
+#if defined(RSSIAVG)
+ wl_update_rssi_cache(&cfg->g_rssi_cache_ctrl, bss_list);
+ if (!in_atomic() && ndev) {
+ wl_update_connected_rssi_cache(ndev, &cfg->g_rssi_cache_ctrl, &rssi);
+ }
+#endif
+#if defined(BSSCACHE)
+ wl_update_bss_cache(&cfg->g_bss_cache_ctrl,
+#if defined(RSSIAVG)
+ &cfg->g_rssi_cache_ctrl,
+#endif
+ bss_list);
+#endif
+
+ /* delete dirty cache */
+#if defined(RSSIAVG)
+ wl_delete_dirty_rssi_cache(&cfg->g_rssi_cache_ctrl);
+ wl_reset_rssi_cache(&cfg->g_rssi_cache_ctrl);
+#endif
+#if defined(BSSCACHE)
+ wl_delete_dirty_bss_cache(&cfg->g_bss_cache_ctrl);
+ wl_reset_bss_cache(&cfg->g_bss_cache_ctrl);
+#endif
+
+}
+#endif
+
+#if defined(BSSCACHE)
+s32 wl_inform_bss_cache(struct bcm_cfg80211 *cfg)
+{
+ wl_scan_results_t *bss_list = cfg->bss_list;
+ wl_bss_info_t *bi = NULL; /* must be initialized */
+ s32 err = 0;
+ s32 i, cnt;
+ wl_bss_cache_t *node;
+
+ WL_SCAN(("scanned AP count (%d)\n", bss_list->count));
+ bss_list = cfg->bss_list;
+ preempt_disable();
+ bi = next_bss(bss_list, bi);
+ for_each_bss(bss_list, bi, i) {
+ err = wl_inform_single_bss(cfg, bi, false);
+ if (unlikely(err)) {
+ WL_ERR(("bss inform failed\n"));
+ }
+ }
+
+ cnt = i;
+ node = cfg->g_bss_cache_ctrl.m_cache_head;
+ WL_SCAN(("cached AP count (%d)\n", wl_bss_cache_size(&cfg->g_bss_cache_ctrl)));
+ for (i=cnt; node && i<WL_AP_MAX; i++) {
+ if (node->dirty > 1) {
+ bi = node->results.bss_info;
+ err = wl_inform_single_bss(cfg, bi, false);
+ }
+ node = node->next;
+ }
+ preempt_enable();
+
+ return err;
+}
+#endif
+
+static s32
+wl_inform_bss(struct bcm_cfg80211 *cfg)
+{
+#if !defined(BSSCACHE)
+ wl_scan_results_t *bss_list;
+ wl_bss_info_t *bi = NULL; /* must be initialized */
+ s32 i;
+#endif
+ struct net_device *ndev = wl_get_scan_ndev(cfg);
+ s32 err = 0;
+
+#ifdef WL_EXT_IAPSTA
+ if (ndev)
+ wl_ext_in4way_sync(ndev, 0, WL_EXT_STATUS_SCAN_COMPLETE, NULL);
+#endif
+
+#if defined(BSSCACHE) || defined(RSSIAVG)
+ wl_cfg80211_update_bss_cache(cfg);
+#endif
+
+#if defined(BSSCACHE)
+ err = wl_inform_bss_cache(cfg);
+#else
+ bss_list = cfg->bss_list;
+ WL_SCAN(("scanned AP count (%d)\n", bss_list->count));
+#ifdef ESCAN_CHANNEL_CACHE
+ reset_roam_cache(cfg);
+#endif /* ESCAN_CHANNEL_CACHE */
+ preempt_disable();
+ bi = next_bss(bss_list, bi);
+ for_each_bss(bss_list, bi, i) {
+#ifdef ESCAN_CHANNEL_CACHE
+ add_roam_cache(cfg, bi);
+#endif /* ESCAN_CHANNEL_CACHE */
+ err = wl_inform_single_bss(cfg, bi, false);
+ if (unlikely(err)) {
+ WL_ERR(("bss inform failed\n"));
+ }
+ }
+ preempt_enable();
+#endif
+
+ if (cfg->autochannel && ndev) {
+#if defined(BSSCACHE)
+ wl_ext_get_best_channel(ndev, &cfg->g_bss_cache_ctrl, ioctl_version,
+ &cfg->best_2g_ch, &cfg->best_5g_ch);
+#else
+ wl_ext_get_best_channel(ndev, bss_list, ioctl_version,
+ &cfg->best_2g_ch, &cfg->best_5g_ch);
+#endif
+ }
+
+ WL_MEM(("cfg80211 scan cache updated\n"));
+#ifdef ROAM_CHANNEL_CACHE
+ /* print_roam_cache(); */
+ update_roam_cache(cfg, ioctl_version);
+#endif /* ROAM_CHANNEL_CACHE */
+ return err;
+}
+
+#ifdef WL11U
+static bcm_tlv_t *
+wl_cfg80211_find_interworking_ie(const u8 *parse, u32 len)
+{
+ bcm_tlv_t *ie;
+
+/* Unfortunately it's too much work to dispose of the const cast - bcm_parse_tlvs
+ * is used everywhere and changing its prototype to take a const qualifier needs
+ * a massive change to all its callers...
+ */
+
+ if ((ie = bcm_parse_tlvs(parse, len, DOT11_MNG_INTERWORKING_ID))) {
+ return ie;
+ }
+ return NULL;
+}
+
+static s32
+wl_cfg80211_clear_iw_ie(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bssidx)
+{
+ ie_setbuf_t ie_setbuf;
+
+ WL_DBG(("clear interworking IE\n"));
+
+ bzero(&ie_setbuf, sizeof(ie_setbuf_t));
+
+ ie_setbuf.ie_buffer.iecount = htod32(1);
+ ie_setbuf.ie_buffer.ie_list[0].ie_data.id = DOT11_MNG_INTERWORKING_ID;
+ ie_setbuf.ie_buffer.ie_list[0].ie_data.len = 0;
+
+ return wldev_iovar_setbuf_bsscfg(ndev, "ie", &ie_setbuf, sizeof(ie_setbuf),
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+}
+
+static s32
+wl_cfg80211_add_iw_ie(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bssidx, s32 pktflag,
+ uint8 ie_id, uint8 *data, uint8 data_len)
+{
+ s32 err = BCME_OK;
+ s32 buf_len;
+ ie_setbuf_t *ie_setbuf;
+ ie_getbuf_t ie_getbufp;
+ char getbuf[WLC_IOCTL_SMLEN];
+
+ if (ie_id != DOT11_MNG_INTERWORKING_ID) {
+ WL_ERR(("unsupported (id=%d)\n", ie_id));
+ return BCME_UNSUPPORTED;
+ }
+
+ /* the Access Network Options field (1 octet) is mandatory */
+ if (!data || data_len == 0 || data_len > IW_IES_MAX_BUF_LEN) {
+ WL_ERR(("wrong interworking IE (len=%d)\n", data_len));
+ return BCME_BADARG;
+ }
+
+ /* Validate the pktflag parameter */
+ if ((pktflag & ~(VNDR_IE_BEACON_FLAG | VNDR_IE_PRBRSP_FLAG |
+ VNDR_IE_ASSOCRSP_FLAG | VNDR_IE_AUTHRSP_FLAG |
+ VNDR_IE_PRBREQ_FLAG | VNDR_IE_ASSOCREQ_FLAG|
+ VNDR_IE_CUSTOM_FLAG))) {
+ WL_ERR(("invalid packet flag 0x%x\n", pktflag));
+ return BCME_BADARG;
+ }
+
+ buf_len = sizeof(ie_setbuf_t) + data_len - 1;
+
+ ie_getbufp.id = DOT11_MNG_INTERWORKING_ID;
+ if (wldev_iovar_getbuf_bsscfg(ndev, "ie", (void *)&ie_getbufp,
+ sizeof(ie_getbufp), getbuf, WLC_IOCTL_SMLEN, bssidx, &cfg->ioctl_buf_sync)
+ == BCME_OK) {
+ if (!memcmp(&getbuf[TLV_HDR_LEN], data, data_len)) {
+ WL_DBG(("skip to set interworking IE\n"));
+ return BCME_OK;
+ }
+ }
+
+ /* if already set with previous values, delete it first */
+ if (cfg->wl11u) {
+ if ((err = wl_cfg80211_clear_iw_ie(cfg, ndev, bssidx)) != BCME_OK) {
+ return err;
+ }
+ }
+
+ ie_setbuf = (ie_setbuf_t *)MALLOCZ(cfg->osh, buf_len);
+ if (!ie_setbuf) {
+ WL_ERR(("Error allocating buffer for IE\n"));
+ return -ENOMEM;
+ }
+ strlcpy(ie_setbuf->cmd, "add", sizeof(ie_setbuf->cmd));
+
+ /* Buffer contains only 1 IE */
+ ie_setbuf->ie_buffer.iecount = htod32(1);
+ /* use the VNDR_IE_CUSTOM_FLAG flag for a non-vendor IE; currently a fixed value */
+ ie_setbuf->ie_buffer.ie_list[0].pktflag = htod32(pktflag);
+
+ /* Now, add the IE to the buffer */
+ ie_setbuf->ie_buffer.ie_list[0].ie_data.id = DOT11_MNG_INTERWORKING_ID;
+ ie_setbuf->ie_buffer.ie_list[0].ie_data.len = data_len;
+ /* Returning void here as max data_len can be 8 */
+ (void)memcpy_s((uchar *)&ie_setbuf->ie_buffer.ie_list[0].ie_data.data[0], sizeof(uint8),
+ data, data_len);
+
+ if ((err = wldev_iovar_setbuf_bsscfg(ndev, "ie", ie_setbuf, buf_len,
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync))
+ == BCME_OK) {
+ WL_DBG(("set interworking IE\n"));
+ cfg->wl11u = TRUE;
+ err = wldev_iovar_setint_bsscfg(ndev, "grat_arp", 1, bssidx);
+ }
+
+ MFREE(cfg->osh, ie_setbuf, buf_len);
+ return err;
+}
+#endif /* WL11U */
+
+#ifdef WL_BCNRECV
+/* Beacon recv results handler sending to upper layer */
+static s32
+wl_bcnrecv_result_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ wl_bss_info_v109_2_t *bi, uint32 scan_status)
+{
+ s32 err = BCME_OK;
+ struct wiphy *wiphy = NULL;
+ wl_bcnrecv_result_t *bcn_recv = NULL;
+ struct osl_timespec ts;
+ if (!bi) {
+ WL_ERR(("%s: bi is NULL\n", __func__));
+ err = BCME_NORESOURCE;
+ goto exit;
+ }
+ if ((bi->length - bi->ie_length) < sizeof(wl_bss_info_v109_2_t)) {
+ WL_ERR(("bi info version doesn't support bcn_recv attributes\n"));
+ goto exit;
+ }
+
+ if (scan_status == WLC_E_STATUS_RXBCN) {
+ wiphy = cfg->wdev->wiphy;
+ if (!wiphy) {
+ WL_ERR(("wiphy is NULL\n"));
+ err = BCME_NORESOURCE;
+ goto exit;
+ }
+ bcn_recv = (wl_bcnrecv_result_t *)MALLOCZ(cfg->osh, sizeof(*bcn_recv));
+ if (unlikely(!bcn_recv)) {
+ WL_ERR(("Failed to allocate memory\n"));
+ return -ENOMEM;
+ }
+ /* Returning void here as copy size does not exceed dest size of SSID */
+ (void)memcpy_s((char *)bcn_recv->SSID, DOT11_MAX_SSID_LEN,
+ (char *)bi->SSID, DOT11_MAX_SSID_LEN);
+ /* Returning void here as copy size does not exceed dest size of ETH_LEN */
+ (void)memcpy_s(&bcn_recv->BSSID, ETHER_ADDR_LEN, &bi->BSSID, ETH_ALEN);
+ bcn_recv->channel = wf_chspec_ctlchan(
+ wl_chspec_driver_to_host(bi->chanspec));
+ bcn_recv->beacon_interval = bi->beacon_period;
+
+ /* kernel timestamp */
+ osl_get_monotonic_boottime(&ts);
+ bcn_recv->system_time = ((u64)ts.tv_sec*1000000)
+ + ts.tv_nsec / 1000;
+ bcn_recv->timestamp[0] = bi->timestamp[0];
+ bcn_recv->timestamp[1] = bi->timestamp[1];
+ if ((err = wl_android_bcnrecv_event(cfgdev_to_wlc_ndev(cfgdev, cfg),
+ BCNRECV_ATTR_BCNINFO, 0, 0,
+ (uint8 *)bcn_recv, sizeof(*bcn_recv)))
+ != BCME_OK) {
+ WL_ERR(("failed to send bcnrecv event, error:%d\n", err));
+ }
+ } else {
+ WL_DBG(("Ignoring Escan Event:%d \n", scan_status));
+ }
+exit:
+ if (bcn_recv) {
+ MFREE(cfg->osh, bcn_recv, sizeof(*bcn_recv));
+ }
+ return err;
+}
+#endif /* WL_BCNRECV */
+
+#ifdef ESCAN_BUF_OVERFLOW_MGMT
+#ifndef WL_DRV_AVOID_SCANCACHE
+static void
+wl_cfg80211_find_removal_candidate(wl_bss_info_t *bss, removal_element_t *candidate)
+{
+ int idx;
+ for (idx = 0; idx < BUF_OVERFLOW_MGMT_COUNT; idx++) {
+ int len = BUF_OVERFLOW_MGMT_COUNT - idx - 1;
+ if (bss->RSSI < candidate[idx].RSSI) {
+ if (len) {
+ /* In the below memcpy operation the candidate array always has the
+ * buffer space available to max 'len' calculated in the for loop.
+ */
+ (void)memcpy_s(&candidate[idx + 1],
+ (sizeof(removal_element_t) * len),
+ &candidate[idx], sizeof(removal_element_t) * len);
+ }
+ candidate[idx].RSSI = bss->RSSI;
+ candidate[idx].length = bss->length;
+ (void)memcpy_s(&candidate[idx].BSSID, ETHER_ADDR_LEN,
+ &bss->BSSID, ETHER_ADDR_LEN);
+ return;
+ }
+ }
+}
+
+static void
+wl_cfg80211_remove_lowRSSI_info(wl_scan_results_t *list, removal_element_t *candidate,
+ wl_bss_info_t *bi)
+{
+ int idx1, idx2;
+ int total_delete_len = 0;
+ for (idx1 = 0; idx1 < BUF_OVERFLOW_MGMT_COUNT; idx1++) {
+ int cur_len = WL_SCAN_RESULTS_FIXED_SIZE;
+ wl_bss_info_t *bss = NULL;
+ if (candidate[idx1].RSSI >= bi->RSSI)
+ continue;
+ for (idx2 = 0; idx2 < list->count; idx2++) {
+ bss = bss ? (wl_bss_info_t *)((uintptr)bss + dtoh32(bss->length)) :
+ list->bss_info;
+ if (!bss) {
+ continue;
+ }
+ if (!bcmp(&candidate[idx1].BSSID, &bss->BSSID, ETHER_ADDR_LEN) &&
+ candidate[idx1].RSSI == bss->RSSI &&
+ candidate[idx1].length == dtoh32(bss->length)) {
+ u32 delete_len = dtoh32(bss->length);
+ WL_DBG(("delete scan info of " MACDBG " to add new AP\n",
+ MAC2STRDBG(bss->BSSID.octet)));
+ if (idx2 < list->count -1) {
+ memmove((u8 *)bss, (u8 *)bss + delete_len,
+ list->buflen - cur_len - delete_len);
+ }
+ list->buflen -= delete_len;
+ list->count--;
+ total_delete_len += delete_len;
+ /* if delete_len is greater than or equal to result length */
+ if (total_delete_len >= bi->length) {
+ return;
+ }
+ break;
+ }
+ cur_len += dtoh32(bss->length);
+ }
+ }
+}
+#endif /* WL_DRV_AVOID_SCANCACHE */
+#endif /* ESCAN_BUF_OVERFLOW_MGMT */
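+/* Summary (illustrative): together these helpers implement the escan overflow
+ * policy - while walking the cached results, the BUF_OVERFLOW_MGMT_COUNT
+ * lowest-RSSI entries are tracked as removal candidates, and when a new,
+ * stronger result does not fit in ESCAN_BUF_SIZE, candidates weaker than it
+ * are evicted until enough space is freed.
+ */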
+
+s32
+wl_escan_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+ s32 err = BCME_OK;
+ s32 status = ntoh32(e->status);
+ wl_escan_result_t *escan_result;
+ struct net_device *ndev = NULL;
+#ifndef WL_DRV_AVOID_SCANCACHE
+ wl_bss_info_t *bi;
+ u32 bi_length;
+ const wifi_p2p_ie_t * p2p_ie;
+ const u8 *p2p_dev_addr = NULL;
+ wl_scan_results_t *list;
+ wl_bss_info_t *bss = NULL;
+ u32 i;
+#endif /* WL_DRV_AVOID_SCANCACHE */
+ u16 channel;
+ struct ieee80211_supported_band *band;
+
+ WL_DBG((" enter event type : %d, status : %d \n",
+ ntoh32(e->event_type), ntoh32(e->status)));
+
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+ mutex_lock(&cfg->scan_sync);
+
+ if (cfg->loc.in_progress) {
+ /* Listen in progress */
+ if ((status == WLC_E_STATUS_SUCCESS) || (status == WLC_E_STATUS_ABORT)) {
+ if (delayed_work_pending(&cfg->loc.work)) {
+ cancel_delayed_work_sync(&cfg->loc.work);
+ }
+ err = wl_cfgscan_notify_listen_complete(cfg);
+ goto exit;
+ } else {
+ WL_DBG(("Listen in progress. Unknown status. %d\n", status));
+ }
+ }
+
+ /* P2P SCAN is coming from primary interface */
+ if (wl_get_p2p_status(cfg, SCANNING)) {
+ if (wl_get_drv_status_all(cfg, SENDING_ACT_FRM))
+ ndev = cfg->afx_hdl->dev;
+ else
+ ndev = cfg->escan_info.ndev;
+ }
+ escan_result = (wl_escan_result_t *)data;
+ if (!escan_result) {
+ WL_ERR(("Invalid escan result (NULL data)\n"));
+ goto exit;
+ }
+#ifdef WL_BCNRECV
+ if (status == WLC_E_STATUS_RXBCN) {
+ if (cfg->bcnrecv_info.bcnrecv_state == BEACON_RECV_STARTED) {
+ /* handle beacon recv scan results */
+ wl_bss_info_v109_2_t *bi_info;
+ bi_info = (wl_bss_info_v109_2_t *)escan_result->bss_info;
+ err = wl_bcnrecv_result_handler(cfg, cfgdev, bi_info, status);
+ } else {
+ WL_ERR(("ignore bcnrx event in disabled state(%d)\n",
+ cfg->bcnrecv_info.bcnrecv_state));
+ }
+ goto exit;
+ }
+#endif /* WL_BCNRECV */
+ if (!ndev || (!wl_get_drv_status(cfg, SCANNING, ndev) && !cfg->sched_scan_running)) {
+ WL_ERR_RLMT(("escan is not ready. drv_scan_status 0x%x"
+ " e_type %d e_status %d\n",
+ wl_get_drv_status(cfg, SCANNING, ndev),
+ ntoh32(e->event_type), ntoh32(e->status)));
+ goto exit;
+ }
+
+#ifndef WL_DRV_AVOID_SCANCACHE
+ if (wl_escan_check_sync_id(cfg, status, escan_result->sync_id,
+ cfg->escan_info.cur_sync_id) < 0) {
+ goto exit;
+ }
+
+ if (status == WLC_E_STATUS_PARTIAL) {
+ WL_DBG(("WLC_E_STATUS_PARTIAL \n"));
+ DBG_EVENT_LOG((dhd_pub_t *)cfg->pub, WIFI_EVENT_DRIVER_SCAN_RESULT_FOUND);
+ if ((dtoh32(escan_result->buflen) > (int)ESCAN_BUF_SIZE) ||
+ (dtoh32(escan_result->buflen) < sizeof(wl_escan_result_t))) {
+ WL_ERR(("Invalid escan buffer len:%d\n", dtoh32(escan_result->buflen)));
+ goto exit;
+ }
+ if (dtoh16(escan_result->bss_count) != 1) {
+ WL_ERR(("Invalid bss_count %d: ignoring\n", escan_result->bss_count));
+ goto exit;
+ }
+ bi = escan_result->bss_info;
+ if (!bi) {
+ WL_ERR(("Invalid escan bss info (NULL pointer)\n"));
+ goto exit;
+ }
+ bi_length = dtoh32(bi->length);
+ if (bi_length != (dtoh32(escan_result->buflen) - WL_ESCAN_RESULTS_FIXED_SIZE)) {
+ WL_ERR(("Invalid bss_info length %d: ignoring\n", bi_length));
+ goto exit;
+ }
+
+ /* +++++ terence 20130524: skip invalid bss */
+ channel =
+ bi->ctl_ch ? bi->ctl_ch : CHSPEC_CHANNEL(wl_chspec_driver_to_host(bi->chanspec));
+ if (channel <= CH_MAX_2G_CHANNEL)
+ band = bcmcfg_to_wiphy(cfg)->bands[IEEE80211_BAND_2GHZ];
+ else
+ band = bcmcfg_to_wiphy(cfg)->bands[IEEE80211_BAND_5GHZ];
+ if (!band) {
+ WL_ERR(("No valid band\n"));
+ goto exit;
+ }
+ if (!dhd_conf_match_channel(cfg->pub, channel))
+ goto exit;
+ /* ----- terence 20130524: skip invalid bss */
+
+ if (!(bcmcfg_to_wiphy(cfg)->interface_modes & BIT(NL80211_IFTYPE_ADHOC))) {
+ if (dtoh16(bi->capability) & DOT11_CAP_IBSS) {
+ WL_DBG(("Ignoring IBSS result\n"));
+ goto exit;
+ }
+ }
+
+ if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
+ p2p_dev_addr = wl_cfgp2p_retreive_p2p_dev_addr(bi, bi_length);
+ if (p2p_dev_addr && !memcmp(p2p_dev_addr,
+ cfg->afx_hdl->tx_dst_addr.octet, ETHER_ADDR_LEN)) {
+ s32 channel = wf_chspec_ctlchan(
+ wl_chspec_driver_to_host(bi->chanspec));
+
+ if ((channel > MAXCHANNEL) || (channel <= 0))
+ channel = WL_INVALID;
+ else
+ WL_ERR(("ACTION FRAME SCAN : Peer " MACDBG " found,"
+ " channel : %d\n",
+ MAC2STRDBG(cfg->afx_hdl->tx_dst_addr.octet),
+ channel));
+
+ wl_clr_p2p_status(cfg, SCANNING);
+ cfg->afx_hdl->peer_chan = channel;
+ complete(&cfg->act_frm_scan);
+ goto exit;
+ }
+
+ } else {
+ int cur_len = WL_SCAN_RESULTS_FIXED_SIZE;
+#ifdef ESCAN_BUF_OVERFLOW_MGMT
+ removal_element_t candidate[BUF_OVERFLOW_MGMT_COUNT];
+ int remove_lower_rssi = FALSE;
+
+ bzero(candidate, sizeof(removal_element_t)*BUF_OVERFLOW_MGMT_COUNT);
+#endif /* ESCAN_BUF_OVERFLOW_MGMT */
+
+ list = wl_escan_get_buf(cfg, FALSE);
+ if (scan_req_match(cfg)) {
+#ifdef WL_HOST_BAND_MGMT
+ s32 channel_band = 0;
+ chanspec_t chspec;
+#endif /* WL_HOST_BAND_MGMT */
+ /* p2p scan && allow only probe response */
+ if ((cfg->p2p->search_state != WL_P2P_DISC_ST_SCAN) &&
+ (bi->flags & WL_BSS_FLAGS_FROM_BEACON))
+ goto exit;
+ if ((p2p_ie = wl_cfgp2p_find_p2pie(((u8 *) bi) + bi->ie_offset,
+ bi->ie_length)) == NULL) {
+ WL_ERR(("Couldn't find P2PIE in probe"
+ " response/beacon\n"));
+ goto exit;
+ }
+#ifdef WL_HOST_BAND_MGMT
+ chspec = wl_chspec_driver_to_host(bi->chanspec);
+ channel_band = CHSPEC2WLC_BAND(chspec);
+
+ if ((
+#ifdef WL_6G_BAND
+ (cfg->curr_band == WLC_BAND_6G) ||
+#endif /* WL_6G_BAND */
+ (cfg->curr_band == WLC_BAND_5G)) &&
+ (channel_band == WLC_BAND_2G)) {
+ /* Avoid sending the GO results in band conflict */
+ if (wl_cfgp2p_retreive_p2pattrib(p2p_ie,
+ P2P_SEID_GROUP_ID) != NULL)
+ goto exit;
+ }
+#endif /* WL_HOST_BAND_MGMT */
+ }
+#ifdef ESCAN_BUF_OVERFLOW_MGMT
+ if (bi_length > ESCAN_BUF_SIZE - list->buflen)
+ remove_lower_rssi = TRUE;
+#endif /* ESCAN_BUF_OVERFLOW_MGMT */
+
+ for (i = 0; i < list->count; i++) {
+ bss = bss ? (wl_bss_info_t *)((uintptr)bss + dtoh32(bss->length))
+ : list->bss_info;
+ if (!bss) {
+ WL_ERR(("bss is NULL\n"));
+ goto exit;
+ }
+#ifdef ESCAN_BUF_OVERFLOW_MGMT
+ WL_DBG(("%s("MACDBG"), i=%d bss: RSSI %d list->count %d\n",
+ bss->SSID, MAC2STRDBG(bss->BSSID.octet),
+ i, bss->RSSI, list->count));
+
+ if (remove_lower_rssi)
+ wl_cfg80211_find_removal_candidate(bss, candidate);
+#endif /* ESCAN_BUF_OVERFLOW_MGMT */
+
+ if (!bcmp(&bi->BSSID, &bss->BSSID, ETHER_ADDR_LEN) &&
+ (CHSPEC_BAND(wl_chspec_driver_to_host(bi->chanspec))
+ == CHSPEC_BAND(wl_chspec_driver_to_host(bss->chanspec))) &&
+ bi->SSID_len == bss->SSID_len &&
+ !bcmp(bi->SSID, bss->SSID, bi->SSID_len)) {
+
+ /* do not allow beacon data to update
+ * the data received from a probe response
+ */
+ if (!(bss->flags & WL_BSS_FLAGS_FROM_BEACON) &&
+ (bi->flags & WL_BSS_FLAGS_FROM_BEACON))
+ goto exit;
+
+ WL_DBG(("%s("MACDBG"), i=%d prev: RSSI %d"
+ " flags 0x%x, new: RSSI %d flags 0x%x\n",
+ bss->SSID, MAC2STRDBG(bi->BSSID.octet), i,
+ bss->RSSI, bss->flags, bi->RSSI, bi->flags));
+
+ if ((bss->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL) ==
+ (bi->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL)) {
+ /* preserve max RSSI if the measurements are
+ * both on-channel or both off-channel
+ */
+ WL_DBG(("%s("MACDBG"), same onchan"
+ ", RSSI: prev %d new %d\n",
+ bss->SSID, MAC2STRDBG(bi->BSSID.octet),
+ bss->RSSI, bi->RSSI));
+ bi->RSSI = MAX(bss->RSSI, bi->RSSI);
+ } else if ((bss->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL) &&
+ (bi->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL) == 0) {
+ /* preserve the on-channel rssi measurement
+ * if the new measurement is off channel
+ */
+ WL_DBG(("%s("MACDBG"), prev onchan"
+ ", RSSI: prev %d new %d\n",
+ bss->SSID, MAC2STRDBG(bi->BSSID.octet),
+ bss->RSSI, bi->RSSI));
+ bi->RSSI = bss->RSSI;
+ bi->flags |= WL_BSS_FLAGS_RSSI_ONCHANNEL;
+ }
+ if (dtoh32(bss->length) != bi_length) {
+ u32 prev_len = dtoh32(bss->length);
+
+						WL_DBG(("bss info replacement"
+							" occurred (bcast:%d -> probresp:%d)\n",
+							bss->ie_length, bi->ie_length));
+ WL_DBG(("%s("MACDBG"), replacement!(%d -> %d)\n",
+ bss->SSID, MAC2STRDBG(bi->BSSID.octet),
+ prev_len, bi_length));
+
+ if ((list->buflen - prev_len) + bi_length
+ > ESCAN_BUF_SIZE) {
+ WL_ERR(("Buffer is too small: keep the"
+ " previous result of this AP\n"));
+ /* Only update RSSI */
+ bss->RSSI = bi->RSSI;
+ bss->flags |= (bi->flags
+ & WL_BSS_FLAGS_RSSI_ONCHANNEL);
+ goto exit;
+ }
+
+ if (i < list->count - 1) {
+						/* memmove required only when the replaced entry is not the last one */
+ memmove((u8 *)bss + bi_length,
+ (u8 *)bss + prev_len,
+ list->buflen - cur_len - prev_len);
+ }
+ list->buflen -= prev_len;
+ list->buflen += bi_length;
+ }
+ list->version = dtoh32(bi->version);
+					/* The '(dtoh32(bss->length) != bi_length)' check
+					 * above avoids buffer overflow: bi_length is
+					 * already accounted for in list->buflen
+					 */
+ if ((err = memcpy_s((u8 *)bss,
+ (ESCAN_BUF_SIZE - (list->buflen - bi_length)),
+ (u8 *)bi, bi_length)) != BCME_OK) {
+ WL_ERR(("Failed to copy the recent bss_info."
+ "err:%d recv_len:%d bi_len:%d\n", err,
+ ESCAN_BUF_SIZE - (list->buflen - bi_length),
+ bi_length));
+ /* This scenario should never happen. If it happens,
+ * set list->count to zero for recovery
+ */
+ list->count = 0;
+ list->buflen = 0;
+ ASSERT(0);
+ }
+ goto exit;
+ }
+ cur_len += dtoh32(bss->length);
+ }
+ if (bi_length > ESCAN_BUF_SIZE - list->buflen) {
+#ifdef ESCAN_BUF_OVERFLOW_MGMT
+ wl_cfg80211_remove_lowRSSI_info(list, candidate, bi);
+ if (bi_length > ESCAN_BUF_SIZE - list->buflen) {
+ WL_DBG(("RSSI(" MACDBG ") is too low(%d) to add Buffer\n",
+ MAC2STRDBG(bi->BSSID.octet), bi->RSSI));
+ goto exit;
+ }
+#else
+ WL_ERR(("Buffer is too small: ignoring\n"));
+ goto exit;
+#endif /* ESCAN_BUF_OVERFLOW_MGMT */
+ }
+			/* The check above ensures that bi_length does not
+			 * exceed ESCAN_BUF_SIZE
+			 */
+ (void)memcpy_s(&(((char *)list)[list->buflen]),
+ (ESCAN_BUF_SIZE - list->buflen), bi, bi_length);
+ list->version = dtoh32(bi->version);
+ list->buflen += bi_length;
+ list->count++;
+
+			/* !Broadcast && number of SSIDs == 1 && number of channels == 1
+			 * means a specific scan for association
+			 */
+ if (wl_cfgp2p_is_p2p_specific_scan(cfg->scan_request)) {
+ WL_ERR(("P2P assoc scan fast aborted.\n"));
+ wl_cfgscan_scan_abort(cfg);
+ wl_notify_escan_complete(cfg, cfg->escan_info.ndev, false);
+ goto exit;
+ }
+ }
+ }
+ else if (status == WLC_E_STATUS_SUCCESS) {
+ cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+#ifdef DHD_SEND_HANG_ESCAN_SYNCID_MISMATCH
+ cfg->escan_info.prev_escan_aborted = FALSE;
+#endif /* DHD_SEND_HANG_ESCAN_SYNCID_MISMATCH */
+ if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
+ WL_DBG(("ACTION FRAME SCAN DONE\n"));
+ wl_clr_p2p_status(cfg, SCANNING);
+ wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev);
+ if (cfg->afx_hdl->peer_chan == WL_INVALID)
+ complete(&cfg->act_frm_scan);
+ } else if ((likely(cfg->scan_request)) || (cfg->sched_scan_running)) {
+ WL_INFORM_MEM(("ESCAN COMPLETED\n"));
+ DBG_EVENT_LOG((dhd_pub_t *)cfg->pub, WIFI_EVENT_DRIVER_SCAN_COMPLETE);
+ cfg->bss_list = wl_escan_get_buf(cfg, FALSE);
+ if (!scan_req_match(cfg)) {
+ WL_DBG(("SCAN COMPLETED: scanned AP count=%d\n",
+ cfg->bss_list->count));
+ }
+ wl_inform_bss(cfg);
+ wl_notify_escan_complete(cfg, ndev, false);
+ }
+ wl_escan_increment_sync_id(cfg, SCAN_BUF_NEXT);
+#ifdef CUSTOMER_HW4_DEBUG
+ if (wl_scan_timeout_dbg_enabled)
+ wl_scan_timeout_dbg_clear();
+#endif /* CUSTOMER_HW4_DEBUG */
+ } else if ((status == WLC_E_STATUS_ABORT) || (status == WLC_E_STATUS_NEWSCAN) ||
+ (status == WLC_E_STATUS_11HQUIET) || (status == WLC_E_STATUS_CS_ABORT) ||
+ (status == WLC_E_STATUS_NEWASSOC)) {
+ /* Dump FW preserve buffer content */
+ if (status == WLC_E_STATUS_ABORT) {
+ wl_flush_fw_log_buffer(ndev, FW_LOGSET_MASK_ALL);
+ }
+ /* Handle all cases of scan abort */
+ cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+ WL_DBG(("ESCAN ABORT reason: %d\n", status));
+ if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
+ WL_DBG(("ACTION FRAME SCAN DONE\n"));
+ wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev);
+ wl_clr_p2p_status(cfg, SCANNING);
+ if (cfg->afx_hdl->peer_chan == WL_INVALID)
+ complete(&cfg->act_frm_scan);
+ } else if ((likely(cfg->scan_request)) || (cfg->sched_scan_running)) {
+ WL_INFORM_MEM(("ESCAN ABORTED\n"));
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+ if (p2p_scan(cfg) && cfg->scan_request &&
+ (cfg->scan_request->flags & NL80211_SCAN_FLAG_FLUSH)) {
+ WL_ERR(("scan list is changed"));
+ cfg->bss_list = wl_escan_get_buf(cfg, FALSE);
+ } else
+#endif
+ cfg->bss_list = wl_escan_get_buf(cfg, TRUE);
+
+ if (!scan_req_match(cfg)) {
+ WL_TRACE_HW4(("SCAN ABORTED: scanned AP count=%d\n",
+ cfg->bss_list->count));
+ }
+#ifdef DUAL_ESCAN_RESULT_BUFFER
+ if (escan_result->sync_id != cfg->escan_info.cur_sync_id) {
+					/* If the sync_id does not match, the abort may belong
+					 * to an old scan request or to an in-driver initiated
+					 * scan, so only process the abort for the scan_req
+					 * whose sync_id matches.
+					 */
+ WL_INFORM_MEM(("sync_id mismatch (%d != %d). "
+ "Ignore the scan abort event.\n",
+ escan_result->sync_id, cfg->escan_info.cur_sync_id));
+ goto exit;
+ } else {
+ /* sync id is matching, abort the scan */
+ WL_INFORM_MEM(("scan aborted for sync_id: %d \n",
+ cfg->escan_info.cur_sync_id));
+ wl_inform_bss(cfg);
+ wl_notify_escan_complete(cfg, ndev, true);
+ }
+#else
+ wl_inform_bss(cfg);
+ wl_notify_escan_complete(cfg, ndev, true);
+#endif /* DUAL_ESCAN_RESULT_BUFFER */
+ } else {
+ /* If there is no pending host initiated scan, do nothing */
+ WL_DBG(("ESCAN ABORT: No pending scans. Ignoring event.\n"));
+ }
+ /* scan aborted, need to set previous success result */
+ wl_escan_increment_sync_id(cfg, SCAN_BUF_CNT);
+ } else if (status == WLC_E_STATUS_TIMEOUT) {
+ WL_ERR(("WLC_E_STATUS_TIMEOUT : scan_request[%p]\n", cfg->scan_request));
+ WL_ERR(("reason[0x%x]\n", e->reason));
+ if (e->reason == 0xFFFFFFFF) {
+ wl_scan_results_t *bss_list;
+ bss_list = wl_escan_get_buf(cfg, FALSE);
+ if (!bss_list) {
+ WL_ERR(("bss_list is null. Didn't receive any partial scan results\n"));
+ } else {
+ WL_ERR(("Dump scan buffer: scanned AP count (%d)\n", bss_list->count));
+ bi = NULL;
+ bi = next_bss(bss_list, bi);
+ for_each_bss(bss_list, bi, i) {
+ channel = wf_chspec_ctlchan(wl_chspec_driver_to_host(bi->chanspec));
+ WL_ERR(("SSID :%s Channel :%d\n", bi->SSID, channel));
+ }
+ }
+ _wl_cfgscan_cancel_scan(cfg);
+ }
+ } else {
+ WL_ERR(("unexpected Escan Event %d : abort\n", status));
+ cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+ if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
+ WL_DBG(("ACTION FRAME SCAN DONE\n"));
+ wl_clr_p2p_status(cfg, SCANNING);
+ wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev);
+ if (cfg->afx_hdl->peer_chan == WL_INVALID)
+ complete(&cfg->act_frm_scan);
+ } else if ((likely(cfg->scan_request)) || (cfg->sched_scan_running)) {
+ cfg->bss_list = wl_escan_get_buf(cfg, TRUE);
+ if (!scan_req_match(cfg)) {
+ WL_TRACE_HW4(("SCAN ABORTED(UNEXPECTED): "
+ "scanned AP count=%d\n",
+ cfg->bss_list->count));
+ }
+ wl_inform_bss(cfg);
+ wl_notify_escan_complete(cfg, ndev, true);
+ }
+ /* scan aborted, need to set previous success result */
+		wl_escan_increment_sync_id(cfg, SCAN_BUF_CNT);
+ }
+#else /* WL_DRV_AVOID_SCANCACHE */
+ err = wl_escan_without_scan_cache(cfg, escan_result, ndev, e, status);
+#endif /* WL_DRV_AVOID_SCANCACHE */
+exit:
+ mutex_unlock(&cfg->scan_sync);
+ return err;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) && defined(SUPPORT_RANDOM_MAC_SCAN)
+static const u8 *
+wl_retrieve_wps_attribute(const u8 *buf, u16 element_id)
+{
+ const wl_wps_ie_t *ie = NULL;
+ u16 len = 0;
+ const u8 *attrib;
+
+ if (!buf) {
+		WL_ERR(("WPS IE not present\n"));
+		return NULL;
+ }
+
+ ie = (const wl_wps_ie_t*) buf;
+ len = ie->len;
+
+	/* Point attrib to the WPS IE's attribute field and subtract the
+	 * preceding OUI and OUI type (3 + 1 bytes) from the length.
+	 */
+ attrib = ie->attrib;
+ len -= 4; /* exclude OUI + OUI_TYPE */
+
+ /* Search for attrib */
+ return wl_find_attribute(attrib, len, element_id);
+}
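+
+/* Usage note (illustrative; the TLV walk itself lives in wl_find_attribute(),
+ * and the big-endian 16-bit id/length attribute layout is an assumption
+ * based on the WSC spec):
+ *
+ *	const u8 *req_type = wl_retrieve_wps_attribute(wps_ie, WPS_ATTR_REQ_TYPE);
+ *	if (req_type && *req_type == WPS_REQ_TYPE_ENROLLEE)
+ *		... the peer is acting as a WPS enrollee ...
+ *
+ * This is how wl_is_wps_enrollee_active() below consumes the helper.
+ */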
+
+bool
+wl_is_wps_enrollee_active(struct net_device *ndev, const u8 *ie_ptr, u16 len)
+{
+ const u8 *ie;
+ const u8 *attrib;
+
+ if ((ie = (const u8 *)wl_cfgp2p_find_wpsie(ie_ptr, len)) == NULL) {
+ WL_DBG(("WPS IE not present. Do nothing.\n"));
+ return false;
+ }
+
+ if ((attrib = wl_retrieve_wps_attribute(ie, WPS_ATTR_REQ_TYPE)) == NULL) {
+ WL_DBG(("WPS_ATTR_REQ_TYPE not found!\n"));
+ return false;
+ }
+
+ if (*attrib == WPS_REQ_TYPE_ENROLLEE) {
+		WL_INFORM_MEM(("WPS Enrollee Active\n"));
+ return true;
+ } else {
+ WL_DBG(("WPS_REQ_TYPE:%d\n", *attrib));
+ }
+
+ return false;
+}
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0) && defined(SUPPORT_RANDOM_MAC_SCAN) */
+
+/* Find listen channel */
+static s32 wl_find_listen_channel(struct bcm_cfg80211 *cfg,
+ const u8 *ie, u32 ie_len)
+{
+ const wifi_p2p_ie_t *p2p_ie;
+ const u8 *end, *pos;
+ s32 listen_channel;
+
+ pos = (const u8 *)ie;
+
+ p2p_ie = wl_cfgp2p_find_p2pie(pos, ie_len);
+
+ if (p2p_ie == NULL) {
+ return 0;
+ }
+
+ if (p2p_ie->len < MIN_P2P_IE_LEN || p2p_ie->len > MAX_P2P_IE_LEN) {
+ CFGP2P_ERR(("p2p_ie->len out of range - %d\n", p2p_ie->len));
+ return 0;
+ }
+ pos = p2p_ie->subelts;
+ end = p2p_ie->subelts + (p2p_ie->len - 4);
+
+	CFGP2P_DBG((" found p2p ie, length %d\n",
+		p2p_ie->len));
+
+ while (pos < end) {
+ uint16 attr_len;
+ if (pos + 2 >= end) {
+ CFGP2P_DBG((" -- Invalid P2P attribute"));
+ return 0;
+ }
+ attr_len = ((uint16) (((pos + 1)[1] << 8) | (pos + 1)[0]));
+
+ if (pos + 3 + attr_len > end) {
+ CFGP2P_DBG(("P2P: Attribute underflow "
+ "(len=%u left=%d)",
+ attr_len, (int) (end - pos - 3)));
+ return 0;
+ }
+
+		/* if the Listen Channel attribute id (6) is found and the value
+		 * is valid, return the listen channel
+		 */
+ if (pos[0] == 6) {
+			/* listen channel subelement layout:
+			 * 1(id) + 2(len) + 3(country) + 1(op. class) + 1(chan num)
+			 */
+ listen_channel = pos[1 + 2 + 3 + 1];
+
+ if (listen_channel == SOCIAL_CHAN_1 ||
+ listen_channel == SOCIAL_CHAN_2 ||
+ listen_channel == SOCIAL_CHAN_3) {
+ CFGP2P_DBG((" Found my Listen Channel %d \n", listen_channel));
+ return listen_channel;
+ }
+ }
+ pos += 3 + attr_len;
+ }
+ return 0;
+}
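+
+/* Worked example (illustrative bytes, assuming the standard P2P attribute
+ * layout of 1-byte id + 2-byte little-endian length, and SOCIAL_CHAN_2 == 6):
+ *
+ *	06 05 00 'X' 'X' 04 51 06
+ *	id=6 (Listen Channel), len=5, country "XX\x04", op class 0x51, channel 6
+ *
+ * would make wl_find_listen_channel() return 6.
+ */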
+
+#ifdef WL_SCAN_TYPE
+static u32
+wl_cfgscan_map_nl80211_scan_type(struct bcm_cfg80211 *cfg, struct cfg80211_scan_request *request)
+{
+ u32 scan_flags = 0;
+
+ if (!request) {
+ return scan_flags;
+ }
+
+ if (request->flags & NL80211_SCAN_FLAG_LOW_SPAN) {
+ scan_flags |= WL_SCANFLAGS_LOW_SPAN;
+ }
+ if (request->flags & NL80211_SCAN_FLAG_HIGH_ACCURACY) {
+ scan_flags |= WL_SCANFLAGS_HIGH_ACCURACY;
+ }
+ if (request->flags & NL80211_SCAN_FLAG_LOW_POWER) {
+ scan_flags |= WL_SCANFLAGS_LOW_POWER_SCAN;
+ }
+ if (request->flags & NL80211_SCAN_FLAG_LOW_PRIORITY) {
+ scan_flags |= WL_SCANFLAGS_LOW_PRIO;
+ }
+
+ WL_INFORM(("scan flags. wl:%x cfg80211:%x\n", scan_flags, request->flags));
+ return scan_flags;
+}
+#endif /* WL_SCAN_TYPE */
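+
+/* Sketch of how the mapping above is consumed: wl_scan_prep() adds the
+ * returned flags into the scan_type field of the scan params (the flag bits
+ * are assumed to be distinct), so e.g. a request carrying
+ * NL80211_SCAN_FLAG_LOW_POWER ends up as WL_SCANFLAGS_LOW_POWER_SCAN in
+ * params_v2->scan_type.
+ */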
+
+chanspec_t wl_freq_to_chanspec(int freq)
+{
+ chanspec_t chanspec = 0;
+ u16 bw;
+
+ /* see 802.11 17.3.8.3.2 and Annex J */
+ if (freq == 2484) {
+ chanspec = 14;
+ chanspec |= WL_CHANSPEC_BAND_2G;
+ bw = WL_CHANSPEC_BW_20;
+ } else if (freq >= 2412 && freq < 2484) {
+ chanspec = (freq - 2407) / 5;
+ chanspec |= WL_CHANSPEC_BAND_2G;
+ bw = WL_CHANSPEC_BW_20;
+ } else if (freq >= 4005 && freq <= 4980) {
+ chanspec = (freq - 4000) / 5;
+ chanspec |= WL_CHANSPEC_BAND_5G;
+ bw = WL_CHANSPEC_BW_20;
+ } else if (freq >= 5005 && freq < 5895) {
+ chanspec = (freq - 5000) / 5;
+ chanspec |= WL_CHANSPEC_BAND_5G;
+ bw = WL_CHANSPEC_BW_20;
+#ifdef WL_6G_BAND
+ } else if (freq >= 5945 && freq <= 7200) {
+ /* see 802.11ax D4.1 27.3.22.2 */
+ chanspec = (freq - 5950) / 5;
+ bw = WL_CHANSPEC_BW_20;
+ if ((chanspec % 8) == 3) {
+ bw = WL_CHANSPEC_BW_40;
+ } else if ((chanspec % 16) == 7) {
+ bw = WL_CHANSPEC_BW_80;
+ } else if ((chanspec % 32) == 15) {
+ bw = WL_CHANSPEC_BW_160;
+ }
+ chanspec |= WL_CHANSPEC_BAND_6G;
+ } else if (freq == 5935) {
+ chanspec = 2;
+ bw = WL_CHANSPEC_BW_20;
+ chanspec |= WL_CHANSPEC_BAND_6G;
+#endif /* WL_6G_BAND */
+ } else {
+ WL_ERR(("Invalid frequency %d\n", freq));
+ return INVCHANSPEC;
+ }
+
+ /* Get the min_bw set for the interface */
+ chanspec |= bw;
+ chanspec |= WL_CHANSPEC_CTL_SB_NONE;
+
+ return chanspec;
+}
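+
+/* Example mappings implied by the ranges above (sketch):
+ *
+ *	wl_freq_to_chanspec(2412) -> chan 1  | WL_CHANSPEC_BAND_2G | BW 20
+ *	wl_freq_to_chanspec(2484) -> chan 14 | WL_CHANSPEC_BAND_2G | BW 20
+ *	wl_freq_to_chanspec(5180) -> chan 36 | WL_CHANSPEC_BAND_5G | BW 20
+ *	wl_freq_to_chanspec(5000) -> INVCHANSPEC (falls outside every range)
+ */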
+
+#ifdef SCAN_SUPPRESS
+static void
+wl_cfgscan_populate_scan_channel(struct bcm_cfg80211 *cfg,
+ struct ieee80211_channel **channels, u32 n_channels,
+ u16 *channel_list, u32 target_channel)
+{
+ u32 i = 0;
+ u32 chanspec = 0;
+ u32 channel;
+
+	for (i = 0; i < n_channels; i++) {
+ channel = ieee80211_frequency_to_channel(channels[i]->center_freq);
+ if (channel != target_channel)
+ continue;
+ if (!dhd_conf_match_channel(cfg->pub, channel))
+ return;
+
+ chanspec = wl_freq_to_chanspec(channels[i]->center_freq);
+ if (chanspec == INVCHANSPEC) {
+ WL_ERR(("Invalid chanspec! Skipping channel\n"));
+ continue;
+ }
+
+ channel_list[0] = chanspec;
+ break;
+ }
+ WL_SCAN(("chan: %d, chanspec: %x\n", target_channel, chanspec));
+}
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
+#define IS_RADAR_CHAN(flags) (flags & (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_PASSIVE_SCAN))
+#else
+#define IS_RADAR_CHAN(flags) (flags & (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR))
+#endif
+static void
+wl_cfgscan_populate_scan_channels(struct bcm_cfg80211 *cfg,
+ struct ieee80211_channel **channels, u32 n_channels,
+ u16 *channel_list, u32 *num_channels, bool use_chanspecs, bool skip_dfs)
+{
+ u32 i = 0, j = 0;
+ u32 chanspec = 0;
+ struct wireless_dev *wdev;
+ bool is_p2p_scan = false;
+#ifdef P2P_SKIP_DFS
+ int is_printed = false;
+#endif /* P2P_SKIP_DFS */
+ u32 channel;
+
+ if (!channels || !n_channels) {
+ /* Do full channel scan */
+ return;
+ }
+
+ wdev = GET_SCAN_WDEV(cfg->scan_request);
+ if (!skip_dfs && wdev && wdev->netdev &&
+ (wdev->netdev != bcmcfg_to_prmry_ndev(cfg))) {
+ /* SKIP DFS channels for Secondary interface */
+ skip_dfs = true;
+ }
+
+ /* Check if request is for p2p scans */
+ is_p2p_scan = p2p_is_on(cfg) && p2p_scan(cfg);
+
+ for (i = 0; i < n_channels; i++) {
+ channel = ieee80211_frequency_to_channel(channels[i]->center_freq);
+ if (skip_dfs && (IS_RADAR_CHAN(channels[i]->flags))) {
+ WL_DBG(("Skipping radar channel. freq:%d\n",
+ (channels[i]->center_freq)));
+ continue;
+ }
+ if (!dhd_conf_match_channel(cfg->pub, channel))
+ continue;
+
+ chanspec = wl_freq_to_chanspec(channels[i]->center_freq);
+ if (chanspec == INVCHANSPEC) {
+ WL_ERR(("Invalid chanspec! Skipping channel\n"));
+ continue;
+ }
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
+ if (channels[i]->band == IEEE80211_BAND_60GHZ) {
+ /* Not supported */
+ continue;
+ }
+#endif /* LINUX_VER >= 3.6 */
+#ifdef WL_HOST_BAND_MGMT
+ if (channels[i]->band == IEEE80211_BAND_2GHZ) {
+ if ((cfg->curr_band == WLC_BAND_5G) ||
+ (cfg->curr_band == WLC_BAND_6G)) {
+				if (!(is_p2p_scan &&
+					IS_P2P_SOCIAL_CHANNEL(CHSPEC_CHANNEL(chanspec)))) {
+ WL_DBG(("In 5G only mode, omit 2G channel:%d\n", channel));
+ continue;
+ }
+ }
+ } else {
+ if (cfg->curr_band == WLC_BAND_2G) {
+ WL_DBG(("In 2G only mode, omit 5G channel:%d\n", channel));
+ continue;
+ }
+ }
+#endif /* WL_HOST_BAND_MGMT */
+
+ if (is_p2p_scan) {
+#ifdef WL_P2P_6G
+ if (!(cfg->p2p_6g_enabled)) {
+#endif /* WL_P2P_6G */
+ if (CHSPEC_IS6G(chanspec)) {
+ continue;
+ }
+#ifdef WL_P2P_6G
+ }
+#endif /* WL_P2P_6G */
+
+#ifdef P2P_SKIP_DFS
+ if (CHSPEC_IS5G(chanspec) &&
+ (CHSPEC_CHANNEL(chanspec) >= 52 &&
+ CHSPEC_CHANNEL(chanspec) <= 144)) {
+ if (is_printed == false) {
+ WL_ERR(("SKIP DFS CHANs(52~144)\n"));
+ is_printed = true;
+ }
+ continue;
+ }
+#endif /* P2P_SKIP_DFS */
+ }
+
+ if (use_chanspecs) {
+ channel_list[j] = chanspec;
+ } else {
+ channel_list[j] = CHSPEC_CHANNEL(chanspec);
+ }
+ WL_SCAN(("chan: %d, chanspec: %x\n", channel, channel_list[j]));
+ j++;
+ if (j == WL_NUMCHANSPECS) {
+ /* max limit */
+ break;
+ }
+ }
+ *num_channels = j;
+}
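+
+/* Note: with use_chanspecs == true the output list carries full chanspecs
+ * (band | bw | channel); with false it carries bare channel numbers. Both
+ * callers in this file (wl_scan_prep() and the p2p path in wl_run_escan())
+ * pass use_chanspecs == true.
+ */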
+
+static void
+wl_cfgscan_populate_scan_ssids(struct bcm_cfg80211 *cfg, u8 *buf_ptr, u32 buf_len,
+ struct cfg80211_scan_request *request, u32 *ssid_num)
+{
+ u32 n_ssids;
+ wlc_ssid_t ssid;
+ int i, j = 0;
+
+ if (!request || !buf_ptr) {
+ /* Do full channel scan */
+ return;
+ }
+
+ n_ssids = request->n_ssids;
+ if (n_ssids > 0) {
+
+ if (buf_len < (n_ssids * sizeof(wlc_ssid_t))) {
+ WL_ERR(("buf len not sufficient for scan ssids\n"));
+ return;
+ }
+
+ for (i = 0; i < n_ssids; i++) {
+ bzero(&ssid, sizeof(wlc_ssid_t));
+ ssid.SSID_len = MIN(request->ssids[i].ssid_len, DOT11_MAX_SSID_LEN);
+			/* The return value is ignored: the line above clamps the
+			 * copy length to DOT11_MAX_SSID_LEN
+			 */
+ (void)memcpy_s(ssid.SSID, DOT11_MAX_SSID_LEN, request->ssids[i].ssid,
+ ssid.SSID_len);
+ if (!ssid.SSID_len) {
+ WL_SCAN(("%d: Broadcast scan\n", i));
+ } else {
+ WL_SCAN(("%d: scan for %s size =%d\n", i,
+ ssid.SSID, ssid.SSID_len));
+ }
+			/* For the multiple-SSID case copy each SSID entry; the
+			 * destination pointer advances by one wlc_ssid_t per entry
+			 */
+ (void)memcpy_s(buf_ptr, sizeof(wlc_ssid_t), &ssid, sizeof(wlc_ssid_t));
+ buf_ptr += sizeof(wlc_ssid_t);
+ j++;
+ }
+ } else {
+ WL_SCAN(("Broadcast scan\n"));
+ }
+ *ssid_num = j;
+}
+
+static s32
+wl_scan_prep(struct bcm_cfg80211 *cfg, struct net_device *ndev, void *scan_params, u32 len,
+ struct cfg80211_scan_request *request)
+{
+#ifdef SCAN_SUPPRESS
+ u32 channel;
+#endif
+ wl_scan_params_t *params = NULL;
+ wl_scan_params_v2_t *params_v2 = NULL;
+ u32 scan_type = 0;
+ u32 scan_param_size = 0;
+ u32 n_channels = 0;
+ u32 n_ssids = 0;
+ uint16 *chan_list = NULL;
+ u32 channel_offset = 0;
+ u32 cur_offset;
+
+ if (!scan_params) {
+ return BCME_ERROR;
+ }
+
+ if (cfg->active_scan == PASSIVE_SCAN) {
+ WL_INFORM_MEM(("Enforcing passive scan\n"));
+ scan_type = WL_SCANFLAGS_PASSIVE;
+ }
+
+ WL_DBG(("Preparing Scan request\n"));
+ if (cfg->scan_params_v2) {
+ params_v2 = (wl_scan_params_v2_t *)scan_params;
+ scan_param_size = sizeof(wl_scan_params_v2_t);
+ channel_offset = offsetof(wl_scan_params_v2_t, channel_list);
+ } else {
+ params = (wl_scan_params_t *)scan_params;
+ scan_param_size = sizeof(wl_scan_params_t);
+ channel_offset = offsetof(wl_scan_params_t, channel_list);
+ }
+
+ if (params_v2) {
+ /* scan params ver2 */
+#if defined(WL_SCAN_TYPE)
+ scan_type += wl_cfgscan_map_nl80211_scan_type(cfg, request);
+#endif /* WL_SCAN_TYPE */
+
+ (void)memcpy_s(&params_v2->bssid, ETHER_ADDR_LEN, &ether_bcast, ETHER_ADDR_LEN);
+ params_v2->version = htod16(WL_SCAN_PARAMS_VERSION_V2);
+ params_v2->length = htod16(sizeof(wl_scan_params_v2_t));
+ params_v2->bss_type = DOT11_BSSTYPE_ANY;
+ params_v2->scan_type = htod32(scan_type);
+ params_v2->nprobes = htod32(-1);
+ params_v2->active_time = htod32(-1);
+ params_v2->passive_time = htod32(-1);
+ params_v2->home_time = htod32(-1);
+ params_v2->channel_num = 0;
+ bzero(&params_v2->ssid, sizeof(wlc_ssid_t));
+ chan_list = params_v2->channel_list;
+ } else {
+ /* scan params ver 1 */
+ if (!params) {
+ ASSERT(0);
+ return BCME_ERROR;
+ }
+ (void)memcpy_s(&params->bssid, ETHER_ADDR_LEN, &ether_bcast, ETHER_ADDR_LEN);
+ params->bss_type = DOT11_BSSTYPE_ANY;
+ params->scan_type = 0;
+ params->nprobes = htod32(-1);
+ params->active_time = htod32(-1);
+ params->passive_time = htod32(-1);
+ params->home_time = htod32(-1);
+ params->channel_num = 0;
+ bzero(&params->ssid, sizeof(wlc_ssid_t));
+ chan_list = params->channel_list;
+ }
+
+ if (!request) {
+ /* scan_request null, do scan based on base config */
+ WL_DBG(("scan_request is null\n"));
+ return BCME_OK;
+ }
+
+ WL_INFORM(("n_channels:%d n_ssids:%d\n", request->n_channels, request->n_ssids));
+
+ cur_offset = channel_offset;
+ /* Copy channel array if applicable */
+#ifdef SCAN_SUPPRESS
+ channel = wl_ext_scan_suppress(ndev, scan_params, cfg->scan_params_v2);
+ if (channel) {
+ n_channels = 1;
+ if ((n_channels > 0) && chan_list) {
+ if (len >= (scan_param_size + (n_channels * sizeof(u16)))) {
+ wl_cfgscan_populate_scan_channel(cfg,
+ request->channels, request->n_channels,
+ chan_list, channel);
+ cur_offset += (n_channels * (sizeof(u16)));
+ }
+ }
+ } else
+#endif
+ if ((request->n_channels > 0) && chan_list) {
+ if (len >= (scan_param_size + (request->n_channels * sizeof(u16)))) {
+ wl_cfgscan_populate_scan_channels(cfg,
+ request->channels, request->n_channels,
+ chan_list, &n_channels, true, false);
+ cur_offset += (uint32)(n_channels * (sizeof(u16)));
+ }
+ }
+
+ /* Copy ssid array if applicable */
+ if (request->n_ssids > 0) {
+ cur_offset = (u32) roundup(cur_offset, sizeof(u32));
+ if (len > (cur_offset + (request->n_ssids * sizeof(wlc_ssid_t)))) {
+ u32 rem_len = len - cur_offset;
+ wl_cfgscan_populate_scan_ssids(cfg,
+ ((u8 *)scan_params + cur_offset), rem_len, request, &n_ssids);
+ }
+ }
+
+ if (n_ssids || n_channels) {
+ u32 channel_num =
+ htod32((n_ssids << WL_SCAN_PARAMS_NSSID_SHIFT) |
+ (n_channels & WL_SCAN_PARAMS_COUNT_MASK));
+ if (params_v2) {
+ params_v2->channel_num = channel_num;
+ if (n_channels == 1) {
+ params_v2->active_time = htod32(WL_SCAN_CONNECT_DWELL_TIME_MS);
+ params_v2->nprobes = htod32(
+ params_v2->active_time / WL_SCAN_JOIN_PROBE_INTERVAL_MS);
+ }
+ } else {
+ params->channel_num = channel_num;
+ if (n_channels == 1) {
+ params->active_time = htod32(WL_SCAN_CONNECT_DWELL_TIME_MS);
+ params->nprobes = htod32(
+ params->active_time / WL_SCAN_JOIN_PROBE_INTERVAL_MS);
+ }
+ }
+ }
+
+ WL_DBG_MEM(("scan_prep done. n_channels:%d n_ssids:%d\n", n_channels, n_ssids));
+ return BCME_OK;
+}
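+
+/* Buffer layout produced by wl_scan_prep() (sketch):
+ *
+ *	+------------------------------+ offset 0
+ *	| wl_scan_params{_v2}_t fixed  |
+ *	+------------------------------+ channel_offset
+ *	| n_channels u16 chanspecs     |
+ *	+------------------------------+ roundup(cur_offset, sizeof(u32))
+ *	| n_ssids wlc_ssid_t entries   |
+ *	+------------------------------+
+ *
+ * channel_num packs both counts:
+ * (n_ssids << WL_SCAN_PARAMS_NSSID_SHIFT) | (n_channels & WL_SCAN_PARAMS_COUNT_MASK)
+ */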
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) && defined(SUPPORT_RANDOM_MAC_SCAN)
+static s32
+wl_config_scan_macaddr(struct bcm_cfg80211 *cfg,
+ struct net_device *ndev, bool randmac_enable, u8 *mac_addr, u8 *mac_addr_mask)
+{
+ s32 err = BCME_OK;
+
+ if (randmac_enable) {
+ if (!cfg->scanmac_enabled) {
+ err = wl_cfg80211_scan_mac_enable(ndev);
+ if (unlikely(err)) {
+ goto exit;
+ }
+ WL_DBG(("randmac enabled\n"));
+ }
+
+#ifdef WL_HOST_RANDMAC_CONFIG
+		/* If a mac address and mask are provided, apply the user space
+		 * configuration
+		 */
+		if (mac_addr && mac_addr_mask && !ETHER_ISNULLADDR(mac_addr_mask)) {
+ err = wl_cfg80211_scan_mac_config(ndev,
+ mac_addr, mac_addr_mask);
+ if (unlikely(err)) {
+ WL_ERR(("scan mac config failed\n"));
+ goto exit;
+ }
+ }
+#endif /* WL_HOST_RANDMAC_CONFIG */
+ if (cfg->scanmac_config) {
+ /* Use default scanmac configuration */
+ WL_DBG(("Use host provided scanmac config\n"));
+ } else {
+ WL_DBG(("Use fw default scanmac config\n"));
+ }
+ } else if (!randmac_enable && cfg->scanmac_enabled) {
+ WL_DBG(("randmac disabled\n"));
+ err = wl_cfg80211_scan_mac_disable(ndev);
+ } else {
+ WL_DBG(("no change in randmac configuration\n"));
+ }
+
+exit:
+ if (err < 0) {
+ if (err == BCME_UNSUPPORTED) {
+			/* Ignore if chip doesn't support the feature */
+ err = BCME_OK;
+ } else {
+ /* For errors other than unsupported fail the scan */
+ WL_ERR(("%s : failed to configure random mac for host scan, %d\n",
+ __FUNCTION__, err));
+ err = -EAGAIN;
+ }
+ }
+
+ return err;
+}
+#endif /* LINUX VER > 3.19 && SUPPORT_RANDOM_MAC_SCAN */
+
+static s32
+wl_run_escan(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ struct cfg80211_scan_request *request, uint16 action)
+{
+ s32 err = BCME_OK;
+ u32 num_chans = 0;
+ u32 n_channels = 0;
+ u32 n_ssids;
+ s32 params_size;
+ wl_escan_params_t *eparams = NULL;
+ wl_escan_params_v2_t *eparams_v2 = NULL;
+ u8 *scan_params = NULL;
+ u8 *params = NULL;
+ s32 search_state = WL_P2P_DISC_ST_SCAN;
+ u16 *default_chan_list = NULL;
+ s32 bssidx = -1;
+ struct net_device *dev = NULL;
+#if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME)
+ bool is_first_init_2g_scan = false;
+#endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */
+ p2p_scan_purpose_t p2p_scan_purpose = P2P_SCAN_PURPOSE_MIN;
+ u32 chan_mem = 0;
+ u32 sync_id = 0;
+
+ WL_DBG(("Enter \n"));
+
+ if (!cfg || !request) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ if (cfg->scan_params_v2) {
+ params_size = (WL_SCAN_PARAMS_V2_FIXED_SIZE +
+ OFFSETOF(wl_escan_params_v2_t, params));
+ } else {
+ params_size = (WL_SCAN_PARAMS_FIXED_SIZE + OFFSETOF(wl_escan_params_t, params));
+ }
+
+ if (!cfg->p2p_supported || !p2p_scan(cfg)) {
+ /* LEGACY SCAN TRIGGER */
+ WL_SCAN((" LEGACY E-SCAN START\n"));
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) && defined(SUPPORT_RANDOM_MAC_SCAN)
+ if (request) {
+ bool randmac_enable = (request->flags & NL80211_SCAN_FLAG_RANDOM_ADDR);
+ if (wl_is_wps_enrollee_active(ndev, request->ie, request->ie_len)) {
+ randmac_enable = false;
+ }
+ if ((err = wl_config_scan_macaddr(cfg, ndev, randmac_enable,
+ request->mac_addr, request->mac_addr_mask)) != BCME_OK) {
+ WL_ERR(("scanmac addr config failed\n"));
+ goto exit;
+ }
+ }
+#endif /* KERNEL_VER >= 3.19 && SUPPORT_RANDOM_MAC_SCAN */
+
+#if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME)
+ if (ndev == bcmcfg_to_prmry_ndev(cfg) && g_first_broadcast_scan == true) {
+#ifdef USE_INITIAL_2G_SCAN
+ struct ieee80211_channel tmp_channel_list[CH_MAX_2G_CHANNEL];
+ /* allow one 5G channel to add previous connected channel in 5G */
+ bool allow_one_5g_channel = TRUE;
+ int i, j;
+ j = 0;
+ for (i = 0; i < request->n_channels; i++) {
+ int tmp_chan = ieee80211_frequency_to_channel
+ (request->channels[i]->center_freq);
+ if (tmp_chan > CH_MAX_2G_CHANNEL) {
+ if (allow_one_5g_channel)
+ allow_one_5g_channel = FALSE;
+ else
+ continue;
+ }
+ if (j > CH_MAX_2G_CHANNEL) {
+ WL_ERR(("Index %d exceeds max 2.4GHz channels %d"
+ " and previous 5G connected channel\n",
+ j, CH_MAX_2G_CHANNEL));
+ break;
+ }
+ bcopy(request->channels[i], &tmp_channel_list[j],
+ sizeof(struct ieee80211_channel));
+ WL_SCAN(("channel of request->channels[%d]=%d\n", i, tmp_chan));
+ j++;
+ }
+ if ((j > 0) && (j <= CH_MAX_2G_CHANNEL)) {
+ for (i = 0; i < j; i++)
+ bcopy(&tmp_channel_list[i], request->channels[i],
+ sizeof(struct ieee80211_channel));
+
+ request->n_channels = j;
+ is_first_init_2g_scan = true;
+ }
+ else
+ WL_ERR(("Invalid number of 2.4GHz channels %d\n", j));
+
+ WL_SCAN(("request->n_channels=%d\n", request->n_channels));
+#else /* USE_INITIAL_SHORT_DWELL_TIME */
+ is_first_init_2g_scan = true;
+#endif /* USE_INITIAL_2G_SCAN */
+ g_first_broadcast_scan = false;
+ }
+#endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */
+
+ n_channels = request->n_channels;
+ n_ssids = request->n_ssids;
+ if (n_channels % 2)
+			/* If n_channels is odd, add a u16 pad */
+ params_size += sizeof(u16) * (n_channels + 1);
+ else
+ params_size += sizeof(u16) * n_channels;
+
+ /* Allocate space for populating ssids in wl_escan_params_t struct */
+ params_size += sizeof(struct wlc_ssid) * n_ssids;
+ params = MALLOCZ(cfg->osh, params_size);
+ if (params == NULL) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ wl_escan_set_sync_id(sync_id, cfg);
+ if (cfg->scan_params_v2) {
+ eparams_v2 = (wl_escan_params_v2_t *)params;
+ scan_params = (u8 *)&eparams_v2->params;
+ eparams_v2->version = htod32(ESCAN_REQ_VERSION_V2);
+ eparams_v2->action = htod16(action);
+ eparams_v2->sync_id = sync_id;
+ } else {
+ eparams = (wl_escan_params_t *)params;
+ scan_params = (u8 *)&eparams->params;
+ eparams->version = htod32(ESCAN_REQ_VERSION);
+ eparams->action = htod16(action);
+ eparams->sync_id = sync_id;
+ }
+
+ if (wl_scan_prep(cfg, ndev, scan_params, params_size, request) < 0) {
+ WL_ERR(("scan_prep failed\n"));
+ err = -EINVAL;
+ goto exit;
+ }
+
+#if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME)
+		/* Override active_time to reduce scan time if it's the first broadcast scan. */
+ if (is_first_init_2g_scan) {
+ if (eparams_v2) {
+ eparams_v2->params.active_time = FIRST_SCAN_ACTIVE_DWELL_TIME_MS;
+ } else {
+ eparams->params.active_time = FIRST_SCAN_ACTIVE_DWELL_TIME_MS;
+ }
+ }
+#endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */
+
+ wl_escan_set_type(cfg, WL_SCANTYPE_LEGACY);
+ if (params_size + sizeof("escan") >= WLC_IOCTL_MEDLEN) {
+ WL_ERR(("ioctl buffer length not sufficient\n"));
+ MFREE(cfg->osh, params, params_size);
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ bssidx = wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr);
+ WL_MSG(ndev->name, "LEGACY_SCAN sync ID: %d, bssidx: %d\n", sync_id, bssidx);
+ err = wldev_iovar_setbuf(ndev, "escan", params, params_size,
+ cfg->escan_ioctl_buf, WLC_IOCTL_MEDLEN, NULL);
+ if (unlikely(err)) {
+ if (err == BCME_EPERM)
+ /* Scan Not permitted at this point of time */
+ WL_DBG((" Escan not permitted at this time (%d)\n", err));
+ else
+ WL_ERR((" Escan set error (%d)\n", err));
+ } else {
+ DBG_EVENT_LOG((dhd_pub_t *)cfg->pub, WIFI_EVENT_DRIVER_SCAN_REQUESTED);
+ }
+ MFREE(cfg->osh, params, params_size);
+ }
+ else if (p2p_is_on(cfg) && p2p_scan(cfg)) {
+ /* P2P SCAN TRIGGER */
+ if (request->n_channels) {
+ num_chans = request->n_channels;
+ WL_SCAN((" chan number : %d\n", num_chans));
+ chan_mem = (u32)(num_chans * sizeof(*default_chan_list));
+ default_chan_list = MALLOCZ(cfg->osh, chan_mem);
+ if (default_chan_list == NULL) {
+ WL_ERR(("channel list allocation failed \n"));
+ err = -ENOMEM;
+ goto exit;
+ }
+ /* Populate channels for p2p scanning */
+ wl_cfgscan_populate_scan_channels(cfg,
+ request->channels, request->n_channels,
+ default_chan_list, &num_chans, true, true);
+
+ if (num_chans == SOCIAL_CHAN_CNT && (
+ (CHSPEC_CHANNEL(default_chan_list[0]) ==
+ SOCIAL_CHAN_1) &&
+ (CHSPEC_CHANNEL(default_chan_list[1]) ==
+ SOCIAL_CHAN_2) &&
+ (CHSPEC_CHANNEL(default_chan_list[2]) ==
+ SOCIAL_CHAN_3))) {
+ /* SOCIAL CHANNELS 1, 6, 11 */
+ search_state = WL_P2P_DISC_ST_SEARCH;
+ p2p_scan_purpose = P2P_SCAN_SOCIAL_CHANNEL;
+ WL_DBG(("P2P SEARCH PHASE START \n"));
+ } else if (((dev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION1)) &&
+ (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP)) ||
+ ((dev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION2)) &&
+ (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP))) {
+ /* If you are already a GO, then do SEARCH only */
+				WL_DBG(("Already a GO. Do SEARCH Only\n"));
+ search_state = WL_P2P_DISC_ST_SEARCH;
+ p2p_scan_purpose = P2P_SCAN_NORMAL;
+
+ } else if (num_chans == 1) {
+ p2p_scan_purpose = P2P_SCAN_CONNECT_TRY;
+ } else if (num_chans == SOCIAL_CHAN_CNT + 1) {
+ /* SOCIAL_CHAN_CNT + 1 takes care of the Progressive scan supported by
+ * the supplicant
+ */
+ p2p_scan_purpose = P2P_SCAN_SOCIAL_CHANNEL;
+ } else {
+ WL_DBG(("P2P SCAN STATE START \n"));
+ p2p_scan_purpose = P2P_SCAN_NORMAL;
+ }
+ } else {
+ err = -EINVAL;
+ goto exit;
+ }
+ WL_INFORM_MEM(("p2p_scan num_channels:%d\n", num_chans));
+ err = wl_cfgp2p_escan(cfg, ndev, ACTIVE_SCAN, num_chans, default_chan_list,
+ search_state, action,
+ wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE), NULL,
+ p2p_scan_purpose);
+
+ if (!err)
+ cfg->p2p->search_state = search_state;
+
+ MFREE(cfg->osh, default_chan_list, chan_mem);
+ }
+exit:
+ if (unlikely(err)) {
+		/* Don't print an error in case of scan suppress */
+ if ((err == BCME_EPERM) && cfg->scan_suppressed)
+ WL_DBG(("Escan failed: Scan Suppressed \n"));
+ else
+ WL_ERR(("scan error (%d)\n", err));
+ }
+ return err;
+}
+
+s32
+wl_do_escan(struct bcm_cfg80211 *cfg, struct wiphy *wiphy, struct net_device *ndev,
+ struct cfg80211_scan_request *request)
+{
+ s32 err = BCME_OK;
+ s32 passive_scan;
+ s32 passive_scan_time;
+ s32 passive_scan_time_org;
+ wl_scan_results_t *results;
+ WL_SCAN(("Enter \n"));
+
+ results = wl_escan_get_buf(cfg, FALSE);
+ results->version = 0;
+ results->count = 0;
+ results->buflen = WL_SCAN_RESULTS_FIXED_SIZE;
+
+ cfg->escan_info.ndev = ndev;
+ cfg->escan_info.wiphy = wiphy;
+ cfg->escan_info.escan_state = WL_ESCAN_STATE_SCANING;
+ passive_scan = cfg->active_scan ? 0 : 1;
+ err = wldev_ioctl_set(ndev, WLC_SET_PASSIVE_SCAN,
+ &passive_scan, sizeof(passive_scan));
+ if (unlikely(err)) {
+ WL_ERR(("error (%d)\n", err));
+ goto exit;
+ }
+
+ if (passive_channel_skip) {
+
+ err = wldev_ioctl_get(ndev, WLC_GET_SCAN_PASSIVE_TIME,
+ &passive_scan_time_org, sizeof(passive_scan_time_org));
+ if (unlikely(err)) {
+ WL_ERR(("== error (%d)\n", err));
+ goto exit;
+ }
+
+ WL_SCAN(("PASSIVE SCAN time : %d \n", passive_scan_time_org));
+
+ passive_scan_time = 0;
+ err = wldev_ioctl_set(ndev, WLC_SET_SCAN_PASSIVE_TIME,
+ &passive_scan_time, sizeof(passive_scan_time));
+ if (unlikely(err)) {
+ WL_ERR(("== error (%d)\n", err));
+ goto exit;
+ }
+
+		WL_SCAN(("PASSIVE SCAN SKIPPED!! (passive_channel_skip:%d) \n",
+ passive_channel_skip));
+ }
+
+ err = wl_run_escan(cfg, ndev, request, WL_SCAN_ACTION_START);
+
+ if (passive_channel_skip) {
+ err = wldev_ioctl_set(ndev, WLC_SET_SCAN_PASSIVE_TIME,
+ &passive_scan_time_org, sizeof(passive_scan_time_org));
+ if (unlikely(err)) {
+ WL_ERR(("== error (%d)\n", err));
+ goto exit;
+ }
+
+ WL_SCAN(("PASSIVE SCAN RECOVERED!! (passive_scan_time_org:%d) \n",
+ passive_scan_time_org));
+ }
+
+exit:
+ return err;
+}
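+
+/* Note: when passive_channel_skip is set, wl_do_escan() saves the current
+ * WLC_GET_SCAN_PASSIVE_TIME value, forces it to 0 for the escan issued
+ * above, and restores it immediately afterwards, so the skip only affects
+ * this one scan request.
+ */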
+
+static s32
+wl_get_scan_timeout_val(struct bcm_cfg80211 *cfg)
+{
+ u32 scan_timer_interval_ms = WL_SCAN_TIMER_INTERVAL_MS;
+
+#ifdef WES_SUPPORT
+#ifdef CUSTOMER_SCAN_TIMEOUT_SETTING
+	if ((cfg->custom_scan_channel_time > DHD_SCAN_ASSOC_ACTIVE_TIME) ||
+		(cfg->custom_scan_unassoc_time > DHD_SCAN_UNASSOC_ACTIVE_TIME) ||
+		(cfg->custom_scan_passive_time > DHD_SCAN_PASSIVE_TIME) ||
+		(cfg->custom_scan_home_time > DHD_SCAN_HOME_TIME) ||
+		(cfg->custom_scan_home_away_time > DHD_SCAN_HOME_AWAY_TIME)) {
+ scan_timer_interval_ms = CUSTOMER_WL_SCAN_TIMER_INTERVAL_MS;
+ }
+#endif /* CUSTOMER_SCAN_TIMEOUT_SETTING */
+#endif /* WES_SUPPORT */
+
+	/* If NAN is enabled, add 10 sec to the existing timeout value */
+#ifdef WL_NAN
+ if (wl_cfgnan_is_enabled(cfg)) {
+ scan_timer_interval_ms += WL_SCAN_TIMER_INTERVAL_MS_NAN;
+ }
+#endif /* WL_NAN */
+ /* Additional time to scan 6GHz band channels */
+#ifdef WL_6G_BAND
+ if (cfg->band_6g_supported) {
+ scan_timer_interval_ms += WL_SCAN_TIMER_INTERVAL_MS_6G;
+ }
+#endif /* WL_6G_BAND */
+ WL_MEM(("scan_timer_interval_ms %d\n", scan_timer_interval_ms));
+ return scan_timer_interval_ms;
+}
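+
+/* Worked example (sketch; the actual macro values are build-dependent):
+ * the returned value is the base WL_SCAN_TIMER_INTERVAL_MS, plus
+ * WL_SCAN_TIMER_INTERVAL_MS_NAN (10 sec per the comment above) when NAN is
+ * enabled, plus WL_SCAN_TIMER_INTERVAL_MS_6G when 6 GHz is supported. The
+ * sum drives both the scan_timeout timer and the scan wake-lock duration
+ * in __wl_cfg80211_scan().
+ */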
+
+#define SCAN_EBUSY_RETRY_LIMIT 20
+static s32
+wl_cfgscan_handle_scanbusy(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 err)
+{
+ s32 scanbusy_err = 0;
+ static u32 busy_count = 0;
+
+ if (!err) {
+ busy_count = 0;
+ return scanbusy_err;
+ }
+ if (err == BCME_BUSY || err == BCME_NOTREADY) {
+ WL_ERR(("Scan err = (%d), busy?%d\n", err, -EBUSY));
+ scanbusy_err = -EBUSY;
+ } else if ((err == BCME_EPERM) && cfg->scan_suppressed) {
+ WL_ERR(("Scan not permitted due to scan suppress\n"));
+ scanbusy_err = -EPERM;
+ } else {
+ /* For all other fw errors, use a generic error code as return
+ * value to cfg80211 stack
+ */
+ scanbusy_err = -EAGAIN;
+ }
+
+ /* if continuous busy state, clear assoc type in FW by disassoc cmd */
+ if (scanbusy_err == -EBUSY) {
+ /* Flush FW preserve buffer logs for checking failure */
+ if (busy_count++ > (SCAN_EBUSY_RETRY_LIMIT/5)) {
+ wl_flush_fw_log_buffer(ndev, FW_LOGSET_MASK_ALL);
+ }
+ if (busy_count > SCAN_EBUSY_RETRY_LIMIT) {
+ struct ether_addr bssid;
+ s32 ret = 0;
+#ifdef BCMDONGLEHOST
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+ if (dhd_query_bus_erros(dhdp)) {
+ return BCME_NOTREADY;
+ }
+ dhdp->scan_busy_occurred = TRUE;
+#endif /* BCMDONGLEHOST */
+ busy_count = 0;
+ WL_ERR(("Unusual continuous EBUSY error, %d %d %d %d %d %d %d %d %d\n",
+ wl_get_drv_status(cfg, SCANNING, ndev),
+ wl_get_drv_status(cfg, SCAN_ABORTING, ndev),
+ wl_get_drv_status(cfg, CONNECTING, ndev),
+ wl_get_drv_status(cfg, CONNECTED, ndev),
+ wl_get_drv_status(cfg, DISCONNECTING, ndev),
+ wl_get_drv_status(cfg, AP_CREATING, ndev),
+ wl_get_drv_status(cfg, AP_CREATED, ndev),
+ wl_get_drv_status(cfg, SENDING_ACT_FRM, ndev),
+ wl_get_drv_status(cfg, SENDING_ACT_FRM, ndev)));
+
+#ifdef BCMDONGLEHOST
+#if defined(DHD_DEBUG) && defined(DHD_FW_COREDUMP)
+ if (dhdp->memdump_enabled) {
+ dhdp->memdump_type = DUMP_TYPE_SCAN_BUSY;
+ dhd_bus_mem_dump(dhdp);
+ }
+#endif /* DHD_DEBUG && DHD_FW_COREDUMP */
+ dhdp->hang_reason = HANG_REASON_SCAN_BUSY;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(OEM_ANDROID)
+ dhd_os_send_hang_message(dhdp);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(OEM_ANDROID) */
+
+#if !((LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && \
+ defined(OEM_ANDROID))
+ WL_ERR(("%s: HANG event is unsupported\n", __FUNCTION__));
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) && OEM_ANDROID */
+#endif /* BCMDONGLEHOST */
+
+ bzero(&bssid, sizeof(bssid));
+ if ((ret = wldev_ioctl_get(ndev, WLC_GET_BSSID,
+ &bssid, ETHER_ADDR_LEN)) == 0) {
+ WL_ERR(("FW is connected with " MACDBG "\n",
+ MAC2STRDBG(bssid.octet)));
+ } else {
+ WL_ERR(("GET BSSID failed with %d\n", ret));
+ }
+
+ /* To support GO, wl_cfgscan_cancel_scan()
+ * is needed instead of wl_cfg80211_disconnect()
+ */
+ wl_cfgscan_cancel_scan(cfg);
+
+ } else {
+ /* Hold the context for 400msec, so that 10 subsequent scans
+ * can give a buffer of 4sec which is enough to
+ * cover any on-going scan in the firmware
+ */
+ WL_DBG(("Enforcing delay for EBUSY case \n"));
+ msleep(400);
+ }
+ } else {
+ busy_count = 0;
+ }
+
+ return scanbusy_err;
+}
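+
+/* Escalation policy implemented above (sketch):
+ *	EBUSY streak > SCAN_EBUSY_RETRY_LIMIT/5  -> flush FW preserve logs
+ *	EBUSY streak > SCAN_EBUSY_RETRY_LIMIT    -> memdump/hang path and
+ *	                                            wl_cfgscan_cancel_scan()
+ *	otherwise                                -> msleep(400), so 10 retries
+ *	                                            give the FW ~4 sec to finish
+ */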
+
+s32
+__wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+ struct cfg80211_scan_request *request,
+ struct cfg80211_ssid *this_ssid)
+{
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct cfg80211_ssid *ssids;
+ bool p2p_ssid;
+#ifdef WL11U
+ bcm_tlv_t *interworking_ie;
+#endif
+ s32 err = 0;
+ s32 bssidx = -1;
+ s32 i;
+ bool escan_req_failed = false;
+ s32 scanbusy_err = 0;
+
+ unsigned long flags;
+#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+ struct net_device *remain_on_channel_ndev = NULL;
+#endif
+ /*
+ * Hostapd triggers scan before starting automatic channel selection
+ * to collect channel characteristics. However firmware scan engine
+ * doesn't support any channel characteristics collection along with
+ * scan. Hence return scan success.
+ */
+ if (request && (scan_req_iftype(request) == NL80211_IFTYPE_AP)) {
+ WL_DBG(("Scan Command on SoftAP Interface. Ignoring...\n"));
+// terence 20161023: let it scan in SoftAP mode
+// return 0;
+ }
+
+ if (request && request->n_ssids > WL_SCAN_PARAMS_SSID_MAX) {
+		WL_ERR(("n_ssids > WL_SCAN_PARAMS_SSID_MAX\n"));
+ return -EOPNOTSUPP;
+ }
+
+ ndev = ndev_to_wlc_ndev(ndev, cfg);
+
+ if (WL_DRV_STATUS_SENDING_AF_FRM_EXT(cfg)) {
+ WL_ERR(("Sending Action Frames. Try it again.\n"));
+ return -EAGAIN;
+ }
+
+ WL_DBG(("Enter wiphy (%p)\n", wiphy));
+ mutex_lock(&cfg->scan_sync);
+ if (wl_get_drv_status_all(cfg, SCANNING)) {
+ if (cfg->scan_request == NULL) {
+ wl_clr_drv_status_all(cfg, SCANNING);
+ WL_DBG(("<<<<<<<<<<<Force Clear Scanning Status>>>>>>>>>>>\n"));
+ } else {
+ WL_ERR(("Scanning already\n"));
+ mutex_unlock(&cfg->scan_sync);
+ return -EAGAIN;
+ }
+ }
+ if (wl_get_drv_status(cfg, SCAN_ABORTING, ndev)) {
+ WL_ERR(("Scanning being aborted\n"));
+ mutex_unlock(&cfg->scan_sync);
+ return -EAGAIN;
+ }
+
+ if (cfg->loc.in_progress) {
+ /* Listen in progress, avoid new scan trigger */
+ mutex_unlock(&cfg->scan_sync);
+ return -EBUSY;
+ }
+ mutex_unlock(&cfg->scan_sync);
+
+#ifdef WL_BCNRECV
+ /* check fakeapscan in progress then abort */
+ wl_android_bcnrecv_stop(ndev, WL_BCNRECV_SCANBUSY);
+#endif /* WL_BCNRECV */
+
+#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+ mutex_lock(&cfg->scan_sync);
+ remain_on_channel_ndev = wl_cfg80211_get_remain_on_channel_ndev(cfg);
+ if (remain_on_channel_ndev) {
+ WL_DBG(("Remain_on_channel bit is set, somehow it didn't get cleared\n"));
+ _wl_cfgscan_cancel_scan(cfg);
+ }
+ mutex_unlock(&cfg->scan_sync);
+#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+
+#ifdef P2P_LISTEN_OFFLOADING
+ wl_cfg80211_cancel_p2plo(cfg);
+#endif /* P2P_LISTEN_OFFLOADING */
+
+#ifdef WL_SDO
+ if (wl_get_p2p_status(cfg, DISC_IN_PROGRESS)) {
+ wl_cfg80211_pause_sdo(ndev, cfg);
+ }
+#endif
+
+ if (request) { /* scan bss */
+ ssids = request->ssids;
+ p2p_ssid = false;
+ for (i = 0; i < request->n_ssids; i++) {
+ if (ssids[i].ssid_len &&
+ IS_P2P_SSID(ssids[i].ssid, ssids[i].ssid_len)) {
+ /* P2P Scan */
+#ifdef WL_BLOCK_P2P_SCAN_ON_STA
+ if (!(IS_P2P_IFACE(request->wdev))) {
+ /* P2P scan on non-p2p iface. Fail scan */
+ WL_ERR(("p2p_search on non p2p iface\n"));
+ goto scan_out;
+ }
+#endif /* WL_BLOCK_P2P_SCAN_ON_STA */
+ p2p_ssid = true;
+ break;
+ }
+ }
+ if (p2p_ssid) {
+ if (cfg->p2p_supported) {
+ /* p2p scan trigger */
+ if (p2p_on(cfg) == false) {
+ /* p2p on at the first time */
+ p2p_on(cfg) = true;
+ wl_cfgp2p_set_firm_p2p(cfg);
+#if defined(P2P_IE_MISSING_FIX)
+ cfg->p2p_prb_noti = false;
+#endif
+ }
+ wl_clr_p2p_status(cfg, GO_NEG_PHASE);
+ WL_DBG(("P2P: GO_NEG_PHASE status cleared \n"));
+ p2p_scan(cfg) = true;
+ }
+ } else {
+			/* legacy scan trigger, so p2p discovery has to be
+			 * disabled if it is currently on
+			 */
+ if (cfg->p2p_supported) {
+ p2p_scan(cfg) = false;
+				/* If the netdevice is not the primary one and p2p is on,
+				 * do the p2p scan using P2PAPI_BSSCFG_DEVICE.
+				 */
+
+ if (p2p_scan(cfg) == false) {
+ if (wl_get_p2p_status(cfg, DISCOVERY_ON)) {
+ err = wl_cfgp2p_discover_enable_search(cfg,
+ false);
+ if (unlikely(err)) {
+ goto scan_out;
+ }
+
+ }
+ }
+ }
+ if (!cfg->p2p_supported || !p2p_scan(cfg)) {
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg,
+ ndev->ieee80211_ptr)) < 0) {
+ WL_ERR(("Find p2p index from ndev(%p) failed\n",
+ ndev));
+ err = BCME_ERROR;
+ goto scan_out;
+ }
+#ifdef WL11U
+ if (request && (interworking_ie = wl_cfg80211_find_interworking_ie(
+ request->ie, request->ie_len)) != NULL) {
+ if ((err = wl_cfg80211_add_iw_ie(cfg, ndev, bssidx,
+ VNDR_IE_CUSTOM_FLAG, interworking_ie->id,
+ interworking_ie->data,
+ interworking_ie->len)) != BCME_OK) {
+ WL_ERR(("Failed to add interworking IE"));
+ }
+ } else if (cfg->wl11u) {
+				/* we have to clear the IW IE and disable gratuitous ARP */
+ wl_cfg80211_clear_iw_ie(cfg, ndev, bssidx);
+ err = wldev_iovar_setint_bsscfg(ndev, "grat_arp",
+ 0, bssidx);
+ /* we don't care about error here
+ * because the only failure case is unsupported,
+ * which is fine
+ */
+ if (unlikely(err)) {
+ WL_ERR(("Set grat_arp failed:(%d) Ignore!\n", err));
+ }
+ cfg->wl11u = FALSE;
+ }
+#endif /* WL11U */
+ if (request) {
+ err = wl_cfg80211_set_mgmt_vndr_ies(cfg,
+ ndev_to_cfgdev(ndev), bssidx, VNDR_IE_PRBREQ_FLAG,
+ request->ie, request->ie_len);
+ }
+
+ if (unlikely(err)) {
+// terence 20161023: let it scan in SoftAP mode
+// goto scan_out;
+ }
+
+ }
+ }
+ } else { /* scan in ibss */
+ ssids = this_ssid;
+ }
+
+ WL_TRACE_HW4(("START SCAN\n"));
+
+#if defined(BCMDONGLEHOST) && defined(OEM_ANDROID)
+ DHD_OS_SCAN_WAKE_LOCK_TIMEOUT((dhd_pub_t *)(cfg->pub),
+ wl_get_scan_timeout_val(cfg) + SCAN_WAKE_LOCK_MARGIN_MS);
+ DHD_DISABLE_RUNTIME_PM((dhd_pub_t *)(cfg->pub));
+#endif
+
+ if (cfg->p2p_supported) {
+ if (request && p2p_on(cfg) && p2p_scan(cfg)) {
+
+#ifdef WL_SDO
+ if (wl_get_p2p_status(cfg, DISC_IN_PROGRESS)) {
+ /* We shouldn't be getting p2p_find while discovery
+ * offload is in progress
+ */
+ WL_SD(("P2P_FIND: Discovery offload is in progress."
+ " Do nothing\n"));
+ err = -EINVAL;
+ goto scan_out;
+ }
+#endif
+ /* find my listen channel */
+ cfg->afx_hdl->my_listen_chan =
+ wl_find_listen_channel(cfg, request->ie,
+ request->ie_len);
+ err = wl_cfgp2p_enable_discovery(cfg, ndev,
+ request->ie, request->ie_len);
+
+ if (unlikely(err)) {
+ goto scan_out;
+ }
+ }
+ }
+
+#ifdef WL_EXT_IAPSTA
+ if (wl_ext_in4way_sync(ndev, STA_FAKE_SCAN_IN_CONNECT, WL_EXT_STATUS_SCANNING, NULL)) {
+ mutex_lock(&cfg->scan_sync);
+ goto scan_success;
+ }
+#endif
+ mutex_lock(&cfg->scan_sync);
+ err = wl_do_escan(cfg, wiphy, ndev, request);
+ if (likely(!err)) {
+ goto scan_success;
+ } else {
+ escan_req_failed = true;
+ goto scan_out;
+ }
+
+scan_success:
+ wl_cfgscan_handle_scanbusy(cfg, ndev, BCME_OK);
+ cfg->scan_request = request;
+ LOG_TS(cfg, scan_start);
+ wl_set_drv_status(cfg, SCANNING, ndev);
+ /* Arm the timer */
+ mod_timer(&cfg->scan_timeout,
+ jiffies + msecs_to_jiffies(wl_get_scan_timeout_val(cfg)));
+ mutex_unlock(&cfg->scan_sync);
+ return 0;
+
+scan_out:
+ if (escan_req_failed) {
+ WL_CFG_DRV_LOCK(&cfg->cfgdrv_lock, flags);
+ cfg->scan_request = NULL;
+ WL_CFG_DRV_UNLOCK(&cfg->cfgdrv_lock, flags);
+ mutex_unlock(&cfg->scan_sync);
+ /* Handling for scan busy errors */
+ scanbusy_err = wl_cfgscan_handle_scanbusy(cfg, ndev, err);
+ if (scanbusy_err == BCME_NOTREADY) {
+ /* In case of bus failures avoid ioctl calls */
+
+#if defined(BCMDONGLEHOST) && defined(OEM_ANDROID)
+ DHD_OS_SCAN_WAKE_UNLOCK((dhd_pub_t *)(cfg->pub));
+ DHD_ENABLE_RUNTIME_PM((dhd_pub_t *)(cfg->pub));
+#endif
+
+ return -ENODEV;
+ }
+ err = scanbusy_err;
+ }
+
+#if defined(BCMDONGLEHOST) && defined(OEM_ANDROID)
+ DHD_OS_SCAN_WAKE_UNLOCK((dhd_pub_t *)(cfg->pub));
+ DHD_ENABLE_RUNTIME_PM((dhd_pub_t *)(cfg->pub));
+#endif
+
+#ifdef WL_SDO
+ if (wl_get_p2p_status(cfg, DISC_IN_PROGRESS)) {
+ wl_cfg80211_resume_sdo(ndev, cfg);
+ }
+#endif
+ return err;
+}
+
+s32
+#if defined(WL_CFG80211_P2P_DEV_IF)
+wl_cfg80211_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
+#else
+wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+ struct cfg80211_scan_request *request)
+#endif /* WL_CFG80211_P2P_DEV_IF */
+{
+ s32 err = 0;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+#if defined(WL_CFG80211_P2P_DEV_IF)
+ struct net_device *ndev = wdev_to_wlc_ndev(request->wdev, cfg);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+ WL_DBG(("Enter\n"));
+ RETURN_EIO_IF_NOT_UP(cfg);
+
+#ifdef DHD_IFDEBUG
+#ifdef WL_CFG80211_P2P_DEV_IF
+ PRINT_WDEV_INFO(request->wdev);
+#else
+ PRINT_WDEV_INFO(ndev);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+#endif /* DHD_IFDEBUG */
+
+ if (ndev == bcmcfg_to_prmry_ndev(cfg)) {
+ if (wl_cfg_multip2p_operational(cfg)) {
+ WL_ERR(("wlan0 scan failed, p2p devices are operational"));
+ return -ENODEV;
+ }
+ }
+#ifdef WL_EXT_IAPSTA
+ err = wl_ext_in4way_sync(ndev_to_wlc_ndev(ndev, cfg), STA_NO_SCAN_IN4WAY,
+ WL_EXT_STATUS_SCAN, NULL);
+ if (err) {
+ WL_SCAN(("scan suppressed %d\n", err));
+ return err;
+ }
+#endif
+
+ err = __wl_cfg80211_scan(wiphy, ndev, request, NULL);
+ if (unlikely(err)) {
+ WL_ERR(("scan error (%d)\n", err));
+ }
+#ifdef WL_DRV_AVOID_SCANCACHE
+ /* Reset roam cache after successful scan request */
+#ifdef ROAM_CHANNEL_CACHE
+ if (!err) {
+ reset_roam_cache(cfg);
+ }
+#endif /* ROAM_CHANNEL_CACHE */
+#endif /* WL_DRV_AVOID_SCANCACHE */
+ return err;
+}
+
+/* Note: This API should be invoked with the scan_sync mutex
+ * held so that the scan_request data structures don't
+ * get modified in between.
+ */
+struct wireless_dev *
+wl_get_scan_wdev(struct bcm_cfg80211 *cfg)
+{
+ struct wireless_dev *wdev = NULL;
+
+ if (!cfg) {
+ WL_ERR(("cfg ptr null\n"));
+ return NULL;
+ }
+
+ if (!cfg->scan_request && !cfg->sched_scan_req) {
+ /* No scans in progress */
+ WL_MEM(("no scan in progress \n"));
+ return NULL;
+ }
+
+ if (cfg->scan_request) {
+ wdev = GET_SCAN_WDEV(cfg->scan_request);
+#ifdef WL_SCHED_SCAN
+ } else if (cfg->sched_scan_req) {
+ wdev = GET_SCHED_SCAN_WDEV(cfg->sched_scan_req);
+#endif /* WL_SCHED_SCAN */
+ } else {
+ WL_MEM(("no scan in progress \n"));
+ }
+
+ return wdev;
+}
+
+static void _wl_cfgscan_cancel_scan(struct bcm_cfg80211 *cfg)
+{
+ struct wireless_dev *wdev = NULL;
+ struct net_device *ndev = NULL;
+
+ if (!cfg->scan_request && !cfg->sched_scan_req) {
+ /* No scans in progress */
+ WL_INFORM_MEM(("No scan in progress\n"));
+ return;
+ }
+
+ wdev = wl_get_scan_wdev(cfg);
+ if (!wdev) {
+ WL_ERR(("No wdev present\n"));
+ return;
+ }
+
+ ndev = wdev_to_wlc_ndev(wdev, cfg);
+
+ /* Check if any scan in progress only then abort */
+ if (wl_get_drv_status_all(cfg, SCANNING)) {
+ wl_cfgscan_scan_abort(cfg);
+
+ /* Indicate escan completion to upper layer */
+ wl_notify_escan_complete(cfg, ndev, true);
+ }
+ WL_INFORM_MEM(("Scan aborted! \n"));
+}
+
+/* Wrapper function for cancel_scan with scan_sync mutex */
+void wl_cfgscan_cancel_scan(struct bcm_cfg80211 *cfg)
+{
+ mutex_lock(&cfg->scan_sync);
+ _wl_cfgscan_cancel_scan(cfg);
+ mutex_unlock(&cfg->scan_sync);
+}
+
+/* Use wl_cfgscan_cancel_scan for scan abort, as it does a FW abort followed
+ * by an indication to the upper layer; wl_cfgscan_scan_abort below does
+ * only the FW abort.
+ */
+void wl_cfgscan_scan_abort(struct bcm_cfg80211 *cfg)
+{
+ void *params = NULL;
+ s32 params_size = 0;
+ s32 err = BCME_OK;
+ struct net_device *dev = bcmcfg_to_prmry_ndev(cfg);
+ u32 channel, channel_num;
+
+ /* Abort scan params only need space for 1 channel and 0 ssids */
+ if (cfg->scan_params_v2) {
+ params_size = WL_SCAN_PARAMS_V2_FIXED_SIZE + (1 * sizeof(uint16));
+ } else {
+ params_size = WL_SCAN_PARAMS_FIXED_SIZE + (1 * sizeof(uint16));
+ }
+
+ params = MALLOCZ(cfg->osh, params_size);
+ if (params == NULL) {
+ WL_ERR(("mem alloc failed (%d bytes)\n", params_size));
+ return;
+ }
+
+ /* Use magic value of channel=-1 to abort scan */
+ channel = htodchanspec(-1);
+ channel_num = htod32((0 << WL_SCAN_PARAMS_NSSID_SHIFT) |
+ (1 & WL_SCAN_PARAMS_COUNT_MASK));
+ if (cfg->scan_params_v2) {
+ wl_scan_params_v2_t *params_v2 = (wl_scan_params_v2_t *)params;
+ params_v2->channel_list[0] = channel;
+ params_v2->channel_num = channel_num;
+ params_v2->length = htod16(sizeof(wl_scan_params_v2_t));
+ } else {
+ wl_scan_params_t *params_v1 = (wl_scan_params_t *)params;
+ params_v1->channel_list[0] = channel;
+ params_v1->channel_num = channel_num;
+ }
+#ifdef DHD_SEND_HANG_ESCAN_SYNCID_MISMATCH
+ cfg->escan_info.prev_escan_aborted = TRUE;
+#endif /* DHD_SEND_HANG_ESCAN_SYNCID_MISMATCH */
+ /* Do a scan abort to stop the driver's scan engine */
+ err = wldev_ioctl_set(dev, WLC_SCAN, params, params_size);
+ if (err < 0) {
+ /* scan abort can fail if there is no outstanding scan */
+ WL_ERR(("scan engine not aborted ret(%d)\n", err));
+ }
+ MFREE(cfg->osh, params, params_size);
+#ifdef WLTDLS
+ if (cfg->tdls_mgmt_frame) {
+ MFREE(cfg->osh, cfg->tdls_mgmt_frame, cfg->tdls_mgmt_frame_len);
+ cfg->tdls_mgmt_frame = NULL;
+ cfg->tdls_mgmt_frame_len = 0;
+ }
+#endif /* WLTDLS */
+}
+
+static s32
+wl_notify_escan_complete(struct bcm_cfg80211 *cfg,
+ struct net_device *ndev, bool aborted)
+{
+ s32 err = BCME_OK;
+ unsigned long flags;
+ struct net_device *dev;
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+
+ WL_DBG(("Enter \n"));
+ BCM_REFERENCE(dhdp);
+
+ if (!ndev) {
+ WL_ERR(("ndev is null\n"));
+ err = BCME_ERROR;
+ goto out;
+ }
+
+ if (cfg->escan_info.ndev != ndev) {
+ WL_ERR(("Outstanding scan req ndev not matching (%p:%p)\n",
+ cfg->escan_info.ndev, ndev));
+ err = BCME_ERROR;
+ goto out;
+ }
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) && defined(SUPPORT_RANDOM_MAC_SCAN) && \
+ (!defined(WL_USE_RANDOMIZED_SCAN))
+ /* Disable scanmac if enabled */
+ if (cfg->scanmac_enabled) {
+ wl_cfg80211_scan_mac_disable(ndev);
+ }
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0) && SUPPORT_RANDOM_MAC_SCAN && !WL_USE_RANDOMIZED_SCAN */
+ if (cfg->scan_request) {
+ dev = bcmcfg_to_prmry_ndev(cfg);
+#if defined(WL_ENABLE_P2P_IF)
+ if (cfg->scan_request->dev != cfg->p2p_net)
+ dev = cfg->scan_request->dev;
+#elif defined(WL_CFG80211_P2P_DEV_IF)
+ if (cfg->scan_request->wdev->iftype != NL80211_IFTYPE_P2P_DEVICE)
+ dev = cfg->scan_request->wdev->netdev;
+#endif /* WL_ENABLE_P2P_IF */
+ }
+ else {
+		WL_DBG(("cfg->scan_request is NULL. Internal scan scenario. "
+			"Doing scan_abort for ndev %p primary %p\n",
+			ndev, bcmcfg_to_prmry_ndev(cfg)));
+ dev = ndev;
+ }
+
+ del_timer_sync(&cfg->scan_timeout);
+ /* clear scan enq time on complete */
+ CLR_TS(cfg, scan_enq);
+ CLR_TS(cfg, scan_start);
+#if defined (ESCAN_RESULT_PATCH)
+ if (likely(cfg->scan_request)) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+ if (aborted && cfg->p2p && p2p_scan(cfg) &&
+ (cfg->scan_request->flags & NL80211_SCAN_FLAG_FLUSH)) {
+ WL_ERR(("scan list is changed"));
+ cfg->bss_list = wl_escan_get_buf(cfg, !aborted);
+ } else
+#endif
+ cfg->bss_list = wl_escan_get_buf(cfg, aborted);
+
+ wl_inform_bss(cfg);
+ }
+#endif /* ESCAN_RESULT_PATCH */
+
+ WL_CFG_DRV_LOCK(&cfg->cfgdrv_lock, flags);
+ if (likely(cfg->scan_request)) {
+ WL_INFORM_MEM(("[%s] Report scan done.\n", dev->name));
+ /* scan_sync mutex is already held */
+ _wl_notify_scan_done(cfg, aborted);
+ cfg->scan_request = NULL;
+ }
+ if (p2p_is_on(cfg))
+ wl_clr_p2p_status(cfg, SCANNING);
+ wl_clr_drv_status(cfg, SCANNING, dev);
+ CLR_TS(cfg, scan_start);
+ WL_CFG_DRV_UNLOCK(&cfg->cfgdrv_lock, flags);
+
+#ifdef WL_SCHED_SCAN
+ if (cfg->sched_scan_running && cfg->sched_scan_req) {
+ struct wiphy *wiphy = cfg->sched_scan_req->wiphy;
+ if (!aborted) {
+ WL_INFORM_MEM(("[%s] Report sched scan done.\n", dev->name));
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0))
+ cfg80211_sched_scan_results(wiphy,
+ cfg->sched_scan_req->reqid);
+#else
+ cfg80211_sched_scan_results(wiphy);
+#endif /* LINUX_VER > 4.11 */
+ }
+
+ DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_PNO_SCAN_COMPLETE);
+ /* Mark target scan as done */
+ cfg->sched_scan_running = FALSE;
+
+ if (cfg->bss_list && (cfg->bss_list->count == 0)) {
+ WL_INFORM_MEM(("bss list empty. report sched_scan_stop\n"));
+			/* Indicate sched scan stopped so that user space
+			 * can do a full scan in case the found match list is empty.
+			 */
+ CFG80211_SCHED_SCAN_STOPPED(wiphy, cfg->sched_scan_req);
+ cfg->sched_scan_req = NULL;
+ }
+ }
+#endif /* WL_SCHED_SCAN */
+ wake_up_interruptible(&dhdp->conf->event_complete);
+
+#if defined(BCMDONGLEHOST) && defined(OEM_ANDROID)
+ DHD_OS_SCAN_WAKE_UNLOCK((dhd_pub_t *)(cfg->pub));
+ DHD_ENABLE_RUNTIME_PM((dhd_pub_t *)(cfg->pub));
+#endif
+
+#ifdef WL_SDO
+ if (wl_get_p2p_status(cfg, DISC_IN_PROGRESS) && !in_atomic()) {
+	/* If in atomic context, we probably have to wait until the
+	 * next event or find some other way of invoking this.
+	 */
+ wl_cfg80211_resume_sdo(ndev, cfg);
+ }
+#endif
+
+out:
+ return err;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+void
+wl_cfg80211_abort_scan(struct wiphy *wiphy, struct wireless_dev *wdev)
+{
+ struct bcm_cfg80211 *cfg;
+
+ WL_DBG(("Enter wl_cfg80211_abort_scan\n"));
+ cfg = wiphy_priv(wdev->wiphy);
+
+ /* Check if any scan in progress only then abort */
+ if (wl_get_drv_status_all(cfg, SCANNING)) {
+ wl_cfgscan_scan_abort(cfg);
+		/* Only a scan abort is issued here. abort_scan expects the abort
+		 * status to be communicated via the cfg80211_scan_done() call, so
+		 * we just issue the abort request and let the scan-complete path
+		 * indicate the abort to the cfg80211 layer.
+		 */
+ WL_DBG(("wl_cfg80211_abort_scan: Scan abort issued to FW\n"));
+ }
+}
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */
+
+#if defined(OEM_ANDROID) && defined(DHCP_SCAN_SUPPRESS)
+static void wl_cfg80211_scan_supp_timerfunc(ulong data)
+{
+ struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)data;
+
+ WL_DBG(("Enter \n"));
+ schedule_work(&cfg->wlan_work);
+}
+
+int wl_cfg80211_scan_suppress(struct net_device *dev, int suppress)
+{
+ int ret = 0;
+ struct wireless_dev *wdev;
+ struct bcm_cfg80211 *cfg;
+ if (!dev || ((suppress != 0) && (suppress != 1))) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ wdev = ndev_to_wdev(dev);
+ if (!wdev) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cfg = (struct bcm_cfg80211 *)wiphy_priv(wdev->wiphy);
+ if (!cfg) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (suppress == cfg->scan_suppressed) {
+ WL_DBG(("No change in scan_suppress state. Ignoring cmd..\n"));
+ return 0;
+ }
+
+ del_timer_sync(&cfg->scan_supp_timer);
+
+ if ((ret = wldev_ioctl_set(dev, WLC_SET_SCANSUPPRESS,
+ &suppress, sizeof(int))) < 0) {
+ WL_ERR(("Scan suppress setting failed ret:%d \n", ret));
+ } else {
+ WL_DBG(("Scan suppress %s \n", suppress ? "Enabled" : "Disabled"));
+ cfg->scan_suppressed = suppress;
+ }
+
+	/* If scan_suppress is set, start a timer to monitor it (just in case) */
+ if (cfg->scan_suppressed) {
+ if (ret) {
+ WL_ERR(("Retry scan_suppress reset at a later time \n"));
+ mod_timer(&cfg->scan_supp_timer,
+ jiffies + msecs_to_jiffies(WL_SCAN_SUPPRESS_RETRY));
+ } else {
+ WL_DBG(("Start wlan_timer to clear of scan_suppress \n"));
+ mod_timer(&cfg->scan_supp_timer,
+ jiffies + msecs_to_jiffies(WL_SCAN_SUPPRESS_TIMEOUT));
+ }
+ }
+exit:
+ return ret;
+}
+#endif /* DHCP_SCAN_SUPPRESS */
+
+int wl_cfg80211_scan_stop(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev)
+{
+ int ret = 0;
+
+ WL_TRACE(("Enter\n"));
+
+ if (!cfg || !cfgdev) {
+ return -EINVAL;
+ }
+
+ /* cancel scan and notify scan status */
+ wl_cfgscan_cancel_scan(cfg);
+
+ return ret;
+}
+
+/* This API is just a wrapper for the cfg80211_scan_done
+ * API. It doesn't do state management. For cancelling a scan,
+ * please use the wl_cfgscan_cancel_scan API.
+ */
+static void
+_wl_notify_scan_done(struct bcm_cfg80211 *cfg, bool aborted)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
+ struct cfg80211_scan_info info;
+#endif
+
+ if (!cfg->scan_request) {
+ return;
+ }
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
+ memset_s(&info, sizeof(struct cfg80211_scan_info), 0, sizeof(struct cfg80211_scan_info));
+ info.aborted = aborted;
+ cfg80211_scan_done(cfg->scan_request, &info);
+#else
+ cfg80211_scan_done(cfg->scan_request, aborted);
+#endif
+ cfg->scan_request = NULL;
+}
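+
+/* Kernel API note (illustrative): from 4.8 onwards cfg80211_scan_done()
+ * takes a struct cfg80211_scan_info instead of a bool, hence the version
+ * gating above:
+ *
+ *   struct cfg80211_scan_info info = { .aborted = true };
+ *   cfg80211_scan_done(req, &info);     kernel >= 4.8
+ *   cfg80211_scan_done(req, true);      kernel <  4.8
+ */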
+
+#ifdef WL_DRV_AVOID_SCANCACHE
+static u32 wl_p2p_find_peer_channel(struct bcm_cfg80211 *cfg, s32 status, wl_bss_info_t *bi,
+ u32 bi_length)
+{
+ u32 ret;
+ u8 *p2p_dev_addr = NULL;
+
+ ret = wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL);
+ if (!ret) {
+ return ret;
+ }
+ if (status == WLC_E_STATUS_PARTIAL) {
+ p2p_dev_addr = wl_cfgp2p_retreive_p2p_dev_addr(bi, bi_length);
+ if (p2p_dev_addr && !memcmp(p2p_dev_addr,
+ cfg->afx_hdl->tx_dst_addr.octet, ETHER_ADDR_LEN)) {
+ s32 channel = wf_chspec_ctlchan(
+ wl_chspec_driver_to_host(bi->chanspec));
+
+ if ((channel > MAXCHANNEL) || (channel <= 0)) {
+ channel = WL_INVALID;
+ } else {
+ WL_ERR(("ACTION FRAME SCAN : Peer " MACDBG " found,"
+ " channel : %d\n",
+ MAC2STRDBG(cfg->afx_hdl->tx_dst_addr.octet),
+ channel));
+ }
+ wl_clr_p2p_status(cfg, SCANNING);
+ cfg->afx_hdl->peer_chan = channel;
+ complete(&cfg->act_frm_scan);
+ }
+ } else {
+ WL_INFORM_MEM(("ACTION FRAME SCAN DONE\n"));
+ wl_clr_p2p_status(cfg, SCANNING);
+ wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev);
+ if (cfg->afx_hdl->peer_chan == WL_INVALID)
+ complete(&cfg->act_frm_scan);
+ }
+
+ return ret;
+}
+
+static s32 wl_escan_without_scan_cache(struct bcm_cfg80211 *cfg, wl_escan_result_t *escan_result,
+ struct net_device *ndev, const wl_event_msg_t *e, s32 status)
+{
+ s32 err = BCME_OK;
+ wl_bss_info_t *bi;
+ u32 bi_length;
+ bool aborted = false;
+ bool fw_abort = false;
+ bool notify_escan_complete = false;
+
+ if (wl_escan_check_sync_id(cfg, status, escan_result->sync_id,
+ cfg->escan_info.cur_sync_id) < 0) {
+ goto exit;
+ }
+
+	if ((status != WLC_E_STATUS_TIMEOUT) && (status != WLC_E_STATUS_PARTIAL)) {
+ cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+ }
+
+ if ((likely(cfg->scan_request)) || (cfg->sched_scan_running)) {
+ notify_escan_complete = true;
+ }
+
+ if (status == WLC_E_STATUS_PARTIAL) {
+ WL_DBG(("WLC_E_STATUS_PARTIAL \n"));
+ DBG_EVENT_LOG((dhd_pub_t *)cfg->pub, WIFI_EVENT_DRIVER_SCAN_RESULT_FOUND);
+ if ((!escan_result) || (dtoh16(escan_result->bss_count) != 1)) {
+ WL_ERR(("Invalid escan result (NULL pointer) or invalid bss_count\n"));
+ goto exit;
+ }
+
+		bi = escan_result->bss_info;
+		if (!bi) {
+			WL_ERR(("Invalid escan bss info (NULL pointer)\n"));
+			goto exit;
+		}
+		bi_length = dtoh32(bi->length);
+		if (bi_length != (dtoh32(escan_result->buflen) - WL_ESCAN_RESULTS_FIXED_SIZE)) {
+			WL_ERR(("Invalid bss_info length\n"));
+			goto exit;
+		}
+
+ if (!(bcmcfg_to_wiphy(cfg)->interface_modes & BIT(NL80211_IFTYPE_ADHOC))) {
+ if (dtoh16(bi->capability) & DOT11_CAP_IBSS) {
+ WL_DBG(("Ignoring IBSS result\n"));
+ goto exit;
+ }
+ }
+
+ if (wl_p2p_find_peer_channel(cfg, status, bi, bi_length)) {
+ goto exit;
+ } else {
+ if (scan_req_match(cfg)) {
+ /* p2p scan && allow only probe response */
+ if ((cfg->p2p->search_state != WL_P2P_DISC_ST_SCAN) &&
+ (bi->flags & WL_BSS_FLAGS_FROM_BEACON))
+ goto exit;
+ }
+#ifdef ROAM_CHANNEL_CACHE
+ add_roam_cache(cfg, bi);
+#endif /* ROAM_CHANNEL_CACHE */
+ err = wl_inform_single_bss(cfg, bi, false);
+#ifdef ROAM_CHANNEL_CACHE
+ /* print_roam_cache(); */
+ update_roam_cache(cfg, ioctl_version);
+#endif /* ROAM_CHANNEL_CACHE */
+
+			/*
+			 * Not broadcast && number of SSIDs == 1 && number of channels == 1
+			 * means a specific scan prior to association
+			 */
+ if (wl_cfgp2p_is_p2p_specific_scan(cfg->scan_request)) {
+ WL_ERR(("P2P assoc scan fast aborted.\n"));
+ aborted = false;
+ fw_abort = true;
+ }
+ /* Directly exit from function here and
+ * avoid sending notify completion to cfg80211
+ */
+ goto exit;
+ }
+ } else if (status == WLC_E_STATUS_SUCCESS) {
+ if (wl_p2p_find_peer_channel(cfg, status, NULL, 0)) {
+ goto exit;
+ }
+ WL_INFORM_MEM(("ESCAN COMPLETED\n"));
+ DBG_EVENT_LOG((dhd_pub_t *)cfg->pub, WIFI_EVENT_DRIVER_SCAN_COMPLETE);
+
+ /* Update escan complete status */
+ aborted = false;
+ fw_abort = false;
+
+#ifdef CUSTOMER_HW4_DEBUG
+ if (wl_scan_timeout_dbg_enabled)
+ wl_scan_timeout_dbg_clear();
+#endif /* CUSTOMER_HW4_DEBUG */
+ } else if ((status == WLC_E_STATUS_ABORT) || (status == WLC_E_STATUS_NEWSCAN) ||
+ (status == WLC_E_STATUS_11HQUIET) || (status == WLC_E_STATUS_CS_ABORT) ||
+ (status == WLC_E_STATUS_NEWASSOC)) {
+ /* Handle all cases of scan abort */
+
+ WL_DBG(("ESCAN ABORT reason: %d\n", status));
+ if (wl_p2p_find_peer_channel(cfg, status, NULL, 0)) {
+ goto exit;
+ }
+ WL_INFORM_MEM(("ESCAN ABORTED\n"));
+
+ /* Update escan complete status */
+ aborted = true;
+ fw_abort = false;
+
+ } else if (status == WLC_E_STATUS_TIMEOUT) {
+ WL_ERR(("WLC_E_STATUS_TIMEOUT : scan_request[%p]\n", cfg->scan_request));
+ WL_ERR(("reason[0x%x]\n", e->reason));
+ if (e->reason == 0xFFFFFFFF) {
+ /* Update escan complete status */
+ aborted = true;
+ fw_abort = true;
+ }
+ } else {
+ WL_ERR(("unexpected Escan Event %d : abort\n", status));
+
+ if (wl_p2p_find_peer_channel(cfg, status, NULL, 0)) {
+ goto exit;
+ }
+ /* Update escan complete status */
+ aborted = true;
+ fw_abort = false;
+ }
+
+ /* Notify escan complete status */
+ if (notify_escan_complete) {
+ if (fw_abort == true) {
+ wl_cfgscan_cancel_scan(cfg);
+ } else {
+ wl_notify_escan_complete(cfg, ndev, aborted);
+ }
+ }
+
+exit:
+ return err;
+
+}
+#endif /* WL_DRV_AVOID_SCANCACHE */
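+
+/* Summary of the status handling above (illustrative):
+ *
+ *   WLC_E_STATUS_SUCCESS                      aborted=false fw_abort=false
+ *   ABORT/NEWSCAN/11HQUIET/CS_ABORT/NEWASSOC  aborted=true  fw_abort=false
+ *   WLC_E_STATUS_TIMEOUT, reason 0xFFFFFFFF   aborted=true  fw_abort=true
+ *   P2P assoc-specific scan (PARTIAL)         aborted=false fw_abort=true
+ *
+ * fw_abort=true cancels the scan in firmware via wl_cfgscan_cancel_scan();
+ * otherwise completion is reported via wl_notify_escan_complete().
+ */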
+
+s32
+wl_notify_scan_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+ struct channel_info channel_inform;
+ wl_scan_results_t *bss_list;
+ struct net_device *ndev = NULL;
+ u32 len = WL_SCAN_BUF_MAX;
+ s32 err = 0;
+ unsigned long flags;
+
+ WL_DBG(("Enter \n"));
+ if (!wl_get_drv_status(cfg, SCANNING, ndev)) {
+ WL_DBG(("scan is not ready \n"));
+ return err;
+ }
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+ mutex_lock(&cfg->scan_sync);
+ wl_clr_drv_status(cfg, SCANNING, ndev);
+ bzero(&channel_inform, sizeof(channel_inform));
+ err = wldev_ioctl_get(ndev, WLC_GET_CHANNEL, &channel_inform,
+ sizeof(channel_inform));
+ if (unlikely(err)) {
+ WL_ERR(("scan busy (%d)\n", err));
+ goto scan_done_out;
+ }
+ channel_inform.scan_channel = dtoh32(channel_inform.scan_channel);
+	if (unlikely(channel_inform.scan_channel)) {
+		WL_DBG(("channel_inform.scan_channel (%d)\n",
+			channel_inform.scan_channel));
+	}
+ cfg->bss_list = cfg->scan_results;
+ bss_list = cfg->bss_list;
+ bzero(bss_list, len);
+ bss_list->buflen = htod32(len);
+ err = wldev_ioctl_get(ndev, WLC_SCAN_RESULTS, bss_list, len);
+ if (unlikely(err) && unlikely(!cfg->scan_suppressed)) {
+ WL_ERR(("%s Scan_results error (%d)\n", ndev->name, err));
+ err = -EINVAL;
+ goto scan_done_out;
+ }
+ bss_list->buflen = dtoh32(bss_list->buflen);
+ bss_list->version = dtoh32(bss_list->version);
+ bss_list->count = dtoh32(bss_list->count);
+
+ err = wl_inform_bss(cfg);
+
+scan_done_out:
+ del_timer_sync(&cfg->scan_timeout);
+ WL_CFG_DRV_LOCK(&cfg->cfgdrv_lock, flags);
+ if (cfg->scan_request) {
+ _wl_notify_scan_done(cfg, false);
+ cfg->scan_request = NULL;
+ }
+ WL_CFG_DRV_UNLOCK(&cfg->cfgdrv_lock, flags);
+ WL_DBG(("cfg80211_scan_done\n"));
+ mutex_unlock(&cfg->scan_sync);
+ return err;
+}
+
+void wl_notify_scan_done(struct bcm_cfg80211 *cfg, bool aborted)
+{
+#if defined(CONFIG_TIZEN)
+ struct net_device *ndev = NULL;
+#endif /* CONFIG_TIZEN */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
+ struct cfg80211_scan_info info;
+
+ bzero(&info, sizeof(struct cfg80211_scan_info));
+ info.aborted = aborted;
+ cfg80211_scan_done(cfg->scan_request, &info);
+#else
+ cfg80211_scan_done(cfg->scan_request, aborted);
+#endif
+
+#if defined(CONFIG_TIZEN)
+ ndev = bcmcfg_to_prmry_ndev(cfg);
+ if (aborted)
+ net_stat_tizen_update_wifi(ndev, WIFISTAT_SCAN_ABORT);
+ else
+ net_stat_tizen_update_wifi(ndev, WIFISTAT_SCAN_DONE);
+#endif /* CONFIG_TIZEN */
+}
+
+#if defined(SUPPORT_RANDOM_MAC_SCAN)
+int
+wl_cfg80211_set_random_mac(struct net_device *dev, bool enable)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ int ret;
+
+ if (cfg->random_mac_enabled == enable) {
+ WL_ERR(("Random MAC already %s\n", enable ? "Enabled" : "Disabled"));
+ return BCME_OK;
+ }
+
+ if (enable) {
+ ret = wl_cfg80211_random_mac_enable(dev);
+ } else {
+ ret = wl_cfg80211_random_mac_disable(dev);
+ }
+
+ if (!ret) {
+ cfg->random_mac_enabled = enable;
+ }
+
+ return ret;
+}
+
+int
+wl_cfg80211_random_mac_enable(struct net_device *dev)
+{
+ u8 random_mac[ETH_ALEN] = {0, };
+ u8 rand_bytes[3] = {0, };
+ s32 err = BCME_ERROR;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+#if !defined(LEGACY_RANDOM_MAC)
+ uint8 buffer[WLC_IOCTL_SMLEN] = {0, };
+ wl_scanmac_t *sm = NULL;
+ int len = 0;
+ wl_scanmac_enable_t *sm_enable = NULL;
+ wl_scanmac_config_t *sm_config = NULL;
+#endif /* !LEGACY_RANDOM_MAC */
+
+ if (wl_get_drv_status_all(cfg, CONNECTED) || wl_get_drv_status_all(cfg, CONNECTING) ||
+ wl_get_drv_status_all(cfg, AP_CREATED) || wl_get_drv_status_all(cfg, AP_CREATING)) {
+		WL_ERR(("failed to set random MAC, current state is wrong\n"));
+ return err;
+ }
+
+ (void)memcpy_s(random_mac, ETH_ALEN, bcmcfg_to_prmry_ndev(cfg)->dev_addr, ETH_ALEN);
+ get_random_bytes(&rand_bytes, sizeof(rand_bytes));
+
+ if (rand_bytes[2] == 0x0 || rand_bytes[2] == 0xff) {
+ rand_bytes[2] = 0xf0;
+ }
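+
+	/* Example (illustrative): with dev_addr 00:90:4c:11:22:33 and
+	 * rand_bytes {0xa1, 0xb2, 0xc3}, the resulting scan MAC is
+	 * 00:90:4c:a1:b2:c3, i.e. the OUI is preserved and only the NIC
+	 * portion is randomized.
+	 */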
+
+#if defined(LEGACY_RANDOM_MAC)
+	/* Of the six bytes of random_mac, bytes 3, 4 and 5 are overwritten with
+	 * the contents of rand_bytes. Since only 3 bytes are copied, no overflow
+	 * can occur, hence the return value is ignored.
+	 */
+ (void)memcpy_s(&random_mac[3], (sizeof(u8) * 3), rand_bytes, sizeof(rand_bytes));
+
+ err = wldev_iovar_setbuf_bsscfg(bcmcfg_to_prmry_ndev(cfg), "cur_etheraddr",
+ random_mac, ETH_ALEN, cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync);
+
+ if (err != BCME_OK) {
+ WL_ERR(("failed to set random generate MAC address\n"));
+ } else {
+ WL_ERR(("set mac " MACDBG " to " MACDBG "\n",
+ MAC2STRDBG((const u8 *)bcmcfg_to_prmry_ndev(cfg)->dev_addr),
+ MAC2STRDBG((const u8 *)&random_mac)));
+ WL_ERR(("random MAC enable done"));
+ }
+#else
+ /* Enable scan mac */
+ sm = (wl_scanmac_t *)buffer;
+ sm_enable = (wl_scanmac_enable_t *)sm->data;
+ sm->len = sizeof(*sm_enable);
+ sm_enable->enable = 1;
+ len = OFFSETOF(wl_scanmac_t, data) + sm->len;
+ sm->subcmd_id = WL_SCANMAC_SUBCMD_ENABLE;
+
+ err = wldev_iovar_setbuf_bsscfg(dev, "scanmac",
+ sm, len, cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync);
+
+	/* Older chips that do not have scanmac support can still use
+	 * cur_etheraddr to set the random MAC. rand_mask and rand_mac come from
+	 * the upper cfg80211 layer. If rand_mask and rand_mac are not passed,
+	 * fall back to the default cur_etheraddr and default mask.
+	 */
+ if (err == BCME_UNSUPPORTED) {
+		/* In case of host-based legacy randomization, the random address is
+		 * generated by mixing 3 bytes of cur_etheraddr with the 3 freshly
+		 * generated random bytes. In that case rand_mask is nothing but the
+		 * random bytes.
+		 */
+ (void)memcpy_s(&random_mac[3], (sizeof(u8) * 3), rand_bytes, sizeof(rand_bytes));
+ err = wldev_iovar_setbuf_bsscfg(bcmcfg_to_prmry_ndev(cfg), "cur_etheraddr",
+ random_mac, ETH_ALEN, cfg->ioctl_buf,
+ WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync);
+ if (err != BCME_OK) {
+ WL_ERR(("failed to set random generate MAC address\n"));
+ } else {
+ WL_ERR(("set mac " MACDBG " to " MACDBG "\n",
+ MAC2STRDBG((const u8 *)bcmcfg_to_prmry_ndev(cfg)->dev_addr),
+ MAC2STRDBG((const u8 *)&random_mac)));
+ WL_ERR(("random MAC enable done using legacy randmac"));
+ }
+ } else if (err == BCME_OK) {
+ /* Configure scanmac */
+ (void)memset_s(buffer, sizeof(buffer), 0x0, sizeof(buffer));
+ sm_config = (wl_scanmac_config_t *)sm->data;
+ sm->len = sizeof(*sm_config);
+ sm->subcmd_id = WL_SCANMAC_SUBCMD_CONFIG;
+ sm_config->scan_bitmap = WL_SCANMAC_SCAN_UNASSOC;
+
+ /* Set randomize mac address recv from upper layer */
+ (void)memcpy_s(&sm_config->mac.octet, ETH_ALEN, random_mac, ETH_ALEN);
+
+ /* Set randomize mask recv from upper layer */
+
+		/* Currently, in the Samsung case, the upper layer does not provide a
+		 * variable random mask and a fixed 3-byte randomization is used
+		 */
+ (void)memset_s(&sm_config->random_mask.octet, ETH_ALEN, 0x0, ETH_ALEN);
+ /* Memsetting the remaining octets 3, 4, 5. So remaining dest length is 3 */
+ (void)memset_s(&sm_config->random_mask.octet[3], 3, 0xFF, 3);
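+		/* Resulting firmware mask (illustrative): 00:00:00:ff:ff:ff,
+		 * i.e. the firmware randomizes only the lower three octets on
+		 * each scan.
+		 */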
+
+ WL_DBG(("recv random mac addr " MACDBG " recv rand mask" MACDBG "\n",
+ MAC2STRDBG((const u8 *)&sm_config->mac.octet),
+ MAC2STRDBG((const u8 *)&sm_config->random_mask)));
+
+ len = OFFSETOF(wl_scanmac_t, data) + sm->len;
+ err = wldev_iovar_setbuf_bsscfg(dev, "scanmac",
+ sm, len, cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync);
+
+ if (err != BCME_OK) {
+ WL_ERR(("failed scanmac configuration\n"));
+
+ /* Disable scan mac for clean-up */
+ wl_cfg80211_random_mac_disable(dev);
+ return err;
+ }
+ WL_DBG(("random MAC enable done using scanmac"));
+ } else {
+ WL_ERR(("failed to enable scanmac, err=%d\n", err));
+ }
+#endif /* LEGACY_RANDOM_MAC */
+
+ return err;
+}
+
+int
+wl_cfg80211_random_mac_disable(struct net_device *dev)
+{
+ s32 err = BCME_ERROR;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+#if !defined(LEGACY_RANDOM_MAC)
+ uint8 buffer[WLC_IOCTL_SMLEN] = {0, };
+ wl_scanmac_t *sm = NULL;
+ int len = 0;
+ wl_scanmac_enable_t *sm_enable = NULL;
+#endif /* !LEGACY_RANDOM_MAC */
+
+#if defined(LEGACY_RANDOM_MAC)
+ WL_ERR(("set original mac " MACDBG "\n",
+ MAC2STRDBG((const u8 *)bcmcfg_to_prmry_ndev(cfg)->dev_addr)));
+
+ err = wldev_iovar_setbuf_bsscfg(bcmcfg_to_prmry_ndev(cfg), "cur_etheraddr",
+ bcmcfg_to_prmry_ndev(cfg)->dev_addr, ETH_ALEN,
+ cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync);
+
+ if (err != BCME_OK) {
+ WL_ERR(("failed to set original MAC address\n"));
+ } else {
+ WL_ERR(("legacy random MAC disable done \n"));
+ }
+#else
+ sm = (wl_scanmac_t *)buffer;
+ sm_enable = (wl_scanmac_enable_t *)sm->data;
+ sm->len = sizeof(*sm_enable);
+ /* Disable scanmac */
+ sm_enable->enable = 0;
+ len = OFFSETOF(wl_scanmac_t, data) + sm->len;
+
+ sm->subcmd_id = WL_SCANMAC_SUBCMD_ENABLE;
+
+ err = wldev_iovar_setbuf_bsscfg(dev, "scanmac",
+ sm, len, cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync);
+
+ if (err != BCME_OK) {
+ WL_ERR(("failed to disable scanmac, err=%d\n", err));
+ return err;
+ }
+ /* Clear scanmac enabled status */
+ cfg->scanmac_enabled = 0;
+ WL_DBG(("random MAC disable done\n"));
+#endif /* LEGACY_RANDOM_MAC */
+
+ return err;
+}
+
+int wl_cfg80211_scan_mac_enable(struct net_device *dev)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ s32 err = BCME_ERROR;
+ uint8 buffer[WLC_IOCTL_SMLEN] = {0};
+ wl_scanmac_t *sm = NULL;
+ int len = 0;
+ wl_scanmac_enable_t *sm_enable = NULL;
+
+ /* Enable scan mac */
+ sm = (wl_scanmac_t *)buffer;
+ sm_enable = (wl_scanmac_enable_t *)sm->data;
+ sm->len = sizeof(*sm_enable);
+ sm_enable->enable = 1;
+ len = OFFSETOF(wl_scanmac_t, data) + sm->len;
+ sm->subcmd_id = WL_SCANMAC_SUBCMD_ENABLE;
+
+ err = wldev_iovar_setbuf_bsscfg(dev, "scanmac",
+ sm, len, cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync);
+ if (unlikely(err)) {
+ WL_ERR(("scanmac enable failed\n"));
+ } else {
+ /* Mark scanmac configured */
+ cfg->scanmac_enabled = 1;
+ }
+
+ return err;
+}
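+
+/* scanmac iovar buffer layout as built above (illustrative):
+ *
+ *   wl_scanmac_t
+ *   +-----------+-----+----------------------------+
+ *   | subcmd_id | len | data (subcommand payload)  |
+ *   +-----------+-----+----------------------------+
+ *
+ *   SUBCMD_ENABLE: data = wl_scanmac_enable_t { enable = 0 or 1 }
+ *   SUBCMD_CONFIG: data = wl_scanmac_config_t { mac, random_mask,
+ *                                               scan_bitmap }
+ */
+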
+/*
+ * This is the new interface for MAC randomization. It takes rand_mac and
+ * rand_mask as arguments and uses the scanmac iovar to offload MAC
+ * randomization to the firmware.
+ */
+int wl_cfg80211_scan_mac_config(struct net_device *dev, uint8 *rand_mac, uint8 *rand_mask)
+{
+ int byte_index = 0;
+ s32 err = BCME_ERROR;
+ uint8 buffer[WLC_IOCTL_SMLEN] = {0};
+ wl_scanmac_t *sm = NULL;
+ int len = 0;
+ wl_scanmac_config_t *sm_config = NULL;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ uint8 random_mask_46_bits[ETHER_ADDR_LEN] = {0xFC, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
+
+	if (rand_mac == NULL) {
+		err = BCME_BADARG;
+		WL_ERR(("failed to set random MAC, bad argument\n"));
+		/* Disable the current scanmac config */
+		return err;
+	}
+
+	if (ETHER_ISNULLADDR(rand_mac)) {
+		WL_DBG(("failed to set random MAC, invalid rand_mac\n"));
+		/* Disable the current scanmac config */
+		return err;
+	}
+
+ /* Configure scanmac */
+ (void)memset_s(buffer, sizeof(buffer), 0x0, sizeof(buffer));
+ sm = (wl_scanmac_t *)buffer;
+ sm_config = (wl_scanmac_config_t *)sm->data;
+ sm->len = sizeof(*sm_config);
+ sm->subcmd_id = WL_SCANMAC_SUBCMD_CONFIG;
+ sm_config->scan_bitmap = WL_SCANMAC_SCAN_UNASSOC;
+#ifdef WL_USE_RANDOMIZED_SCAN
+ sm_config->scan_bitmap |= WL_SCANMAC_SCAN_ASSOC_HOST;
+#endif /* WL_USE_RANDOMIZED_SCAN */
+ /* Set randomize mac address recv from upper layer */
+ (void)memcpy_s(&sm_config->mac.octet, ETH_ALEN, rand_mac, ETH_ALEN);
+
+ /* Set randomize mask recv from upper layer */
+
+	/* There is a difference in how rand_mask is interpreted between the
+	 * upper layer and the firmware. If a byte is set to FF, the upper
+	 * layer means "keep that byte, do not randomize", whereas the
+	 * firmware means "randomize that byte", and vice versa. Hence a
+	 * conversion is needed before setting the iovar.
+	 */
+ (void)memset_s(&sm_config->random_mask.octet, ETH_ALEN, 0x0, ETH_ALEN);
+	/* Only whole-byte randomization is supported currently. If the mask
+	 * received is, e.g., 0x0F for a particular byte, it is treated as no
+	 * randomization for that byte.
+	 */
+ if (!rand_mask) {
+ /* If rand_mask not provided, use 46_bits_mask */
+ (void)memcpy_s(&sm_config->random_mask.octet, ETH_ALEN,
+ random_mask_46_bits, ETH_ALEN);
+ } else {
+ while (byte_index < ETH_ALEN) {
+ if (rand_mask[byte_index] == 0xFF) {
+ sm_config->random_mask.octet[byte_index] = 0x00;
+ } else if (rand_mask[byte_index] == 0x00) {
+ sm_config->random_mask.octet[byte_index] = 0xFF;
+ }
+ byte_index++;
+ }
+ }
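+
+	/* Worked example (illustrative): the upper layer passes
+	 * rand_mask = ff:ff:ff:00:00:00, meaning "keep the OUI, randomize the
+	 * rest". The loop above inverts it to 00:00:00:ff:ff:ff, which is the
+	 * firmware's convention for "randomize the lower three octets".
+	 */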
+
+ WL_DBG(("recv random mac addr " MACDBG "recv rand mask" MACDBG "\n",
+ MAC2STRDBG((const u8 *)&sm_config->mac.octet),
+ MAC2STRDBG((const u8 *)&sm_config->random_mask)));
+
+ len = OFFSETOF(wl_scanmac_t, data) + sm->len;
+ err = wldev_iovar_setbuf_bsscfg(dev, "scanmac",
+ sm, len, cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync);
+
+ if (err != BCME_OK) {
+ WL_ERR(("failed scanmac configuration\n"));
+
+ /* Disable scan mac for clean-up */
+ return err;
+ }
+ WL_INFORM_MEM(("scanmac configured"));
+ cfg->scanmac_config = true;
+
+ return err;
+}
+
+int
+wl_cfg80211_scan_mac_disable(struct net_device *dev)
+{
+ s32 err = BCME_ERROR;
+
+ err = wl_cfg80211_random_mac_disable(dev);
+
+ return err;
+}
+#endif /* SUPPORT_RANDOM_MAC_SCAN */
+
+#ifdef WL_SCHED_SCAN
+#define PNO_TIME 30
+#define PNO_REPEAT 4
+#define PNO_FREQ_EXPO_MAX 2
+#define PNO_ADAPTIVE_SCAN_LIMIT 60
+static bool
+is_ssid_in_list(struct cfg80211_ssid *ssid, struct cfg80211_ssid *ssid_list, int count)
+{
+ int i;
+
+ if (!ssid || !ssid_list)
+ return FALSE;
+
+ for (i = 0; i < count; i++) {
+ if (ssid->ssid_len == ssid_list[i].ssid_len) {
+ if (strncmp(ssid->ssid, ssid_list[i].ssid, ssid->ssid_len) == 0)
+ return TRUE;
+ }
+ }
+ return FALSE;
+}
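+
+/* Example (illustrative): with request->ssids = { "home-ap" } and match
+ * sets { "home-ap", "cafe" }, only "home-ap" is marked hidden below, so
+ * the firmware probes for it actively during PNO scans.
+ */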
+
+int
+wl_cfg80211_sched_scan_start(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct cfg80211_sched_scan_request *request)
+{
+ u16 chan_list[WL_NUMCHANNELS] = {0};
+ u32 num_channels = 0;
+ ushort pno_time;
+ int pno_repeat = PNO_REPEAT;
+ int pno_freq_expo_max = PNO_FREQ_EXPO_MAX;
+ wlc_ssid_ext_t ssids_local[MAX_PFN_LIST_COUNT];
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+ struct cfg80211_ssid *ssid = NULL;
+ struct cfg80211_ssid *hidden_ssid_list = NULL;
+ log_conn_event_t *event_data = NULL;
+ tlv_log *tlv_data = NULL;
+ u32 alloc_len = 0;
+ u32 payload_len;
+ int ssid_cnt = 0;
+ int i;
+ int ret = 0;
+ unsigned long flags;
+
+ if (!request) {
+ WL_ERR(("Sched scan request was NULL\n"));
+ return -EINVAL;
+ }
+
+ if ((request->n_scan_plans == 1) && request->scan_plans &&
+ (request->scan_plans->interval > PNO_ADAPTIVE_SCAN_LIMIT)) {
+		/* If the host gives a high value for the scan interval,
+		 * adaptive scan doesn't make sense; better to stick to the
+		 * scan interval the host gives.
+		 */
+ pno_time = request->scan_plans->interval;
+ pno_repeat = 0;
+ pno_freq_expo_max = 0;
+ } else {
+ /* Run adaptive PNO */
+ pno_time = PNO_TIME;
+ }
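+
+	/* Adaptive PNO schedule (illustrative, assuming the usual dhd_pno
+	 * semantics): with PNO_TIME=30, PNO_REPEAT=4 and PNO_FREQ_EXPO_MAX=2,
+	 * the firmware scans every 30s, and after every PNO_REPEAT scans the
+	 * interval doubles, up to 30 * 2^2 = 120s.
+	 */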
+
+ WL_DBG(("Enter. ssids:%d match_sets:%d pno_time:%d pno_repeat:%d channels:%d\n",
+ request->n_ssids, request->n_match_sets,
+ pno_time, pno_repeat, request->n_channels));
+
+ if (!request->n_ssids || !request->n_match_sets) {
+ WL_ERR(("Invalid sched scan req!! n_ssids:%d \n", request->n_ssids));
+ return -EINVAL;
+ }
+
+ bzero(&ssids_local, sizeof(ssids_local));
+
+ if (request->n_ssids > 0) {
+ hidden_ssid_list = request->ssids;
+ }
+
+ if (request->n_channels && request->n_channels < WL_NUMCHANNELS) {
+ /* get channel list. Note PNO uses channels and not chanspecs */
+ wl_cfgscan_populate_scan_channels(cfg,
+ request->channels, request->n_channels,
+ chan_list, &num_channels, false, false);
+ }
+
+ if (DBG_RING_ACTIVE(dhdp, DHD_EVENT_RING_ID)) {
+ alloc_len = sizeof(log_conn_event_t) + sizeof(tlv_log) + DOT11_MAX_SSID_LEN;
+ event_data = (log_conn_event_t *)MALLOCZ(cfg->osh, alloc_len);
+ if (!event_data) {
+ WL_ERR(("%s: failed to allocate log_conn_event_t with "
+ "length(%d)\n", __func__, alloc_len));
+ return -ENOMEM;
+ }
+ }
+ for (i = 0; i < request->n_match_sets && ssid_cnt < MAX_PFN_LIST_COUNT; i++) {
+ ssid = &request->match_sets[i].ssid;
+ /* No need to include null ssid */
+ if (ssid->ssid_len) {
+ ssids_local[ssid_cnt].SSID_len = MIN(ssid->ssid_len,
+ (uint32)DOT11_MAX_SSID_LEN);
+			/* SSID_len was limited to DOT11_MAX_SSID_LEN in the previous
+			 * step, so the copy cannot overflow; the return value is ignored.
+			 */
+ (void)memcpy_s(ssids_local[ssid_cnt].SSID, DOT11_MAX_SSID_LEN, ssid->ssid,
+ ssids_local[ssid_cnt].SSID_len);
+ if (is_ssid_in_list(ssid, hidden_ssid_list, request->n_ssids)) {
+ ssids_local[ssid_cnt].hidden = TRUE;
+ WL_PNO((">>> PNO hidden SSID (%s) \n", ssid->ssid));
+ } else {
+ ssids_local[ssid_cnt].hidden = FALSE;
+ WL_PNO((">>> PNO non-hidden SSID (%s) \n", ssid->ssid));
+ }
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 15, 0))
+ if (request->match_sets[i].rssi_thold != NL80211_SCAN_RSSI_THOLD_OFF) {
+ ssids_local[ssid_cnt].rssi_thresh =
+ (int8)request->match_sets[i].rssi_thold;
+ }
+#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 15, 0)) */
+ ssid_cnt++;
+ }
+ }
+
+ if (ssid_cnt) {
+#if defined(BCMDONGLEHOST)
+ if ((ret = dhd_dev_pno_set_for_ssid(dev, ssids_local, ssid_cnt,
+ pno_time, pno_repeat, pno_freq_expo_max,
+ (num_channels ? chan_list : NULL), num_channels)) < 0) {
+ WL_ERR(("PNO setup failed!! ret=%d \n", ret));
+ ret = -EINVAL;
+ goto exit;
+ }
+#endif /* BCMDONGLEHOST */
+
+ if (DBG_RING_ACTIVE(dhdp, DHD_EVENT_RING_ID)) {
+ /*
+ * purposefully logging here to make sure that
+ * firmware configuration was successful
+ */
+ for (i = 0; i < ssid_cnt; i++) {
+ payload_len = sizeof(log_conn_event_t);
+ event_data->event = WIFI_EVENT_DRIVER_PNO_ADD;
+ tlv_data = event_data->tlvs;
+ /* ssid */
+ tlv_data->tag = WIFI_TAG_SSID;
+ tlv_data->len = ssids_local[i].SSID_len;
+ (void)memcpy_s(tlv_data->value, DOT11_MAX_SSID_LEN,
+ ssids_local[i].SSID, ssids_local[i].SSID_len);
+ payload_len += TLV_LOG_SIZE(tlv_data);
+
+ dhd_os_push_push_ring_data(dhdp, DHD_EVENT_RING_ID,
+ event_data, payload_len);
+ }
+ }
+
+ WL_CFG_DRV_LOCK(&cfg->cfgdrv_lock, flags);
+ cfg->sched_scan_req = request;
+ WL_CFG_DRV_UNLOCK(&cfg->cfgdrv_lock, flags);
+ } else {
+ ret = -EINVAL;
+ }
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) && defined(SUPPORT_RANDOM_MAC_SCAN)
+ if ((ret = wl_config_scan_macaddr(cfg, dev,
+ (request->flags & NL80211_SCAN_FLAG_RANDOM_ADDR),
+ request->mac_addr, request->mac_addr_mask)) != BCME_OK) {
+ WL_ERR(("scanmac addr config failed\n"));
+ /* Cleanup the states and stop the pno */
+ if (dhd_dev_pno_stop_for_ssid(dev) < 0) {
+ WL_ERR(("PNO Stop for SSID failed"));
+ }
+ WL_CFG_DRV_LOCK(&cfg->cfgdrv_lock, flags);
+ cfg->sched_scan_req = NULL;
+ cfg->sched_scan_running = FALSE;
+ WL_CFG_DRV_UNLOCK(&cfg->cfgdrv_lock, flags);
+ goto exit;
+ }
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) && (defined(SUPPORT_RANDOM_MAC_SCAN)) */
+
+exit:
+ if (event_data) {
+ MFREE(cfg->osh, event_data, alloc_len);
+ }
+ return ret;
+}
+
+int
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(4, 11, 0))
+wl_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev, u64 reqid)
+#else
+wl_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev)
+#endif /* LINUX_VER > 4.11 */
+{
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+
+ WL_DBG(("Enter \n"));
+ WL_PNO((">>> SCHED SCAN STOP\n"));
+
+#if defined(BCMDONGLEHOST)
+ if (dhd_dev_pno_stop_for_ssid(dev) < 0) {
+ WL_ERR(("PNO Stop for SSID failed"));
+ } else {
+ /*
+ * purposefully logging here to make sure that
+ * firmware configuration was successful
+ */
+ DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_PNO_REMOVE);
+ }
+#endif /* BCMDONGLEHOST */
+
+ mutex_lock(&cfg->scan_sync);
+ if (cfg->sched_scan_req) {
+ WL_PNO((">>> Sched scan running. Aborting it..\n"));
+ _wl_cfgscan_cancel_scan(cfg);
+ }
+ cfg->sched_scan_req = NULL;
+ cfg->sched_scan_running = FALSE;
+ mutex_unlock(&cfg->scan_sync);
+
+ return 0;
+}
+#endif /* WL_SCHED_SCAN */
+
+#ifdef WES_SUPPORT
+#ifdef CUSTOMER_SCAN_TIMEOUT_SETTING
+s32 wl_cfg80211_custom_scan_time(struct net_device *dev,
+ enum wl_custom_scan_time_type type, int time)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+
+ if (cfg == NULL) {
+ return FALSE;
+ }
+
+ switch (type) {
+ case WL_CUSTOM_SCAN_CHANNEL_TIME :
+ WL_ERR(("Scan Channel Time %d\n", time));
+ cfg->custom_scan_channel_time = time;
+ break;
+ case WL_CUSTOM_SCAN_UNASSOC_TIME :
+ WL_ERR(("Scan Unassoc Time %d\n", time));
+ cfg->custom_scan_unassoc_time = time;
+ break;
+ case WL_CUSTOM_SCAN_PASSIVE_TIME :
+ WL_ERR(("Scan Passive Time %d\n", time));
+ cfg->custom_scan_passive_time = time;
+ break;
+ case WL_CUSTOM_SCAN_HOME_TIME :
+ WL_ERR(("Scan Home Time %d\n", time));
+ cfg->custom_scan_home_time = time;
+ break;
+ case WL_CUSTOM_SCAN_HOME_AWAY_TIME :
+ WL_ERR(("Scan Home Away Time %d\n", time));
+ cfg->custom_scan_home_away_time = time;
+ break;
+ default:
+ return FALSE;
+ }
+ return TRUE;
+}
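+
+/* Example usage (illustrative): shorten the per-channel active dwell time
+ * to 40 ms:
+ *
+ *   wl_cfg80211_custom_scan_time(ndev, WL_CUSTOM_SCAN_CHANNEL_TIME, 40);
+ */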
+#endif /* CUSTOMER_SCAN_TIMEOUT_SETTING */
+#endif /* WES_SUPPORT */
+
+#ifdef CUSTOMER_HW4_DEBUG
+uint prev_dhd_console_ms = 0;
+u32 prev_wl_dbg_level = 0;
+static void wl_scan_timeout_dbg_set(void);
+
+static void wl_scan_timeout_dbg_set(void)
+{
+ WL_ERR(("Enter \n"));
+ prev_dhd_console_ms = dhd_console_ms;
+ prev_wl_dbg_level = wl_dbg_level;
+
+ dhd_console_ms = 1;
+ wl_dbg_level |= (WL_DBG_ERR | WL_DBG_P2P_ACTION | WL_DBG_SCAN);
+
+ wl_scan_timeout_dbg_enabled = 1;
+}
+void wl_scan_timeout_dbg_clear(void)
+{
+ WL_ERR(("Enter \n"));
+ dhd_console_ms = prev_dhd_console_ms;
+ wl_dbg_level = prev_wl_dbg_level;
+
+ wl_scan_timeout_dbg_enabled = 0;
+}
+#endif /* CUSTOMER_HW4_DEBUG */
+
+static void wl_scan_timeout(unsigned long data)
+{
+ wl_event_msg_t msg;
+ struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)data;
+ struct wireless_dev *wdev = NULL;
+ struct net_device *ndev = NULL;
+#if 0
+ wl_scan_results_t *bss_list;
+ wl_bss_info_t *bi = NULL;
+ s32 i;
+ u32 channel;
+#endif
+ u64 cur_time = OSL_LOCALTIME_NS();
+#ifdef BCMDONGLEHOST
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+#endif /* BCMDONGLEHOST */
+ unsigned long flags;
+#ifdef RTT_SUPPORT
+ rtt_status_info_t *rtt_status = NULL;
+ UNUSED_PARAMETER(rtt_status);
+#endif /* RTT_SUPPORT */
+
+ UNUSED_PARAMETER(cur_time);
+ WL_CFG_DRV_LOCK(&cfg->cfgdrv_lock, flags);
+ if (!(cfg->scan_request)) {
+ WL_ERR(("timer expired but no scan request\n"));
+ WL_CFG_DRV_UNLOCK(&cfg->cfgdrv_lock, flags);
+ return;
+ }
+
+ wdev = GET_SCAN_WDEV(cfg->scan_request);
+ WL_CFG_DRV_UNLOCK(&cfg->cfgdrv_lock, flags);
+
+ if (!wdev) {
+ WL_ERR(("No wireless_dev present\n"));
+ return;
+ }
+
+#ifdef BCMDONGLEHOST
+ if (dhd_query_bus_erros(dhdp)) {
+ return;
+ }
+#if defined(DHD_KERNEL_SCHED_DEBUG) && defined(DHD_FW_COREDUMP)
+	/* DHD triggers a kernel panic if the SCAN timeout occurs
+	 * due to tasklet or workqueue scheduling problems in the Linux kernel.
+	 * The customer reports that it is hard to find any clue in the
+	 * host memory dump since the important tasklet or workqueue information
+	 * has already disappeared, due to the latency of printing the timestamp
+	 * logs, by the time the scan timeout issue is debugged.
+	 * For this reason, the customer requests that we trigger a kernel panic
+	 * rather than taking a SOCRAM dump.
+	 */
+ if (dhdp->memdump_enabled == DUMP_MEMFILE_BUGON &&
+ ((cfg->tsinfo.scan_deq < cfg->tsinfo.scan_enq) ||
+ dhd_bus_query_dpc_sched_errors(dhdp))) {
+ WL_ERR(("****SCAN event timeout due to scheduling problem\n"));
+ /* change g_assert_type to trigger Kernel panic */
+ g_assert_type = 2;
+#ifdef RTT_SUPPORT
+ rtt_status = GET_RTTSTATE(dhdp);
+#endif /* RTT_SUPPORT */
+ WL_ERR(("***SCAN event timeout. WQ state:0x%x scan_enq_time:"SEC_USEC_FMT
+ " evt_hdlr_entry_time:"SEC_USEC_FMT" evt_deq_time:"SEC_USEC_FMT
+ "\nscan_deq_time:"SEC_USEC_FMT" scan_hdlr_cmplt_time:"SEC_USEC_FMT
+ " scan_cmplt_time:"SEC_USEC_FMT" evt_hdlr_exit_time:"SEC_USEC_FMT
+ "\ncurrent_time:"SEC_USEC_FMT"\n", work_busy(&cfg->event_work),
+ GET_SEC_USEC(cfg->tsinfo.scan_enq),
+ GET_SEC_USEC(cfg->tsinfo.wl_evt_hdlr_entry),
+ GET_SEC_USEC(cfg->tsinfo.wl_evt_deq),
+ GET_SEC_USEC(cfg->tsinfo.scan_deq),
+ GET_SEC_USEC(cfg->tsinfo.scan_hdlr_cmplt),
+ GET_SEC_USEC(cfg->tsinfo.scan_cmplt),
+ GET_SEC_USEC(cfg->tsinfo.wl_evt_hdlr_exit), GET_SEC_USEC(cur_time)));
+ if (cfg->tsinfo.scan_enq) {
+ WL_ERR(("Elapsed time(ns): %llu\n", (cur_time - cfg->tsinfo.scan_enq)));
+ }
+ WL_ERR(("lock_states:[%d:%d:%d:%d:%d:%d]\n",
+ mutex_is_locked(&cfg->if_sync),
+ mutex_is_locked(&cfg->usr_sync),
+ mutex_is_locked(&cfg->pm_sync),
+ mutex_is_locked(&cfg->scan_sync),
+ spin_is_locked(&cfg->cfgdrv_lock),
+ spin_is_locked(&cfg->eq_lock)));
+#ifdef RTT_SUPPORT
+ WL_ERR(("RTT lock_state:[%d]\n",
+ mutex_is_locked(&rtt_status->rtt_mutex)));
+#ifdef WL_NAN
+ WL_ERR(("RTT and Geofence lock_states:[%d:%d]\n",
+ mutex_is_locked(&cfg->nancfg->nan_sync),
+ mutex_is_locked(&(rtt_status)->geofence_mutex)));
+#endif /* WL_NAN */
+#endif /* RTT_SUPPORT */
+
+ /* use ASSERT() to trigger panic */
+ ASSERT(0);
+ }
+#endif /* DHD_KERNEL_SCHED_DEBUG && DHD_FW_COREDUMP */
+ dhd_bus_intr_count_dump(dhdp);
+#endif /* BCMDONGLEHOST */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) && !defined(CONFIG_MODULES)
+ /* Print WQ states. Enable only for in-built drivers as the symbol is not exported */
+ show_workqueue_state();
+#endif /* LINUX_VER >= 4.1 && !CONFIG_MODULES */
+
+#if 0
+ bss_list = wl_escan_get_buf(cfg, FALSE);
+ if (!bss_list) {
+ WL_ERR(("bss_list is null. Didn't receive any partial scan results\n"));
+ } else {
+ WL_ERR(("Dump scan buffer:\n"
+ "scanned AP count (%d)\n", bss_list->count));
+
+ bi = next_bss(bss_list, bi);
+ for_each_bss(bss_list, bi, i) {
+ channel = wf_chspec_ctlchan(wl_chspec_driver_to_host(bi->chanspec));
+ WL_ERR(("SSID :%s Channel :%d\n", bi->SSID, channel));
+ }
+ }
+#endif
+
+ ndev = wdev_to_wlc_ndev(wdev, cfg);
+ bzero(&msg, sizeof(wl_event_msg_t));
+ WL_ERR(("timer expired\n"));
+#ifdef BCMDONGLEHOST
+ dhdp->scan_timeout_occurred = TRUE;
+#ifdef BCMPCIE
+ if (!dhd_pcie_dump_int_regs(dhdp)) {
+ WL_ERR(("%s : PCIe link might be down\n", __FUNCTION__));
+ dhd_bus_set_linkdown(dhdp, TRUE);
+ dhdp->hang_reason = HANG_REASON_PCIE_LINK_DOWN_EP_DETECT;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(OEM_ANDROID)
+ dhd_os_send_hang_message(dhdp);
+#else
+ WL_ERR(("%s: HANG event is unsupported\n", __FUNCTION__));
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) && OEM_ANDROID */
+ }
+
+ dhd_pcie_dump_rc_conf_space_cap(dhdp);
+#endif /* BCMPCIE */
+#if 0
+ if (!dhd_bus_get_linkdown(dhdp) && dhdp->memdump_enabled) {
+ dhdp->memdump_type = DUMP_TYPE_SCAN_TIMEOUT;
+ dhd_bus_mem_dump(dhdp);
+ }
+#endif /* DHD_FW_COREDUMP */
+	/*
+	 * For memdump sanity, bus transactions were blocked for a while.
+	 * Keeping scan_timeout_occurred TRUE causes subsequent private
+	 * cmd errors, so clear it here.
+	 */
+ dhdp->scan_timeout_occurred = FALSE;
+#endif /* BCMDONGLEHOST */
+ msg.event_type = hton32(WLC_E_ESCAN_RESULT);
+ msg.status = hton32(WLC_E_STATUS_TIMEOUT);
+ msg.reason = 0xFFFFFFFF;
+ wl_cfg80211_event(ndev, &msg, NULL);
+#ifdef CUSTOMER_HW4_DEBUG
+ if (!wl_scan_timeout_dbg_enabled)
+ wl_scan_timeout_dbg_set();
+#endif /* CUSTOMER_HW4_DEBUG */
+
+#if defined(BCMDONGLEHOST) && defined(OEM_ANDROID)
+ DHD_ENABLE_RUNTIME_PM(dhdp);
+#endif /* BCMDONGLEHOST && OEM_ANDROID */
+
+}
+
+s32 wl_init_scan(struct bcm_cfg80211 *cfg)
+{
+ int err = 0;
+
+ cfg->evt_handler[WLC_E_ESCAN_RESULT] = wl_escan_handler;
+ cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+ wl_escan_init_sync_id(cfg);
+
+ /* Init scan_timeout timer */
+ init_timer_compat(&cfg->scan_timeout, wl_scan_timeout, cfg);
+
+ wl_cfg80211_set_bcmcfg(cfg);
+
+ return err;
+}
+
+#ifdef WL_SCHED_SCAN
+static s32
+wl_cfgscan_init_pno_escan(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ struct cfg80211_scan_request *request)
+{
+ struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+ int err = 0;
+
+ mutex_lock(&cfg->scan_sync);
+ LOG_TS(cfg, scan_start);
+
+ if (wl_get_drv_status_all(cfg, SCANNING)) {
+ _wl_cfgscan_cancel_scan(cfg);
+ }
+
+ wl_set_drv_status(cfg, SCANNING, ndev);
+ WL_PNO((">>> Doing targeted ESCAN on PNO event\n"));
+
+ err = wl_do_escan(cfg, wiphy, ndev, request);
+ if (err) {
+ wl_clr_drv_status(cfg, SCANNING, ndev);
+ mutex_unlock(&cfg->scan_sync);
+ WL_ERR(("targeted escan failed. err:%d\n", err));
+ return err;
+ }
+
+ DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_PNO_SCAN_REQUESTED);
+
+ cfg->sched_scan_running = TRUE;
+ mutex_unlock(&cfg->scan_sync);
+
+ return err;
+}
+
+static s32
+wl_cfgscan_update_v3_schedscan_results(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ wl_pfn_scanresults_v3_t *pfn_result, uint32 event_type)
+{
+ int err = 0;
+ wl_pfn_net_info_v3_t *netinfo, *pnetinfo;
+ struct cfg80211_scan_request *request = NULL;
+ struct cfg80211_ssid ssid[MAX_PFN_LIST_COUNT];
+ struct ieee80211_channel *channel = NULL;
+ struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+ log_conn_event_t *event_data = NULL;
+ tlv_log *tlv_data = NULL;
+ u32 alloc_len = 0;
+ int channel_req = 0;
+ u32 payload_len;
+
+ if (event_type == WLC_E_PFN_NET_LOST) {
+ WL_PNO(("Do Nothing %d\n", event_type));
+ return 0;
+ }
+
+ WL_INFORM_MEM(("PFN NET FOUND event. count:%d \n", pfn_result->count));
+
+ pnetinfo = (wl_pfn_net_info_v3_t *)pfn_result->netinfo;
+ if (pfn_result->count > 0) {
+ int i;
+
+ if (pfn_result->count > MAX_PFN_LIST_COUNT) {
+ pfn_result->count = MAX_PFN_LIST_COUNT;
+ }
+
+ bzero(&ssid, sizeof(ssid));
+
+ request = (struct cfg80211_scan_request *)MALLOCZ(cfg->osh,
+ sizeof(*request) + sizeof(*request->channels) * pfn_result->count);
+ channel = (struct ieee80211_channel *)MALLOCZ(cfg->osh,
+ (sizeof(struct ieee80211_channel) * pfn_result->count));
+ if (!request || !channel) {
+ WL_ERR(("No memory"));
+ err = -ENOMEM;
+ goto out_err;
+ }
+
+ request->wiphy = wiphy;
+
+ if (DBG_RING_ACTIVE(dhdp, DHD_EVENT_RING_ID)) {
+ alloc_len = sizeof(log_conn_event_t) + (3 * sizeof(tlv_log)) +
+ DOT11_MAX_SSID_LEN + sizeof(uint16) + sizeof(int16);
+ event_data = (log_conn_event_t *)MALLOCZ(cfg->osh, alloc_len);
+ if (!event_data) {
+ WL_ERR(("%s: failed to allocate the log_conn_event_t with "
+ "length(%d)\n", __func__, alloc_len));
+ err = -ENOMEM;
+ goto out_err;
+ }
+ }
+
+ for (i = 0; i < pfn_result->count; i++) {
+ u16 ssid_len;
+ u8 ssid_buf[DOT11_MAX_SSID_LEN + 1] = {0};
+ netinfo = &pnetinfo[i];
+
+			/* The PFN result doesn't have all the info required by the
+			 * supplicant (e.g. IEs). Do a targeted escan so that sched scan
+			 * results are reported via wl_inform_single_bss in the required
+			 * format. Escan requires the scan request in the form of a
+			 * cfg80211_scan_request, so for the time being create one out
+			 * of the received PNO event.
+			 */
+ ssid[i].ssid_len = ssid_len = MIN(DOT11_MAX_SSID_LEN,
+ netinfo->pfnsubnet.SSID_len);
+			/* ssid_len was capped to DOT11_MAX_SSID_LEN in the previous
+			 * step, which is the same as IEEE80211_MAX_SSID_LEN (32), so
+			 * the copy cannot overflow.
+			 */
+ (void)memcpy_s(ssid[i].ssid, IEEE80211_MAX_SSID_LEN,
+ netinfo->pfnsubnet.u.SSID, ssid_len);
+ request->n_ssids++;
+
+ channel_req = netinfo->pfnsubnet.chanspec;
+ channel[i].center_freq = wl_channel_to_frequency(
+ wf_chspec_ctlchan(netinfo->pfnsubnet.chanspec),
+ CHSPEC_BAND(netinfo->pfnsubnet.chanspec));
+ channel[i].band =
+ wl_get_nl80211_band(CHSPEC_BAND(netinfo->pfnsubnet.chanspec));
+ channel[i].flags |= IEEE80211_CHAN_NO_HT40;
+ request->channels[i] = &channel[i];
+ request->n_channels++;
+
+ (void)memcpy_s(ssid_buf, IEEE80211_MAX_SSID_LEN,
+ ssid[i].ssid, ssid_len);
+ ssid_buf[ssid_len] = '\0';
+ WL_INFORM_MEM(("[PNO] SSID:%s chanspec:0x%x freq:%d band:%d\n",
+ ssid_buf, netinfo->pfnsubnet.chanspec,
+ channel[i].center_freq, channel[i].band));
+ if (DBG_RING_ACTIVE(dhdp, DHD_EVENT_RING_ID)) {
+ payload_len = sizeof(log_conn_event_t);
+ event_data->event = WIFI_EVENT_DRIVER_PNO_NETWORK_FOUND;
+ tlv_data = event_data->tlvs;
+
+ /* ssid */
+ tlv_data->tag = WIFI_TAG_SSID;
+ tlv_data->len = netinfo->pfnsubnet.SSID_len;
+ (void)memcpy_s(tlv_data->value, DOT11_MAX_SSID_LEN,
+ ssid[i].ssid, ssid[i].ssid_len);
+ payload_len += TLV_LOG_SIZE(tlv_data);
+ tlv_data = TLV_LOG_NEXT(tlv_data);
+
+ /* channel */
+ tlv_data->tag = WIFI_TAG_CHANNEL;
+ tlv_data->len = sizeof(uint16);
+ (void)memcpy_s(tlv_data->value, sizeof(uint16),
+ &channel_req, sizeof(uint16));
+ payload_len += TLV_LOG_SIZE(tlv_data);
+ tlv_data = TLV_LOG_NEXT(tlv_data);
+
+ /* rssi */
+ tlv_data->tag = WIFI_TAG_RSSI;
+ tlv_data->len = sizeof(int16);
+ (void)memcpy_s(tlv_data->value, sizeof(uint16),
+ &netinfo->RSSI, sizeof(int16));
+ payload_len += TLV_LOG_SIZE(tlv_data);
+ tlv_data = TLV_LOG_NEXT(tlv_data);
+
+ dhd_os_push_push_ring_data(dhdp, DHD_EVENT_RING_ID,
+ &event_data->event, payload_len);
+ }
+ }
+
+ /* assign parsed ssid array */
+ if (request->n_ssids)
+ request->ssids = &ssid[0];
+
+ if (wl_get_p2p_status(cfg, DISCOVERY_ON)) {
+ WL_PNO((">>> P2P discovery was ON. Disabling it\n"));
+ err = wl_cfgp2p_discover_enable_search(cfg, false);
+ if (unlikely(err)) {
+ wl_clr_drv_status(cfg, SCANNING, ndev);
+ return err;
+ }
+ p2p_scan(cfg) = false;
+ }
+
+ err = wl_cfgscan_init_pno_escan(cfg, ndev, request);
+ if (err) {
+ goto out_err;
+ }
+ }
+ else {
+ WL_ERR(("FALSE PNO Event. (pfn_count == 0) \n"));
+ }
+
+out_err:
+ if (request) {
+ MFREE(cfg->osh, request,
+ sizeof(*request) + sizeof(*request->channels) * pfn_result->count);
+ }
+ if (channel) {
+ MFREE(cfg->osh, channel,
+ (sizeof(struct ieee80211_channel) * pfn_result->count));
+ }
+
+ if (event_data) {
+ MFREE(cfg->osh, event_data, alloc_len);
+ }
+
+ return err;
+}
+/* If target scan is not reliable, set the below define to "1" to do a
+ * full escan
+ */
+static s32
+wl_notify_sched_scan_results(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data)
+{
+ wl_pfn_net_info_v1_t *netinfo, *pnetinfo;
+ wl_pfn_net_info_v2_t *netinfo_v2, *pnetinfo_v2;
+ struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+ int err = 0;
+ struct cfg80211_scan_request *request = NULL;
+ struct cfg80211_ssid ssid[MAX_PFN_LIST_COUNT];
+ struct ieee80211_channel *channel = NULL;
+ int channel_req = 0;
+ int band = 0;
+ wl_pfn_scanresults_v1_t *pfn_result_v1 = (wl_pfn_scanresults_v1_t *)data;
+ wl_pfn_scanresults_v2_t *pfn_result_v2 = (wl_pfn_scanresults_v2_t *)data;
+ wl_pfn_scanresults_v3_t *pfn_result_v3 = (wl_pfn_scanresults_v3_t *)data;
+ int n_pfn_results = 0;
+ log_conn_event_t *event_data = NULL;
+ tlv_log *tlv_data = NULL;
+ u32 alloc_len = 0;
+ u32 payload_len;
+ u8 tmp_buf[DOT11_MAX_SSID_LEN + 1];
+
+ WL_DBG(("Enter\n"));
+
+ /* These static asserts guarantee v1/v2 net_info and subnet_info are compatible
+ * in size and SSID offset, allowing v1 to be used below except for the results
+ * fields themselves (status, count, offset to netinfo).
+ */
+ STATIC_ASSERT(sizeof(wl_pfn_net_info_v1_t) == sizeof(wl_pfn_net_info_v2_t));
+ STATIC_ASSERT(sizeof(wl_pfn_lnet_info_v1_t) == sizeof(wl_pfn_lnet_info_v2_t));
+ STATIC_ASSERT(sizeof(wl_pfn_subnet_info_v1_t) == sizeof(wl_pfn_subnet_info_v2_t));
+ STATIC_ASSERT(OFFSETOF(wl_pfn_subnet_info_v1_t, SSID) ==
+ OFFSETOF(wl_pfn_subnet_info_v2_t, u.SSID));
+
+ /* Extract the version-specific items */
+ if (pfn_result_v1->version == PFN_SCANRESULT_VERSION_V1) {
+ n_pfn_results = pfn_result_v1->count;
+ pnetinfo = pfn_result_v1->netinfo;
+ WL_INFORM_MEM(("PFN NET FOUND event. count:%d \n", n_pfn_results));
+
+ if (n_pfn_results > 0) {
+ int i;
+
+ if (n_pfn_results > MAX_PFN_LIST_COUNT)
+ n_pfn_results = MAX_PFN_LIST_COUNT;
+
+ bzero(&ssid, sizeof(ssid));
+
+ request = (struct cfg80211_scan_request *)MALLOCZ(cfg->osh,
+ sizeof(*request) + sizeof(*request->channels) * n_pfn_results);
+ channel = (struct ieee80211_channel *)MALLOCZ(cfg->osh,
+ (sizeof(struct ieee80211_channel) * n_pfn_results));
+ if (!request || !channel) {
+ WL_ERR(("No memory"));
+ err = -ENOMEM;
+ goto out_err;
+ }
+
+ request->wiphy = wiphy;
+
+ if (DBG_RING_ACTIVE(dhdp, DHD_EVENT_RING_ID)) {
+ alloc_len = sizeof(log_conn_event_t) + (3 * sizeof(tlv_log)) +
+ DOT11_MAX_SSID_LEN + sizeof(uint16) + sizeof(int16);
+ event_data = (log_conn_event_t *)MALLOCZ(cfg->osh, alloc_len);
+ if (!event_data) {
+ WL_ERR(("%s: failed to allocate the log_conn_event_t with "
+ "length(%d)\n", __func__, alloc_len));
+ goto out_err;
+ }
+ }
+
+ for (i = 0; i < n_pfn_results; i++) {
+ netinfo = &pnetinfo[i];
+				/* Defensive check: netinfo points into pnetinfo[] and
+				 * should never be NULL here.
+				 */
+ if (!netinfo) {
+ WL_ERR(("Invalid netinfo ptr. index:%d", i));
+ err = -EINVAL;
+ goto out_err;
+ }
+ if (netinfo->pfnsubnet.SSID_len > DOT11_MAX_SSID_LEN) {
+ WL_ERR(("Wrong SSID length:%d\n",
+ netinfo->pfnsubnet.SSID_len));
+ err = -EINVAL;
+ goto out_err;
+ }
+				/* SSID_len was limited to DOT11_MAX_SSID_LEN in the previous
+				 * step and tmp_buf is DOT11_MAX_SSID_LEN + 1 bytes, so both
+				 * the copy and the terminating NUL fit.
+				 */
+ (void)memcpy_s(tmp_buf, DOT11_MAX_SSID_LEN,
+ netinfo->pfnsubnet.SSID, netinfo->pfnsubnet.SSID_len);
+ tmp_buf[netinfo->pfnsubnet.SSID_len] = '\0';
+ WL_PNO((">>> SSID:%s Channel:%d \n",
+ tmp_buf, netinfo->pfnsubnet.channel));
+				/* The PFN result doesn't have all the info required by
+				 * the supplicant (e.g. IEs). Do a targeted escan so that
+				 * sched scan results are reported via wl_inform_single_bss
+				 * in the required format. Escan requires the scan request in
+				 * the form of a cfg80211_scan_request, so for the time being
+				 * create one out of the received PNO event.
+				 */
+
+ ssid[i].ssid_len = netinfo->pfnsubnet.SSID_len;
+				/* The return value is ignored since ssid[i].ssid_len is
+				 * capped at DOT11_MAX_SSID_LEN
+				 */
+ (void)memcpy_s(ssid[i].ssid, IEEE80211_MAX_SSID_LEN,
+ netinfo->pfnsubnet.SSID, ssid[i].ssid_len);
+ request->n_ssids++;
+
+ channel_req = netinfo->pfnsubnet.channel;
+ band = (channel_req <= CH_MAX_2G_CHANNEL) ? NL80211_BAND_2GHZ
+ : NL80211_BAND_5GHZ;
+ channel[i].center_freq =
+ ieee80211_channel_to_frequency(channel_req, band);
+ channel[i].band = band;
+ channel[i].flags |= IEEE80211_CHAN_NO_HT40;
+ request->channels[i] = &channel[i];
+ request->n_channels++;
+
+ if (DBG_RING_ACTIVE(dhdp, DHD_EVENT_RING_ID)) {
+ payload_len = sizeof(log_conn_event_t);
+ event_data->event = WIFI_EVENT_DRIVER_PNO_NETWORK_FOUND;
+ tlv_data = event_data->tlvs;
+
+ /* ssid */
+ tlv_data->tag = WIFI_TAG_SSID;
+ tlv_data->len = ssid[i].ssid_len;
+ (void)memcpy_s(tlv_data->value, DOT11_MAX_SSID_LEN,
+ ssid[i].ssid, ssid[i].ssid_len);
+ payload_len += TLV_LOG_SIZE(tlv_data);
+ tlv_data = TLV_LOG_NEXT(tlv_data);
+
+ /* channel */
+ tlv_data->tag = WIFI_TAG_CHANNEL;
+ tlv_data->len = sizeof(uint16);
+ (void)memcpy_s(tlv_data->value, sizeof(uint16),
+ &channel_req, sizeof(uint16));
+ payload_len += TLV_LOG_SIZE(tlv_data);
+ tlv_data = TLV_LOG_NEXT(tlv_data);
+
+ /* rssi */
+ tlv_data->tag = WIFI_TAG_RSSI;
+ tlv_data->len = sizeof(int16);
+ (void)memcpy_s(tlv_data->value, sizeof(int16),
+ &netinfo->RSSI, sizeof(int16));
+ payload_len += TLV_LOG_SIZE(tlv_data);
+ tlv_data = TLV_LOG_NEXT(tlv_data);
+
+ dhd_os_push_push_ring_data(dhdp, DHD_EVENT_RING_ID,
+ &event_data->event, payload_len);
+ }
+ }
+
+ /* assign parsed ssid array */
+ if (request->n_ssids)
+ request->ssids = &ssid[0];
+
+ if (wl_get_p2p_status(cfg, DISCOVERY_ON)) {
+ WL_PNO((">>> P2P discovery was ON. Disabling it\n"));
+ err = wl_cfgp2p_discover_enable_search(cfg, false);
+ if (unlikely(err)) {
+ wl_clr_drv_status(cfg, SCANNING, ndev);
+ goto out_err;
+ }
+ p2p_scan(cfg) = false;
+ }
+ err = wl_cfgscan_init_pno_escan(cfg, ndev, request);
+ if (err) {
+ goto out_err;
+ }
+ }
+ else {
+ WL_ERR(("FALSE PNO Event. (pfn_count == 0) \n"));
+ }
+
+ } else if (pfn_result_v2->version == PFN_SCANRESULT_VERSION_V2) {
+ n_pfn_results = pfn_result_v2->count;
+ pnetinfo_v2 = (wl_pfn_net_info_v2_t *)pfn_result_v2->netinfo;
+
+ if (e->event_type == WLC_E_PFN_NET_LOST) {
+ WL_PNO(("Do Nothing %d\n", e->event_type));
+ return 0;
+ }
+
+ WL_INFORM_MEM(("PFN NET FOUND event. count:%d \n", n_pfn_results));
+
+ if (n_pfn_results > 0) {
+ int i;
+
+ if (n_pfn_results > MAX_PFN_LIST_COUNT)
+ n_pfn_results = MAX_PFN_LIST_COUNT;
+
+ bzero(&ssid, sizeof(ssid));
+
+ request = (struct cfg80211_scan_request *)MALLOCZ(cfg->osh,
+ sizeof(*request) + sizeof(*request->channels) * n_pfn_results);
+ channel = (struct ieee80211_channel *)MALLOCZ(cfg->osh,
+ (sizeof(struct ieee80211_channel) * n_pfn_results));
+ if (!request || !channel) {
+ WL_ERR(("No memory"));
+ err = -ENOMEM;
+ goto out_err;
+ }
+
+ request->wiphy = wiphy;
+
+ if (DBG_RING_ACTIVE(dhdp, DHD_EVENT_RING_ID)) {
+ alloc_len = sizeof(log_conn_event_t) + (3 * sizeof(tlv_log)) +
+ DOT11_MAX_SSID_LEN + sizeof(uint16) + sizeof(int16);
+ event_data = (log_conn_event_t *)MALLOC(cfg->osh, alloc_len);
+ if (!event_data) {
+ WL_ERR(("%s: failed to allocate the log_conn_event_t with "
+ "length(%d)\n", __func__, alloc_len));
+ goto out_err;
+ }
+ }
+
+ for (i = 0; i < n_pfn_results; i++) {
+ netinfo_v2 = &pnetinfo_v2[i];
+				/* Defensive check: netinfo_v2 points into pnetinfo_v2[] and
+				 * should never be NULL here.
+				 */
+ if (!netinfo_v2) {
+ WL_ERR(("Invalid netinfo ptr. index:%d", i));
+ err = -EINVAL;
+ goto out_err;
+ }
+ WL_PNO((">>> SSID:%s Channel:%d \n",
+ netinfo_v2->pfnsubnet.u.SSID,
+ netinfo_v2->pfnsubnet.channel));
+				/* The PFN result doesn't have all the info required by the
+				 * supplicant (e.g. IEs). Do a targeted escan so that sched scan
+				 * results are reported via wl_inform_single_bss in the required
+				 * format. Escan requires the scan request in the form of a
+				 * cfg80211_scan_request, so for the time being create one out
+				 * of the received PNO event.
+				 */
+ ssid[i].ssid_len = MIN(DOT11_MAX_SSID_LEN,
+ netinfo_v2->pfnsubnet.SSID_len);
+				/* ssid_len was capped to DOT11_MAX_SSID_LEN in the previous
+				 * step, which is the same as IEEE80211_MAX_SSID_LEN (32), so
+				 * the copy cannot overflow.
+				 */
+ (void)memcpy_s(ssid[i].ssid, IEEE80211_MAX_SSID_LEN,
+ netinfo_v2->pfnsubnet.u.SSID, ssid[i].ssid_len);
+ request->n_ssids++;
+
+ channel_req = netinfo_v2->pfnsubnet.channel;
+ band = (channel_req <= CH_MAX_2G_CHANNEL) ? NL80211_BAND_2GHZ
+ : NL80211_BAND_5GHZ;
+ channel[i].center_freq =
+ ieee80211_channel_to_frequency(channel_req, band);
+ channel[i].band = band;
+ channel[i].flags |= IEEE80211_CHAN_NO_HT40;
+ request->channels[i] = &channel[i];
+ request->n_channels++;
+
+ if (DBG_RING_ACTIVE(dhdp, DHD_EVENT_RING_ID)) {
+ payload_len = sizeof(log_conn_event_t);
+ event_data->event = WIFI_EVENT_DRIVER_PNO_NETWORK_FOUND;
+ tlv_data = event_data->tlvs;
+
+ /* ssid */
+ tlv_data->tag = WIFI_TAG_SSID;
+ tlv_data->len = netinfo_v2->pfnsubnet.SSID_len;
+ (void)memcpy_s(tlv_data->value, DOT11_MAX_SSID_LEN,
+ ssid[i].ssid, ssid[i].ssid_len);
+ payload_len += TLV_LOG_SIZE(tlv_data);
+ tlv_data = TLV_LOG_NEXT(tlv_data);
+
+ /* channel */
+ tlv_data->tag = WIFI_TAG_CHANNEL;
+ tlv_data->len = sizeof(uint16);
+ (void)memcpy_s(tlv_data->value, sizeof(uint16),
+ &channel_req, sizeof(uint16));
+ payload_len += TLV_LOG_SIZE(tlv_data);
+ tlv_data = TLV_LOG_NEXT(tlv_data);
+
+ /* rssi */
+ tlv_data->tag = WIFI_TAG_RSSI;
+ tlv_data->len = sizeof(int16);
+ (void)memcpy_s(tlv_data->value, sizeof(uint16),
+ &netinfo_v2->RSSI, sizeof(int16));
+ payload_len += TLV_LOG_SIZE(tlv_data);
+ tlv_data = TLV_LOG_NEXT(tlv_data);
+
+ dhd_os_push_push_ring_data(dhdp, DHD_EVENT_RING_ID,
+ &event_data->event, payload_len);
+ }
+ }
+
+ /* assign parsed ssid array */
+ if (request->n_ssids)
+ request->ssids = &ssid[0];
+
+ if (wl_get_p2p_status(cfg, DISCOVERY_ON)) {
+ WL_PNO((">>> P2P discovery was ON. Disabling it\n"));
+ err = wl_cfgp2p_discover_enable_search(cfg, false);
+ if (unlikely(err)) {
+ wl_clr_drv_status(cfg, SCANNING, ndev);
+ goto out_err;
+ }
+ p2p_scan(cfg) = false;
+ }
+
+ err = wl_cfgscan_init_pno_escan(cfg, ndev, request);
+ if (err) {
+ goto out_err;
+ }
+ }
+ else {
+ WL_ERR(("FALSE PNO Event. (pfn_count == 0) \n"));
+ }
+ } else if (pfn_result_v3->version == PFN_SCANRESULT_VERSION_V3) {
+ err = wl_cfgscan_update_v3_schedscan_results(cfg, ndev,
+ pfn_result_v3, e->event_type);
+ if (err) {
+ goto out_err;
+ }
+ } else {
+		WL_ERR(("Unsupported version %d, expected %d, %d or %d\n",
+			pfn_result_v1->version, PFN_SCANRESULT_VERSION_V1,
+			PFN_SCANRESULT_VERSION_V2, PFN_SCANRESULT_VERSION_V3));
+ err = -EINVAL;
+ }
+
+out_err:
+
+ mutex_lock(&cfg->scan_sync);
+ if (err) {
+ /* Notify upper layer that sched scan has stopped so that
+ * upper layer can attempt fresh scan.
+ */
+ if (cfg->sched_scan_req) {
+ WL_ERR(("sched_scan stopped\n"));
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
+ cfg80211_sched_scan_stopped(wiphy, cfg->sched_scan_req->reqid);
+#else
+ cfg80211_sched_scan_stopped(wiphy);
+#endif /* KERNEL > 4.11.0 */
+ cfg->sched_scan_req = NULL;
+ } else {
+ WL_ERR(("sched scan req null!\n"));
+ }
+ cfg->sched_scan_running = FALSE;
+ CLR_TS(cfg, scan_start);
+ }
+ mutex_unlock(&cfg->scan_sync);
+
+ if (request) {
+ MFREE(cfg->osh, request,
+ sizeof(*request) + sizeof(*request->channels) * n_pfn_results);
+ }
+ if (channel) {
+ MFREE(cfg->osh, channel,
+ (sizeof(struct ieee80211_channel) * n_pfn_results));
+ }
+
+ if (event_data) {
+ MFREE(cfg->osh, event_data, alloc_len);
+ }
+ return err;
+}
+#endif /* WL_SCHED_SCAN */
+
+#ifdef PNO_SUPPORT
+s32
+wl_notify_pfn_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+ struct net_device *ndev = NULL;
+#ifdef GSCAN_SUPPORT
+ void *ptr;
+ int send_evt_bytes = 0;
+ u32 event = be32_to_cpu(e->event_type);
+ struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+#endif /* GSCAN_SUPPORT */
+
+ WL_INFORM_MEM((">>> PNO Event\n"));
+
+ if (!data) {
+ WL_ERR(("Data received is NULL!\n"));
+ return 0;
+ }
+
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+#ifdef GSCAN_SUPPORT
+ ptr = dhd_dev_process_epno_result(ndev, data, event, &send_evt_bytes);
+ if (ptr) {
+ wl_cfgvendor_send_async_event(wiphy, ndev,
+ GOOGLE_SCAN_EPNO_EVENT, ptr, send_evt_bytes);
+ MFREE(cfg->osh, ptr, send_evt_bytes);
+ }
+ if (!dhd_dev_is_legacy_pno_enabled(ndev))
+ return 0;
+#endif /* GSCAN_SUPPORT */
+
+#ifndef WL_SCHED_SCAN
+	/* CUSTOMER_HW4 uses a different PNO wakelock time, per RB:5911 */
+ mutex_lock(&cfg->usr_sync);
+ /* TODO: Use cfg80211_sched_scan_results(wiphy); */
+	/* GregG: WAR since the supplicant stays busy and does not allow the kernel to suspend */
+ CFG80211_DISCONNECTED(ndev, 0, NULL, 0, false, GFP_KERNEL);
+ mutex_unlock(&cfg->usr_sync);
+#else
+ /* If cfg80211 scheduled scan is supported, report the pno results via sched
+ * scan results
+ */
+ wl_notify_sched_scan_results(cfg, ndev, e, data);
+#endif /* WL_SCHED_SCAN */
+ return 0;
+}
+#endif /* PNO_SUPPORT */
+
+#ifdef GSCAN_SUPPORT
+s32
+wl_notify_gscan_event(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+ s32 err = 0;
+ u32 event = be32_to_cpu(e->event_type);
+ void *ptr = NULL;
+ int send_evt_bytes = 0;
+ int event_type;
+ struct net_device *ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+ struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+ u32 len = ntoh32(e->datalen);
+ u32 buf_len = 0;
+
+ switch (event) {
+ case WLC_E_PFN_BEST_BATCHING:
+ err = dhd_dev_retrieve_batch_scan(ndev);
+ if (err < 0) {
+ WL_ERR(("Batch retrieval already in progress %d\n", err));
+ } else {
+ event_type = WIFI_SCAN_THRESHOLD_NUM_SCANS;
+ if (data && len) {
+ event_type = *((int *)data);
+ }
+ wl_cfgvendor_send_async_event(wiphy, ndev,
+ GOOGLE_GSCAN_BATCH_SCAN_EVENT,
+ &event_type, sizeof(int));
+ }
+ break;
+ case WLC_E_PFN_SCAN_COMPLETE:
+ event_type = WIFI_SCAN_COMPLETE;
+ wl_cfgvendor_send_async_event(wiphy, ndev,
+ GOOGLE_SCAN_COMPLETE_EVENT,
+ &event_type, sizeof(int));
+ break;
+ case WLC_E_PFN_BSSID_NET_FOUND:
+ ptr = dhd_dev_hotlist_scan_event(ndev, data, &send_evt_bytes,
+ HOTLIST_FOUND, &buf_len);
+ if (ptr) {
+ wl_cfgvendor_send_hotlist_event(wiphy, ndev,
+ ptr, send_evt_bytes, GOOGLE_GSCAN_GEOFENCE_FOUND_EVENT);
+ dhd_dev_gscan_hotlist_cache_cleanup(ndev, HOTLIST_FOUND);
+ MFREE(cfg->osh, ptr, send_evt_bytes);
+ } else {
+ err = -ENOMEM;
+ }
+ break;
+ case WLC_E_PFN_BSSID_NET_LOST:
+ /* WLC_E_PFN_BSSID_NET_LOST is conflict shared with WLC_E_PFN_SCAN_ALLGONE
+ * We currently do not use WLC_E_PFN_SCAN_ALLGONE, so if we get it, ignore
+ */
+ if (len) {
+ ptr = dhd_dev_hotlist_scan_event(ndev, data, &send_evt_bytes,
+ HOTLIST_LOST, &buf_len);
+ if (ptr) {
+ wl_cfgvendor_send_hotlist_event(wiphy, ndev,
+ ptr, send_evt_bytes, GOOGLE_GSCAN_GEOFENCE_LOST_EVENT);
+ dhd_dev_gscan_hotlist_cache_cleanup(ndev, HOTLIST_LOST);
+ MFREE(cfg->osh, ptr, send_evt_bytes);
+ } else {
+ err = -ENOMEM;
+ }
+ } else {
+ err = -EINVAL;
+ }
+ break;
+ case WLC_E_PFN_GSCAN_FULL_RESULT:
+ ptr = dhd_dev_process_full_gscan_result(ndev, data, len, &send_evt_bytes);
+ if (ptr) {
+ wl_cfgvendor_send_async_event(wiphy, ndev,
+ GOOGLE_SCAN_FULL_RESULTS_EVENT, ptr, send_evt_bytes);
+ MFREE(cfg->osh, ptr, send_evt_bytes);
+ } else {
+ err = -ENOMEM;
+ }
+ break;
+ case WLC_E_PFN_SSID_EXT:
+ ptr = dhd_dev_process_epno_result(ndev, data, event, &send_evt_bytes);
+ if (ptr) {
+ wl_cfgvendor_send_async_event(wiphy, ndev,
+ GOOGLE_SCAN_EPNO_EVENT, ptr, send_evt_bytes);
+ MFREE(cfg->osh, ptr, send_evt_bytes);
+ } else {
+ err = -ENOMEM;
+ }
+ break;
+ default:
+ WL_ERR(("Unknown event %d\n", event));
+ break;
+ }
+ return err;
+}
+#endif /* GSCAN_SUPPORT */
+
+void wl_cfg80211_set_passive_scan(struct net_device *dev, char *command)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+
+ if (strcmp(command, "SCAN-ACTIVE") == 0) {
+ cfg->active_scan = 1;
+ } else if (strcmp(command, "SCAN-PASSIVE") == 0) {
+ cfg->active_scan = 0;
+ } else
+ WL_ERR(("Unknown command \n"));
+ return;
+}
+
+void
+wl_cfgscan_listen_complete_work(struct work_struct *work)
+{
+ struct bcm_cfg80211 *cfg = NULL;
+ BCM_SET_CONTAINER_OF(cfg, work, struct bcm_cfg80211, loc.work.work);
+
+ WL_ERR(("listen timeout\n"));
+ /* listen not completed. Do recovery */
+ if (!cfg->loc.in_progress) {
+ WL_ERR(("No listen in progress!\n"));
+ return;
+ }
+ wl_cfgscan_notify_listen_complete(cfg);
+}
+
+s32
+wl_cfgscan_notify_listen_complete(struct bcm_cfg80211 *cfg)
+{
+ WL_DBG(("listen on channel complete! cookie:%llu\n", cfg->last_roc_id));
+ if (cfg->loc.wdev && cfg->loc.in_progress) {
+#if defined(WL_CFG80211_P2P_DEV_IF)
+ cfg80211_remain_on_channel_expired(cfg->loc.wdev, cfg->last_roc_id,
+ &cfg->remain_on_chan, GFP_KERNEL);
+#else
+ cfg80211_remain_on_channel_expired(cfg->loc.wdev->netdev, cfg->last_roc_id,
+ &cfg->remain_on_chan, cfg->remain_on_chan_type, GFP_KERNEL);
+#endif
+ cfg->loc.in_progress = false;
+ cfg->loc.wdev = NULL;
+ }
+ return BCME_OK;
+}
+
+static void
+wl_init_scan_params(struct bcm_cfg80211 *cfg, u8 *params, u16 params_size,
+ u32 scan_type, u32 action, u32 passive_time)
+{
+ u32 sync_id = 0;
+ wl_escan_params_t *eparams = NULL;
+ wl_escan_params_v2_t *eparams_v2 = NULL;
+ wl_scan_params_t *scanparams = NULL;
+ wl_scan_params_v2_t *scanparams_v2 = NULL;
+
+ wl_escan_set_sync_id(sync_id, cfg);
+ if (cfg->scan_params_v2) {
+ eparams_v2 = (wl_escan_params_v2_t *)params;
+ eparams_v2->version = htod32(ESCAN_REQ_VERSION_V2);
+ eparams_v2->action = htod16(action);
+ eparams_v2->sync_id = sync_id;
+ scanparams_v2 = (wl_scan_params_v2_t *)&eparams_v2->params;
+ (void)memcpy_s(&scanparams_v2->bssid, ETHER_ADDR_LEN, &ether_bcast, ETHER_ADDR_LEN);
+ scanparams_v2->version = htod16(WL_SCAN_PARAMS_VERSION_V2);
+ scanparams_v2->length = htod16(sizeof(wl_scan_params_v2_t));
+ scanparams_v2->bss_type = DOT11_BSSTYPE_ANY;
+ scanparams_v2->scan_type = htod32(scan_type);
+ scanparams_v2->nprobes = htod32(-1);
+ scanparams_v2->active_time = htod32(-1);
+ scanparams_v2->passive_time = htod32(passive_time);
+ scanparams_v2->home_time = htod32(-1);
+ bzero(&scanparams_v2->ssid, sizeof(wlc_ssid_t));
+ } else {
+ eparams = (wl_escan_params_t *)params;
+ eparams->version = htod32(ESCAN_REQ_VERSION);
+ eparams->action = htod16(action);
+ eparams->sync_id = sync_id;
+ scanparams = (wl_scan_params_t *)&eparams->params;
+ (void)memcpy_s(&scanparams->bssid, ETHER_ADDR_LEN, &ether_bcast, ETHER_ADDR_LEN);
+ scanparams->bss_type = DOT11_BSSTYPE_ANY;
+ scanparams->scan_type = 0;
+ scanparams->nprobes = htod32(-1);
+ scanparams->active_time = htod32(-1);
+ scanparams->passive_time = htod32(passive_time);
+ scanparams->home_time = htod32(-1);
+ bzero(&scanparams->ssid, sizeof(wlc_ssid_t));
+ }
+}
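+
+/* In both layouts above, the htod32(-1) values (nprobes, active_time,
+ * home_time) appear to act as "use firmware default" sentinels; only
+ * passive_time is set explicitly, since it carries the requested listen
+ * dwell time.
+ */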
+
+/* timeout for recovering the upper layer state machine */
+#define WL_LISTEN_TIMEOUT 3000u
+
+s32
+wl_cfgscan_cancel_listen_on_channel(struct bcm_cfg80211 *cfg, bool notify_user)
+{
+ WL_DBG(("Enter\n"));
+
+ mutex_lock(&cfg->scan_sync);
+ if (!cfg->loc.in_progress) {
+ WL_ERR(("listen not in progress. do nothing\n"));
+ goto exit;
+ }
+
+ if (delayed_work_pending(&cfg->loc.work)) {
+ cancel_delayed_work_sync(&cfg->loc.work);
+ }
+
+ /* abort scan listen */
+ _wl_cfgscan_cancel_scan(cfg);
+
+ if (notify_user) {
+ wl_cfgscan_notify_listen_complete(cfg);
+ }
+ cfg->loc.in_progress = false;
+ cfg->loc.wdev = NULL;
+exit:
+ mutex_unlock(&cfg->scan_sync);
+ return 0;
+}
+
+s32
+wl_cfgscan_listen_on_channel(struct bcm_cfg80211 *cfg, struct wireless_dev *wdev,
+ struct ieee80211_channel *channel, unsigned int duration)
+{
+ u32 dwell = duration;
+ u32 chanspec, params_size;
+ u16 chanspec_num = 0;
+ s32 bssidx = -1;
+ s32 err = 0;
+ struct net_device *ndev = NULL;
+ u8 *params = NULL;
+ wl_escan_params_t *eparams = NULL;
+ wl_escan_params_v2_t *eparams_v2 = NULL;
+ wl_scan_params_t *scanparams = NULL;
+ wl_scan_params_v2_t *scanparams_v2 = NULL;
+ u16 *chanspec_list = NULL;
+ u32 channel_num = 0, scan_type = 0;
+
+ WL_DBG(("Enter \n"));
+ if (!wdev) {
+ WL_ERR(("wdev null!\n"));
+ return -EINVAL;
+ }
+
+ mutex_lock(&cfg->scan_sync);
+ if (wl_get_drv_status_all(cfg, SCANNING)) {
+ WL_ERR(("Scanning in progress, avoid listen on channel\n"));
+ err = -EBUSY;
+ goto exit;
+ }
+ if (cfg->loc.in_progress == true) {
+ WL_ERR(("Listen in progress\n"));
+ err = -EAGAIN;
+ goto exit;
+ }
+ bssidx = wl_get_bssidx_by_wdev(cfg, wdev);
+ if (bssidx < 0) {
+ WL_ERR(("invalid bssidx!\n"));
+ err = -EINVAL;
+ goto exit;
+ }
+
+ /* Use the primary ndev for netdev-less wdevs; BSSIDX will point to the right I/F */
+ ndev = wdev->netdev ? wdev->netdev : bcmcfg_to_prmry_ndev(cfg);
+
+ if (cfg->scan_params_v2) {
+ params_size = (WL_SCAN_PARAMS_V2_FIXED_SIZE +
+ OFFSETOF(wl_escan_params_v2_t, params));
+ } else {
+ params_size = (WL_SCAN_PARAMS_FIXED_SIZE + OFFSETOF(wl_escan_params_t, params));
+ }
+
+ /* Single channel for the listen case. Add a u16 pad for alignment */
+ chanspec_num = 1;
+ params_size += (chanspec_num + 1);
+
+ /* Allocate space for populating single ssid in wl_escan_params_t struct */
+ params_size += ((u32) sizeof(struct wlc_ssid));
+
+ params = MALLOCZ(cfg->osh, params_size);
+ if (params == NULL) {
+ err = -ENOMEM;
+ WL_ERR(("listen fail. no mem.\n"));
+ goto exit;
+ }
+
+ scan_type = WL_SCANFLAGS_PASSIVE | WL_SCANFLAGS_LISTEN;
+
+ wl_init_scan_params(cfg, params, params_size,
+ scan_type, WL_SCAN_ACTION_START, dwell);
+
+ channel_num = (chanspec_num & WL_SCAN_PARAMS_COUNT_MASK);
+ if (cfg->scan_params_v2) {
+ eparams_v2 = (wl_escan_params_v2_t *)params;
+ scanparams_v2 = (wl_scan_params_v2_t *)&eparams_v2->params;
+ chanspec_list = scanparams_v2->channel_list;
+ scanparams_v2->channel_num = channel_num;
+ } else {
+ eparams = (wl_escan_params_t *)params;
+ scanparams = (wl_scan_params_t *)&eparams->params;
+ chanspec_list = scanparams->channel_list;
+ scanparams->channel_num = channel_num;
+ }
+
+ /* Copy the single listen channel */
+ chanspec = wl_freq_to_chanspec(channel->center_freq);
+ chanspec_list[0] = chanspec;
+
+ err = wldev_iovar_setbuf_bsscfg(ndev, "escan", params, params_size,
+ cfg->escan_ioctl_buf, WLC_IOCTL_MEDLEN, bssidx, &cfg->ioctl_buf_sync);
+ if (unlikely(err)) {
+ if (err == BCME_EPERM) {
+ /* Scan not permitted at this point in time */
+ WL_DBG((" listen not permitted at this time (%d)\n", err));
+ } else {
+ WL_ERR((" listen set error (%d)\n", err));
+ }
+ goto exit;
+ } else {
+ unsigned long listen_timeout = dwell + WL_LISTEN_TIMEOUT;
+ WL_DBG(("listen started. chanspec:%x\n", chanspec));
+ cfg->loc.in_progress = true;
+ cfg->loc.wdev = wdev;
+
+ if (schedule_delayed_work(&cfg->loc.work,
+ msecs_to_jiffies(listen_timeout))) {
+
+#if defined(BCMDONGLEHOST) && defined(OEM_ANDROID)
+ DHD_PM_WAKE_LOCK_TIMEOUT(cfg->pub, listen_timeout);
+#endif /* BCMDONGLEHOST && OEM_ANDROID */
+
+ } else {
+ WL_ERR(("Can't schedule listen work handler\n"));
+ }
+ }
+
+exit:
+ if (params) {
+ MFREE(cfg->osh, params, params_size);
+ }
+ mutex_unlock(&cfg->scan_sync);
+ return err;
+}
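+
+/* Worked sizing example for the v1 layout, assuming one listen channel:
+ *   params_size = OFFSETOF(wl_escan_params_t, params)   (escan header)
+ *               + WL_SCAN_PARAMS_FIXED_SIZE             (fixed scan params)
+ *               + (chanspec_num + 1)                    (2 bytes: one u16 chanspec)
+ *               + sizeof(struct wlc_ssid)               (single zeroed SSID slot)
+ * For chanspec_num == 1 the "(chanspec_num + 1)" term happens to equal
+ * sizeof(u16), i.e. exactly one chanspec entry with no spare padding.
+ */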
+
+#define LONG_LISTEN_TIME 2000
+#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+static void
+wl_priortize_scan_over_listen(struct bcm_cfg80211 *cfg,
+ struct net_device *ndev, unsigned int duration)
+{
+ WL_DBG(("scan is running. go to fake listen state\n"));
+ wl_set_drv_status(cfg, FAKE_REMAINING_ON_CHANNEL, ndev);
+
+ WL_DBG(("cancel current listen timer \n"));
+ del_timer_sync(&cfg->p2p->listen_timer);
+
+ wl_clr_p2p_status(cfg, LISTEN_EXPIRED);
+
+ INIT_TIMER(&cfg->p2p->listen_timer,
+ wl_cfgp2p_listen_expired, duration, 0);
+}
+#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+
+/* A few vendors use a hard-coded static ndev (p2p0) for p2p discovery */
+#define IS_P2P_DISC_NDEV(wdev) \
+ (wdev->netdev ? (strncmp(wdev->netdev->name, "p2p0", strlen("p2p0")) == 0) : false)
+
+s32
+wl_cfgscan_remain_on_channel(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev,
+ struct ieee80211_channel *channel,
+#if !defined(WL_CFG80211_P2P_DEV_IF)
+ enum nl80211_channel_type channel_type,
+#endif /* WL_CFG80211_P2P_DEV_IF */
+ unsigned int duration, u64 *cookie)
+{
+ s32 target_channel;
+ u32 id;
+ s32 err = BCME_OK;
+ struct net_device *ndev = NULL;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct wireless_dev *wdev;
+
+ RETURN_EIO_IF_NOT_UP(cfg);
+
+ mutex_lock(&cfg->usr_sync);
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+#if defined(WL_CFG80211_P2P_DEV_IF)
+ wdev = cfgdev;
+#else
+ wdev = ndev_to_wdev(ndev);
+#endif
+ if (!wdev) {
+ WL_ERR(("wdev null\n"));
+ err = -EINVAL;
+ goto exit;
+ }
+
+ target_channel = ieee80211_frequency_to_channel(channel->center_freq);
+
+ WL_DBG(("Enter, channel: %d, duration ms (%d) scan_state:%d\n",
+ target_channel, duration,
+ (wl_get_drv_status(cfg, SCANNING, ndev)) ? TRUE : FALSE));
+
+#ifdef WL_BCNRECV
+ /* check fakeapscan in progress then abort */
+ wl_android_bcnrecv_stop(ndev, WL_BCNRECV_LISTENBUSY);
+#endif /* WL_BCNRECV */
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+ if ((wdev->iftype == NL80211_IFTYPE_P2P_DEVICE) || IS_P2P_DISC_NDEV(wdev))
+#else
+ if (cfg->p2p)
+#endif
+ {
+ /* p2p discovery */
+ if (!cfg->p2p) {
+ WL_ERR(("cfg->p2p is not initialized\n"));
+ err = BCME_ERROR;
+ goto exit;
+ }
+
+#ifdef P2P_LISTEN_OFFLOADING
+ if (wl_get_p2p_status(cfg, DISC_IN_PROGRESS)) {
+ WL_ERR(("P2P_FIND: Discovery offload is in progress\n"));
+ err = -EAGAIN;
+ goto exit;
+ }
+#endif /* P2P_LISTEN_OFFLOADING */
+
+ if (wl_get_drv_status_all(cfg, SCANNING)) {
+#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+ if (duration > LONG_LISTEN_TIME) {
+ wl_cfgscan_cancel_scan(cfg);
+ } else {
+ wl_priortize_scan_over_listen(cfg, ndev, duration);
+ err = BCME_OK;
+ goto exit;
+ }
+#else
+ wl_cfgscan_cancel_scan(cfg);
+#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+ }
+
+#ifdef WL_CFG80211_SYNC_GON
+ if (wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM_LISTEN)) {
+ /* Do not enter listen mode again if we are already in listen mode for
+ * the next action frame; the remain-on-channel completion will be
+ * signalled by the action frame completion.
+ */
+#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+ wl_set_drv_status(cfg, FAKE_REMAINING_ON_CHANNEL, ndev);
+#else
+ wl_set_drv_status(cfg, REMAINING_ON_CHANNEL, ndev);
+#endif
+ goto exit;
+ }
+#endif /* WL_CFG80211_SYNC_GON */
+
+ if (!cfg->p2p->on) {
+ /* In case of a p2p_listen command, the supplicant may send
+ * remain_on_channel without turning on P2P
+ */
+ p2p_on(cfg) = true;
+ }
+
+ err = wl_cfgp2p_enable_discovery(cfg, ndev, NULL, 0);
+ if (unlikely(err)) {
+ goto exit;
+ }
+ err = wl_cfgp2p_discover_listen(cfg, target_channel, duration);
+ if (err == BCME_OK) {
+ wl_set_drv_status(cfg, REMAINING_ON_CHANNEL, ndev);
+ } else {
+#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+ if (err == BCME_BUSY) {
+ /* If the listen failed, the firmware may be in an internal scanning
+ * state, so other scan requests must not abort it
+ */
+ wl_set_drv_status(cfg, FAKE_REMAINING_ON_CHANNEL, ndev);
+ /* WAR: report success to prevent a cookie mismatch in wpa_supplicant;
+ * the expiry timer will send the completion to the upper layer
+ */
+ err = BCME_OK;
+ }
+#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+ }
+ } else if (wdev->iftype == NL80211_IFTYPE_STATION ||
+ wdev->iftype == NL80211_IFTYPE_AP) {
+ WL_DBG(("LISTEN ON CHANNEL\n"));
+ err = wl_cfgscan_listen_on_channel(cfg, wdev, channel, duration);
+ }
+
+exit:
+ if (err == BCME_OK) {
+ WL_DBG(("Success\n"));
+ (void)memcpy_s(&cfg->remain_on_chan, sizeof(struct ieee80211_channel),
+ channel, sizeof(struct ieee80211_channel));
+#if defined(WL_ENABLE_P2P_IF)
+ cfg->remain_on_chan_type = channel_type;
+#endif /* WL_ENABLE_P2P_IF */
+ id = ++cfg->last_roc_id;
+ if (id == 0) {
+ id = ++cfg->last_roc_id;
+ }
+ *cookie = id;
+
+ /* Notify userspace that listen has started */
+ CFG80211_READY_ON_CHANNEL(cfgdev, *cookie, channel, channel_type, duration, flags);
+ WL_INFORM_MEM(("listen started on channel:%d duration (ms):%d cookie:%llu\n",
+ target_channel, duration, *cookie));
+ } else {
+ WL_ERR(("Failed to set remain-on-channel (err=%d cookie:%llu)\n", err, *cookie));
+ wl_flush_fw_log_buffer(ndev, FW_LOGSET_MASK_ALL);
+ }
+ mutex_unlock(&cfg->usr_sync);
+ return err;
+}
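+
+/* The cookie returned above is a plain incrementing counter; the double
+ * increment on wrap skips the value 0, presumably so a live cookie can
+ * never be confused with an unset/zero cookie when the cancel path
+ * compares against cfg->last_roc_id.
+ */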
+
+s32
+wl_cfgscan_cancel_remain_on_channel(struct wiphy *wiphy,
+ bcm_struct_cfgdev *cfgdev, u64 cookie)
+{
+ s32 err = 0;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+#ifdef P2PLISTEN_AP_SAMECHN
+ struct net_device *dev;
+#endif /* P2PLISTEN_AP_SAMECHN */
+
+ RETURN_EIO_IF_NOT_UP(cfg);
+
+#ifdef DHD_IFDEBUG
+ PRINT_WDEV_INFO(cfgdev);
+#endif /* DHD_IFDEBUG */
+
+ mutex_lock(&cfg->usr_sync);
+#if defined(WL_CFG80211_P2P_DEV_IF)
+ WL_DBG(("cancel listen for iftype:%d\n", cfgdev->iftype));
+ if ((cfgdev->iftype != NL80211_IFTYPE_P2P_DEVICE) &&
+ !IS_P2P_DISC_NDEV(cfgdev)) {
+ /* Handle non-p2p cases here */
+ err = wl_cfgscan_cancel_listen_on_channel(cfg, false);
+ goto exit;
+ }
+#else
+ WL_DBG(("cancel listen for netdev_ifidx: %d \n", cfgdev->ifindex));
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+#ifdef P2PLISTEN_AP_SAMECHN
+ if (cfg && cfg->p2p_resp_apchn_status) {
+ dev = bcmcfg_to_prmry_ndev(cfg);
+ wl_cfg80211_set_p2p_resp_ap_chn(dev, 0);
+ cfg->p2p_resp_apchn_status = false;
+ WL_DBG(("p2p_resp_apchn_status Turn OFF \n"));
+ }
+#endif /* P2PLISTEN_AP_SAMECHN */
+
+ if (cfg->last_roc_id == cookie) {
+ WL_DBG(("cancel p2p listen. cookie:%llu\n", cookie));
+ wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0,
+ wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE));
+ } else {
+ WL_ERR(("wl_cfg80211_cancel_remain_on_channel: ignore, request cookie(%llu)"
+ " is not matched. (cur : %llu)\n",
+ cookie, cfg->last_roc_id));
+ }
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+exit:
+#endif
+ mutex_unlock(&cfg->usr_sync);
+ return err;
+}
+
+#ifdef WL_GET_RCC
+int
+wl_android_get_roam_scan_chanlist(struct bcm_cfg80211 *cfg)
+{
+ s32 err = BCME_OK;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+ struct sk_buff *skb;
+ gfp_t kflags;
+ struct net_device *ndev;
+ struct wiphy *wiphy;
+ wlc_ssid_t *ssid = NULL;
+ wl_roam_channel_list_t channel_list;
+ uint16 channels[MAX_ROAM_CHANNEL] = {0};
+ int i = 0;
+
+ ndev = bcmcfg_to_prmry_ndev(cfg);
+ wiphy = bcmcfg_to_wiphy(cfg);
+
+ kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+ skb = CFG80211_VENDOR_EVENT_ALLOC(wiphy, ndev_to_wdev(ndev),
+ BRCM_VENDOR_GET_RCC_EVENT_BUF_LEN, BRCM_VENDOR_EVENT_RCC_INFO, kflags);
+
+ if (!skb) {
+ WL_ERR(("skb alloc failed"));
+ return BCME_NOMEM;
+ }
+
+ /* Get Current SSID */
+ ssid = (struct wlc_ssid *)wl_read_prof(cfg, ndev, WL_PROF_SSID);
+ if (!ssid) {
+ WL_ERR(("No SSID found in the saved profile\n"));
+ err = BCME_ERROR;
+ goto fail;
+ }
+
+ /* Get Current RCC List */
+ err = wldev_iovar_getbuf(ndev, "roamscan_channels", 0, 0,
+ (void *)&channel_list, sizeof(channel_list), NULL);
+ if (err) {
+ WL_ERR(("Failed to get roamscan channels, err = %d\n", err));
+ goto fail;
+ }
+ if (channel_list.n > MAX_ROAM_CHANNEL) {
+ WL_ERR(("Invalid roamscan channels count(%d)\n", channel_list.n));
+ goto fail;
+ }
+
+ WL_DBG(("SSID %s(%d), RCC(%d)\n", ssid->SSID, ssid->SSID_len, channel_list.n));
+ for (i = 0; i < channel_list.n; i++) {
+ channels[i] = CHSPEC_CHANNEL(channel_list.channels[i]);
+ WL_DBG(("Chanspec[%d] CH:%03d(0x%04x)\n",
+ i, channels[i], channel_list.channels[i]));
+ }
+
+ err = nla_put_string(skb, RCC_ATTRIBUTE_SSID, ssid->SSID);
+ if (unlikely(err)) {
+ WL_ERR(("nla_put_string RCC_ATTRIBUTE_SSID failed\n"));
+ goto fail;
+ }
+
+ err = nla_put_u32(skb, RCC_ATTRIBUTE_SSID_LEN, ssid->SSID_len);
+ if (unlikely(err)) {
+ WL_ERR(("nla_put_u32 RCC_ATTRIBUTE_SSID_LEN failed\n"));
+ goto fail;
+ }
+
+ err = nla_put_u32(skb, RCC_ATTRIBUTE_NUM_CHANNELS, channel_list.n);
+ if (unlikely(err)) {
+ WL_ERR(("nla_put_u32 RCC_ATTRIBUTE_NUM_CHANNELS failed\n"));
+ goto fail;
+ }
+
+ err = nla_put(skb, RCC_ATTRIBUTE_CHANNEL_LIST,
+ sizeof(uint16) * MAX_ROAM_CHANNEL, channels);
+ if (unlikely(err)) {
+ WL_ERR(("nla_put RCC_ATTRIBUTE_CHANNEL_LIST failed\n"));
+ goto fail;
+ }
+
+ cfg80211_vendor_event(skb, kflags);
+
+ return err;
+
+fail:
+ if (skb) {
+ nlmsg_free(skb);
+ }
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) */
+ return err;
+}
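+
+/* The RCC event built above carries, in order: the current SSID and its
+ * length, the roam channel count, and a fixed-size array of
+ * MAX_ROAM_CHANNEL u16 channel entries (unused tail entries remain 0).
+ * A receiver should therefore walk the list using
+ * RCC_ATTRIBUTE_NUM_CHANNELS rather than the attribute length.
+ */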
+#endif /* WL_GET_RCC */
+
+/*
+ * This function prepares the association channel(s)
+ */
+s32
+wl_get_assoc_channels(struct bcm_cfg80211 *cfg,
+ struct net_device *dev, wlcfg_assoc_info_t *info)
+{
+#ifdef ESCAN_CHANNEL_CACHE
+ s32 err;
+ u32 max_channels = MAX_ROAM_CHANNEL;
+ u16 rcc_chan_cnt = 0;
+
+ /*
+ * If this is a broadcast join, or no channel information was provided by
+ * user space, use channels from the ESCAN_CHANNEL_CACHE. For other cases,
+ * where a target channel is available, update the RCC via iovar.
+ *
+ * For a given SSID there might be multiple APs on different channels, and
+ * the FW would scan all those channels before deciding on the AP.
+ */
+ if (cfg->rcc_enabled) {
+ wlc_ssid_t ssid;
+ int band;
+ chanspec_t chanspecs[MAX_ROAM_CHANNEL] = {0};
+ chanspec_t target_chspec;
+
+ err = wldev_get_band(dev, &band);
+ if (!err) {
+ set_roam_band(band);
+ }
+
+ if (memcpy_s(ssid.SSID, sizeof(ssid.SSID), info->ssid, info->ssid_len) != BCME_OK) {
+ WL_ERR(("ssid copy failed\n"));
+ return -EINVAL;
+ }
+ ssid.SSID_len = (uint32)info->ssid_len;
+
+ if (info->targeted_join && info->chanspecs[0]) {
+ target_chspec = info->chanspecs[0];
+ } else {
+ target_chspec = INVCHANSPEC;
+ }
+ rcc_chan_cnt = get_roam_channel_list(cfg, target_chspec, chanspecs,
+ max_channels, &ssid, ioctl_version);
+ if ((!info->targeted_join) || (info->bssid_hint) ||
+ (info->chan_cnt == 0)) {
+#if !defined(DISABLE_FW_NW_SEL_FOR_6G) && defined(WL_6G_BAND)
+ int i;
+ /* If a 6G AP is present, override bssid_hint and let the FW do
+ * network selection. The supplicant's bssid_hint logic has no
+ * support for 6G, HE, or the OCE load IE.
+ */
+ for (i = 0; i < rcc_chan_cnt; i++) {
+ if (CHSPEC_IS6G(chanspecs[i])) {
+ WL_INFORM_MEM(("6G channel in rcc. use fw nw sel\n"));
+ /* skip bssid hint inclusion and provide bcast bssid */
+ info->bssid_hint = false;
+ (void)memcpy_s(&info->bssid,
+ ETH_ALEN, &ether_bcast, ETH_ALEN);
+ break;
+ }
+ }
+#endif /* !DISABLE_FW_NW_SEL_FOR_6G && WL_6G_BAND */
+ /* Use RCC channels as part of join params */
+ info->chan_cnt = rcc_chan_cnt;
+ if (memcpy_s(info->chanspecs, sizeof(info->chanspecs), chanspecs,
+ (sizeof(chanspec_t) * rcc_chan_cnt)) != BCME_OK) {
+ WL_ERR(("chanspec copy failed!\n"));
+ return -EINVAL;
+ }
+ }
+ }
+#endif /* ESCAN_CHANNEL_CACHE */
+
+ WL_DBG_MEM(("channel cnt:%d\n", info->chan_cnt));
+ return BCME_OK;
+}
+
+#ifdef DHD_GET_VALID_CHANNELS
+bool
+wl_cfgscan_is_dfs_set(wifi_band band)
+{
+ switch (band) {
+ case WIFI_BAND_A_DFS:
+ case WIFI_BAND_A_WITH_DFS:
+ case WIFI_BAND_ABG_WITH_DFS:
+ case WIFI_BAND_24GHZ_5GHZ_WITH_DFS_6GHZ:
+ return true;
+ default:
+ return false;
+ }
+ return false;
+}
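+
+/* The wifi_band values (see wl_cfgscan.h) compose as bit flags, so DFS
+ * inclusion is effectively bit 2 (value 4). For example:
+ *   WIFI_BAND_A_WITH_DFS (6)   = WIFI_BAND_A (2)  | WIFI_BAND_A_DFS (4)
+ *   WIFI_BAND_ABG_WITH_DFS (7) = WIFI_BAND_BG (1) | WIFI_BAND_A (2) | WIFI_BAND_A_DFS (4)
+ * The switch above simply enumerates the combinations carrying that bit.
+ */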
+
+s32
+wl_cfgscan_get_band_freq_list(struct bcm_cfg80211 *cfg, int band,
+ uint16 *list, uint32 *num_channels)
+{
+ s32 err = BCME_OK;
+ uint32 i, freq, list_count, count = 0;
+ struct net_device *dev = bcmcfg_to_prmry_ndev(cfg);
+ uint32 chspec, chaninfo;
+ bool dfs_set = false;
+
+ dfs_set = wl_cfgscan_is_dfs_set(band);
+ err = wldev_iovar_getbuf_bsscfg(dev, "chan_info_list", NULL,
+ 0, list, CHANINFO_LIST_BUF_SIZE, 0, &cfg->ioctl_buf_sync);
+ if (err == BCME_UNSUPPORTED) {
+ WL_INFORM(("get chan_info_list, UNSUPPORTED\n"));
+ return err;
+ } else if (err != BCME_OK) {
+ WL_ERR(("get chan_info_list err(%d)\n", err));
+ return err;
+ }
+
+ list_count = ((wl_chanspec_list_v1_t *)list)->count;
+ for (i = 0; i < list_count; i++) {
+ chspec = dtoh32(((wl_chanspec_list_v1_t *)list)->chspecs[i].chanspec);
+ chaninfo = dtoh32(((wl_chanspec_list_v1_t *)list)->chspecs[i].chaninfo);
+ freq = wl_channel_to_frequency(wf_chspec_ctlchan(chspec),
+ CHSPEC_BAND(chspec));
+ if (((band & WIFI_BAND_BG) && CHSPEC_IS2G(chspec)) ||
+ ((band & WIFI_BAND_6GHZ) && CHSPEC_IS6G(chspec))) {
+ /* add 2g/6g channels */
+ list[i] = freq;
+ count++;
+ }
+ /* handle 5g separately */
+ if (CHSPEC_IS5G(chspec)) {
+ if (!((band == WIFI_BAND_A_DFS) && IS_DFS(chaninfo)) &&
+ !(band & WIFI_BAND_A)) {
+ /* Neither the DFS-only case nor the 5G case */
+ continue;
+ }
+
+ if ((band & WIFI_BAND_A) && !dfs_set && IS_DFS(chaninfo)) {
+ continue;
+ }
+
+ list[i] = freq;
+ count++;
+ }
+ }
+ *num_channels = count;
+ return err;
+}
+#endif /* DHD_GET_VALID_CHANNELS */
diff --git a/bcmdhd.101.10.361.x/wl_cfgscan.h b/bcmdhd.101.10.361.x/wl_cfgscan.h
new file mode 100755
index 0000000..7488332
--- /dev/null
+++ b/bcmdhd.101.10.361.x/wl_cfgscan.h
@@ -0,0 +1,178 @@
+/*
+ * Header for Linux cfg80211 scan
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _wl_cfgscan_h_
+#define _wl_cfgscan_h_
+
+#include <linux/wireless.h>
+#include <typedefs.h>
+#include <ethernet.h>
+#include <wlioctl.h>
+#include <net/cfg80211.h>
+#include <linux/rfkill.h>
+#include <osl.h>
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
+#define GET_SCAN_WDEV(scan_request) \
+ (scan_request && scan_request->dev) ? scan_request->dev->ieee80211_ptr : NULL;
+#else
+#define GET_SCAN_WDEV(scan_request) \
+ scan_request ? scan_request->wdev : NULL;
+#endif
+#ifdef WL_SCHED_SCAN
+#define GET_SCHED_SCAN_WDEV(scan_request) \
+ (scan_request && scan_request->dev) ? scan_request->dev->ieee80211_ptr : NULL;
+#endif /* WL_SCHED_SCAN */
+
+#ifdef DUAL_ESCAN_RESULT_BUFFER
+#define wl_escan_set_sync_id(a, b) ((a) = (b)->escan_info.cur_sync_id)
+#define wl_escan_set_type(a, b) ((a)->escan_info.escan_type\
+ [((a)->escan_info.cur_sync_id)%SCAN_BUF_CNT] = (b))
+#else
+#define wl_escan_set_sync_id(a, b) ((a) = htod16((b)->escan_sync_id_cntr++))
+#define wl_escan_set_type(a, b)
+#endif /* DUAL_ESCAN_RESULT_BUFFER */
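+/* With DUAL_ESCAN_RESULT_BUFFER the sync id is taken from the currently
+ * active of the SCAN_BUF_CNT ping-pong result buffers (cur_sync_id modulo
+ * SCAN_BUF_CNT selects the buffer); without it, the id is a simple per-cfg
+ * counter converted to dongle byte order. Either way, the id stamped into
+ * the escan request is what lets event handling match results back to the
+ * originating request.
+ */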
+extern s32 wl_escan_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data);
+extern s32 wl_do_escan(struct bcm_cfg80211 *cfg, struct wiphy *wiphy,
+ struct net_device *ndev, struct cfg80211_scan_request *request);
+extern s32 __wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+ struct cfg80211_scan_request *request, struct cfg80211_ssid *this_ssid);
+#if defined(WL_CFG80211_P2P_DEV_IF)
+extern s32 wl_cfg80211_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request);
+#else
+extern s32 wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+ struct cfg80211_scan_request *request);
+extern int wl_cfg80211_scan_stop(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+#if defined(OEM_ANDROID) && defined(DHCP_SCAN_SUPPRESS)
+extern void wl_cfg80211_work_handler(struct work_struct *work);
+extern void wl_cfg80211_scan_supp_timerfunc(ulong data);
+#endif /* DHCP_SCAN_SUPPRESS */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+extern void wl_cfg80211_abort_scan(struct wiphy *wiphy, struct wireless_dev *wdev);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */
+extern s32 wl_init_scan(struct bcm_cfg80211 *cfg);
+extern int wl_cfg80211_scan_stop(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev);
+extern s32 wl_notify_scan_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data);
+extern void wl_cfg80211_set_passive_scan(struct net_device *dev, char *command);
+#ifdef PNO_SUPPORT
+extern s32 wl_notify_pfn_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data);
+#endif /* PNO_SUPPORT */
+#ifdef GSCAN_SUPPORT
+extern s32 wl_notify_gscan_event(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data);
+#endif /* GSCAN_SUPPORT */
+
+#ifdef WES_SUPPORT
+#ifdef CUSTOMER_SCAN_TIMEOUT_SETTING
+#define CUSTOMER_WL_SCAN_TIMER_INTERVAL_MS 25000 /* Scan timeout */
+enum wl_custom_scan_time_type {
+ WL_CUSTOM_SCAN_CHANNEL_TIME = 0,
+ WL_CUSTOM_SCAN_UNASSOC_TIME,
+ WL_CUSTOM_SCAN_PASSIVE_TIME,
+ WL_CUSTOM_SCAN_HOME_TIME,
+ WL_CUSTOM_SCAN_HOME_AWAY_TIME
+};
+extern s32 wl_cfg80211_custom_scan_time(struct net_device *dev,
+ enum wl_custom_scan_time_type type, int time);
+#endif /* CUSTOMER_SCAN_TIMEOUT_SETTING */
+#endif /* WES_SUPPORT */
+
+#if defined(SUPPORT_RANDOM_MAC_SCAN)
+int wl_cfg80211_set_random_mac(struct net_device *dev, bool enable);
+int wl_cfg80211_random_mac_enable(struct net_device *dev);
+int wl_cfg80211_random_mac_disable(struct net_device *dev);
+int wl_cfg80211_scan_mac_enable(struct net_device *dev);
+int wl_cfg80211_scan_mac_disable(struct net_device *dev);
+int wl_cfg80211_scan_mac_config(struct net_device *dev, uint8 *rand_mac, uint8 *rand_mask);
+#endif /* SUPPORT_RANDOM_MAC_SCAN */
+
+#ifdef WL_SCHED_SCAN
+extern int wl_cfg80211_sched_scan_start(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_sched_scan_request *request);
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(4, 11, 0))
+extern int wl_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev, u64 reqid);
+#else
+extern int wl_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev);
+#endif /* LINUX_VER > 4.11 */
+#endif /* WL_SCHED_SCAN */
+extern s32 wl_cfgscan_listen_on_channel(struct bcm_cfg80211 *cfg, struct wireless_dev *wdev,
+ struct ieee80211_channel *channel, unsigned int duration);
+extern void wl_cfgscan_listen_complete_work(struct work_struct *work);
+extern s32 wl_cfgscan_notify_listen_complete(struct bcm_cfg80211 *cfg);
+extern s32 wl_cfgscan_cancel_listen_on_channel(struct bcm_cfg80211 *cfg, bool notify_user);
+#if defined(WL_CFG80211_P2P_DEV_IF)
+extern s32 wl_cfgscan_remain_on_channel(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev,
+ struct ieee80211_channel *channel, unsigned int duration, u64 *cookie);
+#else
+extern s32 wl_cfgscan_remain_on_channel(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev,
+ struct ieee80211_channel *channel, enum nl80211_channel_type channel_type,
+ unsigned int duration, u64 *cookie);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+extern s32 wl_cfgscan_cancel_remain_on_channel(struct wiphy *wiphy,
+ bcm_struct_cfgdev *cfgdev, u64 cookie);
+extern chanspec_t wl_freq_to_chanspec(int freq);
+extern s32 wl_inform_single_bss(struct bcm_cfg80211 *cfg, wl_bss_info_t *bi, bool update_ssid);
+#ifdef WL_GET_RCC
+extern int wl_android_get_roam_scan_chanlist(struct bcm_cfg80211 *cfg);
+#endif /* WL_GET_RCC */
+extern s32 wl_get_assoc_channels(struct bcm_cfg80211 *cfg,
+ struct net_device *dev, wlcfg_assoc_info_t *info);
+extern void wl_cfgscan_cancel_scan(struct bcm_cfg80211 *cfg);
+extern void wl_cfgscan_scan_abort(struct bcm_cfg80211 *cfg);
+#ifdef DHD_GET_VALID_CHANNELS
+typedef enum {
+ WIFI_BAND_UNSPECIFIED,
+ /* 2.4 GHz */
+ WIFI_BAND_BG = 1,
+ /* 5 GHz without DFS */
+ WIFI_BAND_A = 2,
+ /* 5 GHz DFS only */
+ WIFI_BAND_A_DFS = 4,
+ /* 5 GHz with DFS */
+ WIFI_BAND_A_WITH_DFS = 6,
+ /* 2.4 GHz + 5 GHz; no DFS */
+ WIFI_BAND_ABG = 3,
+ /* 2.4 GHz + 5 GHz with DFS */
+ WIFI_BAND_ABG_WITH_DFS = 7,
+ /* 6GHz */
+ WIFI_BAND_6GHZ = 8,
+ /* 5 GHz no DFS + 6 GHz */
+ WIFI_BAND_5GHZ_6GHZ = 10,
+ /* 2.4 GHz + 5 GHz no DFS + 6 GHz */
+ WIFI_BAND_24GHZ_5GHZ_6GHZ = 11,
+ /* 2.4 GHz + 5 GHz with DFS + 6 GHz */
+ WIFI_BAND_24GHZ_5GHZ_WITH_DFS_6GHZ = 15
+} wifi_band;
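+
+/* These values compose as bit flags: bit 0 = 2.4 GHz, bit 1 = 5 GHz
+ * (non-DFS), bit 2 = 5 GHz DFS, bit 3 = 6 GHz. For example,
+ * WIFI_BAND_ABG (3) is BG|A, WIFI_BAND_5GHZ_6GHZ (10) is A|6GHZ, and
+ * WIFI_BAND_24GHZ_5GHZ_WITH_DFS_6GHZ (15) sets all four bits.
+ */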
+
+extern bool wl_cfgscan_is_dfs_set(wifi_band band);
+extern s32 wl_cfgscan_get_band_freq_list(struct bcm_cfg80211 *cfg, int band,
+ uint16 *list, uint32 *num_channels);
+#endif /* DHD_GET_VALID_CHANNELS */
+#endif /* _wl_cfgscan_h_ */
diff --git a/bcmdhd.101.10.361.x/wl_cfgvendor.c b/bcmdhd.101.10.361.x/wl_cfgvendor.c
new file mode 100755
index 0000000..f6672a9
--- /dev/null
+++ b/bcmdhd.101.10.361.x/wl_cfgvendor.c
@@ -0,0 +1,10061 @@
+/*
+ * Linux cfg80211 Vendor Extension Code
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+/*
+ * New vendor interface addition to nl80211/cfg80211 to allow vendors
+ * to implement proprietary features over the cfg80211 stack.
+ */
+
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+#include <linux/kernel.h>
+#include <linux/vmalloc.h>
+
+#include <bcmutils.h>
+#include <bcmwifi_channels.h>
+#include <bcmendian.h>
+#include <ethernet.h>
+#include <802.11.h>
+#include <linux/if_arp.h>
+#include <asm/uaccess.h>
+
+#if defined(BCMDONGLEHOST)
+#include <dngl_stats.h>
+#include "wifi_stats.h"
+#include <dhd.h>
+#include <dhd_debug.h>
+#include <dhdioctl.h>
+#include <wlioctl.h>
+#include <wlioctl_utils.h>
+#include <dhd_cfg80211.h>
+#ifdef DHD_PKT_LOGGING
+#include <dhd_pktlog.h>
+#endif /* DHD_PKT_LOGGING */
+#ifdef PNO_SUPPORT
+#include <dhd_pno.h>
+#endif /* PNO_SUPPORT */
+#ifdef RTT_SUPPORT
+#include <dhd_rtt.h>
+#endif /* RTT_SUPPORT */
+#endif /* defined(BCMDONGLEHOST) */
+
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include <linux/sched.h>
+#include <linux/etherdevice.h>
+#include <linux/wireless.h>
+#include <linux/ieee80211.h>
+#include <linux/wait.h>
+#include <net/cfg80211.h>
+#include <net/rtnetlink.h>
+
+#include <wlioctl.h>
+#include <wldev_common.h>
+#include <wl_cfg80211.h>
+#include <wl_cfgp2p.h>
+#include <wl_cfgscan.h>
+#ifdef WL_NAN
+#include <wl_cfgnan.h>
+#endif /* WL_NAN */
+
+#ifdef OEM_ANDROID
+#include <wl_android.h>
+#endif /* OEM_ANDROID */
+
+#include <wl_cfgvendor.h>
+#ifdef PROP_TXSTATUS
+#include <dhd_wlfc.h>
+#endif
+#include <brcm_nl80211.h>
+
+char*
+wl_get_kernel_timestamp(void)
+{
+ static char buf[32];
+ u64 ts_nsec;
+ unsigned long rem_nsec;
+
+ ts_nsec = local_clock();
+ rem_nsec = DIV_AND_MOD_U64_BY_U32(ts_nsec, NSEC_PER_SEC);
+ snprintf(buf, sizeof(buf), "%5lu.%06lu",
+ (unsigned long)ts_nsec, rem_nsec / NSEC_PER_USEC);
+
+ return buf;
+}
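+
+/* DIV_AND_MOD_U64_BY_U32(), as its name suggests, divides ts_nsec by
+ * NSEC_PER_SEC in place and yields the remainder, so e.g. ts_nsec =
+ * 5123456789 ns prints as "    5.123456" (seconds.microseconds), matching
+ * the printk timestamp format. Note that buf is static: the result must
+ * be consumed before the next call and is not safe for concurrent use.
+ */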
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT)
+#if defined(WL_SUPP_EVENT)
+int
+wl_cfgvendor_send_supp_eventstring(const char *func_name, const char *fmt, ...)
+{
+ char buf[SUPP_LOG_LEN] = {0};
+ struct bcm_cfg80211 *cfg;
+ struct wiphy *wiphy;
+ va_list args;
+ int len;
+ int prefix_len;
+ int rem_len;
+
+ cfg = wl_cfg80211_get_bcmcfg();
+ if (!cfg || !cfg->wdev) {
+ WL_DBG(("supp evt invalid arg\n"));
+ return BCME_OK;
+ }
+
+ wiphy = cfg->wdev->wiphy;
+ prefix_len = snprintf(buf, SUPP_LOG_LEN, "[DHD]<%s> %s: ",
+ wl_get_kernel_timestamp(), __func__);
+ /* Remaining buffer len */
+ rem_len = SUPP_LOG_LEN - (prefix_len + 1);
+ /* Print the arg list on to the remaining part of the buffer */
+ va_start(args, fmt);
+ len = vsnprintf((buf + prefix_len), rem_len, fmt, args);
+ va_end(args);
+ if (len < 0) {
+ return -EINVAL;
+ }
+
+ if (len > rem_len) {
+ /* If the returned length exceeds the remaining buffer length,
+ * the output was truncated; clamp to what actually fits.
+ */
+ len = rem_len;
+ }
+
+ /* Ensure the buffer is null terminated */
+ len += prefix_len;
+ buf[len] = '\0';
+ len++;
+
+ return wl_cfgvendor_send_async_event(wiphy,
+ bcmcfg_to_prmry_ndev(cfg), BRCM_VENDOR_EVENT_PRIV_STR, buf, len);
+}
+
+int
+wl_cfgvendor_notify_supp_event_str(const char *evt_name, const char *fmt, ...)
+{
+ char buf[SUPP_LOG_LEN] = {0};
+ struct bcm_cfg80211 *cfg;
+ struct wiphy *wiphy;
+ va_list args;
+ int len;
+ int prefix_len;
+ int rem_len;
+
+ cfg = wl_cfg80211_get_bcmcfg();
+ if (!cfg || !cfg->wdev) {
+ WL_DBG(("supp evt invalid arg\n"));
+ return BCME_OK;
+ }
+ wiphy = cfg->wdev->wiphy;
+ prefix_len = snprintf(buf, SUPP_LOG_LEN, "%s ", evt_name);
+ /* Remaining buffer len */
+ rem_len = SUPP_LOG_LEN - (prefix_len + 1);
+ /* Print the arg list on to the remaining part of the buffer */
+ va_start(args, fmt);
+ len = vsnprintf((buf + prefix_len), rem_len, fmt, args);
+ va_end(args);
+ if (len < 0) {
+ return -EINVAL;
+ }
+
+ if (len > rem_len) {
+ /* If the returned length exceeds the remaining buffer length,
+ * the output was truncated; clamp to what actually fits.
+ */
+ len = rem_len;
+ }
+
+ /* Ensure the buffer is null terminated */
+ len += prefix_len;
+ buf[len] = '\0';
+ len++;
+
+ return wl_cfgvendor_send_async_event(wiphy,
+ bcmcfg_to_prmry_ndev(cfg), BRCM_VENDOR_EVENT_PRIV_STR, buf, len);
+}
+#endif /* WL_SUPP_EVENT */
+
+/*
+ * This API is to be used for asynchronous vendor events. This
+ * shouldn't be used in response to a vendor command from its
+ * do_it handler context (instead wl_cfgvendor_send_cmd_reply should
+ * be used).
+ */
+int wl_cfgvendor_send_async_event(struct wiphy *wiphy,
+ struct net_device *dev, int event_id, const void *data, int len)
+{
+ gfp_t kflags;
+ struct sk_buff *skb;
+
+ kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+
+ /* Alloc the SKB for vendor_event */
+#if (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || \
+ LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
+ skb = cfg80211_vendor_event_alloc(wiphy, ndev_to_wdev(dev), len, event_id, kflags);
+#else
+ skb = cfg80211_vendor_event_alloc(wiphy, len, event_id, kflags);
+#endif /* (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || */
+ /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) */
+ if (!skb) {
+ WL_ERR(("skb alloc failed"));
+ return -ENOMEM;
+ }
+
+ /* Push the data to the skb */
+ nla_put_nohdr(skb, len, data);
+
+ cfg80211_vendor_event(skb, kflags);
+
+ return 0;
+}
+
+static int
+wl_cfgvendor_send_cmd_reply(struct wiphy *wiphy,
+ const void *data, int len)
+{
+ struct sk_buff *skb;
+ int err;
+
+ /* Alloc the SKB for vendor_event */
+ skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, len);
+ if (unlikely(!skb)) {
+ WL_ERR(("skb alloc failed"));
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ /* Push the data to the skb */
+ nla_put_nohdr(skb, len, data);
+ err = cfg80211_vendor_cmd_reply(skb);
+exit:
+ WL_DBG(("status %d\n", err));
+ return err;
+}
+
+static int
+wl_cfgvendor_get_feature_set(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int err = 0;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ int reply;
+
+ reply = dhd_dev_get_feature_set(bcmcfg_to_prmry_ndev(cfg));
+
+ err = wl_cfgvendor_send_cmd_reply(wiphy, &reply, sizeof(int));
+ if (unlikely(err))
+ WL_ERR(("Vendor Command reply failed ret:%d \n", err));
+
+ return err;
+}
+
+static int
+wl_cfgvendor_get_feature_set_matrix(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int err = 0;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct sk_buff *skb;
+ int reply;
+ int mem_needed, i;
+
+ mem_needed = VENDOR_REPLY_OVERHEAD +
+ (ATTRIBUTE_U32_LEN * MAX_FEATURE_SET_CONCURRRENT_GROUPS) + ATTRIBUTE_U32_LEN;
+
+ /* Alloc the SKB for vendor_event */
+ skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, mem_needed);
+ if (unlikely(!skb)) {
+ WL_ERR(("skb alloc failed"));
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ err = nla_put_u32(skb, ANDR_WIFI_ATTRIBUTE_NUM_FEATURE_SET,
+ MAX_FEATURE_SET_CONCURRRENT_GROUPS);
+ if (unlikely(err)) {
+ kfree_skb(skb);
+ goto exit;
+ }
+ for (i = 0; i < MAX_FEATURE_SET_CONCURRRENT_GROUPS; i++) {
+ reply = dhd_dev_get_feature_set_matrix(bcmcfg_to_prmry_ndev(cfg), i);
+ if (reply != WIFI_FEATURE_INVALID) {
+ err = nla_put_u32(skb, ANDR_WIFI_ATTRIBUTE_FEATURE_SET,
+ reply);
+ if (unlikely(err)) {
+ kfree_skb(skb);
+ goto exit;
+ }
+ }
+ }
+
+ err = cfg80211_vendor_cmd_reply(skb);
+
+ if (unlikely(err)) {
+ WL_ERR(("Vendor Command reply failed ret:%d \n", err));
+ }
+exit:
+ return err;
+}
+
+static int
+wl_cfgvendor_set_rand_mac_oui(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int err = -EINVAL;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ int type;
+
+ if (!data) {
+ WL_ERR(("data is not available\n"));
+ goto exit;
+ }
+
+ if (len <= 0) {
+ WL_ERR(("invalid len %d\n", len));
+ goto exit;
+ }
+
+ type = nla_type(data);
+
+ if (type == ANDR_WIFI_ATTRIBUTE_RANDOM_MAC_OUI) {
+ if (nla_len(data) != DOT11_OUI_LEN) {
+ WL_ERR(("nla_len not matched.\n"));
+ goto exit;
+ }
+ err = dhd_dev_cfg_rand_mac_oui(bcmcfg_to_prmry_ndev(cfg), nla_data(data));
+
+ if (unlikely(err))
+ WL_ERR(("Bad OUI, could not set:%d \n", err));
+ }
+exit:
+ return err;
+}
+#ifdef CUSTOM_FORCE_NODFS_FLAG
+static int
+wl_cfgvendor_set_nodfs_flag(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int err = -EINVAL;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ int type;
+ u32 nodfs;
+
+ if (!data) {
+ WL_ERR(("data is not available\n"));
+ return -EINVAL;
+ }
+
+ if (len <= 0) {
+ WL_ERR(("invalid len %d\n", len));
+ return -EINVAL;
+ }
+
+ type = nla_type(data);
+ if (type == ANDR_WIFI_ATTRIBUTE_NODFS_SET) {
+ nodfs = nla_get_u32(data);
+ err = dhd_dev_set_nodfs(bcmcfg_to_prmry_ndev(cfg), nodfs);
+ }
+
+ return err;
+}
+#endif /* CUSTOM_FORCE_NODFS_FLAG */
+
+static int
+wl_cfgvendor_set_country(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int err = BCME_ERROR, rem, type;
+ char country_code[WLC_CNTRY_BUF_SZ] = {0};
+ const struct nlattr *iter;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct net_device *primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+
+ nla_for_each_attr(iter, data, len, rem) {
+ type = nla_type(iter);
+ switch (type) {
+ case ANDR_WIFI_ATTRIBUTE_COUNTRY:
+ err = memcpy_s(country_code, WLC_CNTRY_BUF_SZ,
+ nla_data(iter), nla_len(iter));
+ if (err) {
+ WL_ERR(("Failed to copy country code: %d\n", err));
+ return err;
+ }
+ break;
+ default:
+ WL_ERR(("Unknown type: %d\n", type));
+ return err;
+ }
+ }
+ /* The country code is global for the dongle, hence we use the primary interface. */
+ err = wl_cfg80211_set_country_code(primary_ndev, country_code, true, true, 0);
+ if (err < 0) {
+ WL_ERR(("Set country failed ret:%d\n", err));
+ }
+
+ return err;
+}
+
+#ifdef GSCAN_SUPPORT
+int
+wl_cfgvendor_send_hotlist_event(struct wiphy *wiphy,
+ struct net_device *dev, void *data, int len, wl_vendor_event_t event)
+{
+ gfp_t kflags;
+ const void *ptr;
+ struct sk_buff *skb;
+ int malloc_len, total, iter_cnt_to_send, cnt;
+ gscan_results_cache_t *cache = (gscan_results_cache_t *)data;
+
+ total = len/sizeof(wifi_gscan_result_t);
+ while (total > 0) {
+ malloc_len = (total * sizeof(wifi_gscan_result_t)) + VENDOR_DATA_OVERHEAD;
+ if (malloc_len > NLMSG_DEFAULT_SIZE) {
+ malloc_len = NLMSG_DEFAULT_SIZE;
+ }
+ iter_cnt_to_send =
+ (malloc_len - VENDOR_DATA_OVERHEAD)/sizeof(wifi_gscan_result_t);
+ total = total - iter_cnt_to_send;
+
+ kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+
+ /* Alloc the SKB for vendor_event */
+#if (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || \
+ LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
+ skb = cfg80211_vendor_event_alloc(wiphy, ndev_to_wdev(dev),
+ malloc_len, event, kflags);
+#else
+ skb = cfg80211_vendor_event_alloc(wiphy, malloc_len, event, kflags);
+#endif /* (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || */
+ /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) */
+ if (!skb) {
+ WL_ERR(("skb alloc failed"));
+ return -ENOMEM;
+ }
+
+ while (cache && iter_cnt_to_send) {
+ ptr = (const void *) &cache->results[cache->tot_consumed];
+
+ if (iter_cnt_to_send < (cache->tot_count - cache->tot_consumed)) {
+ cnt = iter_cnt_to_send;
+ } else {
+ cnt = (cache->tot_count - cache->tot_consumed);
+ }
+
+ iter_cnt_to_send -= cnt;
+ cache->tot_consumed += cnt;
+ /* Push the data to the skb */
+ nla_append(skb, cnt * sizeof(wifi_gscan_result_t), ptr);
+ if (cache->tot_consumed == cache->tot_count) {
+ cache = cache->next;
+ }
+
+ }
+
+ cfg80211_vendor_event(skb, kflags);
+ }
+
+ return 0;
+}
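+
+/* Chunking sketch: with, say, len covering 100 wifi_gscan_result_t entries
+ * and the allocation capped at NLMSG_DEFAULT_SIZE, each event carries
+ * (NLMSG_DEFAULT_SIZE - VENDOR_DATA_OVERHEAD) / sizeof(wifi_gscan_result_t)
+ * results, and the outer loop keeps emitting events (walking chained cache
+ * nodes as needed) until all 100 results are consumed.
+ */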
+
+static int
+wl_cfgvendor_gscan_get_capabilities(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int err = 0;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ dhd_pno_gscan_capabilities_t *reply = NULL;
+ uint32 reply_len = 0;
+
+ reply = dhd_dev_pno_get_gscan(bcmcfg_to_prmry_ndev(cfg),
+ DHD_PNO_GET_CAPABILITIES, NULL, &reply_len);
+ if (!reply) {
+ WL_ERR(("Could not get capabilities\n"));
+ err = -EINVAL;
+ return err;
+ }
+
+ err = wl_cfgvendor_send_cmd_reply(wiphy, reply, reply_len);
+ if (unlikely(err)) {
+ WL_ERR(("Vendor Command reply failed ret:%d \n", err));
+ }
+
+ MFREE(cfg->osh, reply, reply_len);
+ return err;
+}
+
+static int
+wl_cfgvendor_gscan_get_batch_results(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int err = 0;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ gscan_results_cache_t *results, *iter;
+ uint32 reply_len, is_done = 1;
+ int32 mem_needed, num_results_iter;
+ wifi_gscan_result_t *ptr;
+ uint16 num_scan_ids, num_results;
+ struct sk_buff *skb;
+ struct nlattr *scan_hdr, *complete_flag;
+
+ err = dhd_dev_wait_batch_results_complete(bcmcfg_to_prmry_ndev(cfg));
+ if (err != BCME_OK)
+ return -EBUSY;
+
+ err = dhd_dev_pno_lock_access_batch_results(bcmcfg_to_prmry_ndev(cfg));
+ if (err != BCME_OK) {
+ WL_ERR(("Can't obtain lock to access batch results %d\n", err));
+ return -EBUSY;
+ }
+ results = dhd_dev_pno_get_gscan(bcmcfg_to_prmry_ndev(cfg),
+ DHD_PNO_GET_BATCH_RESULTS, NULL, &reply_len);
+
+ if (!results) {
+ WL_ERR(("No results to send %d\n", err));
+ err = wl_cfgvendor_send_cmd_reply(wiphy, results, 0);
+
+ if (unlikely(err))
+ WL_ERR(("Vendor Command reply failed ret:%d \n", err));
+ dhd_dev_pno_unlock_access_batch_results(bcmcfg_to_prmry_ndev(cfg));
+ return err;
+ }
+ num_scan_ids = reply_len & 0xFFFF;
+ num_results = (reply_len & 0xFFFF0000) >> 16;
+ mem_needed = (num_results * sizeof(wifi_gscan_result_t)) +
+ (num_scan_ids * GSCAN_BATCH_RESULT_HDR_LEN) +
+ VENDOR_REPLY_OVERHEAD + SCAN_RESULTS_COMPLETE_FLAG_LEN;
+
+ if (mem_needed > (int32)NLMSG_DEFAULT_SIZE) {
+ mem_needed = (int32)NLMSG_DEFAULT_SIZE;
+ }
+
+ WL_TRACE(("is_done %d mem_needed %d max_mem %d\n", is_done, mem_needed,
+ (int)NLMSG_DEFAULT_SIZE));
+ /* Alloc the SKB for vendor_event */
+ skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, mem_needed);
+ if (unlikely(!skb)) {
+ WL_ERR(("skb alloc failed"));
+ dhd_dev_pno_unlock_access_batch_results(bcmcfg_to_prmry_ndev(cfg));
+ return -ENOMEM;
+ }
+ iter = results;
+ complete_flag = nla_reserve(skb, GSCAN_ATTRIBUTE_SCAN_RESULTS_COMPLETE,
+ sizeof(is_done));
+
+ if (unlikely(!complete_flag)) {
+ WL_ERR(("complete_flag could not be reserved"));
+ kfree_skb(skb);
+ dhd_dev_pno_unlock_access_batch_results(bcmcfg_to_prmry_ndev(cfg));
+ return -ENOMEM;
+ }
+ mem_needed = mem_needed - (SCAN_RESULTS_COMPLETE_FLAG_LEN + VENDOR_REPLY_OVERHEAD);
+
+ while (iter) {
+ num_results_iter = (mem_needed - (int32)GSCAN_BATCH_RESULT_HDR_LEN);
+ num_results_iter /= (int32)sizeof(wifi_gscan_result_t);
+ if (num_results_iter <= 0 ||
+ ((iter->tot_count - iter->tot_consumed) > num_results_iter)) {
+ break;
+ }
+ scan_hdr = nla_nest_start(skb, GSCAN_ATTRIBUTE_SCAN_RESULTS);
+ /* no more room? we are done then (for now) */
+ if (scan_hdr == NULL) {
+ is_done = 0;
+ break;
+ }
+ err = nla_put_u32(skb, GSCAN_ATTRIBUTE_SCAN_ID, iter->scan_id);
+ if (unlikely(err)) {
+ goto fail;
+ }
+ err = nla_put_u8(skb, GSCAN_ATTRIBUTE_SCAN_FLAGS, iter->flag);
+ if (unlikely(err)) {
+ goto fail;
+ }
+ err = nla_put_u32(skb, GSCAN_ATTRIBUTE_CH_BUCKET_BITMASK, iter->scan_ch_bucket);
+ if (unlikely(err)) {
+ goto fail;
+ }
+ num_results_iter = iter->tot_count - iter->tot_consumed;
+
+ err = nla_put_u32(skb, GSCAN_ATTRIBUTE_NUM_OF_RESULTS, num_results_iter);
+ if (unlikely(err)) {
+ goto fail;
+ }
+ if (num_results_iter) {
+ ptr = &iter->results[iter->tot_consumed];
+ err = nla_put(skb, GSCAN_ATTRIBUTE_SCAN_RESULTS,
+ num_results_iter * sizeof(wifi_gscan_result_t), ptr);
+ if (unlikely(err)) {
+ goto fail;
+ }
+ iter->tot_consumed += num_results_iter;
+ }
+ nla_nest_end(skb, scan_hdr);
+ mem_needed -= GSCAN_BATCH_RESULT_HDR_LEN +
+ (num_results_iter * sizeof(wifi_gscan_result_t));
+ iter = iter->next;
+ }
+ /* Cleans up consumed results and returns TRUE if all results are consumed */
+ is_done = dhd_dev_gscan_batch_cache_cleanup(bcmcfg_to_prmry_ndev(cfg));
+ memcpy(nla_data(complete_flag), &is_done, sizeof(is_done));
+ dhd_dev_pno_unlock_access_batch_results(bcmcfg_to_prmry_ndev(cfg));
+ return cfg80211_vendor_cmd_reply(skb);
+fail:
+ /* Free up consumed results which will now not be sent */
+ (void)dhd_dev_gscan_batch_cache_cleanup(bcmcfg_to_prmry_ndev(cfg));
+ kfree_skb(skb);
+ dhd_dev_pno_unlock_access_batch_results(bcmcfg_to_prmry_ndev(cfg));
+ return err;
+}
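+
+/* The reply budget mirrors the allocation above: mem_needed starts at the
+ * nlmsg cap, the complete flag and reply overhead are reserved up front,
+ * and each scan-id nest then subtracts its header plus the results it
+ * attached. The loop deliberately never splits one cache node across
+ * replies: it stops as soon as the next node's unconsumed results would
+ * not fit whole, leaving them for a subsequent batch-results call.
+ */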
+
+static int
+wl_cfgvendor_initiate_gscan(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int err = 0;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ int type, tmp = len;
+ int run = 0xFF;
+ int flush = 0;
+ const struct nlattr *iter;
+
+ nla_for_each_attr(iter, data, len, tmp) {
+ type = nla_type(iter);
+ if (type == GSCAN_ATTRIBUTE_ENABLE_FEATURE)
+ run = nla_get_u32(iter);
+ else if (type == GSCAN_ATTRIBUTE_FLUSH_FEATURE)
+ flush = nla_get_u32(iter);
+ }
+
+ if (run != 0xFF) {
+ err = dhd_dev_pno_run_gscan(bcmcfg_to_prmry_ndev(cfg), run, flush);
+
+ if (unlikely(err)) {
+ WL_ERR(("Could not run gscan:%d \n", err));
+ }
+ return err;
+ } else {
+ return -EINVAL;
+ }
+
+}
+
+static int
+wl_cfgvendor_enable_full_scan_result(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int err = 0;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ int type;
+ bool real_time = FALSE;
+
+ if (!data) {
+ WL_ERR(("data is not available\n"));
+ return -EINVAL;
+ }
+
+ if (len <= 0) {
+ WL_ERR(("invalid len %d\n", len));
+ return -EINVAL;
+ }
+
+ type = nla_type(data);
+
+ if (type == GSCAN_ATTRIBUTE_ENABLE_FULL_SCAN_RESULTS) {
+ real_time = nla_get_u32(data);
+
+ err = dhd_dev_pno_enable_full_scan_result(bcmcfg_to_prmry_ndev(cfg), real_time);
+
+ if (unlikely(err)) {
+ WL_ERR(("Could not run gscan:%d \n", err));
+ }
+
+ } else {
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static int
+wl_cfgvendor_set_scan_cfg_bucket(const struct nlattr *prev,
+ gscan_scan_params_t *scan_param, int num)
+{
+ struct dhd_pno_gscan_channel_bucket *ch_bucket;
+ int k = 0;
+ int type, err = 0, rem;
+ const struct nlattr *cur, *next;
+
+ nla_for_each_nested(cur, prev, rem) {
+ type = nla_type(cur);
+ ch_bucket = scan_param->channel_bucket;
+ switch (type) {
+ case GSCAN_ATTRIBUTE_BUCKET_ID:
+ break;
+ case GSCAN_ATTRIBUTE_BUCKET_PERIOD:
+ if (nla_len(cur) != sizeof(uint32)) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ ch_bucket[num].bucket_freq_multiple =
+ nla_get_u32(cur) / MSEC_PER_SEC;
+ break;
+ case GSCAN_ATTRIBUTE_BUCKET_NUM_CHANNELS:
+ if (nla_len(cur) != sizeof(uint32)) {
+ err = -EINVAL;
+ goto exit;
+ }
+ ch_bucket[num].num_channels = nla_get_u32(cur);
+ if (ch_bucket[num].num_channels >
+ GSCAN_MAX_CHANNELS_IN_BUCKET) {
+ WL_ERR(("channel range:%d,bucket:%d\n",
+ ch_bucket[num].num_channels,
+ num));
+ err = -EINVAL;
+ goto exit;
+ }
+ break;
+ case GSCAN_ATTRIBUTE_BUCKET_CHANNELS:
+ nla_for_each_nested(next, cur, rem) {
+ if (k >= GSCAN_MAX_CHANNELS_IN_BUCKET)
+ break;
+ if (nla_len(next) != sizeof(uint32)) {
+ err = -EINVAL;
+ goto exit;
+ }
+ ch_bucket[num].chan_list[k] = nla_get_u32(next);
+ k++;
+ }
+ break;
+ case GSCAN_ATTRIBUTE_BUCKETS_BAND:
+ if (nla_len(cur) != sizeof(uint32)) {
+ err = -EINVAL;
+ goto exit;
+ }
+ ch_bucket[num].band = (uint16)nla_get_u32(cur);
+ break;
+ case GSCAN_ATTRIBUTE_REPORT_EVENTS:
+ if (nla_len(cur) != sizeof(uint32)) {
+ err = -EINVAL;
+ goto exit;
+ }
+ ch_bucket[num].report_flag = (uint8)nla_get_u32(cur);
+ break;
+ case GSCAN_ATTRIBUTE_BUCKET_STEP_COUNT:
+ if (nla_len(cur) != sizeof(uint32)) {
+ err = -EINVAL;
+ goto exit;
+ }
+ ch_bucket[num].repeat = (uint16)nla_get_u32(cur);
+ break;
+ case GSCAN_ATTRIBUTE_BUCKET_MAX_PERIOD:
+ if (nla_len(cur) != sizeof(uint32)) {
+ err = -EINVAL;
+ goto exit;
+ }
+ ch_bucket[num].bucket_max_multiple =
+ nla_get_u32(cur) / MSEC_PER_SEC;
+ break;
+ default:
+ WL_ERR(("unknown attr type:%d\n", type));
+ err = -EINVAL;
+ goto exit;
+ }
+ }
+
+exit:
+ return err;
+}
+
+static int
+wl_cfgvendor_set_scan_cfg(struct wiphy *wiphy, struct wireless_dev *wdev,
+ const void *data, int len)
+{
+ int err = 0;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ gscan_scan_params_t *scan_param;
+ int j = 0;
+ int type, tmp;
+ const struct nlattr *iter;
+
+ scan_param = (gscan_scan_params_t *)MALLOCZ(cfg->osh,
+ sizeof(gscan_scan_params_t));
+ if (!scan_param) {
+ WL_ERR(("Could not set GSCAN scan cfg, mem alloc failure\n"));
+ err = -EINVAL;
+ return err;
+
+ }
+
+ scan_param->scan_fr = PNO_SCAN_MIN_FW_SEC;
+ nla_for_each_attr(iter, data, len, tmp) {
+ type = nla_type(iter);
+
+ if (j >= GSCAN_MAX_CH_BUCKETS) {
+ break;
+ }
+
+ switch (type) {
+ case GSCAN_ATTRIBUTE_BASE_PERIOD:
+ if (nla_len(iter) != sizeof(uint32)) {
+ err = -EINVAL;
+ goto exit;
+ }
+ scan_param->scan_fr = nla_get_u32(iter) / MSEC_PER_SEC;
+ break;
+ case GSCAN_ATTRIBUTE_NUM_BUCKETS:
+ if (nla_len(iter) != sizeof(uint32)) {
+ err = -EINVAL;
+ goto exit;
+ }
+ scan_param->nchannel_buckets = nla_get_u32(iter);
+ if (scan_param->nchannel_buckets >=
+ GSCAN_MAX_CH_BUCKETS) {
+ WL_ERR(("nchannel_buckets out of range %d\n",
+ scan_param->nchannel_buckets));
+ err = -EINVAL;
+ goto exit;
+ }
+ break;
+ case GSCAN_ATTRIBUTE_CH_BUCKET_1:
+ case GSCAN_ATTRIBUTE_CH_BUCKET_2:
+ case GSCAN_ATTRIBUTE_CH_BUCKET_3:
+ case GSCAN_ATTRIBUTE_CH_BUCKET_4:
+ case GSCAN_ATTRIBUTE_CH_BUCKET_5:
+ case GSCAN_ATTRIBUTE_CH_BUCKET_6:
+ case GSCAN_ATTRIBUTE_CH_BUCKET_7:
+ err = wl_cfgvendor_set_scan_cfg_bucket(iter, scan_param, j);
+ if (err < 0) {
+ WL_ERR(("set_scan_cfg_buck error:%d\n", err));
+ goto exit;
+ }
+ j++;
+ break;
+ default:
+ WL_ERR(("Unknown type %d\n", type));
+ err = -EINVAL;
+ goto exit;
+ }
+ }
+
+ err = dhd_dev_pno_set_cfg_gscan(bcmcfg_to_prmry_ndev(cfg),
+ DHD_PNO_SCAN_CFG_ID, scan_param, FALSE);
+
+ if (err < 0) {
+ WL_ERR(("Could not set GSCAN scan cfg\n"));
+ err = -EINVAL;
+ }
+
+exit:
+ MFREE(cfg->osh, scan_param, sizeof(gscan_scan_params_t));
+ return err;
+
+}
+
+static int
+wl_cfgvendor_hotlist_cfg(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int err = 0;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ gscan_hotlist_scan_params_t *hotlist_params;
+ int tmp, tmp1, tmp2, type, j = 0, dummy;
+ const struct nlattr *outer, *inner = NULL, *iter;
+ bool flush = FALSE;
+ struct bssid_t *pbssid;
+
+ BCM_REFERENCE(dummy);
+
+ if (len < sizeof(*hotlist_params) || len >= WLC_IOCTL_MAXLEN) {
+ WL_ERR(("invalid buffer length %d - bail out.\n", len));
+ return -EINVAL;
+ }
+
+ hotlist_params = (gscan_hotlist_scan_params_t *)MALLOCZ(cfg->osh,
+ sizeof(*hotlist_params)
+ + (sizeof(struct bssid_t) * (PFN_SWC_MAX_NUM_APS - 1)));
+
+ if (!hotlist_params) {
+ WL_ERR(("Failed to allocate memory.\n"));
+ return -ENOMEM;
+ }
+
+ hotlist_params->lost_ap_window = GSCAN_LOST_AP_WINDOW_DEFAULT;
+
+ nla_for_each_attr(iter, data, len, tmp2) {
+ type = nla_type(iter);
+ switch (type) {
+ case GSCAN_ATTRIBUTE_HOTLIST_BSSID_COUNT:
+ if (nla_len(iter) != sizeof(uint32)) {
+ WL_DBG(("type:%d length:%d not matching.\n",
+ type, nla_len(iter)));
+ err = -EINVAL;
+ goto exit;
+ }
+ hotlist_params->nbssid = (uint16)nla_get_u32(iter);
+ if ((hotlist_params->nbssid == 0) ||
+ (hotlist_params->nbssid > PFN_SWC_MAX_NUM_APS)) {
+ WL_ERR(("nbssid:%d exceeds limit.\n",
+ hotlist_params->nbssid));
+ err = -EINVAL;
+ goto exit;
+ }
+ break;
+ case GSCAN_ATTRIBUTE_HOTLIST_BSSIDS:
+ if (hotlist_params->nbssid == 0) {
+ WL_ERR(("nbssid not retrieved.\n"));
+ err = -EINVAL;
+ goto exit;
+ }
+ pbssid = hotlist_params->bssid;
+ nla_for_each_nested(outer, iter, tmp) {
+ if (j >= hotlist_params->nbssid)
+ break;
+ nla_for_each_nested(inner, outer, tmp1) {
+ type = nla_type(inner);
+
+ switch (type) {
+ case GSCAN_ATTRIBUTE_BSSID:
+ if (nla_len(inner) != sizeof(pbssid[j].macaddr)) {
+ WL_ERR(("type:%d length:%d not matching.\n",
+ type, nla_len(inner)));
+ err = -EINVAL;
+ goto exit;
+ }
+ memcpy(
+ &(pbssid[j].macaddr),
+ nla_data(inner),
+ sizeof(pbssid[j].macaddr));
+ break;
+ case GSCAN_ATTRIBUTE_RSSI_LOW:
+ if (nla_len(inner) != sizeof(uint8)) {
+ WL_ERR(("type:%d length:%d not matching.\n",
+ type, nla_len(inner)));
+ err = -EINVAL;
+ goto exit;
+ }
+ pbssid[j].rssi_reporting_threshold =
+ (int8)nla_get_u8(inner);
+ break;
+ case GSCAN_ATTRIBUTE_RSSI_HIGH:
+ if (nla_len(inner) != sizeof(uint8)) {
+ WL_ERR(("type:%d length:%d not matching.\n",
+ type, nla_len(inner)));
+ err = -EINVAL;
+ goto exit;
+ }
+ dummy = (int8)nla_get_u8(inner);
+ WL_DBG(("dummy %d\n", dummy));
+ break;
+ default:
+ WL_ERR(("ATTR unknown %d\n", type));
+ err = -EINVAL;
+ goto exit;
+ }
+ }
+ j++;
+ }
+ if (j != hotlist_params->nbssid) {
+ WL_ERR(("bssid_cnt:%d != nbssid:%d.\n", j,
+ hotlist_params->nbssid));
+ err = -EINVAL;
+ goto exit;
+ }
+ break;
+ case GSCAN_ATTRIBUTE_HOTLIST_FLUSH:
+ if (nla_len(iter) != sizeof(uint8)) {
+ WL_ERR(("type:%d length:%d not matching.\n",
+ type, nla_len(iter)));
+ err = -EINVAL;
+ goto exit;
+ }
+ flush = nla_get_u8(iter);
+ break;
+ case GSCAN_ATTRIBUTE_LOST_AP_SAMPLE_SIZE:
+ if (nla_len(iter) != sizeof(uint32)) {
+ WL_ERR(("type:%d length:%d not matching.\n",
+ type, nla_len(iter)));
+ err = -EINVAL;
+ goto exit;
+ }
+ hotlist_params->lost_ap_window = (uint16)nla_get_u32(iter);
+ break;
+ default:
+ WL_ERR(("Unknown type %d\n", type));
+ err = -EINVAL;
+ goto exit;
+ }
+
+ }
+
+ if (dhd_dev_pno_set_cfg_gscan(bcmcfg_to_prmry_ndev(cfg),
+ DHD_PNO_GEOFENCE_SCAN_CFG_ID, hotlist_params, flush) < 0) {
+ WL_ERR(("Could not set GSCAN HOTLIST cfg error: %d\n", err));
+ err = -EINVAL;
+ goto exit;
+ }
+exit:
+ MFREE(cfg->osh, hotlist_params, sizeof(*hotlist_params)
+ + (sizeof(struct bssid_t) * (PFN_SWC_MAX_NUM_APS - 1)));
+ return err;
+}
+
+static int wl_cfgvendor_epno_cfg(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int err = 0;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ dhd_pno_ssid_t *ssid_elem = NULL;
+ int tmp, tmp1, tmp2, type = 0, num = 0;
+ const struct nlattr *outer, *inner, *iter;
+ uint8 flush = FALSE, i = 0;
+ wl_ssid_ext_params_t params;
+
+ nla_for_each_attr(iter, data, len, tmp2) {
+ type = nla_type(iter);
+ switch (type) {
+ case GSCAN_ATTRIBUTE_EPNO_SSID_LIST:
+ nla_for_each_nested(outer, iter, tmp) {
+ ssid_elem = (dhd_pno_ssid_t *)
+ dhd_dev_pno_get_gscan(bcmcfg_to_prmry_ndev(cfg),
+ DHD_PNO_GET_NEW_EPNO_SSID_ELEM,
+ NULL, &num);
+ if (!ssid_elem) {
+ WL_ERR(("Failed to get SSID LIST buffer\n"));
+ err = -ENOMEM;
+ goto exit;
+ }
+ i++;
+ nla_for_each_nested(inner, outer, tmp1) {
+ type = nla_type(inner);
+
+ switch (type) {
+ case GSCAN_ATTRIBUTE_EPNO_SSID:
+ memcpy(ssid_elem->SSID,
+ nla_data(inner),
+ DOT11_MAX_SSID_LEN);
+ break;
+ case GSCAN_ATTRIBUTE_EPNO_SSID_LEN:
+ ssid_elem->SSID_len =
+ nla_get_u32(inner);
+ if (ssid_elem->SSID_len >
+ DOT11_MAX_SSID_LEN) {
+ WL_ERR(("SSID too long: %d\n",
+ ssid_elem->SSID_len));
+ err = -EINVAL;
+ MFREE(cfg->osh, ssid_elem,
+ num);
+ goto exit;
+ }
+ break;
+ case GSCAN_ATTRIBUTE_EPNO_FLAGS:
+ ssid_elem->flags =
+ nla_get_u32(inner);
+ ssid_elem->hidden =
+ ((ssid_elem->flags &
+ DHD_EPNO_HIDDEN_SSID) != 0);
+ break;
+ case GSCAN_ATTRIBUTE_EPNO_AUTH:
+ ssid_elem->wpa_auth =
+ nla_get_u32(inner);
+ break;
+ }
+ }
+ if (!ssid_elem->SSID_len) {
+ WL_ERR(("Broadcast SSID is illegal for ePNO\n"));
+ err = -EINVAL;
+ MFREE(cfg->osh, ssid_elem, num);
+ goto exit;
+ }
+ dhd_pno_translate_epno_fw_flags(&ssid_elem->flags);
+ dhd_pno_set_epno_auth_flag(&ssid_elem->wpa_auth);
+ MFREE(cfg->osh, ssid_elem, num);
+ }
+ break;
+ case GSCAN_ATTRIBUTE_EPNO_SSID_NUM:
+ num = nla_get_u8(iter);
+ break;
+ case GSCAN_ATTRIBUTE_EPNO_FLUSH:
+ flush = (bool)nla_get_u32(iter);
+ /* Flush attribute is expected before any ssid attribute */
+ if (i && flush) {
+ WL_ERR(("Bad attributes\n"));
+ err = -EINVAL;
+ goto exit;
+ }
+ /* Need to flush driver and FW cfg */
+ dhd_dev_pno_set_cfg_gscan(bcmcfg_to_prmry_ndev(cfg),
+ DHD_PNO_EPNO_CFG_ID, NULL, flush);
+ dhd_dev_flush_fw_epno(bcmcfg_to_prmry_ndev(cfg));
+ break;
+ case GSCAN_ATTRIBUTE_EPNO_5G_RSSI_THR:
+ params.min5G_rssi = nla_get_s8(iter);
+ break;
+ case GSCAN_ATTRIBUTE_EPNO_2G_RSSI_THR:
+ params.min2G_rssi = nla_get_s8(iter);
+ break;
+ case GSCAN_ATTRIBUTE_EPNO_INIT_SCORE_MAX:
+ params.init_score_max = nla_get_s16(iter);
+ break;
+ case GSCAN_ATTRIBUTE_EPNO_CUR_CONN_BONUS:
+ params.cur_bssid_bonus = nla_get_s16(iter);
+ break;
+ case GSCAN_ATTRIBUTE_EPNO_SAME_NETWORK_BONUS:
+ params.same_ssid_bonus = nla_get_s16(iter);
+ break;
+ case GSCAN_ATTRIBUTE_EPNO_SECURE_BONUS:
+ params.secure_bonus = nla_get_s16(iter);
+ break;
+ case GSCAN_ATTRIBUTE_EPNO_5G_BONUS:
+ params.band_5g_bonus = nla_get_s16(iter);
+ break;
+ default:
+ WL_ERR(("%s: No such attribute %d\n", __FUNCTION__, type));
+ err = -EINVAL;
+ goto exit;
+ }
+ }
+ if (i != num) {
+ WL_ERR(("%s: num_ssid %d does not match ssids sent %d\n", __FUNCTION__,
+ num, i));
+ err = -EINVAL;
+ }
+exit:
+ /* Flush all configs if error condition */
+ if (err < 0) {
+ dhd_dev_pno_set_cfg_gscan(bcmcfg_to_prmry_ndev(cfg),
+ DHD_PNO_EPNO_CFG_ID, NULL, TRUE);
+ dhd_dev_flush_fw_epno(bcmcfg_to_prmry_ndev(cfg));
+ } else if (type != GSCAN_ATTRIBUTE_EPNO_FLUSH) {
+ /* If the last attribute was FLUSH, nothing else to do */
+ dhd_dev_pno_set_cfg_gscan(bcmcfg_to_prmry_ndev(cfg),
+ DHD_PNO_EPNO_PARAMS_ID, &params, FALSE);
+ err = dhd_dev_set_epno(bcmcfg_to_prmry_ndev(cfg));
+ }
+ return err;
+}
+
+static int
+wl_cfgvendor_set_batch_scan_cfg(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int err = 0, tmp, type;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ gscan_batch_params_t batch_param;
+ const struct nlattr *iter;
+
+ batch_param.mscan = batch_param.bestn = 0;
+ batch_param.buffer_threshold = GSCAN_BATCH_NO_THR_SET;
+
+ nla_for_each_attr(iter, data, len, tmp) {
+ type = nla_type(iter);
+
+ switch (type) {
+ case GSCAN_ATTRIBUTE_NUM_AP_PER_SCAN:
+ batch_param.bestn = nla_get_u32(iter);
+ break;
+ case GSCAN_ATTRIBUTE_NUM_SCANS_TO_CACHE:
+ batch_param.mscan = nla_get_u32(iter);
+ break;
+ case GSCAN_ATTRIBUTE_REPORT_THRESHOLD:
+ batch_param.buffer_threshold = nla_get_u32(iter);
+ break;
+ default:
+ WL_ERR(("Unknown type %d\n", type));
+ break;
+ }
+ }
+
+ if (dhd_dev_pno_set_cfg_gscan(bcmcfg_to_prmry_ndev(cfg),
+ DHD_PNO_BATCH_SCAN_CFG_ID, &batch_param, FALSE) < 0) {
+ WL_ERR(("Could not set batch cfg\n"));
+ err = -EINVAL;
+ return err;
+ }
+
+ return err;
+}
+
+#endif /* GSCAN_SUPPORT */
+#if defined(GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS)
+static int
+wl_cfgvendor_gscan_get_channel_list(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int err = 0, type, band;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ uint16 *reply = NULL;
+ uint32 reply_len = 0, num_channels, mem_needed;
+ struct sk_buff *skb;
+ dhd_pub_t *dhdp;
+ struct net_device *ndev = wdev->netdev;
+
+ if (!ndev) {
+ WL_ERR(("ndev null\n"));
+ return -EINVAL;
+ }
+
+ dhdp = wl_cfg80211_get_dhdp(ndev);
+ if (!dhdp) {
+ WL_ERR(("dhdp null\n"));
+ return -EINVAL;
+ }
+
+ if (!data) {
+ WL_ERR(("data is not available\n"));
+ return -EINVAL;
+ }
+
+ if (len <= 0) {
+ WL_ERR(("invalid len %d\n", len));
+ return -EINVAL;
+ }
+
+ type = nla_type(data);
+ if (type == GSCAN_ATTRIBUTE_BAND) {
+ band = nla_get_u32(data);
+ } else {
+ return -EINVAL;
+ }
+
+ reply = MALLOCZ(cfg->osh, CHANINFO_LIST_BUF_SIZE);
+ if (reply == NULL) {
+ WL_ERR(("failed to allocate chanspec buffer\n"));
+ return -ENOMEM;
+ }
+ err = wl_cfgscan_get_band_freq_list(cfg, band, reply, &num_channels);
+ if (err != BCME_OK && err != BCME_UNSUPPORTED) {
+ WL_ERR(("%s: failed to get valid channel list\n",
+ __FUNCTION__));
+ err = -EINVAL;
+ goto exit;
+ } else if (err == BCME_OK) {
+ reply_len = (num_channels * sizeof(uint32));
+	} else if (err == BCME_UNSUPPORTED) {
+		/* fall back to the PNO channel list; free the chanspec
+		 * buffer allocated above so it is not leaked on reassignment
+		 */
+		MFREE(cfg->osh, reply, CHANINFO_LIST_BUF_SIZE);
+		reply = dhd_pno_get_gscan(dhdp,
+			DHD_PNO_GET_CHANNEL_LIST, &band, &reply_len);
+ if (!reply) {
+ WL_ERR(("Could not get channel list\n"));
+ err = -EINVAL;
+ return err;
+ }
+ num_channels = reply_len/sizeof(uint32);
+ }
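+	/* channel list payload plus vendor reply overhead and two u32 attributes */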
+ mem_needed = reply_len + VENDOR_REPLY_OVERHEAD + (ATTRIBUTE_U32_LEN * 2);
+
+ /* Alloc the SKB for vendor_event */
+ skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, mem_needed);
+ if (unlikely(!skb)) {
+ WL_ERR(("skb alloc failed"));
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ nla_put_u32(skb, GSCAN_ATTRIBUTE_NUM_CHANNELS, num_channels);
+ nla_put(skb, GSCAN_ATTRIBUTE_CHANNEL_LIST, reply_len, reply);
+
+ err = cfg80211_vendor_cmd_reply(skb);
+
+ if (unlikely(err)) {
+ WL_ERR(("Vendor Command reply failed ret:%d \n", err));
+ }
+exit:
+ MFREE(cfg->osh, reply, CHANINFO_LIST_BUF_SIZE);
+ return err;
+}
+#endif /* GSCAN_SUPPORT || DHD_GET_VALID_CHANNELS */
+
+#ifdef RSSI_MONITOR_SUPPORT
+static int wl_cfgvendor_set_rssi_monitor(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int err = 0, tmp, type, start = 0;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ int8 max_rssi = 0, min_rssi = 0;
+ const struct nlattr *iter;
+
+ if (!wl_get_drv_status(cfg, CONNECTED, wdev_to_ndev(wdev))) {
+ WL_ERR(("Sta is not connected to an AP, rssi monitoring is not allowed\n"));
+ return -EINVAL;
+ }
+
+ nla_for_each_attr(iter, data, len, tmp) {
+ type = nla_type(iter);
+ switch (type) {
+ case RSSI_MONITOR_ATTRIBUTE_MAX_RSSI:
+ max_rssi = (int8) nla_get_u32(iter);
+ break;
+ case RSSI_MONITOR_ATTRIBUTE_MIN_RSSI:
+ min_rssi = (int8) nla_get_u32(iter);
+ break;
+		case RSSI_MONITOR_ATTRIBUTE_START:
+			start = nla_get_u32(iter);
+			break;
+		}
+ }
+
+ if (dhd_dev_set_rssi_monitor_cfg(bcmcfg_to_prmry_ndev(cfg),
+ start, max_rssi, min_rssi) < 0) {
+ WL_ERR(("Could not set rssi monitor cfg\n"));
+ err = -EINVAL;
+ }
+ return err;
+}
+#endif /* RSSI_MONITOR_SUPPORT */
+
+#ifdef DHD_WAKE_STATUS
+static int
+wl_cfgvendor_get_wake_reason_stats(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ struct net_device *ndev = wdev_to_ndev(wdev);
+ wake_counts_t *pwake_count_info;
+ int ret, mem_needed;
+#if defined(DHD_DEBUG) && defined(DHD_WAKE_EVENT_STATUS)
+ int flowid;
+#endif /* DHD_DEBUG && DHD_WAKE_EVENT_STATUS */
+ struct sk_buff *skb = NULL;
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(ndev);
+
+ WL_DBG(("Recv get wake status info cmd.\n"));
+
+ pwake_count_info = dhd_get_wakecount(dhdp);
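+	/* room for up to 20 u32 attributes plus the per-event wake counter array */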
+ mem_needed = VENDOR_REPLY_OVERHEAD + (ATTRIBUTE_U32_LEN * 20) +
+ (WLC_E_LAST * sizeof(uint));
+
+ skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, mem_needed);
+ if (unlikely(!skb)) {
+ WL_ERR(("%s: can't allocate %d bytes\n", __FUNCTION__, mem_needed));
+ ret = -ENOMEM;
+ goto exit;
+ }
+#ifdef DHD_WAKE_EVENT_STATUS
+ WL_ERR(("pwake_count_info->rcwake %d\n", pwake_count_info->rcwake));
+ ret = nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_TOTAL_CMD_EVENT, pwake_count_info->rcwake);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put Total count of CMD event, ret=%d\n", ret));
+ goto exit;
+ }
+ ret = nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_CMD_EVENT_COUNT_USED, WLC_E_LAST);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put Max count of event used, ret=%d\n", ret));
+ goto exit;
+ }
+ ret = nla_put(skb, WAKE_STAT_ATTRIBUTE_CMD_EVENT_WAKE, (WLC_E_LAST * sizeof(uint)),
+ pwake_count_info->rc_event);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put Event wake data, ret=%d\n", ret));
+ goto exit;
+ }
+#ifdef DHD_DEBUG
+ for (flowid = 0; flowid < WLC_E_LAST; flowid++) {
+ if (pwake_count_info->rc_event[flowid] != 0) {
+ WL_ERR((" %s = %u\n", bcmevent_get_name(flowid),
+ pwake_count_info->rc_event[flowid]));
+ }
+ }
+#endif /* DHD_DEBUG */
+#endif /* DHD_WAKE_EVENT_STATUS */
+#ifdef DHD_WAKE_RX_STATUS
+ WL_ERR(("pwake_count_info->rxwake %d\n", pwake_count_info->rxwake));
+ ret = nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_TOTAL_RX_DATA_WAKE, pwake_count_info->rxwake);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put Total Wake due RX data, ret=%d\n", ret));
+ goto exit;
+ }
+ ret = nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_UNICAST_COUNT, pwake_count_info->rx_ucast);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put Total wake due to RX unicast, ret=%d\n", ret));
+ goto exit;
+ }
+ ret = nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_MULTICAST_COUNT, pwake_count_info->rx_mcast);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put Total wake due RX multicast, ret=%d\n", ret));
+ goto exit;
+ }
+ ret = nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_BROADCAST_COUNT, pwake_count_info->rx_bcast);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put Total wake due to RX broadcast, ret=%d\n", ret));
+ goto exit;
+ }
+ ret = nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_ICMP_PKT, pwake_count_info->rx_arp);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put Total wake due to ICMP pkt, ret=%d\n", ret));
+ goto exit;
+ }
+ ret = nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_ICMP6_PKT, pwake_count_info->rx_icmpv6);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put Total wake due ICMPV6 pkt, ret=%d\n", ret));
+ goto exit;
+ }
+ ret = nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_ICMP6_RA, pwake_count_info->rx_icmpv6_ra);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put Total wake due to ICMPV6_RA, ret=%d\n", ret));
+ goto exit;
+ }
+ ret = nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_ICMP6_NA, pwake_count_info->rx_icmpv6_na);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put Total wake due to ICMPV6_NA, ret=%d\n", ret));
+ goto exit;
+ }
+ ret = nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_ICMP6_NS, pwake_count_info->rx_icmpv6_ns);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put Total wake due to ICMPV6_NS, ret=%d\n", ret));
+ goto exit;
+ }
+ ret = nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_IPV4_RX_MULTICAST_ADD_CNT,
+ pwake_count_info->rx_multi_ipv4);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put Total wake due to RX IPV4 MULTICAST, ret=%d\n", ret));
+ goto exit;
+ }
+ ret = nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_IPV6_RX_MULTICAST_ADD_CNT,
+ pwake_count_info->rx_multi_ipv6);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put Total wake due to RX IPV6 MULTICAST, ret=%d\n", ret));
+ goto exit;
+ }
+ ret = nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_OTHER_RX_MULTICAST_ADD_CNT,
+ pwake_count_info->rx_multi_other);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put Total wake due to Other RX Multicast, ret=%d\n", ret));
+ goto exit;
+ }
+#endif /* #ifdef DHD_WAKE_RX_STATUS */
+ ret = cfg80211_vendor_cmd_reply(skb);
+ if (unlikely(ret)) {
+ WL_ERR(("Vendor cmd reply for -get wake status failed:%d \n", ret));
+ }
+ /* On cfg80211_vendor_cmd_reply() skb is consumed and freed in case of success or failure */
+ return ret;
+
+exit:
+ /* Free skb memory */
+ if (skb) {
+ kfree_skb(skb);
+ }
+ return ret;
+}
+#endif /* DHD_WAKE_STATUS */
+
+#ifdef DHDTCPACK_SUPPRESS
+static int
+wl_cfgvendor_set_tcpack_sup_mode(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int err = BCME_OK, type;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct net_device *ndev = wdev_to_wlc_ndev(wdev, cfg);
+ uint8 enable = 0;
+
+ if (!data) {
+ WL_ERR(("data is not available\n"));
+ err = BCME_BADARG;
+ goto exit;
+ }
+
+ if (len <= 0) {
+ WL_ERR(("Length of the nlattr is not valid len : %d\n", len));
+ err = BCME_BADARG;
+ goto exit;
+ }
+
+ type = nla_type(data);
+ if (type == ANDR_WIFI_ATTRIBUTE_TCPACK_SUP_VALUE) {
+ enable = (uint8) nla_get_u32(data);
+ err = dhd_dev_set_tcpack_sup_mode_cfg(ndev, enable);
+ if (unlikely(err)) {
+ WL_ERR(("Could not set TCP Ack Suppress mode cfg: %d\n", err));
+ }
+ } else {
+ err = BCME_BADARG;
+ }
+
+exit:
+ return err;
+}
+#endif /* DHDTCPACK_SUPPRESS */
+
+#if defined(WL_CFG80211) && defined(DHD_FILE_DUMP_EVENT)
+static int
+wl_cfgvendor_notify_dump_completion(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ dhd_pub_t *dhd_pub = cfg->pub;
+ unsigned long flags = 0;
+
+ WL_INFORM(("%s, [DUMP] received file dump notification from HAL\n", __FUNCTION__));
+
+ DHD_GENERAL_LOCK(dhd_pub, flags);
+ /* call wmb() to synchronize with the previous memory operations */
+ OSL_SMP_WMB();
+ DHD_BUS_BUSY_CLEAR_IN_HALDUMP(dhd_pub);
+ /* Call another wmb() to make sure wait_for_dump_completion value
+ * gets updated before waking up waiting context.
+ */
+ OSL_SMP_WMB();
+ dhd_os_busbusy_wake(dhd_pub);
+ DHD_GENERAL_UNLOCK(dhd_pub, flags);
+
+ return BCME_OK;
+}
+#endif /* WL_CFG80211 && DHD_FILE_DUMP_EVENT */
+
+#if defined(WL_CFG80211)
+static int
+wl_cfgvendor_set_hal_pid(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ int ret = BCME_OK;
+ uint32 type;
+ if (!data) {
+ WL_DBG(("%s,data is not available\n", __FUNCTION__));
+ } else {
+ if (len > 0) {
+ type = nla_type(data);
+ if (type == SET_HAL_START_ATTRIBUTE_EVENT_SOCK_PID) {
+ if (nla_len(data)) {
+ WL_DBG(("HAL PID = %u\n", nla_get_u32(data)));
+ cfg->halpid = nla_get_u32(data);
+ }
+ }
+ } else {
+ WL_ERR(("invalid len %d\n", len));
+ ret = BCME_ERROR;
+ }
+ }
+ return ret;
+}
+
+static int
+wl_cfgvendor_set_hal_started(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+#ifdef WL_STA_ASSOC_RAND
+ struct ether_addr primary_mac;
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+#endif /* WL_STA_ASSOC_RAND */
+ int ret = BCME_OK;
+#if defined(WIFI_TURNON_USE_HALINIT)
+ struct net_device *ndev = wdev_to_wlc_ndev(wdev, cfg);
+ uint32 type;
+
+ if (!data) {
+ WL_DBG(("%s,data is not available\n", __FUNCTION__));
+ } else {
+ if (len > 0) {
+ type = nla_type(data);
+ WL_INFORM(("%s,type: %xh\n", __FUNCTION__, type));
+ if (type == SET_HAL_START_ATTRIBUTE_PRE_INIT) {
+ if (nla_len(data)) {
+ WL_INFORM(("%s, HAL version: %s\n", __FUNCTION__,
+ (char*)nla_data(data)));
+ }
+ WL_INFORM(("%s, dhd_open start\n", __FUNCTION__));
+ ret = dhd_open(ndev);
+ if (ret != BCME_OK) {
+ WL_INFORM(("%s, dhd_open failed\n", __FUNCTION__));
+ return ret;
+ } else {
+ WL_INFORM(("%s, dhd_open succeeded\n", __FUNCTION__));
+ }
+ return ret;
+ }
+ } else {
+ WL_ERR(("invalid len %d\n", len));
+ }
+ }
+#endif /* WIFI_TURNON_USE_HALINIT */
+ RETURN_EIO_IF_NOT_UP(cfg);
+ WL_INFORM(("%s,[DUMP] HAL STARTED\n", __FUNCTION__));
+
+ cfg->hal_started = true;
+#ifdef WL_STA_ASSOC_RAND
+ /* If mac randomization is enabled and primary macaddress is not
+ * randomized, randomize it from HAL init context
+ */
+ get_primary_mac(cfg, &primary_mac);
+ if ((!ETHER_IS_LOCALADDR(&primary_mac)) &&
+ (!wl_get_drv_status(cfg, CONNECTED, wdev_to_ndev(wdev)))) {
+		WL_DBG_MEM(("%s, Local admin bit not set, randomize"
+			" STA MAC address\n", __FUNCTION__));
+ if ((ret = dhd_update_rand_mac_addr(dhd)) < 0) {
+ WL_ERR(("%s: failed to set macaddress, ret = %d\n", __FUNCTION__, ret));
+ return ret;
+ }
+ }
+#endif /* WL_STA_ASSOC_RAND */
+ return ret;
+}
+
+static int
+wl_cfgvendor_stop_hal(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ WL_INFORM(("%s,[DUMP] HAL STOPPED\n", __FUNCTION__));
+
+ cfg->hal_started = false;
+ return BCME_OK;
+}
+#endif /* WL_CFG80211 */
+
+#ifdef WL_LATENCY_MODE
+static int
+wl_cfgvendor_set_latency_mode(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int err = BCME_OK, rem, type;
+ u32 latency_mode;
+ const struct nlattr *iter;
+#ifdef SUPPORT_LATENCY_CRITICAL_DATA
+ bool enable;
+#endif /* SUPPORT_LATENCY_CRITICAL_DATA */
+#ifdef WL_AUTO_QOS
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(wdev->netdev);
+#endif /* WL_AUTO_QOS */
+
+ nla_for_each_attr(iter, data, len, rem) {
+ type = nla_type(iter);
+ switch (type) {
+ case ANDR_WIFI_ATTRIBUTE_LATENCY_MODE:
+ latency_mode = nla_get_u32(iter);
+ WL_DBG(("%s,Setting latency mode %u\n", __FUNCTION__,
+ latency_mode));
+#ifdef WL_AUTO_QOS
+ /* Enable/Disable qos monitoring */
+ dhd_wl_sock_qos_set_status(dhdp, latency_mode);
+#endif /* WL_AUTO_QOS */
+#ifdef SUPPORT_LATENCY_CRITICAL_DATA
+ enable = latency_mode ? true : false;
+ err = wldev_iovar_setint(wdev->netdev,
+ "latency_critical_data", enable);
+ if (err != BCME_OK) {
+ WL_ERR(("failed to set latency_critical_data "
+ "enable %d, error = %d\n", enable, err));
+ /* Proceed with other optimizations possible */
+ err = BCME_OK;
+ }
+#endif /* SUPPORT_LATENCY_CRITICAL_DATA */
+ break;
+ default:
+ WL_ERR(("Unknown type: %d\n", type));
+ return err;
+ }
+ }
+
+ return err;
+}
+#endif /* WL_LATENCY_MODE */
+
+#ifdef RTT_SUPPORT
+void
+wl_cfgvendor_rtt_evt(void *ctx, void *rtt_data)
+{
+ struct wireless_dev *wdev = (struct wireless_dev *)ctx;
+ struct wiphy *wiphy;
+ struct sk_buff *skb = NULL;
+ uint32 evt_complete = 0;
+ gfp_t kflags;
+ rtt_result_t *rtt_result;
+ rtt_results_header_t *rtt_header;
+ struct list_head *rtt_cache_list;
+ struct nlattr *rtt_nl_hdr;
+ int ret = BCME_OK;
+ wiphy = wdev->wiphy;
+
+ WL_DBG(("In\n"));
+ /* Push the data to the skb */
+ if (!rtt_data) {
+ WL_ERR(("rtt_data is NULL\n"));
+ return;
+ }
+ rtt_cache_list = (struct list_head *)rtt_data;
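+	/* this handler may run in atomic context; choose GFP flags accordingly */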
+ kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+ if (list_empty(rtt_cache_list)) {
+#if (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || \
+ LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
+ skb = cfg80211_vendor_event_alloc(wiphy, NULL, 100,
+ GOOGLE_RTT_COMPLETE_EVENT, kflags);
+#else
+ skb = cfg80211_vendor_event_alloc(wiphy, 100, GOOGLE_RTT_COMPLETE_EVENT, kflags);
+#endif /* (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || */
+ /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) */
+ if (!skb) {
+ WL_ERR(("skb alloc failed"));
+ return;
+ }
+ evt_complete = 1;
+ ret = nla_put_u32(skb, RTT_ATTRIBUTE_RESULTS_COMPLETE, evt_complete);
+ if (ret < 0) {
+ WL_ERR(("Failed to put RTT_ATTRIBUTE_RESULTS_COMPLETE\n"));
+ goto free_mem;
+ }
+ cfg80211_vendor_event(skb, kflags);
+ return;
+ }
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ list_for_each_entry(rtt_header, rtt_cache_list, list) {
+ /* Alloc the SKB for vendor_event */
+#if (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || \
+ LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
+ skb = cfg80211_vendor_event_alloc(wiphy, NULL, rtt_header->result_tot_len + 100,
+ GOOGLE_RTT_COMPLETE_EVENT, kflags);
+#else
+ skb = cfg80211_vendor_event_alloc(wiphy, rtt_header->result_tot_len + 100,
+ GOOGLE_RTT_COMPLETE_EVENT, kflags);
+#endif /* (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || */
+ /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) */
+ if (!skb) {
+ WL_ERR(("skb alloc failed"));
+ return;
+ }
+ if (list_is_last(&rtt_header->list, rtt_cache_list)) {
+ evt_complete = 1;
+ }
+ ret = nla_put_u32(skb, RTT_ATTRIBUTE_RESULTS_COMPLETE, evt_complete);
+ if (ret < 0) {
+ WL_ERR(("Failed to put RTT_ATTRIBUTE_RESULTS_COMPLETE\n"));
+ goto free_mem;
+ }
+ rtt_nl_hdr = nla_nest_start(skb, RTT_ATTRIBUTE_RESULTS_PER_TARGET);
+ if (!rtt_nl_hdr) {
+ WL_ERR(("rtt_nl_hdr is NULL\n"));
+ dev_kfree_skb_any(skb);
+ break;
+ }
+ ret = nla_put(skb, RTT_ATTRIBUTE_TARGET_MAC, ETHER_ADDR_LEN,
+ &rtt_header->peer_mac);
+ if (ret < 0) {
+ WL_ERR(("Failed to put RTT_ATTRIBUTE_TARGET_MAC, ret:%d\n", ret));
+ goto free_mem;
+ }
+ ret = nla_put_u32(skb, RTT_ATTRIBUTE_RESULT_CNT, rtt_header->result_cnt);
+ if (ret < 0) {
+ WL_ERR(("Failed to put RTT_ATTRIBUTE_RESULT_CNT, ret:%d\n", ret));
+ goto free_mem;
+ }
+ list_for_each_entry(rtt_result, &rtt_header->result_list, list) {
+ ret = nla_put(skb, RTT_ATTRIBUTE_RESULT,
+ rtt_result->report_len, &rtt_result->report);
+ if (ret < 0) {
+ WL_ERR(("Failed to put RTT_ATTRIBUTE_RESULT, ret:%d\n", ret));
+ goto free_mem;
+ }
+ ret = nla_put(skb, RTT_ATTRIBUTE_RESULT_DETAIL,
+ rtt_result->detail_len, &rtt_result->rtt_detail);
+ if (ret < 0) {
+ WL_ERR(("Failed to put RTT_ATTRIBUTE_RESULT_DETAIL, ret:%d\n",
+ ret));
+ goto free_mem;
+ }
+ }
+ nla_nest_end(skb, rtt_nl_hdr);
+ cfg80211_vendor_event(skb, kflags);
+ }
+ GCC_DIAGNOSTIC_POP();
+
+ return;
+
+free_mem:
+ /* Free skb memory */
+ if (skb) {
+ kfree_skb(skb);
+ }
+}
+
+static int
+wl_cfgvendor_rtt_set_config(struct wiphy *wiphy, struct wireless_dev *wdev,
+ const void *data, int len) {
+ int err = 0, rem, rem1, rem2, type;
+ int target_cnt = 0;
+ rtt_config_params_t rtt_param;
+ rtt_target_info_t* rtt_target = NULL;
+ const struct nlattr *iter, *iter1, *iter2;
+ int8 eabuf[ETHER_ADDR_STR_LEN];
+ int8 chanbuf[CHANSPEC_STR_LEN];
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ rtt_capabilities_t capability;
+
+ bzero(&rtt_param, sizeof(rtt_param));
+
+ WL_DBG(("In\n"));
+ err = dhd_dev_rtt_register_noti_callback(wdev->netdev, wdev, wl_cfgvendor_rtt_evt);
+ if (err < 0) {
+ WL_ERR(("failed to register rtt_noti_callback\n"));
+ goto exit;
+ }
+ err = dhd_dev_rtt_capability(bcmcfg_to_prmry_ndev(cfg), &capability);
+ if (err < 0) {
+ WL_ERR(("failed to get the capability\n"));
+ goto exit;
+ }
+
+ if (len <= 0) {
+ WL_ERR(("Length of the nlattr is not valid len : %d\n", len));
+ err = BCME_ERROR;
+ goto exit;
+ }
+ nla_for_each_attr(iter, data, len, rem) {
+ type = nla_type(iter);
+ switch (type) {
+ case RTT_ATTRIBUTE_TARGET_CNT:
+ if (target_cnt != 0) {
+ WL_ERR(("attempt to overwrite target_cnt"));
+ err = -EINVAL;
+ goto exit;
+ }
+ target_cnt = nla_get_u8(iter);
+ if ((target_cnt <= 0) || (target_cnt > RTT_MAX_TARGET_CNT)) {
+ WL_ERR(("target_cnt is not valid : %d\n",
+ target_cnt));
+ err = BCME_RANGE;
+ goto exit;
+ }
+ rtt_param.rtt_target_cnt = target_cnt;
+
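+			/* one contiguous allocation sized for target_cnt target entries */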
+ rtt_param.target_info = (rtt_target_info_t *)MALLOCZ(cfg->osh,
+ TARGET_INFO_SIZE(target_cnt));
+ if (rtt_param.target_info == NULL) {
+ WL_ERR(("failed to allocate target info for (%d)\n", target_cnt));
+ err = BCME_NOMEM;
+ goto exit;
+ }
+ break;
+ case RTT_ATTRIBUTE_TARGET_INFO:
+			/* Safety check to avoid a crash in case the caller
+			 * did not respect the attribute order
+			 */
+ if (rtt_param.target_info == NULL) {
+ WL_ERR(("rtt_target_info is NULL\n"));
+ err = BCME_NOMEM;
+ goto exit;
+ }
+ rtt_target = rtt_param.target_info;
+ nla_for_each_nested(iter1, iter, rem1) {
+ if ((uint8 *)rtt_target >= ((uint8 *)rtt_param.target_info +
+ TARGET_INFO_SIZE(target_cnt))) {
+					WL_ERR(("rtt_target exceeds allocated target_info size"));
+ err = -EINVAL;
+ goto exit;
+ }
+ nla_for_each_nested(iter2, iter1, rem2) {
+ type = nla_type(iter2);
+ switch (type) {
+ case RTT_ATTRIBUTE_TARGET_MAC:
+ if (nla_len(iter2) != ETHER_ADDR_LEN) {
+ WL_ERR(("mac_addr length not match\n"));
+ err = -EINVAL;
+ goto exit;
+ }
+ memcpy(&rtt_target->addr, nla_data(iter2),
+ ETHER_ADDR_LEN);
+ break;
+ case RTT_ATTRIBUTE_TARGET_TYPE:
+ rtt_target->type = nla_get_u8(iter2);
+ if (rtt_target->type == RTT_INVALID ||
+ (rtt_target->type == RTT_ONE_WAY &&
+ !capability.rtt_one_sided_supported)) {
+							WL_ERR(("unsupported RTT"
+								" type: %d\n",
+								rtt_target->type));
+ err = -EINVAL;
+ goto exit;
+ }
+ break;
+ case RTT_ATTRIBUTE_TARGET_PEER:
+ rtt_target->peer = nla_get_u8(iter2);
+ break;
+ case RTT_ATTRIBUTE_TARGET_CHAN:
+ memcpy(&rtt_target->channel, nla_data(iter2),
+ sizeof(rtt_target->channel));
+ break;
+ case RTT_ATTRIBUTE_TARGET_PERIOD:
+ rtt_target->burst_period = nla_get_u32(iter2);
+ if (rtt_target->burst_period < 32) {
+ /* 100ms unit */
+ rtt_target->burst_period *= 100;
+ } else {
+						WL_ERR(("%d: value must be in (0-31)\n",
+							rtt_target->burst_period));
+						err = -EINVAL;
+ goto exit;
+ }
+ break;
+ case RTT_ATTRIBUTE_TARGET_NUM_BURST:
+ rtt_target->num_burst = nla_get_u32(iter2);
+ if (rtt_target->num_burst > 16) {
+						WL_ERR(("%d: value must be in (0-15)\n",
+							rtt_target->num_burst));
+ err = -EINVAL;
+ goto exit;
+ }
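+					/* HAL passes the burst exponent; convert to a burst count (2^n) */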
+ rtt_target->num_burst = BIT(rtt_target->num_burst);
+ break;
+ case RTT_ATTRIBUTE_TARGET_NUM_FTM_BURST:
+ rtt_target->num_frames_per_burst =
+ nla_get_u32(iter2);
+ break;
+ case RTT_ATTRIBUTE_TARGET_NUM_RETRY_FTM:
+ rtt_target->num_retries_per_ftm =
+ nla_get_u32(iter2);
+ break;
+ case RTT_ATTRIBUTE_TARGET_NUM_RETRY_FTMR:
+ rtt_target->num_retries_per_ftmr =
+ nla_get_u32(iter2);
+ if (rtt_target->num_retries_per_ftmr > 3) {
+						WL_ERR(("%d: value must be in (0-3)\n",
+							rtt_target->num_retries_per_ftmr));
+ err = -EINVAL;
+ goto exit;
+ }
+ break;
+ case RTT_ATTRIBUTE_TARGET_LCI:
+ rtt_target->LCI_request = nla_get_u8(iter2);
+ break;
+ case RTT_ATTRIBUTE_TARGET_LCR:
+						rtt_target->LCR_request = nla_get_u8(iter2);
+ break;
+ case RTT_ATTRIBUTE_TARGET_BURST_DURATION:
+ if ((nla_get_u32(iter2) > 1 &&
+ nla_get_u32(iter2) < 12)) {
+ rtt_target->burst_duration =
+ dhd_rtt_idx_to_burst_duration(
+ nla_get_u32(iter2));
+ } else if (nla_get_u32(iter2) == 15) {
+ /* use default value */
+ rtt_target->burst_duration = 0;
+ } else {
+						WL_ERR(("%d: value must be in (2-11) or 15\n",
+							nla_get_u32(iter2)));
+ err = -EINVAL;
+ goto exit;
+ }
+ break;
+ case RTT_ATTRIBUTE_TARGET_BW:
+ rtt_target->bw = nla_get_u8(iter2);
+ break;
+ case RTT_ATTRIBUTE_TARGET_PREAMBLE:
+ rtt_target->preamble = nla_get_u8(iter2);
+ break;
+ }
+ }
+ /* convert to chanspec value */
+ rtt_target->chanspec =
+ dhd_rtt_convert_to_chspec(rtt_target->channel);
+ if (rtt_target->chanspec == 0) {
+ WL_ERR(("Channel is not valid \n"));
+ err = -EINVAL;
+ goto exit;
+ }
+ WL_INFORM_MEM(("Target addr %s, Channel : %s for RTT \n",
+ bcm_ether_ntoa((const struct ether_addr *)&rtt_target->addr,
+ eabuf),
+ wf_chspec_ntoa(rtt_target->chanspec, chanbuf)));
+ rtt_target++;
+ }
+ break;
+ }
+ }
+ WL_DBG(("leave :target_cnt : %d\n", rtt_param.rtt_target_cnt));
+ if (dhd_dev_rtt_set_cfg(bcmcfg_to_prmry_ndev(cfg), &rtt_param) < 0) {
+ WL_ERR(("Could not set RTT configuration\n"));
+ err = -EINVAL;
+ }
+exit:
+ /* free the target info list */
+ if (rtt_param.target_info) {
+ MFREE(cfg->osh, rtt_param.target_info,
+ TARGET_INFO_SIZE(target_cnt));
+ }
+ return err;
+}
+
+static int
+wl_cfgvendor_rtt_cancel_config(struct wiphy *wiphy, struct wireless_dev *wdev,
+ const void *data, int len)
+{
+ int err = 0, rem, type, target_cnt = 0;
+ int target_idx = 0;
+ const struct nlattr *iter;
+ struct ether_addr *mac_list = NULL;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+
+ if (len <= 0) {
+ WL_ERR(("Length of nlattr is not valid len : %d\n", len));
+ err = -EINVAL;
+ goto exit;
+ }
+ nla_for_each_attr(iter, data, len, rem) {
+ type = nla_type(iter);
+ switch (type) {
+ case RTT_ATTRIBUTE_TARGET_CNT:
+ if (mac_list != NULL) {
+ WL_ERR(("mac_list is not NULL\n"));
+ err = -EINVAL;
+ goto exit;
+ }
+ target_cnt = nla_get_u8(iter);
+ if ((target_cnt > 0) && (target_cnt < RTT_MAX_TARGET_CNT)) {
+ mac_list = (struct ether_addr *)MALLOCZ(cfg->osh,
+ target_cnt * ETHER_ADDR_LEN);
+ if (mac_list == NULL) {
+ WL_ERR(("failed to allocate mem for mac list\n"));
+ err = -EINVAL;
+ goto exit;
+ }
+ } else {
+ /* cancel the current whole RTT process */
+ goto cancel;
+ }
+ break;
+ case RTT_ATTRIBUTE_TARGET_MAC:
+ if (mac_list == NULL) {
+				WL_ERR(("ATTRIBUTE_TARGET_CNT not found before"
+					" ATTRIBUTE_TARGET_MAC\n"));
+ err = -EINVAL;
+ goto exit;
+ }
+
+ if (target_idx >= target_cnt) {
+ WL_ERR(("More TARGET_MAC entries found, "
+ "expected TARGET_CNT:%d\n", target_cnt));
+ err = -EINVAL;
+ goto exit;
+ }
+
+ if (nla_len(iter) != ETHER_ADDR_LEN) {
+ WL_ERR(("Invalid TARGET_MAC ATTR len :%d\n", nla_len(iter)));
+ err = -EINVAL;
+ goto exit;
+ }
+
+ memcpy(&mac_list[target_idx], nla_data(iter), ETHER_ADDR_LEN);
+ target_idx++;
+
+ break;
+ default:
+			WL_ERR(("Unknown type: %d\n", type));
+ err = -EINVAL;
+ goto exit;
+ }
+ }
+cancel:
+ if (mac_list && dhd_dev_rtt_cancel_cfg(
+ bcmcfg_to_prmry_ndev(cfg), mac_list, target_cnt) < 0) {
+ WL_ERR(("Could not cancel RTT configuration\n"));
+ err = -EINVAL;
+ }
+
+exit:
+ if (mac_list) {
+ MFREE(cfg->osh, mac_list, target_cnt * ETHER_ADDR_LEN);
+ }
+ return err;
+}
+
+static int
+wl_cfgvendor_rtt_get_capability(struct wiphy *wiphy, struct wireless_dev *wdev,
+ const void *data, int len)
+{
+ int err = 0;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ rtt_capabilities_t capability;
+
+ err = dhd_dev_rtt_capability(bcmcfg_to_prmry_ndev(cfg), &capability);
+ if (unlikely(err)) {
+ WL_ERR(("Vendor Command reply failed ret:%d \n", err));
+ goto exit;
+ }
+ err = wl_cfgvendor_send_cmd_reply(wiphy, &capability, sizeof(capability));
+
+ if (unlikely(err)) {
+ WL_ERR(("Vendor Command reply failed ret:%d \n", err));
+ }
+exit:
+ return err;
+}
+static int
+get_responder_info(struct bcm_cfg80211 *cfg,
+ struct wifi_rtt_responder *responder_info)
+{
+ int err = 0;
+ rtt_capabilities_t capability;
+ err = dhd_dev_rtt_capability(bcmcfg_to_prmry_ndev(cfg), &capability);
+ if (unlikely(err)) {
+ WL_ERR(("Could not get responder capability:%d \n", err));
+ return err;
+ }
+ if (capability.preamble_support & RTT_PREAMBLE_VHT) {
+ responder_info->preamble = RTT_PREAMBLE_VHT;
+ } else if (capability.preamble_support & RTT_PREAMBLE_HT) {
+ responder_info->preamble = RTT_PREAMBLE_HT;
+ } else {
+ responder_info->preamble = RTT_PREAMBLE_LEGACY;
+ }
+ err = dhd_dev_rtt_avail_channel(bcmcfg_to_prmry_ndev(cfg), &(responder_info->channel));
+ if (unlikely(err)) {
+ WL_ERR(("Could not get available channel:%d \n", err));
+ return err;
+ }
+ return err;
+}
+static int
+wl_cfgvendor_rtt_get_responder_info(struct wiphy *wiphy, struct wireless_dev *wdev,
+ const void *data, int len)
+{
+ int err = 0;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ wifi_rtt_responder_t responder_info;
+
+ WL_DBG(("Recv -get_avail_ch command \n"));
+
+ bzero(&responder_info, sizeof(responder_info));
+ err = get_responder_info(cfg, &responder_info);
+ if (unlikely(err)) {
+ WL_ERR(("Failed to get responder info:%d \n", err));
+ return err;
+ }
+ err = wl_cfgvendor_send_cmd_reply(wiphy, &responder_info, sizeof(responder_info));
+ if (unlikely(err)) {
+ WL_ERR(("Vendor cmd reply for -get_avail_ch failed ret:%d \n", err));
+ }
+ return err;
+}
+
+static int
+wl_cfgvendor_rtt_set_responder(struct wiphy *wiphy, struct wireless_dev *wdev,
+ const void *data, int len)
+{
+ int err = 0;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct net_device *ndev = wdev_to_wlc_ndev(wdev, cfg);
+ wifi_rtt_responder_t responder_info;
+
+ WL_DBG(("Recv rtt -enable_resp cmd.\n"));
+
+ bzero(&responder_info, sizeof(responder_info));
+
+	/*
+	 * Passing channel as NULL until the implementation
+	 * to get channel info from upper layers is done
+	 */
+ err = dhd_dev_rtt_enable_responder(ndev, NULL);
+ if (unlikely(err)) {
+ WL_ERR(("Could not enable responder ret:%d \n", err));
+ goto done;
+ }
+ err = get_responder_info(cfg, &responder_info);
+ if (unlikely(err)) {
+ WL_ERR(("Failed to get responder info:%d \n", err));
+ dhd_dev_rtt_cancel_responder(ndev);
+ goto done;
+ }
+done:
+ err = wl_cfgvendor_send_cmd_reply(wiphy, &responder_info, sizeof(responder_info));
+ if (unlikely(err)) {
+ WL_ERR(("Vendor cmd reply for -enable_resp failed ret:%d \n", err));
+ }
+ return err;
+}
+
+static int
+wl_cfgvendor_rtt_cancel_responder(struct wiphy *wiphy, struct wireless_dev *wdev,
+ const void *data, int len)
+{
+ int err = 0;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+
+ WL_DBG(("Recv rtt -cancel_resp cmd \n"));
+
+ err = dhd_dev_rtt_cancel_responder(bcmcfg_to_prmry_ndev(cfg));
+ if (unlikely(err)) {
+ WL_ERR(("Vendor cmd -cancel_resp failed ret:%d \n", err));
+ }
+ return err;
+}
+#endif /* RTT_SUPPORT */
+
+#ifdef GSCAN_SUPPORT
+static int wl_cfgvendor_enable_lazy_roam(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int err = -EINVAL;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ int type;
+ uint32 lazy_roam_enable_flag;
+
+ if (!data) {
+ WL_ERR(("data is not available\n"));
+ return -EINVAL;
+ }
+
+ if (len <= 0) {
+		WL_ERR(("invalid len %d\n", len));
+ return -EINVAL;
+ }
+
+ type = nla_type(data);
+
+ if (type == GSCAN_ATTRIBUTE_LAZY_ROAM_ENABLE) {
+ lazy_roam_enable_flag = nla_get_u32(data);
+
+ err = dhd_dev_lazy_roam_enable(bcmcfg_to_prmry_ndev(cfg),
+ lazy_roam_enable_flag);
+ if (unlikely(err))
+ WL_ERR(("Could not enable lazy roam:%d \n", err));
+ }
+
+ return err;
+}
+
+static int wl_cfgvendor_set_lazy_roam_cfg(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int err = 0, tmp, type;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ wlc_roam_exp_params_t roam_param;
+ const struct nlattr *iter;
+
+ bzero(&roam_param, sizeof(roam_param));
+
+ nla_for_each_attr(iter, data, len, tmp) {
+ type = nla_type(iter);
+ switch (type) {
+ case GSCAN_ATTRIBUTE_A_BAND_BOOST_THRESHOLD:
+ roam_param.a_band_boost_threshold = nla_get_u32(iter);
+ break;
+ case GSCAN_ATTRIBUTE_A_BAND_PENALTY_THRESHOLD:
+ roam_param.a_band_penalty_threshold = nla_get_u32(iter);
+ break;
+ case GSCAN_ATTRIBUTE_A_BAND_BOOST_FACTOR:
+ roam_param.a_band_boost_factor = nla_get_u32(iter);
+ break;
+ case GSCAN_ATTRIBUTE_A_BAND_PENALTY_FACTOR:
+ roam_param.a_band_penalty_factor = nla_get_u32(iter);
+ break;
+ case GSCAN_ATTRIBUTE_A_BAND_MAX_BOOST:
+ roam_param.a_band_max_boost = nla_get_u32(iter);
+ break;
+ case GSCAN_ATTRIBUTE_LAZY_ROAM_HYSTERESIS:
+ roam_param.cur_bssid_boost = nla_get_u32(iter);
+ break;
+ case GSCAN_ATTRIBUTE_ALERT_ROAM_RSSI_TRIGGER:
+ roam_param.alert_roam_trigger_threshold = nla_get_u32(iter);
+ break;
+ }
+ }
+
+ if (dhd_dev_set_lazy_roam_cfg(bcmcfg_to_prmry_ndev(cfg), &roam_param) < 0) {
+ WL_ERR(("Could not set batch cfg\n"));
+ err = -EINVAL;
+ }
+ return err;
+}
+
+/* small helper function */
+static wl_bssid_pref_cfg_t *
+create_bssid_pref_cfg(struct bcm_cfg80211 *cfg, uint32 num, uint32 *buf_len)
+{
+ wl_bssid_pref_cfg_t *bssid_pref;
+
+ *buf_len = sizeof(wl_bssid_pref_cfg_t);
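+	/* the cfg struct already embeds one bssid entry, so add (num - 1) more */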
+ if (num) {
+ *buf_len += (num - 1) * sizeof(wl_bssid_pref_list_t);
+ }
+ bssid_pref = (wl_bssid_pref_cfg_t *)MALLOC(cfg->osh, *buf_len);
+
+ return bssid_pref;
+}
+
+static int
+wl_cfgvendor_set_bssid_pref(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int err = 0;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ wl_bssid_pref_cfg_t *bssid_pref = NULL;
+ wl_bssid_pref_list_t *bssids;
+ int tmp, tmp1, tmp2, type;
+ const struct nlattr *outer, *inner, *iter;
+ uint32 flush = 0, num = 0, buf_len = 0;
+ uint8 bssid_found = 0, rssi_found = 0;
+
+ /* Assumption: NUM attribute must come first */
+ nla_for_each_attr(iter, data, len, tmp2) {
+ type = nla_type(iter);
+ switch (type) {
+ case GSCAN_ATTRIBUTE_NUM_BSSID:
+ if (num) {
+				WL_ERR(("attempt to override bssid num\n"));
+ err = -EINVAL;
+ goto exit;
+ }
+ if (nla_len(iter) != sizeof(uint32)) {
+ WL_ERR(("nla_len not match\n"));
+ err = -EINVAL;
+ goto exit;
+ }
+ num = nla_get_u32(iter);
+ if (num == 0 || num > MAX_BSSID_PREF_LIST_NUM) {
+ WL_ERR(("wrong BSSID num:%d\n", num));
+ err = -EINVAL;
+ goto exit;
+ }
+ if ((bssid_pref = create_bssid_pref_cfg(cfg, num, &buf_len))
+ == NULL) {
+ WL_ERR(("Can't malloc memory\n"));
+ err = -ENOMEM;
+ goto exit;
+ }
+ break;
+ case GSCAN_ATTRIBUTE_BSSID_PREF_FLUSH:
+ if (nla_len(iter) != sizeof(uint32)) {
+ WL_ERR(("nla_len not match\n"));
+ err = -EINVAL;
+ goto exit;
+ }
+ flush = nla_get_u32(iter);
+ if (flush != 1) {
+ WL_ERR(("wrong flush value\n"));
+ err = -EINVAL;
+ goto exit;
+ }
+ break;
+ case GSCAN_ATTRIBUTE_BSSID_PREF_LIST:
+ if (!num || !bssid_pref) {
+ WL_ERR(("bssid list count not set\n"));
+ err = -EINVAL;
+ goto exit;
+ }
+ bssid_pref->count = 0;
+ bssids = bssid_pref->bssids;
+ nla_for_each_nested(outer, iter, tmp) {
+ if (bssid_pref->count >= num) {
+					WL_ERR(("too many bssid entries\n"));
+ err = -EINVAL;
+ goto exit;
+ }
+ bssid_found = 0;
+ rssi_found = 0;
+ nla_for_each_nested(inner, outer, tmp1) {
+ type = nla_type(inner);
+ switch (type) {
+ case GSCAN_ATTRIBUTE_BSSID_PREF:
+ if (nla_len(inner) != ETHER_ADDR_LEN) {
+ WL_ERR(("nla_len not match.\n"));
+ err = -EINVAL;
+ goto exit;
+ }
+ memcpy(&(bssids[bssid_pref->count].bssid),
+ nla_data(inner), ETHER_ADDR_LEN);
+ /* not used for now */
+ bssids[bssid_pref->count].flags = 0;
+ bssid_found = 1;
+ break;
+ case GSCAN_ATTRIBUTE_RSSI_MODIFIER:
+ if (nla_len(inner) != sizeof(uint32)) {
+ WL_ERR(("nla_len not match.\n"));
+ err = -EINVAL;
+ goto exit;
+ }
+ bssids[bssid_pref->count].rssi_factor =
+ (int8) nla_get_u32(inner);
+ rssi_found = 1;
+ break;
+ default:
+ WL_ERR(("wrong type:%d\n", type));
+ err = -EINVAL;
+ goto exit;
+ }
+ if (bssid_found && rssi_found) {
+ break;
+ }
+ }
+ bssid_pref->count++;
+ }
+ break;
+ default:
+ WL_ERR(("%s: No such attribute %d\n", __FUNCTION__, type));
+ break;
+ }
+ }
+
+ if (!bssid_pref) {
+		/* Handle the case where only a flush is requested */
+ if (flush) {
+ if ((bssid_pref = create_bssid_pref_cfg(cfg, 0, &buf_len)) == NULL) {
+ WL_ERR(("%s: Can't malloc memory\n", __FUNCTION__));
+ err = -ENOMEM;
+ goto exit;
+ }
+ bssid_pref->count = 0;
+ } else {
+ err = -EINVAL;
+ goto exit;
+ }
+ }
+ err = dhd_dev_set_lazy_roam_bssid_pref(bcmcfg_to_prmry_ndev(cfg),
+ bssid_pref, flush);
+exit:
+ if (bssid_pref) {
+ MFREE(cfg->osh, bssid_pref, buf_len);
+ }
+ return err;
+}
+#endif /* GSCAN_SUPPORT */
+#if defined(GSCAN_SUPPORT) || defined(ROAMEXP_SUPPORT)
+static int
+wl_cfgvendor_set_bssid_blacklist(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ maclist_t *blacklist = NULL;
+ int err = 0;
+ int type, tmp;
+ const struct nlattr *iter;
+ uint32 mem_needed = 0, flush = 0, num = 0;
+
+ /* Assumption: NUM attribute must come first */
+ nla_for_each_attr(iter, data, len, tmp) {
+ type = nla_type(iter);
+ switch (type) {
+ case GSCAN_ATTRIBUTE_NUM_BSSID:
+ if (num != 0) {
+ WL_ERR(("attempt to change BSSID num\n"));
+ err = -EINVAL;
+ goto exit;
+ }
+ if (nla_len(iter) != sizeof(uint32)) {
+ WL_ERR(("not matching nla_len.\n"));
+ err = -EINVAL;
+ goto exit;
+ }
+ num = nla_get_u32(iter);
+ if (num == 0 || num > MAX_BSSID_BLACKLIST_NUM) {
+ WL_ERR(("wrong BSSID count:%d\n", num));
+ err = -EINVAL;
+ goto exit;
+ }
+ if (!blacklist) {
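+				/* maclist_t header followed by num ether_addr entries */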
+ mem_needed = (uint32) (OFFSETOF(maclist_t, ea) +
+ sizeof(struct ether_addr) * (num));
+ blacklist = (maclist_t *)
+ MALLOCZ(cfg->osh, mem_needed);
+ if (!blacklist) {
+ WL_ERR(("MALLOCZ failed.\n"));
+ err = -ENOMEM;
+ goto exit;
+ }
+ }
+ break;
+ case GSCAN_ATTRIBUTE_BSSID_BLACKLIST_FLUSH:
+ if (nla_len(iter) != sizeof(uint32)) {
+ WL_ERR(("not matching nla_len.\n"));
+ err = -EINVAL;
+ goto exit;
+ }
+ flush = nla_get_u32(iter);
+ if (flush != 1) {
+				WL_ERR(("flush arg is wrong:%d\n", flush));
+ err = -EINVAL;
+ goto exit;
+ }
+ break;
+ case GSCAN_ATTRIBUTE_BLACKLIST_BSSID:
+ if (num == 0 || !blacklist) {
+ WL_ERR(("number of BSSIDs not received.\n"));
+ err = -EINVAL;
+ goto exit;
+ }
+ if (nla_len(iter) != ETHER_ADDR_LEN) {
+ WL_ERR(("not matching nla_len.\n"));
+ err = -EINVAL;
+ goto exit;
+ }
+ if (blacklist->count >= num) {
+				WL_ERR(("more BSSIDs received than expected:%d\n",
+ blacklist->count));
+ err = -EINVAL;
+ goto exit;
+ }
+ memcpy(&(blacklist->ea[blacklist->count]), nla_data(iter),
+ ETHER_ADDR_LEN);
+ blacklist->count++;
+ break;
+ default:
+ WL_ERR(("No such attribute:%d\n", type));
+ break;
+ }
+ }
+
+ if (blacklist && (blacklist->count != num)) {
+		WL_ERR(("bssid count:%d does not match expected:%d\n",
+			blacklist->count, num));
+ err = -EINVAL;
+ goto exit;
+ }
+
+ err = dhd_dev_set_blacklist_bssid(bcmcfg_to_prmry_ndev(cfg),
+ blacklist, mem_needed, flush);
+exit:
+ MFREE(cfg->osh, blacklist, mem_needed);
+ return err;
+}
+
+static int
+wl_cfgvendor_set_ssid_whitelist(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int err = 0;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ wl_ssid_whitelist_t *ssid_whitelist = NULL;
+ wlc_ssid_t *ssid_elem;
+ int tmp, tmp1, mem_needed = 0, type;
+ const struct nlattr *iter, *iter1;
+ uint32 flush = 0, num = 0;
+ int ssid_found = 0;
+
+ /* Assumption: NUM attribute must come first */
+ nla_for_each_attr(iter, data, len, tmp) {
+ type = nla_type(iter);
+ switch (type) {
+ case GSCAN_ATTRIBUTE_NUM_WL_SSID:
+ if (num != 0) {
+ WL_ERR(("try to change SSID num\n"));
+ err = -EINVAL;
+ goto exit;
+ }
+ if (nla_len(iter) != sizeof(uint32)) {
+ WL_ERR(("not matching nla_len.\n"));
+ err = -EINVAL;
+ goto exit;
+ }
+ num = nla_get_u32(iter);
+ if (num == 0 || num > MAX_SSID_WHITELIST_NUM) {
+ WL_ERR(("wrong SSID count:%d\n", num));
+ err = -EINVAL;
+ goto exit;
+ }
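+			/* whitelist header plus num wlc_ssid_t entries */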
+ mem_needed = sizeof(wl_ssid_whitelist_t) +
+ sizeof(wlc_ssid_t) * num;
+ ssid_whitelist = (wl_ssid_whitelist_t *)
+ MALLOCZ(cfg->osh, mem_needed);
+ if (ssid_whitelist == NULL) {
+ WL_ERR(("failed to alloc mem\n"));
+ err = -ENOMEM;
+ goto exit;
+ }
+ break;
+ case GSCAN_ATTRIBUTE_WL_SSID_FLUSH:
+ if (nla_len(iter) != sizeof(uint32)) {
+ WL_ERR(("not matching nla_len.\n"));
+ err = -EINVAL;
+ goto exit;
+ }
+ flush = nla_get_u32(iter);
+ if (flush != 1) {
+				WL_ERR(("flush arg is wrong:%d\n", flush));
+ err = -EINVAL;
+ goto exit;
+ }
+ break;
+ case GSCAN_ATTRIBUTE_WHITELIST_SSID_ELEM:
+ if (!num || !ssid_whitelist) {
+ WL_ERR(("num ssid is not set!\n"));
+ err = -EINVAL;
+ goto exit;
+ }
+ if (ssid_whitelist->ssid_count >= num) {
+ WL_ERR(("too many SSIDs:%d\n",
+ ssid_whitelist->ssid_count));
+ err = -EINVAL;
+ goto exit;
+ }
+
+ ssid_elem = &ssid_whitelist->ssids[
+ ssid_whitelist->ssid_count];
+ ssid_found = 0;
+ nla_for_each_nested(iter1, iter, tmp1) {
+ type = nla_type(iter1);
+ switch (type) {
+ case GSCAN_ATTRIBUTE_WL_SSID_LEN:
+ if (nla_len(iter1) != sizeof(uint32)) {
+ WL_ERR(("not match nla_len\n"));
+ err = -EINVAL;
+ goto exit;
+ }
+ ssid_elem->SSID_len = nla_get_u32(iter1);
+ if (ssid_elem->SSID_len >
+ DOT11_MAX_SSID_LEN) {
+ WL_ERR(("wrong SSID len:%d\n",
+ ssid_elem->SSID_len));
+ err = -EINVAL;
+ goto exit;
+ }
+ break;
+ case GSCAN_ATTRIBUTE_WHITELIST_SSID:
+ if (ssid_elem->SSID_len == 0) {
+ WL_ERR(("SSID_len not received\n"));
+ err = -EINVAL;
+ goto exit;
+ }
+ if (nla_len(iter1) != ssid_elem->SSID_len) {
+ WL_ERR(("not match nla_len\n"));
+ err = -EINVAL;
+ goto exit;
+ }
+ memcpy(ssid_elem->SSID, nla_data(iter1),
+ ssid_elem->SSID_len);
+ ssid_found = 1;
+ break;
+ }
+ if (ssid_found) {
+ ssid_whitelist->ssid_count++;
+ break;
+ }
+ }
+ break;
+ default:
+ WL_ERR(("No such attribute: %d\n", type));
+ break;
+ }
+ }
+
+ if (ssid_whitelist && (ssid_whitelist->ssid_count != num)) {
+		WL_ERR(("ssid count:%d does not match expected:%d\n",
+			ssid_whitelist->ssid_count, num));
+ err = -EINVAL;
+ goto exit;
+ }
+ err = dhd_dev_set_whitelist_ssid(bcmcfg_to_prmry_ndev(cfg),
+ ssid_whitelist, mem_needed, flush);
+ if (err == BCME_UNSUPPORTED) {
+		/* If the firmware doesn't support this feature, ignore the error.
+		 * The Android framework doesn't populate/use whitelist ssids
+		 * as of now, but invokes the whitelist as part of the roam
+		 * config API, so this handler cannot be compiled out; it is
+		 * safe to ignore.
+		 */
+		WL_ERR(("whitelist ssid not supported. Ignore."));
+ err = BCME_OK;
+ }
+exit:
+ MFREE(cfg->osh, ssid_whitelist, mem_needed);
+ return err;
+}
+#endif /* GSCAN_SUPPORT || ROAMEXP_SUPPORT */
+
+#ifdef ROAMEXP_SUPPORT
+typedef enum {
+ FW_ROAMING_ENABLE = 1,
+ FW_ROAMING_DISABLE,
+ FW_ROAMING_PAUSE,
+ FW_ROAMING_RESUME
+} fw_roaming_state_t;
+
+static int
+wl_cfgvendor_set_fw_roaming_state(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ fw_roaming_state_t requested_roaming_state;
+ int type;
+ int err = 0;
+
+ if (!data) {
+ WL_ERR(("data is not available\n"));
+ return -EINVAL;
+ }
+
+ if (len <= 0) {
+ WL_ERR(("invalid len %d\n", len));
+ return -EINVAL;
+ }
+
+ /* Get the requested fw roaming state */
+ type = nla_type(data);
+ if (type != GSCAN_ATTRIBUTE_ROAM_STATE_SET) {
+ WL_ERR(("%s: Invalid attribute %d\n", __FUNCTION__, type));
+ return -EINVAL;
+ }
+
+ requested_roaming_state = nla_get_u32(data);
+ WL_INFORM(("setting FW roaming state to %d\n", requested_roaming_state));
+
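+	/* ENABLE/RESUME clear roam_off in FW; DISABLE/PAUSE set it */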
+ if ((requested_roaming_state == FW_ROAMING_ENABLE) ||
+ (requested_roaming_state == FW_ROAMING_RESUME)) {
+ err = wldev_iovar_setint(wdev_to_ndev(wdev), "roam_off", FALSE);
+ } else if ((requested_roaming_state == FW_ROAMING_DISABLE) ||
+ (requested_roaming_state == FW_ROAMING_PAUSE)) {
+ err = wldev_iovar_setint(wdev_to_ndev(wdev), "roam_off", TRUE);
+ } else {
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static int
+wl_cfgvendor_fw_roam_get_capability(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int err = 0;
+ wifi_roaming_capabilities_t roaming_capability;
+
+ /* Update max number of blacklist bssids supported */
+ roaming_capability.max_blacklist_size = MAX_BSSID_BLACKLIST_NUM;
+ roaming_capability.max_whitelist_size = MAX_SSID_WHITELIST_NUM;
+ err = wl_cfgvendor_send_cmd_reply(wiphy, &roaming_capability,
+ sizeof(roaming_capability));
+ if (unlikely(err)) {
+ WL_ERR(("Vendor cmd reply for fw roam capability failed ret:%d \n", err));
+ }
+
+ return err;
+}
+#endif /* ROAMEXP_SUPPORT */
+
+static int
+wl_cfgvendor_priv_string_handler(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ int ret = 0;
+ int ret_len = 0, payload = 0, msglen;
+ const struct bcm_nlmsg_hdr *nlioc = data;
+ void *buf = NULL, *cur;
+ int maxmsglen = PAGE_SIZE - 0x100;
+ struct sk_buff *reply;
+
+#if defined(OEM_ANDROID)
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(wdev->netdev);
+
+ /* send to dongle only if we are not waiting for reload already */
+ if (dhdp && dhdp->hang_was_sent) {
+ WL_INFORM(("Bus down. HANG was sent up earlier\n"));
+ DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhdp, DHD_EVENT_TIMEOUT_MS);
+ DHD_OS_WAKE_UNLOCK(dhdp);
+ return OSL_ERROR(BCME_DONGLE_DOWN);
+ }
+#endif /* (OEM_ANDROID) */
+
+ if (!data) {
+ WL_ERR(("data is not available\n"));
+ return BCME_BADARG;
+ }
+
+ if (len <= sizeof(struct bcm_nlmsg_hdr)) {
+ WL_ERR(("invalid len %d\n", len));
+ return BCME_BADARG;
+ }
+
+ WL_DBG(("entry: cmd = %d\n", nlioc->cmd));
+
+ if (nlioc->offset != sizeof(struct bcm_nlmsg_hdr)) {
+ WL_ERR(("invalid offset %d\n", nlioc->offset));
+ return BCME_BADARG;
+ }
+ len -= sizeof(struct bcm_nlmsg_hdr);
+ ret_len = nlioc->len;
+ if (ret_len > 0 || len > 0) {
+ if (len >= DHD_IOCTL_MAXLEN) {
+ WL_ERR(("oversize input buffer %d\n", len));
+ len = DHD_IOCTL_MAXLEN - 1;
+ }
+ if (ret_len >= DHD_IOCTL_MAXLEN) {
+ WL_ERR(("oversize return buffer %d\n", ret_len));
+ ret_len = DHD_IOCTL_MAXLEN - 1;
+ }
+
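+		/* buffer sized for the larger of input and return data, plus a NUL */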
+ payload = max(ret_len, len) + 1;
+ buf = vzalloc(payload);
+ if (!buf) {
+ return -ENOMEM;
+ }
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ memcpy(buf, (void *)((char *)nlioc + nlioc->offset), len);
+ GCC_DIAGNOSTIC_POP();
+ *((char *)buf + len) = '\0';
+ }
+
+ ret = dhd_cfgvendor_priv_string_handler(cfg, wdev, nlioc, buf);
+ if (ret) {
+ WL_ERR(("dhd_cfgvendor returned error %d", ret));
+ vfree(buf);
+ return ret;
+ }
+ cur = buf;
+ while (ret_len > 0) {
+ msglen = ret_len > maxmsglen ? maxmsglen : ret_len;
+ ret_len -= msglen;
+ payload = msglen + sizeof(msglen);
+ reply = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, payload);
+ if (!reply) {
+ WL_ERR(("Failed to allocate reply msg\n"));
+ ret = -ENOMEM;
+ break;
+ }
+
+ if (nla_put(reply, BCM_NLATTR_DATA, msglen, cur) ||
+ nla_put_u16(reply, BCM_NLATTR_LEN, msglen)) {
+ kfree_skb(reply);
+ ret = -ENOBUFS;
+ break;
+ }
+
+ ret = cfg80211_vendor_cmd_reply(reply);
+ if (ret) {
+ WL_ERR(("testmode reply failed:%d\n", ret));
+ break;
+ }
+ cur = (void *)((char *)cur + msglen);
+ }
+
+ return ret;
+}
+
+struct net_device *
+wl_cfgvendor_get_ndev(struct bcm_cfg80211 *cfg, struct wireless_dev *wdev,
+ const char *data, unsigned long int *out_addr)
+{
+ char *pos, *pos1;
+ char ifname[IFNAMSIZ + 1] = {0};
+ struct net_info *iter, *next;
+ struct net_device *ndev = NULL;
+ ulong ifname_len;
+ *out_addr = (unsigned long int) data; /* point to command str by default */
+
+ /* check whether ifname=<ifname> is provided in the command */
+ pos = strstr(data, "ifname=");
+ if (pos) {
+ pos += strlen("ifname=");
+ pos1 = strstr(pos, " ");
+ if (!pos1) {
+ WL_ERR(("command format error \n"));
+ return NULL;
+ }
+
+ ifname_len = pos1 - pos;
+ if (memcpy_s(ifname, (sizeof(ifname) - 1), pos, ifname_len) != BCME_OK) {
+ WL_ERR(("Failed to copy data. len: %ld\n", ifname_len));
+ return NULL;
+ }
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ for_each_ndev(cfg, iter, next) {
+ if (iter->ndev) {
+ if (strncmp(iter->ndev->name, ifname,
+ strlen(iter->ndev->name)) == 0) {
+ /* matching ifname found */
+ WL_DBG(("matching interface (%s) found ndev:%p \n",
+ iter->ndev->name, iter->ndev));
+ *out_addr = (unsigned long int)(pos1 + 1);
+ /* Returns the command portion after ifname=<name> */
+ return iter->ndev;
+ }
+ }
+ }
+ GCC_DIAGNOSTIC_POP();
+ WL_ERR(("Couldn't find ifname:%s in the netinfo list \n",
+ ifname));
+ return NULL;
+ }
+
+ /* If ifname=<name> arg is not provided, use default ndev */
+ ndev = wdev->netdev ? wdev->netdev : bcmcfg_to_prmry_ndev(cfg);
+ WL_DBG(("Using default ndev (%s) \n", ndev->name));
+ return ndev;
+}
+
+#ifdef WL_SAE
+static int wl_cfgvendor_map_supp_sae_pwe_to_fw(u32 sup_value, u32 *sae_pwe)
+{
+ s32 ret = BCME_OK;
+ switch (sup_value) {
+ case SUPP_SAE_PWE_LOOP:
+ *sae_pwe = SAE_PWE_LOOP;
+ break;
+ case SUPP_SAE_PWE_H2E:
+ *sae_pwe = SAE_PWE_H2E;
+ break;
+ case SUPP_SAE_PWE_TRANS:
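+		/* transition mode: allow both hunting-and-pecking loop and hash-to-element */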
+ *sae_pwe = SAE_PWE_LOOP | SAE_PWE_H2E;
+ break;
+ default:
+ ret = BCME_BADARG;
+ }
+ return ret;
+}
+#endif /* WL_SAE */
+
+int
+wl_cfgvendor_connect_params_handler(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ struct net_device *net = wdev->netdev;
+ int ret = BCME_OK;
+ int attr_type;
+ int rem = len;
+ const struct nlattr *iter;
+
+ BCM_REFERENCE(net);
+
+ nla_for_each_attr(iter, data, len, rem) {
+ attr_type = nla_type(iter);
+ WL_DBG(("attr type: (%u)\n", attr_type));
+
+ switch (attr_type) {
+#ifdef WL_SAE
+ case BRCM_ATTR_SAE_PWE: {
+ u32 sae_pwe = 0;
+ if (nla_len(iter) != sizeof(uint32)) {
+ WL_ERR(("Invalid value of sae_pwe\n"));
+ ret = -EINVAL;
+ break;
+ }
+ ret = wl_cfgvendor_map_supp_sae_pwe_to_fw(nla_get_u32(iter), &sae_pwe);
+ if (unlikely(ret)) {
+ WL_ERR(("Invalid sae_pwe\n"));
+ break;
+ }
+ ret = wl_cfg80211_set_wsec_info(net, &sae_pwe,
+ sizeof(sae_pwe), WL_WSEC_INFO_BSS_SAE_PWE);
+ if (unlikely(ret)) {
+ WL_ERR(("set wsec_info_sae_pwe failed \n"));
+ }
+ break;
+ }
+#endif /* WL_SAE */
+ /* Add new attributes here */
+ default:
+ WL_DBG(("%s: Unknown type, %d\n", __FUNCTION__, attr_type));
+ }
+ }
+
+ return ret;
+}
+
+int
+wl_cfgvendor_start_ap_params_handler(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ struct net_device *net = wdev->netdev;
+ int ret = BCME_OK;
+ int attr_type;
+ int rem = len;
+ const struct nlattr *iter;
+
+ BCM_REFERENCE(net);
+
+ nla_for_each_attr(iter, data, len, rem) {
+ attr_type = nla_type(iter);
+ WL_DBG(("attr type: (%u)\n", attr_type));
+
+ switch (attr_type) {
+#ifdef WL_SAE
+ case BRCM_ATTR_SAE_PWE: {
+ u32 sae_pwe = 0;
+ if (nla_len(iter) != sizeof(uint32)) {
+ WL_ERR(("Invalid value of sae_pwe\n"));
+ ret = -EINVAL;
+ break;
+ }
+ ret = wl_cfgvendor_map_supp_sae_pwe_to_fw(nla_get_u32(iter), &sae_pwe);
+ if (unlikely(ret)) {
+ WL_ERR(("Invalid sae_pwe\n"));
+ break;
+ }
+ ret = wl_cfg80211_set_wsec_info(net, &sae_pwe,
+ sizeof(sae_pwe), WL_WSEC_INFO_BSS_SAE_PWE);
+ if (unlikely(ret)) {
+ WL_ERR(("set wsec_info_sae_pwe failed \n"));
+ }
+ break;
+ }
+#endif /* WL_SAE */
+ /* Add new attributes here */
+ default:
+ WL_DBG(("%s: Unknown type, %d\n", __FUNCTION__, attr_type));
+ }
+ }
+
+ return ret;
+}
+
+#if defined(WL_SAE) || defined(WL_CLIENT_SAE)
+static int
+wl_cfgvendor_set_sae_password(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int err = BCME_OK;
+ struct net_device *net = wdev->netdev;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(net);
+ wsec_pmk_t pmk;
+ s32 bssidx;
+
+/* This API is not needed for wpa_supplicant based SAE authentication */
+#ifdef WL_CLIENT_SAE
+ WL_INFORM_MEM(("Ignore for external sae auth\n"));
+ return BCME_OK;
+#endif /* WL_CLIENT_SAE */
+
+ /* clear the content of pmk structure before usage */
+ (void)memset_s(&pmk, sizeof(wsec_pmk_t), 0x0, sizeof(wsec_pmk_t));
+
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, net->ieee80211_ptr)) < 0) {
+ WL_ERR(("Find p2p index from wdev(%p) failed\n", net->ieee80211_ptr));
+ return BCME_ERROR;
+ }
+
+ if ((len < WSEC_MIN_PSK_LEN) || (len >= WSEC_MAX_PASSPHRASE_LEN)) {
+		WL_ERR(("Invalid passphrase length %d, should be >= 8 and < 256\n",
+ len));
+ err = BCME_BADLEN;
+ goto done;
+ }
+ /* Set AUTH to SAE */
+ err = wldev_iovar_setint_bsscfg(net, "wpa_auth", WPA3_AUTH_SAE_PSK, bssidx);
+ if (unlikely(err)) {
+ WL_ERR(("could not set wpa_auth (0x%x)\n", err));
+ goto done;
+ }
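+	/* hand the passphrase to FW via the PMK structure; WSEC_PASSPHRASE marks it as a passphrase */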
+ pmk.key_len = htod16(len);
+ bcopy((const u8*)data, pmk.key, len);
+ pmk.flags = htod16(WSEC_PASSPHRASE);
+
+ err = wldev_ioctl_set(net, WLC_SET_WSEC_PMK, &pmk, sizeof(pmk));
+ if (err) {
+ WL_ERR(("\n failed to set pmk %d\n", err));
+ goto done;
+ } else {
+ WL_INFORM_MEM(("sae passphrase set successfully\n"));
+ }
+done:
+ return err;
+}
+#endif /* WL_SAE || WL_CLIENT_SAE */
+
+#ifdef BCM_PRIV_CMD_SUPPORT
+/* strlen("ifname=") + IFNAMESIZE + strlen(" ") + '\0' */
+#define ANDROID_PRIV_CMD_IF_PREFIX_LEN (7 + IFNAMSIZ + 2)
+/* Max length for the reply buffer. For BRCM_ATTR_DRIVER_CMD, the reply
+ * would be a formatted string and reply buf would be the size of the
+ * string.
+ */
+#define WL_DRIVER_PRIV_CMD_LEN 512
+static int
+wl_cfgvendor_priv_bcm_handler(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ const struct nlattr *iter;
+ int err = 0;
+ int data_len = 0, cmd_len = 0, tmp = 0, type = 0;
+ struct net_device *ndev = wdev->netdev;
+ char *cmd = NULL;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ int bytes_written;
+ struct net_device *net = NULL;
+ unsigned long int cmd_out = 0;
+
+#if defined(WL_ANDROID_PRIV_CMD_OVER_NL80211) && defined(OEM_ANDROID)
+ u32 cmd_buf_len = WL_DRIVER_PRIV_CMD_LEN;
+ char cmd_prefix[ANDROID_PRIV_CMD_IF_PREFIX_LEN + 1] = {0};
+ char *cmd_buf = NULL;
+ char *current_pos;
+ u32 cmd_offset;
+#endif /* WL_ANDROID_PRIV_CMD_OVER_NL80211 && OEM_ANDROID */
+
+ WL_DBG(("%s: Enter \n", __func__));
+
+ /* hold wake lock */
+ net_os_wake_lock(ndev);
+
+ nla_for_each_attr(iter, data, len, tmp) {
+ type = nla_type(iter);
+ cmd = nla_data(iter);
+ cmd_len = nla_len(iter);
+
+ WL_DBG(("%s: type: %d cmd_len:%d cmd_ptr:%p \n", __func__, type, cmd_len, cmd));
+ if (!cmd || !cmd_len) {
+ WL_ERR(("Invalid cmd data \n"));
+ err = -EINVAL;
+ goto exit;
+ }
+
+#if defined(WL_ANDROID_PRIV_CMD_OVER_NL80211) && defined(OEM_ANDROID)
+ if (type == BRCM_ATTR_DRIVER_CMD) {
+ if ((cmd_len >= WL_DRIVER_PRIV_CMD_LEN) ||
+ (cmd_len < ANDROID_PRIV_CMD_IF_PREFIX_LEN)) {
+				WL_ERR(("Unexpected command length (%u)."
+					" Ignore the command\n", cmd_len));
+ err = -EINVAL;
+ goto exit;
+ }
+
+ /* check whether there is any ifname prefix provided */
+ if (memcpy_s(cmd_prefix, (sizeof(cmd_prefix) - 1),
+ cmd, ANDROID_PRIV_CMD_IF_PREFIX_LEN) != BCME_OK) {
+ WL_ERR(("memcpy failed for cmd buffer. len:%d\n", cmd_len));
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ net = wl_cfgvendor_get_ndev(cfg, wdev, cmd_prefix, &cmd_out);
+ if (!cmd_out || !net) {
+ WL_ERR(("ndev not found\n"));
+ err = -ENODEV;
+ goto exit;
+ }
+
+ /* find offset of the command */
+ current_pos = (char *)cmd_out;
+ cmd_offset = current_pos - cmd_prefix;
+
+ if (!current_pos || (cmd_offset) > ANDROID_PRIV_CMD_IF_PREFIX_LEN) {
+ WL_ERR(("Invalid len cmd_offset: %u \n", cmd_offset));
+ err = -EINVAL;
+ goto exit;
+ }
+
+			/* Private command data is expected to be in string format. To
+			 * ensure the data is null terminated, copy it to a local buffer
+			 * before use
+			 */
+ cmd_buf = (char *)MALLOCZ(cfg->osh, cmd_buf_len);
+ if (!cmd_buf) {
+ WL_ERR(("memory alloc failed for %u \n", cmd_buf_len));
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ /* Point to the start of command */
+ if (memcpy_s(cmd_buf, (WL_DRIVER_PRIV_CMD_LEN - 1),
+ (const void *)(cmd + cmd_offset),
+ (cmd_len - cmd_offset - 1)) != BCME_OK) {
+ WL_ERR(("memcpy failed for cmd buffer. len:%d\n", cmd_len));
+ err = -ENOMEM;
+ goto exit;
+ }
+ cmd_buf[WL_DRIVER_PRIV_CMD_LEN - 1] = '\0';
+
+ WL_DBG(("vendor_command: %s len: %u \n", cmd_buf, cmd_buf_len));
+ bytes_written = wl_handle_private_cmd(net, cmd_buf, cmd_buf_len);
+ WL_DBG(("bytes_written: %d \n", bytes_written));
+ if (bytes_written == 0) {
+ snprintf(cmd_buf, cmd_buf_len, "%s", "OK");
+ data_len = sizeof("OK");
+ } else if (bytes_written > 0) {
+ if (bytes_written >= (cmd_buf_len - 1)) {
+ /* Not expected */
+ ASSERT(0);
+ err = -EINVAL;
+ goto exit;
+ }
+ data_len = bytes_written;
+ } else {
+ /* -ve return value. Propagate the error back */
+ err = bytes_written;
+ goto exit;
+ }
+ if ((data_len > 0) && (data_len < (cmd_buf_len - 1)) && cmd_buf) {
+ err = wl_cfgvendor_send_cmd_reply(wiphy, cmd_buf, data_len);
+ if (unlikely(err)) {
+ WL_ERR(("Vendor Command reply failed ret:%d \n", err));
+ } else {
+ WL_DBG(("Vendor Command reply sent successfully!\n"));
+ }
+ } else {
+ /* No data to be sent back as reply */
+ WL_ERR(("Vendor_cmd: No reply expected. data_len:%u cmd_buf %p \n",
+ data_len, cmd_buf));
+ }
+ break;
+ }
+#endif /* WL_ANDROID_PRIV_CMD_OVER_NL80211 && OEM_ANDROID */
+
+ }
+
+exit:
+
+#if defined(WL_ANDROID_PRIV_CMD_OVER_NL80211) && defined(OEM_ANDROID)
+ if (cmd_buf) {
+ MFREE(cfg->osh, cmd_buf, cmd_buf_len);
+ }
+#endif /* WL_ANDROID_PRIV_CMD_OVER_NL80211 && OEM_ANDROID */
+
+ net_os_wake_unlock(ndev);
+ return err;
+}
+#endif /* BCM_PRIV_CMD_SUPPORT */
+
+#ifdef WL_NAN
+static const char *
+nan_attr_to_str(u16 cmd)
+{
+ const char *id2str;
+
+ switch (cmd) {
+ C2S(NAN_ATTRIBUTE_HEADER);
+ break;
+ C2S(NAN_ATTRIBUTE_HANDLE);
+ break;
+ C2S(NAN_ATTRIBUTE_TRANSAC_ID);
+ break;
+ C2S(NAN_ATTRIBUTE_2G_SUPPORT);
+ break;
+ C2S(NAN_ATTRIBUTE_SDF_2G_SUPPORT);
+ break;
+ C2S(NAN_ATTRIBUTE_SDF_5G_SUPPORT);
+ break;
+ C2S(NAN_ATTRIBUTE_5G_SUPPORT);
+ break;
+ C2S(NAN_ATTRIBUTE_SYNC_DISC_2G_BEACON);
+ break;
+ C2S(NAN_ATTRIBUTE_SYNC_DISC_5G_BEACON);
+ break;
+ C2S(NAN_ATTRIBUTE_CLUSTER_LOW);
+ break;
+ C2S(NAN_ATTRIBUTE_CLUSTER_HIGH);
+ break;
+ C2S(NAN_ATTRIBUTE_SID_BEACON);
+ break;
+ C2S(NAN_ATTRIBUTE_RSSI_CLOSE);
+ break;
+ C2S(NAN_ATTRIBUTE_RSSI_MIDDLE);
+ break;
+ C2S(NAN_ATTRIBUTE_RSSI_PROXIMITY);
+ break;
+ C2S(NAN_ATTRIBUTE_RSSI_CLOSE_5G);
+ break;
+ C2S(NAN_ATTRIBUTE_RSSI_MIDDLE_5G);
+ break;
+ C2S(NAN_ATTRIBUTE_RSSI_PROXIMITY_5G);
+ break;
+ C2S(NAN_ATTRIBUTE_HOP_COUNT_LIMIT);
+ break;
+ C2S(NAN_ATTRIBUTE_RANDOM_TIME);
+ break;
+ C2S(NAN_ATTRIBUTE_MASTER_PREF);
+ break;
+ C2S(NAN_ATTRIBUTE_PERIODIC_SCAN_INTERVAL);
+ break;
+ C2S(NAN_ATTRIBUTE_PUBLISH_ID);
+ break;
+ C2S(NAN_ATTRIBUTE_TTL);
+ break;
+ C2S(NAN_ATTRIBUTE_PERIOD);
+ break;
+ C2S(NAN_ATTRIBUTE_REPLIED_EVENT_FLAG);
+ break;
+ C2S(NAN_ATTRIBUTE_PUBLISH_TYPE);
+ break;
+ C2S(NAN_ATTRIBUTE_TX_TYPE);
+ break;
+ C2S(NAN_ATTRIBUTE_PUBLISH_COUNT);
+ break;
+ C2S(NAN_ATTRIBUTE_SERVICE_NAME_LEN);
+ break;
+ C2S(NAN_ATTRIBUTE_SERVICE_NAME);
+ break;
+ C2S(NAN_ATTRIBUTE_SERVICE_SPECIFIC_INFO_LEN);
+ break;
+ C2S(NAN_ATTRIBUTE_SERVICE_SPECIFIC_INFO);
+ break;
+ C2S(NAN_ATTRIBUTE_RX_MATCH_FILTER_LEN);
+ break;
+ C2S(NAN_ATTRIBUTE_RX_MATCH_FILTER);
+ break;
+ C2S(NAN_ATTRIBUTE_TX_MATCH_FILTER_LEN);
+ break;
+ C2S(NAN_ATTRIBUTE_TX_MATCH_FILTER);
+ break;
+ C2S(NAN_ATTRIBUTE_SUBSCRIBE_ID);
+ break;
+ C2S(NAN_ATTRIBUTE_SUBSCRIBE_TYPE);
+ break;
+ C2S(NAN_ATTRIBUTE_SERVICERESPONSEFILTER);
+ break;
+ C2S(NAN_ATTRIBUTE_SERVICERESPONSEINCLUDE);
+ break;
+ C2S(NAN_ATTRIBUTE_USESERVICERESPONSEFILTER);
+ break;
+ C2S(NAN_ATTRIBUTE_SSIREQUIREDFORMATCHINDICATION);
+ break;
+ C2S(NAN_ATTRIBUTE_SUBSCRIBE_MATCH);
+ break;
+ C2S(NAN_ATTRIBUTE_SUBSCRIBE_COUNT);
+ break;
+ C2S(NAN_ATTRIBUTE_MAC_ADDR);
+ break;
+ C2S(NAN_ATTRIBUTE_MAC_ADDR_LIST);
+ break;
+ C2S(NAN_ATTRIBUTE_MAC_ADDR_LIST_NUM_ENTRIES);
+ break;
+ C2S(NAN_ATTRIBUTE_PUBLISH_MATCH);
+ break;
+ C2S(NAN_ATTRIBUTE_ENABLE_STATUS);
+ break;
+ C2S(NAN_ATTRIBUTE_JOIN_STATUS);
+ break;
+ C2S(NAN_ATTRIBUTE_ROLE);
+ break;
+ C2S(NAN_ATTRIBUTE_MASTER_RANK);
+ break;
+ C2S(NAN_ATTRIBUTE_ANCHOR_MASTER_RANK);
+ break;
+ C2S(NAN_ATTRIBUTE_CNT_PEND_TXFRM);
+ break;
+ C2S(NAN_ATTRIBUTE_CNT_BCN_TX);
+ break;
+ C2S(NAN_ATTRIBUTE_CNT_BCN_RX);
+ break;
+ C2S(NAN_ATTRIBUTE_CNT_SVC_DISC_TX);
+ break;
+ C2S(NAN_ATTRIBUTE_CNT_SVC_DISC_RX);
+ break;
+ C2S(NAN_ATTRIBUTE_AMBTT);
+ break;
+ C2S(NAN_ATTRIBUTE_CLUSTER_ID);
+ break;
+ C2S(NAN_ATTRIBUTE_INST_ID);
+ break;
+ C2S(NAN_ATTRIBUTE_OUI);
+ break;
+ C2S(NAN_ATTRIBUTE_STATUS);
+ break;
+ C2S(NAN_ATTRIBUTE_DE_EVENT_TYPE);
+ break;
+ C2S(NAN_ATTRIBUTE_MERGE);
+ break;
+ C2S(NAN_ATTRIBUTE_IFACE);
+ break;
+ C2S(NAN_ATTRIBUTE_CHANNEL);
+ break;
+ C2S(NAN_ATTRIBUTE_24G_CHANNEL);
+ break;
+ C2S(NAN_ATTRIBUTE_5G_CHANNEL);
+ break;
+ C2S(NAN_ATTRIBUTE_PEER_ID);
+ break;
+ C2S(NAN_ATTRIBUTE_NDP_ID);
+ break;
+ C2S(NAN_ATTRIBUTE_SECURITY);
+ break;
+ C2S(NAN_ATTRIBUTE_QOS);
+ break;
+ C2S(NAN_ATTRIBUTE_RSP_CODE);
+ break;
+ C2S(NAN_ATTRIBUTE_INST_COUNT);
+ break;
+ C2S(NAN_ATTRIBUTE_PEER_DISC_MAC_ADDR);
+ break;
+ C2S(NAN_ATTRIBUTE_PEER_NDI_MAC_ADDR);
+ break;
+ C2S(NAN_ATTRIBUTE_IF_ADDR);
+ break;
+ C2S(NAN_ATTRIBUTE_WARMUP_TIME);
+ break;
+ C2S(NAN_ATTRIBUTE_RECV_IND_CFG);
+ break;
+ C2S(NAN_ATTRIBUTE_CONNMAP);
+ break;
+ C2S(NAN_ATTRIBUTE_DWELL_TIME);
+ break;
+ C2S(NAN_ATTRIBUTE_SCAN_PERIOD);
+ break;
+ C2S(NAN_ATTRIBUTE_RSSI_WINDOW_SIZE);
+ break;
+ C2S(NAN_ATTRIBUTE_CONF_CLUSTER_VAL);
+ break;
+ C2S(NAN_ATTRIBUTE_CIPHER_SUITE_TYPE);
+ break;
+ C2S(NAN_ATTRIBUTE_KEY_TYPE);
+ break;
+ C2S(NAN_ATTRIBUTE_KEY_LEN);
+ break;
+ C2S(NAN_ATTRIBUTE_SCID);
+ break;
+ C2S(NAN_ATTRIBUTE_SCID_LEN);
+ break;
+ C2S(NAN_ATTRIBUTE_SDE_CONTROL_CONFIG_DP);
+ break;
+ C2S(NAN_ATTRIBUTE_SDE_CONTROL_SECURITY);
+ break;
+ C2S(NAN_ATTRIBUTE_SDE_CONTROL_DP_TYPE);
+ break;
+ C2S(NAN_ATTRIBUTE_SDE_CONTROL_RANGE_SUPPORT);
+ break;
+ C2S(NAN_ATTRIBUTE_NO_CONFIG_AVAIL);
+ break;
+ C2S(NAN_ATTRIBUTE_2G_AWAKE_DW);
+ break;
+ C2S(NAN_ATTRIBUTE_5G_AWAKE_DW);
+ break;
+ C2S(NAN_ATTRIBUTE_RSSI_THRESHOLD_FLAG);
+ break;
+ C2S(NAN_ATTRIBUTE_KEY_DATA);
+ break;
+ C2S(NAN_ATTRIBUTE_SDEA_SERVICE_SPECIFIC_INFO_LEN);
+ break;
+ C2S(NAN_ATTRIBUTE_SDEA_SERVICE_SPECIFIC_INFO);
+ break;
+ C2S(NAN_ATTRIBUTE_REASON);
+ break;
+ C2S(NAN_ATTRIBUTE_DISC_IND_CFG);
+ break;
+ C2S(NAN_ATTRIBUTE_DWELL_TIME_5G);
+ break;
+ C2S(NAN_ATTRIBUTE_SCAN_PERIOD_5G);
+ break;
+ C2S(NAN_ATTRIBUTE_SVC_RESPONDER_POLICY);
+ break;
+ C2S(NAN_ATTRIBUTE_EVENT_MASK);
+ break;
+ C2S(NAN_ATTRIBUTE_SUB_SID_BEACON);
+ break;
+ C2S(NAN_ATTRIBUTE_RANDOMIZATION_INTERVAL);
+ break;
+ C2S(NAN_ATTRIBUTE_CMD_RESP_DATA);
+ break;
+ C2S(NAN_ATTRIBUTE_CMD_USE_NDPE);
+ break;
+ C2S(NAN_ATTRIBUTE_ENABLE_MERGE);
+ break;
+ C2S(NAN_ATTRIBUTE_DISCOVERY_BEACON_INTERVAL);
+ break;
+ C2S(NAN_ATTRIBUTE_NSS);
+ break;
+ C2S(NAN_ATTRIBUTE_ENABLE_RANGING);
+ break;
+ C2S(NAN_ATTRIBUTE_DW_EARLY_TERM);
+ break;
+ default:
+ id2str = "NAN_ATTRIBUTE_UNKNOWN";
+ }
+
+ return id2str;
+}
+
+nan_hal_status_t nan_status_reasonstr_map[] = {
+ {NAN_STATUS_SUCCESS, "NAN status success"},
+ {NAN_STATUS_INTERNAL_FAILURE, "NAN Discovery engine failure"},
+ {NAN_STATUS_PROTOCOL_FAILURE, "protocol failure"},
+ {NAN_STATUS_INVALID_PUBLISH_SUBSCRIBE_ID, "invalid pub_sub ID"},
+ {NAN_STATUS_NO_RESOURCE_AVAILABLE, "No space available"},
+ {NAN_STATUS_INVALID_PARAM, "invalid param"},
+ {NAN_STATUS_INVALID_REQUESTOR_INSTANCE_ID, "invalid req inst id"},
+ {NAN_STATUS_INVALID_NDP_ID, "invalid ndp id"},
+ {NAN_STATUS_NAN_NOT_ALLOWED, "Nan not allowed"},
+ {NAN_STATUS_NO_OTA_ACK, "No OTA ack"},
+ {NAN_STATUS_ALREADY_ENABLED, "NAN is Already enabled"},
+ {NAN_STATUS_FOLLOWUP_QUEUE_FULL, "Follow-up queue full"},
+ {NAN_STATUS_UNSUPPORTED_CONCURRENCY_NAN_DISABLED, "unsupported concurrency"},
+};
+
+void
+wl_cfgvendor_add_nan_reason_str(nan_status_type_t status, nan_hal_resp_t *nan_req_resp)
+{
+ int i = 0;
+ int num = (int)(sizeof(nan_status_reasonstr_map)/sizeof(nan_status_reasonstr_map[0]));
+ for (i = 0; i < num; i++) {
+ if (nan_status_reasonstr_map[i].status == status) {
+ strlcpy(nan_req_resp->nan_reason, nan_status_reasonstr_map[i].nan_reason,
+ sizeof(nan_status_reasonstr_map[i].nan_reason));
+ break;
+ }
+ }
+}
+
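+/* Collapse driver/firmware error codes (BCME_xxx, WL_NAN_E_xxx) into
+ * the coarser NAN HAL status space; unrecognized codes map to
+ * NAN_STATUS_INTERNAL_FAILURE.
+ */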
+nan_status_type_t
+wl_cfgvendor_brcm_to_nanhal_status(int32 vendor_status)
+{
+ nan_status_type_t hal_status;
+ switch (vendor_status) {
+ case BCME_OK:
+ hal_status = NAN_STATUS_SUCCESS;
+ break;
+ case BCME_BUSY:
+ case BCME_NOTREADY:
+ hal_status = NAN_STATUS_NAN_NOT_ALLOWED;
+ break;
+ case BCME_BADLEN:
+ case BCME_BADBAND:
+ case BCME_UNSUPPORTED:
+ case BCME_USAGE_ERROR:
+ case BCME_BADARG:
+ case BCME_NOTENABLED:
+ hal_status = NAN_STATUS_INVALID_PARAM;
+ break;
+ case BCME_NOMEM:
+ case BCME_NORESOURCE:
+ case WL_NAN_E_SVC_SUB_LIST_FULL:
+ hal_status = NAN_STATUS_NO_RESOURCE_AVAILABLE;
+ break;
+ case WL_NAN_E_SD_TX_LIST_FULL:
+ hal_status = NAN_STATUS_FOLLOWUP_QUEUE_FULL;
+ break;
+ case WL_NAN_E_BAD_INSTANCE:
+ hal_status = NAN_STATUS_INVALID_PUBLISH_SUBSCRIBE_ID;
+ break;
+ default:
+ WL_ERR(("%s Unknown vendor status, status = %d\n",
+ __func__, vendor_status));
+ /* Generic error */
+ hal_status = NAN_STATUS_INTERNAL_FAILURE;
+ }
+ return hal_status;
+}
+
+static int
+wl_cfgvendor_nan_cmd_reply(struct wiphy *wiphy, int nan_cmd,
+ nan_hal_resp_t *nan_req_resp, int ret, int nan_cmd_status)
+{
+ int err;
+ int nan_reply;
+ nan_req_resp->subcmd = nan_cmd;
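+ /* Report the firmware command status when the ioctl itself
+ * succeeded; otherwise map the ioctl error to a HAL status.
+ */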
+ if (ret == BCME_OK) {
+ nan_reply = nan_cmd_status;
+ } else {
+ nan_reply = ret;
+ }
+ nan_req_resp->status = wl_cfgvendor_brcm_to_nanhal_status(nan_reply);
+ nan_req_resp->value = ret;
+ err = wl_cfgvendor_send_cmd_reply(wiphy, nan_req_resp,
+ sizeof(*nan_req_resp));
+ return err;
+}
+
+static void
+wl_cfgvendor_free_disc_cmd_data(struct bcm_cfg80211 *cfg,
+ nan_discover_cmd_data_t *cmd_data)
+{
+ if (!cmd_data) {
+ WL_ERR(("Cmd_data is null\n"));
+ return;
+ }
+ if (cmd_data->svc_info.data) {
+ MFREE(cfg->osh, cmd_data->svc_info.data, cmd_data->svc_info.dlen);
+ }
+ if (cmd_data->svc_hash.data) {
+ MFREE(cfg->osh, cmd_data->svc_hash.data, cmd_data->svc_hash.dlen);
+ }
+ if (cmd_data->rx_match.data) {
+ MFREE(cfg->osh, cmd_data->rx_match.data, cmd_data->rx_match.dlen);
+ }
+ if (cmd_data->tx_match.data) {
+ MFREE(cfg->osh, cmd_data->tx_match.data, cmd_data->tx_match.dlen);
+ }
+ if (cmd_data->mac_list.list) {
+ MFREE(cfg->osh, cmd_data->mac_list.list,
+ cmd_data->mac_list.num_mac_addr * ETHER_ADDR_LEN);
+ }
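+ /* key.data is always allocated with NAN_MAX_PMK_LEN (see the parse
+ * routines below), so it is freed with that same size, not key.dlen.
+ */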
+ if (cmd_data->key.data) {
+ MFREE(cfg->osh, cmd_data->key.data, NAN_MAX_PMK_LEN);
+ }
+ if (cmd_data->sde_svc_info.data) {
+ MFREE(cfg->osh, cmd_data->sde_svc_info.data, cmd_data->sde_svc_info.dlen);
+ }
+ MFREE(cfg->osh, cmd_data, sizeof(*cmd_data));
+}
+
+static void
+wl_cfgvendor_free_dp_cmd_data(struct bcm_cfg80211 *cfg,
+ nan_datapath_cmd_data_t *cmd_data)
+{
+ if (!cmd_data) {
+ WL_ERR(("Cmd_data is null\n"));
+ return;
+ }
+ if (cmd_data->svc_hash.data) {
+ MFREE(cfg->osh, cmd_data->svc_hash.data, cmd_data->svc_hash.dlen);
+ }
+ if (cmd_data->svc_info.data) {
+ MFREE(cfg->osh, cmd_data->svc_info.data, cmd_data->svc_info.dlen);
+ }
+ if (cmd_data->key.data) {
+ MFREE(cfg->osh, cmd_data->key.data, NAN_MAX_PMK_LEN);
+ }
+ MFREE(cfg->osh, cmd_data, sizeof(*cmd_data));
+}
+
+#define WL_NAN_EVENT_MAX_BUF 256
+#ifdef WL_NAN_DISC_CACHE
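+/* Parse the NL80211 attributes of a datapath security-info request:
+ * peer discovery MAC address, publish id and NDP instance id.
+ */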
+static int
+wl_cfgvendor_nan_parse_dp_sec_info_args(struct wiphy *wiphy,
+ const void *buf, int len, nan_datapath_sec_info_cmd_data_t *cmd_data)
+{
+ int ret = BCME_OK;
+ int attr_type;
+ int rem = len;
+ const struct nlattr *iter;
+
+ NAN_DBG_ENTER();
+
+ nla_for_each_attr(iter, buf, len, rem) {
+ attr_type = nla_type(iter);
+ WL_TRACE(("attr: %s (%u)\n", nan_attr_to_str(attr_type), attr_type));
+
+ switch (attr_type) {
+ case NAN_ATTRIBUTE_MAC_ADDR:
+ ret = memcpy_s((char*)&cmd_data->mac_addr, ETHER_ADDR_LEN,
+ (char*)nla_data(iter), nla_len(iter));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy mac addr\n"));
+ return ret;
+ }
+ break;
+ case NAN_ATTRIBUTE_PUBLISH_ID:
+ cmd_data->pub_id = nla_get_u16(iter);
+ break;
+ case NAN_ATTRIBUTE_NDP_ID:
+ cmd_data->ndp_instance_id = nla_get_u32(iter);
+ break;
+ default:
+ WL_ERR(("%s: Unknown type, %d\n", __FUNCTION__, attr_type));
+ ret = BCME_BADARG;
+ break;
+ }
+ }
+ /* TODO: set_config_handler needs to be called before start/enable */
+ NAN_DBG_EXIT();
+ return ret;
+}
+#endif /* WL_NAN_DISC_CACHE */
+
+int8 chanbuf[CHANSPEC_STR_LEN];
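+/* Parse the NL80211 attributes of a NAN datapath command. Each
+ * variable-length attribute is validated against its preceding
+ * length attribute before being copied into a freshly allocated buffer.
+ */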
+static int
+wl_cfgvendor_nan_parse_datapath_args(struct wiphy *wiphy,
+ const void *buf, int len, nan_datapath_cmd_data_t *cmd_data)
+{
+ int ret = BCME_OK;
+ int attr_type;
+ int rem = len;
+ const struct nlattr *iter;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ int chan;
+
+ NAN_DBG_ENTER();
+
+ nla_for_each_attr(iter, buf, len, rem) {
+ attr_type = nla_type(iter);
+ WL_TRACE(("attr: %s (%u)\n", nan_attr_to_str(attr_type), attr_type));
+
+ switch (attr_type) {
+ case NAN_ATTRIBUTE_NDP_ID:
+ if (nla_len(iter) != sizeof(uint32)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->ndp_instance_id = nla_get_u32(iter);
+ break;
+ case NAN_ATTRIBUTE_IFACE:
+ if (nla_len(iter) >= sizeof(cmd_data->ndp_iface)) {
+ WL_ERR(("iface_name len wrong:%d\n", nla_len(iter)));
+ ret = -EINVAL;
+ goto exit;
+ }
+ strlcpy((char *)cmd_data->ndp_iface, (char *)nla_data(iter),
+ nla_len(iter));
+ break;
+ case NAN_ATTRIBUTE_SECURITY:
+ if (nla_len(iter) != sizeof(uint8)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->ndp_cfg.security_cfg = nla_get_u8(iter);
+ break;
+ case NAN_ATTRIBUTE_QOS:
+ if (nla_len(iter) != sizeof(uint8)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->ndp_cfg.qos_cfg = nla_get_u8(iter);
+ break;
+ case NAN_ATTRIBUTE_RSP_CODE:
+ if (nla_len(iter) != sizeof(uint8)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->rsp_code = nla_get_u8(iter);
+ break;
+ case NAN_ATTRIBUTE_INST_COUNT:
+ if (nla_len(iter) != sizeof(uint8)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->num_ndp_instances = nla_get_u8(iter);
+ break;
+ case NAN_ATTRIBUTE_PEER_DISC_MAC_ADDR:
+ if (nla_len(iter) != ETHER_ADDR_LEN) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ ret = memcpy_s((char*)&cmd_data->peer_disc_mac_addr,
+ ETHER_ADDR_LEN, (char*)nla_data(iter), nla_len(iter));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy peer_disc_mac_addr\n"));
+ goto exit;
+ }
+ break;
+ case NAN_ATTRIBUTE_PEER_NDI_MAC_ADDR:
+ if (nla_len(iter) != ETHER_ADDR_LEN) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ ret = memcpy_s((char*)&cmd_data->peer_ndi_mac_addr,
+ ETHER_ADDR_LEN, (char*)nla_data(iter), nla_len(iter));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy peer_ndi_mac_addr\n"));
+ goto exit;
+ }
+ break;
+ case NAN_ATTRIBUTE_MAC_ADDR:
+ if (nla_len(iter) != ETHER_ADDR_LEN) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ ret = memcpy_s((char*)&cmd_data->mac_addr, ETHER_ADDR_LEN,
+ (char*)nla_data(iter), nla_len(iter));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy mac_addr\n"));
+ goto exit;
+ }
+ break;
+ case NAN_ATTRIBUTE_IF_ADDR:
+ if (nla_len(iter) != ETHER_ADDR_LEN) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ ret = memcpy_s((char*)&cmd_data->if_addr, ETHER_ADDR_LEN,
+ (char*)nla_data(iter), nla_len(iter));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy if_addr\n"));
+ goto exit;
+ }
+ break;
+ case NAN_ATTRIBUTE_ENTRY_CONTROL:
+ if (nla_len(iter) != sizeof(uint8)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->avail_params.duration = nla_get_u8(iter);
+ break;
+ case NAN_ATTRIBUTE_AVAIL_BIT_MAP:
+ if (nla_len(iter) != sizeof(uint32)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->avail_params.bmap = nla_get_u32(iter);
+ break;
+ case NAN_ATTRIBUTE_CHANNEL: {
+ if (nla_len(iter) != sizeof(uint32)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ /* take the default channel start_factor frequency */
+ chan = wf_mhz2channel((uint)nla_get_u32(iter), 0);
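+ /* 2.4GHz channels map to 20MHz chanspecs; anything above
+ * CH_MAX_2G_CHANNEL defaults to an 80MHz chanspec.
+ */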
+ if (chan <= CH_MAX_2G_CHANNEL) {
+ cmd_data->avail_params.chanspec[0] =
+ wf_channel2chspec(chan, WL_CHANSPEC_BW_20);
+ } else {
+ cmd_data->avail_params.chanspec[0] =
+ wf_channel2chspec(chan, WL_CHANSPEC_BW_80);
+ }
+ if (cmd_data->avail_params.chanspec[0] == 0) {
+ WL_ERR(("Channel is not valid \n"));
+ ret = -EINVAL;
+ goto exit;
+ }
+ WL_TRACE(("valid chanspec, chanspec = 0x%04x \n",
+ cmd_data->avail_params.chanspec[0]));
+ break;
+ }
+ case NAN_ATTRIBUTE_NO_CONFIG_AVAIL:
+ if (nla_len(iter) != sizeof(uint8)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->avail_params.no_config_avail = (bool)nla_get_u8(iter);
+ break;
+ case NAN_ATTRIBUTE_SERVICE_NAME_LEN: {
+ if (nla_len(iter) != sizeof(uint16)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ if (cmd_data->svc_hash.dlen) {
+ WL_ERR(("trying to overwrite:%d\n", attr_type));
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->svc_hash.dlen = nla_get_u16(iter);
+ if (cmd_data->svc_hash.dlen != WL_NAN_SVC_HASH_LEN) {
+ WL_ERR(("invalid svc_hash length = %u\n", cmd_data->svc_hash.dlen));
+ ret = -EINVAL;
+ goto exit;
+ }
+ break;
+ }
+ case NAN_ATTRIBUTE_SERVICE_NAME:
+ if ((!cmd_data->svc_hash.dlen) ||
+ (nla_len(iter) != cmd_data->svc_hash.dlen)) {
+ WL_ERR(("invalid svc_hash length = %d,%d\n",
+ cmd_data->svc_hash.dlen, nla_len(iter)));
+ ret = -EINVAL;
+ goto exit;
+ }
+ if (cmd_data->svc_hash.data) {
+ WL_ERR(("trying to overwrite:%d\n", attr_type));
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->svc_hash.data =
+ MALLOCZ(cfg->osh, cmd_data->svc_hash.dlen);
+ if (!cmd_data->svc_hash.data) {
+ WL_ERR(("failed to allocate svc_hash data, len=%d\n",
+ cmd_data->svc_hash.dlen));
+ ret = -ENOMEM;
+ goto exit;
+ }
+ ret = memcpy_s(cmd_data->svc_hash.data, cmd_data->svc_hash.dlen,
+ nla_data(iter), nla_len(iter));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy svc hash data\n"));
+ goto exit;
+ }
+ break;
+ case NAN_ATTRIBUTE_SERVICE_SPECIFIC_INFO_LEN:
+ if (nla_len(iter) != sizeof(uint16)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ if (cmd_data->svc_info.dlen) {
+ WL_ERR(("trying to overwrite:%d\n", attr_type));
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->svc_info.dlen = nla_get_u16(iter);
+ if (cmd_data->svc_info.dlen > MAX_APP_INFO_LEN) {
+ WL_ERR_RLMT(("Not allowed beyond :%d\n", MAX_APP_INFO_LEN));
+ ret = -EINVAL;
+ goto exit;
+ }
+ break;
+ case NAN_ATTRIBUTE_SERVICE_SPECIFIC_INFO:
+ if ((!cmd_data->svc_info.dlen) ||
+ (nla_len(iter) != cmd_data->svc_info.dlen)) {
+ WL_ERR(("failed to allocate svc info by invalid len=%d,%d\n",
+ cmd_data->svc_info.dlen, nla_len(iter)));
+ ret = -EINVAL;
+ goto exit;
+ }
+ if (cmd_data->svc_info.data) {
+ WL_ERR(("trying to overwrite:%d\n", attr_type));
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->svc_info.data = MALLOCZ(cfg->osh, cmd_data->svc_info.dlen);
+ if (cmd_data->svc_info.data == NULL) {
+ WL_ERR(("failed to allocate svc info data, len=%d\n",
+ cmd_data->svc_info.dlen));
+ ret = -ENOMEM;
+ goto exit;
+ }
+ ret = memcpy_s(cmd_data->svc_info.data, cmd_data->svc_info.dlen,
+ nla_data(iter), nla_len(iter));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy svc info\n"));
+ goto exit;
+ }
+ break;
+ case NAN_ATTRIBUTE_PUBLISH_ID:
+ if (nla_len(iter) != sizeof(uint32)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->pub_id = nla_get_u32(iter);
+ break;
+ case NAN_ATTRIBUTE_CIPHER_SUITE_TYPE:
+ if (nla_len(iter) != sizeof(uint8)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->csid = nla_get_u8(iter);
+ WL_TRACE(("CSID = %u\n", cmd_data->csid));
+ break;
+ case NAN_ATTRIBUTE_KEY_TYPE:
+ if (nla_len(iter) != sizeof(uint8)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->key_type = nla_get_u8(iter);
+ WL_TRACE(("Key Type = %u\n", cmd_data->key_type));
+ break;
+ case NAN_ATTRIBUTE_KEY_LEN:
+ if (nla_len(iter) != sizeof(uint32)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ if (cmd_data->key.dlen) {
+ WL_ERR(("trying to overwrite:%d\n", attr_type));
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->key.dlen = nla_get_u32(iter);
+ if ((!cmd_data->key.dlen) || (cmd_data->key.dlen > WL_NAN_NCS_SK_PMK_LEN)) {
+ WL_ERR(("invalid key length = %u\n", cmd_data->key.dlen));
+ ret = -EINVAL;
+ goto exit;
+ }
+ WL_TRACE(("valid key length = %u\n", cmd_data->key.dlen));
+ break;
+ case NAN_ATTRIBUTE_KEY_DATA:
+ if ((!cmd_data->key.dlen) ||
+ (nla_len(iter) != cmd_data->key.dlen)) {
+ WL_ERR(("failed to allocate key data by invalid len=%d,%d\n",
+ cmd_data->key.dlen, nla_len(iter)));
+ ret = -EINVAL;
+ goto exit;
+ }
+ if (cmd_data->key.data) {
+ WL_ERR(("trying to overwrite key data.\n"));
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ cmd_data->key.data = MALLOCZ(cfg->osh, NAN_MAX_PMK_LEN);
+ if (cmd_data->key.data == NULL) {
+ WL_ERR(("failed to allocate key data, len=%d\n",
+ cmd_data->key.dlen));
+ ret = -ENOMEM;
+ goto exit;
+ }
+ ret = memcpy_s(cmd_data->key.data, NAN_MAX_PMK_LEN,
+ nla_data(iter), nla_len(iter));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to key data\n"));
+ goto exit;
+ }
+ break;
+
+ default:
+ WL_ERR(("Unknown type, %d\n", attr_type));
+ ret = -EINVAL;
+ goto exit;
+ }
+ }
+exit:
+ /* TODO: set_config_handler needs to be called before start/enable */
+ NAN_DBG_EXIT();
+ return ret;
+}
+
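+/* Parse the NL80211 attributes of a NAN publish/subscribe (discovery)
+ * command into nan_discover_cmd_data_t, translating framework values
+ * into WL_NAN_xxx flag bits where applicable.
+ */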
+static int
+wl_cfgvendor_nan_parse_discover_args(struct wiphy *wiphy,
+ const void *buf, int len, nan_discover_cmd_data_t *cmd_data)
+{
+ int ret = BCME_OK;
+ int attr_type;
+ int rem = len;
+ const struct nlattr *iter;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ u8 val_u8;
+ u32 bit_flag;
+ u8 flag_match;
+
+ NAN_DBG_ENTER();
+
+ nla_for_each_attr(iter, buf, len, rem) {
+ attr_type = nla_type(iter);
+ WL_TRACE(("attr: %s (%u)\n", nan_attr_to_str(attr_type), attr_type));
+
+ switch (attr_type) {
+ case NAN_ATTRIBUTE_TRANSAC_ID:
+ if (nla_len(iter) != sizeof(uint16)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->token = nla_get_u16(iter);
+ break;
+ case NAN_ATTRIBUTE_PERIODIC_SCAN_INTERVAL:
+ break;
+
+ /* Nan Publish/Subscribe request Attributes */
+ case NAN_ATTRIBUTE_PUBLISH_ID:
+ if (nla_len(iter) != sizeof(uint16)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->pub_id = nla_get_u16(iter);
+ cmd_data->local_id = cmd_data->pub_id;
+ break;
+ case NAN_ATTRIBUTE_MAC_ADDR:
+ if (nla_len(iter) != ETHER_ADDR_LEN) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ ret = memcpy_s((char*)&cmd_data->mac_addr, ETHER_ADDR_LEN,
+ (char*)nla_data(iter), nla_len(iter));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy mac addr\n"));
+ return ret;
+ }
+ break;
+ case NAN_ATTRIBUTE_SERVICE_SPECIFIC_INFO_LEN:
+ if (nla_len(iter) != sizeof(uint16)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ if (cmd_data->svc_info.dlen) {
+ WL_ERR(("trying to overwrite:%d\n", attr_type));
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->svc_info.dlen = nla_get_u16(iter);
+ if (cmd_data->svc_info.dlen > NAN_MAX_SERVICE_SPECIFIC_INFO_LEN) {
+ WL_ERR_RLMT(("Not allowed beyond :%d\n",
+ NAN_MAX_SERVICE_SPECIFIC_INFO_LEN));
+ ret = -EINVAL;
+ goto exit;
+ }
+ break;
+ case NAN_ATTRIBUTE_SERVICE_SPECIFIC_INFO:
+ if ((!cmd_data->svc_info.dlen) ||
+ (nla_len(iter) != cmd_data->svc_info.dlen)) {
+ WL_ERR(("failed to allocate svc info by invalid len=%d,%d\n",
+ cmd_data->svc_info.dlen, nla_len(iter)));
+ ret = -EINVAL;
+ goto exit;
+ }
+ if (cmd_data->svc_info.data) {
+ WL_ERR(("trying to overwrite:%d\n", attr_type));
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ cmd_data->svc_info.data = MALLOCZ(cfg->osh, cmd_data->svc_info.dlen);
+ if (cmd_data->svc_info.data == NULL) {
+ WL_ERR(("failed to allocate svc info data, len=%d\n",
+ cmd_data->svc_info.dlen));
+ ret = -ENOMEM;
+ goto exit;
+ }
+ ret = memcpy_s(cmd_data->svc_info.data, cmd_data->svc_info.dlen,
+ nla_data(iter), nla_len(iter));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy svc info\n"));
+ return ret;
+ }
+ break;
+ case NAN_ATTRIBUTE_SUBSCRIBE_ID:
+ if (nla_len(iter) != sizeof(uint16)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->sub_id = nla_get_u16(iter);
+ cmd_data->local_id = cmd_data->sub_id;
+ break;
+ case NAN_ATTRIBUTE_SUBSCRIBE_TYPE:
+ if (nla_len(iter) != sizeof(uint8)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->flags |= nla_get_u8(iter) ? WL_NAN_SUB_ACTIVE : 0;
+ break;
+ case NAN_ATTRIBUTE_PUBLISH_COUNT:
+ if (nla_len(iter) != sizeof(uint8)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->life_count = nla_get_u8(iter);
+ break;
+ case NAN_ATTRIBUTE_PUBLISH_TYPE: {
+ if (nla_len(iter) != sizeof(uint8)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ val_u8 = nla_get_u8(iter);
+ if (val_u8 == 0) {
+ cmd_data->flags |= WL_NAN_PUB_UNSOLICIT;
+ } else if (val_u8 == 1) {
+ cmd_data->flags |= WL_NAN_PUB_SOLICIT;
+ } else {
+ cmd_data->flags |= WL_NAN_PUB_BOTH;
+ }
+ break;
+ }
+ case NAN_ATTRIBUTE_PERIOD: {
+ if (nla_len(iter) != sizeof(uint16)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ if (nla_get_u16(iter) > NAN_MAX_AWAKE_DW_INTERVAL) {
+ WL_ERR(("Invalid/Out of bound value = %u\n", nla_get_u16(iter)));
+ ret = BCME_BADARG;
+ break;
+ }
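+ /* The framework passes the period as an exponent n: the
+ * effective period is 2^(n-1) DWs; 0 keeps the firmware default.
+ */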
+ if (nla_get_u16(iter)) {
+ cmd_data->period = 1 << (nla_get_u16(iter)-1);
+ }
+ break;
+ }
+ case NAN_ATTRIBUTE_REPLIED_EVENT_FLAG:
+ break;
+ case NAN_ATTRIBUTE_TTL:
+ if (nla_len(iter) != sizeof(uint16)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->ttl = nla_get_u16(iter);
+ break;
+ case NAN_ATTRIBUTE_SERVICE_NAME_LEN: {
+ if (nla_len(iter) != sizeof(uint16)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ if (cmd_data->svc_hash.dlen) {
+ WL_ERR(("trying to overwrite:%d\n", attr_type));
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ cmd_data->svc_hash.dlen = nla_get_u16(iter);
+ if (cmd_data->svc_hash.dlen != WL_NAN_SVC_HASH_LEN) {
+ WL_ERR(("invalid svc_hash length = %u\n", cmd_data->svc_hash.dlen));
+ ret = -EINVAL;
+ goto exit;
+ }
+ break;
+ }
+ case NAN_ATTRIBUTE_SERVICE_NAME:
+ if ((!cmd_data->svc_hash.dlen) ||
+ (nla_len(iter) != cmd_data->svc_hash.dlen)) {
+ WL_ERR(("invalid svc_hash length = %d,%d\n",
+ cmd_data->svc_hash.dlen, nla_len(iter)));
+ ret = -EINVAL;
+ goto exit;
+ }
+ if (cmd_data->svc_hash.data) {
+ WL_ERR(("trying to overwrite:%d\n", attr_type));
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ cmd_data->svc_hash.data =
+ MALLOCZ(cfg->osh, cmd_data->svc_hash.dlen);
+ if (!cmd_data->svc_hash.data) {
+ WL_ERR(("failed to allocate svc_hash data, len=%d\n",
+ cmd_data->svc_hash.dlen));
+ ret = -ENOMEM;
+ goto exit;
+ }
+ ret = memcpy_s(cmd_data->svc_hash.data, cmd_data->svc_hash.dlen,
+ nla_data(iter), nla_len(iter));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy svc hash data\n"));
+ return ret;
+ }
+ break;
+ case NAN_ATTRIBUTE_PEER_ID:
+ if (nla_len(iter) != sizeof(uint32)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->remote_id = nla_get_u32(iter);
+ break;
+ case NAN_ATTRIBUTE_INST_ID:
+ if (nla_len(iter) != sizeof(uint16)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->local_id = nla_get_u16(iter);
+ break;
+ case NAN_ATTRIBUTE_SUBSCRIBE_COUNT:
+ if (nla_len(iter) != sizeof(uint8)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->life_count = nla_get_u8(iter);
+ break;
+ case NAN_ATTRIBUTE_SSIREQUIREDFORMATCHINDICATION: {
+ if (nla_len(iter) != sizeof(uint8)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ bit_flag = (u32)nla_get_u8(iter);
+ cmd_data->flags |=
+ bit_flag ? WL_NAN_SUB_MATCH_IF_SVC_INFO : 0;
+ break;
+ }
+ case NAN_ATTRIBUTE_SUBSCRIBE_MATCH:
+ case NAN_ATTRIBUTE_PUBLISH_MATCH: {
+ if (nla_len(iter) != sizeof(uint8)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ flag_match = nla_get_u8(iter);
+
+ switch (flag_match) {
+ case NAN_MATCH_ALG_MATCH_CONTINUOUS:
+ /* Default fw behaviour, no need to set explicitly */
+ break;
+ case NAN_MATCH_ALG_MATCH_ONCE:
+ cmd_data->flags |= WL_NAN_MATCH_ONCE;
+ break;
+ case NAN_MATCH_ALG_MATCH_NEVER:
+ cmd_data->flags |= WL_NAN_MATCH_NEVER;
+ break;
+ default:
+ WL_ERR(("invalid nan match alg = %u\n", flag_match));
+ ret = -EINVAL;
+ goto exit;
+ }
+ break;
+ }
+ case NAN_ATTRIBUTE_SERVICERESPONSEFILTER:
+ if (nla_len(iter) != sizeof(uint8)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->srf_type = nla_get_u8(iter);
+ break;
+ case NAN_ATTRIBUTE_SERVICERESPONSEINCLUDE:
+ if (nla_len(iter) != sizeof(uint8)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->srf_include = nla_get_u8(iter);
+ break;
+ case NAN_ATTRIBUTE_USESERVICERESPONSEFILTER:
+ if (nla_len(iter) != sizeof(uint8)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->use_srf = nla_get_u8(iter);
+ break;
+ case NAN_ATTRIBUTE_RX_MATCH_FILTER_LEN:
+ if (nla_len(iter) != sizeof(uint16)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ if (cmd_data->rx_match.dlen) {
+ WL_ERR(("trying to overwrite:%d\n", attr_type));
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->rx_match.dlen = nla_get_u16(iter);
+ if (cmd_data->rx_match.dlen > MAX_MATCH_FILTER_LEN) {
+ ret = -EINVAL;
+ WL_ERR_RLMT(("Not allowed beyond %d\n", MAX_MATCH_FILTER_LEN));
+ goto exit;
+ }
+ break;
+ case NAN_ATTRIBUTE_RX_MATCH_FILTER:
+ if ((!cmd_data->rx_match.dlen) ||
+ (nla_len(iter) != cmd_data->rx_match.dlen)) {
+ WL_ERR(("RX match filter len wrong:%d,%d\n",
+ cmd_data->rx_match.dlen, nla_len(iter)));
+ ret = -EINVAL;
+ goto exit;
+ }
+ if (cmd_data->rx_match.data) {
+ WL_ERR(("trying to overwrite:%d\n", attr_type));
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->rx_match.data =
+ MALLOCZ(cfg->osh, cmd_data->rx_match.dlen);
+ if (cmd_data->rx_match.data == NULL) {
+ WL_ERR(("failed to allocate LEN=[%u]\n",
+ cmd_data->rx_match.dlen));
+ ret = -ENOMEM;
+ goto exit;
+ }
+ ret = memcpy_s(cmd_data->rx_match.data, cmd_data->rx_match.dlen,
+ nla_data(iter), nla_len(iter));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy rx match data\n"));
+ return ret;
+ }
+ break;
+ case NAN_ATTRIBUTE_TX_MATCH_FILTER_LEN:
+ if (nla_len(iter) != sizeof(uint16)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ if (cmd_data->tx_match.dlen) {
+ WL_ERR(("trying to overwrite:%d\n", attr_type));
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->tx_match.dlen = nla_get_u16(iter);
+ if (cmd_data->tx_match.dlen > MAX_MATCH_FILTER_LEN) {
+ ret = -EINVAL;
+ WL_ERR_RLMT(("Not allowed beyond %d\n", MAX_MATCH_FILTER_LEN));
+ goto exit;
+ }
+ break;
+ case NAN_ATTRIBUTE_TX_MATCH_FILTER:
+ if ((!cmd_data->tx_match.dlen) ||
+ (nla_len(iter) != cmd_data->tx_match.dlen)) {
+ WL_ERR(("TX match filter len wrong:%d,%d\n",
+ cmd_data->tx_match.dlen, nla_len(iter)));
+ ret = -EINVAL;
+ goto exit;
+ }
+ if (cmd_data->tx_match.data) {
+ WL_ERR(("trying to overwrite:%d\n", attr_type));
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->tx_match.data =
+ MALLOCZ(cfg->osh, cmd_data->tx_match.dlen);
+ if (cmd_data->tx_match.data == NULL) {
+ WL_ERR(("failed to allocate LEN=[%u]\n",
+ cmd_data->tx_match.dlen));
+ ret = -ENOMEM;
+ goto exit;
+ }
+ ret = memcpy_s(cmd_data->tx_match.data, cmd_data->tx_match.dlen,
+ nla_data(iter), nla_len(iter));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy tx match data\n"));
+ return ret;
+ }
+ break;
+ case NAN_ATTRIBUTE_MAC_ADDR_LIST_NUM_ENTRIES:
+ if (nla_len(iter) != sizeof(uint16)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ if (cmd_data->mac_list.num_mac_addr) {
+ WL_ERR(("trying to overwrite:%d\n", attr_type));
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->mac_list.num_mac_addr = nla_get_u16(iter);
+ if (cmd_data->mac_list.num_mac_addr >= NAN_SRF_MAX_MAC) {
+ WL_ERR(("trying to overflow num :%d\n",
+ cmd_data->mac_list.num_mac_addr));
+ cmd_data->mac_list.num_mac_addr = 0;
+ ret = -EINVAL;
+ goto exit;
+ }
+ break;
+ case NAN_ATTRIBUTE_MAC_ADDR_LIST:
+ if ((!cmd_data->mac_list.num_mac_addr) ||
+ (nla_len(iter) != (cmd_data->mac_list.num_mac_addr * ETHER_ADDR_LEN))) {
+ WL_ERR(("wrong mac list len:%d,%d\n",
+ cmd_data->mac_list.num_mac_addr, nla_len(iter)));
+ ret = -EINVAL;
+ goto exit;
+ }
+ if (cmd_data->mac_list.list) {
+ WL_ERR(("trying to overwrite:%d\n", attr_type));
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->mac_list.list =
+ MALLOCZ(cfg->osh, (cmd_data->mac_list.num_mac_addr
+ * ETHER_ADDR_LEN));
+ if (cmd_data->mac_list.list == NULL) {
+ WL_ERR(("failed to allocate LEN=[%u]\n",
+ (cmd_data->mac_list.num_mac_addr * ETHER_ADDR_LEN)));
+ ret = -ENOMEM;
+ goto exit;
+ }
+ ret = memcpy_s(cmd_data->mac_list.list,
+ (cmd_data->mac_list.num_mac_addr * ETHER_ADDR_LEN),
+ nla_data(iter), nla_len(iter));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy list of mac addresses\n"));
+ return ret;
+ }
+ break;
+ case NAN_ATTRIBUTE_TX_TYPE:
+ if (nla_len(iter) != sizeof(uint8)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ val_u8 = nla_get_u8(iter);
+ if (val_u8 == 0) {
+ cmd_data->flags |= WL_NAN_PUB_BCAST;
+ WL_TRACE(("NAN_ATTRIBUTE_TX_TYPE: flags=NAN_PUB_BCAST\n"));
+ }
+ break;
+ case NAN_ATTRIBUTE_SDE_CONTROL_CONFIG_DP:
+ if (nla_len(iter) != sizeof(uint8)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ if (nla_get_u8(iter) == 1) {
+ cmd_data->sde_control_flag
+ |= NAN_SDE_CF_DP_REQUIRED;
+ break;
+ }
+ break;
+ case NAN_ATTRIBUTE_SDE_CONTROL_RANGE_SUPPORT:
+ if (nla_len(iter) != sizeof(uint8)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->sde_control_config = TRUE;
+ if (nla_get_u8(iter) == 1) {
+ cmd_data->sde_control_flag
+ |= NAN_SDE_CF_RANGING_REQUIRED;
+ break;
+ }
+ break;
+ case NAN_ATTRIBUTE_SDE_CONTROL_DP_TYPE:
+ if (nla_len(iter) != sizeof(uint8)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ if (nla_get_u8(iter) == 1) {
+ cmd_data->sde_control_flag
+ |= NAN_SDE_CF_MULTICAST_TYPE;
+ break;
+ }
+ break;
+ case NAN_ATTRIBUTE_SDE_CONTROL_SECURITY:
+ if (nla_len(iter) != sizeof(uint8)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ if (nla_get_u8(iter) == 1) {
+ cmd_data->sde_control_flag
+ |= NAN_SDE_CF_SECURITY_REQUIRED;
+ break;
+ }
+ break;
+ case NAN_ATTRIBUTE_RECV_IND_CFG:
+ if (nla_len(iter) != sizeof(uint8)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->recv_ind_flag = nla_get_u8(iter);
+ break;
+ case NAN_ATTRIBUTE_CIPHER_SUITE_TYPE:
+ if (nla_len(iter) != sizeof(uint8)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->csid = nla_get_u8(iter);
+ WL_TRACE(("CSID = %u\n", cmd_data->csid));
+ break;
+ case NAN_ATTRIBUTE_KEY_TYPE:
+ if (nla_len(iter) != sizeof(uint8)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->key_type = nla_get_u8(iter);
+ WL_TRACE(("Key Type = %u\n", cmd_data->key_type));
+ break;
+ case NAN_ATTRIBUTE_KEY_LEN:
+ if (nla_len(iter) != sizeof(uint32)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ if (cmd_data->key.dlen) {
+ WL_ERR(("trying to overwrite:%d\n", attr_type));
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->key.dlen = nla_get_u32(iter);
+ if ((!cmd_data->key.dlen) || (cmd_data->key.dlen > WL_NAN_NCS_SK_PMK_LEN)) {
+ WL_ERR(("invalid key length = %u\n",
+ cmd_data->key.dlen));
+ break;
+ }
+ WL_TRACE(("valid key length = %u\n", cmd_data->key.dlen));
+ break;
+ case NAN_ATTRIBUTE_KEY_DATA:
+ if (!cmd_data->key.dlen ||
+ (nla_len(iter) != cmd_data->key.dlen)) {
+ WL_ERR(("failed to allocate key data by invalid len=%d,%d\n",
+ cmd_data->key.dlen, nla_len(iter)));
+ ret = -EINVAL;
+ goto exit;
+ }
+ if (cmd_data->key.data) {
+ WL_ERR(("trying to overwrite:%d\n", attr_type));
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ cmd_data->key.data = MALLOCZ(cfg->osh, NAN_MAX_PMK_LEN);
+ if (cmd_data->key.data == NULL) {
+ WL_ERR(("failed to allocate key data, len=%d\n",
+ cmd_data->key.dlen));
+ ret = -ENOMEM;
+ goto exit;
+ }
+ ret = memcpy_s(cmd_data->key.data, NAN_MAX_PMK_LEN,
+ nla_data(iter), nla_len(iter));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to key data\n"));
+ return ret;
+ }
+ break;
+ case NAN_ATTRIBUTE_RSSI_THRESHOLD_FLAG:
+ if (nla_len(iter) != sizeof(uint8)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ if (nla_get_u8(iter) == 1) {
+ cmd_data->flags |=
+ WL_NAN_RANGE_LIMITED;
+ break;
+ }
+ break;
+ case NAN_ATTRIBUTE_DISC_IND_CFG:
+ if (nla_len(iter) != sizeof(uint8)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->disc_ind_cfg = nla_get_u8(iter);
+ break;
+ case NAN_ATTRIBUTE_SDEA_SERVICE_SPECIFIC_INFO_LEN:
+ if (nla_len(iter) != sizeof(uint16)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ if (cmd_data->sde_svc_info.dlen) {
+ WL_ERR(("trying to overwrite:%d\n", attr_type));
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->sde_svc_info.dlen = nla_get_u16(iter);
+ if (cmd_data->sde_svc_info.dlen > MAX_SDEA_SVC_INFO_LEN) {
+ ret = -EINVAL;
+ WL_ERR_RLMT(("Not allowed beyond %d\n", MAX_SDEA_SVC_INFO_LEN));
+ goto exit;
+ }
+ break;
+ case NAN_ATTRIBUTE_SDEA_SERVICE_SPECIFIC_INFO:
+ if ((!cmd_data->sde_svc_info.dlen) ||
+ (nla_len(iter) != cmd_data->sde_svc_info.dlen)) {
+ WL_ERR(("wrong sdea info len:%d,%d\n",
+ cmd_data->sde_svc_info.dlen, nla_len(iter)));
+ ret = -EINVAL;
+ goto exit;
+ }
+ if (cmd_data->sde_svc_info.data) {
+ WL_ERR(("trying to overwrite:%d\n", attr_type));
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->sde_svc_info.data = MALLOCZ(cfg->osh,
+ cmd_data->sde_svc_info.dlen);
+ if (cmd_data->sde_svc_info.data == NULL) {
+ WL_ERR(("failed to allocate svc info data, len=%d\n",
+ cmd_data->sde_svc_info.dlen));
+ ret = -ENOMEM;
+ goto exit;
+ }
+ ret = memcpy_s(cmd_data->sde_svc_info.data,
+ cmd_data->sde_svc_info.dlen,
+ nla_data(iter), nla_len(iter));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to sdea info data\n"));
+ return ret;
+ }
+ break;
+ case NAN_ATTRIBUTE_SECURITY:
+ if (nla_len(iter) != sizeof(uint8)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->ndp_cfg.security_cfg = nla_get_u8(iter);
+ break;
+ case NAN_ATTRIBUTE_RANGING_INTERVAL:
+ if (nla_len(iter) != sizeof(uint32)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->ranging_intvl_msec = nla_get_u32(iter);
+ break;
+ case NAN_ATTRIBUTE_RANGING_INGRESS_LIMIT:
+ if (nla_len(iter) != sizeof(uint32)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->ingress_limit = nla_get_u32(iter);
+ break;
+ case NAN_ATTRIBUTE_RANGING_EGRESS_LIMIT:
+ if (nla_len(iter) != sizeof(uint32)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->egress_limit = nla_get_u32(iter);
+ break;
+ case NAN_ATTRIBUTE_RANGING_INDICATION:
+ if (nla_len(iter) != sizeof(uint32)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->ranging_indication = nla_get_u32(iter);
+ break;
+ /* Nan accept policy: Per service basis policy
+ * Based on this policy(ALL/NONE), responder side
+ * will send ACCEPT/REJECT
+ */
+ case NAN_ATTRIBUTE_SVC_RESPONDER_POLICY:
+ if (nla_len(iter) != sizeof(uint8)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->service_responder_policy = nla_get_u8(iter);
+ break;
+ default:
+ WL_ERR(("Unknown type, %d\n", attr_type));
+ ret = -EINVAL;
+ goto exit;
+ }
+ }
+exit:
+ /* TODO: set_config_handler needs to be called before start/enable */
+ NAN_DBG_EXIT();
+ return ret;
+}
+
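+/* Parse the NL80211 attributes of a NAN enable/config request. Each
+ * recognized optional attribute also sets a bit in *nan_attr_mask so
+ * callers know which settings were explicitly supplied.
+ */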
+static int
+wl_cfgvendor_nan_parse_args(struct wiphy *wiphy, const void *buf,
+ int len, nan_config_cmd_data_t *cmd_data, uint32 *nan_attr_mask)
+{
+ int ret = BCME_OK;
+ int attr_type;
+ int rem = len;
+ const struct nlattr *iter;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ int chan;
+ u8 sid_beacon = 0, sub_sid_beacon = 0;
+
+ NAN_DBG_ENTER();
+
+ nla_for_each_attr(iter, buf, len, rem) {
+ attr_type = nla_type(iter);
+ WL_TRACE(("attr: %s (%u)\n", nan_attr_to_str(attr_type), attr_type));
+
+ switch (attr_type) {
+ /* NAN Enable request attributes */
+ case NAN_ATTRIBUTE_2G_SUPPORT:{
+ if (nla_len(iter) != sizeof(uint8)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->support_2g = nla_get_u8(iter);
+ if (cmd_data->support_2g == 0) {
+ WL_ERR((" 2.4GHz support is not set \n"));
+ cmd_data->status = BCME_BADARG;
+ goto exit;
+ }
+ *nan_attr_mask |= NAN_ATTR_SUPPORT_2G_CONFIG;
+ break;
+ }
+ case NAN_ATTRIBUTE_5G_SUPPORT:{
+ if (nla_len(iter) != sizeof(uint8)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->support_5g = nla_get_u8(iter);
+ *nan_attr_mask |= NAN_ATTR_SUPPORT_5G_CONFIG;
+ break;
+ }
+ case NAN_ATTRIBUTE_CLUSTER_LOW: {
+ if (nla_len(iter) != sizeof(uint16)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->cluster_low = nla_get_u16(iter);
+ break;
+ }
+ case NAN_ATTRIBUTE_CLUSTER_HIGH: {
+ if (nla_len(iter) != sizeof(uint16)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->cluster_high = nla_get_u16(iter);
+ break;
+ }
+ case NAN_ATTRIBUTE_SID_BEACON: {
+ if (nla_len(iter) != sizeof(uint8)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ sid_beacon = nla_get_u8(iter);
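+ /* Bit 0 enables the SID beacon; the remaining bits carry the
+ * service id count (the sub-SID attribute below uses the same
+ * packing).
+ */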
+ cmd_data->sid_beacon.sid_enable = (sid_beacon & 0x01);
+ if (cmd_data->sid_beacon.sid_enable) {
+ cmd_data->sid_beacon.sid_count = (sid_beacon >> 1);
+ *nan_attr_mask |= NAN_ATTR_SID_BEACON_CONFIG;
+ } else {
+ WL_ERR((" sid beacon is not valid \n"));
+ cmd_data->status = BCME_BADARG;
+ goto exit;
+ }
+
+ break;
+ }
+ case NAN_ATTRIBUTE_SUB_SID_BEACON: {
+ if (nla_len(iter) != sizeof(uint8)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ sub_sid_beacon = nla_get_u8(iter);
+ cmd_data->sid_beacon.sub_sid_enable = (sub_sid_beacon & 0x01);
+ if (cmd_data->sid_beacon.sub_sid_enable) {
+ cmd_data->sid_beacon.sub_sid_count = (sub_sid_beacon >> 1);
+ *nan_attr_mask |= NAN_ATTR_SUB_SID_BEACON_CONFIG;
+ } else {
+ WL_ERR((" sub sid beacon is not valid \n"));
+ cmd_data->status = BCME_BADARG;
+ goto exit;
+ }
+ break;
+ }
+ case NAN_ATTRIBUTE_SYNC_DISC_2G_BEACON:
+ if (nla_len(iter) != sizeof(uint8)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->beacon_2g_val = nla_get_u8(iter);
+ *nan_attr_mask |= NAN_ATTR_SYNC_DISC_2G_BEACON_CONFIG;
+ break;
+ case NAN_ATTRIBUTE_SYNC_DISC_5G_BEACON:
+ if (nla_len(iter) != sizeof(uint8)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->beacon_5g_val = nla_get_u8(iter);
+ *nan_attr_mask |= NAN_ATTR_SYNC_DISC_5G_BEACON_CONFIG;
+ break;
+ case NAN_ATTRIBUTE_SDF_2G_SUPPORT:
+ if (nla_len(iter) != sizeof(uint8)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->sdf_2g_val = nla_get_u8(iter);
+ *nan_attr_mask |= NAN_ATTR_SDF_2G_SUPPORT_CONFIG;
+ break;
+ case NAN_ATTRIBUTE_SDF_5G_SUPPORT:
+ if (nla_len(iter) != sizeof(uint8)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->sdf_5g_val = nla_get_u8(iter);
+ *nan_attr_mask |= NAN_ATTR_SDF_5G_SUPPORT_CONFIG;
+ break;
+ case NAN_ATTRIBUTE_HOP_COUNT_LIMIT:
+ if (nla_len(iter) != sizeof(uint8)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->hop_count_limit = nla_get_u8(iter);
+ if (cmd_data->hop_count_limit == 0) {
+ WL_ERR((" hop count limit is not valid \n"));
+ cmd_data->status = BCME_BADARG;
+ goto exit;
+ }
+ *nan_attr_mask |= NAN_ATTR_HOP_COUNT_LIMIT_CONFIG;
+ break;
+ case NAN_ATTRIBUTE_RANDOM_TIME:
+ if (nla_len(iter) != sizeof(uint8)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->metrics.random_factor = nla_get_u8(iter);
+ *nan_attr_mask |= NAN_ATTR_RAND_FACTOR_CONFIG;
+ break;
+ case NAN_ATTRIBUTE_MASTER_PREF:
+ if (nla_len(iter) != sizeof(uint8)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->metrics.master_pref = nla_get_u8(iter);
+ break;
+ case NAN_ATTRIBUTE_OUI:
+ if (nla_len(iter) != sizeof(uint32)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->nan_oui = nla_get_u32(iter);
+ *nan_attr_mask |= NAN_ATTR_OUI_CONFIG;
+ WL_TRACE(("nan_oui=%d\n", cmd_data->nan_oui));
+ break;
+ case NAN_ATTRIBUTE_WARMUP_TIME:
+ if (nla_len(iter) != sizeof(uint16)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->warmup_time = nla_get_u16(iter);
+ break;
+ case NAN_ATTRIBUTE_AMBTT:
+ case NAN_ATTRIBUTE_MASTER_RANK:
+ WL_DBG(("Unhandled attribute, %d\n", attr_type));
+ break;
+ case NAN_ATTRIBUTE_CHANNEL: {
+ if (nla_len(iter) != sizeof(uint32)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ /* take the default channel start_factor frequency */
+ chan = wf_mhz2channel((uint)nla_get_u32(iter), 0);
+ if (chan <= CH_MAX_2G_CHANNEL) {
+ cmd_data->chanspec[0] = wf_channel2chspec(chan, WL_CHANSPEC_BW_20);
+ } else {
+ cmd_data->chanspec[0] = wf_channel2chspec(chan, WL_CHANSPEC_BW_80);
+ }
+ if (cmd_data->chanspec[0] == 0) {
+ WL_ERR(("Channel is not valid \n"));
+ ret = -EINVAL;
+ goto exit;
+ }
+ WL_TRACE(("valid chanspec, chanspec = 0x%04x \n",
+ cmd_data->chanspec[0]));
+ break;
+ }
+ case NAN_ATTRIBUTE_24G_CHANNEL: {
+ if (nla_len(iter) != sizeof(uint32)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ /* take the default channel start_factor frequency */
+ chan = wf_mhz2channel((uint)nla_get_u32(iter), 0);
+ /* 20MHz as BW */
+ cmd_data->chanspec[1] = wf_channel2chspec(chan, WL_CHANSPEC_BW_20);
+ if (cmd_data->chanspec[1] == 0) {
+ WL_ERR((" 2.4GHz Channel is not valid \n"));
+ ret = -EINVAL;
+ break;
+ }
+ *nan_attr_mask |= NAN_ATTR_2G_CHAN_CONFIG;
+ WL_TRACE(("valid 2.4GHz chanspec, chanspec = 0x%04x \n",
+ cmd_data->chanspec[1]));
+ break;
+ }
+ case NAN_ATTRIBUTE_5G_CHANNEL: {
+ if (nla_len(iter) != sizeof(uint32)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ /* take the default channel start_factor frequency */
+ chan = wf_mhz2channel((uint)nla_get_u32(iter), 0);
+ /* 20MHz as BW */
+ cmd_data->chanspec[2] = wf_channel2chspec(chan, WL_CHANSPEC_BW_20);
+ if (cmd_data->chanspec[2] == 0) {
+ WL_ERR((" 5GHz Channel is not valid \n"));
+ ret = -EINVAL;
+ break;
+ }
+ *nan_attr_mask |= NAN_ATTR_5G_CHAN_CONFIG;
+ WL_TRACE(("valid 5GHz chanspec, chanspec = 0x%04x \n",
+ cmd_data->chanspec[2]));
+ break;
+ }
+ case NAN_ATTRIBUTE_CONF_CLUSTER_VAL:
+ if (nla_len(iter) != sizeof(uint8)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->config_cluster_val = nla_get_u8(iter);
+ *nan_attr_mask |= NAN_ATTR_CLUSTER_VAL_CONFIG;
+ break;
+ case NAN_ATTRIBUTE_DWELL_TIME:
+ if (nla_len(iter) != sizeof(uint8)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->dwell_time[0] = nla_get_u8(iter);
+ if (cmd_data->dwell_time[0] == 0) {
+ WL_ERR((" 2.4GHz dwell time is not valid \n"));
+ cmd_data->status = BCME_BADARG;
+ goto exit;
+ }
+ *nan_attr_mask |= NAN_ATTR_2G_DWELL_TIME_CONFIG;
+ break;
+ case NAN_ATTRIBUTE_SCAN_PERIOD:
+ if (nla_len(iter) != sizeof(uint16)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->scan_period[0] = nla_get_u16(iter);
+ if (cmd_data->scan_period[0] == 0) {
+ WL_ERR((" 2.4GHz scan period is not valid \n"));
+ cmd_data->status = BCME_BADARG;
+ goto exit;
+ }
+ *nan_attr_mask |= NAN_ATTR_2G_SCAN_PERIOD_CONFIG;
+ break;
+ case NAN_ATTRIBUTE_DWELL_TIME_5G:
+ if (nla_len(iter) != sizeof(uint8)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->dwell_time[1] = nla_get_u8(iter);
+ if (cmd_data->dwell_time[1] == 0) {
+ WL_ERR((" 5GHz dwell time is not valid \n"));
+ cmd_data->status = BCME_BADARG;
+ goto exit;
+ }
+ *nan_attr_mask |= NAN_ATTR_5G_DWELL_TIME_CONFIG;
+ break;
+ case NAN_ATTRIBUTE_SCAN_PERIOD_5G:
+ if (nla_len(iter) != sizeof(uint16)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->scan_period[1] = nla_get_u16(iter);
+ if (cmd_data->scan_period[1] == 0) {
+ WL_ERR((" 5GHz scan period is not valid \n"));
+ cmd_data->status = BCME_BADARG;
+ goto exit;
+ }
+ *nan_attr_mask |= NAN_ATTR_5G_SCAN_PERIOD_CONFIG;
+ break;
+ case NAN_ATTRIBUTE_AVAIL_BIT_MAP:
+ if (nla_len(iter) != sizeof(uint32)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->bmap = nla_get_u32(iter);
+ break;
+ case NAN_ATTRIBUTE_ENTRY_CONTROL:
+ if (nla_len(iter) != sizeof(uint8)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->avail_params.duration = nla_get_u8(iter);
+ break;
+ case NAN_ATTRIBUTE_RSSI_CLOSE:
+ if (nla_len(iter) != sizeof(uint8)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->rssi_attr.rssi_close_2dot4g_val = nla_get_s8(iter);
+ if (cmd_data->rssi_attr.rssi_close_2dot4g_val == 0) {
+ WL_ERR((" 2.4GHz rssi close is not valid \n"));
+ cmd_data->status = BCME_BADARG;
+ goto exit;
+ }
+ *nan_attr_mask |= NAN_ATTR_RSSI_CLOSE_CONFIG;
+ break;
+ case NAN_ATTRIBUTE_RSSI_MIDDLE:
+ if (nla_len(iter) != sizeof(uint8)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->rssi_attr.rssi_middle_2dot4g_val = nla_get_s8(iter);
+ if (cmd_data->rssi_attr.rssi_middle_2dot4g_val == 0) {
+ WL_ERR((" 2.4GHz rssi middle is not valid \n"));
+ cmd_data->status = BCME_BADARG;
+ goto exit;
+ }
+ *nan_attr_mask |= NAN_ATTR_RSSI_MIDDLE_2G_CONFIG;
+ break;
+ case NAN_ATTRIBUTE_RSSI_PROXIMITY:
+ if (nla_len(iter) != sizeof(uint8)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->rssi_attr.rssi_proximity_2dot4g_val = nla_get_s8(iter);
+ if (cmd_data->rssi_attr.rssi_proximity_2dot4g_val == 0) {
+ WL_ERR((" 2.4GHz rssi proximity is not valid \n"));
+ cmd_data->status = BCME_BADARG;
+ goto exit;
+ }
+ *nan_attr_mask |= NAN_ATTR_RSSI_PROXIMITY_2G_CONFIG;
+ break;
+ case NAN_ATTRIBUTE_RSSI_CLOSE_5G:
+ if (nla_len(iter) != sizeof(uint8)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->rssi_attr.rssi_close_5g_val = nla_get_s8(iter);
+ if (cmd_data->rssi_attr.rssi_close_5g_val == 0) {
+ WL_ERR((" 5GHz rssi close is not valid \n"));
+ cmd_data->status = BCME_BADARG;
+ goto exit;
+ }
+ *nan_attr_mask |= NAN_ATTR_RSSI_CLOSE_5G_CONFIG;
+ break;
+ case NAN_ATTRIBUTE_RSSI_MIDDLE_5G:
+ if (nla_len(iter) != sizeof(uint8)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->rssi_attr.rssi_middle_5g_val = nla_get_s8(iter);
+ if (cmd_data->rssi_attr.rssi_middle_5g_val == 0) {
+ WL_ERR((" 5Hz rssi middle is not valid \n"));
+ cmd_data->status = BCME_BADARG;
+ goto exit;
+ }
+ *nan_attr_mask |= NAN_ATTR_RSSI_MIDDLE_5G_CONFIG;
+ break;
+ case NAN_ATTRIBUTE_RSSI_PROXIMITY_5G:
+ if (nla_len(iter) != sizeof(uint8)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->rssi_attr.rssi_proximity_5g_val = nla_get_s8(iter);
+ if (cmd_data->rssi_attr.rssi_proximity_5g_val == 0) {
+ WL_ERR((" 5GHz rssi proximity is not valid \n"));
+ cmd_data->status = BCME_BADARG;
+ goto exit;
+ }
+ *nan_attr_mask |= NAN_ATTR_RSSI_PROXIMITY_5G_CONFIG;
+ break;
+ case NAN_ATTRIBUTE_RSSI_WINDOW_SIZE:
+ if (nla_len(iter) != sizeof(uint8)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->rssi_attr.rssi_window_size = nla_get_u8(iter);
+ if (cmd_data->rssi_attr.rssi_window_size == 0) {
+ WL_ERR((" rssi window size is not valid \n"));
+ cmd_data->status = BCME_BADARG;
+ goto exit;
+ }
+ *nan_attr_mask |= NAN_ATTR_RSSI_WINDOW_SIZE_CONFIG;
+ break;
+ case NAN_ATTRIBUTE_CIPHER_SUITE_TYPE:
+ if (nla_len(iter) != sizeof(uint8)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->csid = nla_get_u8(iter);
+ WL_TRACE(("CSID = %u\n", cmd_data->csid));
+ break;
+ case NAN_ATTRIBUTE_SCID_LEN:
+ if (nla_len(iter) != sizeof(uint32)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ if (cmd_data->scid.dlen) {
+ WL_ERR(("trying to overwrite:%d\n", attr_type));
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->scid.dlen = nla_get_u32(iter);
+ if (cmd_data->scid.dlen > MAX_SCID_LEN) {
+ ret = -EINVAL;
+ WL_ERR_RLMT(("Not allowed beyond %d\n", MAX_SCID_LEN));
+ goto exit;
+ }
+ WL_TRACE(("valid scid length = %u\n", cmd_data->scid.dlen));
+ break;
+ case NAN_ATTRIBUTE_SCID:
+ if (!cmd_data->scid.dlen || (nla_len(iter) != cmd_data->scid.dlen)) {
+ WL_ERR(("wrong scid len:%d,%d\n", cmd_data->scid.dlen,
+ nla_len(iter)));
+ ret = -EINVAL;
+ goto exit;
+ }
+ if (cmd_data->scid.data) {
+ WL_ERR(("trying to overwrite:%d\n", attr_type));
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ cmd_data->scid.data = MALLOCZ(cfg->osh, cmd_data->scid.dlen);
+ if (cmd_data->scid.data == NULL) {
+ WL_ERR(("failed to allocate scid, len=%d\n",
+ cmd_data->scid.dlen));
+ ret = -ENOMEM;
+ goto exit;
+ }
+ ret = memcpy_s(cmd_data->scid.data, cmd_data->scid.dlen,
+ nla_data(iter), nla_len(iter));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to scid data\n"));
+ return ret;
+ }
+ break;
+ case NAN_ATTRIBUTE_2G_AWAKE_DW:
+ if (nla_len(iter) != sizeof(uint32)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ if (nla_get_u32(iter) > NAN_MAX_AWAKE_DW_INTERVAL) {
+ WL_ERR(("%s: Invalid/Out of bound value = %u\n",
+ __FUNCTION__, nla_get_u32(iter)));
+ ret = -EINVAL;
+ goto exit;
+ }
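+ /* As with NAN_ATTRIBUTE_PERIOD, the value is an exponent n:
+ * DW interval = 2^(n-1); 0 keeps the firmware default. The 5GHz
+ * attribute below is encoded the same way.
+ */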
+ if (nla_get_u32(iter)) {
+ cmd_data->awake_dws.dw_interval_2g =
+ 1 << (nla_get_u32(iter)-1);
+ }
+ *nan_attr_mask |= NAN_ATTR_2G_DW_CONFIG;
+ break;
+ case NAN_ATTRIBUTE_5G_AWAKE_DW:
+ if (nla_len(iter) != sizeof(uint32)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ if (nla_get_u32(iter) > NAN_MAX_AWAKE_DW_INTERVAL) {
+ WL_ERR(("%s: Invalid/Out of bound value = %u\n",
+ __FUNCTION__, nla_get_u32(iter)));
+ ret = BCME_BADARG;
+ break;
+ }
+ if (nla_get_u32(iter)) {
+ cmd_data->awake_dws.dw_interval_5g =
+ 1 << (nla_get_u32(iter)-1);
+ }
+ *nan_attr_mask |= NAN_ATTR_5G_DW_CONFIG;
+ break;
+ case NAN_ATTRIBUTE_DISC_IND_CFG:
+ if (nla_len(iter) != sizeof(uint8)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->disc_ind_cfg = nla_get_u8(iter);
+ break;
+ case NAN_ATTRIBUTE_MAC_ADDR:
+ if (nla_len(iter) != ETHER_ADDR_LEN) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ ret = memcpy_s((char*)&cmd_data->mac_addr, ETHER_ADDR_LEN,
+ (char*)nla_data(iter), nla_len(iter));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy mac addr\n"));
+ return ret;
+ }
+ break;
+ case NAN_ATTRIBUTE_RANDOMIZATION_INTERVAL:
+ if (nla_len(iter) != sizeof(uint32)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ /* Runtime NMI randomization is not supported as of now; a random
+ * MAC is applied only during NAN enable/iface-create.
+ */
+ cmd_data->nmi_rand_intvl = nla_get_u32(iter);
+ if (cmd_data->nmi_rand_intvl > 0) {
+ cfg->nancfg->mac_rand = true;
+ } else {
+ cfg->nancfg->mac_rand = false;
+ }
+ break;
+ case NAN_ATTRIBUTE_CMD_USE_NDPE:
+ if (nla_len(iter) != sizeof(uint32)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->use_ndpe_attr = nla_get_u32(iter);
+ break;
+ case NAN_ATTRIBUTE_ENABLE_MERGE:
+ if (nla_len(iter) != sizeof(uint8)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->enable_merge = nla_get_u8(iter);
+ break;
+ case NAN_ATTRIBUTE_DISCOVERY_BEACON_INTERVAL:
+ if (nla_len(iter) != sizeof(uint32)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->disc_bcn_interval = nla_get_u32(iter);
+ *nan_attr_mask |= NAN_ATTR_DISC_BEACON_INTERVAL;
+ break;
+ case NAN_ATTRIBUTE_NSS:
+ if (nla_len(iter) != sizeof(uint32)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ /* FW handles NSS internally; the value received from the
+ * framework is ignored.
+ */
+ break;
+ case NAN_ATTRIBUTE_ENABLE_RANGING:
+ if (nla_len(iter) != sizeof(uint32)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cfg->nancfg->ranging_enable = nla_get_u32(iter);
+ if (cfg->nancfg->ranging_enable == 0) {
+ WL_ERR((" ranging enable is not set \n"));
+ cmd_data->status = BCME_BADARG;
+ goto exit;
+ }
+ break;
+ case NAN_ATTRIBUTE_DW_EARLY_TERM:
+ if (nla_len(iter) != sizeof(uint32)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ cmd_data->dw_early_termination = nla_get_u32(iter);
+ break;
+ default:
+ WL_ERR(("%s: Unknown type, %d\n", __FUNCTION__, attr_type));
+ ret = -EINVAL;
+ goto exit;
+ }
+ }
+
+exit:
+ /* TODO: set_config_handler needs to be called before start/enable */
+ NAN_DBG_EXIT();
+ if (ret) {
+ WL_ERR(("%s: Failed to parse attribute %d ret %d",
+ __FUNCTION__, attr_type, ret));
+ }
+ return ret;
+}
+
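+/* Fill the vendor event skb for an NDP-established indication: NDP id,
+ * responder NDI (needed by the framework to derive the peer's IPv6
+ * link-local address), response code and optional service info.
+ */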
+static int
+wl_cfgvendor_nan_dp_estb_event_data_filler(struct sk_buff *msg,
+ nan_event_data_t *event_data) {
+ int ret = BCME_OK;
+ ret = nla_put_u32(msg, NAN_ATTRIBUTE_NDP_ID, event_data->ndp_id);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put NDP ID, ret=%d\n", ret));
+ goto fail;
+ }
+ /*
+ * NDI mac address of the peer
+ * (required to derive target ipv6 address)
+ */
+ ret = nla_put(msg, NAN_ATTRIBUTE_PEER_NDI_MAC_ADDR, ETH_ALEN,
+ event_data->responder_ndi.octet);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put resp ndi, ret=%d\n", ret));
+ goto fail;
+ }
+ ret = nla_put_u8(msg, NAN_ATTRIBUTE_RSP_CODE, event_data->status);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put response code, ret=%d\n", ret));
+ goto fail;
+ }
+ if (event_data->svc_info.dlen && event_data->svc_info.data) {
+ ret = nla_put_u16(msg, NAN_ATTRIBUTE_SERVICE_SPECIFIC_INFO_LEN,
+ event_data->svc_info.dlen);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put svc info len, ret=%d\n", ret));
+ goto fail;
+ }
+ ret = nla_put(msg, NAN_ATTRIBUTE_SERVICE_SPECIFIC_INFO,
+ event_data->svc_info.dlen, event_data->svc_info.data);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put svc info, ret=%d\n", ret));
+ goto fail;
+ }
+ }
+
+fail:
+ return ret;
+}
+static int
+wl_cfgvendor_nan_dp_ind_event_data_filler(struct sk_buff *msg,
+ nan_event_data_t *event_data) {
+ int ret = BCME_OK;
+
+ ret = nla_put_u16(msg, NAN_ATTRIBUTE_PUBLISH_ID,
+ event_data->pub_id);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put pub ID, ret=%d\n", ret));
+ goto fail;
+ }
+ ret = nla_put_u32(msg, NAN_ATTRIBUTE_NDP_ID, event_data->ndp_id);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put NDP ID, ret=%d\n", ret));
+ goto fail;
+ }
+ /* Discovery MAC addr of the peer/initiator */
+ ret = nla_put(msg, NAN_ATTRIBUTE_MAC_ADDR, ETH_ALEN,
+ event_data->remote_nmi.octet);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put remote NMI, ret=%d\n", ret));
+ goto fail;
+ }
+ ret = nla_put_u8(msg, NAN_ATTRIBUTE_SECURITY, event_data->security);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put security, ret=%d\n", ret));
+ goto fail;
+ }
+ if (event_data->svc_info.dlen && event_data->svc_info.data) {
+ ret = nla_put_u16(msg, NAN_ATTRIBUTE_SERVICE_SPECIFIC_INFO_LEN,
+ event_data->svc_info.dlen);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put svc info len, ret=%d\n", ret));
+ goto fail;
+ }
+ ret = nla_put(msg, NAN_ATTRIBUTE_SERVICE_SPECIFIC_INFO,
+ event_data->svc_info.dlen, event_data->svc_info.data);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put svc info, ret=%d\n", ret));
+ goto fail;
+ }
+ }
+
+fail:
+ return ret;
+}
+
+static int
+wl_cfgvendor_nan_tx_followup_ind_event_data_filler(struct sk_buff *msg,
+ nan_event_data_t *event_data) {
+ int ret = BCME_OK;
+ ret = nla_put_u16(msg, NAN_ATTRIBUTE_TRANSAC_ID, event_data->token);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put transaction id, ret=%d\n", ret));
+ goto fail;
+ }
+ ret = nla_put_u8(msg, NAN_ATTRIBUTE_HANDLE, event_data->local_inst_id);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put handle, ret=%d\n", ret));
+ goto fail;
+ }
+ ret = nla_put_u16(msg, NAN_ATTRIBUTE_STATUS, event_data->status);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put nan status, ret=%d\n", ret));
+ goto fail;
+ }
+ if (event_data->status == NAN_STATUS_SUCCESS) {
+ ret = nla_put(msg, NAN_ATTRIBUTE_REASON,
+ strlen("NAN_STATUS_SUCCESS"), event_data->nan_reason);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put nan reason, ret=%d\n", ret));
+ goto fail;
+ }
+ } else {
+ ret = nla_put(msg, NAN_ATTRIBUTE_REASON,
+ strlen("NAN_STATUS_NO_OTA_ACK"), event_data->nan_reason);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put nan reason, ret=%d\n", ret));
+ goto fail;
+ }
+ }
+fail:
+ return ret;
+}
+
+static int
+wl_cfgvendor_nan_svc_terminate_event_filler(struct sk_buff *msg,
+ struct bcm_cfg80211 *cfg, int event_id, nan_event_data_t *event_data) {
+ int ret = BCME_OK;
+ ret = nla_put_u8(msg, NAN_ATTRIBUTE_HANDLE, event_data->local_inst_id);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put handle, ret=%d\n", ret));
+ goto fail;
+ }
+
+ if (event_id == GOOGLE_NAN_EVENT_SUBSCRIBE_TERMINATED) {
+ ret = nla_put_u16(msg, NAN_ATTRIBUTE_SUBSCRIBE_ID,
+ event_data->local_inst_id);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put local inst id, ret=%d\n", ret));
+ goto fail;
+ }
+ } else {
+ ret = nla_put_u16(msg, NAN_ATTRIBUTE_PUBLISH_ID,
+ event_data->local_inst_id);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put local inst id, ret=%d\n", ret));
+ goto fail;
+ }
+ }
+ ret = nla_put_u16(msg, NAN_ATTRIBUTE_STATUS, event_data->status);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put status, ret=%d\n", ret));
+ goto fail;
+ }
+ if (event_data->status == NAN_STATUS_SUCCESS) {
+ ret = nla_put(msg, NAN_ATTRIBUTE_REASON,
+ strlen("NAN_STATUS_SUCCESS"), event_data->nan_reason);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put nan reason, ret=%d\n", ret));
+ goto fail;
+ }
+ } else {
+ ret = nla_put(msg, NAN_ATTRIBUTE_REASON,
+ strlen("NAN_STATUS_INTERNAL_FAILURE"), event_data->nan_reason);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put nan reason, ret=%d\n", ret));
+ goto fail;
+ }
+ }
+
+ ret = wl_cfgnan_remove_inst_id(cfg, event_data->local_inst_id);
+ if (ret) {
+ WL_ERR(("failed to free svc instance-id[%d], ret=%d, event_id = %d\n",
+ event_data->local_inst_id, ret, event_id));
+ goto fail;
+ }
+fail:
+ return ret;
+}
+
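+/* Append the optional service-specific info and SDEA service-specific
+ * info attributes to the event message, when present.
+ */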
+static int
+wl_cfgvendor_nan_opt_params_filler(struct sk_buff *msg,
+ nan_event_data_t *event_data) {
+ int ret = BCME_OK;
+ /* service specific info data */
+ if (event_data->svc_info.dlen && event_data->svc_info.data) {
+ ret = nla_put_u16(msg, NAN_ATTRIBUTE_SERVICE_SPECIFIC_INFO_LEN,
+ event_data->svc_info.dlen);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put svc info len, ret=%d\n", ret));
+ goto fail;
+ }
+ ret = nla_put(msg, NAN_ATTRIBUTE_SERVICE_SPECIFIC_INFO,
+ event_data->svc_info.dlen, event_data->svc_info.data);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put svc info, ret=%d\n", ret));
+ goto fail;
+ }
+ WL_TRACE(("svc info len = %d\n", event_data->svc_info.dlen));
+ }
+
+ /* sdea service specific info data */
+ if (event_data->sde_svc_info.dlen && event_data->sde_svc_info.data) {
+ ret = nla_put_u16(msg, NAN_ATTRIBUTE_SDEA_SERVICE_SPECIFIC_INFO_LEN,
+ event_data->sde_svc_info.dlen);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put sdea svc info len, ret=%d\n", ret));
+ goto fail;
+ }
+ ret = nla_put(msg, NAN_ATTRIBUTE_SDEA_SERVICE_SPECIFIC_INFO,
+ event_data->sde_svc_info.dlen,
+ event_data->sde_svc_info.data);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put sdea svc info, ret=%d\n", ret));
+ goto fail;
+ }
+ WL_TRACE(("sdea svc info len = %d\n", event_data->sde_svc_info.dlen));
+ }
+ /* service control discovery range limit */
+ /* TODO: */
+
+ /* service control binding bitmap */
+ /* TODO: */
+fail:
+ return ret;
+}
+
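+/* Fill netlink attributes for a received followup: handle, instance ids,
+ * peer NMI and the RSSI of the followup packet.
+ */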
+static int
+wl_cfgvendor_nan_tx_followup_event_filler(struct sk_buff *msg,
+ nan_event_data_t *event_data) {
+ int ret = BCME_OK;
+ /* In followup pkt, instance id and requestor instance id are configured
+ * from the transmitter perspective. As the event is processed with the
+ * role of receiver, the local handle should use requestor instance
+ * id (peer_inst_id)
+ */
+ WL_TRACE(("handle=%d\n", event_data->requestor_id));
+ WL_TRACE(("inst id (local id)=%d\n", event_data->local_inst_id));
+ WL_TRACE(("peer id (remote id)=%d\n", event_data->requestor_id));
+ WL_TRACE(("peer mac addr=" MACDBG "\n",
+ MAC2STRDBG(event_data->remote_nmi.octet)));
+ WL_TRACE(("peer rssi: %d\n", event_data->fup_rssi));
+ WL_TRACE(("attribute no: %d\n", event_data->attr_num));
+ WL_TRACE(("attribute len: %d\n", event_data->attr_list_len));
+
+ ret = nla_put_u8(msg, NAN_ATTRIBUTE_HANDLE, event_data->requestor_id);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put handle, ret=%d\n", ret));
+ goto fail;
+ }
+ ret = nla_put_u32(msg, NAN_ATTRIBUTE_INST_ID, event_data->local_inst_id);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put local inst id, ret=%d\n", ret));
+ goto fail;
+ }
+ ret = nla_put_u16(msg, NAN_ATTRIBUTE_PEER_ID, event_data->requestor_id);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put requestor inst id, ret=%d\n", ret));
+ goto fail;
+ }
+ ret = nla_put(msg, NAN_ATTRIBUTE_MAC_ADDR, ETHER_ADDR_LEN,
+ event_data->remote_nmi.octet);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put remote nmi, ret=%d\n", ret));
+ goto fail;
+ }
+ ret = nla_put_s8(msg, NAN_ATTRIBUTE_RSSI_PROXIMITY,
+ event_data->fup_rssi);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put fup rssi, ret=%d\n", ret));
+ goto fail;
+ }
+fail:
+ return ret;
+}
+
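+/* Fill netlink attributes for a subscribe match event: publish/subscribe
+ * ids, peer NMI, optional RSSI and ranging results, and the tx match
+ * filter.
+ */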
+static int
+wl_cfgvendor_nan_sub_match_event_filler(struct sk_buff *msg,
+ nan_event_data_t *event_data) {
+ int ret = BCME_OK;
+ WL_TRACE(("handle (sub_id)=%d\n", event_data->sub_id));
+ WL_TRACE(("pub id=%d\n", event_data->pub_id));
+ WL_TRACE(("sub id=%d\n", event_data->sub_id));
+ WL_TRACE(("pub mac addr=" MACDBG "\n",
+ MAC2STRDBG(event_data->remote_nmi.octet)));
+ WL_TRACE(("attr no: %d\n", event_data->attr_num));
+ WL_TRACE(("attr len: %d\n", event_data->attr_list_len));
+
+ ret = nla_put_u8(msg, NAN_ATTRIBUTE_HANDLE, event_data->sub_id);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put handle, ret=%d\n", ret));
+ goto fail;
+ }
+ ret = nla_put_u16(msg, NAN_ATTRIBUTE_PUBLISH_ID, event_data->pub_id);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put pub id, ret=%d\n", ret));
+ goto fail;
+ }
+ ret = nla_put_u16(msg, NAN_ATTRIBUTE_SUBSCRIBE_ID, event_data->sub_id);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put Sub Id, ret=%d\n", ret));
+ goto fail;
+ }
+ ret = nla_put(msg, NAN_ATTRIBUTE_MAC_ADDR, ETHER_ADDR_LEN,
+ event_data->remote_nmi.octet);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put remote NMI, ret=%d\n", ret));
+ goto fail;
+ }
+ if (event_data->publish_rssi) {
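+ /* Negate the reported RSSI magnitude before sending it up */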
+ event_data->publish_rssi = -event_data->publish_rssi;
+ ret = nla_put_u8(msg, NAN_ATTRIBUTE_RSSI_PROXIMITY,
+ event_data->publish_rssi);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put publish rssi, ret=%d\n", ret));
+ goto fail;
+ }
+ }
+ if (event_data->ranging_result_present) {
+ ret = nla_put_u32(msg, NAN_ATTRIBUTE_RANGING_INDICATION,
+ event_data->ranging_ind);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put ranging ind, ret=%d\n", ret));
+ goto fail;
+ }
+ ret = nla_put_u32(msg, NAN_ATTRIBUTE_RANGING_RESULT,
+ event_data->range_measurement_cm);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put range measurement cm, ret=%d\n",
+ ret));
+ goto fail;
+ }
+ }
+ /*
+ * handling optional service control, service response filter
+ */
+ if (event_data->tx_match_filter.dlen && event_data->tx_match_filter.data) {
+ ret = nla_put_u16(msg, NAN_ATTRIBUTE_TX_MATCH_FILTER_LEN,
+ event_data->tx_match_filter.dlen);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put tx match filter len, ret=%d\n",
+ ret));
+ goto fail;
+ }
+ ret = nla_put(msg, NAN_ATTRIBUTE_TX_MATCH_FILTER,
+ event_data->tx_match_filter.dlen,
+ event_data->tx_match_filter.data);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put tx match filter data, ret=%d\n",
+ ret));
+ goto fail;
+ }
+ WL_TRACE(("tx matching filter (%d):\n",
+ event_data->tx_match_filter.dlen));
+ }
+
+fail:
+ return ret;
+}
+
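+/* Fill netlink attributes for a discovery engine (DE) event: enable
+ * status, DE event type, cluster id and the local NMI.
+ */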
+static int
+wl_cfgvendor_nan_de_event_filler(struct sk_buff *msg, nan_event_data_t *event_data)
+{
+ int ret = BCME_OK;
+ ret = nla_put_u8(msg, NAN_ATTRIBUTE_ENABLE_STATUS, event_data->enabled);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put event_data->enabled, ret=%d\n", ret));
+ goto fail;
+ }
+ ret = nla_put_u8(msg, NAN_ATTRIBUTE_DE_EVENT_TYPE,
+ event_data->nan_de_evt_type);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put nan_de_evt_type, ret=%d\n", ret));
+ goto fail;
+ }
+ ret = nla_put(msg, NAN_ATTRIBUTE_CLUSTER_ID, ETH_ALEN,
+ event_data->clus_id.octet);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put clust id, ret=%d\n", ret));
+ goto fail;
+ }
+ /* OOB tests require the local NMI */
+ ret = nla_put(msg, NAN_ATTRIBUTE_MAC_ADDR, ETH_ALEN,
+ event_data->local_nmi.octet);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put NMI, ret=%d\n", ret));
+ goto fail;
+ }
+fail:
+ return ret;
+}
+
+#ifdef RTT_SUPPORT
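+/* Report a NAN ranging result to the framework as a legacy RTT result
+ * via the GOOGLE_RTT_COMPLETE_EVENT vendor event.
+ */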
+s32
+wl_cfgvendor_send_as_rtt_legacy_event(struct wiphy *wiphy, struct net_device *dev,
+ wl_nan_ev_rng_rpt_ind_t *range_res, uint32 status)
+{
+ s32 ret = BCME_OK;
+ gfp_t kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+ rtt_report_t *report = NULL;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct sk_buff *msg = NULL;
+ struct nlattr *rtt_nl_hdr;
+
+ NAN_DBG_ENTER();
+
+ report = MALLOCZ(cfg->osh, sizeof(*report));
+ if (!report) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ ret = BCME_NOMEM;
+ goto exit;
+ }
+ if (range_res) {
+ report->distance = range_res->dist_mm/10;
+ ret = memcpy_s(&report->addr, ETHER_ADDR_LEN,
+ &range_res->peer_m_addr, ETHER_ADDR_LEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy peer_m_addr\n"));
+ goto exit;
+ }
+ }
+ report->status = (rtt_reason_t)status;
+ report->type = RTT_TWO_WAY;
+
+#if (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || \
+ LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
+ msg = cfg80211_vendor_event_alloc(wiphy, NULL, 100,
+ GOOGLE_RTT_COMPLETE_EVENT, kflags);
+#else
+ msg = cfg80211_vendor_event_alloc(wiphy, 100, GOOGLE_RTT_COMPLETE_EVENT, kflags);
+#endif /* (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || */
+ /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) */
+ if (!msg) {
+ WL_ERR(("%s: fail to allocate skb for vendor event\n", __FUNCTION__));
+ ret = BCME_NOMEM;
+ goto exit;
+ }
+
+ ret = nla_put_u32(msg, RTT_ATTRIBUTE_RESULTS_COMPLETE, 1);
+ if (ret < 0) {
+ WL_ERR(("Failed to put RTT_ATTRIBUTE_RESULTS_COMPLETE\n"));
+ goto exit;
+ }
+ rtt_nl_hdr = nla_nest_start(msg, RTT_ATTRIBUTE_RESULTS_PER_TARGET);
+ if (!rtt_nl_hdr) {
+ WL_ERR(("rtt_nl_hdr is NULL\n"));
+ ret = BCME_NOMEM;
+ goto exit;
+ }
+ ret = nla_put(msg, RTT_ATTRIBUTE_TARGET_MAC, ETHER_ADDR_LEN, &report->addr);
+ if (ret < 0) {
+ WL_ERR(("Failed to put RTT_ATTRIBUTE_TARGET_MAC\n"));
+ goto exit;
+ }
+ ret = nla_put_u32(msg, RTT_ATTRIBUTE_RESULT_CNT, 1);
+ if (ret < 0) {
+ WL_ERR(("Failed to put RTT_ATTRIBUTE_RESULT_CNT\n"));
+ goto exit;
+ }
+ ret = nla_put(msg, RTT_ATTRIBUTE_RESULT,
+ sizeof(*report), report);
+ if (ret < 0) {
+ WL_ERR(("Failed to put RTT_ATTRIBUTE_RESULTS\n"));
+ goto exit;
+ }
+ nla_nest_end(msg, rtt_nl_hdr);
+ cfg80211_vendor_event(msg, kflags);
+ if (report) {
+ MFREE(cfg->osh, report, sizeof(*report));
+ }
+
+ NAN_DBG_EXIT();
+ return ret;
+exit:
+ if (msg)
+ dev_kfree_skb_any(msg);
+ WL_ERR(("Failed to send event GOOGLE_RTT_COMPLETE_EVENT,"
+ " -- Free skb, ret = %d\n", ret));
+ if (report)
+ MFREE(cfg->osh, report, sizeof(*report));
+ NAN_DBG_EXIT();
+ return ret;
+}
+#endif /* RTT_SUPPORT */
+
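+/* Send an asynchronous NAN command response up to the HAL as a vendor
+ * event carrying the raw response data.
+ */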
+static int
+wl_cfgvendor_send_nan_async_resp(struct wiphy *wiphy, struct wireless_dev *wdev,
+ int event_id, u8* nan_req_resp, u16 len)
+{
+ int ret = BCME_OK;
+ int buf_len = NAN_EVENT_BUFFER_SIZE_LARGE;
+ gfp_t kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+
+ struct sk_buff *msg;
+
+ NAN_DBG_ENTER();
+
+ /* Allocate the skb for vendor event */
+ msg = CFG80211_VENDOR_EVENT_ALLOC(wiphy, wdev, buf_len,
+ event_id, kflags);
+ if (!msg) {
+ WL_ERR(("%s: fail to allocate skb for vendor event\n", __FUNCTION__));
+ return -ENOMEM;
+ }
+
+ ret = nla_put(msg, NAN_ATTRIBUTE_CMD_RESP_DATA,
+ len, (u8*)nan_req_resp);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put resp data, ret=%d\n",
+ ret));
+ goto fail;
+ }
+ WL_DBG(("Event sent up to hal, event_id = %d, ret = %d\n",
+ event_id, ret));
+ cfg80211_vendor_event(msg, kflags);
+ NAN_DBG_EXIT();
+ return ret;
+
+fail:
+ dev_kfree_skb_any(msg);
+ WL_ERR(("Event not implemented or unknown -- Free skb, event_id = %d, ret = %d\n",
+ event_id, ret));
+ NAN_DBG_EXIT();
+ return ret;
+}
+
+int
+wl_cfgvendor_nan_send_async_disable_resp(struct wireless_dev *wdev)
+{
+ int ret = BCME_OK;
+ struct wiphy *wiphy = wdev->wiphy;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ nan_hal_resp_t nan_req_resp;
+ bzero(&nan_req_resp, sizeof(nan_req_resp));
+ nan_req_resp.status = NAN_STATUS_SUCCESS;
+ nan_req_resp.value = BCME_OK;
+ nan_req_resp.subcmd = NAN_WIFI_SUBCMD_DISABLE;
+ WL_INFORM_MEM(("Send NAN_ASYNC_RESPONSE_DISABLED\n"));
+ ret = wl_cfgvendor_send_nan_async_resp(wiphy, wdev,
+ NAN_ASYNC_RESPONSE_DISABLED, (u8*)&nan_req_resp, sizeof(nan_req_resp));
+ cfg->nancfg->notify_user = false;
+ return ret;
+}
+
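+/* Dispatch a NAN event to the HAL: allocate the vendor event skb, fill
+ * the event-specific attributes and send the event up.
+ */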
+int
+wl_cfgvendor_send_nan_event(struct wiphy *wiphy, struct net_device *dev,
+ int event_id, nan_event_data_t *event_data)
+{
+ int ret = BCME_OK;
+ int buf_len = NAN_EVENT_BUFFER_SIZE_LARGE;
+ gfp_t kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct sk_buff *msg;
+
+ NAN_DBG_ENTER();
+
+ /* Allocate the skb for vendor event */
+ msg = CFG80211_VENDOR_EVENT_ALLOC(wiphy, ndev_to_wdev(dev), buf_len,
+ event_id, kflags);
+ if (!msg) {
+ WL_ERR(("%s: fail to allocate skb for vendor event\n", __FUNCTION__));
+ return -ENOMEM;
+ }
+
+ switch (event_id) {
+ case GOOGLE_NAN_EVENT_DE_EVENT: {
+ WL_INFORM_MEM(("[NAN] GOOGLE_NAN_DE_EVENT cluster id=" MACDBG "nmi= " MACDBG "\n",
+ MAC2STRDBG(event_data->clus_id.octet),
+ MAC2STRDBG(event_data->local_nmi.octet)));
+ ret = wl_cfgvendor_nan_de_event_filler(msg, event_data);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to fill de event data, ret=%d\n", ret));
+ goto fail;
+ }
+ break;
+ }
+ case GOOGLE_NAN_EVENT_SUBSCRIBE_MATCH:
+ case GOOGLE_NAN_EVENT_FOLLOWUP: {
+ if (event_id == GOOGLE_NAN_EVENT_SUBSCRIBE_MATCH) {
+ WL_DBG(("GOOGLE_NAN_EVENT_SUBSCRIBE_MATCH\n"));
+ ret = wl_cfgvendor_nan_sub_match_event_filler(msg, event_data);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to fill sub match event data, ret=%d\n", ret));
+ goto fail;
+ }
+ } else if (event_id == GOOGLE_NAN_EVENT_FOLLOWUP) {
+ WL_DBG(("GOOGLE_NAN_EVENT_FOLLOWUP\n"));
+ ret = wl_cfgvendor_nan_tx_followup_event_filler(msg, event_data);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to fill sub match event data, ret=%d\n", ret));
+ goto fail;
+ }
+ }
+ ret = wl_cfgvendor_nan_opt_params_filler(msg, event_data);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to fill sub match event data, ret=%d\n", ret));
+ goto fail;
+ }
+ break;
+ }
+
+ case GOOGLE_NAN_EVENT_DISABLED: {
+ WL_INFORM_MEM(("[NAN] GOOGLE_NAN_EVENT_DISABLED\n"));
+ ret = nla_put_u8(msg, NAN_ATTRIBUTE_HANDLE, 0);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put handle, ret=%d\n", ret));
+ goto fail;
+ }
+ ret = nla_put_u16(msg, NAN_ATTRIBUTE_STATUS, event_data->status);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put status, ret=%d\n", ret));
+ goto fail;
+ }
+ ret = nla_put(msg, NAN_ATTRIBUTE_REASON,
+ strlen("NAN_STATUS_SUCCESS"), event_data->nan_reason);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put reason code, ret=%d\n", ret));
+ goto fail;
+ }
+ break;
+ }
+
+ case GOOGLE_NAN_EVENT_SUBSCRIBE_TERMINATED:
+ case GOOGLE_NAN_EVENT_PUBLISH_TERMINATED: {
+ WL_DBG(("GOOGLE_NAN_SVC_TERMINATED, %d\n", event_id));
+ ret = wl_cfgvendor_nan_svc_terminate_event_filler(msg, cfg, event_id, event_data);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to fill svc terminate event data, ret=%d\n", ret));
+ goto fail;
+ }
+ break;
+ }
+
+ case GOOGLE_NAN_EVENT_TRANSMIT_FOLLOWUP_IND: {
+ WL_DBG(("GOOGLE_NAN_EVENT_TRANSMIT_FOLLOWUP_IND %d\n",
+ GOOGLE_NAN_EVENT_TRANSMIT_FOLLOWUP_IND));
+ ret = wl_cfgvendor_nan_tx_followup_ind_event_data_filler(msg, event_data);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to fill tx follow up ind event data, ret=%d\n", ret));
+ goto fail;
+ }
+
+ break;
+ }
+
+ case GOOGLE_NAN_EVENT_DATA_REQUEST: {
+ WL_INFORM_MEM(("[NAN] GOOGLE_NAN_EVENT_DATA_REQUEST\n"));
+ ret = wl_cfgvendor_nan_dp_ind_event_data_filler(msg, event_data);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to fill dp ind event data, ret=%d\n", ret));
+ goto fail;
+ }
+ break;
+ }
+
+ case GOOGLE_NAN_EVENT_DATA_CONFIRMATION: {
+ WL_INFORM_MEM(("[NAN] GOOGLE_NAN_EVENT_DATA_CONFIRMATION\n"));
+
+ ret = wl_cfgvendor_nan_dp_estb_event_data_filler(msg, event_data);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to fill dp estb event data, ret=%d\n", ret));
+ goto fail;
+ }
+ break;
+ }
+
+ case GOOGLE_NAN_EVENT_DATA_END: {
+ WL_INFORM_MEM(("[NAN] GOOGLE_NAN_EVENT_DATA_END\n"));
+ ret = nla_put_u8(msg, NAN_ATTRIBUTE_INST_COUNT, 1);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put inst count, ret=%d\n", ret));
+ goto fail;
+ }
+ ret = nla_put_u32(msg, NAN_ATTRIBUTE_NDP_ID, event_data->ndp_id);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put ndp id, ret=%d\n", ret));
+ goto fail;
+ }
+ break;
+ }
+
+ default:
+ goto fail;
+ }
+
+ cfg80211_vendor_event(msg, kflags);
+ NAN_DBG_EXIT();
+ return ret;
+
+fail:
+ dev_kfree_skb_any(msg);
+ WL_ERR(("Event not implemented or unknown -- Free skb, event_id = %d, ret = %d\n",
+ event_id, ret));
+ NAN_DBG_EXIT();
+ return ret;
+}
+
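+/* Vendor command handler for subscribe: parse the discover arguments,
+ * allocate a service instance id when none is given and issue the
+ * subscribe request to the firmware.
+ */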
+static int
+wl_cfgvendor_nan_req_subscribe(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void * data, int len)
+{
+ int ret = 0;
+ nan_discover_cmd_data_t *cmd_data = NULL;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ nan_hal_resp_t nan_req_resp;
+
+ NAN_DBG_ENTER();
+ /* Blocking Subscribe if NAN is not enabled */
+ if (!cfg->nancfg->nan_enable) {
+ WL_ERR(("nan is not enabled, subscribe blocked\n"));
+ ret = BCME_ERROR;
+ goto exit;
+ }
+ cmd_data = (nan_discover_cmd_data_t *)MALLOCZ(cfg->osh, sizeof(*cmd_data));
+ if (!cmd_data) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ ret = BCME_NOMEM;
+ goto exit;
+ }
+
+ bzero(&nan_req_resp, sizeof(nan_req_resp));
+ ret = wl_cfgvendor_nan_parse_discover_args(wiphy, data, len, cmd_data);
+ if (ret) {
+ WL_ERR(("failed to parse nan disc vendor args, ret = %d\n", ret));
+ goto exit;
+ }
+
+ if (cmd_data->sub_id == 0) {
+ ret = wl_cfgnan_generate_inst_id(cfg, &cmd_data->sub_id);
+ if (ret) {
+ WL_ERR(("failed to generate instance-id for subscribe\n"));
+ goto exit;
+ }
+ } else {
+ cmd_data->svc_update = true;
+ }
+
+ ret = wl_cfgnan_subscribe_handler(wdev->netdev, cfg, cmd_data);
+ if (unlikely(ret) || unlikely(cmd_data->status)) {
+ WL_ERR(("failed to subscribe error[%d], status = [%d]\n",
+ ret, cmd_data->status));
+ wl_cfgnan_remove_inst_id(cfg, cmd_data->sub_id);
+ goto exit;
+ }
+
+ WL_DBG(("subscriber instance id=%d\n", cmd_data->sub_id));
+
+ if (cmd_data->status == WL_NAN_E_OK) {
+ nan_req_resp.instance_id = cmd_data->sub_id;
+ } else {
+ nan_req_resp.instance_id = 0;
+ }
+exit:
+ ret = wl_cfgvendor_nan_cmd_reply(wiphy, NAN_WIFI_SUBCMD_REQUEST_SUBSCRIBE,
+ &nan_req_resp, ret, cmd_data ? cmd_data->status : BCME_OK);
+ wl_cfgvendor_free_disc_cmd_data(cfg, cmd_data);
+ NAN_DBG_EXIT();
+ return ret;
+}
+
+static int
+wl_cfgvendor_nan_req_publish(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void * data, int len)
+{
+ int ret = 0;
+ nan_discover_cmd_data_t *cmd_data = NULL;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ nan_hal_resp_t nan_req_resp;
+ NAN_DBG_ENTER();
+
+ /* Blocking Publish if NAN is not enabled */
+ if (!cfg->nancfg->nan_enable) {
+ WL_ERR(("nan is not enabled, publish blocked\n"));
+ ret = BCME_ERROR;
+ goto exit;
+ }
+ cmd_data = (nan_discover_cmd_data_t *)MALLOCZ(cfg->osh, sizeof(*cmd_data));
+ if (!cmd_data) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ ret = BCME_NOMEM;
+ goto exit;
+ }
+
+ bzero(&nan_req_resp, sizeof(nan_req_resp));
+ ret = wl_cfgvendor_nan_parse_discover_args(wiphy, data, len, cmd_data);
+ if (ret) {
+ WL_ERR(("failed to parse nan disc vendor args, ret = %d\n", ret));
+ goto exit;
+ }
+
+ if (cmd_data->pub_id == 0) {
+ ret = wl_cfgnan_generate_inst_id(cfg, &cmd_data->pub_id);
+ if (ret) {
+ WL_ERR(("failed to generate instance-id for publisher\n"));
+ goto exit;
+ }
+ } else {
+ cmd_data->svc_update = true;
+ }
+
+ ret = wl_cfgnan_publish_handler(wdev->netdev, cfg, cmd_data);
+ if (unlikely(ret) || unlikely(cmd_data->status)) {
+ WL_ERR(("failed to publish error[%d], status[%d]\n",
+ ret, cmd_data->status));
+ wl_cfgnan_remove_inst_id(cfg, cmd_data->pub_id);
+ goto exit;
+ }
+
+ WL_DBG(("publisher instance id=%d\n", cmd_data->pub_id));
+
+ if (cmd_data->status == WL_NAN_E_OK) {
+ nan_req_resp.instance_id = cmd_data->pub_id;
+ } else {
+ nan_req_resp.instance_id = 0;
+ }
+exit:
+ ret = wl_cfgvendor_nan_cmd_reply(wiphy, NAN_WIFI_SUBCMD_REQUEST_PUBLISH,
+ &nan_req_resp, ret, cmd_data ? cmd_data->status : BCME_OK);
+ wl_cfgvendor_free_disc_cmd_data(cfg, cmd_data);
+ NAN_DBG_EXIT();
+ return ret;
+}
+
+static int
+wl_cfgvendor_nan_start_handler(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int ret = 0;
+ nan_config_cmd_data_t *cmd_data;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ nan_hal_resp_t nan_req_resp;
+ uint32 nan_attr_mask = 0;
+
+ cmd_data = (nan_config_cmd_data_t *)MALLOCZ(cfg->osh, sizeof(*cmd_data));
+ if (!cmd_data) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ ret = BCME_NOMEM;
+ goto exit;
+ }
+ NAN_DBG_ENTER();
+
+ ret = wl_cfgnan_check_nan_disable_pending(cfg, false, true);
+ if (ret != BCME_OK) {
+ WL_ERR(("failed to disable nan, error[%d]\n", ret));
+ goto exit;
+ }
+
+ if (cfg->nancfg->nan_enable) {
+ WL_ERR(("nan is already enabled\n"));
+ ret = BCME_OK;
+ goto exit;
+ }
+ bzero(&nan_req_resp, sizeof(nan_req_resp));
+
+ cmd_data->sid_beacon.sid_enable = NAN_SID_ENABLE_FLAG_INVALID; /* Setting to some default */
+ cmd_data->sid_beacon.sid_count = NAN_SID_BEACON_COUNT_INVALID; /* Setting to some default */
+
+ ret = wl_cfgvendor_nan_parse_args(wiphy, data, len, cmd_data, &nan_attr_mask);
+ if (ret) {
+ WL_ERR(("failed to parse nan vendor args, ret %d\n", ret));
+ goto exit;
+ }
+ if (cmd_data->status == BCME_BADARG) {
+ WL_ERR(("nan vendor args is invalid\n"));
+ goto exit;
+ }
+
+ ret = wl_cfgnan_start_handler(wdev->netdev, cfg, cmd_data, nan_attr_mask);
+ if (ret) {
+ WL_ERR(("failed to start nan error[%d]\n", ret));
+ goto exit;
+ }
+ /* Initializing Instance Id List */
+ bzero(cfg->nancfg->nan_inst_ctrl, NAN_ID_CTRL_SIZE * sizeof(nan_svc_inst_t));
+exit:
+ ret = wl_cfgvendor_nan_cmd_reply(wiphy, NAN_WIFI_SUBCMD_ENABLE,
+ &nan_req_resp, ret, cmd_data ? cmd_data->status : BCME_OK);
+ if (cmd_data) {
+ if (cmd_data->scid.data) {
+ MFREE(cfg->osh, cmd_data->scid.data, cmd_data->scid.dlen);
+ cmd_data->scid.dlen = 0;
+ }
+ MFREE(cfg->osh, cmd_data, sizeof(*cmd_data));
+ }
+ NAN_DBG_EXIT();
+ return ret;
+}
+
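+/* Terminate any active NAN data paths and cancel in-progress ranging
+ * sessions; *ssn_exists is set when at least one session was active.
+ */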
+static int
+wl_cfgvendor_terminate_dp_rng_sessions(struct bcm_cfg80211 *cfg,
+ struct wireless_dev *wdev, bool *ssn_exists)
+{
+ int ret = 0;
+ uint8 i = 0;
+ int status = BCME_ERROR;
+ wl_nancfg_t *nancfg = cfg->nancfg;
+ dhd_pub_t *dhdp;
+#ifdef RTT_SUPPORT
+ nan_ranging_inst_t *ranging_inst = NULL;
+#endif /* RTT_SUPPORT */
+
+ *ssn_exists = false;
+ dhdp = wl_cfg80211_get_dhdp(wdev->netdev);
+ /* Cleanup active Data Paths If any */
+ for (i = 0; i < NAN_MAX_NDP_PEER; i++) {
+ if (nancfg->ndp_id[i]) {
+ WL_DBG(("Found entry of ndp id = [%d], end dp associated to it\n",
+ nancfg->ndp_id[i]));
+ ret = wl_cfgnan_data_path_end_handler(wdev->netdev, cfg,
+ nancfg->ndp_id[i], &status);
+ if ((ret == BCME_OK) && cfg->nancfg->nan_enable &&
+ dhdp->up) {
+ *ssn_exists = true;
+ }
+ }
+ }
+
+#ifdef RTT_SUPPORT
+ /* Cancel ranging sessions */
+ for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
+ ranging_inst = &nancfg->nan_ranging_info[i];
+ if (ranging_inst->in_use &&
+ (NAN_RANGING_IS_IN_PROG(ranging_inst->range_status))) {
+ ret = wl_cfgnan_cancel_ranging(bcmcfg_to_prmry_ndev(cfg), cfg,
+ &ranging_inst->range_id,
+ NAN_RNG_TERM_FLAG_NONE, &status);
+ if (unlikely(ret) || unlikely(status)) {
+ WL_ERR(("nan range cancel failed ret = %d status = %d\n",
+ ret, status));
+ } else {
+ *ssn_exists = true;
+ }
+ }
+ }
+#endif /* RTT_SUPPORT */
+ return ret;
+}
+
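+/* Vendor command handler for NAN disable: terminate active sessions and
+ * schedule the deferred NAN disable work.
+ */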
+static int
+wl_cfgvendor_nan_stop_handler(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void * data, int len)
+{
+ int ret = BCME_OK;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ bool ssn_exists = false;
+ uint32 delay_ms = 0;
+ wl_nancfg_t *nancfg = cfg->nancfg;
+
+ NAN_DBG_ENTER();
+ mutex_lock(&cfg->if_sync);
+
+ if (nancfg->nan_init_state == false) {
+ WL_INFORM_MEM(("nan is not initialized/nmi doesnt exists\n"));
+ goto exit;
+ }
+ if (nancfg->nan_enable == false) {
+ WL_INFORM_MEM(("nan is in disabled state\n"));
+ } else {
+ nancfg->notify_user = true;
+ wl_cfgvendor_terminate_dp_rng_sessions(cfg, wdev, &ssn_exists);
+ if (ssn_exists == true) {
+ /*
+ * Schedule nan disable with NAN_DISABLE_CMD_DELAY
+ * delay to make sure
+ * fw cleans any active Data paths and
+ * notifies the peer about the dp session terminations
+ */
+ WL_INFORM_MEM(("Schedule Nan Disable Req with NAN_DISABLE_CMD_DELAY\n"));
+ delay_ms = NAN_DISABLE_CMD_DELAY;
+ DHD_NAN_WAKE_LOCK_TIMEOUT(cfg->pub, NAN_WAKELOCK_TIMEOUT);
+ } else {
+ delay_ms = 0;
+ }
+ schedule_delayed_work(&nancfg->nan_disable,
+ msecs_to_jiffies(delay_ms));
+ }
+exit:
+ mutex_unlock(&cfg->if_sync);
+ NAN_DBG_EXIT();
+ return ret;
+}
+
+static int
+wl_cfgvendor_nan_config_handler(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int ret = 0;
+ nan_config_cmd_data_t *cmd_data;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ nan_hal_resp_t nan_req_resp;
+ uint32 nan_attr_mask = 0;
+
+ cmd_data = MALLOCZ(cfg->osh, sizeof(*cmd_data));
+ if (!cmd_data) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ ret = BCME_NOMEM;
+ goto exit;
+ }
+ NAN_DBG_ENTER();
+
+ bzero(&nan_req_resp, sizeof(nan_req_resp));
+
+ cmd_data->avail_params.duration = NAN_BAND_INVALID; /* Setting to some default */
+ cmd_data->sid_beacon.sid_enable = NAN_SID_ENABLE_FLAG_INVALID; /* Setting to some default */
+ cmd_data->sid_beacon.sid_count = NAN_SID_BEACON_COUNT_INVALID; /* Setting to some default */
+
+ ret = wl_cfgvendor_nan_parse_args(wiphy, data, len, cmd_data, &nan_attr_mask);
+ if (ret) {
+ WL_ERR(("failed to parse nan vendor args, ret = %d\n", ret));
+ goto exit;
+ }
+ if (cmd_data->status == BCME_BADARG) {
+ WL_ERR(("nan vendor args is invalid\n"));
+ goto exit;
+ }
+
+ ret = wl_cfgnan_config_handler(wdev->netdev, cfg, cmd_data, nan_attr_mask);
+ if (ret) {
+ WL_ERR(("failed in config request, nan error[%d]\n", ret));
+ goto exit;
+ }
+exit:
+ ret = wl_cfgvendor_nan_cmd_reply(wiphy, NAN_WIFI_SUBCMD_CONFIG,
+ &nan_req_resp, ret, cmd_data ? cmd_data->status : BCME_OK);
+ if (cmd_data) {
+ if (cmd_data->scid.data) {
+ MFREE(cfg->osh, cmd_data->scid.data, cmd_data->scid.dlen);
+ cmd_data->scid.dlen = 0;
+ }
+ MFREE(cfg->osh, cmd_data, sizeof(*cmd_data));
+ }
+ NAN_DBG_EXIT();
+ return ret;
+}
+
+static int
+wl_cfgvendor_nan_cancel_publish(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void * data, int len)
+{
+ int ret = 0;
+ nan_discover_cmd_data_t *cmd_data = NULL;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ nan_hal_resp_t nan_req_resp;
+
+ /* Blocking Cancel_Publish if NAN is not enabled */
+ if (!cfg->nancfg->nan_enable) {
+ WL_ERR(("nan is not enabled, cancel publish blocked\n"));
+ ret = BCME_ERROR;
+ goto exit;
+ }
+ cmd_data = (nan_discover_cmd_data_t *)MALLOCZ(cfg->osh, sizeof(*cmd_data));
+ if (!cmd_data) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ ret = BCME_NOMEM;
+ goto exit;
+ }
+ NAN_DBG_ENTER();
+
+ bzero(&nan_req_resp, sizeof(nan_req_resp));
+
+ ret = wl_cfgvendor_nan_parse_discover_args(wiphy, data, len, cmd_data);
+ if (ret) {
+ WL_ERR(("failed to parse nan disc vendor args, ret= %d\n", ret));
+ goto exit;
+ }
+ nan_req_resp.instance_id = cmd_data->pub_id;
+ WL_INFORM_MEM(("[NAN] cancel publish instance_id=%d\n", cmd_data->pub_id));
+
+ ret = wl_cfgnan_cancel_pub_handler(wdev->netdev, cfg, cmd_data);
+ if (ret) {
+ WL_ERR(("failed to cancel publish nan instance-id[%d] error[%d]\n",
+ cmd_data->pub_id, ret));
+ goto exit;
+ }
+exit:
+ ret = wl_cfgvendor_nan_cmd_reply(wiphy, NAN_WIFI_SUBCMD_CANCEL_PUBLISH,
+ &nan_req_resp, ret, cmd_data ? cmd_data->status : BCME_OK);
+ wl_cfgvendor_free_disc_cmd_data(cfg, cmd_data);
+ NAN_DBG_EXIT();
+ return ret;
+}
+
+static int
+wl_cfgvendor_nan_cancel_subscribe(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void * data, int len)
+{
+ int ret = 0;
+ nan_discover_cmd_data_t *cmd_data = NULL;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ nan_hal_resp_t nan_req_resp;
+
+ /* Blocking Cancel_Subscribe if NAN is not enabled */
+ if (!cfg->nancfg->nan_enable) {
+ WL_ERR(("nan is not enabled, cancel subscribe blocked\n"));
+ ret = BCME_ERROR;
+ goto exit;
+ }
+ cmd_data = MALLOCZ(cfg->osh, sizeof(*cmd_data));
+ if (!cmd_data) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ ret = BCME_NOMEM;
+ goto exit;
+ }
+ NAN_DBG_ENTER();
+
+ bzero(&nan_req_resp, sizeof(nan_req_resp));
+
+ ret = wl_cfgvendor_nan_parse_discover_args(wiphy, data, len, cmd_data);
+ if (ret) {
+ WL_ERR(("failed to parse nan disc vendor args, ret= %d\n", ret));
+ goto exit;
+ }
+ nan_req_resp.instance_id = cmd_data->sub_id;
+ WL_INFORM_MEM(("[NAN] cancel subscribe instance_id=%d\n", cmd_data->sub_id));
+
+ ret = wl_cfgnan_cancel_sub_handler(wdev->netdev, cfg, cmd_data);
+ if (ret) {
+ WL_ERR(("failed to cancel subscribe nan instance-id[%d] error[%d]\n",
+ cmd_data->sub_id, ret));
+ goto exit;
+ }
+exit:
+ ret = wl_cfgvendor_nan_cmd_reply(wiphy, NAN_WIFI_SUBCMD_CANCEL_SUBSCRIBE,
+ &nan_req_resp, ret, cmd_data ? cmd_data->status : BCME_OK);
+ wl_cfgvendor_free_disc_cmd_data(cfg, cmd_data);
+ NAN_DBG_EXIT();
+ return ret;
+}
+
+static int
+wl_cfgvendor_nan_transmit(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void * data, int len)
+{
+ int ret = 0;
+ nan_discover_cmd_data_t *cmd_data = NULL;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ nan_hal_resp_t nan_req_resp;
+
+ /* Blocking Transmit if NAN is not enabled */
+ if (!cfg->nancfg->nan_enable) {
+ WL_ERR(("nan is not enabled, transmit blocked\n"));
+ ret = BCME_ERROR;
+ goto exit;
+ }
+ cmd_data = (nan_discover_cmd_data_t *)MALLOCZ(cfg->osh, sizeof(*cmd_data));
+ if (!cmd_data) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ ret = BCME_NOMEM;
+ goto exit;
+ }
+ NAN_DBG_ENTER();
+
+ bzero(&nan_req_resp, sizeof(nan_req_resp));
+
+ ret = wl_cfgvendor_nan_parse_discover_args(wiphy, data, len, cmd_data);
+ if (ret) {
+ WL_ERR(("failed to parse nan disc vendor args, ret= %d\n", ret));
+ goto exit;
+ }
+ nan_req_resp.instance_id = cmd_data->local_id;
+ ret = wl_cfgnan_transmit_handler(wdev->netdev, cfg, cmd_data);
+ if (ret) {
+ WL_ERR(("failed to transmit-followup nan error[%d]\n", ret));
+ goto exit;
+ }
+exit:
+ ret = wl_cfgvendor_nan_cmd_reply(wiphy, NAN_WIFI_SUBCMD_TRANSMIT,
+ &nan_req_resp, ret, cmd_data ? cmd_data->status : BCME_OK);
+ wl_cfgvendor_free_disc_cmd_data(cfg, cmd_data);
+ NAN_DBG_EXIT();
+ return ret;
+}
+
+static int
+wl_cfgvendor_nan_get_capablities(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void * data, int len)
+{
+ int ret = 0;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ nan_hal_resp_t nan_req_resp;
+
+ NAN_DBG_ENTER();
+
+ bzero(&nan_req_resp, sizeof(nan_req_resp));
+ ret = wl_cfgnan_get_capablities_handler(wdev->netdev, cfg, &nan_req_resp.capabilities);
+ if (ret) {
+ WL_ERR(("Could not get capabilities\n"));
+ ret = -EINVAL;
+ goto exit;
+ }
+exit:
+ ret = wl_cfgvendor_nan_cmd_reply(wiphy, NAN_WIFI_SUBCMD_GET_CAPABILITIES,
+ &nan_req_resp, ret, BCME_OK);
+ NAN_DBG_EXIT();
+ return ret;
+}
+
+static int
+wl_cfgvendor_nan_data_path_iface_create(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void * data, int len)
+{
+ int ret = 0;
+ nan_datapath_cmd_data_t *cmd_data = NULL;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ nan_hal_resp_t nan_req_resp;
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(wdev->netdev);
+
+ if (!cfg->nancfg->nan_init_state) {
+ WL_ERR(("%s: NAN is not inited or Device doesn't support NAN \n", __func__));
+ ret = -ENODEV;
+ goto exit;
+ }
+
+ cmd_data = (nan_datapath_cmd_data_t *)MALLOCZ(cfg->osh, sizeof(*cmd_data));
+ if (!cmd_data) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ ret = BCME_NOMEM;
+ goto exit;
+ }
+ NAN_DBG_ENTER();
+
+ bzero(&nan_req_resp, sizeof(nan_req_resp));
+
+ ret = wl_cfgvendor_nan_parse_datapath_args(wiphy, data, len, cmd_data);
+ if (ret) {
+ WL_ERR(("failed to parse nan datapath vendor args, ret = %d\n", ret));
+ goto exit;
+ }
+
+ if (cfg->nancfg->nan_enable) { /* in the new framework, iface create is called after nan enable */
+ ret = wl_cfgnan_data_path_iface_create_delete_handler(wdev->netdev,
+ cfg, cmd_data->ndp_iface,
+ NAN_WIFI_SUBCMD_DATA_PATH_IFACE_CREATE, dhdp->up);
+ if (ret != BCME_OK) {
+ WL_ERR(("failed to create iface, ret = %d\n", ret));
+ goto exit;
+ }
+ }
+exit:
+ ret = wl_cfgvendor_nan_cmd_reply(wiphy, NAN_WIFI_SUBCMD_DATA_PATH_IFACE_CREATE,
+ &nan_req_resp, ret, cmd_data ? cmd_data->status : BCME_OK);
+ wl_cfgvendor_free_dp_cmd_data(cfg, cmd_data);
+ NAN_DBG_EXIT();
+ return ret;
+}
+
+static int
+wl_cfgvendor_nan_data_path_iface_delete(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void * data, int len)
+{
+ int ret = 0;
+ nan_datapath_cmd_data_t *cmd_data = NULL;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ nan_hal_resp_t nan_req_resp;
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(wdev->netdev);
+
+ if (cfg->nancfg->nan_init_state == false) {
+ WL_ERR(("%s: NAN is not inited or Device doesn't support NAN \n", __func__));
+ /* Deinit has taken care of cleaing the virtual iface */
+ ret = BCME_OK;
+ goto exit;
+ }
+
+ NAN_DBG_ENTER();
+ cmd_data = (nan_datapath_cmd_data_t *)MALLOCZ(cfg->osh, sizeof(*cmd_data));
+ if (!cmd_data) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ ret = BCME_NOMEM;
+ goto exit;
+ }
+ bzero(&nan_req_resp, sizeof(nan_req_resp));
+ ret = wl_cfgvendor_nan_parse_datapath_args(wiphy, data, len, cmd_data);
+ if (ret) {
+ WL_ERR(("failed to parse nan datapath vendor args, ret = %d\n", ret));
+ goto exit;
+ }
+
+ ret = wl_cfgnan_data_path_iface_create_delete_handler(wdev->netdev, cfg,
+ (char*)cmd_data->ndp_iface,
+ NAN_WIFI_SUBCMD_DATA_PATH_IFACE_DELETE, dhdp->up);
+ if (ret) {
+ WL_ERR(("failed to delete ndp iface [%d]\n", ret));
+ goto exit;
+ }
+exit:
+ ret = wl_cfgvendor_nan_cmd_reply(wiphy, NAN_WIFI_SUBCMD_DATA_PATH_IFACE_DELETE,
+ &nan_req_resp, ret, cmd_data ? cmd_data->status : BCME_OK);
+ wl_cfgvendor_free_dp_cmd_data(cfg, cmd_data);
+ NAN_DBG_EXIT();
+ return ret;
+}
+
+static int
+wl_cfgvendor_nan_data_path_request(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void * data, int len)
+{
+ int ret = 0;
+ nan_datapath_cmd_data_t *cmd_data = NULL;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ nan_hal_resp_t nan_req_resp;
+ uint8 ndp_instance_id = 0;
+
+ if (!cfg->nancfg->nan_enable) {
+ WL_ERR(("nan is not enabled, nan data path request blocked\n"));
+ ret = BCME_ERROR;
+ goto exit;
+ }
+
+ NAN_DBG_ENTER();
+ cmd_data = (nan_datapath_cmd_data_t *)MALLOCZ(cfg->osh, sizeof(*cmd_data));
+ if (!cmd_data) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ ret = BCME_NOMEM;
+ goto exit;
+ }
+
+ bzero(&nan_req_resp, sizeof(nan_req_resp));
+ ret = wl_cfgvendor_nan_parse_datapath_args(wiphy, data, len, cmd_data);
+ if (ret) {
+ WL_ERR(("failed to parse nan datapath vendor args, ret = %d\n", ret));
+ goto exit;
+ }
+
+ ret = wl_cfgnan_data_path_request_handler(wdev->netdev, cfg,
+ cmd_data, &ndp_instance_id);
+ if (ret) {
+ WL_ERR(("failed to request nan data path [%d]\n", ret));
+ goto exit;
+ }
+
+ if (cmd_data->status == BCME_OK) {
+ nan_req_resp.ndp_instance_id = cmd_data->ndp_instance_id;
+ } else {
+ nan_req_resp.ndp_instance_id = 0;
+ }
+exit:
+ ret = wl_cfgvendor_nan_cmd_reply(wiphy, NAN_WIFI_SUBCMD_DATA_PATH_REQUEST,
+ &nan_req_resp, ret, cmd_data ? cmd_data->status : BCME_OK);
+ wl_cfgvendor_free_dp_cmd_data(cfg, cmd_data);
+ NAN_DBG_EXIT();
+ return ret;
+}
+
+static int
+wl_cfgvendor_nan_data_path_response(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void * data, int len)
+{
+ int ret = 0;
+ nan_datapath_cmd_data_t *cmd_data = NULL;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ nan_hal_resp_t nan_req_resp;
+
+ if (!cfg->nancfg->nan_enable) {
+ WL_ERR(("nan is not enabled, nan data path response blocked\n"));
+ ret = BCME_ERROR;
+ goto exit;
+ }
+ NAN_DBG_ENTER();
+ cmd_data = (nan_datapath_cmd_data_t *)MALLOCZ(cfg->osh, sizeof(*cmd_data));
+ if (!cmd_data) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ ret = BCME_NOMEM;
+ goto exit;
+ }
+
+ bzero(&nan_req_resp, sizeof(nan_req_resp));
+ ret = wl_cfgvendor_nan_parse_datapath_args(wiphy, data, len, cmd_data);
+ if (ret) {
+ WL_ERR(("failed to parse nan datapath vendor args, ret = %d\n", ret));
+ goto exit;
+ }
+ ret = wl_cfgnan_data_path_response_handler(wdev->netdev, cfg, cmd_data);
+ if (ret) {
+ WL_ERR(("failed to response nan data path [%d]\n", ret));
+ goto exit;
+ }
+exit:
+ ret = wl_cfgvendor_nan_cmd_reply(wiphy, NAN_WIFI_SUBCMD_DATA_PATH_RESPONSE,
+ &nan_req_resp, ret, cmd_data ? cmd_data->status : BCME_OK);
+ wl_cfgvendor_free_dp_cmd_data(cfg, cmd_data);
+ NAN_DBG_EXIT();
+ return ret;
+}
+
+static int
+wl_cfgvendor_nan_data_path_end(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void * data, int len)
+{
+ int ret = 0;
+ nan_datapath_cmd_data_t *cmd_data = NULL;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ nan_hal_resp_t nan_req_resp;
+ int status = BCME_ERROR;
+
+ NAN_DBG_ENTER();
+ if (!cfg->nancfg->nan_enable) {
+ WL_ERR(("nan is not enabled, nan data path end blocked\n"));
+ ret = BCME_OK;
+ goto exit;
+ }
+ cmd_data = (nan_datapath_cmd_data_t *)MALLOCZ(cfg->osh, sizeof(*cmd_data));
+ if (!cmd_data) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ ret = BCME_NOMEM;
+ goto exit;
+ }
+
+ bzero(&nan_req_resp, sizeof(nan_req_resp));
+ ret = wl_cfgvendor_nan_parse_datapath_args(wiphy, data, len, cmd_data);
+ if (ret) {
+ WL_ERR(("failed to parse nan datapath vendor args, ret = %d\n", ret));
+ goto exit;
+ }
+ ret = wl_cfgnan_data_path_end_handler(wdev->netdev, cfg,
+ cmd_data->ndp_instance_id, &status);
+ if (ret) {
+ WL_ERR(("failed to end nan data path [%d]\n", ret));
+ goto exit;
+ }
+exit:
+ ret = wl_cfgvendor_nan_cmd_reply(wiphy, NAN_WIFI_SUBCMD_DATA_PATH_END,
+ &nan_req_resp, ret, cmd_data ? status : BCME_OK);
+ wl_cfgvendor_free_dp_cmd_data(cfg, cmd_data);
+ NAN_DBG_EXIT();
+ return ret;
+}
+
+#ifdef WL_NAN_DISC_CACHE
+static int
+wl_cfgvendor_nan_data_path_sec_info(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int ret = 0;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ nan_hal_resp_t nan_req_resp;
+ nan_datapath_sec_info_cmd_data_t *cmd_data = NULL;
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(wdev->netdev);
+
+ NAN_DBG_ENTER();
+ if (!cfg->nancfg->nan_enable) {
+ WL_ERR(("nan is not enabled\n"));
+ ret = BCME_UNSUPPORTED;
+ goto exit;
+ }
+ cmd_data = MALLOCZ(dhdp->osh, sizeof(*cmd_data));
+ if (!cmd_data) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ ret = BCME_NOMEM;
+ goto exit;
+ }
+
+ ret = wl_cfgvendor_nan_parse_dp_sec_info_args(wiphy, data, len, cmd_data);
+ if (ret) {
+ WL_ERR(("failed to parse sec info args\n"));
+ goto exit;
+ }
+
+ bzero(&nan_req_resp, sizeof(nan_req_resp));
+ ret = wl_cfgnan_sec_info_handler(cfg, cmd_data, &nan_req_resp);
+ if (ret) {
+ WL_ERR(("failed to retrieve svc hash/pub nmi error[%d]\n", ret));
+ goto exit;
+ }
+exit:
+ ret = wl_cfgvendor_nan_cmd_reply(wiphy, NAN_WIFI_SUBCMD_DATA_PATH_SEC_INFO,
+ &nan_req_resp, ret, BCME_OK);
+ if (cmd_data) {
+ MFREE(dhdp->osh, cmd_data, sizeof(*cmd_data));
+ }
+ NAN_DBG_EXIT();
+ return ret;
+}
+#endif /* WL_NAN_DISC_CACHE */
+
+static int
+wl_cfgvendor_nan_version_info(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int ret = BCME_OK;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ uint32 version = NAN_HAL_VERSION_1;
+
+ BCM_REFERENCE(cfg);
+ WL_DBG(("Enter %s version %d\n", __FUNCTION__, version));
+ ret = wl_cfgvendor_send_cmd_reply(wiphy, &version, sizeof(version));
+ return ret;
+}
+
+static int
+wl_cfgvendor_nan_enable_merge(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void * data, int len)
+{
+ int ret = 0;
+ nan_config_cmd_data_t *cmd_data = NULL;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ int status = BCME_OK;
+ uint32 nan_attr_mask = 0;
+
+ BCM_REFERENCE(nan_attr_mask);
+ NAN_DBG_ENTER();
+ cmd_data = (nan_config_cmd_data_t *)MALLOCZ(cfg->osh, sizeof(*cmd_data));
+ if (!cmd_data) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ ret = BCME_NOMEM;
+ goto exit;
+ }
+
+ ret = wl_cfgvendor_nan_parse_args(wiphy, data, len, cmd_data, &nan_attr_mask);
+ if (ret) {
+ WL_ERR((" Enable merge: failed to parse nan config vendor args, ret = %d\n", ret));
+ goto exit;
+ }
+ ret = wl_cfgnan_set_enable_merge(wdev->netdev, cfg, cmd_data->enable_merge, &status);
+ if (unlikely(ret) || unlikely(status)) {
+ WL_ERR(("Enable merge: failed to set config request [%d]\n", ret));
+ /* As there is no cmd_reply, return status if error is in status else return ret */
+ if (status) {
+ ret = status;
+ }
+ goto exit;
+ }
+exit:
+ if (cmd_data) {
+ if (cmd_data->scid.data) {
+ MFREE(cfg->osh, cmd_data->scid.data, cmd_data->scid.dlen);
+ cmd_data->scid.dlen = 0;
+ }
+ MFREE(cfg->osh, cmd_data, sizeof(*cmd_data));
+ }
+ NAN_DBG_EXIT();
+ return ret;
+}
+#endif /* WL_NAN */
+
+#ifdef LINKSTAT_SUPPORT
+
+#define NUM_RATE 32
+#define NUM_PEER 1
+#define NUM_CHAN 11
+#define HEADER_SIZE sizeof(ver_len)
+
+#define NUM_PNO_SCANS 8
+#define NUM_CCA_SAMPLING_SECS 1
+
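+/* Extract the rxbeaconmbss counter from the xtlv-formatted counters
+ * buffer, trying each supported ucode counters layout in turn.
+ */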
+static int wl_cfgvendor_lstats_get_bcn_mbss(char *buf, uint32 *rxbeaconmbss)
+{
+ wl_cnt_info_t *cbuf = (wl_cnt_info_t *)buf;
+ const void *cnt;
+
+ if ((cnt = (const void *)bcm_get_data_from_xtlv_buf(cbuf->data, cbuf->datalen,
+ WL_CNT_XTLV_CNTV_LE10_UCODE, NULL, BCM_XTLV_OPTION_ALIGN32)) != NULL) {
+ *rxbeaconmbss = ((const wl_cnt_v_le10_mcst_t *)cnt)->rxbeaconmbss;
+ } else if ((cnt = (const void *)bcm_get_data_from_xtlv_buf(cbuf->data, cbuf->datalen,
+ WL_CNT_XTLV_LT40_UCODE_V1, NULL, BCM_XTLV_OPTION_ALIGN32)) != NULL) {
+ *rxbeaconmbss = ((const wl_cnt_lt40mcst_v1_t *)cnt)->rxbeaconmbss;
+ } else if ((cnt = (const void *)bcm_get_data_from_xtlv_buf(cbuf->data, cbuf->datalen,
+ WL_CNT_XTLV_GE40_UCODE_V1, NULL, BCM_XTLV_OPTION_ALIGN32)) != NULL) {
+ *rxbeaconmbss = ((const wl_cnt_ge40mcst_v1_t *)cnt)->rxbeaconmbss;
+ } else if ((cnt = (const void *)bcm_get_data_from_xtlv_buf(cbuf->data, cbuf->datalen,
+ WL_CNT_XTLV_GE80_UCODE_V1, NULL, BCM_XTLV_OPTION_ALIGN32)) != NULL) {
+ *rxbeaconmbss = ((const wl_cnt_ge80mcst_v1_t *)cnt)->rxbeaconmbss;
+ } else {
+ *rxbeaconmbss = 0;
+ return BCME_NOTFOUND;
+ }
+
+ return BCME_OK;
+}
+
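+/* Convert a chanspec into the HAL wifi_channel_info representation
+ * (width and center frequencies) and report the band through *cur_band.
+ */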
+static void fill_chanspec_to_channel_info(chanspec_t cur_chanspec,
+ wifi_channel_info *channel, int *cur_band)
+{
+ int band;
+ channel->width = WIFI_CHAN_WIDTH_INVALID;
+
+ if (CHSPEC_IS20(cur_chanspec)) {
+ channel->width = WIFI_CHAN_WIDTH_20;
+ } else if (CHSPEC_IS40(cur_chanspec)) {
+ channel->width = WIFI_CHAN_WIDTH_40;
+ } else if (CHSPEC_IS80(cur_chanspec)) {
+ channel->width = WIFI_CHAN_WIDTH_80;
+ } else if (CHSPEC_IS160(cur_chanspec)) {
+ channel->width = WIFI_CHAN_WIDTH_160;
+ } else if (CHSPEC_IS8080(cur_chanspec)) {
+ channel->width = WIFI_CHAN_WIDTH_80P80;
+ }
+
+ band = *cur_band = CHSPEC_BAND(cur_chanspec);
+ channel->center_freq =
+ wl_channel_to_frequency(wf_chspec_primary20_chan(cur_chanspec),
+ band);
+
+ if (CHSPEC_IS160(cur_chanspec) || CHSPEC_IS8080(cur_chanspec)) {
+ channel->center_freq0 =
+ wl_channel_to_frequency(wf_chspec_primary80_channel(cur_chanspec),
+ band);
+ channel->center_freq1 =
+ wl_channel_to_frequency(wf_chspec_secondary80_channel(cur_chanspec),
+ band);
+ } else {
+ channel->center_freq0 =
+ wl_channel_to_frequency(CHSPEC_CHANNEL(cur_chanspec),
+ band);
+ channel->center_freq1 = 0;
+ }
+}
+
+static int wl_cfgvendor_lstats_get_info(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ static char iovar_buf[WLC_IOCTL_MAXLEN];
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ int err = 0, ret = 0, i;
+ wifi_radio_stat *radio;
+ wifi_radio_stat_h radio_h;
+ wifi_channel_stat *chan_stats = NULL;
+ uint chan_stats_size = 0;
+#ifdef CHAN_STATS_SUPPORT
+ wifi_channel_stat *p_chan_stats = NULL;
+ cca_congest_ext_channel_req_v2_t *per_chspec_stats = NULL;
+ uint per_chspec_stats_size = 0;
+ cca_congest_ext_channel_req_v3_t *all_chan_results;
+ cca_congest_ext_channel_req_v3_t all_chan_req;
+#else
+ /* cca_get_stats_ext iovar for Wi-Fi channel statistics */
+ struct cca_congest_ext_channel_req_v2 *cca_v2_results;
+ struct cca_congest_ext_channel_req_v2 cca_v2_req;
+#endif /* CHAN_STATS_SUPPORT */
+ const wl_cnt_wlc_t *wlc_cnt;
+ scb_val_t scbval;
+ char *output = NULL;
+ char *outdata = NULL;
+ wifi_rate_stat_v1 *p_wifi_rate_stat_v1 = NULL;
+ wifi_rate_stat *p_wifi_rate_stat = NULL;
+ uint total_len = 0;
+ uint32 rxbeaconmbss;
+ wlc_rev_info_t revinfo;
+ wl_if_stats_t *if_stats = NULL;
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+ wl_pwrstats_query_t scan_query;
+ wl_pwrstats_t *pwrstats;
+ wl_pwr_scan_stats_t scan_stats;
+ int scan_time_len;
+ uint32 tot_pno_dur = 0;
+ wifi_channel_stat cur_channel_stat;
+ cca_congest_channel_req_t *cca_result;
+ cca_congest_channel_req_t cca_req;
+ uint32 cca_busy_time = 0;
+ int cur_chansp, cur_band;
+ chanspec_t cur_chanspec;
+
+ COMPAT_STRUCT_IFACE(wifi_iface_stat, iface);
+
+ WL_TRACE(("%s: Enter \n", __func__));
+ RETURN_EIO_IF_NOT_UP(cfg);
+
+ BCM_REFERENCE(if_stats);
+ BCM_REFERENCE(dhdp);
+ /* Get the device rev info */
+ bzero(&revinfo, sizeof(revinfo));
+ err = wldev_ioctl_get(bcmcfg_to_prmry_ndev(cfg), WLC_GET_REVINFO, &revinfo,
+ sizeof(revinfo));
+ if (err != BCME_OK) {
+ goto exit;
+ }
+
+ outdata = (void *)MALLOCZ(cfg->osh, WLC_IOCTL_MAXLEN);
+ if (outdata == NULL) {
+ WL_ERR(("outdata alloc failed\n"));
+ return BCME_NOMEM;
+ }
+
+ bzero(&scbval, sizeof(scb_val_t));
+ bzero(outdata, WLC_IOCTL_MAXLEN);
+ output = outdata;
+
+ err = wldev_iovar_getbuf(bcmcfg_to_prmry_ndev(cfg), "radiostat", NULL, 0,
+ iovar_buf, WLC_IOCTL_MAXLEN, NULL);
+ if (err != BCME_OK && err != BCME_UNSUPPORTED) {
+ WL_ERR(("error (%d) - size = %zu\n", err, sizeof(wifi_radio_stat)));
+ goto exit;
+ }
+ radio = (wifi_radio_stat *)iovar_buf;
+
+ bzero(&radio_h, sizeof(wifi_radio_stat_h));
+ radio_h.on_time = radio->on_time;
+ radio_h.tx_time = radio->tx_time;
+ radio_h.rx_time = radio->rx_time;
+ radio_h.on_time_scan = radio->on_time_scan;
+ radio_h.on_time_nbd = radio->on_time_nbd;
+ radio_h.on_time_gscan = radio->on_time_gscan;
+ radio_h.on_time_roam_scan = radio->on_time_roam_scan;
+ radio_h.on_time_pno_scan = radio->on_time_pno_scan;
+ radio_h.on_time_hs20 = radio->on_time_hs20;
+ radio_h.num_channels = NUM_PEER;
+
+ scan_query.length = 1;
+ scan_query.type[0] = WL_PWRSTATS_TYPE_SCAN;
+
+ err = wldev_iovar_getbuf(bcmcfg_to_prmry_ndev(cfg), "pwrstats", &scan_query,
+ sizeof(scan_query), iovar_buf, WLC_IOCTL_MAXLEN, NULL);
+ if (err != BCME_OK && err != BCME_UNSUPPORTED) {
+ WL_ERR(("error (%d) - size = %zu\n", err, sizeof(wl_pwrstats_t)));
+ goto exit;
+ }
+
+ pwrstats = (wl_pwrstats_t *) iovar_buf;
+
+ if (dtoh16(pwrstats->version) != WL_PWRSTATS_VERSION) {
+ WL_ERR(("PWRSTATS Version mismatch\n"));
+ err = BCME_ERROR;
+ goto exit;
+ }
+
+ scan_time_len = dtoh16(((uint16 *)pwrstats->data)[1]);
+
+ if (scan_time_len < sizeof(wl_pwr_scan_stats_t)) {
+ WL_ERR(("WL_PWRSTATS_TYPE_SCAN IOVAR info short len : %d < %d\n",
+ scan_time_len, (int)sizeof(wl_pwr_scan_stats_t)));
+ err = BCME_ERROR;
+ goto exit;
+ }
+
+ (void) memcpy_s(&scan_stats, sizeof(wl_pwr_scan_stats_t), pwrstats->data, scan_time_len);
+
+ /* The wl_pwr_scan_stats structure carries an array of PNO scans:
+ * scan_data_t pno_scans[8];
+ * The array size is 8 to allow for future PNO bucketing (BSSID, SSID, etc);
+ * FW hardcodes this number. If the hardcoded number (8) is changed,
+ * the loop condition and NUM_PNO_SCANS have to be changed to match.
+ */
+
+ for (i = 0; i < NUM_PNO_SCANS; i++) {
+ tot_pno_dur += dtoh32(scan_stats.pno_scans[i].dur);
+ }
+
+ /* Android Framework defines the total scan time in ms.
+ * But FW sends each scan time in us except for roam scan time.
+ * So we need to scale the times in ms.
+ */
+
+ radio_h.on_time_scan = (uint32)((tot_pno_dur +
+ dtoh32(scan_stats.user_scans.dur) +
+ dtoh32(scan_stats.assoc_scans.dur) +
+ dtoh32(scan_stats.other_scans.dur)) / 1000);
+
+ radio_h.on_time_scan += dtoh32(scan_stats.roam_scans.dur);
+ radio_h.on_time_roam_scan = dtoh32(scan_stats.roam_scans.dur);
+ radio_h.on_time_pno_scan = (uint32)(tot_pno_dur / 1000);
+
+ WL_TRACE(("pwr_scan_stats : %u %u %u %u %u %u\n",
+ radio_h.on_time_scan,
+ dtoh32(scan_stats.user_scans.dur),
+ dtoh32(scan_stats.assoc_scans.dur),
+ dtoh32(scan_stats.roam_scans.dur),
+ tot_pno_dur,
+ dtoh32(scan_stats.other_scans.dur)));
+
+ err = wldev_iovar_getint(bcmcfg_to_prmry_ndev(cfg), "chanspec", (int*)&cur_chansp);
+ if (err != BCME_OK) {
+ WL_ERR(("error (%d) \n", err));
+ goto exit;
+ }
+
+ cur_chanspec = wl_chspec_driver_to_host(cur_chansp);
+
+ if (!wf_chspec_valid(cur_chanspec)) {
+ WL_ERR(("Invalid chanspec : %x\n", cur_chanspec));
+ err = BCME_ERROR;
+ goto exit;
+ }
+
+ fill_chanspec_to_channel_info(cur_chanspec, &cur_channel_stat.channel, &cur_band);
+ WL_TRACE(("chanspec : %x, BW : %d, Cur Band : %x, freq : %d, freq0 :%d, freq1 : %d\n",
+ cur_chanspec,
+ cur_channel_stat.channel.width,
+ cur_band,
+ cur_channel_stat.channel.center_freq,
+ cur_channel_stat.channel.center_freq0,
+ cur_channel_stat.channel.center_freq1));
+
+ chan_stats_size = sizeof(wifi_channel_stat);
+ chan_stats = &cur_channel_stat;
+
+#ifdef CHAN_STATS_SUPPORT
+ /* Option to get all channel statistics */
+ all_chan_req.num_of_entries = 0;
+ all_chan_req.ver = WL_CCA_EXT_REQ_VER_V3;
+ err = wldev_iovar_getbuf(bcmcfg_to_prmry_ndev(cfg), "cca_get_stats_ext",
+ &all_chan_req, sizeof(all_chan_req), iovar_buf, WLC_IOCTL_MAXLEN, NULL);
+
+ if (err != BCME_OK && err != BCME_UNSUPPORTED) {
+ WL_ERR(("cca_get_stats_ext iovar err = %d\n", err));
+ goto exit;
+ }
+
+ all_chan_results = (cca_congest_ext_channel_req_v3_t *) iovar_buf;
+ if ((err == BCME_OK) &&
+ (dtoh16(all_chan_results->ver) == WL_CCA_EXT_REQ_VER_V3)) {
+ wifi_channel_stat *all_chan_stats = NULL;
+ int i = 0, num_channels;
+
+ num_channels = dtoh16(all_chan_results->num_of_entries);
+ radio_h.num_channels = num_channels;
+
+ chan_stats_size = sizeof(wifi_channel_stat) * num_channels;
+ chan_stats = (wifi_channel_stat*)MALLOCZ(cfg->osh, chan_stats_size);
+ p_chan_stats = chan_stats;
+ if (chan_stats == NULL) {
+ WL_ERR(("chan_stats alloc failed\n"));
+ err = BCME_NOMEM;
+ goto exit;
+ }
+ bzero(chan_stats, chan_stats_size);
+ all_chan_stats = chan_stats;
+
+ per_chspec_stats_size =
+ sizeof(cca_congest_ext_channel_req_v2_t) * num_channels;
+ per_chspec_stats = (cca_congest_ext_channel_req_v2_t *)
+ MALLOCZ(cfg->osh, per_chspec_stats_size);
+ if (per_chspec_stats == NULL) {
+ WL_ERR(("per_chspec_stats alloc failed\n"));
+ err = BCME_NOMEM;
+ goto exit;
+ }
+ (void) memcpy_s(per_chspec_stats, per_chspec_stats_size,
+ &all_chan_results->per_chan_stats, per_chspec_stats_size);
+
+ WL_TRACE(("** Per channel CCA entries ** \n"));
+
+ for (i = 0; i < num_channels; i++, all_chan_stats++) {
+ if (per_chspec_stats[i].num_secs != 1) {
+ WL_ERR(("Bogus num of seconds returned %d\n",
+ per_chspec_stats[i].num_secs));
+ goto exit;
+ }
+
+ fill_chanspec_to_channel_info(per_chspec_stats[i].chanspec,
+ &all_chan_stats->channel, &cur_band);
+
+ all_chan_stats->on_time =
+ per_chspec_stats[i].secs[0].radio_on_time;
+ all_chan_stats->cca_busy_time =
+ per_chspec_stats[i].secs[0].cca_busy_time;
+
+ WL_TRACE(("chanspec %x num_sec %d radio_on_time %d cca_busytime %d \n",
+ per_chspec_stats[i].chanspec, per_chspec_stats[i].num_secs,
+ per_chspec_stats[i].secs[0].radio_on_time,
+ per_chspec_stats[i].secs[0].cca_busy_time));
+ }
+ }
+#else
+ cca_v2_req.ver = WL_CCA_EXT_REQ_VER_V2;
+ cca_v2_req.chanspec =
+ wl_chspec_host_to_driver(wf_chspec_primary20_chspec(cur_chanspec));
+
+ err = wldev_iovar_getbuf(bcmcfg_to_prmry_ndev(cfg), "cca_get_stats_ext", &cca_v2_req,
+ sizeof(cca_v2_req), iovar_buf, WLC_IOCTL_MAXLEN, NULL);
+
+ if (err != BCME_OK && err != BCME_UNSUPPORTED) {
+ WL_ERR(("cca_get_stats_ext iovar err = %d\n", err));
+ goto exit;
+ }
+
+ cca_v2_results = (struct cca_congest_ext_channel_req_v2 *) iovar_buf;
+
+ /* Check the version for cca_get_stats_ext iovar */
+ if ((err == BCME_OK) &&
+ (dtoh16(cca_v2_results->ver) == WL_CCA_EXT_REQ_VER_V2)) {
+ /* the accumulated time for the current channel */
+ cur_channel_stat.on_time = dtoh32(cca_v2_results->secs[0].radio_on_time);
+ cur_channel_stat.cca_busy_time = dtoh32(cca_v2_results->secs[0].cca_busy_time);
+
+ WL_TRACE(("wifi chan statics - on_time : %u, cca_busy_time : %u\n",
+ cur_channel_stat.on_time, cur_channel_stat.cca_busy_time));
+ }
+#endif /* CHAN_STATS_SUPPORT */
+ else {
+ /* For a finer-grained CCA result, increase num_secs (the number of
+ * seconds to sample). If it is increased, the entries of
+ * cca_result->secs[] have to be summed in a loop.
+ * For simplicity, the sampling time is set to 1 sec.
+ */
+ WL_TRACE(("cca_get_stats_ext unsupported or version mismatch\n"));
+
+ cca_req.num_secs = NUM_CCA_SAMPLING_SECS;
+ cca_req.chanspec = wl_chspec_host_to_driver(cur_chanspec);
+
+ err = wldev_iovar_getbuf(bcmcfg_to_prmry_ndev(cfg), "cca_get_stats", &cca_req,
+ sizeof(cca_req), iovar_buf, WLC_IOCTL_MAXLEN, NULL);
+
+ if (err != BCME_OK && err != BCME_UNSUPPORTED) {
+ WL_ERR(("error (%d) - size = %zu\n",
+ err, sizeof(cca_congest_channel_req_t)));
+ goto exit;
+ }
+
+ cur_channel_stat.on_time = radio_h.on_time;
+
+ if (err == BCME_OK) {
+ cca_result = (cca_congest_channel_req_t *) iovar_buf;
+ cca_busy_time = dtoh32(cca_result->secs[0].congest_ibss) +
+ dtoh32(cca_result->secs[0].congest_obss) +
+ dtoh32(cca_result->secs[0].interference);
+
+ WL_TRACE(("wifi stats : %u, %u, %u, %u, %u\n", cur_channel_stat.on_time,
+ cca_busy_time,
+ dtoh32(cca_result->secs[0].congest_ibss),
+ dtoh32(cca_result->secs[0].congest_obss),
+ dtoh32(cca_result->secs[0].interference)));
+ } else {
+ WL_ERR(("cca_get_stats is unsupported \n"));
+ }
+
+ /* If cca_get_stats is unsupported, cca_busy_time has zero value as initial value */
+ cur_channel_stat.cca_busy_time = cca_busy_time;
+ }
+
+ ret = memcpy_s(output, WLC_IOCTL_MAXLEN, &radio_h, sizeof(wifi_radio_stat_h));
+ if (ret) {
+ WL_ERR(("Failed to copy wifi_radio_stat_h: %d\n", ret));
+ goto exit;
+ }
+ output += sizeof(wifi_radio_stat_h);
+
+ ret = memcpy_s(output, (WLC_IOCTL_MAXLEN - sizeof(wifi_radio_stat_h)),
+ chan_stats, chan_stats_size);
+ if (ret) {
+ WL_ERR(("Failed to copy wifi_channel_stat: %d\n", ret));
+ goto exit;
+ }
+ output += chan_stats_size;
+
+ COMPAT_BZERO_IFACE(wifi_iface_stat, iface);
+ COMPAT_ASSIGN_VALUE(iface, ac[WIFI_AC_VO].ac, WIFI_AC_VO);
+ COMPAT_ASSIGN_VALUE(iface, ac[WIFI_AC_VI].ac, WIFI_AC_VI);
+ COMPAT_ASSIGN_VALUE(iface, ac[WIFI_AC_BE].ac, WIFI_AC_BE);
+ COMPAT_ASSIGN_VALUE(iface, ac[WIFI_AC_BK].ac, WIFI_AC_BK);
+
+ err = wldev_iovar_getbuf(bcmcfg_to_prmry_ndev(cfg), "counters", NULL, 0,
+ iovar_buf, WLC_IOCTL_MAXLEN, NULL);
+ if (unlikely(err)) {
+ WL_ERR(("error (%d) - size = %zu\n", err, sizeof(wl_cnt_wlc_t)));
+ goto exit;
+ }
+
+ CHK_CNTBUF_DATALEN(iovar_buf, WLC_IOCTL_MAXLEN);
+ /* Translate traditional (ver <= 10) counters struct to the new xtlv type struct.
+ * Traditional (ver <= 10) counters use WL_CNT_XTLV_CNTV_LE10_UCODE;
+ * other cases use the xtlv type according to corerev.
+ */
+ err = wl_cntbuf_to_xtlv_format(NULL, iovar_buf, WLC_IOCTL_MAXLEN, revinfo.corerev);
+ if (err != BCME_OK) {
+ WL_ERR(("wl_cntbuf_to_xtlv_format ERR %d\n", err));
+ goto exit;
+ }
+
+ if (!(wlc_cnt = GET_WLCCNT_FROM_CNTBUF(iovar_buf))) {
+ WL_ERR(("wlc_cnt NULL!\n"));
+ err = BCME_ERROR;
+ goto exit;
+ }
+
+#ifndef DISABLE_IF_COUNTERS
+ if_stats = (wl_if_stats_t *)MALLOCZ(cfg->osh, sizeof(wl_if_stats_t));
+ if (!if_stats) {
+ WL_ERR(("MALLOCZ failed\n"));
+ err = BCME_NOMEM;
+ goto exit;
+ }
+
+ if (FW_SUPPORTED(dhdp, ifst)) {
+ err = wl_cfg80211_ifstats_counters(bcmcfg_to_prmry_ndev(cfg), if_stats);
+ } else {
+ err = wldev_iovar_getbuf(bcmcfg_to_prmry_ndev(cfg), "if_counters",
+ NULL, 0, (char *)if_stats, sizeof(*if_stats), NULL);
+ }
+
+ if (!err) {
+ /* Populate from if_stats */
+ if (dtoh16(if_stats->version) > WL_IF_STATS_T_VERSION) {
+ WL_ERR(("incorrect version of wl_if_stats_t,"
+ " expected=%u got=%u\n", WL_IF_STATS_T_VERSION,
+ if_stats->version));
+ goto exit;
+ }
+ COMPAT_ASSIGN_VALUE(iface, ac[WIFI_AC_BE].tx_mpdu, (uint32)if_stats->txframe);
+ COMPAT_ASSIGN_VALUE(iface, ac[WIFI_AC_BE].rx_mpdu,
+ (uint32)(if_stats->rxframe - if_stats->rxmulti));
+ COMPAT_ASSIGN_VALUE(iface, ac[WIFI_AC_BE].mpdu_lost, (uint32)if_stats->txfail);
+ COMPAT_ASSIGN_VALUE(iface, ac[WIFI_AC_BE].retries, (uint32)if_stats->txretrans);
+ } else
+#endif /* !DISABLE_IF_COUNTERS */
+ {
+ COMPAT_ASSIGN_VALUE(iface, ac[WIFI_AC_BE].tx_mpdu,
+ (wlc_cnt->txfrmsnt - wlc_cnt->txmulti));
+ COMPAT_ASSIGN_VALUE(iface, ac[WIFI_AC_BE].rx_mpdu, wlc_cnt->rxframe);
+ COMPAT_ASSIGN_VALUE(iface, ac[WIFI_AC_BE].mpdu_lost, wlc_cnt->txfail);
+ COMPAT_ASSIGN_VALUE(iface, ac[WIFI_AC_BE].retries, wlc_cnt->txretrans);
+ }
+
+ err = wl_cfgvendor_lstats_get_bcn_mbss(iovar_buf, &rxbeaconmbss);
+ if (unlikely(err)) {
+ WL_ERR(("get_bcn_mbss error (%d)\n", err));
+ goto exit;
+ }
+
+ err = wldev_get_rssi(bcmcfg_to_prmry_ndev(cfg), &scbval);
+ if (unlikely(err)) {
+ WL_ERR(("get_rssi error (%d)\n", err));
+ goto exit;
+ }
+
+ COMPAT_ASSIGN_VALUE(iface, beacon_rx, rxbeaconmbss);
+ COMPAT_ASSIGN_VALUE(iface, rssi_mgmt, scbval.val);
+ COMPAT_ASSIGN_VALUE(iface, num_peers, NUM_PEER);
+ COMPAT_ASSIGN_VALUE(iface, peer_info->num_rate, NUM_RATE);
+
+ COMPAT_MEMCOPY_IFACE(output, total_len, wifi_iface_stat, iface, wifi_rate_stat);
+
+ err = wldev_iovar_getbuf(bcmcfg_to_prmry_ndev(cfg), "ratestat", NULL, 0,
+ iovar_buf, WLC_IOCTL_MAXLEN, NULL);
+ if (err != BCME_OK && err != BCME_UNSUPPORTED) {
+ WL_ERR(("error (%d) - size = %zu\n", err, NUM_RATE*sizeof(wifi_rate_stat)));
+ goto exit;
+ }
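+ /* Translate each legacy wifi_rate_stat entry from the firmware into
+ * the wifi_rate_stat_v1 layout expected by the framework, packing the
+ * entries back-to-back in the output buffer.
+ */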
+ for (i = 0; i < NUM_RATE; i++) {
+ p_wifi_rate_stat =
+ (wifi_rate_stat *)(iovar_buf + i*sizeof(wifi_rate_stat));
+ p_wifi_rate_stat_v1 = (wifi_rate_stat_v1 *)output;
+ p_wifi_rate_stat_v1->rate.preamble = p_wifi_rate_stat->rate.preamble;
+ p_wifi_rate_stat_v1->rate.nss = p_wifi_rate_stat->rate.nss;
+ p_wifi_rate_stat_v1->rate.bw = p_wifi_rate_stat->rate.bw;
+ p_wifi_rate_stat_v1->rate.rateMcsIdx = p_wifi_rate_stat->rate.rateMcsIdx;
+ p_wifi_rate_stat_v1->rate.reserved = p_wifi_rate_stat->rate.reserved;
+ p_wifi_rate_stat_v1->rate.bitrate = p_wifi_rate_stat->rate.bitrate;
+ p_wifi_rate_stat_v1->tx_mpdu = p_wifi_rate_stat->tx_mpdu;
+ p_wifi_rate_stat_v1->rx_mpdu = p_wifi_rate_stat->rx_mpdu;
+ p_wifi_rate_stat_v1->mpdu_lost = p_wifi_rate_stat->mpdu_lost;
+ p_wifi_rate_stat_v1->retries = p_wifi_rate_stat->retries;
+ p_wifi_rate_stat_v1->retries_short = p_wifi_rate_stat->retries_short;
+ p_wifi_rate_stat_v1->retries_long = p_wifi_rate_stat->retries_long;
+ output = (char *) &(p_wifi_rate_stat_v1->retries_long);
+ output += sizeof(p_wifi_rate_stat_v1->retries_long);
+ }
+
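+	/* Recompute total_len: radio stats header plus channel stats, with the
+	 * single embedded peer/rate entry expanded to NUM_PEER peers carrying
+	 * NUM_RATE wifi_rate_stat_v1 records each.
+	 */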
+ total_len = sizeof(wifi_radio_stat_h) + chan_stats_size;
+ total_len = total_len - sizeof(wifi_peer_info) +
+ NUM_PEER * (sizeof(wifi_peer_info) - sizeof(wifi_rate_stat_v1) +
+ NUM_RATE * sizeof(wifi_rate_stat_v1));
+
+ if (total_len > WLC_IOCTL_MAXLEN) {
+		WL_ERR(("Error! total_len:%d exceeds WLC_IOCTL_MAXLEN\n", total_len));
+ err = BCME_BADLEN;
+ goto exit;
+ }
+ err = wl_cfgvendor_send_cmd_reply(wiphy, outdata, total_len);
+
+ if (unlikely(err))
+ WL_ERR(("Vendor Command reply failed ret:%d \n", err));
+
+exit:
+ if (outdata) {
+ MFREE(cfg->osh, outdata, WLC_IOCTL_MAXLEN);
+ }
+ if (if_stats) {
+ MFREE(cfg->osh, if_stats, sizeof(wl_if_stats_t));
+ }
+#ifdef CHAN_STATS_SUPPORT
+ if (p_chan_stats) {
+ MFREE(cfg->osh, p_chan_stats, chan_stats_size);
+ }
+ if (per_chspec_stats) {
+ MFREE(cfg->osh, per_chspec_stats, per_chspec_stats_size);
+ }
+#endif /* CHAN_STATS_SUPPORT */
+ return err;
+}
+#endif /* LINKSTAT_SUPPORT */
+
+#ifdef DHD_LOG_DUMP
+static int
+wl_cfgvendor_get_buf_data(const struct nlattr *iter, struct buf_data **buf)
+{
+ int ret = BCME_OK;
+
+ if (nla_len(iter) != sizeof(struct buf_data)) {
+		WL_ERR(("Invalid len: %d\n", nla_len(iter)));
+ ret = BCME_BADLEN;
+ }
+ (*buf) = (struct buf_data *)nla_data(iter);
+ if (!(*buf) || (((*buf)->len) <= 0) || !((*buf)->data_buf[0])) {
+ WL_ERR(("Invalid buffer\n"));
+ ret = BCME_ERROR;
+ }
+ return ret;
+}
+
+static int
+wl_cfgvendor_dbg_file_dump(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int ret = BCME_OK, rem, type = 0;
+ const struct nlattr *iter;
+ char *mem_buf = NULL;
+ struct sk_buff *skb = NULL;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct buf_data *buf;
+ int pos = 0;
+
+ /* Alloc the SKB for vendor_event */
+ skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, CFG80211_VENDOR_CMD_REPLY_SKB_SZ);
+ if (!skb) {
+		WL_ERR(("skb allocation failed\n"));
+ ret = BCME_NOMEM;
+ goto exit;
+ }
+ WL_ERR(("%s\n", __FUNCTION__));
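+	/* Each attribute selects one dump section; copy the corresponding
+	 * driver/firmware data into the caller-supplied buf->data_buf[0].
+	 */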
+ nla_for_each_attr(iter, data, len, rem) {
+ type = nla_type(iter);
+ ret = wl_cfgvendor_get_buf_data(iter, &buf);
+ if (ret)
+ goto exit;
+ switch (type) {
+ case DUMP_BUF_ATTR_MEMDUMP:
+ ret = dhd_os_get_socram_dump(bcmcfg_to_prmry_ndev(cfg), &mem_buf,
+ (uint32 *)(&(buf->len)));
+ if (ret) {
+ WL_ERR(("failed to get_socram_dump : %d\n", ret));
+ goto exit;
+ }
+ ret = dhd_export_debug_data(mem_buf, NULL, buf->data_buf[0],
+ (int)buf->len, &pos);
+ break;
+
+ case DUMP_BUF_ATTR_TIMESTAMP :
+ ret = dhd_print_time_str(buf->data_buf[0], NULL,
+ (uint32)buf->len, &pos);
+ break;
+#ifdef EWP_ECNTRS_LOGGING
+ case DUMP_BUF_ATTR_ECNTRS :
+ ret = dhd_print_ecntrs_data(bcmcfg_to_prmry_ndev(cfg), NULL,
+ buf->data_buf[0], NULL, (uint32)buf->len, &pos);
+ break;
+#endif /* EWP_ECNTRS_LOGGING */
+#ifdef DHD_STATUS_LOGGING
+ case DUMP_BUF_ATTR_STATUS_LOG :
+ ret = dhd_print_status_log_data(bcmcfg_to_prmry_ndev(cfg), NULL,
+ buf->data_buf[0], NULL, (uint32)buf->len, &pos);
+ break;
+#endif /* DHD_STATUS_LOGGING */
+#ifdef EWP_RTT_LOGGING
+ case DUMP_BUF_ATTR_RTT_LOG :
+ ret = dhd_print_rtt_data(bcmcfg_to_prmry_ndev(cfg), NULL,
+ buf->data_buf[0], NULL, (uint32)buf->len, &pos);
+ break;
+#endif /* EWP_RTT_LOGGING */
+ case DUMP_BUF_ATTR_DHD_DUMP :
+ ret = dhd_print_dump_data(bcmcfg_to_prmry_ndev(cfg), NULL,
+ buf->data_buf[0], NULL, (uint32)buf->len, &pos);
+ break;
+#if defined(BCMPCIE)
+ case DUMP_BUF_ATTR_EXT_TRAP :
+ ret = dhd_print_ext_trap_data(bcmcfg_to_prmry_ndev(cfg), NULL,
+ buf->data_buf[0], NULL, (uint32)buf->len, &pos);
+ break;
+#endif /* BCMPCIE */
+#if defined(DHD_FW_COREDUMP) && defined(DNGL_EVENT_SUPPORT)
+ case DUMP_BUF_ATTR_HEALTH_CHK :
+ ret = dhd_print_health_chk_data(bcmcfg_to_prmry_ndev(cfg), NULL,
+ buf->data_buf[0], NULL, (uint32)buf->len, &pos);
+ break;
+#endif
+ case DUMP_BUF_ATTR_COOKIE :
+ ret = dhd_print_cookie_data(bcmcfg_to_prmry_ndev(cfg), NULL,
+ buf->data_buf[0], NULL, (uint32)buf->len, &pos);
+ break;
+#ifdef DHD_DUMP_PCIE_RINGS
+ case DUMP_BUF_ATTR_FLOWRING_DUMP :
+ ret = dhd_print_flowring_data(bcmcfg_to_prmry_ndev(cfg), NULL,
+ buf->data_buf[0], NULL, (uint32)buf->len, &pos);
+ break;
+#endif
+ case DUMP_BUF_ATTR_GENERAL_LOG :
+ ret = dhd_get_dld_log_dump(bcmcfg_to_prmry_ndev(cfg), NULL,
+ buf->data_buf[0], NULL, (uint32)buf->len,
+ DLD_BUF_TYPE_GENERAL, &pos);
+ break;
+
+ case DUMP_BUF_ATTR_PRESERVE_LOG :
+ ret = dhd_get_dld_log_dump(bcmcfg_to_prmry_ndev(cfg), NULL,
+ buf->data_buf[0], NULL, (uint32)buf->len,
+ DLD_BUF_TYPE_PRESERVE, &pos);
+ break;
+
+ case DUMP_BUF_ATTR_SPECIAL_LOG :
+ ret = dhd_get_dld_log_dump(bcmcfg_to_prmry_ndev(cfg), NULL,
+ buf->data_buf[0], NULL, (uint32)buf->len,
+ DLD_BUF_TYPE_SPECIAL, &pos);
+ break;
+#ifdef DHD_SSSR_DUMP
+#ifdef DHD_SSSR_DUMP_BEFORE_SR
+ case DUMP_BUF_ATTR_SSSR_C0_D11_BEFORE :
+ ret = dhd_sssr_dump_d11_buf_before(bcmcfg_to_prmry_ndev(cfg),
+ buf->data_buf[0], (uint32)buf->len, 0);
+ break;
+#endif /* DHD_SSSR_DUMP_BEFORE_SR */
+
+ case DUMP_BUF_ATTR_SSSR_C0_D11_AFTER :
+ ret = dhd_sssr_dump_d11_buf_after(bcmcfg_to_prmry_ndev(cfg),
+ buf->data_buf[0], (uint32)buf->len, 0);
+ break;
+
+#ifdef DHD_SSSR_DUMP_BEFORE_SR
+ case DUMP_BUF_ATTR_SSSR_C1_D11_BEFORE :
+ ret = dhd_sssr_dump_d11_buf_before(bcmcfg_to_prmry_ndev(cfg),
+ buf->data_buf[0], (uint32)buf->len, 1);
+ break;
+#endif /* DHD_SSSR_DUMP_BEFORE_SR */
+
+ case DUMP_BUF_ATTR_SSSR_C1_D11_AFTER :
+ ret = dhd_sssr_dump_d11_buf_after(bcmcfg_to_prmry_ndev(cfg),
+ buf->data_buf[0], (uint32)buf->len, 1);
+ break;
+
+#ifdef DHD_SSSR_DUMP_BEFORE_SR
+ case DUMP_BUF_ATTR_SSSR_C2_D11_BEFORE :
+ ret = dhd_sssr_dump_d11_buf_before(bcmcfg_to_prmry_ndev(cfg),
+ buf->data_buf[0], (uint32)buf->len, 2);
+ break;
+#endif /* DHD_SSSR_DUMP_BEFORE_SR */
+
+ case DUMP_BUF_ATTR_SSSR_C2_D11_AFTER :
+ ret = dhd_sssr_dump_d11_buf_after(bcmcfg_to_prmry_ndev(cfg),
+ buf->data_buf[0], (uint32)buf->len, 2);
+ break;
+
+#ifdef DHD_SSSR_DUMP_BEFORE_SR
+ case DUMP_BUF_ATTR_SSSR_DIG_BEFORE :
+ ret = dhd_sssr_dump_dig_buf_before(bcmcfg_to_prmry_ndev(cfg),
+ buf->data_buf[0], (uint32)buf->len);
+ break;
+#endif /* DHD_SSSR_DUMP_BEFORE_SR */
+
+ case DUMP_BUF_ATTR_SSSR_DIG_AFTER :
+ ret = dhd_sssr_dump_dig_buf_after(bcmcfg_to_prmry_ndev(cfg),
+ buf->data_buf[0], (uint32)buf->len);
+ break;
+#endif /* DHD_SSSR_DUMP */
+#ifdef DHD_PKT_LOGGING
+ case DUMP_BUF_ATTR_PKTLOG:
+ ret = dhd_os_get_pktlog_dump(bcmcfg_to_prmry_ndev(cfg),
+ buf->data_buf[0], (uint32)buf->len);
+ break;
+
+ case DUMP_BUF_ATTR_PKTLOG_DEBUG:
+ ret = dhd_os_get_pktlog_dump(bcmcfg_to_prmry_ndev(cfg),
+ buf->data_buf[0], (uint32)buf->len);
+ break;
+#endif /* DHD_PKT_LOGGING */
+#ifdef DNGL_AXI_ERROR_LOGGING
+ case DUMP_BUF_ATTR_AXI_ERROR:
+ ret = dhd_os_get_axi_error_dump(bcmcfg_to_prmry_ndev(cfg),
+ buf->data_buf[0], (uint32)buf->len);
+ break;
+#endif /* DNGL_AXI_ERROR_LOGGING */
+ default:
+ WL_ERR(("Unknown type: %d\n", type));
+ ret = BCME_ERROR;
+ goto exit;
+ }
+ }
+
+ if (ret)
+ goto exit;
+
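+	/* ret is BCME_OK (0) here; reply with the last attribute type carrying
+	 * a zero status value.
+	 */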
+ ret = nla_put_u32(skb, type, (uint32)(ret));
+ if (ret < 0) {
+ WL_ERR(("Failed to put type, ret:%d\n", ret));
+ goto exit;
+ }
+ ret = cfg80211_vendor_cmd_reply(skb);
+ if (ret) {
+ WL_ERR(("Vendor Command reply failed ret:%d \n", ret));
+ }
+ return ret;
+exit:
+ if (skb) {
+ /* Free skb memory */
+ kfree_skb(skb);
+ }
+ return ret;
+}
+#endif /* DHD_LOG_DUMP */
+
+#ifdef DEBUGABILITY
+#ifndef DEBUGABILITY_DISABLE_MEMDUMP
+static int
+wl_cfgvendor_dbg_trigger_mem_dump(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int ret = BCME_OK;
+ uint32 alloc_len;
+ struct sk_buff *skb = NULL;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+ u32 supported_features = 0;
+
+ WL_ERR(("wl_cfgvendor_dbg_trigger_mem_dump %d\n", __LINE__));
+
+ ret = dhd_os_dbg_get_feature(dhdp, &supported_features);
+ if (!(supported_features & DBG_MEMORY_DUMP_SUPPORTED)) {
+		WL_ERR(("DBG_MEMORY_DUMP_SUPPORTED not supported\n"));
+		ret = WIFI_ERROR_NOT_SUPPORTED; /* -3 */
+ goto exit;
+ }
+
+ dhdp->memdump_type = DUMP_TYPE_CFG_VENDOR_TRIGGERED;
+ ret = dhd_os_socram_dump(bcmcfg_to_prmry_ndev(cfg), &alloc_len);
+ if (ret) {
+ WL_ERR(("failed to call dhd_os_socram_dump : %d\n", ret));
+ goto exit;
+ }
+ /* Alloc the SKB for vendor_event */
+ skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, CFG80211_VENDOR_CMD_REPLY_SKB_SZ);
+ if (!skb) {
+		WL_ERR(("skb allocation failed\n"));
+ ret = BCME_NOMEM;
+ goto exit;
+ }
+ ret = nla_put_u32(skb, DEBUG_ATTRIBUTE_FW_DUMP_LEN, alloc_len);
+
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put fw dump length, ret=%d\n", ret));
+ goto exit;
+ }
+
+ ret = cfg80211_vendor_cmd_reply(skb);
+
+ if (ret) {
+ WL_ERR(("Vendor Command reply failed ret:%d \n", ret));
+ goto exit;
+ }
+ return ret;
+exit:
+ /* Free skb memory */
+ if (skb) {
+ kfree_skb(skb);
+ }
+ return ret;
+}
+
+static int
+wl_cfgvendor_dbg_get_mem_dump(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int ret = BCME_OK, rem, type;
+ int buf_len = 0;
+ uintptr_t user_buf = (uintptr_t)NULL;
+ const struct nlattr *iter;
+ char *mem_buf = NULL;
+ struct sk_buff *skb = NULL;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+
+ nla_for_each_attr(iter, data, len, rem) {
+ type = nla_type(iter);
+ switch (type) {
+ case DEBUG_ATTRIBUTE_FW_DUMP_LEN:
+ /* Check if the iter is valid and
+ * buffer length is not already initialized.
+ */
+ if ((nla_len(iter) == sizeof(uint32)) &&
+ !buf_len) {
+ buf_len = nla_get_u32(iter);
+ if (buf_len <= 0) {
+ ret = BCME_ERROR;
+ goto exit;
+ }
+ } else {
+ ret = BCME_ERROR;
+ goto exit;
+ }
+ break;
+ case DEBUG_ATTRIBUTE_FW_DUMP_DATA:
+ if (nla_len(iter) != sizeof(uint64)) {
+ WL_ERR(("Invalid len\n"));
+ ret = BCME_ERROR;
+ goto exit;
+ }
+ user_buf = (uintptr_t)nla_get_u64(iter);
+ if (!user_buf) {
+ ret = BCME_ERROR;
+ goto exit;
+ }
+ break;
+ default:
+ WL_ERR(("Unknown type: %d\n", type));
+ ret = BCME_ERROR;
+ goto exit;
+ }
+ }
+ if (buf_len > 0 && user_buf) {
+		/* mem_buf is allocated and returned by dhd_os_get_socram_dump();
+		 * no local allocation is needed here.
+		 */
+ ret = dhd_os_get_socram_dump(bcmcfg_to_prmry_ndev(cfg), &mem_buf, &buf_len);
+ if (ret) {
+ WL_ERR(("failed to get_socram_dump : %d\n", ret));
+ goto free_mem;
+ }
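+		/* A 32-bit HAL process on a 64-bit kernel passes a compat
+		 * pointer that must go through compat_ptr() before
+		 * copy_to_user().
+		 */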
+#ifdef CONFIG_COMPAT
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0))
+ if (in_compat_syscall())
+#else
+ if (is_compat_task())
+#endif /* LINUX_VER >= 4.6 */
+ {
+ void * usr_ptr = compat_ptr((uintptr_t) user_buf);
+ ret = copy_to_user(usr_ptr, mem_buf, buf_len);
+ if (ret) {
+ WL_ERR(("failed to copy memdump into user buffer : %d\n", ret));
+ goto free_mem;
+ }
+ }
+ else
+#endif /* CONFIG_COMPAT */
+ {
+ ret = copy_to_user((void*)user_buf, mem_buf, buf_len);
+ if (ret) {
+ WL_ERR(("failed to copy memdump into user buffer : %d\n", ret));
+ goto free_mem;
+ }
+ }
+ /* Alloc the SKB for vendor_event */
+ skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, CFG80211_VENDOR_CMD_REPLY_SKB_SZ);
+ if (!skb) {
+			WL_ERR(("skb allocation failed\n"));
+ ret = BCME_NOMEM;
+ goto free_mem;
+ }
+		/* Indicate the memdump was successfully copied */
+ ret = nla_put(skb, DEBUG_ATTRIBUTE_FW_DUMP_DATA, sizeof(ret), &ret);
+ if (ret < 0) {
+ WL_ERR(("Failed to put DEBUG_ATTRIBUTE_FW_DUMP_DATA, ret:%d\n", ret));
+ goto free_mem;
+ }
+
+ ret = cfg80211_vendor_cmd_reply(skb);
+
+ if (ret) {
+ WL_ERR(("Vendor Command reply failed ret:%d \n", ret));
+ }
+ skb = NULL;
+ }
+
+free_mem:
+	/* mem_buf is managed by the dhd layer; nothing to free here */
+ /* Free skb memory */
+ if (skb) {
+ kfree_skb(skb);
+ }
+exit:
+ return ret;
+}
+#else
+static int
+wl_cfgvendor_dbg_trigger_mem_dump(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ return WIFI_ERROR_NOT_SUPPORTED;
+}
+
+static int
+wl_cfgvendor_dbg_get_mem_dump(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ return WIFI_ERROR_NOT_SUPPORTED;
+}
+#endif /* !DEBUGABILITY_DISABLE_MEMDUMP */
+
+static int wl_cfgvendor_dbg_start_logging(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int ret = BCME_OK, rem, type;
+ char ring_name[DBGRING_NAME_MAX] = {0};
+ int log_level = 0, flags = 0, time_intval = 0, threshold = 0;
+ const struct nlattr *iter;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ dhd_pub_t *dhd_pub = cfg->pub;
+ nla_for_each_attr(iter, data, len, rem) {
+ type = nla_type(iter);
+ switch (type) {
+ case DEBUG_ATTRIBUTE_RING_NAME:
+				strncpy(ring_name, nla_data(iter),
+					MIN(sizeof(ring_name) - 1, nla_len(iter)));
+ break;
+ case DEBUG_ATTRIBUTE_LOG_LEVEL:
+ log_level = nla_get_u32(iter);
+ break;
+ case DEBUG_ATTRIBUTE_RING_FLAGS:
+ flags = nla_get_u32(iter);
+ break;
+ case DEBUG_ATTRIBUTE_LOG_TIME_INTVAL:
+ time_intval = nla_get_u32(iter);
+ break;
+ case DEBUG_ATTRIBUTE_LOG_MIN_DATA_SIZE:
+ threshold = nla_get_u32(iter);
+ break;
+ default:
+ WL_ERR(("Unknown type: %d\n", type));
+ ret = BCME_BADADDR;
+ goto exit;
+ }
+ }
+
+ ret = dhd_os_start_logging(dhd_pub, ring_name, log_level, flags, time_intval, threshold);
+ if (ret < 0) {
+		WL_ERR(("start_logging failed, ret: %d\n", ret));
+ }
+exit:
+ return ret;
+}
+
+static int wl_cfgvendor_dbg_reset_logging(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int ret = BCME_OK;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ dhd_pub_t *dhd_pub = cfg->pub;
+
+ ret = dhd_os_reset_logging(dhd_pub);
+ if (ret < 0) {
+		WL_ERR(("reset logging failed, ret: %d\n", ret));
+ }
+
+ return ret;
+}
+
+static int wl_cfgvendor_dbg_get_ring_status(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int ret = BCME_OK;
+ int ring_id, i;
+ int ring_cnt;
+ struct sk_buff *skb;
+ dhd_dbg_ring_status_t dbg_ring_status[DEBUG_RING_ID_MAX];
+ dhd_dbg_ring_status_t ring_status;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ dhd_pub_t *dhd_pub = cfg->pub;
+ bzero(dbg_ring_status, DBG_RING_STATUS_SIZE * DEBUG_RING_ID_MAX);
+ ring_cnt = 0;
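+	/* Probe every ring id and record the status of each ring that exists;
+	 * ids reported as BCME_NOTFOUND are skipped.
+	 */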
+ for (ring_id = DEBUG_RING_ID_INVALID + 1; ring_id < DEBUG_RING_ID_MAX; ring_id++) {
+ ret = dhd_os_get_ring_status(dhd_pub, ring_id, &ring_status);
+ if (ret == BCME_NOTFOUND) {
+			WL_DBG(("ring (%d) not found\n", ring_id));
+ } else if (ret == BCME_OK) {
+ dbg_ring_status[ring_cnt++] = ring_status;
+ }
+ }
+ /* Alloc the SKB for vendor_event */
+ skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy,
+ nla_total_size(DBG_RING_STATUS_SIZE) * ring_cnt + nla_total_size(sizeof(ring_cnt)));
+ if (!skb) {
+		WL_ERR(("skb allocation failed\n"));
+ ret = BCME_NOMEM;
+ goto exit;
+ }
+
+ /* Ignore return of nla_put_u32 and nla_put since the skb allocated
+ * above has a requested size for all payload
+ */
+ (void)nla_put_u32(skb, DEBUG_ATTRIBUTE_RING_NUM, ring_cnt);
+ for (i = 0; i < ring_cnt; i++) {
+ (void)nla_put(skb, DEBUG_ATTRIBUTE_RING_STATUS, DBG_RING_STATUS_SIZE,
+ &dbg_ring_status[i]);
+ }
+ ret = cfg80211_vendor_cmd_reply(skb);
+
+ if (ret) {
+ WL_ERR(("Vendor Command reply failed ret:%d \n", ret));
+ }
+exit:
+ return ret;
+}
+
+static int wl_cfgvendor_dbg_get_ring_data(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int ret = BCME_OK, rem, type;
+ char ring_name[DBGRING_NAME_MAX] = {0};
+ const struct nlattr *iter;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ dhd_pub_t *dhd_pub = cfg->pub;
+
+ nla_for_each_attr(iter, data, len, rem) {
+ type = nla_type(iter);
+ switch (type) {
+ case DEBUG_ATTRIBUTE_RING_NAME:
+ strlcpy(ring_name, nla_data(iter), sizeof(ring_name));
+ break;
+ default:
+ WL_ERR(("Unknown type: %d\n", type));
+ return ret;
+ }
+ }
+
+ ret = dhd_os_trigger_get_ring_data(dhd_pub, ring_name);
+ if (ret < 0) {
+ WL_ERR(("trigger_get_data failed ret:%d\n", ret));
+ }
+
+ return ret;
+}
+#endif /* DEBUGABILITY */
+
+static int wl_cfgvendor_dbg_get_feature(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int ret = BCME_OK;
+ u32 supported_features = 0;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ dhd_pub_t *dhd_pub = cfg->pub;
+
+ ret = dhd_os_dbg_get_feature(dhd_pub, &supported_features);
+ if (ret < 0) {
+ WL_ERR(("dbg_get_feature failed ret:%d\n", ret));
+ goto exit;
+ }
+ ret = wl_cfgvendor_send_cmd_reply(wiphy, &supported_features,
+ sizeof(supported_features));
+exit:
+ return ret;
+}
+
+#ifdef DEBUGABILITY
+static void wl_cfgvendor_dbg_ring_send_evt(void *ctx,
+ const int ring_id, const void *data, const uint32 len,
+ const dhd_dbg_ring_status_t ring_status)
+{
+ struct net_device *ndev = ctx;
+ struct wiphy *wiphy;
+ gfp_t kflags;
+ struct sk_buff *skb;
+ struct nlmsghdr *nlh;
+ struct bcm_cfg80211 *cfg;
+ if (!ndev) {
+ WL_ERR(("ndev is NULL\n"));
+ return;
+ }
+ kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+ wiphy = ndev->ieee80211_ptr->wiphy;
+ cfg = wiphy_priv(wiphy);
+
+	/* If the wifi hal has not started, don't send events to it */
+ if (!cfg->hal_started) {
+ WL_ERR(("Hal is not started\n"));
+ return;
+ }
+ /* Alloc the SKB for vendor_event */
+#if (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || \
+ LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
+ skb = cfg80211_vendor_event_alloc(wiphy, NULL, len + CFG80211_VENDOR_EVT_SKB_SZ,
+ GOOGLE_DEBUG_RING_EVENT, kflags);
+#else
+ skb = cfg80211_vendor_event_alloc(wiphy, len + CFG80211_VENDOR_EVT_SKB_SZ,
+ GOOGLE_DEBUG_RING_EVENT, kflags);
+#endif /* (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || */
+ /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) */
+ if (!skb) {
+		WL_ERR(("skb alloc failed\n"));
+ return;
+ }
+ /* Set halpid for sending unicast event to wifi hal */
+ nlh = (struct nlmsghdr*)skb->data;
+ nlh->nlmsg_pid = cfg->halpid;
+ nla_put(skb, DEBUG_ATTRIBUTE_RING_STATUS, sizeof(ring_status), &ring_status);
+ nla_put(skb, DEBUG_ATTRIBUTE_RING_DATA, len, data);
+ cfg80211_vendor_event(skb, kflags);
+}
+#endif /* DEBUGABILITY */
+
+#ifdef DHD_LOG_DUMP
+#ifdef DHD_SSSR_DUMP
+#define DUMP_SSSR_DUMP_MAX_COUNT 8
+static int wl_cfgvendor_nla_put_sssr_dump_data(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ int ret = BCME_OK;
+#ifdef DHD_SSSR_DUMP
+ uint32 arr_len[DUMP_SSSR_DUMP_MAX_COUNT];
+#endif /* DHD_SSSR_DUMP */
+ char memdump_path[MEMDUMP_PATH_LEN];
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(ndev);
+
+#ifdef DHD_SSSR_DUMP_BEFORE_SR
+ dhd_get_memdump_filename(ndev, memdump_path, MEMDUMP_PATH_LEN,
+ "sssr_dump_core_0_before_SR");
+ ret = nla_put_string(skb, DUMP_FILENAME_ATTR_SSSR_CORE_0_BEFORE_DUMP, memdump_path);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put sssr core 0 before dump path, ret=%d\n", ret));
+ goto exit;
+ }
+#endif /* DHD_SSSR_DUMP_BEFORE_SR */
+
+ dhd_get_memdump_filename(ndev, memdump_path, MEMDUMP_PATH_LEN,
+ "sssr_dump_core_0_after_SR");
+ ret = nla_put_string(skb, DUMP_FILENAME_ATTR_SSSR_CORE_0_AFTER_DUMP, memdump_path);
+ if (unlikely(ret)) {
+		WL_ERR(("Failed to nla put sssr core 0 after dump path, ret=%d\n", ret));
+ goto exit;
+ }
+
+#ifdef DHD_SSSR_DUMP_BEFORE_SR
+ dhd_get_memdump_filename(ndev, memdump_path, MEMDUMP_PATH_LEN,
+ "sssr_dump_core_1_before_SR");
+ ret = nla_put_string(skb, DUMP_FILENAME_ATTR_SSSR_CORE_1_BEFORE_DUMP, memdump_path);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put sssr core 1 before dump path, ret=%d\n", ret));
+ goto exit;
+ }
+#endif /* DHD_SSSR_DUMP_BEFORE_SR */
+
+ dhd_get_memdump_filename(ndev, memdump_path, MEMDUMP_PATH_LEN,
+ "sssr_dump_core_1_after_SR");
+ ret = nla_put_string(skb, DUMP_FILENAME_ATTR_SSSR_CORE_1_AFTER_DUMP, memdump_path);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put sssr core 1 after dump path, ret=%d\n", ret));
+ goto exit;
+ }
+
+ if (dhdp->sssr_d11_outofreset[2]) {
+#ifdef DHD_SSSR_DUMP_BEFORE_SR
+ dhd_get_memdump_filename(ndev, memdump_path, MEMDUMP_PATH_LEN,
+ "sssr_dump_core_2_before_SR");
+ ret = nla_put_string(skb, DUMP_FILENAME_ATTR_SSSR_CORE_2_BEFORE_DUMP,
+ memdump_path);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put sssr core 2 before dump path, ret=%d\n",
+ ret));
+ goto exit;
+ }
+#endif /* DHD_SSSR_DUMP_BEFORE_SR */
+
+ dhd_get_memdump_filename(ndev, memdump_path, MEMDUMP_PATH_LEN,
+ "sssr_dump_core_2_after_SR");
+ ret = nla_put_string(skb, DUMP_FILENAME_ATTR_SSSR_CORE_2_AFTER_DUMP,
+ memdump_path);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put sssr core 2 after dump path, ret=%d\n",
+ ret));
+ goto exit;
+ }
+ }
+
+#ifdef DHD_SSSR_DUMP_BEFORE_SR
+ dhd_get_memdump_filename(ndev, memdump_path, MEMDUMP_PATH_LEN,
+ "sssr_dump_dig_before_SR");
+ ret = nla_put_string(skb, DUMP_FILENAME_ATTR_SSSR_DIG_BEFORE_DUMP, memdump_path);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put sssr dig before dump path, ret=%d\n", ret));
+ goto exit;
+ }
+#endif /* DHD_SSSR_DUMP_BEFORE_SR */
+
+ dhd_get_memdump_filename(ndev, memdump_path, MEMDUMP_PATH_LEN,
+ "sssr_dump_dig_after_SR");
+ ret = nla_put_string(skb, DUMP_FILENAME_ATTR_SSSR_DIG_AFTER_DUMP, memdump_path);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put sssr dig after dump path, ret=%d\n", ret));
+ goto exit;
+ }
+
+#ifdef DHD_SSSR_DUMP
+ memset(arr_len, 0, sizeof(arr_len));
+ dhd_nla_put_sssr_dump_len(ndev, arr_len);
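+	/* arr_len layout: even indices hold the before-SR lengths and odd
+	 * indices the after-SR lengths for d11 cores 0..2 and the DIG block.
+	 */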
+#ifdef DHD_SSSR_DUMP_BEFORE_SR
+ ret |= nla_put_u32(skb, DUMP_LEN_ATTR_SSSR_C0_D11_BEFORE, arr_len[0]);
+ ret |= nla_put_u32(skb, DUMP_LEN_ATTR_SSSR_C1_D11_BEFORE, arr_len[2]);
+ ret |= nla_put_u32(skb, DUMP_LEN_ATTR_SSSR_C2_D11_BEFORE, arr_len[4]);
+ ret |= nla_put_u32(skb, DUMP_LEN_ATTR_SSSR_DIG_BEFORE, arr_len[6]);
+#endif /* DHD_SSSR_DUMP_BEFORE_SR */
+ ret |= nla_put_u32(skb, DUMP_LEN_ATTR_SSSR_C0_D11_AFTER, arr_len[1]);
+ ret |= nla_put_u32(skb, DUMP_LEN_ATTR_SSSR_C1_D11_AFTER, arr_len[3]);
+ ret |= nla_put_u32(skb, DUMP_LEN_ATTR_SSSR_C2_D11_AFTER, arr_len[5]);
+ ret |= nla_put_u32(skb, DUMP_LEN_ATTR_SSSR_DIG_AFTER, arr_len[7]);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put sssr dump len, ret=%d\n", ret));
+ goto exit;
+ }
+#endif /* DHD_SSSR_DUMP */
+
+exit:
+ return ret;
+}
+#else
+static int wl_cfgvendor_nla_put_sssr_dump_data(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ return BCME_OK;
+}
+#endif /* DHD_SSSR_DUMP */
+
+static int wl_cfgvendor_nla_put_debug_dump_data(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ int ret = BCME_OK;
+ uint32 len = 0;
+ char dump_path[128];
+
+ ret = dhd_get_debug_dump_file_name(ndev, NULL, dump_path, sizeof(dump_path));
+ if (ret < 0) {
+ WL_ERR(("%s: Failed to get debug dump filename\n", __FUNCTION__));
+ goto exit;
+ }
+ ret = nla_put_string(skb, DUMP_FILENAME_ATTR_DEBUG_DUMP, dump_path);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put debug dump path, ret=%d\n", ret));
+ goto exit;
+ }
+ WL_ERR(("debug_dump path = %s%s\n", dump_path, FILE_NAME_HAL_TAG));
+ wl_print_verinfo(wl_get_cfg(ndev));
+
+ len = dhd_get_time_str_len();
+ if (len) {
+ ret = nla_put_u32(skb, DUMP_LEN_ATTR_TIMESTAMP, len);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put time stamp length, ret=%d\n", ret));
+ goto exit;
+ }
+ }
+
+ len = dhd_get_dld_len(DLD_BUF_TYPE_GENERAL);
+ if (len) {
+ ret = nla_put_u32(skb, DUMP_LEN_ATTR_GENERAL_LOG, len);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put general log length, ret=%d\n", ret));
+ goto exit;
+ }
+ }
+#ifdef EWP_ECNTRS_LOGGING
+ len = dhd_get_ecntrs_len(ndev, NULL);
+ if (len) {
+ ret = nla_put_u32(skb, DUMP_LEN_ATTR_ECNTRS, len);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put ecntrs length, ret=%d\n", ret));
+ goto exit;
+ }
+ }
+#endif /* EWP_ECNTRS_LOGGING */
+ len = dhd_get_dld_len(DLD_BUF_TYPE_SPECIAL);
+ if (len) {
+ ret = nla_put_u32(skb, DUMP_LEN_ATTR_SPECIAL_LOG, len);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put special log length, ret=%d\n", ret));
+ goto exit;
+ }
+ }
+ len = dhd_get_dhd_dump_len(ndev, NULL);
+ if (len) {
+ ret = nla_put_u32(skb, DUMP_LEN_ATTR_DHD_DUMP, len);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put dhd dump length, ret=%d\n", ret));
+ goto exit;
+ }
+ }
+
+#if defined(BCMPCIE)
+ len = dhd_get_ext_trap_len(ndev, NULL);
+ if (len) {
+ ret = nla_put_u32(skb, DUMP_LEN_ATTR_EXT_TRAP, len);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put ext trap length, ret=%d\n", ret));
+ goto exit;
+ }
+ }
+#endif /* BCMPCIE */
+
+#if defined(DHD_FW_COREDUMP) && defined(DNGL_EVENT_SUPPORT)
+ len = dhd_get_health_chk_len(ndev, NULL);
+ if (len) {
+ ret = nla_put_u32(skb, DUMP_LEN_ATTR_HEALTH_CHK, len);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put health check length, ret=%d\n", ret));
+ goto exit;
+ }
+ }
+#endif
+
+ len = dhd_get_dld_len(DLD_BUF_TYPE_PRESERVE);
+ if (len) {
+ ret = nla_put_u32(skb, DUMP_LEN_ATTR_PRESERVE_LOG, len);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put preserve log length, ret=%d\n", ret));
+ goto exit;
+ }
+ }
+
+ len = dhd_get_cookie_log_len(ndev, NULL);
+ if (len) {
+ ret = nla_put_u32(skb, DUMP_LEN_ATTR_COOKIE, len);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put cookie length, ret=%d\n", ret));
+ goto exit;
+ }
+ }
+#ifdef DHD_DUMP_PCIE_RINGS
+ len = dhd_get_flowring_len(ndev, NULL);
+ if (len) {
+ ret = nla_put_u32(skb, DUMP_LEN_ATTR_FLOWRING_DUMP, len);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put flowring dump length, ret=%d\n", ret));
+ goto exit;
+ }
+ }
+#endif
+#ifdef DHD_STATUS_LOGGING
+ len = dhd_get_status_log_len(ndev, NULL);
+ if (len) {
+ ret = nla_put_u32(skb, DUMP_LEN_ATTR_STATUS_LOG, len);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put status log length, ret=%d\n", ret));
+ goto exit;
+ }
+ }
+#endif /* DHD_STATUS_LOGGING */
+#ifdef EWP_RTT_LOGGING
+ len = dhd_get_rtt_len(ndev, NULL);
+ if (len) {
+ ret = nla_put_u32(skb, DUMP_LEN_ATTR_RTT_LOG, len);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put rtt log length, ret=%d\n", ret));
+ goto exit;
+ }
+ }
+#endif /* EWP_RTT_LOGGING */
+exit:
+ return ret;
+}
+#ifdef DNGL_AXI_ERROR_LOGGING
+static void wl_cfgvendor_nla_put_axi_error_data(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ int ret = 0;
+ char axierrordump_path[MEMDUMP_PATH_LEN];
+ int dumpsize = dhd_os_get_axi_error_dump_size(ndev);
+ if (dumpsize <= 0) {
+		WL_ERR(("Failed to calculate axi error dump len\n"));
+ return;
+ }
+ dhd_os_get_axi_error_filename(ndev, axierrordump_path, MEMDUMP_PATH_LEN);
+ ret = nla_put_string(skb, DUMP_FILENAME_ATTR_AXI_ERROR_DUMP, axierrordump_path);
+ if (ret) {
+ WL_ERR(("Failed to put filename\n"));
+ return;
+ }
+ ret = nla_put_u32(skb, DUMP_LEN_ATTR_AXI_ERROR, dumpsize);
+ if (ret) {
+ WL_ERR(("Failed to put filesize\n"));
+ return;
+ }
+}
+#endif /* DNGL_AXI_ERROR_LOGGING */
+#ifdef DHD_PKT_LOGGING
+static int wl_cfgvendor_nla_put_pktlogdump_data(struct sk_buff *skb,
+ struct net_device *ndev, bool pktlogdbg)
+{
+ int ret = BCME_OK;
+ char pktlogdump_path[MEMDUMP_PATH_LEN];
+ uint32 pktlog_dumpsize = dhd_os_get_pktlog_dump_size(ndev);
+ if (pktlog_dumpsize == 0) {
+		WL_ERR(("Failed to calculate pktlog len\n"));
+ return BCME_ERROR;
+ }
+
+ dhd_os_get_pktlogdump_filename(ndev, pktlogdump_path, MEMDUMP_PATH_LEN);
+
+ if (pktlogdbg) {
+ ret = nla_put_string(skb, DUMP_FILENAME_ATTR_PKTLOG_DEBUG_DUMP, pktlogdump_path);
+ if (ret) {
+ WL_ERR(("Failed to put filename\n"));
+ return ret;
+ }
+ ret = nla_put_u32(skb, DUMP_LEN_ATTR_PKTLOG_DEBUG, pktlog_dumpsize);
+ if (ret) {
+ WL_ERR(("Failed to put filesize\n"));
+ return ret;
+ }
+ } else {
+ ret = nla_put_string(skb, DUMP_FILENAME_ATTR_PKTLOG_DUMP, pktlogdump_path);
+ if (ret) {
+ WL_ERR(("Failed to put filename\n"));
+ return ret;
+ }
+ ret = nla_put_u32(skb, DUMP_LEN_ATTR_PKTLOG, pktlog_dumpsize);
+ if (ret) {
+ WL_ERR(("Failed to put filesize\n"));
+ return ret;
+ }
+ }
+ return ret;
+}
+#endif /* DHD_PKT_LOGGING */
+
+static int wl_cfgvendor_nla_put_memdump_data(struct sk_buff *skb,
+ struct net_device *ndev, const uint32 fw_len)
+{
+ char memdump_path[MEMDUMP_PATH_LEN];
+ int ret = BCME_OK;
+
+ dhd_get_memdump_filename(ndev, memdump_path, MEMDUMP_PATH_LEN, "mem_dump");
+ ret = nla_put_string(skb, DUMP_FILENAME_ATTR_MEM_DUMP, memdump_path);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put mem dump path, ret=%d\n", ret));
+ goto exit;
+ }
+ ret = nla_put_u32(skb, DUMP_LEN_ATTR_MEMDUMP, fw_len);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put mem dump length, ret=%d\n", ret));
+ goto exit;
+ }
+
+exit:
+ return ret;
+}
+
+static int wl_cfgvendor_nla_put_dump_data(dhd_pub_t *dhd_pub, struct sk_buff *skb,
+ struct net_device *ndev, const uint32 fw_len)
+{
+ int ret = BCME_OK;
+
+#ifdef DNGL_AXI_ERROR_LOGGING
+ if (dhd_pub->smmu_fault_occurred) {
+ wl_cfgvendor_nla_put_axi_error_data(skb, ndev);
+ }
+#endif /* DNGL_AXI_ERROR_LOGGING */
+ if (dhd_pub->memdump_enabled || (dhd_pub->memdump_type == DUMP_TYPE_BY_SYSDUMP)) {
+ if (((ret = wl_cfgvendor_nla_put_debug_dump_data(skb, ndev)) < 0) ||
+ ((ret = wl_cfgvendor_nla_put_memdump_data(skb, ndev, fw_len)) < 0) ||
+ ((ret = wl_cfgvendor_nla_put_sssr_dump_data(skb, ndev)) < 0)) {
+ goto done;
+ }
+#ifdef DHD_PKT_LOGGING
+ if ((ret = wl_cfgvendor_nla_put_pktlogdump_data(skb, ndev, FALSE)) < 0) {
+ goto done;
+ }
+#endif /* DHD_PKT_LOGGING */
+ }
+done:
+ return ret;
+}
+
+static void wl_cfgvendor_dbg_send_file_dump_evt(void *ctx, const void *data,
+ const uint32 len, const uint32 fw_len)
+{
+ struct net_device *ndev = ctx;
+ struct wiphy *wiphy;
+ gfp_t kflags;
+ struct sk_buff *skb = NULL;
+ struct bcm_cfg80211 *cfg;
+ dhd_pub_t *dhd_pub;
+ int ret = BCME_OK;
+
+ if (!ndev) {
+ WL_ERR(("ndev is NULL\n"));
+ return;
+ }
+
+ kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+ wiphy = ndev->ieee80211_ptr->wiphy;
+ /* Alloc the SKB for vendor_event */
+#if (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || \
+ LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
+ skb = cfg80211_vendor_event_alloc(wiphy, NULL, len + CFG80211_VENDOR_EVT_SKB_SZ,
+ GOOGLE_FILE_DUMP_EVENT, kflags);
+#else
+ skb = cfg80211_vendor_event_alloc(wiphy, len + CFG80211_VENDOR_EVT_SKB_SZ,
+ GOOGLE_FILE_DUMP_EVENT, kflags);
+#endif /* (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || */
+ /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) */
+ if (!skb) {
+		WL_ERR(("skb alloc failed\n"));
+ return;
+ }
+
+ cfg = wiphy_priv(wiphy);
+ dhd_pub = cfg->pub;
+
+#ifdef DHD_PKT_LOGGING
+ if (dhd_pub->pktlog_debug) {
+ if ((ret = wl_cfgvendor_nla_put_pktlogdump_data(skb, ndev, TRUE)) < 0) {
+ WL_ERR(("nla put failed\n"));
+ goto done;
+ }
+ dhd_pub->pktlog_debug = FALSE;
+ } else
+#endif /* DHD_PKT_LOGGING */
+ {
+ if ((ret = wl_cfgvendor_nla_put_dump_data(dhd_pub, skb, ndev, fw_len)) < 0) {
+ WL_ERR(("nla put failed\n"));
+ goto done;
+ }
+ }
+	/* TODO: as in the function above, also add handling for debug_dump, sssr_dump, and pktlog. */
+ cfg80211_vendor_event(skb, kflags);
+ return;
+done:
+ if (skb) {
+ dev_kfree_skb_any(skb);
+ }
+}
+#endif /* DHD_LOG_DUMP */
+
+static int wl_cfgvendor_dbg_get_version(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int ret = BCME_OK, rem, type;
+ int buf_len = 1024;
+ bool dhd_ver = FALSE;
+ char *buf_ptr, *ver, *p;
+ const struct nlattr *iter;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+
+ buf_ptr = (char *)MALLOCZ(cfg->osh, buf_len);
+ if (!buf_ptr) {
+		WL_ERR(("failed to allocate the buffer for version\n"));
+ ret = BCME_NOMEM;
+ goto exit;
+ }
+ nla_for_each_attr(iter, data, len, rem) {
+ type = nla_type(iter);
+ switch (type) {
+ case DEBUG_ATTRIBUTE_GET_DRIVER:
+ dhd_ver = TRUE;
+ break;
+ case DEBUG_ATTRIBUTE_GET_FW:
+ dhd_ver = FALSE;
+ break;
+ default:
+ WL_ERR(("Unknown type: %d\n", type));
+ ret = BCME_ERROR;
+ goto exit;
+ }
+ }
+ ret = dhd_os_get_version(bcmcfg_to_prmry_ndev(cfg), dhd_ver, &buf_ptr, buf_len);
+ if (ret < 0) {
+ WL_ERR(("failed to get the version %d\n", ret));
+ goto exit;
+ }
+ ver = strstr(buf_ptr, "version ");
+ if (!ver) {
+ WL_ERR(("failed to locate the version\n"));
+ goto exit;
+ }
+ ver += strlen("version ");
+ /* Adjust version format to fit in android sys property */
+ for (p = ver; (*p != ' ') && (*p != '\n') && (*p != 0); p++) {
+ ;
+ }
+ ret = wl_cfgvendor_send_cmd_reply(wiphy, ver, p - ver);
+exit:
+ MFREE(cfg->osh, buf_ptr, buf_len);
+ return ret;
+}
+
+#ifdef DBG_PKT_MON
+static int wl_cfgvendor_dbg_start_pkt_fate_monitoring(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ dhd_pub_t *dhd_pub = cfg->pub;
+ int ret;
+
+ ret = dhd_os_dbg_attach_pkt_monitor(dhd_pub);
+ if (unlikely(ret)) {
+		WL_ERR(("failed to start pkt fate monitoring, ret=%d\n", ret));
+ }
+
+ return ret;
+}
+
+typedef int (*dbg_mon_get_pkts_t) (dhd_pub_t *dhdp, void __user *user_buf,
+ uint16 req_count, uint16 *resp_count);
+
+static int __wl_cfgvendor_dbg_get_pkt_fates(struct wiphy *wiphy,
+ const void *data, int len, dbg_mon_get_pkts_t dbg_mon_get_pkts)
+{
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ dhd_pub_t *dhd_pub = cfg->pub;
+ struct sk_buff *skb = NULL;
+ const struct nlattr *iter;
+ void __user *user_buf = NULL;
+ uint16 req_count = 0, resp_count = 0;
+ int ret, tmp, type, mem_needed;
+
+ nla_for_each_attr(iter, data, len, tmp) {
+ type = nla_type(iter);
+ switch (type) {
+ case DEBUG_ATTRIBUTE_PKT_FATE_NUM:
+ req_count = nla_get_u32(iter);
+ break;
+ case DEBUG_ATTRIBUTE_PKT_FATE_DATA:
+ user_buf = (void __user *)(unsigned long) nla_get_u64(iter);
+ break;
+ default:
+ WL_ERR(("%s: no such attribute %d\n", __FUNCTION__, type));
+ ret = -EINVAL;
+ goto exit;
+ }
+ }
+
+ if (!req_count || !user_buf) {
+ WL_ERR(("%s: invalid request, user_buf=%p, req_count=%u\n",
+ __FUNCTION__, user_buf, req_count));
+ ret = -EINVAL;
+ goto exit;
+ }
+
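+	/* The packet-fate monitor copies up to req_count records directly into
+	 * the user buffer and returns the actual count via resp_count.
+	 */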
+ ret = dbg_mon_get_pkts(dhd_pub, user_buf, req_count, &resp_count);
+ if (unlikely(ret)) {
+ WL_ERR(("failed to get packets, ret:%d \n", ret));
+ goto exit;
+ }
+
+ mem_needed = VENDOR_REPLY_OVERHEAD + ATTRIBUTE_U32_LEN;
+ skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, mem_needed);
+ if (unlikely(!skb)) {
+		WL_ERR(("skb alloc failed\n"));
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ ret = nla_put_u32(skb, DEBUG_ATTRIBUTE_PKT_FATE_NUM, resp_count);
+ if (ret < 0) {
+ WL_ERR(("Failed to put DEBUG_ATTRIBUTE_PKT_FATE_NUM, ret:%d\n", ret));
+ goto exit;
+ }
+
+ ret = cfg80211_vendor_cmd_reply(skb);
+ if (unlikely(ret)) {
+ WL_ERR(("vendor Command reply failed ret:%d \n", ret));
+ }
+ return ret;
+
+exit:
+ /* Free skb memory */
+ if (skb) {
+ kfree_skb(skb);
+ }
+ return ret;
+}
+
+static int wl_cfgvendor_dbg_get_tx_pkt_fates(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int ret;
+
+ ret = __wl_cfgvendor_dbg_get_pkt_fates(wiphy, data, len,
+ dhd_os_dbg_monitor_get_tx_pkts);
+ if (unlikely(ret)) {
+ WL_ERR(("failed to get tx packets, ret:%d \n", ret));
+ }
+
+ return ret;
+}
+
+static int wl_cfgvendor_dbg_get_rx_pkt_fates(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int ret;
+
+ ret = __wl_cfgvendor_dbg_get_pkt_fates(wiphy, data, len,
+ dhd_os_dbg_monitor_get_rx_pkts);
+ if (unlikely(ret)) {
+ WL_ERR(("failed to get rx packets, ret:%d \n", ret));
+ }
+
+ return ret;
+}
+#endif /* DBG_PKT_MON */
+
+#ifdef KEEP_ALIVE
+static int wl_cfgvendor_start_mkeep_alive(struct wiphy *wiphy, struct wireless_dev *wdev,
+ const void *data, int len)
+{
+ /* max size of IP packet for keep alive */
+ const int MKEEP_ALIVE_IP_PKT_MAX = 256;
+
+ int ret = BCME_OK, rem, type;
+ uint8 mkeep_alive_id = 0;
+ uint8 *ip_pkt = NULL;
+ uint16 ip_pkt_len = 0;
+ uint16 ether_type = ETHERTYPE_IP;
+ uint8 src_mac[ETHER_ADDR_LEN];
+ uint8 dst_mac[ETHER_ADDR_LEN];
+ uint32 period_msec = 0;
+ const struct nlattr *iter;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+
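+	/* Gather keep-alive parameters; the IP packet attribute is only
+	 * accepted after its length attribute has been seen and validated.
+	 */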
+ nla_for_each_attr(iter, data, len, rem) {
+ type = nla_type(iter);
+ switch (type) {
+ case MKEEP_ALIVE_ATTRIBUTE_ID:
+ mkeep_alive_id = nla_get_u8(iter);
+ break;
+ case MKEEP_ALIVE_ATTRIBUTE_IP_PKT_LEN:
+ ip_pkt_len = nla_get_u16(iter);
+ if (ip_pkt_len > MKEEP_ALIVE_IP_PKT_MAX) {
+ ret = BCME_BADARG;
+ goto exit;
+ }
+ break;
+ case MKEEP_ALIVE_ATTRIBUTE_IP_PKT:
+ if (ip_pkt) {
+ ret = BCME_BADARG;
+ WL_ERR(("ip_pkt already allocated\n"));
+ goto exit;
+ }
+ if (!ip_pkt_len) {
+ ret = BCME_BADARG;
+ WL_ERR(("ip packet length is 0\n"));
+ goto exit;
+ }
+ ip_pkt = (u8 *)MALLOCZ(cfg->osh, ip_pkt_len);
+ if (ip_pkt == NULL) {
+ ret = BCME_NOMEM;
+ WL_ERR(("Failed to allocate mem for ip packet\n"));
+ goto exit;
+ }
+ memcpy(ip_pkt, (u8*)nla_data(iter), ip_pkt_len);
+ break;
+ case MKEEP_ALIVE_ATTRIBUTE_SRC_MAC_ADDR:
+ memcpy(src_mac, nla_data(iter), ETHER_ADDR_LEN);
+ break;
+ case MKEEP_ALIVE_ATTRIBUTE_DST_MAC_ADDR:
+ memcpy(dst_mac, nla_data(iter), ETHER_ADDR_LEN);
+ break;
+ case MKEEP_ALIVE_ATTRIBUTE_PERIOD_MSEC:
+ period_msec = nla_get_u32(iter);
+ break;
+ case MKEEP_ALIVE_ATTRIBUTE_ETHER_TYPE:
+ ether_type = nla_get_u16(iter);
+ if (!((ether_type == ETHERTYPE_IP) ||
+ (ether_type == ETHERTYPE_IPV6))) {
+ WL_ERR(("Invalid ether type, %2x\n", ether_type));
+ ret = BCME_BADARG;
+ goto exit;
+ }
+ break;
+ default:
+ WL_ERR(("Unknown type: %d\n", type));
+ ret = BCME_BADARG;
+ goto exit;
+ }
+ }
+
+ if (ip_pkt == NULL) {
+ ret = BCME_BADARG;
+ WL_ERR(("ip packet is NULL\n"));
+ goto exit;
+ }
+
+ ret = wl_cfg80211_start_mkeep_alive(cfg, mkeep_alive_id,
+ ether_type, ip_pkt, ip_pkt_len, src_mac, dst_mac, period_msec);
+ if (ret < 0) {
+		WL_ERR(("start_mkeep_alive failed, ret: %d\n", ret));
+ }
+
+exit:
+ if (ip_pkt) {
+ MFREE(cfg->osh, ip_pkt, ip_pkt_len);
+ }
+
+ return ret;
+}
+
+static int wl_cfgvendor_stop_mkeep_alive(struct wiphy *wiphy, struct wireless_dev *wdev,
+ const void *data, int len)
+{
+ int ret = BCME_OK, rem, type;
+ uint8 mkeep_alive_id = 0;
+ const struct nlattr *iter;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+
+ nla_for_each_attr(iter, data, len, rem) {
+ type = nla_type(iter);
+ switch (type) {
+ case MKEEP_ALIVE_ATTRIBUTE_ID:
+ mkeep_alive_id = nla_get_u8(iter);
+ break;
+ default:
+ WL_ERR(("Unknown type: %d\n", type));
+ ret = BCME_BADARG;
+ break;
+ }
+ }
+
+ ret = wl_cfg80211_stop_mkeep_alive(cfg, mkeep_alive_id);
+ if (ret < 0) {
+		WL_ERR(("stop_mkeep_alive failed, ret: %d\n", ret));
+ }
+
+ return ret;
+}
+#endif /* KEEP_ALIVE */
+
+#if defined(PKT_FILTER_SUPPORT) && defined(APF)
+static int
+wl_cfgvendor_apf_get_capabilities(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ struct net_device *ndev = wdev_to_ndev(wdev);
+ struct sk_buff *skb = NULL;
+ int ret, ver, max_len, mem_needed;
+
+ /* APF version */
+ ver = 0;
+ ret = dhd_dev_apf_get_version(ndev, &ver);
+ if (unlikely(ret)) {
+ WL_ERR(("APF get version failed, ret=%d\n", ret));
+ return ret;
+ }
+
+ /* APF memory size limit */
+ max_len = 0;
+ ret = dhd_dev_apf_get_max_len(ndev, &max_len);
+ if (unlikely(ret)) {
+ WL_ERR(("APF get maximum length failed, ret=%d\n", ret));
+ return ret;
+ }
+
+ mem_needed = VENDOR_REPLY_OVERHEAD + (ATTRIBUTE_U32_LEN * 2);
+
+ skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, mem_needed);
+ if (unlikely(!skb)) {
+ WL_ERR(("%s: can't allocate %d bytes\n", __FUNCTION__, mem_needed));
+ return -ENOMEM;
+ }
+
+ ret = nla_put_u32(skb, APF_ATTRIBUTE_VERSION, ver);
+ if (ret < 0) {
+ WL_ERR(("Failed to put APF_ATTRIBUTE_VERSION, ret:%d\n", ret));
+ goto exit;
+ }
+ ret = nla_put_u32(skb, APF_ATTRIBUTE_MAX_LEN, max_len);
+ if (ret < 0) {
+ WL_ERR(("Failed to put APF_ATTRIBUTE_MAX_LEN, ret:%d\n", ret));
+ goto exit;
+ }
+
+ ret = cfg80211_vendor_cmd_reply(skb);
+ if (unlikely(ret)) {
+ WL_ERR(("vendor command reply failed, ret=%d\n", ret));
+ }
+ return ret;
+exit:
+ /* Free skb memory */
+ kfree_skb(skb);
+ return ret;
+}
+
+static int
+wl_cfgvendor_apf_set_filter(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ struct net_device *ndev = wdev_to_ndev(wdev);
+ const struct nlattr *iter;
+ u8 *program = NULL;
+ u32 program_len = 0;
+ int ret, tmp, type;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+
+ if (len <= 0) {
+ WL_ERR(("Invalid len: %d\n", len));
+ ret = -EINVAL;
+ goto exit;
+ }
+ nla_for_each_attr(iter, data, len, tmp) {
+ type = nla_type(iter);
+ switch (type) {
+ case APF_ATTRIBUTE_PROGRAM_LEN:
+ /* check if the iter value is valid and program_len
+ * is not already initialized.
+ */
+ if (nla_len(iter) == sizeof(uint32) && !program_len) {
+ program_len = nla_get_u32(iter);
+ } else {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (program_len > WL_APF_PROGRAM_MAX_SIZE) {
+					WL_ERR(("program length exceeds WL_APF_PROGRAM_MAX_SIZE\n"));
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (unlikely(!program_len)) {
+ WL_ERR(("zero program length\n"));
+ ret = -EINVAL;
+ goto exit;
+ }
+ break;
+ case APF_ATTRIBUTE_PROGRAM:
+ if (unlikely(program)) {
+ WL_ERR(("program already allocated\n"));
+ ret = -EINVAL;
+ goto exit;
+ }
+ if (unlikely(!program_len)) {
+ WL_ERR(("program len is not set\n"));
+ ret = -EINVAL;
+ goto exit;
+ }
+ if (nla_len(iter) != program_len) {
+					WL_ERR(("program length mismatch\n"));
+ ret = -EINVAL;
+ goto exit;
+ }
+ program = MALLOCZ(cfg->osh, program_len);
+ if (unlikely(!program)) {
+ WL_ERR(("%s: can't allocate %d bytes\n",
+ __FUNCTION__, program_len));
+ ret = -ENOMEM;
+ goto exit;
+ }
+ memcpy(program, (u8*)nla_data(iter), program_len);
+ break;
+ default:
+ WL_ERR(("%s: no such attribute %d\n", __FUNCTION__, type));
+ ret = -EINVAL;
+ goto exit;
+ }
+ }
+
+ ret = dhd_dev_apf_add_filter(ndev, program, program_len);
+
+exit:
+ if (program) {
+ MFREE(cfg->osh, program, program_len);
+ }
+ return ret;
+}
+#endif /* PKT_FILTER_SUPPORT && APF */
+
+#ifdef NDO_CONFIG_SUPPORT
+static int wl_cfgvendor_configure_nd_offload(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ const struct nlattr *iter;
+ int ret = BCME_OK, rem, type;
+ u8 enable = 0;
+
+ nla_for_each_attr(iter, data, len, rem) {
+ type = nla_type(iter);
+ switch (type) {
+ case ANDR_WIFI_ATTRIBUTE_ND_OFFLOAD_VALUE:
+ enable = nla_get_u8(iter);
+ break;
+ default:
+ WL_ERR(("Unknown type: %d\n", type));
+ ret = BCME_BADARG;
+ goto exit;
+ }
+ }
+
+ ret = dhd_dev_ndo_cfg(bcmcfg_to_prmry_ndev(cfg), enable);
+ if (ret < 0) {
+ WL_ERR(("dhd_dev_ndo_cfg() failed: %d\n", ret));
+ }
+
+exit:
+ return ret;
+}
+#endif /* NDO_CONFIG_SUPPORT */
+
+#if !defined(BCMSUP_4WAY_HANDSHAKE) || (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0))
+static int wl_cfgvendor_set_pmk(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int ret = 0;
+ wsec_pmk_t pmk;
+ const struct nlattr *iter;
+ int rem, type;
+ struct net_device *ndev = wdev_to_ndev(wdev);
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct wl_security *sec;
+
+ bzero(&pmk, sizeof(pmk));
+ nla_for_each_attr(iter, data, len, rem) {
+ type = nla_type(iter);
+ switch (type) {
+ case BRCM_ATTR_DRIVER_KEY_PMK:
+ pmk.flags = 0;
+ pmk.key_len = htod16(nla_len(iter));
+ ret = memcpy_s(pmk.key, sizeof(pmk.key),
+ (uint8 *)nla_data(iter), nla_len(iter));
+ if (ret) {
+ WL_ERR(("Failed to copy pmk: %d\n", ret));
+ ret = -EINVAL;
+ goto exit;
+ }
+ break;
+ default:
+ WL_ERR(("Unknown type: %d\n", type));
+ ret = BCME_BADARG;
+ goto exit;
+ }
+ }
+
+ sec = wl_read_prof(cfg, ndev, WL_PROF_SEC);
+ if ((sec->wpa_auth == WLAN_AKM_SUITE_8021X) ||
+ (sec->wpa_auth == WL_AKM_SUITE_SHA256_1X)) {
+ ret = wldev_iovar_setbuf(ndev, "okc_info_pmk", pmk.key, pmk.key_len, cfg->ioctl_buf,
+ WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync);
+ if (ret) {
+			/* may fail if 'okc' is not supported */
+ WL_INFORM_MEM(("okc_info_pmk failed, err=%d (ignore)\n", ret));
+ }
+ }
+
+ ret = wldev_ioctl_set(ndev, WLC_SET_WSEC_PMK, &pmk, sizeof(pmk));
+	WL_INFORM_MEM(("IOVAR set_pmk ret:%d\n", ret));
+exit:
+ return ret;
+}
+#endif /* !BCMSUP_4WAY_HANDSHAKE || LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0) */
+
+static int wl_cfgvendor_get_driver_feature(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int ret = BCME_OK;
+ u8 supported[(BRCM_WLAN_VENDOR_FEATURES_MAX / 8) + 1] = {0};
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ dhd_pub_t *dhd_pub = cfg->pub;
+ struct sk_buff *skb;
+ int32 mem_needed;
+
+ mem_needed = VENDOR_REPLY_OVERHEAD + NLA_HDRLEN + sizeof(supported);
+
+ BCM_REFERENCE(dhd_pub);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0))
+ if (FW_SUPPORTED(dhd_pub, idsup)) {
+ ret = wl_features_set(supported, sizeof(supported),
+ BRCM_WLAN_VENDOR_FEATURE_KEY_MGMT_OFFLOAD);
+ }
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0) */
+
+ /* Alloc the SKB for vendor_event */
+ skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, mem_needed);
+ if (unlikely(!skb)) {
+		WL_ERR(("skb alloc failed\n"));
+ ret = BCME_NOMEM;
+ goto exit;
+ }
+
+ ret = nla_put(skb, BRCM_ATTR_DRIVER_FEATURE_FLAGS, sizeof(supported), supported);
+ if (ret) {
+ kfree_skb(skb);
+ goto exit;
+ }
+ ret = cfg80211_vendor_cmd_reply(skb);
+exit:
+ return ret;
+}
+
+#ifdef WL_P2P_RAND
+static int
+wl_cfgvendor_set_p2p_rand_mac(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int err = 0;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ int type;
+ WL_DBG(("%s, wdev->iftype = %d\n", __FUNCTION__, wdev->iftype));
+ WL_INFORM_MEM(("randomized p2p_dev_addr - "MACDBG"\n", MAC2STRDBG(nla_data(data))));
+
+ BCM_REFERENCE(cfg);
+
+ type = nla_type(data);
+
+ if (type == BRCM_ATTR_DRIVER_RAND_MAC) {
+ if (nla_len(data) != ETHER_ADDR_LEN) {
+			WL_ERR(("nla_len does not match ETHER_ADDR_LEN\n"));
+ err = -EINVAL;
+ goto exit;
+ }
+
+ if (wdev->iftype != NL80211_IFTYPE_P2P_DEVICE) {
+			WL_ERR(("wrong interface type, wdev->iftype=%d\n", wdev->iftype));
+ err = -EINVAL;
+ goto exit;
+ }
+ (void)memcpy_s(wl_to_p2p_bss_macaddr(cfg, P2PAPI_BSSCFG_DEVICE), ETHER_ADDR_LEN,
+ nla_data(data), ETHER_ADDR_LEN);
+ (void)memcpy_s(wdev->address, ETHER_ADDR_LEN, nla_data(data), ETHER_ADDR_LEN);
+
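+		/* Bounce P2P discovery so the firmware re-registers with the
+		 * newly randomized device address.
+		 */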
+ err = wl_cfgp2p_disable_discovery(cfg);
+ if (unlikely(err < 0)) {
+ WL_ERR(("P2P disable discovery failed, ret=%d\n", err));
+ goto exit;
+ }
+
+ err = wl_cfgp2p_set_firm_p2p(cfg);
+ if (unlikely(err < 0)) {
+ WL_ERR(("Set P2P address in firmware failed, ret=%d\n", err));
+ goto exit;
+ }
+
+ err = wl_cfgp2p_enable_discovery(cfg, bcmcfg_to_prmry_ndev(cfg), NULL, 0);
+ if (unlikely(err < 0)) {
+ WL_ERR(("P2P enable discovery failed, ret=%d\n", err));
+ goto exit;
+ }
+ } else {
+ WL_ERR(("unexpected attrib type:%d\n", type));
+ err = -EINVAL;
+ }
+exit:
+ return err;
+}
+#endif /* WL_P2P_RAND */
+
+#ifdef WL_SAR_TX_POWER
+static int
+wl_cfgvendor_tx_power_scenario(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int err = BCME_ERROR, rem, type;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(wdev_to_ndev(wdev));
+ const struct nlattr *iter;
+ wifi_power_scenario sar_tx_power_val = WIFI_POWER_SCENARIO_INVALID;
+ wl_sar_modes_t wifi_tx_power_mode = 0;
+
+ nla_for_each_attr(iter, data, len, rem) {
+ type = nla_type(iter);
+ if (type == ANDR_WIFI_ATTRIBUTE_TX_POWER_SCENARIO) {
+ sar_tx_power_val = nla_get_s8(iter);
+ } else {
+ WL_ERR(("Unknown attr type: %d\n", type));
+ err = -EINVAL;
+ goto exit;
+ }
+ }
+ /* If sar tx power is already configured, no need to set it again */
+ if (cfg->wifi_tx_power_mode == sar_tx_power_val) {
+ WL_INFORM_MEM(("%s, tx_power_mode %d is already set\n",
+ __FUNCTION__, sar_tx_power_val));
+ err = BCME_OK;
+ goto exit;
+ }
+
+ /* Map Android TX power modes to Brcm power mode */
+ switch (sar_tx_power_val) {
+ case WIFI_POWER_SCENARIO_VOICE_CALL:
+ case WIFI_POWER_SCENARIO_DEFAULT:
+ wifi_tx_power_mode = HEAD_SAR_BACKOFF_ENABLE;
+ break;
+ case WIFI_POWER_SCENARIO_ON_HEAD_CELL_OFF:
+ wifi_tx_power_mode = GRIP_SAR_BACKOFF_DISABLE;
+ break;
+ case WIFI_POWER_SCENARIO_ON_BODY_CELL_OFF:
+ wifi_tx_power_mode = GRIP_SAR_BACKOFF_ENABLE;
+ break;
+ case WIFI_POWER_SCENARIO_ON_BODY_BT:
+ wifi_tx_power_mode = NR_mmWave_SAR_BACKOFF_ENABLE;
+ break;
+ case WIFI_POWER_SCENARIO_ON_HEAD_CELL_ON:
+ wifi_tx_power_mode = NR_Sub6_SAR_BACKOFF_DISABLE;
+ break;
+ case WIFI_POWER_SCENARIO_ON_BODY_CELL_ON:
+ wifi_tx_power_mode = NR_Sub6_SAR_BACKOFF_ENABLE;
+ break;
+ default:
+ WL_ERR(("invalid wifi tx power scenario = %d\n",
+ sar_tx_power_val));
+ err = -EINVAL;
+ goto exit;
+ }
+
+ WL_DBG(("%s, tx_power_mode %d\n", __FUNCTION__, wifi_tx_power_mode));
+ err = wldev_iovar_setint(wdev_to_ndev(wdev), "sar_enable", wifi_tx_power_mode);
+ if (unlikely(err)) {
+ WL_ERR(("%s: Failed to set sar_enable - error (%d)\n", __FUNCTION__, err));
+ goto exit;
+ }
+ /* Cache the tx power mode sent by the hal */
+ cfg->wifi_tx_power_mode = sar_tx_power_val;
+exit:
+ return err;
+}
+#endif /* WL_SAR_TX_POWER */
+
+static struct wiphy_vendor_command wl_vendor_cmds [] = {
+ {
+ {
+ .vendor_id = OUI_BRCM,
+ .subcmd = BRCM_VENDOR_SCMD_PRIV_STR
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_priv_string_handler
+ },
+#ifdef BCM_PRIV_CMD_SUPPORT
+ {
+ {
+ .vendor_id = OUI_BRCM,
+ .subcmd = BRCM_VENDOR_SCMD_BCM_STR
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_priv_bcm_handler
+ },
+#endif /* BCM_PRIV_CMD_SUPPORT */
+#if defined(WL_SAE) || defined(WL_CLIENT_SAE)
+ {
+ {
+ .vendor_id = OUI_BRCM,
+ .subcmd = BRCM_VENDOR_SCMD_BCM_PSK
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_set_sae_password
+ },
+#endif /* WL_SAE || WL_CLIENT_SAE */
+ {
+ {
+ .vendor_id = OUI_BRCM,
+ .subcmd = BRCM_VENDOR_SCMD_SET_CONNECT_PARAMS
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_connect_params_handler
+ },
+ {
+ {
+ .vendor_id = OUI_BRCM,
+ .subcmd = BRCM_VENDOR_SCMD_SET_START_AP_PARAMS
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_start_ap_params_handler
+ },
+#ifdef GSCAN_SUPPORT
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = GSCAN_SUBCMD_GET_CAPABILITIES
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_gscan_get_capabilities
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = GSCAN_SUBCMD_SET_CONFIG
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_set_scan_cfg
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = GSCAN_SUBCMD_SET_SCAN_CONFIG
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_set_batch_scan_cfg
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = GSCAN_SUBCMD_ENABLE_GSCAN
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_initiate_gscan
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = GSCAN_SUBCMD_ENABLE_FULL_SCAN_RESULTS
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_enable_full_scan_result
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = GSCAN_SUBCMD_SET_HOTLIST
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_hotlist_cfg
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = GSCAN_SUBCMD_GET_SCAN_RESULTS
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_gscan_get_batch_results
+ },
+#endif /* GSCAN_SUPPORT */
+#if defined(GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS)
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = GSCAN_SUBCMD_GET_CHANNEL_LIST
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_gscan_get_channel_list
+ },
+#endif /* GSCAN_SUPPORT || DHD_GET_VALID_CHANNELS */
+#ifdef RTT_SUPPORT
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = RTT_SUBCMD_SET_CONFIG
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_rtt_set_config
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = RTT_SUBCMD_CANCEL_CONFIG
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_rtt_cancel_config
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = RTT_SUBCMD_GETCAPABILITY
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_rtt_get_capability
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = RTT_SUBCMD_GETAVAILCHANNEL
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_rtt_get_responder_info
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = RTT_SUBCMD_SET_RESPONDER
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_rtt_set_responder
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = RTT_SUBCMD_CANCEL_RESPONDER
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_rtt_cancel_responder
+ },
+#endif /* RTT_SUPPORT */
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = ANDR_WIFI_SUBCMD_GET_FEATURE_SET
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_get_feature_set
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = ANDR_WIFI_SUBCMD_GET_FEATURE_SET_MATRIX
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_get_feature_set_matrix
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = ANDR_WIFI_RANDOM_MAC_OUI
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_set_rand_mac_oui
+ },
+#ifdef CUSTOM_FORCE_NODFS_FLAG
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = ANDR_WIFI_NODFS_CHANNELS
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_set_nodfs_flag
+ },
+#endif /* CUSTOM_FORCE_NODFS_FLAG */
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = ANDR_WIFI_SET_COUNTRY
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_set_country
+ },
+#ifdef LINKSTAT_SUPPORT
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = LSTATS_SUBCMD_GET_INFO
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_lstats_get_info
+ },
+#endif /* LINKSTAT_SUPPORT */
+
+#ifdef GSCAN_SUPPORT
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = GSCAN_SUBCMD_SET_EPNO_SSID
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_epno_cfg
+
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = WIFI_SUBCMD_SET_LAZY_ROAM_PARAMS
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_set_lazy_roam_cfg
+
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = WIFI_SUBCMD_ENABLE_LAZY_ROAM
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_enable_lazy_roam
+
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = WIFI_SUBCMD_SET_BSSID_PREF
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_set_bssid_pref
+
+ },
+#endif /* GSCAN_SUPPORT */
+#if defined(GSCAN_SUPPORT) || defined(ROAMEXP_SUPPORT)
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = WIFI_SUBCMD_SET_SSID_WHITELIST
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_set_ssid_whitelist
+
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = WIFI_SUBCMD_SET_BSSID_BLACKLIST
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_set_bssid_blacklist
+ },
+#endif /* GSCAN_SUPPORT || ROAMEXP_SUPPORT */
+#ifdef ROAMEXP_SUPPORT
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = WIFI_SUBCMD_FW_ROAM_POLICY
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_set_fw_roaming_state
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = WIFI_SUBCMD_ROAM_CAPABILITY
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_fw_roam_get_capability
+ },
+#endif /* ROAMEXP_SUPPORT */
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = DEBUG_GET_VER
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_dbg_get_version
+ },
+#ifdef DHD_LOG_DUMP
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = DEBUG_GET_FILE_DUMP_BUF
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_dbg_file_dump
+ },
+#endif /* DHD_LOG_DUMP */
+
+#ifdef DEBUGABILITY
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = DEBUG_TRIGGER_MEM_DUMP
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_dbg_trigger_mem_dump
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = DEBUG_GET_MEM_DUMP
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_dbg_get_mem_dump
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = DEBUG_START_LOGGING
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_dbg_start_logging
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = DEBUG_RESET_LOGGING
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_dbg_reset_logging
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = DEBUG_GET_RING_STATUS
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_dbg_get_ring_status
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = DEBUG_GET_RING_DATA
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_dbg_get_ring_data
+ },
+#endif /* DEBUGABILITY */
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = DEBUG_GET_FEATURE
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_dbg_get_feature
+ },
+#ifdef DBG_PKT_MON
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = DEBUG_START_PKT_FATE_MONITORING
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_dbg_start_pkt_fate_monitoring
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = DEBUG_GET_TX_PKT_FATES
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_dbg_get_tx_pkt_fates
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = DEBUG_GET_RX_PKT_FATES
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_dbg_get_rx_pkt_fates
+ },
+#endif /* DBG_PKT_MON */
+#ifdef KEEP_ALIVE
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = WIFI_OFFLOAD_SUBCMD_START_MKEEP_ALIVE
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_start_mkeep_alive
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = WIFI_OFFLOAD_SUBCMD_STOP_MKEEP_ALIVE
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_stop_mkeep_alive
+ },
+#endif /* KEEP_ALIVE */
+#ifdef WL_NAN
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = NAN_WIFI_SUBCMD_ENABLE
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_nan_start_handler
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = NAN_WIFI_SUBCMD_DISABLE
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_nan_stop_handler
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = NAN_WIFI_SUBCMD_CONFIG
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_nan_config_handler
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = NAN_WIFI_SUBCMD_REQUEST_PUBLISH
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_nan_req_publish
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = NAN_WIFI_SUBCMD_REQUEST_SUBSCRIBE
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_nan_req_subscribe
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = NAN_WIFI_SUBCMD_CANCEL_PUBLISH
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_nan_cancel_publish
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = NAN_WIFI_SUBCMD_CANCEL_SUBSCRIBE
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_nan_cancel_subscribe
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = NAN_WIFI_SUBCMD_TRANSMIT
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_nan_transmit
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = NAN_WIFI_SUBCMD_GET_CAPABILITIES
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_nan_get_capablities
+ },
+
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = NAN_WIFI_SUBCMD_DATA_PATH_IFACE_CREATE
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_nan_data_path_iface_create
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = NAN_WIFI_SUBCMD_DATA_PATH_IFACE_DELETE
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_nan_data_path_iface_delete
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = NAN_WIFI_SUBCMD_DATA_PATH_REQUEST
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_nan_data_path_request
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = NAN_WIFI_SUBCMD_DATA_PATH_RESPONSE
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_nan_data_path_response
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = NAN_WIFI_SUBCMD_DATA_PATH_END
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_nan_data_path_end
+ },
+#ifdef WL_NAN_DISC_CACHE
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = NAN_WIFI_SUBCMD_DATA_PATH_SEC_INFO
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_nan_data_path_sec_info
+ },
+#endif /* WL_NAN_DISC_CACHE */
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = NAN_WIFI_SUBCMD_VERSION_INFO
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_nan_version_info
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = NAN_WIFI_SUBCMD_ENABLE_MERGE
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_nan_enable_merge
+ },
+#endif /* WL_NAN */
+#if defined(PKT_FILTER_SUPPORT) && defined(APF)
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = APF_SUBCMD_GET_CAPABILITIES
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_apf_get_capabilities
+ },
+
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = APF_SUBCMD_SET_FILTER
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_apf_set_filter
+ },
+#endif /* PKT_FILTER_SUPPORT && APF */
+#ifdef NDO_CONFIG_SUPPORT
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = WIFI_SUBCMD_CONFIG_ND_OFFLOAD
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_configure_nd_offload
+ },
+#endif /* NDO_CONFIG_SUPPORT */
+#ifdef RSSI_MONITOR_SUPPORT
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = WIFI_SUBCMD_SET_RSSI_MONITOR
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_set_rssi_monitor
+ },
+#endif /* RSSI_MONITOR_SUPPORT */
+#ifdef DHD_WAKE_STATUS
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = DEBUG_GET_WAKE_REASON_STATS
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_get_wake_reason_stats
+ },
+#endif /* DHD_WAKE_STATUS */
+#ifdef DHDTCPACK_SUPPRESS
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = WIFI_SUBCMD_CONFIG_TCPACK_SUP
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_set_tcpack_sup_mode
+ },
+#endif /* DHDTCPACK_SUPPRESS */
+#if !defined(BCMSUP_4WAY_HANDSHAKE) || (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0))
+ {
+ {
+ .vendor_id = OUI_BRCM,
+ .subcmd = BRCM_VENDOR_SCMD_SET_PMK
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_set_pmk
+ },
+#endif /* !BCMSUP_4WAY_HANDSHAKE || LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0) */
+ {
+ {
+ .vendor_id = OUI_BRCM,
+ .subcmd = BRCM_VENDOR_SCMD_GET_FEATURES
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_get_driver_feature
+ },
+#if defined(WL_CFG80211) && defined(DHD_FILE_DUMP_EVENT)
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = DEBUG_FILE_DUMP_DONE_IND
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_notify_dump_completion
+ },
+#endif /* WL_CFG80211 && DHD_FILE_DUMP_EVENT */
+#if defined(WL_CFG80211)
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = DEBUG_SET_HAL_START
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_set_hal_started
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = DEBUG_SET_HAL_STOP
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_stop_hal
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = DEBUG_SET_HAL_PID
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_set_hal_pid
+ },
+#endif /* WL_CFG80211 */
+#ifdef WL_LATENCY_MODE
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = WIFI_SUBCMD_SET_LATENCY_MODE
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_set_latency_mode
+ },
+#endif /* WL_LATENCY_MODE */
+#ifdef WL_P2P_RAND
+ {
+ {
+ .vendor_id = OUI_BRCM,
+ .subcmd = BRCM_VENDOR_SCMD_SET_MAC
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV,
+ .doit = wl_cfgvendor_set_p2p_rand_mac
+ },
+#endif /* WL_P2P_RAND */
+#ifdef WL_SAR_TX_POWER
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = WIFI_SUBCMD_TX_POWER_SCENARIO
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV,
+ .doit = wl_cfgvendor_tx_power_scenario
+ }
+#endif /* WL_SAR_TX_POWER */
+
+};
+
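+/*
+ * Vendor events advertised to cfg80211. The entries below must stay in the
+ * same order as the wl_vendor_event_t enum in wl_cfgvendor.h: the array
+ * index is the event type that cfg80211 reports to userspace, so reordering
+ * either side would break the driver/HAL mapping.
+ */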
+static const struct nl80211_vendor_cmd_info wl_vendor_events [] = {
+ { OUI_BRCM, BRCM_VENDOR_EVENT_UNSPEC },
+ { OUI_BRCM, BRCM_VENDOR_EVENT_PRIV_STR },
+ { OUI_GOOGLE, GOOGLE_GSCAN_SIGNIFICANT_EVENT },
+ { OUI_GOOGLE, GOOGLE_GSCAN_GEOFENCE_FOUND_EVENT },
+ { OUI_GOOGLE, GOOGLE_GSCAN_BATCH_SCAN_EVENT },
+ { OUI_GOOGLE, GOOGLE_SCAN_FULL_RESULTS_EVENT },
+ { OUI_GOOGLE, GOOGLE_RTT_COMPLETE_EVENT },
+ { OUI_GOOGLE, GOOGLE_SCAN_COMPLETE_EVENT },
+ { OUI_GOOGLE, GOOGLE_GSCAN_GEOFENCE_LOST_EVENT },
+ { OUI_GOOGLE, GOOGLE_SCAN_EPNO_EVENT },
+ { OUI_GOOGLE, GOOGLE_DEBUG_RING_EVENT },
+ { OUI_GOOGLE, GOOGLE_FW_DUMP_EVENT },
+ { OUI_GOOGLE, GOOGLE_PNO_HOTSPOT_FOUND_EVENT },
+ { OUI_GOOGLE, GOOGLE_RSSI_MONITOR_EVENT },
+ { OUI_GOOGLE, GOOGLE_MKEEP_ALIVE_EVENT },
+ { OUI_GOOGLE, GOOGLE_NAN_EVENT_ENABLED},
+ { OUI_GOOGLE, GOOGLE_NAN_EVENT_DISABLED},
+ { OUI_GOOGLE, GOOGLE_NAN_EVENT_SUBSCRIBE_MATCH},
+ { OUI_GOOGLE, GOOGLE_NAN_EVENT_REPLIED},
+ { OUI_GOOGLE, GOOGLE_NAN_EVENT_PUBLISH_TERMINATED},
+ { OUI_GOOGLE, GOOGLE_NAN_EVENT_SUBSCRIBE_TERMINATED},
+ { OUI_GOOGLE, GOOGLE_NAN_EVENT_DE_EVENT},
+ { OUI_GOOGLE, GOOGLE_NAN_EVENT_FOLLOWUP},
+ { OUI_GOOGLE, GOOGLE_NAN_EVENT_TRANSMIT_FOLLOWUP_IND},
+ { OUI_GOOGLE, GOOGLE_NAN_EVENT_DATA_REQUEST},
+ { OUI_GOOGLE, GOOGLE_NAN_EVENT_DATA_CONFIRMATION},
+ { OUI_GOOGLE, GOOGLE_NAN_EVENT_DATA_END},
+ { OUI_GOOGLE, GOOGLE_NAN_EVENT_BEACON},
+ { OUI_GOOGLE, GOOGLE_NAN_EVENT_SDF},
+ { OUI_GOOGLE, GOOGLE_NAN_EVENT_TCA},
+ { OUI_GOOGLE, GOOGLE_NAN_EVENT_SUBSCRIBE_UNMATCH},
+ { OUI_GOOGLE, GOOGLE_NAN_EVENT_UNKNOWN},
+ { OUI_GOOGLE, GOOGLE_ROAM_EVENT_START},
+ { OUI_BRCM, BRCM_VENDOR_EVENT_HANGED},
+ { OUI_BRCM, BRCM_VENDOR_EVENT_SAE_KEY},
+ { OUI_BRCM, BRCM_VENDOR_EVENT_BEACON_RECV},
+ { OUI_BRCM, BRCM_VENDOR_EVENT_PORT_AUTHORIZED},
+ { OUI_GOOGLE, GOOGLE_FILE_DUMP_EVENT },
+ { OUI_BRCM, BRCM_VENDOR_EVENT_CU},
+ { OUI_BRCM, BRCM_VENDOR_EVENT_WIPS},
+ { OUI_GOOGLE, NAN_ASYNC_RESPONSE_DISABLED},
+ { OUI_BRCM, BRCM_VENDOR_EVENT_RCC_INFO}
+};
+
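+/*
+ * From Linux 5.3 each wiphy vendor command carries a netlink attribute
+ * policy. This driver does its own attribute parsing, so every command is
+ * stamped with VENDOR_CMD_RAW_DATA, which tells nl80211 to hand the payload
+ * over unvalidated.
+ */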
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
+static void
+wl_cfgvendor_apply_cmd_policy(struct wiphy *wiphy)
+{
+ int i;
+ u32 n_cmds = wiphy->n_vendor_commands;
+
+ WL_INFORM(("Apply CMD_RAW_DATA policy\n"));
+ for (i = 0; i < n_cmds; i++) {
+ wl_vendor_cmds[i].policy = VENDOR_CMD_RAW_DATA;
+ }
+}
+#endif /* LINUX VER >= 5.3 */
+
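+/*
+ * Plug the vendor command/event tables into the wiphy. This is expected to
+ * run before wiphy_register() so the tables are advertised to userspace;
+ * wl_cfgvendor_detach() clears them again on teardown.
+ */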
+int wl_cfgvendor_attach(struct wiphy *wiphy, dhd_pub_t *dhd)
+{
+ WL_INFORM_MEM(("Vendor: Register BRCM cfg80211 vendor cmd(0x%x) interface \n",
+ NL80211_CMD_VENDOR));
+
+ wiphy->vendor_commands = wl_vendor_cmds;
+ wiphy->n_vendor_commands = ARRAY_SIZE(wl_vendor_cmds);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
+ wl_cfgvendor_apply_cmd_policy(wiphy);
+#endif /* LINUX VER >= 5.3 */
+
+ wiphy->vendor_events = wl_vendor_events;
+ wiphy->n_vendor_events = ARRAY_SIZE(wl_vendor_events);
+
+#ifdef DEBUGABILITY
+ dhd_os_dbg_register_callback(FW_VERBOSE_RING_ID, wl_cfgvendor_dbg_ring_send_evt);
+ dhd_os_dbg_register_callback(DHD_EVENT_RING_ID, wl_cfgvendor_dbg_ring_send_evt);
+#ifdef DHD_DEBUGABILITY_LOG_DUMP_RING
+ dhd_os_dbg_register_callback(DRIVER_LOG_RING_ID, wl_cfgvendor_dbg_ring_send_evt);
+ dhd_os_dbg_register_callback(ROAM_STATS_RING_ID, wl_cfgvendor_dbg_ring_send_evt);
+#endif /* DHD_DEBUGABILITY_LOG_DUMP_RING */
+#endif /* DEBUGABILITY */
+#ifdef DHD_LOG_DUMP
+ dhd_os_dbg_register_urgent_notifier(dhd, wl_cfgvendor_dbg_send_file_dump_evt);
+#endif /* DHD_LOG_DUMP */
+
+ return 0;
+}
+
+int wl_cfgvendor_detach(struct wiphy *wiphy)
+{
+ WL_INFORM_MEM(("Vendor: Unregister BRCM cfg80211 vendor interface \n"));
+
+ wiphy->vendor_commands = NULL;
+ wiphy->vendor_events = NULL;
+ wiphy->n_vendor_commands = 0;
+ wiphy->n_vendor_events = 0;
+
+ return 0;
+}
+#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT) */
+
+#ifdef WL_CFGVENDOR_SEND_HANG_EVENT
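+/*
+ * Build and send the extended hang-event payload: a space-separated string
+ * of the form "<reason> <version> <timestamp> <field>...", padded with
+ * HANG_RAW_DEL-delimited zero fields up to HANG_FIELD_CNT_MAX so the HAL
+ * always receives a fixed number of fields.
+ */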
+void
+wl_cfgvendor_send_hang_event(struct net_device *dev, u16 reason, char *string, int hang_info_cnt)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ struct wiphy *wiphy;
+ char *hang_info;
+ int len = 0;
+ int bytes_written;
+ uint32 dummy_data = 0;
+ int reason_hang_info = 0;
+ int cnt = 0;
+ dhd_pub_t *dhd;
+ int hang_reason_mismatch = FALSE;
+
+ if (!cfg || !cfg->wdev) {
+ WL_ERR(("cfg=%p wdev=%p\n", cfg, (cfg ? cfg->wdev : NULL)));
+ return;
+ }
+
+ wiphy = cfg->wdev->wiphy;
+
+ if (!wiphy) {
+ WL_ERR(("wiphy is NULL\n"));
+ return;
+ }
+
+ hang_info = MALLOCZ(cfg->osh, VENDOR_SEND_HANG_EXT_INFO_LEN);
+ if (hang_info == NULL) {
+ WL_ERR(("alloc hang_info failed\n"));
+ return;
+ }
+
+ dhd = (dhd_pub_t *)(cfg->pub);
+
+#ifdef WL_BCNRECV
+	/* If a fake AP scan (beacon receive) is in progress, stop it first */
+ if (cfg->bcnrecv_info.bcnrecv_state == BEACON_RECV_STARTED) {
+ wl_android_bcnrecv_stop(dev, WL_BCNRECV_HANG);
+ }
+#endif /* WL_BCNRECV */
+ sscanf(string, "%d", &reason_hang_info);
+ bytes_written = 0;
+ len = VENDOR_SEND_HANG_EXT_INFO_LEN - bytes_written;
+ if (strlen(string) == 0 || (reason_hang_info != reason)) {
+ WL_ERR(("hang reason mismatch: string len %d reason_hang_info %d\n",
+ (int)strlen(string), reason_hang_info));
+ hang_reason_mismatch = TRUE;
+ if (dhd) {
+ get_debug_dump_time(dhd->debug_dump_time_hang_str);
+ copy_debug_dump_time(dhd->debug_dump_time_str,
+ dhd->debug_dump_time_hang_str);
+ }
+		/* Fill the bigdata key fields; unavailable values are padded with zeros */
+ bytes_written += scnprintf(&hang_info[bytes_written], len,
+ "%d %d %s %08x %08x %08x %08x %08x %08x %08x",
+ reason, VENDOR_SEND_HANG_EXT_INFO_VER,
+ dhd->debug_dump_time_hang_str,
+ 0, 0, 0, 0, 0, 0, 0);
+ if (dhd) {
+ clear_debug_dump_time(dhd->debug_dump_time_hang_str);
+ }
+ } else {
+ bytes_written += scnprintf(&hang_info[bytes_written], len, "%s", string);
+ }
+
+ WL_ERR(("hang reason: %d info cnt: %d\n", reason, hang_info_cnt));
+
+ if (hang_reason_mismatch == FALSE) {
+ cnt = hang_info_cnt;
+ } else {
+ cnt = HANG_FIELD_MISMATCH_CNT;
+ }
+
+ while (cnt < HANG_FIELD_CNT_MAX) {
+ len = VENDOR_SEND_HANG_EXT_INFO_LEN - bytes_written;
+ if (len <= 0) {
+ break;
+ }
+ bytes_written += scnprintf(&hang_info[bytes_written], len,
+ "%c%08x", HANG_RAW_DEL, dummy_data);
+ cnt++;
+ }
+
+ WL_ERR(("hang info cnt: %d len: %d\n", cnt, (int)strlen(hang_info)));
+ WL_ERR(("hang info data: %s\n", hang_info));
+
+ wl_cfgvendor_send_async_event(wiphy,
+ bcmcfg_to_prmry_ndev(cfg), BRCM_VENDOR_EVENT_HANGED,
+ hang_info, (int)strlen(hang_info));
+
+ memset(string, 0, VENDOR_SEND_HANG_EXT_INFO_LEN);
+
+ if (hang_info) {
+ MFREE(cfg->osh, hang_info, VENDOR_SEND_HANG_EXT_INFO_LEN);
+ }
+
+#ifdef DHD_LOG_DUMP
+ dhd_logdump_cookie_save(dhd, dhd->debug_dump_time_hang_str, "HANG");
+#endif /* DHD_LOG_DUMP */
+
+ if (dhd) {
+ clear_debug_dump_time(dhd->debug_dump_time_str);
+ }
+}
+
+void
+wl_cfgvendor_simple_hang_event(struct net_device *dev, u16 reason)
+{
+ struct bcm_cfg80211 *cfg;
+ struct wiphy *wiphy;
+ struct sk_buff *msg;
+ gfp_t kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+ int hang_event_len = 0;
+#ifdef DHD_COREDUMP
+ dhd_pub_t *dhd;
+#endif
+ WL_ERR(("0x%x\n", reason));
+
+ cfg = wl_cfg80211_get_bcmcfg();
+ if (!cfg || !cfg->wdev) {
+ WL_ERR(("fw dump evt invalid arg\n"));
+ return;
+ }
+
+ wiphy = bcmcfg_to_wiphy(cfg);
+ if (!wiphy) {
+ WL_ERR(("wiphy is NULL\n"));
+ return;
+ }
+
+#ifdef DHD_COREDUMP
+ hang_event_len = DHD_MEMDUMP_LONGSTR_LEN;
+#endif
+
+ /* Allocate the skb for vendor event */
+ msg = CFG80211_VENDOR_EVENT_ALLOC(wiphy, ndev_to_wdev(dev),
+ hang_event_len, BRCM_VENDOR_EVENT_HANGED, kflags);
+ if (!msg) {
+		WL_ERR(("%s: failed to allocate skb for vendor event\n", __FUNCTION__));
+ return;
+ }
+
+#ifdef DHD_COREDUMP
+ dhd = (dhd_pub_t *)(cfg->pub);
+
+ WL_ERR(("hang reason: %s\n", dhd->memdump_str));
+ nla_put(msg, DEBUG_ATTRIBUTE_HANG_REASON, DHD_MEMDUMP_LONGSTR_LEN, dhd->memdump_str);
+#endif
+
+ cfg80211_vendor_event(msg, kflags);
+ return;
+}
+
+void
+wl_copy_hang_info_if_falure(struct net_device *dev, u16 reason, s32 ret)
+{
+ struct bcm_cfg80211 *cfg = NULL;
+ dhd_pub_t *dhd;
+ s32 err = 0;
+ char ioctl_buf[WLC_IOCTL_SMLEN];
+ memuse_info_t mu;
+ int bytes_written = 0;
+ int remain_len = 0;
+
+ if (!dev) {
+ WL_ERR(("dev is null"));
+ return;
+
+ }
+
+ cfg = wl_get_cfg(dev);
+ if (!cfg) {
+ WL_ERR(("dev=%p cfg=%p\n", dev, cfg));
+ return;
+ }
+
+ dhd = (dhd_pub_t *)(cfg->pub);
+
+ if (!dhd || !dhd->hang_info) {
+ WL_ERR(("%s dhd=%p hang_info=%p\n", __FUNCTION__,
+ dhd, (dhd ? dhd->hang_info : NULL)));
+ return;
+ }
+
+ err = wldev_iovar_getbuf_bsscfg(dev, "memuse",
+ NULL, 0, ioctl_buf, WLC_IOCTL_SMLEN, 0, NULL);
+ if (unlikely(err)) {
+ WL_ERR(("error (%d)\n", err));
+ return;
+ }
+
+ memcpy(&mu, ioctl_buf, sizeof(memuse_info_t));
+
+ if (mu.len >= sizeof(memuse_info_t)) {
+ WL_ERR(("Heap Total: %d(%dK)\n", mu.arena_size, KB(mu.arena_size)));
+ WL_ERR(("Free: %d(%dK), LWM: %d(%dK)\n",
+ mu.arena_free, KB(mu.arena_free),
+ mu.free_lwm, KB(mu.free_lwm)));
+ WL_ERR(("In use: %d(%dK), HWM: %d(%dK)\n",
+ mu.inuse_size, KB(mu.inuse_size),
+ mu.inuse_hwm, KB(mu.inuse_hwm)));
+ WL_ERR(("Malloc failure count: %d\n", mu.mf_count));
+ }
+
+ memset(dhd->hang_info, 0, VENDOR_SEND_HANG_EXT_INFO_LEN);
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - bytes_written;
+
+ get_debug_dump_time(dhd->debug_dump_time_hang_str);
+ copy_debug_dump_time(dhd->debug_dump_time_str, dhd->debug_dump_time_hang_str);
+
+ bytes_written += scnprintf(&dhd->hang_info[bytes_written], remain_len,
+ "%d %d %s %d %d %d %d %d %08x %08x",
+ reason, VENDOR_SEND_HANG_EXT_INFO_VER,
+ dhd->debug_dump_time_hang_str,
+ ret, mu.arena_size, mu.arena_free, mu.inuse_size, mu.mf_count, 0, 0);
+
+ dhd->hang_info_cnt = HANG_FIELD_IF_FAILURE_CNT;
+
+ clear_debug_dump_time(dhd->debug_dump_time_hang_str);
+
+ return;
+}
+#endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
diff --git a/bcmdhd.101.10.361.x/wl_cfgvendor.h b/bcmdhd.101.10.361.x/wl_cfgvendor.h
new file mode 100755
index 0000000..581ab11
--- /dev/null
+++ b/bcmdhd.101.10.361.x/wl_cfgvendor.h
@@ -0,0 +1,855 @@
+/*
+ * Linux cfg80211 Vendor Extension Code
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+/*
+ * New vendor interface addition to nl80211/cfg80211 to allow vendors
+ * to implement proprietary features over the cfg80211 stack.
+ */
+
+#ifndef _wl_cfgvendor_h_
+#define _wl_cfgvendor_h_
+
+#define OUI_BRCM 0x001018
+#define OUI_GOOGLE 0x001A11
+#define BRCM_VENDOR_SUBCMD_PRIV_STR 1
+#define ATTRIBUTE_U32_LEN (NLA_HDRLEN + 4)
+#define VENDOR_ID_OVERHEAD ATTRIBUTE_U32_LEN
+#define VENDOR_SUBCMD_OVERHEAD ATTRIBUTE_U32_LEN
+#define VENDOR_DATA_OVERHEAD (NLA_HDRLEN)
+#define ETHERTYPE_IP 0x0800 /* IP */
+#define ETHERTYPE_IPV6 0x86dd /* IP protocol version 6 */
+
+enum brcm_vendor_attr {
+ BRCM_ATTR_DRIVER_CMD = 0,
+ BRCM_ATTR_DRIVER_KEY_PMK = 1,
+ BRCM_ATTR_DRIVER_FEATURE_FLAGS = 2,
+ BRCM_ATTR_DRIVER_RAND_MAC = 3,
+ BRCM_ATTR_SAE_PWE = 4,
+ BRCM_ATTR_DRIVER_MAX = 5
+};
+
+enum brcm_wlan_vendor_features {
+ BRCM_WLAN_VENDOR_FEATURE_KEY_MGMT_OFFLOAD = 0,
+ BRCM_WLAN_VENDOR_FEATURES_MAX = 1
+};
+
+typedef enum wifi_error {
+ WIFI_SUCCESS = 0,
+ WIFI_ERROR_NONE = 0,
+ WIFI_ERROR_UNKNOWN = -1,
+ WIFI_ERROR_UNINITIALIZED = -2,
+ WIFI_ERROR_NOT_SUPPORTED = -3,
+ WIFI_ERROR_NOT_AVAILABLE = -4,
+ WIFI_ERROR_INVALID_ARGS = -5,
+ WIFI_ERROR_INVALID_REQUEST_ID = -6,
+ WIFI_ERROR_TIMED_OUT = -7,
+ WIFI_ERROR_TOO_MANY_REQUESTS = -8,
+ WIFI_ERROR_OUT_OF_MEMORY = -9,
+ WIFI_ERROR_BUSY = -10
+} wifi_error_t;
+
+#define SCAN_RESULTS_COMPLETE_FLAG_LEN ATTRIBUTE_U32_LEN
+#define SCAN_INDEX_HDR_LEN (NLA_HDRLEN)
+#define SCAN_ID_HDR_LEN ATTRIBUTE_U32_LEN
+#define SCAN_FLAGS_HDR_LEN ATTRIBUTE_U32_LEN
+#define GSCAN_NUM_RESULTS_HDR_LEN ATTRIBUTE_U32_LEN
+#define GSCAN_CH_BUCKET_MASK_HDR_LEN ATTRIBUTE_U32_LEN
+#define GSCAN_RESULTS_HDR_LEN (NLA_HDRLEN)
+#define GSCAN_BATCH_RESULT_HDR_LEN (SCAN_INDEX_HDR_LEN + SCAN_ID_HDR_LEN + \
+ SCAN_FLAGS_HDR_LEN + \
+ GSCAN_NUM_RESULTS_HDR_LEN + \
+ GSCAN_CH_BUCKET_MASK_HDR_LEN + \
+ GSCAN_RESULTS_HDR_LEN)
+
+#define VENDOR_REPLY_OVERHEAD (VENDOR_ID_OVERHEAD + \
+ VENDOR_SUBCMD_OVERHEAD + \
+ VENDOR_DATA_OVERHEAD)
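+/*
+ * Sizing note (illustrative): NLA_HDRLEN is 4 on typical builds, so each
+ * u32 attribute occupies 8 bytes on the wire and VENDOR_REPLY_OVERHEAD
+ * works out to 8 + 8 + 4 = 20 bytes of netlink framing per reply.
+ */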
+
+#define GSCAN_ATTR_SET1 10
+#define GSCAN_ATTR_SET2 20
+#define GSCAN_ATTR_SET3 30
+#define GSCAN_ATTR_SET4 40
+#define GSCAN_ATTR_SET5 50
+#define GSCAN_ATTR_SET6 60
+#define GSCAN_ATTR_SET7 70
+#define GSCAN_ATTR_SET8 80
+#define GSCAN_ATTR_SET9 90
+#define GSCAN_ATTR_SET10 100
+#define GSCAN_ATTR_SET11 110
+#define GSCAN_ATTR_SET12 120
+#define GSCAN_ATTR_SET13 130
+#define GSCAN_ATTR_SET14 140
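+/*
+ * Each GSCAN_ATTR_SETn constant reserves a block of ten attribute IDs for
+ * one logical group in enum gscan_attributes below, so a group can grow
+ * without renumbering the groups that follow it.
+ */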
+
+#define NAN_SVC_INFO_LEN 255
+#define NAN_SID_ENABLE_FLAG_INVALID 0xff
+#define NAN_SID_BEACON_COUNT_INVALID 0xff
+#define WL_NAN_DW_INTERVAL 512
+
+#define CFG80211_VENDOR_CMD_REPLY_SKB_SZ 100
+#define CFG80211_VENDOR_EVT_SKB_SZ 2048
+
+#define SUPP_SAE_PWE_LOOP 0x00
+#define SUPP_SAE_PWE_H2E 0x01
+#define SUPP_SAE_PWE_TRANS 0x02
+
+typedef enum {
+ /* don't use 0 as a valid subcommand */
+ VENDOR_NL80211_SUBCMD_UNSPECIFIED,
+
+ /* define all vendor startup commands between 0x0 and 0x0FFF */
+ VENDOR_NL80211_SUBCMD_RANGE_START = 0x0001,
+ VENDOR_NL80211_SUBCMD_RANGE_END = 0x0FFF,
+
+ /* define all GScan related commands between 0x1000 and 0x10FF */
+ ANDROID_NL80211_SUBCMD_GSCAN_RANGE_START = 0x1000,
+ ANDROID_NL80211_SUBCMD_GSCAN_RANGE_END = 0x10FF,
+
+ /* define all RTT related commands between 0x1100 and 0x11FF */
+ ANDROID_NL80211_SUBCMD_RTT_RANGE_START = 0x1100,
+ ANDROID_NL80211_SUBCMD_RTT_RANGE_END = 0x11FF,
+
+ ANDROID_NL80211_SUBCMD_LSTATS_RANGE_START = 0x1200,
+ ANDROID_NL80211_SUBCMD_LSTATS_RANGE_END = 0x12FF,
+
+ ANDROID_NL80211_SUBCMD_TDLS_RANGE_START = 0x1300,
+ ANDROID_NL80211_SUBCMD_TDLS_RANGE_END = 0x13FF,
+
+ ANDROID_NL80211_SUBCMD_DEBUG_RANGE_START = 0x1400,
+ ANDROID_NL80211_SUBCMD_DEBUG_RANGE_END = 0x14FF,
+
+ /* define all NearbyDiscovery related commands between 0x1500 and 0x15FF */
+ ANDROID_NL80211_SUBCMD_NBD_RANGE_START = 0x1500,
+ ANDROID_NL80211_SUBCMD_NBD_RANGE_END = 0x15FF,
+
+ /* define all wifi calling related commands between 0x1600 and 0x16FF */
+ ANDROID_NL80211_SUBCMD_WIFI_OFFLOAD_RANGE_START = 0x1600,
+ ANDROID_NL80211_SUBCMD_WIFI_OFFLOAD_RANGE_END = 0x16FF,
+
+ /* define all NAN related commands between 0x1700 and 0x17FF */
+ ANDROID_NL80211_SUBCMD_NAN_RANGE_START = 0x1700,
+ ANDROID_NL80211_SUBCMD_NAN_RANGE_END = 0x17FF,
+
+ /* define all packet filter related commands between 0x1800 and 0x18FF */
+ ANDROID_NL80211_SUBCMD_PKT_FILTER_RANGE_START = 0x1800,
+ ANDROID_NL80211_SUBCMD_PKT_FILTER_RANGE_END = 0x18FF,
+
+ /* define all tx power related commands between 0x1900 and 0x19FF */
+ ANDROID_NL80211_SUBCMD_TX_POWER_RANGE_START = 0x1900,
+ ANDROID_NL80211_SUBCMD_TX_POWER_RANGE_END = 0x19FF,
+
+	/* Reserved for future use */
+
+} ANDROID_VENDOR_SUB_COMMAND;
+
+enum andr_vendor_subcmd {
+ GSCAN_SUBCMD_GET_CAPABILITIES = ANDROID_NL80211_SUBCMD_GSCAN_RANGE_START,
+ GSCAN_SUBCMD_SET_CONFIG,
+ GSCAN_SUBCMD_SET_SCAN_CONFIG,
+ GSCAN_SUBCMD_ENABLE_GSCAN,
+ GSCAN_SUBCMD_GET_SCAN_RESULTS,
+ GSCAN_SUBCMD_SCAN_RESULTS,
+ GSCAN_SUBCMD_SET_HOTLIST,
+ GSCAN_SUBCMD_SET_SIGNIFICANT_CHANGE_CONFIG,
+ GSCAN_SUBCMD_ENABLE_FULL_SCAN_RESULTS,
+ GSCAN_SUBCMD_GET_CHANNEL_LIST,
+ /* ANDR_WIFI_XXX although not related to gscan are defined here */
+ ANDR_WIFI_SUBCMD_GET_FEATURE_SET,
+ ANDR_WIFI_SUBCMD_GET_FEATURE_SET_MATRIX,
+ ANDR_WIFI_RANDOM_MAC_OUI,
+ ANDR_WIFI_NODFS_CHANNELS,
+ ANDR_WIFI_SET_COUNTRY,
+ GSCAN_SUBCMD_SET_EPNO_SSID,
+ WIFI_SUBCMD_SET_SSID_WHITELIST,
+ WIFI_SUBCMD_SET_LAZY_ROAM_PARAMS,
+ WIFI_SUBCMD_ENABLE_LAZY_ROAM,
+ WIFI_SUBCMD_SET_BSSID_PREF,
+ WIFI_SUBCMD_SET_BSSID_BLACKLIST,
+ GSCAN_SUBCMD_ANQPO_CONFIG,
+ WIFI_SUBCMD_SET_RSSI_MONITOR,
+ WIFI_SUBCMD_CONFIG_ND_OFFLOAD,
+ WIFI_SUBCMD_CONFIG_TCPACK_SUP,
+ WIFI_SUBCMD_FW_ROAM_POLICY,
+ WIFI_SUBCMD_ROAM_CAPABILITY,
+ WIFI_SUBCMD_SET_LATENCY_MODE,
+ RTT_SUBCMD_SET_CONFIG = ANDROID_NL80211_SUBCMD_RTT_RANGE_START,
+ RTT_SUBCMD_CANCEL_CONFIG,
+ RTT_SUBCMD_GETCAPABILITY,
+ RTT_SUBCMD_GETAVAILCHANNEL,
+ RTT_SUBCMD_SET_RESPONDER,
+ RTT_SUBCMD_CANCEL_RESPONDER,
+ LSTATS_SUBCMD_GET_INFO = ANDROID_NL80211_SUBCMD_LSTATS_RANGE_START,
+
+ DEBUG_START_LOGGING = ANDROID_NL80211_SUBCMD_DEBUG_RANGE_START,
+ DEBUG_TRIGGER_MEM_DUMP,
+ DEBUG_GET_MEM_DUMP,
+ DEBUG_GET_VER,
+ DEBUG_GET_RING_STATUS,
+ DEBUG_GET_RING_DATA,
+ DEBUG_GET_FEATURE,
+ DEBUG_RESET_LOGGING,
+
+ DEBUG_TRIGGER_DRIVER_MEM_DUMP,
+ DEBUG_GET_DRIVER_MEM_DUMP,
+ DEBUG_START_PKT_FATE_MONITORING,
+ DEBUG_GET_TX_PKT_FATES,
+ DEBUG_GET_RX_PKT_FATES,
+ DEBUG_GET_WAKE_REASON_STATS,
+ DEBUG_GET_FILE_DUMP_BUF,
+ DEBUG_FILE_DUMP_DONE_IND,
+ DEBUG_SET_HAL_START,
+ DEBUG_SET_HAL_STOP,
+ DEBUG_SET_HAL_PID,
+
+ WIFI_OFFLOAD_SUBCMD_START_MKEEP_ALIVE = ANDROID_NL80211_SUBCMD_WIFI_OFFLOAD_RANGE_START,
+ WIFI_OFFLOAD_SUBCMD_STOP_MKEEP_ALIVE,
+
+ NAN_WIFI_SUBCMD_ENABLE = ANDROID_NL80211_SUBCMD_NAN_RANGE_START, /* 0x1700 */
+ NAN_WIFI_SUBCMD_DISABLE, /* 0x1701 */
+ NAN_WIFI_SUBCMD_REQUEST_PUBLISH, /* 0x1702 */
+ NAN_WIFI_SUBCMD_REQUEST_SUBSCRIBE, /* 0x1703 */
+ NAN_WIFI_SUBCMD_CANCEL_PUBLISH, /* 0x1704 */
+ NAN_WIFI_SUBCMD_CANCEL_SUBSCRIBE, /* 0x1705 */
+ NAN_WIFI_SUBCMD_TRANSMIT, /* 0x1706 */
+ NAN_WIFI_SUBCMD_CONFIG, /* 0x1707 */
+ NAN_WIFI_SUBCMD_TCA, /* 0x1708 */
+ NAN_WIFI_SUBCMD_STATS, /* 0x1709 */
+ NAN_WIFI_SUBCMD_GET_CAPABILITIES, /* 0x170A */
+ NAN_WIFI_SUBCMD_DATA_PATH_IFACE_CREATE, /* 0x170B */
+ NAN_WIFI_SUBCMD_DATA_PATH_IFACE_DELETE, /* 0x170C */
+ NAN_WIFI_SUBCMD_DATA_PATH_REQUEST, /* 0x170D */
+ NAN_WIFI_SUBCMD_DATA_PATH_RESPONSE, /* 0x170E */
+ NAN_WIFI_SUBCMD_DATA_PATH_END, /* 0x170F */
+ NAN_WIFI_SUBCMD_DATA_PATH_SEC_INFO, /* 0x1710 */
+ NAN_WIFI_SUBCMD_VERSION_INFO, /* 0x1711 */
+ NAN_WIFI_SUBCMD_ENABLE_MERGE, /* 0x1712 */
+ APF_SUBCMD_GET_CAPABILITIES = ANDROID_NL80211_SUBCMD_PKT_FILTER_RANGE_START,
+ APF_SUBCMD_SET_FILTER,
+ WIFI_SUBCMD_TX_POWER_SCENARIO = ANDROID_NL80211_SUBCMD_TX_POWER_RANGE_START,
+ /* Add more sub commands here */
+ VENDOR_SUBCMD_MAX
+};
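+/*
+ * The subcommand values auto-increment from the range starts defined above,
+ * so the numeric IDs are part of the driver/HAL ABI: new entries must be
+ * appended at the end of their range, never inserted in the middle.
+ */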
+
+enum gscan_attributes {
+ GSCAN_ATTRIBUTE_NUM_BUCKETS = GSCAN_ATTR_SET1,
+ GSCAN_ATTRIBUTE_BASE_PERIOD,
+ GSCAN_ATTRIBUTE_BUCKETS_BAND,
+ GSCAN_ATTRIBUTE_BUCKET_ID,
+ GSCAN_ATTRIBUTE_BUCKET_PERIOD,
+ GSCAN_ATTRIBUTE_BUCKET_NUM_CHANNELS,
+ GSCAN_ATTRIBUTE_BUCKET_CHANNELS,
+ GSCAN_ATTRIBUTE_NUM_AP_PER_SCAN,
+ GSCAN_ATTRIBUTE_REPORT_THRESHOLD,
+ GSCAN_ATTRIBUTE_NUM_SCANS_TO_CACHE,
+ GSCAN_ATTRIBUTE_BAND = GSCAN_ATTRIBUTE_BUCKETS_BAND,
+
+ GSCAN_ATTRIBUTE_ENABLE_FEATURE = GSCAN_ATTR_SET2,
+ GSCAN_ATTRIBUTE_SCAN_RESULTS_COMPLETE,
+ GSCAN_ATTRIBUTE_FLUSH_FEATURE,
+ GSCAN_ATTRIBUTE_ENABLE_FULL_SCAN_RESULTS,
+ GSCAN_ATTRIBUTE_REPORT_EVENTS,
+ /* remaining reserved for additional attributes */
+ GSCAN_ATTRIBUTE_NUM_OF_RESULTS = GSCAN_ATTR_SET3,
+ GSCAN_ATTRIBUTE_FLUSH_RESULTS,
+ GSCAN_ATTRIBUTE_SCAN_RESULTS, /* flat array of wifi_scan_result */
+ GSCAN_ATTRIBUTE_SCAN_ID, /* indicates scan number */
+ GSCAN_ATTRIBUTE_SCAN_FLAGS, /* indicates if scan was aborted */
+ GSCAN_ATTRIBUTE_AP_FLAGS, /* flags on significant change event */
+ GSCAN_ATTRIBUTE_NUM_CHANNELS,
+ GSCAN_ATTRIBUTE_CHANNEL_LIST,
+ GSCAN_ATTRIBUTE_CH_BUCKET_BITMASK,
+
+ /* remaining reserved for additional attributes */
+
+ GSCAN_ATTRIBUTE_SSID = GSCAN_ATTR_SET4,
+ GSCAN_ATTRIBUTE_BSSID,
+ GSCAN_ATTRIBUTE_CHANNEL,
+ GSCAN_ATTRIBUTE_RSSI,
+ GSCAN_ATTRIBUTE_TIMESTAMP,
+ GSCAN_ATTRIBUTE_RTT,
+ GSCAN_ATTRIBUTE_RTTSD,
+
+ /* remaining reserved for additional attributes */
+
+ GSCAN_ATTRIBUTE_HOTLIST_BSSIDS = GSCAN_ATTR_SET5,
+ GSCAN_ATTRIBUTE_RSSI_LOW,
+ GSCAN_ATTRIBUTE_RSSI_HIGH,
+ GSCAN_ATTRIBUTE_HOSTLIST_BSSID_ELEM,
+ GSCAN_ATTRIBUTE_HOTLIST_FLUSH,
+ GSCAN_ATTRIBUTE_HOTLIST_BSSID_COUNT,
+
+ /* remaining reserved for additional attributes */
+ GSCAN_ATTRIBUTE_RSSI_SAMPLE_SIZE = GSCAN_ATTR_SET6,
+ GSCAN_ATTRIBUTE_LOST_AP_SAMPLE_SIZE,
+ GSCAN_ATTRIBUTE_MIN_BREACHING,
+ GSCAN_ATTRIBUTE_SIGNIFICANT_CHANGE_BSSIDS,
+ GSCAN_ATTRIBUTE_SIGNIFICANT_CHANGE_FLUSH,
+
+ /* EPNO */
+ GSCAN_ATTRIBUTE_EPNO_SSID_LIST = GSCAN_ATTR_SET7,
+ GSCAN_ATTRIBUTE_EPNO_SSID,
+ GSCAN_ATTRIBUTE_EPNO_SSID_LEN,
+ GSCAN_ATTRIBUTE_EPNO_RSSI,
+ GSCAN_ATTRIBUTE_EPNO_FLAGS,
+ GSCAN_ATTRIBUTE_EPNO_AUTH,
+ GSCAN_ATTRIBUTE_EPNO_SSID_NUM,
+ GSCAN_ATTRIBUTE_EPNO_FLUSH,
+
+ /* Roam SSID Whitelist and BSSID pref */
+ GSCAN_ATTRIBUTE_WHITELIST_SSID = GSCAN_ATTR_SET8,
+ GSCAN_ATTRIBUTE_NUM_WL_SSID,
+ GSCAN_ATTRIBUTE_WL_SSID_LEN,
+ GSCAN_ATTRIBUTE_WL_SSID_FLUSH,
+ GSCAN_ATTRIBUTE_WHITELIST_SSID_ELEM,
+ GSCAN_ATTRIBUTE_NUM_BSSID,
+ GSCAN_ATTRIBUTE_BSSID_PREF_LIST,
+ GSCAN_ATTRIBUTE_BSSID_PREF_FLUSH,
+ GSCAN_ATTRIBUTE_BSSID_PREF,
+ GSCAN_ATTRIBUTE_RSSI_MODIFIER,
+
+ /* Roam cfg */
+ GSCAN_ATTRIBUTE_A_BAND_BOOST_THRESHOLD = GSCAN_ATTR_SET9,
+ GSCAN_ATTRIBUTE_A_BAND_PENALTY_THRESHOLD,
+ GSCAN_ATTRIBUTE_A_BAND_BOOST_FACTOR,
+ GSCAN_ATTRIBUTE_A_BAND_PENALTY_FACTOR,
+ GSCAN_ATTRIBUTE_A_BAND_MAX_BOOST,
+ GSCAN_ATTRIBUTE_LAZY_ROAM_HYSTERESIS,
+ GSCAN_ATTRIBUTE_ALERT_ROAM_RSSI_TRIGGER,
+ GSCAN_ATTRIBUTE_LAZY_ROAM_ENABLE,
+
+ /* BSSID blacklist */
+ GSCAN_ATTRIBUTE_BSSID_BLACKLIST_FLUSH = GSCAN_ATTR_SET10,
+ GSCAN_ATTRIBUTE_BLACKLIST_BSSID,
+
+ GSCAN_ATTRIBUTE_ANQPO_HS_LIST = GSCAN_ATTR_SET11,
+ GSCAN_ATTRIBUTE_ANQPO_HS_LIST_SIZE,
+ GSCAN_ATTRIBUTE_ANQPO_HS_NETWORK_ID,
+ GSCAN_ATTRIBUTE_ANQPO_HS_NAI_REALM,
+ GSCAN_ATTRIBUTE_ANQPO_HS_ROAM_CONSORTIUM_ID,
+ GSCAN_ATTRIBUTE_ANQPO_HS_PLMN,
+
+ /* Adaptive scan attributes */
+ GSCAN_ATTRIBUTE_BUCKET_STEP_COUNT = GSCAN_ATTR_SET12,
+ GSCAN_ATTRIBUTE_BUCKET_MAX_PERIOD,
+
+ /* ePNO cfg */
+ GSCAN_ATTRIBUTE_EPNO_5G_RSSI_THR = GSCAN_ATTR_SET13,
+ GSCAN_ATTRIBUTE_EPNO_2G_RSSI_THR,
+ GSCAN_ATTRIBUTE_EPNO_INIT_SCORE_MAX,
+ GSCAN_ATTRIBUTE_EPNO_CUR_CONN_BONUS,
+ GSCAN_ATTRIBUTE_EPNO_SAME_NETWORK_BONUS,
+ GSCAN_ATTRIBUTE_EPNO_SECURE_BONUS,
+ GSCAN_ATTRIBUTE_EPNO_5G_BONUS,
+
+ /* Android O Roaming features */
+ GSCAN_ATTRIBUTE_ROAM_STATE_SET = GSCAN_ATTR_SET14,
+
+ GSCAN_ATTRIBUTE_MAX
+};
+
+enum gscan_bucket_attributes {
+ GSCAN_ATTRIBUTE_CH_BUCKET_1,
+ GSCAN_ATTRIBUTE_CH_BUCKET_2,
+ GSCAN_ATTRIBUTE_CH_BUCKET_3,
+ GSCAN_ATTRIBUTE_CH_BUCKET_4,
+ GSCAN_ATTRIBUTE_CH_BUCKET_5,
+ GSCAN_ATTRIBUTE_CH_BUCKET_6,
+ GSCAN_ATTRIBUTE_CH_BUCKET_7
+};
+
+enum gscan_ch_attributes {
+ GSCAN_ATTRIBUTE_CH_ID_1,
+ GSCAN_ATTRIBUTE_CH_ID_2,
+ GSCAN_ATTRIBUTE_CH_ID_3,
+ GSCAN_ATTRIBUTE_CH_ID_4,
+ GSCAN_ATTRIBUTE_CH_ID_5,
+ GSCAN_ATTRIBUTE_CH_ID_6,
+ GSCAN_ATTRIBUTE_CH_ID_7
+};
+
+enum rtt_attributes {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)) || (ANDROID_VERSION >= 12)
+ RTT_ATTRIBUTE_INVALID,
+#endif
+ RTT_ATTRIBUTE_TARGET_CNT,
+ RTT_ATTRIBUTE_TARGET_INFO,
+ RTT_ATTRIBUTE_TARGET_MAC,
+ RTT_ATTRIBUTE_TARGET_TYPE,
+ RTT_ATTRIBUTE_TARGET_PEER,
+ RTT_ATTRIBUTE_TARGET_CHAN,
+ RTT_ATTRIBUTE_TARGET_PERIOD,
+ RTT_ATTRIBUTE_TARGET_NUM_BURST,
+ RTT_ATTRIBUTE_TARGET_NUM_FTM_BURST,
+ RTT_ATTRIBUTE_TARGET_NUM_RETRY_FTM,
+ RTT_ATTRIBUTE_TARGET_NUM_RETRY_FTMR,
+ RTT_ATTRIBUTE_TARGET_LCI,
+ RTT_ATTRIBUTE_TARGET_LCR,
+ RTT_ATTRIBUTE_TARGET_BURST_DURATION,
+ RTT_ATTRIBUTE_TARGET_PREAMBLE,
+ RTT_ATTRIBUTE_TARGET_BW,
+ RTT_ATTRIBUTE_RESULTS_COMPLETE = 30,
+ RTT_ATTRIBUTE_RESULTS_PER_TARGET,
+ RTT_ATTRIBUTE_RESULT_CNT,
+ RTT_ATTRIBUTE_RESULT,
+ RTT_ATTRIBUTE_RESULT_DETAIL
+};
+
+enum wifi_rssi_monitor_attr {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)) || (ANDROID_VERSION >= 12)
+ RSSI_MONITOR_ATTRIBUTE_INVALID,
+#endif
+ RSSI_MONITOR_ATTRIBUTE_MAX_RSSI,
+ RSSI_MONITOR_ATTRIBUTE_MIN_RSSI,
+ RSSI_MONITOR_ATTRIBUTE_START
+};
+
+enum wifi_sae_key_attr {
+ BRCM_SAE_KEY_ATTR_PEER_MAC,
+ BRCM_SAE_KEY_ATTR_PMK,
+ BRCM_SAE_KEY_ATTR_PMKID
+};
+
+enum debug_attributes {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)) || (ANDROID_VERSION >= 12)
+ DEBUG_ATTRIBUTE_INVALID,
+#endif
+ DEBUG_ATTRIBUTE_GET_DRIVER,
+ DEBUG_ATTRIBUTE_GET_FW,
+ DEBUG_ATTRIBUTE_RING_ID,
+ DEBUG_ATTRIBUTE_RING_NAME,
+ DEBUG_ATTRIBUTE_RING_FLAGS,
+ DEBUG_ATTRIBUTE_LOG_LEVEL,
+ DEBUG_ATTRIBUTE_LOG_TIME_INTVAL,
+ DEBUG_ATTRIBUTE_LOG_MIN_DATA_SIZE,
+ DEBUG_ATTRIBUTE_FW_DUMP_LEN,
+ DEBUG_ATTRIBUTE_FW_DUMP_DATA,
+ DEBUG_ATTRIBUTE_FW_ERR_CODE,
+ DEBUG_ATTRIBUTE_RING_DATA,
+ DEBUG_ATTRIBUTE_RING_STATUS,
+ DEBUG_ATTRIBUTE_RING_NUM,
+ DEBUG_ATTRIBUTE_DRIVER_DUMP_LEN,
+ DEBUG_ATTRIBUTE_DRIVER_DUMP_DATA,
+ DEBUG_ATTRIBUTE_PKT_FATE_NUM,
+ DEBUG_ATTRIBUTE_PKT_FATE_DATA,
+ DEBUG_ATTRIBUTE_HANG_REASON
+};
+
+typedef enum {
+ DUMP_LEN_ATTR_INVALID = 0,
+ DUMP_LEN_ATTR_MEMDUMP = 1,
+ DUMP_LEN_ATTR_SSSR_C0_D11_BEFORE = 2,
+ DUMP_LEN_ATTR_SSSR_C0_D11_AFTER = 3,
+ DUMP_LEN_ATTR_SSSR_C1_D11_BEFORE = 4,
+ DUMP_LEN_ATTR_SSSR_C1_D11_AFTER = 5,
+ DUMP_LEN_ATTR_SSSR_C2_D11_BEFORE = 6,
+ DUMP_LEN_ATTR_SSSR_C2_D11_AFTER = 7,
+ DUMP_LEN_ATTR_SSSR_DIG_BEFORE = 8,
+ DUMP_LEN_ATTR_SSSR_DIG_AFTER = 9,
+ DUMP_LEN_ATTR_TIMESTAMP = 10,
+ DUMP_LEN_ATTR_GENERAL_LOG = 11,
+ DUMP_LEN_ATTR_ECNTRS = 12,
+ DUMP_LEN_ATTR_SPECIAL_LOG = 13,
+ DUMP_LEN_ATTR_DHD_DUMP = 14,
+ DUMP_LEN_ATTR_EXT_TRAP = 15,
+ DUMP_LEN_ATTR_HEALTH_CHK = 16,
+ DUMP_LEN_ATTR_PRESERVE_LOG = 17,
+ DUMP_LEN_ATTR_COOKIE = 18,
+ DUMP_LEN_ATTR_FLOWRING_DUMP = 19,
+ DUMP_LEN_ATTR_PKTLOG = 20,
+ DUMP_LEN_ATTR_PKTLOG_DEBUG = 21,
+ DUMP_FILENAME_ATTR_DEBUG_DUMP = 22,
+ DUMP_FILENAME_ATTR_MEM_DUMP = 23,
+ DUMP_FILENAME_ATTR_SSSR_CORE_0_BEFORE_DUMP = 24,
+ DUMP_FILENAME_ATTR_SSSR_CORE_0_AFTER_DUMP = 25,
+ DUMP_FILENAME_ATTR_SSSR_CORE_1_BEFORE_DUMP = 26,
+ DUMP_FILENAME_ATTR_SSSR_CORE_1_AFTER_DUMP = 27,
+ DUMP_FILENAME_ATTR_SSSR_CORE_2_BEFORE_DUMP = 28,
+ DUMP_FILENAME_ATTR_SSSR_CORE_2_AFTER_DUMP = 29,
+ DUMP_FILENAME_ATTR_SSSR_DIG_BEFORE_DUMP = 30,
+ DUMP_FILENAME_ATTR_SSSR_DIG_AFTER_DUMP = 31,
+ DUMP_FILENAME_ATTR_PKTLOG_DUMP = 32,
+ DUMP_FILENAME_ATTR_PKTLOG_DEBUG_DUMP = 33,
+ DUMP_LEN_ATTR_STATUS_LOG = 34,
+ DUMP_LEN_ATTR_AXI_ERROR = 35,
+ DUMP_FILENAME_ATTR_AXI_ERROR_DUMP = 36,
+ DUMP_LEN_ATTR_RTT_LOG = 37
+	/* Add new attributes from here on, to stay in sync with the older HAL */
+} EWP_DUMP_EVENT_ATTRIBUTE;
+
+/* Attributes associated with DEBUG_GET_DUMP_BUF */
+typedef enum {
+ DUMP_BUF_ATTR_INVALID = 0,
+ DUMP_BUF_ATTR_MEMDUMP = 1,
+ DUMP_BUF_ATTR_SSSR_C0_D11_BEFORE = 2,
+ DUMP_BUF_ATTR_SSSR_C0_D11_AFTER = 3,
+ DUMP_BUF_ATTR_SSSR_C1_D11_BEFORE = 4,
+ DUMP_BUF_ATTR_SSSR_C1_D11_AFTER = 5,
+ DUMP_BUF_ATTR_SSSR_C2_D11_BEFORE = 6,
+ DUMP_BUF_ATTR_SSSR_C2_D11_AFTER = 7,
+ DUMP_BUF_ATTR_SSSR_DIG_BEFORE = 8,
+ DUMP_BUF_ATTR_SSSR_DIG_AFTER = 9,
+ DUMP_BUF_ATTR_TIMESTAMP = 10,
+ DUMP_BUF_ATTR_GENERAL_LOG = 11,
+ DUMP_BUF_ATTR_ECNTRS = 12,
+ DUMP_BUF_ATTR_SPECIAL_LOG = 13,
+ DUMP_BUF_ATTR_DHD_DUMP = 14,
+ DUMP_BUF_ATTR_EXT_TRAP = 15,
+ DUMP_BUF_ATTR_HEALTH_CHK = 16,
+ DUMP_BUF_ATTR_PRESERVE_LOG = 17,
+ DUMP_BUF_ATTR_COOKIE = 18,
+ DUMP_BUF_ATTR_FLOWRING_DUMP = 19,
+ DUMP_BUF_ATTR_PKTLOG = 20,
+ DUMP_BUF_ATTR_PKTLOG_DEBUG = 21,
+ DUMP_BUF_ATTR_STATUS_LOG = 22,
+ DUMP_BUF_ATTR_AXI_ERROR = 23,
+ DUMP_BUF_ATTR_RTT_LOG = 24
+	/* Add new attributes from here on, to stay in sync with the older HAL */
+} EWP_DUMP_CMD_ATTRIBUTE;
+
+enum mkeep_alive_attributes {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)) || (ANDROID_VERSION >= 12)
+ MKEEP_ALIVE_ATTRIBUTE_INVALID,
+#endif
+ MKEEP_ALIVE_ATTRIBUTE_ID,
+ MKEEP_ALIVE_ATTRIBUTE_IP_PKT,
+ MKEEP_ALIVE_ATTRIBUTE_IP_PKT_LEN,
+ MKEEP_ALIVE_ATTRIBUTE_SRC_MAC_ADDR,
+ MKEEP_ALIVE_ATTRIBUTE_DST_MAC_ADDR,
+ MKEEP_ALIVE_ATTRIBUTE_PERIOD_MSEC,
+ MKEEP_ALIVE_ATTRIBUTE_ETHER_TYPE
+};
+
+typedef enum wl_vendor_event {
+ BRCM_VENDOR_EVENT_UNSPEC = 0,
+ BRCM_VENDOR_EVENT_PRIV_STR = 1,
+ GOOGLE_GSCAN_SIGNIFICANT_EVENT = 2,
+ GOOGLE_GSCAN_GEOFENCE_FOUND_EVENT = 3,
+ GOOGLE_GSCAN_BATCH_SCAN_EVENT = 4,
+ GOOGLE_SCAN_FULL_RESULTS_EVENT = 5,
+ GOOGLE_RTT_COMPLETE_EVENT = 6,
+ GOOGLE_SCAN_COMPLETE_EVENT = 7,
+ GOOGLE_GSCAN_GEOFENCE_LOST_EVENT = 8,
+ GOOGLE_SCAN_EPNO_EVENT = 9,
+ GOOGLE_DEBUG_RING_EVENT = 10,
+ GOOGLE_FW_DUMP_EVENT = 11,
+ GOOGLE_PNO_HOTSPOT_FOUND_EVENT = 12,
+ GOOGLE_RSSI_MONITOR_EVENT = 13,
+ GOOGLE_MKEEP_ALIVE_EVENT = 14,
+
+ /*
+ * BRCM specific events should be placed after
+ * the Generic events so that enums don't mismatch
+ * between the DHD and HAL
+ */
+ GOOGLE_NAN_EVENT_ENABLED = 15,
+ GOOGLE_NAN_EVENT_DISABLED = 16,
+ GOOGLE_NAN_EVENT_SUBSCRIBE_MATCH = 17,
+ GOOGLE_NAN_EVENT_REPLIED = 18,
+ GOOGLE_NAN_EVENT_PUBLISH_TERMINATED = 19,
+ GOOGLE_NAN_EVENT_SUBSCRIBE_TERMINATED = 20,
+ GOOGLE_NAN_EVENT_DE_EVENT = 21,
+ GOOGLE_NAN_EVENT_FOLLOWUP = 22,
+ GOOGLE_NAN_EVENT_TRANSMIT_FOLLOWUP_IND = 23,
+ GOOGLE_NAN_EVENT_DATA_REQUEST = 24,
+ GOOGLE_NAN_EVENT_DATA_CONFIRMATION = 25,
+ GOOGLE_NAN_EVENT_DATA_END = 26,
+ GOOGLE_NAN_EVENT_BEACON = 27,
+ GOOGLE_NAN_EVENT_SDF = 28,
+ GOOGLE_NAN_EVENT_TCA = 29,
+ GOOGLE_NAN_EVENT_SUBSCRIBE_UNMATCH = 30,
+ GOOGLE_NAN_EVENT_UNKNOWN = 31,
+ GOOGLE_ROAM_EVENT_START = 32,
+ BRCM_VENDOR_EVENT_HANGED = 33,
+ BRCM_VENDOR_EVENT_SAE_KEY = 34,
+ BRCM_VENDOR_EVENT_BEACON_RECV = 35,
+ BRCM_VENDOR_EVENT_PORT_AUTHORIZED = 36,
+ GOOGLE_FILE_DUMP_EVENT = 37,
+ BRCM_VENDOR_EVENT_CU = 38,
+ BRCM_VENDOR_EVENT_WIPS = 39,
+ NAN_ASYNC_RESPONSE_DISABLED = 40,
+ BRCM_VENDOR_EVENT_RCC_INFO = 41
+} wl_vendor_event_t;
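+/*
+ * These enum values double as indices into the wl_vendor_events[] table in
+ * wl_cfgvendor.c (via CFG80211_VENDOR_EVENT_ALLOC), so the enum and the
+ * table must stay in lockstep.
+ */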
+
+enum andr_wifi_attr {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)) || (ANDROID_VERSION >= 12)
+ ANDR_WIFI_ATTRIBUTE_INVALID,
+#endif
+ ANDR_WIFI_ATTRIBUTE_NUM_FEATURE_SET,
+ ANDR_WIFI_ATTRIBUTE_FEATURE_SET,
+ ANDR_WIFI_ATTRIBUTE_RANDOM_MAC_OUI,
+ ANDR_WIFI_ATTRIBUTE_NODFS_SET,
+ ANDR_WIFI_ATTRIBUTE_COUNTRY,
+ ANDR_WIFI_ATTRIBUTE_ND_OFFLOAD_VALUE,
+ ANDR_WIFI_ATTRIBUTE_TCPACK_SUP_VALUE,
+ ANDR_WIFI_ATTRIBUTE_LATENCY_MODE,
+ ANDR_WIFI_ATTRIBUTE_RANDOM_MAC,
+ ANDR_WIFI_ATTRIBUTE_TX_POWER_SCENARIO
+};
+enum apf_attributes {
+ APF_ATTRIBUTE_VERSION,
+ APF_ATTRIBUTE_MAX_LEN,
+ APF_ATTRIBUTE_PROGRAM,
+ APF_ATTRIBUTE_PROGRAM_LEN
+};
+
+typedef enum wl_vendor_gscan_attribute {
+ ATTR_START_GSCAN,
+ ATTR_STOP_GSCAN,
+ ATTR_SET_SCAN_BATCH_CFG_ID, /* set batch scan params */
+ ATTR_SET_SCAN_GEOFENCE_CFG_ID, /* set list of bssids to track */
+ ATTR_SET_SCAN_SIGNIFICANT_CFG_ID, /* set list of bssids, rssi threshold etc.. */
+ ATTR_SET_SCAN_CFG_ID, /* set common scan config params here */
+ ATTR_GET_GSCAN_CAPABILITIES_ID,
+ /* Add more sub commands here */
+ ATTR_GSCAN_MAX
+} wl_vendor_gscan_attribute_t;
+
+typedef enum gscan_batch_attribute {
+ ATTR_GSCAN_BATCH_BESTN,
+ ATTR_GSCAN_BATCH_MSCAN,
+ ATTR_GSCAN_BATCH_BUFFER_THRESHOLD
+} gscan_batch_attribute_t;
+
+typedef enum gscan_geofence_attribute {
+ ATTR_GSCAN_NUM_HOTLIST_BSSID,
+ ATTR_GSCAN_HOTLIST_BSSID
+} gscan_geofence_attribute_t;
+
+typedef enum gscan_complete_event {
+ WIFI_SCAN_COMPLETE,
+ WIFI_SCAN_THRESHOLD_NUM_SCANS,
+ WIFI_SCAN_BUFFER_THR_BREACHED
+} gscan_complete_event_t;
+
+#ifdef DHD_WAKE_STATUS
+enum wake_stat_attributes {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)) || (ANDROID_VERSION >= 12)
+ WAKE_STAT_ATTRIBUTE_INVALID,
+#endif
+ WAKE_STAT_ATTRIBUTE_TOTAL_CMD_EVENT,
+ WAKE_STAT_ATTRIBUTE_CMD_EVENT_WAKE,
+ WAKE_STAT_ATTRIBUTE_CMD_EVENT_COUNT,
+ WAKE_STAT_ATTRIBUTE_CMD_EVENT_COUNT_USED,
+ WAKE_STAT_ATTRIBUTE_TOTAL_DRIVER_FW,
+ WAKE_STAT_ATTRIBUTE_DRIVER_FW_WAKE,
+ WAKE_STAT_ATTRIBUTE_DRIVER_FW_COUNT,
+ WAKE_STAT_ATTRIBUTE_DRIVER_FW_COUNT_USED,
+ WAKE_STAT_ATTRIBUTE_TOTAL_RX_DATA_WAKE,
+ WAKE_STAT_ATTRIBUTE_RX_UNICAST_COUNT,
+ WAKE_STAT_ATTRIBUTE_RX_MULTICAST_COUNT,
+ WAKE_STAT_ATTRIBUTE_RX_BROADCAST_COUNT,
+ WAKE_STAT_ATTRIBUTE_RX_ICMP_PKT,
+ WAKE_STAT_ATTRIBUTE_RX_ICMP6_PKT,
+ WAKE_STAT_ATTRIBUTE_RX_ICMP6_RA,
+ WAKE_STAT_ATTRIBUTE_RX_ICMP6_NA,
+ WAKE_STAT_ATTRIBUTE_RX_ICMP6_NS,
+ WAKE_STAT_ATTRIBUTE_IPV4_RX_MULTICAST_ADD_CNT,
+ WAKE_STAT_ATTRIBUTE_IPV6_RX_MULTICAST_ADD_CNT,
+ WAKE_STAT_ATTRIBUTE_OTHER_RX_MULTICAST_ADD_CNT
+};
+
+typedef struct rx_data_cnt_details_t {
+ int rx_unicast_cnt; /* Total rx unicast packet which woke up host */
+ int rx_multicast_cnt; /* Total rx multicast packet which woke up host */
+ int rx_broadcast_cnt; /* Total rx broadcast packet which woke up host */
+} RX_DATA_WAKE_CNT_DETAILS;
+
+typedef struct rx_wake_pkt_type_classification_t {
+ int icmp_pkt; /* wake icmp packet count */
+ int icmp6_pkt; /* wake icmp6 packet count */
+ int icmp6_ra; /* wake icmp6 RA packet count */
+ int icmp6_na; /* wake icmp6 NA packet count */
+ int icmp6_ns; /* wake icmp6 NS packet count */
+} RX_WAKE_PKT_TYPE_CLASSFICATION;
+
+typedef struct rx_multicast_cnt_t {
+ int ipv4_rx_multicast_addr_cnt; /* Rx wake packet was ipv4 multicast */
+ int ipv6_rx_multicast_addr_cnt; /* Rx wake packet was ipv6 multicast */
+ int other_rx_multicast_addr_cnt; /* Rx wake packet was non-ipv4 and non-ipv6 */
+} RX_MULTICAST_WAKE_DATA_CNT;
+
+typedef struct wlan_driver_wake_reason_cnt_t {
+ int total_cmd_event_wake; /* Total count of cmd event wakes */
+ int *cmd_event_wake_cnt; /* Individual wake count array, each index a reason */
+ int cmd_event_wake_cnt_sz; /* Max number of cmd event wake reasons */
+ int cmd_event_wake_cnt_used; /* Number of cmd event wake reasons specific to the driver */
+ int total_driver_fw_local_wake; /* Total count of drive/fw wakes, for local reasons */
+ int *driver_fw_local_wake_cnt; /* Individual wake count array, each index a reason */
+ int driver_fw_local_wake_cnt_sz; /* Max number of local driver/fw wake reasons */
+ /* Number of local driver/fw wake reasons specific to the driver */
+ int driver_fw_local_wake_cnt_used;
+ int total_rx_data_wake; /* total data rx packets, that woke up host */
+ RX_DATA_WAKE_CNT_DETAILS rx_wake_details;
+ RX_WAKE_PKT_TYPE_CLASSFICATION rx_wake_pkt_classification_info;
+ RX_MULTICAST_WAKE_DATA_CNT rx_multicast_wake_pkt_info;
+} WLAN_DRIVER_WAKE_REASON_CNT;
+#endif /* DHD_WAKE_STATUS */
+
+#define BRCM_VENDOR_WIPS_EVENT_BUF_LEN 128
+typedef enum wl_vendor_wips_attr_type {
+ WIPS_ATTR_DEAUTH_CNT = 1,
+ WIPS_ATTR_DEAUTH_BSSID,
+ WIPS_ATTR_CURRENT_RSSI,
+ WIPS_ATTR_DEAUTH_RSSI
+} wl_vendor_wips_attr_type_t;
+
+#define BRCM_VENDOR_GET_RCC_EVENT_BUF_LEN \
+	(sizeof(uint32) + DOT11_MAX_SSID_LEN + \
+	sizeof(int32) + (sizeof(uint16) * MAX_ROAM_CHANNEL))
+typedef enum wl_vendor_get_rcc_attr_type {
+ RCC_ATTRIBUTE_SSID = 1,
+ RCC_ATTRIBUTE_SSID_LEN,
+ RCC_ATTRIBUTE_NUM_CHANNELS,
+ RCC_ATTRIBUTE_CHANNEL_LIST
+} wl_vendor_get_rcc_attr_type_t;
+
+/* Chipset roaming capabilities */
+typedef struct wifi_roaming_capabilities {
+ u32 max_blacklist_size;
+ u32 max_whitelist_size;
+} wifi_roaming_capabilities_t;
+
+typedef enum {
+ SET_HAL_START_ATTRIBUTE_DEINIT = 0x0001,
+ SET_HAL_START_ATTRIBUTE_PRE_INIT = 0x0002,
+ SET_HAL_START_ATTRIBUTE_EVENT_SOCK_PID = 0x0003
+} SET_HAL_START_ATTRIBUTE;
+
+/* Capture the BRCM_VENDOR_SUBCMD_PRIV_STRINGS* here */
+#define BRCM_VENDOR_SCMD_CAPA "cap"
+#define MEMDUMP_PATH_LEN 128
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT)
+extern int wl_cfgvendor_attach(struct wiphy *wiphy, dhd_pub_t *dhd);
+extern int wl_cfgvendor_detach(struct wiphy *wiphy);
+extern int wl_cfgvendor_send_async_event(struct wiphy *wiphy,
+ struct net_device *dev, int event_id, const void *data, int len);
+extern int wl_cfgvendor_send_hotlist_event(struct wiphy *wiphy,
+ struct net_device *dev, void *data, int len, wl_vendor_event_t event);
+#else
+static INLINE int wl_cfgvendor_attach(struct wiphy *wiphy,
+ dhd_pub_t *dhd) { UNUSED_PARAMETER(wiphy); UNUSED_PARAMETER(dhd); return 0; }
+static INLINE int wl_cfgvendor_detach(struct wiphy *wiphy) { UNUSED_PARAMETER(wiphy); return 0; }
+static INLINE int wl_cfgvendor_send_async_event(struct wiphy *wiphy,
+ struct net_device *dev, int event_id, const void *data, int len)
+{ return 0; }
+static INLINE int wl_cfgvendor_send_hotlist_event(struct wiphy *wiphy,
+ struct net_device *dev, void *data, int len, wl_vendor_event_t event)
+{ return 0; }
+#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT) */
+
+#if defined(WL_SUPP_EVENT) && \
+ ((LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT))
+extern int wl_cfgvendor_send_supp_eventstring(const char *func, const char *fmt, ...);
+int wl_cfgvendor_notify_supp_event_str(const char *evt_name, const char *fmt, ...);
+#define SUPP_LOG_LEN 256
+#define PRINT_SUPP_LOG(fmt, ...) \
+ wl_cfgvendor_send_supp_eventstring(__func__, fmt, ##__VA_ARGS__);
+#define SUPP_LOG(args) PRINT_SUPP_LOG args;
+#define SUPP_EVT_LOG(evt_name, fmt, ...) \
+ wl_cfgvendor_notify_supp_event_str(evt_name, fmt, ##__VA_ARGS__);
+#define SUPP_EVENT(args) SUPP_EVT_LOG args
+#else
+#define SUPP_LOG(x)
+#define SUPP_EVENT(x)
+#endif /* WL_SUPP_EVENT && (kernel > (3, 13, 0)) || WL_VENDOR_EXT_SUPPORT */
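+/*
+ * Usage sketch (illustrative; assumes a local 'status' variable):
+ *	SUPP_LOG(("connect failed, status %d\n", status));
+ * The double parentheses are required: SUPP_LOG pastes its argument list
+ * straight into PRINT_SUPP_LOG, which supplies __func__ automatically.
+ */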
+
+#ifdef CONFIG_COMPAT
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0))
+#define COMPAT_STRUCT_IFACE(normal_structure, value) \
+ compat_ ## normal_structure compat_ ## iface; \
+ int compat_task_state = in_compat_syscall(); \
+ normal_structure value;
+#else
+#define COMPAT_STRUCT_IFACE(normal_structure, value) \
+ compat_ ## normal_structure compat_ ## iface; \
+ int compat_task_state = is_compat_task(); \
+ normal_structure value;
+#endif
+
+#define COMPAT_BZERO_IFACE(normal_structure, value) \
+ do { \
+ if (compat_task_state) { \
+ bzero(&compat_ ## value, sizeof(compat_ ## normal_structure)); \
+ } else { \
+ bzero(&value, sizeof(normal_structure)); \
+ } \
+ } while (0)
+
+#define COMPAT_ASSIGN_VALUE(normal_structure, member, value) \
+ do { \
+ if (compat_task_state) { \
+ compat_ ## normal_structure.member = value; \
+ } else { \
+ normal_structure.member = value; \
+ } \
+ } while (0)
+
+#define COMPAT_MEMCOPY_IFACE(output, total_len, normal_structure, value, wifi_rate_stat) \
+ do { \
+ if (compat_task_state) { \
+ memcpy(output, &compat_ ## value, sizeof(compat_ ## normal_structure)); \
+ output += (sizeof(compat_ ## value) - sizeof(wifi_rate_stat)); \
+ total_len += sizeof(compat_ ## normal_structure); \
+ } else { \
+ memcpy(output, &value, sizeof(normal_structure)); \
+ output += (sizeof(value) - sizeof(wifi_rate_stat)); \
+ total_len += sizeof(normal_structure); \
+ } \
+ } while (0)
+#else
+#define COMPAT_STRUCT_IFACE(normal_structure, value) normal_structure value;
+#define COMPAT_BZERO_IFACE(normal_structure, value) bzero(&value, sizeof(normal_structure));
+#define COMPAT_ASSIGN_VALUE(normal_structure, member, value) normal_structure.member = value;
+#define COMPAT_MEMCOPY_IFACE(output, total_len, normal_structure, value, rate_stat) \
+ do { \
+ memcpy(output, &value, sizeof(normal_structure)); \
+ output += (sizeof(value) - sizeof(wifi_rate_stat)); \
+ total_len += sizeof(normal_structure); \
+ } while (0)
+#endif /* CONFIG_COMPAT */
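+/*
+ * Usage sketch for the COMPAT_* helpers (illustrative, assuming a
+ * wifi_iface_stat structure with a compat_wifi_iface_stat twin):
+ *	COMPAT_STRUCT_IFACE(wifi_iface_stat, iface);
+ *	COMPAT_BZERO_IFACE(wifi_iface_stat, iface);
+ *	COMPAT_ASSIGN_VALUE(iface, beacon_rx, cnt);
+ * Note that COMPAT_STRUCT_IFACE hard-codes the compat variable name as
+ * 'compat_iface', so the value argument must literally be 'iface' for the
+ * other macros to resolve.
+ */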
+
+#if (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || \
+ LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
+#define CFG80211_VENDOR_EVENT_ALLOC(wiphy, wdev, len, type, kflags) \
+ cfg80211_vendor_event_alloc(wiphy, wdev, len, type, kflags);
+#else
+#define CFG80211_VENDOR_EVENT_ALLOC(wiphy, wdev, len, type, kflags) \
+ cfg80211_vendor_event_alloc(wiphy, len, type, kflags);
+#endif /* (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || */
+ /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) */
+int wl_cfgvendor_nan_send_async_disable_resp(struct wireless_dev *wdev);
+
+#ifdef WL_CFGVENDOR_SEND_HANG_EVENT
+void wl_cfgvendor_send_hang_event(struct net_device *dev, u16 reason,
+ char *string, int hang_info_cnt);
+void wl_cfgvendor_simple_hang_event(struct net_device *dev, u16 reason);
+void wl_copy_hang_info_if_falure(struct net_device *dev, u16 reason, s32 ret);
+#endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
+#ifdef DHD_PKT_LOGGING
+int wl_cfgvendor_dbg_send_pktlog_dbg_file_dump_evt(struct net_device *ndev);
+#endif /* DHD_PKT_LOGGING */
+int wl_cfgvendor_connect_params_handler(struct wiphy *wiphy, struct wireless_dev *wdev,
+ const void *data, int len);
+int wl_cfgvendor_start_ap_params_handler(struct wiphy *wiphy, struct wireless_dev *wdev,
+ const void *data, int len);
+#endif /* _wl_cfgvendor_h_ */
diff --git a/bcmdhd.101.10.361.x/wl_cfgvif.c b/bcmdhd.101.10.361.x/wl_cfgvif.c
new file mode 100755
index 0000000..e9a1d76
--- /dev/null
+++ b/bcmdhd.101.10.361.x/wl_cfgvif.c
@@ -0,0 +1,6601 @@
+/*
+ * WiFi Virtual Interface implementation
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+/* */
+
+#include <typedefs.h>
+#include <linuxver.h>
+#include <linux/kernel.h>
+
+#include <bcmutils.h>
+#include <bcmstdlib_s.h>
+#include <bcmwifi_channels.h>
+#include <bcmendian.h>
+#include <ethernet.h>
+#ifdef WL_WPS_SYNC
+#include <eapol.h>
+#endif /* WL_WPS_SYNC */
+#include <802.11.h>
+#include <bcmiov.h>
+#include <linux/if_arp.h>
+#include <asm/uaccess.h>
+
+#include <ethernet.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include <linux/sched.h>
+#include <linux/etherdevice.h>
+#include <linux/wireless.h>
+#include <linux/ieee80211.h>
+#include <linux/wait.h>
+#include <net/cfg80211.h>
+#include <net/rtnetlink.h>
+
+#include <wlioctl.h>
+#include <bcmevent.h>
+#include <wldev_common.h>
+#include <wl_cfg80211.h>
+#include <wl_cfgp2p.h>
+#include <wl_cfgscan.h>
+#include <wl_cfgvif.h>
+#include <bcmdevs.h>
+#include <bcmdevs_legacy.h>
+#ifdef WL_FILS
+#include <fils.h>
+#include <frag.h>
+#endif /* WL_FILS */
+
+#ifdef OEM_ANDROID
+#include <wl_android.h>
+#endif
+
+#if defined(BCMDONGLEHOST)
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_linux.h>
+#include <dhd_linux_pktdump.h>
+#include <dhd_debug.h>
+#include <dhdioctl.h>
+#include <wlioctl.h>
+#include <dhd_cfg80211.h>
+#include <dhd_bus.h>
+#include <wl_cfgvendor.h>
+#endif /* defined(BCMDONGLEHOST) */
+
+#ifdef WL_NAN
+#include <wl_cfgnan.h>
+#endif /* WL_NAN */
+
+#ifdef BCMPCIE
+#include <dhd_flowring.h>
+#endif
+#if defined(BIGDATA_SOFTAP) || defined(DHD_ENABLE_BIGDATA_LOGGING)
+#include <wl_bigdata.h>
+#endif /* BIGDATA_SOFTAP || DHD_ENABLE_BIGDATA_LOGGING */
+#include <dhd_config.h>
+
+#define MAX_VIF_OFFSET 15
+#define MAX_WAIT_TIME 1500
+
+#if !defined(BCMDONGLEHOST)
+#ifdef ntoh32
+#undef ntoh32
+#endif
+#ifdef ntoh16
+#undef ntoh16
+#endif
+#ifdef htod32
+#undef htod32
+#endif
+#ifdef htod16
+#undef htod16
+#endif
+#define ntoh32(i) (i)
+#define ntoh16(i) (i)
+#define htod32(i) (i)
+#define htod16(i) (i)
+#define DNGL_FUNC(func, parameters)
+#else
+#define DNGL_FUNC(func, parameters) func parameters
+#define COEX_DHCP
+
+#endif /* !defined(BCMDONGLEHOST) */
+
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && \
+(__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic pop")
+#endif
+
+/* SoftAP related parameters */
+#define DEFAULT_2G_SOFTAP_CHANNEL 1
+#define DEFAULT_2G_SOFTAP_CHANSPEC 0x1006
+#define DEFAULT_5G_SOFTAP_CHANNEL 149
+
+#define MAX_VNDR_OUI_STR_LEN 256u
+#define VNDR_OUI_STR_LEN 10u
+#define DOT11_DISCONNECT_RC 2u
+
+#if defined(WL_FW_OCE_AP_SELECT)
+static bool
+wl_cfgoce_has_ie(const u8 *ie, const u8 **tlvs, u32 *tlvs_len, const u8 *oui, u32 oui_len, u8 type);
+
+/* Check whether the given IE looks like a WFA OCE IE. */
+#define wl_cfgoce_is_oce_ie(ie, tlvs, len) wl_cfgoce_has_ie(ie, tlvs, len, \
+ (const uint8 *)WFA_OUI, WFA_OUI_LEN, WFA_OUI_TYPE_MBO_OCE)
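+/*
+ * For reference (illustrative): a matching MBO-OCE vendor IE starts with
+ * the WFA OUI 50:6F:9A followed by OUI type 0x16, i.e. the raw IE bytes
+ * look like "dd <len> 50 6f 9a 16 ...".
+ */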
+
+/* Does the given IE carry the expected OUI and type?
+ * (The tlvs pointer/length parameters are kept for interface
+ * compatibility; this implementation only inspects the single IE.)
+ */
+static bool
+wl_cfgoce_has_ie(const u8 *ie, const u8 **tlvs, u32 *tlvs_len, const u8 *oui, u32 oui_len, u8 type)
+{
+ /* If the contents match the OUI and the type */
+ if (ie[TLV_LEN_OFF] >= oui_len + 1 &&
+ !bcmp(&ie[TLV_BODY_OFF], oui, oui_len) &&
+ type == ie[TLV_BODY_OFF + oui_len]) {
+ return TRUE;
+ }
+
+ return FALSE;
+}
+#endif /* WL_FW_OCE_AP_SELECT */
+
+static bool check_dev_role_integrity(struct bcm_cfg80211 *cfg, u32 dev_role);
+
+#ifdef SUPPORT_AP_BWCTRL
+static int bw2cap[] = { 0, 0, WLC_BW_CAP_20MHZ, WLC_BW_CAP_40MHZ, WLC_BW_CAP_80MHZ,
+ WLC_BW_CAP_160MHZ, WLC_BW_CAP_160MHZ };
+#endif /* SUPPORT_AP_BWCTRL */
+
+#if !defined(BCMDONGLEHOST)
+/* Wake lock are used in Android only, which is dongle based as of now */
+#define DHD_OS_WAKE_LOCK(pub)
+#define DHD_OS_WAKE_UNLOCK(pub)
+#define DHD_EVENT_WAKE_LOCK(pub)
+#define DHD_EVENT_WAKE_UNLOCK(pub)
+#define DHD_OS_WAKE_LOCK_TIMEOUT(pub)
+#endif /* !defined(BCMDONGLEHOST) */
+
+#define IS_WPA_AKM(akm) ((akm) == RSN_AKM_NONE || \
+ (akm) == RSN_AKM_UNSPECIFIED || \
+ (akm) == RSN_AKM_PSK)
+
+#ifdef SUPPORT_AP_BWCTRL
+static void
+wl_update_apchan_bwcap(struct bcm_cfg80211 *cfg, struct net_device *ndev, chanspec_t chanspec);
+#endif /* SUPPORT_AP_BWCTRL */
+
+#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)) && \
+	(LINUX_VERSION_CODE <= KERNEL_VERSION(3, 7, 0)))
+struct chan_info {
+ int freq;
+ int chan_type;
+};
+#endif
+
+#if defined(WL_FW_OCE_AP_SELECT)
+bool wl_cfg80211_is_oce_ap(struct wiphy *wiphy, const u8 *bssid_hint)
+{
+ const u8 *parse = NULL;
+ bcm_tlv_t *ie;
+ const struct cfg80211_bss_ies *ies;
+ u32 len;
+ struct cfg80211_bss *bss;
+
+ bss = CFG80211_GET_BSS(wiphy, NULL, bssid_hint, 0, 0);
+ if (!bss) {
+ WL_ERR(("Unable to find AP in the cache"));
+ return false;
+ }
+
+ if (rcu_access_pointer(bss->ies)) {
+ ies = rcu_access_pointer(bss->ies);
+ parse = ies->data;
+ len = ies->len;
+ } else {
+ WL_ERR(("ies is NULL"));
+ return false;
+ }
+
+ while ((ie = bcm_parse_tlvs(parse, len, DOT11_MNG_VS_ID))) {
+ if (wl_cfgoce_is_oce_ie((const uint8*)ie, (u8 const **)&parse, &len) == TRUE) {
+ return true;
+ } else {
+ ie = bcm_next_tlv((const bcm_tlv_t*) ie, &len);
+ if (!ie) {
+ return false;
+ }
+ parse = (uint8 *)ie;
+ WL_DBG(("NON OCE IE. next ie ptr:%p", parse));
+ }
+ }
+ WL_DBG(("OCE IE NOT found"));
+ return false;
+}
+#endif /* WL_FW_OCE_AP_SELECT */
+
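+/*
+ * WPS attributes are TLVs with a big-endian 16-bit attribute ID followed by
+ * a big-endian 16-bit length; the parser below reassembles each field one
+ * byte at a time and byte-swaps it with HTON16.
+ */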
+/* Dump the contents of the encoded wps ie buffer and get pbc value */
+void
+wl_validate_wps_ie(const char *wps_ie, s32 wps_ie_len, bool *pbc)
+{
+ #define WPS_IE_FIXED_LEN 6
+ s16 len;
+ const u8 *subel = NULL;
+ u16 subelt_id;
+ u16 subelt_len;
+ u16 val;
+ u8 *valptr = (uint8*) &val;
+ if (wps_ie == NULL || wps_ie_len < WPS_IE_FIXED_LEN) {
+ WL_ERR(("invalid argument : NULL\n"));
+ return;
+ }
+ len = (s16)wps_ie[TLV_LEN_OFF];
+
+ if (len > wps_ie_len) {
+ WL_ERR(("invalid length len %d, wps ie len %d\n", len, wps_ie_len));
+ return;
+ }
+ WL_DBG(("wps_ie len=%d\n", len));
+ len -= 4; /* for the WPS IE's OUI, oui_type fields */
+ subel = wps_ie + WPS_IE_FIXED_LEN;
+ while (len >= 4) { /* must have attr id, attr len fields */
+ valptr[0] = *subel++;
+ valptr[1] = *subel++;
+ subelt_id = HTON16(val);
+
+ valptr[0] = *subel++;
+ valptr[1] = *subel++;
+ subelt_len = HTON16(val);
+
+ len -= 4; /* for the attr id, attr len fields */
+ len -= (s16)subelt_len; /* for the remaining fields in this attribute */
+ if (len < 0) {
+ break;
+ }
+ WL_DBG((" subel=%p, subelt_id=0x%x subelt_len=%u\n",
+ subel, subelt_id, subelt_len));
+
+ if (subelt_id == WPS_ID_VERSION) {
+ WL_DBG((" attr WPS_ID_VERSION: %u\n", *subel));
+ } else if (subelt_id == WPS_ID_REQ_TYPE) {
+ WL_DBG((" attr WPS_ID_REQ_TYPE: %u\n", *subel));
+ } else if (subelt_id == WPS_ID_CONFIG_METHODS) {
+ valptr[0] = *subel;
+ valptr[1] = *(subel + 1);
+ WL_DBG((" attr WPS_ID_CONFIG_METHODS: %x\n", HTON16(val)));
+ } else if (subelt_id == WPS_ID_DEVICE_NAME) {
+ char devname[33];
+ int namelen = MIN(subelt_len, (sizeof(devname) - 1));
+
+ if (namelen) {
+ memcpy(devname, subel, namelen);
+ devname[namelen] = '\0';
+ /* Printing len as rx'ed in the IE */
+ WL_DBG((" attr WPS_ID_DEVICE_NAME: %s (len %u)\n",
+ devname, subelt_len));
+ }
+ } else if (subelt_id == WPS_ID_DEVICE_PWD_ID) {
+ valptr[0] = *subel;
+ valptr[1] = *(subel + 1);
+ WL_DBG((" attr WPS_ID_DEVICE_PWD_ID: %u\n", HTON16(val)));
+ *pbc = (HTON16(val) == DEV_PW_PUSHBUTTON) ? true : false;
+ } else if (subelt_id == WPS_ID_PRIM_DEV_TYPE) {
+ valptr[0] = *subel;
+ valptr[1] = *(subel + 1);
+ WL_DBG((" attr WPS_ID_PRIM_DEV_TYPE: cat=%u \n", HTON16(val)));
+ valptr[0] = *(subel + 6);
+ valptr[1] = *(subel + 7);
+ WL_DBG((" attr WPS_ID_PRIM_DEV_TYPE: subcat=%u\n", HTON16(val)));
+ } else if (subelt_id == WPS_ID_REQ_DEV_TYPE) {
+ valptr[0] = *subel;
+ valptr[1] = *(subel + 1);
+ WL_DBG((" attr WPS_ID_REQ_DEV_TYPE: cat=%u\n", HTON16(val)));
+ valptr[0] = *(subel + 6);
+ valptr[1] = *(subel + 7);
+ WL_DBG((" attr WPS_ID_REQ_DEV_TYPE: subcat=%u\n", HTON16(val)));
+ } else if (subelt_id == WPS_ID_SELECTED_REGISTRAR_CONFIG_METHODS) {
+ valptr[0] = *subel;
+ valptr[1] = *(subel + 1);
+ WL_DBG((" attr WPS_ID_SELECTED_REGISTRAR_CONFIG_METHODS"
+ ": cat=%u\n", HTON16(val)));
+ } else {
+ WL_DBG((" unknown attr 0x%x\n", subelt_id));
+ }
+
+ subel += subelt_len;
+ }
+}
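+
+/* Worked example (illustrative, not driver code): a minimal WPS IE that
+ * carries only the Device Password ID attribute set to push-button.
+ * Attribute IDs/values (0x1012, DEV_PW_PUSHBUTTON == 0x0004) follow the
+ * WPS spec; the byte values are assumptions for illustration.
+ *
+ *	const char wps_ie[] = {
+ *		0xdd, 0x0a,		// vendor IE, length 10
+ *		0x00, 0x50, 0xf2, 0x04,	// WPS OUI + type
+ *		0x10, 0x12,		// attr: Device Password ID
+ *		0x00, 0x02,		// attr length 2
+ *		0x00, 0x04		// DEV_PW_PUSHBUTTON
+ *	};
+ *	bool pbc = false;
+ *	wl_validate_wps_ie(wps_ie, sizeof(wps_ie), &pbc);	// pbc == true
+ *
+ * The parser skips the 6 fixed bytes, reads attr id 0x1012 and length 2,
+ * then sets *pbc because the value equals DEV_PW_PUSHBUTTON.
+ */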
+
+bool
+wl_cfg80211_check_vif_in_use(struct net_device *ndev)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+ bool nan_enabled = FALSE;
+
+#ifdef WL_NAN
+ nan_enabled = wl_cfgnan_is_enabled(cfg);
+#endif /* WL_NAN */
+
+ if (nan_enabled || (wl_cfgp2p_vif_created(cfg)) ||
+ (dhd->op_mode & DHD_FLAG_HOSTAP_MODE)) {
+ WL_MEM(("%s: Virtual interfaces in use. NAN %d P2P %d softAP %d\n",
+ __FUNCTION__, nan_enabled, wl_cfgp2p_vif_created(cfg),
+ (dhd->op_mode & DHD_FLAG_HOSTAP_MODE)));
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+#ifdef WL_IFACE_MGMT_CONF
+#ifdef WL_IFACE_MGMT
+static s32
+wl_cfg80211_is_policy_config_allowed(struct bcm_cfg80211 *cfg)
+{
+ s32 ret = BCME_OK;
+ wl_iftype_t active_sec_iface = WL_IFACE_NOT_PRESENT;
+ bool p2p_disc_on = false;
+ bool sta_assoc_state = false;
+ bool nan_init_state = false;
+
+ mutex_lock(&cfg->if_sync);
+
+ sta_assoc_state = (wl_get_drv_status(cfg, CONNECTED, bcmcfg_to_prmry_ndev(cfg)) ||
+ wl_get_drv_status(cfg, CONNECTING, bcmcfg_to_prmry_ndev(cfg)));
+ active_sec_iface = wl_cfg80211_get_sec_iface(cfg);
+ p2p_disc_on = wl_get_p2p_status(cfg, SCANNING);
+
+#ifdef WL_NAN
+ if (cfg->nancfg) {
+ nan_init_state = cfg->nancfg->nan_init_state;
+ }
+#endif
+
+ if ((sta_assoc_state == TRUE) || (p2p_disc_on == TRUE) ||
+ (nan_init_state == TRUE) ||
+ (active_sec_iface != WL_IFACE_NOT_PRESENT)) {
+ WL_INFORM_MEM(("Active iface matrix: sta_assoc_state = %d,"
+ " p2p_disc = %d, nan_disc = %d, active iface = %s\n",
+ sta_assoc_state, p2p_disc_on, nan_init_state,
+ wl_iftype_to_str(active_sec_iface)));
+ ret = BCME_BUSY;
+ }
+ mutex_unlock(&cfg->if_sync);
+ return ret;
+}
+#endif /* WL_IFACE_MGMT */
+#ifdef WL_NANP2P
+int
+wl_cfg80211_set_iface_conc_disc(struct net_device *ndev,
+ uint8 arg_val)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ if (!cfg) {
+ WL_ERR(("%s: Cannot find cfg\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ if (wl_cfg80211_is_policy_config_allowed(cfg) != BCME_OK) {
+ WL_ERR(("Cant allow iface management modifications\n"));
+ return BCME_BUSY;
+ }
+
+ if (arg_val) {
+ cfg->conc_disc |= arg_val;
+ } else {
+ cfg->conc_disc &= ~arg_val;
+ }
+ return BCME_OK;
+}
+
+uint8
+wl_cfg80211_get_iface_conc_disc(struct net_device *ndev)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ if (!cfg) {
+ WL_ERR(("%s: Cannot find cfg\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+ return cfg->conc_disc;
+}
+#endif /* WL_NANP2P */
+#ifdef WL_IFACE_MGMT
+int
+wl_cfg80211_set_iface_policy(struct net_device *ndev,
+ char *arg, int len)
+{
+ int ret = BCME_OK;
+ uint8 i = 0;
+ iface_mgmt_data_t *iface_data = NULL;
+
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ if (!cfg) {
+ WL_ERR(("%s: Cannot find cfg\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ if (wl_cfg80211_is_policy_config_allowed(cfg) != BCME_OK) {
+ WL_ERR(("Cant allow iface management modifications\n"));
+ return BCME_BUSY;
+ }
+
+ if (!arg || len <= 0 || len > sizeof(iface_mgmt_data_t)) {
+ return BCME_BADARG;
+ }
+
+ iface_data = (iface_mgmt_data_t *)arg;
+ if (iface_data->policy >= WL_IF_POLICY_INVALID) {
+ WL_ERR(("Unexpected value of policy = %d\n",
+ iface_data->policy));
+ return BCME_BADARG;
+ }
+
+ bzero(&cfg->iface_data, sizeof(iface_mgmt_data_t));
+ ret = memcpy_s(&cfg->iface_data, sizeof(iface_mgmt_data_t), arg, len);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy iface data, src len = %d\n", len));
+ return ret;
+ }
+
+ if (cfg->iface_data.policy == WL_IF_POLICY_ROLE_PRIORITY) {
+ for (i = 0; i < WL_IF_TYPE_MAX; i++) {
+ WL_DBG(("iface = %s, priority[i] = %d\n",
+ wl_iftype_to_str(i), cfg->iface_data.priority[i]));
+ }
+ }
+
+ return ret;
+}
+
+uint8
+wl_cfg80211_get_iface_policy(struct net_device *ndev)
+
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ if (!cfg) {
+ WL_ERR(("%s: Cannot find cfg\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ return cfg->iface_data.policy;
+}
+#endif /* WL_IFACE_MGMT */
+#endif /* WL_IFACE_MGMT_CONF */
+
+#ifdef WL_IFACE_MGMT
+/* Get active secondary data iface type */
+wl_iftype_t
+wl_cfg80211_get_sec_iface(struct bcm_cfg80211 *cfg)
+{
+#ifdef WL_STATIC_IF
+ struct net_device *static_if_ndev;
+#else
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+#endif /* WL_STATIC_IF */
+ struct net_device *p2p_ndev = NULL;
+
+ p2p_ndev = wl_to_p2p_bss_ndev(cfg,
+ P2PAPI_BSSCFG_CONNECTION1);
+
+#ifdef WL_STATIC_IF
+ static_if_ndev = wl_cfg80211_static_if_active(cfg);
+ if (static_if_ndev) {
+ if (IS_AP_IFACE(static_if_ndev->ieee80211_ptr)) {
+ return WL_IF_TYPE_AP;
+ }
+ }
+#else
+ if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
+ return WL_IF_TYPE_AP;
+ }
+#endif /* WL_STATIC_IF */
+
+ if (p2p_ndev && p2p_ndev->ieee80211_ptr) {
+ if (p2p_ndev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_GO) {
+ return WL_IF_TYPE_P2P_GO;
+ }
+
+ /* Set role to GC when cfg80211 layer downgrades P2P
+ * role to station type while bringing down the interface
+ */
+ if (p2p_ndev->ieee80211_ptr->iftype == NL80211_IFTYPE_STATION) {
+ WL_DBG_MEM(("%s, Change to GC base role\n", __FUNCTION__));
+ return WL_IF_TYPE_P2P_GC;
+ }
+
+ if (p2p_ndev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_CLIENT) {
+ return WL_IF_TYPE_P2P_GC;
+ }
+ }
+
+#ifdef WL_NAN
+ if (wl_cfgnan_is_dp_active(bcmcfg_to_prmry_ndev(cfg))) {
+ return WL_IF_TYPE_NAN;
+ }
+#endif /* WL_NAN */
+ return WL_IFACE_NOT_PRESENT;
+}
+
+/*
+* Handle an incoming data interface request based on policy.
+* If there is a conflicting interface, it will be deleted.
+*/
+static s32
+wl_cfg80211_data_if_mgmt(struct bcm_cfg80211 *cfg,
+ wl_iftype_t new_wl_iftype)
+{
+ s32 ret = BCME_OK;
+ bool del_iface = false;
+ wl_iftype_t sec_wl_if_type = wl_cfg80211_get_sec_iface(cfg);
+
+ if (sec_wl_if_type == WL_IF_TYPE_NAN &&
+ new_wl_iftype == WL_IF_TYPE_NAN) {
+ /* Multi NDP is allowed irrespective of Policy */
+ return BCME_OK;
+ }
+
+ if (sec_wl_if_type == WL_IFACE_NOT_PRESENT) {
+ /*
+ * If there is no active secondary I/F, there
+ * is no interface conflict. Do nothing.
+ */
+ return BCME_OK;
+ }
+
+ /* Handle secondary data link case */
+ switch (cfg->iface_data.policy) {
+ case WL_IF_POLICY_CUSTOM:
+ case WL_IF_POLICY_DEFAULT: {
+ WL_INFORM_MEM(("%s, Delete any existing iface\n", __FUNCTION__));
+ del_iface = true;
+ break;
+ }
+ case WL_IF_POLICY_FCFS: {
+ WL_INFORM_MEM(("Found active iface = %s, can't support new iface = %s\n",
+ wl_iftype_to_str(sec_wl_if_type), wl_iftype_to_str(new_wl_iftype)));
+ ret = BCME_ERROR;
+ break;
+ }
+ case WL_IF_POLICY_LP: {
+ WL_INFORM_MEM(("Remove active sec data interface, allow incoming iface\n"));
+ /* Delete existing data iface and allow incoming sec iface */
+ del_iface = true;
+ break;
+ }
+ case WL_IF_POLICY_ROLE_PRIORITY: {
+ WL_INFORM_MEM(("Existing iface = %s (%d) and new iface = %s (%d)\n",
+ wl_iftype_to_str(sec_wl_if_type),
+ cfg->iface_data.priority[sec_wl_if_type],
+ wl_iftype_to_str(new_wl_iftype),
+ cfg->iface_data.priority[new_wl_iftype]));
+ if (cfg->iface_data.priority[new_wl_iftype] >
+ cfg->iface_data.priority[sec_wl_if_type]) {
+ del_iface = true;
+ } else {
+ WL_ERR(("Can't support new iface = %s\n",
+ wl_iftype_to_str(new_wl_iftype)));
+ ret = BCME_ERROR;
+ }
+ break;
+ }
+ default: {
+ WL_ERR(("Unsupported interface policy = %d\n",
+ cfg->iface_data.policy));
+ return BCME_ERROR;
+ }
+ }
+ if (del_iface) {
+ ret = wl_cfg80211_delete_iface(cfg, sec_wl_if_type);
+ }
+ return ret;
+}
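+
+/* Example (illustrative): with WL_IF_POLICY_ROLE_PRIORITY and a
+ * hypothetical table where priority[WL_IF_TYPE_AP] = 2 and
+ * priority[WL_IF_TYPE_NAN] = 1, an incoming AP iface while a NAN data
+ * iface is active deletes the NAN iface (2 > 1), whereas the reverse
+ * request is rejected with BCME_ERROR.
+ */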
+
+/* Handle discovery ifaces based on policy */
+static s32
+wl_cfg80211_disc_if_mgmt(struct bcm_cfg80211 *cfg,
+ wl_iftype_t new_wl_iftype, bool *disable_nan, bool *disable_p2p)
+{
+ s32 ret = BCME_OK;
+ wl_iftype_t sec_wl_if_type =
+ wl_cfg80211_get_sec_iface(cfg);
+ *disable_p2p = false;
+ *disable_nan = false;
+
+ if (sec_wl_if_type == WL_IF_TYPE_NAN &&
+ new_wl_iftype == WL_IF_TYPE_NAN) {
+ /* Multi NDP is allowed irrespective of Policy */
+ return BCME_OK;
+ }
+
+ /*
+ * Check for any policy conflicts with active secondary
+ * interface for incoming discovery iface
+ */
+ if ((sec_wl_if_type != WL_IFACE_NOT_PRESENT) &&
+ (is_discovery_iface(new_wl_iftype))) {
+ switch (cfg->iface_data.policy) {
+ case WL_IF_POLICY_CUSTOM: {
+ if (sec_wl_if_type == WL_IF_TYPE_NAN &&
+ new_wl_iftype == WL_IF_TYPE_P2P_DISC) {
+ WL_INFORM_MEM(("Allow P2P Discovery with active NDP\n"));
+ /* No further checks are required. */
+ return BCME_OK;
+ }
+ /*
+ * Intentional fall-through to the default policy,
+ * since AP and associated ifaces are handled the same.
+ */
+ }
+ case WL_IF_POLICY_DEFAULT: {
+ if (sec_wl_if_type == WL_IF_TYPE_AP) {
+ WL_INFORM_MEM(("AP is active, cant support new iface\n"));
+ ret = BCME_ERROR;
+ } else if (sec_wl_if_type == WL_IF_TYPE_P2P_GC ||
+ sec_wl_if_type == WL_IF_TYPE_P2P_GO) {
+ if (new_wl_iftype == WL_IF_TYPE_P2P_DISC) {
+ /*
+ * Associated discovery case,
+ * Fall through
+ */
+ } else {
+ /* Active iface is present, returning error */
+ WL_INFORM_MEM(("P2P group is active,"
+ " cant support new iface\n"));
+ ret = BCME_ERROR;
+ }
+ } else if (sec_wl_if_type == WL_IF_TYPE_NAN) {
+ ret = wl_cfg80211_delete_iface(cfg, sec_wl_if_type);
+ }
+ break;
+ }
+ case WL_IF_POLICY_FCFS: {
+ WL_INFORM_MEM(("Can't support new iface = %s\n",
+ wl_iftype_to_str(new_wl_iftype)));
+ ret = BCME_ERROR;
+ break;
+ }
+ case WL_IF_POLICY_LP: {
+ /* Delete existing data iface and allow incoming sec iface */
+ WL_INFORM_MEM(("Remove active sec data interface = %s\n",
+ wl_iftype_to_str(sec_wl_if_type)));
+ ret = wl_cfg80211_delete_iface(cfg,
+ sec_wl_if_type);
+ break;
+ }
+ case WL_IF_POLICY_ROLE_PRIORITY: {
+ WL_INFORM_MEM(("Existing iface = %s (%d) and new iface = %s (%d)\n",
+ wl_iftype_to_str(sec_wl_if_type),
+ cfg->iface_data.priority[sec_wl_if_type],
+ wl_iftype_to_str(new_wl_iftype),
+ cfg->iface_data.priority[new_wl_iftype]));
+ if (cfg->iface_data.priority[new_wl_iftype] >
+ cfg->iface_data.priority[sec_wl_if_type]) {
+ WL_INFORM_MEM(("Remove active sec data iface\n"));
+ ret = wl_cfg80211_delete_iface(cfg,
+ sec_wl_if_type);
+ } else {
+ WL_ERR(("Can't support new iface = %s"
+ " due to low priority\n",
+ wl_iftype_to_str(new_wl_iftype)));
+ ret = BCME_ERROR;
+ }
+ break;
+ }
+ default: {
+ WL_ERR(("Unsupported policy\n"));
+ return BCME_ERROR;
+ }
+ }
+ } else {
+ /*
+ * Handle incoming new secondary iface request,
+ * irrespective of existing discovery ifaces
+ */
+ if ((cfg->iface_data.policy == WL_IF_POLICY_CUSTOM) &&
+ (new_wl_iftype == WL_IF_TYPE_NAN)) {
+ WL_INFORM_MEM(("Allow NAN Data Path\n"));
+ /* No further checks are required. */
+ return BCME_OK;
+ }
+ }
+
+ /* Check for any conflicting discovery iface */
+ switch (new_wl_iftype) {
+ case WL_IF_TYPE_P2P_DISC:
+ case WL_IF_TYPE_P2P_GO:
+ case WL_IF_TYPE_P2P_GC: {
+ *disable_nan = true;
+ break;
+ }
+ case WL_IF_TYPE_NAN_NMI:
+ case WL_IF_TYPE_NAN: {
+ *disable_p2p = true;
+ break;
+ }
+ case WL_IF_TYPE_STA:
+ case WL_IF_TYPE_AP: {
+ *disable_nan = true;
+ *disable_p2p = true;
+ break;
+ }
+ default: {
+ WL_ERR(("Unsupported\n"));
+ return BCME_ERROR;
+ }
+ }
+ return ret;
+}
+
+static bool
+wl_cfg80211_is_associated_discovery(struct bcm_cfg80211 *cfg,
+ wl_iftype_t new_wl_iftype)
+{
+ struct net_device *p2p_ndev = NULL;
+ p2p_ndev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION1);
+
+ if (new_wl_iftype == WL_IF_TYPE_P2P_DISC && p2p_ndev &&
+ p2p_ndev->ieee80211_ptr &&
+ is_p2p_group_iface(p2p_ndev->ieee80211_ptr)) {
+ return true;
+ }
+#ifdef WL_NAN
+ else if ((new_wl_iftype == WL_IF_TYPE_NAN_NMI) &&
+ (wl_cfgnan_is_dp_active(bcmcfg_to_prmry_ndev(cfg)))) {
+ return true;
+ }
+#endif /* WL_NAN */
+ return false;
+}
+
+/* Handle incoming discovery iface request */
+static s32
+wl_cfg80211_handle_discovery_config(struct bcm_cfg80211 *cfg,
+ wl_iftype_t new_wl_iftype)
+{
+ s32 ret = BCME_OK;
+ bool disable_p2p = false;
+ bool disable_nan = false;
+
+ wl_iftype_t active_sec_iface =
+ wl_cfg80211_get_sec_iface(cfg);
+
+ if (is_discovery_iface(new_wl_iftype) &&
+ (active_sec_iface != WL_IFACE_NOT_PRESENT)) {
+ if (wl_cfg80211_is_associated_discovery(cfg,
+ new_wl_iftype) == TRUE) {
+ WL_DBG(("Associate iface request is allowed= %s\n",
+ wl_iftype_to_str(new_wl_iftype)));
+ return ret;
+ }
+ }
+
+ ret = wl_cfg80211_disc_if_mgmt(cfg, new_wl_iftype,
+ &disable_nan, &disable_p2p);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed at disc iface mgmt, ret = %d\n", ret));
+ return ret;
+ }
+#ifdef WL_NANP2P
+ if (((new_wl_iftype == WL_IF_TYPE_P2P_DISC) && disable_nan) ||
+ ((new_wl_iftype == WL_IF_TYPE_NAN_NMI) && disable_p2p)) {
+ if ((cfg->nan_p2p_supported == TRUE) &&
+ (cfg->conc_disc == WL_NANP2P_CONC_SUPPORT)) {
+ WL_INFORM_MEM(("P2P + NAN conc is supported\n"));
+ disable_p2p = false;
+ disable_nan = false;
+ }
+ }
+#endif /* WL_NANP2P */
+
+ if (disable_nan) {
+#ifdef WL_NAN
+ /* Disable nan to avoid conflict with p2p */
+ ret = wl_cfgnan_check_nan_disable_pending(cfg, true, true);
+ if (ret != BCME_OK) {
+ WL_ERR(("failed to disable nan, error[%d]\n", ret));
+ return ret;
+ }
+#endif /* WL_NAN */
+ }
+
+ if (disable_p2p) {
+ /* Disable p2p discovery */
+ ret = wl_cfg80211_deinit_p2p_discovery(cfg);
+ if (ret != BCME_OK) {
+ /* Should we fail NAN enable here? */
+ WL_ERR(("Failed to disable p2p_disc for allowing nan\n"));
+ return ret;
+ }
+ }
+ return ret;
+}
+
+/*
+* Check for any conflicting iface before adding a new iface.
+* Based on policy, either the conflicting iface is removed
+* or the new iface add request is blocked.
+*/
+s32
+wl_cfg80211_handle_if_role_conflict(struct bcm_cfg80211 *cfg,
+ wl_iftype_t new_wl_iftype)
+{
+ s32 ret = BCME_OK;
+#ifdef P2P_AP_CONCURRENT
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+#endif
+
+ WL_INFORM_MEM(("Incoming iface = %s\n", wl_iftype_to_str(new_wl_iftype)));
+
+#ifdef P2P_AP_CONCURRENT
+ if (dhd->conf->war & P2P_AP_MAC_CONFLICT) {
+ return ret;
+ } else
+#endif
+#ifdef WL_STATIC_IF
+ if (wl_cfg80211_get_sec_iface(cfg) == WL_IF_TYPE_AP &&
+ new_wl_iftype == WL_IF_TYPE_AP) {
+ } else
+#endif /* WL_STATIC_IF */
+ if (!is_discovery_iface(new_wl_iftype)) {
+ /* Incoming data interface request */
+ if (wl_cfg80211_get_sec_iface(cfg) != WL_IFACE_NOT_PRESENT) {
+ /* active interface present - Apply interface data policy */
+ ret = wl_cfg80211_data_if_mgmt(cfg, new_wl_iftype);
+ if (ret != BCME_OK) {
+ WL_ERR(("if_mgmt fail:%d\n", ret));
+ return ret;
+ }
+ }
+ }
+ /* Apply discovery config */
+ ret = wl_cfg80211_handle_discovery_config(cfg, new_wl_iftype);
+ return ret;
+}
+#endif /* WL_IFACE_MGMT */
+
+s32
+wl_release_vif_macaddr(struct bcm_cfg80211 *cfg, u8 *mac_addr, u16 wl_iftype)
+{
+ struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+ u16 org_toggle_bytes;
+ u16 cur_toggle_bytes;
+ u16 toggled_bit;
+
+ if (!ndev || !mac_addr || ETHER_ISNULLADDR(mac_addr)) {
+ return -EINVAL;
+ }
+ WL_DBG(("%s:Mac addr" MACDBG "\n",
+ __FUNCTION__, MAC2STRDBG(mac_addr)));
+
+ if ((wl_iftype == WL_IF_TYPE_P2P_DISC) || (wl_iftype == WL_IF_TYPE_AP) ||
+ (wl_iftype == WL_IF_TYPE_P2P_GO) || (wl_iftype == WL_IF_TYPE_P2P_GC)) {
+ /* Avoid invoking release mac addr code for interfaces using
+ * fixed mac addr.
+ */
+ return BCME_OK;
+ }
+
+ /* Fetch last two bytes of mac address */
+ org_toggle_bytes = ntoh16(*((u16 *)&ndev->dev_addr[4]));
+ cur_toggle_bytes = ntoh16(*((u16 *)&mac_addr[4]));
+
+ toggled_bit = (org_toggle_bytes ^ cur_toggle_bytes);
+ WL_DBG(("org_toggle_bytes:%04X cur_toggle_bytes:%04X\n",
+ org_toggle_bytes, cur_toggle_bytes));
+ if (toggled_bit & cfg->vif_macaddr_mask) {
+ /* This toggled_bit is marked in the used mac addr
+ * mask. Clear it.
+ */
+ cfg->vif_macaddr_mask &= ~toggled_bit;
+ WL_INFORM(("MAC address - " MACDBG " released. toggled_bit:%04X vif_mask:%04X\n",
+ MAC2STRDBG(mac_addr), toggled_bit, cfg->vif_macaddr_mask));
+ } else {
+ WL_ERR(("MAC address - " MACDBG " not found in the used list."
+ " toggled_bit:%04x vif_mask:%04x\n", MAC2STRDBG(mac_addr),
+ toggled_bit, cfg->vif_macaddr_mask));
+ return -EINVAL;
+ }
+
+ return BCME_OK;
+}
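+
+/* Example (illustrative): if the primary ndev MAC ends in ...:12:34 and
+ * a VIF was created ending in ...:92:34 (bit 0x8000 of the last two
+ * bytes toggled), releasing it computes toggled_bit = 0x1234 ^ 0x9234 =
+ * 0x8000 and clears that bit from cfg->vif_macaddr_mask.
+ */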
+
+s32
+wl_get_vif_macaddr(struct bcm_cfg80211 *cfg, u16 wl_iftype, u8 *mac_addr)
+{
+ struct ether_addr *p2p_dev_addr = wl_to_p2p_bss_macaddr(cfg, P2PAPI_BSSCFG_DEVICE);
+ struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+ u16 toggle_mask;
+ u16 toggle_bit;
+ u16 toggle_bytes;
+ u16 used;
+ u32 offset = 0;
+ /* Toggle mask starts from MSB of second last byte */
+ u16 mask = 0x8000;
+ if (!mac_addr) {
+ return -EINVAL;
+ }
+ if ((wl_iftype == WL_IF_TYPE_P2P_DISC) && p2p_dev_addr &&
+ ETHER_IS_LOCALADDR(p2p_dev_addr)) {
+ /* If mac address is already generated return the mac */
+ (void)memcpy_s(mac_addr, ETH_ALEN, p2p_dev_addr->octet, ETH_ALEN);
+ return 0;
+ }
+ (void)memcpy_s(mac_addr, ETH_ALEN, ndev->perm_addr, ETH_ALEN);
+/*
+ * VIF MAC address management
+ * P2P Device address: primary MAC with the locally administered bit set.
+ * P2P Group address/NAN NMI/SoftAP/NAN NDI: primary MAC address
+ * with the locally administered bit set and one additional bit toggled.
+ * cfg->vif_macaddr_mask tracks which toggle bits are currently in use.
+ * Ensure wl_release_vif_macaddr() is called to free up the MAC address.
+ */
+#if defined(SPECIFIC_MAC_GEN_SCHEME)
+ if (wl_iftype == WL_IF_TYPE_P2P_DISC || wl_iftype == WL_IF_TYPE_AP) {
+ mac_addr[0] |= 0x02;
+ } else if ((wl_iftype == WL_IF_TYPE_P2P_GO) || (wl_iftype == WL_IF_TYPE_P2P_GC)) {
+ mac_addr[0] |= 0x02;
+ mac_addr[4] ^= 0x80;
+ }
+#else
+ if (wl_iftype == WL_IF_TYPE_P2P_DISC) {
+ mac_addr[0] |= 0x02;
+ }
+#endif /* SPECIFIC_MAC_GEN_SCHEME */
+ else {
+ /* For locally administered mac addresses, we keep the
+ * OUI part constant and just work on the last two bytes.
+ */
+ mac_addr[0] |= 0x02;
+ toggle_mask = cfg->vif_macaddr_mask;
+ toggle_bytes = ntoh16(*((u16 *)&mac_addr[4]));
+ do {
+ used = toggle_mask & mask;
+ if (!used) {
+ /* Use this bit position */
+ toggle_bit = mask >> offset;
+ toggle_bytes ^= toggle_bit;
+ cfg->vif_macaddr_mask |= toggle_bit;
+ WL_DBG(("toggle_bit:%04X toggle_bytes:%04X toggle_mask:%04X\n",
+ toggle_bit, toggle_bytes, cfg->vif_macaddr_mask));
+ /* MAC addresses are stored in network order */
+ mac_addr[5] = *((u8 *)&toggle_bytes);
+ mac_addr[4] = *(((u8 *)&toggle_bytes + 1));
+ break;
+ }
+
+ /* Shift by one */
+ toggle_mask = toggle_mask << 0x1;
+ offset++;
+ if (offset > MAX_VIF_OFFSET) {
+ /* We have used up all MAC addresses. Something is wrong! */
+ WL_ERR(("Entire range of MAC addresses used up.\n"));
+ ASSERT(0);
+ break;
+ }
+ } while (true);
+ }
+ WL_INFORM_MEM(("Get virtual I/F mac addr: "MACDBG"\n", MAC2STRDBG(mac_addr)));
+ return 0;
+}
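+
+/* Worked example (illustrative): primary MAC 00:90:4c:11:22:33,
+ * assuming the generic toggle path (no SPECIFIC_MAC_GEN_SCHEME).
+ * - P2P Device: 02:90:4c:11:22:33 (locally administered bit only).
+ * - First VIF: mask bit 0x8000 is free, so the last two bytes become
+ *   0x2233 ^ 0x8000 = 0xa233, i.e. 02:90:4c:11:a2:33, and
+ *   cfg->vif_macaddr_mask becomes 0x8000.
+ * - A second VIF then toggles 0x4000, giving 02:90:4c:11:62:33.
+ */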
+
+bcm_struct_cfgdev *
+wl_cfg80211_add_virtual_iface(struct wiphy *wiphy,
+#if defined(WL_CFG80211_P2P_DEV_IF)
+ const char *name,
+#else
+ char *name,
+#endif /* WL_CFG80211_P2P_DEV_IF */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+ unsigned char name_assign_type,
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) */
+ enum nl80211_iftype type,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+ u32 *flags,
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0) */
+ struct vif_params *params)
+{
+ u16 wl_iftype;
+ u16 wl_mode;
+ struct net_device *primary_ndev;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct wireless_dev *wdev;
+
+ WL_DBG(("Enter iftype: %d\n", type));
+ if (!cfg) {
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Use primary I/F for sending cmds down to firmware */
+ primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+ if (unlikely(!wl_get_drv_status(cfg, READY, primary_ndev))) {
+ WL_ERR(("device is not ready\n"));
+ return ERR_PTR(-ENODEV);
+ }
+
+ if (!name) {
+ WL_ERR(("Interface name not provided \n"));
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (cfg80211_to_wl_iftype(type, &wl_iftype, &wl_mode) < 0) {
+ return ERR_PTR(-EINVAL);
+ }
+
+ wdev = wl_cfg80211_add_if(cfg, primary_ndev, wl_iftype, name, NULL);
+ if (unlikely(!wdev)) {
+ return ERR_PTR(-ENODEV);
+ }
+ return wdev_to_cfgdev(wdev);
+}
+
+s32
+wl_cfg80211_del_virtual_iface(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev)
+{
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct wireless_dev *wdev = cfgdev_to_wdev(cfgdev);
+ int ret = BCME_OK;
+ u16 wl_iftype;
+ u16 wl_mode;
+ struct net_device *primary_ndev;
+
+ if (!cfg) {
+ return -EINVAL;
+ }
+
+ primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+ wdev = cfgdev_to_wdev(cfgdev);
+ if (!wdev) {
+ WL_ERR(("wdev null"));
+ return -ENODEV;
+ }
+
+ WL_DBG(("Enter wdev:%p iftype: %d\n", wdev, wdev->iftype));
+ if (cfg80211_to_wl_iftype(wdev->iftype, &wl_iftype, &wl_mode) < 0) {
+ WL_ERR(("Wrong iftype: %d\n", wdev->iftype));
+ return -ENODEV;
+ }
+
+ if ((ret = wl_cfg80211_del_if(cfg, primary_ndev,
+ wdev, NULL)) < 0) {
+ WL_ERR(("IF del failed\n"));
+ }
+
+ return ret;
+}
+
+static s32
+wl_cfg80211_change_p2prole(struct wiphy *wiphy, struct net_device *ndev, enum nl80211_iftype type)
+{
+ s32 wlif_type;
+ s32 mode = 0;
+ s32 index;
+ s32 err;
+ s32 conn_idx = -1;
+ chanspec_t chspec;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct ether_addr p2p_dev_addr = {{0, 0, 0, 0, 0, 0}};
+#ifdef BCMDONGLEHOST
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+#endif /* BCMDONGLEHOST */
+
+ WL_INFORM_MEM(("Enter. current_role:%d new_role:%d \n", ndev->ieee80211_ptr->iftype, type));
+
+ (void)memcpy_s(p2p_dev_addr.octet, ETHER_ADDR_LEN,
+ ndev->dev_addr, ETHER_ADDR_LEN);
+
+ if (!cfg->p2p || !wl_cfgp2p_vif_created(cfg)) {
+ WL_ERR(("P2P not initialized \n"));
+ return -EINVAL;
+ }
+
+ if (!is_p2p_group_iface(ndev->ieee80211_ptr)) {
+ WL_ERR(("Wrong if type \n"));
+ return -EINVAL;
+ }
+
+ /* Abort any on-going scans to avoid race condition issues */
+ wl_cfgscan_cancel_scan(cfg);
+
+ index = wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr);
+ if (index < 0) {
+ WL_ERR(("Find bsscfg index from ndev(%p) failed\n", ndev));
+ return BCME_ERROR;
+ }
+ if (wl_cfgp2p_find_type(cfg, index, &conn_idx) != BCME_OK) {
+ return BCME_ERROR;
+ }
+
+ /* In the concurrency case, the STA may already be associated on a
+ * particular channel, so retrieve the current channel of the primary
+ * interface and start the virtual interface on that channel.
+ */
+ */
+ chspec = wl_cfg80211_get_shared_freq(wiphy);
+ if (type == NL80211_IFTYPE_P2P_GO) {
+ /* Dual P2P doesn't support multiple P2P GO interfaces;
+ * p2p_go_count is the counter for GO creation
+ * requests.
+ */
+ if (cfg->p2p->p2p_go_count > 0) {
+ WL_ERR(("FW does not support multiple GO\n"));
+ return BCME_ERROR;
+ }
+ mode = WL_MODE_AP;
+ wlif_type = WL_P2P_IF_GO;
+#ifdef BCMDONGLEHOST
+ dhd->op_mode &= ~DHD_FLAG_P2P_GC_MODE;
+ dhd->op_mode |= DHD_FLAG_P2P_GO_MODE;
+#endif /* BCMDONGLEHOST */
+ } else {
+ wlif_type = WL_P2P_IF_CLIENT;
+ /* for GO */
+ if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_AP) {
+ WL_INFORM_MEM(("Downgrading P2P GO to cfg_iftype:%d \n", type));
+ wl_add_remove_eventmsg(ndev, WLC_E_PROBREQ_MSG, false);
+ cfg->p2p->p2p_go_count--;
+ /* disable interface before bsscfg free */
+ err = wl_cfgp2p_ifdisable(cfg, &p2p_dev_addr);
+ /* if fw doesn't support "ifdis",
+ * do not wait for link down of ap mode
+ */
+ if (err == 0) {
+ WL_DBG(("Wait for Link Down event for GO !!!\n"));
+ wait_for_completion_timeout(&cfg->iface_disable,
+ msecs_to_jiffies(500));
+ } else if (err != BCME_UNSUPPORTED) {
+ msleep(300);
+ }
+ }
+ }
+
+ wl_set_p2p_status(cfg, IF_CHANGING);
+ wl_clr_p2p_status(cfg, IF_CHANGED);
+ wl_cfgp2p_ifchange(cfg, &p2p_dev_addr,
+ htod32(wlif_type), chspec, conn_idx);
+ wait_event_interruptible_timeout(cfg->netif_change_event,
+ (wl_get_p2p_status(cfg, IF_CHANGED) == true),
+ msecs_to_jiffies(MAX_WAIT_TIME));
+
+ wl_clr_p2p_status(cfg, IF_CHANGING);
+ wl_clr_p2p_status(cfg, IF_CHANGED);
+
+ if (mode == WL_MODE_AP) {
+ wl_set_drv_status(cfg, CONNECTED, ndev);
+#ifdef SUPPORT_AP_POWERSAVE
+ dhd_set_ap_powersave(dhd, 0, TRUE);
+#endif /* SUPPORT_AP_POWERSAVE */
+ }
+
+ return BCME_OK;
+}
+
+s32
+wl_cfg80211_change_virtual_iface(struct wiphy *wiphy, struct net_device *ndev,
+ enum nl80211_iftype type,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+ u32 *flags,
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0) */
+ struct vif_params *params)
+{
+ s32 infra = 1;
+ s32 err = BCME_OK;
+ u16 wl_iftype;
+ u16 wl_mode;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct net_info *netinfo = NULL;
+#ifdef BCMDONGLEHOST
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+#endif /* BCMDONGLEHOST */
+ struct net_device *primary_ndev;
+
+#ifdef BCMDONGLEHOST
+ if (!dhd)
+ return -EINVAL;
+#endif /* BCMDONGLEHOST */
+
+ WL_INFORM_MEM(("[%s] Enter. current cfg_iftype:%d new cfg_iftype:%d \n",
+ ndev->name, ndev->ieee80211_ptr->iftype, type));
+ primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+
+ if (cfg80211_to_wl_iftype(type, &wl_iftype, &wl_mode) < 0) {
+ WL_ERR(("Unknown role \n"));
+ return -EINVAL;
+ }
+
+ mutex_lock(&cfg->if_sync);
+ netinfo = wl_get_netinfo_by_wdev(cfg, ndev->ieee80211_ptr);
+ if (unlikely(!netinfo)) {
+#ifdef WL_STATIC_IF
+ if (wl_cfg80211_static_if(cfg, ndev)) {
+ /* In case of static interfaces, the netinfo will be
+ * allocated only when the FW interface is initialized. So
+ * store the value and use it during initialization.
+ */
+ WL_INFORM_MEM(("skip change vif for static if\n"));
+ ndev->ieee80211_ptr->iftype = type;
+ err = BCME_OK;
+ } else
+#endif /* WL_STATIC_IF */
+ {
+ WL_ERR(("netinfo not found \n"));
+ err = -ENODEV;
+ }
+ goto fail;
+ }
+
+ if ((primary_ndev == ndev) && !(ndev->flags & IFF_UP)) {
+ /*
+ * If the interface is not initialized, store the role and
+ * return. The role will be initialized after the interface
+ * is brought up.
+ */
+ WL_INFORM_MEM(("skip change role before dev up\n"));
+ ndev->ieee80211_ptr->iftype = type;
+ err = BCME_OK;
+ goto fail;
+ }
+
+ /* perform pre-if-change tasks */
+ wl_cfg80211_iface_state_ops(ndev->ieee80211_ptr,
+ WL_IF_CHANGE_REQ, wl_iftype, wl_mode);
+
+ switch (type) {
+ case NL80211_IFTYPE_ADHOC:
+ infra = 0;
+ break;
+ case NL80211_IFTYPE_STATION:
+ /* Supplicant sets iftype to STATION while removing p2p GO */
+ if (ndev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_GO) {
+ /* Downgrading P2P GO */
+ err = wl_cfg80211_change_p2prole(wiphy, ndev, type);
+ if (unlikely(err)) {
+ WL_ERR(("P2P downgrade failed \n"));
+ }
+ } else if (ndev->ieee80211_ptr->iftype == NL80211_IFTYPE_AP) {
+ /* Downgrade role from AP to STA */
+ if ((err = wl_cfg80211_add_del_bss(cfg, ndev,
+ netinfo->bssidx, wl_iftype, 0, NULL)) < 0) {
+ WL_ERR(("AP-STA Downgrade failed \n"));
+ goto fail;
+ }
+ }
+ break;
+ case NL80211_IFTYPE_AP:
+ /* intentional fall through */
+ case NL80211_IFTYPE_AP_VLAN:
+ {
+ if (!wl_get_drv_status(cfg, AP_CREATED, ndev) &&
+ wl_get_drv_status(cfg, READY, ndev)) {
+#if defined(BCMDONGLEHOST) && !defined(OEM_ANDROID)
+ dhd->op_mode = DHD_FLAG_HOSTAP_MODE;
+#endif /* BCMDONGLEHOST */
+ err = wl_cfg80211_set_ap_role(cfg, ndev);
+ if (unlikely(err)) {
+ WL_ERR(("set ap role failed!\n"));
+ goto fail;
+ }
+ } else {
+ WL_INFORM_MEM(("AP_CREATED bit set. Skip role change\n"));
+ }
+ break;
+ }
+ case NL80211_IFTYPE_P2P_GO:
+ /* Intentional fall through */
+ case NL80211_IFTYPE_P2P_CLIENT:
+ infra = 1;
+ err = wl_cfg80211_change_p2prole(wiphy, ndev, type);
+ break;
+ case NL80211_IFTYPE_MONITOR:
+ case NL80211_IFTYPE_WDS:
+ case NL80211_IFTYPE_MESH_POINT:
+ /* Intentional fall through */
+ default:
+ WL_ERR(("Unsupported type:%d \n", type));
+ err = -EINVAL;
+ goto fail;
+ }
+
+ if (wl_get_drv_status(cfg, READY, ndev)) {
+ err = wldev_ioctl_set(ndev, WLC_SET_INFRA, &infra, sizeof(s32));
+ if (err < 0) {
+ WL_ERR(("SET INFRA/IBSS error %d\n", err));
+ goto fail;
+ }
+ }
+
+ wl_cfg80211_iface_state_ops(primary_ndev->ieee80211_ptr,
+ WL_IF_CHANGE_DONE, wl_iftype, wl_mode);
+
+ /* Update new iftype in relevant structures */
+ if (is_p2p_group_iface(ndev->ieee80211_ptr) && (type == NL80211_IFTYPE_STATION)) {
+ /* For role downgrade cases, we keep interface role as GC */
+ netinfo->iftype = WL_IF_TYPE_P2P_GC;
+ WL_DBG_MEM(("[%s] Set base role to GC, current role"
+ "ndev->ieee80211_ptr->iftype = %d\n",
+ __FUNCTION__, ndev->ieee80211_ptr->iftype));
+ } else {
+ netinfo->iftype = wl_iftype;
+ }
+
+ ndev->ieee80211_ptr->iftype = type;
+
+ WL_INFORM_MEM(("[%s] cfg_iftype changed to %d\n", ndev->name, type));
+#ifdef WL_EXT_IAPSTA
+ wl_ext_iapsta_update_iftype(ndev, wl_iftype);
+#endif
+
+fail:
+ if (err) {
+ wl_flush_fw_log_buffer(ndev, FW_LOGSET_MASK_ALL);
+ }
+ mutex_unlock(&cfg->if_sync);
+ return err;
+}
+
+#ifdef SUPPORT_AP_BWCTRL
+static chanspec_t
+wl_channel_to_chanspec(struct wiphy *wiphy, struct net_device *dev, u32 channel, u32 bw_cap)
+{
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ u8 *buf = NULL;
+ wl_uint32_list_t *list;
+ int err = BCME_OK;
+ chanspec_t c = 0, ret_c = 0;
+ int bw = 0, tmp_bw = 0;
+ int i;
+ u32 tmp_c;
+
+#define LOCAL_BUF_SIZE 1024
+ buf = (u8 *)MALLOC(cfg->osh, LOCAL_BUF_SIZE);
+ if (!buf) {
+ WL_ERR(("buf memory alloc failed\n"));
+ goto exit;
+ }
+
+ err = wldev_iovar_getbuf_bsscfg(dev, "chanspecs", NULL,
+ 0, buf, LOCAL_BUF_SIZE, 0, &cfg->ioctl_buf_sync);
+ if (err != BCME_OK) {
+ WL_ERR(("get chanspecs failed with %d\n", err));
+ goto exit;
+ }
+
+ list = (wl_uint32_list_t *)(void *)buf;
+ for (i = 0; i < dtoh32(list->count); i++) {
+ c = dtoh32(list->element[i]);
+ if (channel <= CH_MAX_2G_CHANNEL) {
+ if (!CHSPEC_IS20(c))
+ continue;
+ if (channel == CHSPEC_CHANNEL(c)) {
+ ret_c = c;
+ bw = 20;
+ goto exit;
+ }
+ }
+ tmp_c = wf_chspec_ctlchan(c);
+ tmp_bw = bw2cap[CHSPEC_BW(c) >> WL_CHANSPEC_BW_SHIFT];
+ if (tmp_c != channel)
+ continue;
+
+ if ((tmp_bw > bw) && (tmp_bw <= bw_cap)) {
+ bw = tmp_bw;
+ ret_c = c;
+ if (bw == bw_cap)
+ goto exit;
+ }
+ }
+exit:
+ if (buf) {
+ MFREE(cfg->osh, buf, LOCAL_BUF_SIZE);
+ }
+#undef LOCAL_BUF_SIZE
+ WL_DBG(("return chanspec %x %d\n", ret_c, bw));
+ return ret_c;
+}
+#endif /* SUPPORT_AP_BWCTRL */
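+
+/* Example (illustrative): for channel 36 with bw_cap = WLC_BW_CAP_80MHZ,
+ * the loop above skips the 2G-only fast path, tracks the widest chanspec
+ * whose control channel is 36, and returns as soon as an 80MHz entry is
+ * found (tmp_bw == bw_cap); 160MHz entries are ignored since their cap
+ * value exceeds bw_cap.
+ */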
+
+void
+wl_cfg80211_cleanup_virtual_ifaces(struct bcm_cfg80211 *cfg, bool rtnl_lock_reqd)
+{
+ struct net_info *iter, *next;
+ struct net_device *primary_ndev;
+
+ /* Note: This function will clean up only the network interface and host
+ * data structures. The firmware interface clean up will happen in the
+ * during chip reset (ifconfig wlan0 down for built-in drivers/rmmod
+ * context for the module case).
+ */
+ primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+ WL_DBG(("Enter\n"));
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ for_each_ndev(cfg, iter, next) {
+ GCC_DIAGNOSTIC_POP();
+ if (iter->ndev && (iter->ndev != primary_ndev)) {
+ /* Ensure interfaces are down before deleting */
+#ifdef WL_STATIC_IF
+ /* Avoiding cleaning static ifaces */
+ if (!wl_cfg80211_static_if(cfg, iter->ndev))
+#endif /* WL_STATIC_IF */
+ {
+ dev_close(iter->ndev);
+ WL_DBG(("Cleaning up iface:%s \n", iter->ndev->name));
+ wl_cfg80211_post_ifdel(iter->ndev, rtnl_lock_reqd, 0);
+ }
+ }
+ }
+}
+
+int
+wl_get_bandwidth_cap(struct net_device *ndev, uint32 band, uint32 *bandwidth)
+{
+ u32 bw = WL_CHANSPEC_BW_20;
+ s32 err = BCME_OK;
+ s32 bw_cap = 0;
+ struct {
+ u32 band;
+ u32 bw_cap;
+ } param = {0, 0};
+ u8 ioctl_buf[WLC_IOCTL_SMLEN];
+
+ if (band == WL_CHANSPEC_BAND_5G) {
+ param.band = WLC_BAND_5G;
+ }
+ else if (band == WL_CHANSPEC_BAND_2G) {
+ param.band = WLC_BAND_2G;
+ }
+#ifdef WL_6G_BAND
+ else if (band == WL_CHANSPEC_BAND_6G) {
+ param.band = WLC_BAND_6G;
+ }
+#endif
+ if (param.band) {
+ /* bw_cap is a newly defined iovar for checking the bandwidth
+ * capability of the band in Aardvark_branch_tob
+ */
+ err = wldev_iovar_getbuf(ndev, "bw_cap", &param, sizeof(param),
+ ioctl_buf, sizeof(ioctl_buf), NULL);
+ if (err) {
+ if (err != BCME_UNSUPPORTED) {
+ WL_ERR(("bw_cap failed, %d\n", err));
+ return err;
+ } else {
+ /* if firmware doesn't support bw_cap iovar,
+ * we have to use mimo_bw_cap
+ */
+ err = wldev_iovar_getint(ndev, "mimo_bw_cap", &bw_cap);
+ if (err) {
+ WL_ERR(("error get mimo_bw_cap (%d)\n", err));
+ }
+ if (bw_cap != WLC_N_BW_20ALL) {
+ bw = WL_CHANSPEC_BW_40;
+ }
+ }
+ } else {
+ if (WL_BW_CAP_160MHZ(ioctl_buf[0])) {
+ bw = WL_CHANSPEC_BW_160;
+ } else if (WL_BW_CAP_80MHZ(ioctl_buf[0])) {
+ bw = WL_CHANSPEC_BW_80;
+ } else if (WL_BW_CAP_40MHZ(ioctl_buf[0])) {
+ bw = WL_CHANSPEC_BW_40;
+ } else {
+ bw = WL_CHANSPEC_BW_20;
+ }
+ }
+ } else if (band == WL_CHANSPEC_BAND_2G) {
+ bw = WL_CHANSPEC_BW_20;
+ }
+
+ *bandwidth = bw;
+
+ return err;
+}
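+
+/* Usage sketch (illustrative): querying the 5G bandwidth cap before
+ * building a chanspec. On firmware without the "bw_cap" iovar, the code
+ * above falls back to "mimo_bw_cap", where anything other than
+ * WLC_N_BW_20ALL is treated as 40MHz-capable.
+ *
+ *	u32 bw = WL_CHANSPEC_BW_20;
+ *	if (wl_get_bandwidth_cap(ndev, WL_CHANSPEC_BAND_5G, &bw) == BCME_OK) {
+ *		chspec = wf_create_chspec_from_primary(36, bw,
+ *			WL_CHANSPEC_BAND_5G);
+ *	}
+ */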
+
+s32
+wl_get_nl80211_band(u32 wl_band)
+{
+ s32 err = BCME_ERROR;
+
+ switch (wl_band) {
+ case WL_CHANSPEC_BAND_2G:
+ return IEEE80211_BAND_2GHZ;
+ case WL_CHANSPEC_BAND_5G:
+ return IEEE80211_BAND_5GHZ;
+#ifdef WL_BAND_6G
+ case WL_CHANSPEC_BAND_6G:
+ /* current kernels don't support a separate
+ * band for 6GHz, so until the patch is available
+ * map it under 5GHz
+ */
+ return IEEE80211_BAND_5GHZ;
+#endif /* WL_BAND_6G */
+ default:
+ WL_ERR(("unsupported Band. %d\n", wl_band));
+ }
+
+ return err;
+}
+
+s32
+wl_cfg80211_set_channel(struct wiphy *wiphy, struct net_device *dev,
+ struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type)
+{
+ chanspec_t chspec = INVCHANSPEC;
+ chanspec_t cur_chspec = INVCHANSPEC;
+ u32 bw = WL_CHANSPEC_BW_20;
+ s32 err = BCME_OK;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+#if defined(CUSTOM_SET_CPUCORE) || defined(APSTA_RESTRICTED_CHANNEL)
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+#endif /* CUSTOM_SET_CPUCORE || APSTA_RESTRICTED_CHANNEL */
+#ifdef WL_EXT_IAPSTA
+ enum nl80211_band band;
+ s32 _chan;
+#endif /* WL_EXT_IAPSTA */
+ u16 center_freq = chan->center_freq;
+
+ dev = ndev_to_wlc_ndev(dev, cfg);
+#ifdef WL_EXT_IAPSTA
+ _chan = ieee80211_frequency_to_channel(chan->center_freq);
+ if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_AP ||
+ dev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_GO) {
+ u16 wl_iftype = 0;
+ u16 wl_mode = 0;
+ if (cfg80211_to_wl_iftype(dev->ieee80211_ptr->iftype,
+ &wl_iftype, &wl_mode) < 0) {
+ WL_ERR(("Unknown interface type:0x%x\n", dev->ieee80211_ptr->iftype));
+ return -EINVAL;
+ }
+ wl_ext_iapsta_update_iftype(dev, wl_iftype);
+ _chan = wl_ext_iapsta_update_channel(dev, _chan);
+ }
+ if (CHANNEL_IS_5G(_chan))
+ band = NL80211_BAND_5GHZ;
+ else
+ band = NL80211_BAND_2GHZ;
+ center_freq = ieee80211_channel_to_frequency(_chan, band);
+#endif
+ chspec = wl_freq_to_chanspec(center_freq);
+
+ WL_MSG(dev->name, "netdev_ifidx(%d), chan_type(%d) target channel(%d) \n",
+ dev->ifindex, channel_type, CHSPEC_CHANNEL(chspec));
+
+#ifdef WL_P2P_6G
+ if (!(cfg->p2p_6g_enabled)) {
+#endif /* WL_P2P_6G */
+ if (IS_P2P_GO(dev->ieee80211_ptr) && (CHSPEC_IS6G(chspec))) {
+ WL_ERR(("P2P GO not allowed on 6G\n"));
+ return -ENOTSUPP;
+ }
+#ifdef WL_P2P_6G
+ }
+#endif /* WL_P2P_6G */
+
+#ifdef NOT_YET
+ switch (channel_type) {
+ case NL80211_CHAN_HT40MINUS:
+ /* secondary channel is below the control channel */
+ chspec = CH40MHZ_CHSPEC(CHSPEC_CHANNEL(chspec), WL_CHANSPEC_CTL_SB_UPPER);
+ break;
+ case NL80211_CHAN_HT40PLUS:
+ /* secondary channel is above the control channel */
+ chspec = CH40MHZ_CHSPEC(CHSPEC_CHANNEL(chspec), WL_CHANSPEC_CTL_SB_LOWER);
+ break;
+ default:
+ chspec = CH20MHZ_CHSPEC(CHSPEC_CHANNEL(chspec));
+
+ }
+#endif /* NOT_YET */
+
+#if defined(APSTA_RESTRICTED_CHANNEL)
+ /* Some customer platforms use a limited number of channels
+ * for the SoftAP interface in STA/SoftAP concurrent mode.
+ * - 2.4GHz channels: CH1 - CH13
+ * - 5GHz channel: CH149 (depends on the country code)
+ * If the Android framework sends an invalid channel configuration
+ * to DHD, the driver should change the channel to one suitable for
+ * STA/SoftAP concurrent mode:
+ * - Set the operating channel to CH1 (the default 2.4GHz channel for
+ * restricted APSTA mode) if the STA interface is associated to a
+ * 5GHz AP on any channel other than CH149.
+ * - Otherwise, set the channel to the same channel as the existing AP.
+ */
+ if (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP &&
+ DHD_OPMODE_STA_SOFTAP_CONCURR(dhd) &&
+ wl_get_drv_status(cfg, CONNECTED, bcmcfg_to_prmry_ndev(cfg))) {
+ u32 *sta_chanspec = (u32 *)wl_read_prof(cfg,
+ bcmcfg_to_prmry_ndev(cfg), WL_PROF_CHAN);
+ if (chan->band == wl_get_nl80211_band(CHSPEC_BAND(*sta_chanspec))) {
+ /* Do not try SCC in 5GHz if channel is not CH149 */
+ chspec = (
+#ifdef WL_6G_BAND
+ CHSPEC_IS6G(*sta_chanspec) ||
+#endif /* WL_6G_BAND */
+ (CHSPEC_IS5G(*sta_chanspec) &&
+ wf_chspec_primary20_chan(*sta_chanspec) !=
+ DEFAULT_5G_SOFTAP_CHANNEL)) ?
+ DEFAULT_2G_SOFTAP_CHANSPEC: *sta_chanspec;
+ WL_ERR(("target chanspec will be changed to %x\n", chspec));
+ if (CHSPEC_IS2G(chspec)) {
+ bw = WL_CHANSPEC_BW_20;
+ goto set_channel;
+ }
+ }
+ }
+#endif /* APSTA_RESTRICTED_CHANNEL */
+
+ err = wl_get_bandwidth_cap(dev, CHSPEC_BAND(chspec), &bw);
+ if (err < 0) {
+ WL_ERR(("Failed to get bandwidth information, err=%d\n", err));
+ return err;
+ }
+
+ /* In case of 5G, downgrade BW to 80MHz, as 160MHz channels fall in DFS */
+ if (CHSPEC_IS5G(chspec) && (bw == WL_CHANSPEC_BW_160)) {
+ bw = WL_CHANSPEC_BW_80;
+ }
+set_channel:
+ cur_chspec = wf_create_chspec_from_primary(wf_chspec_primary20_chan(chspec),
+ bw, CHSPEC_BAND(chspec));
+#ifdef WL_6G_BAND
+ if (cfg->acs_chspec &&
+ CHSPEC_IS6G(cfg->acs_chspec) &&
+ (wf_chspec_ctlchspec(cfg->acs_chspec) == wf_chspec_ctlchspec(cur_chspec))) {
+ WL_DBG(("using acs_chanspec %x\n", cfg->acs_chspec));
+ cur_chspec = cfg->acs_chspec;
+ cfg->acs_chspec = 0;
+ }
+#endif /* WL_6G_BAND */
+ if (wf_chspec_valid(cur_chspec)) {
+ /* convert 802.11 ac chanspec to current fw chanspec type */
+ cur_chspec = wl_chspec_host_to_driver(cur_chspec);
+ if (cur_chspec != INVCHANSPEC) {
+ if ((err = wldev_iovar_setint(dev, "chanspec",
+ cur_chspec)) == BCME_BADCHAN) {
+ u32 local_channel = CHSPEC_CHANNEL(chspec);
+ if ((bw == WL_CHANSPEC_BW_80) || (bw == WL_CHANSPEC_BW_160))
+ goto change_bw;
+ err = wldev_ioctl_set(dev, WLC_SET_CHANNEL,
+ &local_channel, sizeof(local_channel));
+ if (err < 0) {
+ WL_ERR(("WLC_SET_CHANNEL error %d"
+ "chip may not be supporting this channel\n", err));
+ }
+ } else if (err) {
+ WL_ERR(("failed to set chanspec error %d\n", err));
+ }
+#ifdef BCMDONGLEHOST
+#ifdef DISABLE_WL_FRAMEBURST_SOFTAP
+ else {
+ /* Disable Frameburst only for stand-alone 2GHz SoftAP */
+ if (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP &&
+ DHD_OPMODE_SUPPORTED(cfg->pub, DHD_FLAG_HOSTAP_MODE) &&
+ (CHSPEC_IS2G(chspec)) &&
+ !wl_get_drv_status(cfg, CONNECTED,
+ bcmcfg_to_prmry_ndev(cfg))) {
+ WL_DBG(("Disabling frameburst on "
+ "stand-alone 2GHz SoftAP\n"));
+ wl_cfg80211_set_frameburst(cfg, FALSE);
+ }
+ }
+#endif /* DISABLE_WL_FRAMEBURST_SOFTAP */
+#endif /* BCMDONGLEHOST */
+ } else {
+ WL_ERR(("failed to convert host chanspec to fw chanspec\n"));
+ err = BCME_ERROR;
+ }
+ } else {
+change_bw:
+ if (bw == WL_CHANSPEC_BW_160) {
+ bw = WL_CHANSPEC_BW_80;
+ } else if (bw == WL_CHANSPEC_BW_80) {
+ bw = WL_CHANSPEC_BW_40;
+ } else if (bw == WL_CHANSPEC_BW_40) {
+ bw = WL_CHANSPEC_BW_20;
+ } else {
+ bw = 0;
+ }
+ if (bw)
+ goto set_channel;
+ WL_ERR(("Invalid chanspec 0x%x\n", chspec));
+ err = BCME_ERROR;
+ }
+#ifdef CUSTOM_SET_CPUCORE
+ if (dhd->op_mode == DHD_FLAG_HOSTAP_MODE) {
+ WL_DBG(("SoftAP mode do not need to set cpucore\n"));
+ } else if (chspec & WL_CHANSPEC_BW_80) {
+ /* SoftAP-only mode does not need to set cpucore */
+ if ((dev->ieee80211_ptr->iftype == NL80211_IFTYPE_AP) &&
+ dev != bcmcfg_to_prmry_ndev(cfg)) {
+ /* Soft AP on virtual Iface (AP+STA case) */
+ dhd->chan_isvht80 |= DHD_FLAG_HOSTAP_MODE;
+ dhd_set_cpucore(dhd, TRUE);
+ } else if (is_p2p_group_iface(dev->ieee80211_ptr)) {
+ /* If P2P IF is vht80 */
+ dhd->chan_isvht80 |= DHD_FLAG_P2P_MODE;
+ dhd_set_cpucore(dhd, TRUE);
+ }
+ }
+#endif /* CUSTOM_SET_CPUCORE */
+ if (!err && (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP)) {
+ /* Update AP/GO operating chanspec */
+ cfg->ap_oper_channel = wl_freq_to_chanspec(center_freq);
+ }
+ if (err) {
+ wl_flush_fw_log_buffer(bcmcfg_to_prmry_ndev(cfg),
+ FW_LOGSET_MASK_ALL);
+ } else {
+ WL_DBG(("Setting chanspec %x for GO/AP \n", chspec));
+ }
+ return err;
+}
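+
+/* Note (illustrative): wl_cfg80211_set_channel() retries through the
+ * change_bw ladder on failure: an invalid or BCME_BADCHAN-rejected
+ * 160MHz/80MHz chanspec is rebuilt at the next narrower width
+ * (160 -> 80 -> 40 -> 20), while a BCME_BADCHAN at 40MHz or below falls
+ * back to the legacy WLC_SET_CHANNEL ioctl instead.
+ */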
+
+static s32
+wl_validate_opensecurity(struct net_device *dev, s32 bssidx, bool privacy)
+{
+ s32 err = BCME_OK;
+ u32 wpa_val;
+ s32 wsec = 0;
+
+ /* set auth */
+ err = wldev_iovar_setint_bsscfg(dev, "auth", 0, bssidx);
+ if (err < 0) {
+ WL_ERR(("auth error %d\n", err));
+ return BCME_ERROR;
+ }
+
+ if (privacy) {
+ /* If privacy bit is set in open mode, then WEP would be enabled */
+ wsec = WEP_ENABLED;
+ WL_DBG(("Setting wsec to %d for WEP \n", wsec));
+ }
+
+ /* set wsec */
+ err = wldev_iovar_setint_bsscfg(dev, "wsec", wsec, bssidx);
+ if (err < 0) {
+ WL_ERR(("wsec error %d\n", err));
+ return BCME_ERROR;
+ }
+
+ /* set upper-layer auth */
+ if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_ADHOC)
+ wpa_val = WPA_AUTH_NONE;
+ else
+ wpa_val = WPA_AUTH_DISABLED;
+ err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", wpa_val, bssidx);
+ if (err < 0) {
+ WL_ERR(("wpa_auth error %d\n", err));
+ return BCME_ERROR;
+ }
+
+ return 0;
+}
+
+#define MAX_FILS_IND_IE_LEN 1024u
+static s32
+wl_validate_fils_ind_ie(struct net_device *dev, const bcm_tlv_t *filsindie, s32 bssidx)
+{
+ s32 err = BCME_OK;
+ struct bcm_cfg80211 *cfg = NULL;
+ bcm_iov_buf_t *iov_buf = NULL;
+ bcm_xtlv_t* pxtlv;
+ int iov_buf_size = 0;
+
+ if (!dev || !filsindie) {
+ WL_ERR(("%s: dev/filsidie is null\n", __FUNCTION__));
+ goto exit;
+ }
+
+ cfg = wl_get_cfg(dev);
+ if (!cfg) {
+ WL_ERR(("%s: cfg is null\n", __FUNCTION__));
+ goto exit;
+ }
+
+ iov_buf_size = sizeof(bcm_iov_buf_t) + sizeof(bcm_xtlv_t) + filsindie->len - 1;
+ iov_buf = MALLOCZ(cfg->osh, iov_buf_size);
+ if (!iov_buf) {
+ WL_ERR(("%s: iov_buf alloc failed! %d bytes\n", __FUNCTION__, iov_buf_size));
+ err = BCME_NOMEM;
+ goto exit;
+ }
+ iov_buf->version = WL_FILS_IOV_VERSION;
+ iov_buf->id = WL_FILS_CMD_ADD_IND_IE;
+ iov_buf->len = sizeof(bcm_xtlv_t) + filsindie->len - 1;
+ pxtlv = (bcm_xtlv_t*)&iov_buf->data[0];
+ pxtlv->id = WL_FILS_XTLV_IND_IE;
+ pxtlv->len = filsindie->len;
+ /* memcpy_s return check not required as buffer is allocated based on ie
+ * len
+ */
+ (void)memcpy_s(pxtlv->data, filsindie->len, filsindie->data, filsindie->len);
+
+ err = wldev_iovar_setbuf(dev, "fils", iov_buf, iov_buf_size,
+ cfg->ioctl_buf, WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync);
+ if (unlikely(err)) {
+ WL_ERR(("fils indication ioctl error (%d)\n", err));
+ goto exit;
+ }
+
+exit:
+ if (err < 0) {
+ WL_ERR(("FILS Ind setting error %d\n", err));
+ }
+
+ if (iov_buf) {
+ MFREE(cfg->osh, iov_buf, iov_buf_size);
+ }
+ return err;
+}
+
+#ifdef MFP
+static int
+wl_get_mfp_capability(u8 rsn_cap, u32 *wpa_auth, u32 *mfp_val)
+{
+ u32 mfp = 0;
+ if (rsn_cap & RSN_CAP_MFPR) {
+ WL_DBG(("MFP Required \n"));
+ mfp = WL_MFP_REQUIRED;
+ /* Our firmware requires that WPA2_AUTH_PSK/WPA2_AUTH_UNSPECIFIED
+ * be set if the SHA256 OUI is to be included in the RSN IE.
+ */
+ if (*wpa_auth & WPA2_AUTH_PSK_SHA256) {
+ *wpa_auth |= WPA2_AUTH_PSK;
+ } else if (*wpa_auth & WPA2_AUTH_1X_SHA256) {
+ *wpa_auth |= WPA2_AUTH_UNSPECIFIED;
+ }
+ } else if (rsn_cap & RSN_CAP_MFPC) {
+ WL_DBG(("MFP Capable \n"));
+ mfp = WL_MFP_CAPABLE;
+ }
+
+ /* Validate MFP */
+ if ((*wpa_auth == WPA3_AUTH_SAE_PSK) && (mfp != WL_MFP_REQUIRED)) {
+ WL_ERR(("MFPR should be set for SAE PSK. mfp:%d\n", mfp));
+ return BCME_ERROR;
+ } else if ((*wpa_auth == (WPA3_AUTH_SAE_PSK | WPA2_AUTH_PSK)) &&
+ (mfp != WL_MFP_CAPABLE)) {
+ WL_ERR(("mfp(%d) should be set to capable(%d) for SAE transition mode\n",
+ mfp, WL_MFP_CAPABLE));
+ return BCME_ERROR;
+ }
+
+ *mfp_val = mfp;
+ return BCME_OK;
+}
+#endif /* MFP */
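+
+/* Example (illustrative): an RSN IE advertising RSN_CAP_MFPC (but not
+ * RSN_CAP_MFPR) with wpa_auth = WPA3_AUTH_SAE_PSK | WPA2_AUTH_PSK (SAE
+ * transition mode) yields mfp = WL_MFP_CAPABLE and passes validation;
+ * pure SAE (wpa_auth == WPA3_AUTH_SAE_PSK) additionally requires
+ * RSN_CAP_MFPR, otherwise wl_get_mfp_capability() returns BCME_ERROR.
+ */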
+
+static s32
+wl_validate_wpa2ie(struct net_device *dev, const bcm_tlv_t *wpa2ie, s32 bssidx)
+{
+ s32 len = 0;
+ s32 err = BCME_OK;
+ u16 auth = 0; /* d11 open authentication */
+ u32 wsec;
+ u32 pval = 0;
+ u32 gval = 0;
+ u32 wpa_auth = 0;
+ const wpa_suite_mcast_t *mcast;
+ const wpa_suite_ucast_t *ucast;
+ const wpa_suite_auth_key_mgmt_t *mgmt;
+ const wpa_pmkid_list_t *pmkid;
+ int cnt = 0;
+#ifdef MFP
+ u32 mfp = 0;
+#endif /* MFP */
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ struct wl_security *sec = wl_read_prof(cfg, dev, WL_PROF_SEC);
+
+ u16 suite_count;
+ u8 rsn_cap[2];
+ u32 wme_bss_disable;
+
+ if (wpa2ie == NULL)
+ goto exit;
+
+ WL_DBG(("Enter \n"));
+ len = wpa2ie->len - WPA2_VERSION_LEN;
+ /* check the mcast cipher */
+ mcast = (const wpa_suite_mcast_t *)&wpa2ie->data[WPA2_VERSION_LEN];
+ switch (mcast->type) {
+ case WPA_CIPHER_NONE:
+ gval = 0;
+ break;
+ case WPA_CIPHER_WEP_40:
+ case WPA_CIPHER_WEP_104:
+ gval = WEP_ENABLED;
+ break;
+ case WPA_CIPHER_TKIP:
+ gval = TKIP_ENABLED;
+ break;
+ case WPA_CIPHER_AES_CCM:
+ gval = AES_ENABLED;
+ break;
+
+#ifdef BCMWAPI_WPI
+ case WAPI_CIPHER_SMS4:
+ gval = SMS4_ENABLED;
+ break;
+#endif
+
+ default:
+ WL_ERR(("No Security Info\n"));
+ break;
+ }
+ if ((len -= WPA_SUITE_LEN) <= 0)
+ return BCME_BADLEN;
+
+ /* check the unicast cipher */
+ ucast = (const wpa_suite_ucast_t *)&mcast[1];
+ suite_count = ltoh16_ua(&ucast->count);
+ switch (ucast->list[0].type) {
+ case WPA_CIPHER_NONE:
+ pval = 0;
+ break;
+ case WPA_CIPHER_WEP_40:
+ case WPA_CIPHER_WEP_104:
+ pval = WEP_ENABLED;
+ break;
+ case WPA_CIPHER_TKIP:
+ pval = TKIP_ENABLED;
+ break;
+ case WPA_CIPHER_AES_CCM:
+ pval = AES_ENABLED;
+ break;
+
+#ifdef BCMWAPI_WPI
+ case WAPI_CIPHER_SMS4:
+ pval = SMS4_ENABLED;
+ break;
+#endif
+
+ default:
+ WL_ERR(("No Security Info\n"));
+ }
+ if ((len -= (WPA_IE_SUITE_COUNT_LEN + (WPA_SUITE_LEN * suite_count))) <= 0)
+ return BCME_BADLEN;
+
+ /* For WPS, set SES_OW_ENABLED */
+ wsec = (pval | gval | SES_OW_ENABLED);
+ /* check the AKM */
+ mgmt = (const wpa_suite_auth_key_mgmt_t *)&ucast->list[suite_count];
+ suite_count = cnt = ltoh16_ua(&mgmt->count);
+ while (cnt--) {
+ if (bcmp(mgmt->list[cnt].oui, WFA_OUI, WFA_OUI_LEN) == 0) {
+ switch (mgmt->list[cnt].type) {
+ case RSN_AKM_DPP:
+ wpa_auth |= WPA3_AUTH_DPP_AKM;
+ break;
+ default:
+ WL_ERR(("No Key Mgmt Info in WFA_OUI\n"));
+ }
+ } else {
+ switch (mgmt->list[cnt].type) {
+ case RSN_AKM_NONE:
+ wpa_auth |= WPA_AUTH_NONE;
+ break;
+ case RSN_AKM_UNSPECIFIED:
+ wpa_auth |= WPA2_AUTH_UNSPECIFIED;
+ break;
+ case RSN_AKM_PSK:
+ wpa_auth |= WPA2_AUTH_PSK;
+ break;
+#ifdef MFP
+ case RSN_AKM_MFP_PSK:
+ wpa_auth |= WPA2_AUTH_PSK_SHA256;
+ break;
+ case RSN_AKM_MFP_1X:
+ wpa_auth |= WPA2_AUTH_1X_SHA256;
+ break;
+ case RSN_AKM_FILS_SHA256:
+ wpa_auth |= WPA2_AUTH_FILS_SHA256;
+ break;
+ case RSN_AKM_FILS_SHA384:
+ wpa_auth |= WPA2_AUTH_FILS_SHA384;
+ break;
+#if defined(WL_SAE) || defined(WL_CLIENT_SAE)
+ case RSN_AKM_SAE_PSK:
+ wpa_auth |= WPA3_AUTH_SAE_PSK;
+ break;
+#endif /* WL_SAE || WL_CLIENT_SAE */
+#endif /* MFP */
+ default:
+ WL_ERR(("No Key Mgmt Info\n"));
+ }
+ }
+ }
+ if ((len -= (WPA_IE_SUITE_COUNT_LEN + (WPA_SUITE_LEN * suite_count))) >= RSN_CAP_LEN) {
+ rsn_cap[0] = *(const u8 *)&mgmt->list[suite_count];
+ rsn_cap[1] = *((const u8 *)&mgmt->list[suite_count] + 1);
+
+ if (rsn_cap[0] & (RSN_CAP_16_REPLAY_CNTRS << RSN_CAP_PTK_REPLAY_CNTR_SHIFT)) {
+ wme_bss_disable = 0;
+ } else {
+ wme_bss_disable = 1;
+ }
+
+#ifdef MFP
+ if (wl_get_mfp_capability(rsn_cap[0], &wpa_auth, &mfp) != BCME_OK) {
+ WL_ERR(("mfp configuration invalid. rsn_cap:0x%x\n", rsn_cap[0]));
+ return BCME_ERROR;
+ }
+#endif /* MFP */
+
+ /* set wme_bss_disable to sync RSN Capabilities */
+ err = wldev_iovar_setint_bsscfg(dev, "wme_bss_disable", wme_bss_disable, bssidx);
+ if (err < 0) {
+ WL_ERR(("wme_bss_disable error %d\n", err));
+ return BCME_ERROR;
+ }
+ } else {
+ WL_DBG(("There is no RSN Capabilities. remained len %d\n", len));
+ }
+
+ len -= RSN_CAP_LEN;
+ if (len >= WPA2_PMKID_COUNT_LEN) {
+ pmkid = (const wpa_pmkid_list_t *)
+ ((const u8 *)&mgmt->list[suite_count] + RSN_CAP_LEN);
+ cnt = ltoh16_ua(&pmkid->count);
+ if (cnt != 0) {
+ WL_ERR(("AP has non-zero PMKID count. Wrong!\n"));
+ return BCME_ERROR;
+ }
+ /* Since the PMKID count is known to be 0 for the AP,
+ * don't bother to send this info down to firmware.
+ */
+ }
+
+#ifdef MFP
+ len -= WPA2_PMKID_COUNT_LEN;
+ if (len >= WPA_SUITE_LEN) {
+ cfg->bip_pos =
+ (const u8 *)&mgmt->list[suite_count] + RSN_CAP_LEN + WPA2_PMKID_COUNT_LEN;
+ } else {
+ cfg->bip_pos = NULL;
+ }
+#endif
+
+ /* set auth */
+ err = wldev_iovar_setint_bsscfg(dev, "auth", auth, bssidx);
+ if (err < 0) {
+ WL_ERR(("auth error %d\n", err));
+ return BCME_ERROR;
+ }
+
+ /* set wsec */
+ err = wldev_iovar_setint_bsscfg(dev, "wsec", wsec, bssidx);
+ if (err < 0) {
+ WL_ERR(("wsec error %d\n", err));
+ return BCME_ERROR;
+ }
+
+#ifdef MFP
+ cfg->mfp_mode = mfp;
+#endif /* MFP */
+
+ /* set upper-layer auth */
+ err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", wpa_auth, bssidx);
+ if (err < 0) {
+ WL_ERR(("wpa_auth error %d\n", err));
+ return BCME_ERROR;
+ }
+
+ if (sec) {
+ /* store applied sec settings */
+ sec->fw_wpa_auth = wpa_auth;
+ sec->fw_wsec = wsec;
+ sec->fw_auth = auth;
+#ifdef MFP
+ sec->fw_mfp = mfp;
+#endif /* mfp */
+ }
+exit:
+ return 0;
+}
+
+static s32
+wl_validate_wpaie(struct net_device *dev, const wpa_ie_fixed_t *wpaie, s32 bssidx)
+{
+ const wpa_suite_mcast_t *mcast;
+ const wpa_suite_ucast_t *ucast;
+ const wpa_suite_auth_key_mgmt_t *mgmt;
+ u16 auth = 0; /* d11 open authentication */
+ u16 count;
+ s32 err = BCME_OK;
+ s32 len = 0;
+ u32 i;
+ u32 wsec;
+ u32 pval = 0;
+ u32 gval = 0;
+ u32 wpa_auth = 0;
+ u32 tmp = 0;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ struct wl_security *sec = wl_read_prof(cfg, dev, WL_PROF_SEC);
+
+ if (wpaie == NULL)
+ goto exit;
+ WL_DBG(("Enter \n"));
+ len = wpaie->length; /* value length */
+ len -= WPA_IE_TAG_FIXED_LEN;
+ /* check for multicast cipher suite */
+ if (len < WPA_SUITE_LEN) {
+ WL_INFORM_MEM(("no multicast cipher suite\n"));
+ goto exit;
+ }
+
+ /* pick up multicast cipher */
+ mcast = (const wpa_suite_mcast_t *)&wpaie[1];
+ len -= WPA_SUITE_LEN;
+ if (!bcmp(mcast->oui, WPA_OUI, WPA_OUI_LEN)) {
+ if (IS_WPA_CIPHER(mcast->type)) {
+ tmp = 0;
+ switch (mcast->type) {
+ case WPA_CIPHER_NONE:
+ tmp = 0;
+ break;
+ case WPA_CIPHER_WEP_40:
+ case WPA_CIPHER_WEP_104:
+ tmp = WEP_ENABLED;
+ break;
+ case WPA_CIPHER_TKIP:
+ tmp = TKIP_ENABLED;
+ break;
+ case WPA_CIPHER_AES_CCM:
+ tmp = AES_ENABLED;
+ break;
+ default:
+ WL_ERR(("No Security Info\n"));
+ }
+ gval |= tmp;
+ }
+ }
+ /* Check for unicast suite(s) */
+ if (len < WPA_IE_SUITE_COUNT_LEN) {
+ WL_INFORM_MEM(("no unicast suite\n"));
+ goto exit;
+ }
+ /* walk thru unicast cipher list and pick up what we recognize */
+ ucast = (const wpa_suite_ucast_t *)&mcast[1];
+ count = ltoh16_ua(&ucast->count);
+ len -= WPA_IE_SUITE_COUNT_LEN;
+ for (i = 0; i < count && len >= WPA_SUITE_LEN;
+ i++, len -= WPA_SUITE_LEN) {
+ if (!bcmp(ucast->list[i].oui, WPA_OUI, WPA_OUI_LEN)) {
+ if (IS_WPA_CIPHER(ucast->list[i].type)) {
+ tmp = 0;
+ switch (ucast->list[i].type) {
+ case WPA_CIPHER_NONE:
+ tmp = 0;
+ break;
+ case WPA_CIPHER_WEP_40:
+ case WPA_CIPHER_WEP_104:
+ tmp = WEP_ENABLED;
+ break;
+ case WPA_CIPHER_TKIP:
+ tmp = TKIP_ENABLED;
+ break;
+ case WPA_CIPHER_AES_CCM:
+ tmp = AES_ENABLED;
+ break;
+ default:
+ WL_ERR(("No Security Info\n"));
+ }
+ pval |= tmp;
+ }
+ }
+ }
+ len -= (count - i) * WPA_SUITE_LEN;
+ /* Check for auth key management suite(s) */
+ if (len < WPA_IE_SUITE_COUNT_LEN) {
+ WL_INFORM_MEM((" no auth key mgmt suite\n"));
+ goto exit;
+ }
+ /* walk thru auth management suite list and pick up what we recognize */
+ mgmt = (const wpa_suite_auth_key_mgmt_t *)&ucast->list[count];
+ count = ltoh16_ua(&mgmt->count);
+ len -= WPA_IE_SUITE_COUNT_LEN;
+ for (i = 0; i < count && len >= WPA_SUITE_LEN;
+ i++, len -= WPA_SUITE_LEN) {
+ if (!bcmp(mgmt->list[i].oui, WPA_OUI, WPA_OUI_LEN)) {
+ if (IS_WPA_AKM(mgmt->list[i].type)) {
+ tmp = 0;
+ switch (mgmt->list[i].type) {
+ case RSN_AKM_NONE:
+ tmp = WPA_AUTH_NONE;
+ break;
+ case RSN_AKM_UNSPECIFIED:
+ tmp = WPA_AUTH_UNSPECIFIED;
+ break;
+ case RSN_AKM_PSK:
+ tmp = WPA_AUTH_PSK;
+ break;
+ default:
+ WL_ERR(("No Key Mgmt Info\n"));
+ }
+ wpa_auth |= tmp;
+ }
+ }
+
+ }
+ /* For WPS, set SES_OW_ENABLED */
+ wsec = (pval | gval | SES_OW_ENABLED);
+ /* set auth */
+ err = wldev_iovar_setint_bsscfg(dev, "auth", auth, bssidx);
+ if (err < 0) {
+ WL_ERR(("auth error %d\n", err));
+ return BCME_ERROR;
+ }
+ /* set wsec */
+ err = wldev_iovar_setint_bsscfg(dev, "wsec", wsec, bssidx);
+ if (err < 0) {
+ WL_ERR(("wsec error %d\n", err));
+ return BCME_ERROR;
+ }
+ /* set upper-layer auth */
+ err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", wpa_auth, bssidx);
+ if (err < 0) {
+ WL_ERR(("wpa_auth error %d\n", err));
+ return BCME_ERROR;
+ }
+
+ if (sec) {
+ /* store applied sec settings */
+ sec->fw_wpa_auth = wpa_auth;
+ sec->fw_wsec = wsec;
+ sec->fw_auth = auth;
+ }
+
+exit:
+ return 0;
+}
+
+#if defined(SUPPORT_SOFTAP_WPAWPA2_MIXED)
+static u32 wl_get_cipher_type(uint8 type)
+{
+ u32 ret = 0;
+ switch (type) {
+ case WPA_CIPHER_NONE:
+ ret = 0;
+ break;
+ case WPA_CIPHER_WEP_40:
+ case WPA_CIPHER_WEP_104:
+ ret = WEP_ENABLED;
+ break;
+ case WPA_CIPHER_TKIP:
+ ret = TKIP_ENABLED;
+ break;
+ case WPA_CIPHER_AES_CCM:
+ ret = AES_ENABLED;
+ break;
+
+#ifdef BCMWAPI_WPI
+ case WAPI_CIPHER_SMS4:
+ ret = SMS4_ENABLED;
+ break;
+#endif
+
+ default:
+ WL_ERR(("No Security Info\n"));
+ }
+ return ret;
+}
+
+static u32 wl_get_suite_auth_key_mgmt_type(uint8 type, const wpa_suite_mcast_t *mcast)
+{
+ u32 ret = 0;
+ u32 is_wpa2 = 0;
+
+ if (!bcmp(mcast->oui, WPA2_OUI, WPA2_OUI_LEN)) {
+ is_wpa2 = 1;
+ }
+
+ WL_INFORM_MEM(("%s, type = %d\n", is_wpa2 ? "WPA2":"WPA", type));
+ if (bcmp(mcast->oui, WFA_OUI, WFA_OUI_LEN) == 0) {
+ switch (type) {
+ case RSN_AKM_DPP:
+ ret = WPA3_AUTH_DPP_AKM;
+ break;
+ default:
+ WL_ERR(("No Key Mgmt Info in WFA_OUI\n"));
+ }
+ } else {
+ switch (type) {
+ case RSN_AKM_NONE:
+ /* For WPA and WPA2, AUTH_NONE is common */
+ ret = WPA_AUTH_NONE;
+ break;
+ case RSN_AKM_UNSPECIFIED:
+ if (is_wpa2) {
+ ret = WPA2_AUTH_UNSPECIFIED;
+ } else {
+ ret = WPA_AUTH_UNSPECIFIED;
+ }
+ break;
+ case RSN_AKM_PSK:
+ if (is_wpa2) {
+ ret = WPA2_AUTH_PSK;
+ } else {
+ ret = WPA_AUTH_PSK;
+ }
+ break;
+#ifdef WL_SAE
+ case RSN_AKM_SAE_PSK:
+ ret = WPA3_AUTH_SAE_PSK;
+ break;
+#endif /* WL_SAE */
+ default:
+ WL_ERR(("No Key Mgmt Info\n"));
+ }
+ }
+ return ret;
+}
+
+static s32
+wl_validate_wpaie_wpa2ie(struct net_device *dev, const wpa_ie_fixed_t *wpaie,
+ const bcm_tlv_t *wpa2ie, s32 bssidx)
+{
+ const wpa_suite_mcast_t *mcast;
+ const wpa_suite_ucast_t *ucast;
+ const wpa_suite_auth_key_mgmt_t *mgmt;
+ u16 auth = 0; /* d11 open authentication */
+ u16 count;
+ s32 err = BCME_OK;
+ u32 wme_bss_disable;
+ u16 suite_count;
+ u8 rsn_cap[2];
+ s32 len = 0;
+ u32 i;
+ u32 wsec1, wsec2, wsec;
+ u32 pval = 0;
+ u32 gval = 0;
+ u32 wpa_auth = 0;
+ u32 wpa_auth1 = 0;
+ u32 wpa_auth2 = 0;
+#ifdef MFP
+ u32 mfp = 0;
+#endif /* MFP */
+
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ struct wl_security *sec = wl_read_prof(cfg, dev, WL_PROF_SEC);
+
+ if (wpaie == NULL || wpa2ie == NULL)
+ goto exit;
+
+ WL_DBG(("Enter \n"));
+ len = wpaie->length; /* value length */
+ len -= WPA_IE_TAG_FIXED_LEN;
+ /* check for multicast cipher suite */
+ if (len < WPA_SUITE_LEN) {
+ WL_INFORM_MEM(("no multicast cipher suite\n"));
+ goto exit;
+ }
+
+ /* pick up multicast cipher */
+ mcast = (const wpa_suite_mcast_t *)&wpaie[1];
+ len -= WPA_SUITE_LEN;
+ if (!bcmp(mcast->oui, WPA_OUI, WPA_OUI_LEN)) {
+ if (IS_WPA_CIPHER(mcast->type)) {
+ gval |= wl_get_cipher_type(mcast->type);
+ }
+ }
+ WL_DBG(("\nwpa ie validate\n"));
+ WL_DBG(("wpa ie mcast cipher = 0x%X\n", gval));
+
+ /* Check for unicast suite(s) */
+ if (len < WPA_IE_SUITE_COUNT_LEN) {
+ WL_INFORM_MEM(("no unicast suite\n"));
+ goto exit;
+ }
+
+ /* walk thru unicast cipher list and pick up what we recognize */
+ ucast = (const wpa_suite_ucast_t *)&mcast[1];
+ count = ltoh16_ua(&ucast->count);
+ len -= WPA_IE_SUITE_COUNT_LEN;
+ for (i = 0; i < count && len >= WPA_SUITE_LEN;
+ i++, len -= WPA_SUITE_LEN) {
+ if (!bcmp(ucast->list[i].oui, WPA_OUI, WPA_OUI_LEN)) {
+ if (IS_WPA_CIPHER(ucast->list[i].type)) {
+ pval |= wl_get_cipher_type(ucast->list[i].type);
+ }
+ }
+ }
+	WL_ERR(("wpa ie ucast count=%d, cipher = 0x%X\n", count, pval));
+
+	/* For WPS, set SES_OW_ENABLED */
+ wsec1 = (pval | gval | SES_OW_ENABLED);
+ WL_ERR(("wpa ie wsec = 0x%X\n", wsec1));
+
+ len -= (count - i) * WPA_SUITE_LEN;
+ /* Check for auth key management suite(s) */
+ if (len < WPA_IE_SUITE_COUNT_LEN) {
+ WL_INFORM_MEM((" no auth key mgmt suite\n"));
+ goto exit;
+ }
+ /* walk thru auth management suite list and pick up what we recognize */
+ mgmt = (const wpa_suite_auth_key_mgmt_t *)&ucast->list[count];
+ count = ltoh16_ua(&mgmt->count);
+ len -= WPA_IE_SUITE_COUNT_LEN;
+ for (i = 0; i < count && len >= WPA_SUITE_LEN;
+ i++, len -= WPA_SUITE_LEN) {
+ if (!bcmp(mgmt->list[i].oui, WPA_OUI, WPA_OUI_LEN)) {
+ if (IS_WPA_AKM(mgmt->list[i].type)) {
+ wpa_auth1 |=
+ wl_get_suite_auth_key_mgmt_type(mgmt->list[i].type, mcast);
+ }
+ }
+
+ }
+ WL_ERR(("wpa ie wpa_suite_auth_key_mgmt count=%d, key_mgmt = 0x%X\n", count, wpa_auth1));
+ WL_ERR(("\nwpa2 ie validate\n"));
+
+ pval = 0;
+ gval = 0;
+ len = wpa2ie->len;
+ /* check the mcast cipher */
+ mcast = (const wpa_suite_mcast_t *)&wpa2ie->data[WPA2_VERSION_LEN];
+ gval = wl_get_cipher_type(mcast->type);
+
+ WL_ERR(("wpa2 ie mcast cipher = 0x%X\n", gval));
+ if ((len -= WPA_SUITE_LEN) <= 0)
+ {
+ WL_ERR(("P:wpa2 ie len[%d]", len));
+ return BCME_BADLEN;
+ }
+
+ /* check the unicast cipher */
+ ucast = (const wpa_suite_ucast_t *)&mcast[1];
+ suite_count = ltoh16_ua(&ucast->count);
+ WL_ERR((" WPA2 ucast cipher count=%d\n", suite_count));
+ pval |= wl_get_cipher_type(ucast->list[0].type);
+
+ if ((len -= (WPA_IE_SUITE_COUNT_LEN + (WPA_SUITE_LEN * suite_count))) <= 0)
+ return BCME_BADLEN;
+
+ WL_ERR(("wpa2 ie ucast cipher = 0x%X\n", pval));
+
+	/* For WPS, set SES_OW_ENABLED */
+ wsec2 = (pval | gval | SES_OW_ENABLED);
+ WL_ERR(("wpa2 ie wsec = 0x%X\n", wsec2));
+
+ /* check the AKM */
+ mgmt = (const wpa_suite_auth_key_mgmt_t *)&ucast->list[suite_count];
+ suite_count = ltoh16_ua(&mgmt->count);
+ wpa_auth2 = wl_get_suite_auth_key_mgmt_type(mgmt->list[0].type, mcast);
+	WL_ERR(("wpa2 ie wpa_suite_auth_key_mgmt count=%d, key_mgmt = 0x%X\n", suite_count, wpa_auth2));
+
+ wsec = (wsec1 | wsec2);
+ wpa_auth = (wpa_auth1 | wpa_auth2);
+ WL_ERR(("wpa_wpa2 wsec=0x%X wpa_auth=0x%X\n", wsec, wpa_auth));
+
+ if ((len -= (WPA_IE_SUITE_COUNT_LEN + (WPA_SUITE_LEN * suite_count))) >= RSN_CAP_LEN) {
+ rsn_cap[0] = *(const u8 *)&mgmt->list[suite_count];
+ rsn_cap[1] = *((const u8 *)&mgmt->list[suite_count] + 1);
+ if (rsn_cap[0] & (RSN_CAP_16_REPLAY_CNTRS << RSN_CAP_PTK_REPLAY_CNTR_SHIFT)) {
+ wme_bss_disable = 0;
+ } else {
+ wme_bss_disable = 1;
+ }
+ WL_DBG(("P:rsn_cap[0]=[0x%X]:wme_bss_disabled[%d]\n", rsn_cap[0], wme_bss_disable));
+
+#ifdef MFP
+ if (wl_get_mfp_capability(rsn_cap[0], &wpa_auth, &mfp) != BCME_OK) {
+ WL_ERR(("mfp configuration invalid. rsn_cap:0x%x\n", rsn_cap[0]));
+ return BCME_ERROR;
+ }
+ cfg->mfp_mode = mfp;
+#endif /* MFP */
+
+ /* set wme_bss_disable to sync RSN Capabilities */
+ err = wldev_iovar_setint_bsscfg(dev, "wme_bss_disable", wme_bss_disable, bssidx);
+ if (err < 0) {
+ WL_ERR(("wme_bss_disable error %d\n", err));
+ return BCME_ERROR;
+ }
+ } else {
+ WL_DBG(("There is no RSN Capabilities. remained len %d\n", len));
+ }
+
+ /* set auth */
+ err = wldev_iovar_setint_bsscfg(dev, "auth", auth, bssidx);
+ if (err < 0) {
+ WL_ERR(("auth error %d\n", err));
+ return BCME_ERROR;
+ }
+ /* set wsec */
+ err = wldev_iovar_setint_bsscfg(dev, "wsec", wsec, bssidx);
+ if (err < 0) {
+ WL_ERR(("wsec error %d\n", err));
+ return BCME_ERROR;
+ }
+ /* set upper-layer auth */
+ err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", wpa_auth, bssidx);
+ if (err < 0) {
+ WL_ERR(("wpa_auth error %d\n", err));
+ return BCME_ERROR;
+ }
+
+ if (sec) {
+ sec->fw_wpa_auth = wpa_auth;
+ sec->fw_auth = auth;
+ sec->fw_wsec = wsec;
+ }
+
+exit:
+ return 0;
+}
+#endif /* SUPPORT_SOFTAP_WPAWPA2_MIXED */
+
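+/* Validate beacon security for AP/GO bring-up. A P2P GO expects an RSN IE
+ * (WPA2-PSK); a SoftAP handles WPA/WPA2 mixed mode, plain WPA or WPA2,
+ * FILS indication and open security, and caches the applied IEs in the
+ * per-bss cfgbss structure.
+ */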
+static s32
+wl_cfg80211_bcn_validate_sec(
+ struct net_device *dev,
+ struct parsed_ies *ies,
+ u32 dev_role,
+ s32 bssidx,
+ bool privacy)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ wl_cfgbss_t *bss = wl_get_cfgbss_by_wdev(cfg, dev->ieee80211_ptr);
+ struct wl_security *sec = wl_read_prof(cfg, dev, WL_PROF_SEC);
+
+ if (!bss) {
+ WL_ERR(("cfgbss is NULL \n"));
+ return BCME_ERROR;
+ }
+
+ if (dev_role == NL80211_IFTYPE_P2P_GO && (ies->wpa2_ie)) {
+ /* For P2P GO, the sec type is WPA2-PSK */
+ WL_DBG(("P2P GO: validating wpa2_ie\n"));
+ if (wl_validate_wpa2ie(dev, ies->wpa2_ie, bssidx) < 0)
+ return BCME_ERROR;
+
+ } else if (dev_role == NL80211_IFTYPE_AP) {
+
+ WL_DBG(("SoftAP: validating security\n"));
+ /* If wpa2_ie or wpa_ie is present validate it */
+
+#if defined(SUPPORT_SOFTAP_WPAWPA2_MIXED)
+ if ((ies->wpa_ie != NULL && ies->wpa2_ie != NULL)) {
+ if (wl_validate_wpaie_wpa2ie(dev, ies->wpa_ie, ies->wpa2_ie, bssidx) < 0) {
+ bss->security_mode = false;
+ return BCME_ERROR;
+ }
+ }
+ else {
+#endif /* SUPPORT_SOFTAP_WPAWPA2_MIXED */
+ if ((ies->wpa2_ie || ies->wpa_ie) &&
+ ((wl_validate_wpa2ie(dev, ies->wpa2_ie, bssidx) < 0 ||
+ wl_validate_wpaie(dev, ies->wpa_ie, bssidx) < 0))) {
+ bss->security_mode = false;
+ return BCME_ERROR;
+ }
+
+ if (ies->fils_ind_ie &&
+ (wl_validate_fils_ind_ie(dev, ies->fils_ind_ie, bssidx) < 0)) {
+ bss->security_mode = false;
+ return BCME_ERROR;
+ }
+
+ bss->security_mode = true;
+ if (bss->rsn_ie) {
+ MFREE(cfg->osh, bss->rsn_ie, bss->rsn_ie[1]
+ + WPA_RSN_IE_TAG_FIXED_LEN);
+ bss->rsn_ie = NULL;
+ }
+ if (bss->wpa_ie) {
+ MFREE(cfg->osh, bss->wpa_ie, bss->wpa_ie[1]
+ + WPA_RSN_IE_TAG_FIXED_LEN);
+ bss->wpa_ie = NULL;
+ }
+ if (bss->wps_ie) {
+ MFREE(cfg->osh, bss->wps_ie, bss->wps_ie[1] + 2);
+ bss->wps_ie = NULL;
+ }
+ if (bss->fils_ind_ie) {
+ MFREE(cfg->osh, bss->fils_ind_ie, bss->fils_ind_ie[1]
+ + FILS_INDICATION_IE_TAG_FIXED_LEN);
+ bss->fils_ind_ie = NULL;
+ }
+ if (ies->wpa_ie != NULL) {
+ /* WPAIE */
+ bss->rsn_ie = NULL;
+ bss->wpa_ie = MALLOCZ(cfg->osh,
+ ies->wpa_ie->length
+ + WPA_RSN_IE_TAG_FIXED_LEN);
+ if (bss->wpa_ie) {
+ memcpy(bss->wpa_ie, ies->wpa_ie,
+ ies->wpa_ie->length
+ + WPA_RSN_IE_TAG_FIXED_LEN);
+ }
+ } else if (ies->wpa2_ie != NULL) {
+ /* RSNIE */
+ bss->wpa_ie = NULL;
+ bss->rsn_ie = MALLOCZ(cfg->osh,
+ ies->wpa2_ie->len
+ + WPA_RSN_IE_TAG_FIXED_LEN);
+ if (bss->rsn_ie) {
+ memcpy(bss->rsn_ie, ies->wpa2_ie,
+ ies->wpa2_ie->len
+ + WPA_RSN_IE_TAG_FIXED_LEN);
+ }
+ }
+#ifdef WL_FILS
+ if (ies->fils_ind_ie) {
+ bss->fils_ind_ie = MALLOCZ(cfg->osh,
+ ies->fils_ind_ie->len
+ + FILS_INDICATION_IE_TAG_FIXED_LEN);
+ if (bss->fils_ind_ie) {
+ memcpy(bss->fils_ind_ie, ies->fils_ind_ie,
+ ies->fils_ind_ie->len
+ + FILS_INDICATION_IE_TAG_FIXED_LEN);
+ }
+ }
+#endif /* WL_FILS */
+#if defined(SUPPORT_SOFTAP_WPAWPA2_MIXED)
+ }
+#endif /* SUPPORT_SOFTAP_WPAWPA2_MIXED */
+ if (!ies->wpa2_ie && !ies->wpa_ie) {
+ wl_validate_opensecurity(dev, bssidx, privacy);
+ bss->security_mode = false;
+ }
+
+ if (ies->wps_ie) {
+ bss->wps_ie = MALLOCZ(cfg->osh, ies->wps_ie_len);
+ if (bss->wps_ie) {
+ memcpy(bss->wps_ie, ies->wps_ie, ies->wps_ie_len);
+ }
+ }
+ }
+
+ WL_INFORM_MEM(("[%s] wpa_auth:0x%x auth:0x%x wsec:0x%x mfp:0x%x\n",
+ dev->name, sec->fw_wpa_auth, sec->fw_auth, sec->fw_wsec, sec->fw_mfp));
+ return 0;
+
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || \
+ defined(WL_COMPAT_WIRELESS)
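+/* Apply the beacon interval and DTIM period from the cfg80211 AP settings
+ * and store the SSID for later SoftAP (hostapd) or P2P GO use.
+ */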
+static s32 wl_cfg80211_bcn_set_params(
+ struct cfg80211_ap_settings *info,
+ struct net_device *dev,
+ u32 dev_role, s32 bssidx)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ s32 err = BCME_OK;
+
+ WL_DBG(("interval (%d) dtim_period (%d) \n",
+ info->beacon_interval, info->dtim_period));
+
+ if (info->beacon_interval) {
+ if ((err = wldev_ioctl_set(dev, WLC_SET_BCNPRD,
+ &info->beacon_interval, sizeof(s32))) < 0) {
+ WL_ERR(("Beacon Interval Set Error, %d\n", err));
+ return err;
+ }
+ }
+
+ if (info->dtim_period) {
+ if ((err = wldev_ioctl_set(dev, WLC_SET_DTIMPRD,
+ &info->dtim_period, sizeof(s32))) < 0) {
+ WL_ERR(("DTIM Interval Set Error, %d\n", err));
+ return err;
+ }
+ }
+
+ if ((info->ssid) && (info->ssid_len > 0) &&
+ (info->ssid_len <= DOT11_MAX_SSID_LEN)) {
+ WL_DBG(("SSID (%s) len:%zd \n", info->ssid, info->ssid_len));
+ if (dev_role == NL80211_IFTYPE_AP) {
+ /* Store the hostapd SSID */
+ bzero(cfg->hostapd_ssid.SSID, DOT11_MAX_SSID_LEN);
+ memcpy(cfg->hostapd_ssid.SSID, info->ssid, info->ssid_len);
+ cfg->hostapd_ssid.SSID_len = (uint32)info->ssid_len;
+ } else {
+ /* P2P GO */
+ bzero(cfg->p2p->ssid.SSID, DOT11_MAX_SSID_LEN);
+ memcpy(cfg->p2p->ssid.SSID, info->ssid, info->ssid_len);
+ cfg->p2p->ssid.SSID_len = (uint32)info->ssid_len;
+ }
+ }
+
+ return err;
+}
+#endif /* LINUX_VERSION >= VERSION(3,4,0) || WL_COMPAT_WIRELESS */
+
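+/* Scan an IE buffer (typically the beacon tail) for the WPS, RSN,
+ * FILS indication and WPA vendor IEs, recording pointers and lengths.
+ */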
+s32
+wl_cfg80211_parse_ies(const u8 *ptr, u32 len, struct parsed_ies *ies)
+{
+ s32 err = BCME_OK;
+
+ bzero(ies, sizeof(struct parsed_ies));
+
+ /* find the WPSIE */
+ if ((ies->wps_ie = wl_cfgp2p_find_wpsie(ptr, len)) != NULL) {
+ WL_DBG(("WPSIE in beacon \n"));
+ ies->wps_ie_len = ies->wps_ie->length + WPA_RSN_IE_TAG_FIXED_LEN;
+ } else {
+ WL_DBG(("No WPSIE in beacon \n"));
+ }
+
+ /* find the RSN_IE */
+ if ((ies->wpa2_ie = bcm_parse_tlvs(ptr, len,
+ DOT11_MNG_RSN_ID)) != NULL) {
+ WL_DBG((" WPA2 IE found\n"));
+ ies->wpa2_ie_len = ies->wpa2_ie->len;
+ }
+
+ /* find the FILS_IND_IE */
+ if ((ies->fils_ind_ie = bcm_parse_tlvs(ptr, len,
+ DOT11_MNG_FILS_IND_ID)) != NULL) {
+ WL_DBG((" FILS IND IE found\n"));
+ ies->fils_ind_ie_len = ies->fils_ind_ie->len;
+ }
+
+ /* find the WPA_IE */
+ if ((ies->wpa_ie = wl_cfgp2p_find_wpaie(ptr, len)) != NULL) {
+ WL_DBG((" WPA found\n"));
+ ies->wpa_ie_len = ies->wpa_ie->length;
+ }
+
+ return err;
+
+}
+
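+/* Put the firmware bsscfg into AP role. Newer chips and non-zero bsscfgs
+ * go through the "bss" iovar; older chips fall back to wl down, "apsta"
+ * and WLC_SET_AP. On bsscfg 0, PM is also disabled and infrastructure
+ * mode is forced.
+ */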
+s32
+wl_cfg80211_set_ap_role(
+ struct bcm_cfg80211 *cfg,
+ struct net_device *dev)
+{
+ s32 err = BCME_OK;
+ s32 infra = 1;
+ s32 ap = 0;
+ s32 pm;
+ s32 bssidx;
+ s32 apsta = 0;
+ bool new_chip;
+#ifdef WLEASYMESH
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+#endif /* WLEASYMESH */
+
+ new_chip = wl_new_chip_check(dev);
+
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+ WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
+ return -EINVAL;
+ }
+
+ WL_INFORM_MEM(("[%s] Bringup SoftAP on bssidx:%d \n", dev->name, bssidx));
+
+ if (bssidx != 0 || new_chip) {
+ if ((err = wl_cfg80211_add_del_bss(cfg, dev, bssidx,
+ WL_IF_TYPE_AP, 0, NULL)) < 0) {
+ WL_ERR(("wl add_del_bss returned error:%d\n", err));
+ return err;
+ }
+ }
+
+	/*
+	 * For older chips, the "bss" iovar does not support
+	 * bsscfg role change/upgrade, yet still returns
+	 * BCME_OK on the attempt. Hence, fall back to the
+	 * traditional handling below.
+	 */
+
+ if ((err = wldev_ioctl_get(dev,
+ WLC_GET_AP, &ap, sizeof(s32))) < 0) {
+ WL_ERR(("Getting AP mode failed %d \n", err));
+ return err;
+ }
+#ifdef WLEASYMESH
+ else if (dhd->conf->fw_type == FW_TYPE_EZMESH) {
+ WL_MSG(dev->name, "Getting AP mode ok, set map and dwds");
+ err = wldev_ioctl_set(dev, WLC_DOWN, &ap, sizeof(s32));
+ if (err < 0) {
+ WL_ERR(("WLC_DOWN error %d\n", err));
+ return err;
+ }
+ //For FrontHaulAP
+ err = wldev_iovar_setint(dev, "map", 2);
+ if (err < 0) {
+ WL_ERR(("wl map 2 error %d\n", err));
+ return err;
+ }
+ err = wldev_iovar_setint(dev, "dwds", 1);
+ if (err < 0) {
+ WL_ERR(("wl dwds 1 error %d\n", err));
+ return err;
+ }
+ WL_MSG(dev->name, "Get AP %d", (int)ap);
+ }
+#endif /* WLEASYMESH */
+
+ if (!ap) {
+ /* AP mode switch not supported. Try setting up AP explicitly */
+ err = wldev_iovar_getint(dev, "apsta", (s32 *)&apsta);
+ if (unlikely(err)) {
+ WL_ERR(("Could not get apsta %d\n", err));
+ return err;
+ }
+ if (apsta == 0) {
+ /* If apsta is not set, set it */
+
+ /* Check for any connected interfaces before wl down */
+ if (wl_get_drv_status_all(cfg, CONNECTED) > 0) {
+#ifdef WLEASYMESH
+ if (dhd->conf->fw_type == FW_TYPE_EZMESH) {
+ WL_MSG(dev->name, "do wl down");
+ } else {
+#endif /* WLEASYMESH */
+				WL_ERR(("Concurrent i/f operational. Can't do wl down\n"));
+ return BCME_ERROR;
+#ifdef WLEASYMESH
+ }
+#endif /* WLEASYMESH */
+ }
+ err = wldev_ioctl_set(dev, WLC_DOWN, &ap, sizeof(s32));
+ if (err < 0) {
+ WL_ERR(("WLC_DOWN error %d\n", err));
+ return err;
+ }
+#ifdef WLEASYMESH
+ if (dhd->conf->fw_type == FW_TYPE_EZMESH)
+ err = wldev_iovar_setint(dev, "apsta", 1);
+ else
+#endif /* WLEASYMESH */
+ err = wldev_iovar_setint(dev, "apsta", 0);
+ if (err < 0) {
+ WL_ERR(("wl apsta 0 error %d\n", err));
+ return err;
+ }
+ ap = 1;
+ if ((err = wldev_ioctl_set(dev,
+ WLC_SET_AP, &ap, sizeof(s32))) < 0) {
+ WL_ERR(("setting AP mode failed %d \n", err));
+ return err;
+ }
+#ifdef WLEASYMESH
+ //For FrontHaulAP
+ if (dhd->conf->fw_type == FW_TYPE_EZMESH) {
+ WL_MSG(dev->name, "wl map 2");
+ err = wldev_iovar_setint(dev, "map", 2);
+ if (err < 0) {
+ WL_ERR(("wl map 2 error %d\n", err));
+ return err;
+ }
+ err = wldev_iovar_setint(dev, "dwds", 1);
+ if (err < 0) {
+ WL_ERR(("wl dwds 1 error %d\n", err));
+ return err;
+ }
+ }
+#endif /* WLEASYMESH */
+ }
+ }
+ else if (bssidx == 0 && !new_chip
+#ifdef WL_EXT_IAPSTA
+ && !wl_ext_iapsta_other_if_enabled(dev)
+#endif
+ ) {
+ err = wldev_ioctl_set(dev, WLC_DOWN, &ap, sizeof(s32));
+ if (err < 0) {
+ WL_ERR(("WLC_DOWN error %d\n", err));
+ return err;
+ }
+ err = wldev_iovar_setint(dev, "apsta", 0);
+ if (err < 0) {
+ WL_ERR(("wl apsta 0 error %d\n", err));
+ return err;
+ }
+ ap = 1;
+ if ((err = wldev_ioctl_set(dev, WLC_SET_AP, &ap, sizeof(s32))) < 0) {
+ WL_ERR(("setting AP mode failed %d \n", err));
+ return err;
+ }
+ }
+
+ if (bssidx == 0) {
+ pm = 0;
+ if ((err = wldev_ioctl_set(dev, WLC_SET_PM, &pm, sizeof(pm))) != 0) {
+ WL_ERR(("wl PM 0 returned error:%d\n", err));
+ /* Ignore error, if any */
+ err = BCME_OK;
+ }
+ err = wldev_ioctl_set(dev, WLC_SET_INFRA, &infra, sizeof(s32));
+ if (err < 0) {
+ WL_ERR(("SET INFRA error %d\n", err));
+ return err;
+ }
+ }
+
+ /* On success, mark AP creation in progress. */
+ wl_set_drv_status(cfg, AP_CREATING, dev);
+ return 0;
+}
+
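+/* Delayed-work handler that runs if the AP link-up event does not arrive
+ * in time: wake the bus, mark the interface operation as failed,
+ * optionally collect a memdump, and send a hang notification upward.
+ */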
+void
+wl_cfg80211_ap_timeout_work(struct work_struct *work)
+{
+#if defined (BCMDONGLEHOST)
+ struct bcm_cfg80211 *cfg = NULL;
+ dhd_pub_t *dhdp = NULL;
+ BCM_SET_CONTAINER_OF(cfg, work, struct bcm_cfg80211, ap_work.work);
+
+ WL_ERR(("** AP LINK UP TIMEOUT **\n"));
+ dhdp = (dhd_pub_t *)(cfg->pub);
+ if (dhd_query_bus_erros(dhdp)) {
+ return;
+ }
+#ifdef DHD_PCIE_RUNTIMEPM
+ dhdpcie_runtime_bus_wake(dhdp, CAN_SLEEP(), __builtin_return_address(0));
+#endif /* DHD_PCIE_RUNTIMEPM */
+ dhdp->iface_op_failed = TRUE;
+
+#if defined(DHD_DEBUG) && defined(DHD_FW_COREDUMP)
+ if (dhdp->memdump_enabled) {
+ dhdp->memdump_type = DUMP_TYPE_AP_LINKUP_FAILURE;
+ dhd_bus_mem_dump(dhdp);
+ }
+#endif /* DHD_DEBUG && DHD_FW_COREDUMP */
+
+#if defined(OEM_ANDROID)
+ WL_ERR(("Notify hang event to upper layer \n"));
+ dhdp->hang_reason = HANG_REASON_IFACE_ADD_FAILURE;
+ net_os_send_hang_message(bcmcfg_to_prmry_ndev(cfg));
+#endif /* OEM_ANDROID */
+#endif /* BCMDONGLEHOST */
+}
+
+/* In RSDB downgrade cases, the link up event can get delayed up to 7-8 seconds */
+#define MAX_AP_LINK_WAIT_TIME 10000
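+/* Bring up the AP/GO bsscfg: force INFRA mode, abort any scan, arm the
+ * link-timeout work, then (for SoftAP) fix up the AP role, apply the
+ * 11h/HE/UAPSD/MFP/wsec settings and issue WLC_SET_SSID to start
+ * beaconing.
+ */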
+static s32
+wl_cfg80211_bcn_bringup_ap(
+ struct net_device *dev,
+ struct parsed_ies *ies,
+ u32 dev_role, s32 bssidx)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ struct wl_join_params join_params;
+ bool is_bssup = false;
+ s32 infra = 1;
+ s32 join_params_size = 0;
+ s32 ap = 1;
+ s32 wsec;
+#ifdef DISABLE_11H_SOFTAP
+ s32 spect = 0;
+#endif /* DISABLE_11H_SOFTAP */
+#ifdef SOFTAP_UAPSD_OFF
+ uint32 wme_apsd = 0;
+#endif /* SOFTAP_UAPSD_OFF */
+ s32 err = BCME_OK;
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+ s32 is_rsdb_supported = BCME_ERROR;
+ char sec[64];
+
+#if defined (BCMDONGLEHOST)
+ is_rsdb_supported = DHD_OPMODE_SUPPORTED(cfg->pub, DHD_FLAG_RSDB_MODE);
+ if (is_rsdb_supported < 0)
+ return (-ENODEV);
+#endif /* BCMDONGLEHOST */
+
+ WL_DBG(("Enter dev_role:%d bssidx:%d ifname:%s\n", dev_role, bssidx, dev->name));
+
+ /* Common code for SoftAP and P2P GO */
+ wl_clr_drv_status(cfg, AP_CREATED, dev);
+
+ /* Make sure INFRA is set for AP/GO */
+ err = wldev_ioctl_set(dev, WLC_SET_INFRA, &infra, sizeof(s32));
+ if (err < 0) {
+ WL_ERR(("SET INFRA error %d\n", err));
+ goto exit;
+ }
+
+ /* Do abort scan before creating GO */
+ wl_cfgscan_cancel_scan(cfg);
+
+	/* Schedule delayed work to handle the link timeout. Schedule it
+	 * before the ssid iovar: the link-up event may arrive and be
+	 * processed before the iovar context resumes.
+	 */
+ if (schedule_delayed_work(&cfg->ap_work,
+ msecs_to_jiffies((const unsigned int)MAX_AP_LINK_WAIT_TIME))) {
+ WL_DBG(("ap timeout work scheduled\n"));
+ }
+
+ if (dev_role == NL80211_IFTYPE_P2P_GO) {
+ wl_ext_get_sec(dev, 0, sec, sizeof(sec), TRUE);
+ WL_MSG(dev->name, "Creating GO with sec=%s\n", sec);
+ is_bssup = wl_cfg80211_bss_isup(dev, bssidx);
+ if (!is_bssup && (ies->wpa2_ie != NULL)) {
+
+ err = wldev_iovar_setbuf_bsscfg(dev, "ssid", &cfg->p2p->ssid,
+ sizeof(cfg->p2p->ssid), cfg->ioctl_buf, WLC_IOCTL_MAXLEN,
+ bssidx, &cfg->ioctl_buf_sync);
+ if (err < 0) {
+ WL_ERR(("GO SSID setting error %d\n", err));
+ goto exit;
+ }
+
+ if ((err = wl_cfg80211_bss_up(cfg, dev, bssidx, 1)) < 0) {
+ WL_ERR(("GO Bring up error %d\n", err));
+ goto exit;
+ }
+ wl_clr_drv_status(cfg, AP_CREATING, dev);
+ } else
+ WL_DBG(("Bss is already up\n"));
+ } else if (dev_role == NL80211_IFTYPE_AP) {
+
+// if (!wl_get_drv_status(cfg, AP_CREATING, dev)) {
+ /* Make sure fw is in proper state */
+ err = wl_cfg80211_set_ap_role(cfg, dev);
+ if (unlikely(err)) {
+ WL_ERR(("set ap role failed!\n"));
+ goto exit;
+ }
+// }
+
+ /* Device role SoftAP */
+ WL_DBG(("Creating AP bssidx:%d dev_role:%d\n", bssidx, dev_role));
+ /* Clear the status bit after use */
+ wl_clr_drv_status(cfg, AP_CREATING, dev);
+
+#ifdef DISABLE_11H_SOFTAP
+	/* Some old WLAN cards (e.g. Intel PRO/Wireless 2200BG)
+	 * do not try to connect to a SoftAP because they cannot
+	 * detect 11h IEs. For this reason, disable the 11h feature
+	 * in SoftAP mode. (Related CSP case number: 661635)
+	 */
+ if (is_rsdb_supported == 0) {
+ err = wldev_ioctl_set(dev, WLC_DOWN, &ap, sizeof(s32));
+ if (err < 0) {
+ WL_ERR(("WLC_DOWN error %d\n", err));
+ goto exit;
+ }
+ }
+ err = wldev_ioctl_set(dev, WLC_SET_SPECT_MANAGMENT,
+ &spect, sizeof(s32));
+ if (err < 0) {
+ WL_ERR(("SET SPECT_MANAGMENT error %d\n", err));
+ goto exit;
+ }
+#endif /* DISABLE_11H_SOFTAP */
+
+#ifdef WL_DISABLE_HE_SOFTAP
+ err = wl_cfg80211_set_he_mode(dev, cfg, bssidx, WL_HE_FEATURES_HE_AP, FALSE);
+ if (err < 0) {
+ WL_ERR(("failed to set he features, error=%d\n", err));
+ }
+#endif /* WL_DISABLE_HE_SOFTAP */
+
+#ifdef SOFTAP_UAPSD_OFF
+ err = wldev_iovar_setbuf_bsscfg(dev, "wme_apsd", &wme_apsd, sizeof(wme_apsd),
+ cfg->ioctl_buf, WLC_IOCTL_SMLEN, bssidx, &cfg->ioctl_buf_sync);
+ if (err < 0) {
+ WL_ERR(("failed to disable uapsd, error=%d\n", err));
+ }
+#endif /* SOFTAP_UAPSD_OFF */
+
+ err = wldev_ioctl_set(dev, WLC_UP, &ap, sizeof(s32));
+ if (unlikely(err)) {
+ WL_ERR(("WLC_UP error (%d)\n", err));
+ goto exit;
+ }
+
+#ifdef MFP
+ if (cfg->bip_pos) {
+ err = wldev_iovar_setbuf_bsscfg(dev, "bip",
+ (const void *)(cfg->bip_pos), WPA_SUITE_LEN, cfg->ioctl_buf,
+ WLC_IOCTL_SMLEN, bssidx, &cfg->ioctl_buf_sync);
+ if (err < 0) {
+ WL_ERR(("bip set error %d\n", err));
+
+#ifdef CUSTOMER_HW6
+ if (wl_customer6_legacy_chip_check(cfg,
+ bcmcfg_to_prmry_ndev(cfg))) {
+				/* Ignore bip error: some older firmwares don't
+				 * support the bip iovar and return BCME_NOTUP when
+				 * trying to set bip from the AP bring-up context.
+				 * These firmwares include bip in the RSN IE by
+				 * default, so it is okay to ignore the error.
+				 */
+ err = BCME_OK;
+ } else
+#endif /* CUSTOMER_HW6 */
+
+ {
+ goto exit;
+ }
+ }
+ }
+#endif /* MFP */
+
+ err = wldev_iovar_getint(dev, "wsec", (s32 *)&wsec);
+ if (unlikely(err)) {
+ WL_ERR(("Could not get wsec %d\n", err));
+ goto exit;
+ }
+ if (dhd->conf->chip == BCM43430_CHIP_ID && bssidx > 0 &&
+ (wsec & (TKIP_ENABLED|AES_ENABLED))) {
+ wsec |= WSEC_SWFLAG; // terence 20180628: fix me, this is a workaround
+ err = wldev_iovar_setint_bsscfg(dev, "wsec", wsec, bssidx);
+ if (err < 0) {
+ WL_ERR(("wsec error %d\n", err));
+ goto exit;
+ }
+ }
+ if ((wsec == WEP_ENABLED) && cfg->wep_key.len) {
+ WL_DBG(("Applying buffered WEP KEY \n"));
+ err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &cfg->wep_key,
+ sizeof(struct wl_wsec_key), cfg->ioctl_buf,
+ WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+ /* clear the key after use */
+ bzero(&cfg->wep_key, sizeof(struct wl_wsec_key));
+ if (unlikely(err)) {
+ WL_ERR(("WLC_SET_KEY error (%d)\n", err));
+ goto exit;
+ }
+ }
+
+#ifdef MFP
+ if (cfg->mfp_mode) {
+ /* This needs to go after wsec otherwise the wsec command will
+ * overwrite the values set by MFP
+ */
+ err = wldev_iovar_setint_bsscfg(dev, "mfp", cfg->mfp_mode, bssidx);
+ if (err < 0) {
+ WL_ERR(("MFP Setting failed. ret = %d \n", err));
+ /* If fw doesn't support mfp, Ignore the error */
+ if (err != BCME_UNSUPPORTED) {
+ goto exit;
+ }
+ }
+ }
+#endif /* MFP */
+
+ bzero(&join_params, sizeof(join_params));
+ /* join parameters starts with ssid */
+ join_params_size = sizeof(join_params.ssid);
+ join_params.ssid.SSID_len = MIN(cfg->hostapd_ssid.SSID_len,
+ (uint32)DOT11_MAX_SSID_LEN);
+ memcpy(join_params.ssid.SSID, cfg->hostapd_ssid.SSID,
+ join_params.ssid.SSID_len);
+ join_params.ssid.SSID_len = htod32(join_params.ssid.SSID_len);
+
+ wl_ext_get_sec(dev, 0, sec, sizeof(sec), TRUE);
+ WL_MSG(dev->name, "Creating AP with sec=%s\n", sec);
+ /* create softap */
+ if ((err = wldev_ioctl_set(dev, WLC_SET_SSID, &join_params,
+ join_params_size)) != 0) {
+ WL_ERR(("SoftAP/GO set ssid failed! \n"));
+ goto exit;
+ } else {
+ WL_DBG((" SoftAP SSID \"%s\" \n", join_params.ssid.SSID));
+ }
+
+ if (bssidx != 0) {
+ /* AP on Virtual Interface */
+ if ((err = wl_cfg80211_bss_up(cfg, dev, bssidx, 1)) < 0) {
+ WL_ERR(("AP Bring up error %d\n", err));
+ goto exit;
+ }
+ }
+
+ } else {
+ WL_ERR(("Wrong interface type %d\n", dev_role));
+ goto exit;
+ }
+
+ SUPP_LOG(("AP/GO UP\n"));
+
+exit:
+ if (cfg->wep_key.len) {
+ bzero(&cfg->wep_key, sizeof(struct wl_wsec_key));
+ }
+
+#ifdef MFP
+ if (cfg->mfp_mode) {
+ cfg->mfp_mode = 0;
+ }
+
+ if (cfg->bip_pos) {
+ cfg->bip_pos = NULL;
+ }
+#endif /* MFP */
+
+ if (err) {
+ SUPP_LOG(("AP/GO bring up fail. err:%d\n", err));
+ /* Cancel work if scheduled */
+ if (delayed_work_pending(&cfg->ap_work)) {
+ cancel_delayed_work_sync(&cfg->ap_work);
+ WL_DBG(("cancelled ap_work\n"));
+ }
+ }
+ return err;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || \
+ defined(WL_COMPAT_WIRELESS)
+s32
+wl_cfg80211_parse_ap_ies(
+ struct net_device *dev,
+ struct cfg80211_beacon_data *info,
+ struct parsed_ies *ies)
+{
+ struct parsed_ies prb_ies;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+ const u8 *vndr = NULL;
+ u32 vndr_ie_len = 0;
+ s32 err = BCME_OK;
+
+ /* Parse Beacon IEs */
+ if (wl_cfg80211_parse_ies((const u8 *)info->tail,
+ info->tail_len, ies) < 0) {
+ WL_ERR(("Beacon get IEs failed \n"));
+ err = -EINVAL;
+ goto fail;
+ }
+
+ if ((err = wl_cfg80211_config_rsnxe_ie(cfg, dev,
+ (const u8 *)info->tail, info->tail_len)) < 0) {
+ WL_ERR(("Failed to configure rsnxe ie: %d\n", err));
+ err = -EINVAL;
+ goto fail;
+ }
+
+ vndr = (const u8 *)info->proberesp_ies;
+ vndr_ie_len = (uint32)info->proberesp_ies_len;
+
+ if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
+ /* SoftAP mode */
+ const struct ieee80211_mgmt *mgmt;
+ mgmt = (const struct ieee80211_mgmt *)info->probe_resp;
+ if (mgmt != NULL) {
+ vndr = (const u8 *)&mgmt->u.probe_resp.variable;
+ vndr_ie_len = (uint32)(info->probe_resp_len -
+				offsetof(struct ieee80211_mgmt, u.probe_resp.variable));
+ }
+ }
+ /* Parse Probe Response IEs */
+ if (wl_cfg80211_parse_ies((const u8 *)vndr, vndr_ie_len, &prb_ies) < 0) {
+ WL_ERR(("PROBE RESP get IEs failed \n"));
+ err = -EINVAL;
+ }
+fail:
+
+ return err;
+}
+
+s32
+wl_cfg80211_set_ies(
+ struct net_device *dev,
+ struct cfg80211_beacon_data *info,
+ s32 bssidx)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+ const u8 *vndr = NULL;
+ u32 vndr_ie_len = 0;
+ s32 err = BCME_OK;
+
+ /* Set Beacon IEs to FW */
+ if ((err = wl_cfg80211_set_mgmt_vndr_ies(cfg, ndev_to_cfgdev(dev), bssidx,
+ VNDR_IE_BEACON_FLAG, (const u8 *)info->tail,
+ info->tail_len)) < 0) {
+ WL_ERR(("Set Beacon IE Failed \n"));
+ } else {
+ WL_DBG(("Applied Vndr IEs for Beacon \n"));
+ }
+
+ vndr = (const u8 *)info->proberesp_ies;
+ vndr_ie_len = (uint32)info->proberesp_ies_len;
+
+ if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
+ /* SoftAP mode */
+ const struct ieee80211_mgmt *mgmt;
+ mgmt = (const struct ieee80211_mgmt *)info->probe_resp;
+ if (mgmt != NULL) {
+ vndr = (const u8 *)&mgmt->u.probe_resp.variable;
+ vndr_ie_len = (uint32)(info->probe_resp_len -
+ offsetof(struct ieee80211_mgmt, u.probe_resp.variable));
+ }
+ }
+
+ /* Set Probe Response IEs to FW */
+ if ((err = wl_cfg80211_set_mgmt_vndr_ies(cfg, ndev_to_cfgdev(dev), bssidx,
+ VNDR_IE_PRBRSP_FLAG, vndr, vndr_ie_len)) < 0) {
+ WL_ERR(("Set Probe Resp IE Failed \n"));
+ } else {
+ WL_DBG(("Applied Vndr IEs for Probe Resp \n"));
+ }
+
+ return err;
+}
+#endif /* LINUX_VERSION >= VERSION(3,4,0) || WL_COMPAT_WIRELESS */
+
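+/* Handle hostapd security updates on a running AP: detect WPS/WPA/RSN IE
+ * changes, cache the new IEs, and if the security configuration changed,
+ * cycle the bsscfg down, re-validate the IEs and bring it back up.
+ */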
+static s32 wl_cfg80211_hostapd_sec(
+ struct net_device *dev,
+ struct parsed_ies *ies,
+ s32 bssidx)
+{
+ bool update_bss = 0;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ wl_cfgbss_t *bss = wl_get_cfgbss_by_wdev(cfg, dev->ieee80211_ptr);
+
+ if (!bss) {
+ WL_ERR(("cfgbss is NULL \n"));
+ return -EINVAL;
+ }
+
+ if (ies->wps_ie) {
+ /* Remove after verification.
+ * Setting IE part moved to set_ies func
+ */
+ if (bss->wps_ie &&
+ memcmp(bss->wps_ie, ies->wps_ie, ies->wps_ie_len)) {
+ WL_DBG((" WPS IE is changed\n"));
+ MFREE(cfg->osh, bss->wps_ie, bss->wps_ie[1] + 2);
+ bss->wps_ie = MALLOCZ(cfg->osh, ies->wps_ie_len);
+ if (bss->wps_ie) {
+ memcpy(bss->wps_ie, ies->wps_ie, ies->wps_ie_len);
+ }
+ } else if (bss->wps_ie == NULL) {
+ WL_DBG((" WPS IE is added\n"));
+ bss->wps_ie = MALLOCZ(cfg->osh, ies->wps_ie_len);
+ if (bss->wps_ie) {
+ memcpy(bss->wps_ie, ies->wps_ie, ies->wps_ie_len);
+ }
+ }
+
+#if defined(SUPPORT_SOFTAP_WPAWPA2_MIXED)
+ if (ies->wpa_ie != NULL && ies->wpa2_ie != NULL) {
+			WL_ERR(("update bss - wpa_ie and wpa2_ie are not null\n"));
+ if (!bss->security_mode) {
+ /* change from open mode to security mode */
+ update_bss = true;
+ bss->wpa_ie = MALLOCZ(cfg->osh,
+ ies->wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN);
+ if (bss->wpa_ie) {
+ memcpy(bss->wpa_ie, ies->wpa_ie,
+ ies->wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN);
+ }
+ bss->rsn_ie = MALLOCZ(cfg->osh,
+ ies->wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN);
+ if (bss->rsn_ie) {
+ memcpy(bss->rsn_ie, ies->wpa2_ie,
+ ies->wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN);
+ }
+ } else {
+ /* change from (WPA or WPA2 or WPA/WPA2) to WPA/WPA2 mixed mode */
+ if (bss->wpa_ie) {
+ if (memcmp(bss->wpa_ie,
+ ies->wpa_ie, ies->wpa_ie->length +
+ WPA_RSN_IE_TAG_FIXED_LEN)) {
+ MFREE(cfg->osh, bss->wpa_ie,
+ bss->wpa_ie[1] + WPA_RSN_IE_TAG_FIXED_LEN);
+ update_bss = true;
+ bss->wpa_ie = MALLOCZ(cfg->osh,
+ ies->wpa_ie->length
+ + WPA_RSN_IE_TAG_FIXED_LEN);
+ if (bss->wpa_ie) {
+ memcpy(bss->wpa_ie, ies->wpa_ie,
+ ies->wpa_ie->length
+ + WPA_RSN_IE_TAG_FIXED_LEN);
+ }
+ }
+ }
+ else {
+ update_bss = true;
+ bss->wpa_ie = MALLOCZ(cfg->osh,
+ ies->wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN);
+ if (bss->wpa_ie) {
+ memcpy(bss->wpa_ie, ies->wpa_ie,
+ ies->wpa_ie->length
+ + WPA_RSN_IE_TAG_FIXED_LEN);
+ }
+ }
+ if (bss->rsn_ie) {
+ if (memcmp(bss->rsn_ie,
+ ies->wpa2_ie,
+ ies->wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN)) {
+ update_bss = true;
+ MFREE(cfg->osh, bss->rsn_ie,
+ bss->rsn_ie[1] + WPA_RSN_IE_TAG_FIXED_LEN);
+ bss->rsn_ie = MALLOCZ(cfg->osh,
+ ies->wpa2_ie->len
+ + WPA_RSN_IE_TAG_FIXED_LEN);
+ if (bss->rsn_ie) {
+ memcpy(bss->rsn_ie, ies->wpa2_ie,
+ ies->wpa2_ie->len
+ + WPA_RSN_IE_TAG_FIXED_LEN);
+ }
+ }
+ }
+ else {
+ update_bss = true;
+ bss->rsn_ie = MALLOCZ(cfg->osh,
+ ies->wpa2_ie->len
+ + WPA_RSN_IE_TAG_FIXED_LEN);
+ if (bss->rsn_ie) {
+ memcpy(bss->rsn_ie, ies->wpa2_ie,
+ ies->wpa2_ie->len
+ + WPA_RSN_IE_TAG_FIXED_LEN);
+ }
+ }
+ }
+ WL_ERR(("update_bss=%d\n", update_bss));
+ if (update_bss) {
+ bss->security_mode = true;
+ wl_cfg80211_bss_up(cfg, dev, bssidx, 0);
+ if (wl_validate_wpaie_wpa2ie(dev, ies->wpa_ie,
+ ies->wpa2_ie, bssidx) < 0) {
+ return BCME_ERROR;
+ }
+ wl_cfg80211_bss_up(cfg, dev, bssidx, 1);
+ }
+
+ }
+ else
+#endif /* SUPPORT_SOFTAP_WPAWPA2_MIXED */
+ if ((ies->wpa_ie != NULL || ies->wpa2_ie != NULL)) {
+ if (!bss->security_mode) {
+ /* change from open mode to security mode */
+ update_bss = true;
+ if (ies->wpa_ie != NULL) {
+ bss->wpa_ie = MALLOCZ(cfg->osh,
+ ies->wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN);
+ if (bss->wpa_ie) {
+ memcpy(bss->wpa_ie,
+ ies->wpa_ie,
+ ies->wpa_ie->length
+ + WPA_RSN_IE_TAG_FIXED_LEN);
+ }
+ } else {
+ bss->rsn_ie = MALLOCZ(cfg->osh,
+ ies->wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN);
+ if (bss->rsn_ie) {
+ memcpy(bss->rsn_ie,
+ ies->wpa2_ie,
+ ies->wpa2_ie->len
+ + WPA_RSN_IE_TAG_FIXED_LEN);
+ }
+ }
+ } else if (bss->wpa_ie) {
+ /* change from WPA2 mode to WPA mode */
+ if (ies->wpa_ie != NULL) {
+ update_bss = true;
+ MFREE(cfg->osh, bss->rsn_ie,
+ bss->rsn_ie[1] + WPA_RSN_IE_TAG_FIXED_LEN);
+ bss->rsn_ie = NULL;
+ bss->wpa_ie = MALLOCZ(cfg->osh,
+ ies->wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN);
+ if (bss->wpa_ie) {
+ memcpy(bss->wpa_ie,
+ ies->wpa_ie,
+ ies->wpa_ie->length
+ + WPA_RSN_IE_TAG_FIXED_LEN);
+ }
+ } else if (memcmp(bss->rsn_ie,
+ ies->wpa2_ie, ies->wpa2_ie->len
+ + WPA_RSN_IE_TAG_FIXED_LEN)) {
+ update_bss = true;
+ MFREE(cfg->osh, bss->rsn_ie,
+ bss->rsn_ie[1] + WPA_RSN_IE_TAG_FIXED_LEN);
+ bss->rsn_ie = MALLOCZ(cfg->osh,
+ ies->wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN);
+ if (bss->rsn_ie) {
+ memcpy(bss->rsn_ie,
+ ies->wpa2_ie,
+ ies->wpa2_ie->len
+ + WPA_RSN_IE_TAG_FIXED_LEN);
+ }
+ bss->wpa_ie = NULL;
+ }
+ }
+ if (update_bss) {
+ bss->security_mode = true;
+ wl_cfg80211_bss_up(cfg, dev, bssidx, 0);
+ if (wl_validate_wpa2ie(dev, ies->wpa2_ie, bssidx) < 0 ||
+ wl_validate_wpaie(dev, ies->wpa_ie, bssidx) < 0) {
+ return BCME_ERROR;
+ }
+ wl_cfg80211_bss_up(cfg, dev, bssidx, 1);
+ }
+ }
+ } else {
+ WL_ERR(("No WPSIE in beacon \n"));
+ }
+ return 0;
+}
+
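+/* Configure station (SCB) supervision for the AP: keepalive probe timing
+ * via "scb_probe", plus the pspretend retry limit and threshold (errors
+ * are ignored if the firmware does not support those iovars).
+ */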
+static s32
+wl_cfg80211_set_scb_timings(
+ struct bcm_cfg80211 *cfg,
+ struct net_device *dev)
+{
+ int err;
+ u32 ps_pretend;
+ wl_scb_probe_t scb_probe;
+ u32 ps_pretend_retries;
+
+ bzero(&scb_probe, sizeof(wl_scb_probe_t));
+ scb_probe.scb_timeout = WL_SCB_TIMEOUT;
+ scb_probe.scb_activity_time = WL_SCB_ACTIVITY_TIME;
+ scb_probe.scb_max_probe = WL_SCB_MAX_PROBE;
+ err = wldev_iovar_setbuf(dev, "scb_probe", (void *)&scb_probe,
+ sizeof(wl_scb_probe_t), cfg->ioctl_buf, WLC_IOCTL_SMLEN,
+ &cfg->ioctl_buf_sync);
+ if (unlikely(err)) {
+ WL_ERR(("set 'scb_probe' failed, error = %d\n", err));
+ return err;
+ }
+
+ ps_pretend_retries = WL_PSPRETEND_RETRY_LIMIT;
+ err = wldev_iovar_setint(dev, "pspretend_retry_limit", ps_pretend_retries);
+ if (unlikely(err)) {
+ if (err == BCME_UNSUPPORTED) {
+ /* Ignore error if fw doesn't support the iovar */
+ WL_DBG(("set 'pspretend_retry_limit %d' failed, error = %d\n",
+ ps_pretend_retries, err));
+ } else {
+ WL_ERR(("set 'pspretend_retry_limit %d' failed, error = %d\n",
+ ps_pretend_retries, err));
+ return err;
+ }
+ }
+
+ ps_pretend = MAX(WL_SCB_MAX_PROBE / 2, WL_MIN_PSPRETEND_THRESHOLD);
+ err = wldev_iovar_setint(dev, "pspretend_threshold", ps_pretend);
+ if (unlikely(err)) {
+ if (err == BCME_UNSUPPORTED) {
+ /* Ignore error if fw doesn't support the iovar */
+ WL_DBG(("wl pspretend_threshold %d set error %d\n",
+ ps_pretend, err));
+ } else {
+ WL_ERR(("wl pspretend_threshold %d set error %d\n",
+ ps_pretend, err));
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || \
+ defined(WL_COMPAT_WIRELESS)
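+/* cfg80211 start_ap handler: resolve the bsscfg index and device role,
+ * set the channel and beacon parameters, validate and apply security,
+ * bring up the AP/GO, then apply SCB timings, the ACL, vendor IEs and
+ * the hidden-SSID setting. On failure the AP is stopped again and TDLS
+ * is re-enabled.
+ */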
+s32
+wl_cfg80211_start_ap(
+ struct wiphy *wiphy,
+ struct net_device *dev,
+ struct cfg80211_ap_settings *info)
+{
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ s32 err = BCME_OK;
+ struct parsed_ies ies;
+ s32 bssidx = 0;
+ u32 dev_role = 0;
+#ifdef BCMDONGLEHOST
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+#endif /* BCMDONGLEHOST */
+
+ WL_DBG(("Enter \n"));
+
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+ WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
+ return BCME_ERROR;
+ }
+
+ if (p2p_is_on(cfg) && (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_GO)) {
+ dev_role = NL80211_IFTYPE_P2P_GO;
+ } else if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_AP) {
+
+ if (!wl_get_drv_status(cfg, AP_CREATING, dev)) {
+ /* Make sure fw is in proper state */
+ err = wl_cfg80211_set_ap_role(cfg, dev);
+ if (unlikely(err)) {
+ WL_ERR(("set ap role failed!\n"));
+ return BCME_ERROR;
+ }
+ }
+ dev_role = NL80211_IFTYPE_AP;
+#ifdef BCMDONGLEHOST
+ dhd->op_mode |= DHD_FLAG_HOSTAP_MODE;
+ err = dhd_ndo_enable(dhd, FALSE);
+ WL_DBG(("Disabling NDO on Hostapd mode %d\n", err));
+ if (err) {
+ WL_ERR(("Disabling NDO Failed %d\n", err));
+ }
+ wl_wlfc_enable(cfg, TRUE);
+#ifdef WL_EXT_IAPSTA
+ wl_ext_iapsta_update_iftype(dev, WL_IF_TYPE_AP);
+#endif /* WL_EXT_IAPSTA */
+#ifdef PKT_FILTER_SUPPORT
+ /* Disable packet filter */
+ if (dhd->early_suspended) {
+ WL_ERR(("Disable pkt_filter\n"));
+ dhd_enable_packet_filter(0, dhd);
+#ifdef APF
+ dhd_dev_apf_disable_filter(dhd_linux_get_primary_netdev(dhd));
+#endif /* APF */
+ }
+#endif /* PKT_FILTER_SUPPORT */
+#endif /* BCMDONGLEHOST */
+ } else {
+ /* only AP or GO role need to be handled here. */
+ err = -EINVAL;
+ goto fail;
+ }
+
+ /* disable TDLS */
+#ifdef WLTDLS
+ if (bssidx == 0) {
+ /* Disable TDLS for primary Iface. For virtual interface,
+ * tdls disable will happen from interface create context
+ */
+ wl_cfg80211_tdls_config(cfg, TDLS_STATE_AP_CREATE, false);
+ }
+#endif /* WLTDLS */
+
+ if (!check_dev_role_integrity(cfg, dev_role)) {
+ err = -EINVAL;
+ goto fail;
+ }
+
+/*
+ * TODO:
+ * Check whether 802.11ac-160MHz bandwidth channel setting has to use the
+ * center frequencies present in 'preset_chandef' instead of using the
+ * hardcoded values in 'wl_cfg80211_set_channel()'.
+ */
+#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) && !defined(WL_COMPAT_WIRELESS))
+ if (!dev->ieee80211_ptr->preset_chandef.chan) {
+ WL_ERR(("chan is NULL\n"));
+ err = -EINVAL;
+ goto fail;
+ }
+	if ((err = wl_cfg80211_set_channel(wiphy, dev,
+		dev->ieee80211_ptr->preset_chandef.chan,
+		NL80211_CHAN_HT20)) < 0) {
+ WL_ERR(("Set channel failed \n"));
+ goto fail;
+ }
+#endif /* ((LINUX_VERSION >= VERSION(3, 6, 0) && !WL_COMPAT_WIRELESS) */
+
+ if ((err = wl_cfg80211_bcn_set_params(info, dev,
+ dev_role, bssidx)) < 0) {
+ WL_ERR(("Beacon params set failed \n"));
+ goto fail;
+ }
+
+ /* Parse IEs */
+ if ((err = wl_cfg80211_parse_ap_ies(dev, &info->beacon, &ies)) < 0) {
+ WL_ERR(("Set IEs failed \n"));
+ goto fail;
+ }
+
+ if ((err = wl_cfg80211_bcn_validate_sec(dev, &ies,
+ dev_role, bssidx, info->privacy)) < 0)
+ {
+ WL_ERR(("Beacon set security failed \n"));
+ goto fail;
+ }
+
+ if ((err = wl_cfg80211_bcn_bringup_ap(dev, &ies,
+ dev_role, bssidx)) < 0) {
+ WL_ERR(("Beacon bring up AP/GO failed \n"));
+ goto fail;
+ }
+
+ /* Set GC/STA SCB expiry timings. */
+ if ((err = wl_cfg80211_set_scb_timings(cfg, dev))) {
+ WL_ERR(("scb setting failed \n"));
+// goto fail;
+ }
+
+ wl_set_drv_status(cfg, CONNECTED, dev);
+ WL_DBG(("** AP/GO Created **\n"));
+
+#ifdef WL_CFG80211_ACL
+	/* Enforce Admission Control. */
+ if ((err = wl_cfg80211_set_mac_acl(wiphy, dev, info->acl)) < 0) {
+ WL_ERR(("Set ACL failed\n"));
+ }
+#endif /* WL_CFG80211_ACL */
+
+ /* Set IEs to FW */
+ if ((err = wl_cfg80211_set_ies(dev, &info->beacon, bssidx)) < 0)
+ WL_ERR(("Set IEs failed \n"));
+
+#ifdef WLDWDS
+ if (dev->ieee80211_ptr->use_4addr) {
+ if ((err = wl_cfg80211_set_mgmt_vndr_ies(cfg, ndev_to_cfgdev(dev), bssidx,
+ VNDR_IE_ASSOCRSP_FLAG, (const u8 *)info->beacon.assocresp_ies,
+ info->beacon.assocresp_ies_len)) < 0) {
+ WL_ERR(("Set ASSOC RESP IE Failed\n"));
+ }
+ }
+#endif /* WLDWDS */
+
+ /* Enable Probe Req filter, WPS-AP certification 4.2.13 */
+ if ((dev_role == NL80211_IFTYPE_AP) && (ies.wps_ie != NULL)) {
+ bool pbc = 0;
+ wl_validate_wps_ie((const char *) ies.wps_ie, ies.wps_ie_len, &pbc);
+ if (pbc) {
+ WL_DBG(("set WLC_E_PROBREQ_MSG\n"));
+ wl_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, true);
+ }
+ }
+
+ /* Configure hidden SSID */
+ if (info->hidden_ssid != NL80211_HIDDEN_SSID_NOT_IN_USE) {
+ if ((err = wldev_iovar_setint(dev, "closednet", 1)) < 0)
+ WL_ERR(("failed to set hidden : %d\n", err));
+ WL_DBG(("hidden_ssid_enum_val: %d \n", info->hidden_ssid));
+ }
+
+#ifdef SUPPORT_AP_RADIO_PWRSAVE
+ if (dev_role == NL80211_IFTYPE_AP) {
+ if (!wl_set_ap_rps(dev, FALSE, dev->name)) {
+ wl_cfg80211_init_ap_rps(cfg);
+ } else {
+ WL_ERR(("Set rpsnoa failed \n"));
+ }
+ }
+#endif /* SUPPORT_AP_RADIO_PWRSAVE */
+fail:
+ if (err) {
+ WL_ERR(("ADD/SET beacon failed\n"));
+ wl_flush_fw_log_buffer(dev, FW_LOGSET_MASK_ALL);
+ wl_cfg80211_stop_ap(wiphy, dev);
+ if (dev_role == NL80211_IFTYPE_AP) {
+#ifdef WL_EXT_IAPSTA
+ if (!wl_ext_iapsta_iftype_enabled(dev, WL_IF_TYPE_AP)) {
+#endif /* WL_EXT_IAPSTA */
+#ifdef BCMDONGLEHOST
+ dhd->op_mode &= ~DHD_FLAG_HOSTAP_MODE;
+#ifdef PKT_FILTER_SUPPORT
+ /* Enable packet filter */
+ if (dhd->early_suspended) {
+ WL_ERR(("Enable pkt_filter\n"));
+ dhd_enable_packet_filter(1, dhd);
+#ifdef APF
+ dhd_dev_apf_enable_filter(dhd_linux_get_primary_netdev(dhd));
+#endif /* APF */
+ }
+#endif /* PKT_FILTER_SUPPORT */
+#ifdef DISABLE_WL_FRAMEBURST_SOFTAP
+ wl_cfg80211_set_frameburst(cfg, TRUE);
+#endif /* DISABLE_WL_FRAMEBURST_SOFTAP */
+#endif /* BCMDONGLEHOST */
+ wl_wlfc_enable(cfg, FALSE);
+#ifdef WL_EXT_IAPSTA
+ }
+#endif /* WL_EXT_IAPSTA */
+ }
+#ifdef WLTDLS
+ if (bssidx == 0) {
+ /* Since AP creation failed, re-enable TDLS */
+ wl_cfg80211_tdls_config(cfg, TDLS_STATE_AP_DELETE, false);
+ }
+#endif /* WLTDLS */
+
+ }
+
+ return err;
+}
+
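+/* cfg80211 stop_ap handler: clear the AP state, take the bsscfg down,
+ * restore packet filters, frameburst and HE settings for SoftAP, and
+ * re-enable TDLS on the primary interface.
+ */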
+s32
+wl_cfg80211_stop_ap(
+ struct wiphy *wiphy,
+ struct net_device *dev)
+{
+ int err = 0;
+ u32 dev_role = 0;
+ int ap = 0;
+ s32 bssidx = 0;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ s32 is_rsdb_supported = BCME_ERROR;
+#ifdef BCMDONGLEHOST
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+#endif /* BCMDONGLEHOST */
+
+ WL_DBG(("Enter \n"));
+
+ if (wl_cfg80211_get_bus_state(cfg)) {
+		/* Since the bus is down, iovars will fail. The recovery path will bring up the bus. */
+ WL_ERR(("bus is not ready\n"));
+ return BCME_OK;
+ }
+#if defined (BCMDONGLEHOST)
+ is_rsdb_supported = DHD_OPMODE_SUPPORTED(cfg->pub, DHD_FLAG_RSDB_MODE);
+ if (is_rsdb_supported < 0)
+ return (-ENODEV);
+#endif
+
+ wl_clr_drv_status(cfg, AP_CREATING, dev);
+ wl_clr_drv_status(cfg, AP_CREATED, dev);
+ cfg->ap_oper_channel = INVCHANSPEC;
+
+ if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_AP) {
+ dev_role = NL80211_IFTYPE_AP;
+ WL_MSG(dev->name, "stopping AP operation\n");
+ } else if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_GO) {
+ dev_role = NL80211_IFTYPE_P2P_GO;
+ WL_MSG(dev->name, "stopping P2P GO operation\n");
+ } else {
+ WL_ERR(("no AP/P2P GO interface is operational.\n"));
+ return -EINVAL;
+ }
+
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+ WL_ERR(("find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
+ return BCME_ERROR;
+ }
+
+ if (!check_dev_role_integrity(cfg, dev_role)) {
+ WL_ERR(("role integrity check failed \n"));
+ err = -EINVAL;
+ goto exit;
+ }
+
+ /* Free up resources */
+ wl_cfg80211_cleanup_if(dev);
+
+ /* Clear AP/GO connected status */
+ wl_clr_drv_status(cfg, CONNECTED, dev);
+ if ((err = wl_cfg80211_bss_up(cfg, dev, bssidx, 0)) < 0) {
+ WL_ERR(("bss down error %d\n", err));
+ }
+
+ if (dev_role == NL80211_IFTYPE_AP) {
+#ifdef BCMDONGLEHOST
+#ifdef DISABLE_WL_FRAMEBURST_SOFTAP
+ wl_cfg80211_set_frameburst(cfg, TRUE);
+#endif /* DISABLE_WL_FRAMEBURST_SOFTAP */
+#endif /* BCMDONGLEHOST */
+#ifdef PKT_FILTER_SUPPORT
+ /* Enable packet filter */
+ if (dhd->early_suspended) {
+ WL_ERR(("Enable pkt_filter\n"));
+ dhd_enable_packet_filter(1, dhd);
+#ifdef APF
+ dhd_dev_apf_enable_filter(dhd_linux_get_primary_netdev(dhd));
+#endif /* APF */
+ }
+#endif /* PKT_FILTER_SUPPORT */
+
+ if (is_rsdb_supported == 0) {
+ /* For non-rsdb chips, we use stand alone AP. Do wl down on stop AP */
+ err = wldev_ioctl_set(dev, WLC_UP, &ap, sizeof(s32));
+ if (unlikely(err)) {
+ WL_ERR(("WLC_UP error (%d)\n", err));
+ err = -EINVAL;
+ goto exit;
+ }
+ }
+
+#ifdef WL_DISABLE_HE_SOFTAP
+ if (wl_cfg80211_set_he_mode(dev, cfg, bssidx, WL_HE_FEATURES_HE_AP,
+ TRUE) != BCME_OK) {
+ WL_ERR(("failed to set he features\n"));
+ }
+#endif /* WL_DISABLE_HE_SOFTAP */
+
+ wl_cfg80211_clear_per_bss_ies(cfg, dev->ieee80211_ptr);
+#ifdef SUPPORT_AP_RADIO_PWRSAVE
+ if (!wl_set_ap_rps(dev, FALSE, dev->name)) {
+ wl_cfg80211_init_ap_rps(cfg);
+ } else {
+ WL_ERR(("Set rpsnoa failed \n"));
+ }
+#endif /* SUPPORT_AP_RADIO_PWRSAVE */
+ } else {
+ /* Do we need to do something here */
+ WL_DBG(("Stopping P2P GO \n"));
+
+#if defined(BCMDONGLEHOST) && defined(OEM_ANDROID)
+ DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE((dhd_pub_t *)(cfg->pub),
+ DHD_EVENT_TIMEOUT_MS*3);
+ DHD_OS_WAKE_LOCK_TIMEOUT((dhd_pub_t *)(cfg->pub));
+#endif
+
+ }
+
+ SUPP_LOG(("AP/GO Link down\n"));
+exit:
+ if (err) {
+ /* In case of failure, flush fw logs */
+ wl_flush_fw_log_buffer(dev, FW_LOGSET_MASK_ALL);
+ SUPP_LOG(("AP/GO Link down fail. err:%d\n", err));
+ }
+#ifdef WLTDLS
+ if (bssidx == 0) {
+ /* re-enable TDLS if the number of connected interfaces is less than 2 */
+ wl_cfg80211_tdls_config(cfg, TDLS_STATE_AP_DELETE, false);
+ }
+#endif /* WLTDLS */
+
+#ifdef BCMDONGLEHOST
+ if (dev_role == NL80211_IFTYPE_AP) {
+#ifdef WL_EXT_IAPSTA
+ if (!wl_ext_iapsta_iftype_enabled(dev, WL_IF_TYPE_AP)) {
+#endif /* WL_EXT_IAPSTA */
+ /* clear the AP mode */
+ dhd->op_mode &= ~DHD_FLAG_HOSTAP_MODE;
+ wl_wlfc_enable(cfg, FALSE);
+#ifdef WL_EXT_IAPSTA
+ }
+#endif /* WL_EXT_IAPSTA */
+ }
+#endif /* BCMDONGLEHOST */
+ return err;
+}
+
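+/* cfg80211 change_beacon handler: re-parse the updated IEs, push them to
+ * the firmware, and let wl_cfg80211_hostapd_sec() apply any security or
+ * WPS changes on the running AP.
+ */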
+s32
+wl_cfg80211_change_beacon(
+ struct wiphy *wiphy,
+ struct net_device *dev,
+ struct cfg80211_beacon_data *info)
+{
+ s32 err = BCME_OK;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct parsed_ies ies;
+ u32 dev_role = 0;
+ s32 bssidx = 0;
+ bool pbc = 0;
+
+ WL_DBG(("Enter \n"));
+
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+ WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
+ return BCME_ERROR;
+ }
+
+ if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_GO) {
+ dev_role = NL80211_IFTYPE_P2P_GO;
+ } else if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_AP) {
+ dev_role = NL80211_IFTYPE_AP;
+ } else {
+ err = -EINVAL;
+ goto fail;
+ }
+
+ if (!check_dev_role_integrity(cfg, dev_role)) {
+ err = -EINVAL;
+ goto fail;
+ }
+
+ if ((dev_role == NL80211_IFTYPE_P2P_GO) && (cfg->p2p_wdev == NULL)) {
+ WL_ERR(("P2P already down status!\n"));
+ err = BCME_ERROR;
+ goto fail;
+ }
+
+ /* Parse IEs */
+ if ((err = wl_cfg80211_parse_ap_ies(dev, info, &ies)) < 0) {
+ WL_ERR(("Parse IEs failed \n"));
+ goto fail;
+ }
+
+ /* Set IEs to FW */
+ if ((err = wl_cfg80211_set_ies(dev, info, bssidx)) < 0) {
+ WL_ERR(("Set IEs failed \n"));
+ goto fail;
+ }
+
+ if (dev_role == NL80211_IFTYPE_AP) {
+ if (wl_cfg80211_hostapd_sec(dev, &ies, bssidx) < 0) {
+ WL_ERR(("Hostapd update sec failed \n"));
+ err = -EINVAL;
+ goto fail;
+ }
+ /* Enable Probe Req filter, WPS-AP certification 4.2.13 */
+ if ((dev_role == NL80211_IFTYPE_AP) && (ies.wps_ie != NULL)) {
+ wl_validate_wps_ie((const char *) ies.wps_ie, ies.wps_ie_len, &pbc);
+			WL_DBG((" WPS AP, wps_ie exists pbc=%d\n", pbc));
+ if (pbc)
+ wl_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, true);
+ else
+ wl_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, false);
+ }
+ }
+
+fail:
+ if (err) {
+ wl_flush_fw_log_buffer(dev, FW_LOGSET_MASK_ALL);
+ }
+ return err;
+}
+#else
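+/* Legacy (kernel < 3.4) add/set beacon handler: extract the SSID from the
+ * beacon head, apply vendor IEs, validate security, set the beacon and
+ * DTIM intervals, and bring up the AP/GO if the bss is not already up.
+ */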
+s32
+wl_cfg80211_add_set_beacon(struct wiphy *wiphy, struct net_device *dev,
+ struct beacon_parameters *info)
+{
+ s32 err = BCME_OK;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ s32 ie_offset = 0;
+ s32 bssidx = 0;
+ u32 dev_role = NL80211_IFTYPE_AP;
+ struct parsed_ies ies;
+ bcm_tlv_t *ssid_ie;
+ bool pbc = 0;
+ bool privacy;
+ bool is_bss_up = 0;
+#ifdef BCMDONGLEHOST
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+#endif /* BCMDONGLEHOST */
+
+ WL_DBG(("interval (%d) dtim_period (%d) head_len (%d) tail_len (%d)\n",
+ info->interval, info->dtim_period, info->head_len, info->tail_len));
+
+ if (dev == bcmcfg_to_prmry_ndev(cfg)) {
+ dev_role = NL80211_IFTYPE_AP;
+ }
+#if defined(WL_ENABLE_P2P_IF)
+ else if (dev == cfg->p2p_net) {
+ /* Group Add request on p2p0 */
+ dev = bcmcfg_to_prmry_ndev(cfg);
+ dev_role = NL80211_IFTYPE_P2P_GO;
+ }
+#endif /* WL_ENABLE_P2P_IF */
+
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+ WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
+ return BCME_ERROR;
+ }
+
+ if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_GO) {
+ dev_role = NL80211_IFTYPE_P2P_GO;
+ } else if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_AP) {
+#ifdef BCMDONGLEHOST
+ dhd->op_mode |= DHD_FLAG_HOSTAP_MODE;
+#endif
+ }
+
+ if (!check_dev_role_integrity(cfg, dev_role)) {
+ err = -ENODEV;
+ goto fail;
+ }
+
+ if ((dev_role == NL80211_IFTYPE_P2P_GO) && (cfg->p2p_wdev == NULL)) {
+ WL_ERR(("P2P already down status!\n"));
+ err = BCME_ERROR;
+ goto fail;
+ }
+
+ ie_offset = DOT11_MGMT_HDR_LEN + DOT11_BCN_PRB_FIXED_LEN;
+ /* find the SSID */
+ if ((ssid_ie = bcm_parse_tlvs((u8 *)&info->head[ie_offset],
+ info->head_len - ie_offset,
+ DOT11_MNG_SSID_ID)) != NULL) {
+ if (dev_role == NL80211_IFTYPE_AP) {
+ /* Store the hostapd SSID */
+ bzero(&cfg->hostapd_ssid.SSID[0], DOT11_MAX_SSID_LEN);
+ cfg->hostapd_ssid.SSID_len = MIN(ssid_ie->len, DOT11_MAX_SSID_LEN);
+ memcpy(&cfg->hostapd_ssid.SSID[0], ssid_ie->data,
+ cfg->hostapd_ssid.SSID_len);
+ } else {
+ /* P2P GO */
+ bzero(&cfg->p2p->ssid.SSID[0], DOT11_MAX_SSID_LEN);
+ cfg->p2p->ssid.SSID_len = MIN(ssid_ie->len, DOT11_MAX_SSID_LEN);
+ memcpy(cfg->p2p->ssid.SSID, ssid_ie->data,
+ cfg->p2p->ssid.SSID_len);
+ }
+ }
+
+ if (wl_cfg80211_parse_ies((u8 *)info->tail,
+ info->tail_len, &ies) < 0) {
+ WL_ERR(("Beacon get IEs failed \n"));
+ err = -EINVAL;
+ goto fail;
+ }
+
+ if ((err = wl_cfg80211_set_mgmt_vndr_ies(cfg, ndev_to_cfgdev(dev), bssidx,
+ VNDR_IE_BEACON_FLAG, (u8 *)info->tail,
+ info->tail_len)) < 0) {
+ WL_ERR(("Beacon set IEs failed \n"));
+ goto fail;
+ } else {
+ WL_DBG(("Applied Vndr IEs for Beacon \n"));
+ }
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
+ if ((err = wl_cfg80211_set_mgmt_vndr_ies(cfg, ndev_to_cfgdev(dev), bssidx,
+ VNDR_IE_PRBRSP_FLAG, (u8 *)info->proberesp_ies,
+ info->proberesp_ies_len)) < 0) {
+ WL_ERR(("ProbeRsp set IEs failed \n"));
+ goto fail;
+ } else {
+ WL_DBG(("Applied Vndr IEs for ProbeRsp \n"));
+ }
+#endif
+
+ is_bss_up = wl_cfg80211_bss_isup(dev, bssidx);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
+ privacy = info->privacy;
+#else
+ privacy = 0;
+#endif
+ if (!is_bss_up &&
+ (wl_cfg80211_bcn_validate_sec(dev, &ies, dev_role, bssidx, privacy) < 0))
+ {
+ WL_ERR(("Beacon set security failed \n"));
+ err = -EINVAL;
+ goto fail;
+ }
+
+ /* Set BI and DTIM period */
+ if (info->interval) {
+ if ((err = wldev_ioctl_set(dev, WLC_SET_BCNPRD,
+ &info->interval, sizeof(s32))) < 0) {
+ WL_ERR(("Beacon Interval Set Error, %d\n", err));
+ return err;
+ }
+ }
+ if (info->dtim_period) {
+ if ((err = wldev_ioctl_set(dev, WLC_SET_DTIMPRD,
+ &info->dtim_period, sizeof(s32))) < 0) {
+ WL_ERR(("DTIM Interval Set Error, %d\n", err));
+ return err;
+ }
+ }
+
+ /* If bss is already up, skip bring up */
+ if (!is_bss_up &&
+ (err = wl_cfg80211_bcn_bringup_ap(dev, &ies, dev_role, bssidx)) < 0)
+ {
+ WL_ERR(("Beacon bring up AP/GO failed \n"));
+ goto fail;
+ }
+
+ /* Set GC/STA SCB expiry timings. */
+ if ((err = wl_cfg80211_set_scb_timings(cfg, dev))) {
+ WL_ERR(("scb setting failed \n"));
+ if (err == BCME_UNSUPPORTED)
+ err = 0;
+// goto fail;
+ }
+
+ if (wl_get_drv_status(cfg, AP_CREATED, dev)) {
+ /* Soft AP already running. Update changed params */
+ if (wl_cfg80211_hostapd_sec(dev, &ies, bssidx) < 0) {
+ WL_ERR(("Hostapd update sec failed \n"));
+ err = -EINVAL;
+ goto fail;
+ }
+ }
+
+ /* Enable Probe Req filter */
+ if (((dev_role == NL80211_IFTYPE_P2P_GO) ||
+ (dev_role == NL80211_IFTYPE_AP)) && (ies.wps_ie != NULL)) {
+ wl_validate_wps_ie((char *) ies.wps_ie, ies.wps_ie_len, &pbc);
+ if (pbc)
+ wl_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, true);
+ }
+
+ WL_DBG(("** ADD/SET beacon done **\n"));
+ wl_set_drv_status(cfg, CONNECTED, dev);
+
+fail:
+ if (err) {
+ WL_ERR(("ADD/SET beacon failed\n"));
+#ifdef BCMDONGLEHOST
+ if (dev_role == NL80211_IFTYPE_AP) {
+#ifdef WL_EXT_IAPSTA
+ if (!wl_ext_iapsta_iftype_enabled(dev, WL_IF_TYPE_AP)) {
+#endif /* WL_EXT_IAPSTA */
+ /* clear the AP mode */
+ dhd->op_mode &= ~DHD_FLAG_HOSTAP_MODE;
+#ifdef WL_EXT_IAPSTA
+ }
+#endif /* WL_EXT_IAPSTA */
+ }
+#endif /* BCMDONGLEHOST */
+ }
+ return err;
+
+}
+
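+/* Legacy del_beacon handler: clear the AP state, take the bss down, and
+ * drop the per-bss IEs and the host AP opmode flag.
+ */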
+s32
+wl_cfg80211_del_beacon(struct wiphy *wiphy, struct net_device *dev)
+{
+ int err = 0;
+ s32 bssidx = 0;
+ int infra = 0;
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+#ifdef BCMDONGLEHOST
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+#endif /* BCMDONGLEHOST */
+
+ WL_DBG(("Enter. \n"));
+
+ if (!wdev) {
+ WL_ERR(("wdev null \n"));
+ return -EINVAL;
+ }
+
+ if ((wdev->iftype != NL80211_IFTYPE_P2P_GO) && (wdev->iftype != NL80211_IFTYPE_AP)) {
+		WL_ERR(("Unsupported iface type iftype:%d \n", wdev->iftype));
+ }
+
+ wl_clr_drv_status(cfg, AP_CREATING, dev);
+ wl_clr_drv_status(cfg, AP_CREATED, dev);
+
+ /* Clear AP/GO connected status */
+ wl_clr_drv_status(cfg, CONNECTED, dev);
+
+ cfg->ap_oper_channel = INVCHANSPEC;
+
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+ WL_ERR(("find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
+ return BCME_ERROR;
+ }
+
+ /* Do bss down */
+ if ((err = wl_cfg80211_bss_up(cfg, dev, bssidx, 0)) < 0) {
+ WL_ERR(("bss down error %d\n", err));
+ }
+
+ /* fall through is intentional */
+ err = wldev_ioctl_set(dev, WLC_SET_INFRA, &infra, sizeof(s32));
+ if (err < 0) {
+ WL_ERR(("SET INFRA error %d\n", err));
+ }
+ wl_cfg80211_clear_per_bss_ies(cfg, dev->ieee80211_ptr);
+
+#ifdef BCMDONGLEHOST
+ if (wdev->iftype == NL80211_IFTYPE_AP) {
+#ifdef WL_EXT_IAPSTA
+ if (!wl_ext_iapsta_iftype_enabled(dev, WL_IF_TYPE_AP)) {
+#endif /* WL_EXT_IAPSTA */
+ /* clear the AP mode */
+ dhd->op_mode &= ~DHD_FLAG_HOSTAP_MODE;
+#ifdef WL_EXT_IAPSTA
+ }
+#endif /* WL_EXT_IAPSTA */
+ }
+#endif /* BCMDONGLEHOST */
+
+ return 0;
+}
+#endif /* LINUX_VERSION >= VERSION(3,4,0) || WL_COMPAT_WIRELESS */
+
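+/* Cache the reason code of the latest auth/assoc firmware event in the
+ * security profile (sec->auth_assoc_res_status).
+ */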
+s32
+wl_get_auth_assoc_status(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data)
+{
+ u32 reason = ntoh32(e->reason);
+ u32 event = ntoh32(e->event_type);
+ struct wl_security *sec = wl_read_prof(cfg, ndev, WL_PROF_SEC);
+
+#if defined(DHD_ENABLE_BIGDATA_LOGGING)
+ (void)memcpy_s(&cfg->event_auth_assoc, sizeof(wl_event_msg_t),
+ e, sizeof(wl_event_msg_t));
+ WL_DBG(("event=%d status %d reason %d \n",
+ ntoh32(cfg->event_auth_assoc.event_type),
+ ntoh32(cfg->event_auth_assoc.status),
+ ntoh32(cfg->event_auth_assoc.reason)));
+#endif /* DHD_ENABLE_BIGDATA_LOGGING */
+ if (sec) {
+ switch (event) {
+ case WLC_E_ASSOC:
+ case WLC_E_AUTH:
+ case WLC_E_AUTH_IND:
+ sec->auth_assoc_res_status = reason;
+ break;
+ default:
+ break;
+ }
+ } else {
+ WL_ERR(("sec is NULL\n"));
+ }
+ return 0;
+}
+
+/* The mainline kernel >= 3.2.0 has support for indicating new/del station
+ * to AP/P2P GO via events. If this change is backported to kernel for which
+ * this driver is being built, then define WL_CFG80211_STA_EVENT. You
+ * should use this new/del sta event mechanism for BRCM supplicant >= 22.
+ */
+#if !defined(WL_CFG80211_STA_EVENT) && (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0))
+static s32
+wl_notify_connect_status_ap_legacy(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data)
+{
+ s32 err = 0;
+ u32 event = ntoh32(e->event_type);
+ u32 reason = ntoh32(e->reason);
+ u32 len = ntoh32(e->datalen);
+ u32 status = ntoh32(e->status);
+
+ bool isfree = false;
+ u8 *mgmt_frame;
+ u8 bsscfgidx = e->bsscfgidx;
+ s32 freq;
+	s32 chanspec = 0; /* read via the "chanspec" iovar below */
+ u8 *body = NULL;
+ u16 fc = 0;
+ u32 body_len = 0;
+
+ struct ieee80211_supported_band *band;
+ struct ether_addr da;
+ struct ether_addr bssid;
+ struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+ channel_info_t ci;
+ u8 ioctl_buf[WLC_IOCTL_SMLEN];
+
+ WL_DBG(("Enter \n"));
+ if (!len && (event == WLC_E_DEAUTH)) {
+ len = 2; /* reason code field */
+ data = &reason;
+ }
+ if (len) {
+ body = (u8 *)MALLOCZ(cfg->osh, len);
+ if (body == NULL) {
+ WL_ERR(("wl_notify_connect_status: Failed to allocate body\n"));
+ return WL_INVALID;
+ }
+ }
+ bzero(&bssid, ETHER_ADDR_LEN);
+ WL_DBG(("Enter event %d ndev %p\n", event, ndev));
+ if (wl_get_mode_by_netdev(cfg, ndev) == WL_INVALID) {
+ MFREE(cfg->osh, body, len);
+ return WL_INVALID;
+ }
+ if (len)
+ memcpy(body, data, len);
+
+ wldev_iovar_getbuf_bsscfg(ndev, "cur_etheraddr",
+ NULL, 0, ioctl_buf, sizeof(ioctl_buf), bsscfgidx, NULL);
+ memcpy(da.octet, ioctl_buf, ETHER_ADDR_LEN);
+ bzero(&bssid, sizeof(bssid));
+ err = wldev_ioctl_get(ndev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN);
+ switch (event) {
+ case WLC_E_ASSOC_IND:
+ fc = FC_ASSOC_REQ;
+ break;
+ case WLC_E_REASSOC_IND:
+ fc = FC_REASSOC_REQ;
+ break;
+ case WLC_E_DISASSOC_IND:
+ fc = FC_DISASSOC;
+ break;
+ case WLC_E_DEAUTH_IND:
+ fc = FC_DISASSOC;
+ break;
+ case WLC_E_DEAUTH:
+ fc = FC_DISASSOC;
+ break;
+ default:
+ fc = 0;
+ goto exit;
+ }
+ err = wldev_iovar_getint(ndev, "chanspec", (s32 *)&chanspec);
+ if (unlikely(err)) {
+ MFREE(cfg->osh, body, len);
+ WL_ERR(("%s: Could not get chanspec %d\n", __FUNCTION__, err));
+ return err;
+ }
+ chanspec = wl_chspec_driver_to_host(chanspec);
+ freq = wl_channel_to_frequency(wf_chspec_ctlchan(chanspec), CHSPEC_BAND(chanspec));
+ body_len = len;
+ err = wl_frame_get_mgmt(cfg, fc, &da, &e->addr, &bssid,
+ &mgmt_frame, &len, body);
+ if (err < 0)
+ goto exit;
+ isfree = true;
+
+ if ((event == WLC_E_ASSOC_IND && reason == DOT11_SC_SUCCESS) ||
+ (event == WLC_E_DISASSOC_IND) ||
+ ((event == WLC_E_DEAUTH_IND) || (event == WLC_E_DEAUTH))) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
+ cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, 0);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0))
+ cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, 0, GFP_ATOMIC);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || \
+ defined(WL_COMPAT_WIRELESS)
+ cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, GFP_ATOMIC);
+#else
+ cfg80211_rx_mgmt(ndev, freq, mgmt_frame, len, GFP_ATOMIC);
+#endif /* LINUX_VERSION >= VERSION(3, 18,0) || WL_COMPAT_WIRELESS */
+ }
+
+exit:
+ if (isfree) {
+ MFREE(cfg->osh, mgmt_frame, len);
+ }
+ if (body) {
+ MFREE(cfg->osh, body, body_len);
+ }
+
+	return err;
+}
+#endif /* !WL_CFG80211_STA_EVENT && KERNEL_VER < 3.2 */
+
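+/* Event handler for connect status in AP/GO mode: handles SAE auth
+ * events, AP link up/down transitions, and forwards station assoc and
+ * disassoc notifications to cfg80211.
+ */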
+s32
+wl_notify_connect_status_ap(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data)
+{
+ s32 err = 0;
+ u32 event = ntoh32(e->event_type);
+ u32 reason = ntoh32(e->reason);
+ u32 len = ntoh32(e->datalen);
+ u32 status = ntoh32(e->status);
+#if defined(WL_CFG80211_STA_EVENT) || (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
+ struct station_info sinfo;
+#endif /* WL_CFG80211_STA_EVENT || LINUX_VERSION >= VERSION(3,2,0) */
+#ifdef BIGDATA_SOFTAP
+ dhd_pub_t *dhdp;
+#endif /* BIGDATA_SOFTAP */
+
+ WL_INFORM_MEM(("[%s] Mode AP/GO. Event:%d status:%d reason:%d\n",
+ ndev->name, event, ntoh32(e->status), reason));
+
+#ifdef WL_CLIENT_SAE
+ if (event == WLC_E_AUTH && ntoh32(e->auth_type) == DOT11_SAE) {
+ WL_MSG_RLMT(ndev->name, &e->addr, ETHER_ADDR_LEN,
+ "add sta auth event for "MACDBG "\n", MAC2STRDBG(e->addr.octet));
+ err = wl_handle_auth_event(cfg, ndev, e, data);
+ if (err != BCME_OK) {
+ return err;
+ }
+ }
+#endif /* WL_CLIENT_SAE */
+
+ if (event == WLC_E_AUTH_IND) {
+#ifdef WL_SAE
+ if (ntoh32(e->auth_type) == DOT11_SAE) {
+ wl_bss_handle_sae_auth(cfg, ndev, e, data);
+ }
+#endif /* WL_SAE */
+ wl_get_auth_assoc_status(cfg, ndev, e, data);
+ return 0;
+ }
+ /* if link down, bsscfg is disabled. */
+ if (event == WLC_E_LINK && reason == WLC_E_LINK_BSSCFG_DIS &&
+ wl_get_p2p_status(cfg, IF_DELETING) && (ndev != bcmcfg_to_prmry_ndev(cfg))) {
+ wl_add_remove_eventmsg(ndev, WLC_E_PROBREQ_MSG, false);
+ WL_MSG(ndev->name, "AP mode link down !! \n");
+ complete(&cfg->iface_disable);
+#ifdef WL_EXT_IAPSTA
+ wl_ext_in4way_sync(ndev, 0, WL_EXT_STATUS_AP_DISABLED, NULL);
+#endif
+ return 0;
+ }
+
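+	/* An initial-assoc WLC_E_LINK success on an AP-mode netdev marks
+	 * firmware-side AP/GO bring-up completion.
+	 */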
+ if ((event == WLC_E_LINK) && (status == WLC_E_STATUS_SUCCESS) &&
+ (reason == WLC_E_REASON_INITIAL_ASSOC) &&
+ (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_AP)) {
+ if (!wl_get_drv_status(cfg, AP_CREATED, ndev)) {
+			/* AP/GO brought up successfully in firmware */
+ WL_MSG(ndev->name, "AP/GO Link up\n");
+ wl_set_drv_status(cfg, AP_CREATED, ndev);
+ if (delayed_work_pending(&cfg->ap_work)) {
+ cancel_delayed_work_sync(&cfg->ap_work);
+ WL_DBG(("cancelled ap_work\n"));
+ }
+#ifdef BIGDATA_SOFTAP
+ wl_ap_stainfo_init(cfg);
+#endif /* BIGDATA_SOFTAP */
+#ifdef WL_EXT_IAPSTA
+ wl_ext_in4way_sync(ndev, 0, WL_EXT_STATUS_AP_ENABLED, NULL);
+#endif
+ return 0;
+ }
+ }
+
+ if (event == WLC_E_DISASSOC_IND || event == WLC_E_DEAUTH_IND || event == WLC_E_DEAUTH) {
+ WL_MSG_RLMT(ndev->name, &e->addr, ETHER_ADDR_LEN,
+ "event %s(%d) status %d reason %d\n",
+ bcmevent_get_name(event), event, ntoh32(e->status), reason);
+ }
+
+#ifdef BIGDATA_SOFTAP
+ if (event == WLC_E_LINK && reason == WLC_E_LINK_BSSCFG_DIS) {
+ WL_ERR(("AP link down - skip get sta data\n"));
+ } else {
+ dhdp = (dhd_pub_t *)(cfg->pub);
+ if (dhdp && dhdp->op_mode & DHD_FLAG_HOSTAP_MODE) {
+ dhd_schedule_gather_ap_stadata(cfg, ndev, e);
+ }
+ }
+#endif /* BIGDATA_SOFTAP */
+
+#if !defined(WL_CFG80211_STA_EVENT) && !defined(WL_COMPAT_WIRELESS) && \
+ (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0))
+ err = wl_notify_connect_status_ap_legacy(cfg, ndev, e, data);
+#else /* LINUX_VERSION < VERSION(3,2,0) && !WL_CFG80211_STA_EVENT && !WL_COMPAT_WIRELESS */
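+	/* Kernel >= 3.2 path: report stations directly via cfg80211_new_sta()/
+	 * cfg80211_del_sta() instead of synthesizing management frames.
+	 */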
+ memset_s(&sinfo, sizeof(sinfo), 0, sizeof(sinfo));
+ if (((event == WLC_E_ASSOC_IND) || (event == WLC_E_REASSOC_IND)) &&
+ reason == DOT11_SC_SUCCESS) {
+		/* On Linux >= 4.0, assoc_req_ies_len is used instead of the
+		 * STATION_INFO_ASSOC_REQ_IES flag
+		 */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0))
+		sinfo.filled = STA_INFO_BIT(INFO_ASSOC_REQ_IES);
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0) */
+ if (!data) {
+			WL_ERR(("No IEs present in ASSOC/REASSOC_IND\n"));
+ return -EINVAL;
+ }
+ sinfo.assoc_req_ies = data;
+ sinfo.assoc_req_ies_len = len;
+ WL_MSG(ndev->name, "new sta event for "MACDBG "\n",
+ MAC2STRDBG(e->addr.octet));
+#ifdef WL_EXT_IAPSTA
+ wl_ext_in4way_sync(ndev, AP_WAIT_STA_RECONNECT,
+ WL_EXT_STATUS_STA_CONNECTED, (void *)&e->addr);
+#endif
+ cfg80211_new_sta(ndev, e->addr.octet, &sinfo, GFP_ATOMIC);
+#ifdef WL_WPS_SYNC
+ wl_wps_session_update(ndev, WPS_STATE_LINKUP, e->addr.octet);
+#endif /* WL_WPS_SYNC */
+ } else if ((event == WLC_E_DEAUTH_IND) ||
+ ((event == WLC_E_DEAUTH) && (reason != DOT11_RC_RESERVED)) ||
+ (event == WLC_E_DISASSOC_IND)) {
+ /*
+ * WAR: Dongle sends WLC_E_DEAUTH event with DOT11_RC_RESERVED
+ * to delete flowring in case of PCIE Full dongle.
+ * By deleting flowring on SoftAP interface we can avoid any issues
+ * due to stale/bad state of flowring.
+		 * Therefore, we don't need to notify Hostapd of the client disassociation
+ * in this case.
+ * Please refer to the RB:115182 to understand the case more clearly.
+ */
+ WL_MSG_RLMT(ndev->name, &e->addr, ETHER_ADDR_LEN,
+ "del sta event for "MACDBG "\n", MAC2STRDBG(e->addr.octet));
+#ifdef WL_EXT_IAPSTA
+ wl_ext_in4way_sync(ndev, AP_WAIT_STA_RECONNECT,
+ WL_EXT_STATUS_STA_DISCONNECTED, (void *)&e->addr);
+#endif
+ cfg80211_del_sta(ndev, e->addr.octet, GFP_ATOMIC);
+#ifdef WL_WPS_SYNC
+ wl_wps_session_update(ndev, WPS_STATE_LINKDOWN, e->addr.octet);
+#endif /* WL_WPS_SYNC */
+ }
+
+#endif /* LINUX_VERSION < VERSION(3,2,0) && !WL_CFG80211_STA_EVENT && !WL_COMPAT_WIRELESS */
+ return err;
+}
+
+s32
+wl_frame_get_mgmt(struct bcm_cfg80211 *cfg, u16 fc,
+ const struct ether_addr *da, const struct ether_addr *sa,
+ const struct ether_addr *bssid, u8 **pheader, u32 *body_len, u8 *pbody)
+{
+ struct dot11_management_header *hdr;
+ u32 totlen = 0;
+ s32 err = 0;
+ u8 *offset;
+ u32 prebody_len = *body_len;
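+	/* (Re)assoc request bodies carry fixed-length fields ahead of the IEs;
+	 * account for them when sizing the reconstructed frame.
+	 */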
+ switch (fc) {
+ case FC_ASSOC_REQ:
+		/* capability, listen interval */
+ totlen = DOT11_ASSOC_REQ_FIXED_LEN;
+ *body_len += DOT11_ASSOC_REQ_FIXED_LEN;
+ break;
+
+ case FC_REASSOC_REQ:
+		/* capability, listen interval, AP address */
+ totlen = DOT11_REASSOC_REQ_FIXED_LEN;
+ *body_len += DOT11_REASSOC_REQ_FIXED_LEN;
+ break;
+ }
+ totlen += DOT11_MGMT_HDR_LEN + prebody_len;
+ *pheader = (u8 *)MALLOCZ(cfg->osh, totlen);
+ if (*pheader == NULL) {
+ WL_ERR(("memory alloc failed \n"));
+ return -ENOMEM;
+ }
+ hdr = (struct dot11_management_header *) (*pheader);
+ hdr->fc = htol16(fc);
+ hdr->durid = 0;
+ hdr->seq = 0;
+ offset = (u8*)(hdr + 1) + (totlen - DOT11_MGMT_HDR_LEN - prebody_len);
+ bcopy((const char*)da, (u8*)&hdr->da, ETHER_ADDR_LEN);
+ bcopy((const char*)sa, (u8*)&hdr->sa, ETHER_ADDR_LEN);
+ bcopy((const char*)bssid, (u8*)&hdr->bssid, ETHER_ADDR_LEN);
+ if ((pbody != NULL) && prebody_len)
+ bcopy((const char*)pbody, offset, prebody_len);
+ *body_len = totlen;
+ return err;
+}
+
+#if defined(WLTDLS)
+bool wl_cfg80211_is_tdls_tunneled_frame(void *frame, u32 frame_len)
+{
+ unsigned char *data;
+
+ if (frame == NULL) {
+ WL_ERR(("Invalid frame \n"));
+ return false;
+ }
+
+ if (frame_len < 5) {
+ WL_ERR(("Invalid frame length [%d] \n", frame_len));
+ return false;
+ }
+
+ data = frame;
+
+ if (!memcmp(data, TDLS_TUNNELED_PRB_REQ, 5) ||
+ !memcmp(data, TDLS_TUNNELED_PRB_RESP, 5)) {
+ WL_DBG(("TDLS Vendor Specific Received type\n"));
+ return true;
+ }
+
+ return false;
+}
+#endif /* WLTDLS */
+
+#ifdef WLTDLS
+s32
+wl_tdls_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data) {
+
+ struct net_device *ndev = NULL;
+ u32 reason = ntoh32(e->reason);
+ s8 *msg = NULL;
+
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+ switch (reason) {
+ case WLC_E_TDLS_PEER_DISCOVERED :
+		msg = " TDLS PEER DISCOVERED ";
+ break;
+ case WLC_E_TDLS_PEER_CONNECTED :
+ if (cfg->tdls_mgmt_frame) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
+ cfg80211_rx_mgmt(cfgdev, cfg->tdls_mgmt_freq, 0,
+ cfg->tdls_mgmt_frame, cfg->tdls_mgmt_frame_len, 0);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0))
+ cfg80211_rx_mgmt(cfgdev, cfg->tdls_mgmt_freq, 0,
+ cfg->tdls_mgmt_frame, cfg->tdls_mgmt_frame_len, 0,
+ GFP_ATOMIC);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || \
+ defined(WL_COMPAT_WIRELESS)
+ cfg80211_rx_mgmt(cfgdev, cfg->tdls_mgmt_freq, 0,
+ cfg->tdls_mgmt_frame, cfg->tdls_mgmt_frame_len,
+ GFP_ATOMIC);
+#else
+ cfg80211_rx_mgmt(cfgdev, cfg->tdls_mgmt_freq,
+ cfg->tdls_mgmt_frame, cfg->tdls_mgmt_frame_len, GFP_ATOMIC);
+
+#endif /* LINUX_VERSION >= VERSION(3, 18,0) || WL_COMPAT_WIRELESS */
+ }
+ msg = " TDLS PEER CONNECTED ";
+#ifdef SUPPORT_SET_CAC
+ /* TDLS connect reset CAC */
+ wl_cfg80211_set_cac(cfg, 0);
+#endif /* SUPPORT_SET_CAC */
+ break;
+ case WLC_E_TDLS_PEER_DISCONNECTED :
+ if (cfg->tdls_mgmt_frame) {
+ MFREE(cfg->osh, cfg->tdls_mgmt_frame, cfg->tdls_mgmt_frame_len);
+ cfg->tdls_mgmt_frame_len = 0;
+ cfg->tdls_mgmt_freq = 0;
+ }
+ msg = "TDLS PEER DISCONNECTED ";
+#ifdef SUPPORT_SET_CAC
+		/* TDLS disconnect, set CAC */
+ wl_cfg80211_set_cac(cfg, 1);
+#endif /* SUPPORT_SET_CAC */
+ break;
+ }
+ if (msg) {
+ WL_ERR(("%s: " MACDBG " on %s ndev\n", msg, MAC2STRDBG((const u8*)(&e->addr)),
+ (bcmcfg_to_prmry_ndev(cfg) == ndev) ? "primary" : "secondary"));
+ }
+ return 0;
+
+}
+
+#if defined(CUSTOMER_HW10)
+static void wl_tdls_enable(struct bcm_cfg80211 *cfg)
+{
+ int enable = true;
+ int err = 0;
+ struct net_device *primary_dev = bcmcfg_to_prmry_ndev(cfg);
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+
+/* #define IS_P2P_OPERATING (p2p_is_on(cfg) && cfg->p2p->vif_created ) */
+#define IS_P2P_OPERATING (dhd->op_mode & (DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE))
+#if !defined(DISABLE_TDLS_IN_P2P)
+ if (cfg->vsdb_mode)
+#else
+ if (cfg->vsdb_mode || IS_P2P_OPERATING)
+#endif
+ {
+ enable = false;
+ }
+
+ err = wldev_iovar_setint(primary_dev, "tdls_enable", enable);
+ if (err) {
+		WL_ERR(("tdls_enable %d failed, err=%d\n", enable, err));
+ }
+#undef IS_P2P_OPERATING
+}
+#endif /* defined(CUSTOMER_HW10) */
+
+#endif /* WLTDLS */
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0)) || \
+ defined(WL_COMPAT_WIRELESS)
+s32
+#if (defined(CONFIG_ARCH_MSM) && defined(TDLS_MGMT_VERSION2)) || \
+ ((LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)))
+wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
+ u8 *peer, u8 action_code, u8 dialog_token, u16 status_code,
+ u32 peer_capability, const u8 *buf, size_t len)
+#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)) && \
+ (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)))
+wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *peer, u8 action_code, u8 dialog_token, u16 status_code,
+ u32 peer_capability, const u8 *buf, size_t len)
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
+wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *peer, u8 action_code, u8 dialog_token, u16 status_code,
+ u32 peer_capability, bool initiator, const u8 *buf, size_t len)
+#else /* CONFIG_ARCH_MSM && TDLS_MGMT_VERSION2 */
+wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
+ u8 *peer, u8 action_code, u8 dialog_token, u16 status_code,
+ const u8 *buf, size_t len)
+#endif /* CONFIG_ARCH_MSM && TDLS_MGMT_VERSION2 */
+{
+ s32 ret = 0;
+#if defined(BCMDONGLEHOST)
+#if defined(TDLS_MSG_ONLY_WFD) && defined(WLTDLS)
+ struct bcm_cfg80211 *cfg;
+ tdls_wfd_ie_iovar_t info;
+ bzero(&info, sizeof(info));
+ cfg = wl_get_cfg(dev);
+
+#if defined(CONFIG_ARCH_MSM) && defined(TDLS_MGMT_VERSION2)
+	/* Some customer platforms back-ported this feature from kernel 3.15 to
+	 * kernel 3.10, which causes a build error without this reference
+	 */
+ BCM_REFERENCE(peer_capability);
+#endif /* CONFIG_ARCH_MSM && TDLS_MGMT_VERSION2 */
+
+ switch (action_code) {
+ /* We need to set TDLS Wifi Display IE to firmware
+ * using tdls_wfd_ie iovar
+ */
+ case WLAN_TDLS_SET_PROBE_WFD_IE:
+ WL_ERR(("wl_cfg80211_tdls_mgmt: WLAN_TDLS_SET_PROBE_WFD_IE\n"));
+ info.mode = TDLS_WFD_PROBE_IE_TX;
+
+ if (len > sizeof(info.data)) {
+ return -EINVAL;
+ }
+ memcpy(&info.data, buf, len);
+ info.length = len;
+ break;
+ case WLAN_TDLS_SET_SETUP_WFD_IE:
+ WL_ERR(("wl_cfg80211_tdls_mgmt: WLAN_TDLS_SET_SETUP_WFD_IE\n"));
+ info.mode = TDLS_WFD_IE_TX;
+
+ if (len > sizeof(info.data)) {
+ return -EINVAL;
+ }
+ memcpy(&info.data, buf, len);
+ info.length = len;
+ break;
+ case WLAN_TDLS_SET_WFD_ENABLED:
+ WL_ERR(("wl_cfg80211_tdls_mgmt: WLAN_TDLS_SET_MODE_WFD_ENABLED\n"));
+ dhd_tdls_set_mode((dhd_pub_t *)(cfg->pub), true);
+ goto out;
+ case WLAN_TDLS_SET_WFD_DISABLED:
+ WL_ERR(("wl_cfg80211_tdls_mgmt: WLAN_TDLS_SET_MODE_WFD_DISABLED\n"));
+ dhd_tdls_set_mode((dhd_pub_t *)(cfg->pub), false);
+ goto out;
+ default:
+ WL_ERR(("Unsupported action code : %d\n", action_code));
+ goto out;
+ }
+ ret = wldev_iovar_setbuf(dev, "tdls_wfd_ie", &info, sizeof(info),
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+
+ if (ret) {
+ WL_ERR(("tdls_wfd_ie error %d\n", ret));
+ }
+
+out:
+#endif /* TDLS_MSG_ONLY_WFD && WLTDLS */
+#endif /* BCMDONGLEHOST */
+ return ret;
+}
+
+s32
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
+wl_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *peer, enum nl80211_tdls_operation oper)
+#else
+wl_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
+ u8 *peer, enum nl80211_tdls_operation oper)
+#endif
+{
+ s32 ret = 0;
+#ifdef WLTDLS
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ tdls_iovar_t info;
+ dhd_pub_t *dhdp;
+ bool tdls_auto_mode = false;
+ dhdp = (dhd_pub_t *)(cfg->pub);
+ bzero(&info, sizeof(tdls_iovar_t));
+ if (peer) {
+ memcpy(&info.ea, peer, ETHER_ADDR_LEN);
+ } else {
+ return -1;
+ }
+ switch (oper) {
+ case NL80211_TDLS_DISCOVERY_REQ:
+ /* If the discovery request is broadcast then we need to set
+ * info.mode to Tunneled Probe Request
+ */
+ if (memcmp(peer, (const uint8 *)BSSID_BROADCAST, ETHER_ADDR_LEN) == 0) {
+ info.mode = TDLS_MANUAL_EP_WFD_TPQ;
+			WL_ERR(("wl_cfg80211_tdls_oper: TDLS TUNNELED PROBE REQUEST\n"));
+ } else {
+ info.mode = TDLS_MANUAL_EP_DISCOVERY;
+ }
+ break;
+ case NL80211_TDLS_SETUP:
+ if (dhdp->tdls_mode == true) {
+ info.mode = TDLS_MANUAL_EP_CREATE;
+ tdls_auto_mode = false;
+ /* Do tear down and create a fresh one */
+ ret = wl_cfg80211_tdls_config(cfg, TDLS_STATE_TEARDOWN, tdls_auto_mode);
+ if (ret < 0) {
+ return ret;
+ }
+ } else {
+ tdls_auto_mode = true;
+ }
+ break;
+ case NL80211_TDLS_TEARDOWN:
+ info.mode = TDLS_MANUAL_EP_DELETE;
+ break;
+ default:
+ WL_ERR(("Unsupported operation : %d\n", oper));
+ goto out;
+ }
+ /* turn on TDLS */
+ ret = wl_cfg80211_tdls_config(cfg, TDLS_STATE_SETUP, tdls_auto_mode);
+ if (ret < 0) {
+ return ret;
+ }
+ if (info.mode) {
+ ret = wldev_iovar_setbuf(dev, "tdls_endpoint", &info, sizeof(info),
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+ if (ret) {
+ WL_ERR(("tdls_endpoint error %d\n", ret));
+ }
+ }
+out:
+ /* use linux generic error code instead of firmware error code */
+ if (ret) {
+ wl_flush_fw_log_buffer(dev, FW_LOGSET_MASK_ALL);
+ return -ENOTSUPP;
+ }
+#endif /* WLTDLS */
+ return ret;
+}
+#endif /* LINUX_VERSION > VERSION(3,2,0) || WL_COMPAT_WIRELESS */
+
+static bool check_dev_role_integrity(struct bcm_cfg80211 *cfg, u32 dev_role)
+{
+#if defined(BCMDONGLEHOST)
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+ if (((dev_role == NL80211_IFTYPE_AP) &&
+ !(dhd->op_mode & DHD_FLAG_HOSTAP_MODE)) ||
+ ((dev_role == NL80211_IFTYPE_P2P_GO) &&
+ !(dhd->op_mode & DHD_FLAG_P2P_GO_MODE)))
+ {
+ WL_ERR(("device role select failed role:%d op_mode:%d \n", dev_role, dhd->op_mode));
+ return false;
+ }
+#endif /* defined(BCMDONGLEHOST) */
+ return true;
+}
+
+s32
+wl_cfg80211_dfs_ap_move(struct net_device *ndev, char *data, char *command, int total_len)
+{
+ char ioctl_buf[WLC_IOCTL_SMLEN];
+ int err = 0;
+ uint32 val = 0;
+ chanspec_t chanspec = 0;
+ int abort;
+ int bytes_written = 0;
+ struct wl_dfs_ap_move_status_v2 *status;
+ char chanbuf[CHANSPEC_STR_LEN];
+ const char *dfs_state_str[DFS_SCAN_S_MAX] = {
+ "Radar Free On Channel",
+ "Radar Found On Channel",
+ "Radar Scan In Progress",
+ "Radar Scan Aborted",
+ "RSDB Mode switch in Progress For Scan"
+ };
+ if (ndev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP) {
+ bytes_written = snprintf(command, total_len, "AP is not up\n");
+ return bytes_written;
+ }
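+	/* No argument: report dfs_ap_move status. "-1": abort an in-progress
+	 * move. Anything else is parsed as the target chanspec.
+	 */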
+ if (!*data) {
+ if ((err = wldev_iovar_getbuf(ndev, "dfs_ap_move", NULL, 0,
+ ioctl_buf, sizeof(ioctl_buf), NULL))) {
+			WL_ERR(("getting dfs_ap_move status failed, err=%d\n", err));
+ return err;
+ }
+ status = (struct wl_dfs_ap_move_status_v2 *)ioctl_buf;
+
+ if (status->version != WL_DFS_AP_MOVE_VERSION) {
+ err = BCME_UNSUPPORTED;
+ WL_ERR(("err=%d version=%d\n", err, status->version));
+ return err;
+ }
+
+ if (status->move_status != (int8) DFS_SCAN_S_IDLE) {
+ chanspec = wl_chspec_driver_to_host(status->chanspec);
+ if (chanspec != 0 && chanspec != INVCHANSPEC) {
+ wf_chspec_ntoa(chanspec, chanbuf);
+ bytes_written = snprintf(command, total_len,
+ "AP Target Chanspec %s (0x%x)\n", chanbuf, chanspec);
+ }
+ bytes_written += snprintf(command + bytes_written,
+ total_len - bytes_written,
+ "%s\n", dfs_state_str[status->move_status]);
+ return bytes_written;
+ } else {
+ bytes_written = snprintf(command, total_len, "dfs AP move in IDLE state\n");
+ return bytes_written;
+ }
+ }
+
+ abort = bcm_atoi(data);
+ if (abort == -1) {
+ if ((err = wldev_iovar_setbuf(ndev, "dfs_ap_move", &abort,
+ sizeof(int), ioctl_buf, sizeof(ioctl_buf), NULL)) < 0) {
+			WL_ERR(("setting dfs_ap_move failed with err %d\n", err));
+ return err;
+ }
+ } else {
+ chanspec = wf_chspec_aton(data);
+ if (chanspec != 0) {
+ val = wl_chspec_host_to_driver(chanspec);
+ if (val != INVCHANSPEC) {
+ if ((err = wldev_iovar_setbuf(ndev, "dfs_ap_move", &val,
+ sizeof(int), ioctl_buf, sizeof(ioctl_buf), NULL)) < 0) {
+					WL_ERR(("setting dfs_ap_move failed with err %d\n", err));
+ return err;
+ }
+				WL_DBG(("set dfs_ap_move successful\n"));
+ } else {
+ err = BCME_USAGE_ERROR;
+ }
+ }
+ }
+ return err;
+}
+
+#ifdef WL_CFG80211_ACL
+static int
+wl_cfg80211_set_mac_acl(struct wiphy *wiphy, struct net_device *cfgdev,
+ const struct cfg80211_acl_data *acl)
+{
+ int i;
+ int ret = 0;
+ int macnum = 0;
+ int macmode = MACLIST_MODE_DISABLED;
+ struct maclist *list;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(cfgdev);
+
+ /* get the MAC filter mode */
+ if (acl && acl->acl_policy == NL80211_ACL_POLICY_DENY_UNLESS_LISTED) {
+ macmode = MACLIST_MODE_ALLOW;
+ } else if (acl && acl->acl_policy == NL80211_ACL_POLICY_ACCEPT_UNLESS_LISTED &&
+ acl->n_acl_entries) {
+ macmode = MACLIST_MODE_DENY;
+ }
+
+ /* if acl == NULL, macmode is still disabled.. */
+ if (macmode == MACLIST_MODE_DISABLED) {
+ if ((ret = wl_android_set_ap_mac_list(cfgdev, macmode, NULL)) != 0)
+ WL_ERR(("wl_cfg80211_set_mac_acl: Setting MAC list"
+ " failed error=%d\n", ret));
+
+ return ret;
+ }
+
+ macnum = acl->n_acl_entries;
+ if (macnum < 0 || macnum > MAX_NUM_MAC_FILT) {
+ WL_ERR(("wl_cfg80211_set_mac_acl: invalid number of MAC address entries %d\n",
+ macnum));
+ return -1;
+ }
+
+ /* allocate memory for the MAC list */
+ list = (struct maclist *)MALLOC(cfg->osh, sizeof(int) +
+ sizeof(struct ether_addr) * macnum);
+ if (!list) {
+ WL_ERR(("wl_cfg80211_set_mac_acl: failed to allocate memory\n"));
+ return -1;
+ }
+
+ /* prepare the MAC list */
+ list->count = htod32(macnum);
+ for (i = 0; i < macnum; i++) {
+ memcpy(&list->ea[i], &acl->mac_addrs[i], ETHER_ADDR_LEN);
+ }
+ /* set the list */
+ if ((ret = wl_android_set_ap_mac_list(cfgdev, macmode, list)) != 0)
+ WL_ERR(("wl_cfg80211_set_mac_acl: Setting MAC list failed error=%d\n", ret));
+
+ MFREE(cfg->osh, list, sizeof(int) +
+ sizeof(struct ether_addr) * macnum);
+
+ return ret;
+}
+#endif /* WL_CFG80211_ACL */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
+int wl_chspec_chandef(chanspec_t chanspec,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+ struct cfg80211_chan_def *chandef,
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0) && (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 7, 0)))
+ struct chan_info *chaninfo,
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)) */
+ struct wiphy *wiphy)
+{
+ uint16 freq = 0;
+ int chan_type = 0;
+ int channel = 0;
+ struct ieee80211_channel *chan;
+
+ if (!chandef) {
+ return -1;
+ }
+ channel = CHSPEC_CHANNEL(chanspec);
+
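+	/* Derive the 20 MHz control channel from the chanspec center channel
+	 * and pick the closest channel type the kernel understands.
+	 */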
+ switch (CHSPEC_BW(chanspec)) {
+ case WL_CHANSPEC_BW_20:
+ chan_type = NL80211_CHAN_HT20;
+ break;
+ case WL_CHANSPEC_BW_40:
+ {
+ if (CHSPEC_SB_UPPER(chanspec)) {
+ channel += CH_10MHZ_APART;
+ } else {
+ channel -= CH_10MHZ_APART;
+ }
+ }
+ chan_type = NL80211_CHAN_HT40PLUS;
+ break;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0))
+ case WL_CHANSPEC_BW_80:
+ case WL_CHANSPEC_BW_8080:
+ {
+ uint16 sb = CHSPEC_CTL_SB(chanspec);
+
+ if (sb == WL_CHANSPEC_CTL_SB_LL) {
+ channel -= (CH_10MHZ_APART + CH_20MHZ_APART);
+ } else if (sb == WL_CHANSPEC_CTL_SB_LU) {
+ channel -= CH_10MHZ_APART;
+ } else if (sb == WL_CHANSPEC_CTL_SB_UL) {
+ channel += CH_10MHZ_APART;
+ } else {
+ /* WL_CHANSPEC_CTL_SB_UU */
+ channel += (CH_10MHZ_APART + CH_20MHZ_APART);
+ }
+
+ if (sb == WL_CHANSPEC_CTL_SB_LL || sb == WL_CHANSPEC_CTL_SB_LU)
+ chan_type = NL80211_CHAN_HT40MINUS;
+ else if (sb == WL_CHANSPEC_CTL_SB_UL || sb == WL_CHANSPEC_CTL_SB_UU)
+ chan_type = NL80211_CHAN_HT40PLUS;
+ }
+ break;
+ case WL_CHANSPEC_BW_160:
+ channel = wf_chspec_primary20_chan(chanspec);
+ /* Using base chan_type as kernel does not define chan_type for 160 MHz */
+ chan_type = NL80211_CHAN_HT20;
+ break;
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0)) */
+ default:
+ chan_type = NL80211_CHAN_HT20;
+ break;
+
+ }
+ freq = wl_channel_to_frequency(channel, CHSPEC_BAND(chanspec));
+ chan = ieee80211_get_channel(wiphy, freq);
+ WL_DBG(("channel:%d freq:%d chan_type: %d chan_ptr:%p \n",
+ channel, freq, chan_type, chan));
+ if (unlikely(!chan)) {
+ /* fw and cfg80211 channel lists are not in sync */
+ WL_ERR(("Couldn't find matching channel in wiphy channel list \n"));
+ ASSERT(0);
+ return -EINVAL;
+ }
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0))
+ cfg80211_chandef_create(chandef, chan, chan_type);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0) && (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 7, 0)))
+ chaninfo->freq = freq;
+ chaninfo->chan_type = chan_type;
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0)) */
+ return 0;
+}
+
+void
+wl_cfg80211_ch_switch_notify(struct net_device *dev, uint16 chanspec, struct wiphy *wiphy)
+{
+ u32 freq;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0))
+ struct cfg80211_chan_def chandef;
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0) && (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 7, 0)))
+ struct chan_info chaninfo;
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0)) */
+
+ if (!wiphy) {
+ WL_ERR(("wiphy is null\n"));
+ return;
+ }
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION (3, 18, 0))
+ /* Channel switch support is only for AP/GO/ADHOC/MESH */
+ if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_STATION ||
+ dev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_CLIENT) {
+ WL_ERR(("No channel switch notify support for STA/GC\n"));
+ return;
+ }
+#endif /* (LINUX_VERSION_CODE <= KERNEL_VERSION (3, 18, 0)) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0))
+ if (wl_chspec_chandef(chanspec, &chandef, wiphy))
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0) && (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 7, 0)))
+ if (wl_chspec_chandef(chanspec, &chaninfo, wiphy))
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0)) */
+ {
+ WL_ERR(("chspec_chandef failed\n"));
+ return;
+ }
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0))
+ freq = chandef.chan ? chandef.chan->center_freq : chandef.center_freq1;
+ cfg80211_ch_switch_notify(dev, &chandef);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0) && (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 7, 0)))
+	freq = chaninfo.freq;
+	cfg80211_ch_switch_notify(dev, freq, chaninfo.chan_type);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0)) */
+
+ WL_MSG(dev->name, "Channel switch notification for freq: %d chanspec: 0x%x\n",
+ freq, chanspec);
+#ifdef WL_EXT_IAPSTA
+ wl_ext_fw_reinit_incsa(dev);
+#endif
+ return;
+}
+#endif /* LINUX_VERSION_CODE >= (3, 5, 0) */
+
+static void
+wl_ap_channel_ind(struct bcm_cfg80211 *cfg,
+ struct net_device *ndev,
+ chanspec_t chanspec)
+{
+ u32 channel = LCHSPEC_CHANNEL(chanspec);
+
+ WL_INFORM_MEM(("(%s) AP channel:%d chspec:0x%x \n",
+ ndev->name, channel, chanspec));
+
+#ifdef SUPPORT_AP_BWCTRL
+ wl_update_apchan_bwcap(cfg, ndev, chanspec);
+#endif /* SUPPORT_AP_BWCTRL */
+
+	if ((cfg->ap_oper_channel != INVCHANSPEC) && (cfg->ap_oper_channel != chanspec)) {
+ /*
+ * If cached channel is different from the channel indicated
+ * by the event, notify user space about the channel switch.
+ */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
+ wl_cfg80211_ch_switch_notify(ndev, chanspec, bcmcfg_to_wiphy(cfg));
+#endif /* LINUX_VERSION_CODE >= (3, 5, 0) */
+ cfg->ap_oper_channel = chanspec;
+ }
+}
+
+s32
+wl_ap_start_ind(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+const wl_event_msg_t *e, void *data)
+{
+ struct net_device *ndev = NULL;
+ chanspec_t chanspec;
+
+ WL_DBG(("Enter\n"));
+ if (unlikely(e->status)) {
+ WL_ERR(("status:0x%x \n", e->status));
+ return -1;
+ }
+
+ if (!data) {
+ return -EINVAL;
+ }
+
+ if (likely(cfgdev)) {
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+ chanspec = *((chanspec_t *)data);
+
+ if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_AP) {
+ /* For AP/GO role */
+ wl_ap_channel_ind(cfg, ndev, chanspec);
+ }
+ }
+
+ return 0;
+}
+
+s32
+wl_csa_complete_ind(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+const wl_event_msg_t *e, void *data)
+{
+ int error = 0;
+ u32 chanspec = 0;
+ struct net_device *ndev = NULL;
+ struct ether_addr bssid;
+
+ WL_DBG(("Enter\n"));
+ if (unlikely(e->status)) {
+ WL_ERR(("status:0x%x \n", e->status));
+ return -1;
+ }
+
+ if (likely(cfgdev)) {
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+ /* Get association state if not AP and then query chanspec */
+ if (!((wl_get_mode_by_netdev(cfg, ndev)) == WL_MODE_AP)) {
+ error = wldev_ioctl_get(ndev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN);
+ if (error) {
+ WL_ERR(("CSA on %s. Not associated. error=%d\n",
+ ndev->name, error));
+ return BCME_ERROR;
+ }
+ }
+
+ error = wldev_iovar_getint(ndev, "chanspec", &chanspec);
+ if (unlikely(error)) {
+ WL_ERR(("Get chanspec error: %d \n", error));
+ return -1;
+ }
+
+ WL_INFORM_MEM(("[%s] CSA ind. ch:0x%x\n", ndev->name, chanspec));
+ if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_AP) {
+ /* For AP/GO role */
+ wl_ap_channel_ind(cfg, ndev, chanspec);
+ } else {
+ /* STA/GC roles */
+ if (!wl_get_drv_status(cfg, CONNECTED, ndev)) {
+ WL_ERR(("CSA on %s. Not associated.\n", ndev->name));
+ return BCME_ERROR;
+ }
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
+ wl_cfg80211_ch_switch_notify(ndev, chanspec, bcmcfg_to_wiphy(cfg));
+#endif /* LINUX_VERSION_CODE >= (3, 5, 0) */
+ }
+
+ }
+
+ return 0;
+}
+
+#ifdef WLTDLS
+s32
+wl_cfg80211_tdls_config(struct bcm_cfg80211 *cfg, enum wl_tdls_config state, bool auto_mode)
+{
+ struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+ int err = 0;
+ struct net_info *iter, *next;
+ int update_reqd = 0;
+ int enable = 0;
+ dhd_pub_t *dhdp;
+ dhdp = (dhd_pub_t *)(cfg->pub);
+
+ /*
+ * TDLS need to be enabled only if we have a single STA/GC
+ * connection.
+ */
+
+ WL_DBG(("Enter state:%d\n", state));
+ if (!cfg->tdls_supported) {
+ /* FW doesn't support tdls. Do nothing */
+ return -ENODEV;
+ }
+
+ /* Protect tdls config session */
+ mutex_lock(&cfg->tdls_sync);
+
+ if (state == TDLS_STATE_TEARDOWN) {
+ /* Host initiated TDLS tear down */
+ err = dhd_tdls_enable(ndev, false, auto_mode, NULL);
+ goto exit;
+ } else if ((state == TDLS_STATE_AP_CREATE) ||
+ (state == TDLS_STATE_NMI_CREATE)) {
+ /* We don't support tdls while AP/GO/NAN is operational */
+ update_reqd = true;
+ enable = false;
+ } else if ((state == TDLS_STATE_CONNECT) || (state == TDLS_STATE_IF_CREATE)) {
+ if (wl_get_drv_status_all(cfg,
+ CONNECTED) >= TDLS_MAX_IFACE_FOR_ENABLE) {
+ /* For STA/GC connect command request, disable
+ * tdls if we have any concurrent interfaces
+ * operational.
+ */
+ WL_DBG(("Interface limit restriction. disable tdls.\n"));
+ update_reqd = true;
+ enable = false;
+ }
+ } else if ((state == TDLS_STATE_DISCONNECT) ||
+ (state == TDLS_STATE_AP_DELETE) ||
+ (state == TDLS_STATE_SETUP) ||
+ (state == TDLS_STATE_IF_DELETE)) {
+ /* Enable back the tdls connection only if we have less than
+ * or equal to a single STA/GC connection.
+ */
+ if (wl_get_drv_status_all(cfg,
+ CONNECTED) == 0) {
+ /* If there are no interfaces connected, enable tdls */
+ update_reqd = true;
+ enable = true;
+ } else if (wl_get_drv_status_all(cfg,
+ CONNECTED) == TDLS_MAX_IFACE_FOR_ENABLE) {
+ /* We have one interface in CONNECTED state.
+ * Verify whether its a STA interface before
+ * we enable back tdls.
+ */
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ for_each_ndev(cfg, iter, next) {
+ GCC_DIAGNOSTIC_POP();
+			if ((iter->ndev) && (wl_get_drv_status(cfg, CONNECTED, iter->ndev)) &&
+				(iter->ndev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION)) {
+				WL_DBG(("Non STA iface operational. cfg_iftype:%d"
+					" Can't enable tdls.\n",
+					iter->ndev->ieee80211_ptr->iftype));
+ err = -ENOTSUPP;
+ goto exit;
+ }
+ }
+ /* No AP/GO found. Enable back tdls */
+ update_reqd = true;
+ enable = true;
+ } else {
+ WL_DBG(("Concurrent connection mode. Can't enable tdls. \n"));
+ err = -ENOTSUPP;
+ goto exit;
+ }
+ } else {
+ WL_ERR(("Unknown tdls state:%d \n", state));
+ err = -EINVAL;
+ goto exit;
+ }
+
+ if (update_reqd == true) {
+ if (dhdp->tdls_enable == enable) {
+ WL_DBG(("No change in tdls state. Do nothing."
+ " tdls_enable:%d\n", enable));
+ goto exit;
+ }
+ err = wldev_iovar_setint(ndev, "tdls_enable", enable);
+ if (unlikely(err)) {
+ WL_ERR(("tdls_enable setting failed. err:%d\n", err));
+ goto exit;
+ } else {
+ WL_INFORM_MEM(("tdls_enable %d state:%d\n", enable, state));
+ /* Update the dhd state variable to be in sync */
+ dhdp->tdls_enable = enable;
+ if (state == TDLS_STATE_SETUP) {
+ /* For host initiated setup, apply TDLS params
+ * Don't propagate errors up for param config
+ * failures
+ */
+ dhd_tdls_enable(ndev, true, auto_mode, NULL);
+
+ }
+ }
+ } else {
+ WL_DBG(("Skip tdls config. state:%d update_reqd:%d "
+ "current_status:%d \n",
+ state, update_reqd, dhdp->tdls_enable));
+ }
+
+exit:
+ if (err) {
+ wl_flush_fw_log_buffer(ndev, FW_LOGSET_MASK_ALL);
+ }
+ mutex_unlock(&cfg->tdls_sync);
+ return err;
+}
+#endif /* WLTDLS */
+
+struct net_device* wl_get_ap_netdev(struct bcm_cfg80211 *cfg, char *ifname)
+{
+ struct net_info *iter, *next;
+ struct net_device *ndev = NULL;
+
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ for_each_ndev(cfg, iter, next) {
+ GCC_DIAGNOSTIC_POP();
+ if (iter->ndev) {
+ if (strncmp(iter->ndev->name, ifname, IFNAMSIZ) == 0) {
+ if (iter->ndev->ieee80211_ptr->iftype == NL80211_IFTYPE_AP) {
+ ndev = iter->ndev;
+ break;
+ }
+ }
+ }
+ }
+
+ return ndev;
+}
+
+#ifdef SUPPORT_AP_HIGHER_BEACONRATE
+#define WLC_RATE_FLAG 0x80
+#define RATE_MASK 0x7f
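+/* In the firmware rateset, bit 7 (WLC_RATE_FLAG) marks a basic rate; the
+ * low 7 bits carry the rate in 500 kbps units.
+ */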
+
+int wl_set_ap_beacon_rate(struct net_device *dev, int val, char *ifname)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ dhd_pub_t *dhdp;
+ wl_rateset_args_t rs;
+ int error = BCME_ERROR, i;
+ struct net_device *ndev = NULL;
+
+ dhdp = (dhd_pub_t *)(cfg->pub);
+
+ if (dhdp && !(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
+ WL_ERR(("Not Hostapd mode\n"));
+ return BCME_NOTAP;
+ }
+
+ ndev = wl_get_ap_netdev(cfg, ifname);
+
+ if (ndev == NULL) {
+ WL_ERR(("No softAP interface named %s\n", ifname));
+ return BCME_NOTAP;
+ }
+
+ bzero(&rs, sizeof(wl_rateset_args_t));
+ error = wldev_iovar_getbuf(ndev, "rateset", NULL, 0,
+ &rs, sizeof(wl_rateset_args_t), NULL);
+ if (error < 0) {
+ WL_ERR(("get rateset failed = %d\n", error));
+ return error;
+ }
+
+ if (rs.count < 1) {
+ WL_ERR(("Failed to get rate count\n"));
+ return BCME_ERROR;
+ }
+
+	/* Host delivers the target rate in units of 500 kbps */
+	/* Converting to 1 Mbps units would need fractional handling for the 5.5 Mbps basic rate */
+ for (i = 0; i < rs.count && i < WL_NUMRATES; i++)
+ if (rs.rates[i] & WLC_RATE_FLAG)
+ if ((rs.rates[i] & RATE_MASK) == val)
+ break;
+
+ /* Valid rate has been delivered as an argument */
+ if (i < rs.count && i < WL_NUMRATES) {
+ error = wldev_iovar_setint(ndev, "force_bcn_rspec", val);
+ if (error < 0) {
+ WL_ERR(("set beacon rate failed = %d\n", error));
+ return BCME_ERROR;
+ }
+ } else {
+		WL_ERR(("Rate is invalid\n"));
+ return BCME_BADARG;
+ }
+
+ return BCME_OK;
+}
+
+int
+wl_get_ap_basic_rate(struct net_device *dev, char* command, char *ifname, int total_len)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ dhd_pub_t *dhdp;
+ wl_rateset_args_t rs;
+ int error = BCME_ERROR;
+ int i, bytes_written = 0;
+ struct net_device *ndev = NULL;
+
+ dhdp = (dhd_pub_t *)(cfg->pub);
+
+ if (!(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
+ WL_ERR(("Not Hostapd mode\n"));
+ return BCME_NOTAP;
+ }
+
+ ndev = wl_get_ap_netdev(cfg, ifname);
+
+ if (ndev == NULL) {
+ WL_ERR(("No softAP interface named %s\n", ifname));
+ return BCME_NOTAP;
+ }
+
+ bzero(&rs, sizeof(wl_rateset_args_t));
+ error = wldev_iovar_getbuf(ndev, "rateset", NULL, 0,
+ &rs, sizeof(wl_rateset_args_t), NULL);
+ if (error < 0) {
+ WL_ERR(("get rateset failed = %d\n", error));
+ return error;
+ }
+
+ if (rs.count < 1) {
+ WL_ERR(("Failed to get rate count\n"));
+ return BCME_ERROR;
+ }
+
+ /* Delivers basic rate in the unit of 500kbps to host */
+ for (i = 0; i < rs.count && i < WL_NUMRATES; i++)
+ if (rs.rates[i] & WLC_RATE_FLAG)
+			bytes_written += snprintf(command + bytes_written,
+				total_len - bytes_written,
+				"%d ", rs.rates[i] & RATE_MASK);
+
+ /* Remove last space in the command buffer */
+ if (bytes_written && (bytes_written < total_len)) {
+ command[bytes_written - 1] = '\0';
+ bytes_written--;
+ }
+
+ return bytes_written;
+
+}
+#endif /* SUPPORT_AP_HIGHER_BEACONRATE */
+
+#ifdef SUPPORT_AP_RADIO_PWRSAVE
+#define MSEC_PER_MIN (60000L)
+
+static int
+_wl_update_ap_rps_params(struct net_device *dev)
+{
+ struct bcm_cfg80211 *cfg = NULL;
+ rpsnoa_iovar_params_t iovar;
+ u8 smbuf[WLC_IOCTL_SMLEN];
+
+ if (!dev)
+ return BCME_BADARG;
+
+ cfg = wl_get_cfg(dev);
+
+ bzero(&iovar, sizeof(iovar));
+ bzero(smbuf, sizeof(smbuf));
+
+ iovar.hdr.ver = RADIO_PWRSAVE_VERSION;
+ iovar.hdr.subcmd = WL_RPSNOA_CMD_PARAMS;
+ iovar.hdr.len = sizeof(iovar);
+ iovar.param->band = WLC_BAND_ALL;
+ iovar.param->level = cfg->ap_rps_info.level;
+ iovar.param->stas_assoc_check = cfg->ap_rps_info.sta_assoc_check;
+ iovar.param->pps = cfg->ap_rps_info.pps;
+ iovar.param->quiet_time = cfg->ap_rps_info.quiet_time;
+
+ if (wldev_iovar_setbuf(dev, "rpsnoa", &iovar, sizeof(iovar),
+ smbuf, sizeof(smbuf), NULL)) {
+ WL_ERR(("Failed to set rpsnoa params"));
+ return BCME_ERROR;
+ }
+
+ return BCME_OK;
+}
+
+int
+wl_get_ap_rps(struct net_device *dev, char* command, char *ifname, int total_len)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ dhd_pub_t *dhdp;
+ int error = BCME_ERROR;
+ int bytes_written = 0;
+ struct net_device *ndev = NULL;
+ rpsnoa_iovar_status_t iovar;
+ u8 smbuf[WLC_IOCTL_SMLEN];
+ u32 chanspec = 0;
+ u8 idx = 0;
+ u16 state;
+ u32 sleep;
+ u32 time_since_enable;
+
+ dhdp = (dhd_pub_t *)(cfg->pub);
+
+ if (!dhdp) {
+ error = BCME_NOTUP;
+ goto fail;
+ }
+
+ if (!(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
+ WL_ERR(("Not Hostapd mode\n"));
+ error = BCME_NOTAP;
+ goto fail;
+ }
+
+ ndev = wl_get_ap_netdev(cfg, ifname);
+
+ if (ndev == NULL) {
+ WL_ERR(("No softAP interface named %s\n", ifname));
+ error = BCME_NOTAP;
+ goto fail;
+ }
+
+ bzero(&iovar, sizeof(iovar));
+ bzero(smbuf, sizeof(smbuf));
+
+ iovar.hdr.ver = RADIO_PWRSAVE_VERSION;
+ iovar.hdr.subcmd = WL_RPSNOA_CMD_STATUS;
+ iovar.hdr.len = sizeof(iovar);
+ iovar.stats->band = WLC_BAND_ALL;
+
+ error = wldev_iovar_getbuf(ndev, "rpsnoa", &iovar, sizeof(iovar),
+ smbuf, sizeof(smbuf), NULL);
+ if (error < 0) {
+ WL_ERR(("get ap radio pwrsave failed = %d\n", error));
+ goto fail;
+ }
+
+ /* RSDB event doesn't seem to be handled correctly.
+ * So check chanspec of AP directly from the firmware
+ */
+ error = wldev_iovar_getint(ndev, "chanspec", (s32 *)&chanspec);
+ if (error < 0) {
+ WL_ERR(("get chanspec from AP failed = %d\n", error));
+ goto fail;
+ }
+
+ chanspec = wl_chspec_driver_to_host(chanspec);
+ if (CHSPEC_IS2G(chanspec))
+ idx = 0;
+ else if (
+#ifdef WL_6G_BAND
+ CHSPEC_IS6G(chanspec) ||
+#endif /* WL_6G_BAND */
+ CHSPEC_IS5G(chanspec))
+ idx = 1;
+ else {
+ error = BCME_BADCHAN;
+ goto fail;
+ }
+
+ state = ((rpsnoa_iovar_status_t *)smbuf)->stats[idx].state;
+ sleep = ((rpsnoa_iovar_status_t *)smbuf)->stats[idx].sleep_dur;
+ time_since_enable = ((rpsnoa_iovar_status_t *)smbuf)->stats[idx].sleep_avail_dur;
+
+	/* Convert ms to minutes, rounding down */
+ sleep = DIV_U64_BY_U32(sleep, MSEC_PER_MIN);
+ time_since_enable = DIV_U64_BY_U32(time_since_enable, MSEC_PER_MIN);
+
+ bytes_written += snprintf(command + bytes_written, total_len,
+ "state=%d sleep=%d time_since_enable=%d", state, sleep, time_since_enable);
+ error = bytes_written;
+
+fail:
+ return error;
+}
+
+int
+wl_set_ap_rps(struct net_device *dev, bool enable, char *ifname)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ dhd_pub_t *dhdp;
+ struct net_device *ndev = NULL;
+ rpsnoa_iovar_t iovar;
+ u8 smbuf[WLC_IOCTL_SMLEN];
+ int ret = BCME_OK;
+
+ dhdp = (dhd_pub_t *)(cfg->pub);
+
+ if (!dhdp) {
+ ret = BCME_NOTUP;
+ goto exit;
+ }
+
+ if (!(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
+ WL_ERR(("Not Hostapd mode\n"));
+ ret = BCME_NOTAP;
+ goto exit;
+ }
+
+ ndev = wl_get_ap_netdev(cfg, ifname);
+
+ if (ndev == NULL) {
+ WL_ERR(("No softAP interface named %s\n", ifname));
+ ret = BCME_NOTAP;
+ goto exit;
+ }
+
+ if (cfg->ap_rps_info.enable != enable) {
+ cfg->ap_rps_info.enable = enable;
+ if (enable) {
+ ret = _wl_update_ap_rps_params(ndev);
+ if (ret) {
+				WL_ERR(("Failed to update rpsnoa params\n"));
+ goto exit;
+ }
+ }
+ bzero(&iovar, sizeof(iovar));
+ bzero(smbuf, sizeof(smbuf));
+
+ iovar.hdr.ver = RADIO_PWRSAVE_VERSION;
+ iovar.hdr.subcmd = WL_RPSNOA_CMD_ENABLE;
+ iovar.hdr.len = sizeof(iovar);
+ iovar.data->band = WLC_BAND_ALL;
+ iovar.data->value = (int16)enable;
+
+ ret = wldev_iovar_setbuf(ndev, "rpsnoa", &iovar, sizeof(iovar),
+ smbuf, sizeof(smbuf), NULL);
+ if (ret) {
+ WL_ERR(("Failed to enable AP radio power save"));
+ goto exit;
+ }
+ cfg->ap_rps_info.enable = enable;
+ }
+exit:
+ return ret;
+}
+
+int
+wl_update_ap_rps_params(struct net_device *dev, ap_rps_info_t* rps, char *ifname)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ dhd_pub_t *dhdp;
+ struct net_device *ndev = NULL;
+
+ dhdp = (dhd_pub_t *)(cfg->pub);
+
+ if (!dhdp)
+ return BCME_NOTUP;
+
+ if (!(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
+ WL_ERR(("Not Hostapd mode\n"));
+ return BCME_NOTAP;
+ }
+
+ ndev = wl_get_ap_netdev(cfg, ifname);
+
+ if (ndev == NULL) {
+ WL_ERR(("No softAP interface named %s\n", ifname));
+ return BCME_NOTAP;
+ }
+
+ if (!rps)
+ return BCME_BADARG;
+
+ if (rps->pps < RADIO_PWRSAVE_PPS_MIN)
+ return BCME_BADARG;
+
+ if (rps->level < RADIO_PWRSAVE_LEVEL_MIN ||
+ rps->level > RADIO_PWRSAVE_LEVEL_MAX)
+ return BCME_BADARG;
+
+ if (rps->quiet_time < RADIO_PWRSAVE_QUIETTIME_MIN)
+ return BCME_BADARG;
+
+ if (rps->sta_assoc_check > RADIO_PWRSAVE_ASSOCCHECK_MAX ||
+ rps->sta_assoc_check < RADIO_PWRSAVE_ASSOCCHECK_MIN)
+ return BCME_BADARG;
+
+ cfg->ap_rps_info.pps = rps->pps;
+ cfg->ap_rps_info.level = rps->level;
+ cfg->ap_rps_info.quiet_time = rps->quiet_time;
+ cfg->ap_rps_info.sta_assoc_check = rps->sta_assoc_check;
+
+ if (cfg->ap_rps_info.enable) {
+ if (_wl_update_ap_rps_params(ndev)) {
+ WL_ERR(("Failed to update rpsnoa params"));
+ return BCME_ERROR;
+ }
+ }
+
+ return BCME_OK;
+}
+
+void
+wl_cfg80211_init_ap_rps(struct bcm_cfg80211 *cfg)
+{
+ cfg->ap_rps_info.enable = FALSE;
+ cfg->ap_rps_info.sta_assoc_check = RADIO_PWRSAVE_STAS_ASSOC_CHECK;
+ cfg->ap_rps_info.pps = RADIO_PWRSAVE_PPS;
+ cfg->ap_rps_info.quiet_time = RADIO_PWRSAVE_QUIET_TIME;
+ cfg->ap_rps_info.level = RADIO_PWRSAVE_LEVEL;
+}
+#endif /* SUPPORT_AP_RADIO_PWRSAVE */
+
+int
+wl_cfg80211_iface_count(struct net_device *dev)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ struct net_info *iter, *next;
+ int iface_count = 0;
+
+ /* Return the count of network interfaces (skip netless p2p discovery
+ * interface)
+ */
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ for_each_ndev(cfg, iter, next) {
+ GCC_DIAGNOSTIC_POP();
+ if (iter->ndev) {
+ iface_count++;
+ }
+ }
+ return iface_count;
+}
+
+typedef struct {
+ uint16 id;
+ uint16 len;
+ uint32 val;
+} he_xtlv_v32;
+
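+/* Callback pair for bcm_pack_xtlv_buf(): the get callback reports the
+ * id/len of the single value and the pack callback serializes it as an
+ * 8-, 16-, or 32-bit field.
+ */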
+static bool
+wl_he_get_uint_cb(void *ctx, uint16 *id, uint16 *len)
+{
+ he_xtlv_v32 *v32 = ctx;
+
+ *id = v32->id;
+ *len = v32->len;
+
+ return FALSE;
+}
+
+static void
+wl_he_pack_uint_cb(void *ctx, uint16 id, uint16 len, uint8 *buf)
+{
+ he_xtlv_v32 *v32 = ctx;
+
+ BCM_REFERENCE(id);
+ BCM_REFERENCE(len);
+
+ v32->val = htod32(v32->val);
+
+ switch (v32->len) {
+ case sizeof(uint8):
+ *buf = (uint8)v32->val;
+ break;
+ case sizeof(uint16):
+ store16_ua(buf, (uint16)v32->val);
+ break;
+ case sizeof(uint32):
+ store32_ua(buf, v32->val);
+ break;
+ default:
+ /* ASSERT(0); */
+ break;
+ }
+}
+
+int wl_cfg80211_set_he_mode(struct net_device *dev, struct bcm_cfg80211 *cfg,
+ s32 bssidx, u32 he_flag, bool set)
+{
+ bcm_xtlv_t read_he_xtlv;
+ uint8 se_he_xtlv[32];
+ int se_he_xtlv_len = sizeof(se_he_xtlv);
+ he_xtlv_v32 v32;
+ u32 he_feature = 0;
+ s32 err = 0;
+
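+	/* Read-modify-write: fetch the current HE feature bitmap, set or clear
+	 * the requested flag, then write it back as an XTLV.
+	 */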
+ read_he_xtlv.id = WL_HE_CMD_FEATURES;
+ read_he_xtlv.len = 0;
+ err = wldev_iovar_getbuf_bsscfg(dev, "he", &read_he_xtlv, sizeof(read_he_xtlv),
+ cfg->ioctl_buf, WLC_IOCTL_SMLEN, bssidx, NULL);
+ if (err < 0) {
+ WL_ERR(("HE get failed. error=%d\n", err));
+ return err;
+ } else {
+ he_feature = *(int*)cfg->ioctl_buf;
+ he_feature = dtoh32(he_feature);
+ }
+
+ v32.id = WL_HE_CMD_FEATURES;
+ v32.len = sizeof(s32);
+
+ if (set) {
+ v32.val = (he_feature | he_flag);
+ } else {
+ v32.val = (he_feature & ~he_flag);
+ }
+
+ err = bcm_pack_xtlv_buf((void *)&v32, se_he_xtlv, sizeof(se_he_xtlv),
+ BCM_XTLV_OPTION_ALIGN32, wl_he_get_uint_cb, wl_he_pack_uint_cb,
+ &se_he_xtlv_len);
+ if (err != BCME_OK) {
+		WL_ERR(("failed to pack HE set xtlv, err=%d\n", err));
+ }
+
+ err = wldev_iovar_setbuf_bsscfg(dev, "he", &se_he_xtlv, sizeof(se_he_xtlv),
+ cfg->ioctl_buf, WLC_IOCTL_SMLEN, bssidx, &cfg->ioctl_buf_sync);
+ if (err < 0) {
+ WL_ERR(("failed to set he features, error=%d\n", err));
+ }
+ WL_INFORM(("Set HE[%d] done\n", set));
+
+ return err;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0))
+int
+wl_cfg80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_csa_settings *params)
+{
+ s32 err = BCME_OK;
+ u32 bw = WL_CHANSPEC_BW_20;
+ chanspec_t chspec = 0;
+ wl_chan_switch_t csa_arg;
+ struct cfg80211_chan_def *chandef = &params->chandef;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct net_device *primary_dev = bcmcfg_to_prmry_ndev(cfg);
+
+ dev = ndev_to_wlc_ndev(dev, cfg);
+ chspec = wl_freq_to_chanspec(chandef->chan->center_freq);
+
+ WL_ERR(("netdev_ifidx(%d), target channel(%d) target bandwidth(%d),"
+ " mode(%d), count(%d)\n", dev->ifindex, CHSPEC_CHANNEL(chspec), chandef->width,
+ params->block_tx, params->count));
+
+ if (wl_get_mode_by_netdev(cfg, dev) != WL_MODE_AP) {
+		WL_ERR(("Channel switch is not supported in"
+			" non-SoftAP mode\n"));
+ return -EINVAL;
+ }
+
+ /* Check if STA is trying to associate with an AP */
+ if (wl_get_drv_status(cfg, CONNECTING, primary_dev)) {
+ WL_ERR(("Connecting is in progress\n"));
+ return BCME_BUSY;
+ }
+
+ if (chspec == cfg->ap_oper_channel) {
+		WL_ERR(("Channel %d is the same as the current operating channel,"
+			" so skip\n", CHSPEC_CHANNEL(chspec)));
+ return BCME_OK;
+ }
+
+ if (
+#ifdef WL_6G_BAND
+ CHSPEC_IS6G(chspec) ||
+#endif
+ CHSPEC_IS5G(chspec)) {
+#ifdef APSTA_RESTRICTED_CHANNEL
+ if (CHSPEC_CHANNEL(chspec) != DEFAULT_5G_SOFTAP_CHANNEL) {
+ WL_ERR(("Invalid 5G Channel, chan=%d\n", CHSPEC_CHANNEL(chspec)));
+ return -EINVAL;
+ }
+#endif /* APSTA_RESTRICTED_CHANNEL */
+ err = wl_get_bandwidth_cap(primary_dev, CHSPEC_BAND(chspec), &bw);
+ if (err < 0) {
+ WL_ERR(("Failed to get bandwidth information,"
+ " err=%d\n", err));
+ return err;
+ }
+ } else if (CHSPEC_IS2G(chspec)) {
+#ifdef BCMDONGLEHOST
+#ifdef APSTA_RESTRICTED_CHANNEL
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+ chanspec_t *sta_chanspec = (chanspec_t *)wl_read_prof(cfg,
+ primary_dev, WL_PROF_CHAN);
+
+ /* In 2GHz STA/SoftAP concurrent mode, the operating channel
+	 * of STA and SoftAP should be configured to the same 2GHz
+ * channel. Otherwise, it is an invalid configuration.
+ */
+ if (DHD_OPMODE_STA_SOFTAP_CONCURR(dhdp) &&
+ wl_get_drv_status(cfg, CONNECTED, primary_dev) &&
+ sta_chanspec && (CHSPEC_CHANNEL(*sta_chanspec) != CHSPEC_CHANNEL(chspec))) {
+ WL_ERR(("Invalid 2G Channel in case of STA/SoftAP"
+ " concurrent mode, sta_chan=%d, chan=%d\n",
+ CHSPEC_CHANNEL(*sta_chanspec), CHSPEC_CHANNEL(chspec)));
+ return -EINVAL;
+ }
+#endif /* APSTA_RESTRICTED_CHANNEL */
+#endif /* BCMDONGLEHOST */
+ bw = WL_CHANSPEC_BW_20;
+ } else {
+ WL_ERR(("invalid band (%d)\n", CHSPEC_BAND(chspec)));
+ return -EINVAL;
+ }
+
+#ifdef WL_6G_BAND
+ /* Avoid in case of 6G as for each center frequency bw is unique and is
+ * detected based on centre frequency.
+ */
+ if (!CHSPEC_IS6G(chspec))
+#endif /* WL_6G_BAND */
+ {
+ chspec = wf_channel2chspec(CHSPEC_CHANNEL(chspec), bw);
+ }
+ if (!wf_chspec_valid(chspec)) {
+ WL_ERR(("Invalid chanspec 0x%x\n", chspec));
+ return -EINVAL;
+ }
+
+ /* Send CSA to associated STAs */
+ memset(&csa_arg, 0, sizeof(wl_chan_switch_t));
+ csa_arg.mode = params->block_tx;
+ csa_arg.count = params->count;
+ csa_arg.chspec = chspec;
+ csa_arg.frame_type = CSA_BROADCAST_ACTION_FRAME;
+ csa_arg.reg = 0;
+
+ err = wldev_iovar_setbuf(dev, "csa", &csa_arg, sizeof(wl_chan_switch_t),
+ cfg->ioctl_buf, WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync);
+ if (err < 0) {
+ WL_ERR(("Failed to switch channel, err=%d\n", err));
+ }
+
+ return err;
+}
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0) */
+
+#ifdef SUPPORT_AP_SUSPEND
+void
+wl_set_ap_suspend_error_handler(struct net_device *ndev, bool suspend)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+
+ if (wl_get_drv_status(cfg, READY, ndev)) {
+#if defined(BCMDONGLEHOST)
+ /* IF dongle is down due to previous hang or other conditions, sending
+ * one more hang notification is not needed.
+ */
+ if (dhd_query_bus_erros(dhdp)) {
+ return;
+ }
+ dhdp->iface_op_failed = TRUE;
+#if defined(DHD_FW_COREDUMP)
+ if (dhdp->memdump_enabled) {
+ dhdp->memdump_type = DUMP_TYPE_IFACE_OP_FAILURE;
+ dhd_bus_mem_dump(dhdp);
+ }
+#endif /* DHD_FW_COREDUMP */
+#endif /* BCMDONGLEHOST */
+
+#if defined(BCMDONGLEHOST) && defined(OEM_ANDROID)
+ WL_ERR(("Notify hang event to upper layer \n"));
+ dhdp->hang_reason = suspend ?
+ HANG_REASON_BSS_DOWN_FAILURE : HANG_REASON_BSS_UP_FAILURE;
+ net_os_send_hang_message(ndev);
+#endif /* BCMDONGLEHOST && OEM_ANDROID */
+
+ }
+}
+
+#define MAX_AP_RESUME_TIME 5000
+int
+wl_set_ap_suspend(struct net_device *dev, bool suspend, char *ifname)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ dhd_pub_t *dhdp;
+ struct net_device *ndev = NULL;
+ int ret = BCME_OK;
+ bool is_bssup = FALSE;
+ int bssidx;
+ unsigned long start_j;
+ int time_to_sleep = MAX_AP_RESUME_TIME;
+
+ dhdp = (dhd_pub_t *)(cfg->pub);
+
+ if (!dhdp) {
+ return BCME_NOTUP;
+ }
+
+ if (!(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
+ WL_ERR(("Not Hostapd mode\n"));
+ return BCME_NOTAP;
+ }
+
+ ndev = wl_get_ap_netdev(cfg, ifname);
+
+ if (ndev == NULL) {
+ WL_ERR(("No softAP interface named %s\n", ifname));
+ return BCME_NOTAP;
+ }
+
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr)) < 0) {
+ WL_ERR(("Find p2p index from wdev(%p) failed\n", ndev->ieee80211_ptr));
+ return BCME_NOTFOUND;
+ }
+
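+	/* Suspend maps to bringing the BSS down; resume brings it back up and
+	 * waits for the AP_CREATED link-up event.
+	 */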
+ is_bssup = wl_cfg80211_bss_isup(ndev, bssidx);
+ if (is_bssup && suspend) {
+ wl_clr_drv_status(cfg, AP_CREATED, ndev);
+ wl_clr_drv_status(cfg, CONNECTED, ndev);
+
+ if ((ret = wl_cfg80211_bss_up(cfg, ndev, bssidx, 0)) < 0) {
+ WL_ERR(("AP suspend error %d, suspend %d\n", ret, suspend));
+ ret = BCME_NOTDOWN;
+ goto exit;
+ }
+ } else if (!is_bssup && !suspend) {
+ /* Abort scan before starting AP again */
+ wl_cfgscan_cancel_scan(cfg);
+
+ if ((ret = wl_cfg80211_bss_up(cfg, ndev, bssidx, 1)) < 0) {
+ WL_ERR(("AP resume error %d, suspend %d\n", ret, suspend));
+ ret = BCME_NOTUP;
+ goto exit;
+ }
+
+ while (TRUE) {
+ start_j = get_jiffies_64();
+ /* Wait for Linkup event to mark successful AP bring up */
+ ret = wait_event_interruptible_timeout(cfg->netif_change_event,
+ wl_get_drv_status(cfg, AP_CREATED, ndev),
+ msecs_to_jiffies(time_to_sleep));
+ if (ret == -ERESTARTSYS) {
+ WL_ERR(("waitqueue was interrupted by a signal\n"));
+ time_to_sleep -= jiffies_to_msecs(get_jiffies_64() - start_j);
+ if (time_to_sleep <= 0) {
+ WL_ERR(("time to sleep hits 0\n"));
+ ret = BCME_NOTUP;
+ goto exit;
+ }
+ } else if (ret == 0 || !wl_get_drv_status(cfg, AP_CREATED, ndev)) {
+ WL_ERR(("AP resume failed!\n"));
+ ret = BCME_NOTUP;
+ goto exit;
+ } else {
+ wl_set_drv_status(cfg, CONNECTED, ndev);
+ wl_clr_drv_status(cfg, AP_CREATING, ndev);
+ ret = BCME_OK;
+ break;
+ }
+ }
+ } else {
+		/* BSS already up on resume, or already down on suspend;
+		 * nothing to do, so return OK
+		 */
+ ret = BCME_OK;
+ }
+exit:
+ if (ret != BCME_OK)
+ wl_set_ap_suspend_error_handler(bcmcfg_to_prmry_ndev(cfg), suspend);
+
+ return ret;
+}
+#endif /* SUPPORT_AP_SUSPEND */
+
+#ifdef SUPPORT_SOFTAP_ELNA_BYPASS
+int wl_set_softap_elna_bypass(struct net_device *dev, char *ifname, int enable)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ struct net_device *ifdev = NULL;
+ char iobuf[WLC_IOCTL_SMLEN];
+ int err = BCME_OK;
+ int iftype = 0;
+
+ memset(iobuf, 0, WLC_IOCTL_SMLEN);
+
+ /* Check the interface type */
+ ifdev = wl_get_netdev_by_name(cfg, ifname);
+ if (ifdev == NULL) {
+ WL_ERR(("%s: Could not find net_device for ifname:%s\n", __FUNCTION__, ifname));
+ err = BCME_BADARG;
+ goto fail;
+ }
+
+ iftype = ifdev->ieee80211_ptr->iftype;
+ if (iftype == NL80211_IFTYPE_AP) {
+ err = wldev_iovar_setint(ifdev, "softap_elnabypass", enable);
+ if (unlikely(err)) {
+ WL_ERR(("%s: Failed to set softap_elnabypass, err=%d\n",
+ __FUNCTION__, err));
+ }
+ } else {
+		WL_ERR(("%s: softap_elnabypass can only be controlled in SoftAP mode\n",
+			__FUNCTION__));
+ err = BCME_BADARG;
+ }
+fail:
+ return err;
+}
+int wl_get_softap_elna_bypass(struct net_device *dev, char *ifname, void *param)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ int *enable = (int*)param;
+ struct net_device *ifdev = NULL;
+ char iobuf[WLC_IOCTL_SMLEN];
+ int err = BCME_OK;
+ int iftype = 0;
+
+ memset(iobuf, 0, WLC_IOCTL_SMLEN);
+
+ /* Check the interface type */
+ ifdev = wl_get_netdev_by_name(cfg, ifname);
+ if (ifdev == NULL) {
+ WL_ERR(("%s: Could not find net_device for ifname:%s\n", __FUNCTION__, ifname));
+ err = BCME_BADARG;
+ goto fail;
+ }
+
+ iftype = ifdev->ieee80211_ptr->iftype;
+ if (iftype == NL80211_IFTYPE_AP) {
+ err = wldev_iovar_getint(ifdev, "softap_elnabypass", enable);
+ if (unlikely(err)) {
+ WL_ERR(("%s: Failed to get softap_elnabypass, err=%d\n",
+ __FUNCTION__, err));
+ }
+ } else {
+		WL_ERR(("%s: softap_elnabypass can only be controlled in SoftAP mode\n",
+			__FUNCTION__));
+ err = BCME_BADARG;
+ }
+fail:
+ return err;
+
+}
+#endif /* SUPPORT_SOFTAP_ELNA_BYPASS */
+
+#ifdef SUPPORT_AP_BWCTRL
+#define OPER_MODE_ENABLE (1 << 8)
+static int op2bw[] = {20, 40, 80, 160};
+
+static int
+wl_get_ap_he_mode(struct net_device *ndev, struct bcm_cfg80211 *cfg, bool *he)
+{
+ bcm_xtlv_t read_he_xtlv;
+ int ret = 0;
+ u8 he_enab = 0;
+ u32 he_feature = 0;
+ *he = FALSE;
+
+ /* Check he enab first */
+ read_he_xtlv.id = WL_HE_CMD_ENAB;
+ read_he_xtlv.len = 0;
+
+ ret = wldev_iovar_getbuf(ndev, "he", &read_he_xtlv, sizeof(read_he_xtlv),
+ cfg->ioctl_buf, WLC_IOCTL_SMLEN, NULL);
+ if (ret < 0) {
+ if (ret == BCME_UNSUPPORTED) {
+ /* HE not supported */
+ ret = BCME_OK;
+ } else {
+ WL_ERR(("HE ENAB get failed. ret=%d\n", ret));
+ }
+ goto exit;
+ } else {
+ he_enab = *(u8*)cfg->ioctl_buf;
+ }
+
+ if (!he_enab) {
+ goto exit;
+ }
+
+ /* Then check BIT3 of he features */
+ read_he_xtlv.id = WL_HE_CMD_FEATURES;
+ read_he_xtlv.len = 0;
+
+ ret = wldev_iovar_getbuf(ndev, "he", &read_he_xtlv, sizeof(read_he_xtlv),
+ cfg->ioctl_buf, WLC_IOCTL_SMLEN, NULL);
+ if (ret < 0) {
+ WL_ERR(("HE FEATURE get failed. error=%d\n", ret));
+ goto exit;
+ } else {
+ he_feature = *(int*)cfg->ioctl_buf;
+ he_feature = dtoh32(he_feature);
+ }
+
+ if (he_feature & WL_HE_FEATURES_HE_AP) {
+ WL_DBG(("HE is enabled in AP\n"));
+ *he = TRUE;
+ }
+exit:
+ return ret;
+}
+
+static void
+wl_update_apchan_bwcap(struct bcm_cfg80211 *cfg, struct net_device *ndev, chanspec_t chanspec)
+{
+ struct net_device *dev = bcmcfg_to_prmry_ndev(cfg);
+ struct wireless_dev *wdev = ndev_to_wdev(dev);
+ struct wiphy *wiphy = wdev->wiphy;
+ int ret = BCME_OK;
+ u32 bw_cap;
+ u32 ctl_chan;
+ chanspec_t chanbw = WL_CHANSPEC_BW_20;
+
+ /* Update channel in profile */
+ ctl_chan = wf_chspec_ctlchan(chanspec);
+ wl_update_prof(cfg, ndev, NULL, &chanspec, WL_PROF_CHAN);
+
+ /* BW cap is only updated in 5GHz */
+ if (ctl_chan <= CH_MAX_2G_CHANNEL)
+ return;
+
+ /* Get WL BW CAP */
+ ret = wl_get_bandwidth_cap(bcmcfg_to_prmry_ndev(cfg),
+ CHSPEC_BAND(chanspec), &bw_cap);
+ if (ret < 0) {
+ WL_ERR(("get bw_cap failed = %d\n", ret));
+ goto exit;
+ }
+
+ chanbw = CHSPEC_BW(wl_channel_to_chanspec(wiphy,
+ ndev, wf_chspec_ctlchan(chanspec), bw_cap));
+
+exit:
+ cfg->bw_cap_5g = bw2cap[chanbw >> WL_CHANSPEC_BW_SHIFT];
+ WL_INFORM_MEM(("supported bw cap is:0x%x\n", cfg->bw_cap_5g));
+
+}
+
+int
+wl_rxchain_to_opmode_nss(int rxchain)
+{
+ /*
+ * Nss 1 -> 0, Nss 2 -> 1
+ * This is from operating mode field
+ * in 8.4.1.50 of 802.11ac-2013
+ */
+ /* TODO : Nss 3 ? */
+ if (rxchain == 3)
+ return (1 << 4);
+ else
+ return 0;
+}
+
+int
+wl_update_opmode(struct net_device *ndev, u32 bw)
+{
+ int ret = BCME_OK;
+ int oper_mode;
+ int rxchain;
+
+ ret = wldev_iovar_getint(ndev, "rxchain", (s32 *)&rxchain);
+ if (ret < 0) {
+ WL_ERR(("get rxchain failed = %d\n", ret));
+ goto exit;
+ }
+
+ oper_mode = bw;
+ oper_mode |= wl_rxchain_to_opmode_nss(rxchain);
+ /* Enable flag */
+ oper_mode |= OPER_MODE_ENABLE;
+
+ ret = wldev_iovar_setint(ndev, "oper_mode", oper_mode);
+ if (ret < 0) {
+ WL_ERR(("set oper_mode failed = %d\n", ret));
+ goto exit;
+ }
+
+exit:
+ return ret;
+}
+
+int
+wl_set_ap_bw(struct net_device *dev, u32 bw, char *ifname)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ dhd_pub_t *dhdp;
+ struct net_device *ndev = NULL;
+ int ret = BCME_OK;
+ chanspec_t *chanspec;
+ bool he;
+
+ dhdp = (dhd_pub_t *)(cfg->pub);
+
+ if (!dhdp) {
+ return BCME_NOTUP;
+ }
+
+ if (!(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
+ WL_ERR(("Not Hostapd mode\n"));
+ return BCME_NOTAP;
+ }
+
+ ndev = wl_get_ap_netdev(cfg, ifname);
+
+ if (ndev == NULL) {
+ WL_ERR(("No softAP interface named %s\n", ifname));
+ return BCME_NOTAP;
+ }
+
+ if (bw > DOT11_OPER_MODE_160MHZ) {
+ WL_ERR(("BW is too big %d\n", bw));
+ return BCME_BADARG;
+ }
+
+ chanspec = (chanspec_t *)wl_read_prof(cfg, ndev, WL_PROF_CHAN);
+ if (CHSPEC_IS2G(*chanspec)) {
+ WL_ERR(("current chanspec is %d, not supported\n", *chanspec));
+ ret = BCME_BADCHAN;
+ goto exit;
+ }
+
+ if ((DHD_OPMODE_STA_SOFTAP_CONCURR(dhdp) &&
+ wl_get_drv_status(cfg, CONNECTED, bcmcfg_to_prmry_ndev(cfg))) ||
+ wl_cfgnan_is_enabled(cfg)) {
+ WL_ERR(("BW control in concurrent mode is not supported\n"));
+ return BCME_BUSY;
+ }
+
+ /* When SCAN is on going either in STA or in AP, return BUSY */
+ if (wl_get_drv_status_all(cfg, SCANNING)) {
+ WL_ERR(("STA is SCANNING, not support BW control\n"));
+ return BCME_BUSY;
+ }
+
+ /* When SCANABORT is on going either in STA or in AP, return BUSY */
+ if (wl_get_drv_status_all(cfg, SCAN_ABORTING)) {
+ WL_ERR(("STA is SCAN_ABORTING, not support BW control\n"));
+ return BCME_BUSY;
+ }
+
+ /* When CONNECTION is on going in STA, return BUSY */
+ if (wl_get_drv_status(cfg, CONNECTING, bcmcfg_to_prmry_ndev(cfg))) {
+ WL_ERR(("STA is CONNECTING, not support BW control\n"));
+ return BCME_BUSY;
+ }
+
+ /* BW control in AX mode needs more verification */
+ ret = wl_get_ap_he_mode(ndev, cfg, &he);
+ if (ret == BCME_OK && he) {
+ WL_ERR(("BW control in HE mode is not supported\n"));
+ return BCME_UNSUPPORTED;
+ }
+ if (ret < 0) {
+ WL_ERR(("Check AX mode is failed\n"));
+ goto exit;
+ }
+
+ if ((!WL_BW_CAP_160MHZ(cfg->bw_cap_5g) && (bw == DOT11_OPER_MODE_160MHZ)) ||
+ (!WL_BW_CAP_80MHZ(cfg->bw_cap_5g) && (bw >= DOT11_OPER_MODE_80MHZ)) ||
+ (!WL_BW_CAP_40MHZ(cfg->bw_cap_5g) && (bw >= DOT11_OPER_MODE_40MHZ)) ||
+ (!WL_BW_CAP_20MHZ(cfg->bw_cap_5g))) {
+ WL_ERR(("bw_cap %x does not support bw = %d\n", cfg->bw_cap_5g, bw));
+ ret = BCME_BADARG;
+ goto exit;
+ }
+
+ WL_DBG(("Updating AP BW to %d\n", op2bw[bw]));
+
+ ret = wl_update_opmode(ndev, bw);
+ if (ret < 0) {
+ WL_ERR(("opmode set failed = %d\n", ret));
+ goto exit;
+ }
+
+exit:
+ return ret;
+}
+
+int
+wl_get_ap_bw(struct net_device *dev, char* command, char *ifname, int total_len)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ dhd_pub_t *dhdp;
+ struct net_device *ndev = NULL;
+ int ret = BCME_OK;
+ u32 chanspec = 0;
+ u32 bw = DOT11_OPER_MODE_20MHZ;
+ int bytes_written = 0;
+
+ dhdp = (dhd_pub_t *)(cfg->pub);
+
+ if (!dhdp) {
+ return BCME_NOTUP;
+ }
+
+ if (!(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
+ WL_ERR(("Not Hostapd mode\n"));
+ return BCME_NOTAP;
+ }
+
+ ndev = wl_get_ap_netdev(cfg, ifname);
+
+ if (ndev == NULL) {
+ WL_ERR(("No softAP interface named %s\n", ifname));
+ return BCME_NOTAP;
+ }
+
+ ret = wldev_iovar_getint(ndev, "chanspec", (s32 *)&chanspec);
+ if (ret < 0) {
+ WL_ERR(("get chanspec from AP failed = %d\n", ret));
+ goto exit;
+ }
+
+ chanspec = wl_chspec_driver_to_host(chanspec);
+
+ if (CHSPEC_IS20(chanspec)) {
+ bw = DOT11_OPER_MODE_20MHZ;
+ } else if (CHSPEC_IS40(chanspec)) {
+ bw = DOT11_OPER_MODE_40MHZ;
+ } else if (CHSPEC_IS80(chanspec)) {
+ bw = DOT11_OPER_MODE_80MHZ;
+ } else if (CHSPEC_IS_BW_160_WIDE(chanspec)) {
+ bw = DOT11_OPER_MODE_160MHZ;
+ } else {
+ WL_ERR(("chanspec error %x\n", chanspec));
+ ret = BCME_BADCHAN;
+ goto exit;
+ }
+
+ bytes_written += snprintf(command + bytes_written, total_len,
+ "bw=%d", bw);
+ ret = bytes_written;
+exit:
+ return ret;
+}
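+
+/*
+ * Example (sketch): for an 80MHz softAP chanspec the reply buffer receives
+ * "bw=2" and the number of bytes written is returned as the status.
+ */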
+
+void
+wl_restore_ap_bw(struct bcm_cfg80211 *cfg)
+{
+ int ret = BCME_OK;
+ u32 bw;
+ bool he = FALSE;
+ struct net_info *iter, *next;
+ struct net_device *ndev = NULL;
+ chanspec_t *chanspec;
+
+ if (!cfg) {
+ return;
+ }
+
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ for_each_ndev(cfg, iter, next) {
+ GCC_DIAGNOSTIC_POP();
+ if (iter->ndev) {
+ if (iter->ndev->ieee80211_ptr->iftype == NL80211_IFTYPE_AP) {
+ chanspec = (chanspec_t *)wl_read_prof(cfg, iter->ndev,
+ WL_PROF_CHAN);
+ if (CHSPEC_IS2G(*chanspec)) {
+ ndev = iter->ndev;
+ break;
+ }
+ }
+ }
+ }
+
+ if (!ndev) {
+ return;
+ }
+
+ /* BW control in AX mode not allowed */
+ ret = wl_get_ap_he_mode(bcmcfg_to_prmry_ndev(cfg), cfg, &he);
+ if (ret == BCME_OK && he) {
+ return;
+ }
+ if (ret < 0) {
+ WL_ERR(("Checking AX mode failed\n"));
+ return;
+ }
+
+ if (WL_BW_CAP_160MHZ(cfg->bw_cap_5g)) {
+ bw = DOT11_OPER_MODE_160MHZ;
+ } else if (WL_BW_CAP_80MHZ(cfg->bw_cap_5g)) {
+ bw = DOT11_OPER_MODE_80MHZ;
+ } else if (WL_BW_CAP_40MHZ(cfg->bw_cap_5g)) {
+ bw = DOT11_OPER_MODE_40MHZ;
+ } else {
+ return;
+ }
+
+ WL_DBG(("Restoring AP BW to %d\n", op2bw[bw]));
+
+ ret = wl_update_opmode(ndev, bw);
+ if (ret < 0) {
+ WL_ERR(("bw restore failed = %d\n", ret));
+ return;
+ }
+}
+#endif /* SUPPORT_AP_BWCTRL */
diff --git a/bcmdhd.101.10.361.x/wl_cfgvif.h b/bcmdhd.101.10.361.x/wl_cfgvif.h
new file mode 100755
index 0000000..09f63fb
--- /dev/null
+++ b/bcmdhd.101.10.361.x/wl_cfgvif.h
@@ -0,0 +1,251 @@
+/*
+ * Wifi Virtual Interface implementation
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#ifndef _wl_cfgvif_h_
+#define _wl_cfgvif_h_
+
+#include <linux/wireless.h>
+#include <typedefs.h>
+#include <ethernet.h>
+#include <wlioctl.h>
+#include <net/cfg80211.h>
+#include <linux/rfkill.h>
+#include <osl.h>
+#if defined(BCMDONGLEHOST)
+#include <dngl_stats.h>
+#include <dhd.h>
+#endif /* BCMDONGLEHOST */
+#include <wl_cfgp2p.h>
+#ifdef WL_NAN
+#include <wl_cfgnan.h>
+#endif /* WL_NAN */
+#ifdef WL_BAM
+#include <wl_bam.h>
+#endif /* WL_BAM */
+
+#ifdef SUPPORT_AP_RADIO_PWRSAVE
+#define RADIO_PWRSAVE_PPS 10
+#define RADIO_PWRSAVE_QUIET_TIME 10
+#define RADIO_PWRSAVE_LEVEL 3
+#define RADIO_PWRSAVE_STAS_ASSOC_CHECK 0
+
+#define RADIO_PWRSAVE_LEVEL_MIN 1
+#define RADIO_PWRSAVE_LEVEL_MAX 9
+#define RADIO_PWRSAVE_PPS_MIN 1
+#define RADIO_PWRSAVE_QUIETTIME_MIN 1
+#define RADIO_PWRSAVE_ASSOCCHECK_MIN 0
+#define RADIO_PWRSAVE_ASSOCCHECK_MAX 1
+
+#define RADIO_PWRSAVE_MAJOR_VER 1
+#define RADIO_PWRSAVE_MINOR_VER 1
+#define RADIO_PWRSAVE_MAJOR_VER_SHIFT 8
+#define RADIO_PWRSAVE_VERSION \
+ ((RADIO_PWRSAVE_MAJOR_VER << RADIO_PWRSAVE_MAJOR_VER_SHIFT) | RADIO_PWRSAVE_MINOR_VER)
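+/* e.g. version 1.1 packs as (1 << 8) | 1 == 0x0101: major in the high
+ * byte, minor in the low byte
+ */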
+#endif /* SUPPORT_AP_RADIO_PWRSAVE */
+
+#ifdef WLTDLS
+#define TDLS_TUNNELED_PRB_REQ "\x7f\x50\x6f\x9a\04"
+#define TDLS_TUNNELED_PRB_RESP "\x7f\x50\x6f\x9a\05"
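+/* Byte layout (an assumption from the matching code, not normative): payload
+ * type 0x7f (vendor specific), WFA OUI 50:6f:9a, then OUI subtype 4 or 5 for
+ * the tunneled probe request/response respectively.
+ */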
+#define TDLS_MAX_IFACE_FOR_ENABLE 1
+#endif /* WLTDLS */
+
+/* HE flag defines */
+#define WL_HE_FEATURES_HE_AP 0x8
+#define WL_HE_FEATURES_HE_P2P 0x20
+#define WL_HE_FEATURES_6G 0x80u
+
+extern bool wl_cfg80211_check_vif_in_use(struct net_device *ndev);
+
+extern int wl_cfg80211_set_mgmt_vndr_ies(struct bcm_cfg80211 *cfg,
+ bcm_struct_cfgdev *cfgdev, s32 bssidx, s32 pktflag,
+ const u8 *vndr_ie, u32 vndr_ie_len);
+
+#ifdef WL_SUPPORT_ACS
+#define ACS_MSRMNT_DELAY 1000 /* dump_obss delay in ms */
+#define IOCTL_RETRY_COUNT 5
+#define CHAN_NOISE_DUMMY -80
+#define OBSS_TOKEN_IDX 15
+#define IBSS_TOKEN_IDX 15
+#define TX_TOKEN_IDX 14
+#define CTG_TOKEN_IDX 13
+#define PKT_TOKEN_IDX 15
+#define IDLE_TOKEN_IDX 12
+#endif /* WL_SUPPORT_ACS */
+
+extern s32 wl_cfg80211_dfs_ap_move(struct net_device *ndev, char *data,
+ char *command, int total_len);
+extern s32 wl_cfg80211_get_band_chanspecs(struct net_device *ndev,
+ void *buf, s32 buflen, chanspec_band_t band, bool acs_req);
+
+#ifdef WLTDLS
+extern s32 wl_cfg80211_tdls_config(struct bcm_cfg80211 *cfg,
+ enum wl_tdls_config state, bool tdls_mode);
+extern s32 wl_tdls_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data);
+#endif /* WLTDLS */
+
+#ifdef SUPPORT_AP_HIGHER_BEACONRATE
+int wl_set_ap_beacon_rate(struct net_device *dev, int val, char *ifname);
+int wl_get_ap_basic_rate(struct net_device *dev, char* command, char *ifname, int total_len);
+#endif /* SUPPORT_AP_HIGHER_BEACONRATE */
+#ifdef SUPPORT_AP_RADIO_PWRSAVE
+int wl_get_ap_rps(struct net_device *dev, char* command, char *ifname, int total_len);
+int wl_set_ap_rps(struct net_device *dev, bool enable, char *ifname);
+int wl_update_ap_rps_params(struct net_device *dev, ap_rps_info_t* rps, char *ifname);
+void wl_cfg80211_init_ap_rps(struct bcm_cfg80211 *cfg);
+#endif /* SUPPORT_AP_RADIO_PWRSAVE */
+int wl_cfg80211_iface_count(struct net_device *dev);
+struct net_device* wl_get_ap_netdev(struct bcm_cfg80211 *cfg, char *ifname);
+void wl_cfg80211_cleanup_virtual_ifaces(struct bcm_cfg80211 *cfg, bool rtnl_lock_reqd);
+#ifdef WL_IFACE_MGMT
+extern int wl_cfg80211_set_iface_policy(struct net_device *ndev, char *arg, int len);
+extern uint8 wl_cfg80211_get_iface_policy(struct net_device *ndev);
+extern s32 wl_cfg80211_handle_if_role_conflict(struct bcm_cfg80211 *cfg, wl_iftype_t new_wl_iftype);
+extern wl_iftype_t wl_cfg80211_get_sec_iface(struct bcm_cfg80211 *cfg);
+#endif /* WL_IFACE_MGMT */
+
+extern s32 wl_get_vif_macaddr(struct bcm_cfg80211 *cfg, u16 wl_iftype, u8 *mac_addr);
+extern s32 wl_release_vif_macaddr(struct bcm_cfg80211 *cfg, u8 *mac_addr, u16 wl_iftype);
+
+int wl_cfg80211_set_he_mode(struct net_device *dev, struct bcm_cfg80211 *cfg,
+ s32 bssidx, u32 interface_type, bool set);
+#ifdef SUPPORT_AP_SUSPEND
+extern int wl_set_ap_suspend(struct net_device *dev, bool enable, char *ifname);
+#endif /* SUPPORT_AP_SUSPEND */
+#ifdef SUPPORT_SOFTAP_ELNA_BYPASS
+int wl_set_softap_elna_bypass(struct net_device *dev, char *ifname, int enable);
+int wl_get_softap_elna_bypass(struct net_device *dev, char *ifname, void *param);
+#endif /* SUPPORT_SOFTAP_ELNA_BYPASS */
+#ifdef SUPPORT_AP_BWCTRL
+extern int wl_set_ap_bw(struct net_device *dev, u32 bw, char *ifname);
+extern int wl_get_ap_bw(struct net_device *dev, char* command, char *ifname, int total_len);
+#endif /* SUPPORT_AP_BWCTRL */
+extern s32 wl_get_nl80211_band(u32 wl_band);
+extern int wl_get_bandwidth_cap(struct net_device *ndev, uint32 band, uint32 *bandwidth);
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0)) || \
+ defined(WL_COMPAT_WIRELESS)
+#if (defined(CONFIG_ARCH_MSM) && defined(TDLS_MGMT_VERSION2)) || \
+ ((LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0) && \
+ LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)))
+extern s32 wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
+ u8 *peer, u8 action_code, u8 dialog_token, u16 status_code,
+ u32 peer_capability, const u8 *buf, size_t len);
+#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)) && \
+ (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)))
+extern s32 wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *peer, u8 action_code, u8 dialog_token, u16 status_code,
+ u32 peer_capability, const u8 *buf, size_t len);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
+extern s32 wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *peer, u8 action_code, u8 dialog_token, u16 status_code,
+ u32 peer_capability, bool initiator, const u8 *buf, size_t len);
+#else /* CONFIG_ARCH_MSM && TDLS_MGMT_VERSION2 */
+extern s32 wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
+ u8 *peer, u8 action_code, u8 dialog_token, u16 status_code,
+ const u8 *buf, size_t len);
+#endif /* CONFIG_ARCH_MSM && TDLS_MGMT_VERSION2 */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
+extern s32 wl_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *peer, enum nl80211_tdls_operation oper);
+#else
+extern s32 wl_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
+ u8 *peer, enum nl80211_tdls_operation oper);
+#endif
+#endif /* LINUX_VERSION > KERNEL_VERSION(3,2,0) || WL_COMPAT_WIRELESS */
+
+extern s32 wl_notify_connect_status_ap(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data);
+extern s32 wl_cfg80211_change_virtual_iface(struct wiphy *wiphy, struct net_device *ndev,
+ enum nl80211_iftype type,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+ u32 *flags,
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0) */
+ struct vif_params *params);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)) || \
+ defined(WL_COMPAT_WIRELESS)
+s32
+wl_cfg80211_set_channel(struct wiphy *wiphy, struct net_device *dev,
+ struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type);
+#endif /* (LINUX_VERSION < VERSION(3, 6, 0)) || WL_COMPAT_WIRELESS */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || \
+ defined(WL_COMPAT_WIRELESS)
+extern s32 wl_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_ap_settings *info);
+extern s32 wl_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *dev);
+extern s32 wl_cfg80211_change_beacon(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_beacon_data *info);
+#else
+extern s32 wl_cfg80211_add_set_beacon(struct wiphy *wiphy, struct net_device *dev,
+ struct beacon_parameters *info);
+extern s32 wl_cfg80211_del_beacon(struct wiphy *wiphy, struct net_device *dev);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0) || WL_COMPAT_WIRELESS */
+
+extern s32 wl_ap_start_ind(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data);
+extern s32 wl_csa_complete_ind(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data);
+extern s32 wl_cfg80211_set_ap_role(struct bcm_cfg80211 *cfg, struct net_device *dev);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0))
+extern int wl_cfg80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_csa_settings *params);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0) */
+
+extern bcm_struct_cfgdev *
+wl_cfg80211_add_virtual_iface(struct wiphy *wiphy,
+#if defined(WL_CFG80211_P2P_DEV_IF)
+ const char *name,
+#else
+ char *name,
+#endif /* WL_CFG80211_P2P_DEV_IF */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+ unsigned char name_assign_type,
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) */
+ enum nl80211_iftype type,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+ u32 *flags,
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0) */
+ struct vif_params *params);
+extern s32 wl_cfg80211_del_virtual_iface(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev);
+extern s32 wl_cfg80211_change_beacon(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_beacon_data *info);
+
+extern s32 wl_get_auth_assoc_status(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data);
+extern s32 wl_frame_get_mgmt(struct bcm_cfg80211 *cfg, u16 fc,
+ const struct ether_addr *da, const struct ether_addr *sa,
+ const struct ether_addr *bssid, u8 **pheader, u32 *body_len, u8 *pbody);
+extern s32 wl_cfg80211_parse_ies(const u8 *ptr, u32 len, struct parsed_ies *ies);
+extern void wl_cfg80211_ap_timeout_work(struct work_struct *work);
+
+#if defined(WLTDLS)
+extern bool wl_cfg80211_is_tdls_tunneled_frame(void *frame, u32 frame_len);
+#endif /* WLTDLS */
+
+#ifdef SUPPORT_AP_BWCTRL
+extern void wl_restore_ap_bw(struct bcm_cfg80211 *cfg);
+#endif /* SUPPORT_AP_BWCTRL */
+#endif /* _wl_cfgvif_h_ */
diff --git a/bcmdhd.101.10.361.x/wl_dbg.h b/bcmdhd.101.10.361.x/wl_dbg.h
new file mode 100755
index 0000000..cbdce44
--- /dev/null
+++ b/bcmdhd.101.10.361.x/wl_dbg.h
@@ -0,0 +1,1544 @@
+/*
+ * Minimal debug/trace/assert driver definitions for
+ * Broadcom 802.11 Networking Adapter.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+/* XXX, some old "wl msglevel" for PHY module has been moved to phy on 6/10/2009 "wl phymsglevel"
+ * They are spare in TOT and can be reused if needed. see src/wl/phy/wlc_phy_int.h
+ */
+
+#ifndef _wl_dbg_h_
+#define _wl_dbg_h_
+
+#include <event_log.h>
+
+/* wl_msg_level is a bit vector with defs in wlioctl.h */
+extern uint32 wl_msg_level;
+extern uint32 wl_msg_level2;
+extern uint32 wl_msg_level3;
+
+#if defined(BCMDBG) && \
+!defined(BCMDONGLEHOST) && !defined(BCMDBG_EXCLUDE_HW_TIMESTAMP)
+extern char* wlc_dbg_get_hw_timestamp(void);
+
+#define WL_TIMESTAMP() do { if (wl_msg_level2 & WL_TIMESTAMP_VAL) {\
+ printf(wlc_dbg_get_hw_timestamp()); }\
+ } while (0)
+#else
+#define WL_TIMESTAMP()
+#endif /* BCMDBG && !BCMDONGLEHOST && !BCMDBG_EXCLUDE_HW_TIMESTAMP */
+
+#ifdef ENABLE_CORECAPTURE
+#define MAX_BACKTRACE_DEPTH 32
+extern int wl_print_backtrace(const char * prefix, void * i_backtrace, int i_backtrace_depth);
+#else
+#define wl_print_backtrace(a, b, c)
+#endif /* ENABLE_CORECAPTURE */
+
+#define WIFICC_CAPTURE(_reason)
+#define WIFICC_LOGDEBUGIF(_flags, _args)
+#define WIFICC_LOGDEBUG(_args)
+
+#define WL_PRINT(args) do { WL_TIMESTAMP(); printf args; } while (0)
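+
+/* Note on the idiom used throughout this header: each WL_xxx(args) macro
+ * takes a fully parenthesized printf argument list, e.g.
+ *	WL_PRINT(("chanspec 0x%x\n", chspec));
+ * so that a single macro parameter can carry a variadic list.
+ */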
+
+#ifdef BCM_UPTIME_PROFILE
+#define WL_PROF(args) WL_PRINT(args)
+#else
+#define WL_PROF(args)
+#endif /* BCM_UPTIME_PROFILE */
+
+#if defined(ERR_USE_EVENT_LOG) && defined(EVENT_LOG_COMPILE)
+#define EVENT_LOG_PRSRV_DUMP() EVENT_LOG_PRSRV_FLUSH()
+#else
+#define EVENT_LOG_PRSRV_DUMP()
+#endif /* ERR_USE_EVENT_LOG && EVENT_LOG_COMPILE */
+
+#if defined(BCMDBG)
+
+/* DBGONLY() macro to reduce ifdefs in code for statements that are only needed when
+ * BCMDBG is defined.
+ * Ex.
+ * myfn() {
+ *	int i;
+ *	DBGONLY(int dbg;)
+ * }
+ */
+#define DBGONLY(x) x
+
+#define WL_WARN(x) WL_ERROR(x)
+
+/* To disable a message completely ... until you need it again */
+#define WL_NONE(args) do {if (wl_msg_level & 0) WL_PRINT(args);} while (0)
+
+#define WL_ERROR(args) do {if (wl_msg_level & WL_ERROR_VAL) WL_PRINT(args); \
+ else WIFICC_LOGDEBUG(args);} while (0)
+
+#define WL_IE_ERROR(args) do {if (wl_msg_level & WL_ERROR_VAL) WL_PRINT(args); \
+ else WIFICC_LOGDEBUG(args); } while (0)
+
+#define WL_AMSDU_ERROR(args) do {if (wl_msg_level & WL_ERROR_VAL) WL_PRINT(args); \
+ else WIFICC_LOGDEBUG(args); } while (0)
+
+#define WL_ASSOC_ERROR(args) do {if (wl_msg_level & WL_ERROR_VAL) WL_PRINT(args); \
+ else WIFICC_LOGDEBUG(args); } while (0)
+
+#define WL_SCAN_ERROR(args) do {if (wl_msg_level & WL_ERROR_VAL) WL_PRINT(args); \
+ else WIFICC_LOGDEBUG(args); } while (0)
+#define KM_ERR(args) do {if (wl_msg_level & WL_ERROR_VAL) WL_PRINT(args); \
+ else WIFICC_LOGDEBUG(args); } while (0)
+
+#define WL_WBTEXT_ERROR(args) do {if (wl_msg_level & WL_ERROR_VAL) WL_PRINT(args); \
+ else WIFICC_LOGDEBUG(args); } while (0)
+
+#define WL_MBO_ERR(args) do {if (wl_msg_level & WL_ERROR_VAL) WL_PRINT(args); \
+ else WIFICC_LOGDEBUG(args); } while (0)
+
+#define WL_RANDMAC_ERR(args) do {if (wl_msg_level & WL_ERROR_VAL) WL_PRINT(args); \
+ else WIFICC_LOGDEBUG(args); } while (0)
+
+#define WL_TRACE(args) do {if (wl_msg_level & WL_TRACE_VAL) WL_PRINT(args);} while (0)
+
+#define WL_PFN_ERROR(args) do {if (wl_msg_level & WL_ERROR_VAL) WL_PRINT(args); \
+ else WIFICC_LOGDEBUG(args); } while (0)
+
+#define WL_PRHDRS_MSG(args) do {if (wl_msg_level & WL_PRHDRS_VAL) WL_PRINT(args);} while (0)
+#define WL_PRHDRS(i, p, f, t, r, l) do { \
+ if (wl_msg_level & WL_PRHDRS_VAL) \
+ wlc_print_hdrs(i, p, f, t, r, l); \
+ } while (0)
+#define WL_PRPKT(m, b, n) do {if (wl_msg_level & WL_PRPKT_VAL) prhex(m, b, n);} while (0)
+#define WL_INFORM(args) do {if (wl_msg_level & WL_INFORM_VAL) WL_PRINT(args);} while (0)
+#define WL_TMP(args) do {if (wl_msg_level & WL_TMP_VAL) WL_PRINT(args);} while (0)
+#define WL_OID(args) do {if (wl_msg_level & WL_OID_VAL) WL_PRINT(args);} while (0)
+#define WL_RATE(args) do {if (wl_msg_level & WL_RATE_VAL) WL_PRINT(args);} while (0)
+#define WL_ASSOC(args) do {if (wl_msg_level & WL_ASSOC_VAL) WL_PRINT(args); \
+ else WIFICC_LOGDEBUG(args);} while (0)
+#define WL_PRUSR(m, b, n) do {if (wl_msg_level & WL_PRUSR_VAL) prhex(m, b, n);} while (0)
+#define WL_PS(args) do {if (wl_msg_level & WL_PS_VAL) WL_PRINT(args);} while (0)
+#define WL_SPARE1(args) do {if (wl_msg_level & WL_TXPWR_VAL) WL_PRINT(args);} while (0)
+#define WL_PORT(args) do {if (wl_msg_level & WL_PORT_VAL) WL_PRINT(args);} while (0)
+#define WL_DUAL(args) do {if (wl_msg_level & WL_DUAL_VAL) WL_PRINT(args);} while (0)
+#define WL_WSEC(args) do {if (wl_msg_level & WL_WSEC_VAL) WL_PRINT(args);} while (0)
+#define WL_WSEC_DUMP(args) do {if (wl_msg_level & WL_WSEC_DUMP_VAL) WL_PRINT(args);} while (0)
+#define WL_SPARE2(args) do {if (wl_msg_level & WL_NRSSI_VAL) WL_PRINT(args);} while (0)
+#define WL_SPARE3(args) do {if (wl_msg_level & WL_LOFT_VAL) WL_PRINT(args);} while (0)
+#define WL_REGULATORY(args) do {if (wl_msg_level & WL_REGULATORY_VAL) WL_PRINT(args);} while (0)
+#define WL_SPARE4(args) do {if (wl_msg_level & WL_PHYCAL_VAL) WL_PRINT(args);} while (0)
+#define WL_WDI(args) do {if (wl_msg_level & WL_WDI_VAL) WL_PRINT(args);} while (0)
+#define WL_MPC(args) do {if (wl_msg_level & WL_MPC_VAL) WL_PRINT(args);} while (0)
+#define WL_APSTA(args) do {if (wl_msg_level & WL_APSTA_VAL) WL_PRINT(args);} while (0)
+#define WL_DFS(args) do {if (wl_msg_level & WL_DFS_VAL) WL_PRINT(args);} while (0)
+#define WL_MUMIMO(args) do {if (wl_msg_level & WL_MUMIMO_VAL) WL_PRINT(args);} while (0)
+#define WL_MODE_SWITCH(args) do {if (wl_msg_level & WL_MODE_SWITCH_VAL) WL_PRINT(args);} while (0)
+#define WL_BCNTRIM_DBG(args) do {if (wl_msg_level & WL_BCNTRIM_VAL) WL_PRINT(args);} while (0)
+#define WL_MBSS(args) do {if (wl_msg_level & WL_MBSS_VAL) WL_PRINT(args);} while (0)
+#define WL_CAC(args) do {if (wl_msg_level & WL_CAC_VAL) WL_PRINT(args);} while (0)
+#define WL_AMSDU(args) do {if (wl_msg_level & WL_AMSDU_VAL) WL_PRINT(args);} while (0)
+#define WL_AMPDU(args) do {if (wl_msg_level & WL_AMPDU_VAL) WL_PRINT(args);} while (0)
+#define WL_FFPLD(args) do {if (wl_msg_level & WL_FFPLD_VAL) WL_PRINT(args);} while (0)
+#define WL_PFN(args) do {if (wl_msg_level & WL_PFN_VAL) WL_PRINT(args);} while (0)
+/* wl_msg_level is full. Use wl_msg_level_2 now */
+#define WL_WOWL(args) do {if (wl_msg_level2 & WL_WOWL_VAL) WL_PRINT(args);} while (0)
+#define WL_SCAN(args) do {if (wl_msg_level2 & WL_SCAN_VAL) WL_PRINT(args);} while (0)
+#define WL_SCAN_WARN(args) do {if (wl_msg_level2 & WL_SCAN_VAL) WL_PRINT(args);} while (0)
+#define WL_COEX(args) do {if (wl_msg_level2 & WL_COEX_VAL) WL_PRINT(args);} while (0)
+#define WL_RTDC(w,s,i,j) do {if (wl_msg_level2 & WL_RTDC_VAL) wlc_log(w,s,i,j);} while (0)
+#define WL_PROTO(args) do {if (wl_msg_level2 & WL_PROTO_VAL) WL_PRINT(args);} while (0)
+#define WL_RTDC2(w,s,i,j) do {if (wl_msg_level2 & 0) wlc_log(w,s,i,j);} while (0)
+#define WL_CHANINT(args) do {if (wl_msg_level2 & WL_CHANINT_VAL) WL_PRINT(args);} while (0)
+#define WL_WMF(args) do {if (wl_msg_level2 & WL_WMF_VAL) WL_PRINT(args);} while (0)
+#define WL_P2P(args) do {if (wl_msg_level2 & WL_P2P_VAL) WL_PRINT(args);} while (0)
+#define WL_ITFR(args) do {if (wl_msg_level2 & WL_ITFR_VAL) WL_PRINT(args);} while (0)
+#define WL_MCHAN(args) do {if (wl_msg_level2 & WL_MCHAN_VAL) WL_PRINT(args);} while (0)
+#define WL_TDLS(args) do {if (wl_msg_level2 & WL_TDLS_VAL) WL_PRINT(args);} while (0)
+#define WL_MCNX(args) do {if (wl_msg_level2 & WL_MCNX_VAL) WL_PRINT(args);} while (0)
+#define WL_PROT(args) do {if (wl_msg_level2 & WL_PROT_VAL) WL_PRINT(args);} while (0)
+#define WL_PSTA(args) do {if (wl_msg_level2 & WL_PSTA_VAL) WL_PRINT(args);} while (0)
+#define WL_TBTT(args) do {if (wl_msg_level2 & WL_TBTT_VAL) WL_PRINT(args);} while (0)
+#define WL_TRF_MGMT(args) do {if (wl_msg_level2 & WL_TRF_MGMT_VAL) WL_PRINT(args);} while (0)
+#define WL_L2FILTER(args) do {if (wl_msg_level2 & WL_L2FILTER_VAL) WL_PRINT(args);} while (0)
+#define WL_TSO(args) do {if (wl_msg_level2 & WL_TSO_VAL) WL_PRINT(args);} while (0)
+#define WL_MQ(args) do {if (wl_msg_level2 & WL_MQ_VAL) WL_PRINT(args);} while (0)
+#define WL_P2PO(args) do {if (wl_msg_level2 & WL_P2PO_VAL) WL_PRINT(args);} while (0)
+#ifdef WLAWDL
+#define WL_AWDL(args) do {if (wl_msg_level2 & WL_AWDL_VAL) WL_PRINT(args);} while (0)
+#endif /* WLAWDL */
+#define WL_WNM(args) do {if (wl_msg_level2 & WL_WNM_VAL) WL_PRINT(args);} while (0)
+#define WL_TXBF(args) do {if (wl_msg_level2 & WL_TXBF_VAL) WL_PRINT(args);} while (0)
+#define WL_PCIE(args) do {if (wl_msg_level2 & WL_PCIE_VAL) WL_PRINT(args);} while (0)
+#define WL_PMDUR(args) do {if (wl_msg_level2 & WL_PMDUR_VAL) WL_PRINT(args);} while (0)
+#ifdef BCMTSTAMPEDLOGS
+void wlc_bmac_tslog(struct wlc_hw_info *hw, const char *str, uint32 p1, uint32 p2);
+#else
+#define wlc_bmac_tslog(hw, str, p1, p2) do {} while (0)
+#endif
+#define WL_TSLOG(w, s, i, j) \
+ do { \
+ if (wl_msg_level2 & WL_TIMESTAMP_VAL) { \
+ wlc_bmac_tslog(w, s, i, j); \
+ } \
+ } while (0)
+/* not using WL_ROAM for BCMDBG at the moment */
+#define WL_ROAM(args)
+#define WL_PRMAC(args) do {if (wl_msg_level & WL_PRMAC_VAL) WL_PRINT(args);} while (0)
+#define WL_FBT(args) do {if (wl_msg_level2 & WL_FBT_VAL) WL_PRINT(args);} while (0)
+#define WL_MESH(args) do {if (wl_msg_level2 & WL_MESH_VAL) WL_PRINT(args);} while (0)
+#define WL_SWDIV(args) do {if (wl_msg_level2 & WL_SWDIV_VAL) WL_PRINT(args);} while (0)
+#define WL_MBO_DBG(args) do {if (wl_msg_level2 & WL_MBO_VAL) WL_PRINT(args);} while (0)
+#define WL_RANDMAC_INFO(args) do {if (wl_msg_level2 & WL_RANDMAC_VAL) WL_PRINT(args);} while (0)
+#define WL_BAM_ERR(args) do {if (wl_msg_level2 & WL_ERROR_VAL) WL_PRINT(args);} while (0)
+#define WL_ADPS(args) do {if (wl_msg_level2 & WL_ADPS_VAL) WL_PRINT(args);} while (0)
+#define WL_OCE_DBG(args) do {if (wl_msg_level2 & WL_OCE_VAL) WL_PRINT(args);} while (0)
+#define WL_WBTEXT_INFO(args) do {if (wl_msg_level2 & WL_WNM_VAL) WL_PRINT(args);} while (0)
+#define WL_ASSOC_AP(args) \
+ do { \
+ if (wl_msg_level3 & WL_ASSOC_AP_VAL) { \
+ WL_PRINT(args); \
+ } else { \
+ WIFICC_LOGDEBUG(args); \
+ } \
+ } while (0)
+#define WL_TPA_ERR(args) do {if (wl_msg_level2 & WL_ERROR_VAL) WL_PRINT(args);} while (0)
+#define WL_TPA_INFO(args) do {if (wl_msg_level2 & WL_INFORM_VAL) WL_PRINT(args);} while (0)
+#define WL_LATENCY_INFO(args) do {if (wl_msg_level3 & WL_LATENCY_VAL) WL_PRINT(args);} while (0)
+
+#define WL_ERROR_ON() (wl_msg_level & WL_ERROR_VAL)
+#define WL_TRACE_ON() (wl_msg_level & WL_TRACE_VAL)
+#define WL_PRHDRS_ON() (wl_msg_level & WL_PRHDRS_VAL)
+#define WL_PRPKT_ON() (wl_msg_level & WL_PRPKT_VAL)
+#define WL_INFORM_ON() (wl_msg_level & WL_INFORM_VAL)
+#define WL_TMP_ON() (wl_msg_level & WL_TMP_VAL)
+#define WL_OID_ON() (wl_msg_level & WL_OID_VAL)
+#define WL_RATE_ON() (wl_msg_level & WL_RATE_VAL)
+#define WL_ASSOC_ON() (wl_msg_level & WL_ASSOC_VAL)
+#define WL_PORT_ON() (wl_msg_level & WL_PORT_VAL)
+#define WL_WSEC_ON() (wl_msg_level & WL_WSEC_VAL)
+#define WL_WSEC_DUMP_ON() (wl_msg_level & WL_WSEC_DUMP_VAL)
+#define WL_MPC_ON() (wl_msg_level & WL_MPC_VAL)
+#define WL_REGULATORY_ON() (wl_msg_level & WL_REGULATORY_VAL)
+#define WL_APSTA_ON() (wl_msg_level & WL_APSTA_VAL)
+#define WL_DFS_ON() (wl_msg_level & WL_DFS_VAL)
+#define WL_MUMIMO_ON() (wl_msg_level & WL_MUMIMO_VAL)
+#define WL_MODE_SWITCH_ON() (wl_msg_level & WL_MODE_SWITCH_VAL)
+#define WL_MBSS_ON() (wl_msg_level & WL_MBSS_VAL)
+#define WL_AMPDU_ON() (wl_msg_level & WL_AMPDU_VAL)
+#define WL_PFN_ON() (wl_msg_level & WL_PFN_VAL)
+#define WL_WOWL_ON() (wl_msg_level2 & WL_WOWL_VAL)
+#define WL_SCAN_ON() (wl_msg_level2 & WL_SCAN_VAL)
+#define WL_WMF_ON() (wl_msg_level2 & WL_WMF_VAL)
+#define WL_P2P_ON() (wl_msg_level2 & WL_P2P_VAL)
+#define WL_ITFR_ON() (wl_msg_level2 & WL_ITFR_VAL)
+#define WL_MCHAN_ON() (wl_msg_level2 & WL_MCHAN_VAL)
+#define WL_TDLS_ON() (wl_msg_level2 & WL_TDLS_VAL)
+#define WL_MCNX_ON() (wl_msg_level2 & WL_MCNX_VAL)
+#define WL_PROT_ON() (wl_msg_level2 & WL_PROT_VAL)
+#define WL_PSTA_ON() (wl_msg_level2 & WL_PSTA_VAL)
+#define WL_TBTT_ON() (wl_msg_level2 & WL_TBTT_VAL)
+#define WL_TRF_MGMT_ON() (wl_msg_level2 & WL_TRF_MGMT_VAL)
+#define WL_LPC_ON() (wl_msg_level2 & WL_LPC_VAL)
+#define WL_L2FILTER_ON() (wl_msg_level2 & WL_L2FILTER_VAL)
+#define WL_MQ_ON() (wl_msg_level2 & WL_MQ_VAL)
+#define WL_P2PO_ON() (wl_msg_level2 & WL_P2PO_VAL)
+#ifdef WLAWDL
+#define WL_AWDL_ON() (wl_msg_level2 & WL_AWDL_VAL)
+#endif /* WLAWDL */
+#define WL_WNM_ON() (wl_msg_level2 & WL_WNM_VAL)
+#define WL_TXBF_ON() (wl_msg_level2 & WL_TXBF_VAL)
+#define WL_PCIE_ON() (wl_msg_level2 & WL_PCIE_VAL)
+#define WL_TSLOG_ON() (wl_msg_level2 & WL_TIMESTAMP_VAL)
+#define WL_MESH_ON() (wl_msg_level2 & WL_MESH_VAL)
+#define WL_SWDIV_ON() (wl_msg_level2 & WL_SWDIV_VAL)
+#define WL_MBO_DBG_ON() (wl_msg_level2 & WL_MBO_VAL)
+#define WL_RANDMAC_DBG_ON() (wl_msg_level2 & WL_RANDMAC_VAL)
+#define WL_ADPS_ON() (wl_msg_level2 & WL_ADPS_VAL)
+#define WL_OCE_DBG_ON() (wl_msg_level2 & WL_OCE_VAL)
+#define WL_ASSOC_AP_ON() (wl_msg_level3 & WL_ASSOC_AP_VAL)
+#define WL_FILS_DBG_ON() (wl_msg_level3 & WL_FILS_DBG_VAL)
+
+/* Extra message control for APSTA debugging */
+#define WL_APSTA_UPDN_VAL 0x00000001 /* Config up/down related */
+#define WL_APSTA_BCN_VAL 0x00000002 /* Calls to beacon update */
+#define WL_APSTA_TX_VAL 0x00000004 /* Transmit data path */
+#define WL_APSTA_RX_VAL 0x00000008 /* Receive data path */
+#define WL_APSTA_TSF_VAL 0x00000010 /* TSF-related items */
+#define WL_APSTA_BSSID_VAL 0x00000020 /* Calls to set bssid */
+
+extern uint32 wl_apsta_dbg;
+
+#define WL_APSTA_UPDN(args) do {if (wl_apsta_dbg & WL_APSTA_UPDN_VAL) {WL_APSTA(args);}} while (0)
+#define WL_APSTA_BCN(args) do {if (wl_apsta_dbg & WL_APSTA_BCN_VAL) {WL_APSTA(args);}} while (0)
+#define WL_APSTA_TX(args) do {if (wl_apsta_dbg & WL_APSTA_TX_VAL) {WL_APSTA(args);}} while (0)
+#define WL_APSTA_RX(args) do {if (wl_apsta_dbg & WL_APSTA_RX_VAL) {WL_APSTA(args);}} while (0)
+#define WL_APSTA_TSF(args) do {if (wl_apsta_dbg & WL_APSTA_TSF_VAL) {WL_APSTA(args);}} while (0)
+#define WL_APSTA_BSSID(args) do {if (wl_apsta_dbg & WL_APSTA_BSSID_VAL) {WL_APSTA(args);}} while (0)
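+
+/* Example (sketch): to trace only up/down and bssid transitions, set
+ * wl_apsta_dbg = WL_APSTA_UPDN_VAL | WL_APSTA_BSSID_VAL, and enable
+ * WL_APSTA_VAL in wl_msg_level so that WL_APSTA() itself prints.
+ */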
+
+/* Extra message control for AMPDU debugging */
+#define WL_AMPDU_UPDN_VAL 0x00000001 /* Config up/down related */
+#define WL_AMPDU_ERR_VAL 0x00000002 /* Error path messages */
+#define WL_AMPDU_TX_VAL 0x00000004 /* Transmit data path */
+#define WL_AMPDU_RX_VAL 0x00000008 /* Receive data path */
+#define WL_AMPDU_CTL_VAL 0x00000010 /* Control path */
+#define WL_AMPDU_HW_VAL 0x00000020 /* AMPDU_HW */
+#define WL_AMPDU_HWTXS_VAL 0x00000040 /* AMPDU_HWTXS */
+#define WL_AMPDU_HWDBG_VAL 0x00000080 /* AMPDU_DBG */
+#define WL_AMPDU_STAT_VAL 0x00000100 /* statistics */
+
+extern uint32 wl_ampdu_dbg;
+
+#define WL_AMPDU_UPDN(args) do {if (wl_ampdu_dbg & WL_AMPDU_UPDN_VAL) {WL_AMPDU(args);}} while (0)
+#define WL_AMPDU_RX(args) do {if (wl_ampdu_dbg & WL_AMPDU_RX_VAL) {WL_AMPDU(args);}} while (0)
+#define WL_AMPDU_ERR(args) do {if (wl_ampdu_dbg & WL_AMPDU_ERR_VAL) {WL_AMPDU(args);}} while (0)
+#define WL_AMPDU_TX(args) do {if (wl_ampdu_dbg & WL_AMPDU_TX_VAL) {WL_AMPDU(args);}} while (0)
+#define WL_AMPDU_CTL(args) do {if (wl_ampdu_dbg & WL_AMPDU_CTL_VAL) {WL_AMPDU(args);}} while (0)
+#define WL_AMPDU_HW(args) do {if (wl_ampdu_dbg & WL_AMPDU_HW_VAL) {WL_AMPDU(args);}} while (0)
+#define WL_AMPDU_HWTXS(args) do {if (wl_ampdu_dbg & WL_AMPDU_HWTXS_VAL) {WL_AMPDU(args);}} while (0)
+#define WL_AMPDU_HWDBG(args) do {if (wl_ampdu_dbg & WL_AMPDU_HWDBG_VAL) {WL_AMPDU(args);}} while (0)
+#define WL_AMPDU_STAT(args) do {if (wl_ampdu_dbg & WL_AMPDU_STAT_VAL) {WL_AMPDU(args);}} while (0)
+#define WL_AMPDU_ERR_ON() (wl_ampdu_dbg & WL_AMPDU_ERR_VAL)
+#define WL_AMPDU_HW_ON() (wl_ampdu_dbg & WL_AMPDU_HW_VAL)
+#define WL_AMPDU_HWTXS_ON() (wl_ampdu_dbg & WL_AMPDU_HWTXS_VAL)
+
+/* Extra Message control for Mesh debugging */
+extern uint32 wl_mesh_dbg;
+#define WL_MESH_AMPE_VAL 0x00000001
+#define WL_MESH_ROUTE_VAL 0x00000002
+#define WL_MESH_BCN_VAL 0x00000004
+
+#define WL_MESH_AMPE(args) do {if (wl_mesh_dbg & WL_MESH_AMPE_VAL) {WL_MESH(args);}} while (0)
+#define WL_MESH_ROUTE(args) do {if (wl_mesh_dbg & WL_MESH_ROUTE_VAL) {WL_MESH(args);}} while (0)
+#define WL_MESH_BCN(args) do {if (wl_mesh_dbg & WL_MESH_BCN_VAL) {WL_MESH(args);}} while (0)
+
+/* BCMDBG */
+#elif defined(BCMCONDITIONAL_LOGGING)
+
+/* Ideally this would live in an include file that vendors can use to conditionalize logging */
+
+/* DBGONLY() macro to reduce ifdefs in code for statements that are only needed when
+ * BCMDBG is defined.
+ */
+#define DBGONLY(x) x
+
+/* To disable a message completely ... until you need it again */
+#define WL_NONE(args)
+#define WL_WARN(x) WL_ERROR(x)
+#define WL_ERROR(args) do {if (wl_msg_level & WL_ERROR_VAL) WL_PRINT(args); \
+ else WIFICC_LOGDEBUG(args); } while (0)
+#define WL_SCAN_ERROR(args)
+#define WL_IE_ERROR(args)
+#define WL_AMSDU_ERROR(args)
+#define WL_ASSOC_ERROR(args)
+#define WL_WNM_PDT_ERROR(args)
+#define KM_ERR(args)
+#define WL_WBTEXT_ERROR(args)
+#define WL_WBTEXT_INFO(args)
+#define WL_LATENCY_INFO(args)
+
+#define WL_TRACE(args)
+#define WL_PRHDRS_MSG(args)
+#define WL_PRHDRS(i, p, f, t, r, l)
+#define WL_PRPKT(m, b, n)
+#define WL_INFORM(args)
+#define WL_TMP(args)
+#define WL_OID(args)
+#define WL_RATE(args) do {if (wl_msg_level & WL_RATE_VAL) WL_PRINT(args);} while (0)
+#define WL_ASSOC(args) do {if (wl_msg_level & WL_ASSOC_VAL) WL_PRINT(args); \
+ else WIFICC_LOGDEBUG(args);} while (0)
+#define WL_PRUSR(m, b, n)
+#define WL_PS(args) do {if (wl_msg_level & WL_PS_VAL) WL_PRINT(args);} while (0)
+
+#define WL_PORT(args)
+#define WL_DUAL(args)
+#define WL_REGULATORY(args) do {if (wl_msg_level & WL_REGULATORY_VAL) WL_PRINT(args); \
+ else WIFICC_LOGDEBUG(args);} while (0)
+
+#define WL_MPC(args)
+#define WL_APSTA(args)
+#define WL_APSTA_BCN(args)
+#define WL_APSTA_TX(args)
+#define WL_APSTA_TSF(args)
+#define WL_APSTA_BSSID(args)
+#define WL_BA(args)
+#define WL_MBSS(args)
+#define WL_MODE_SWITCH(args)
+#define WL_PROTO(args)
+
+#define WL_CAC(args) do {if (wl_msg_level & WL_CAC_VAL) WL_PRINT(args);} while (0)
+#define WL_AMSDU(args)
+#define WL_AMPDU(args)
+#define WL_FFPLD(args)
+#define WL_MCHAN(args)
+
+#define WL_DFS(args)
+#define WL_WOWL(args)
+#define WL_DPT(args)
+#define WL_ASSOC_OR_DPT(args)
+#define WL_SCAN(args) do {if (wl_msg_level2 & WL_SCAN_VAL) WL_PRINT(args);} while (0)
+#define WL_SCAN_WARN(args) do {if (wl_msg_level2 & WL_SCAN_VAL) WL_PRINT(args);} while (0)
+#define WL_COEX(args)
+#define WL_RTDC(w, s, i, j)
+#define WL_RTDC2(w, s, i, j)
+#define WL_CHANINT(args)
+#define WL_BTA(args)
+#define WL_P2P(args)
+#define WL_ITFR(args)
+#define WL_TDLS(args)
+#define WL_MCNX(args)
+#define WL_PROT(args)
+#define WL_PSTA(args)
+#define WL_WFDS(m, b, n)
+#define WL_TRF_MGMT(args)
+#define WL_L2FILTER(args)
+#define WL_MQ(args)
+#define WL_TXBF(args)
+#define WL_MUMIMO(args)
+#define WL_P2PO(args)
+#ifdef WLAWDL
+#define WL_AWDL(args)
+#endif /* WLAWDL */
+#define WL_ROAM(args)
+#define WL_WNM(args)
+
+#ifdef WLMSG_MESH
+#define WL_MESH(args) WL_PRINT(args)
+#define WL_MESH_AMPE(args) WL_PRINT(args)
+#define WL_MESH_ROUTE(args) WL_PRINT(args)
+#define WL_MESH_BCN(args)
+#else
+#define WL_MESH(args)
+#define WL_MESH_AMPE(args)
+#define WL_MESH_ROUTE(args)
+#define WL_MESH_BCN(args)
+#endif
+#define WL_ASSOC_AP(args) \
+ do { \
+ if (wl_msg_level3 & WL_ASSOC_AP_VAL) { \
+ WL_PRINT(args); \
+ } else { \
+ WIFICC_LOGDEBUG(args); \
+ } \
+ } while (0)
+#ifdef BCMDBG_ERR
+#if defined(EVENT_LOG_COMPILE) && defined(ERR_USE_EVENT_LOG)
+#if defined(ERR_USE_EVENT_LOG_RA)
+#define WL_PFN_ERROR(args) EVENT_LOG_RA(EVENT_LOG_TAG_SCAN_ERR, args)
+#else
+#define WL_PFN_ERROR(args) EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_SCAN_ERR, args)
+#endif /* ERR_USE_EVENT_LOG_RA */
+#else
+#define WL_PFN_ERROR(args) WL_PRINT(args)
+#endif /* ERR_USE_EVENT_LOG */
+#else
+#define WL_PFN_ERROR(args)
+#endif /* BCMDBG_ERR */
+
+#define WL_AMPDU_UPDN(args)
+#define WL_AMPDU_RX(args)
+#define WL_AMPDU_ERR(args)
+#define WL_AMPDU_TX(args)
+#define WL_AMPDU_CTL(args)
+#define WL_AMPDU_HW(args)
+#define WL_AMPDU_HWTXS(args)
+#define WL_AMPDU_HWDBG(args)
+#define WL_AMPDU_STAT(args)
+#define WL_AMPDU_ERR_ON() 0
+#define WL_AMPDU_HW_ON() 0
+#define WL_AMPDU_HWTXS_ON() 0
+
+#define WL_APSTA_UPDN(args)
+#define WL_APSTA_RX(args)
+#define WL_WSEC(args)
+#define WL_WSEC_DUMP(args)
+#define WL_PCIE(args)
+#define WL_PMDUR(args)
+#define WL_TSLOG(w, s, i, j)
+#define WL_FBT(args)
+#define WL_MBO_DBG(args)
+#define WL_RANDMAC_DBG(args)
+#define WL_BAM_ERR(args)
+#define WL_ADPS(args)
+#define WL_OCE_DBG(args)
+#define WL_TPA_ERR(args)
+#define WL_TPA_INFO(args)
+
+#define WL_ERROR_ON() (wl_msg_level & WL_ERROR_VAL)
+#define WL_TRACE_ON() 0
+#define WL_PRHDRS_ON() 0
+#define WL_PRPKT_ON() 0
+#define WL_INFORM_ON() 0
+#define WL_TMP_ON() 0
+#define WL_OID_ON() 0
+#define WL_RATE_ON() (wl_msg_level & WL_RATE_VAL)
+#define WL_ASSOC_ON() (wl_msg_level & WL_ASSOC_VAL)
+#define WL_PRUSR_ON() 0
+#define WL_PS_ON() (wl_msg_level & WL_PS_VAL)
+#define WL_PORT_ON() 0
+#define WL_WSEC_ON() 0
+#define WL_WSEC_DUMP_ON() 0
+#define WL_MPC_ON() 0
+#define WL_REGULATORY_ON() (wl_msg_level & WL_REGULATORY_VAL)
+#define WL_APSTA_ON() 0
+#define WL_DFS_ON() 0
+#define WL_MBSS_ON() 0
+#define WL_CAC_ON() (wl_msg_level & WL_CAC_VAL)
+#define WL_AMPDU_ON() 0
+#define WL_DPT_ON() 0
+#define WL_WOWL_ON() 0
+#define WL_SCAN_ON() (wl_msg_level2 & WL_SCAN_VAL)
+#define WL_BTA_ON() 0
+#define WL_P2P_ON() 0
+#define WL_ITFR_ON() 0
+#define WL_MCHAN_ON() 0
+#define WL_TDLS_ON() 0
+#define WL_MCNX_ON() 0
+#define WL_PROT_ON() 0
+#define WL_PSTA_ON() 0
+#define WL_TRF_MGMT_ON() 0
+#define WL_LPC_ON() 0
+#define WL_L2FILTER_ON() 0
+#define WL_TXBF_ON() 0
+#define WL_P2PO_ON() 0
+#ifdef WLAWDL
+#define WL_AWDL_ON() 0
+#endif /* WLAWDL */
+#define WL_TSLOG_ON() 0
+#define WL_WNM_ON() 0
+#define WL_PCIE_ON() 0
+#define WL_MUMIMO_ON() 0
+#define WL_MESH_ON() 0
+#define WL_MBO_DBG_ON() 0
+#define WL_RANDMAC_DBG_ON() 0
+#define WL_ADPS_ON() 0
+#define WL_OCE_DBG_ON() 0
+#define WL_FILS_DBG_ON() 0
+#define WL_ASSOC_AP_ON() (wl_msg_level3 & WL_ASSOC_AP_VAL)
+
+#else /* !BCMDBG */
+
+/* DBGONLY() macro to reduce ifdefs in code for statements that are only needed when
+ * BCMDBG is defined.
+ */
+#define DBGONLY(x)
+
+/* To disable a message completely ... until you need it again */
+#define WL_NONE(args)
+
+#ifdef BCMDBG_ERR
+/* ROM and ROML optimized builds */
+#if defined(EVENT_LOG_COMPILE) && defined(ERR_USE_EVENT_LOG)
+#if defined(ERR_USE_EVENT_LOG_RA)
+#define WL_ERROR(args) EVENT_LOG_RA(EVENT_LOG_TAG_WL_ERROR, args)
+#define WL_WARN(args) EVENT_LOG_RA(EVENT_LOG_TAG_WL_WARN, args)
+#else
+#define WL_ERROR(args) EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_WL_ERROR, args)
+#define WL_WARN(args) EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_WL_WARN, args)
+#endif /* ERR_USE_EVENT_LOG_RA */
+
+#else
+#define WL_ERROR(args) WL_PRINT(args)
+#define WL_WARN(args) WL_PRINT(args)
+#endif /* ERR_USE_EVENT_LOG */
+#else
+#define WL_ERROR(args)
+#define WL_WARN(args)
+#endif /* BCMDBG_ERR */
+
+#ifdef TS_PLOT
+#define TS_LOG_DBG(x) x
+#define TS_LOG(args) EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_TSLOG, args)
+#else
+#define TS_LOG_DBG(x)
+#define TS_LOG(args)
+#endif
+
+#ifdef BCMDBG_ERR
+/* ROM and ROML optimized builds */
+#if defined(EVENT_LOG_COMPILE) && defined(ERR_USE_EVENT_LOG)
+#if defined(ERR_USE_EVENT_LOG_RA)
+#define KM_ERR(args) EVENT_LOG_RA(EVENT_LOG_TAG_KM_ERROR, args)
+#else
+#define KM_ERR(args) EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_KM_ERROR, args)
+#endif /* ERR_USE_EVENT_LOG_RA */
+#else
+#define KM_ERR(args) WL_PRINT(args)
+#endif /* ERR_USE_EVENT_LOG */
+#else
+#define KM_ERR(args)
+#endif /* BCMDBG_ERR */
+
+#ifdef BCMDBG_ERR
+/* ROM and ROML optimized builds */
+#if defined(EVENT_LOG_COMPILE) && defined(ERR_USE_EVENT_LOG)
+#if defined(ERR_USE_EVENT_LOG_RA)
+#define WL_AMPDU_ERR(args) EVENT_LOG_RA(EVENT_LOG_TAG_AMPDU_ERROR, args)
+#else
+#define WL_AMPDU_ERR(args) EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_AMPDU_ERROR, args)
+#endif /* ERR_USE_EVENT_LOG_RA */
+#else
+#define WL_AMPDU_ERR(args) WL_PRINT(args)
+#endif /* ERR_USE_EVENT_LOG */
+#else
+#define WL_AMPDU_ERR(args)
+#endif /* BCMDBG_ERR */
+
+#define WL_TRACE(args)
+#ifdef WLMSG_PRHDRS
+#define WL_PRHDRS_MSG(args) WL_PRINT(args)
+#define WL_PRHDRS(i, p, f, t, r, l) wlc_print_hdrs(i, p, f, t, r, l)
+#else
+#define WL_PRHDRS_MSG(args)
+#define WL_PRHDRS(i, p, f, t, r, l)
+#endif
+#ifdef WLMSG_PRPKT
+#define WL_PRPKT(m, b, n) prhex(m, b, n)
+#else
+#define WL_PRPKT(m, b, n)
+#endif
+#ifdef WLMSG_INFORM
+#define WL_INFORM(args) WL_PRINT(args)
+#else
+#define WL_INFORM(args)
+#endif
+#define WL_TMP(args)
+#ifdef WLMSG_OID
+#define WL_OID(args) WL_PRINT(args)
+#else
+#define WL_OID(args)
+#endif
+#define WL_RATE(args)
+
+#ifdef BCMDBG_ERR
+#if defined(EVENT_LOG_COMPILE) && defined(ERR_USE_EVENT_LOG)
+#if defined(ERR_USE_EVENT_LOG_RA)
+#define WL_IE_ERROR(args) EVENT_LOG_RA(EVENT_LOG_TAG_IE_ERROR, args)
+#else
+#define WL_IE_ERROR(args) EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_IE_ERROR, args)
+#endif /* ERR_USE_EVENT_LOG_RA */
+#else
+#define WL_IE_ERROR(args) WL_PRINT(args)
+#endif /* ERR_USE_EVENT_LOG */
+#else
+#define WL_IE_ERROR(args)
+#endif /* BCMDBG_ERR */
+
+#ifdef WLMSG_WNM_BSSTRANS
+#if defined(EVENT_LOG_COMPILE)
+#if defined(USE_EVENT_LOG_RA)
+#define WL_WBTEXT_INFO(args) EVENT_LOG_RA(EVENT_LOG_TAG_WNM_BSSTRANS_INFO, args)
+#else
+#define WL_WBTEXT_INFO(args) \
+ EVENT_LOG_FAST_CAST_PAREN_ARGS(EVENT_LOG_TAG_WNM_BSSTRANS_INFO, args)
+#endif /* USE_EVENT_LOG_RA */
+#else
+#define WL_WBTEXT_INFO(args) WL_PRINT(args)
+#endif /* EVENT_LOG_COMPILE */
+#else
+#define WL_WBTEXT_INFO(args)
+#endif /* WLMSG_WNM_BSSTRANS */
+
+#if defined(BCMPCIE_LATENCY) && defined(BCMPCIE_LATENCY_DEBUG)
+#if defined(EVENT_LOG_COMPILE)
+#if defined(USE_EVENT_LOG_RA)
+#define WL_LATENCY_INFO(args) EVENT_LOG_RA(EVENT_LOG_TAG_LATENCY_INFO, args)
+#else
+#define WL_LATENCY_INFO(args) \
+ EVENT_LOG_FAST_CAST_PAREN_ARGS(EVENT_LOG_TAG_LATENCY_INFO, args)
+#endif /* USE_EVENT_LOG_RA */
+#else
+#define WL_LATENCY_INFO(args) WL_PRINT(args)
+#endif /* EVENT_LOG_COMPILE */
+#else
+#define WL_LATENCY_INFO(args)
+#endif /* BCMPCIE_LATENCY && BCMPCIE_LATENCY_DEBUG */
+
+#ifdef BCMDBG_ERR
+#if defined(ERR_USE_EVENT_LOG) && defined(EVENT_LOG_COMPILE)
+#if defined(ERR_USE_EVENT_LOG_RA)
+#define WL_WBTEXT_ERROR(args) EVENT_LOG_RA(EVENT_LOG_TAG_WNM_BSSTRANS_ERR, args)
+#else
+#define WL_WBTEXT_ERROR(args) \
+ EVENT_LOG_FAST_CAST_PAREN_ARGS(EVENT_LOG_TAG_WNM_BSSTRANS_ERR, args)
+#endif /* ERR_USE_EVENT_LOG_RA */
+#else
+#define WL_WBTEXT_ERROR(args) WL_PRINT(args)
+#endif /* ERR_USE_EVENT_LOG && EVENT_LOG_COMPILE */
+#else
+#define WL_WBTEXT_ERROR(args)
+#endif /* BCMDBG_ERR */
+
+#ifdef WLMSG_WNM_BSSTRANS
+#if defined(EVENT_LOG_COMPILE)
+#if defined(USE_EVENT_LOG_RA)
+#define WL_WNM_PDT_INFO(args) EVENT_LOG_RA(EVENT_LOG_TAG_WNM_BSSTRANS_INFO, args)
+#else
+#define WL_WNM_PDT_INFO(args) \
+ EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_WNM_BSSTRANS_INFO, args)
+#endif /* USE_EVENT_LOG_RA */
+#else
+#define WL_WNM_PDT_INFO(args) WL_PRINT(args)
+#endif /* EVENT_LOG_COMPILE */
+#else
+#define WL_WNM_PDT_INFO(args)
+#endif /* WLMSG_WNM_BSSTRANS */
+
+#ifdef BCMDBG_ERR
+#if defined(ERR_USE_EVENT_LOG) && defined(EVENT_LOG_COMPILE)
+#if defined(ERR_USE_EVENT_LOG_RA)
+#define WL_WNM_PDT_ERROR(args) EVENT_LOG_RA(EVENT_LOG_TAG_WNM_BSSTRANS_ERR, args)
+#else
+#define WL_WNM_PDT_ERROR(args) \
+ EVENT_LOG_FAST_CAST_PAREN_ARGS(EVENT_LOG_TAG_WNM_BSSTRANS_ERR, args)
+#endif /* ERR_USE_EVENT_LOG_RA */
+#else
+#define WL_WNM_PDT_ERROR(args) WL_PRINT(args)
+#endif /* ERR_USE_EVENT_LOG && EVENT_LOG_COMPILE */
+#else
+#define WL_WNM_PDT_ERROR(args)
+#endif /* BCMDBG_ERR */
+
+#ifdef WLMSG_ASSOC
+#if defined(EVENT_LOG_COMPILE)
+#if defined(USE_EVENT_LOG_RA)
+#define WL_ASSOC(args) EVENT_LOG_RA(EVENT_LOG_TAG_WL_ASSOC_LOG, args)
+#else
+#define WL_ASSOC(args) EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_WL_ASSOC_LOG, args)
+#define WL_ASSOC_DP(args) EVENT_LOG_FAST_CAST_PAREN_ARGS(EVENT_LOG_TAG_WL_ASSOC_LOG, args)
+#endif /* USE_EVENT_LOG_RA */
+#else
+#define WL_ASSOC(args) WL_PRINT(args)
+#endif /* EVENT_LOG_COMPILE */
+#define WL_ASSOC_AP(args) WL_PRINT(args)
+#else
+#define WL_ASSOC(args)
+#define WL_ASSOC_AP(args)
+#endif /* WLMSG_ASSOC */
+
+#ifdef BCMDBG_ERR
+/* ROM and ROML optimized builds */
+#if defined(EVENT_LOG_COMPILE) && defined(ERR_USE_EVENT_LOG)
+#if defined(ERR_USE_EVENT_LOG_RA)
+#define WL_ASSOC_ERROR(args) EVENT_LOG_RA(EVENT_LOG_TAG_ASSOC_ERROR, args)
+#else
+#define WL_ASSOC_ERROR(args) EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_ASSOC_ERROR, args)
+#endif /* ERR_USE_EVENT_LOG_RA */
+#else
+#define WL_ASSOC_ERROR(args) WL_PRINT(args)
+#endif /* ERR_USE_EVENT_LOG */
+#else
+#define WL_ASSOC_ERROR(args)
+#endif /* BCMDBG_ERR */
+
+#ifdef BCMDBG_ERR
+/* ROM and ROML optimized builds */
+#if defined(EVENT_LOG_COMPILE) && defined(ERR_USE_EVENT_LOG)
+#if defined(ERR_USE_EVENT_LOG_RA)
+#define WL_SCAN_ERROR(args) EVENT_LOG_RA(EVENT_LOG_TAG_SCAN_ERR, args)
+#else
+#define WL_SCAN_ERROR(args) EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_SCAN_ERR, args)
+#endif /* ERR_USE_EVENT_LOG_RA */
+#else
+#define WL_SCAN_ERROR(args) WL_PRINT(args)
+#endif /* ERR_USE_EVENT_LOG */
+#else
+#define WL_SCAN_ERROR(args)
+#endif /* BCMDBG_ERR */
+
+#define WL_PRUSR(m, b, n)
+
+#ifdef WLMSG_PS
+#if defined(EVENT_LOG_COMPILE)
+#if defined(USE_EVENT_LOG_RA)
+#define WL_PS(args) EVENT_LOG_RA(EVENT_LOG_TAG_WL_PS_LOG, args)
+#else
+#define WL_PS(args) EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_WL_PS_LOG, args)
+#define WL_PS_DP(args) EVENT_LOG_FAST_CAST_PAREN_ARGS(EVENT_LOG_TAG_WL_PS_LOG, args)
+#endif /* USE_EVENT_LOG_RA */
+#else
+#define WL_PS(args) WL_PRINT(args)
+#endif /* EVENT_LOG_COMPILE */
+#else
+#define WL_PS(args)
+#endif /* WLMSG_PS */
+
+#ifdef BCMDBG_ERR
+/* ROM and ROML optimized builds */
+#if defined(EVENT_LOG_COMPILE) && defined(ERR_USE_EVENT_LOG)
+#if defined(ERR_USE_EVENT_LOG_RA)
+#define WL_AMSDU_ERROR(args) EVENT_LOG_RA(EVENT_LOG_TAG_AMSDU_ERROR, args)
+#else
+#define WL_AMSDU_ERROR(args) EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_AMSDU_ERROR, args)
+#endif /* ERR_USE_EVENT_LOG_RA */
+#else
+#define WL_AMSDU_ERROR(args) WL_PRINT(args)
+#endif /* ERR_USE_EVENT_LOG */
+#else
+#define WL_AMSDU_ERROR(args)
+#endif /* BCMDBG_ERR */
+
+#ifdef BCMDBG_PRINT_EAP_PKT_INFO
+#if defined(EVENT_LOG_COMPILE)
+#if defined(USE_EVENT_LOG_RA)
+#define WL_8021X_ERR(args) do {printf args; \
+ EVENT_LOG_RA(EVENT_LOG_TAG_4WAYHANDSHAKE, args);} while (0)
+#else
+#define WL_8021X_ERR(args) do {printf args; \
+ EVENT_LOG_FAST_CAST_PAREN_ARGS(EVENT_LOG_TAG_4WAYHANDSHAKE, args);} while (0)
+#endif /* USE_EVENT_LOG_RA */
+#else
+#define WL_8021X_ERR(args) WL_PRINT(args)
+#endif /* EVENT_LOG_COMPILE */
+#else
+#define WL_8021X_ERR(args)
+#endif /* BCMDBG_PRINT_EAP_PKT_INFO */
+
+#ifdef WLMSG_ROAM
+#if defined(EVENT_LOG_COMPILE)
+#if defined(USE_EVENT_LOG_RA)
+#define WL_ROAM(args) EVENT_LOG_RA(EVENT_LOG_TAG_WL_ROAM_LOG, args)
+#else
+#define WL_ROAM(args) EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_WL_ROAM_LOG, args)
+#define WL_ROAM_DP(args) EVENT_LOG_FAST_CAST_PAREN_ARGS(EVENT_LOG_TAG_WL_ROAM_LOG, args)
+#endif /* USE_EVENT_LOG_RA */
+#else
+#define WL_ROAM(args) WL_PRINT(args)
+#endif /* EVENT_LOG_COMPILE */
+#else
+#define WL_ROAM(args)
+#endif /* WLMSG_ROAM */
+
+#define WL_PORT(args)
+#define WL_DUAL(args)
+#define WL_REGULATORY(args)
+
+#ifdef WLMSG_MPC
+#if defined(EVENT_LOG_COMPILE)
+#if defined(USE_EVENT_LOG_RA)
+#define WL_MPC(args) EVENT_LOG_RA(EVENT_LOG_TAG_WL_MPC_LOG, args)
+#else
+#define WL_MPC(args) EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_WL_MPC_LOG, args)
+#define WL_MPC_DP(args) EVENT_LOG_FAST_CAST_PAREN_ARGS(EVENT_LOG_TAG_WL_MPC_LOG, args)
+#endif /* USE_EVENT_LOG_RA */
+#else
+#define WL_MPC(args) WL_PRINT(args)
+#endif /* EVENT_LOG_COMPILE */
+#else
+#define WL_MPC(args)
+#endif /* WLMSG_MPC */
+
+#define WL_APSTA(args)
+#define WL_APSTA_BCN(args)
+#define WL_APSTA_TX(args)
+#define WL_APSTA_TSF(args)
+#define WL_APSTA_BSSID(args)
+#define WL_BA(args)
+#define WL_MBSS(args)
+#define WL_MODE_SWITCH(args)
+#define WL_PROTO(args)
+
+#define WL_CAC(args)
+#define WL_AMSDU(args)
+#define WL_AMPDU(args)
+#define WL_FFPLD(args)
+#define WL_MCHAN(args)
+#define WL_BCNTRIM_DBG(args)
+
+/* Define WLMSG_DFS automatically for WLTEST builds */
+#if defined(WLTEST) && !defined(WLTEST_DISABLED)
+#ifndef WLMSG_DFS
+#define WLMSG_DFS
+#endif
+#endif /* WLTEST */
+
+#ifdef WLMSG_DFS
+#define WL_DFS(args) do {if (wl_msg_level & WL_DFS_VAL) WL_PRINT(args);} while (0)
+#else /* WLMSG_DFS */
+#define WL_DFS(args)
+#endif /* WLMSG_DFS */
+#define WL_WOWL(args)
+
+#ifdef WLMSG_SCAN
+#if defined(EVENT_LOG_COMPILE)
+#if defined(USE_EVENT_LOG_RA)
+#define WL_SCAN(args) EVENT_LOG_RA(EVENT_LOG_TAG_SCAN_TRACE_LOW, args)
+#define WL_SCAN_WARN(args) EVENT_LOG_RA(EVENT_LOG_TAG_SCAN_WARN, args)
+#else
+#define WL_SCAN(args) \
+ EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_SCAN_TRACE_LOW, args)
+#define WL_SCAN_DP(args) \
+ EVENT_LOG_FAST_CAST_PAREN_ARGS(EVENT_LOG_TAG_SCAN_TRACE_LOW, args)
+#define WL_SCAN_WARN(args) EVENT_LOG_FAST_CAST_PAREN_ARGS(EVENT_LOG_TAG_SCAN_WARN, args)
+#endif /* USE_EVENT_LOG_RA */
+#else
+#define WL_SCAN(args) WL_PRINT(args)
+#define WL_SCAN_WARN(args) WL_PRINT(args)
+#endif /* EVENT_LOG_COMPILE */
+#else
+#define WL_SCAN(args)
+#define WL_SCAN_WARN(args)
+#endif /* WLMSG_SCAN */
+
+#define WL_COEX(args)
+#define WL_RTDC(w, s, i, j)
+#define WL_RTDC2(w, s, i, j)
+#define WL_CHANINT(args)
+#ifdef WLMSG_BTA
+#define WL_BTA(args) WL_PRINT(args)
+#else
+#define WL_BTA(args)
+#endif
+#define WL_WMF(args)
+#define WL_P2P(args)
+#define WL_ITFR(args)
+#define WL_TDLS(args)
+
+#ifdef WLMSG_MCNX
+#if defined(EVENT_LOG_COMPILE)
+#if defined(USE_EVENT_LOG_RA)
+#define WL_MCNX(args) EVENT_LOG_RA(EVENT_LOG_TAG_WL_MCNX_LOG, args)
+#else
+#define WL_MCNX(args) EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_WL_MCNX_LOG, args)
+#define WL_MCNX_DP(args) EVENT_LOG_FAST_CAST_PAREN_ARGS(EVENT_LOG_TAG_WL_MCNX_LOG, args)
+#endif /* USE_EVENT_LOG_RA */
+#else
+#define WL_MCNX(args) WL_PRINT(args)
+#endif /* EVENT_LOG_COMPILE */
+#else
+#define WL_MCNX(args)
+#endif /* WLMSG_MCNX */
+
+#define WL_PROT(args)
+#define WL_PSTA(args)
+#define WL_TBTT(args)
+#define WL_TRF_MGMT(args)
+#define WL_L2FILTER(args)
+#define WL_MQ(args)
+#define WL_P2PO(args)
+#ifdef WLAWDL
+#define WL_AWDL(args)
+#endif /* WLAWDL */
+#define WL_WNM(args)
+#define WL_TXBF(args)
+#define WL_TSLOG(w, s, i, j)
+#define WL_FBT(args)
+#define WL_MUMIMO(args)
+#ifdef WLMSG_MESH
+#define WL_MESH(args) WL_PRINT(args)
+#define WL_MESH_AMPE(args) WL_PRINT(args)
+#define WL_MESH_ROUTE(args) WL_PRINT(args)
+#define WL_MESH_BCN(args)
+#else
+#define WL_MESH(args)
+#define WL_MESH_AMPE(args)
+#define WL_MESH_ROUTE(args)
+#define WL_MESH_BCN(args)
+#endif
+#define WL_SWDIV(args)
+#define WL_ADPS(args)
+
+#ifdef BCMDBG_ERR
+#define WL_ERROR_ON() 1
+#else
+#define WL_ERROR_ON() 0
+#endif
+#define WL_TRACE_ON() 0
+#ifdef WLMSG_PRHDRS
+#define WL_PRHDRS_ON() 1
+#else
+#define WL_PRHDRS_ON() 0
+#endif
+#ifdef WLMSG_PRPKT
+#define WL_PRPKT_ON() 1
+#else
+#define WL_PRPKT_ON() 0
+#endif
+#ifdef WLMSG_INFORM
+#define WL_INFORM_ON() 1
+#else
+#define WL_INFORM_ON() 0
+#endif
+#ifdef WLMSG_OID
+#define WL_OID_ON() 1
+#else
+#define WL_OID_ON() 0
+#endif
+#define WL_TMP_ON() 0
+#define WL_RATE_ON() 0
+#ifdef WLMSG_ASSOC
+#define WL_ASSOC_ON() 1
+#define WL_ASSOC_AP_ON() 1
+#else
+#define WL_ASSOC_ON() 0
+#define WL_ASSOC_AP_ON() 0
+#endif
+#define WL_PORT_ON() 0
+#ifdef WLMSG_WSEC
+#define WL_WSEC_ON() 1
+#define WL_WSEC_DUMP_ON() 1
+#else
+#define WL_WSEC_ON() 0
+#define WL_WSEC_DUMP_ON() 0
+#endif
+#ifdef WLMSG_MPC
+#define WL_MPC_ON() 1
+#else
+#define WL_MPC_ON() 0
+#endif
+#define WL_REGULATORY_ON() 0
+
+#define WL_APSTA_ON() 0
+#define WL_BA_ON() 0
+#define WL_MBSS_ON() 0
+#define WL_MODE_SWITCH_ON() 0
+#ifdef WLMSG_DFS
+#define WL_DFS_ON() 1
+#else /* WLMSG_DFS */
+#define WL_DFS_ON() 0
+#endif /* WLMSG_DFS */
+#ifdef WLMSG_SCAN
+#define WL_SCAN_ON() 1
+#else
+#define WL_SCAN_ON() 0
+#endif
+#ifdef WLMSG_BTA
+#define WL_BTA_ON() 1
+#else
+#define WL_BTA_ON() 0
+#endif
+#define WL_WMF_ON() 0
+#define WL_P2P_ON() 0
+#define WL_MCHAN_ON() 0
+#define WL_TDLS_ON() 0
+#define WL_MCNX_ON() 0
+#define WL_PROT_ON() 0
+#define WL_TBTT_ON() 0
+#define WL_LPC_ON() 0
+#define WL_L2FILTER_ON() 0
+#define WL_MQ_ON() 0
+#define WL_P2PO_ON() 0
+#ifdef WLAWDL
+#define WL_AWDL_ON() 0
+#endif /* WLAWDL */
+#define WL_TXBF_ON() 0
+#define WL_TSLOG_ON() 0
+#define WL_MUMIMO_ON() 0
+#define WL_SWDIV_ON() 0
+
+#define WL_AMPDU_UPDN(args)
+#define WL_AMPDU_RX(args)
+#define WL_AMPDU_TX(args)
+#define WL_AMPDU_CTL(args)
+#define WL_AMPDU_HW(args)
+#define WL_AMPDU_HWTXS(args)
+#define WL_AMPDU_HWDBG(args)
+#define WL_AMPDU_STAT(args)
+#define WL_AMPDU_ERR_ON() 0
+#define WL_AMPDU_HW_ON() 0
+#define WL_AMPDU_HWTXS_ON() 0
+
+#define WL_WNM_ON() 0
+#ifdef WLMSG_MBO
+#define WL_MBO_DBG_ON() 1
+#else
+#define WL_MBO_DBG_ON() 0
+#endif /* WLMSG_MBO */
+#ifdef WLMSG_RANDMAC
+#define WL_RANDMAC_DBG_ON() 1
+#else
+#define WL_RANDMAC_DBG_ON() 0
+#endif /* WLMSG_RANDMAC */
+#define WL_ADPS_ON() 0
+#ifdef WLMSG_OCE
+#define WL_OCE_DBG_ON() 1
+#else
+#define WL_OCE_DBG_ON() 0
+#endif /* WLMSG_OCE */
+#ifdef WLMSG_FILS
+#define WL_FILS_DBG_ON() 1
+#else
+#define WL_FILS_DBG_ON() 0
+#endif /* WLMSG_FILS */
+
+#define WL_APSTA_UPDN(args)
+#define WL_APSTA_RX(args)
+
+#ifdef WLMSG_WSEC
+#if defined(EVENT_LOG_COMPILE)
+#if defined(USE_EVENT_LOG_RA)
+#define WL_WSEC(args) EVENT_LOG_RA(EVENT_LOG_TAG_WL_WSEC_LOG, args)
+#define WL_WSEC_DUMP(args) EVENT_LOG_RA(EVENT_LOG_TAG_WL_WSEC_DUMP, args)
+#else
+#define WL_WSEC(args) EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_WL_WSEC_LOG, args)
+#define WL_WSEC_DUMP(args) EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_WL_WSEC_DUMP, args)
+#define WL_WSEC_DP(args) EVENT_LOG_FAST_CAST_PAREN_ARGS(EVENT_LOG_TAG_WL_WSEC_LOG, args)
+#define WL_WSEC_DUMP_DP(args) EVENT_LOG_FAST_CAST_PAREN_ARGS(EVENT_LOG_TAG_WL_WSEC_DUMP, args)
+#endif /* USE_EVENT_LOG_RA */
+#else
+#define WL_WSEC(args) WL_PRINT(args)
+#define WL_WSEC_DUMP(args) WL_PRINT(args)
+#endif /* EVENT_LOG_COMPILE */
+#else
+#define WL_WSEC(args)
+#define WL_WSEC_DUMP(args)
+#endif /* WLMSG_WSEC */
+
+#ifdef WLMSG_MBO
+#if defined(EVENT_LOG_COMPILE)
+#if defined(USE_EVENT_LOG_RA)
+#define WL_MBO_DBG(args) EVENT_LOG_RA(EVENT_LOG_TAG_MBO_DBG, args)
+#define WL_MBO_INFO(args) EVENT_LOG_RA(EVENT_LOG_TAG_MBO_INFO, args)
+#else
+#define WL_MBO_DBG(args) \
+ EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_MBO_DBG, args)
+#define WL_MBO_INFO(args) \
+ EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_MBO_INFO, args)
+#endif /* USE_EVENT_LOG_RA */
+#else
+#define WL_MBO_DBG(args) WL_PRINT(args)
+#define WL_MBO_INFO(args) WL_PRINT(args)
+#endif /* EVENT_LOG_COMPILE */
+#else
+#define WL_MBO_DBG(args)
+#define WL_MBO_INFO(args)
+#endif /* WLMSG_MBO */
+
+#ifdef BCMDBG_ERR
+#if defined(EVENT_LOG_COMPILE) && defined(ERR_USE_EVENT_LOG)
+#if defined(ERR_USE_EVENT_LOG_RA)
+#define WL_MBO_ERR(args) EVENT_LOG_RA(EVENT_LOG_TAG_MBO_ERR, args)
+#else
+#define WL_MBO_ERR(args) EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_MBO_ERR, args)
+#endif /* ERR_USE_EVENT_LOG_RA */
+#else
+#define WL_MBO_ERR(args) WL_PRINT(args)
+#endif /* ERR_USE_EVENT_LOG */
+#else
+#define WL_MBO_ERR(args) WL_PRINT(args)
+#endif /* BCMDBG_ERR */
+
+#ifdef WLMSG_RANDMAC
+#if defined(EVENT_LOG_COMPILE)
+#if defined(USE_EVENT_LOG_RA)
+#define WL_RANDMAC_DBG(args) EVENT_LOG_RA(EVENT_LOG_TAG_RANDMAC_DBG, args)
+#define WL_RANDMAC_INFO(args) EVENT_LOG_RA(EVENT_LOG_TAG_RANDMAC_INFO, args)
+#else
+#define WL_RANDMAC_DBG(args) \
+ EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_RANDMAC_DBG, args)
+#define WL_RANDMAC_INFO(args) \
+ EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_RANDMAC_INFO, args)
+#endif /* USE_EVENT_LOG_RA */
+#else
+#define WL_RANDMAC_DBG(args) WL_PRINT(args)
+#define WL_RANDMAC_INFO(args) WL_PRINT(args)
+#endif /* EVENT_LOG_COMPILE */
+#else
+#define WL_RANDMAC_DBG(args)
+#define WL_RANDMAC_INFO(args)
+#endif /* WLMSG_RANDMAC */
+
+#ifdef BCMDBG_ERR
+#if defined(EVENT_LOG_COMPILE) && defined(ERR_USE_EVENT_LOG)
+#if defined(ERR_USE_EVENT_LOG_RA)
+#define WL_RANDMAC_ERR(args) EVENT_LOG_RA(EVENT_LOG_TAG_RANDMAC_ERR, args)
+#else
+#define WL_RANDMAC_ERR(args) \
+ EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_RANDMAC_ERR, args)
+#endif /* ERR_USE_EVENT_LOG_RA */
+#else
+#define WL_RANDMAC_ERR(args) WL_PRINT(args)
+#endif /* ERR_USE_EVENT_LOG */
+#else
+#define WL_RANDMAC_ERR(args) WL_PRINT(args)
+#endif /* BCMDBG_ERR */
+
+#ifdef WLMSG_OCE
+#if defined(EVENT_LOG_COMPILE)
+#if defined(USE_EVENT_LOG_RA)
+#define WL_OCE_DBG(args) EVENT_LOG_RA(EVENT_LOG_TAG_OCE_DBG, args)
+#define WL_OCE_INFO(args) EVENT_LOG_RA(EVENT_LOG_TAG_OCE_INFO, args)
+#else
+#define WL_OCE_DBG(args) \
+ EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_OCE_DBG, args)
+#define WL_OCE_INFO(args) \
+ EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_OCE_INFO, args)
+#endif /* USE_EVENT_LOG_RA */
+#else
+#define WL_OCE_DBG(args) WL_PRINT(args)
+#define WL_OCE_INFO(args) WL_PRINT(args)
+#endif /* EVENT_LOG_COMPILE */
+#else
+#define WL_OCE_DBG(args)
+#define WL_OCE_INFO(args)
+#endif /* WLMSG_OCE */
+
+#ifdef WLMSG_FILS
+#if defined(EVENT_LOG_COMPILE)
+#if defined(USE_EVENT_LOG_RA)
+#define WL_FILS_DBG(args) EVENT_LOG_RA(EVENT_LOG_TAG_FILS_DBG, args)
+#define WL_FILS_INFO(args) EVENT_LOG_RA(EVENT_LOG_TAG_FILS_INFO, args)
+#else
+#define WL_FILS_DBG(args) \
+ EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_FILS_DBG, args)
+#define WL_FILS_INFO(args) \
+ EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_FILS_INFO, args)
+#endif /* USE_EVENT_LOG_RA */
+#else
+#define WL_FILS_DBG(args) WL_PRINT(args)
+#define WL_FILS_INFO(args) WL_PRINT(args)
+#endif /* EVENT_LOG_COMPILE */
+#else
+#define WL_FILS_DBG(args)
+#define WL_FILS_INFO(args)
+#endif /* WLMSG_FILS */
+#ifdef BCMDBG_ERR
+#if defined(ERR_USE_EVENT_LOG) && defined(EVENT_LOG_COMPILE)
+#if defined(ERR_USE_EVENT_LOG_RA)
+#define WL_OCE_ERR(args) EVENT_LOG_RA(EVENT_LOG_TAG_OCE_ERR, args)
+#define WL_FILS_ERR(args) EVENT_LOG_RA(EVENT_LOG_TAG_FILS_ERROR, args)
+#else
+#define WL_OCE_ERR(args) \
+ EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_OCE_ERR, args)
+#define WL_FILS_ERR(args) \
+ EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_FILS_ERROR, args)
+#endif /* ERR_USE_EVENT_LOG_RA */
+#else
+#define WL_OCE_ERR(args) WL_PRINT(args)
+#define WL_FILS_ERR(args) WL_PRINT(args)
+#endif /* ERR_USE_EVENT_LOG && EVENT_LOG_COMPILE */
+#else
+#define WL_OCE_ERR(args) WL_PRINT(args)
+#define WL_FILS_ERR(args) WL_PRINT(args)
+#endif /* BCMDBG_ERR */
+
+#define WL_PCIE(args) do {if (wl_msg_level2 & WL_PCIE_VAL) WL_PRINT(args);} while (0)
+#define WL_PCIE_ON() (wl_msg_level2 & WL_PCIE_VAL)
+#define WL_PFN(args) do {if (wl_msg_level & WL_PFN_VAL) WL_PRINT(args);} while (0)
+#define WL_PFN_ON() (wl_msg_level & WL_PFN_VAL)
+#define WL_PMDUR(args)
+
+#ifdef WLMSG_BAM
+#if defined(EVENT_LOG_COMPILE)
+#ifdef USE_EVENT_LOG_RA
+#define WL_BAM_ERR(args) EVENT_LOG_RA(EVENT_LOG_TAG_BAM, args)
+#else
+#define WL_BAM_ERR(args) EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_BAM, args)
+#endif /* USE_EVENT_LOG_RA */
+#else
+#define WL_BAM_ERR(args) WL_PRINT(args)
+#endif /* EVENT_LOG_COMPILE */
+#else
+#define WL_BAM_ERR(args)
+#endif /* WLMSG_BAM */
+#endif /* BCMDBG */
+
+#ifdef BCMDBG_ERR
+/* ROM and ROML optimized builds */
+#if defined(EVENT_LOG_COMPILE) && defined(ERR_USE_EVENT_LOG)
+#if defined(ERR_USE_EVENT_LOG_RA)
+#define WL_HE_INFO(args) EVENT_LOG_RA(EVENT_LOG_TAG_WL_HE_INFO, args)
+#define WL_HE_TRACE(args) EVENT_LOG_RA(EVENT_LOG_TAG_WL_HE_TRACE, args)
+#define WL_HE_WARN(args) EVENT_LOG_RA(EVENT_LOG_TAG_WL_HE_WARN, args)
+#define WL_HE_ERR(args) EVENT_LOG_RA(EVENT_LOG_TAG_WL_HE_ERROR, args)
+#define WL_TWT_INFO(args) EVENT_LOG_RA(EVENT_LOG_TAG_WL_TWT_INFO, args)
+#define WL_TWT_TRACE(args) EVENT_LOG_RA(EVENT_LOG_TAG_WL_TWT_TRACE, args)
+#define WL_TWT_WARN(args) EVENT_LOG_RA(EVENT_LOG_TAG_WL_TWT_WARN, args)
+#define WL_TWT_ERR(args) EVENT_LOG_RA(EVENT_LOG_TAG_WL_TWT_ERROR, args)
+#define WL_HEB_ERR(args) EVENT_LOG_RA(EVENT_LOG_TAG_WL_HEB_ERROR, args)
+#define WL_HEB_TRACE(args) EVENT_LOG_RA(EVENT_LOG_TAG_WL_HEB_TRACE, args)
+#else
+#define WL_HE_INFO(args) EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_WL_HE_INFO, args)
+#define WL_HE_TRACE(args) EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_WL_HE_TRACE, args)
+#define WL_HE_WARN(args) EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_WL_HE_WARN, args)
+#define WL_HE_ERR(args) EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_WL_HE_ERROR, args)
+#define WL_TWT_INFO(args) EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_WL_TWT_INFO, args)
+#define WL_TWT_TRACE(args) EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_WL_TWT_TRACE, args)
+#define WL_TWT_WARN(args) EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_WL_TWT_WARN, args)
+#define WL_TWT_ERR(args) EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_WL_TWT_ERROR, args)
+#define WL_HEB_ERR(args) EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_WL_HEB_ERROR, args)
+#define WL_HEB_TRACE(args) EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_WL_HEB_TRACE, args)
+#endif /* ERR_USE_EVENT_LOG_RA */
+#else
+#define WL_HE_INFO(args) WL_PRINT(args)
+#define WL_HE_TRACE(args) WL_PRINT(args)
+#define WL_HE_WARN(args) WL_PRINT(args)
+#define WL_HE_ERR(args) WL_PRINT(args)
+#define WL_TWT_INFO(args) WL_PRINT(args)
+#define WL_TWT_TRACE(args) WL_PRINT(args)
+#define WL_TWT_WARN(args) WL_PRINT(args)
+#define WL_TWT_ERR(args) WL_PRINT(args)
+#define WL_HEB_ERR(args) WL_PRINT(args)
+#define WL_HEB_TRACE(args) WL_PRINT(args)
+#endif /* ERR_USE_EVENT_LOG */
+#else
+#define WL_HE_INFO(args)
+#define WL_HE_TRACE(args)
+#define WL_HE_WARN(args)
+#define WL_HE_ERR(args)
+#define WL_TWT_INFO(args)
+#define WL_TWT_TRACE(args)
+#define WL_TWT_WARN(args)
+#define WL_TWT_ERR(args)
+#define WL_HEB_ERR(args)
+#define WL_HEB_TRACE(args)
+#endif /* BCMDBG_ERR */
+
+#ifdef WLMSG_TPA
+#ifdef EVENT_LOG_COMPILE
+#ifdef USE_EVENT_LOG_RA
+#define WL_TPA_ERR(args) EVENT_LOG_RA(EVENT_LOG_TAG_TPA_ERR, args)
+#define WL_TPA_INFO(args) EVENT_LOG_RA(EVENT_LOG_TAG_TPA_INFO, args)
+#else
+#define WL_TPA_ERR(args) EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_TPA_ERR, args)
+#define WL_TPA_INFO(args) EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_TPA_INFO, args)
+#endif /* USE_EVENT_LOG_RA */
+#else
+#define WL_TPA_ERR(args) WL_PRINT(args)
+#define WL_TPA_INFO(args) WL_INFORM(args)
+#endif /* EVENT_LOG_COMPILE */
+#else
+#ifndef WL_TPA_ERR
+#define WL_TPA_ERR(args)
+#endif /* WL_TPA_ERR */
+#ifndef WL_TPA_INFO
+#define WL_TPA_INFO(args)
+#endif /* WL_TPA_INFO */
+#endif /* WLMSG_TPA */
+
+#ifdef WLMSG_WNM_BSSTRANS
+#if defined(EVENT_LOG_COMPILE)
+#if defined(USE_EVENT_LOG_RA)
+#define WL_BSSTRANS_INFO(args) EVENT_LOG_RA(EVENT_LOG_TAG_WNM_BSSTRANS_INFO, args)
+#else
+#define WL_BSSTRANS_INFO(args) \
+ EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_WNM_BSSTRANS_INFO, args)
+#endif /* USE_EVENT_LOG_RA */
+#else
+#define WL_BSSTRANS_INFO(args) WL_PRINT(args)
+#endif /* EVENT_LOG_COMPILE */
+#else
+#define WL_BSSTRANS_INFO(args)
+#endif /* WLMSG_WNM_BSSTRANS */
+
+#ifdef BCMDBG_ERR
+#if defined(EVENT_LOG_COMPILE) && defined(ERR_USE_EVENT_LOG)
+#if defined(ERR_USE_EVENT_LOG_RA)
+#define WL_BSSTRANS_ERR(args) EVENT_LOG_RA(EVENT_LOG_TAG_WNM_BSSTRANS_ERR, args)
+#else
+#define WL_BSSTRANS_ERR(args) \
+ EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_WNM_BSSTRANS_ERR, args)
+#endif /* ERR_USE_EVENT_LOG_RA */
+#else
+#define WL_BSSTRANS_ERR(args) WL_PRINT(args)
+#endif /* ERR_USE_EVENT_LOG */
+#else
+#define WL_BSSTRANS_ERR(args) WL_PRINT(args)
+#endif /* BCMDBG_ERR */
+
+#if defined(BCMDBG) || defined(BCMDBG_ERR)
+#define DBGERRONLY(x) x
+#else
+#define DBGERRONLY(x)
+#endif
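+
+/*
+ * Usage sketch (illustrative, not from the original source):
+ * DBGERRONLY() keeps a declaration or statement only in builds with
+ * BCMDBG or BCMDBG_ERR defined, e.g.
+ *
+ *   DBGERRONLY(uint32 drop_cnt = 0;)
+ *   ...
+ *   DBGERRONLY(drop_cnt++;)
+ *
+ * In release builds both lines compile away to nothing.
+ */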
+
+#ifdef EVENT_LOG_COMPILE
+#ifdef USE_EVENT_LOG_RA
+#define WL_ADPS_ELOG(args) EVENT_LOG_RA(EVENT_LOG_TAG_ADPS, args)
+#define WL_ADPS_ELOG_INFO(args) EVENT_LOG_RA(EVENT_LOG_TAG_ADPS_INFO, args)
+#else
+#define WL_ADPS_ELOG(args) \
+ EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_ADPS, args)
+#define WL_ADPS_ELOG_INFO(args) \
+ EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_ADPS_INFO, args)
+#endif /* USE_EVENT_LOG_RA */
+#else
+#define WL_ADPS_ELOG(args) WL_ADPS(args)
+#define WL_ADPS_ELOG_INFO(args) WL_ADPS(args)
+#endif /* EVENT_LOG_COMPILE */
+
+#ifdef WLMSG_RRM
+#if defined(EVENT_LOG_COMPILE)
+#if defined(USE_EVENT_LOG_RA)
+#define WL_RRM_DBG(args) EVENT_LOG_RA(EVENT_LOG_TAG_RRM_DBG, args)
+#define WL_RRM_INFO(args) EVENT_LOG_RA(EVENT_LOG_TAG_RRM_INFO, args)
+#else
+#define WL_RRM_DBG(args) \
+ EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_RRM_DBG, args)
+#define WL_RRM_INFO(args) \
+ EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_RRM_INFO, args)
+#endif /* USE_EVENT_LOG_RA */
+#else
+#define WL_RRM_DBG(args) WL_PRINT(args)
+#define WL_RRM_INFO(args) WL_PRINT(args)
+#endif /* EVENT_LOG_COMPILE */
+#else
+#define WL_RRM_DBG(args)
+#define WL_RRM_INFO(args)
+#endif /* WLMSG_RRM */
+
+#ifdef BCMDBG_ERR
+#if defined(EVENT_LOG_COMPILE) && defined(ERR_USE_EVENT_LOG)
+#if defined(ERR_USE_EVENT_LOG_RA)
+#define WL_RRM_ERR(args) EVENT_LOG_RA(EVENT_LOG_TAG_RRM_ERR, args)
+#else
+#define WL_RRM_ERR(args) EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_RRM_ERR, args)
+#endif /* ERR_USE_EVENT_LOG_RA */
+#else
+#define WL_RRM_ERR(args) WL_PRINT(args)
+#endif /* ERR_USE_EVENT_LOG */
+#else
+#define WL_RRM_ERR(args) WL_PRINT(args)
+#endif /* BCMDBG_ERR */
+
+#ifdef WLMSG_ESP
+#if defined(EVENT_LOG_COMPILE)
+#if defined(USE_EVENT_LOG_RA)
+#define WL_ESP_DBG(args) EVENT_LOG_RA(EVENT_LOG_TAG_ESP_DBG, args)
+#define WL_ESP_INFO(args) EVENT_LOG_RA(EVENT_LOG_TAG_ESP_INFO, args)
+#else
+#define WL_ESP_DBG(args) \
+ EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_ESP_DBG, args)
+#define WL_ESP_INFO(args) \
+ EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_ESP_INFO, args)
+#endif /* USE_EVENT_LOG_RA */
+#else
+#define WL_ESP_DBG(args) WL_PRINT(args)
+#define WL_ESP_INFO(args) WL_PRINT(args)
+#endif /* EVENT_LOG_COMPILE */
+#else
+#define WL_ESP_DBG(args)
+#define WL_ESP_INFO(args)
+#endif /* WLMSG_ESP */
+
+#ifdef BCMDBG_ERR
+#if defined(EVENT_LOG_COMPILE) && defined(ERR_USE_EVENT_LOG)
+#if defined(ERR_USE_EVENT_LOG_RA)
+#define WL_ESP_ERR(args) EVENT_LOG_RA(EVENT_LOG_TAG_ESP_ERR, args)
+#else
+#define WL_ESP_ERR(args) EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_ESP_ERR, args)
+#endif /* ERR_USE_EVENT_LOG_RA */
+#else
+#define WL_ESP_ERR(args) WL_PRINT(args)
+#endif /* ERR_USE_EVENT_LOG */
+#else
+#define WL_ESP_ERR(args) WL_PRINT(args)
+#endif /* BCMDBG_ERR */
+
+#ifndef WL_ASSOC_DP
+#define WL_ASSOC_DP(args) WL_ASSOC(args)
+#endif
+
+#ifndef WL_ROAM_DP
+#define WL_ROAM_DP(args) WL_ROAM(args)
+#endif
+
+#ifndef WL_PS_DP
+#define WL_PS_DP(args) WL_PS(args)
+#endif
+
+#ifndef WL_WSEC_DP
+#define WL_WSEC_DP(args) WL_WSEC(args)
+#endif
+
+#ifdef EVENT_LOG_COMPILE
+#ifdef USE_EVENT_LOG_RA
+#define WL_EVT_NOTIF_INFO(args) EVENT_LOG_RA(EVENT_LOG_TAG_EVT_NOTIF_INFO, args)
+#else
+#define WL_EVT_NOTIF_INFO(args) \
+ EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_EVT_NOTIF_INFO, args)
+#endif /* USE_EVENT_LOG_RA */
+#else
+#define WL_EVT_NOTIF_INFO(args) WL_PRINT(args)
+#endif /* EVENT_LOG_COMPILE */
+
+#ifdef BCMDBG_ERR
+#if defined(EVENT_LOG_COMPILE)
+#if defined(USE_EVENT_LOG_RA)
+#define WL_PKTFLTR_CNT(args) EVENT_LOG_RA(EVENT_LOG_TAG_WL_PS_LOG, args)
+#else
+#define WL_PKTFLTR_CNT(args) EVENT_LOG_FAST_CAST_PAREN_ARGS(EVENT_LOG_TAG_WL_PS_LOG, args)
+#endif /* USE_EVENT_LOG_RA */
+#else
+#define WL_PKTFLTR_CNT(args) WL_PRINT(args)
+#endif /* EVENT_LOG_COMPILE */
+#else
+#define WL_PKTFLTR_CNT(args)
+#endif /* BCMDBG_ERR */
+#ifdef EVENT_LOG_COMPILE
+#ifdef USE_EVENT_LOG_RA
+#define WL_TDLS_INFO(args) EVENT_LOG_RA(EVENT_LOG_TAG_WL_TDLS_INFO, args)
+#else
+#define WL_TDLS_INFO(args) EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_WL_TDLS_INFO, args)
+#endif /* USE_EVENT_LOG_RA */
+#else
+#define WL_TDLS_INFO(args) WL_PRINT(args)
+#endif /* EVENT_LOG_COMPILE */
+
+#ifndef WL_OCE_INFO
+#define WL_OCE_INFO(args)
+#endif
+
+#ifndef WL_OCE_ERR
+#define WL_OCE_ERR(args)
+#endif
+
+#ifndef WL_MBO_INFO
+#define WL_MBO_INFO(args)
+#endif
+
+#ifndef WL_FILS_ERR
+#define WL_FILS_ERR(args)
+#endif
+
+#ifndef WL_FILS_DBG
+#define WL_FILS_DBG(args)
+#endif
+
+#ifndef WL_FILS_INFO
+#define WL_FILS_INFO(args)
+#endif
+
+/* ===============================================================
+ * ====define BCMDBG_RATESET/WL_RATESET_ON()/WL_RATESET_PRT(x)====
+ * ===============================================================
+ */
+/* 1. #define BCMDBG_RATESET explicitly turns on WL_RATESET_ON() */
+#ifdef BCMDBG_RATESET
+#define WL_RATESET_ON() 1
+#define WL_RATESET_PRT(x) WL_PRINT(x)
+#endif
+/* 2. #define BCMDBG implicitly turns on BCMDBG_RATESET but not WL_RATESET_ON() */
+#ifdef BCMDBG
+#ifndef BCMDBG_RATESET
+#define BCMDBG_RATESET
+#endif
+#endif /* BCMDBG */
+/* 3. default WL_RATESET_ON() is 0 */
+#ifndef WL_RATESET_ON
+#define WL_RATESET_ON() 0
+#endif
+/* 4. default WL_RATESET_PRT(x) is WL_RATE(x) */
+#ifndef WL_RATESET_PRT
+#define WL_RATESET_PRT(x) WL_RATE(x)
+#endif
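+/*
+ * Illustrative summary of the three resulting configurations (added
+ * for clarity, not part of the original source):
+ *
+ *   -DBCMDBG_RATESET  -> WL_RATESET_ON() = 1, prints via WL_PRINT()
+ *   -DBCMDBG alone    -> BCMDBG_RATESET gets defined (step 2), but the
+ *                        defaults apply: ON() = 0, prints via WL_RATE()
+ *   neither           -> WL_RATESET_ON() = 0, prints via WL_RATE()
+ */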
+
+#endif /* _wl_dbg_h_ */
diff --git a/bcmdhd.101.10.361.x/wl_escan.c b/bcmdhd.101.10.361.x/wl_escan.c
new file mode 100755
index 0000000..666a5b2
--- /dev/null
+++ b/bcmdhd.101.10.361.x/wl_escan.c
@@ -0,0 +1,1767 @@
+
+#if defined(WL_ESCAN)
+#include <bcmendian.h>
+#include <linux/if_arp.h>
+#include <asm/uaccess.h>
+#include <wl_android.h>
+#include <wl_escan.h>
+#include <dhd_config.h>
+
+#define ESCAN_ERROR(name, arg1, args...) \
+ do { \
+ if (android_msg_level & ANDROID_ERROR_LEVEL) { \
+ printf("[%s] ESCAN-ERROR) %s : " arg1, name, __func__, ## args); \
+ } \
+ } while (0)
+#define ESCAN_TRACE(name, arg1, args...) \
+ do { \
+ if (android_msg_level & ANDROID_TRACE_LEVEL) { \
+ printf("[%s] ESCAN-TRACE) %s : " arg1, name, __func__, ## args); \
+ } \
+ } while (0)
+#define ESCAN_SCAN(name, arg1, args...) \
+ do { \
+ if (android_msg_level & ANDROID_SCAN_LEVEL) { \
+ printf("[%s] ESCAN-SCAN) %s : " arg1, name, __func__, ## args); \
+ } \
+ } while (0)
+#define ESCAN_DBG(name, arg1, args...) \
+ do { \
+ if (android_msg_level & ANDROID_DBG_LEVEL) { \
+ printf("[%s] ESCAN-DBG) %s : " arg1, name, __func__, ## args); \
+ } \
+ } while (0)
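+
+/*
+ * Usage sketch (illustrative, not from the original source): the
+ * ESCAN_* macros gate output on the global android_msg_level mask and
+ * prefix the interface name and calling function, e.g.
+ *
+ *   ESCAN_TRACE(dev->name, "scan requested, %d channels\n", n);
+ *
+ * prints "[wlan0] ESCAN-TRACE) <caller> : scan requested, ..." only
+ * when ANDROID_TRACE_LEVEL is set in android_msg_level.
+ */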
+
+/* IOCTL byte-swapping for a big-endian host with a little-endian dongle;
+ * defined as no-ops here, i.e. swapping is off by default
+ */
+#define htod32(i) (i)
+#define htod16(i) (i)
+#define dtoh32(i) (i)
+#define dtoh16(i) (i)
+#define htodchanspec(i) (i)
+#define dtohchanspec(i) (i)
+#define WL_EXTRA_BUF_MAX 2048
+
+#define wl_escan_get_buf(a) ((wl_scan_results_t *) (a)->escan_buf)
+
+#if defined(WL_WIRELESS_EXT)
+extern int wl_iw_handle_scanresults_ies(char **event_p, char *end,
+ struct iw_request_info *info, wl_bss_info_t *bi);
+#define for_each_bss_wext(list, bss, __i) \
+ for (__i = 0; __i < list->count && __i < IW_MAX_AP; __i++, bss = next_bss(list, bss))
+#endif
+#define for_each_bss(list, bss, __i) \
+ for (__i = 0; __i < list->count; __i++, bss = next_bss(list, bss))
+
+#define wl_escan_set_sync_id(a) ((a) = htod16(0x1234))
+
+#ifdef ESCAN_BUF_OVERFLOW_MGMT
+#define BUF_OVERFLOW_MGMT_COUNT 3
+typedef struct {
+ int RSSI;
+ int length;
+ struct ether_addr BSSID;
+} removal_element_t;
+#endif /* ESCAN_BUF_OVERFLOW_MGMT */
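+
+/*
+ * Descriptive note (added for clarity): with ESCAN_BUF_OVERFLOW_MGMT,
+ * when a new result no longer fits in the escan buffer the event
+ * handler tracks the BUF_OVERFLOW_MGMT_COUNT weakest (lowest-RSSI)
+ * entries in removal_element_t slots and evicts them to make room, so
+ * stronger APs are preferred when the buffer overflows.
+ */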
+
+/* Return a new chanspec given a legacy chanspec
+ * Returns INVCHANSPEC on error
+ */
+static chanspec_t
+wl_chspec_from_legacy(chanspec_t legacy_chspec)
+{
+ chanspec_t chspec;
+
+ /* get the channel number */
+ chspec = LCHSPEC_CHANNEL(legacy_chspec);
+
+ /* convert the band */
+ if (LCHSPEC_IS2G(legacy_chspec)) {
+ chspec |= WL_CHANSPEC_BAND_2G;
+ } else {
+ chspec |= WL_CHANSPEC_BAND_5G;
+ }
+
+ /* convert the bw and sideband */
+ if (LCHSPEC_IS20(legacy_chspec)) {
+ chspec |= WL_CHANSPEC_BW_20;
+ } else {
+ chspec |= WL_CHANSPEC_BW_40;
+ if (LCHSPEC_CTL_SB(legacy_chspec) == WL_LCHANSPEC_CTL_SB_LOWER) {
+ chspec |= WL_CHANSPEC_CTL_SB_L;
+ } else {
+ chspec |= WL_CHANSPEC_CTL_SB_U;
+ }
+ }
+
+ if (wf_chspec_malformed(chspec)) {
+ ESCAN_ERROR("wlan", "wl_chspec_from_legacy: output chanspec (0x%04X) malformed\n",
+ chspec);
+ return INVCHANSPEC;
+ }
+
+ return chspec;
+}
+
+/* Return a legacy chanspec given a new chanspec
+ * Returns INVCHANSPEC on error
+ */
+static chanspec_t
+wl_chspec_to_legacy(chanspec_t chspec)
+{
+ chanspec_t lchspec;
+
+ if (wf_chspec_malformed(chspec)) {
+ ESCAN_ERROR("wlan", "wl_chspec_to_legacy: input chanspec (0x%04X) malformed\n",
+ chspec);
+ return INVCHANSPEC;
+ }
+
+ /* get the channel number */
+ lchspec = CHSPEC_CHANNEL(chspec);
+
+ /* convert the band */
+ if (CHSPEC_IS2G(chspec)) {
+ lchspec |= WL_LCHANSPEC_BAND_2G;
+ } else {
+ lchspec |= WL_LCHANSPEC_BAND_5G;
+ }
+
+ /* convert the bw and sideband */
+ if (CHSPEC_IS20(chspec)) {
+ lchspec |= WL_LCHANSPEC_BW_20;
+ lchspec |= WL_LCHANSPEC_CTL_SB_NONE;
+ } else if (CHSPEC_IS40(chspec)) {
+ lchspec |= WL_LCHANSPEC_BW_40;
+ if (CHSPEC_CTL_SB(chspec) == WL_CHANSPEC_CTL_SB_L) {
+ lchspec |= WL_LCHANSPEC_CTL_SB_LOWER;
+ } else {
+ lchspec |= WL_LCHANSPEC_CTL_SB_UPPER;
+ }
+ } else {
+ /* cannot express the bandwidth */
+ char chanbuf[CHANSPEC_STR_LEN];
+ ESCAN_ERROR("wlan", "wl_chspec_to_legacy: unable to convert chanspec %s "
+ "(0x%04X) to pre-11ac format\n",
+ wf_chspec_ntoa(chspec, chanbuf), chspec);
+ return INVCHANSPEC;
+ }
+
+ return lchspec;
+}
+
+/* given a chanspec value from the driver, do the endian and chanspec version conversion to
+ * a chanspec_t value
+ * Returns INVCHANSPEC on error
+ */
+static chanspec_t
+wl_chspec_driver_to_host(int ioctl_ver, chanspec_t chanspec)
+{
+ chanspec = dtohchanspec(chanspec);
+ if (ioctl_ver == 1) {
+ chanspec = wl_chspec_from_legacy(chanspec);
+ }
+
+ return chanspec;
+}
+
+/* given a chanspec value, do the endian and chanspec version conversion to
+ * a chanspec_t value
+ * Returns INVCHANSPEC on error
+ */
+static chanspec_t
+wl_chspec_host_to_driver(int ioctl_ver, chanspec_t chanspec)
+{
+ if (ioctl_ver == 1) {
+ chanspec = wl_chspec_to_legacy(chanspec);
+ if (chanspec == INVCHANSPEC) {
+ return chanspec;
+ }
+ }
+ chanspec = htodchanspec(chanspec);
+
+ return chanspec;
+}
+
+/* given a channel value, do the endian and chanspec version conversion to
+ * a chanspec_t value
+ * Returns INVCHANSPEC on error
+ */
+static chanspec_t
+wl_ch_host_to_driver(int ioctl_ver, u16 channel)
+{
+ chanspec_t chanspec;
+
+ chanspec = channel & WL_CHANSPEC_CHAN_MASK;
+
+ if (channel <= CH_MAX_2G_CHANNEL)
+ chanspec |= WL_CHANSPEC_BAND_2G;
+ else
+ chanspec |= WL_CHANSPEC_BAND_5G;
+
+ chanspec |= WL_CHANSPEC_BW_20;
+
+ chanspec |= WL_CHANSPEC_CTL_SB_NONE;
+
+ return wl_chspec_host_to_driver(ioctl_ver, chanspec);
+}
+
+static inline struct wl_bss_info *next_bss(wl_scan_results_t *list,
+ struct wl_bss_info *bss)
+{
+ return bss = bss ?
+ (struct wl_bss_info *)((uintptr) bss + dtoh32(bss->length)) : list->bss_info;
+}
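+
+/*
+ * Descriptive note (added for clarity): scan results are packed
+ * back-to-back in the wl_scan_results_t buffer as variable-length
+ * wl_bss_info_t records; next_bss() advances by the current record's
+ * dtoh32(length), and a NULL bss yields the first record. Typical
+ * iteration, as used below:
+ *
+ *   bi = next_bss(list, NULL);
+ *   for_each_bss(list, bi, i) { ... }
+ */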
+
+#if defined(ESCAN_RESULT_PATCH)
+#ifndef BSSCACHE
+static void
+wl_escan_dump_bss(struct net_device *dev, struct wl_escan_info *escan,
+ wl_bss_info_t *bi)
+{
+ int16 rssi;
+ int channel;
+ chanspec_t chanspec;
+
+#if defined(RSSIAVG)
+ rssi = wl_get_avg_rssi(&escan->g_rssi_cache_ctrl, &bi->BSSID);
+ if (rssi == RSSI_MINVAL)
+ rssi = MIN(dtoh16(bi->RSSI), RSSI_MAXVAL);
+#else
+ // terence 20150419: limit the max. rssi to -2 or the bss will be filtered out in android OS
+ rssi = MIN(dtoh16(bi->RSSI), RSSI_MAXVAL);
+#endif
+ chanspec = wl_chspec_driver_to_host(escan->ioctl_ver, bi->chanspec);
+ channel = wf_chspec_ctlchan(chanspec);
+ ESCAN_SCAN(dev->name, "BSSID %pM, channel %3d(%3d %sMHz), rssi %3d, SSID \"%s\"\n",
+ &bi->BSSID, channel, CHSPEC_CHANNEL(chanspec),
+ CHSPEC_IS20(chanspec)?"20":
+ CHSPEC_IS40(chanspec)?"40":
+ CHSPEC_IS80(chanspec)?"80":"160",
+ rssi, bi->SSID);
+}
+#endif /* BSSCACHE */
+
+static s32
+wl_escan_inform_bss(struct net_device *dev, struct wl_escan_info *escan)
+{
+ wl_scan_results_t *bss_list;
+#ifndef BSSCACHE
+ wl_bss_info_t *bi = NULL; /* must be initialized */
+ s32 i;
+#endif
+ s32 err = 0;
+#if defined(RSSIAVG)
+ int rssi;
+#endif
+
+ bss_list = escan->bss_list;
+
+ ESCAN_SCAN(dev->name, "scanned AP count (%d)\n", bss_list->count);
+
+ /* Update cache */
+#if defined(RSSIAVG)
+ wl_update_rssi_cache(&escan->g_rssi_cache_ctrl, bss_list);
+ if (!in_atomic())
+ wl_update_connected_rssi_cache(dev, &escan->g_rssi_cache_ctrl, &rssi);
+#endif
+#if defined(BSSCACHE)
+ wl_update_bss_cache(&escan->g_bss_cache_ctrl,
+#if defined(RSSIAVG)
+ &escan->g_rssi_cache_ctrl,
+#endif
+ bss_list);
+#endif
+
+ /* delete dirty cache */
+#if defined(RSSIAVG)
+ wl_delete_dirty_rssi_cache(&escan->g_rssi_cache_ctrl);
+ wl_reset_rssi_cache(&escan->g_rssi_cache_ctrl);
+#endif
+
+#if defined(BSSCACHE)
+ wl_delete_dirty_bss_cache(&escan->g_bss_cache_ctrl);
+ wl_reset_bss_cache(&escan->g_bss_cache_ctrl);
+ if (escan->autochannel)
+ wl_ext_get_best_channel(dev, &escan->g_bss_cache_ctrl,
+ escan->ioctl_ver, &escan->best_2g_ch, &escan->best_5g_ch);
+#else
+ bi = next_bss(bss_list, bi);
+ for_each_bss(bss_list, bi, i) {
+ wl_escan_dump_bss(dev, escan, bi);
+ }
+ if (escan->autochannel)
+ wl_ext_get_best_channel(dev, bss_list, escan->ioctl_ver,
+ &escan->best_2g_ch, &escan->best_5g_ch);
+#endif
+
+ return err;
+}
+#endif /* ESCAN_RESULT_PATCH */
+
+static wl_scan_params_t *
+wl_escan_alloc_params(struct net_device *dev, struct wl_escan_info *escan,
+ int channel, int nprobes, int *out_params_size)
+{
+ wl_scan_params_t *params;
+ int params_size;
+ int num_chans;
+
+ *out_params_size = 0;
+
+ /* Our scan params only need space for 1 channel and 0 ssids */
+ params_size = WL_SCAN_PARAMS_FIXED_SIZE + 1 * sizeof(uint16);
+ params = (wl_scan_params_t*) kzalloc(params_size, GFP_KERNEL);
+ if (params == NULL) {
+ ESCAN_ERROR(dev->name, "mem alloc failed (%d bytes)\n", params_size);
+ return params;
+ }
+ params->nprobes = nprobes;
+
+ num_chans = (channel == 0) ? 0 : 1;
+
+ memcpy(&params->bssid, &ether_bcast, ETHER_ADDR_LEN);
+ params->bss_type = DOT11_BSSTYPE_ANY;
+ params->scan_type = DOT11_SCANTYPE_ACTIVE;
+ params->nprobes = htod32(1);
+ params->active_time = htod32(-1);
+ params->passive_time = htod32(-1);
+ params->home_time = htod32(10);
+ if (channel == -1)
+ params->channel_list[0] = htodchanspec(channel);
+ else
+ params->channel_list[0] = wl_ch_host_to_driver(escan->ioctl_ver, channel);
+
+ /* Our scan params have 1 channel and 0 ssids */
+ params->channel_num = htod32((0 << WL_SCAN_PARAMS_NSSID_SHIFT) |
+ (num_chans & WL_SCAN_PARAMS_COUNT_MASK));
+
+ *out_params_size = params_size; /* rtn size to the caller */
+ return params;
+}
+
+static void
+wl_escan_abort(struct net_device *dev, struct wl_escan_info *escan)
+{
+ wl_scan_params_t *params = NULL;
+ s32 params_size = 0;
+ s32 err = BCME_OK;
+ if (!in_atomic()) {
+ /* Our scan params only need space for 1 channel and 0 ssids */
+ params = wl_escan_alloc_params(dev, escan, -1, 0, &params_size);
+ if (params == NULL) {
+ ESCAN_ERROR(dev->name, "scan params allocation failed \n");
+ err = -ENOMEM;
+ } else {
+ /* Do a scan abort to stop the driver's scan engine */
+ err = wldev_ioctl(dev, WLC_SCAN, params, params_size, true);
+ if (err < 0) {
+ ESCAN_ERROR(dev->name, "scan abort failed \n");
+ }
+ kfree(params);
+ }
+ }
+}
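+
+/*
+ * Descriptive note (added for clarity): there is no dedicated abort
+ * ioctl here; issuing WLC_SCAN with a single channel list entry of -1,
+ * as built by wl_escan_alloc_params() above, is the conventional way
+ * to make the firmware cancel the scan in progress.
+ */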
+
+static s32
+wl_escan_notify_complete(struct net_device *dev,
+ struct wl_escan_info *escan, bool fw_abort)
+{
+ s32 err = BCME_OK;
+#if defined(WL_WIRELESS_EXT)
+ int cmd = 0;
+#if WIRELESS_EXT > 13
+ union iwreq_data wrqu;
+ char extra[IW_CUSTOM_MAX + 1];
+#endif
+#endif
+ struct dhd_pub *dhd = dhd_get_pub(dev);
+
+ ESCAN_TRACE(dev->name, "Enter\n");
+
+ if (fw_abort && !in_atomic())
+ wl_escan_abort(dev, escan);
+
+ if (timer_pending(&escan->scan_timeout))
+ del_timer_sync(&escan->scan_timeout);
+
+#if defined(ESCAN_RESULT_PATCH)
+ escan->bss_list = wl_escan_get_buf(escan);
+ wl_escan_inform_bss(dev, escan);
+#endif /* ESCAN_RESULT_PATCH */
+
+ escan->escan_state = ESCAN_STATE_IDLE;
+ wake_up_interruptible(&dhd->conf->event_complete);
+
+#if defined(WL_WIRELESS_EXT)
+#if WIRELESS_EXT > 13
+#if WIRELESS_EXT > 14
+ cmd = SIOCGIWSCAN;
+#endif
+ // terence 20150224: fix "wlan0: (WE) : Wireless Event too big (65306)"
+ memset(&wrqu, 0, sizeof(wrqu));
+ memset(extra, 0, sizeof(extra));
+ if (cmd) {
+ if (cmd == SIOCGIWSCAN) {
+ wireless_send_event(dev, cmd, &wrqu, NULL);
+ } else
+ wireless_send_event(dev, cmd, &wrqu, extra);
+ }
+#endif
+#endif
+
+ return err;
+}
+
+#ifdef ESCAN_BUF_OVERFLOW_MGMT
+static void
+wl_escan_find_removal_candidate(struct wl_escan_info *escan,
+ wl_bss_info_t *bss, removal_element_t *candidate)
+{
+ int idx;
+ for (idx = 0; idx < BUF_OVERFLOW_MGMT_COUNT; idx++) {
+ int len = BUF_OVERFLOW_MGMT_COUNT - idx - 1;
+ if (bss->RSSI < candidate[idx].RSSI) {
+ if (len)
+ memcpy(&candidate[idx + 1], &candidate[idx],
+ sizeof(removal_element_t) * len);
+ candidate[idx].RSSI = bss->RSSI;
+ candidate[idx].length = bss->length;
+ memcpy(&candidate[idx].BSSID, &bss->BSSID, ETHER_ADDR_LEN);
+ return;
+ }
+ }
+}
+
+static void
+wl_escan_remove_lowRSSI_info(struct net_device *dev, struct wl_escan_info *escan,
+ wl_scan_results_t *list, removal_element_t *candidate, wl_bss_info_t *bi)
+{
+ int idx1, idx2;
+ int total_delete_len = 0;
+ for (idx1 = 0; idx1 < BUF_OVERFLOW_MGMT_COUNT; idx1++) {
+ int cur_len = WL_SCAN_RESULTS_FIXED_SIZE;
+ wl_bss_info_t *bss = NULL;
+ if (candidate[idx1].RSSI >= bi->RSSI)
+ continue;
+ for (idx2 = 0; idx2 < list->count; idx2++) {
+ bss = bss ? (wl_bss_info_t *)((uintptr)bss + dtoh32(bss->length)) :
+ list->bss_info;
+ if (!bcmp(&candidate[idx1].BSSID, &bss->BSSID, ETHER_ADDR_LEN) &&
+ candidate[idx1].RSSI == bss->RSSI &&
+ candidate[idx1].length == dtoh32(bss->length)) {
+ u32 delete_len = dtoh32(bss->length);
+ ESCAN_DBG(dev->name,
+ "delete scan info of %pM to add new AP\n", &bss->BSSID);
+ if (idx2 < list->count -1) {
+ memmove((u8 *)bss, (u8 *)bss + delete_len,
+ list->buflen - cur_len - delete_len);
+ }
+ list->buflen -= delete_len;
+ list->count--;
+ total_delete_len += delete_len;
+ /* if delete_len is greater than or equal to result length */
+ if (total_delete_len >= bi->length) {
+ return;
+ }
+ break;
+ }
+ cur_len += dtoh32(bss->length);
+ }
+ }
+}
+#endif /* ESCAN_BUF_OVERFLOW_MGMT */
+
+void
+wl_escan_ext_handler(struct net_device *dev, void *argu,
+ const wl_event_msg_t *e, void *data)
+{
+ struct wl_escan_info *escan = (struct wl_escan_info *)argu;
+ s32 status = ntoh32(e->status);
+ wl_bss_info_t *bi;
+ wl_escan_result_t *escan_result;
+ wl_bss_info_t *bss = NULL;
+ wl_scan_results_t *list;
+ u32 bi_length;
+ u32 i;
+ u16 channel;
+
+ mutex_lock(&escan->usr_sync);
+ escan_result = (wl_escan_result_t *)data;
+
+ if (escan->escan_state != ESCAN_STATE_SCANING) {
+ ESCAN_DBG(dev->name, "Not my scan\n");
+ goto exit;
+ }
+
+ ESCAN_DBG(dev->name, "enter event type : %d, status : %d \n",
+ ntoh32(e->event_type), ntoh32(e->status));
+
+ if (status == WLC_E_STATUS_PARTIAL) {
+ ESCAN_DBG(dev->name, "WLC_E_STATUS_PARTIAL \n");
+ if (!escan_result) {
+ ESCAN_ERROR(dev->name, "Invalid escan result (NULL pointer)\n");
+ goto exit;
+ }
+ if (dtoh16(escan_result->bss_count) != 1) {
+ ESCAN_ERROR(dev->name, "Invalid bss_count %d: ignoring\n",
+ escan_result->bss_count);
+ goto exit;
+ }
+ bi = escan_result->bss_info;
+ if (!bi) {
+ ESCAN_ERROR(dev->name, "Invalid escan bss info (NULL pointer)\n");
+ goto exit;
+ }
+ bi_length = dtoh32(bi->length);
+ if (bi_length != (dtoh32(escan_result->buflen) - WL_ESCAN_RESULTS_FIXED_SIZE)) {
+ ESCAN_ERROR(dev->name, "Invalid bss_info length %d: ignoring\n",
+ bi_length);
+ goto exit;
+ }
+
+ /* +++++ terence 20130524: skip invalid bss */
+ channel =
+ bi->ctl_ch ? bi->ctl_ch :
+ CHSPEC_CHANNEL(wl_chspec_driver_to_host(escan->ioctl_ver, bi->chanspec));
+ if (!dhd_conf_match_channel(escan->pub, channel))
+ goto exit;
+ /* ----- terence 20130524: skip invalid bss */
+
+ {
+ int cur_len = WL_SCAN_RESULTS_FIXED_SIZE;
+#ifdef ESCAN_BUF_OVERFLOW_MGMT
+ removal_element_t candidate[BUF_OVERFLOW_MGMT_COUNT];
+ int remove_lower_rssi = FALSE;
+
+ bzero(candidate, sizeof(removal_element_t)*BUF_OVERFLOW_MGMT_COUNT);
+#endif /* ESCAN_BUF_OVERFLOW_MGMT */
+
+ list = wl_escan_get_buf(escan);
+#ifdef ESCAN_BUF_OVERFLOW_MGMT
+ if (bi_length > ESCAN_BUF_SIZE - list->buflen)
+ remove_lower_rssi = TRUE;
+#endif /* ESCAN_BUF_OVERFLOW_MGMT */
+
+ ESCAN_DBG(dev->name, "%s(%pM) RSSI %d flags 0x%x length %d\n",
+ bi->SSID, &bi->BSSID, bi->RSSI, bi->flags, bi->length);
+ for (i = 0; i < list->count; i++) {
+ bss = bss ? (wl_bss_info_t *)((uintptr)bss + dtoh32(bss->length))
+ : list->bss_info;
+#ifdef ESCAN_BUF_OVERFLOW_MGMT
+ ESCAN_DBG(dev->name,
+ "%s(%pM), i=%d bss: RSSI %d list->count %d\n",
+ bss->SSID, &bss->BSSID, i, bss->RSSI, list->count);
+
+ if (remove_lower_rssi)
+ wl_escan_find_removal_candidate(escan, bss, candidate);
+#endif /* ESCAN_BUF_OVERFLOW_MGMT */
+ if (!bcmp(&bi->BSSID, &bss->BSSID, ETHER_ADDR_LEN) &&
+ (CHSPEC_BAND(wl_chspec_driver_to_host(escan->ioctl_ver, bi->chanspec))
+ == CHSPEC_BAND(wl_chspec_driver_to_host(escan->ioctl_ver, bss->chanspec))) &&
+ bi->SSID_len == bss->SSID_len &&
+ !bcmp(bi->SSID, bss->SSID, bi->SSID_len)) {
+
+ /* do not allow beacon data to update
+ * the data received from a probe response
+ */
+ if (!(bss->flags & WL_BSS_FLAGS_FROM_BEACON) &&
+ (bi->flags & WL_BSS_FLAGS_FROM_BEACON))
+ goto exit;
+
+ ESCAN_DBG(dev->name,
+ "%s(%pM), i=%d prev: RSSI %d flags 0x%x, "
+ "new: RSSI %d flags 0x%x\n",
+ bss->SSID, &bi->BSSID, i, bss->RSSI, bss->flags,
+ bi->RSSI, bi->flags);
+
+ if ((bss->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL) ==
+ (bi->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL)) {
+ /* preserve max RSSI if the measurements are
+ * both on-channel or both off-channel
+ */
+ ESCAN_DBG(dev->name,
+ "%s(%pM), same onchan, RSSI: prev %d new %d\n",
+ bss->SSID, &bi->BSSID, bss->RSSI, bi->RSSI);
+ bi->RSSI = MAX(bss->RSSI, bi->RSSI);
+ } else if ((bss->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL) &&
+ (bi->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL) == 0) {
+ /* preserve the on-channel rssi measurement
+ * if the new measurement is off channel
+ */
+ ESCAN_DBG(dev->name,
+ "%s(%pM), prev onchan, RSSI: prev %d new %d\n",
+ bss->SSID, &bi->BSSID, bss->RSSI, bi->RSSI);
+ bi->RSSI = bss->RSSI;
+ bi->flags |= WL_BSS_FLAGS_RSSI_ONCHANNEL;
+ }
+ if (dtoh32(bss->length) != bi_length) {
+ u32 prev_len = dtoh32(bss->length);
+
+ ESCAN_DBG(dev->name,
+ "bss info replacement occured(bcast:%d->probresp%d)\n",
+ bss->ie_length, bi->ie_length);
+ ESCAN_DBG(dev->name,
+ "%s(%pM), replacement!(%d -> %d)\n",
+ bss->SSID, &bi->BSSID, prev_len, bi_length);
+
+ if (list->buflen - prev_len + bi_length > ESCAN_BUF_SIZE) {
+ ESCAN_ERROR(dev->name,
+ "Buffer is too small: keep the previous result "
+ "of this AP\n");
+ /* Only update RSSI */
+ bss->RSSI = bi->RSSI;
+ bss->flags |= (bi->flags
+ & WL_BSS_FLAGS_RSSI_ONCHANNEL);
+ goto exit;
+ }
+
+ if (i < list->count - 1) {
+ /* memory copy required by this case only */
+ memmove((u8 *)bss + bi_length,
+ (u8 *)bss + prev_len,
+ list->buflen - cur_len - prev_len);
+ }
+ list->buflen -= prev_len;
+ list->buflen += bi_length;
+ }
+ list->version = dtoh32(bi->version);
+ memcpy((u8 *)bss, (u8 *)bi, bi_length);
+ goto exit;
+ }
+ cur_len += dtoh32(bss->length);
+ }
+ if (bi_length > ESCAN_BUF_SIZE - list->buflen) {
+#ifdef ESCAN_BUF_OVERFLOW_MGMT
+ wl_escan_remove_lowRSSI_info(dev, escan, list, candidate, bi);
+ if (bi_length > ESCAN_BUF_SIZE - list->buflen) {
+ ESCAN_DBG(dev->name,
+ "RSSI(%pM) is too low(%d) to add Buffer\n",
+ &bi->BSSID, bi->RSSI);
+ goto exit;
+ }
+#else
+ ESCAN_ERROR(dev->name, "Buffer is too small: ignoring\n");
+ goto exit;
+#endif /* ESCAN_BUF_OVERFLOW_MGMT */
+ }
+
+ memcpy(&(((char *)list)[list->buflen]), bi, bi_length);
+ list->version = dtoh32(bi->version);
+ list->buflen += bi_length;
+ list->count++;
+ }
+ }
+ else if (status == WLC_E_STATUS_SUCCESS) {
+ ESCAN_DBG(dev->name, "ESCAN COMPLETED\n");
+ escan->bss_list = wl_escan_get_buf(escan);
+ ESCAN_DBG(dev->name, "SCAN COMPLETED: scanned AP count=%d\n",
+ escan->bss_list->count);
+ wl_escan_notify_complete(dev, escan, false);
+ } else if ((status == WLC_E_STATUS_ABORT) || (status == WLC_E_STATUS_NEWSCAN) ||
+ (status == WLC_E_STATUS_11HQUIET) || (status == WLC_E_STATUS_CS_ABORT) ||
+ (status == WLC_E_STATUS_NEWASSOC)) {
+ /* Handle all cases of scan abort */
+ ESCAN_DBG(dev->name, "ESCAN ABORT reason: %d\n", status);
+ escan->bss_list = wl_escan_get_buf(escan);
+ ESCAN_DBG(dev->name, "SCAN ABORT: scanned AP count=%d\n",
+ escan->bss_list->count);
+ wl_escan_notify_complete(dev, escan, false);
+ } else if (status == WLC_E_STATUS_TIMEOUT) {
+ ESCAN_ERROR(dev->name, "WLC_E_STATUS_TIMEOUT\n");
+ ESCAN_ERROR(dev->name, "reason[0x%x]\n", e->reason);
+ if (e->reason == 0xFFFFFFFF) {
+ wl_escan_notify_complete(dev, escan, true);
+ }
+ } else {
+ ESCAN_ERROR(dev->name, "unexpected Escan Event %d : abort\n", status);
+ escan->bss_list = wl_escan_get_buf(escan);
+ ESCAN_DBG(dev->name, "SCAN ABORTED(UNEXPECTED): scanned AP count=%d\n",
+ escan->bss_list->count);
+ wl_escan_notify_complete(dev, escan, false);
+ }
+exit:
+ mutex_unlock(&escan->usr_sync);
+ return;
+}
+
+static int
+wl_escan_prep(struct net_device *dev, struct wl_escan_info *escan,
+ wl_uint32_list_t *list, void *scan_params, wl_scan_info_t *scan_info)
+{
+ int err = 0;
+ wl_scan_results_t *results;
+ char *ptr;
+ int i = 0, j = 0;
+ wlc_ssid_t ssid_tmp;
+ u32 n_channels = 0;
+ uint channel;
+ chanspec_t chanspec;
+ u32 n_ssids = 0;
+ wl_scan_params_t *params = NULL;
+ wl_scan_params_v2_t *params_v2 = NULL;
+ u32 scan_param_size = 0;
+ u32 channel_offset = 0;
+ u32 cur_offset;
+ uint16 *chan_list = NULL;
+
+ results = wl_escan_get_buf(escan);
+ results->version = 0;
+ results->count = 0;
+ results->buflen = WL_SCAN_RESULTS_FIXED_SIZE;
+ escan->escan_state = ESCAN_STATE_SCANING;
+
+ /* Arm scan timeout timer */
+ mod_timer(&escan->scan_timeout, jiffies + msecs_to_jiffies(WL_ESCAN_TIMER_INTERVAL_MS));
+
+ if (escan->scan_params_v2) {
+ params_v2 = (wl_scan_params_v2_t *)scan_params;
+ scan_param_size = sizeof(wl_scan_params_v2_t);
+ channel_offset = offsetof(wl_scan_params_v2_t, channel_list);
+ } else {
+ params = (wl_scan_params_t *)scan_params;
+ scan_param_size = sizeof(wl_scan_params_t);
+ channel_offset = offsetof(wl_scan_params_t, channel_list);
+ }
+
+ if (params_v2) {
+ /* scan params ver2 */
+ memcpy(&params_v2->bssid, &ether_bcast, ETHER_ADDR_LEN);
+ params_v2->version = htod16(WL_SCAN_PARAMS_VERSION_V2);
+ params_v2->length = htod16(sizeof(wl_scan_params_v2_t));
+ params_v2->bss_type = DOT11_BSSTYPE_ANY;
+ params_v2->scan_type = DOT11_SCANTYPE_ACTIVE;
+ params_v2->nprobes = htod32(-1);
+ if (scan_info->scan_time)
+ params_v2->active_time = htod32(scan_info->scan_time);
+ else
+ params_v2->active_time = htod32(-1);
+ params_v2->passive_time = htod32(-1);
+ params_v2->home_time = htod32(-1);
+ params_v2->channel_num = 0;
+ bzero(&params_v2->ssid, sizeof(wlc_ssid_t));
+ chan_list = params_v2->channel_list;
+ } else {
+ /* scan params ver 1 */
+ memcpy(&params->bssid, &ether_bcast, ETHER_ADDR_LEN);
+ params->bss_type = DOT11_BSSTYPE_ANY;
+ params->scan_type = DOT11_SCANTYPE_ACTIVE;
+ params->nprobes = htod32(-1);
+ if (scan_info->scan_time)
+ params->active_time = htod32(scan_info->scan_time);
+ else
+ params->active_time = htod32(-1);
+ params->passive_time = htod32(-1);
+ params->home_time = htod32(-1);
+ params->channel_num = 0;
+ bzero(&params->ssid, sizeof(wlc_ssid_t));
+ chan_list = params->channel_list;
+ }
+
+ cur_offset = channel_offset;
+
+ n_channels = dtoh32(list->count);
+ /* Copy channel array if applicable */
+ ESCAN_SCAN(dev->name, "### List of channelspecs to scan ###\n");
+ if (n_channels > 0) {
+ for (i = 0; i < n_channels; i++) {
+ channel = dtoh32(list->element[i]);
+ if (!dhd_conf_match_channel(escan->pub, channel))
+ continue;
+ chanspec = WL_CHANSPEC_BW_20;
+ if (chanspec == INVCHANSPEC) {
+ ESCAN_ERROR(dev->name, "Invalid chanspec! Skipping channel\n");
+ continue;
+ }
+ if (channel <= CH_MAX_2G_CHANNEL) {
+ chanspec |= WL_CHANSPEC_BAND_2G;
+ } else {
+ chanspec |= WL_CHANSPEC_BAND_5G;
+ }
+ chan_list[j] = channel;
+ chan_list[j] &= WL_CHANSPEC_CHAN_MASK;
+ chan_list[j] |= chanspec;
+ ESCAN_SCAN(dev->name, "Chan : %d, Channel spec: %x\n",
+ channel, chan_list[j]);
+ chan_list[j] = wl_chspec_host_to_driver(escan->ioctl_ver,
+ chan_list[j]);
+ j++;
+ }
+ cur_offset += (j * (sizeof(u16)));
+ n_channels = j;
+ } else {
+ ESCAN_SCAN(dev->name, "Scanning all channels\n");
+ }
+
+ if (scan_info->ssid.SSID_len) {
+ /* Copy ssid array if applicable */
+ ESCAN_SCAN(dev->name, "### List of SSIDs to scan ###\n");
+ cur_offset = (u32) roundup(cur_offset, sizeof(u32));
+ if (params_v2)
+ ptr = (char*)params_v2 + cur_offset;
+ else
+ ptr = (char*)params + cur_offset;
+
+ if (scan_info->bcast_ssid) {
+ n_ssids = 2;
+ ESCAN_SCAN(dev->name, "0: Broadcast scan\n");
+ memset(&ssid_tmp, 0, sizeof(wlc_ssid_t));
+ ssid_tmp.SSID_len = 0;
+ memcpy(ptr, &ssid_tmp, sizeof(wlc_ssid_t));
+ ptr += sizeof(wlc_ssid_t);
+ } else {
+ n_ssids = 1;
+ }
+
+ memset(&ssid_tmp, 0, sizeof(wlc_ssid_t));
+ ssid_tmp.SSID_len = scan_info->ssid.SSID_len;
+ memcpy(ssid_tmp.SSID, scan_info->ssid.SSID, scan_info->ssid.SSID_len);
+ memcpy(ptr, &ssid_tmp, sizeof(wlc_ssid_t));
+ ptr += sizeof(wlc_ssid_t);
+ ESCAN_SCAN(dev->name, "1: scan for %s size=%d\n",
+ ssid_tmp.SSID, ssid_tmp.SSID_len);
+ }
+ else {
+ ESCAN_SCAN(dev->name, "Broadcast scan\n");
+ }
+
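+ /*
+ * Descriptive note (added for clarity): channel_num packs both
+ * counts into one u32 -- the SSID count in the upper bits via
+ * WL_SCAN_PARAMS_NSSID_SHIFT and the channel count in the low
+ * WL_SCAN_PARAMS_COUNT_MASK bits, e.g. 2 SSIDs + 11 channels
+ * -> (2 << 16) | 11 when the shift is 16.
+ */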
+ if (n_ssids || n_channels) {
+ u32 channel_num =
+ htod32((n_ssids << WL_SCAN_PARAMS_NSSID_SHIFT) |
+ (n_channels & WL_SCAN_PARAMS_COUNT_MASK));
+ if (params_v2) {
+ params_v2->channel_num = channel_num;
+ } else {
+ params->channel_num = channel_num;
+ }
+ }
+
+ return err;
+}
+
+static int
+wl_escan_reset(struct wl_escan_info *escan)
+{
+ if (timer_pending(&escan->scan_timeout))
+ del_timer_sync(&escan->scan_timeout);
+ escan->escan_state = ESCAN_STATE_IDLE;
+
+ return 0;
+}
+
+static void
+wl_escan_timeout(unsigned long data)
+{
+ wl_event_msg_t msg;
+ struct wl_escan_info *escan = (struct wl_escan_info *)data;
+ wl_scan_results_t *bss_list;
+ struct wl_bss_info *bi = NULL;
+ s32 i;
+ u32 channel;
+
+ if (!escan->dev) {
+ ESCAN_ERROR("wlan", "No dev present\n");
+ return;
+ }
+
+ bss_list = wl_escan_get_buf(escan);
+ if (!bss_list) {
+ ESCAN_ERROR(escan->dev->name,
+ "bss_list is null. Didn't receive any partial scan results\n");
+ } else {
+ ESCAN_ERROR(escan->dev->name, "scanned AP count (%d)\n", bss_list->count);
+ bi = next_bss(bss_list, bi);
+ for_each_bss(bss_list, bi, i) {
+ channel = wf_chspec_ctlchan(wl_chspec_driver_to_host(escan->ioctl_ver,
+ bi->chanspec));
+ ESCAN_ERROR(escan->dev->name, "SSID :%s Channel :%d\n", bi->SSID, channel);
+ }
+ }
+
+ bzero(&msg, sizeof(wl_event_msg_t));
+ ESCAN_ERROR(escan->dev->name, "timer expired\n");
+
+ msg.ifidx = dhd_net2idx(escan->pub->info, escan->dev);
+ msg.event_type = hton32(WLC_E_ESCAN_RESULT);
+ msg.status = hton32(WLC_E_STATUS_TIMEOUT);
+ msg.reason = 0xFFFFFFFF;
+ wl_ext_event_send(escan->pub->event_params, &msg, NULL);
+}
+
+int
+wl_escan_set_scan(struct net_device *dev, wl_scan_info_t *scan_info)
+{
+ struct dhd_pub *dhdp = dhd_get_pub(dev);
+ struct wl_escan_info *escan = dhdp->escan;
+ s32 err = BCME_OK;
+ wl_escan_params_t *eparams = NULL;
+ wl_escan_params_v2_t *eparams_v2 = NULL;
+ u8 *scan_params = NULL;
+ s32 params_size;
+ wl_escan_params_t *params = NULL;
+ u32 n_channels = 0;
+ wl_uint32_list_t *list;
+ u8 valid_chan_list[sizeof(u32)*(WL_NUMCHANNELS + 1)];
+
+ mutex_lock(&escan->usr_sync);
+ if (escan->escan_state == ESCAN_STATE_DOWN) {
+ ESCAN_ERROR(dev->name, "STATE is down\n");
+ err = -EINVAL;
+ goto exit2;
+ }
+
+#if defined(WL_EXT_IAPSTA) && defined(WL_CFG80211)
+ err = wl_ext_in4way_sync(dev, STA_NO_SCAN_IN4WAY, WL_EXT_STATUS_SCAN, NULL);
+ if (err) {
+ ESCAN_SCAN(dev->name, "scan busy %d\n", err);
+ goto exit2;
+ }
+#endif
+
+ if (wl_ext_check_scan(dev, dhdp)) {
+ err = -EBUSY;
+ goto exit2;
+ }
+
+ ESCAN_TRACE(dev->name, "Enter \n");
+
+ if (escan->scan_params_v2) {
+ params_size = (WL_SCAN_PARAMS_V2_FIXED_SIZE +
+ OFFSETOF(wl_escan_params_v2_t, params));
+ } else {
+ params_size = (WL_SCAN_PARAMS_FIXED_SIZE +
+ OFFSETOF(wl_escan_params_t, params));
+ }
+
+ /* if the scan request is not empty, parse the scan request parameters */
+ memset(valid_chan_list, 0, sizeof(valid_chan_list));
+ list = (wl_uint32_list_t *)(void *) valid_chan_list;
+
+ if (scan_info->channels.count) {
+ memcpy(list, &scan_info->channels, sizeof(wl_channel_list_t));
+ } else {
+ list->count = htod32(WL_NUMCHANNELS);
+ err = wldev_ioctl(dev, WLC_GET_VALID_CHANNELS, valid_chan_list,
+ sizeof(valid_chan_list), false);
+ if (err != 0) {
+ ESCAN_ERROR(dev->name, "get channels failed with %d\n", err);
+ goto exit;
+ }
+ }
+
+ n_channels = dtoh32(list->count);
+ /* Allocate space for populating ssids in wl_escan_params_t struct */
+ if (dtoh32(list->count) % 2)
+ /* If n_channels is odd, add a pad of u16 */
+ params_size += sizeof(u16) * (n_channels + 1);
+ else
+ params_size += sizeof(u16) * n_channels;
+ if (scan_info->ssid.SSID_len) {
+ params_size += sizeof(struct wlc_ssid) * 2;
+ }
+
+ params = (wl_escan_params_t *) kzalloc(params_size, GFP_KERNEL);
+ if (params == NULL) {
+ ESCAN_ERROR(dev->name, "kzalloc failed\n");
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ if (escan->scan_params_v2) {
+ eparams_v2 = (wl_escan_params_v2_t *)params;
+ scan_params = (u8 *)&eparams_v2->params;
+ eparams_v2->version = htod32(ESCAN_REQ_VERSION_V2);
+ eparams_v2->action = htod16(WL_SCAN_ACTION_START);
+ } else {
+ eparams = (wl_escan_params_t *)params;
+ scan_params = (u8 *)&eparams->params;
+ eparams->version = htod32(ESCAN_REQ_VERSION);
+ eparams->action = htod16(WL_SCAN_ACTION_START);
+ }
+ wl_escan_set_sync_id(params->sync_id);
+
+ wl_escan_prep(dev, escan, list, scan_params, scan_info);
+
+ if (params_size + sizeof("escan") >= WLC_IOCTL_MEDLEN) {
+ ESCAN_ERROR(dev->name, "ioctl buffer length not sufficient\n");
+ kfree(params);
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ WL_MSG(dev->name, "LEGACY_SCAN\n");
+ err = wldev_iovar_setbuf(dev, "escan", params, params_size,
+ escan->escan_ioctl_buf, WLC_IOCTL_MEDLEN, NULL);
+ if (unlikely(err)) {
+ ESCAN_ERROR(dev->name, "escan error (%d)\n", err);
+ } else {
+ escan->dev = dev;
+ }
+ kfree(params);
+exit:
+ if (unlikely(err)) {
+ wl_escan_reset(escan);
+ }
+exit2:
+ mutex_unlock(&escan->usr_sync);
+ return err;
+}
+
+#if defined(WL_WIRELESS_EXT)
+static int
+rssi_to_qual(int rssi)
+{
+ if (rssi <= WL_IW_RSSI_NO_SIGNAL)
+ return 0;
+ else if (rssi <= WL_IW_RSSI_VERY_LOW)
+ return 1;
+ else if (rssi <= WL_IW_RSSI_LOW)
+ return 2;
+ else if (rssi <= WL_IW_RSSI_GOOD)
+ return 3;
+ else if (rssi <= WL_IW_RSSI_VERY_GOOD)
+ return 4;
+ else
+ return 5;
+}
+
+static int
+wl_escan_merge_scan_results(struct net_device *dev, struct wl_escan_info *escan,
+ struct iw_request_info *info, char *extra, wl_bss_info_t *bi, int *len, int max_size)
+{
+ s32 err = BCME_OK;
+ struct iw_event iwe;
+ int j;
+ char *event = extra, *end = extra + max_size - WE_ADD_EVENT_FIX, *value;
+ int16 rssi;
+ int channel;
+ chanspec_t chanspec;
+
+ /* overflow check cover fields before wpa IEs */
+ if (event + ETHER_ADDR_LEN + bi->SSID_len + IW_EV_UINT_LEN + IW_EV_FREQ_LEN +
+ IW_EV_QUAL_LEN >= end) {
+ err = -E2BIG;
+ goto exit;
+ }
+
+#if defined(RSSIAVG)
+ rssi = wl_get_avg_rssi(&escan->g_rssi_cache_ctrl, &bi->BSSID);
+ if (rssi == RSSI_MINVAL)
+ rssi = MIN(dtoh16(bi->RSSI), RSSI_MAXVAL);
+#else
+ // terence 20150419: limit the max. rssi to -2 or the bss will be filtered out in android OS
+ rssi = MIN(dtoh16(bi->RSSI), RSSI_MAXVAL);
+#endif
+ chanspec = wl_chspec_driver_to_host(escan->ioctl_ver, bi->chanspec);
+ channel = wf_chspec_ctlchan(chanspec);
+ ESCAN_SCAN(dev->name, "BSSID %pM, channel %3d(%3d %sMHz), rssi %3d, SSID \"%s\"\n",
+ &bi->BSSID, channel, CHSPEC_CHANNEL(chanspec),
+ CHSPEC_IS20(chanspec)?"20":
+ CHSPEC_IS40(chanspec)?"40":
+ CHSPEC_IS80(chanspec)?"80":"160",
+ rssi, bi->SSID);
+
+ /* First entry must be the BSSID */
+ iwe.cmd = SIOCGIWAP;
+ iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
+ memcpy(iwe.u.ap_addr.sa_data, &bi->BSSID, ETHER_ADDR_LEN);
+ event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_ADDR_LEN);
+
+ /* SSID */
+ iwe.u.data.length = dtoh32(bi->SSID_len);
+ iwe.cmd = SIOCGIWESSID;
+ iwe.u.data.flags = 1;
+ event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, bi->SSID);
+
+ /* Mode */
+ if (dtoh16(bi->capability) & (DOT11_CAP_ESS | DOT11_CAP_IBSS)) {
+ iwe.cmd = SIOCGIWMODE;
+ if (dtoh16(bi->capability) & DOT11_CAP_ESS)
+ iwe.u.mode = IW_MODE_INFRA;
+ else
+ iwe.u.mode = IW_MODE_ADHOC;
+ event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_UINT_LEN);
+ }
+
+ /* Channel */
+ iwe.cmd = SIOCGIWFREQ;
+#if 1
+ iwe.u.freq.m = wf_channel2mhz(channel, channel <= CH_MAX_2G_CHANNEL ?
+ WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G);
+#else
+ iwe.u.freq.m = wf_channel2mhz(bi->n_cap ?
+ bi->ctl_ch : CHSPEC_CHANNEL(bi->chanspec),
+ CHSPEC_CHANNEL(bi->chanspec) <= CH_MAX_2G_CHANNEL ?
+ WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G);
+#endif
+ iwe.u.freq.e = 6;
+ event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_FREQ_LEN);
+
+ /* Channel quality */
+ iwe.cmd = IWEVQUAL;
+ iwe.u.qual.qual = rssi_to_qual(rssi);
+ iwe.u.qual.level = 0x100 + rssi;
+ iwe.u.qual.noise = 0x100 + bi->phy_noise;
+ event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_QUAL_LEN);
+
+ wl_iw_handle_scanresults_ies(&event, end, info, bi);
+
+ /* Encryption */
+ iwe.cmd = SIOCGIWENCODE;
+ if (dtoh16(bi->capability) & DOT11_CAP_PRIVACY)
+ iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
+ else
+ iwe.u.data.flags = IW_ENCODE_DISABLED;
+ iwe.u.data.length = 0;
+ event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)event);
+
+ /* Rates */
+ if (bi->rateset.count <= sizeof(bi->rateset.rates)) {
+ if (event + IW_MAX_BITRATES*IW_EV_PARAM_LEN >= end) {
+ err = -E2BIG;
+ goto exit;
+ }
+ value = event + IW_EV_LCP_LEN;
+ iwe.cmd = SIOCGIWRATE;
+ /* Those two flags are ignored... */
+ iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
+ for (j = 0; j < bi->rateset.count && j < IW_MAX_BITRATES; j++) {
+ iwe.u.bitrate.value = (bi->rateset.rates[j] & 0x7f) * 500000;
+ value = IWE_STREAM_ADD_VALUE(info, event, value, end, &iwe,
+ IW_EV_PARAM_LEN);
+ }
+ event = value;
+ }
+ *len = event - extra;
+ if (*len < 0)
+ ESCAN_ERROR(dev->name, "==> Wrong size\n");
+
+exit:
+ return err;
+}
+
+int
+wl_escan_merge_scan_list(struct net_device *dev, u8 *cur_bssid,
+ struct iw_request_info *info, struct iw_point *dwrq, char *extra,
+ int *len_ret, int *bss_cnt)
+{
+ struct dhd_pub *dhdp = dhd_get_pub(dev);
+ struct wl_escan_info *escan = dhdp->escan;
+ s32 err = BCME_OK;
+ int i = 0, cnt = 0;
+ int len_prep = 0;
+ wl_bss_info_t *bi = NULL;
+ wl_scan_results_t *bss_list;
+ __u16 buflen_from_user = dwrq->length;
+
+ bss_list = escan->bss_list;
+ bi = next_bss(bss_list, bi);
+ for_each_bss_wext(bss_list, bi, i)
+ {
+ if (!memcmp(&bi->BSSID, cur_bssid, ETHER_ADDR_LEN)) {
+ ESCAN_SCAN(dev->name, "skip connected AP %pM\n", cur_bssid);
+ continue;
+ }
+ len_prep = 0;
+ err = wl_escan_merge_scan_results(dev, escan, info, extra+*len_ret, bi,
+ &len_prep, buflen_from_user-*len_ret);
+ *len_ret += len_prep;
+ if (err)
+ goto exit;
+ cnt++;
+ }
+ *bss_cnt = cnt;
+
+exit:
+ return err;
+}
+
+#if defined(BSSCACHE)
+int
+wl_escan_merge_cache_list(struct net_device *dev, u8 *cur_bssid,
+ struct iw_request_info *info, struct iw_point *dwrq, char *extra,
+ int *len_ret, int *bss_cnt)
+{
+ struct dhd_pub *dhdp = dhd_get_pub(dev);
+ struct wl_escan_info *escan = dhdp->escan;
+ s32 err = BCME_OK;
+ int i = 0, cnt = 0;
+ int len_prep = 0;
+ wl_bss_info_t *bi = NULL;
+ wl_scan_results_t *bss_list;
+ __u16 buflen_from_user = dwrq->length;
+ wl_bss_cache_t *node;
+
+ bss_list = &escan->g_bss_cache_ctrl.m_cache_head->results;
+ node = escan->g_bss_cache_ctrl.m_cache_head;
+ for (i=0; node && i<IW_MAX_AP; i++)
+ {
+ bi = node->results.bss_info;
+ if (node->dirty > 1) {
+ if (!memcmp(&bi->BSSID, cur_bssid, ETHER_ADDR_LEN)) {
+ ESCAN_SCAN(dev->name, "skip connected AP %pM\n", cur_bssid);
+ node = node->next;
+ continue;
+ }
+ len_prep = 0;
+ err = wl_escan_merge_scan_results(dev, escan, info, extra+*len_ret, bi,
+ &len_prep, buflen_from_user-*len_ret);
+ *len_ret += len_prep;
+ if (err)
+ goto exit;
+ cnt++;
+ }
+ node = node->next;
+ }
+ *bss_cnt = cnt;
+
+exit:
+ return err;
+}
+#endif
+
+int
+wl_escan_get_scan(struct net_device *dev,
+ struct iw_request_info *info, struct iw_point *dwrq, char *extra)
+{
+ struct dhd_pub *dhdp = dhd_get_pub(dev);
+ struct wl_escan_info *escan = dhdp->escan;
+ s32 err = BCME_OK;
+ int scan_cnt = 0;
+#if defined(BSSCACHE)
+ int cache_cnt = 0;
+#endif
+ int len_prep = 0, len_ret = 0;
+ wl_bss_info_t *bi = NULL;
+ __u16 buflen_from_user = dwrq->length;
+ char *buf = NULL;
+ struct ether_addr cur_bssid;
+ u8 ioctl_buf[WLC_IOCTL_SMLEN];
+
+ if (!extra) {
+ ESCAN_TRACE(dev->name, "extra is null\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&escan->usr_sync);
+
+ /* Check for scan in progress */
+ if (escan->escan_state == ESCAN_STATE_SCANING) {
+ ESCAN_DBG(dev->name, "SIOCGIWSCAN GET still scanning\n");
+ err = -EAGAIN;
+ goto exit;
+ }
+ if (!escan->bss_list) {
+ ESCAN_ERROR(dev->name, "scan not ready\n");
+ err = -EAGAIN;
+ goto exit;
+ }
+ if (dev != escan->dev) {
+ ESCAN_ERROR(dev->name, "not my scan from %s\n", escan->dev->name);
+ err = -EINVAL;
+ goto exit;
+ }
+
+ ESCAN_SCAN(dev->name, "SIOCGIWSCAN, len=%d\n", dwrq->length);
+
+ wldev_iovar_getbuf(dev, "cur_etheraddr", NULL, 0, ioctl_buf, WLC_IOCTL_SMLEN, NULL);
+ err = wldev_ioctl(dev, WLC_GET_BSSID, &cur_bssid, sizeof(cur_bssid), false);
+ if (err != BCME_NOTASSOCIATED &&
+ memcmp(&ether_null, &cur_bssid, ETHER_ADDR_LEN) &&
+ memcmp(ioctl_buf, &cur_bssid, ETHER_ADDR_LEN)) {
+ // merge current connected bss
+ buf = kzalloc(WL_EXTRA_BUF_MAX, GFP_ATOMIC);
+ if (!buf) {
+ ESCAN_ERROR(dev->name, "buffer alloc failed.\n");
+ err = BCME_NOMEM;
+ goto exit;
+ }
+ *(u32 *)buf = htod32(WL_EXTRA_BUF_MAX);
+ err = wldev_ioctl(dev, WLC_GET_BSS_INFO, buf, WL_EXTRA_BUF_MAX, false);
+ if (unlikely(err)) {
+ ESCAN_ERROR(dev->name, "Could not get bss info %d\n", err);
+ goto exit;
+ }
+ bi = (struct wl_bss_info *)(buf + 4);
+ len_prep = 0;
+ err = wl_escan_merge_scan_results(dev, escan, info, extra+len_ret, bi,
+ &len_prep, buflen_from_user-len_ret);
+ len_ret += len_prep;
+ if (err)
+ goto exit;
+ bi = NULL;
+ }
+
+ err = wl_escan_merge_scan_list(dev, (u8 *)&cur_bssid, info, dwrq, extra,
+ &len_ret, &scan_cnt);
+ if (err)
+ goto exit;
+#if defined(BSSCACHE)
+ err = wl_escan_merge_cache_list(dev, (u8 *)&cur_bssid, info, dwrq, extra,
+ &len_ret, &cache_cnt);
+ if (err)
+ goto exit;
+#endif
+
+ if ((len_ret + WE_ADD_EVENT_FIX) < dwrq->length)
+ dwrq->length = len_ret;
+
+ dwrq->flags = 0; /* todo */
+ ESCAN_SCAN(dev->name, "scanned AP count (%d)\n", scan_cnt);
+#if defined(BSSCACHE)
+ ESCAN_SCAN(dev->name, "cached AP count (%d)\n", cache_cnt);
+#endif
+exit:
+ kfree(buf);
+ dwrq->length = len_ret;
+ mutex_unlock(&escan->usr_sync);
+ return err;
+}
+#endif /* WL_WIRELESS_EXT */
+
+#ifdef WLMESH
+bool
+wl_escan_meshid_ie(u8 *parse, u32 len, wlc_ssid_t *mesh_id)
+{
+ bcm_tlv_t *ie;
+
+ if ((ie = bcm_parse_tlvs(parse, (int)len, DOT11_MNG_MESH_ID)) != NULL) {
+ mesh_id->SSID_len = ie->len;
+ if (ie->len) {
+ strncpy(mesh_id->SSID, ie->data, ie->len);
+ }
+ return TRUE;
+ }
+ return FALSE;
+}
+
+bool
+wl_escan_rsn_ie(u8 *parse, u32 len)
+{
+ if (bcm_parse_tlvs(parse, (u32)len, DOT11_MNG_RSN_ID)) {
+ return TRUE;
+ }
+ return FALSE;
+}
+
+bool
+wl_escan_mesh_info_ie(struct net_device *dev, u8 *parse, u32 len,
+ struct wl_mesh_params *mesh_info)
+{
+ bcm_tlv_t *ie;
+ uchar mesh_oui[]={0x00, 0x22, 0xf4};
+ int totl_len;
+ uint8 *pie;
+ uint max_len;
+ bool found = FALSE;
+
+ memset(mesh_info, 0, sizeof(struct wl_mesh_params));
+ if ((ie = bcm_parse_tlvs(parse, (int)len, DOT11_MNG_VS_ID)) != NULL) {
+ totl_len = ie->len;
+ if (!memcmp(ie->data, &mesh_oui, sizeof(mesh_oui))) {
+ pie = ie->data + sizeof(mesh_oui);
+ ie = (bcm_tlv_t *)pie;
+ totl_len -= sizeof(mesh_oui);
+ while (totl_len > 2 && ie->len) {
+ if (ie->id == MESH_INFO_MASTER_BSSID && ie->len == ETHER_ADDR_LEN) {
+ memcpy(&mesh_info->master_bssid, ie->data, ETHER_ADDR_LEN);
+ } else if (ie->id == MESH_INFO_MASTER_CHANNEL) {
+ mesh_info->master_channel = ie->data[0];
+ found = TRUE;
+ } else if (ie->id == MESH_INFO_HOP_CNT) {
+ mesh_info->hop_cnt = ie->data[0];
+ } else if (ie->id == MESH_INFO_PEER_BSSID) {
+ max_len = min(MAX_HOP_LIST*ETHER_ADDR_LEN, (int)ie->len);
+ memcpy(mesh_info->peer_bssid, ie->data, max_len);
+ }
+ totl_len -= (ie->len + 2);
+ pie = ie->data + ie->len;
+ ie = (bcm_tlv_t *)pie;
+ }
+ }
+ }
+
+ return found;
+}
+
+bool
+wl_escan_mesh_info(struct net_device *dev, struct wl_escan_info *escan,
+ struct ether_addr *peer_bssid, struct wl_mesh_params *mesh_info)
+{
+ int i = 0;
+ wl_bss_info_t *bi = NULL;
+ wl_scan_results_t *bss_list;
+ int16 bi_rssi, bi_chan;
+ wlc_ssid_t bi_meshid;
+ bool is_mesh_peer = FALSE, found = FALSE;
+ struct wl_mesh_params peer_mesh_info;
+
+ mutex_lock(&escan->usr_sync);
+
+ /* Check for scan in progress */
+ if (escan->escan_state == ESCAN_STATE_SCANING) {
+ ESCAN_ERROR(dev->name, "SIOCGIWSCAN GET still scanning\n");
+ goto exit;
+ }
+ if (!escan->bss_list) {
+ ESCAN_ERROR(dev->name, "scan not ready\n");
+ goto exit;
+ }
+ if (dev != escan->dev) {
+ ESCAN_ERROR(dev->name, "not my scan from %s\n", escan->dev->name);
+ goto exit;
+ }
+
+ bss_list = escan->bss_list;
+ bi = next_bss(bss_list, bi);
+ ESCAN_SCAN(dev->name, "scanned AP/Mesh count (%d)\n", bss_list->count);
+ for_each_bss(bss_list, bi, i)
+ {
+ memset(&bi_meshid, 0, sizeof(bi_meshid));
+ is_mesh_peer = FALSE;
+ bi_chan = wf_chspec_ctlchan(
+ wl_chspec_driver_to_host(escan->ioctl_ver, bi->chanspec));
+ bi_rssi = MIN(dtoh16(bi->RSSI), RSSI_MAXVAL);
+ is_mesh_peer = wl_escan_meshid_ie(((u8*)bi)+bi->ie_offset,
+ bi->ie_length, &bi_meshid);
+ if (!(bi->capability & (DOT11_CAP_ESS|DOT11_CAP_IBSS)) && is_mesh_peer) {
+ bool bi_sae = FALSE, bss_found = FALSE, prefer = FALSE;
+ if (!memcmp(peer_bssid, &bi->BSSID, ETHER_ADDR_LEN)) {
+ bi_sae = wl_escan_rsn_ie(((u8*)bi)+bi->ie_offset, bi->ie_length);
+ bss_found = wl_escan_mesh_info_ie(dev, ((u8*)bi)+bi->ie_offset,
+ bi->ie_length, &peer_mesh_info);
+ if (bss_found) {
+ memcpy(&mesh_info->master_bssid, &peer_mesh_info.master_bssid,
+ ETHER_ADDR_LEN);
+ mesh_info->master_channel = peer_mesh_info.master_channel;
+ mesh_info->hop_cnt = peer_mesh_info.hop_cnt;
+ memcpy(mesh_info->peer_bssid, peer_mesh_info.peer_bssid,
+ sizeof(peer_mesh_info.peer_bssid));
+ prefer = TRUE;
+ found = TRUE;
+ }
+ }
+ ESCAN_SCAN(dev->name,
+ "%s[Mesh] BSSID=%pM, channel=%d, RSSI=%d, sec=%s, "
+ "mbssid=%pM, mchannel=%d, hop=%d, pbssid=%pM, MeshID=\"%s\"\n",
+ prefer?"*":" ", &bi->BSSID, bi_chan, bi_rssi, bi_sae?"SAE":"OPEN",
+ &peer_mesh_info.master_bssid, peer_mesh_info.master_channel,
+ peer_mesh_info.hop_cnt, &peer_mesh_info.peer_bssid, bi_meshid.SSID);
+ }
+ }
+
+exit:
+ mutex_unlock(&escan->usr_sync);
+ return found;
+}
+
+bool
+wl_escan_mesh_peer(struct net_device *dev, struct wl_escan_info *escan,
+ wlc_ssid_t *cur_ssid, uint16 cur_chan, bool sae,
+ struct wl_mesh_params *mesh_info)
+{
+ int i = 0;
+ wl_bss_info_t *bi = NULL;
+ wl_scan_results_t *bss_list;
+ int16 bi_rssi, bi_chan, max_rssi = -100;
+ uint min_hop_cnt = 255;
+ wlc_ssid_t bi_meshid;
+ bool is_mesh_peer = FALSE, chan_matched = FALSE, found = FALSE;
+ struct wl_mesh_params peer_mesh_info;
+
+ mutex_lock(&escan->usr_sync);
+
+ /* Check for scan in progress */
+ if (escan->escan_state == ESCAN_STATE_SCANING) {
+ ESCAN_ERROR(dev->name, "SIOCGIWSCAN GET still scanning\n");
+ goto exit;
+ }
+ if (!escan->bss_list) {
+ ESCAN_ERROR(dev->name, "scan not ready\n");
+ goto exit;
+ }
+ if (dev != escan->dev) {
+ ESCAN_ERROR(dev->name, "not my scan from %s\n", escan->dev->name);
+ goto exit;
+ }
+
+ bss_list = escan->bss_list;
+ bi = next_bss(bss_list, bi);
+ ESCAN_SCAN(dev->name, "scanned AP/Mesh count (%d)\n", bss_list->count);
+ for_each_bss(bss_list, bi, i)
+ {
+ memset(&bi_meshid, 0, sizeof(bi_meshid));
+ is_mesh_peer = FALSE;
+ bi_chan = wf_chspec_ctlchan(
+ wl_chspec_driver_to_host(escan->ioctl_ver, bi->chanspec));
+ bi_rssi = MIN(dtoh16(bi->RSSI), RSSI_MAXVAL);
+ is_mesh_peer = wl_escan_meshid_ie(((u8*)bi)+bi->ie_offset,
+ bi->ie_length, &bi_meshid);
+ if (!(bi->capability & (DOT11_CAP_ESS|DOT11_CAP_IBSS)) && is_mesh_peer) {
+ bool meshid_matched = FALSE, sec_matched = FALSE, bi_sae = FALSE,
+ bss_found = FALSE, prefer = FALSE;
+
+ if (cur_ssid->SSID_len && cur_ssid->SSID_len == bi_meshid.SSID_len &&
+ !memcmp(cur_ssid->SSID, bi_meshid.SSID, bi_meshid.SSID_len))
+ meshid_matched = TRUE;
+
+ bi_sae = wl_escan_rsn_ie(((u8*)bi)+bi->ie_offset, bi->ie_length);
+ if (bi_sae == sae)
+ sec_matched = TRUE;
+
+ bss_found = wl_escan_mesh_info_ie(dev, ((u8*)bi)+bi->ie_offset, bi->ie_length,
+ &peer_mesh_info);
+ if (meshid_matched && sec_matched && bss_found &&
+ (cur_chan == bi_chan)) {
+ if (peer_mesh_info.hop_cnt < min_hop_cnt) {
+ memcpy(&mesh_info->master_bssid, &peer_mesh_info.master_bssid,
+ ETHER_ADDR_LEN);
+ mesh_info->master_channel = peer_mesh_info.master_channel;
+ mesh_info->hop_cnt = peer_mesh_info.hop_cnt;
+ memcpy(mesh_info->peer_bssid, peer_mesh_info.peer_bssid,
+ sizeof(peer_mesh_info.peer_bssid));
+ min_hop_cnt = peer_mesh_info.hop_cnt;
+ prefer = TRUE;
+ chan_matched = TRUE;
+ found = TRUE;
+ }
+ }
+ else if (meshid_matched && sec_matched && bss_found &&
+ (cur_chan != bi_chan) && !chan_matched) {
+ if (bi_rssi > max_rssi) {
+ memcpy(&mesh_info->master_bssid, &peer_mesh_info.master_bssid,
+ ETHER_ADDR_LEN);
+ mesh_info->master_channel = peer_mesh_info.master_channel;
+ mesh_info->hop_cnt = peer_mesh_info.hop_cnt;
+ memcpy(mesh_info->peer_bssid, peer_mesh_info.peer_bssid,
+ sizeof(peer_mesh_info.peer_bssid));
+ max_rssi = bi_rssi;
+ prefer = TRUE;
+ found = TRUE;
+ }
+ }
+
+ ESCAN_SCAN(dev->name,
+ "%s[Mesh] BSSID=%pM, channel=%d, RSSI=%d, sec=%s, "
+ "mbssid=%pM, mchannel=%d, hop=%d, pbssid=%pM, MeshID=\"%s\"\n",
+ prefer?"*":" ", &bi->BSSID, bi_chan, bi_rssi, bi_sae?"SAE":"OPEN",
+ &peer_mesh_info.master_bssid, peer_mesh_info.master_channel,
+ peer_mesh_info.hop_cnt, &peer_mesh_info.peer_bssid, bi_meshid.SSID);
+ } else {
+ ESCAN_SCAN(dev->name,
+ "[AP] BSSID=%pM, channel=%d, RSSI=%d, SSID=\"%s\"\n",
+ &bi->BSSID, bi_chan, bi_rssi, bi->SSID);
+ }
+ }
+
+exit:
+ mutex_unlock(&escan->usr_sync);
+ return found;
+}
+#endif /* WLMESH */
+
+static void
+wl_escan_deinit(struct net_device *dev, struct wl_escan_info *escan)
+{
+ ESCAN_TRACE(dev->name, "Enter\n");
+
+ del_timer_sync(&escan->scan_timeout);
+ escan->escan_state = ESCAN_STATE_DOWN;
+
+#if defined(RSSIAVG)
+ wl_free_rssi_cache(&escan->g_rssi_cache_ctrl);
+#endif
+#if defined(BSSCACHE)
+ wl_free_bss_cache(&escan->g_bss_cache_ctrl);
+#endif
+}
+
+static s32
+wl_escan_init(struct net_device *dev, struct wl_escan_info *escan)
+{
+ ESCAN_TRACE(dev->name, "Enter\n");
+
+ /* Init scan_timeout timer */
+ init_timer_compat(&escan->scan_timeout, wl_escan_timeout, escan);
+ escan->escan_state = ESCAN_STATE_IDLE;
+
+ return 0;
+}
+
+void
+wl_escan_down(struct net_device *dev)
+{
+ struct dhd_pub *dhdp = dhd_get_pub(dev);
+ struct wl_escan_info *escan = dhdp->escan;
+
+ ESCAN_TRACE(dev->name, "Enter\n");
+ if (!escan) {
+ ESCAN_ERROR(dev->name, "escan is NULL\n");
+ return;
+ }
+
+ escan->scan_params_v2 = false;
+
+ wl_escan_deinit(dev, escan);
+}
+
+int
+wl_escan_up(struct net_device *dev)
+{
+ struct dhd_pub *dhdp = dhd_get_pub(dev);
+ struct wl_escan_info *escan = dhdp->escan;
+ u8 ioctl_buf[WLC_IOCTL_SMLEN];
+ s32 val = 0;
+ int ret = -1;
+
+ ESCAN_TRACE(dev->name, "Enter\n");
+ if (!escan) {
+ ESCAN_ERROR(dev->name, "escan is NULL\n");
+ return ret;
+ }
+
+ ret = wl_escan_init(dev, escan);
+ if (ret) {
+ ESCAN_ERROR(dev->name, "wl_escan_init ret %d\n", ret);
+ return ret;
+ }
+
+ if (!escan->ioctl_ver) {
+ val = 1;
+		if ((ret = wldev_ioctl(dev, WLC_GET_VERSION, &val, sizeof(int), false)) < 0) {
+ ESCAN_ERROR(dev->name, "WLC_GET_VERSION failed, ret=%d\n", ret);
+ return ret;
+ }
+ val = dtoh32(val);
+ if (val != WLC_IOCTL_VERSION && val != 1) {
+ ESCAN_ERROR(dev->name,
+ "Version mismatch, please upgrade. Got %d, expected %d or 1\n",
+ val, WLC_IOCTL_VERSION);
+			return -1;
+ }
+ escan->ioctl_ver = val;
+ }
+
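+	/* Probe the "scan_ver" iovar: firmware that supports it takes v2 scan
+	 * parameters, while BCME_UNSUPPORTED means legacy v1 parameters and is
+	 * not treated as an error.
+	 */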
+ if ((ret = wldev_iovar_getbuf(dev, "scan_ver", NULL, 0,
+ ioctl_buf, sizeof(ioctl_buf), NULL)) == BCME_OK) {
+ ESCAN_TRACE(dev->name, "scan_params v2\n");
+ /* use scan_params ver2 */
+ escan->scan_params_v2 = true;
+ } else {
+ if (ret == BCME_UNSUPPORTED) {
+ ESCAN_TRACE(dev->name, "scan_ver, UNSUPPORTED\n");
+ ret = BCME_OK;
+ } else {
+ ESCAN_ERROR(dev->name, "get scan_ver err(%d)\n", ret);
+ }
+ }
+
+ return 0;
+}
+
+int
+wl_escan_event_dettach(struct net_device *dev, int ifidx)
+{
+ struct dhd_pub *dhdp = dhd_get_pub(dev);
+ struct wl_escan_info *escan = dhdp->escan;
+ int ret = -1;
+
+ if (!escan) {
+ ESCAN_ERROR(dev->name, "escan is NULL\n");
+ return ret;
+ }
+
+ if (ifidx < DHD_MAX_IFS) {
+ wl_ext_event_deregister(dev, dhdp, WLC_E_ESCAN_RESULT, wl_escan_ext_handler);
+ }
+
+ return 0;
+}
+
+int
+wl_escan_event_attach(struct net_device *dev, int ifidx)
+{
+ struct dhd_pub *dhdp = dhd_get_pub(dev);
+ struct wl_escan_info *escan = dhdp->escan;
+ int ret = -1;
+
+ if (!escan) {
+ ESCAN_ERROR(dev->name, "escan is NULL\n");
+ return ret;
+ }
+
+ if (ifidx < DHD_MAX_IFS) {
+ ret = wl_ext_event_register(dev, dhdp, WLC_E_ESCAN_RESULT, wl_escan_ext_handler,
+ escan, PRIO_EVENT_ESCAN);
+ if (ret) {
+ ESCAN_ERROR(dev->name, "wl_ext_event_register err %d\n", ret);
+ }
+ }
+
+ return ret;
+}
+
+void
+wl_escan_detach(struct net_device *dev)
+{
+ struct dhd_pub *dhdp = dhd_get_pub(dev);
+ struct wl_escan_info *escan = dhdp->escan;
+
+ ESCAN_TRACE(dev->name, "Enter\n");
+
+ if (!escan)
+ return;
+
+ wl_escan_deinit(dev, escan);
+ if (escan->escan_ioctl_buf) {
+ kfree(escan->escan_ioctl_buf);
+ escan->escan_ioctl_buf = NULL;
+ }
+ wl_ext_event_deregister(dev, dhdp, WLC_E_ESCAN_RESULT, wl_escan_ext_handler);
+
+ DHD_OS_PREFREE(dhdp, escan, sizeof(struct wl_escan_info));
+ dhdp->escan = NULL;
+}
+
+int
+wl_escan_attach(struct net_device *dev)
+{
+ struct dhd_pub *dhdp = dhd_get_pub(dev);
+ struct wl_escan_info *escan = NULL;
+ int ret = 0;
+
+ ESCAN_TRACE(dev->name, "Enter\n");
+
+ escan = (struct wl_escan_info *)DHD_OS_PREALLOC(dhdp,
+ DHD_PREALLOC_WL_ESCAN, sizeof(struct wl_escan_info));
+ if (!escan)
+ return -ENOMEM;
+ memset(escan, 0, sizeof(struct wl_escan_info));
+
+ dhdp->escan = escan;
+
+	/* we only care about the main interface, so save a global here */
+ escan->pub = dhdp;
+ escan->escan_state = ESCAN_STATE_DOWN;
+
+ escan->escan_ioctl_buf = (void *)kzalloc(WLC_IOCTL_MAXLEN, GFP_KERNEL);
+ if (unlikely(!escan->escan_ioctl_buf)) {
+ ESCAN_ERROR(dev->name, "Ioctl buf alloc failed\n");
+ ret = -ENOMEM;
+ goto exit;
+ }
+ ret = wl_escan_init(dev, escan);
+ if (ret) {
+ ESCAN_ERROR(dev->name, "wl_escan_init err %d\n", ret);
+ goto exit;
+ }
+ mutex_init(&escan->usr_sync);
+
+ return 0;
+
+exit:
+ wl_escan_detach(dev);
+ return ret;
+}
+
+#endif /* WL_ESCAN */
diff --git a/bcmdhd.101.10.361.x/wl_escan.h b/bcmdhd.101.10.361.x/wl_escan.h
new file mode 100755
index 0000000..fcd19d9
--- /dev/null
+++ b/bcmdhd.101.10.361.x/wl_escan.h
@@ -0,0 +1,89 @@
+
+#ifndef _wl_escan_
+#define _wl_escan_
+#include <linuxver.h>
+#if defined(WL_WIRELESS_EXT)
+#include <wl_iw.h>
+#endif /* WL_WIRELESS_EXT */
+#include <wl_iapsta.h>
+#include <wl_android_ext.h>
+#include <dhd_config.h>
+
+#define ESCAN_BUF_SIZE (64 * 1024)
+
+#define WL_ESCAN_TIMER_INTERVAL_MS 10000 /* Scan timeout */
+
+/* dongle escan state */
+enum escan_state {
+ ESCAN_STATE_DOWN,
+ ESCAN_STATE_IDLE,
+ ESCAN_STATE_SCANING
+};
+
+typedef struct wl_scan_info {
+ bool bcast_ssid;
+ wlc_ssid_t ssid;
+ wl_channel_list_t channels;
+ int scan_time;
+} wl_scan_info_t;
+
+typedef struct wl_escan_info {
+ struct net_device *dev;
+ bool scan_params_v2;
+ dhd_pub_t *pub;
+	timer_list_compat_t scan_timeout;	/* Timer to catch scan event timeout */
+ int escan_state;
+ int ioctl_ver;
+ u8 escan_buf[ESCAN_BUF_SIZE];
+ wl_scan_results_t *bss_list;
+ u8 *escan_ioctl_buf;
+	struct mutex usr_sync;	/* mainly for up/down synchronization */
+ int autochannel;
+ int best_2g_ch;
+ int best_5g_ch;
+#if defined(RSSIAVG)
+ wl_rssi_cache_ctrl_t g_rssi_cache_ctrl;
+ wl_rssi_cache_ctrl_t g_connected_rssi_cache_ctrl;
+#endif
+#if defined(BSSCACHE)
+ wl_bss_cache_ctrl_t g_bss_cache_ctrl;
+#endif
+} wl_escan_info_t;
+
+#if defined(WLMESH)
+enum mesh_info_id {
+ MESH_INFO_MASTER_BSSID = 1,
+ MESH_INFO_MASTER_CHANNEL,
+ MESH_INFO_HOP_CNT,
+ MESH_INFO_PEER_BSSID
+};
+
+#define MAX_HOP_LIST 10
+typedef struct wl_mesh_params {
+ struct ether_addr master_bssid;
+ uint16 master_channel;
+ uint hop_cnt;
+ struct ether_addr peer_bssid[MAX_HOP_LIST];
+ uint16 scan_channel;
+} wl_mesh_params_t;
+bool wl_escan_mesh_info(struct net_device *dev,
+ struct wl_escan_info *escan, struct ether_addr *peer_bssid,
+ struct wl_mesh_params *mesh_info);
+bool wl_escan_mesh_peer(struct net_device *dev,
+ struct wl_escan_info *escan, wlc_ssid_t *cur_ssid, uint16 cur_chan, bool sae,
+ struct wl_mesh_params *mesh_info);
+#endif /* WLMESH */
+
+int wl_escan_set_scan(struct net_device *dev, wl_scan_info_t *scan_info);
+#if defined(WL_WIRELESS_EXT)
+int wl_escan_get_scan(struct net_device *dev,
+ struct iw_request_info *info, struct iw_point *dwrq, char *extra);
+#endif
+int wl_escan_attach(struct net_device *dev);
+void wl_escan_detach(struct net_device *dev);
+int wl_escan_event_attach(struct net_device *dev, int ifidx);
+int wl_escan_event_dettach(struct net_device *dev, int ifidx);
+int wl_escan_up(struct net_device *dev);
+void wl_escan_down(struct net_device *dev);
+
+#endif /* _wl_escan_ */
diff --git a/bcmdhd.101.10.361.x/wl_event.c b/bcmdhd.101.10.361.x/wl_event.c
new file mode 100755
index 0000000..a111b3f
--- /dev/null
+++ b/bcmdhd.101.10.361.x/wl_event.c
@@ -0,0 +1,556 @@
+
+#include <wl_android.h>
+#ifdef WL_EVENT
+#include <bcmendian.h>
+#include <dhd_config.h>
+
+#define EVENT_ERROR(name, arg1, args...) \
+ do { \
+ if (android_msg_level & ANDROID_ERROR_LEVEL) { \
+ printf("[%s] EVENT-ERROR) %s : " arg1, name, __func__, ## args); \
+ } \
+ } while (0)
+#define EVENT_TRACE(name, arg1, args...) \
+ do { \
+ if (android_msg_level & ANDROID_TRACE_LEVEL) { \
+ printf("[%s] EVENT-TRACE) %s : " arg1, name, __func__, ## args); \
+ } \
+ } while (0)
+#define EVENT_DBG(name, arg1, args...) \
+ do { \
+ if (android_msg_level & ANDROID_DBG_LEVEL) { \
+ printf("[%s] EVENT-DBG) %s : " arg1, name, __func__, ## args); \
+ } \
+ } while (0)
+
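+/* Wrappers around list_first_entry()/container_of() that suppress the
+ * -Wcast-qual warning emitted by GCC >= 4.6 for the const-discarding casts
+ * inside those macros.
+ */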
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6))
+#define BCM_SET_LIST_FIRST_ENTRY(entry, ptr, type, member) \
+_Pragma("GCC diagnostic push") \
+_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") \
+(entry) = list_first_entry((ptr), type, member); \
+_Pragma("GCC diagnostic pop") \
+
+#define BCM_SET_CONTAINER_OF(entry, ptr, type, member) \
+_Pragma("GCC diagnostic push") \
+_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") \
+entry = container_of((ptr), type, member); \
+_Pragma("GCC diagnostic pop") \
+
+#else
+#define BCM_SET_LIST_FIRST_ENTRY(entry, ptr, type, member) \
+(entry) = list_first_entry((ptr), type, member); \
+
+#define BCM_SET_CONTAINER_OF(entry, ptr, type, member) \
+entry = container_of((ptr), type, member); \
+
+#endif /* STRICT_GCC_WARNINGS */
+
+/* event queue for cfg80211 main event */
+struct wl_event_q {
+ struct list_head eq_list;
+ u32 etype;
+ wl_event_msg_t emsg;
+ s8 edata[1];
+};
+
+typedef void(*EXT_EVENT_HANDLER) (struct net_device *dev, void *cb_argu,
+ const wl_event_msg_t *e, void *data);
+
+typedef struct event_handler_list {
+ struct event_handler_list *next;
+ struct net_device *dev;
+ uint32 etype;
+ EXT_EVENT_HANDLER cb_func;
+ void *cb_argu;
+ wl_event_prio_t prio;
+} event_handler_list_t;
+
+typedef struct event_handler_head {
+ event_handler_list_t *evt_head;
+} event_handler_head_t;
+
+typedef struct wl_event_params {
+ dhd_pub_t *pub;
+ struct net_device *dev[DHD_MAX_IFS];
+ struct event_handler_head evt_head;
+ struct list_head eq_list; /* used for event queue */
+ spinlock_t eq_lock; /* for event queue synchronization */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0))
+ tsk_ctl_t thr_event_ctl;
+#else
+ struct workqueue_struct *event_workq; /* workqueue for event */
+ struct work_struct event_work; /* work item for event */
+#endif
+ struct mutex event_sync;
+} wl_event_params_t;
+
+static unsigned long
+wl_ext_event_lock_eq(struct wl_event_params *event_params)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&event_params->eq_lock, flags);
+ return flags;
+}
+
+static void
+wl_ext_event_unlock_eq(struct wl_event_params *event_params, unsigned long flags)
+{
+ spin_unlock_irqrestore(&event_params->eq_lock, flags);
+}
+
+static void
+wl_ext_event_init_eq_lock(struct wl_event_params *event_params)
+{
+ spin_lock_init(&event_params->eq_lock);
+}
+
+static void
+wl_ext_event_init_eq(struct wl_event_params *event_params)
+{
+ wl_ext_event_init_eq_lock(event_params);
+ INIT_LIST_HEAD(&event_params->eq_list);
+}
+
+static void
+wl_ext_event_flush_eq(struct wl_event_params *event_params)
+{
+ struct wl_event_q *e;
+ unsigned long flags;
+
+ flags = wl_ext_event_lock_eq(event_params);
+ while (!list_empty_careful(&event_params->eq_list)) {
+ BCM_SET_LIST_FIRST_ENTRY(e, &event_params->eq_list, struct wl_event_q, eq_list);
+ list_del(&e->eq_list);
+ kfree(e);
+ }
+ wl_ext_event_unlock_eq(event_params, flags);
+}
+
+/*
+ * retrieve the first queued event from the head of the queue
+ */
+
+static struct wl_event_q *
+wl_ext_event_deq_event(struct wl_event_params *event_params)
+{
+ struct wl_event_q *e = NULL;
+ unsigned long flags;
+
+ flags = wl_ext_event_lock_eq(event_params);
+ if (likely(!list_empty(&event_params->eq_list))) {
+ BCM_SET_LIST_FIRST_ENTRY(e, &event_params->eq_list, struct wl_event_q, eq_list);
+ list_del(&e->eq_list);
+ }
+ wl_ext_event_unlock_eq(event_params, flags);
+
+ return e;
+}
+
+/*
+ * push event to tail of the queue
+ */
+
+static s32
+wl_ext_event_enq_event(struct wl_event_params *event_params, u32 event,
+ const wl_event_msg_t *msg, void *data)
+{
+ struct wl_event_q *e;
+ s32 err = 0;
+ uint32 evtq_size;
+ uint32 data_len;
+ unsigned long flags;
+ gfp_t aflags;
+
+ data_len = 0;
+ if (data)
+ data_len = ntoh32(msg->datalen);
+ evtq_size = sizeof(struct wl_event_q) + data_len;
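+	/* Enqueue may run in atomic (interrupt) context, so pick a
+	 * non-sleeping allocation there.
+	 */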
+ aflags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL;
+ e = kzalloc(evtq_size, aflags);
+ if (unlikely(!e)) {
+ EVENT_ERROR("wlan", "event alloc failed\n");
+ return -ENOMEM;
+ }
+ e->etype = event;
+ memcpy(&e->emsg, msg, sizeof(wl_event_msg_t));
+ if (data)
+ memcpy(e->edata, data, data_len);
+ flags = wl_ext_event_lock_eq(event_params);
+ list_add_tail(&e->eq_list, &event_params->eq_list);
+ wl_ext_event_unlock_eq(event_params, flags);
+
+ return err;
+}
+
+static void
+wl_ext_event_put_event(struct wl_event_q *e)
+{
+ kfree(e);
+}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0))
+static int wl_ext_event_handler(void *data);
+#define WL_EXT_EVENT_HANDLER() static int wl_ext_event_handler(void *data)
+#else
+static void wl_ext_event_handler(struct work_struct *data);
+#define WL_EXT_EVENT_HANDLER() static void wl_ext_event_handler(struct work_struct *data)
+#endif
+
+WL_EXT_EVENT_HANDLER()
+{
+ struct wl_event_params *event_params = NULL;
+ struct wl_event_q *e;
+ struct net_device *dev = NULL;
+ struct event_handler_list *evt_node;
+ dhd_pub_t *dhd;
+ unsigned long flags = 0;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0))
+ tsk_ctl_t *tsk = (tsk_ctl_t *)data;
+ event_params = (struct wl_event_params *)tsk->parent;
+#else
+ BCM_SET_CONTAINER_OF(event_params, data, struct wl_event_params, event_work);
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0))
+ while (1) {
+ if (down_interruptible(&tsk->sema) == 0) {
+ SMP_RD_BARRIER_DEPENDS();
+ if (tsk->terminated) {
+ break;
+ }
+#endif
+ DHD_EVENT_WAKE_LOCK(event_params->pub);
+ while ((e = wl_ext_event_deq_event(event_params))) {
+ if (e->emsg.ifidx >= DHD_MAX_IFS) {
+ EVENT_ERROR("wlan", "ifidx=%d not in range\n", e->emsg.ifidx);
+ goto fail;
+ }
+ dev = event_params->dev[e->emsg.ifidx];
+ if (!dev) {
+ EVENT_DBG("wlan", "ifidx=%d dev not ready\n", e->emsg.ifidx);
+ goto fail;
+ }
+ dhd = dhd_get_pub(dev);
+ if (e->etype > WLC_E_LAST) {
+ EVENT_TRACE(dev->name, "Unknown Event (%d): ignoring\n", e->etype);
+ goto fail;
+ }
+ DHD_GENERAL_LOCK(dhd, flags);
+ if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd)) {
+ EVENT_ERROR(dev->name, "BUS is DOWN.\n");
+ DHD_GENERAL_UNLOCK(dhd, flags);
+ goto fail;
+ }
+ DHD_GENERAL_UNLOCK(dhd, flags);
+ EVENT_DBG(dev->name, "event type (%d)\n", e->etype);
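+		/* Dispatch to every handler registered on this interface for
+		 * this event type; a registration with etype WLC_E_LAST acts
+		 * as a wildcard and receives all events.
+		 */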
+ mutex_lock(&event_params->event_sync);
+ evt_node = event_params->evt_head.evt_head;
+ for (;evt_node;) {
+ if (evt_node->dev == dev &&
+ (evt_node->etype == e->etype || evt_node->etype == WLC_E_LAST))
+ evt_node->cb_func(dev, evt_node->cb_argu, &e->emsg, e->edata);
+ evt_node = evt_node->next;
+ }
+ mutex_unlock(&event_params->event_sync);
+fail:
+ wl_ext_event_put_event(e);
+ }
+ DHD_EVENT_WAKE_UNLOCK(event_params->pub);
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0))
+ } else {
+ break;
+ }
+ }
+ complete_and_exit(&tsk->completed, 0);
+#endif
+}
+
+void
+wl_ext_event_send(void *params, const wl_event_msg_t * e, void *data)
+{
+ struct wl_event_params *event_params = params;
+ u32 event_type = ntoh32(e->event_type);
+
+ if (event_params == NULL) {
+ EVENT_ERROR("wlan", "Stale event %d(%s) ignored\n",
+ event_type, bcmevent_get_name(event_type));
+ return;
+ }
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+ if (event_params->event_workq == NULL) {
+ EVENT_ERROR("wlan", "Event handler is not created %d(%s)\n",
+ event_type, bcmevent_get_name(event_type));
+ return;
+ }
+#endif
+
+ if (likely(!wl_ext_event_enq_event(event_params, event_type, e, data))) {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0))
+ if (event_params->thr_event_ctl.thr_pid >= 0) {
+ up(&event_params->thr_event_ctl.sema);
+ }
+#else
+ queue_work(event_params->event_workq, &event_params->event_work);
+#endif
+ }
+}
+
+static s32
+wl_ext_event_create_handler(struct wl_event_params *event_params)
+{
+ int ret = 0;
+ EVENT_TRACE("wlan", "Enter\n");
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0))
+ PROC_START(wl_ext_event_handler, event_params, &event_params->thr_event_ctl, 0, "ext_eventd");
+ if (event_params->thr_event_ctl.thr_pid < 0) {
+ ret = -ENOMEM;
+ }
+#else
+ /* Allocate workqueue for event */
+ if (!event_params->event_workq) {
+ event_params->event_workq = alloc_workqueue("ext_eventd", WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
+ }
+
+ if (!event_params->event_workq) {
+ EVENT_ERROR("wlan", "event_workq alloc_workqueue failed\n");
+ ret = -ENOMEM;
+ } else {
+ INIT_WORK(&event_params->event_work, wl_ext_event_handler);
+ }
+#endif
+
+ return ret;
+}
+
+static void
+wl_ext_event_free(struct wl_event_params *event_params)
+{
+ struct event_handler_list *node, *cur, **evt_head;
+
+ evt_head = &event_params->evt_head.evt_head;
+ node = *evt_head;
+
+ for (;node;) {
+ EVENT_TRACE(node->dev->name, "Free etype=%d\n", node->etype);
+ cur = node;
+ node = cur->next;
+ kfree(cur);
+ }
+ *evt_head = NULL;
+}
+
+static void
+wl_ext_event_destroy_handler(struct wl_event_params *event_params)
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0))
+ if (event_params->thr_event_ctl.thr_pid >= 0) {
+ PROC_STOP(&event_params->thr_event_ctl);
+ }
+#else
+ if (event_params && event_params->event_workq) {
+ cancel_work_sync(&event_params->event_work);
+ destroy_workqueue(event_params->event_workq);
+ event_params->event_workq = NULL;
+ }
+#endif
+}
+
+int
+wl_ext_event_register(struct net_device *dev, dhd_pub_t *dhd, uint32 event,
+ void *cb_func, void *data, wl_event_prio_t prio)
+{
+ struct wl_event_params *event_params = dhd->event_params;
+ struct event_handler_list *node, *leaf, *node_prev, **evt_head;
+ int ret = 0;
+
+ if (event_params) {
+ mutex_lock(&event_params->event_sync);
+ evt_head = &event_params->evt_head.evt_head;
+ node = *evt_head;
+ for (;node;) {
+ if (node->dev == dev && node->etype == event && node->cb_func == cb_func) {
+ EVENT_TRACE(dev->name, "skip event %d\n", event);
+ mutex_unlock(&event_params->event_sync);
+ return 0;
+ }
+ node = node->next;
+ }
+ leaf = kmalloc(sizeof(event_handler_list_t), GFP_KERNEL);
+ if (!leaf) {
+ EVENT_ERROR(dev->name, "Memory alloc failure %d for event %d\n",
+ (int)sizeof(event_handler_list_t), event);
+ mutex_unlock(&event_params->event_sync);
+ return -ENOMEM;
+ }
+ leaf->next = NULL;
+ leaf->dev = dev;
+ leaf->etype = event;
+ leaf->cb_func = cb_func;
+ leaf->cb_argu = data;
+ leaf->prio = prio;
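+		/* Keep the list sorted by descending prio value so that
+		 * handlers registered with a numerically higher
+		 * wl_event_prio_t are dispatched first.
+		 */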
+ if (*evt_head == NULL) {
+ *evt_head = leaf;
+ } else {
+ node = *evt_head;
+ node_prev = NULL;
+ for (;node;) {
+ if (node->prio <= prio) {
+ leaf->next = node;
+ if (node_prev)
+ node_prev->next = leaf;
+ else
+ *evt_head = leaf;
+ break;
+ } else if (node->next == NULL) {
+ node->next = leaf;
+ break;
+ }
+ node_prev = node;
+ node = node->next;
+ }
+ }
+ EVENT_TRACE(dev->name, "event %d registered\n", event);
+ mutex_unlock(&event_params->event_sync);
+ } else {
+ EVENT_ERROR(dev->name, "event_params not ready %d\n", event);
+ ret = -ENODEV;
+ }
+
+ return ret;
+}
+
+void
+wl_ext_event_deregister(struct net_device *dev, dhd_pub_t *dhd,
+ uint32 event, void *cb_func)
+{
+ struct wl_event_params *event_params = dhd->event_params;
+ struct event_handler_list *node, *prev, **evt_head;
+ int tmp = 0;
+
+ if (event_params) {
+ mutex_lock(&event_params->event_sync);
+ evt_head = &event_params->evt_head.evt_head;
+ node = *evt_head;
+ prev = node;
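+		/* Removing the head restarts the walk from the new head;
+		 * otherwise the matched node is unlinked and the walk
+		 * continues from its predecessor.
+		 */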
+ for (;node;) {
+ if (node->dev == dev && node->etype == event && node->cb_func == cb_func) {
+ if (node == *evt_head) {
+ tmp = 1;
+ *evt_head = node->next;
+ } else {
+ tmp = 0;
+ prev->next = node->next;
+ }
+ EVENT_TRACE(dev->name, "event %d deregistered\n", event);
+ kfree(node);
+ if (tmp == 1) {
+ node = *evt_head;
+ prev = node;
+ } else {
+ node = prev->next;
+ }
+ continue;
+ }
+ prev = node;
+ node = node->next;
+ }
+ mutex_unlock(&event_params->event_sync);
+ } else {
+ EVENT_ERROR(dev->name, "event_params not ready %d\n", event);
+ }
+}
+
+static s32
+wl_ext_event_init_priv(struct wl_event_params *event_params)
+{
+ s32 err = 0;
+
+ mutex_init(&event_params->event_sync);
+ wl_ext_event_init_eq(event_params);
+ if (wl_ext_event_create_handler(event_params))
+ return -ENOMEM;
+
+ return err;
+}
+
+static void
+wl_ext_event_deinit_priv(struct wl_event_params *event_params)
+{
+ wl_ext_event_destroy_handler(event_params);
+ wl_ext_event_flush_eq(event_params);
+ wl_ext_event_free(event_params);
+}
+
+int
+wl_ext_event_attach_netdev(struct net_device *net, int ifidx, uint8 bssidx)
+{
+ struct dhd_pub *dhd = dhd_get_pub(net);
+ struct wl_event_params *event_params = dhd->event_params;
+
+ if (event_params && ifidx < DHD_MAX_IFS) {
+ EVENT_TRACE(net->name, "ifidx=%d, bssidx=%d\n", ifidx, bssidx);
+ event_params->dev[ifidx] = net;
+ }
+
+ return 0;
+}
+
+int
+wl_ext_event_dettach_netdev(struct net_device *net, int ifidx)
+{
+ struct dhd_pub *dhd = dhd_get_pub(net);
+ struct wl_event_params *event_params = dhd->event_params;
+
+ if (event_params && ifidx < DHD_MAX_IFS) {
+ EVENT_TRACE(net->name, "ifidx=%d\n", ifidx);
+ event_params->dev[ifidx] = NULL;
+ }
+
+ return 0;
+}
+
+s32
+wl_ext_event_attach(struct net_device *net)
+{
+ struct dhd_pub *dhdp = dhd_get_pub(net);
+ struct wl_event_params *event_params = NULL;
+ s32 err = 0;
+
+ event_params = kmalloc(sizeof(wl_event_params_t), GFP_KERNEL);
+ if (!event_params) {
+ EVENT_ERROR(net->name, "Failed to allocate memory (%zu)\n",
+ sizeof(wl_event_params_t));
+ return -ENOMEM;
+ }
+ dhdp->event_params = event_params;
+ memset(event_params, 0, sizeof(wl_event_params_t));
+ event_params->pub = dhdp;
+
+ err = wl_ext_event_init_priv(event_params);
+ if (err) {
+		EVENT_ERROR(net->name, "wl_ext_event_init_priv failed (%d)\n", err);
+ goto ext_attach_out;
+ }
+
+ return err;
+ext_attach_out:
+ wl_ext_event_dettach(dhdp);
+ return err;
+}
+
+void
+wl_ext_event_dettach(dhd_pub_t *dhdp)
+{
+ struct wl_event_params *event_params = dhdp->event_params;
+
+ if (event_params) {
+ wl_ext_event_deinit_priv(event_params);
+ kfree(event_params);
+ dhdp->event_params = NULL;
+ }
+}
+#endif
diff --git a/bcmdhd.101.10.361.x/wl_event.h b/bcmdhd.101.10.361.x/wl_event.h
new file mode 100755
index 0000000..cae7154
--- /dev/null
+++ b/bcmdhd.101.10.361.x/wl_event.h
@@ -0,0 +1,18 @@
+
+#ifndef _wl_event_
+#define _wl_event_
+typedef enum WL_EVENT_PRIO {
+ PRIO_EVENT_IAPSTA,
+ PRIO_EVENT_ESCAN,
+ PRIO_EVENT_WEXT
+}wl_event_prio_t;
+s32 wl_ext_event_attach(struct net_device *net);
+void wl_ext_event_dettach(dhd_pub_t *dhdp);
+int wl_ext_event_attach_netdev(struct net_device *net, int ifidx, uint8 bssidx);
+int wl_ext_event_dettach_netdev(struct net_device *net, int ifidx);
+int wl_ext_event_register(struct net_device *dev, dhd_pub_t *dhd,
+ uint32 event, void *cb_func, void *data, wl_event_prio_t prio);
+void wl_ext_event_deregister(struct net_device *dev, dhd_pub_t *dhd,
+ uint32 event, void *cb_func);
+void wl_ext_event_send(void *params, const wl_event_msg_t * e, void *data);
+#endif
diff --git a/bcmdhd.101.10.361.x/wl_export.h b/bcmdhd.101.10.361.x/wl_export.h
new file mode 100755
index 0000000..1575d88
--- /dev/null
+++ b/bcmdhd.101.10.361.x/wl_export.h
@@ -0,0 +1,285 @@
+/*
+ * Required functions exported by the port-specific (os-dependent) driver
+ * to common (os-independent) driver code.
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+#ifndef _wl_export_h_
+#define _wl_export_h_
+
+/* misc callbacks */
+struct wl_info;
+struct wl_if;
+struct wlc_if;
+struct wlc_event;
+struct wlc_info;
+struct wl_timer;
+struct wl_rxsts;
+struct wl_txsts;
+struct reorder_rxcpl_id_list;
+
+/** wl_init() is called upon fault ('big hammer') conditions and as part of a 'wlc up' */
+extern void wl_init(struct wl_info *wl);
+extern uint wl_reset(struct wl_info *wl);
+extern void wl_intrson(struct wl_info *wl);
+extern void wl_intrsoff(struct wl_info *wl, bcm_int_bitmask_t *curr_mask);
+extern void wl_intrsrestore(struct wl_info *wl, bcm_int_bitmask_t *macintmask);
+extern int wl_up(struct wl_info *wl);
+extern void wl_down(struct wl_info *wl);
+extern void wl_dump_ver(struct wl_info *wl, struct bcmstrbuf *b);
+extern void wl_txflowcontrol(struct wl_info *wl, struct wl_if *wlif, bool state, int prio);
+extern void wl_set_copycount_bytes(struct wl_info *wl, uint16 copycount,
+ uint16 d11rxoffset);
+extern int wl_bus_pcie_config_access(struct wl_info *wl, uint32 configaddr, uint32 *configdata,
+ bool read);
+
+extern bool wl_alloc_dma_resources(struct wl_info *wl, uint dmaddrwidth);
+
+#ifdef TKO
+extern void * wl_get_tko(struct wl_info *wl, struct wl_if *wlif);
+#endif /* TKO */
+#ifdef ICMP
+extern void * wl_get_icmp(struct wl_info *wl, struct wl_if *wlif);
+#endif /* ICMP */
+/* timer functions */
+extern struct wl_timer *wl_init_timer(struct wl_info *wl, void (*fn)(void* arg), void *arg,
+ const char *name);
+extern void wl_free_timer(struct wl_info *wl, struct wl_timer *timer);
+/* Add timer guarantees the callback fn will not be called until AT LEAST ms later. In the
+ * case of a periodic timer, this guarantee is true of consecutive callback fn invocations.
+ * As a result, the period may not average ms duration and the callbacks may "drift".
+ *
+ * A periodic timer must have a non-zero ms delay.
+ */
+extern void wl_add_timer(struct wl_info *wl, struct wl_timer *timer, uint ms, int periodic);
+extern void wl_add_timer_us(struct wl_info *wl, struct wl_timer *timer, uint us, int periodic);
+extern bool wl_del_timer(struct wl_info *wl, struct wl_timer *timer);
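+/* Minimal usage sketch of the timer API above (illustrative only; my_cb and
+ * arg are placeholder names, not part of this interface):
+ *
+ *	struct wl_timer *t = wl_init_timer(wl, my_cb, arg, "mytimer");
+ *	wl_add_timer(wl, t, 1000, TRUE);	// callbacks >= 1000 ms apart
+ *	...
+ *	wl_del_timer(wl, t);
+ *	wl_free_timer(wl, t);
+ */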
+
+#ifdef WLATF_DONGLE
+int wlfc_upd_flr_weight(struct wl_info *wl, uint8 mac_handle, uint8 tid, void* params);
+int wlfc_enab_fair_fetch_scheduling(struct wl_info *wl, uint32 enab);
+int wlfc_get_fair_fetch_scheduling(struct wl_info *wl, uint32 *status);
+#endif /* WLATF_DONGLE */
+
+#ifdef MONITOR_DNGL_CONV
+extern void wl_sendup_monitor(struct wl_info *wl, void *p);
+#endif
+
+/* data receive and interface management functions */
+extern void wl_sendup_fp(struct wl_info *wl, struct wl_if *wlif, void *p);
+extern void wl_sendup(struct wl_info *wl, struct wl_if *wlif, void *p, int numpkt);
+extern void wl_sendup_event(struct wl_info *wl, struct wl_if *wlif, void *pkt);
+extern void wl_event(struct wl_info *wl, char *ifname, struct wlc_event *e);
+extern void wl_event_sync(struct wl_info *wl, char *ifname, struct wlc_event *e);
+extern void wl_event_sendup(struct wl_info *wl, const struct wlc_event *e, uint8 *data, uint32 len);
+
+/* interface manipulation functions */
+extern char *wl_ifname(struct wl_info *wl, struct wl_if *wlif);
+void wl_set_ifname(struct wl_if *wlif, char *name);
+extern struct wl_if *wl_add_if(struct wl_info *wl, struct wlc_if* wlcif, uint unit,
+ struct ether_addr *remote);
+extern void wl_del_if(struct wl_info *wl, struct wl_if *wlif);
+/* RSDB specific interface update function */
+void wl_update_wlcif(struct wlc_if *wlcif);
+extern void wl_update_if(struct wl_info *from_wl, struct wl_info *to_wl, struct wl_if *from_wlif,
+ struct wlc_if *to_wlcif);
+int wl_find_if(struct wl_if *wlif);
+extern int wl_rebind_if(struct wl_if *wlif, int idx, bool rebind);
+
+/* contexts in wlif structure. Currently following are valid */
+#define IFCTX_ARPI (1)
+#define IFCTX_NDI (2)
+#define IFCTX_NETDEV (3)
+extern void *wl_get_ifctx(struct wl_info *wl, int ctx_id, struct wl_if *wlif);
+
+/* PCIe root complex operations:
+ *  op == 0: get link capability in configuration space
+ *  op == 1: hot reset
+ */
+extern int wl_osl_pcie_rc(struct wl_info *wl, uint op, int param);
+
+/* monitor mode functions */
+#ifndef MONITOR_DNGL_CONV
+extern void wl_monitor(struct wl_info *wl, struct wl_rxsts *rxsts, void *p);
+#endif
+extern void wl_set_monitor(struct wl_info *wl, int val);
+
+#define wl_sort_bsslist(a, b, c) FALSE
+
+#if defined(D0_COALESCING) || defined(WLAWDL)
+extern void wl_sendup_no_filter(struct wl_info *wl, struct wl_if *wlif, void *p, int numpkt);
+#endif
+
+#ifdef LINUX_CRYPTO
+struct wlc_key_info;
+extern int wl_tkip_miccheck(struct wl_info *wl, void *p, int hdr_len, bool group_key, int id);
+extern int wl_tkip_micadd(struct wl_info *wl, void *p, int hdr_len);
+extern int wl_tkip_encrypt(struct wl_info *wl, void *p, int hdr_len);
+extern int wl_tkip_decrypt(struct wl_info *wl, void *p, int hdr_len, bool group_key);
+extern void wl_tkip_printstats(struct wl_info *wl, bool group_key);
+#ifdef BCMINTERNAL
+extern int wl_tkip_keydump(struct wl_info *wl, bool group);
+#endif /* BCMINTERNAL */
+extern int wl_tkip_keyset(struct wl_info *wl, const struct wlc_key_info *key_info,
+ const uint8 *key_data, size_t key_len, const uint8 *rx_seq, size_t rx_seq_len);
+#endif /* LINUX_CRYPTO */
+
+#ifdef DONGLEBUILD
+/* XXX 156-byte dongle size savings hack (rte version of routine doesn't use names) */
+#define wl_init_timer(wl, fn, arg, name) wl_init_timer(wl, fn, arg, NULL)
+extern int wl_busioctl(struct wl_info *wl, uint32 cmd, void *buf, int len, int *used,
+ int *needed, int set);
+extern void wl_isucodereclaimed(uint8 *value);
+extern void wl_reclaim(void);
+extern void wl_reclaim_postattach(void);
+extern bool wl_dngl_is_ss(struct wl_info *wl);
+extern void wl_sendctl_tx(struct wl_info *wl, uint8 type, uint32 op, void *opdata);
+extern void wl_flowring_ctl(struct wl_info *wl, uint32 op, void *opdata);
+extern void wl_indicate_maccore_state(struct wl_info *wl, uint8 state);
+extern void wl_indicate_macwake_state(struct wl_info *wl, uint8 state);
+extern void wl_flush_rxreorderqeue_flow(struct wl_info *wl, struct reorder_rxcpl_id_list *list);
+extern uint32 wl_chain_rxcomplete_id(struct reorder_rxcpl_id_list *list, uint16 id, bool head);
+extern void wl_chain_rxcompletions_amsdu(osl_t *osh, void *p, bool norxcpl);
+extern void wl_timesync_add_rx_timestamp(struct wl_info *wl, void *p,
+ uint32 ts_low, uint32 ts_high);
+extern void wl_timesync_add_tx_timestamp(struct wl_info *wl, void *p,
+ uint32 ts_low, uint32 ts_high);
+extern void wl_timesync_get_tx_timestamp(struct wl_info *wl, void *p,
+ uint32 *ts_low, uint32 *ts_high);
+
+#define wl_chain_rxcomplete_id_tail(a, b) wl_chain_rxcomplete_id(a, b, FALSE)
+#define wl_chain_rxcomplete_id_head(a, b) wl_chain_rxcomplete_id(a, b, TRUE)
+extern void wl_inform_additional_buffers(struct wl_info *wl, uint16 buf_cnts);
+extern void wl_health_check_notify(struct wl_info *wl, mbool notification, bool state);
+extern void wl_health_check_notify_clear_all(struct wl_info *wl);
+extern void wl_health_check_log(struct wl_info *wl, uint32 hc_log_type,
+ uint32 val, uint32 caller);
+#ifdef BCMPCIEDEV
+extern bool wl_get_hcapistimesync(void);
+extern bool wl_get_hcapispkttxs(void);
+#endif /* BCMPCIEDEV */
+#else
+#define wl_indicate_maccore_state(a, b) do { } while (0)
+#define wl_indicate_macwake_state(a, b) do { } while (0)
+#define wl_flush_rxreorderqeue_flow(a, b) do { } while (0)
+#define wl_chain_rxcomplete_id_tail(a, b) 0
+#define wl_chain_rxcomplete_id_head(a, b) 0
+#define wl_chain_rxcompletions_amsdu(a, b, c) do {} while (0)
+#define wl_inform_additional_buffers(a, b) do { } while (0)
+#define wl_health_check_notify(a, b, c) do { } while (0)
+#define wl_health_check_notify_clear_all(a) do { } while (0)
+#define wl_health_check_log(a, b, c, d) do { } while (0)
+#define wl_get_hcapistimesync() do { } while (0)
+#define wl_get_hcapispkttxs() do { } while (0)
+#endif /* DONGLEBUILD */
+
+extern int wl_fatal_error(void * wl, int rc);
+
+#ifdef NEED_HARD_RESET
+extern int wl_powercycle(void * wl);
+extern bool wl_powercycle_inprogress(void * wl);
+#else
+#define wl_powercycle(a)
+#define wl_powercycle_inprogress(a) (0)
+#endif /* NEED_HARD_RESET */
+
+void *wl_create_fwdpkt(struct wl_info *wl, void *p, struct wl_if *wlif);
+
+#ifdef BCMFRWDPOOLREORG
+void wl_upd_frwd_resrv_bufcnt(struct wl_info *wl);
+#endif /* BCMFRWDPOOLREORG */
+#ifdef BCMFRWDPKT
+void wl_prepare_frwd_pkt_rxcmplt(struct wl_info *wl, void *p);
+#endif /* BCMFRWDPKT */
+
+#ifdef WL_NATOE
+void wl_natoe_notify_pktc(struct wl_info *wl, uint8 action);
+int wl_natoe_ampdu_config_upd(struct wl_info *wl);
+#endif /* WL_NATOE */
+
+#ifdef ENABLE_CORECAPTURE
+extern int wl_log_system_state(void * wl, const char * reason, bool capture);
+#else
+#define wl_log_system_state(a, b, c)
+#endif
+
+#define WL_DUMP_MEM_SOCRAM 1
+#define WL_DUMP_MEM_UCM 2
+
+extern void wl_dump_mem(char *addr, int len, int type);
+
+#ifdef HEALTH_CHECK
+typedef int (*wl_health_check_fn)(uint8 *buffer, uint16 length, void *context,
+ int16 *bytes_written);
+typedef int (*health_check_event_mask_fn)(void *context, bool get, uint32 *evt_bits);
+extern int wl_health_check_evtmask_upd(struct wl_info *wl, int module_id, bool get,
+ uint32 *evt_mask);
+
+typedef struct health_check_info health_check_info_t;
+typedef struct health_check_client_info health_check_client_info_t;
+
+/* WL wrapper to health check APIs. */
+extern health_check_client_info_t* wl_health_check_module_register(struct wl_info *wl,
+ const char* name, wl_health_check_fn fn, health_check_event_mask_fn evt_fn,
+ void *context, int module_id);
+
+extern void wl_health_check_execute(void *wl);
+
+extern int wl_health_check_execute_clients(struct wl_info *wl,
+ health_check_client_info_t** modules, uint16 num_modules);
+
+/* Following are not implemented in dongle health check */
+extern int wl_health_check_deinit(struct wl_info *wl);
+extern int wl_health_check_module_unregister(struct wl_info *wl,
+ health_check_client_info_t *client);
+#endif /* HEALTH_CHECK */
+
+#ifdef ECOUNTERS
+#define WL_ECOUNTERS_CALLBACK_V2
+typedef int (*wl_ecounters_stats_get)(uint16 stats_type, struct wlc_info *wlc,
+ const ecounters_stats_types_report_req_t * req, struct bcm_xtlvbuf *xtlvbuf,
+ uint32 *cookie, const bcm_xtlv_t* tlv, uint16 *attempted_write_len);
+
+extern int wl_ecounters_register_source(struct wl_info *wl, uint16 stats_type,
+ wl_ecounters_stats_get some_fn);
+extern int wl_ecounters_register_source_periodic(struct wl_info *wl, uint16 stats_type,
+ wl_ecounters_stats_get periodic_fn, wl_ecounters_stats_get some_fn);
+
+extern int wl_ecounters_trigger(void *trigger_context, uint16 reason);
+#endif
+extern bool wl_health_check_enabled(struct wl_info *wl);
+typedef void (*wl_send_if_event_cb_fn_t)(void *ctx);
+extern int wl_if_event_send_cb_fn_register(struct wl_info *wl, wl_send_if_event_cb_fn_t fn,
+ void *arg);
+extern int wl_if_event_send_cb_fn_unregister(struct wl_info *wl,
+ wl_send_if_event_cb_fn_t fn, void *arg);
+extern uint32 wl_get_ramsize(void);
+#ifdef PACKET_FILTER
+extern void wl_periodic_pktfltr_cntrs_state_upd(wlc_info_t *wlc);
+#endif /* PACKET_FILTER */
+
+#ifdef BCMPCIE_LATENCY
+int wl_bus_pcie_latency_enab(struct wl_info *wl, bool val);
+#endif /* BCMPCIE_LATENCY */
+void wl_hp2p_update_prio(struct wl_info *wl, void *p);
+#endif /* _wl_export_h_ */
diff --git a/bcmdhd.101.10.361.x/wl_ext_genl.c b/bcmdhd.101.10.361.x/wl_ext_genl.c
new file mode 100755
index 0000000..815eb6d
--- /dev/null
+++ b/bcmdhd.101.10.361.x/wl_ext_genl.c
@@ -0,0 +1,568 @@
+#ifdef WL_EXT_GENL
+#include <bcmendian.h>
+#include <wl_android.h>
+#include <dhd_config.h>
+#include <net/genetlink.h>
+
+#define AGENL_ERROR(name, arg1, args...) \
+ do { \
+ if (android_msg_level & ANDROID_ERROR_LEVEL) { \
+ printf("[%s] AGENL-ERROR) %s : " arg1, name, __func__, ## args); \
+ } \
+ } while (0)
+#define AGENL_TRACE(name, arg1, args...) \
+ do { \
+ if (android_msg_level & ANDROID_TRACE_LEVEL) { \
+ printf("[%s] AGENL-TRACE) %s : " arg1, name, __func__, ## args); \
+ } \
+ } while (0)
+#define AGENL_INFO(name, arg1, args...) \
+ do { \
+ if (android_msg_level & ANDROID_INFO_LEVEL) { \
+ printf("[%s] AGENL-INFO) %s : " arg1, name, __func__, ## args); \
+ } \
+ } while (0)
+
+#define htod32(i) i
+#define htod16(i) i
+#define dtoh32(i) i
+#define dtoh16(i) i
+
+#ifdef SENDPROB
+#define MGMT_PROBE_REQ 0x40
+#define MGMT_PROBE_RES 0x50
+#endif
+
+enum {
+ __GENL_CUSTOM_ATTR_INVALID,
+ GENL_CUSTOM_ATTR_MSG, /* message */
+ __GENL_CUSTOM_ATTR_MAX,
+};
+
+enum {
+ __GENLL_CUSTOM_COMMAND_INVALID,
+ GENL_CUSTOM_COMMAND_BIND, /* bind */
+ GENL_CUSTOM_COMMAND_SEND, /* user -> kernel */
+ GENL_CUSTOM_COMMAND_RECV, /* kernel -> user */
+ __GENL_CUSTOM_COMMAND_MAX,
+};
+
+#if defined(ALIBABA_ZEROCONFIG)
+#define GENL_FAMILY_NAME "WIFI_NL_CUSTOM"
+#define PROBE_RSP_DST_MAC_OFFSET 4
+#define PROBE_RSP_VNDR_ID_OFFSET 55
+#else
+#define GENL_FAMILY_NAME "WLAN_NL_CUSTOM"
+#define PROBE_RSP_DST_MAC_OFFSET 4
+#define PROBE_RSP_VNDR_ID_OFFSET DOT11_MGMT_HDR_LEN
+#endif
+#define PROBE_RSP_VNDR_LEN_OFFSET (PROBE_RSP_VNDR_ID_OFFSET+1)
+#define PROBE_RSP_VNDR_OUI_OFFSET (PROBE_RSP_VNDR_ID_OFFSET+2)
+#define MAX_CUSTOM_PKT_LENGTH 2048
+#define GENL_CUSTOM_ATTR_MAX (__GENL_CUSTOM_ATTR_MAX - 1)
+#define GENLMSG_UNICAST_RETRY_LIMIT 5
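+
+/* Layout assumed for the raw 802.11 management frame handed down from user
+ * space: addr1 (the destination MAC) starts at byte 4, and a single
+ * vendor-specific IE (id, length, then OUI plus data) is expected at
+ * PROBE_RSP_VNDR_ID_OFFSET.
+ */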
+
+typedef struct genl_params {
+ struct net_device *dev;
+ bool bind;
+ int pm;
+ int bind_pid;
+ int send_retry_cnt;
+} genl_params_t;
+
+struct genl_params *g_zconf = NULL;
+
+static int wl_ext_genl_bind(struct sk_buff *skb, struct genl_info *info);
+static int wl_ext_genl_recv(struct sk_buff *skb, struct genl_info *info);
+static int wl_ext_genl_send(struct genl_params *zconf, struct net_device *dev,
+ char* buf, int buf_len);
+
+static struct nla_policy wl_ext_genl_policy[GENL_CUSTOM_ATTR_MAX + 1] = {
+ [GENL_CUSTOM_ATTR_MSG] = {.type = NLA_NUL_STRING},
+};
+
+static struct genl_ops wl_ext_genl_ops[] = {
+ {
+ .cmd = GENL_CUSTOM_COMMAND_BIND,
+ .flags = 0,
+ .policy = wl_ext_genl_policy,
+ .doit = wl_ext_genl_bind,
+ .dumpit = NULL,
+ },
+ {
+ .cmd = GENL_CUSTOM_COMMAND_SEND,
+ .flags = 0,
+ .policy = wl_ext_genl_policy,
+ .doit = wl_ext_genl_recv,
+ .dumpit = NULL,
+ },
+};
+
+static struct genl_family wl_ext_genl_family = {
+ .id = GENL_ID_GENERATE,
+ .hdrsize = 0,
+ .name = GENL_FAMILY_NAME,
+ .version = 1,
+ .maxattr = GENL_CUSTOM_ATTR_MAX,
+};
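+
+/* Flow implemented below: a user-space process sends GENL_CUSTOM_COMMAND_BIND
+ * with "BIND" to register its netlink port id; the driver then unicasts
+ * GENL_CUSTOM_COMMAND_RECV messages (e.g. filtered probe-request IEs) to that
+ * port, forcing PM_OFF while bound, until "UNBIND" arrives or repeated send
+ * failures clear the binding.
+ */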
+
+#ifdef SENDPROB
+static int
+wl_ext_add_del_ie_hex(struct net_device *dev, uint pktflag,
+ char *ie_data, int ie_len, const char* add_del_cmd)
+{
+ vndr_ie_setbuf_t *vndr_ie = NULL;
+ char iovar_buf[WLC_IOCTL_SMLEN]="\0";
+ int tot_len = 0, iecount;
+ int err = -1;
+
+ if (!ie_len) {
+ AGENL_ERROR(dev->name, "wrong ie_len %d\n", ie_len);
+ goto exit;
+ }
+
+ tot_len = (int)(sizeof(vndr_ie_setbuf_t) + (ie_len));
+ vndr_ie = (vndr_ie_setbuf_t *) kzalloc(tot_len, GFP_KERNEL);
+ if (!vndr_ie) {
+ AGENL_ERROR(dev->name, "IE memory alloc failed\n");
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ /* Copy the vndr_ie SET command ("add"/"del") to the buffer */
+ strncpy(vndr_ie->cmd, add_del_cmd, VNDR_IE_CMD_LEN - 1);
+ vndr_ie->cmd[VNDR_IE_CMD_LEN - 1] = '\0';
+
+ /* Set the IE count - the buffer contains only 1 IE */
+ iecount = htod32(1);
+ memcpy((void *)&vndr_ie->vndr_ie_buffer.iecount, &iecount, sizeof(s32));
+
+ /* Set packet flag to indicate that BEACON's will contain this IE */
+ pktflag = htod32(pktflag);
+ memcpy((void *)&vndr_ie->vndr_ie_buffer.vndr_ie_list[0].pktflag, &pktflag,
+ sizeof(u32));
+
+ /* Set the IE ID */
+ vndr_ie->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.id = (uchar)DOT11_MNG_VS_ID;
+
+ /* Set the IE LEN */
+ vndr_ie->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.len = ie_len;
+
+ /* Set the IE OUI and DATA */
+ memcpy((char *)vndr_ie->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.oui, ie_data, ie_len);
+
+ err = wldev_iovar_setbuf(dev, "vndr_ie", vndr_ie, tot_len,
+ iovar_buf, sizeof(iovar_buf), NULL);
+ if (err != 0)
+ AGENL_ERROR(dev->name, "vndr_ie, ret=%d\n", err);
+
+exit:
+ if (vndr_ie) {
+ kfree(vndr_ie);
+ }
+ return err;
+}
+
+static int
+wl_ext_send_probersp(struct net_device *dev, char* buf, int buf_len)
+{
+ char addr[ETHER_ADDR_LEN], *pVndrOUI;
+ char iovar_buf[WLC_IOCTL_SMLEN]="\0";
+ int err = -1, ie_len;
+
+	if (buf == NULL || buf_len <= 0) {
+ AGENL_ERROR(dev->name, "buf is NULL or buf_len <= 0\n");
+ return -1;
+ }
+
+ AGENL_TRACE(dev->name, "Enter\n");
+
+ memcpy(addr, (buf+PROBE_RSP_DST_MAC_OFFSET), ETHER_ADDR_LEN);
+ pVndrOUI = (buf+PROBE_RSP_VNDR_OUI_OFFSET);
+ ie_len = *(buf+PROBE_RSP_VNDR_LEN_OFFSET);
+
+ if (ie_len > (buf_len-PROBE_RSP_VNDR_OUI_OFFSET)) {
+ AGENL_ERROR(dev->name, "wrong vendor ie len %d\n", ie_len);
+ return -1;
+ }
+
+ err = wl_ext_add_del_ie_hex(dev, VNDR_IE_PRBRSP_FLAG, pVndrOUI, ie_len, "add");
+ if (err)
+ goto exit;
+
+ err = wldev_iovar_setbuf(dev, "send_probresp", addr, ETHER_ADDR_LEN,
+ iovar_buf, sizeof(iovar_buf), NULL);
+ if (err != 0)
+ AGENL_ERROR(dev->name, "vndr_ie, ret=%d\n", err);
+
+ OSL_SLEEP(100);
+ wl_ext_add_del_ie_hex(dev, VNDR_IE_PRBRSP_FLAG, pVndrOUI, ie_len, "del");
+
+exit:
+ return err;
+}
+
+static int
+wl_ext_set_probreq(struct net_device *dev, bool set)
+{
+ int bytes_written = 0;
+ char recv_probreq[32];
+
+ AGENL_TRACE(dev->name, "Enter\n");
+
+ if (set) {
+ sprintf(recv_probreq, "wl recv_probreq 1");
+ wl_android_ext_priv_cmd(dev, recv_probreq, 0, &bytes_written);
+ } else {
+ sprintf(recv_probreq, "wl recv_probreq 0");
+ wl_android_ext_priv_cmd(dev, recv_probreq, 0, &bytes_written);
+ }
+
+ return 0;
+}
+
+void
+wl_ext_probreq_event(struct net_device *dev, void *argu,
+ const wl_event_msg_t *e, void *data)
+{
+ struct genl_params *zconf = (struct genl_params *)argu;
+ int i, ret = 0, num_ie = 0, totlen;
+ uint32 event_len = 0;
+ char *buf, *pbuf;
+ uint rem_len, buflen = MAX_CUSTOM_PKT_LENGTH;
+ uint32 event_id[] = {DOT11_MNG_VS_ID};
+ uint32 datalen = ntoh32(e->datalen);
+ bcm_tlv_t *ie;
+
+ AGENL_TRACE(dev->name, "Enter\n");
+
+ rem_len = buflen;
+ buf = kzalloc(MAX_CUSTOM_PKT_LENGTH, GFP_KERNEL);
+ if (unlikely(!buf)) {
+ AGENL_ERROR(dev->name, "Could not allocate buf\n");
+ return;
+ }
+
+ // copy mgmt header
+ pbuf = buf;
+ memcpy(pbuf, data, DOT11_MGMT_HDR_LEN);
+ rem_len -= (DOT11_MGMT_HDR_LEN+1);
+ datalen -= DOT11_MGMT_HDR_LEN;
+ data += DOT11_MGMT_HDR_LEN;
+
+ // copy IEs
+ pbuf = buf + DOT11_MGMT_HDR_LEN;
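+	/* Walk the tagged parameters and append only the IEs whose id is
+	 * listed in event_id[] (currently just the vendor-specific IE) after
+	 * the copied management header.
+	 */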
+#if 1 // non-sort by id
+ ie = (bcm_tlv_t*)data;
+ totlen = datalen;
+ while (ie && totlen >= TLV_HDR_LEN) {
+ int ie_id = -1;
+ int ie_len = ie->len + TLV_HDR_LEN;
+ for (i=0; i<sizeof(event_id)/sizeof(event_id[0]); i++) {
+ if (ie->id == event_id[i]) {
+ ie_id = ie->id;
+ break;
+ }
+ }
+ if ((ie->id == ie_id) && (totlen >= ie_len) && (rem_len >= ie_len)) {
+ memcpy(pbuf, ie, ie_len);
+ pbuf += ie_len;
+ rem_len -= ie_len;
+ num_ie++;
+ }
+ ie = (bcm_tlv_t*)((uint8*)ie + ie_len);
+ totlen -= ie_len;
+ }
+#else // sort by id
+ for (i = 0; i < sizeof(event_id)/sizeof(event_id[0]); i++) {
+ void *pdata = data;
+ int data_len = datalen;
+ while (rem_len > 0) {
+ ie = bcm_parse_tlvs(pdata, data_len, event_id[i]);
+ if (!ie)
+ break;
+ if (rem_len < (ie->len+TLV_HDR_LEN)) {
+ ANDROID_TRACE(("%s: buffer is not enough\n", __FUNCTION__));
+ break;
+ }
+ memcpy(pbuf, ie, min(ie->len+TLV_HDR_LEN, rem_len));
+ pbuf += (ie->len+TLV_HDR_LEN);
+ rem_len -= (ie->len+TLV_HDR_LEN);
+ data_len -= (((void *)ie-pdata) + (ie->len+TLV_HDR_LEN));
+ pdata = (char *)ie + (ie->len+TLV_HDR_LEN);
+ num_ie++;
+ }
+ }
+#endif
+ if (num_ie) {
+ event_len = buflen - rem_len;
+ AGENL_INFO(dev->name, "num_ie=%d\n", num_ie);
+ if (android_msg_level & ANDROID_INFO_LEVEL)
+ prhex("buf", buf, event_len);
+ ret = wl_ext_genl_send(zconf, dev, buf, event_len);
+ }
+
+	if (buf)
+		kfree(buf);
+ return;
+}
+#endif
+
+static int
+wl_ext_genl_recv(struct sk_buff *skb, struct genl_info *info)
+{
+ struct genl_params *zconf = g_zconf;
+ struct net_device *dev;
+ struct nlattr *na;
+ char* pData = NULL;
+ int DataLen = 0;
+
+ if (info == NULL) {
+		AGENL_ERROR("wlan", "genl_info is NULL\n");
+ return -1;
+ }
+
+ if (zconf == NULL) {
+ AGENL_ERROR("wlan", "g_zconf is NULL\n");
+ return -1;
+ }
+ dev = zconf->dev;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)
+ AGENL_TRACE(dev->name, "Enter snd_portid=%d\n", info->snd_portid);
+#else
+ AGENL_TRACE(dev->name, "Enter\n");
+#endif
+ na = info->attrs[GENL_CUSTOM_ATTR_MSG];
+
+ if (na) {
+ pData = (char*) nla_data(na);
+ DataLen = nla_len(na);
+ AGENL_INFO(dev->name, "nla_len(na) : %d\n", DataLen);
+ if (android_msg_level & ANDROID_INFO_LEVEL)
+ prhex("nla_data(na)", pData, DataLen);
+ }
+
+#ifdef SENDPROB
+	if (pData == NULL) {
+		AGENL_ERROR(dev->name, "no data attribute in message\n");
+		return -1;
+	}
+	if (*pData == MGMT_PROBE_RES) {
+		wl_ext_send_probersp(dev, pData, DataLen);
+	} else if (*pData == MGMT_PROBE_REQ) {
+		AGENL_ERROR(dev->name, "probe req\n");
+	} else {
+		AGENL_ERROR(dev->name, "Unexpected pkt %d\n", *pData);
+		if (android_msg_level & ANDROID_INFO_LEVEL)
+			prhex("nla_data(na)", pData, DataLen);
+	}
+#endif
+
+ return 0;
+}
+
+static int
+wl_ext_genl_send(struct genl_params *zconf, struct net_device *dev,
+ char* buf, int buf_len)
+{
+ struct sk_buff *skb = NULL;
+ char* msg_head = NULL;
+ int ret = -1;
+ int bytes_written = 0;
+ char recv_probreq[32];
+
+ if (zconf->bind_pid == -1) {
+		AGENL_ERROR(dev->name, "There is no bound process\n");
+ return -1;
+ }
+
+	if (buf == NULL || buf_len <= 0) {
+		AGENL_ERROR(dev->name, "buf is NULL or invalid buf_len %d\n", buf_len);
+ return -1;
+ }
+
+ skb = genlmsg_new(MAX_CUSTOM_PKT_LENGTH, GFP_KERNEL);
+
+ if (skb) {
+ msg_head = genlmsg_put(skb, 0, 0, &wl_ext_genl_family, 0, GENL_CUSTOM_COMMAND_RECV);
+ if (msg_head == NULL) {
+ nlmsg_free(skb);
+ AGENL_ERROR(dev->name, "genlmsg_put fail\n");
+ return -1;
+ }
+
+ ret = nla_put(skb, GENL_CUSTOM_ATTR_MSG, buf_len, buf);
+ if (ret != 0) {
+ nlmsg_free(skb);
+ AGENL_ERROR(dev->name, "nla_put fail : %d\n", ret);
+ return ret;
+ }
+
+ genlmsg_end(skb, msg_head);
+
+ /* sending message */
+ AGENL_TRACE(dev->name, "send to process %d\n", zconf->bind_pid);
+ ret = genlmsg_unicast(&init_net, skb, zconf->bind_pid);
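+		/* Drop the binding after GENLMSG_UNICAST_RETRY_LIMIT
+		 * consecutive unicast failures: the bound process has most
+		 * likely exited without sending UNBIND.
+		 */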
+ if (ret != 0) {
+ AGENL_ERROR(dev->name, "genlmsg_unicast fail : %d\n", ret);
+ zconf->send_retry_cnt++;
+			if (zconf->send_retry_cnt >= GENLMSG_UNICAST_RETRY_LIMIT) {
+ AGENL_ERROR(dev->name, "Exceeding retry cnt %d, Unbind pid : %d\n",
+ zconf->send_retry_cnt, zconf->bind_pid);
+ zconf->bind_pid = -1;
+ sprintf(recv_probreq, "wl recv_probreq 0");
+ wl_android_ext_priv_cmd(dev, recv_probreq, 0, &bytes_written);
+ }
+ return ret;
+ }
+ } else {
+ AGENL_ERROR(dev->name, "genlmsg_new fail\n");
+ return -1;
+ }
+
+ zconf->send_retry_cnt = 0;
+
+ return 0;
+}
+
+static int
+wl_ext_genl_bind(struct sk_buff *skb, struct genl_info *info)
+{
+ struct genl_params *zconf = g_zconf;
+ struct net_device *dev;
+ struct dhd_pub *dhd;
+ struct nlattr *na;
+ bool bind;
+ char* pData = NULL;
+ int DataLen = 0;
+
+ if (info == NULL) {
+ AGENL_ERROR("wlan", "genl_info is NULL\n");
+ return -1;
+ }
+
+ if (zconf == NULL) {
+ AGENL_ERROR("wlan", "zconf is NULL\n");
+ return -1;
+ }
+ dev = zconf->dev;
+ dhd = dhd_get_pub(dev);
+
+ AGENL_TRACE(dev->name, "Enter\n");
+
+ na = info->attrs[GENL_CUSTOM_ATTR_MSG];
+ if (na) {
+ pData = (char*) nla_data(na);
+ DataLen = nla_len(na);
+ AGENL_INFO(dev->name, "nla_len(na) : %d\n", DataLen);
+ if (android_msg_level & ANDROID_INFO_LEVEL)
+ prhex("nla_data(na)", pData, DataLen);
+ }
+
+	if (pData == NULL) {
+		AGENL_ERROR(dev->name, "no bind command in message\n");
+		return -1;
+	}
+
+	if (strcmp(pData, "BIND") == 0) {
+ bind = TRUE;
+ } else if (strcmp(pData, "UNBIND") == 0) {
+ bind = FALSE;
+ } else {
+ AGENL_ERROR(dev->name, "Unknown cmd %s\n", pData);
+ return -1;
+ }
+
+ if (bind == zconf->bind) {
+ AGENL_TRACE(dev->name, "Already %s\n", bind?"BIND":"UNBIND");
+ return 0;
+ }
+
+ if (bind) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)
+ zconf->bind_pid = info->snd_portid;
+#endif
+ AGENL_TRACE(dev->name, "BIND pid = %d\n", zconf->bind_pid);
+#ifdef SENDPROB
+ wl_ext_set_probreq(dev, TRUE);
+#endif
+ zconf->bind = TRUE;
+ zconf->pm = dhd->conf->pm;
+ dhd->conf->pm = PM_OFF;
+ } else {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)
+ AGENL_TRACE(dev->name, "UNBIND snd_portid = %d\n", info->snd_portid);
+#else
+ AGENL_TRACE(dev->name, "UNBIND pid = %d\n", zconf->bind_pid);
+#endif
+ zconf->bind_pid = -1;
+#ifdef SENDPROB
+ wl_ext_set_probreq(dev, FALSE);
+#endif
+ dhd->conf->pm = zconf->pm;
+ zconf->bind = FALSE;
+ }
+
+ return 0;
+}
+
+int
+wl_ext_genl_init(struct net_device *net)
+{
+ struct dhd_pub *dhd = dhd_get_pub(net);
+ struct genl_params *zconf = dhd->zconf;
+ int ret = 0;
+
+	AGENL_TRACE(net->name, "Enter, family name \"%s\"\n", wl_ext_genl_family.name);
+
+ zconf = kzalloc(sizeof(struct genl_params), GFP_KERNEL);
+ if (unlikely(!zconf)) {
+ AGENL_ERROR(net->name, "Could not allocate zconf\n");
+ return -ENOMEM;
+ }
+ dhd->zconf = (void *)zconf;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+ ret = genl_register_family(&wl_ext_genl_family);
+	/* FIXME: genl_register_family_with_ops() is gone since kernel 4.10;
+	 * wl_ext_genl_ops must instead be attached through the .ops/.n_ops
+	 * fields of wl_ext_genl_family before registration. Fail until then.
+	 */
+	ret = -1;
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)
+ ret = genl_register_family_with_ops(&wl_ext_genl_family, wl_ext_genl_ops);
+#else
+ ret = genl_register_family_with_ops(&wl_ext_genl_family, wl_ext_genl_ops,
+ ARRAY_SIZE(wl_ext_genl_ops));
+#endif
+ if (ret != 0) {
+ AGENL_ERROR(net->name, "GE_NELINK family registration fail\n");
+ goto err;
+ }
+ zconf->bind_pid = -1;
+#ifdef SENDPROB
+ ret = wl_ext_event_register(net, dhd, WLC_E_PROBREQ_MSG, wl_ext_probreq_event,
+ zconf, PRIO_EVENT_IAPSTA);
+ if (ret)
+ goto err;
+#endif
+ zconf->dev = net;
+ g_zconf = zconf;
+
+ return ret;
+err:
+	if (zconf)
+		kfree(zconf);
+	dhd->zconf = NULL;
+ return ret;
+}
+
+void
+wl_ext_genl_deinit(struct net_device *net)
+{
+ struct dhd_pub *dhd = dhd_get_pub(net);
+ struct genl_params *zconf = dhd->zconf;
+
+ AGENL_TRACE(net->name, "Enter\n");
+
+#ifdef SENDPROB
+ wl_ext_event_deregister(net, dhd, WLC_E_PROBREQ_MSG, wl_ext_probreq_event);
+#endif
+
+ genl_unregister_family(&wl_ext_genl_family);
+	if (zconf != NULL) {
+		kfree(zconf);
+		dhd->zconf = NULL;
+	}
+ g_zconf = NULL;
+
+}
+#endif
diff --git a/bcmdhd.101.10.361.x/wl_iapsta.c b/bcmdhd.101.10.361.x/wl_iapsta.c
new file mode 100755
index 0000000..aeaef14
--- /dev/null
+++ b/bcmdhd.101.10.361.x/wl_iapsta.c
@@ -0,0 +1,5748 @@
+
+#ifdef WL_EXT_IAPSTA
+#include <net/rtnetlink.h>
+#include <bcmendian.h>
+#include <dhd_linux.h>
+#include <wl_android.h>
+#include <dhd_config.h>
+#ifdef WL_CFG80211
+#include <wl_cfg80211.h>
+#endif /* WL_CFG80211 */
+#ifdef WL_ESCAN
+#include <wl_escan.h>
+#endif /* WL_ESCAN */
+
+#define IAPSTA_ERROR(name, arg1, args...) \
+ do { \
+ if (android_msg_level & ANDROID_ERROR_LEVEL) { \
+ printf("[%s] IAPSTA-ERROR) %s : " arg1, name, __func__, ## args); \
+ } \
+ } while (0)
+#define IAPSTA_TRACE(name, arg1, args...) \
+ do { \
+ if (android_msg_level & ANDROID_TRACE_LEVEL) { \
+ printf("[%s] IAPSTA-TRACE) %s : " arg1, name, __func__, ## args); \
+ } \
+ } while (0)
+#define IAPSTA_INFO(name, arg1, args...) \
+ do { \
+ if (android_msg_level & ANDROID_INFO_LEVEL) { \
+ printf("[%s] IAPSTA-INFO) %s : " arg1, name, __func__, ## args); \
+ } \
+ } while (0)
+#define IAPSTA_DBG(name, arg1, args...) \
+ do { \
+ if (android_msg_level & ANDROID_DBG_LEVEL) { \
+ printf("[%s] IAPSTA-DBG) %s : " arg1, name, __func__, ## args); \
+ } \
+ } while (0)
+
+#ifdef PROP_TXSTATUS
+#include <dhd_wlfc.h>
+#ifdef PROP_TXSTATUS_VSDB
+extern int disable_proptx;
+#endif /* PROP_TXSTATUS_VSDB */
+#endif /* PROP_TXSTATUS */
+
+#ifndef WL_CFG80211
+#define htod32(i) i
+#define htod16(i) i
+#define dtoh32(i) i
+#define dtoh16(i) i
+#define IEEE80211_BAND_2GHZ 0
+#define IEEE80211_BAND_5GHZ 1
+#endif /* WL_CFG80211 */
+
+#define CSA_FW_BIT (1<<0)
+#define CSA_DRV_BIT (1<<1)
+
+#define MAX_AP_LINK_WAIT_TIME 3000
+#define MAX_STA_LINK_WAIT_TIME 15000
+#define STA_LINKDOWN_TIMEOUT 10000
+#define STA_CONNECT_TIMEOUT 10500
+#define STA_CONNECT_RETRY_TIMEOUT 600
+#define STA_RECONNECT_RETRY_TIMEOUT 300
+#define STA_EAPOL_TIMEOUT 100
+#define STA_EMPTY_SCAN_MAX 6
+
+enum wl_if_list {
+ IF_PIF,
+ IF_VIF,
+ IF_VIF2,
+ MAX_IF_NUM
+};
+
+typedef enum WL_PRIO {
+ PRIO_AP,
+ PRIO_MESH,
+ PRIO_P2P,
+ PRIO_STA
+} wl_prio_t;
+
+typedef enum APSTAMODE {
+ IUNKNOWN_MODE = 0,
+ ISTAONLY_MODE = 1,
+ IAPONLY_MODE = 2,
+ ISTAAP_MODE = 3,
+ ISTAGO_MODE = 4,
+ ISTASTA_MODE = 5,
+ IDUALAP_MODE = 6,
+ ISTAAPAP_MODE = 7,
+ IMESHONLY_MODE = 8,
+ ISTAMESH_MODE = 9,
+ IMESHAP_MODE = 10,
+ ISTAAPMESH_MODE = 11,
+ IMESHAPAP_MODE = 12
+} apstamode_t;
+
+typedef enum BGNMODE {
+ IEEE80211B = 1,
+ IEEE80211G,
+ IEEE80211BG,
+ IEEE80211BGN,
+ IEEE80211BGNAC
+} bgnmode_t;
+
+typedef enum AUTHMODE {
+ AUTH_OPEN,
+ AUTH_SHARED,
+ AUTH_WPAPSK,
+ AUTH_WPA2PSK,
+ AUTH_WPAWPA2PSK,
+ AUTH_SAE
+} authmode_t;
+
+typedef enum ENCMODE {
+ ENC_NONE,
+ ENC_WEP,
+ ENC_TKIP,
+ ENC_AES,
+ ENC_TKIPAES
+} encmode_t;
+
+typedef struct wl_if_info {
+ struct net_device *dev;
+ ifmode_t ifmode;
+ unsigned long status;
+ char prefix;
+ wl_prio_t prio;
+ int ifidx;
+ uint8 bssidx;
+ char ifname[IFNAMSIZ+1];
+ char ssid[DOT11_MAX_SSID_LEN];
+ struct ether_addr bssid;
+ bgnmode_t bgnmode;
+ int hidden;
+ int maxassoc;
+ uint16 channel;
+ authmode_t amode;
+ encmode_t emode;
+ bool vsdb;
+ char key[100];
+#ifdef WL_ESCAN
+#if (defined(WLMESH) || defined(ACS_MONITOR))
+ struct wl_escan_info *escan;
+#ifdef WLMESH
+ timer_list_compat_t delay_scan;
+#endif /* WLMESH */
+#ifdef ACS_MONITOR
+ timer_list_compat_t acs_timer;
+#endif /* ACS_MONITOR */
+#endif /* WLMESH || ACS_MONITOR */
+#endif /* WL_ESCAN */
+ struct delayed_work pm_enable_work;
+ struct mutex pm_sync;
+#ifdef PROPTX_MAXCOUNT
+ int transit_maxcount;
+#endif /* PROPTX_MAXCOUNT */
+ uint conn_state;
+ uint16 prev_channel;
+ uint16 post_channel;
+#ifdef TPUT_MONITOR
+ unsigned long last_tx;
+ unsigned long last_rx;
+ struct osl_timespec tput_ts;
+ int32 tput_tx;
+ int32 tput_rx;
+ int32 tput_tx_kb;
+ int32 tput_rx_kb;
+#endif /* TPUT_MONITOR */
+ timer_list_compat_t connect_timer;
+#if defined(WL_EXT_RECONNECT) && defined(WL_CFG80211)
+ wlcfg_assoc_info_t assoc_info;
+ timer_list_compat_t reconnect_timer;
+#endif /* WL_EXT_RECONNECT && WL_CFG80211 */
+#ifdef EAPOL_RESEND
+ void *pend_eapol_pkt;
+ timer_list_compat_t eapol_timer;
+#ifdef EAPOL_DYNAMATIC_RESEND
+ struct osl_timespec eapol_tx_ts;
+ bool eapol_retry;
+ int eapol_cnt;
+ int eapol_avg_intvl;
+ int eapol_min_intvl;
+ int eapol_max_intvl;
+ int eapol_resend_intvl;
+#endif /* EAPOL_DYNAMATIC_RESEND */
+#endif /* EAPOL_RESEND */
+ int empty_scan;
+} wl_if_info_t;
+
+typedef struct wl_apsta_params {
+ struct wl_if_info if_info[MAX_IF_NUM];
+ struct dhd_pub *dhd;
+ int ioctl_ver;
+ bool init;
+ int rsdb;
+ bool vsdb;
+ uint csa;
+ uint acs;
+#ifdef ACS_MONITOR
+ uint acs_tmo;
+#endif /* ACS_MONITOR */
+ bool radar;
+ apstamode_t apstamode;
+ wait_queue_head_t netif_change_event;
+ struct mutex usr_sync;
+#if defined(WLMESH) && defined(WL_ESCAN)
+ int macs;
+ struct wl_mesh_params mesh_info;
+#endif /* WLMESH && WL_ESCAN */
+ struct mutex in4way_sync;
+ int sta_btc_mode;
+ struct osl_timespec sta_disc_ts;
+ struct osl_timespec sta_conn_ts;
+ bool ap_recon_sta;
+ wait_queue_head_t ap_recon_sta_event;
+ struct ether_addr ap_disc_sta_bssid;
+ struct osl_timespec ap_disc_sta_ts;
+#ifdef TPUT_MONITOR
+ timer_list_compat_t monitor_timer;
+ int32 tput_sum;
+ int32 tput_sum_kb;
+#endif /* TPUT_MONITOR */
+#ifdef SCAN_SUPPRESS
+ struct osl_timespec scan_busy_ts;
+ int scan_busy_cnt;
+#endif /* SCAN_SUPPRESS */
+ uint32 linkdown_reason;
+#ifdef EAPOL_RESEND
+ spinlock_t eapol_lock;
+#endif /* EAPOL_RESEND */
+} wl_apsta_params_t;
+
+enum wifi_isam_status {
+ ISAM_STATUS_IF_ADDING = 0,
+ ISAM_STATUS_IF_READY,
+ ISAM_STATUS_STA_CONNECTING,
+ ISAM_STATUS_STA_CONNECTED,
+ ISAM_STATUS_AP_CREATING,
+ ISAM_STATUS_AP_CREATED
+};
+
+enum wifi_isam_reason {
+ ISAM_RC_MESH_ACS = 1,
+ ISAM_RC_TPUT_MONITOR = 2,
+ ISAM_RC_AP_ACS = 3,
+ ISAM_RC_EAPOL_RESEND = 4
+};
+
+#define wl_get_isam_status(cur_if, stat) \
+ (test_bit(ISAM_STATUS_ ## stat, &(cur_if)->status))
+#define wl_set_isam_status(cur_if, stat) \
+ (set_bit(ISAM_STATUS_ ## stat, &(cur_if)->status))
+#define wl_clr_isam_status(cur_if, stat) \
+ (clear_bit(ISAM_STATUS_ ## stat, &(cur_if)->status))
+#define wl_chg_isam_status(cur_if, stat) \
+ (change_bit(ISAM_STATUS_ ## stat, &(cur_if)->status))
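+
+/* Test/set/clear/toggle the ISAM_STATUS_<stat> bit in cur_if->status */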
+
+static int wl_ext_enable_iface(struct net_device *dev, char *ifname,
+ int wait_up, bool lock);
+static int wl_ext_disable_iface(struct net_device *dev, char *ifname);
+#if defined(WLMESH) && defined(WL_ESCAN)
+static int wl_mesh_escan_attach(dhd_pub_t *dhd, struct wl_if_info *cur_if);
+#endif /* WLMESH && WL_ESCAN */
+
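+/* Look up the wl_if_info entry backing this net_device in the iapsta
+ * if_info[] table; returns NULL if the interface is not tracked there.
+ */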
+static struct wl_if_info *
+wl_get_cur_if(struct net_device *dev)
+{
+ dhd_pub_t *dhd = dhd_get_pub(dev);
+ struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+ struct wl_if_info *cur_if = NULL, *tmp_if = NULL;
+ int i;
+
+ for (i=0; i<MAX_IF_NUM; i++) {
+ tmp_if = &apsta_params->if_info[i];
+ if (tmp_if->dev && tmp_if->dev == dev) {
+ cur_if = tmp_if;
+ break;
+ }
+ }
+
+ return cur_if;
+}
+
+#define WL_PM_ENABLE_TIMEOUT 10000
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6))
+#define BCM_SET_CONTAINER_OF(entry, ptr, type, member) \
+_Pragma("GCC diagnostic push") \
+_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") \
+entry = container_of((ptr), type, member); \
+_Pragma("GCC diagnostic pop")
+#else
+#define BCM_SET_CONTAINER_OF(entry, ptr, type, member) \
+entry = container_of((ptr), type, member);
+#endif /* STRICT_GCC_WARNINGS */
+
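+/* Delayed-work handler armed by wl_ext_add_remove_pm_enable_work(): restores
+ * the PM mode (PM_FAST, or the value configured via dhd_conf_get_pm()) and
+ * releases the PM wake lock taken when the work was scheduled.
+ */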
+static void
+wl_ext_pm_work_handler(struct work_struct *work)
+{
+ struct wl_if_info *cur_if;
+ s32 pm = PM_FAST;
+ dhd_pub_t *dhd;
+
+ BCM_SET_CONTAINER_OF(cur_if, work, struct wl_if_info, pm_enable_work.work);
+
+ IAPSTA_TRACE("wlan", "%s: Enter\n", __FUNCTION__);
+
+ if (cur_if->dev == NULL)
+ return;
+
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic push")
+_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
+#endif
+
+ dhd = dhd_get_pub(cur_if->dev);
+
+ if (!dhd || !dhd->up) {
+ IAPSTA_TRACE(cur_if->ifname, "dhd is null or not up\n");
+ return;
+ }
+ if (dhd_conf_get_pm(dhd) >= 0)
+ pm = dhd_conf_get_pm(dhd);
+ wl_ext_ioctl(cur_if->dev, WLC_SET_PM, &pm, sizeof(pm), 1);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic pop")
+#endif
+ DHD_PM_WAKE_UNLOCK(dhd);
+
+}
+
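+/* add=TRUE:  disable PM right away (PM_OFF, or the configured value) and
+ *            schedule pm_enable_work to restore PM after WL_PM_ENABLE_TIMEOUT
+ *            ms, holding a PM wake lock in the meantime.
+ * add=FALSE: cancel any pending pm_enable_work and drop the wake lock.
+ */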
+void
+wl_ext_add_remove_pm_enable_work(struct net_device *dev, bool add)
+{
+ dhd_pub_t *dhd = dhd_get_pub(dev);
+ struct wl_if_info *cur_if = NULL;
+ u16 wq_duration = 0;
+ s32 pm = PM_OFF;
+
+ cur_if = wl_get_cur_if(dev);
+ if (!cur_if)
+ return;
+
+ mutex_lock(&cur_if->pm_sync);
+ /*
+ * Make cancel and schedule work part mutually exclusive
+ * so that while cancelling, we are sure that there is no
+ * work getting scheduled.
+ */
+
+ if (delayed_work_pending(&cur_if->pm_enable_work)) {
+ cancel_delayed_work_sync(&cur_if->pm_enable_work);
+ DHD_PM_WAKE_UNLOCK(dhd);
+ }
+
+ if (add) {
+ wq_duration = (WL_PM_ENABLE_TIMEOUT);
+ }
+
+ /* It should schedule work item only if driver is up */
+ if (dhd->up) {
+ if (add) {
+ if (dhd_conf_get_pm(dhd) >= 0)
+ pm = dhd_conf_get_pm(dhd);
+ wl_ext_ioctl(cur_if->dev, WLC_SET_PM, &pm, sizeof(pm), 1);
+ }
+ if (wq_duration) {
+ if (schedule_delayed_work(&cur_if->pm_enable_work,
+ msecs_to_jiffies((const unsigned int)wq_duration))) {
+ DHD_PM_WAKE_LOCK_TIMEOUT(dhd, wq_duration);
+ } else {
+ IAPSTA_ERROR(cur_if->ifname, "Can't schedule pm work handler\n");
+ }
+ }
+ }
+ mutex_unlock(&cur_if->pm_sync);
+
+}
+
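+/* Parse a WEP key string into wsec_key: plain-text keys of 5/13/16 chars are
+ * copied as-is, while 10/26/32/64 hex digits (optionally "0x"-prefixed) are
+ * converted to binary. The cipher is then chosen from the resulting key
+ * length and the key is flagged as the primary key.
+ */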
+static int
+wl_ext_parse_wep(char *key, struct wl_wsec_key *wsec_key)
+{
+ char hex[] = "XX";
+ unsigned char *data = wsec_key->data;
+ char *keystr = key;
+
+ switch (strlen(keystr)) {
+ case 5:
+ case 13:
+ case 16:
+ wsec_key->len = strlen(keystr);
+ memcpy(data, keystr, wsec_key->len + 1);
+ break;
+ case 12:
+ case 28:
+ case 34:
+ case 66:
+ /* strip leading 0x */
+ if (!strnicmp(keystr, "0x", 2))
+ keystr += 2;
+ else
+ return -1;
+ /* fall through */
+ case 10:
+ case 26:
+ case 32:
+ case 64:
+ wsec_key->len = strlen(keystr) / 2;
+ while (*keystr) {
+ strncpy(hex, keystr, 2);
+ *data++ = (char) strtoul(hex, NULL, 16);
+ keystr += 2;
+ }
+ break;
+ default:
+ return -1;
+ }
+
+ switch (wsec_key->len) {
+ case 5:
+ wsec_key->algo = CRYPTO_ALGO_WEP1;
+ break;
+ case 13:
+ wsec_key->algo = CRYPTO_ALGO_WEP128;
+ break;
+ case 16:
+ /* default to AES-CCM */
+ wsec_key->algo = CRYPTO_ALGO_AES_CCM;
+ break;
+ case 32:
+ wsec_key->algo = CRYPTO_ALGO_TKIP;
+ break;
+ default:
+ return -1;
+ }
+
+ /* Set as primary wsec_key by default */
+ wsec_key->flags |= WL_PRIMARY_KEY;
+
+ return 0;
+}
+
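+/* Apply the configured b/g/n/ac mode by toggling the "nmode" and "vhtmode"
+ * iovars together with WLC_SET_GMODE (0 = B only, 1 = B/G mixed, 2 = G only).
+ * The interface is taken down for the change and brought back up afterwards.
+ */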
+static int
+wl_ext_set_bgnmode(struct wl_if_info *cur_if)
+{
+ struct net_device *dev = cur_if->dev;
+ bgnmode_t bgnmode = cur_if->bgnmode;
+ int val;
+
+ if (bgnmode == 0)
+ return 0;
+
+ wl_ext_ioctl(dev, WLC_DOWN, NULL, 0, 1);
+ if (bgnmode == IEEE80211B) {
+ wl_ext_iovar_setint(dev, "nmode", 0);
+ val = 0;
+ wl_ext_ioctl(dev, WLC_SET_GMODE, &val, sizeof(val), 1);
+ IAPSTA_TRACE(dev->name, "Network mode: B only\n");
+ } else if (bgnmode == IEEE80211G) {
+ wl_ext_iovar_setint(dev, "nmode", 0);
+ val = 2;
+ wl_ext_ioctl(dev, WLC_SET_GMODE, &val, sizeof(val), 1);
+ IAPSTA_TRACE(dev->name, "Network mode: G only\n");
+ } else if (bgnmode == IEEE80211BG) {
+ wl_ext_iovar_setint(dev, "nmode", 0);
+ val = 1;
+ wl_ext_ioctl(dev, WLC_SET_GMODE, &val, sizeof(val), 1);
+ IAPSTA_TRACE(dev->name, "Network mode: B/G mixed\n");
+ } else if (bgnmode == IEEE80211BGN) {
+ wl_ext_iovar_setint(dev, "nmode", 0);
+ wl_ext_iovar_setint(dev, "nmode", 1);
+ wl_ext_iovar_setint(dev, "vhtmode", 0);
+ val = 1;
+ wl_ext_ioctl(dev, WLC_SET_GMODE, &val, sizeof(val), 1);
+ IAPSTA_TRACE(dev->name, "Network mode: B/G/N mixed\n");
+ } else if (bgnmode == IEEE80211BGNAC) {
+ wl_ext_iovar_setint(dev, "nmode", 0);
+ wl_ext_iovar_setint(dev, "nmode", 1);
+ wl_ext_iovar_setint(dev, "vhtmode", 1);
+ val = 1;
+ wl_ext_ioctl(dev, WLC_SET_GMODE, &val, sizeof(val), 1);
+ IAPSTA_TRACE(dev->name, "Network mode: B/G/N/AC mixed\n");
+ }
+ wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1);
+
+ return 0;
+}
+
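+/* Translate the configured auth mode into the "auth" (802.11 authentication
+ * type) and "wpa_auth" (WPA/WPA2 PSK bitmask) iovars, and program the
+ * matching WLC_SET_INFRA BSS type for mesh and STA interfaces.
+ */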
+static int
+wl_ext_set_amode(struct wl_if_info *cur_if)
+{
+ struct net_device *dev = cur_if->dev;
+ authmode_t amode = cur_if->amode;
+ int auth=0, wpa_auth=0;
+
+#ifdef WLMESH
+ if (cur_if->ifmode == IMESH_MODE) {
+ if (amode == AUTH_SAE) {
+ auth = WL_AUTH_OPEN_SYSTEM;
+ wpa_auth = WPA2_AUTH_PSK;
+ IAPSTA_INFO(dev->name, "SAE\n");
+ } else {
+ auth = WL_AUTH_OPEN_SYSTEM;
+ wpa_auth = WPA_AUTH_DISABLED;
+ IAPSTA_INFO(dev->name, "Open System\n");
+ }
+ } else
+#endif /* WLMESH */
+ if (amode == AUTH_OPEN) {
+ auth = WL_AUTH_OPEN_SYSTEM;
+ wpa_auth = WPA_AUTH_DISABLED;
+ IAPSTA_INFO(dev->name, "Open System\n");
+ } else if (amode == AUTH_SHARED) {
+ auth = WL_AUTH_SHARED_KEY;
+ wpa_auth = WPA_AUTH_DISABLED;
+ IAPSTA_INFO(dev->name, "Shared Key\n");
+ } else if (amode == AUTH_WPAPSK) {
+ auth = WL_AUTH_OPEN_SYSTEM;
+ wpa_auth = WPA_AUTH_PSK;
+ IAPSTA_INFO(dev->name, "WPA-PSK\n");
+ } else if (amode == AUTH_WPA2PSK) {
+ auth = WL_AUTH_OPEN_SYSTEM;
+ wpa_auth = WPA2_AUTH_PSK;
+ IAPSTA_INFO(dev->name, "WPA2-PSK\n");
+ } else if (amode == AUTH_WPAWPA2PSK) {
+ auth = WL_AUTH_OPEN_SYSTEM;
+ wpa_auth = WPA2_AUTH_PSK | WPA_AUTH_PSK;
+ IAPSTA_INFO(dev->name, "WPA/WPA2-PSK\n");
+ }
+#ifdef WLMESH
+ if (cur_if->ifmode == IMESH_MODE) {
+ s32 val = WL_BSSTYPE_MESH;
+ wl_ext_ioctl(dev, WLC_SET_INFRA, &val, sizeof(val), 1);
+ } else
+#endif /* WLMESH */
+ if (cur_if->ifmode == ISTA_MODE) {
+ s32 val = WL_BSSTYPE_INFRA;
+ wl_ext_ioctl(dev, WLC_SET_INFRA, &val, sizeof(val), 1);
+ }
+ wl_ext_iovar_setint(dev, "auth", auth);
+
+ wl_ext_iovar_setint(dev, "wpa_auth", wpa_auth);
+
+ return 0;
+}
+
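+/* Program the encryption settings: derive the wsec bitmask from the
+ * configured encryption mode, then install either a WEP key (WLC_SET_KEY) or
+ * a WPA passphrase (WLC_SET_WSEC_PMK). Mesh interfaces running SAE are set
+ * up through mesh_auth_proto/mfp/sae_password instead.
+ */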
+static int
+wl_ext_set_emode(struct wl_apsta_params *apsta_params,
+ struct wl_if_info *cur_if)
+{
+ struct net_device *dev = cur_if->dev;
+ int wsec=0;
+ struct wl_wsec_key wsec_key;
+ wsec_pmk_t psk;
+ authmode_t amode = cur_if->amode;
+ encmode_t emode = cur_if->emode;
+ char *key = cur_if->key;
+ struct dhd_pub *dhd = apsta_params->dhd;
+
+ memset(&wsec_key, 0, sizeof(wsec_key));
+ memset(&psk, 0, sizeof(psk));
+
+#ifdef WLMESH
+ if (cur_if->ifmode == IMESH_MODE) {
+ if (amode == AUTH_SAE) {
+ wsec = AES_ENABLED;
+ } else {
+ wsec = WSEC_NONE;
+ }
+ } else
+#endif /* WLMESH */
+ if (emode == ENC_NONE) {
+ wsec = WSEC_NONE;
+		IAPSTA_INFO(dev->name, "No security\n");
+ } else if (emode == ENC_WEP) {
+ wsec = WEP_ENABLED;
+ wl_ext_parse_wep(key, &wsec_key);
+ IAPSTA_INFO(dev->name, "WEP key \"%s\"\n", wsec_key.data);
+ } else if (emode == ENC_TKIP) {
+ wsec = TKIP_ENABLED;
+ psk.key_len = strlen(key);
+ psk.flags = WSEC_PASSPHRASE;
+ memcpy(psk.key, key, strlen(key));
+ IAPSTA_INFO(dev->name, "TKIP key \"%s\"\n", psk.key);
+ } else if (emode == ENC_AES || amode == AUTH_SAE) {
+ wsec = AES_ENABLED;
+ psk.key_len = strlen(key);
+ psk.flags = WSEC_PASSPHRASE;
+ memcpy(psk.key, key, strlen(key));
+ IAPSTA_INFO(dev->name, "AES key \"%s\"\n", psk.key);
+ } else if (emode == ENC_TKIPAES) {
+ wsec = TKIP_ENABLED | AES_ENABLED;
+ psk.key_len = strlen(key);
+ psk.flags = WSEC_PASSPHRASE;
+ memcpy(psk.key, key, strlen(key));
+ IAPSTA_INFO(dev->name, "TKIP/AES key \"%s\"\n", psk.key);
+ }
+ if (dhd->conf->chip == BCM43430_CHIP_ID && cur_if->ifidx > 0 && wsec >= 2 &&
+ apsta_params->apstamode == ISTAAP_MODE) {
+ wsec |= WSEC_SWFLAG; // terence 20180628: fix me, this is a workaround
+ }
+
+ wl_ext_iovar_setint(dev, "wsec", wsec);
+
+#ifdef WLMESH
+ if (cur_if->ifmode == IMESH_MODE) {
+ if (amode == AUTH_SAE) {
+ s8 iovar_buf[WLC_IOCTL_SMLEN];
+ IAPSTA_INFO(dev->name, "AES key \"%s\"\n", key);
+ wl_ext_iovar_setint(dev, "mesh_auth_proto", 1);
+ wl_ext_iovar_setint(dev, "mfp", WL_MFP_REQUIRED);
+ wl_ext_iovar_setbuf(dev, "sae_password", key, strlen(key),
+ iovar_buf, WLC_IOCTL_SMLEN, NULL);
+ } else {
+			IAPSTA_INFO(dev->name, "No security\n");
+ wl_ext_iovar_setint(dev, "mesh_auth_proto", 0);
+ wl_ext_iovar_setint(dev, "mfp", WL_MFP_NONE);
+ }
+ } else
+#endif /* WLMESH */
+ if (emode == ENC_WEP) {
+ wl_ext_ioctl(dev, WLC_SET_KEY, &wsec_key, sizeof(wsec_key), 1);
+ } else if (emode == ENC_TKIP || emode == ENC_AES || emode == ENC_TKIPAES) {
+ if (cur_if->ifmode == ISTA_MODE)
+ wl_ext_iovar_setint(dev, "sup_wpa", 1);
+ wl_ext_ioctl(dev, WLC_SET_WSEC_PMK, &psk, sizeof(psk), 1);
+ }
+
+ return 0;
+}
+
+static u32
+wl_ext_get_chanspec(struct wl_apsta_params *apsta_params,
+ struct net_device *dev)
+{
+ int ret = 0;
+ struct ether_addr bssid;
+ u32 chanspec = 0;
+
+ ret = wldev_ioctl(dev, WLC_GET_BSSID, &bssid, sizeof(bssid), 0);
+ if (ret != BCME_NOTASSOCIATED && memcmp(&ether_null, &bssid, ETHER_ADDR_LEN)) {
+ if (wl_ext_iovar_getint(dev, "chanspec", (s32 *)&chanspec) == BCME_OK) {
+ chanspec = wl_ext_chspec_driver_to_host(apsta_params->ioctl_ver, chanspec);
+ return chanspec;
+ }
+ }
+
+ return 0;
+}
+
+static uint16
+wl_ext_get_chan(struct wl_apsta_params *apsta_params, struct net_device *dev)
+{
+ int ret = 0;
+ uint16 chan = 0, ctl_chan;
+ struct ether_addr bssid;
+ u32 chanspec = 0;
+
+ ret = wldev_ioctl(dev, WLC_GET_BSSID, &bssid, sizeof(bssid), 0);
+ if (ret != BCME_NOTASSOCIATED && memcmp(&ether_null, &bssid, ETHER_ADDR_LEN)) {
+ if (wl_ext_iovar_getint(dev, "chanspec", (s32 *)&chanspec) == BCME_OK) {
+ chanspec = wl_ext_chspec_driver_to_host(apsta_params->ioctl_ver, chanspec);
+ ctl_chan = wf_chspec_ctlchan(chanspec);
+ chan = (u16)(ctl_chan & 0x00FF);
+ return chan;
+ }
+ }
+
+ return 0;
+}
+
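+/* Build a firmware-order chanspec for the given control channel: query
+ * "bw_cap" (falling back to "mimo_bw_cap" on firmware that lacks it) to find
+ * the widest bandwidth supported on 5G, then retry with narrower bandwidths
+ * (80 -> 40 -> 20 MHz) until a valid chanspec can be produced.
+ */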
+static chanspec_t
+wl_ext_chan_to_chanspec(struct wl_apsta_params *apsta_params,
+ struct net_device *dev, uint16 channel)
+{
+ s32 _chan = channel;
+ chanspec_t chspec = 0;
+ chanspec_t fw_chspec = 0;
+ u32 bw = WL_CHANSPEC_BW_20;
+ s32 err = BCME_OK;
+ s32 bw_cap = 0;
+ s8 iovar_buf[WLC_IOCTL_SMLEN];
+ struct {
+ u32 band;
+ u32 bw_cap;
+ } param = {0, 0};
+ uint band;
+
+ if (_chan <= CH_MAX_2G_CHANNEL)
+ band = IEEE80211_BAND_2GHZ;
+ else
+ band = IEEE80211_BAND_5GHZ;
+
+ if (band == IEEE80211_BAND_5GHZ) {
+ param.band = WLC_BAND_5G;
+ err = wl_ext_iovar_getbuf(dev, "bw_cap", &param, sizeof(param),
+ iovar_buf, WLC_IOCTL_SMLEN, NULL);
+ if (err) {
+ if (err != BCME_UNSUPPORTED) {
+ IAPSTA_ERROR(dev->name, "bw_cap failed, %d\n", err);
+				return 0; /* not a chanspec; don't return a negative error as one */
+ } else {
+ err = wl_ext_iovar_getint(dev, "mimo_bw_cap", &bw_cap);
+ if (bw_cap != WLC_N_BW_20ALL)
+ bw = WL_CHANSPEC_BW_40;
+ }
+ } else {
+ if (WL_BW_CAP_80MHZ(iovar_buf[0]))
+ bw = WL_CHANSPEC_BW_80;
+ else if (WL_BW_CAP_40MHZ(iovar_buf[0]))
+ bw = WL_CHANSPEC_BW_40;
+ else
+ bw = WL_CHANSPEC_BW_20;
+ }
+ }
+ else if (band == IEEE80211_BAND_2GHZ)
+ bw = WL_CHANSPEC_BW_20;
+
+set_channel:
+ chspec = wf_channel2chspec(_chan, bw);
+ if (wf_chspec_valid(chspec)) {
+ fw_chspec = wl_ext_chspec_host_to_driver(apsta_params->ioctl_ver, chspec);
+ if (fw_chspec == INVCHANSPEC) {
+ IAPSTA_ERROR(dev->name, "failed to convert host chanspec to fw chanspec\n");
+ fw_chspec = 0;
+ }
+ } else {
+ if (bw == WL_CHANSPEC_BW_80)
+ bw = WL_CHANSPEC_BW_40;
+ else if (bw == WL_CHANSPEC_BW_40)
+ bw = WL_CHANSPEC_BW_20;
+ else
+ bw = 0;
+ if (bw)
+ goto set_channel;
+ IAPSTA_ERROR(dev->name, "Invalid chanspec 0x%x\n", chspec);
+ err = BCME_ERROR;
+ }
+
+ return fw_chspec;
+}
+
+static bool
+wl_ext_radar_detect(struct net_device *dev)
+{
+ int ret = BCME_OK;
+ bool radar = FALSE;
+ s32 val = 0;
+
+	if ((ret = wldev_ioctl(dev, WLC_GET_RADAR, &val, sizeof(int), false)) == 0) {
+ radar = TRUE;
+ }
+
+ return radar;
+}
+
+static int
+wl_ext_assoclist(struct net_device *dev, char *data, char *command,
+ int total_len)
+{
+ int ret = 0, i, maxassoc = 0, bytes_written = 0;
+ char mac_buf[MAX_NUM_OF_ASSOCLIST *
+ sizeof(struct ether_addr) + sizeof(uint)] = {0};
+ struct maclist *assoc_maclist = (struct maclist *)mac_buf;
+
+ assoc_maclist->count = htod32(MAX_NUM_OF_ASSOCLIST);
+ ret = wl_ext_ioctl(dev, WLC_GET_ASSOCLIST, assoc_maclist, sizeof(mac_buf), 0);
+ if (ret)
+ return -1;
+ maxassoc = dtoh32(assoc_maclist->count);
+ bytes_written += snprintf(command+bytes_written, total_len,
+ "%2s: %12s",
+ "no", "------addr------");
+ for (i=0; i<maxassoc; i++) {
+ bytes_written += snprintf(command+bytes_written, total_len,
+ "\n%2d: %pM", i, &assoc_maclist->ea[i]);
+ }
+
+ return bytes_written;
+}
+
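+/* (Re)arm a compat timer to fire after sec*1000+msec ms; a zero timeout only
+ * cancels a pending timer.
+ */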
+static void
+wl_ext_mod_timer(timer_list_compat_t *timer, uint sec, uint msec)
+{
+ uint timeout = sec * 1000 + msec;
+
+ IAPSTA_TRACE("wlan", "timeout=%d\n", timeout);
+
+ if (timer_pending(timer))
+ del_timer_sync(timer);
+
+ if (timeout)
+ mod_timer(timer, jiffies + msecs_to_jiffies(timeout));
+}
+
+static void
+wl_ext_send_event_msg(struct net_device *dev, int event, int status)
+{
+ struct dhd_pub *dhd = dhd_get_pub(dev);
+ struct wl_if_info *cur_if;
+ wl_event_msg_t msg;
+
+ cur_if = wl_get_cur_if(dev);
+ if (!cur_if)
+ return;
+
+ bzero(&msg, sizeof(wl_event_msg_t));
+
+ msg.ifidx = hton32(dhd_net2idx(dhd->info, dev));
+ msg.event_type = hton32(event);
+ msg.status = hton32(status);
+ memcpy(&msg.addr, &cur_if->bssid, ETHER_ADDR_LEN);
+
+#ifdef WL_EVENT
+ wl_ext_event_send(dhd->event_params, &msg, NULL);
+#endif
+#ifdef WL_CFG80211
+ if (dhd->up) {
+ wl_cfg80211_event(dev, &msg, NULL);
+ }
+#endif /* defined(WL_CFG80211) */
+}
+
+static void
+wl_ext_connect_timeout(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct wl_if_info *cur_if;
+
+ if (!dev) {
+ IAPSTA_ERROR("wlan", "dev is not ready\n");
+ return;
+ }
+
+ cur_if = wl_get_cur_if(dev);
+ if (!cur_if)
+ return;
+
+#if defined(WL_EXT_RECONNECT) && defined(WL_CFG80211)
+ cur_if->assoc_info.reassoc = 0;
+#endif /* WL_EXT_RECONNECT && WL_CFG80211 */
+ IAPSTA_ERROR(dev->name, "timer expired\n");
+ wl_ext_send_event_msg(dev, WLC_E_SET_SSID, WLC_E_STATUS_NO_NETWORKS);
+}
+
+#if defined(WL_CFG80211) || (defined(WLMESH) && defined(WL_ESCAN))
+static struct wl_if_info *
+wl_ext_if_enabled(struct wl_apsta_params *apsta_params, ifmode_t ifmode)
+{
+ struct wl_if_info *tmp_if, *target_if = NULL;
+ int i;
+
+ for (i=0; i<MAX_IF_NUM; i++) {
+ tmp_if = &apsta_params->if_info[i];
+ if (tmp_if && tmp_if->ifmode == ifmode &&
+ wl_get_isam_status(tmp_if, IF_READY)) {
+ if (wl_ext_get_chan(apsta_params, tmp_if->dev)) {
+ target_if = tmp_if;
+ break;
+ }
+ }
+ }
+
+ return target_if;
+}
+#endif
+
+#ifdef WLMESH
+static int
+wl_mesh_print_peer_info(mesh_peer_info_ext_t *mpi_ext,
+ uint32 peer_results_count, char *command, int total_len)
+{
+ char *peering_map[] = MESH_PEERING_STATE_STRINGS;
+ uint32 count = 0;
+ int bytes_written = 0;
+
+ bytes_written += snprintf(command+bytes_written, total_len,
+ "%2s: %12s : %6s : %-6s : %6s :"
+ " %5s : %4s : %4s : %11s : %4s",
+ "no", "------addr------ ", "l.aid", "state", "p.aid",
+ "mppid", "llid", "plid", "entry_state", "rssi");
+ for (count=0; count < peer_results_count; count++) {
+ if (mpi_ext->entry_state != MESH_SELF_PEER_ENTRY_STATE_TIMEDOUT) {
+ bytes_written += snprintf(command+bytes_written, total_len,
+ "\n%2d: %pM : 0x%4x : %6s : 0x%4x :"
+ " %5d : %4d : %4d : %11s : %4d",
+ count, &mpi_ext->ea, mpi_ext->local_aid,
+ peering_map[mpi_ext->peer_info.state],
+ mpi_ext->peer_info.peer_aid,
+ mpi_ext->peer_info.mesh_peer_prot_id,
+ mpi_ext->peer_info.local_link_id,
+ mpi_ext->peer_info.peer_link_id,
+ (mpi_ext->entry_state == MESH_SELF_PEER_ENTRY_STATE_ACTIVE) ?
+ "ACTIVE" :
+ "EXTERNAL",
+ mpi_ext->rssi);
+ } else {
+ bytes_written += snprintf(command+bytes_written, total_len,
+ "\n%2d: %pM : %6s : %5s : %6s :"
+ " %5s : %4s : %4s : %11s : %4s",
+ count, &mpi_ext->ea, " NA ", " NA ", " NA ",
+ " NA ", " NA ", " NA ", " TIMEDOUT ", " NA ");
+ }
+ mpi_ext++;
+ }
+
+ return bytes_written;
+}
+
+static int
+wl_mesh_get_peer_results(struct net_device *dev, char *buf, int len)
+{
+ int indata, inlen;
+ mesh_peer_info_dump_t *peer_results;
+ int ret;
+
+ memset(buf, 0, len);
+ peer_results = (mesh_peer_info_dump_t *)buf;
+ indata = htod32(len);
+ inlen = 4;
+ ret = wl_ext_iovar_getbuf(dev, "mesh_peer_status", &indata, inlen, buf, len, NULL);
+ if (!ret) {
+ peer_results = (mesh_peer_info_dump_t *)buf;
+ ret = peer_results->count;
+ }
+
+ return ret;
+}
+
+int
+wl_ext_mesh_peer_status(struct net_device *dev, char *data, char *command,
+ int total_len)
+{
+ struct wl_if_info *cur_if;
+ mesh_peer_info_dump_t *peer_results;
+ mesh_peer_info_ext_t *mpi_ext;
+ char *peer_buf = NULL;
+ int peer_len = WLC_IOCTL_MAXLEN;
+ int dump_written = 0, ret;
+
+ if (!data) {
+ peer_buf = kmalloc(peer_len, GFP_KERNEL);
+ if (peer_buf == NULL) {
+ IAPSTA_ERROR(dev->name, "Failed to allocate buffer of %d bytes\n",
+ peer_len);
+ return -1;
+ }
+ cur_if = wl_get_cur_if(dev);
+ if (cur_if && cur_if->ifmode == IMESH_MODE) {
+ memset(peer_buf, 0, peer_len);
+ ret = wl_mesh_get_peer_results(dev, peer_buf, peer_len);
+ if (ret >= 0) {
+ peer_results = (mesh_peer_info_dump_t *)peer_buf;
+ mpi_ext = (mesh_peer_info_ext_t *)peer_results->mpi_ext;
+ dump_written += wl_mesh_print_peer_info(mpi_ext,
+ peer_results->count, command+dump_written,
+ total_len-dump_written);
+ }
+ } else if (cur_if) {
+ IAPSTA_ERROR(dev->name, "[%s][%c] is not mesh interface\n",
+ cur_if->ifname, cur_if->prefix);
+ }
+ }
+
+ if (peer_buf)
+ kfree(peer_buf);
+ return dump_written;
+}
+
+#ifdef WL_ESCAN
+#define WL_MESH_DELAY_SCAN_TMO 3
+static void
+wl_mesh_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct dhd_pub *dhd;
+ wl_event_msg_t msg;
+
+ if (!dev) {
+ IAPSTA_ERROR("wlan", "dev is not ready\n");
+ return;
+ }
+
+ dhd = dhd_get_pub(dev);
+
+ bzero(&msg, sizeof(wl_event_msg_t));
+ IAPSTA_TRACE(dev->name, "timer expired\n");
+
+ msg.ifidx = hton32(dhd_net2idx(dhd->info, dev));
+ msg.event_type = hton32(WLC_E_RESERVED);
+ msg.reason = hton32(ISAM_RC_MESH_ACS);
+ wl_ext_event_send(dhd->event_params, &msg, NULL);
+}
+
+static int
+wl_mesh_clear_vndr_ie(struct net_device *dev, uchar *oui)
+{
+ char *vndr_ie_buf = NULL;
+ vndr_ie_setbuf_t *vndr_ie = NULL;
+ ie_getbuf_t vndr_ie_tmp;
+ char *iovar_buf = NULL;
+ int err = -1, i;
+ vndr_ie_buf_t *vndr_ie_dump = NULL;
+ uchar *iebuf;
+ vndr_ie_info_t *ie_info;
+ vndr_ie_t *ie;
+
+ vndr_ie_buf = kzalloc(WLC_IOCTL_SMLEN, GFP_KERNEL);
+ if (!vndr_ie_buf) {
+ IAPSTA_ERROR(dev->name, "IE memory alloc failed\n");
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ iovar_buf = kzalloc(WLC_IOCTL_MEDLEN, GFP_KERNEL);
+ if (!iovar_buf) {
+ IAPSTA_ERROR(dev->name, "iovar_buf alloc failed\n");
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ memset(iovar_buf, 0, WLC_IOCTL_MEDLEN);
+ vndr_ie_tmp.pktflag = (uint32) -1;
+ vndr_ie_tmp.id = (uint8) DOT11_MNG_PROPR_ID;
+ err = wl_ext_iovar_getbuf(dev, "vndr_ie", &vndr_ie_tmp, sizeof(vndr_ie_tmp),
+ iovar_buf, WLC_IOCTL_MEDLEN, NULL);
+ if (err)
+ goto exit;
+
+ vndr_ie_dump = (vndr_ie_buf_t *)iovar_buf;
+ if (!vndr_ie_dump->iecount)
+ goto exit;
+
+ iebuf = (uchar *)&vndr_ie_dump->vndr_ie_list[0];
+ for (i=0; i<vndr_ie_dump->iecount; i++) {
+ ie_info = (vndr_ie_info_t *) iebuf;
+ ie = &ie_info->vndr_ie_data;
+ if (memcmp(ie->oui, oui, 3))
+ memset(ie->oui, 0, 3);
+ iebuf += sizeof(uint32) + ie->len + VNDR_IE_HDR_LEN;
+ }
+
+ vndr_ie = (vndr_ie_setbuf_t *) vndr_ie_buf;
+ strncpy(vndr_ie->cmd, "del", VNDR_IE_CMD_LEN - 1);
+ vndr_ie->cmd[VNDR_IE_CMD_LEN - 1] = '\0';
+ memcpy(&vndr_ie->vndr_ie_buffer, vndr_ie_dump, WLC_IOCTL_SMLEN-VNDR_IE_CMD_LEN-1);
+
+ memset(iovar_buf, 0, WLC_IOCTL_MEDLEN);
+ err = wl_ext_iovar_setbuf(dev, "vndr_ie", vndr_ie, WLC_IOCTL_SMLEN, iovar_buf,
+ WLC_IOCTL_MEDLEN, NULL);
+
+exit:
+	/* vndr_ie aliases vndr_ie_buf, so free the buffer itself; this also
+	 * avoids leaking it when the iovar_buf allocation fails.
+	 */
+	if (vndr_ie_buf) {
+		kfree(vndr_ie_buf);
+	}
+	if (iovar_buf) {
+		kfree(iovar_buf);
+	}
+ return err;
+}
+
+static int
+wl_mesh_clear_mesh_info(struct wl_apsta_params *apsta_params,
+ struct wl_if_info *mesh_if, bool scan)
+{
+ struct wl_mesh_params *mesh_info = &apsta_params->mesh_info;
+ uchar mesh_oui[]={0x00, 0x22, 0xf4};
+ int ret;
+
+ IAPSTA_TRACE(mesh_if->dev->name, "Enter\n");
+
+ ret = wl_mesh_clear_vndr_ie(mesh_if->dev, mesh_oui);
+ memset(mesh_info, 0, sizeof(struct wl_mesh_params));
+ if (scan) {
+ mesh_info->scan_channel = wl_ext_get_chan(apsta_params, mesh_if->dev);
+ wl_ext_mod_timer(&mesh_if->delay_scan, 0, 100);
+ }
+
+ return ret;
+}
+
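+/* Advertise the local mesh routing state in a vendor IE (OUI 00:22:f4)
+ * carried in beacons and probe responses. The payload is built as
+ * hex-encoded TLVs: master BSSID, master channel, hop count and the per-hop
+ * peer BSSID list.
+ */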
+static int
+wl_mesh_update_vndr_ie(struct wl_apsta_params *apsta_params,
+ struct wl_if_info *mesh_if)
+{
+ struct wl_mesh_params *mesh_info = &apsta_params->mesh_info;
+ char *vndr_ie;
+ uchar mesh_oui[]={0x00, 0x22, 0xf4};
+ int bytes_written = 0;
+ int ret = 0, i, vndr_ie_len;
+ uint8 *peer_bssid;
+
+ wl_mesh_clear_vndr_ie(mesh_if->dev, mesh_oui);
+
+ vndr_ie_len = WLC_IOCTL_MEDLEN;
+ vndr_ie = kmalloc(vndr_ie_len, GFP_KERNEL);
+ if (vndr_ie == NULL) {
+ IAPSTA_ERROR(mesh_if->dev->name, "Failed to allocate buffer of %d bytes\n",
+ WLC_IOCTL_MEDLEN);
+ ret = -1;
+ goto exit;
+ }
+
+ bytes_written += snprintf(vndr_ie+bytes_written, vndr_ie_len,
+ "0x%02x%02x%02x", mesh_oui[0], mesh_oui[1], mesh_oui[2]);
+
+ bytes_written += snprintf(vndr_ie+bytes_written, vndr_ie_len,
+ "%02x%02x%02x%02x%02x%02x%02x%02x", MESH_INFO_MASTER_BSSID, ETHER_ADDR_LEN,
+ ((u8 *)(&mesh_info->master_bssid))[0], ((u8 *)(&mesh_info->master_bssid))[1],
+ ((u8 *)(&mesh_info->master_bssid))[2], ((u8 *)(&mesh_info->master_bssid))[3],
+ ((u8 *)(&mesh_info->master_bssid))[4], ((u8 *)(&mesh_info->master_bssid))[5]);
+
+ bytes_written += snprintf(vndr_ie+bytes_written, vndr_ie_len,
+ "%02x%02x%02x", MESH_INFO_MASTER_CHANNEL, 1, mesh_info->master_channel);
+
+ bytes_written += snprintf(vndr_ie+bytes_written, vndr_ie_len,
+ "%02x%02x%02x", MESH_INFO_HOP_CNT, 1, mesh_info->hop_cnt);
+
+ bytes_written += snprintf(vndr_ie+bytes_written, vndr_ie_len,
+ "%02x%02x", MESH_INFO_PEER_BSSID, mesh_info->hop_cnt*ETHER_ADDR_LEN);
+ for (i=0; i<mesh_info->hop_cnt && i<MAX_HOP_LIST; i++) {
+ peer_bssid = (uint8 *)&mesh_info->peer_bssid[i];
+ bytes_written += snprintf(vndr_ie+bytes_written, vndr_ie_len,
+ "%02x%02x%02x%02x%02x%02x",
+ peer_bssid[0], peer_bssid[1], peer_bssid[2],
+ peer_bssid[3], peer_bssid[4], peer_bssid[5]);
+ }
+
+ ret = wl_ext_add_del_ie(mesh_if->dev, VNDR_IE_BEACON_FLAG|VNDR_IE_PRBRSP_FLAG,
+ vndr_ie, "add");
+ if (!ret) {
+ IAPSTA_INFO(mesh_if->dev->name, "mbssid=%pM, mchannel=%d, hop=%d, pbssid=%pM\n",
+ &mesh_info->master_bssid, mesh_info->master_channel, mesh_info->hop_cnt,
+ mesh_info->peer_bssid);
+ }
+
+exit:
+ if (vndr_ie)
+ kfree(vndr_ie);
+ return ret;
+}
+
+static bool
+wl_mesh_update_master_info(struct wl_apsta_params *apsta_params,
+ struct wl_if_info *mesh_if)
+{
+ struct wl_mesh_params *mesh_info = &apsta_params->mesh_info;
+ struct wl_if_info *sta_if = NULL;
+ bool updated = FALSE;
+
+ sta_if = wl_ext_if_enabled(apsta_params, ISTA_MODE);
+ if (sta_if) {
+ wldev_ioctl(mesh_if->dev, WLC_GET_BSSID, &mesh_info->master_bssid,
+ ETHER_ADDR_LEN, 0);
+ mesh_info->master_channel = wl_ext_get_chan(apsta_params, mesh_if->dev);
+ mesh_info->hop_cnt = 0;
+ memset(mesh_info->peer_bssid, 0, MAX_HOP_LIST*ETHER_ADDR_LEN);
+ if (!wl_mesh_update_vndr_ie(apsta_params, mesh_if))
+ updated = TRUE;
+ }
+
+ return updated;
+}
+
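+/* Refresh the mesh master info: walk the established peers and adopt the
+ * master BSSID/channel of the peer with the lowest hop count (incrementing
+ * our own hop count), then re-advertise the vendor IE. If no master is known
+ * yet, escan for a peer mesh BSS and, when its master channel differs from
+ * ours, bounce the interface over to that channel.
+ */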
+static bool
+wl_mesh_update_mesh_info(struct wl_apsta_params *apsta_params,
+ struct wl_if_info *mesh_if)
+{
+ struct wl_mesh_params *mesh_info = &apsta_params->mesh_info, peer_mesh_info;
+ uint32 count = 0;
+ char *dump_buf = NULL;
+ mesh_peer_info_dump_t *peer_results;
+ mesh_peer_info_ext_t *mpi_ext;
+ struct ether_addr bssid;
+ bool updated = FALSE, bss_found = FALSE;
+ uint16 cur_chan;
+
+ dump_buf = kmalloc(WLC_IOCTL_MAXLEN, GFP_KERNEL);
+ if (dump_buf == NULL) {
+ IAPSTA_ERROR(mesh_if->dev->name, "Failed to allocate buffer of %d bytes\n",
+ WLC_IOCTL_MAXLEN);
+ return FALSE;
+ }
+ count = wl_mesh_get_peer_results(mesh_if->dev, dump_buf, WLC_IOCTL_MAXLEN);
+ if (count > 0) {
+ memset(&bssid, 0, ETHER_ADDR_LEN);
+ wldev_ioctl(mesh_if->dev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN, 0);
+ peer_results = (mesh_peer_info_dump_t *)dump_buf;
+ mpi_ext = (mesh_peer_info_ext_t *)peer_results->mpi_ext;
+ for (count = 0; count < peer_results->count; count++) {
+ if (mpi_ext->entry_state != MESH_SELF_PEER_ENTRY_STATE_TIMEDOUT &&
+ mpi_ext->peer_info.state == MESH_PEERING_ESTAB) {
+ memset(&peer_mesh_info, 0, sizeof(struct wl_mesh_params));
+ bss_found = wl_escan_mesh_info(mesh_if->dev, mesh_if->escan,
+ &mpi_ext->ea, &peer_mesh_info);
+ if (bss_found && (mesh_info->master_channel == 0 ||
+ peer_mesh_info.hop_cnt <= mesh_info->hop_cnt) &&
+ memcmp(&peer_mesh_info.peer_bssid, &bssid, ETHER_ADDR_LEN)) {
+ memcpy(&mesh_info->master_bssid, &peer_mesh_info.master_bssid,
+ ETHER_ADDR_LEN);
+ mesh_info->master_channel = peer_mesh_info.master_channel;
+ mesh_info->hop_cnt = peer_mesh_info.hop_cnt+1;
+ memset(mesh_info->peer_bssid, 0, MAX_HOP_LIST*ETHER_ADDR_LEN);
+ memcpy(&mesh_info->peer_bssid, &mpi_ext->ea, ETHER_ADDR_LEN);
+ memcpy(&mesh_info->peer_bssid[1], peer_mesh_info.peer_bssid,
+ (MAX_HOP_LIST-1)*ETHER_ADDR_LEN);
+ updated = TRUE;
+ }
+ }
+ mpi_ext++;
+ }
+ if (updated) {
+ if (wl_mesh_update_vndr_ie(apsta_params, mesh_if)) {
+ IAPSTA_ERROR(mesh_if->dev->name, "update failed\n");
+ mesh_info->master_channel = 0;
+ updated = FALSE;
+ goto exit;
+ }
+ }
+ }
+
+ if (!mesh_info->master_channel) {
+ wlc_ssid_t cur_ssid;
+ char sec[64];
+ bool sae = FALSE;
+ memset(&peer_mesh_info, 0, sizeof(struct wl_mesh_params));
+ wl_ext_ioctl(mesh_if->dev, WLC_GET_SSID, &cur_ssid, sizeof(cur_ssid), 0);
+ wl_ext_get_sec(mesh_if->dev, mesh_if->ifmode, sec, sizeof(sec), FALSE);
+ if (strnicmp(sec, "sae/sae", strlen("sae/sae")) == 0)
+ sae = TRUE;
+ cur_chan = wl_ext_get_chan(apsta_params, mesh_if->dev);
+ bss_found = wl_escan_mesh_peer(mesh_if->dev, mesh_if->escan, &cur_ssid, cur_chan,
+ sae, &peer_mesh_info);
+
+		if (bss_found && peer_mesh_info.master_channel &&
+			(cur_chan != peer_mesh_info.master_channel)) {
+ WL_MSG(mesh_if->ifname, "moving channel %d -> %d\n",
+ cur_chan, peer_mesh_info.master_channel);
+ wl_ext_disable_iface(mesh_if->dev, mesh_if->ifname);
+ mesh_if->channel = peer_mesh_info.master_channel;
+ wl_ext_enable_iface(mesh_if->dev, mesh_if->ifname, 500, TRUE);
+ }
+ }
+
+exit:
+ if (dump_buf)
+ kfree(dump_buf);
+ return updated;
+}
+
+static void
+wl_mesh_event_handler(struct wl_apsta_params *apsta_params,
+ struct wl_if_info *mesh_if, const wl_event_msg_t *e, void *data)
+{
+ struct wl_mesh_params *mesh_info = &apsta_params->mesh_info;
+ uint32 event_type = ntoh32(e->event_type);
+ uint32 status = ntoh32(e->status);
+ uint32 reason = ntoh32(e->reason);
+ int ret;
+
+ if (wl_get_isam_status(mesh_if, AP_CREATED) &&
+ ((event_type == WLC_E_SET_SSID && status == WLC_E_STATUS_SUCCESS) ||
+ (event_type == WLC_E_LINK && status == WLC_E_STATUS_SUCCESS &&
+ reason == WLC_E_REASON_INITIAL_ASSOC))) {
+ if (!wl_mesh_update_master_info(apsta_params, mesh_if)) {
+ mesh_info->scan_channel = wl_ext_get_chan(apsta_params, mesh_if->dev);
+ wl_ext_mod_timer(&mesh_if->delay_scan, WL_MESH_DELAY_SCAN_TMO, 0);
+ }
+ }
+ else if ((event_type == WLC_E_LINK && reason == WLC_E_LINK_BSSCFG_DIS) ||
+ (event_type == WLC_E_LINK && status == WLC_E_STATUS_SUCCESS &&
+ reason == WLC_E_REASON_DEAUTH)) {
+ wl_mesh_clear_mesh_info(apsta_params, mesh_if, FALSE);
+ }
+ else if (wl_get_isam_status(mesh_if, AP_CREATED) &&
+ (event_type == WLC_E_ASSOC_IND || event_type == WLC_E_REASSOC_IND) &&
+ reason == DOT11_SC_SUCCESS) {
+ mesh_info->scan_channel = wl_ext_get_chan(apsta_params, mesh_if->dev);
+ wl_ext_mod_timer(&mesh_if->delay_scan, 0, 100);
+ }
+ else if (event_type == WLC_E_DISASSOC_IND || event_type == WLC_E_DEAUTH_IND ||
+ (event_type == WLC_E_DEAUTH && reason != DOT11_RC_RESERVED)) {
+ if (!memcmp(&mesh_info->peer_bssid, &e->addr, ETHER_ADDR_LEN))
+ wl_mesh_clear_mesh_info(apsta_params, mesh_if, TRUE);
+ }
+ else if (wl_get_isam_status(mesh_if, AP_CREATED) &&
+ event_type == WLC_E_RESERVED && reason == ISAM_RC_MESH_ACS) {
+ if (!wl_mesh_update_master_info(apsta_params, mesh_if)) {
+ wl_scan_info_t scan_info;
+ memset(&scan_info, 0, sizeof(wl_scan_info_t));
+ wl_ext_ioctl(mesh_if->dev, WLC_GET_SSID, &scan_info.ssid, sizeof(wlc_ssid_t), 0);
+ scan_info.channels.count = 1;
+ scan_info.channels.channel[0] = mesh_info->scan_channel;
+ ret = wl_escan_set_scan(mesh_if->dev, &scan_info);
+ if (ret)
+ wl_ext_mod_timer(&mesh_if->delay_scan, WL_MESH_DELAY_SCAN_TMO, 0);
+ }
+ }
+ else if (wl_get_isam_status(mesh_if, AP_CREATED) &&
+ ((event_type == WLC_E_ESCAN_RESULT && status == WLC_E_STATUS_SUCCESS) ||
+ (event_type == WLC_E_ESCAN_RESULT &&
+ (status == WLC_E_STATUS_ABORT || status == WLC_E_STATUS_NEWSCAN ||
+ status == WLC_E_STATUS_11HQUIET || status == WLC_E_STATUS_CS_ABORT ||
+ status == WLC_E_STATUS_NEWASSOC || status == WLC_E_STATUS_TIMEOUT)))) {
+ if (!wl_mesh_update_master_info(apsta_params, mesh_if)) {
+ if (!wl_mesh_update_mesh_info(apsta_params, mesh_if)) {
+ mesh_info->scan_channel = 0;
+ wl_ext_mod_timer(&mesh_if->delay_scan, WL_MESH_DELAY_SCAN_TMO, 0);
+ }
+ }
+ }
+}
+
+static void
+wl_mesh_escan_detach(dhd_pub_t *dhd, struct wl_if_info *mesh_if)
+{
+ IAPSTA_TRACE(mesh_if->dev->name, "Enter\n");
+
+ del_timer_sync(&mesh_if->delay_scan);
+
+ if (mesh_if->escan) {
+ mesh_if->escan = NULL;
+ }
+}
+
+static int
+wl_mesh_escan_attach(dhd_pub_t *dhd, struct wl_if_info *mesh_if)
+{
+ IAPSTA_TRACE(mesh_if->dev->name, "Enter\n");
+
+ mesh_if->escan = dhd->escan;
+ init_timer_compat(&mesh_if->delay_scan, wl_mesh_timer, mesh_if->dev);
+
+ return 0;
+}
+
+static uint
+wl_mesh_update_peer_path(struct wl_if_info *mesh_if, char *command,
+ int total_len)
+{
+ struct wl_mesh_params peer_mesh_info;
+ uint32 count = 0;
+ char *dump_buf = NULL;
+ mesh_peer_info_dump_t *peer_results;
+ mesh_peer_info_ext_t *mpi_ext;
+ int bytes_written = 0, j, k;
+ bool bss_found = FALSE;
+
+ dump_buf = kmalloc(WLC_IOCTL_MAXLEN, GFP_KERNEL);
+ if (dump_buf == NULL) {
+ IAPSTA_ERROR(mesh_if->dev->name, "Failed to allocate buffer of %d bytes\n",
+ WLC_IOCTL_MAXLEN);
+ return FALSE;
+ }
+ count = wl_mesh_get_peer_results(mesh_if->dev, dump_buf, WLC_IOCTL_MAXLEN);
+ if (count > 0) {
+ peer_results = (mesh_peer_info_dump_t *)dump_buf;
+ mpi_ext = (mesh_peer_info_ext_t *)peer_results->mpi_ext;
+ for (count = 0; count < peer_results->count; count++) {
+ if (mpi_ext->entry_state != MESH_SELF_PEER_ENTRY_STATE_TIMEDOUT &&
+ mpi_ext->peer_info.state == MESH_PEERING_ESTAB) {
+ memset(&peer_mesh_info, 0, sizeof(struct wl_mesh_params));
+ bss_found = wl_escan_mesh_info(mesh_if->dev, mesh_if->escan,
+ &mpi_ext->ea, &peer_mesh_info);
+ if (bss_found) {
+ bytes_written += snprintf(command+bytes_written, total_len,
+ "\npeer=%pM, hop=%d",
+ &mpi_ext->ea, peer_mesh_info.hop_cnt);
+ for (j=1; j<peer_mesh_info.hop_cnt; j++) {
+ bytes_written += snprintf(command+bytes_written,
+ total_len, "\n");
+ for (k=0; k<j; k++) {
+ bytes_written += snprintf(command+bytes_written,
+ total_len, " ");
+ }
+ bytes_written += snprintf(command+bytes_written, total_len,
+ "%pM", &peer_mesh_info.peer_bssid[j]);
+ }
+ }
+ }
+ mpi_ext++;
+ }
+ }
+
+ if (dump_buf)
+ kfree(dump_buf);
+ return bytes_written;
+}
+
+int
+wl_ext_isam_peer_path(struct net_device *dev, char *command, int total_len)
+{
+ struct dhd_pub *dhd = dhd_get_pub(dev);
+ struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+ struct wl_mesh_params *mesh_info = &apsta_params->mesh_info;
+ struct wl_if_info *tmp_if;
+ uint16 chan = 0;
+ char *dump_buf = NULL;
+ int dump_len = WLC_IOCTL_MEDLEN;
+ int dump_written = 0;
+ int i;
+
+ if (command || android_msg_level & ANDROID_INFO_LEVEL) {
+ if (command) {
+ dump_buf = command;
+ dump_len = total_len;
+ } else {
+ dump_buf = kmalloc(dump_len, GFP_KERNEL);
+ if (dump_buf == NULL) {
+ IAPSTA_ERROR(dev->name, "Failed to allocate buffer of %d bytes\n",
+ dump_len);
+ return -1;
+ }
+ }
+ for (i=0; i<MAX_IF_NUM; i++) {
+ tmp_if = &apsta_params->if_info[i];
+ if (tmp_if->dev && tmp_if->ifmode == IMESH_MODE && apsta_params->macs) {
+ chan = wl_ext_get_chan(apsta_params, tmp_if->dev);
+ if (chan) {
+ dump_written += snprintf(dump_buf+dump_written, dump_len,
+ DHD_LOG_PREFIXS "[%s-%c] mbssid=%pM, mchan=%d, hop=%d, pbssid=%pM",
+ tmp_if->ifname, tmp_if->prefix, &mesh_info->master_bssid,
+ mesh_info->master_channel, mesh_info->hop_cnt,
+ &mesh_info->peer_bssid);
+ dump_written += wl_mesh_update_peer_path(tmp_if,
+ dump_buf+dump_written, dump_len-dump_written);
+ }
+ }
+ }
+ IAPSTA_INFO(dev->name, "%s\n", dump_buf);
+ }
+
+ if (!command && dump_buf)
+ kfree(dump_buf);
+ return dump_written;
+}
+#endif /* WL_ESCAN */
+#endif /* WLMESH */
+
+static bool
+wl_ext_master_if(struct wl_if_info *cur_if)
+{
+ if (cur_if->ifmode == IAP_MODE || cur_if->ifmode == IMESH_MODE)
+ return TRUE;
+ else
+ return FALSE;
+}
+
+static int
+wl_ext_if_down(struct wl_apsta_params *apsta_params, struct wl_if_info *cur_if)
+{
+ s8 iovar_buf[WLC_IOCTL_SMLEN];
+ scb_val_t scbval;
+ struct {
+ s32 cfg;
+ s32 val;
+ } bss_setbuf;
+ apstamode_t apstamode = apsta_params->apstamode;
+
+ WL_MSG(cur_if->ifname, "[%c] Turning off...\n", cur_if->prefix);
+
+ if (cur_if->ifmode == ISTA_MODE) {
+ wl_ext_ioctl(cur_if->dev, WLC_DISASSOC, NULL, 0, 1);
+ return 0;
+ } else if (cur_if->ifmode == IAP_MODE || cur_if->ifmode == IMESH_MODE) {
+ // deauthenticate all STA first
+ memcpy(scbval.ea.octet, &ether_bcast, ETHER_ADDR_LEN);
+ wl_ext_ioctl(cur_if->dev, WLC_SCB_DEAUTHENTICATE, &scbval.ea, ETHER_ADDR_LEN, 1);
+ }
+
+ if (apstamode == IAPONLY_MODE || apstamode == IMESHONLY_MODE) {
+ wl_ext_ioctl(cur_if->dev, WLC_DOWN, NULL, 0, 1);
+ } else {
+ bss_setbuf.cfg = 0xffffffff;
+ bss_setbuf.val = htod32(0);
+ wl_ext_iovar_setbuf(cur_if->dev, "bss", &bss_setbuf, sizeof(bss_setbuf),
+ iovar_buf, WLC_IOCTL_SMLEN, NULL);
+ }
+ wl_clr_isam_status(cur_if, AP_CREATED);
+
+ return 0;
+}
+
+static int
+wl_ext_if_up(struct wl_apsta_params *apsta_params, struct wl_if_info *cur_if,
+ bool force_enable, int wait_up)
+{
+ s8 iovar_buf[WLC_IOCTL_SMLEN];
+ struct {
+ s32 cfg;
+ s32 val;
+ } bss_setbuf;
+ apstamode_t apstamode = apsta_params->apstamode;
+ chanspec_t fw_chspec;
+ u32 timeout;
+ wlc_ssid_t ssid = { 0, {0} };
+ uint16 chan = 0;
+
+ if (cur_if->ifmode != IAP_MODE) {
+ IAPSTA_ERROR(cur_if->ifname, "Wrong ifmode\n");
+ return 0;
+ }
+
+ if (wl_ext_dfs_chan(cur_if->channel) && !apsta_params->radar && !force_enable) {
+ WL_MSG(cur_if->ifname, "[%c] skip DFS channel %d\n",
+ cur_if->prefix, cur_if->channel);
+ return 0;
+ } else if (!cur_if->channel) {
+ WL_MSG(cur_if->ifname, "[%c] no valid channel\n", cur_if->prefix);
+ return 0;
+ }
+
+ WL_MSG(cur_if->ifname, "[%c] Turning on...\n", cur_if->prefix);
+
+ wl_ext_set_chanspec(cur_if->dev, apsta_params->ioctl_ver, cur_if->channel,
+ &fw_chspec);
+
+ wl_clr_isam_status(cur_if, AP_CREATED);
+ wl_set_isam_status(cur_if, AP_CREATING);
+ if (apstamode == IAPONLY_MODE) {
+ wl_ext_ioctl(cur_if->dev, WLC_UP, NULL, 0, 1);
+ } else {
+ bss_setbuf.cfg = 0xffffffff;
+ bss_setbuf.val = htod32(1);
+ wl_ext_iovar_setbuf(cur_if->dev, "bss", &bss_setbuf,
+ sizeof(bss_setbuf), iovar_buf, WLC_IOCTL_SMLEN, NULL);
+ }
+
+ if (wait_up) {
+ OSL_SLEEP(wait_up);
+ } else {
+ timeout = wait_event_interruptible_timeout(apsta_params->netif_change_event,
+ wl_get_isam_status(cur_if, AP_CREATED),
+ msecs_to_jiffies(MAX_AP_LINK_WAIT_TIME));
+ if (timeout <= 0 || !wl_get_isam_status(cur_if, AP_CREATED)) {
+ wl_ext_if_down(apsta_params, cur_if);
+ WL_MSG(cur_if->ifname, "[%c] failed to up with SSID: \"%s\"\n",
+ cur_if->prefix, cur_if->ssid);
+ }
+ }
+
+ wl_ext_ioctl(cur_if->dev, WLC_GET_SSID, &ssid, sizeof(ssid), 0);
+ chan = wl_ext_get_chan(apsta_params, cur_if->dev);
+ WL_MSG(cur_if->ifname, "[%c] enabled with SSID: \"%s\" on channel %d\n",
+ cur_if->prefix, ssid.SSID, chan);
+
+ wl_clr_isam_status(cur_if, AP_CREATING);
+
+ wl_ext_isam_status(cur_if->dev, NULL, 0);
+
+ return 0;
+}
+
+static bool
+wl_ext_diff_band(uint16 chan1, uint16 chan2)
+{
+ if ((chan1 <= CH_MAX_2G_CHANNEL && chan2 > CH_MAX_2G_CHANNEL) ||
+ (chan1 > CH_MAX_2G_CHANNEL && chan2 <= CH_MAX_2G_CHANNEL)) {
+ return TRUE;
+ }
+ return FALSE;
+}
+
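+/* Among the ready interfaces with a priority above cur_if's, return the
+ * channel of one operating in the same band (skipping DFS channels when
+ * nodfs is set); returns 0 if there is none.
+ */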
+static uint16
+wl_ext_same_band(struct wl_apsta_params *apsta_params,
+ struct wl_if_info *cur_if, bool nodfs)
+{
+ struct wl_if_info *tmp_if;
+ uint16 tmp_chan, target_chan = 0;
+ wl_prio_t max_prio;
+ int i;
+
+ // find the max prio
+ max_prio = cur_if->prio;
+ for (i=0; i<MAX_IF_NUM; i++) {
+ tmp_if = &apsta_params->if_info[i];
+ if (cur_if != tmp_if && wl_get_isam_status(tmp_if, IF_READY) &&
+ tmp_if->prio > max_prio) {
+ tmp_chan = wl_ext_get_chan(apsta_params, tmp_if->dev);
+ if (wl_ext_dfs_chan(tmp_chan) && nodfs)
+ continue;
+ if (tmp_chan && !wl_ext_diff_band(cur_if->channel, tmp_chan)) {
+ target_chan = tmp_chan;
+ max_prio = tmp_if->prio;
+ }
+ }
+ }
+
+ return target_chan;
+}
+
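+/* Decide whether cur_if must follow target_if's channel: returns 0 when both
+ * interfaces are VSDB-capable or no move is needed, otherwise the channel
+ * cur_if should adopt (cross-band moves are only forced on non-RSDB chips).
+ */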
+static uint16
+wl_ext_get_vsdb_chan(struct wl_apsta_params *apsta_params,
+ struct wl_if_info *cur_if, struct wl_if_info *target_if)
+{
+ uint16 target_chan = 0, cur_chan = cur_if->channel;
+
+ if (cur_if->vsdb && target_if->vsdb)
+ return 0;
+
+ target_chan = wl_ext_get_chan(apsta_params, target_if->dev);
+ if (target_chan) {
+ IAPSTA_INFO(cur_if->ifname, "cur_chan=%d, target_chan=%d\n",
+ cur_chan, target_chan);
+ if (wl_ext_diff_band(cur_chan, target_chan)) {
+ if (!apsta_params->rsdb)
+ return target_chan;
+ } else {
+ if (cur_chan != target_chan)
+ return target_chan;
+ }
+ }
+
+ return 0;
+}
+
+static int
+wl_ext_rsdb_core_conflict(struct wl_apsta_params *apsta_params,
+ struct wl_if_info *cur_if)
+{
+ struct wl_if_info *tmp_if;
+ uint16 cur_chan, tmp_chan;
+ int i;
+
+ if (apsta_params->rsdb) {
+ cur_chan = wl_ext_get_chan(apsta_params, cur_if->dev);
+ for (i=0; i<MAX_IF_NUM; i++) {
+ tmp_if = &apsta_params->if_info[i];
+ if (tmp_if != cur_if && wl_get_isam_status(tmp_if, IF_READY) &&
+ tmp_if->prio > cur_if->prio) {
+ tmp_chan = wl_ext_get_chan(apsta_params, tmp_if->dev);
+ if (!tmp_chan)
+ continue;
+ if (wl_ext_diff_band(cur_chan, tmp_chan) &&
+ wl_ext_diff_band(cur_chan, cur_if->channel))
+ return TRUE;
+ else if (!wl_ext_diff_band(cur_chan, tmp_chan) &&
+ wl_ext_diff_band(cur_chan, cur_if->channel))
+ return TRUE;
+ }
+ }
+ }
+ return FALSE;
+}
+
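+/* Move a running AP/mesh interface to cur_if->channel via a channel switch
+ * announcement (mode 1, count 3). The CSA is skipped on an RSDB core
+ * conflict, and DFS channels without radar support down the interface
+ * instead.
+ */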
+static int
+wl_ext_trigger_csa(struct wl_apsta_params *apsta_params, struct wl_if_info *cur_if)
+{
+ s8 iovar_buf[WLC_IOCTL_SMLEN];
+ bool core_conflict = FALSE;
+
+ if (wl_ext_master_if(cur_if) && (apsta_params->csa & CSA_DRV_BIT)) {
+ if (!cur_if->channel) {
+ WL_MSG(cur_if->ifname, "[%c] no valid channel\n", cur_if->prefix);
+ } else if (wl_ext_dfs_chan(cur_if->channel) && !apsta_params->radar) {
+ WL_MSG(cur_if->ifname, "[%c] skip DFS channel %d\n",
+ cur_if->prefix, cur_if->channel);
+ wl_ext_if_down(apsta_params, cur_if);
+ } else {
+ wl_chan_switch_t csa_arg;
+ memset(&csa_arg, 0, sizeof(csa_arg));
+ csa_arg.mode = 1;
+ csa_arg.count = 3;
+ csa_arg.chspec = wl_ext_chan_to_chanspec(apsta_params, cur_if->dev,
+ cur_if->channel);
+ core_conflict = wl_ext_rsdb_core_conflict(apsta_params, cur_if);
+ if (core_conflict) {
+ WL_MSG(cur_if->ifname, "[%c] Skip CSA due to rsdb core conflict\n",
+ cur_if->prefix);
+ } else if (csa_arg.chspec) {
+ WL_MSG(cur_if->ifname, "[%c] Trigger CSA to channel %d(0x%x)\n",
+ cur_if->prefix, cur_if->channel, csa_arg.chspec);
+ wl_set_isam_status(cur_if, AP_CREATING);
+ wl_ext_iovar_setbuf(cur_if->dev, "csa", &csa_arg, sizeof(csa_arg),
+ iovar_buf, sizeof(iovar_buf), NULL);
+ OSL_SLEEP(500);
+ wl_clr_isam_status(cur_if, AP_CREATING);
+ wl_ext_isam_status(cur_if->dev, NULL, 0);
+ } else {
+ IAPSTA_ERROR(cur_if->ifname, "fail to get chanspec\n");
+ }
+ }
+ }
+
+ return 0;
+}
+
+static void
+wl_ext_move_cur_dfs_channel(struct wl_apsta_params *apsta_params,
+ struct wl_if_info *cur_if)
+{
+ uint16 other_chan = 0, cur_chan = cur_if->channel;
+ uint16 chan_2g = 0, chan_5g = 0;
+ uint32 auto_band = WLC_BAND_2G;
+
+ if (wl_ext_master_if(cur_if) && wl_ext_dfs_chan(cur_if->channel) &&
+ !apsta_params->radar) {
+
+ wl_ext_get_default_chan(cur_if->dev, &chan_2g, &chan_5g, TRUE);
+ if (!chan_2g && !chan_5g) {
+ cur_if->channel = 0;
+ WL_MSG(cur_if->ifname, "[%c] no valid channel\n", cur_if->prefix);
+ return;
+ }
+
+ if (apsta_params->vsdb) {
+ if (chan_5g) {
+ cur_if->channel = chan_5g;
+ auto_band = WLC_BAND_5G;
+ other_chan = wl_ext_same_band(apsta_params, cur_if, TRUE);
+ } else {
+ cur_if->channel = chan_2g;
+ auto_band = WLC_BAND_2G;
+ other_chan = wl_ext_same_band(apsta_params, cur_if, TRUE);
+ }
+ if (!other_chan) {
+ other_chan = wl_ext_autochannel(cur_if->dev, ACS_FW_BIT|ACS_DRV_BIT,
+ auto_band);
+ }
+ if (other_chan)
+ cur_if->channel = other_chan;
+ } else if (apsta_params->rsdb) {
+ if (chan_5g) {
+ cur_if->channel = chan_5g;
+ auto_band = WLC_BAND_5G;
+ other_chan = wl_ext_same_band(apsta_params, cur_if, FALSE);
+ if (wl_ext_dfs_chan(other_chan) && chan_2g) {
+ cur_if->channel = chan_2g;
+ auto_band = WLC_BAND_2G;
+ other_chan = wl_ext_same_band(apsta_params, cur_if, TRUE);
+ }
+ } else {
+ cur_if->channel = chan_2g;
+ auto_band = WLC_BAND_2G;
+ other_chan = wl_ext_same_band(apsta_params, cur_if, TRUE);
+ }
+ if (!other_chan) {
+ other_chan = wl_ext_autochannel(cur_if->dev, ACS_FW_BIT|ACS_DRV_BIT,
+ auto_band);
+ }
+ if (other_chan)
+ cur_if->channel = other_chan;
+ } else {
+ cur_if->channel = chan_5g;
+ other_chan = wl_ext_same_band(apsta_params, cur_if, FALSE);
+ if (other_chan) {
+ cur_if->channel = other_chan;
+ } else {
+ auto_band = WLC_BAND_5G;
+ other_chan = wl_ext_autochannel(cur_if->dev, ACS_FW_BIT|ACS_DRV_BIT,
+ auto_band);
+ if (other_chan)
+ cur_if->channel = other_chan;
+ }
+ }
+ WL_MSG(cur_if->ifname, "[%c] move channel %d => %d\n",
+ cur_if->prefix, cur_chan, cur_if->channel);
+ }
+}
+
+static void
+wl_ext_move_other_dfs_channel(struct wl_apsta_params *apsta_params,
+ struct wl_if_info *cur_if)
+{
+ uint16 other_chan = 0, cur_chan = cur_if->channel;
+ uint16 chan_2g = 0, chan_5g = 0;
+ uint32 auto_band = WLC_BAND_2G;
+
+ if (wl_ext_master_if(cur_if) && wl_ext_dfs_chan(cur_if->channel) &&
+ !apsta_params->radar) {
+
+ wl_ext_get_default_chan(cur_if->dev, &chan_2g, &chan_5g, TRUE);
+ if (!chan_2g && !chan_5g) {
+ cur_if->channel = 0;
+ WL_MSG(cur_if->ifname, "[%c] no valid channel\n", cur_if->prefix);
+ return;
+ }
+
+ if (apsta_params->vsdb) {
+ if (chan_5g) {
+ cur_if->channel = chan_5g;
+ auto_band = WLC_BAND_5G;
+ other_chan = wl_ext_same_band(apsta_params, cur_if, TRUE);
+ } else {
+ cur_if->channel = chan_2g;
+ auto_band = WLC_BAND_2G;
+ other_chan = wl_ext_same_band(apsta_params, cur_if, TRUE);
+ }
+ if (!other_chan) {
+ other_chan = wl_ext_autochannel(cur_if->dev, ACS_FW_BIT|ACS_DRV_BIT,
+ auto_band);
+ }
+ if (other_chan)
+ cur_if->channel = other_chan;
+ } else if (apsta_params->rsdb) {
+ if (chan_2g) {
+ cur_if->channel = chan_2g;
+ auto_band = WLC_BAND_2G;
+ other_chan = wl_ext_same_band(apsta_params, cur_if, TRUE);
+ if (!other_chan) {
+ other_chan = wl_ext_autochannel(cur_if->dev, ACS_FW_BIT|ACS_DRV_BIT,
+ auto_band);
+ }
+ } else {
+ cur_if->channel = 0;
+ }
+ if (other_chan)
+ cur_if->channel = other_chan;
+ } else {
+ cur_if->channel = 0;
+ }
+ WL_MSG(cur_if->ifname, "[%c] move channel %d => %d\n",
+ cur_if->prefix, cur_chan, cur_if->channel);
+ }
+}
+
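+/* Resolve cur_if's channel against the other enabled interfaces: on non-VSDB
+ * setups, adopt the channel of the highest-priority interface that cannot
+ * coexist with ours (RSDB chips keep the current channel on cross-band
+ * conflicts), then swap out DFS channels via wl_ext_move_cur_dfs_channel().
+ * Returns the channel cur_if should use, 0 to keep the current one.
+ */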
+static uint16
+wl_ext_move_cur_channel(struct wl_apsta_params *apsta_params,
+ struct wl_if_info *cur_if)
+{
+ struct wl_if_info *tmp_if, *target_if = NULL;
+ uint16 tmp_chan, target_chan = 0;
+ wl_prio_t max_prio;
+ int i;
+
+ if (apsta_params->vsdb) {
+ target_chan = cur_if->channel;
+ goto exit;
+ }
+
+ // find the max prio
+ max_prio = cur_if->prio;
+ for (i=0; i<MAX_IF_NUM; i++) {
+ tmp_if = &apsta_params->if_info[i];
+ if (cur_if != tmp_if && wl_get_isam_status(tmp_if, IF_READY) &&
+ tmp_if->prio > max_prio) {
+ tmp_chan = wl_ext_get_vsdb_chan(apsta_params, cur_if, tmp_if);
+ if (tmp_chan) {
+ target_if = tmp_if;
+ target_chan = tmp_chan;
+ max_prio = tmp_if->prio;
+ }
+ }
+ }
+
+ if (target_chan) {
+ tmp_chan = wl_ext_get_chan(apsta_params, cur_if->dev);
+ if (apsta_params->rsdb && tmp_chan &&
+ wl_ext_diff_band(tmp_chan, target_chan)) {
+ WL_MSG(cur_if->ifname, "[%c] keep on current channel %d\n",
+ cur_if->prefix, tmp_chan);
+ cur_if->channel = 0;
+ } else {
+ WL_MSG(cur_if->ifname, "[%c] channel=%d => %s[%c] channel=%d\n",
+ cur_if->prefix, cur_if->channel,
+ target_if->ifname, target_if->prefix, target_chan);
+ cur_if->channel = target_chan;
+ }
+ }
+
+exit:
+ wl_ext_move_cur_dfs_channel(apsta_params, cur_if);
+
+ return cur_if->channel;
+}
+
+static struct wl_if_info *
+wl_ext_move_other_channel(struct wl_apsta_params *apsta_params,
+ struct wl_if_info *cur_if)
+{
+ struct wl_if_info *tmp_if, *target_if=NULL;
+ uint16 tmp_chan, target_chan = 0;
+ wl_prio_t max_prio = 0, cur_prio;
+ int i;
+
+ if (apsta_params->vsdb || !cur_if->channel) {
+ return NULL;
+ }
+
+ // find the max prio, but lower than cur_if
+ cur_prio = cur_if->prio;
+ for (i=0; i<MAX_IF_NUM; i++) {
+ tmp_if = &apsta_params->if_info[i];
+ if (cur_if != tmp_if && wl_get_isam_status(tmp_if, IF_READY) &&
+ tmp_if->prio >= max_prio && tmp_if->prio <= cur_prio) {
+ tmp_chan = wl_ext_get_vsdb_chan(apsta_params, cur_if, tmp_if);
+ if (tmp_chan) {
+ target_if = tmp_if;
+ target_chan = tmp_chan;
+ max_prio = tmp_if->prio;
+ }
+ }
+ }
+
+ if (target_if) {
+ WL_MSG(target_if->ifname, "channel=%d => %s channel=%d\n",
+ target_chan, cur_if->ifname, cur_if->channel);
+ target_if->channel = cur_if->channel;
+ wl_ext_move_other_dfs_channel(apsta_params, target_if);
+ if (apsta_params->csa == 0) {
+ wl_ext_if_down(apsta_params, target_if);
+ wl_ext_move_other_channel(apsta_params, cur_if);
+ if (target_if->ifmode == IMESH_MODE) {
+ wl_ext_enable_iface(target_if->dev, target_if->ifname, 0, FALSE);
+ } else if (target_if->ifmode == IAP_MODE) {
+ wl_ext_if_up(apsta_params, target_if, FALSE, 0);
+ }
+ } else {
+ wl_ext_trigger_csa(apsta_params, target_if);
+ }
+ }
+
+ return target_if;
+}
+
+static bool
+wl_ext_wait_other_enabling(struct wl_apsta_params *apsta_params,
+ struct wl_if_info *cur_if)
+{
+ struct wl_if_info *tmp_if;
+ bool enabling = FALSE;
+ u32 timeout = 1;
+ int i;
+
+ for (i=0; i<MAX_IF_NUM; i++) {
+ tmp_if = &apsta_params->if_info[i];
+ if (tmp_if->dev && tmp_if->dev != cur_if->dev) {
+ if (tmp_if->ifmode == ISTA_MODE)
+ enabling = wl_get_isam_status(tmp_if, STA_CONNECTING);
+ else if (tmp_if->ifmode == IAP_MODE || tmp_if->ifmode == IMESH_MODE)
+ enabling = wl_get_isam_status(tmp_if, AP_CREATING);
+ if (enabling)
+ WL_MSG(cur_if->ifname, "waiting for %s[%c] enabling...\n",
+ tmp_if->ifname, tmp_if->prefix);
+ if (enabling && tmp_if->ifmode == ISTA_MODE) {
+ timeout = wait_event_interruptible_timeout(
+ apsta_params->netif_change_event,
+ !wl_get_isam_status(tmp_if, STA_CONNECTING),
+ msecs_to_jiffies(MAX_STA_LINK_WAIT_TIME));
+ } else if (enabling &&
+ (tmp_if->ifmode == IAP_MODE || tmp_if->ifmode == IMESH_MODE)) {
+ timeout = wait_event_interruptible_timeout(
+ apsta_params->netif_change_event,
+ !wl_get_isam_status(tmp_if, AP_CREATING),
+ msecs_to_jiffies(MAX_STA_LINK_WAIT_TIME));
+ }
+ if (tmp_if->ifmode == ISTA_MODE)
+ enabling = wl_get_isam_status(tmp_if, STA_CONNECTING);
+ else if (tmp_if->ifmode == IAP_MODE || tmp_if->ifmode == IMESH_MODE)
+ enabling = wl_get_isam_status(tmp_if, AP_CREATING);
+ if (timeout <= 0 || enabling) {
+ WL_MSG(cur_if->ifname, "%s[%c] is still enabling...\n",
+ tmp_if->ifname, tmp_if->prefix);
+ }
+ }
+ }
+
+ return enabling;
+}
+
+bool
+wl_ext_iapsta_other_if_enabled(struct net_device *net)
+{
+ struct dhd_pub *dhd = dhd_get_pub(net);
+ struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+ struct wl_if_info *tmp_if;
+ bool enabled = FALSE;
+ int i;
+
+ for (i=0; i<MAX_IF_NUM; i++) {
+ tmp_if = &apsta_params->if_info[i];
+ if (tmp_if && wl_get_isam_status(tmp_if, IF_READY)) {
+ if (wl_ext_get_chan(apsta_params, tmp_if->dev)) {
+ enabled = TRUE;
+ break;
+ }
+ }
+ }
+
+ return enabled;
+}
+
+bool
+wl_ext_sta_connecting(struct net_device *dev)
+{
+ struct wl_if_info *cur_if = NULL;
+ bool connecting = FALSE;
+ int state;
+
+ cur_if = wl_get_cur_if(dev);
+ if (!cur_if)
+ return FALSE;
+
+ if (cur_if->ifmode != ISTA_MODE && cur_if->ifmode != IGC_MODE)
+ return FALSE;
+
+ state = cur_if->conn_state;
+ if (state >= CONN_STATE_CONNECTING && state < CONN_STATE_CONNECTED) {
+ connecting = TRUE;
+ IAPSTA_TRACE(dev->name, "conn_state %d\n", state);
+ }
+
+ return connecting;
+}
+
+#ifdef PROPTX_MAXCOUNT
+int
+wl_ext_get_wlfc_maxcount(struct dhd_pub *dhd, int ifidx)
+{
+ struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+ struct wl_if_info *tmp_if, *cur_if = NULL;
+ int i, maxcount = WL_TXSTATUS_FREERUNCTR_MASK;
+
+ if (!apsta_params->rsdb)
+ return maxcount;
+
+ for (i=0; i<MAX_IF_NUM; i++) {
+ tmp_if = &apsta_params->if_info[i];
+ if (tmp_if->dev && tmp_if->ifidx == ifidx) {
+ cur_if = tmp_if;
+ maxcount = cur_if->transit_maxcount;
+ }
+ }
+
+ if (cur_if)
+ IAPSTA_INFO(cur_if->ifname, "update maxcount %d\n", maxcount);
+ else
+ IAPSTA_INFO("wlan", "update maxcount %d for ifidx %d\n", maxcount, ifidx);
+ return maxcount;
+}
+
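+/* On RSDB chips, retune the proptxstatus transit maxcount per interface:
+ * interfaces on a 5G channel get proptx_maxcnt_5g, interfaces on 2G get
+ * proptx_maxcnt_2g while any 5G interface is active (the 5G value
+ * otherwise), and idle interfaces fall back to WL_TXSTATUS_FREERUNCTR_MASK.
+ */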
+static void
+wl_ext_update_wlfc_maxcount(struct dhd_pub *dhd)
+{
+ struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+ struct wl_if_info *tmp_if;
+ bool band_5g = FALSE;
+ uint16 chan = 0;
+ int i, ret;
+
+ if (!apsta_params->rsdb)
+ return;
+
+ for (i=0; i<MAX_IF_NUM; i++) {
+ tmp_if = &apsta_params->if_info[i];
+ if (tmp_if->dev) {
+ chan = wl_ext_get_chan(apsta_params, tmp_if->dev);
+ if (chan > CH_MAX_2G_CHANNEL) {
+ tmp_if->transit_maxcount = dhd->conf->proptx_maxcnt_5g;
+ ret = dhd_wlfc_update_maxcount(dhd, tmp_if->ifidx,
+ tmp_if->transit_maxcount);
+ if (ret == 0)
+ IAPSTA_INFO(tmp_if->ifname, "updated maxcount %d\n",
+ tmp_if->transit_maxcount);
+ band_5g = TRUE;
+ }
+ }
+ }
+
+ for (i=0; i<MAX_IF_NUM; i++) {
+ tmp_if = &apsta_params->if_info[i];
+ if (tmp_if->dev) {
+ chan = wl_ext_get_chan(apsta_params, tmp_if->dev);
+ if ((chan == 0) || (chan <= CH_MAX_2G_CHANNEL && chan >= CH_MIN_2G_CHANNEL)) {
+ if (chan == 0) {
+ tmp_if->transit_maxcount = WL_TXSTATUS_FREERUNCTR_MASK;
+ } else if (band_5g) {
+ tmp_if->transit_maxcount = dhd->conf->proptx_maxcnt_2g;
+ } else {
+ tmp_if->transit_maxcount = dhd->conf->proptx_maxcnt_5g;
+ }
+ ret = dhd_wlfc_update_maxcount(dhd, tmp_if->ifidx,
+ tmp_if->transit_maxcount);
+ if (ret == 0)
+ IAPSTA_INFO(tmp_if->ifname, "updated maxcount %d\n",
+ tmp_if->transit_maxcount);
+ }
+ }
+ }
+}
+#endif /* PROPTX_MAXCOUNT */
+
+#ifdef WL_CFG80211
+static struct wl_if_info *
+wl_ext_get_dfs_master_if(struct wl_apsta_params *apsta_params)
+{
+ struct wl_if_info *cur_if = NULL;
+ uint16 chan = 0;
+ int i;
+
+ for (i=0; i<MAX_IF_NUM; i++) {
+ cur_if = &apsta_params->if_info[i];
+ if (!cur_if->dev || !wl_ext_master_if(cur_if))
+ continue;
+ chan = wl_ext_get_chan(apsta_params, cur_if->dev);
+ if (wl_ext_dfs_chan(chan)) {
+ return cur_if;
+ }
+ }
+ return NULL;
+}
+
+static void
+wl_ext_save_master_channel(struct wl_apsta_params *apsta_params,
+ uint16 post_channel)
+{
+ struct wl_if_info *cur_if = NULL;
+ uint16 chan = 0;
+ int i;
+
+ if (apsta_params->vsdb)
+ return;
+
+ for (i=0; i<MAX_IF_NUM; i++) {
+ cur_if = &apsta_params->if_info[i];
+ if (!cur_if->dev || !wl_ext_master_if(cur_if))
+ continue;
+ chan = wl_ext_get_chan(apsta_params, cur_if->dev);
+ if (chan) {
+ cur_if->prev_channel = chan;
+ cur_if->post_channel = post_channel;
+ }
+ }
+}
+
+static void
+wl_ext_if_reenabled(struct wl_apsta_params *apsta_params, ifmode_t ifmode, u32 channel)
+{
+ struct wl_if_info *tmp_if;
+ int i;
+
+ for (i=0; i<MAX_IF_NUM; i++) {
+ tmp_if = &apsta_params->if_info[i];
+ if (tmp_if && tmp_if->ifmode == ifmode &&
+ wl_get_isam_status(tmp_if, IF_READY)) {
+ if (wl_ext_get_chan(apsta_params, tmp_if->dev) == channel) {
+ WL_MSG(tmp_if->ifname, "re-enable channel %d\n", channel);
+ if (ifmode == IAP_MODE) {
+ wl_ext_if_down(apsta_params, tmp_if);
+ wl_ext_if_up(apsta_params, tmp_if, FALSE, 0);
+ }
+ break;
+ }
+ }
+ }
+
+}
+
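+/* Settle the operating channel on the connect/AP-start path: run ACS for
+ * master interfaces, resolve conflicts with the other enabled interfaces
+ * (moving them when needed), remember the master channels when a STA heads
+ * for a DFS channel, and mark a STA interface as connecting. Returns the
+ * channel finally chosen.
+ */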
+u32
+wl_ext_iapsta_update_channel(struct net_device *dev, u32 channel)
+{
+ struct dhd_pub *dhd = dhd_get_pub(dev);
+ struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+ struct wl_if_info *cur_if = NULL, *target_if = NULL;
+ struct dhd_conf *conf = dhd->conf;
+
+ cur_if = wl_get_cur_if(dev);
+ if (cur_if) {
+ mutex_lock(&apsta_params->usr_sync);
+ wl_ext_isam_status(cur_if->dev, NULL, 0);
+ cur_if->channel = channel;
+ if (wl_ext_master_if(cur_if) && apsta_params->acs) {
+ uint auto_band = WL_GET_BAND(channel);
+ cur_if->channel = wl_ext_autochannel(cur_if->dev, apsta_params->acs,
+ auto_band);
+ }
+ channel = wl_ext_move_cur_channel(apsta_params, cur_if);
+ if (channel) {
+ if (cur_if->ifmode == ISTA_MODE && wl_ext_dfs_chan(channel))
+ wl_ext_save_master_channel(apsta_params, channel);
+ target_if = wl_ext_move_other_channel(apsta_params, cur_if);
+ if (dhd->conf->chip == BCM4359_CHIP_ID &&
+ cur_if->ifmode == ISTA_MODE && !target_if) {
+			/* WAR for a 4359 fw trap reproduced by this sequence:
+			 * step1: enable wlan1 on channel 1
+			 * step2: enable wlan2 on channel 36
+			 * step3: connect wlan0 to an AP on channel 1 -> fw traps
+			 */
+ wl_ext_if_reenabled(apsta_params, IAP_MODE, channel);
+ }
+ }
+ if (cur_if->ifmode == ISTA_MODE) {
+ if (conf->war & SET_CHAN_INCONN) {
+ chanspec_t fw_chspec;
+ IAPSTA_INFO(dev->name, "set channel %d\n", channel);
+ wl_ext_set_chanspec(cur_if->dev, apsta_params->ioctl_ver, channel,
+ &fw_chspec);
+ }
+ wl_set_isam_status(cur_if, STA_CONNECTING);
+ }
+ mutex_unlock(&apsta_params->usr_sync);
+ }
+
+ return channel;
+}
+
+static int
+wl_ext_iftype_to_ifmode(struct net_device *net, int wl_iftype, ifmode_t *ifmode)
+{
+ switch (wl_iftype) {
+ case WL_IF_TYPE_STA:
+ *ifmode = ISTA_MODE;
+ break;
+ case WL_IF_TYPE_AP:
+ *ifmode = IAP_MODE;
+ break;
+ case WL_IF_TYPE_P2P_GO:
+ *ifmode = IGO_MODE;
+ break;
+ case WL_IF_TYPE_P2P_GC:
+ *ifmode = IGC_MODE;
+ break;
+ default:
+ IAPSTA_ERROR(net->name, "Unknown interface wl_iftype:0x%x\n", wl_iftype);
+ return BCME_ERROR;
+ }
+ return BCME_OK;
+}
+
+void
+wl_ext_iapsta_update_iftype(struct net_device *net, int wl_iftype)
+{
+ struct dhd_pub *dhd = dhd_get_pub(net);
+ struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+ struct wl_if_info *cur_if = NULL;
+ int ifidx = dhd_net2idx(dhd->info, net);
+
+ IAPSTA_TRACE(net->name, "ifidx=%d, wl_iftype=%d\n", ifidx, wl_iftype);
+
+ if (ifidx < MAX_IF_NUM) {
+ cur_if = &apsta_params->if_info[ifidx];
+ }
+
+ if (cur_if) {
+ if (wl_iftype == WL_IF_TYPE_STA) {
+ cur_if->ifmode = ISTA_MODE;
+ cur_if->prio = PRIO_STA;
+ cur_if->vsdb = TRUE;
+ cur_if->prefix = 'S';
+ } else if (wl_iftype == WL_IF_TYPE_AP && cur_if->ifmode != IMESH_MODE) {
+ cur_if->ifmode = IAP_MODE;
+ cur_if->prio = PRIO_AP;
+ cur_if->vsdb = FALSE;
+ cur_if->prefix = 'A';
+ } else if (wl_iftype == WL_IF_TYPE_P2P_GO) {
+ cur_if->ifmode = IGO_MODE;
+ cur_if->prio = PRIO_P2P;
+ cur_if->vsdb = TRUE;
+ cur_if->prefix = 'P';
+ } else if (wl_iftype == WL_IF_TYPE_P2P_GC) {
+ cur_if->ifmode = IGC_MODE;
+ cur_if->prio = PRIO_P2P;
+ cur_if->vsdb = TRUE;
+ cur_if->prefix = 'P';
+ } else if (wl_iftype == WL_IF_TYPE_IBSS) {
+ cur_if->ifmode = IAP_MODE;
+ cur_if->prio = PRIO_AP;
+ cur_if->vsdb = FALSE;
+ cur_if->prefix = 'H';
+ wl_ext_iovar_setint(cur_if->dev, "assoc_retry_max", 3);
+ }
+ }
+}
+
+void
+wl_ext_iapsta_ifadding(struct net_device *net, int ifidx)
+{
+ struct dhd_pub *dhd = dhd_get_pub(net);
+ struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+ struct wl_if_info *cur_if = NULL;
+
+ IAPSTA_TRACE(net->name, "ifidx=%d\n", ifidx);
+ if (ifidx < MAX_IF_NUM) {
+ cur_if = &apsta_params->if_info[ifidx];
+ wl_set_isam_status(cur_if, IF_ADDING);
+ }
+}
+
+bool
+wl_ext_iapsta_iftype_enabled(struct net_device *net, int wl_iftype)
+{
+ struct dhd_pub *dhd = dhd_get_pub(net);
+ struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+ struct wl_if_info *cur_if = NULL;
+ ifmode_t ifmode = 0;
+
+ wl_ext_iftype_to_ifmode(net, wl_iftype, &ifmode);
+ cur_if = wl_ext_if_enabled(apsta_params, ifmode);
+ if (cur_if)
+ return TRUE;
+
+ return FALSE;
+}
+
+void
+wl_ext_iapsta_enable_master_if(struct net_device *dev, bool post)
+{
+ dhd_pub_t *dhd = dhd_get_pub(dev);
+ struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+ struct wl_if_info *cur_if = NULL;
+ int i;
+
+ for (i=0; i<MAX_IF_NUM; i++) {
+ cur_if = &apsta_params->if_info[i];
+ if (cur_if && cur_if->post_channel) {
+ if (post)
+ cur_if->channel = cur_if->post_channel;
+ else
+ cur_if->channel = cur_if->prev_channel;
+ wl_ext_if_up(apsta_params, cur_if, TRUE, 0);
+ cur_if->prev_channel = 0;
+ cur_if->post_channel = 0;
+ }
+ }
+}
+
+void
+wl_ext_iapsta_restart_master(struct net_device *dev)
+{
+ dhd_pub_t *dhd = dhd_get_pub(dev);
+ struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+ struct wl_if_info *ap_if = NULL;
+
+ if (apsta_params->radar)
+ return;
+
+ ap_if = wl_ext_get_dfs_master_if(apsta_params);
+ if (ap_if) {
+ uint16 chan_2g, chan_5g;
+ wl_ext_if_down(apsta_params, ap_if);
+ wl_ext_iapsta_restart_master(dev);
+ wl_ext_get_default_chan(ap_if->dev, &chan_2g, &chan_5g, TRUE);
+ if (chan_5g)
+ ap_if->channel = chan_5g;
+ else if (chan_2g)
+ ap_if->channel = chan_2g;
+ else
+ ap_if->channel = 0;
+ if (ap_if->channel) {
+ wl_ext_move_cur_channel(apsta_params, ap_if);
+ wl_ext_if_up(apsta_params, ap_if, FALSE, 0);
+ }
+ }
+}
+
+bool
+wl_ext_iapsta_mesh_creating(struct net_device *net)
+{
+ struct dhd_pub *dhd = dhd_get_pub(net);
+ struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+ struct wl_if_info *cur_if;
+ int i;
+
+ if (apsta_params) {
+ for (i=0; i<MAX_IF_NUM; i++) {
+ cur_if = &apsta_params->if_info[i];
+ if (cur_if->ifmode==IMESH_MODE && wl_get_isam_status(cur_if, IF_ADDING))
+ return TRUE;
+ }
+ }
+ return FALSE;
+}
+
+void
+wl_ext_fw_reinit_incsa(struct net_device *dev)
+{
+ struct dhd_pub *dhd = dhd_get_pub(dev);
+ struct dhd_conf *conf = dhd->conf;
+ struct wl_if_info *cur_if = NULL;
+
+ cur_if = wl_get_cur_if(dev);
+ if (!cur_if)
+ return;
+
+ if (conf->war & FW_REINIT_INCSA) {
+ if (cur_if->ifmode == ISTA_MODE &&
+ wl_ext_iapsta_iftype_enabled(dev, WL_IF_TYPE_AP)) {
+ IAPSTA_INFO(dev->name, "wl reinit\n");
+ wl_ext_ioctl(dev, WLC_INIT, NULL, 0, 1);
+ }
+ }
+}
+
+#ifdef WL_EXT_RECONNECT
+static void
+wl_ext_reconnect_timeout(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+
+ if (!dev) {
+ IAPSTA_ERROR("wlan", "dev is not ready\n");
+ return;
+ }
+ IAPSTA_ERROR(dev->name, "timer expired\n");
+ wl_ext_send_event_msg(dev, WLC_E_SET_SSID, WLC_E_STATUS_NO_NETWORKS);
+}
+
+static int
+wl_ext_connect_retry(struct net_device *dev, wl_event_msg_t *e)
+{
+ struct dhd_pub *dhd = dhd_get_pub(dev);
+ struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+ struct wl_if_info *cur_if;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ struct osl_timespec cur_ts, *sta_conn_ts = &apsta_params->sta_conn_ts;
+ uint32 diff_ms = 0;
+ int max_wait_time = 0, ret = 0;
+ bool connecting = FALSE;
+
+ cur_if = wl_get_cur_if(dev);
+ if (!cur_if)
+ return ret;
+
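+ /* Drop in4way_sync before taking connect_sync so the two mutexes are
+ * never held together, presumably to avoid a lock-order inversion with
+ * the connect path; in4way_sync is re-acquired before returning.
+ */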
+ mutex_unlock(&apsta_params->in4way_sync);
+ mutex_lock(&cfg->connect_sync);
+ connecting = wl_ext_sta_connecting(dev);
+
+ osl_do_gettimeofday(&cur_ts);
+ diff_ms = osl_do_gettimediff(&cur_ts, sta_conn_ts)/1000;
+
+ if (connecting && diff_ms < STA_CONNECT_TIMEOUT &&
+ !wl_get_drv_status(cfg, DISCONNECTING, dev)) {
+ uint32 etype = ntoh32(e->event_type);
+ uint32 status = ntoh32(e->status);
+ if (etype == WLC_E_SET_SSID && (status == WLC_E_STATUS_NO_NETWORKS ||
+ status == WLC_E_STATUS_NO_ACK)) {
+ wl_ext_mod_timer(&cur_if->reconnect_timer, 0, 0);
+ if (cur_if->assoc_info.reassoc) {
+ WL_MSG(dev->name, "retry reassoc\n");
+ wl_handle_reassoc(cfg, dev, &cur_if->assoc_info);
+ max_wait_time = STA_RECONNECT_RETRY_TIMEOUT;
+ } else {
+ if (!wl_ext_get_chan(apsta_params, dev)) {
+ WL_MSG(dev->name, "retry join\n");
+ wl_cfg80211_disassoc(dev, WLAN_REASON_DEAUTH_LEAVING);
+ wl_handle_join(cfg, dev, &cur_if->assoc_info);
+ max_wait_time = STA_CONNECT_RETRY_TIMEOUT;
+ }
+ }
+ wl_ext_mod_timer(&cur_if->reconnect_timer, 0, max_wait_time);
+ }
+ ret = -EAGAIN;
+ }
+ mutex_unlock(&cfg->connect_sync);
+ mutex_lock(&apsta_params->in4way_sync);
+
+ return ret;
+}
+
+static void
+wl_ext_set_connect_retry(struct net_device *dev, void *context)
+{
+ wlcfg_assoc_info_t *assoc_info = (wlcfg_assoc_info_t *)context;
+ struct wl_if_info *cur_if;
+ int max_wait_time;
+ int wpa_auth = 0;
+
+ cur_if = wl_get_cur_if(dev);
+ if (!cur_if)
+ return;
+
+ wl_ext_mod_timer(&cur_if->reconnect_timer, 0, 0);
+ memset(&cur_if->assoc_info, 0, sizeof(wlcfg_assoc_info_t));
+ wl_ext_iovar_getint(dev, "wpa_auth", &wpa_auth);
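+ /* Back up the join request only for non-SAE connects with a valid
+ * assoc_info; SAE (WPA3) retries are presumably driven by the
+ * supplicant, and 0x20 appears to be another SAE-related AKM bit.
+ */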
+ if (!(wpa_auth & (WPA3_AUTH_SAE_PSK|0x20)) && assoc_info) {
+ memcpy(&cur_if->bssid, assoc_info->bssid, ETHER_ADDR_LEN);
+ memcpy(&cur_if->assoc_info, assoc_info, sizeof(wlcfg_assoc_info_t));
+ if (assoc_info->reassoc)
+ max_wait_time = STA_RECONNECT_RETRY_TIMEOUT;
+ else
+ max_wait_time = STA_CONNECT_RETRY_TIMEOUT;
+ IAPSTA_INFO(dev->name, "reconnect %dms later\n", max_wait_time);
+ wl_ext_mod_timer(&cur_if->reconnect_timer, 0, max_wait_time);
+ }
+}
+#endif /* WL_EXT_RECONNECT */
+#endif /* WL_CFG80211 */
+
+#ifndef WL_STATIC_IF
+s32
+wl_ext_add_del_bss(struct net_device *ndev, s32 bsscfg_idx,
+ int iftype, s32 del, u8 *addr)
+{
+ s32 ret = BCME_OK;
+ s32 val = 0;
+ u8 ioctl_buf[WLC_IOCTL_SMLEN];
+ struct {
+ s32 cfg;
+ s32 val;
+ struct ether_addr ea;
+ } bss_setbuf;
+
+ IAPSTA_TRACE(ndev->name, "wl_iftype:%d del:%d \n", iftype, del);
+
+ bzero(&bss_setbuf, sizeof(bss_setbuf));
+
+ /* AP=2, STA=3, up=1, down=0, val=-1 */
+ if (del) {
+ val = WLC_AP_IOV_OP_DELETE;
+ } else if (iftype == WL_INTERFACE_TYPE_AP) {
+ /* Add/role change to AP Interface */
+ IAPSTA_TRACE(ndev->name, "Adding AP Interface\n");
+ val = WLC_AP_IOV_OP_MANUAL_AP_BSSCFG_CREATE;
+ } else if (iftype == WL_INTERFACE_TYPE_STA) {
+ /* Add/role change to STA Interface */
+ IAPSTA_TRACE(ndev->name, "Adding STA Interface\n");
+ val = WLC_AP_IOV_OP_MANUAL_STA_BSSCFG_CREATE;
+ } else {
+ IAPSTA_ERROR(ndev->name, "add_del_bss NOT supported for IFACE type:0x%x", iftype);
+ return -EINVAL;
+ }
+
+ if (!del) {
+ wl_ext_bss_iovar_war(ndev, &val);
+ }
+
+ bss_setbuf.cfg = htod32(bsscfg_idx);
+ bss_setbuf.val = htod32(val);
+
+ if (addr) {
+ memcpy(&bss_setbuf.ea.octet, addr, ETH_ALEN);
+ }
+
+ IAPSTA_INFO(ndev->name, "wl bss %d bssidx:%d\n", val, bsscfg_idx);
+ ret = wl_ext_iovar_setbuf(ndev, "bss", &bss_setbuf, sizeof(bss_setbuf),
+ ioctl_buf, WLC_IOCTL_SMLEN, NULL);
+ if (ret != 0)
+ IAPSTA_ERROR(ndev->name, "'bss %d' failed with %d\n", val, ret);
+
+ return ret;
+}
+
+static int
+wl_ext_interface_ops(struct net_device *dev,
+ struct wl_apsta_params *apsta_params, int iftype, u8 *addr)
+{
+ s32 ret;
+ struct wl_interface_create_v2 iface;
+ wl_interface_create_v3_t iface_v3;
+ struct wl_interface_info_v1 *info;
+ wl_interface_info_v2_t *info_v2;
+ uint32 ifflags = 0;
+ bool use_iface_info_v2 = false;
+ u8 ioctl_buf[WLC_IOCTL_SMLEN];
+ wl_wlc_version_t wlc_ver;
+
+ /* Interface create */
+ bzero(&iface, sizeof(iface));
+
+ if (addr) {
+ ifflags |= WL_INTERFACE_MAC_USE;
+ }
+
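+ /* Query the firmware first: on WLC major version >= 5 the
+ * interface_create response advertises its supported version; if it
+ * reports VER_3, reissue the request with the v3 layout.
+ */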
+ ret = wldev_iovar_getbuf(dev, "wlc_ver", NULL, 0,
+ &wlc_ver, sizeof(wl_wlc_version_t), NULL);
+ if ((ret == BCME_OK) && (wlc_ver.wlc_ver_major >= 5)) {
+ ret = wldev_iovar_getbuf(dev, "interface_create",
+ &iface, sizeof(struct wl_interface_create_v2),
+ ioctl_buf, sizeof(ioctl_buf), NULL);
+ if ((ret == BCME_OK) && (*((uint32 *)ioctl_buf) == WL_INTERFACE_CREATE_VER_3)) {
+ use_iface_info_v2 = true;
+ bzero(&iface_v3, sizeof(wl_interface_create_v3_t));
+ iface_v3.ver = WL_INTERFACE_CREATE_VER_3;
+ iface_v3.iftype = iftype;
+ iface_v3.flags = ifflags;
+ if (addr) {
+ memcpy(&iface_v3.mac_addr.octet, addr, ETH_ALEN);
+ }
+ ret = wl_ext_iovar_getbuf(dev, "interface_create",
+ &iface_v3, sizeof(wl_interface_create_v3_t),
+ ioctl_buf, sizeof(ioctl_buf), NULL);
+ if (unlikely(ret)) {
+ IAPSTA_ERROR(dev->name, "Interface v3 create failed!! ret %d\n", ret);
+ return ret;
+ }
+ }
+ }
+
+ /* success case */
+ if (use_iface_info_v2 == true) {
+ info_v2 = (wl_interface_info_v2_t *)ioctl_buf;
+ ret = info_v2->bsscfgidx;
+ } else {
+ /* Fall back to the v2 create request; the response uses the v1 info struct */
+ iface.ver = WL_INTERFACE_CREATE_VER_2;
+ iface.iftype = iftype;
+ iface.flags = iftype | ifflags;
+ if (addr) {
+ memcpy(&iface.mac_addr.octet, addr, ETH_ALEN);
+ }
+ ret = wldev_iovar_getbuf(dev, "interface_create",
+ &iface, sizeof(struct wl_interface_create_v2),
+ ioctl_buf, sizeof(ioctl_buf), NULL);
+ if (ret == BCME_OK) {
+ info = (struct wl_interface_info_v1 *)ioctl_buf;
+ ret = info->bsscfgidx;
+ }
+ }
+
+ IAPSTA_INFO(dev->name, "wl interface create success!! bssidx:%d \n", ret);
+ return ret;
+}
+
+static void
+wl_ext_wait_netif_change(struct wl_apsta_params *apsta_params,
+ struct wl_if_info *cur_if)
+{
+ rtnl_unlock();
+ wait_event_interruptible_timeout(apsta_params->netif_change_event,
+ wl_get_isam_status(cur_if, IF_READY),
+ msecs_to_jiffies(MAX_AP_LINK_WAIT_TIME));
+ rtnl_lock();
+}
+
+static void
+wl_ext_interface_create(struct net_device *dev, struct wl_apsta_params *apsta_params,
+ struct wl_if_info *cur_if, int iftype, u8 *addr)
+{
+ s32 ret;
+
+ wl_set_isam_status(cur_if, IF_ADDING);
+ ret = wl_ext_interface_ops(dev, apsta_params, iftype, addr);
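+ /* Firmware without interface_create support returns BCME_UNSUPPORTED;
+ * fall back to the legacy "bss" iovar to create the bsscfg.
+ */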
+ if (ret == BCME_UNSUPPORTED) {
+ wl_ext_add_del_bss(dev, 1, iftype, 0, addr);
+ }
+ wl_ext_wait_netif_change(apsta_params, cur_if);
+}
+
+static void
+wl_ext_iapsta_intf_add(struct net_device *dev, struct wl_apsta_params *apsta_params)
+{
+ struct dhd_pub *dhd;
+ apstamode_t apstamode = apsta_params->apstamode;
+ struct wl_if_info *cur_if;
+ s8 iovar_buf[WLC_IOCTL_SMLEN];
+ wl_p2p_if_t ifreq;
+ struct ether_addr mac_addr;
+
+ dhd = dhd_get_pub(dev);
+ bzero(&mac_addr, sizeof(mac_addr));
+
+ if (apstamode == ISTAAP_MODE) {
+ cur_if = &apsta_params->if_info[IF_VIF];
+ wl_ext_interface_create(dev, apsta_params, cur_if, WL_INTERFACE_TYPE_AP, NULL);
+ }
+ else if (apstamode == ISTAGO_MODE) {
+ bzero(&ifreq, sizeof(wl_p2p_if_t));
+ ifreq.type = htod32(WL_P2P_IF_GO);
+ cur_if = &apsta_params->if_info[IF_VIF];
+ wl_set_isam_status(cur_if, IF_ADDING);
+ wl_ext_iovar_setbuf(dev, "p2p_ifadd", &ifreq, sizeof(ifreq),
+ iovar_buf, WLC_IOCTL_SMLEN, NULL);
+ wl_ext_wait_netif_change(apsta_params, cur_if);
+ }
+ else if (apstamode == ISTASTA_MODE) {
+ cur_if = &apsta_params->if_info[IF_VIF];
+ memcpy(&mac_addr, dev->dev_addr, ETHER_ADDR_LEN);
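+ /* Derive a distinct MAC for the virtual STA by setting the
+ * locally-administered bit of the primary interface address.
+ */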
+ mac_addr.octet[0] |= 0x02;
+ wl_ext_interface_create(dev, apsta_params, cur_if, WL_INTERFACE_TYPE_STA,
+ (u8*)&mac_addr);
+ }
+ else if (apstamode == IDUALAP_MODE) {
+ cur_if = &apsta_params->if_info[IF_VIF];
+ wl_ext_interface_create(dev, apsta_params, cur_if, WL_INTERFACE_TYPE_AP, NULL);
+ }
+ else if (apstamode == ISTAAPAP_MODE) {
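+ /* Two virtual APs: build unique locally-administered MACs by
+ * randomizing octets 3..4 and bumping the last octet per interface.
+ */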
+ u8 rand_bytes[2] = {0, };
+ get_random_bytes(&rand_bytes, sizeof(rand_bytes));
+ cur_if = &apsta_params->if_info[IF_VIF];
+ memcpy(&mac_addr, dev->dev_addr, ETHER_ADDR_LEN);
+ mac_addr.octet[0] |= 0x02;
+ mac_addr.octet[5] += 0x01;
+ memcpy(&mac_addr.octet[3], rand_bytes, sizeof(rand_bytes));
+ wl_ext_interface_create(dev, apsta_params, cur_if, WL_INTERFACE_TYPE_AP,
+ (u8*)&mac_addr);
+ cur_if = &apsta_params->if_info[IF_VIF2];
+ memcpy(&mac_addr, dev->dev_addr, ETHER_ADDR_LEN);
+ mac_addr.octet[0] |= 0x02;
+ mac_addr.octet[5] += 0x02;
+ memcpy(&mac_addr.octet[3], rand_bytes, sizeof(rand_bytes));
+ wl_ext_interface_create(dev, apsta_params, cur_if, WL_INTERFACE_TYPE_AP,
+ (u8*)&mac_addr);
+ }
+#ifdef WLMESH
+ else if (apstamode == ISTAMESH_MODE) {
+ cur_if = &apsta_params->if_info[IF_VIF];
+ wl_ext_interface_create(dev, apsta_params, cur_if, WL_INTERFACE_TYPE_STA, NULL);
+ }
+ else if (apstamode == IMESHAP_MODE) {
+ cur_if = &apsta_params->if_info[IF_VIF];
+ wl_ext_interface_create(dev, apsta_params, cur_if, WL_INTERFACE_TYPE_AP, NULL);
+ }
+ else if (apstamode == ISTAAPMESH_MODE) {
+ cur_if = &apsta_params->if_info[IF_VIF];
+ wl_ext_interface_create(dev, apsta_params, cur_if, WL_INTERFACE_TYPE_AP, NULL);
+ cur_if = &apsta_params->if_info[IF_VIF2];
+ wl_ext_interface_create(dev, apsta_params, cur_if, WL_INTERFACE_TYPE_STA, NULL);
+ }
+ else if (apstamode == IMESHAPAP_MODE) {
+ cur_if = &apsta_params->if_info[IF_VIF];
+ wl_ext_interface_create(dev, apsta_params, cur_if, WL_INTERFACE_TYPE_AP, NULL);
+ cur_if = &apsta_params->if_info[IF_VIF2];
+ wl_ext_interface_create(dev, apsta_params, cur_if, WL_INTERFACE_TYPE_AP, NULL);
+ }
+#endif /* WLMESH */
+
+}
+#endif /* WL_STATIC_IF */
+
+void
+wl_ext_update_conn_state(dhd_pub_t *dhd, int ifidx, uint conn_state)
+{
+ struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+ struct wl_if_info *cur_if = NULL;
+#ifdef EAPOL_RESEND
+ unsigned long flags = 0;
+#endif /* EAPOL_RESEND */
+
+ if (ifidx < MAX_IF_NUM) {
+ cur_if = &apsta_params->if_info[ifidx];
+#ifdef EAPOL_RESEND
+ spin_lock_irqsave(&apsta_params->eapol_lock, flags);
+#endif /* EAPOL_RESEND */
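+ /* For STA/GC modes, intermediate handshake states (between CONNECTING
+ * and CONNECTED) are only recorded while a connect is in progress;
+ * idle and connected transitions are always recorded.
+ */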
+ if (cur_if->ifmode == ISTA_MODE || cur_if->ifmode == IGC_MODE) {
+ if (wl_ext_sta_connecting(cur_if->dev) ||
+ conn_state >= CONN_STATE_CONNECTED ||
+ conn_state <= CONN_STATE_CONNECTING)
+ apsta_params->if_info[ifidx].conn_state = conn_state;
+ else
+ IAPSTA_INFO(cur_if->dev->name, "skip update %d\n", conn_state);
+ } else {
+ apsta_params->if_info[ifidx].conn_state = conn_state;
+ }
+#ifdef EAPOL_RESEND
+ spin_unlock_irqrestore(&apsta_params->eapol_lock, flags);
+#endif /* EAPOL_RESEND */
+ }
+}
+
+#ifdef EAPOL_RESEND
+#ifdef EAPOL_DYNAMATIC_RESEND
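+ /* Adaptive EAPOL resend: measure the interval between each EAPOL TX
+ * and its matching RX, tracking min/avg/max so the resend timer can be
+ * tuned to the AP's observed response time instead of a fixed timeout.
+ */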
+static void
+wl_ext_calc_eapol_intvl(struct wl_if_info *cur_if, bool rx)
+{
+ struct osl_timespec cur_ts;
+ uint32 diff_ms;
+
+ if (rx && cur_if->pend_eapol_pkt && !cur_if->eapol_retry) {
+ osl_do_gettimeofday(&cur_ts);
+ diff_ms = osl_do_gettimediff(&cur_ts, &cur_if->eapol_tx_ts)/1000;
+ if (diff_ms > STA_EAPOL_TIMEOUT)
+ diff_ms = STA_EAPOL_TIMEOUT;
+ if (diff_ms > cur_if->eapol_max_intvl)
+ cur_if->eapol_max_intvl = diff_ms;
+ if (!cur_if->eapol_cnt || diff_ms < cur_if->eapol_min_intvl ||
+ cur_if->eapol_min_intvl == 0)
+ cur_if->eapol_min_intvl = diff_ms;
+
+ if (cur_if->eapol_cnt)
+ cur_if->eapol_avg_intvl =
+ (cur_if->eapol_avg_intvl * cur_if->eapol_cnt + diff_ms) /
+ (cur_if->eapol_cnt+1);
+ else
+ cur_if->eapol_avg_intvl = (diff_ms + STA_EAPOL_TIMEOUT) / 2;
+ cur_if->eapol_cnt++;
+
+ if (cur_if->eapol_avg_intvl <= (cur_if->eapol_min_intvl + 2) ||
+ cur_if->eapol_avg_intvl <= 10) {
+ cur_if->eapol_avg_intvl = (cur_if->eapol_max_intvl+STA_EAPOL_TIMEOUT)/2;
+ cur_if->eapol_cnt = 1;
+ }
+ }
+}
+#endif /* EAPOL_DYNAMATIC_RESEND */
+
+void
+wl_ext_free_eapol_txpkt(struct wl_if_info *cur_if, bool rx)
+{
+ struct dhd_pub *dhd = dhd_get_pub(cur_if->dev);
+
+#ifdef BCMDBUS
+ if (!rx)
+#endif /* BCMDBUS */
+ wl_ext_mod_timer(&cur_if->eapol_timer, 0, 0);
+
+ if (cur_if->pend_eapol_pkt) {
+ PKTCFREE(dhd->osh, cur_if->pend_eapol_pkt, TRUE);
+ cur_if->pend_eapol_pkt = NULL;
+ IAPSTA_TRACE(cur_if->dev->name, "release eapol pkt\n");
+ }
+}
+
+void
+wl_ext_release_eapol_txpkt(dhd_pub_t *dhd, int ifidx, bool rx)
+{
+ struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+ struct wl_if_info *cur_if = NULL;
+ unsigned long flags = 0;
+
+ if (ifidx < MAX_IF_NUM && (dhd->conf->war & RESEND_EAPOL_PKT)) {
+ cur_if = &apsta_params->if_info[ifidx];
+ spin_lock_irqsave(&apsta_params->eapol_lock, flags);
+#ifdef EAPOL_DYNAMATIC_RESEND
+ wl_ext_calc_eapol_intvl(cur_if, rx);
+ if (rx)
+ cur_if->eapol_retry = FALSE;
+#endif /* EAPOL_DYNAMATIC_RESEND */
+ wl_ext_free_eapol_txpkt(cur_if, rx);
+ spin_unlock_irqrestore(&apsta_params->eapol_lock, flags);
+ }
+}
+
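+ /* Keep a copy of the EAPOL frame sent while the 4-way handshake is in
+ * progress and arm the resend timer; if no reply arrives in time,
+ * wl_resend_eapol_handler() retransmits the saved frame.
+ */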
+void
+wl_ext_backup_eapol_txpkt(dhd_pub_t *dhd, int ifidx, void *pkt)
+{
+ struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+ struct wl_if_info *cur_if = NULL;
+ unsigned long flags = 0;
+ int interval;
+
+ if (ifidx < MAX_IF_NUM && (dhd->conf->war & RESEND_EAPOL_PKT)) {
+ cur_if = &apsta_params->if_info[ifidx];
+ if (cur_if->dev && cur_if->ifmode == ISTA_MODE &&
+ wl_ext_sta_connecting(cur_if->dev)) {
+ spin_lock_irqsave(&apsta_params->eapol_lock, flags);
+ wl_ext_free_eapol_txpkt(cur_if, TRUE);
+ cur_if->pend_eapol_pkt = skb_copy(pkt, GFP_ATOMIC);
+ if (cur_if->pend_eapol_pkt) {
+#ifdef EAPOL_DYNAMATIC_RESEND
+ osl_do_gettimeofday(&cur_if->eapol_tx_ts);
+ if (cur_if->eapol_retry)
+ interval = cur_if->eapol_max_intvl;
+ else
+ interval = (cur_if->eapol_avg_intvl + cur_if->eapol_max_intvl) / 2;
+ if (interval <= 20) {
+ cur_if->eapol_avg_intvl = (cur_if->eapol_max_intvl+STA_EAPOL_TIMEOUT)/2;
+ cur_if->eapol_cnt = 1;
+ }
+ cur_if->eapol_resend_intvl = interval;
+#else
+ interval = STA_EAPOL_TIMEOUT;
+#endif /* EAPOL_DYNAMATIC_RESEND */
+ wl_ext_mod_timer(&cur_if->eapol_timer, 0, interval);
+ IAPSTA_TRACE(cur_if->dev->name, "backup eapol pkt\n");
+ }
+ spin_unlock_irqrestore(&apsta_params->eapol_lock, flags);
+ }
+ }
+}
+
+static void
+wl_resend_eapol_handler(struct wl_if_info *cur_if,
+ const wl_event_msg_t *e, void *data)
+{
+ struct dhd_pub *dhd = dhd_get_pub(cur_if->dev);
+ struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+ struct net_device *dev = cur_if->dev;
+ uint32 etype = ntoh32(e->event_type);
+ uint32 reason = ntoh32(e->reason);
+ unsigned long flags = 0;
+ bool pending = FALSE;
+ void *pend_eapol_pkt = NULL;
+
+ if (etype == WLC_E_RESERVED && reason == ISAM_RC_EAPOL_RESEND) {
+ spin_lock_irqsave(&apsta_params->eapol_lock, flags);
+ if (cur_if->pend_eapol_pkt && wl_ext_sta_connecting(cur_if->dev)) {
+ pend_eapol_pkt = skb_copy(cur_if->pend_eapol_pkt, GFP_ATOMIC);
+ if (pend_eapol_pkt) {
+#ifdef EAPOL_DYNAMATIC_RESEND
+ cur_if->eapol_retry = TRUE;
+ IAPSTA_INFO(dev->name, "resend eapol pkt %d(%d/%d/%d/%d), cnt=%d\n",
+ cur_if->eapol_resend_intvl,
+ cur_if->eapol_min_intvl, cur_if->eapol_avg_intvl,
+ cur_if->eapol_max_intvl, STA_EAPOL_TIMEOUT,
+ cur_if->eapol_cnt);
+#else
+ IAPSTA_INFO(dev->name, "resend eapol pkt %d\n", STA_EAPOL_TIMEOUT);
+#endif /* EAPOL_DYNAMATIC_RESEND */
+ pending = TRUE;
+ }
+ }
+ spin_unlock_irqrestore(&apsta_params->eapol_lock, flags);
+ if (pending) {
+ dhd_sendpkt(dhd, cur_if->ifidx, pend_eapol_pkt);
+ }
+ }
+}
+
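+ /* Timer callbacks run in atomic context, so defer the actual resend to
+ * the event thread via a WLC_E_RESERVED/ISAM_RC_EAPOL_RESEND message.
+ */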
+static void
+wl_eapol_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct dhd_pub *dhd;
+ wl_event_msg_t msg;
+
+ if (!dev) {
+ IAPSTA_ERROR("wlan", "dev is not ready\n");
+ return;
+ }
+
+ dhd = dhd_get_pub(dev);
+
+ bzero(&msg, sizeof(wl_event_msg_t));
+ IAPSTA_TRACE(dev->name, "timer expired\n");
+
+ msg.ifidx = hton32(dhd_net2idx(dhd->info, dev));
+ msg.event_type = hton32(WLC_E_RESERVED);
+ msg.reason = hton32(ISAM_RC_EAPOL_RESEND);
+ wl_ext_event_send(dhd->event_params, &msg, NULL);
+}
+#endif /* EAPOL_RESEND */
+
+#if defined(WL_CFG80211) && defined(SCAN_SUPPRESS)
+static void
+wl_ext_light_scan_prep(struct net_device *dev, void *scan_params, bool scan_v2)
+{
+ wl_scan_params_t *params = NULL;
+ wl_scan_params_v2_t *params_v2 = NULL;
+
+ if (!scan_params) {
+ IAPSTA_ERROR(dev->name, "NULL scan_params\n");
+ return;
+ }
+ IAPSTA_INFO(dev->name, "Enter\n");
+
+ if (scan_v2) {
+ params_v2 = (wl_scan_params_v2_t *)scan_params;
+ } else {
+ params = (wl_scan_params_t *)scan_params;
+ }
+
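+ /* Trim probe count and dwell times so the scan takes less airtime
+ * away from active traffic.
+ */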
+ if (params_v2) {
+ /* scan params ver2 */
+ params_v2->nprobes = 1;
+ params_v2->active_time = 20;
+ params_v2->home_time = 150;
+ } else {
+ /* scan params ver 1 */
+ if (!params) {
+ ASSERT(0);
+ return;
+ }
+ params->nprobes = 1;
+ params->active_time = 20;
+ params->home_time = 150;
+ }
+
+ return;
+}
+
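+ /* Return the operating channel of the busiest interface; note the
+ * loop breaks on the first interface with traffic and a valid channel,
+ * so with several active interfaces this is first-match rather than a
+ * strict maximum.
+ */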
+static uint16
+wl_ext_max_tput_chan(struct wl_apsta_params *apsta_params)
+{
+ struct wl_if_info *tmp_if, *max_tput_if = NULL;
+ uint16 chan = 0, max_tput_chan = 0;
+ int32 tput_sum = 0;
+ int i;
+
+ for (i=0; i<MAX_IF_NUM; i++) {
+ tmp_if = &apsta_params->if_info[i];
+ if (tmp_if->dev && (tmp_if->tput_tx + tmp_if->tput_rx) > tput_sum) {
+ chan = wl_ext_get_chan(apsta_params, tmp_if->dev);
+ if (chan) {
+ max_tput_if = tmp_if;
+ tput_sum = tmp_if->tput_tx + tmp_if->tput_rx;
+ max_tput_chan = chan;
+ break;
+ }
+ }
+ }
+
+ if (max_tput_chan)
+ IAPSTA_INFO(max_tput_if->dev->name, "chan=%d\n", max_tput_chan);
+
+ return max_tput_chan;
+}
+
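+ /* Scan throttling under load: when aggregate throughput exceeds
+ * scan_tput_thresh, either confine the scan to the busiest interface's
+ * channel (SCAN_CURCHAN_INTPUT) or shorten dwell times
+ * (SCAN_LIGHT_INTPUT); after scan_busy_thresh consecutive throttled
+ * scans the channel restriction is lifted for one scan.
+ */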
+uint16
+wl_ext_scan_suppress(struct net_device *dev, void *scan_params, bool scan_v2)
+{
+ struct dhd_pub *dhd = dhd_get_pub(dev);
+ struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+ struct dhd_conf *conf = dhd->conf;
+ uint16 chan = 0;
+
+ if (!(conf->scan_intput & (SCAN_CURCHAN_INTPUT|SCAN_LIGHT_INTPUT)))
+ return 0;
+
+ if (apsta_params->tput_sum >= conf->scan_tput_thresh) {
+ IAPSTA_INFO(dev->name, "tput %dMbps >= %dMbps (busy cnt/thresh %d/%d)\n",
+ apsta_params->tput_sum, conf->scan_tput_thresh,
+ apsta_params->scan_busy_cnt, conf->scan_busy_thresh);
+ if (apsta_params->scan_busy_cnt >= conf->scan_busy_thresh) {
+ apsta_params->scan_busy_cnt = 0;
+ } else if (conf->scan_intput & SCAN_CURCHAN_INTPUT) {
+ chan = wl_ext_max_tput_chan(apsta_params);
+ }
+ if ((conf->scan_intput & SCAN_LIGHT_INTPUT) && !chan)
+ wl_ext_light_scan_prep(dev, scan_params, scan_v2);
+ apsta_params->scan_busy_cnt++;
+ }
+ else {
+ apsta_params->scan_busy_cnt = 0;
+ }
+
+ return chan;
+}
+
+static int
+wl_ext_scan_busy(dhd_pub_t *dhd, struct wl_if_info *cur_if)
+{
+ struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+ struct dhd_conf *conf = dhd->conf;
+ struct osl_timespec cur_ts;
+ uint32 diff_ms;
+ int ret = 0;
+
+ if (!(conf->scan_intput & NO_SCAN_INTPUT))
+ return 0;
+
+ if (apsta_params->tput_sum >= conf->scan_tput_thresh) {
+ if (apsta_params->scan_busy_cnt) {
+ osl_do_gettimeofday(&cur_ts);
+ diff_ms = osl_do_gettimediff(&cur_ts, &apsta_params->scan_busy_ts)/1000;
+ if ((diff_ms/1000) >= conf->scan_busy_tmo) {
+ apsta_params->scan_busy_cnt = 0;
+ IAPSTA_INFO(cur_if->dev->name, "reset scan_busy_cnt\n");
+ goto exit;
+ }
+ }
+ if (apsta_params->scan_busy_cnt >= conf->scan_busy_thresh) {
+ apsta_params->scan_busy_cnt = 0;
+ } else if (conf->scan_intput & NO_SCAN_INTPUT) {
+ IAPSTA_INFO(cur_if->dev->name,
+ "tput %dMbps >= %dMbps(busy cnt/thresh %d/%d)\n",
+ apsta_params->tput_sum, conf->scan_tput_thresh,
+ apsta_params->scan_busy_cnt, conf->scan_busy_thresh);
+ apsta_params->scan_busy_cnt++;
+ if (apsta_params->scan_busy_cnt == 1)
+ osl_do_gettimeofday(&apsta_params->scan_busy_ts);
+ ret = -EBUSY;
+ goto exit;
+ }
+ }
+ else {
+ apsta_params->scan_busy_cnt = 0;
+ }
+
+exit:
+ return ret;
+}
+
+void
+wl_ext_reset_scan_busy(dhd_pub_t *dhd)
+{
+ struct wl_apsta_params *apsta_params = (struct wl_apsta_params *)dhd->iapsta_params;
+ apsta_params->scan_busy_cnt = 0;
+}
+#endif /* SCAN_SUPPRESS */
+
+#ifdef SET_CARRIER
+static void
+wl_ext_net_setcarrier(struct wl_if_info *cur_if, bool on, bool force)
+{
+ IAPSTA_TRACE(cur_if->ifname, "carrier=%d\n", on);
+ if (on) {
+ if (!netif_carrier_ok(cur_if->dev) || force)
+ netif_carrier_on(cur_if->dev);
+ } else {
+ if (netif_carrier_ok(cur_if->dev) || force)
+ netif_carrier_off(cur_if->dev);
+ }
+}
+#endif /* SET_CARRIER */
+
+static void
+wl_set_btc_in4way(struct wl_apsta_params *apsta_params, struct wl_if_info *cur_if,
+ enum wl_ext_status status, bool disable)
+{
+ struct net_device *dev = cur_if->dev;
+ int err;
+
+ if (cur_if->ifidx == 0) {
+ if (disable) {
+ err = wldev_iovar_getint(dev, "btc_mode", &apsta_params->sta_btc_mode);
+ if (!err && apsta_params->sta_btc_mode) {
+ IAPSTA_INFO(dev->name, "status=%d, disable current btc_mode %d\n",
+ status, apsta_params->sta_btc_mode);
+ wldev_iovar_setint(dev, "btc_mode", 0);
+ }
+ } else {
+ if (apsta_params->sta_btc_mode) {
+ IAPSTA_INFO(dev->name, "status=%d, restore btc_mode %d\n",
+ status, apsta_params->sta_btc_mode);
+ wldev_iovar_setint(dev, "btc_mode", apsta_params->sta_btc_mode);
+ apsta_params->sta_btc_mode = 0;
+ }
+ }
+ }
+
+}
+
+static void
+wl_wait_disconnect(struct wl_apsta_params *apsta_params, struct wl_if_info *cur_if,
+ enum wl_ext_status status)
+{
+ struct net_device *dev = cur_if->dev;
+ struct osl_timespec cur_ts, *sta_disc_ts = &apsta_params->sta_disc_ts;
+ int max_wait_time = 200, max_wait_cnt = 20;
+ int cur_conn_state = cur_if->conn_state;
+ uint32 diff_ms = 0;
+
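+ /* Block until at least max_wait_time ms have elapsed since the last
+ * disconnect (bounded by max_wait_cnt polls), releasing in4way_sync
+ * while sleeping so other state changes can proceed.
+ */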
+ if (cur_conn_state > CONN_STATE_IDLE)
+ osl_do_gettimeofday(sta_disc_ts);
+ osl_do_gettimeofday(&cur_ts);
+ diff_ms = osl_do_gettimediff(&cur_ts, sta_disc_ts)/1000;
+ while (diff_ms < max_wait_time && max_wait_cnt) {
+ IAPSTA_INFO(dev->name, "status=%d, max_wait_cnt=%d waiting...\n",
+ status, max_wait_cnt);
+ mutex_unlock(&apsta_params->in4way_sync);
+ OSL_SLEEP(50);
+ mutex_lock(&apsta_params->in4way_sync);
+ max_wait_cnt--;
+ osl_do_gettimeofday(&cur_ts);
+ diff_ms = osl_do_gettimediff(&cur_ts, sta_disc_ts)/1000;
+ }
+
+}
+
+void
+wl_iapsta_wait_event_complete(struct dhd_pub *dhd)
+{
+ struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+ struct wl_if_info *cur_if;
+ int i;
+
+ for (i=0; i<MAX_IF_NUM; i++) {
+ cur_if = &apsta_params->if_info[i];
+ if (cur_if->dev && cur_if->ifmode == ISTA_MODE) {
+ wl_ext_wait_event_complete(dhd, cur_if->ifidx);
+ }
+ }
+}
+
+int
+wl_iapsta_suspend_resume_ap(dhd_pub_t *dhd, struct wl_if_info *cur_if,
+ int suspend)
+{
+ struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+ uint insuspend = 0;
+
+ insuspend = dhd_conf_get_insuspend(dhd, ALL_IN_SUSPEND);
+ if (insuspend)
+ WL_MSG(cur_if->ifname, "suspend %d\n", suspend);
+
+ if (suspend) {
+ if (insuspend & AP_DOWN_IN_SUSPEND) {
+ cur_if->channel = wl_ext_get_chan(apsta_params, cur_if->dev);
+ if (cur_if->channel)
+ wl_ext_if_down(apsta_params, cur_if);
+ }
+ } else {
+ if (insuspend & AP_DOWN_IN_SUSPEND) {
+ if (cur_if->channel)
+ wl_ext_if_up(apsta_params, cur_if, FALSE, 0);
+ }
+ }
+
+ return 0;
+}
+
+int
+wl_iapsta_suspend_resume(dhd_pub_t *dhd, int suspend)
+{
+ struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+ struct wl_if_info *cur_if;
+ int i;
+
+#ifdef TPUT_MONITOR
+ if (suspend)
+ wl_ext_mod_timer(&apsta_params->monitor_timer, 0, 0);
+#endif /* TPUT_MONITOR */
+
+ for (i=0; i<MAX_IF_NUM; i++) {
+ cur_if = &apsta_params->if_info[i];
+ if (cur_if->dev && cur_if->ifmode == ISTA_MODE) {
+ if (!suspend)
+ memcpy(&dhd->conf->bssid_insuspend, &cur_if->bssid, ETHER_ADDR_LEN);
+ dhd_conf_suspend_resume_sta(dhd, cur_if->ifidx, suspend);
+ if (suspend)
+ memcpy(&cur_if->bssid, &dhd->conf->bssid_insuspend, ETHER_ADDR_LEN);
+ }
+ else if (cur_if->dev && cur_if->ifmode == IAP_MODE) {
+ wl_iapsta_suspend_resume_ap(dhd, cur_if, suspend);
+ }
+ }
+
+#ifdef TPUT_MONITOR
+ if (!suspend)
+ wl_ext_mod_timer(&apsta_params->monitor_timer, 0, dhd->conf->tput_monitor_ms);
+#endif /* TPUT_MONITOR */
+
+ return 0;
+}
+
+static int
+wl_ext_in4way_sync_sta(dhd_pub_t *dhd, struct wl_if_info *cur_if,
+ uint action, enum wl_ext_status status, void *context)
+{
+ struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+ struct dhd_conf *conf = dhd->conf;
+ struct net_device *dev = cur_if->dev;
+ struct osl_timespec cur_ts, *sta_disc_ts = &apsta_params->sta_disc_ts;
+ struct osl_timespec *sta_conn_ts = &apsta_params->sta_conn_ts;
+ uint32 diff_ms = 0;
+ int ret = 0, cur_conn_state;
+ int suppressed = 0, wpa_auth = 0;
+ bool connecting = FALSE;
+ wl_event_msg_t *e = (wl_event_msg_t *)context;
+#ifdef WL_CFG80211
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+#endif /* WL_CFG80211 */
+
+ action = action & conf->in4way;
+#ifdef WL_CFG80211
+ if ((conf->in4way & STA_FAKE_SCAN_IN_CONNECT) && (action & STA_NO_SCAN_IN4WAY))
+ action &= ~(STA_NO_SCAN_IN4WAY);
+#endif /* WL_CFG80211 */
+ cur_conn_state = cur_if->conn_state;
+ IAPSTA_TRACE(dev->name, "status=%d, action=0x%x, in4way=0x%x\n",
+ status, action, conf->in4way);
+
+ connecting = wl_ext_sta_connecting(dev);
+
+ switch (status) {
+ case WL_EXT_STATUS_SCAN:
+ wldev_ioctl(dev, WLC_GET_SCANSUPPRESS, &suppressed, sizeof(int), false);
+ if (suppressed) {
+ IAPSTA_ERROR(dev->name, "scan suppressed\n");
+ ret = -EBUSY;
+ break;
+ }
+#ifdef WL_ESCAN
+ if (dhd->escan->escan_state == ESCAN_STATE_SCANING) {
+ IAPSTA_ERROR(dev->name, "escan busy\n");
+ ret = -EBUSY;
+ break;
+ }
+#endif /* WL_ESCAN */
+#ifdef WL_CFG80211
+ if (wl_get_drv_status_all(cfg, SCANNING) && cfg->scan_request) {
+ IAPSTA_ERROR(dev->name, "cfg80211 scanning\n");
+ ret = -EAGAIN;
+ break;
+ }
+#endif /* WL_CFG80211 */
+#if defined(WL_CFG80211) && defined(SCAN_SUPPRESS)
+ ret = wl_ext_scan_busy(dhd, cur_if);
+ if (ret) {
+ WL_MSG(dev->name, "no scan intput\n");
+ break;
+ }
+#endif /* WL_CFG80211 && SCAN_SUPPRESS */
+ if (action & STA_NO_SCAN_IN4WAY) {
+ osl_do_gettimeofday(&cur_ts);
+ diff_ms = osl_do_gettimediff(&cur_ts, sta_conn_ts)/1000;
+ if (connecting && diff_ms <= STA_CONNECT_TIMEOUT) {
+ IAPSTA_ERROR(dev->name, "connecting... %d\n", cur_conn_state);
+ ret = -EBUSY;
+ break;
+ }
+ }
+ break;
+#ifdef WL_CFG80211
+ case WL_EXT_STATUS_SCANNING:
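+ /* While a connect is in flight (or scans keep coming back empty),
+ * answer the scan request with a synthetic WLC_E_ESCAN_RESULT success
+ * event instead of issuing a real scan, so the supplicant sees the
+ * scan complete without the radio leaving the serving channel.
+ */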
+ if (action & STA_FAKE_SCAN_IN_CONNECT) {
+ osl_do_gettimeofday(&cur_ts);
+ diff_ms = osl_do_gettimediff(&cur_ts, sta_conn_ts)/1000;
+ if (wl_get_drv_status(cfg, CONNECTING, dev) ||
+ (connecting && diff_ms <= STA_CONNECT_TIMEOUT) ||
+ (cur_if->empty_scan >= STA_EMPTY_SCAN_MAX)) {
+ unsigned long flags = 0;
+ cur_if->empty_scan = 0;
+ spin_lock_irqsave(&dhd->up_lock, flags);
+ if (dhd->up) {
+ wl_event_msg_t msg;
+ bzero(&msg, sizeof(wl_event_msg_t));
+ msg.event_type = hton32(WLC_E_ESCAN_RESULT);
+ msg.status = hton32(WLC_E_STATUS_SUCCESS);
+ WL_MSG(dev->name, "FAKE SCAN\n");
+ wl_cfg80211_event(dev, &msg, NULL);
+ ret = -EBUSY;
+ }
+ spin_unlock_irqrestore(&dhd->up_lock, flags);
+ }
+ }
+ break;
+ case WL_EXT_STATUS_SCAN_COMPLETE:
+ if ((conf->war & FW_REINIT_EMPTY_SCAN) && cfg->bss_list->count == 0) {
+ uint16 channel;
+ osl_do_gettimeofday(&cur_ts);
+ diff_ms = osl_do_gettimediff(&cur_ts, sta_disc_ts)/1000;
+ channel = wl_ext_get_chan(apsta_params, dev);
+ cur_if->empty_scan++;
+ if ((channel && cur_if->empty_scan >= STA_EMPTY_SCAN_MAX) ||
+ (diff_ms < STA_LINKDOWN_TIMEOUT &&
+ apsta_params->linkdown_reason == WLC_E_LINK_BCN_LOSS)) {
+ if (conf->chip == BCM43569_CHIP_ID) {
+ if (channel) {
+ IAPSTA_INFO(dev->name, "wl disassoc for empty scan\n");
+ wl_ext_ioctl(cur_if->dev, WLC_DISASSOC, NULL, 0, 1);
+ }
+ } else {
+ IAPSTA_INFO(dev->name, "wl reinit for empty scan\n");
+ wl_ext_ioctl(dev, WLC_INIT, NULL, 0, 1);
+ }
+ }
+ }
+ else {
+ cur_if->empty_scan = 0;
+ }
+ break;
+#endif /* WL_CFG80211 */
+ case WL_EXT_STATUS_DISCONNECTING:
+#ifdef EAPOL_RESEND
+ wl_ext_release_eapol_txpkt(dhd, cur_if->ifidx, FALSE);
+#endif /* EAPOL_RESEND */
+ wl_ext_mod_timer(&cur_if->connect_timer, 0, 0);
+#if defined(WL_EXT_RECONNECT) && defined(WL_CFG80211)
+ wl_ext_mod_timer(&cur_if->reconnect_timer, 0, 0);
+ memset(&cur_if->assoc_info, 0, sizeof(wlcfg_assoc_info_t));
+#endif /* WL_EXT_RECONNECT && WL_CFG80211 */
+#ifdef SCAN_SUPPRESS
+ apsta_params->scan_busy_cnt = 0;
+#endif /* SCAN_SUPPRESS */
+ if (connecting) {
+ IAPSTA_ERROR(dev->name, "connect failed at %d\n", cur_conn_state);
+ wl_ext_update_conn_state(dhd, cur_if->ifidx, CONN_STATE_IDLE);
+ }
+ if (action & STA_NO_BTC_IN4WAY) {
+ wl_set_btc_in4way(apsta_params, cur_if, status, FALSE);
+ }
+ if (action & STA_WAIT_DISCONNECTED) {
+ wl_wait_disconnect(apsta_params, cur_if, status);
+ wake_up_interruptible(&conf->event_complete);
+ }
+ break;
+ case WL_EXT_STATUS_CONNECTING:
+#if defined(WL_EXT_RECONNECT) && defined(WL_CFG80211)
+ if (action & STA_REASSOC_RETRY) {
+ wl_ext_set_connect_retry(dev, context);
+ }
+#endif /* WL_EXT_RECONNECT && WL_CFG80211 */
+ wl_ext_mod_timer(&cur_if->connect_timer, 0, STA_CONNECT_TIMEOUT);
+ osl_do_gettimeofday(sta_conn_ts);
+ wl_ext_update_conn_state(dhd, cur_if->ifidx, CONN_STATE_CONNECTING);
+ if (action & STA_NO_BTC_IN4WAY) {
+ wl_set_btc_in4way(apsta_params, cur_if, status, TRUE);
+ }
+ break;
+ case WL_EXT_STATUS_CONNECTED:
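+ /* Open/WEP links (wpa_auth below WPA_AUTH_UNSPECIFIED) have no key
+ * handshake, and FT derives keys during reassociation, so mark those
+ * connected immediately; other WPA modes wait for WL_EXT_STATUS_ADD_KEY.
+ */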
+ wl_ext_iovar_getint(dev, "wpa_auth", &wpa_auth);
+ if ((wpa_auth < WPA_AUTH_UNSPECIFIED) || (wpa_auth & WPA2_AUTH_FT)) {
+ wl_ext_mod_timer(&cur_if->connect_timer, 0, 0);
+ wl_ext_update_conn_state(dhd, cur_if->ifidx, CONN_STATE_CONNECTED);
+ }
+#if defined(WL_EXT_RECONNECT) && defined(WL_CFG80211)
+ wl_ext_mod_timer(&cur_if->reconnect_timer, 0, 0);
+ memset(&cur_if->assoc_info, 0, sizeof(wlcfg_assoc_info_t));
+#endif /* WL_EXT_RECONNECT && WL_CFG80211 */
+ if (cur_if->ifmode == ISTA_MODE) {
+ dhd_conf_set_wme(dhd, cur_if->ifidx, 0);
+ wake_up_interruptible(&conf->event_complete);
+ }
+ else if (cur_if->ifmode == IGC_MODE) {
+ dhd_conf_set_mchan_bw(dhd, WL_P2P_IF_CLIENT, -1);
+ }
+ break;
+ case WL_EXT_STATUS_RECONNECT:
+#ifdef EAPOL_RESEND
+ wl_ext_release_eapol_txpkt(dhd, cur_if->ifidx, FALSE);
+#endif /* EAPOL_RESEND */
+#if defined(WL_EXT_RECONNECT) && defined(WL_CFG80211)
+ if (action & STA_REASSOC_RETRY) {
+ ret = wl_ext_connect_retry(dev, e);
+ }
+#endif /* WL_EXT_RECONNECT && WL_CFG80211 */
+ break;
+ case WL_EXT_STATUS_DISCONNECTED:
+#ifdef EAPOL_RESEND
+ wl_ext_release_eapol_txpkt(dhd, cur_if->ifidx, FALSE);
+#endif /* EAPOL_RESEND */
+#if defined(WL_EXT_RECONNECT) && defined(WL_CFG80211)
+ wl_ext_mod_timer(&cur_if->reconnect_timer, 0, 0);
+ memset(&cur_if->assoc_info, 0, sizeof(wlcfg_assoc_info_t));
+#endif /* WL_EXT_RECONNECT && WL_CFG80211 */
+#ifdef SCAN_SUPPRESS
+ apsta_params->scan_busy_cnt = 0;
+#endif /* SCAN_SUPPRESS */
+ if (e && ntoh32(e->event_type) == WLC_E_LINK &&
+ !(ntoh16(e->flags) & WLC_EVENT_MSG_LINK)) {
+ apsta_params->linkdown_reason = ntoh32(e->reason);
+ }
+ wl_ext_mod_timer(&cur_if->connect_timer, 0, 0);
+ if (connecting) {
+ IAPSTA_ERROR(dev->name, "connect failed at %d\n", cur_conn_state);
+ }
+ wl_ext_update_conn_state(dhd, cur_if->ifidx, CONN_STATE_IDLE);
+ if (action & STA_NO_BTC_IN4WAY) {
+ wl_set_btc_in4way(apsta_params, cur_if, status, FALSE);
+ }
+ osl_do_gettimeofday(sta_disc_ts);
+ wake_up_interruptible(&conf->event_complete);
+ break;
+ case WL_EXT_STATUS_ADD_KEY:
+ wl_ext_mod_timer(&cur_if->connect_timer, 0, 0);
+ wl_ext_update_conn_state(dhd, cur_if->ifidx, CONN_STATE_CONNECTED);
+#ifdef EAPOL_RESEND
+ wl_ext_release_eapol_txpkt(dhd, cur_if->ifidx, FALSE);
+#endif /* EAPOL_RESEND */
+#if defined(WL_EXT_RECONNECT) && defined(WL_CFG80211)
+ wl_ext_mod_timer(&cur_if->reconnect_timer, 0, 0);
+#endif /* WL_EXT_RECONNECT && WL_CFG80211 */
+ if (action & STA_NO_BTC_IN4WAY) {
+ wl_set_btc_in4way(apsta_params, cur_if, status, FALSE);
+ }
+ wake_up_interruptible(&conf->event_complete);
+ IAPSTA_INFO(dev->name, "WPA 4-WAY complete %d\n", cur_conn_state);
+ break;
+ default:
+ IAPSTA_INFO(dev->name, "Unknown action=0x%x, status=%d\n", action, status);
+ }
+
+ return ret;
+}
+
+#ifdef WL_CFG80211
+static int
+wl_ext_in4way_sync_ap(dhd_pub_t *dhd, struct wl_if_info *cur_if,
+ uint action, enum wl_ext_status status, void *context)
+{
+ struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+ struct net_device *dev = cur_if->dev;
+ struct osl_timespec cur_ts, *ap_disc_sta_ts = &apsta_params->ap_disc_sta_ts;
+ u8 *ap_disc_sta_bssid = (u8*)&apsta_params->ap_disc_sta_bssid;
+ uint32 diff_ms = 0, timeout, max_wait_time = 300;
+ int ret = 0, suppressed = 0;
+ u8* mac_addr = context;
+ bool wait = FALSE;
+
+ action = action & dhd->conf->in4way;
+ IAPSTA_TRACE(dev->name, "status=%d, action=0x%x, in4way=0x%x\n",
+ status, action, dhd->conf->in4way);
+
+ switch (status) {
+ case WL_EXT_STATUS_SCAN:
+ wldev_ioctl(dev, WLC_GET_SCANSUPPRESS, &suppressed, sizeof(int), false);
+ if (suppressed) {
+ IAPSTA_ERROR(dev->name, "scan suppressed\n");
+ ret = -EBUSY;
+ break;
+ }
+ break;
+ case WL_EXT_STATUS_AP_ENABLED:
+ if (cur_if->ifmode == IAP_MODE)
+ dhd_conf_set_wme(dhd, cur_if->ifidx, 1);
+ else if (cur_if->ifmode == IGO_MODE)
+ dhd_conf_set_mchan_bw(dhd, WL_P2P_IF_GO, -1);
+ break;
+ case WL_EXT_STATUS_DELETE_STA:
+ if (action & AP_WAIT_STA_RECONNECT) {
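+ /* WAR: some STAs disconnect and immediately reconnect (e.g. during
+ * WPS or the 4-way handshake); delay honoring the delete so the entry
+ * survives if the same STA comes back within max_wait_time.
+ */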
+ osl_do_gettimeofday(&cur_ts);
+ diff_ms = osl_do_gettimediff(&cur_ts, ap_disc_sta_ts)/1000;
+ if (cur_if->ifmode == IAP_MODE &&
+ mac_addr && diff_ms < max_wait_time &&
+ !memcmp(ap_disc_sta_bssid, mac_addr, ETHER_ADDR_LEN)) {
+ wait = TRUE;
+ } else if (cur_if->ifmode == IGO_MODE &&
+ cur_if->conn_state == CONN_STATE_WSC_DONE &&
+ memcmp(&ether_bcast, mac_addr, ETHER_ADDR_LEN)) {
+ wait = TRUE;
+ }
+ if (wait) {
+ IAPSTA_INFO(dev->name, "status=%d, ap_recon_sta=%d, waiting %dms ...\n",
+ status, apsta_params->ap_recon_sta, max_wait_time);
+ mutex_unlock(&apsta_params->in4way_sync);
+ timeout = wait_event_interruptible_timeout(apsta_params->ap_recon_sta_event,
+ apsta_params->ap_recon_sta, msecs_to_jiffies(max_wait_time));
+ mutex_lock(&apsta_params->in4way_sync);
+ IAPSTA_INFO(dev->name, "status=%d, ap_recon_sta=%d, timeout=%d\n",
+ status, apsta_params->ap_recon_sta, timeout);
+ if (timeout > 0) {
+ IAPSTA_INFO(dev->name, "skip delete STA %pM\n", mac_addr);
+ ret = -1;
+ break;
+ }
+ } else {
+ IAPSTA_INFO(dev->name, "status=%d, ap_recon_sta=%d => 0\n",
+ status, apsta_params->ap_recon_sta);
+ apsta_params->ap_recon_sta = FALSE;
+ if (cur_if->ifmode == IGO_MODE)
+ wl_ext_update_conn_state(dhd, cur_if->ifidx, CONN_STATE_IDLE);
+ }
+ }
+ break;
+ case WL_EXT_STATUS_STA_DISCONNECTED:
+ if (action & AP_WAIT_STA_RECONNECT) {
+ IAPSTA_INFO(dev->name, "latest disc STA %pM ap_recon_sta=%d\n",
+ ap_disc_sta_bssid, apsta_params->ap_recon_sta);
+ osl_do_gettimeofday(ap_disc_sta_ts);
+ memcpy(ap_disc_sta_bssid, mac_addr, ETHER_ADDR_LEN);
+ apsta_params->ap_recon_sta = FALSE;
+ }
+ break;
+ case WL_EXT_STATUS_STA_CONNECTED:
+ if (action & AP_WAIT_STA_RECONNECT) {
+ osl_do_gettimeofday(&cur_ts);
+ diff_ms = osl_do_gettimediff(&cur_ts, ap_disc_sta_ts)/1000;
+ if (diff_ms < max_wait_time &&
+ !memcmp(ap_disc_sta_bssid, mac_addr, ETHER_ADDR_LEN)) {
+ IAPSTA_INFO(dev->name, "status=%d, ap_recon_sta=%d => 1\n",
+ status, apsta_params->ap_recon_sta);
+ apsta_params->ap_recon_sta = TRUE;
+ wake_up_interruptible(&apsta_params->ap_recon_sta_event);
+ } else {
+ apsta_params->ap_recon_sta = FALSE;
+ }
+ }
+ break;
+ default:
+ IAPSTA_INFO(dev->name, "Unknown action=0x%x, status=%d\n", action, status);
+ }
+
+ return ret;
+}
+
+int
+wl_ext_in4way_sync(struct net_device *dev, uint action,
+ enum wl_ext_status status, void *context)
+{
+ dhd_pub_t *dhd = dhd_get_pub(dev);
+ struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+ struct wl_if_info *cur_if = NULL;
+ int ret = 0;
+
+ mutex_lock(&apsta_params->in4way_sync);
+ cur_if = wl_get_cur_if(dev);
+ if (cur_if) {
+ if (cur_if->ifmode == ISTA_MODE || cur_if->ifmode == IGC_MODE)
+ ret = wl_ext_in4way_sync_sta(dhd, cur_if, action, status, context);
+ else if (cur_if->ifmode == IAP_MODE || cur_if->ifmode == IGO_MODE)
+ ret = wl_ext_in4way_sync_ap(dhd, cur_if, action, status, context);
+ else
+ IAPSTA_INFO(dev->name, "Unknown mode %d\n", cur_if->ifmode);
+ }
+ mutex_unlock(&apsta_params->in4way_sync);
+
+ return ret;
+}
+
+void
+wl_ext_update_extsae_4way(struct net_device *dev,
+ const struct ieee80211_mgmt *mgmt, bool tx)
+{
+ dhd_pub_t *dhd = dhd_get_pub(dev);
+ struct wl_if_info *cur_if = NULL;
+ uint32 auth_alg, auth_seq, status_code;
+ uint conn_state = 0;
+ char sae_type[32] = "";
+
+ cur_if = wl_get_cur_if(dev);
+ if (!cur_if)
+ return;
+
+ auth_alg = mgmt->u.auth.auth_alg;
+ auth_seq = mgmt->u.auth.auth_transaction;
+ status_code = mgmt->u.auth.status_code;
+ if (auth_alg == WLAN_AUTH_SAE) {
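+ /* SAE uses two authentication frames in each direction: commit
+ * (auth_seq 1) and confirm (auth_seq 2). Map TX/RX direction and the
+ * local role onto handshake messages M1..M4 for logging.
+ */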
+ if (cur_if->ifmode == ISTA_MODE || cur_if->ifmode == IGC_MODE) {
+ if (auth_seq == 1) {
+ if (tx)
+ conn_state = CONN_STATE_AUTH_SAE_M1;
+ else
+ conn_state = CONN_STATE_AUTH_SAE_M2;
+ } else if (auth_seq == 2) {
+ if (tx)
+ conn_state = CONN_STATE_AUTH_SAE_M3;
+ else
+ conn_state = CONN_STATE_AUTH_SAE_M4;
+ }
+ } else if (cur_if->ifmode == IAP_MODE || cur_if->ifmode == IGO_MODE) {
+ if (auth_seq == 1) {
+ if (tx)
+ conn_state = CONN_STATE_AUTH_SAE_M2;
+ else
+ conn_state = CONN_STATE_AUTH_SAE_M1;
+ } else if (auth_seq == 2) {
+ if (tx)
+ conn_state = CONN_STATE_AUTH_SAE_M4;
+ else
+ conn_state = CONN_STATE_AUTH_SAE_M3;
+ }
+ }
+ if (status_code == 76) {
+ snprintf(sae_type, sizeof(sae_type), "%d(Anti-clogging)", status_code);
+ } else if (status_code == 126) {
+ snprintf(sae_type, sizeof(sae_type), "%d(R3-H2E)", status_code);
+ } else {
+ snprintf(sae_type, sizeof(sae_type), "%d", status_code);
+ }
+ }
+ if (conn_state) {
+ wl_ext_update_conn_state(dhd, cur_if->ifidx, conn_state);
+ if (dump_msg_level & DUMP_EAPOL_VAL) {
+ if (tx) {
+ WL_MSG(dev->name, "WPA3 SAE M%d [TX] : (%pM) -> (%pM), status=%s\n",
+ conn_state-CONN_STATE_AUTH_SAE_M1+1, mgmt->sa, mgmt->da, sae_type);
+ } else {
+ WL_MSG(dev->name, "WPA3 SAE M%d [RX] : (%pM) <- (%pM), status=%s\n",
+ conn_state-CONN_STATE_AUTH_SAE_M1+1, mgmt->da, mgmt->sa, sae_type);
+ }
+ }
+ } else {
+ WL_ERR(("Unknown auth_alg=%d or auth_seq=%d\n", auth_alg, auth_seq));
+ }
+
+ return;
+}
+#endif /* WL_CFG80211 */
+
+#ifdef WL_WIRELESS_EXT
+int
+wl_ext_in4way_sync_wext(struct net_device *dev, uint action,
+ enum wl_ext_status status, void *context)
+{
+ int ret = 0;
+#ifndef WL_CFG80211
+ dhd_pub_t *dhd = dhd_get_pub(dev);
+ struct wl_apsta_params *apsta_params;
+ struct wl_if_info *cur_if = NULL;
+
+ if (!dhd)
+ return 0;
+
+ apsta_params = dhd->iapsta_params;
+
+ mutex_lock(&apsta_params->in4way_sync);
+ cur_if = wl_get_cur_if(dev);
+ if (cur_if && cur_if->ifmode == ISTA_MODE) {
+ if (status == WL_EXT_STATUS_DISCONNECTING) {
+ wl_ext_add_remove_pm_enable_work(dev, FALSE);
+ } else if (status == WL_EXT_STATUS_CONNECTING) {
+ wl_ext_add_remove_pm_enable_work(dev, TRUE);
+ }
+ ret = wl_ext_in4way_sync_sta(dhd, cur_if, 0, status, NULL);
+ }
+ mutex_unlock(&apsta_params->in4way_sync);
+#endif
+ return ret;
+}
+#endif /* WL_WIRELESS_EXT */
+
+#ifdef TPUT_MONITOR
+static int
+wl_ext_assoclist_num(struct net_device *dev)
+{
+ int ret = 0, maxassoc = 0;
+ char mac_buf[MAX_NUM_OF_ASSOCLIST *
+ sizeof(struct ether_addr) + sizeof(uint)] = {0};
+ struct maclist *assoc_maclist = (struct maclist *)mac_buf;
+
+ assoc_maclist->count = htod32(MAX_NUM_OF_ASSOCLIST);
+ ret = wl_ext_ioctl(dev, WLC_GET_ASSOCLIST, assoc_maclist, sizeof(mac_buf), 0);
+ if (ret)
+ return 0;
+ maxassoc = dtoh32(assoc_maclist->count);
+
+ return maxassoc;
+}
+
+static void
+wl_tput_monitor_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct dhd_pub *dhd;
+ wl_event_msg_t msg;
+
+ if (!dev) {
+ IAPSTA_ERROR("wlan", "dev is not ready\n");
+ return;
+ }
+
+ dhd = dhd_get_pub(dev);
+
+ bzero(&msg, sizeof(wl_event_msg_t));
+ IAPSTA_TRACE(dev->name, "timer expired\n");
+
+ msg.ifidx = 0;
+ msg.event_type = hton32(WLC_E_RESERVED);
+ msg.reason = hton32(ISAM_RC_TPUT_MONITOR);
+ wl_ext_event_send(dhd->event_params, &msg, NULL);
+}
+
+static void
+wl_tput_dump(struct wl_apsta_params *apsta_params, struct wl_if_info *cur_if)
+{
+ struct dhd_pub *dhd = apsta_params->dhd;
+ void *buf = NULL;
+ sta_info_v4_t *sta = NULL;
+ struct ether_addr bssid;
+ wl_rssi_ant_t *rssi_ant_p;
+ char rssi_buf[16];
+ scb_val_t scb_val;
+ int ret, bytes_written = 0, i;
+ s32 rate = 0;
+ dhd_if_t *ifp = NULL;
+
+ if (!(android_msg_level & ANDROID_TPUT_LEVEL))
+ return;
+
+ ifp = dhd_get_ifp(dhd, cur_if->ifidx);
+
+ buf = kmalloc(WLC_IOCTL_MEDLEN, GFP_KERNEL);
+ if (buf == NULL) {
+ IAPSTA_ERROR(cur_if->dev->name, "MALLOC failed\n");
+ goto exit;
+ }
+
+ wldev_ioctl_get(cur_if->dev, WLC_GET_RATE, &rate, sizeof(rate));
+ rate = dtoh32(rate);
+ memset(rssi_buf, 0, sizeof(rssi_buf));
+ if (cur_if->ifmode == ISTA_MODE) {
+ ret = wldev_iovar_getbuf(cur_if->dev, "phy_rssi_ant", NULL, 0,
+ buf, WLC_IOCTL_MEDLEN, NULL);
+ rssi_ant_p = (wl_rssi_ant_t *)buf;
+ rssi_ant_p->version = dtoh32(rssi_ant_p->version);
+ rssi_ant_p->count = dtoh32(rssi_ant_p->count);
+ if (ret < 0 || rssi_ant_p->count == 0) {
+ wldev_ioctl(cur_if->dev, WLC_GET_RSSI, &scb_val, sizeof(scb_val_t), 0);
+ rssi_ant_p->count = 1;
+ rssi_ant_p->rssi_ant[0] = dtoh32(scb_val.val);
+ }
+ for (i=0; i<rssi_ant_p->count && rssi_ant_p->rssi_ant[i]; i++) {
+ bytes_written += snprintf(rssi_buf+bytes_written,
+ sizeof(rssi_buf)-bytes_written, "[%2d]", rssi_ant_p->rssi_ant[i]);
+ }
+ wldev_ioctl(cur_if->dev, WLC_GET_BSSID, &bssid, sizeof(bssid), 0);
+ ret = wldev_iovar_getbuf(cur_if->dev, "sta_info", (const void*)&bssid,
+ ETHER_ADDR_LEN, buf, WLC_IOCTL_MEDLEN, NULL);
+ if (ret == 0) {
+ sta = (sta_info_v4_t *)buf;
+ }
+ }
+ else {
+ bytes_written += snprintf(rssi_buf+bytes_written,
+ sizeof(rssi_buf)-bytes_written, "[ ][ ]");
+ }
+
+ if (sta == NULL || (sta->ver != WL_STA_VER_4 && sta->ver != WL_STA_VER_5)) {
+ WL_MSG(cur_if->dev->name,
+ "rssi=%s, tx=%3d.%d%d%d Mbps(rate:%4d%2s), rx=%3d.%d%d%d Mbps(rate: ), "\
+ "tput_sum=%3d.%d%d%d Mbps\n",
+ rssi_buf, cur_if->tput_tx, (cur_if->tput_tx_kb/100)%10,
+ (cur_if->tput_tx_kb/10)%10, (cur_if->tput_tx_kb)%10,
+ rate/2, (rate & 1) ? ".5" : "",
+ cur_if->tput_rx, (cur_if->tput_rx_kb/100)%10,
+ (cur_if->tput_rx_kb/10)%10, (cur_if->tput_rx_kb)%10,
+ apsta_params->tput_sum, (apsta_params->tput_sum_kb/100)%10,
+ (apsta_params->tput_sum_kb/10)%10, (apsta_params->tput_sum_kb)%10);
+ } else {
+ WL_MSG(cur_if->dev->name,
+ "rssi=%s, tx=%3d.%d%d%d Mbps(rate:%4d%2s), rx=%3d.%d%d%d Mbps(rate:%4d.%d), "\
+ "tput_sum=%3d.%d%d%d Mbps\n",
+ rssi_buf, cur_if->tput_tx, (cur_if->tput_tx_kb/100)%10,
+ (cur_if->tput_tx_kb/10)%10, (cur_if->tput_tx_kb)%10,
+ rate/2, (rate & 1) ? ".5" : "",
+ cur_if->tput_rx, (cur_if->tput_rx_kb/100)%10,
+ (cur_if->tput_rx_kb/10)%10, (cur_if->tput_rx_kb)%10,
+ dtoh32(sta->rx_rate)/1000, ((dtoh32(sta->rx_rate)/100)%10),
+ apsta_params->tput_sum, (apsta_params->tput_sum_kb/100)%10,
+ (apsta_params->tput_sum_kb/10)%10, (apsta_params->tput_sum_kb)%10);
+ }
+
+exit:
+ if (buf) {
+ kfree(buf);
+ }
+}
+
+static void
+wl_tput_monitor(struct wl_apsta_params *apsta_params, struct wl_if_info *cur_if)
+{
+ struct dhd_pub *dhd = apsta_params->dhd;
+ dhd_if_t *ifp = NULL;
+
+ ifp = dhd_get_ifp(dhd, cur_if->ifidx);
+
+ if (cur_if->tput_ts.tv_sec == 0 && cur_if->tput_ts.tv_nsec == 0) {
+ osl_do_gettimeofday(&cur_if->tput_ts);
+ cur_if->last_tx = ifp->stats.tx_bytes;
+ cur_if->last_rx = ifp->stats.rx_bytes;
+ } else {
+ struct osl_timespec cur_ts;
+ uint32 diff_ms;
+
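+ /* Convert byte-counter deltas to Mbps; if the rate rounds down to
+ * 0 Mbps, recompute at Kbps resolution and keep the fractional part
+ * (three digits) in tput_*_kb for the dump output.
+ */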
+ osl_do_gettimeofday(&cur_ts);
+ diff_ms = osl_do_gettimediff(&cur_ts, &cur_if->tput_ts)/1000;
+ memcpy(&cur_if->tput_ts, &cur_ts, sizeof(struct osl_timespec));
+ cur_if->tput_tx = (int32)(((ifp->stats.tx_bytes-cur_if->last_tx)/1024/1024)*8)*1000/diff_ms;
+ if (cur_if->tput_tx == 0) {
+ cur_if->tput_tx = (int32)((ifp->stats.tx_bytes-cur_if->last_tx)*8*1000/1024/1024)/diff_ms;
+ cur_if->tput_tx_kb = (int32)((ifp->stats.tx_bytes-cur_if->last_tx)*8*1000/1024)/diff_ms;
+ cur_if->tput_tx_kb = cur_if->tput_tx_kb % 1000;
+ } else
+ cur_if->tput_tx_kb = 0;
+ cur_if->tput_rx = (int32)(((ifp->stats.rx_bytes-cur_if->last_rx)/1024/1024)*8)*1000/diff_ms;
+ if (cur_if->tput_rx == 0) {
+ cur_if->tput_rx = (int32)((ifp->stats.rx_bytes-cur_if->last_rx)*8*1000/1024/1024)/diff_ms;
+ cur_if->tput_rx_kb = (int32)((ifp->stats.rx_bytes-cur_if->last_rx)*8*1000/1024)/diff_ms;
+ cur_if->tput_rx_kb = cur_if->tput_rx_kb % 1000;
+ } else
+ cur_if->tput_rx_kb = 0;
+ cur_if->last_tx = ifp->stats.tx_bytes;
+ cur_if->last_rx = ifp->stats.rx_bytes;
+ }
+}
+
+static void
+wl_tput_monitor_handler(struct wl_apsta_params *apsta_params,
+ struct wl_if_info *cur_if, const wl_event_msg_t *e, void *data)
+{
+ struct dhd_pub *dhd = apsta_params->dhd;
+ struct wl_if_info *tmp_if;
+ uint32 etype = ntoh32(e->event_type);
+ uint32 status = ntoh32(e->status);
+ uint32 reason = ntoh32(e->reason);
+ uint16 flags = ntoh16(e->flags);
+ uint timeout = dhd->conf->tput_monitor_ms;
+ int32 tput_sum = 0, tput_sum_kb = 0;
+ bool monitor_if[MAX_IF_NUM] = {FALSE};
+ int i;
+
+ if (etype == WLC_E_RESERVED && reason == ISAM_RC_TPUT_MONITOR) {
+ tput_sum = 0;
+ for (i=0; i<MAX_IF_NUM; i++) {
+ tmp_if = &apsta_params->if_info[i];
+ if (tmp_if->dev && tmp_if->ifmode == ISTA_MODE &&
+ wl_ext_get_chan(apsta_params, tmp_if->dev)) {
+ wl_tput_monitor(apsta_params, tmp_if);
+ monitor_if[i] = TRUE;
+ }
+ else if (tmp_if->dev && tmp_if->ifmode == IAP_MODE &&
+ wl_ext_assoclist_num(tmp_if->dev)) {
+ wl_tput_monitor(apsta_params, tmp_if);
+ monitor_if[i] = TRUE;
+ }
+ tput_sum += (tmp_if->tput_tx + tmp_if->tput_rx);
+ tput_sum_kb += (tmp_if->tput_tx_kb + tmp_if->tput_rx_kb);
+ }
+ apsta_params->tput_sum = tput_sum + (tput_sum_kb/1000);
+ apsta_params->tput_sum_kb = tput_sum_kb % 1000;
+ for (i=0; i<MAX_IF_NUM; i++) {
+ if (monitor_if[i]) {
+ tmp_if = &apsta_params->if_info[i];
+ wl_tput_dump(apsta_params, tmp_if);
+ wl_ext_mod_timer(&apsta_params->monitor_timer, 0, timeout);
+ }
+ }
+#ifdef BCMSDIO
+ if (apsta_params->tput_sum >= dhd->conf->doflow_tput_thresh && dhd_doflow) {
+ dhd_doflow = FALSE;
+ dhd_txflowcontrol(dhd, ALL_INTERFACES, OFF);
+ IAPSTA_INFO("wlan", "dhd_doflow=%d\n", dhd_doflow);
+ }
+ else if (apsta_params->tput_sum < dhd->conf->doflow_tput_thresh && !dhd_doflow) {
+ dhd_doflow = TRUE;
+ IAPSTA_INFO("wlan", "dhd_doflow=%d\n", dhd_doflow);
+ }
+#endif
+ }
+ else if (cur_if->ifmode == ISTA_MODE) {
+ if (etype == WLC_E_LINK) {
+ if (flags & WLC_EVENT_MSG_LINK) {
+ wl_ext_mod_timer(&apsta_params->monitor_timer, 0, timeout);
+ } else if (!wl_ext_iapsta_other_if_enabled(cur_if->dev)) {
+ wl_ext_mod_timer(&apsta_params->monitor_timer, 0, 0);
+ }
+ }
+ }
+ else if (cur_if->ifmode == IAP_MODE) {
+ if ((etype == WLC_E_SET_SSID && status == WLC_E_STATUS_SUCCESS) ||
+ (etype == WLC_E_LINK && status == WLC_E_STATUS_SUCCESS &&
+ reason == WLC_E_REASON_INITIAL_ASSOC)) {
+ wl_ext_mod_timer(&apsta_params->monitor_timer, 0, timeout);
+ } else if ((etype == WLC_E_LINK && reason == WLC_E_LINK_BSSCFG_DIS) ||
+ (etype == WLC_E_LINK && status == WLC_E_STATUS_SUCCESS &&
+ reason == WLC_E_REASON_DEAUTH)) {
+ if (!wl_ext_iapsta_other_if_enabled(cur_if->dev)) {
+ wl_ext_mod_timer(&apsta_params->monitor_timer, 0, 0);
+ }
+ } else if ((etype == WLC_E_ASSOC_IND || etype == WLC_E_REASSOC_IND) &&
+ reason == DOT11_SC_SUCCESS) {
+ wl_ext_mod_timer(&apsta_params->monitor_timer, 0, timeout);
+ }
+ }
+}
+#endif /* TPUT_MONITOR */
+
+#ifdef ACS_MONITOR
+static void
+wl_ext_mod_timer_pending(timer_list_compat_t *timer, uint sec, uint msec)
+{
+ uint timeout = sec * 1000 + msec;
+
+ if (timeout && !timer_pending(timer)) {
+ IAPSTA_TRACE("wlan", "timeout=%d\n", timeout);
+ mod_timer(timer, jiffies + msecs_to_jiffies(timeout));
+ }
+}
+
+static bool
+wl_ext_max_prio_if(struct wl_apsta_params *apsta_params,
+ struct wl_if_info *cur_if)
+{
+ struct wl_if_info *tmp_if;
+ wl_prio_t max_prio;
+ uint16 target_chan;
+ int i;
+
+ if (apsta_params->vsdb) {
+ target_chan = cur_if->channel;
+ goto exit;
+ }
+
+ // find the max prio
+ max_prio = cur_if->prio;
+ for (i=0; i<MAX_IF_NUM; i++) {
+ tmp_if = &apsta_params->if_info[i];
+ if (cur_if != tmp_if && wl_get_isam_status(tmp_if, IF_READY) &&
+ tmp_if->prio > max_prio) {
+ target_chan = wl_ext_get_chan(apsta_params, tmp_if->dev);
+ if (target_chan) {
+ return TRUE;
+ }
+ }
+ }
+exit:
+ return FALSE;
+}
+
+static void
+wl_ext_acs_scan(struct wl_apsta_params *apsta_params, struct wl_if_info *cur_if)
+{
+ if (apsta_params->acs & ACS_DRV_BIT) {
+ if (wl_ext_get_chan(apsta_params, cur_if->dev)) {
+ int ret, cur_scan_time;
+ cur_if->escan->autochannel = 1;
+ cur_scan_time = wl_ext_set_scan_time(cur_if->dev, 80,
+ WLC_GET_SCAN_CHANNEL_TIME, WLC_SET_SCAN_CHANNEL_TIME);
+ WL_MSG(cur_if->dev->name, "ACS_SCAN\n");
+ wl_ext_drv_scan(cur_if->dev, WLC_BAND_AUTO, FALSE);
+ if (cur_scan_time) {
+ ret = wl_ext_ioctl(cur_if->dev, WLC_SET_SCAN_CHANNEL_TIME,
+ &cur_scan_time, sizeof(cur_scan_time), 1);
+ }
+ }
+ }
+}
+
+static void
+wl_ext_acs(struct wl_apsta_params *apsta_params, struct wl_if_info *cur_if)
+{
+ uint cur_band;
+ uint16 cur_chan, acs_chan;
+
+ if (apsta_params->acs & ACS_DRV_BIT) {
+ mutex_lock(&apsta_params->usr_sync);
+ cur_chan = wl_ext_get_chan(apsta_params, cur_if->dev);
+ if (cur_chan) {
+ cur_band = WL_GET_BAND(cur_chan);
+ if (cur_band == WLC_BAND_5G)
+ cur_if->channel = cur_if->escan->best_5g_ch;
+ else
+ cur_if->channel = cur_if->escan->best_2g_ch;
+ acs_chan = wl_ext_move_cur_channel(apsta_params, cur_if);
+ if (acs_chan != cur_chan) {
+ WL_MSG(cur_if->dev->name, "move channel %d => %d\n",
+ cur_chan, acs_chan);
+ wl_ext_if_down(apsta_params, cur_if);
+ wl_ext_move_other_channel(apsta_params, cur_if);
+ wl_ext_if_up(apsta_params, cur_if, FALSE, 500);
+ }
+ }
+ mutex_unlock(&apsta_params->usr_sync);
+ }
+}
+
+static void
+wl_acs_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct dhd_pub *dhd;
+ wl_event_msg_t msg;
+
+ if (!dev) {
+ IAPSTA_ERROR("wlan", "dev is not ready\n");
+ return;
+ }
+
+ dhd = dhd_get_pub(dev);
+
+ bzero(&msg, sizeof(wl_event_msg_t));
+ IAPSTA_TRACE(dev->name, "timer expired\n");
+
+ msg.ifidx = hton32(dhd_net2idx(dhd->info, dev));
+ msg.event_type = hton32(WLC_E_RESERVED);
+ msg.reason = hton32(ISAM_RC_AP_ACS);
+ wl_ext_event_send(dhd->event_params, &msg, NULL);
+}
+
+static void
+wl_acs_handler(struct wl_if_info *cur_if, const wl_event_msg_t *e, void *data)
+{
+ struct dhd_pub *dhd = dhd_get_pub(cur_if->dev);
+ struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+ uint acs_tmo = apsta_params->acs_tmo;
+ uint32 etype = ntoh32(e->event_type);
+ uint32 status = ntoh32(e->status);
+ uint32 reason = ntoh32(e->reason);
+
+ if (wl_get_isam_status(cur_if, AP_CREATED)) {
+ if ((etype == WLC_E_SET_SSID && status == WLC_E_STATUS_SUCCESS) ||
+ (etype == WLC_E_LINK && status == WLC_E_STATUS_SUCCESS &&
+ reason == WLC_E_REASON_INITIAL_ASSOC)) {
+ // Link up
+ wl_ext_mod_timer_pending(&cur_if->acs_timer, acs_tmo, 0);
+ }
+ else if ((etype == WLC_E_LINK && reason == WLC_E_LINK_BSSCFG_DIS) ||
+ (etype == WLC_E_LINK && status == WLC_E_STATUS_SUCCESS &&
+ reason == WLC_E_REASON_DEAUTH)) {
+ // Link down
+ wl_ext_mod_timer(&cur_if->acs_timer, 0, 0);
+ cur_if->escan->autochannel = 0;
+ }
+ else if ((etype == WLC_E_ASSOC_IND || etype == WLC_E_REASSOC_IND) &&
+ reason == DOT11_SC_SUCCESS) {
+ // external STA connected
+ wl_ext_mod_timer(&cur_if->acs_timer, 0, 0);
+ }
+ else if (etype == WLC_E_DISASSOC_IND ||
+ etype == WLC_E_DEAUTH_IND ||
+ (etype == WLC_E_DEAUTH && reason != DOT11_RC_RESERVED)) {
+ // external STA disconnected
+ wl_ext_mod_timer_pending(&cur_if->acs_timer, acs_tmo, 0);
+ }
+ else if (etype == WLC_E_RESERVED && reason == ISAM_RC_AP_ACS) {
+ // acs_tmo expired
+ if (!wl_ext_assoclist_num(cur_if->dev) &&
+ !wl_ext_max_prio_if(apsta_params, cur_if)) {
+ wl_ext_acs_scan(apsta_params, cur_if);
+ wl_ext_mod_timer(&cur_if->acs_timer, acs_tmo, 0);
+ } else {
+ wl_ext_mod_timer(&cur_if->acs_timer, 0, 0);
+ }
+ }
+ else if (((etype == WLC_E_ESCAN_RESULT && status == WLC_E_STATUS_SUCCESS) ||
+ (etype == WLC_E_ESCAN_RESULT &&
+ (status == WLC_E_STATUS_ABORT || status == WLC_E_STATUS_NEWSCAN ||
+ status == WLC_E_STATUS_11HQUIET || status == WLC_E_STATUS_CS_ABORT ||
+ status == WLC_E_STATUS_NEWASSOC || status == WLC_E_STATUS_TIMEOUT)))) {
+ // scan complete
+ cur_if->escan->autochannel = 0;
+ if (!wl_ext_assoclist_num(cur_if->dev) &&
+ !wl_ext_max_prio_if(apsta_params, cur_if)) {
+ wl_ext_acs(apsta_params, cur_if);
+ } else {
+ wl_ext_mod_timer(&cur_if->acs_timer, 0, 0);
+ }
+ }
+ }
+}
+
+static void
+wl_acs_detach(struct wl_if_info *cur_if)
+{
+ IAPSTA_TRACE(cur_if->dev->name, "Enter\n");
+ del_timer_sync(&cur_if->acs_timer);
+ if (cur_if->escan) {
+ cur_if->escan = NULL;
+ }
+}
+
+static void
+wl_acs_attach(dhd_pub_t *dhd, struct wl_if_info *cur_if)
+{
+ IAPSTA_TRACE(cur_if->dev->name, "Enter\n");
+ cur_if->escan = dhd->escan;
+ init_timer_compat(&cur_if->acs_timer, wl_acs_timer, cur_if->dev);
+}
+#endif /* ACS_MONITOR */
+
+void
+wl_ext_iapsta_event(struct net_device *dev, void *argu,
+ const wl_event_msg_t *e, void *data)
+{
+ struct wl_apsta_params *apsta_params = (struct wl_apsta_params *)argu;
+ struct wl_if_info *cur_if = NULL;
+#if defined(WLMESH) && defined(WL_ESCAN)
+ struct wl_if_info *tmp_if = NULL;
+ struct wl_if_info *mesh_if = NULL;
+ int i;
+#endif /* WLMESH && WL_ESCAN */
+ uint32 event_type = ntoh32(e->event_type);
+ uint32 status = ntoh32(e->status);
+ uint32 reason = ntoh32(e->reason);
+ uint16 flags = ntoh16(e->flags);
+
+ cur_if = wl_get_cur_if(dev);
+
+#if defined(WLMESH) && defined(WL_ESCAN)
+ for (i=0; i<MAX_IF_NUM; i++) {
+ tmp_if = &apsta_params->if_info[i];
+ if (tmp_if->dev && tmp_if->ifmode == IMESH_MODE) {
+ mesh_if = tmp_if;
+ break;
+ }
+ }
+#endif /* WLMESH && WL_ESCAN */
+ if (!cur_if || !cur_if->dev) {
+ IAPSTA_DBG(dev->name, "ifidx %d is not ready\n", e->ifidx);
+ return;
+ }
+
+ if (cur_if->ifmode == ISTA_MODE || cur_if->ifmode == IGC_MODE) {
+ if (event_type == WLC_E_LINK) {
+ if (!(flags & WLC_EVENT_MSG_LINK)) {
+ WL_MSG(cur_if->ifname,
+ "[%c] Link down with %pM, %s(%d), reason %d\n",
+ cur_if->prefix, &e->addr, bcmevent_get_name(event_type),
+ event_type, reason);
+#ifdef SET_CARRIER
+ wl_ext_net_setcarrier(cur_if, FALSE, FALSE);
+#endif /* SET_CARRIER */
+ wl_clr_isam_status(cur_if, STA_CONNECTED);
+#if defined(WLMESH) && defined(WL_ESCAN)
+ if (mesh_if && apsta_params->macs)
+ wl_mesh_clear_mesh_info(apsta_params, mesh_if, TRUE);
+#endif /* WLMESH && WL_ESCAN */
+ } else {
+ WL_MSG(cur_if->ifname, "[%c] Link UP with %pM\n",
+ cur_if->prefix, &e->addr);
+#ifdef SET_CARRIER
+ wl_ext_net_setcarrier(cur_if, TRUE, FALSE);
+#endif /* SET_CARRIER */
+ wl_set_isam_status(cur_if, STA_CONNECTED);
+#if defined(WLMESH) && defined(WL_ESCAN)
+ if (mesh_if && apsta_params->macs)
+ wl_mesh_update_master_info(apsta_params, mesh_if);
+#endif /* WLMESH && WL_ESCAN */
+ }
+ wl_clr_isam_status(cur_if, STA_CONNECTING);
+ wake_up_interruptible(&apsta_params->netif_change_event);
+#ifdef PROPTX_MAXCOUNT
+ wl_ext_update_wlfc_maxcount(apsta_params->dhd);
+#endif /* PROPTX_MAXCOUNT */
+ }
+ else if (event_type == WLC_E_SET_SSID && status != WLC_E_STATUS_SUCCESS) {
+ WL_MSG(cur_if->ifname,
+ "connect failed event=%d, reason=%d, status=%d\n",
+ event_type, reason, status);
+ wl_clr_isam_status(cur_if, STA_CONNECTING);
+ wake_up_interruptible(&apsta_params->netif_change_event);
+#if defined(WLMESH) && defined(WL_ESCAN)
+ if (mesh_if && apsta_params->macs)
+ wl_mesh_clear_mesh_info(apsta_params, mesh_if, TRUE);
+#endif /* WLMESH && WL_ESCAN */
+#ifdef PROPTX_MAXCOUNT
+ wl_ext_update_wlfc_maxcount(apsta_params->dhd);
+#endif /* PROPTX_MAXCOUNT */
+ }
+ else if (event_type == WLC_E_DEAUTH || event_type == WLC_E_DEAUTH_IND ||
+ event_type == WLC_E_DISASSOC || event_type == WLC_E_DISASSOC_IND) {
+ WL_MSG(cur_if->ifname, "[%c] Link down with %pM, %s(%d), reason %d\n",
+ cur_if->prefix, &e->addr, bcmevent_get_name(event_type),
+ event_type, reason);
+#ifdef SET_CARRIER
+ wl_ext_net_setcarrier(cur_if, FALSE, FALSE);
+#endif /* SET_CARRIER */
+#if defined(WLMESH) && defined(WL_ESCAN)
+ if (mesh_if && apsta_params->macs)
+ wl_mesh_clear_mesh_info(apsta_params, mesh_if, TRUE);
+#endif /* WLMESH && WL_ESCAN */
+ }
+ }
+ else if (cur_if->ifmode == IAP_MODE || cur_if->ifmode == IGO_MODE ||
+ cur_if->ifmode == IMESH_MODE) {
+ if ((event_type == WLC_E_SET_SSID && status == WLC_E_STATUS_SUCCESS) ||
+ (event_type == WLC_E_LINK && status == WLC_E_STATUS_SUCCESS &&
+ reason == WLC_E_REASON_INITIAL_ASSOC)) {
+ if (wl_get_isam_status(cur_if, AP_CREATING)) {
+ WL_MSG(cur_if->ifname, "[%c] Link up (etype=%d)\n",
+ cur_if->prefix, event_type);
+ wl_set_isam_status(cur_if, AP_CREATED);
+ wake_up_interruptible(&apsta_params->netif_change_event);
+ } else {
+ wl_set_isam_status(cur_if, AP_CREATED);
+ WL_MSG(cur_if->ifname, "[%c] Link up w/o creating? (etype=%d)\n",
+ cur_if->prefix, event_type);
+ }
+#ifdef SET_CARRIER
+ wl_ext_net_setcarrier(cur_if, TRUE, FALSE);
+#endif /* SET_CARRIER */
+#ifdef PROPTX_MAXCOUNT
+ wl_ext_update_wlfc_maxcount(apsta_params->dhd);
+#endif /* PROPTX_MAXCOUNT */
+ }
+ else if ((event_type == WLC_E_LINK && reason == WLC_E_LINK_BSSCFG_DIS) ||
+ (event_type == WLC_E_LINK && status == WLC_E_STATUS_SUCCESS &&
+ reason == WLC_E_REASON_DEAUTH)) {
+ wl_clr_isam_status(cur_if, AP_CREATED);
+ WL_MSG(cur_if->ifname, "[%c] Link down, reason=%d\n",
+ cur_if->prefix, reason);
+#ifdef SET_CARRIER
+ wl_ext_net_setcarrier(cur_if, FALSE, FALSE);
+#endif /* SET_CARRIER */
+#ifdef PROPTX_MAXCOUNT
+ wl_ext_update_wlfc_maxcount(apsta_params->dhd);
+#endif /* PROPTX_MAXCOUNT */
+ }
+ else if ((event_type == WLC_E_ASSOC_IND || event_type == WLC_E_REASSOC_IND) &&
+ reason == DOT11_SC_SUCCESS) {
+ WL_MSG(cur_if->ifname, "[%c] connected device %pM\n",
+ cur_if->prefix, &e->addr);
+ wl_ext_isam_status(cur_if->dev, NULL, 0);
+ }
+ else if (event_type == WLC_E_DISASSOC_IND ||
+ event_type == WLC_E_DEAUTH_IND ||
+ (event_type == WLC_E_DEAUTH && reason != DOT11_RC_RESERVED)) {
+ WL_MSG_RLMT(cur_if->ifname, &e->addr, ETHER_ADDR_LEN,
+ "[%c] disconnected device %pM, %s(%d), reason=%d\n",
+ cur_if->prefix, &e->addr, bcmevent_get_name(event_type),
+ event_type, reason);
+ wl_ext_isam_status(cur_if->dev, NULL, 0);
+ }
+#if defined(WLMESH) && defined(WL_ESCAN)
+ if (cur_if->ifmode == IMESH_MODE && apsta_params->macs)
+ wl_mesh_event_handler(apsta_params, cur_if, e, data);
+#endif /* WLMESH && WL_ESCAN */
+ }
+
+#ifdef TPUT_MONITOR
+ if (apsta_params->dhd->conf->tput_monitor_ms)
+ wl_tput_monitor_handler(apsta_params, cur_if, e, data);
+#endif /* TPUT_MONITOR */
+
+#ifdef ACS_MONITOR
+ if ((apsta_params->acs & ACS_DRV_BIT) && apsta_params->acs_tmo)
+ wl_acs_handler(cur_if, e, data);
+#endif /* ACS_MONITOR */
+#ifdef EAPOL_RESEND
+ wl_resend_eapol_handler(cur_if, e, data);
+#endif /* EAPOL_RESEND */
+
+ return;
+}
+
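+/*
+ * wl_ext_parse_config: parse one "ifname X ssid Y ..." clause of an
+ * iapsta_config command. Each known keyword is located with strstr(), the
+ * keywords are sorted by position in the string, and the text between two
+ * keywords becomes the value of the first. Parsing stops at the next
+ * " ifname " keyword, which is returned through pick_next so the caller can
+ * handle the following clause.
+ */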
+static int
+wl_ext_parse_config(struct wl_if_info *cur_if, char *command, char **pick_next)
+{
+ char *pch, *pick_tmp;
+ char name[20], data[100];
+ int i, j, len;
+ char *ifname_head = NULL;
+
+ typedef struct config_map_t {
+ char name[20];
+ char *head;
+ char *tail;
+ } config_map_t;
+
+ config_map_t config_map [] = {
+ {" ifname ", NULL, NULL},
+ {" ssid ", NULL, NULL},
+ {" bssid ", NULL, NULL},
+ {" bgnmode ", NULL, NULL},
+ {" hidden ", NULL, NULL},
+ {" maxassoc ", NULL, NULL},
+ {" chan ", NULL, NULL},
+ {" amode ", NULL, NULL},
+ {" emode ", NULL, NULL},
+ {" key ", NULL, NULL},
+ };
+ config_map_t *row, *row_prev;
+
+ pick_tmp = command;
+
+ // reset head and tail
+ for (i = 0; i < sizeof(config_map)/sizeof(config_map[0]); i++) {
+ row = &config_map[i];
+ row->head = NULL;
+ row->tail = pick_tmp + strlen(pick_tmp);
+ }
+
+ // pick head
+ for (i = 0; i < sizeof(config_map)/sizeof(config_map[0]); i++) {
+ row = &config_map[i];
+ pch = strstr(pick_tmp, row->name);
+ if (pch) {
+ row->head = pch;
+ }
+ }
+
+ // sort by head
+ for (i = 0; i < sizeof(config_map)/sizeof(config_map[0]) - 1; i++) {
+ row_prev = &config_map[i];
+ for (j = i+1; j < sizeof(config_map)/sizeof(config_map[0]); j++) {
+ row = &config_map[j];
+ if (row->head < row_prev->head) {
+ strcpy(name, row_prev->name);
+ strcpy(row_prev->name, row->name);
+ strcpy(row->name, name);
+ pch = row_prev->head;
+ row_prev->head = row->head;
+ row->head = pch;
+ }
+ }
+ }
+
+ // pick tail
+ for (i = 0; i < sizeof(config_map)/sizeof(config_map[0]) - 1; i++) {
+ row_prev = &config_map[i];
+ row = &config_map[i+1];
+ if (row_prev->head) {
+ row_prev->tail = row->head;
+ }
+ }
+
+ // remove name from head
+ for (i = 0; i < sizeof(config_map)/sizeof(config_map[0]); i++) {
+ row = &config_map[i];
+ if (row->head) {
+ if (!strcmp(row->name, " ifname ")) {
+ ifname_head = row->head + 1;
+ break;
+ }
+ row->head += strlen(row->name);
+ }
+ }
+
+ for (i = 0; i < sizeof(config_map)/sizeof(config_map[0]); i++) {
+ row = &config_map[i];
+ if (row->head) {
+ memset(data, 0, sizeof(data));
+			if (row->tail && row->tail > row->head) {
+				/* bound the copy so an oversized value cannot overflow data[] */
+				strncpy(data, row->head,
+					MIN((size_t)(row->tail-row->head), sizeof(data)-1));
+			} else {
+				strncpy(data, row->head, sizeof(data)-1);
+			}
+ pick_tmp = data;
+
+ if (!strcmp(row->name, " ifname ")) {
+ break;
+ } else if (!strcmp(row->name, " ssid ")) {
+ len = strlen(pick_tmp);
+ memset(cur_if->ssid, 0, sizeof(cur_if->ssid));
+				if (len >= 2 && pick_tmp[0] == '"' && pick_tmp[len-1] == '"')
+ strncpy(cur_if->ssid, &pick_tmp[1], len-2);
+ else
+ strcpy(cur_if->ssid, pick_tmp);
+ } else if (!strcmp(row->name, " bssid ")) {
+ pch = bcmstrtok(&pick_tmp, ": ", 0);
+ for (j=0; j<6 && pch; j++) {
+ ((u8 *)&cur_if->bssid)[j] = (int)simple_strtol(pch, NULL, 16);
+ pch = bcmstrtok(&pick_tmp, ": ", 0);
+ }
+ } else if (!strcmp(row->name, " bgnmode ")) {
+ if (!strcmp(pick_tmp, "b"))
+ cur_if->bgnmode = IEEE80211B;
+ else if (!strcmp(pick_tmp, "g"))
+ cur_if->bgnmode = IEEE80211G;
+ else if (!strcmp(pick_tmp, "bg"))
+ cur_if->bgnmode = IEEE80211BG;
+ else if (!strcmp(pick_tmp, "bgn"))
+ cur_if->bgnmode = IEEE80211BGN;
+ else if (!strcmp(pick_tmp, "bgnac"))
+ cur_if->bgnmode = IEEE80211BGNAC;
+ else {
+ IAPSTA_ERROR(cur_if->dev->name, "bgnmode [b|g|bg|bgn|bgnac]\n");
+ return -1;
+ }
+ } else if (!strcmp(row->name, " hidden ")) {
+ if (!strcmp(pick_tmp, "n"))
+ cur_if->hidden = 0;
+ else if (!strcmp(pick_tmp, "y"))
+ cur_if->hidden = 1;
+ else {
+ IAPSTA_ERROR(cur_if->dev->name, "hidden [y|n]\n");
+ return -1;
+ }
+ } else if (!strcmp(row->name, " maxassoc ")) {
+ cur_if->maxassoc = (int)simple_strtol(pick_tmp, NULL, 10);
+ } else if (!strcmp(row->name, " chan ")) {
+ cur_if->channel = (int)simple_strtol(pick_tmp, NULL, 10);
+ } else if (!strcmp(row->name, " amode ")) {
+ if (!strcmp(pick_tmp, "open"))
+ cur_if->amode = AUTH_OPEN;
+ else if (!strcmp(pick_tmp, "shared"))
+ cur_if->amode = AUTH_SHARED;
+ else if (!strcmp(pick_tmp, "wpapsk"))
+ cur_if->amode = AUTH_WPAPSK;
+ else if (!strcmp(pick_tmp, "wpa2psk"))
+ cur_if->amode = AUTH_WPA2PSK;
+ else if (!strcmp(pick_tmp, "wpawpa2psk"))
+ cur_if->amode = AUTH_WPAWPA2PSK;
+ else if (!strcmp(pick_tmp, "sae"))
+ cur_if->amode = AUTH_SAE;
+ else {
+ IAPSTA_ERROR(cur_if->dev->name, "amode [open|shared|wpapsk|wpa2psk|wpawpa2psk]\n");
+ return -1;
+ }
+ } else if (!strcmp(row->name, " emode ")) {
+ if (!strcmp(pick_tmp, "none"))
+ cur_if->emode = ENC_NONE;
+ else if (!strcmp(pick_tmp, "wep"))
+ cur_if->emode = ENC_WEP;
+ else if (!strcmp(pick_tmp, "tkip"))
+ cur_if->emode = ENC_TKIP;
+ else if (!strcmp(pick_tmp, "aes"))
+ cur_if->emode = ENC_AES;
+ else if (!strcmp(pick_tmp, "tkipaes"))
+ cur_if->emode = ENC_TKIPAES;
+ else {
+ IAPSTA_ERROR(cur_if->dev->name, "emode [none|wep|tkip|aes|tkipaes]\n");
+ return -1;
+ }
+ } else if (!strcmp(row->name, " key ")) {
+ len = strlen(pick_tmp);
+ memset(cur_if->key, 0, sizeof(cur_if->key));
+				if (len >= 2 && pick_tmp[0] == '"' && pick_tmp[len-1] == '"')
+ strncpy(cur_if->key, &pick_tmp[1], len-2);
+ else
+ strcpy(cur_if->key, pick_tmp);
+ }
+ }
+ }
+
+ *pick_next = ifname_head;
+ return 0;
+}
+
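+/*
+ * wl_ext_iapsta_preinit: apply per-interface defaults (priority, prefix,
+ * default SSID/channel) and the mode-specific firmware setup (apsta, mpc,
+ * mbcn/mbss, rsdb_mode, WLC_SET_AP) before any interface is enabled.
+ */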
+static void
+wl_ext_iapsta_preinit(struct net_device *dev, struct wl_apsta_params *apsta_params)
+{
+ struct dhd_pub *dhd;
+ apstamode_t apstamode = apsta_params->apstamode;
+ struct wl_if_info *cur_if;
+ s8 iovar_buf[WLC_IOCTL_SMLEN];
+ s32 val = 0;
+ int i;
+
+ dhd = dhd_get_pub(dev);
+
+ for (i=0; i<MAX_IF_NUM; i++) {
+ cur_if = &apsta_params->if_info[i];
+ if (i >= 1 && !strlen(cur_if->ifname))
+ snprintf(cur_if->ifname, IFNAMSIZ, "wlan%d", i);
+ if (cur_if->ifmode == ISTA_MODE) {
+ cur_if->channel = 0;
+ cur_if->maxassoc = -1;
+ cur_if->prio = PRIO_STA;
+ cur_if->vsdb = TRUE;
+ cur_if->prefix = 'S';
+ snprintf(cur_if->ssid, DOT11_MAX_SSID_LEN, "ttt_sta");
+ } else if (cur_if->ifmode == IAP_MODE) {
+ cur_if->channel = 1;
+ cur_if->maxassoc = -1;
+ cur_if->prio = PRIO_AP;
+ cur_if->vsdb = FALSE;
+ cur_if->prefix = 'A';
+ snprintf(cur_if->ssid, DOT11_MAX_SSID_LEN, "ttt_ap");
+#ifdef WLMESH
+ } else if (cur_if->ifmode == IMESH_MODE) {
+ cur_if->channel = 1;
+ cur_if->maxassoc = -1;
+ cur_if->prio = PRIO_MESH;
+ cur_if->vsdb = FALSE;
+ cur_if->prefix = 'M';
+ snprintf(cur_if->ssid, DOT11_MAX_SSID_LEN, "ttt_mesh");
+#ifdef WL_ESCAN
+ if (i == 0 && apsta_params->macs)
+ wl_mesh_escan_attach(dhd, cur_if);
+#endif /* WL_ESCAN */
+#endif /* WLMESH */
+ }
+ }
+
+ if (FW_SUPPORTED(dhd, rsdb)) {
+ if (apstamode == IDUALAP_MODE)
+ apsta_params->rsdb = -1;
+ else if (apstamode == ISTAAPAP_MODE)
+ apsta_params->rsdb = 0;
+ if (apstamode == ISTAAPAP_MODE || apstamode == IDUALAP_MODE ||
+ apstamode == IMESHONLY_MODE || apstamode == ISTAMESH_MODE ||
+ apstamode == IMESHAP_MODE || apstamode == ISTAAPMESH_MODE ||
+ apstamode == IMESHAPAP_MODE) {
+ wl_config_t rsdb_mode_cfg = {0, 0};
+ rsdb_mode_cfg.config = apsta_params->rsdb;
+ IAPSTA_INFO(dev->name, "set rsdb_mode %d\n", rsdb_mode_cfg.config);
+ wl_ext_ioctl(dev, WLC_DOWN, NULL, 0, 1);
+ wl_ext_iovar_setbuf(dev, "rsdb_mode", &rsdb_mode_cfg,
+ sizeof(rsdb_mode_cfg), iovar_buf, sizeof(iovar_buf), NULL);
+ wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1);
+ }
+ } else {
+ apsta_params->rsdb = 0;
+ }
+
+ if (apstamode == ISTAONLY_MODE) {
+ wl_ext_ioctl(dev, WLC_DOWN, NULL, 0, 1);
+ wl_ext_iovar_setint(dev, "apsta", 1); // keep 1 as we set in dhd_preinit_ioctls
+ // don't set WLC_SET_AP to 0, some parameters will be reset, such as bcn_timeout and roam_off
+ wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1);
+ } else if (apstamode == IAPONLY_MODE) {
+ wl_ext_ioctl(dev, WLC_DOWN, NULL, 0, 1);
+#ifdef ARP_OFFLOAD_SUPPORT
+ /* IF SoftAP is enabled, disable arpoe */
+ dhd_arp_offload_set(dhd, 0);
+ dhd_arp_offload_enable(dhd, FALSE);
+#endif /* ARP_OFFLOAD_SUPPORT */
+ wl_ext_iovar_setint(dev, "mpc", 0);
+ wl_ext_iovar_setint(dev, "apsta", 0);
+ val = 1;
+ wl_ext_ioctl(dev, WLC_SET_AP, &val, sizeof(val), 1);
+#ifdef PROP_TXSTATUS_VSDB
+#if defined(BCMSDIO)
+ if (!(FW_SUPPORTED(dhd, rsdb)) && !disable_proptx) {
+ bool enabled;
+ dhd_wlfc_get_enable(dhd, &enabled);
+ if (!enabled) {
+ dhd_wlfc_init(dhd);
+ wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1);
+ }
+ }
+#endif /* BCMSDIO */
+#endif /* PROP_TXSTATUS_VSDB */
+ }
+ else if (apstamode == ISTAAP_MODE) {
+ wl_ext_ioctl(dev, WLC_DOWN, NULL, 0, 1);
+ wl_ext_iovar_setint(dev, "mpc", 0);
+ wl_ext_iovar_setint(dev, "apsta", 1);
+ wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1);
+ }
+ else if (apstamode == ISTAGO_MODE) {
+ wl_ext_ioctl(dev, WLC_DOWN, NULL, 0, 1);
+ wl_ext_iovar_setint(dev, "apsta", 1);
+ wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1);
+ }
+	else if (apstamode == ISTASTA_MODE) {
+		/* dual-STA: no extra firmware setup needed here */
+	}
+ else if (apstamode == IDUALAP_MODE) {
+ wl_ext_ioctl(dev, WLC_DOWN, NULL, 0, 1);
+ /* IF SoftAP is enabled, disable arpoe or wlan1 will ping fail */
+#ifdef ARP_OFFLOAD_SUPPORT
+ /* IF SoftAP is enabled, disable arpoe */
+ dhd_arp_offload_set(dhd, 0);
+ dhd_arp_offload_enable(dhd, FALSE);
+#endif /* ARP_OFFLOAD_SUPPORT */
+ wl_ext_iovar_setint(dev, "mpc", 0);
+ wl_ext_iovar_setint(dev, "mbcn", 1);
+ wl_ext_iovar_setint(dev, "apsta", 0);
+ wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1);
+ val = 1;
+ wl_ext_ioctl(dev, WLC_SET_AP, &val, sizeof(val), 1);
+ }
+ else if (apstamode == ISTAAPAP_MODE) {
+ wl_ext_ioctl(dev, WLC_DOWN, NULL, 0, 1);
+ wl_ext_iovar_setint(dev, "mpc", 0);
+ wl_ext_iovar_setint(dev, "mbss", 1);
+ wl_ext_iovar_setint(dev, "apsta", 1); // keep 1 as we set in dhd_preinit_ioctls
+ wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1);
+ // don't set WLC_SET_AP to 0, some parameters will be reset, such as bcn_timeout and roam_off
+ }
+#ifdef WLMESH
+ else if (apstamode == IMESHONLY_MODE || apstamode == ISTAMESH_MODE ||
+ apstamode == IMESHAP_MODE || apstamode == ISTAAPMESH_MODE ||
+ apstamode == IMESHAPAP_MODE) {
+ int pm = 0;
+ wl_ext_ioctl(dev, WLC_DOWN, NULL, 0, 1);
+ wl_ext_iovar_setint(dev, "mpc", 0);
+ if (apstamode == IMESHONLY_MODE)
+ wl_ext_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm), 1);
+ else
+ wl_ext_iovar_setint(dev, "mbcn", 1);
+ wl_ext_iovar_setint(dev, "apsta", 1); // keep 1 as we set in dhd_preinit_ioctls
+ wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1);
+ // don't set WLC_SET_AP to 0, some parameters will be reset, such as bcn_timeout and roam_off
+ }
+#endif /* WLMESH */
+
+ wl_ext_get_ioctl_ver(dev, &apsta_params->ioctl_ver);
+ apsta_params->init = TRUE;
+
+ WL_MSG(dev->name, "apstamode=%d\n", apstamode);
+}
+
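+/*
+ * wl_ext_disable_iface: disable a virtual interface by name. A STA is
+ * disassociated, an AP/mesh interface first deauthenticates all clients,
+ * then the BSS is torn down in an apstamode-specific way.
+ */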
+static int
+wl_ext_disable_iface(struct net_device *dev, char *ifname)
+{
+ struct dhd_pub *dhd = dhd_get_pub(dev);
+ int i;
+ s8 iovar_buf[WLC_IOCTL_SMLEN];
+ wlc_ssid_t ssid = { 0, {0} };
+ scb_val_t scbval;
+ struct {
+ s32 cfg;
+ s32 val;
+ } bss_setbuf;
+ struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+ apstamode_t apstamode = apsta_params->apstamode;
+ struct wl_if_info *cur_if = NULL, *tmp_if = NULL;
+
+ for (i=0; i<MAX_IF_NUM; i++) {
+ tmp_if = &apsta_params->if_info[i];
+ if (tmp_if->dev && !strcmp(tmp_if->dev->name, ifname)) {
+ cur_if = tmp_if;
+ break;
+ }
+ }
+ if (!cur_if) {
+ IAPSTA_ERROR(dev->name, "wrong ifname=%s or dev not ready\n", ifname);
+ return -1;
+ }
+
+ mutex_lock(&apsta_params->usr_sync);
+ WL_MSG(ifname, "[%c] Disabling...\n", cur_if->prefix);
+
+ if (cur_if->ifmode == ISTA_MODE) {
+ wl_ext_ioctl(cur_if->dev, WLC_DISASSOC, NULL, 0, 1);
+ wl_ext_add_remove_pm_enable_work(dev, FALSE);
+ } else if (cur_if->ifmode == IAP_MODE || cur_if->ifmode == IMESH_MODE) {
+ // deauthenticate all STA first
+ memcpy(scbval.ea.octet, &ether_bcast, ETHER_ADDR_LEN);
+ wl_ext_ioctl(cur_if->dev, WLC_SCB_DEAUTHENTICATE, &scbval.ea, ETHER_ADDR_LEN, 1);
+ }
+
+ if (apstamode == IAPONLY_MODE || apstamode == IMESHONLY_MODE) {
+ wl_ext_ioctl(dev, WLC_DOWN, NULL, 0, 1);
+ wl_ext_ioctl(dev, WLC_SET_SSID, &ssid, sizeof(ssid), 1); // reset ssid
+ wl_ext_iovar_setint(dev, "mpc", 1);
+ } else if ((apstamode==ISTAAP_MODE || apstamode==ISTAGO_MODE) &&
+ cur_if->ifmode == IAP_MODE) {
+ bss_setbuf.cfg = 0xffffffff;
+ bss_setbuf.val = htod32(0);
+ wl_ext_iovar_setbuf(cur_if->dev, "bss", &bss_setbuf, sizeof(bss_setbuf),
+ iovar_buf, WLC_IOCTL_SMLEN, NULL);
+ wl_ext_iovar_setint(dev, "mpc", 1);
+#ifdef ARP_OFFLOAD_SUPPORT
+ /* IF SoftAP is disabled, enable arpoe back for STA mode. */
+ dhd_arp_offload_set(dhd, dhd_arp_mode);
+ dhd_arp_offload_enable(dhd, TRUE);
+#endif /* ARP_OFFLOAD_SUPPORT */
+#ifdef PROP_TXSTATUS_VSDB
+#if defined(BCMSDIO)
+ if (dhd->conf->disable_proptx!=0) {
+ bool enabled;
+ dhd_wlfc_get_enable(dhd, &enabled);
+ if (enabled) {
+ dhd_wlfc_deinit(dhd);
+ }
+ }
+#endif /* BCMSDIO */
+#endif /* PROP_TXSTATUS_VSDB */
+ }
+ else if (apstamode == IDUALAP_MODE || apstamode == ISTAAPAP_MODE) {
+ bss_setbuf.cfg = 0xffffffff;
+ bss_setbuf.val = htod32(0);
+ wl_ext_iovar_setbuf(cur_if->dev, "bss", &bss_setbuf, sizeof(bss_setbuf),
+ iovar_buf, WLC_IOCTL_SMLEN, NULL);
+#ifdef WLMESH
+ } else if (apstamode == ISTAMESH_MODE || apstamode == IMESHAP_MODE ||
+ apstamode == ISTAAPMESH_MODE || apstamode == IMESHAPAP_MODE) {
+ bss_setbuf.cfg = 0xffffffff;
+ bss_setbuf.val = htod32(0);
+ wl_ext_iovar_setbuf(cur_if->dev, "bss", &bss_setbuf, sizeof(bss_setbuf),
+ iovar_buf, WLC_IOCTL_SMLEN, NULL);
+ if (cur_if->ifmode == IMESH_MODE) {
+ int scan_assoc_time = DHD_SCAN_ASSOC_ACTIVE_TIME;
+ for (i=0; i<MAX_IF_NUM; i++) {
+ tmp_if = &apsta_params->if_info[i];
+ if (tmp_if->dev && tmp_if->ifmode == ISTA_MODE) {
+ wl_ext_ioctl(tmp_if->dev, WLC_SET_SCAN_CHANNEL_TIME,
+ &scan_assoc_time, sizeof(scan_assoc_time), 1);
+ }
+ }
+ }
+#endif /* WLMESH */
+ }
+
+ wl_clr_isam_status(cur_if, AP_CREATED);
+
+ WL_MSG(ifname, "[%c] Exit\n", cur_if->prefix);
+ mutex_unlock(&apsta_params->usr_sync);
+ return 0;
+}
+
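+/*
+ * wl_ext_enable_iface: bring a virtual interface up by name. Optionally runs
+ * ACS to pick a channel, resolves channel conflicts with other active
+ * interfaces, programs SSID and security, then starts the BSS and, for
+ * AP/mesh, waits for the link-up event before returning.
+ */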
+static int
+wl_ext_enable_iface(struct net_device *dev, char *ifname, int wait_up, bool lock)
+{
+ struct dhd_pub *dhd = dhd_get_pub(dev);
+ int i, ret = 0;
+ s8 iovar_buf[WLC_IOCTL_SMLEN];
+ wlc_ssid_t ssid = { 0, {0} };
+ chanspec_t fw_chspec;
+ struct {
+ s32 cfg;
+ s32 val;
+ } bss_setbuf;
+ struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+ apstamode_t apstamode = apsta_params->apstamode;
+ struct wl_if_info *cur_if = NULL, *tmp_if = NULL;
+ uint16 cur_chan;
+ struct wl_conn_info conn_info;
+	long timeout; /* wait_event_interruptible_timeout() returns long and can be negative */
+
+ for (i=0; i<MAX_IF_NUM; i++) {
+ tmp_if = &apsta_params->if_info[i];
+ if (tmp_if->dev && !strcmp(tmp_if->dev->name, ifname)) {
+ cur_if = tmp_if;
+ break;
+ }
+ }
+ if (!cur_if) {
+ IAPSTA_ERROR(dev->name, "wrong ifname=%s or dev not ready\n", ifname);
+ return -1;
+ }
+
+ if (lock)
+ mutex_lock(&apsta_params->usr_sync);
+
+ if (cur_if->ifmode == ISTA_MODE) {
+ wl_set_isam_status(cur_if, STA_CONNECTING);
+ } else if (cur_if->ifmode == IAP_MODE || cur_if->ifmode == IMESH_MODE) {
+ wl_set_isam_status(cur_if, AP_CREATING);
+ }
+
+ wl_ext_isam_status(cur_if->dev, NULL, 0);
+ WL_MSG(ifname, "[%c] Enabling...\n", cur_if->prefix);
+
+ wl_ext_wait_other_enabling(apsta_params, cur_if);
+
+ if (wl_ext_master_if(cur_if) && apsta_params->acs) {
+ uint16 chan_2g, chan_5g;
+ uint auto_band;
+ auto_band = WL_GET_BAND(cur_if->channel);
+ wl_ext_get_default_chan(cur_if->dev, &chan_2g, &chan_5g, TRUE);
+ if ((chan_2g && auto_band == WLC_BAND_2G) ||
+ (chan_5g && auto_band == WLC_BAND_5G)) {
+ cur_if->channel = wl_ext_autochannel(cur_if->dev, apsta_params->acs,
+ auto_band);
+ } else {
+ IAPSTA_ERROR(ifname, "invalid channel\n");
+ ret = -1;
+ goto exit;
+ }
+ }
+
+ wl_ext_move_cur_channel(apsta_params, cur_if);
+
+ if (wl_ext_master_if(cur_if) && !cur_if->channel) {
+ IAPSTA_ERROR(ifname, "skip channel 0\n");
+ ret = -1;
+ goto exit;
+ }
+
+ cur_chan = wl_ext_get_chan(apsta_params, cur_if->dev);
+ if (cur_chan) {
+ IAPSTA_INFO(cur_if->ifname, "Associated\n");
+ if (cur_chan != cur_if->channel) {
+ wl_ext_trigger_csa(apsta_params, cur_if);
+ }
+ goto exit;
+ }
+ if (cur_if->ifmode == ISTA_MODE) {
+ wl_clr_isam_status(cur_if, STA_CONNECTED);
+ } else if (cur_if->ifmode == IAP_MODE || cur_if->ifmode == IMESH_MODE) {
+ wl_clr_isam_status(cur_if, AP_CREATED);
+ }
+
+ wl_ext_move_other_channel(apsta_params, cur_if);
+
+ if (cur_if->ifidx > 0) {
+ wl_ext_iovar_setbuf(cur_if->dev, "cur_etheraddr", (u8 *)cur_if->dev->dev_addr,
+ ETHER_ADDR_LEN, iovar_buf, WLC_IOCTL_SMLEN, NULL);
+ }
+
+ // set ssid for AP
+ ssid.SSID_len = strlen(cur_if->ssid);
+ memcpy(ssid.SSID, cur_if->ssid, ssid.SSID_len);
+ if (cur_if->ifmode == IAP_MODE || cur_if->ifmode == IMESH_MODE) {
+ wl_ext_iovar_setint(dev, "mpc", 0);
+ if (apstamode == IAPONLY_MODE || apstamode == IMESHONLY_MODE) {
+ wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1);
+ } else if (apstamode==ISTAAP_MODE || apstamode==ISTAGO_MODE) {
+ wl_ext_iovar_setbuf_bsscfg(cur_if->dev, "ssid", &ssid, sizeof(ssid),
+ iovar_buf, WLC_IOCTL_SMLEN, cur_if->bssidx, NULL);
+ }
+ }
+
+ if (wl_ext_master_if(cur_if)) {
+ wl_ext_set_bgnmode(cur_if);
+ if (!cur_if->channel) {
+ cur_if->channel = 1;
+ }
+ ret = wl_ext_set_chanspec(cur_if->dev, apsta_params->ioctl_ver,
+ cur_if->channel, &fw_chspec);
+ if (ret)
+ goto exit;
+ }
+
+ wl_ext_set_amode(cur_if);
+ wl_ext_set_emode(apsta_params, cur_if);
+
+	if (cur_if->ifmode == ISTA_MODE) {
+		memset(&conn_info, 0, sizeof(conn_info)); /* clear stack garbage before filling */
+		conn_info.bssidx = cur_if->bssidx;
+ conn_info.channel = cur_if->channel;
+ memcpy(conn_info.ssid.SSID, cur_if->ssid, strlen(cur_if->ssid));
+ conn_info.ssid.SSID_len = strlen(cur_if->ssid);
+ memcpy(&conn_info.bssid, &cur_if->bssid, ETHER_ADDR_LEN);
+ }
+ if (cur_if->ifmode == IAP_MODE) {
+ if (cur_if->maxassoc >= 0)
+ wl_ext_iovar_setint(dev, "maxassoc", cur_if->maxassoc);
+ // terence: fix me, hidden does not work in dualAP mode
+ if (cur_if->hidden > 0) {
+ wl_ext_ioctl(cur_if->dev, WLC_SET_CLOSED, &cur_if->hidden,
+ sizeof(cur_if->hidden), 1);
+ WL_MSG(ifname, "[%c] Broadcast SSID: %s\n",
+ cur_if->prefix, cur_if->hidden ? "OFF":"ON");
+ }
+ }
+
+ if (apstamode == ISTAONLY_MODE) {
+ wl_ext_connect(cur_if->dev, &conn_info);
+ } else if (apstamode == IAPONLY_MODE) {
+ wl_ext_ioctl(cur_if->dev, WLC_SET_SSID, &ssid, sizeof(ssid), 1);
+ wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1);
+ } else if (apstamode == ISTAAP_MODE || apstamode == ISTAGO_MODE) {
+ if (cur_if->ifmode == ISTA_MODE) {
+ wl_ext_connect(cur_if->dev, &conn_info);
+ } else {
+ if (FW_SUPPORTED(dhd, rsdb)) {
+ wl_ext_ioctl(cur_if->dev, WLC_SET_SSID, &ssid, sizeof(ssid), 1);
+ } else {
+ bss_setbuf.cfg = htod32(cur_if->bssidx);
+ bss_setbuf.val = htod32(1);
+ wl_ext_iovar_setbuf(cur_if->dev, "bss", &bss_setbuf,
+ sizeof(bss_setbuf), iovar_buf, WLC_IOCTL_SMLEN, NULL);
+ }
+#ifdef ARP_OFFLOAD_SUPPORT
+ /* IF SoftAP is enabled, disable arpoe */
+ dhd_arp_offload_set(dhd, 0);
+ dhd_arp_offload_enable(dhd, FALSE);
+#endif /* ARP_OFFLOAD_SUPPORT */
+#ifdef PROP_TXSTATUS_VSDB
+#if defined(BCMSDIO)
+ if (!(FW_SUPPORTED(dhd, rsdb)) && !disable_proptx) {
+ bool enabled;
+ dhd_wlfc_get_enable(dhd, &enabled);
+ if (!enabled) {
+ dhd_wlfc_init(dhd);
+ wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1);
+ }
+ }
+#endif /* BCMSDIO */
+#endif /* PROP_TXSTATUS_VSDB */
+ }
+ }
+ else if (apstamode == IDUALAP_MODE) {
+ wl_ext_ioctl(cur_if->dev, WLC_SET_SSID, &ssid, sizeof(ssid), 1);
+ } else if (apstamode == ISTAAPAP_MODE) {
+ if (cur_if->ifmode == ISTA_MODE) {
+ wl_ext_connect(cur_if->dev, &conn_info);
+ } else if (cur_if->ifmode == IAP_MODE) {
+ wl_ext_ioctl(cur_if->dev, WLC_SET_SSID, &ssid, sizeof(ssid), 1);
+ } else {
+ IAPSTA_ERROR(cur_if->ifname, "wrong ifmode %d\n", cur_if->ifmode);
+ }
+#ifdef WLMESH
+ } else if (apstamode == IMESHONLY_MODE ||
+ apstamode == ISTAMESH_MODE || apstamode == IMESHAP_MODE ||
+ apstamode == ISTAAPMESH_MODE || apstamode == IMESHAPAP_MODE) {
+ if (cur_if->ifmode == ISTA_MODE) {
+ wl_ext_connect(cur_if->dev, &conn_info);
+ } else if (cur_if->ifmode == IAP_MODE) {
+ wl_ext_ioctl(cur_if->dev, WLC_SET_SSID, &ssid, sizeof(ssid), 1);
+ } else if (cur_if->ifmode == IMESH_MODE) {
+ struct wl_join_params join_params;
+ // need to up before setting ssid
+ memset(&join_params, 0, sizeof(join_params));
+ join_params.ssid.SSID_len = strlen(cur_if->ssid);
+ memcpy((void *)join_params.ssid.SSID, cur_if->ssid, strlen(cur_if->ssid));
+ join_params.params.chanspec_list[0] = fw_chspec;
+ join_params.params.chanspec_num = 1;
+ wl_ext_ioctl(cur_if->dev, WLC_SET_SSID, &join_params, sizeof(join_params), 1);
+ } else {
+ IAPSTA_ERROR(cur_if->ifname, "wrong ifmode %d\n", cur_if->ifmode);
+ }
+#endif /* WLMESH */
+ }
+
+ if (wait_up) {
+ OSL_SLEEP(wait_up);
+ } else if (cur_if->ifmode == IAP_MODE || cur_if->ifmode == IMESH_MODE) {
+ timeout = wait_event_interruptible_timeout(apsta_params->netif_change_event,
+ wl_get_isam_status(cur_if, AP_CREATED),
+ msecs_to_jiffies(MAX_AP_LINK_WAIT_TIME));
+ if (timeout <= 0 || !wl_get_isam_status(cur_if, AP_CREATED)) {
+ if (lock)
+ mutex_unlock(&apsta_params->usr_sync);
+ wl_ext_disable_iface(dev, cur_if->ifname);
+ WL_MSG(ifname, "[%c] failed to enable with SSID: \"%s\"\n",
+ cur_if->prefix, cur_if->ssid);
+ ret = -1;
+ }
+ }
+
+ if (wl_get_isam_status(cur_if, AP_CREATED) &&
+ (cur_if->ifmode == IMESH_MODE || cur_if->ifmode == IAP_MODE) &&
+ (apstamode == ISTAAP_MODE || apstamode == ISTAAPAP_MODE ||
+ apstamode == ISTAMESH_MODE || apstamode == IMESHAP_MODE ||
+ apstamode == ISTAAPMESH_MODE || apstamode == IMESHAPAP_MODE)) {
+ wl_ext_set_scan_time(cur_if->dev, 80,
+ WLC_GET_SCAN_CHANNEL_TIME, WLC_SET_SCAN_CHANNEL_TIME);
+ }
+
+ wl_ext_isam_status(cur_if->dev, NULL, 0);
+
+exit:
+ if (cur_if->ifmode == IAP_MODE || cur_if->ifmode == IMESH_MODE) {
+ wl_clr_isam_status(cur_if, AP_CREATING);
+ }
+ WL_MSG(ifname, "[%c] Exit ret=%d\n", cur_if->prefix, ret);
+ if (lock)
+ mutex_unlock(&apsta_params->usr_sync);
+ return ret;
+}
+
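+/*
+ * wl_ext_isam_status: dump bssid/channel/rssi/security/SSID (plus the
+ * assoclist or mesh peer list) of every isam interface, either into the
+ * caller's command buffer or, if none is given, into a temporary buffer
+ * that is only logged.
+ */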
+int
+wl_ext_isam_status(struct net_device *dev, char *command, int total_len)
+{
+ struct dhd_pub *dhd = dhd_get_pub(dev);
+ struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+ int i;
+ struct wl_if_info *tmp_if;
+ uint16 chan = 0;
+ wlc_ssid_t ssid = { 0, {0} };
+ struct ether_addr bssid;
+ scb_val_t scb_val;
+ char sec[64];
+ u32 chanspec = 0;
+ char *dump_buf = NULL;
+ int dump_len = WLC_IOCTL_MEDLEN;
+ int dump_written = 0;
+
+ if (command || android_msg_level & ANDROID_INFO_LEVEL) {
+ if (command) {
+ dump_buf = command;
+ dump_len = total_len;
+ } else {
+ dump_buf = kmalloc(dump_len, GFP_KERNEL);
+ if (dump_buf == NULL) {
+ IAPSTA_ERROR(dev->name, "Failed to allocate buffer of %d bytes\n",
+ dump_len);
+ return -1;
+ }
+ }
+		dump_written += snprintf(dump_buf+dump_written, dump_len-dump_written,
+			"apstamode=%d", apsta_params->apstamode);
+ for (i=0; i<MAX_IF_NUM; i++) {
+ memset(&ssid, 0, sizeof(ssid));
+ memset(&bssid, 0, sizeof(bssid));
+ memset(&scb_val, 0, sizeof(scb_val));
+ tmp_if = &apsta_params->if_info[i];
+ if (tmp_if->dev) {
+ chan = wl_ext_get_chan(apsta_params, tmp_if->dev);
+ if (chan) {
+ wl_ext_ioctl(tmp_if->dev, WLC_GET_SSID, &ssid, sizeof(ssid), 0);
+ wldev_ioctl(tmp_if->dev, WLC_GET_BSSID, &bssid, sizeof(bssid), 0);
+ wldev_ioctl(tmp_if->dev, WLC_GET_RSSI, &scb_val,
+ sizeof(scb_val_t), 0);
+ chanspec = wl_ext_get_chanspec(apsta_params, tmp_if->dev);
+ wl_ext_get_sec(tmp_if->dev, tmp_if->ifmode, sec, sizeof(sec), FALSE);
+					dump_written += snprintf(dump_buf+dump_written, dump_len-dump_written,
+ "\n" DHD_LOG_PREFIXS "[%s-%c]: bssid=%pM, chan=%3d(0x%x %sMHz), "
+ "rssi=%3d, sec=%-15s, SSID=\"%s\"",
+ tmp_if->ifname, tmp_if->prefix, &bssid, chan, chanspec,
+ CHSPEC_IS20(chanspec)?"20":
+ CHSPEC_IS40(chanspec)?"40":
+ CHSPEC_IS80(chanspec)?"80":"160",
+ dtoh32(scb_val.val), sec, ssid.SSID);
+ if (tmp_if->ifmode == IAP_MODE) {
+						dump_written += snprintf(dump_buf+dump_written, dump_len-dump_written, "\n");
+ dump_written += wl_ext_assoclist(tmp_if->dev, NULL,
+ dump_buf+dump_written, dump_len-dump_written);
+ }
+#ifdef WLMESH
+ else if (tmp_if->ifmode == IMESH_MODE) {
+						dump_written += snprintf(dump_buf+dump_written, dump_len-dump_written, "\n");
+ dump_written += wl_ext_mesh_peer_status(tmp_if->dev, NULL,
+ dump_buf+dump_written, dump_len-dump_written);
+ }
+#endif /* WLMESH */
+ } else {
+					dump_written += snprintf(dump_buf+dump_written, dump_len-dump_written,
+ "\n" DHD_LOG_PREFIXS "[%s-%c]:", tmp_if->ifname, tmp_if->prefix);
+ }
+ }
+ }
+ IAPSTA_INFO(dev->name, "%s\n", dump_buf);
+ }
+
+ if (!command && dump_buf)
+ kfree(dump_buf);
+ return dump_written;
+}
+
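+/*
+ * wl_ext_isam_param: get or set isam runtime parameters ("acs", and
+ * "acs_tmo" with ACS_MONITOR). With a data argument the value is set;
+ * without one it is written back into the command buffer.
+ */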
+int
+wl_ext_isam_param(struct net_device *dev, char *command, int total_len)
+{
+ struct dhd_pub *dhd = dhd_get_pub(dev);
+ struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+ int ret = -1;
+ char *pick_tmp, *data, *param;
+ int bytes_written=-1;
+
+ IAPSTA_TRACE(dev->name, "command=%s, len=%d\n", command, total_len);
+
+ pick_tmp = command;
+ param = bcmstrtok(&pick_tmp, " ", 0); // pick isam_param
+ param = bcmstrtok(&pick_tmp, " ", 0); // pick cmd
+ while (param != NULL) {
+ data = bcmstrtok(&pick_tmp, " ", 0); // pick data
+ if (!strcmp(param, "acs")) {
+ if (data) {
+ apsta_params->acs = simple_strtol(data, NULL, 0);
+ ret = 0;
+ } else {
+ bytes_written = snprintf(command, total_len, "%d", apsta_params->acs);
+ ret = bytes_written;
+ goto exit;
+ }
+ }
+#ifdef ACS_MONITOR
+ else if (!strcmp(param, "acs_tmo")) {
+ if (data) {
+ struct wl_if_info *cur_if = NULL;
+ uint acs_tmo;
+ cur_if = wl_get_cur_if(dev);
+ if (!cur_if)
+ goto exit;
+ acs_tmo = simple_strtol(data, NULL, 0);
+ if (apsta_params->acs_tmo != acs_tmo) {
+ apsta_params->acs_tmo = acs_tmo;
+ WL_MSG(dev->name, "acs_timer reset to %d\n", acs_tmo);
+ wl_ext_mod_timer(&cur_if->acs_timer, acs_tmo, 0);
+ }
+ ret = 0;
+ } else {
+ bytes_written = snprintf(command, total_len, "%d", apsta_params->acs_tmo);
+ ret = bytes_written;
+ goto exit;
+ }
+ }
+#endif /* ACS_MONITOR */
+ param = bcmstrtok(&pick_tmp, " ", 0); // pick cmd
+ }
+
+exit:
+ return ret;
+}
+
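+/* Handler for the "iapsta_disable ifname wlanX" private command. */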
+int
+wl_ext_iapsta_disable(struct net_device *dev, char *command, int total_len)
+{
+ int ret = 0;
+ char *pch, *pick_tmp, *param;
+ char ifname[IFNAMSIZ+1];
+
+ IAPSTA_TRACE(dev->name, "command=%s, len=%d\n", command, total_len);
+
+ pick_tmp = command;
+ param = bcmstrtok(&pick_tmp, " ", 0); // skip iapsta_disable
+ param = bcmstrtok(&pick_tmp, " ", 0);
+ while (param != NULL) {
+ if (!strcmp(param, "ifname")) {
+ pch = bcmstrtok(&pick_tmp, " ", 0);
+ if (pch) {
+				strncpy(ifname, pch, IFNAMSIZ);
+				ifname[IFNAMSIZ] = '\0';
+ ret = wl_ext_disable_iface(dev, ifname);
+ if (ret)
+ return ret;
+ }
+ else {
+ IAPSTA_ERROR(dev->name, "ifname [wlanX]\n");
+ return -1;
+ }
+ }
+ param = bcmstrtok(&pick_tmp, " ", 0);
+ }
+
+ return ret;
+}
+
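+/* Handler for the "iapsta_enable ifname wlanX" private command. */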
+int
+wl_ext_iapsta_enable(struct net_device *dev, char *command, int total_len)
+{
+ int ret = 0;
+ char *pch, *pick_tmp, *param;
+ char ifname[IFNAMSIZ+1];
+
+ IAPSTA_TRACE(dev->name, "command=%s, len=%d\n", command, total_len);
+
+ pick_tmp = command;
+ param = bcmstrtok(&pick_tmp, " ", 0); // skip iapsta_enable
+ param = bcmstrtok(&pick_tmp, " ", 0);
+ while (param != NULL) {
+ if (!strcmp(param, "ifname")) {
+ pch = bcmstrtok(&pick_tmp, " ", 0);
+ if (pch) {
+				strncpy(ifname, pch, IFNAMSIZ);
+				ifname[IFNAMSIZ] = '\0';
+ ret = wl_ext_enable_iface(dev, ifname, 0, TRUE);
+ if (ret)
+ return ret;
+ } else {
+ IAPSTA_ERROR(dev->name, "ifname [wlanX]\n");
+ return -1;
+ }
+ }
+ param = bcmstrtok(&pick_tmp, " ", 0);
+ }
+
+ return ret;
+}
+
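+/*
+ * Handler for "iapsta_config ifname wlanX <params> [ifname wlanY ...]":
+ * each ifname clause is matched to an interface and handed to
+ * wl_ext_parse_config().
+ */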
+int
+wl_ext_iapsta_config(struct net_device *dev, char *command, int total_len)
+{
+ struct dhd_pub *dhd = dhd_get_pub(dev);
+ int ret=0, i;
+ char *pch, *pch2, *pick_tmp, *pick_next=NULL, *param;
+ struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+ char ifname[IFNAMSIZ+1];
+ struct wl_if_info *cur_if = NULL, *tmp_if = NULL;
+
+ if (!apsta_params->init) {
+ IAPSTA_ERROR(dev->name, "please init first\n");
+ return -1;
+ }
+
+ IAPSTA_TRACE(dev->name, "command=%s, len=%d\n", command, total_len);
+
+ pick_tmp = command;
+ param = bcmstrtok(&pick_tmp, " ", 0); // skip iapsta_config
+
+ mutex_lock(&apsta_params->usr_sync);
+
+ while (pick_tmp != NULL) {
+ memset(ifname, 0, IFNAMSIZ+1);
+ if (!strncmp(pick_tmp, "ifname ", strlen("ifname "))) {
+ pch = pick_tmp + strlen("ifname ");
+ pch2 = strchr(pch, ' ');
+ if (pch && pch2) {
+ strncpy(ifname, pch, pch2-pch);
+ } else {
+ IAPSTA_ERROR(dev->name, "ifname [wlanX]\n");
+ ret = -1;
+ break;
+ }
+ for (i=0; i<MAX_IF_NUM; i++) {
+ tmp_if = &apsta_params->if_info[i];
+ if (tmp_if->dev && !strcmp(tmp_if->dev->name, ifname)) {
+ cur_if = tmp_if;
+ break;
+ }
+ }
+ if (!cur_if) {
+ IAPSTA_ERROR(dev->name, "wrong ifname=%s in apstamode=%d\n",
+ ifname, apsta_params->apstamode);
+ ret = -1;
+ break;
+ }
+ ret = wl_ext_parse_config(cur_if, pick_tmp, &pick_next);
+ if (ret)
+ break;
+ pick_tmp = pick_next;
+ } else {
+ IAPSTA_ERROR(dev->name, "first arg must be ifname\n");
+ ret = -1;
+ break;
+ }
+
+ }
+
+ mutex_unlock(&apsta_params->usr_sync);
+
+ return ret;
+}
+
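+/*
+ * Handler for "isam_init mode <mode> [ifname ...] [rsdb|vsdb|csa|acs ...]":
+ * selects the apstamode, assigns per-interface modes and names, then runs
+ * the firmware preinit.
+ */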
+int
+wl_ext_isam_init(struct net_device *dev, char *command, int total_len)
+{
+ struct dhd_pub *dhd = dhd_get_pub(dev);
+ char *pch, *pick_tmp, *pick_tmp2, *param;
+ struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+ int i;
+
+ if (apsta_params->init) {
+ IAPSTA_ERROR(dev->name, "don't init twice\n");
+ return -1;
+ }
+ IAPSTA_TRACE(dev->name, "command=%s, len=%d\n", command, total_len);
+
+ pick_tmp = command;
+ param = bcmstrtok(&pick_tmp, " ", 0); // skip iapsta_init
+ param = bcmstrtok(&pick_tmp, " ", 0);
+ while (param != NULL) {
+ pick_tmp2 = bcmstrtok(&pick_tmp, " ", 0);
+ if (!pick_tmp2) {
+ IAPSTA_ERROR(dev->name, "wrong param %s\n", param);
+ return -1;
+ }
+ if (!strcmp(param, "mode")) {
+ pch = NULL;
+ if (!strcmp(pick_tmp2, "sta")) {
+ apsta_params->apstamode = ISTAONLY_MODE;
+ } else if (!strcmp(pick_tmp2, "ap")) {
+ apsta_params->apstamode = IAPONLY_MODE;
+ } else if (!strcmp(pick_tmp2, "sta-ap")) {
+ apsta_params->apstamode = ISTAAP_MODE;
+ } else if (!strcmp(pick_tmp2, "sta-sta")) {
+ apsta_params->apstamode = ISTASTA_MODE;
+ apsta_params->vsdb = TRUE;
+ } else if (!strcmp(pick_tmp2, "ap-ap")) {
+ apsta_params->apstamode = IDUALAP_MODE;
+ } else if (!strcmp(pick_tmp2, "sta-ap-ap")) {
+ apsta_params->apstamode = ISTAAPAP_MODE;
+ } else if (!strcmp(pick_tmp2, "apsta")) {
+ apsta_params->apstamode = ISTAAP_MODE;
+ apsta_params->if_info[IF_PIF].ifmode = ISTA_MODE;
+ apsta_params->if_info[IF_VIF].ifmode = IAP_MODE;
+ } else if (!strcmp(pick_tmp2, "dualap")) {
+ apsta_params->apstamode = IDUALAP_MODE;
+ apsta_params->if_info[IF_PIF].ifmode = IAP_MODE;
+ apsta_params->if_info[IF_VIF].ifmode = IAP_MODE;
+ } else if (!strcmp(pick_tmp2, "sta-go") ||
+ !strcmp(pick_tmp2, "gosta")) {
+ if (!FW_SUPPORTED(dhd, p2p)) {
+ return -1;
+ }
+ apsta_params->apstamode = ISTAGO_MODE;
+ apsta_params->if_info[IF_PIF].ifmode = ISTA_MODE;
+ apsta_params->if_info[IF_VIF].ifmode = IAP_MODE;
+#ifdef WLMESH
+ } else if (!strcmp(pick_tmp2, "mesh")) {
+ apsta_params->apstamode = IMESHONLY_MODE;
+ } else if (!strcmp(pick_tmp2, "sta-mesh")) {
+ apsta_params->apstamode = ISTAMESH_MODE;
+ } else if (!strcmp(pick_tmp2, "sta-ap-mesh")) {
+ apsta_params->apstamode = ISTAAPMESH_MODE;
+ } else if (!strcmp(pick_tmp2, "mesh-ap")) {
+ apsta_params->apstamode = IMESHAP_MODE;
+ } else if (!strcmp(pick_tmp2, "mesh-ap-ap")) {
+ apsta_params->apstamode = IMESHAPAP_MODE;
+#endif /* WLMESH */
+ } else {
+ IAPSTA_ERROR(dev->name, "mode [sta|ap|sta-ap|ap-ap]\n");
+ return -1;
+ }
+ pch = bcmstrtok(&pick_tmp2, " -", 0);
+ for (i=0; i<MAX_IF_NUM && pch; i++) {
+ if (!strcmp(pch, "sta"))
+ apsta_params->if_info[i].ifmode = ISTA_MODE;
+ else if (!strcmp(pch, "ap"))
+ apsta_params->if_info[i].ifmode = IAP_MODE;
+#ifdef WLMESH
+ else if (!strcmp(pch, "mesh")) {
+ if (dhd->conf->fw_type != FW_TYPE_MESH) {
+ IAPSTA_ERROR(dev->name, "wrong fw type\n");
+ return -1;
+ }
+ apsta_params->if_info[i].ifmode = IMESH_MODE;
+ }
+#endif /* WLMESH */
+ pch = bcmstrtok(&pick_tmp2, " -", 0);
+ }
+ }
+ else if (!strcmp(param, "rsdb")) {
+ apsta_params->rsdb = (int)simple_strtol(pick_tmp2, NULL, 0);
+ } else if (!strcmp(param, "vsdb")) {
+ if (!strcmp(pick_tmp2, "y")) {
+ apsta_params->vsdb = TRUE;
+ } else if (!strcmp(pick_tmp2, "n")) {
+ apsta_params->vsdb = FALSE;
+ } else {
+ IAPSTA_ERROR(dev->name, "vsdb [y|n]\n");
+ return -1;
+ }
+ } else if (!strcmp(param, "csa")) {
+ apsta_params->csa = (int)simple_strtol(pick_tmp2, NULL, 0);
+ } else if (!strcmp(param, "acs")) {
+ apsta_params->acs = (int)simple_strtol(pick_tmp2, NULL, 0);
+#if defined(WLMESH) && defined(WL_ESCAN)
+ } else if (!strcmp(param, "macs")) {
+ apsta_params->macs = (int)simple_strtol(pick_tmp2, NULL, 0);
+#endif /* WLMESH && WL_ESCAN */
+ } else if (!strcmp(param, "ifname")) {
+ pch = NULL;
+ pch = bcmstrtok(&pick_tmp2, " -", 0);
+ for (i=0; i<MAX_IF_NUM && pch; i++) {
+ strcpy(apsta_params->if_info[i].ifname, pch);
+ pch = bcmstrtok(&pick_tmp2, " -", 0);
+ }
+ } else if (!strcmp(param, "vifname")) {
+ strcpy(apsta_params->if_info[IF_VIF].ifname, pick_tmp2);
+ }
+ param = bcmstrtok(&pick_tmp, " ", 0);
+ }
+
+ if (apsta_params->apstamode == 0) {
+ IAPSTA_ERROR(dev->name, "mode [sta|ap|sta-ap|ap-ap]\n");
+ return -1;
+ }
+
+ wl_ext_iapsta_preinit(dev, apsta_params);
+#ifndef WL_STATIC_IF
+ wl_ext_iapsta_intf_add(dev, apsta_params);
+#endif /* WL_STATIC_IF */
+
+ return 0;
+}
+
+int
+wl_ext_iapsta_alive_preinit(struct net_device *dev)
+{
+ struct dhd_pub *dhd = dhd_get_pub(dev);
+ struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+
+ if (apsta_params->init == TRUE) {
+ IAPSTA_ERROR(dev->name, "don't init twice\n");
+ return -1;
+ }
+
+ IAPSTA_TRACE(dev->name, "Enter\n");
+
+ apsta_params->init = TRUE;
+
+ return 0;
+}
+
+int
+wl_ext_iapsta_alive_postinit(struct net_device *dev)
+{
+ struct dhd_pub *dhd = dhd_get_pub(dev);
+ struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+ s32 apsta = 0, ap = 0;
+ struct wl_if_info *cur_if;
+ int i;
+
+ wl_ext_iovar_getint(dev, "apsta", &apsta);
+ wl_ext_ioctl(dev, WLC_GET_AP, &ap, sizeof(ap), 0);
+ if (apsta == 1 || ap == 0) {
+ apsta_params->apstamode = ISTAONLY_MODE;
+ apsta_params->if_info[IF_PIF].ifmode = ISTA_MODE;
+ op_mode = DHD_FLAG_STA_MODE;
+ } else {
+ apsta_params->apstamode = IAPONLY_MODE;
+ apsta_params->if_info[IF_PIF].ifmode = IAP_MODE;
+ op_mode = DHD_FLAG_HOSTAP_MODE;
+ }
+ // fix me: how to check it's ISTAAP_MODE or IDUALAP_MODE?
+
+ wl_ext_get_ioctl_ver(dev, &apsta_params->ioctl_ver);
+ WL_MSG(dev->name, "apstamode=%d\n", apsta_params->apstamode);
+
+ for (i=0; i<MAX_IF_NUM; i++) {
+ cur_if = &apsta_params->if_info[i];
+ if (i == 1 && !strlen(cur_if->ifname))
+ strcpy(cur_if->ifname, "wlan1");
+ if (i == 2 && !strlen(cur_if->ifname))
+ strcpy(cur_if->ifname, "wlan2");
+ if (cur_if->ifmode == ISTA_MODE) {
+ cur_if->channel = 0;
+ cur_if->maxassoc = -1;
+ wl_set_isam_status(cur_if, IF_READY);
+ cur_if->prio = PRIO_STA;
+ cur_if->vsdb = TRUE;
+ cur_if->prefix = 'S';
+ snprintf(cur_if->ssid, DOT11_MAX_SSID_LEN, "ttt_sta");
+ }
+ else if (cur_if->ifmode == IAP_MODE) {
+ cur_if->channel = 1;
+ cur_if->maxassoc = -1;
+ wl_set_isam_status(cur_if, IF_READY);
+ cur_if->prio = PRIO_AP;
+ cur_if->vsdb = FALSE;
+ cur_if->prefix = 'A';
+ snprintf(cur_if->ssid, DOT11_MAX_SSID_LEN, "ttt_ap");
+ }
+#ifdef WLMESH
+ else if (cur_if->ifmode == IMESH_MODE) {
+ cur_if->channel = 1;
+ cur_if->maxassoc = -1;
+ wl_set_isam_status(cur_if, IF_READY);
+ cur_if->prio = PRIO_MESH;
+ cur_if->vsdb = FALSE;
+ cur_if->prefix = 'M';
+ snprintf(cur_if->ssid, DOT11_MAX_SSID_LEN, "ttt_mesh");
+ }
+#endif /* WLMESH */
+ }
+
+ return op_mode;
+}
+
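+/*
+ * wl_ext_iapsta_get_rsdb: query the firmware RSDB status. Only BCM4359/4375
+ * are probed; mesh firmware is treated as RSDB-capable, all other chips
+ * report 0 (no RSDB).
+ */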
+static int
+wl_ext_iapsta_get_rsdb(struct net_device *net, struct dhd_pub *dhd)
+{
+ s8 iovar_buf[WLC_IOCTL_SMLEN];
+ wl_config_t *rsdb_p;
+ int ret = 0, rsdb = 0;
+
+ if (dhd->conf->chip == BCM4359_CHIP_ID || dhd->conf->chip == BCM4375_CHIP_ID) {
+ ret = wldev_iovar_getbuf(net, "rsdb_mode", NULL, 0,
+ iovar_buf, WLC_IOCTL_SMLEN, NULL);
+ if (!ret) {
+ if (dhd->conf->fw_type == FW_TYPE_MESH) {
+ rsdb = 1;
+ } else {
+ rsdb_p = (wl_config_t *) iovar_buf;
+ rsdb = rsdb_p->status;
+ IAPSTA_INFO(net->name, "config=%d, status=%d\n",
+ rsdb_p->config, rsdb_p->status);
+ }
+ }
+ }
+
+ IAPSTA_INFO(net->name, "rsdb_mode=%d\n", rsdb);
+
+ return rsdb;
+}
+
+static void
+wl_ext_iapsta_postinit(struct net_device *net, struct wl_if_info *cur_if)
+{
+ struct dhd_pub *dhd = dhd_get_pub(net);
+ struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+ int pm;
+
+ IAPSTA_TRACE(cur_if->ifname, "ifidx=%d\n", cur_if->ifidx);
+ if (cur_if->ifidx == 0) {
+ apsta_params->rsdb = wl_ext_iapsta_get_rsdb(net, dhd);
+ apsta_params->vsdb = FALSE;
+ apsta_params->csa = 0;
+ apsta_params->acs = 0;
+ apsta_params->radar = wl_ext_radar_detect(net);
+ if (dhd->conf->fw_type == FW_TYPE_MESH) {
+ apsta_params->csa |= (CSA_FW_BIT | CSA_DRV_BIT);
+ }
+ if (dhd->conf->vndr_ie_assocreq && strlen(dhd->conf->vndr_ie_assocreq))
+ wl_ext_add_del_ie(net, VNDR_IE_ASSOCREQ_FLAG, dhd->conf->vndr_ie_assocreq, "add");
+ } else {
+ if (cur_if->ifmode == ISTA_MODE) {
+ wl_ext_iovar_setint(cur_if->dev, "roam_off", dhd->conf->roam_off);
+ wl_ext_iovar_setint(cur_if->dev, "bcn_timeout", dhd->conf->bcn_timeout);
+ if (dhd->conf->pm >= 0)
+ pm = dhd->conf->pm;
+ else
+ pm = PM_FAST;
+ wl_ext_ioctl(cur_if->dev, WLC_SET_PM, &pm, sizeof(pm), 1);
+ wl_ext_iovar_setint(cur_if->dev, "assoc_retry_max", 10);
+ }
+#ifdef WLMESH
+ else if (cur_if->ifmode == IMESH_MODE) {
+ pm = 0;
+ wl_ext_ioctl(cur_if->dev, WLC_SET_PM, &pm, sizeof(pm), 1);
+ }
+#endif /* WLMESH */
+ }
+#ifdef PROPTX_MAXCOUNT
+ wl_ext_update_wlfc_maxcount(dhd);
+#endif /* PROPTX_MAXCOUNT */
+
+}
+
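+/*
+ * wl_ext_iapsta_get_vif_macaddr: derive a locally administered MAC address
+ * for virtual interfaces beyond the first two by setting the local bit and
+ * perturbing bytes 4 and 5 with the interface index.
+ */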
+void
+wl_ext_iapsta_get_vif_macaddr(struct dhd_pub *dhd, int ifidx, u8 *mac_addr)
+{
+ if (ifidx >= 2) {
+ IAPSTA_TRACE("wlan", "ifidx=%d\n", ifidx);
+ mac_addr[0] |= 0x02;
+ mac_addr[4] ^= 0x80;
+ mac_addr[4] += ifidx;
+ mac_addr[5] += (ifidx-1);
+ }
+}
+
+int
+wl_ext_iapsta_attach_name(struct net_device *net, int ifidx)
+{
+ struct dhd_pub *dhd = dhd_get_pub(net);
+ struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+ struct wl_if_info *cur_if = NULL;
+
+ if (ifidx < MAX_IF_NUM) {
+ IAPSTA_TRACE(net->name, "ifidx=%d\n", ifidx);
+ cur_if = &apsta_params->if_info[ifidx];
+ }
+ if (ifidx == 0) {
+ strcpy(cur_if->ifname, net->name);
+ wl_ext_iapsta_postinit(net, cur_if);
+ wl_set_isam_status(cur_if, IF_READY);
+ } else if (cur_if && wl_get_isam_status(cur_if, IF_ADDING)) {
+ strcpy(cur_if->ifname, net->name);
+ wl_ext_iapsta_postinit(net, cur_if);
+ wl_clr_isam_status(cur_if, IF_ADDING);
+ wl_set_isam_status(cur_if, IF_READY);
+#ifndef WL_STATIC_IF
+ wake_up_interruptible(&apsta_params->netif_change_event);
+#endif /* WL_STATIC_IF */
+ }
+
+ return 0;
+}
+
+int
+wl_ext_iapsta_update_net_device(struct net_device *net, int ifidx)
+{
+ struct dhd_pub *dhd = dhd_get_pub(net);
+ struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+ struct wl_if_info *cur_if = NULL, *primary_if;
+
+ if (ifidx < MAX_IF_NUM) {
+ IAPSTA_TRACE(net->name, "ifidx=%d\n", ifidx);
+ cur_if = &apsta_params->if_info[ifidx];
+ }
+ if (cur_if && wl_get_isam_status(cur_if, IF_ADDING)) {
+ primary_if = &apsta_params->if_info[IF_PIF];
+ if (strlen(cur_if->ifname)) {
+			memset(net->name, 0, IFNAMSIZ); /* sizeof(IFNAMSIZ) would clear only sizeof(int) bytes */
+ strcpy(net->name, cur_if->ifname);
+ net->name[IFNAMSIZ-1] = '\0';
+ }
+#ifndef WL_STATIC_IF
+ if (apsta_params->apstamode != IUNKNOWN_MODE &&
+ apsta_params->apstamode != ISTAAPAP_MODE &&
+ apsta_params->apstamode != ISTASTA_MODE) {
+ memcpy(net->dev_addr, primary_if->dev->dev_addr, ETHER_ADDR_LEN);
+ net->dev_addr[0] |= 0x02;
+ wl_ext_iapsta_get_vif_macaddr(dhd, ifidx, net->dev_addr);
+ }
+#endif /* WL_STATIC_IF */
+ }
+
+ return 0;
+}
+
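+/*
+ * wl_ext_iapsta_attach_netdev: bind a newly created net_device to its
+ * if_info slot. ifidx 0 resets the whole apsta_params state and sets up the
+ * shared wait queues, mutexes and timers; secondary interfaces flagged
+ * IF_ADDING get their event handler, timers and optional mesh-escan/ACS
+ * state attached.
+ */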
+int
+wl_ext_iapsta_attach_netdev(struct net_device *net, int ifidx, uint8 bssidx)
+{
+ struct dhd_pub *dhd = dhd_get_pub(net);
+ struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+ struct wl_if_info *cur_if = NULL, *primary_if;
+
+ if (ifidx < MAX_IF_NUM) {
+ IAPSTA_TRACE(net->name, "ifidx=%d, bssidx=%d\n", ifidx, bssidx);
+ cur_if = &apsta_params->if_info[ifidx];
+ }
+ if (ifidx == 0) {
+ memset(apsta_params, 0, sizeof(struct wl_apsta_params));
+ apsta_params->dhd = dhd;
+ cur_if->dev = net;
+ cur_if->ifidx = ifidx;
+ cur_if->bssidx = bssidx;
+ cur_if->ifmode = ISTA_MODE;
+ cur_if->prio = PRIO_STA;
+ cur_if->vsdb = TRUE;
+ cur_if->prefix = 'S';
+ wl_ext_event_register(net, dhd, WLC_E_LAST, wl_ext_iapsta_event,
+ apsta_params, PRIO_EVENT_IAPSTA);
+ strcpy(cur_if->ifname, net->name);
+ init_waitqueue_head(&apsta_params->netif_change_event);
+ init_waitqueue_head(&apsta_params->ap_recon_sta_event);
+ mutex_init(&apsta_params->usr_sync);
+ mutex_init(&apsta_params->in4way_sync);
+ mutex_init(&cur_if->pm_sync);
+#ifdef TPUT_MONITOR
+ init_timer_compat(&apsta_params->monitor_timer, wl_tput_monitor_timer, net);
+#endif /* TPUT_MONITOR */
+#ifdef ACS_MONITOR
+ wl_acs_attach(dhd, cur_if);
+#endif /* ACS_MONITOR */
+ INIT_DELAYED_WORK(&cur_if->pm_enable_work, wl_ext_pm_work_handler);
+#ifdef SET_CARRIER
+ wl_ext_net_setcarrier(cur_if, FALSE, TRUE);
+#endif /* SET_CARRIER */
+ init_timer_compat(&cur_if->connect_timer, wl_ext_connect_timeout, net);
+#if defined(WL_EXT_RECONNECT) && defined(WL_CFG80211)
+ init_timer_compat(&cur_if->reconnect_timer, wl_ext_reconnect_timeout, net);
+#endif /* WL_EXT_RECONNECT && WL_CFG80211 */
+#ifdef EAPOL_RESEND
+ spin_lock_init(&apsta_params->eapol_lock);
+ init_timer_compat(&cur_if->eapol_timer, wl_eapol_timer, net);
+#endif /* EAPOL_RESEND */
+ } else if (cur_if && wl_get_isam_status(cur_if, IF_ADDING)) {
+ primary_if = &apsta_params->if_info[IF_PIF];
+ cur_if->dev = net;
+ cur_if->ifidx = ifidx;
+ cur_if->bssidx = bssidx;
+ wl_ext_event_register(net, dhd, WLC_E_LAST, wl_ext_iapsta_event,
+ apsta_params, PRIO_EVENT_IAPSTA);
+#if defined(WLMESH) && defined(WL_ESCAN)
+ if (cur_if->ifmode == IMESH_MODE && apsta_params->macs) {
+ wl_mesh_escan_attach(dhd, cur_if);
+ }
+#endif /* WLMESH && WL_ESCAN */
+#ifdef ACS_MONITOR
+ wl_acs_attach(dhd, cur_if);
+#endif /* ACS_MONITOR */
+ mutex_init(&cur_if->pm_sync);
+ INIT_DELAYED_WORK(&cur_if->pm_enable_work, wl_ext_pm_work_handler);
+#ifdef SET_CARRIER
+ wl_ext_net_setcarrier(cur_if, FALSE, TRUE);
+#endif /* SET_CARRIER */
+ init_timer_compat(&cur_if->connect_timer, wl_ext_connect_timeout, net);
+#if defined(WL_EXT_RECONNECT) && defined(WL_CFG80211)
+ init_timer_compat(&cur_if->reconnect_timer, wl_ext_reconnect_timeout, net);
+#endif /* WL_EXT_RECONNECT && WL_CFG80211 */
+#ifdef EAPOL_RESEND
+ init_timer_compat(&cur_if->eapol_timer, wl_eapol_timer, net);
+#endif /* EAPOL_RESEND */
+ }
+
+ return 0;
+}
+
+int
+wl_ext_iapsta_dettach_netdev(struct net_device *net, int ifidx)
+{
+ struct dhd_pub *dhd = dhd_get_pub(net);
+ struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+ struct wl_if_info *cur_if = NULL;
+
+ if (!apsta_params)
+ return 0;
+
+ if (ifidx < MAX_IF_NUM) {
+ IAPSTA_TRACE(net->name, "ifidx=%d\n", ifidx);
+ cur_if = &apsta_params->if_info[ifidx];
+ }
+
+ if (ifidx == 0) {
+#ifdef EAPOL_RESEND
+ wl_ext_release_eapol_txpkt(dhd, ifidx, FALSE);
+#endif /* EAPOL_RESEND */
+ wl_ext_mod_timer(&cur_if->connect_timer, 0, 0);
+#if defined(WL_EXT_RECONNECT) && defined(WL_CFG80211)
+ wl_ext_mod_timer(&cur_if->reconnect_timer, 0, 0);
+#endif /* WL_EXT_RECONNECT && WL_CFG80211 */
+#ifdef SET_CARRIER
+ wl_ext_net_setcarrier(cur_if, FALSE, FALSE);
+#endif /* SET_CARRIER */
+ wl_ext_add_remove_pm_enable_work(net, FALSE);
+#ifdef ACS_MONITOR
+ wl_acs_detach(cur_if);
+#endif /* ACS_MONITOR */
+#ifdef TPUT_MONITOR
+ wl_ext_mod_timer(&apsta_params->monitor_timer, 0, 0);
+#endif /* TPUT_MONITOR */
+#if defined(WLMESH) && defined(WL_ESCAN)
+ if (cur_if->ifmode == IMESH_MODE && apsta_params->macs) {
+ wl_mesh_escan_detach(dhd, cur_if);
+ }
+#endif /* WLMESH && WL_ESCAN */
+ wl_ext_event_deregister(net, dhd, WLC_E_LAST, wl_ext_iapsta_event);
+ memset(apsta_params, 0, sizeof(struct wl_apsta_params));
+ }
+ else if (cur_if && (wl_get_isam_status(cur_if, IF_READY) ||
+ wl_get_isam_status(cur_if, IF_ADDING))) {
+#ifdef EAPOL_RESEND
+ wl_ext_release_eapol_txpkt(dhd, ifidx, FALSE);
+#endif /* EAPOL_RESEND */
+ wl_ext_mod_timer(&cur_if->connect_timer, 0, 0);
+#if defined(WL_EXT_RECONNECT) && defined(WL_CFG80211)
+ wl_ext_mod_timer(&cur_if->reconnect_timer, 0, 0);
+#endif /* WL_EXT_RECONNECT && WL_CFG80211 */
+#ifdef SET_CARRIER
+ wl_ext_net_setcarrier(cur_if, FALSE, FALSE);
+#endif /* SET_CARRIER */
+ wl_ext_add_remove_pm_enable_work(net, FALSE);
+#ifdef ACS_MONITOR
+ wl_acs_detach(cur_if);
+#endif /* ACS_MONITOR */
+#if defined(WLMESH) && defined(WL_ESCAN)
+ if (cur_if->ifmode == IMESH_MODE && apsta_params->macs) {
+ wl_mesh_escan_detach(dhd, cur_if);
+ }
+#endif /* WLMESH && WL_ESCAN */
+ wl_ext_event_deregister(net, dhd, WLC_E_LAST, wl_ext_iapsta_event);
+ memset(cur_if, 0, sizeof(struct wl_if_info));
+ }
+
+ return 0;
+}
+
+int
+wl_ext_iapsta_attach(struct net_device *net)
+{
+ struct dhd_pub *dhd = dhd_get_pub(net);
+ struct wl_apsta_params *iapsta_params;
+
+ IAPSTA_TRACE(net->name, "Enter\n");
+
+ iapsta_params = kzalloc(sizeof(struct wl_apsta_params), GFP_KERNEL);
+ if (unlikely(!iapsta_params)) {
+ IAPSTA_ERROR("wlan", "Could not allocate apsta_params\n");
+ return -ENOMEM;
+ }
+ dhd->iapsta_params = (void *)iapsta_params;
+
+ return 0;
+}
+
+void
+wl_ext_iapsta_dettach(struct net_device *net)
+{
+ struct dhd_pub *dhd = dhd_get_pub(net);
+
+ IAPSTA_TRACE(net->name, "Enter\n");
+
+ if (dhd->iapsta_params) {
+ kfree(dhd->iapsta_params);
+ dhd->iapsta_params = NULL;
+ }
+}
+#endif /* WL_EXT_IAPSTA */
diff --git a/bcmdhd.101.10.361.x/wl_iapsta.h b/bcmdhd.101.10.361.x/wl_iapsta.h
new file mode 100755
index 0000000..6e42b16
--- /dev/null
+++ b/bcmdhd.101.10.361.x/wl_iapsta.h
@@ -0,0 +1,85 @@
+
+#ifndef _wl_iapsta_
+#define _wl_iapsta_
+typedef enum IFMODE {
+ ISTA_MODE = 1,
+ IAP_MODE,
+ IGO_MODE,
+ IGC_MODE,
+ IMESH_MODE
+} ifmode_t;
+
+enum wl_ext_status {
+ WL_EXT_STATUS_DISCONNECTING = 0,
+ WL_EXT_STATUS_DISCONNECTED,
+ WL_EXT_STATUS_SCAN,
+ WL_EXT_STATUS_SCANNING,
+ WL_EXT_STATUS_SCAN_COMPLETE,
+ WL_EXT_STATUS_CONNECTING,
+ WL_EXT_STATUS_RECONNECT,
+ WL_EXT_STATUS_CONNECTED,
+ WL_EXT_STATUS_ADD_KEY,
+ WL_EXT_STATUS_AP_ENABLED,
+ WL_EXT_STATUS_DELETE_STA,
+ WL_EXT_STATUS_STA_DISCONNECTED,
+ WL_EXT_STATUS_STA_CONNECTED,
+ WL_EXT_STATUS_AP_DISABLED
+};
+
+extern int op_mode;
+void wl_ext_update_conn_state(dhd_pub_t *dhd, int ifidx, uint conn_state);
+#ifdef EAPOL_RESEND
+void wl_ext_backup_eapol_txpkt(dhd_pub_t *dhd, int ifidx, void *pkt);
+void wl_ext_release_eapol_txpkt(dhd_pub_t *dhd, int ifidx, bool rx);
+#endif /* EAPOL_RESEND */
+void wl_ext_iapsta_get_vif_macaddr(struct dhd_pub *dhd, int ifidx, u8 *mac_addr);
+int wl_ext_iapsta_attach_netdev(struct net_device *net, int ifidx, uint8 bssidx);
+int wl_ext_iapsta_attach_name(struct net_device *net, int ifidx);
+int wl_ext_iapsta_dettach_netdev(struct net_device *net, int ifidx);
+int wl_ext_iapsta_update_net_device(struct net_device *net, int ifidx);
+int wl_ext_iapsta_alive_preinit(struct net_device *dev);
+int wl_ext_iapsta_alive_postinit(struct net_device *dev);
+int wl_ext_iapsta_attach(struct net_device *net);
+void wl_ext_iapsta_dettach(struct net_device *net);
+int wl_ext_iapsta_enable(struct net_device *dev, char *command, int total_len);
+int wl_ext_iapsta_disable(struct net_device *dev, char *command, int total_len);
+int wl_ext_isam_param(struct net_device *dev, char *command, int total_len);
+int wl_ext_isam_status(struct net_device *dev, char *command, int total_len);
+int wl_ext_isam_init(struct net_device *dev, char *command, int total_len);
+int wl_ext_iapsta_config(struct net_device *dev, char *command, int total_len);
+void wl_ext_add_remove_pm_enable_work(struct net_device *dev, bool add);
+bool wl_ext_iapsta_other_if_enabled(struct net_device *net);
+bool wl_ext_sta_connecting(struct net_device *dev);
+void wl_iapsta_wait_event_complete(struct dhd_pub *dhd);
+int wl_iapsta_suspend_resume(dhd_pub_t *dhd, int suspend);
+#ifdef USE_IW
+int wl_ext_in4way_sync_wext(struct net_device *dev, uint action,
+ enum wl_ext_status status, void *context);
+#endif /* USE_IW */
+#ifdef WLMESH
+int wl_ext_mesh_peer_status(struct net_device *dev, char *data, char *command,
+ int total_len);
+int wl_ext_isam_peer_path(struct net_device *dev, char *command, int total_len);
+#endif /* WLMESH */
+#ifdef WL_CFG80211
+int wl_ext_in4way_sync(struct net_device *dev, uint action,
+ enum wl_ext_status status, void *context);
+void wl_ext_update_extsae_4way(struct net_device *dev,
+ const struct ieee80211_mgmt *mgmt, bool tx);
+u32 wl_ext_iapsta_update_channel(struct net_device *dev, u32 channel);
+void wl_ext_iapsta_update_iftype(struct net_device *net, int wl_iftype);
+bool wl_ext_iapsta_iftype_enabled(struct net_device *net, int wl_iftype);
+void wl_ext_iapsta_enable_master_if(struct net_device *dev, bool post);
+void wl_ext_iapsta_restart_master(struct net_device *dev);
+void wl_ext_iapsta_ifadding(struct net_device *net, int ifidx);
+bool wl_ext_iapsta_mesh_creating(struct net_device *net);
+void wl_ext_fw_reinit_incsa(struct net_device *dev);
+#ifdef SCAN_SUPPRESS
+uint16 wl_ext_scan_suppress(struct net_device *dev, void *scan_params, bool scan_v2);
+void wl_ext_reset_scan_busy(dhd_pub_t *dhd);
+#endif /* SCAN_SUPPRESS */
+#endif /* WL_CFG80211 */
+#ifdef PROPTX_MAXCOUNT
+int wl_ext_get_wlfc_maxcount(struct dhd_pub *dhd, int ifidx);
+#endif /* PROPTX_MAXCOUNT */
+#endif /* _wl_iapsta_ */
diff --git a/bcmdhd.101.10.361.x/wl_iw.c b/bcmdhd.101.10.361.x/wl_iw.c
new file mode 100755
index 0000000..160148e
--- /dev/null
+++ b/bcmdhd.101.10.361.x/wl_iw.c
@@ -0,0 +1,4302 @@
+/*
+ * Linux Wireless Extensions support
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: wl_iw.c 616333 2016-02-01 05:30:29Z $
+ */
+
+#if defined(USE_IW)
+#define LINUX_PORT
+
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <ethernet.h>
+
+#include <linux/if_arp.h>
+#include <asm/uaccess.h>
+#include <wlioctl.h>
+#ifdef WL_NAN
+#include <wlioctl_utils.h>
+#endif
+#include <wl_iw.h>
+#include <wl_android.h>
+#ifdef WL_ESCAN
+#include <wl_escan.h>
+#endif
+#include <dhd_config.h>
+
+uint iw_msg_level = WL_ERROR_LEVEL;
+
+#define WL_ERROR_MSG(x, args...) \
+ do { \
+ if (iw_msg_level & WL_ERROR_LEVEL) { \
+ printf("WEXT-ERROR) %s : " x, __func__, ## args); \
+ } \
+ } while (0)
+#define WL_TRACE_MSG(x, args...) \
+ do { \
+ if (iw_msg_level & WL_TRACE_LEVEL) { \
+ printf("WEXT-TRACE) %s : " x, __func__, ## args); \
+ } \
+ } while (0)
+#define WL_SCAN_MSG(x, args...) \
+ do { \
+ if (iw_msg_level & WL_SCAN_LEVEL) { \
+ printf("WEXT-SCAN) %s : " x, __func__, ## args); \
+ } \
+ } while (0)
+#define WL_WSEC_MSG(x, args...) \
+ do { \
+ if (iw_msg_level & WL_WSEC_LEVEL) { \
+ printf("WEXT-WSEC) %s : " x, __func__, ## args); \
+ } \
+ } while (0)
+#define WL_ERROR(x) WL_ERROR_MSG x
+#define WL_TRACE(x) WL_TRACE_MSG x
+#define WL_SCAN(x) WL_SCAN_MSG x
+#define WL_WSEC(x) WL_WSEC_MSG x
+
+#ifdef BCMWAPI_WPI
+/* these items should eventually go into wireless.h of the linux system header file dir */
+#ifndef IW_ENCODE_ALG_SM4
+#define IW_ENCODE_ALG_SM4 0x20
+#endif
+
+#ifndef IW_AUTH_WAPI_ENABLED
+#define IW_AUTH_WAPI_ENABLED 0x20
+#endif
+
+#ifndef IW_AUTH_WAPI_VERSION_1
+#define IW_AUTH_WAPI_VERSION_1 0x00000008
+#endif
+
+#ifndef IW_AUTH_CIPHER_SMS4
+#define IW_AUTH_CIPHER_SMS4 0x00000020
+#endif
+
+#ifndef IW_AUTH_KEY_MGMT_WAPI_PSK
+#define IW_AUTH_KEY_MGMT_WAPI_PSK 4
+#endif
+
+#ifndef IW_AUTH_KEY_MGMT_WAPI_CERT
+#define IW_AUTH_KEY_MGMT_WAPI_CERT 8
+#endif
+#endif /* BCMWAPI_WPI */
+
+/* Broadcom extensions to WEXT, linux upstream has obsoleted WEXT */
+#ifndef IW_AUTH_KEY_MGMT_FT_802_1X
+#define IW_AUTH_KEY_MGMT_FT_802_1X 0x04
+#endif
+
+#ifndef IW_AUTH_KEY_MGMT_FT_PSK
+#define IW_AUTH_KEY_MGMT_FT_PSK 0x08
+#endif
+
+#ifndef IW_ENC_CAPA_FW_ROAM_ENABLE
+#define IW_ENC_CAPA_FW_ROAM_ENABLE 0x00000020
+#endif
+
+
+/* FC9: wireless.h 2.6.25-14.fc9.i686 is missing these, even though WIRELESS_EXT is set to latest
+ * version 22.
+ */
+#ifndef IW_ENCODE_ALG_PMK
+#define IW_ENCODE_ALG_PMK 4
+#endif
+#ifndef IW_ENC_CAPA_4WAY_HANDSHAKE
+#define IW_ENC_CAPA_4WAY_HANDSHAKE 0x00000010
+#endif
+/* End FC9. */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+#include <linux/rtnetlink.h>
+#endif
+
+extern bool wl_iw_conn_status_str(uint32 event_type, uint32 status,
+ uint32 reason, char* stringBuf, uint buflen);
+
+uint wl_msg_level = WL_ERROR_VAL;
+
+#define MAX_WLIW_IOCTL_LEN WLC_IOCTL_MEDLEN
+
+/* IOCTL swapping mode for a big-endian host with a little-endian dongle. Defaults to off */
+#define htod32(i) (i)
+#define htod16(i) (i)
+#define dtoh32(i) (i)
+#define dtoh16(i) (i)
+#define htodchanspec(i) (i)
+#define dtohchanspec(i) (i)
+
+extern struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev);
+extern int dhd_wait_pend8021x(struct net_device *dev);
+
+#if WIRELESS_EXT < 19
+#define IW_IOCTL_IDX(cmd) ((cmd) - SIOCIWFIRST)
+#define IW_EVENT_IDX(cmd) ((cmd) - IWEVFIRST)
+#endif /* WIRELESS_EXT < 19 */
+
+
+#ifndef WL_ESCAN
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+#define DAEMONIZE(a) do { \
+ allow_signal(SIGKILL); \
+ allow_signal(SIGTERM); \
+ } while (0)
+#elif ((LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) && \
+ (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)))
+#define DAEMONIZE(a) daemonize(a); \
+ allow_signal(SIGKILL); \
+ allow_signal(SIGTERM);
+#else /* Linux 2.4 (w/o preemption patch) */
+#define RAISE_RX_SOFTIRQ() \
+ cpu_raise_softirq(smp_processor_id(), NET_RX_SOFTIRQ)
+#define DAEMONIZE(a) daemonize(); \
+ do { if (a) \
+ strncpy(current->comm, a, MIN(sizeof(current->comm), (strlen(a) + 1))); \
+ } while (0);
+#endif /* LINUX_VERSION_CODE */
+
+#define ISCAN_STATE_IDLE 0
+#define ISCAN_STATE_SCANING 1
+
+/* the buf length can be WLC_IOCTL_MAXLEN (8K) to reduce iterations */
+#define WLC_IW_ISCAN_MAXLEN 2048
+typedef struct iscan_buf {
+ struct iscan_buf * next;
+ char iscan_buf[WLC_IW_ISCAN_MAXLEN];
+} iscan_buf_t;
+
+typedef struct iscan_info {
+ struct net_device *dev;
+ timer_list_compat_t timer;
+ uint32 timer_ms;
+ uint32 timer_on;
+ int iscan_state;
+ iscan_buf_t * list_hdr;
+ iscan_buf_t * list_cur;
+
+ /* Thread to work on iscan */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+ struct task_struct *kthread;
+#endif
+ long sysioc_pid;
+ struct semaphore sysioc_sem;
+ struct completion sysioc_exited;
+ char ioctlbuf[WLC_IOCTL_SMLEN];
+} iscan_info_t;
+static void wl_iw_timerfunc(ulong data);
+static void wl_iw_set_event_mask(struct net_device *dev);
+static int wl_iw_iscan(iscan_info_t *iscan, wlc_ssid_t *ssid, uint16 action);
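+
+/*
+ * Incremental-scan flow (a sketch inferred from the code below): a
+ * SIOCSIWSCAN request arms the iscan timer and issues WL_SCAN_ACTION_START;
+ * partial results accumulate in the iscan_buf list until the scan completes,
+ * after which SIOCGIWSCAN walks the chain from list_hdr to list_cur.
+ */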
+#endif /* !WL_ESCAN */
+
+struct pmk_list {
+ pmkid_list_t pmkids;
+ pmkid_t foo[MAXPMKID - 1];
+};
+
+typedef struct wl_wext_info {
+ struct net_device *dev;
+ dhd_pub_t *dhd;
+ struct delayed_work pm_enable_work;
+ struct mutex pm_sync;
+ struct wl_conn_info conn_info;
+ struct pmk_list pmk_list;
+#ifndef WL_ESCAN
+ struct iscan_info iscan;
+#endif
+} wl_wext_info_t;
+
+/* priv_link becomes netdev->priv and is the link between netdev and wlif struct */
+typedef struct priv_link {
+ wl_iw_t *wliw;
+} priv_link_t;
+
+/* dev to priv_link */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
+#define WL_DEV_LINK(dev) (priv_link_t*)(dev->priv)
+#else
+#define WL_DEV_LINK(dev) (priv_link_t*)netdev_priv(dev)
+#endif
+
+/* dev to wl_iw_t */
+#define IW_DEV_IF(dev) ((wl_iw_t*)(WL_DEV_LINK(dev))->wliw)
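+
+/*
+ * Illustrative use of the accessors above (not from the original source):
+ *	wl_iw_t *iw = IW_DEV_IF(dev);
+ * resolves the priv_link_t hung off the net_device private area and then
+ * follows its wliw pointer.
+ */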
+
+static void swap_key_from_BE(
+ wl_wsec_key_t *key
+)
+{
+ key->index = htod32(key->index);
+ key->len = htod32(key->len);
+ key->algo = htod32(key->algo);
+ key->flags = htod32(key->flags);
+ key->rxiv.hi = htod32(key->rxiv.hi);
+ key->rxiv.lo = htod16(key->rxiv.lo);
+ key->iv_initialized = htod32(key->iv_initialized);
+}
+
+static void swap_key_to_BE(
+ wl_wsec_key_t *key
+)
+{
+ key->index = dtoh32(key->index);
+ key->len = dtoh32(key->len);
+ key->algo = dtoh32(key->algo);
+ key->flags = dtoh32(key->flags);
+ key->rxiv.hi = dtoh32(key->rxiv.hi);
+ key->rxiv.lo = dtoh16(key->rxiv.lo);
+ key->iv_initialized = dtoh32(key->iv_initialized);
+}
+
+static int
+dev_wlc_ioctl(
+ struct net_device *dev,
+ int cmd,
+ void *arg,
+ int len
+)
+{
+ struct dhd_pub *dhd = dhd_get_pub(dev);
+ dhd_ioctl_t ioc;
+ int8 index;
+ int ret;
+
+ memset(&ioc, 0, sizeof(ioc));
+ ioc.cmd = cmd;
+ ioc.buf = arg;
+ ioc.len = len;
+
+ index = dhd_net2idx(dhd->info, dev);
+ if (index == DHD_BAD_IF) {
+ WL_ERROR(("Bad ifidx from dev:%p\n", dev));
+ return -ENODEV;
+ }
+ ret = dhd_ioctl_process(dhd, index, &ioc, arg);
+
+ return ret;
+}
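+
+/*
+ * Example (illustrative only): any WLC ioctl can be routed through this
+ * helper, e.g. reading the driver magic value:
+ *	int magic = 0;
+ *	if (dev_wlc_ioctl(dev, WLC_GET_MAGIC, &magic, sizeof(magic)) == 0)
+ *		magic = dtoh32(magic);
+ */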
+
+/*
+Set a named driver variable to an int value and return an error indication.
+calling example: dev_wlc_intvar_set(dev, "arate", rate)
+*/
+
+static int
+dev_wlc_intvar_set(
+ struct net_device *dev,
+ char *name,
+ int val)
+{
+ char buf[WLC_IOCTL_SMLEN];
+ uint len;
+
+ val = htod32(val);
+ len = bcm_mkiovar(name, (char *)(&val), sizeof(val), buf, sizeof(buf));
+ ASSERT(len);
+
+ return (dev_wlc_ioctl(dev, WLC_SET_VAR, buf, len));
+}
+
+#ifndef WL_ESCAN
+static int
+dev_iw_iovar_setbuf(
+ struct net_device *dev,
+ char *iovar,
+ void *param,
+ int paramlen,
+ void *bufptr,
+ int buflen)
+{
+ int iolen;
+
+ iolen = bcm_mkiovar(iovar, param, paramlen, bufptr, buflen);
+ ASSERT(iolen);
+ BCM_REFERENCE(iolen);
+
+ return (dev_wlc_ioctl(dev, WLC_SET_VAR, bufptr, iolen));
+}
+
+static int
+dev_iw_iovar_getbuf(
+ struct net_device *dev,
+ char *iovar,
+ void *param,
+ int paramlen,
+ void *bufptr,
+ int buflen)
+{
+ int iolen;
+
+ iolen = bcm_mkiovar(iovar, param, paramlen, bufptr, buflen);
+ ASSERT(iolen);
+ BCM_REFERENCE(iolen);
+
+ return (dev_wlc_ioctl(dev, WLC_GET_VAR, bufptr, buflen));
+}
+#endif
+
+#if WIRELESS_EXT > 17
+static int
+dev_wlc_bufvar_set(
+ struct net_device *dev,
+ char *name,
+ char *buf, int len)
+{
+ char *ioctlbuf;
+ uint buflen;
+ int error;
+
+ ioctlbuf = kmalloc(MAX_WLIW_IOCTL_LEN, GFP_KERNEL);
+ if (!ioctlbuf)
+ return -ENOMEM;
+
+ buflen = bcm_mkiovar(name, buf, len, ioctlbuf, MAX_WLIW_IOCTL_LEN);
+ ASSERT(buflen);
+ error = dev_wlc_ioctl(dev, WLC_SET_VAR, ioctlbuf, buflen);
+
+ kfree(ioctlbuf);
+ return error;
+}
+#endif /* WIRELESS_EXT > 17 */
+
+/*
+Get a named driver variable into a caller-supplied buffer and return an error indication.
+calling example: dev_wlc_bufvar_get(dev, "arate", &rate)
+*/
+
+static int
+dev_wlc_bufvar_get(
+ struct net_device *dev,
+ char *name,
+ char *buf, int buflen)
+{
+ char *ioctlbuf;
+ int error;
+
+ uint len;
+
+ ioctlbuf = kmalloc(MAX_WLIW_IOCTL_LEN, GFP_KERNEL);
+ if (!ioctlbuf)
+ return -ENOMEM;
+ len = bcm_mkiovar(name, NULL, 0, ioctlbuf, MAX_WLIW_IOCTL_LEN);
+ ASSERT(len);
+ BCM_REFERENCE(len);
+ error = dev_wlc_ioctl(dev, WLC_GET_VAR, (void *)ioctlbuf, MAX_WLIW_IOCTL_LEN);
+ if (!error)
+ bcopy(ioctlbuf, buf, buflen);
+
+ kfree(ioctlbuf);
+ return (error);
+}
+
+/*
+Get a named driver variable as an int value and return an error indication.
+calling example: dev_wlc_intvar_get(dev, "arate", &rate)
+*/
+
+static int
+dev_wlc_intvar_get(
+ struct net_device *dev,
+ char *name,
+ int *retval)
+{
+ union {
+ char buf[WLC_IOCTL_SMLEN];
+ int val;
+ } var;
+ int error;
+
+ uint len;
+ uint data_null;
+
+ len = bcm_mkiovar(name, (char *)(&data_null), 0, (char *)(&var), sizeof(var.buf));
+ ASSERT(len);
+ error = dev_wlc_ioctl(dev, WLC_GET_VAR, (void *)&var, len);
+
+ *retval = dtoh32(var.val);
+
+ return (error);
+}
+
+/* Maintain backward compatibility */
+#if WIRELESS_EXT < 13
+struct iw_request_info
+{
+ __u16 cmd; /* Wireless Extension command */
+ __u16 flags; /* More to come ;-) */
+};
+
+typedef int (*iw_handler)(struct net_device *dev, struct iw_request_info *info,
+ void *wrqu, char *extra);
+#endif /* WIRELESS_EXT < 13 */
+
+#if WIRELESS_EXT > 12
+static int
+wl_iw_set_leddc(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra
+)
+{
+ int dc = *(int *)extra;
+ int error;
+
+ error = dev_wlc_intvar_set(dev, "leddc", dc);
+ return error;
+}
+
+static int
+wl_iw_set_vlanmode(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra
+)
+{
+ int mode = *(int *)extra;
+ int error;
+
+ mode = htod32(mode);
+ error = dev_wlc_intvar_set(dev, "vlan_mode", mode);
+ return error;
+}
+
+static int
+wl_iw_set_pm(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra
+)
+{
+ int pm = *(int *)extra;
+ int error;
+
+ pm = htod32(pm);
+ error = dev_wlc_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm));
+ return error;
+}
+#endif /* WIRELESS_EXT > 12 */
+
+int
+wl_iw_send_priv_event(
+ struct net_device *dev,
+ char *flag
+)
+{
+ union iwreq_data wrqu;
+ char extra[IW_CUSTOM_MAX + 1];
+ int cmd;
+
+ cmd = IWEVCUSTOM;
+ memset(&wrqu, 0, sizeof(wrqu));
+	/* reject strings that cannot fit with a terminating NUL */
+	if (strlen(flag) >= sizeof(extra))
+ return -1;
+
+ strncpy(extra, flag, sizeof(extra));
+ extra[sizeof(extra) - 1] = '\0';
+ wrqu.data.length = strlen(extra);
+ wireless_send_event(dev, cmd, &wrqu, extra);
+ WL_TRACE(("Send IWEVCUSTOM Event as %s\n", extra));
+
+ return 0;
+}
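+
+/*
+ * Example (illustrative): the private events are plain strings, e.g.
+ *	wl_iw_send_priv_event(dev, "HANG");
+ * which reaches user space as an IWEVCUSTOM wireless event carrying "HANG".
+ */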
+
+static int
+wl_iw_config_commit(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ void *zwrq,
+ char *extra
+)
+{
+ wlc_ssid_t ssid;
+ int error;
+ struct sockaddr bssid;
+
+ WL_TRACE(("%s: SIOCSIWCOMMIT\n", dev->name));
+
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_SSID, &ssid, sizeof(ssid))))
+ return error;
+
+ ssid.SSID_len = dtoh32(ssid.SSID_len);
+
+ if (!ssid.SSID_len)
+ return 0;
+
+ bzero(&bssid, sizeof(struct sockaddr));
+ if ((error = dev_wlc_ioctl(dev, WLC_REASSOC, &bssid, ETHER_ADDR_LEN))) {
+ WL_ERROR(("WLC_REASSOC failed (%d)\n", error));
+ return error;
+ }
+
+ return 0;
+}
+
+static int
+wl_iw_get_name(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *cwrq,
+ char *extra
+)
+{
+ int phytype, err;
+ uint band[3];
+ char cap[5];
+
+ WL_TRACE(("%s: SIOCGIWNAME\n", dev->name));
+
+ cap[0] = 0;
+ if ((err = dev_wlc_ioctl(dev, WLC_GET_PHYTYPE, &phytype, sizeof(phytype))) < 0)
+ goto done;
+ if ((err = dev_wlc_ioctl(dev, WLC_GET_BANDLIST, band, sizeof(band))) < 0)
+ goto done;
+
+ band[0] = dtoh32(band[0]);
+ switch (phytype) {
+ case WLC_PHY_TYPE_A:
+ strncpy(cap, "a", sizeof(cap));
+ break;
+ case WLC_PHY_TYPE_B:
+ strncpy(cap, "b", sizeof(cap));
+ break;
+ case WLC_PHY_TYPE_G:
+ if (band[0] >= 2)
+ strncpy(cap, "abg", sizeof(cap));
+ else
+ strncpy(cap, "bg", sizeof(cap));
+ break;
+ case WLC_PHY_TYPE_N:
+ if (band[0] >= 2)
+ strncpy(cap, "abgn", sizeof(cap));
+ else
+ strncpy(cap, "bgn", sizeof(cap));
+ break;
+ }
+done:
+ (void)snprintf(cwrq->name, IFNAMSIZ, "IEEE 802.11%s", cap);
+
+ return 0;
+}
+
+#define DHD_CHECK(dhd, dev) \
+ if (!dhd) { \
+ WL_ERROR (("[%s] dhd is NULL\n", dev->name)); \
+ return -ENODEV; \
+	}
+
+static int
+wl_iw_set_freq(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_freq *fwrq,
+ char *extra
+)
+{
+ int error, chan;
+ uint sf = 0;
+ struct dhd_pub *dhd = dhd_get_pub(dev);
+ wl_wext_info_t *wext_info = NULL;
+
+ WL_TRACE(("%s: SIOCSIWFREQ\n", dev->name));
+ DHD_CHECK(dhd, dev);
+ wext_info = dhd->wext_info;
+
+ /* Setting by channel number */
+ if (fwrq->e == 0 && fwrq->m < MAXCHANNEL) {
+ chan = fwrq->m;
+ }
+
+ /* Setting by frequency */
+ else {
+ /* Convert to MHz as best we can */
+ if (fwrq->e >= 6) {
+ fwrq->e -= 6;
+ while (fwrq->e--)
+ fwrq->m *= 10;
+ } else if (fwrq->e < 6) {
+ while (fwrq->e++ < 6)
+ fwrq->m /= 10;
+ }
+ /* handle 4.9GHz frequencies as Japan 4 GHz based channelization */
+ if (fwrq->m > 4000 && fwrq->m < 5000) {
+ sf = WF_CHAN_FACTOR_4_G; /* start factor for 4 GHz */
+ }
+ chan = wf_mhz2channel(fwrq->m, sf);
+ }
+ if (wext_info)
+ wext_info->conn_info.channel = chan;
+ WL_MSG(dev->name, "chan=%d\n", chan);
+ chan = htod32(chan);
+ if ((error = dev_wlc_ioctl(dev, WLC_SET_CHANNEL, &chan, sizeof(chan)))) {
+ WL_ERROR(("WLC_SET_CHANNEL failed (%d).\n", error));
+ return error;
+ }
+
+ /* -EINPROGRESS: Call commit handler */
+ return -EINPROGRESS;
+}
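+
+/*
+ * Worked example (illustrative): struct iw_freq encodes a value as
+ * m x 10^e, so 2.412 GHz may arrive as m=2412, e=6. The handler above
+ * normalizes that to 2412 MHz and wf_mhz2channel() maps it to channel 1;
+ * a bare m=1, e=0 is taken directly as channel 1.
+ */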
+
+static int
+wl_iw_get_freq(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_freq *fwrq,
+ char *extra
+)
+{
+ int error;
+ u32 chanspec = 0;
+ int ctl_chan;
+
+ WL_TRACE(("%s: SIOCGIWFREQ\n", dev->name));
+
+ if ((error = dev_wlc_intvar_get(dev, "chanspec", &chanspec)))
+ return error;
+ ctl_chan = wf_chspec_ctlchan(chanspec);
+
+ /* Return radio channel in channel form */
+ fwrq->m = ctl_chan;
+ fwrq->e = dtoh32(0);
+ return 0;
+}
+
+static int
+wl_iw_set_mode(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ __u32 *uwrq,
+ char *extra
+)
+{
+ int infra = 0, ap = 0, error = 0;
+ struct dhd_pub *dhd = dhd_get_pub(dev);
+ wl_wext_info_t *wext_info = NULL;
+
+ WL_TRACE(("%s: SIOCSIWMODE\n", dev->name));
+ DHD_CHECK(dhd, dev);
+ wext_info = dhd->wext_info;
+ if (wext_info) {
+ memset(&wext_info->conn_info.ssid, 0, sizeof(wlc_ssid_t));
+ memset(&wext_info->conn_info.bssid, 0, sizeof(struct ether_addr));
+ wext_info->conn_info.channel = 0;
+ }
+
+ switch (*uwrq) {
+ case IW_MODE_MASTER:
+ infra = ap = 1;
+ break;
+ case IW_MODE_ADHOC:
+ case IW_MODE_AUTO:
+ break;
+ case IW_MODE_INFRA:
+ infra = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+ infra = htod32(infra);
+ ap = htod32(ap);
+
+ if ((error = dev_wlc_ioctl(dev, WLC_SET_INFRA, &infra, sizeof(infra))) ||
+ (error = dev_wlc_ioctl(dev, WLC_SET_AP, &ap, sizeof(ap))))
+ return error;
+
+ /* -EINPROGRESS: Call commit handler */
+ return -EINPROGRESS;
+}
+
+static int
+wl_iw_get_mode(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ __u32 *uwrq,
+ char *extra
+)
+{
+ int error, infra = 0, ap = 0;
+
+ WL_TRACE(("%s: SIOCGIWMODE\n", dev->name));
+
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_INFRA, &infra, sizeof(infra))) ||
+ (error = dev_wlc_ioctl(dev, WLC_GET_AP, &ap, sizeof(ap))))
+ return error;
+
+ infra = dtoh32(infra);
+ ap = dtoh32(ap);
+ *uwrq = infra ? ap ? IW_MODE_MASTER : IW_MODE_INFRA : IW_MODE_ADHOC;
+
+ return 0;
+}
+
+static int
+wl_iw_get_range(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ struct iw_range *range = (struct iw_range *) extra;
+ static int channels[MAXCHANNEL+1];
+ wl_uint32_list_t *list = (wl_uint32_list_t *) channels;
+ wl_rateset_t rateset;
+ int error, i, k;
+ uint sf, ch;
+
+ int phytype;
+ int bw_cap = 0, sgi_tx = 0, nmode = 0;
+ channel_info_t ci;
+ uint8 nrate_list2copy = 0;
+ uint16 nrate_list[4][8] = { {13, 26, 39, 52, 78, 104, 117, 130},
+ {14, 29, 43, 58, 87, 116, 130, 144},
+ {27, 54, 81, 108, 162, 216, 243, 270},
+ {30, 60, 90, 120, 180, 240, 270, 300}};
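+	/* Illustrative reading (not from the original source): the four rows
+	 * above look like MCS0-7 rates in 500 kbps units for 20 MHz long GI,
+	 * 20 MHz short GI, 40 MHz long GI and 40 MHz short GI respectively,
+	 * selected below via bw_cap/sgi_tx; e.g. 13 * 500 kbps = 6.5 Mbps,
+	 * which is MCS0 at 20 MHz with long GI.
+	 */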
+ int fbt_cap = 0;
+
+ WL_TRACE(("%s: SIOCGIWRANGE\n", dev->name));
+
+ if (!extra)
+ return -EINVAL;
+
+ dwrq->length = sizeof(struct iw_range);
+ memset(range, 0, sizeof(*range));
+
+ /* We don't use nwids */
+ range->min_nwid = range->max_nwid = 0;
+
+ /* Set available channels/frequencies */
+ list->count = htod32(MAXCHANNEL);
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_VALID_CHANNELS, channels, sizeof(channels))))
+ return error;
+ for (i = 0; i < dtoh32(list->count) && i < IW_MAX_FREQUENCIES; i++) {
+ range->freq[i].i = dtoh32(list->element[i]);
+
+ ch = dtoh32(list->element[i]);
+ if (ch <= CH_MAX_2G_CHANNEL)
+ sf = WF_CHAN_FACTOR_2_4_G;
+ else
+ sf = WF_CHAN_FACTOR_5_G;
+
+ range->freq[i].m = wf_channel2mhz(ch, sf);
+ range->freq[i].e = 6;
+ }
+ range->num_frequency = range->num_channels = i;
+
+ /* Link quality (use NDIS cutoffs) */
+ range->max_qual.qual = 5;
+ /* Signal level (use RSSI) */
+ range->max_qual.level = 0x100 - 200; /* -200 dBm */
+ /* Noise level (use noise) */
+ range->max_qual.noise = 0x100 - 200; /* -200 dBm */
+ /* Signal level threshold range (?) */
+ range->sensitivity = 65535;
+
+#if WIRELESS_EXT > 11
+ /* Link quality (use NDIS cutoffs) */
+ range->avg_qual.qual = 3;
+ /* Signal level (use RSSI) */
+ range->avg_qual.level = 0x100 + WL_IW_RSSI_GOOD;
+ /* Noise level (use noise) */
+ range->avg_qual.noise = 0x100 - 75; /* -75 dBm */
+#endif /* WIRELESS_EXT > 11 */
+
+ /* Set available bitrates */
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_CURR_RATESET, &rateset, sizeof(rateset))))
+ return error;
+ rateset.count = dtoh32(rateset.count);
+ range->num_bitrates = rateset.count;
+ for (i = 0; i < rateset.count && i < IW_MAX_BITRATES; i++)
+ range->bitrate[i] = (rateset.rates[i] & 0x7f) * 500000; /* convert to bps */
+ if ((error = dev_wlc_intvar_get(dev, "nmode", &nmode)))
+ return error;
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_PHYTYPE, &phytype, sizeof(phytype))))
+ return error;
+ if (nmode == 1 && (((phytype == WLC_PHY_TYPE_LCN) ||
+ (phytype == WLC_PHY_TYPE_LCN40)))) {
+ if ((error = dev_wlc_intvar_get(dev, "mimo_bw_cap", &bw_cap)))
+ return error;
+ if ((error = dev_wlc_intvar_get(dev, "sgi_tx", &sgi_tx)))
+ return error;
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_CHANNEL, &ci, sizeof(channel_info_t))))
+ return error;
+ ci.hw_channel = dtoh32(ci.hw_channel);
+
+ if (bw_cap == 0 ||
+ (bw_cap == 2 && ci.hw_channel <= 14)) {
+ if (sgi_tx == 0)
+ nrate_list2copy = 0;
+ else
+ nrate_list2copy = 1;
+ }
+ if (bw_cap == 1 ||
+ (bw_cap == 2 && ci.hw_channel >= 36)) {
+ if (sgi_tx == 0)
+ nrate_list2copy = 2;
+ else
+ nrate_list2copy = 3;
+ }
+ range->num_bitrates += 8;
+ ASSERT(range->num_bitrates < IW_MAX_BITRATES);
+ for (k = 0; i < range->num_bitrates; k++, i++) {
+ /* convert to bps */
+ range->bitrate[i] = (nrate_list[nrate_list2copy][k]) * 500000;
+ }
+ }
+
+ /* Set an indication of the max TCP throughput
+ * in bit/s that we can expect using this interface.
+ * May be use for QoS stuff... Jean II
+ */
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_PHYTYPE, &i, sizeof(i))))
+ return error;
+ i = dtoh32(i);
+ if (i == WLC_PHY_TYPE_A)
+ range->throughput = 24000000; /* 24 Mbits/s */
+ else
+ range->throughput = 1500000; /* 1.5 Mbits/s */
+
+ /* RTS and fragmentation thresholds */
+ range->min_rts = 0;
+ range->max_rts = 2347;
+ range->min_frag = 256;
+ range->max_frag = 2346;
+
+ range->max_encoding_tokens = DOT11_MAX_DEFAULT_KEYS;
+ range->num_encoding_sizes = 4;
+ range->encoding_size[0] = WEP1_KEY_SIZE;
+ range->encoding_size[1] = WEP128_KEY_SIZE;
+#if WIRELESS_EXT > 17
+ range->encoding_size[2] = TKIP_KEY_SIZE;
+#else
+ range->encoding_size[2] = 0;
+#endif
+ range->encoding_size[3] = AES_KEY_SIZE;
+
+ /* Do not support power micro-management */
+ range->min_pmp = 0;
+ range->max_pmp = 0;
+ range->min_pmt = 0;
+ range->max_pmt = 0;
+ range->pmp_flags = 0;
+ range->pm_capa = 0;
+
+ /* Transmit Power - values are in mW */
+ range->num_txpower = 2;
+ range->txpower[0] = 1;
+ range->txpower[1] = 255;
+ range->txpower_capa = IW_TXPOW_MWATT;
+
+#if WIRELESS_EXT > 10
+ range->we_version_compiled = WIRELESS_EXT;
+ range->we_version_source = 19;
+
+ /* Only support retry limits */
+ range->retry_capa = IW_RETRY_LIMIT;
+ range->retry_flags = IW_RETRY_LIMIT;
+ range->r_time_flags = 0;
+ /* SRL and LRL limits */
+ range->min_retry = 1;
+ range->max_retry = 255;
+ /* Retry lifetime limits unsupported */
+ range->min_r_time = 0;
+ range->max_r_time = 0;
+#endif /* WIRELESS_EXT > 10 */
+
+#if WIRELESS_EXT > 17
+ range->enc_capa = IW_ENC_CAPA_WPA;
+ range->enc_capa |= IW_ENC_CAPA_CIPHER_TKIP;
+ range->enc_capa |= IW_ENC_CAPA_CIPHER_CCMP;
+ range->enc_capa |= IW_ENC_CAPA_WPA2;
+
+ /* Determine driver FBT capability. */
+ if (dev_wlc_intvar_get(dev, "fbt_cap", &fbt_cap) == 0) {
+ if (fbt_cap == WLC_FBT_CAP_DRV_4WAY_AND_REASSOC) {
+ /* Tell the host (e.g. wpa_supplicant) to let driver do the handshake */
+// range->enc_capa |= IW_ENC_CAPA_4WAY_HANDSHAKE;
+ }
+ }
+
+#ifdef BCMFW_ROAM_ENABLE_WEXT
+ /* Advertise firmware roam capability to the external supplicant */
+ range->enc_capa |= IW_ENC_CAPA_FW_ROAM_ENABLE;
+#endif /* BCMFW_ROAM_ENABLE_WEXT */
+
+ /* Event capability (kernel) */
+ IW_EVENT_CAPA_SET_KERNEL(range->event_capa);
+ /* Event capability (driver) */
+ IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWAP);
+ IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWSCAN);
+ IW_EVENT_CAPA_SET(range->event_capa, IWEVTXDROP);
+ IW_EVENT_CAPA_SET(range->event_capa, IWEVMICHAELMICFAILURE);
+ IW_EVENT_CAPA_SET(range->event_capa, IWEVASSOCREQIE);
+ IW_EVENT_CAPA_SET(range->event_capa, IWEVASSOCRESPIE);
+ IW_EVENT_CAPA_SET(range->event_capa, IWEVPMKIDCAND);
+
+#if WIRELESS_EXT >= 22 && defined(IW_SCAN_CAPA_ESSID)
+ /* FC7 wireless.h defines EXT 22 but doesn't define scan_capa bits */
+ range->scan_capa = IW_SCAN_CAPA_ESSID;
+#endif
+#endif /* WIRELESS_EXT > 17 */
+
+ return 0;
+}
+
+#ifndef WL_ESCAN
+static int
+rssi_to_qual(int rssi)
+{
+ if (rssi <= WL_IW_RSSI_NO_SIGNAL)
+ return 0;
+ else if (rssi <= WL_IW_RSSI_VERY_LOW)
+ return 1;
+ else if (rssi <= WL_IW_RSSI_LOW)
+ return 2;
+ else if (rssi <= WL_IW_RSSI_GOOD)
+ return 3;
+ else if (rssi <= WL_IW_RSSI_VERY_GOOD)
+ return 4;
+ else
+ return 5;
+}
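+
+/*
+ * Illustrative mapping: rssi_to_qual() buckets a dBm reading against the
+ * WL_IW_RSSI_* thresholds from wl_iw.h into the 0..5 quality scale used by
+ * SIOCGIWSPY/SIOCGIWSCAN reporting, 0 meaning no signal and 5 the strongest.
+ */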
+#endif /* WL_ESCAN */
+
+static int
+wl_iw_set_spy(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ wl_iw_t *iw = IW_DEV_IF(dev);
+ struct sockaddr *addr = (struct sockaddr *) extra;
+ int i;
+
+ WL_TRACE(("%s: SIOCSIWSPY\n", dev->name));
+
+ if (!extra)
+ return -EINVAL;
+
+ iw->spy_num = MIN(ARRAYSIZE(iw->spy_addr), dwrq->length);
+ for (i = 0; i < iw->spy_num; i++)
+ memcpy(&iw->spy_addr[i], addr[i].sa_data, ETHER_ADDR_LEN);
+ memset(iw->spy_qual, 0, sizeof(iw->spy_qual));
+
+ return 0;
+}
+
+static int
+wl_iw_get_spy(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ wl_iw_t *iw = IW_DEV_IF(dev);
+ struct sockaddr *addr = (struct sockaddr *) extra;
+ struct iw_quality *qual = (struct iw_quality *) &addr[iw->spy_num];
+ int i;
+
+ WL_TRACE(("%s: SIOCGIWSPY\n", dev->name));
+
+ if (!extra)
+ return -EINVAL;
+
+ dwrq->length = iw->spy_num;
+ for (i = 0; i < iw->spy_num; i++) {
+ memcpy(addr[i].sa_data, &iw->spy_addr[i], ETHER_ADDR_LEN);
+ addr[i].sa_family = AF_UNIX;
+ memcpy(&qual[i], &iw->spy_qual[i], sizeof(struct iw_quality));
+ iw->spy_qual[i].updated = 0;
+ }
+
+ return 0;
+}
+
+static int
+wl_iw_set_wap(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct sockaddr *awrq,
+ char *extra
+)
+{
+ int error = -EINVAL;
+ struct dhd_pub *dhd = dhd_get_pub(dev);
+ wl_wext_info_t *wext_info = NULL;
+
+ WL_TRACE(("%s: SIOCSIWAP\n", dev->name));
+ DHD_CHECK(dhd, dev);
+ wext_info = dhd->wext_info;
+ if (awrq->sa_family != ARPHRD_ETHER) {
+ WL_ERROR(("Invalid Header...sa_family\n"));
+ return -EINVAL;
+ }
+
+ /* Ignore "auto" or "off" */
+ if (ETHER_ISBCAST(awrq->sa_data) || ETHER_ISNULLADDR(awrq->sa_data)) {
+ scb_val_t scbval;
+ bzero(&scbval, sizeof(scb_val_t));
+ WL_MSG(dev->name, "WLC_DISASSOC\n");
+ if ((error = dev_wlc_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t)))) {
+ WL_ERROR(("WLC_DISASSOC failed (%d).\n", error));
+ }
+#ifdef WL_EXT_IAPSTA
+ wl_ext_in4way_sync_wext(dev, STA_NO_BTC_IN4WAY|STA_WAIT_DISCONNECTED,
+ WL_EXT_STATUS_DISCONNECTING, NULL);
+#endif
+ return 0;
+ }
+ /* WL_ASSOC(("Assoc to %s\n", bcm_ether_ntoa((struct ether_addr *)&(awrq->sa_data),
+ * eabuf)));
+ */
+ /* Reassociate to the specified AP */
+ if (wext_info)
+ memcpy(&wext_info->conn_info.bssid, awrq->sa_data, ETHER_ADDR_LEN);
+ if (wext_info && wext_info->conn_info.ssid.SSID_len) {
+ if ((error = wl_ext_connect(dev, &wext_info->conn_info)))
+ return error;
+ } else {
+ if ((error = dev_wlc_ioctl(dev, WLC_REASSOC, awrq->sa_data, ETHER_ADDR_LEN))) {
+ WL_ERROR(("WLC_REASSOC failed (%d).\n", error));
+ return error;
+ }
+ WL_MSG(dev->name, "join BSSID="MACSTR"\n", MAC2STR((u8 *)awrq->sa_data));
+ }
+#ifdef WL_EXT_IAPSTA
+ wl_ext_in4way_sync_wext(dev, STA_NO_BTC_IN4WAY, WL_EXT_STATUS_CONNECTING, NULL);
+#endif
+
+ return 0;
+}
+
+static int
+wl_iw_get_wap(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct sockaddr *awrq,
+ char *extra
+)
+{
+ WL_TRACE(("%s: SIOCGIWAP\n", dev->name));
+
+ awrq->sa_family = ARPHRD_ETHER;
+ memset(awrq->sa_data, 0, ETHER_ADDR_LEN);
+
+ /* Ignore error (may be down or disassociated) */
+ (void) dev_wlc_ioctl(dev, WLC_GET_BSSID, awrq->sa_data, ETHER_ADDR_LEN);
+
+ return 0;
+}
+
+#if WIRELESS_EXT > 17
+static int
+wl_iw_mlme(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct sockaddr *awrq,
+ char *extra
+)
+{
+ struct iw_mlme *mlme;
+ scb_val_t scbval;
+ int error = -EINVAL;
+
+ WL_TRACE(("%s: SIOCSIWMLME\n", dev->name));
+
+ mlme = (struct iw_mlme *)extra;
+ if (mlme == NULL) {
+ WL_ERROR(("Invalid ioctl data.\n"));
+ return error;
+ }
+
+ scbval.val = mlme->reason_code;
+ bcopy(&mlme->addr.sa_data, &scbval.ea, ETHER_ADDR_LEN);
+
+ if (mlme->cmd == IW_MLME_DISASSOC) {
+ scbval.val = htod32(scbval.val);
+ WL_MSG(dev->name, "WLC_DISASSOC\n");
+ error = dev_wlc_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t));
+ }
+ else if (mlme->cmd == IW_MLME_DEAUTH) {
+ scbval.val = htod32(scbval.val);
+ WL_MSG(dev->name, "WLC_SCB_DEAUTHENTICATE_FOR_REASON\n");
+ error = dev_wlc_ioctl(dev, WLC_SCB_DEAUTHENTICATE_FOR_REASON, &scbval,
+ sizeof(scb_val_t));
+ }
+ else {
+ WL_ERROR(("Invalid ioctl data.\n"));
+ return error;
+ }
+#ifdef WL_EXT_IAPSTA
+ wl_ext_in4way_sync_wext(dev, STA_NO_BTC_IN4WAY|STA_WAIT_DISCONNECTED,
+ WL_EXT_STATUS_DISCONNECTING, NULL);
+#endif
+
+ return error;
+}
+#endif /* WIRELESS_EXT > 17 */
+
+#ifndef WL_ESCAN
+static int
+wl_iw_get_aplist(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ wl_scan_results_t *list;
+ struct sockaddr *addr = (struct sockaddr *) extra;
+ struct iw_quality qual[IW_MAX_AP];
+ wl_bss_info_t *bi = NULL;
+ int error, i;
+ uint buflen = dwrq->length;
+ int16 rssi;
+
+ WL_TRACE(("%s: SIOCGIWAPLIST\n", dev->name));
+
+ if (!extra)
+ return -EINVAL;
+
+ /* Get scan results (too large to put on the stack) */
+ list = kmalloc(buflen, GFP_KERNEL);
+ if (!list)
+ return -ENOMEM;
+ memset(list, 0, buflen);
+ list->buflen = htod32(buflen);
+ if ((error = dev_wlc_ioctl(dev, WLC_SCAN_RESULTS, list, buflen))) {
+ WL_ERROR(("%d: Scan results error %d\n", __LINE__, error));
+ kfree(list);
+ return error;
+ }
+ list->buflen = dtoh32(list->buflen);
+ list->version = dtoh32(list->version);
+ list->count = dtoh32(list->count);
+ ASSERT(list->version == WL_BSS_INFO_VERSION);
+
+ for (i = 0, dwrq->length = 0; i < list->count && dwrq->length < IW_MAX_AP; i++) {
+ bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : list->bss_info;
+ ASSERT(((uintptr)bi + dtoh32(bi->length)) <= ((uintptr)list +
+ buflen));
+
+ /* Infrastructure only */
+ if (!(dtoh16(bi->capability) & DOT11_CAP_ESS))
+ continue;
+
+ /* BSSID */
+ memcpy(addr[dwrq->length].sa_data, &bi->BSSID, ETHER_ADDR_LEN);
+ addr[dwrq->length].sa_family = ARPHRD_ETHER;
+		// terence 20150419: limit the max RSSI to -2 or the BSS will be filtered out by Android
+ rssi = MIN(dtoh16(bi->RSSI), RSSI_MAXVAL);
+ qual[dwrq->length].qual = rssi_to_qual(rssi);
+ qual[dwrq->length].level = 0x100 + rssi;
+ qual[dwrq->length].noise = 0x100 + bi->phy_noise;
+
+ /* Updated qual, level, and noise */
+#if WIRELESS_EXT > 18
+ qual[dwrq->length].updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
+#else
+ qual[dwrq->length].updated = 7;
+#endif /* WIRELESS_EXT > 18 */
+
+ dwrq->length++;
+ }
+
+ kfree(list);
+
+ if (dwrq->length) {
+ memcpy(&addr[dwrq->length], qual, sizeof(struct iw_quality) * dwrq->length);
+ /* Provided qual */
+ dwrq->flags = 1;
+ }
+
+ return 0;
+}
+
+static int
+wl_iw_iscan_get_aplist(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ wl_scan_results_t *list;
+ iscan_buf_t * buf;
+ iscan_info_t *iscan;
+
+ struct sockaddr *addr = (struct sockaddr *) extra;
+ struct iw_quality qual[IW_MAX_AP];
+ wl_bss_info_t *bi = NULL;
+ int i;
+ int16 rssi;
+ struct dhd_pub *dhd = dhd_get_pub(dev);
+ wl_wext_info_t *wext_info = NULL;
+
+ WL_TRACE(("%s: SIOCGIWAPLIST\n", dev->name));
+ DHD_CHECK(dhd, dev);
+ wext_info = dhd->wext_info;
+ iscan = &wext_info->iscan;
+
+ if (!extra)
+ return -EINVAL;
+
+ if ((!iscan) || (iscan->sysioc_pid < 0)) {
+ return wl_iw_get_aplist(dev, info, dwrq, extra);
+ }
+
+ buf = iscan->list_hdr;
+ /* Get scan results (too large to put on the stack) */
+ while (buf) {
+ list = &((wl_iscan_results_t*)buf->iscan_buf)->results;
+ ASSERT(list->version == WL_BSS_INFO_VERSION);
+
+ bi = NULL;
+ for (i = 0, dwrq->length = 0; i < list->count && dwrq->length < IW_MAX_AP; i++) {
+ bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : list->bss_info;
+ ASSERT(((uintptr)bi + dtoh32(bi->length)) <= ((uintptr)list +
+ WLC_IW_ISCAN_MAXLEN));
+
+ /* Infrastructure only */
+ if (!(dtoh16(bi->capability) & DOT11_CAP_ESS))
+ continue;
+
+ /* BSSID */
+ memcpy(addr[dwrq->length].sa_data, &bi->BSSID, ETHER_ADDR_LEN);
+ addr[dwrq->length].sa_family = ARPHRD_ETHER;
+			// terence 20150419: limit the max RSSI to -2 or the BSS will be filtered out by Android
+ rssi = MIN(dtoh16(bi->RSSI), RSSI_MAXVAL);
+ qual[dwrq->length].qual = rssi_to_qual(rssi);
+ qual[dwrq->length].level = 0x100 + rssi;
+ qual[dwrq->length].noise = 0x100 + bi->phy_noise;
+
+ /* Updated qual, level, and noise */
+#if WIRELESS_EXT > 18
+ qual[dwrq->length].updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
+#else
+ qual[dwrq->length].updated = 7;
+#endif /* WIRELESS_EXT > 18 */
+
+ dwrq->length++;
+ }
+ buf = buf->next;
+ }
+ if (dwrq->length) {
+ memcpy(&addr[dwrq->length], qual, sizeof(struct iw_quality) * dwrq->length);
+ /* Provided qual */
+ dwrq->flags = 1;
+ }
+
+ return 0;
+}
+#endif
+
+#if WIRELESS_EXT > 13
+#ifndef WL_ESCAN
+static int
+wl_iw_set_scan(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra
+)
+{
+ wlc_ssid_t ssid;
+
+ WL_TRACE(("%s: SIOCSIWSCAN\n", dev->name));
+
+ /* default Broadcast scan */
+ memset(&ssid, 0, sizeof(ssid));
+
+#if WIRELESS_EXT > 17
+ /* check for given essid */
+ if (wrqu->data.length == sizeof(struct iw_scan_req)) {
+ if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
+ struct iw_scan_req *req = (struct iw_scan_req *)extra;
+ ssid.SSID_len = MIN(sizeof(ssid.SSID), req->essid_len);
+ memcpy(ssid.SSID, req->essid, ssid.SSID_len);
+ ssid.SSID_len = htod32(ssid.SSID_len);
+ }
+ }
+#endif
+ /* Ignore error (most likely scan in progress) */
+ (void) dev_wlc_ioctl(dev, WLC_SCAN, &ssid, sizeof(ssid));
+
+ return 0;
+}
+#endif
+
+static int
+wl_iw_iscan_set_scan(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra
+)
+{
+ struct dhd_pub *dhd = dhd_get_pub(dev);
+ wl_wext_info_t *wext_info = NULL;
+ wlc_ssid_t ssid;
+#ifdef WL_ESCAN
+ wl_scan_info_t scan_info;
+#else
+ iscan_info_t *iscan;
+#ifdef WL_EXT_IAPSTA
+ int err;
+#endif
+#endif
+
+ DHD_CHECK(dhd, dev);
+ wext_info = dhd->wext_info;
+#ifdef WL_ESCAN
+ /* default Broadcast scan */
+ memset(&ssid, 0, sizeof(ssid));
+#if WIRELESS_EXT > 17
+ /* check for given essid */
+ if (wrqu->data.length == sizeof(struct iw_scan_req)) {
+ if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
+ struct iw_scan_req *req = (struct iw_scan_req *)extra;
+ ssid.SSID_len = MIN(sizeof(ssid.SSID), req->essid_len);
+ memcpy(ssid.SSID, req->essid, ssid.SSID_len);
+ ssid.SSID_len = htod32(ssid.SSID_len);
+ }
+ }
+#endif
+ memset(&scan_info, 0, sizeof(wl_scan_info_t));
+ scan_info.bcast_ssid = TRUE;
+ memcpy(scan_info.ssid.SSID, ssid.SSID, ssid.SSID_len);
+ scan_info.ssid.SSID_len = ssid.SSID_len;
+ return wl_escan_set_scan(dev, &scan_info);
+#else
+ iscan = &wext_info->iscan;
+ WL_TRACE(("%s: SIOCSIWSCAN iscan=%p\n", dev->name, iscan));
+#ifdef WL_EXT_IAPSTA
+ err = wl_ext_in4way_sync_wext(dev, STA_NO_SCAN_IN4WAY, WL_EXT_STATUS_SCAN, NULL);
+ if (err)
+ return err;
+#endif
+
+ /* use backup if our thread is not successful */
+ if ((!iscan) || (iscan->sysioc_pid < 0)) {
+ return wl_iw_set_scan(dev, info, wrqu, extra);
+ }
+ if (iscan->iscan_state == ISCAN_STATE_SCANING) {
+ return 0;
+ }
+
+ /* default Broadcast scan */
+ memset(&ssid, 0, sizeof(ssid));
+
+#if WIRELESS_EXT > 17
+ /* check for given essid */
+ if (wrqu->data.length == sizeof(struct iw_scan_req)) {
+ if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
+ struct iw_scan_req *req = (struct iw_scan_req *)extra;
+ ssid.SSID_len = MIN(sizeof(ssid.SSID), req->essid_len);
+ memcpy(ssid.SSID, req->essid, ssid.SSID_len);
+ ssid.SSID_len = htod32(ssid.SSID_len);
+ }
+ }
+#endif
+
+ iscan->list_cur = iscan->list_hdr;
+ iscan->iscan_state = ISCAN_STATE_SCANING;
+
+
+ wl_iw_set_event_mask(dev);
+ wl_iw_iscan(iscan, &ssid, WL_SCAN_ACTION_START);
+
+ iscan->timer.expires = jiffies + msecs_to_jiffies(iscan->timer_ms);
+ add_timer(&iscan->timer);
+ iscan->timer_on = 1;
+
+ return 0;
+#endif
+}
+
+#if WIRELESS_EXT > 17
+static bool
+ie_is_wpa_ie(uint8 **wpaie, uint8 **tlvs, int *tlvs_len)
+{
+/* Is the body of this TLV entry a WPA entry? If not, */
+/* advance the tlvs buffer pointer and shrink its length. */
+ uint8 *ie = *wpaie;
+
+ /* If the contents match the WPA_OUI and type=1 */
+ if ((ie[1] >= 6) &&
+ !bcmp((const void *)&ie[2], (const void *)(WPA_OUI "\x01"), 4)) {
+ return TRUE;
+ }
+
+ /* point to the next ie */
+ ie += ie[1] + 2;
+ /* calculate the length of the rest of the buffer */
+ *tlvs_len -= (int)(ie - *tlvs);
+ /* update the pointer to the start of the buffer */
+ *tlvs = ie;
+ return FALSE;
+}
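+
+/*
+ * IE layout reminder (illustrative): each TLV is [id][len][data...], so for
+ * a vendor-specific WPA IE ie[0] is 0xdd, ie[1] the length, and ie[2..5]
+ * hold WPA_OUI ("\x00\x50\xf2") plus subtype 0x01, which is what the bcmp()
+ * calls in these helpers test (subtype 0x04 for WPS below).
+ */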
+
+static bool
+ie_is_wps_ie(uint8 **wpsie, uint8 **tlvs, int *tlvs_len)
+{
+/* Is the body of this TLV entry a WPS entry? If not, */
+/* advance the tlvs buffer pointer and shrink its length. */
+ uint8 *ie = *wpsie;
+
+ /* If the contents match the WPA_OUI and type=4 */
+ if ((ie[1] >= 4) &&
+ !bcmp((const void *)&ie[2], (const void *)(WPA_OUI "\x04"), 4)) {
+ return TRUE;
+ }
+
+ /* point to the next ie */
+ ie += ie[1] + 2;
+ /* calculate the length of the rest of the buffer */
+ *tlvs_len -= (int)(ie - *tlvs);
+ /* update the pointer to the start of the buffer */
+ *tlvs = ie;
+ return FALSE;
+}
+#endif /* WIRELESS_EXT > 17 */
+
+#ifdef BCMWAPI_WPI
+static inline int _wpa_snprintf_hex(char *buf, size_t buf_size, const u8 *data,
+ size_t len, int uppercase)
+{
+ size_t i;
+ char *pos = buf, *end = buf + buf_size;
+ int ret;
+ if (buf_size == 0)
+ return 0;
+ for (i = 0; i < len; i++) {
+ ret = snprintf(pos, end - pos, uppercase ? "%02X" : "%02x",
+ data[i]);
+ if (ret < 0 || ret >= end - pos) {
+ end[-1] = '\0';
+ return pos - buf;
+ }
+ pos += ret;
+ }
+ end[-1] = '\0';
+ return pos - buf;
+}
+
+/**
+ * wpa_snprintf_hex - Print data as a hex string into a buffer
+ * @buf: Memory area to use as the output buffer
+ * @buf_size: Maximum buffer size in bytes (should be at least 2 * len + 1)
+ * @data: Data to be printed
+ * @len: Length of data in bytes
+ * Returns: Number of bytes written
+ */
+static int
+wpa_snprintf_hex(char *buf, size_t buf_size, const u8 *data, size_t len)
+{
+ return _wpa_snprintf_hex(buf, buf_size, data, len, 0);
+}
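+
+/*
+ * Example (illustrative): wpa_snprintf_hex(buf, 5, (const u8 *)"\x12\xab", 2)
+ * writes "12ab" plus the terminating NUL into buf and returns 4.
+ */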
+#endif /* BCMWAPI_WPI */
+
+#ifndef WL_ESCAN
+static
+#endif
+int
+wl_iw_handle_scanresults_ies(char **event_p, char *end,
+ struct iw_request_info *info, wl_bss_info_t *bi)
+{
+#if WIRELESS_EXT > 17
+ struct iw_event iwe;
+ char *event;
+#ifdef BCMWAPI_WPI
+ char *buf;
+ int custom_event_len;
+#endif
+
+ event = *event_p;
+ if (bi->ie_length) {
+ /* look for wpa/rsn ies in the ie list... */
+ bcm_tlv_t *ie;
+ uint8 *ptr = ((uint8 *)bi) + bi->ie_offset;
+ int ptr_len = bi->ie_length;
+
+ /* OSEN IE */
+ if ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_VS_ID)) &&
+ ie->len > WFA_OUI_LEN + 1 &&
+ !bcmp((const void *)&ie->data[0], (const void *)WFA_OUI, WFA_OUI_LEN) &&
+ ie->data[WFA_OUI_LEN] == WFA_OUI_TYPE_OSEN) {
+ iwe.cmd = IWEVGENIE;
+ iwe.u.data.length = ie->len + 2;
+ event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
+ }
+ ptr = ((uint8 *)bi) + bi->ie_offset;
+
+ if ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_RSN_ID))) {
+ iwe.cmd = IWEVGENIE;
+ iwe.u.data.length = ie->len + 2;
+ event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
+ }
+ ptr = ((uint8 *)bi) + bi->ie_offset;
+
+ if ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_MDIE_ID))) {
+ iwe.cmd = IWEVGENIE;
+ iwe.u.data.length = ie->len + 2;
+ event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
+ }
+ ptr = ((uint8 *)bi) + bi->ie_offset;
+
+ while ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_WPA_ID))) {
+ /* look for WPS IE */
+ if (ie_is_wps_ie(((uint8 **)&ie), &ptr, &ptr_len)) {
+ iwe.cmd = IWEVGENIE;
+ iwe.u.data.length = ie->len + 2;
+ event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
+ break;
+ }
+ }
+
+ ptr = ((uint8 *)bi) + bi->ie_offset;
+ ptr_len = bi->ie_length;
+ while ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_WPA_ID))) {
+ if (ie_is_wpa_ie(((uint8 **)&ie), &ptr, &ptr_len)) {
+ iwe.cmd = IWEVGENIE;
+ iwe.u.data.length = ie->len + 2;
+ event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
+ break;
+ }
+ }
+
+#ifdef BCMWAPI_WPI
+ ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t);
+ ptr_len = bi->ie_length;
+
+ while ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_WAPI_ID))) {
+ WL_TRACE(("found a WAPI IE...\n"));
+#ifdef WAPI_IE_USE_GENIE
+ iwe.cmd = IWEVGENIE;
+ iwe.u.data.length = ie->len + 2;
+ event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
+#else /* using CUSTOM event */
+ iwe.cmd = IWEVCUSTOM;
+ custom_event_len = strlen("wapi_ie=") + 2*(ie->len + 2);
+ iwe.u.data.length = custom_event_len;
+
+ buf = kmalloc(custom_event_len+1, GFP_KERNEL);
+ if (buf == NULL)
+ {
+ WL_ERROR(("malloc(%d) returned NULL...\n", custom_event_len));
+ break;
+ }
+
+ memcpy(buf, "wapi_ie=", 8);
+ wpa_snprintf_hex(buf + 8, 2+1, &(ie->id), 1);
+ wpa_snprintf_hex(buf + 10, 2+1, &(ie->len), 1);
+ wpa_snprintf_hex(buf + 12, 2*ie->len+1, ie->data, ie->len);
+ event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, buf);
+ kfree(buf);
+#endif /* WAPI_IE_USE_GENIE */
+ break;
+ }
+#endif /* BCMWAPI_WPI */
+ *event_p = event;
+ }
+
+#endif /* WIRELESS_EXT > 17 */
+ return 0;
+}
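+
+/*
+ * Note (a sketch of the contract above): each matched IE is forwarded to
+ * user space verbatim as an IWEVGENIE event of ie->len + 2 bytes (the id and
+ * length octets included), the framing a WEXT-based supplicant expects when
+ * it reassembles scan results.
+ */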
+
+#ifndef WL_ESCAN
+static int
+wl_iw_get_scan(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ channel_info_t ci;
+ wl_scan_results_t *list;
+ struct iw_event iwe;
+ wl_bss_info_t *bi = NULL;
+ int error, i, j;
+ char *event = extra, *end = extra + dwrq->length, *value;
+ uint buflen = dwrq->length;
+ int16 rssi;
+ int channel;
+
+ WL_TRACE(("%s SIOCGIWSCAN\n", dev->name));
+
+ if (!extra)
+ return -EINVAL;
+
+ /* Check for scan in progress */
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_CHANNEL, &ci, sizeof(ci))))
+ return error;
+ ci.scan_channel = dtoh32(ci.scan_channel);
+ if (ci.scan_channel)
+ return -EAGAIN;
+
+ /* Get scan results (too large to put on the stack) */
+ list = kmalloc(buflen, GFP_KERNEL);
+ if (!list)
+ return -ENOMEM;
+ memset(list, 0, buflen);
+ list->buflen = htod32(buflen);
+ if ((error = dev_wlc_ioctl(dev, WLC_SCAN_RESULTS, list, buflen))) {
+ kfree(list);
+ return error;
+ }
+ list->buflen = dtoh32(list->buflen);
+ list->version = dtoh32(list->version);
+ list->count = dtoh32(list->count);
+
+ ASSERT(list->version == WL_BSS_INFO_VERSION);
+
+ for (i = 0; i < list->count && i < IW_MAX_AP; i++) {
+ bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : list->bss_info;
+ ASSERT(((uintptr)bi + dtoh32(bi->length)) <= ((uintptr)list +
+ buflen));
+
+		// terence 20150419: limit the max RSSI to -2 or the BSS will be filtered out by Android
+ rssi = MIN(dtoh16(bi->RSSI), RSSI_MAXVAL);
+ channel = (bi->ctl_ch == 0) ? CHSPEC_CHANNEL(bi->chanspec) : bi->ctl_ch;
+ WL_SCAN(("BSSID="MACSTR", channel=%d, RSSI=%d, SSID=\"%s\"\n",
+ MAC2STR(bi->BSSID.octet), channel, rssi, bi->SSID));
+
+ /* First entry must be the BSSID */
+ iwe.cmd = SIOCGIWAP;
+ iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
+ memcpy(iwe.u.ap_addr.sa_data, &bi->BSSID, ETHER_ADDR_LEN);
+ event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_ADDR_LEN);
+
+ /* SSID */
+ iwe.u.data.length = dtoh32(bi->SSID_len);
+ iwe.cmd = SIOCGIWESSID;
+ iwe.u.data.flags = 1;
+ event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, bi->SSID);
+
+ /* Mode */
+ if (dtoh16(bi->capability) & (DOT11_CAP_ESS | DOT11_CAP_IBSS)) {
+ iwe.cmd = SIOCGIWMODE;
+ if (dtoh16(bi->capability) & DOT11_CAP_ESS)
+ iwe.u.mode = IW_MODE_INFRA;
+ else
+ iwe.u.mode = IW_MODE_ADHOC;
+ event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_UINT_LEN);
+ }
+
+ /* Channel */
+ iwe.cmd = SIOCGIWFREQ;
+
+ iwe.u.freq.m = wf_channel2mhz(CHSPEC_CHANNEL(bi->chanspec),
+ (CHSPEC_IS2G(bi->chanspec)) ?
+ WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G);
+ iwe.u.freq.e = 6;
+ event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_FREQ_LEN);
+
+ /* Channel quality */
+ iwe.cmd = IWEVQUAL;
+ iwe.u.qual.qual = rssi_to_qual(rssi);
+ iwe.u.qual.level = 0x100 + rssi;
+ iwe.u.qual.noise = 0x100 + bi->phy_noise;
+ event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_QUAL_LEN);
+
+ wl_iw_handle_scanresults_ies(&event, end, info, bi);
+
+ /* Encryption */
+ iwe.cmd = SIOCGIWENCODE;
+ if (dtoh16(bi->capability) & DOT11_CAP_PRIVACY)
+ iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
+ else
+ iwe.u.data.flags = IW_ENCODE_DISABLED;
+ iwe.u.data.length = 0;
+ event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)event);
+
+ /* Rates */
+ if (bi->rateset.count) {
+ value = event + IW_EV_LCP_LEN;
+ iwe.cmd = SIOCGIWRATE;
+ /* Those two flags are ignored... */
+ iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
+ for (j = 0; j < bi->rateset.count && j < IW_MAX_BITRATES; j++) {
+ iwe.u.bitrate.value = (bi->rateset.rates[j] & 0x7f) * 500000;
+ value = IWE_STREAM_ADD_VALUE(info, event, value, end, &iwe,
+ IW_EV_PARAM_LEN);
+ }
+ event = value;
+ }
+ }
+
+ kfree(list);
+
+ dwrq->length = event - extra;
+ dwrq->flags = 0; /* todo */
+
+ return 0;
+}
+#endif /* WL_ESCAN */
+
+static int
+wl_iw_iscan_get_scan(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ struct dhd_pub *dhd = dhd_get_pub(dev);
+ wl_wext_info_t *wext_info = NULL;
+#ifndef WL_ESCAN
+ wl_scan_results_t *list;
+ struct iw_event iwe;
+ wl_bss_info_t *bi = NULL;
+ int ii, j;
+ int apcnt;
+ char *event = extra, *end = extra + dwrq->length, *value;
+ iscan_buf_t * p_buf;
+ int16 rssi;
+ int channel;
+ iscan_info_t *iscan;
+#endif
+
+ DHD_CHECK(dhd, dev);
+ wext_info = dhd->wext_info;
+#ifdef WL_ESCAN
+ return wl_escan_get_scan(dev, info, dwrq, extra);
+#else
+ WL_TRACE(("%s SIOCGIWSCAN\n", dev->name));
+
+ if (!extra)
+ return -EINVAL;
+
+ /* use backup if our thread is not successful */
+ iscan = &wext_info->iscan;
+ if ((!iscan) || (iscan->sysioc_pid < 0)) {
+ return wl_iw_get_scan(dev, info, dwrq, extra);
+ }
+
+ /* Check for scan in progress */
+ if (iscan->iscan_state == ISCAN_STATE_SCANING) {
+ WL_TRACE(("%s: SIOCGIWSCAN GET still scanning\n", dev->name));
+ return -EAGAIN;
+ }
+
+ apcnt = 0;
+ p_buf = iscan->list_hdr;
+ /* Get scan results */
+ while (p_buf != iscan->list_cur) {
+ list = &((wl_iscan_results_t*)p_buf->iscan_buf)->results;
+
+ if (list->version != WL_BSS_INFO_VERSION) {
+ WL_ERROR(("list->version %d != WL_BSS_INFO_VERSION\n", list->version));
+ }
+
+ bi = NULL;
+ for (ii = 0; ii < list->count && apcnt < IW_MAX_AP; apcnt++, ii++) {
+ bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : list->bss_info;
+ ASSERT(((uintptr)bi + dtoh32(bi->length)) <= ((uintptr)list +
+ WLC_IW_ISCAN_MAXLEN));
+
+ /* overflow check cover fields before wpa IEs */
+ if (event + ETHER_ADDR_LEN + bi->SSID_len + IW_EV_UINT_LEN + IW_EV_FREQ_LEN +
+ IW_EV_QUAL_LEN >= end)
+ return -E2BIG;
+
+			// terence 20150419: limit the max RSSI to -2 or the BSS will be filtered out by Android
+ rssi = MIN(dtoh16(bi->RSSI), RSSI_MAXVAL);
+ channel = (bi->ctl_ch == 0) ? CHSPEC_CHANNEL(bi->chanspec) : bi->ctl_ch;
+ WL_SCAN(("BSSID="MACSTR", channel=%d, RSSI=%d, SSID=\"%s\"\n",
+ MAC2STR(bi->BSSID.octet), channel, rssi, bi->SSID));
+
+ /* First entry must be the BSSID */
+ iwe.cmd = SIOCGIWAP;
+ iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
+ memcpy(iwe.u.ap_addr.sa_data, &bi->BSSID, ETHER_ADDR_LEN);
+ event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_ADDR_LEN);
+
+ /* SSID */
+ iwe.u.data.length = dtoh32(bi->SSID_len);
+ iwe.cmd = SIOCGIWESSID;
+ iwe.u.data.flags = 1;
+ event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, bi->SSID);
+
+ /* Mode */
+ if (dtoh16(bi->capability) & (DOT11_CAP_ESS | DOT11_CAP_IBSS)) {
+ iwe.cmd = SIOCGIWMODE;
+ if (dtoh16(bi->capability) & DOT11_CAP_ESS)
+ iwe.u.mode = IW_MODE_INFRA;
+ else
+ iwe.u.mode = IW_MODE_ADHOC;
+ event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_UINT_LEN);
+ }
+
+ /* Channel */
+ iwe.cmd = SIOCGIWFREQ;
+ iwe.u.freq.m = wf_channel2mhz(CHSPEC_CHANNEL(bi->chanspec),
+ (CHSPEC_IS2G(bi->chanspec)) ?
+ WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G);
+ iwe.u.freq.e = 6;
+ event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_FREQ_LEN);
+
+ /* Channel quality */
+ iwe.cmd = IWEVQUAL;
+ iwe.u.qual.qual = rssi_to_qual(rssi);
+ iwe.u.qual.level = 0x100 + rssi;
+ iwe.u.qual.noise = 0x100 + bi->phy_noise;
+ event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_QUAL_LEN);
+
+ wl_iw_handle_scanresults_ies(&event, end, info, bi);
+
+ /* Encryption */
+ iwe.cmd = SIOCGIWENCODE;
+ if (dtoh16(bi->capability) & DOT11_CAP_PRIVACY)
+ iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
+ else
+ iwe.u.data.flags = IW_ENCODE_DISABLED;
+ iwe.u.data.length = 0;
+ event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)event);
+
+ /* Rates */
+ if (bi->rateset.count <= sizeof(bi->rateset.rates)) {
+ if (event + IW_MAX_BITRATES*IW_EV_PARAM_LEN >= end)
+ return -E2BIG;
+
+ value = event + IW_EV_LCP_LEN;
+ iwe.cmd = SIOCGIWRATE;
+ /* Those two flags are ignored... */
+ iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
+ for (j = 0; j < bi->rateset.count && j < IW_MAX_BITRATES; j++) {
+ iwe.u.bitrate.value = (bi->rateset.rates[j] & 0x7f) * 500000;
+ value = IWE_STREAM_ADD_VALUE(info, event, value, end, &iwe,
+ IW_EV_PARAM_LEN);
+ }
+ event = value;
+ }
+ }
+ p_buf = p_buf->next;
+ } /* while (p_buf) */
+
+ dwrq->length = event - extra;
+ dwrq->flags = 0; /* todo */
+ WL_SCAN(("apcnt=%d\n", apcnt));
+
+ return 0;
+#endif
+}
+#endif /* WIRELESS_EXT > 13 */
+
+
+static int
+wl_iw_set_essid(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ wlc_ssid_t ssid;
+ int error;
+ struct dhd_pub *dhd = dhd_get_pub(dev);
+ wl_wext_info_t *wext_info = NULL;
+
+ WL_TRACE(("%s: SIOCSIWESSID\n", dev->name));
+ DHD_CHECK(dhd, dev);
+ wext_info = dhd->wext_info;
+
+ /* default Broadcast SSID */
+ memset(&ssid, 0, sizeof(ssid));
+ if (dwrq->length && extra) {
+#if WIRELESS_EXT > 20
+ ssid.SSID_len = MIN(sizeof(ssid.SSID), dwrq->length);
+#else
+ ssid.SSID_len = MIN(sizeof(ssid.SSID), dwrq->length-1);
+#endif
+ memcpy(ssid.SSID, extra, ssid.SSID_len);
+ ssid.SSID_len = htod32(ssid.SSID_len);
+
+ if (wext_info) {
+ memcpy(wext_info->conn_info.ssid.SSID, ssid.SSID, ssid.SSID_len);
+ wext_info->conn_info.ssid.SSID_len = ssid.SSID_len;
+ }
+ if (wext_info && memcmp(&ether_null, &wext_info->conn_info.bssid, ETHER_ADDR_LEN)) {
+ if ((error = wl_ext_connect(dev, &wext_info->conn_info)))
+ return error;
+ } else {
+ if ((error = dev_wlc_ioctl(dev, WLC_SET_SSID, &ssid, sizeof(ssid)))) {
+ WL_ERROR(("WLC_SET_SSID failed (%d).\n", error));
+ return error;
+ }
+ WL_MSG(dev->name, "join SSID=\"%s\"\n", ssid.SSID);
+ }
+#ifdef WL_EXT_IAPSTA
+ wl_ext_in4way_sync_wext(dev, STA_NO_BTC_IN4WAY, WL_EXT_STATUS_CONNECTING, NULL);
+#endif
+ }
+ /* If essid null then it is "iwconfig <interface> essid off" command */
+ else {
+ scb_val_t scbval;
+ bzero(&scbval, sizeof(scb_val_t));
+ WL_MSG(dev->name, "WLC_DISASSOC\n");
+ if ((error = dev_wlc_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t)))) {
+ WL_ERROR(("WLC_DISASSOC failed (%d).\n", error));
+ return error;
+ }
+#ifdef WL_EXT_IAPSTA
+ wl_ext_in4way_sync_wext(dev, STA_NO_BTC_IN4WAY|STA_WAIT_DISCONNECTED,
+ WL_EXT_STATUS_DISCONNECTING, NULL);
+#endif
+ }
+ return 0;
+}
+
+static int
+wl_iw_get_essid(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ wlc_ssid_t ssid;
+ int error;
+
+ WL_TRACE(("%s: SIOCGIWESSID\n", dev->name));
+
+ if (!extra)
+ return -EINVAL;
+
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_SSID, &ssid, sizeof(ssid)))) {
+ WL_ERROR(("Error getting the SSID %d\n", error));
+ return error;
+ }
+
+ ssid.SSID_len = dtoh32(ssid.SSID_len);
+
+ /* Max SSID length check */
+ if (ssid.SSID_len > IW_ESSID_MAX_SIZE) {
+ ssid.SSID_len = IW_ESSID_MAX_SIZE;
+ }
+
+ /* Get the current SSID */
+ memcpy(extra, ssid.SSID, ssid.SSID_len);
+
+	/* NUL-terminate; the extra buffer is sized IW_ESSID_MAX_SIZE + 1 for this */
+ extra[IW_ESSID_MAX_SIZE] = '\0';
+
+ dwrq->length = ssid.SSID_len;
+
+ dwrq->flags = 1; /* active */
+
+ return 0;
+}
+
+static int
+wl_iw_set_nick(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ wl_iw_t *iw = IW_DEV_IF(dev);
+ WL_TRACE(("%s: SIOCSIWNICKN\n", dev->name));
+
+ if (!extra)
+ return -EINVAL;
+
+	/* Reject an empty or oversized string; the length includes the trailing NUL */
+	if (dwrq->length == 0 || dwrq->length > sizeof(iw->nickname))
+		return -E2BIG;
+
+ memcpy(iw->nickname, extra, dwrq->length);
+ iw->nickname[dwrq->length - 1] = '\0';
+
+ return 0;
+}
+
+static int
+wl_iw_get_nick(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ wl_iw_t *iw = IW_DEV_IF(dev);
+ WL_TRACE(("%s: SIOCGIWNICKN\n", dev->name));
+
+ if (!extra)
+ return -EINVAL;
+
+ strcpy(extra, iw->nickname);
+ dwrq->length = strlen(extra) + 1;
+
+ return 0;
+}
+
+static int wl_iw_set_rate(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ wl_rateset_t rateset;
+ int error, rate, i, error_bg, error_a;
+
+ WL_TRACE(("%s: SIOCSIWRATE\n", dev->name));
+
+ /* Get current rateset */
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_CURR_RATESET, &rateset, sizeof(rateset))))
+ return error;
+
+ rateset.count = dtoh32(rateset.count);
+
+ if (vwrq->value < 0) {
+ /* Select maximum rate */
+ rate = rateset.rates[rateset.count - 1] & 0x7f;
+ } else if (vwrq->value < rateset.count) {
+ /* Select rate by rateset index */
+ rate = rateset.rates[vwrq->value] & 0x7f;
+ } else {
+ /* Specified rate in bps */
+ rate = vwrq->value / 500000;
+ }
+
+ if (vwrq->fixed) {
+ /*
+			Set the rate override.
+			Since this is a/b/g-blind, both a_rate and bg_rate are enforced.
+ */
+ error_bg = dev_wlc_intvar_set(dev, "bg_rate", rate);
+ error_a = dev_wlc_intvar_set(dev, "a_rate", rate);
+
+ if (error_bg && error_a)
+ return (error_bg | error_a);
+ } else {
+ /*
+			Clear the rate override.
+			Since this is a/b/g-blind, both a_rate and bg_rate are cleared.
+ */
+ /* 0 is for clearing rate override */
+ error_bg = dev_wlc_intvar_set(dev, "bg_rate", 0);
+ /* 0 is for clearing rate override */
+ error_a = dev_wlc_intvar_set(dev, "a_rate", 0);
+
+ if (error_bg && error_a)
+ return (error_bg | error_a);
+
+ /* Remove rates above selected rate */
+ for (i = 0; i < rateset.count; i++)
+ if ((rateset.rates[i] & 0x7f) > rate)
+ break;
+ rateset.count = htod32(i);
+
+ /* Set current rateset */
+ if ((error = dev_wlc_ioctl(dev, WLC_SET_RATESET, &rateset, sizeof(rateset))))
+ return error;
+ }
+
+ return 0;
+}
+
+static int wl_iw_get_rate(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int error, rate;
+
+ WL_TRACE(("%s: SIOCGIWRATE\n", dev->name));
+
+ /* Report the current tx rate */
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_RATE, &rate, sizeof(rate))))
+ return error;
+ rate = dtoh32(rate);
+ vwrq->value = rate * 500000;
+
+ return 0;
+}
+
+static int
+wl_iw_set_rts(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int error, rts;
+
+ WL_TRACE(("%s: SIOCSIWRTS\n", dev->name));
+
+ if (vwrq->disabled)
+ rts = DOT11_DEFAULT_RTS_LEN;
+ else if (vwrq->value < 0 || vwrq->value > DOT11_DEFAULT_RTS_LEN)
+ return -EINVAL;
+ else
+ rts = vwrq->value;
+
+ if ((error = dev_wlc_intvar_set(dev, "rtsthresh", rts)))
+ return error;
+
+ return 0;
+}
+
+static int
+wl_iw_get_rts(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int error, rts;
+
+ WL_TRACE(("%s: SIOCGIWRTS\n", dev->name));
+
+ if ((error = dev_wlc_intvar_get(dev, "rtsthresh", &rts)))
+ return error;
+
+ vwrq->value = rts;
+ vwrq->disabled = (rts >= DOT11_DEFAULT_RTS_LEN);
+ vwrq->fixed = 1;
+
+ return 0;
+}
+
+static int
+wl_iw_set_frag(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int error, frag;
+
+ WL_TRACE(("%s: SIOCSIWFRAG\n", dev->name));
+
+ if (vwrq->disabled)
+ frag = DOT11_DEFAULT_FRAG_LEN;
+ else if (vwrq->value < 0 || vwrq->value > DOT11_DEFAULT_FRAG_LEN)
+ return -EINVAL;
+ else
+ frag = vwrq->value;
+
+ if ((error = dev_wlc_intvar_set(dev, "fragthresh", frag)))
+ return error;
+
+ return 0;
+}
+
+static int
+wl_iw_get_frag(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int error, fragthreshold;
+
+ WL_TRACE(("%s: SIOCGIWFRAG\n", dev->name));
+
+ if ((error = dev_wlc_intvar_get(dev, "fragthresh", &fragthreshold)))
+ return error;
+
+ vwrq->value = fragthreshold;
+ vwrq->disabled = (fragthreshold >= DOT11_DEFAULT_FRAG_LEN);
+ vwrq->fixed = 1;
+
+ return 0;
+}
+
+static int
+wl_iw_set_txpow(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int error, disable;
+ uint16 txpwrmw;
+ WL_TRACE(("%s: SIOCSIWTXPOW\n", dev->name));
+
+ /* Make sure radio is off or on as far as software is concerned */
+ disable = vwrq->disabled ? WL_RADIO_SW_DISABLE : 0;
+ disable += WL_RADIO_SW_DISABLE << 16;
+
+ disable = htod32(disable);
+ if ((error = dev_wlc_ioctl(dev, WLC_SET_RADIO, &disable, sizeof(disable))))
+ return error;
+
+ /* If Radio is off, nothing more to do */
+ if (disable & WL_RADIO_SW_DISABLE)
+ return 0;
+
+ /* Only handle mW */
+ if (!(vwrq->flags & IW_TXPOW_MWATT))
+ return -EINVAL;
+
+ /* Value < 0 means just "on" or "off" */
+ if (vwrq->value < 0)
+ return 0;
+
+ if (vwrq->value > 0xffff) txpwrmw = 0xffff;
+ else txpwrmw = (uint16)vwrq->value;
+
+
+ error = dev_wlc_intvar_set(dev, "qtxpower", (int)(bcm_mw_to_qdbm(txpwrmw)));
+ return error;
+}
+
+static int
+wl_iw_get_txpow(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int error, disable, txpwrdbm;
+ uint8 result;
+
+ WL_TRACE(("%s: SIOCGIWTXPOW\n", dev->name));
+
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_RADIO, &disable, sizeof(disable))) ||
+ (error = dev_wlc_intvar_get(dev, "qtxpower", &txpwrdbm)))
+ return error;
+
+ disable = dtoh32(disable);
+ result = (uint8)(txpwrdbm & ~WL_TXPWR_OVERRIDE);
+ vwrq->value = (int32)bcm_qdbm_to_mw(result);
+ vwrq->fixed = 0;
+ vwrq->disabled = (disable & (WL_RADIO_SW_DISABLE | WL_RADIO_HW_DISABLE)) ? 1 : 0;
+ vwrq->flags = IW_TXPOW_MWATT;
+
+ return 0;
+}
+
+#if WIRELESS_EXT > 10
+static int
+wl_iw_set_retry(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int error, lrl, srl;
+
+ WL_TRACE(("%s: SIOCSIWRETRY\n", dev->name));
+
+ /* Do not handle "off" or "lifetime" */
+ if (vwrq->disabled || (vwrq->flags & IW_RETRY_LIFETIME))
+ return -EINVAL;
+
+ /* Handle "[min|max] limit" */
+ if (vwrq->flags & IW_RETRY_LIMIT) {
+ /* "max limit" or just "limit" */
+#if WIRELESS_EXT > 20
+ if ((vwrq->flags & IW_RETRY_LONG) ||(vwrq->flags & IW_RETRY_MAX) ||
+ !((vwrq->flags & IW_RETRY_SHORT) || (vwrq->flags & IW_RETRY_MIN)))
+#else
+ if ((vwrq->flags & IW_RETRY_MAX) || !(vwrq->flags & IW_RETRY_MIN))
+#endif /* WIRELESS_EXT > 20 */
+ {
+ lrl = htod32(vwrq->value);
+ if ((error = dev_wlc_ioctl(dev, WLC_SET_LRL, &lrl, sizeof(lrl))))
+ return error;
+ }
+ /* "min limit" or just "limit" */
+#if WIRELESS_EXT > 20
+ if ((vwrq->flags & IW_RETRY_SHORT) ||(vwrq->flags & IW_RETRY_MIN) ||
+ !((vwrq->flags & IW_RETRY_LONG) || (vwrq->flags & IW_RETRY_MAX)))
+#else
+ if ((vwrq->flags & IW_RETRY_MIN) || !(vwrq->flags & IW_RETRY_MAX))
+#endif /* WIRELESS_EXT > 20 */
+ {
+ srl = htod32(vwrq->value);
+ if ((error = dev_wlc_ioctl(dev, WLC_SET_SRL, &srl, sizeof(srl))))
+ return error;
+ }
+ }
+
+ return 0;
+}
+
+static int
+wl_iw_get_retry(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int error, lrl, srl;
+
+ WL_TRACE(("%s: SIOCGIWRETRY\n", dev->name));
+
+ vwrq->disabled = 0; /* Can't be disabled */
+
+ /* Do not handle lifetime queries */
+ if ((vwrq->flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME)
+ return -EINVAL;
+
+ /* Get retry limits */
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_LRL, &lrl, sizeof(lrl))) ||
+ (error = dev_wlc_ioctl(dev, WLC_GET_SRL, &srl, sizeof(srl))))
+ return error;
+
+ lrl = dtoh32(lrl);
+ srl = dtoh32(srl);
+
+ /* Note: by default, display the min retry number */
+ if (vwrq->flags & IW_RETRY_MAX) {
+ vwrq->flags = IW_RETRY_LIMIT | IW_RETRY_MAX;
+ vwrq->value = lrl;
+ } else {
+ vwrq->flags = IW_RETRY_LIMIT;
+ vwrq->value = srl;
+ if (srl != lrl)
+ vwrq->flags |= IW_RETRY_MIN;
+ }
+
+ return 0;
+}
+#endif /* WIRELESS_EXT > 10 */
+
+static int
+wl_iw_set_encode(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ wl_wsec_key_t key;
+ int error, val, wsec;
+
+ WL_TRACE(("%s: SIOCSIWENCODE\n", dev->name));
+
+ memset(&key, 0, sizeof(key));
+
+ if ((dwrq->flags & IW_ENCODE_INDEX) == 0) {
+ /* Find the current key */
+ for (key.index = 0; key.index < DOT11_MAX_DEFAULT_KEYS; key.index++) {
+ val = htod32(key.index);
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_KEY_PRIMARY, &val, sizeof(val))))
+ return error;
+ val = dtoh32(val);
+ if (val)
+ break;
+ }
+ /* Default to 0 */
+ if (key.index == DOT11_MAX_DEFAULT_KEYS)
+ key.index = 0;
+ } else {
+ key.index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
+ if (key.index >= DOT11_MAX_DEFAULT_KEYS)
+ return -EINVAL;
+ }
+
+ /* Interpret "off" to mean no encryption */
+ wsec = (dwrq->flags & IW_ENCODE_DISABLED) ? 0 : WEP_ENABLED;
+
+ if ((error = dev_wlc_intvar_set(dev, "wsec", wsec)))
+ return error;
+
+ /* Old API used to pass a NULL pointer instead of IW_ENCODE_NOKEY */
+ if (!extra || !dwrq->length || (dwrq->flags & IW_ENCODE_NOKEY)) {
+ /* Just select a new current key */
+ val = htod32(key.index);
+ if ((error = dev_wlc_ioctl(dev, WLC_SET_KEY_PRIMARY, &val, sizeof(val))))
+ return error;
+ } else {
+ key.len = dwrq->length;
+
+ if (dwrq->length > sizeof(key.data))
+ return -EINVAL;
+
+ memcpy(key.data, extra, dwrq->length);
+
+ key.flags = WL_PRIMARY_KEY;
+ switch (key.len) {
+ case WEP1_KEY_SIZE:
+ key.algo = CRYPTO_ALGO_WEP1;
+ break;
+ case WEP128_KEY_SIZE:
+ key.algo = CRYPTO_ALGO_WEP128;
+ break;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
+ case TKIP_KEY_SIZE:
+ key.algo = CRYPTO_ALGO_TKIP;
+ break;
+#endif
+ case AES_KEY_SIZE:
+ key.algo = CRYPTO_ALGO_AES_CCM;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Set the new key/index */
+ swap_key_from_BE(&key);
+ if ((error = dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key))))
+ return error;
+ }
+
+ /* Interpret "restricted" to mean shared key authentication */
+ val = (dwrq->flags & IW_ENCODE_RESTRICTED) ? 1 : 0;
+ val = htod32(val);
+ if ((error = dev_wlc_ioctl(dev, WLC_SET_AUTH, &val, sizeof(val))))
+ return error;
+
+ return 0;
+}
+
+static int
+wl_iw_get_encode(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ wl_wsec_key_t key;
+ int error, val, wsec, auth;
+
+ WL_TRACE(("%s: SIOCGIWENCODE\n", dev->name));
+
+ /* assure default values of zero for things we don't touch */
+ bzero(&key, sizeof(wl_wsec_key_t));
+
+ if ((dwrq->flags & IW_ENCODE_INDEX) == 0) {
+ /* Find the current key */
+ for (key.index = 0; key.index < DOT11_MAX_DEFAULT_KEYS; key.index++) {
+ val = key.index;
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_KEY_PRIMARY, &val, sizeof(val))))
+ return error;
+ val = dtoh32(val);
+ if (val)
+ break;
+ }
+ } else
+ key.index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
+
+ if (key.index >= DOT11_MAX_DEFAULT_KEYS)
+ key.index = 0;
+
+ /* Get info */
+
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_WSEC, &wsec, sizeof(wsec))) ||
+ (error = dev_wlc_ioctl(dev, WLC_GET_AUTH, &auth, sizeof(auth))))
+ return error;
+
+ swap_key_to_BE(&key);
+
+ wsec = dtoh32(wsec);
+ auth = dtoh32(auth);
+ /* Get key length */
+ dwrq->length = MIN(IW_ENCODING_TOKEN_MAX, key.len);
+
+ /* Get flags */
+ dwrq->flags = key.index + 1;
+ if (!(wsec & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED))) {
+ /* Interpret "off" to mean no encryption */
+ dwrq->flags |= IW_ENCODE_DISABLED;
+ }
+ if (auth) {
+ /* Interpret "restricted" to mean shared key authentication */
+ dwrq->flags |= IW_ENCODE_RESTRICTED;
+ }
+
+ /* Get key */
+ if (dwrq->length && extra)
+ memcpy(extra, key.data, dwrq->length);
+
+ return 0;
+}
+
+static int
+wl_iw_set_power(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int error, pm;
+
+ WL_TRACE(("%s: SIOCSIWPOWER\n", dev->name));
+
+ pm = vwrq->disabled ? PM_OFF : PM_MAX;
+
+ pm = htod32(pm);
+ if ((error = dev_wlc_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm))))
+ return error;
+
+ return 0;
+}
+
+static int
+wl_iw_get_power(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int error, pm;
+
+ WL_TRACE(("%s: SIOCGIWPOWER\n", dev->name));
+
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_PM, &pm, sizeof(pm))))
+ return error;
+
+ pm = dtoh32(pm);
+ vwrq->disabled = pm ? 0 : 1;
+ vwrq->flags = IW_POWER_ALL_R;
+
+ return 0;
+}
+
+#if WIRELESS_EXT > 17
+static int
+wl_iw_set_wpaie(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *iwp,
+ char *extra
+)
+{
+#if defined(BCMWAPI_WPI)
+ uchar buf[WLC_IOCTL_SMLEN] = {0};
+ uchar *p = buf;
+ int wapi_ie_size;
+
+ WL_TRACE(("%s: SIOCSIWGENIE\n", dev->name));
+
+ if (extra[0] == DOT11_MNG_WAPI_ID)
+ {
+ wapi_ie_size = iwp->length;
+ memcpy(p, extra, iwp->length);
+ dev_wlc_bufvar_set(dev, "wapiie", buf, wapi_ie_size);
+ }
+ else
+#endif
+ dev_wlc_bufvar_set(dev, "wpaie", extra, iwp->length);
+
+ return 0;
+}
+
+static int
+wl_iw_get_wpaie(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *iwp,
+ char *extra
+)
+{
+ WL_TRACE(("%s: SIOCGIWGENIE\n", dev->name));
+ iwp->length = 64;
+ dev_wlc_bufvar_get(dev, "wpaie", extra, iwp->length);
+ return 0;
+}
+
+static int
+wl_iw_set_encodeext(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ wl_wsec_key_t key;
+ int error;
+ struct iw_encode_ext *iwe;
+
+ WL_TRACE(("%s: SIOCSIWENCODEEXT\n", dev->name));
+
+ memset(&key, 0, sizeof(key));
+ iwe = (struct iw_encode_ext *)extra;
+
+ /* disable encryption completely */
+ if (dwrq->flags & IW_ENCODE_DISABLED) {
+ /* intentionally a no-op in this driver */
+ }
+
+ /* get the key index */
+ key.index = 0;
+ if (dwrq->flags & IW_ENCODE_INDEX)
+ key.index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
+
+ key.len = iwe->key_len;
+
+ /* For default WEP keys the driver expects a null ea (station) address rather than broadcast */
+ if (!ETHER_ISMULTI(iwe->addr.sa_data))
+ bcopy((void *)&iwe->addr.sa_data, (char *)&key.ea, ETHER_ADDR_LEN);
+
+ /* check for key index change */
+ if (key.len == 0) {
+ if (iwe->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
+ WL_WSEC(("Changing the the primary Key to %d\n", key.index));
+ /* change the key index .... */
+ key.index = htod32(key.index);
+ error = dev_wlc_ioctl(dev, WLC_SET_KEY_PRIMARY,
+ &key.index, sizeof(key.index));
+ if (error)
+ return error;
+ }
+ /* key delete */
+ else {
+ swap_key_from_BE(&key);
+ error = dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key));
+ if (error)
+ return error;
+ }
+ }
+ /* This case is used to allow an external 802.1x supplicant
+ * to pass the PMK to the in-driver supplicant for use in
+ * the 4-way handshake.
+ */
+ else if (iwe->alg == IW_ENCODE_ALG_PMK) {
+ int j;
+ wsec_pmk_t pmk;
+ char keystring[WSEC_MAX_PSK_LEN + 1];
+ char* charptr = keystring;
+ uint len;
+
+ /* Re-encode the raw PMK bytes as an ASCII hex string; WLC_SET_WSEC_PMK
+ * with the WSEC_PASSPHRASE flag expects printable passphrase text
+ */
+ for (j = 0; j < (WSEC_MAX_PSK_LEN / 2); j++) {
+ (void)snprintf(charptr, 3, "%02x", iwe->key[j]);
+ charptr += 2;
+ }
+ len = strlen(keystring);
+ pmk.key_len = htod16(len);
+ bcopy(keystring, pmk.key, len);
+ pmk.flags = htod16(WSEC_PASSPHRASE);
+
+ WL_WSEC(("set key %s\n", keystring));
+ error = dev_wlc_ioctl(dev, WLC_SET_WSEC_PMK, &pmk, sizeof(pmk));
+ if (error) {
+ WL_ERROR(("WLC_SET_WSEC_PMK error %d\n", error));
+ return error;
+ }
+ }
+
+ else {
+ if (iwe->key_len > sizeof(key.data))
+ return -EINVAL;
+
+ WL_WSEC(("Setting the key index %d\n", key.index));
+ if (iwe->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
+ WL_WSEC(("key is a Primary Key\n"));
+ key.flags = WL_PRIMARY_KEY;
+ }
+
+ bcopy((void *)iwe->key, key.data, iwe->key_len);
+
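+ /* WEXT orders a 32-byte TKIP key as TK | TX-MIC | RX-MIC, while the
+ * dongle expects TK | RX-MIC | TX-MIC, so swap bytes 16-23 and 24-31
+ */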
+ if (iwe->alg == IW_ENCODE_ALG_TKIP) {
+ uint8 keybuf[8];
+ bcopy(&key.data[24], keybuf, sizeof(keybuf));
+ bcopy(&key.data[16], &key.data[24], sizeof(keybuf));
+ bcopy(keybuf, &key.data[16], sizeof(keybuf));
+ }
+
+ /* rx iv: WEXT supplies the 6-byte receive sequence counter LSB first;
+ * pack it into the 16-bit lo / 32-bit hi IV halves
+ */
+ if (iwe->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID) {
+ uchar *ivptr;
+ ivptr = (uchar *)iwe->rx_seq;
+ key.rxiv.hi = (ivptr[5] << 24) | (ivptr[4] << 16) |
+ (ivptr[3] << 8) | ivptr[2];
+ key.rxiv.lo = (ivptr[1] << 8) | ivptr[0];
+ key.iv_initialized = TRUE;
+ }
+
+ switch (iwe->alg) {
+ case IW_ENCODE_ALG_NONE:
+ key.algo = CRYPTO_ALGO_OFF;
+ break;
+ case IW_ENCODE_ALG_WEP:
+ if (iwe->key_len == WEP1_KEY_SIZE)
+ key.algo = CRYPTO_ALGO_WEP1;
+ else
+ key.algo = CRYPTO_ALGO_WEP128;
+ break;
+ case IW_ENCODE_ALG_TKIP:
+ key.algo = CRYPTO_ALGO_TKIP;
+ break;
+ case IW_ENCODE_ALG_CCMP:
+ key.algo = CRYPTO_ALGO_AES_CCM;
+ break;
+#ifdef BCMWAPI_WPI
+ case IW_ENCODE_ALG_SM4:
+ key.algo = CRYPTO_ALGO_SMS4;
+ if (iwe->ext_flags & IW_ENCODE_EXT_GROUP_KEY) {
+ key.flags &= ~WL_PRIMARY_KEY;
+ }
+ break;
+#endif
+ default:
+ break;
+ }
+ swap_key_from_BE(&key);
+
+ dhd_wait_pend8021x(dev);
+
+ error = dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key));
+ if (error)
+ return error;
+#ifdef WL_EXT_IAPSTA
+ wl_ext_in4way_sync_wext(dev, STA_NO_BTC_IN4WAY, WL_EXT_STATUS_ADD_KEY, NULL);
+#endif
+ }
+ return 0;
+}
+
+/* wpa2 pmk list: maintain a host-side PMKID cache (IW_PMKSA_ADD/REMOVE/FLUSH)
+ * and push the whole list to the firmware via the "pmkid_info" iovar
+ */
+static int
+wl_iw_set_pmksa(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ struct pmk_list *pmk_list = NULL;
+ struct iw_pmksa *iwpmksa;
+ uint i;
+ char eabuf[ETHER_ADDR_STR_LEN];
+ pmkid_t *pmkid_array = NULL;
+ struct dhd_pub *dhd = dhd_get_pub(dev);
+ wl_wext_info_t *wext_info = NULL;
+
+ WL_TRACE(("%s: SIOCSIWPMKSA\n", dev->name));
+ DHD_CHECK(dhd, dev);
+ wext_info = dhd->wext_info;
+ pmk_list = &wext_info->pmk_list;
+ if (pmk_list)
+ pmkid_array = pmk_list->pmkids.pmkid;
+ iwpmksa = (struct iw_pmksa *)extra;
+ bzero((char *)eabuf, ETHER_ADDR_STR_LEN);
+ if (iwpmksa->cmd == IW_PMKSA_FLUSH) {
+ WL_TRACE(("wl_iw_set_pmksa - IW_PMKSA_FLUSH\n"));
+ bzero((char *)pmk_list, sizeof(struct pmk_list));
+ }
+ if (iwpmksa->cmd == IW_PMKSA_REMOVE) {
+ pmkid_list_t pmkid, *pmkidptr;
+ pmkidptr = &pmkid;
+ bcopy(&iwpmksa->bssid.sa_data[0], &pmkidptr->pmkid[0].BSSID, ETHER_ADDR_LEN);
+ bcopy(&iwpmksa->pmkid[0], &pmkidptr->pmkid[0].PMKID, WPA2_PMKID_LEN);
+ {
+ uint j;
+ WL_TRACE(("wl_iw_set_pmksa,IW_PMKSA_REMOVE - PMKID: %s = ",
+ bcm_ether_ntoa(&pmkidptr->pmkid[0].BSSID,
+ eabuf)));
+ for (j = 0; j < WPA2_PMKID_LEN; j++)
+ WL_TRACE(("%02x ", pmkidptr->pmkid[0].PMKID[j]));
+ WL_TRACE(("\n"));
+ }
+ for (i = 0; i < pmk_list->pmkids.npmkid; i++)
+ if (!bcmp(&iwpmksa->bssid.sa_data[0], &pmkid_array[i].BSSID,
+ ETHER_ADDR_LEN))
+ break;
+ for (; i < pmk_list->pmkids.npmkid; i++) {
+ bcopy(&pmkid_array[i+1].BSSID,
+ &pmkid_array[i].BSSID,
+ ETHER_ADDR_LEN);
+ bcopy(&pmkid_array[i+1].PMKID,
+ &pmkid_array[i].PMKID,
+ WPA2_PMKID_LEN);
+ }
+ pmk_list->pmkids.npmkid--;
+ }
+ if (iwpmksa->cmd == IW_PMKSA_ADD) {
+ bcopy(&iwpmksa->bssid.sa_data[0],
+ &pmkid_array[pmk_list->pmkids.npmkid].BSSID,
+ ETHER_ADDR_LEN);
+ bcopy(&iwpmksa->pmkid[0], &pmkid_array[pmk_list->pmkids.npmkid].PMKID,
+ WPA2_PMKID_LEN);
+ {
+ uint j;
+ uint k;
+ k = pmk_list->pmkids.npmkid;
+ BCM_REFERENCE(k);
+ WL_TRACE(("wl_iw_set_pmksa,IW_PMKSA_ADD - PMKID: %s = ",
+ bcm_ether_ntoa(&pmkid_array[k].BSSID,
+ eabuf)));
+ for (j = 0; j < WPA2_PMKID_LEN; j++)
+ WL_TRACE(("%02x ", pmkid_array[k].PMKID[j]));
+ WL_TRACE(("\n"));
+ }
+ pmk_list->pmkids.npmkid++;
+ }
+ WL_TRACE(("PRINTING pmkid LIST - No of elements %d\n", pmk_list->pmkids.npmkid));
+ for (i = 0; i < pmk_list->pmkids.npmkid; i++) {
+ uint j;
+ WL_TRACE(("PMKID[%d]: %s = ", i,
+ bcm_ether_ntoa(&pmkid_array[i].BSSID,
+ eabuf)));
+ for (j = 0; j < WPA2_PMKID_LEN; j++)
+ WL_TRACE(("%02x ", pmkid_array[i].PMKID[j]));
+ WL_TRACE(("\n"));
+ }
+ dev_wlc_bufvar_set(dev, "pmkid_info", (char *)pmk_list, sizeof(struct pmk_list));
+ return 0;
+}
+
+static int
+wl_iw_get_encodeext(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ WL_TRACE(("%s: SIOCGIWENCODEEXT\n", dev->name));
+ return 0;
+}
+
+static int
+wl_iw_set_wpaauth(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int error = 0;
+ int paramid;
+ int paramval;
+ uint32 cipher_combined;
+ int val = 0;
+ wl_iw_t *iw = IW_DEV_IF(dev);
+
+ WL_TRACE(("%s: SIOCSIWAUTH\n", dev->name));
+
+ paramid = vwrq->flags & IW_AUTH_INDEX;
+ paramval = vwrq->value;
+
+ WL_TRACE(("%s: SIOCSIWAUTH, paramid = 0x%0x, paramval = 0x%0x\n",
+ dev->name, paramid, paramval));
+
+ switch (paramid) {
+
+ case IW_AUTH_WPA_VERSION:
+ /* supported wpa version disabled or wpa or wpa2 */
+ if (paramval & IW_AUTH_WPA_VERSION_DISABLED)
+ val = WPA_AUTH_DISABLED;
+ else if (paramval & (IW_AUTH_WPA_VERSION_WPA))
+ val = WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED;
+ else if (paramval & IW_AUTH_WPA_VERSION_WPA2)
+ val = WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED;
+#ifdef BCMWAPI_WPI
+ else if (paramval & IW_AUTH_WAPI_VERSION_1)
+ val = WAPI_AUTH_UNSPECIFIED;
+#endif
+ WL_TRACE(("%d: setting wpa_auth to 0x%0x\n", __LINE__, val));
+ if ((error = dev_wlc_intvar_set(dev, "wpa_auth", val)))
+ return error;
+ break;
+
+ case IW_AUTH_CIPHER_PAIRWISE:
+ case IW_AUTH_CIPHER_GROUP: {
+ int fbt_cap = 0;
+
+ if (paramid == IW_AUTH_CIPHER_PAIRWISE) {
+ iw->pwsec = paramval;
+ }
+ else {
+ iw->gwsec = paramval;
+ }
+
+ if ((error = dev_wlc_intvar_get(dev, "wsec", &val))) {
+ WL_ERROR(("wsec error %d\n", error));
+ return error;
+ }
+ WL_WSEC(("get wsec=0x%x\n", val));
+
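+ /* Merge the pairwise and group cipher requests, then rebuild the wsec
+ * bitmap: WEP40/WEP104 -> WEP_ENABLED, TKIP -> TKIP_ENABLED and
+ * CCMP -> AES_ENABLED
+ */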
+ cipher_combined = iw->gwsec | iw->pwsec;
+ val &= ~(WEP_ENABLED | TKIP_ENABLED | AES_ENABLED);
+ if (cipher_combined & (IW_AUTH_CIPHER_WEP40 | IW_AUTH_CIPHER_WEP104))
+ val |= WEP_ENABLED;
+ if (cipher_combined & IW_AUTH_CIPHER_TKIP)
+ val |= TKIP_ENABLED;
+ if (cipher_combined & IW_AUTH_CIPHER_CCMP)
+ val |= AES_ENABLED;
+#ifdef BCMWAPI_WPI
+ val &= ~SMS4_ENABLED;
+ if (cipher_combined & IW_AUTH_CIPHER_SMS4)
+ val |= SMS4_ENABLED;
+#endif
+
+ if (iw->privacy_invoked && !val) {
+ WL_WSEC(("%s: 'Privacy invoked' TRUE but clearing wsec, assuming "
+ "we're a WPS enrollee\n", dev->name));
+ if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", TRUE))) {
+ WL_WSEC(("Failed to set iovar is_WPS_enrollee\n"));
+ return error;
+ }
+ } else if (val) {
+ if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", FALSE))) {
+ WL_WSEC(("Failed to clear iovar is_WPS_enrollee\n"));
+ return error;
+ }
+ }
+
+ WL_WSEC(("set wsec=0x%x\n", val));
+ if ((error = dev_wlc_intvar_set(dev, "wsec", val))) {
+ WL_ERROR(("wsec error %d\n", error));
+ return error;
+ }
+
+ /* Ensure in-dongle supplicant is turned on when FBT wants to do the 4-way
+ * handshake.
+ */
+ if (dev_wlc_intvar_get(dev, "fbt_cap", &fbt_cap) == 0) {
+ WL_WSEC(("get fbt_cap=0x%x\n", fbt_cap));
+ if (fbt_cap == WLC_FBT_CAP_DRV_4WAY_AND_REASSOC) {
+ if ((paramid == IW_AUTH_CIPHER_PAIRWISE) && (val & AES_ENABLED)) {
+ if ((error = dev_wlc_intvar_set(dev, "sup_wpa", 1))) {
+ WL_ERROR(("sup_wpa 1 error %d\n", error));
+ return error;
+ }
+ }
+ else if (val == 0) {
+ if ((error = dev_wlc_intvar_set(dev, "sup_wpa", 0))) {
+ WL_ERROR(("sup_wpa 0 error %d\n", error));
+ return error;
+ }
+ }
+ }
+ }
+ break;
+ }
+
+ case IW_AUTH_KEY_MGMT:
+ if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val))) {
+ WL_ERROR(("wpa_auth error %d\n", error));
+ return error;
+ }
+ WL_WSEC(("get wpa_auth to %d\n", val));
+
+ if (val & (WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED)) {
+ if (paramval & (IW_AUTH_KEY_MGMT_FT_PSK | IW_AUTH_KEY_MGMT_PSK))
+ val = WPA_AUTH_PSK;
+ else
+ val = WPA_AUTH_UNSPECIFIED;
+ if (paramval & (IW_AUTH_KEY_MGMT_FT_802_1X | IW_AUTH_KEY_MGMT_FT_PSK))
+ val |= WPA2_AUTH_FT;
+ }
+ else if (val & (WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED)) {
+ if (paramval & (IW_AUTH_KEY_MGMT_FT_PSK | IW_AUTH_KEY_MGMT_PSK))
+ val = WPA2_AUTH_PSK;
+ else
+ val = WPA2_AUTH_UNSPECIFIED;
+ if (paramval & (IW_AUTH_KEY_MGMT_FT_802_1X | IW_AUTH_KEY_MGMT_FT_PSK))
+ val |= WPA2_AUTH_FT;
+ }
+#ifdef BCMWAPI_WPI
+ if (paramval & (IW_AUTH_KEY_MGMT_WAPI_PSK | IW_AUTH_KEY_MGMT_WAPI_CERT))
+ val = WAPI_AUTH_UNSPECIFIED;
+#endif
+ WL_TRACE(("%d: setting wpa_auth to %d\n", __LINE__, val));
+ if ((error = dev_wlc_intvar_set(dev, "wpa_auth", val)))
+ return error;
+ break;
+
+ case IW_AUTH_TKIP_COUNTERMEASURES:
+ dev_wlc_bufvar_set(dev, "tkip_countermeasures", (char *)&paramval, 1);
+ break;
+
+ case IW_AUTH_80211_AUTH_ALG:
+ /* open shared */
+ WL_MSG(dev->name, "Setting the D11auth %d\n", paramval);
+ if (paramval & IW_AUTH_ALG_OPEN_SYSTEM)
+ val = 0;
+ else if (paramval & IW_AUTH_ALG_SHARED_KEY)
+ val = 1;
+ else
+ error = 1;
+ if (!error && (error = dev_wlc_intvar_set(dev, "auth", val)))
+ return error;
+ break;
+
+ case IW_AUTH_WPA_ENABLED:
+ if (paramval == 0) {
+ val = 0;
+ WL_TRACE(("%d: setting wpa_auth to %d\n", __LINE__, val));
+ error = dev_wlc_intvar_set(dev, "wpa_auth", val);
+ return error;
+ }
+ else {
+ /* If WPA is enabled, wpa_auth is set elsewhere */
+ }
+ break;
+
+ case IW_AUTH_DROP_UNENCRYPTED:
+ dev_wlc_bufvar_set(dev, "wsec_restrict", (char *)&paramval, 1);
+ break;
+
+ case IW_AUTH_RX_UNENCRYPTED_EAPOL:
+ dev_wlc_bufvar_set(dev, "rx_unencrypted_eapol", (char *)&paramval, 1);
+ break;
+
+#if WIRELESS_EXT > 17
+
+ case IW_AUTH_ROAMING_CONTROL:
+ WL_TRACE(("IW_AUTH_ROAMING_CONTROL\n"));
+ /* driver control or user space app control */
+ break;
+
+ case IW_AUTH_PRIVACY_INVOKED: {
+ int wsec;
+
+ if (paramval == 0) {
+ iw->privacy_invoked = FALSE;
+ if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", FALSE))) {
+ WL_WSEC(("Failed to clear iovar is_WPS_enrollee\n"));
+ return error;
+ }
+ } else {
+ iw->privacy_invoked = TRUE;
+ if ((error = dev_wlc_intvar_get(dev, "wsec", &wsec)))
+ return error;
+
+ if (!WSEC_ENABLED(wsec)) {
+ /* if privacy is true, but wsec is false, we are a WPS enrollee */
+ if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", TRUE))) {
+ WL_WSEC(("Failed to set iovar is_WPS_enrollee\n"));
+ return error;
+ }
+ } else {
+ if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", FALSE))) {
+ WL_WSEC(("Failed to clear iovar is_WPS_enrollee\n"));
+ return error;
+ }
+ }
+ }
+ break;
+ }
+
+#endif /* WIRELESS_EXT > 17 */
+
+#ifdef BCMWAPI_WPI
+
+ case IW_AUTH_WAPI_ENABLED:
+ if ((error = dev_wlc_intvar_get(dev, "wsec", &val)))
+ return error;
+ if (paramval) {
+ val |= SMS4_ENABLED;
+ if ((error = dev_wlc_intvar_set(dev, "wsec", val))) {
+ WL_ERROR(("setting wsec to 0x%0x returned error %d\n",
+ val, error));
+ return error;
+ }
+ if ((error = dev_wlc_intvar_set(dev, "wpa_auth", WAPI_AUTH_UNSPECIFIED))) {
+ WL_ERROR(("setting wpa_auth(%d) returned %d\n",
+ WAPI_AUTH_UNSPECIFIED,
+ error));
+ return error;
+ }
+ }
+
+ break;
+
+#endif /* BCMWAPI_WPI */
+
+ default:
+ break;
+ }
+ return 0;
+}
+#define VAL_PSK(_val) (((_val) & WPA_AUTH_PSK) || ((_val) & WPA2_AUTH_PSK))
+
+static int
+wl_iw_get_wpaauth(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int error;
+ int paramid;
+ int paramval = 0;
+ int val;
+ wl_iw_t *iw = IW_DEV_IF(dev);
+
+ WL_TRACE(("%s: SIOCGIWAUTH\n", dev->name));
+
+ paramid = vwrq->flags & IW_AUTH_INDEX;
+
+ switch (paramid) {
+ case IW_AUTH_WPA_VERSION:
+ /* supported wpa version disabled or wpa or wpa2 */
+ if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val)))
+ return error;
+ if (val & (WPA_AUTH_NONE | WPA_AUTH_DISABLED))
+ paramval = IW_AUTH_WPA_VERSION_DISABLED;
+ else if (val & (WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED))
+ paramval = IW_AUTH_WPA_VERSION_WPA;
+ else if (val & (WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED))
+ paramval = IW_AUTH_WPA_VERSION_WPA2;
+ break;
+
+ case IW_AUTH_CIPHER_PAIRWISE:
+ paramval = iw->pwsec;
+ break;
+
+ case IW_AUTH_CIPHER_GROUP:
+ paramval = iw->gwsec;
+ break;
+
+ case IW_AUTH_KEY_MGMT:
+ /* psk, 1x */
+ if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val)))
+ return error;
+ if (VAL_PSK(val))
+ paramval = IW_AUTH_KEY_MGMT_PSK;
+ else
+ paramval = IW_AUTH_KEY_MGMT_802_1X;
+
+ break;
+ case IW_AUTH_TKIP_COUNTERMEASURES:
+ dev_wlc_bufvar_get(dev, "tkip_countermeasures", (char *)&paramval, 1);
+ break;
+
+ case IW_AUTH_DROP_UNENCRYPTED:
+ dev_wlc_bufvar_get(dev, "wsec_restrict", (char *)&paramval, 1);
+ break;
+
+ case IW_AUTH_RX_UNENCRYPTED_EAPOL:
+ dev_wlc_bufvar_get(dev, "rx_unencrypted_eapol", (char *)&paramval, 1);
+ break;
+
+ case IW_AUTH_80211_AUTH_ALG:
+ /* open, shared, leap */
+ if ((error = dev_wlc_intvar_get(dev, "auth", &val)))
+ return error;
+ if (!val)
+ paramval = IW_AUTH_ALG_OPEN_SYSTEM;
+ else
+ paramval = IW_AUTH_ALG_SHARED_KEY;
+ break;
+ case IW_AUTH_WPA_ENABLED:
+ if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val)))
+ return error;
+ if (val)
+ paramval = TRUE;
+ else
+ paramval = FALSE;
+ break;
+
+#if WIRELESS_EXT > 17
+
+ case IW_AUTH_ROAMING_CONTROL:
+ WL_ERROR(("IW_AUTH_ROAMING_CONTROL\n"));
+ /* driver control or user space app control */
+ break;
+
+ case IW_AUTH_PRIVACY_INVOKED:
+ paramval = iw->privacy_invoked;
+ break;
+
+#endif /* WIRELESS_EXT > 17 */
+ }
+ vwrq->value = paramval;
+ return 0;
+}
+#endif /* WIRELESS_EXT > 17 */
+
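+/*
+ * Standard WEXT handler table. The kernel dispatches by index,
+ * IW_IOCTL_IDX(cmd) == cmd - SIOCIWFIRST, so the NULL "hole" entries
+ * must stay in place to keep later handlers aligned with their ioctl
+ * numbers.
+ */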
+static const iw_handler wl_iw_handler[] =
+{
+ (iw_handler) wl_iw_config_commit, /* SIOCSIWCOMMIT */
+ (iw_handler) wl_iw_get_name, /* SIOCGIWNAME */
+ (iw_handler) NULL, /* SIOCSIWNWID */
+ (iw_handler) NULL, /* SIOCGIWNWID */
+ (iw_handler) wl_iw_set_freq, /* SIOCSIWFREQ */
+ (iw_handler) wl_iw_get_freq, /* SIOCGIWFREQ */
+ (iw_handler) wl_iw_set_mode, /* SIOCSIWMODE */
+ (iw_handler) wl_iw_get_mode, /* SIOCGIWMODE */
+ (iw_handler) NULL, /* SIOCSIWSENS */
+ (iw_handler) NULL, /* SIOCGIWSENS */
+ (iw_handler) NULL, /* SIOCSIWRANGE */
+ (iw_handler) wl_iw_get_range, /* SIOCGIWRANGE */
+ (iw_handler) NULL, /* SIOCSIWPRIV */
+ (iw_handler) NULL, /* SIOCGIWPRIV */
+ (iw_handler) NULL, /* SIOCSIWSTATS */
+ (iw_handler) NULL, /* SIOCGIWSTATS */
+ (iw_handler) wl_iw_set_spy, /* SIOCSIWSPY */
+ (iw_handler) wl_iw_get_spy, /* SIOCGIWSPY */
+ (iw_handler) NULL, /* -- hole -- */
+ (iw_handler) NULL, /* -- hole -- */
+ (iw_handler) wl_iw_set_wap, /* SIOCSIWAP */
+ (iw_handler) wl_iw_get_wap, /* SIOCGIWAP */
+#if WIRELESS_EXT > 17
+ (iw_handler) wl_iw_mlme, /* SIOCSIWMLME */
+#else
+ (iw_handler) NULL, /* -- hole -- */
+#endif
+#ifdef WL_ESCAN
+ (iw_handler) NULL, /* SIOCGIWAPLIST */
+#else
+ (iw_handler) wl_iw_iscan_get_aplist, /* SIOCGIWAPLIST */
+#endif
+#if WIRELESS_EXT > 13
+ (iw_handler) wl_iw_iscan_set_scan, /* SIOCSIWSCAN */
+ (iw_handler) wl_iw_iscan_get_scan, /* SIOCGIWSCAN */
+#else /* WIRELESS_EXT > 13 */
+ (iw_handler) NULL, /* SIOCSIWSCAN */
+ (iw_handler) NULL, /* SIOCGIWSCAN */
+#endif /* WIRELESS_EXT > 13 */
+ (iw_handler) wl_iw_set_essid, /* SIOCSIWESSID */
+ (iw_handler) wl_iw_get_essid, /* SIOCGIWESSID */
+ (iw_handler) wl_iw_set_nick, /* SIOCSIWNICKN */
+ (iw_handler) wl_iw_get_nick, /* SIOCGIWNICKN */
+ (iw_handler) NULL, /* -- hole -- */
+ (iw_handler) NULL, /* -- hole -- */
+ (iw_handler) wl_iw_set_rate, /* SIOCSIWRATE */
+ (iw_handler) wl_iw_get_rate, /* SIOCGIWRATE */
+ (iw_handler) wl_iw_set_rts, /* SIOCSIWRTS */
+ (iw_handler) wl_iw_get_rts, /* SIOCGIWRTS */
+ (iw_handler) wl_iw_set_frag, /* SIOCSIWFRAG */
+ (iw_handler) wl_iw_get_frag, /* SIOCGIWFRAG */
+ (iw_handler) wl_iw_set_txpow, /* SIOCSIWTXPOW */
+ (iw_handler) wl_iw_get_txpow, /* SIOCGIWTXPOW */
+#if WIRELESS_EXT > 10
+ (iw_handler) wl_iw_set_retry, /* SIOCSIWRETRY */
+ (iw_handler) wl_iw_get_retry, /* SIOCGIWRETRY */
+#endif /* WIRELESS_EXT > 10 */
+ (iw_handler) wl_iw_set_encode, /* SIOCSIWENCODE */
+ (iw_handler) wl_iw_get_encode, /* SIOCGIWENCODE */
+ (iw_handler) wl_iw_set_power, /* SIOCSIWPOWER */
+ (iw_handler) wl_iw_get_power, /* SIOCGIWPOWER */
+#if WIRELESS_EXT > 17
+ (iw_handler) NULL, /* -- hole -- */
+ (iw_handler) NULL, /* -- hole -- */
+ (iw_handler) wl_iw_set_wpaie, /* SIOCSIWGENIE */
+ (iw_handler) wl_iw_get_wpaie, /* SIOCGIWGENIE */
+ (iw_handler) wl_iw_set_wpaauth, /* SIOCSIWAUTH */
+ (iw_handler) wl_iw_get_wpaauth, /* SIOCGIWAUTH */
+ (iw_handler) wl_iw_set_encodeext, /* SIOCSIWENCODEEXT */
+ (iw_handler) wl_iw_get_encodeext, /* SIOCGIWENCODEEXT */
+ (iw_handler) wl_iw_set_pmksa, /* SIOCSIWPMKSA */
+#endif /* WIRELESS_EXT > 17 */
+};
+
+#if WIRELESS_EXT > 12
+enum {
+ WL_IW_SET_LEDDC = SIOCIWFIRSTPRIV,
+ WL_IW_SET_VLANMODE,
+ WL_IW_SET_PM,
+ WL_IW_SET_LAST
+};
+
+static iw_handler wl_iw_priv_handler[] = {
+ wl_iw_set_leddc,
+ wl_iw_set_vlanmode,
+ wl_iw_set_pm,
+ NULL
+};
+
+static struct iw_priv_args wl_iw_priv_args[] = {
+ {
+ WL_IW_SET_LEDDC,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+ 0,
+ "set_leddc"
+ },
+ {
+ WL_IW_SET_VLANMODE,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+ 0,
+ "set_vlanmode"
+ },
+ {
+ WL_IW_SET_PM,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+ 0,
+ "set_pm"
+ },
+ { 0, 0, 0, { 0 } }
+};
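+
+/*
+ * The private handlers above are reachable from user space through the
+ * wireless-tools iwpriv utility, e.g. (illustrative usage, assuming an
+ * interface named wlan0):
+ *
+ *   iwpriv wlan0 set_pm 2
+ *   iwpriv wlan0 set_leddc 0
+ */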
+
+const struct iw_handler_def wl_iw_handler_def =
+{
+ .num_standard = ARRAYSIZE(wl_iw_handler),
+ .num_private = ARRAY_SIZE(wl_iw_priv_handler),
+ .num_private_args = ARRAY_SIZE(wl_iw_priv_args),
+ .standard = (const iw_handler *) wl_iw_handler,
+ .private = wl_iw_priv_handler,
+ .private_args = wl_iw_priv_args,
+#if WIRELESS_EXT >= 19
+ .get_wireless_stats = dhd_get_wireless_stats,
+#endif /* WIRELESS_EXT >= 19 */
+};
+#endif /* WIRELESS_EXT > 12 */
+
+int
+wl_iw_ioctl(
+ struct net_device *dev,
+ struct ifreq *rq,
+ int cmd
+)
+{
+ struct iwreq *wrq = (struct iwreq *) rq;
+ struct iw_request_info info;
+ iw_handler handler;
+ char *extra = NULL;
+ size_t token_size = 1;
+ int max_tokens = 0, ret = 0;
+#ifndef WL_ESCAN
+ struct dhd_pub *dhd = dhd_get_pub(dev);
+ wl_wext_info_t *wext_info = NULL;
+ iscan_info_t *iscan;
+
+ DHD_CHECK(dhd, dev);
+ wext_info = dhd->wext_info;
+ iscan = &wext_info->iscan;
+#endif
+
+ if (cmd < SIOCIWFIRST ||
+ IW_IOCTL_IDX(cmd) >= ARRAYSIZE(wl_iw_handler) ||
+ !(handler = wl_iw_handler[IW_IOCTL_IDX(cmd)]))
+ return -EOPNOTSUPP;
+
+ switch (cmd) {
+
+ case SIOCSIWESSID:
+ case SIOCGIWESSID:
+ case SIOCSIWNICKN:
+ case SIOCGIWNICKN:
+ max_tokens = IW_ESSID_MAX_SIZE + 1;
+ break;
+
+ case SIOCSIWENCODE:
+ case SIOCGIWENCODE:
+#if WIRELESS_EXT > 17
+ case SIOCSIWENCODEEXT:
+ case SIOCGIWENCODEEXT:
+#endif
+ max_tokens = IW_ENCODING_TOKEN_MAX;
+ break;
+
+ case SIOCGIWRANGE:
+ max_tokens = sizeof(struct iw_range);
+ break;
+
+ case SIOCGIWAPLIST:
+ token_size = sizeof(struct sockaddr) + sizeof(struct iw_quality);
+ max_tokens = IW_MAX_AP;
+ break;
+
+#if WIRELESS_EXT > 13
+ case SIOCGIWSCAN:
+#ifndef WL_ESCAN
+ if (iscan)
+ max_tokens = wrq->u.data.length;
+ else
+#endif
+ max_tokens = IW_SCAN_MAX_DATA;
+ break;
+#endif /* WIRELESS_EXT > 13 */
+
+ case SIOCSIWSPY:
+ token_size = sizeof(struct sockaddr);
+ max_tokens = IW_MAX_SPY;
+ break;
+
+ case SIOCGIWSPY:
+ token_size = sizeof(struct sockaddr) + sizeof(struct iw_quality);
+ max_tokens = IW_MAX_SPY;
+ break;
+ default:
+ break;
+ }
+
+ if (max_tokens && wrq->u.data.pointer) {
+ if (wrq->u.data.length > max_tokens)
+ return -E2BIG;
+
+ if (!(extra = kmalloc(max_tokens * token_size, GFP_KERNEL)))
+ return -ENOMEM;
+
+ if (copy_from_user(extra, wrq->u.data.pointer, wrq->u.data.length * token_size)) {
+ kfree(extra);
+ return -EFAULT;
+ }
+ }
+
+ info.cmd = cmd;
+ info.flags = 0;
+
+ ret = handler(dev, &info, &wrq->u, extra);
+
+ if (extra) {
+ if (copy_to_user(wrq->u.data.pointer, extra, wrq->u.data.length * token_size)) {
+ kfree(extra);
+ return -EFAULT;
+ }
+
+ kfree(extra);
+ }
+
+ return ret;
+}
+
+/* Convert a connection status event into a connection status string.
+ * Returns TRUE if a matching connection status string was found.
+ */
+bool
+wl_iw_conn_status_str(uint32 event_type, uint32 status, uint32 reason,
+ char* stringBuf, uint buflen)
+{
+ typedef struct conn_fail_event_map_t {
+ uint32 inEvent; /* input: event type to match */
+ uint32 inStatus; /* input: event status code to match */
+ uint32 inReason; /* input: event reason code to match */
+ const char* outName; /* output: failure type */
+ const char* outCause; /* output: failure cause */
+ } conn_fail_event_map_t;
+
+ /* Map of WLC_E events to connection failure strings */
+# define WL_IW_DONT_CARE 9999
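+ /* WL_IW_DONT_CARE is a sentinel that matches any status/reason code */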
+ const conn_fail_event_map_t event_map [] = {
+ /* inEvent inStatus inReason */
+ /* outName outCause */
+ {WLC_E_SET_SSID, WLC_E_STATUS_SUCCESS, WL_IW_DONT_CARE,
+ "Conn", "Success"},
+ {WLC_E_SET_SSID, WLC_E_STATUS_NO_NETWORKS, WL_IW_DONT_CARE,
+ "Conn", "NoNetworks"},
+ {WLC_E_SET_SSID, WLC_E_STATUS_FAIL, WL_IW_DONT_CARE,
+ "Conn", "ConfigMismatch"},
+ {WLC_E_PRUNE, WL_IW_DONT_CARE, WLC_E_PRUNE_ENCR_MISMATCH,
+ "Conn", "EncrypMismatch"},
+ {WLC_E_PRUNE, WL_IW_DONT_CARE, WLC_E_RSN_MISMATCH,
+ "Conn", "RsnMismatch"},
+ {WLC_E_AUTH, WLC_E_STATUS_TIMEOUT, WL_IW_DONT_CARE,
+ "Conn", "AuthTimeout"},
+ {WLC_E_AUTH, WLC_E_STATUS_FAIL, WL_IW_DONT_CARE,
+ "Conn", "AuthFail"},
+ {WLC_E_AUTH, WLC_E_STATUS_NO_ACK, WL_IW_DONT_CARE,
+ "Conn", "AuthNoAck"},
+ {WLC_E_REASSOC, WLC_E_STATUS_FAIL, WL_IW_DONT_CARE,
+ "Conn", "ReassocFail"},
+ {WLC_E_REASSOC, WLC_E_STATUS_TIMEOUT, WL_IW_DONT_CARE,
+ "Conn", "ReassocTimeout"},
+ {WLC_E_REASSOC, WLC_E_STATUS_ABORT, WL_IW_DONT_CARE,
+ "Conn", "ReassocAbort"},
+ {WLC_E_PSK_SUP, WLC_SUP_KEYED, WL_IW_DONT_CARE,
+ "Sup", "ConnSuccess"},
+ {WLC_E_PSK_SUP, WL_IW_DONT_CARE, WL_IW_DONT_CARE,
+ "Sup", "WpaHandshakeFail"},
+ {WLC_E_DEAUTH_IND, WL_IW_DONT_CARE, WL_IW_DONT_CARE,
+ "Conn", "Deauth"},
+ {WLC_E_DISASSOC_IND, WL_IW_DONT_CARE, WL_IW_DONT_CARE,
+ "Conn", "DisassocInd"},
+ {WLC_E_DISASSOC, WL_IW_DONT_CARE, WL_IW_DONT_CARE,
+ "Conn", "Disassoc"}
+ };
+
+ const char* name = "";
+ const char* cause = NULL;
+ int i;
+
+ /* Search the event map table for a matching event */
+ for (i = 0; i < sizeof(event_map)/sizeof(event_map[0]); i++) {
+ const conn_fail_event_map_t* row = &event_map[i];
+ if (row->inEvent == event_type &&
+ (row->inStatus == status || row->inStatus == WL_IW_DONT_CARE) &&
+ (row->inReason == reason || row->inReason == WL_IW_DONT_CARE)) {
+ name = row->outName;
+ cause = row->outCause;
+ break;
+ }
+ }
+
+ /* If found, generate a connection failure string and return TRUE */
+ if (cause) {
+ memset(stringBuf, 0, buflen);
+ (void)snprintf(stringBuf, buflen, "%s %s %02u %02u", name, cause, status, reason);
+ WL_TRACE(("Connection status: %s\n", stringBuf));
+ return TRUE;
+ } else {
+ return FALSE;
+ }
+}
+
+#if (WIRELESS_EXT > 14)
+/* Check if we have received an event that indicates connection failure
+ * If so, generate a connection failure report string.
+ * The caller supplies a buffer to hold the generated string.
+ */
+static bool
+wl_iw_check_conn_fail(const wl_event_msg_t *e, char* stringBuf, uint buflen)
+{
+ uint32 event = ntoh32(e->event_type);
+ uint32 status = ntoh32(e->status);
+ uint32 reason = ntoh32(e->reason);
+
+ if (wl_iw_conn_status_str(event, status, reason, stringBuf, buflen)) {
+ return TRUE;
+ } else {
+ return FALSE;
+ }
+}
+#endif /* WIRELESS_EXT > 14 */
+
+#ifndef IW_CUSTOM_MAX
+#define IW_CUSTOM_MAX 256 /* size of extra buffer used for translation of events */
+#endif /* IW_CUSTOM_MAX */
+
+void
+wl_iw_event(struct net_device *dev, void *argu,
+ const wl_event_msg_t *e, void* data)
+{
+#if WIRELESS_EXT > 13
+ union iwreq_data wrqu;
+ char extra[IW_CUSTOM_MAX + 1];
+ int cmd = 0;
+ uint32 event_type = ntoh32(e->event_type);
+ uint16 flags = ntoh16(e->flags);
+ uint32 datalen = ntoh32(e->datalen);
+ uint32 status = ntoh32(e->status);
+ uint32 reason = ntoh32(e->reason);
+#ifndef WL_ESCAN
+ struct wl_wext_info *wext_info = (struct wl_wext_info *)argu;
+ iscan_info_t *iscan = &wext_info->iscan;
+#endif
+
+ memset(&wrqu, 0, sizeof(wrqu));
+ memset(extra, 0, sizeof(extra));
+
+ memcpy(wrqu.addr.sa_data, &e->addr, ETHER_ADDR_LEN);
+ wrqu.addr.sa_family = ARPHRD_ETHER;
+
+ switch (event_type) {
+ case WLC_E_TXFAIL:
+ cmd = IWEVTXDROP;
+ break;
+#if WIRELESS_EXT > 14
+ case WLC_E_JOIN:
+ case WLC_E_ASSOC_IND:
+ case WLC_E_REASSOC_IND:
+ cmd = IWEVREGISTERED;
+ break;
+ case WLC_E_DEAUTH:
+ case WLC_E_DISASSOC:
+#ifdef WL_EXT_IAPSTA
+ wl_ext_in4way_sync_wext(dev, STA_NO_BTC_IN4WAY,
+ WL_EXT_STATUS_DISCONNECTED, NULL);
+#endif
+ WL_MSG_RLMT(dev->name, &e->addr, ETHER_ADDR_LEN,
+ "disconnected with "MACSTR", event %d, reason %d\n",
+ MAC2STR((u8 *)wrqu.addr.sa_data), event_type, reason);
+ break;
+ case WLC_E_DEAUTH_IND:
+ case WLC_E_DISASSOC_IND:
+ cmd = SIOCGIWAP;
+ WL_MSG(dev->name, "disconnected with "MACSTR", event %d, reason %d\n",
+ MAC2STR((u8 *)wrqu.addr.sa_data), event_type, reason);
+ bzero(wrqu.addr.sa_data, ETHER_ADDR_LEN);
+ bzero(&extra, ETHER_ADDR_LEN);
+#ifdef WL_EXT_IAPSTA
+ wl_ext_in4way_sync_wext(dev, STA_NO_BTC_IN4WAY,
+ WL_EXT_STATUS_DISCONNECTED, NULL);
+#endif
+ break;
+
+ case WLC_E_LINK:
+ cmd = SIOCGIWAP;
+ if (!(flags & WLC_EVENT_MSG_LINK)) {
+ WL_MSG(dev->name, "Link Down with "MACSTR", reason=%d\n",
+ MAC2STR((u8 *)wrqu.addr.sa_data), reason);
+ bzero(wrqu.addr.sa_data, ETHER_ADDR_LEN);
+ bzero(&extra, ETHER_ADDR_LEN);
+#ifdef WL_EXT_IAPSTA
+ wl_ext_in4way_sync_wext(dev, STA_NO_BTC_IN4WAY,
+ WL_EXT_STATUS_DISCONNECTED, NULL);
+#endif
+ } else {
+ WL_MSG(dev->name, "Link UP with "MACSTR"\n",
+ MAC2STR((u8 *)wrqu.addr.sa_data));
+ }
+ break;
+ case WLC_E_ACTION_FRAME:
+ cmd = IWEVCUSTOM;
+ if (datalen + 1 <= sizeof(extra)) {
+ wrqu.data.length = datalen + 1;
+ extra[0] = WLC_E_ACTION_FRAME;
+ memcpy(&extra[1], data, datalen);
+ WL_TRACE(("WLC_E_ACTION_FRAME len %d \n", wrqu.data.length));
+ }
+ break;
+
+ case WLC_E_ACTION_FRAME_COMPLETE:
+ cmd = IWEVCUSTOM;
+ if (sizeof(status) + 1 <= sizeof(extra)) {
+ wrqu.data.length = sizeof(status) + 1;
+ extra[0] = WLC_E_ACTION_FRAME_COMPLETE;
+ memcpy(&extra[1], &status, sizeof(status));
+ WL_TRACE(("wl_iw_event status %d \n", status));
+ }
+ break;
+#endif /* WIRELESS_EXT > 14 */
+#if WIRELESS_EXT > 17
+ case WLC_E_MIC_ERROR: {
+ struct iw_michaelmicfailure *micerrevt = (struct iw_michaelmicfailure *)&extra;
+ cmd = IWEVMICHAELMICFAILURE;
+ wrqu.data.length = sizeof(struct iw_michaelmicfailure);
+ if (flags & WLC_EVENT_MSG_GROUP)
+ micerrevt->flags |= IW_MICFAILURE_GROUP;
+ else
+ micerrevt->flags |= IW_MICFAILURE_PAIRWISE;
+ memcpy(micerrevt->src_addr.sa_data, &e->addr, ETHER_ADDR_LEN);
+ micerrevt->src_addr.sa_family = ARPHRD_ETHER;
+
+ break;
+ }
+
+ case WLC_E_ASSOC_REQ_IE:
+ cmd = IWEVASSOCREQIE;
+ wrqu.data.length = datalen;
+ if (datalen < sizeof(extra))
+ memcpy(extra, data, datalen);
+ break;
+
+ case WLC_E_ASSOC_RESP_IE:
+ cmd = IWEVASSOCRESPIE;
+ wrqu.data.length = datalen;
+ if (datalen < sizeof(extra))
+ memcpy(extra, data, datalen);
+ break;
+
+ case WLC_E_PMKID_CACHE: {
+ struct iw_pmkid_cand *iwpmkidcand = (struct iw_pmkid_cand *)&extra;
+ pmkid_cand_list_t *pmkcandlist;
+ pmkid_cand_t *pmkidcand;
+ int count;
+
+ if (data == NULL)
+ break;
+
+ cmd = IWEVPMKIDCAND;
+ pmkcandlist = data;
+ count = ntoh32_ua((uint8 *)&pmkcandlist->npmkid_cand);
+ wrqu.data.length = sizeof(struct iw_pmkid_cand);
+ pmkidcand = pmkcandlist->pmkid_cand;
+ while (count) {
+ bzero(iwpmkidcand, sizeof(struct iw_pmkid_cand));
+ if (pmkidcand->preauth)
+ iwpmkidcand->flags |= IW_PMKID_CAND_PREAUTH;
+ bcopy(&pmkidcand->BSSID, &iwpmkidcand->bssid.sa_data,
+ ETHER_ADDR_LEN);
+ wireless_send_event(dev, cmd, &wrqu, extra);
+ pmkidcand++;
+ count--;
+ }
+ break;
+ }
+#endif /* WIRELESS_EXT > 17 */
+
+#ifndef WL_ESCAN
+ case WLC_E_SCAN_COMPLETE:
+#if WIRELESS_EXT > 14
+ cmd = SIOCGIWSCAN;
+#endif
+ WL_TRACE(("event WLC_E_SCAN_COMPLETE\n"));
+ // terence 20150224: fix "wlan0: (WE) : Wireless Event too big (65306)"
+ memset(&wrqu, 0, sizeof(wrqu));
+ if ((iscan) && (iscan->sysioc_pid >= 0) &&
+ (iscan->iscan_state != ISCAN_STATE_IDLE))
+ up(&iscan->sysioc_sem);
+ break;
+#endif
+
+ default:
+ /* Cannot translate event */
+ break;
+ }
+
+ if (cmd) {
+#ifndef WL_ESCAN
+ if (cmd == SIOCGIWSCAN) {
+ if ((!iscan) || (iscan->sysioc_pid < 0)) {
+ wireless_send_event(dev, cmd, &wrqu, NULL);
+ }
+ } else
+#endif
+ wireless_send_event(dev, cmd, &wrqu, extra);
+ }
+
+#if WIRELESS_EXT > 14
+ /* Look for WLC events that indicate a connection failure.
+ * If found, generate an IWEVCUSTOM event.
+ */
+ memset(extra, 0, sizeof(extra));
+ if (wl_iw_check_conn_fail(e, extra, sizeof(extra))) {
+ cmd = IWEVCUSTOM;
+ wrqu.data.length = strlen(extra);
+ wireless_send_event(dev, cmd, &wrqu, extra);
+ }
+#endif /* WIRELESS_EXT > 14 */
+
+#endif /* WIRELESS_EXT > 13 */
+}
+
+#ifdef WL_NAN
+static int wl_iw_get_wireless_stats_cbfn(void *ctx, const uint8 *data, uint16 type, uint16 len)
+{
+ struct iw_statistics *wstats = ctx;
+ int res = BCME_OK;
+
+ switch (type) {
+ case WL_CNT_XTLV_WLC: {
+ wl_cnt_wlc_t *cnt = (wl_cnt_wlc_t *)data;
+ if (len > sizeof(wl_cnt_wlc_t)) {
+ printf("counter structure length invalid! %d > %d\n",
+ len, (int)sizeof(wl_cnt_wlc_t));
+ }
+ wstats->discard.nwid = 0;
+ wstats->discard.code = dtoh32(cnt->rxundec);
+ wstats->discard.fragment = dtoh32(cnt->rxfragerr);
+ wstats->discard.retries = dtoh32(cnt->txfail);
+ wstats->discard.misc = dtoh32(cnt->rxrunt) + dtoh32(cnt->rxgiant);
+ wstats->miss.beacon = 0;
+ WL_TRACE(("wl_iw_get_wireless_stats counters txframe=%d txbyte=%d\n",
+ dtoh32(cnt->txframe), dtoh32(cnt->txbyte)));
+ WL_TRACE(("wl_iw_get_wireless_stats counters rxundec=%d\n",
+ dtoh32(cnt->rxundec)));
+ WL_TRACE(("wl_iw_get_wireless_stats counters txfail=%d\n",
+ dtoh32(cnt->txfail)));
+ WL_TRACE(("wl_iw_get_wireless_stats counters rxfragerr=%d\n",
+ dtoh32(cnt->rxfragerr)));
+ WL_TRACE(("wl_iw_get_wireless_stats counters rxrunt=%d\n",
+ dtoh32(cnt->rxrunt)));
+ WL_TRACE(("wl_iw_get_wireless_stats counters rxgiant=%d\n",
+ dtoh32(cnt->rxgiant)));
+ break;
+ }
+ case WL_CNT_XTLV_CNTV_LE10_UCODE:
+ case WL_CNT_XTLV_LT40_UCODE_V1:
+ case WL_CNT_XTLV_GE40_UCODE_V1:
+ {
+ /* Offsets of rxfrmtoolong and rxbadplcp are the same in
+ * wl_cnt_v_le10_mcst_t, wl_cnt_lt40mcst_v1_t, and wl_cnt_ge40mcst_v1_t.
+ * So we can just cast to wl_cnt_v_le10_mcst_t here.
+ */
+ wl_cnt_v_le10_mcst_t *cnt = (wl_cnt_v_le10_mcst_t *)data;
+ if (len != WL_CNT_MCST_STRUCT_SZ) {
+ printf("counter structure length mismatch! %d != %d\n",
+ len, WL_CNT_MCST_STRUCT_SZ);
+ }
+ WL_TRACE(("wl_iw_get_wireless_stats counters rxfrmtoolong=%d\n",
+ dtoh32(cnt->rxfrmtoolong)));
+ WL_TRACE(("wl_iw_get_wireless_stats counters rxbadplcp=%d\n",
+ dtoh32(cnt->rxbadplcp)));
+ BCM_REFERENCE(cnt);
+ break;
+ }
+ default:
+ WL_ERROR(("%d: Unsupported type %d\n", __LINE__, type));
+ break;
+ }
+ return res;
+}
+#endif
+
+int wl_iw_get_wireless_stats(struct net_device *dev, struct iw_statistics *wstats)
+{
+ int res = 0;
+ int phy_noise;
+ int rssi;
+ scb_val_t scb_val;
+#if WIRELESS_EXT > 11
+ char *cntbuf = NULL;
+ wl_cnt_info_t *cntinfo;
+ uint16 ver;
+ uint32 corerev = 0;
+#endif /* WIRELESS_EXT > 11 */
+
+ phy_noise = 0;
+ if ((res = dev_wlc_ioctl(dev, WLC_GET_PHY_NOISE, &phy_noise, sizeof(phy_noise)))) {
+ WL_TRACE(("WLC_GET_PHY_NOISE error=%d\n", res));
+ goto done;
+ }
+
+ phy_noise = dtoh32(phy_noise);
+ WL_TRACE(("wl_iw_get_wireless_stats phy noise=%d\n *****", phy_noise));
+
+ memset(&scb_val, 0, sizeof(scb_val));
+ if ((res = dev_wlc_ioctl(dev, WLC_GET_RSSI, &scb_val, sizeof(scb_val_t)))) {
+ WL_TRACE(("WLC_GET_RSSI error=%d\n", res));
+ goto done;
+ }
+
+ rssi = dtoh32(scb_val.val);
+ rssi = MIN(rssi, RSSI_MAXVAL);
+ WL_TRACE(("wl_iw_get_wireless_stats rssi=%d ****** \n", rssi));
+ if (rssi <= WL_IW_RSSI_NO_SIGNAL)
+ wstats->qual.qual = 0;
+ else if (rssi <= WL_IW_RSSI_VERY_LOW)
+ wstats->qual.qual = 1;
+ else if (rssi <= WL_IW_RSSI_LOW)
+ wstats->qual.qual = 2;
+ else if (rssi <= WL_IW_RSSI_GOOD)
+ wstats->qual.qual = 3;
+ else if (rssi <= WL_IW_RSSI_VERY_GOOD)
+ wstats->qual.qual = 4;
+ else
+ wstats->qual.qual = 5;
+
+ /* qual.level and qual.noise are unsigned 8-bit fields; adding 0x100
+ * stores the negative dBm value as its two's-complement byte
+ * (wraps to 0 if RSSI is 0)
+ */
+ wstats->qual.level = 0x100 + rssi;
+ wstats->qual.noise = 0x100 + phy_noise;
+#if WIRELESS_EXT > 18
+ wstats->qual.updated |= (IW_QUAL_ALL_UPDATED | IW_QUAL_DBM);
+#else
+ wstats->qual.updated |= 7;
+#endif /* WIRELESS_EXT > 18 */
+
+#if WIRELESS_EXT > 11
+ WL_TRACE(("wl_iw_get_wireless_stats counters\n *****"));
+
+ cntbuf = kmalloc(MAX_WLIW_IOCTL_LEN, GFP_KERNEL);
+ if (!cntbuf) {
+ res = BCME_NOMEM;
+ goto done;
+ }
+
+ memset(cntbuf, 0, MAX_WLIW_IOCTL_LEN);
+ res = dev_wlc_bufvar_get(dev, "counters", cntbuf, MAX_WLIW_IOCTL_LEN);
+ if (res)
+ {
+ WL_ERROR(("wl_iw_get_wireless_stats counters failed error=%d ****** \n", res));
+ goto done;
+ }
+
+ cntinfo = (wl_cnt_info_t *)cntbuf;
+ cntinfo->version = dtoh16(cntinfo->version);
+ cntinfo->datalen = dtoh16(cntinfo->datalen);
+ ver = cntinfo->version;
+#ifdef WL_NAN
+ CHK_CNTBUF_DATALEN(cntbuf, MAX_WLIW_IOCTL_LEN);
+#endif
+ if (ver > WL_CNT_T_VERSION) {
+ WL_TRACE(("\tIncorrect version of counters struct: expected %d; got %d\n",
+ WL_CNT_T_VERSION, ver));
+ res = BCME_VERSION;
+ goto done;
+ }
+
+ if (ver == WL_CNT_VERSION_11) {
+ wlc_rev_info_t revinfo;
+ memset(&revinfo, 0, sizeof(revinfo));
+ res = dev_wlc_ioctl(dev, WLC_GET_REVINFO, &revinfo, sizeof(revinfo));
+ if (res) {
+ WL_ERROR(("WLC_GET_REVINFO failed %d\n", res));
+ goto done;
+ }
+ corerev = dtoh32(revinfo.corerev);
+ }
+
+#ifdef WL_NAN
+ res = wl_cntbuf_to_xtlv_format(NULL, cntinfo, MAX_WLIW_IOCTL_LEN, corerev);
+ if (res) {
+ WL_ERROR(("wl_cntbuf_to_xtlv_format failed %d\n", res));
+ goto done;
+ }
+
+ if ((res = bcm_unpack_xtlv_buf(wstats, cntinfo->data, cntinfo->datalen,
+ BCM_XTLV_OPTION_ALIGN32, wl_iw_get_wireless_stats_cbfn))) {
+ goto done;
+ }
+#endif
+#endif /* WIRELESS_EXT > 11 */
+
+done:
+#if WIRELESS_EXT > 11
+ if (cntbuf) {
+ kfree(cntbuf);
+ }
+#endif /* WIRELESS_EXT > 11 */
+ return res;
+}
+
+#ifndef WL_ESCAN
+static void
+wl_iw_timerfunc(ulong data)
+{
+ iscan_info_t *iscan = (iscan_info_t *)data;
+ iscan->timer_on = 0;
+ if (iscan->iscan_state != ISCAN_STATE_IDLE) {
+ WL_TRACE(("timer trigger\n"));
+ up(&iscan->sysioc_sem);
+ }
+}
+
+static void
+wl_iw_set_event_mask(struct net_device *dev)
+{
+ char eventmask[WL_EVENTING_MASK_LEN];
+ char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */
+
+ dev_iw_iovar_getbuf(dev, "event_msgs", "", 0, iovbuf, sizeof(iovbuf));
+ bcopy(iovbuf, eventmask, WL_EVENTING_MASK_LEN);
+ setbit(eventmask, WLC_E_SCAN_COMPLETE);
+ dev_iw_iovar_setbuf(dev, "event_msgs", eventmask, WL_EVENTING_MASK_LEN,
+ iovbuf, sizeof(iovbuf));
+
+}
+
+static int
+wl_iw_iscan_prep(wl_scan_params_t *params, wlc_ssid_t *ssid)
+{
+ int err = 0;
+
+ memcpy(&params->bssid, &ether_bcast, ETHER_ADDR_LEN);
+ params->bss_type = DOT11_BSSTYPE_ANY;
+ params->scan_type = 0;
+ params->nprobes = -1;
+ params->active_time = -1;
+ params->passive_time = -1;
+ params->home_time = -1;
+ params->channel_num = 0;
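+ /* -1 leaves nprobes and the dwell times at firmware defaults;
+ * channel_num 0 requests a scan of all channels
+ */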
+
+ params->nprobes = htod32(params->nprobes);
+ params->active_time = htod32(params->active_time);
+ params->passive_time = htod32(params->passive_time);
+ params->home_time = htod32(params->home_time);
+ if (ssid && ssid->SSID_len)
+ memcpy(&params->ssid, ssid, sizeof(wlc_ssid_t));
+
+ return err;
+}
+
+static int
+wl_iw_iscan(iscan_info_t *iscan, wlc_ssid_t *ssid, uint16 action)
+{
+ int params_size = (WL_SCAN_PARAMS_FIXED_SIZE + OFFSETOF(wl_iscan_params_t, params));
+ wl_iscan_params_t *params;
+ int err = 0;
+
+ if (ssid && ssid->SSID_len) {
+ params_size += sizeof(wlc_ssid_t);
+ }
+ params = (wl_iscan_params_t*)kmalloc(params_size, GFP_KERNEL);
+ if (params == NULL) {
+ return -ENOMEM;
+ }
+ memset(params, 0, params_size);
+ ASSERT(params_size < WLC_IOCTL_SMLEN);
+
+ err = wl_iw_iscan_prep(&params->params, ssid);
+
+ if (!err) {
+ params->version = htod32(ISCAN_REQ_VERSION);
+ params->action = htod16(action);
+ params->scan_duration = htod16(0);
+
+ /* params_size += OFFSETOF(wl_iscan_params_t, params); */
+ (void) dev_iw_iovar_setbuf(iscan->dev, "iscan", params, params_size,
+ iscan->ioctlbuf, WLC_IOCTL_SMLEN);
+ }
+
+ kfree(params);
+ return err;
+}
+
+static uint32
+wl_iw_iscan_get(iscan_info_t *iscan)
+{
+ iscan_buf_t * buf;
+ iscan_buf_t * ptr;
+ wl_iscan_results_t * list_buf;
+ wl_iscan_results_t list;
+ wl_scan_results_t *results;
+ uint32 status;
+
+ /* buffers are allocated on demand */
+ if (iscan->list_cur) {
+ buf = iscan->list_cur;
+ iscan->list_cur = buf->next;
+ }
+ else {
+ buf = kmalloc(sizeof(iscan_buf_t), GFP_KERNEL);
+ if (!buf)
+ return WL_SCAN_RESULTS_ABORTED;
+ buf->next = NULL;
+ if (!iscan->list_hdr)
+ iscan->list_hdr = buf;
+ else {
+ ptr = iscan->list_hdr;
+ while (ptr->next) {
+ ptr = ptr->next;
+ }
+ ptr->next = buf;
+ }
+ }
+ memset(buf->iscan_buf, 0, WLC_IW_ISCAN_MAXLEN);
+ list_buf = (wl_iscan_results_t*)buf->iscan_buf;
+ results = &list_buf->results;
+ results->buflen = WL_ISCAN_RESULTS_FIXED_SIZE;
+ results->version = 0;
+ results->count = 0;
+
+ memset(&list, 0, sizeof(list));
+ list.results.buflen = htod32(WLC_IW_ISCAN_MAXLEN);
+ (void) dev_iw_iovar_getbuf(
+ iscan->dev,
+ "iscanresults",
+ &list,
+ WL_ISCAN_RESULTS_FIXED_SIZE,
+ buf->iscan_buf,
+ WLC_IW_ISCAN_MAXLEN);
+ results->buflen = dtoh32(results->buflen);
+ results->version = dtoh32(results->version);
+ results->count = dtoh32(results->count);
+ WL_TRACE(("results->count = %d\n", results->count));
+
+ WL_TRACE(("results->buflen = %d\n", results->buflen));
+ status = dtoh32(list_buf->status);
+ return status;
+}
+
+static void wl_iw_send_scan_complete(iscan_info_t *iscan)
+{
+ union iwreq_data wrqu;
+
+ memset(&wrqu, 0, sizeof(wrqu));
+
+ /* wext expects to get no data for SIOCGIWSCAN Event */
+ wireless_send_event(iscan->dev, SIOCGIWSCAN, &wrqu, NULL);
+}
+
+static int
+_iscan_sysioc_thread(void *data)
+{
+ uint32 status;
+ iscan_info_t *iscan = (iscan_info_t *)data;
+
+ WL_MSG("wlan", "thread Enter\n");
+ DAEMONIZE("iscan_sysioc");
+
+ status = WL_SCAN_RESULTS_PARTIAL;
+ while (down_interruptible(&iscan->sysioc_sem) == 0) {
+ if (iscan->timer_on) {
+ del_timer(&iscan->timer);
+ iscan->timer_on = 0;
+ }
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ rtnl_lock();
+#endif
+ status = wl_iw_iscan_get(iscan);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ rtnl_unlock();
+#endif
+
+ switch (status) {
+ case WL_SCAN_RESULTS_PARTIAL:
+ WL_TRACE(("iscanresults incomplete\n"));
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ rtnl_lock();
+#endif
+ /* make sure our buffer size is enough before going to the next round */
+ wl_iw_iscan(iscan, NULL, WL_SCAN_ACTION_CONTINUE);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ rtnl_unlock();
+#endif
+ /* Reschedule the timer */
+ iscan->timer.expires = jiffies + msecs_to_jiffies(iscan->timer_ms);
+ add_timer(&iscan->timer);
+ iscan->timer_on = 1;
+ break;
+ case WL_SCAN_RESULTS_SUCCESS:
+ WL_TRACE(("iscanresults complete\n"));
+ iscan->iscan_state = ISCAN_STATE_IDLE;
+ wl_iw_send_scan_complete(iscan);
+ break;
+ case WL_SCAN_RESULTS_PENDING:
+ WL_TRACE(("iscanresults pending\n"));
+ /* Reschedule the timer */
+ iscan->timer.expires = jiffies + msecs_to_jiffies(iscan->timer_ms);
+ add_timer(&iscan->timer);
+ iscan->timer_on = 1;
+ break;
+ case WL_SCAN_RESULTS_ABORTED:
+ WL_TRACE(("iscanresults aborted\n"));
+ iscan->iscan_state = ISCAN_STATE_IDLE;
+ wl_iw_send_scan_complete(iscan);
+ break;
+ default:
+ WL_TRACE(("iscanresults returned unknown status %d\n", status));
+ break;
+ }
+ }
+ WL_MSG("wlan", "was terminated\n");
+ complete_and_exit(&iscan->sysioc_exited, 0);
+}
+#endif /* !WL_ESCAN */
+
+void
+wl_iw_detach(struct net_device *dev)
+{
+ struct dhd_pub *dhdp = dhd_get_pub(dev);
+ wl_wext_info_t *wext_info = dhdp->wext_info;
+#ifndef WL_ESCAN
+ iscan_buf_t *buf;
+ iscan_info_t *iscan;
+#endif
+ if (!wext_info)
+ return;
+
+#ifndef WL_ESCAN
+ iscan = &wext_info->iscan;
+ if (iscan->sysioc_pid >= 0) {
+ KILL_PROC(iscan->sysioc_pid, SIGTERM);
+ wait_for_completion(&iscan->sysioc_exited);
+ }
+
+ while (iscan->list_hdr) {
+ buf = iscan->list_hdr->next;
+ kfree(iscan->list_hdr);
+ iscan->list_hdr = buf;
+ }
+#endif
+ wl_ext_event_deregister(dev, dhdp, WLC_E_LAST, wl_iw_event);
+ if (wext_info) {
+ kfree(wext_info);
+ dhdp->wext_info = NULL;
+ }
+}
+
+int
+wl_iw_attach(struct net_device *dev)
+{
+ struct dhd_pub *dhdp = dhd_get_pub(dev);
+ wl_wext_info_t *wext_info = NULL;
+ int ret = 0;
+#ifndef WL_ESCAN
+ iscan_info_t *iscan = NULL;
+#endif
+
+ if (!dev)
+ return 0;
+ WL_TRACE(("Enter\n"));
+
+ wext_info = (void *)kzalloc(sizeof(struct wl_wext_info), GFP_KERNEL);
+ if (!wext_info)
+ return -ENOMEM;
+ memset(wext_info, 0, sizeof(wl_wext_info_t));
+ wext_info->dev = dev;
+ wext_info->dhd = dhdp;
+ wext_info->conn_info.bssidx = 0;
+ dhdp->wext_info = (void *)wext_info;
+
+#ifndef WL_ESCAN
+ iscan = &wext_info->iscan;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+ iscan->kthread = NULL;
+#endif
+ iscan->sysioc_pid = -1;
+ /* we only care about main interface so save a global here */
+ iscan->dev = dev;
+ iscan->iscan_state = ISCAN_STATE_IDLE;
+
+ /* Set up the timer */
+ iscan->timer_ms = 2000;
+ init_timer_compat(&iscan->timer, wl_iw_timerfunc, iscan);
+
+ sema_init(&iscan->sysioc_sem, 0);
+ init_completion(&iscan->sysioc_exited);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+ iscan->kthread = kthread_run(_iscan_sysioc_thread, iscan, "iscan_sysioc");
+ iscan->sysioc_pid = iscan->kthread->pid;
+#else
+ iscan->sysioc_pid = kernel_thread(_iscan_sysioc_thread, iscan, 0);
+#endif
+ if (iscan->sysioc_pid < 0) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+#endif
+ ret = wl_ext_event_register(dev, dhdp, WLC_E_LAST, wl_iw_event, dhdp->wext_info,
+ PRIO_EVENT_WEXT);
+ if (ret) {
+ WL_ERROR(("wl_ext_event_register err %d\n", ret));
+ goto exit;
+ }
+
+ return ret;
+exit:
+ wl_iw_detach(dev);
+ return ret;
+}
+
+s32
+wl_iw_autochannel(struct net_device *dev, char* command, int total_len)
+{
+ struct dhd_pub *dhd = dhd_get_pub(dev);
+ wl_wext_info_t *wext_info = NULL;
+ int ret = 0;
+#ifdef WL_ESCAN
+ int bytes_written = -1;
+#endif
+
+ DHD_CHECK(dhd, dev);
+ wext_info = dhd->wext_info;
+#ifdef WL_ESCAN
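+ /* command is "<keyword> <n>": 0 clears the stored results, 2 reports
+ * the best 2g/5g channels, other values arm auto-channel selection for
+ * the next scan
+ */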
+ sscanf(command, "%*s %d", &dhd->escan->autochannel);
+ if (dhd->escan->autochannel == 0) {
+ dhd->escan->best_2g_ch = 0;
+ dhd->escan->best_5g_ch = 0;
+ } else if (dhd->escan->autochannel == 2) {
+ bytes_written = snprintf(command, total_len, "2g=%d 5g=%d",
+ dhd->escan->best_2g_ch, dhd->escan->best_5g_ch);
+ WL_TRACE(("command result is %s\n", command));
+ ret = bytes_written;
+ }
+#endif
+
+ return ret;
+}
+
+#endif /* USE_IW */
diff --git a/bcmdhd.101.10.361.x/wl_iw.h b/bcmdhd.101.10.361.x/wl_iw.h
new file mode 100755
index 0000000..e161006
--- /dev/null
+++ b/bcmdhd.101.10.361.x/wl_iw.h
@@ -0,0 +1,171 @@
+/*
+ * Linux Wireless Extensions support
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: wl_iw.h 514727 2014-11-12 03:02:48Z $
+ */
+
+#ifndef _wl_iw_h_
+#define _wl_iw_h_
+
+#include <linux/wireless.h>
+
+#include <typedefs.h>
+#include <ethernet.h>
+#include <wlioctl.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)) && (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
+#define get_ds() (KERNEL_DS)
+#endif
+
+#define WL_SCAN_PARAMS_SSID_MAX 10
+#define GET_SSID "SSID="
+#define GET_CHANNEL "CH="
+#define GET_NPROBE "NPROBE="
+#define GET_ACTIVE_ASSOC_DWELL "ACTIVE="
+#define GET_PASSIVE_ASSOC_DWELL "PASSIVE="
+#define GET_HOME_DWELL "HOME="
+#define GET_SCAN_TYPE "TYPE="
+
+#define BAND_GET_CMD "GETBAND"
+#define BAND_SET_CMD "SETBAND"
+#define DTIM_SKIP_GET_CMD "DTIMSKIPGET"
+#define DTIM_SKIP_SET_CMD "DTIMSKIPSET"
+#define SETSUSPEND_CMD "SETSUSPENDOPT"
+#define PNOSSIDCLR_SET_CMD "PNOSSIDCLR"
+/* Lin - Is the extra space needed? */
+#define PNOSETUP_SET_CMD "PNOSETUP " /* TLV command has extra end space */
+#define PNOENABLE_SET_CMD "PNOFORCE"
+#define PNODEBUG_SET_CMD "PNODEBUG"
+#define TXPOWER_SET_CMD "TXPOWER"
+
+#define MAC2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5]
+#define MACSTR "%02x:%02x:%02x:%02x:%02x:%02x"
+
+/* Structure to keep global parameters */
+typedef struct wl_iw_extra_params {
+ int target_channel; /* target channel */
+} wl_iw_extra_params_t;
+
+/* ============================================== */
+/* Defines from wlc_pub.h */
+#define WL_IW_RSSI_MINVAL -200 /* Low value, e.g. for forcing roam */
+#define WL_IW_RSSI_NO_SIGNAL -91 /* NDIS RSSI link quality cutoffs */
+#define WL_IW_RSSI_VERY_LOW -80 /* Very low quality cutoffs */
+#define WL_IW_RSSI_LOW -70 /* Low quality cutoffs */
+#define WL_IW_RSSI_GOOD -68 /* Good quality cutoffs */
+#define WL_IW_RSSI_VERY_GOOD -58 /* Very good quality cutoffs */
+#define WL_IW_RSSI_EXCELLENT -57 /* Excellent quality cutoffs */
+#define WL_IW_RSSI_INVALID 0 /* invalid RSSI value */
+#define MAX_WX_STRING 80
+#define SSID_FMT_BUF_LEN ((4 * 32) + 1)
+#define isprint(c) bcm_isprint(c)
+#define WL_IW_SET_ACTIVE_SCAN (SIOCIWFIRSTPRIV+1)
+#define WL_IW_GET_RSSI (SIOCIWFIRSTPRIV+3)
+#define WL_IW_SET_PASSIVE_SCAN (SIOCIWFIRSTPRIV+5)
+#define WL_IW_GET_LINK_SPEED (SIOCIWFIRSTPRIV+7)
+#define WL_IW_GET_CURR_MACADDR (SIOCIWFIRSTPRIV+9)
+#define WL_IW_SET_STOP (SIOCIWFIRSTPRIV+11)
+#define WL_IW_SET_START (SIOCIWFIRSTPRIV+13)
+
+#define G_SCAN_RESULTS 8*1024
+#define WE_ADD_EVENT_FIX 0x80
+#define G_WLAN_SET_ON 0
+#define G_WLAN_SET_OFF 1
+
+
+typedef struct wl_iw {
+ char nickname[IW_ESSID_MAX_SIZE];
+
+ struct iw_statistics wstats;
+
+ int spy_num;
+ uint32 pwsec; /* pairwise wsec setting */
+ uint32 gwsec; /* group wsec setting */
+ bool privacy_invoked; /* IW_AUTH_PRIVACY_INVOKED setting */
+ struct ether_addr spy_addr[IW_MAX_SPY];
+ struct iw_quality spy_qual[IW_MAX_SPY];
+ void *wlinfo;
+} wl_iw_t;
+
+struct wl_ctrl {
+ timer_list_compat_t *timer;
+ struct net_device *dev;
+ long sysioc_pid;
+ struct semaphore sysioc_sem;
+ struct completion sysioc_exited;
+};
+
+
+#if WIRELESS_EXT > 12
+#include <net/iw_handler.h>
+extern const struct iw_handler_def wl_iw_handler_def;
+#endif /* WIRELESS_EXT > 12 */
+
+extern int wl_iw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+extern int wl_iw_get_wireless_stats(struct net_device *dev, struct iw_statistics *wstats);
+int wl_iw_send_priv_event(struct net_device *dev, char *flag);
+int wl_iw_attach(struct net_device *dev);
+void wl_iw_detach(struct net_device *dev);
+s32 wl_iw_autochannel(struct net_device *dev, char* command, int total_len);
+
+/* message levels */
+#define WL_ERROR_LEVEL (1 << 0)
+#define WL_TRACE_LEVEL (1 << 1)
+#define WL_INFO_LEVEL (1 << 2)
+#define WL_SCAN_LEVEL (1 << 3)
+#define WL_WSEC_LEVEL (1 << 4)
+
+#define CSCAN_COMMAND "CSCAN "
+#define CSCAN_TLV_PREFIX 'S'
+#define CSCAN_TLV_VERSION 1
+#define CSCAN_TLV_SUBVERSION 0
+#define CSCAN_TLV_TYPE_SSID_IE 'S'
+#define CSCAN_TLV_TYPE_CHANNEL_IE 'C'
+#define CSCAN_TLV_TYPE_NPROBE_IE 'N'
+#define CSCAN_TLV_TYPE_ACTIVE_IE 'A'
+#define CSCAN_TLV_TYPE_PASSIVE_IE 'P'
+#define CSCAN_TLV_TYPE_HOME_IE 'H'
+#define CSCAN_TLV_TYPE_STYPE_IE 'T'
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+#define IWE_STREAM_ADD_EVENT(info, stream, ends, iwe, extra) \
+ iwe_stream_add_event(info, stream, ends, iwe, extra)
+#define IWE_STREAM_ADD_VALUE(info, event, value, ends, iwe, event_len) \
+ iwe_stream_add_value(info, event, value, ends, iwe, event_len)
+#define IWE_STREAM_ADD_POINT(info, stream, ends, iwe, extra) \
+ iwe_stream_add_point(info, stream, ends, iwe, extra)
+#else
+#define IWE_STREAM_ADD_EVENT(info, stream, ends, iwe, extra) \
+ iwe_stream_add_event(stream, ends, iwe, extra)
+#define IWE_STREAM_ADD_VALUE(info, event, value, ends, iwe, event_len) \
+ iwe_stream_add_value(event, value, ends, iwe, event_len)
+#define IWE_STREAM_ADD_POINT(info, stream, ends, iwe, extra) \
+ iwe_stream_add_point(stream, ends, iwe, extra)
+#endif
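+
+/* These wrappers hide the 2.6.27 API change that added the iw_request_info
+ * argument to iwe_stream_add_*(). A hypothetical scan-results handler can
+ * then stay version-agnostic:
+ *
+ *   struct iw_event iwe;
+ *   iwe.cmd = SIOCGIWAP;
+ *   event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_ADDR_LEN);
+ */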
+
+#endif /* _wl_iw_h_ */
diff --git a/bcmdhd.101.10.361.x/wl_linux_mon.c b/bcmdhd.101.10.361.x/wl_linux_mon.c
new file mode 100755
index 0000000..d8b69aa
--- /dev/null
+++ b/bcmdhd.101.10.361.x/wl_linux_mon.c
@@ -0,0 +1,412 @@
+/*
+ * Broadcom Dongle Host Driver (DHD), Linux monitor network interface
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#include <osl.h>
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+#include <linux/ieee80211.h>
+#include <linux/rtnetlink.h>
+#include <net/ieee80211_radiotap.h>
+
+#if defined(BCMDONGLEHOST)
+#include <wlioctl.h>
+#include <bcmutils.h>
+#include <dhd_dbg.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#endif /* defined(BCMDONGLEHOST) */
+#if defined(__linux__)
+#include <bcmstdlib_s.h>
+#endif /* defined(__linux__) */
+
+typedef enum monitor_states
+{
+ MONITOR_STATE_DEINIT = 0x0,
+ MONITOR_STATE_INIT = 0x1,
+ MONITOR_STATE_INTERFACE_ADDED = 0x2,
+ MONITOR_STATE_INTERFACE_DELETED = 0x4
+} monitor_states_t;
+/*
+ * Some external functions; TODO: move them to dhd_linux.h
+ */
+int dhd_add_monitor(const char *name, struct net_device **new_ndev);
+extern netdev_tx_t dhd_start_xmit(struct sk_buff *skb, struct net_device *net);
+int dhd_del_monitor(struct net_device *ndev);
+int dhd_monitor_init(void *dhd_pub);
+int dhd_monitor_uninit(void);
+
+/**
+ * Local declarations and definitions (not exposed)
+ */
+#ifndef DHD_MAX_IFS
+#define DHD_MAX_IFS 16
+#endif
+#define MON_PRINT(format, ...) printf("DHD-MON: %s " format, __func__, ##__VA_ARGS__)
+#define MON_TRACE MON_PRINT
+
+typedef struct monitor_interface {
+ int radiotap_enabled;
+ struct net_device* real_ndev; /* The real interface that the monitor is on */
+ struct net_device* mon_ndev;
+} monitor_interface;
+
+typedef struct dhd_linux_monitor {
+ void *dhd_pub;
+ monitor_states_t monitor_state;
+ monitor_interface mon_if[DHD_MAX_IFS];
+ struct mutex lock; /* lock to protect mon_if */
+} dhd_linux_monitor_t;
+
+static dhd_linux_monitor_t g_monitor;
+
+static struct net_device* lookup_real_netdev(const char *name);
+static monitor_interface* ndev_to_monif(struct net_device *ndev);
+static int dhd_mon_if_open(struct net_device *ndev);
+static int dhd_mon_if_stop(struct net_device *ndev);
+static netdev_tx_t dhd_mon_if_subif_start_xmit(struct sk_buff *skb, struct net_device *ndev);
+static void dhd_mon_if_set_multicast_list(struct net_device *ndev);
+static int dhd_mon_if_change_mac(struct net_device *ndev, void *addr);
+
+static const struct net_device_ops dhd_mon_if_ops = {
+ .ndo_open = dhd_mon_if_open,
+ .ndo_stop = dhd_mon_if_stop,
+ .ndo_start_xmit = dhd_mon_if_subif_start_xmit,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
+ .ndo_set_rx_mode = dhd_mon_if_set_multicast_list,
+#else
+ .ndo_set_multicast_list = dhd_mon_if_set_multicast_list,
+#endif
+ .ndo_set_mac_address = dhd_mon_if_change_mac,
+};
+
+/**
+ * Local static function definitions
+ */
+
+/* Look up dhd's net device table to find a match (e.g. interface "eth0" is a match for "mon.eth0",
+ * "p2p-eth0-0" is a match for "mon.p2p-eth0-0")
+ */
+static struct net_device* lookup_real_netdev(const char *name)
+{
+ struct net_device *ndev_found = NULL;
+
+#if defined(BCMDONGLEHOST)
+ int i;
+ int len = 0;
+ int last_name_len = 0;
+ struct net_device *ndev;
+
+	/* We need to find interface "p2p-p2p-0" corresponding to monitor interface "mon-p2p-0".
+	 * Once the mon iface name reaches IFNAMSIZ, it is reset to p2p0-0 and the corresponding
+	 * mon iface would be mon-p2p0-0.
+	 */
+ */
+ for (i = 0; i < DHD_MAX_IFS; i++) {
+ ndev = dhd_idx2net(g_monitor.dhd_pub, i);
+
+ /* Skip "p2p" and look for "-p2p0-x" in monitor interface name. If it
+ * it matches, then this netdev is the corresponding real_netdev.
+ */
+ if (ndev && strstr(ndev->name, "p2p-p2p0")) {
+ len = strlen("p2p");
+ } else {
+			/* if p2p- is not present, then IFNAMSIZ has been reached and the
+			 * name would have been reset. In this case, look for p2p0-x in mon-p2p0-x
+ */
+ len = 0;
+ }
+ if (ndev && strstr(name, (ndev->name + len))) {
+ if (strlen(ndev->name) > last_name_len) {
+ ndev_found = ndev;
+ last_name_len = strlen(ndev->name);
+ }
+ }
+ }
+#endif /* defined(BCMDONGLEHOST) */
+
+ return ndev_found;
+}
+
+static monitor_interface* ndev_to_monif(struct net_device *ndev)
+{
+ int i;
+
+ for (i = 0; i < DHD_MAX_IFS; i++) {
+ if (g_monitor.mon_if[i].mon_ndev == ndev)
+ return &g_monitor.mon_if[i];
+ }
+
+ return NULL;
+}
+
+static int dhd_mon_if_open(struct net_device *ndev)
+{
+ int ret = 0;
+
+ MON_PRINT("enter\n");
+ return ret;
+}
+
+static int dhd_mon_if_stop(struct net_device *ndev)
+{
+ int ret = 0;
+
+ MON_PRINT("enter\n");
+ return ret;
+}
+
+static netdev_tx_t dhd_mon_if_subif_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ int ret = 0;
+ int rtap_len;
+ int qos_len = 0;
+ int dot11_hdr_len = 24;
+ int snap_len = 6;
+ unsigned char *pdata;
+ unsigned short frame_ctl;
+ unsigned char src_mac_addr[6];
+ unsigned char dst_mac_addr[6];
+ struct ieee80211_hdr *dot11_hdr;
+ struct ieee80211_radiotap_header *rtap_hdr;
+ monitor_interface* mon_if;
+
+ MON_PRINT("enter\n");
+
+ mon_if = ndev_to_monif(ndev);
+ if (mon_if == NULL || mon_if->real_ndev == NULL) {
+ MON_PRINT(" cannot find matched net dev, skip the packet\n");
+ goto fail;
+ }
+
+ if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header)))
+ goto fail;
+
+ rtap_hdr = (struct ieee80211_radiotap_header *)skb->data;
+ if (unlikely(rtap_hdr->it_version))
+ goto fail;
+
+ rtap_len = ieee80211_get_radiotap_len(skb->data);
+ if (unlikely(skb->len < rtap_len))
+ goto fail;
+
+ MON_PRINT("radiotap len (should be 14): %d\n", rtap_len);
+
+	/* Skip the radiotap header */
+ skb_pull(skb, rtap_len);
+
+ dot11_hdr = (struct ieee80211_hdr *)skb->data;
+ frame_ctl = le16_to_cpu(dot11_hdr->frame_control);
+	/* Only process data frames */
+ if ((frame_ctl & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) {
+		/* A QoS data frame carries a 2-byte QoS control field; a Wireless
+		 * Distribution System (WDS) frame (ToDS|FromDS) has 4 MAC addresses,
+		 * i.e. 6 extra header bytes
+		 */
+ if (dot11_hdr->frame_control & 0x0080)
+ qos_len = 2;
+ if ((dot11_hdr->frame_control & 0x0300) == 0x0300)
+ dot11_hdr_len += 6;
+
+ eacopy(dot11_hdr->addr1, dst_mac_addr);
+ eacopy(dot11_hdr->addr2, src_mac_addr);
+
+		/* Skip the 802.11 header, QoS (if any) and SNAP, but leave space
+		 * for two MAC addresses
+ */
+ skb_pull(skb, dot11_hdr_len + qos_len + snap_len - sizeof(src_mac_addr) * 2);
+ pdata = (unsigned char*)skb->data;
+ (void)memcpy_s(pdata, sizeof(dst_mac_addr), dst_mac_addr, sizeof(dst_mac_addr));
+ (void)memcpy_s(pdata + sizeof(dst_mac_addr), sizeof(src_mac_addr), src_mac_addr,
+ sizeof(src_mac_addr));
+ PKTSETPRIO(skb, 0);
+
+ MON_PRINT("if name: %s, matched if name %s\n", ndev->name, mon_if->real_ndev->name);
+
+ /* Use the real net device to transmit the packet */
+#if defined(BCMDONGLEHOST)
+ ret = dhd_start_xmit(skb, mon_if->real_ndev);
+#endif /* defined(BCMDONGLEHOST) */
+
+ return ret;
+ }
+fail:
+ dev_kfree_skb(skb);
+ return 0;
+}
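+
+/* The xmit path above converts a monitor-injected frame into the Ethernet
+ * form dhd_start_xmit() expects (a sketch; sizes assume a basic 3-address
+ * data frame, +2 bytes for QoS and +6 for 4-address WDS where flagged):
+ *
+ *   in:  [radiotap][802.11 hdr (24)][QoS (0/2)][LLC/SNAP (6)][type][payload]
+ *   out: [dst MAC (6)][src MAC (6)][type][payload]
+ */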
+
+static void dhd_mon_if_set_multicast_list(struct net_device *ndev)
+{
+ monitor_interface* mon_if;
+
+ mon_if = ndev_to_monif(ndev);
+ if (mon_if == NULL || mon_if->real_ndev == NULL) {
+ MON_PRINT(" cannot find matched net dev, skip the packet\n");
+ } else {
+ MON_PRINT("enter, if name: %s, matched if name %s\n",
+ ndev->name, mon_if->real_ndev->name);
+ }
+}
+
+static int dhd_mon_if_change_mac(struct net_device *ndev, void *addr)
+{
+ int ret = 0;
+ monitor_interface* mon_if;
+
+ mon_if = ndev_to_monif(ndev);
+ if (mon_if == NULL || mon_if->real_ndev == NULL) {
+ MON_PRINT(" cannot find matched net dev, skip the packet\n");
+ } else {
+ MON_PRINT("enter, if name: %s, matched if name %s\n",
+ ndev->name, mon_if->real_ndev->name);
+ }
+ return ret;
+}
+
+/**
+ * Global function definitions (declared in dhd_linux_mon.h)
+ */
+
+int dhd_add_monitor(const char *name, struct net_device **new_ndev)
+{
+ int i;
+ int idx = -1;
+ int ret = 0;
+ struct net_device* ndev = NULL;
+ dhd_linux_monitor_t **dhd_mon;
+
+ mutex_lock(&g_monitor.lock);
+
+ MON_TRACE("enter, if name: %s\n", name);
+ if (!name || !new_ndev) {
+ MON_PRINT("invalid parameters\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * Find a vacancy
+ */
+ for (i = 0; i < DHD_MAX_IFS; i++)
+ if (g_monitor.mon_if[i].mon_ndev == NULL) {
+ idx = i;
+ break;
+ }
+ if (idx == -1) {
+ MON_PRINT("exceeds maximum interfaces\n");
+ ret = -EFAULT;
+ goto out;
+ }
+
+ ndev = alloc_etherdev(sizeof(dhd_linux_monitor_t*));
+ if (!ndev) {
+ MON_PRINT("failed to allocate memory\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ndev->type = ARPHRD_IEEE80211_RADIOTAP;
+ strlcpy(ndev->name, name, sizeof(ndev->name));
+ ndev->netdev_ops = &dhd_mon_if_ops;
+
+ ret = register_netdevice(ndev);
+ if (ret) {
+ MON_PRINT(" register_netdevice failed (%d)\n", ret);
+ goto out;
+ }
+
+ *new_ndev = ndev;
+ g_monitor.mon_if[idx].radiotap_enabled = TRUE;
+ g_monitor.mon_if[idx].mon_ndev = ndev;
+ g_monitor.mon_if[idx].real_ndev = lookup_real_netdev(name);
+ dhd_mon = (dhd_linux_monitor_t **)netdev_priv(ndev);
+ *dhd_mon = &g_monitor;
+ g_monitor.monitor_state = MONITOR_STATE_INTERFACE_ADDED;
+ MON_PRINT("net device returned: 0x%p\n", ndev);
+ MON_PRINT("found a matched net device, name %s\n", g_monitor.mon_if[idx].real_ndev->name);
+
+out:
+ if (ret && ndev)
+ free_netdev(ndev);
+
+ mutex_unlock(&g_monitor.lock);
+ return ret;
+
+}
+
+int dhd_del_monitor(struct net_device *ndev)
+{
+ int i;
+ if (!ndev)
+ return -EINVAL;
+ mutex_lock(&g_monitor.lock);
+ for (i = 0; i < DHD_MAX_IFS; i++) {
+ if (g_monitor.mon_if[i].mon_ndev == ndev ||
+ g_monitor.mon_if[i].real_ndev == ndev) {
+
+ g_monitor.mon_if[i].real_ndev = NULL;
+ unregister_netdevice(g_monitor.mon_if[i].mon_ndev);
+ free_netdev(g_monitor.mon_if[i].mon_ndev);
+ g_monitor.mon_if[i].mon_ndev = NULL;
+ g_monitor.monitor_state = MONITOR_STATE_INTERFACE_DELETED;
+ break;
+ }
+ }
+
+ if (g_monitor.monitor_state != MONITOR_STATE_INTERFACE_DELETED)
+ MON_PRINT("IF not found in monitor array, is this a monitor IF? 0x%p\n", ndev);
+ mutex_unlock(&g_monitor.lock);
+
+ return 0;
+}
+
+int dhd_monitor_init(void *dhd_pub)
+{
+ if (g_monitor.monitor_state == MONITOR_STATE_DEINIT) {
+ g_monitor.dhd_pub = dhd_pub;
+ mutex_init(&g_monitor.lock);
+ g_monitor.monitor_state = MONITOR_STATE_INIT;
+ }
+ return 0;
+}
+
+int dhd_monitor_uninit(void)
+{
+ int i;
+ struct net_device *ndev;
+ if (g_monitor.monitor_state != MONITOR_STATE_DEINIT) {
+ mutex_lock(&g_monitor.lock);
+ for (i = 0; i < DHD_MAX_IFS; i++) {
+ ndev = g_monitor.mon_if[i].mon_ndev;
+ if (ndev) {
+ unregister_netdevice(ndev);
+ free_netdev(ndev);
+ g_monitor.mon_if[i].real_ndev = NULL;
+ g_monitor.mon_if[i].mon_ndev = NULL;
+ }
+ }
+ g_monitor.monitor_state = MONITOR_STATE_DEINIT;
+ mutex_unlock(&g_monitor.lock);
+ }
+ return 0;
+}
diff --git a/bcmdhd.101.10.361.x/wl_roam.c b/bcmdhd.101.10.361.x/wl_roam.c
new file mode 100755
index 0000000..c6254da
--- /dev/null
+++ b/bcmdhd.101.10.361.x/wl_roam.c
@@ -0,0 +1,548 @@
+/*
+ * Linux roam cache
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#include <typedefs.h>
+#include <osl.h>
+#include <bcmwifi_channels.h>
+#include <wlioctl.h>
+#include <bcmutils.h>
+#ifdef WL_CFG80211
+#include <wl_cfg80211.h>
+#endif
+#include <wldev_common.h>
+#if defined(__linux__)
+#include <bcmstdlib_s.h>
+#endif /* defined(__linux__) */
+
+#ifdef ESCAN_CHANNEL_CACHE
+#define MAX_ROAM_CACHE 200
+#define MAX_SSID_BUFSIZE 36
+
+typedef struct {
+ chanspec_t chanspec;
+ int ssid_len;
+ char ssid[MAX_SSID_BUFSIZE];
+} roam_channel_cache;
+
+static int n_roam_cache = 0;
+static int roam_band = WLC_BAND_AUTO;
+static roam_channel_cache roam_cache[MAX_ROAM_CACHE];
+static uint band_bw;
+
+static void add_roamcache_channel(wl_roam_channel_list_t *channels, chanspec_t ch)
+{
+ int i;
+
+ if (channels->n >= MAX_ROAM_CHANNEL) /* buffer full */
+ return;
+
+ for (i = 0; i < channels->n; i++) {
+ if (channels->channels[i] == ch) /* already in the list */
+ return;
+ }
+
+ channels->channels[i] = ch;
+ channels->n++;
+
+ WL_DBG((" RCC: %02d 0x%04X\n",
+ ch & WL_CHANSPEC_CHAN_MASK, ch));
+}
+
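+/* Fold the DHD-side roam cache into the firmware RCC list: entries whose
+ * SSID and band match the current association are appended to whatever
+ * "roamscan_channels" list the firmware already holds. A minimal
+ * (hypothetical) call site would be a roam/association completion handler:
+ *
+ *   update_roam_cache(cfg, ioctl_version);
+ */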
+void update_roam_cache(struct bcm_cfg80211 *cfg, int ioctl_ver)
+{
+ int error, i, prev_channels;
+ wl_roam_channel_list_t channel_list;
+ char iobuf[WLC_IOCTL_SMLEN];
+ struct net_device *dev = bcmcfg_to_prmry_ndev(cfg);
+ wlc_ssid_t ssid;
+
+ if (!cfg->rcc_enabled) {
+ return;
+ }
+
+#ifdef WES_SUPPORT
+ if (cfg->roamscan_mode == ROAMSCAN_MODE_WES) {
+ /* no update when ROAMSCAN_MODE_WES */
+ return;
+ }
+#endif /* WES_SUPPORT */
+
+ if (!wl_get_drv_status(cfg, CONNECTED, dev)) {
+ WL_DBG(("Not associated\n"));
+ return;
+ }
+
+	/* need to read out the current cache list,
+	 * as the firmware may change it dynamically
+	 */
+ error = wldev_iovar_getbuf(dev, "roamscan_channels", 0, 0,
+ (void *)&channel_list, sizeof(channel_list), NULL);
+ if (error) {
+ WL_ERR(("Failed to get roamscan channels, error = %d\n", error));
+ return;
+ }
+
+ error = wldev_get_ssid(dev, &ssid);
+ if (error) {
+ WL_ERR(("Failed to get SSID, err=%d\n", error));
+ return;
+ }
+
+ prev_channels = channel_list.n;
+ for (i = 0; i < n_roam_cache; i++) {
+ chanspec_t ch = roam_cache[i].chanspec;
+ bool band_match = ((roam_band == WLC_BAND_AUTO) ||
+#ifdef WL_6G_BAND
+ ((roam_band == WLC_BAND_6G) && (CHSPEC_IS6G(ch))) ||
+#endif /* WL_6G_BAND */
+ ((roam_band == WLC_BAND_2G) && (CHSPEC_IS2G(ch))) ||
+ ((roam_band == WLC_BAND_5G) && (CHSPEC_IS5G(ch))));
+
+ if ((roam_cache[i].ssid_len == ssid.SSID_len) &&
+ band_match && (memcmp(roam_cache[i].ssid, ssid.SSID, ssid.SSID_len) == 0)) {
+ /* match found, add it */
+ ch = wf_chspec_ctlchan(ch) | CHSPEC_BAND(ch) | band_bw;
+ add_roamcache_channel(&channel_list, ch);
+ }
+ }
+ if (prev_channels != channel_list.n) {
+ /* channel list updated */
+ error = wldev_iovar_setbuf(dev, "roamscan_channels", &channel_list,
+ sizeof(channel_list), iobuf, sizeof(iobuf), NULL);
+ if (error) {
+ WL_ERR(("Failed to update roamscan channels, error = %d\n", error));
+ }
+ }
+
+ WL_DBG(("%d AP, %d cache item(s), err=%d\n", n_roam_cache, channel_list.n, error));
+}
+
+void set_roam_band(int band)
+{
+ roam_band = band;
+}
+
+void reset_roam_cache(struct bcm_cfg80211 *cfg)
+{
+ if (!cfg->rcc_enabled) {
+ return;
+ }
+
+#ifdef WES_SUPPORT
+ if (cfg->roamscan_mode == ROAMSCAN_MODE_WES)
+ return;
+#endif /* WES_SUPPORT */
+
+ n_roam_cache = 0;
+}
+
+static void
+add_roam_cache_list(uint8 *SSID, uint32 SSID_len, chanspec_t chanspec)
+{
+ int i;
+ uint8 channel;
+ char chanbuf[CHANSPEC_STR_LEN];
+
+ if (n_roam_cache >= MAX_ROAM_CACHE) {
+ return;
+ }
+
+ for (i = 0; i < n_roam_cache; i++) {
+ if ((roam_cache[i].ssid_len == SSID_len) &&
+ (roam_cache[i].chanspec == chanspec) &&
+ (memcmp(roam_cache[i].ssid, SSID, SSID_len) == 0)) {
+ /* identical one found, just return */
+ return;
+ }
+ }
+
+ roam_cache[n_roam_cache].ssid_len = SSID_len;
+ channel = wf_chspec_ctlchan(chanspec);
+ WL_DBG(("CHSPEC = %s, CTL %d SSID %s\n",
+ wf_chspec_ntoa_ex(chanspec, chanbuf), channel, SSID));
+ roam_cache[n_roam_cache].chanspec = CHSPEC_BAND(chanspec) | band_bw | channel;
+ (void)memcpy_s(roam_cache[n_roam_cache].ssid, SSID_len, SSID, SSID_len);
+
+ n_roam_cache++;
+}
+
+void
+add_roam_cache(struct bcm_cfg80211 *cfg, wl_bss_info_t *bi)
+{
+ if (!cfg->rcc_enabled) {
+ return;
+ }
+
+#ifdef WES_SUPPORT
+ if (cfg->roamscan_mode == ROAMSCAN_MODE_WES) {
+ return;
+ }
+#endif /* WES_SUPPORT */
+
+ add_roam_cache_list(bi->SSID, bi->SSID_len, bi->chanspec);
+}
+
+static bool is_duplicated_channel(const chanspec_t *channels, int n_channels, chanspec_t new)
+{
+ int i;
+
+ for (i = 0; i < n_channels; i++) {
+ if (channels[i] == new)
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+int get_roam_channel_list(struct bcm_cfg80211 *cfg, chanspec_t target_chan,
+ chanspec_t *channels, int n_channels, const wlc_ssid_t *ssid, int ioctl_ver)
+{
+ int i, n = 0;
+ char chanbuf[CHANSPEC_STR_LEN];
+
+ /* first index is filled with the given target channel */
+ if ((target_chan != INVCHANSPEC) && (target_chan != 0)) {
+ channels[0] = target_chan;
+ n++;
+ }
+
+ WL_DBG((" %s: 0x%04X\n", __FUNCTION__, channels[0]));
+
+#ifdef WES_SUPPORT
+ if (cfg->roamscan_mode == ROAMSCAN_MODE_WES) {
+ for (i = 0; i < n_roam_cache; i++) {
+ chanspec_t ch = roam_cache[i].chanspec;
+ bool band_match = ((roam_band == WLC_BAND_AUTO) ||
+#ifdef WL_6G_BAND
+ ((roam_band == WLC_BAND_6G) && (CHSPEC_IS6G(ch))) ||
+#endif /* WL_6G_BAND */
+ ((roam_band == WLC_BAND_2G) && (CHSPEC_IS2G(ch))) ||
+ ((roam_band == WLC_BAND_5G) && (CHSPEC_IS5G(ch))));
+
+ ch = wf_chspec_ctlchan(ch) | CHSPEC_BAND(ch) | band_bw;
+
+ if (band_match && !is_duplicated_channel(channels, n, ch)) {
+ WL_DBG(("%s: Chanspec = %s\n", __FUNCTION__,
+ wf_chspec_ntoa_ex(ch, chanbuf)));
+ channels[n++] = ch;
+ if (n >= n_channels) {
+ WL_ERR(("Too many roam scan channels\n"));
+ return n;
+ }
+ }
+ }
+
+ return n;
+ }
+#endif /* WES_SUPPORT */
+
+ for (i = 0; i < n_roam_cache; i++) {
+ chanspec_t ch = roam_cache[i].chanspec;
+ bool band_match = ((roam_band == WLC_BAND_AUTO) ||
+#ifdef WL_6G_BAND
+ ((roam_band == WLC_BAND_6G) && (CHSPEC_IS6G(ch))) ||
+#endif /* WL_6G_BAND */
+ ((roam_band == WLC_BAND_2G) && (CHSPEC_IS2G(ch))) ||
+ ((roam_band == WLC_BAND_5G) && (CHSPEC_IS5G(ch))));
+
+ ch = wf_chspec_ctlchan(ch) | CHSPEC_BAND(ch) | band_bw;
+ if ((roam_cache[i].ssid_len == ssid->SSID_len) &&
+ band_match && !is_duplicated_channel(channels, n, ch) &&
+ (memcmp(roam_cache[i].ssid, ssid->SSID, ssid->SSID_len) == 0)) {
+ /* match found, add it */
+ WL_DBG(("%s: Chanspec = %s\n", __FUNCTION__,
+ wf_chspec_ntoa_ex(ch, chanbuf)));
+ channels[n++] = ch;
+ if (n >= n_channels) {
+ WL_ERR(("Too many roam scan channels\n"));
+ return n;
+ }
+ }
+ }
+
+ return n;
+}
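+
+/* Illustrative use of get_roam_channel_list() (a sketch only; the actual
+ * caller lives in the join/escan path):
+ *
+ *   chanspec_t chans[MAX_ROAM_CHANNEL];
+ *   int n = get_roam_channel_list(cfg, target_chanspec, chans,
+ *           MAX_ROAM_CHANNEL, &ssid, ioctl_version);
+ *
+ * chans[0] holds the target channel (if valid), chans[1..n-1] the cache hits.
+ */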
+
+#ifdef WES_SUPPORT
+int get_roamscan_mode(struct net_device *dev, int *mode)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ *mode = cfg->roamscan_mode;
+
+ return 0;
+}
+
+int set_roamscan_mode(struct net_device *dev, int mode)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ int error = 0;
+ cfg->roamscan_mode = mode;
+ n_roam_cache = 0;
+
+ error = wldev_iovar_setint(dev, "roamscan_mode", mode);
+ if (error) {
+ WL_ERR(("Failed to set roamscan mode to %d, error = %d\n", mode, error));
+ }
+
+ return error;
+}
+
+int
+get_roamscan_chanspec_list(struct net_device *dev, chanspec_t *chanspecs)
+{
+ int i = 0;
+ int error = BCME_OK;
+ wl_roam_channel_list_t channel_list;
+
+ /* Get Current RCC List */
+ error = wldev_iovar_getbuf(dev, "roamscan_channels", 0, 0,
+ (void *)&channel_list, sizeof(channel_list), NULL);
+ if (error) {
+ WL_ERR(("Failed to get roamscan channels, err = %d\n", error));
+ return error;
+ }
+ if (channel_list.n > MAX_ROAM_CHANNEL) {
+ WL_ERR(("Invalid roamscan channels count(%d)\n", channel_list.n));
+ return BCME_ERROR;
+ }
+
+ for (i = 0; i < channel_list.n; i++) {
+ chanspecs[i] = channel_list.channels[i];
+ WL_DBG(("%02d: chanspec %04x\n", i, chanspecs[i]));
+ }
+
+ return i;
+}
+
+int
+set_roamscan_chanspec_list(struct net_device *dev, uint nchan, chanspec_t *chanspecs)
+{
+ int i;
+ int error;
+ wl_roam_channel_list_t channel_list;
+ char iobuf[WLC_IOCTL_SMLEN];
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ cfg->roamscan_mode = ROAMSCAN_MODE_WES;
+
+ if (nchan > MAX_ROAM_CHANNEL) {
+ nchan = MAX_ROAM_CHANNEL;
+ }
+
+ for (i = 0; i < nchan; i++) {
+ roam_cache[i].chanspec = chanspecs[i];
+ channel_list.channels[i] = chanspecs[i];
+
+ WL_DBG(("%02d/%d: chan: 0x%04x\n", i, nchan, chanspecs[i]));
+ }
+
+ n_roam_cache = nchan;
+ channel_list.n = nchan;
+
+ /* need to set ROAMSCAN_MODE_NORMAL to update roamscan_channels,
+ * otherwise, it won't be updated
+ */
+ error = wldev_iovar_setint(dev, "roamscan_mode", ROAMSCAN_MODE_NORMAL);
+ if (error) {
+ WL_ERR(("Failed to set roamscan mode to %d, error = %d\n",
+ ROAMSCAN_MODE_NORMAL, error));
+ return error;
+ }
+ error = wldev_iovar_setbuf(dev, "roamscan_channels", &channel_list,
+ sizeof(channel_list), iobuf, sizeof(iobuf), NULL);
+ if (error) {
+ WL_ERR(("Failed to set roamscan channels, error = %d\n", error));
+ return error;
+ }
+ error = wldev_iovar_setint(dev, "roamscan_mode", ROAMSCAN_MODE_WES);
+ if (error) {
+ WL_ERR(("Failed to set roamscan mode to %d, error = %d\n",
+ ROAMSCAN_MODE_WES, error));
+ }
+
+ return error;
+}
+
+int
+add_roamscan_chanspec_list(struct net_device *dev, uint nchan, chanspec_t *chanspecs)
+{
+ int i, error = BCME_OK;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ wlc_ssid_t ssid;
+
+ if (!cfg->rcc_enabled) {
+ return BCME_ERROR;
+ }
+
+ if (cfg->roamscan_mode == ROAMSCAN_MODE_WES) {
+ WL_ERR(("Failed to add roamscan channels, WES mode %d\n",
+ cfg->roamscan_mode));
+ return BCME_ERROR;
+ }
+
+ if (nchan > MAX_ROAM_CHANNEL) {
+ WL_ERR(("Failed Over MAX channel list(%d)\n", nchan));
+ return BCME_BADARG;
+ }
+
+ error = wldev_get_ssid(dev, &ssid);
+ if (error) {
+ WL_ERR(("Failed to get SSID, err=%d\n", error));
+ return error;
+ }
+
+ WL_DBG(("Add Roam scan channel count %d\n", nchan));
+
+ for (i = 0; i < nchan; i++) {
+ if (chanspecs[i] == 0) {
+ continue;
+ }
+ add_roam_cache_list(ssid.SSID, ssid.SSID_len, chanspecs[i]);
+ WL_DBG(("channel[%d] - 0x%04x SSID %s\n", i, chanspecs[i], ssid.SSID));
+ }
+
+ update_roam_cache(cfg, ioctl_version);
+
+ return error;
+}
+#endif /* WES_SUPPORT */
+
+#ifdef ROAM_CHANNEL_CACHE
+int init_roam_cache(struct bcm_cfg80211 *cfg, int ioctl_ver)
+{
+ int err;
+ struct net_device *dev = bcmcfg_to_prmry_ndev(cfg);
+ s32 mode;
+
+ /* Check support in firmware */
+ err = wldev_iovar_getint(dev, "roamscan_mode", &mode);
+ if (err && (err == BCME_UNSUPPORTED)) {
+ /* If firmware doesn't support, return error. Else proceed */
+ WL_ERR(("roamscan_mode iovar failed. %d\n", err));
+ return err;
+ }
+
+#ifdef D11AC_IOTYPES
+ band_bw = WL_CHANSPEC_BW_20;
+#else
+ band_bw = WL_CHANSPEC_BW_20 | WL_CHANSPEC_CTL_SB_NONE;
+#endif /* D11AC_IOTYPES */
+
+ n_roam_cache = 0;
+ roam_band = WLC_BAND_AUTO;
+ cfg->roamscan_mode = ROAMSCAN_MODE_NORMAL;
+
+ return 0;
+}
+
+void print_roam_cache(struct bcm_cfg80211 *cfg)
+{
+ int i;
+
+ if (!cfg->rcc_enabled) {
+ return;
+ }
+
+ WL_DBG((" %d cache\n", n_roam_cache));
+
+ for (i = 0; i < n_roam_cache; i++) {
+ roam_cache[i].ssid[roam_cache[i].ssid_len] = 0;
+ WL_DBG(("0x%02X %02d %s\n", roam_cache[i].chanspec,
+ roam_cache[i].ssid_len, roam_cache[i].ssid));
+ }
+}
+
+void wl_update_roamscan_cache_by_band(struct net_device *dev, int band)
+{
+ int i, error, roamscan_mode;
+ wl_roam_channel_list_t chanlist_before, chanlist_after;
+ char iobuf[WLC_IOCTL_SMLEN];
+
+ roam_band = band;
+
+ error = wldev_iovar_getint(dev, "roamscan_mode", &roamscan_mode);
+ if (error) {
+ WL_ERR(("Failed to get roamscan mode, error = %d\n", error));
+ return;
+ }
+
+ /* in case of WES mode, update channel list by band based on the cache in DHD */
+ if (roamscan_mode) {
+ int n = 0;
+ chanlist_before.n = n_roam_cache;
+
+ for (n = 0; n < n_roam_cache; n++) {
+ chanspec_t ch = roam_cache[n].chanspec;
+ chanlist_before.channels[n] = wf_chspec_ctlchan(ch) |
+ CHSPEC_BAND(ch) | band_bw;
+ }
+ } else {
+ if (band == WLC_BAND_AUTO) {
+ return;
+ }
+ error = wldev_iovar_getbuf(dev, "roamscan_channels", 0, 0,
+ (void *)&chanlist_before, sizeof(wl_roam_channel_list_t), NULL);
+ if (error) {
+ WL_ERR(("Failed to get roamscan channels, error = %d\n", error));
+ return;
+ }
+ }
+ chanlist_after.n = 0;
+ /* filtering by the given band */
+ for (i = 0; i < chanlist_before.n; i++) {
+ chanspec_t chspec = chanlist_before.channels[i];
+ bool band_match = ((band == WLC_BAND_AUTO) ||
+#ifdef WL_6G_BAND
+ ((band == WLC_BAND_6G) && (CHSPEC_IS6G(chspec))) ||
+#endif /* WL_6G_BAND */
+ ((band == WLC_BAND_2G) && (CHSPEC_IS2G(chspec))) ||
+ ((band == WLC_BAND_5G) && (CHSPEC_IS5G(chspec))));
+ if (band_match) {
+ chanlist_after.channels[chanlist_after.n++] = chspec;
+ }
+ }
+
+ if (roamscan_mode) {
+ /* need to set ROAMSCAN_MODE_NORMAL to update roamscan_channels,
+ * otherwise, it won't be updated
+ */
+ wldev_iovar_setint(dev, "roamscan_mode", ROAMSCAN_MODE_NORMAL);
+
+ error = wldev_iovar_setbuf(dev, "roamscan_channels", &chanlist_after,
+ sizeof(wl_roam_channel_list_t), iobuf, sizeof(iobuf), NULL);
+ if (error) {
+ WL_ERR(("Failed to update roamscan channels, error = %d\n", error));
+ }
+ wldev_iovar_setint(dev, "roamscan_mode", ROAMSCAN_MODE_WES);
+ } else {
+ if (chanlist_before.n == chanlist_after.n) {
+ return;
+ }
+ error = wldev_iovar_setbuf(dev, "roamscan_channels", &chanlist_after,
+ sizeof(wl_roam_channel_list_t), iobuf, sizeof(iobuf), NULL);
+ if (error) {
+ WL_ERR(("Failed to update roamscan channels, error = %d\n", error));
+ }
+ }
+}
+#endif /* ROAM_CHANNEL_CACHE */
+#endif /* ESCAN_CHANNEL_CACHE */
diff --git a/bcmdhd.101.10.361.x/wlc_types.h b/bcmdhd.101.10.361.x/wlc_types.h
new file mode 100755
index 0000000..fd98363
--- /dev/null
+++ b/bcmdhd.101.10.361.x/wlc_types.h
@@ -0,0 +1,714 @@
+/*
+ * Forward declarations for commonly used wl driver structs
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+#ifndef _wlc_types_h_
+#define _wlc_types_h_
+#include <wlioctl.h>
+
+/* Version of WLC interface to be returned as a part of wl_wlc_version structure.
+ */
+
+#define WLC_API_VERSION_MAJOR 14
+#define WLC_API_VERSION_MINOR 0
+
+/* forward declarations */
+
+typedef struct wlc_info wlc_info_t;
+typedef struct join_pref join_pref_t;
+typedef struct wlcband wlcband_t;
+typedef struct wlc_cmn_info wlc_cmn_info_t;
+typedef struct wlc_assoc_info wlc_assoc_info_t;
+typedef struct wlc_pm_info wlc_pm_info_t;
+
+typedef struct wlc_bsscfg wlc_bsscfg_t;
+typedef struct wlc_mbss_info wlc_mbss_info_t;
+typedef struct wlc_spt wlc_spt_t;
+typedef struct scb scb_t;
+typedef struct scb_iter scb_iter_t;
+typedef struct vndr_ie_listel vndr_ie_listel_t;
+typedef struct wlc_if wlc_if_t;
+typedef struct wl_if wl_if_t;
+typedef struct led_info led_info_t;
+typedef struct bmac_led bmac_led_t;
+typedef struct bmac_led_info bmac_led_info_t;
+typedef struct seq_cmds_info wlc_seq_cmds_info_t;
+typedef struct ota_test_info ota_test_info_t;
+typedef struct apps_wlc_psinfo apps_wlc_psinfo_t;
+typedef struct scb_module scb_module_t;
+typedef struct hscb_module hscb_module_t;
+typedef struct wlc_scb_handle wlc_scbh_t;
+typedef struct ba_info ba_info_t;
+typedef struct wlc_frminfo wlc_frminfo_t;
+typedef struct amsdu_info amsdu_info_t;
+typedef struct txq_info txq_info_t;
+typedef struct wlc_mux_info wlc_mux_info_t;
+typedef struct wlc_scbq_info wlc_scbq_info_t;
+typedef struct txq txq_t;
+typedef struct flow_ctx_info wlc_flow_ctx_info_t;
+typedef struct cram_info cram_info_t;
+typedef struct wlc_txq_info wlc_txq_info_t;
+typedef struct wlc_hrt_info wlc_hrt_info_t;
+typedef struct wlc_hrt_to wlc_hrt_to_t;
+typedef struct wlc_cac wlc_cac_t;
+typedef struct ampdu_tx_info ampdu_tx_info_t;
+typedef struct ampdu_rx_info ampdu_rx_info_t;
+typedef struct scb_ampdu_tx scb_ampdu_tx_t;
+typedef struct scb_ampdu_tid_ini scb_ampdu_tid_ini_t;
+typedef struct wlc_ratesel_info wlc_ratesel_info_t;
+typedef struct ratesel_info ratesel_info_t;
+#ifdef WLFIPS
+typedef struct wl_fips_info wl_fips_info_t;
+#endif
+typedef struct wlc_ap_info wlc_ap_info_t;
+typedef struct cs_info cs_info_t;
+typedef struct wlc_scan_info wlc_scan_info_t;
+typedef struct wlc_scan_cmn_info wlc_scan_cmn_t;
+typedef struct wlc_slotted_bss_info wlc_slotted_bss_info_t;
+typedef struct wl_bcn_report_cfg wl_bcn_report_cfg_t;
+#ifdef WLAWDL
+typedef struct wlc_awdl_info wlc_awdl_info_t;
+#endif
+#ifdef BCMPCIE_HP2P
+typedef struct wlc_hp2p_info wlc_hp2p_info_t;
+typedef struct wlc_hp2p_cmn wlc_hp2p_cmn_t;
+#endif
+#ifdef WLDFSP
+typedef struct wlc_dfsp_info wlc_dfsp_info_t;
+#endif /* WLDFSP */
+typedef struct tdls_info tdls_info_t;
+typedef struct dls_info dls_info_t;
+typedef struct l2_filter_info l2_filter_info_t;
+typedef struct wlc_auth_info wlc_auth_info_t;
+typedef struct wlc_sup_info wlc_sup_info_t;
+typedef struct wlc_fbt_info wlc_fbt_info_t;
+typedef struct wlc_assoc_mgr_info wlc_assoc_mgr_info_t;
+typedef struct wlc_psta_info wlc_psta_info_t;
+typedef struct wlc_mcnx_info wlc_mcnx_info_t;
+typedef struct wlc_p2p_info wlc_p2p_info_t;
+typedef struct wlc_cxnoa_info wlc_cxnoa_info_t;
+typedef struct mchan_info mchan_info_t;
+typedef struct wlc_mchan_context wlc_mchan_context_t;
+typedef struct bta_info bta_info_t;
+typedef struct wowlpf_info wowlpf_info_t;
+typedef struct wlc_plt_info wlc_plt_pub_t;
+typedef struct antsel_info antsel_info_t;
+typedef struct bmac_pmq bmac_pmq_t;
+typedef struct wmf_info wmf_info_t;
+typedef struct wlc_rrm_info wlc_rrm_info_t;
+typedef struct rm_info rm_info_t;
+
+typedef struct wlc_dpc_info wlc_dpc_info_t;
+
+typedef struct wlc_11h_info wlc_11h_info_t;
+typedef struct wlc_tpc_info wlc_tpc_info_t;
+typedef struct wlc_csa_info wlc_csa_info_t;
+typedef struct wlc_quiet_info wlc_quiet_info_t;
+typedef struct cca_info cca_info_t;
+typedef struct itfr_info itfr_info_t;
+typedef struct wlc_wbtext_info wlc_wbtext_info_t;
+typedef struct wlc_wnm_info wlc_wnm_info_t;
+typedef struct wlc_wnm_pdt_info wlc_wnm_pdt_info_t;
+typedef struct wlc_wnm_roamscan_complete_data wlc_wnm_roamscan_complete_data_t;
+typedef struct wlc_wnm_roamscan_complete_res wlc_wnm_roamscan_complete_res_t;
+typedef struct wlc_11d_info wlc_11d_info_t;
+typedef struct wlc_cntry_info wlc_cntry_info_t;
+
+typedef struct wlc_dfs_info wlc_dfs_info_t;
+
+typedef struct bsscfg_module bsscfg_module_t;
+
+typedef struct wlc_prot_info wlc_prot_info_t;
+typedef struct wlc_prot_g_info wlc_prot_g_info_t;
+typedef struct wlc_prot_n_info wlc_prot_n_info_t;
+typedef struct wlc_prot_obss_info wlc_prot_obss_info_t;
+typedef struct wlc_obss_dynbw wlc_obss_dynbw_t;
+typedef struct wlc_11u_info wlc_11u_info_t;
+typedef struct wlc_probresp_info wlc_probresp_info_t;
+typedef struct wlc_wapi_info wlc_wapi_info_t;
+
+typedef struct wlc_tbtt_info wlc_tbtt_info_t;
+typedef struct wlc_nic_info wlc_nic_info_t;
+
+typedef struct wlc_bssload_info wlc_bssload_info_t;
+
+typedef struct wlc_pcb_info wlc_pcb_info_t;
+typedef struct wlc_txc_info wlc_txc_info_t;
+
+typedef struct wlc_trf_mgmt_ctxt wlc_trf_mgmt_ctxt_t;
+typedef struct wlc_trf_mgmt_info wlc_trf_mgmt_info_t;
+
+typedef struct wlc_net_detect_ctxt wlc_net_detect_ctxt_t;
+
+typedef struct wlc_powersel_info wlc_powersel_info_t;
+typedef struct powersel_info powersel_info_t;
+
+typedef struct wlc_lpc_info wlc_lpc_info_t;
+typedef struct lpc_info lpc_info_t;
+typedef struct rate_lcb_info rate_lcb_info_t;
+typedef struct wlc_txbf_info wlc_txbf_info_t;
+typedef struct wlc_mutx_info wlc_mutx_info_t;
+typedef struct wlc_murx_info wlc_murx_info_t;
+
+typedef struct wlc_olpc_eng_info_t wlc_olpc_eng_info_t;
+/* used by olpc to register for callbacks from stf */
+typedef void (*wlc_stf_txchain_evt_notify)(wlc_info_t *wlc);
+
+typedef struct wlc_rfc wlc_rfc_t;
+typedef struct wlc_pktc_info wlc_pktc_info_t;
+
+typedef struct wlc_mfp_info wlc_mfp_info_t;
+
+typedef struct wlc_mdns_info wlc_mdns_info_t;
+
+typedef struct wlc_macfltr_info wlc_macfltr_info_t;
+
+typedef struct wlc_bs_data_info wlc_bs_data_info_t;
+
+typedef struct wlc_keymgmt wlc_keymgmt_t;
+typedef struct wlc_key wlc_key_t;
+typedef struct wlc_key_info wlc_key_info_t;
+
+typedef struct wlc_hw wlc_hw_t;
+typedef struct wlc_hw_info wlc_hw_info_t;
+typedef struct wlc_hwband wlc_hwband_t;
+
+typedef struct wlc_rx_stall_info wlc_rx_stall_info_t;
+typedef struct wlc_txs_hist wlc_txs_hist_t;
+
+typedef struct wlc_tx_stall_info wlc_tx_stall_info_t;
+typedef struct wlc_tx_stall_counters wlc_tx_stall_counters_t;
+
+typedef struct wlc_rmc_info wlc_rmc_info_t;
+
+typedef struct wlc_iem_info wlc_iem_info_t;
+
+typedef struct wlc_ier_info wlc_ier_info_t;
+typedef struct wlc_ier_reg wlc_ier_reg_t;
+typedef struct wlc_ier_reg_data wlc_ier_reg_data_t;
+typedef struct wlc_ier_reg_cmn wlc_ier_reg_cmn_t;
+
+typedef struct wlc_filter_ie_info wlc_filter_ie_info_t;
+
+typedef struct wlc_ht_info wlc_ht_info_t;
+typedef struct wlc_obss_info wlc_obss_info_t;
+typedef struct wlc_vht_info wlc_vht_info_t;
+typedef struct wlc_akm_info wlc_akm_info_t;
+
+typedef struct wlc_bss_info wlc_bss_info_t;
+
+typedef struct wlc_hs20_info wlc_hs20_info_t;
+typedef struct wlc_hs20_cmn wlc_hs20_cmn_t;
+typedef struct wlc_pmkid_info wlc_pmkid_info_t;
+typedef struct wlc_btc_info wlc_btc_info_t;
+typedef struct wlc_btc_hw_info wlc_btc_hw_info_t;
+
+typedef struct wlc_txh_info wlc_txh_info_t;
+
+typedef struct wlc_staprio_info wlc_staprio_info_t;
+typedef struct wlc_stamon_info wlc_stamon_info_t;
+typedef struct wlc_monitor_info wlc_monitor_info_t;
+typedef struct monitor_info monitor_info_t;
+
+typedef struct wlc_debug_crash_info wlc_debug_crash_info_t;
+
+typedef struct wlc_nan_info wlc_nan_info_t;
+typedef struct wlc_edv_info wlc_edv_info_t;
+typedef struct wlc_tsmap_info wlc_tsmap_info_t;
+
+typedef struct wlc_wds_info wlc_wds_info_t;
+typedef struct okc_info okc_info_t;
+typedef struct wlc_aibss_info wlc_aibss_info_t;
+typedef struct wlc_ipfo_info wlc_ipfo_info_t;
+typedef struct wlc_stats_info wlc_stats_info_t;
+
+typedef struct wlc_pps_info wlc_pps_info_t;
+
+typedef struct duration_info duration_info_t;
+
+typedef struct wlc_pdsvc_info wlc_pdsvc_info_t;
+
+typedef struct wlc_swdiv_info wlc_swdiv_info_t;
+typedef struct lqcm_stats lqcm_stats_t;
+
+/* For LTE Coex */
+typedef struct wlc_ltecx_info wlc_ltecx_info_t;
+
+typedef struct mws_scanreq_bms mws_scanreq_bms_t;
+
+typedef struct wlc_probresp_mac_filter_info wlc_probresp_mac_filter_info_t;
+
+typedef struct wlc_ltr_info wlc_ltr_info_t;
+
+typedef struct bwte_info bwte_info_t;
+
+typedef struct tbow_info tbow_info_t;
+
+typedef struct wlc_modesw_info wlc_modesw_info_t;
+
+typedef struct wlc_pm_mute_tx_info wlc_pm_mute_tx_t;
+
+typedef struct wlc_nr5gcx_info wlc_nr5gcx_info_t;
+
+typedef struct wlc_bcntrim_info wlc_bcntrim_info_t;
+typedef wl_bcntrim_cfg_v1_t wl_bcntrim_cfg_t;
+typedef wl_bcntrim_status_query_v1_t wl_bcntrim_status_query_t;
+typedef wl_bcntrim_status_v1_t wl_bcntrim_status_t;
+#define WL_BCNTRIM_STATUS_VERSION WL_BCNTRIM_STATUS_VERSION_1
+#define WL_BCNTRIM_CFG_VERSION WL_BCNTRIM_CFG_VERSION_1
+
+typedef struct wlc_ops_info wlc_ops_info_t;
+typedef wl_ops_cfg_v1_t wl_ops_cfg_t;
+typedef wl_ops_status_v1_t wl_ops_status_t;
+#define WL_OPS_STATUS_VERSION WL_OPS_STATUS_VERSION_1
+#define WL_OPS_CFG_VERSION WL_OPS_CFG_VERSION_1
+
+typedef struct wlc_psbw_info wlc_psbw_info_t;
+typedef wl_psbw_cfg_v1_t wl_psbw_cfg_t;
+typedef wl_psbw_status_v1_t wl_psbw_status_t;
+#define WL_PSBW_STATUS_VERSION WL_PSBW_STATUS_VERSION_1
+#define WL_PSBW_CFG_VERSION WL_PSBW_CFG_VERSION_1
+
+typedef struct wlc_smfs_info wlc_smfs_info_t;
+typedef struct wlc_misc_info wlc_misc_info_t;
+
+typedef struct wlc_eventq wlc_eventq_t;
+typedef struct wlc_event wlc_event_t;
+
+typedef struct wlc_bsscfg_psq_info wlc_bsscfg_psq_info_t;
+typedef struct wlc_bsscfg_viel_info wlc_bsscfg_viel_info_t;
+
+typedef struct wlc_txmod_info wlc_txmod_info_t;
+typedef struct tx_path_node tx_path_node_t;
+
+typedef struct wlc_linkstats_info wlc_linkstats_info_t;
+
+typedef struct wl_shub_info wl_shub_info_t;
+
+typedef struct wlc_lq_info wlc_lq_info_t;
+typedef struct chanim_info chanim_info_t;
+
+typedef struct wlc_mesh_info wlc_mesh_info_t;
+typedef struct wlc_wlfc_info wlc_wlfc_info_t;
+
+typedef struct wlc_frag_info wlc_frag_info_t;
+typedef struct wlc_bss_list wlc_bss_list_t;
+
+typedef struct wlc_msch_info wlc_msch_info_t;
+typedef struct wlc_msch_req_handle wlc_msch_req_handle_t;
+
+typedef struct wlc_randmac_info wlc_randmac_info_t;
+
+typedef struct wlc_chanctxt wlc_chanctxt_t;
+typedef struct wlc_chanctxt_info wlc_chanctxt_info_t;
+typedef struct wlc_sta_info wlc_sta_info_t;
+
+typedef struct health_check_info health_check_info_t;
+typedef struct wlc_act_frame_info wlc_act_frame_info_t;
+typedef struct nan_sched_req_handle nan_sched_req_handle_t;
+
+typedef struct wlc_qos_info wlc_qos_info_t;
+
+typedef struct wlc_assoc wlc_assoc_t;
+typedef struct wlc_roam wlc_roam_t;
+typedef struct wlc_pm_st wlc_pm_st_t;
+typedef struct wlc_pm_timers wlc_pm_timers_t;
+typedef struct wlc_wme wlc_wme_t;
+
+typedef struct wlc_link_qual wlc_link_qual_t;
+
+typedef struct wlc_rsdb_info wlc_rsdb_info_t;
+
+typedef struct wlc_asdb wlc_asdb_t;
+
+typedef struct rsdb_common_info rsdb_cmn_info_t;
+typedef struct rsdb_chan_sw_info rsdb_chan_sw_info_t;
+
+typedef struct wlc_macdbg_info wlc_macdbg_info_t;
+typedef struct wlc_rspec_info wlc_rspec_info_t;
+typedef struct wlc_ndis_info wlc_ndis_info_t;
+
+typedef struct wlc_join_pref wlc_join_pref_t;
+
+typedef struct wlc_scan_utils wlc_scan_utils_t;
+#ifdef ACKSUPR_MAC_FILTER
+typedef struct wlc_addrmatch_info wlc_addrmatch_info_t;
+#endif /* ACKSUPR_MAC_FILTER */
+
+typedef struct cca_ucode_counts cca_ucode_counts_t;
+typedef struct cca_chan_qual cca_chan_qual_t;
+
+typedef struct wlc_perf_utils wlc_perf_utils_t;
+typedef struct wlc_test_info wlc_test_info_t;
+
+typedef struct chanswitch_times chanswitch_times_t;
+typedef struct wlc_dump_info wlc_dump_info_t;
+
+typedef struct wlc_stf wlc_stf_t;
+typedef struct wlc_rsdb_policymgr_info wlc_rsdb_policymgr_info_t;
+
+typedef struct wlc_he_info wlc_he_info_t;
+typedef struct wlc_twt_info wlc_twt_info_t;
+
+/* TWT_STATS */
+typedef wl_twt_peer_stats_v1_t wl_twt_peer_stats_t;
+typedef wl_twt_stats_v1_t wl_twt_stats_t;
+typedef wl_twt_stats_cmd_v1_t wl_twt_stats_cmd_t;
+
+typedef struct wlc_heb_info wlc_heb_info_t;
+
+typedef struct resv_info resv_info_t;
+
+typedef struct wl_scan_summary wl_scan_summary_t;
+
+typedef struct wlc_stf_arb wlc_stf_arb_t;
+
+typedef struct wlc_stf_nss_request_st wlc_stf_nss_request_t;
+
+typedef struct wlc_stf_nss_request_q_st wlc_stf_nss_request_q_t;
+
+typedef struct wl_mimo_siso_stats wl_mimo_siso_stats_t;
+
+typedef struct wlc_mimo_ps_cfg wlc_mimo_ps_cfg_t;
+
+typedef struct wlc_hw_config wlc_hw_config_t;
+
+typedef struct wlc_stf_arb_mps_info wlc_stf_arb_mps_info_t;
+
+typedef struct wlc_tsync wlc_tsync_t;
+
+typedef struct wlc_fragdur_info wlc_fragdur_info_t;
+
+typedef struct wlc_mbo_info wlc_mbo_info_t;
+
+typedef struct wlc_rx_hc wlc_rx_hc_t;
+
+typedef struct wlc_oce_info wlc_oce_info_t;
+
+typedef struct wlc_fils_info wlc_fils_info_t;
+
+typedef struct wlc_sfd_cache wlc_sfd_cache_t;
+
+typedef struct wlc_ifstats_info wlc_ifstats_info_t;
+
+typedef struct wlc_mbo_oce_info wlc_mbo_oce_info_t;
+
+typedef struct wlc_esp_info wlc_esp_info_t;
+
+/* sta_info_v6 uses rateset_v2, which is supported only when RATESET_VERSION_ENABLED is defined */
+#ifdef RATESET_VERSION_ENABLED
+typedef sta_info_v6_t sta_info_t;
+#else
+typedef sta_info_v5_t sta_info_t;
+#endif /* RATESET_VERSION_ENABLED */
+
+typedef struct wl_roam_prof_band_v3 wl_roam_prof_band_t;
+typedef struct wl_roam_prof_v3 wl_roam_prof_t;
+
+typedef struct wlc_swdiv_stats_v2 wlc_swdiv_stats_t;
+typedef struct wl_dfs_ap_move_status_v2 wl_dfs_ap_move_status_t;
+
+typedef struct wl_utrace_capture_args_v2 wl_utrace_capture_args_t;
+typedef struct wl_pmalert_ucode_dbg_v2 wl_pmalert_ucode_dbg_t;
+
+typedef struct wl_proxd_collect_data_v3 wl_proxd_collect_data_t;
+typedef struct wl_proxd_collect_event_data_v3 wl_proxd_collect_event_data_t;
+
+typedef struct wlc_leakyapstats_info_v1 wlc_leakyapstats_info_t;
+
+typedef struct wlc_chctx_info wlc_chctx_info_t;
+
+typedef struct wlc_rpsnoa_info wlc_rpsnoa_info_t;
+
+/* Interface version mapping for versioned pfn structures */
+#undef PFN_SCANRESULT_VERSION
+#define PFN_SCANRESULT_VERSION PFN_SCANRESULT_VERSION_V2
+#define PFN_SCANRESULTS_VERSION PFN_SCANRESULTS_VERSION_V2
+#define PFN_LBEST_SCAN_RESULT_VERSION PFN_LBEST_SCAN_RESULT_VERSION_V2
+typedef wl_pfn_subnet_info_v2_t wl_pfn_subnet_info_t;
+typedef wl_pfn_net_info_v2_t wl_pfn_net_info_t;
+typedef wl_pfn_lnet_info_v2_t wl_pfn_lnet_info_t;
+typedef wl_pfn_lscanresults_v2_t wl_pfn_lscanresults_t;
+typedef wl_pfn_scanresults_v2_t wl_pfn_scanresults_t;
+typedef wl_pfn_scanresult_v2_1_t wl_pfn_scanresult_t;
+
+/* XXX: 13_10 uses interface_create_v3 */
+#define WL_INTERFACE_CREATE_VER WL_INTERFACE_CREATE_VER_3
+typedef wl_interface_create_v3_t wl_interface_create_t;
+
+#define WL_INTERFACE_INFO_VER WL_INTERFACE_INFO_VER_2
+typedef wl_interface_info_v2_t wl_interface_info_t;
+
+#define WL_PKTENG_RU_FILL_CURRENT_VER WL_PKTENG_RU_FILL_VER_2
+
+#ifdef WLAWDL
+typedef awdl_stats_core_v3_t awdl_stats_core_t;
+typedef awdl_stats_cmn_v3_t awdl_stats_cmn_t;
+typedef awdl_stats_v3_t awdl_stats_t;
+#endif
+typedef struct wlc_hwa_info wlc_hwa_info_t;
+#define UCM_PROFILE_VERSION UCM_PROFILE_VERSION_1
+typedef wlc_btcx_profile_v1_t wlc_btcx_profile_t;
+
+#ifndef WL_BTCXSTATS
+#define BTCX_STATS_VER BTCX_STATS_VER_3
+typedef wlc_btc_stats_v3_t wlc_btc_stats_t;
+#else
+#define BTCX_STATS_VER BTCX_STATS_VER_4
+typedef wlc_btc_stats_v4_t wlc_btc_stats_t;
+#endif
+
+#define WL_BTC_WIFI_PROT_VER WL_BTC_WIFI_PROT_VER_1
+typedef wl_btc_wifi_prot_v1_t wl_btc_wifi_prot_t;
+
+/* TXQ MUX Broadcast/Multicast module and cubby structure definition */
+typedef struct wlc_bcmcq_info wlc_bcmcq_info_t;
+typedef struct bcmc_cubby bcmc_cubby_t;
+
+typedef struct wlc_ratelinkmem_info wlc_ratelinkmem_info_t;
+
+typedef wl_proxd_params_tof_tune_v3_t wl_proxd_params_tof_tune_t;
+
+/* ranging context */
+typedef struct wlc_ftm_ranging_ctx wlc_ftm_ranging_ctx_t;
+
+#define ETD_DATA_VERSION ETD_DATA_VERSION_V1
+typedef etd_data_v1_t etd_data_t;
+typedef etd_tag_data_v1_t etd_tag_data_t;
+typedef join_classification_info_v1_t join_classification_info_t;
+typedef join_target_classification_info_v1_t join_target_classification_info_t;
+typedef join_assoc_state_v1_t join_assoc_state_t;
+typedef join_channel_v1_t join_channel_t;
+typedef join_total_attempts_num_v1_t join_total_attempts_num_t;
+
+typedef rmc_bss_info_v1_t rmc_bss_info_t;
+typedef rmc_candidate_info_v1_t rmc_candidate_info_t;
+
+typedef struct wlc_adps_info wlc_adps_info_t;
+typedef event_ecounters_config_request_v2_t ecounters_event_based_config_t;
+
+#define WL_BAM_CMD_ENABLE WL_BAM_CMD_ENABLE_V1
+#define WL_BAM_CMD_DISABLE WL_BAM_CMD_DISABLE_V1
+#define WL_BAM_CMD_CONFIG WL_BAM_CMD_CONFIG_V1
+#define WL_BAM_CMD_DUMP WL_BAM_CMD_DUMP_V1
+typedef struct wlc_bam_info wlc_bam_info_t;
+typedef struct wl_bam_iov_enable_v1 wl_bam_iov_enable_type;
+typedef struct wl_bam_iov_enable_v1 wl_bam_iov_disable_type;
+typedef struct wl_bam_iov_config_v1 wl_bam_iov_config_type;
+typedef struct wl_bam_iov_dump_v1 wl_bam_iov_dump_type;
+typedef struct wl_bam_iov_bcn_config_v1 wl_bam_iov_bcn_config_type;
+typedef struct wl_bam_iov_dump_bcn_elem_v1 wl_bam_iov_dump_bcn_elem_type;
+
+typedef struct chanswitch_hist_info wl_chsw_hist_info_t;
+typedef struct wlc_tdm_tx_info wlc_tdm_tx_info_t;
+#ifdef WL_TDMTX_TYPEDEF_HAS_ALIAS
+typedef tdmtx_cnt_v2_t tdmtx_cnt_t;
+typedef tdmtx_cnt_shm_v2_t tdmtx_cnt_shm_t;
+typedef wl_tdmtx_ecounters_v2_t wl_tdmtx_ecounters_t;
+#define WL_CNT_TDMTX_STRUCT_SZ (sizeof(tdmtx_cnt_t))
+#define WL_CNT_TDMTX_SHM_SZ (sizeof(tdmtx_cnt_shm_t))
+#endif
+typedef struct wlc_tvpm_info wlc_tvpm_info_t;
+
+#define WL_HEB_CURRENT_VER WL_HEB_VER_1
+
+typedef wl_heb_cnt_v1_t wl_heb_cnt_t;
+typedef wl_config_heb_fill_v1_t wl_config_heb_fill_t;
+typedef wl_heb_blk_params_v1_t wl_heb_blk_params_t;
+typedef wl_heb_reg_status_v1_t wl_heb_reg_status_t;
+typedef wl_heb_status_v1_t wl_heb_status_t;
+typedef wl_heb_int_cnt_v1_t wl_heb_int_cnt_t;
+
+typedef struct wl_proxd_rtt_sample_v2 wl_proxd_rtt_sample_t;
+typedef struct wl_proxd_rtt_result_v2 wl_proxd_rtt_result_t;
+
+#ifdef RATESET_VERSION_ENABLED
+/* all rateset_args structures and version updates will come here */
+#define RATESET_ARGS_VERSION (RATESET_ARGS_V2)
+typedef wl_rateset_args_v2_t wl_rateset_args_t;
+#endif /* RATESET_VERSION_ENABLED */
+
+#ifdef WL_PKTENG_STATS_TYPEDEF_HAS_ALIAS
+typedef wl_pkteng_stats_v2_t wl_pkteng_stats_t;
+#endif /* WL_PKTENG_STATS_TYPEDEF_HAS_ALIAS */
+
+#ifdef WL_BSS_INFO_TYPEDEF_HAS_ALIAS
+typedef wl_bss_info_v109_2_t wl_bss_info_t;
+
+typedef wl_gscan_bss_info_v3_t wl_gscan_bss_info_t;
+#define WL_GSCAN_INFO_FIXED_FIELD_SIZE (OFFSETOF(wl_gscan_bss_info_t, info))
+
+typedef wl_scan_results_v2_t wl_scan_results_t;
+/** size of wl_scan_results not including variable length array */
+#define WL_SCAN_RESULTS_FIXED_SIZE (OFFSETOF(wl_scan_results_t, bss_info))
+
+typedef wl_escan_result_v2_t wl_escan_result_t;
+#define WL_ESCAN_RESULTS_FIXED_SIZE (OFFSETOF(wl_escan_result_t, bss_info))
+
+typedef wl_iscan_results_v2_t wl_iscan_results_t;
+/** size of wl_iscan_results not including variable length array */
+#define WL_ISCAN_RESULTS_FIXED_SIZE \
+ (WL_SCAN_RESULTS_FIXED_SIZE + OFFSETOF(wl_iscan_results_t, results))
+
+typedef wl_gscan_result_v2_1_t wl_gscan_result_t;
+#define WL_GSCAN_RESULTS_FIXED_SIZE (OFFSETOF(wl_gscan_result_t, bss_info))
+#endif /* WL_BSS_INFO_TYPEDEF_HAS_ALIAS */
+
+#define WLC_ALLOCMEM_POSTREV80
+
+typedef uint8 wlc_mbsp_sel_t;
+typedef uint8 dot11_mbsp_sel_t;
+
+#ifdef WL_PKTENG_RU_VER
+typedef wl_pkteng_ru_v2_t wl_pkteng_ru_fill_t;
+#endif
+
+typedef wl_msglevel_v1_t wl_msglevel_t;
+#define WL_MSGLEVEL_STRUCT_VERSION WL_MSGLEVEL_STRUCT_VERSION_1
+
+typedef struct wlc_tpa_info wlc_tpa_info_t;
+
+typedef struct wl_event_mbo wl_event_mbo_t;
+typedef struct wl_event_mbo_cell_nw_switch wl_event_mbo_cell_nw_switch_t;
+typedef struct wbrq_process_data wbrq_process_data_t;
+typedef struct wbrq_process_res wbrq_process_res_t;
+typedef struct wlc_lphs_info wlc_lphs_info_t;
+
+typedef struct wlc_obj_registry wlc_obj_registry_t;
+
+#ifdef WL_EVENT_RX_FRAME_DATA_ALIAS
+#define BCM_RX_FRAME_DATA_VERSION BCM_RX_FRAME_DATA_VERSION_2
+typedef wl_event_rx_frame_data_v2_t wl_event_rx_frame_data_t;
+#endif
+
+#ifdef ECOUNTERS_COMPACT
+typedef wl_periodic_compact_cntrs_v3_t wl_periodic_compact_cntrs_t;
+typedef wl_periodic_txbf_cntrs_v1_t wl_periodic_txbf_cntrs_t;
+typedef wl_event_based_statistics_v3_t wl_event_based_statistics_t;
+#endif /* ECOUNTERS_COMPACT */
+
+typedef struct wlc_sae_cubby wlc_sae_cubby_t;
+typedef struct wlc_sae_info wlc_sae_info_t;
+
+typedef struct wlc_owe_info wlc_owe_info_t;
+
+#ifdef PMKID_VERSION_ENABLED
+#define PMKID_VERSION (PMKID_LIST_VER_3)
+typedef pmkid_v3_t pmkid_t;
+typedef pmkid_list_v3_t pmkid_list_t;
+#endif /* PMKID_VERSION_ENABLED */
+
+typedef struct wlc_txdc_info wlc_txdc_info_t;
+/* PHY health check */
+typedef wl_hc_desense_stats_v1_t wl_hc_desense_stats_t;
+typedef wl_hc_temp_stats_v2_t wl_hc_temp_stats_t;
+typedef wl_hc_vcocal_stats_v1_t wl_hc_vcocal_stats_t;
+typedef wl_hc_txpwr_stats_v2_t wl_hc_txpwr_stats_t;
+
+typedef wl_scb_ecounters_v2_t wl_scb_ecounters_t;
+typedef wl_nan_slot_ecounters_v1_t wl_nan_slot_ecounters_t;
+typedef wlc_scb_stats_v1_t wlc_scb_stats_t;
+
+typedef struct wlc_hw_cmn_info wlc_hw_cmn_info_t;
+typedef struct wlc_hw_tunables wlc_hw_tunables_t;
+
+typedef struct wlc_rxsig_info wlc_rxsig_info_t;
+typedef struct wlc_rxsig_hw_info wlc_rxsig_hw_info_t;
+
+typedef struct wlc_hw_macint_regs wlc_hw_macint_regs_t;
+
+#ifdef WL_SAE
+typedef struct wlc_sae_info wlc_sae_info_t;
+#endif
+#if defined(BCM_EWP) || defined (BCM_SDC)
+typedef wl_hist_compact_toss_stats_v2_t wl_hist_compact_toss_stats_t;
+#endif /* BCM_EWP || BCM_SDC */
+typedef struct wlc_ewp_info wlc_ewp_info_t;
+typedef struct wlc_ewp_htr_db wlc_ewp_htr_db_t;
+
+typedef struct wl_sc_info wl_sc_info_t;
+
+typedef struct wlc_swdiv_hw_info wlc_swdiv_hw_info_t;
+typedef struct wlc_ltecx_hw_info wlc_ltecx_hw_info_t;
+typedef struct wlc_stf_hw_info wlc_stf_hw_info_t;
+
+typedef struct wlc_macdbg_hw_info wlc_macdbg_hw_info_t;
+typedef struct wlc_perf_stats wlc_perf_stats_t;
+
+typedef wl_cnt_ge80mcst_v1_t wl_cnt_ge80mcst_t;
+typedef wl_cnt_ge40mcst_v1_t wl_cnt_ge40mcst_t;
+
+#ifdef HE_COUNTERS_VERSION_ENABLED
+#define HE_COUNTERS_VERSION (HE_COUNTERS_V2)
+typedef wl_he_cnt_wlc_v2_t wl_he_cnt_wlc_t;
+#endif /* HE_COUNTERS_VERSION_ENABLED */
+typedef wl_compact_he_cnt_wlc_v2_t wl_compact_he_cnt_wlc_t;
+
+typedef struct wlc_cm_info wlc_cm_info_t;
+typedef struct wlc_cm_data wlc_cm_data_t;
+
+typedef struct wlc_calload_info wlc_calload_info_t;
+typedef struct wlc_reinit_st wlc_reinit_st_t;
+typedef wl_prio_roam_prof_v1_t wl_prio_roam_prof_t;
+
+#ifdef WL_ANQPO_PEER_LIST_TYPEDEF_HAS_ALIAS
+#define WL_ANQPO_PEER_LIST_VERSION WL_ANQPO_PEER_LIST_VERSION_2
+typedef wl_anqpo_peer_list_v2_t wl_anqpo_peer_list_t;
+typedef wl_anqpo_peer_v2_t wl_anqpo_peer_t;
+#endif /* WL_ANQPO_PEER_LIST_TYPEDEF_HAS_ALIAS */
+
+typedef roamstats_counter_info_v1_t roamstats_counter_info_t;
+typedef roamstats_prev_roam_events_v1_t roamstats_prev_roam_events_t;
+typedef roamstats_reason_info_v1_t roamstats_reason_info_t;
+#undef SSSR_REG_INFO_VER
+#define SSSR_REG_INFO_VER SSSR_REG_INFO_VER_2
+
+typedef struct tdls_bss_info tdls_bss_info_t;
+typedef wl_bsstrans_rssi_rate_map_v2_t wl_bsstrans_rssi_rate_map_ext_t;
+
+#ifdef ETD
+#define MAX_DMAFIFO_DESC_ENTRIES MAX_DMAFIFO_DESC_ENTRIES_V1
+#define MAX_DMAFIFO_ENTRIES MAX_DMAFIFO_ENTRIES_V1
+typedef struct hnd_ext_trap_desc_entry_v1 dma_dentry_t;
+typedef struct hnd_ext_trap_dma_fifo_v1 dma_fifo_t;
+typedef struct hnd_ext_trap_axi_error_v1 hnd_ext_trap_axi_error_t;
+#else
+typedef void hnd_ext_trap_axi_error_t;
+#endif /* ETD */
+
+typedef struct wl_btm_event_type_data wl_btm_event_type_data_t;
+typedef struct wl_bssid_prune_evt_info wl_bssid_pruned_evt_info_t;
+
+#ifdef ETD
+typedef struct hnd_ext_trap_phydbg_v3 hnd_etd_phydbg_t;
+typedef struct hnd_ext_trap_wlc_mem_err_v3 hnd_ext_trap_wlc_mem_err_info_t;
+#define HND_EXT_TRAP_WLC_MEM_ERR_VER HND_EXT_TRAP_WLC_MEM_ERR_VER_V3
+#endif
+typedef key_update_info_v1_t key_update_info_t;
+typedef wl_omi_req_v1_t wl_omi_req_t;
+typedef wl_omi_status_v1_t wl_omi_status_t;
+#define WL_OMI_STATUS_VERSION WL_OMI_STATUS_VERSION_1
+#define WL_OMI_CONFIG_VERSION WL_OMI_CONFIG_VERSION_1
+
+#endif /* _wlc_types_h_ */
diff --git a/bcmdhd.101.10.361.x/wldev_common.c b/bcmdhd.101.10.361.x/wldev_common.c
new file mode 100755
index 0000000..9cf13d2
--- /dev/null
+++ b/bcmdhd.101.10.361.x/wldev_common.c
@@ -0,0 +1,537 @@
+/*
+ * Common function shared by Linux WEXT, cfg80211 and p2p drivers
+ *
+ * Copyright (C) 2020, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Dual:>>
+ */
+
+#include <osl.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+
+#include <wldev_common.h>
+#include <bcmutils.h>
+#ifdef WL_CFG80211
+#include <wl_cfg80211.h>
+#include <wl_cfgscan.h>
+#endif /* WL_CFG80211 */
+#include <dhd_config.h>
+
+#if defined(IL_BIGENDIAN)
+#include <bcmendian.h>
+#define htod32(i) (bcmswap32(i))
+#define htod16(i) (bcmswap16(i))
+#define dtoh32(i) (bcmswap32(i))
+#define dtoh16(i) (bcmswap16(i))
+#define htodchanspec(i) htod16(i)
+#define dtohchanspec(i) dtoh16(i)
+#else
+#define htod32(i) (i)
+#define htod16(i) (i)
+#define dtoh32(i) (i)
+#define dtoh16(i) (i)
+#define htodchanspec(i) (i)
+#define dtohchanspec(i) (i)
+#endif
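+
+/* Dongle ioctl payloads are little-endian: on big-endian hosts the htod/dtoh
+ * macros byte-swap, elsewhere they are the identity. E.g. an s32 iovar value
+ * is sent as htod32(val) and read back as dtoh32(*pval), as done below.
+ */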
+
+#if defined(CUSTOMER_DBG_PREFIX_ENABLE)
+#define USER_PREFIX_WLDEV "[wldev][wlan] "
+#define WLDEV_ERROR_TEXT USER_PREFIX_WLDEV
+#define WLDEV_INFO_TEXT USER_PREFIX_WLDEV
+#else
+#define WLDEV_ERROR_TEXT "WLDEV-ERROR) "
+#define WLDEV_INFO_TEXT "WLDEV-INFO) "
+#endif /* defined(CUSTOMER_DBG_PREFIX_ENABLE) */
+
+#define WLDEV_ERROR_MSG(x, args...) \
+ do { \
+ printf(WLDEV_ERROR_TEXT x, ## args); \
+ } while (0)
+#define WLDEV_ERROR(x) WLDEV_ERROR_MSG x
+
+#define WLDEV_INFO_MSG(x, args...) \
+ do { \
+ printf(WLDEV_INFO_TEXT x, ## args); \
+ } while (0)
+#define WLDEV_INFO(x) WLDEV_INFO_MSG x
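+
+/* The double-parenthesis convention passes a whole variadic message as one
+ * macro argument, e.g.:
+ *
+ *   WLDEV_ERROR(("wldev_iovar_setint: err=%d\n", err));
+ */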
+
+extern int dhd_ioctl_entry_local(struct net_device *net, wl_ioctl_t *ioc, int cmd);
+
+s32 wldev_ioctl(
+ struct net_device *dev, u32 cmd, void *arg, u32 len, u32 set)
+{
+ s32 ret = 0;
+ struct wl_ioctl ioc;
+
+#if defined(BCMDONGLEHOST)
+
+ bzero(&ioc, sizeof(ioc));
+ ioc.cmd = cmd;
+ ioc.buf = arg;
+ ioc.len = len;
+ ioc.set = set;
+ ret = dhd_ioctl_entry_local(dev, (wl_ioctl_t *)&ioc, cmd);
+#else
+ struct ifreq ifr;
+ mm_segment_t fs;
+
+ bzero(&ioc, sizeof(ioc));
+ ioc.cmd = cmd;
+ ioc.buf = arg;
+ ioc.len = len;
+ ioc.set = set;
+
+ strlcpy(ifr.ifr_name, dev->name, sizeof(ifr.ifr_name));
+ ifr.ifr_data = (caddr_t)&ioc;
+
+ fs = get_fs();
+ set_fs(get_ds());
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
+ ret = dev->do_ioctl(dev, &ifr, SIOCDEVPRIVATE);
+#else
+ ret = dev->netdev_ops->ndo_do_ioctl(dev, &ifr, SIOCDEVPRIVATE);
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
+ set_fs(fs);
+
+ ret = 0;
+#endif /* defined(BCMDONGLEHOST) */
+
+ return ret;
+}
+
+/*
+SET commands:
+cast the const buffer to non-const and call wldev_ioctl() with set = 1
+*/
+
+s32 wldev_ioctl_set(
+ struct net_device *dev, u32 cmd, const void *arg, u32 len)
+{
+
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+ return wldev_ioctl(dev, cmd, (void *)arg, len, 1);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+
+}
+
+s32 wldev_ioctl_get(
+ struct net_device *dev, u32 cmd, void *arg, u32 len)
+{
+ return wldev_ioctl(dev, cmd, (void *)arg, len, 0);
+}
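+
+/* Example of the GET path (illustrative, assuming the standard WLC_GET_RSSI
+ * ioctl and scb_val_t argument from wlioctl.h):
+ *
+ *   scb_val_t scb_val;
+ *   bzero(&scb_val, sizeof(scb_val));
+ *   err = wldev_ioctl_get(dev, WLC_GET_RSSI, &scb_val, sizeof(scb_val));
+ */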
+
+/* Format an iovar buffer, not bsscfg indexed. The bsscfg index will be
+ * taken care of in dhd_ioctl_entry. Internal use only, not exposed to
+ * wl_iw, wl_cfg80211 and wl_cfgp2p
+ */
+static s32 wldev_mkiovar(
+ const s8 *iovar_name, const s8 *param, u32 paramlen,
+ s8 *iovar_buf, u32 buflen)
+{
+ s32 iolen = 0;
+
+ iolen = bcm_mkiovar(iovar_name, param, paramlen, iovar_buf, buflen);
+ return iolen;
+}
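+
+/*
+ * Resulting buffer layout (informational): bcm_mkiovar() packs the iovar
+ * name, its terminating NUL, then the raw parameter bytes:
+ *
+ *	| iovar_name | '\0' | param bytes ... |
+ *
+ * and returns the total length, or 0 if buflen cannot hold it; callers below
+ * map that 0 to BCME_BUFTOOSHORT.
+ */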
+
+s32 wldev_iovar_getbuf(
+ struct net_device *dev, s8 *iovar_name,
+ const void *param, u32 paramlen, void *buf, u32 buflen, struct mutex* buf_sync)
+{
+ s32 ret = 0;
+ if (buf_sync) {
+ mutex_lock(buf_sync);
+ }
+
+ if (buf && (buflen > 0)) {
+ /* initialize the response buffer */
+ bzero(buf, buflen);
+ } else {
+ ret = BCME_BADARG;
+ goto exit;
+ }
+
+ ret = wldev_mkiovar(iovar_name, param, paramlen, buf, buflen);
+
+ if (!ret) {
+ ret = BCME_BUFTOOSHORT;
+ goto exit;
+ }
+ ret = wldev_ioctl_get(dev, WLC_GET_VAR, buf, buflen);
+exit:
+ if (buf_sync)
+ mutex_unlock(buf_sync);
+ return ret;
+}
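+
+/*
+ * Usage sketch (hypothetical): query the firmware "ver" iovar; the response
+ * overwrites the formatted request in the same buffer.
+ *
+ *	char buf[WLC_IOCTL_SMLEN];
+ *	s32 err = wldev_iovar_getbuf(dev, "ver", NULL, 0, buf, sizeof(buf), NULL);
+ *	if (err == BCME_OK)
+ *		WLDEV_INFO(("fw: %s\n", buf));
+ */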
+
+s32 wldev_iovar_setbuf(
+ struct net_device *dev, s8 *iovar_name,
+ const void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync)
+{
+ s32 ret = 0;
+ s32 iovar_len;
+ if (buf_sync) {
+ mutex_lock(buf_sync);
+ }
+ iovar_len = wldev_mkiovar(iovar_name, param, paramlen, buf, buflen);
+ if (iovar_len > 0)
+ ret = wldev_ioctl_set(dev, WLC_SET_VAR, buf, iovar_len);
+ else
+ ret = BCME_BUFTOOSHORT;
+
+ if (buf_sync)
+ mutex_unlock(buf_sync);
+ return ret;
+}
+
+s32 wldev_iovar_setint(
+ struct net_device *dev, s8 *iovar, s32 val)
+{
+ s8 iovar_buf[WLC_IOCTL_SMLEN];
+
+ val = htod32(val);
+ bzero(iovar_buf, sizeof(iovar_buf));
+ return wldev_iovar_setbuf(dev, iovar, &val, sizeof(val), iovar_buf,
+ sizeof(iovar_buf), NULL);
+}
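+
+/*
+ * Usage sketch (hypothetical): the int helpers hide the scratch buffer and
+ * byte-order handling for the common single-u32 case, e.g. disabling the
+ * "mpc" (minimum power consumption) iovar:
+ *
+ *	s32 err = wldev_iovar_setint(dev, "mpc", 0);
+ */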
+
+s32 wldev_iovar_getint(
+ struct net_device *dev, s8 *iovar, s32 *pval)
+{
+ s8 iovar_buf[WLC_IOCTL_SMLEN];
+ s32 err;
+
+ bzero(iovar_buf, sizeof(iovar_buf));
+ err = wldev_iovar_getbuf(dev, iovar, pval, sizeof(*pval), iovar_buf,
+ sizeof(iovar_buf), NULL);
+	if (err == 0) {
+ memcpy(pval, iovar_buf, sizeof(*pval));
+ *pval = dtoh32(*pval);
+ }
+ return err;
+}
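+
+/*
+ * Usage sketch (hypothetical): read a 32-bit iovar back; the dtoh32()
+ * conversion has already been applied on return.
+ *
+ *	s32 mpc = 0;
+ *	if (wldev_iovar_getint(dev, "mpc", &mpc) == BCME_OK)
+ *		WLDEV_INFO(("mpc = %d\n", mpc));
+ */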
+
+/** Format a bsscfg indexed iovar buffer. The bsscfg index will be
+ * taken care of in dhd_ioctl_entry. Internal use only, not exposed to
+ * wl_iw, wl_cfg80211 and wl_cfgp2p
+ */
+s32 wldev_mkiovar_bsscfg(
+ const s8 *iovar_name, const s8 *param, s32 paramlen,
+ s8 *iovar_buf, s32 buflen, s32 bssidx)
+{
+ const s8 *prefix = "bsscfg:";
+ s8 *p;
+ u32 prefixlen;
+ u32 namelen;
+ u32 iolen;
+
+ /* initialize buffer */
+ if (!iovar_buf || buflen <= 0)
+ return BCME_BADARG;
+ bzero(iovar_buf, buflen);
+
+ if (bssidx == 0) {
+ return wldev_mkiovar(iovar_name, param, paramlen,
+ iovar_buf, buflen);
+ }
+
+	prefixlen = (u32) strlen(prefix); /* length of bsscfg prefix */
+	namelen = (u32) strlen(iovar_name) + 1; /* length of iovar name + null */
+ iolen = prefixlen + namelen + sizeof(u32) + paramlen;
+
+ if (iolen > (u32)buflen) {
+ WLDEV_ERROR(("wldev_mkiovar_bsscfg: buffer is too short\n"));
+ return BCME_BUFTOOSHORT;
+ }
+
+ p = (s8 *)iovar_buf;
+
+ /* copy prefix, no null */
+ memcpy(p, prefix, prefixlen);
+ p += prefixlen;
+
+ /* copy iovar name including null */
+ memcpy(p, iovar_name, namelen);
+ p += namelen;
+
+ /* bss config index as first param */
+ bssidx = htod32(bssidx);
+ memcpy(p, &bssidx, sizeof(u32));
+ p += sizeof(u32);
+
+ /* parameter buffer follows */
+ if (paramlen)
+ memcpy(p, param, paramlen);
+
+	return iolen;
+}
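+
+/*
+ * Resulting buffer layout for bssidx != 0 (informational):
+ *
+ *	| "bsscfg:" | iovar_name | '\0' | bssidx (u32, dongle order) | params |
+ *
+ * For bssidx == 0 the plain wldev_mkiovar() encoding is used instead, since
+ * the primary interface needs no prefix.
+ */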
+
+s32 wldev_iovar_getbuf_bsscfg(
+ struct net_device *dev, s8 *iovar_name,
+ void *param, s32 paramlen, void *buf, s32 buflen, s32 bsscfg_idx, struct mutex* buf_sync)
+{
+	s32 ret = 0;
+	s32 iovar_len;
+	if (buf_sync) {
+		mutex_lock(buf_sync);
+	}
+
+	/* bail out before issuing the ioctl if the iovar could not be formatted */
+	iovar_len = wldev_mkiovar_bsscfg(iovar_name, param, paramlen, buf, buflen, bsscfg_idx);
+	if (iovar_len > 0)
+		ret = wldev_ioctl_get(dev, WLC_GET_VAR, buf, buflen);
+	else
+		ret = BCME_BUFTOOSHORT;
+
+	if (buf_sync) {
+		mutex_unlock(buf_sync);
+	}
+	return ret;
+}
+
+s32 wldev_iovar_setbuf_bsscfg(
+ struct net_device *dev, const s8 *iovar_name,
+ const void *param, s32 paramlen,
+ void *buf, s32 buflen, s32 bsscfg_idx, struct mutex* buf_sync)
+{
+ s32 ret = 0;
+ s32 iovar_len;
+ if (buf_sync) {
+ mutex_lock(buf_sync);
+ }
+ iovar_len = wldev_mkiovar_bsscfg(iovar_name, param, paramlen, buf, buflen, bsscfg_idx);
+ if (iovar_len > 0)
+ ret = wldev_ioctl_set(dev, WLC_SET_VAR, buf, iovar_len);
+ else {
+ ret = BCME_BUFTOOSHORT;
+ }
+
+ if (buf_sync) {
+ mutex_unlock(buf_sync);
+ }
+ return ret;
+}
+
+s32 wldev_iovar_setint_bsscfg(
+ struct net_device *dev, s8 *iovar, s32 val, s32 bssidx)
+{
+ s8 iovar_buf[WLC_IOCTL_SMLEN];
+
+ val = htod32(val);
+ bzero(iovar_buf, sizeof(iovar_buf));
+ return wldev_iovar_setbuf_bsscfg(dev, iovar, &val, sizeof(val), iovar_buf,
+ sizeof(iovar_buf), bssidx, NULL);
+}
+
+s32 wldev_iovar_getint_bsscfg(
+ struct net_device *dev, s8 *iovar, s32 *pval, s32 bssidx)
+{
+ s8 iovar_buf[WLC_IOCTL_SMLEN];
+ s32 err;
+
+ bzero(iovar_buf, sizeof(iovar_buf));
+ err = wldev_iovar_getbuf_bsscfg(dev, iovar, pval, sizeof(*pval), iovar_buf,
+ sizeof(iovar_buf), bssidx, NULL);
+	if (err == 0) {
+ memcpy(pval, iovar_buf, sizeof(*pval));
+ *pval = dtoh32(*pval);
+ }
+ return err;
+}
+
+int wldev_get_link_speed(
+ struct net_device *dev, int *plink_speed)
+{
+ int error;
+
+	if (!plink_speed)
+		return -EINVAL;
+ *plink_speed = 0;
+ error = wldev_ioctl_get(dev, WLC_GET_RATE, plink_speed, sizeof(int));
+ if (unlikely(error))
+ return error;
+
+ /* Convert internal 500Kbps to Kbps */
+ *plink_speed *= 500;
+ return error;
+}
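+
+/*
+ * Example (informational): the firmware reports the rate in 500 Kbps units,
+ * so a legacy 54 Mbps link is reported as 108, and 108 * 500 = 54000 Kbps.
+ */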
+
+int wldev_get_rssi(
+ struct net_device *dev, scb_val_t *scb_val)
+{
+	if (!scb_val)
+		return -EINVAL;
+	bzero(scb_val, sizeof(scb_val_t));
+	/* scb_val->val returns the RSSI and is still in dongle byte order */
+	return wldev_ioctl_get(dev, WLC_GET_RSSI, scb_val, sizeof(scb_val_t));
+}
+
+int wldev_get_ssid(
+ struct net_device *dev, wlc_ssid_t *pssid)
+{
+ int error;
+
+	if (!pssid)
+		return -EINVAL;
+ bzero(pssid, sizeof(wlc_ssid_t));
+ error = wldev_ioctl_get(dev, WLC_GET_SSID, pssid, sizeof(wlc_ssid_t));
+ if (unlikely(error))
+ return error;
+ pssid->SSID_len = dtoh32(pssid->SSID_len);
+ return error;
+}
+
+int wldev_get_band(
+ struct net_device *dev, uint *pband)
+{
+ int error;
+
+	if (!pband)
+		return -EINVAL;
+	*pband = 0;
+ error = wldev_ioctl_get(dev, WLC_GET_BAND, pband, sizeof(uint));
+ return error;
+}
+
+int wldev_set_band(
+ struct net_device *dev, uint band)
+{
+ int error = -1;
+
+ if ((band == WLC_BAND_AUTO) || (band == WLC_BAND_5G) || (band == WLC_BAND_2G)) {
+ error = wldev_ioctl_set(dev, WLC_SET_BAND, &band, sizeof(band));
+ if (!error)
+ dhd_bus_band_set(dev, band);
+ }
+ return error;
+}
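+
+/*
+ * Usage sketch (hypothetical): lock the radio to 2.4 GHz; on success the
+ * choice is also cached in the DHD bus layer.
+ *
+ *	if (wldev_set_band(dev, WLC_BAND_2G))
+ *		WLDEV_ERROR(("band switch rejected\n"));
+ */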
+
+int wldev_get_datarate(struct net_device *dev, int *datarate)
+{
+ int error = 0;
+
+ error = wldev_ioctl_get(dev, WLC_GET_RATE, datarate, sizeof(int));
+	if (error)
+		return -1;
+
+	*datarate = dtoh32(*datarate);
+
+ return error;
+}
+
+#ifdef WL_CFG80211
+extern chanspec_t
+wl_chspec_driver_to_host(chanspec_t chanspec);
+#define WL_EXTRA_BUF_MAX 2048
+int wldev_get_mode(
+ struct net_device *dev, uint8 *cap, uint8 caplen)
+{
+ int error = 0;
+ int chanspec = 0;
+ uint16 band = 0;
+ uint16 bandwidth = 0;
+ wl_bss_info_t *bss = NULL;
+ char* buf = NULL;
+
+ buf = kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL);
+ if (!buf) {
+ WLDEV_ERROR(("%s:ENOMEM\n", __FUNCTION__));
+ return -ENOMEM;
+ }
+
+ *(u32*) buf = htod32(WL_EXTRA_BUF_MAX);
+ error = wldev_ioctl_get(dev, WLC_GET_BSS_INFO, (void*)buf, WL_EXTRA_BUF_MAX);
+ if (error) {
+ WLDEV_ERROR(("%s:failed:%d\n", __FUNCTION__, error));
+ kfree(buf);
+ buf = NULL;
+ return error;
+ }
+	/* the bss_info record follows the 4-byte buffer-length word at buf[0] */
+	bss = (wl_bss_info_t*)(buf + 4);
+ chanspec = wl_chspec_driver_to_host(bss->chanspec);
+
+ band = chanspec & WL_CHANSPEC_BAND_MASK;
+ bandwidth = chanspec & WL_CHANSPEC_BW_MASK;
+
+ if (band == WL_CHANSPEC_BAND_2G) {
+ if (bss->n_cap)
+ strlcpy(cap, "n", caplen);
+ else
+ strlcpy(cap, "bg", caplen);
+ } else if (band == WL_CHANSPEC_BAND_5G) {
+ if (bandwidth == WL_CHANSPEC_BW_80)
+ strlcpy(cap, "ac", caplen);
+ else if ((bandwidth == WL_CHANSPEC_BW_40) || (bandwidth == WL_CHANSPEC_BW_20)) {
+ if ((bss->nbss_cap & 0xf00) && (bss->n_cap))
+ strlcpy(cap, "n|ac", caplen);
+ else if (bss->n_cap)
+ strlcpy(cap, "n", caplen);
+ else if (bss->vht_cap)
+ strlcpy(cap, "ac", caplen);
+ else
+ strlcpy(cap, "a", caplen);
+ } else {
+ WLDEV_ERROR(("wldev_get_mode: Mode get failed\n"));
+ error = BCME_ERROR;
+ }
+
+ }
+ kfree(buf);
+ buf = NULL;
+ return error;
+}
+#endif /* WL_CFG80211 */
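+
+/*
+ * Summary of the capability strings produced by wldev_get_mode()
+ * (informational):
+ *
+ *	2.4 GHz:          "n" if HT capable, else "bg"
+ *	5 GHz, 80 MHz:    "ac"
+ *	5 GHz, 20/40 MHz: "n|ac", "n", "ac" or "a" depending on HT/VHT caps
+ */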
+
+int wldev_set_country(
+ struct net_device *dev, char *country_code, bool notify, int revinfo)
+{
+#if defined(BCMDONGLEHOST)
+ int error = -1;
+ wl_country_t cspec = {{0}, 0, {0}};
+
+ if (!country_code)
+ return error;
+
+ cspec.rev = revinfo;
+ strlcpy(cspec.country_abbrev, country_code, WL_CCODE_LEN + 1);
+ strlcpy(cspec.ccode, country_code, WL_CCODE_LEN + 1);
+ error = dhd_conf_map_country_list(dhd_get_pub(dev), &cspec);
+ if (error)
+ dhd_get_customized_country_code(dev, (char *)&cspec.country_abbrev, &cspec);
+ error = dhd_conf_set_country(dhd_get_pub(dev), &cspec);
+ if (error < 0) {
+ WLDEV_ERROR(("%s: set country for %s as %s rev %d failed\n",
+ __FUNCTION__, country_code, cspec.ccode, cspec.rev));
+ return error;
+ }
+ dhd_conf_fix_country(dhd_get_pub(dev));
+ dhd_conf_get_country(dhd_get_pub(dev), &cspec);
+ dhd_bus_country_set(dev, &cspec, notify);
+ printf("%s: set country for %s as %s rev %d\n",
+ __FUNCTION__, country_code, cspec.ccode, cspec.rev);
+#endif /* defined(BCMDONGLEHOST) */
+ return 0;
+}
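+
+/*
+ * Usage sketch (hypothetical, TRUE as defined in typedefs.h): apply a
+ * regulatory domain with an explicit revision and notify the lower layers.
+ *
+ *	if (wldev_set_country(dev, "US", TRUE, 0))
+ *		WLDEV_ERROR(("country set failed\n"));
+ */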